author     Benjamin Tissoires <benjamin.tissoires@redhat.com>  2022-10-05 10:21:55 +0100
committer  Benjamin Tissoires <benjamin.tissoires@redhat.com>  2022-10-05 10:21:55 +0100
commit     edd1533d3ccd82dd5d600986d27d524e6be4c5fd (patch)
tree       1ac5ae82ea63114d5c13212e2819531e4507f800
parent     Merge branch 'for-6.1/core' into for-linus (diff)
parent     hid: hid-logitech-hidpp: avoid unnecessary assignments in hidpp_connect_event (diff)
download   linux-dev-edd1533d3ccd82dd5d600986d27d524e6be4c5fd.tar.xz
           linux-dev-edd1533d3ccd82dd5d600986d27d524e6be4c5fd.zip
Merge branch 'for-6.1/logitech' into for-linus
- Add handling of all Bluetooth HID++ devices and fixes in HID++ (Bastien Nocera)
-rw-r--r--.clang-format1
-rw-r--r--.get_maintainer.ignore2
-rw-r--r--.mailmap9
-rw-r--r--Documentation/ABI/stable/sysfs-driver-mlxreg-io81
-rw-r--r--Documentation/ABI/testing/procfs-smaps_rollup1
-rw-r--r--Documentation/ABI/testing/sysfs-bus-cxl305
-rw-r--r--Documentation/ABI/testing/sysfs-bus-event_source-devices-caps18
-rw-r--r--Documentation/ABI/testing/sysfs-bus-surface_aggregator-tabletsw57
-rw-r--r--Documentation/ABI/testing/sysfs-devices-system-cpu1
-rw-r--r--Documentation/ABI/testing/sysfs-driver-xen-blkback2
-rw-r--r--Documentation/ABI/testing/sysfs-driver-xen-blkfront2
-rw-r--r--Documentation/ABI/testing/sysfs-fs-f2fs30
-rw-r--r--Documentation/ABI/testing/sysfs-kernel-mm-ksm2
-rw-r--r--Documentation/ABI/testing/sysfs-kernel-slab4
-rw-r--r--Documentation/PCI/endpoint/index.rst2
-rw-r--r--Documentation/PCI/endpoint/pci-vntb-function.rst129
-rw-r--r--Documentation/PCI/endpoint/pci-vntb-howto.rst167
-rw-r--r--Documentation/PCI/pci-iov-howto.rst7
-rw-r--r--Documentation/PCI/sysfs-pci.rst2
-rw-r--r--Documentation/admin-guide/README.rst30
-rw-r--r--Documentation/admin-guide/cgroup-v2.rst31
-rw-r--r--Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst14
-rw-r--r--Documentation/admin-guide/hw-vuln/spectre.rst8
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt145
-rw-r--r--Documentation/admin-guide/mm/concepts.rst2
-rw-r--r--Documentation/admin-guide/mm/damon/index.rst3
-rw-r--r--Documentation/admin-guide/mm/damon/lru_sort.rst294
-rw-r--r--Documentation/admin-guide/mm/damon/reclaim.rst8
-rw-r--r--Documentation/admin-guide/mm/damon/usage.rst28
-rw-r--r--Documentation/admin-guide/mm/hugetlbpage.rst4
-rw-r--r--Documentation/admin-guide/mm/index.rst1
-rw-r--r--Documentation/admin-guide/mm/memory-hotplug.rst4
-rw-r--r--Documentation/admin-guide/mm/shrinker_debugfs.rst135
-rw-r--r--Documentation/admin-guide/sysctl/kernel.rst12
-rw-r--r--Documentation/admin-guide/sysctl/net.rst2
-rw-r--r--Documentation/admin-guide/sysctl/vm.rst10
-rw-r--r--Documentation/arm64/elf_hwcaps.rst10
-rw-r--r--Documentation/arm64/silicon-errata.rst2
-rw-r--r--Documentation/atomic_bitops.txt10
-rw-r--r--Documentation/block/null_blk.rst22
-rw-r--r--Documentation/bpf/bpf_design_QA.rst25
-rw-r--r--Documentation/conf.py1
-rw-r--r--Documentation/core-api/bus-virt-phys-mapping.rst220
-rw-r--r--Documentation/core-api/dma-api-howto.rst14
-rw-r--r--Documentation/core-api/dma-api.rst14
-rw-r--r--Documentation/core-api/index.rst3
-rw-r--r--Documentation/core-api/mm-api.rst8
-rw-r--r--Documentation/dev-tools/kmemleak.rst1
-rw-r--r--Documentation/devicetree/bindings/Makefile4
-rw-r--r--Documentation/devicetree/bindings/arm/atmel-sysregs.txt15
-rw-r--r--Documentation/devicetree/bindings/arm/cpus.yaml1
-rw-r--r--Documentation/devicetree/bindings/arm/marvell/ap80x-system-controller.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/marvell/cp110-system-controller.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8186-sys-clock.yaml3
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8192-clock.yaml8
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8192-sys-clock.yaml3
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8195-sys-clock.yaml3
-rw-r--r--Documentation/devicetree/bindings/arm/msm/qcom,saw2.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.yaml2
-rw-r--r--Documentation/devicetree/bindings/arm/vexpress-sysreg.yaml10
-rw-r--r--Documentation/devicetree/bindings/ata/ahci-ceva.txt63
-rw-r--r--Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml189
-rw-r--r--Documentation/devicetree/bindings/bus/qcom,ssc-block-bus.yaml25
-rw-r--r--Documentation/devicetree/bindings/chosen.txt137
-rw-r--r--Documentation/devicetree/bindings/chrome/google,cros-ec-typec.yaml15
-rw-r--r--Documentation/devicetree/bindings/chrome/google,cros-kbd-led-backlight.yaml35
-rw-r--r--Documentation/devicetree/bindings/clock/efm32-clock.txt11
-rw-r--r--Documentation/devicetree/bindings/clock/fixed-factor-clock.yaml1
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,gcc-apq8064.yaml40
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,gcc-ipq8074.yaml5
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,gcc-msm8996.yaml16
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,gcc-other.yaml5
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,gcc-sdm845.yaml3
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,rpmcc.yaml85
-rw-r--r--Documentation/devicetree/bindings/clock/renesas,rzg2l-cpg.yaml7
-rw-r--r--Documentation/devicetree/bindings/clock/sprd,ums512-clk.yaml71
-rw-r--r--Documentation/devicetree/bindings/clock/st/st,flexgen.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/ti/davinci/pll.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/ti/dra7-atl.txt2
-rw-r--r--Documentation/devicetree/bindings/connector/usb-connector.yaml152
-rw-r--r--Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml1
-rw-r--r--Documentation/devicetree/bindings/cpufreq/qcom-cpufreq-nvmem.yaml7
-rw-r--r--Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/arm,pl11x.yaml15
-rw-r--r--Documentation/devicetree/bindings/display/bridge/sii902x.txt78
-rw-r--r--Documentation/devicetree/bindings/display/bridge/sil,sii9022.yaml131
-rw-r--r--Documentation/devicetree/bindings/display/ilitek,ili9341.txt27
-rw-r--r--Documentation/devicetree/bindings/display/panel/arm,rtsm-display.yaml27
-rw-r--r--Documentation/devicetree/bindings/display/panel/ilitek,ili9341.yaml49
-rw-r--r--Documentation/devicetree/bindings/display/panel/lg,lg4573.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/panel/raydium,rm67191.yaml1
-rw-r--r--Documentation/devicetree/bindings/display/simple-framebuffer.yaml1
-rw-r--r--Documentation/devicetree/bindings/display/sitronix,st7735r.yaml1
-rw-r--r--Documentation/devicetree/bindings/display/solomon,ssd1307fb.yaml7
-rw-r--r--Documentation/devicetree/bindings/dma/apple,admac.yaml80
-rw-r--r--Documentation/devicetree/bindings/dma/fsl,edma.yaml155
-rw-r--r--Documentation/devicetree/bindings/dma/fsl-edma.txt111
-rw-r--r--Documentation/devicetree/bindings/dma/mediatek,uart-dma.yaml1
-rw-r--r--Documentation/devicetree/bindings/dma/nvidia,tegra186-gpc-dma.yaml4
-rw-r--r--Documentation/devicetree/bindings/dma/qcom,bam-dma.yaml100
-rw-r--r--Documentation/devicetree/bindings/dma/qcom_bam_dma.txt52
-rw-r--r--Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml7
-rw-r--r--Documentation/devicetree/bindings/dma/ste-dma40.txt138
-rw-r--r--Documentation/devicetree/bindings/dma/stericsson,dma40.yaml159
-rw-r--r--Documentation/devicetree/bindings/dsp/mediatek,mt8186-dsp.yaml91
-rw-r--r--Documentation/devicetree/bindings/dsp/mediatek,mt8195-dsp.yaml10
-rw-r--r--Documentation/devicetree/bindings/eeprom/at25.yaml5
-rw-r--r--Documentation/devicetree/bindings/eeprom/microchip,93lc46b.yaml (renamed from Documentation/devicetree/bindings/misc/eeprom-93xx46.yaml)11
-rw-r--r--Documentation/devicetree/bindings/fpga/fpga-region.txt2
-rw-r--r--Documentation/devicetree/bindings/gpio/fsl-imx-gpio.yaml2
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-mvebu.txt93
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-mvebu.yaml146
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-pca9570.yaml1
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-pisosr.txt2
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-tpic2810.txt16
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-tpic2810.yaml51
-rw-r--r--Documentation/devicetree/bindings/gpio/renesas,rcar-gpio.yaml4
-rw-r--r--Documentation/devicetree/bindings/gpio/rockchip,gpio-bank.yaml2
-rw-r--r--Documentation/devicetree/bindings/gpio/sifive,gpio.yaml4
-rw-r--r--Documentation/devicetree/bindings/gpio/x-powers,axp209-gpio.yaml6
-rw-r--r--Documentation/devicetree/bindings/hwinfo/samsung,exynos-chipid.yaml (renamed from Documentation/devicetree/bindings/soc/samsung/exynos-chipid.yaml)2
-rw-r--r--Documentation/devicetree/bindings/hwinfo/samsung,s5pv210-chipid.yaml30
-rw-r--r--Documentation/devicetree/bindings/hwinfo/ti,k3-socinfo.yaml (renamed from Documentation/devicetree/bindings/soc/ti/k3-socinfo.yaml)2
-rw-r--r--Documentation/devicetree/bindings/hwmon/adi,adm1177.yaml1
-rw-r--r--Documentation/devicetree/bindings/hwmon/adt7475.yaml2
-rw-r--r--Documentation/devicetree/bindings/hwmon/ibm,p8-occ-hwmon.txt (renamed from Documentation/devicetree/bindings/i2c/ibm,p8-occ-hwmon.txt)0
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-efm32.txt33
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-mt65xx.yaml1
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-qcom-cci.txt96
-rw-r--r--Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml242
-rw-r--r--Documentation/devicetree/bindings/iio/accel/fsl,mma7455.yaml1
-rw-r--r--Documentation/devicetree/bindings/iio/adc/adi,ad7091r5.yaml2
-rw-r--r--Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml3
-rw-r--r--Documentation/devicetree/bindings/iio/adc/nxp,lpc1850-adc.yaml2
-rw-r--r--Documentation/devicetree/bindings/iio/adc/ti,adc108s102.yaml2
-rw-r--r--Documentation/devicetree/bindings/iio/adc/ti,ads124s08.yaml2
-rw-r--r--Documentation/devicetree/bindings/iio/amplifiers/adi,hmc425a.yaml1
-rw-r--r--Documentation/devicetree/bindings/iio/dac/adi,ad5766.yaml2
-rw-r--r--Documentation/devicetree/bindings/iio/imu/nxp,fxos8700.yaml2
-rw-r--r--Documentation/devicetree/bindings/input/adc-joystick.yaml2
-rw-r--r--Documentation/devicetree/bindings/input/adc-keys.txt67
-rw-r--r--Documentation/devicetree/bindings/input/adc-keys.yaml103
-rw-r--r--Documentation/devicetree/bindings/input/allwinner,sun4i-a10-lradc-keys.yaml5
-rw-r--r--Documentation/devicetree/bindings/input/ariel-pwrbutton.yaml1
-rw-r--r--Documentation/devicetree/bindings/input/azoteq,iqs7222.yaml41
-rw-r--r--Documentation/devicetree/bindings/input/fsl,mpr121-touchkey.yaml4
-rw-r--r--Documentation/devicetree/bindings/input/gpio-keys.yaml167
-rw-r--r--Documentation/devicetree/bindings/input/input.yaml24
-rw-r--r--Documentation/devicetree/bindings/input/iqs269a.yaml17
-rw-r--r--Documentation/devicetree/bindings/input/iqs626a.yaml13
-rw-r--r--Documentation/devicetree/bindings/input/iqs62x-keys.yaml9
-rw-r--r--Documentation/devicetree/bindings/input/max77650-onkey.yaml8
-rw-r--r--Documentation/devicetree/bindings/input/microchip,cap11xx.yaml2
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml8
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/ektf2127.txt2
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/rda,8810pl-intc.txt61
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/rda,8810pl-intc.yaml43
-rw-r--r--Documentation/devicetree/bindings/iommu/arm,smmu.yaml1
-rw-r--r--Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml17
-rw-r--r--Documentation/devicetree/bindings/leds/backlight/common.yaml2
-rw-r--r--Documentation/devicetree/bindings/leds/backlight/gpio-backlight.yaml2
-rw-r--r--Documentation/devicetree/bindings/leds/backlight/led-backlight.yaml2
-rw-r--r--Documentation/devicetree/bindings/leds/backlight/lm3630a-backlight.yaml2
-rw-r--r--Documentation/devicetree/bindings/leds/backlight/pwm-backlight.yaml2
-rw-r--r--Documentation/devicetree/bindings/leds/backlight/richtek,rt4831-backlight.yaml5
-rw-r--r--Documentation/devicetree/bindings/leds/cznic,turris-omnia-leds.yaml2
-rw-r--r--Documentation/devicetree/bindings/leds/issi,is31fl319x.yaml193
-rw-r--r--Documentation/devicetree/bindings/leds/leds-aat1290.txt77
-rw-r--r--Documentation/devicetree/bindings/leds/leds-bcm63138.yaml95
-rw-r--r--Documentation/devicetree/bindings/leds/leds-class-multicolor.yaml34
-rw-r--r--Documentation/devicetree/bindings/leds/leds-is31fl319x.txt61
-rw-r--r--Documentation/devicetree/bindings/leds/leds-lp50xx.yaml116
-rw-r--r--Documentation/devicetree/bindings/leds/leds-lp55xx.yaml222
-rw-r--r--Documentation/devicetree/bindings/leds/leds-pwm-multicolor.yaml51
-rw-r--r--Documentation/devicetree/bindings/leds/leds-qcom-lpg.yaml3
-rw-r--r--Documentation/devicetree/bindings/leds/skyworks,aat1290.yaml95
-rw-r--r--Documentation/devicetree/bindings/mailbox/arm,mhu.yaml1
-rw-r--r--Documentation/devicetree/bindings/mailbox/fsl,mu.yaml6
-rw-r--r--Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml46
-rw-r--r--Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml1
-rw-r--r--Documentation/devicetree/bindings/media/gpio-ir-receiver.txt20
-rw-r--r--Documentation/devicetree/bindings/media/gpio-ir-receiver.yaml40
-rw-r--r--Documentation/devicetree/bindings/media/rc.yaml2
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/canaan,k210-sram.yaml52
-rw-r--r--Documentation/devicetree/bindings/mfd/da9063.txt114
-rw-r--r--Documentation/devicetree/bindings/mfd/dlg,da9063.yaml132
-rw-r--r--Documentation/devicetree/bindings/mfd/gateworks-gsc.yaml1
-rw-r--r--Documentation/devicetree/bindings/mfd/google,cros-ec.yaml3
-rw-r--r--Documentation/devicetree/bindings/mfd/mps,mp2629.yaml4
-rw-r--r--Documentation/devicetree/bindings/mfd/mt6397.txt8
-rw-r--r--Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.txt94
-rw-r--r--Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.yaml190
-rw-r--r--Documentation/devicetree/bindings/mfd/qcom,tcsr.txt24
-rw-r--r--Documentation/devicetree/bindings/mfd/qcom,tcsr.yaml50
-rw-r--r--Documentation/devicetree/bindings/mfd/qcom-pm8xxx.yaml2
-rw-r--r--Documentation/devicetree/bindings/mfd/rohm,bd71815-pmic.yaml2
-rw-r--r--Documentation/devicetree/bindings/mfd/st,stm32-lptimer.yaml28
-rw-r--r--Documentation/devicetree/bindings/mfd/st,stm32-timers.yaml37
-rw-r--r--Documentation/devicetree/bindings/mfd/syscon.yaml2
-rw-r--r--Documentation/devicetree/bindings/mfd/ti,j721e-system-controller.yaml12
-rw-r--r--Documentation/devicetree/bindings/mips/lantiq/rcu.txt2
-rw-r--r--Documentation/devicetree/bindings/mmc/brcm,sdhci-brcmstb.yaml32
-rw-r--r--Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt94
-rw-r--r--Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt29
-rw-r--r--Documentation/devicetree/bindings/mmc/mmc-spi-slot.yaml77
-rw-r--r--Documentation/devicetree/bindings/mmc/mtk-sd.yaml62
-rw-r--r--Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml7
-rw-r--r--Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml1
-rw-r--r--Documentation/devicetree/bindings/mmc/samsung,exynos-dw-mshc.yaml160
-rw-r--r--Documentation/devicetree/bindings/mmc/samsung,s3c6410-sdhci.yaml81
-rw-r--r--Documentation/devicetree/bindings/mmc/samsung-sdhci.txt32
-rw-r--r--Documentation/devicetree/bindings/mmc/sdhci-msm.yaml84
-rw-r--r--Documentation/devicetree/bindings/mtd/microchip,mchp48l640.yaml7
-rw-r--r--Documentation/devicetree/bindings/mtd/mxc-nand.yaml2
-rw-r--r--Documentation/devicetree/bindings/mtd/partitions/arm,arm-firmware-suite.txt17
-rw-r--r--Documentation/devicetree/bindings/mtd/partitions/arm,arm-firmware-suite.yaml28
-rw-r--r--Documentation/devicetree/bindings/mtd/partitions/partition.yaml20
-rw-r--r--Documentation/devicetree/bindings/mtd/partitions/qcom,smem-part.yaml27
-rw-r--r--Documentation/devicetree/bindings/mtd/qcom,nandc.yaml27
-rw-r--r--Documentation/devicetree/bindings/net/altera_tse.txt2
-rw-r--r--Documentation/devicetree/bindings/net/cpsw.txt2
-rw-r--r--Documentation/devicetree/bindings/net/emac_rockchip.txt52
-rw-r--r--Documentation/devicetree/bindings/net/nfc/nxp,nci.yaml1
-rw-r--r--Documentation/devicetree/bindings/net/qcom-emac.txt2
-rw-r--r--Documentation/devicetree/bindings/net/rockchip,emac.yaml115
-rw-r--r--Documentation/devicetree/bindings/net/ti,dp83822.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/ti,dp83867.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/ti,dp83869.yaml2
-rw-r--r--Documentation/devicetree/bindings/opp/opp-v2-base.yaml10
-rw-r--r--Documentation/devicetree/bindings/opp/opp-v2-kryo-cpu.yaml15
-rw-r--r--Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml1
-rw-r--r--Documentation/devicetree/bindings/pci/host-generic-pci.yaml3
-rw-r--r--Documentation/devicetree/bindings/pci/mediatek-pcie.txt1
-rw-r--r--Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie-ep.yaml319
-rw-r--r--Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.txt245
-rw-r--r--Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.yaml350
-rw-r--r--Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt84
-rw-r--r--Documentation/devicetree/bindings/pci/qcom,pcie.yaml55
-rw-r--r--Documentation/devicetree/bindings/pci/renesas,pci-rcar-gen2.yaml186
-rw-r--r--Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml4
-rw-r--r--Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml38
-rw-r--r--Documentation/devicetree/bindings/perf/arm,ccn.yaml40
-rw-r--r--Documentation/devicetree/bindings/perf/arm-ccn.txt23
-rw-r--r--Documentation/devicetree/bindings/phy/amlogic,meson-axg-mipi-pcie-analog.yaml2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml16
-rw-r--r--Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/nuvoton,wpcm450-pinctrl.yaml2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-mt8186.yaml31
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-mt8192.yaml64
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-mt8195.yaml41
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,ipq6018-pinctrl.yaml2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,msm8909-tlmm.yaml152
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml4
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,sc7280-lpass-lpi-pinctrl.yaml5
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,sm6375-tlmm.yaml158
-rw-r--r--Documentation/devicetree/bindings/pinctrl/renesas,pfc.yaml1
-rw-r--r--Documentation/devicetree/bindings/pinctrl/renesas,rzv2m-pinctrl.yaml170
-rw-r--r--Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml4
-rw-r--r--Documentation/devicetree/bindings/pinctrl/sunplus,sp7021-pinctrl.yaml5
-rw-r--r--Documentation/devicetree/bindings/pinctrl/xlnx,zynqmp-pinctrl.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml6
-rw-r--r--Documentation/devicetree/bindings/power/amlogic,meson-sec-pwrc.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/apple,pmgr-pwrstate.yaml2
-rw-r--r--Documentation/devicetree/bindings/power/brcm,bcm63xx-power.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/renesas,apmu.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/renesas,rcar-sysc.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/reset/msm-poweroff.txt17
-rw-r--r--Documentation/devicetree/bindings/power/reset/qcom,pon.yaml8
-rw-r--r--Documentation/devicetree/bindings/power/reset/qcom,pshold.yaml35
-rw-r--r--Documentation/devicetree/bindings/power/reset/regulator-poweroff.yaml2
-rw-r--r--Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.yaml2
-rw-r--r--Documentation/devicetree/bindings/power/supply/active-semi,act8945a-charger.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/bq2415x.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/bq24190.yaml6
-rw-r--r--Documentation/devicetree/bindings/power/supply/bq24257.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/bq24735.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/bq2515x.yaml7
-rw-r--r--Documentation/devicetree/bindings/power/supply/bq256xx.yaml6
-rw-r--r--Documentation/devicetree/bindings/power/supply/bq25890.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/bq25980.yaml7
-rw-r--r--Documentation/devicetree/bindings/power/supply/bq27xxx.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/charger-manager.yaml1
-rw-r--r--Documentation/devicetree/bindings/power/supply/cpcap-battery.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/cpcap-charger.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/dlg,da9150-charger.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/dlg,da9150-fuel-gauge.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/ingenic,battery.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/isp1704.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/lego,ev3-battery.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/lltc,lt3651-charger.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/lltc,ltc294x.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/ltc4162-l.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/maxim,ds2760.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/maxim,max14656.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/maxim,max17040.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/maxim,max17042.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/maxim,max8903.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/nokia,n900-battery.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/olpc-battery.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/power-supply.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/qcom,pm8941-charger.yaml9
-rw-r--r--Documentation/devicetree/bindings/power/supply/richtek,rt5033-battery.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/richtek,rt9455.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/sc2731-charger.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/sc27xx-fg.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/stericsson,ab8500-btemp.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/stericsson,ab8500-chargalg.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/stericsson,ab8500-charger.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/stericsson,ab8500-fg.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/summit,smb347-charger.yaml6
-rw-r--r--Documentation/devicetree/bindings/power/supply/tps65090-charger.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/tps65217-charger.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/twl4030-charger.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/x-powers,axp20x-ac-power-supply.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/x-powers,axp20x-battery-power-supply.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/x-powers,axp20x-usb-power-supply.yaml4
-rw-r--r--Documentation/devicetree/bindings/powerpc/fsl/cpus.txt2
-rw-r--r--Documentation/devicetree/bindings/powerpc/fsl/mpc5200.txt2
-rw-r--r--Documentation/devicetree/bindings/powerpc/opal/power-mgt.txt2
-rw-r--r--Documentation/devicetree/bindings/regulator/nxp,pca9450-regulator.yaml11
-rw-r--r--Documentation/devicetree/bindings/remoteproc/mtk,scp.yaml2
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,adsp.yaml74
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,glink-edge.yaml72
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt90
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,qcs404-cdsp-pil.yaml1
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,sc7180-mss-pil.yaml245
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,sc7280-mss-pil.yaml266
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,sc7280-wpss-pil.yaml21
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,sdm845-adsp-pil.yaml1
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,smd-edge.yaml85
-rw-r--r--Documentation/devicetree/bindings/remoteproc/ti,pru-rproc.yaml5
-rw-r--r--Documentation/devicetree/bindings/reset/atmel,at91sam9260-reset.yaml68
-rw-r--r--Documentation/devicetree/bindings/reset/renesas,rzg2l-usbphy-ctrl.yaml1
-rw-r--r--Documentation/devicetree/bindings/reset/ti,tps380x-reset.yaml49
-rw-r--r--Documentation/devicetree/bindings/riscv/cpus.yaml5
-rw-r--r--Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml6
-rw-r--r--Documentation/devicetree/bindings/rtc/microcrystal,rv3032.yaml1
-rw-r--r--Documentation/devicetree/bindings/rtc/nuvoton,nct3018y.yaml45
-rw-r--r--Documentation/devicetree/bindings/rtc/nxp,pcf85063.txt32
-rw-r--r--Documentation/devicetree/bindings/rtc/nxp,pcf85063.yaml92
-rw-r--r--Documentation/devicetree/bindings/rtc/qcom-pm8xxx-rtc.yaml2
-rw-r--r--Documentation/devicetree/bindings/rtc/rtc-ds1307.txt52
-rw-r--r--Documentation/devicetree/bindings/rtc/rtc-ds1307.yaml102
-rw-r--r--Documentation/devicetree/bindings/rtc/rtc-mt6397.txt2
-rw-r--r--Documentation/devicetree/bindings/rtc/ti,k3-rtc.yaml62
-rw-r--r--Documentation/devicetree/bindings/rtc/trivial-rtc.yaml2
-rw-r--r--Documentation/devicetree/bindings/rtc/xlnx,zynqmp-rtc.yaml12
-rw-r--r--Documentation/devicetree/bindings/serial/8250.yaml1
-rw-r--r--Documentation/devicetree/bindings/serial/efm32-uart.txt20
-rw-r--r--Documentation/devicetree/bindings/serial/mediatek,uart.yaml120
-rw-r--r--Documentation/devicetree/bindings/serial/mtk-uart.txt59
-rw-r--r--Documentation/devicetree/bindings/serial/renesas,hscif.yaml1
-rw-r--r--Documentation/devicetree/bindings/serial/rs485.yaml4
-rw-r--r--Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml2
-rw-r--r--Documentation/devicetree/bindings/serio/ps2-gpio.txt23
-rw-r--r--Documentation/devicetree/bindings/serio/ps2-gpio.yaml64
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.yaml2
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml33
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,smd.yaml52
-rw-r--r--Documentation/devicetree/bindings/sound/adi,adau1977.yaml7
-rw-r--r--Documentation/devicetree/bindings/sound/adi,max98396.yaml30
-rw-r--r--Documentation/devicetree/bindings/sound/allwinner,sun50i-a64-codec-analog.yaml5
-rw-r--r--Documentation/devicetree/bindings/sound/atmel,sama5d2-classd.yaml100
-rw-r--r--Documentation/devicetree/bindings/sound/atmel,sama5d2-i2s.yaml85
-rw-r--r--Documentation/devicetree/bindings/sound/atmel,sama5d2-pdmic.yaml98
-rw-r--r--Documentation/devicetree/bindings/sound/atmel-classd.txt55
-rw-r--r--Documentation/devicetree/bindings/sound/atmel-i2s.txt46
-rw-r--r--Documentation/devicetree/bindings/sound/atmel-pdmic.txt55
-rw-r--r--Documentation/devicetree/bindings/sound/atmel-sam9x5-wm8731-audio.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/da9055.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/designware-i2s.txt35
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,micfil.txt33
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,micfil.yaml85
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,mqs.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,spdif.yaml4
-rw-r--r--Documentation/devicetree/bindings/sound/fsl-sai.txt11
-rw-r--r--Documentation/devicetree/bindings/sound/mt6358.txt4
-rw-r--r--Documentation/devicetree/bindings/sound/mt8186-afe-pcm.yaml175
-rw-r--r--Documentation/devicetree/bindings/sound/mt8186-mt6366-da7219-max98357.yaml75
-rw-r--r--Documentation/devicetree/bindings/sound/mt8186-mt6366-rt1019-rt5682s.yaml75
-rw-r--r--Documentation/devicetree/bindings/sound/nau8821.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra210-ahub.yaml4
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra210-mbdrc.yaml47
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra210-ope.yaml87
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra210-peq.yaml48
-rw-r--r--Documentation/devicetree/bindings/sound/nxp,tfa989x.yaml1
-rw-r--r--Documentation/devicetree/bindings/sound/qcom,sdm845.txt91
-rw-r--r--Documentation/devicetree/bindings/sound/qcom,sm8250.yaml3
-rw-r--r--Documentation/devicetree/bindings/sound/qcom,wcd934x.yaml2
-rw-r--r--Documentation/devicetree/bindings/sound/qcom,wsa883x.yaml74
-rw-r--r--Documentation/devicetree/bindings/sound/rockchip-i2s.yaml7
-rw-r--r--Documentation/devicetree/bindings/sound/sgtl5000.yaml1
-rw-r--r--Documentation/devicetree/bindings/sound/snps,designware-i2s.yaml94
-rw-r--r--Documentation/devicetree/bindings/sound/tas2562.yaml2
-rw-r--r--Documentation/devicetree/bindings/sound/tlv320adcx140.yaml6
-rw-r--r--Documentation/devicetree/bindings/sound/wlf,wm8731.yaml9
-rw-r--r--Documentation/devicetree/bindings/spi/cdns,qspi-nor-peripheral-props.yaml2
-rw-r--r--Documentation/devicetree/bindings/spi/cdns,qspi-nor.yaml2
-rw-r--r--Documentation/devicetree/bindings/spi/efm32-spi.txt39
-rw-r--r--Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml2
-rw-r--r--Documentation/devicetree/bindings/sram/qcom,imem.yaml75
-rw-r--r--Documentation/devicetree/bindings/sram/qcom,ocmem.yaml10
-rw-r--r--Documentation/devicetree/bindings/thermal/brcm,avs-ro-thermal.yaml2
-rw-r--r--Documentation/devicetree/bindings/thermal/nvidia,tegra124-soctherm.txt2
-rw-r--r--Documentation/devicetree/bindings/thermal/rcar-thermal.yaml2
-rw-r--r--Documentation/devicetree/bindings/thermal/thermal-zones.yaml1
-rw-r--r--Documentation/devicetree/bindings/trivial-devices.yaml4
-rw-r--r--Documentation/devicetree/bindings/ufs/qcom,ufs.yaml2
-rw-r--r--Documentation/devicetree/bindings/ufs/renesas,ufs.yaml61
-rw-r--r--Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml1
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.yaml18
-rw-r--r--Documentation/devicetree/bindings/virtio/mmio.yaml4
-rw-r--r--Documentation/devicetree/bindings/watchdog/faraday,ftwdt010.yaml2
-rw-r--r--Documentation/devicetree/bindings/watchdog/qcom,pm8916-wdt.txt28
-rw-r--r--Documentation/devicetree/bindings/watchdog/qcom,pm8916-wdt.yaml51
-rw-r--r--Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml1
-rw-r--r--Documentation/devicetree/bindings/watchdog/realtek,otto-wdt.yaml1
-rw-r--r--Documentation/devicetree/bindings/writing-bindings.rst2
-rw-r--r--Documentation/driver-api/cxl/memory-devices.rst8
-rw-r--r--Documentation/driver-api/dmaengine/provider.rst10
-rw-r--r--Documentation/driver-api/driver-model/devres.rst1
-rw-r--r--Documentation/driver-api/serial/driver.rst482
-rw-r--r--Documentation/driver-api/serial/serial-rs485.rst38
-rw-r--r--Documentation/driver-api/surface_aggregator/client.rst6
-rw-r--r--Documentation/driver-api/vfio-mediated-device.rst16
-rw-r--r--Documentation/fault-injection/fault-injection.rst7
-rw-r--r--Documentation/filesystems/ext4/blockmap.rst2
-rw-r--r--Documentation/filesystems/f2fs.rst5
-rw-r--r--Documentation/filesystems/fuse.rst29
-rw-r--r--Documentation/filesystems/proc.rst11
-rw-r--r--Documentation/filesystems/xfs-delayed-logging-design.rst361
-rw-r--r--Documentation/i2c/i2c-protocol.rst11
-rw-r--r--Documentation/i2c/i2c-sysfs.rst24
-rw-r--r--Documentation/i2c/instantiating-devices.rst16
-rw-r--r--Documentation/i2c/smbus-protocol.rst6
-rw-r--r--Documentation/index.rst2
-rw-r--r--Documentation/kbuild/kconfig-language.rst6
-rw-r--r--Documentation/loongarch/introduction.rst2
-rw-r--r--Documentation/m68k/kernel-options.rst4
-rw-r--r--Documentation/mm/active_mm.rst (renamed from Documentation/vm/active_mm.rst)0
-rw-r--r--Documentation/mm/arch_pgtable_helpers.rst (renamed from Documentation/vm/arch_pgtable_helpers.rst)0
-rw-r--r--Documentation/mm/balance.rst (renamed from Documentation/vm/balance.rst)0
-rw-r--r--Documentation/mm/bootmem.rst (renamed from Documentation/vm/bootmem.rst)0
-rw-r--r--Documentation/mm/damon/api.rst (renamed from Documentation/vm/damon/api.rst)0
-rw-r--r--Documentation/mm/damon/design.rst (renamed from Documentation/vm/damon/design.rst)0
-rw-r--r--Documentation/mm/damon/faq.rst (renamed from Documentation/vm/damon/faq.rst)0
-rw-r--r--Documentation/mm/damon/index.rst (renamed from Documentation/vm/damon/index.rst)0
-rw-r--r--Documentation/mm/free_page_reporting.rst (renamed from Documentation/vm/free_page_reporting.rst)0
-rw-r--r--Documentation/mm/frontswap.rst (renamed from Documentation/vm/frontswap.rst)0
-rw-r--r--Documentation/mm/highmem.rst (renamed from Documentation/vm/highmem.rst)31
-rw-r--r--Documentation/mm/hmm.rst (renamed from Documentation/vm/hmm.rst)0
-rw-r--r--Documentation/mm/hugetlbfs_reserv.rst (renamed from Documentation/vm/hugetlbfs_reserv.rst)0
-rw-r--r--Documentation/mm/hwpoison.rst (renamed from Documentation/vm/hwpoison.rst)0
-rw-r--r--Documentation/mm/index.rst (renamed from Documentation/vm/index.rst)0
-rw-r--r--Documentation/mm/ksm.rst (renamed from Documentation/vm/ksm.rst)0
-rw-r--r--Documentation/mm/memory-model.rst (renamed from Documentation/vm/memory-model.rst)2
-rw-r--r--Documentation/mm/mmu_notifier.rst (renamed from Documentation/vm/mmu_notifier.rst)0
-rw-r--r--Documentation/mm/numa.rst (renamed from Documentation/vm/numa.rst)0
-rw-r--r--Documentation/mm/oom.rst (renamed from Documentation/vm/oom.rst)0
-rw-r--r--Documentation/mm/overcommit-accounting.rst (renamed from Documentation/vm/overcommit-accounting.rst)0
-rw-r--r--Documentation/mm/page_allocation.rst (renamed from Documentation/vm/page_allocation.rst)0
-rw-r--r--Documentation/mm/page_cache.rst (renamed from Documentation/vm/page_cache.rst)0
-rw-r--r--Documentation/mm/page_frags.rst (renamed from Documentation/vm/page_frags.rst)0
-rw-r--r--Documentation/mm/page_migration.rst (renamed from Documentation/vm/page_migration.rst)0
-rw-r--r--Documentation/mm/page_owner.rst (renamed from Documentation/vm/page_owner.rst)0
-rw-r--r--Documentation/mm/page_reclaim.rst (renamed from Documentation/vm/page_reclaim.rst)0
-rw-r--r--Documentation/mm/page_table_check.rst (renamed from Documentation/vm/page_table_check.rst)0
-rw-r--r--Documentation/mm/page_tables.rst (renamed from Documentation/vm/page_tables.rst)0
-rw-r--r--Documentation/mm/physical_memory.rst (renamed from Documentation/vm/physical_memory.rst)0
-rw-r--r--Documentation/mm/process_addrs.rst (renamed from Documentation/vm/process_addrs.rst)0
-rw-r--r--Documentation/mm/remap_file_pages.rst (renamed from Documentation/vm/remap_file_pages.rst)0
-rw-r--r--Documentation/mm/shmfs.rst (renamed from Documentation/vm/shmfs.rst)0
-rw-r--r--Documentation/mm/slab.rst (renamed from Documentation/vm/slab.rst)0
-rw-r--r--Documentation/mm/slub.rst (renamed from Documentation/vm/slub.rst)0
-rw-r--r--Documentation/mm/split_page_table_lock.rst (renamed from Documentation/vm/split_page_table_lock.rst)0
-rw-r--r--Documentation/mm/swap.rst (renamed from Documentation/vm/swap.rst)0
-rw-r--r--Documentation/mm/transhuge.rst (renamed from Documentation/vm/transhuge.rst)0
-rw-r--r--Documentation/mm/unevictable-lru.rst (renamed from Documentation/vm/unevictable-lru.rst)0
-rw-r--r--Documentation/mm/vmalloc.rst (renamed from Documentation/vm/vmalloc.rst)0
-rw-r--r--Documentation/mm/vmalloced-kernel-stacks.rst (renamed from Documentation/vm/vmalloced-kernel-stacks.rst)0
-rw-r--r--Documentation/mm/vmemmap_dedup.rst (renamed from Documentation/vm/vmemmap_dedup.rst)72
-rw-r--r--Documentation/mm/z3fold.rst (renamed from Documentation/vm/z3fold.rst)0
-rw-r--r--Documentation/mm/zsmalloc.rst (renamed from Documentation/vm/zsmalloc.rst)0
-rw-r--r--Documentation/networking/bonding.rst9
-rw-r--r--Documentation/powerpc/elf_hwcaps.rst231
-rw-r--r--Documentation/powerpc/index.rst1
-rw-r--r--Documentation/process/kernel-docs.rst2
-rw-r--r--Documentation/s390/index.rst1
-rw-r--r--Documentation/s390/vfio-ap-locking.rst115
-rw-r--r--Documentation/s390/vfio-ap.rst498
-rw-r--r--Documentation/scsi/ufs.rst15
-rw-r--r--Documentation/sound/soc/codec.rst2
-rw-r--r--Documentation/sound/soc/platform.rst2
-rw-r--r--Documentation/sphinx/kerneldoc-preamble.sty22
-rw-r--r--Documentation/tools/rtla/rtla-timerlat-hist.rst2
-rw-r--r--Documentation/trace/index.rst1
-rw-r--r--Documentation/trace/kprobetrace.rst8
-rw-r--r--Documentation/trace/rv/da_monitor_instrumentation.rst171
-rw-r--r--Documentation/trace/rv/da_monitor_synthesis.rst147
-rw-r--r--Documentation/trace/rv/deterministic_automata.rst184
-rw-r--r--Documentation/trace/rv/index.rst14
-rw-r--r--Documentation/trace/rv/monitor_wip.rst55
-rw-r--r--Documentation/trace/rv/monitor_wwnr.rst45
-rw-r--r--Documentation/trace/rv/runtime-verification.rst231
-rw-r--r--Documentation/trace/uprobetracer.rst8
-rw-r--r--Documentation/translations/ja_JP/SubmittingPatches3
-rw-r--r--Documentation/translations/zh_CN/admin-guide/mm/damon/index.rst2
-rw-r--r--Documentation/translations/zh_CN/admin-guide/mm/damon/reclaim.rst2
-rw-r--r--Documentation/translations/zh_CN/admin-guide/mm/damon/usage.rst8
-rw-r--r--Documentation/translations/zh_CN/core-api/index.rst3
-rw-r--r--Documentation/translations/zh_CN/index.rst2
-rw-r--r--Documentation/translations/zh_CN/loongarch/introduction.rst4
-rw-r--r--Documentation/translations/zh_CN/mm/active_mm.rst (renamed from Documentation/translations/zh_CN/vm/active_mm.rst)2
-rw-r--r--Documentation/translations/zh_CN/mm/balance.rst (renamed from Documentation/translations/zh_CN/vm/balance.rst)2
-rw-r--r--Documentation/translations/zh_CN/mm/damon/api.rst (renamed from Documentation/translations/zh_CN/vm/damon/api.rst)2
-rw-r--r--Documentation/translations/zh_CN/mm/damon/design.rst (renamed from Documentation/translations/zh_CN/vm/damon/design.rst)2
-rw-r--r--Documentation/translations/zh_CN/mm/damon/faq.rst (renamed from Documentation/translations/zh_CN/vm/damon/faq.rst)2
-rw-r--r--Documentation/translations/zh_CN/mm/damon/index.rst (renamed from Documentation/translations/zh_CN/vm/damon/index.rst)5
-rw-r--r--Documentation/translations/zh_CN/mm/free_page_reporting.rst (renamed from Documentation/translations/zh_CN/vm/free_page_reporting.rst)2
-rw-r--r--Documentation/translations/zh_CN/mm/frontswap.rst (renamed from Documentation/translations/zh_CN/vm/frontswap.rst)2
-rw-r--r--Documentation/translations/zh_CN/mm/highmem.rst (renamed from Documentation/translations/zh_CN/vm/highmem.rst)2
-rw-r--r--Documentation/translations/zh_CN/mm/hmm.rst (renamed from Documentation/translations/zh_CN/vm/hmm.rst)2
-rw-r--r--Documentation/translations/zh_CN/mm/hugetlbfs_reserv.rst (renamed from Documentation/translations/zh_CN/vm/hugetlbfs_reserv.rst)2
-rw-r--r--Documentation/translations/zh_CN/mm/hwpoison.rst (renamed from Documentation/translations/zh_CN/vm/hwpoison.rst)2
-rw-r--r--Documentation/translations/zh_CN/mm/index.rst (renamed from Documentation/translations/zh_CN/vm/index.rst)2
-rw-r--r--Documentation/translations/zh_CN/mm/ksm.rst (renamed from Documentation/translations/zh_CN/vm/ksm.rst)2
-rw-r--r--Documentation/translations/zh_CN/mm/memory-model.rst (renamed from Documentation/translations/zh_CN/vm/memory-model.rst)4
-rw-r--r--Documentation/translations/zh_CN/mm/mmu_notifier.rst (renamed from Documentation/translations/zh_CN/vm/mmu_notifier.rst)2
-rw-r--r--Documentation/translations/zh_CN/mm/numa.rst (renamed from Documentation/translations/zh_CN/vm/numa.rst)2
-rw-r--r--Documentation/translations/zh_CN/mm/overcommit-accounting.rst (renamed from Documentation/translations/zh_CN/vm/overcommit-accounting.rst)2
-rw-r--r--Documentation/translations/zh_CN/mm/page_frags.rst (renamed from Documentation/translations/zh_CN/vm/page_frags.rst)2
-rw-r--r--Documentation/translations/zh_CN/mm/page_migration.rst (renamed from Documentation/translations/zh_CN/vm/page_migration.rst)2
-rw-r--r--Documentation/translations/zh_CN/mm/page_owner.rst (renamed from Documentation/translations/zh_CN/vm/page_owner.rst)2
-rw-r--r--Documentation/translations/zh_CN/mm/page_table_check.rst (renamed from Documentation/translations/zh_CN/vm/page_table_check.rst)2
-rw-r--r--Documentation/translations/zh_CN/mm/remap_file_pages.rst (renamed from Documentation/translations/zh_CN/vm/remap_file_pages.rst)2
-rw-r--r--Documentation/translations/zh_CN/mm/split_page_table_lock.rst (renamed from Documentation/translations/zh_CN/vm/split_page_table_lock.rst)2
-rw-r--r--Documentation/translations/zh_CN/mm/vmalloced-kernel-stacks.rst (renamed from Documentation/translations/zh_CN/vm/vmalloced-kernel-stacks.rst)2
-rw-r--r--Documentation/translations/zh_CN/mm/z3fold.rst (renamed from Documentation/translations/zh_CN/vm/z3fold.rst)2
-rw-r--r--Documentation/translations/zh_CN/mm/zsmalloc.rst (renamed from Documentation/translations/zh_CN/vm/zsmalloc.rst)2
-rw-r--r--Documentation/translations/zh_TW/index.rst2
-rw-r--r--Documentation/userspace-api/ioctl/ioctl-number.rst2
-rw-r--r--Documentation/virt/kvm/api.rst10
-rw-r--r--Documentation/watchdog/watchdog-parameters.rst12
-rw-r--r--Documentation/x86/sgx.rst15
-rw-r--r--Documentation/x86/x86_64/boot-options.rst8
-rw-r--r--MAINTAINERS181
-rw-r--r--Makefile37
-rw-r--r--arch/Kconfig6
-rw-r--r--arch/alpha/Kconfig1
-rw-r--r--arch/alpha/include/asm/bitops.h35
-rw-r--r--arch/alpha/include/asm/dma.h9
-rw-r--r--arch/alpha/include/asm/floppy.h2
-rw-r--r--arch/alpha/include/asm/io.h8
-rw-r--r--arch/alpha/include/asm/pci.h6
-rw-r--r--arch/alpha/include/asm/pgtable.h17
-rw-r--r--arch/alpha/kernel/smp.c6
-rw-r--r--arch/alpha/mm/fault.c4
-rw-r--r--arch/alpha/mm/init.c22
-rw-r--r--arch/arc/configs/axs101_defconfig1
-rw-r--r--arch/arc/configs/axs103_defconfig1
-rw-r--r--arch/arc/configs/axs103_smp_defconfig1
-rw-r--r--arch/arc/configs/haps_hs_defconfig1
-rw-r--r--arch/arc/configs/haps_hs_smp_defconfig1
-rw-r--r--arch/arc/configs/hsdk_defconfig1
-rw-r--r--arch/arc/configs/nsim_700_defconfig1
-rw-r--r--arch/arc/configs/nsimosci_defconfig1
-rw-r--r--arch/arc/configs/nsimosci_hs_defconfig1
-rw-r--r--arch/arc/configs/nsimosci_hs_smp_defconfig1
-rw-r--r--arch/arc/configs/tb10x_defconfig1
-rw-r--r--arch/arc/configs/vdk_hs38_defconfig1
-rw-r--r--arch/arc/configs/vdk_hs38_smp_defconfig1
-rw-r--r--arch/arc/include/asm/dma.h5
-rw-r--r--arch/arc/include/asm/pgtable-bits-arcv2.h18
-rw-r--r--arch/arc/kernel/smp.c8
-rw-r--r--arch/arc/mm/fault.c4
-rw-r--r--arch/arc/mm/mmap.c20
-rw-r--r--arch/arm/Kconfig5
-rw-r--r--arch/arm/boot/dts/imxrt1170-pinfunc.h1561
-rw-r--r--arch/arm/boot/dts/socfpga.dtsi8
-rw-r--r--arch/arm/boot/dts/sun6i-a31.dtsi1
-rw-r--r--arch/arm/boot/dts/sun8i-a23-a33.dtsi1
-rw-r--r--arch/arm/boot/dts/sun9i-a80.dtsi1
-rw-r--r--arch/arm/boot/dts/uniphier-pxs2.dtsi8
-rw-r--r--arch/arm/common/Kconfig6
-rw-r--r--arch/arm/common/Makefile1
-rw-r--r--arch/arm/common/dmabounce.c582
-rw-r--r--arch/arm/common/sa1111.c64
-rw-r--r--arch/arm/include/asm/bitops.h18
-rw-r--r--arch/arm/include/asm/device.h3
-rw-r--r--arch/arm/include/asm/dma-direct.h49
-rw-r--r--arch/arm/include/asm/dma-mapping.h128
-rw-r--r--arch/arm/include/asm/dma.h6
-rw-r--r--arch/arm/include/asm/memory.h2
-rw-r--r--arch/arm/include/asm/pci.h5
-rw-r--r--arch/arm/include/asm/pgtable.h17
-rw-r--r--arch/arm/kernel/head.S34
-rw-r--r--arch/arm/kernel/irq.c3
-rw-r--r--arch/arm/kernel/smp.c8
-rw-r--r--arch/arm/lib/uaccess_with_memcpy.c2
-rw-r--r--arch/arm/mach-footbridge/Kconfig1
-rw-r--r--arch/arm/mach-footbridge/common.c19
-rw-r--r--arch/arm/mach-footbridge/include/mach/dma-direct.h8
-rw-r--r--arch/arm/mach-footbridge/include/mach/memory.h4
-rw-r--r--arch/arm/mach-highbank/highbank.c2
-rw-r--r--arch/arm/mach-mvebu/coherency.c2
-rw-r--r--arch/arm/mach-pxa/eseries.c4
-rw-r--r--arch/arm/mach-pxa/tosa.c4
-rw-r--r--arch/arm/mm/dma-mapping.c652
-rw-r--r--arch/arm/mm/fault.c4
-rw-r--r--arch/arm/mm/mmu.c20
-rw-r--r--arch/arm64/Kconfig19
-rw-r--r--arch/arm64/boot/dts/apple/t8103-pmgr.dtsi7
-rw-r--r--arch/arm64/boot/dts/apple/t8103.dtsi34
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi8
-rw-r--r--arch/arm64/include/asm/cache.h2
-rw-r--r--arch/arm64/include/asm/fpsimd.h4
-rw-r--r--arch/arm64/include/asm/hugetlb.h3
-rw-r--r--arch/arm64/include/asm/io.h41
-rw-r--r--arch/arm64/include/asm/kvm_host.h4
-rw-r--r--arch/arm64/include/asm/memory.h9
-rw-r--r--arch/arm64/include/asm/pci.h18
-rw-r--r--arch/arm64/include/asm/pgtable-prot.h18
-rw-r--r--arch/arm64/include/asm/setup.h17
-rw-r--r--arch/arm64/include/asm/sysreg.h5
-rw-r--r--arch/arm64/include/uapi/asm/kvm.h6
-rw-r--r--arch/arm64/kernel/cacheinfo.c6
-rw-r--r--arch/arm64/kernel/cpu_errata.c12
-rw-r--r--arch/arm64/kernel/cpufeature.c5
-rw-r--r--arch/arm64/kernel/entry.S2
-rw-r--r--arch/arm64/kernel/fpsimd.c21
-rw-r--r--arch/arm64/kernel/pi/kaslr_early.c8
-rw-r--r--arch/arm64/kernel/ptrace.c6
-rw-r--r--arch/arm64/kernel/signal.c14
-rw-r--r--arch/arm64/kernel/smp.c8
-rw-r--r--arch/arm64/kernel/topology.c32
-rw-r--r--arch/arm64/kvm/arm.c3
-rw-r--r--arch/arm64/kvm/guest.c2
-rw-r--r--arch/arm64/kvm/hyp/nvhe/Makefile7
-rw-r--r--arch/arm64/kvm/mmu.c8
-rw-r--r--arch/arm64/kvm/sys_regs.c4
-rw-r--r--arch/arm64/mm/fault.c4
-rw-r--r--arch/arm64/mm/flush.c13
-rw-r--r--arch/arm64/mm/hugetlbpage.c56
-rw-r--r--arch/arm64/mm/mmap.c21
-rw-r--r--arch/arm64/mm/mmu.c18
-rw-r--r--arch/arm64/net/bpf_jit_comp.c16
-rw-r--r--arch/arm64/tools/cpucaps1
-rw-r--r--arch/csky/include/asm/pci.h23
-rw-r--r--arch/csky/include/asm/pgalloc.h2
-rw-r--r--arch/csky/include/asm/pgtable.h24
-rw-r--r--arch/csky/kernel/smp.c5
-rw-r--r--arch/csky/mm/fault.c4
-rw-r--r--arch/csky/mm/init.c20
-rw-r--r--arch/hexagon/include/asm/bitops.h37
-rw-r--r--arch/hexagon/include/asm/pgtable.h27
-rw-r--r--arch/hexagon/kernel/smp.c5
-rw-r--r--arch/hexagon/mm/init.c42
-rw-r--r--arch/hexagon/mm/vm_fault.c4
-rw-r--r--arch/ia64/Kconfig1
-rw-r--r--arch/ia64/include/asm/bitops.h45
-rw-r--r--arch/ia64/include/asm/dma.h2
-rw-r--r--arch/ia64/include/asm/io.h8
-rw-r--r--arch/ia64/include/asm/mmu_context.h5
-rw-r--r--arch/ia64/include/asm/pci.h6
-rw-r--r--arch/ia64/include/asm/pgtable.h18
-rw-r--r--arch/ia64/include/asm/processor.h2
-rw-r--r--arch/ia64/include/uapi/asm/cmpxchg.h28
-rw-r--r--arch/ia64/kernel/smp.c6
-rw-r--r--arch/ia64/mm/fault.c4
-rw-r--r--arch/ia64/mm/init.c28
-rw-r--r--arch/loongarch/Kconfig21
-rw-r--r--arch/loongarch/Kconfig.debug29
-rw-r--r--arch/loongarch/Makefile2
-rw-r--r--arch/loongarch/configs/loongson3_defconfig34
-rw-r--r--arch/loongarch/include/asm/addrspace.h16
-rw-r--r--arch/loongarch/include/asm/bootinfo.h2
-rw-r--r--arch/loongarch/include/asm/cmpxchg.h98
-rw-r--r--arch/loongarch/include/asm/dma.h11
-rw-r--r--arch/loongarch/include/asm/inst.h52
-rw-r--r--arch/loongarch/include/asm/io.h19
-rw-r--r--arch/loongarch/include/asm/irq.h11
-rw-r--r--arch/loongarch/include/asm/page.h4
-rw-r--r--arch/loongarch/include/asm/pci.h25
-rw-r--r--arch/loongarch/include/asm/percpu.h8
-rw-r--r--arch/loongarch/include/asm/pgalloc.h6
-rw-r--r--arch/loongarch/include/asm/pgtable-bits.h19
-rw-r--r--arch/loongarch/include/asm/pgtable.h34
-rw-r--r--arch/loongarch/include/asm/processor.h9
-rw-r--r--arch/loongarch/include/asm/reboot.h10
-rw-r--r--arch/loongarch/include/asm/stacktrace.h20
-rw-r--r--arch/loongarch/include/asm/switch_to.h14
-rw-r--r--arch/loongarch/include/asm/uaccess.h4
-rw-r--r--arch/loongarch/include/asm/unwind.h42
-rw-r--r--arch/loongarch/include/asm/vdso.h1
-rw-r--r--arch/loongarch/include/asm/vdso/vdso.h15
-rw-r--r--arch/loongarch/kernel/Makefile4
-rw-r--r--arch/loongarch/kernel/acpi.c38
-rw-r--r--arch/loongarch/kernel/asm-offsets.c8
-rw-r--r--arch/loongarch/kernel/head.S19
-rw-r--r--arch/loongarch/kernel/proc.c2
-rw-r--r--arch/loongarch/kernel/process.c90
-rw-r--r--arch/loongarch/kernel/reset.c69
-rw-r--r--arch/loongarch/kernel/smp.c5
-rw-r--r--arch/loongarch/kernel/stacktrace.c78
-rw-r--r--arch/loongarch/kernel/switch.S2
-rw-r--r--arch/loongarch/kernel/time.c2
-rw-r--r--arch/loongarch/kernel/traps.c24
-rw-r--r--arch/loongarch/kernel/unwind_guess.c67
-rw-r--r--arch/loongarch/kernel/unwind_prologue.c176
-rw-r--r--arch/loongarch/kernel/vdso.c25
-rw-r--r--arch/loongarch/mm/cache.c46
-rw-r--r--arch/loongarch/mm/fault.c4
-rw-r--r--arch/loongarch/mm/mmap.c11
-rw-r--r--arch/loongarch/mm/pgtable.c2
-rw-r--r--arch/loongarch/mm/tlbex.S6
-rw-r--r--arch/loongarch/pci/acpi.c175
-rw-r--r--arch/loongarch/pci/pci.c101
-rw-r--r--arch/loongarch/vdso/Makefile2
-rw-r--r--arch/loongarch/vdso/vdso.lds.S1
-rw-r--r--arch/loongarch/vdso/vgetcpu.c45
-rw-r--r--arch/loongarch/vdso/vgettimeofday.c15
-rw-r--r--arch/m68k/Kconfig1
-rw-r--r--arch/m68k/coldfire/device.c6
-rw-r--r--arch/m68k/coldfire/intc-2.c2
-rw-r--r--arch/m68k/coldfire/m523x.c2
-rw-r--r--arch/m68k/include/asm/bitops.h46
-rw-r--r--arch/m68k/include/asm/dma.h6
-rw-r--r--arch/m68k/include/asm/mcf_pgtable.h59
-rw-r--r--arch/m68k/include/asm/motorola_pgtable.h29
-rw-r--r--arch/m68k/include/asm/pci.h2
-rw-r--r--arch/m68k/include/asm/sun3_pgtable.h23
-rw-r--r--arch/m68k/include/asm/virtconvert.h4
-rw-r--r--arch/m68k/mm/fault.c4
-rw-r--r--arch/m68k/mm/mcfmmu.c55
-rw-r--r--arch/m68k/mm/motorola.c29
-rw-r--r--arch/m68k/mm/sun3mmu.c20
-rw-r--r--arch/microblaze/Kconfig2
-rw-r--r--arch/microblaze/include/asm/dma.h6
-rw-r--r--arch/microblaze/include/asm/io.h2
-rw-r--r--arch/microblaze/include/asm/pgtable.h17
-rw-r--r--arch/microblaze/kernel/entry.S2
-rw-r--r--arch/microblaze/mm/fault.c4
-rw-r--r--arch/microblaze/mm/init.c20
-rw-r--r--arch/mips/Kbuild.platforms1
-rw-r--r--arch/mips/Kconfig25
-rw-r--r--arch/mips/Makefile1
-rw-r--r--arch/mips/alchemy/devboards/pm.c2
-rw-r--r--arch/mips/ath79/early_printk.c17
-rw-r--r--arch/mips/bcm47xx/board.c2
-rw-r--r--arch/mips/bcm47xx/buttons.c10
-rw-r--r--arch/mips/bcm47xx/leds.c11
-rw-r--r--arch/mips/bcm47xx/prom.c2
-rw-r--r--arch/mips/bcm47xx/workarounds.c1
-rw-r--r--arch/mips/bmips/setup.c14
-rw-r--r--arch/mips/boot/dts/img/pistachio_marduk.dts4
-rw-r--r--arch/mips/boot/dts/ingenic/ci20.dts2
-rw-r--r--arch/mips/boot/dts/ingenic/gcw0.dts31
-rw-r--r--arch/mips/boot/dts/ingenic/rs90.dts18
-rw-r--r--arch/mips/boot/dts/mscc/ocelot.dtsi9
-rw-r--r--arch/mips/boot/dts/pic32/pic32mzda_sk.dts9
-rw-r--r--arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts6
-rw-r--r--arch/mips/boot/dts/qca/ar9331_dpt_module.dts4
-rw-r--r--arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts6
-rw-r--r--arch/mips/boot/dts/qca/ar9331_omega.dts4
-rw-r--r--arch/mips/boot/dts/qca/ar9331_openembed_som9331_board.dts4
-rw-r--r--arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts8
-rw-r--r--arch/mips/boot/dts/ralink/gardena_smart_gateway_mt7688.dts2
-rw-r--r--arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc1.dts2
-rw-r--r--arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc2.dts2
-rw-r--r--arch/mips/cavium-octeon/Kconfig12
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c21
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-board.c4
-rw-r--r--arch/mips/cavium-octeon/octeon-platform.c3
-rw-r--r--arch/mips/cavium-octeon/setup.c38
-rw-r--r--arch/mips/configs/capcella_defconfig91
-rw-r--r--arch/mips/configs/e55_defconfig37
-rw-r--r--arch/mips/configs/mpc30x_defconfig53
-rw-r--r--arch/mips/configs/tb0219_defconfig76
-rw-r--r--arch/mips/configs/tb0226_defconfig71
-rw-r--r--arch/mips/configs/tb0287_defconfig84
-rw-r--r--arch/mips/configs/workpad_defconfig67
-rw-r--r--arch/mips/fw/cfe/cfe_api.c68
-rw-r--r--arch/mips/include/asm/cpu-type.h11
-rw-r--r--arch/mips/include/asm/cpu.h3
-rw-r--r--arch/mips/include/asm/dma.h8
-rw-r--r--arch/mips/include/asm/fw/cfe/cfe_api.h2
-rw-r--r--arch/mips/include/asm/io.h11
-rw-r--r--arch/mips/include/asm/kvm_host.h2
-rw-r--r--arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h1
-rw-r--r--arch/mips/include/asm/mach-vr41xx/irq.h9
-rw-r--r--arch/mips/include/asm/mipsregs.h14
-rw-r--r--arch/mips/include/asm/octeon/cvmx-helper-board.h12
-rw-r--r--arch/mips/include/asm/pci.h6
-rw-r--r--arch/mips/include/asm/pgalloc.h8
-rw-r--r--arch/mips/include/asm/pgtable-32.h24
-rw-r--r--arch/mips/include/asm/pgtable-64.h66
-rw-r--r--arch/mips/include/asm/pgtable.h22
-rw-r--r--arch/mips/include/asm/vermagic.h2
-rw-r--r--arch/mips/include/asm/vr41xx/capcella.h30
-rw-r--r--arch/mips/include/asm/vr41xx/giu.h41
-rw-r--r--arch/mips/include/asm/vr41xx/irq.h97
-rw-r--r--arch/mips/include/asm/vr41xx/mpc30x.h24
-rw-r--r--arch/mips/include/asm/vr41xx/pci.h77
-rw-r--r--arch/mips/include/asm/vr41xx/siu.h45
-rw-r--r--arch/mips/include/asm/vr41xx/tb0219.h29
-rw-r--r--arch/mips/include/asm/vr41xx/tb0226.h30
-rw-r--r--arch/mips/include/asm/vr41xx/tb0287.h30
-rw-r--r--arch/mips/include/asm/vr41xx/vr41xx.h148
-rw-r--r--arch/mips/kernel/asm-offsets.c5
-rw-r--r--arch/mips/kernel/cpu-probe.c40
-rw-r--r--arch/mips/kernel/mips-mt.c4
-rw-r--r--arch/mips/kernel/proc.c2
-rw-r--r--arch/mips/kernel/vdso.c2
-rw-r--r--arch/mips/kvm/mmu.c14
-rw-r--r--arch/mips/lib/dump_tlb.c8
-rw-r--r--arch/mips/loongson64/numa.c1
-rw-r--r--arch/mips/math-emu/dsemul.c9
-rw-r--r--arch/mips/mm/c-r4k.c44
-rw-r--r--arch/mips/mm/cache.c3
-rw-r--r--arch/mips/mm/context.c5
-rw-r--r--arch/mips/mm/fault.c4
-rw-r--r--arch/mips/mm/pgtable.c2
-rw-r--r--arch/mips/mm/physaddr.c14
-rw-r--r--arch/mips/mm/tlbex.c53
-rw-r--r--arch/mips/pci/Makefile6
-rw-r--r--arch/mips/pci/fixup-capcella.c37
-rw-r--r--arch/mips/pci/fixup-lemote2f.c2
-rw-r--r--arch/mips/pci/fixup-mpc30x.c36
-rw-r--r--arch/mips/pci/fixup-tb0219.c38
-rw-r--r--arch/mips/pci/fixup-tb0226.c73
-rw-r--r--arch/mips/pci/fixup-tb0287.c52
-rw-r--r--arch/mips/pci/msi-octeon.c16
-rw-r--r--arch/mips/pci/ops-vr41xx.c113
-rw-r--r--arch/mips/pci/pci-vr41xx.c309
-rw-r--r--arch/mips/pci/pci-vr41xx.h141
-rw-r--r--arch/mips/sgi-ip22/ip22-gio.c2
-rw-r--r--arch/mips/vr41xx/Kconfig104
-rw-r--r--arch/mips/vr41xx/Makefile5
-rw-r--r--arch/mips/vr41xx/Platform29
-rw-r--r--arch/mips/vr41xx/casio-e55/Makefile6
-rw-r--r--arch/mips/vr41xx/casio-e55/setup.c27
-rw-r--r--arch/mips/vr41xx/common/Makefile6
-rw-r--r--arch/mips/vr41xx/common/bcu.c210
-rw-r--r--arch/mips/vr41xx/common/cmu.c242
-rw-r--r--arch/mips/vr41xx/common/giu.c110
-rw-r--r--arch/mips/vr41xx/common/icu.c714
-rw-r--r--arch/mips/vr41xx/common/init.c60
-rw-r--r--arch/mips/vr41xx/common/irq.c106
-rw-r--r--arch/mips/vr41xx/common/pmu.c123
-rw-r--r--arch/mips/vr41xx/common/rtc.c105
-rw-r--r--arch/mips/vr41xx/common/siu.c142
-rw-r--r--arch/mips/vr41xx/common/type.c11
-rw-r--r--arch/mips/vr41xx/ibm-workpad/Makefile6
-rw-r--r--arch/mips/vr41xx/ibm-workpad/setup.c27
-rw-r--r--arch/nios2/include/asm/entry.h3
-rw-r--r--arch/nios2/include/asm/pgtable.h23
-rw-r--r--arch/nios2/include/asm/ptrace.h2
-rw-r--r--arch/nios2/kernel/entry.S22
-rw-r--r--arch/nios2/kernel/signal.c3
-rw-r--r--arch/nios2/kernel/syscall_table.c1
-rw-r--r--arch/nios2/mm/fault.c4
-rw-r--r--arch/nios2/mm/init.c25
-rw-r--r--arch/nios2/mm/pgtable.c2
-rw-r--r--arch/openrisc/Kconfig5
-rw-r--r--arch/openrisc/configs/virt_defconfig108
-rw-r--r--arch/openrisc/include/asm/Kbuild1
-rw-r--r--arch/openrisc/include/asm/io.h4
-rw-r--r--arch/openrisc/include/asm/pgtable.h18
-rw-r--r--arch/openrisc/kernel/smp.c6
-rw-r--r--arch/openrisc/mm/fault.c4
-rw-r--r--arch/openrisc/mm/init.c20
-rw-r--r--arch/openrisc/mm/ioremap.c2
-rw-r--r--arch/parisc/Kconfig22
-rw-r--r--arch/parisc/include/asm/bitops.h8
-rw-r--r--arch/parisc/include/asm/dma.h6
-rw-r--r--arch/parisc/include/asm/floppy.h4
-rw-r--r--arch/parisc/include/asm/io.h2
-rw-r--r--arch/parisc/include/asm/pci.h5
-rw-r--r--arch/parisc/include/asm/pgalloc.h6
-rw-r--r--arch/parisc/include/asm/pgtable.h26
-rw-r--r--arch/parisc/kernel/cache.c3
-rw-r--r--arch/parisc/kernel/drivers.c9
-rw-r--r--arch/parisc/kernel/hardware.c11
-rw-r--r--arch/parisc/kernel/head.S43
-rw-r--r--arch/parisc/kernel/irq.c2
-rw-r--r--arch/parisc/kernel/pci-dma.c4
-rw-r--r--arch/parisc/kernel/smp.c7
-rw-r--r--arch/parisc/kernel/syscalls/syscall.tbl2
-rw-r--r--arch/parisc/kernel/unaligned.c2
-rw-r--r--arch/parisc/mm/fault.c6
-rw-r--r--arch/parisc/mm/init.c22
-rw-r--r--arch/powerpc/Kconfig21
-rw-r--r--arch/powerpc/Kconfig.debug4
-rw-r--r--arch/powerpc/Makefile34
-rw-r--r--arch/powerpc/boot/dts/fsl/p2020si-post.dtsi3
-rw-r--r--arch/powerpc/boot/dts/turris1x.dts483
-rw-r--r--arch/powerpc/configs/44x/akebono_defconfig2
-rw-r--r--arch/powerpc/configs/44x/currituck_defconfig2
-rw-r--r--arch/powerpc/configs/44x/fsp2_defconfig2
-rw-r--r--arch/powerpc/configs/44x/iss476-smp_defconfig2
-rw-r--r--arch/powerpc/configs/44x/warp_defconfig2
-rw-r--r--arch/powerpc/configs/52xx/lite5200b_defconfig2
-rw-r--r--arch/powerpc/configs/52xx/motionpro_defconfig2
-rw-r--r--arch/powerpc/configs/52xx/tqm5200_defconfig2
-rw-r--r--arch/powerpc/configs/adder875_defconfig2
-rw-r--r--arch/powerpc/configs/ep8248e_defconfig2
-rw-r--r--arch/powerpc/configs/ep88xc_defconfig2
-rw-r--r--arch/powerpc/configs/fsl-emb-nonhw.config2
-rw-r--r--arch/powerpc/configs/mgcoge_defconfig2
-rw-r--r--arch/powerpc/configs/mpc5200_defconfig2
-rw-r--r--arch/powerpc/configs/mpc8272_ads_defconfig2
-rw-r--r--arch/powerpc/configs/mpc885_ads_defconfig2
-rw-r--r--arch/powerpc/configs/ppc6xx_defconfig2
-rw-r--r--arch/powerpc/configs/pq2fads_defconfig2
-rw-r--r--arch/powerpc/configs/ps3_defconfig2
-rw-r--r--arch/powerpc/configs/tqm8xx_defconfig2
-rw-r--r--arch/powerpc/include/asm/archrandom.h16
-rw-r--r--arch/powerpc/include/asm/asm-prototypes.h11
-rw-r--r--arch/powerpc/include/asm/atomic.h5
-rw-r--r--arch/powerpc/include/asm/barrier.h2
-rw-r--r--arch/powerpc/include/asm/bitops.h4
-rw-r--r--arch/powerpc/include/asm/book3s/64/hugetlb.h3
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h2
-rw-r--r--arch/powerpc/include/asm/book3s/64/tlbflush.h26
-rw-r--r--arch/powerpc/include/asm/cputable.h3
-rw-r--r--arch/powerpc/include/asm/cputime.h1
-rw-r--r--arch/powerpc/include/asm/dma.h6
-rw-r--r--arch/powerpc/include/asm/firmware.h3
-rw-r--r--arch/powerpc/include/asm/hvcall.h23
-rw-r--r--arch/powerpc/include/asm/hw_irq.h77
-rw-r--r--arch/powerpc/include/asm/inst.h19
-rw-r--r--arch/powerpc/include/asm/interrupt.h1
-rw-r--r--arch/powerpc/include/asm/io.h3
-rw-r--r--arch/powerpc/include/asm/irq.h1
-rw-r--r--arch/powerpc/include/asm/kasan.h13
-rw-r--r--arch/powerpc/include/asm/kexec.h1
-rw-r--r--arch/powerpc/include/asm/kprobes.h2
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h3
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_64.h2
-rw-r--r--arch/powerpc/include/asm/kvm_host.h16
-rw-r--r--arch/powerpc/include/asm/machdep.h2
-rw-r--r--arch/powerpc/include/asm/mman.h1
-rw-r--r--arch/powerpc/include/asm/mmu.h12
-rw-r--r--arch/powerpc/include/asm/mpc52xx.h4
-rw-r--r--arch/powerpc/include/asm/mpc5xxx.h9
-rw-r--r--arch/powerpc/include/asm/nmi.h2
-rw-r--r--arch/powerpc/include/asm/nohash/64/pgalloc.h5
-rw-r--r--arch/powerpc/include/asm/nohash/64/pgtable.h23
-rw-r--r--arch/powerpc/include/asm/nohash/pgtable.h2
-rw-r--r--arch/powerpc/include/asm/pci-bridge.h4
-rw-r--r--arch/powerpc/include/asm/pci.h2
-rw-r--r--arch/powerpc/include/asm/pgtable.h20
-rw-r--r--arch/powerpc/include/asm/plpar_wrappers.h5
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h15
-rw-r--r--arch/powerpc/include/asm/probes.h4
-rw-r--r--arch/powerpc/include/asm/prom.h13
-rw-r--r--arch/powerpc/include/asm/setup.h6
-rw-r--r--arch/powerpc/include/asm/simple_spinlock.h15
-rw-r--r--arch/powerpc/include/asm/synch.h5
-rw-r--r--arch/powerpc/include/asm/uaccess.h1
-rw-r--r--arch/powerpc/include/asm/uprobes.h2
-rw-r--r--arch/powerpc/include/asm/word-at-a-time.h2
-rw-r--r--arch/powerpc/kernel/Makefile11
-rw-r--r--arch/powerpc/kernel/asm-offsets.c2
-rw-r--r--arch/powerpc/kernel/btext.c2
-rw-r--r--arch/powerpc/kernel/cputable.c67
-rw-r--r--arch/powerpc/kernel/dawr.c1
-rw-r--r--arch/powerpc/kernel/dt_cpu_ftrs.c4
-rw-r--r--arch/powerpc/kernel/eeh_driver.c2
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S2
-rw-r--r--arch/powerpc/kernel/head_64.S3
-rw-r--r--arch/powerpc/kernel/head_book3s_32.S4
-rw-r--r--arch/powerpc/kernel/interrupt.c161
-rw-r--r--arch/powerpc/kernel/iommu.c5
-rw-r--r--arch/powerpc/kernel/irq.c500
-rw-r--r--arch/powerpc/kernel/irq_64.c466
-rw-r--r--arch/powerpc/kernel/kprobes.c2
-rw-r--r--arch/powerpc/kernel/mce.c2
-rw-r--r--arch/powerpc/kernel/pci-common.c48
-rw-r--r--arch/powerpc/kernel/pci_32.c27
-rw-r--r--arch/powerpc/kernel/pci_64.c2
-rw-r--r--arch/powerpc/kernel/pci_dn.c2
-rw-r--r--arch/powerpc/kernel/prom.c10
-rw-r--r--arch/powerpc/kernel/prom_init.c2
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace-vsx.c2
-rw-r--r--arch/powerpc/kernel/setup-common.c8
-rw-r--r--arch/powerpc/kernel/setup_64.c1
-rw-r--r--arch/powerpc/kernel/signal_64.c9
-rw-r--r--arch/powerpc/kernel/smp.c36
-rw-r--r--arch/powerpc/kernel/syscall.c190
-rw-r--r--arch/powerpc/kernel/trace/ftrace.c38
-rw-r--r--arch/powerpc/kernel/traps.c2
-rw-r--r--arch/powerpc/kernel/vdso/cacheflush.S1
-rw-r--r--arch/powerpc/kernel/watchdog.c23
-rw-r--r--arch/powerpc/kexec/core.c2
-rw-r--r--arch/powerpc/kexec/crash.c77
-rw-r--r--arch/powerpc/kexec/file_load_64.c55
-rw-r--r--arch/powerpc/kvm/Kconfig21
-rw-r--r--arch/powerpc/kvm/Makefile1
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_host.c4
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c4
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_radix.c7
-rw-r--r--arch/powerpc/kvm/book3s_64_vio.c2
-rw-r--r--arch/powerpc/kvm/book3s_hv.c28
-rw-r--r--arch/powerpc/kvm/book3s_hv.h10
-rw-r--r--arch/powerpc/kvm/book3s_hv_builtin.c25
-rw-r--r--arch/powerpc/kvm/book3s_hv_nested.c3
-rw-r--r--arch/powerpc/kvm/book3s_hv_p9_entry.c257
-rw-r--r--arch/powerpc/kvm/book3s_hv_p9_perf.c219
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_mmu.c8
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S24
-rw-r--r--arch/powerpc/kvm/book3s_xics.h1
-rw-r--r--arch/powerpc/kvm/e500_mmu_host.c4
-rw-r--r--arch/powerpc/kvm/powerpc.c1
-rw-r--r--arch/powerpc/kvm/trace_hv.h21
-rw-r--r--arch/powerpc/lib/test_emulate_step.c15
-rw-r--r--arch/powerpc/mm/book3s32/mmu.c12
-rw-r--r--arch/powerpc/mm/book3s64/hash_pgtable.c1
-rw-r--r--arch/powerpc/mm/book3s64/hash_utils.c2
-rw-r--r--arch/powerpc/mm/book3s64/pkeys.c1
-rw-r--r--arch/powerpc/mm/book3s64/radix_hugetlbpage.c10
-rw-r--r--arch/powerpc/mm/book3s64/radix_pgtable.c35
-rw-r--r--arch/powerpc/mm/book3s64/radix_tlb.c14
-rw-r--r--arch/powerpc/mm/copro_fault.c5
-rw-r--r--arch/powerpc/mm/fault.c5
-rw-r--r--arch/powerpc/mm/hugetlbpage.c1
-rw-r--r--arch/powerpc/mm/init_32.c37
-rw-r--r--arch/powerpc/mm/kasan/Makefile1
-rw-r--r--arch/powerpc/mm/kasan/init_32.c2
-rw-r--r--arch/powerpc/mm/kasan/init_book3e_64.c133
-rw-r--r--arch/powerpc/mm/kasan/init_book3s_64.c2
-rw-r--r--arch/powerpc/mm/mem.c2
-rw-r--r--arch/powerpc/mm/mmu_decl.h1
-rw-r--r--arch/powerpc/mm/nohash/40x.c9
-rw-r--r--arch/powerpc/mm/nohash/8xx.c13
-rw-r--r--arch/powerpc/mm/nohash/book3e_hugetlbpage.c30
-rw-r--r--arch/powerpc/mm/nohash/tlb_low.S4
-rw-r--r--arch/powerpc/mm/nohash/tlb_low_64e.S147
-rw-r--r--arch/powerpc/mm/pgtable.c24
-rw-r--r--arch/powerpc/mm/pgtable_32.c6
-rw-r--r--arch/powerpc/mm/ptdump/shared.c6
-rw-r--r--arch/powerpc/net/bpf_jit_comp32.c72
-rw-r--r--arch/powerpc/net/bpf_jit_comp64.c96
-rw-r--r--arch/powerpc/perf/core-book3s.c66
-rw-r--r--arch/powerpc/perf/e500-pmu.c9
-rw-r--r--arch/powerpc/perf/e6500-pmu.c5
-rw-r--r--arch/powerpc/perf/generic-compat-pmu.c12
-rw-r--r--arch/powerpc/perf/hv-24x7.c6
-rw-r--r--arch/powerpc/perf/isa207-common.c3
-rw-r--r--arch/powerpc/perf/isa207-common.h1
-rw-r--r--arch/powerpc/perf/mpc7450-pmu.c5
-rw-r--r--arch/powerpc/perf/power10-pmu.c17
-rw-r--r--arch/powerpc/perf/power5+-pmu.c6
-rw-r--r--arch/powerpc/perf/power5-pmu.c5
-rw-r--r--arch/powerpc/perf/power6-pmu.c5
-rw-r--r--arch/powerpc/perf/power7-pmu.c7
-rw-r--r--arch/powerpc/perf/power8-pmu.c15
-rw-r--r--arch/powerpc/perf/power9-pmu.c14
-rw-r--r--arch/powerpc/perf/ppc970-pmu.c7
-rw-r--r--arch/powerpc/platforms/4xx/cpm.c2
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_common.c37
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_gpt.c27
-rw-r--r--arch/powerpc/platforms/83xx/misc.c14
-rw-r--r--arch/powerpc/platforms/83xx/suspend.c52
-rw-r--r--arch/powerpc/platforms/85xx/Kconfig18
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype54
-rw-r--r--arch/powerpc/platforms/cell/axon_msi.c1
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c1
-rw-r--r--arch/powerpc/platforms/cell/spufs/spufs.h2
-rw-r--r--arch/powerpc/platforms/powermac/setup.c7
-rw-r--r--arch/powerpc/platforms/powernv/Kconfig2
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c2
-rw-r--r--arch/powerpc/platforms/powernv/rng.c66
-rw-r--r--arch/powerpc/platforms/powernv/vas-fault.c2
-rw-r--r--arch/powerpc/platforms/ps3/Kconfig2
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig13
-rw-r--r--arch/powerpc/platforms/pseries/Makefile1
-rw-r--r--arch/powerpc/platforms/pseries/eeh_pseries.c2
-rw-r--r--arch/powerpc/platforms/pseries/firmware.c1
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c97
-rw-r--r--arch/powerpc/platforms/pseries/kexec.c2
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c2
-rw-r--r--arch/powerpc/platforms/pseries/mobility.c91
-rw-r--r--arch/powerpc/platforms/pseries/papr_platform_attributes.c1
-rw-r--r--arch/powerpc/platforms/pseries/papr_scm.c4
-rw-r--r--arch/powerpc/platforms/pseries/plpks.c460
-rw-r--r--arch/powerpc/platforms/pseries/plpks.h71
-rw-r--r--arch/powerpc/platforms/pseries/setup.c19
-rw-r--r--arch/powerpc/platforms/pseries/vas.c3
-rw-r--r--arch/powerpc/purgatory/.gitignore1
-rw-r--r--arch/powerpc/purgatory/Makefile8
-rw-r--r--arch/powerpc/purgatory/kexec-purgatory.S14
-rw-r--r--arch/powerpc/sysdev/cpm2.c2
-rw-r--r--arch/powerpc/sysdev/fsl_pci.c9
-rw-r--r--arch/powerpc/sysdev/fsl_pci.h1
-rw-r--r--arch/powerpc/sysdev/fsl_rio.c12
-rw-r--r--arch/powerpc/sysdev/mpc5xxx_clocks.c41
-rw-r--r--arch/powerpc/sysdev/of_rtc.c2
-rw-r--r--arch/powerpc/sysdev/xive/spapr.c1
-rw-r--r--arch/powerpc/xmon/xmon.c4
-rw-r--r--arch/riscv/Kconfig79
-rw-r--r--arch/riscv/Kconfig.erratas13
-rw-r--r--arch/riscv/Kconfig.socs4
-rw-r--r--arch/riscv/Makefile8
-rw-r--r--arch/riscv/boot/dts/canaan/Makefile10
-rw-r--r--arch/riscv/boot/dts/canaan/canaan_kd233.dts6
-rw-r--r--arch/riscv/boot/dts/canaan/k210.dtsi85
-rw-r--r--arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts2
-rw-r--r--arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts2
-rw-r--r--arch/riscv/boot/dts/canaan/sipeed_maix_go.dts2
-rw-r--r--arch/riscv/boot/dts/canaan/sipeed_maixduino.dts2
-rw-r--r--arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts3
-rw-r--r--arch/riscv/boot/dts/microchip/mpfs-polarberry.dts3
-rw-r--r--arch/riscv/boot/dts/microchip/mpfs.dtsi32
-rw-r--r--arch/riscv/boot/dts/sifive/fu540-c000.dtsi24
-rw-r--r--arch/riscv/boot/dts/sifive/fu740-c000.dtsi24
-rw-r--r--arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts44
-rw-r--r--arch/riscv/boot/dts/starfive/jh7100.dtsi18
-rw-r--r--arch/riscv/configs/32-bit.config2
-rw-r--r--arch/riscv/configs/defconfig65
-rw-r--r--arch/riscv/configs/nommu_k210_defconfig1
-rw-r--r--arch/riscv/configs/nommu_k210_sdcard_defconfig1
-rw-r--r--arch/riscv/configs/nommu_virt_defconfig1
-rw-r--r--arch/riscv/configs/rv32_defconfig1
-rw-r--r--arch/riscv/errata/thead/errata.c52
-rw-r--r--arch/riscv/include/asm/asm.h15
-rw-r--r--arch/riscv/include/asm/barrier.h2
-rw-r--r--arch/riscv/include/asm/cache.h4
-rw-r--r--arch/riscv/include/asm/cacheflush.h10
-rw-r--r--arch/riscv/include/asm/cpu_ops.h1
-rw-r--r--arch/riscv/include/asm/cpu_ops_sbi.h2
-rw-r--r--arch/riscv/include/asm/csr.h5
-rw-r--r--arch/riscv/include/asm/errata_list.h67
-rw-r--r--arch/riscv/include/asm/hwcap.h32
-rw-r--r--arch/riscv/include/asm/kvm_vcpu_timer.h7
-rw-r--r--arch/riscv/include/asm/page.h1
-rw-r--r--arch/riscv/include/asm/pci.h32
-rw-r--r--arch/riscv/include/asm/pgtable.h20
-rw-r--r--arch/riscv/include/asm/processor.h4
-rw-r--r--arch/riscv/include/asm/sbi.h32
-rw-r--r--arch/riscv/include/asm/signal.h12
-rw-r--r--arch/riscv/include/asm/smp.h4
-rw-r--r--arch/riscv/include/asm/switch_to.h4
-rw-r--r--arch/riscv/include/asm/thread_info.h2
-rw-r--r--arch/riscv/include/asm/vdso/processor.h21
-rw-r--r--arch/riscv/include/uapi/asm/kvm.h1
-rw-r--r--arch/riscv/kernel/alternative.c18
-rw-r--r--arch/riscv/kernel/cpu.c29
-rw-r--r--arch/riscv/kernel/cpu_ops.c5
-rw-r--r--arch/riscv/kernel/cpu_ops_sbi.c4
-rw-r--r--arch/riscv/kernel/cpu_ops_spinwait.c6
-rw-r--r--arch/riscv/kernel/cpufeature.c74
-rw-r--r--arch/riscv/kernel/crash_save_regs.S2
-rw-r--r--arch/riscv/kernel/machine_kexec.c28
-rw-r--r--arch/riscv/kernel/probes/uprobes.c6
-rw-r--r--arch/riscv/kernel/setup.c2
-rw-r--r--arch/riscv/kernel/signal.c1
-rw-r--r--arch/riscv/kernel/smp.c10
-rw-r--r--arch/riscv/kernel/smpboot.c9
-rw-r--r--arch/riscv/kernel/sys_riscv.c5
-rw-r--r--arch/riscv/kernel/traps.c7
-rw-r--r--arch/riscv/kernel/traps_misaligned.c8
-rw-r--r--arch/riscv/kvm/mmu.c4
-rw-r--r--arch/riscv/kvm/vcpu.c8
-rw-r--r--arch/riscv/kvm/vcpu_timer.c144
-rw-r--r--arch/riscv/lib/uaccess.S4
-rw-r--r--arch/riscv/mm/Makefile1
-rw-r--r--arch/riscv/mm/dma-noncoherent.c116
-rw-r--r--arch/riscv/mm/fault.c4
-rw-r--r--arch/riscv/mm/init.c24
-rw-r--r--arch/riscv/purgatory/.gitignore1
-rw-r--r--arch/riscv/purgatory/Makefile10
-rw-r--r--arch/riscv/purgatory/kexec-purgatory.S14
-rw-r--r--arch/s390/boot/startup.c10
-rw-r--r--arch/s390/boot/uv.c5
-rw-r--r--arch/s390/boot/uv.h7
-rw-r--r--arch/s390/crypto/aes_s390.c2
-rw-r--r--arch/s390/crypto/chacha-glue.c2
-rw-r--r--arch/s390/crypto/crc32-vx.c2
-rw-r--r--arch/s390/crypto/des_s390.c2
-rw-r--r--arch/s390/crypto/ghash_s390.c2
-rw-r--r--arch/s390/crypto/prng.c2
-rw-r--r--arch/s390/crypto/sha1_s390.c2
-rw-r--r--arch/s390/crypto/sha256_s390.c2
-rw-r--r--arch/s390/crypto/sha3_256_s390.c2
-rw-r--r--arch/s390/crypto/sha3_512_s390.c2
-rw-r--r--arch/s390/crypto/sha512_s390.c2
-rw-r--r--arch/s390/hypfs/hypfs_diag.c2
-rw-r--r--arch/s390/hypfs/inode.c2
-rw-r--r--arch/s390/include/asm/ap.h6
-rw-r--r--arch/s390/include/asm/bitops.h63
-rw-r--r--arch/s390/include/asm/cpufeature.h23
-rw-r--r--arch/s390/include/asm/dma.h6
-rw-r--r--arch/s390/include/asm/mmu.h14
-rw-r--r--arch/s390/include/asm/os_info.h17
-rw-r--r--arch/s390/include/asm/pci.h1
-rw-r--r--arch/s390/include/asm/pgtable.h17
-rw-r--r--arch/s390/include/asm/sclp.h4
-rw-r--r--arch/s390/include/asm/softirq_stack.h3
-rw-r--r--arch/s390/include/asm/uaccess.h1
-rw-r--r--arch/s390/include/asm/unwind.h2
-rw-r--r--arch/s390/kernel/Makefile2
-rw-r--r--arch/s390/kernel/cpufeature.c46
-rw-r--r--arch/s390/kernel/crash_dump.c116
-rw-r--r--arch/s390/kernel/nmi.c8
-rw-r--r--arch/s390/kernel/process.c22
-rw-r--r--arch/s390/kernel/processor.c10
-rw-r--r--arch/s390/kernel/setup.c13
-rw-r--r--arch/s390/mm/fault.c16
-rw-r--r--arch/s390/mm/maccess.c26
-rw-r--r--arch/s390/mm/mmap.c20
-rw-r--r--arch/s390/pci/pci_bus.c82
-rw-r--r--arch/sh/include/asm/bitops-op32.h40
-rw-r--r--arch/sh/include/asm/dma.h6
-rw-r--r--arch/sh/include/asm/pci.h6
-rw-r--r--arch/sh/include/asm/pgtable.h17
-rw-r--r--arch/sh/kernel/irq.c2
-rw-r--r--arch/sh/mm/fault.c4
-rw-r--r--arch/sh/mm/mmap.c20
-rw-r--r--arch/sparc/Kconfig1
-rw-r--r--arch/sparc/include/asm/bitops_32.h18
-rw-r--r--arch/sparc/include/asm/dma.h8
-rw-r--r--arch/sparc/include/asm/pci.h10
-rw-r--r--arch/sparc/include/asm/pgtable_32.h19
-rw-r--r--arch/sparc/include/asm/pgtable_64.h19
-rw-r--r--arch/sparc/kernel/irq_64.c2
-rw-r--r--arch/sparc/kernel/pci.c149
-rw-r--r--arch/sparc/kernel/smp_32.c5
-rw-r--r--arch/sparc/kernel/smp_64.c6
-rw-r--r--arch/sparc/lib/atomic32.c12
-rw-r--r--arch/sparc/mm/fault_32.c4
-rw-r--r--arch/sparc/mm/fault_64.c5
-rw-r--r--arch/sparc/mm/init_32.c20
-rw-r--r--arch/sparc/mm/init_64.c3
-rw-r--r--arch/um/Kconfig17
-rw-r--r--arch/um/drivers/Kconfig54
-rw-r--r--arch/um/drivers/random.c2
-rw-r--r--arch/um/drivers/virtio_uml.c1
-rw-r--r--arch/um/include/asm/common.lds.S2
-rw-r--r--arch/um/include/asm/cpufeature.h15
-rw-r--r--arch/um/include/asm/kasan.h37
-rw-r--r--arch/um/include/asm/pci.h24
-rw-r--r--arch/um/include/asm/pgtable.h17
-rw-r--r--arch/um/include/asm/processor-generic.h5
-rw-r--r--arch/um/include/asm/xor.h2
-rw-r--r--arch/um/include/shared/user.h3
-rw-r--r--arch/um/kernel/dyn.lds.S6
-rw-r--r--arch/um/kernel/mem.c39
-rw-r--r--arch/um/kernel/stacktrace.c2
-rw-r--r--arch/um/kernel/trap.c4
-rw-r--r--arch/um/kernel/uml.lds.S1
-rw-r--r--arch/um/os-Linux/mem.c22
-rw-r--r--arch/um/os-Linux/skas/process.c17
-rw-r--r--arch/um/os-Linux/umid.c3
-rw-r--r--arch/um/os-Linux/user_syms.c4
-rw-r--r--arch/x86/Kconfig2
-rw-r--r--arch/x86/Makefile4
-rw-r--r--arch/x86/boot/Makefile2
-rw-r--r--arch/x86/boot/compressed/Makefile4
-rw-r--r--arch/x86/boot/compressed/misc.h12
-rw-r--r--arch/x86/boot/compressed/sev.c8
-rw-r--r--arch/x86/configs/xen.config1
-rw-r--r--arch/x86/entry/Makefile3
-rw-r--r--arch/x86/entry/entry_64_compat.S2
-rw-r--r--arch/x86/entry/thunk_32.S2
-rw-r--r--arch/x86/entry/thunk_64.S4
-rw-r--r--arch/x86/entry/vdso/Makefile2
-rw-r--r--arch/x86/events/intel/core.c4
-rw-r--r--arch/x86/events/intel/ds.c13
-rw-r--r--arch/x86/events/intel/lbr.c8
-rw-r--r--arch/x86/events/intel/uncore_snb.c18
-rw-r--r--arch/x86/include/asm/apic.h2
-rw-r--r--arch/x86/include/asm/bitops.h43
-rw-r--r--arch/x86/include/asm/cpufeature.h15
-rw-r--r--arch/x86/include/asm/cpufeatures.h5
-rw-r--r--arch/x86/include/asm/dma.h8
-rw-r--r--arch/x86/include/asm/extable_fixup_types.h2
-rw-r--r--arch/x86/include/asm/ibt.h11
-rw-r--r--arch/x86/include/asm/intel-family.h2
-rw-r--r--arch/x86/include/asm/io.h9
-rw-r--r--arch/x86/include/asm/kvm_host.h4
-rw-r--r--arch/x86/include/asm/mem_encrypt.h2
-rw-r--r--arch/x86/include/asm/msr-index.h4
-rw-r--r--arch/x86/include/asm/nospec-branch.h77
-rw-r--r--arch/x86/include/asm/pci.h3
-rw-r--r--arch/x86/include/asm/pgtable_types.h19
-rw-r--r--arch/x86/include/asm/rmwcc.h6
-rw-r--r--arch/x86/include/asm/sev.h2
-rw-r--r--arch/x86/include/asm/sgx.h8
-rw-r--r--arch/x86/include/asm/word-at-a-time.h46
-rw-r--r--arch/x86/include/asm/xen/cpuid.h2
-rw-r--r--arch/x86/include/asm/xen/events.h3
-rw-r--r--arch/x86/include/uapi/asm/sgx.h62
-rw-r--r--arch/x86/kernel/apic/apic.c5
-rw-r--r--arch/x86/kernel/cpu/bugs.c110
-rw-r--r--arch/x86/kernel/cpu/cacheinfo.c6
-rw-r--r--arch/x86/kernel/cpu/common.c50
-rw-r--r--arch/x86/kernel/cpu/cyrix.c1
-rw-r--r--arch/x86/kernel/cpu/intel.c27
-rw-r--r--arch/x86/kernel/cpu/sgx/encl.c330
-rw-r--r--arch/x86/kernel/cpu/sgx/encl.h16
-rw-r--r--arch/x86/kernel/cpu/sgx/encls.h33
-rw-r--r--arch/x86/kernel/cpu/sgx/ioctl.c641
-rw-r--r--arch/x86/kernel/cpu/sgx/main.c75
-rw-r--r--arch/x86/kernel/cpu/sgx/sgx.h3
-rw-r--r--arch/x86/kernel/ftrace.c1
-rw-r--r--arch/x86/kernel/kprobes/core.c20
-rw-r--r--arch/x86/kernel/sev.c18
-rw-r--r--arch/x86/kernel/smpboot.c4
-rw-r--r--arch/x86/kernel/tboot.c15
-rw-r--r--arch/x86/kernel/unwind_orc.c15
-rw-r--r--arch/x86/kvm/emulate.c34
-rw-r--r--arch/x86/kvm/lapic.c8
-rw-r--r--arch/x86/kvm/mmu.h2
-rw-r--r--arch/x86/kvm/mmu/mmu.c24
-rw-r--r--arch/x86/kvm/mmu/paging_tmpl.h4
-rw-r--r--arch/x86/kvm/mmu/spte.c28
-rw-r--r--arch/x86/kvm/mmu/spte.h17
-rw-r--r--arch/x86/kvm/svm/sev.c10
-rw-r--r--arch/x86/kvm/svm/svm.c9
-rw-r--r--arch/x86/kvm/vmx/pmu_intel.c12
-rw-r--r--arch/x86/kvm/vmx/vmenter.S8
-rw-r--r--arch/x86/kvm/vmx/vmx.h29
-rw-r--r--arch/x86/kvm/x86.c14
-rw-r--r--arch/x86/kvm/xen.c31
-rw-r--r--arch/x86/mm/extable.c55
-rw-r--r--arch/x86/mm/fault.c4
-rw-r--r--arch/x86/mm/hugetlbpage.c47
-rw-r--r--arch/x86/mm/init_64.c2
-rw-r--r--arch/x86/mm/mem_encrypt_amd.c6
-rw-r--r--arch/x86/mm/numa.c4
-rw-r--r--arch/x86/mm/pat/memtype.c10
-rw-r--r--arch/x86/mm/pgprot.c28
-rw-r--r--arch/x86/platform/olpc/olpc-xo1-sci.c2
-rw-r--r--arch/x86/um/Kconfig10
-rw-r--r--arch/x86/um/Makefile4
-rw-r--r--arch/x86/um/mem_32.c2
-rw-r--r--arch/x86/um/shared/sysdep/stub_64.h1
-rw-r--r--arch/x86/um/sysrq_64.c4
-rw-r--r--arch/x86/um/vdso/Makefile3
-rw-r--r--arch/x86/xen/enlighten.c2
-rw-r--r--arch/x86/xen/enlighten_hvm.c24
-rw-r--r--arch/x86/xen/suspend_hvm.c10
-rw-r--r--arch/xtensa/Kconfig1
-rw-r--r--arch/xtensa/include/asm/dma.h7
-rw-r--r--arch/xtensa/include/asm/io.h3
-rw-r--r--arch/xtensa/include/asm/pci.h3
-rw-r--r--arch/xtensa/include/asm/pgalloc.h2
-rw-r--r--arch/xtensa/include/asm/pgtable.h19
-rw-r--r--arch/xtensa/mm/fault.c4
-rw-r--r--arch/xtensa/mm/init.c22
-rw-r--r--block/bio-integrity.c2
-rw-r--r--block/bio.c54
-rw-r--r--block/blk-core.c9
-rw-r--r--block/blk-map.c7
-rw-r--r--block/blk-merge.c185
-rw-r--r--block/blk-mq.c33
-rw-r--r--block/blk-sysfs.c2
-rw-r--r--block/blk.h47
-rw-r--r--block/bounce.c26
-rw-r--r--block/bsg.c4
-rw-r--r--block/fops.c6
-rw-r--r--block/genhd.c12
-rw-r--r--certs/Makefile14
-rw-r--r--certs/blacklist_hashes.c1
-rw-r--r--certs/blacklist_nohashes.c6
-rwxr-xr-xcerts/check-blacklist-hashes.awk (renamed from scripts/check-blacklist-hashes.awk)0
-rw-r--r--crypto/Makefile1
-rw-r--r--crypto/af_alg.c3
-rw-r--r--crypto/algif_hash.c5
-rw-r--r--crypto/asymmetric_keys/pkcs7_parser.c18
-rw-r--r--crypto/asymmetric_keys/public_key.c7
-rw-r--r--crypto/asymmetric_keys/x509_cert_parser.c3
-rw-r--r--crypto/kpp.c6
-rw-r--r--crypto/shash.c6
-rw-r--r--drivers/accessibility/braille/braille_console.c2
-rw-r--r--drivers/accessibility/speakup/main.c2
-rw-r--r--drivers/accessibility/speakup/serialio.h3
-rw-r--r--drivers/acpi/arm64/iort.c360
-rw-r--r--drivers/acpi/pci_mcfg.c13
-rw-r--r--drivers/acpi/processor_thermal.c2
-rw-r--r--drivers/acpi/property.c463
-rw-r--r--drivers/acpi/scan.c1
-rw-r--r--drivers/acpi/utils.c38
-rw-r--r--drivers/acpi/viot.c6
-rw-r--r--drivers/android/binder_alloc.c68
-rw-r--r--drivers/android/binder_alloc.h2
-rw-r--r--drivers/android/binder_alloc_selftest.c2
-rw-r--r--drivers/ata/libata-eh.c1
-rw-r--r--drivers/ata/libata-scsi.c1
-rw-r--r--drivers/ata/pata_mpc52xx.c2
-rw-r--r--drivers/atm/idt77252.c1
-rw-r--r--drivers/block/Kconfig9
-rw-r--r--drivers/block/Makefile2
-rw-r--r--drivers/block/drbd/drbd_bitmap.c49
-rw-r--r--drivers/block/drbd/drbd_req.c2
-rw-r--r--drivers/block/loop.c5
-rw-r--r--drivers/block/nbd.c6
-rw-r--r--drivers/block/null_blk/main.c108
-rw-r--r--drivers/block/null_blk/null_blk.h2
-rw-r--r--drivers/block/pktcdvd.c2
-rw-r--r--drivers/block/ps3vram.c2
-rw-r--r--drivers/block/rbd.c6
-rw-r--r--drivers/block/rnbd/rnbd-clt-sysfs.c2
-rw-r--r--drivers/block/rnbd/rnbd-clt.c201
-rw-r--r--drivers/block/rnbd/rnbd-clt.h18
-rw-r--r--drivers/block/rnbd/rnbd-srv.c20
-rw-r--r--drivers/block/rnbd/rnbd-srv.h4
-rw-r--r--drivers/block/sx8.c1582
-rw-r--r--drivers/block/ublk_drv.c381
-rw-r--r--drivers/block/virtio_blk.c24
-rw-r--r--drivers/block/xen-blkback/xenbus.c20
-rw-r--r--drivers/block/xen-blkfront.c4
-rw-r--r--drivers/block/zram/zcomp.c11
-rw-r--r--drivers/block/zram/zram_drv.c48
-rw-r--r--drivers/block/zram/zram_drv.h1
-rw-r--r--drivers/char/agp/intel-gtt.c17
-rw-r--r--drivers/char/hw_random/powernv-rng.c2
-rw-r--r--drivers/char/hw_random/s390-trng.c2
-rw-r--r--drivers/char/mem.c6
-rw-r--r--drivers/char/tpm/Kconfig12
-rw-r--r--drivers/char/tpm/Makefile1
-rw-r--r--drivers/char/tpm/tpm.h1
-rw-r--r--drivers/char/tpm/tpm1-cmd.c7
-rw-r--r--drivers/char/tpm/tpm2-cmd.c6
-rw-r--r--drivers/char/tpm/tpm_tis_core.c14
-rw-r--r--drivers/char/tpm/tpm_tis_core.h10
-rw-r--r--drivers/char/tpm/tpm_tis_i2c.c390
-rw-r--r--drivers/clk/clk-devres.c91
-rw-r--r--drivers/clk/clk-fixed-factor.c56
-rw-r--r--drivers/clk/clk.c48
-rw-r--r--drivers/clk/imx/clk-fracn-gppll.c36
-rw-r--r--drivers/clk/imx/clk-imx93.c6
-rw-r--r--drivers/clk/mediatek/clk-mt2701-eth.c10
-rw-r--r--drivers/clk/mediatek/clk-mt2701-g3d.c10
-rw-r--r--drivers/clk/mediatek/clk-mt2701-hif.c10
-rw-r--r--drivers/clk/mediatek/clk-mt2701.c22
-rw-r--r--drivers/clk/mediatek/clk-mt2712.c22
-rw-r--r--drivers/clk/mediatek/clk-mt7622-eth.c10
-rw-r--r--drivers/clk/mediatek/clk-mt7622-hif.c12
-rw-r--r--drivers/clk/mediatek/clk-mt7622.c22
-rw-r--r--drivers/clk/mediatek/clk-mt7629-eth.c10
-rw-r--r--drivers/clk/mediatek/clk-mt7629-hif.c12
-rw-r--r--drivers/clk/mediatek/clk-mt8135.c22
-rw-r--r--drivers/clk/mediatek/clk-mt8173.c22
-rw-r--r--drivers/clk/mediatek/clk-mt8183.c18
-rw-r--r--drivers/clk/mediatek/clk-mt8186-infra_ao.c23
-rw-r--r--drivers/clk/mediatek/clk-mt8192-msdc.c21
-rw-r--r--drivers/clk/mediatek/clk-mt8192.c29
-rw-r--r--drivers/clk/mediatek/clk-mt8195-infra_ao.c24
-rw-r--r--drivers/clk/mediatek/clk-mtk.c7
-rw-r--r--drivers/clk/mediatek/clk-mtk.h9
-rw-r--r--drivers/clk/mediatek/reset.c198
-rw-r--r--drivers/clk/mediatek/reset.h82
-rw-r--r--drivers/clk/meson/axg-audio.c36
-rw-r--r--drivers/clk/qcom/Kconfig22
-rw-r--r--drivers/clk/qcom/Makefile3
-rw-r--r--drivers/clk/qcom/camcc-sdm845.c4
-rw-r--r--drivers/clk/qcom/camcc-sm8250.c16
-rw-r--r--drivers/clk/qcom/camcc-sm8450.c2856
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.c144
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.h11
-rw-r--r--drivers/clk/qcom/clk-hfpll.c15
-rw-r--r--drivers/clk/qcom/clk-krait.c23
-rw-r--r--drivers/clk/qcom/clk-krait.h1
-rw-r--r--drivers/clk/qcom/clk-rcg2.c16
-rw-r--r--drivers/clk/qcom/clk-regmap-phy-mux.c62
-rw-r--r--drivers/clk/qcom/clk-regmap-phy-mux.h33
-rw-r--r--drivers/clk/qcom/clk-rpm.c24
-rw-r--r--drivers/clk/qcom/clk-rpmh.c5
-rw-r--r--drivers/clk/qcom/dispcc-sm8250.c64
-rw-r--r--drivers/clk/qcom/gcc-ipq8074.c104
-rw-r--r--drivers/clk/qcom/gcc-msm8916.c35
-rw-r--r--drivers/clk/qcom/gcc-msm8939.c47
-rw-r--r--drivers/clk/qcom/gcc-msm8960.c6
-rw-r--r--drivers/clk/qcom/gcc-msm8994.c8
-rw-r--r--drivers/clk/qcom/gcc-sc7280.c49
-rw-r--r--drivers/clk/qcom/gcc-sc8280xp.c142
-rw-r--r--drivers/clk/qcom/gcc-sm6350.c2
-rw-r--r--drivers/clk/qcom/gcc-sm8450.c49
-rw-r--r--drivers/clk/qcom/gdsc.c36
-rw-r--r--drivers/clk/qcom/gdsc.h4
-rw-r--r--drivers/clk/qcom/gpucc-sm8350.c637
-rw-r--r--drivers/clk/qcom/krait-cc.c8
-rw-r--r--drivers/clk/qcom/mmcc-msm8996.c1052
-rw-r--r--drivers/clk/qcom/videocc-sm8250.c4
-rw-r--r--drivers/clk/renesas/clk-r8a73a4.c22
-rw-r--r--drivers/clk/renesas/clk-r8a7740.c20
-rw-r--r--drivers/clk/renesas/clk-r8a7778.c31
-rw-r--r--drivers/clk/renesas/clk-r8a7779.c27
-rw-r--r--drivers/clk/renesas/clk-rz.c33
-rw-r--r--drivers/clk/renesas/clk-sh73a0.c26
-rw-r--r--drivers/clk/renesas/r8a779f0-cpg-mssr.c10
-rw-r--r--drivers/clk/renesas/r9a06g032-clocks.c32
-rw-r--r--drivers/clk/renesas/r9a07g043-cpg.c32
-rw-r--r--drivers/clk/renesas/r9a07g044-cpg.c17
-rw-r--r--drivers/clk/renesas/r9a09g011-cpg.c5
-rw-r--r--drivers/clk/renesas/rcar-gen4-cpg.c2
-rw-r--r--drivers/clk/renesas/rzg2l-cpg.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c15
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6.c16
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-de2.c77
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-h3.c113
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-r.c40
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-v3s.c80
-rw-r--r--drivers/clk/sunxi/Kconfig4
-rw-r--r--drivers/clk/ti/clk-44xx.c210
-rw-r--r--drivers/clk/ti/clk-54xx.c160
-rw-r--r--drivers/clk/ti/clkctrl.c4
-rw-r--r--drivers/clk/x86/Makefile4
-rw-r--r--drivers/clocksource/timer-riscv.c40
-rw-r--r--drivers/comedi/drivers/comedi_isadma.c2
-rw-r--r--drivers/cpufreq/cpufreq-dt.c19
-rw-r--r--drivers/cpufreq/cpufreq.c8
-rw-r--r--drivers/cpufreq/imx-cpufreq-dt.c12
-rw-r--r--drivers/cpufreq/mediatek-cpufreq.c1
-rw-r--r--drivers/cpufreq/qcom-cpufreq-hw.c14
-rw-r--r--drivers/cpufreq/qcom-cpufreq-nvmem.c109
-rw-r--r--drivers/cpufreq/sti-cpufreq.c27
-rw-r--r--drivers/cpufreq/sun50i-cpufreq-nvmem.c31
-rw-r--r--drivers/cpufreq/tegra194-cpufreq.c4
-rw-r--r--drivers/cpufreq/tegra20-cpufreq.c12
-rw-r--r--drivers/cpufreq/ti-cpufreq.c42
-rw-r--r--drivers/cpuidle/cpuidle.c6
-rw-r--r--drivers/cxl/Kconfig9
-rw-r--r--drivers/cxl/acpi.c243
-rw-r--r--drivers/cxl/core/Makefile1
-rw-r--r--drivers/cxl/core/core.h51
-rw-r--r--drivers/cxl/core/hdm.c691
-rw-r--r--drivers/cxl/core/mbox.c95
-rw-r--r--drivers/cxl/core/memdev.c4
-rw-r--r--drivers/cxl/core/pci.c181
-rw-r--r--drivers/cxl/core/pmem.c4
-rw-r--r--drivers/cxl/core/port.c738
-rw-r--r--drivers/cxl/core/region.c1896
-rw-r--r--drivers/cxl/cxl.h312
-rw-r--r--drivers/cxl/cxlmem.h42
-rw-r--r--drivers/cxl/cxlpci.h1
-rw-r--r--drivers/cxl/mem.c49
-rw-r--r--drivers/cxl/pci.c46
-rw-r--r--drivers/cxl/pmem.c259
-rw-r--r--drivers/cxl/port.c53
-rw-r--r--drivers/dax/super.c67
-rw-r--r--drivers/devfreq/exynos-bus.c21
-rw-r--r--drivers/devfreq/tegra30-devfreq.c22
-rw-r--r--drivers/dma/Kconfig8
-rw-r--r--drivers/dma/Makefile1
-rw-r--r--drivers/dma/altera-msgdma.c4
-rw-r--r--drivers/dma/amba-pl08x.c2
-rw-r--r--drivers/dma/apple-admac.c818
-rw-r--r--drivers/dma/at_xdmac.c2
-rw-r--r--drivers/dma/dma-axi-dmac.c16
-rw-r--r--drivers/dma/dma-jz4780.c2
-rw-r--r--drivers/dma/dmaengine.c7
-rw-r--r--drivers/dma/dmatest.c45
-rw-r--r--drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c11
-rw-r--r--drivers/dma/dw-edma/dw-edma-core.c141
-rw-r--r--drivers/dma/dw-edma/dw-edma-core.h31
-rw-r--r--drivers/dma/dw-edma/dw-edma-pcie.c83
-rw-r--r--drivers/dma/dw-edma/dw-edma-v0-core.c49
-rw-r--r--drivers/dma/dw-edma/dw-edma-v0-core.h4
-rw-r--r--drivers/dma/dw-edma/dw-edma-v0-debugfs.c18
-rw-r--r--drivers/dma/dw-edma/dw-edma-v0-debugfs.h8
-rw-r--r--drivers/dma/dw/rzn1-dmamux.c3
-rw-r--r--drivers/dma/ep93xx_dma.c2
-rw-r--r--drivers/dma/fsl-edma-common.c3
-rw-r--r--drivers/dma/imx-dma.c2
-rw-r--r--drivers/dma/imx-sdma.c38
-rw-r--r--drivers/dma/mediatek/mtk-cqdma.c2
-rw-r--r--drivers/dma/mediatek/mtk-hsdma.c4
-rw-r--r--drivers/dma/mv_xor_v2.c2
-rw-r--r--drivers/dma/owl-dma.c2
-rw-r--r--drivers/dma/s3c24xx-dma.c2
-rw-r--r--drivers/dma/sf-pdma/sf-pdma.c44
-rw-r--r--drivers/dma/sh/rz-dmac.c17
-rw-r--r--drivers/dma/sprd-dma.c5
-rw-r--r--drivers/dma/ste_dma40.c2
-rw-r--r--drivers/dma/stm32-mdma.c5
-rw-r--r--drivers/dma/sun4i-dma.c32
-rw-r--r--drivers/dma/tegra186-gpc-dma.c26
-rw-r--r--drivers/dma/ti/k3-psil-j721s2.c8
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c122
-rw-r--r--drivers/dma/xilinx/xilinx_dpdma.c6
-rw-r--r--drivers/edac/Kconfig1
-rw-r--r--drivers/edac/mpc85xx_edac.c2
-rw-r--r--drivers/edac/pnd2_edac.c62
-rw-r--r--drivers/edac/ppc4xx_edac.c1
-rw-r--r--drivers/firmware/cirrus/cs_dsp.c107
-rw-r--r--drivers/firmware/dmi_scan.c2
-rw-r--r--drivers/firmware/efi/libstub/riscv-stub.c13
-rw-r--r--drivers/firmware/mtk-adsp-ipc.c36
-rw-r--r--drivers/gpio/Kconfig22
-rw-r--r--drivers/gpio/Makefile2
-rw-r--r--drivers/gpio/gpio-104-dio-48e.c249
-rw-r--r--drivers/gpio/gpio-104-idi-48.c157
-rw-r--r--drivers/gpio/gpio-104-idio-16.c60
-rw-r--r--drivers/gpio/gpio-74xx-mmio.c19
-rw-r--r--drivers/gpio/gpio-adnp.c19
-rw-r--r--drivers/gpio/gpio-adp5588.c26
-rw-r--r--drivers/gpio/gpio-brcmstb.c9
-rw-r--r--drivers/gpio/gpio-davinci.c83
-rw-r--r--drivers/gpio/gpio-gpio-mm.c202
-rw-r--r--drivers/gpio/gpio-i8255.c287
-rw-r--r--drivers/gpio/gpio-i8255.h46
-rw-r--r--drivers/gpio/gpio-lp3943.c16
-rw-r--r--drivers/gpio/gpio-pca9570.c2
-rw-r--r--drivers/gpio/gpio-pch.c43
-rw-r--r--drivers/gpio/gpio-rockchip.c3
-rw-r--r--drivers/gpio/gpio-twl4030.c18
-rw-r--r--drivers/gpio/gpio-ucb1400.c20
-rw-r--r--drivers/gpio/gpio-vr41xx.c541
-rw-r--r--drivers/gpio/gpio-ws16c48.c120
-rw-r--r--drivers/gpio/gpio-xgs-iproc.c6
-rw-r--r--drivers/gpio/gpio-xilinx.c2
-rw-r--r--drivers/gpio/gpiolib-acpi.c3
-rw-r--r--drivers/gpio/gpiolib-cdev.c291
-rw-r--r--drivers/gpio/gpiolib-devres.c32
-rw-r--r--drivers/gpio/gpiolib-of.c13
-rw-r--r--drivers/gpio/gpiolib.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/aldebaran.c45
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/athub_v3_0.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c59
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c150
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v6_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_ih.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c78
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v12_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc21.c39
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_ih.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega20_ih.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c13
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c24
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.c34
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c17
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.h1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c11
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c15
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c9
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/conversion.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/conversion.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c200
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c41
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/Makefile25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c42
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c342
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c40
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c376
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h40
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c47
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c812
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h106
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_asic_id.h6
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_types.h4
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c15
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_offset.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_sh_mask.h13
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h31
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c1
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c21
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c17
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c14
-rw-r--r--drivers/gpu/drm/bridge/lvds-codec.c2
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c5
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c2
-rw-r--r--drivers/gpu/drm/drm_gem.c21
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c1
-rw-r--r--drivers/gpu/drm/drm_internal.h4
-rw-r--r--drivers/gpu/drm/drm_prime.c20
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c8
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c16
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c25
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shrinker.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c77
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.h12
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_types.h18
-rw-r--r--drivers/gpu/drm/i915/gt/intel_migrate.c23
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ppgtt.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_region_lmem.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h1
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c120
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h4
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c33
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h1
-rw-r--r--drivers/gpu/drm/i915/i915_vma_resource.c5
-rw-r--r--drivers/gpu/drm/i915/i915_vma_resource.h6
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-kms.c2
-rw-r--r--drivers/gpu/drm/lima/lima_devfreq.c12
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c5
-rw-r--r--drivers/gpu/drm/meson/meson_viu.c22
-rw-r--r--drivers/gpu/drm/msm/msm_gem_shrinker.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c22
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.c3
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c15
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c3
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c8
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c10
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_pool.c2
-rw-r--r--drivers/gpu/drm/vc4/Kconfig1
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c18
-rw-r--r--drivers/hid/hid-core.c1
-rw-r--r--drivers/hid/hid-logitech-hidpp.c164
-rw-r--r--drivers/hid/surface-hid/surface_hid_core.c38
-rw-r--r--drivers/hv/connection.c11
-rw-r--r--drivers/hv/hv_balloon.c135
-rw-r--r--drivers/hv/hyperv_vmbus.h7
-rw-r--r--drivers/hv/vmbus_drv.c27
-rw-r--r--drivers/hwmon/lm90.c2
-rw-r--r--drivers/hwmon/nct6775-core.c3
-rw-r--r--drivers/hwmon/nct6775-platform.c2
-rw-r--r--drivers/hwmon/nct6775.h2
-rw-r--r--drivers/hwspinlock/omap_hwspinlock.c6
-rw-r--r--drivers/hwspinlock/qcom_hwspinlock.c28
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-core.c8
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h8
-rw-r--r--drivers/i2c/busses/Kconfig1
-rw-r--r--drivers/i2c/busses/i2c-altera.c2
-rw-r--r--drivers/i2c/busses/i2c-aspeed.c2
-rw-r--r--drivers/i2c/busses/i2c-au1550.c2
-rw-r--r--drivers/i2c/busses/i2c-axxia.c2
-rw-r--r--drivers/i2c/busses/i2c-bcm-kona.c2
-rw-r--r--drivers/i2c/busses/i2c-brcmstb.c2
-rw-r--r--drivers/i2c/busses/i2c-cbus-gpio.c2
-rw-r--r--drivers/i2c/busses/i2c-cht-wc.c2
-rw-r--r--drivers/i2c/busses/i2c-cros-ec-tunnel.c2
-rw-r--r--drivers/i2c/busses/i2c-davinci.c2
-rw-r--r--drivers/i2c/busses/i2c-digicolor.c2
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c2
-rw-r--r--drivers/i2c/busses/i2c-emev2.c2
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c2
-rw-r--r--drivers/i2c/busses/i2c-gpio.c2
-rw-r--r--drivers/i2c/busses/i2c-highlander.c2
-rw-r--r--drivers/i2c/busses/i2c-hix5hd2.c2
-rw-r--r--drivers/i2c/busses/i2c-i801.c43
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c2
-rw-r--r--drivers/i2c/busses/i2c-icy.c2
-rw-r--r--drivers/i2c/busses/i2c-imx-lpi2c.c2
-rw-r--r--drivers/i2c/busses/i2c-imx.c20
-rw-r--r--drivers/i2c/busses/i2c-kempld.c1
-rw-r--r--drivers/i2c/busses/i2c-lpc2k.c2
-rw-r--r--drivers/i2c/busses/i2c-meson.c2
-rw-r--r--drivers/i2c/busses/i2c-microchip-corei2c.c2
-rw-r--r--drivers/i2c/busses/i2c-mpc.c7
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c45
-rw-r--r--drivers/i2c/busses/i2c-mt7621.c2
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c2
-rw-r--r--drivers/i2c/busses/i2c-mxs.c2
-rw-r--r--drivers/i2c/busses/i2c-nvidia-gpu.c2
-rw-r--r--drivers/i2c/busses/i2c-omap.c2
-rw-r--r--drivers/i2c/busses/i2c-opal.c4
-rw-r--r--drivers/i2c/busses/i2c-parport.c2
-rw-r--r--drivers/i2c/busses/i2c-pxa.c2
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c7
-rw-r--r--drivers/i2c/busses/i2c-qup.c2
-rw-r--r--drivers/i2c/busses/i2c-rcar.c2
-rw-r--r--drivers/i2c/busses/i2c-riic.c2
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c2
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c2
-rw-r--r--drivers/i2c/busses/i2c-scmi.c9
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c2
-rw-r--r--drivers/i2c/busses/i2c-simtec.c2
-rw-r--r--drivers/i2c/busses/i2c-taos-evm.c2
-rw-r--r--drivers/i2c/busses/i2c-tegra-bpmp.c2
-rw-r--r--drivers/i2c/busses/i2c-tegra.c2
-rw-r--r--drivers/i2c/busses/i2c-uniphier-f.c2
-rw-r--r--drivers/i2c/busses/i2c-uniphier.c2
-rw-r--r--drivers/i2c/busses/i2c-versatile.c2
-rw-r--r--drivers/i2c/busses/i2c-wmt.c2
-rw-r--r--drivers/i2c/i2c-core-base.c2
-rw-r--r--drivers/i2c/i2c-smbus.c2
-rw-r--r--drivers/infiniband/Kconfig15
-rw-r--r--drivers/infiniband/core/cma.c230
-rw-r--r--drivers/infiniband/core/cma_priv.h1
-rw-r--r--drivers/infiniband/core/rdma_core.c2
-rw-r--r--drivers/infiniband/core/roce_gid_mgmt.c2
-rw-r--r--drivers/infiniband/core/rw.c45
-rw-r--r--drivers/infiniband/core/umem_dmabuf.c8
-rw-r--r--drivers/infiniband/hw/Makefile1
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h2
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c25
-rw-r--r--drivers/infiniband/hw/erdma/Kconfig12
-rw-r--r--drivers/infiniband/hw/erdma/Makefile4
-rw-r--r--drivers/infiniband/hw/erdma/erdma.h287
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cm.c1430
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cm.h167
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cmdq.c493
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cq.c205
-rw-r--r--drivers/infiniband/hw/erdma/erdma_eq.c329
-rw-r--r--drivers/infiniband/hw/erdma/erdma_hw.h508
-rw-r--r--drivers/infiniband/hw/erdma/erdma_main.c608
-rw-r--r--drivers/infiniband/hw/erdma/erdma_qp.c566
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.c1460
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.h342
-rw-r--r--drivers/infiniband/hw/hfi1/Kconfig2
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c4
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib_tx.c4
-rw-r--r--drivers/infiniband/hw/hfi1/netdev_rx.c2
-rw-r--r--drivers/infiniband/hw/hfi1/pio_copy.c2
-rw-r--r--drivers/infiniband/hw/hfi1/trace_dbg.h8
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c248
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h13
-rw-r--r--drivers/infiniband/hw/irdma/cm.c11
-rw-r--r--drivers/infiniband/hw/irdma/ctrl.c8
-rw-r--r--drivers/infiniband/hw/irdma/hw.c33
-rw-r--r--drivers/infiniband/hw/irdma/main.h2
-rw-r--r--drivers/infiniband/hw/irdma/utils.c1
-rw-r--r--drivers/infiniband/hw/irdma/verbs.c16
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c4
-rw-r--r--drivers/infiniband/hw/mlx5/fs.c165
-rw-r--r--drivers/infiniband/hw/mlx5/main.c38
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h79
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c514
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c2
-rw-r--r--drivers/infiniband/hw/mlx5/umr.c78
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c8
-rw-r--r--drivers/infiniband/hw/qib/qib.h2
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c23
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c5
-rw-r--r--drivers/infiniband/hw/qib/qib_sd7220.c2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c49
-rw-r--r--drivers/infiniband/sw/rxe/rxe_cq.c8
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h5
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c213
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mw.c19
-rw-r--r--drivers/infiniband/sw/rxe/rxe_param.h6
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.c106
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.h18
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c36
-rw-r--r--drivers/infiniband/sw/rxe/rxe_queue.h5
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c137
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c236
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.c16
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c78
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.h27
-rw-r--r--drivers/infiniband/sw/siw/siw_cm.c7
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c6
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c4
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c7
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c6
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c14
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.c50
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-pri.h21
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c32
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c2
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.c32
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.h15
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c156
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.h18
-rw-r--r--drivers/input/input-core-private.h16
-rw-r--r--drivers/input/input-mt.c48
-rw-r--r--drivers/input/input.c149
-rw-r--r--drivers/input/joystick/adc-joystick.c15
-rw-r--r--drivers/input/joystick/sensehat-joystick.c4
-rw-r--r--drivers/input/keyboard/Kconfig2
-rw-r--r--drivers/input/keyboard/adp5588-keys.c206
-rw-r--r--drivers/input/keyboard/cros_ec_keyb.c89
-rw-r--r--drivers/input/keyboard/mt6779-keypad.c18
-rw-r--r--drivers/input/keyboard/mtk-pmic-keys.c98
-rw-r--r--drivers/input/keyboard/omap4-keypad.c26
-rw-r--r--drivers/input/misc/iqs7222.c178
-rw-r--r--drivers/input/mouse/cyapa_gen6.c2
-rw-r--r--drivers/input/mouse/gpio_mouse.c2
-rw-r--r--drivers/input/serio/gscps2.c4
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h1270
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c96
-rw-r--r--drivers/input/touchscreen/exc3000.c7
-rw-r--r--drivers/input/touchscreen/goodix.c22
-rw-r--r--drivers/input/touchscreen/zinitix.c112
-rw-r--r--drivers/iommu/Kconfig10
-rw-r--r--drivers/iommu/amd/amd_iommu.h18
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h186
-rw-r--r--drivers/iommu/amd/init.c942
-rw-r--r--drivers/iommu/amd/io_pgtable.c6
-rw-r--r--drivers/iommu/amd/iommu.c585
-rw-r--r--drivers/iommu/amd/iommu_v2.c67
-rw-r--r--drivers/iommu/amd/quirks.c4
-rw-r--r--drivers/iommu/apple-dart.c4
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c144
-rw-r--r--drivers/iommu/arm/arm-smmu/Makefile1
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c142
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c34
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h28
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.c73
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.h1
-rw-r--r--drivers/iommu/arm/arm-smmu/qcom_iommu.c18
-rw-r--r--drivers/iommu/dma-iommu.c124
-rw-r--r--drivers/iommu/exynos-iommu.c182
-rw-r--r--drivers/iommu/fsl_pamu_domain.c5
-rw-r--r--drivers/iommu/hyperv-iommu.c4
-rw-r--r--drivers/iommu/intel/cap_audit.c2
-rw-r--r--drivers/iommu/intel/debugfs.c51
-rw-r--r--drivers/iommu/intel/dmar.c41
-rw-r--r--drivers/iommu/intel/iommu.c447
-rw-r--r--drivers/iommu/intel/iommu.h (renamed from include/linux/intel-iommu.h)35
-rw-r--r--drivers/iommu/intel/irq_remapping.c2
-rw-r--r--drivers/iommu/intel/pasid.c107
-rw-r--r--drivers/iommu/intel/pasid.h1
-rw-r--r--drivers/iommu/intel/perf.c2
-rw-r--r--drivers/iommu/intel/svm.c11
-rw-r--r--drivers/iommu/intel/trace.c2
-rw-r--r--drivers/iommu/intel/trace.h (renamed from include/trace/events/intel_iommu.h)7
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c75
-rw-r--r--drivers/iommu/iommu.c59
-rw-r--r--drivers/iommu/iova.c12
-rw-r--r--drivers/iommu/msm_iommu.c7
-rw-r--r--drivers/iommu/mtk_iommu.c71
-rw-r--r--drivers/iommu/mtk_iommu_v1.c5
-rw-r--r--drivers/iommu/sprd-iommu.c11
-rw-r--r--drivers/iommu/sun50i-iommu.c3
-rw-r--r--drivers/iommu/tegra-gart.c5
-rw-r--r--drivers/iommu/tegra-smmu.c3
-rw-r--r--drivers/iommu/virtio-iommu.c31
-rw-r--r--drivers/irqchip/irq-loongarch-cpu.c2
-rw-r--r--drivers/irqchip/irq-loongson-eiointc.c13
-rw-r--r--drivers/irqchip/irq-loongson-liointc.c4
-rw-r--r--drivers/irqchip/irq-loongson-pch-msi.c2
-rw-r--r--drivers/irqchip/irq-loongson-pch-pic.c40
-rw-r--r--drivers/irqchip/irq-riscv-intc.c7
-rw-r--r--drivers/irqchip/irq-sifive-plic.c7
-rw-r--r--drivers/irqchip/irq-tegra.c10
-rw-r--r--drivers/leds/Kconfig10
-rw-r--r--drivers/leds/blink/Kconfig14
-rw-r--r--drivers/leds/blink/Makefile1
-rw-r--r--drivers/leds/blink/leds-bcm63138.c307
-rw-r--r--drivers/leds/leds-is31fl319x.c529
-rw-r--r--drivers/leds/leds-turris-omnia.c4
-rw-r--r--drivers/leds/rgb/leds-pwm-multicolor.c8
-rw-r--r--drivers/leds/simple/Kconfig6
-rw-r--r--drivers/leds/simple/Makefile1
-rw-r--r--drivers/leds/simple/simatic-ipc-leds-gpio.c105
-rw-r--r--drivers/leds/simple/simatic-ipc-leds.c80
-rw-r--r--drivers/macintosh/adb.c2
-rw-r--r--drivers/mailbox/imx-mailbox.c40
-rw-r--r--drivers/mailbox/mtk-cmdq-mailbox.c11
-rw-r--r--drivers/md/bcache/Kconfig2
-rw-r--r--drivers/md/bcache/btree.c2
-rw-r--r--drivers/md/dm-bufio.c45
-rw-r--r--drivers/md/dm-ebs-target.c3
-rw-r--r--drivers/md/dm-integrity.c2
-rw-r--r--drivers/md/dm-raid.c1
-rw-r--r--drivers/md/dm-snap-persistent.c2
-rw-r--r--drivers/md/dm-verity-fec.c4
-rw-r--r--drivers/md/dm-verity-target.c172
-rw-r--r--drivers/md/dm-verity.h6
-rw-r--r--drivers/md/dm-writecache.c3
-rw-r--r--drivers/md/dm-zoned-metadata.c4
-rw-r--r--drivers/md/dm.c10
-rw-r--r--drivers/md/md-autodetect.c21
-rw-r--r--drivers/md/md-cluster.c4
-rw-r--r--drivers/md/md.c424
-rw-r--r--drivers/md/md.h19
-rw-r--r--drivers/md/persistent-data/dm-block-manager.c3
-rw-r--r--drivers/md/raid10.c18
-rw-r--r--drivers/md/raid5-cache.c40
-rw-r--r--drivers/md/raid5-log.h77
-rw-r--r--drivers/md/raid5-ppl.c2
-rw-r--r--drivers/md/raid5.c729
-rw-r--r--drivers/md/raid5.h2
-rw-r--r--drivers/media/i2c/tda1997x.c1
-rw-r--r--drivers/media/platform/qcom/venus/pm_helpers.c10
-rw-r--r--drivers/memory/tegra/tegra124-emc.c11
-rw-r--r--drivers/memstick/core/ms_block.c15
-rw-r--r--drivers/message/fusion/mptspi.c2
-rw-r--r--drivers/mfd/Kconfig6
-rw-r--r--drivers/mfd/asic3.c9
-rw-r--r--drivers/mfd/axp20x.c9
-rw-r--r--drivers/mfd/cros_ec_dev.c9
-rw-r--r--drivers/mfd/db8500-prcmu.c2
-rw-r--r--drivers/mfd/dln2.c17
-rw-r--r--drivers/mfd/intel-lpss-pci.c13
-rw-r--r--drivers/mfd/intel_soc_pmic_bxtwc.c194
-rw-r--r--drivers/mfd/intel_soc_pmic_chtwc.c27
-rw-r--r--drivers/mfd/lpc_ich.c161
-rw-r--r--drivers/mfd/max77620.c2
-rw-r--r--drivers/mfd/max77714.c4
-rw-r--r--drivers/mfd/mt6358-irq.c24
-rw-r--r--drivers/mfd/mt6397-core.c91
-rw-r--r--drivers/mfd/mt6397-irq.c9
-rw-r--r--drivers/mfd/qcom-pm8008.c53
-rw-r--r--drivers/mfd/syscon.c3
-rw-r--r--drivers/mfd/t7l66xb.c6
-rw-r--r--drivers/mfd/tc6393xb.c5
-rw-r--r--drivers/mfd/twl-core.c323
-rw-r--r--drivers/mfd/ucb1400_core.c6
-rw-r--r--drivers/misc/vmw_balloon.c2
-rw-r--r--drivers/mmc/core/block.c32
-rw-r--r--drivers/mmc/core/bus.c4
-rw-r--r--drivers/mmc/core/core.c10
-rw-r--r--drivers/mmc/core/debugfs.c80
-rw-r--r--drivers/mmc/core/host.c2
-rw-r--r--drivers/mmc/core/quirks.h4
-rw-r--r--drivers/mmc/core/sd.c4
-rw-r--r--drivers/mmc/core/sdio.c30
-rw-r--r--drivers/mmc/host/Kconfig1
-rw-r--r--drivers/mmc/host/cavium-octeon.c1
-rw-r--r--drivers/mmc/host/cavium-thunderx.c4
-rw-r--r--drivers/mmc/host/cqhci-core.c9
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c4
-rw-r--r--drivers/mmc/host/dw_mmc-hi3798cv200.c4
-rw-r--r--drivers/mmc/host/dw_mmc-rockchip.c4
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c6
-rw-r--r--drivers/mmc/host/mmci.c2
-rw-r--r--drivers/mmc/host/mtk-sd.c94
-rw-r--r--drivers/mmc/host/mxcmmc.c4
-rw-r--r--drivers/mmc/host/pxamci.c4
-rw-r--r--drivers/mmc/host/renesas_sdhi.h1
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c42
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c11
-rw-r--r--drivers/mmc/host/sdhci-brcmstb.c76
-rw-r--r--drivers/mmc/host/sdhci-msm.c29
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c5
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c9
-rw-r--r--drivers/mmc/host/sdhci-of-dwcmshc.c209
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c7
-rw-r--r--drivers/mmc/host/sdhci-pci-gli.c34
-rw-r--r--drivers/mmc/host/sdhci-st.c5
-rw-r--r--drivers/mmc/host/sdhci.c59
-rw-r--r--drivers/mmc/host/sdhci.h3
-rw-r--r--drivers/mmc/host/tmio_mmc.c2
-rw-r--r--drivers/mmc/host/tmio_mmc.h6
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c28
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c8
-rw-r--r--drivers/mtd/devices/powernv_flash.c4
-rw-r--r--drivers/mtd/devices/spear_smi.c10
-rw-r--r--drivers/mtd/devices/st_spi_fsm.c23
-rw-r--r--drivers/mtd/hyperbus/hbmc-am654.c6
-rw-r--r--drivers/mtd/hyperbus/hyperbus-core.c8
-rw-r--r--drivers/mtd/hyperbus/rpc-if.c13
-rw-r--r--drivers/mtd/lpddr/lpddr2_nvm.c4
-rw-r--r--drivers/mtd/maps/physmap-core.c13
-rw-r--r--drivers/mtd/maps/physmap-versatile.c2
-rw-r--r--drivers/mtd/mtdchar.c13
-rw-r--r--drivers/mtd/mtdcore.c63
-rw-r--r--drivers/mtd/nand/raw/arasan-nand-controller.c16
-rw-r--r--drivers/mtd/nand/raw/atmel/nand-controller.c4
-rw-r--r--drivers/mtd/nand/raw/cafe_nand.c9
-rw-r--r--drivers/mtd/nand/raw/meson_nand.c17
-rw-r--r--drivers/mtd/nand/raw/omap2.c6
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c306
-rw-r--r--drivers/mtd/nand/raw/sm_common.c2
-rw-r--r--drivers/mtd/nand/raw/tegra_nand.c5
-rw-r--r--drivers/mtd/nand/spi/Makefile2
-rw-r--r--drivers/mtd/nand/spi/ato.c86
-rw-r--r--drivers/mtd/nand/spi/core.c1
-rw-r--r--drivers/mtd/parsers/Kconfig9
-rw-r--r--drivers/mtd/parsers/Makefile1
-rw-r--r--drivers/mtd/parsers/ofpart_bcm4908.c3
-rw-r--r--drivers/mtd/parsers/redboot.c1
-rw-r--r--drivers/mtd/parsers/scpart.c249
-rw-r--r--drivers/mtd/sm_ftl.c2
-rw-r--r--drivers/mtd/spi-nor/controllers/hisi-sfc.c2
-rw-r--r--drivers/mtd/spi-nor/controllers/nxp-spifi.c8
-rw-r--r--drivers/mtd/spi-nor/core.c70
-rw-r--r--drivers/mtd/spi-nor/core.h21
-rw-r--r--drivers/mtd/spi-nor/debugfs.c2
-rw-r--r--drivers/mtd/spi-nor/esmt.c2
-rw-r--r--drivers/mtd/spi-nor/issi.c31
-rw-r--r--drivers/mtd/spi-nor/micron-st.c12
-rw-r--r--drivers/mtd/spi-nor/otp.c12
-rw-r--r--drivers/mtd/spi-nor/sfdp.c34
-rw-r--r--drivers/mtd/spi-nor/spansion.c185
-rw-r--r--drivers/mtd/spi-nor/xilinx.c2
-rw-r--r--drivers/net/bonding/bond_3ad.c41
-rw-r--r--drivers/net/bonding/bond_alb.c10
-rw-r--r--drivers/net/bonding/bond_main.c47
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c2
-rw-r--r--drivers/net/can/spi/mcp251x.c18
-rw-r--r--drivers/net/can/usb/ems_usb.c2
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c3
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c53
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h1
-rw-r--r--drivers/net/dsa/mv88e6060.c3
-rw-r--r--drivers/net/dsa/ocelot/felix.c3
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c573
-rw-r--r--drivers/net/dsa/ocelot/seville_vsc9953.c553
-rw-r--r--drivers/net/dsa/sja1105/sja1105_devlink.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c21
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c5
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c10
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h2
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c8
-rw-r--r--drivers/net/ethernet/engleder/tsnep_main.c10
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c6
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c4
-rw-r--r--drivers/net/ethernet/freescale/fec.h10
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c42
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c2
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx_phy.c3
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c35
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-fec.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c8
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_adminq.c15
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c22
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h36
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fltr.c8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c37
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c15
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.c57
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c18
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c59
-rw-r--r--drivers/net/ethernet/lantiq_xrx200.c9
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c15
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c19
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h1
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c24
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h18
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c8
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c42
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c62
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c55
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c468
-rw-r--r--drivers/net/ethernet/mscc/vsc7514_regs.c84
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c3
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c95
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c9
-rw-r--r--drivers/net/ethernet/wangxun/Kconfig6
-rw-r--r--drivers/net/fddi/skfp/h/hwmtm.h2
-rw-r--r--drivers/net/geneve.c15
-rw-r--r--drivers/net/ipa/ipa_mem.c2
-rw-r--r--drivers/net/ipa/ipa_reg.h2
-rw-r--r--drivers/net/ipvlan/ipvtap.c4
-rw-r--r--drivers/net/macsec.c69
-rw-r--r--drivers/net/phy/dp83867.c2
-rw-r--r--drivers/net/phy/phy-c45.c34
-rw-r--r--drivers/net/phy/phy_device.c6
-rw-r--r--drivers/net/plip/plip.c2
-rw-r--r--drivers/net/tap.c20
-rw-r--r--drivers/net/usb/ax88179_178a.c26
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/usb/r8152.c27
-rw-r--r--drivers/net/veth.c4
-rw-r--r--drivers/net/virtio_net.c292
-rw-r--r--drivers/net/vxlan/vxlan_core.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.h14
-rw-r--r--drivers/net/wireless/ath/ath11k/trace.h7
-rw-r--r--drivers/net/wireless/ath/ath6kl/trace.h14
-rw-r--r--drivers/net/wireless/ath/trace.h7
-rw-r--r--drivers/net/wireless/ath/wil6210/trace.h7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h12
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.c2
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.h3
-rw-r--r--drivers/nfc/pn533/uart.c1
-rw-r--r--drivers/ntb/hw/epf/ntb_hw_epf.c48
-rw-r--r--drivers/ntb/hw/idt/ntb_hw_idt.c6
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen1.c12
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen4.c2
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_intel.h7
-rw-r--r--drivers/ntb/test/ntb_tool.c8
-rw-r--r--drivers/nvdimm/pmem.c17
-rw-r--r--drivers/nvdimm/region_devs.c28
-rw-r--r--drivers/nvdimm/virtio_pmem.c9
-rw-r--r--drivers/nvme/Kconfig1
-rw-r--r--drivers/nvme/Makefile1
-rw-r--r--drivers/nvme/common/Kconfig4
-rw-r--r--drivers/nvme/common/Makefile7
-rw-r--r--drivers/nvme/common/auth.c483
-rw-r--r--drivers/nvme/host/Kconfig15
-rw-r--r--drivers/nvme/host/Makefile4
-rw-r--r--drivers/nvme/host/apple.c28
-rw-r--r--drivers/nvme/host/auth.c1017
-rw-r--r--drivers/nvme/host/constants.c3
-rw-r--r--drivers/nvme/host/core.c493
-rw-r--r--drivers/nvme/host/fabrics.c102
-rw-r--r--drivers/nvme/host/fabrics.h7
-rw-r--r--drivers/nvme/host/fc.c5
-rw-r--r--drivers/nvme/host/multipath.c9
-rw-r--r--drivers/nvme/host/nvme.h41
-rw-r--r--drivers/nvme/host/pci.c228
-rw-r--r--drivers/nvme/host/rdma.c106
-rw-r--r--drivers/nvme/host/tcp.c98
-rw-r--r--drivers/nvme/host/trace.c32
-rw-r--r--drivers/nvme/host/trace.h2
-rw-r--r--drivers/nvme/target/Kconfig15
-rw-r--r--drivers/nvme/target/Makefile1
-rw-r--r--drivers/nvme/target/admin-cmd.c4
-rw-r--r--drivers/nvme/target/auth.c525
-rw-r--r--drivers/nvme/target/configfs.c136
-rw-r--r--drivers/nvme/target/core.c15
-rw-r--r--drivers/nvme/target/fabrics-cmd-auth.c544
-rw-r--r--drivers/nvme/target/fabrics-cmd.c55
-rw-r--r--drivers/nvme/target/loop.c8
-rw-r--r--drivers/nvme/target/nvmet.h75
-rw-r--r--drivers/nvme/target/rdma.c2
-rw-r--r--drivers/nvme/target/tcp.c3
-rw-r--r--drivers/of/address.c17
-rw-r--r--drivers/of/base.c2
-rw-r--r--drivers/of/device.c5
-rw-r--r--drivers/of/fdt.c25
-rw-r--r--drivers/of/kexec.c17
-rw-r--r--drivers/of/of_reserved_mem.c3
-rw-r--r--drivers/of/overlay.c20
-rw-r--r--drivers/of/unittest.c17
-rw-r--r--drivers/opp/core.c1571
-rw-r--r--drivers/opp/cpu.c12
-rw-r--r--drivers/opp/debugfs.c27
-rw-r--r--drivers/opp/of.c150
-rw-r--r--drivers/opp/opp.h56
-rw-r--r--drivers/opp/ti-opp-supply.c77
-rw-r--r--drivers/parisc/ccio-dma.c13
-rw-r--r--drivers/parisc/lba_pci.c6
-rw-r--r--drivers/parisc/led.c2
-rw-r--r--drivers/pci/Kconfig8
-rw-r--r--drivers/pci/Makefile1
-rw-r--r--drivers/pci/controller/Kconfig4
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.c6
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c22
-rw-r--r--drivers/pci/controller/dwc/pci-exynos.c19
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c670
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c34
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape-ep.c12
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape.c2
-rw-r--r--drivers/pci/controller/dwc/pci-meson.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-al.c6
-rw-r--r--drivers/pci/controller/dwc/pcie-armada8k.c6
-rw-r--r--drivers/pci/controller/dwc/pcie-artpec6.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c92
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c404
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-plat.c25
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.c472
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h178
-rw-r--r--drivers/pci/controller/dwc/pcie-dw-rockchip.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-fu740.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-histb.c10
-rw-r--r--drivers/pci/controller/dwc/pcie-intel-gw.c36
-rw-r--r--drivers/pci/controller/dwc/pcie-keembay.c6
-rw-r--r--drivers/pci/controller/dwc/pcie-kirin.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c431
-rw-r--r--drivers/pci/controller/dwc/pcie-spear13xx.c10
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194-acpi.c7
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c684
-rw-r--r--drivers/pci/controller/dwc/pcie-uniphier.c10
-rw-r--r--drivers/pci/controller/dwc/pcie-visconti.c6
-rw-r--r--drivers/pci/controller/pci-aardvark.c112
-rw-r--r--drivers/pci/controller/pci-loongson.c206
-rw-r--r--drivers/pci/controller/pci-mvebu.c4
-rw-r--r--drivers/pci/controller/pci-rcar-gen2.c1
-rw-r--r--drivers/pci/controller/pci-tegra.c9
-rw-r--r--drivers/pci/controller/pci-xgene.c2
-rw-r--r--drivers/pci/controller/pcie-brcmstb.c443
-rw-r--r--drivers/pci/controller/pcie-iproc-msi.c4
-rw-r--r--drivers/pci/controller/pcie-mediatek-gen3.c62
-rw-r--r--drivers/pci/controller/pcie-mediatek.c8
-rw-r--r--drivers/pci/controller/pcie-microchip-host.c2
-rw-r--r--drivers/pci/controller/pcie-rcar-host.c4
-rw-r--r--drivers/pci/controller/pcie-rockchip-host.c8
-rw-r--r--drivers/pci/controller/pcie-xilinx-cpm.c60
-rw-r--r--drivers/pci/controller/vmd.c13
-rw-r--r--drivers/pci/doe.c536
-rw-r--r--drivers/pci/endpoint/functions/Kconfig12
-rw-r--r--drivers/pci/endpoint/functions/Makefile1
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c117
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-vntb.c1442
-rw-r--r--drivers/pci/mmap.c44
-rw-r--r--drivers/pci/p2pdma.c93
-rw-r--r--drivers/pci/pci-acpi.c5
-rw-r--r--drivers/pci/pci.c8
-rw-r--r--drivers/pci/pci.h2
-rw-r--r--drivers/pci/pcie/aer.c15
-rw-r--r--drivers/pci/pcie/aspm.c20
-rw-r--r--drivers/pci/pcie/err.c12
-rw-r--r--drivers/pci/pcie/portdrv_core.c9
-rw-r--r--drivers/pci/probe.c92
-rw-r--r--drivers/pci/proc.c7
-rw-r--r--drivers/pci/quirks.c24
-rw-r--r--drivers/pci/switch/switchtec.c7
-rw-r--r--drivers/perf/riscv_pmu.c1
-rw-r--r--drivers/perf/riscv_pmu_legacy.c2
-rw-r--r--drivers/perf/riscv_pmu_sbi.c30
-rw-r--r--drivers/phy/samsung/phy-exynos-pcie.c25
-rw-r--r--drivers/pinctrl/Kconfig2
-rw-r--r--drivers/pinctrl/aspeed/pinmux-aspeed.h2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c21
-rw-r--r--drivers/pinctrl/core.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx93.c1
-rw-r--r--drivers/pinctrl/intel/Kconfig8
-rw-r--r--drivers/pinctrl/intel/Makefile1
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c18
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c16
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c28
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.h25
-rw-r--r--drivers/pinctrl/intel/pinctrl-lynxpoint.c10
-rw-r--r--drivers/pinctrl/intel/pinctrl-merrifield.c14
-rw-r--r--drivers/pinctrl/intel/pinctrl-meteorlake.c417
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8192.c296
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-mvebu.c10
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c4
-rw-r--r--drivers/pinctrl/pinctrl-amd.c242
-rw-r--r--drivers/pinctrl/pinctrl-amd.h1376
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c7
-rw-r--r--drivers/pinctrl/pinctrl-at91.c10
-rw-r--r--drivers/pinctrl/pinctrl-axp209.c14
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c64
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c4
-rw-r--r--drivers/pinctrl/pinctrl-starfive.c5
-rw-r--r--drivers/pinctrl/pinctrl-zynqmp.c11
-rw-r--r--drivers/pinctrl/qcom/Kconfig19
-rw-r--r--drivers/pinctrl/qcom/Makefile2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-lpass-lpi.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-lpass-lpi.h1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8909.c956
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8916.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6375.c1544
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8250.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c3
-rw-r--r--drivers/pinctrl/renesas/Kconfig18
-rw-r--r--drivers/pinctrl/renesas/Makefile2
-rw-r--r--drivers/pinctrl/renesas/core.c6
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779f0.c2
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779g0.c4262
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzg2l.c2
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzv2m.c1119
-rw-r--r--drivers/pinctrl/renesas/sh_pfc.h9
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c6
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.h3
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c4
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.h8
-rw-r--r--drivers/pinctrl/sunxi/Kconfig8
-rw-r--r--drivers/pinctrl/sunxi/Makefile1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun20i-d1.c840
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-a64-r.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-h616-r.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c22
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c25
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a83t-r.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c156
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.h109
-rw-r--r--drivers/platform/Kconfig5
-rw-r--r--drivers/platform/chrome/Kconfig11
-rw-r--r--drivers/platform/chrome/Makefile5
-rw-r--r--drivers/platform/chrome/cros_ec.c11
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c473
-rw-r--r--drivers/platform/chrome/cros_ec_proto_test.c2753
-rw-r--r--drivers/platform/chrome/cros_ec_trace.h8
-rw-r--r--drivers/platform/chrome/cros_ec_typec.c93
-rw-r--r--drivers/platform/chrome/cros_kbd_led_backlight.c196
-rw-r--r--drivers/platform/chrome/cros_kunit_util.c130
-rw-r--r--drivers/platform/chrome/cros_kunit_util.h48
-rw-r--r--drivers/platform/chrome/cros_usbpd_notify.c4
-rw-r--r--drivers/platform/chrome/wilco_ec/event.c2
-rw-r--r--drivers/platform/mellanox/mlxbf-tmfifo.c2
-rw-r--r--drivers/platform/mellanox/mlxreg-io.c23
-rw-r--r--drivers/platform/mellanox/mlxreg-lc.c82
-rw-r--r--drivers/platform/olpc/olpc-ec.c2
-rw-r--r--drivers/platform/surface/Kconfig58
-rw-r--r--drivers/platform/surface/Makefile2
-rw-r--r--drivers/platform/surface/aggregator/Kconfig2
-rw-r--r--drivers/platform/surface/aggregator/Makefile2
-rw-r--r--drivers/platform/surface/aggregator/bus.c151
-rw-r--r--drivers/platform/surface/aggregator/bus.h2
-rw-r--r--drivers/platform/surface/aggregator/controller.c55
-rw-r--r--drivers/platform/surface/aggregator/controller.h2
-rw-r--r--drivers/platform/surface/aggregator/core.c2
-rw-r--r--drivers/platform/surface/aggregator/ssh_msgb.h2
-rw-r--r--drivers/platform/surface/aggregator/ssh_packet_layer.c2
-rw-r--r--drivers/platform/surface/aggregator/ssh_packet_layer.h2
-rw-r--r--drivers/platform/surface/aggregator/ssh_parser.c2
-rw-r--r--drivers/platform/surface/aggregator/ssh_parser.h2
-rw-r--r--drivers/platform/surface/aggregator/ssh_request_layer.c2
-rw-r--r--drivers/platform/surface/aggregator/ssh_request_layer.h2
-rw-r--r--drivers/platform/surface/aggregator/trace.h82
-rw-r--r--drivers/platform/surface/surface_acpi_notify.c29
-rw-r--r--drivers/platform/surface/surface_aggregator_cdev.c2
-rw-r--r--drivers/platform/surface/surface_aggregator_hub.c371
-rw-r--r--drivers/platform/surface/surface_aggregator_registry.c362
-rw-r--r--drivers/platform/surface/surface_aggregator_tabletsw.c533
-rw-r--r--drivers/platform/surface/surface_dtx.c2
-rw-r--r--drivers/platform/surface/surface_gpe.c14
-rw-r--r--drivers/platform/surface/surface_hotplug.c2
-rw-r--r--drivers/platform/surface/surface_platform_profile.c2
-rw-r--r--drivers/platform/x86/Kconfig52
-rw-r--r--drivers/platform/x86/Makefile9
-rw-r--r--drivers/platform/x86/acer-wmi.c7
-rw-r--r--drivers/platform/x86/amd/Kconfig31
-rw-r--r--drivers/platform/x86/amd/Makefile10
-rw-r--r--drivers/platform/x86/amd/hsmp.c (renamed from drivers/platform/x86/amd_hsmp.c)0
-rw-r--r--drivers/platform/x86/amd/pmc.c (renamed from drivers/platform/x86/amd-pmc.c)0
-rw-r--r--drivers/platform/x86/apple-gmux.c5
-rw-r--r--drivers/platform/x86/asus-wmi.c25
-rw-r--r--drivers/platform/x86/compal-laptop.c4
-rw-r--r--drivers/platform/x86/dell/Kconfig1
-rw-r--r--drivers/platform/x86/intel/pmt/class.c23
-rw-r--r--drivers/platform/x86/intel/pmt/telemetry.c18
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_common.c39
-rw-r--r--drivers/platform/x86/intel/vsec.c130
-rw-r--r--drivers/platform/x86/intel/vsec.h11
-rw-r--r--drivers/platform/x86/mlx-platform.c491
-rw-r--r--drivers/platform/x86/p2sb.c133
-rw-r--r--drivers/platform/x86/panasonic-laptop.c28
-rw-r--r--drivers/platform/x86/pmc_atom.c19
-rw-r--r--drivers/platform/x86/serial-multi-instantiate.c70
-rw-r--r--drivers/platform/x86/simatic-ipc.c43
-rw-r--r--drivers/platform/x86/sony-laptop.c7
-rw-r--r--drivers/platform/x86/system76_acpi.c4
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c168
-rw-r--r--drivers/pnp/resource.c5
-rw-r--r--drivers/power/reset/Kconfig6
-rw-r--r--drivers/power/reset/Makefile1
-rw-r--r--drivers/power/reset/at91-reset.c184
-rw-r--r--drivers/power/reset/pwr-mlxbf.c97
-rw-r--r--drivers/power/supply/ab8500-chargalg.h4
-rw-r--r--drivers/power/supply/ab8500_btemp.c1
-rw-r--r--drivers/power/supply/ab8500_chargalg.c70
-rw-r--r--drivers/power/supply/ab8500_charger.c48
-rw-r--r--drivers/power/supply/ab8500_fg.c3
-rw-r--r--drivers/power/supply/bq24257_charger.c2
-rw-r--r--drivers/power/supply/cros_peripheral_charger.c2
-rw-r--r--drivers/power/supply/goldfish_battery.c4
-rw-r--r--drivers/power/supply/lp8788-charger.c2
-rw-r--r--drivers/power/supply/max77976_charger.c4
-rw-r--r--drivers/power/supply/olpc_battery.c5
-rw-r--r--drivers/power/supply/pm2301_charger.h492
-rw-r--r--drivers/power/supply/power_supply_core.c4
-rw-r--r--drivers/power/supply/surface_battery.c4
-rw-r--r--drivers/power/supply/surface_charger.c4
-rw-r--r--drivers/regulator/core.c2
-rw-r--r--drivers/regulator/cros-ec-regulator.c36
-rw-r--r--drivers/remoteproc/imx_rproc.c7
-rw-r--r--drivers/remoteproc/keystone_remoteproc.c3
-rw-r--r--drivers/remoteproc/mtk_scp.c23
-rw-r--r--drivers/remoteproc/omap_remoteproc.c6
-rw-r--r--drivers/remoteproc/pru_rproc.c1
-rw-r--r--drivers/remoteproc/qcom_common.c4
-rw-r--r--drivers/remoteproc/qcom_q6v5.c4
-rw-r--r--drivers/remoteproc/qcom_q6v5_adsp.c3
-rw-r--r--drivers/remoteproc/qcom_q6v5_mss.c54
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c105
-rw-r--r--drivers/remoteproc/qcom_sysmon.c16
-rw-r--r--drivers/remoteproc/qcom_wcnss.c10
-rw-r--r--drivers/remoteproc/remoteproc_core.c28
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c12
-rw-r--r--drivers/remoteproc/ti_k3_r5_remoteproc.c2
-rw-r--r--drivers/reset/Kconfig13
-rw-r--r--drivers/reset/Makefile1
-rw-r--r--drivers/reset/reset-tps380x.c126
-rw-r--r--drivers/rpmsg/mtk_rpmsg.c2
-rw-r--r--drivers/rpmsg/qcom_glink_native.c10
-rw-r--r--drivers/rpmsg/qcom_glink_ssr.c2
-rw-r--r--drivers/rpmsg/qcom_smd.c9
-rw-r--r--drivers/rpmsg/rpmsg_char.c7
-rw-r--r--drivers/rpmsg/rpmsg_core.c3
-rw-r--r--drivers/rpmsg/rpmsg_internal.h4
-rw-r--r--drivers/rtc/Kconfig41
-rw-r--r--drivers/rtc/Makefile4
-rw-r--r--drivers/rtc/class.c6
-rw-r--r--drivers/rtc/dev.c8
-rw-r--r--drivers/rtc/rtc-ab-b5ze-s3.c5
-rw-r--r--drivers/rtc/rtc-ab-eoz9.c5
-rw-r--r--drivers/rtc/rtc-bq32k.c5
-rw-r--r--drivers/rtc/rtc-cmos.c3
-rw-r--r--drivers/rtc/rtc-core.h5
-rw-r--r--drivers/rtc/rtc-cros-ec.c4
-rw-r--r--drivers/rtc/rtc-ds1374.c5
-rw-r--r--drivers/rtc/rtc-ds1672.c5
-rw-r--r--drivers/rtc/rtc-ds3232.c5
-rw-r--r--drivers/rtc/rtc-em3027.c5
-rw-r--r--drivers/rtc/rtc-fm3130.c5
-rw-r--r--drivers/rtc/rtc-hym8563.c5
-rw-r--r--drivers/rtc/rtc-isl12022.c5
-rw-r--r--drivers/rtc/rtc-isl1208.c10
-rw-r--r--drivers/rtc/rtc-max6900.c5
-rw-r--r--drivers/rtc/rtc-mc146818-lib.c8
-rw-r--r--drivers/rtc/rtc-mpfs.c323
-rw-r--r--drivers/rtc/rtc-nct3018y.c553
-rw-r--r--drivers/rtc/rtc-pcf8523.c5
-rw-r--r--drivers/rtc/rtc-pcf85363.c5
-rw-r--r--drivers/rtc/rtc-pcf8563.c5
-rw-r--r--drivers/rtc/rtc-pcf8583.c5
-rw-r--r--drivers/rtc/rtc-rv3029c2.c5
-rw-r--r--drivers/rtc/rtc-rv8803.c98
-rw-r--r--drivers/rtc/rtc-rx6110.c5
-rw-r--r--drivers/rtc/rtc-rx8025.c22
-rw-r--r--drivers/rtc/rtc-rx8581.c5
-rw-r--r--drivers/rtc/rtc-s35390a.c5
-rw-r--r--drivers/rtc/rtc-sd3078.c5
-rw-r--r--drivers/rtc/rtc-spear.c2
-rw-r--r--drivers/rtc/rtc-sun6i.c2
-rw-r--r--drivers/rtc/rtc-ti-k3.c680
-rw-r--r--drivers/rtc/rtc-vr41xx.c363
-rw-r--r--drivers/rtc/rtc-x1205.c5
-rw-r--r--drivers/rtc/rtc-zynqmp.c115
-rw-r--r--drivers/s390/block/dasd.c2
-rw-r--r--drivers/s390/block/dasd_diag.c1
-rw-r--r--drivers/s390/block/dasd_eckd.c1
-rw-r--r--drivers/s390/block/dcssblk.c2
-rw-r--r--drivers/s390/char/Kconfig2
-rw-r--r--drivers/s390/char/tape_34xx.c2
-rw-r--r--drivers/s390/char/uvdevice.c5
-rw-r--r--drivers/s390/char/zcore.c55
-rw-r--r--drivers/s390/cio/vfio_ccw_async.c1
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.c205
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.h12
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c58
-rw-r--r--drivers/s390/cio/vfio_ccw_fsm.c99
-rw-r--r--drivers/s390/cio/vfio_ccw_ops.c114
-rw-r--r--drivers/s390/cio/vfio_ccw_private.h13
-rw-r--r--drivers/s390/crypto/ap_bus.c34
-rw-r--r--drivers/s390/crypto/ap_bus.h4
-rw-r--r--drivers/s390/crypto/ap_queue.c2
-rw-r--r--drivers/s390/crypto/pkey_api.c2
-rw-r--r--drivers/s390/crypto/vfio_ap_drv.c124
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c1544
-rw-r--r--drivers/s390/crypto/vfio_ap_private.h54
-rw-r--r--drivers/s390/net/qeth_core_main.c168
-rw-r--r--drivers/s390/net/qeth_ethtool.c12
-rw-r--r--drivers/s390/scsi/zfcp_diag.h2
-rw-r--r--drivers/s390/scsi/zfcp_fc.c29
-rw-r--r--drivers/s390/scsi/zfcp_fc.h6
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c4
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c2
-rw-r--r--drivers/s390/virtio/virtio_ccw.c3
-rw-r--r--drivers/scsi/BusLogic.c35
-rw-r--r--drivers/scsi/FlashPoint.c4
-rw-r--r--drivers/scsi/Kconfig13
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/a2091.c63
-rw-r--r--drivers/scsi/a3000.c53
-rw-r--r--drivers/scsi/aacraid/aachba.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_dev.c2
-rw-r--r--drivers/scsi/be2iscsi/be_main.c21
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c2
-rw-r--r--drivers/scsi/ch.c2
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c4
-rw-r--r--drivers/scsi/cxlflash/ocxl_hw.c1
-rw-r--r--drivers/scsi/dpt/dpti_i2o.h441
-rw-r--r--drivers/scsi/dpt/dpti_ioctl.h136
-rw-r--r--drivers/scsi/dpt/dptsig.h336
-rw-r--r--drivers/scsi/dpt/osd_defs.h79
-rw-r--r--drivers/scsi/dpt/osd_util.h358
-rw-r--r--drivers/scsi/dpt/sys_info.h417
-rw-r--r--drivers/scsi/dpt_i2o.c3545
-rw-r--r--drivers/scsi/dpti.h331
-rw-r--r--drivers/scsi/fcoe/fcoe.c1
-rw-r--r--drivers/scsi/fnic/fnic_main.c45
-rw-r--r--drivers/scsi/gvp11.c95
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c49
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c2
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c6
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c16
-rw-r--r--drivers/scsi/hosts.c27
-rw-r--r--drivers/scsi/iscsi_tcp.c74
-rw-r--r--drivers/scsi/iscsi_tcp.h2
-rw-r--r--drivers/scsi/libiscsi.c313
-rw-r--r--drivers/scsi/libiscsi_tcp.c6
-rw-r--r--drivers/scsi/libsas/sas_expander.c67
-rw-r--r--drivers/scsi/libsas/sas_init.c4
-rw-r--r--drivers/scsi/libsas/sas_internal.h2
-rw-r--r--drivers/scsi/lpfc/lpfc.h11
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c27
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c324
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.h14
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c22
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c32
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c60
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h10
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_ids.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c21
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c1
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c54
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c1
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c38
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c12
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c4
-rw-r--r--drivers/scsi/mesh.c7
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr.h73
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_fw.c67
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_os.c291
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c8
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c3
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c7
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c73
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c10
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h3
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c46
-rw-r--r--drivers/scsi/qedi/qedi_main.c11
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c29
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c96
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.h15
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h36
-rw-r--r--drivers/scsi/qla2xxx/qla_edif.c585
-rw-r--r--drivers/scsi/qla2xxx/qla_edif.h7
-rw-r--r--drivers/scsi/qla2xxx/qla_edif_bsg.h106
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h13
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c131
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c96
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c138
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c67
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c103
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c45
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h4
-rw-r--r--drivers/scsi/scsi.c9
-rw-r--r--drivers/scsi/scsi_error.c4
-rw-r--r--drivers/scsi/scsi_lib.c65
-rw-r--r--drivers/scsi/scsi_scan.c9
-rw-r--r--drivers/scsi/scsi_sysfs.c29
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c98
-rw-r--r--drivers/scsi/scsi_transport_sas.c6
-rw-r--r--drivers/scsi/sd.c7
-rw-r--r--drivers/scsi/sg.c53
-rw-r--r--drivers/scsi/smartpqi/Kconfig2
-rw-r--r--drivers/scsi/smartpqi/smartpqi.h27
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c405
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sas_transport.c2
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sis.c11
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sis.h4
-rw-r--r--drivers/scsi/snic/snic_fwint.h2
-rw-r--r--drivers/scsi/storvsc_drv.c2
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c2
-rw-r--r--drivers/soc/qcom/qcom-geni-se.c3
-rw-r--r--drivers/soc/tegra/common.c49
-rw-r--r--drivers/soc/tegra/pmc.c4
-rw-r--r--drivers/soundwire/intel.c16
-rw-r--r--drivers/spi/spi-meson-spicc.c129
-rw-r--r--drivers/spi/spi-mpc52xx.c2
-rw-r--r--drivers/spi/spi.c14
-rw-r--r--drivers/target/iscsi/iscsi_target.c57
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c122
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c113
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c17
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c160
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.h3
-rw-r--r--drivers/target/iscsi/iscsi_target_nodeattrib.c1
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c3
-rw-r--r--drivers/target/target_core_alua.c5
-rw-r--r--drivers/target/target_core_configfs.c27
-rw-r--r--drivers/target/target_core_device.c38
-rw-r--r--drivers/target/target_core_file.c37
-rw-r--r--drivers/target/target_core_iblock.c13
-rw-r--r--drivers/target/target_core_pr.c28
-rw-r--r--drivers/target/target_core_sbc.c99
-rw-r--r--drivers/target/target_core_stat.c10
-rw-r--r--drivers/target/target_core_xcopy.c2
-rw-r--r--drivers/tee/tee_shm.c3
-rw-r--r--drivers/thermal/Kconfig4
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c9
-rw-r--r--drivers/thermal/intel/intel_tcc_cooling.c2
-rw-r--r--drivers/thermal/thermal_core.c1
-rw-r--r--drivers/thermal/thermal_sysfs.c10
-rw-r--r--drivers/tty/amiserial.c20
-rw-r--r--drivers/tty/mips_ejtag_fdc.c4
-rw-r--r--drivers/tty/n_gsm.c757
-rw-r--r--drivers/tty/n_tty.c92
-rw-r--r--drivers/tty/serial/8250/8250.h24
-rw-r--r--drivers/tty/serial/8250/8250_bcm2835aux.c7
-rw-r--r--drivers/tty/serial/8250/8250_bcm7271.c24
-rw-r--r--drivers/tty/serial/8250/8250_core.c8
-rw-r--r--drivers/tty/serial/8250/8250_dw.c68
-rw-r--r--drivers/tty/serial/8250/8250_dwlib.c152
-rw-r--r--drivers/tty/serial/8250/8250_early.c4
-rw-r--r--drivers/tty/serial/8250/8250_exar.c25
-rw-r--r--drivers/tty/serial/8250/8250_fintek.c31
-rw-r--r--drivers/tty/serial/8250/8250_fsl.c4
-rw-r--r--drivers/tty/serial/8250/8250_ingenic.c2
-rw-r--r--drivers/tty/serial/8250/8250_lpc18xx.c28
-rw-r--r--drivers/tty/serial/8250/8250_lpss.c2
-rw-r--r--drivers/tty/serial/8250/8250_of.c1
-rw-r--r--drivers/tty/serial/8250/8250_omap.c7
-rw-r--r--drivers/tty/serial/8250/8250_pci.c135
-rw-r--r--drivers/tty/serial/8250/8250_pericom.c2
-rw-r--r--drivers/tty/serial/8250/8250_port.c157
-rw-r--r--drivers/tty/serial/8250/Kconfig1
-rw-r--r--drivers/tty/serial/Kconfig18
-rw-r--r--drivers/tty/serial/Makefile1
-rw-r--r--drivers/tty/serial/amba-pl011.c15
-rw-r--r--drivers/tty/serial/ar933x_uart.c27
-rw-r--r--drivers/tty/serial/atmel_serial.c103
-rw-r--r--drivers/tty/serial/earlycon.c3
-rw-r--r--drivers/tty/serial/fsl_lpuart.c67
-rw-r--r--drivers/tty/serial/imx.c21
-rw-r--r--drivers/tty/serial/kgdboc.c2
-rw-r--r--drivers/tty/serial/max310x.c272
-rw-r--r--drivers/tty/serial/mcf.c10
-rw-r--r--drivers/tty/serial/meson_uart.c2
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c4
-rw-r--r--drivers/tty/serial/msm_serial.c550
-rw-r--r--drivers/tty/serial/mux.c6
-rw-r--r--drivers/tty/serial/mvebu-uart.c11
-rw-r--r--drivers/tty/serial/omap-serial.c18
-rw-r--r--drivers/tty/serial/owl-uart.c2
-rw-r--r--drivers/tty/serial/pch_uart.c7
-rw-r--r--drivers/tty/serial/pic32_uart.c4
-rw-r--r--drivers/tty/serial/pmac_zilog.c1
-rw-r--r--drivers/tty/serial/pxa.c5
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c91
-rw-r--r--drivers/tty/serial/rda-uart.c2
-rw-r--r--drivers/tty/serial/samsung_tty.c90
-rw-r--r--drivers/tty/serial/sc16is7xx.c10
-rw-r--r--drivers/tty/serial/serial-tegra.c5
-rw-r--r--drivers/tty/serial/serial_core.c452
-rw-r--r--drivers/tty/serial/serial_mctrl_gpio.c48
-rw-r--r--drivers/tty/serial/sifive.c10
-rw-r--r--drivers/tty/serial/st-asc.c1
-rw-r--r--drivers/tty/serial/stm32-usart.c79
-rw-r--r--drivers/tty/serial/stm32-usart.h68
-rw-r--r--drivers/tty/serial/sunsu.c4
-rw-r--r--drivers/tty/serial/ucc_uart.c2
-rw-r--r--drivers/tty/serial/vr41xx_siu.c934
-rw-r--r--drivers/tty/tty_buffer.c59
-rw-r--r--drivers/tty/tty_io.c2
-rw-r--r--drivers/tty/tty_ioctl.c4
-rw-r--r--drivers/tty/tty_port.c21
-rw-r--r--drivers/tty/vt/Makefile2
-rw-r--r--drivers/tty/vt/consolemap.c684
-rw-r--r--drivers/tty/vt/defkeymap.c_shipped6
-rw-r--r--drivers/tty/vt/selection.c3
-rw-r--r--drivers/tty/vt/vt.c16
-rw-r--r--drivers/ufs/core/ufshcd-priv.h6
-rw-r--r--drivers/ufs/core/ufshcd.c102
-rw-r--r--drivers/ufs/host/Kconfig12
-rw-r--r--drivers/ufs/host/Makefile1
-rw-r--r--drivers/ufs/host/ufs-exynos.c182
-rw-r--r--drivers/ufs/host/ufs-exynos.h1
-rw-r--r--drivers/ufs/host/ufs-mediatek.c324
-rw-r--r--drivers/ufs/host/ufs-mediatek.h74
-rw-r--r--drivers/ufs/host/ufs-qcom.c23
-rw-r--r--drivers/ufs/host/ufs-renesas.c412
-rw-r--r--drivers/ufs/host/ufshcd-pci.c18
-rw-r--r--drivers/ufs/host/ufshcd-pltfrm.c15
-rw-r--r--drivers/ufs/host/ufshcd-pltfrm.h6
-rw-r--r--drivers/usb/chipidea/trace.h4
-rw-r--r--drivers/usb/core/hcd.c17
-rw-r--r--drivers/usb/host/ohci-sa1111.c25
-rw-r--r--drivers/usb/host/xhci-trace.h4
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_con.c2
-rw-r--r--drivers/usb/mtu3/mtu3_trace.h6
-rw-r--r--drivers/usb/musb/musb_trace.h4
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.c14
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.h2
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_main.c144
-rw-r--r--drivers/vdpa/mlx5/core/mlx5_vdpa.h11
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c173
-rw-r--r--drivers/vdpa/vdpa.c14
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c18
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.h1
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim_blk.c176
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim_net.c3
-rw-r--r--drivers/vdpa/vdpa_user/iova_domain.c102
-rw-r--r--drivers/vdpa/vdpa_user/iova_domain.h8
-rw-r--r--drivers/vdpa/vdpa_user/vduse_dev.c180
-rw-r--r--drivers/vfio/Makefile2
-rw-r--r--drivers/vfio/fsl-mc/vfio_fsl_mc_private.h2
-rw-r--r--drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c11
-rw-r--r--drivers/vfio/pci/mlx5/cmd.c14
-rw-r--r--drivers/vfio/pci/mlx5/cmd.h4
-rw-r--r--drivers/vfio/pci/mlx5/main.c11
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c4
-rw-r--r--drivers/vfio/pci/vfio_pci_core.c7
-rw-r--r--drivers/vfio/platform/vfio_platform_private.h21
-rw-r--r--drivers/vfio/vfio.h17
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c14
-rw-r--r--drivers/vfio/vfio_iommu_type1.c197
-rw-r--r--drivers/vfio/vfio_main.c (renamed from drivers/vfio/vfio.c)192
-rw-r--r--drivers/vhost/scsi.c89
-rw-r--r--drivers/vhost/vdpa.c38
-rw-r--r--drivers/vhost/vringh.c78
-rw-r--r--drivers/video/backlight/lp855x_bl.c21
-rw-r--r--drivers/video/backlight/platform_lcd.c10
-rw-r--r--drivers/video/backlight/rt4831-backlight.c33
-rw-r--r--drivers/video/console/sticore.c2
-rw-r--r--drivers/video/console/vgacon.c12
-rw-r--r--drivers/video/fbdev/68328fb.c7
-rw-r--r--drivers/video/fbdev/amba-clcd.c24
-rw-r--r--drivers/video/fbdev/amifb.c15
-rw-r--r--drivers/video/fbdev/arkfb.c9
-rw-r--r--drivers/video/fbdev/atafb.c103
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c2
-rw-r--r--drivers/video/fbdev/aty/radeon_base.c48
-rw-r--r--drivers/video/fbdev/bw2.c2
-rw-r--r--drivers/video/fbdev/chipsfb.c1
-rw-r--r--drivers/video/fbdev/cirrusfb.c4
-rw-r--r--drivers/video/fbdev/clps711x-fb.c2
-rw-r--r--drivers/video/fbdev/core/fbcon.c37
-rw-r--r--drivers/video/fbdev/core/fbsysfs.c4
-rw-r--r--drivers/video/fbdev/cyber2000fb.c8
-rw-r--r--drivers/video/fbdev/dnfb.c2
-rw-r--r--drivers/video/fbdev/ffb.c2
-rw-r--r--drivers/video/fbdev/fm2fb.c4
-rw-r--r--drivers/video/fbdev/geode/gx1fb_core.c6
-rw-r--r--drivers/video/fbdev/gxt4500.c2
-rw-r--r--drivers/video/fbdev/hpfb.c4
-rw-r--r--drivers/video/fbdev/i740fb.c11
-rw-r--r--drivers/video/fbdev/imxfb.c136
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.c6
-rw-r--r--drivers/video/fbdev/offb.c1
-rw-r--r--drivers/video/fbdev/omap/hwa742.c3
-rw-r--r--drivers/video/fbdev/omap/omapfb.h9
-rw-r--r--drivers/video/fbdev/omap/omapfb_main.c9
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-main.c2
-rw-r--r--drivers/video/fbdev/pm2fb.c5
-rw-r--r--drivers/video/fbdev/pxa168fb.c2
-rw-r--r--drivers/video/fbdev/pxafb.c2
-rw-r--r--drivers/video/fbdev/q40fb.c2
-rw-r--r--drivers/video/fbdev/s3fb.c4
-rw-r--r--drivers/video/fbdev/sa1100fb.c41
-rw-r--r--drivers/video/fbdev/simplefb.c2
-rw-r--r--drivers/video/fbdev/sis/init.c4
-rw-r--r--drivers/video/fbdev/sis/sis_main.c278
-rw-r--r--drivers/video/fbdev/skeletonfb.c6
-rw-r--r--drivers/video/fbdev/sm501fb.c2
-rw-r--r--drivers/video/fbdev/ssd1307fb.c2
-rw-r--r--drivers/video/fbdev/sstfb.c2
-rw-r--r--drivers/video/fbdev/sunxvr1000.c2
-rw-r--r--drivers/video/fbdev/sunxvr2500.c2
-rw-r--r--drivers/video/fbdev/sunxvr500.c2
-rw-r--r--drivers/video/fbdev/tcx.c2
-rw-r--r--drivers/video/fbdev/tdfxfb.c4
-rw-r--r--drivers/video/fbdev/tgafb.c2
-rw-r--r--drivers/video/fbdev/tridentfb.c2
-rw-r--r--drivers/video/fbdev/valkyriefb.c10
-rw-r--r--drivers/video/fbdev/vt8623fb.c2
-rw-r--r--drivers/virtio/Kconfig11
-rw-r--r--drivers/virtio/virtio.c4
-rw-r--r--drivers/virtio/virtio_balloon.c2
-rw-r--r--drivers/virtio/virtio_mem.c6
-rw-r--r--drivers/virtio/virtio_mmio.c5
-rw-r--r--drivers/virtio/virtio_pci_common.c12
-rw-r--r--drivers/virtio/virtio_pci_legacy.c2
-rw-r--r--drivers/virtio/virtio_pci_modern.c136
-rw-r--r--drivers/virtio/virtio_pci_modern_dev.c39
-rw-r--r--drivers/virtio/virtio_ring.c778
-rw-r--r--drivers/virtio/virtio_vdpa.c2
-rw-r--r--drivers/watchdog/Kconfig9
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/armada_37xx_wdt.c2
-rw-r--r--drivers/watchdog/bcm7038_wdt.c8
-rw-r--r--drivers/watchdog/booke_wdt.c2
-rw-r--r--drivers/watchdog/dw_wdt.c8
-rw-r--r--drivers/watchdog/f71808e_wdt.c4
-rw-r--r--drivers/watchdog/max77620_wdt.c4
-rw-r--r--drivers/watchdog/mtk_wdt.c10
-rw-r--r--drivers/watchdog/pc87413_wdt.c2
-rw-r--r--drivers/watchdog/pm8916_wdt.c41
-rw-r--r--drivers/watchdog/pseries-wdt.c239
-rw-r--r--drivers/watchdog/realtek_otto_wdt.c1
-rw-r--r--drivers/watchdog/s3c2410_wdt.c9
-rw-r--r--drivers/watchdog/sama5d4_wdt.c8
-rw-r--r--drivers/watchdog/simatic-ipc-wdt.c15
-rw-r--r--drivers/watchdog/sp5100_tco.c1
-rw-r--r--drivers/watchdog/sp805_wdt.c5
-rw-r--r--drivers/watchdog/st_lpc_wdt.c9
-rw-r--r--drivers/watchdog/tegra_wdt.c14
-rw-r--r--drivers/watchdog/wdat_wdt.c7
-rw-r--r--drivers/xen/events/events_base.c53
-rw-r--r--drivers/xen/privcmd.c21
-rw-r--r--drivers/xen/xen-pciback/pciback_ops.c2
-rw-r--r--drivers/xen/xen-scsiback.c2
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c4
-rw-r--r--drivers/xen/xenbus/xenbus_probe_backend.c2
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c2
-rw-r--r--fs/9p/fid.c61
-rw-r--r--fs/9p/fid.h6
-rw-r--r--fs/9p/vfs_addr.c4
-rw-r--r--fs/9p/vfs_dentry.c4
-rw-r--r--fs/9p/vfs_dir.c2
-rw-r--r--fs/9p/vfs_file.c9
-rw-r--r--fs/9p/vfs_inode.c89
-rw-r--r--fs/9p/vfs_inode_dotl.c82
-rw-r--r--fs/9p/vfs_super.c8
-rw-r--r--fs/9p/xattr.c8
-rw-r--r--fs/Kconfig12
-rw-r--r--fs/afs/cell.c61
-rw-r--r--fs/afs/cmservice.c4
-rw-r--r--fs/afs/inode.c2
-rw-r--r--fs/afs/internal.h16
-rw-r--r--fs/afs/proc.c6
-rw-r--r--fs/afs/rxrpc.c31
-rw-r--r--fs/afs/server.c46
-rw-r--r--fs/afs/vl_list.c19
-rw-r--r--fs/afs/volume.c21
-rw-r--r--fs/afs/write.c2
-rw-r--r--fs/attr.c2
-rw-r--r--fs/autofs/autofs_i.h7
-rw-r--r--fs/autofs/expire.c2
-rw-r--r--fs/autofs/inode.c1
-rw-r--r--fs/autofs/root.c108
-rw-r--r--fs/btrfs/block-group.c51
-rw-r--r--fs/btrfs/block-group.h4
-rw-r--r--fs/btrfs/ctree.c3
-rw-r--r--fs/btrfs/ctree.h3
-rw-r--r--fs/btrfs/dev-replace.c5
-rw-r--r--fs/btrfs/disk-io.c82
-rw-r--r--fs/btrfs/disk-io.h10
-rw-r--r--fs/btrfs/extent-tree.c48
-rw-r--r--fs/btrfs/extent_io.c44
-rw-r--r--fs/btrfs/file.c2
-rw-r--r--fs/btrfs/inode.c14
-rw-r--r--fs/btrfs/locking.c91
-rw-r--r--fs/btrfs/locking.h14
-rw-r--r--fs/btrfs/relocation.c9
-rw-r--r--fs/btrfs/root-tree.c5
-rw-r--r--fs/btrfs/super.c2
-rw-r--r--fs/btrfs/tree-checker.c25
-rw-r--r--fs/btrfs/tree-log.c8
-rw-r--r--fs/btrfs/volumes.c5
-rw-r--r--fs/btrfs/xattr.c3
-rw-r--r--fs/ceph/addr.c61
-rw-r--r--fs/ceph/caps.c38
-rw-r--r--fs/ceph/dir.c79
-rw-r--r--fs/ceph/file.c132
-rw-r--r--fs/ceph/inode.c13
-rw-r--r--fs/ceph/mds_client.c165
-rw-r--r--fs/ceph/mds_client.h13
-rw-r--r--fs/ceph/mdsmap.c22
-rw-r--r--fs/ceph/super.c19
-rw-r--r--fs/ceph/super.h31
-rw-r--r--fs/ceph/xattr.c12
-rw-r--r--fs/cifs/Makefile6
-rw-r--r--fs/cifs/cached_dir.c388
-rw-r--r--fs/cifs/cached_dir.h64
-rw-r--r--fs/cifs/cifs_debug.c74
-rw-r--r--fs/cifs/cifsacl.c2
-rw-r--r--fs/cifs/cifsencrypt.c9
-rw-r--r--fs/cifs/cifsfs.c74
-rw-r--r--fs/cifs/cifsfs.h4
-rw-r--r--fs/cifs/cifsglob.h186
-rw-r--r--fs/cifs/cifsproto.h13
-rw-r--r--fs/cifs/cifsroot.c2
-rw-r--r--fs/cifs/cifssmb.c477
-rw-r--r--fs/cifs/connect.c326
-rw-r--r--fs/cifs/dfs_cache.c8
-rw-r--r--fs/cifs/dir.c8
-rw-r--r--fs/cifs/file.c316
-rw-r--r--fs/cifs/fs_context.c9
-rw-r--r--fs/cifs/fs_context.h8
-rw-r--r--fs/cifs/fscache.h16
-rw-r--r--fs/cifs/inode.c65
-rw-r--r--fs/cifs/ioctl.c2
-rw-r--r--fs/cifs/link.c8
-rw-r--r--fs/cifs/misc.c64
-rw-r--r--fs/cifs/netmisc.c4
-rw-r--r--fs/cifs/readdir.c11
-rw-r--r--fs/cifs/sess.c5
-rw-r--r--fs/cifs/smb1ops.c10
-rw-r--r--fs/cifs/smb2file.c1
-rw-r--r--fs/cifs/smb2inode.c11
-rw-r--r--fs/cifs/smb2misc.c62
-rw-r--r--fs/cifs/smb2ops.c483
-rw-r--r--fs/cifs/smb2pdu.c73
-rw-r--r--fs/cifs/smb2proto.h16
-rw-r--r--fs/cifs/smb2transport.c38
-rw-r--r--fs/cifs/transport.c341
-rw-r--r--fs/cifs/xattr.c5
-rw-r--r--fs/crypto/fname.c36
-rw-r--r--fs/crypto/fscrypt_private.h9
-rw-r--r--fs/crypto/hooks.c6
-rw-r--r--fs/crypto/policy.c35
-rw-r--r--fs/dax.c401
-rw-r--r--fs/dcache.c87
-rw-r--r--fs/direct-io.c5
-rw-r--r--fs/erofs/super.c10
-rw-r--r--fs/erofs/utils.c2
-rw-r--r--fs/eventpoll.c22
-rw-r--r--fs/exec.c17
-rw-r--r--fs/exfat/exfat_fs.h19
-rw-r--r--fs/exfat/fatent.c2
-rw-r--r--fs/exfat/file.c82
-rw-r--r--fs/exfat/inode.c41
-rw-r--r--fs/exfat/misc.c17
-rw-r--r--fs/exfat/namei.c22
-rw-r--r--fs/exfat/nls.c4
-rw-r--r--fs/exfat/super.c4
-rw-r--r--fs/ext2/super.c7
-rw-r--r--fs/ext2/xattr.c170
-rw-r--r--fs/ext4/balloc.c2
-rw-r--r--fs/ext4/ext4.h16
-rw-r--r--fs/ext4/ext4_jbd2.c3
-rw-r--r--fs/ext4/extents_status.c3
-rw-r--r--fs/ext4/fast_commit.c44
-rw-r--r--fs/ext4/indirect.c4
-rw-r--r--fs/ext4/inline.c33
-rw-r--r--fs/ext4/inode.c26
-rw-r--r--fs/ext4/ioctl.c105
-rw-r--r--fs/ext4/mballoc.c31
-rw-r--r--fs/ext4/migrate.c4
-rw-r--r--fs/ext4/namei.c30
-rw-r--r--fs/ext4/orphan.c24
-rw-r--r--fs/ext4/resize.c39
-rw-r--r--fs/ext4/super.c22
-rw-r--r--fs/ext4/symlink.c15
-rw-r--r--fs/ext4/xattr.c168
-rw-r--r--fs/ext4/xattr.h16
-rw-r--r--fs/f2fs/compress.c229
-rw-r--r--fs/f2fs/data.c82
-rw-r--r--fs/f2fs/debug.c2
-rw-r--r--fs/f2fs/f2fs.h102
-rw-r--r--fs/f2fs/file.c79
-rw-r--r--fs/f2fs/gc.c11
-rw-r--r--fs/f2fs/gc.h21
-rw-r--r--fs/f2fs/inode.c3
-rw-r--r--fs/f2fs/node.c14
-rw-r--r--fs/f2fs/segment.c79
-rw-r--r--fs/f2fs/segment.h11
-rw-r--r--fs/f2fs/super.c92
-rw-r--r--fs/f2fs/sysfs.c56
-rw-r--r--fs/fat/namei_vfat.c231
-rw-r--r--fs/fs-writeback.c12
-rw-r--r--fs/fscache/cookie.c9
-rw-r--r--fs/fuse/control.c4
-rw-r--r--fs/fuse/dax.c2
-rw-r--r--fs/fuse/dev.c7
-rw-r--r--fs/fuse/dir.c16
-rw-r--r--fs/fuse/file.c44
-rw-r--r--fs/fuse/inode.c16
-rw-r--r--fs/fuse/ioctl.c15
-rw-r--r--fs/fuse/virtio_fs.c9
-rw-r--r--fs/gfs2/aops.c26
-rw-r--r--fs/gfs2/dir.c2
-rw-r--r--fs/gfs2/file.c5
-rw-r--r--fs/gfs2/glock.c202
-rw-r--r--fs/gfs2/glock.h2
-rw-r--r--fs/gfs2/glops.c31
-rw-r--r--fs/gfs2/incore.h6
-rw-r--r--fs/gfs2/lock_dlm.c2
-rw-r--r--fs/gfs2/log.c5
-rw-r--r--fs/gfs2/main.c3
-rw-r--r--fs/gfs2/ops_fstype.c2
-rw-r--r--fs/gfs2/quota.c28
-rw-r--r--fs/gfs2/rgrp.c12
-rw-r--r--fs/gfs2/rgrp.h5
-rw-r--r--fs/gfs2/super.c2
-rw-r--r--fs/gfs2/xattr.c2
-rw-r--r--fs/hugetlbfs/inode.c44
-rw-r--r--fs/inode.c58
-rw-r--r--fs/iomap/buffered-io.c15
-rw-r--r--fs/iomap/direct-io.c2
-rw-r--r--fs/jbd2/checkpoint.c6
-rw-r--r--fs/jbd2/commit.c32
-rw-r--r--fs/jbd2/journal.c44
-rw-r--r--fs/jbd2/recovery.c30
-rw-r--r--fs/jbd2/revoke.c8
-rw-r--r--fs/jbd2/transaction.c40
-rw-r--r--fs/kernel_read_file.c38
-rw-r--r--fs/ksmbd/auth.c56
-rw-r--r--fs/ksmbd/auth.h11
-rw-r--r--fs/ksmbd/connection.c9
-rw-r--r--fs/ksmbd/connection.h10
-rw-r--r--fs/ksmbd/ksmbd_netlink.h2
-rw-r--r--fs/ksmbd/mgmt/share_config.c20
-rw-r--r--fs/ksmbd/mgmt/share_config.h3
-rw-r--r--fs/ksmbd/mgmt/tree_connect.c21
-rw-r--r--fs/ksmbd/mgmt/tree_connect.h4
-rw-r--r--fs/ksmbd/mgmt/user_session.c95
-rw-r--r--fs/ksmbd/mgmt/user_session.h13
-rw-r--r--fs/ksmbd/oplock.c46
-rw-r--r--fs/ksmbd/server.c8
-rw-r--r--fs/ksmbd/smb2misc.c12
-rw-r--r--fs/ksmbd/smb2pdu.c147
-rw-r--r--fs/ksmbd/smb_common.h2
-rw-r--r--fs/ksmbd/smbacl.c130
-rw-r--r--fs/ksmbd/smbacl.h2
-rw-r--r--fs/ksmbd/vfs.c8
-rw-r--r--fs/ksmbd/vfs_cache.c2
-rw-r--r--fs/lockd/svc4proc.c12
-rw-r--r--fs/lockd/svclock.c10
-rw-r--r--fs/lockd/svcproc.c5
-rw-r--r--fs/lockd/xdr4.c19
-rw-r--r--fs/locks.c1
-rw-r--r--fs/mbcache.c125
-rw-r--r--fs/namei.c80
-rw-r--r--fs/namespace.c7
-rw-r--r--fs/nfs/blocklayout/dev.c42
-rw-r--r--fs/nfs/client.c13
-rw-r--r--fs/nfs/dir.c83
-rw-r--r--fs/nfs/direct.c58
-rw-r--r--fs/nfs/file.c17
-rw-r--r--fs/nfs/filelayout/filelayout.c2
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c4
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayoutdev.c6
-rw-r--r--fs/nfs/fs_context.c26
-rw-r--r--fs/nfs/inode.c1
-rw-r--r--fs/nfs/internal.h51
-rw-r--r--fs/nfs/nfs3client.c1
-rw-r--r--fs/nfs/nfs42xattr.c7
-rw-r--r--fs/nfs/nfs42xdr.c170
-rw-r--r--fs/nfs/nfs4client.c4
-rw-r--r--fs/nfs/nfs4file.c6
-rw-r--r--fs/nfs/nfs4idmap.c46
-rw-r--r--fs/nfs/nfs4proc.c32
-rw-r--r--fs/nfs/nfstrace.h215
-rw-r--r--fs/nfs/pnfs.c1
-rw-r--r--fs/nfs/super.c2
-rw-r--r--fs/nfs/write.c64
-rw-r--r--fs/nfsd/acl.h6
-rw-r--r--fs/nfsd/filecache.c751
-rw-r--r--fs/nfsd/filecache.h11
-rw-r--r--fs/nfsd/netns.h3
-rw-r--r--fs/nfsd/nfs2acl.c6
-rw-r--r--fs/nfsd/nfs3acl.c4
-rw-r--r--fs/nfsd/nfs3proc.c35
-rw-r--r--fs/nfsd/nfs4acl.c46
-rw-r--r--fs/nfsd/nfs4callback.c37
-rw-r--r--fs/nfsd/nfs4proc.c330
-rw-r--r--fs/nfsd/nfs4state.c127
-rw-r--r--fs/nfsd/nfs4xdr.c123
-rw-r--r--fs/nfsd/nfscache.c3
-rw-r--r--fs/nfsd/nfsctl.c21
-rw-r--r--fs/nfsd/nfsd.h6
-rw-r--r--fs/nfsd/nfsfh.c27
-rw-r--r--fs/nfsd/nfsfh.h58
-rw-r--r--fs/nfsd/nfsproc.c27
-rw-r--r--fs/nfsd/state.h1
-rw-r--r--fs/nfsd/trace.h327
-rw-r--r--fs/nfsd/vfs.c256
-rw-r--r--fs/nfsd/vfs.h33
-rw-r--r--fs/nfsd/xdr4.h60
-rw-r--r--fs/ntfs3/attrib.c557
-rw-r--r--fs/ntfs3/bitmap.c12
-rw-r--r--fs/ntfs3/file.c110
-rw-r--r--fs/ntfs3/frecord.c128
-rw-r--r--fs/ntfs3/fslog.c4
-rw-r--r--fs/ntfs3/fsntfs.c92
-rw-r--r--fs/ntfs3/index.c33
-rw-r--r--fs/ntfs3/inode.c19
-rw-r--r--fs/ntfs3/namei.c6
-rw-r--r--fs/ntfs3/ntfs_fs.h16
-rw-r--r--fs/ntfs3/record.c27
-rw-r--r--fs/ntfs3/run.c108
-rw-r--r--fs/ntfs3/super.c17
-rw-r--r--fs/ntfs3/xattr.c51
-rw-r--r--fs/ocfs2/dlmfs/dlmfs.c14
-rw-r--r--fs/ocfs2/dlmglue.c8
-rw-r--r--fs/ocfs2/heartbeat.c27
-rw-r--r--fs/ocfs2/namei.c1
-rw-r--r--fs/ocfs2/quota_global.c2
-rw-r--r--fs/ocfs2/super.c3
-rw-r--r--fs/overlayfs/export.c2
-rw-r--r--fs/overlayfs/inode.c15
-rw-r--r--fs/overlayfs/namei.c4
-rw-r--r--fs/overlayfs/overlayfs.h6
-rw-r--r--fs/overlayfs/super.c13
-rw-r--r--fs/posix_acl.c15
-rw-r--r--fs/proc/array.c5
-rw-r--r--fs/proc/base.c46
-rw-r--r--fs/proc/inode.c22
-rw-r--r--fs/proc/kmsg.c1
-rw-r--r--fs/proc/nommu.c1
-rw-r--r--fs/proc/proc_net.c9
-rw-r--r--fs/proc/proc_tty.c2
-rw-r--r--fs/proc/root.c8
-rw-r--r--fs/proc/task_mmu.c14
-rw-r--r--fs/proc/vmcore.c1
-rw-r--r--fs/proc_namespace.c2
-rw-r--r--fs/quota/dquot.c2
-rw-r--r--fs/read_write.c6
-rw-r--r--fs/remap_range.c31
-rw-r--r--fs/splice.c54
-rw-r--r--fs/squashfs/Makefile4
-rw-r--r--fs/squashfs/block.c10
-rw-r--r--fs/squashfs/decompressor.h1
-rw-r--r--fs/squashfs/file.c133
-rw-r--r--fs/squashfs/file_direct.c90
-rw-r--r--fs/squashfs/lz4_wrapper.c7
-rw-r--r--fs/squashfs/lzo_wrapper.c7
-rw-r--r--fs/squashfs/page_actor.c53
-rw-r--r--fs/squashfs/page_actor.h62
-rw-r--r--fs/squashfs/super.c33
-rw-r--r--fs/squashfs/xz_wrapper.c11
-rw-r--r--fs/squashfs/zlib_wrapper.c12
-rw-r--r--fs/squashfs/zstd_wrapper.c12
-rw-r--r--fs/super.c39
-rw-r--r--fs/ubifs/super.c2
-rw-r--r--fs/userfaultfd.c10
-rw-r--r--fs/xfs/Makefile6
-rw-r--r--fs/xfs/libxfs/xfs_ag.c171
-rw-r--r--fs/xfs/libxfs/xfs_ag.h75
-rw-r--r--fs/xfs/libxfs/xfs_ag_resv.c2
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c145
-rw-r--r--fs/xfs/libxfs/xfs_alloc.h58
-rw-r--r--fs/xfs/libxfs/xfs_alloc_btree.c9
-rw-r--r--fs/xfs/libxfs/xfs_attr.c22
-rw-r--r--fs/xfs/libxfs/xfs_attr.h10
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.c28
-rw-r--r--fs/xfs/libxfs/xfs_attr_remote.c15
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c84
-rw-r--r--fs/xfs/libxfs/xfs_bmap_btree.c10
-rw-r--r--fs/xfs/libxfs/xfs_btree.c29
-rw-r--r--fs/xfs/libxfs/xfs_dir2.c2
-rw-r--r--fs/xfs/libxfs/xfs_dir2_block.c6
-rw-r--r--fs/xfs/libxfs/xfs_dir2_sf.c8
-rw-r--r--fs/xfs/libxfs/xfs_format.h2
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.c86
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.h25
-rw-r--r--fs/xfs/libxfs/xfs_ialloc_btree.c20
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.c15
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.c65
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.h27
-rw-r--r--fs/xfs/libxfs/xfs_refcount.c19
-rw-r--r--fs/xfs/libxfs/xfs_refcount_btree.c5
-rw-r--r--fs/xfs/libxfs/xfs_rmap.c8
-rw-r--r--fs/xfs/libxfs/xfs_rmap_btree.c9
-rw-r--r--fs/xfs/libxfs/xfs_symlink_remote.c2
-rw-r--r--fs/xfs/libxfs/xfs_trans_resv.c2
-rw-r--r--fs/xfs/libxfs/xfs_types.c73
-rw-r--r--fs/xfs/libxfs/xfs_types.h9
-rw-r--r--fs/xfs/scrub/agheader.c25
-rw-r--r--fs/xfs/scrub/agheader_repair.c21
-rw-r--r--fs/xfs/scrub/alloc.c7
-rw-r--r--fs/xfs/scrub/bmap.c16
-rw-r--r--fs/xfs/scrub/btree.c2
-rw-r--r--fs/xfs/scrub/common.c6
-rw-r--r--fs/xfs/scrub/dabtree.c2
-rw-r--r--fs/xfs/scrub/dir.c2
-rw-r--r--fs/xfs/scrub/fscounters.c4
-rw-r--r--fs/xfs/scrub/health.c2
-rw-r--r--fs/xfs/scrub/ialloc.c12
-rw-r--r--fs/xfs/scrub/quota.c2
-rw-r--r--fs/xfs/scrub/refcount.c9
-rw-r--r--fs/xfs/scrub/repair.c49
-rw-r--r--fs/xfs/scrub/rmap.c6
-rw-r--r--fs/xfs/scrub/symlink.c6
-rw-r--r--fs/xfs/xfs_attr_inactive.c23
-rw-r--r--fs/xfs/xfs_attr_list.c9
-rw-r--r--fs/xfs/xfs_bmap_util.c37
-rw-r--r--fs/xfs/xfs_buf.c301
-rw-r--r--fs/xfs/xfs_buf.h27
-rw-r--r--fs/xfs/xfs_dir2_readdir.c2
-rw-r--r--fs/xfs/xfs_discard.c2
-rw-r--r--fs/xfs/xfs_dquot.c2
-rw-r--r--fs/xfs/xfs_extfree_item.c18
-rw-r--r--fs/xfs/xfs_file.c57
-rw-r--r--fs/xfs/xfs_filestream.c4
-rw-r--r--fs/xfs/xfs_fsmap.c3
-rw-r--r--fs/xfs/xfs_fsops.c16
-rw-r--r--fs/xfs/xfs_icache.c16
-rw-r--r--fs/xfs/xfs_inode.c693
-rw-r--r--fs/xfs/xfs_inode.h70
-rw-r--r--fs/xfs/xfs_inode_item.c58
-rw-r--r--fs/xfs/xfs_ioctl.c10
-rw-r--r--fs/xfs/xfs_iomap.c38
-rw-r--r--fs/xfs/xfs_iomap.h1
-rw-r--r--fs/xfs/xfs_iops.c13
-rw-r--r--fs/xfs/xfs_iops.h3
-rw-r--r--fs/xfs/xfs_itable.c4
-rw-r--r--fs/xfs/xfs_iunlink_item.c180
-rw-r--r--fs/xfs/xfs_iunlink_item.h27
-rw-r--r--fs/xfs/xfs_log.c69
-rw-r--r--fs/xfs/xfs_log.h3
-rw-r--r--fs/xfs/xfs_log_cil.c472
-rw-r--r--fs/xfs/xfs_log_priv.h58
-rw-r--r--fs/xfs/xfs_log_recover.c196
-rw-r--r--fs/xfs/xfs_mount.c3
-rw-r--r--fs/xfs/xfs_mount.h1
-rw-r--r--fs/xfs/xfs_notify_failure.c226
-rw-r--r--fs/xfs/xfs_qm.c17
-rw-r--r--fs/xfs/xfs_reflink.c256
-rw-r--r--fs/xfs/xfs_reflink.h3
-rw-r--r--fs/xfs/xfs_super.c39
-rw-r--r--fs/xfs/xfs_super.h1
-rw-r--r--fs/xfs/xfs_symlink.c2
-rw-r--r--fs/xfs/xfs_trace.h3
-rw-r--r--fs/xfs/xfs_trans.c95
-rw-r--r--fs/xfs/xfs_trans.h7
-rw-r--r--fs/xfs/xfs_trans_priv.h3
-rw-r--r--fs/zonefs/super.c8
-rw-r--r--include/acpi/acpi_bus.h3
-rw-r--r--include/asm-generic/bitops/atomic.h6
-rw-r--r--include/asm-generic/bitops/generic-non-atomic.h175
-rw-r--r--include/asm-generic/bitops/instrumented-non-atomic.h47
-rw-r--r--include/asm-generic/bitops/non-atomic.h122
-rw-r--r--include/asm-generic/bitops/non-instrumented-non-atomic.h17
-rw-r--r--include/asm-generic/io.h105
-rw-r--r--include/asm-generic/pci.h39
-rw-r--r--include/asm-generic/pci_iomap.h2
-rw-r--r--include/asm-generic/sections.h7
-rw-r--r--include/asm-generic/softirq_stack.h2
-rw-r--r--include/crypto/hash.h2
-rw-r--r--include/crypto/kpp.h2
-rw-r--r--include/dt-bindings/clock/efm32-cmu.h43
-rw-r--r--include/dt-bindings/clock/qcom,gcc-ipq8074.h1
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8939.h1
-rw-r--r--include/dt-bindings/clock/r9a07g043-cpg.h20
-rw-r--r--include/dt-bindings/clock/sprd,ums512-clk.h397
-rw-r--r--include/dt-bindings/gpio/gpio.h3
-rw-r--r--include/dt-bindings/pinctrl/r7s9210-pinctrl.h2
-rw-r--r--include/dt-bindings/pinctrl/rzg2l-pinctrl.h2
-rw-r--r--include/dt-bindings/pinctrl/rzv2m-pinctrl.h23
-rw-r--r--include/dt-bindings/reset/mt8186-resets.h5
-rw-r--r--include/dt-bindings/reset/mt8192-resets.h8
-rw-r--r--include/dt-bindings/reset/mt8195-resets.h6
-rw-r--r--include/dt-bindings/reset/sama7g5-reset.h10
-rw-r--r--include/dt-bindings/sound/qcom,wcd9335.h15
-rw-r--r--include/linux/acpi.h8
-rw-r--r--include/linux/acpi_iort.h14
-rw-r--r--include/linux/amd-iommu.h4
-rw-r--r--include/linux/audit.h5
-rw-r--r--include/linux/backing-dev-defs.h7
-rw-r--r--include/linux/backing-dev.h23
-rw-r--r--include/linux/base64.h16
-rw-r--r--include/linux/bitmap.h37
-rw-r--r--include/linux/bitops.h51
-rw-r--r--include/linux/blk-mq.h1
-rw-r--r--include/linux/blkdev.h5
-rw-r--r--include/linux/bpfptr.h8
-rw-r--r--include/linux/buffer_head.h25
-rw-r--r--include/linux/ceph/ceph_fs.h8
-rw-r--r--include/linux/ceph/mdsmap.h1
-rw-r--r--include/linux/ceph/osd_client.h5
-rw-r--r--include/linux/cgroup.h5
-rw-r--r--include/linux/clk-provider.h36
-rw-r--r--include/linux/clk.h134
-rw-r--r--include/linux/compiler-gcc.h11
-rw-r--r--include/linux/compiler.h6
-rw-r--r--include/linux/console_struct.h6
-rw-r--r--include/linux/consolemap.h60
-rw-r--r--include/linux/cpumask.h246
-rw-r--r--include/linux/damon.h25
-rw-r--r--include/linux/dax.h56
-rw-r--r--include/linux/dcache.h2
-rw-r--r--include/linux/dm-bufio.h8
-rw-r--r--include/linux/dma-map-ops.h67
-rw-r--r--include/linux/dma-mapping.h10
-rw-r--r--include/linux/dma/edma.h61
-rw-r--r--include/linux/dma/imx-dma.h13
-rw-r--r--include/linux/dma/qcom-gpi-dma.h2
-rw-r--r--include/linux/dmaengine.h20
-rw-r--r--include/linux/dmar.h6
-rw-r--r--include/linux/firmware/cirrus/cs_dsp.h77
-rw-r--r--include/linux/firmware/xlnx-zynqmp.h5
-rw-r--r--include/linux/fs.h21
-rw-r--r--include/linux/fscrypt.h5
-rw-r--r--include/linux/gfp.h348
-rw-r--r--include/linux/gfp_types.h348
-rw-r--r--include/linux/gpio.h6
-rw-r--r--include/linux/gpio/machine.h1
-rw-r--r--include/linux/highmem.h30
-rw-r--r--include/linux/hmm.h4
-rw-r--r--include/linux/huge_mm.h94
-rw-r--r--include/linux/hugetlb.h52
-rw-r--r--include/linux/hypervisor.h8
-rw-r--r--include/linux/io-pgtable.h15
-rw-r--r--include/linux/io_uring_types.h9
-rw-r--r--include/linux/iomap.h3
-rw-r--r--include/linux/iommu.h27
-rw-r--r--include/linux/ioport.h3
-rw-r--r--include/linux/iova.h2
-rw-r--r--include/linux/isa-dma.h14
-rw-r--r--include/linux/jbd2.h6
-rw-r--r--include/linux/kernel_read_file.h32
-rw-r--r--include/linux/kfifo.h2
-rw-r--r--include/linux/khugepaged.h30
-rw-r--r--include/linux/kmemleak.h8
-rw-r--r--include/linux/kvm_host.h66
-rw-r--r--include/linux/libata.h3
-rw-r--r--include/linux/libnvdimm.h5
-rw-r--r--include/linux/limits.h1
-rw-r--r--include/linux/lockd/lockd.h1
-rw-r--r--include/linux/lockd/xdr.h2
-rw-r--r--include/linux/lsm_hook_defs.h1
-rw-r--r--include/linux/lsm_hooks.h3
-rw-r--r--include/linux/mailbox/mtk-cmdq-mailbox.h10
-rw-r--r--include/linux/mbcache.h33
-rw-r--r--include/linux/mdev.h5
-rw-r--r--include/linux/memcontrol.h89
-rw-r--r--include/linux/memory_hotplug.h9
-rw-r--r--include/linux/memremap.h35
-rw-r--r--include/linux/mfd/ipaq-micro.h4
-rw-r--r--include/linux/mfd/max77714.h2
-rw-r--r--include/linux/mfd/mt6331/core.h40
-rw-r--r--include/linux/mfd/mt6331/registers.h584
-rw-r--r--include/linux/mfd/mt6332/core.h65
-rw-r--r--include/linux/mfd/mt6332/registers.h642
-rw-r--r--include/linux/mfd/mt6357/core.h119
-rw-r--r--include/linux/mfd/mt6357/registers.h1574
-rw-r--r--include/linux/mfd/mt6397/core.h3
-rw-r--r--include/linux/mfd/t7l66xb.h1
-rw-r--r--include/linux/mfd/tc6387xb.h1
-rw-r--r--include/linux/mfd/tc6393xb.h2
-rw-r--r--include/linux/mfd/twl.h57
-rw-r--r--include/linux/migrate.h1
-rw-r--r--include/linux/mlx5/driver.h7
-rw-r--r--include/linux/mlx5/fs.h2
-rw-r--r--include/linux/mlx5/mlx5_ifc.h6
-rw-r--r--include/linux/mlx5/mlx5_ifc_vdpa.h8
-rw-r--r--include/linux/mm.h153
-rw-r--r--include/linux/mm_types.h7
-rw-r--r--include/linux/mmc/card.h9
-rw-r--r--include/linux/mmc/host.h26
-rw-r--r--include/linux/mmc/mmc.h6
-rw-r--r--include/linux/mmc/sdio.h5
-rw-r--r--include/linux/mmdebug.h10
-rw-r--r--include/linux/mmu_notifier.h2
-rw-r--r--include/linux/mmzone.h159
-rw-r--r--include/linux/mtd/hyperbus.h4
-rw-r--r--include/linux/mtd/spi-nor.h4
-rw-r--r--include/linux/mtd/spinand.h1
-rw-r--r--include/linux/net.h2
-rw-r--r--include/linux/netdevice.h20
-rw-r--r--include/linux/netfilter_bridge/ebtables.h4
-rw-r--r--include/linux/nfs_fs.h11
-rw-r--r--include/linux/nfs_page.h3
-rw-r--r--include/linux/nfs_ssc.h2
-rw-r--r--include/linux/nfs_xdr.h1
-rw-r--r--include/linux/nmi.h2
-rw-r--r--include/linux/nodemask.h24
-rw-r--r--include/linux/nvme-auth.h41
-rw-r--r--include/linux/nvme.h213
-rw-r--r--include/linux/of.h5
-rw-r--r--include/linux/of_gpio.h1
-rw-r--r--include/linux/once.h2
-rw-r--r--include/linux/overflow.h1
-rw-r--r--include/linux/page-flags.h55
-rw-r--r--include/linux/pagemap.h2
-rw-r--r--include/linux/pagevec.h1
-rw-r--r--include/linux/pci-doe.h77
-rw-r--r--include/linux/pci-ecam.h1
-rw-r--r--include/linux/pci-p2pdma.h27
-rw-r--r--include/linux/pci.h12
-rw-r--r--include/linux/pci_ids.h1
-rw-r--r--include/linux/pgtable.h28
-rw-r--r--include/linux/pinctrl/pinctrl.h20
-rw-r--r--include/linux/pipe_fs_i.h20
-rw-r--r--include/linux/platform_data/cros_ec_commands.h8
-rw-r--r--include/linux/platform_data/cros_ec_proto.h8
-rw-r--r--include/linux/platform_data/video-imxfb.h70
-rw-r--r--include/linux/platform_data/x86/asus-wmi.h1
-rw-r--r--include/linux/platform_data/x86/p2sb.h28
-rw-r--r--include/linux/platform_data/x86/pmc_atom.h2
-rw-r--r--include/linux/platform_data/x86/simatic-ipc-base.h2
-rw-r--r--include/linux/pm_opp.h322
-rw-r--r--include/linux/psi.h2
-rw-r--r--include/linux/radix-tree.h2
-rw-r--r--include/linux/rbtree.h2
-rw-r--r--include/linux/remoteproc.h4
-rw-r--r--include/linux/rmap.h4
-rw-r--r--include/linux/rv.h70
-rw-r--r--include/linux/scatterlist.h69
-rw-r--r--include/linux/sched.h13
-rw-r--r--include/linux/sched/mm.h4
-rw-r--r--include/linux/security.h5
-rw-r--r--include/linux/serial.h15
-rw-r--r--include/linux/serial_8250.h7
-rw-r--r--include/linux/serial_core.h364
-rw-r--r--include/linux/serial_s3c.h2
-rw-r--r--include/linux/shmem_fs.h6
-rw-r--r--include/linux/shrinker.h33
-rw-r--r--include/linux/skmsg.h3
-rw-r--r--include/linux/soundwire/sdw_intel.h3
-rw-r--r--include/linux/sunrpc/clnt.h5
-rw-r--r--include/linux/sunrpc/sched.h4
-rw-r--r--include/linux/sunrpc/xdr.h11
-rw-r--r--include/linux/sunrpc/xprt.h6
-rw-r--r--include/linux/sunrpc/xprtmultipath.h7
-rw-r--r--include/linux/surface_aggregator/controller.h149
-rw-r--r--include/linux/surface_aggregator/device.h213
-rw-r--r--include/linux/surface_aggregator/serial_hub.h75
-rw-r--r--include/linux/swap.h12
-rw-r--r--include/linux/swapops.h21
-rw-r--r--include/linux/swiotlb.h17
-rw-r--r--include/linux/sysctl.h4
-rw-r--r--include/linux/sysfs.h16
-rw-r--r--include/linux/tboot.h2
-rw-r--r--include/linux/time64.h2
-rw-r--r--include/linux/tpm_eventlog.h2
-rw-r--r--include/linux/trace_events.h20
-rw-r--r--include/linux/tracepoint.h2
-rw-r--r--include/linux/tty_buffer.h1
-rw-r--r--include/linux/tty_ldisc.h14
-rw-r--r--include/linux/tty_port.h2
-rw-r--r--include/linux/ucb1400.h2
-rw-r--r--include/linux/uio.h35
-rw-r--r--include/linux/userfaultfd_k.h2
-rw-r--r--include/linux/vdpa.h4
-rw-r--r--include/linux/vfio.h106
-rw-r--r--include/linux/vfio_pci_core.h65
-rw-r--r--include/linux/virtio.h16
-rw-r--r--include/linux/virtio_config.h20
-rw-r--r--include/linux/virtio_pci_modern.h9
-rw-r--r--include/linux/virtio_ring.h10
-rw-r--r--include/linux/vm_event_item.h15
-rw-r--r--include/linux/wait_bit.h8
-rw-r--r--include/net/9p/client.h49
-rw-r--r--include/net/ax88796.h4
-rw-r--r--include/net/bond_3ad.h2
-rw-r--r--include/net/bonding.h13
-rw-r--r--include/net/busy_poll.h2
-rw-r--r--include/net/genetlink.h5
-rw-r--r--include/net/gro.h2
-rw-r--r--include/net/mptcp.h4
-rw-r--r--include/net/neighbour.h1
-rw-r--r--include/net/netfilter/nf_flow_table.h3
-rw-r--r--include/net/netfilter/nf_tables.h14
-rw-r--r--include/net/netns/conntrack.h2
-rw-r--r--include/net/sock.h93
-rw-r--r--include/net/tls.h2
-rw-r--r--include/ras/ras_event.h1
-rw-r--r--include/rdma/ib_verbs.h13
-rw-r--r--include/rdma/rdma_cm.h1
-rw-r--r--include/rv/automata.h75
-rw-r--r--include/rv/da_monitor.h544
-rw-r--r--include/rv/instrumentation.h29
-rw-r--r--include/scsi/libiscsi.h13
-rw-r--r--include/scsi/libsas.h2
-rw-r--r--include/scsi/sas.h42
-rw-r--r--include/scsi/scsi_device.h2
-rw-r--r--include/scsi/scsi_host.h4
-rw-r--r--include/scsi/scsi_transport_iscsi.h3
-rw-r--r--include/soc/mscc/ocelot.h179
-rw-r--r--include/sound/control.h4
-rw-r--r--include/sound/core.h14
-rw-r--r--include/sound/cs35l41.h7
-rw-r--r--include/sound/dmaengine_pcm.h2
-rw-r--r--include/sound/hda_codec.h1
-rw-r--r--include/sound/hdaudio.h1
-rw-r--r--include/sound/hdmi-codec.h4
-rw-r--r--include/sound/madera-pdata.h2
-rw-r--r--include/sound/pcm.h71
-rw-r--r--include/sound/rawmidi.h6
-rw-r--r--include/sound/simple_card_utils.h5
-rw-r--r--include/sound/soc-acpi-intel-match.h2
-rw-r--r--include/sound/soc-card.h1
-rw-r--r--include/sound/soc-component.h7
-rw-r--r--include/sound/soc-dai.h6
-rw-r--r--include/sound/soc.h15
-rw-r--r--include/sound/sof.h1
-rw-r--r--include/sound/sof/dai-amd.h7
-rw-r--r--include/sound/sof/dai-intel.h2
-rw-r--r--include/sound/sof/dai.h2
-rw-r--r--include/sound/sof/ipc4/header.h8
-rw-r--r--include/sound/sof/stream.h6
-rw-r--r--include/target/iscsi/iscsi_target_core.h14
-rw-r--r--include/target/target_core_backend.h1
-rw-r--r--include/target/target_core_base.h4
-rw-r--r--include/trace/events/9p.h48
-rw-r--r--include/trace/events/afs.h36
-rw-r--r--include/trace/events/devlink.h7
-rw-r--r--include/trace/events/fib.h6
-rw-r--r--include/trace/events/fib6.h8
-rw-r--r--include/trace/events/fscache.h2
-rw-r--r--include/trace/events/iscsi.h4
-rw-r--r--include/trace/events/kvm.h2
-rw-r--r--include/trace/events/neigh.h2
-rw-r--r--include/trace/events/power.h22
-rw-r--r--include/trace/events/qla.h4
-rw-r--r--include/trace/events/rv.h142
-rw-r--r--include/trace/events/rwmmio.h97
-rw-r--r--include/trace/events/scsi.h35
-rw-r--r--include/trace/events/sunrpc.h34
-rw-r--r--include/trace/stages/stage1_struct_define.h3
-rw-r--r--include/trace/stages/stage2_data_offsets.h3
-rw-r--r--include/trace/stages/stage4_event_fields.h11
-rw-r--r--include/trace/stages/stage5_get_offsets.h4
-rw-r--r--include/trace/stages/stage6_event_callback.h12
-rw-r--r--include/uapi/asm-generic/termbits-common.h1
-rw-r--r--include/uapi/linux/atm_zatm.h47
-rw-r--r--include/uapi/linux/elf.h1
-rw-r--r--include/uapi/linux/f2fs.h2
-rw-r--r--include/uapi/linux/genetlink.h5
-rw-r--r--include/uapi/linux/idxd.h6
-rw-r--r--include/uapi/linux/io_uring.h8
-rw-r--r--include/uapi/linux/netfilter_ipv6/ip6t_LOG.h2
-rw-r--r--include/uapi/linux/pci_regs.h29
-rw-r--r--include/uapi/linux/serial.h20
-rw-r--r--include/uapi/linux/serial_core.h4
-rw-r--r--include/uapi/linux/serial_reg.h4
-rw-r--r--include/uapi/linux/swab.h6
-rw-r--r--include/uapi/linux/ublk_cmd.h80
-rw-r--r--include/uapi/linux/vduse.h47
-rw-r--r--include/uapi/linux/vhost.h9
-rw-r--r--include/uapi/linux/vhost_types.h2
-rw-r--r--include/uapi/linux/virtio_config.h7
-rw-r--r--include/uapi/linux/virtio_net.h34
-rw-r--r--include/uapi/linux/virtio_pci.h2
-rw-r--r--include/uapi/linux/virtio_ring.h16
-rw-r--r--include/uapi/linux/xfrm.h2
-rw-r--r--include/uapi/mtd/mtd-abi.h4
-rw-r--r--include/uapi/rdma/erdma-abi.h49
-rw-r--r--include/uapi/rdma/ib_user_ioctl_verbs.h1
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_cmds.h17
-rw-r--r--include/uapi/sound/compress_offload.h2
-rw-r--r--include/uapi/sound/compress_params.h6
-rw-r--r--include/uapi/sound/sof/abi.h4
-rw-r--r--include/uapi/sound/sof/header.h30
-rw-r--r--include/uapi/sound/sof/tokens.h44
-rw-r--r--include/ufs/ufshcd.h21
-rw-r--r--include/ufs/ufshci.h6
-rw-r--r--include/ufs/unipro.h104
-rw-r--r--include/xen/hvm.h2
-rw-r--r--include/xen/interface/hvm/hvm_op.h19
-rw-r--r--init/Kconfig313
-rw-r--r--init/main.c18
-rw-r--r--init/version.c17
-rw-r--r--io_uring/advise.c8
-rw-r--r--io_uring/cancel.c6
-rw-r--r--io_uring/epoll.c4
-rw-r--r--io_uring/fs.c28
-rw-r--r--io_uring/io-wq.c3
-rw-r--r--io_uring/io_uring.c26
-rw-r--r--io_uring/kbuf.c10
-rw-r--r--io_uring/msg_ring.c8
-rw-r--r--io_uring/net.c134
-rw-r--r--io_uring/net.h1
-rw-r--r--io_uring/notif.c12
-rw-r--r--io_uring/notif.h4
-rw-r--r--io_uring/opdef.c4
-rw-r--r--io_uring/opdef.h2
-rw-r--r--io_uring/openclose.c16
-rw-r--r--io_uring/poll.c16
-rw-r--r--io_uring/rsrc.c10
-rw-r--r--io_uring/rw.c28
-rw-r--r--io_uring/splice.c8
-rw-r--r--io_uring/sqpoll.c4
-rw-r--r--io_uring/statx.c6
-rw-r--r--io_uring/sync.c12
-rw-r--r--io_uring/timeout.c26
-rw-r--r--io_uring/uring_cmd.c24
-rw-r--r--io_uring/xattr.c18
-rw-r--r--ipc/mqueue.c2
-rw-r--r--kernel/audit_fsnotify.c1
-rw-r--r--kernel/auditsc.c29
-rw-r--r--kernel/bpf/arraymap.c6
-rw-r--r--kernel/bpf/bpf_iter.c11
-rw-r--r--kernel/bpf/hashtab.c8
-rw-r--r--kernel/bpf/reuseport_array.c9
-rw-r--r--kernel/bpf/syscall.c35
-rw-r--r--kernel/bpf/trampoline.c5
-rw-r--r--kernel/cgroup/cgroup-v1.c2
-rw-r--r--kernel/cgroup/cgroup.c80
-rw-r--r--kernel/cgroup/cpuset.c5
-rw-r--r--kernel/configs/xen.config1
-rw-r--r--kernel/crash_core.c29
-rw-r--r--kernel/dma/coherent.c10
-rw-r--r--kernel/dma/direct.c43
-rw-r--r--kernel/dma/direct.h8
-rw-r--r--kernel/dma/mapping.c47
-rw-r--r--kernel/dma/swiotlb.c263
-rw-r--r--kernel/events/core.c2
-rw-r--r--kernel/exit.c2
-rw-r--r--kernel/fork.c14
-rw-r--r--kernel/hung_task.c2
-rw-r--r--kernel/kallsyms.c23
-rw-r--r--kernel/kallsyms_internal.h30
-rw-r--r--kernel/kexec_file.c10
-rw-r--r--kernel/kprobes.c12
-rw-r--r--kernel/module/Kconfig293
-rw-r--r--kernel/module/decompress.c8
-rw-r--r--kernel/module/internal.h2
-rw-r--r--kernel/module/kallsyms.c41
-rw-r--r--kernel/module/main.c45
-rw-r--r--kernel/module/procfs.c2
-rw-r--r--kernel/profile.c15
-rw-r--r--kernel/rcu/tree.c2
-rw-r--r--kernel/resource.c185
-rw-r--r--kernel/sched/core.c16
-rw-r--r--kernel/sched/psi.c10
-rw-r--r--kernel/sched/sched.h7
-rw-r--r--kernel/sched/wait_bit.c2
-rw-r--r--kernel/sys_ni.c1
-rw-r--r--kernel/sysctl.c20
-rw-r--r--kernel/time/posix-stubs.c3
-rw-r--r--kernel/time/time.c4
-rw-r--r--kernel/trace/Kconfig2
-rw-r--r--kernel/trace/Makefile1
-rw-r--r--kernel/trace/ftrace.c89
-rw-r--r--kernel/trace/rv/Kconfig78
-rw-r--r--kernel/trace/rv/Makefile8
-rw-r--r--kernel/trace/rv/monitors/wip/wip.c88
-rw-r--r--kernel/trace/rv/monitors/wip/wip.h46
-rw-r--r--kernel/trace/rv/monitors/wwnr/wwnr.c87
-rw-r--r--kernel/trace/rv/monitors/wwnr/wwnr.h46
-rw-r--r--kernel/trace/rv/reactor_panic.c43
-rw-r--r--kernel/trace/rv/reactor_printk.c42
-rw-r--r--kernel/trace/rv/rv.c799
-rw-r--r--kernel/trace/rv/rv.h68
-rw-r--r--kernel/trace/rv/rv_reactors.c510
-rw-r--r--kernel/trace/trace.c35
-rw-r--r--kernel/trace/trace.h9
-rw-r--r--kernel/trace/trace_dynevent.c2
-rw-r--r--kernel/trace/trace_eprobe.c128
-rw-r--r--kernel/trace/trace_event_perf.c7
-rw-r--r--kernel/trace/trace_events.c1
-rw-r--r--kernel/trace/trace_events_hist.c5
-rw-r--r--kernel/trace/trace_events_user.c2
-rw-r--r--kernel/trace/trace_kprobe.c16
-rw-r--r--kernel/trace/trace_probe.c33
-rw-r--r--kernel/trace/trace_probe.h5
-rw-r--r--kernel/trace/trace_uprobe.c12
-rw-r--r--kernel/watchdog.c21
-rw-r--r--lib/Kconfig16
-rw-r--r--lib/Kconfig.debug20
-rw-r--r--lib/Makefile7
-rw-r--r--lib/base64.c103
-rw-r--r--lib/bitmap.c11
-rw-r--r--lib/btree.c30
-rw-r--r--lib/cpumask.c97
-rw-r--r--lib/cpumask_kunit.c148
-rw-r--r--lib/crypto/Kconfig1
-rw-r--r--lib/devres.c15
-rw-r--r--lib/error-inject.c28
-rw-r--r--lib/flex_proportions.c10
-rw-r--r--lib/iov_iter.c826
-rw-r--r--lib/list_debug.c12
-rw-r--r--lib/livepatch/test_klp_callbacks_busy.c8
-rw-r--r--lib/lru_cache.c4
-rw-r--r--lib/lz4/lz4_decompress.c6
-rw-r--r--lib/lzo/lzo1x_compress.c6
-rw-r--r--lib/mpi/mpiutil.c2
-rw-r--r--lib/nodemask.c31
-rw-r--r--lib/radix-tree.c4
-rw-r--r--lib/ratelimit.c12
-rw-r--r--lib/scatterlist.c4
-rw-r--r--lib/smp_processor_id.c2
-rw-r--r--lib/stackdepot.c59
-rw-r--r--lib/test_bitmap.c68
-rw-r--r--lib/test_free_pages.c2
-rw-r--r--lib/test_hmm.c347
-rw-r--r--lib/test_hmm_uapi.h19
-rw-r--r--lib/test_printf.c21
-rw-r--r--lib/test_vmalloc.c15
-rw-r--r--lib/trace_readwrite.c47
-rw-r--r--lib/ts_bm.c2
-rw-r--r--mm/Kconfig20
-rw-r--r--mm/Makefile1
-rw-r--r--mm/backing-dev.c10
-rw-r--r--mm/bootmem_info.c2
-rw-r--r--mm/cma_debug.c2
-rw-r--r--mm/compaction.c5
-rw-r--r--mm/damon/Kconfig8
-rw-r--r--mm/damon/Makefile1
-rw-r--r--mm/damon/dbgfs.c82
-rw-r--r--mm/damon/lru_sort.c548
-rw-r--r--mm/damon/ops-common.c42
-rw-r--r--mm/damon/ops-common.h2
-rw-r--r--mm/damon/paddr.c60
-rw-r--r--mm/damon/reclaim.c44
-rw-r--r--mm/damon/sysfs.c69
-rw-r--r--mm/debug_vm_pgtable.c2
-rw-r--r--mm/filemap.c18
-rw-r--r--mm/frontswap.c2
-rw-r--r--mm/gup.c158
-rw-r--r--mm/gup_test.c2
-rw-r--r--mm/highmem.c2
-rw-r--r--mm/huge_memory.c250
-rw-r--r--mm/hugetlb.c270
-rw-r--r--mm/hugetlb_cgroup.c1
-rw-r--r--mm/hugetlb_vmemmap.c633
-rw-r--r--mm/hugetlb_vmemmap.h45
-rw-r--r--mm/internal.h19
-rw-r--r--mm/kasan/common.c8
-rw-r--r--mm/kasan/hw_tags.c32
-rw-r--r--mm/kasan/kasan.h3
-rw-r--r--mm/kasan/report.c12
-rw-r--r--mm/kasan/shadow.c29
-rw-r--r--mm/kfence/core.c4
-rw-r--r--mm/khugepaged.c230
-rw-r--r--mm/kmemleak.c260
-rw-r--r--mm/ksm.c10
-rw-r--r--mm/list_lru.c2
-rw-r--r--mm/madvise.c14
-rw-r--r--mm/memblock.c39
-rw-r--r--mm/memcontrol.c224
-rw-r--r--mm/memory-failure.c509
-rw-r--r--mm/memory.c27
-rw-r--r--mm/memory_hotplug.c57
-rw-r--r--mm/mempolicy.c10
-rw-r--r--mm/mempool.c2
-rw-r--r--mm/memremap.c16
-rw-r--r--mm/migrate.c43
-rw-r--r--mm/migrate_device.c80
-rw-r--r--mm/mlock.c2
-rw-r--r--mm/mmap.c58
-rw-r--r--mm/mprotect.c84
-rw-r--r--mm/nommu.c2
-rw-r--r--mm/page-writeback.c6
-rw-r--r--mm/page_alloc.c439
-rw-r--r--mm/page_vma_mapped.c5
-rw-r--r--mm/percpu.c6
-rw-r--r--mm/rmap.c114
-rw-r--r--mm/shmem.c106
-rw-r--r--mm/shrinker_debug.c286
-rw-r--r--mm/slab.c10
-rw-r--r--mm/sparse-vmemmap.c405
-rw-r--r--mm/sparse.c2
-rw-r--r--mm/swap.c599
-rw-r--r--mm/swap.h19
-rw-r--r--mm/swap_state.c56
-rw-r--r--mm/swapfile.c31
-rw-r--r--mm/userfaultfd.c29
-rw-r--r--mm/util.c2
-rw-r--r--mm/vmalloc.c148
-rw-r--r--mm/vmscan.c332
-rw-r--r--mm/vmstat.c9
-rw-r--r--mm/workingset.c2
-rw-r--r--mm/zsmalloc.c18
-rw-r--r--net/9p/client.c207
-rw-r--r--net/9p/protocol.c3
-rw-r--r--net/9p/trans_fd.c13
-rw-r--r--net/9p/trans_rdma.c2
-rw-r--r--net/9p/trans_virtio.c7
-rw-r--r--net/9p/trans_xen.c2
-rw-r--r--net/ax25/ax25_timer.c4
-rw-r--r--net/batman-adv/trace.h9
-rw-r--r--net/bluetooth/aosp.c15
-rw-r--r--net/bluetooth/hci_conn.c11
-rw-r--r--net/bluetooth/hci_event.c7
-rw-r--r--net/bluetooth/iso.c35
-rw-r--r--net/bluetooth/l2cap_core.c13
-rw-r--r--net/bluetooth/mgmt.c7
-rw-r--r--net/bluetooth/msft.c15
-rw-r--r--net/bpf/test_run.c1
-rw-r--r--net/bridge/netfilter/ebtable_broute.c8
-rw-r--r--net/bridge/netfilter/ebtable_filter.c8
-rw-r--r--net/bridge/netfilter/ebtable_nat.c8
-rw-r--r--net/bridge/netfilter/ebtables.c8
-rw-r--r--net/can/j1939/socket.c5
-rw-r--r--net/can/j1939/transport.c8
-rw-r--r--net/ceph/osd_client.c15
-rw-r--r--net/ceph/osdmap.c32
-rw-r--r--net/ceph/pagelist.c2
-rw-r--r--net/core/bpf_sk_storage.c17
-rw-r--r--net/core/datagram.c3
-rw-r--r--net/core/dev.c20
-rw-r--r--net/core/devlink.c4
-rw-r--r--net/core/filter.c18
-rw-r--r--net/core/gen_stats.c2
-rw-r--r--net/core/gro_cells.c2
-rw-r--r--net/core/neighbour.c48
-rw-r--r--net/core/net_namespace.c7
-rw-r--r--net/core/page_pool.c2
-rw-r--r--net/core/rtnetlink.c1
-rw-r--r--net/core/skbuff.c7
-rw-r--r--net/core/skmsg.c12
-rw-r--r--net/core/sock.c18
-rw-r--r--net/core/sock_map.c20
-rw-r--r--net/core/sysctl_net_core.c15
-rw-r--r--net/dsa/port.c7
-rw-r--r--net/dsa/slave.c2
-rw-r--r--net/ipv4/devinet.c16
-rw-r--r--net/ipv4/ip_output.c2
-rw-r--r--net/ipv4/ip_sockglue.c6
-rw-r--r--net/ipv4/tcp.c53
-rw-r--r--net/ipv4/tcp_output.c2
-rw-r--r--net/ipv6/addrconf.c5
-rw-r--r--net/ipv6/ip6_output.c3
-rw-r--r--net/ipv6/ip6_tunnel.c19
-rw-r--r--net/ipv6/ipv6_sockglue.c4
-rw-r--r--net/ipv6/ndisc.c3
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c1
-rw-r--r--net/ipv6/seg6_local.c10
-rw-r--r--net/key/af_key.c3
-rw-r--r--net/mac80211/trace_msg.h6
-rw-r--r--net/mptcp/protocol.c49
-rw-r--r--net/mptcp/protocol.h13
-rw-r--r--net/mptcp/subflow.c3
-rw-r--r--net/netfilter/Kconfig4
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c4
-rw-r--r--net/netfilter/nf_conntrack_ftp.c24
-rw-r--r--net/netfilter/nf_conntrack_h323_main.c10
-rw-r--r--net/netfilter/nf_conntrack_irc.c12
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c31
-rw-r--r--net/netfilter/nf_conntrack_sane.c68
-rw-r--r--net/netfilter/nf_flow_table_core.c15
-rw-r--r--net/netfilter/nf_flow_table_offload.c8
-rw-r--r--net/netfilter/nf_tables_api.c272
-rw-r--r--net/netfilter/nf_tables_core.c21
-rw-r--r--net/netfilter/nfnetlink.c83
-rw-r--r--net/netfilter/nft_bitwise.c66
-rw-r--r--net/netfilter/nft_cmp.c44
-rw-r--r--net/netfilter/nft_dynset.c2
-rw-r--r--net/netfilter/nft_immediate.c22
-rw-r--r--net/netfilter/nft_osf.c18
-rw-r--r--net/netfilter/nft_payload.c29
-rw-r--r--net/netfilter/nft_range.c27
-rw-r--r--net/netfilter/nft_tproxy.c8
-rw-r--r--net/netfilter/nft_tunnel.c1
-rw-r--r--net/netlabel/netlabel_unlabeled.c2
-rw-r--r--net/netlink/genetlink.c6
-rw-r--r--net/netlink/policy.c14
-rw-r--r--net/qrtr/mhi.c12
-rw-r--r--net/rds/ib_recv.c1
-rw-r--r--net/rds/message.c3
-rw-r--r--net/rose/rose_loopback.c3
-rw-r--r--net/rxrpc/call_object.c4
-rw-r--r--net/rxrpc/sendmsg.c92
-rw-r--r--net/sched/cls_route.c12
-rw-r--r--net/sched/sch_generic.c10
-rw-r--r--net/socket.c2
-rw-r--r--net/sunrpc/auth.c4
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c11
-rw-r--r--net/sunrpc/backchannel_rqst.c14
-rw-r--r--net/sunrpc/clnt.c209
-rw-r--r--net/sunrpc/sched.c1
-rw-r--r--net/sunrpc/svc_xprt.c2
-rw-r--r--net/sunrpc/sysfs.c34
-rw-r--r--net/sunrpc/xdr.c168
-rw-r--r--net/sunrpc/xprt.c59
-rw-r--r--net/sunrpc/xprtmultipath.c111
-rw-r--r--net/sunrpc/xprtrdma/transport.c6
-rw-r--r--net/sunrpc/xprtsock.c18
-rw-r--r--net/tls/tls_device.c46
-rw-r--r--net/tls/tls_device_fallback.c3
-rw-r--r--net/tls/tls_strp.c2
-rw-r--r--net/tls/tls_sw.c8
-rw-r--r--net/vmw_vsock/af_vsock.c10
-rw-r--r--net/wireless/sme.c8
-rw-r--r--net/x25/af_x25.c5
-rw-r--r--net/xfrm/espintcp.c2
-rw-r--r--net/xfrm/xfrm_input.c3
-rw-r--r--net/xfrm/xfrm_output.c1
-rw-r--r--net/xfrm/xfrm_policy.c3
-rw-r--r--net/xfrm/xfrm_state.c1
-rw-r--r--samples/trace_events/trace-events-sample.c14
-rw-r--r--samples/trace_events/trace-events-sample.h32
-rw-r--r--scripts/Kconfig.include2
-rw-r--r--scripts/Makefile.build5
-rw-r--r--scripts/Makefile.compiler2
-rw-r--r--scripts/Makefile.extrawarn1
-rw-r--r--scripts/Makefile.gcc-plugins2
-rw-r--r--scripts/Makefile.modinst3
-rw-r--r--scripts/Makefile.package4
-rwxr-xr-xscripts/bloat-o-meter47
-rwxr-xr-xscripts/checkpatch.pl5
-rwxr-xr-xscripts/checkstack.pl4
-rwxr-xr-xscripts/clang-tools/run-clang-tools.py1
-rw-r--r--scripts/coccinelle/api/alloc/zalloc-simple.cocci2
-rw-r--r--scripts/coccinelle/api/atomic_as_refcounter.cocci2
-rw-r--r--scripts/coccinelle/api/check_bq27xxx_data.cocci2
-rw-r--r--scripts/coccinelle/api/d_find_alias.cocci2
-rw-r--r--scripts/coccinelle/api/err_cast.cocci2
-rw-r--r--scripts/coccinelle/api/kstrdup.cocci2
-rw-r--r--scripts/coccinelle/api/memdup.cocci2
-rw-r--r--scripts/coccinelle/api/memdup_user.cocci2
-rw-r--r--scripts/coccinelle/api/pm_runtime.cocci2
-rw-r--r--scripts/coccinelle/api/resource_size.cocci2
-rw-r--r--scripts/coccinelle/free/clk_put.cocci2
-rw-r--r--scripts/coccinelle/free/devm_free.cocci2
-rw-r--r--scripts/coccinelle/free/ifnulldev_put.cocci55
-rw-r--r--scripts/coccinelle/free/iounmap.cocci2
-rw-r--r--scripts/coccinelle/free/kfree.cocci2
-rw-r--r--scripts/coccinelle/free/kfreeaddr.cocci2
-rw-r--r--scripts/coccinelle/free/pci_free_consistent.cocci2
-rw-r--r--scripts/coccinelle/iterators/device_node_continue.cocci2
-rw-r--r--scripts/coccinelle/iterators/for_each_child.cocci2
-rw-r--r--scripts/coccinelle/iterators/itnull.cocci2
-rw-r--r--scripts/coccinelle/iterators/list_entry_update.cocci2
-rw-r--r--scripts/coccinelle/iterators/use_after_iter.cocci2
-rw-r--r--scripts/coccinelle/locks/call_kern.cocci2
-rw-r--r--scripts/coccinelle/locks/double_lock.cocci2
-rw-r--r--scripts/coccinelle/locks/flags.cocci2
-rw-r--r--scripts/coccinelle/locks/mini_lock.cocci2
-rw-r--r--scripts/coccinelle/misc/boolreturn.cocci59
-rw-r--r--scripts/coccinelle/misc/cstptr.cocci2
-rw-r--r--scripts/coccinelle/misc/doubleinit.cocci2
-rw-r--r--scripts/coccinelle/misc/ifcol.cocci2
-rw-r--r--scripts/coccinelle/misc/newline_in_nl_msg.cocci2
-rw-r--r--scripts/coccinelle/misc/noderef.cocci2
-rw-r--r--scripts/coccinelle/misc/orplus.cocci2
-rw-r--r--scripts/coccinelle/misc/returnvar.cocci2
-rw-r--r--scripts/coccinelle/misc/semicolon.cocci2
-rw-r--r--scripts/coccinelle/misc/test_addr.cocci (renamed from scripts/coccinelle/misc/ifaddr.cocci)6
-rw-r--r--scripts/coccinelle/misc/warn.cocci2
-rw-r--r--scripts/coccinelle/null/badzero.cocci2
-rw-r--r--scripts/coccinelle/null/deref_null.cocci2
-rw-r--r--scripts/coccinelle/null/eno.cocci2
-rw-r--r--scripts/coccinelle/null/kmerr.cocci2
-rw-r--r--scripts/coccinelle/tests/doublebitand.cocci2
-rw-r--r--scripts/coccinelle/tests/doubletest.cocci2
-rw-r--r--scripts/coccinelle/tests/odd_ptr_err.cocci2
-rw-r--r--scripts/coccinelle/tests/unsigned_lesser_than_zero.cocci2
-rw-r--r--scripts/dummy-tools/dummy-plugin-dir/include/plugin-version.h0
-rwxr-xr-xscripts/dummy-tools/gcc10
-rwxr-xr-xscripts/faddr2line7
-rwxr-xr-xscripts/gcc-goto.sh22
-rw-r--r--scripts/gdb/linux/dmesg.py9
-rw-r--r--scripts/gdb/linux/utils.py14
-rw-r--r--scripts/gdb/vmlinux-gdb.py2
-rwxr-xr-xscripts/headers_install.sh2
-rwxr-xr-xscripts/kconfig/qconf-cfg.sh1
-rw-r--r--scripts/mod/file2alias.c4
-rw-r--r--scripts/mod/modpost.c285
-rw-r--r--scripts/mod/modpost.h33
-rw-r--r--scripts/module.lds.S2
-rwxr-xr-xscripts/package/mkspec3
-rwxr-xr-xscripts/remove-stale-files4
-rw-r--r--scripts/sign-file.c2
-rwxr-xr-xscripts/tracing/draw_functrace.py2
-rw-r--r--security/apparmor/Kconfig86
-rw-r--r--security/apparmor/apparmorfs.c103
-rw-r--r--security/apparmor/audit.c2
-rw-r--r--security/apparmor/domain.c5
-rw-r--r--security/apparmor/include/apparmor.h1
-rw-r--r--security/apparmor/include/apparmorfs.h14
-rw-r--r--security/apparmor/include/file.h3
-rw-r--r--security/apparmor/include/ipc.h18
-rw-r--r--security/apparmor/include/label.h2
-rw-r--r--security/apparmor/include/lib.h5
-rw-r--r--security/apparmor/include/path.h4
-rw-r--r--security/apparmor/include/policy.h6
-rw-r--r--security/apparmor/include/policy_ns.h1
-rw-r--r--security/apparmor/include/policy_unpack.h2
-rw-r--r--security/apparmor/include/secid.h5
-rw-r--r--security/apparmor/include/task.h18
-rw-r--r--security/apparmor/ipc.c110
-rw-r--r--security/apparmor/label.c29
-rw-r--r--security/apparmor/lib.c27
-rw-r--r--security/apparmor/lsm.c38
-rw-r--r--security/apparmor/mount.c13
-rw-r--r--security/apparmor/net.c3
-rw-r--r--security/apparmor/policy.c35
-rw-r--r--security/apparmor/policy_ns.c53
-rw-r--r--security/apparmor/policy_unpack.c53
-rw-r--r--security/apparmor/policy_unpack_test.c16
-rw-r--r--security/apparmor/procattr.c2
-rw-r--r--security/apparmor/secid.c56
-rw-r--r--security/apparmor/task.c114
-rw-r--r--security/loadpin/loadpin.c6
-rw-r--r--security/security.c4
-rw-r--r--security/selinux/hooks.c24
-rw-r--r--security/selinux/include/classmap.h2
-rw-r--r--security/smack/smack_lsm.c32
-rw-r--r--sound/ac97/bus.c2
-rw-r--r--sound/aoa/soundbus/sysfs.c22
-rw-r--r--sound/core/Kconfig37
-rw-r--r--sound/core/compress_offload.c9
-rw-r--r--sound/core/control.c290
-rw-r--r--sound/core/control_led.c29
-rw-r--r--sound/core/device.c2
-rw-r--r--sound/core/info.c8
-rw-r--r--sound/core/init.c18
-rw-r--r--sound/core/isadma.c5
-rw-r--r--sound/core/memalloc.c10
-rw-r--r--sound/core/misc.c94
-rw-r--r--sound/core/pcm.c7
-rw-r--r--sound/core/pcm_dmaengine.c30
-rw-r--r--sound/core/pcm_lib.c2
-rw-r--r--sound/core/pcm_memory.c4
-rw-r--r--sound/core/pcm_native.c8
-rw-r--r--sound/core/rawmidi.c274
-rw-r--r--sound/core/timer.c11
-rw-r--r--sound/core/vmaster.c3
-rw-r--r--sound/hda/ext/hdac_ext_controller.c7
-rw-r--r--sound/hda/hdac_bus.c2
-rw-r--r--sound/hda/hdac_controller.c7
-rw-r--r--sound/hda/hdac_sysfs.c42
-rw-r--r--sound/hda/intel-dsp-config.c5
-rw-r--r--sound/hda/trace.h41
-rw-r--r--sound/isa/wavefront/wavefront_synth.c2
-rw-r--r--sound/pci/asihpi/hpi6000.c2
-rw-r--r--sound/pci/asihpi/hpi6205.c2
-rw-r--r--sound/pci/emu10k1/memory.c2
-rw-r--r--sound/pci/ens1370.c2
-rw-r--r--sound/pci/hda/Kconfig12
-rw-r--r--sound/pci/hda/Makefile2
-rw-r--r--sound/pci/hda/cs35l41_hda.c963
-rw-r--r--sound/pci/hda/cs35l41_hda.h39
-rw-r--r--sound/pci/hda/cs35l41_hda_i2c.c19
-rw-r--r--sound/pci/hda/cs35l41_hda_spi.c16
-rw-r--r--sound/pci/hda/hda_bind.c7
-rw-r--r--sound/pci/hda/hda_codec.c55
-rw-r--r--sound/pci/hda/hda_component.h3
-rw-r--r--sound/pci/hda/hda_cs_dsp_ctl.c240
-rw-r--r--sound/pci/hda/hda_cs_dsp_ctl.h39
-rw-r--r--sound/pci/hda/hda_sysfs.c23
-rw-r--r--sound/pci/hda/patch_cirrus.c1
-rw-r--r--sound/pci/hda/patch_conexant.c11
-rw-r--r--sound/pci/hda/patch_cs8409-tables.c10
-rw-r--r--sound/pci/hda/patch_cs8409.h2
-rw-r--r--sound/pci/hda/patch_realtek.c168
-rw-r--r--sound/pci/ice1712/quartet.c2
-rw-r--r--sound/soc/Makefile4
-rw-r--r--sound/soc/adi/axi-i2s.c1
-rw-r--r--sound/soc/adi/axi-spdif.c1
-rw-r--r--sound/soc/amd/Kconfig22
-rw-r--r--sound/soc/amd/Makefile3
-rw-r--r--sound/soc/amd/acp-config.c30
-rw-r--r--sound/soc/amd/acp-es8336.c318
-rw-r--r--sound/soc/amd/acp-pcm-dma.c50
-rw-r--r--sound/soc/amd/acp.h13
-rw-r--r--sound/soc/amd/acp/Kconfig12
-rw-r--r--sound/soc/amd/acp/Makefile2
-rw-r--r--sound/soc/amd/acp/acp-i2s.c169
-rw-r--r--sound/soc/amd/acp/acp-legacy-mach.c32
-rw-r--r--sound/soc/amd/acp/acp-mach-common.c301
-rw-r--r--sound/soc/amd/acp/acp-mach.h9
-rw-r--r--sound/soc/amd/acp/acp-pci.c35
-rw-r--r--sound/soc/amd/acp/acp-pdm.c10
-rw-r--r--sound/soc/amd/acp/acp-platform.c55
-rw-r--r--sound/soc/amd/acp/acp-rembrandt.c401
-rw-r--r--sound/soc/amd/acp/acp-renoir.c48
-rw-r--r--sound/soc/amd/acp/acp-sof-mach.c30
-rw-r--r--sound/soc/amd/acp/amd.h86
-rw-r--r--sound/soc/amd/acp/chip_offset_byte.h40
-rw-r--r--sound/soc/amd/mach-config.h1
-rw-r--r--sound/soc/amd/raven/acp3x-i2s.c3
-rw-r--r--sound/soc/amd/renoir/acp3x-pdm-dma.c13
-rw-r--r--sound/soc/amd/rpl/Makefile5
-rw-r--r--sound/soc/amd/rpl/rpl-pci-acp6x.c227
-rw-r--r--sound/soc/amd/rpl/rpl_acp6x.h36
-rw-r--r--sound/soc/amd/rpl/rpl_acp6x_chip_offset_byte.h30
-rw-r--r--sound/soc/amd/vangogh/acp5x-i2s.c5
-rw-r--r--sound/soc/amd/vangogh/acp5x-mach.c3
-rw-r--r--sound/soc/amd/yc/acp6x-mach.c20
-rw-r--r--sound/soc/amd/yc/acp6x-pdm-dma.c13
-rw-r--r--sound/soc/amd/yc/pci-acp6x.c2
-rw-r--r--sound/soc/atmel/atmel-classd.c1
-rw-r--r--sound/soc/atmel/atmel-i2s.c7
-rw-r--r--sound/soc/atmel/atmel-pdmic.c1
-rw-r--r--sound/soc/atmel/atmel_ssc_dai.c30
-rw-r--r--sound/soc/atmel/mchp-i2s-mcc.c11
-rw-r--r--sound/soc/atmel/mchp-pdmc.c7
-rw-r--r--sound/soc/atmel/mchp-spdifrx.c22
-rw-r--r--sound/soc/atmel/mchp-spdiftx.c25
-rw-r--r--sound/soc/atmel/mikroe-proto.c4
-rw-r--r--sound/soc/au1x/ac97c.c3
-rw-r--r--sound/soc/au1x/i2sc.c5
-rw-r--r--sound/soc/au1x/psc-ac97.c3
-rw-r--r--sound/soc/au1x/psc-i2s.c7
-rw-r--r--sound/soc/bcm/bcm2835-i2s.c23
-rw-r--r--sound/soc/bcm/bcm63xx-i2s-whistler.c1
-rw-r--r--sound/soc/bcm/cygnus-ssp.c11
-rw-r--r--sound/soc/cirrus/ep93xx-ac97.c3
-rw-r--r--sound/soc/cirrus/ep93xx-i2s.c11
-rw-r--r--sound/soc/codecs/88pm860x-codec.c1
-rw-r--r--sound/soc/codecs/Kconfig28
-rw-r--r--sound/soc/codecs/Makefile6
-rw-r--r--sound/soc/codecs/ab8500-codec.c3
-rw-r--r--sound/soc/codecs/ab8500-codec.h2
-rw-r--r--sound/soc/codecs/ac97.c1
-rw-r--r--sound/soc/codecs/ad1836.c1
-rw-r--r--sound/soc/codecs/ad193x.c1
-rw-r--r--sound/soc/codecs/ad1980.c1
-rw-r--r--sound/soc/codecs/ad73311.c1
-rw-r--r--sound/soc/codecs/adau1373.c1
-rw-r--r--sound/soc/codecs/adau1701.c1
-rw-r--r--sound/soc/codecs/adau1761.c1
-rw-r--r--sound/soc/codecs/adau1781.c1
-rw-r--r--sound/soc/codecs/adau1977.c1
-rw-r--r--sound/soc/codecs/adau7002.c1
-rw-r--r--sound/soc/codecs/adau7118.c1
-rw-r--r--sound/soc/codecs/adav80x.c1
-rw-r--r--sound/soc/codecs/ads117x.c1
-rw-r--r--sound/soc/codecs/ak4104.c1
-rw-r--r--sound/soc/codecs/ak4118.c1
-rw-r--r--sound/soc/codecs/ak4375.c1
-rw-r--r--sound/soc/codecs/ak4458.c2
-rw-r--r--sound/soc/codecs/ak4535.c1
-rw-r--r--sound/soc/codecs/ak4554.c1
-rw-r--r--sound/soc/codecs/ak4613.c7
-rw-r--r--sound/soc/codecs/ak4641.c1
-rw-r--r--sound/soc/codecs/ak4642.c1
-rw-r--r--sound/soc/codecs/ak4671.c1
-rw-r--r--sound/soc/codecs/ak5386.c1
-rw-r--r--sound/soc/codecs/ak5558.c2
-rw-r--r--sound/soc/codecs/alc5623.c1
-rw-r--r--sound/soc/codecs/alc5632.c1
-rw-r--r--sound/soc/codecs/bd28623.c1
-rw-r--r--sound/soc/codecs/bt-sco.c1
-rw-r--r--sound/soc/codecs/cpcap.c1
-rw-r--r--sound/soc/codecs/cq93vc.c1
-rw-r--r--sound/soc/codecs/cros_ec_codec.c1
-rw-r--r--sound/soc/codecs/cs35l32.c1
-rw-r--r--sound/soc/codecs/cs35l33.c1
-rw-r--r--sound/soc/codecs/cs35l34.c1
-rw-r--r--sound/soc/codecs/cs35l35.c1
-rw-r--r--sound/soc/codecs/cs35l36.c1
-rw-r--r--sound/soc/codecs/cs35l41-lib.c82
-rw-r--r--sound/soc/codecs/cs35l41-spi.c1
-rw-r--r--sound/soc/codecs/cs35l41.c101
-rw-r--r--sound/soc/codecs/cs35l45-i2c.c4
-rw-r--r--sound/soc/codecs/cs35l45.c6
-rw-r--r--sound/soc/codecs/cs35l45.h4
-rw-r--r--sound/soc/codecs/cs4234.c1
-rw-r--r--sound/soc/codecs/cs4265.c1
-rw-r--r--sound/soc/codecs/cs4270.c2
-rw-r--r--sound/soc/codecs/cs4271.c1
-rw-r--r--sound/soc/codecs/cs42l42.c4
-rw-r--r--sound/soc/codecs/cs42l51.c1
-rw-r--r--sound/soc/codecs/cs42l52.c1
-rw-r--r--sound/soc/codecs/cs42l56.c1
-rw-r--r--sound/soc/codecs/cs42l73.c1
-rw-r--r--sound/soc/codecs/cs42xx8.c1
-rw-r--r--sound/soc/codecs/cs43130.c1
-rw-r--r--sound/soc/codecs/cs4341.c1
-rw-r--r--sound/soc/codecs/cs4349.c1
-rw-r--r--sound/soc/codecs/cs47l15.c1
-rw-r--r--sound/soc/codecs/cs47l24.c1
-rw-r--r--sound/soc/codecs/cs47l35.c1
-rw-r--r--sound/soc/codecs/cs47l85.c1
-rw-r--r--sound/soc/codecs/cs47l90.c1
-rw-r--r--sound/soc/codecs/cs47l92.c1
-rw-r--r--sound/soc/codecs/cs53l30.c1
-rw-r--r--sound/soc/codecs/cx20442.c1
-rw-r--r--sound/soc/codecs/cx2072x.c17
-rw-r--r--sound/soc/codecs/da7210.c3
-rw-r--r--sound/soc/codecs/da7213.c1
-rw-r--r--sound/soc/codecs/da7218.c1
-rw-r--r--sound/soc/codecs/da7219.c7
-rw-r--r--sound/soc/codecs/da732x.c7
-rw-r--r--sound/soc/codecs/da9055.c1
-rw-r--r--sound/soc/codecs/dmic.c1
-rw-r--r--sound/soc/codecs/es7134.c1
-rw-r--r--sound/soc/codecs/es7241.c1
-rw-r--r--sound/soc/codecs/es8316.c21
-rw-r--r--sound/soc/codecs/es8328.c1
-rw-r--r--sound/soc/codecs/gtm601.c1
-rw-r--r--sound/soc/codecs/hda-dai.c102
-rw-r--r--sound/soc/codecs/hda.c395
-rw-r--r--sound/soc/codecs/hda.h19
-rw-r--r--sound/soc/codecs/hdac_hdmi.c1
-rw-r--r--sound/soc/codecs/hdmi-codec.c19
-rw-r--r--sound/soc/codecs/ics43432.c1
-rw-r--r--sound/soc/codecs/inno_rk3036.c1
-rw-r--r--sound/soc/codecs/isabelle.c1
-rw-r--r--sound/soc/codecs/jz4740.c2
-rw-r--r--sound/soc/codecs/lm49453.c7
-rw-r--r--sound/soc/codecs/lochnagar-sc.c1
-rw-r--r--sound/soc/codecs/lpass-va-macro.c11
-rw-r--r--sound/soc/codecs/max98088.c33
-rw-r--r--sound/soc/codecs/max98090.c13
-rw-r--r--sound/soc/codecs/max98095.c1
-rw-r--r--sound/soc/codecs/max98357a.c1
-rw-r--r--sound/soc/codecs/max98371.c1
-rw-r--r--sound/soc/codecs/max98373-i2c.c1
-rw-r--r--sound/soc/codecs/max98373.c16
-rw-r--r--sound/soc/codecs/max98390.c3
-rw-r--r--sound/soc/codecs/max98396.c271
-rw-r--r--sound/soc/codecs/max98396.h10
-rw-r--r--sound/soc/codecs/max9850.c1
-rw-r--r--sound/soc/codecs/max98520.c1
-rw-r--r--sound/soc/codecs/max9860.c7
-rw-r--r--sound/soc/codecs/max9867.c1
-rw-r--r--sound/soc/codecs/max98925.c1
-rw-r--r--sound/soc/codecs/max98926.c1
-rw-r--r--sound/soc/codecs/max98927.c1
-rw-r--r--sound/soc/codecs/mc13783.c1
-rw-r--r--sound/soc/codecs/ml26124.c1
-rw-r--r--sound/soc/codecs/msm8916-wcd-analog.c1
-rw-r--r--sound/soc/codecs/msm8916-wcd-digital.c47
-rw-r--r--sound/soc/codecs/mt6358.c1
-rw-r--r--sound/soc/codecs/mt6359-accdet.c1
-rw-r--r--sound/soc/codecs/mt6359.c1
-rw-r--r--sound/soc/codecs/nau8315.c1
-rw-r--r--sound/soc/codecs/nau8540.c1
-rw-r--r--sound/soc/codecs/nau8810.c1
-rw-r--r--sound/soc/codecs/nau8821.c76
-rw-r--r--sound/soc/codecs/nau8821.h1
-rw-r--r--sound/soc/codecs/nau8822.c15
-rw-r--r--sound/soc/codecs/nau8822.h2
-rw-r--r--sound/soc/codecs/nau8824.c1
-rw-r--r--sound/soc/codecs/nau8825.c3
-rw-r--r--sound/soc/codecs/pcm1681.c1
-rw-r--r--sound/soc/codecs/pcm1789.c1
-rw-r--r--sound/soc/codecs/pcm179x.c1
-rw-r--r--sound/soc/codecs/pcm186x.c2
-rw-r--r--sound/soc/codecs/pcm3008.c1
-rw-r--r--sound/soc/codecs/pcm3168a.c1
-rw-r--r--sound/soc/codecs/pcm5102a.c1
-rw-r--r--sound/soc/codecs/pcm512x.c1
-rw-r--r--sound/soc/codecs/rk3328_codec.c6
-rw-r--r--sound/soc/codecs/rk817_codec.c1
-rw-r--r--sound/soc/codecs/rt1011.c1
-rw-r--r--sound/soc/codecs/rt1015.c1
-rw-r--r--sound/soc/codecs/rt1015p.c1
-rw-r--r--sound/soc/codecs/rt1016.c1
-rw-r--r--sound/soc/codecs/rt1019.c1
-rw-r--r--sound/soc/codecs/rt1305.c1
-rw-r--r--sound/soc/codecs/rt1308-sdw.c12
-rw-r--r--sound/soc/codecs/rt1308.c1
-rw-r--r--sound/soc/codecs/rt1316-sdw.c12
-rw-r--r--sound/soc/codecs/rt274.c11
-rw-r--r--sound/soc/codecs/rt286.c19
-rw-r--r--sound/soc/codecs/rt286.h2
-rw-r--r--sound/soc/codecs/rt298.c61
-rw-r--r--sound/soc/codecs/rt298.h2
-rw-r--r--sound/soc/codecs/rt5514.c1
-rw-r--r--sound/soc/codecs/rt5616.c1
-rw-r--r--sound/soc/codecs/rt5631.c1
-rw-r--r--sound/soc/codecs/rt5640.c18
-rw-r--r--sound/soc/codecs/rt5645.c1
-rw-r--r--sound/soc/codecs/rt5651.c1
-rw-r--r--sound/soc/codecs/rt5659.c1
-rw-r--r--sound/soc/codecs/rt5660.c1
-rw-r--r--sound/soc/codecs/rt5663.c1
-rw-r--r--sound/soc/codecs/rt5665.c1
-rw-r--r--sound/soc/codecs/rt5668.c1
-rw-r--r--sound/soc/codecs/rt5670.c1
-rw-r--r--sound/soc/codecs/rt5677.c1
-rw-r--r--sound/soc/codecs/rt5682.c1
-rw-r--r--sound/soc/codecs/rt5682s.c1
-rw-r--r--sound/soc/codecs/rt700.c5
-rw-r--r--sound/soc/codecs/rt711-sdca.c5
-rw-r--r--sound/soc/codecs/rt711.c5
-rw-r--r--sound/soc/codecs/rt715-sdca.c12
-rw-r--r--sound/soc/codecs/rt715.c12
-rw-r--r--sound/soc/codecs/sgtl5000.c1
-rw-r--r--sound/soc/codecs/si476x.c1
-rw-r--r--sound/soc/codecs/spdif_receiver.c1
-rw-r--r--sound/soc/codecs/spdif_transmitter.c1
-rw-r--r--sound/soc/codecs/ssm2518.c5
-rw-r--r--sound/soc/codecs/ssm2602.c7
-rw-r--r--sound/soc/codecs/ssm4567.c5
-rw-r--r--sound/soc/codecs/sta32x.c5
-rw-r--r--sound/soc/codecs/sta350.c5
-rw-r--r--sound/soc/codecs/sta529.c1
-rw-r--r--sound/soc/codecs/stac9766.c2
-rw-r--r--sound/soc/codecs/sti-sas.c7
-rw-r--r--sound/soc/codecs/tas2552.c13
-rw-r--r--sound/soc/codecs/tas2562.c2
-rw-r--r--sound/soc/codecs/tas2764.c1
-rw-r--r--sound/soc/codecs/tas2770.c105
-rw-r--r--sound/soc/codecs/tas2770.h5
-rw-r--r--sound/soc/codecs/tas2780.c663
-rw-r--r--sound/soc/codecs/tas2780.h101
-rw-r--r--sound/soc/codecs/tas5086.c3
-rw-r--r--sound/soc/codecs/tas571x.c1
-rw-r--r--sound/soc/codecs/tas5720.c6
-rw-r--r--sound/soc/codecs/tas5805m.c1
-rw-r--r--sound/soc/codecs/tas6424.c7
-rw-r--r--sound/soc/codecs/tfa9879.c5
-rw-r--r--sound/soc/codecs/tfa989x.c31
-rw-r--r--sound/soc/codecs/tlv320adc3xxx.c3
-rw-r--r--sound/soc/codecs/tlv320adcx140.c11
-rw-r--r--sound/soc/codecs/tlv320aic23.c8
-rw-r--r--sound/soc/codecs/tlv320aic26.c16
-rw-r--r--sound/soc/codecs/tlv320aic31xx.c20
-rw-r--r--sound/soc/codecs/tlv320aic32x4.c20
-rw-r--r--sound/soc/codecs/tlv320aic3x.c12
-rw-r--r--sound/soc/codecs/tlv320dac33.c13
-rw-r--r--sound/soc/codecs/tscs42xx.c1
-rw-r--r--sound/soc/codecs/twl4030.c102
-rw-r--r--sound/soc/codecs/twl6040.c1
-rw-r--r--sound/soc/codecs/uda1334.c3
-rw-r--r--sound/soc/codecs/uda134x.c1
-rw-r--r--sound/soc/codecs/uda1380.c1
-rw-r--r--sound/soc/codecs/wcd-mbhc-v2.c12
-rw-r--r--sound/soc/codecs/wcd9335.c98
-rw-r--r--sound/soc/codecs/wl1273.c1
-rw-r--r--sound/soc/codecs/wm0010.c1
-rw-r--r--sound/soc/codecs/wm1250-ev1.c1
-rw-r--r--sound/soc/codecs/wm2000.c1
-rw-r--r--sound/soc/codecs/wm2200.c1
-rw-r--r--sound/soc/codecs/wm5100.c1
-rw-r--r--sound/soc/codecs/wm5102.c1
-rw-r--r--sound/soc/codecs/wm5110.c1
-rw-r--r--sound/soc/codecs/wm8350.c1
-rw-r--r--sound/soc/codecs/wm8400.c1
-rw-r--r--sound/soc/codecs/wm8510.c1
-rw-r--r--sound/soc/codecs/wm8523.c1
-rw-r--r--sound/soc/codecs/wm8524.c1
-rw-r--r--sound/soc/codecs/wm8580.c1
-rw-r--r--sound/soc/codecs/wm8711.c1
-rw-r--r--sound/soc/codecs/wm8727.c1
-rw-r--r--sound/soc/codecs/wm8728.c1
-rw-r--r--sound/soc/codecs/wm8731.c1
-rw-r--r--sound/soc/codecs/wm8737.c1
-rw-r--r--sound/soc/codecs/wm8741.c1
-rw-r--r--sound/soc/codecs/wm8750.c1
-rw-r--r--sound/soc/codecs/wm8753.c1
-rw-r--r--sound/soc/codecs/wm8770.c1
-rw-r--r--sound/soc/codecs/wm8776.c1
-rw-r--r--sound/soc/codecs/wm8782.c1
-rw-r--r--sound/soc/codecs/wm8804.c1
-rw-r--r--sound/soc/codecs/wm8900.c1
-rw-r--r--sound/soc/codecs/wm8903.c1
-rw-r--r--sound/soc/codecs/wm8904.c1
-rw-r--r--sound/soc/codecs/wm8940.c1
-rw-r--r--sound/soc/codecs/wm8955.c1
-rw-r--r--sound/soc/codecs/wm8960.c1
-rw-r--r--sound/soc/codecs/wm8961.c1
-rw-r--r--sound/soc/codecs/wm8962.c1
-rw-r--r--sound/soc/codecs/wm8971.c1
-rw-r--r--sound/soc/codecs/wm8974.c1
-rw-r--r--sound/soc/codecs/wm8978.c1
-rw-r--r--sound/soc/codecs/wm8983.c1
-rw-r--r--sound/soc/codecs/wm8985.c1
-rw-r--r--sound/soc/codecs/wm8988.c1
-rw-r--r--sound/soc/codecs/wm8990.c1
-rw-r--r--sound/soc/codecs/wm8991.c1
-rw-r--r--sound/soc/codecs/wm8993.c1
-rw-r--r--sound/soc/codecs/wm8994.c1
-rw-r--r--sound/soc/codecs/wm8995.c1
-rw-r--r--sound/soc/codecs/wm8996.c2
-rw-r--r--sound/soc/codecs/wm8997.c1
-rw-r--r--sound/soc/codecs/wm8998.c1
-rw-r--r--sound/soc/codecs/wm9081.c1
-rw-r--r--sound/soc/codecs/wm9090.c1
-rw-r--r--sound/soc/codecs/wm9705.c1
-rw-r--r--sound/soc/codecs/wm9712.c1
-rw-r--r--sound/soc/codecs/wm9713.c1
-rw-r--r--sound/soc/codecs/wm_adsp.c25
-rw-r--r--sound/soc/codecs/wsa881x.c16
-rw-r--r--sound/soc/codecs/wsa883x.c1511
-rw-r--r--sound/soc/codecs/zl38060.c1
-rw-r--r--sound/soc/dwc/dwc-i2s.c15
-rw-r--r--sound/soc/fsl/Kconfig3
-rw-r--r--sound/soc/fsl/fsl-asoc-card.c5
-rw-r--r--sound/soc/fsl/fsl_asrc.c6
-rw-r--r--sound/soc/fsl/fsl_asrc_dma.c19
-rw-r--r--sound/soc/fsl/fsl_aud2htx.c3
-rw-r--r--sound/soc/fsl/fsl_audmix.c6
-rw-r--r--sound/soc/fsl/fsl_easrc.c16
-rw-r--r--sound/soc/fsl/fsl_easrc.h2
-rw-r--r--sound/soc/fsl/fsl_esai.c11
-rw-r--r--sound/soc/fsl/fsl_micfil.c55
-rw-r--r--sound/soc/fsl/fsl_micfil.h9
-rw-r--r--sound/soc/fsl/fsl_mqs.c136
-rw-r--r--sound/soc/fsl/fsl_rpmsg.c3
-rw-r--r--sound/soc/fsl/fsl_sai.c375
-rw-r--r--sound/soc/fsl/fsl_sai.h28
-rw-r--r--sound/soc/fsl/fsl_spdif.c51
-rw-r--r--sound/soc/fsl/fsl_ssi.c23
-rw-r--r--sound/soc/fsl/fsl_utils.c69
-rw-r--r--sound/soc/fsl/fsl_utils.h7
-rw-r--r--sound/soc/fsl/fsl_xcvr.c12
-rw-r--r--sound/soc/fsl/imx-audmix.c4
-rw-r--r--sound/soc/fsl/imx-audmux.c24
-rw-r--r--sound/soc/fsl/imx-card.c24
-rw-r--r--sound/soc/fsl/mpc5200_psc_i2s.c3
-rw-r--r--sound/soc/fsl/pcm030-audio-fabric.c8
-rw-r--r--sound/soc/generic/audio-graph-card.c4
-rw-r--r--sound/soc/generic/audio-graph-card2-custom-sample.dtsi101
-rw-r--r--sound/soc/generic/audio-graph-card2.c78
-rw-r--r--sound/soc/generic/simple-card-utils.c44
-rw-r--r--sound/soc/generic/test-component.c20
-rw-r--r--sound/soc/hisilicon/hi6210-i2s.c19
-rw-r--r--sound/soc/img/img-i2s-in.c7
-rw-r--r--sound/soc/img/img-i2s-out.c21
-rw-r--r--sound/soc/img/img-parallel-out.c3
-rw-r--r--sound/soc/img/img-spdif-in.c3
-rw-r--r--sound/soc/img/img-spdif-out.c3
-rw-r--r--sound/soc/img/pistachio-internal-dac.c1
-rw-r--r--sound/soc/intel/Kconfig5
-rw-r--r--sound/soc/intel/atom/sst-atom-controls.c8
-rw-r--r--sound/soc/intel/atom/sst/sst.c2
-rw-r--r--sound/soc/intel/atom/sst/sst_ipc.c8
-rw-r--r--sound/soc/intel/avs/Makefile3
-rw-r--r--sound/soc/intel/avs/boards/Kconfig121
-rw-r--r--sound/soc/intel/avs/boards/Makefile27
-rw-r--r--sound/soc/intel/avs/boards/da7219.c282
-rw-r--r--sound/soc/intel/avs/boards/dmic.c93
-rw-r--r--sound/soc/intel/avs/boards/hdaudio.c294
-rw-r--r--sound/soc/intel/avs/boards/i2s_test.c180
-rw-r--r--sound/soc/intel/avs/boards/max98357a.c154
-rw-r--r--sound/soc/intel/avs/boards/max98373.c239
-rw-r--r--sound/soc/intel/avs/boards/nau8825.c353
-rw-r--r--sound/soc/intel/avs/boards/rt274.c310
-rw-r--r--sound/soc/intel/avs/boards/rt286.c281
-rw-r--r--sound/soc/intel/avs/boards/rt298.c281
-rw-r--r--sound/soc/intel/avs/boards/rt5682.c340
-rw-r--r--sound/soc/intel/avs/boards/ssm4567.c271
-rw-r--r--sound/soc/intel/avs/cldma.c12
-rw-r--r--sound/soc/intel/avs/core.c13
-rw-r--r--sound/soc/intel/avs/dsp.c11
-rw-r--r--sound/soc/intel/avs/ipc.c1
-rw-r--r--sound/soc/intel/avs/loader.c2
-rw-r--r--sound/soc/intel/avs/messages.c18
-rw-r--r--sound/soc/intel/avs/path.c54
-rw-r--r--sound/soc/intel/avs/pcm.c6
-rw-r--r--sound/soc/intel/avs/topology.c27
-rw-r--r--sound/soc/intel/boards/Kconfig5
-rw-r--r--sound/soc/intel/boards/Makefile4
-rw-r--r--sound/soc/intel/boards/bdw-rt5650.c1
-rw-r--r--sound/soc/intel/boards/bdw-rt5677.c1
-rw-r--r--sound/soc/intel/boards/bdw_rt286.c280
-rw-r--r--sound/soc/intel/boards/broadwell.c336
-rw-r--r--sound/soc/intel/boards/bxt_da7219_max98357a.c21
-rw-r--r--sound/soc/intel/boards/bxt_rt298.c2
-rw-r--r--sound/soc/intel/boards/bytcht_cx2072x.c2
-rw-r--r--sound/soc/intel/boards/bytcht_da7213.c2
-rw-r--r--sound/soc/intel/boards/bytcht_es8316.c2
-rw-r--r--sound/soc/intel/boards/bytcht_nocodec.c2
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c4
-rw-r--r--sound/soc/intel/boards/bytcr_rt5651.c4
-rw-r--r--sound/soc/intel/boards/bytcr_wm5102.c2
-rw-r--r--sound/soc/intel/boards/cht_bsw_max98090_ti.c3
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5645.c8
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5672.c2
-rw-r--r--sound/soc/intel/boards/cml_rt1011_rt5682.c23
-rw-r--r--sound/soc/intel/boards/glk_rt5682_max98357a.c21
-rw-r--r--sound/soc/intel/boards/haswell.c202
-rw-r--r--sound/soc/intel/boards/hda_dsp_common.c4
-rw-r--r--sound/soc/intel/boards/hsw_rt5640.c177
-rw-r--r--sound/soc/intel/boards/kbl_da7219_max98357a.c21
-rw-r--r--sound/soc/intel/boards/kbl_da7219_max98927.c21
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_max98927.c21
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c21
-rw-r--r--sound/soc/intel/boards/skl_hda_dsp_generic.c4
-rw-r--r--sound/soc/intel/boards/skl_nau88l25_max98357a.c19
-rw-r--r--sound/soc/intel/boards/skl_nau88l25_ssm4567.c19
-rw-r--r--sound/soc/intel/boards/skl_rt286.c2
-rw-r--r--sound/soc/intel/boards/sof_cs42l42.c109
-rw-r--r--sound/soc/intel/boards/sof_da7219_max98373.c23
-rw-r--r--sound/soc/intel/boards/sof_es8336.c164
-rw-r--r--sound/soc/intel/boards/sof_nau8825.c33
-rw-r--r--sound/soc/intel/boards/sof_pcm512x.c2
-rw-r--r--sound/soc/intel/boards/sof_realtek_common.c24
-rw-r--r--sound/soc/intel/boards/sof_realtek_common.h6
-rw-r--r--sound/soc/intel/boards/sof_rt5682.c51
-rw-r--r--sound/soc/intel/boards/sof_sdw.c53
-rw-r--r--sound/soc/intel/boards/sof_sdw_rt711.c3
-rw-r--r--sound/soc/intel/boards/sof_sdw_rt711_sdca.c3
-rw-r--r--sound/soc/intel/catpt/device.c5
-rw-r--r--sound/soc/intel/catpt/pcm.c26
-rw-r--r--sound/soc/intel/catpt/sysfs.c4
-rw-r--r--sound/soc/intel/common/Makefile1
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-adl-match.c61
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.c6
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-mtl-match.c89
-rw-r--r--sound/soc/intel/keembay/kmb_platform.c18
-rw-r--r--sound/soc/intel/skylake/skl-pcm.c5
-rw-r--r--sound/soc/intel/skylake/skl-topology.c6
-rw-r--r--sound/soc/jz4740/Kconfig2
-rw-r--r--sound/soc/jz4740/jz4740-i2s.c44
-rw-r--r--sound/soc/mediatek/Kconfig45
-rw-r--r--sound/soc/mediatek/Makefile1
-rw-r--r--sound/soc/mediatek/common/Makefile2
-rw-r--r--sound/soc/mediatek/common/mtk-dsp-sof-common.c196
-rw-r--r--sound/soc/mediatek/common/mtk-dsp-sof-common.h36
-rw-r--r--sound/soc/mediatek/common/mtk-soc-card.h17
-rw-r--r--sound/soc/mediatek/mt6797/mt6797-mt6351.c6
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-afe-pcm.c6
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c10
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-rt5650.c9
-rw-r--r--sound/soc/mediatek/mt8186/Makefile22
-rw-r--r--sound/soc/mediatek/mt8186/mt8186-afe-clk.c652
-rw-r--r--sound/soc/mediatek/mt8186/mt8186-afe-clk.h106
-rw-r--r--sound/soc/mediatek/mt8186/mt8186-afe-common.h195
-rw-r--r--sound/soc/mediatek/mt8186/mt8186-afe-control.c255
-rw-r--r--sound/soc/mediatek/mt8186/mt8186-afe-gpio.c243
-rw-r--r--sound/soc/mediatek/mt8186/mt8186-afe-gpio.h19
-rw-r--r--sound/soc/mediatek/mt8186/mt8186-afe-pcm.c3000
-rw-r--r--sound/soc/mediatek/mt8186/mt8186-audsys-clk.c150
-rw-r--r--sound/soc/mediatek/mt8186/mt8186-audsys-clk.h15
-rw-r--r--sound/soc/mediatek/mt8186/mt8186-audsys-clkid.h45
-rw-r--r--sound/soc/mediatek/mt8186/mt8186-dai-adda.c865
-rw-r--r--sound/soc/mediatek/mt8186/mt8186-dai-hostless.c298
-rw-r--r--sound/soc/mediatek/mt8186/mt8186-dai-hw-gain.c236
-rw-r--r--sound/soc/mediatek/mt8186/mt8186-dai-i2s.c1223
-rw-r--r--sound/soc/mediatek/mt8186/mt8186-dai-pcm.c418
-rw-r--r--sound/soc/mediatek/mt8186/mt8186-dai-src.c695
-rw-r--r--sound/soc/mediatek/mt8186/mt8186-dai-tdm.c645
-rw-r--r--sound/soc/mediatek/mt8186/mt8186-interconnection.h69
-rw-r--r--sound/soc/mediatek/mt8186/mt8186-misc-control.c252
-rw-r--r--sound/soc/mediatek/mt8186/mt8186-mt6366-common.c57
-rw-r--r--sound/soc/mediatek/mt8186/mt8186-mt6366-common.h17
-rw-r--r--sound/soc/mediatek/mt8186/mt8186-mt6366-da7219-max98357.c1002
-rw-r--r--sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c978
-rw-r--r--sound/soc/mediatek/mt8186/mt8186-reg.h2913
-rw-r--r--sound/soc/mediatek/mt8195/mt8195-afe-clk.c8
-rw-r--r--sound/soc/mediatek/mt8195/mt8195-dai-etdm.c6
-rw-r--r--sound/soc/mediatek/mt8195/mt8195-dai-pcm.c6
-rw-r--r--sound/soc/mediatek/mt8195/mt8195-mt6359.c233
-rw-r--r--sound/soc/meson/aiu-acodec-ctrl.c1
-rw-r--r--sound/soc/meson/aiu-codec-ctrl.c1
-rw-r--r--sound/soc/meson/aiu-encoder-i2s.c2
-rw-r--r--sound/soc/meson/axg-frddr.c3
-rw-r--r--sound/soc/meson/axg-pdm.c4
-rw-r--r--sound/soc/meson/axg-spdifin.c1
-rw-r--r--sound/soc/meson/axg-spdifout.c1
-rw-r--r--sound/soc/meson/axg-tdm-interface.c14
-rw-r--r--sound/soc/meson/axg-toddr.c3
-rw-r--r--sound/soc/meson/g12a-toacodec.c2
-rw-r--r--sound/soc/meson/g12a-tohdmitx.c1
-rw-r--r--sound/soc/meson/meson-codec-glue.c2
-rw-r--r--sound/soc/meson/t9015.c1
-rw-r--r--sound/soc/mxs/mxs-saif.c7
-rw-r--r--sound/soc/pxa/magician.c8
-rw-r--r--sound/soc/pxa/mmp-sspa.c15
-rw-r--r--sound/soc/pxa/pxa-ssp.c43
-rw-r--r--sound/soc/pxa/pxa2xx-i2s.c27
-rw-r--r--sound/soc/qcom/apq8016_sbc.c2
-rw-r--r--sound/soc/qcom/lpass-apq8016.c1
-rw-r--r--sound/soc/qcom/lpass-cpu.c7
-rw-r--r--sound/soc/qcom/qdsp6/audioreach.c4
-rw-r--r--sound/soc/qcom/qdsp6/q6adm.c8
-rw-r--r--sound/soc/qcom/qdsp6/q6afe.c6
-rw-r--r--sound/soc/qcom/qdsp6/q6asm-dai.c23
-rw-r--r--sound/soc/qcom/qdsp6/q6asm.c2
-rw-r--r--sound/soc/qcom/sc7180.c2
-rw-r--r--sound/soc/qcom/sc7280.c33
-rw-r--r--sound/soc/qcom/sdm845.c6
-rw-r--r--sound/soc/qcom/sm8250.c4
-rw-r--r--sound/soc/rockchip/rk3288_hdmi_analog.c4
-rw-r--r--sound/soc/rockchip/rockchip_i2s.c182
-rw-r--r--sound/soc/rockchip/rockchip_i2s_tdm.c13
-rw-r--r--sound/soc/rockchip/rockchip_pdm.c7
-rw-r--r--sound/soc/rockchip/rockchip_spdif.c1
-rw-r--r--sound/soc/samsung/Kconfig20
-rw-r--r--sound/soc/samsung/aries_wm8994.c7
-rw-r--r--sound/soc/samsung/h1940_uda1380.c2
-rw-r--r--sound/soc/samsung/i2s.c8
-rw-r--r--sound/soc/samsung/neo1973_wm8753.c2
-rw-r--r--sound/soc/samsung/pcm.c7
-rw-r--r--sound/soc/samsung/rx1950_uda1380.c4
-rw-r--r--sound/soc/samsung/s3c-i2s-v2.c17
-rw-r--r--sound/soc/samsung/s3c2412-i2s.c7
-rw-r--r--sound/soc/samsung/s3c24xx-i2s.c14
-rw-r--r--sound/soc/samsung/snow.c2
-rw-r--r--sound/soc/samsung/spdif.c7
-rw-r--r--sound/soc/sh/fsi.c6
-rw-r--r--sound/soc/sh/hac.c3
-rw-r--r--sound/soc/sh/rcar/core.c30
-rw-r--r--sound/soc/sh/rcar/ssiu.c3
-rw-r--r--sound/soc/sh/rz-ssi.c37
-rw-r--r--sound/soc/sh/siu_pcm.c17
-rw-r--r--sound/soc/sh/ssi.c13
-rw-r--r--sound/soc/soc-card.c6
-rw-r--r--sound/soc/soc-core.c167
-rw-r--r--sound/soc/soc-dai.c3
-rw-r--r--sound/soc/soc-dapm.c10
-rw-r--r--sound/soc/soc-ops.c51
-rw-r--r--sound/soc/soc-pcm.c6
-rw-r--r--sound/soc/soc-topology-test.c37
-rw-r--r--sound/soc/soc-topology.c2
-rw-r--r--sound/soc/soc-utils.c1
-rw-r--r--sound/soc/sof/Kconfig7
-rw-r--r--sound/soc/sof/Makefile16
-rw-r--r--sound/soc/sof/amd/Kconfig1
-rw-r--r--sound/soc/sof/amd/acp-dsp-offset.h2
-rw-r--r--sound/soc/sof/amd/acp.c36
-rw-r--r--sound/soc/sof/amd/acp.h4
-rw-r--r--sound/soc/sof/amd/pci-rn.c4
-rw-r--r--sound/soc/sof/amd/renoir.c4
-rw-r--r--sound/soc/sof/compress.c73
-rw-r--r--sound/soc/sof/core.c7
-rw-r--r--sound/soc/sof/debug.c11
-rw-r--r--sound/soc/sof/imx/Kconfig1
-rw-r--r--sound/soc/sof/intel/Kconfig27
-rw-r--r--sound/soc/sof/intel/Makefile4
-rw-r--r--sound/soc/sof/intel/apl.c1
-rw-r--r--sound/soc/sof/intel/atom.c16
-rw-r--r--sound/soc/sof/intel/bdw.c7
-rw-r--r--sound/soc/sof/intel/byt.c5
-rw-r--r--sound/soc/sof/intel/cnl.c39
-rw-r--r--sound/soc/sof/intel/hda-dai.c226
-rw-r--r--sound/soc/sof/intel/hda-dsp.c26
-rw-r--r--sound/soc/sof/intel/hda-ipc.c39
-rw-r--r--sound/soc/sof/intel/hda-loader.c28
-rw-r--r--sound/soc/sof/intel/hda-probes.c16
-rw-r--r--sound/soc/sof/intel/hda-stream.c21
-rw-r--r--sound/soc/sof/intel/hda.c237
-rw-r--r--sound/soc/sof/intel/hda.h70
-rw-r--r--sound/soc/sof/intel/icl.c1
-rw-r--r--sound/soc/sof/intel/mtl.c794
-rw-r--r--sound/soc/sof/intel/mtl.h76
-rw-r--r--sound/soc/sof/intel/pci-apl.c1
-rw-r--r--sound/soc/sof/intel/pci-cnl.c1
-rw-r--r--sound/soc/sof/intel/pci-icl.c1
-rw-r--r--sound/soc/sof/intel/pci-mtl.c71
-rw-r--r--sound/soc/sof/intel/pci-tgl.c1
-rw-r--r--sound/soc/sof/intel/shim.h2
-rw-r--r--sound/soc/sof/intel/tgl.c34
-rw-r--r--sound/soc/sof/ipc.c24
-rw-r--r--sound/soc/sof/ipc3-dtrace.c55
-rw-r--r--sound/soc/sof/ipc3-loader.c17
-rw-r--r--sound/soc/sof/ipc3-pcm.c11
-rw-r--r--sound/soc/sof/ipc3-topology.c73
-rw-r--r--sound/soc/sof/ipc3.c36
-rw-r--r--sound/soc/sof/ipc4-control.c216
-rw-r--r--sound/soc/sof/ipc4-pcm.c234
-rw-r--r--sound/soc/sof/ipc4-priv.h7
-rw-r--r--sound/soc/sof/ipc4-topology.c1921
-rw-r--r--sound/soc/sof/ipc4-topology.h270
-rw-r--r--sound/soc/sof/ipc4.c45
-rw-r--r--sound/soc/sof/mediatek/Kconfig1
-rw-r--r--sound/soc/sof/mediatek/adsp_helper.h1
-rw-r--r--sound/soc/sof/mediatek/mt8186/mt8186-clk.c4
-rw-r--r--sound/soc/sof/mediatek/mt8195/mt8195-clk.c7
-rw-r--r--sound/soc/sof/mediatek/mt8195/mt8195-loader.c13
-rw-r--r--sound/soc/sof/mediatek/mt8195/mt8195.c50
-rw-r--r--sound/soc/sof/mediatek/mt8195/mt8195.h5
-rw-r--r--sound/soc/sof/ops.h6
-rw-r--r--sound/soc/sof/pcm.c13
-rw-r--r--sound/soc/sof/sof-audio.h12
-rw-r--r--sound/soc/sof/sof-client-ipc-msg-injector.c29
-rw-r--r--sound/soc/sof/sof-client-probes.c13
-rw-r--r--sound/soc/sof/sof-client-probes.h8
-rw-r--r--sound/soc/sof/sof-client.c4
-rw-r--r--sound/soc/sof/sof-priv.h15
-rw-r--r--sound/soc/sof/topology.c121
-rw-r--r--sound/soc/spear/spdif_in.c3
-rw-r--r--sound/soc/spear/spdif_out.c3
-rw-r--r--sound/soc/sti/sti_uniperif.c3
-rw-r--r--sound/soc/stm/stm32_adfsdm.c3
-rw-r--r--sound/soc/stm/stm32_i2s.c9
-rw-r--r--sound/soc/stm/stm32_sai_sub.c11
-rw-r--r--sound/soc/stm/stm32_spdifrx.c1
-rw-r--r--sound/soc/sunxi/sun4i-codec.c85
-rw-r--r--sound/soc/sunxi/sun4i-i2s.c23
-rw-r--r--sound/soc/sunxi/sun4i-spdif.c3
-rw-r--r--sound/soc/sunxi/sun50i-codec-analog.c8
-rw-r--r--sound/soc/sunxi/sun8i-codec.c7
-rw-r--r--sound/soc/tegra/Kconfig9
-rw-r--r--sound/soc/tegra/Makefile2
-rw-r--r--sound/soc/tegra/tegra20_ac97.c5
-rw-r--r--sound/soc/tegra/tegra20_das.c198
-rw-r--r--sound/soc/tegra/tegra20_das.h120
-rw-r--r--sound/soc/tegra/tegra20_i2s.c9
-rw-r--r--sound/soc/tegra/tegra20_spdif.c1
-rw-r--r--sound/soc/tegra/tegra210_adx.c2
-rw-r--r--sound/soc/tegra/tegra210_ahub.c39
-rw-r--r--sound/soc/tegra/tegra210_i2s.c7
-rw-r--r--sound/soc/tegra/tegra210_mbdrc.c1014
-rw-r--r--sound/soc/tegra/tegra210_mbdrc.h215
-rw-r--r--sound/soc/tegra/tegra210_ope.c419
-rw-r--r--sound/soc/tegra/tegra210_ope.h90
-rw-r--r--sound/soc/tegra/tegra210_peq.c434
-rw-r--r--sound/soc/tegra/tegra210_peq.h56
-rw-r--r--sound/soc/tegra/tegra30_i2s.c9
-rw-r--r--sound/soc/ti/davinci-i2s.c35
-rw-r--r--sound/soc/ti/davinci-mcasp.c16
-rw-r--r--sound/soc/ti/davinci-vcif.c3
-rw-r--r--sound/soc/ti/omap-dmic.c3
-rw-r--r--sound/soc/ti/omap-hdmi.c1
-rw-r--r--sound/soc/ti/omap-mcbsp.c15
-rw-r--r--sound/soc/ti/omap-mcpdm.c7
-rw-r--r--sound/soc/uniphier/evea.c1
-rw-r--r--sound/soc/ux500/mop500.c2
-rw-r--r--sound/soc/ux500/mop500_ab8500.c2
-rw-r--r--sound/soc/ux500/mop500_ab8500.h2
-rw-r--r--sound/soc/ux500/ux500_msp_dai.c41
-rw-r--r--sound/soc/ux500/ux500_msp_dai.h2
-rw-r--r--sound/soc/ux500/ux500_msp_i2s.c2
-rw-r--r--sound/soc/ux500/ux500_msp_i2s.h2
-rw-r--r--sound/soc/ux500/ux500_pcm.c2
-rw-r--r--sound/soc/ux500/ux500_pcm.h2
-rw-r--r--sound/soc/xilinx/xlnx_formatter_pcm.c18
-rw-r--r--sound/soc/xilinx/xlnx_i2s.c1
-rw-r--r--sound/soc/xilinx/xlnx_spdif.c1
-rw-r--r--sound/soc/xtensa/xtfpga-i2s.c19
-rw-r--r--sound/usb/6fire/pcm.c2
-rw-r--r--sound/usb/bcd2000/bcd2000.c3
-rw-r--r--sound/usb/card.c8
-rw-r--r--sound/usb/endpoint.c2
-rw-r--r--sound/usb/hiface/pcm.c2
-rw-r--r--sound/usb/line6/pod.c8
-rw-r--r--sound/usb/line6/podhd.c4
-rw-r--r--sound/usb/mixer_maps.c34
-rw-r--r--sound/usb/mixer_quirks.c188
-rw-r--r--sound/usb/mixer_scarlett_gen2.c91
-rw-r--r--sound/usb/pcm.c2
-rw-r--r--sound/usb/quirks.c2
-rw-r--r--tools/accounting/getdelays.c4
-rw-r--r--tools/arch/arm64/include/uapi/asm/kvm.h6
-rw-r--r--tools/arch/s390/include/uapi/asm/kvm.h1
-rw-r--r--tools/arch/x86/include/asm/amd-ibs.h16
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h5
-rw-r--r--tools/arch/x86/include/asm/msr-index.h12
-rw-r--r--tools/arch/x86/include/asm/rmwcc.h21
-rw-r--r--tools/arch/x86/include/uapi/asm/kvm.h10
-rw-r--r--tools/arch/x86/include/uapi/asm/vmx.h4
-rw-r--r--tools/bpf/Makefile7
-rw-r--r--tools/bpf/bpf_jit_disasm.c5
-rw-r--r--tools/bpf/bpftool/Makefile10
-rw-r--r--tools/bpf/bpftool/jit_disasm.c42
-rw-r--r--tools/build/Makefile.feature4
-rw-r--r--tools/build/feature/Makefile16
-rw-r--r--tools/build/feature/test-all.c4
-rw-r--r--tools/build/feature/test-disassembler-init-styled.c13
-rw-r--r--tools/build/feature/test-libcrypto.c15
-rw-r--r--tools/cgroup/memcg_shrinker.py71
-rw-r--r--tools/include/asm-generic/bitops/non-atomic.h34
-rw-r--r--tools/include/linux/bitmap.h12
-rw-r--r--tools/include/linux/bitops.h16
-rw-r--r--tools/include/linux/compiler_types.h4
-rw-r--r--tools/include/linux/list.h11
-rw-r--r--tools/include/tools/dis-asm-compat.h55
-rw-r--r--tools/include/uapi/asm-generic/fcntl.h2
-rw-r--r--tools/include/uapi/drm/i915_drm.h387
-rw-r--r--tools/include/uapi/linux/fscrypt.h3
-rw-r--r--tools/include/uapi/linux/kvm.h108
-rw-r--r--tools/include/uapi/linux/perf_event.h7
-rw-r--r--tools/include/uapi/linux/vhost.h9
-rw-r--r--tools/lib/bitmap.c6
-rw-r--r--tools/lib/bpf/skel_internal.h4
-rw-r--r--tools/lib/perf/cpumap.c2
-rw-r--r--tools/lib/perf/evsel.c79
-rw-r--r--tools/lib/perf/include/internal/evsel.h4
-rw-r--r--tools/lib/perf/include/perf/cpumap.h2
-rw-r--r--tools/lib/perf/include/perf/event.h58
-rw-r--r--tools/lib/perf/include/perf/evsel.h4
-rw-r--r--tools/lib/perf/tests/test-evsel.c161
-rw-r--r--tools/objtool/check.c37
-rw-r--r--tools/perf/Build1
-rw-r--r--tools/perf/Documentation/guest-files.txt16
-rw-r--r--tools/perf/Documentation/guestmount.txt11
-rw-r--r--tools/perf/Documentation/intel-hybrid.txt10
-rw-r--r--tools/perf/Documentation/perf-buildid-list.txt4
-rw-r--r--tools/perf/Documentation/perf-c2c.txt31
-rw-r--r--tools/perf/Documentation/perf-dlfilter.txt22
-rw-r--r--tools/perf/Documentation/perf-inject.txt21
-rw-r--r--tools/perf/Documentation/perf-intel-pt.txt181
-rw-r--r--tools/perf/Documentation/perf-kvm.txt25
-rw-r--r--tools/perf/Documentation/perf-kwork.txt180
-rw-r--r--tools/perf/Documentation/perf-lock.txt55
-rw-r--r--tools/perf/Documentation/perf-record.txt17
-rw-r--r--tools/perf/Documentation/perf-script.txt16
-rw-r--r--tools/perf/Documentation/perf-stat.txt21
-rw-r--r--tools/perf/Documentation/perf.data-file-format.txt20
-rw-r--r--tools/perf/Makefile.config46
-rw-r--r--tools/perf/Makefile.perf22
-rw-r--r--tools/perf/arch/arm/util/cs-etm.c2
-rw-r--r--tools/perf/arch/arm64/util/arm-spe.c2
-rw-r--r--tools/perf/arch/arm64/util/pmu.c4
-rw-r--r--tools/perf/arch/x86/tests/Build1
-rw-r--r--tools/perf/arch/x86/tests/arch-tests.c2
-rw-r--r--tools/perf/arch/x86/tests/intel-cqm.c2
-rw-r--r--tools/perf/arch/x86/tests/rdpmc.c182
-rw-r--r--tools/perf/arch/x86/util/cpuid.h34
-rw-r--r--tools/perf/arch/x86/util/evlist.c64
-rw-r--r--tools/perf/arch/x86/util/evsel.c72
-rw-r--r--tools/perf/arch/x86/util/header.c27
-rw-r--r--tools/perf/arch/x86/util/intel-bts.c2
-rw-r--r--tools/perf/arch/x86/util/intel-pt.c2
-rw-r--r--tools/perf/arch/x86/util/iostat.c2
-rw-r--r--tools/perf/arch/x86/util/topdown.c51
-rw-r--r--tools/perf/arch/x86/util/topdown.h1
-rw-r--r--tools/perf/arch/x86/util/tsc.c77
-rw-r--r--tools/perf/builtin-annotate.c8
-rw-r--r--tools/perf/builtin-buildid-list.c39
-rw-r--r--tools/perf/builtin-c2c.c454
-rw-r--r--tools/perf/builtin-inject.c1046
-rw-r--r--tools/perf/builtin-kvm.c8
-rw-r--r--tools/perf/builtin-kwork.c1832
-rw-r--r--tools/perf/builtin-list.c2
-rw-r--r--tools/perf/builtin-lock.c958
-rw-r--r--tools/perf/builtin-record.c55
-rw-r--r--tools/perf/builtin-report.c6
-rw-r--r--tools/perf/builtin-sched.c26
-rw-r--r--tools/perf/builtin-script.c27
-rw-r--r--tools/perf/builtin-stat.c81
-rw-r--r--tools/perf/builtin-timechart.c1
-rw-r--r--tools/perf/builtin-trace.c3
-rw-r--r--tools/perf/builtin.h1
-rw-r--r--tools/perf/command-list.txt1
-rw-r--r--tools/perf/include/perf/perf_dlfilter.h8
-rw-r--r--tools/perf/perf.c1
-rw-r--r--tools/perf/pmu-events/Build21
-rw-r--r--tools/perf/pmu-events/arch/arm64/mapfile.csv2
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z16/pai.json1101
-rw-r--r--tools/perf/pmu-events/arch/test/test_soc/cpu/metrics.json64
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/cache.json178
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/floating-point.json19
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/frontend.json38
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/memory.json40
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/other.json97
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/pipeline.json507
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/uncore-other.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/virtual-memory.json63
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/cache.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/floating-point.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/frontend.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/other.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/pipeline.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/virtual-memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json130
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/cache.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/floating-point.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/frontend.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/other.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/pipeline.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/uncore-cache.json152
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/uncore-other.json82
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/uncore.json278
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/virtual-memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json142
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/cache.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/floating-point.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/frontend.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/other.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/uncore-cache.json3619
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/uncore-memory.json2867
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/uncore-other.json1233
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/uncore-power.json492
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/virtual-memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json576
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/cache.json22
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json9
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/frontend.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/memory.json39
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/other.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/uncore-cache.json3433
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/uncore-interconnect.json1428
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/uncore-memory.json2849
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/uncore-other.json3252
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/uncore-power.json437
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/virtual-memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/cache.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json730
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/floating-point.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/frontend.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/other.json63
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json11
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json4401
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/uncore-other.json22493
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/uncore-power.json201
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/virtual-memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/elkhartlake/cache.json956
-rw-r--r--tools/perf/pmu-events/arch/x86/elkhartlake/floating-point.json19
-rw-r--r--tools/perf/pmu-events/arch/x86/elkhartlake/frontend.json34
-rw-r--r--tools/perf/pmu-events/arch/x86/elkhartlake/memory.json388
-rw-r--r--tools/perf/pmu-events/arch/x86/elkhartlake/other.json527
-rw-r--r--tools/perf/pmu-events/arch/x86/elkhartlake/pipeline.json203
-rw-r--r--tools/perf/pmu-events/arch/x86/elkhartlake/virtual-memory.json151
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/cache.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/floating-point.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/frontend.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/pipeline.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/cache.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/floating-point.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/frontend.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/cache.json78
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/floating-point.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/frontend.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json85
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/memory.json75
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/other.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/pipeline.json9
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/uncore-other.json7
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/virtual-memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/cache.json44
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/floating-point.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/frontend.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json496
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/memory.json52
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/other.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/pipeline.json9
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/uncore-cache.json3422
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/uncore-interconnect.json1428
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/uncore-memory.json2839
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/uncore-other.json3170
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/uncore-power.json477
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/virtual-memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/cache.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/floating-point.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/frontend.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json126
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/uncore-other.json31
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/virtual-memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/cache.json28
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/floating-point.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/frontend.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json697
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/memory.json6
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/other.json51
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/pipeline.json12
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/uncore-memory.json1523
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/uncore-other.json37134
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/uncore-power.json225
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/virtual-memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/cache.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/floating-point.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/frontend.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json94
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/other.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/uncore-other.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/virtual-memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/cache.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/floating-point.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/frontend.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json100
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/other.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/uncore-cache.json3301
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/uncore-interconnect.json1741
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/uncore-memory.json1775
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/uncore-other.json2398
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json677
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/virtual-memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/cache.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/floating-point.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/frontend.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json17
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/other.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/pipeline.json16
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/uncore-cache.json1900
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/uncore-interconnect.json824
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/uncore-memory.json445
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/uncore-other.json1538
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json351
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/virtual-memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/cache.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/floating-point.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/frontend.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/uncore-memory.json42
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/uncore-other.json4103
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/mapfile.csv74
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/cache.json262
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/frontend.json24
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/memory.json185
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/other.json46
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json254
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/virtual-memory.json46
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/cache.json14
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/floating-point.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/frontend.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/memory.json6
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/virtual-memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/cache.json2950
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/floating-point.json182
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/frontend.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/memory.json672
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/other.json170
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/pipeline.json830
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/virtual-memory.json92
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/cache.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/floating-point.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/frontend.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/other.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json10
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json11
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/uncore-other.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/virtual-memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/cache.json135
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/floating-point.json6
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json16
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/memory.json23
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/other.json68
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json99
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json572
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-other.json9
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/virtual-memory.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/cache.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/floating-point.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/frontend.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/other.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/pipeline.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/virtual-memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/floating-point.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/frontend.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/other.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json178
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/uncore-cache.json142
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/uncore-other.json79
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/uncore.json254
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/floating-point.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/frontend.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/other.json66
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/pipeline.json11
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json673
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/uncore-memory.json3567
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/uncore-other.json22316
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/uncore-power.json201
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/virtual-memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/cache.json (renamed from tools/perf/pmu-events/arch/x86/tremontx/cache.json)60
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/floating-point.json (renamed from tools/perf/pmu-events/arch/x86/tremontx/floating-point.json)9
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/frontend.json (renamed from tools/perf/pmu-events/arch/x86/tremontx/frontend.json)20
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/memory.json (renamed from tools/perf/pmu-events/arch/x86/tremontx/memory.json)4
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/other.json (renamed from tools/perf/pmu-events/arch/x86/tremontx/other.json)18
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/pipeline.json (renamed from tools/perf/pmu-events/arch/x86/tremontx/pipeline.json)98
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/uncore-memory.json619
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/uncore-other.json25192
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/uncore-power.json235
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/virtual-memory.json (renamed from tools/perf/pmu-events/arch/x86/tremontx/virtual-memory.json)69
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/cache.json48
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/floating-point.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/frontend.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/other.json1
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/pipeline.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json378
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/uncore-other.json65
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/virtual-memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/tremontx/uncore-memory.json245
-rw-r--r--tools/perf/pmu-events/arch/x86/tremontx/uncore-other.json2395
-rw-r--r--tools/perf/pmu-events/arch/x86/tremontx/uncore-power.json11
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/cache.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/floating-point.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/frontend.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/virtual-memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/floating-point.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/frontend.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/virtual-memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/floating-point.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/frontend.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/virtual-memory.json2
-rw-r--r--tools/perf/pmu-events/empty-pmu-events.c342
-rw-r--r--tools/perf/pmu-events/jevents.c1342
-rwxr-xr-xtools/perf/pmu-events/jevents.py725
-rw-r--r--tools/perf/pmu-events/jsmn.c352
-rw-r--r--tools/perf/pmu-events/jsmn.h68
-rw-r--r--tools/perf/pmu-events/json.c162
-rw-r--r--tools/perf/pmu-events/json.h39
-rw-r--r--tools/perf/pmu-events/pmu-events.h40
-rw-r--r--tools/perf/scripts/perl/Perf-Trace-Util/Build6
-rw-r--r--tools/perf/scripts/python/intel-pt-events.py32
-rw-r--r--tools/perf/tests/Build1
-rw-r--r--tools/perf/tests/bpf-script-example.c35
-rw-r--r--tools/perf/tests/builtin-test-list.c207
-rw-r--r--tools/perf/tests/builtin-test-list.h12
-rw-r--r--tools/perf/tests/builtin-test.c152
-rw-r--r--tools/perf/tests/code-reading.c2
-rw-r--r--tools/perf/tests/cpumap.c19
-rw-r--r--tools/perf/tests/event-times.c2
-rw-r--r--tools/perf/tests/evsel-roundtrip-name.c4
-rw-r--r--tools/perf/tests/expand-cgroup.c25
-rw-r--r--tools/perf/tests/expr.c13
-rw-r--r--tools/perf/tests/hists_cumulate.c2
-rw-r--r--tools/perf/tests/hists_filter.c4
-rw-r--r--tools/perf/tests/hists_link.c4
-rw-r--r--tools/perf/tests/hists_output.c2
-rw-r--r--tools/perf/tests/keep-tracking.c4
-rw-r--r--tools/perf/tests/mmap-basic.c127
-rw-r--r--tools/perf/tests/parse-metric.c77
-rw-r--r--tools/perf/tests/perf-time-to-tsc.c4
-rw-r--r--tools/perf/tests/pmu-events.c474
-rw-r--r--tools/perf/tests/sample-parsing.c14
-rw-r--r--tools/perf/tests/shell/lib/perf_json_output_lint.py96
-rwxr-xr-xtools/perf/tests/shell/record_offcpu.sh57
-rwxr-xr-xtools/perf/tests/shell/stat+csv_output.sh7
-rwxr-xr-xtools/perf/tests/shell/stat+json_output.sh147
-rwxr-xr-xtools/perf/tests/shell/stat.sh19
-rwxr-xr-xtools/perf/tests/shell/stat_all_metrics.sh47
-rwxr-xr-xtools/perf/tests/shell/test_arm_spe.sh30
-rwxr-xr-xtools/perf/tests/shell/test_brstack.sh114
-rw-r--r--tools/perf/tests/switch-tracking.c24
-rw-r--r--tools/perf/trace/beauty/include/linux/socket.h16
-rw-r--r--tools/perf/util/Build5
-rw-r--r--tools/perf/util/amd-sample-raw.c68
-rw-r--r--tools/perf/util/annotate.c7
-rw-r--r--tools/perf/util/arm-spe-decoder/arm-spe-decoder.c1
-rw-r--r--tools/perf/util/arm-spe-decoder/arm-spe-decoder.h12
-rw-r--r--tools/perf/util/arm-spe.c130
-rw-r--r--tools/perf/util/auxtrace.c30
-rw-r--r--tools/perf/util/auxtrace.h4
-rw-r--r--tools/perf/util/bpf-loader.c2
-rw-r--r--tools/perf/util/bpf_kwork.c346
-rw-r--r--tools/perf/util/bpf_lock_contention.c189
-rw-r--r--tools/perf/util/bpf_off_cpu.c53
-rw-r--r--tools/perf/util/bpf_skel/kwork_trace.bpf.c383
-rw-r--r--tools/perf/util/bpf_skel/lock_contention.bpf.c175
-rw-r--r--tools/perf/util/bpf_skel/off_cpu.bpf.c38
-rw-r--r--tools/perf/util/build-id.c106
-rw-r--r--tools/perf/util/build-id.h16
-rw-r--r--tools/perf/util/callchain.c18
-rw-r--r--tools/perf/util/cpumap.c80
-rw-r--r--tools/perf/util/cpumap.h4
-rw-r--r--tools/perf/util/cs-etm.c2
-rw-r--r--tools/perf/util/data-convert-json.c5
-rw-r--r--tools/perf/util/data.c43
-rw-r--r--tools/perf/util/data.h2
-rw-r--r--tools/perf/util/dlfilter.c2
-rw-r--r--tools/perf/util/dso.h6
-rw-r--r--tools/perf/util/dsos.c15
-rw-r--r--tools/perf/util/env.c62
-rw-r--r--tools/perf/util/env.h14
-rw-r--r--tools/perf/util/event.c1
-rw-r--r--tools/perf/util/event.h48
-rw-r--r--tools/perf/util/events_stats.h2
-rw-r--r--tools/perf/util/evlist.c53
-rw-r--r--tools/perf/util/evlist.h9
-rw-r--r--tools/perf/util/evsel.c70
-rw-r--r--tools/perf/util/evsel.h4
-rw-r--r--tools/perf/util/expr.c13
-rw-r--r--tools/perf/util/genelf.c6
-rw-r--r--tools/perf/util/header.c192
-rw-r--r--tools/perf/util/header.h2
-rw-r--r--tools/perf/util/intel-pt.c183
-rw-r--r--tools/perf/util/jitdump.c9
-rw-r--r--tools/perf/util/kwork.h257
-rw-r--r--tools/perf/util/llvm-utils.c2
-rw-r--r--tools/perf/util/lock-contention.h147
-rw-r--r--tools/perf/util/machine.c57
-rw-r--r--tools/perf/util/machine.h7
-rw-r--r--tools/perf/util/mem-events.c46
-rw-r--r--tools/perf/util/mem-events.h3
-rw-r--r--tools/perf/util/metricgroup.c275
-rw-r--r--tools/perf/util/metricgroup.h5
-rw-r--r--tools/perf/util/ordered-events.h6
-rw-r--r--tools/perf/util/parse-events.c736
-rw-r--r--tools/perf/util/parse-events.h35
-rw-r--r--tools/perf/util/perf_api_probe.c2
-rw-r--r--tools/perf/util/pmu.c154
-rw-r--r--tools/perf/util/pmu.h12
-rw-r--r--tools/perf/util/print-events.c572
-rw-r--r--tools/perf/util/print-events.h22
-rw-r--r--tools/perf/util/probe-event.c8
-rw-r--r--tools/perf/util/record.c4
-rw-r--r--tools/perf/util/record.h1
-rw-r--r--tools/perf/util/s390-sample-raw.c50
-rw-r--r--tools/perf/util/scripting-engines/Build4
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c36
-rw-r--r--tools/perf/util/session.c180
-rw-r--r--tools/perf/util/session.h4
-rw-r--r--tools/perf/util/setup.py12
-rw-r--r--tools/perf/util/stat-display.c383
-rw-r--r--tools/perf/util/stat-shadow.c24
-rw-r--r--tools/perf/util/stat.c1
-rw-r--r--tools/perf/util/stat.h2
-rw-r--r--tools/perf/util/symbol-elf.c27
-rw-r--r--tools/perf/util/symbol.c6
-rw-r--r--tools/perf/util/synthetic-events.c204
-rw-r--r--tools/perf/util/synthetic-events.h4
-rw-r--r--tools/perf/util/thread.c1
-rw-r--r--tools/perf/util/thread.h1
-rw-r--r--tools/perf/util/tool.h3
-rw-r--r--tools/perf/util/topdown.c7
-rw-r--r--tools/perf/util/topdown.h3
-rw-r--r--tools/perf/util/trace-event-info.c96
-rw-r--r--tools/perf/util/tracepoint.c63
-rw-r--r--tools/perf/util/tracepoint.h25
-rw-r--r--tools/perf/util/tsc.h1
-rw-r--r--tools/perf/util/util.c70
-rw-r--r--tools/perf/util/util.h15
-rw-r--r--tools/power/x86/intel-speed-select/hfi-events.c2
-rw-r--r--tools/power/x86/intel-speed-select/isst-daemon.c2
-rw-r--r--tools/testing/crypto/chacha20-s390/test-cipher.c9
-rw-r--r--tools/testing/cxl/Kbuild1
-rw-r--r--tools/testing/cxl/test/cxl.c131
-rw-r--r--tools/testing/cxl/test/mem.c53
-rw-r--r--tools/testing/cxl/test/mock.c8
-rw-r--r--tools/testing/memblock/Makefile3
-rw-r--r--tools/testing/memblock/README17
-rw-r--r--tools/testing/memblock/TODO14
-rw-r--r--tools/testing/memblock/internal.h11
-rw-r--r--tools/testing/memblock/linux/kmemleak.h2
-rw-r--r--tools/testing/memblock/linux/memory_hotplug.h8
-rw-r--r--tools/testing/memblock/main.c2
-rw-r--r--tools/testing/memblock/scripts/Makefile.include10
-rw-r--r--tools/testing/memblock/tests/alloc_api.c225
-rw-r--r--tools/testing/memblock/tests/alloc_helpers_api.c129
-rw-r--r--tools/testing/memblock/tests/alloc_nid_api.c351
-rw-r--r--tools/testing/memblock/tests/basic_api.c337
-rw-r--r--tools/testing/memblock/tests/common.c118
-rw-r--r--tools/testing/memblock/tests/common.h54
-rw-r--r--tools/testing/selftests/Makefile2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_iter.c116
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c95
-rw-r--r--tools/testing/selftests/bpf/prog_tests/lru_bug.c21
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c9
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c22
-rw-r--r--tools/testing/selftests/bpf/progs/lru_bug.c49
-rw-r--r--tools/testing/selftests/drivers/net/bonding/Makefile6
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/bond-break-lacpdu-tx.sh81
-rw-r--r--tools/testing/selftests/drivers/net/bonding/config1
-rw-r--r--tools/testing/selftests/drivers/net/bonding/settings1
-rw-r--r--tools/testing/selftests/filesystems/fat/.gitignore (renamed from Documentation/vm/.gitignore)3
-rw-r--r--tools/testing/selftests/filesystems/fat/Makefile7
-rw-r--r--tools/testing/selftests/filesystems/fat/config2
-rw-r--r--tools/testing/selftests/filesystems/fat/rename_exchange.c37
-rw-r--r--tools/testing/selftests/filesystems/fat/run_fat_tests.sh82
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/add_remove_eprobe.tc9
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/add_remove_kprobe.tc7
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc2
-rw-r--r--tools/testing/selftests/kvm/Makefile7
-rw-r--r--tools/testing/selftests/kvm/rseq_test.c58
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c17
-rw-r--r--tools/testing/selftests/landlock/Makefile7
-rw-r--r--tools/testing/selftests/lib.mk1
-rw-r--r--tools/testing/selftests/net/.gitignore3
-rw-r--r--tools/testing/selftests/net/Makefile2
-rwxr-xr-xtools/testing/selftests/net/forwarding/custom_multipath_hash.sh24
-rwxr-xr-xtools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh24
-rwxr-xr-xtools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh24
-rw-r--r--tools/testing/selftests/net/mptcp/mptcp_connect.c26
-rw-r--r--tools/testing/selftests/net/tap.c434
-rwxr-xr-xtools/testing/selftests/netfilter/nft_flowtable.sh377
-rwxr-xr-xtools/testing/selftests/netfilter/nft_trans_stress.sh81
-rw-r--r--tools/testing/selftests/powerpc/include/basic_asm.h63
-rw-r--r--tools/testing/selftests/powerpc/include/reg.h73
-rw-r--r--tools/testing/selftests/powerpc/include/utils.h10
-rw-r--r--tools/testing/selftests/powerpc/lib/reg.S107
-rw-r--r--tools/testing/selftests/powerpc/math/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/math/mma.S3
-rw-r--r--tools/testing/selftests/powerpc/mce/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/papr_attributes/attr_test.c30
-rw-r--r--tools/testing/selftests/powerpc/pmu/Makefile11
-rw-r--r--tools/testing/selftests/powerpc/pmu/branch_loops.S28
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c1
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/.gitignore20
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/Makefile15
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/blacklisted_events_test.c132
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/event_alternatives_tests_p10.c109
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/event_alternatives_tests_p9.c116
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/generic_events_valid_test.c130
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_cache_test.c60
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_l2l3_sel_test.c64
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_mmcra_sample_test.c54
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_pmc56_test.c63
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_pmc_count_test.c70
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_radix_scope_qual_test.c56
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_repeat_test.c56
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_thresh_cmp_test.c96
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_thresh_ctl_test.c64
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_thresh_sel_test.c63
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_unit_test.c74
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_pmc56_exclude_constraints_test.c64
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/hw_cache_event_type_test.c88
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/invalid_event_code_test.c67
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/reserved_bits_mmcra_sample_elig_mode_test.c77
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/reserved_bits_mmcra_thresh_ctl_test.c44
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/.gitignore18
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/Makefile7
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/bhrb_filter_map_test.c105
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/bhrb_no_crash_wo_pmu_test.c59
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/intr_regs_no_crash_wo_pmu_test.c57
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/misc.c135
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/misc.h9
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr1_sel_unit_cache_test.c77
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_any_test.c65
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_cond_test.c69
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_disable_no_branch_test.c64
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_disable_test.c66
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_ind_call_test.c69
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_thresh_cmp_test.c74
-rw-r--r--tools/testing/selftests/powerpc/ptrace/Makefile40
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-gpr.S52
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c125
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-gpr.h14
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c18
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c20
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace.h79
-rw-r--r--tools/testing/selftests/powerpc/security/.gitignore1
-rw-r--r--tools/testing/selftests/proc/proc-pid-vm.c75
-rw-r--r--tools/testing/selftests/sgx/defines.h23
-rw-r--r--tools/testing/selftests/sgx/load.c41
-rw-r--r--tools/testing/selftests/sgx/main.c1435
-rw-r--r--tools/testing/selftests/sgx/main.h1
-rw-r--r--tools/testing/selftests/sgx/sigstruct.c6
-rw-r--r--tools/testing/selftests/sgx/test_encl.c68
-rw-r--r--tools/testing/selftests/sgx/test_encl_bootstrap.S6
-rw-r--r--tools/testing/selftests/vm/Makefile1
-rw-r--r--tools/testing/selftests/vm/hmm-tests.c325
-rw-r--r--tools/testing/selftests/vm/hugepage-mremap.c2
-rw-r--r--tools/testing/selftests/vm/hugetlb-madvise.c5
-rw-r--r--tools/testing/selftests/vm/mrelease_test.c16
-rwxr-xr-xtools/testing/selftests/vm/run_vmtests.sh15
-rw-r--r--tools/testing/selftests/vm/soft-dirty.c67
-rwxr-xr-xtools/testing/selftests/vm/test_hmm.sh24
-rw-r--r--tools/testing/selftests/vm/userfaultfd.c4
-rw-r--r--tools/testing/selftests/vm/va_128TBswitch.c8
-rwxr-xr-xtools/testing/selftests/vm/va_128TBswitch.sh54
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/riscv32.config1
-rw-r--r--tools/thermal/tmon/sysfs.c24
-rw-r--r--tools/tracing/rtla/Makefile72
-rw-r--r--tools/tracing/rtla/src/timerlat_hist.c2
-rw-r--r--tools/tracing/rtla/src/timerlat_top.c2
-rw-r--r--tools/tracing/rtla/src/trace.c9
-rw-r--r--tools/tracing/rtla/src/utils.c7
-rw-r--r--tools/verification/dot2/Makefile26
-rw-r--r--tools/verification/dot2/automata.py174
-rw-r--r--tools/verification/dot2/dot2c26
-rw-r--r--tools/verification/dot2/dot2c.py254
-rw-r--r--tools/verification/dot2/dot2k47
-rw-r--r--tools/verification/dot2/dot2k.py177
-rw-r--r--tools/verification/dot2/dot2k_templates/main_global.c91
-rw-r--r--tools/verification/dot2/dot2k_templates/main_per_cpu.c91
-rw-r--r--tools/verification/dot2/dot2k_templates/main_per_task.c91
-rw-r--r--tools/verification/models/wip.dot16
-rw-r--r--tools/verification/models/wwnr.dot16
-rw-r--r--tools/virtio/linux/kernel.h2
-rw-r--r--tools/virtio/linux/vringh.h1
-rw-r--r--tools/virtio/virtio_test.c4
-rw-r--r--tools/vm/page_owner_sort.c30
-rw-r--r--tools/vm/slabinfo.c32
-rw-r--r--virt/kvm/kvm_main.c154
-rw-r--r--virt/kvm/pfncache.c17
5520 files changed, 369065 insertions, 83329 deletions
diff --git a/.clang-format b/.clang-format
index 9b87ea1fc16e..1247d54f9e49 100644
--- a/.clang-format
+++ b/.clang-format
@@ -516,6 +516,7 @@ ForEachMacros:
- 'of_property_for_each_string'
- 'of_property_for_each_u32'
- 'pci_bus_for_each_resource'
+ - 'pci_doe_for_each_off'
- 'pcl_for_each_chunk'
- 'pcl_for_each_segment'
- 'pcm_for_each_format'
diff --git a/.get_maintainer.ignore b/.get_maintainer.ignore
index a64d21913745..c298bab3d320 100644
--- a/.get_maintainer.ignore
+++ b/.get_maintainer.ignore
@@ -1,2 +1,4 @@
+Alan Cox <alan@lxorguk.ukuu.org.uk>
+Alan Cox <root@hraefn.swansea.linux.org.uk>
Christoph Hellwig <hch@lst.de>
Marc Gonzalez <marc.w.gonzalez@free.fr>
diff --git a/.mailmap b/.mailmap
index 71577c396252..8ded2e7c2906 100644
--- a/.mailmap
+++ b/.mailmap
@@ -78,6 +78,7 @@ Boris Brezillon <bbrezillon@kernel.org> <b.brezillon.dev@gmail.com>
Boris Brezillon <bbrezillon@kernel.org> <b.brezillon@overkiz.com>
Boris Brezillon <bbrezillon@kernel.org> <boris.brezillon@bootlin.com>
Boris Brezillon <bbrezillon@kernel.org> <boris.brezillon@free-electrons.com>
+Brendan Higgins <brendan.higgins@linux.dev> <brendanhiggins@google.com>
Brian Avery <b.avery@hp.com>
Brian King <brking@us.ibm.com>
Brian Silverman <bsilver16384@gmail.com> <brian.silverman@bluerivertech.com>
@@ -97,8 +98,7 @@ Christian Brauner <brauner@kernel.org> <christian.brauner@ubuntu.com>
Christian Marangi <ansuelsmth@gmail.com>
Christophe Ricard <christophe.ricard@gmail.com>
Christoph Hellwig <hch@lst.de>
-Colin Ian King <colin.king@intel.com> <colin.king@canonical.com>
-Colin Ian King <colin.king@intel.com> <colin.i.king@gmail.com>
+Colin Ian King <colin.i.king@gmail.com> <colin.king@canonical.com>
Corey Minyard <minyard@acm.org>
Damian Hobson-Garcia <dhobsong@igel.co.jp>
Daniel Borkmann <daniel@iogearbox.net> <danborkmann@googlemail.com>
@@ -149,6 +149,8 @@ Greg Kroah-Hartman <gregkh@suse.de>
Greg Kroah-Hartman <greg@kroah.com>
Greg Kurz <groug@kaod.org> <gkurz@linux.vnet.ibm.com>
Gregory CLEMENT <gregory.clement@bootlin.com> <gregory.clement@free-electrons.com>
+Guilherme G. Piccoli <kernel@gpiccoli.net> <gpiccoli@linux.vnet.ibm.com>
+Guilherme G. Piccoli <kernel@gpiccoli.net> <gpiccoli@canonical.com>
Guo Ren <guoren@kernel.org> <guoren@linux.alibaba.com>
Guo Ren <guoren@kernel.org> <ren_guo@c-sky.com>
Gustavo Padovan <gustavo@las.ic.unicamp.br>
@@ -230,7 +232,7 @@ Kees Cook <keescook@chromium.org> <kees@ubuntu.com>
Keith Busch <kbusch@kernel.org> <keith.busch@intel.com>
Keith Busch <kbusch@kernel.org> <keith.busch@linux.intel.com>
Kenneth W Chen <kenneth.w.chen@intel.com>
-Kirill Tkhai <kirill.tkhai@openvz.org> <ktkhai@virtuozzo.com>
+Kirill Tkhai <tkhai@ya.ru> <ktkhai@virtuozzo.com>
Konstantin Khlebnikov <koct9i@gmail.com> <khlebnikov@yandex-team.ru>
Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
Koushik <raghavendra.koushik@neterion.com>
@@ -252,6 +254,7 @@ Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de>
Li Yang <leoyang.li@nxp.com> <leoli@freescale.com>
Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org>
Lorenzo Pieralisi <lpieralisi@kernel.org> <lorenzo.pieralisi@arm.com>
+Luca Ceresoli <luca.ceresoli@bootlin.com> <luca@lucaceresoli.net>
Lukasz Luba <lukasz.luba@arm.com> <l.luba@partner.samsung.com>
Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
Maciej W. Rozycki <macro@orcam.me.uk> <macro@linux-mips.org>
diff --git a/Documentation/ABI/stable/sysfs-driver-mlxreg-io b/Documentation/ABI/stable/sysfs-driver-mlxreg-io
index b312242d4f40..af0cbf143c48 100644
--- a/Documentation/ABI/stable/sysfs-driver-mlxreg-io
+++ b/Documentation/ABI/stable/sysfs-driver-mlxreg-io
@@ -1,7 +1,7 @@
What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/asic_health
Date: June 2018
KernelVersion: 4.19
-Contact: Vadim Pasternak <vadimpmellanox.com>
+Contact: Vadim Pasternak <vadimp@nvidia.com>
Description: This file shows ASIC health status. The possible values are:
0 - health failed, 2 - health OK, 3 - ASIC in booting state.
@@ -11,7 +11,7 @@ What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/cpld1_version
What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/cpld2_version
Date: June 2018
KernelVersion: 4.19
-Contact: Vadim Pasternak <vadimpmellanox.com>
+Contact: Vadim Pasternak <vadimp@nvidia.com>
Description: These files show with which CPLD versions have been burned
on carrier and switch boards.
@@ -20,7 +20,7 @@ Description: These files show with which CPLD versions have been burned
What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/fan_dir
Date: December 2018
KernelVersion: 5.0
-Contact: Vadim Pasternak <vadimpmellanox.com>
+Contact: Vadim Pasternak <vadimp@nvidia.com>
Description: This file shows the system fans direction:
forward direction - relevant bit is set 0;
reversed direction - relevant bit is set 1.
@@ -30,7 +30,7 @@ Description: This file shows the system fans direction:
What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/cpld3_version
Date: November 2018
KernelVersion: 5.0
-Contact: Vadim Pasternak <vadimpmellanox.com>
+Contact: Vadim Pasternak <vadimp@nvidia.com>
Description: These files show with which CPLD versions have been burned
on LED or Gearbox board.
@@ -39,7 +39,7 @@ Description: These files show with which CPLD versions have been burned
What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/jtag_enable
Date: November 2018
KernelVersion: 5.0
-Contact: Vadim Pasternak <vadimpmellanox.com>
+Contact: Vadim Pasternak <vadimp@nvidia.com>
Description: These files enable and disable the access to the JTAG domain.
By default access to the JTAG domain is disabled.
@@ -48,7 +48,7 @@ Description: These files enable and disable the access to the JTAG domain.
What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/select_iio
Date: June 2018
KernelVersion: 4.19
-Contact: Vadim Pasternak <vadimpmellanox.com>
+Contact: Vadim Pasternak <vadimp@nvidia.com>
Description: This file allows iio devices selection.
Attribute select_iio can be written with 0 or with 1. It
@@ -62,7 +62,7 @@ What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/psu1_on
/sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/pwr_down
Date: June 2018
KernelVersion: 4.19
-Contact: Vadim Pasternak <vadimpmellanox.com>
+Contact: Vadim Pasternak <vadimp@nvidia.com>
Description: These files allow asserting system power cycling, switching
power supply units on and off and system's main power domain
shutdown.
@@ -89,7 +89,7 @@ What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_short_pb
What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_sw_reset
Date: June 2018
KernelVersion: 4.19
-Contact: Vadim Pasternak <vadimpmellanox.com>
+Contact: Vadim Pasternak <vadimp@nvidia.com>
Description: These files show the system reset cause, as following: power
auxiliary outage or power refresh, ASIC thermal shutdown, halt,
hotswap, watchdog, firmware reset, long press power button,
@@ -106,7 +106,7 @@ What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_system
What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_voltmon_upgrade_fail
Date: November 2018
KernelVersion: 5.0
-Contact: Vadim Pasternak <vadimpmellanox.com>
+Contact: Vadim Pasternak <vadimp@nvidia.com>
Description: These files show the system reset cause, as following: ComEx
power fail, reset from ComEx, system platform reset, reset
due to voltage monitor devices upgrade failure,
@@ -119,7 +119,7 @@ Description: These files show the system reset cause, as following: ComEx
What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/cpld4_version
Date: November 2018
KernelVersion: 5.0
-Contact: Vadim Pasternak <vadimpmellanox.com>
+Contact: Vadim Pasternak <vadimp@nvidia.com>
Description: These files show with which CPLD versions have been burned
on LED board.
@@ -133,7 +133,7 @@ What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_sff_wd
What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_swb_wd
Date: June 2019
KernelVersion: 5.3
-Contact: Vadim Pasternak <vadimpmellanox.com>
+Contact: Vadim Pasternak <vadimp@nvidia.com>
Description: These files show the system reset cause, as following:
COMEX thermal shutdown; wathchdog power off or reset was derived
by one of the next components: COMEX, switch board or by Small Form
@@ -148,7 +148,7 @@ What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/config1
What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/config2
Date: January 2020
KernelVersion: 5.6
-Contact: Vadim Pasternak <vadimpmellanox.com>
+Contact: Vadim Pasternak <vadimp@nvidia.com>
Description: These files show system static topology identification
like system's static I2C topology, number and type of FPGA
devices within the system and so on.
@@ -161,7 +161,7 @@ What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_soc
What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_sw_pwr_off
Date: January 2020
KernelVersion: 5.6
-Contact: Vadim Pasternak <vadimpmellanox.com>
+Contact: Vadim Pasternak <vadimp@nvidia.com>
Description: These files show the system reset causes, as following: reset
due to AC power failure, reset invoked from software by
assertion reset signal through CPLD. reset caused by signal
@@ -173,7 +173,7 @@ Description: These files show the system reset causes, as following: reset
What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/pcie_asic_reset_dis
Date: January 2020
KernelVersion: 5.6
-Contact: Vadim Pasternak <vadimpmellanox.com>
+Contact: Vadim Pasternak <vadimp@nvidia.com>
Description: This file allows to retain ASIC up during PCIe root complex
reset, when attribute is set 1.
@@ -182,7 +182,7 @@ Description: This file allows to retain ASIC up during PCIe root complex
What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/vpd_wp
Date: January 2020
KernelVersion: 5.6
-Contact: Vadim Pasternak <vadimpmellanox.com>
+Contact: Vadim Pasternak <vadimp@nvidia.com>
Description: This file allows to overwrite system VPD hardware write
protection when attribute is set 1.
@@ -191,7 +191,7 @@ Description: This file allows to overwrite system VPD hardware write
What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/voltreg_update_status
Date: January 2020
KernelVersion: 5.6
-Contact: Vadim Pasternak <vadimpmellanox.com>
+Contact: Vadim Pasternak <vadimp@nvidia.com>
Description: This file exposes the configuration update status of burnable
voltage regulator devices. The status values are as following:
0 - OK; 1 - CRC failure; 2 = I2C failure; 3 - in progress.
@@ -201,7 +201,7 @@ Description: This file exposes the configuration update status of burnable
What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/ufm_version
Date: January 2020
KernelVersion: 5.6
-Contact: Vadim Pasternak <vadimpmellanox.com>
+Contact: Vadim Pasternak <vadimp@nvidia.com>
Description: This file exposes the firmware version of burnable voltage
regulator devices.
@@ -217,7 +217,7 @@ What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/cpld3_version_min
What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/cpld4_version_min
Date: July 2020
KernelVersion: 5.9
-Contact: Vadim Pasternak <vadimpmellanox.com>
+Contact: Vadim Pasternak <vadimp@nvidia.com>
Description: These files show with which CPLD part numbers and minor
versions have been burned CPLD devices equipped on a
system.
@@ -471,7 +471,7 @@ Description: These files provide the maximum powered required for line card
What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/phy_reset
Date: May 2022
KernelVersion: 5.19
-Contact: Vadim Pasternak <vadimpmellanox.com>
+Contact: Vadim Pasternak <vadimp@nvidia.com>
Description: This file allows to reset PHY 88E1548 when attribute is set 0
due to some abnormal PHY behavior.
Expected behavior:
@@ -483,7 +483,7 @@ Description: This file allows to reset PHY 88E1548 when attribute is set 0
What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/mac_reset
Date: May 2022
KernelVersion: 5.19
-Contact: Vadim Pasternak <vadimpmellanox.com>
+Contact: Vadim Pasternak <vadimp@nvidia.com>
Description: This file allows to reset ASIC MT52132 when attribute is set 0
due to some abnormal ASIC behavior.
Expected behavior:
@@ -495,7 +495,7 @@ Description: This file allows to reset ASIC MT52132 when attribute is set 0
What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/qsfp_pwr_good
Date: May 2022
KernelVersion: 5.19
-Contact: Vadim Pasternak <vadimpmellanox.com>
+Contact: Vadim Pasternak <vadimp@nvidia.com>
Description: This file shows QSFP ports power status. The value is set to 0
when one of any QSFP ports is plugged. The value is set to 1 when
there are no any QSFP ports are plugged.
@@ -503,3 +503,42 @@ Description: This file shows QSFP ports power status. The value is set to 0
0 - Power good, 1 - Not power good.
The files are read only.
+
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/asic2_health
+Date: July 2022
+KernelVersion: 5.20
+Contact: Vadim Pasternak <vadimp@nvidia.com>
+Description: This file shows 2-nd ASIC health status. The possible values are:
+ 0 - health failed, 2 - health OK, 3 - ASIC in booting state.
+
+ The file is read only.
+
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/asic_reset
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/asic2_reset
+Date: July 2022
+KernelVersion: 5.20
+Contact: Vadim Pasternak <vadimp@nvidia.com>
+Description: These files allow to reset each of the ASICs by writing 1.
+
+ The files are write only.
+
+
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/comm_chnl_ready
+Date: July 2022
+KernelVersion: 5.20
+Contact: Vadim Pasternak <vadimp@nvidia.com>
+Description: This file is used to indicate to the remote end (for example, a BMC)
+ that the system host CPU is ready for sending telemetry data to the remote end.
+ For indication, 1 should be written to the file.
+
+ The file is write only.
+
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/config3
+Date: January 2020
+KernelVersion: 5.6
+Contact: Vadim Pasternak <vadimp@nvidia.com>
+Description: The file indicates COME module hardware configuration.
+ The value is pushed by hardware through GPIO pins.
+ The purpose is to expose some minor BOM changes for the same system SKU.
+
+ The file is read only.
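
The new mlxreg-io attributes documented above are ordinary sysfs files, so they can be exercised with plain file I/O. A minimal sketch, assuming root privileges and a Mellanox system where the mlxplat hwmon path below actually resolves; the attribute names come from the entries above, and nothing in the snippet is part of the patch itself:

import glob

# Resolve the hwmon wildcard used throughout the ABI entries above.
paths = glob.glob("/sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*")
if paths:
    base = paths[0]

    # asic2_health is read only: 0 - health failed, 2 - health OK, 3 - booting.
    with open(f"{base}/asic2_health") as f:
        print("asic2_health:", f.read().strip())

    # comm_chnl_ready is write only: writing 1 signals the remote end
    # (for example a BMC) that the host CPU is ready for telemetry exchange.
    with open(f"{base}/comm_chnl_ready", "w") as f:
        f.write("1")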
diff --git a/Documentation/ABI/testing/procfs-smaps_rollup b/Documentation/ABI/testing/procfs-smaps_rollup
index a4e31c465194..b446a7154a1b 100644
--- a/Documentation/ABI/testing/procfs-smaps_rollup
+++ b/Documentation/ABI/testing/procfs-smaps_rollup
@@ -22,6 +22,7 @@ Description:
MMUPageSize: 4 kB
Rss: 884 kB
Pss: 385 kB
+ Pss_Dirty: 68 kB
Pss_Anon: 301 kB
Pss_File: 80 kB
Pss_Shmem: 4 kB
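
The Pss_Dirty field added above follows the same "Key: <value> kB" layout as the other rollup fields, so any smaps_rollup reader picks it up with a generic parse. An illustrative sketch (not part of the patch), assuming a readable /proc/<pid>/smaps_rollup:

import sys

def smaps_rollup(pid="self"):
    """Return the 'Key: <value> kB' fields of /proc/<pid>/smaps_rollup as ints (kB)."""
    fields = {}
    with open(f"/proc/{pid}/smaps_rollup") as f:
        for line in f:
            if ":" in line and line.rstrip().endswith("kB"):
                key, rest = line.split(":", 1)
                fields[key.strip()] = int(rest.split()[0])
    return fields

if __name__ == "__main__":
    info = smaps_rollup(sys.argv[1] if len(sys.argv) > 1 else "self")
    # Pss_Dirty is only present on kernels that carry the change above.
    print("Pss:", info.get("Pss"), "kB  Pss_Dirty:", info.get("Pss_Dirty"), "kB")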
diff --git a/Documentation/ABI/testing/sysfs-bus-cxl b/Documentation/ABI/testing/sysfs-bus-cxl
index 7c2b846521f3..8494ef27e8d2 100644
--- a/Documentation/ABI/testing/sysfs-bus-cxl
+++ b/Documentation/ABI/testing/sysfs-bus-cxl
@@ -7,6 +7,7 @@ Description:
all descendant memdevs for unbind. Writing '1' to this attribute
flushes that work.
+
What: /sys/bus/cxl/devices/memX/firmware_version
Date: December, 2020
KernelVersion: v5.12
@@ -16,6 +17,7 @@ Description:
Memory Device Output Payload in the CXL-2.0
specification.
+
What: /sys/bus/cxl/devices/memX/ram/size
Date: December, 2020
KernelVersion: v5.12
@@ -25,6 +27,7 @@ Description:
identically named field in the Identify Memory Device Output
Payload in the CXL-2.0 specification.
+
What: /sys/bus/cxl/devices/memX/pmem/size
Date: December, 2020
KernelVersion: v5.12
@@ -34,6 +37,7 @@ Description:
identically named field in the Identify Memory Device Output
Payload in the CXL-2.0 specification.
+
What: /sys/bus/cxl/devices/memX/serial
Date: January, 2022
KernelVersion: v5.18
@@ -43,6 +47,7 @@ Description:
capability. Mandatory for CXL devices, see CXL 2.0 8.1.12.2
Memory Device PCIe Capabilities and Extended Capabilities.
+
What: /sys/bus/cxl/devices/memX/numa_node
Date: January, 2022
KernelVersion: v5.18
@@ -52,114 +57,334 @@ Description:
host PCI device for this memory device, emit the CPU node
affinity for this device.
+
What: /sys/bus/cxl/devices/*/devtype
Date: June, 2021
KernelVersion: v5.14
Contact: linux-cxl@vger.kernel.org
Description:
- CXL device objects export the devtype attribute which mirrors
- the same value communicated in the DEVTYPE environment variable
- for uevents for devices on the "cxl" bus.
+ (RO) CXL device objects export the devtype attribute which
+ mirrors the same value communicated in the DEVTYPE environment
+ variable for uevents for devices on the "cxl" bus.
+
What: /sys/bus/cxl/devices/*/modalias
Date: December, 2021
KernelVersion: v5.18
Contact: linux-cxl@vger.kernel.org
Description:
- CXL device objects export the modalias attribute which mirrors
- the same value communicated in the MODALIAS environment variable
- for uevents for devices on the "cxl" bus.
+ (RO) CXL device objects export the modalias attribute which
+ mirrors the same value communicated in the MODALIAS environment
+ variable for uevents for devices on the "cxl" bus.
+
What: /sys/bus/cxl/devices/portX/uport
Date: June, 2021
KernelVersion: v5.14
Contact: linux-cxl@vger.kernel.org
Description:
- CXL port objects are enumerated from either a platform firmware
- device (ACPI0017 and ACPI0016) or PCIe switch upstream port with
- CXL component registers. The 'uport' symlink connects the CXL
- portX object to the device that published the CXL port
+ (RO) CXL port objects are enumerated from either a platform
+ firmware device (ACPI0017 and ACPI0016) or PCIe switch upstream
+ port with CXL component registers. The 'uport' symlink connects
+ the CXL portX object to the device that published the CXL port
capability.
+
What: /sys/bus/cxl/devices/portX/dportY
Date: June, 2021
KernelVersion: v5.14
Contact: linux-cxl@vger.kernel.org
Description:
- CXL port objects are enumerated from either a platform firmware
- device (ACPI0017 and ACPI0016) or PCIe switch upstream port with
- CXL component registers. The 'dportY' symlink identifies one or
- more downstream ports that the upstream port may target in its
- decode of CXL memory resources. The 'Y' integer reflects the
- hardware port unique-id used in the hardware decoder target
- list.
+ (RO) CXL port objects are enumerated from either a platform
+ firmware device (ACPI0017 and ACPI0016) or PCIe switch upstream
+ port with CXL component registers. The 'dportY' symlink
+ identifies one or more downstream ports that the upstream port
+ may target in its decode of CXL memory resources. The 'Y'
+ integer reflects the hardware port unique-id used in the
+ hardware decoder target list.
+
What: /sys/bus/cxl/devices/decoderX.Y
Date: June, 2021
KernelVersion: v5.14
Contact: linux-cxl@vger.kernel.org
Description:
- CXL decoder objects are enumerated from either a platform
+ (RO) CXL decoder objects are enumerated from either a platform
firmware description, or a CXL HDM decoder register set in a
PCIe device (see CXL 2.0 section 8.2.5.12 CXL HDM Decoder
Capability Structure). The 'X' in decoderX.Y represents the
cxl_port container of this decoder, and 'Y' represents the
instance id of a given decoder resource.
+
What: /sys/bus/cxl/devices/decoderX.Y/{start,size}
Date: June, 2021
KernelVersion: v5.14
Contact: linux-cxl@vger.kernel.org
Description:
- The 'start' and 'size' attributes together convey the physical
- address base and number of bytes mapped in the decoder's decode
- window. For decoders of devtype "cxl_decoder_root" the address
- range is fixed. For decoders of devtype "cxl_decoder_switch" the
- address is bounded by the decode range of the cxl_port ancestor
- of the decoder's cxl_port, and dynamically updates based on the
- active memory regions in that address space.
+ (RO) The 'start' and 'size' attributes together convey the
+ physical address base and number of bytes mapped in the
+ decoder's decode window. For decoders of devtype
+ "cxl_decoder_root" the address range is fixed. For decoders of
+ devtype "cxl_decoder_switch" the address is bounded by the
+ decode range of the cxl_port ancestor of the decoder's cxl_port,
+ and dynamically updates based on the active memory regions in
+ that address space.
+
What: /sys/bus/cxl/devices/decoderX.Y/locked
Date: June, 2021
KernelVersion: v5.14
Contact: linux-cxl@vger.kernel.org
Description:
- CXL HDM decoders have the capability to lock the configuration
- until the next device reset. For decoders of devtype
- "cxl_decoder_root" there is no standard facility to unlock them.
- For decoders of devtype "cxl_decoder_switch" a secondary bus
- reset, of the PCIe bridge that provides the bus for this
- decoders uport, unlocks / resets the decoder.
+ (RO) CXL HDM decoders have the capability to lock the
+ configuration until the next device reset. For decoders of
+ devtype "cxl_decoder_root" there is no standard facility to
+ unlock them. For decoders of devtype "cxl_decoder_switch" a
+ secondary bus reset, of the PCIe bridge that provides the bus
+ for this decoders uport, unlocks / resets the decoder.
+
What: /sys/bus/cxl/devices/decoderX.Y/target_list
Date: June, 2021
KernelVersion: v5.14
Contact: linux-cxl@vger.kernel.org
Description:
- Display a comma separated list of the current decoder target
- configuration. The list is ordered by the current configured
- interleave order of the decoder's dport instances. Each entry in
- the list is a dport id.
+ (RO) Display a comma separated list of the current decoder
+ target configuration. The list is ordered by the current
+ configured interleave order of the decoder's dport instances.
+ Each entry in the list is a dport id.
+
What: /sys/bus/cxl/devices/decoderX.Y/cap_{pmem,ram,type2,type3}
Date: June, 2021
KernelVersion: v5.14
Contact: linux-cxl@vger.kernel.org
Description:
- When a CXL decoder is of devtype "cxl_decoder_root", it
+ (RO) When a CXL decoder is of devtype "cxl_decoder_root", it
represents a fixed memory window identified by platform
firmware. A fixed window may only support a subset of memory
types. The 'cap_*' attributes indicate whether persistent
memory, volatile memory, accelerator memory, and / or expander
memory may be mapped behind this decoder's memory window.
+
What: /sys/bus/cxl/devices/decoderX.Y/target_type
Date: June, 2021
KernelVersion: v5.14
Contact: linux-cxl@vger.kernel.org
Description:
- When a CXL decoder is of devtype "cxl_decoder_switch", it can
- optionally decode either accelerator memory (type-2) or expander
- memory (type-3). The 'target_type' attribute indicates the
- current setting which may dynamically change based on what
+ (RO) When a CXL decoder is of devtype "cxl_decoder_switch", it
+ can optionally decode either accelerator memory (type-2) or
+ expander memory (type-3). The 'target_type' attribute indicates
+ the current setting which may dynamically change based on what
memory regions are activated in this decode hierarchy.
+
+
+What: /sys/bus/cxl/devices/endpointX/CDAT
+Date: July, 2022
+KernelVersion: v5.20
+Contact: linux-cxl@vger.kernel.org
+Description:
+ (RO) If this sysfs entry is not present no DOE mailbox was
+ found to support CDAT data. If it is present and the length of
+ the data is 0 reading the CDAT data failed. Otherwise the CDAT
+ data is reported.
+
+
+What: /sys/bus/cxl/devices/decoderX.Y/mode
+Date: May, 2022
+KernelVersion: v5.20
+Contact: linux-cxl@vger.kernel.org
+Description:
+ (RW) When a CXL decoder is of devtype "cxl_decoder_endpoint" it
+ translates from a host physical address range, to a device local
+ address range. Device-local address ranges are further split
+ into a 'ram' (volatile memory) range and 'pmem' (persistent
+ memory) range. The 'mode' attribute emits one of 'ram', 'pmem',
+ 'mixed', or 'none'. The 'mixed' indication is for error cases
+ when a decoder straddles the volatile/persistent partition
+ boundary, and 'none' indicates the decoder is not actively
+ decoding, or no DPA allocation policy has been set.
+
+ 'mode' can be written, when the decoder is in the 'disabled'
+ state, with either 'ram' or 'pmem' to set the boundaries for the
+ next allocation.
+
+
+What: /sys/bus/cxl/devices/decoderX.Y/dpa_resource
+Date: May, 2022
+KernelVersion: v5.20
+Contact: linux-cxl@vger.kernel.org
+Description:
+ (RO) When a CXL decoder is of devtype "cxl_decoder_endpoint",
+ and its 'dpa_size' attribute is non-zero, this attribute
+ indicates the device physical address (DPA) base address of the
+ allocation.
+
+
+What: /sys/bus/cxl/devices/decoderX.Y/dpa_size
+Date: May, 2022
+KernelVersion: v5.20
+Contact: linux-cxl@vger.kernel.org
+Description:
+ (RW) When a CXL decoder is of devtype "cxl_decoder_endpoint" it
+ translates from a host physical address range, to a device local
+ address range. The range, base address plus length in bytes, of
+ DPA allocated to this decoder is conveyed in these 2 attributes.
+ Allocations can be mutated as long as the decoder is in the
+ disabled state. A write to 'dpa_size' releases the previous DPA
+ allocation and then attempts to allocate from the free capacity
+ in the device partition referred to by 'decoderX.Y/mode'.
+ Allocate and free requests can only be performed on the highest
+ instance number disabled decoder with non-zero size. I.e.
+ allocations are enforced to occur in increasing 'decoderX.Y/id'
+ order and frees are enforced to occur in decreasing
+ 'decoderX.Y/id' order.
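+
+		As an illustration only, assuming a disabled endpoint decoder
+		named 'decoder3.0' with free 'pmem' capacity, an allocation
+		could look like:
+
+		  # echo pmem > /sys/bus/cxl/devices/decoder3.0/mode
+		  # echo 0x10000000 > /sys/bus/cxl/devices/decoder3.0/dpa_size
+		  # cat /sys/bus/cxl/devices/decoder3.0/dpa_resource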
+
+
+What: /sys/bus/cxl/devices/decoderX.Y/interleave_ways
+Date: May, 2022
+KernelVersion: v5.20
+Contact: linux-cxl@vger.kernel.org
+Description:
+ (RO) The number of targets across which this decoder's host
+ physical address (HPA) memory range is interleaved. The device
+ maps every Nth block of HPA (of size ==
+ 'interleave_granularity') to consecutive DPA addresses. The
+ decoder's position in the interleave is determined by the
+ device's (endpoint or switch) switch ancestry. For root
+		decoders, their interleave is specified by platform firmware, and
+ they only specify a downstream target order for host bridges.
+
+
+What: /sys/bus/cxl/devices/decoderX.Y/interleave_granularity
+Date: May, 2022
+KernelVersion: v5.20
+Contact: linux-cxl@vger.kernel.org
+Description:
+ (RO) The number of consecutive bytes of host physical address
+ space this decoder claims at address N before the decode rotates
+ to the next target in the interleave at address N +
+ interleave_granularity (assuming N is aligned to
+ interleave_granularity).
+
+
+What: /sys/bus/cxl/devices/decoderX.Y/create_pmem_region
+Date: May, 2022
+KernelVersion: v5.20
+Contact: linux-cxl@vger.kernel.org
+Description:
+ (RW) Write a string in the form 'regionZ' to start the process
+ of defining a new persistent memory region (interleave-set)
+ within the decode range bounded by root decoder 'decoderX.Y'.
+ The value written must match the current value returned from
+ reading this attribute. An atomic compare exchange operation is
+ done on write to assign the requested id to a region and
+ allocate the region-id for the next creation attempt. EBUSY is
+ returned if the region name written does not match the current
+ cached value.
+
+
+What: /sys/bus/cxl/devices/decoderX.Y/delete_region
+Date: May, 2022
+KernelVersion: v5.20
+Contact: linux-cxl@vger.kernel.org
+Description:
+ (WO) Write a string in the form 'regionZ' to delete that region,
+ provided it is currently idle / not bound to a driver.
+
+
+What: /sys/bus/cxl/devices/regionZ/uuid
+Date: May, 2022
+KernelVersion: v5.20
+Contact: linux-cxl@vger.kernel.org
+Description:
+ (RW) Write a unique identifier for the region. This field must
+ be set for persistent regions and it must not conflict with the
+ UUID of another region.
+
+
+What: /sys/bus/cxl/devices/regionZ/interleave_granularity
+Date: May, 2022
+KernelVersion: v5.20
+Contact: linux-cxl@vger.kernel.org
+Description:
+ (RW) Set the number of consecutive bytes each device in the
+ interleave set will claim. The possible interleave granularity
+ values are determined by the CXL spec and the participating
+ devices.
+
+
+What: /sys/bus/cxl/devices/regionZ/interleave_ways
+Date: May, 2022
+KernelVersion: v5.20
+Contact: linux-cxl@vger.kernel.org
+Description:
+		(RW) The number of devices participating in the region is set
+		by writing this value. Each device will provide
+		1/interleave_ways of storage for the region.
+
+
+What: /sys/bus/cxl/devices/regionZ/size
+Date: May, 2022
+KernelVersion: v5.20
+Contact: linux-cxl@vger.kernel.org
+Description:
+ (RW) System physical address space to be consumed by the region.
+		When written, this triggers the driver to allocate space out of
+		the parent root decoder's address space. When read, the size of the
+ address space is reported and should match the span of the
+ region's resource attribute. Size shall be set after the
+ interleave configuration parameters. Once set it cannot be
+ changed, only freed by writing 0. The kernel makes no guarantees
+ that data is maintained over an address space freeing event, and
+ there is no guarantee that a free followed by an allocate
+ results in the same address being allocated.
+
+
+What: /sys/bus/cxl/devices/regionZ/resource
+Date: May, 2022
+KernelVersion: v5.20
+Contact: linux-cxl@vger.kernel.org
+Description:
+ (RO) A region is a contiguous partition of a CXL root decoder
+ address space. Region capacity is allocated by writing to the
+		size attribute; the resulting physical address space determined
+ by the driver is reflected here. It is therefore not useful to
+ read this before writing a value to the size attribute.
+
+
+What: /sys/bus/cxl/devices/regionZ/target[0..N]
+Date: May, 2022
+KernelVersion: v5.20
+Contact: linux-cxl@vger.kernel.org
+Description:
+ (RW) Write an endpoint decoder object name to 'targetX' where X
+ is the intended position of the endpoint device in the region
+ interleave and N is the 'interleave_ways' setting for the
+ region. ENXIO is returned if the write results in an impossible
+ to map decode scenario, like the endpoint is unreachable at that
+ position relative to the root decoder interleave. EBUSY is
+ returned if the position in the region is already occupied, or
+ if the region is not in a state to accept interleave
+ configuration changes. EINVAL is returned if the object name is
+ not an endpoint decoder. Once all positions have been
+ successfully written a final validation for decode conflicts is
+ performed before activating the region.
+
+
+What: /sys/bus/cxl/devices/regionZ/commit
+Date: May, 2022
+KernelVersion: v5.20
+Contact: linux-cxl@vger.kernel.org
+Description:
+ (RW) Write a boolean 'true' string value to this attribute to
+ trigger the region to transition from the software programmed
+ state to the actively decoding in hardware state. The commit
+		operation, in addition to validating that the region is in a
+		properly configured state, validates that the decoders are being
+ committed in spec mandated order (last committed decoder id +
+ 1), and checks that the hardware accepts the commit request.
+ Reading this value indicates whether the region is committed or
+ not.
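+
+		As an illustration only, assuming a root decoder 'decoder0.0'
+		and an already sized endpoint decoder 'decoder3.0', a 1-way
+		persistent region could be assembled and committed roughly as:
+
+		  # region=$(cat /sys/bus/cxl/devices/decoder0.0/create_pmem_region)
+		  # echo $region > /sys/bus/cxl/devices/decoder0.0/create_pmem_region
+		  # cd /sys/bus/cxl/devices/$region
+		  # echo 256 > interleave_granularity
+		  # echo 1 > interleave_ways
+		  # uuidgen > uuid
+		  # echo 0x10000000 > size
+		  # echo decoder3.0 > target0
+		  # echo 1 > commit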
diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices-caps b/Documentation/ABI/testing/sysfs-bus-event_source-devices-caps
new file mode 100644
index 000000000000..8757dcf41c08
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-event_source-devices-caps
@@ -0,0 +1,18 @@
+What: /sys/bus/event_source/devices/<dev>/caps
+Date: May 2022
+KernelVersion: 5.19
+Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description:
+ Attribute group to describe the capabilities exposed
+ for a particular pmu. Each attribute of this group can
+ expose information specific to a PMU, say pmu_name, so that
+		userspace can understand some of the features which the
+		platform specific PMU supports.
+
+		One example of an available capability on a supported platform
+		such as Intel is pmu_name, which exposes the underlying CPU
+		name known to the PMU driver.
+
+ Example output in powerpc:
+ grep . /sys/bus/event_source/devices/cpu/caps/*
+ /sys/bus/event_source/devices/cpu/caps/pmu_name:POWER9
diff --git a/Documentation/ABI/testing/sysfs-bus-surface_aggregator-tabletsw b/Documentation/ABI/testing/sysfs-bus-surface_aggregator-tabletsw
new file mode 100644
index 000000000000..74cd9d754e60
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-surface_aggregator-tabletsw
@@ -0,0 +1,57 @@
+What: /sys/bus/surface_aggregator/devices/01:0e:01:00:01/state
+Date: July 2022
+KernelVersion: 5.20
+Contact: Maximilian Luz <luzmaximilian@gmail.com>
+Description:
+ This attribute returns a string with the current type-cover
+ or device posture, as indicated by the embedded controller.
+ Currently returned posture states are:
+
+ - "disconnected": The type-cover has been disconnected.
+
+ - "closed": The type-cover has been folded closed and lies on
+ top of the display.
+
+ - "laptop": The type-cover is open and in laptop-mode, i.e.,
+ ready for normal use.
+
+ - "folded-canvas": The type-cover has been folded back
+ part-ways, but does not lie flush with the back side of the
+ device. In general, this means that the kick-stand is used
+		  and extended atop the cover.
+
+ - "folded-back": The type cover has been fully folded back and
+ lies flush with the back side of the device.
+
+ - "<unknown>": The current state is unknown to the driver, for
+ example due to newer as-of-yet unsupported hardware.
+
+ New states may be introduced with new hardware. Users therefore
+ must not rely on this list of states being exhaustive and
+ gracefully handle unknown states.
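+
+		For example, with the type-cover open for normal use, reading
+		the attribute might return (illustrative output):
+
+		  # cat /sys/bus/surface_aggregator/devices/01:0e:01:00:01/state
+		  laptop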
+
+What: /sys/bus/surface_aggregator/devices/01:26:01:00:01/state
+Date: July 2022
+KernelVersion: 5.20
+Contact: Maximilian Luz <luzmaximilian@gmail.com>
+Description:
+		This attribute returns a string with the current device
+		posture, as indicated by the embedded controller. Currently
+		returned posture states are:
+
+ - "closed": The lid of the device is closed.
+
+ - "laptop": The lid of the device is opened and the device
+ operates as a normal laptop.
+
+ - "slate": The screen covers the keyboard or has been flipped
+ back and the device operates mainly based on touch input.
+
+ - "tablet": The device operates as tablet and exclusively
+ relies on touch input (or external peripherals).
+
+ - "<unknown>": The current state is unknown to the driver, for
+ example due to newer as-of-yet unsupported hardware.
+
+ New states may be introduced with new hardware. Users therefore
+ must not rely on this list of states being exhaustive and
+ gracefully handle unknown states.
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 5bf61881f012..760c889b6cd1 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -523,6 +523,7 @@ What: /sys/devices/system/cpu/vulnerabilities
/sys/devices/system/cpu/vulnerabilities/tsx_async_abort
/sys/devices/system/cpu/vulnerabilities/itlb_multihit
/sys/devices/system/cpu/vulnerabilities/mmio_stale_data
+ /sys/devices/system/cpu/vulnerabilities/retbleed
Date: January 2018
Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
Description: Information about CPU vulnerabilities
diff --git a/Documentation/ABI/testing/sysfs-driver-xen-blkback b/Documentation/ABI/testing/sysfs-driver-xen-blkback
index 7faf719af165..fac0f429a869 100644
--- a/Documentation/ABI/testing/sysfs-driver-xen-blkback
+++ b/Documentation/ABI/testing/sysfs-driver-xen-blkback
@@ -42,5 +42,5 @@ KernelVersion: 5.10
Contact: Maximilian Heyne <mheyne@amazon.de>
Description:
Whether to enable the persistent grants feature or not. Note
- that this option only takes effect on newly created backends.
+ that this option only takes effect on newly connected backends.
The default is Y (enable).
diff --git a/Documentation/ABI/testing/sysfs-driver-xen-blkfront b/Documentation/ABI/testing/sysfs-driver-xen-blkfront
index 7f646c58832e..4d36c5a10546 100644
--- a/Documentation/ABI/testing/sysfs-driver-xen-blkfront
+++ b/Documentation/ABI/testing/sysfs-driver-xen-blkfront
@@ -15,5 +15,5 @@ KernelVersion: 5.10
Contact: Maximilian Heyne <mheyne@amazon.de>
Description:
Whether to enable the persistent grants feature or not. Note
- that this option only takes effect on newly created frontends.
+ that this option only takes effect on newly connected frontends.
The default is Y (enable).
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index 9b583dd0298b..083ac2d63eef 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -580,3 +580,33 @@ Date: January 2022
Contact: "Jaegeuk Kim" <jaegeuk@kernel.org>
Description: Controls max # of node block writes to be used for roll forward
recovery. This can limit the roll forward recovery time.
+
+What: /sys/fs/f2fs/<disk>/unusable_blocks_per_sec
+Date: June 2022
+Contact: "Jaegeuk Kim" <jaegeuk@kernel.org>
+Description: Shows the number of unusable blocks in a section which was defined by
+		the zone capacity reported by the underlying zoned device.
+
+What: /sys/fs/f2fs/<disk>/current_atomic_write
+Date: July 2022
+Contact: "Daeho Jeong" <daehojeong@google.com>
+Description: Show the total current atomic write block count, which is not committed yet.
+ This is a read-only entry.
+
+What: /sys/fs/f2fs/<disk>/peak_atomic_write
+Date: July 2022
+Contact: "Daeho Jeong" <daehojeong@google.com>
+Description: Show the peak value of total current atomic write block count after boot.
+ If you write "0" here, you can initialize to "0".
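+
+		For instance, on a hypothetical device "sda1", the peak counter
+		can be read and then reset as below:
+
+		  # cat /sys/fs/f2fs/sda1/peak_atomic_write
+		  # echo 0 > /sys/fs/f2fs/sda1/peak_atomic_write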
+
+What: /sys/fs/f2fs/<disk>/committed_atomic_block
+Date: July 2022
+Contact: "Daeho Jeong" <daehojeong@google.com>
+Description: Show the accumulated total committed atomic write block count after boot.
+ If you write "0" here, you can initialize to "0".
+
+What: /sys/fs/f2fs/<disk>/revoked_atomic_block
+Date: July 2022
+Contact: "Daeho Jeong" <daehojeong@google.com>
+Description: Show the accumulated total revoked atomic write block count after boot.
+ If you write "0" here, you can initialize to "0".
diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-ksm b/Documentation/ABI/testing/sysfs-kernel-mm-ksm
index 1c9bed5595f5..d244674a9480 100644
--- a/Documentation/ABI/testing/sysfs-kernel-mm-ksm
+++ b/Documentation/ABI/testing/sysfs-kernel-mm-ksm
@@ -41,7 +41,7 @@ Description: Kernel Samepage Merging daemon sysfs interface
sleep_millisecs: how many milliseconds ksm should sleep between
scans.
- See Documentation/vm/ksm.rst for more information.
+ See Documentation/mm/ksm.rst for more information.
What: /sys/kernel/mm/ksm/merge_across_nodes
Date: January 2013
diff --git a/Documentation/ABI/testing/sysfs-kernel-slab b/Documentation/ABI/testing/sysfs-kernel-slab
index c440f4946e12..cd5fb8fa3ddf 100644
--- a/Documentation/ABI/testing/sysfs-kernel-slab
+++ b/Documentation/ABI/testing/sysfs-kernel-slab
@@ -37,7 +37,7 @@ Description:
The alloc_calls file is read-only and lists the kernel code
locations from which allocations for this cache were performed.
The alloc_calls file only contains information if debugging is
- enabled for that cache (see Documentation/vm/slub.rst).
+ enabled for that cache (see Documentation/mm/slub.rst).
What: /sys/kernel/slab/<cache>/alloc_fastpath
Date: February 2008
@@ -219,7 +219,7 @@ Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
Description:
The free_calls file is read-only and lists the locations of
object frees if slab debugging is enabled (see
- Documentation/vm/slub.rst).
+ Documentation/mm/slub.rst).
What: /sys/kernel/slab/<cache>/free_fastpath
Date: February 2008
diff --git a/Documentation/PCI/endpoint/index.rst b/Documentation/PCI/endpoint/index.rst
index 38ea1f604b6d..4d2333e7ae06 100644
--- a/Documentation/PCI/endpoint/index.rst
+++ b/Documentation/PCI/endpoint/index.rst
@@ -13,6 +13,8 @@ PCI Endpoint Framework
pci-test-howto
pci-ntb-function
pci-ntb-howto
+ pci-vntb-function
+ pci-vntb-howto
function/binding/pci-test
function/binding/pci-ntb
diff --git a/Documentation/PCI/endpoint/pci-vntb-function.rst b/Documentation/PCI/endpoint/pci-vntb-function.rst
new file mode 100644
index 000000000000..0c51f53ab972
--- /dev/null
+++ b/Documentation/PCI/endpoint/pci-vntb-function.rst
@@ -0,0 +1,129 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=================
+PCI vNTB Function
+=================
+
+:Author: Frank Li <Frank.Li@nxp.com>
+
+The difference between the PCI NTB function and the PCI vNTB function is:
+
+The PCI NTB function needs two endpoint instances and connects HOST1
+and HOST2.
+
+The PCI vNTB function uses only one host and one endpoint (EP), and uses
+NTB to connect the EP and the PCI host.
+
+.. code-block:: text
+
+
+ +------------+ +---------------------------------------+
+ | | | |
+ +------------+ | +--------------+
+ | NTB | | | NTB |
+ | NetDev | | | NetDev |
+ +------------+ | +--------------+
+ | NTB | | | NTB |
+ | Transfer | | | Transfer |
+ +------------+ | +--------------+
+ | | | | |
+ | PCI NTB | | | |
+ | EPF | | | |
+ | Driver | | | PCI Virtual |
+ | | +---------------+ | NTB Driver |
+ | | | PCI EP NTB |<------>| |
+ | | | FN Driver | | |
+ +------------+ +---------------+ +--------------+
+ | | | | | |
+ | PCI BUS | <-----> | PCI EP BUS | | Virtual PCI |
+ | | PCI | | | BUS |
+ +------------+ +---------------+--------+--------------+
+ PCI RC PCI EP
+
+Constructs used for Implementing vNTB
+=====================================
+
+ 1) Config Region
+ 2) Self Scratchpad Registers
+ 3) Peer Scratchpad Registers
+ 4) Doorbell (DB) Registers
+ 5) Memory Window (MW)
+
+
+Config Region:
+--------------
+
+It is the same as in the PCI NTB function driver.
+
+Scratchpad Registers:
+---------------------
+
+It is appended after the Config region.
+
+.. code-block:: text
+
+
+ +--------------------------------------------------+ Base
+ | |
+ | |
+ | |
+ | Common Config Register |
+ | |
+ | |
+ | |
+ +-----------------------+--------------------------+ Base + span_offset
+ | | |
+ | Peer Span Space | Span Space |
+ | | |
+ | | |
+ +-----------------------+--------------------------+ Base + span_offset
+ | | | + span_count * 4
+ | | |
+ | Span Space | Peer Span Space |
+ | | |
+ +-----------------------+--------------------------+
+                 Virtual PCI                        PCIe Endpoint
+ NTB Driver NTB Driver
+
+
+Doorbell Registers:
+-------------------
+
+ Doorbell Registers are used by the hosts to interrupt each other.
+
+Memory Window:
+--------------
+
+ Actual transfer of data between the two hosts will happen using the
+ memory window.
+
+Modeling Constructs:
+====================
+
+32-bit BARs.
+
+====== ===============
+BAR NO CONSTRUCTS USED
+====== ===============
+BAR0 Config Region
+BAR1 Doorbell
+BAR2 Memory Window 1
+BAR3 Memory Window 2
+BAR4 Memory Window 3
+BAR5 Memory Window 4
+====== ===============
+
+64-bit BARs.
+
+====== ===============================
+BAR NO CONSTRUCTS USED
+====== ===============================
+BAR0 Config Region + Scratchpad
+BAR1
+BAR2 Doorbell
+BAR3
+BAR4 Memory Window 1
+BAR5
+====== ===============================
+
+
diff --git a/Documentation/PCI/endpoint/pci-vntb-howto.rst b/Documentation/PCI/endpoint/pci-vntb-howto.rst
new file mode 100644
index 000000000000..4ab8e4a26d4b
--- /dev/null
+++ b/Documentation/PCI/endpoint/pci-vntb-howto.rst
@@ -0,0 +1,167 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===================================================================
+PCI Non-Transparent Bridge (NTB) Endpoint Function (EPF) User Guide
+===================================================================
+
+:Author: Frank Li <Frank.Li@nxp.com>
+
+This document is a guide to help users use the pci-epf-vntb function driver
+and the ntb_hw_epf host driver for NTB functionality. The list of steps to
+be followed on the host side and the EP side is given below. For the hardware
+configuration and internals of NTB using configurable endpoints see
+Documentation/PCI/endpoint/pci-vntb-function.rst
+
+Endpoint Device
+===============
+
+Endpoint Controller Devices
+---------------------------
+
+To find the list of endpoint controller devices in the system::
+
+ # ls /sys/class/pci_epc/
+ 5f010000.pcie_ep
+
+If PCI_ENDPOINT_CONFIGFS is enabled::
+
+ # ls /sys/kernel/config/pci_ep/controllers
+ 5f010000.pcie_ep
+
+Endpoint Function Drivers
+-------------------------
+
+To find the list of endpoint function drivers in the system::
+
+ # ls /sys/bus/pci-epf/drivers
+ pci_epf_ntb pci_epf_test pci_epf_vntb
+
+If PCI_ENDPOINT_CONFIGFS is enabled::
+
+ # ls /sys/kernel/config/pci_ep/functions
+ pci_epf_ntb pci_epf_test pci_epf_vntb
+
+
+Creating pci-epf-vntb Device
+----------------------------
+
+A PCI endpoint function device can be created using configfs. To create a
+pci-epf-vntb device, the following commands can be used::
+
+ # mount -t configfs none /sys/kernel/config
+ # cd /sys/kernel/config/pci_ep/
+ # mkdir functions/pci_epf_vntb/func1
+
+The "mkdir func1" above creates the pci-epf-vntb function device that will
+be probed by the pci_epf_vntb driver.
+
+The PCI endpoint framework populates the directory with the following
+configurable fields::
+
+  # ls functions/pci_epf_vntb/func1
+ baseclass_code deviceid msi_interrupts pci-epf-ntb.0
+ progif_code secondary subsys_id vendorid
+ cache_line_size interrupt_pin msix_interrupts primary
+ revid subclass_code subsys_vendor_id
+
+The PCI endpoint function driver populates these entries with default values
+when the device is bound to the driver. The pci-epf-vntb driver populates
+vendorid with 0xffff and interrupt_pin with 0x0001::
+
+ # cat functions/pci_epf_vntb/func1/vendorid
+ 0xffff
+ # cat functions/pci_epf_vntb/func1/interrupt_pin
+ 0x0001
+
+
+Configuring pci-epf-vntb Device
+-------------------------------
+
+The user can configure the pci-epf-vntb device using its configfs entry. In order
+to change the vendorid and the deviceid, the following
+commands can be used::
+
+ # echo 0x1957 > functions/pci_epf_vntb/func1/vendorid
+ # echo 0x0809 > functions/pci_epf_vntb/func1/deviceid
+
+In order to configure NTB specific attributes, a new sub-directory under func1
+should be created::
+
+ # mkdir functions/pci_epf_vntb/func1/pci_epf_vntb.0/
+
+The NTB function driver will populate this directory with various attributes
+that can be configured by the user::
+
+ # ls functions/pci_epf_vntb/func1/pci_epf_vntb.0/
+ db_count mw1 mw2 mw3 mw4 num_mws
+ spad_count
+
+A sample configuration for NTB function is given below::
+
+ # echo 4 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/db_count
+ # echo 128 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/spad_count
+ # echo 1 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/num_mws
+ # echo 0x100000 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/mw1
+
+A sample configuration for the virtual NTB driver on the virtual PCI bus::
+
+ # echo 0x1957 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vntb_vid
+ # echo 0x080A > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vntb_pid
+ # echo 0x10 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vbus_number
+
+Binding pci-epf-vntb Device to EP Controller
+---------------------------------------------
+
+The vNTB function device should be attached to the PCI endpoint controller
+connected to the host::
+
+  # ln -s controllers/5f010000.pcie_ep functions/pci_epf_vntb/func1/primary
+
+Once the above step is completed, the PCI endpoint controllers are ready to
+establish a link with the host.
+
+
+Start the Link
+--------------
+
+In order for the endpoint device to establish a link with the host, the _start_
+field should be populated with '1'. For vNTB, the PCI endpoint controller
+should establish a link with the host (imx8 does not need this step)::
+
+ # echo 1 > controllers/5f010000.pcie_ep/start
+
+RootComplex Device
+==================
+
+lspci Output at Host side
+-------------------------
+
+Note that the devices listed here correspond to the values populated in
+"Creating pci-epf-vntb Device" section above::
+
+ # lspci
+ 00:00.0 PCI bridge: Freescale Semiconductor Inc Device 0000 (rev 01)
+ 01:00.0 RAM memory: Freescale Semiconductor Inc Device 0809
+
+Endpoint Device / Virtual PCI bus
+=================================
+
+lspci Output at EP Side / Virtual PCI bus
+-----------------------------------------
+
+Note that the devices listed here correspond to the values populated in
+"Creating pci-epf-vntb Device" section above::
+
+ # lspci
+ 10:00.0 Unassigned class [ffff]: Dawicontrol Computersysteme GmbH Device 1234 (rev ff)
+
+Using ntb_hw_epf Device
+-----------------------
+
+The host side software follows the standard NTB software architecture in Linux.
+All the existing client side NTB utilities like NTB Transport Client, NTB
+Netdev, NTB Ping Pong Test Client and NTB Tool Test Client can be used with
+the NTB function device.
+
+For more information on NTB see
+:doc:`Non-Transparent Bridge <../../driver-api/ntb>`
diff --git a/Documentation/PCI/pci-iov-howto.rst b/Documentation/PCI/pci-iov-howto.rst
index b9fd003206f1..27d35933cea2 100644
--- a/Documentation/PCI/pci-iov-howto.rst
+++ b/Documentation/PCI/pci-iov-howto.rst
@@ -125,14 +125,14 @@ Following piece of code illustrates the usage of the SR-IOV API.
...
}
- static int dev_suspend(struct pci_dev *dev, pm_message_t state)
+ static int dev_suspend(struct device *dev)
{
...
return 0;
}
- static int dev_resume(struct pci_dev *dev)
+ static int dev_resume(struct device *dev)
{
...
@@ -165,8 +165,7 @@ Following piece of code illustrates the usage of the SR-IOV API.
.id_table = dev_id_table,
.probe = dev_probe,
.remove = dev_remove,
- .suspend = dev_suspend,
- .resume = dev_resume,
+ .driver.pm = &dev_pm_ops,
.shutdown = dev_shutdown,
.sriov_configure = dev_sriov_configure,
};
diff --git a/Documentation/PCI/sysfs-pci.rst b/Documentation/PCI/sysfs-pci.rst
index 742fbd21dc1f..f495185aa88a 100644
--- a/Documentation/PCI/sysfs-pci.rst
+++ b/Documentation/PCI/sysfs-pci.rst
@@ -125,7 +125,7 @@ implementation of that functionality. To support the historical interface of
mmap() through files in /proc/bus/pci, platforms may also set HAVE_PCI_MMAP.
Alternatively, platforms which set HAVE_PCI_MMAP may provide their own
-implementation of pci_mmap_page_range() instead of defining
+implementation of pci_mmap_resource_range() instead of defining
ARCH_GENERIC_PCI_MMAP_RESOURCE.
Platforms which support write-combining maps of PCI resources must define
diff --git a/Documentation/admin-guide/README.rst b/Documentation/admin-guide/README.rst
index caa3c09a5c3f..9eb6b9042f75 100644
--- a/Documentation/admin-guide/README.rst
+++ b/Documentation/admin-guide/README.rst
@@ -1,9 +1,9 @@
.. _readme:
-Linux kernel release 5.x <http://kernel.org/>
+Linux kernel release 6.x <http://kernel.org/>
=============================================
-These are the release notes for Linux version 5. Read them carefully,
+These are the release notes for Linux version 6. Read them carefully,
as they tell you what this is all about, explain how to install the
kernel, and what to do if something goes wrong.
@@ -63,7 +63,7 @@ Installing the kernel source
directory where you have permissions (e.g. your home directory) and
unpack it::
- xz -cd linux-5.x.tar.xz | tar xvf -
+ xz -cd linux-6.x.tar.xz | tar xvf -
Replace "X" with the version number of the latest kernel.
@@ -72,12 +72,12 @@ Installing the kernel source
files. They should match the library, and not get messed up by
whatever the kernel-du-jour happens to be.
- - You can also upgrade between 5.x releases by patching. Patches are
+ - You can also upgrade between 6.x releases by patching. Patches are
distributed in the xz format. To install by patching, get all the
newer patch files, enter the top level directory of the kernel source
- (linux-5.x) and execute::
+ (linux-6.x) and execute::
- xz -cd ../patch-5.x.xz | patch -p1
+ xz -cd ../patch-6.x.xz | patch -p1
Replace "x" for all versions bigger than the version "x" of your current
source tree, **in_order**, and you should be ok. You may want to remove
@@ -85,13 +85,13 @@ Installing the kernel source
that there are no failed patches (some-file-name# or some-file-name.rej).
If there are, either you or I have made a mistake.
- Unlike patches for the 5.x kernels, patches for the 5.x.y kernels
+ Unlike patches for the 6.x kernels, patches for the 6.x.y kernels
(also known as the -stable kernels) are not incremental but instead apply
- directly to the base 5.x kernel. For example, if your base kernel is 5.0
- and you want to apply the 5.0.3 patch, you must not first apply the 5.0.1
- and 5.0.2 patches. Similarly, if you are running kernel version 5.0.2 and
- want to jump to 5.0.3, you must first reverse the 5.0.2 patch (that is,
- patch -R) **before** applying the 5.0.3 patch. You can read more on this in
+ directly to the base 6.x kernel. For example, if your base kernel is 6.0
+ and you want to apply the 6.0.3 patch, you must not first apply the 6.0.1
+ and 6.0.2 patches. Similarly, if you are running kernel version 6.0.2 and
+ want to jump to 6.0.3, you must first reverse the 6.0.2 patch (that is,
+ patch -R) **before** applying the 6.0.3 patch. You can read more on this in
:ref:`Documentation/process/applying-patches.rst <applying_patches>`.
Alternatively, the script patch-kernel can be used to automate this
@@ -114,7 +114,7 @@ Installing the kernel source
Software requirements
---------------------
- Compiling and running the 5.x kernels requires up-to-date
+ Compiling and running the 6.x kernels requires up-to-date
versions of various software packages. Consult
:ref:`Documentation/process/changes.rst <changes>` for the minimum version numbers
required and how to get updates for these packages. Beware that using
@@ -132,12 +132,12 @@ Build directory for the kernel
place for the output files (including .config).
Example::
- kernel source code: /usr/src/linux-5.x
+ kernel source code: /usr/src/linux-6.x
build directory: /home/name/build/kernel
To configure and build the kernel, use::
- cd /usr/src/linux-5.x
+ cd /usr/src/linux-6.x
make O=/home/name/build/kernel menuconfig
make O=/home/name/build/kernel
sudo make O=/home/name/build/kernel modules_install install
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index bf842b80bde9..be4a77baf784 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -1237,6 +1237,13 @@ PAGE_SIZE multiple when read back.
the target cgroup. If less bytes are reclaimed than the
specified amount, -EAGAIN is returned.
+ Please note that the proactive reclaim (triggered by this
+ interface) is not meant to indicate memory pressure on the
+	  memory cgroup. Therefore the socket memory balancing that is
+	  normally triggered by memory reclaim is not exercised in this case.
+ This means that the networking layer will not adapt based on
+ reclaim induced by memory.reclaim.
+
memory.peak
A read-only single value file which exists on non-root
cgroups.
@@ -1441,6 +1448,24 @@ PAGE_SIZE multiple when read back.
workingset_nodereclaim
Number of times a shadow node has been reclaimed
+ pgscan (npn)
+ Amount of scanned pages (in an inactive LRU list)
+
+ pgsteal (npn)
+ Amount of reclaimed pages
+
+ pgscan_kswapd (npn)
+ Amount of scanned pages by kswapd (in an inactive LRU list)
+
+ pgscan_direct (npn)
+ Amount of scanned pages directly (in an inactive LRU list)
+
+ pgsteal_kswapd (npn)
+ Amount of reclaimed pages by kswapd
+
+ pgsteal_direct (npn)
+ Amount of reclaimed pages directly
+
pgfault (npn)
Total number of page faults incurred
@@ -1450,12 +1475,6 @@ PAGE_SIZE multiple when read back.
pgrefill (npn)
Amount of scanned pages (in an active LRU list)
- pgscan (npn)
- Amount of scanned pages (in an inactive LRU list)
-
- pgsteal (npn)
- Amount of reclaimed pages
-
pgactivate (npn)
Amount of pages moved to the active LRU list
diff --git a/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
index 9393c50b5afc..c98fd11907cc 100644
--- a/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
+++ b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
@@ -230,6 +230,20 @@ The possible values in this file are:
* - 'Mitigation: Clear CPU buffers'
- The processor is vulnerable and the CPU buffer clearing mitigation is
enabled.
+ * - 'Unknown: No mitigations'
+ - The processor vulnerability status is unknown because it is
+ out of Servicing period. Mitigation is not attempted.
+
+Definitions:
+------------
+
+Servicing period: The process of providing functional and security updates to
+Intel processors or platforms, utilizing the Intel Platform Update (IPU)
+process or other similar mechanisms.
+
+End of Servicing Updates (ESU): ESU is the date at which Intel will no
+longer provide Servicing, such as through IPU or other similar update
+processes. ESU dates will typically be aligned to end of quarter.
If the processor is vulnerable then the following information is appended to
the above information:
diff --git a/Documentation/admin-guide/hw-vuln/spectre.rst b/Documentation/admin-guide/hw-vuln/spectre.rst
index 9e9556826450..2ce2a38cdd55 100644
--- a/Documentation/admin-guide/hw-vuln/spectre.rst
+++ b/Documentation/admin-guide/hw-vuln/spectre.rst
@@ -422,6 +422,14 @@ The possible values in this file are:
'RSB filling' Protection of RSB on context switch enabled
============= ===========================================
+ - EIBRS Post-barrier Return Stack Buffer (PBRSB) protection status:
+
+ =========================== =======================================================
+ 'PBRSB-eIBRS: SW sequence' CPU is affected and protection of RSB on VMEXIT enabled
+ 'PBRSB-eIBRS: Vulnerable' CPU is vulnerable
+ 'PBRSB-eIBRS: Not affected' CPU is not affected by PBRSB
+ =========================== =======================================================
+
Full mitigation might require a microcode update from the CPU
vendor. When the necessary microcode is not available, the kernel will
report vulnerability.
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index ef9f80b1ddde..426fa892d311 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1158,8 +1158,12 @@
nopku [X86] Disable Memory Protection Keys CPU feature found
in some Intel CPUs.
- <module>.async_probe [KNL]
- Enable asynchronous probe on this module.
+ <module>.async_probe[=<bool>] [KNL]
+ If no <bool> value is specified or if the value
+ specified is not a valid <bool>, enable asynchronous
+ probe on this module. Otherwise, enable/disable
+ asynchronous probe on this module as indicated by the
+ <bool> value. See also: module.async_probe
early_ioremap_debug [KNL]
Enable debug messages in early_ioremap support. This
@@ -1673,6 +1677,19 @@
hlt [BUGS=ARM,SH]
+ hostname= [KNL] Set the hostname (aka UTS nodename).
+ Format: <string>
+ This allows setting the system's hostname during early
+ startup. This sets the name returned by gethostname.
+ Using this parameter to set the hostname makes it
+ possible to ensure the hostname is correctly set before
+ any userspace processes run, avoiding the possibility
+ that a process may call gethostname before the hostname
+ has been explicitly set, resulting in the calling
+ process getting an incorrect result. The string must
+ not exceed the maximum allowed hostname length (usually
+			64 characters); longer strings will be truncated.
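+
+			For example (an illustrative value), booting with
+			hostname=buildhost sets the nodename to "buildhost"
+			before any userspace process runs.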
+
hpet= [X86-32,HPET] option to control HPET usage
Format: { enable (default) | disable | force |
verbose }
@@ -1718,19 +1735,22 @@
hugetlb_free_vmemmap=
[KNL] Reguires CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
enabled.
+ Control if HugeTLB Vmemmap Optimization (HVO) is enabled.
Allows heavy hugetlb users to free up some more
memory (7 * PAGE_SIZE for each 2MB hugetlb page).
- Format: { [oO][Nn]/Y/y/1 | [oO][Ff]/N/n/0 (default) }
+ Format: { on | off (default) }
- [oO][Nn]/Y/y/1: enable the feature
- [oO][Ff]/N/n/0: disable the feature
+ on: enable HVO
+ off: disable HVO
Built with CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON=y,
the default is on.
- This is not compatible with memory_hotplug.memmap_on_memory.
- If both parameters are enabled, hugetlb_free_vmemmap takes
- precedence over memory_hotplug.memmap_on_memory.
+ Note that the vmemmap pages may be allocated from the added
+ memory block itself when memory_hotplug.memmap_on_memory is
+			enabled; those vmemmap pages cannot be optimized even if this
+			feature is enabled. Other vmemmap pages not allocated from
+			the added memory block itself are not affected.
hung_task_panic=
[KNL] Should the hung task detector generate panics.
@@ -2272,23 +2292,39 @@
ivrs_ioapic [HW,X86-64]
Provide an override to the IOAPIC-ID<->DEVICE-ID
- mapping provided in the IVRS ACPI table. For
- example, to map IOAPIC-ID decimal 10 to
- PCI device 00:14.0 write the parameter as:
+ mapping provided in the IVRS ACPI table.
+ By default, PCI segment is 0, and can be omitted.
+ For example:
+ * To map IOAPIC-ID decimal 10 to PCI device 00:14.0
+ write the parameter as:
ivrs_ioapic[10]=00:14.0
+ * To map IOAPIC-ID decimal 10 to PCI segment 0x1 and
+ PCI device 00:14.0 write the parameter as:
+ ivrs_ioapic[10]=0001:00:14.0
ivrs_hpet [HW,X86-64]
Provide an override to the HPET-ID<->DEVICE-ID
- mapping provided in the IVRS ACPI table. For
- example, to map HPET-ID decimal 0 to
- PCI device 00:14.0 write the parameter as:
+ mapping provided in the IVRS ACPI table.
+ By default, PCI segment is 0, and can be omitted.
+ For example:
+ * To map HPET-ID decimal 0 to PCI device 00:14.0
+ write the parameter as:
ivrs_hpet[0]=00:14.0
+ * To map HPET-ID decimal 10 to PCI segment 0x1 and
+ PCI device 00:14.0 write the parameter as:
+ ivrs_ioapic[10]=0001:00:14.0
ivrs_acpihid [HW,X86-64]
Provide an override to the ACPI-HID:UID<->DEVICE-ID
- mapping provided in the IVRS ACPI table. For
- example, to map UART-HID:UID AMD0020:0 to
- PCI device 00:14.5 write the parameter as:
+ mapping provided in the IVRS ACPI table.
+
+ For example, to map UART-HID:UID AMD0020:0 to
+ PCI segment 0x1 and PCI device ID 00:14.5,
+ write the parameter as:
+ ivrs_acpihid[0001:00:14.5]=AMD0020:0
+
+ By default, PCI segment is 0, and can be omitted.
+ For example, PCI device 00:14.5 write the parameter as:
ivrs_acpihid[00:14.5]=AMD0020:0
js= [HW,JOY] Analog joystick
@@ -3073,10 +3109,12 @@
[KNL,X86,ARM] Boolean flag to enable this feature.
Format: {on | off (default)}
When enabled, runtime hotplugged memory will
- allocate its internal metadata (struct pages)
- from the hotadded memory which will allow to
- hotadd a lot of memory without requiring
- additional memory to do so.
+				allocate its internal metadata (struct pages;
+				those vmemmap pages cannot be optimized even
+				if hugetlb_free_vmemmap is enabled) from the
+				hotadded memory, which allows hotadding a lot
+				of memory without requiring additional memory
+				to do so.
This feature is disabled by default because it
has some implication on large (e.g. GB)
allocations in some configurations (e.g. small
@@ -3086,10 +3124,6 @@
Note that even when enabled, there are a few cases where
the feature is not effective.
- This is not compatible with hugetlb_free_vmemmap. If
- both parameters are enabled, hugetlb_free_vmemmap takes
- precedence over memory_hotplug.memmap_on_memory.
-
memtest= [KNL,X86,ARM,M68K,PPC,RISCV] Enable memtest
Format: <integer>
default : 0 <disable>
@@ -3248,6 +3282,15 @@
For details see:
Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
+ module.async_probe=<bool>
+ [KNL] When set to true, modules will use async probing
+ by default. To enable/disable async probing for a
+ specific module, use the module specific control that
+ is documented under <module>.async_probe. When both
+ module.async_probe and <module>.async_probe are
+ specified, <module>.async_probe takes precedence for
+ the specific module.
+
module.sig_enforce
[KNL] When CONFIG_MODULE_SIG is set, this means that
modules without (valid) signatures will fail to load.
@@ -3537,9 +3580,6 @@
noautogroup Disable scheduler automatic task group creation.
- nobats [PPC] Do not use BATs for mapping kernel lowmem
- on "Classic" PPC cores.
-
nocache [ARM]
nodsp [SH] Disable hardware DSP at boot time.
@@ -3709,9 +3749,6 @@
nolapic_timer [X86-32,APIC] Do not use the local APIC timer.
- noltlbs [PPC] Do not use large page/tlb entries for kernel
- lowmem mapping on PPC40x and PPC8xx
-
nomca [IA-64] Disable machine check abort handling
nomce [X86-32] Disable Machine Check Exception
@@ -5237,20 +5274,33 @@
Speculative Code Execution with Return Instructions)
vulnerability.
+ AMD-based UNRET and IBPB mitigations alone do not stop
+ sibling threads from influencing the predictions of other
+			sibling threads. For that reason, STIBP is used on
+			processors that support it, and SMT is mitigated on
+			processors that don't.
+
off - no mitigation
auto - automatically select a migitation
auto,nosmt - automatically select a mitigation,
disabling SMT if necessary for
the full mitigation (only on Zen1
and older without STIBP).
- ibpb - mitigate short speculation windows on
- basic block boundaries too. Safe, highest
- perf impact.
- unret - force enable untrained return thunks,
- only effective on AMD f15h-f17h
- based systems.
- unret,nosmt - like unret, will disable SMT when STIBP
- is not available.
+ ibpb - On AMD, mitigate short speculation
+ windows on basic block boundaries too.
+ Safe, highest perf impact. It also
+ enables STIBP if present. Not suitable
+ on Intel.
+ ibpb,nosmt - Like "ibpb" above but will disable SMT
+ when STIBP is not available. This is
+ the alternative for systems which do not
+ have STIBP.
+ unret - Force enable untrained return thunks,
+ only effective on AMD f15h-f17h based
+ systems.
+ unret,nosmt - Like unret, but will disable SMT when STIBP
+ is not available. This is the alternative for
+ systems which do not have STIBP.
Selecting 'auto' will choose a mitigation method at run
time according to the CPU.
@@ -5281,6 +5331,8 @@
rodata= [KNL]
on Mark read-only kernel memory as read-only (default).
off Leave read-only kernel memory writable for debugging.
+ full Mark read-only kernel memory and aliases as read-only
+ [arm64]
rockchip.usb_uart
Enable the uart passthrough on the designated usb port
@@ -5502,7 +5554,7 @@
cache (risks via metadata attacks are mostly
unchanged). Debug options disable merging on their
own.
- For more information see Documentation/vm/slub.rst.
+ For more information see Documentation/mm/slub.rst.
slab_max_order= [MM, SLAB]
Determines the maximum allowed order for slabs.
@@ -5516,13 +5568,13 @@
slub_debug can create guard zones around objects and
may poison objects when not in use. Also tracks the
last alloc / free. For more information see
- Documentation/vm/slub.rst.
+ Documentation/mm/slub.rst.
slub_max_order= [MM, SLUB]
Determines the maximum allowed order for slabs.
A high setting may cause OOMs due to memory
fragmentation. For more information see
- Documentation/vm/slub.rst.
+ Documentation/mm/slub.rst.
slub_min_objects= [MM, SLUB]
The minimum number of objects per slab. SLUB will
@@ -5531,12 +5583,12 @@
the number of objects indicated. The higher the number
of objects the smaller the overhead of tracking slabs
and the less frequently locks need to be acquired.
- For more information see Documentation/vm/slub.rst.
+ For more information see Documentation/mm/slub.rst.
slub_min_order= [MM, SLUB]
Determines the minimum page order for slabs. Must be
lower than slub_max_order.
- For more information see Documentation/vm/slub.rst.
+ For more information see Documentation/mm/slub.rst.
slub_merge [MM, SLUB]
Same with slab_merge.
@@ -5983,8 +6035,11 @@
it if 0 is given (See Documentation/admin-guide/cgroup-v1/memory.rst)
swiotlb= [ARM,IA-64,PPC,MIPS,X86]
- Format: { <int> | force | noforce }
+ Format: { <int> [,<int>] | force | noforce }
<int> -- Number of I/O TLB slabs
+ <int> -- Second integer after comma. Number of swiotlb
+ areas with their own lock. Will be rounded up
+			to a power of 2, as illustrated below.
force -- force using of bounce buffers even if they
wouldn't be automatically used by the kernel
noforce -- Never use bounce buffers (for debugging)
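+		As an illustration (the values are arbitrary), swiotlb=65536,4
+		requests 65536 I/O TLB slabs split into 4 areas, each with its
+		own lock.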
diff --git a/Documentation/admin-guide/mm/concepts.rst b/Documentation/admin-guide/mm/concepts.rst
index b966fcff993b..c79f1e336222 100644
--- a/Documentation/admin-guide/mm/concepts.rst
+++ b/Documentation/admin-guide/mm/concepts.rst
@@ -125,7 +125,7 @@ processor. Each bank is referred to as a `node` and for each node Linux
constructs an independent memory management subsystem. A node has its
own set of zones, lists of free and used pages and various statistics
counters. You can find more details about NUMA in
-:ref:`Documentation/vm/numa.rst <numa>` and in
+:ref:`Documentation/mm/numa.rst <numa>` and in
:ref:`Documentation/admin-guide/mm/numa_memory_policy.rst <numa_memory_policy>`.
Page cache
diff --git a/Documentation/admin-guide/mm/damon/index.rst b/Documentation/admin-guide/mm/damon/index.rst
index 61aff88347f3..05500042f777 100644
--- a/Documentation/admin-guide/mm/damon/index.rst
+++ b/Documentation/admin-guide/mm/damon/index.rst
@@ -4,7 +4,7 @@
Monitoring Data Accesses
========================
-:doc:`DAMON </vm/damon/index>` allows light-weight data access monitoring.
+:doc:`DAMON </mm/damon/index>` allows light-weight data access monitoring.
Using DAMON, users can analyze the memory access patterns of their systems and
optimize those.
@@ -14,3 +14,4 @@ optimize those.
start
usage
reclaim
+ lru_sort
diff --git a/Documentation/admin-guide/mm/damon/lru_sort.rst b/Documentation/admin-guide/mm/damon/lru_sort.rst
new file mode 100644
index 000000000000..c09cace80651
--- /dev/null
+++ b/Documentation/admin-guide/mm/damon/lru_sort.rst
@@ -0,0 +1,294 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=============================
+DAMON-based LRU-lists Sorting
+=============================
+
+DAMON-based LRU-lists Sorting (DAMON_LRU_SORT) is a static kernel module
+aimed at proactive and lightweight, data access pattern based
+(de)prioritization of pages on their LRU-lists, to make the LRU-lists a more
+trustworthy data access pattern source.
+
+Where Is Proactive LRU-lists Sorting Required?
+==============================================
+
+As page-granularity access checking overhead could be significant on huge
+systems, LRU lists are normally not proactively sorted but partially and
+reactively sorted for special events including specific user requests, system
+calls and memory pressure. As a result, LRU lists are sometimes not so
+perfectly prepared to be used as a trustworthy access pattern source for some
+situations including reclamation target pages selection under sudden memory
+pressure.
+
+Because DAMON can identify access patterns of best-effort accuracy while
+inducing only user-specified range of overhead, proactively running
+DAMON_LRU_SORT could be helpful for making LRU lists more trustworthy access
+pattern source with low and controlled overhead.
+
+How Does It Work?
+=================
+
+DAMON_LRU_SORT finds hot pages (pages of memory regions that show access
+rates higher than a user-specified threshold) and cold pages (pages of
+memory regions that show no access for a time longer than a
+user-specified threshold) using DAMON, and prioritizes hot pages while
+deprioritizing cold pages on their LRU-lists. To avoid it consuming too much
+CPU for the prioritizations, a CPU time usage limit can be configured. Under
+the limit, it prioritizes and deprioritizes more hot and cold pages first,
+respectively. System administrators can also configure under what situation
+this scheme should be automatically activated and deactivated with three
+memory pressure watermarks.
+
+Its default parameters for hotness/coldness thresholds and CPU quota limit are
+conservatively chosen. That is, the module under its default parameters could
+be widely used without harm for common situations, while providing a level of
+benefit for systems having clear hot/cold access patterns under memory
+pressure, and consuming only a small, limited portion of CPU time.
+
+Interface: Module Parameters
+============================
+
+To use this feature, you should first ensure your system is running on a kernel
+that is built with ``CONFIG_DAMON_LRU_SORT=y``.
+
+To let sysadmins enable or disable it and tune for the given system,
+DAMON_LRU_SORT utilizes module parameters. That is, you can put
+``damon_lru_sort.<parameter>=<value>`` on the kernel boot command line or write
+proper values to ``/sys/module/damon_lru_sort/parameters/<parameter>`` files.
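+
+For example (a hypothetical setup), the module can be enabled at boot by
+appending ``damon_lru_sort.enabled=Y`` to the kernel command line, or at
+runtime via sysfs::
+
+    # echo Y > /sys/module/damon_lru_sort/parameters/enabled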
+
+Below are the descriptions of each parameter.
+
+enabled
+-------
+
+Enable or disable DAMON_LRU_SORT.
+
+You can enable DAMON_LRU_SORT by setting the value of this parameter as ``Y``.
+Setting it as ``N`` disables DAMON_LRU_SORT. Note that, due to the
+watermarks-based activation condition, DAMON_LRU_SORT could do no real
+monitoring and LRU-lists sorting even when enabled. Refer to the descriptions
+of the watermark parameters below for this.
+
+commit_inputs
+-------------
+
+Make DAMON_LRU_SORT read the input parameters again, except ``enabled``.
+
+Input parameters that are updated while DAMON_LRU_SORT is running are not
+applied by default. Once this parameter is set as ``Y``, DAMON_LRU_SORT reads
+the values of the parameters except ``enabled`` again. Once the re-reading is
+done, this parameter is set as ``N``. If invalid parameters are found during
+the re-reading, DAMON_LRU_SORT will be disabled.
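+
+For instance (the values are illustrative), a runtime update of a parameter
+can be applied as below::
+
+    # echo 300 > /sys/module/damon_lru_sort/parameters/hot_thres_access_freq
+    # echo Y > /sys/module/damon_lru_sort/parameters/commit_inputs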
+
+hot_thres_access_freq
+---------------------
+
+Access frequency threshold for hot memory regions identification in permil.
+
+If a memory region is accessed at this frequency or higher, DAMON_LRU_SORT
+identifies the region as hot, and marks it as accessed on the LRU list, so
+that it could not be reclaimed under memory pressure. 50% by default.
+
+cold_min_age
+------------
+
+Time threshold for cold memory regions identification in microseconds.
+
+If a memory region is not accessed for this time or longer, DAMON_LRU_SORT
+identifies the region as cold, and marks it as unaccessed on the LRU list, so
+that it could be reclaimed first under memory pressure. 120 seconds by
+default.
+
+quota_ms
+--------
+
+Limit of time for trying the LRU lists sorting in milliseconds.
+
+DAMON_LRU_SORT tries to use only up to this time within a time window
+(quota_reset_interval_ms) for trying LRU lists sorting. This can be used
+for limiting CPU consumption of DAMON_LRU_SORT. If the value is zero, the
+limit is disabled.
+
+10 ms by default.
+
+quota_reset_interval_ms
+-----------------------
+
+The time quota charge reset interval in milliseconds.
+
+The charge reset interval for the quota of time (quota_ms). That is,
+DAMON_LRU_SORT does not try LRU-lists sorting for more than quota_ms
+milliseconds or quota_sz bytes within quota_reset_interval_ms milliseconds.
+
+1 second by default.
+
+wmarks_interval
+---------------
+
+The watermarks check time interval in microseconds.
+
+Minimal time to wait before checking the watermarks, when DAMON_LRU_SORT is
+enabled but inactive due to its watermarks rule. 5 seconds by default.
+
+wmarks_high
+-----------
+
+Free memory rate (per thousand) for the high watermark.
+
+If free memory of the system in bytes per thousand bytes is higher than this,
+DAMON_LRU_SORT becomes inactive, so it does nothing but periodically checks the
+watermarks. 200 (20%) by default.
+
+wmarks_mid
+----------
+
+Free memory rate (per thousand) for the middle watermark.
+
+If free memory of the system in bytes per thousand bytes is between this and
+the low watermark, DAMON_LRU_SORT becomes active, so starts the monitoring and
+the LRU-lists sorting. 150 (15%) by default.
+
+wmarks_low
+----------
+
+Free memory rate (per thousand) for the low watermark.
+
+If free memory of the system in bytes per thousand bytes is lower than this,
+DAMON_LRU_SORT becomes inactive, so it does nothing but periodically checks the
+watermarks. 50 (5%) by default.
+
+sample_interval
+---------------
+
+Sampling interval for the monitoring in microseconds.
+
+The sampling interval of DAMON for the cold memory monitoring. Please refer to
+the DAMON documentation (:doc:`usage`) for more detail. 5ms by default.
+
+aggr_interval
+-------------
+
+Aggregation interval for the monitoring in microseconds.
+
+The aggregation interval of DAMON for the cold memory monitoring. Please
+refer to the DAMON documentation (:doc:`usage`) for more detail. 100ms by
+default.
+
+min_nr_regions
+--------------
+
+Minimum number of monitoring regions.
+
+The minimal number of monitoring regions of DAMON for the cold memory
+monitoring. This can be used to set lower-bound of the monitoring quality.
+But, setting this too high could result in increased monitoring overhead.
+Please refer to the DAMON documentation (:doc:`usage`) for more detail. 10 by
+default.
+
+max_nr_regions
+--------------
+
+Maximum number of monitoring regions.
+
+The maximum number of monitoring regions of DAMON for the cold memory
+monitoring. This can be used to set upper-bound of the monitoring overhead.
+However, setting this too low could result in bad monitoring quality. Please
+refer to the DAMON documentation (:doc:`usage`) for more detail. 1000 by
+default.
+
+monitor_region_start
+--------------------
+
+Start of target memory region in physical address.
+
+The start physical address of memory region that DAMON_LRU_SORT will do work
+against. By default, the biggest System RAM is used as the region.
+
+monitor_region_end
+------------------
+
+End of target memory region in physical address.
+
+The end physical address of memory region that DAMON_LRU_SORT will do work
+against. By default, the biggest System RAM is used as the region.
+
+kdamond_pid
+-----------
+
+PID of the DAMON thread.
+
+If DAMON_LRU_SORT is enabled, this becomes the PID of the worker thread. Else,
+-1.
+
+nr_lru_sort_tried_hot_regions
+-----------------------------
+
+Number of hot memory regions that tried to be LRU-sorted.
+
+bytes_lru_sort_tried_hot_regions
+--------------------------------
+
+Total bytes of hot memory regions that tried to be LRU-sorted.
+
+nr_lru_sorted_hot_regions
+-------------------------
+
+Number of hot memory regions that were successfully LRU-sorted.
+
+bytes_lru_sorted_hot_regions
+----------------------------
+
+Total bytes of hot memory regions that were successfully LRU-sorted.
+
+nr_hot_quota_exceeds
+--------------------
+
+Number of times that the time quota limit for hot regions has been exceeded.
+
+nr_lru_sort_tried_cold_regions
+------------------------------
+
+Number of cold memory regions that tried to be LRU-sorted.
+
+bytes_lru_sort_tried_cold_regions
+---------------------------------
+
+Total bytes of cold memory regions that tried to be LRU-sorted.
+
+nr_lru_sorted_cold_regions
+--------------------------
+
+Number of cold memory regions that were successfully LRU-sorted.
+
+bytes_lru_sorted_cold_regions
+-----------------------------
+
+Total bytes of cold memory regions that were successfully LRU-sorted.
+
+nr_cold_quota_exceeds
+---------------------
+
+Number of times that the time quota limit for cold regions has been exceeded.
+
+Example
+=======
+
+Below example commands make DAMON_LRU_SORT find memory regions having >=50%
+access frequency and LRU-prioritize them, while LRU-deprioritizing memory
+regions that have not been accessed for 120 seconds. The prioritization and
+deprioritization are limited to using only up to 1% of CPU time, to avoid
+DAMON_LRU_SORT consuming too much CPU time for the (de)prioritization. It also
+asks DAMON_LRU_SORT to do nothing if the system's free memory rate is more than
+50%, but to start the real work if it becomes lower than 40%. If DAMON_LRU_SORT
+doesn't make progress and therefore the free memory rate becomes lower than
+20%, it asks DAMON_LRU_SORT to do nothing again, so that we can fall back to
+the LRU-list based page granularity reclamation. ::
+
+    # cd /sys/module/damon_lru_sort/parameters
+ # echo 500 > hot_thres_access_freq
+ # echo 120000000 > cold_min_age
+ # echo 10 > quota_ms
+ # echo 1000 > quota_reset_interval_ms
+ # echo 500 > wmarks_high
+ # echo 400 > wmarks_mid
+ # echo 200 > wmarks_low
+ # echo Y > enabled
diff --git a/Documentation/admin-guide/mm/damon/reclaim.rst b/Documentation/admin-guide/mm/damon/reclaim.rst
index 46306f1f34b1..4f1479a11e63 100644
--- a/Documentation/admin-guide/mm/damon/reclaim.rst
+++ b/Documentation/admin-guide/mm/damon/reclaim.rst
@@ -48,12 +48,6 @@ DAMON_RECLAIM utilizes module parameters. That is, you can put
``damon_reclaim.<parameter>=<value>`` on the kernel boot command line or write
proper values to ``/sys/modules/damon_reclaim/parameters/<parameter>`` files.
-Note that the parameter values except ``enabled`` are applied only when
-DAMON_RECLAIM starts. Therefore, if you want to apply new parameter values in
-runtime and DAMON_RECLAIM is already enabled, you should disable and re-enable
-it via ``enabled`` parameter file. Writing of the new values to proper
-parameter values should be done before the re-enablement.
-
Below are the description of each parameter.
enabled
@@ -268,4 +262,4 @@ granularity reclamation. ::
.. [1] https://research.google/pubs/pub48551/
.. [2] https://lwn.net/Articles/787611/
-.. [3] https://www.kernel.org/doc/html/latest/vm/free_page_reporting.html
+.. [3] https://www.kernel.org/doc/html/latest/mm/free_page_reporting.html
diff --git a/Documentation/admin-guide/mm/damon/usage.rst b/Documentation/admin-guide/mm/damon/usage.rst
index 1bb7b72414b2..ca91ecc29078 100644
--- a/Documentation/admin-guide/mm/damon/usage.rst
+++ b/Documentation/admin-guide/mm/damon/usage.rst
@@ -30,11 +30,11 @@ DAMON provides below interfaces for different users.
<sysfs_interface>`. This will be removed after next LTS kernel is released,
so users should move to the :ref:`sysfs interface <sysfs_interface>`.
- *Kernel Space Programming Interface.*
- :doc:`This </vm/damon/api>` is for kernel space programmers. Using this,
+ :doc:`This </mm/damon/api>` is for kernel space programmers. Using this,
users can utilize every feature of DAMON most flexibly and efficiently by
writing kernel space DAMON application programs for you. You can even extend
DAMON for various address spaces. For detail, please refer to the interface
- :doc:`document </vm/damon/api>`.
+ :doc:`document </mm/damon/api>`.
.. _sysfs_interface:
@@ -50,10 +50,10 @@ For a short example, users can monitor the virtual address space of a given
workload as below. ::
# cd /sys/kernel/mm/damon/admin/
- # echo 1 > kdamonds/nr && echo 1 > kdamonds/0/contexts/nr
+ # echo 1 > kdamonds/nr_kdamonds && echo 1 > kdamonds/0/contexts/nr_contexts
# echo vaddr > kdamonds/0/contexts/0/operations
- # echo 1 > kdamonds/0/contexts/0/targets/nr
- # echo $(pidof <workload>) > kdamonds/0/contexts/0/targets/0/pid
+ # echo 1 > kdamonds/0/contexts/0/targets/nr_targets
+ # echo $(pidof <workload>) > kdamonds/0/contexts/0/targets/0/pid_target
# echo on > kdamonds/0/state
Files Hierarchy
@@ -185,7 +185,7 @@ controls the monitoring overhead, exist. You can set and get the values by
writing to and rading from the files.
For more details about the intervals and monitoring regions range, please refer
-to the Design document (:doc:`/vm/damon/design`).
+to the Design document (:doc:`/mm/damon/design`).
contexts/<N>/targets/
---------------------
@@ -264,6 +264,8 @@ that can be written to and read from the file and their meaning are as below.
- ``pageout``: Call ``madvise()`` for the region with ``MADV_PAGEOUT``
- ``hugepage``: Call ``madvise()`` for the region with ``MADV_HUGEPAGE``
- ``nohugepage``: Call ``madvise()`` for the region with ``MADV_NOHUGEPAGE``
+ - ``lru_prio``: Prioritize the region on its LRU lists.
+ - ``lru_deprio``: Deprioritize the region on its LRU lists.
- ``stat``: Do nothing but count the statistics
schemes/<N>/access_pattern/
@@ -364,12 +366,12 @@ memory rate becomes larger than 60%, or lower than 30%". ::
# echo 1 > kdamonds/0/contexts/0/schemes/nr_schemes
# cd kdamonds/0/contexts/0/schemes/0
# # set the basic access pattern and the action
- # echo 4096 > access_patterns/sz/min
- # echo 8192 > access_patterns/sz/max
- # echo 0 > access_patterns/nr_accesses/min
- # echo 5 > access_patterns/nr_accesses/max
- # echo 10 > access_patterns/age/min
- # echo 20 > access_patterns/age/max
+ # echo 4096 > access_pattern/sz/min
+ # echo 8192 > access_pattern/sz/max
+ # echo 0 > access_pattern/nr_accesses/min
+ # echo 5 > access_pattern/nr_accesses/max
+ # echo 10 > access_pattern/age/min
+ # echo 20 > access_pattern/age/max
# echo pageout > action
# # set quotas
# echo 10 > quotas/ms
@@ -402,7 +404,7 @@ Attributes
Users can get and set the ``sampling interval``, ``aggregation interval``,
``update interval``, and min/max number of monitoring target regions by
reading from and writing to the ``attrs`` file. To know about the monitoring
-attributes in detail, please refer to the :doc:`/vm/damon/design`. For
+attributes in detail, please refer to the :doc:`/mm/damon/design`. For
example, below commands set those values to 5 ms, 100 ms, 1,000 ms, 10 and
1000, and then check it again::
diff --git a/Documentation/admin-guide/mm/hugetlbpage.rst b/Documentation/admin-guide/mm/hugetlbpage.rst
index a90330d0a837..8e2727dc18d4 100644
--- a/Documentation/admin-guide/mm/hugetlbpage.rst
+++ b/Documentation/admin-guide/mm/hugetlbpage.rst
@@ -164,8 +164,8 @@ default_hugepagesz
will all result in 256 2M huge pages being allocated. Valid default
huge page size is architecture dependent.
hugetlb_free_vmemmap
- When CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP is set, this enables optimizing
- unused vmemmap pages associated with each HugeTLB page.
+ When CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP is set, this enables HugeTLB
+ Vmemmap Optimization (HVO).
When multiple huge page sizes are supported, ``/proc/sys/vm/nr_hugepages``
indicates the current number of pre-allocated huge pages of the default size.
diff --git a/Documentation/admin-guide/mm/index.rst b/Documentation/admin-guide/mm/index.rst
index c21b5823f126..1bd11118dfb1 100644
--- a/Documentation/admin-guide/mm/index.rst
+++ b/Documentation/admin-guide/mm/index.rst
@@ -36,6 +36,7 @@ the Linux memory management.
numa_memory_policy
numaperf
pagemap
+ shrinker_debugfs
soft-dirty
swap_numa
transhuge
diff --git a/Documentation/admin-guide/mm/memory-hotplug.rst b/Documentation/admin-guide/mm/memory-hotplug.rst
index 0f56ecd8ac05..a3c9e8ad8fa0 100644
--- a/Documentation/admin-guide/mm/memory-hotplug.rst
+++ b/Documentation/admin-guide/mm/memory-hotplug.rst
@@ -653,8 +653,8 @@ block might fail:
- Concurrent activity that operates on the same physical memory area, such as
allocating gigantic pages, can result in temporary offlining failures.
-- Out of memory when dissolving huge pages, especially when freeing unused
- vmemmap pages associated with each hugetlb page is enabled.
+- Out of memory when dissolving huge pages, especially when HugeTLB Vmemmap
+ Optimization (HVO) is enabled.
Offlining code may be able to migrate huge page contents, but may not be able
to dissolve the source huge page because it fails allocating (unmovable) pages
diff --git a/Documentation/admin-guide/mm/shrinker_debugfs.rst b/Documentation/admin-guide/mm/shrinker_debugfs.rst
new file mode 100644
index 000000000000..3887f0b294fe
--- /dev/null
+++ b/Documentation/admin-guide/mm/shrinker_debugfs.rst
@@ -0,0 +1,135 @@
+.. _shrinker_debugfs:
+
+==========================
+Shrinker Debugfs Interface
+==========================
+
+The shrinker debugfs interface provides visibility into the kernel memory
+shrinker subsystem and allows users to get information about individual
+shrinkers and to interact with them.
+
+For each shrinker registered in the system, a directory is created in
+**<debugfs>/shrinker/**. The directory's name is composed of the shrinker's
+name and a unique id, e.g. *kfree_rcu-0* or *sb-xfs:vda1-36*.
+
+Each shrinker directory contains **count** and **scan** files, which allow
+triggering the *count_objects()* and *scan_objects()* callbacks for each
+memcg and numa node (if applicable).
+
+Usage:
+------
+
+1. *List registered shrinkers*
+
+ ::
+
+ $ cd /sys/kernel/debug/shrinker/
+ $ ls
+ dquota-cache-16 sb-devpts-28 sb-proc-47 sb-tmpfs-42
+ mm-shadow-18 sb-devtmpfs-5 sb-proc-48 sb-tmpfs-43
+ mm-zspool:zram0-34 sb-hugetlbfs-17 sb-pstore-31 sb-tmpfs-44
+ rcu-kfree-0 sb-hugetlbfs-33 sb-rootfs-2 sb-tmpfs-49
+ sb-aio-20 sb-iomem-12 sb-securityfs-6 sb-tracefs-13
+ sb-anon_inodefs-15 sb-mqueue-21 sb-selinuxfs-22 sb-xfs:vda1-36
+ sb-bdev-3 sb-nsfs-4 sb-sockfs-8 sb-zsmalloc-19
+ sb-bpf-32 sb-pipefs-14 sb-sysfs-26 thp-deferred_split-10
+ sb-btrfs:vda2-24 sb-proc-25 sb-tmpfs-1 thp-zero-9
+ sb-cgroup2-30 sb-proc-39 sb-tmpfs-27 xfs-buf:vda1-37
+ sb-configfs-23 sb-proc-41 sb-tmpfs-29 xfs-inodegc:vda1-38
+ sb-dax-11 sb-proc-45 sb-tmpfs-35
+ sb-debugfs-7 sb-proc-46 sb-tmpfs-40
+
+2. *Get information about a specific shrinker*
+
+ ::
+
+ $ cd sb-btrfs\:vda2-24/
+ $ ls
+ count scan
+
+3. *Count objects*
+
+ Each line in the output has the following format::
+
+ <cgroup inode id> <nr of objects on node 0> <nr of objects on node 1> ...
+ <cgroup inode id> <nr of objects on node 0> <nr of objects on node 1> ...
+ ...
+
+ If there are no objects on any numa node, the line is omitted. If there
+ are no objects at all, the output might be empty.
+
+ If the shrinker is not memcg-aware or CONFIG_MEMCG is off, 0 is printed
+ as the cgroup inode id. If the shrinker is not numa-aware, 0's are printed
+ for all nodes except the first one (a per-shrinker object total can be
+ derived from this format; see the example after the usage list).
+ ::
+
+ $ cat count
+ 1 224 2
+ 21 98 0
+ 55 818 10
+ 2367 2 0
+ 2401 30 0
+ 225 13 0
+ 599 35 0
+ 939 124 0
+ 1041 3 0
+ 1075 1 0
+ 1109 1 0
+ 1279 60 0
+ 1313 7 0
+ 1347 39 0
+ 1381 3 0
+ 1449 14 0
+ 1483 63 0
+ 1517 53 0
+ 1551 6 0
+ 1585 1 0
+ 1619 6 0
+ 1653 40 0
+ 1687 11 0
+ 1721 8 0
+ 1755 4 0
+ 1789 52 0
+ 1823 888 0
+ 1857 1 0
+ 1925 2 0
+ 1959 32 0
+ 2027 22 0
+ 2061 9 0
+ 2469 799 0
+ 2537 861 0
+ 2639 1 0
+ 2707 70 0
+ 2775 4 0
+ 2877 84 0
+ 293 1 0
+ 735 8 0
+
+4. *Scan objects*
+
+ The expected input format::
+
+ <cgroup inode id> <numa id> <number of objects to scan>
+
+ For a non-memcg-aware shrinker, or on a system with no memory
+ cgroups, **0** should be passed as the cgroup id.
+ ::
+
+ $ cd /sys/kernel/debug/shrinker/
+ $ cd sb-btrfs\:vda2-24/
+
+ $ cat count | head -n 5
+ 1 212 0
+ 21 97 0
+ 55 802 5
+ 2367 2 0
+ 225 13 0
+
+ $ echo "55 0 200" > scan
+
+ $ cat count | head -n 5
+ 1 212 0
+ 21 96 0
+ 55 752 5
+ 2367 2 0
+ 225 13 0
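+
+Since every *count* line follows the format described above, a per-shrinker
+object total can be derived with standard tools. For example (illustrative
+only, the totals depend entirely on the current system state)::
+
+ $ awk '{for (i = 2; i <= NF; i++) s += $i} END {print s + 0}' count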
diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
index 8ab042beeb76..ee6572b1edad 100644
--- a/Documentation/admin-guide/sysctl/kernel.rst
+++ b/Documentation/admin-guide/sysctl/kernel.rst
@@ -592,6 +592,18 @@ to the guest kernel command line (see
Documentation/admin-guide/kernel-parameters.rst).
+nmi_wd_lpm_factor (PPC only)
+============================
+
+Factor to apply to the NMI watchdog timeout (only when ``nmi_watchdog`` is
+set to 1). This factor represents the percentage added to
+``watchdog_thresh`` when calculating the NMI watchdog timeout during a
+live partition migration (LPM). The soft lockup timeout is not impacted.
+
+A value of 0 means no change. The default value is 200, meaning the NMI
+watchdog timeout is set to 30s (based on ``watchdog_thresh`` equal to 10).
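+
+As a worked example of the percentages above (an illustrative calculation,
+not an exact kernel formula)::
+
+ # effective NMI watchdog timeout during a live partition migration:
+ #   watchdog_thresh + watchdog_thresh * nmi_wd_lpm_factor / 100
+ #   = 10 + 10 * 200 / 100 = 30 seconds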
+
+
numa_balancing
==============
diff --git a/Documentation/admin-guide/sysctl/net.rst b/Documentation/admin-guide/sysctl/net.rst
index 805f2281e000..60d44165fba7 100644
--- a/Documentation/admin-guide/sysctl/net.rst
+++ b/Documentation/admin-guide/sysctl/net.rst
@@ -271,7 +271,7 @@ poll cycle or the number of packets processed reaches netdev_budget.
netdev_max_backlog
------------------
-Maximum number of packets, queued on the INPUT side, when the interface
+Maximum number of packets, queued on the INPUT side, when the interface
receives packets faster than kernel can process them.
netdev_rss_key
diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst
index 5c9aa171a0d3..9b833e439f09 100644
--- a/Documentation/admin-guide/sysctl/vm.rst
+++ b/Documentation/admin-guide/sysctl/vm.rst
@@ -565,13 +565,11 @@ See Documentation/admin-guide/mm/hugetlbpage.rst
hugetlb_optimize_vmemmap
========================
-This knob is not available when memory_hotplug.memmap_on_memory (kernel parameter)
-is configured or the size of 'struct page' (a structure defined in
-include/linux/mm_types.h) is not power of two (an unusual system config could
+This knob is not available when the size of 'struct page' (a structure defined
+in include/linux/mm_types.h) is not a power of two (an unusual system config could
result in this).
-Enable (set to 1) or disable (set to 0) the feature of optimizing vmemmap pages
-associated with each HugeTLB page.
+Enable (set to 1) or disable (set to 0) HugeTLB Vmemmap Optimization (HVO).
Once enabled, the vmemmap pages of subsequent allocation of HugeTLB pages from
buddy allocator will be optimized (7 pages per 2MB HugeTLB page and 4095 pages
@@ -760,7 +758,7 @@ and don't use much of it.
The default value is 0.
-See Documentation/vm/overcommit-accounting.rst and
+See Documentation/mm/overcommit-accounting.rst and
mm/util.c::__vm_enough_memory() for more information.
diff --git a/Documentation/arm64/elf_hwcaps.rst b/Documentation/arm64/elf_hwcaps.rst
index 52b75a25c205..311021f2e560 100644
--- a/Documentation/arm64/elf_hwcaps.rst
+++ b/Documentation/arm64/elf_hwcaps.rst
@@ -242,44 +242,34 @@ HWCAP2_MTE3
by Documentation/arm64/memory-tagging-extension.rst.
HWCAP2_SME
-
Functionality implied by ID_AA64PFR1_EL1.SME == 0b0001, as described
by Documentation/arm64/sme.rst.
HWCAP2_SME_I16I64
-
Functionality implied by ID_AA64SMFR0_EL1.I16I64 == 0b1111.
HWCAP2_SME_F64F64
-
Functionality implied by ID_AA64SMFR0_EL1.F64F64 == 0b1.
HWCAP2_SME_I8I32
-
Functionality implied by ID_AA64SMFR0_EL1.I8I32 == 0b1111.
HWCAP2_SME_F16F32
-
Functionality implied by ID_AA64SMFR0_EL1.F16F32 == 0b1.
HWCAP2_SME_B16F32
-
Functionality implied by ID_AA64SMFR0_EL1.B16F32 == 0b1.
HWCAP2_SME_F32F32
-
Functionality implied by ID_AA64SMFR0_EL1.F32F32 == 0b1.
HWCAP2_SME_FA64
-
Functionality implied by ID_AA64SMFR0_EL1.FA64 == 0b1.
HWCAP2_WFXT
-
Functionality implied by ID_AA64ISAR2_EL1.WFXT == 0b0010.
HWCAP2_EBF16
-
Functionality implied by ID_AA64ISAR1_EL1.BF16 == 0b0010.
4. Unused AT_HWCAP bits
diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst
index 33b04db8408f..fda97b3fcf01 100644
--- a/Documentation/arm64/silicon-errata.rst
+++ b/Documentation/arm64/silicon-errata.rst
@@ -52,6 +52,8 @@ stable kernels.
| Allwinner | A64/R18 | UNKNOWN1 | SUN50I_ERRATUM_UNKNOWN1 |
+----------------+-----------------+-----------------+-----------------------------+
+----------------+-----------------+-----------------+-----------------------------+
+| ARM | Cortex-A510 | #2457168 | ARM64_ERRATUM_2457168 |
++----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A510 | #2064142 | ARM64_ERRATUM_2064142 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A510 | #2038923 | ARM64_ERRATUM_2038923 |
diff --git a/Documentation/atomic_bitops.txt b/Documentation/atomic_bitops.txt
index 093cdaefdb37..edea4656c5c0 100644
--- a/Documentation/atomic_bitops.txt
+++ b/Documentation/atomic_bitops.txt
@@ -58,13 +58,11 @@ Like with atomic_t, the rule of thumb is:
- RMW operations that have a return value are fully ordered.
- - RMW operations that are conditional are unordered on FAILURE,
- otherwise the above rules apply. In the case of test_and_{}_bit() operations,
- if the bit in memory is unchanged by the operation then it is deemed to have
- failed.
+ - RMW operations that are conditional are fully ordered.
-Except for a successful test_and_set_bit_lock() which has ACQUIRE semantics and
-clear_bit_unlock() which has RELEASE semantics.
+Except for a successful test_and_set_bit_lock() which has ACQUIRE semantics,
+clear_bit_unlock() which has RELEASE semantics, and test_bit_acquire() which
+has ACQUIRE semantics.
Since a platform only has a single means of achieving atomic operations
the same barriers as for atomic_t are used, see atomic_t.txt.
diff --git a/Documentation/block/null_blk.rst b/Documentation/block/null_blk.rst
index edbbab2f12f8..4dd78f24d10a 100644
--- a/Documentation/block/null_blk.rst
+++ b/Documentation/block/null_blk.rst
@@ -72,6 +72,28 @@ submit_queues=[1..nr_cpus]: Default: 1
hw_queue_depth=[0..qdepth]: Default: 64
The hardware queue depth of the device.
+memory_backed=[0/1]: Default: 0
+ Whether or not to use a memory buffer to respond to IO requests
+
+ = =============================================
+ 0 Transfer no data in response to IO requests
+ 1 Use a memory buffer to respond to IO requests
+ = =============================================
+
+discard=[0/1]: Default: 0
+ Support discard operations (requires memory-backed null_blk device).
+
+ = =====================================
+ 0 Do not support discard operations
+ 1 Enable support for discard operations
+ = =====================================
+
+cache_size=[Size in MB]: Default: 0
+ Cache size in MB for the memory-backed device.
+
+mbps=[Maximum bandwidth in MB/s]: Default: 0 (no limit)
+ Bandwidth limit for the device, in MB/s.
+
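+For example, a memory-backed null_blk device with discard support, a 64 MB
+cache and a 100 MB/s bandwidth cap could be created with the following
+command (an illustrative combination of the module parameters documented
+above)::
+
+  modprobe null_blk memory_backed=1 discard=1 cache_size=64 mbps=100
+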
Multi-queue specific parameters
-------------------------------
diff --git a/Documentation/bpf/bpf_design_QA.rst b/Documentation/bpf/bpf_design_QA.rst
index 437de2a7a5de..a210b8a4df00 100644
--- a/Documentation/bpf/bpf_design_QA.rst
+++ b/Documentation/bpf/bpf_design_QA.rst
@@ -214,6 +214,12 @@ A: NO. Tracepoints are tied to internal implementation details hence they are
subject to change and can break with newer kernels. BPF programs need to change
accordingly when this happens.
+Q: Are places where kprobes can attach part of the stable ABI?
+--------------------------------------------------------------
+A: NO. The places to which kprobes can attach are internal implementation
+details, which means that they are subject to change and can break with
+newer kernels. BPF programs need to change accordingly when this happens.
+
Q: How much stack space a BPF program uses?
-------------------------------------------
A: Currently all program types are limited to 512 bytes of stack
@@ -273,3 +279,22 @@ cc (congestion-control) implementations. If any of these kernel
functions has changed, both the in-tree and out-of-tree kernel tcp cc
implementations have to be changed. The same goes for the bpf
programs and they have to be adjusted accordingly.
+
+Q: Attaching to arbitrary kernel functions is an ABI?
+-----------------------------------------------------
+Q: BPF programs can be attached to many kernel functions. Do these
+kernel functions become part of the ABI?
+
+A: NO.
+
+The kernel function prototypes will change, and BPF programs attaching to
+them will need to change. The BPF compile-once-run-everywhere (CO-RE)
+approach should be used in order to make it easier to adapt your BPF
+programs to different versions of the kernel.
+
+Q: Marking a function with BTF_ID makes that function an ABI?
+-------------------------------------------------------------
+A: NO.
+
+The BTF_ID macro does not cause a function to become part of the ABI
+any more than does the EXPORT_SYMBOL_GPL macro.
diff --git a/Documentation/conf.py b/Documentation/conf.py
index 934727e23e0e..255384d094bf 100644
--- a/Documentation/conf.py
+++ b/Documentation/conf.py
@@ -86,6 +86,7 @@ if major >= 3:
"__used",
"__weak",
"noinline",
+ "__fix_address",
# include/linux/memblock.h:
"__init_memblock",
diff --git a/Documentation/core-api/bus-virt-phys-mapping.rst b/Documentation/core-api/bus-virt-phys-mapping.rst
deleted file mode 100644
index c72b24a7d52c..000000000000
--- a/Documentation/core-api/bus-virt-phys-mapping.rst
+++ /dev/null
@@ -1,220 +0,0 @@
-==========================================================
-How to access I/O mapped memory from within device drivers
-==========================================================
-
-:Author: Linus
-
-.. warning::
-
- The virt_to_bus() and bus_to_virt() functions have been
- superseded by the functionality provided by the PCI DMA interface
- (see Documentation/core-api/dma-api-howto.rst). They continue
- to be documented below for historical purposes, but new code
- must not use them. --davidm 00/12/12
-
-::
-
- [ This is a mail message in response to a query on IO mapping, thus the
- strange format for a "document" ]
-
-The AHA-1542 is a bus-master device, and your patch makes the driver give the
-controller the physical address of the buffers, which is correct on x86
-(because all bus master devices see the physical memory mappings directly).
-
-However, on many setups, there are actually **three** different ways of looking
-at memory addresses, and in this case we actually want the third, the
-so-called "bus address".
-
-Essentially, the three ways of addressing memory are (this is "real memory",
-that is, normal RAM--see later about other details):
-
- - CPU untranslated. This is the "physical" address. Physical address
- 0 is what the CPU sees when it drives zeroes on the memory bus.
-
- - CPU translated address. This is the "virtual" address, and is
- completely internal to the CPU itself with the CPU doing the appropriate
- translations into "CPU untranslated".
-
- - bus address. This is the address of memory as seen by OTHER devices,
- not the CPU. Now, in theory there could be many different bus
- addresses, with each device seeing memory in some device-specific way, but
- happily most hardware designers aren't actually actively trying to make
- things any more complex than necessary, so you can assume that all
- external hardware sees the memory the same way.
-
-Now, on normal PCs the bus address is exactly the same as the physical
-address, and things are very simple indeed. However, they are that simple
-because the memory and the devices share the same address space, and that is
-not generally necessarily true on other PCI/ISA setups.
-
-Now, just as an example, on the PReP (PowerPC Reference Platform), the
-CPU sees a memory map something like this (this is from memory)::
-
- 0-2 GB "real memory"
- 2 GB-3 GB "system IO" (inb/out and similar accesses on x86)
- 3 GB-4 GB "IO memory" (shared memory over the IO bus)
-
-Now, that looks simple enough. However, when you look at the same thing from
-the viewpoint of the devices, you have the reverse, and the physical memory
-address 0 actually shows up as address 2 GB for any IO master.
-
-So when the CPU wants any bus master to write to physical memory 0, it
-has to give the master address 0x80000000 as the memory address.
-
-So, for example, depending on how the kernel is actually mapped on the
-PPC, you can end up with a setup like this::
-
- physical address: 0
- virtual address: 0xC0000000
- bus address: 0x80000000
-
-where all the addresses actually point to the same thing. It's just seen
-through different translations..
-
-Similarly, on the Alpha, the normal translation is::
-
- physical address: 0
- virtual address: 0xfffffc0000000000
- bus address: 0x40000000
-
-(but there are also Alphas where the physical address and the bus address
-are the same).
-
-Anyway, the way to look up all these translations, you do::
-
- #include <asm/io.h>
-
- phys_addr = virt_to_phys(virt_addr);
- virt_addr = phys_to_virt(phys_addr);
- bus_addr = virt_to_bus(virt_addr);
- virt_addr = bus_to_virt(bus_addr);
-
-Now, when do you need these?
-
-You want the **virtual** address when you are actually going to access that
-pointer from the kernel. So you can have something like this::
-
- /*
- * this is the hardware "mailbox" we use to communicate with
- * the controller. The controller sees this directly.
- */
- struct mailbox {
- __u32 status;
- __u32 bufstart;
- __u32 buflen;
- ..
- } mbox;
-
- unsigned char * retbuffer;
-
- /* get the address from the controller */
- retbuffer = bus_to_virt(mbox.bufstart);
- switch (retbuffer[0]) {
- case STATUS_OK:
- ...
-
-on the other hand, you want the bus address when you have a buffer that
-you want to give to the controller::
-
- /* ask the controller to read the sense status into "sense_buffer" */
- mbox.bufstart = virt_to_bus(&sense_buffer);
- mbox.buflen = sizeof(sense_buffer);
- mbox.status = 0;
- notify_controller(&mbox);
-
-And you generally **never** want to use the physical address, because you can't
-use that from the CPU (the CPU only uses translated virtual addresses), and
-you can't use it from the bus master.
-
-So why do we care about the physical address at all? We do need the physical
-address in some cases, it's just not very often in normal code. The physical
-address is needed if you use memory mappings, for example, because the
-"remap_pfn_range()" mm function wants the physical address of the memory to
-be remapped as measured in units of pages, a.k.a. the pfn (the memory
-management layer doesn't know about devices outside the CPU, so it
-shouldn't need to know about "bus addresses" etc).
-
-.. note::
-
- The above is only one part of the whole equation. The above
- only talks about "real memory", that is, CPU memory (RAM).
-
-There is a completely different type of memory too, and that's the "shared
-memory" on the PCI or ISA bus. That's generally not RAM (although in the case
-of a video graphics card it can be normal DRAM that is just used for a frame
-buffer), but can be things like a packet buffer in a network card etc.
-
-This memory is called "PCI memory" or "shared memory" or "IO memory" or
-whatever, and there is only one way to access it: the readb/writeb and
-related functions. You should never take the address of such memory, because
-there is really nothing you can do with such an address: it's not
-conceptually in the same memory space as "real memory" at all, so you cannot
-just dereference a pointer. (Sadly, on x86 it **is** in the same memory space,
-so on x86 it actually works to just deference a pointer, but it's not
-portable).
-
-For such memory, you can do things like:
-
- - reading::
-
- /*
- * read first 32 bits from ISA memory at 0xC0000, aka
- * C000:0000 in DOS terms
- */
- unsigned int signature = isa_readl(0xC0000);
-
- - remapping and writing::
-
- /*
- * remap framebuffer PCI memory area at 0xFC000000,
- * size 1MB, so that we can access it: We can directly
- * access only the 640k-1MB area, so anything else
- * has to be remapped.
- */
- void __iomem *baseptr = ioremap(0xFC000000, 1024*1024);
-
- /* write a 'A' to the offset 10 of the area */
- writeb('A',baseptr+10);
-
- /* unmap when we unload the driver */
- iounmap(baseptr);
-
- - copying and clearing::
-
- /* get the 6-byte Ethernet address at ISA address E000:0040 */
- memcpy_fromio(kernel_buffer, 0xE0040, 6);
- /* write a packet to the driver */
- memcpy_toio(0xE1000, skb->data, skb->len);
- /* clear the frame buffer */
- memset_io(0xA0000, 0, 0x10000);
-
-OK, that just about covers the basics of accessing IO portably. Questions?
-Comments? You may think that all the above is overly complex, but one day you
-might find yourself with a 500 MHz Alpha in front of you, and then you'll be
-happy that your driver works ;)
-
-Note that kernel versions 2.0.x (and earlier) mistakenly called the
-ioremap() function "vremap()". ioremap() is the proper name, but I
-didn't think straight when I wrote it originally. People who have to
-support both can do something like::
-
- /* support old naming silliness */
- #if LINUX_VERSION_CODE < 0x020100
- #define ioremap vremap
- #define iounmap vfree
- #endif
-
-at the top of their source files, and then they can use the right names
-even on 2.0.x systems.
-
-And the above sounds worse than it really is. Most real drivers really
-don't do all that complex things (or rather: the complexity is not so
-much in the actual IO accesses as in error handling and timeouts etc).
-It's generally not hard to fix drivers, and in many cases the code
-actually looks better afterwards::
-
- unsigned long signature = *(unsigned int *) 0xC0000;
- vs
- unsigned long signature = readl(0xC0000);
-
-I think the second version actually is more readable, no?
diff --git a/Documentation/core-api/dma-api-howto.rst b/Documentation/core-api/dma-api-howto.rst
index 358d495456d1..828846804e25 100644
--- a/Documentation/core-api/dma-api-howto.rst
+++ b/Documentation/core-api/dma-api-howto.rst
@@ -707,20 +707,6 @@ to use the dma_sync_*() interfaces::
}
}
-Drivers converted fully to this interface should not use virt_to_bus() any
-longer, nor should they use bus_to_virt(). Some drivers have to be changed a
-little bit, because there is no longer an equivalent to bus_to_virt() in the
-dynamic DMA mapping scheme - you have to always store the DMA addresses
-returned by the dma_alloc_coherent(), dma_pool_alloc(), and dma_map_single()
-calls (dma_map_sg() stores them in the scatterlist itself if the platform
-supports dynamic DMA mapping in hardware) in your driver structures and/or
-in the card registers.
-
-All drivers should be using these interfaces with no exceptions. It
-is planned to completely remove virt_to_bus() and bus_to_virt() as
-they are entirely deprecated. Some ports already do not provide these
-as it is impossible to correctly support them.
-
Handling Errors
===============
diff --git a/Documentation/core-api/dma-api.rst b/Documentation/core-api/dma-api.rst
index 6d6d0edd2d27..829f20a193ca 100644
--- a/Documentation/core-api/dma-api.rst
+++ b/Documentation/core-api/dma-api.rst
@@ -206,6 +206,20 @@ others should not be larger than the returned value.
::
+ size_t
+ dma_opt_mapping_size(struct device *dev);
+
+Returns the maximum optimal size of a mapping for the device.
+
+Mapping larger buffers may take much longer in certain scenarios. In
+addition, for high-rate short-lived streaming mappings, the upfront time
+spent on the mapping may account for an appreciable part of the total
+request lifetime. As such, if splitting larger requests incurs no
+significant performance penalty, then device drivers are advised to
+limit the total length of DMA streaming mappings to the returned value.
+
+::
+
bool
dma_need_sync(struct device *dev, dma_addr_t dma_addr);
diff --git a/Documentation/core-api/index.rst b/Documentation/core-api/index.rst
index dedd4d853329..dc95df462eea 100644
--- a/Documentation/core-api/index.rst
+++ b/Documentation/core-api/index.rst
@@ -41,7 +41,6 @@ Library functionality that is used throughout the kernel.
rbtree
generic-radix-tree
packing
- bus-virt-phys-mapping
this_cpu_ops
timekeeping
errseq
@@ -87,7 +86,7 @@ Memory management
=================
How to allocate and use memory in the kernel. Note that there is a lot
-more memory-management documentation in Documentation/vm/index.rst.
+more memory-management documentation in Documentation/mm/index.rst.
.. toctree::
:maxdepth: 1
diff --git a/Documentation/core-api/mm-api.rst b/Documentation/core-api/mm-api.rst
index f5b2f92822c8..1ebcc6c3fafe 100644
--- a/Documentation/core-api/mm-api.rst
+++ b/Documentation/core-api/mm-api.rst
@@ -22,16 +22,16 @@ Memory Allocation Controls
.. kernel-doc:: include/linux/gfp.h
:internal:
-.. kernel-doc:: include/linux/gfp.h
+.. kernel-doc:: include/linux/gfp_types.h
:doc: Page mobility and placement hints
-.. kernel-doc:: include/linux/gfp.h
+.. kernel-doc:: include/linux/gfp_types.h
:doc: Watermark modifiers
-.. kernel-doc:: include/linux/gfp.h
+.. kernel-doc:: include/linux/gfp_types.h
:doc: Reclaim modifiers
-.. kernel-doc:: include/linux/gfp.h
+.. kernel-doc:: include/linux/gfp_types.h
:doc: Useful GFP flag combinations
The Slab Cache
diff --git a/Documentation/dev-tools/kmemleak.rst b/Documentation/dev-tools/kmemleak.rst
index 1c935f41cd3a..5483fd39ef29 100644
--- a/Documentation/dev-tools/kmemleak.rst
+++ b/Documentation/dev-tools/kmemleak.rst
@@ -174,7 +174,6 @@ mapping:
- ``kmemleak_alloc_phys``
- ``kmemleak_free_part_phys``
-- ``kmemleak_not_leak_phys``
- ``kmemleak_ignore_phys``
Dealing with false positives/negatives
diff --git a/Documentation/devicetree/bindings/Makefile b/Documentation/devicetree/bindings/Makefile
index c9953f86b19d..1eaccf135b30 100644
--- a/Documentation/devicetree/bindings/Makefile
+++ b/Documentation/devicetree/bindings/Makefile
@@ -42,9 +42,7 @@ quiet_cmd_chk_bindings = CHKDT $@
quiet_cmd_mk_schema = SCHEMA $@
cmd_mk_schema = f=$$(mktemp) ; \
- $(if $(DT_MK_SCHEMA_FLAGS), \
- printf '%s\n' $(real-prereqs), \
- $(find_all_cmd)) > $$f ; \
+ $(find_all_cmd) > $$f ; \
$(DT_MK_SCHEMA) -j $(DT_MK_SCHEMA_FLAGS) @$$f > $@ ; \
rm -f $$f
diff --git a/Documentation/devicetree/bindings/arm/atmel-sysregs.txt b/Documentation/devicetree/bindings/arm/atmel-sysregs.txt
index 16eef600d599..ab1b352344ae 100644
--- a/Documentation/devicetree/bindings/arm/atmel-sysregs.txt
+++ b/Documentation/devicetree/bindings/arm/atmel-sysregs.txt
@@ -25,21 +25,6 @@ System Timer (ST) required properties:
Its subnodes can be:
- watchdog: compatible should be "atmel,at91rm9200-wdt"
-RSTC Reset Controller required properties:
-- compatible: Should be "atmel,<chip>-rstc".
- <chip> can be "at91sam9260", "at91sam9g45", "sama5d3" or "samx7"
- it also can be "microchip,sam9x60-rstc"
-- reg: Should contain registers location and length
-- clocks: phandle to input clock.
-
-Example:
-
- rstc@fffffd00 {
- compatible = "atmel,at91sam9260-rstc";
- reg = <0xfffffd00 0x10>;
- clocks = <&clk32k>;
- };
-
RAMC SDRAM/DDR Controller required properties:
- compatible: Should be "atmel,at91rm9200-sdramc", "syscon"
"atmel,at91sam9260-sdramc",
diff --git a/Documentation/devicetree/bindings/arm/cpus.yaml b/Documentation/devicetree/bindings/arm/cpus.yaml
index 5c2e3a5f3789..a07c5bac7c46 100644
--- a/Documentation/devicetree/bindings/arm/cpus.yaml
+++ b/Documentation/devicetree/bindings/arm/cpus.yaml
@@ -138,6 +138,7 @@ properties:
- arm,cortex-a76
- arm,cortex-a77
- arm,cortex-a78
+ - arm,cortex-a78ae
- arm,cortex-a510
- arm,cortex-a710
- arm,cortex-m0
diff --git a/Documentation/devicetree/bindings/arm/marvell/ap80x-system-controller.txt b/Documentation/devicetree/bindings/arm/marvell/ap80x-system-controller.txt
index 052a967c1f28..c83245065d44 100644
--- a/Documentation/devicetree/bindings/arm/marvell/ap80x-system-controller.txt
+++ b/Documentation/devicetree/bindings/arm/marvell/ap80x-system-controller.txt
@@ -72,7 +72,7 @@ mpp19 19 gpio, uart0(rxd), sdio(pw_off)
GPIO:
-----
For common binding part and usage, refer to
-Documentation/devicetree/bindings/gpio/gpio-mvebu.txt.
+Documentation/devicetree/bindings/gpio/gpio-mvebu.yaml.
Required properties:
diff --git a/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller.txt b/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller.txt
index 0705e765f432..d84105c7c935 100644
--- a/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller.txt
+++ b/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller.txt
@@ -156,7 +156,7 @@ GPIO:
-----
For common binding part and usage, refer to
-Documentation/devicetree/bindings/gpio/gpio-mvebu.txt.
+Documentation/devicetree/bindings/gpio/gpio-mvebu.yaml.
Required properties:
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8186-sys-clock.yaml b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8186-sys-clock.yaml
index 0886e2e335bb..661047d26e11 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8186-sys-clock.yaml
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8186-sys-clock.yaml
@@ -39,6 +39,9 @@ properties:
'#clock-cells':
const: 1
+ '#reset-cells':
+ const: 1
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8192-clock.yaml b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8192-clock.yaml
index c8c67c033f8c..b57cc2e69efb 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8192-clock.yaml
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8192-clock.yaml
@@ -24,7 +24,6 @@ properties:
- mediatek,mt8192-imp_iic_wrap_w
- mediatek,mt8192-imp_iic_wrap_n
- mediatek,mt8192-msdc_top
- - mediatek,mt8192-msdc
- mediatek,mt8192-mfgcfg
- mediatek,mt8192-imgsys
- mediatek,mt8192-imgsys2
@@ -108,13 +107,6 @@ examples:
};
- |
- msdc: clock-controller@11f60000 {
- compatible = "mediatek,mt8192-msdc";
- reg = <0x11f60000 0x1000>;
- #clock-cells = <1>;
- };
-
- - |
mfgcfg: clock-controller@13fbf000 {
compatible = "mediatek,mt8192-mfgcfg";
reg = <0x13fbf000 0x1000>;
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8192-sys-clock.yaml b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8192-sys-clock.yaml
index 5705bcf1fe47..27f79175c678 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8192-sys-clock.yaml
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8192-sys-clock.yaml
@@ -29,6 +29,9 @@ properties:
'#clock-cells':
const: 1
+ '#reset-cells':
+ const: 1
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8195-sys-clock.yaml b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8195-sys-clock.yaml
index 57a1503d95fe..95b6bdf99936 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8195-sys-clock.yaml
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8195-sys-clock.yaml
@@ -37,6 +37,9 @@ properties:
'#clock-cells':
const: 1
+ '#reset-cells':
+ const: 1
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,saw2.txt b/Documentation/devicetree/bindings/arm/msm/qcom,saw2.txt
index 94d50a949be1..c0e3c3a42bea 100644
--- a/Documentation/devicetree/bindings/arm/msm/qcom,saw2.txt
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,saw2.txt
@@ -10,7 +10,7 @@ system, notifying them when a low power state is entered or exited.
Multiple revisions of the SAW hardware are supported using these Device Nodes.
SAW2 revisions differ in the register offset and configuration data. Also, the
same revision of the SAW in different SoCs may have different configuration
-data due the the differences in hardware capabilities. Hence the SoC name, the
+data due to the differences in hardware capabilities. Hence the SoC name, the
version of the SAW hardware in that SoC and the distinction between cpu (big
or Little) or cache, may be needed to uniquely identify the SAW register
configuration and initialization data. The compatible string is used to
diff --git a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.yaml b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.yaml
index 564ae6aaccf7..7fd8d47b1be4 100644
--- a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.yaml
+++ b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.yaml
@@ -208,7 +208,7 @@ properties:
"^[a-z0-9]+$":
type: object
- patternProperties:
+ properties:
clocks:
minItems: 1
maxItems: 8
diff --git a/Documentation/devicetree/bindings/arm/vexpress-sysreg.yaml b/Documentation/devicetree/bindings/arm/vexpress-sysreg.yaml
index b5e26e41f88c..f04db802a732 100644
--- a/Documentation/devicetree/bindings/arm/vexpress-sysreg.yaml
+++ b/Documentation/devicetree/bindings/arm/vexpress-sysreg.yaml
@@ -29,6 +29,13 @@ properties:
ranges: true
+ gpio-controller:
+ deprecated: true
+
+ "#gpio-cells":
+ deprecated: true
+ const: 2
+
additionalProperties: false
patternProperties:
@@ -67,8 +74,7 @@ patternProperties:
required:
- compatible
- - "#address-cells"
- - "#size-cells"
+ - reg
examples:
- |
diff --git a/Documentation/devicetree/bindings/ata/ahci-ceva.txt b/Documentation/devicetree/bindings/ata/ahci-ceva.txt
deleted file mode 100644
index bfb6da0281ec..000000000000
--- a/Documentation/devicetree/bindings/ata/ahci-ceva.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-Binding for CEVA AHCI SATA Controller
-
-Required properties:
- - reg: Physical base address and size of the controller's register area.
- - compatible: Compatibility string. Must be 'ceva,ahci-1v84'.
- - clocks: Input clock specifier. Refer to common clock bindings.
- - interrupts: Interrupt specifier. Refer to interrupt binding.
- - ceva,p0-cominit-params: OOB timing value for COMINIT parameter for port 0.
- - ceva,p1-cominit-params: OOB timing value for COMINIT parameter for port 1.
- The fields for the above parameter must be as shown below:
- ceva,pN-cominit-params = /bits/ 8 <CIBGMN CIBGMX CIBGN CINMP>;
- CINMP : COMINIT Negate Minimum Period.
- CIBGN : COMINIT Burst Gap Nominal.
- CIBGMX: COMINIT Burst Gap Maximum.
- CIBGMN: COMINIT Burst Gap Minimum.
- - ceva,p0-comwake-params: OOB timing value for COMWAKE parameter for port 0.
- - ceva,p1-comwake-params: OOB timing value for COMWAKE parameter for port 1.
- The fields for the above parameter must be as shown below:
- ceva,pN-comwake-params = /bits/ 8 <CWBGMN CWBGMX CWBGN CWNMP>;
- CWBGMN: COMWAKE Burst Gap Minimum.
- CWBGMX: COMWAKE Burst Gap Maximum.
- CWBGN: COMWAKE Burst Gap Nominal.
- CWNMP: COMWAKE Negate Minimum Period.
- - ceva,p0-burst-params: Burst timing value for COM parameter for port 0.
- - ceva,p1-burst-params: Burst timing value for COM parameter for port 1.
- The fields for the above parameter must be as shown below:
- ceva,pN-burst-params = /bits/ 8 <BMX BNM SFD PTST>;
- BMX: COM Burst Maximum.
- BNM: COM Burst Nominal.
- SFD: Signal Failure Detection value.
- PTST: Partial to Slumber timer value.
- - ceva,p0-retry-params: Retry interval timing value for port 0.
- - ceva,p1-retry-params: Retry interval timing value for port 1.
- The fields for the above parameter must be as shown below:
- ceva,pN-retry-params = /bits/ 16 <RIT RCT>;
- RIT: Retry Interval Timer.
- RCT: Rate Change Timer.
-
-Optional properties:
- - ceva,broken-gen2: limit to gen1 speed instead of gen2.
- - phys: phandle for the PHY device
- - resets: phandle to the reset controller for the SATA IP
-
-Examples:
- ahci@fd0c0000 {
- compatible = "ceva,ahci-1v84";
- reg = <0xfd0c0000 0x200>;
- interrupt-parent = <&gic>;
- interrupts = <0 133 4>;
- clocks = <&clkc SATA_CLK_ID>;
- ceva,p0-cominit-params = /bits/ 8 <0x0F 0x25 0x18 0x29>;
- ceva,p0-comwake-params = /bits/ 8 <0x04 0x0B 0x08 0x0F>;
- ceva,p0-burst-params = /bits/ 8 <0x0A 0x08 0x4A 0x06>;
- ceva,p0-retry-params = /bits/ 16 <0x0216 0x7F06>;
-
- ceva,p1-cominit-params = /bits/ 8 <0x0F 0x25 0x18 0x29>;
- ceva,p1-comwake-params = /bits/ 8 <0x04 0x0B 0x08 0x0F>;
- ceva,p1-burst-params = /bits/ 8 <0x0A 0x08 0x4A 0x06>;
- ceva,p1-retry-params = /bits/ 16 <0x0216 0x7F06>;
- ceva,broken-gen2;
- phys = <&psgtr 1 PHY_TYPE_SATA 1 1>;
- resets = <&zynqmp_reset ZYNQMP_RESET_SATA>;
- };
diff --git a/Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml b/Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml
new file mode 100644
index 000000000000..9b31f864e071
--- /dev/null
+++ b/Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml
@@ -0,0 +1,189 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ata/ceva,ahci-1v84.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ceva AHCI SATA Controller
+
+maintainers:
+ - Piyush Mehta <piyush.mehta@xilinx.com>
+
+description: |
+ The Ceva SATA controller mostly conforms to the AHCI interface, with some
+ special extensions that add functionality. It is a high-performance
+ dual-port SATA host controller with an AHCI-compliant command layer which
+ supports advanced features such as native command queuing and frame
+ information structure (FIS) based switching for systems employing port
+ multipliers.
+
+properties:
+ compatible:
+ const: ceva,ahci-1v84
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ dma-coherent: true
+
+ interrupts:
+ maxItems: 1
+
+ iommus:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ ceva,p0-cominit-params:
+ $ref: /schemas/types.yaml#/definitions/uint8-array
+ description: |
+ OOB timing value for COMINIT parameter for port 0.
+ The fields for the above parameter must be as shown below:-
+ ceva,p0-cominit-params = /bits/ 8 <CIBGMN CIBGMX CIBGN CINMP>;
+ items:
+ - description: CINMP - COMINIT Negate Minimum Period.
+ - description: CIBGN - COMINIT Burst Gap Nominal.
+ - description: CIBGMX - COMINIT Burst Gap Maximum.
+ - description: CIBGMN - COMINIT Burst Gap Minimum.
+
+ ceva,p0-comwake-params:
+ $ref: /schemas/types.yaml#/definitions/uint8-array
+ description: |
+ OOB timing value for COMWAKE parameter for port 0.
+ The fields for the above parameter must be as shown below:-
+ ceva,p0-comwake-params = /bits/ 8 <CWBGMN CWBGMX CWBGN CWNMP>;
+ items:
+ - description: CWBGMN - COMWAKE Burst Gap Minimum.
+ - description: CWBGMX - COMWAKE Burst Gap Maximum.
+ - description: CWBGN - COMWAKE Burst Gap Nominal.
+ - description: CWNMP - COMWAKE Negate Minimum Period.
+
+ ceva,p0-burst-params:
+ $ref: /schemas/types.yaml#/definitions/uint8-array
+ description: |
+ Burst timing value for COM parameter for port 0.
+ The fields for the above parameter must be as shown below:-
+ ceva,p0-burst-params = /bits/ 8 <BMX BNM SFD PTST>;
+ items:
+ - description: BMX - COM Burst Maximum.
+ - description: BNM - COM Burst Nominal.
+ - description: SFD - Signal Failure Detection value.
+ - description: PTST - Partial to Slumber timer value.
+
+ ceva,p0-retry-params:
+ $ref: /schemas/types.yaml#/definitions/uint16-array
+ description: |
+ Retry interval timing value for port 0.
+ The fields for the above parameter must be as shown below:-
+ ceva,p0-retry-params = /bits/ 16 <RIT RCT>;
+ items:
+ - description: RIT - Retry Interval Timer.
+ - description: RCT - Rate Change Timer.
+
+ ceva,p1-cominit-params:
+ $ref: /schemas/types.yaml#/definitions/uint8-array
+ description: |
+ OOB timing value for COMINIT parameter for port 1.
+ The fields for the above parameter must be as shown below:-
+ ceva,p1-cominit-params = /bits/ 8 <CIBGMN CIBGMX CIBGN CINMP>;
+ items:
+ - description: CINMP - COMINIT Negate Minimum Period.
+ - description: CIBGN - COMINIT Burst Gap Nominal.
+ - description: CIBGMX - COMINIT Burst Gap Maximum.
+ - description: CIBGMN - COMINIT Burst Gap Minimum.
+
+ ceva,p1-comwake-params:
+ $ref: /schemas/types.yaml#/definitions/uint8-array
+ description: |
+ OOB timing value for COMWAKE parameter for port 1.
+ The fields for the above parameter must be as shown below:-
+ ceva,p1-comwake-params = /bits/ 8 <CWBGMN CWBGMX CWBGN CWNMP>;
+ items:
+ - description: CWBGMN - COMWAKE Burst Gap Minimum.
+ - description: CWBGMX - COMWAKE Burst Gap Maximum.
+ - description: CWBGN - COMWAKE Burst Gap Nominal.
+ - description: CWNMP - COMWAKE Negate Minimum Period.
+
+ ceva,p1-burst-params:
+ $ref: /schemas/types.yaml#/definitions/uint8-array
+ description: |
+ Burst timing value for COM parameter for port 1.
+ The fields for the above parameter must be as shown below:-
+ ceva,p1-burst-params = /bits/ 8 <BMX BNM SFD PTST>;
+ items:
+ - description: BMX - COM Burst Maximum.
+ - description: BNM - COM Burst Nominal.
+ - description: SFD - Signal Failure Detection value.
+ - description: PTST - Partial to Slumber timer value.
+
+ ceva,p1-retry-params:
+ $ref: /schemas/types.yaml#/definitions/uint16-array
+ description: |
+ Retry interval timing value for port 1.
+ The fields for the above parameter must be as shown below:-
+ ceva,p1-retry-params = /bits/ 16 <RIT RCT>;
+ items:
+ - description: RIT - Retry Interval Timer.
+ - description: RCT - Rate Change Timer.
+
+ ceva,broken-gen2:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: |
+ limit to gen1 speed instead of gen2.
+
+ phys:
+ maxItems: 1
+
+ phy-names:
+ items:
+ - const: sata-phy
+
+ resets:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - interrupts
+ - ceva,p0-cominit-params
+ - ceva,p0-comwake-params
+ - ceva,p0-burst-params
+ - ceva,p0-retry-params
+ - ceva,p1-cominit-params
+ - ceva,p1-comwake-params
+ - ceva,p1-burst-params
+ - ceva,p1-retry-params
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/xlnx-zynqmp-clk.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/power/xlnx-zynqmp-power.h>
+ #include <dt-bindings/reset/xlnx-zynqmp-resets.h>
+ #include <dt-bindings/phy/phy.h>
+
+ sata: ahci@fd0c0000 {
+ compatible = "ceva,ahci-1v84";
+ reg = <0xfd0c0000 0x200>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 133 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&zynqmp_clk SATA_REF>;
+ ceva,p0-cominit-params = /bits/ 8 <0x0F 0x25 0x18 0x29>;
+ ceva,p0-comwake-params = /bits/ 8 <0x04 0x0B 0x08 0x0F>;
+ ceva,p0-burst-params = /bits/ 8 <0x0A 0x08 0x4A 0x06>;
+ ceva,p0-retry-params = /bits/ 16 <0x0216 0x7F06>;
+ ceva,p1-cominit-params = /bits/ 8 <0x0F 0x25 0x18 0x29>;
+ ceva,p1-comwake-params = /bits/ 8 <0x04 0x0B 0x08 0x0F>;
+ ceva,p1-burst-params = /bits/ 8 <0x0A 0x08 0x4A 0x06>;
+ ceva,p1-retry-params = /bits/ 16 <0x0216 0x7F06>;
+ ceva,broken-gen2;
+ phys = <&psgtr 1 PHY_TYPE_SATA 1 1>;
+ resets = <&zynqmp_reset ZYNQMP_RESET_SATA>;
+ };
diff --git a/Documentation/devicetree/bindings/bus/qcom,ssc-block-bus.yaml b/Documentation/devicetree/bindings/bus/qcom,ssc-block-bus.yaml
index 5b9705079015..8e9e6ff35d7d 100644
--- a/Documentation/devicetree/bindings/bus/qcom,ssc-block-bus.yaml
+++ b/Documentation/devicetree/bindings/bus/qcom,ssc-block-bus.yaml
@@ -28,11 +28,9 @@ properties:
- const: qcom,ssc-block-bus
reg:
- description: |
- Shall contain the addresses of the SSCAON_CONFIG0 and SSCAON_CONFIG1
- registers
- minItems: 2
- maxItems: 2
+ items:
+ - description: SSCAON_CONFIG0 registers
+ - description: SSCAON_CONFIG1 registers
reg-names:
items:
@@ -48,7 +46,6 @@ properties:
ranges: true
clocks:
- minItems: 6
maxItems: 6
clock-names:
@@ -61,9 +58,9 @@ properties:
- const: ssc_ahbs
power-domains:
- description: Power domain phandles for the ssc_cx and ssc_mx power domains
- minItems: 2
- maxItems: 2
+ items:
+ - description: CX power domain
+ - description: MX power domain
power-domain-names:
items:
@@ -71,11 +68,11 @@ properties:
- const: ssc_mx
resets:
- description: |
- Reset phandles for the ssc_reset and ssc_bcr resets (note: ssc_bcr is the
- branch control register associated with the ssc_xo and ssc_ahbs clocks)
- minItems: 2
- maxItems: 2
+ items:
+ - description: Main reset
+ - description:
+ SSC Branch Control Register reset (associated with the ssc_xo and
+ ssc_ahbs clocks)
reset-names:
items:
diff --git a/Documentation/devicetree/bindings/chosen.txt b/Documentation/devicetree/bindings/chosen.txt
deleted file mode 100644
index 1cc3aa10dcb1..000000000000
--- a/Documentation/devicetree/bindings/chosen.txt
+++ /dev/null
@@ -1,137 +0,0 @@
-The chosen node
----------------
-
-The chosen node does not represent a real device, but serves as a place
-for passing data between firmware and the operating system, like boot
-arguments. Data in the chosen node does not represent the hardware.
-
-The following properties are recognized:
-
-
-kaslr-seed
------------
-
-This property is used when booting with CONFIG_RANDOMIZE_BASE as the
-entropy used to randomize the kernel image base address location. Since
-it is used directly, this value is intended only for KASLR, and should
-not be used for other purposes (as it may leak information about KASLR
-offsets). It is parsed as a u64 value, e.g.
-
-/ {
- chosen {
- kaslr-seed = <0xfeedbeef 0xc0def00d>;
- };
-};
-
-Note that if this property is set from UEFI (or a bootloader in EFI
-mode) when EFI_RNG_PROTOCOL is supported, it will be overwritten by
-the Linux EFI stub (which will populate the property itself, using
-EFI_RNG_PROTOCOL).
-
-stdout-path
------------
-
-Device trees may specify the device to be used for boot console output
-with a stdout-path property under /chosen, as described in the Devicetree
-Specification, e.g.
-
-/ {
- chosen {
- stdout-path = "/serial@f00:115200";
- };
-
- serial@f00 {
- compatible = "vendor,some-uart";
- reg = <0xf00 0x10>;
- };
-};
-
-If the character ":" is present in the value, this terminates the path.
-The meaning of any characters following the ":" is device-specific, and
-must be specified in the relevant binding documentation.
-
-For UART devices, the preferred binding is a string in the form:
-
- <baud>{<parity>{<bits>{<flow>}}}
-
-where
-
- baud - baud rate in decimal
- parity - 'n' (none), 'o', (odd) or 'e' (even)
- bits - number of data bits
- flow - 'r' (rts)
-
-For example: 115200n8r
-
-Implementation note: Linux will look for the property "linux,stdout-path" or
-on PowerPC "stdout" if "stdout-path" is not found. However, the
-"linux,stdout-path" and "stdout" properties are deprecated. New platforms
-should only use the "stdout-path" property.
-
-linux,booted-from-kexec
------------------------
-
-This property is set (currently only on PowerPC, and only needed on
-book3e) by some versions of kexec-tools to tell the new kernel that it
-is being booted by kexec, as the booting environment may differ (e.g.
-a different secondary CPU release mechanism)
-
-linux,usable-memory-range
--------------------------
-
-This property holds a base address and size, describing a limited region in
-which memory may be considered available for use by the kernel. Memory outside
-of this range is not available for use.
-
-This property describes a limitation: memory within this range is only
-valid when also described through another mechanism that the kernel
-would otherwise use to determine available memory (e.g. memory nodes
-or the EFI memory map). Valid memory may be sparse within the range.
-e.g.
-
-/ {
- chosen {
- linux,usable-memory-range = <0x9 0xf0000000 0x0 0x10000000>;
- };
-};
-
-The main usage is for crash dump kernel to identify its own usable
-memory and exclude, at its boot time, any other memory areas that are
-part of the panicked kernel's memory.
-
-While this property does not represent a real hardware, the address
-and the size are expressed in #address-cells and #size-cells,
-respectively, of the root node.
-
-linux,elfcorehdr
-----------------
-
-This property holds the memory range, the address and the size, of the elf
-core header which mainly describes the panicked kernel's memory layout as
-PT_LOAD segments of elf format.
-e.g.
-
-/ {
- chosen {
- linux,elfcorehdr = <0x9 0xfffff000 0x0 0x800>;
- };
-};
-
-While this property does not represent a real hardware, the address
-and the size are expressed in #address-cells and #size-cells,
-respectively, of the root node.
-
-linux,initrd-start and linux,initrd-end
----------------------------------------
-
-These properties hold the physical start and end address of an initrd that's
-loaded by the bootloader. Note that linux,initrd-start is inclusive, but
-linux,initrd-end is exclusive.
-e.g.
-
-/ {
- chosen {
- linux,initrd-start = <0x82000000>;
- linux,initrd-end = <0x82800000>;
- };
-};
diff --git a/Documentation/devicetree/bindings/chrome/google,cros-ec-typec.yaml b/Documentation/devicetree/bindings/chrome/google,cros-ec-typec.yaml
index 2d98f7c4d3bc..50ebd8c57795 100644
--- a/Documentation/devicetree/bindings/chrome/google,cros-ec-typec.yaml
+++ b/Documentation/devicetree/bindings/chrome/google,cros-ec-typec.yaml
@@ -20,13 +20,24 @@ properties:
compatible:
const: google,cros-ec-typec
- connector:
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+patternProperties:
+ '^connector@[0-9a-f]+$':
$ref: /schemas/connector/usb-connector.yaml#
+ unevaluatedProperties: false
+ properties:
+ reg:
+ maxItems: 1
required:
- compatible
-additionalProperties: true #fixme
+additionalProperties: false
examples:
- |+
diff --git a/Documentation/devicetree/bindings/chrome/google,cros-kbd-led-backlight.yaml b/Documentation/devicetree/bindings/chrome/google,cros-kbd-led-backlight.yaml
new file mode 100644
index 000000000000..5b875af6a95a
--- /dev/null
+++ b/Documentation/devicetree/bindings/chrome/google,cros-kbd-led-backlight.yaml
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/chrome/google,cros-kbd-led-backlight.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ChromeOS keyboard backlight LED driver.
+
+maintainers:
+ - Tzung-Bi Shih <tzungbi@kernel.org>
+
+properties:
+ compatible:
+ const: google,cros-kbd-led-backlight
+
+required:
+ - compatible
+
+additionalProperties: false
+
+examples:
+ - |
+ spi0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cros_ec: ec@0 {
+ compatible = "google,cros-ec-spi";
+ reg = <0>;
+
+ kbd-led-backlight {
+ compatible = "google,cros-kbd-led-backlight";
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/clock/efm32-clock.txt b/Documentation/devicetree/bindings/clock/efm32-clock.txt
deleted file mode 100644
index 263d293f6a10..000000000000
--- a/Documentation/devicetree/bindings/clock/efm32-clock.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-* Clock bindings for Energy Micro efm32 Giant Gecko's Clock Management Unit
-
-Required properties:
-- compatible: Should be "efm32gg,cmu"
-- reg: Base address and length of the register set
-- interrupts: Interrupt used by the CMU
-- #clock-cells: Should be <1>
-
-The clock consumer should specify the desired clock by having the clock ID in
-its "clocks" phandle cell. The header efm32-clk.h contains a list of available
-IDs.
diff --git a/Documentation/devicetree/bindings/clock/fixed-factor-clock.yaml b/Documentation/devicetree/bindings/clock/fixed-factor-clock.yaml
index f415845b38dd..0b02378a3a0c 100644
--- a/Documentation/devicetree/bindings/clock/fixed-factor-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/fixed-factor-clock.yaml
@@ -13,7 +13,6 @@ maintainers:
properties:
compatible:
enum:
- - allwinner,sun4i-a10-pll3-2x-clk
- fixed-factor-clock
"#clock-cells":
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-apq8064.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-apq8064.yaml
index 9fafcb080069..3cf404c9325a 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-apq8064.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-apq8064.yaml
@@ -4,7 +4,7 @@
$id: http://devicetree.org/schemas/clock/qcom,gcc-apq8064.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: Qualcomm Global Clock & Reset Controller Binding for APQ8064
+title: Qualcomm Global Clock & Reset Controller Binding for APQ8064/MSM8960
allOf:
- $ref: qcom,gcc.yaml#
@@ -23,11 +23,25 @@ description: |
properties:
compatible:
- const: qcom,gcc-apq8064
+ oneOf:
+ - items:
+ - enum:
+ - qcom,gcc-apq8064
+ - qcom,gcc-msm8960
+ - const: syscon
+ - enum:
+ - qcom,gcc-apq8064
+ - qcom,gcc-msm8960
+ deprecated: true
+
+ thermal-sensor:
+ description: child tsens device
+ $ref: /schemas/thermal/qcom-tsens.yaml#
nvmem-cells:
minItems: 1
maxItems: 2
+ deprecated: true
description:
Qualcomm TSENS (thermal sensor device) on some devices can
be part of GCC and hence the TSENS properties can also be part
@@ -37,31 +51,39 @@ properties:
nvmem-cell-names:
minItems: 1
+ deprecated: true
items:
- const: calib
- const: calib_backup
'#thermal-sensor-cells':
const: 1
+ deprecated: true
required:
- compatible
- - nvmem-cells
- - nvmem-cell-names
- - '#thermal-sensor-cells'
unevaluatedProperties: false
examples:
- |
clock-controller@900000 {
- compatible = "qcom,gcc-apq8064";
+ compatible = "qcom,gcc-apq8064", "syscon";
reg = <0x00900000 0x4000>;
- nvmem-cells = <&tsens_calib>, <&tsens_backup>;
- nvmem-cell-names = "calib", "calib_backup";
#clock-cells = <1>;
#reset-cells = <1>;
#power-domain-cells = <1>;
- #thermal-sensor-cells = <1>;
+
+ thermal-sensor {
+ compatible = "qcom,msm8960-tsens";
+
+ nvmem-cells = <&tsens_calib>, <&tsens_backup>;
+ nvmem-cell-names = "calib", "calib_backup";
+ interrupts = <0 178 4>;
+ interrupt-names = "uplow";
+
+ #qcom,sensors = <11>;
+ #thermal-sensor-cells = <1>;
+ };
};
...
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-ipq8074.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-ipq8074.yaml
index 98572b4a9b60..21470f52ce36 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-ipq8074.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-ipq8074.yaml
@@ -24,6 +24,9 @@ properties:
'#clock-cells':
const: 1
+ '#power-domain-cells':
+ const: 1
+
'#reset-cells':
const: 1
@@ -38,6 +41,7 @@ required:
- compatible
- reg
- '#clock-cells'
+ - '#power-domain-cells'
- '#reset-cells'
additionalProperties: false
@@ -48,6 +52,7 @@ examples:
compatible = "qcom,gcc-ipq8074";
reg = <0x01800000 0x80000>;
#clock-cells = <1>;
+ #power-domain-cells = <1>;
#reset-cells = <1>;
};
...
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-msm8996.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-msm8996.yaml
index 5a5b2214f0ca..005e0edd4609 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-msm8996.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-msm8996.yaml
@@ -22,16 +22,32 @@ properties:
const: qcom,gcc-msm8996
clocks:
+ minItems: 3
items:
- description: XO source
- description: Second XO source
- description: Sleep clock source
+ - description: PCIe 0 PIPE clock (optional)
+ - description: PCIe 1 PIPE clock (optional)
+ - description: PCIe 2 PIPE clock (optional)
+ - description: USB3 PIPE clock (optional)
+ - description: UFS RX symbol 0 clock (optional)
+ - description: UFS RX symbol 1 clock (optional)
+ - description: UFS TX symbol 0 clock (optional)
clock-names:
+ minItems: 3
items:
- const: cxo
- const: cxo2
- const: sleep_clk
+ - const: pcie_0_pipe_clk_src
+ - const: pcie_1_pipe_clk_src
+ - const: pcie_2_pipe_clk_src
+ - const: usb3_phy_pipe_clk_src
+ - const: ufs_rx_symbol_0_clk_src
+ - const: ufs_rx_symbol_1_clk_src
+ - const: ufs_tx_symbol_0_clk_src
'#clock-cells':
const: 1
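
   For reference, a hedged minimal node that satisfies the relaxed schema using only the
   three mandatory board clocks and omitting the optional PIPE clocks. The register range
   follows the MSM8996 SoC dtsi; the clock labels are placeholders, not part of this patch:

       gcc: clock-controller@300000 {
           compatible = "qcom,gcc-msm8996";
           reg = <0x00300000 0x90000>;
           clocks = <&xo_board>, <&xo_board>, <&sleep_clk>;
           clock-names = "cxo", "cxo2", "sleep_clk";
           #clock-cells = <1>;
           #reset-cells = <1>;
           #power-domain-cells = <1>;
       };
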
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-other.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-other.yaml
index 6c45e0f85494..6c78df0c46a9 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-other.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-other.yaml
@@ -44,7 +44,6 @@ properties:
- qcom,gcc-msm8916
- qcom,gcc-msm8939
- qcom,gcc-msm8953
- - qcom,gcc-msm8960
- qcom,gcc-msm8974
- qcom,gcc-msm8974pro
- qcom,gcc-msm8974pro-ac
@@ -58,10 +57,10 @@ required:
unevaluatedProperties: false
examples:
- # Example for GCC for MSM8960:
+ # Example for GCC for MSM8974:
- |
clock-controller@900000 {
- compatible = "qcom,gcc-msm8960";
+ compatible = "qcom,gcc-msm8974";
reg = <0x900000 0x4000>;
#clock-cells = <1>;
#reset-cells = <1>;
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-sdm845.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-sdm845.yaml
index d902f137ab17..daf7906ebc40 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-sdm845.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-sdm845.yaml
@@ -43,6 +43,9 @@ properties:
'#reset-cells':
const: 1
+ power-domains:
+ maxItems: 1
+
'#power-domain-cells':
const: 1
diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmcc.yaml b/Documentation/devicetree/bindings/clock/qcom,rpmcc.yaml
index 9d296b89a8d0..d63b45ad06e8 100644
--- a/Documentation/devicetree/bindings/clock/qcom,rpmcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,rpmcc.yaml
@@ -49,15 +49,86 @@ properties:
const: 1
clocks:
- maxItems: 1
+ minItems: 1
+ maxItems: 2
clock-names:
- const: xo
+ minItems: 1
+ maxItems: 2
required:
- compatible
- '#clock-cells'
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,rpmcc-apq8060
+ - qcom,rpmcc-ipq806x
+ - qcom,rpmcc-msm8660
+
+ then:
+ properties:
+ clocks:
+ items:
+ - description: pxo clock
+
+ clock-names:
+ items:
+ - const: pxo
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: qcom,rpmcc-apq8064
+ then:
+ properties:
+ clocks:
+ items:
+ - description: pxo clock
+ - description: cxo clock
+
+ clock-names:
+ items:
+ - const: pxo
+ - const: cxo
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,rpmcc-mdm9607
+ - qcom,rpmcc-msm8226
+ - qcom,rpmcc-msm8916
+ - qcom,rpmcc-msm8936
+ - qcom,rpmcc-msm8953
+ - qcom,rpmcc-msm8974
+ - qcom,rpmcc-msm8976
+ - qcom,rpmcc-msm8992
+ - qcom,rpmcc-msm8994
+ - qcom,rpmcc-msm8996
+ - qcom,rpmcc-msm8998
+ - qcom,rpmcc-qcm2290
+ - qcom,rpmcc-qcs404
+ - qcom,rpmcc-sdm660
+ - qcom,rpmcc-sm6115
+ - qcom,rpmcc-sm6125
+
+ then:
+ properties:
+ clocks:
+ items:
+ - description: xo clock
+
+ clock-names:
+ items:
+ - const: xo
+
additionalProperties: false
examples:
@@ -73,3 +144,13 @@ examples:
};
};
};
+
+ - |
+ rpm {
+ clock-controller {
+ compatible = "qcom,rpmcc-ipq806x", "qcom,rpmcc";
+ #clock-cells = <1>;
+ clocks = <&pxo_board>;
+ clock-names = "pxo";
+ };
+ };
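
   A second illustrative example, not part of the patch, for the qcom,rpmcc-apq8064 branch
   of the new allOf, which requires both the pxo and cxo inputs; the clock labels are
   placeholders:

       rpm {
           clock-controller {
               compatible = "qcom,rpmcc-apq8064", "qcom,rpmcc";
               #clock-cells = <1>;
               clocks = <&pxo_board>, <&cxo_board>;
               clock-names = "pxo", "cxo";
           };
       };
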
diff --git a/Documentation/devicetree/bindings/clock/renesas,rzg2l-cpg.yaml b/Documentation/devicetree/bindings/clock/renesas,rzg2l-cpg.yaml
index 8880b834f264..d036675e0779 100644
--- a/Documentation/devicetree/bindings/clock/renesas,rzg2l-cpg.yaml
+++ b/Documentation/devicetree/bindings/clock/renesas,rzg2l-cpg.yaml
@@ -45,10 +45,9 @@ properties:
description: |
- For CPG core clocks, the two clock specifier cells must be "CPG_CORE"
and a core clock reference, as defined in
- <dt-bindings/clock/r9a0*-cpg.h>
+ <dt-bindings/clock/r9a0*-cpg.h>,
- For module clocks, the two clock specifier cells must be "CPG_MOD" and
- a module number, as defined in the <dt-bindings/clock/r9a07g0*-cpg.h> or
- <dt-bindings/clock/r9a09g011-cpg.h>.
+ a module number, as defined in <dt-bindings/clock/r9a0*-cpg.h>.
const: 2
'#power-domain-cells':
@@ -62,7 +61,7 @@ properties:
'#reset-cells':
description:
The single reset specifier cell must be the module number, as defined in
- the <dt-bindings/clock/r9a07g0*-cpg.h> or <dt-bindings/clock/r9a09g011-cpg.h>.
+ <dt-bindings/clock/r9a0*-cpg.h>.
const: 1
required:
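
   On the consumer side, the two-cell specifier described above looks roughly as below.
   This is a sketch assuming the R9A07G044 header (which is expected to provide the CPG_MOD
   macro) and the SCIF0 module clock macro; none of these details are part of this hunk:

       #include <dt-bindings/clock/r9a07g044-cpg.h>

       scif0: serial@1004b800 {
           compatible = "renesas,scif-r9a07g044";
           reg = <0x1004b800 0x400>;
           clocks = <&cpg CPG_MOD R9A07G044_SCIF0_CLK_PCK>;
           clock-names = "fck";
       };
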
diff --git a/Documentation/devicetree/bindings/clock/sprd,ums512-clk.yaml b/Documentation/devicetree/bindings/clock/sprd,ums512-clk.yaml
new file mode 100644
index 000000000000..5f747b0471cf
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/sprd,ums512-clk.yaml
@@ -0,0 +1,71 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright 2022 Unisoc Inc.
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/clock/sprd,ums512-clk.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: UMS512 SoC clock controller
+
+maintainers:
+ - Orson Zhai <orsonzhai@gmail.com>
+ - Baolin Wang <baolin.wang7@gmail.com>
+ - Chunyan Zhang <zhang.lyra@gmail.com>
+
+properties:
+ compatible:
+ enum:
+ - sprd,ums512-apahb-gate
+ - sprd,ums512-ap-clk
+ - sprd,ums512-aonapb-clk
+ - sprd,ums512-pmu-gate
+ - sprd,ums512-g0-pll
+ - sprd,ums512-g2-pll
+ - sprd,ums512-g3-pll
+ - sprd,ums512-gc-pll
+ - sprd,ums512-aon-gate
+ - sprd,ums512-audcpapb-gate
+ - sprd,ums512-audcpahb-gate
+ - sprd,ums512-gpu-clk
+ - sprd,ums512-mm-clk
+ - sprd,ums512-mm-gate-clk
+ - sprd,ums512-apapb-gate
+
+ "#clock-cells":
+ const: 1
+
+ clocks:
+ minItems: 1
+ maxItems: 4
+ description: |
+      The input parent clock(s) phandle for the clock; only list the
+      fixed clocks which are declared in the devicetree.
+
+ clock-names:
+ minItems: 1
+ items:
+ - const: ext-26m
+ - const: ext-32k
+ - const: ext-4m
+ - const: rco-100m
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - '#clock-cells'
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ ap_clk: clock-controller@20200000 {
+ compatible = "sprd,ums512-ap-clk";
+ reg = <0x20200000 0x1000>;
+ clocks = <&ext_26m>;
+ clock-names = "ext-26m";
+ #clock-cells = <1>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/clock/st/st,flexgen.txt b/Documentation/devicetree/bindings/clock/st/st,flexgen.txt
index 55a18939bddd..c918075405ba 100644
--- a/Documentation/devicetree/bindings/clock/st/st,flexgen.txt
+++ b/Documentation/devicetree/bindings/clock/st/st,flexgen.txt
@@ -78,7 +78,7 @@ Required properties:
- #clock-cells : from common clock binding; shall be set to 1 (multiple clock
outputs).
-- clocks : must be set to the parent's phandle. it's could be output clocks of
+- clocks : must be set to the parent's phandle. it could be output clocks of
a quadsfs or/and a pll or/and clk_sysin (up to 7 clocks)
- clock-output-names : List of strings used to name the clock outputs.
diff --git a/Documentation/devicetree/bindings/clock/ti/davinci/pll.txt b/Documentation/devicetree/bindings/clock/ti/davinci/pll.txt
index 36998e184821..c9894538315b 100644
--- a/Documentation/devicetree/bindings/clock/ti/davinci/pll.txt
+++ b/Documentation/devicetree/bindings/clock/ti/davinci/pll.txt
@@ -15,7 +15,7 @@ Required properties:
- for "ti,da850-pll1", shall be "clksrc"
Optional properties:
-- ti,clkmode-square-wave: Indicates that the the board is supplying a square
+- ti,clkmode-square-wave: Indicates that the board is supplying a square
wave input on the OSCIN pin instead of using a crystal oscillator.
This property is only valid when compatible = "ti,da850-pll0".
diff --git a/Documentation/devicetree/bindings/clock/ti/dra7-atl.txt b/Documentation/devicetree/bindings/clock/ti/dra7-atl.txt
index 21c002d28b9b..68504079f99f 100644
--- a/Documentation/devicetree/bindings/clock/ti/dra7-atl.txt
+++ b/Documentation/devicetree/bindings/clock/ti/dra7-atl.txt
@@ -6,7 +6,7 @@ functional clock but can be configured to provide different clocks.
ATL can maintain a clock averages to some desired frequency based on the bws/aws
signals - can compensate the drift between the two ws signal.
-In order to provide the support for ATL and it's output clocks (which can be used
+In order to provide the support for ATL and its output clocks (which can be used
internally within the SoC or external components) two sets of bindings is needed:
Clock tree binding:
diff --git a/Documentation/devicetree/bindings/connector/usb-connector.yaml b/Documentation/devicetree/bindings/connector/usb-connector.yaml
index 0420fa563532..ae515651fc6b 100644
--- a/Documentation/devicetree/bindings/connector/usb-connector.yaml
+++ b/Documentation/devicetree/bindings/connector/usb-connector.yaml
@@ -263,11 +263,11 @@ examples:
# Micro-USB connector with HS lines routed via controller (MUIC).
- |
muic-max77843 {
- usb_con1: connector {
- compatible = "usb-b-connector";
- label = "micro-USB";
- type = "micro";
- };
+ usb_con1: connector {
+ compatible = "usb-b-connector";
+ label = "micro-USB";
+ type = "micro";
+ };
};
# USB-C connector attached to CC controller (s2mm005), HS lines routed
@@ -275,34 +275,34 @@ examples:
# DisplayPort video lines are routed to the connector via SS mux in USB3 PHY.
- |
ccic: s2mm005 {
- usb_con2: connector {
- compatible = "usb-c-connector";
- label = "USB-C";
+ usb_con2: connector {
+ compatible = "usb-c-connector";
+ label = "USB-C";
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
- port@0 {
- reg = <0>;
- usb_con_hs: endpoint {
- remote-endpoint = <&max77865_usbc_hs>;
- };
- };
- port@1 {
- reg = <1>;
- usb_con_ss: endpoint {
- remote-endpoint = <&usbdrd_phy_ss>;
- };
- };
- port@2 {
- reg = <2>;
- usb_con_sbu: endpoint {
- remote-endpoint = <&dp_aux>;
+ port@0 {
+ reg = <0>;
+ usb_con_hs: endpoint {
+ remote-endpoint = <&max77865_usbc_hs>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ usb_con_ss: endpoint {
+ remote-endpoint = <&usbdrd_phy_ss>;
+ };
+ };
+ port@2 {
+ reg = <2>;
+ usb_con_sbu: endpoint {
+ remote-endpoint = <&dp_aux>;
+ };
+ };
};
- };
};
- };
};
# USB-C connector attached to a typec port controller(ptn5110), which has
@@ -310,16 +310,16 @@ examples:
- |
#include <dt-bindings/usb/pd.h>
typec: ptn5110 {
- usb_con3: connector {
- compatible = "usb-c-connector";
- label = "USB-C";
- power-role = "dual";
- try-power-role = "sink";
- source-pdos = <PDO_FIXED(5000, 2000, PDO_FIXED_USB_COMM)>;
- sink-pdos = <PDO_FIXED(5000, 2000, PDO_FIXED_USB_COMM)
- PDO_VAR(5000, 12000, 2000)>;
- op-sink-microwatt = <10000000>;
- };
+ usb_con3: connector {
+ compatible = "usb-c-connector";
+ label = "USB-C";
+ power-role = "dual";
+ try-power-role = "sink";
+ source-pdos = <PDO_FIXED(5000, 2000, PDO_FIXED_USB_COMM)>;
+ sink-pdos = <PDO_FIXED(5000, 2000, PDO_FIXED_USB_COMM)
+ PDO_VAR(5000, 12000, 2000)>;
+ op-sink-microwatt = <10000000>;
+ };
};
# USB-C connector attached to SoC and USB3 typec port controller(hd3ss3220)
@@ -332,20 +332,20 @@ examples:
data-role = "dual";
ports {
- #address-cells = <1>;
- #size-cells = <0>;
- port@0 {
- reg = <0>;
- hs_ep: endpoint {
- remote-endpoint = <&usb3_hs_ep>;
- };
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ hs_ep: endpoint {
+ remote-endpoint = <&usb3_hs_ep>;
};
- port@1 {
- reg = <1>;
- ss_ep: endpoint {
- remote-endpoint = <&hd3ss3220_in_ep>;
- };
+ };
+ port@1 {
+ reg = <1>;
+ ss_ep: endpoint {
+ remote-endpoint = <&hd3ss3220_in_ep>;
};
+ };
};
};
@@ -354,12 +354,12 @@ examples:
#include <dt-bindings/gpio/gpio.h>
usb {
- connector {
- compatible = "gpio-usb-b-connector", "usb-b-connector";
- type = "micro";
- id-gpios = <&pio 12 GPIO_ACTIVE_HIGH>;
- vbus-supply = <&usb_p0_vbus>;
- };
+ connector {
+ compatible = "gpio-usb-b-connector", "usb-b-connector";
+ type = "micro";
+ id-gpios = <&pio 12 GPIO_ACTIVE_HIGH>;
+ vbus-supply = <&usb_p0_vbus>;
+ };
};
# Micro-USB connector with HS lines routed via controller (MUIC) and MHL
@@ -367,27 +367,27 @@ examples:
# mobile phone
- |
muic-max77843 {
- usb_con4: connector {
- compatible = "samsung,usb-connector-11pin", "usb-b-connector";
- label = "micro-USB";
- type = "micro";
+ usb_con4: connector {
+ compatible = "samsung,usb-connector-11pin", "usb-b-connector";
+ label = "micro-USB";
+ type = "micro";
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
- port@0 {
- reg = <0>;
- muic_to_usb: endpoint {
- remote-endpoint = <&usb_to_muic>;
- };
- };
- port@3 {
- reg = <3>;
- usb_con_mhl: endpoint {
- remote-endpoint = <&sii8620_mhl>;
+ port@0 {
+ reg = <0>;
+ muic_to_usb: endpoint {
+ remote-endpoint = <&usb_to_muic>;
+ };
+ };
+ port@3 {
+ reg = <3>;
+ usb_con_mhl: endpoint {
+ remote-endpoint = <&sii8620_mhl>;
+ };
+ };
};
- };
};
- };
};
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml b/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml
index 2f1b8b6852a0..24fa3d87a40b 100644
--- a/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml
+++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml
@@ -25,6 +25,7 @@ properties:
- description: v2 of CPUFREQ HW (EPSS)
items:
- enum:
+ - qcom,sm6375-cpufreq-epss
- qcom,sm8250-cpufreq-epss
- const: qcom,cpufreq-epss
diff --git a/Documentation/devicetree/bindings/cpufreq/qcom-cpufreq-nvmem.yaml b/Documentation/devicetree/bindings/cpufreq/qcom-cpufreq-nvmem.yaml
index 10b3a7a4af36..a11e1b867379 100644
--- a/Documentation/devicetree/bindings/cpufreq/qcom-cpufreq-nvmem.yaml
+++ b/Documentation/devicetree/bindings/cpufreq/qcom-cpufreq-nvmem.yaml
@@ -22,6 +22,13 @@ select:
compatible:
contains:
enum:
+ - qcom,apq8064
+ - qcom,apq8096
+ - qcom,ipq8064
+ - qcom,msm8939
+ - qcom,msm8960
+ - qcom,msm8974
+ - qcom,msm8996
- qcom,qcs404
required:
- compatible
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml
index 4a92a4c7dcd7..f8168986a0a9 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml
@@ -233,6 +233,7 @@ allOf:
- allwinner,sun8i-a83t-tcon-lcd
- allwinner,sun8i-v3s-tcon
- allwinner,sun9i-a80-tcon-lcd
+ - allwinner,sun20i-d1-tcon-lcd
then:
properties:
@@ -252,6 +253,7 @@ allOf:
- allwinner,sun8i-a83t-tcon-tv
- allwinner,sun8i-r40-tcon-tv
- allwinner,sun9i-a80-tcon-tv
+ - allwinner,sun20i-d1-tcon-tv
then:
properties:
@@ -278,6 +280,7 @@ allOf:
- allwinner,sun9i-a80-tcon-lcd
- allwinner,sun4i-a10-tcon
- allwinner,sun8i-a83t-tcon-lcd
+ - allwinner,sun20i-d1-tcon-lcd
then:
required:
@@ -294,6 +297,7 @@ allOf:
- allwinner,sun8i-a23-tcon
- allwinner,sun8i-a33-tcon
- allwinner,sun8i-a83t-tcon-lcd
+ - allwinner,sun20i-d1-tcon-lcd
then:
properties:
diff --git a/Documentation/devicetree/bindings/display/arm,pl11x.yaml b/Documentation/devicetree/bindings/display/arm,pl11x.yaml
index b545c6d20325..6cc9045e5c68 100644
--- a/Documentation/devicetree/bindings/display/arm,pl11x.yaml
+++ b/Documentation/devicetree/bindings/display/arm,pl11x.yaml
@@ -159,25 +159,12 @@ examples:
};
panel {
- compatible = "arm,rtsm-display", "panel-dpi";
- power-supply = <&vcc_supply>;
+ compatible = "arm,rtsm-display";
port {
clcd_panel: endpoint {
remote-endpoint = <&clcd_pads>;
};
};
-
- panel-timing {
- clock-frequency = <25175000>;
- hactive = <640>;
- hback-porch = <40>;
- hfront-porch = <24>;
- hsync-len = <96>;
- vactive = <480>;
- vback-porch = <32>;
- vfront-porch = <11>;
- vsync-len = <2>;
- };
};
...
diff --git a/Documentation/devicetree/bindings/display/bridge/sii902x.txt b/Documentation/devicetree/bindings/display/bridge/sii902x.txt
deleted file mode 100644
index 3bc760cc31cb..000000000000
--- a/Documentation/devicetree/bindings/display/bridge/sii902x.txt
+++ /dev/null
@@ -1,78 +0,0 @@
-sii902x HDMI bridge bindings
-
-Required properties:
- - compatible: "sil,sii9022"
- - reg: i2c address of the bridge
-
-Optional properties:
- - interrupts: describe the interrupt line used to inform the host
- about hotplug events.
- - reset-gpios: OF device-tree gpio specification for RST_N pin.
- - iovcc-supply: I/O Supply Voltage (1.8V or 3.3V)
- - cvcc12-supply: Digital Core Supply Voltage (1.2V)
-
- HDMI audio properties:
- - #sound-dai-cells: <0> or <1>. <0> if only i2s or spdif pin
- is wired, <1> if the both are wired. HDMI audio is
- configured only if this property is found.
- - sil,i2s-data-lanes: Array of up to 4 integers with values of 0-3
- Each integer indicates which i2s pin is connected to which
- audio fifo. The first integer selects i2s audio pin for the
- first audio fifo#0 (HDMI channels 1&2), second for fifo#1
- (HDMI channels 3&4), and so on. There is 4 fifos and 4 i2s
- pins (SD0 - SD3). Any i2s pin can be connected to any fifo,
- but there can be no gaps. E.g. an i2s pin must be mapped to
- fifo#0 and fifo#1 before mapping a channel to fifo#2. Default
- value is <0>, describing SD0 pin beiging routed to hdmi audio
- fifo #0.
- - clocks: phandle and clock specifier for each clock listed in
- the clock-names property
- - clock-names: "mclk"
- Describes SII902x MCLK input. MCLK can be used to produce
- HDMI audio CTS values. This property follows
- Documentation/devicetree/bindings/clock/clock-bindings.txt
- consumer binding.
-
- If HDMI audio is configured the sii902x device becomes an I2S
- and/or spdif audio codec component (e.g a digital audio sink),
- that can be used in configuring a full audio devices with
- simple-card or audio-graph-card binding. See their binding
- documents on how to describe the way the sii902x device is
- connected to the rest of the audio system:
- Documentation/devicetree/bindings/sound/simple-card.yaml
- Documentation/devicetree/bindings/sound/audio-graph-card.yaml
- Note: In case of the audio-graph-card binding the used port
- index should be 3.
-
-Optional subnodes:
- - video input: this subnode can contain a video input port node
- to connect the bridge to a display controller output (See this
- documentation [1]).
-
-[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
-
-Example:
- hdmi-bridge@39 {
- compatible = "sil,sii9022";
- reg = <0x39>;
- reset-gpios = <&pioA 1 0>;
- iovcc-supply = <&v3v3_hdmi>;
- cvcc12-supply = <&v1v2_hdmi>;
-
- #sound-dai-cells = <0>;
- sil,i2s-data-lanes = < 0 1 2 >;
- clocks = <&mclk>;
- clock-names = "mclk";
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- reg = <0>;
- bridge_in: endpoint {
- remote-endpoint = <&dc_out>;
- };
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/display/bridge/sil,sii9022.yaml b/Documentation/devicetree/bindings/display/bridge/sil,sii9022.yaml
new file mode 100644
index 000000000000..5a69547ad3d7
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/sil,sii9022.yaml
@@ -0,0 +1,131 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/bridge/sil,sii9022.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Silicon Image sii902x HDMI bridge
+
+maintainers:
+ - Boris Brezillon <bbrezillon@kernel.org>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - sil,sii9022-cpi # CEC Programming Interface
+ - sil,sii9022-tpi # Transmitter Programming Interface
+ - const: sil,sii9022
+ - const: sil,sii9022
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+ description: Interrupt line used to inform the host about hotplug events.
+
+ reset-gpios:
+ maxItems: 1
+
+ iovcc-supply:
+ description: I/O Supply Voltage (1.8V or 3.3V)
+
+ cvcc12-supply:
+ description: Digital Core Supply Voltage (1.2V)
+
+ '#sound-dai-cells':
+ enum: [ 0, 1 ]
+ description: |
+ <0> if only I2S or S/PDIF pin is wired,
+ <1> if both are wired.
+ HDMI audio is configured only if this property is found.
+ If HDMI audio is configured, the sii902x device becomes an I2S and/or
+ S/PDIF audio codec component (e.g. a digital audio sink), that can be
+ used in configuring full audio devices with simple-card or
+ audio-graph-card bindings. See their binding documents on how to describe
+      the way the sii902x device is connected to the rest of the audio system:
+ Documentation/devicetree/bindings/sound/simple-card.yaml
+ Documentation/devicetree/bindings/sound/audio-graph-card.yaml
+ Note: In case of the audio-graph-card binding the used port index should
+ be 3.
+
+ sil,i2s-data-lanes:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 4
+ uniqueItems: true
+ items:
+ enum: [ 0, 1, 2, 3 ]
+ description:
+ Each integer indicates which I2S pin is connected to which audio FIFO.
+ The first integer selects the I2S audio pin for the first audio FIFO#0
+ (HDMI channels 1&2), the second for FIFO#1 (HDMI channels 3&4), and so
+ on. There are 4 FIFOs and 4 I2S pins (SD0 - SD3). Any I2S pin can be
+ connected to any FIFO, but there can be no gaps. E.g. an I2S pin must be
+ mapped to FIFO#0 and FIFO#1 before mapping a channel to FIFO#2. The
+ default value is <0>, describing SD0 pin being routed to HDMI audio
+ FIFO#0.
+
+ clocks:
+ maxItems: 1
+ description: MCLK input. MCLK can be used to produce HDMI audio CTS values.
+
+ clock-names:
+ const: mclk
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: Parallel RGB input port
+
+ port@1:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: HDMI output port
+
+ port@3:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: Sound input port
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ hdmi-bridge@39 {
+ compatible = "sil,sii9022";
+ reg = <0x39>;
+ reset-gpios = <&pioA 1 0>;
+ iovcc-supply = <&v3v3_hdmi>;
+ cvcc12-supply = <&v1v2_hdmi>;
+
+ #sound-dai-cells = <0>;
+ sil,i2s-data-lanes = < 0 1 2 >;
+ clocks = <&mclk>;
+ clock-names = "mclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ bridge_in: endpoint {
+ remote-endpoint = <&dc_out>;
+ };
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/ilitek,ili9341.txt b/Documentation/devicetree/bindings/display/ilitek,ili9341.txt
deleted file mode 100644
index 169b32e4ee4e..000000000000
--- a/Documentation/devicetree/bindings/display/ilitek,ili9341.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-Ilitek ILI9341 display panels
-
-This binding is for display panels using an Ilitek ILI9341 controller in SPI
-mode.
-
-Required properties:
-- compatible: "adafruit,yx240qv29", "ilitek,ili9341"
-- dc-gpios: D/C pin
-- reset-gpios: Reset pin
-
-The node for this driver must be a child node of a SPI controller, hence
-all mandatory properties described in ../spi/spi-bus.txt must be specified.
-
-Optional properties:
-- rotation: panel rotation in degrees counter clockwise (0,90,180,270)
-- backlight: phandle of the backlight device attached to the panel
-
-Example:
- display@0{
- compatible = "adafruit,yx240qv29", "ilitek,ili9341";
- reg = <0>;
- spi-max-frequency = <32000000>;
- dc-gpios = <&gpio0 9 GPIO_ACTIVE_HIGH>;
- reset-gpios = <&gpio0 8 GPIO_ACTIVE_HIGH>;
- rotation = <270>;
- backlight = <&backlight>;
- };
diff --git a/Documentation/devicetree/bindings/display/panel/arm,rtsm-display.yaml b/Documentation/devicetree/bindings/display/panel/arm,rtsm-display.yaml
new file mode 100644
index 000000000000..4ad484f09ba3
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/arm,rtsm-display.yaml
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/arm,rtsm-display.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Arm RTSM Virtual Platforms Display
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+ compatible:
+ const: arm,rtsm-display
+
+ port: true
+
+required:
+ - compatible
+ - port
+
+additionalProperties: false
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9341.yaml b/Documentation/devicetree/bindings/display/panel/ilitek,ili9341.yaml
index 6058948a9764..99e0cb9440cf 100644
--- a/Documentation/devicetree/bindings/display/panel/ilitek,ili9341.yaml
+++ b/Documentation/devicetree/bindings/display/panel/ilitek,ili9341.yaml
@@ -21,8 +21,10 @@ properties:
compatible:
items:
- enum:
+ - adafruit,yx240qv29
# ili9341 240*320 Color on stm32f429-disco board
- st,sf-tc240t-9370-t
+ - canaan,kd233-tft
- const: ilitek,ili9341
reg: true
@@ -47,31 +49,50 @@ properties:
vddi-led-supply:
description: Voltage supply for the LED driver (1.65 .. 3.3 V)
-additionalProperties: false
+unevaluatedProperties: false
required:
- compatible
- reg
- dc-gpios
- - port
+
+if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - st,sf-tc240t-9370-t
+then:
+ required:
+ - port
examples:
- |+
+ #include <dt-bindings/gpio/gpio.h>
spi {
#address-cells = <1>;
#size-cells = <0>;
panel: display@0 {
- compatible = "st,sf-tc240t-9370-t",
- "ilitek,ili9341";
- reg = <0>;
- spi-3wire;
- spi-max-frequency = <10000000>;
- dc-gpios = <&gpiod 13 0>;
- port {
- panel_in: endpoint {
- remote-endpoint = <&display_out>;
- };
- };
- };
+ compatible = "st,sf-tc240t-9370-t",
+ "ilitek,ili9341";
+ reg = <0>;
+ spi-3wire;
+ spi-max-frequency = <10000000>;
+ dc-gpios = <&gpiod 13 0>;
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&display_out>;
+ };
+ };
+ };
+ display@1{
+ compatible = "adafruit,yx240qv29", "ilitek,ili9341";
+ reg = <1>;
+ spi-max-frequency = <10000000>;
+ dc-gpios = <&gpio0 9 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio0 8 GPIO_ACTIVE_HIGH>;
+ rotation = <270>;
+ backlight = <&backlight>;
};
+ };
...
diff --git a/Documentation/devicetree/bindings/display/panel/lg,lg4573.yaml b/Documentation/devicetree/bindings/display/panel/lg,lg4573.yaml
index b4314ce7b411..ee357e139ac0 100644
--- a/Documentation/devicetree/bindings/display/panel/lg,lg4573.yaml
+++ b/Documentation/devicetree/bindings/display/panel/lg,lg4573.yaml
@@ -15,13 +15,13 @@ maintainers:
allOf:
- $ref: panel-common.yaml#
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
properties:
compatible:
const: lg,lg4573
reg: true
- spi-max-frequency: true
required:
- compatible
diff --git a/Documentation/devicetree/bindings/display/panel/raydium,rm67191.yaml b/Documentation/devicetree/bindings/display/panel/raydium,rm67191.yaml
index 617aa8c8c03a..d62fd692bf10 100644
--- a/Documentation/devicetree/bindings/display/panel/raydium,rm67191.yaml
+++ b/Documentation/devicetree/bindings/display/panel/raydium,rm67191.yaml
@@ -38,6 +38,7 @@ properties:
0 - burst-mode
1 - non-burst with sync event
2 - non-burst with sync pulse
+ $ref: /schemas/types.yaml#/definitions/uint32
enum: [0, 1, 2]
required:
diff --git a/Documentation/devicetree/bindings/display/simple-framebuffer.yaml b/Documentation/devicetree/bindings/display/simple-framebuffer.yaml
index 27ba4323d221..1f905d85dd9c 100644
--- a/Documentation/devicetree/bindings/display/simple-framebuffer.yaml
+++ b/Documentation/devicetree/bindings/display/simple-framebuffer.yaml
@@ -7,7 +7,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Simple Framebuffer Device Tree Bindings
maintainers:
- - Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
- Hans de Goede <hdegoede@redhat.com>
description: |+
diff --git a/Documentation/devicetree/bindings/display/sitronix,st7735r.yaml b/Documentation/devicetree/bindings/display/sitronix,st7735r.yaml
index 157b1a7b18f9..53f181ef3670 100644
--- a/Documentation/devicetree/bindings/display/sitronix,st7735r.yaml
+++ b/Documentation/devicetree/bindings/display/sitronix,st7735r.yaml
@@ -15,6 +15,7 @@ description:
allOf:
- $ref: panel/panel-common.yaml#
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/display/solomon,ssd1307fb.yaml b/Documentation/devicetree/bindings/display/solomon,ssd1307fb.yaml
index 3fbd87c2c120..669f70b1b4c4 100644
--- a/Documentation/devicetree/bindings/display/solomon,ssd1307fb.yaml
+++ b/Documentation/devicetree/bindings/display/solomon,ssd1307fb.yaml
@@ -49,9 +49,6 @@ properties:
vbat-supply:
description: The supply for VBAT
- # Only required for SPI
- spi-max-frequency: true
-
solomon,height:
$ref: /schemas/types.yaml#/definitions/uint32
default: 16
@@ -153,6 +150,8 @@ required:
- reg
allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
- if:
properties:
compatible:
@@ -223,7 +222,7 @@ allOf:
solomon,dclk-frq:
default: 10
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/dma/apple,admac.yaml b/Documentation/devicetree/bindings/dma/apple,admac.yaml
new file mode 100644
index 000000000000..bdc8c129c4f5
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/apple,admac.yaml
@@ -0,0 +1,80 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/apple,admac.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Apple Audio DMA Controller (ADMAC)
+
+description: |
+ Apple's Audio DMA Controller (ADMAC) is used to fetch and store audio samples
+ on SoCs from the "Apple Silicon" family.
+
+ The controller has been seen with up to 24 channels. Even-numbered channels
+ are TX-only, odd-numbered are RX-only. Individual channels are coupled to
+ fixed device endpoints.
+
+maintainers:
+ - Martin Povišer <povik+lin@cutebit.org>
+
+allOf:
+ - $ref: "dma-controller.yaml#"
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - apple,t6000-admac
+ - apple,t8103-admac
+ - const: apple,admac
+
+ reg:
+ maxItems: 1
+
+ '#dma-cells':
+ const: 1
+ description:
+ Clients specify a single cell with channel number.
+
+ dma-channels:
+ maximum: 24
+
+ interrupts:
+ minItems: 4
+ maxItems: 4
+ description:
+ Interrupts that correspond to the 4 IRQ outputs of the controller. Usually
+ only one of the controller outputs will be connected as an usable interrupt
+      only one of the controller outputs will be connected as a usable interrupt
+ in an interrupts-extended list the disconnected positions will contain
+ an empty phandle reference <0>.
+
+required:
+ - compatible
+ - reg
+ - '#dma-cells'
+ - dma-channels
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/apple-aic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ aic: interrupt-controller {
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ };
+
+ admac: dma-controller@238200000 {
+ compatible = "apple,t8103-admac", "apple,admac";
+ reg = <0x38200000 0x34000>;
+ dma-channels = <24>;
+ interrupts-extended = <0>,
+ <&aic AIC_IRQ 626 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <0>;
+ #dma-cells = <1>;
+ };
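
   As the '#dma-cells' description says, clients pass a single channel number. A hedged
   client fragment follows; the consumer node and dma-names are placeholders, and the
   even/odd TX/RX split follows the controller description above:

       audio-controller {
           /* ... */
           dmas = <&admac 0>, <&admac 1>;  /* even channel = TX, odd channel = RX */
           dma-names = "tx", "rx";
       };
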
diff --git a/Documentation/devicetree/bindings/dma/fsl,edma.yaml b/Documentation/devicetree/bindings/dma/fsl,edma.yaml
new file mode 100644
index 000000000000..050e6cd57727
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/fsl,edma.yaml
@@ -0,0 +1,155 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/fsl,edma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale enhanced Direct Memory Access(eDMA) Controller
+
+description: |
+  The eDMA channels have multiplex capability through programmable
+  memory-mapped registers. Channels are split into two groups, called
+  DMAMUX0 and DMAMUX1; a specific DMA request source can only be multiplexed
+  by channels of one particular group, DMAMUX0 or DMAMUX1, but not both.
+
+maintainers:
+ - Peng Fan <peng.fan@nxp.com>
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - fsl,vf610-edma
+ - fsl,imx7ulp-edma
+ - items:
+ - const: fsl,ls1028a-edma
+ - const: fsl,vf610-edma
+
+ reg:
+ minItems: 2
+ maxItems: 3
+
+ interrupts:
+ minItems: 2
+ maxItems: 17
+
+ interrupt-names:
+ minItems: 2
+ maxItems: 17
+
+ "#dma-cells":
+ const: 2
+
+ dma-channels:
+ const: 32
+
+ clocks:
+ maxItems: 2
+
+ clock-names:
+ maxItems: 2
+
+ big-endian:
+ description: |
+      If present, registers and hardware scatter/gather descriptors of the
+      eDMA are implemented in big endian mode, otherwise in little endian mode.
+ type: boolean
+
+required:
+ - "#dma-cells"
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - dma-channels
+
+allOf:
+ - $ref: "dma-controller.yaml#"
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: fsl,vf610-edma
+ then:
+ properties:
+ clock-names:
+ items:
+ - const: dmamux0
+ - const: dmamux1
+ interrupts:
+ maxItems: 2
+ interrupt-names:
+ items:
+ - const: edma-tx
+ - const: edma-err
+ reg:
+ maxItems: 3
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: fsl,imx7ulp-edma
+ then:
+ properties:
+ clock-names:
+ items:
+ - const: dma
+ - const: dmamux0
+ interrupts:
+ maxItems: 17
+ reg:
+ maxItems: 2
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/vf610-clock.h>
+
+ edma0: dma-controller@40018000 {
+ #dma-cells = <2>;
+ compatible = "fsl,vf610-edma";
+ reg = <0x40018000 0x2000>,
+ <0x40024000 0x1000>,
+ <0x40025000 0x1000>;
+ interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>,
+ <0 9 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "edma-tx", "edma-err";
+ dma-channels = <32>;
+ clock-names = "dmamux0", "dmamux1";
+ clocks = <&clks VF610_CLK_DMAMUX0>, <&clks VF610_CLK_DMAMUX1>;
+ };
+
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/imx7ulp-clock.h>
+
+ edma1: dma-controller@40080000 {
+ #dma-cells = <2>;
+ compatible = "fsl,imx7ulp-edma";
+ reg = <0x40080000 0x2000>,
+ <0x40210000 0x1000>;
+ dma-channels = <32>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
+ /* last is eDMA2-ERR interrupt */
+ <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+ clock-names = "dma", "dmamux0";
+ clocks = <&pcc2 IMX7ULP_CLK_DMA1>, <&pcc2 IMX7ULP_CLK_DMA_MUX1>;
+ };
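
   The removed fsl-edma.txt (just below) also carried a client example for the two-cell
   specifier (DMAMUX group, then request slot); reproduced here as a reference sketch with
   the values from that text:

       sai2: sai@40031000 {
           compatible = "fsl,vf610-sai";
           reg = <0x40031000 0x1000>;
           interrupts = <0 86 IRQ_TYPE_LEVEL_HIGH>;
           clock-names = "sai";
           clocks = <&clks VF610_CLK_SAI2>;
           dma-names = "tx", "rx";
           dmas = <&edma0 0 21>, <&edma0 0 20>;
       };
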
diff --git a/Documentation/devicetree/bindings/dma/fsl-edma.txt b/Documentation/devicetree/bindings/dma/fsl-edma.txt
deleted file mode 100644
index ee1754739b4b..000000000000
--- a/Documentation/devicetree/bindings/dma/fsl-edma.txt
+++ /dev/null
@@ -1,111 +0,0 @@
-* Freescale enhanced Direct Memory Access(eDMA) Controller
-
- The eDMA channels have multiplex capability by programmble memory-mapped
-registers. channels are split into two groups, called DMAMUX0 and DMAMUX1,
-specific DMA request source can only be multiplexed by any channel of certain
-group, DMAMUX0 or DMAMUX1, but not both.
-
-* eDMA Controller
-Required properties:
-- compatible :
- - "fsl,vf610-edma" for eDMA used similar to that on Vybrid vf610 SoC
- - "fsl,imx7ulp-edma" for eDMA2 used similar to that on i.mx7ulp
- - "fsl,ls1028a-edma" followed by "fsl,vf610-edma" for eDMA used on the
- LS1028A SoC.
-- reg : Specifies base physical address(s) and size of the eDMA registers.
- The 1st region is eDMA control register's address and size.
- The 2nd and the 3rd regions are programmable channel multiplexing
- control register's address and size.
-- interrupts : A list of interrupt-specifiers, one for each entry in
- interrupt-names on vf610 similar SoC. But for i.mx7ulp per channel
- per transmission interrupt, total 16 channel interrupt and 1
- error interrupt(located in the last), no interrupt-names list on
- i.mx7ulp for clean on dts.
-- #dma-cells : Must be <2>.
- The 1st cell specifies the DMAMUX(0 for DMAMUX0 and 1 for DMAMUX1).
- Specific request source can only be multiplexed by specific channels
- group called DMAMUX.
- The 2nd cell specifies the request source(slot) ID.
- See the SoC's reference manual for all the supported request sources.
-- dma-channels : Number of channels supported by the controller
-- clock-names : A list of channel group clock names. Should contain:
- "dmamux0" - clock name of mux0 group
- "dmamux1" - clock name of mux1 group
- Note: No dmamux0 on i.mx7ulp, but another 'dma' clk added on i.mx7ulp.
-- clocks : A list of phandle and clock-specifier pairs, one for each entry in
- clock-names.
-
-Optional properties:
-- big-endian: If present registers and hardware scatter/gather descriptors
- of the eDMA are implemented in big endian mode, otherwise in little
- mode.
-- interrupt-names : Should contain the below on vf610 similar SoC but not used
- on i.mx7ulp similar SoC:
- "edma-tx" - the transmission interrupt
- "edma-err" - the error interrupt
-
-
-Examples:
-
-edma0: dma-controller@40018000 {
- #dma-cells = <2>;
- compatible = "fsl,vf610-edma";
- reg = <0x40018000 0x2000>,
- <0x40024000 0x1000>,
- <0x40025000 0x1000>;
- interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>,
- <0 9 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "edma-tx", "edma-err";
- dma-channels = <32>;
- clock-names = "dmamux0", "dmamux1";
- clocks = <&clks VF610_CLK_DMAMUX0>,
- <&clks VF610_CLK_DMAMUX1>;
-}; /* vf610 */
-
-edma1: dma-controller@40080000 {
- #dma-cells = <2>;
- compatible = "fsl,imx7ulp-edma";
- reg = <0x40080000 0x2000>,
- <0x40210000 0x1000>;
- dma-channels = <32>;
- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
- /* last is eDMA2-ERR interrupt */
- <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
- clock-names = "dma", "dmamux0";
- clocks = <&pcc2 IMX7ULP_CLK_DMA1>,
- <&pcc2 IMX7ULP_CLK_DMA_MUX1>;
-}; /* i.mx7ulp */
-
-* DMA clients
-DMA client drivers that uses the DMA function must use the format described
-in the dma.txt file, using a two-cell specifier for each channel: the 1st
-specifies the channel group(DMAMUX) in which this request can be multiplexed,
-and the 2nd specifies the request source.
-
-Examples:
-
-sai2: sai@40031000 {
- compatible = "fsl,vf610-sai";
- reg = <0x40031000 0x1000>;
- interrupts = <0 86 IRQ_TYPE_LEVEL_HIGH>;
- clock-names = "sai";
- clocks = <&clks VF610_CLK_SAI2>;
- dma-names = "tx", "rx";
- dmas = <&edma0 0 21>,
- <&edma0 0 20>;
-};
diff --git a/Documentation/devicetree/bindings/dma/mediatek,uart-dma.yaml b/Documentation/devicetree/bindings/dma/mediatek,uart-dma.yaml
index 54d68fc688b5..19ea8dcbcbce 100644
--- a/Documentation/devicetree/bindings/dma/mediatek,uart-dma.yaml
+++ b/Documentation/devicetree/bindings/dma/mediatek,uart-dma.yaml
@@ -22,6 +22,7 @@ properties:
- items:
- enum:
- mediatek,mt2712-uart-dma
+ - mediatek,mt8365-uart-dma
- mediatek,mt8516-uart-dma
- const: mediatek,mt6577-uart-dma
- enum:
diff --git a/Documentation/devicetree/bindings/dma/nvidia,tegra186-gpc-dma.yaml b/Documentation/devicetree/bindings/dma/nvidia,tegra186-gpc-dma.yaml
index 9dd1476d1849..7e575296df0c 100644
--- a/Documentation/devicetree/bindings/dma/nvidia,tegra186-gpc-dma.yaml
+++ b/Documentation/devicetree/bindings/dma/nvidia,tegra186-gpc-dma.yaml
@@ -23,7 +23,9 @@ properties:
oneOf:
- const: nvidia,tegra186-gpcdma
- items:
- - const: nvidia,tegra194-gpcdma
+ - enum:
+ - nvidia,tegra234-gpcdma
+ - nvidia,tegra194-gpcdma
- const: nvidia,tegra186-gpcdma
"#dma-cells":
diff --git a/Documentation/devicetree/bindings/dma/qcom,bam-dma.yaml b/Documentation/devicetree/bindings/dma/qcom,bam-dma.yaml
new file mode 100644
index 000000000000..9bf3a1b164f1
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/qcom,bam-dma.yaml
@@ -0,0 +1,100 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/qcom,bam-dma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Technologies Inc BAM DMA controller
+
+maintainers:
+ - Andy Gross <agross@kernel.org>
+ - Bjorn Andersson <bjorn.andersson@linaro.org>
+
+allOf:
+ - $ref: "dma-controller.yaml#"
+
+properties:
+ compatible:
+ enum:
+ # APQ8064, IPQ8064 and MSM8960
+ - qcom,bam-v1.3.0
+ # MSM8974, APQ8074 and APQ8084
+ - qcom,bam-v1.4.0
+ # MSM8916
+ - qcom,bam-v1.7.0
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: bam_clk
+
+ "#dma-cells":
+ const: 1
+
+ interrupts:
+ maxItems: 1
+
+ iommus:
+ minItems: 1
+ maxItems: 4
+
+ num-channels:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Indicates supported number of DMA channels in a remotely controlled bam.
+
+ qcom,controlled-remotely:
+ type: boolean
+ description:
+      Indicates that the bam is controlled by a remote processor, i.e. execution
+ environment.
+
+ qcom,ee:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 7
+ description:
+ Indicates the active Execution Environment identifier (0-7) used in the
+ secure world.
+
+ qcom,num-ees:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Indicates supported number of Execution Environments in a remotely
+ controlled bam.
+
+ qcom,powered-remotely:
+ type: boolean
+ description:
+ Indicates that the bam is powered up by a remote processor but must be
+ initialized by the local processor.
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - "#dma-cells"
+ - interrupts
+ - qcom,ee
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/qcom,gcc-msm8974.h>
+
+ dma-controller@f9944000 {
+ compatible = "qcom,bam-v1.4.0";
+ reg = <0xf9944000 0x15000>;
+ interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "bam_clk";
+ #dma-cells = <1>;
+ qcom,ee = <0>;
+ };
+...
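
   The removed qcom_bam_dma.txt (just below) also documented the client side; reproduced
   here as a reference sketch with the values from that text, adding the missing comma in
   reg and renaming the label to a valid DTS label:

       serial@f991e000 {
           compatible = "qcom,msm-uart";
           reg = <0xf991e000 0x1000>,
                 <0xf9944000 0x19000>;
           interrupts = <0 108 0>;
           clocks = <&gcc GCC_BLSP1_UART2_APPS_CLK>,
                    <&gcc GCC_BLSP1_AHB_CLK>;
           clock-names = "core", "iface";

           dmas = <&uart_bam 0>, <&uart_bam 1>;
           dma-names = "rx", "tx";
       };
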
diff --git a/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt b/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt
deleted file mode 100644
index 6e9a5497b3f2..000000000000
--- a/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-QCOM BAM DMA controller
-
-Required properties:
-- compatible: must be one of the following:
- * "qcom,bam-v1.4.0" for MSM8974, APQ8074 and APQ8084
- * "qcom,bam-v1.3.0" for APQ8064, IPQ8064 and MSM8960
- * "qcom,bam-v1.7.0" for MSM8916
-- reg: Address range for DMA registers
-- interrupts: Should contain the one interrupt shared by all channels
-- #dma-cells: must be <1>, the cell in the dmas property of the client device
- represents the channel number
-- clocks: required clock
-- clock-names: must contain "bam_clk" entry
-- qcom,ee : indicates the active Execution Environment identifier (0-7) used in
- the secure world.
-- qcom,controlled-remotely : optional, indicates that the bam is controlled by
- remote proccessor i.e. execution environment.
-- qcom,powered-remotely : optional, indicates that the bam is powered up by
- a remote processor but must be initialized by the local processor.
-- num-channels : optional, indicates supported number of DMA channels in a
- remotely controlled bam.
-- qcom,num-ees : optional, indicates supported number of Execution Environments
- in a remotely controlled bam.
-
-Example:
-
- uart-bam: dma@f9984000 = {
- compatible = "qcom,bam-v1.4.0";
- reg = <0xf9984000 0x15000>;
- interrupts = <0 94 0>;
- clocks = <&gcc GCC_BAM_DMA_AHB_CLK>;
- clock-names = "bam_clk";
- #dma-cells = <1>;
- qcom,ee = <0>;
- };
-
-DMA clients must use the format described in the dma.txt file, using a two cell
-specifier for each channel.
-
-Example:
- serial@f991e000 {
- compatible = "qcom,msm-uart";
- reg = <0xf991e000 0x1000>
- <0xf9944000 0x19000>;
- interrupts = <0 108 0>;
- clocks = <&gcc GCC_BLSP1_UART2_APPS_CLK>,
- <&gcc GCC_BLSP1_AHB_CLK>;
- clock-names = "core", "iface";
-
- dmas = <&uart-bam 0>, <&uart-bam 1>;
- dma-names = "rx", "tx";
- };
diff --git a/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml b/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml
index 4324a94b26b2..67aa7bb6d36a 100644
--- a/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml
+++ b/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml
@@ -34,7 +34,12 @@ properties:
- const: axidma_apb_regs
interrupts:
- maxItems: 1
+ description:
+      If the IP-core synthesis parameter DMAX_INTR_IO_TYPE is set to 1, these
+      will be per-channel interrupts. Otherwise, this is a single combined IRQ
+ for all channels.
+ minItems: 1
+ maxItems: 8
clocks:
items:
diff --git a/Documentation/devicetree/bindings/dma/ste-dma40.txt b/Documentation/devicetree/bindings/dma/ste-dma40.txt
deleted file mode 100644
index 99ab5c4d331e..000000000000
--- a/Documentation/devicetree/bindings/dma/ste-dma40.txt
+++ /dev/null
@@ -1,138 +0,0 @@
-* DMA40 DMA Controller
-
-Required properties:
-- compatible: "stericsson,dma40"
-- reg: Address range of the DMAC registers
-- reg-names: Names of the above areas to use during resource look-up
-- interrupt: Should contain the DMAC interrupt number
-- #dma-cells: must be <3>
-- memcpy-channels: Channels to be used for memcpy
-
-Optional properties:
-- dma-channels: Number of channels supported by hardware - if not present
- the driver will attempt to obtain the information from H/W
-- disabled-channels: Channels which can not be used
-
-Example:
-
- dma: dma-controller@801c0000 {
- compatible = "stericsson,db8500-dma40", "stericsson,dma40";
- reg = <0x801C0000 0x1000 0x40010000 0x800>;
- reg-names = "base", "lcpa";
- interrupt-parent = <&intc>;
- interrupts = <0 25 0x4>;
-
- #dma-cells = <2>;
- memcpy-channels = <56 57 58 59 60>;
- disabled-channels = <12>;
- dma-channels = <8>;
- };
-
-Clients
-Required properties:
-- dmas: Comma separated list of dma channel requests
-- dma-names: Names of the aforementioned requested channels
-
-Each dmas request consists of 4 cells:
- 1. A phandle pointing to the DMA controller
- 2. Device signal number, the signal line for single and burst requests
- connected from the device to the DMA40 engine
- 3. The DMA request line number (only when 'use fixed channel' is set)
- 4. A 32bit mask specifying; mode, direction and endianness
- [NB: This list will grow]
- 0x00000001: Mode:
- Logical channel when unset
- Physical channel when set
- 0x00000002: Direction:
- Memory to Device when unset
- Device to Memory when set
- 0x00000004: Endianness:
- Little endian when unset
- Big endian when set
- 0x00000008: Use fixed channel:
- Use automatic channel selection when unset
- Use DMA request line number when set
- 0x00000010: Set channel as high priority:
- Normal priority when unset
- High priority when set
-
-Existing signal numbers for the DB8500 ASIC. Unless specified, the signals are
-bidirectional, i.e. the same for RX and TX operations:
-
-0: SPI controller 0
-1: SD/MMC controller 0 (unused)
-2: SD/MMC controller 1 (unused)
-3: SD/MMC controller 2 (unused)
-4: I2C port 1
-5: I2C port 3
-6: I2C port 2
-7: I2C port 4
-8: Synchronous Serial Port SSP0
-9: Synchronous Serial Port SSP1
-10: Multi-Channel Display Engine MCDE RX
-11: UART port 2
-12: UART port 1
-13: UART port 0
-14: Multirate Serial Port MSP2
-15: I2C port 0
-16: USB OTG in/out endpoints 7 & 15
-17: USB OTG in/out endpoints 6 & 14
-18: USB OTG in/out endpoints 5 & 13
-19: USB OTG in/out endpoints 4 & 12
-20: SLIMbus or HSI channel 0
-21: SLIMbus or HSI channel 1
-22: SLIMbus or HSI channel 2
-23: SLIMbus or HSI channel 3
-24: Multimedia DSP SXA0
-25: Multimedia DSP SXA1
-26: Multimedia DSP SXA2
-27: Multimedia DSP SXA3
-28: SD/MM controller 2
-29: SD/MM controller 0
-30: MSP port 1 on DB8500 v1, MSP port 3 on DB8500 v2
-31: MSP port 0 or SLIMbus channel 0
-32: SD/MM controller 1
-33: SPI controller 2
-34: i2c3 RX2 TX2
-35: SPI controller 1
-36: USB OTG in/out endpoints 3 & 11
-37: USB OTG in/out endpoints 2 & 10
-38: USB OTG in/out endpoints 1 & 9
-39: USB OTG in/out endpoints 8
-40: SPI controller 3
-41: SD/MM controller 3
-42: SD/MM controller 4
-43: SD/MM controller 5
-44: Multimedia DSP SXA4
-45: Multimedia DSP SXA5
-46: SLIMbus channel 8 or Multimedia DSP SXA6
-47: SLIMbus channel 9 or Multimedia DSP SXA7
-48: Crypto Accelerator 1
-49: Crypto Accelerator 1 TX or Hash Accelerator 1 TX
-50: Hash Accelerator 1 TX
-51: memcpy TX (to be used by the DMA driver for memcpy operations)
-52: SLIMbus or HSI channel 4
-53: SLIMbus or HSI channel 5
-54: SLIMbus or HSI channel 6
-55: SLIMbus or HSI channel 7
-56: memcpy (to be used by the DMA driver for memcpy operations)
-57: memcpy (to be used by the DMA driver for memcpy operations)
-58: memcpy (to be used by the DMA driver for memcpy operations)
-59: memcpy (to be used by the DMA driver for memcpy operations)
-60: memcpy (to be used by the DMA driver for memcpy operations)
-61: Crypto Accelerator 0
-62: Crypto Accelerator 0 TX or Hash Accelerator 0 TX
-63: Hash Accelerator 0 TX
-
-Example:
-
- uart@80120000 {
- compatible = "arm,pl011", "arm,primecell";
- reg = <0x80120000 0x1000>;
- interrupts = <0 11 0x4>;
-
- dmas = <&dma 13 0 0x2>, /* Logical - DevToMem */
- <&dma 13 0 0x0>; /* Logical - MemToDev */
- dma-names = "rx", "rx";
-
- };
diff --git a/Documentation/devicetree/bindings/dma/stericsson,dma40.yaml b/Documentation/devicetree/bindings/dma/stericsson,dma40.yaml
new file mode 100644
index 000000000000..8bddfb3b6fa0
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/stericsson,dma40.yaml
@@ -0,0 +1,159 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/stericsson,dma40.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ST-Ericsson DMA40 DMA Engine
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+
+allOf:
+ - $ref: "dma-controller.yaml#"
+
+properties:
+ "#dma-cells":
+ const: 3
+ description: |
+ The first cell is the unique device channel number as indicated by this
+ table for DB8500 which is the only ASIC known to use DMA40:
+
+ 0: SPI controller 0
+ 1: SD/MMC controller 0 (unused)
+ 2: SD/MMC controller 1 (unused)
+ 3: SD/MMC controller 2 (unused)
+ 4: I2C port 1
+ 5: I2C port 3
+ 6: I2C port 2
+ 7: I2C port 4
+ 8: Synchronous Serial Port SSP0
+ 9: Synchronous Serial Port SSP1
+ 10: Multi-Channel Display Engine MCDE RX
+ 11: UART port 2
+ 12: UART port 1
+ 13: UART port 0
+ 14: Multirate Serial Port MSP2
+ 15: I2C port 0
+ 16: USB OTG in/out endpoints 7 & 15
+ 17: USB OTG in/out endpoints 6 & 14
+ 18: USB OTG in/out endpoints 5 & 13
+ 19: USB OTG in/out endpoints 4 & 12
+ 20: SLIMbus or HSI channel 0
+ 21: SLIMbus or HSI channel 1
+ 22: SLIMbus or HSI channel 2
+ 23: SLIMbus or HSI channel 3
+ 24: Multimedia DSP SXA0
+ 25: Multimedia DSP SXA1
+ 26: Multimedia DSP SXA2
+ 27: Multimedia DSP SXA3
+ 28: SD/MMC controller 2
+ 29: SD/MMC controller 0
+ 30: MSP port 1 on DB8500 v1, MSP port 3 on DB8500 v2
+ 31: MSP port 0 or SLIMbus channel 0
+ 32: SD/MMC controller 1
+ 33: SPI controller 2
+ 34: i2c3 RX2 TX2
+ 35: SPI controller 1
+ 36: USB OTG in/out endpoints 3 & 11
+ 37: USB OTG in/out endpoints 2 & 10
+ 38: USB OTG in/out endpoints 1 & 9
+ 39: USB OTG in/out endpoints 8
+ 40: SPI controller 3
+ 41: SD/MMC controller 3
+ 42: SD/MMC controller 4
+ 43: SD/MMC controller 5
+ 44: Multimedia DSP SXA4
+ 45: Multimedia DSP SXA5
+ 46: SLIMbus channel 8 or Multimedia DSP SXA6
+ 47: SLIMbus channel 9 or Multimedia DSP SXA7
+ 48: Crypto Accelerator 1
+ 49: Crypto Accelerator 1 TX or Hash Accelerator 1 TX
+ 50: Hash Accelerator 1 TX
+ 51: memcpy TX (to be used by the DMA driver for memcpy operations)
+ 52: SLIMbus or HSI channel 4
+ 53: SLIMbus or HSI channel 5
+ 54: SLIMbus or HSI channel 6
+ 55: SLIMbus or HSI channel 7
+ 56: memcpy (to be used by the DMA driver for memcpy operations)
+ 57: memcpy (to be used by the DMA driver for memcpy operations)
+ 58: memcpy (to be used by the DMA driver for memcpy operations)
+ 59: memcpy (to be used by the DMA driver for memcpy operations)
+ 60: memcpy (to be used by the DMA driver for memcpy operations)
+ 61: Crypto Accelerator 0
+ 62: Crypto Accelerator 0 TX or Hash Accelerator 0 TX
+ 63: Hash Accelerator 0 TX
+
+ The second cell is the DMA request line number. This is only used when
+ a fixed channel is allocated, and indicated by setting bit 3 in the
+ flags field (see below).
+
+ The third cell is a 32bit flags bitfield with the following possible
+ bits set:
+ 0x00000001 (bit 0) - mode:
+ Logical channel when unset
+ Physical channel when set
+ 0x00000002 (bit 1) - direction:
+ Memory to Device when unset
+ Device to Memory when set
+ 0x00000004 (bit 2) - endianness:
+ Little endian when unset
+ Big endian when set
+ 0x00000008 (bit 3) - use fixed channel:
+ Use automatic channel selection when unset
+ Use DMA request line number when set
+ 0x00000010 (bit 4) - set channel as high priority:
+ Normal priority when unset
+ High priority when set
+
+ compatible:
+ items:
+ - const: stericsson,db8500-dma40
+ - const: stericsson,dma40
+
+ reg:
+ items:
+ - description: DMA40 memory base
+ - description: LCPA memory base
+
+ reg-names:
+ items:
+ - const: base
+ - const: lcpa
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ memcpy-channels:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ description: Array of u32 elements indicating which channels on the DMA
+      engine are eligible for memcpy transfers
+
+required:
+ - "#dma-cells"
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - memcpy-channels
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/mfd/dbx500-prcmu.h>
+ dma-controller@801C0000 {
+ compatible = "stericsson,db8500-dma40", "stericsson,dma40";
+ reg = <0x801C0000 0x1000>, <0x40010000 0x800>;
+ reg-names = "base", "lcpa";
+ interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
+ #dma-cells = <3>;
+ memcpy-channels = <56 57 58 59 60>;
+ clocks = <&prcmu_clk PRCMU_DMACLK>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/dsp/mediatek,mt8186-dsp.yaml b/Documentation/devicetree/bindings/dsp/mediatek,mt8186-dsp.yaml
new file mode 100644
index 000000000000..3e63f79890b4
--- /dev/null
+++ b/Documentation/devicetree/bindings/dsp/mediatek,mt8186-dsp.yaml
@@ -0,0 +1,91 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dsp/mediatek,mt8186-dsp.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek mt8186 DSP core
+
+maintainers:
+ - Tinghan Shen <tinghan.shen@mediatek.com>
+
+description: |
+ MediaTek mt8186 SoC contains a DSP core used for
+ advanced audio pre- and post-processing.
+
+properties:
+ compatible:
+ const: mediatek,mt8186-dsp
+
+ reg:
+ items:
+ - description: Address and size of the DSP config registers
+ - description: Address and size of the DSP SRAM
+ - description: Address and size of the DSP secure registers
+ - description: Address and size of the DSP bus registers
+
+ reg-names:
+ items:
+ - const: cfg
+ - const: sram
+ - const: sec
+ - const: bus
+
+ clocks:
+ items:
+ - description: mux for audio dsp clock
+ - description: mux for audio dsp local bus
+
+ clock-names:
+ items:
+ - const: audiodsp
+ - const: adsp_bus
+
+ power-domains:
+ maxItems: 1
+
+ mboxes:
+ items:
+ - description: mailbox for receiving audio DSP requests.
+ - description: mailbox for transmitting requests to audio DSP.
+
+ mbox-names:
+ items:
+ - const: rx
+ - const: tx
+
+ memory-region:
+ items:
+ - description: dma buffer between host and DSP.
+ - description: DSP system memory.
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - clocks
+ - clock-names
+ - power-domains
+ - mbox-names
+ - mboxes
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/mt8186-clk.h>
+ dsp@10680000 {
+ compatible = "mediatek,mt8186-dsp";
+ reg = <0x10680000 0x2000>,
+ <0x10800000 0x100000>,
+ <0x1068b000 0x100>,
+ <0x1068f000 0x1000>;
+ reg-names = "cfg", "sram", "sec", "bus";
+ clocks = <&topckgen CLK_TOP_AUDIODSP>,
+ <&topckgen CLK_TOP_ADSP_BUS>;
+ clock-names = "audiodsp",
+ "adsp_bus";
+ power-domains = <&spm 6>;
+ mbox-names = "rx", "tx";
+ mboxes = <&adsp_mailbox0>, <&adsp_mailbox1>;
+ };
diff --git a/Documentation/devicetree/bindings/dsp/mediatek,mt8195-dsp.yaml b/Documentation/devicetree/bindings/dsp/mediatek,mt8195-dsp.yaml
index b7e68b0dfa13..ca8d8661f872 100644
--- a/Documentation/devicetree/bindings/dsp/mediatek,mt8195-dsp.yaml
+++ b/Documentation/devicetree/bindings/dsp/mediatek,mt8195-dsp.yaml
@@ -50,13 +50,13 @@ properties:
mboxes:
items:
- - description: ipc reply between host and audio DSP.
- - description: ipc request between host and audio DSP.
+ - description: mailbox for receiving audio DSP requests.
+ - description: mailbox for transmitting requests to audio DSP.
mbox-names:
items:
- - const: mbox0
- - const: mbox1
+ - const: rx
+ - const: tx
memory-region:
items:
@@ -100,6 +100,6 @@ examples:
memory-region = <&adsp_dma_mem_reserved>,
<&adsp_mem_reserved>;
power-domains = <&spm 6>; //MT8195_POWER_DOMAIN_ADSP
- mbox-names = "mbox0", "mbox1";
+ mbox-names = "rx", "tx";
mboxes = <&adsp_mailbox0>, <&adsp_mailbox1>;
};
diff --git a/Documentation/devicetree/bindings/eeprom/at25.yaml b/Documentation/devicetree/bindings/eeprom/at25.yaml
index fbf99e346966..8b1c997caac1 100644
--- a/Documentation/devicetree/bindings/eeprom/at25.yaml
+++ b/Documentation/devicetree/bindings/eeprom/at25.yaml
@@ -44,8 +44,6 @@ properties:
reg:
maxItems: 1
- spi-max-frequency: true
-
pagesize:
$ref: /schemas/types.yaml#/definitions/uint32
enum: [1, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072]
@@ -105,6 +103,7 @@ required:
- spi-max-frequency
allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
- if:
properties:
compatible:
@@ -117,7 +116,7 @@ allOf:
- size
- address-width
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/misc/eeprom-93xx46.yaml b/Documentation/devicetree/bindings/eeprom/microchip,93lc46b.yaml
index 44fd2f6f0d8a..0c2f5ddb79c5 100644
--- a/Documentation/devicetree/bindings/misc/eeprom-93xx46.yaml
+++ b/Documentation/devicetree/bindings/eeprom/microchip,93lc46b.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/misc/eeprom-93xx46.yaml#
+$id: http://devicetree.org/schemas/eeprom/microchip,93lc46b.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Microchip 93xx46 SPI compatible EEPROM family dt bindings
@@ -28,9 +28,6 @@ properties:
description: chip select of EEPROM
maxItems: 1
- spi-max-frequency: true
- spi-cs-high: true
-
read-only:
description:
parameter-less property which disables writes to the EEPROM
@@ -42,14 +39,16 @@ properties:
of EEPROM (e.g. for SPI bus multiplexing)
maxItems: 1
-
required:
- compatible
- reg
- data-size
- spi-max-frequency
-additionalProperties: false
+allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/fpga/fpga-region.txt b/Documentation/devicetree/bindings/fpga/fpga-region.txt
index 7d3515264838..6694ef29a267 100644
--- a/Documentation/devicetree/bindings/fpga/fpga-region.txt
+++ b/Documentation/devicetree/bindings/fpga/fpga-region.txt
@@ -330,7 +330,7 @@ succeeded.
The Device Tree Overlay will contain:
* "target-path" or "target"
- The insertion point where the the contents of the overlay will go into the
+ The insertion point where the contents of the overlay will go into the
live tree. target-path is a full path, while target is a phandle.
* "ranges"
The address space mapping from processor to FPGA bus(ses).
diff --git a/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.yaml b/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.yaml
index f57d22d1ebd6..ae18603697d7 100644
--- a/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.yaml
+++ b/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.yaml
@@ -37,6 +37,8 @@ properties:
- fsl,imx8mp-gpio
- fsl,imx8mq-gpio
- fsl,imx8qxp-gpio
+ - fsl,imxrt1050-gpio
+ - fsl,imxrt1170-gpio
- const: fsl,imx35-gpio
reg:
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
deleted file mode 100644
index 0fc6700ed800..000000000000
--- a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
+++ /dev/null
@@ -1,93 +0,0 @@
-* Marvell EBU GPIO controller
-
-Required properties:
-
-- compatible : Should be "marvell,orion-gpio", "marvell,mv78200-gpio",
- "marvell,armadaxp-gpio" or "marvell,armada-8k-gpio".
-
- "marvell,orion-gpio" should be used for Orion, Kirkwood, Dove,
- Discovery (except MV78200) and Armada 370. "marvell,mv78200-gpio"
- should be used for the Discovery MV78200.
-
- "marvel,armadaxp-gpio" should be used for all Armada XP SoCs
- (MV78230, MV78260, MV78460).
-
- "marvell,armada-8k-gpio" should be used for the Armada 7K and 8K
- SoCs (either from AP or CP), see
- Documentation/devicetree/bindings/arm/marvell/ap80x-system-controller.txt
- for specific details about the offset property.
-
-- reg: Address and length of the register set for the device. Only one
- entry is expected, except for the "marvell,armadaxp-gpio" variant
- for which two entries are expected: one for the general registers,
- one for the per-cpu registers. Not used for marvell,armada-8k-gpio.
-
-- interrupts: The list of interrupts that are used for all the pins
- managed by this GPIO bank. There can be more than one interrupt
- (example: 1 interrupt per 8 pins on Armada XP, which means 4
- interrupts per bank of 32 GPIOs).
-
-- interrupt-controller: identifies the node as an interrupt controller
-
-- #interrupt-cells: specifies the number of cells needed to encode an
- interrupt source. Should be two.
- The first cell is the GPIO number.
- The second cell is used to specify flags:
- bits[3:0] trigger type and level flags:
- 1 = low-to-high edge triggered.
- 2 = high-to-low edge triggered.
- 4 = active high level-sensitive.
- 8 = active low level-sensitive.
-
-- gpio-controller: marks the device node as a gpio controller
-
-- ngpios: number of GPIOs this controller has
-
-- #gpio-cells: Should be two. The first cell is the pin number. The
- second cell is reserved for flags, unused at the moment.
-
-Optional properties:
-
-In order to use the GPIO lines in PWM mode, some additional optional
-properties are required.
-
-- compatible: Must contain "marvell,armada-370-gpio"
-
-- reg: an additional register set is needed, for the GPIO Blink
- Counter on/off registers.
-
-- reg-names: Must contain an entry "pwm" corresponding to the
- additional register range needed for PWM operation.
-
-- #pwm-cells: Should be two. The first cell is the GPIO line number. The
- second cell is the period in nanoseconds.
-
-- clocks: Must be a phandle to the clock for the GPIO controller.
-
-Example:
-
- gpio0: gpio@d0018100 {
- compatible = "marvell,armadaxp-gpio";
- reg = <0xd0018100 0x40>,
- <0xd0018800 0x30>;
- ngpios = <32>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- interrupts = <16>, <17>, <18>, <19>;
- };
-
- gpio1: gpio@18140 {
- compatible = "marvell,armada-370-gpio";
- reg = <0x18140 0x40>, <0x181c8 0x08>;
- reg-names = "gpio", "pwm";
- ngpios = <17>;
- gpio-controller;
- #gpio-cells = <2>;
- #pwm-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- interrupts = <87>, <88>, <89>;
- clocks = <&coreclk 0>;
- };
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mvebu.yaml b/Documentation/devicetree/bindings/gpio/gpio-mvebu.yaml
new file mode 100644
index 000000000000..f1bd1e6b2e1f
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-mvebu.yaml
@@ -0,0 +1,146 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpio/gpio-mvebu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell EBU GPIO controller
+
+maintainers:
+ - Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ - Andrew Lunn <andrew@lunn.ch>
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - marvell,armada-8k-gpio
+ - marvell,orion-gpio
+
+ - items:
+ - enum:
+ - marvell,mv78200-gpio
+ - marvell,armada-370-gpio
+ - const: marvell,orion-gpio
+
+ - description: Deprecated binding
+ items:
+ - const: marvell,armadaxp-gpio
+ - const: marvell,orion-gpio
+ deprecated: true
+
+ reg:
+ description: |
+ Address and length of the register set for the device. Not used for
+ marvell,armada-8k-gpio.
+
+ A second entry can be provided, for the PWM function using the GPIO Blink
+ Counter on/off registers.
+ minItems: 1
+ maxItems: 2
+
+ reg-names:
+ items:
+ - const: gpio
+ - const: pwm
+ minItems: 1
+
+ offset:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Offset in the register map for the gpio registers (in bytes)
+
+ interrupts:
+ description: |
+ The list of interrupts that are used for all the pins managed by this
+ GPIO bank. There can be more than one interrupt (example: 1 interrupt
+ per 8 pins on Armada XP, which means 4 interrupts per bank of 32
+ GPIOs).
+ minItems: 1
+ maxItems: 4
+
+ interrupt-controller: true
+
+ "#interrupt-cells":
+ const: 2
+
+ gpio-controller: true
+
+ ngpios:
+ minimum: 1
+ maximum: 32
+
+ "#gpio-cells":
+ const: 2
+
+ marvell,pwm-offset:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Offset in the register map for the pwm registers (in bytes)
+
+ "#pwm-cells":
+ description:
+ The first cell is the GPIO line number. The second cell is the period
+ in nanoseconds.
+ const: 2
+
+ clocks:
+ description:
+ Clock(s) used for PWM function.
+ items:
+ - description: Core clock
+ - description: AXI bus clock
+ minItems: 1
+
+ clock-names:
+ items:
+ - const: core
+ - const: axi
+ minItems: 1
+
+required:
+ - compatible
+ - gpio-controller
+ - ngpios
+ - "#gpio-cells"
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: marvell,armada-8k-gpio
+ then:
+ required:
+ - offset
+ else:
+ required:
+ - reg
+
+unevaluatedProperties: true
+
+examples:
+ - |
+ gpio@d0018100 {
+ compatible = "marvell,armadaxp-gpio", "marvell,orion-gpio";
+ reg = <0xd0018100 0x40>, <0xd0018800 0x30>;
+ ngpios = <32>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <16>, <17>, <18>, <19>;
+ };
+
+ - |
+ gpio@18140 {
+ compatible = "marvell,armada-370-gpio", "marvell,orion-gpio";
+ reg = <0x18140 0x40>, <0x181c8 0x08>;
+ reg-names = "gpio", "pwm";
+ ngpios = <17>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ #pwm-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <87>, <88>, <89>;
+ clocks = <&coreclk 0>;
+ };
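+
+ # A minimal sketch of the marvell,armada-8k-gpio variant, which uses the
+ # offset property instead of reg; the values below are illustrative only.
+ - |
+ gpio@1040 {
+ compatible = "marvell,armada-8k-gpio";
+ offset = <0x1040>;
+ ngpios = <20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+ # A PWM-capable bank such as the armada-370 example above can then be
+ # referenced by a consumer, e.g. pwms = <&gpio1 3 1000000>; for GPIO
+ # line 3 with a 1 ms period.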
diff --git a/Documentation/devicetree/bindings/gpio/gpio-pca9570.yaml b/Documentation/devicetree/bindings/gpio/gpio-pca9570.yaml
index 338c5312a106..1acaa0a3d35a 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-pca9570.yaml
+++ b/Documentation/devicetree/bindings/gpio/gpio-pca9570.yaml
@@ -13,6 +13,7 @@ properties:
compatible:
enum:
- nxp,pca9570
+ - nxp,pca9571
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/gpio/gpio-pisosr.txt b/Documentation/devicetree/bindings/gpio/gpio-pisosr.txt
index 414a01cdf715..fba3c61f6a5b 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-pisosr.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-pisosr.txt
@@ -14,7 +14,7 @@ Optional properties:
- ngpios : Number of used GPIO lines (0..n-1), default is 8.
- load-gpios : GPIO pin specifier attached to load enable, this
pin is pulsed before reading from the device to
- load input pin values into the the device.
+ load input pin values into the device.
For other required and optional properties of SPI slave
nodes please refer to ../spi/spi-bus.txt.
diff --git a/Documentation/devicetree/bindings/gpio/gpio-tpic2810.txt b/Documentation/devicetree/bindings/gpio/gpio-tpic2810.txt
deleted file mode 100644
index 1afc2de7a537..000000000000
--- a/Documentation/devicetree/bindings/gpio/gpio-tpic2810.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-TPIC2810 GPIO controller bindings
-
-Required properties:
- - compatible : Should be "ti,tpic2810".
- - reg : The I2C address of the device
- - gpio-controller : Marks the device node as a GPIO controller.
- - #gpio-cells : Should be two. For consumer use see gpio.txt.
-
-Example:
-
- gpio@60 {
- compatible = "ti,tpic2810";
- reg = <0x60>;
- gpio-controller;
- #gpio-cells = <2>;
- };
diff --git a/Documentation/devicetree/bindings/gpio/gpio-tpic2810.yaml b/Documentation/devicetree/bindings/gpio/gpio-tpic2810.yaml
new file mode 100644
index 000000000000..cb8a5c376e1e
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-tpic2810.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpio/gpio-tpic2810.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TPIC2810 GPIO controller bindings
+
+maintainers:
+ - Aswath Govindraju <a-govindraju@ti.com>
+
+properties:
+ compatible:
+ enum:
+ - ti,tpic2810
+
+ reg:
+ maxItems: 1
+
+ gpio-controller: true
+
+ "#gpio-cells":
+ const: 2
+
+ gpio-line-names:
+ minItems: 1
+ maxItems: 32
+
+required:
+ - compatible
+ - reg
+ - gpio-controller
+ - "#gpio-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio@60 {
+ compatible = "ti,tpic2810";
+ reg = <0x60>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-line-names = "LED A", "LED B", "LED C";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/gpio/renesas,rcar-gpio.yaml b/Documentation/devicetree/bindings/gpio/renesas,rcar-gpio.yaml
index 0681a4790cd6..75e5da6a7cc0 100644
--- a/Documentation/devicetree/bindings/gpio/renesas,rcar-gpio.yaml
+++ b/Documentation/devicetree/bindings/gpio/renesas,rcar-gpio.yaml
@@ -49,10 +49,8 @@ properties:
- const: renesas,rcar-gen3-gpio # R-Car Gen3 or RZ/G2
- items:
- - const: renesas,gpio-r8a779a0 # R-Car V3U
-
- - items:
- enum:
+ - renesas,gpio-r8a779a0 # R-Car V3U
- renesas,gpio-r8a779f0 # R-Car S4-8
- const: renesas,rcar-gen4-gpio # R-Car Gen4
diff --git a/Documentation/devicetree/bindings/gpio/rockchip,gpio-bank.yaml b/Documentation/devicetree/bindings/gpio/rockchip,gpio-bank.yaml
index d4e42c2b995b..affd823c881d 100644
--- a/Documentation/devicetree/bindings/gpio/rockchip,gpio-bank.yaml
+++ b/Documentation/devicetree/bindings/gpio/rockchip,gpio-bank.yaml
@@ -27,6 +27,8 @@ properties:
- description: APB interface clock source
- description: GPIO debounce reference clock source
+ gpio-ranges: true
+
gpio-controller: true
gpio-line-names: true
diff --git a/Documentation/devicetree/bindings/gpio/sifive,gpio.yaml b/Documentation/devicetree/bindings/gpio/sifive,gpio.yaml
index 939e31c48081..fc095646adea 100644
--- a/Documentation/devicetree/bindings/gpio/sifive,gpio.yaml
+++ b/Documentation/devicetree/bindings/gpio/sifive,gpio.yaml
@@ -46,6 +46,10 @@ properties:
maximum: 32
default: 16
+ gpio-line-names:
+ minItems: 1
+ maxItems: 32
+
gpio-controller: true
required:
diff --git a/Documentation/devicetree/bindings/gpio/x-powers,axp209-gpio.yaml b/Documentation/devicetree/bindings/gpio/x-powers,axp209-gpio.yaml
index 0f628b088cec..14486aee97b4 100644
--- a/Documentation/devicetree/bindings/gpio/x-powers,axp209-gpio.yaml
+++ b/Documentation/devicetree/bindings/gpio/x-powers,axp209-gpio.yaml
@@ -19,8 +19,14 @@ properties:
oneOf:
- enum:
- x-powers,axp209-gpio
+ - x-powers,axp221-gpio
- x-powers,axp813-gpio
- items:
+ - enum:
+ - x-powers,axp223-gpio
+ - x-powers,axp809-gpio
+ - const: x-powers,axp221-gpio
+ - items:
- const: x-powers,axp803-gpio
- const: x-powers,axp813-gpio
diff --git a/Documentation/devicetree/bindings/soc/samsung/exynos-chipid.yaml b/Documentation/devicetree/bindings/hwinfo/samsung,exynos-chipid.yaml
index 4bb8efb83ac1..95cbdcb56efe 100644
--- a/Documentation/devicetree/bindings/soc/samsung/exynos-chipid.yaml
+++ b/Documentation/devicetree/bindings/hwinfo/samsung,exynos-chipid.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
-$id: http://devicetree.org/schemas/soc/samsung/exynos-chipid.yaml#
+$id: http://devicetree.org/schemas/hwinfo/samsung,exynos-chipid.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung Exynos SoC series Chipid driver
diff --git a/Documentation/devicetree/bindings/hwinfo/samsung,s5pv210-chipid.yaml b/Documentation/devicetree/bindings/hwinfo/samsung,s5pv210-chipid.yaml
new file mode 100644
index 000000000000..563ded4fca83
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwinfo/samsung,s5pv210-chipid.yaml
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/hwinfo/samsung,s5pv210-chipid.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung S5PV210 SoC ChipID
+
+maintainers:
+ - Krzysztof Kozlowski <krzk@kernel.org>
+
+properties:
+ compatible:
+ const: samsung,s5pv210-chipid
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ chipid@e0000000 {
+ compatible = "samsung,s5pv210-chipid";
+ reg = <0xe0000000 0x1000>;
+ };
diff --git a/Documentation/devicetree/bindings/soc/ti/k3-socinfo.yaml b/Documentation/devicetree/bindings/hwinfo/ti,k3-socinfo.yaml
index a1a8423b2e2e..dada28b47ea0 100644
--- a/Documentation/devicetree/bindings/soc/ti/k3-socinfo.yaml
+++ b/Documentation/devicetree/bindings/hwinfo/ti,k3-socinfo.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/soc/ti/k3-socinfo.yaml#
+$id: http://devicetree.org/schemas/hwinfo/ti,k3-socinfo.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Texas Instruments K3 Multicore SoC platforms chipid module
diff --git a/Documentation/devicetree/bindings/hwmon/adi,adm1177.yaml b/Documentation/devicetree/bindings/hwmon/adi,adm1177.yaml
index 154bee851139..d794deb08bb7 100644
--- a/Documentation/devicetree/bindings/hwmon/adi,adm1177.yaml
+++ b/Documentation/devicetree/bindings/hwmon/adi,adm1177.yaml
@@ -8,7 +8,6 @@ title: Analog Devices ADM1177 Hot Swap Controller and Digital Power Monitor
maintainers:
- Michael Hennerich <michael.hennerich@analog.com>
- - Beniamin Bia <beniamin.bia@analog.com>
description: |
Analog Devices ADM1177 Hot Swap Controller and Digital Power Monitor
diff --git a/Documentation/devicetree/bindings/hwmon/adt7475.yaml b/Documentation/devicetree/bindings/hwmon/adt7475.yaml
index 56baf2e5c6d1..ea595102a86e 100644
--- a/Documentation/devicetree/bindings/hwmon/adt7475.yaml
+++ b/Documentation/devicetree/bindings/hwmon/adt7475.yaml
@@ -57,7 +57,7 @@ patternProperties:
Configures bypassing the individual voltage input attenuator. If
set to 1 the attenuator is bypassed if set to 0 the attenuator is
not bypassed. If the property is absent then the attenuator
- retains it's configuration from the bios/bootloader.
+ retains its configuration from the bios/bootloader.
$ref: /schemas/types.yaml#/definitions/uint32
enum: [0, 1]
diff --git a/Documentation/devicetree/bindings/i2c/ibm,p8-occ-hwmon.txt b/Documentation/devicetree/bindings/hwmon/ibm,p8-occ-hwmon.txt
index 5dc5d2e2573d..5dc5d2e2573d 100644
--- a/Documentation/devicetree/bindings/i2c/ibm,p8-occ-hwmon.txt
+++ b/Documentation/devicetree/bindings/hwmon/ibm,p8-occ-hwmon.txt
diff --git a/Documentation/devicetree/bindings/i2c/i2c-efm32.txt b/Documentation/devicetree/bindings/i2c/i2c-efm32.txt
deleted file mode 100644
index 3b30e54ae3c7..000000000000
--- a/Documentation/devicetree/bindings/i2c/i2c-efm32.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-* Energymicro efm32 i2c controller
-
-Required properties :
-
- - reg : Offset and length of the register set for the device
- - compatible : should be "energymicro,efm32-i2c"
- - interrupts : the interrupt number
- - clocks : reference to the module clock
-
-Recommended properties :
-
- - clock-frequency : maximal I2C bus clock frequency in Hz.
- - energymicro,location : Decides the location of the USART I/O pins.
- Allowed range : [0 .. 6]
-
-Example:
- i2c0: i2c@4000a000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "energymicro,efm32-i2c";
- reg = <0x4000a000 0x400>;
- interrupts = <9>;
- clocks = <&cmu clk_HFPERCLKI2C0>;
- clock-frequency = <100000>;
- energymicro,location = <3>;
-
- eeprom@50 {
- compatible = "microchip,24c02";
- reg = <0x50>;
- pagesize = <16>;
- };
- };
-
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mt65xx.yaml b/Documentation/devicetree/bindings/i2c/i2c-mt65xx.yaml
index 16a1a3118204..4e730fb7be56 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mt65xx.yaml
+++ b/Documentation/devicetree/bindings/i2c/i2c-mt65xx.yaml
@@ -27,6 +27,7 @@ properties:
- const: mediatek,mt8173-i2c
- const: mediatek,mt8183-i2c
- const: mediatek,mt8186-i2c
+ - const: mediatek,mt8188-i2c
- const: mediatek,mt8192-i2c
- items:
- enum:
diff --git a/Documentation/devicetree/bindings/i2c/i2c-qcom-cci.txt b/Documentation/devicetree/bindings/i2c/i2c-qcom-cci.txt
deleted file mode 100644
index 166865e48849..000000000000
--- a/Documentation/devicetree/bindings/i2c/i2c-qcom-cci.txt
+++ /dev/null
@@ -1,96 +0,0 @@
-Qualcomm Camera Control Interface (CCI) I2C controller
-
-PROPERTIES:
-
-- compatible:
- Usage: required
- Value type: <string>
- Definition: must be one of:
- "qcom,msm8916-cci"
- "qcom,msm8974-cci"
- "qcom,msm8996-cci"
- "qcom,sdm845-cci"
- "qcom,sm8250-cci"
- "qcom,sm8450-cci"
-
-- reg
- Usage: required
- Value type: <prop-encoded-array>
- Definition: base address CCI I2C controller and length of memory
- mapped region.
-
-- interrupts:
- Usage: required
- Value type: <prop-encoded-array>
- Definition: specifies the CCI I2C interrupt. The format of the
- specifier is defined by the binding document describing
- the node's interrupt parent.
-
-- clocks:
- Usage: required
- Value type: <prop-encoded-array>
- Definition: a list of phandle, should contain an entry for each
- entries in clock-names.
-
-- clock-names
- Usage: required
- Value type: <string>
- Definition: a list of clock names, must include "cci" clock.
-
-- power-domains
- Usage: required for "qcom,msm8996-cci"
- Value type: <prop-encoded-array>
- Definition:
-
-SUBNODES:
-
-The CCI provides I2C masters for one (msm8916) or two i2c busses (msm8974,
-msm8996, sdm845, sm8250 and sm8450), described as subdevices named "i2c-bus@0"
-and "i2c-bus@1".
-
-PROPERTIES:
-
-- reg:
- Usage: required
- Value type: <u32>
- Definition: Index of the CCI bus/master
-
-- clock-frequency:
- Usage: optional
- Value type: <u32>
- Definition: Desired I2C bus clock frequency in Hz, defaults to 100
- kHz if omitted.
-
-Example:
-
- cci@a0c000 {
- compatible = "qcom,msm8996-cci";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0xa0c000 0x1000>;
- interrupts = <GIC_SPI 295 IRQ_TYPE_EDGE_RISING>;
- clocks = <&mmcc MMSS_MMAGIC_AHB_CLK>,
- <&mmcc CAMSS_TOP_AHB_CLK>,
- <&mmcc CAMSS_CCI_AHB_CLK>,
- <&mmcc CAMSS_CCI_CLK>,
- <&mmcc CAMSS_AHB_CLK>;
- clock-names = "mmss_mmagic_ahb",
- "camss_top_ahb",
- "cci_ahb",
- "cci",
- "camss_ahb";
-
- i2c-bus@0 {
- reg = <0>;
- clock-frequency = <400000>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- i2c-bus@1 {
- reg = <1>;
- clock-frequency = <400000>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
- };
diff --git a/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml b/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml
new file mode 100644
index 000000000000..90c9e401229e
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml
@@ -0,0 +1,242 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i2c/qcom,i2c-cci.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Camera Control Interface (CCI) I2C controller
+
+maintainers:
+ - Loic Poulain <loic.poulain@linaro.org>
+ - Robert Foss <robert.foss@linaro.org>
+
+properties:
+ compatible:
+ enum:
+ - qcom,msm8916-cci
+ - qcom,msm8974-cci
+ - qcom,msm8996-cci
+ - qcom,sdm845-cci
+ - qcom,sm8250-cci
+ - qcom,sm8450-cci
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ clocks:
+ minItems: 4
+ maxItems: 6
+
+ clock-names:
+ minItems: 4
+ maxItems: 6
+
+ interrupts:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ reg:
+ maxItems: 1
+
+patternProperties:
+ "^i2c-bus@[01]$":
+ $ref: /schemas/i2c/i2c-controller.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ reg:
+ maxItems: 1
+
+ clock-frequency:
+ default: 100000
+
+required:
+ - compatible
+ - clock-names
+ - clocks
+ - interrupts
+ - reg
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,msm8996-cci
+ then:
+ required:
+ - power-domains
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,msm8916-cci
+ then:
+ properties:
+ i2c-bus@1: false
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,msm8916-cci
+ - qcom,msm8996-cci
+ then:
+ properties:
+ clocks:
+ maxItems: 4
+ clock-names:
+ items:
+ - const: camss_top_ahb
+ - const: cci_ahb
+ - const: cci
+ - const: camss_ahb
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sdm845-cci
+ then:
+ properties:
+ clocks:
+ minItems: 6
+ clock-names:
+ items:
+ - const: camnoc_axi
+ - const: soc_ahb
+ - const: slow_ahb_src
+ - const: cpas_ahb
+ - const: cci
+ - const: cci_src
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sm8250-cci
+ then:
+ properties:
+ clocks:
+ minItems: 5
+ maxItems: 5
+ clock-names:
+ items:
+ - const: camnoc_axi
+ - const: slow_ahb_src
+ - const: cpas_ahb
+ - const: cci
+ - const: cci_src
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,camcc-sdm845.h>
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ cci@ac4a000 {
+ reg = <0x0ac4a000 0x4000>;
+ compatible = "qcom,sdm845-cci";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ interrupts = <GIC_SPI 460 IRQ_TYPE_EDGE_RISING>;
+ power-domains = <&clock_camcc TITAN_TOP_GDSC>;
+
+ clocks = <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+ <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+ <&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+ <&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+ <&clock_camcc CAM_CC_CCI_CLK>,
+ <&clock_camcc CAM_CC_CCI_CLK_SRC>;
+ clock-names = "camnoc_axi",
+ "soc_ahb",
+ "slow_ahb_src",
+ "cpas_ahb",
+ "cci",
+ "cci_src";
+
+ assigned-clocks = <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+ <&clock_camcc CAM_CC_CCI_CLK>;
+ assigned-clock-rates = <80000000>,
+ <37500000>;
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&cci0_default &cci1_default>;
+ pinctrl-1 = <&cci0_sleep &cci1_sleep>;
+
+ i2c-bus@0 {
+ reg = <0>;
+ clock-frequency = <1000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ camera@10 {
+ compatible = "ovti,ov8856";
+ reg = <0x10>;
+
+ reset-gpios = <&tlmm 9 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cam0_default>;
+
+ clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+ clock-names = "xvclk";
+ clock-frequency = <19200000>;
+
+ dovdd-supply = <&vreg_lvs1a_1p8>;
+ avdd-supply = <&cam0_avdd_2v8>;
+ dvdd-supply = <&cam0_dvdd_1v2>;
+
+ port {
+ ov8856_ep: endpoint {
+ link-frequencies = /bits/ 64 <360000000 180000000>;
+ data-lanes = <1 2 3 4>;
+ remote-endpoint = <&csiphy0_ep>;
+ };
+ };
+ };
+ };
+
+ cci_i2c1: i2c-bus@1 {
+ reg = <1>;
+ clock-frequency = <1000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ camera@60 {
+ compatible = "ovti,ov7251";
+ reg = <0x60>;
+
+ enable-gpios = <&tlmm 21 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cam3_default>;
+
+ clocks = <&clock_camcc CAM_CC_MCLK3_CLK>;
+ clock-names = "xclk";
+ clock-frequency = <24000000>;
+
+ vdddo-supply = <&vreg_lvs1a_1p8>;
+ vdda-supply = <&cam3_avdd_2v8>;
+
+ port {
+ ov7251_ep: endpoint {
+ data-lanes = <0 1>;
+ remote-endpoint = <&csiphy3_ep>;
+ };
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/accel/fsl,mma7455.yaml b/Documentation/devicetree/bindings/iio/accel/fsl,mma7455.yaml
index 7c8f8bdc2333..9c7c66feeffc 100644
--- a/Documentation/devicetree/bindings/iio/accel/fsl,mma7455.yaml
+++ b/Documentation/devicetree/bindings/iio/accel/fsl,mma7455.yaml
@@ -7,7 +7,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Freescale MMA7455 and MMA7456 three axis accelerometers
maintainers:
- - Joachim Eastwood <manabian@gmail.com>
- Jonathan Cameron <jic23@kernel.org>
description:
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7091r5.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7091r5.yaml
index 31ffa275f5fa..b97559f23b3a 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7091r5.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7091r5.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Analog Devices AD7091R5 4-Channel 12-Bit ADC
maintainers:
- - Beniamin Bia <beniamin.bia@analog.com>
+ - Michael Hennerich <michael.hennerich@analog.com>
description: |
Analog Devices AD7091R5 4-Channel 12-Bit ADC
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
index 73775174cf57..516fc24d3346 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
@@ -7,8 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Analog Devices AD7606 Simultaneous Sampling ADC
maintainers:
- - Beniamin Bia <beniamin.bia@analog.com>
- - Stefan Popa <stefan.popa@analog.com>
+ - Michael Hennerich <michael.hennerich@analog.com>
description: |
Analog Devices AD7606 Simultaneous Sampling ADC
diff --git a/Documentation/devicetree/bindings/iio/adc/nxp,lpc1850-adc.yaml b/Documentation/devicetree/bindings/iio/adc/nxp,lpc1850-adc.yaml
index 6404fb73f8ed..43abb300fa3d 100644
--- a/Documentation/devicetree/bindings/iio/adc/nxp,lpc1850-adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/nxp,lpc1850-adc.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: NXP LPC1850 ADC bindings
maintainers:
- - Joachim Eastwood <manabian@gmail.com>
+ - Jonathan Cameron <jic23@kernel.org>
description:
Supports the ADC found on the LPC1850 SoC.
diff --git a/Documentation/devicetree/bindings/iio/adc/ti,adc108s102.yaml b/Documentation/devicetree/bindings/iio/adc/ti,adc108s102.yaml
index 54955f03df93..ae5ce60987fe 100644
--- a/Documentation/devicetree/bindings/iio/adc/ti,adc108s102.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/ti,adc108s102.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Texas Instruments ADC108S102 and ADC128S102
maintainers:
- - Bogdan Pricop <bogdan.pricop@emutex.com>
+ - Jonathan Cameron <jic23@kernel.org>
description: |
Family of 8 channel, 10/12 bit, SPI, single ended ADCs.
diff --git a/Documentation/devicetree/bindings/iio/adc/ti,ads124s08.yaml b/Documentation/devicetree/bindings/iio/adc/ti,ads124s08.yaml
index 9f5e96439c01..2e6abc9d746a 100644
--- a/Documentation/devicetree/bindings/iio/adc/ti,ads124s08.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/ti,ads124s08.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Texas Instruments' ads124s08 and ads124s06 ADC chip
maintainers:
- - Dan Murphy <dmurphy@ti.com>
+ - Andrew Davis <afd@ti.com>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/iio/amplifiers/adi,hmc425a.yaml b/Documentation/devicetree/bindings/iio/amplifiers/adi,hmc425a.yaml
index a557761d8016..9fda56fa49c3 100644
--- a/Documentation/devicetree/bindings/iio/amplifiers/adi,hmc425a.yaml
+++ b/Documentation/devicetree/bindings/iio/amplifiers/adi,hmc425a.yaml
@@ -8,7 +8,6 @@ title: HMC425A 6-bit Digital Step Attenuator
maintainers:
- Michael Hennerich <michael.hennerich@analog.com>
- - Beniamin Bia <beniamin.bia@analog.com>
description: |
Digital Step Attenuator IIO device with gpio interface.
diff --git a/Documentation/devicetree/bindings/iio/dac/adi,ad5766.yaml b/Documentation/devicetree/bindings/iio/dac/adi,ad5766.yaml
index a8f7720d1e3e..29bd16dab546 100644
--- a/Documentation/devicetree/bindings/iio/dac/adi,ad5766.yaml
+++ b/Documentation/devicetree/bindings/iio/dac/adi,ad5766.yaml
@@ -22,6 +22,8 @@ properties:
- adi,ad5767
output-range-microvolts:
+ $ref: /schemas/types.yaml#/definitions/int32-array
+ maxItems: 2
description: Select converter output range.
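+ For example, output-range-microvolts = <(-5000000) 5000000>; would
+ request a -5 V to +5 V span; the exact ranges supported depend on the
+ converter variant, so check the datasheet.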
reg:
diff --git a/Documentation/devicetree/bindings/iio/imu/nxp,fxos8700.yaml b/Documentation/devicetree/bindings/iio/imu/nxp,fxos8700.yaml
index 479e7065d4eb..0203b83b8587 100644
--- a/Documentation/devicetree/bindings/iio/imu/nxp,fxos8700.yaml
+++ b/Documentation/devicetree/bindings/iio/imu/nxp,fxos8700.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Freescale FXOS8700 Inertial Measurement Unit
maintainers:
- - Robert Jones <rjones@gateworks.com>
+ - Jonathan Cameron <jic23@kernel.org>
description: |
Accelerometer and magnetometer combo device with an i2c and SPI interface.
diff --git a/Documentation/devicetree/bindings/input/adc-joystick.yaml b/Documentation/devicetree/bindings/input/adc-joystick.yaml
index 2ee04e03bc22..64d961458ac7 100644
--- a/Documentation/devicetree/bindings/input/adc-joystick.yaml
+++ b/Documentation/devicetree/bindings/input/adc-joystick.yaml
@@ -45,6 +45,7 @@ additionalProperties: false
patternProperties:
"^axis@[0-9a-f]+$":
type: object
+ $ref: input.yaml#
description: >
Represents a joystick axis bound to the given ADC channel.
For each entry in the io-channels list, one axis subnode with a matching
@@ -57,7 +58,6 @@ patternProperties:
description: Index of an io-channels list entry bound to this axis.
linux,code:
- $ref: /schemas/types.yaml#/definitions/uint32
description: EV_ABS specific event code generated by the axis.
abs-range:
diff --git a/Documentation/devicetree/bindings/input/adc-keys.txt b/Documentation/devicetree/bindings/input/adc-keys.txt
deleted file mode 100644
index 6c8be6a9ace2..000000000000
--- a/Documentation/devicetree/bindings/input/adc-keys.txt
+++ /dev/null
@@ -1,67 +0,0 @@
-ADC attached resistor ladder buttons
-------------------------------------
-
-Required properties:
- - compatible: "adc-keys"
- - io-channels: Phandle to an ADC channel
- - io-channel-names = "buttons";
- - keyup-threshold-microvolt: Voltage above or equal to which all the keys are
- considered up.
-
-Optional properties:
- - poll-interval: Poll interval time in milliseconds
- - autorepeat: Boolean, Enable auto repeat feature of Linux input
- subsystem.
-
-Each button (key) is represented as a sub-node of "adc-keys":
-
-Required subnode-properties:
- - label: Descriptive name of the key.
- - linux,code: Keycode to emit.
- - press-threshold-microvolt: voltage above or equal to which this key is
- considered pressed.
-
-No two values of press-threshold-microvolt may be the same.
-All values of press-threshold-microvolt must be less than
-keyup-threshold-microvolt.
-
-Example:
-
-#include <dt-bindings/input/input.h>
-
- adc-keys {
- compatible = "adc-keys";
- io-channels = <&lradc 0>;
- io-channel-names = "buttons";
- keyup-threshold-microvolt = <2000000>;
-
- button-up {
- label = "Volume Up";
- linux,code = <KEY_VOLUMEUP>;
- press-threshold-microvolt = <1500000>;
- };
-
- button-down {
- label = "Volume Down";
- linux,code = <KEY_VOLUMEDOWN>;
- press-threshold-microvolt = <1000000>;
- };
-
- button-enter {
- label = "Enter";
- linux,code = <KEY_ENTER>;
- press-threshold-microvolt = <500000>;
- };
- };
-
-+--------------------------------+------------------------+
-| 2.000.000 <= value | no key pressed |
-+--------------------------------+------------------------+
-| 1.500.000 <= value < 2.000.000 | KEY_VOLUMEUP pressed |
-+--------------------------------+------------------------+
-| 1.000.000 <= value < 1.500.000 | KEY_VOLUMEDOWN pressed |
-+--------------------------------+------------------------+
-| 500.000 <= value < 1.000.000 | KEY_ENTER pressed |
-+--------------------------------+------------------------+
-| value < 500.000 | no key pressed |
-+--------------------------------+------------------------+
diff --git a/Documentation/devicetree/bindings/input/adc-keys.yaml b/Documentation/devicetree/bindings/input/adc-keys.yaml
new file mode 100644
index 000000000000..7aa078dead37
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/adc-keys.yaml
@@ -0,0 +1,103 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/adc-keys.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ADC attached resistor ladder buttons
+
+maintainers:
+ - Alexandre Belloni <alexandre.belloni@bootlin.com>
+
+allOf:
+ - $ref: input.yaml#
+
+properties:
+ compatible:
+ const: adc-keys
+
+ io-channels:
+ maxItems: 1
+
+ io-channel-names:
+ const: buttons
+
+ keyup-threshold-microvolt:
+ description:
+ Voltage above or equal to which all the keys are considered up.
+
+ poll-interval: true
+ autorepeat: true
+
+patternProperties:
+ '^button-':
+ type: object
+ $ref: input.yaml#
+ additionalProperties: false
+ description:
+ Each button (key) is represented as a sub-node.
+
+ properties:
+ label: true
+
+ linux,code: true
+
+ press-threshold-microvolt:
+ description:
+ Voltage above or equal to which this key is considered pressed. No
+ two values of press-threshold-microvolt may be the same. All values
+ of press-threshold-microvolt must be less than
+ keyup-threshold-microvolt.
+
+ required:
+ - linux,code
+ - press-threshold-microvolt
+
+required:
+ - compatible
+ - io-channels
+ - io-channel-names
+ - keyup-threshold-microvolt
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/input/input.h>
+ // +--------------------------------+------------------------+
+ // | 2.000.000 <= value | no key pressed |
+ // +--------------------------------+------------------------+
+ // | 1.500.000 <= value < 2.000.000 | KEY_VOLUMEUP pressed |
+ // +--------------------------------+------------------------+
+ // | 1.000.000 <= value < 1.500.000 | KEY_VOLUMEDOWN pressed |
+ // +--------------------------------+------------------------+
+ // | 500.000 <= value < 1.000.000 | KEY_ENTER pressed |
+ // +--------------------------------+------------------------+
+ // | value < 500.000 | no key pressed |
+ // +--------------------------------+------------------------+
+
+ adc-keys {
+ compatible = "adc-keys";
+ io-channels = <&lradc 0>;
+ io-channel-names = "buttons";
+ keyup-threshold-microvolt = <2000000>;
+
+ button-up {
+ label = "Volume Up";
+ linux,code = <KEY_VOLUMEUP>;
+ press-threshold-microvolt = <1500000>;
+ };
+
+ button-down {
+ label = "Volume Down";
+ linux,code = <KEY_VOLUMEDOWN>;
+ press-threshold-microvolt = <1000000>;
+ };
+
+ button-enter {
+ label = "Enter";
+ linux,code = <KEY_ENTER>;
+ press-threshold-microvolt = <500000>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/input/allwinner,sun4i-a10-lradc-keys.yaml b/Documentation/devicetree/bindings/input/allwinner,sun4i-a10-lradc-keys.yaml
index 3399fc288afb..9700dc468b25 100644
--- a/Documentation/devicetree/bindings/input/allwinner,sun4i-a10-lradc-keys.yaml
+++ b/Documentation/devicetree/bindings/input/allwinner,sun4i-a10-lradc-keys.yaml
@@ -44,14 +44,13 @@ properties:
patternProperties:
"^button-[0-9]+$":
type: object
+ $ref: input.yaml#
properties:
label:
$ref: /schemas/types.yaml#/definitions/string
description: Descriptive name of the key
- linux,code:
- $ref: /schemas/types.yaml#/definitions/uint32
- description: Keycode to emit
+ linux,code: true
channel:
$ref: /schemas/types.yaml#/definitions/uint32
diff --git a/Documentation/devicetree/bindings/input/ariel-pwrbutton.yaml b/Documentation/devicetree/bindings/input/ariel-pwrbutton.yaml
index b4ad829d7383..442f623bb294 100644
--- a/Documentation/devicetree/bindings/input/ariel-pwrbutton.yaml
+++ b/Documentation/devicetree/bindings/input/ariel-pwrbutton.yaml
@@ -17,6 +17,7 @@ description: |
allOf:
- $ref: input.yaml#
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/input/azoteq,iqs7222.yaml b/Documentation/devicetree/bindings/input/azoteq,iqs7222.yaml
index a3a1e5a65306..02e605fac408 100644
--- a/Documentation/devicetree/bindings/input/azoteq,iqs7222.yaml
+++ b/Documentation/devicetree/bindings/input/azoteq,iqs7222.yaml
@@ -37,10 +37,6 @@ properties:
device is temporarily held in hardware reset prior to initialization if
this property is present.
- azoteq,rf-filt-enable:
- type: boolean
- description: Enables the device's internal RF filter.
-
azoteq,max-counts:
$ref: /schemas/types.yaml#/definitions/uint32
enum: [0, 1, 2, 3]
@@ -421,6 +417,7 @@ patternProperties:
patternProperties:
"^event-(prox|touch)$":
type: object
+ $ref: input.yaml#
description:
Represents a proximity or touch event reported by the channel.
@@ -467,14 +464,9 @@ patternProperties:
The IQS7222B does not feature channel-specific timeouts; the time-
out specified for any one channel applies to all channels.
- linux,code:
- $ref: /schemas/types.yaml#/definitions/uint32
- description:
- Numeric key or switch code associated with the event. Specify
- KEY_RESERVED (0) to opt out of event reporting.
+ linux,code: true
linux,input-type:
- $ref: /schemas/types.yaml#/definitions/uint32
enum: [1, 5]
default: 1
description:
@@ -537,9 +529,8 @@ patternProperties:
azoteq,bottom-speed:
$ref: /schemas/types.yaml#/definitions/uint32
- multipleOf: 4
minimum: 0
- maximum: 1020
+ maximum: 255
description:
Specifies the speed of movement after which coordinate filtering is
linearly reduced.
@@ -575,14 +566,13 @@ patternProperties:
patternProperties:
"^event-(press|tap|(swipe|flick)-(pos|neg))$":
type: object
+ $ref: input.yaml#
description:
Represents a press or gesture (IQS7222A only) event reported by
the slider.
properties:
- linux,code:
- $ref: /schemas/types.yaml#/definitions/uint32
- description: Numeric key code associated with the event.
+ linux,code: true
azoteq,gesture-max-ms:
multipleOf: 4
@@ -616,16 +606,15 @@ patternProperties:
azoteq,gpio-select:
$ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 1
- maxItems: 1
+ maxItems: 3
items:
minimum: 0
- maximum: 0
+ maximum: 2
description: |
- Specifies an individual GPIO mapped to a tap, swipe or flick
- gesture as follows:
+ Specifies one or more GPIOs mapped to the event as follows:
0: GPIO0
- 1: GPIO3 (reserved)
- 2: GPIO4 (reserved)
+ 1: GPIO3 (IQS7222C only)
+ 2: GPIO4 (IQS7222C only)
Note that although multiple events can be mapped to a single
GPIO, they must all be of the same type (proximity, touch or
@@ -710,6 +699,14 @@ allOf:
multipleOf: 4
maximum: 1020
+ patternProperties:
+ "^event-(press|tap|(swipe|flick)-(pos|neg))$":
+ properties:
+ azoteq,gpio-select:
+ maxItems: 1
+ items:
+ maximum: 0
+
else:
patternProperties:
"^channel-([0-9]|1[0-9])$":
@@ -726,8 +723,6 @@ allOf:
azoteq,gesture-dist: false
- azoteq,gpio-select: false
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/input/fsl,mpr121-touchkey.yaml b/Documentation/devicetree/bindings/input/fsl,mpr121-touchkey.yaml
index 878464f128dc..5139af287d3e 100644
--- a/Documentation/devicetree/bindings/input/fsl,mpr121-touchkey.yaml
+++ b/Documentation/devicetree/bindings/input/fsl,mpr121-touchkey.yaml
@@ -57,7 +57,7 @@ examples:
#address-cells = <1>;
#size-cells = <0>;
- mpr121@5a {
+ touchkey@5a {
compatible = "fsl,mpr121-touchkey";
reg = <0x5a>;
interrupt-parent = <&gpio1>;
@@ -77,7 +77,7 @@ examples:
#address-cells = <1>;
#size-cells = <0>;
- mpr121@5a {
+ touchkey@5a {
compatible = "fsl,mpr121-touchkey";
reg = <0x5a>;
poll-interval = <20>;
diff --git a/Documentation/devicetree/bindings/input/gpio-keys.yaml b/Documentation/devicetree/bindings/input/gpio-keys.yaml
index 7fe1966ea28a..17ac9dff7972 100644
--- a/Documentation/devicetree/bindings/input/gpio-keys.yaml
+++ b/Documentation/devicetree/bindings/input/gpio-keys.yaml
@@ -15,107 +15,106 @@ properties:
- gpio-keys
- gpio-keys-polled
+ autorepeat: true
+
+ label:
+ description: Name of entire device
+
+ poll-interval: true
+
patternProperties:
- ".*":
- if:
- type: object
- then:
- $ref: input.yaml#
+ "^(button|event|key|switch|(button|event|key|switch)-[a-z0-9-]+|[a-z0-9-]+-(button|event|key|switch))$":
+ $ref: input.yaml#
- properties:
- gpios:
- maxItems: 1
+ properties:
+ gpios:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
- interrupts:
- maxItems: 1
+ label:
+ description: Descriptive name of the key.
- label:
- description: Descriptive name of the key.
+ linux,code:
+ description: Key / Axis code to emit.
- linux,code:
- description: Key / Axis code to emit.
- $ref: /schemas/types.yaml#/definitions/uint32
+ linux,input-type:
+ default: 1 # EV_KEY
- linux,input-type:
- description:
- Specify event type this button/key generates. If not specified defaults to
- <1> == EV_KEY.
- $ref: /schemas/types.yaml#/definitions/uint32
+ linux,input-value:
+ description: |
+ If linux,input-type is EV_ABS or EV_REL then this
+ value is sent for events this button generates when pressed.
+ EV_ABS/EV_REL axis will generate an event with a value of 0
+ when all buttons with linux,input-type == type and
+ linux,code == axis are released. This value is interpreted
+ as a signed 32 bit value, e.g. to make a button generate a
+ value of -1 use:
- default: 1
+ linux,input-value = <0xffffffff>; /* -1 */
- linux,input-value:
- description: |
- If linux,input-type is EV_ABS or EV_REL then this
- value is sent for events this button generates when pressed.
- EV_ABS/EV_REL axis will generate an event with a value of 0
- when all buttons with linux,input-type == type and
- linux,code == axis are released. This value is interpreted
- as a signed 32 bit value, e.g. to make a button generate a
- value of -1 use:
+ $ref: /schemas/types.yaml#/definitions/uint32
- linux,input-value = <0xffffffff>; /* -1 */
+ debounce-interval:
+ description:
+ Debouncing interval time in milliseconds. If not specified defaults to 5.
+ $ref: /schemas/types.yaml#/definitions/uint32
- $ref: /schemas/types.yaml#/definitions/uint32
+ default: 5
- debounce-interval:
- description:
- Debouncing interval time in milliseconds. If not specified defaults to 5.
- $ref: /schemas/types.yaml#/definitions/uint32
+ wakeup-source:
+ description: Button can wake-up the system.
- default: 5
+ wakeup-event-action:
+ description: |
+ Specifies whether the key should wake the system when asserted, when
+ deasserted, or both. This property is only valid for keys that wake up the
+ system (e.g., when the "wakeup-source" property is also provided).
- wakeup-source:
- description: Button can wake-up the system.
+ Supported values are defined in linux-event-codes.h:
- wakeup-event-action:
- description: |
- Specifies whether the key should wake the system when asserted, when
- deasserted, or both. This property is only valid for keys that wake up the
- system (e.g., when the "wakeup-source" property is also provided).
+ EV_ACT_ANY - both asserted and deasserted
+ EV_ACT_ASSERTED - asserted
+ EV_ACT_DEASSERTED - deasserted
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2]
- Supported values are defined in linux-event-codes.h:
+ linux,can-disable:
+ description:
+ Indicates that the button is connected to a dedicated (not shared)
+ interrupt which can be disabled to suppress events from the button.
+ type: boolean
- EV_ACT_ANY - both asserted and deasserted
- EV_ACT_ASSERTED - asserted
- EV_ACT_DEASSERTED - deasserted
- $ref: /schemas/types.yaml#/definitions/uint32
- enum: [0, 1, 2]
+ required:
+ - linux,code
- linux,can-disable:
- description:
- Indicates that button is connected to dedicated (not shared) interrupt
- which can be disabled to suppress events from the button.
- type: boolean
+ anyOf:
+ - required:
+ - interrupts
+ - required:
+ - interrupts-extended
+ - required:
+ - gpios
+ dependencies:
+ wakeup-event-action: [ wakeup-source ]
+ linux,input-value: [ gpios ]
+
+ unevaluatedProperties: false
+
+allOf:
+ - $ref: input.yaml#
+ - if:
+ properties:
+ compatible:
+ const: gpio-keys-polled
+ then:
required:
- - linux,code
-
- anyOf:
- - required:
- - interrupts
- - required:
- - gpios
-
- dependencies:
- wakeup-event-action: [ wakeup-source ]
- linux,input-value: [ gpios ]
-
- unevaluatedProperties: false
-
-if:
- properties:
- compatible:
- const: gpio-keys-polled
-then:
- properties:
- poll-interval:
- description:
- Poll interval time in milliseconds
- $ref: /schemas/types.yaml#/definitions/uint32
-
- required:
- - poll-interval
+ - poll-interval
+ else:
+ properties:
+ poll-interval: false
additionalProperties: false
@@ -127,13 +126,13 @@ examples:
compatible = "gpio-keys";
autorepeat;
- up {
+ key-up {
label = "GPIO Key UP";
linux,code = <103>;
gpios = <&gpio1 0 1>;
};
- down {
+ key-down {
label = "GPIO Key DOWN";
linux,code = <108>;
interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
diff --git a/Documentation/devicetree/bindings/input/input.yaml b/Documentation/devicetree/bindings/input/input.yaml
index d41d8743aad4..17512f4347fd 100644
--- a/Documentation/devicetree/bindings/input/input.yaml
+++ b/Documentation/devicetree/bindings/input/input.yaml
@@ -21,7 +21,26 @@ properties:
$ref: /schemas/types.yaml#/definitions/uint32-array
items:
minimum: 0
- maximum: 0xff
+ maximum: 0x2ff
+
+ linux,code:
+ description:
+ Specifies a single numeric keycode value to be used for reporting
+ button/switch events. Specify KEY_RESERVED (0) to opt out of event
+ reporting.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ maximum: 0x2ff
+
+ linux,input-type:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum:
+ - 1 # EV_KEY
+ - 2 # EV_REL
+ - 3 # EV_ABS
+ - 5 # EV_SW
+ description:
+ Specifies whether the event is to be interpreted as a key, relative,
+ absolute, or switch.
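+ For example, a lid switch would typically set linux,input-type = <5>
+ (EV_SW) together with a switch code such as linux,code = <SW_LID>.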
poll-interval:
description: Poll interval time in milliseconds.
@@ -39,4 +58,7 @@ properties:
reset automatically. Device with key pressed reset feature can specify
this property.
+dependencies:
+ linux,input-type: [ "linux,code" ]
+
additionalProperties: true
diff --git a/Documentation/devicetree/bindings/input/iqs269a.yaml b/Documentation/devicetree/bindings/input/iqs269a.yaml
index 9c154e5e1a91..3c430d38594f 100644
--- a/Documentation/devicetree/bindings/input/iqs269a.yaml
+++ b/Documentation/devicetree/bindings/input/iqs269a.yaml
@@ -370,6 +370,7 @@ patternProperties:
patternProperties:
"^event-prox(-alt)?$":
type: object
+ $ref: input.yaml#
description:
Represents a proximity event reported by the channel in response to
a decrease in counts. Node names suffixed with '-alt' instead corre-
@@ -396,14 +397,13 @@ patternProperties:
default: 10
description: Specifies the threshold for the event.
- linux,code:
- $ref: /schemas/types.yaml#/definitions/uint32
- description: Numeric key or switch code associated with the event.
+ linux,code: true
additionalProperties: false
"^event-touch(-alt)?$":
type: object
+ $ref: input.yaml#
description: Represents a touch event reported by the channel.
properties:
@@ -421,14 +421,13 @@ patternProperties:
default: 4
description: Specifies the hysteresis for the event.
- linux,code:
- $ref: /schemas/types.yaml#/definitions/uint32
- description: Numeric key or switch code associated with the event.
+ linux,code: true
additionalProperties: false
"^event-deep(-alt)?$":
type: object
+ $ref: input.yaml#
description: Represents a deep-touch event reported by the channel.
properties:
@@ -446,9 +445,7 @@ patternProperties:
default: 0
description: Specifies the hysteresis for the event.
- linux,code:
- $ref: /schemas/types.yaml#/definitions/uint32
- description: Numeric key or switch code associated with the event.
+ linux,code: true
additionalProperties: false
@@ -475,7 +472,7 @@ examples:
#address-cells = <1>;
#size-cells = <0>;
- iqs269a@44 {
+ touch@44 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/input/iqs626a.yaml b/Documentation/devicetree/bindings/input/iqs626a.yaml
index 0cb736c541c9..7a27502095f3 100644
--- a/Documentation/devicetree/bindings/input/iqs626a.yaml
+++ b/Documentation/devicetree/bindings/input/iqs626a.yaml
@@ -449,6 +449,7 @@ patternProperties:
patternProperties:
"^event-(prox|touch|deep)(-alt)?$":
type: object
+ $ref: input.yaml#
description:
Represents a proximity, touch or deep-touch event reported by the
channel in response to a decrease in counts. Node names suffixed with
@@ -487,21 +488,15 @@ patternProperties:
Specifies the hysteresis for the event (touch and deep-touch
events only).
- linux,code:
- $ref: /schemas/types.yaml#/definitions/uint32
- description: Numeric key or switch code associated with the event.
+ linux,code: true
linux,input-type:
- $ref: /schemas/types.yaml#/definitions/uint32
enum: [1, 5]
description:
Specifies whether the event is to be interpreted as a key (1) or
a switch (5). By default, Hall-channel events are interpreted as
switches and all others are interpreted as keys.
- dependencies:
- linux,input-type: ["linux,code"]
-
additionalProperties: false
dependencies:
@@ -511,6 +506,7 @@ patternProperties:
"^trackpad-3x[2-3]$":
type: object
+ $ref: input.yaml#
description:
Represents all channels associated with the trackpad. The channels are
collectively active if the trackpad is defined and inactive otherwise.
@@ -679,7 +675,6 @@ patternProperties:
Specifies the raw count filter strength during low-power mode.
linux,keycodes:
- $ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 1
maxItems: 6
description: |
@@ -751,7 +746,7 @@ examples:
#address-cells = <1>;
#size-cells = <0>;
- iqs626a@44 {
+ touch@44 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/input/iqs62x-keys.yaml b/Documentation/devicetree/bindings/input/iqs62x-keys.yaml
index 77fe3b545b35..0aa951f0ab92 100644
--- a/Documentation/devicetree/bindings/input/iqs62x-keys.yaml
+++ b/Documentation/devicetree/bindings/input/iqs62x-keys.yaml
@@ -9,6 +9,9 @@ title: Azoteq IQS620A/621/622/624/625 Keys and Switches
maintainers:
- Jeff LaBundy <jeff@labundy.com>
+allOf:
+ - $ref: input.yaml#
+
description: |
The Azoteq IQS620A, IQS621, IQS622, IQS624 and IQS625 multi-function sensors
feature a variety of self-capacitive, mutual-inductive and Hall-effect sens-
@@ -30,7 +33,6 @@ properties:
- azoteq,iqs625-keys
linux,keycodes:
- $ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 1
maxItems: 16
description: |
@@ -89,15 +91,14 @@ properties:
patternProperties:
"^hall-switch-(north|south)$":
type: object
+ $ref: input.yaml#
description:
Represents north/south-field Hall-effect sensor touch or proximity
events. Note that north/south-field orientation is reversed on the
IQS620AXzCSR device due to its flip-chip package.
properties:
- linux,code:
- $ref: /schemas/types.yaml#/definitions/uint32
- description: Numeric switch code associated with the event.
+ linux,code: true
azoteq,use-prox:
$ref: /schemas/types.yaml#/definitions/flag
diff --git a/Documentation/devicetree/bindings/input/max77650-onkey.yaml b/Documentation/devicetree/bindings/input/max77650-onkey.yaml
index 3a2ad6ec64db..48edc0c8c1dd 100644
--- a/Documentation/devicetree/bindings/input/max77650-onkey.yaml
+++ b/Documentation/devicetree/bindings/input/max77650-onkey.yaml
@@ -16,15 +16,15 @@ description: |
The onkey controller is represented as a sub-node of the PMIC node on
the device tree.
+allOf:
+ - $ref: input.yaml#
+
properties:
compatible:
const: maxim,max77650-onkey
linux,code:
- $ref: /schemas/types.yaml#/definitions/uint32
- description:
- The key-code to be reported when the key is pressed. Defaults
- to KEY_POWER.
+ default: 116 # KEY_POWER
maxim,onkey-slide:
$ref: /schemas/types.yaml#/definitions/flag
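A minimal usage sketch of the converted onkey binding above; the sub-node placement and the explicit code are illustrative assumptions, since 116 (KEY_POWER) is already the documented default:

    onkey {
        compatible = "maxim,max77650-onkey";
        /* optional: 116 = KEY_POWER is already the default */
        linux,code = <116>;
        /* assumed flag, present only if the key acts as a slide switch */
        maxim,onkey-slide;
    };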
diff --git a/Documentation/devicetree/bindings/input/microchip,cap11xx.yaml b/Documentation/devicetree/bindings/input/microchip,cap11xx.yaml
index d5d6bced3148..96358b12f9b2 100644
--- a/Documentation/devicetree/bindings/input/microchip,cap11xx.yaml
+++ b/Documentation/devicetree/bindings/input/microchip,cap11xx.yaml
@@ -112,7 +112,7 @@ examples:
#address-cells = <1>;
#size-cells = <0>;
- cap1188@28 {
+ touch@28 {
compatible = "microchip,cap1188";
interrupt-parent = <&gpio1>;
interrupts = <0 0>;
diff --git a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml
index 2e8da7470513..46bc8c028fe6 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml
+++ b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml
@@ -85,6 +85,14 @@ properties:
minimum: 0
maximum: 80
+ report-rate-hz:
+ description: |
+      Allows setting the scan rate in hertz.
+      M06 supports a range of 30 to 140 Hz.
+      M12 supports a range of 1 to 255 Hz.
+ minimum: 1
+ maximum: 255
+
touchscreen-size-x: true
touchscreen-size-y: true
touchscreen-fuzz-x: true
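A hedged usage sketch of the new report-rate-hz property; the controller variant, I2C address and interrupt wiring below are illustrative assumptions, not taken from this patch:

    /* IRQ_TYPE_EDGE_FALLING comes from dt-bindings/interrupt-controller/irq.h */
    touchscreen@38 {
        compatible = "edt,edt-ft5406";
        reg = <0x38>;
        interrupt-parent = <&gpio2>;
        interrupts = <5 IRQ_TYPE_EDGE_FALLING>;
        /* 60 Hz sits inside the 30-140 Hz window documented for M06 */
        report-rate-hz = <60>;
    };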
diff --git a/Documentation/devicetree/bindings/input/touchscreen/ektf2127.txt b/Documentation/devicetree/bindings/input/touchscreen/ektf2127.txt
index 5eef5e7d6aae..c9f2c9f578e3 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/ektf2127.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/ektf2127.txt
@@ -6,7 +6,7 @@ Required properties:
- interrupts : interrupt specification for the ektf2127 interrupt
- power-gpios : GPIO specification for the pin connected to the
ektf2127's wake input. This needs to be driven high
- to take ektf2127 out of it's low power state
+ to take ektf2127 out of its low power state
For additional optional properties see: touchscreen.txt
diff --git a/Documentation/devicetree/bindings/interrupt-controller/rda,8810pl-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/rda,8810pl-intc.txt
deleted file mode 100644
index e0062aebf025..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/rda,8810pl-intc.txt
+++ /dev/null
@@ -1,61 +0,0 @@
-RDA Micro RDA8810PL Interrupt Controller
-
-The interrupt controller in RDA8810PL SoC is a custom interrupt controller
-which supports up to 32 interrupts.
-
-Required properties:
-
-- compatible: Should be "rda,8810pl-intc".
-- reg: Specifies base physical address of the registers set.
-- interrupt-controller: Identifies the node as an interrupt controller.
-- #interrupt-cells: Specifies the number of cells needed to encode an
- interrupt source. The value shall be 2.
-
-The interrupt sources are as follows:
-
-ID Name
-------------
-0: PULSE_DUMMY
-1: I2C
-2: NAND_NFSC
-3: SDMMC1
-4: SDMMC2
-5: SDMMC3
-6: SPI1
-7: SPI2
-8: SPI3
-9: UART1
-10: UART2
-11: UART3
-12: GPIO1
-13: GPIO2
-14: GPIO3
-15: KEYPAD
-16: TIMER
-17: TIMEROS
-18: COMREG0
-19: COMREG1
-20: USB
-21: DMC
-22: DMA
-23: CAMERA
-24: GOUDA
-25: GPU
-26: VPU_JPG
-27: VPU_HOST
-28: VOC
-29: AUIFC0
-30: AUIFC1
-31: L2CC
-
-Example:
- apb@20800000 {
- compatible = "simple-bus";
- ...
- intc: interrupt-controller@0 {
- compatible = "rda,8810pl-intc";
- reg = <0x0 0x1000>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
- };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/rda,8810pl-intc.yaml b/Documentation/devicetree/bindings/interrupt-controller/rda,8810pl-intc.yaml
new file mode 100644
index 000000000000..96d6285d0087
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/rda,8810pl-intc.yaml
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/rda,8810pl-intc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: RDA Micro RDA8810PL interrupt controller
+
+maintainers:
+ - Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+allOf:
+ - $ref: /schemas/interrupt-controller.yaml#
+
+properties:
+ compatible:
+ const: rda,8810pl-intc
+
+ reg:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 2
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - '#interrupt-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ intc: interrupt-controller@0 {
+ compatible = "rda,8810pl-intc";
+ reg = <0x0 0x1000>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
index 76fc2c0f4d54..9066e6df1ba1 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
@@ -42,6 +42,7 @@ properties:
- qcom,sdx55-smmu-500
- qcom,sdx65-smmu-500
- qcom,sm6350-smmu-500
+ - qcom,sm6375-smmu-500
- qcom,sm8150-smmu-500
- qcom,sm8250-smmu-500
- qcom,sm8350-smmu-500
diff --git a/Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml b/Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml
index 2ae3bbad7f1a..fee0241b5098 100644
--- a/Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml
+++ b/Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml
@@ -101,6 +101,10 @@ properties:
items:
- const: bclk
+ mediatek,infracfg:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: The phandle to the mediatek infracfg syscon
+
mediatek,larbs:
$ref: /schemas/types.yaml#/definitions/phandle-array
minItems: 1
@@ -167,6 +171,18 @@ allOf:
required:
- power-domains
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - mediatek,mt2712-m4u
+ - mediatek,mt8173-m4u
+
+ then:
+ required:
+ - mediatek,infracfg
+
- if: # The IOMMUs don't have larbs.
not:
properties:
@@ -191,6 +207,7 @@ examples:
interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_LOW>;
clocks = <&infracfg CLK_INFRA_M4U>;
clock-names = "bclk";
+ mediatek,infracfg = <&infracfg>;
mediatek,larbs = <&larb0>, <&larb1>, <&larb2>,
<&larb3>, <&larb4>, <&larb5>;
#iommu-cells = <1>;
diff --git a/Documentation/devicetree/bindings/leds/backlight/common.yaml b/Documentation/devicetree/bindings/leds/backlight/common.yaml
index 702ba350d869..3b60afbab68b 100644
--- a/Documentation/devicetree/bindings/leds/backlight/common.yaml
+++ b/Documentation/devicetree/bindings/leds/backlight/common.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Common backlight properties
maintainers:
- - Lee Jones <lee.jones@linaro.org>
+ - Lee Jones <lee@kernel.org>
- Daniel Thompson <daniel.thompson@linaro.org>
- Jingoo Han <jingoohan1@gmail.com>
diff --git a/Documentation/devicetree/bindings/leds/backlight/gpio-backlight.yaml b/Documentation/devicetree/bindings/leds/backlight/gpio-backlight.yaml
index 75cc569b9c55..3300451fcfd5 100644
--- a/Documentation/devicetree/bindings/leds/backlight/gpio-backlight.yaml
+++ b/Documentation/devicetree/bindings/leds/backlight/gpio-backlight.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: gpio-backlight bindings
maintainers:
- - Lee Jones <lee.jones@linaro.org>
+ - Lee Jones <lee@kernel.org>
- Daniel Thompson <daniel.thompson@linaro.org>
- Jingoo Han <jingoohan1@gmail.com>
diff --git a/Documentation/devicetree/bindings/leds/backlight/led-backlight.yaml b/Documentation/devicetree/bindings/leds/backlight/led-backlight.yaml
index f5822f4ea667..0793d0adc4ec 100644
--- a/Documentation/devicetree/bindings/leds/backlight/led-backlight.yaml
+++ b/Documentation/devicetree/bindings/leds/backlight/led-backlight.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: led-backlight bindings
maintainers:
- - Lee Jones <lee.jones@linaro.org>
+ - Lee Jones <lee@kernel.org>
- Daniel Thompson <daniel.thompson@linaro.org>
- Jingoo Han <jingoohan1@gmail.com>
diff --git a/Documentation/devicetree/bindings/leds/backlight/lm3630a-backlight.yaml b/Documentation/devicetree/bindings/leds/backlight/lm3630a-backlight.yaml
index 08fe5cf8614a..3c9b4054ed9a 100644
--- a/Documentation/devicetree/bindings/leds/backlight/lm3630a-backlight.yaml
+++ b/Documentation/devicetree/bindings/leds/backlight/lm3630a-backlight.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: TI LM3630A High-Efficiency Dual-String White LED
maintainers:
- - Lee Jones <lee.jones@linaro.org>
+ - Lee Jones <lee@kernel.org>
- Daniel Thompson <daniel.thompson@linaro.org>
- Jingoo Han <jingoohan1@gmail.com>
diff --git a/Documentation/devicetree/bindings/leds/backlight/pwm-backlight.yaml b/Documentation/devicetree/bindings/leds/backlight/pwm-backlight.yaml
index fcb8429f3088..78fbe20a1758 100644
--- a/Documentation/devicetree/bindings/leds/backlight/pwm-backlight.yaml
+++ b/Documentation/devicetree/bindings/leds/backlight/pwm-backlight.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: pwm-backlight bindings
maintainers:
- - Lee Jones <lee.jones@linaro.org>
+ - Lee Jones <lee@kernel.org>
- Daniel Thompson <daniel.thompson@linaro.org>
- Jingoo Han <jingoohan1@gmail.com>
diff --git a/Documentation/devicetree/bindings/leds/backlight/richtek,rt4831-backlight.yaml b/Documentation/devicetree/bindings/leds/backlight/richtek,rt4831-backlight.yaml
index e0ac68694b63..99e9e138fa92 100644
--- a/Documentation/devicetree/bindings/leds/backlight/richtek,rt4831-backlight.yaml
+++ b/Documentation/devicetree/bindings/leds/backlight/richtek,rt4831-backlight.yaml
@@ -47,6 +47,11 @@ properties:
minimum: 0
maximum: 3
+ richtek,bled-ocp-microamp:
+ description: |
+      Backlight over-current protection level.
+ enum: [900000, 1200000, 1500000, 1800000]
+
richtek,channel-use:
description: |
Backlight LED channel to be used.
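A short sketch of how the new over-current protection level might be set in a board DTS; the surrounding rt4831 parent node is omitted and the chosen level is an assumption for illustration:

    backlight {
        compatible = "richtek,rt4831-backlight";
        /* 1.2 A, one of the four documented levels */
        richtek,bled-ocp-microamp = <1200000>;
    };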
diff --git a/Documentation/devicetree/bindings/leds/cznic,turris-omnia-leds.yaml b/Documentation/devicetree/bindings/leds/cznic,turris-omnia-leds.yaml
index 9362b1ef9e88..14bebe1ad8f8 100644
--- a/Documentation/devicetree/bindings/leds/cznic,turris-omnia-leds.yaml
+++ b/Documentation/devicetree/bindings/leds/cznic,turris-omnia-leds.yaml
@@ -33,6 +33,8 @@ patternProperties:
"^multi-led@[0-9a-b]$":
type: object
$ref: leds-class-multicolor.yaml#
+ unevaluatedProperties: false
+
description:
This node represents one of the RGB LED devices on Turris Omnia.
No subnodes need to be added for subchannels since this controller only
diff --git a/Documentation/devicetree/bindings/leds/issi,is31fl319x.yaml b/Documentation/devicetree/bindings/leds/issi,is31fl319x.yaml
new file mode 100644
index 000000000000..940333f2d69c
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/issi,is31fl319x.yaml
@@ -0,0 +1,193 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/issi,is31fl319x.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ISSI LED controller bindings for IS31FL319{0,1,3,6,9}
+
+maintainers:
+ - Vincent Knecht <vincent.knecht@mailoo.org>
+
+description: |
+ The IS31FL319X are LED controllers with I2C interface.
+ Previously known as Si-En SN319{0,1,3,6,9}.
+
+ For more product information please see the links below:
+ https://lumissil.com/assets/pdf/core/IS31FL3190_DS.pdf
+ https://lumissil.com/assets/pdf/core/IS31FL3191_DS.pdf
+ https://lumissil.com/assets/pdf/core/IS31FL3193_DS.pdf
+ https://lumissil.com/assets/pdf/core/IS31FL3196_DS.pdf
+ https://lumissil.com/assets/pdf/core/IS31FL3199_DS.pdf
+
+properties:
+ compatible:
+ enum:
+ - issi,is31fl3190
+ - issi,is31fl3191
+ - issi,is31fl3193
+ - issi,is31fl3196
+ - issi,is31fl3199
+ - si-en,sn3190
+ - si-en,sn3191
+ - si-en,sn3193
+ - si-en,sn3196
+ - si-en,sn3199
+
+ reg:
+ maxItems: 1
+
+ shutdown-gpios:
+ maxItems: 1
+ description: GPIO attached to the SDB pin.
+
+ audio-gain-db:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 0
+ description: Audio gain selection for external analog modulation input.
+ enum: [0, 3, 6, 9, 12, 15, 18, 21]
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+patternProperties:
+ "^led@[1-9]$":
+ type: object
+ $ref: common.yaml#
+
+ properties:
+ reg:
+ description: Index of the LED.
+ minimum: 1
+ maximum: 9
+
+ led-max-microamp:
+ description:
+        Note that the driver will take the lowest of all LED limits
+        since the chip has a single global setting. The lowest value
+        is chosen because brightness is controlled by PWM, where lower
+        brightness is achieved by reducing the duty cycle of pulses
+        and not the current, which always has its peak value
+        equal to led-max-microamp.
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - issi,is31fl3190
+ - issi,is31fl3191
+ - issi,is31fl3193
+ - si-en,sn3190
+ - si-en,sn3191
+ - si-en,sn3193
+ then:
+ properties:
+ reg:
+ enum: [0x68, 0x69, 0x6a, 0x6b]
+
+ audio-gain-db: false
+
+ patternProperties:
+ "^led@[1-9]$":
+ properties:
+ led-max-microamp:
+ default: 42000
+ enum: [5000, 10000, 17500, 30000, 42000]
+ else:
+ properties:
+ reg:
+ enum: [0x64, 0x65, 0x66, 0x67]
+
+ patternProperties:
+ "^led@[1-9]$":
+ properties:
+ led-max-microamp:
+ default: 20000
+ enum: [5000, 10000, 15000, 20000, 25000, 30000, 35000, 40000]
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - issi,is31fl3190
+ - issi,is31fl3191
+ - si-en,sn3190
+ - si-en,sn3191
+ then:
+ patternProperties:
+ "^led@[1-9]$":
+ properties:
+ reg:
+ maximum: 1
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - issi,is31fl3193
+ - si-en,sn3193
+ then:
+ patternProperties:
+ "^led@[1-9]$":
+ properties:
+ reg:
+ maximum: 3
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - issi,is31fl3196
+ - si-en,sn3196
+ then:
+ patternProperties:
+ "^led@[1-9]$":
+ properties:
+ reg:
+ maximum: 6
+
+required:
+ - compatible
+ - reg
+ - "#address-cells"
+ - "#size-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/leds/common.h>
+
+ i2c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led-controller@65 {
+ compatible = "issi,is31fl3196";
+ reg = <0x65>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ shutdown-gpios = <&gpio0 11 GPIO_ACTIVE_HIGH>;
+
+ led@1 {
+ reg = <1>;
+ label = "red:aux";
+ led-max-microamp = <10000>;
+ };
+
+ led@5 {
+ reg = <5>;
+ label = "green:power";
+ linux,default-trigger = "default-on";
+ };
+ };
+ };
+...
+
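A worked reading of the led-max-microamp note above against this schema's own example, hedged because it assumes the per-variant default also applies to LEDs that omit the property:

    /* IS31FL3196 example: led@1 requests 10000 uA, led@5 omits the property
     * and would fall back to the 20000 uA default; since the chip has one
     * global current setting, the driver would program the lowest limit
     * (10000 uA) and differentiate brightness via PWM duty cycle only.
     */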
diff --git a/Documentation/devicetree/bindings/leds/leds-aat1290.txt b/Documentation/devicetree/bindings/leds/leds-aat1290.txt
deleted file mode 100644
index 62ed17ec075b..000000000000
--- a/Documentation/devicetree/bindings/leds/leds-aat1290.txt
+++ /dev/null
@@ -1,77 +0,0 @@
-* Skyworks Solutions, Inc. AAT1290 Current Regulator for Flash LEDs
-
-The device is controlled through two pins: FL_EN and EN_SET. The pins when,
-asserted high, enable flash strobe and movie mode (max 1/2 of flash current)
-respectively. In order to add a capability of selecting the strobe signal source
-(e.g. CPU or camera sensor) there is an additional switch required, independent
-of the flash chip. The switch is controlled with pin control.
-
-Required properties:
-
-- compatible : Must be "skyworks,aat1290".
-- flen-gpios : Must be device tree identifier of the flash device FL_EN pin.
-- enset-gpios : Must be device tree identifier of the flash device EN_SET pin.
-
-Optional properties:
-- pinctrl-names : Must contain entries: "default", "host", "isp". Entries
- "default" and "host" must refer to the same pin configuration
- node, which sets the host as a strobe signal provider. Entry
- "isp" must refer to the pin configuration node, which sets the
- ISP as a strobe signal provider.
-
-A discrete LED element connected to the device must be represented by a child
-node - see Documentation/devicetree/bindings/leds/common.txt.
-
-Required properties of the LED child node:
-- led-max-microamp : see Documentation/devicetree/bindings/leds/common.txt
-- flash-max-microamp : see Documentation/devicetree/bindings/leds/common.txt
- Maximum flash LED supply current can be calculated using
- following formula: I = 1A * 162kohm / Rset.
-- flash-max-timeout-us : see Documentation/devicetree/bindings/leds/common.txt
- Maximum flash timeout can be calculated using following
- formula: T = 8.82 * 10^9 * Ct.
-
-Optional properties of the LED child node:
-- function : see Documentation/devicetree/bindings/leds/common.txt
-- color : see Documentation/devicetree/bindings/leds/common.txt
-- label : see Documentation/devicetree/bindings/leds/common.txt (deprecated)
-
-Example (by Ct = 220nF, Rset = 160kohm and exynos4412-trats2 board with
-a switch that allows for routing strobe signal either from the host or from
-the camera sensor):
-
-#include "exynos4412.dtsi"
-#include <dt-bindings/leds/common.h>
-
-led-controller {
- compatible = "skyworks,aat1290";
- flen-gpios = <&gpj1 1 GPIO_ACTIVE_HIGH>;
- enset-gpios = <&gpj1 2 GPIO_ACTIVE_HIGH>;
-
- pinctrl-names = "default", "host", "isp";
- pinctrl-0 = <&camera_flash_host>;
- pinctrl-1 = <&camera_flash_host>;
- pinctrl-2 = <&camera_flash_isp>;
-
- camera_flash: led {
- function = LED_FUNCTION_FLASH;
- color = <LED_COLOR_ID_WHITE>;
- led-max-microamp = <520833>;
- flash-max-microamp = <1012500>;
- flash-max-timeout-us = <1940000>;
- };
-};
-
-&pinctrl_0 {
- camera_flash_host: camera-flash-host {
- samsung,pins = "gpj1-0";
- samsung,pin-function = <1>;
- samsung,pin-val = <0>;
- };
-
- camera_flash_isp: camera-flash-isp {
- samsung,pins = "gpj1-0";
- samsung,pin-function = <1>;
- samsung,pin-val = <1>;
- };
-};
diff --git a/Documentation/devicetree/bindings/leds/leds-bcm63138.yaml b/Documentation/devicetree/bindings/leds/leds-bcm63138.yaml
new file mode 100644
index 000000000000..52252fb6bb32
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-bcm63138.yaml
@@ -0,0 +1,95 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/leds-bcm63138.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom's BCM63138 LEDs controller
+
+maintainers:
+ - Rafał Miłecki <rafal@milecki.pl>
+
+description: |
+  This LED controller was first used on BCM63138 and later reused on BCM4908,
+ BCM6848, BCM6858, BCM63138, BCM63148, BCM63381 and BCM68360 SoCs.
+
+  It supports up to 32 LEDs that can be connected in parallel or serially. It
+ also includes limited support for hardware blinking.
+
+ Binding serially connected LEDs isn't documented yet.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - brcm,bcm4908-leds
+ - brcm,bcm6848-leds
+ - brcm,bcm6858-leds
+ - brcm,bcm63148-leds
+ - brcm,bcm63381-leds
+ - brcm,bcm68360-leds
+ - const: brcm,bcm63138-leds
+ - const: brcm,bcm63138-leds
+
+ reg:
+ maxItems: 1
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+patternProperties:
+ "^led@[a-f0-9]+$":
+ type: object
+
+ $ref: common.yaml#
+
+ properties:
+ reg:
+ maxItems: 1
+ description: LED pin number
+
+ active-low:
+ type: boolean
+ description: Makes LED active low
+
+ required:
+ - reg
+
+ unevaluatedProperties: false
+
+required:
+ - reg
+ - "#address-cells"
+ - "#size-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/leds/common.h>
+
+ leds@ff800800 {
+ compatible = "brcm,bcm4908-leds", "brcm,bcm63138-leds";
+ reg = <0xff800800 0xdc>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0x0>;
+ function = LED_FUNCTION_POWER;
+ color = <LED_COLOR_ID_GREEN>;
+ default-state = "on";
+ };
+
+ led@3 {
+ reg = <0x3>;
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_GREEN>;
+ active-low;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/leds/leds-class-multicolor.yaml b/Documentation/devicetree/bindings/leds/leds-class-multicolor.yaml
index f41d021ed677..31840e33dcf5 100644
--- a/Documentation/devicetree/bindings/leds/leds-class-multicolor.yaml
+++ b/Documentation/devicetree/bindings/leds/leds-class-multicolor.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Common properties for the multicolor LED class.
maintainers:
- - Dan Murphy <dmurphy@ti.com>
+ - Andrew Davis <afd@ti.com>
description: |
Bindings for multi color LEDs show how to describe current outputs of
@@ -19,22 +19,22 @@ description: |
LED class. Common LED nodes and properties are inherited from the common.yaml
within this documentation directory.
-patternProperties:
- "^multi-led(@[0-9a-f])?$":
- type: object
- description: Represents the LEDs that are to be grouped.
- properties:
- color:
- description: |
- For multicolor LED support this property should be defined as either
- LED_COLOR_ID_RGB or LED_COLOR_ID_MULTI which can be found in
- include/linux/leds/common.h.
- enum: [ 8, 9 ]
-
- $ref: "common.yaml#"
-
- required:
- - color
+properties:
+ $nodename:
+ pattern: "^multi-led(@[0-9a-f])?$"
+
+ color:
+ description: |
+ For multicolor LED support this property should be defined as either
+ LED_COLOR_ID_RGB or LED_COLOR_ID_MULTI which can be found in
+ include/linux/leds/common.h.
+ enum: [ 8, 9 ]
+
+required:
+ - color
+
+allOf:
+ - $ref: "common.yaml#"
additionalProperties: true
diff --git a/Documentation/devicetree/bindings/leds/leds-is31fl319x.txt b/Documentation/devicetree/bindings/leds/leds-is31fl319x.txt
deleted file mode 100644
index 676d43ec8169..000000000000
--- a/Documentation/devicetree/bindings/leds/leds-is31fl319x.txt
+++ /dev/null
@@ -1,61 +0,0 @@
-LEDs connected to is31fl319x LED controller chip
-
-Required properties:
-- compatible : Should be any of
- "issi,is31fl3190"
- "issi,is31fl3191"
- "issi,is31fl3193"
- "issi,is31fl3196"
- "issi,is31fl3199"
- "si-en,sn3199".
-- #address-cells: Must be 1.
-- #size-cells: Must be 0.
-- reg: 0x64, 0x65, 0x66, or 0x67.
-
-Optional properties:
-- audio-gain-db : audio gain selection for external analog modulation input.
- Valid values: 0 - 21, step by 3 (rounded down)
- Default: 0
-- shutdown-gpios : Specifier of the GPIO connected to SDB pin of the chip.
-
-Each led is represented as a sub-node of the issi,is31fl319x device.
-There can be less leds subnodes than the chip can support but not more.
-
-Required led sub-node properties:
-- reg : number of LED line
- Valid values: 1 - number of leds supported by the chip variant.
-
-Optional led sub-node properties:
-- label : see Documentation/devicetree/bindings/leds/common.txt.
-- linux,default-trigger :
- see Documentation/devicetree/bindings/leds/common.txt.
-- led-max-microamp : (optional)
- Valid values: 5000 - 40000, step by 5000 (rounded down)
- Default: 20000 (20 mA)
- Note: a driver will take the lowest of all led limits since the
- chip has a single global setting. The lowest value will be chosen
- due to the PWM specificity, where lower brightness is achieved
- by reducing the dury-cycle of pulses and not the current, which
- will always have its peak value equal to led-max-microamp.
-
-Examples:
-
-fancy_leds: leds@65 {
- compatible = "issi,is31fl3196";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x65>;
- shutdown-gpios = <&gpio0 11 GPIO_ACTIVE_HIGH>;
-
- red_aux: led@1 {
- label = "red:aux";
- reg = <1>;
- led-max-microamp = <10000>;
- };
-
- green_power: led@5 {
- label = "green:power";
- reg = <5>;
- linux,default-trigger = "default-on";
- };
-};
diff --git a/Documentation/devicetree/bindings/leds/leds-lp50xx.yaml b/Documentation/devicetree/bindings/leds/leds-lp50xx.yaml
index f12fe5b53f30..63da380748bf 100644
--- a/Documentation/devicetree/bindings/leds/leds-lp50xx.yaml
+++ b/Documentation/devicetree/bindings/leds/leds-lp50xx.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: LED driver for LP50XX RGB LED from Texas Instruments.
maintainers:
- - Dan Murphy <dmurphy@ti.com>
+ - Andrew Davis <afd@ti.com>
description: |
The LP50XX is multi-channel, I2C RGB LED Drivers that can group RGB LEDs into
@@ -56,6 +56,8 @@ patternProperties:
'^multi-led@[0-9a-f]$':
type: object
$ref: leds-class-multicolor.yaml#
+ unevaluatedProperties: false
+
properties:
reg:
minItems: 1
@@ -65,8 +67,14 @@ patternProperties:
for the child node. The LED modules can either be used stand alone
or grouped into a module bank.
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
patternProperties:
- "(^led-[0-9a-f]$|led)":
+ "^led@[0-9a-f]+$":
type: object
$ref: common.yaml#
@@ -78,60 +86,66 @@ additionalProperties: false
examples:
- |
- #include <dt-bindings/gpio/gpio.h>
- #include <dt-bindings/leds/common.h>
-
- i2c {
- #address-cells = <1>;
- #size-cells = <0>;
-
- led-controller@14 {
- compatible = "ti,lp5009";
- reg = <0x14>;
- #address-cells = <1>;
- #size-cells = <0>;
- enable-gpios = <&gpio1 16>;
-
- multi-led@1 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x1>;
- color = <LED_COLOR_ID_RGB>;
- function = LED_FUNCTION_CHARGING;
-
- led-0 {
- color = <LED_COLOR_ID_RED>;
- };
-
- led-1 {
- color = <LED_COLOR_ID_GREEN>;
- };
-
- led-2 {
- color = <LED_COLOR_ID_BLUE>;
- };
- };
-
- multi-led@2 {
- #address-cells = <1>;
- #size-cells = <2>;
- reg = <0x2 0x3 0x5>;
- color = <LED_COLOR_ID_RGB>;
- function = LED_FUNCTION_STANDBY;
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/leds/common.h>
- led-6 {
- color = <LED_COLOR_ID_RED>;
- };
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
- led-7 {
- color = <LED_COLOR_ID_GREEN>;
+ led-controller@14 {
+ compatible = "ti,lp5009";
+ reg = <0x14>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ enable-gpios = <&gpio1 16>;
+
+ multi-led@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x1>;
+ color = <LED_COLOR_ID_RGB>;
+ function = LED_FUNCTION_CHARGING;
+
+ led@0 {
+ reg = <0x0>;
+ color = <LED_COLOR_ID_RED>;
+ };
+
+ led@1 {
+ reg = <0x1>;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led@2 {
+ reg = <0x2>;
+ color = <LED_COLOR_ID_BLUE>;
+ };
};
- led-8 {
- color = <LED_COLOR_ID_BLUE>;
+ multi-led@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x3>, <0x4>, <0x5>;
+ color = <LED_COLOR_ID_RGB>;
+ function = LED_FUNCTION_STANDBY;
+
+ led@3 {
+ reg = <0x3>;
+ color = <LED_COLOR_ID_RED>;
+ };
+
+ led@4 {
+ reg = <0x4>;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led@5 {
+ reg = <0x5>;
+ color = <LED_COLOR_ID_BLUE>;
+ };
};
- };
- };
+ };
};
...
diff --git a/Documentation/devicetree/bindings/leds/leds-lp55xx.yaml b/Documentation/devicetree/bindings/leds/leds-lp55xx.yaml
index f552cd143d5b..7ec676e53851 100644
--- a/Documentation/devicetree/bindings/leds/leds-lp55xx.yaml
+++ b/Documentation/devicetree/bindings/leds/leds-lp55xx.yaml
@@ -108,119 +108,119 @@ additionalProperties: false
examples:
- |
- #include <dt-bindings/leds/common.h>
-
- i2c {
- #address-cells = <1>;
- #size-cells = <0>;
-
- led-controller@32 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "ti,lp8501";
- reg = <0x32>;
- clock-mode = /bits/ 8 <2>;
- pwr-sel = /bits/ 8 <3>; /* D1~9 connected to VOUT */
-
- led@0 {
- reg = <0>;
- chan-name = "d1";
- led-cur = /bits/ 8 <0x14>;
- max-cur = /bits/ 8 <0x20>;
- };
-
- led@1 {
- reg = <1>;
- chan-name = "d2";
- led-cur = /bits/ 8 <0x14>;
- max-cur = /bits/ 8 <0x20>;
- };
-
- led@2 {
- reg = <2>;
- chan-name = "d3";
- led-cur = /bits/ 8 <0x14>;
- max-cur = /bits/ 8 <0x20>;
- };
-
- led@3 {
- reg = <3>;
- chan-name = "d4";
- led-cur = /bits/ 8 <0x14>;
- max-cur = /bits/ 8 <0x20>;
- };
-
- led@4 {
- reg = <4>;
- chan-name = "d5";
- led-cur = /bits/ 8 <0x14>;
- max-cur = /bits/ 8 <0x20>;
- };
-
- led@5 {
- reg = <5>;
- chan-name = "d6";
- led-cur = /bits/ 8 <0x14>;
- max-cur = /bits/ 8 <0x20>;
- };
-
- led@6 {
- reg = <6>;
- chan-name = "d7";
- led-cur = /bits/ 8 <0x14>;
- max-cur = /bits/ 8 <0x20>;
- };
-
- led@7 {
- reg = <7>;
- chan-name = "d8";
- led-cur = /bits/ 8 <0x14>;
- max-cur = /bits/ 8 <0x20>;
- };
-
- led@8 {
- reg = <8>;
- chan-name = "d9";
- led-cur = /bits/ 8 <0x14>;
- max-cur = /bits/ 8 <0x20>;
- };
+ #include <dt-bindings/leds/common.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led-controller@32 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "ti,lp8501";
+ reg = <0x32>;
+ clock-mode = /bits/ 8 <2>;
+ pwr-sel = /bits/ 8 <3>; /* D1~9 connected to VOUT */
+
+ led@0 {
+ reg = <0>;
+ chan-name = "d1";
+ led-cur = /bits/ 8 <0x14>;
+ max-cur = /bits/ 8 <0x20>;
+ };
+
+ led@1 {
+ reg = <1>;
+ chan-name = "d2";
+ led-cur = /bits/ 8 <0x14>;
+ max-cur = /bits/ 8 <0x20>;
+ };
+
+ led@2 {
+ reg = <2>;
+ chan-name = "d3";
+ led-cur = /bits/ 8 <0x14>;
+ max-cur = /bits/ 8 <0x20>;
+ };
+
+ led@3 {
+ reg = <3>;
+ chan-name = "d4";
+ led-cur = /bits/ 8 <0x14>;
+ max-cur = /bits/ 8 <0x20>;
+ };
+
+ led@4 {
+ reg = <4>;
+ chan-name = "d5";
+ led-cur = /bits/ 8 <0x14>;
+ max-cur = /bits/ 8 <0x20>;
+ };
+
+ led@5 {
+ reg = <5>;
+ chan-name = "d6";
+ led-cur = /bits/ 8 <0x14>;
+ max-cur = /bits/ 8 <0x20>;
+ };
+
+ led@6 {
+ reg = <6>;
+ chan-name = "d7";
+ led-cur = /bits/ 8 <0x14>;
+ max-cur = /bits/ 8 <0x20>;
+ };
+
+ led@7 {
+ reg = <7>;
+ chan-name = "d8";
+ led-cur = /bits/ 8 <0x14>;
+ max-cur = /bits/ 8 <0x20>;
+ };
+
+ led@8 {
+ reg = <8>;
+ chan-name = "d9";
+ led-cur = /bits/ 8 <0x14>;
+ max-cur = /bits/ 8 <0x20>;
+ };
};
- led-controller@33 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "national,lp5523";
- reg = <0x33>;
- clock-mode = /bits/ 8 <0>;
-
- multi-led@2 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x2>;
- color = <LED_COLOR_ID_RGB>;
- function = LED_FUNCTION_STANDBY;
- linux,default-trigger = "heartbeat";
-
- led@0 {
- led-cur = /bits/ 8 <50>;
- max-cur = /bits/ 8 <100>;
- reg = <0x0>;
- color = <LED_COLOR_ID_GREEN>;
- };
-
- led@1 {
- led-cur = /bits/ 8 <50>;
- max-cur = /bits/ 8 <100>;
- reg = <0x1>;
- color = <LED_COLOR_ID_BLUE>;
- };
-
- led@6 {
- led-cur = /bits/ 8 <50>;
- max-cur = /bits/ 8 <100>;
- reg = <0x6>;
- color = <LED_COLOR_ID_RED>;
- };
+ led-controller@33 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "national,lp5523";
+ reg = <0x33>;
+ clock-mode = /bits/ 8 <0>;
+
+ multi-led@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x2>;
+ color = <LED_COLOR_ID_RGB>;
+ function = LED_FUNCTION_STANDBY;
+ linux,default-trigger = "heartbeat";
+
+ led@0 {
+ led-cur = /bits/ 8 <50>;
+ max-cur = /bits/ 8 <100>;
+ reg = <0x0>;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led@1 {
+ led-cur = /bits/ 8 <50>;
+ max-cur = /bits/ 8 <100>;
+ reg = <0x1>;
+ color = <LED_COLOR_ID_BLUE>;
+ };
+
+ led@6 {
+ led-cur = /bits/ 8 <50>;
+ max-cur = /bits/ 8 <100>;
+ reg = <0x6>;
+ color = <LED_COLOR_ID_RED>;
+ };
};
};
};
diff --git a/Documentation/devicetree/bindings/leds/leds-pwm-multicolor.yaml b/Documentation/devicetree/bindings/leds/leds-pwm-multicolor.yaml
index 6625a528f727..bd6ec04a8727 100644
--- a/Documentation/devicetree/bindings/leds/leds-pwm-multicolor.yaml
+++ b/Documentation/devicetree/bindings/leds/leds-pwm-multicolor.yaml
@@ -19,6 +19,14 @@ properties:
multi-led:
type: object
+ $ref: leds-class-multicolor.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ max-brightness:
+ description:
+ Maximum brightness possible for the LED
+ $ref: /schemas/types.yaml#/definitions/uint32
patternProperties:
"^led-[0-9a-z]+$":
@@ -33,6 +41,10 @@ properties:
pwm-names: true
+ active-low:
+ description: For PWMs where the LED is wired to supply rather than ground.
+ type: boolean
+
color: true
required:
@@ -42,9 +54,6 @@ properties:
required:
- compatible
-allOf:
- - $ref: leds-class-multicolor.yaml#
-
additionalProperties: false
examples:
@@ -55,24 +64,24 @@ examples:
compatible = "pwm-leds-multicolor";
multi-led {
- color = <LED_COLOR_ID_RGB>;
- function = LED_FUNCTION_INDICATOR;
- max-brightness = <65535>;
-
- led-red {
- pwms = <&pwm1 0 1000000>;
- color = <LED_COLOR_ID_RED>;
- };
-
- led-green {
- pwms = <&pwm2 0 1000000>;
- color = <LED_COLOR_ID_GREEN>;
- };
-
- led-blue {
- pwms = <&pwm3 0 1000000>;
- color = <LED_COLOR_ID_BLUE>;
- };
+ color = <LED_COLOR_ID_RGB>;
+ function = LED_FUNCTION_INDICATOR;
+ max-brightness = <65535>;
+
+ led-red {
+ pwms = <&pwm1 0 1000000>;
+ color = <LED_COLOR_ID_RED>;
+ };
+
+ led-green {
+ pwms = <&pwm2 0 1000000>;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led-blue {
+ pwms = <&pwm3 0 1000000>;
+ color = <LED_COLOR_ID_BLUE>;
+ };
};
};
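A hedged fragment showing the new active-low flag on one colour channel, for boards where the LED is wired to the supply rather than to ground; the PWM phandle is the one already used in the example above:

    led-red {
        pwms = <&pwm1 0 1000000>;
        color = <LED_COLOR_ID_RED>;
        /* invert the PWM because the LED is supply-connected */
        active-low;
    };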
diff --git a/Documentation/devicetree/bindings/leds/leds-qcom-lpg.yaml b/Documentation/devicetree/bindings/leds/leds-qcom-lpg.yaml
index 409a4c7298e1..497db289169d 100644
--- a/Documentation/devicetree/bindings/leds/leds-qcom-lpg.yaml
+++ b/Documentation/devicetree/bindings/leds/leds-qcom-lpg.yaml
@@ -17,6 +17,7 @@ description: >
properties:
compatible:
enum:
+ - qcom,pm660l-lpg
- qcom,pm8150b-lpg
- qcom,pm8150l-lpg
- qcom,pm8350c-pwm
@@ -58,6 +59,8 @@ properties:
multi-led:
type: object
$ref: leds-class-multicolor.yaml#
+ unevaluatedProperties: false
+
properties:
"#address-cells":
const: 1
diff --git a/Documentation/devicetree/bindings/leds/skyworks,aat1290.yaml b/Documentation/devicetree/bindings/leds/skyworks,aat1290.yaml
new file mode 100644
index 000000000000..a6aaa92dbccd
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/skyworks,aat1290.yaml
@@ -0,0 +1,95 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/skyworks,aat1290.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Skyworks Solutions, Inc. AAT1290 Current Regulator for Flash LEDs
+
+maintainers:
+ - Jacek Anaszewski <jacek.anaszewski@gmail.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
+
+description: |
+  The device is controlled through two pins:: FL_EN and EN_SET. The pins, when
+  asserted high, enable flash strobe and movie mode (max 1/2 of flash current)
+ respectively. In order to add a capability of selecting the strobe signal
+ source (e.g. CPU or camera sensor) there is an additional switch required,
+ independent of the flash chip. The switch is controlled with pin control.
+
+properties:
+ compatible:
+ const: skyworks,aat1290
+
+ enset-gpios:
+ maxItems: 1
+ description: EN_SET pin
+
+ flen-gpios:
+ maxItems: 1
+ description: FL_EN pin
+
+ led:
+ $ref: common.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ led-max-microamp: true
+
+ flash-max-microamp:
+ description: |
+ Maximum flash LED supply current can be calculated using following
+ formula:: I = 1A * 162 kOhm / Rset.
+
+ flash-max-timeout-us:
+ description: |
+ Maximum flash timeout can be calculated using following formula::
+ T = 8.82 * 10^9 * Ct.
+
+ required:
+ - flash-max-microamp
+ - flash-max-timeout-us
+ - led-max-microamp
+
+ pinctrl-names:
+ items:
+ - const: default
+ - const: host
+ - const: isp
+
+ pinctrl-0: true
+ pinctrl-1: true
+ pinctrl-2: true
+
+required:
+ - compatible
+ - enset-gpios
+ - flen-gpios
+ - led
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/leds/common.h>
+
+ // Ct = 220 nF, Rset = 160 kOhm
+ led-controller {
+ compatible = "skyworks,aat1290";
+ flen-gpios = <&gpj1 1 GPIO_ACTIVE_HIGH>;
+ enset-gpios = <&gpj1 2 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-names = "default", "host", "isp";
+ pinctrl-0 = <&camera_flash_host>;
+ pinctrl-1 = <&camera_flash_host>;
+ pinctrl-2 = <&camera_flash_isp>;
+
+ led {
+ function = LED_FUNCTION_FLASH;
+ color = <LED_COLOR_ID_WHITE>;
+ led-max-microamp = <520833>;
+ flash-max-microamp = <1012500>;
+ flash-max-timeout-us = <1940000>;
+ };
+ };
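A worked check of the two formulas above against the example's own component values (Rset = 160 kOhm, Ct = 220 nF); the unit of the timeout result is an assumption, since the binding does not state it:

    /* flash-max-microamp:   1 A * 162 kOhm / 160 kOhm = 1.0125 A -> <1012500>
     * flash-max-timeout-us: 8.82 * 10^9 * 220 * 10^-9 = 1940.4, which matches
     * <1940000> (about 1.94 s) if the result is read in milliseconds.
     */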
diff --git a/Documentation/devicetree/bindings/mailbox/arm,mhu.yaml b/Documentation/devicetree/bindings/mailbox/arm,mhu.yaml
index bd49c201477d..d9a4f4a02d7c 100644
--- a/Documentation/devicetree/bindings/mailbox/arm,mhu.yaml
+++ b/Documentation/devicetree/bindings/mailbox/arm,mhu.yaml
@@ -57,6 +57,7 @@ properties:
maxItems: 1
interrupts:
+ minItems: 2
items:
- description: low-priority non-secure
- description: high-priority non-secure
diff --git a/Documentation/devicetree/bindings/mailbox/fsl,mu.yaml b/Documentation/devicetree/bindings/mailbox/fsl,mu.yaml
index 7a86e7926dd2..191c1ce15009 100644
--- a/Documentation/devicetree/bindings/mailbox/fsl,mu.yaml
+++ b/Documentation/devicetree/bindings/mailbox/fsl,mu.yaml
@@ -72,14 +72,16 @@ properties:
type : Channel type
channel : Channel number
- This MU support 4 type of unidirectional channels, each type
- has 4 channels. A total of 16 channels. Following types are
+      This MU supports 5 types of unidirectional channels; each type
+      has 4 channels except the RST channel, which has only 1 channel.
+      A total of 17 channels. The following types are
supported:
0 - TX channel with 32bit transmit register and IRQ transmit
acknowledgment support.
1 - RX channel with 32bit receive register and IRQ support
2 - TX doorbell channel. Without own register and no ACK support.
3 - RX doorbell channel.
+ 4 - RST channel
const: 2
clocks:
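A hedged client-side sketch of the new RST channel type; the client node, channel indices and mbox-names are illustrative assumptions, only the two-cell <type channel> specifier format comes from the binding:

    client {
        mboxes = <&mu 0 0>,   /* type 0: TX channel 0 */
                 <&mu 4 0>;   /* type 4: the new RST channel (single channel) */
        mbox-names = "tx", "rst";
    };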
diff --git a/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml b/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml
index 3b5ba7ecc19d..f504652fc0ea 100644
--- a/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml
+++ b/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml
@@ -15,26 +15,30 @@ maintainers:
properties:
compatible:
- enum:
- - qcom,ipq6018-apcs-apps-global
- - qcom,ipq8074-apcs-apps-global
- - qcom,msm8916-apcs-kpss-global
- - qcom,msm8939-apcs-kpss-global
- - qcom,msm8953-apcs-kpss-global
- - qcom,msm8976-apcs-kpss-global
- - qcom,msm8994-apcs-kpss-global
- - qcom,msm8996-apcs-hmss-global
- - qcom,msm8998-apcs-hmss-global
- - qcom,qcm2290-apcs-hmss-global
- - qcom,qcs404-apcs-apps-global
- - qcom,sc7180-apss-shared
- - qcom,sc8180x-apss-shared
- - qcom,sdm660-apcs-hmss-global
- - qcom,sdm845-apss-shared
- - qcom,sm6125-apcs-hmss-global
- - qcom,sm6115-apcs-hmss-global
- - qcom,sm8150-apss-shared
-
+ oneOf:
+ - items:
+ - enum:
+ - qcom,ipq6018-apcs-apps-global
+ - qcom,ipq8074-apcs-apps-global
+ - qcom,msm8976-apcs-kpss-global
+ - qcom,msm8996-apcs-hmss-global
+ - qcom,msm8998-apcs-hmss-global
+ - qcom,qcm2290-apcs-hmss-global
+ - qcom,sc7180-apss-shared
+ - qcom,sc8180x-apss-shared
+ - qcom,sdm660-apcs-hmss-global
+ - qcom,sdm845-apss-shared
+ - qcom,sm6125-apcs-hmss-global
+ - qcom,sm6115-apcs-hmss-global
+ - qcom,sm8150-apss-shared
+ - items:
+ - enum:
+ - qcom,msm8916-apcs-kpss-global
+ - qcom,msm8939-apcs-kpss-global
+ - qcom,msm8953-apcs-kpss-global
+ - qcom,msm8994-apcs-kpss-global
+ - qcom,qcs404-apcs-apps-global
+ - const: syscon
reg:
maxItems: 1
@@ -121,7 +125,7 @@ examples:
#define GCC_APSS_AHB_CLK_SRC 1
#define GCC_GPLL0_AO_OUT_MAIN 123
apcs: mailbox@b011000 {
- compatible = "qcom,qcs404-apcs-apps-global";
+ compatible = "qcom,qcs404-apcs-apps-global", "syscon";
reg = <0x0b011000 0x1000>;
#mbox-cells = <1>;
clocks = <&apcs_hfpll>, <&gcc GCC_GPLL0_AO_OUT_MAIN>;
diff --git a/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml b/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml
index 1994be858940..baca4786ff94 100644
--- a/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml
+++ b/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml
@@ -25,6 +25,7 @@ properties:
items:
- enum:
- qcom,sm6350-ipcc
+ - qcom,sm6375-ipcc
- qcom,sm8250-ipcc
- qcom,sm8350-ipcc
- qcom,sm8450-ipcc
diff --git a/Documentation/devicetree/bindings/media/gpio-ir-receiver.txt b/Documentation/devicetree/bindings/media/gpio-ir-receiver.txt
deleted file mode 100644
index 108bf435b933..000000000000
--- a/Documentation/devicetree/bindings/media/gpio-ir-receiver.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-Device-Tree bindings for GPIO IR receiver
-
-Required properties:
- - compatible: should be "gpio-ir-receiver".
- - gpios: specifies GPIO used for IR signal reception.
-
-Optional properties:
- - linux,rc-map-name: see rc.txt file in the same
- directory.
- - linux,autosuspend-period: autosuspend delay time,
- the unit is milisecond.
-
-Example node:
-
- ir: ir-receiver {
- compatible = "gpio-ir-receiver";
- gpios = <&gpio0 19 1>;
- linux,rc-map-name = "rc-rc6-mce";
- linux,autosuspend-period = <125>;
- };
diff --git a/Documentation/devicetree/bindings/media/gpio-ir-receiver.yaml b/Documentation/devicetree/bindings/media/gpio-ir-receiver.yaml
new file mode 100644
index 000000000000..61072745b983
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/gpio-ir-receiver.yaml
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/gpio-ir-receiver.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: GPIO Based IR receiver
+
+maintainers:
+ - Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+
+allOf:
+ - $ref: rc.yaml#
+
+properties:
+ compatible:
+ const: gpio-ir-receiver
+
+ gpios:
+ maxItems: 1
+
+ linux,autosuspend-period:
+ description: autosuspend delay time in milliseconds
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+required:
+ - compatible
+ - gpios
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ ir-receiver {
+ compatible = "gpio-ir-receiver";
+ gpios = <&gpio0 19 1>;
+ linux,rc-map-name = "rc-rc6-mce";
+ linux,autosuspend-period = <125>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/media/rc.yaml b/Documentation/devicetree/bindings/media/rc.yaml
index d4c541c4b164..b11d14ab89c4 100644
--- a/Documentation/devicetree/bindings/media/rc.yaml
+++ b/Documentation/devicetree/bindings/media/rc.yaml
@@ -12,7 +12,7 @@ maintainers:
properties:
$nodename:
- pattern: "^ir(@[a-f0-9]+)?$"
+ pattern: "^ir(-receiver)?(@[a-f0-9]+)?$"
linux,rc-map-name:
description:
diff --git a/Documentation/devicetree/bindings/memory-controllers/canaan,k210-sram.yaml b/Documentation/devicetree/bindings/memory-controllers/canaan,k210-sram.yaml
new file mode 100644
index 000000000000..f81fb866e319
--- /dev/null
+++ b/Documentation/devicetree/bindings/memory-controllers/canaan,k210-sram.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/memory-controllers/canaan,k210-sram.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Canaan K210 SRAM memory controller
+
+description:
+ The Canaan K210 SRAM memory controller is responsible for the system's 8 MiB
+ of SRAM. The controller is initialised by the bootloader, which configures
+ its clocks, before OS bringup.
+
+maintainers:
+ - Conor Dooley <conor@kernel.org>
+
+properties:
+ compatible:
+ enum:
+ - canaan,k210-sram
+
+ clocks:
+ minItems: 1
+ items:
+ - description: sram0 clock
+ - description: sram1 clock
+ - description: aisram clock
+
+ clock-names:
+ minItems: 1
+ items:
+ - const: sram0
+ - const: sram1
+ - const: aisram
+
+required:
+ - compatible
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/k210-clk.h>
+ memory-controller {
+ compatible = "canaan,k210-sram";
+ clocks = <&sysclk K210_CLK_SRAM0>,
+ <&sysclk K210_CLK_SRAM1>,
+ <&sysclk K210_CLK_AI>;
+ clock-names = "sram0", "sram1", "aisram";
+ };
diff --git a/Documentation/devicetree/bindings/mfd/da9063.txt b/Documentation/devicetree/bindings/mfd/da9063.txt
deleted file mode 100644
index aa8b800cc4ad..000000000000
--- a/Documentation/devicetree/bindings/mfd/da9063.txt
+++ /dev/null
@@ -1,114 +0,0 @@
-* Dialog DA9063/DA9063L Power Management Integrated Circuit (PMIC)
-
-DA9063 consists of a large and varied group of sub-devices (I2C Only):
-
-Device Supply Names Description
------- ------------ -----------
-da9063-regulator : : LDOs & BUCKs
-da9063-onkey : : On Key
-da9063-rtc : : Real-Time Clock (DA9063 only)
-da9063-watchdog : : Watchdog
-
-======
-
-Required properties:
-
-- compatible : Should be "dlg,da9063" or "dlg,da9063l"
-- reg : Specifies the I2C slave address (this defaults to 0x58 but it can be
- modified to match the chip's OTP settings).
-- interrupts : IRQ line information.
-- interrupt-controller
-
-Sub-nodes:
-
-- regulators : This node defines the settings for the LDOs and BUCKs.
- The DA9063(L) regulators are bound using their names listed below:
-
- bcore1 : BUCK CORE1
- bcore2 : BUCK CORE2
- bpro : BUCK PRO
- bmem : BUCK MEM
- bio : BUCK IO
- bperi : BUCK PERI
- ldo1 : LDO_1 (DA9063 only)
- ldo2 : LDO_2 (DA9063 only)
- ldo3 : LDO_3
- ldo4 : LDO_4 (DA9063 only)
- ldo5 : LDO_5 (DA9063 only)
- ldo6 : LDO_6 (DA9063 only)
- ldo7 : LDO_7
- ldo8 : LDO_8
- ldo9 : LDO_9
- ldo10 : LDO_10 (DA9063 only)
- ldo11 : LDO_11
-
- The component follows the standard regulator framework and the bindings
- details of individual regulator device can be found in:
- Documentation/devicetree/bindings/regulator/regulator.txt
-
-- rtc : This node defines settings for the Real-Time Clock associated with
- the DA9063 only. The RTC is not present in DA9063L. There are currently
- no entries in this binding, however compatible = "dlg,da9063-rtc" should
- be added if a node is created.
-
-- onkey : This node defines the OnKey settings for controlling the key
- functionality of the device. The node should contain the compatible property
- with the value "dlg,da9063-onkey".
-
- Optional onkey properties:
-
- - dlg,disable-key-power : Disable power-down using a long key-press. If this
- entry exists the OnKey driver will remove support for the KEY_POWER key
- press. If this entry does not exist then by default the key-press
- triggered power down is enabled and the OnKey will support both KEY_POWER
- and KEY_SLEEP.
-
-- watchdog : This node defines settings for the Watchdog timer associated
- with the DA9063 and DA9063L. The node should contain the compatible property
- with the value "dlg,da9063-watchdog".
-
- Optional watchdog properties:
- - dlg,use-sw-pm: Add this property to disable the watchdog during suspend.
- Only use this option if you can't use the watchdog automatic suspend
- function during a suspend (see register CONTROL_B).
-
-Example:
-
- pmic0: da9063@58 {
- compatible = "dlg,da9063"
- reg = <0x58>;
- interrupt-parent = <&gpio6>;
- interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
- interrupt-controller;
-
- rtc {
- compatible = "dlg,da9063-rtc";
- };
-
- wdt {
- compatible = "dlg,da9063-watchdog";
- };
-
- onkey {
- compatible = "dlg,da9063-onkey";
- dlg,disable-key-power;
- };
-
- regulators {
- DA9063_BCORE1: bcore1 {
- regulator-name = "BCORE1";
- regulator-min-microvolt = <300000>;
- regulator-max-microvolt = <1570000>;
- regulator-min-microamp = <500000>;
- regulator-max-microamp = <2000000>;
- regulator-boot-on;
- };
- DA9063_LDO11: ldo11 {
- regulator-name = "LDO_11";
- regulator-min-microvolt = <900000>;
- regulator-max-microvolt = <3600000>;
- regulator-boot-on;
- };
- };
- };
-
diff --git a/Documentation/devicetree/bindings/mfd/dlg,da9063.yaml b/Documentation/devicetree/bindings/mfd/dlg,da9063.yaml
new file mode 100644
index 000000000000..d71933460e90
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/dlg,da9063.yaml
@@ -0,0 +1,132 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/dlg,da9063.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Dialog DA9063/DA9063L Power Management Integrated Circuit (PMIC)
+
+maintainers:
+ - Steve Twiss <stwiss.opensource@diasemi.com>
+
+description: |
+ For device-tree bindings of other sub-modules refer to the binding documents
+ under the respective sub-system directories.
+
+properties:
+ compatible:
+ enum:
+ - dlg,da9063
+ - dlg,da9063l
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ "#interrupt-cells":
+ const: 2
+
+ dlg,use-sw-pm:
+ type: boolean
+ description:
+ Disable the watchdog during suspend.
+ Only use this option if you can't use the watchdog automatic suspend
+ function during a suspend (see register CONTROL_B).
+
+ watchdog:
+ type: object
+ $ref: /schemas/watchdog/watchdog.yaml#
+ unevaluatedProperties: false
+ properties:
+ compatible:
+ const: dlg,da9063-watchdog
+
+ rtc:
+ type: object
+ $ref: /schemas/rtc/rtc.yaml#
+ unevaluatedProperties: false
+ properties:
+ compatible:
+ const: dlg,da9063-rtc
+
+ onkey:
+ type: object
+ $ref: /schemas/input/input.yaml#
+ unevaluatedProperties: false
+ properties:
+ compatible:
+ const: dlg,da9063-onkey
+
+ dlg,disable-key-power:
+ type: boolean
+ description: |
+ Disable power-down using a long key-press.
+ If this entry does not exist then by default the key-press triggered
+ power down is enabled and the OnKey will support both KEY_POWER and
+ KEY_SLEEP.
+
+ regulators:
+ type: object
+ patternProperties:
+      "^(ldo([1-9]|1[01])|bcore[1-2]|bpro|bmem|bio|bperi)$":
+ $ref: /schemas/regulator/regulator.yaml
+ unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-controller
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pmic@58 {
+ compatible = "dlg,da9063";
+ reg = <0x58>;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&gpio6>;
+ interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+
+ rtc {
+ compatible = "dlg,da9063-rtc";
+ };
+
+ watchdog {
+ compatible = "dlg,da9063-watchdog";
+ };
+
+ onkey {
+ compatible = "dlg,da9063-onkey";
+ dlg,disable-key-power;
+ };
+
+ regulators {
+ regulator-bcore1 {
+ regulator-name = "BCORE1";
+ regulator-min-microvolt = <300000>;
+ regulator-max-microvolt = <1570000>;
+ regulator-min-microamp = <500000>;
+ regulator-max-microamp = <2000000>;
+ regulator-boot-on;
+ };
+ regulator-ldo11 {
+ regulator-name = "LDO_11";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <3600000>;
+ regulator-boot-on;
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/mfd/gateworks-gsc.yaml b/Documentation/devicetree/bindings/mfd/gateworks-gsc.yaml
index 5a1e8d21f7a0..5e0fe3ebe1d2 100644
--- a/Documentation/devicetree/bindings/mfd/gateworks-gsc.yaml
+++ b/Documentation/devicetree/bindings/mfd/gateworks-gsc.yaml
@@ -19,7 +19,6 @@ description: |
maintainers:
- Tim Harvey <tharvey@gateworks.com>
- - Robert Jones <rjones@gateworks.com>
properties:
$nodename:
diff --git a/Documentation/devicetree/bindings/mfd/google,cros-ec.yaml b/Documentation/devicetree/bindings/mfd/google,cros-ec.yaml
index e25caf8ef9f4..04962bb29576 100644
--- a/Documentation/devicetree/bindings/mfd/google,cros-ec.yaml
+++ b/Documentation/devicetree/bindings/mfd/google,cros-ec.yaml
@@ -90,6 +90,9 @@ properties:
pwm:
$ref: "/schemas/pwm/google,cros-ec-pwm.yaml#"
+ kbd-led-backlight:
+ $ref: "/schemas/chrome/google,cros-kbd-led-backlight.yaml#"
+
keyboard-controller:
$ref: "/schemas/input/google,cros-ec-keyb.yaml#"
diff --git a/Documentation/devicetree/bindings/mfd/mps,mp2629.yaml b/Documentation/devicetree/bindings/mfd/mps,mp2629.yaml
index f91acc42d652..5ba849d78d8a 100644
--- a/Documentation/devicetree/bindings/mfd/mps,mp2629.yaml
+++ b/Documentation/devicetree/bindings/mfd/mps,mp2629.yaml
@@ -18,7 +18,9 @@ description: |
properties:
compatible:
- const: mps,mp2629
+ enum:
+ - mps,mp2629
+ - mps,mp2733
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/mfd/mt6397.txt b/Documentation/devicetree/bindings/mfd/mt6397.txt
index 293db2a71ef2..0088442efca1 100644
--- a/Documentation/devicetree/bindings/mfd/mt6397.txt
+++ b/Documentation/devicetree/bindings/mfd/mt6397.txt
@@ -20,6 +20,7 @@ This document describes the binding for MFD device and its sub module.
Required properties:
compatible:
"mediatek,mt6323" for PMIC MT6323
+ "mediatek,mt6331" for PMIC MT6331 and MT6332
"mediatek,mt6358" for PMIC MT6358 and MT6366
"mediatek,mt6359" for PMIC MT6359
"mediatek,mt6397" for PMIC MT6397
@@ -29,6 +30,7 @@ Optional subnodes:
- rtc
Required properties: Should be one of follows
- compatible: "mediatek,mt6323-rtc"
+ - compatible: "mediatek,mt6331-rtc"
- compatible: "mediatek,mt6358-rtc"
- compatible: "mediatek,mt6397-rtc"
For details, see ../rtc/rtc-mt6397.txt
@@ -52,8 +54,10 @@ Optional subnodes:
see ../leds/leds-mt6323.txt
- keys
- Required properties:
- - compatible: "mediatek,mt6397-keys" or "mediatek,mt6323-keys"
+ Required properties: Should be one of the following
+ - compatible: "mediatek,mt6323-keys"
+ - compatible: "mediatek,mt6331-keys"
+ - compatible: "mediatek,mt6397-keys"
see ../input/mtk-pmic-keys.txt
- power-controller
diff --git a/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.txt b/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.txt
deleted file mode 100644
index eb78e3ae7703..000000000000
--- a/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.txt
+++ /dev/null
@@ -1,94 +0,0 @@
- Qualcomm SPMI PMICs multi-function device bindings
-
-The Qualcomm SPMI series presently includes PM8941, PM8841 and PMA8084
-PMICs. These PMICs use a QPNP scheme through SPMI interface.
-QPNP is effectively a partitioning scheme for dividing the SPMI extended
-register space up into logical pieces, and set of fixed register
-locations/definitions within these regions, with some of these regions
-specifically used for interrupt handling.
-
-The QPNP PMICs are used with the Qualcomm Snapdragon series SoCs, and are
-interfaced to the chip via the SPMI (System Power Management Interface) bus.
-Support for multiple independent functions are implemented by splitting the
-16-bit SPMI slave address space into 256 smaller fixed-size regions, 256 bytes
-each. A function can consume one or more of these fixed-size register regions.
-
-Required properties:
-- compatible: Should contain one of:
- "qcom,pm660",
- "qcom,pm660l",
- "qcom,pm7325",
- "qcom,pm8004",
- "qcom,pm8005",
- "qcom,pm8019",
- "qcom,pm8028",
- "qcom,pm8110",
- "qcom,pm8150",
- "qcom,pm8150b",
- "qcom,pm8150c",
- "qcom,pm8150l",
- "qcom,pm8226",
- "qcom,pm8350c",
- "qcom,pm8841",
- "qcom,pm8901",
- "qcom,pm8909",
- "qcom,pm8916",
- "qcom,pm8941",
- "qcom,pm8950",
- "qcom,pm8953",
- "qcom,pm8994",
- "qcom,pm8998",
- "qcom,pma8084",
- "qcom,pmd9635",
- "qcom,pmi8950",
- "qcom,pmi8962",
- "qcom,pmi8994",
- "qcom,pmi8998",
- "qcom,pmk8002",
- "qcom,pmk8350",
- "qcom,pmr735a",
- "qcom,smb2351",
- or generalized "qcom,spmi-pmic".
-- reg: Specifies the SPMI USID slave address for this device.
- For more information see:
- Documentation/devicetree/bindings/spmi/spmi.yaml
-
-Required properties for peripheral child nodes:
-- compatible: Should contain "qcom,xxx", where "xxx" is a peripheral name.
-
-Optional properties for peripheral child nodes:
-- interrupts: Interrupts are specified as a 4-tuple. For more information
- see:
- Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb.yaml
-- interrupt-names: Corresponding interrupt name to the interrupts property
-
-Each child node of SPMI slave id represents a function of the PMIC. In the
-example below the rtc device node represents a peripheral of pm8941
-SID = 0. The regulator device node represents a peripheral of pm8941 SID = 1.
-
-Example:
-
- spmi {
- compatible = "qcom,spmi-pmic-arb";
-
- pm8941@0 {
- compatible = "qcom,pm8941", "qcom,spmi-pmic";
- reg = <0x0 SPMI_USID>;
-
- rtc {
- compatible = "qcom,rtc";
- interrupts = <0x0 0x61 0x1 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "alarm";
- };
- };
-
- pm8941@1 {
- compatible = "qcom,pm8941", "qcom,spmi-pmic";
- reg = <0x1 SPMI_USID>;
-
- regulator {
- compatible = "qcom,regulator";
- regulator-name = "8941_boost";
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.yaml b/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.yaml
new file mode 100644
index 000000000000..65cbc6dee545
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.yaml
@@ -0,0 +1,190 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/qcom,spmi-pmic.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm SPMI PMICs multi-function device
+
+description: |
+ Some Qualcomm PMICs used with the Snapdragon series SoCs are interfaced
+ to the chip via the SPMI (System Power Management Interface) bus.
+  Support for multiple independent functions is implemented by splitting the
+ 16-bit SPMI peripheral address space into 256 smaller fixed-size regions, 256 bytes
+ each. A function can consume one or more of these fixed-size register regions.
+
+ The Qualcomm SPMI series includes the PM8941, PM8841, PMA8084, PM8998 and other
+  PMICs. These PMICs use a "QPNP" scheme through the SPMI interface.
+  QPNP is effectively a partitioning scheme for dividing the SPMI extended
+  register space up into logical pieces, and a set of fixed register
+  locations/definitions within these regions, with some of these regions
+ specifically used for interrupt handling.
+
+maintainers:
+ - Stephen Boyd <sboyd@kernel.org>
+
+properties:
+ $nodename:
+ oneOf:
+ - pattern: '^pmic@.*$'
+ - pattern: '^pm(a|s)?[0-9]*@.*$'
+ deprecated: true
+
+ compatible:
+ items:
+ - enum:
+ - qcom,pm660
+ - qcom,pm660l
+ - qcom,pm6150
+ - qcom,pm6150l
+ - qcom,pm6350
+ - qcom,pm7325
+ - qcom,pm8004
+ - qcom,pm8005
+ - qcom,pm8009
+ - qcom,pm8019
+ - qcom,pm8110
+ - qcom,pm8150
+ - qcom,pm8150b
+ - qcom,pm8150l
+ - qcom,pm8226
+ - qcom,pm8350
+ - qcom,pm8350b
+ - qcom,pm8350c
+ - qcom,pm8841
+ - qcom,pm8909
+ - qcom,pm8916
+ - qcom,pm8941
+ - qcom,pm8950
+ - qcom,pm8994
+ - qcom,pm8998
+ - qcom,pma8084
+ - qcom,pmd9635
+ - qcom,pmi8950
+ - qcom,pmi8962
+ - qcom,pmi8994
+ - qcom,pmi8998
+ - qcom,pmk8350
+ - qcom,pmm8155au
+ - qcom,pmr735a
+ - qcom,pmr735b
+ - qcom,pms405
+ - qcom,pmx55
+ - qcom,pmx65
+ - qcom,smb2351
+ - const: qcom,spmi-pmic
+
+ reg:
+ minItems: 1
+ maxItems: 2
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+ labibb:
+ type: object
+ $ref: /schemas/regulator/qcom-labibb-regulator.yaml#
+
+ regulators:
+ type: object
+ $ref: /schemas/regulator/regulator.yaml#
+
+patternProperties:
+ "^adc@[0-9a-f]+$":
+ type: object
+ $ref: /schemas/iio/adc/qcom,spmi-vadc.yaml#
+
+ "^adc-tm@[0-9a-f]+$":
+ type: object
+ $ref: /schemas/thermal/qcom-spmi-adc-tm5.yaml#
+
+ "^audio-codec@[0-9a-f]+$":
+ type: object
+ additionalProperties: true # FIXME qcom,pm8916-wcd-analog-codec binding not converted yet
+
+ "extcon@[0-9a-f]+$":
+ type: object
+ $ref: /schemas/extcon/qcom,pm8941-misc.yaml#
+
+ "gpio(s)?@[0-9a-f]+$":
+ type: object
+ $ref: /schemas/pinctrl/qcom,pmic-gpio.yaml#
+
+ "pon@[0-9a-f]+$":
+ type: object
+ $ref: /schemas/power/reset/qcom,pon.yaml#
+
+ "pwm@[0-9a-f]+$":
+ type: object
+ $ref: /schemas/leds/leds-qcom-lpg.yaml#
+
+ "^rtc@[0-9a-f]+$":
+ type: object
+ $ref: /schemas/rtc/qcom-pm8xxx-rtc.yaml#
+
+ "^temp-alarm@[0-9a-f]+$":
+ type: object
+ $ref: /schemas/thermal/qcom,spmi-temp-alarm.yaml#
+
+ "^vibrator@[0-9a-f]+$":
+ type: object
+ additionalProperties: true # FIXME qcom,pm8916-vib binding not converted yet
+
+ "^mpps@[0-9a-f]+$":
+ type: object
+ $ref: /schemas/pinctrl/qcom,pmic-mpp.yaml#
+
+ "(.*)?(wled|leds)@[0-9a-f]+$":
+ type: object
+ $ref: /schemas/leds/backlight/qcom-wled.yaml#
+ unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/spmi/spmi.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ spmi@c440000 {
+ compatible = "qcom,spmi-pmic-arb";
+ reg = <0x0c440000 0x1100>,
+ <0x0c600000 0x2000000>,
+ <0x0e600000 0x100000>,
+ <0x0e700000 0xa0000>,
+ <0x0c40a000 0x26000>;
+ reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
+ interrupt-names = "periph_irq";
+ interrupts = <GIC_SPI 481 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,ee = <0>;
+ qcom,channel = <0>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ interrupt-controller;
+ #interrupt-cells = <4>;
+
+ pmi8998_lsid0: pmic@2 {
+ compatible = "qcom,pmi8998", "qcom,spmi-pmic";
+ reg = <0x2 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pmi8998_gpio: gpios@c000 {
+ compatible = "qcom,pmi8998-gpio", "qcom,spmi-gpio";
+ reg = <0xc000>;
+ gpio-controller;
+ gpio-ranges = <&pmi8998_gpio 0 0 14>;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+ };
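Taken together with the patternProperties above, a peripheral child such as the RTC from the removed
text example is now named with a unit address. A hedged sketch; the 0x6000 address, the reg value and
the qcom,pm8941-rtc compatible are assumptions for illustration and are not taken from this patch:

    pmic@0 {
        compatible = "qcom,pm8941", "qcom,spmi-pmic";
        reg = <0x0 SPMI_USID>;
        #address-cells = <1>;
        #size-cells = <0>;

        rtc@6000 {                                   /* unit address is hypothetical */
            compatible = "qcom,pm8941-rtc";          /* assumed; see qcom-pm8xxx-rtc.yaml */
            reg = <0x6000>;                          /* hypothetical register offset */
            interrupts = <0x0 0x61 0x1 IRQ_TYPE_EDGE_RISING>;
            interrupt-names = "alarm";
        };
    };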
diff --git a/Documentation/devicetree/bindings/mfd/qcom,tcsr.txt b/Documentation/devicetree/bindings/mfd/qcom,tcsr.txt
deleted file mode 100644
index add61bcc3c74..000000000000
--- a/Documentation/devicetree/bindings/mfd/qcom,tcsr.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-QCOM Top Control and Status Register
-
-Qualcomm devices have a set of registers that provide various control and status
-functions for their peripherals. This node is intended to allow access to these
-registers via syscon.
-
-Required properties:
-- compatible: Should contain:
- "qcom,tcsr-ipq6018", "syscon", "simple-mfd" for IPQ6018
- "qcom,tcsr-ipq8064", "syscon" for IPQ8064
- "qcom,tcsr-apq8064", "syscon" for APQ8064
- "qcom,tcsr-msm8660", "syscon" for MSM8660
- "qcom,tcsr-msm8953", "syscon" for MSM8953
- "qcom,tcsr-msm8960", "syscon" for MSM8960
- "qcom,tcsr-msm8974", "syscon" for MSM8974
- "qcom,tcsr-apq8084", "syscon" for APQ8084
- "qcom,tcsr-msm8916", "syscon" for MSM8916
-- reg: Address range for TCSR registers
-
-Example:
- tcsr: syscon@1a400000 {
- compatible = "qcom,tcsr-msm8960", "syscon";
- reg = <0x1a400000 0x100>;
- };
diff --git a/Documentation/devicetree/bindings/mfd/qcom,tcsr.yaml b/Documentation/devicetree/bindings/mfd/qcom,tcsr.yaml
new file mode 100644
index 000000000000..2f816fd0c9ec
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/qcom,tcsr.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/qcom,tcsr.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Top Control and Status Register
+
+maintainers:
+ - Bjorn Andersson <bjorn.andersson@linaro.org>
+
+description:
+ Qualcomm devices have a set of registers that provide various control and
+ status functions for their peripherals.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - qcom,tcsr-apq8064
+ - qcom,tcsr-apq8084
+ - qcom,tcsr-ipq8064
+ - qcom,tcsr-mdm9615
+ - qcom,tcsr-msm8660
+ - qcom,tcsr-msm8916
+ - qcom,tcsr-msm8953
+ - qcom,tcsr-msm8960
+ - qcom,tcsr-msm8974
+ - const: syscon
+ - items:
+ - const: qcom,tcsr-ipq6018
+ - const: syscon
+ - const: simple-mfd
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ syscon@1a400000 {
+ compatible = "qcom,tcsr-msm8960", "syscon";
+ reg = <0x1a400000 0x100>;
+ };
diff --git a/Documentation/devicetree/bindings/mfd/qcom-pm8xxx.yaml b/Documentation/devicetree/bindings/mfd/qcom-pm8xxx.yaml
index 2568736701be..61bd0b3ce02f 100644
--- a/Documentation/devicetree/bindings/mfd/qcom-pm8xxx.yaml
+++ b/Documentation/devicetree/bindings/mfd/qcom-pm8xxx.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm PM8xxx PMIC multi-function devices
maintainers:
- - Satya Priya <skakit@codeaurora.org>
+ - Satya Priya <quic_c_skakit@quicinc.com>
description: |
The PM8xxx family of Power Management ICs are used to provide regulated
diff --git a/Documentation/devicetree/bindings/mfd/rohm,bd71815-pmic.yaml b/Documentation/devicetree/bindings/mfd/rohm,bd71815-pmic.yaml
index fe265bcab50d..fbface720678 100644
--- a/Documentation/devicetree/bindings/mfd/rohm,bd71815-pmic.yaml
+++ b/Documentation/devicetree/bindings/mfd/rohm,bd71815-pmic.yaml
@@ -74,7 +74,7 @@ properties:
rohm,enable-hidden-gpo:
description: |
The BD71815 has undocumented GPO at pin E5. Pin is marked as GND at the
- data-sheet as it's location in the middle of GND pins makes it hard to
+ data-sheet as its location in the middle of GND pins makes it hard to
use on PCB. If your board has managed to use this pin you can enable the
second GPO by defining this property. Dont enable this if you are unsure
about how the E5 pin is connected on your board.
diff --git a/Documentation/devicetree/bindings/mfd/st,stm32-lptimer.yaml b/Documentation/devicetree/bindings/mfd/st,stm32-lptimer.yaml
index ec7f0190f46e..a58f08aa430d 100644
--- a/Documentation/devicetree/bindings/mfd/st,stm32-lptimer.yaml
+++ b/Documentation/devicetree/bindings/mfd/st,stm32-lptimer.yaml
@@ -58,43 +58,43 @@ properties:
- "#pwm-cells"
- compatible
-patternProperties:
- "^trigger@[0-9]+$":
+ counter:
type: object
properties:
compatible:
- const: st,stm32-lptimer-trigger
-
- reg:
- description: Identify trigger hardware block.
- items:
- minimum: 0
- maximum: 2
+ const: st,stm32-lptimer-counter
required:
- compatible
- - reg
- counter:
+ timer:
type: object
properties:
compatible:
- const: st,stm32-lptimer-counter
+ const: st,stm32-lptimer-timer
required:
- compatible
- timer:
+patternProperties:
+ "^trigger@[0-9]+$":
type: object
properties:
compatible:
- const: st,stm32-lptimer-timer
+ const: st,stm32-lptimer-trigger
+
+ reg:
+ description: Identify trigger hardware block.
+ items:
+ minimum: 0
+ maximum: 2
required:
- compatible
+ - reg
required:
- "#address-cells"
diff --git a/Documentation/devicetree/bindings/mfd/st,stm32-timers.yaml b/Documentation/devicetree/bindings/mfd/st,stm32-timers.yaml
index 10b330d42901..5db00af8e116 100644
--- a/Documentation/devicetree/bindings/mfd/st,stm32-timers.yaml
+++ b/Documentation/devicetree/bindings/mfd/st,stm32-timers.yaml
@@ -33,7 +33,7 @@ properties:
items:
- const: int
- reset:
+ resets:
maxItems: 1
dmas:
@@ -46,6 +46,21 @@ properties:
minItems: 1
maxItems: 7
+ interrupts:
+ oneOf:
+ - maxItems: 1
+ - maxItems: 4
+
+ interrupt-names:
+ oneOf:
+ - items:
+ - const: global
+ - items:
+ - const: brk
+ - const: up
+ - const: trg-com
+ - const: cc
+
"#address-cells":
const: 1
@@ -87,6 +102,16 @@ properties:
- "#pwm-cells"
- compatible
+ counter:
+ type: object
+
+ properties:
+ compatible:
+ const: st,stm32-timer-counter
+
+ required:
+ - compatible
+
patternProperties:
"^timer@[0-9]+$":
type: object
@@ -107,16 +132,6 @@ patternProperties:
- compatible
- reg
- counter:
- type: object
-
- properties:
- compatible:
- const: st,stm32-timer-counter
-
- required:
- - compatible
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/mfd/syscon.yaml b/Documentation/devicetree/bindings/mfd/syscon.yaml
index fb784045013f..c10f0b577268 100644
--- a/Documentation/devicetree/bindings/mfd/syscon.yaml
+++ b/Documentation/devicetree/bindings/mfd/syscon.yaml
@@ -17,7 +17,7 @@ description: |
and access the registers directly.
maintainers:
- - Lee Jones <lee.jones@linaro.org>
+ - Lee Jones <lee@kernel.org>
select:
properties:
diff --git a/Documentation/devicetree/bindings/mfd/ti,j721e-system-controller.yaml b/Documentation/devicetree/bindings/mfd/ti,j721e-system-controller.yaml
index fa86691ebf16..73cffc45e056 100644
--- a/Documentation/devicetree/bindings/mfd/ti,j721e-system-controller.yaml
+++ b/Documentation/devicetree/bindings/mfd/ti,j721e-system-controller.yaml
@@ -48,6 +48,12 @@ patternProperties:
description:
This is the SERDES lane control mux.
+ "^clock-controller@[0-9a-f]+$":
+ type: object
+ $ref: /schemas/clock/ti,am654-ehrpwm-tbclk.yaml#
+ description:
+ Clock provider for TI EHRPWM nodes.
+
required:
- compatible
- reg
@@ -79,5 +85,11 @@ examples:
<0x40c0 0x3>, <0x40c4 0x3>, <0x40c8 0x3>, <0x40cc 0x3>;
/* SERDES4 lane0/1/2/3 select */
};
+
+ clock-controller@4140 {
+ compatible = "ti,am654-ehrpwm-tbclk", "syscon";
+ reg = <0x4140 0x18>;
+ #clock-cells = <1>;
+ };
};
...
diff --git a/Documentation/devicetree/bindings/mips/lantiq/rcu.txt b/Documentation/devicetree/bindings/mips/lantiq/rcu.txt
index 58d51f480c9e..8ec6191c1712 100644
--- a/Documentation/devicetree/bindings/mips/lantiq/rcu.txt
+++ b/Documentation/devicetree/bindings/mips/lantiq/rcu.txt
@@ -2,7 +2,7 @@ Lantiq XWAY SoC RCU binding
===========================
This binding describes the RCU (reset controller unit) multifunction device,
-where each sub-device has it's own set of registers.
+where each sub-device has its own set of registers.
The RCU register range is used for multiple purposes. Mostly one device
uses one or multiple register exclusively, but for some registers some
diff --git a/Documentation/devicetree/bindings/mmc/brcm,sdhci-brcmstb.yaml b/Documentation/devicetree/bindings/mmc/brcm,sdhci-brcmstb.yaml
index 5ecdac9de484..dead421e17d6 100644
--- a/Documentation/devicetree/bindings/mmc/brcm,sdhci-brcmstb.yaml
+++ b/Documentation/devicetree/bindings/mmc/brcm,sdhci-brcmstb.yaml
@@ -10,9 +10,6 @@ maintainers:
- Al Cooper <alcooperx@gmail.com>
- Florian Fainelli <f.fainelli@gmail.com>
-allOf:
- - $ref: mmc-controller.yaml#
-
properties:
compatible:
oneOf:
@@ -42,23 +39,46 @@ properties:
maxItems: 1
clocks:
- maxItems: 1
- description:
- handle to core clock for the sdhci controller.
+ minItems: 1
+ items:
+ - description: handle to core clock for the sdhci controller
+      - description: handle to improved 150 MHz clock for sdhci controller (optional clock)
clock-names:
+ minItems: 1
items:
- const: sw_sdio
+ - const: sdio_freq # Optional clock
+
+ clock-frequency:
+ description:
+ Maximum operating frequency of sdio_freq sdhci controller clock
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 100000000
+ maximum: 150000000
sdhci,auto-cmd12:
type: boolean
description: Specifies that controller should use auto CMD12
+allOf:
+ - $ref: mmc-controller.yaml#
+ - if:
+ properties:
+ clock-names:
+ contains:
+ const: sdio_freq
+
+ then:
+ required:
+ - clock-frequency
+
required:
- compatible
- reg
- interrupts
- clocks
+ - clock-names
unevaluatedProperties: false
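With the optional second clock wired up, clock-frequency becomes mandatory per the new conditional.
A sketch of just the clock-related properties under that assumption; the phandles and the exact rate
are illustrative, not taken from this patch:

    clocks = <&scmi_clk 245>, <&scmi_clk 134>;   /* core clock plus optional sdio_freq clock */
    clock-names = "sw_sdio", "sdio_freq";
    clock-frequency = <150000000>;               /* required once "sdio_freq" is listed */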
diff --git a/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt
deleted file mode 100644
index 753e9d7d8956..000000000000
--- a/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt
+++ /dev/null
@@ -1,94 +0,0 @@
-* Samsung Exynos specific extensions to the Synopsys Designware Mobile
- Storage Host Controller
-
-The Synopsys designware mobile storage host controller is used to interface
-a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
-differences between the core Synopsys dw mshc controller properties described
-by synopsys-dw-mshc.txt and the properties used by the Samsung Exynos specific
-extensions to the Synopsys Designware Mobile Storage Host Controller.
-
-Required Properties:
-
-* compatible: should be
- - "samsung,exynos4210-dw-mshc": for controllers with Samsung Exynos4210
- specific extensions.
- - "samsung,exynos4412-dw-mshc": for controllers with Samsung Exynos4412
- specific extensions.
- - "samsung,exynos5250-dw-mshc": for controllers with Samsung Exynos5250
- specific extensions.
- - "samsung,exynos5420-dw-mshc": for controllers with Samsung Exynos5420
- specific extensions.
- - "samsung,exynos7-dw-mshc": for controllers with Samsung Exynos7
- specific extensions.
- - "samsung,exynos7-dw-mshc-smu": for controllers with Samsung Exynos7
- specific extensions having an SMU.
- - "axis,artpec8-dw-mshc": for controllers with ARTPEC-8 specific
- extensions.
-
-* samsung,dw-mshc-ciu-div: Specifies the divider value for the card interface
- unit (ciu) clock. This property is applicable only for Exynos5 SoC's and
- ignored for Exynos4 SoC's. The valid range of divider value is 0 to 7.
-
-* samsung,dw-mshc-sdr-timing: Specifies the value of CIU clock phase shift value
- in transmit mode and CIU clock phase shift value in receive mode for single
- data rate mode operation. Refer notes below for the order of the cells and the
- valid values.
-
-* samsung,dw-mshc-ddr-timing: Specifies the value of CUI clock phase shift value
- in transmit mode and CIU clock phase shift value in receive mode for double
- data rate mode operation. Refer notes below for the order of the cells and the
- valid values.
-* samsung,dw-mshc-hs400-timing: Specifies the value of CIU TX and RX clock phase
- shift value for hs400 mode operation.
-
- Notes for the sdr-timing and ddr-timing values:
-
- The order of the cells should be
- - First Cell: CIU clock phase shift value for tx mode.
- - Second Cell: CIU clock phase shift value for rx mode.
-
- Valid values for SDR and DDR CIU clock timing for Exynos5250:
- - valid value for tx phase shift and rx phase shift is 0 to 7.
- - when CIU clock divider value is set to 3, all possible 8 phase shift
- values can be used.
- - if CIU clock divider value is 0 (that is divide by 1), both tx and rx
- phase shift clocks should be 0.
-
-* samsung,read-strobe-delay: RCLK (Data strobe) delay to control HS400 mode
- (Latency value for delay line in Read path)
-
-Required properties for a slot (Deprecated - Recommend to use one slot per host):
-
-* gpios: specifies a list of gpios used for command, clock and data bus. The
- first gpio is the command line and the second gpio is the clock line. The
- rest of the gpios (depending on the bus-width property) are the data lines in
- no particular order. The format of the gpio specifier depends on the gpio
- controller.
-(Deprecated - Refer to Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt)
-
-Example:
-
- The MSHC controller node can be split into two portions, SoC specific and
- board specific portions as listed below.
-
- dwmmc0@12200000 {
- compatible = "samsung,exynos5250-dw-mshc";
- reg = <0x12200000 0x1000>;
- interrupts = <0 75 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- dwmmc0@12200000 {
- cap-mmc-highspeed;
- cap-sd-highspeed;
- broken-cd;
- fifo-depth = <0x80>;
- card-detect-delay = <200>;
- samsung,dw-mshc-ciu-div = <3>;
- samsung,dw-mshc-sdr-timing = <2 3>;
- samsung,dw-mshc-ddr-timing = <1 2>;
- samsung,dw-mshc-hs400-timing = <0 2>;
- samsung,read-strobe-delay = <90>;
- bus-width = <8>;
- };
diff --git a/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt b/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt
deleted file mode 100644
index 5e74db69f581..000000000000
--- a/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-MMC/SD/SDIO slot directly connected to a SPI bus
-
-This file documents differences between the core properties described
-by mmc.txt and the properties used by the mmc_spi driver.
-
-Required properties:
-- spi-max-frequency : maximum frequency for this device (Hz).
-
-Optional properties:
-- voltage-ranges : two cells are required, first cell specifies minimum
- slot voltage (mV), second cell specifies maximum slot voltage (mV).
- Several ranges could be specified. If not provided, 3.2v..3.4v is assumed.
-- gpios : may specify GPIOs in this order: Card-Detect GPIO,
- Write-Protect GPIO. Note that this does not follow the
- binding from mmc.txt, for historical reasons.
-
-Example:
-
- mmc-slot@0 {
- compatible = "fsl,mpc8323rdb-mmc-slot",
- "mmc-spi-slot";
- reg = <0>;
- gpios = <&qe_pio_d 14 1
- &qe_pio_d 15 0>;
- voltage-ranges = <3300 3300>;
- spi-max-frequency = <50000000>;
- interrupts = <42>;
- interrupt-parent = <&PIC>;
- };
diff --git a/Documentation/devicetree/bindings/mmc/mmc-spi-slot.yaml b/Documentation/devicetree/bindings/mmc/mmc-spi-slot.yaml
new file mode 100644
index 000000000000..c45b91099325
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/mmc-spi-slot.yaml
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mmc/mmc-spi-slot.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MMC/SD/SDIO slot directly connected to a SPI bus
+
+maintainers:
+ - Ulf Hansson <ulf.hansson@linaro.org>
+
+allOf:
+ - $ref: "mmc-controller.yaml"
+ - $ref: /schemas/spi/spi-peripheral-props.yaml
+
+description: |
+ The extra properties used by an mmc connected via SPI.
+
+properties:
+ compatible:
+ const: mmc-spi-slot
+
+ reg:
+ maxItems: 1
+
+ spi-max-frequency: true
+
+ interrupts:
+ maxItems: 1
+
+ voltage-ranges:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ description: |
+ Two cells are required, first cell specifies minimum slot voltage (mV),
+ second cell specifies maximum slot voltage (mV).
+ items:
+ - description: |
+ value for minimum slot voltage in mV
+ default: 3200
+ - description: |
+ value for maximum slot voltage in mV
+ default: 3400
+
+ gpios:
+ description: |
+ For historical reasons, this does not follow the generic mmc-controller
+ binding.
+ minItems: 1
+ items:
+ - description: Card-Detect GPIO
+ - description: Write-Protect GPIO
+
+required:
+ - compatible
+ - reg
+ - spi-max-frequency
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ mmc@0 {
+ compatible = "mmc-spi-slot";
+ reg = <0>;
+ gpios = <&gpio 14 GPIO_ACTIVE_LOW>, <&gpio 15 GPIO_ACTIVE_HIGH>;
+ voltage-ranges = <3300 3300>;
+ spi-max-frequency = <50000000>;
+ interrupts = <42>;
+ interrupt-parent = <&PIC>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/mmc/mtk-sd.yaml b/Documentation/devicetree/bindings/mmc/mtk-sd.yaml
index 2a2e9fa8c188..083d1ec2f661 100644
--- a/Documentation/devicetree/bindings/mmc/mtk-sd.yaml
+++ b/Documentation/devicetree/bindings/mmc/mtk-sd.yaml
@@ -30,13 +30,11 @@ properties:
- const: mediatek,mt7623-mmc
- const: mediatek,mt2701-mmc
- items:
- - const: mediatek,mt8186-mmc
- - const: mediatek,mt8183-mmc
- - items:
- - const: mediatek,mt8192-mmc
- - const: mediatek,mt8183-mmc
- - items:
- - const: mediatek,mt8195-mmc
+ - enum:
+ - mediatek,mt8186-mmc
+ - mediatek,mt8188-mmc
+ - mediatek,mt8192-mmc
+ - mediatek,mt8195-mmc
- const: mediatek,mt8183-mmc
reg:
@@ -72,12 +70,27 @@ properties:
- const: ahb_cg
interrupts:
- maxItems: 1
+ description:
+      Should contain at least the MSDC GIC interrupt. To support SDIO in-band wakeup, an
+      extended interrupt is required and must be configured as the wakeup source IRQ.
+ minItems: 1
+ maxItems: 2
+
+ interrupt-names:
+ items:
+ - const: msdc
+ - const: sdio_wakeup
pinctrl-names:
+ description:
+      Should contain at least "default" and "state_uhs". To support SDIO in-band wakeup, the dat1
+      pin will be switched between GPIO mode and SDIO DAT1 mode; state_eint is mandatory in this
+      scenario.
+ minItems: 2
items:
- const: default
- const: state_uhs
+ - const: state_eint
pinctrl-0:
description:
@@ -89,6 +102,11 @@ properties:
should contain uhs mode pin ctrl.
maxItems: 1
+ pinctrl-2:
+ description:
+ should switch dat1 pin to GPIO mode.
+ maxItems: 1
+
assigned-clocks:
description:
PLL of the source clock.
@@ -208,4 +226,32 @@ examples:
mediatek,hs400-cmd-resp-sel-rising;
};
+ mmc3: mmc@11260000 {
+ compatible = "mediatek,mt8173-mmc";
+ reg = <0x11260000 0x1000>;
+ clock-names = "source", "hclk";
+ clocks = <&pericfg CLK_PERI_MSDC30_3>,
+ <&topckgen CLK_TOP_MSDC50_2_H_SEL>;
+ interrupt-names = "msdc", "sdio_wakeup";
+ interrupts-extended = <&gic GIC_SPI 74 IRQ_TYPE_LEVEL_LOW 0>,
+ <&pio 23 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default", "state_uhs", "state_eint";
+ pinctrl-0 = <&mmc2_pins_default>;
+ pinctrl-1 = <&mmc2_pins_uhs>;
+ pinctrl-2 = <&mmc2_pins_eint>;
+ bus-width = <4>;
+ max-frequency = <200000000>;
+ cap-sd-highspeed;
+ sd-uhs-sdr104;
+ keep-power-in-suspend;
+ wakeup-source;
+ cap-sdio-irq;
+ no-mmc;
+ no-sd;
+ non-removable;
+ vmmc-supply = <&sdio_fixed_3v3>;
+ vqmmc-supply = <&mt6397_vgp3_reg>;
+ mmc-pwrseq = <&wifi_pwrseq>;
+ };
+
...
diff --git a/Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml b/Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml
index 9ac4986988c5..14945ebc31d2 100644
--- a/Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml
+++ b/Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml
@@ -56,11 +56,15 @@ properties:
- renesas,sdhi-r8a77980 # R-Car V3H
- renesas,sdhi-r8a77990 # R-Car E3
- renesas,sdhi-r8a77995 # R-Car D3
- - renesas,sdhi-r8a779a0 # R-Car V3U
- renesas,sdhi-r9a07g043 # RZ/G2UL
- renesas,sdhi-r9a07g044 # RZ/G2{L,LC}
- renesas,sdhi-r9a07g054 # RZ/V2L
- const: renesas,rcar-gen3-sdhi # R-Car Gen3 or RZ/G2
+ - items:
+ - enum:
+ - renesas,sdhi-r8a779a0 # R-Car V3U
+ - renesas,sdhi-r8a779f0 # R-Car S4-8
+ - const: renesas,rcar-gen4-sdhi # R-Car Gen4
reg:
maxItems: 1
@@ -141,6 +145,7 @@ allOf:
enum:
- renesas,rcar-gen2-sdhi
- renesas,rcar-gen3-sdhi
+ - renesas,rcar-gen4-sdhi
then:
properties:
clocks:
diff --git a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml
index 54fb59820d2b..8d888b435817 100644
--- a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml
+++ b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml
@@ -39,6 +39,7 @@ properties:
- rockchip,rk3399-dw-mshc
- rockchip,rk3568-dw-mshc
- rockchip,rv1108-dw-mshc
+ - rockchip,rv1126-dw-mshc
- const: rockchip,rk3288-dw-mshc
reg:
diff --git a/Documentation/devicetree/bindings/mmc/samsung,exynos-dw-mshc.yaml b/Documentation/devicetree/bindings/mmc/samsung,exynos-dw-mshc.yaml
new file mode 100644
index 000000000000..fdaa18481aa0
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/samsung,exynos-dw-mshc.yaml
@@ -0,0 +1,160 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mmc/samsung,exynos-dw-mshc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title:
+ Samsung Exynos SoC specific extensions to the Synopsys Designware Mobile
+ Storage Host Controller
+
+maintainers:
+ - Jaehoon Chung <jh80.chung@samsung.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
+
+properties:
+ compatible:
+ enum:
+ - samsung,exynos4210-dw-mshc
+ - samsung,exynos4412-dw-mshc
+ - samsung,exynos5250-dw-mshc
+ - samsung,exynos5420-dw-mshc
+ - samsung,exynos5420-dw-mshc-smu
+ - samsung,exynos7-dw-mshc
+ - samsung,exynos7-dw-mshc-smu
+ - axis,artpec8-dw-mshc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 2
+ description:
+ Handle to "biu" and "ciu" clocks for the
+ bus interface unit clock and the card interface unit clock.
+
+ clock-names:
+ items:
+ - const: biu
+ - const: ciu
+
+ samsung,dw-mshc-ciu-div:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 7
+ description:
+ The divider value for the card interface unit (ciu) clock.
+
+ samsung,dw-mshc-ddr-timing:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ items:
+ - description: CIU clock phase shift value for tx mode
+ minimum: 0
+ maximum: 7
+ - description: CIU clock phase shift value for rx mode
+ minimum: 0
+ maximum: 7
+ description:
+      The CIU clock phase shift value in transmit mode and CIU clock phase
+      shift value in receive mode for double data rate mode operation.
+ See also samsung,dw-mshc-hs400-timing property.
+
+ samsung,dw-mshc-hs400-timing:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ items:
+ - description: CIU clock phase shift value for tx mode
+ minimum: 0
+ maximum: 7
+ - description: CIU clock phase shift value for rx mode
+ minimum: 0
+ maximum: 7
+ description: |
+      The CIU TX and RX clock phase shift values for HS400 mode operation.
+      Valid values for SDR and DDR CIU clock timing:
+ - valid value for tx phase shift and rx phase shift is 0 to 7.
+ - when CIU clock divider value is set to 3, all possible 8 phase shift
+ values can be used.
+ - if CIU clock divider value is 0 (that is divide by 1), both tx and rx
+ phase shift clocks should be 0.
+ If missing, values from samsung,dw-mshc-ddr-timing property are used.
+
+ samsung,dw-mshc-sdr-timing:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ items:
+ - description: CIU clock phase shift value for tx mode
+ minimum: 0
+ maximum: 7
+ - description: CIU clock phase shift value for rx mode
+ minimum: 0
+ maximum: 7
+ description:
+      The CIU clock phase shift value in transmit mode and CIU clock phase
+      shift value in receive mode for single data rate mode operation.
+ See also samsung,dw-mshc-hs400-timing property.
+
+ samsung,read-strobe-delay:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ RCLK (Data strobe) delay to control HS400 mode (Latency value for delay
+ line in Read path). If missing, default from hardware is used.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - samsung,dw-mshc-ddr-timing
+ - samsung,dw-mshc-sdr-timing
+
+allOf:
+ - $ref: "synopsys-dw-mshc-common.yaml#"
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - samsung,exynos5250-dw-mshc
+ - samsung,exynos5420-dw-mshc
+ - samsung,exynos7-dw-mshc
+ - samsung,exynos7-dw-mshc-smu
+ - axis,artpec8-dw-mshc
+ then:
+ required:
+ - samsung,dw-mshc-ciu-div
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/exynos5420.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ mmc@12220000 {
+ compatible = "samsung,exynos5420-dw-mshc";
+ interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x12220000 0x1000>;
+ clocks = <&clock CLK_MMC2>, <&clock CLK_SCLK_MMC2>;
+ clock-names = "biu", "ciu";
+ fifo-depth = <0x40>;
+ card-detect-delay = <200>;
+ samsung,dw-mshc-ciu-div = <3>;
+ samsung,dw-mshc-sdr-timing = <0 4>;
+ samsung,dw-mshc-ddr-timing = <0 2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd2_clk &sd2_cmd &sd2_cd &sd2_wp &sd2_bus1 &sd2_bus4>;
+ bus-width = <4>;
+ cap-sd-highspeed;
+ max-frequency = <200000000>;
+ vmmc-supply = <&ldo19_reg>;
+ vqmmc-supply = <&ldo13_reg>;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ sd-uhs-ddr50;
+ };
diff --git a/Documentation/devicetree/bindings/mmc/samsung,s3c6410-sdhci.yaml b/Documentation/devicetree/bindings/mmc/samsung,s3c6410-sdhci.yaml
new file mode 100644
index 000000000000..5d873a60f650
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/samsung,s3c6410-sdhci.yaml
@@ -0,0 +1,81 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mmc/samsung,s3c6410-sdhci.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung SoC SDHCI Controller
+
+maintainers:
+ - Jaehoon Chung <jh80.chung@samsung.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
+
+properties:
+ compatible:
+ enum:
+ - samsung,s3c6410-sdhci
+ - samsung,exynos4210-sdhci
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ minItems: 2
+ maxItems: 5
+
+ clock-names:
+ minItems: 2
+ items:
+ - const: hsmmc
+ - pattern: "^mmc_busclk.[0-3]$"
+ - pattern: "^mmc_busclk.[0-3]$"
+ - pattern: "^mmc_busclk.[0-3]$"
+ - pattern: "^mmc_busclk.[0-3]$"
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+allOf:
+ - $ref: mmc-controller.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - samsung,exynos4210-sdhci
+ then:
+ properties:
+ clocks:
+ maxItems: 2
+ clock-names:
+ items:
+ - const: hsmmc
+ - const: mmc_busclk.2
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/exynos4.h>
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ mmc@12510000 {
+ compatible = "samsung,exynos4210-sdhci";
+ reg = <0x12510000 0x100>;
+ interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clock CLK_SDMMC0>, <&clock CLK_SCLK_MMC0>;
+ clock-names = "hsmmc", "mmc_busclk.2";
+ bus-width = <4>;
+ cd-gpios = <&gpx3 4 GPIO_ACTIVE_LOW>;
+ pinctrl-0 = <&sd2_clk &sd2_cmd &sd2_bus4 &sdhci2_cd>;
+ pinctrl-names = "default";
+ vmmc-supply = <&ldo21_reg>;
+ };
diff --git a/Documentation/devicetree/bindings/mmc/samsung-sdhci.txt b/Documentation/devicetree/bindings/mmc/samsung-sdhci.txt
deleted file mode 100644
index 42e0a9afa100..000000000000
--- a/Documentation/devicetree/bindings/mmc/samsung-sdhci.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-* Samsung's SDHCI Controller device tree bindings
-
-Samsung's SDHCI controller is used as a connectivity interface with external
-MMC, SD and eMMC storage mediums. This file documents differences between the
-core mmc properties described by mmc.txt and the properties used by the
-Samsung implementation of the SDHCI controller.
-
-Required SoC Specific Properties:
-- compatible: should be one of the following
- - "samsung,s3c6410-sdhci": For controllers compatible with s3c6410 sdhci
- controller.
- - "samsung,exynos4210-sdhci": For controllers compatible with Exynos4 sdhci
- controller.
-
-Required Board Specific Properties:
-- pinctrl-0: Should specify pin control groups used for this controller.
-- pinctrl-names: Should contain only one value - "default".
-
-Example:
- sdhci@12530000 {
- compatible = "samsung,exynos4210-sdhci";
- reg = <0x12530000 0x100>;
- interrupts = <0 75 0>;
- bus-width = <4>;
- cd-gpios = <&gpk2 2 0>;
- pinctrl-names = "default";
- pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_bus4>;
- };
-
- Note: This example shows both SoC specific and board specific properties
- in a single device node. The properties can be actually be separated
- into SoC specific node and board specific node.
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-msm.yaml b/Documentation/devicetree/bindings/mmc/sdhci-msm.yaml
index e4236334e748..fc0e81c2066c 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-msm.yaml
+++ b/Documentation/devicetree/bindings/mmc/sdhci-msm.yaml
@@ -17,6 +17,9 @@ description:
properties:
compatible:
oneOf:
+ - enum:
+ - qcom,sdhci-msm-v4
+ deprecated: true
- items:
- enum:
- qcom,apq8084-sdhci
@@ -27,6 +30,10 @@ properties:
- qcom,msm8992-sdhci
- qcom,msm8994-sdhci
- qcom,msm8996-sdhci
+ - qcom,msm8998-sdhci
+ - const: qcom,sdhci-msm-v4 # for sdcc versions less than 5.0
+ - items:
+ - enum:
- qcom,qcs404-sdhci
- qcom,sc7180-sdhci
- qcom,sc7280-sdhci
@@ -38,20 +45,16 @@ properties:
- qcom,sm6350-sdhci
- qcom,sm8150-sdhci
- qcom,sm8250-sdhci
- - enum:
- - qcom,sdhci-msm-v4 # for sdcc versions less than 5.0
- - qcom,sdhci-msm-v5 # for sdcc version 5.0
- - items:
- - const: qcom,sdhci-msm-v4 # Deprecated (only for backward compatibility)
- # for sdcc versions less than 5.0
+ - qcom,sm8450-sdhci
+ - const: qcom,sdhci-msm-v5 # for sdcc version 5.0
reg:
minItems: 1
- items:
- - description: Host controller register map
- - description: SD Core register map
- - description: CQE register map
- - description: Inline Crypto Engine register map
+ maxItems: 4
+
+ reg-names:
+ minItems: 1
+ maxItems: 4
clocks:
minItems: 3
@@ -93,6 +96,9 @@ properties:
description:
Should specify pin control groups used for this controller.
+ resets:
+ maxItems: 1
+
qcom,ddr-config:
$ref: /schemas/types.yaml#/definitions/uint32
description: platform specific settings for DDR_CONFIG reg.
@@ -121,6 +127,18 @@ properties:
description: A phandle to sdhci power domain node
maxItems: 1
+ mmc-ddr-1_8v: true
+
+ mmc-hs200-1_8v: true
+
+ mmc-hs400-1_8v: true
+
+ bus-width: true
+
+ max-frequency: true
+
+ operating-points-v2: true
+
patternProperties:
'^opp-table(-[a-z0-9]+)?$':
if:
@@ -140,7 +158,47 @@ required:
- clock-names
- interrupts
-additionalProperties: true
+allOf:
+ - $ref: mmc-controller.yaml#
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sdhci-msm-v4
+ then:
+ properties:
+ reg:
+ minItems: 2
+ items:
+ - description: Host controller register map
+ - description: SD Core register map
+ - description: CQE register map
+ - description: Inline Crypto Engine register map
+ reg-names:
+ minItems: 2
+ items:
+ - const: hc
+ - const: core
+ - const: cqhci
+ - const: ice
+ else:
+ properties:
+ reg:
+ minItems: 1
+ items:
+ - description: Host controller register map
+ - description: CQE register map
+ - description: Inline Crypto Engine register map
+ reg-names:
+ minItems: 1
+ items:
+ - const: hc
+ - const: cqhci
+ - const: ice
+
+unevaluatedProperties: false
examples:
- |
@@ -149,7 +207,7 @@ examples:
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/power/qcom-rpmpd.h>
- sdhc_2: sdhci@8804000 {
+ sdhc_2: mmc@8804000 {
compatible = "qcom,sm8250-sdhci", "qcom,sdhci-msm-v5";
reg = <0 0x08804000 0 0x1000>;
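Under the new allOf split, a "qcom,sdhci-msm-v5" controller pairs its register ranges with the
hc/cqhci/ice names, while v4 additionally exposes the SD core block. A sketch of the two properties
for a v5 device; the second range is an assumed CQE region for illustration, not a value from this
patch:

    reg = <0 0x08804000 0 0x1000>, <0 0x08805000 0 0x1000>;
    reg-names = "hc", "cqhci";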
diff --git a/Documentation/devicetree/bindings/mtd/microchip,mchp48l640.yaml b/Documentation/devicetree/bindings/mtd/microchip,mchp48l640.yaml
index 2cdf6bf3dc4a..8cc2a7ceb5fb 100644
--- a/Documentation/devicetree/bindings/mtd/microchip,mchp48l640.yaml
+++ b/Documentation/devicetree/bindings/mtd/microchip,mchp48l640.yaml
@@ -22,13 +22,14 @@ properties:
reg:
maxItems: 1
- spi-max-frequency: true
-
required:
- compatible
- reg
-additionalProperties: false
+allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/mtd/mxc-nand.yaml b/Documentation/devicetree/bindings/mtd/mxc-nand.yaml
index 73b86f2226c7..66da1b476ab7 100644
--- a/Documentation/devicetree/bindings/mtd/mxc-nand.yaml
+++ b/Documentation/devicetree/bindings/mtd/mxc-nand.yaml
@@ -37,6 +37,4 @@ examples:
compatible = "fsl,imx27-nand";
reg = <0xd8000000 0x1000>;
interrupts = <29>;
- nand-bus-width = <8>;
- nand-ecc-mode = "hw";
};
diff --git a/Documentation/devicetree/bindings/mtd/partitions/arm,arm-firmware-suite.txt b/Documentation/devicetree/bindings/mtd/partitions/arm,arm-firmware-suite.txt
deleted file mode 100644
index d5c5616f6db5..000000000000
--- a/Documentation/devicetree/bindings/mtd/partitions/arm,arm-firmware-suite.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-ARM AFS - ARM Firmware Suite Partitions
-=======================================
-
-The ARM Firmware Suite is a flash partitioning system found on the
-ARM reference designs: Integrator AP, Integrator CP, Versatile AB,
-Versatile PB, the RealView family, Versatile Express and Juno.
-
-Required properties:
-- compatible : (required) must be "arm,arm-firmware-suite"
-
-Example:
-
-flash@0 {
- partitions {
- compatible = "arm,arm-firmware-suite";
- };
-};
diff --git a/Documentation/devicetree/bindings/mtd/partitions/arm,arm-firmware-suite.yaml b/Documentation/devicetree/bindings/mtd/partitions/arm,arm-firmware-suite.yaml
new file mode 100644
index 000000000000..76c88027b6d2
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/partitions/arm,arm-firmware-suite.yaml
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mtd/partitions/arm,arm-firmware-suite.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM Firmware Suite (AFS) Partitions
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+
+description: |
+ The ARM Firmware Suite is a flash partitioning system found on the
+ ARM reference designs: Integrator AP, Integrator CP, Versatile AB,
+ Versatile PB, the RealView family, Versatile Express and Juno.
+
+properties:
+ compatible:
+ const: arm,arm-firmware-suite
+
+additionalProperties: false
+
+examples:
+ - |
+ partitions {
+ compatible = "arm,arm-firmware-suite";
+ };
+...
diff --git a/Documentation/devicetree/bindings/mtd/partitions/partition.yaml b/Documentation/devicetree/bindings/mtd/partitions/partition.yaml
index e1ac08064425..f1a02d840b12 100644
--- a/Documentation/devicetree/bindings/mtd/partitions/partition.yaml
+++ b/Documentation/devicetree/bindings/mtd/partitions/partition.yaml
@@ -11,6 +11,17 @@ description: |
relative offset and size specified. Depending on partition function extra
properties can be used.
+  A partition may be dynamically allocated by a specific parser at runtime.
+  In this specific case, a specific suffix is required in the node name.
+  Everything after 'partition-' will be used as the partition name to compare
+  with the one dynamically allocated by the specific parser.
+  If the partition name contains invalid characters, a label can be provided
+  that will be used instead of the node name for the comparison.
+  This is used to assign an OF node to the dynamically allocated partition
+  so that subsystems like NVMEM can provide an OF node and declare NVMEM cells.
+  The OF node will be assigned only if the declared partition label matches the
+  one assigned by the parser at runtime.
+
maintainers:
- Rafał Miłecki <rafal@milecki.pl>
@@ -41,7 +52,12 @@ properties:
immune to paired-pages corruptions
type: boolean
-required:
- - reg
+if:
+ not:
+ required: [ reg ]
+then:
+ properties:
+ $nodename:
+ pattern: '^partition-.*$'
additionalProperties: true
diff --git a/Documentation/devicetree/bindings/mtd/partitions/qcom,smem-part.yaml b/Documentation/devicetree/bindings/mtd/partitions/qcom,smem-part.yaml
index cf3f8c1e035d..dc07909af023 100644
--- a/Documentation/devicetree/bindings/mtd/partitions/qcom,smem-part.yaml
+++ b/Documentation/devicetree/bindings/mtd/partitions/qcom,smem-part.yaml
@@ -19,6 +19,10 @@ properties:
compatible:
const: qcom,smem-part
+patternProperties:
+ "^partition-[0-9a-z]+$":
+ $ref: partition.yaml#
+
required:
- compatible
@@ -31,3 +35,26 @@ examples:
compatible = "qcom,smem-part";
};
};
+
+ - |
+ /* Example declaring dynamic partition */
+ flash {
+ partitions {
+ compatible = "qcom,smem-part";
+
+ partition-art {
+ compatible = "nvmem-cells";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ label = "0:art";
+
+ macaddr_art_0: macaddr@0 {
+ reg = <0x0 0x6>;
+ };
+
+ macaddr_art_6: macaddr@6 {
+ reg = <0x6 0x6>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mtd/qcom,nandc.yaml b/Documentation/devicetree/bindings/mtd/qcom,nandc.yaml
index 84ad7ff30121..482a2c068740 100644
--- a/Documentation/devicetree/bindings/mtd/qcom,nandc.yaml
+++ b/Documentation/devicetree/bindings/mtd/qcom,nandc.yaml
@@ -102,6 +102,31 @@ allOf:
- const: rx
- const: cmd
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,ipq806x-nand
+
+ then:
+ properties:
+ qcom,boot-partitions:
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ items:
+ items:
+ - description: offset
+ - description: size
+ description:
+            Boot partitions use a different layout where the 4 bytes of spare
+ data are not protected by ECC. Use this to declare these special
+ partitions by defining first the offset and then the size.
+
+ It's in the form of <offset1 size1 offset2 size2 offset3 ...>
+ and should be declared in ascending order.
+
+ Refer to the ipq8064 example on how to use this special binding.
+
required:
- compatible
- reg
@@ -135,6 +160,8 @@ examples:
nand-ecc-strength = <4>;
nand-bus-width = <8>;
+ qcom,boot-partitions = <0x0 0x58a0000>;
+
partitions {
compatible = "fixed-partitions";
#address-cells = <1>;
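When more than one boot region exists, the offset/size pairs are concatenated in ascending order
within the same property. A sketch with purely hypothetical offsets and sizes:

    qcom,boot-partitions = <0x0 0x0c80000 0x0c80000 0x0500000>;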
diff --git a/Documentation/devicetree/bindings/net/altera_tse.txt b/Documentation/devicetree/bindings/net/altera_tse.txt
index 0b7d4d3758ea..1d9148ff5130 100644
--- a/Documentation/devicetree/bindings/net/altera_tse.txt
+++ b/Documentation/devicetree/bindings/net/altera_tse.txt
@@ -15,7 +15,7 @@ Required properties:
"rx_desc": MSGDMA Rx dispatcher descriptor space region
"rx_resp": MSGDMA Rx dispatcher response space region
"s1": SGDMA descriptor memory
-- interrupts: Should contain the TSE interrupts and it's mode.
+- interrupts: Should contain the TSE interrupts and its mode.
- interrupt-names: Should contain the interrupt names
"rx_irq": xDMA Rx dispatcher interrupt
"tx_irq": xDMA Tx dispatcher interrupt
diff --git a/Documentation/devicetree/bindings/net/cpsw.txt b/Documentation/devicetree/bindings/net/cpsw.txt
index 7c7ac5eb0313..ef655f386b2e 100644
--- a/Documentation/devicetree/bindings/net/cpsw.txt
+++ b/Documentation/devicetree/bindings/net/cpsw.txt
@@ -20,7 +20,7 @@ Required properties:
- active_slave : Specifies the slave to use for time stamping,
ethtool and SIOCGMIIPHY
- cpsw-phy-sel : Specifies the phandle to the CPSW phy mode selection
- device. See also cpsw-phy-sel.txt for it's binding.
+ device. See also cpsw-phy-sel.txt for its binding.
Note that in legacy cases cpsw-phy-sel may be
a child device instead of a phandle
(DEPRECATED, use phys property instead).
diff --git a/Documentation/devicetree/bindings/net/emac_rockchip.txt b/Documentation/devicetree/bindings/net/emac_rockchip.txt
deleted file mode 100644
index 05bd7dafce17..000000000000
--- a/Documentation/devicetree/bindings/net/emac_rockchip.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-* ARC EMAC 10/100 Ethernet platform driver for Rockchip RK3036/RK3066/RK3188 SoCs
-
-Required properties:
-- compatible: should be "rockchip,<name>-emac"
- "rockchip,rk3036-emac": found on RK3036 SoCs
- "rockchip,rk3066-emac": found on RK3066 SoCs
- "rockchip,rk3188-emac": found on RK3188 SoCs
-- reg: Address and length of the register set for the device
-- interrupts: Should contain the EMAC interrupts
-- rockchip,grf: phandle to the syscon grf used to control speed and mode
- for emac.
-- phy: see ethernet.txt file in the same directory.
-- phy-mode: see ethernet.txt file in the same directory.
-
-Optional properties:
-- phy-supply: phandle to a regulator if the PHY needs one
-
-Clock handling:
-- clocks: Must contain an entry for each entry in clock-names.
-- clock-names: Shall be "hclk" for the host clock needed to calculate and set
- polling period of EMAC and "macref" for the reference clock needed to transfer
- data to and from the phy.
-
-Child nodes of the driver are the individual PHY devices connected to the
-MDIO bus. They must have a "reg" property given the PHY address on the MDIO bus.
-
-Examples:
-
-ethernet@10204000 {
- compatible = "rockchip,rk3188-emac";
- reg = <0xc0fc2000 0x3c>;
- interrupts = <6>;
- mac-address = [ 00 11 22 33 44 55 ];
-
- clocks = <&cru HCLK_EMAC>, <&cru SCLK_MAC>;
- clock-names = "hclk", "macref";
-
- pinctrl-names = "default";
- pinctrl-0 = <&emac_xfer>, <&emac_mdio>, <&phy_int>;
-
- rockchip,grf = <&grf>;
-
- phy = <&phy0>;
- phy-mode = "rmii";
- phy-supply = <&vcc_rmii>;
-
- #address-cells = <1>;
- #size-cells = <0>;
- phy0: ethernet-phy@0 {
- reg = <1>;
- };
-};
diff --git a/Documentation/devicetree/bindings/net/nfc/nxp,nci.yaml b/Documentation/devicetree/bindings/net/nfc/nxp,nci.yaml
index e381a3c14836..b2558421268a 100644
--- a/Documentation/devicetree/bindings/net/nfc/nxp,nci.yaml
+++ b/Documentation/devicetree/bindings/net/nfc/nxp,nci.yaml
@@ -7,7 +7,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: NXP Semiconductors NCI NFC controller
maintainers:
- - Charles Gorand <charles.gorand@effinnov.com>
- Krzysztof Kozlowski <krzk@kernel.org>
properties:
diff --git a/Documentation/devicetree/bindings/net/qcom-emac.txt b/Documentation/devicetree/bindings/net/qcom-emac.txt
index 346e6c7f47b7..7ae8aa148634 100644
--- a/Documentation/devicetree/bindings/net/qcom-emac.txt
+++ b/Documentation/devicetree/bindings/net/qcom-emac.txt
@@ -14,7 +14,7 @@ MAC node:
- mac-address : The 6-byte MAC address. If present, it is the default
MAC address.
- internal-phy : phandle to the internal PHY node
-- phy-handle : phandle the the external PHY node
+- phy-handle : phandle to the external PHY node
Internal PHY node:
- compatible : Should be "qcom,fsm9900-emac-sgmii" or "qcom,qdf2432-emac-sgmii".
diff --git a/Documentation/devicetree/bindings/net/rockchip,emac.yaml b/Documentation/devicetree/bindings/net/rockchip,emac.yaml
new file mode 100644
index 000000000000..a6d4f14df442
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/rockchip,emac.yaml
@@ -0,0 +1,115 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/rockchip,emac.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip RK3036/RK3066/RK3188 Ethernet Media Access Controller (EMAC)
+
+maintainers:
+ - Heiko Stuebner <heiko@sntech.de>
+
+properties:
+ compatible:
+ enum:
+ - rockchip,rk3036-emac
+ - rockchip,rk3066-emac
+ - rockchip,rk3188-emac
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ minItems: 2
+ items:
+ - description: host clock
+ - description: reference clock
+ - description: mac TX/RX clock
+
+ clock-names:
+ minItems: 2
+ items:
+ - const: hclk
+ - const: macref
+ - const: macclk
+
+ rockchip,grf:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to the syscon GRF used to control speed and mode for the EMAC.
+
+ phy-supply:
+ description:
+ Phandle to a regulator if the PHY needs one.
+
+ mdio:
+ $ref: mdio.yaml#
+ unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - rockchip,grf
+ - phy
+ - phy-mode
+ - mdio
+
+allOf:
+ - $ref: "ethernet-controller.yaml#"
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: rockchip,rk3036-emac
+
+ then:
+ properties:
+ clocks:
+ minItems: 3
+
+ clock-names:
+ minItems: 3
+
+ else:
+ properties:
+ clocks:
+ maxItems: 2
+
+ clock-names:
+ maxItems: 2
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/rk3188-cru-common.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ ethernet@10204000 {
+ compatible = "rockchip,rk3188-emac";
+ reg = <0xc0fc2000 0x3c>;
+ interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru HCLK_EMAC>, <&cru SCLK_MAC>;
+ clock-names = "hclk", "macref";
+ rockchip,grf = <&grf>;
+ pinctrl-0 = <&emac_xfer>, <&emac_mdio>, <&phy_int>;
+ pinctrl-names = "default";
+ phy = <&phy0>;
+ phy-mode = "rmii";
+ phy-supply = <&vcc_rmii>;
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy0: ethernet-phy@0 {
+ reg = <1>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/ti,dp83822.yaml b/Documentation/devicetree/bindings/net/ti,dp83822.yaml
index 75e8712e903a..f2489a9c852f 100644
--- a/Documentation/devicetree/bindings/net/ti,dp83822.yaml
+++ b/Documentation/devicetree/bindings/net/ti,dp83822.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: TI DP83822 ethernet PHY
maintainers:
- - Dan Murphy <dmurphy@ti.com>
+ - Andrew Davis <afd@ti.com>
description: |
The DP83822 is a low-power, single-port, 10/100 Mbps Ethernet PHY. It
diff --git a/Documentation/devicetree/bindings/net/ti,dp83867.yaml b/Documentation/devicetree/bindings/net/ti,dp83867.yaml
index 76ff08a477ba..b8c0e4b5b494 100644
--- a/Documentation/devicetree/bindings/net/ti,dp83867.yaml
+++ b/Documentation/devicetree/bindings/net/ti,dp83867.yaml
@@ -11,7 +11,7 @@ allOf:
- $ref: "ethernet-controller.yaml#"
maintainers:
- - Dan Murphy <dmurphy@ti.com>
+ - Andrew Davis <afd@ti.com>
description: |
The DP83867 device is a robust, low power, fully featured Physical Layer
diff --git a/Documentation/devicetree/bindings/net/ti,dp83869.yaml b/Documentation/devicetree/bindings/net/ti,dp83869.yaml
index 1b780dce61ab..b04ff0014a59 100644
--- a/Documentation/devicetree/bindings/net/ti,dp83869.yaml
+++ b/Documentation/devicetree/bindings/net/ti,dp83869.yaml
@@ -11,7 +11,7 @@ allOf:
- $ref: "ethernet-phy.yaml#"
maintainers:
- - Dan Murphy <dmurphy@ti.com>
+ - Andrew Davis <afd@ti.com>
description: |
The DP83869HM device is a robust, fully-featured Gigabit (PHY) transceiver
diff --git a/Documentation/devicetree/bindings/opp/opp-v2-base.yaml b/Documentation/devicetree/bindings/opp/opp-v2-base.yaml
index 76c8acd981b3..66d0ec763f0b 100644
--- a/Documentation/devicetree/bindings/opp/opp-v2-base.yaml
+++ b/Documentation/devicetree/bindings/opp/opp-v2-base.yaml
@@ -50,6 +50,16 @@ patternProperties:
property to uniquely identify the OPP nodes exists. Devices like power
domains must have another (implementation dependent) property.
+      Entries for multiple clocks shall be provided in the same field, as an
+      array of frequencies. The OPP binding doesn't provide any provisions
+      to relate the values to their clocks or the order in which the clocks
+      need to be configured; that is left to the implementation-specific
+      binding.
+ minItems: 1
+ maxItems: 16
+ items:
+ maxItems: 1
+
opp-microvolt:
description: |
Voltage for the OPP
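Concretely, a device with two clocks lists one frequency per clock in a single opp-hz entry (up to 16
values), written here with one bracketed value per clock to match the added matrix constraints; which
frequency maps to which clock is left to the device-specific binding, as the text above says. A sketch
with illustrative rates:

    opp-800000000 {
        /* first value for clock 0, second for clock 1 -- the mapping is binding-specific */
        opp-hz = /bits/ 64 <800000000>, /bits/ 64 <400000000>;
    };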
diff --git a/Documentation/devicetree/bindings/opp/opp-v2-kryo-cpu.yaml b/Documentation/devicetree/bindings/opp/opp-v2-kryo-cpu.yaml
index 30f7b596d609..59663e897dae 100644
--- a/Documentation/devicetree/bindings/opp/opp-v2-kryo-cpu.yaml
+++ b/Documentation/devicetree/bindings/opp/opp-v2-kryo-cpu.yaml
@@ -98,6 +98,8 @@ examples:
capacity-dmips-mhz = <1024>;
clocks = <&kryocc 0>;
operating-points-v2 = <&cluster0_opp>;
+ power-domains = <&cpr>;
+ power-domain-names = "cpr";
#cooling-cells = <2>;
next-level-cache = <&L2_0>;
L2_0: l2-cache {
@@ -115,6 +117,8 @@ examples:
capacity-dmips-mhz = <1024>;
clocks = <&kryocc 0>;
operating-points-v2 = <&cluster0_opp>;
+ power-domains = <&cpr>;
+ power-domain-names = "cpr";
#cooling-cells = <2>;
next-level-cache = <&L2_0>;
};
@@ -128,6 +132,8 @@ examples:
capacity-dmips-mhz = <1024>;
clocks = <&kryocc 1>;
operating-points-v2 = <&cluster1_opp>;
+ power-domains = <&cpr>;
+ power-domain-names = "cpr";
#cooling-cells = <2>;
next-level-cache = <&L2_1>;
L2_1: l2-cache {
@@ -145,6 +151,8 @@ examples:
capacity-dmips-mhz = <1024>;
clocks = <&kryocc 1>;
operating-points-v2 = <&cluster1_opp>;
+ power-domains = <&cpr>;
+ power-domain-names = "cpr";
#cooling-cells = <2>;
next-level-cache = <&L2_1>;
};
@@ -182,18 +190,21 @@ examples:
opp-microvolt = <905000 905000 1140000>;
opp-supported-hw = <0x7>;
clock-latency-ns = <200000>;
+ required-opps = <&cpr_opp1>;
};
opp-1401600000 {
opp-hz = /bits/ 64 <1401600000>;
opp-microvolt = <1140000 905000 1140000>;
opp-supported-hw = <0x5>;
clock-latency-ns = <200000>;
+ required-opps = <&cpr_opp2>;
};
opp-1593600000 {
opp-hz = /bits/ 64 <1593600000>;
opp-microvolt = <1140000 905000 1140000>;
opp-supported-hw = <0x1>;
clock-latency-ns = <200000>;
+ required-opps = <&cpr_opp3>;
};
};
@@ -207,24 +218,28 @@ examples:
opp-microvolt = <905000 905000 1140000>;
opp-supported-hw = <0x7>;
clock-latency-ns = <200000>;
+ required-opps = <&cpr_opp1>;
};
opp-1804800000 {
opp-hz = /bits/ 64 <1804800000>;
opp-microvolt = <1140000 905000 1140000>;
opp-supported-hw = <0x6>;
clock-latency-ns = <200000>;
+ required-opps = <&cpr_opp4>;
};
opp-1900800000 {
opp-hz = /bits/ 64 <1900800000>;
opp-microvolt = <1140000 905000 1140000>;
opp-supported-hw = <0x4>;
clock-latency-ns = <200000>;
+ required-opps = <&cpr_opp5>;
};
opp-2150400000 {
opp-hz = /bits/ 64 <2150400000>;
opp-microvolt = <1140000 905000 1140000>;
opp-supported-hw = <0x1>;
clock-latency-ns = <200000>;
+ required-opps = <&cpr_opp6>;
};
};
diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml
index 252e5b72aee0..376e739bcad4 100644
--- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml
@@ -144,6 +144,7 @@ properties:
description: If present then the reset sequence using the GPIO
specified in the "reset-gpio" property is reversed (H=reset state,
L=operation state) (optional required).
+ type: boolean
vpcie-supply:
description: Should specify the regulator in charge of PCIe port power.
diff --git a/Documentation/devicetree/bindings/pci/host-generic-pci.yaml b/Documentation/devicetree/bindings/pci/host-generic-pci.yaml
index 6bcaa8f2c3cf..d25423aa7167 100644
--- a/Documentation/devicetree/bindings/pci/host-generic-pci.yaml
+++ b/Documentation/devicetree/bindings/pci/host-generic-pci.yaml
@@ -106,6 +106,9 @@ properties:
maxItems: 3
dma-coherent: true
+ iommu-map: true
+ iommu-map-mask: true
+ msi-parent: true
required:
- compatible
diff --git a/Documentation/devicetree/bindings/pci/mediatek-pcie.txt b/Documentation/devicetree/bindings/pci/mediatek-pcie.txt
index 57ae73462272..684227522267 100644
--- a/Documentation/devicetree/bindings/pci/mediatek-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/mediatek-pcie.txt
@@ -7,6 +7,7 @@ Required properties:
"mediatek,mt7622-pcie"
"mediatek,mt7623-pcie"
"mediatek,mt7629-pcie"
+ "airoha,en7523-pcie"
- device_type: Must be "pci"
- reg: Base addresses and lengths of the root ports.
- reg-names: Names of the above areas to use during resource lookup.
diff --git a/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie-ep.yaml b/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie-ep.yaml
new file mode 100644
index 000000000000..a24fb8307d29
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie-ep.yaml
@@ -0,0 +1,319 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/nvidia,tegra194-pcie-ep.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVIDIA Tegra194 (and later) PCIe Endpoint controller (Synopsys DesignWare Core based)
+
+maintainers:
+ - Thierry Reding <thierry.reding@gmail.com>
+ - Jon Hunter <jonathanh@nvidia.com>
+ - Vidya Sagar <vidyas@nvidia.com>
+
+description: |
+ This PCIe controller is based on the Synopsys DesignWare PCIe IP and thus
+ inherits all the common properties defined in snps,dw-pcie-ep.yaml. Some
+ of the controller instances are dual mode; they can work either in Root
+ Port mode or Endpoint mode but one at a time.
+
+ On Tegra194, controllers C0, C4 and C5 support Endpoint mode.
+ On Tegra234, controllers C5, C6, C7 and C10 support Endpoint mode.
+
+  Note: On Tegra194's P2972-0000 platform, only the C5 controller can be enabled
+  to operate in Endpoint mode because of the way the platform is designed.
+
+properties:
+ compatible:
+ enum:
+ - nvidia,tegra194-pcie-ep
+ - nvidia,tegra234-pcie-ep
+
+ reg:
+ items:
+ - description: controller's application logic registers
+ - description: iATU and DMA registers. This is where the iATU (internal
+ Address Translation Unit) registers of the PCIe core are made
+ available for software access.
+ - description: aperture where the Root Port's own configuration
+ registers are available.
+ - description: aperture used to map the remote Root Complex address space
+
+ reg-names:
+ items:
+ - const: appl
+ - const: atu_dma
+ - const: dbi
+ - const: addr_space
+
+ interrupts:
+ items:
+ - description: controller interrupt
+
+ interrupt-names:
+ items:
+ - const: intr
+
+ clocks:
+ items:
+ - description: module clock
+
+ clock-names:
+ items:
+ - const: core
+
+ resets:
+ items:
+ - description: APB bus interface reset
+ - description: module reset
+
+ reset-names:
+ items:
+ - const: apb
+ - const: core
+
+ reset-gpios:
+ description: Must contain a phandle to a GPIO controller followed by the
+ GPIO that is used as the PERST input signal. Please refer to pci.txt.
+
+ phys:
+ minItems: 1
+ maxItems: 8
+
+ phy-names:
+ minItems: 1
+ items:
+ - const: p2u-0
+ - const: p2u-1
+ - const: p2u-2
+ - const: p2u-3
+ - const: p2u-4
+ - const: p2u-5
+ - const: p2u-6
+ - const: p2u-7
+
+ power-domains:
+ maxItems: 1
+ description: |
+ A phandle to the node that controls power to the respective PCIe
+ controller and a specifier name for the PCIe controller.
+
+ Tegra194 specifiers are defined in "include/dt-bindings/power/tegra194-powergate.h"
+ Tegra234 specifiers are defined in "include/dt-bindings/power/tegra234-powergate.h"
+
+ interconnects:
+ items:
+ - description: memory read client
+ - description: memory write client
+
+ interconnect-names:
+ items:
+ - const: dma-mem # read
+ - const: write
+
+ dma-coherent: true
+
+ nvidia,bpmp:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: |
+ Must contain a pair of phandles to BPMP controller node followed by
+ controller ID. Following are the controller IDs for each controller:
+
+ Tegra194
+
+ 0: C0
+ 1: C1
+ 2: C2
+ 3: C3
+ 4: C4
+ 5: C5
+
+ Tegra234
+
+ 0 : C0
+ 1 : C1
+ 2 : C2
+ 3 : C3
+ 4 : C4
+ 5 : C5
+ 6 : C6
+ 7 : C7
+ 8 : C8
+ 9 : C9
+ 10: C10
+
+ items:
+ - items:
+ - description: phandle to BPMP controller node
+ - description: PCIe controller ID
+ maximum: 10
+
+ nvidia,aspm-cmrt-us:
+ description: Common Mode Restore Time for proper operation of ASPM to be
+ specified in microseconds
+
+ nvidia,aspm-pwr-on-t-us:
+ description: Power On time for proper operation of ASPM to be specified in
+ microseconds
+
+ nvidia,aspm-l0s-entrance-latency-us:
+ description: ASPM L0s entrance latency to be specified in microseconds
+
+ vddio-pex-ctl-supply:
+ description: A phandle to the regulator supply for PCIe side band signals
+
+ nvidia,refclk-select-gpios:
+ maxItems: 1
+ description: GPIO used to enable REFCLK to controller from the host
+
+ nvidia,enable-ext-refclk:
+ description: |
+ This boolean property needs to be present if the controller is configured
+ to receive Reference Clock from the host.
+ NOTE: This is applicable only for Tegra234.
+
+ $ref: /schemas/types.yaml#/definitions/flag
+
+ nvidia,enable-srns:
+ description: |
+ This boolean property needs to be present if the controller is
+ configured to operate in SRNS (Separate Reference Clocks with No
+ Spread-Spectrum Clocking). NOTE: This is applicable only for
+ Tegra234.
+
+ $ref: /schemas/types.yaml#/definitions/flag
+
+allOf:
+ - $ref: /schemas/pci/snps,dw-pcie-ep.yaml#
+
+unevaluatedProperties: false
+
+required:
+ - interrupts
+ - interrupt-names
+ - clocks
+ - clock-names
+ - resets
+ - reset-names
+ - power-domains
+ - reset-gpios
+ - vddio-pex-ctl-supply
+ - num-lanes
+ - phys
+ - phy-names
+ - nvidia,bpmp
+
+examples:
+ - |
+ #include <dt-bindings/clock/tegra194-clock.h>
+ #include <dt-bindings/gpio/tegra194-gpio.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/tegra194-powergate.h>
+ #include <dt-bindings/reset/tegra194-reset.h>
+
+ bus@0 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0x0 0x0 0x0 0x8 0x0>;
+
+ pcie-ep@141a0000 {
+ compatible = "nvidia,tegra194-pcie-ep";
+ reg = <0x00 0x141a0000 0x0 0x00020000>, /* appl registers (128K) */
+ <0x00 0x3a040000 0x0 0x00040000>, /* iATU_DMA reg space (256K) */
+ <0x00 0x3a080000 0x0 0x00040000>, /* DBI reg space (256K) */
+ <0x1c 0x00000000 0x4 0x00000000>; /* Address Space (16G) */
+ reg-names = "appl", "atu_dma", "dbi", "addr_space";
+ interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
+ interrupt-names = "intr";
+
+ clocks = <&bpmp TEGRA194_CLK_PEX1_CORE_5>;
+ clock-names = "core";
+
+ resets = <&bpmp TEGRA194_RESET_PEX1_CORE_5_APB>,
+ <&bpmp TEGRA194_RESET_PEX1_CORE_5>;
+ reset-names = "apb", "core";
+
+ power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8A>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&clkreq_c5_bi_dir_state>;
+
+ nvidia,bpmp = <&bpmp 5>;
+
+ nvidia,aspm-cmrt-us = <60>;
+ nvidia,aspm-pwr-on-t-us = <20>;
+ nvidia,aspm-l0s-entrance-latency-us = <3>;
+
+ vddio-pex-ctl-supply = <&vdd_1v8ao>;
+
+ reset-gpios = <&gpio TEGRA194_MAIN_GPIO(GG, 1) GPIO_ACTIVE_LOW>;
+
+ nvidia,refclk-select-gpios = <&gpio_aon TEGRA194_AON_GPIO(AA, 5)
+ GPIO_ACTIVE_HIGH>;
+
+ num-lanes = <8>;
+
+ phys = <&p2u_nvhs_0>, <&p2u_nvhs_1>, <&p2u_nvhs_2>,
+ <&p2u_nvhs_3>, <&p2u_nvhs_4>, <&p2u_nvhs_5>,
+ <&p2u_nvhs_6>, <&p2u_nvhs_7>;
+
+ phy-names = "p2u-0", "p2u-1", "p2u-2", "p2u-3", "p2u-4",
+ "p2u-5", "p2u-6", "p2u-7";
+ };
+ };
+
+ - |
+ #include <dt-bindings/clock/tegra234-clock.h>
+ #include <dt-bindings/gpio/tegra234-gpio.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/tegra234-powergate.h>
+ #include <dt-bindings/reset/tegra234-reset.h>
+
+ bus@0 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0x0 0x0 0x0 0x8 0x0>;
+
+ pcie-ep@141a0000 {
+ compatible = "nvidia,tegra234-pcie-ep";
+ power-domains = <&bpmp TEGRA234_POWER_DOMAIN_PCIEX8A>;
+ reg = <0x00 0x141a0000 0x0 0x00020000>, /* appl registers (128K) */
+ <0x00 0x3a040000 0x0 0x00040000>, /* iATU_DMA reg space (256K) */
+ <0x00 0x3a080000 0x0 0x00040000>, /* DBI reg space (256K) */
+ <0x27 0x40000000 0x4 0x00000000>; /* Address Space (16G) */
+ reg-names = "appl", "atu_dma", "dbi", "addr_space";
+
+ interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
+ interrupt-names = "intr";
+
+ clocks = <&bpmp TEGRA234_CLK_PEX1_C5_CORE>;
+ clock-names = "core";
+
+ resets = <&bpmp TEGRA234_RESET_PEX1_CORE_5_APB>,
+ <&bpmp TEGRA234_RESET_PEX1_CORE_5>;
+ reset-names = "apb", "core";
+
+ nvidia,bpmp = <&bpmp 5>;
+
+ nvidia,enable-ext-refclk;
+ nvidia,aspm-cmrt-us = <60>;
+ nvidia,aspm-pwr-on-t-us = <20>;
+ nvidia,aspm-l0s-entrance-latency-us = <3>;
+
+ vddio-pex-ctl-supply = <&p3701_vdd_1v8_ls>;
+
+ reset-gpios = <&gpio TEGRA234_MAIN_GPIO(AF, 1) GPIO_ACTIVE_LOW>;
+
+ nvidia,refclk-select-gpios = <&gpio_aon
+ TEGRA234_AON_GPIO(AA, 4)
+ GPIO_ACTIVE_HIGH>;
+
+ num-lanes = <8>;
+
+ phys = <&p2u_nvhs_0>, <&p2u_nvhs_1>, <&p2u_nvhs_2>,
+ <&p2u_nvhs_3>, <&p2u_nvhs_4>, <&p2u_nvhs_5>,
+ <&p2u_nvhs_6>, <&p2u_nvhs_7>;
+
+ phy-names = "p2u-0", "p2u-1", "p2u-2", "p2u-3", "p2u-4",
+ "p2u-5", "p2u-6", "p2u-7";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.txt b/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.txt
deleted file mode 100644
index 8e4f9bfb316d..000000000000
--- a/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.txt
+++ /dev/null
@@ -1,245 +0,0 @@
-NVIDIA Tegra PCIe controller (Synopsys DesignWare Core based)
-
-This PCIe controller is based on the Synopsis Designware PCIe IP
-and thus inherits all the common properties defined in snps,dw-pcie.yaml and
-snps,dw-pcie-ep.yaml.
-Some of the controller instances are dual mode where in they can work either
-in root port mode or endpoint mode but one at a time.
-
-Required properties:
-- power-domains: A phandle to the node that controls power to the respective
- PCIe controller and a specifier name for the PCIe controller. Following are
- the specifiers for the different PCIe controllers
- TEGRA194_POWER_DOMAIN_PCIEX8B: C0
- TEGRA194_POWER_DOMAIN_PCIEX1A: C1
- TEGRA194_POWER_DOMAIN_PCIEX1A: C2
- TEGRA194_POWER_DOMAIN_PCIEX1A: C3
- TEGRA194_POWER_DOMAIN_PCIEX4A: C4
- TEGRA194_POWER_DOMAIN_PCIEX8A: C5
- these specifiers are defined in
- "include/dt-bindings/power/tegra194-powergate.h" file.
-- reg: A list of physical base address and length pairs for each set of
- controller registers. Must contain an entry for each entry in the reg-names
- property.
-- reg-names: Must include the following entries:
- "appl": Controller's application logic registers
- "config": As per the definition in snps,dw-pcie.yaml
- "atu_dma": iATU and DMA registers. This is where the iATU (internal Address
- Translation Unit) registers of the PCIe core are made available
- for SW access.
- "dbi": The aperture where root port's own configuration registers are
- available
-- interrupts: A list of interrupt outputs of the controller. Must contain an
- entry for each entry in the interrupt-names property.
-- interrupt-names: Must include the following entries:
- "intr": The Tegra interrupt that is asserted for controller interrupts
-- clocks: Must contain an entry for each entry in clock-names.
- See ../clocks/clock-bindings.txt for details.
-- clock-names: Must include the following entries:
- - core
-- resets: Must contain an entry for each entry in reset-names.
- See ../reset/reset.txt for details.
-- reset-names: Must include the following entries:
- - apb
- - core
-- phys: Must contain a phandle to P2U PHY for each entry in phy-names.
-- phy-names: Must include an entry for each active lane.
- "p2u-N": where N ranges from 0 to one less than the total number of lanes
-- nvidia,bpmp: Must contain a pair of phandle to BPMP controller node followed
- by controller-id. Following are the controller ids for each controller.
- 0: C0
- 1: C1
- 2: C2
- 3: C3
- 4: C4
- 5: C5
-- vddio-pex-ctl-supply: Regulator supply for PCIe side band signals
-
-RC mode:
-- compatible: Tegra19x must contain "nvidia,tegra194-pcie"
-- device_type: Must be "pci" for RC mode
-- interrupt-names: Must include the following entries:
- "msi": The Tegra interrupt that is asserted when an MSI is received
-- bus-range: Range of bus numbers associated with this controller
-- #address-cells: Address representation for root ports (must be 3)
- - cell 0 specifies the bus and device numbers of the root port:
- [23:16]: bus number
- [15:11]: device number
- - cell 1 denotes the upper 32 address bits and should be 0
- - cell 2 contains the lower 32 address bits and is used to translate to the
- CPU address space
-- #size-cells: Size representation for root ports (must be 2)
-- ranges: Describes the translation of addresses for root ports and standard
- PCI regions. The entries must be 7 cells each, where the first three cells
- correspond to the address as described for the #address-cells property
- above, the fourth and fifth cells are for the physical CPU address to
- translate to and the sixth and seventh cells are as described for the
- #size-cells property above.
- - Entries setup the mapping for the standard I/O, memory and
- prefetchable PCI regions. The first cell determines the type of region
- that is setup:
- - 0x81000000: I/O memory region
- - 0x82000000: non-prefetchable memory region
- - 0xc2000000: prefetchable memory region
- Please refer to the standard PCI bus binding document for a more detailed
- explanation.
-- #interrupt-cells: Size representation for interrupts (must be 1)
-- interrupt-map-mask and interrupt-map: Standard PCI IRQ mapping properties
- Please refer to the standard PCI bus binding document for a more detailed
- explanation.
-
-EP mode:
-In Tegra194, Only controllers C0, C4 & C5 support EP mode.
-- compatible: Tegra19x must contain "nvidia,tegra194-pcie-ep"
-- reg-names: Must include the following entries:
- "addr_space": Used to map remote RC address space
-- reset-gpios: Must contain a phandle to a GPIO controller followed by
- GPIO that is being used as PERST input signal. Please refer to pci.txt
- document.
-
-Optional properties:
-- pinctrl-names: A list of pinctrl state names.
- It is mandatory for C5 controller and optional for other controllers.
- - "default": Configures PCIe I/O for proper operation.
-- pinctrl-0: phandle for the 'default' state of pin configuration.
- It is mandatory for C5 controller and optional for other controllers.
-- supports-clkreq: Refer to Documentation/devicetree/bindings/pci/pci.txt
-- nvidia,update-fc-fixup: This is a boolean property and needs to be present to
- improve performance when a platform is designed in such a way that it
- satisfies at least one of the following conditions thereby enabling root
- port to exchange optimum number of FC (Flow Control) credits with
- downstream devices
- 1. If C0/C4/C5 run at x1/x2 link widths (irrespective of speed and MPS)
- 2. If C0/C1/C2/C3/C4/C5 operate at their respective max link widths and
- a) speed is Gen-2 and MPS is 256B
- b) speed is >= Gen-3 with any MPS
-- nvidia,aspm-cmrt-us: Common Mode Restore Time for proper operation of ASPM
- to be specified in microseconds
-- nvidia,aspm-pwr-on-t-us: Power On time for proper operation of ASPM to be
- specified in microseconds
-- nvidia,aspm-l0s-entrance-latency-us: ASPM L0s entrance latency to be
- specified in microseconds
-
-RC mode:
-- vpcie3v3-supply: A phandle to the regulator node that supplies 3.3V to the slot
- if the platform has one such slot. (Ex:- x16 slot owned by C5 controller
- in p2972-0000 platform).
-- vpcie12v-supply: A phandle to the regulator node that supplies 12V to the slot
- if the platform has one such slot. (Ex:- x16 slot owned by C5 controller
- in p2972-0000 platform).
-
-EP mode:
-- nvidia,refclk-select-gpios: Must contain a phandle to a GPIO controller
- followed by GPIO that is being used to enable REFCLK to controller from host
-
-NOTE:- On Tegra194's P2972-0000 platform, only C5 controller can be enabled to
-operate in the endpoint mode because of the way the platform is designed.
-
-Examples:
-=========
-
-Tegra194 RC mode:
------------------
-
- pcie@14180000 {
- compatible = "nvidia,tegra194-pcie";
- power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8B>;
- reg = <0x00 0x14180000 0x0 0x00020000 /* appl registers (128K) */
- 0x00 0x38000000 0x0 0x00040000 /* configuration space (256K) */
- 0x00 0x38040000 0x0 0x00040000>; /* iATU_DMA reg space (256K) */
- reg-names = "appl", "config", "atu_dma";
-
- #address-cells = <3>;
- #size-cells = <2>;
- device_type = "pci";
- num-lanes = <8>;
- linux,pci-domain = <0>;
-
- pinctrl-names = "default";
- pinctrl-0 = <&pex_rst_c5_out_state>, <&clkreq_c5_bi_dir_state>;
-
- clocks = <&bpmp TEGRA194_CLK_PEX0_CORE_0>;
- clock-names = "core";
-
- resets = <&bpmp TEGRA194_RESET_PEX0_CORE_0_APB>,
- <&bpmp TEGRA194_RESET_PEX0_CORE_0>;
- reset-names = "apb", "core";
-
- interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>, /* controller interrupt */
- <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>; /* MSI interrupt */
- interrupt-names = "intr", "msi";
-
- #interrupt-cells = <1>;
- interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &gic GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
-
- nvidia,bpmp = <&bpmp 0>;
-
- supports-clkreq;
- nvidia,aspm-cmrt-us = <60>;
- nvidia,aspm-pwr-on-t-us = <20>;
- nvidia,aspm-l0s-entrance-latency-us = <3>;
-
- bus-range = <0x0 0xff>;
- ranges = <0x81000000 0x0 0x38100000 0x0 0x38100000 0x0 0x00100000 /* downstream I/O (1MB) */
- 0x82000000 0x0 0x38200000 0x0 0x38200000 0x0 0x01E00000 /* non-prefetchable memory (30MB) */
- 0xc2000000 0x18 0x00000000 0x18 0x00000000 0x4 0x00000000>; /* prefetchable memory (16GB) */
-
- vddio-pex-ctl-supply = <&vdd_1v8ao>;
- vpcie3v3-supply = <&vdd_3v3_pcie>;
- vpcie12v-supply = <&vdd_12v_pcie>;
-
- phys = <&p2u_hsio_2>, <&p2u_hsio_3>, <&p2u_hsio_4>,
- <&p2u_hsio_5>;
- phy-names = "p2u-0", "p2u-1", "p2u-2", "p2u-3";
- };
-
-Tegra194 EP mode:
------------------
-
- pcie-ep@141a0000 {
- compatible = "nvidia,tegra194-pcie-ep", "snps,dw-pcie-ep";
- power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8A>;
- reg = <0x00 0x141a0000 0x0 0x00020000 /* appl registers (128K) */
- 0x00 0x3a040000 0x0 0x00040000 /* iATU_DMA reg space (256K) */
- 0x00 0x3a080000 0x0 0x00040000 /* DBI reg space (256K) */
- 0x1c 0x00000000 0x4 0x00000000>; /* Address Space (16G) */
- reg-names = "appl", "atu_dma", "dbi", "addr_space";
-
- num-lanes = <8>;
- num-ib-windows = <2>;
- num-ob-windows = <8>;
-
- pinctrl-names = "default";
- pinctrl-0 = <&clkreq_c5_bi_dir_state>;
-
- clocks = <&bpmp TEGRA194_CLK_PEX1_CORE_5>;
- clock-names = "core";
-
- resets = <&bpmp TEGRA194_RESET_PEX1_CORE_5_APB>,
- <&bpmp TEGRA194_RESET_PEX1_CORE_5>;
- reset-names = "apb", "core";
-
- interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
- interrupt-names = "intr";
-
- nvidia,bpmp = <&bpmp 5>;
-
- nvidia,aspm-cmrt-us = <60>;
- nvidia,aspm-pwr-on-t-us = <20>;
- nvidia,aspm-l0s-entrance-latency-us = <3>;
-
- vddio-pex-ctl-supply = <&vdd_1v8ao>;
-
- reset-gpios = <&gpio TEGRA194_MAIN_GPIO(GG, 1) GPIO_ACTIVE_LOW>;
-
- nvidia,refclk-select-gpios = <&gpio_aon TEGRA194_AON_GPIO(AA, 5)
- GPIO_ACTIVE_HIGH>;
-
- phys = <&p2u_nvhs_0>, <&p2u_nvhs_1>, <&p2u_nvhs_2>,
- <&p2u_nvhs_3>, <&p2u_nvhs_4>, <&p2u_nvhs_5>,
- <&p2u_nvhs_6>, <&p2u_nvhs_7>;
-
- phy-names = "p2u-0", "p2u-1", "p2u-2", "p2u-3", "p2u-4",
- "p2u-5", "p2u-6", "p2u-7";
- };
diff --git a/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.yaml b/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.yaml
new file mode 100644
index 000000000000..75da3e8eecb9
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.yaml
@@ -0,0 +1,350 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/nvidia,tegra194-pcie.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVIDIA Tegra194 (and later) PCIe controller (Synopsys DesignWare Core based)
+
+maintainers:
+ - Thierry Reding <thierry.reding@gmail.com>
+ - Jon Hunter <jonathanh@nvidia.com>
+ - Vidya Sagar <vidyas@nvidia.com>
+
+description: |
+ This PCIe controller is based on the Synopsys DesignWare PCIe IP and thus
+ inherits all the common properties defined in snps,dw-pcie.yaml. Some of
+ the controller instances are dual mode; they can work either in Root Port
+ mode or Endpoint mode, but only one at a time.
+
+ See nvidia,tegra194-pcie-ep.yaml for details on the Endpoint mode device
+ tree bindings.
+
+properties:
+ compatible:
+ enum:
+ - nvidia,tegra194-pcie
+ - nvidia,tegra234-pcie
+
+ reg:
+ items:
+ - description: controller's application logic registers
+ - description: configuration registers
+ - description: iATU and DMA registers. This is where the iATU (internal
+ Address Translation Unit) registers of the PCIe core are made
+ available for software access.
+ - description: aperture where the Root Port's own configuration
+ registers are available.
+
+ reg-names:
+ items:
+ - const: appl
+ - const: config
+ - const: atu_dma
+ - const: dbi
+
+ interrupts:
+ items:
+ - description: controller interrupt
+ - description: MSI interrupt
+
+ interrupt-names:
+ items:
+ - const: intr
+ - const: msi
+
+ clocks:
+ items:
+ - description: module clock
+
+ clock-names:
+ items:
+ - const: core
+
+ resets:
+ items:
+ - description: APB bus interface reset
+ - description: module reset
+
+ reset-names:
+ items:
+ - const: apb
+ - const: core
+
+ phys:
+ minItems: 1
+ maxItems: 8
+
+ phy-names:
+ minItems: 1
+ items:
+ - const: p2u-0
+ - const: p2u-1
+ - const: p2u-2
+ - const: p2u-3
+ - const: p2u-4
+ - const: p2u-5
+ - const: p2u-6
+ - const: p2u-7
+
+ power-domains:
+ maxItems: 1
+ description: |
+ A phandle to the node that controls power to the respective PCIe
+ controller and a specifier name for the PCIe controller.
+
+ Tegra194 specifiers defined in "include/dt-bindings/power/tegra194-powergate.h"
+ Tegra234 specifiers defined in "include/dt-bindings/power/tegra234-powergate.h"
+
+ interconnects:
+ items:
+ - description: memory read client
+ - description: memory write client
+
+ interconnect-names:
+ items:
+ - const: dma-mem # read
+ - const: write
+
+ dma-coherent: true
+
+ nvidia,bpmp:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: |
+ Must contain a pair of phandles to BPMP controller node followed by
+ controller ID. Following are the controller IDs for each controller:
+
+ Tegra194
+
+ 0: C0
+ 1: C1
+ 2: C2
+ 3: C3
+ 4: C4
+ 5: C5
+
+ Tegra234
+
+ 0 : C0
+ 1 : C1
+ 2 : C2
+ 3 : C3
+ 4 : C4
+ 5 : C5
+ 6 : C6
+ 7 : C7
+ 8 : C8
+ 9 : C9
+ 10: C10
+
+ items:
+ - items:
+ - description: phandle to BPMP controller node
+ - description: PCIe controller ID
+ maximum: 10
+
+ nvidia,update-fc-fixup:
+ description: |
+ This is a boolean property and needs to be present to improve performance
+ when a platform is designed in such a way that it satisfies at least one
+ of the following conditions, thereby enabling the Root Port to exchange
+ the optimum number of FC (Flow Control) credits with downstream devices:
+
+ NOTE: This is applicable only for Tegra194.
+
+ 1. If C0/C4/C5 run at x1/x2 link widths (irrespective of speed and MPS)
+ 2. If C0/C1/C2/C3/C4/C5 operate at their respective max link widths and
+ a) speed is Gen-2 and MPS is 256B
+ b) speed is >= Gen-3 with any MPS
+
+ $ref: /schemas/types.yaml#/definitions/flag
+
+ nvidia,aspm-cmrt-us:
+ description: Common Mode Restore Time for proper operation of ASPM to be
+ specified in microseconds
+
+ nvidia,aspm-pwr-on-t-us:
+ description: Power On time for proper operation of ASPM to be specified in
+ microseconds
+
+ nvidia,aspm-l0s-entrance-latency-us:
+ description: ASPM L0s entrance latency to be specified in microseconds
+
+ vddio-pex-ctl-supply:
+ description: A phandle to the regulator supply for PCIe side band signals.
+
+ vpcie3v3-supply:
+ description: A phandle to the regulator node that supplies 3.3V to the slot
+ if the platform has one such slot, e.g., x16 slot owned by C5 controller
+ in p2972-0000 platform.
+
+ vpcie12v-supply:
+ description: A phandle to the regulator node that supplies 12V to the slot
+ if the platform has one such slot, e.g., x16 slot owned by C5 controller
+ in p2972-0000 platform.
+
+ nvidia,enable-srns:
+ description: |
+ This boolean property needs to be present if the controller is
+ configured to operate in SRNS (Separate Reference Clocks with No
+ Spread-Spectrum Clocking). NOTE: This is applicable only for
+ Tegra234.
+
+ $ref: /schemas/types.yaml#/definitions/flag
+
+ nvidia,enable-ext-refclk:
+ description: |
+ This boolean property needs to be present if the controller is
+ configured to use the reference clock coming in from an external
+ clock source instead of using the internal clock source.
+
+ $ref: /schemas/types.yaml#/definitions/flag
+
+allOf:
+ - $ref: /schemas/pci/snps,dw-pcie.yaml#
+
+unevaluatedProperties: false
+
+required:
+ - interrupts
+ - interrupt-names
+ - interrupt-map
+ - interrupt-map-mask
+ - clocks
+ - clock-names
+ - resets
+ - reset-names
+ - power-domains
+ - vddio-pex-ctl-supply
+ - num-lanes
+ - phys
+ - phy-names
+ - nvidia,bpmp
+
+examples:
+ - |
+ #include <dt-bindings/clock/tegra194-clock.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/tegra194-powergate.h>
+ #include <dt-bindings/reset/tegra194-reset.h>
+
+ bus@0 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0x0 0x0 0x0 0x8 0x0>;
+
+ pcie@14180000 {
+ compatible = "nvidia,tegra194-pcie";
+ power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8B>;
+ reg = <0x0 0x14180000 0x0 0x00020000>, /* appl registers (128K) */
+ <0x0 0x38000000 0x0 0x00040000>, /* configuration space (256K) */
+ <0x0 0x38040000 0x0 0x00040000>, /* iATU_DMA reg space (256K) */
+ <0x0 0x38080000 0x0 0x00040000>; /* DBI reg space (256K) */
+ reg-names = "appl", "config", "atu_dma", "dbi";
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ num-lanes = <8>;
+ linux,pci-domain = <0>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pex_rst_c5_out_state>, <&clkreq_c5_bi_dir_state>;
+
+ clocks = <&bpmp TEGRA194_CLK_PEX0_CORE_0>;
+ clock-names = "core";
+
+ resets = <&bpmp TEGRA194_RESET_PEX0_CORE_0_APB>,
+ <&bpmp TEGRA194_RESET_PEX0_CORE_0>;
+ reset-names = "apb", "core";
+
+ interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>, /* controller interrupt */
+ <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>; /* MSI interrupt */
+ interrupt-names = "intr", "msi";
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &gic GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
+
+ nvidia,bpmp = <&bpmp 0>;
+
+ supports-clkreq;
+ nvidia,aspm-cmrt-us = <60>;
+ nvidia,aspm-pwr-on-t-us = <20>;
+ nvidia,aspm-l0s-entrance-latency-us = <3>;
+
+ bus-range = <0x0 0xff>;
+ ranges = <0x81000000 0x0 0x38100000 0x0 0x38100000 0x0 0x00100000>, /* downstream I/O */
+ <0x82000000 0x0 0x38200000 0x0 0x38200000 0x0 0x01e00000>, /* non-prefetch memory */
+ <0xc2000000 0x18 0x00000000 0x18 0x00000000 0x4 0x00000000>; /* prefetchable memory */
+
+ vddio-pex-ctl-supply = <&vdd_1v8ao>;
+ vpcie3v3-supply = <&vdd_3v3_pcie>;
+ vpcie12v-supply = <&vdd_12v_pcie>;
+
+ phys = <&p2u_hsio_2>, <&p2u_hsio_3>, <&p2u_hsio_4>,
+ <&p2u_hsio_5>;
+ phy-names = "p2u-0", "p2u-1", "p2u-2", "p2u-3";
+ };
+ };
+
+ - |
+ #include <dt-bindings/clock/tegra234-clock.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/tegra234-powergate.h>
+ #include <dt-bindings/reset/tegra234-reset.h>
+
+ bus@0 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0x0 0x0 0x0 0x8 0x0>;
+
+ pcie@14160000 {
+ compatible = "nvidia,tegra234-pcie";
+ power-domains = <&bpmp TEGRA234_POWER_DOMAIN_PCIEX4BB>;
+ reg = <0x00 0x14160000 0x0 0x00020000>, /* appl registers (128K) */
+ <0x00 0x36000000 0x0 0x00040000>, /* configuration space (256K) */
+ <0x00 0x36040000 0x0 0x00040000>, /* iATU_DMA reg space (256K) */
+ <0x00 0x36080000 0x0 0x00040000>; /* DBI reg space (256K) */
+ reg-names = "appl", "config", "atu_dma", "dbi";
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ num-lanes = <4>;
+ num-viewport = <8>;
+ linux,pci-domain = <4>;
+
+ clocks = <&bpmp TEGRA234_CLK_PEX0_C4_CORE>;
+ clock-names = "core";
+
+ resets = <&bpmp TEGRA234_RESET_PEX0_CORE_4_APB>,
+ <&bpmp TEGRA234_RESET_PEX0_CORE_4>;
+ reset-names = "apb", "core";
+
+ interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>, /* controller interrupt */
+ <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>; /* MSI interrupt */
+ interrupt-names = "intr", "msi";
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &gic GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
+
+ nvidia,bpmp = <&bpmp 4>;
+
+ nvidia,aspm-cmrt-us = <60>;
+ nvidia,aspm-pwr-on-t-us = <20>;
+ nvidia,aspm-l0s-entrance-latency-us = <3>;
+
+ bus-range = <0x0 0xff>;
+ ranges = <0x43000000 0x21 0x40000000 0x21 0x40000000 0x2 0xe8000000>, /* prefetchable */
+ <0x02000000 0x0 0x40000000 0x24 0x28000000 0x0 0x08000000>, /* non-prefetchable */
+ <0x01000000 0x0 0x36100000 0x00 0x36100000 0x0 0x00100000>; /* downstream I/O */
+
+ vddio-pex-ctl-supply = <&p3701_vdd_AO_1v8>;
+
+ phys = <&p2u_hsio_4>, <&p2u_hsio_5>, <&p2u_hsio_6>,
+ <&p2u_hsio_7>;
+ phy-names = "p2u-0", "p2u-1", "p2u-2", "p2u-3";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt b/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt
deleted file mode 100644
index aeba38f0a387..000000000000
--- a/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt
+++ /dev/null
@@ -1,84 +0,0 @@
-Renesas AHB to PCI bridge
--------------------------
-
-This is the bridge used internally to connect the USB controllers to the
-AHB. There is one bridge instance per USB port connected to the internal
-OHCI and EHCI controllers.
-
-Required properties:
-- compatible: "renesas,pci-r8a7742" for the R8A7742 SoC;
- "renesas,pci-r8a7743" for the R8A7743 SoC;
- "renesas,pci-r8a7744" for the R8A7744 SoC;
- "renesas,pci-r8a7745" for the R8A7745 SoC;
- "renesas,pci-r8a7790" for the R8A7790 SoC;
- "renesas,pci-r8a7791" for the R8A7791 SoC;
- "renesas,pci-r8a7793" for the R8A7793 SoC;
- "renesas,pci-r8a7794" for the R8A7794 SoC;
- "renesas,pci-rcar-gen2" for a generic R-Car Gen2 or
- RZ/G1 compatible device.
-
-
- When compatible with the generic version, nodes must list the
- SoC-specific version corresponding to the platform first
- followed by the generic version.
-
-- reg: A list of physical regions to access the device: the first is
- the operational registers for the OHCI/EHCI controllers and the
- second is for the bridge configuration and control registers.
-- interrupts: interrupt for the device.
-- clocks: The reference to the device clock.
-- bus-range: The PCI bus number range; as this is a single bus, the range
- should be specified as the same value twice.
-- #address-cells: must be 3.
-- #size-cells: must be 2.
-- #interrupt-cells: must be 1.
-- interrupt-map: standard property used to define the mapping of the PCI
- interrupts to the GIC interrupts.
-- interrupt-map-mask: standard property that helps to define the interrupt
- mapping.
-
-Optional properties:
-- dma-ranges: a single range for the inbound memory region. If not supplied,
- defaults to 1GiB at 0x40000000. Note there are hardware restrictions on the
- allowed combinations of address and size.
-
-Example SoC configuration:
-
- pci0: pci@ee090000 {
- compatible = "renesas,pci-r8a7790", "renesas,pci-rcar-gen2";
- clocks = <&mstp7_clks R8A7790_CLK_EHCI>;
- reg = <0x0 0xee090000 0x0 0xc00>,
- <0x0 0xee080000 0x0 0x1100>;
- interrupts = <0 108 IRQ_TYPE_LEVEL_HIGH>;
- status = "disabled";
-
- bus-range = <0 0>;
- #address-cells = <3>;
- #size-cells = <2>;
- #interrupt-cells = <1>;
- dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x40000000>;
- interrupt-map-mask = <0xff00 0 0 0x7>;
- interrupt-map = <0x0000 0 0 1 &gic 0 108 IRQ_TYPE_LEVEL_HIGH
- 0x0800 0 0 1 &gic 0 108 IRQ_TYPE_LEVEL_HIGH
- 0x1000 0 0 2 &gic 0 108 IRQ_TYPE_LEVEL_HIGH>;
-
- usb@1,0 {
- reg = <0x800 0 0 0 0>;
- phys = <&usb0 0>;
- phy-names = "usb";
- };
-
- usb@2,0 {
- reg = <0x1000 0 0 0 0>;
- phys = <&usb0 0>;
- phy-names = "usb";
- };
- };
-
-Example board setup:
-
-&pci0 {
- status = "okay";
- pinctrl-0 = <&usb0_pins>;
- pinctrl-names = "default";
-};
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie.yaml
index 0b69b12b849e..7d29e2a45183 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie.yaml
@@ -11,7 +11,7 @@ maintainers:
- Stanimir Varbanov <svarbanov@mm-sol.com>
description: |
- Qualcomm PCIe root complex controller is bansed on the Synopsys DesignWare
+ Qualcomm PCIe root complex controller is based on the Synopsys DesignWare
PCIe IP.
properties:
@@ -43,11 +43,12 @@ properties:
maxItems: 5
interrupts:
- maxItems: 1
+ minItems: 1
+ maxItems: 8
interrupt-names:
- items:
- - const: msi
+ minItems: 1
+ maxItems: 8
# Common definitions for clocks, clock-names and reset.
# Platform constraints are described later.
@@ -614,7 +615,7 @@ allOf:
- if:
not:
properties:
- compatibles:
+ compatible:
contains:
enum:
- qcom,pcie-msm8996
@@ -623,6 +624,50 @@ allOf:
- resets
- reset-names
+ # Newer chipsets support either 1 or 8 MSI vectors
+ # On older chipsets it's always 1 MSI vector
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,pcie-msm8996
+ - qcom,pcie-sc7280
+ - qcom,pcie-sc8180x
+ - qcom,pcie-sdm845
+ - qcom,pcie-sm8150
+ - qcom,pcie-sm8250
+ - qcom,pcie-sm8450-pcie0
+ - qcom,pcie-sm8450-pcie1
+ then:
+ oneOf:
+ - properties:
+ interrupts:
+ maxItems: 1
+ interrupt-names:
+ items:
+ - const: msi
+ - properties:
+ interrupts:
+ minItems: 8
+ interrupt-names:
+ items:
+ - const: msi0
+ - const: msi1
+ - const: msi2
+ - const: msi3
+ - const: msi4
+ - const: msi5
+ - const: msi6
+ - const: msi7
+ else:
+ properties:
+ interrupts:
+ maxItems: 1
+ interrupt-names:
+ items:
+ - const: msi
+
unevaluatedProperties: false
examples:
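For the chipsets matched by the new "then" branch, a hedged sketch of the
split 8-vector MSI form (the GIC SPI numbers are illustrative only):

	interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
	interrupt-names = "msi0", "msi1", "msi2", "msi3",
			  "msi4", "msi5", "msi6", "msi7";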
diff --git a/Documentation/devicetree/bindings/pci/renesas,pci-rcar-gen2.yaml b/Documentation/devicetree/bindings/pci/renesas,pci-rcar-gen2.yaml
new file mode 100644
index 000000000000..0f18cceba3d5
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/renesas,pci-rcar-gen2.yaml
@@ -0,0 +1,186 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/renesas,pci-rcar-gen2.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas AHB to PCI bridge
+
+maintainers:
+ - Marek Vasut <marek.vasut+renesas@gmail.com>
+ - Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+
+description: |
+ This is the bridge used internally to connect the USB controllers to the
+ AHB. There is one bridge instance per USB port connected to the internal
+ OHCI and EHCI controllers.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - renesas,pci-r8a7742 # RZ/G1H
+ - renesas,pci-r8a7743 # RZ/G1M
+ - renesas,pci-r8a7744 # RZ/G1N
+ - renesas,pci-r8a7745 # RZ/G1E
+ - renesas,pci-r8a7790 # R-Car H2
+ - renesas,pci-r8a7791 # R-Car M2-W
+ - renesas,pci-r8a7793 # R-Car M2-N
+ - renesas,pci-r8a7794 # R-Car E2
+ - const: renesas,pci-rcar-gen2 # R-Car Gen2 and RZ/G1
+ - items:
+ - enum:
+ - renesas,pci-r9a06g032 # RZ/N1D
+ - const: renesas,pci-rzn1 # RZ/N1
+
+ reg:
+ items:
+ - description: Operational registers for the OHCI/EHCI controllers.
+ - description: Bridge configuration and control registers.
+
+ interrupts:
+ maxItems: 1
+
+ clocks: true
+
+ clock-names: true
+
+ resets:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ bus-range:
+ description: |
+ The PCI bus number range; as this is a single bus, the range
+ should be specified as the same value twice.
+
+ dma-ranges:
+ description: |
+ A single range for the inbound memory region. If not supplied,
+ defaults to 1GiB at 0x40000000. Note there are hardware restrictions on
+ the allowed combinations of address and size.
+ maxItems: 1
+
+patternProperties:
+ 'usb@[0-1],0':
+ type: object
+
+ description:
+ This is a USB controller PCI device
+
+ properties:
+ reg:
+ description:
+ Identify the correct bus, device and function number in the
+ form <bdf 0 0 0 0>.
+
+ items:
+ minItems: 5
+ maxItems: 5
+
+ phys:
+ description:
+ Reference to the USB phy
+ maxItems: 1
+
+ phy-names:
+ maxItems: 1
+
+ required:
+ - reg
+ - phys
+ - phy-names
+
+ unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-map
+ - interrupt-map-mask
+ - clocks
+ - power-domains
+ - bus-range
+ - "#address-cells"
+ - "#size-cells"
+ - "#interrupt-cells"
+
+allOf:
+ - $ref: /schemas/pci/pci-bus.yaml#
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - renesas,pci-rzn1
+ then:
+ properties:
+ clocks:
+ items:
+ - description: Internal bus clock (AHB) for HOST
+ - description: Internal bus clock (AHB) Power Management
+ - description: PCI clock for USB subsystem
+ clock-names:
+ items:
+ - const: hclkh
+ - const: hclkpm
+ - const: pciclk
+ required:
+ - clock-names
+ else:
+ properties:
+ clocks:
+ items:
+ - description: Device clock
+ clock-names:
+ items:
+ - const: pclk
+ required:
+ - resets
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/r8a7790-cpg-mssr.h>
+ #include <dt-bindings/power/r8a7790-sysc.h>
+
+ pci@ee090000 {
+ compatible = "renesas,pci-r8a7790", "renesas,pci-rcar-gen2";
+ device_type = "pci";
+ reg = <0xee090000 0xc00>,
+ <0xee080000 0x1100>;
+ clocks = <&cpg CPG_MOD 703>;
+ power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
+ resets = <&cpg 703>;
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+
+ bus-range = <0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ ranges = <0x02000000 0 0xee080000 0xee080000 0 0x00010000>;
+ dma-ranges = <0x42000000 0 0x40000000 0x40000000 0 0x40000000>;
+ interrupt-map-mask = <0xf800 0 0 0x7>;
+ interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+ <0x0800 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+ <0x1000 0 0 2 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+
+ usb@1,0 {
+ reg = <0x800 0 0 0 0>;
+ phys = <&usb0 0>;
+ phy-names = "usb";
+ };
+
+ usb@2,0 {
+ reg = <0x1000 0 0 0 0>;
+ phys = <&usb0 0>;
+ phy-names = "usb";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml b/Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml
index c90e5e2d25f6..7287d395e1b6 100644
--- a/Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml
@@ -34,8 +34,8 @@ properties:
minItems: 2
maxItems: 5
items:
- enum: [ dbi, dbi2, config, atu, app, elbi, mgmt, ctrl, parf, cfg, link,
- ulreg, smu, mpu, apb, phy ]
+ enum: [ dbi, dbi2, config, atu, atu_dma, app, appl, elbi, mgmt, ctrl,
+ parf, cfg, link, ulreg, smu, mpu, apb, phy ]
num-lanes:
description: |
diff --git a/Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml b/Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml
index cca395317a4c..24ddc2855b94 100644
--- a/Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml
+++ b/Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml
@@ -14,17 +14,23 @@ allOf:
properties:
compatible:
- const: xlnx,versal-cpm-host-1.00
+ enum:
+ - xlnx,versal-cpm-host-1.00
+ - xlnx,versal-cpm5-host
reg:
items:
- description: CPM system level control and status registers.
- description: Configuration space region and bridge registers.
+ - description: CPM5 control and status registers.
+ minItems: 2
reg-names:
items:
- const: cpm_slcr
- const: cfg
+ - const: cpm_csr
+ minItems: 2
interrupts:
maxItems: 1
@@ -95,4 +101,34 @@ examples:
interrupt-controller;
};
};
+
+ cpm5_pcie: pcie@fcdd0000 {
+ compatible = "xlnx,versal-cpm5-host";
+ device_type = "pci";
+ #address-cells = <3>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ interrupts = <0 72 4>;
+ interrupt-parent = <&gic>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pcie_intc_1 0>,
+ <0 0 0 2 &pcie_intc_1 1>,
+ <0 0 0 3 &pcie_intc_1 2>,
+ <0 0 0 4 &pcie_intc_1 3>;
+ bus-range = <0x00 0xff>;
+ ranges = <0x02000000 0x0 0xe0000000 0x0 0xe0000000 0x0 0x10000000>,
+ <0x43000000 0x80 0x00000000 0x80 0x00000000 0x0 0x80000000>;
+ msi-map = <0x0 &its_gic 0x0 0x10000>;
+ reg = <0x00 0xfcdd0000 0x00 0x1000>,
+ <0x06 0x00000000 0x00 0x1000000>,
+ <0x00 0xfce20000 0x00 0x1000000>;
+ reg-names = "cpm_slcr", "cfg", "cpm_csr";
+
+ pcie_intc_1: interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ };
+ };
+
};
diff --git a/Documentation/devicetree/bindings/perf/arm,ccn.yaml b/Documentation/devicetree/bindings/perf/arm,ccn.yaml
new file mode 100644
index 000000000000..0b0bb2091016
--- /dev/null
+++ b/Documentation/devicetree/bindings/perf/arm,ccn.yaml
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/perf/arm,ccn.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM CCN (Cache Coherent Network) Performance Monitors
+
+maintainers:
+ - Robin Murphy <robin.murphy@arm.com>
+
+properties:
+ compatible:
+ enum:
+ - arm,ccn-502
+ - arm,ccn-504
+ - arm,ccn-508
+ - arm,ccn-512
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ ccn@20000000 {
+ compatible = "arm,ccn-504";
+ reg = <0x20000000 0x1000000>;
+ interrupts = <0 181 4>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/perf/arm-ccn.txt b/Documentation/devicetree/bindings/perf/arm-ccn.txt
deleted file mode 100644
index 1c53b5aa3317..000000000000
--- a/Documentation/devicetree/bindings/perf/arm-ccn.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-* ARM CCN (Cache Coherent Network)
-
-Required properties:
-
-- compatible: (standard compatible string) should be one of:
- "arm,ccn-502"
- "arm,ccn-504"
- "arm,ccn-508"
- "arm,ccn-512"
-
-- reg: (standard registers property) physical address and size
- (16MB) of the configuration registers block
-
-- interrupts: (standard interrupt property) single interrupt
- generated by the control block
-
-Example:
-
- ccn@2000000000 {
- compatible = "arm,ccn-504";
- reg = <0x20 0x00000000 0 0x1000000>;
- interrupts = <0 181 4>;
- };
diff --git a/Documentation/devicetree/bindings/phy/amlogic,meson-axg-mipi-pcie-analog.yaml b/Documentation/devicetree/bindings/phy/amlogic,meson-axg-mipi-pcie-analog.yaml
index 4d01f3124e1c..a90fa1baadab 100644
--- a/Documentation/devicetree/bindings/phy/amlogic,meson-axg-mipi-pcie-analog.yaml
+++ b/Documentation/devicetree/bindings/phy/amlogic,meson-axg-mipi-pcie-analog.yaml
@@ -16,7 +16,7 @@ description: |+
- compatible: Should be the following:
"amlogic,meson-gx-hhi-sysctrl", "simple-mfd", "syscon"
- Refer to the the bindings described in
+ Refer to the bindings described in
Documentation/devicetree/bindings/mfd/syscon.yaml
properties:
diff --git a/Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml
index 0681b9a3965f..d19d65c870aa 100644
--- a/Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml
@@ -46,6 +46,7 @@ properties:
- allwinner,sun8i-v3s-pinctrl
- allwinner,sun9i-a80-pinctrl
- allwinner,sun9i-a80-r-pinctrl
+ - allwinner,sun20i-d1-pinctrl
- allwinner,sun50i-a64-pinctrl
- allwinner,sun50i-a64-r-pinctrl
- allwinner,sun50i-a100-pinctrl
@@ -80,9 +81,6 @@ properties:
- const: hosc
- const: losc
- resets:
- maxItems: 1
-
gpio-controller: true
interrupt-controller: true
gpio-line-names: true
@@ -185,6 +183,18 @@ allOf:
properties:
compatible:
enum:
+ - allwinner,sun20i-d1-pinctrl
+
+ then:
+ properties:
+ interrupts:
+ minItems: 6
+ maxItems: 6
+
+ - if:
+ properties:
+ compatible:
+ enum:
- allwinner,sun9i-a80-pinctrl
then:
diff --git a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml
index c689bea7ce6e..d3a8911728d0 100644
--- a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml
@@ -16,7 +16,7 @@ description: |+
- compatible: Should be one of the following:
"aspeed,ast2400-scu", "syscon", "simple-mfd"
- Refer to the the bindings described in
+ Refer to the bindings described in
Documentation/devicetree/bindings/mfd/syscon.yaml
properties:
diff --git a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml
index 9db904a528ee..5d2c1b1fb7fd 100644
--- a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml
@@ -17,7 +17,7 @@ description: |+
"aspeed,ast2500-scu", "syscon", "simple-mfd"
"aspeed,g5-scu", "syscon", "simple-mfd"
- Refer to the the bindings described in
+ Refer to the bindings described in
Documentation/devicetree/bindings/mfd/syscon.yaml
properties:
diff --git a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml
index 3666ac5b6518..e92686d2f062 100644
--- a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml
@@ -16,7 +16,7 @@ description: |+
- compatible: Should be one of the following:
"aspeed,ast2600-scu", "syscon", "simple-mfd"
- Refer to the the bindings described in
+ Refer to the bindings described in
Documentation/devicetree/bindings/mfd/syscon.yaml
properties:
diff --git a/Documentation/devicetree/bindings/pinctrl/nuvoton,wpcm450-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/nuvoton,wpcm450-pinctrl.yaml
index 47a56b83a610..7a11beb8f222 100644
--- a/Documentation/devicetree/bindings/pinctrl/nuvoton,wpcm450-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/nuvoton,wpcm450-pinctrl.yaml
@@ -152,7 +152,7 @@ examples:
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uid>, <&pinmux_uid>;
- uid {
+ button-uid {
label = "UID";
linux,code = <102>;
gpios = <&gpio0 14 GPIO_ACTIVE_HIGH>;
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-mt8186.yaml b/Documentation/devicetree/bindings/pinctrl/pinctrl-mt8186.yaml
index 8a2bb8608291..1eeb885ce0c6 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-mt8186.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-mt8186.yaml
@@ -28,6 +28,8 @@ properties:
gpio-ranges:
maxItems: 1
+ gpio-line-names: true
+
reg:
description: |
Physical address base for gpio base registers. There are 8 different GPIO
@@ -105,31 +107,8 @@ patternProperties:
drive-strength:
enum: [2, 4, 6, 8, 10, 12, 14, 16]
- mediatek,drive-strength-adv:
- description: |
- Describe the specific driving setup property.
- For I2C pins, the existing generic driving setup can only support
- 2/4/6/8/10/12/14/16mA driving. But in specific driving setup, they
- can support 0.125/0.25/0.5/1mA adjustment. If we enable specific
- driving setup, the existing generic setup will be disabled.
- The specific driving setup is controlled by E1E0EN.
- When E1=0/E0=0, the strength is 0.125mA.
- When E1=0/E0=1, the strength is 0.25mA.
- When E1=1/E0=0, the strength is 0.5mA.
- When E1=1/E0=1, the strength is 1mA.
- EN is used to enable or disable the specific driving setup.
- Valid arguments are described as below:
- 0: (E1, E0, EN) = (0, 0, 0)
- 1: (E1, E0, EN) = (0, 0, 1)
- 2: (E1, E0, EN) = (0, 1, 0)
- 3: (E1, E0, EN) = (0, 1, 1)
- 4: (E1, E0, EN) = (1, 0, 0)
- 5: (E1, E0, EN) = (1, 0, 1)
- 6: (E1, E0, EN) = (1, 1, 0)
- 7: (E1, E0, EN) = (1, 1, 1)
- So the valid arguments are from 0 to 7.
- $ref: /schemas/types.yaml#/definitions/uint32
- enum: [0, 1, 2, 3, 4, 5, 6, 7]
+ drive-strength-microamp:
+ enum: [125, 250, 500, 1000]
bias-pull-down:
oneOf:
@@ -291,7 +270,7 @@ examples:
pinmux = <PINMUX_GPIO127__FUNC_SCL0>,
<PINMUX_GPIO128__FUNC_SDA0>;
bias-pull-up = <MTK_PULL_SET_RSEL_001>;
- mediatek,drive-strength-adv = <7>;
+ drive-strength-microamp = <1000>;
};
};
};
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-mt8192.yaml b/Documentation/devicetree/bindings/pinctrl/pinctrl-mt8192.yaml
index c90a132fbc79..e0e943e5b874 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-mt8192.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-mt8192.yaml
@@ -80,46 +80,30 @@ patternProperties:
dt-bindings/pinctrl/mt65xx.h. It can only support 2/4/6/8/10/12/14/16mA in mt8192.
enum: [2, 4, 6, 8, 10, 12, 14, 16]
- mediatek,drive-strength-adv:
- description: |
- Describe the specific driving setup property.
- For I2C pins, the existing generic driving setup can only support
- 2/4/6/8/10/12/14/16mA driving. But in specific driving setup, they
- can support 0.125/0.25/0.5/1mA adjustment. If we enable specific
- driving setup, the existing generic setup will be disabled.
- The specific driving setup is controlled by E1E0EN.
- When E1=0/E0=0, the strength is 0.125mA.
- When E1=0/E0=1, the strength is 0.25mA.
- When E1=1/E0=0, the strength is 0.5mA.
- When E1=1/E0=1, the strength is 1mA.
- EN is used to enable or disable the specific driving setup.
- Valid arguments are described as below:
- 0: (E1, E0, EN) = (0, 0, 0)
- 1: (E1, E0, EN) = (0, 0, 1)
- 2: (E1, E0, EN) = (0, 1, 0)
- 3: (E1, E0, EN) = (0, 1, 1)
- 4: (E1, E0, EN) = (1, 0, 0)
- 5: (E1, E0, EN) = (1, 0, 1)
- 6: (E1, E0, EN) = (1, 1, 0)
- 7: (E1, E0, EN) = (1, 1, 1)
- So the valid arguments are from 0 to 7.
- $ref: /schemas/types.yaml#/definitions/uint32
- enum: [0, 1, 2, 3, 4, 5, 6, 7]
-
- mediatek,pull-up-adv:
- description: |
- Pull up settings for 2 pull resistors, R0 and R1. User can
- configure those special pins. Valid arguments are described as below:
- 0: (R1, R0) = (0, 0) which means R1 disabled and R0 disabled.
- 1: (R1, R0) = (0, 1) which means R1 disabled and R0 enabled.
- 2: (R1, R0) = (1, 0) which means R1 enabled and R0 disabled.
- 3: (R1, R0) = (1, 1) which means R1 enabled and R0 enabled.
- $ref: /schemas/types.yaml#/definitions/uint32
- enum: [0, 1, 2, 3]
-
- bias-pull-down: true
-
- bias-pull-up: true
+ drive-strength-microamp:
+ enum: [125, 250, 500, 1000]
+
+ bias-pull-down:
+ oneOf:
+ - type: boolean
+ description: normal pull down.
+ - enum: [100, 101, 102, 103]
+ description: PUPD/R1/R0 pull down type. See MTK_PUPD_SET_R1R0_
+ defines in dt-bindings/pinctrl/mt65xx.h.
+ - enum: [200, 201, 202, 203]
+ description: RSEL pull down type. See MTK_PULL_SET_RSEL_
+ defines in dt-bindings/pinctrl/mt65xx.h.
+
+ bias-pull-up:
+ oneOf:
+ - type: boolean
+ description: normal pull up.
+ - enum: [100, 101, 102, 103]
+ description: PUPD/R1/R0 pull up type. See MTK_PUPD_SET_R1R0_
+ defines in dt-bindings/pinctrl/mt65xx.h.
+ - enum: [200, 201, 202, 203]
+ description: RSEL pull up type. See MTK_PULL_SET_RSEL_
+ defines in dt-bindings/pinctrl/mt65xx.h.
bias-disable: true
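A hedged sketch of an I2C pin group using the reworked properties on mt8192
(the pinmux macros and the R1R0 value are illustrative, not taken from this
patch):

	i2c0-pins {
		pins {
			pinmux = <PINMUX_GPIO204__FUNC_SCL0>,
				 <PINMUX_GPIO205__FUNC_SDA0>;
			bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
			drive-strength-microamp = <1000>;
		};
	};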
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-mt8195.yaml b/Documentation/devicetree/bindings/pinctrl/pinctrl-mt8195.yaml
index c5b755514c46..66fe17e9e4d3 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-mt8195.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-mt8195.yaml
@@ -29,6 +29,8 @@ properties:
description: gpio valid number range.
maxItems: 1
+ gpio-line-names: true
+
reg:
description: |
Physical address base for gpio base registers. There are 8 GPIO
@@ -49,7 +51,7 @@ properties:
description: The interrupt outputs to sysirq.
maxItems: 1
- mediatek,rsel_resistance_in_si_unit:
+ mediatek,rsel-resistance-in-si-unit:
type: boolean
description: |
Identifying i2c pins pull up/down type which is RSEL. It can support
@@ -98,31 +100,8 @@ patternProperties:
drive-strength:
enum: [2, 4, 6, 8, 10, 12, 14, 16]
- mediatek,drive-strength-adv:
- description: |
- Describe the specific driving setup property.
- For I2C pins, the existing generic driving setup can only support
- 2/4/6/8/10/12/14/16mA driving. But in specific driving setup, they
- can support 0.125/0.25/0.5/1mA adjustment. If we enable specific
- driving setup, the existing generic setup will be disabled.
- The specific driving setup is controlled by E1E0EN.
- When E1=0/E0=0, the strength is 0.125mA.
- When E1=0/E0=1, the strength is 0.25mA.
- When E1=1/E0=0, the strength is 0.5mA.
- When E1=1/E0=1, the strength is 1mA.
- EN is used to enable or disable the specific driving setup.
- Valid arguments are described as below:
- 0: (E1, E0, EN) = (0, 0, 0)
- 1: (E1, E0, EN) = (0, 0, 1)
- 2: (E1, E0, EN) = (0, 1, 0)
- 3: (E1, E0, EN) = (0, 1, 1)
- 4: (E1, E0, EN) = (1, 0, 0)
- 5: (E1, E0, EN) = (1, 0, 1)
- 6: (E1, E0, EN) = (1, 1, 0)
- 7: (E1, E0, EN) = (1, 1, 1)
- So the valid arguments are from 0 to 7.
- $ref: /schemas/types.yaml#/definitions/uint32
- enum: [0, 1, 2, 3, 4, 5, 6, 7]
+ drive-strength-microamp:
+ enum: [125, 250, 500, 1000]
bias-pull-down:
oneOf:
@@ -142,7 +121,7 @@ patternProperties:
"MTK_PUPD_SET_R1R0_11" define in mt8195.
For pull down type is RSEL, it can add RSEL define & resistance
value(ohm) to set different resistance by identifying property
- "mediatek,rsel_resistance_in_si_unit".
+ "mediatek,rsel-resistance-in-si-unit".
It can support "MTK_PULL_SET_RSEL_000" & "MTK_PULL_SET_RSEL_001"
& "MTK_PULL_SET_RSEL_010" & "MTK_PULL_SET_RSEL_011"
& "MTK_PULL_SET_RSEL_100" & "MTK_PULL_SET_RSEL_101"
@@ -161,7 +140,7 @@ patternProperties:
};
An example of using si unit resistance value(ohm):
&pio {
- mediatek,rsel_resistance_in_si_unit;
+ mediatek,rsel-resistance-in-si-unit;
}
pincontroller {
i2c0_pin {
@@ -190,7 +169,7 @@ patternProperties:
"MTK_PUPD_SET_R1R0_11" define in mt8195.
For pull up type is RSEL, it can add RSEL define & resistance
value(ohm) to set different resistance by identifying property
- "mediatek,rsel_resistance_in_si_unit".
+ "mediatek,rsel-resistance-in-si-unit".
It can support "MTK_PULL_SET_RSEL_000" & "MTK_PULL_SET_RSEL_001"
& "MTK_PULL_SET_RSEL_010" & "MTK_PULL_SET_RSEL_011"
& "MTK_PULL_SET_RSEL_100" & "MTK_PULL_SET_RSEL_101"
@@ -209,7 +188,7 @@ patternProperties:
};
An example of using si unit resistance value(ohm):
&pio {
- mediatek,rsel_resistance_in_si_unit;
+ mediatek,rsel-resistance-in-si-unit;
}
pincontroller {
i2c0-pins {
@@ -302,7 +281,7 @@ examples:
pinmux = <PINMUX_GPIO8__FUNC_SDA0>,
<PINMUX_GPIO9__FUNC_SCL0>;
bias-disable;
- mediatek,drive-strength-adv = <7>;
+ drive-strength-microamp = <1000>;
};
};
};
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,ipq6018-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,ipq6018-pinctrl.yaml
index b83c7f476e19..931e5c190ead 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,ipq6018-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,ipq6018-pinctrl.yaml
@@ -144,7 +144,7 @@ examples:
#interrupt-cells = <2>;
gpio-controller;
#gpio-cells = <2>;
- gpio-ranges = <&tlmm 0 80>;
+ gpio-ranges = <&tlmm 0 0 80>;
serial3-pinmux {
pins = "gpio44", "gpio45";
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msm8909-tlmm.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,msm8909-tlmm.yaml
new file mode 100644
index 000000000000..e03530091478
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,msm8909-tlmm.yaml
@@ -0,0 +1,152 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/qcom,msm8909-tlmm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Technologies, Inc. MSM8909 TLMM block
+
+maintainers:
+ - Stephan Gerhold <stephan@gerhold.net>
+
+description: |
+ This binding describes the Top Level Mode Multiplexer (TLMM) block found
+ in the MSM8909 platform.
+
+allOf:
+ - $ref: /schemas/pinctrl/qcom,tlmm-common.yaml#
+
+properties:
+ compatible:
+ const: qcom,msm8909-tlmm
+
+ reg:
+ maxItems: 1
+
+ interrupts: true
+ interrupt-controller: true
+ '#interrupt-cells': true
+ gpio-controller: true
+ gpio-reserved-ranges: true
+ '#gpio-cells': true
+ gpio-ranges: true
+ wakeup-parent: true
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+patternProperties:
+ '-state$':
+ oneOf:
+ - $ref: "#/$defs/qcom-msm8909-tlmm-state"
+ - patternProperties:
+ ".*":
+ $ref: "#/$defs/qcom-msm8909-tlmm-state"
+
+$defs:
+ qcom-msm8909-tlmm-state:
+ type: object
+ description:
+ Pinctrl node's client devices use subnodes for desired pin configuration.
+ Client device subnodes use below standard properties.
+ $ref: "qcom,tlmm-common.yaml#/$defs/qcom-tlmm-state"
+
+ properties:
+ pins:
+ description:
+ List of gpio pins affected by the properties specified in this
+ subnode.
+ items:
+ oneOf:
+ - pattern: "^gpio([0-9]|[1-9][0-9]|10[0-9]|11[0-7])$"
+ - enum: [ sdc1_clk, sdc1_cmd, sdc1_data, sdc2_clk, sdc2_cmd,
+ sdc2_data, qdsd_clk, qdsd_cmd, qdsd_data0, qdsd_data1,
+ qdsd_data2, qdsd_data3 ]
+ minItems: 1
+ maxItems: 16
+
+ function:
+ description:
+ Specify the alternative function to be configured for the specified
+ pins.
+ enum: [ adsp_ext, atest_bbrx0, atest_bbrx1, atest_char, atest_char0,
+ atest_char1, atest_char2, atest_char3, atest_combodac,
+ atest_gpsadc0, atest_gpsadc1, atest_wlan0, atest_wlan1,
+ bimc_dte0, bimc_dte1, blsp_i2c1, blsp_i2c2, blsp_i2c3,
+ blsp_i2c4, blsp_i2c5, blsp_i2c6, blsp_spi1, blsp_spi1_cs1,
+ blsp_spi1_cs2, blsp_spi1_cs3, blsp_spi2, blsp_spi2_cs1,
+ blsp_spi2_cs2, blsp_spi2_cs3, blsp_spi3, blsp_spi3_cs1,
+ blsp_spi3_cs2, blsp_spi3_cs3, blsp_spi4, blsp_spi5, blsp_spi6,
+ blsp_uart1, blsp_uart2, blsp_uim1, blsp_uim2, cam_mclk,
+ cci_async, cci_timer0, cci_timer1, cci_timer2, cdc_pdm0,
+ dbg_out, dmic0_clk, dmic0_data, ebi0_wrcdc, ebi2_a, ebi2_lcd,
+ ext_lpass, gcc_gp1_clk_a, gcc_gp1_clk_b, gcc_gp2_clk_a,
+ gcc_gp2_clk_b, gcc_gp3_clk_a, gcc_gp3_clk_b, gcc_plltest, gpio,
+ gsm0_tx, ldo_en, ldo_update, m_voc, mdp_vsync, modem_tsync,
+ nav_pps, nav_tsync, pa_indicator, pbs0, pbs1, pbs2,
+ pri_mi2s_data0_a, pri_mi2s_data0_b, pri_mi2s_data1_a,
+ pri_mi2s_data1_b, pri_mi2s_mclk_a, pri_mi2s_mclk_b,
+ pri_mi2s_sck_a, pri_mi2s_sck_b, pri_mi2s_ws_a, pri_mi2s_ws_b,
+ prng_rosc, pwr_crypto_enabled_a, pwr_crypto_enabled_b,
+ pwr_modem_enabled_a, pwr_modem_enabled_b, pwr_nav_enabled_a,
+ pwr_nav_enabled_b, qdss_cti_trig_in_a0, qdss_cti_trig_in_a1,
+ qdss_cti_trig_in_b0, qdss_cti_trig_in_b1, qdss_cti_trig_out_a0,
+ qdss_cti_trig_out_a1, qdss_cti_trig_out_b0,
+ qdss_cti_trig_out_b1, qdss_traceclk_a, qdss_tracectl_a,
+ qdss_tracedata_a, qdss_tracedata_b, sd_write, sec_mi2s,
+ smb_int, ssbi0, ssbi1, uim1_clk, uim1_data, uim1_present,
+ uim1_reset, uim2_clk, uim2_data, uim2_present, uim2_reset,
+ uim3_clk, uim3_data, uim3_present, uim3_reset, uim_batt,
+ wcss_bt, wcss_fm, wcss_wlan ]
+
+ bias-disable: true
+ bias-pull-down: true
+ bias-pull-up: true
+ drive-strength: true
+ input-enable: true
+ output-high: true
+ output-low: true
+
+ required:
+ - pins
+ - function
+
+ additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ pinctrl@1000000 {
+ compatible = "qcom,msm8909-tlmm";
+ reg = <0x1000000 0x300000>;
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&tlmm 0 0 117>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ gpio-wo-subnode-state {
+ pins = "gpio1";
+ function = "gpio";
+ };
+
+ uart-w-subnodes-state {
+ rx {
+ pins = "gpio4";
+ function = "blsp_uart1";
+ bias-pull-up;
+ };
+
+ tx {
+ pins = "gpio5";
+ function = "blsp_uart1";
+ bias-disable;
+ };
+ };
+ };
+...
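For context, a client device selects the states defined by this new binding through the generic pinctrl consumer properties. A minimal, hypothetical sketch follows (the &blsp1_uart1 label, node names and state name are illustrative and not taken from the patch; the pin/function values mirror the binding's own example):

    &blsp1_uart1 {
        pinctrl-names = "default";
        pinctrl-0 = <&uart1_default>;
        status = "okay";
    };

    &tlmm {
        uart1_default: uart1-default-state {
            rx {
                pins = "gpio4";
                function = "blsp_uart1";
                bias-pull-up;
            };

            tx {
                pins = "gpio5";
                function = "blsp_uart1";
                bias-disable;
            };
        };
    };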
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml
index 6f2efc3772cb..694898f382be 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml
@@ -52,6 +52,7 @@ properties:
- qcom,pmi8998-gpio
- qcom,pmk8350-gpio
- qcom,pmm8155au-gpio
+ - qcom,pmp8074-gpio
- qcom,pmr735a-gpio
- qcom,pmr735b-gpio
- qcom,pms405-gpio
@@ -158,6 +159,7 @@ allOf:
compatible:
contains:
enum:
+ - qcom,pm8226-gpio
- qcom,pm8350b-gpio
- qcom,pm8950-gpio
then:
@@ -233,6 +235,7 @@ allOf:
- qcom,pm8150b-gpio
- qcom,pm8150l-gpio
- qcom,pmc8180c-gpio
+ - qcom,pmp8074-gpio
- qcom,pms405-gpio
then:
properties:
@@ -415,6 +418,7 @@ $defs:
- gpio1-gpio10 for pmi8994
- gpio1-gpio4 for pmk8350
- gpio1-gpio10 for pmm8155au
+ - gpio1-gpio12 for pmp8074 (holes on gpio1 and gpio12)
- gpio1-gpio4 for pmr735a
- gpio1-gpio4 for pmr735b
- gpio1-gpio12 for pms405 (holes on gpio1, gpio9
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sc7280-lpass-lpi-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,sc7280-lpass-lpi-pinctrl.yaml
index d32ee32776e8..33d1d37fdf6d 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,sc7280-lpass-lpi-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sc7280-lpass-lpi-pinctrl.yaml
@@ -19,6 +19,11 @@ properties:
compatible:
const: qcom,sc7280-lpass-lpi-pinctrl
+ qcom,adsp-bypass-mode:
+ description:
+ Indicates that the ADSP is in bypass mode.
+ type: boolean
+
reg:
minItems: 2
maxItems: 2
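The new property is a plain boolean flag, so in a device tree it simply appears inside the LPASS LPI pinctrl node. A rough sketch, with the unit address as a placeholder and the remaining required properties elided in a comment:

    lpass_tlmm: pinctrl@33c0000 {
        compatible = "qcom,sc7280-lpass-lpi-pinctrl";
        /* reg, clocks, gpio-controller and related properties omitted */
        qcom,adsp-bypass-mode;
    };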
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sm6375-tlmm.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,sm6375-tlmm.yaml
new file mode 100644
index 000000000000..3908807a8339
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sm6375-tlmm.yaml
@@ -0,0 +1,158 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/qcom,sm6375-tlmm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Technologies, Inc. SM6375 TLMM block
+
+maintainers:
+ - Konrad Dybcio <konrad.dybcio@somainline.org>
+
+description: |
+ This binding describes the Top Level Mode Multiplexer (TLMM) block found
+ in the SM6375 platform.
+
+allOf:
+ - $ref: "pinctrl.yaml#"
+ - $ref: /schemas/pinctrl/qcom,tlmm-common.yaml#
+
+properties:
+ compatible:
+ const: qcom,sm6375-tlmm
+
+ reg:
+ maxItems: 1
+
+ interrupts: true
+ interrupt-controller: true
+ '#interrupt-cells': true
+ gpio-controller: true
+ gpio-reserved-ranges: true
+ '#gpio-cells': true
+ gpio-ranges: true
+ wakeup-parent: true
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+patternProperties:
+ '-state$':
+ oneOf:
+ - $ref: "#/$defs/qcom-sm6375-tlmm-state"
+ - patternProperties:
+ ".*":
+ $ref: "#/$defs/qcom-sm6375-tlmm-state"
+
+$defs:
+ qcom-sm6375-tlmm-state:
+ type: object
+ description:
+ Pinctrl node's client devices use subnodes for desired pin configuration.
+ Client device subnodes use below standard properties.
+ $ref: "qcom,tlmm-common.yaml#/$defs/qcom-tlmm-state"
+
+ properties:
+ pins:
+ description:
+ List of gpio pins affected by the properties specified in this
+ subnode.
+ items:
+ oneOf:
+ - pattern: "^gpio([0-9]|[1-9][0-9]|1[0-4][0-9]|15[0-6])$"
+ - enum: [ ufs_reset, sdc1_clk, sdc1_cmd, sdc1_data, sdc2_clk,
+ sdc2_cmd, sdc2_data ]
+ minItems: 1
+ maxItems: 36
+
+ function:
+ description:
+ Specify the alternative function to be configured for the specified
+ pins.
+
+ enum: [ adsp_ext, agera_pll, atest_char, atest_char0, atest_char1,
+ atest_char2, atest_char3, atest_tsens, atest_tsens2,
+ atest_usb1, atest_usb10, atest_usb11, atest_usb12,
+ atest_usb13, atest_usb2, atest_usb20, atest_usb21,
+ atest_usb22, atest_usb23, audio_ref, btfm_slimbus, cam_mclk,
+ cci_async, cci_i2c, cci_timer0, cci_timer1, cci_timer2,
+ cci_timer3, cci_timer4, cri_trng, dbg_out, ddr_bist,
+ ddr_pxi0, ddr_pxi1, ddr_pxi2, ddr_pxi3, dp_hot, edp_lcd,
+ gcc_gp1, gcc_gp2, gcc_gp3, gp_pdm0, gp_pdm1, gp_pdm2, gpio,
+ gps_tx, ibi_i3c, jitter_bist, ldo_en, ldo_update, lpass_ext,
+ m_voc, mclk, mdp_vsync, mdp_vsync0, mdp_vsync1, mdp_vsync2,
+ mdp_vsync3, mi2s_0, mi2s_1, mi2s_2, mss_lte, nav_gpio,
+ nav_pps, pa_indicator, phase_flag0, phase_flag1, phase_flag10,
+ phase_flag11, phase_flag12, phase_flag13, phase_flag14,
+ phase_flag15, phase_flag16, phase_flag17, phase_flag18,
+ phase_flag19, phase_flag2, phase_flag20, phase_flag21,
+ phase_flag22, phase_flag23, phase_flag24, phase_flag25,
+ phase_flag26, phase_flag27, phase_flag28, phase_flag29,
+ phase_flag3, phase_flag30, phase_flag31, phase_flag4,
+ phase_flag5, phase_flag6, phase_flag7, phase_flag8,
+ phase_flag9, pll_bist, pll_bypassnl, pll_clk, pll_reset,
+ prng_rosc0, prng_rosc1, prng_rosc2, prng_rosc3, qdss_cti,
+ qdss_gpio, qdss_gpio0, qdss_gpio1, qdss_gpio10, qdss_gpio11,
+ qdss_gpio12, qdss_gpio13, qdss_gpio14, qdss_gpio15,
+ qdss_gpio2, qdss_gpio3, qdss_gpio4, qdss_gpio5, qdss_gpio6,
+ qdss_gpio7, qdss_gpio8, qdss_gpio9, qlink0_enable,
+ qlink0_request, qlink0_wmss, qlink1_enable, qlink1_request,
+ qlink1_wmss, qup00, qup01, qup02, qup10, qup11_f1, qup11_f2,
+ qup12, qup13_f1, qup13_f2, qup14, sd_write, sdc1_tb, sdc2_tb,
+ sp_cmu, tgu_ch0, tgu_ch1, tgu_ch2, tgu_ch3, tsense_pwm1,
+ tsense_pwm2, uim1_clk, uim1_data, uim1_present, uim1_reset,
+ uim2_clk, uim2_data, uim2_present, uim2_reset, usb2phy_ac,
+ usb_phy, vfr_1, vsense_trigger, wlan1_adc0, wlan1_adc1,
+ wlan2_adc0, wlan2_adc1 ]
+
+
+ bias-disable: true
+ bias-pull-down: true
+ bias-pull-up: true
+ drive-strength: true
+ input-enable: true
+ output-high: true
+ output-low: true
+
+ required:
+ - pins
+ - function
+
+ additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ pinctrl@500000 {
+ compatible = "qcom,sm6375-tlmm";
+ reg = <0x00500000 0x800000>;
+ interrupts = <GIC_SPI 227 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ gpio-ranges = <&tlmm 0 0 157>;
+
+ gpio-wo-subnode-state {
+ pins = "gpio1";
+ function = "gpio";
+ };
+
+ uart-w-subnodes-state {
+ rx {
+ pins = "gpio18";
+ function = "qup13_f2";
+ bias-pull-up;
+ };
+
+ tx {
+ pins = "gpio19";
+ function = "qup13_f2";
+ bias-disable;
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,pfc.yaml b/Documentation/devicetree/bindings/pinctrl/renesas,pfc.yaml
index 2a57df75d832..4fc758fea7e6 100644
--- a/Documentation/devicetree/bindings/pinctrl/renesas,pfc.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/renesas,pfc.yaml
@@ -45,6 +45,7 @@ properties:
- renesas,pfc-r8a77995 # R-Car D3
- renesas,pfc-r8a779a0 # R-Car V3U
- renesas,pfc-r8a779f0 # R-Car S4-8
+ - renesas,pfc-r8a779g0 # R-Car V4H
- renesas,pfc-sh73a0 # SH-Mobile AG5
reg:
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,rzv2m-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/renesas,rzv2m-pinctrl.yaml
new file mode 100644
index 000000000000..eac6245db7dc
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/renesas,rzv2m-pinctrl.yaml
@@ -0,0 +1,170 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/renesas,rzv2m-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas RZ/V2M combined Pin and GPIO controller
+
+maintainers:
+ - Geert Uytterhoeven <geert+renesas@glider.be>
+ - Phil Edworthy <phil.edworthy@renesas.com>
+
+description:
+ The Renesas RZ/V2M SoC features a combined Pin and GPIO controller.
+ Pin multiplexing and GPIO configuration is performed on a per-pin basis.
+ Each port features up to 16 pins, each of them configurable for GPIO function
+ (port mode) or in alternate function mode.
+ Up to 8 different alternate function modes exist for each single pin.
+
+properties:
+ compatible:
+ const: renesas,r9a09g011-pinctrl # RZ/V2M
+
+ reg:
+ maxItems: 1
+
+ gpio-controller: true
+
+ '#gpio-cells':
+ const: 2
+ description:
+ The first cell contains the global GPIO port index, constructed using the
+ RZV2M_GPIO() helper macro in <dt-bindings/pinctrl/rzv2m-pinctrl.h> and the
+ second cell represents consumer flag as mentioned in ../gpio/gpio.txt
+ E.g. "RZV2M_GPIO(8, 1)" for P8_1.
+
+ gpio-ranges:
+ maxItems: 1
+
+ interrupts:
+ description: INEXINT[0..38] corresponding to individual pin inputs.
+ maxItems: 39
+
+ clocks:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+additionalProperties:
+ anyOf:
+ - type: object
+ allOf:
+ - $ref: pincfg-node.yaml#
+ - $ref: pinmux-node.yaml#
+
+ description:
+ Pin controller client devices use pin configuration subnodes (children
+ and grandchildren) for desired pin configuration.
+ Client device subnodes use below standard properties.
+
+ properties:
+ phandle: true
+ pinmux:
+ description:
+ Values are constructed from GPIO port number, pin number, and
+ alternate function configuration number using the RZV2M_PORT_PINMUX()
+ helper macro in <dt-bindings/pinctrl/rzv2m-pinctrl.h>.
+ pins: true
+ bias-disable: true
+ bias-pull-down: true
+ bias-pull-up: true
+ drive-strength-microamp:
+ # Superset of supported values
+ enum: [ 1600, 1800, 2000, 3200, 3800, 4000, 6400, 7800, 8000,
+ 9000, 9600, 11000, 12000, 13000, 18000 ]
+ slew-rate:
+ description: 0 is slow slew rate, 1 is fast slew rate
+ enum: [ 0, 1 ]
+ gpio-hog: true
+ gpios: true
+ output-high: true
+ output-low: true
+ line-name: true
+
+ - type: object
+ properties:
+ phandle: true
+
+ additionalProperties:
+ $ref: "#/additionalProperties/anyOf/0"
+
+allOf:
+ - $ref: "pinctrl.yaml#"
+
+required:
+ - compatible
+ - reg
+ - gpio-controller
+ - '#gpio-cells'
+ - gpio-ranges
+ - interrupts
+ - clocks
+ - power-domains
+ - resets
+
+examples:
+ - |
+ #include <dt-bindings/pinctrl/rzv2m-pinctrl.h>
+ #include <dt-bindings/clock/r9a09g011-cpg.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ pinctrl: pinctrl@b6250000 {
+ compatible = "renesas,r9a09g011-pinctrl";
+ reg = <0xb6250000 0x800>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pinctrl 0 0 352>;
+ interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD R9A09G011_PFC_PCLK>;
+ resets = <&cpg R9A09G011_PFC_PRESETN>;
+ power-domains = <&cpg>;
+
+ i2c2_pins: i2c2 {
+ pinmux = <RZV2M_PORT_PINMUX(3, 8, 2)>, /* SDA */
+ <RZV2M_PORT_PINMUX(3, 9, 2)>; /* SCL */
+ };
+ };
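A client device would then pick up the i2c2 group defined in the example above via the standard pinctrl consumer properties; a short hypothetical sketch (the &i2c2 label is illustrative):

    &i2c2 {
        pinctrl-0 = <&i2c2_pins>;
        pinctrl-names = "default";
        status = "okay";
    };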
diff --git a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
index 335ffc1353b5..d35dcc4f0242 100644
--- a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
@@ -59,6 +59,7 @@ properties:
patternProperties:
'^gpio@[0-9a-f]*$':
type: object
+ additionalProperties: false
properties:
gpio-controller: true
'#gpio-cells':
@@ -68,8 +69,7 @@ patternProperties:
maxItems: 1
clocks:
maxItems: 1
- reset:
- minItems: 1
+ resets:
maxItems: 1
gpio-ranges:
minItems: 1
diff --git a/Documentation/devicetree/bindings/pinctrl/sunplus,sp7021-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/sunplus,sp7021-pinctrl.yaml
index d8e75b3e64f1..15092fdd4b5b 100644
--- a/Documentation/devicetree/bindings/pinctrl/sunplus,sp7021-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/sunplus,sp7021-pinctrl.yaml
@@ -288,11 +288,14 @@ required:
additionalProperties: false
+allOf:
+ - $ref: "pinctrl.yaml#"
+
examples:
- |
#include <dt-bindings/pinctrl/sppctl-sp7021.h>
- pinctl@9c000100 {
+ pinctrl@9c000100 {
compatible = "sunplus,sp7021-pctl";
reg = <0x9c000100 0x100>, <0x9c000300 0x100>,
<0x9c0032e4 0x1c>, <0x9c000080 0x20>;
diff --git a/Documentation/devicetree/bindings/pinctrl/xlnx,zynqmp-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/xlnx,zynqmp-pinctrl.yaml
index 2722dc7bb03d..1e2b9b627b12 100644
--- a/Documentation/devicetree/bindings/pinctrl/xlnx,zynqmp-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/xlnx,zynqmp-pinctrl.yaml
@@ -274,6 +274,10 @@ patternProperties:
slew-rate:
enum: [0, 1]
+ output-enable:
+ description:
+ This will internally disable the tri-state for MIO pins.
+
drive-strength:
description:
Selects the drive strength for MIO pins, in mA.
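As a sketch, the new output-enable flag sits alongside the existing configuration properties in a ZynqMP pin configuration subnode; group names and values below are illustrative, not taken from this patch:

    pinctrl_uart0_default: uart0-default {
        conf {
            groups = "uart0_4_grp";
            slew-rate = <1>;
            power-source = <1>;
            output-enable;
        };
    };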
diff --git a/Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml b/Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml
index f005abac7079..5390e988a934 100644
--- a/Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml
+++ b/Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml
@@ -2,8 +2,8 @@
# Copyright 2019 BayLibre, SAS
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/amlogic,meson-ee-pwrc.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/amlogic,meson-ee-pwrc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Amlogic Meson Everything-Else Power Domains
@@ -17,7 +17,7 @@ description: |+
- compatible: Should be the following:
"amlogic,meson-gx-hhi-sysctrl", "simple-mfd", "syscon"
- Refer to the the bindings described in
+ Refer to the bindings described in
Documentation/devicetree/bindings/mfd/syscon.yaml
properties:
diff --git a/Documentation/devicetree/bindings/power/amlogic,meson-sec-pwrc.yaml b/Documentation/devicetree/bindings/power/amlogic,meson-sec-pwrc.yaml
index 86e5f6513bb3..eab21bb2050a 100644
--- a/Documentation/devicetree/bindings/power/amlogic,meson-sec-pwrc.yaml
+++ b/Documentation/devicetree/bindings/power/amlogic,meson-sec-pwrc.yaml
@@ -3,8 +3,8 @@
# Author: Jianxin Pan <jianxin.pan@amlogic.com>
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/amlogic,meson-sec-pwrc.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/amlogic,meson-sec-pwrc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Amlogic Meson Secure Power Domains
diff --git a/Documentation/devicetree/bindings/power/apple,pmgr-pwrstate.yaml b/Documentation/devicetree/bindings/power/apple,pmgr-pwrstate.yaml
index 19a194980142..94d369eb85de 100644
--- a/Documentation/devicetree/bindings/power/apple,pmgr-pwrstate.yaml
+++ b/Documentation/devicetree/bindings/power/apple,pmgr-pwrstate.yaml
@@ -10,7 +10,7 @@ maintainers:
- Hector Martin <marcan@marcan.st>
allOf:
- - $ref: "power-domain.yaml#"
+ - $ref: power-domain.yaml#
description: |
Apple SoCs include PMGR blocks responsible for power management,
diff --git a/Documentation/devicetree/bindings/power/brcm,bcm63xx-power.yaml b/Documentation/devicetree/bindings/power/brcm,bcm63xx-power.yaml
index 63b15ac6dde4..d867bd6976d8 100644
--- a/Documentation/devicetree/bindings/power/brcm,bcm63xx-power.yaml
+++ b/Documentation/devicetree/bindings/power/brcm,bcm63xx-power.yaml
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/brcm,bcm63xx-power.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/brcm,bcm63xx-power.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: BCM63xx power domain driver
diff --git a/Documentation/devicetree/bindings/power/renesas,apmu.yaml b/Documentation/devicetree/bindings/power/renesas,apmu.yaml
index d77fc88050c8..f2cc89e7f4e4 100644
--- a/Documentation/devicetree/bindings/power/renesas,apmu.yaml
+++ b/Documentation/devicetree/bindings/power/renesas,apmu.yaml
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/renesas,apmu.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/renesas,apmu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Renesas Advanced Power Management Unit
diff --git a/Documentation/devicetree/bindings/power/renesas,rcar-sysc.yaml b/Documentation/devicetree/bindings/power/renesas,rcar-sysc.yaml
index 8d56bedd3390..0720b54881c2 100644
--- a/Documentation/devicetree/bindings/power/renesas,rcar-sysc.yaml
+++ b/Documentation/devicetree/bindings/power/renesas,rcar-sysc.yaml
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/renesas,rcar-sysc.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/renesas,rcar-sysc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Renesas R-Car and RZ/G System Controller
diff --git a/Documentation/devicetree/bindings/power/reset/msm-poweroff.txt b/Documentation/devicetree/bindings/power/reset/msm-poweroff.txt
deleted file mode 100644
index ce44ad357565..000000000000
--- a/Documentation/devicetree/bindings/power/reset/msm-poweroff.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-MSM Restart Driver
-
-A power supply hold (ps-hold) bit is set to power the msm chipsets.
-Clearing that bit allows us to restart/poweroff. The difference
-between poweroff and restart is determined by unique power manager IC
-settings.
-
-Required Properties:
--compatible: "qcom,pshold"
--reg: Specifies the physical address of the ps-hold register
-
-Example:
-
- restart@fc4ab000 {
- compatible = "qcom,pshold";
- reg = <0xfc4ab000 0x4>;
- };
diff --git a/Documentation/devicetree/bindings/power/reset/qcom,pon.yaml b/Documentation/devicetree/bindings/power/reset/qcom,pon.yaml
index 353f155df0f4..e7b436d2e757 100644
--- a/Documentation/devicetree/bindings/power/reset/qcom,pon.yaml
+++ b/Documentation/devicetree/bindings/power/reset/qcom,pon.yaml
@@ -30,11 +30,15 @@ properties:
pwrkey:
type: object
- $ref: "../../input/qcom,pm8941-pwrkey.yaml#"
+ $ref: /schemas/input/qcom,pm8941-pwrkey.yaml#
resin:
type: object
- $ref: "../../input/qcom,pm8941-pwrkey.yaml#"
+ $ref: /schemas/input/qcom,pm8941-pwrkey.yaml#
+
+ watchdog:
+ type: object
+ $ref: /schemas/watchdog/qcom,pm8916-wdt.yaml
required:
- compatible
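A hedged sketch of a PON node carrying the new watchdog child is shown below; it is loosely modelled on how PM8916-based device trees typically wire this up, and the addresses, mode values and interrupt specifier are illustrative fragments (IRQ_TYPE_EDGE_RISING assumes the usual dt-bindings interrupt header):

    pon@800 {
        compatible = "qcom,pm8916-pon";
        reg = <0x800>;
        mode-bootloader = <0x2>;
        mode-recovery = <0x1>;

        watchdog {
            compatible = "qcom,pm8916-wdt";
            interrupts = <0x0 0x8 6 IRQ_TYPE_EDGE_RISING>;
            timeout-sec = <60>;
        };
    };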
diff --git a/Documentation/devicetree/bindings/power/reset/qcom,pshold.yaml b/Documentation/devicetree/bindings/power/reset/qcom,pshold.yaml
new file mode 100644
index 000000000000..527962d54a8f
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/reset/qcom,pshold.yaml
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/reset/qcom,pshold.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm SoC restart and power off
+
+maintainers:
+ - Bjorn Andersson <bjorn.andersson@linaro.org>
+
+description:
+ A power supply hold (ps-hold) bit is set to power the Qualcomm chipsets.
+ Clearing that bit allows us to restart/power off. The difference between
+ power off and restart is determined by unique power manager IC settings.
+
+properties:
+ compatible:
+ const: qcom,pshold
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ reset-controller@fc4ab000 {
+ compatible = "qcom,pshold";
+ reg = <0xfc4ab000 0x4>;
+ };
diff --git a/Documentation/devicetree/bindings/power/reset/regulator-poweroff.yaml b/Documentation/devicetree/bindings/power/reset/regulator-poweroff.yaml
index 03bd1fa5a623..e9417557cd30 100644
--- a/Documentation/devicetree/bindings/power/reset/regulator-poweroff.yaml
+++ b/Documentation/devicetree/bindings/power/reset/regulator-poweroff.yaml
@@ -16,7 +16,7 @@ description: |
properties:
compatible:
- const: "regulator-poweroff"
+ const: regulator-poweroff
cpu-supply:
description:
diff --git a/Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.yaml b/Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.yaml
index 68d7c14a7163..46de35861738 100644
--- a/Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.yaml
+++ b/Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.yaml
@@ -15,7 +15,7 @@ description: |
properties:
compatible:
- const: "xlnx,zynqmp-power"
+ const: xlnx,zynqmp-power
interrupts:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/power/supply/active-semi,act8945a-charger.yaml b/Documentation/devicetree/bindings/power/supply/active-semi,act8945a-charger.yaml
index 3f74bc19415d..5220d9cb16d8 100644
--- a/Documentation/devicetree/bindings/power/supply/active-semi,act8945a-charger.yaml
+++ b/Documentation/devicetree/bindings/power/supply/active-semi,act8945a-charger.yaml
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/active-semi,act8945a-charger.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/active-semi,act8945a-charger.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Active-semi ACT8945A Charger Function
diff --git a/Documentation/devicetree/bindings/power/supply/bq2415x.yaml b/Documentation/devicetree/bindings/power/supply/bq2415x.yaml
index 118cf484cc69..a3c00e078918 100644
--- a/Documentation/devicetree/bindings/power/supply/bq2415x.yaml
+++ b/Documentation/devicetree/bindings/power/supply/bq2415x.yaml
@@ -2,8 +2,8 @@
# Copyright (C) 2021 Sebastian Reichel
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/bq2415x.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/bq2415x.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Binding for TI bq2415x Li-Ion Charger
diff --git a/Documentation/devicetree/bindings/power/supply/bq24190.yaml b/Documentation/devicetree/bindings/power/supply/bq24190.yaml
index 0d7cbbdf808b..4884ec90e2b8 100644
--- a/Documentation/devicetree/bindings/power/supply/bq24190.yaml
+++ b/Documentation/devicetree/bindings/power/supply/bq24190.yaml
@@ -2,8 +2,8 @@
# Copyright (C) 2021 Sebastian Reichel
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/bq24190.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/bq24190.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Binding for TI BQ2419x Li-Ion Battery Charger
@@ -28,7 +28,7 @@ properties:
maxItems: 1
usb-otg-vbus:
- type: object
+ $ref: /schemas/regulator/regulator.yaml#
description: |
Regulator that is used to control the VBUS voltage direction for
either USB host mode or for charging on the OTG port
diff --git a/Documentation/devicetree/bindings/power/supply/bq24257.yaml b/Documentation/devicetree/bindings/power/supply/bq24257.yaml
index 3a0f6cd9015a..c7406bef0fa8 100644
--- a/Documentation/devicetree/bindings/power/supply/bq24257.yaml
+++ b/Documentation/devicetree/bindings/power/supply/bq24257.yaml
@@ -2,8 +2,8 @@
# Copyright (C) 2021 Sebastian Reichel
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/bq24257.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/bq24257.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Binding for bq24250, bq24251 and bq24257 Li-Ion Charger
diff --git a/Documentation/devicetree/bindings/power/supply/bq24735.yaml b/Documentation/devicetree/bindings/power/supply/bq24735.yaml
index 131be6782c4b..dd9176ce71b3 100644
--- a/Documentation/devicetree/bindings/power/supply/bq24735.yaml
+++ b/Documentation/devicetree/bindings/power/supply/bq24735.yaml
@@ -2,8 +2,8 @@
# Copyright (C) 2021 Sebastian Reichel
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/bq24735.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/bq24735.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Binding for TI BQ24735 Li-Ion Battery Charger
diff --git a/Documentation/devicetree/bindings/power/supply/bq2515x.yaml b/Documentation/devicetree/bindings/power/supply/bq2515x.yaml
index 813d6afde606..1a1b240034ef 100644
--- a/Documentation/devicetree/bindings/power/supply/bq2515x.yaml
+++ b/Documentation/devicetree/bindings/power/supply/bq2515x.yaml
@@ -2,14 +2,13 @@
# Copyright (C) 2020 Texas Instruments Incorporated
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/bq2515x.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/bq2515x.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: TI bq2515x 500-mA Linear charger family
maintainers:
- - Dan Murphy <dmurphy@ti.com>
- - Ricardo Rivera-Matos <r-rivera-matos@ti.com>
+ - Andrew Davis <afd@ti.com>
description: |
The BQ2515x family is a highly integrated battery charge management IC that
diff --git a/Documentation/devicetree/bindings/power/supply/bq256xx.yaml b/Documentation/devicetree/bindings/power/supply/bq256xx.yaml
index 92ec7ed25668..82f382a7ffb3 100644
--- a/Documentation/devicetree/bindings/power/supply/bq256xx.yaml
+++ b/Documentation/devicetree/bindings/power/supply/bq256xx.yaml
@@ -2,13 +2,13 @@
# Copyright (C) 2020 Texas Instruments Incorporated
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/bq256xx.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/bq256xx.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: TI bq256xx Switch Mode Buck Charger
maintainers:
- - Ricardo Rivera-Matos <r-rivera-matos@ti.com>
+ - Andrew Davis <afd@ti.com>
description: |
The bq256xx devices are a family of highly-integrated battery charge
diff --git a/Documentation/devicetree/bindings/power/supply/bq25890.yaml b/Documentation/devicetree/bindings/power/supply/bq25890.yaml
index bf823b615439..204c0147188f 100644
--- a/Documentation/devicetree/bindings/power/supply/bq25890.yaml
+++ b/Documentation/devicetree/bindings/power/supply/bq25890.yaml
@@ -2,8 +2,8 @@
# Copyright (C) 2021 Sebastian Reichel
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/bq25890.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/bq25890.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Binding for bq25890, bq25892, bq25895 and bq25896 Li-Ion Charger
diff --git a/Documentation/devicetree/bindings/power/supply/bq25980.yaml b/Documentation/devicetree/bindings/power/supply/bq25980.yaml
index 8367a1fd4057..b687b8bcd705 100644
--- a/Documentation/devicetree/bindings/power/supply/bq25980.yaml
+++ b/Documentation/devicetree/bindings/power/supply/bq25980.yaml
@@ -2,14 +2,13 @@
# Copyright (C) 2020 Texas Instruments Incorporated
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/bq25980.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/bq25980.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: TI BQ25980 Flash Charger
maintainers:
- - Dan Murphy <dmurphy@ti.com>
- - Ricardo Rivera-Matos <r-rivera-matos@ti.com>
+ - Andrew Davis <afd@ti.com>
description: |
The BQ25980, BQ25975, and BQ25960 are a series of flash chargers intended
diff --git a/Documentation/devicetree/bindings/power/supply/bq27xxx.yaml b/Documentation/devicetree/bindings/power/supply/bq27xxx.yaml
index 6af41da3e055..65fc6049efc1 100644
--- a/Documentation/devicetree/bindings/power/supply/bq27xxx.yaml
+++ b/Documentation/devicetree/bindings/power/supply/bq27xxx.yaml
@@ -2,8 +2,8 @@
# Copyright (C) 2020 Texas Instruments Incorporated
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/bq27xxx.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/bq27xxx.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: TI BQ27XXX fuel gauge family
diff --git a/Documentation/devicetree/bindings/power/supply/charger-manager.yaml b/Documentation/devicetree/bindings/power/supply/charger-manager.yaml
index fbb2204769aa..5af1e0beaf29 100644
--- a/Documentation/devicetree/bindings/power/supply/charger-manager.yaml
+++ b/Documentation/devicetree/bindings/power/supply/charger-manager.yaml
@@ -50,6 +50,7 @@ properties:
cm-battery-stat:
description: battery status
+ $ref: /schemas/types.yaml#/definitions/uint32
enum:
- 0 # battery always present
- 1 # no battery
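For reference, with the explicit uint32 type the property remains a single cell in a device tree, for example:

    cm-battery-stat = <1>;    /* no battery */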
diff --git a/Documentation/devicetree/bindings/power/supply/cpcap-battery.yaml b/Documentation/devicetree/bindings/power/supply/cpcap-battery.yaml
index 7153fd4ce55f..694bfdb5815c 100644
--- a/Documentation/devicetree/bindings/power/supply/cpcap-battery.yaml
+++ b/Documentation/devicetree/bindings/power/supply/cpcap-battery.yaml
@@ -2,8 +2,8 @@
# Copyright (C) 2021 Sebastian Reichel
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/cpcap-battery.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/cpcap-battery.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Motorola CPCAP PMIC battery
diff --git a/Documentation/devicetree/bindings/power/supply/cpcap-charger.yaml b/Documentation/devicetree/bindings/power/supply/cpcap-charger.yaml
index cb6353683d7b..7e6bf30a0107 100644
--- a/Documentation/devicetree/bindings/power/supply/cpcap-charger.yaml
+++ b/Documentation/devicetree/bindings/power/supply/cpcap-charger.yaml
@@ -2,8 +2,8 @@
# Copyright (C) 2021 Sebastian Reichel
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/cpcap-charger.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/cpcap-charger.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Motorola CPCAP PMIC charger
diff --git a/Documentation/devicetree/bindings/power/supply/dlg,da9150-charger.yaml b/Documentation/devicetree/bindings/power/supply/dlg,da9150-charger.yaml
index 96336b05d76d..b289388952bf 100644
--- a/Documentation/devicetree/bindings/power/supply/dlg,da9150-charger.yaml
+++ b/Documentation/devicetree/bindings/power/supply/dlg,da9150-charger.yaml
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/dlg,da9150-charger.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/dlg,da9150-charger.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Dialog Semiconductor DA9150 Charger Power Supply bindings
diff --git a/Documentation/devicetree/bindings/power/supply/dlg,da9150-fuel-gauge.yaml b/Documentation/devicetree/bindings/power/supply/dlg,da9150-fuel-gauge.yaml
index 30c2fff7cf92..d47caf59d204 100644
--- a/Documentation/devicetree/bindings/power/supply/dlg,da9150-fuel-gauge.yaml
+++ b/Documentation/devicetree/bindings/power/supply/dlg,da9150-fuel-gauge.yaml
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/dlg,da9150-fuel-gauge.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/dlg,da9150-fuel-gauge.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Dialog Semiconductor DA9150 Fuel-Gauge Power Supply bindings
diff --git a/Documentation/devicetree/bindings/power/supply/ingenic,battery.yaml b/Documentation/devicetree/bindings/power/supply/ingenic,battery.yaml
index 76c227a7cd5c..46527038bf22 100644
--- a/Documentation/devicetree/bindings/power/supply/ingenic,battery.yaml
+++ b/Documentation/devicetree/bindings/power/supply/ingenic,battery.yaml
@@ -2,8 +2,8 @@
# Copyright 2019-2020 Artur Rojek
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/ingenic,battery.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/ingenic,battery.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Ingenic JZ47xx battery bindings
diff --git a/Documentation/devicetree/bindings/power/supply/isp1704.yaml b/Documentation/devicetree/bindings/power/supply/isp1704.yaml
index 4c91da70011d..7e3449ed70d4 100644
--- a/Documentation/devicetree/bindings/power/supply/isp1704.yaml
+++ b/Documentation/devicetree/bindings/power/supply/isp1704.yaml
@@ -2,8 +2,8 @@
# Copyright (C) 2021 Sebastian Reichel
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/isp1704.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/isp1704.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Binding for NXP ISP1704 USB Charger Detection
diff --git a/Documentation/devicetree/bindings/power/supply/lego,ev3-battery.yaml b/Documentation/devicetree/bindings/power/supply/lego,ev3-battery.yaml
index 518eabb63588..a99d989f1450 100644
--- a/Documentation/devicetree/bindings/power/supply/lego,ev3-battery.yaml
+++ b/Documentation/devicetree/bindings/power/supply/lego,ev3-battery.yaml
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/lego,ev3-battery.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/lego,ev3-battery.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: LEGO MINDSTORMS EV3 Battery
diff --git a/Documentation/devicetree/bindings/power/supply/lltc,lt3651-charger.yaml b/Documentation/devicetree/bindings/power/supply/lltc,lt3651-charger.yaml
index e2d8d2aebb73..76cedf95a12c 100644
--- a/Documentation/devicetree/bindings/power/supply/lltc,lt3651-charger.yaml
+++ b/Documentation/devicetree/bindings/power/supply/lltc,lt3651-charger.yaml
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/lltc,lt3651-charger.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/lltc,lt3651-charger.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Analog Devices LT3651 Charger Power Supply bindings
diff --git a/Documentation/devicetree/bindings/power/supply/lltc,ltc294x.yaml b/Documentation/devicetree/bindings/power/supply/lltc,ltc294x.yaml
index 043bf378040f..109b41a0d56c 100644
--- a/Documentation/devicetree/bindings/power/supply/lltc,ltc294x.yaml
+++ b/Documentation/devicetree/bindings/power/supply/lltc,ltc294x.yaml
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/lltc,ltc294x.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/lltc,ltc294x.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Binding for LTC2941, LTC2942, LTC2943 and LTC2944 battery fuel gauges
diff --git a/Documentation/devicetree/bindings/power/supply/ltc4162-l.yaml b/Documentation/devicetree/bindings/power/supply/ltc4162-l.yaml
index 6d7aa97a6475..cfffaeef8b09 100644
--- a/Documentation/devicetree/bindings/power/supply/ltc4162-l.yaml
+++ b/Documentation/devicetree/bindings/power/supply/ltc4162-l.yaml
@@ -2,8 +2,8 @@
# Copyright (C) 2020 Topic Embedded Products
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/ltc4162-l.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/ltc4162-l.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Linear Technology (Analog Devices) LTC4162-L Charger
diff --git a/Documentation/devicetree/bindings/power/supply/maxim,ds2760.yaml b/Documentation/devicetree/bindings/power/supply/maxim,ds2760.yaml
index 818647edf63d..c838efcf7e16 100644
--- a/Documentation/devicetree/bindings/power/supply/maxim,ds2760.yaml
+++ b/Documentation/devicetree/bindings/power/supply/maxim,ds2760.yaml
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/maxim,ds2760.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/maxim,ds2760.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Maxim DS2760 DT bindings
diff --git a/Documentation/devicetree/bindings/power/supply/maxim,max14656.yaml b/Documentation/devicetree/bindings/power/supply/maxim,max14656.yaml
index 0a41078ebd99..070ef6f96e60 100644
--- a/Documentation/devicetree/bindings/power/supply/maxim,max14656.yaml
+++ b/Documentation/devicetree/bindings/power/supply/maxim,max14656.yaml
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/maxim,max14656.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/maxim,max14656.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Maxim MAX14656 DT bindings
diff --git a/Documentation/devicetree/bindings/power/supply/maxim,max17040.yaml b/Documentation/devicetree/bindings/power/supply/maxim,max17040.yaml
index 6b4588a3253b..3a529326ecbd 100644
--- a/Documentation/devicetree/bindings/power/supply/maxim,max17040.yaml
+++ b/Documentation/devicetree/bindings/power/supply/maxim,max17040.yaml
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/maxim,max17040.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/maxim,max17040.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Maxim 17040 fuel gauge series
diff --git a/Documentation/devicetree/bindings/power/supply/maxim,max17042.yaml b/Documentation/devicetree/bindings/power/supply/maxim,max17042.yaml
index 971b53c58cc6..aff5d0792e0f 100644
--- a/Documentation/devicetree/bindings/power/supply/maxim,max17042.yaml
+++ b/Documentation/devicetree/bindings/power/supply/maxim,max17042.yaml
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/maxim,max17042.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/maxim,max17042.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Maxim 17042 fuel gauge series
diff --git a/Documentation/devicetree/bindings/power/supply/maxim,max8903.yaml b/Documentation/devicetree/bindings/power/supply/maxim,max8903.yaml
index 4828ca0842ae..a8d625f285f1 100644
--- a/Documentation/devicetree/bindings/power/supply/maxim,max8903.yaml
+++ b/Documentation/devicetree/bindings/power/supply/maxim,max8903.yaml
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/maxim,max8903.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/maxim,max8903.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Maxim Semiconductor MAX8903 Battery Charger
diff --git a/Documentation/devicetree/bindings/power/supply/nokia,n900-battery.yaml b/Documentation/devicetree/bindings/power/supply/nokia,n900-battery.yaml
index 4a1489f2b28d..5178e6207271 100644
--- a/Documentation/devicetree/bindings/power/supply/nokia,n900-battery.yaml
+++ b/Documentation/devicetree/bindings/power/supply/nokia,n900-battery.yaml
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/nokia,n900-battery.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/nokia,n900-battery.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Nokia N900 battery
diff --git a/Documentation/devicetree/bindings/power/supply/olpc-battery.yaml b/Documentation/devicetree/bindings/power/supply/olpc-battery.yaml
index 0bd7bf3b8e1b..dd89e2532a07 100644
--- a/Documentation/devicetree/bindings/power/supply/olpc-battery.yaml
+++ b/Documentation/devicetree/bindings/power/supply/olpc-battery.yaml
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/olpc-battery.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/olpc-battery.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: OLPC Battery
diff --git a/Documentation/devicetree/bindings/power/supply/power-supply.yaml b/Documentation/devicetree/bindings/power/supply/power-supply.yaml
index 9a490fbd32e1..2f672e6e8d72 100644
--- a/Documentation/devicetree/bindings/power/supply/power-supply.yaml
+++ b/Documentation/devicetree/bindings/power/supply/power-supply.yaml
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/power-supply.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/power-supply.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Power Supply Core Support
diff --git a/Documentation/devicetree/bindings/power/supply/qcom,pm8941-charger.yaml b/Documentation/devicetree/bindings/power/supply/qcom,pm8941-charger.yaml
index caeff68c66d5..cbac55d3cb92 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom,pm8941-charger.yaml
+++ b/Documentation/devicetree/bindings/power/supply/qcom,pm8941-charger.yaml
@@ -117,11 +117,18 @@ properties:
be done externally to fully comply with the JEITA safety guidelines if this flag
is set.
+ usb-charge-current-limit:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 100000
+ maximum: 2500000
+ description: |
+ Default USB charge current limit in uA.
+
usb-otg-in-supply:
description: Reference to the regulator supplying power to the USB_OTG_IN pin.
otg-vbus:
- type: object
+ $ref: /schemas/regulator/regulator.yaml#
description: |
This node defines a regulator used to control the direction of VBUS voltage.
Specifically whether to supply voltage to VBUS for host mode operation of the OTG port,
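A rough sketch of how the new current limit and the regulator-typed otg-vbus child would appear in a charger node; the surrounding node layout is assumed from typical PM8941 device trees and the limit value is illustrative:

    charger@1000 {
        compatible = "qcom,pm8941-charger";
        reg = <0x1000>;
        /* interrupts and other charger properties omitted */
        usb-charge-current-limit = <500000>;    /* 500 mA default USB input limit */

        usb_otg_vbus: otg-vbus {};
    };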
diff --git a/Documentation/devicetree/bindings/power/supply/richtek,rt5033-battery.yaml b/Documentation/devicetree/bindings/power/supply/richtek,rt5033-battery.yaml
index ae647d3355a2..756c16d1727d 100644
--- a/Documentation/devicetree/bindings/power/supply/richtek,rt5033-battery.yaml
+++ b/Documentation/devicetree/bindings/power/supply/richtek,rt5033-battery.yaml
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/richtek,rt5033-battery.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/richtek,rt5033-battery.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Richtek RT5033 PMIC Fuel Gauge
diff --git a/Documentation/devicetree/bindings/power/supply/richtek,rt9455.yaml b/Documentation/devicetree/bindings/power/supply/richtek,rt9455.yaml
index e1c233462f29..bce15101318e 100644
--- a/Documentation/devicetree/bindings/power/supply/richtek,rt9455.yaml
+++ b/Documentation/devicetree/bindings/power/supply/richtek,rt9455.yaml
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/richtek,rt9455.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/richtek,rt9455.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Binding for Richtek rt9455 battery charger
diff --git a/Documentation/devicetree/bindings/power/supply/sc2731-charger.yaml b/Documentation/devicetree/bindings/power/supply/sc2731-charger.yaml
index b62c2431f94e..eeb043f9bb4f 100644
--- a/Documentation/devicetree/bindings/power/supply/sc2731-charger.yaml
+++ b/Documentation/devicetree/bindings/power/supply/sc2731-charger.yaml
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/sc2731-charger.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/sc2731-charger.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Spreadtrum SC2731 PMICs battery charger binding
diff --git a/Documentation/devicetree/bindings/power/supply/sc27xx-fg.yaml b/Documentation/devicetree/bindings/power/supply/sc27xx-fg.yaml
index e019cffd1f38..d90a838a1744 100644
--- a/Documentation/devicetree/bindings/power/supply/sc27xx-fg.yaml
+++ b/Documentation/devicetree/bindings/power/supply/sc27xx-fg.yaml
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/sc27xx-fg.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/sc27xx-fg.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Spreadtrum SC27XX PMICs Fuel Gauge Unit Power Supply Bindings
diff --git a/Documentation/devicetree/bindings/power/supply/stericsson,ab8500-btemp.yaml b/Documentation/devicetree/bindings/power/supply/stericsson,ab8500-btemp.yaml
index 4b8a00cec39c..525abdfb3e2d 100644
--- a/Documentation/devicetree/bindings/power/supply/stericsson,ab8500-btemp.yaml
+++ b/Documentation/devicetree/bindings/power/supply/stericsson,ab8500-btemp.yaml
@@ -2,8 +2,8 @@
# Copyright (C) 2021 Sebastian Reichel
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/stericsson,ab8500-btemp.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/stericsson,ab8500-btemp.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: AB8500 Battery Temperature Monitor
diff --git a/Documentation/devicetree/bindings/power/supply/stericsson,ab8500-chargalg.yaml b/Documentation/devicetree/bindings/power/supply/stericsson,ab8500-chargalg.yaml
index 6799224f7fb4..10bbdcfc87b6 100644
--- a/Documentation/devicetree/bindings/power/supply/stericsson,ab8500-chargalg.yaml
+++ b/Documentation/devicetree/bindings/power/supply/stericsson,ab8500-chargalg.yaml
@@ -2,8 +2,8 @@
# Copyright (C) 2021 Sebastian Reichel
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/stericsson,ab8500-chargalg.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/stericsson,ab8500-chargalg.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: AB8500 Charging Algorithm
diff --git a/Documentation/devicetree/bindings/power/supply/stericsson,ab8500-charger.yaml b/Documentation/devicetree/bindings/power/supply/stericsson,ab8500-charger.yaml
index 9518eb7289d0..e33329b3af61 100644
--- a/Documentation/devicetree/bindings/power/supply/stericsson,ab8500-charger.yaml
+++ b/Documentation/devicetree/bindings/power/supply/stericsson,ab8500-charger.yaml
@@ -2,8 +2,8 @@
# Copyright (C) 2021 Sebastian Reichel
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/stericsson,ab8500-charger.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/stericsson,ab8500-charger.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: AB8500 Charger
diff --git a/Documentation/devicetree/bindings/power/supply/stericsson,ab8500-fg.yaml b/Documentation/devicetree/bindings/power/supply/stericsson,ab8500-fg.yaml
index 2ce408a7c0ae..6a724ca90e99 100644
--- a/Documentation/devicetree/bindings/power/supply/stericsson,ab8500-fg.yaml
+++ b/Documentation/devicetree/bindings/power/supply/stericsson,ab8500-fg.yaml
@@ -2,8 +2,8 @@
# Copyright (C) 2021 Sebastian Reichel
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/stericsson,ab8500-fg.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/stericsson,ab8500-fg.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: AB8500 Fuel Gauge
diff --git a/Documentation/devicetree/bindings/power/supply/summit,smb347-charger.yaml b/Documentation/devicetree/bindings/power/supply/summit,smb347-charger.yaml
index 20862cdfc116..2d552becbfe6 100644
--- a/Documentation/devicetree/bindings/power/supply/summit,smb347-charger.yaml
+++ b/Documentation/devicetree/bindings/power/supply/summit,smb347-charger.yaml
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/summit,smb347-charger.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/summit,smb347-charger.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Battery charger driver for SMB345, SMB347 and SMB358
@@ -82,7 +82,7 @@ properties:
- 1 # SMB3XX_SYSOK_INOK_ACTIVE_HIGH
usb-vbus:
- $ref: "../../regulator/regulator.yaml#"
+ $ref: /schemas/regulator/regulator.yaml#
type: object
properties:
diff --git a/Documentation/devicetree/bindings/power/supply/tps65090-charger.yaml b/Documentation/devicetree/bindings/power/supply/tps65090-charger.yaml
index f2dd38bf078c..586745426341 100644
--- a/Documentation/devicetree/bindings/power/supply/tps65090-charger.yaml
+++ b/Documentation/devicetree/bindings/power/supply/tps65090-charger.yaml
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/tps65090-charger.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/tps65090-charger.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: TPS65090 Frontend PMU with Switchmode Charger
diff --git a/Documentation/devicetree/bindings/power/supply/tps65217-charger.yaml b/Documentation/devicetree/bindings/power/supply/tps65217-charger.yaml
index 2c2fe883bb48..7ccf0cdffd3e 100644
--- a/Documentation/devicetree/bindings/power/supply/tps65217-charger.yaml
+++ b/Documentation/devicetree/bindings/power/supply/tps65217-charger.yaml
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/tps65217-charger.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/tps65217-charger.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: TPS65217 Charger
diff --git a/Documentation/devicetree/bindings/power/supply/twl4030-charger.yaml b/Documentation/devicetree/bindings/power/supply/twl4030-charger.yaml
index fe3f32a0ea79..d8d3154f9cb1 100644
--- a/Documentation/devicetree/bindings/power/supply/twl4030-charger.yaml
+++ b/Documentation/devicetree/bindings/power/supply/twl4030-charger.yaml
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/twl4030-charger.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/twl4030-charger.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: TWL4030 BCI (Battery Charger Interface)
diff --git a/Documentation/devicetree/bindings/power/supply/x-powers,axp20x-ac-power-supply.yaml b/Documentation/devicetree/bindings/power/supply/x-powers,axp20x-ac-power-supply.yaml
index de6a23aee977..5c8369fd3ef7 100644
--- a/Documentation/devicetree/bindings/power/supply/x-powers,axp20x-ac-power-supply.yaml
+++ b/Documentation/devicetree/bindings/power/supply/x-powers,axp20x-ac-power-supply.yaml
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/x-powers,axp20x-ac-power-supply.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/x-powers,axp20x-ac-power-supply.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: AXP20x AC power-supply
diff --git a/Documentation/devicetree/bindings/power/supply/x-powers,axp20x-battery-power-supply.yaml b/Documentation/devicetree/bindings/power/supply/x-powers,axp20x-battery-power-supply.yaml
index d055428ae39f..e0b95ecbbebd 100644
--- a/Documentation/devicetree/bindings/power/supply/x-powers,axp20x-battery-power-supply.yaml
+++ b/Documentation/devicetree/bindings/power/supply/x-powers,axp20x-battery-power-supply.yaml
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/x-powers,axp20x-battery-power-supply.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/x-powers,axp20x-battery-power-supply.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: AXP20x Battery power-supply
diff --git a/Documentation/devicetree/bindings/power/supply/x-powers,axp20x-usb-power-supply.yaml b/Documentation/devicetree/bindings/power/supply/x-powers,axp20x-usb-power-supply.yaml
index 0c371b55c9e1..3ce648dd91bd 100644
--- a/Documentation/devicetree/bindings/power/supply/x-powers,axp20x-usb-power-supply.yaml
+++ b/Documentation/devicetree/bindings/power/supply/x-powers,axp20x-usb-power-supply.yaml
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/power/supply/x-powers,axp20x-usb-power-supply.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$id: http://devicetree.org/schemas/power/supply/x-powers,axp20x-usb-power-supply.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: AXP20x USB power-supply
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/cpus.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpus.txt
index d63ab1dec16d..801c66069121 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/cpus.txt
+++ b/Documentation/devicetree/bindings/powerpc/fsl/cpus.txt
@@ -5,7 +5,7 @@ Copyright 2013 Freescale Semiconductor Inc.
Power Architecture CPUs in Freescale SOCs are represented in device trees as
per the definition in the Devicetree Specification.
-In addition to the the Devicetree Specification definitions, the properties
+In addition to the Devicetree Specification definitions, the properties
defined below may be present on CPU nodes.
PROPERTIES
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/mpc5200.txt b/Documentation/devicetree/bindings/powerpc/fsl/mpc5200.txt
index d096cf461d81..4571c857dbe5 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/mpc5200.txt
+++ b/Documentation/devicetree/bindings/powerpc/fsl/mpc5200.txt
@@ -172,7 +172,7 @@ Interrupt controller (fsl,mpc5200-pic) node
The mpc5200 pic binding splits hardware IRQ numbers into two levels. The
split reflects the layout of the PIC hardware itself, which groups
interrupts into one of three groups; CRIT, MAIN or PERP. Also, the
-Bestcomm dma engine has it's own set of interrupt sources which are
+Bestcomm dma engine has its own set of interrupt sources which are
cascaded off of peripheral interrupt 0, which the driver interprets as a
fourth group, SDMA.
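
The two-level split above translates into a three-cell interrupt specifier of the
form <group source sense>. The node names and cell values below are illustrative
assumptions drawn from typical MPC5200 device trees, not part of this patch:

    serial@2000 {
            interrupts = <2 1 0>;   /* PERP group, peripheral source 1 */
    };

    dma-controller@1200 {
            interrupts = <3 0 0>;   /* SDMA group: Bestcomm sources cascaded off peripheral IRQ 0 */
    };
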
diff --git a/Documentation/devicetree/bindings/powerpc/opal/power-mgt.txt b/Documentation/devicetree/bindings/powerpc/opal/power-mgt.txt
index 9d619e955576..d6658d3dd15e 100644
--- a/Documentation/devicetree/bindings/powerpc/opal/power-mgt.txt
+++ b/Documentation/devicetree/bindings/powerpc/opal/power-mgt.txt
@@ -39,7 +39,7 @@ otherwise. The length of all the property arrays must be the same.
- ibm,cpu-idle-state-flags:
Array of unsigned 32-bit values containing the values of the
- flags associated with the the aforementioned idle-states. The
+ flags associated with the aforementioned idle-states. The
flag bits are as follows:
0x00000001 /* Decrementer would stop */
0x00000002 /* Needs timebase restore */
diff --git a/Documentation/devicetree/bindings/regulator/nxp,pca9450-regulator.yaml b/Documentation/devicetree/bindings/regulator/nxp,pca9450-regulator.yaml
index b539781e39aa..835b53302db8 100644
--- a/Documentation/devicetree/bindings/regulator/nxp,pca9450-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/nxp,pca9450-regulator.yaml
@@ -47,12 +47,6 @@ properties:
description:
Properties for single LDO regulator.
- properties:
- regulator-name:
- pattern: "^LDO[1-5]$"
- description:
- should be "LDO1", ..., "LDO5"
-
unevaluatedProperties: false
"^BUCK[1-6]$":
@@ -62,11 +56,6 @@ properties:
Properties for single BUCK regulator.
properties:
- regulator-name:
- pattern: "^BUCK[1-6]$"
- description:
- should be "BUCK1", ..., "BUCK6"
-
nxp,dvs-run-voltage:
$ref: "/schemas/types.yaml#/definitions/uint32"
minimum: 600000
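
With the fixed regulator-name patterns removed, the name becomes a free-form,
board-specific label. A minimal sketch of a regulators sub-node (the names and
the voltage value are illustrative assumptions, not taken from this patch):

    regulators {
            BUCK1 {
                    regulator-name = "vdd-soc";
                    nxp,dvs-run-voltage = <850000>;
            };

            LDO1 {
                    regulator-name = "vdd-ldo1";
            };
    };
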
diff --git a/Documentation/devicetree/bindings/remoteproc/mtk,scp.yaml b/Documentation/devicetree/bindings/remoteproc/mtk,scp.yaml
index eec3b9c4c713..7e091eaffc18 100644
--- a/Documentation/devicetree/bindings/remoteproc/mtk,scp.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/mtk,scp.yaml
@@ -18,6 +18,7 @@ properties:
enum:
- mediatek,mt8183-scp
- mediatek,mt8186-scp
+ - mediatek,mt8188-scp
- mediatek,mt8192-scp
- mediatek,mt8195-scp
@@ -80,6 +81,7 @@ allOf:
enum:
- mediatek,mt8183-scp
- mediatek,mt8186-scp
+ - mediatek,mt8188-scp
then:
properties:
reg:
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,adsp.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,adsp.yaml
index 947f94548d0e..3072af5f9d79 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,adsp.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,adsp.yaml
@@ -67,13 +67,28 @@ properties:
minItems: 1
maxItems: 8
+ interconnects:
+ maxItems: 1
+
interrupts:
minItems: 5
- maxItems: 6
+ items:
+ - description: Watchdog interrupt
+ - description: Fatal interrupt
+ - description: Ready interrupt
+ - description: Handover interrupt
+ - description: Stop acknowledge interrupt
+ - description: Shutdown acknowledge interrupt
interrupt-names:
minItems: 5
- maxItems: 6
+ items:
+ - const: wdog
+ - const: fatal
+ - const: ready
+ - const: handover
+ - const: stop-ack
+ - const: shutdown-ack
resets:
minItems: 1
@@ -116,7 +131,6 @@ properties:
- description: Stop the modem
qcom,smem-state-names:
- $ref: /schemas/types.yaml#/definitions/string-array
description: The names of the state bits used for SMP2P output
items:
- const: stop
@@ -134,13 +148,13 @@ properties:
three offsets within syscon for q6, modem and nc halt registers.
smd-edge:
- type: object
+ $ref: /schemas/remoteproc/qcom,smd-edge.yaml#
description:
Qualcomm Shared Memory subnode which represents communication edge,
channels and devices related to the ADSP.
glink-edge:
- type: object
+ $ref: /schemas/remoteproc/qcom,glink-edge.yaml#
description:
Qualcomm G-Link subnode which represents communication edge, channels
and devices related to the ADSP.
@@ -315,19 +329,9 @@ allOf:
then:
properties:
interrupts:
- items:
- - description: Watchdog interrupt
- - description: Fatal interrupt
- - description: Ready interrupt
- - description: Handover interrupt
- - description: Stop acknowledge interrupt
+ maxItems: 5
interrupt-names:
- items:
- - const: wdog
- - const: fatal
- - const: ready
- - const: handover
- - const: stop-ack
+ maxItems: 5
- if:
properties:
@@ -345,21 +349,9 @@ allOf:
then:
properties:
interrupts:
- items:
- - description: Watchdog interrupt
- - description: Fatal interrupt
- - description: Ready interrupt
- - description: Handover interrupt
- - description: Stop acknowledge interrupt
- - description: Shutdown acknowledge interrupt
+ minItems: 6
interrupt-names:
- items:
- - const: wdog
- - const: fatal
- - const: ready
- - const: handover
- - const: stop-ack
- - const: shutdown-ack
+ minItems: 6
- if:
properties:
@@ -379,6 +371,8 @@ allOf:
- qcom,msm8226-adsp-pil
- qcom,msm8996-adsp-pil
- qcom,msm8998-adsp-pas
+ - qcom,sm8150-adsp-pas
+ - qcom,sm8150-cdsp-pas
then:
properties:
power-domains:
@@ -447,19 +441,6 @@ allOf:
compatible:
contains:
enum:
- - qcom,sm8150-adsp-pas
- - qcom,sm8150-cdsp-pas
- then:
- properties:
- power-domains:
- items:
- - description: CX power domain
-
- - if:
- properties:
- compatible:
- contains:
- enum:
- qcom,sc7280-mpss-pas
- qcom,sdx55-mpss-pas
- qcom,sm6350-mpss-pas
@@ -594,11 +575,12 @@ allOf:
examples:
- |
#include <dt-bindings/clock/qcom,rpmcc.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
adsp {
compatible = "qcom,msm8974-adsp-pil";
- interrupts-extended = <&intc 0 162 IRQ_TYPE_EDGE_RISING>,
+ interrupts-extended = <&intc GIC_SPI 162 IRQ_TYPE_EDGE_RISING>,
<&adsp_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
<&adsp_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
<&adsp_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
@@ -620,7 +602,7 @@ examples:
qcom,smem-state-names = "stop";
smd-edge {
- interrupts = <0 156 IRQ_TYPE_EDGE_RISING>;
+ interrupts = <GIC_SPI 156 IRQ_TYPE_EDGE_RISING>;
qcom,ipc = <&apcs 8 8>;
qcom,smd-edge = <1>;
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,glink-edge.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,glink-edge.yaml
new file mode 100644
index 000000000000..fa69f7b21eed
--- /dev/null
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,glink-edge.yaml
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/remoteproc/qcom,glink-edge.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm G-Link Edge communication channel nodes
+
+maintainers:
+ - Bjorn Andersson <bjorn.andersson@linaro.org>
+
+description:
+ Qualcomm G-Link subnode represents communication edge, channels and devices
+ related to the remote processor.
+
+properties:
+ $nodename:
+ const: "glink-edge"
+
+ apr:
+ $ref: /schemas/soc/qcom/qcom,apr.yaml#
+ description:
+ Qualcomm APR/GPR (Asynchronous/Generic Packet Router)
+
+ fastrpc:
+ type: object
+ description:
+ See Documentation/devicetree/bindings/misc/qcom,fastrpc.txt
+
+ interrupts:
+ maxItems: 1
+
+ label:
+ description: Name of the edge, used for debugging and identification purposes.
+
+ mboxes:
+ maxItems: 1
+
+ qcom,remote-pid:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ ID of the shared memory used by GLINK for communication with remote
+ processor.
+
+required:
+ - interrupts
+ - label
+ - mboxes
+ - qcom,remote-pid
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/mailbox/qcom-ipcc.h>
+
+ remoteproc@8a00000 {
+ reg = <0x08a00000 0x10000>;
+ // ...
+
+ glink-edge {
+ interrupts-extended = <&ipcc IPCC_CLIENT_WPSS
+ IPCC_MPROC_SIGNAL_GLINK_QMP
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_WPSS
+ IPCC_MPROC_SIGNAL_GLINK_QMP>;
+
+ label = "wpss";
+ qcom,remote-pid = <13>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt b/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt
index b677900b3aae..d0ebd16ee0e1 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt
@@ -14,8 +14,6 @@ on the Qualcomm Hexagon core.
"qcom,msm8974-mss-pil"
"qcom,msm8996-mss-pil"
"qcom,msm8998-mss-pil"
- "qcom,sc7180-mss-pil"
- "qcom,sc7280-mss-pil"
"qcom,sdm845-mss-pil"
- reg:
@@ -37,7 +35,7 @@ on the Qualcomm Hexagon core.
- interrupt-names:
Usage: required
Value type: <stringlist>
- Definition: The interrupts needed depends on the the compatible
+ Definition: The interrupts needed depend on the compatible
string:
qcom,q6v5-pil:
qcom,ipq8074-wcss-pil:
@@ -47,8 +45,6 @@ on the Qualcomm Hexagon core.
must be "wdog", "fatal", "ready", "handover", "stop-ack"
qcom,msm8996-mss-pil:
qcom,msm8998-mss-pil:
- qcom,sc7180-mss-pil:
- qcom,sc7280-mss-pil:
qcom,sdm845-mss-pil:
must be "wdog", "fatal", "ready", "handover", "stop-ack",
"shutdown-ack"
@@ -86,11 +82,6 @@ on the Qualcomm Hexagon core.
qcom,msm8998-mss-pil:
must be "iface", "bus", "mem", "xo", "gpll0_mss",
"snoc_axi", "mnoc_axi", "qdss"
- qcom,sc7180-mss-pil:
- must be "iface", "bus", "xo", "snoc_axi", "mnoc_axi",
- "nav"
- qcom,sc7280-mss-pil:
- must be "iface", "xo", "snoc_axi", "offline", "pka"
qcom,sdm845-mss-pil:
must be "iface", "bus", "mem", "xo", "gpll0_mss",
"snoc_axi", "mnoc_axi", "prng"
@@ -102,7 +93,7 @@ on the Qualcomm Hexagon core.
reference to the list of 3 reset-controllers for the
wcss sub-system
reference to the list of 2 reset-controllers for the modem
- sub-system on SC7180, SC7280, SDM845 SoCs
+ sub-system on SDM845 SoCs
- reset-names:
Usage: required
@@ -111,7 +102,7 @@ on the Qualcomm Hexagon core.
must be "wcss_aon_reset", "wcss_reset", "wcss_q6_reset"
for the wcss sub-system
must be "mss_restart", "pdc_reset" for the modem
- sub-system on SC7180, SC7280, SDM845 SoCs
+ sub-system on SDM845 SoCs
For devices where the mba and mpss sub-nodes are not specified, mba/mpss region
should be referenced as follows:
@@ -176,10 +167,6 @@ For the compatible string below the following supplies are required:
qcom,msm8996-mss-pil:
qcom,msm8998-mss-pil:
must be "cx", "mx"
- qcom,sc7180-mss-pil:
- must be "cx", "mx", "mss"
- qcom,sc7280-mss-pil:
- must be "cx", "mss"
qcom,sdm845-mss-pil:
must be "cx", "mx", "mss"
@@ -205,36 +192,6 @@ For the compatible string below the following supplies are required:
Definition: a phandle reference to a syscon representing TCSR followed
by the three offsets within syscon for q6, modem and nc
halt registers.
- a phandle reference to a syscon representing TCSR followed
- by the four offsets within syscon for q6, modem, nc and vq6
- halt registers on SC7280 SoCs.
-
-For the compatible strings below the following phandle references are required:
- "qcom,sc7180-mss-pil"
-- qcom,spare-regs:
- Usage: required
- Value type: <prop-encoded-array>
- Definition: a phandle reference to a syscon representing TCSR followed
- by the offset within syscon for conn_box_spare0 register
- used by the modem sub-system running on SC7180 SoC.
-
-For the compatible strings below the following phandle references are required:
- "qcom,sc7280-mss-pil"
-- qcom,ext-regs:
- Usage: required
- Value type: <prop-encoded-array>
- Definition: two phandle references to syscons representing TCSR_REG and
- TCSR register space followed by the two offsets within the syscon
- to force_clk_en/rscc_disable and axim1_clk_off/crypto_clk_off
- registers respectively.
-
-- qcom,qaccept-regs:
- Usage: required
- Value type: <prop-encoded-array>
- Definition: a phandle reference to a syscon representing TCSR followed
- by the three offsets within syscon for mdm, cx and axi
- qaccept registers used by the modem sub-system running on
- SC7280 SoC.
The Hexagon node must contain iommus property as described in ../iommu/iommu.txt
on platforms which do not have TrustZone.
@@ -257,29 +214,23 @@ related to the Hexagon. See ../soc/qcom/qcom,smd.yaml and
The following example describes the resources needed to boot control the
Hexagon, as it is found on MSM8974 boards.
- modem-rproc@fc880000 {
- compatible = "qcom,q6v5-pil";
- reg = <0xfc880000 0x100>,
- <0xfc820000 0x020>;
+ remoteproc@fc880000 {
+ compatible = "qcom,msm8974-mss-pil";
+ reg = <0xfc880000 0x100>, <0xfc820000 0x020>;
reg-names = "qdsp6", "rmb";
- interrupts-extended = <&intc 0 24 1>,
- <&modem_smp2p_in 0 0>,
- <&modem_smp2p_in 1 0>,
- <&modem_smp2p_in 2 0>,
- <&modem_smp2p_in 3 0>;
- interrupt-names = "wdog",
- "fatal",
- "ready",
- "handover",
- "stop-ack";
+ interrupts-extended = <&intc GIC_SPI 24 IRQ_TYPE_EDGE_RISING>,
+ <&modem_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&modem_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&modem_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&modem_smp2p_in 3 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog", "fatal", "ready", "handover", "stop-ack";
clocks = <&gcc GCC_MSS_Q6_BIMC_AXI_CLK>,
<&gcc GCC_MSS_CFG_AHB_CLK>,
- <&gcc GCC_BOOT_ROM_AHB_CLK>;
- clock-names = "iface", "bus", "mem";
-
- qcom,halt-regs = <&tcsr_mutex_block 0x1180 0x1200 0x1280>;
+ <&gcc GCC_BOOT_ROM_AHB_CLK>,
+ <&xo_board>;
+ clock-names = "iface", "bus", "mem", "xo";
resets = <&gcc GCC_MSS_RESTART>;
reset-names = "mss_restart";
@@ -289,6 +240,8 @@ Hexagon, as it is found on MSM8974 boards.
mx-supply = <&pm8841_s1>;
pll-supply = <&pm8941_l12>;
+ qcom,halt-regs = <&tcsr_mutex_block 0x1180 0x1200 0x1280>;
+
qcom,smem-states = <&modem_smp2p_out 0>;
qcom,smem-state-names = "stop";
@@ -299,4 +252,13 @@ Hexagon, as it is found on MSM8974 boards.
mpss {
memory-region = <&mpss_region>;
};
+
+ smd-edge {
+ interrupts = <GIC_SPI 25 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&apcs 8 12>;
+ qcom,smd-edge = <0>;
+
+ label = "modem";
+ };
};
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,qcs404-cdsp-pil.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,qcs404-cdsp-pil.yaml
index 31413cfe10db..06f5f93f62a9 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,qcs404-cdsp-pil.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,qcs404-cdsp-pil.yaml
@@ -90,7 +90,6 @@ properties:
- description: Stop the modem
qcom,smem-state-names:
- $ref: /schemas/types.yaml#/definitions/string
description: The names of the state bits used for SMP2P output
items:
- const: stop
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,sc7180-mss-pil.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,sc7180-mss-pil.yaml
new file mode 100644
index 000000000000..e76c861165dd
--- /dev/null
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,sc7180-mss-pil.yaml
@@ -0,0 +1,245 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/remoteproc/qcom,sc7180-mss-pil.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm SC7180 MSS Peripheral Image Loader
+
+maintainers:
+ - Sibi Sankar <quic_sibis@quicinc.com>
+
+description:
+ This document describes the hardware for a component that loads and boots firmware
+ on the Qualcomm Technology Inc. SC7180 Modem Hexagon Core.
+
+properties:
+ compatible:
+ enum:
+ - qcom,sc7180-mss-pil
+
+ reg:
+ items:
+ - description: MSS QDSP6 registers
+ - description: RMB registers
+
+ reg-names:
+ items:
+ - const: qdsp6
+ - const: rmb
+
+ iommus:
+ items:
+ - description: MSA Stream 1
+ - description: MSA Stream 2
+
+ interrupts:
+ items:
+ - description: Watchdog interrupt
+ - description: Fatal interrupt
+ - description: Ready interrupt
+ - description: Handover interrupt
+ - description: Stop acknowledge interrupt
+ - description: Shutdown acknowledge interrupt
+
+ interrupt-names:
+ items:
+ - const: wdog
+ - const: fatal
+ - const: ready
+ - const: handover
+ - const: stop-ack
+ - const: shutdown-ack
+
+ clocks:
+ items:
+ - description: GCC MSS IFACE clock
+ - description: GCC MSS BUS clock
+ - description: GCC MSS NAV clock
+ - description: GCC MSS SNOC_AXI clock
+ - description: GCC MSS MFAB_AXIS clock
+ - description: RPMH XO clock
+
+ clock-names:
+ items:
+ - const: iface
+ - const: bus
+ - const: nav
+ - const: snoc_axi
+ - const: mnoc_axi
+ - const: xo
+
+ power-domains:
+ items:
+ - description: CX power domain
+ - description: MX power domain
+ - description: MSS power domain
+
+ power-domain-names:
+ items:
+ - const: cx
+ - const: mx
+ - const: mss
+
+ resets:
+ items:
+ - description: AOSS restart
+ - description: PDC reset
+
+ reset-names:
+ items:
+ - const: mss_restart
+ - const: pdc_reset
+
+ memory-region:
+ items:
+ - description: MBA reserved region
+ - description: modem reserved region
+
+ firmware-name:
+ $ref: /schemas/types.yaml#/definitions/string-array
+ items:
+ - description: Name of MBA firmware
+ - description: Name of modem firmware
+
+ qcom,halt-regs:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description:
+ Halt registers are used to halt transactions of various sub-components
+ within MSS.
+ items:
+ - items:
+ - description: phandle to TCSR_MUTEX registers
+ - description: offset to the Q6 halt register
+ - description: offset to the modem halt register
+ - description: offset to the nc halt register
+
+ qcom,spare-regs:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description:
+ Spare registers are multipurpose registers used for errata
+ handling.
+ items:
+ - items:
+ - description: phandle to TCSR_MUTEX registers
+ - description: offset to the conn_box_spare0 register
+
+ qcom,qmp:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: Reference to the AOSS side-channel message RAM.
+
+ qcom,smem-states:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: States used by the AP to signal the Hexagon core
+ items:
+ - description: Stop the modem
+
+ qcom,smem-state-names:
+ description: The names of the state bits used for SMP2P output
+ const: stop
+
+ glink-edge:
+ $ref: qcom,glink-edge.yaml#
+ description:
+ Qualcomm G-Link subnode which represents communication edge, channels
+ and devices related to the DSP.
+
+ properties:
+ interrupts:
+ items:
+ - description: IRQ from MSS to GLINK
+
+ mboxes:
+ items:
+ - description: Mailbox for communication between APPS and MSS
+
+ label:
+ const: modem
+
+ apr: false
+ fastrpc: false
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - iommus
+ - interrupts
+ - interrupt-names
+ - clocks
+ - clock-names
+ - power-domains
+ - power-domain-names
+ - resets
+ - reset-names
+ - qcom,halt-regs
+ - qcom,spare-regs
+ - memory-region
+ - qcom,qmp
+ - qcom,smem-states
+ - qcom,smem-state-names
+ - glink-edge
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,gcc-sc7180.h>
+ #include <dt-bindings/clock/qcom,rpmh.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/qcom-rpmpd.h>
+ #include <dt-bindings/reset/qcom,sdm845-aoss.h>
+ #include <dt-bindings/reset/qcom,sdm845-pdc.h>
+
+ remoteproc_mpss: remoteproc@4080000 {
+ compatible = "qcom,sc7180-mss-pil";
+ reg = <0x04080000 0x10000>, <0x04180000 0x48>;
+ reg-names = "qdsp6", "rmb";
+
+ iommus = <&apps_smmu 0x461 0x0>, <&apps_smmu 0x444 0x3>;
+
+ interrupts-extended = <&intc GIC_SPI 264 IRQ_TYPE_EDGE_RISING>,
+ <&modem_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&modem_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&modem_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&modem_smp2p_in 3 IRQ_TYPE_EDGE_RISING>,
+ <&modem_smp2p_in 7 IRQ_TYPE_EDGE_RISING>;
+
+ interrupt-names = "wdog", "fatal", "ready", "handover",
+ "stop-ack", "shutdown-ack";
+
+ clocks = <&gcc GCC_MSS_CFG_AHB_CLK>,
+ <&gcc GCC_MSS_Q6_MEMNOC_AXI_CLK>,
+ <&gcc GCC_MSS_NAV_AXI_CLK>,
+ <&gcc GCC_MSS_SNOC_AXI_CLK>,
+ <&gcc GCC_MSS_MFAB_AXIS_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "iface", "bus", "nav", "snoc_axi",
+ "mnoc_axi", "xo";
+
+ power-domains = <&rpmhpd SC7180_CX>,
+ <&rpmhpd SC7180_MX>,
+ <&rpmhpd SC7180_MSS>;
+ power-domain-names = "cx", "mx", "mss";
+
+ memory-region = <&mba_mem>, <&mpss_mem>;
+
+ qcom,qmp = <&aoss_qmp>;
+
+ qcom,smem-states = <&modem_smp2p_out 0>;
+ qcom,smem-state-names = "stop";
+
+ resets = <&aoss_reset AOSS_CC_MSS_RESTART>,
+ <&pdc_reset PDC_MODEM_SYNC_RESET>;
+ reset-names = "mss_restart", "pdc_reset";
+
+ qcom,halt-regs = <&tcsr_mutex_regs 0x23000 0x25000 0x24000>;
+ qcom,spare-regs = <&tcsr_regs 0xb3e4>;
+
+ glink-edge {
+ interrupts = <GIC_SPI 449 IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&apss_shared 12>;
+ qcom,remote-pid = <1>;
+ label = "modem";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,sc7280-mss-pil.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,sc7280-mss-pil.yaml
new file mode 100644
index 000000000000..da1a5de3d38b
--- /dev/null
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,sc7280-mss-pil.yaml
@@ -0,0 +1,266 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/remoteproc/qcom,sc7280-mss-pil.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm SC7280 MSS Peripheral Image Loader
+
+maintainers:
+ - Sibi Sankar <quic_sibis@quicinc.com>
+
+description:
+ This document describes the hardware for a component that loads and boots firmware
+ on the Qualcomm Technology Inc. SC7280 Modem Hexagon Core.
+
+properties:
+ compatible:
+ enum:
+ - qcom,sc7280-mss-pil
+
+ reg:
+ items:
+ - description: MSS QDSP6 registers
+ - description: RMB registers
+
+ reg-names:
+ items:
+ - const: qdsp6
+ - const: rmb
+
+ iommus:
+ items:
+ - description: MSA Stream 1
+ - description: MSA Stream 2
+
+ interconnects:
+ items:
+ - description: Path leading to system memory
+
+ interrupts:
+ items:
+ - description: Watchdog interrupt
+ - description: Fatal interrupt
+ - description: Ready interrupt
+ - description: Handover interrupt
+ - description: Stop acknowledge interrupt
+ - description: Shutdown acknowledge interrupt
+
+ interrupt-names:
+ items:
+ - const: wdog
+ - const: fatal
+ - const: ready
+ - const: handover
+ - const: stop-ack
+ - const: shutdown-ack
+
+ clocks:
+ items:
+ - description: GCC MSS IFACE clock
+ - description: GCC MSS OFFLINE clock
+ - description: GCC MSS SNOC_AXI clock
+ - description: RPMH PKA clock
+ - description: RPMH XO clock
+
+ clock-names:
+ items:
+ - const: iface
+ - const: offline
+ - const: snoc_axi
+ - const: pka
+ - const: xo
+
+ power-domains:
+ items:
+ - description: CX power domain
+ - description: MSS power domain
+
+ power-domain-names:
+ items:
+ - const: cx
+ - const: mss
+
+ resets:
+ items:
+ - description: AOSS restart
+ - description: PDC reset
+
+ reset-names:
+ items:
+ - const: mss_restart
+ - const: pdc_reset
+
+ memory-region:
+ items:
+ - description: MBA reserved region
+ - description: modem reserved region
+
+ firmware-name:
+ $ref: /schemas/types.yaml#/definitions/string-array
+ items:
+ - description: Name of MBA firmware
+ - description: Name of modem firmware
+
+ qcom,halt-regs:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description:
+ Halt registers are used to halt transactions of various sub-components
+ within MSS.
+ items:
+ - items:
+ - description: phandle to TCSR_MUTEX registers
+ - description: offset to the Q6 halt register
+ - description: offset to the modem halt register
+ - description: offset to the nc halt register
+ - description: offset to the vq6 halt register
+
+ qcom,ext-regs:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: EXT registers are used for various power related functionality
+ items:
+ - items:
+ - description: phandle to TCSR_REG registers
+ - description: offset to the force_clk_en register
+ - description: offset to the rscc_disable register
+ - items:
+ - description: phandle to TCSR_MUTEX registers
+ - description: offset to the axim1_clk_off register
+ - description: offset to the crypto_clk_off register
+
+ qcom,qaccept-regs:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: QACCEPT registers are used to bring up/down Q-channels
+ items:
+ - items:
+ - description: phandle to TCSR_MUTEX registers
+ - description: offset to the mdm qaccept register
+ - description: offset to the cx qaccept register
+ - description: offset to the axi qaccept register
+
+ qcom,qmp:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: Reference to the AOSS side-channel message RAM.
+
+ qcom,smem-states:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: States used by the AP to signal the Hexagon core
+ items:
+ - description: Stop the modem
+
+ qcom,smem-state-names:
+ description: The names of the state bits used for SMP2P output
+ const: stop
+
+ glink-edge:
+ $ref: qcom,glink-edge.yaml#
+ description:
+ Qualcomm G-Link subnode which represents communication edge, channels
+ and devices related to the DSP.
+
+ properties:
+ interrupts:
+ items:
+ - description: IRQ from MSS to GLINK
+
+ mboxes:
+ items:
+ - description: Mailbox for communication between APPS and MSS
+
+ label:
+ const: modem
+
+ apr: false
+ fastrpc: false
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - iommus
+ - interconnects
+ - interrupts
+ - interrupt-names
+ - clocks
+ - clock-names
+ - power-domains
+ - power-domain-names
+ - resets
+ - reset-names
+ - qcom,halt-regs
+ - qcom,ext-regs
+ - qcom,qaccept-regs
+ - memory-region
+ - qcom,qmp
+ - qcom,smem-states
+ - qcom,smem-state-names
+ - glink-edge
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,gcc-sc7280.h>
+ #include <dt-bindings/clock/qcom,rpmh.h>
+ #include <dt-bindings/interconnect/qcom,sc7280.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/mailbox/qcom-ipcc.h>
+ #include <dt-bindings/power/qcom-rpmpd.h>
+ #include <dt-bindings/reset/qcom,sdm845-aoss.h>
+ #include <dt-bindings/reset/qcom,sdm845-pdc.h>
+
+ remoteproc_mpss: remoteproc@4080000 {
+ compatible = "qcom,sc7280-mss-pil";
+ reg = <0x04080000 0x10000>, <0x04180000 0x48>;
+ reg-names = "qdsp6", "rmb";
+
+ iommus = <&apps_smmu 0x124 0x0>, <&apps_smmu 0x488 0x7>;
+
+ interconnects = <&mc_virt MASTER_LLCC 0 &mc_virt SLAVE_EBI1 0>;
+
+ interrupts-extended = <&intc GIC_SPI 264 IRQ_TYPE_EDGE_RISING>,
+ <&modem_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&modem_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&modem_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&modem_smp2p_in 3 IRQ_TYPE_EDGE_RISING>,
+ <&modem_smp2p_in 7 IRQ_TYPE_EDGE_RISING>;
+
+ interrupt-names = "wdog", "fatal", "ready", "handover",
+ "stop-ack", "shutdown-ack";
+
+ clocks = <&gcc GCC_MSS_CFG_AHB_CLK>,
+ <&gcc GCC_MSS_OFFLINE_AXI_CLK>,
+ <&gcc GCC_MSS_SNOC_AXI_CLK>,
+ <&rpmhcc RPMH_PKA_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "iface", "offline", "snoc_axi", "pka", "xo";
+
+ power-domains = <&rpmhpd SC7280_CX>,
+ <&rpmhpd SC7280_MSS>;
+ power-domain-names = "cx", "mss";
+
+ memory-region = <&mba_mem>, <&mpss_mem>;
+
+ qcom,qmp = <&aoss_qmp>;
+
+ qcom,smem-states = <&modem_smp2p_out 0>;
+ qcom,smem-state-names = "stop";
+
+ resets = <&aoss_reset AOSS_CC_MSS_RESTART>,
+ <&pdc_reset PDC_MODEM_SYNC_RESET>;
+ reset-names = "mss_restart", "pdc_reset";
+
+ qcom,halt-regs = <&tcsr_mutex 0x23000 0x25000 0x28000 0x33000>;
+ qcom,ext-regs = <&tcsr 0x10000 0x10004>, <&tcsr_mutex 0x26004 0x26008>;
+ qcom,qaccept-regs = <&tcsr_mutex 0x23030 0x23040 0x23020>;
+
+ glink-edge {
+ interrupts-extended = <&ipcc IPCC_CLIENT_MPSS
+ IPCC_MPROC_SIGNAL_GLINK_QMP
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_MPSS
+ IPCC_MPROC_SIGNAL_GLINK_QMP>;
+ label = "modem";
+ qcom,remote-pid = <1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,sc7280-wpss-pil.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,sc7280-wpss-pil.yaml
index d99a729d2710..3f06d66cbe47 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,sc7280-wpss-pil.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,sc7280-wpss-pil.yaml
@@ -76,7 +76,7 @@ properties:
- const: pdc_sync
memory-region:
- $ref: /schemas/types.yaml#/definitions/phandle
+ maxItems: 1
description: Reference to the reserved-memory for the Hexagon core
firmware-name:
@@ -102,13 +102,12 @@ properties:
- description: Stop the modem
qcom,smem-state-names:
- $ref: /schemas/types.yaml#/definitions/string
description: The names of the state bits used for SMP2P output
const: stop
glink-edge:
- type: object
- description: |
+ $ref: qcom,glink-edge.yaml#
+ description:
Qualcomm G-Link subnode which represents communication edge, channels
and devices related to the ADSP.
@@ -122,21 +121,11 @@ properties:
- description: Mailbox for communication between APPS and WPSS
label:
- description: The names of the state bits used for SMP2P output
items:
- const: wpss
- qcom,remote-pid:
- $ref: /schemas/types.yaml#/definitions/uint32
- description: ID of the shared memory used by GLINK for communication with WPSS
-
- required:
- - interrupts
- - mboxes
- - label
- - qcom,remote-pid
-
- additionalProperties: false
+ apr: false
+ fastrpc: false
required:
- compatible
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,sdm845-adsp-pil.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,sdm845-adsp-pil.yaml
index 1535bbbe25da..20df83a96ef3 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,sdm845-adsp-pil.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,sdm845-adsp-pil.yaml
@@ -90,7 +90,6 @@ properties:
- description: Stop the modem
qcom,smem-state-names:
- $ref: /schemas/types.yaml#/definitions/string
description: The names of the state bits used for SMP2P output
items:
- const: stop
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,smd-edge.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,smd-edge.yaml
new file mode 100644
index 000000000000..06eebf791e32
--- /dev/null
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,smd-edge.yaml
@@ -0,0 +1,85 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/remoteproc/qcom,smd-edge.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm SMD Edge communication channel nodes
+
+maintainers:
+ - Bjorn Andersson <bjorn.andersson@linaro.org>
+
+description:
+ Qualcomm SMD subnode represents a remote subsystem or a remote processor of
+ some sort - or in SMD language an "edge". The names of the edges are not
+ important.
+ See also Documentation/devicetree/bindings/soc/qcom/qcom,smd.yaml
+
+properties:
+ $nodename:
+ const: "smd-edge"
+
+ interrupts:
+ maxItems: 1
+
+ label:
+ description:
+ Name of the edge, used for debugging and identification purposes. The
+ node name will be used if this is not present.
+
+ mboxes:
+ maxItems: 1
+ description:
+ Reference to the mailbox representing the outgoing doorbell in APCS for
+ this client.
+
+ qcom,ipc:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ items:
+ - items:
+ - description: phandle to a syscon node representing the APCS registers
+ - description: u32 representing offset to the register within the syscon
+ - description: u32 representing the ipc bit within the register
+ description:
+ Three entries specifying the outgoing ipc bit used for signaling the
+ remote processor.
+
+ qcom,smd-edge:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ The identifier of the remote processor in the smd channel allocation
+ table.
+
+ qcom,remote-pid:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ The identifier for the remote processor as known by the rest of the
+ system.
+
+required:
+ - interrupts
+ - qcom,smd-edge
+
+oneOf:
+ - required:
+ - mboxes
+ - required:
+ - qcom,ipc
+
+additionalProperties: true
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/mailbox/qcom-ipcc.h>
+
+ remoteproc {
+ // ...
+
+ smd-edge {
+ interrupts = <GIC_SPI 156 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&apcs 8 8>;
+ qcom,smd-edge = <1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/remoteproc/ti,pru-rproc.yaml b/Documentation/devicetree/bindings/remoteproc/ti,pru-rproc.yaml
index d7c3a78e37e6..cd55d80137f7 100644
--- a/Documentation/devicetree/bindings/remoteproc/ti,pru-rproc.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/ti,pru-rproc.yaml
@@ -36,17 +36,18 @@ properties:
enum:
- ti,am3356-pru # for AM335x SoC family (AM3356+ SoCs only)
- ti,am4376-pru # for AM437x SoC family (AM4376+ SoCs only)
+ - ti,am5728-pru # for AM57xx SoC family
+ - ti,am625-pru # for PRUs in K3 AM62x SoC family
- ti,am642-pru # for PRUs in K3 AM64x SoC family
- ti,am642-rtu # for RTUs in K3 AM64x SoC family
- ti,am642-tx-pru # for Tx_PRUs in K3 AM64x SoC family
- - ti,am5728-pru # for AM57xx SoC family
- - ti,k2g-pru # for 66AK2G SoC family
- ti,am654-pru # for PRUs in K3 AM65x SoC family
- ti,am654-rtu # for RTUs in K3 AM65x SoC family
- ti,am654-tx-pru # for Tx_PRUs in K3 AM65x SR2.0 SoCs
- ti,j721e-pru # for PRUs in K3 J721E SoC family
- ti,j721e-rtu # for RTUs in K3 J721E SoC family
- ti,j721e-tx-pru # for Tx_PRUs in K3 J721E SoC family
+ - ti,k2g-pru # for 66AK2G SoC family
reg:
items:
diff --git a/Documentation/devicetree/bindings/reset/atmel,at91sam9260-reset.yaml b/Documentation/devicetree/bindings/reset/atmel,at91sam9260-reset.yaml
new file mode 100644
index 000000000000..98465d26949e
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/atmel,at91sam9260-reset.yaml
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/reset/atmel,at91sam9260-reset.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Atmel/Microchip System Reset Controller
+
+maintainers:
+ - Claudiu Beznea <claudiu.beznea@microchip.com>
+
+description: |
+ The system reset controller can be used to reset the CPU. In case of
+ SAMA7G5 it can also reset some devices (e.g. USB PHYs).
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - atmel,at91sam9260-rstc
+ - atmel,at91sam9g45-rstc
+ - atmel,sama5d3-rstc
+ - microchip,sam9x60-rstc
+ - microchip,sama7g5-rstc
+ - items:
+ - const: atmel,sama5d3-rstc
+ - const: atmel,at91sam9g45-rstc
+
+ reg:
+ minItems: 1
+ items:
+ - description: base registers for system reset control
+ - description: registers for device specific reset control
+
+ clocks:
+ maxItems: 1
+
+ "#reset-cells":
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - microchip,sama7g5-rstc
+ then:
+ required:
+ - "#reset-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/at91.h>
+
+ reset-controller@fffffd00 {
+ compatible = "atmel,at91sam9260-rstc";
+ reg = <0xfffffd00 0x10>;
+ clocks = <&pmc PMC_TYPE_CORE PMC_SLOW>;
+ };
diff --git a/Documentation/devicetree/bindings/reset/renesas,rzg2l-usbphy-ctrl.yaml b/Documentation/devicetree/bindings/reset/renesas,rzg2l-usbphy-ctrl.yaml
index 86c2569ced97..731b8ce01525 100644
--- a/Documentation/devicetree/bindings/reset/renesas,rzg2l-usbphy-ctrl.yaml
+++ b/Documentation/devicetree/bindings/reset/renesas,rzg2l-usbphy-ctrl.yaml
@@ -17,6 +17,7 @@ properties:
compatible:
items:
- enum:
+ - renesas,r9a07g043-usbphy-ctrl # RZ/G2UL
- renesas,r9a07g044-usbphy-ctrl # RZ/G2{L,LC}
- renesas,r9a07g054-usbphy-ctrl # RZ/V2L
- const: renesas,rzg2l-usbphy-ctrl
diff --git a/Documentation/devicetree/bindings/reset/ti,tps380x-reset.yaml b/Documentation/devicetree/bindings/reset/ti,tps380x-reset.yaml
new file mode 100644
index 000000000000..afc835eda0ef
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/ti,tps380x-reset.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0-only or BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/reset/ti,tps380x-reset.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI TPS380x reset controller node bindings
+
+maintainers:
+ - Marco Felsch <kernel@pengutronix.de>
+
+description: |
+ The TPS380x family [1] of supervisory circuits monitor supply voltages to
+ provide circuit initialization and timing supervision. The devices assert a
+ RESET signal if the voltage drops below a preset threshold or upon a manual
+ reset input (MR). The RESET output remains asserted for the factory
+ programmed delay after the voltage returns above its threshold or after the
+ manual reset input is released.
+
+ [1] https://www.ti.com/product/TPS3801
+
+properties:
+ compatible:
+ enum:
+ - ti,tps3801
+
+ reset-gpios:
+ maxItems: 1
+ description: Reference to the GPIO connected to the MR pin.
+
+ "#reset-cells":
+ const: 0
+
+required:
+ - compatible
+ - reset-gpios
+ - "#reset-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ reset: reset-controller {
+ compatible = "ti,tps3801";
+ #reset-cells = <0>;
+ reset-gpios = <&gpio3 2 GPIO_ACTIVE_LOW>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/riscv/cpus.yaml b/Documentation/devicetree/bindings/riscv/cpus.yaml
index d632ac76532e..873dd12f6e89 100644
--- a/Documentation/devicetree/bindings/riscv/cpus.yaml
+++ b/Documentation/devicetree/bindings/riscv/cpus.yaml
@@ -63,6 +63,11 @@ properties:
- riscv,sv48
- riscv,none
+ riscv,cbom-block-size:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ The blocksize in bytes for the Zicbom cache operations.
+
riscv,isa:
description:
Identifies the specific RISC-V instruction set architecture
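
A minimal sketch of a CPU node carrying the new property; the ISA string and the
64-byte block size are illustrative assumptions, not values from this patch:

    cpu@0 {
            device_type = "cpu";
            compatible = "riscv";
            reg = <0>;
            riscv,isa = "rv64imafdc_zicbom";
            riscv,cbom-block-size = <64>;   /* Zicbom cache-operation block size in bytes */
    };
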
diff --git a/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml b/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml
index e2d330bd4608..69cdab18d629 100644
--- a/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml
+++ b/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml
@@ -46,7 +46,7 @@ properties:
const: 2
cache-sets:
- const: 1024
+ enum: [1024, 2048]
cache-size:
const: 2097152
@@ -84,6 +84,8 @@ then:
description: |
Must contain entries for DirError, DataError and DataFail signals.
maxItems: 3
+ cache-sets:
+ const: 1024
else:
properties:
@@ -91,6 +93,8 @@ else:
description: |
Must contain entries for DirError, DataError, DataFail, DirFail signals.
minItems: 4
+ cache-sets:
+ const: 2048
additionalProperties: false
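
A sketch of a controller using the new 2048-set value; the compatible string and
interrupt numbers are assumptions based on the FU740-style variant (four error
interrupts including DirFail), not taken from this patch:

    cache-controller@2010000 {
            compatible = "sifive,fu740-c000-ccache", "cache";
            reg = <0x2010000 0x1000>;
            cache-block-size = <64>;
            cache-level = <2>;
            cache-sets = <2048>;            /* three-interrupt (FU540-style) controllers keep 1024 */
            cache-size = <2097152>;
            cache-unified;
            interrupts = <19>, <21>, <22>, <23>;
    };
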
diff --git a/Documentation/devicetree/bindings/rtc/microcrystal,rv3032.yaml b/Documentation/devicetree/bindings/rtc/microcrystal,rv3032.yaml
index 9593840a4a2b..60f9027e8299 100644
--- a/Documentation/devicetree/bindings/rtc/microcrystal,rv3032.yaml
+++ b/Documentation/devicetree/bindings/rtc/microcrystal,rv3032.yaml
@@ -32,6 +32,7 @@ properties:
- 11000
trickle-voltage-millivolt:
+ $ref: /schemas/types.yaml#/definitions/uint32
enum:
- 1750
- 3000
diff --git a/Documentation/devicetree/bindings/rtc/nuvoton,nct3018y.yaml b/Documentation/devicetree/bindings/rtc/nuvoton,nct3018y.yaml
new file mode 100644
index 000000000000..7a1857f5caa8
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/nuvoton,nct3018y.yaml
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/nuvoton,nct3018y.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NUVOTON NCT3018Y Real Time Clock
+
+allOf:
+ - $ref: "rtc.yaml#"
+
+maintainers:
+ - Medad CChien <ctcchien@nuvoton.com>
+ - Mia Lin <mimi05633@gmail.com>
+
+properties:
+ compatible:
+ const: nuvoton,nct3018y
+
+ reg:
+ maxItems: 1
+
+ start-year: true
+
+ reset-source: true
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rtc@6f {
+ compatible = "nuvoton,nct3018y";
+ reg = <0x6f>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/rtc/nxp,pcf85063.txt b/Documentation/devicetree/bindings/rtc/nxp,pcf85063.txt
deleted file mode 100644
index 217b7cd06c11..000000000000
--- a/Documentation/devicetree/bindings/rtc/nxp,pcf85063.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-* NXP PCF85063 Real Time Clock
-
-Required properties:
-- compatible: Should one of contain:
- "nxp,pca85073a",
- "nxp,pcf85063",
- "nxp,pcf85063a",
- "nxp,pcf85063tp",
- "microcrystal,rv8263"
-- reg: I2C address for chip.
-
-Optional property:
-- quartz-load-femtofarads: The capacitive load of the quartz(x-tal),
- expressed in femto Farad (fF). Valid values are 7000 and 12500.
- Default value (if no value is specified) is 7000fF.
-
-Optional child node:
-- clock: Provide this if the square wave pin is used as boot-enabled fixed clock.
-
-Example:
-
-pcf85063: rtc@51 {
- compatible = "nxp,pcf85063";
- reg = <0x51>;
- quartz-load-femtofarads = <12500>;
-
- clock {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <32768>;
- };
-};
diff --git a/Documentation/devicetree/bindings/rtc/nxp,pcf85063.yaml b/Documentation/devicetree/bindings/rtc/nxp,pcf85063.yaml
new file mode 100644
index 000000000000..2f892f8640d1
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/nxp,pcf85063.yaml
@@ -0,0 +1,92 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/nxp,pcf85063.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP PCF85063 Real Time Clock
+
+maintainers:
+ - Alexander Stein <alexander.stein@ew.tq-group.com>
+
+properties:
+ compatible:
+ enum:
+ - microcrystal,rv8263
+ - nxp,pcf85063
+ - nxp,pcf85063a
+ - nxp,pcf85063tp
+ - nxp,pca85073a
+
+ reg:
+ maxItems: 1
+
+ "#clock-cells":
+ const: 0
+
+ clock-output-names:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ quartz-load-femtofarads:
+ description:
+ The capacitive load of the quartz (x-tal).
+ enum: [7000, 12500]
+ default: 7000
+
+ clock:
+ $ref: /schemas/clock/fixed-clock.yaml
+ description:
+ Provide this if the square wave pin is used as boot-enabled
+ fixed clock.
+
+ wakeup-source: true
+
+allOf:
+ - $ref: rtc.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - microcrystal,rv8263
+ then:
+ properties:
+ quartz-load-femtofarads: false
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - nxp,pcf85063
+ then:
+ properties:
+ quartz-load-femtofarads:
+ const: 7000
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rtc@51 {
+ compatible = "nxp,pcf85063a";
+ reg = <0x51>;
+ quartz-load-femtofarads = <12500>;
+
+ clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/rtc/qcom-pm8xxx-rtc.yaml b/Documentation/devicetree/bindings/rtc/qcom-pm8xxx-rtc.yaml
index 6fa7d9fc2dc7..23ab5bb4f395 100644
--- a/Documentation/devicetree/bindings/rtc/qcom-pm8xxx-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/qcom-pm8xxx-rtc.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm PM8xxx PMIC RTC device
maintainers:
- - Satya Priya <skakit@codeaurora.org>
+ - Satya Priya <quic_c_skakit@quicinc.com>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/rtc/rtc-ds1307.txt b/Documentation/devicetree/bindings/rtc/rtc-ds1307.txt
deleted file mode 100644
index 36f610bb051e..000000000000
--- a/Documentation/devicetree/bindings/rtc/rtc-ds1307.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-Dallas DS1307 and compatible RTC
-
-Required properties:
-- compatible: should be one of:
- "dallas,ds1307",
- "dallas,ds1308",
- "dallas,ds1337",
- "dallas,ds1338",
- "dallas,ds1339",
- "dallas,ds1388",
- "dallas,ds1340",
- "dallas,ds1341",
- "maxim,ds3231",
- "st,m41t0",
- "st,m41t00",
- "st,m41t11",
- "microchip,mcp7940x",
- "microchip,mcp7941x",
- "pericom,pt7c4338",
- "epson,rx8025",
- "isil,isl12057"
- "epson,rx8130"
-- reg: I2C bus address of the device
-
-Optional properties:
-- interrupts: rtc alarm interrupt.
-- clock-output-names: From common clock binding to override the default output
- clock name
-- wakeup-source: Enables wake up of host system on alarm
-- trickle-resistor-ohms : ds1339, ds1340 and ds 1388 only
- Selected resistor for trickle charger
- Possible values are 250, 2000, 4000
- Should be given if trickle charger should be enabled
-- aux-voltage-chargeable: ds1339, ds1340, ds1388 and rx8130 only
- Tells whether the battery/supercap of the RTC (if any) is
- chargeable or not.
- Possible values are 0 (not chargeable), 1 (chargeable)
-
-Deprecated properties:
-- trickle-diode-disable : ds1339, ds1340 and ds1388 only
- Do not use internal trickle charger diode
- Should be given if internal trickle charger diode should be disabled
- (superseded by aux-voltage-chargeable)
-
-Example:
- ds1339: rtc@68 {
- compatible = "dallas,ds1339";
- reg = <0x68>;
- interrupt-parent = <&gpio4>;
- interrupts = <20 0>;
- trickle-resistor-ohms = <250>;
- };
diff --git a/Documentation/devicetree/bindings/rtc/rtc-ds1307.yaml b/Documentation/devicetree/bindings/rtc/rtc-ds1307.yaml
new file mode 100644
index 000000000000..98d10e680144
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/rtc-ds1307.yaml
@@ -0,0 +1,102 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/rtc-ds1307.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Dallas DS1307 and compatible RTC
+
+maintainers:
+ - Alexandre Belloni <alexandre.belloni@bootlin.com>
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - dallas,ds1307
+ - dallas,ds1308
+ - dallas,ds1337
+ - dallas,ds1338
+ - dallas,ds1339
+ - dallas,ds1388
+ - dallas,ds1340
+ - dallas,ds1341
+ - maxim,ds3231
+ - st,m41t0
+ - st,m41t00
+ - st,m41t11
+ - microchip,mcp7940x
+ - microchip,mcp7941x
+ - pericom,pt7c4338
+ - epson,rx8025
+ - isil,isl12057
+ - epson,rx8130
+
+ - items:
+ - enum:
+ - st,m41t00
+ - const: dallas,ds1338
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ minItems: 1
+ maxItems: 2
+
+ interrupt-names:
+ maxItems: 2
+
+ "#clock-cells":
+ const: 1
+
+ clock-output-names:
+ description: From common clock binding to override the default output clock name.
+
+ wakeup-source:
+ description: Enables wake up of host system on alarm.
+
+ vcc-supply: true
+
+allOf:
+ - $ref: rtc.yaml
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - dallas,ds1339
+ - dallas,ds1340
+ - dallas,ds1388
+ then:
+ properties:
+ trickle-resistor-ohms:
+ description: Selected resistor for trickle charger. Should be specified if trickle
+ charger should be enabled.
+ enum: [ 250, 2000, 4000 ]
+
+ trickle-diode-disable:
+ description: Do not use internal trickle charger diode. Should be given if internal
+ trickle charger diode should be disabled (superseded by aux-voltage-chargeable)
+ deprecated: true
+
+unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rtc@68 {
+ compatible = "dallas,ds1337";
+ reg = <0x68>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <20 0>;
+ trickle-resistor-ohms = <250>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/rtc/rtc-mt6397.txt b/Documentation/devicetree/bindings/rtc/rtc-mt6397.txt
index 55a0c8874c03..7212076a8f1b 100644
--- a/Documentation/devicetree/bindings/rtc/rtc-mt6397.txt
+++ b/Documentation/devicetree/bindings/rtc/rtc-mt6397.txt
@@ -14,6 +14,8 @@ For MediaTek PMIC wrapper bus bindings, see:
Required properties:
- compatible: Should be one of follows
"mediatek,mt6323-rtc": for MT6323 PMIC
+ "mediatek,mt6358-rtc": for MT6358 PMIC
+ "mediatek,mt6366-rtc", "mediatek,mt6358-rtc": for MT6366 PMIC
"mediatek,mt6397-rtc": for MT6397 PMIC
Example:
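
A minimal sketch of the newly added compatibles as a PMIC sub-node (the
surrounding MT6366 PMIC node is omitted; only the compatible strings come from
this patch):

    rtc {
            compatible = "mediatek,mt6366-rtc", "mediatek,mt6358-rtc";
    };
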
diff --git a/Documentation/devicetree/bindings/rtc/ti,k3-rtc.yaml b/Documentation/devicetree/bindings/rtc/ti,k3-rtc.yaml
new file mode 100644
index 000000000000..d995ef04a6eb
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/ti,k3-rtc.yaml
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/ti,k3-rtc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments K3 Real Time Clock
+
+maintainers:
+ - Nishanth Menon <nm@ti.com>
+
+description: |
+ This RTC appears in the AM62x family of SoCs.
+
+allOf:
+ - $ref: "rtc.yaml#"
+
+properties:
+ compatible:
+ enum:
+ - ti,am62-rtc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: VBUS Interface clock
+ - description: 32k Clock source (external or internal).
+
+ clock-names:
+ items:
+ - const: vbus
+ - const: osc32k
+
+ power-domains:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ rtc@2b1f0000 {
+ compatible = "ti,am62-rtc";
+ reg = <0x2b1f0000 0x100>;
+ interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
+ power-domains = <&bar 0>;
+ clocks = <&foo 0>, <&foo 1>;
+ clock-names = "vbus", "osc32k";
+ wakeup-source;
+ };
diff --git a/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml b/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml
index 13925bb78ec7..d9fc120c61cc 100644
--- a/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml
@@ -30,6 +30,8 @@ properties:
- dallas,ds1672
# Extremely Accurate I²C RTC with Integrated Crystal and SRAM
- dallas,ds3232
+ # EM Microelectronic EM3027 RTC
+ - emmicro,em3027
# I2C-BUS INTERFACE REAL TIME CLOCK MODULE
- epson,rx8010
# I2C-BUS INTERFACE REAL TIME CLOCK MODULE
diff --git a/Documentation/devicetree/bindings/rtc/xlnx,zynqmp-rtc.yaml b/Documentation/devicetree/bindings/rtc/xlnx,zynqmp-rtc.yaml
index bdb72d3ddf2a..7ed0230f6c67 100644
--- a/Documentation/devicetree/bindings/rtc/xlnx,zynqmp-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/xlnx,zynqmp-rtc.yaml
@@ -23,8 +23,15 @@ properties:
reg:
maxItems: 1
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: rtc
+
interrupts:
- minItems: 2
+ maxItems: 2
interrupt-names:
items:
@@ -39,6 +46,7 @@ properties:
minimum: 0x1
maximum: 0x1FFFFF
default: 0x198233
+ deprecated: true
required:
- compatible
@@ -61,5 +69,7 @@ examples:
interrupts = <0 26 4>, <0 27 4>;
interrupt-names = "alarm", "sec";
calibration = <0x198233>;
+ clock-names = "rtc";
+ clocks = <&rtc_clk>;
};
};
diff --git a/Documentation/devicetree/bindings/serial/8250.yaml b/Documentation/devicetree/bindings/serial/8250.yaml
index 5f6b113d378f..6258f5f59b19 100644
--- a/Documentation/devicetree/bindings/serial/8250.yaml
+++ b/Documentation/devicetree/bindings/serial/8250.yaml
@@ -62,6 +62,7 @@ properties:
- const: mrvl,pxa-uart
- const: nuvoton,wpcm450-uart
- const: nuvoton,npcm750-uart
+ - const: nuvoton,npcm845-uart
- const: nvidia,tegra20-uart
- const: nxp,lpc3220-uart
- items:
diff --git a/Documentation/devicetree/bindings/serial/efm32-uart.txt b/Documentation/devicetree/bindings/serial/efm32-uart.txt
deleted file mode 100644
index 4f8d8fde0c1c..000000000000
--- a/Documentation/devicetree/bindings/serial/efm32-uart.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-* Energymicro efm32 UART
-
-Required properties:
-- compatible : Should be "energymicro,efm32-uart"
-- reg : Address and length of the register set
-- interrupts : Should contain uart interrupt
-
-Optional properties:
-- energymicro,location : Decides the location of the USART I/O pins.
- Allowed range : [0 .. 5]
- Default: 0
-
-Example:
-
-uart@4000c400 {
- compatible = "energymicro,efm32-uart";
- reg = <0x4000c400 0x400>;
- interrupts = <15>;
- energymicro,location = <0>;
-};
diff --git a/Documentation/devicetree/bindings/serial/mediatek,uart.yaml b/Documentation/devicetree/bindings/serial/mediatek,uart.yaml
new file mode 100644
index 000000000000..4ff27d6d4d5b
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/mediatek,uart.yaml
@@ -0,0 +1,120 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/serial/mediatek,uart.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek Universal Asynchronous Receiver/Transmitter (UART)
+
+maintainers:
+ - Matthias Brugger <matthias.bgg@gmail.com>
+
+allOf:
+ - $ref: serial.yaml#
+
+description: |
+ The MediaTek UART is based on the basic 8250 UART and compatible
+ with 16550A, with enhancements for high speed baud rates and
+ support for DMA.
+
+properties:
+ compatible:
+ oneOf:
+ - const: mediatek,mt6577-uart
+ - items:
+ - enum:
+ - mediatek,mt2701-uart
+ - mediatek,mt2712-uart
+ - mediatek,mt6580-uart
+ - mediatek,mt6582-uart
+ - mediatek,mt6589-uart
+ - mediatek,mt6755-uart
+ - mediatek,mt6765-uart
+ - mediatek,mt6779-uart
+ - mediatek,mt6795-uart
+ - mediatek,mt6797-uart
+ - mediatek,mt7622-uart
+ - mediatek,mt7623-uart
+ - mediatek,mt7629-uart
+ - mediatek,mt7986-uart
+ - mediatek,mt8127-uart
+ - mediatek,mt8135-uart
+ - mediatek,mt8173-uart
+ - mediatek,mt8183-uart
+ - mediatek,mt8186-uart
+ - mediatek,mt8192-uart
+ - mediatek,mt8195-uart
+ - mediatek,mt8516-uart
+ - const: mediatek,mt6577-uart
+
+ reg:
+ description: The base address of the UART register bank
+ maxItems: 1
+
+ clocks:
+ minItems: 1
+ items:
+ - description: The clock the baudrate is derived from
+ - description: The bus clock for register accesses
+
+ clock-names:
+ minItems: 1
+ items:
+ - const: baud
+ - const: bus
+
+ dmas:
+ items:
+ - description: phandle to TX DMA
+ - description: phandle to RX DMA
+
+ dma-names:
+ items:
+ - const: tx
+ - const: rx
+
+ interrupts:
+ minItems: 1
+ maxItems: 2
+
+ interrupt-names:
+ description:
+ The UART interrupt and optionally the RX in-band wakeup interrupt.
+ minItems: 1
+ items:
+ - const: uart
+ - const: wakeup
+
+ pinctrl-0: true
+ pinctrl-1: true
+
+ pinctrl-names:
+ minItems: 1
+ items:
+ - const: default
+ - const: sleep
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - interrupts
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ serial@11006000 {
+ compatible = "mediatek,mt6589-uart", "mediatek,mt6577-uart";
+ reg = <0x11006000 0x400>;
+ interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 52 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-names = "uart", "wakeup";
+ clocks = <&uart_clk>, <&bus_clk>;
+ clock-names = "baud", "bus";
+ pinctrl-0 = <&uart_pin>;
+ pinctrl-1 = <&uart_pin_sleep>;
+ pinctrl-names = "default", "sleep";
+ };
diff --git a/Documentation/devicetree/bindings/serial/mtk-uart.txt b/Documentation/devicetree/bindings/serial/mtk-uart.txt
deleted file mode 100644
index 113b5d6a2245..000000000000
--- a/Documentation/devicetree/bindings/serial/mtk-uart.txt
+++ /dev/null
@@ -1,59 +0,0 @@
-* MediaTek Universal Asynchronous Receiver/Transmitter (UART)
-
-Required properties:
-- compatible should contain:
- * "mediatek,mt2701-uart" for MT2701 compatible UARTS
- * "mediatek,mt2712-uart" for MT2712 compatible UARTS
- * "mediatek,mt6580-uart" for MT6580 compatible UARTS
- * "mediatek,mt6582-uart" for MT6582 compatible UARTS
- * "mediatek,mt6589-uart" for MT6589 compatible UARTS
- * "mediatek,mt6755-uart" for MT6755 compatible UARTS
- * "mediatek,mt6765-uart" for MT6765 compatible UARTS
- * "mediatek,mt6779-uart" for MT6779 compatible UARTS
- * "mediatek,mt6795-uart" for MT6795 compatible UARTS
- * "mediatek,mt6797-uart" for MT6797 compatible UARTS
- * "mediatek,mt7622-uart" for MT7622 compatible UARTS
- * "mediatek,mt7623-uart" for MT7623 compatible UARTS
- * "mediatek,mt7629-uart" for MT7629 compatible UARTS
- * "mediatek,mt7986-uart", "mediatek,mt6577-uart" for MT7986 compatible UARTS
- * "mediatek,mt8127-uart" for MT8127 compatible UARTS
- * "mediatek,mt8135-uart" for MT8135 compatible UARTS
- * "mediatek,mt8173-uart" for MT8173 compatible UARTS
- * "mediatek,mt8183-uart", "mediatek,mt6577-uart" for MT8183 compatible UARTS
- * "mediatek,mt8186-uart", "mediatek,mt6577-uart" for MT8183 compatible UARTS
- * "mediatek,mt8192-uart", "mediatek,mt6577-uart" for MT8192 compatible UARTS
- * "mediatek,mt8195-uart", "mediatek,mt6577-uart" for MT8195 compatible UARTS
- * "mediatek,mt8516-uart" for MT8516 compatible UARTS
- * "mediatek,mt6577-uart" for MT6577 and all of the above
-
-- reg: The base address of the UART register bank.
-
-- interrupts:
- index 0: an interrupt specifier for the UART controller itself
- index 1: optional, an interrupt specifier with edge sensitivity on Rx pin to
- support Rx in-band wake up. If one would like to use this feature,
- one must create an addtional pinctrl to reconfigure Rx pin to normal
- GPIO before suspend.
-
-- clocks : Must contain an entry for each entry in clock-names.
- See ../clocks/clock-bindings.txt for details.
-- clock-names:
- - "baud": The clock the baudrate is derived from
- - "bus": The bus clock for register accesses (optional)
-
-For compatibility with older device trees an unnamed clock is used for the
-baud clock if the baudclk does not exist. Do not use this for new designs.
-
-Example:
-
- uart0: serial@11006000 {
- compatible = "mediatek,mt6589-uart", "mediatek,mt6577-uart";
- reg = <0x11006000 0x400>;
- interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_LOW>,
- <GIC_SPI 52 IRQ_TYPE_EDGE_FALLING>;
- clocks = <&uart_clk>, <&bus_clk>;
- clock-names = "baud", "bus";
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&uart_pin>;
- pinctrl-1 = <&uart_pin_sleep>;
- };
diff --git a/Documentation/devicetree/bindings/serial/renesas,hscif.yaml b/Documentation/devicetree/bindings/serial/renesas,hscif.yaml
index 87180d95cd4c..1957b9d782e8 100644
--- a/Documentation/devicetree/bindings/serial/renesas,hscif.yaml
+++ b/Documentation/devicetree/bindings/serial/renesas,hscif.yaml
@@ -57,6 +57,7 @@ properties:
- items:
- enum:
- renesas,hscif-r8a779a0 # R-Car V3U
+ - renesas,hscif-r8a779f0 # R-Car S4-8
- renesas,hscif-r8a779g0 # R-Car V4H
- const: renesas,rcar-gen4-hscif # R-Car Gen4
- const: renesas,hscif # generic HSCIF compatible UART
diff --git a/Documentation/devicetree/bindings/serial/rs485.yaml b/Documentation/devicetree/bindings/serial/rs485.yaml
index f2c9c9fe6aa7..90a1bab40f05 100644
--- a/Documentation/devicetree/bindings/serial/rs485.yaml
+++ b/Documentation/devicetree/bindings/serial/rs485.yaml
@@ -22,12 +22,12 @@ properties:
- description: Delay between rts signal and beginning of data sent in
milliseconds. It corresponds to the delay before sending data.
default: 0
- maximum: 1000
+ maximum: 100
- description: Delay between end of data sent and rts signal in milliseconds.
It corresponds to the delay after sending data and actual release
of the line.
default: 0
- maximum: 1000
+ maximum: 100
rs485-rts-active-low:
description: drive RTS low when sending (default is high).
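Illustrative note: a minimal sketch of an RS485-capable serial node using the delay property under the tightened 100 ms bound; the node name, compatible and register range are placeholders, not taken from this patch:

    serial@10020000 {
        compatible = "vendor,example-uart";    /* hypothetical compatible, for illustration only */
        reg = <0x10020000 0x100>;
        linux,rs485-enabled-at-boot-time;
        /* <delay before data, delay after data> in ms; each value must now be <= 100 */
        rs485-rts-delay = <10 20>;
        rs485-rts-active-low;
    };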
diff --git a/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml b/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml
index 12137fe80acf..dc74643ae72e 100644
--- a/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml
+++ b/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml
@@ -33,7 +33,9 @@ properties:
- rockchip,rk3368-uart
- rockchip,rk3399-uart
- rockchip,rk3568-uart
+ - rockchip,rk3588-uart
- rockchip,rv1108-uart
+ - rockchip,rv1126-uart
- const: snps,dw-apb-uart
- items:
- enum:
diff --git a/Documentation/devicetree/bindings/serio/ps2-gpio.txt b/Documentation/devicetree/bindings/serio/ps2-gpio.txt
deleted file mode 100644
index 7b7bc9cdf986..000000000000
--- a/Documentation/devicetree/bindings/serio/ps2-gpio.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-Device-Tree binding for ps/2 gpio device
-
-Required properties:
- - compatible = "ps2-gpio"
- - data-gpios: the data pin
- - clk-gpios: the clock pin
- - interrupts: Should trigger on the falling edge of the clock line.
-
-Optional properties:
- - write-enable: Indicates whether write function is provided
- to serio device. Possibly providing the write fn will not work, because
- of the tough timing requirements.
-
-Example nodes:
-
-ps2@0 {
- compatible = "ps2-gpio";
- interrupt-parent = <&gpio>;
- interrupts = <23 IRQ_TYPE_EDGE_FALLING>;
- data-gpios = <&gpio 24 GPIO_ACTIVE_HIGH>;
- clk-gpios = <&gpio 23 GPIO_ACTIVE_HIGH>;
- write-enable;
-};
diff --git a/Documentation/devicetree/bindings/serio/ps2-gpio.yaml b/Documentation/devicetree/bindings/serio/ps2-gpio.yaml
new file mode 100644
index 000000000000..a63d9172346f
--- /dev/null
+++ b/Documentation/devicetree/bindings/serio/ps2-gpio.yaml
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/serio/ps2-gpio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Bindings for GPIO based PS/2
+
+maintainers:
+ - Danilo Krummrich <danilokrummrich@dk-develop.de>
+
+properties:
+ compatible:
+ const: ps2-gpio
+
+ data-gpios:
+ description:
+ the gpio used for the data signal - this should be flagged as
+ active high using open drain with (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)
+ from <dt-bindings/gpio/gpio.h> since the signal is open drain by
+ definition
+ maxItems: 1
+
+ clk-gpios:
+ description:
+ the gpio used for the clock signal - this should be flagged as
+ active high using open drain with (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)
+ from <dt-bindings/gpio/gpio.h> since the signal is open drain by
+ definition
+ maxItems: 1
+
+ interrupts:
+ description:
+ The given interrupt should trigger on the falling edge of the clock line.
+ maxItems: 1
+
+ write-enable:
+ type: boolean
+ description:
+ Indicates whether the write function is provided to the serio device.
+ Note that the write function may not work reliably because of the tight
+ timing requirements.
+
+required:
+ - compatible
+ - data-gpios
+ - clk-gpios
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ ps2 {
+ compatible = "ps2-gpio";
+ interrupt-parent = <&gpio>;
+ interrupts = <23 IRQ_TYPE_EDGE_FALLING>;
+ data-gpios = <&gpio 24 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ clk-gpios = <&gpio 23 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ write-enable;
+ };
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.yaml
index d01e98768153..a4eeb7e158e5 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.yaml
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.yaml
@@ -13,7 +13,7 @@ description:
This binding describes the hardware component responsible for side channel
requests to the always-on subsystem (AOSS), used for certain power management
requests that is not handled by the standard RPMh interface. Each client in the
- SoC has it's own block of message RAM and IRQ for communication with the AOSS.
+ SoC has its own block of message RAM and IRQ for communication with the AOSS.
The protocol used to communicate in the message RAM is known as Qualcomm
Messaging Protocol (QMP)
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml
index 50f834563e19..09d5bfa920f2 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml
@@ -92,12 +92,33 @@ examples:
qcom,ipc = <&apcs 8 0>;
qcom,smd-edge = <15>;
- rpm-requests {
- compatible = "qcom,rpm-msm8974";
- qcom,smd-channels = "rpm_requests";
+ rpm-requests {
+ compatible = "qcom,rpm-msm8916";
+ qcom,smd-channels = "rpm_requests";
+
+ clock-controller {
+ compatible = "qcom,rpmcc-msm8916", "qcom,rpmcc";
+ #clock-cells = <1>;
+ clocks = <&xo_board>;
+ clock-names = "xo";
+ };
- /* Regulator nodes to follow */
+ power-controller {
+ compatible = "qcom,msm8916-rpmpd";
+ #power-domain-cells = <1>;
+ operating-points-v2 = <&rpmpd_opp_table>;
+
+ rpmpd_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-1 {
+ opp-level = <1>;
+ };
+ opp-2 {
+ opp-level = <2>;
+ };
+ };
};
};
- };
-...
+ };
+ };
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smd.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,smd.yaml
index bca07bb13ebf..9b3efe97f47c 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,smd.yaml
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,smd.yaml
@@ -21,51 +21,13 @@ properties:
patternProperties:
"^.*-edge|rpm$":
- type: object
+ $ref: /schemas/remoteproc/qcom,smd-edge.yaml#
description:
Each subnode of the SMD node represents a remote subsystem or a remote
processor of some sort - or in SMD language an "edge". The name of the
edges are not important.
properties:
- interrupts:
- maxItems: 1
-
- label:
- $ref: /schemas/types.yaml#/definitions/string
- description:
- Name of the edge, used for debugging and identification purposes. The
- node name will be used if this is not present.
-
- mboxes:
- maxItems: 1
- description:
- Reference to the mailbox representing the outgoing doorbell in APCS for
- this client.
-
- qcom,ipc:
- $ref: /schemas/types.yaml#/definitions/phandle-array
- items:
- - items:
- - description: phandle to a syscon node representing the APCS registers
- - description: u32 representing offset to the register within the syscon
- - description: u32 representing the ipc bit within the register
- description:
- Three entries specifying the outgoing ipc bit used for signaling the
- remote processor.
-
- qcom,smd-edge:
- $ref: /schemas/types.yaml#/definitions/uint32
- description:
- The identifier of the remote processor in the smd channel allocation
- table.
-
- qcom,remote-pid:
- $ref: /schemas/types.yaml#/definitions/uint32
- description:
- The identifier for the remote processor as known by the rest of the
- system.
-
rpm-requests:
type: object
description:
@@ -89,17 +51,7 @@ patternProperties:
additionalProperties: true
- required:
- - interrupts
- - qcom,smd-edge
-
- oneOf:
- - required:
- - mboxes
- - required:
- - qcom,ipc
-
- additionalProperties: false
+ unevaluatedProperties: false
required:
- compatible
diff --git a/Documentation/devicetree/bindings/sound/adi,adau1977.yaml b/Documentation/devicetree/bindings/sound/adi,adau1977.yaml
index b80454ad97da..847b83398d3d 100644
--- a/Documentation/devicetree/bindings/sound/adi,adau1977.yaml
+++ b/Documentation/devicetree/bindings/sound/adi,adau1977.yaml
@@ -32,8 +32,6 @@ properties:
reset-gpios:
maxItems: 1
- spi-max-frequency: true
-
AVDD-supply:
description: Analog power support for the device.
@@ -52,7 +50,10 @@ required:
- compatible
- AVDD-supply
-additionalProperties: false
+allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/sound/adi,max98396.yaml b/Documentation/devicetree/bindings/sound/adi,max98396.yaml
index ec4c10c2598a..8d2ef991db40 100644
--- a/Documentation/devicetree/bindings/sound/adi,max98396.yaml
+++ b/Documentation/devicetree/bindings/sound/adi,max98396.yaml
@@ -24,6 +24,21 @@ properties:
maxItems: 1
description: I2C address of the device.
+ avdd-supply:
+ description: A 1.8V supply that powers up the AVDD pin.
+
+ dvdd-supply:
+ description: A 1.2V supply that powers up the DVDD pin.
+
+ dvddio-supply:
+ description: A 1.2V or 1.8V supply that powers up the VDDIO pin.
+
+ pvdd-supply:
+ description: A 3.0V to 20V supply that powers up the PVDD pin.
+
+ vbat-supply:
+ description: A 3.3V to 5.5V supply that powers up the VBAT pin.
+
adi,vmon-slot-no:
description: slot number of the voltage sense monitor
$ref: "/schemas/types.yaml#/definitions/uint32"
@@ -36,13 +51,22 @@ properties:
$ref: "/schemas/types.yaml#/definitions/uint32"
minimum: 0
maximum: 15
- default: 0
+ default: 1
adi,spkfb-slot-no:
description: slot number of speaker DSP monitor
$ref: "/schemas/types.yaml#/definitions/uint32"
minimum: 0
maximum: 15
+ default: 2
+
+ adi,bypass-slot-no:
+ description:
+ Selects the PCM data input channel that is routed to the speaker
+ audio processing bypass path.
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ minimum: 0
+ maximum: 15
default: 0
adi,interleave-mode:
@@ -72,6 +96,10 @@ examples:
max98396: amplifier@39 {
compatible = "adi,max98396";
reg = <0x39>;
+ dvdd-supply = <&regulator_1v2>;
+ dvddio-supply = <&regulator_1v8>;
+ avdd-supply = <&regulator_1v8>;
+ pvdd-supply = <&regulator_pvdd>;
adi,vmon-slot-no = <0>;
adi,imon-slot-no = <1>;
reset-gpios = <&gpio 4 GPIO_ACTIVE_LOW>;
diff --git a/Documentation/devicetree/bindings/sound/allwinner,sun50i-a64-codec-analog.yaml b/Documentation/devicetree/bindings/sound/allwinner,sun50i-a64-codec-analog.yaml
index 3b764415c9ab..66859eb8f79a 100644
--- a/Documentation/devicetree/bindings/sound/allwinner,sun50i-a64-codec-analog.yaml
+++ b/Documentation/devicetree/bindings/sound/allwinner,sun50i-a64-codec-analog.yaml
@@ -21,6 +21,11 @@ properties:
description:
Regulator for the headphone amplifier
+ allwinner,internal-bias-resistor:
+ description:
+ Enable the internal 2.2K bias resistor between HBIAS and MICDET pins
+ type: boolean
+
required:
- compatible
- reg
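Illustrative note: a hedged sketch of how the new boolean could appear on an A64 analog codec node; the unit address and supply phandle are assumptions, not part of this patch:

    codec_analog: codec-analog@1f015c0 {
        compatible = "allwinner,sun50i-a64-codec-analog";
        reg = <0x01f015c0 0x4>;                /* assumed unit address */
        cpvdd-supply = <&reg_eldo1>;           /* assumed regulator phandle */
        /* enable the internal 2.2K bias resistor between HBIAS and MICDET */
        allwinner,internal-bias-resistor;
    };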
diff --git a/Documentation/devicetree/bindings/sound/atmel,sama5d2-classd.yaml b/Documentation/devicetree/bindings/sound/atmel,sama5d2-classd.yaml
new file mode 100644
index 000000000000..43d04702ac2d
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/atmel,sama5d2-classd.yaml
@@ -0,0 +1,100 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright (C) 2022 Microchip Technology, Inc. and its subsidiaries
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/atmel,sama5d2-classd.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Atmel ClassD Amplifier
+
+maintainers:
+ - Nicolas Ferre <nicolas.ferre@microchip.com>
+ - Alexandre Belloni <alexandre.belloni@bootlin.com>
+ - Claudiu Beznea <claudiu.beznea@microchip.com>
+
+description:
+ The Audio Class D Amplifier (CLASSD) is a digital input, Pulse Width
+ Modulated (PWM) output stereo Class D amplifier.
+
+properties:
+ compatible:
+ const: atmel,sama5d2-classd
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ dmas:
+ maxItems: 1
+
+ dma-names:
+ const: tx
+
+ clocks:
+ maxItems: 2
+
+ clock-names:
+ items:
+ - const: pclk
+ - const: gclk
+
+ atmel,model:
+ $ref: /schemas/types.yaml#/definitions/string
+ default: CLASSD
+ description: The user-visible name of this sound complex.
+
+ atmel,pwm-type:
+ $ref: /schemas/types.yaml#/definitions/string
+ enum:
+ - single
+ - diff
+ default: single
+ description: PWM modulation type.
+
+ atmel,non-overlap-time:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum:
+ - 5
+ - 10
+ - 15
+ - 20
+ default: 10
+ description:
+ Set the non-overlapping time; the unit is nanoseconds (ns).
+ Non-overlapping is disabled if not specified.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - dmas
+ - dma-names
+ - clock-names
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/dma/at91.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ classd: sound@fc048000 {
+ compatible = "atmel,sama5d2-classd";
+ reg = <0xfc048000 0x100>;
+ interrupts = <59 IRQ_TYPE_LEVEL_HIGH 7>;
+ dmas = <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+ | AT91_XDMAC_DT_PERID(47))>;
+ dma-names = "tx";
+ clocks = <&classd_clk>, <&classd_gclk>;
+ clock-names = "pclk", "gclk";
+ assigned-clocks = <&classd_gclk>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_classd_default>;
+ atmel,model = "classd @ SAMA5D2-Xplained";
+ atmel,pwm-type = "diff";
+ atmel,non-overlap-time = <10>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/atmel,sama5d2-i2s.yaml b/Documentation/devicetree/bindings/sound/atmel,sama5d2-i2s.yaml
new file mode 100644
index 000000000000..0cd1ff89baed
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/atmel,sama5d2-i2s.yaml
@@ -0,0 +1,85 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright (C) 2022 Microchip Technology, Inc. and its subsidiaries
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/atmel,sama5d2-i2s.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Atmel I2S controller
+
+maintainers:
+ - Nicolas Ferre <nicolas.ferre@microchip.com>
+ - Alexandre Belloni <alexandre.belloni@bootlin.com>
+ - Claudiu Beznea <claudiu.beznea@microchip.com>
+
+description:
+ Atmel I2S (Inter-IC Sound Controller) bus is the standard
+ interface for connecting audio devices, such as audio codecs.
+
+properties:
+ compatible:
+ const: atmel,sama5d2-i2s
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: Peripheral clock
+ - description: Generated clock (Optional)
+ - description: I2S mux clock (Optional). Set it together
+ with gclk when Master Mode is required.
+ minItems: 1
+
+ clock-names:
+ items:
+ - const: pclk
+ - const: gclk
+ - const: muxclk
+ minItems: 1
+
+ dmas:
+ items:
+ - description: TX DMA Channel
+ - description: RX DMA Channel
+
+ dma-names:
+ items:
+ - const: tx
+ - const: rx
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - dmas
+ - dma-names
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/dma/at91.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ i2s@f8050000 {
+ compatible = "atmel,sama5d2-i2s";
+ reg = <0xf8050000 0x300>;
+ interrupts = <54 IRQ_TYPE_LEVEL_HIGH 7>;
+ dmas = <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(31))>,
+ <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(32))>;
+ dma-names = "tx", "rx";
+ clocks = <&i2s0_clk>, <&i2s0_gclk>, <&i2s0muxck>;
+ clock-names = "pclk", "gclk", "muxclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2s0_default>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/atmel,sama5d2-pdmic.yaml b/Documentation/devicetree/bindings/sound/atmel,sama5d2-pdmic.yaml
new file mode 100644
index 000000000000..f320b561f24c
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/atmel,sama5d2-pdmic.yaml
@@ -0,0 +1,98 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright (C) 2022 Microchip Technology, Inc. and its subsidiaries
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/atmel,sama5d2-pdmic.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Atmel PDMIC decoder
+
+maintainers:
+ - Claudiu Beznea <claudiu.beznea@microchip.com>
+
+description:
+ Atmel Pulse Density Modulation Interface Controller
+ (PDMIC) peripheral is a mono PDM decoder module
+ that decodes an incoming PDM sample stream.
+
+properties:
+ compatible:
+ const: atmel,sama5d2-pdmic
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: peripheral clock
+ - description: generated clock
+
+ clock-names:
+ items:
+ - const: pclk
+ - const: gclk
+
+ dmas:
+ maxItems: 1
+
+ dma-names:
+ const: rx
+
+ atmel,mic-min-freq:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ The minimal frequency that the microphone supports.
+
+ atmel,mic-max-freq:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ The maximal frequency that the microphone supports.
+
+ atmel,model:
+ $ref: /schemas/types.yaml#/definitions/string
+ default: PDMIC
+ description: The user-visible name of this sound card.
+
+ atmel,mic-offset:
+ $ref: /schemas/types.yaml#/definitions/int32
+ default: 0
+ description: The offset that should be added.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - dmas
+ - dma-names
+ - clock-names
+ - clocks
+ - atmel,mic-min-freq
+ - atmel,mic-max-freq
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/dma/at91.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ pdmic: sound@f8018000 {
+ compatible = "atmel,sama5d2-pdmic";
+ reg = <0xf8018000 0x124>;
+ interrupts = <48 IRQ_TYPE_LEVEL_HIGH 7>;
+ dmas = <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+ | AT91_XDMAC_DT_PERID(50))>;
+ dma-names = "rx";
+ clocks = <&pdmic_clk>, <&pdmic_gclk>;
+ clock-names = "pclk", "gclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pdmic_default>;
+ atmel,model = "PDMIC@sama5d2_xplained";
+ atmel,mic-min-freq = <1000000>;
+ atmel,mic-max-freq = <3246000>;
+ atmel,mic-offset = <0x0>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/atmel-classd.txt b/Documentation/devicetree/bindings/sound/atmel-classd.txt
deleted file mode 100644
index 898551076382..000000000000
--- a/Documentation/devicetree/bindings/sound/atmel-classd.txt
+++ /dev/null
@@ -1,55 +0,0 @@
-* Atmel ClassD driver under ALSA SoC architecture
-
-Required properties:
-- compatible
- Should be "atmel,sama5d2-classd".
-- reg
- Should contain ClassD registers location and length.
-- interrupts
- Should contain the IRQ line for the ClassD.
-- dmas
- One DMA specifiers as described in atmel-dma.txt and dma.txt files.
-- dma-names
- Must be "tx".
-- clock-names
- Tuple listing input clock names.
- Required elements: "pclk" and "gclk".
-- clocks
- Please refer to clock-bindings.txt.
-- assigned-clocks
- Should be <&classd_gclk>.
-
-Optional properties:
-- pinctrl-names, pinctrl-0
- Please refer to pinctrl-bindings.txt.
-- atmel,model
- The user-visible name of this sound complex.
- The default value is "CLASSD".
-- atmel,pwm-type
- PWM modulation type, "single" or "diff".
- The default value is "single".
-- atmel,non-overlap-time
- Set non-overlapping time, the unit is nanosecond(ns).
- There are four values,
- <5>, <10>, <15>, <20>, the default value is <10>.
- Non-overlapping will be disabled if not specified.
-
-Example:
-classd: classd@fc048000 {
- compatible = "atmel,sama5d2-classd";
- reg = <0xfc048000 0x100>;
- interrupts = <59 IRQ_TYPE_LEVEL_HIGH 7>;
- dmas = <&dma0
- (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
- | AT91_XDMAC_DT_PERID(47))>;
- dma-names = "tx";
- clocks = <&classd_clk>, <&classd_gclk>;
- clock-names = "pclk", "gclk";
- assigned-clocks = <&classd_gclk>;
-
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_classd_default>;
- atmel,model = "classd @ SAMA5D2-Xplained";
- atmel,pwm-type = "diff";
- atmel,non-overlap-time = <10>;
-};
diff --git a/Documentation/devicetree/bindings/sound/atmel-i2s.txt b/Documentation/devicetree/bindings/sound/atmel-i2s.txt
deleted file mode 100644
index 40549f496a81..000000000000
--- a/Documentation/devicetree/bindings/sound/atmel-i2s.txt
+++ /dev/null
@@ -1,46 +0,0 @@
-* Atmel I2S controller
-
-Required properties:
-- compatible: Should be "atmel,sama5d2-i2s".
-- reg: Should be the physical base address of the controller and the
- length of memory mapped region.
-- interrupts: Should contain the interrupt for the controller.
-- dmas: Should be one per channel name listed in the dma-names property,
- as described in atmel-dma.txt and dma.txt files.
-- dma-names: Two dmas have to be defined, "tx" and "rx".
- This IP also supports one shared channel for both rx and tx;
- if this mode is used, one "rx-tx" name must be used.
-- clocks: Must contain an entry for each entry in clock-names.
- Please refer to clock-bindings.txt.
-- clock-names: Should be one of each entry matching the clocks phandles list:
- - "pclk" (peripheral clock) Required.
- - "gclk" (generated clock) Optional (1).
- - "muxclk" (I2S mux clock) Optional (1).
-
-Optional properties:
-- pinctrl-0: Should specify pin control groups used for this controller.
-- princtrl-names: Should contain only one value - "default".
-
-
-(1) : Only the peripheral clock is required. The generated clock and the I2S
- mux clock are optional and should only be set together, when Master Mode
- is required.
-
-Example:
-
- i2s@f8050000 {
- compatible = "atmel,sama5d2-i2s";
- reg = <0xf8050000 0x300>;
- interrupts = <54 IRQ_TYPE_LEVEL_HIGH 7>;
- dmas = <&dma0
- (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
- AT91_XDMAC_DT_PERID(31))>,
- <&dma0
- (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
- AT91_XDMAC_DT_PERID(32))>;
- dma-names = "tx", "rx";
- clocks = <&i2s0_clk>, <&i2s0_gclk>, <&i2s0muxck>;
- clock-names = "pclk", "gclk", "muxclk";
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_i2s0_default>;
- };
diff --git a/Documentation/devicetree/bindings/sound/atmel-pdmic.txt b/Documentation/devicetree/bindings/sound/atmel-pdmic.txt
deleted file mode 100644
index e0875f17c229..000000000000
--- a/Documentation/devicetree/bindings/sound/atmel-pdmic.txt
+++ /dev/null
@@ -1,55 +0,0 @@
-* Atmel PDMIC driver under ALSA SoC architecture
-
-Required properties:
-- compatible
- Should be "atmel,sama5d2-pdmic".
-- reg
- Should contain PDMIC registers location and length.
-- interrupts
- Should contain the IRQ line for the PDMIC.
-- dmas
- One DMA specifiers as described in atmel-dma.txt and dma.txt files.
-- dma-names
- Must be "rx".
-- clock-names
- Required elements:
- - "pclk" peripheral clock
- - "gclk" generated clock
-- clocks
- Must contain an entry for each required entry in clock-names.
- Please refer to clock-bindings.txt.
-- atmel,mic-min-freq
- The minimal frequency that the micphone supports.
-- atmel,mic-max-freq
- The maximal frequency that the micphone supports.
-
-Optional properties:
-- pinctrl-names, pinctrl-0
- Please refer to pinctrl-bindings.txt.
-- atmel,model
- The user-visible name of this sound card.
- The default value is "PDMIC".
-- atmel,mic-offset
- The offset that should be added.
- The range is from -32768 to 32767.
- The default value is 0.
-
-Example:
- pdmic@f8018000 {
- compatible = "atmel,sama5d2-pdmic";
- reg = <0xf8018000 0x124>;
- interrupts = <48 IRQ_TYPE_LEVEL_HIGH 7>;
- dmas = <&dma0
- (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
- | AT91_XDMAC_DT_PERID(50))>;
- dma-names = "rx";
- clocks = <&pdmic_clk>, <&pdmic_gclk>;
- clock-names = "pclk", "gclk";
-
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_pdmic_default>;
- atmel,model = "PDMIC @ sama5d2_xplained";
- atmel,mic-min-freq = <1000000>;
- atmel,mic-max-freq = <3246000>;
- atmel,mic-offset = <0x0>;
- };
diff --git a/Documentation/devicetree/bindings/sound/atmel-sam9x5-wm8731-audio.txt b/Documentation/devicetree/bindings/sound/atmel-sam9x5-wm8731-audio.txt
index 0720857089a7..8facbce53db8 100644
--- a/Documentation/devicetree/bindings/sound/atmel-sam9x5-wm8731-audio.txt
+++ b/Documentation/devicetree/bindings/sound/atmel-sam9x5-wm8731-audio.txt
@@ -16,7 +16,7 @@ Board connectors:
* Line In Jack
wm8731 pins:
-cf Documentation/devicetree/bindings/sound/wm8731.txt
+cf Documentation/devicetree/bindings/sound/wlf,wm8731.yaml
Example:
sound {
diff --git a/Documentation/devicetree/bindings/sound/da9055.txt b/Documentation/devicetree/bindings/sound/da9055.txt
index ed1b7cc6f249..75c6338b6ae2 100644
--- a/Documentation/devicetree/bindings/sound/da9055.txt
+++ b/Documentation/devicetree/bindings/sound/da9055.txt
@@ -2,7 +2,7 @@
DA9055 provides Audio CODEC support (I2C only).
-The Audio CODEC device in DA9055 has it's own I2C address which is configurable,
+The Audio CODEC device in DA9055 has its own I2C address which is configurable,
so the device is instantiated separately from the PMIC (MFD) device.
For details on accompanying PMIC I2C device, see the following:
diff --git a/Documentation/devicetree/bindings/sound/designware-i2s.txt b/Documentation/devicetree/bindings/sound/designware-i2s.txt
deleted file mode 100644
index 6a536d570e29..000000000000
--- a/Documentation/devicetree/bindings/sound/designware-i2s.txt
+++ /dev/null
@@ -1,35 +0,0 @@
-DesignWare I2S controller
-
-Required properties:
- - compatible : Must be "snps,designware-i2s"
- - reg : Must contain the I2S core's registers location and length
- - clocks : Pairs of phandle and specifier referencing the controller's
- clocks. The controller expects one clock: the clock used as the sampling
- rate reference clock sample.
- - clock-names : "i2sclk" for the sample rate reference clock.
- - dmas: Pairs of phandle and specifier for the DMA channels that are used by
- the core. The core expects one or two dma channels: one for transmit and
- one for receive.
- - dma-names : "tx" for the transmit channel, "rx" for the receive channel.
-
-Optional properties:
- - interrupts: The interrupt line number for the I2S controller. Add this
- parameter if the I2S controller that you are using does not support DMA.
-
-For more details on the 'dma', 'dma-names', 'clock' and 'clock-names'
-properties please check:
- * resource-names.txt
- * clock/clock-bindings.txt
- * dma/dma.txt
-
-Example:
-
- soc_i2s: i2s@7ff90000 {
- compatible = "snps,designware-i2s";
- reg = <0x0 0x7ff90000 0x0 0x1000>;
- clocks = <&scpi_i2sclk 0>;
- clock-names = "i2sclk";
- #sound-dai-cells = <0>;
- dmas = <&dma0 5>;
- dma-names = "tx";
- };
diff --git a/Documentation/devicetree/bindings/sound/fsl,micfil.txt b/Documentation/devicetree/bindings/sound/fsl,micfil.txt
deleted file mode 100644
index 1ea05d4996c7..000000000000
--- a/Documentation/devicetree/bindings/sound/fsl,micfil.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-NXP MICFIL Digital Audio Interface (MICFIL).
-
-The MICFIL digital interface provides a 16-bit audio signal from a PDM
-microphone bitstream in a configurable output sampling rate.
-
-Required properties:
-
- - compatible : Compatible list, contains "fsl,imx8mm-micfil"
- or "fsl,imx8mp-micfil"
-
- - reg : Offset and length of the register set for the device.
-
- - interrupts : Contains the micfil interrupts.
-
- - clocks : Must contain an entry for each entry in clock-names.
-
- - clock-names : Must include the "ipg_clk" for register access and
- "ipg_clk_app" for internal micfil clock.
-
- - dmas : Generic dma devicetree binding as described in
- Documentation/devicetree/bindings/dma/dma.txt.
-
-Example:
-micfil: micfil@30080000 {
- compatible = "fsl,imx8mm-micfil";
- reg = <0x0 0x30080000 0x0 0x10000>;
- interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MM_CLK_PDM_IPG>,
- <&clk IMX8MM_CLK_PDM_ROOT>;
- clock-names = "ipg_clk", "ipg_clk_app";
- dmas = <&sdma2 24 26 0x80000000>;
-};
diff --git a/Documentation/devicetree/bindings/sound/fsl,micfil.yaml b/Documentation/devicetree/bindings/sound/fsl,micfil.yaml
new file mode 100644
index 000000000000..64d57758ee67
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/fsl,micfil.yaml
@@ -0,0 +1,85 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/fsl,micfil.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP MICFIL Digital Audio Interface (MICFIL)
+
+maintainers:
+ - Shengjiu Wang <shengjiu.wang@nxp.com>
+
+description: |
+ The MICFIL digital interface provides a 16-bit or 24-bit audio signal
+ from a PDM microphone bitstream at a configurable output sampling rate.
+
+properties:
+ compatible:
+ enum:
+ - fsl,imx8mm-micfil
+ - fsl,imx8mp-micfil
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ items:
+ - description: Digital Microphone interface interrupt
+ - description: Digital Microphone interface error interrupt
+ - description: voice activity detector event interrupt
+ - description: voice activity detector error interrupt
+
+ dmas:
+ items:
+ - description: DMA controller phandle and request line for RX
+
+ dma-names:
+ items:
+ - const: rx
+
+ clocks:
+ items:
+ - description: The ipg clock for register access
+ - description: internal micfil clock
+ - description: PLL clock source for 8kHz series
+ - description: PLL clock source for 11kHz series
+ - description: External clock 3
+ minItems: 2
+
+ clock-names:
+ items:
+ - const: ipg_clk
+ - const: ipg_clk_app
+ - const: pll8k
+ - const: pll11k
+ - const: clkext3
+ minItems: 2
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - dmas
+ - dma-names
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/imx8mm-clock.h>
+ micfil: audio-controller@30080000 {
+ compatible = "fsl,imx8mm-micfil";
+ reg = <0x30080000 0x10000>;
+ interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_PDM_IPG>,
+ <&clk IMX8MM_CLK_PDM_ROOT>;
+ clock-names = "ipg_clk", "ipg_clk_app";
+ dmas = <&sdma2 24 25 0>;
+ dma-names = "rx";
+ };
diff --git a/Documentation/devicetree/bindings/sound/fsl,mqs.txt b/Documentation/devicetree/bindings/sound/fsl,mqs.txt
index 40353fc30255..d66284b8bef2 100644
--- a/Documentation/devicetree/bindings/sound/fsl,mqs.txt
+++ b/Documentation/devicetree/bindings/sound/fsl,mqs.txt
@@ -2,7 +2,7 @@ fsl,mqs audio CODEC
Required properties:
- compatible : Must contain one of "fsl,imx6sx-mqs", "fsl,codec-mqs"
- "fsl,imx8qm-mqs", "fsl,imx8qxp-mqs".
+ "fsl,imx8qm-mqs", "fsl,imx8qxp-mqs", "fsl,imx93-mqs".
- clocks : A list of phandles + clock-specifiers, one for each entry in
clock-names
- clock-names : "mclk" - must required.
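Illustrative note: a hedged sketch of an MQS node using the newly documented i.MX93 compatible; the clock phandle is an assumption, and SoC-specific properties (such as a syscon/gpr phandle) may also be required but are omitted here:

    mqs: mqs {
        compatible = "fsl,imx93-mqs";
        clocks = <&clk_mclk>;                  /* assumed clock phandle */
        clock-names = "mclk";
    };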
diff --git a/Documentation/devicetree/bindings/sound/fsl,spdif.yaml b/Documentation/devicetree/bindings/sound/fsl,spdif.yaml
index f226ec13167a..1d64e8337aa4 100644
--- a/Documentation/devicetree/bindings/sound/fsl,spdif.yaml
+++ b/Documentation/devicetree/bindings/sound/fsl,spdif.yaml
@@ -58,6 +58,8 @@ properties:
slave of the Shared Peripheral Bus and when two or more bus masters
(CPU, DMA or DSP) try to access it. This property is optional depending
on the SoC design.
+ - description: PLL clock source for 8kHz series rate, optional.
+ - description: PLL clock source for 11kHz series rate, optional.
minItems: 9
clock-names:
@@ -72,6 +74,8 @@ properties:
- const: rxtx6
- const: rxtx7
- const: spba
+ - const: pll8k
+ - const: pll11k
minItems: 9
big-endian:
diff --git a/Documentation/devicetree/bindings/sound/fsl-sai.txt b/Documentation/devicetree/bindings/sound/fsl-sai.txt
index c71c5861d787..fbdefc3fade7 100644
--- a/Documentation/devicetree/bindings/sound/fsl-sai.txt
+++ b/Documentation/devicetree/bindings/sound/fsl-sai.txt
@@ -21,6 +21,9 @@ Required properties:
- clock-names : Must include the "bus" for register access and
"mclk1", "mclk2", "mclk3" for bit clock and frame
clock providing.
+ "pll8k" and "pll11k" are optional; they are the clock
+ sources for the root clock, one for the 8kHz series rates
+ and the other for the 11kHz series rates.
- dmas : Generic dma devicetree binding as described in
Documentation/devicetree/bindings/dma/dma.txt.
@@ -49,6 +52,14 @@ Required properties:
receive data by following their own bit clocks and
frame sync clocks separately.
+ - fsl,dataline : Configures the datalines. Each configuration takes three values:
+ the first is the type, I2S (1) or PDM (2),
+ the second is the dataline mask for 'rx',
+ the third is the dataline mask for 'tx'.
+ For example: fsl,dataline = <1 0xff 0xff 2 0xff 0x11>;
+ means the I2S type rx mask is 0xff and tx mask is 0xff, and the PDM
+ type rx mask is 0xff and tx mask is 0x11 (datalines 1 and 4 enabled).
+
Optional properties:
- big-endian : Boolean property, required if all the SAI
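Illustrative note: a syntactically valid SAI fragment carrying the new fsl,dataline property with the mask values quoted above; the node address and compatible are assumptions, and the remaining required properties (interrupts, clocks, dmas) are omitted for brevity:

    sai3: sai@30c30000 {
        compatible = "fsl,imx8mm-sai";         /* assumed SoC variant */
        reg = <0x30c30000 0x10000>;            /* assumed unit address */
        /* I2S: rx mask 0xff, tx mask 0xff; PDM: rx mask 0xff, tx mask 0x11 */
        fsl,dataline = <1 0xff 0xff 2 0xff 0x11>;
    };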
diff --git a/Documentation/devicetree/bindings/sound/mt6358.txt b/Documentation/devicetree/bindings/sound/mt6358.txt
index 59a73ffdf1d3..fbe9e55c68f5 100644
--- a/Documentation/devicetree/bindings/sound/mt6358.txt
+++ b/Documentation/devicetree/bindings/sound/mt6358.txt
@@ -7,7 +7,9 @@ Must be a child node of PMIC wrapper.
Required properties:
-- compatible : "mediatek,mt6358-sound".
+- compatible : Must be one of:
+ "mediatek,mt6358-sound"
+ "mediatek,mt6366-sound"
- Avdd-supply : power source of AVDD
Optional properties:
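Illustrative note: a minimal hedged sketch of a codec node using the new MT6366 compatible as a child of the PMIC wrapper; the supply phandle is an assumption:

    mt6366_snd: mt6366-sound {
        compatible = "mediatek,mt6366-sound";
        Avdd-supply = <&mt6366_vaud28_reg>;    /* assumed regulator phandle */
    };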
diff --git a/Documentation/devicetree/bindings/sound/mt8186-afe-pcm.yaml b/Documentation/devicetree/bindings/sound/mt8186-afe-pcm.yaml
new file mode 100644
index 000000000000..88f82d096443
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/mt8186-afe-pcm.yaml
@@ -0,0 +1,175 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/mt8186-afe-pcm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mediatek AFE PCM controller for mt8186
+
+maintainers:
+ - Jiaxin Yu <jiaxin.yu@mediatek.com>
+
+properties:
+ compatible:
+ const: mediatek,mt8186-sound
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ reset-names:
+ const: audiosys
+
+ mediatek,apmixedsys:
+ $ref: "/schemas/types.yaml#/definitions/phandle"
+ description: The phandle of the mediatek apmixedsys controller
+
+ mediatek,infracfg:
+ $ref: "/schemas/types.yaml#/definitions/phandle"
+ description: The phandle of the mediatek infracfg controller
+
+ mediatek,topckgen:
+ $ref: "/schemas/types.yaml#/definitions/phandle"
+ description: The phandle of the mediatek topckgen controller
+
+ clocks:
+ items:
+ - description: audio infra sys clock
+ - description: audio infra 26M clock
+ - description: audio top mux
+ - description: audio intbus mux
+ - description: mainpll 136.5M clock
+ - description: faud1 mux
+ - description: apll1 clock
+ - description: faud2 mux
+ - description: apll2 clock
+ - description: audio engen1 mux
+ - description: apll1_d8 22.5792M clock
+ - description: audio engen2 mux
+ - description: apll2_d8 24.576M clock
+ - description: i2s0 mclk mux
+ - description: i2s1 mclk mux
+ - description: i2s2 mclk mux
+ - description: i2s4 mclk mux
+ - description: tdm mclk mux
+ - description: i2s0_mck divider
+ - description: i2s1_mck divider
+ - description: i2s2_mck divider
+ - description: i2s4_mck divider
+ - description: tdm_mck divider
+ - description: audio hires mux
+ - description: 26M clock
+
+ clock-names:
+ items:
+ - const: aud_infra_clk
+ - const: mtkaif_26m_clk
+ - const: top_mux_audio
+ - const: top_mux_audio_int
+ - const: top_mainpll_d2_d4
+ - const: top_mux_aud_1
+ - const: top_apll1_ck
+ - const: top_mux_aud_2
+ - const: top_apll2_ck
+ - const: top_mux_aud_eng1
+ - const: top_apll1_d8
+ - const: top_mux_aud_eng2
+ - const: top_apll2_d8
+ - const: top_i2s0_m_sel
+ - const: top_i2s1_m_sel
+ - const: top_i2s2_m_sel
+ - const: top_i2s4_m_sel
+ - const: top_tdm_m_sel
+ - const: top_apll12_div0
+ - const: top_apll12_div1
+ - const: top_apll12_div2
+ - const: top_apll12_div4
+ - const: top_apll12_div_tdm
+ - const: top_mux_audio_h
+ - const: top_clk26m_clk
+
+required:
+ - compatible
+ - interrupts
+ - resets
+ - reset-names
+ - mediatek,apmixedsys
+ - mediatek,infracfg
+ - mediatek,topckgen
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ afe: mt8186-afe-pcm@11210000 {
+ compatible = "mediatek,mt8186-sound";
+ reg = <0x11210000 0x2000>;
+ interrupts = <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&watchdog 17>; //MT8186_TOPRGU_AUDIO_SW_RST
+ reset-names = "audiosys";
+ mediatek,apmixedsys = <&apmixedsys>;
+ mediatek,infracfg = <&infracfg>;
+ mediatek,topckgen = <&topckgen>;
+ clocks = <&infracfg_ao 44>, //CLK_INFRA_AO_AUDIO
+ <&infracfg_ao 54>, //CLK_INFRA_AO_AUDIO_26M_BCLK
+ <&topckgen 15>, //CLK_TOP_AUDIO
+ <&topckgen 16>, //CLK_TOP_AUD_INTBUS
+ <&topckgen 70>, //CLK_TOP_MAINPLL_D2_D4
+ <&topckgen 17>, //CLK_TOP_AUD_1
+ <&apmixedsys 12>, //CLK_APMIXED_APLL1
+ <&topckgen 18>, //CLK_TOP_AUD_2
+ <&apmixedsys 13>, //CLK_APMIXED_APLL2
+ <&topckgen 19>, //CLK_TOP_AUD_ENGEN1
+ <&topckgen 101>, //CLK_TOP_APLL1_D8
+ <&topckgen 20>, //CLK_TOP_AUD_ENGEN2
+ <&topckgen 104>, //CLK_TOP_APLL2_D8
+ <&topckgen 63>, //CLK_TOP_APLL_I2S0_MCK_SEL
+ <&topckgen 64>, //CLK_TOP_APLL_I2S1_MCK_SEL
+ <&topckgen 65>, //CLK_TOP_APLL_I2S2_MCK_SEL
+ <&topckgen 66>, //CLK_TOP_APLL_I2S4_MCK_SEL
+ <&topckgen 67>, //CLK_TOP_APLL_TDMOUT_MCK_SEL
+ <&topckgen 131>, //CLK_TOP_APLL12_CK_DIV0
+ <&topckgen 132>, //CLK_TOP_APLL12_CK_DIV1
+ <&topckgen 133>, //CLK_TOP_APLL12_CK_DIV2
+ <&topckgen 134>, //CLK_TOP_APLL12_CK_DIV4
+ <&topckgen 135>, //CLK_TOP_APLL12_CK_DIV_TDMOUT_M
+ <&topckgen 44>, //CLK_TOP_AUDIO_H
+ <&clk26m>;
+ clock-names = "aud_infra_clk",
+ "mtkaif_26m_clk",
+ "top_mux_audio",
+ "top_mux_audio_int",
+ "top_mainpll_d2_d4",
+ "top_mux_aud_1",
+ "top_apll1_ck",
+ "top_mux_aud_2",
+ "top_apll2_ck",
+ "top_mux_aud_eng1",
+ "top_apll1_d8",
+ "top_mux_aud_eng2",
+ "top_apll2_d8",
+ "top_i2s0_m_sel",
+ "top_i2s1_m_sel",
+ "top_i2s2_m_sel",
+ "top_i2s4_m_sel",
+ "top_tdm_m_sel",
+ "top_apll12_div0",
+ "top_apll12_div1",
+ "top_apll12_div2",
+ "top_apll12_div4",
+ "top_apll12_div_tdm",
+ "top_mux_audio_h",
+ "top_clk26m_clk";
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/sound/mt8186-mt6366-da7219-max98357.yaml b/Documentation/devicetree/bindings/sound/mt8186-mt6366-da7219-max98357.yaml
new file mode 100644
index 000000000000..513cd28b2027
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/mt8186-mt6366-da7219-max98357.yaml
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/mt8186-mt6366-da7219-max98357.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mediatek MT8186 with MT6366, DA7219 and MAX98357 ASoC sound card driver
+
+maintainers:
+ - Jiaxin Yu <jiaxin.yu@mediatek.com>
+
+description:
+ This binding describes the MT8186 sound card.
+
+properties:
+ compatible:
+ enum:
+ - mediatek,mt8186-mt6366-da7219-max98357-sound
+
+ mediatek,platform:
+ $ref: "/schemas/types.yaml#/definitions/phandle"
+ description: The phandle of MT8186 ASoC platform.
+
+ headset-codec:
+ type: object
+ additionalProperties: false
+ properties:
+ sound-dai:
+ maxItems: 1
+ required:
+ - sound-dai
+
+ playback-codecs:
+ type: object
+ additionalProperties: false
+ properties:
+ sound-dai:
+ items:
+ - description: phandle of dp codec
+ - description: phandle of l channel speaker codec
+ - description: phandle of r channel speaker codec
+ minItems: 2
+ required:
+ - sound-dai
+
+additionalProperties: false
+
+required:
+ - compatible
+ - mediatek,platform
+ - headset-codec
+ - playback-codecs
+
+examples:
+ - |
+
+ sound: mt8186-sound {
+ compatible = "mediatek,mt8186-mt6366-da7219-max98357-sound";
+ mediatek,platform = <&afe>;
+ pinctrl-names = "aud_clk_mosi_off",
+ "aud_clk_mosi_on";
+ pinctrl-0 = <&aud_clk_mosi_off>;
+ pinctrl-1 = <&aud_clk_mosi_on>;
+
+ headset-codec {
+ sound-dai = <&da7219>;
+ };
+
+ playback-codecs {
+ sound-dai = <&anx_bridge_dp>,
+ <&max98357a>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/sound/mt8186-mt6366-rt1019-rt5682s.yaml b/Documentation/devicetree/bindings/sound/mt8186-mt6366-rt1019-rt5682s.yaml
new file mode 100644
index 000000000000..059a7629b2d3
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/mt8186-mt6366-rt1019-rt5682s.yaml
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/mt8186-mt6366-rt1019-rt5682s.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mediatek MT8186 with MT6366, RT1019 and RT5682S ASoC sound card driver
+
+maintainers:
+ - Jiaxin Yu <jiaxin.yu@mediatek.com>
+
+description:
+ This binding describes the MT8186 sound card.
+
+properties:
+ compatible:
+ enum:
+ - mediatek,mt8186-mt6366-rt1019-rt5682s-sound
+
+ mediatek,platform:
+ $ref: "/schemas/types.yaml#/definitions/phandle"
+ description: The phandle of MT8186 ASoC platform.
+
+ headset-codec:
+ type: object
+ additionalProperties: false
+ properties:
+ sound-dai:
+ maxItems: 1
+ required:
+ - sound-dai
+
+ playback-codecs:
+ type: object
+ additionalProperties: false
+ properties:
+ sound-dai:
+ items:
+ - description: phandle of dp codec
+ - description: phandle of l channel speaker codec
+ - description: phandle of r channel speaker codec
+ minItems: 2
+ required:
+ - sound-dai
+
+additionalProperties: false
+
+required:
+ - compatible
+ - mediatek,platform
+ - headset-codec
+ - playback-codecs
+
+examples:
+ - |
+
+ sound: mt8186-sound {
+ compatible = "mediatek,mt8186-mt6366-rt1019-rt5682s-sound";
+ mediatek,platform = <&afe>;
+ pinctrl-names = "aud_clk_mosi_off",
+ "aud_clk_mosi_on";
+ pinctrl-0 = <&aud_clk_mosi_off>;
+ pinctrl-1 = <&aud_clk_mosi_on>;
+
+ headset-codec {
+ sound-dai = <&rt5682s>;
+ };
+
+ playback-codecs {
+ sound-dai = <&it6505dptx>,
+ <&rt1019p>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/sound/nau8821.txt b/Documentation/devicetree/bindings/sound/nau8821.txt
index 6c3baf7a5f21..7c84e7c7327a 100644
--- a/Documentation/devicetree/bindings/sound/nau8821.txt
+++ b/Documentation/devicetree/bindings/sound/nau8821.txt
@@ -34,7 +34,7 @@ Optional properties:
- nuvoton,jack-eject-debounce: number from 0 to 7 that sets debounce time to 2^(n+2) ms
- nuvoton,dmic-clk-threshold: the ADC threshold of DMIC clock.
-
+ - nuvoton,key_enable: Headset button detection switch.
Example:
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra210-ahub.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra210-ahub.yaml
index 6df6f858038c..47b6e712e4fb 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra210-ahub.yaml
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra210-ahub.yaml
@@ -110,6 +110,10 @@ patternProperties:
type: object
$ref: nvidia,tegra186-asrc.yaml#
+ '^processing-engine@[0-9a-f]+$':
+ type: object
+ $ref: nvidia,tegra210-ope.yaml#
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra210-mbdrc.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra210-mbdrc.yaml
new file mode 100644
index 000000000000..5b9198602fc6
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra210-mbdrc.yaml
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/nvidia,tegra210-mbdrc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Tegra210 MBDRC
+
+description:
+ The Multi Band Dynamic Range Compressor (MBDRC) is part of Output
+ Processing Engine (OPE) which interfaces with Audio Hub (AHUB) via
+ Audio Client Interface (ACIF). MBDRC can be used as a traditional
+ single full band or a dual band or a multi band dynamic processor.
+
+maintainers:
+ - Jon Hunter <jonathanh@nvidia.com>
+ - Mohan Kumar <mkumard@nvidia.com>
+ - Sameer Pujar <spujar@nvidia.com>
+
+properties:
+ compatible:
+ oneOf:
+ - const: nvidia,tegra210-mbdrc
+ - items:
+ - enum:
+ - nvidia,tegra234-mbdrc
+ - nvidia,tegra194-mbdrc
+ - nvidia,tegra186-mbdrc
+ - const: nvidia,tegra210-mbdrc
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ dynamic-range-compressor@702d8200 {
+ compatible = "nvidia,tegra210-mbdrc";
+ reg = <0x702d8200 0x200>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra210-ope.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra210-ope.yaml
new file mode 100644
index 000000000000..9dc9ba590fa3
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra210-ope.yaml
@@ -0,0 +1,87 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/nvidia,tegra210-ope.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Tegra210 OPE
+
+description:
+ The Output Processing Engine (OPE) is one of the AHUB clients. It has
+ PEQ (Parametric Equalizer) and MBDRC (Multi Band Dynamic Range Compressor)
+ sub-blocks for data processing.
+
+maintainers:
+ - Jon Hunter <jonathanh@nvidia.com>
+ - Mohan Kumar <mkumard@nvidia.com>
+ - Sameer Pujar <spujar@nvidia.com>
+
+allOf:
+ - $ref: name-prefix.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - const: nvidia,tegra210-ope
+ - items:
+ - enum:
+ - nvidia,tegra234-ope
+ - nvidia,tegra194-ope
+ - nvidia,tegra186-ope
+ - const: nvidia,tegra210-ope
+
+ reg:
+ maxItems: 1
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 1
+
+ ranges: true
+
+ sound-name-prefix:
+ pattern: "^OPE[1-9]$"
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+ properties:
+ port@0:
+ $ref: audio-graph-port.yaml#
+ unevaluatedProperties: false
+ description:
+ OPE ACIF (Audio Client Interface) input port. This is connected
+ to corresponding ACIF output port on AHUB (Audio Hub).
+
+ port@1:
+ $ref: audio-graph-port.yaml#
+ unevaluatedProperties: false
+ description:
+ OPE ACIF output port. This is connected to corresponding ACIF
+ input port on AHUB.
+
+patternProperties:
+ '^equalizer@[0-9a-f]+$':
+ type: object
+ $ref: nvidia,tegra210-peq.yaml#
+
+ '^dynamic-range-compressor@[0-9a-f]+$':
+ type: object
+ $ref: nvidia,tegra210-mbdrc.yaml#
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ processing-engine@702d8000 {
+ compatible = "nvidia,tegra210-ope";
+ reg = <0x702d8000 0x100>;
+ sound-name-prefix = "OPE1";
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra210-peq.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra210-peq.yaml
new file mode 100644
index 000000000000..1e373c49d639
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra210-peq.yaml
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/nvidia,tegra210-peq.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Tegra210 PEQ
+
+description:
+ The Parametric Equalizer (PEQ) is a cascade of biquad filters with
+ each filter tuned based on certain parameters. It can be used to
+ equalize the irregularities in the speaker frequency response.
+ PEQ sits inside Output Processing Engine (OPE) which interfaces
+ with Audio Hub (AHUB) via Audio Client Interface (ACIF).
+
+maintainers:
+ - Jon Hunter <jonathanh@nvidia.com>
+ - Mohan Kumar <mkumard@nvidia.com>
+ - Sameer Pujar <spujar@nvidia.com>
+
+properties:
+ compatible:
+ oneOf:
+ - const: nvidia,tegra210-peq
+ - items:
+ - enum:
+ - nvidia,tegra234-peq
+ - nvidia,tegra194-peq
+ - nvidia,tegra186-peq
+ - const: nvidia,tegra210-peq
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ equalizer@702d8100 {
+ compatible = "nvidia,tegra210-peq";
+ reg = <0x702d8100 0x100>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/sound/nxp,tfa989x.yaml b/Documentation/devicetree/bindings/sound/nxp,tfa989x.yaml
index b9b1dba40856..7f2e68ff6d34 100644
--- a/Documentation/devicetree/bindings/sound/nxp,tfa989x.yaml
+++ b/Documentation/devicetree/bindings/sound/nxp,tfa989x.yaml
@@ -15,6 +15,7 @@ allOf:
properties:
compatible:
enum:
+ - nxp,tfa9890
- nxp,tfa9895
- nxp,tfa9897
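Illustrative note: a hedged sketch of an amplifier node on I2C using the newly added nxp,tfa9890 compatible; the bus address is an assumption and the node follows the shape of the existing tfa989x examples:

    i2c {
        #address-cells = <1>;
        #size-cells = <0>;

        audio-codec@34 {
            compatible = "nxp,tfa9890";
            reg = <0x34>;                      /* assumed I2C address */
            #sound-dai-cells = <0>;
        };
    };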
diff --git a/Documentation/devicetree/bindings/sound/qcom,sdm845.txt b/Documentation/devicetree/bindings/sound/qcom,sdm845.txt
deleted file mode 100644
index de4c604641da..000000000000
--- a/Documentation/devicetree/bindings/sound/qcom,sdm845.txt
+++ /dev/null
@@ -1,91 +0,0 @@
-* Qualcomm Technologies Inc. SDM845 ASoC sound card driver
-
-This binding describes the SDM845 sound card, which uses qdsp for audio.
-
-- compatible:
- Usage: required
- Value type: <stringlist>
- Definition: must be one of this
- "qcom,sdm845-sndcard"
- "qcom,db845c-sndcard"
- "lenovo,yoga-c630-sndcard"
-
-- audio-routing:
- Usage: Optional
- Value type: <stringlist>
- Definition: A list of the connections between audio components.
- Each entry is a pair of strings, the first being the
- connection's sink, the second being the connection's
- source. Valid names could be power supplies, MicBias
- of codec and the jacks on the board.
-
-- model:
- Usage: required
- Value type: <stringlist>
- Definition: The user-visible name of this sound card.
-
-- aux-devs
- Usage: optional
- Value type: <array of phandles>
- Definition: A list of phandles for auxiliary devices (e.g. analog
- amplifiers) that do not appear directly within the DAI
- links. Should be connected to another audio component
- using "audio-routing".
-
-= dailinks
-Each subnode of sndcard represents either a dailink, and subnodes of each
-dailinks would be cpu/codec/platform dais.
-
-- link-name:
- Usage: required
- Value type: <string>
- Definition: User friendly name for dai link
-
-= CPU, PLATFORM, CODEC dais subnodes
-- cpu:
- Usage: required
- Value type: <subnode>
- Definition: cpu dai sub-node
-
-- codec:
- Usage: required
- Value type: <subnode>
- Definition: codec dai sub-node
-
-- platform:
- Usage: Optional
- Value type: <subnode>
- Definition: platform dai sub-node
-
-- sound-dai:
- Usage: required
- Value type: <phandle>
- Definition: dai phandle/s and port of CPU/CODEC/PLATFORM node.
-
-Example:
-
-audio {
- compatible = "qcom,sdm845-sndcard";
- model = "sdm845-snd-card";
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&pri_mi2s_active &pri_mi2s_ws_active>;
- pinctrl-1 = <&pri_mi2s_sleep &pri_mi2s_ws_sleep>;
-
- mm1-dai-link {
- link-name = "MultiMedia1";
- cpu {
- sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA1>;
- };
- };
-
- pri-mi2s-dai-link {
- link-name = "PRI MI2S Playback";
- cpu {
- sound-dai = <&q6afedai PRIMARY_MI2S_RX>;
- };
-
- platform {
- sound-dai = <&q6routing>;
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/sound/qcom,sm8250.yaml b/Documentation/devicetree/bindings/sound/qcom,sm8250.yaml
index 4ecd4080bb96..e6e27d09783e 100644
--- a/Documentation/devicetree/bindings/sound/qcom,sm8250.yaml
+++ b/Documentation/devicetree/bindings/sound/qcom,sm8250.yaml
@@ -16,8 +16,11 @@ description:
properties:
compatible:
enum:
+ - lenovo,yoga-c630-sndcard
- qcom,apq8016-sbc-sndcard
+ - qcom,db845c-sndcard
- qcom,msm8916-qdsp6-sndcard
+ - qcom,sdm845-sndcard
- qcom,sm8250-sndcard
- qcom,qrb5165-rb5-sndcard
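Illustrative note: with the txt binding above removed in favour of this schema, existing SDM845 sound nodes keep the same shape; a trimmed sketch based on the deleted example (phandles as in that example):

    sound {
        compatible = "qcom,sdm845-sndcard";
        model = "sdm845-snd-card";

        mm1-dai-link {
            link-name = "MultiMedia1";
            cpu {
                sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA1>;
            };
        };
    };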
diff --git a/Documentation/devicetree/bindings/sound/qcom,wcd934x.yaml b/Documentation/devicetree/bindings/sound/qcom,wcd934x.yaml
index 9b225dbf8b79..8ca19f2b0b3d 100644
--- a/Documentation/devicetree/bindings/sound/qcom,wcd934x.yaml
+++ b/Documentation/devicetree/bindings/sound/qcom,wcd934x.yaml
@@ -127,7 +127,7 @@ properties:
gpio@42:
type: object
- $ref: ../gpio/qcom,wcd934x-gpio.yaml#
+ $ref: /schemas/gpio/qcom,wcd934x-gpio.yaml#
patternProperties:
"^.*@[0-9a-f]+$":
diff --git a/Documentation/devicetree/bindings/sound/qcom,wsa883x.yaml b/Documentation/devicetree/bindings/sound/qcom,wsa883x.yaml
new file mode 100644
index 000000000000..6113f65f2990
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/qcom,wsa883x.yaml
@@ -0,0 +1,74 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/qcom,wsa883x.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Bindings for the Qualcomm WSA8830/WSA8832/WSA8835
+ smart speaker amplifier
+
+maintainers:
+ - Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+
+description: |
+ WSA883X is the Qualcomm Aqstic smart speaker amplifier.
+ Its primary operating mode uses a SoundWire digital audio
+ interface. This binding is for the SoundWire interface.
+
+properties:
+ compatible:
+ const: sdw10217020200
+
+ reg:
+ maxItems: 1
+
+ powerdown-gpios:
+ description: GPIO spec for Powerdown/Shutdown line to use
+ maxItems: 1
+
+ vdd-supply:
+ description: VDD Supply for the Codec
+
+ '#thermal-sensor-cells':
+ const: 0
+
+ '#sound-dai-cells':
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - vdd-supply
+ - powerdown-gpios
+ - "#thermal-sensor-cells"
+ - "#sound-dai-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ soundwire-controller@3250000 {
+ #address-cells = <2>;
+ #size-cells = <0>;
+ reg = <0x3250000 0x2000>;
+
+ speaker@0,1 {
+ compatible = "sdw10217020200";
+ reg = <0 1>;
+ powerdown-gpios = <&tlmm 1 0>;
+ vdd-supply = <&vreg_s10b_1p8>;
+ #thermal-sensor-cells = <0>;
+ #sound-dai-cells = <0>;
+ };
+
+ speaker@0,2 {
+ compatible = "sdw10217020200";
+ reg = <0 2>;
+ powerdown-gpios = <&tlmm 89 0>;
+ vdd-supply = <&vreg_s10b_1p8>;
+ #thermal-sensor-cells = <0>;
+ #sound-dai-cells = <0>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/sound/rockchip-i2s.yaml b/Documentation/devicetree/bindings/sound/rockchip-i2s.yaml
index 5ea16b8ef93f..7e36e389e976 100644
--- a/Documentation/devicetree/bindings/sound/rockchip-i2s.yaml
+++ b/Documentation/devicetree/bindings/sound/rockchip-i2s.yaml
@@ -61,6 +61,13 @@ properties:
- const: tx
- const: rx
+ pinctrl-names:
+ oneOf:
+ - const: default
+ - items:
+ - const: bclk_on
+ - const: bclk_off
+
power-domains:
maxItems: 1
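Illustrative note: a hedged, abridged sketch showing the new pinctrl-names pair on a Rockchip I2S node; the pin group labels are assumptions and the other required properties (reg, interrupts, clocks, dmas) are omitted for brevity:

    i2s@ff070000 {
        compatible = "rockchip,rk3288-i2s", "rockchip,rk3066-i2s";  /* assumed variant */
        pinctrl-names = "bclk_on", "bclk_off";
        pinctrl-0 = <&i2s_bclk_on>;            /* assumed pin groups */
        pinctrl-1 = <&i2s_bclk_off>;
    };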
diff --git a/Documentation/devicetree/bindings/sound/sgtl5000.yaml b/Documentation/devicetree/bindings/sound/sgtl5000.yaml
index e762c320b574..2bc7f00ce4a2 100644
--- a/Documentation/devicetree/bindings/sound/sgtl5000.yaml
+++ b/Documentation/devicetree/bindings/sound/sgtl5000.yaml
@@ -47,6 +47,7 @@ properties:
description: The bias voltage to be used in mVolts. The voltage can take
values from 1.25V to 3V by 250mV steps. If this node is not mentioned
or the value is unknown, then the value is set to 1.25V.
+ $ref: "/schemas/types.yaml#/definitions/uint32"
enum: [ 1250, 1500, 1750, 2000, 2250, 2500, 2750, 3000 ]
lrclk-strength:
diff --git a/Documentation/devicetree/bindings/sound/snps,designware-i2s.yaml b/Documentation/devicetree/bindings/sound/snps,designware-i2s.yaml
new file mode 100644
index 000000000000..4b0795819064
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/snps,designware-i2s.yaml
@@ -0,0 +1,94 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/snps,designware-i2s.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: DesignWare I2S controller
+
+maintainers:
+ - Jose Abreu <joabreu@synopsys.com>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: canaan,k210-i2s
+ - const: snps,designware-i2s
+ - enum:
+ - snps,designware-i2s
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ description: |
+ The interrupt line number for the I2S controller. Add this
+ parameter if the I2S controller that you are using does not
+ support DMA.
+ maxItems: 1
+
+ clocks:
+ description: Sampling rate reference clock
+ maxItems: 1
+
+ clock-names:
+ const: i2sclk
+
+ resets:
+ maxItems: 1
+
+ dmas:
+ items:
+ - description: TX DMA Channel
+ - description: RX DMA Channel
+ minItems: 1
+
+ dma-names:
+ items:
+ - const: tx
+ - const: rx
+ minItems: 1
+
+if:
+ properties:
+ compatible:
+ contains:
+ const: canaan,k210-i2s
+
+then:
+ properties:
+ "#sound-dai-cells":
+ const: 1
+
+else:
+ properties:
+ "#sound-dai-cells":
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+
+oneOf:
+ - required:
+ - dmas
+ - dma-names
+ - required:
+ - interrupts
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ soc_i2s: i2s@7ff90000 {
+ compatible = "snps,designware-i2s";
+ reg = <0x7ff90000 0x1000>;
+ clocks = <&scpi_i2sclk 0>;
+ clock-names = "i2sclk";
+ #sound-dai-cells = <0>;
+ dmas = <&dma0 5>;
+ dma-names = "tx";
+ };
diff --git a/Documentation/devicetree/bindings/sound/tas2562.yaml b/Documentation/devicetree/bindings/sound/tas2562.yaml
index 5f7dd5d6cbca..30f6b029ac08 100644
--- a/Documentation/devicetree/bindings/sound/tas2562.yaml
+++ b/Documentation/devicetree/bindings/sound/tas2562.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Texas Instruments TAS2562 Smart PA
maintainers:
- - Dan Murphy <dmurphy@ti.com>
+ - Andrew Davis <afd@ti.com>
description: |
The TAS2562 is a mono, digital input Class-D audio amplifier optimized for
diff --git a/Documentation/devicetree/bindings/sound/tlv320adcx140.yaml b/Documentation/devicetree/bindings/sound/tlv320adcx140.yaml
index 2ad17b361db0..ee698614862e 100644
--- a/Documentation/devicetree/bindings/sound/tlv320adcx140.yaml
+++ b/Documentation/devicetree/bindings/sound/tlv320adcx140.yaml
@@ -8,7 +8,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Texas Instruments TLV320ADCX140 Quad Channel Analog-to-Digital Converter
maintainers:
- - Dan Murphy <dmurphy@ti.com>
+ - Andrew Davis <afd@ti.com>
description: |
The TLV320ADCX140 are multichannel (4-ch analog recording or 8-ch digital
@@ -68,9 +68,9 @@ properties:
array is defined as <PDMIN1 PDMIN2 PDMIN3 PDMIN4>.
0 - (default) Odd channel is latched on the negative edge and even
- channel is latched on the the positive edge.
+ channel is latched on the positive edge.
1 - Odd channel is latched on the positive edge and even channel is
- latched on the the negative edge.
+ latched on the negative edge.
PDMIN1 - PDMCLK latching edge used for channel 1 and 2 data
PDMIN2 - PDMCLK latching edge used for channel 3 and 4 data
diff --git a/Documentation/devicetree/bindings/sound/wlf,wm8731.yaml b/Documentation/devicetree/bindings/sound/wlf,wm8731.yaml
index e7220e8b49f0..15795f63b5a3 100644
--- a/Documentation/devicetree/bindings/sound/wlf,wm8731.yaml
+++ b/Documentation/devicetree/bindings/sound/wlf,wm8731.yaml
@@ -52,10 +52,6 @@ properties:
DCVDD-supply:
description: Digital core supply regulator for the DCVDD pin.
- spi-max-frequency: true
-
-additionalProperties: false
-
required:
- reg
- compatible
@@ -64,6 +60,11 @@ required:
- DBVDD-supply
- DCVDD-supply
+allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+unevaluatedProperties: false
+
examples:
- |
spi {
diff --git a/Documentation/devicetree/bindings/spi/cdns,qspi-nor-peripheral-props.yaml b/Documentation/devicetree/bindings/spi/cdns,qspi-nor-peripheral-props.yaml
index 553601a441a7..510b82c177c0 100644
--- a/Documentation/devicetree/bindings/spi/cdns,qspi-nor-peripheral-props.yaml
+++ b/Documentation/devicetree/bindings/spi/cdns,qspi-nor-peripheral-props.yaml
@@ -10,7 +10,7 @@ description:
See spi-peripheral-props.yaml for more info.
maintainers:
- - Pratyush Yadav <p.yadav@ti.com>
+ - Vaishnav Achath <vaishnav.a@ti.com>
properties:
# cdns,qspi-nor.yaml
diff --git a/Documentation/devicetree/bindings/spi/cdns,qspi-nor.yaml b/Documentation/devicetree/bindings/spi/cdns,qspi-nor.yaml
index 0a537fa3a641..4707294d8f59 100644
--- a/Documentation/devicetree/bindings/spi/cdns,qspi-nor.yaml
+++ b/Documentation/devicetree/bindings/spi/cdns,qspi-nor.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Cadence Quad SPI controller
maintainers:
- - Pratyush Yadav <p.yadav@ti.com>
+ - Vaishnav Achath <vaishnav.a@ti.com>
allOf:
- $ref: spi-controller.yaml#
diff --git a/Documentation/devicetree/bindings/spi/efm32-spi.txt b/Documentation/devicetree/bindings/spi/efm32-spi.txt
deleted file mode 100644
index e0fa61a1be0c..000000000000
--- a/Documentation/devicetree/bindings/spi/efm32-spi.txt
+++ /dev/null
@@ -1,39 +0,0 @@
-* Energy Micro EFM32 SPI
-
-Required properties:
-- #address-cells: see spi-bus.txt
-- #size-cells: see spi-bus.txt
-- compatible: should be "energymicro,efm32-spi"
-- reg: Offset and length of the register set for the controller
-- interrupts: pair specifying rx and tx irq
-- clocks: phandle to the spi clock
-- cs-gpios: see spi-bus.txt
-
-Recommended properties :
-- energymicro,location: Value to write to the ROUTE register's LOCATION
- bitfield to configure the pinmux for the device, see
- datasheet for values.
- If this property is not provided, keeping what is
- already configured in the hardware, so its either the
- reset default 0 or whatever the bootloader did.
-
-Example:
-
-spi1: spi@4000c400 { /* USART1 */
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "energymicro,efm32-spi";
- reg = <0x4000c400 0x400>;
- interrupts = <15 16>;
- clocks = <&cmu 20>;
- cs-gpios = <&gpio 51 1>; // D3
- energymicro,location = <1>;
-
- ks8851@0 {
- compatible = "ks8851";
- spi-max-frequency = <6000000>;
- reg = <0>;
- interrupt-parent = <&boardfpga>;
- interrupts = <4>;
- };
-};
diff --git a/Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml b/Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml
index ce048e782e80..a4abe1588005 100644
--- a/Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml
@@ -16,7 +16,7 @@ description:
their own separate schema that should be referenced from here.
maintainers:
- - Pratyush Yadav <p.yadav@ti.com>
+ - Mark Brown <broonie@kernel.org>
properties:
reg:
diff --git a/Documentation/devicetree/bindings/sram/qcom,imem.yaml b/Documentation/devicetree/bindings/sram/qcom,imem.yaml
new file mode 100644
index 000000000000..e9199190198d
--- /dev/null
+++ b/Documentation/devicetree/bindings/sram/qcom,imem.yaml
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sram/qcom,imem.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm IMEM memory region
+
+maintainers:
+ - Bjorn Andersson <bjorn.andersson@linaro.org>
+
+description:
+ Qualcomm IMEM is a dedicated memory region used for various debug features and DMA
+ transactions.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - qcom,apq8064-imem
+ - qcom,msm8974-imem
+ - qcom,qcs404-imem
+ - qcom,sc7180-imem
+ - qcom,sc7280-imem
+ - qcom,sdm630-imem
+ - qcom,sdm845-imem
+ - qcom,sdx55-imem
+ - const: syscon
+ - const: simple-mfd
+
+ reg:
+ maxItems: 1
+
+ ranges: true
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 1
+
+ reboot-mode:
+ $ref: /schemas/power/reset/syscon-reboot-mode.yaml#
+
+patternProperties:
+ "^pil-reloc@[0-9a-f]+$":
+ $ref: /schemas/remoteproc/qcom,pil-info.yaml#
+ description: Peripheral image loader relocation region
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ sram@146bf000 {
+ compatible = "qcom,sdm845-imem", "syscon", "simple-mfd";
+ reg = <0 0x146bf000 0 0x1000>;
+ ranges = <0 0 0x146bf000 0x1000>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ pil-reloc@94c {
+ compatible = "qcom,pil-reloc-info";
+ reg = <0x94c 0xc8>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sram/qcom,ocmem.yaml b/Documentation/devicetree/bindings/sram/qcom,ocmem.yaml
index 930188bc5e6a..071f2d676196 100644
--- a/Documentation/devicetree/bindings/sram/qcom,ocmem.yaml
+++ b/Documentation/devicetree/bindings/sram/qcom,ocmem.yaml
@@ -72,10 +72,10 @@ patternProperties:
examples:
- |
- #include <dt-bindings/clock/qcom,rpmcc.h>
- #include <dt-bindings/clock/qcom,mmcc-msm8974.h>
+ #include <dt-bindings/clock/qcom,rpmcc.h>
+ #include <dt-bindings/clock/qcom,mmcc-msm8974.h>
- ocmem: ocmem@fdd00000 {
+ sram@fdd00000 {
compatible = "qcom,msm8974-ocmem";
reg = <0xfdd00000 0x2000>,
@@ -93,6 +93,6 @@ examples:
ranges = <0 0xfec00000 0x100000>;
gmu-sram@0 {
- reg = <0x0 0x100000>;
+ reg = <0x0 0x100000>;
};
- };
+ };
diff --git a/Documentation/devicetree/bindings/thermal/brcm,avs-ro-thermal.yaml b/Documentation/devicetree/bindings/thermal/brcm,avs-ro-thermal.yaml
index 1ab5070c751d..89a2c32c0ab2 100644
--- a/Documentation/devicetree/bindings/thermal/brcm,avs-ro-thermal.yaml
+++ b/Documentation/devicetree/bindings/thermal/brcm,avs-ro-thermal.yaml
@@ -16,7 +16,7 @@ description: |+
- compatible: Should be one of the following:
"brcm,bcm2711-avs-monitor", "syscon", "simple-mfd"
- Refer to the the bindings described in
+ Refer to the bindings described in
Documentation/devicetree/bindings/mfd/syscon.yaml
properties:
diff --git a/Documentation/devicetree/bindings/thermal/nvidia,tegra124-soctherm.txt b/Documentation/devicetree/bindings/thermal/nvidia,tegra124-soctherm.txt
index db880e7ed713..aea4a2a178b9 100644
--- a/Documentation/devicetree/bindings/thermal/nvidia,tegra124-soctherm.txt
+++ b/Documentation/devicetree/bindings/thermal/nvidia,tegra124-soctherm.txt
@@ -96,7 +96,7 @@ critical trip point is reported back to the thermal framework to implement
software shutdown.
- the "hot" type trip points will be set to SOC_THERM hardware as the throttle
-temperature. Once the the temperature of this thermal zone is higher
+temperature. Once the temperature of this thermal zone is higher
than it, it will trigger the HW throttle event.
Example :
diff --git a/Documentation/devicetree/bindings/thermal/rcar-thermal.yaml b/Documentation/devicetree/bindings/thermal/rcar-thermal.yaml
index 927de79ab4b5..119998d10ff4 100644
--- a/Documentation/devicetree/bindings/thermal/rcar-thermal.yaml
+++ b/Documentation/devicetree/bindings/thermal/rcar-thermal.yaml
@@ -42,7 +42,7 @@ properties:
description:
Address ranges of the thermal registers. If more then one range is given
the first one must be the common registers followed by each sensor
- according the the datasheet.
+ according to the datasheet.
minItems: 1
maxItems: 4
diff --git a/Documentation/devicetree/bindings/thermal/thermal-zones.yaml b/Documentation/devicetree/bindings/thermal/thermal-zones.yaml
index 2d34f3ccb257..8d2c6d74b605 100644
--- a/Documentation/devicetree/bindings/thermal/thermal-zones.yaml
+++ b/Documentation/devicetree/bindings/thermal/thermal-zones.yaml
@@ -214,6 +214,7 @@ patternProperties:
- polling-delay
- polling-delay-passive
- thermal-sensors
+ - trips
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/trivial-devices.yaml b/Documentation/devicetree/bindings/trivial-devices.yaml
index 5d87b8426ff4..61746755c107 100644
--- a/Documentation/devicetree/bindings/trivial-devices.yaml
+++ b/Documentation/devicetree/bindings/trivial-devices.yaml
@@ -95,8 +95,6 @@ properties:
- dh,dhcom-board
# DA9053: flexible system level PMIC with multicore support
- dlg,da9053
- # DA9063: system PMIC for quad-core application processors
- - dlg,da9063
# DMARD05: 3-axis I2C Accelerometer
- domintech,dmard05
# DMARD06: 3-axis I2C Accelerometer
@@ -141,6 +139,8 @@ properties:
- infineon,slb9635tt
# Infineon SLB9645 I2C TPM (new protocol, max 400khz)
- infineon,slb9645tt
+ # Infineon SLB9673 I2C TPM 2.0
+ - infineon,slb9673
# Infineon TLV493D-A1B6 I2C 3D Magnetic Sensor
- infineon,tlv493d-a1b6
# Infineon Multi-phase Digital VR Controller xdpe11280
diff --git a/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml b/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml
index dcd32c10205a..f2d6298d926c 100644
--- a/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml
+++ b/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml
@@ -26,6 +26,7 @@ properties:
- qcom,msm8994-ufshc
- qcom,msm8996-ufshc
- qcom,msm8998-ufshc
+ - qcom,sc8280xp-ufshc
- qcom,sdm845-ufshc
- qcom,sm6350-ufshc
- qcom,sm8150-ufshc
@@ -98,6 +99,7 @@ allOf:
contains:
enum:
- qcom,msm8998-ufshc
+ - qcom,sc8280xp-ufshc
- qcom,sm8250-ufshc
- qcom,sm8350-ufshc
- qcom,sm8450-ufshc
diff --git a/Documentation/devicetree/bindings/ufs/renesas,ufs.yaml b/Documentation/devicetree/bindings/ufs/renesas,ufs.yaml
new file mode 100644
index 000000000000..f04f9f61fa9f
--- /dev/null
+++ b/Documentation/devicetree/bindings/ufs/renesas,ufs.yaml
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ufs/renesas,ufs.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas R-Car UFS Host Controller
+
+maintainers:
+ - Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+
+allOf:
+ - $ref: ufs-common.yaml
+
+properties:
+ compatible:
+ const: renesas,r8a779f0-ufs
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 2
+
+ clock-names:
+ items:
+ - const: fck
+ - const: ref_clk
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - power-domains
+ - resets
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a779f0-cpg-mssr.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/r8a779f0-sysc.h>
+
+ ufs: ufs@e6860000 {
+ compatible = "renesas,r8a779f0-ufs";
+ reg = <0xe6860000 0x100>;
+ interrupts = <GIC_SPI 235 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 1514>, <&ufs30_clk>;
+ clock-names = "fck", "ref_clk";
+ freq-table-hz = <200000000 200000000>, <38400000 38400000>;
+ power-domains = <&sysc R8A779F0_PD_ALWAYS_ON>;
+ resets = <&cpg 1514>;
+ };
diff --git a/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml b/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml
index c949eb617313..2c715eec48b8 100644
--- a/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml
+++ b/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml
@@ -21,6 +21,7 @@ properties:
- samsung,exynos7-ufs
- samsung,exynosautov9-ufs
- samsung,exynosautov9-ufs-vh
+ - tesla,fsd-ufs
reg:
items:
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 5a3bc64773a9..2f0151e9f6be 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -141,6 +141,8 @@ patternProperties:
description: ASIX Electronics Corporation
"^aspeed,.*":
description: ASPEED Technology Inc.
+ "^asrock,.*":
+ description: ASRock Inc.
"^asus,.*":
description: AsusTek Computer Inc.
"^atheros,.*":
@@ -198,12 +200,14 @@ patternProperties:
description: Broadcom Corporation
"^bsh,.*":
description: BSH Hausgeraete GmbH
+ "^bticino,.*":
+ description: Bticino International
"^buffalo,.*":
description: Buffalo, Inc.
"^bur,.*":
description: B&R Industrial Automation GmbH
- "^bticino,.*":
- description: Bticino International
+ "^bytedance,.*":
+ description: ByteDance Ltd.
"^calamp,.*":
description: CalAmp Corp.
"^calaosystems,.*":
@@ -308,6 +312,8 @@ patternProperties:
description: Dell Inc.
"^delta,.*":
description: Delta Electronics, Inc.
+ "^densitron,.*":
+ description: Densitron Technologies Ltd
"^denx,.*":
description: Denx Software Engineering
"^devantech,.*":
@@ -551,6 +557,8 @@ patternProperties:
description: Shenzhen Hugsun Technology Co. Ltd.
"^hwacom,.*":
description: HwaCom Systems Inc.
+ "^hxt,.*":
+ description: HXT Semiconductor
"^hycon,.*":
description: Hycon Technology Corp.
"^hydis,.*":
@@ -585,6 +593,8 @@ patternProperties:
description: Infineon Technologies
"^inforce,.*":
description: Inforce Computing
+ "^ingrasys,.*":
+ description: Ingrasys Technology Inc.
"^ivo,.*":
description: InfoVision Optoelectronics Kunshan Co. Ltd.
"^ingenic,.*":
@@ -605,6 +615,8 @@ patternProperties:
description: Inter Control Group
"^invensense,.*":
description: InvenSense Inc.
+ "^inventec,.*":
+ description: Inventec
"^inversepath,.*":
description: Inverse Path
"^iom,.*":
@@ -1019,6 +1031,8 @@ patternProperties:
description: Shenzhen QiShenglong Industrialist Co., Ltd.
"^qnap,.*":
description: QNAP Systems, Inc.
+ "^quanta,.*":
+ description: Quanta Computer Inc.
"^radxa,.*":
description: Radxa
"^raidsonic,.*":
diff --git a/Documentation/devicetree/bindings/virtio/mmio.yaml b/Documentation/devicetree/bindings/virtio/mmio.yaml
index 10c22b5bd16a..0aa8433f0a5e 100644
--- a/Documentation/devicetree/bindings/virtio/mmio.yaml
+++ b/Documentation/devicetree/bindings/virtio/mmio.yaml
@@ -33,6 +33,10 @@ properties:
description: Required for devices making accesses thru an IOMMU.
maxItems: 1
+ wakeup-source:
+ type: boolean
+ description: Required to use the IRQ of a virtio_mmio device as a wakeup source.
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/watchdog/faraday,ftwdt010.yaml b/Documentation/devicetree/bindings/watchdog/faraday,ftwdt010.yaml
index ca9e1beff76b..6ecd429f76b5 100644
--- a/Documentation/devicetree/bindings/watchdog/faraday,ftwdt010.yaml
+++ b/Documentation/devicetree/bindings/watchdog/faraday,ftwdt010.yaml
@@ -55,7 +55,7 @@ examples:
compatible = "faraday,ftwdt010";
reg = <0x41000000 0x1000>;
interrupts = <3 IRQ_TYPE_LEVEL_HIGH>;
- timeout-secs = <5>;
+ timeout-sec = <5>;
};
- |
watchdog: watchdog@98500000 {
diff --git a/Documentation/devicetree/bindings/watchdog/qcom,pm8916-wdt.txt b/Documentation/devicetree/bindings/watchdog/qcom,pm8916-wdt.txt
deleted file mode 100644
index 6fb984f31982..000000000000
--- a/Documentation/devicetree/bindings/watchdog/qcom,pm8916-wdt.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-QCOM PM8916 watchdog timer controller
-
-This pm8916 watchdog timer controller must be under pm8916-pon node.
-
-Required properties:
-- compatible: should be "qcom,pm8916-wdt"
-
-Optional properties :
-- interrupts : Watchdog pre-timeout (bark) interrupt.
-- timeout-sec : Watchdog timeout value in seconds.
-
-Example:
-
- pm8916_0: pm8916@0 {
- compatible = "qcom,pm8916", "qcom,spmi-pmic";
- reg = <0x0 SPMI_USID>;
-
- pon@800 {
- compatible = "qcom,pm8916-pon";
- reg = <0x800>;
-
- watchdog {
- compatible = "qcom,pm8916-wdt";
- interrupts = <0x0 0x8 6 IRQ_TYPE_EDGE_RISING>;
- timeout-sec = <10>;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/watchdog/qcom,pm8916-wdt.yaml b/Documentation/devicetree/bindings/watchdog/qcom,pm8916-wdt.yaml
new file mode 100644
index 000000000000..568eb8480fc3
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/qcom,pm8916-wdt.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/watchdog/qcom,pm8916-wdt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm PM8916 watchdog timer controller
+
+maintainers:
+ - Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+
+allOf:
+ - $ref: watchdog.yaml#
+
+properties:
+ compatible:
+ const: qcom,pm8916-wdt
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - interrupts
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/spmi/spmi.h>
+
+ pmic@0 {
+ compatible = "qcom,pm8916", "qcom,spmi-pmic";
+ reg = <0x0 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pon@800 {
+ compatible = "qcom,pm8916-pon";
+ reg = <0x800>;
+ mode-bootloader = <0x2>;
+ mode-recovery = <0x1>;
+
+ watchdog {
+ compatible = "qcom,pm8916-wdt";
+ interrupts = <0x0 0x8 6 IRQ_TYPE_EDGE_RISING>;
+ timeout-sec = <60>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml b/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml
index 2bd6b4a52637..d8ac0be36e6c 100644
--- a/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml
@@ -24,6 +24,7 @@ properties:
- qcom,apss-wdt-sc8280xp
- qcom,apss-wdt-sdm845
- qcom,apss-wdt-sdx55
+ - qcom,apss-wdt-sdx65
- qcom,apss-wdt-sm6350
- qcom,apss-wdt-sm8150
- qcom,apss-wdt-sm8250
diff --git a/Documentation/devicetree/bindings/watchdog/realtek,otto-wdt.yaml b/Documentation/devicetree/bindings/watchdog/realtek,otto-wdt.yaml
index 11b220a5e0f6..099245fe7b10 100644
--- a/Documentation/devicetree/bindings/watchdog/realtek,otto-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/realtek,otto-wdt.yaml
@@ -29,6 +29,7 @@ properties:
- realtek,rtl8380-wdt
- realtek,rtl8390-wdt
- realtek,rtl9300-wdt
+ - realtek,rtl9310-wdt
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/writing-bindings.rst b/Documentation/devicetree/bindings/writing-bindings.rst
index 5465eced2af1..1ad081de2dd0 100644
--- a/Documentation/devicetree/bindings/writing-bindings.rst
+++ b/Documentation/devicetree/bindings/writing-bindings.rst
@@ -53,7 +53,7 @@ Properties
- DO use common property unit suffixes for properties with scientific units.
Recommended suffixes are listed at
- https://github.com/devicetree-org/dt-schema/blob/master/schemas/property-units.yaml
+ https://github.com/devicetree-org/dt-schema/blob/main/dtschema/schemas/property-units.yaml
- DO define properties in terms of constraints. How many entries? What are
possible values? What is the order?
diff --git a/Documentation/driver-api/cxl/memory-devices.rst b/Documentation/driver-api/cxl/memory-devices.rst
index db476bb170b6..5149ecdc53c7 100644
--- a/Documentation/driver-api/cxl/memory-devices.rst
+++ b/Documentation/driver-api/cxl/memory-devices.rst
@@ -362,6 +362,14 @@ CXL Core
.. kernel-doc:: drivers/cxl/core/mbox.c
:doc: cxl mbox
+CXL Regions
+-----------
+.. kernel-doc:: drivers/cxl/core/region.c
+ :doc: cxl core region
+
+.. kernel-doc:: drivers/cxl/core/region.c
+ :identifiers:
+
External Interfaces
===================
diff --git a/Documentation/driver-api/dmaengine/provider.rst b/Documentation/driver-api/dmaengine/provider.rst
index 1e0f1f85d10e..ceac2a300e32 100644
--- a/Documentation/driver-api/dmaengine/provider.rst
+++ b/Documentation/driver-api/dmaengine/provider.rst
@@ -162,16 +162,6 @@ Currently, the types available are:
- The device is able to do memory to memory copies
-- - DMA_MEMCPY_SG
-
- - The device supports memory to memory scatter-gather transfers.
-
- - Even though a plain memcpy can look like a particular case of a
- scatter-gather transfer, with a single chunk to copy, it's a distinct
- transaction type in the mem2mem transfer case. This is because some very
- simple devices might be able to do contiguous single-chunk memory copies,
- but have no support for more complex SG transfers.
-
- No matter what the overall size of the combined chunks for source and
destination is, only as many bytes as the smallest of the two will be
transmitted. That means the number and size of the scatter-gather buffers in
diff --git a/Documentation/driver-api/driver-model/devres.rst b/Documentation/driver-api/driver-model/devres.rst
index 2d39967bafcc..55272942e721 100644
--- a/Documentation/driver-api/driver-model/devres.rst
+++ b/Documentation/driver-api/driver-model/devres.rst
@@ -277,7 +277,6 @@ GPIO
devm_gpiochip_add_data()
devm_gpio_request()
devm_gpio_request_one()
- devm_gpio_free()
I2C
devm_i2c_new_dummy_device()
diff --git a/Documentation/driver-api/serial/driver.rst b/Documentation/driver-api/serial/driver.rst
index 7ef83fd3917b..23c6b956cd90 100644
--- a/Documentation/driver-api/serial/driver.rst
+++ b/Documentation/driver-api/serial/driver.rst
@@ -25,10 +25,10 @@ Console Support
---------------
The serial core provides a few helper functions. This includes identifying
-the correct port structure (via uart_get_console) and decoding command line
-arguments (uart_parse_options).
+the correct port structure (via uart_get_console()) and decoding command line
+arguments (uart_parse_options()).
-There is also a helper function (uart_console_write) which performs a
+There is also a helper function (uart_console_write()) which performs a
character by character write, translating newlines to CRLF sequences.
Driver writers are recommended to use this function rather than implementing
their own version.
@@ -39,7 +39,7 @@ Locking
It is the responsibility of the low level hardware driver to perform the
necessary locking using port->lock. There are some exceptions (which
-are described in the uart_ops listing below.)
+are described in the struct uart_ops listing below.)
There are two locks. A per-port spinlock, and an overall semaphore.
@@ -63,442 +63,20 @@ commonly referred to as the port mutex.
uart_ops
--------
-The uart_ops structure is the main interface between serial_core and the
-hardware specific driver. It contains all the methods to control the
-hardware.
-
- tx_empty(port)
- This function tests whether the transmitter fifo and shifter
- for the port described by 'port' is empty. If it is empty,
- this function should return TIOCSER_TEMT, otherwise return 0.
- If the port does not support this operation, then it should
- return TIOCSER_TEMT.
-
- Locking: none.
-
- Interrupts: caller dependent.
-
- This call must not sleep
-
- set_mctrl(port, mctrl)
- This function sets the modem control lines for port described
- by 'port' to the state described by mctrl. The relevant bits
- of mctrl are:
-
- - TIOCM_RTS RTS signal.
- - TIOCM_DTR DTR signal.
- - TIOCM_OUT1 OUT1 signal.
- - TIOCM_OUT2 OUT2 signal.
- - TIOCM_LOOP Set the port into loopback mode.
-
- If the appropriate bit is set, the signal should be driven
- active. If the bit is clear, the signal should be driven
- inactive.
-
- Locking: port->lock taken.
-
- Interrupts: locally disabled.
-
- This call must not sleep
-
- get_mctrl(port)
- Returns the current state of modem control inputs. The state
- of the outputs should not be returned, since the core keeps
- track of their state. The state information should include:
-
- - TIOCM_CAR state of DCD signal
- - TIOCM_CTS state of CTS signal
- - TIOCM_DSR state of DSR signal
- - TIOCM_RI state of RI signal
-
- The bit is set if the signal is currently driven active. If
- the port does not support CTS, DCD or DSR, the driver should
- indicate that the signal is permanently active. If RI is
- not available, the signal should not be indicated as active.
-
- Locking: port->lock taken.
-
- Interrupts: locally disabled.
-
- This call must not sleep
-
- stop_tx(port)
- Stop transmitting characters. This might be due to the CTS
- line becoming inactive or the tty layer indicating we want
- to stop transmission due to an XOFF character.
-
- The driver should stop transmitting characters as soon as
- possible.
-
- Locking: port->lock taken.
-
- Interrupts: locally disabled.
-
- This call must not sleep
-
- start_tx(port)
- Start transmitting characters.
-
- Locking: port->lock taken.
-
- Interrupts: locally disabled.
-
- This call must not sleep
-
- throttle(port)
- Notify the serial driver that input buffers for the line discipline are
- close to full, and it should somehow signal that no more characters
- should be sent to the serial port.
- This will be called only if hardware assisted flow control is enabled.
-
- Locking: serialized with .unthrottle() and termios modification by the
- tty layer.
-
- unthrottle(port)
- Notify the serial driver that characters can now be sent to the serial
- port without fear of overrunning the input buffers of the line
- disciplines.
-
- This will be called only if hardware assisted flow control is enabled.
-
- Locking: serialized with .throttle() and termios modification by the
- tty layer.
-
- send_xchar(port,ch)
- Transmit a high priority character, even if the port is stopped.
- This is used to implement XON/XOFF flow control and tcflow(). If
- the serial driver does not implement this function, the tty core
- will append the character to the circular buffer and then call
- start_tx() / stop_tx() to flush the data out.
-
- Do not transmit if ch == '\0' (__DISABLED_CHAR).
-
- Locking: none.
-
- Interrupts: caller dependent.
-
- stop_rx(port)
- Stop receiving characters; the port is in the process of
- being closed.
-
- Locking: port->lock taken.
-
- Interrupts: locally disabled.
-
- This call must not sleep
-
- enable_ms(port)
- Enable the modem status interrupts.
-
- This method may be called multiple times. Modem status
- interrupts should be disabled when the shutdown method is
- called.
-
- Locking: port->lock taken.
-
- Interrupts: locally disabled.
-
- This call must not sleep
-
- break_ctl(port,ctl)
- Control the transmission of a break signal. If ctl is
- nonzero, the break signal should be transmitted. The signal
- should be terminated when another call is made with a zero
- ctl.
-
- Locking: caller holds tty_port->mutex
-
- startup(port)
- Grab any interrupt resources and initialise any low level driver
- state. Enable the port for reception. It should not activate
- RTS nor DTR; this will be done via a separate call to set_mctrl.
-
- This method will only be called when the port is initially opened.
-
- Locking: port_sem taken.
-
- Interrupts: globally disabled.
-
- shutdown(port)
- Disable the port, disable any break condition that may be in
- effect, and free any interrupt resources. It should not disable
- RTS nor DTR; this will have already been done via a separate
- call to set_mctrl.
-
- Drivers must not access port->state once this call has completed.
-
- This method will only be called when there are no more users of
- this port.
-
- Locking: port_sem taken.
-
- Interrupts: caller dependent.
-
- flush_buffer(port)
- Flush any write buffers, reset any DMA state and stop any
- ongoing DMA transfers.
-
- This will be called whenever the port->state->xmit circular
- buffer is cleared.
-
- Locking: port->lock taken.
-
- Interrupts: locally disabled.
-
- This call must not sleep
-
- set_termios(port,termios,oldtermios)
- Change the port parameters, including word length, parity, stop
- bits. Update read_status_mask and ignore_status_mask to indicate
- the types of events we are interested in receiving. Relevant
- termios->c_cflag bits are:
-
- CSIZE
- - word size
- CSTOPB
- - 2 stop bits
- PARENB
- - parity enable
- PARODD
- - odd parity (when PARENB is in force)
- CREAD
- - enable reception of characters (if not set,
- still receive characters from the port, but
- throw them away.
- CRTSCTS
- - if set, enable CTS status change reporting
- CLOCAL
- - if not set, enable modem status change
- reporting.
-
- Relevant termios->c_iflag bits are:
-
- INPCK
- - enable frame and parity error events to be
- passed to the TTY layer.
- BRKINT / PARMRK
- - both of these enable break events to be
- passed to the TTY layer.
-
- IGNPAR
- - ignore parity and framing errors
- IGNBRK
- - ignore break errors, If IGNPAR is also
- set, ignore overrun errors as well.
-
- The interaction of the iflag bits is as follows (parity error
- given as an example):
-
- =============== ======= ====== =============================
- Parity error INPCK IGNPAR
- =============== ======= ====== =============================
- n/a 0 n/a character received, marked as
- TTY_NORMAL
- None 1 n/a character received, marked as
- TTY_NORMAL
- Yes 1 0 character received, marked as
- TTY_PARITY
- Yes 1 1 character discarded
- =============== ======= ====== =============================
-
- Other flags may be used (eg, xon/xoff characters) if your
- hardware supports hardware "soft" flow control.
-
- Locking: caller holds tty_port->mutex
-
- Interrupts: caller dependent.
-
- This call must not sleep
-
- set_ldisc(port,termios)
- Notifier for discipline change. See ../tty/tty_ldisc.rst.
-
- Locking: caller holds tty_port->mutex
-
- pm(port,state,oldstate)
- Perform any power management related activities on the specified
- port. State indicates the new state (defined by
- enum uart_pm_state), oldstate indicates the previous state.
-
- This function should not be used to grab any resources.
-
- This will be called when the port is initially opened and finally
- closed, except when the port is also the system console. This
- will occur even if CONFIG_PM is not set.
-
- Locking: none.
-
- Interrupts: caller dependent.
-
- type(port)
- Return a pointer to a string constant describing the specified
- port, or return NULL, in which case the string 'unknown' is
- substituted.
-
- Locking: none.
-
- Interrupts: caller dependent.
-
- release_port(port)
- Release any memory and IO region resources currently in use by
- the port.
-
- Locking: none.
-
- Interrupts: caller dependent.
-
- request_port(port)
- Request any memory and IO region resources required by the port.
- If any fail, no resources should be registered when this function
- returns, and it should return -EBUSY on failure.
-
- Locking: none.
-
- Interrupts: caller dependent.
-
- config_port(port,type)
- Perform any autoconfiguration steps required for the port. `type`
- contains a bit mask of the required configuration. UART_CONFIG_TYPE
- indicates that the port requires detection and identification.
- port->type should be set to the type found, or PORT_UNKNOWN if
- no port was detected.
-
- UART_CONFIG_IRQ indicates autoconfiguration of the interrupt signal,
- which should be probed using standard kernel autoprobing techniques.
- This is not necessary on platforms where ports have interrupts
- internally hard wired (eg, system on a chip implementations).
-
- Locking: none.
-
- Interrupts: caller dependent.
-
- verify_port(port,serinfo)
- Verify the new serial port information contained within serinfo is
- suitable for this port type.
-
- Locking: none.
-
- Interrupts: caller dependent.
-
- ioctl(port,cmd,arg)
- Perform any port specific IOCTLs. IOCTL commands must be defined
- using the standard numbering system found in <asm/ioctl.h>
-
- Locking: none.
-
- Interrupts: caller dependent.
-
- poll_init(port)
- Called by kgdb to perform the minimal hardware initialization needed
- to support poll_put_char() and poll_get_char(). Unlike ->startup()
- this should not request interrupts.
-
- Locking: tty_mutex and tty_port->mutex taken.
-
- Interrupts: n/a.
-
- poll_put_char(port,ch)
- Called by kgdb to write a single character directly to the serial
- port. It can and should block until there is space in the TX FIFO.
-
- Locking: none.
-
- Interrupts: caller dependent.
-
- This call must not sleep
-
- poll_get_char(port)
- Called by kgdb to read a single character directly from the serial
- port. If data is available, it should be returned; otherwise
- the function should return NO_POLL_CHAR immediately.
-
- Locking: none.
-
- Interrupts: caller dependent.
-
- This call must not sleep
+.. kernel-doc:: include/linux/serial_core.h
+ :identifiers: uart_ops
Other functions
---------------
-uart_update_timeout(port,cflag,baud)
- Update the FIFO drain timeout, port->timeout, according to the
- number of bits, parity, stop bits and baud rate.
-
- Locking: caller is expected to take port->lock
-
- Interrupts: n/a
-
-uart_get_baud_rate(port,termios,old,min,max)
- Return the numeric baud rate for the specified termios, taking
- account of the special 38400 baud "kludge". The B0 baud rate
- is mapped to 9600 baud.
-
- If the baud rate is not within min..max, then if old is non-NULL,
- the original baud rate will be tried. If that exceeds the
- min..max constraint, 9600 baud will be returned. termios will
- be updated to the baud rate in use.
-
- Note: min..max must always allow 9600 baud to be selected.
-
- Locking: caller dependent.
-
- Interrupts: n/a
-
-uart_get_divisor(port,baud)
- Return the divisor (baud_base / baud) for the specified baud
- rate, appropriately rounded.
-
- If 38400 baud and custom divisor is selected, return the
- custom divisor instead.
-
- Locking: caller dependent.
-
- Interrupts: n/a
-
-uart_match_port(port1,port2)
- This utility function can be used to determine whether two
- uart_port structures describe the same port.
-
- Locking: n/a
-
- Interrupts: n/a
-
-uart_write_wakeup(port)
- A driver is expected to call this function when the number of
- characters in the transmit buffer have dropped below a threshold.
-
- Locking: port->lock should be held.
-
- Interrupts: n/a
-
-uart_register_driver(drv)
- Register a uart driver with the core driver. We in turn register
- with the tty layer, and initialise the core driver per-port state.
-
- drv->port should be NULL, and the per-port structures should be
- registered using uart_add_one_port after this call has succeeded.
-
- Locking: none
-
- Interrupts: enabled
-
-uart_unregister_driver()
- Remove all references to a driver from the core driver. The low
- level driver must have removed all its ports via the
- uart_remove_one_port() if it registered them with uart_add_one_port().
-
- Locking: none
-
- Interrupts: enabled
-
-**uart_suspend_port()**
-
-**uart_resume_port()**
-
-**uart_add_one_port()**
-
-**uart_remove_one_port()**
+.. kernel-doc:: drivers/tty/serial/serial_core.c
+ :identifiers: uart_update_timeout uart_get_baud_rate uart_get_divisor
+ uart_match_port uart_write_wakeup uart_register_driver
+ uart_unregister_driver uart_suspend_port uart_resume_port
+ uart_add_one_port uart_remove_one_port uart_console_write
+ uart_parse_earlycon uart_parse_options uart_set_options
+ uart_get_lsr_info uart_handle_dcd_change uart_handle_cts_change
+ uart_try_toggle_sysrq uart_get_console
Other notes
-----------
@@ -519,31 +97,7 @@ Modem control lines via GPIO
Some helpers are provided in order to set/get modem control lines via GPIO.
-mctrl_gpio_init(port, idx):
- This will get the {cts,rts,...}-gpios from device tree if they are
- present and request them, set direction etc, and return an
- allocated structure. `devm_*` functions are used, so there's no need
- to call mctrl_gpio_free().
- As this sets up the irq handling make sure to not handle changes to the
- gpio input lines in your driver, too.
-
-mctrl_gpio_free(dev, gpios):
- This will free the requested gpios in mctrl_gpio_init().
- As `devm_*` functions are used, there's generally no need to call
- this function.
-
-mctrl_gpio_to_gpiod(gpios, gidx)
- This returns the gpio_desc structure associated to the modem line
- index.
-
-mctrl_gpio_set(gpios, mctrl):
- This will sets the gpios according to the mctrl state.
-
-mctrl_gpio_get(gpios, mctrl):
- This will update mctrl with the gpios values.
-
-mctrl_gpio_enable_ms(gpios):
- Enables irqs and handling of changes to the ms lines.
-
-mctrl_gpio_disable_ms(gpios):
- Disables irqs and handling of changes to the ms lines.
+.. kernel-doc:: drivers/tty/serial/serial_mctrl_gpio.c
+ :identifiers: mctrl_gpio_init mctrl_gpio_free mctrl_gpio_to_gpiod
+ mctrl_gpio_set mctrl_gpio_get mctrl_gpio_enable_ms
+ mctrl_gpio_disable_ms
diff --git a/Documentation/driver-api/serial/serial-rs485.rst b/Documentation/driver-api/serial/serial-rs485.rst
index 6bc824f948f9..6ebad75c74ed 100644
--- a/Documentation/driver-api/serial/serial-rs485.rst
+++ b/Documentation/driver-api/serial/serial-rs485.rst
@@ -38,10 +38,14 @@ RS485 Serial Communications
the values given by the device tree.
Any driver for devices capable of working both as RS232 and RS485 should
- implement the rs485_config callback in the uart_port structure. The
- serial_core calls rs485_config to do the device specific part in response
- to TIOCSRS485 and TIOCGRS485 ioctls (see below). The rs485_config callback
- receives a pointer to struct serial_rs485.
+ implement the rs485_config callback and provide rs485_supported in the
+ uart_port structure. The serial core calls rs485_config to do the device
+ specific part in response to TIOCSRS485 ioctl (see below). The rs485_config
+ callback receives a pointer to a sanitized serial_rs485 structure. The
+ serial_rs485 structure provided by userspace is sanitized before rs485_config
+ is called, using rs485_supported, which indicates what RS485 features the
+ driver supports for the uart_port. The TIOCGRS485 ioctl can be used to read
+ back the serial_rs485 structure matching the current configuration.
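+
+ Viewed from user space, the effect of this sanitization is that a
+ TIOCSRS485/TIOCGRS485 round trip shows which requested features were
+ accepted. The following is only a minimal sketch (the device path is an
+ arbitrary example, and a feature such as RX_DURING_TX may be masked out if
+ the driver does not support it)::
+
+     #include <fcntl.h>
+     #include <stdio.h>
+     #include <sys/ioctl.h>
+     #include <linux/serial.h>
+
+     int fd = open("/dev/ttyS1", O_RDWR);   /* example port */
+     struct serial_rs485 req = {
+             .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND |
+                      SER_RS485_RX_DURING_TX,
+     };
+     struct serial_rs485 cur;
+
+     if (ioctl(fd, TIOCSRS485, &req) < 0)
+             perror("TIOCSRS485");
+     if (ioctl(fd, TIOCGRS485, &cur) == 0)
+             printf("accepted flags: 0x%x\n", cur.flags);
+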
4. Usage from user-level
========================
@@ -95,7 +99,31 @@ RS485 Serial Communications
/* Error handling. See errno. */
}
-5. References
+5. Multipoint Addressing
+========================
+
+ The Linux kernel provides an addressing mode for multipoint RS-485 serial
+ communication lines. The addressing mode is enabled with the SER_RS485_ADDRB
+ flag in serial_rs485. Struct serial_rs485 has two additional flags and
+ fields for enabling receive and destination addresses.
+
+ Address mode flags:
+ - SER_RS485_ADDRB: Enables addressing mode (also sets ADDRB in termios).
+ - SER_RS485_ADDR_RECV: Receive (filter) address enabled.
+ - SER_RS485_ADDR_DEST: Set destination address.
+
+ Address fields (enabled with corresponding SER_RS485_ADDR_* flag):
+ - addr_recv: Receive address.
+ - addr_dest: Destination address.
+
+ Once a receive address is set, the communication can occur only with the
+ particular device and other peers are filtered out. It is left up to the
+ receiver side to enforce the filtering. The receive address will be cleared
+ if SER_RS485_ADDR_RECV is not set.
+
+ Note: not all devices supporting RS485 support multipoint addressing.
+
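+ A minimal userspace sketch of enabling the addressing mode follows; the
+ device path and the address value are arbitrary examples::
+
+     #include <fcntl.h>
+     #include <stdio.h>
+     #include <sys/ioctl.h>
+     #include <linux/serial.h>
+
+     int fd = open("/dev/ttyS1", O_RDWR);   /* example port */
+     struct serial_rs485 rs485 = {
+             .flags = SER_RS485_ENABLED | SER_RS485_ADDRB |
+                      SER_RS485_ADDR_RECV,
+             .addr_recv = 0x45,      /* only frames to address 0x45 are received */
+     };
+
+     if (ioctl(fd, TIOCSRS485, &rs485) < 0)
+             perror("TIOCSRS485");   /* e.g. the device lacks addressing support */
+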
+6. References
=============
[1] include/uapi/linux/serial.h
diff --git a/Documentation/driver-api/surface_aggregator/client.rst b/Documentation/driver-api/surface_aggregator/client.rst
index e519d374c378..27f95abdbe99 100644
--- a/Documentation/driver-api/surface_aggregator/client.rst
+++ b/Documentation/driver-api/surface_aggregator/client.rst
@@ -17,6 +17,8 @@
.. |SSAM_DEVICE| replace:: :c:func:`SSAM_DEVICE`
.. |ssam_notifier_register| replace:: :c:func:`ssam_notifier_register`
.. |ssam_notifier_unregister| replace:: :c:func:`ssam_notifier_unregister`
+.. |ssam_device_notifier_register| replace:: :c:func:`ssam_device_notifier_register`
+.. |ssam_device_notifier_unregister| replace:: :c:func:`ssam_device_notifier_unregister`
.. |ssam_request_sync| replace:: :c:func:`ssam_request_sync`
.. |ssam_event_mask| replace:: :c:type:`enum ssam_event_mask <ssam_event_mask>`
@@ -312,7 +314,9 @@ Handling Events
To receive events from the SAM EC, an event notifier must be registered for
the desired event via |ssam_notifier_register|. The notifier must be
unregistered via |ssam_notifier_unregister| once it is not required any
-more.
+more. For |ssam_device| type clients, the |ssam_device_notifier_register| and
+|ssam_device_notifier_unregister| wrappers should be preferred as they properly
+handle hot-removal of client devices.
Event notifiers are registered by providing (at minimum) a callback to call
in case an event has been received, the registry specifying how the event
diff --git a/Documentation/driver-api/vfio-mediated-device.rst b/Documentation/driver-api/vfio-mediated-device.rst
index 66bd00d7aecf..f47dca6645aa 100644
--- a/Documentation/driver-api/vfio-mediated-device.rst
+++ b/Documentation/driver-api/vfio-mediated-device.rst
@@ -112,11 +112,11 @@ to register and unregister itself with the core driver:
* Register::
- extern int mdev_register_driver(struct mdev_driver *drv);
+ int mdev_register_driver(struct mdev_driver *drv);
* Unregister::
- extern void mdev_unregister_driver(struct mdev_driver *drv);
+ void mdev_unregister_driver(struct mdev_driver *drv);
The mediated bus driver's probe function should create a vfio_device on top of
the mdev_device and connect it to an appropriate implementation of
@@ -125,8 +125,8 @@ vfio_device_ops.
When a driver wants to add the GUID creation sysfs to an existing device it has
probe'd to then it should call::
- extern int mdev_register_device(struct device *dev,
- struct mdev_driver *mdev_driver);
+ int mdev_register_device(struct device *dev,
+ struct mdev_driver *mdev_driver);
This will provide the 'mdev_supported_types/XX/create' files which can then be
used to trigger the creation of a mdev_device. The created mdev_device will be
@@ -134,7 +134,7 @@ attached to the specified driver.
When the driver needs to remove itself it calls::
- extern void mdev_unregister_device(struct device *dev);
+ void mdev_unregister_device(struct device *dev);
Which will unbind and destroy all the created mdevs and remove the sysfs files.
@@ -260,10 +260,10 @@ Translation APIs for Mediated Devices
The following APIs are provided for translating user pfn to host pfn in a VFIO
driver::
- int vfio_pin_pages(struct vfio_device *device, unsigned long *user_pfn,
- int npage, int prot, unsigned long *phys_pfn);
+ int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova,
+ int npage, int prot, struct page **pages);
- int vfio_unpin_pages(struct vfio_device *device, unsigned long *user_pfn,
+ void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova,
int npage);
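+
+A minimal sketch of how a mediated device driver might use this pair for a
+single page of guest memory (the helper name and the surrounding driver
+structure are hypothetical)::
+
+    #include <linux/vfio.h>
+    #include <linux/iommu.h>
+
+    /* Pin one guest page at 'iova' for DMA, use it, then release it. */
+    static int mdev_touch_guest_page(struct vfio_device *vdev, dma_addr_t iova)
+    {
+            struct page *page;
+            int ret;
+
+            ret = vfio_pin_pages(vdev, iova, 1, IOMMU_READ | IOMMU_WRITE, &page);
+            if (ret != 1)
+                    return ret < 0 ? ret : -EFAULT;
+
+            /* ... access the page, e.g. via kmap_local_page(page) ... */
+
+            vfio_unpin_pages(vdev, iova, 1);
+            return 0;
+    }
+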
These functions call back into the back-end IOMMU module by using the pin_pages
diff --git a/Documentation/fault-injection/fault-injection.rst b/Documentation/fault-injection/fault-injection.rst
index eb9c2d9a4f5f..17779a2772e5 100644
--- a/Documentation/fault-injection/fault-injection.rst
+++ b/Documentation/fault-injection/fault-injection.rst
@@ -169,6 +169,13 @@ configuration of fault-injection capabilities.
default is 'N', setting it to 'Y' will disable disconnect
injection on the RPC server.
+- /sys/kernel/debug/fail_sunrpc/ignore-cache-wait:
+
+ Format: { 'Y' | 'N' }
+
+ default is 'N', setting it to 'Y' will disable cache wait
+ injection on the RPC server.
+
- /sys/kernel/debug/fail_function/inject:
Format: { 'function-name' | '!function-name' | '' }
diff --git a/Documentation/filesystems/ext4/blockmap.rst b/Documentation/filesystems/ext4/blockmap.rst
index 2bd990402a5c..cc596541ce79 100644
--- a/Documentation/filesystems/ext4/blockmap.rst
+++ b/Documentation/filesystems/ext4/blockmap.rst
@@ -1,7 +1,7 @@
.. SPDX-License-Identifier: GPL-2.0
+---------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| i.i_block Offset | Where It Points |
+| i.i_block Offset | Where It Points |
+=====================+==============================================================================================================================================================================================================================+
| 0 to 11 | Direct map to file blocks 0 to 11. |
+---------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
diff --git a/Documentation/filesystems/f2fs.rst b/Documentation/filesystems/f2fs.rst
index 98dc24f5c6f0..d0c09663dae8 100644
--- a/Documentation/filesystems/f2fs.rst
+++ b/Documentation/filesystems/f2fs.rst
@@ -336,6 +336,11 @@ discard_unit=%s Control discard unit, the argument can be "block", "segment"
default, it is helpful for large sized SMR or ZNS devices to
reduce memory cost by getting rid of fs metadata supports small
discard.
+memory=%s Control memory mode. This supports "normal" and "low" modes.
+ "low" mode is introduced to support low memory devices.
+ Because of the nature of low memory devices, in this mode, f2fs
+ will try to save memory, sometimes at the cost of performance.
+ "normal" mode is the default and behaves as before.
======================== ============================================================
Debugfs Entries
diff --git a/Documentation/filesystems/fuse.rst b/Documentation/filesystems/fuse.rst
index 8120c3c0cb4e..1e31e87aee68 100644
--- a/Documentation/filesystems/fuse.rst
+++ b/Documentation/filesystems/fuse.rst
@@ -279,7 +279,7 @@ How are requirements fulfilled?
the filesystem or not.
Note that the *ptrace* check is not strictly necessary to
- prevent B/2/i, it is enough to check if mount owner has enough
+ prevent C/2/i, it is enough to check if mount owner has enough
privilege to send signal to the process accessing the
filesystem, since *SIGSTOP* can be used to get a similar effect.
@@ -288,10 +288,29 @@ I think these limitations are unacceptable?
If a sysadmin trusts the users enough, or can ensure through other
measures, that system processes will never enter non-privileged
-mounts, it can relax the last limitation with a 'user_allow_other'
-config option. If this config option is set, the mounting user can
-add the 'allow_other' mount option which disables the check for other
-users' processes.
+mounts, it can relax the last limitation in several ways:
+
+ - With the 'user_allow_other' config option. If this config option is
+ set, the mounting user can add the 'allow_other' mount option which
+ disables the check for other users' processes.
+
+ User namespaces have an unintuitive interaction with 'allow_other':
+ an unprivileged user - normally restricted from mounting with
+ 'allow_other' - could do so in a user namespace where they're
+ privileged. If any process could access such an 'allow_other' mount
+ this would give the mounting user the ability to manipulate
+ processes in user namespaces where they're unprivileged. For this
+ reason 'allow_other' restricts access to users in the same userns
+ or a descendant.
+
+ - With the 'allow_sys_admin_access' module option. If this option is
+ set, super user's processes have unrestricted access to mounts
+ irrespective of allow_other setting or user namespace of the
+ mounting user.
+
+Note that both of these relaxations expose the system to potential
+information leak or *DoS* as described in points B and C/2/i-ii in the
+preceding section.
Kernel - userspace interface
============================
diff --git a/Documentation/filesystems/proc.rst b/Documentation/filesystems/proc.rst
index 1bc91fb8c321..e7aafc82be99 100644
--- a/Documentation/filesystems/proc.rst
+++ b/Documentation/filesystems/proc.rst
@@ -448,6 +448,7 @@ Memory Area, or VMA) there is a series of lines such as the following::
MMUPageSize: 4 kB
Rss: 892 kB
Pss: 374 kB
+ Pss_Dirty: 0 kB
Shared_Clean: 892 kB
Shared_Dirty: 0 kB
Private_Clean: 0 kB
@@ -479,7 +480,9 @@ dirty shared and private pages in the mapping.
The "proportional set size" (PSS) of a process is the count of pages it has
in memory, where each page is divided by the number of processes sharing it.
So if a process has 1000 pages all to itself, and 1000 shared with one other
-process, its PSS will be 1500.
+process, its PSS will be 1500. "Pss_Dirty" is the portion of PSS which
+consists of dirty pages. ("Pss_Clean" is not included, but it can be
+calculated by subtracting "Pss_Dirty" from "Pss".)
Note that even a page which is part of a MAP_SHARED mapping, but has only
a single pte mapped, i.e. is currently used by only one process, is accounted
@@ -514,8 +517,10 @@ replaced by copy-on-write) part of the underlying shmem object out on swap.
"SwapPss" shows proportional swap share of this mapping. Unlike "Swap", this
does not take into account swapped out page of underlying shmem objects.
"Locked" indicates whether the mapping is locked in memory or not.
+
"THPeligible" indicates whether the mapping is eligible for allocating THP
-pages - 1 if true, 0 otherwise. It just shows the current status.
+pages and whether such THP is PMD mappable - 1 if true, 0 otherwise.
+It just shows the current status.
"VmFlags" field deserves a separate description. This member represents the
kernel flags associated with the particular virtual memory area in two letter
@@ -1109,7 +1114,7 @@ CommitLimit
yield a CommitLimit of 7.3G.
For more details, see the memory overcommit documentation
- in vm/overcommit-accounting.
+ in mm/overcommit-accounting.
Committed_AS
The amount of memory presently allocated on the system.
The committed memory is a sum of all of the memory which
diff --git a/Documentation/filesystems/xfs-delayed-logging-design.rst b/Documentation/filesystems/xfs-delayed-logging-design.rst
index 464405d2801e..4ef419f54663 100644
--- a/Documentation/filesystems/xfs-delayed-logging-design.rst
+++ b/Documentation/filesystems/xfs-delayed-logging-design.rst
@@ -1,29 +1,314 @@
.. SPDX-License-Identifier: GPL-2.0
-==========================
-XFS Delayed Logging Design
-==========================
-
-Introduction to Re-logging in XFS
-=================================
-
-XFS logging is a combination of logical and physical logging. Some objects,
-such as inodes and dquots, are logged in logical format where the details
-logged are made up of the changes to in-core structures rather than on-disk
-structures. Other objects - typically buffers - have their physical changes
-logged. The reason for these differences is to reduce the amount of log space
-required for objects that are frequently logged. Some parts of inodes are more
-frequently logged than others, and inodes are typically more frequently logged
-than any other object (except maybe the superblock buffer) so keeping the
-amount of metadata logged low is of prime importance.
-
-The reason that this is such a concern is that XFS allows multiple separate
-modifications to a single object to be carried in the log at any given time.
-This allows the log to avoid needing to flush each change to disk before
-recording a new change to the object. XFS does this via a method called
-"re-logging". Conceptually, this is quite simple - all it requires is that any
-new change to the object is recorded with a *new copy* of all the existing
-changes in the new transaction that is written to the log.
+==================
+XFS Logging Design
+==================
+
+Preamble
+========
+
+This document describes the design and algorithms that the XFS journalling
+subsystem is based on, so that readers may familiarize themselves with the
+general concepts of how transaction processing in XFS works.
+
+We begin with an overview of transactions in XFS, followed by describing how
+transaction reservations are structured and accounted, and then move into how we
+guarantee forwards progress for long running transactions with finite initial
+reservation bounds. At this point we need to explain how relogging works. With
+the basic concepts covered, the design of the delayed logging mechanism is
+documented.
+
+
+Introduction
+============
+
+XFS uses Write Ahead Logging for ensuring changes to the filesystem metadata
+are atomic and recoverable. For reasons of space and time efficiency, the
+logging mechanisms are varied and complex, combining intents, logical and
+physical logging mechanisms to provide the necessary recovery guarantees the
+filesystem requires.
+
+Some objects, such as inodes and dquots, are logged in logical format where the
+details logged are made up of the changes to in-core structures rather than
+on-disk structures. Other objects - typically buffers - have their physical
+changes logged. Long running atomic modifications have individual changes
+chained together by intents, ensuring that journal recovery can restart and
+finish an operation that was only partially done when the system stopped
+functioning.
+
+The reason for these differences is to keep the amount of log space and CPU time
+required to process objects being modified as small as possible and hence the
+logging overhead as low as possible. Some items are very frequently modified,
+and some parts of objects are more frequently modified than others, so keeping
+the overhead of metadata logging low is of prime importance.
+
+The method used to log an item or chain modifications together isn't
+particularly important in the scope of this document. It suffices to know that
+the methods used for logging a particular object or chaining modifications
+together differ and depend on the object and/or modification being
+performed. The logging subsystem only cares that certain specific rules are
+followed to guarantee forwards progress and prevent deadlocks.
+
+
+Transactions in XFS
+===================
+
+XFS has two types of high level transactions, defined by the type of log space
+reservation they take. These are known as "one shot" and "permanent"
+transactions. Permanent transaction reservations can take reservations that span
+commit boundaries, whilst "one shot" transactions are for a single atomic
+modification.
+
+The type and size of reservation must be matched to the modification taking
+place. This means that permanent transactions can be used for one-shot
+modifications, but one-shot reservations cannot be used for permanent
+transactions.
+
+In the code, a one-shot transaction pattern looks somewhat like this::
+
+ tp = xfs_trans_alloc(<reservation>)
+ <lock items>
+ <join item to transaction>
+ <do modification>
+ xfs_trans_commit(tp);
+
+As items are modified in the transaction, the dirty regions in those items are
+tracked via the transaction handle. Once the transaction is committed, all
+resources joined to it are released, along with the remaining unused reservation
+space that was taken at the transaction allocation time.
+
+In contrast, a permanent transaction is made up of multiple linked individual
+transactions, and the pattern looks like this::
+
+ tp = xfs_trans_alloc(<reservation>)
+ xfs_ilock(ip, XFS_ILOCK_EXCL)
+
+ loop {
+ xfs_trans_ijoin(tp, 0);
+ <do modification>
+ xfs_trans_log_inode(tp, ip);
+ xfs_trans_roll(&tp);
+ }
+
+ xfs_trans_commit(tp);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+
+While this might look similar to a one-shot transaction, there is an important
+difference: xfs_trans_roll() performs a specific operation that links two
+transactions together::
+
+ ntp = xfs_trans_dup(tp);
+ xfs_trans_commit(tp);
+ xfs_log_reserve(ntp);
+
+This results in a series of "rolling transactions" where the inode is locked
+across the entire chain of transactions. Hence while this series of rolling
+transactions is running, nothing else can read from or write to the inode and
+this provides a mechanism for complex changes to appear atomic from an external
+observer's point of view.
+
+It is important to note that a series of rolling transactions in a permanent
+transaction does not form an atomic change in the journal. While each
+individual modification is atomic, the chain is *not atomic*. If we crash half
+way through, then recovery will only replay up to the last transactional
+modification the loop made that was committed to the journal.
+
+This affects long running permanent transactions in that it is not possible to
+predict how much of a long running operation will actually be recovered because
+there is no guarantee of how much of the operation reached stable storage. Hence
+if a long running operation requires multiple transactions to fully complete,
+the high level operation must use intents and deferred operations to guarantee
+recovery can complete the operation once the first transaction is persisted in
+the on-disk journal.
+
+
+Transactions are Asynchronous
+=============================
+
+In XFS, all high level transactions are asynchronous by default. This means that
+xfs_trans_commit() does not guarantee that the modification has been committed
+to stable storage when it returns. Hence when a system crashes, not all the
+completed transactions will be replayed during recovery.
+
+However, the logging subsystem does provide global ordering guarantees, such
+that if a specific change is seen after recovery, all metadata modifications
+that were committed prior to that change will also be seen.
+
+For single shot operations that need to reach stable storage immediately, or
+for ensuring that a long running permanent transaction is fully committed once
+complete, we can explicitly tag a transaction as synchronous. This will trigger
+a "log force" to flush the outstanding committed transactions to stable storage
+in the journal and wait for that to complete.
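+
+As a sketch, assuming the helper xfs_trans_set_sync() that marks a transaction
+synchronous (treat the exact call as illustrative rather than a definitive API
+reference), a synchronous one-shot transaction looks like::
+
+	tp = xfs_trans_alloc(<reservation>)
+	<lock items>
+	<join item to transaction>
+	<do modification>
+	xfs_trans_set_sync(tp)
+	xfs_trans_commit(tp);	/* returns only after the log force completes */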
+
+Synchronous transactions are rarely used, however, because they limit logging
+throughput to the IO latency limitations of the underlying storage. Instead, we
+tend to use log forces to ensure modifications are on stable storage only when
+a user operation requires a synchronisation point to occur (e.g. fsync).
+
+
+Transaction Reservations
+========================
+
+It has been mentioned a number of times now that the logging subsystem needs to
+provide a forwards progress guarantee so that no modification ever stalls
+because it can't be written to the journal due to a lack of space in the
+journal. This is achieved by the transaction reservations that are made when
+a transaction is first allocated. For permanent transactions, these reservations
+are maintained as part of the transaction rolling mechanism.
+
+A transaction reservation provides a guarantee that there is physical log space
+available to write the modification into the journal before we start making
+modifications to objects and items. As such, the reservation needs to be large
+enough to take into account the amount of metadata that the change might need to
+log in the worst case. This means that if we are modifying a btree in the
+transaction, we have to reserve enough space to record a full leaf-to-root split
+of the btree. As such, the reservations are quite complex because we have to
+take into account all the hidden changes that might occur.
+
+For example, a user data extent allocation involves allocating an extent from
+free space, which modifies the free space trees. That's two btrees. Inserting
+the extent into the inode's extent map might require a split of the extent map
+btree, which requires another allocation that can modify the free space trees
+again. Then we might have to update reverse mappings, which modifies yet
+another btree which might require more space. And so on. Hence the amount of
+metadata that a "simple" operation can modify can be quite large.
+
+This "worst case" calculation provides us with the static "unit reservation"
+for the transaction that is calculated at mount time. We must guarantee that the
+log has this much space available before the transaction is allowed to proceed
+so that when we come to write the dirty metadata into the log we don't run out
+of log space half way through the write.
+
+For one-shot transactions, a single unit space reservation is all that is
+required for the transaction to proceed. For permanent transactions, however, we
+also have a "log count" that affects the size of the reservation that is to be
+made.
+
+While a permanent transaction can get by with a single unit of space
+reservation, it is somewhat inefficient to do this as it requires the
+transaction rolling mechanism to re-reserve space on every transaction roll. We
+know from the implementation of the permanent transactions how many transaction
+rolls are likely for the common modifications that need to be made.
+
+For example, an inode allocation is typically two transactions - one to
+physically allocate a free inode chunk on disk, and another to allocate an inode
+from an inode chunk that has free inodes in it. Hence for an inode allocation
+transaction, we might set the reservation log count to a value of 2 to indicate
+that the common/fast path transaction will commit two linked transactions in a
+chain. Each time a permanent transaction rolls, it consumes an entire unit
+reservation.
+
+Hence when the permanent transaction is first allocated, the log space
+reservation is increased from a single unit reservation to multiple unit
+reservations. That multiple is defined by the reservation log count, and this
+means we can roll the transaction multiple times before we have to re-reserve
+log space when we roll the transaction. This ensures that the common
+modifications we make only need to reserve log space once.
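+
+As a worked example with purely illustrative numbers, if the unit reservation
+for an inode allocation transaction is 512KiB and the reservation log count is
+2, then the initial log space reservation is::
+
+	log space reserved = unit reservation * log count
+	                   = 512KiB * 2
+	                   = 1MiB
+
+and the chain can commit two linked transactions before it has to re-reserve
+log space.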
+
+If the log count for a permanent transaction reaches zero, then it needs to
+re-reserve physical space in the log. This is somewhat complex, and requires
+an understanding of how the log accounts for space that has been reserved.
+
+
+Log Space Accounting
+====================
+
+The position in the log is typically referred to as a Log Sequence Number (LSN).
+The log is circular, so the positions in the log are defined by the combination
+of a cycle number - the number of times the log has been overwritten - and the
+offset into the log. A LSN carries the cycle in the upper 32 bits and the
+offset in the lower 32 bits. The offset is in units of "basic blocks" (512
+bytes). Hence we can do relatively simple LSN based math to keep track of
+available space in the log.
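+
+As a sketch (the helper and type names here are illustrative, not the kernel's
+exact macros), an LSN can be packed and unpacked like this::
+
+	/* 64-bit LSN: cycle number in the upper 32 bits, block offset below */
+	typedef uint64_t lsn_t;
+
+	static inline lsn_t lsn_pack(uint32_t cycle, uint32_t block)
+	{
+		return ((lsn_t)cycle << 32) | block;
+	}
+
+	static inline uint32_t lsn_cycle(lsn_t lsn) { return lsn >> 32; }
+	static inline uint32_t lsn_block(lsn_t lsn) { return (uint32_t)lsn; }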
+
+Log space accounting is done via a pair of constructs called "grant heads". The
+position of the grant heads is an absolute value, so the amount of space
+available in the log is defined by the distance between the position of the
+grant head and the current log tail. That is, how much space can be
+reserved/consumed before the grant heads would fully wrap the log and overtake
+the tail position.
+
+The first grant head is the "reserve" head. This tracks the byte count of the
+reservations currently held by active transactions. It is a purely in-memory
+accounting of the space reservation and, as such, actually tracks byte offsets
+into the log rather than basic blocks. Hence it technically isn't using LSNs to
+represent the log position, but it is still treated like a split {cycle,offset}
+tuple for the purposes of tracking reservation space.
+
+The reserve grant head is used to accurately account for exact transaction
+reservation amounts and the exact byte count that modifications actually make
+and need to write into the log. The reserve head is used to prevent new
+transactions from taking new reservations when the head reaches the current
+tail. It will block new reservations in a FIFO queue and as the log tail moves
+forward it will wake them in order once sufficient space is available. This FIFO
+mechanism ensures no transaction is starved of resources when log space
+shortages occur.
+
+The other grant head is the "write" head. Unlike the reserve head, this grant
+head contains an LSN and it tracks the physical space usage in the log. While
+this might sound like it is accounting the same state as the reserve grant head
+- and it mostly does track exactly the same location as the reserve grant head -
+there are critical differences in behaviour between them that provide the
+forwards progress guarantees that rolling permanent transactions require.
+
+These differences matter when a permanent transaction is rolled, the internal
+"log count" reaches zero and the initial set of unit reservations has been
+exhausted. At this point, we still require a log space reservation to continue
+the next transaction in the sequence, but we have none remaining. We cannot
+sleep during the transaction commit process waiting for new log space to become
+available, as we may end up at the end of the FIFO queue and the items we have
+locked while we sleep could end up pinning the tail of the log before there is
+enough free space in the log to fulfil all of the pending reservations and
+wake the transaction commit that is in progress.
+
+To take a new reservation without sleeping requires us to be able to take a
+reservation even if there is no reservation space currently available. That is,
+we need to be able to *overcommit* the log reservation space. As has already
+been detailed, we cannot overcommit physical log space. However, the reserve
+grant head does not track physical space - it only accounts for the amount of
+reservations we currently have outstanding. Hence if the reserve head passes
+over the tail of the log all it means is that new reservations will be throttled
+immediately and remain throttled until the log tail is moved forward far enough
+to remove the overcommit and start taking new reservations. In other words, we
+can overcommit the reserve head without violating the physical log head and tail
+rules.
+
+As a result, permanent transactions only "regrant" reservation space during
+xfs_trans_commit() calls, while the physical log space reservation - tracked by
+the write head - is then reserved separately by a call to xfs_log_reserve()
+after the commit completes. At that point, we can sleep waiting for
+physical log space to be reserved from the write grant head, but only if one
+critical rule has been observed::
+
+ Code using permanent reservations must always log the items they hold
+ locked across each transaction they roll in the chain.
+
+"Re-logging" the locked items on every transaction roll ensures that the items
+attached to the transaction chain being rolled are always relocated to the
+physical head of the log and so do not pin the tail of the log. If a locked item
+pins the tail of the log when we sleep on the write reservation, then we will
+deadlock the log as we cannot take the locks needed to write back that item and
+move the tail of the log forwards to free up write grant space. Re-logging the
+locked items avoids this deadlock and guarantees that the log reservation we are
+making cannot self-deadlock.
+
+If all rolling transactions obey this rule, then they can all make forwards
+progress independently because nothing will block the log tail from moving
+forwards, and hence write grant space is always (eventually) made available
+to permanent transactions no matter how many times they roll.
+
+
+Re-logging Explained
+====================
+
+XFS allows multiple separate modifications to a single object to be carried in
+the log at any given time. This allows the log to avoid needing to flush each
+change to disk before recording a new change to the object. XFS does this via a
+method called "re-logging". Conceptually, this is quite simple - all it requires
+is that any new change to the object is recorded with a *new copy* of all the
+existing changes in the new transaction that is written to the log.
That is, if we have a sequence of changes A through to F, and the object was
written to disk after change D, we would see in the log the following series
@@ -42,16 +327,13 @@ transaction::
In other words, each time an object is relogged, the new transaction contains
the aggregation of all the previous changes currently held only in the log.
-This relogging technique also allows objects to be moved forward in the log so
-that an object being relogged does not prevent the tail of the log from ever
-moving forward. This can be seen in the table above by the changing
-(increasing) LSN of each subsequent transaction - the LSN is effectively a
-direct encoding of the location in the log of the transaction.
+This relogging technique allows objects to be moved forward in the log so that
+an object being relogged does not prevent the tail of the log from ever moving
+forward. This can be seen in the table above by the changing (increasing) LSN
+of each subsequent transaction, and it's the technique that allows us to
+implement long-running, multiple-commit permanent transactions.
-This relogging is also used to implement long-running, multiple-commit
-transactions. These transaction are known as rolling transactions, and require
-a special log reservation known as a permanent transaction reservation. A
-typical example of a rolling transaction is the removal of extents from an
+A typical example of a rolling transaction is the removal of extents from an
inode which can only be done at a rate of two extents per transaction because
of reservation size limitations. Hence a rolling extent removal transaction
keeps relogging the inode and btree buffers as they get modified in each
@@ -67,12 +349,13 @@ the log over and over again. Worse is the fact that objects tend to get
dirtier as they get relogged, so each subsequent transaction is writing more
metadata into the log.
-Another feature of the XFS transaction subsystem is that most transactions are
-asynchronous. That is, they don't commit to disk until either a log buffer is
-filled (a log buffer can hold multiple transactions) or a synchronous operation
-forces the log buffers holding the transactions to disk. This means that XFS is
-doing aggregation of transactions in memory - batching them, if you like - to
-minimise the impact of the log IO on transaction throughput.
+It should now also be obvious how relogging and asynchronous transactions go
+hand in hand. That is, transactions don't get written to the physical journal
+until either a log buffer is filled (a log buffer can hold multiple
+transactions) or a synchronous operation forces the log buffers holding the
+transactions to disk. This means that XFS is doing aggregation of transactions
+in memory - batching them, if you like - to minimise the impact of the log IO on
+transaction throughput.
The limitation on asynchronous transaction throughput is the number and size of
log buffers made available by the log manager. By default there are 8 log
diff --git a/Documentation/i2c/i2c-protocol.rst b/Documentation/i2c/i2c-protocol.rst
index b2092f8f815d..df0febfe6410 100644
--- a/Documentation/i2c/i2c-protocol.rst
+++ b/Documentation/i2c/i2c-protocol.rst
@@ -2,7 +2,8 @@
The I2C Protocol
================
-This document describes the I2C protocol. Or will, when it is finished :-)
+This document is an overview of the basic I2C transactions and the kernel
+APIs to perform them.
Key to symbols
==============
@@ -12,13 +13,9 @@ S Start condition
P Stop condition
Rd/Wr (1 bit) Read/Write bit. Rd equals 1, Wr equals 0.
A, NA (1 bit) Acknowledge (ACK) and Not Acknowledge (NACK) bit
-Addr (7 bits) I2C 7 bit address. Note that this can be expanded as usual to
+Addr (7 bits) I2C 7 bit address. Note that this can be expanded to
get a 10 bit I2C address.
-Comm (8 bits) Command byte, a data byte which often selects a register on
- the device.
-Data (8 bits) A plain data byte. Sometimes, I write DataLow, DataHigh
- for 16 bit data.
-Count (8 bits) A data byte containing the length of a block operation.
+Data (8 bits) A plain data byte.
[..] Data sent by I2C device, as opposed to data sent by the
host adapter.
diff --git a/Documentation/i2c/i2c-sysfs.rst b/Documentation/i2c/i2c-sysfs.rst
index 6b68b95cd427..78c54c658fa1 100644
--- a/Documentation/i2c/i2c-sysfs.rst
+++ b/Documentation/i2c/i2c-sysfs.rst
@@ -51,11 +51,10 @@ Google Pixel 3 phone for example::
``i2c-2`` is an I2C bus whose number is 2, and ``2-0049`` is an I2C device
on bus 2 address 0x49 bound with a kernel driver.
-Terminologies
-=============
+Terminology
+===========
-First, let us define a couple of terminologies to avoid confusions in the later
-sections.
+First, let us define some terms to avoid confusion in later sections.
(Physical) I2C Bus Controller
-----------------------------
@@ -100,9 +99,7 @@ Caveat
This may be a confusing part for people who only know about the physical I2C
design of a board. It is actually possible to rename the I2C bus physical number
to a different number in logical I2C bus level in Device Tree Source (DTS) under
-section ``aliases``. See
-`arch/arm/boot/dts/nuvoton-npcm730-gsj.dts
-<../../arch/arm/boot/dts/nuvoton-npcm730-gsj.dts>`_
+section ``aliases``. See ``arch/arm/boot/dts/nuvoton-npcm730-gsj.dts``
for an example of DTS file.
Best Practice: **(To kernel software developers)** It is better to keep the I2C
@@ -117,7 +114,7 @@ Walk through Logical I2C Bus
For the following content, we will use a more complex I2C topology as an
example. Here is a brief graph for the I2C topology. If you do not understand
-this graph at the first glance, do not be afraid to continue reading this doc
+this graph at first glance, do not be afraid to continue reading this doc
and review it when you finish reading.
::
@@ -290,8 +287,7 @@ MUX channel 0, and all the way to ``i2c-19`` for the MUX channel 3.
The kernel software developer is able to pin the fanout MUX channels to a static
logical I2C bus number in the DTS. This doc will not go through the details on
how to implement this in DTS, but we can see an example in:
-`arch/arm/boot/dts/aspeed-bmc-facebook-wedge400.dts
-<../../arch/arm/boot/dts/aspeed-bmc-facebook-wedge400.dts>`_
+``arch/arm/boot/dts/aspeed-bmc-facebook-wedge400.dts``
In the above example, there is an 8-channel I2C MUX at address 0x70 on physical
I2C bus 2. The channel 2 of the MUX is defined as ``imux18`` in DTS,
@@ -383,13 +379,9 @@ Sysfs for the I2C sensor device::
For more info on the Hwmon Sysfs, refer to the doc:
-`Naming and data format standards for sysfs files
-<../hwmon/sysfs-interface.rst>`_
+../hwmon/sysfs-interface.rst
Instantiate I2C Devices in I2C Sysfs
------------------------------------
-Refer to the doc:
-
-`How to instantiate I2C devices, Method 4: Instantiate from user-space
-<instantiating-devices.rst#method-4-instantiate-from-user-space>`_
+Refer to section "Method 4: Instantiate from user-space" of instantiating-devices.rst
diff --git a/Documentation/i2c/instantiating-devices.rst b/Documentation/i2c/instantiating-devices.rst
index 890c9360ce19..3ea056a95812 100644
--- a/Documentation/i2c/instantiating-devices.rst
+++ b/Documentation/i2c/instantiating-devices.rst
@@ -31,7 +31,9 @@ Declare the I2C devices via devicetree
On platforms using devicetree, the declaration of I2C devices is done in
subnodes of the master controller.
-Example::
+Example:
+
+.. code-block:: dts
i2c1: i2c@400a0000 {
/* ... master properties skipped ... */
@@ -71,7 +73,9 @@ code. Instantiating I2C devices via board files is done with an array of
struct i2c_board_info which is registered by calling
i2c_register_board_info().
-Example (from omap2 h4)::
+Example (from omap2 h4):
+
+.. code-block:: c
static struct i2c_board_info h4_i2c_board_info[] __initdata = {
{
@@ -111,7 +115,9 @@ bus in advance, so the method 1 described above can't be used. Instead,
you can instantiate your I2C devices explicitly. This is done by filling
a struct i2c_board_info and calling i2c_new_client_device().
-Example (from the sfe4001 network driver)::
+Example (from the sfe4001 network driver):
+
+.. code-block:: c
static struct i2c_board_info sfe4001_hwmon_info = {
I2C_BOARD_INFO("max6647", 0x4e),
@@ -136,7 +142,9 @@ it may have different addresses from one board to the next (manufacturer
changing its design without notice). In this case, you can call
i2c_new_scanned_device() instead of i2c_new_client_device().
-Example (from the nxp OHCI driver)::
+Example (from the nxp OHCI driver):
+
+.. code-block:: c
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END };
diff --git a/Documentation/i2c/smbus-protocol.rst b/Documentation/i2c/smbus-protocol.rst
index 00d8e17d0aca..4942c4cad4ad 100644
--- a/Documentation/i2c/smbus-protocol.rst
+++ b/Documentation/i2c/smbus-protocol.rst
@@ -41,12 +41,12 @@ Sr Repeated start condition, used to switch from write to
P Stop condition
Rd/Wr (1 bit) Read/Write bit. Rd equals 1, Wr equals 0.
A, NA (1 bit) Acknowledge (ACK) and Not Acknowledge (NACK) bit
-Addr (7 bits) I2C 7 bit address. Note that this can be expanded as usual to
+Addr (7 bits) I2C 7 bit address. Note that this can be expanded to
get a 10 bit I2C address.
Comm (8 bits) Command byte, a data byte which often selects a register on
the device.
-Data (8 bits) A plain data byte. Sometimes, I write DataLow, DataHigh
- for 16 bit data.
+Data (8 bits) A plain data byte. DataLow and DataHigh represent the low and
+ high byte of a 16 bit word.
Count (8 bits) A data byte containing the length of a block operation.
[..] Data sent by I2C device, as opposed to data sent by the host
diff --git a/Documentation/index.rst b/Documentation/index.rst
index 67036a05b771..4737c18c97ff 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -128,7 +128,7 @@ needed).
sound/index
crypto/index
filesystems/index
- vm/index
+ mm/index
bpf/index
usb/index
PCI/index
diff --git a/Documentation/kbuild/kconfig-language.rst b/Documentation/kbuild/kconfig-language.rst
index a7173843a294..858ed5d80def 100644
--- a/Documentation/kbuild/kconfig-language.rst
+++ b/Documentation/kbuild/kconfig-language.rst
@@ -525,8 +525,8 @@ followed by a test macro::
If you need to expose a compiler capability to makefiles and/or C source files,
`CC_HAS_` is the recommended prefix for the config option::
- config CC_HAS_ASM_GOTO
- def_bool $(success,$(srctree)/scripts/gcc-goto.sh $(CC))
+ config CC_HAS_FOO
+ def_bool $(success,$(srctree)/scripts/cc-check-foo.sh $(CC))
Build as module only
~~~~~~~~~~~~~~~~~~~~
@@ -672,7 +672,7 @@ Future kconfig work
Work on kconfig is welcomed on both areas of clarifying semantics and on
evaluating the use of a full SAT solver for it. A full SAT solver can be
desirable to enable more complex dependency mappings and / or queries,
-for instance on possible use case for a SAT solver could be that of handling
+for instance one possible use case for a SAT solver could be that of handling
the current known recursive dependency issues. It is not known if this would
address such issues but such evaluation is desirable. If support for a full SAT
solver proves too complex or that it cannot address recursive dependency issues
diff --git a/Documentation/loongarch/introduction.rst b/Documentation/loongarch/introduction.rst
index 216b3f390e80..6c9160c4e9be 100644
--- a/Documentation/loongarch/introduction.rst
+++ b/Documentation/loongarch/introduction.rst
@@ -221,7 +221,7 @@ I26 Opcode + I26L + I26H
=========== ==========================
Rd is the destination register operand, while Rj, Rk and Ra ("a" stands for
-"additional") are the source register operands. I8/I12/I16/I21/I26 are
+"additional") are the source register operands. I8/I12/I14/I16/I21/I26 are
immediate operands of respective width. The longer I21 and I26 are stored
in separate higher and lower parts in the instruction word, denoted by the "L"
and "H" suffixes.
diff --git a/Documentation/m68k/kernel-options.rst b/Documentation/m68k/kernel-options.rst
index cabd9419740d..2008a20b4329 100644
--- a/Documentation/m68k/kernel-options.rst
+++ b/Documentation/m68k/kernel-options.rst
@@ -367,8 +367,8 @@ activated by a "external:" sub-option.
4.1.2) inverse
--------------
-Invert the display. This affects both, text (consoles) and graphics
-(X) display. Usually, the background is chosen to be black. With this
+Invert the display. This affects only text consoles.
+Usually, the background is chosen to be black. With this
option, you can make the background white.
4.1.3) font
diff --git a/Documentation/vm/active_mm.rst b/Documentation/mm/active_mm.rst
index 6f8269c284ed..6f8269c284ed 100644
--- a/Documentation/vm/active_mm.rst
+++ b/Documentation/mm/active_mm.rst
diff --git a/Documentation/vm/arch_pgtable_helpers.rst b/Documentation/mm/arch_pgtable_helpers.rst
index cbaee9e59241..cbaee9e59241 100644
--- a/Documentation/vm/arch_pgtable_helpers.rst
+++ b/Documentation/mm/arch_pgtable_helpers.rst
diff --git a/Documentation/vm/balance.rst b/Documentation/mm/balance.rst
index 6a1fadf3e173..6a1fadf3e173 100644
--- a/Documentation/vm/balance.rst
+++ b/Documentation/mm/balance.rst
diff --git a/Documentation/vm/bootmem.rst b/Documentation/mm/bootmem.rst
index eb2b31eedfa1..eb2b31eedfa1 100644
--- a/Documentation/vm/bootmem.rst
+++ b/Documentation/mm/bootmem.rst
diff --git a/Documentation/vm/damon/api.rst b/Documentation/mm/damon/api.rst
index 08f34df45523..08f34df45523 100644
--- a/Documentation/vm/damon/api.rst
+++ b/Documentation/mm/damon/api.rst
diff --git a/Documentation/vm/damon/design.rst b/Documentation/mm/damon/design.rst
index 0cff6fac6b7e..0cff6fac6b7e 100644
--- a/Documentation/vm/damon/design.rst
+++ b/Documentation/mm/damon/design.rst
diff --git a/Documentation/vm/damon/faq.rst b/Documentation/mm/damon/faq.rst
index dde7e2414ee6..dde7e2414ee6 100644
--- a/Documentation/vm/damon/faq.rst
+++ b/Documentation/mm/damon/faq.rst
diff --git a/Documentation/vm/damon/index.rst b/Documentation/mm/damon/index.rst
index 48c0bbff98b2..48c0bbff98b2 100644
--- a/Documentation/vm/damon/index.rst
+++ b/Documentation/mm/damon/index.rst
diff --git a/Documentation/vm/free_page_reporting.rst b/Documentation/mm/free_page_reporting.rst
index 8c05e62d8b2b..8c05e62d8b2b 100644
--- a/Documentation/vm/free_page_reporting.rst
+++ b/Documentation/mm/free_page_reporting.rst
diff --git a/Documentation/vm/frontswap.rst b/Documentation/mm/frontswap.rst
index feecc5e24477..feecc5e24477 100644
--- a/Documentation/vm/frontswap.rst
+++ b/Documentation/mm/frontswap.rst
diff --git a/Documentation/vm/highmem.rst b/Documentation/mm/highmem.rst
index c9887f241c6c..0f731d9196b0 100644
--- a/Documentation/vm/highmem.rst
+++ b/Documentation/mm/highmem.rst
@@ -60,17 +60,40 @@ list shows them in order of preference of use.
This function should be preferred, where feasible, over all the others.
These mappings are thread-local and CPU-local, meaning that the mapping
- can only be accessed from within this thread and the thread is bound the
- CPU while the mapping is active. Even if the thread is preempted (since
- preemption is never disabled by the function) the CPU can not be
- unplugged from the system via CPU-hotplug until the mapping is disposed.
+ can only be accessed from within this thread and the thread is bound to the
+ CPU while the mapping is active. Although preemption is never disabled by
+ this function, the CPU can not be unplugged from the system via
+ CPU-hotplug until the mapping is disposed.
It's valid to take pagefaults in a local kmap region, unless the context
in which the local mapping is acquired does not allow it for other reasons.
+ As said, pagefaults and preemption are never disabled. There is no need to
+ disable preemption because, when context switches to a different task, the
+ maps of the outgoing task are saved and those of the incoming one are
+ restored.
+
kmap_local_page() always returns a valid virtual address and it is assumed
that kunmap_local() will never fail.
+ On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
+ virtual address of the direct mapping. Only real highmem pages are
+ temporarily mapped. Therefore, users may call a plain page_address()
+ for pages which are known to not come from ZONE_HIGHMEM. However, it is
+ always safe to use kmap_local_page() / kunmap_local().
+
+ While it is significantly faster than kmap(), for the highmem case it
+ comes with restrictions on pointer validity. Contrary to kmap()
+ mappings, the local mappings are only valid in the context of the caller
+ and cannot be handed to other contexts. This implies that users must
+ be absolutely sure to keep the use of the return address local to the
+ thread which mapped it.
+
+ Most code can be designed to use thread local mappings. Users should
+ therefore try to design their code to avoid the use of kmap() by mapping
+ pages in the same thread in which the address will be used, and prefer
+ kmap_local_page().
+
Nesting kmap_local_page() and kmap_atomic() mappings is allowed to a certain
extent (up to KMAP_TYPE_NR) but their invocations have to be strictly ordered
because the map implementation is stack based. See kmap_local_page() kdocs
diff --git a/Documentation/vm/hmm.rst b/Documentation/mm/hmm.rst
index f2a59ed82ed3..f2a59ed82ed3 100644
--- a/Documentation/vm/hmm.rst
+++ b/Documentation/mm/hmm.rst
diff --git a/Documentation/vm/hugetlbfs_reserv.rst b/Documentation/mm/hugetlbfs_reserv.rst
index f143954e0d05..f143954e0d05 100644
--- a/Documentation/vm/hugetlbfs_reserv.rst
+++ b/Documentation/mm/hugetlbfs_reserv.rst
diff --git a/Documentation/vm/hwpoison.rst b/Documentation/mm/hwpoison.rst
index b9d5253c1305..b9d5253c1305 100644
--- a/Documentation/vm/hwpoison.rst
+++ b/Documentation/mm/hwpoison.rst
diff --git a/Documentation/vm/index.rst b/Documentation/mm/index.rst
index 575ccd40e30c..575ccd40e30c 100644
--- a/Documentation/vm/index.rst
+++ b/Documentation/mm/index.rst
diff --git a/Documentation/vm/ksm.rst b/Documentation/mm/ksm.rst
index 9e37add068e6..9e37add068e6 100644
--- a/Documentation/vm/ksm.rst
+++ b/Documentation/mm/ksm.rst
diff --git a/Documentation/vm/memory-model.rst b/Documentation/mm/memory-model.rst
index 30e8fbed6914..3779e562dc76 100644
--- a/Documentation/vm/memory-model.rst
+++ b/Documentation/mm/memory-model.rst
@@ -170,7 +170,7 @@ The users of `ZONE_DEVICE` are:
* hmm: Extend `ZONE_DEVICE` with `->page_fault()` and `->page_free()`
event callbacks to allow a device-driver to coordinate memory management
events related to device-memory, typically GPU memory. See
- Documentation/vm/hmm.rst.
+ Documentation/mm/hmm.rst.
* p2pdma: Create `struct page` objects to allow peer devices in a
PCI/-E topology to coordinate direct-DMA operations between themselves,
diff --git a/Documentation/vm/mmu_notifier.rst b/Documentation/mm/mmu_notifier.rst
index df5d7777fc6b..df5d7777fc6b 100644
--- a/Documentation/vm/mmu_notifier.rst
+++ b/Documentation/mm/mmu_notifier.rst
diff --git a/Documentation/vm/numa.rst b/Documentation/mm/numa.rst
index 99fdeca917ca..99fdeca917ca 100644
--- a/Documentation/vm/numa.rst
+++ b/Documentation/mm/numa.rst
diff --git a/Documentation/vm/oom.rst b/Documentation/mm/oom.rst
index 18e9e40c1ec1..18e9e40c1ec1 100644
--- a/Documentation/vm/oom.rst
+++ b/Documentation/mm/oom.rst
diff --git a/Documentation/vm/overcommit-accounting.rst b/Documentation/mm/overcommit-accounting.rst
index a4895d6fc1c2..a4895d6fc1c2 100644
--- a/Documentation/vm/overcommit-accounting.rst
+++ b/Documentation/mm/overcommit-accounting.rst
diff --git a/Documentation/vm/page_allocation.rst b/Documentation/mm/page_allocation.rst
index d9b4495561f1..d9b4495561f1 100644
--- a/Documentation/vm/page_allocation.rst
+++ b/Documentation/mm/page_allocation.rst
diff --git a/Documentation/vm/page_cache.rst b/Documentation/mm/page_cache.rst
index 75eba7c431b2..75eba7c431b2 100644
--- a/Documentation/vm/page_cache.rst
+++ b/Documentation/mm/page_cache.rst
diff --git a/Documentation/vm/page_frags.rst b/Documentation/mm/page_frags.rst
index 7d6f9385d129..7d6f9385d129 100644
--- a/Documentation/vm/page_frags.rst
+++ b/Documentation/mm/page_frags.rst
diff --git a/Documentation/vm/page_migration.rst b/Documentation/mm/page_migration.rst
index 11493bad7112..11493bad7112 100644
--- a/Documentation/vm/page_migration.rst
+++ b/Documentation/mm/page_migration.rst
diff --git a/Documentation/vm/page_owner.rst b/Documentation/mm/page_owner.rst
index f5c954afe97c..f5c954afe97c 100644
--- a/Documentation/vm/page_owner.rst
+++ b/Documentation/mm/page_owner.rst
diff --git a/Documentation/vm/page_reclaim.rst b/Documentation/mm/page_reclaim.rst
index 50a30b7f8ac3..50a30b7f8ac3 100644
--- a/Documentation/vm/page_reclaim.rst
+++ b/Documentation/mm/page_reclaim.rst
diff --git a/Documentation/vm/page_table_check.rst b/Documentation/mm/page_table_check.rst
index 1a09472f10a3..1a09472f10a3 100644
--- a/Documentation/vm/page_table_check.rst
+++ b/Documentation/mm/page_table_check.rst
diff --git a/Documentation/vm/page_tables.rst b/Documentation/mm/page_tables.rst
index 96939571d7bc..96939571d7bc 100644
--- a/Documentation/vm/page_tables.rst
+++ b/Documentation/mm/page_tables.rst
diff --git a/Documentation/vm/physical_memory.rst b/Documentation/mm/physical_memory.rst
index 2ab7b8c1c863..2ab7b8c1c863 100644
--- a/Documentation/vm/physical_memory.rst
+++ b/Documentation/mm/physical_memory.rst
diff --git a/Documentation/vm/process_addrs.rst b/Documentation/mm/process_addrs.rst
index e8618fbc62c9..e8618fbc62c9 100644
--- a/Documentation/vm/process_addrs.rst
+++ b/Documentation/mm/process_addrs.rst
diff --git a/Documentation/vm/remap_file_pages.rst b/Documentation/mm/remap_file_pages.rst
index 7bef6718e3a9..7bef6718e3a9 100644
--- a/Documentation/vm/remap_file_pages.rst
+++ b/Documentation/mm/remap_file_pages.rst
diff --git a/Documentation/vm/shmfs.rst b/Documentation/mm/shmfs.rst
index 8b01ebb4c30e..8b01ebb4c30e 100644
--- a/Documentation/vm/shmfs.rst
+++ b/Documentation/mm/shmfs.rst
diff --git a/Documentation/vm/slab.rst b/Documentation/mm/slab.rst
index 87d5a5bb172f..87d5a5bb172f 100644
--- a/Documentation/vm/slab.rst
+++ b/Documentation/mm/slab.rst
diff --git a/Documentation/vm/slub.rst b/Documentation/mm/slub.rst
index 43063ade737a..43063ade737a 100644
--- a/Documentation/vm/slub.rst
+++ b/Documentation/mm/slub.rst
diff --git a/Documentation/vm/split_page_table_lock.rst b/Documentation/mm/split_page_table_lock.rst
index c08919662704..c08919662704 100644
--- a/Documentation/vm/split_page_table_lock.rst
+++ b/Documentation/mm/split_page_table_lock.rst
diff --git a/Documentation/vm/swap.rst b/Documentation/mm/swap.rst
index 78819bd4d745..78819bd4d745 100644
--- a/Documentation/vm/swap.rst
+++ b/Documentation/mm/swap.rst
diff --git a/Documentation/vm/transhuge.rst b/Documentation/mm/transhuge.rst
index 216db1d67d04..216db1d67d04 100644
--- a/Documentation/vm/transhuge.rst
+++ b/Documentation/mm/transhuge.rst
diff --git a/Documentation/vm/unevictable-lru.rst b/Documentation/mm/unevictable-lru.rst
index b280367d6a44..b280367d6a44 100644
--- a/Documentation/vm/unevictable-lru.rst
+++ b/Documentation/mm/unevictable-lru.rst
diff --git a/Documentation/vm/vmalloc.rst b/Documentation/mm/vmalloc.rst
index 363fe20d6b9f..363fe20d6b9f 100644
--- a/Documentation/vm/vmalloc.rst
+++ b/Documentation/mm/vmalloc.rst
diff --git a/Documentation/vm/vmalloced-kernel-stacks.rst b/Documentation/mm/vmalloced-kernel-stacks.rst
index fc8c67833af6..fc8c67833af6 100644
--- a/Documentation/vm/vmalloced-kernel-stacks.rst
+++ b/Documentation/mm/vmalloced-kernel-stacks.rst
diff --git a/Documentation/vm/vmemmap_dedup.rst b/Documentation/mm/vmemmap_dedup.rst
index c9c495f62d12..a4b12ff906c4 100644
--- a/Documentation/vm/vmemmap_dedup.rst
+++ b/Documentation/mm/vmemmap_dedup.rst
@@ -7,23 +7,25 @@ A vmemmap diet for HugeTLB and Device DAX
HugeTLB
=======
-The struct page structures (page structs) are used to describe a physical
-page frame. By default, there is a one-to-one mapping from a page frame to
-it's corresponding page struct.
+This section is to explain how HugeTLB Vmemmap Optimization (HVO) works.
+
+The ``struct page`` structures are used to describe a physical page frame. By
+default, there is a one-to-one mapping from a page frame to it's corresponding
+``struct page``.
HugeTLB pages consist of multiple base page size pages and is supported by many
architectures. See Documentation/admin-guide/mm/hugetlbpage.rst for more
details. On the x86-64 architecture, HugeTLB pages of size 2MB and 1GB are
currently supported. Since the base page size on x86 is 4KB, a 2MB HugeTLB page
consists of 512 base pages and a 1GB HugeTLB page consists of 4096 base pages.
-For each base page, there is a corresponding page struct.
+For each base page, there is a corresponding ``struct page``.
-Within the HugeTLB subsystem, only the first 4 page structs are used to
-contain unique information about a HugeTLB page. __NR_USED_SUBPAGE provides
-this upper limit. The only 'useful' information in the remaining page structs
+Within the HugeTLB subsystem, only the first 4 ``struct page`` are used to
+contain unique information about a HugeTLB page. ``__NR_USED_SUBPAGE`` provides
+this upper limit. The only 'useful' information in the remaining ``struct page``
is the compound_head field, and this field is the same for all tail pages.
-By removing redundant page structs for HugeTLB pages, memory can be returned
+By removing redundant ``struct page`` for HugeTLB pages, memory can be returned
to the buddy allocator for other uses.
Different architectures support different HugeTLB pages. For example, the
@@ -44,7 +46,7 @@ page.
| | 64KB | 2MB | 512MB | 16GB | |
+--------------+-----------+-----------+-----------+-----------+-----------+
-When the system boot up, every HugeTLB page has more than one struct page
+When the system boots up, every HugeTLB page has more than one ``struct page``
structs which size is (unit: pages)::
struct_size = HugeTLB_Size / PAGE_SIZE * sizeof(struct page) / PAGE_SIZE
@@ -74,10 +76,10 @@ Where n is how many pte entries which one page can contains. So the value of
n is (PAGE_SIZE / sizeof(pte_t)).
This optimization only supports 64-bit system, so the value of sizeof(pte_t)
-is 8. And this optimization also applicable only when the size of struct page
-is a power of two. In most cases, the size of struct page is 64 bytes (e.g.
+is 8. And this optimization is also applicable only when the size of ``struct page``
+is a power of two. In most cases, the size of ``struct page`` is 64 bytes (e.g.
x86-64 and arm64). So if we use pmd level mapping for a HugeTLB page, the
-size of struct page structs of it is 8 page frames which size depends on the
+size of ``struct page`` structs of it is 8 page frames which size depends on the
size of the base page.
For the HugeTLB page of the pud level mapping, then::
@@ -86,7 +88,7 @@ For the HugeTLB page of the pud level mapping, then::
= PAGE_SIZE / 8 * 8 (pages)
= PAGE_SIZE (pages)
-Where the struct_size(pmd) is the size of the struct page structs of a
+Where the struct_size(pmd) is the size of the ``struct page`` structs of a
HugeTLB page of the pmd level mapping.
E.g.: A 2MB HugeTLB page on x86_64 consists in 8 page frames while 1GB
@@ -94,7 +96,7 @@ HugeTLB page consists in 4096.
Next, we take the pmd level mapping of the HugeTLB page as an example to
show the internal implementation of this optimization. There are 8 pages
-struct page structs associated with a HugeTLB page which is pmd mapped.
+``struct page`` structs associated with a HugeTLB page which is pmd mapped.
Here is how things look before optimization::
@@ -122,10 +124,10 @@ Here is how things look before optimization::
+-----------+
The value of page->compound_head is the same for all tail pages. The first
-page of page structs (page 0) associated with the HugeTLB page contains the 4
-page structs necessary to describe the HugeTLB. The only use of the remaining
-pages of page structs (page 1 to page 7) is to point to page->compound_head.
-Therefore, we can remap pages 1 to 7 to page 0. Only 1 page of page structs
+page of ``struct page`` (page 0) associated with the HugeTLB page contains the 4
+``struct page`` necessary to describe the HugeTLB. The only use of the remaining
+pages of ``struct page`` (page 1 to page 7) is to point to page->compound_head.
+Therefore, we can remap pages 1 to 7 to page 0. Only 1 page of ``struct page``
will be used for each HugeTLB page. This will allow us to free the remaining
7 pages to the buddy allocator.
@@ -167,13 +169,37 @@ entries that can be cached in a single TLB entry.
The contiguous bit is used to increase the mapping size at the pmd and pte
(last) level. So this type of HugeTLB page can be optimized only when its
-size of the struct page structs is greater than 1 page.
+size of the ``struct page`` structs is greater than **1** page.
Notice: The head vmemmap page is not freed to the buddy allocator and all
tail vmemmap pages are mapped to the head vmemmap page frame. So we can see
-more than one struct page struct with PG_head (e.g. 8 per 2 MB HugeTLB page)
-associated with each HugeTLB page. The compound_head() can handle this
-correctly (more details refer to the comment above compound_head()).
+more than one ``struct page`` struct with ``PG_head`` (e.g. 8 per 2 MB HugeTLB
+page) associated with each HugeTLB page. The ``compound_head()`` can handle
+this correctly. There is only **one** real head ``struct page``; any tail
+``struct page`` with ``PG_head`` set is a fake head ``struct page``. We need an
+approach to distinguish between those two different types of ``struct page`` so
+that ``compound_head()`` can return the real head ``struct page`` when the
+parameter is the tail ``struct page`` but with ``PG_head``. The following code
+snippet describes how to distinguish between real and fake head ``struct page``.
+
+.. code-block:: c
+
+ if (test_bit(PG_head, &page->flags)) {
+ unsigned long head = READ_ONCE(page[1].compound_head);
+
+ if (head & 1) {
+ if (head == (unsigned long)page + 1)
+ /* head struct page */
+ else
+ /* tail struct page */
+ } else {
+ /* head struct page */
+ }
+ }
+
+When a page has ``PG_head`` set, we can safely access the fields of **page[1]**
+because the page is a compound page composed of at least two contiguous pages.
+The implementation refers to ``page_fixed_fake_head()``.
Device DAX
==========
@@ -187,7 +213,7 @@ PMD_SIZE (2M on x86_64) and PUD_SIZE (1G on x86_64).
The differences with HugeTLB are relatively minor.
-It only use 3 page structs for storing all information as opposed
+It only uses 3 ``struct page`` for storing all information as opposed
to 4 on HugeTLB pages.
There's no remapping of vmemmap given that device-dax memory is not part of
diff --git a/Documentation/vm/z3fold.rst b/Documentation/mm/z3fold.rst
index 224e3c61d686..224e3c61d686 100644
--- a/Documentation/vm/z3fold.rst
+++ b/Documentation/mm/z3fold.rst
diff --git a/Documentation/vm/zsmalloc.rst b/Documentation/mm/zsmalloc.rst
index 6e79893d6132..6e79893d6132 100644
--- a/Documentation/vm/zsmalloc.rst
+++ b/Documentation/mm/zsmalloc.rst
diff --git a/Documentation/networking/bonding.rst b/Documentation/networking/bonding.rst
index 53a18ff7cf23..7823a069a903 100644
--- a/Documentation/networking/bonding.rst
+++ b/Documentation/networking/bonding.rst
@@ -1982,15 +1982,6 @@ uses the response as an indication that the link is operating. This
gives some assurance that traffic is actually flowing to and from one
or more peers on the local network.
-The ARP monitor relies on the device driver itself to verify
-that traffic is flowing. In particular, the driver must keep up to
-date the last receive time, dev->last_rx. Drivers that use NETIF_F_LLTX
-flag must also update netdev_queue->trans_start. If they do not, then the
-ARP monitor will immediately fail any slaves using that driver, and
-those slaves will stay down. If networking monitoring (tcpdump, etc)
-shows the ARP requests and replies on the network, then it may be that
-your device driver is not updating last_rx and trans_start.
-
7.2 Configuring Multiple ARP Targets
------------------------------------
diff --git a/Documentation/powerpc/elf_hwcaps.rst b/Documentation/powerpc/elf_hwcaps.rst
new file mode 100644
index 000000000000..3366e5b18e67
--- /dev/null
+++ b/Documentation/powerpc/elf_hwcaps.rst
@@ -0,0 +1,231 @@
+.. _elf_hwcaps_powerpc:
+
+==================
+POWERPC ELF HWCAPs
+==================
+
+This document describes the usage and semantics of the powerpc ELF HWCAPs.
+
+
+1. Introduction
+---------------
+
+Some hardware or software features are only available on some CPU
+implementations, and/or with certain kernel configurations, but have no other
+discovery mechanism available to userspace code. The kernel exposes the
+presence of these features to userspace through a set of flags called HWCAPs,
+exposed in the auxiliary vector.
+
+Userspace software can test for features by acquiring the AT_HWCAP or
+AT_HWCAP2 entry of the auxiliary vector, and testing whether the relevant
+flags are set, e.g.::
+
+ bool floating_point_is_present(void)
+ {
+ unsigned long HWCAPs = getauxval(AT_HWCAP);
+ if (HWCAPs & PPC_FEATURE_HAS_FPU)
+ return true;
+
+ return false;
+ }
+
+Where software relies on a feature described by a HWCAP, it should check the
+relevant HWCAP flag to verify that the feature is present before attempting to
+make use of the feature.
+
+HWCAP is the preferred method to test for the presence of a feature rather
+than probing through other means, which may not be reliable or may cause
+unpredictable behaviour.
+
+Software that targets a particular platform does not necessarily have to
+test for required or implied features. For example if the program requires
+FPU, VMX, VSX, it is not necessary to test those HWCAPs, and it may be
+impossible to do so if the compiler generates code requiring those features.
+
+2. Facilities
+-------------
+
+The Power ISA uses the term "facility" to describe a class of instructions,
+registers, interrupts, etc. The presence or absence of a facility indicates
+whether this class is available to be used, but the specifics depend on the
+ISA version. For example, if the VSX facility is available, the VSX
+instructions that can be used differ between the v3.0B and v3.1B ISA
+versions.
+
+3. Categories
+-------------
+
+The Power ISA before v3.0 uses the term "category" to describe certain
+classes of instructions and operating modes which may be optional or
+mutually exclusive. The exact meaning of a HWCAP flag may depend on
+context, e.g., the presence of the BOOKE feature implies that the server
+category is not implemented.
+
+4. HWCAP allocation
+-------------------
+
+HWCAPs are allocated as described in Power Architecture 64-Bit ELF V2 ABI
+Specification (which will be reflected in the kernel's uapi headers).
+
+5. The HWCAPs exposed in AT_HWCAP
+---------------------------------
+
+PPC_FEATURE_32
+ 32-bit CPU
+
+PPC_FEATURE_64
+ 64-bit CPU (userspace may be running in 32-bit mode).
+
+PPC_FEATURE_601_INSTR
+ The processor is PowerPC 601.
+ Unused in the kernel since f0ed73f3fa2c ("powerpc: Remove PowerPC 601")
+
+PPC_FEATURE_HAS_ALTIVEC
+ Vector (aka Altivec, VMX) facility is available.
+
+PPC_FEATURE_HAS_FPU
+ Floating point facility is available.
+
+PPC_FEATURE_HAS_MMU
+ Memory management unit is present and enabled.
+
+PPC_FEATURE_HAS_4xxMAC
+ The processor is 40x or 44x family.
+
+PPC_FEATURE_UNIFIED_CACHE
+ The processor has a unified L1 cache for instructions and data, as
+ found in NXP e200.
+ Unused in the kernel since 39c8bf2b3cc1 ("powerpc: Retire e200 core (mpc555x processor)")
+
+PPC_FEATURE_HAS_SPE
+ Signal Processing Engine facility is available.
+
+PPC_FEATURE_HAS_EFP_SINGLE
+ Embedded Floating Point single precision operations are available.
+
+PPC_FEATURE_HAS_EFP_DOUBLE
+ Embedded Floating Point double precision operations are available.
+
+PPC_FEATURE_NO_TB
+ The timebase facility (mftb instruction) is not available.
+ This is a 601 specific HWCAP, so if it is known that the processor
+ running is not a 601, via other HWCAPs or other means, it is not
+ required to test this bit before using the timebase.
+ Unused in the kernel since f0ed73f3fa2c ("powerpc: Remove PowerPC 601")
+
+PPC_FEATURE_POWER4
+ The processor is POWER4 or PPC970/FX/MP.
+ POWER4 support dropped from the kernel since 471d7ff8b51b ("powerpc/64s: Remove POWER4 support")
+
+PPC_FEATURE_POWER5
+ The processor is POWER5.
+
+PPC_FEATURE_POWER5_PLUS
+ The processor is POWER5+.
+
+PPC_FEATURE_CELL
+ The processor is Cell.
+
+PPC_FEATURE_BOOKE
+ The processor implements the embedded category ("BookE") architecture.
+
+PPC_FEATURE_SMT
+ The processor implements SMT.
+
+PPC_FEATURE_ICACHE_SNOOP
+ The processor icache is coherent with the dcache, and instruction storage
+ can be made consistent with data storage for the purpose of executing
+ instructions with the sequence (as described in, e.g., POWER9 Processor
+ User's Manual, 4.6.2.2 Instruction Cache Block Invalidate (icbi))::
+
+ sync
+ icbi (to any address)
+ isync
+
+PPC_FEATURE_ARCH_2_05
+ The processor supports the v2.05 userlevel architecture. Processors
+ supporting later architectures DO NOT set this feature.
+
+PPC_FEATURE_PA6T
+ The processor is PA6T.
+
+PPC_FEATURE_HAS_DFP
+ DFP facility is available.
+
+PPC_FEATURE_POWER6_EXT
+ The processor is POWER6.
+
+PPC_FEATURE_ARCH_2_06
+ The processor supports the v2.06 userlevel architecture. Processors
+ supporting later architectures also set this feature.
+
+PPC_FEATURE_HAS_VSX
+ VSX facility is available.
+
+PPC_FEATURE_PSERIES_PERFMON_COMPAT
+ The processor supports architected PMU events in the range 0xE0-0xFF.
+
+PPC_FEATURE_TRUE_LE
+ The processor supports true little-endian mode.
+
+PPC_FEATURE_PPC_LE
+ The processor supports "PowerPC Little-Endian", that uses address
+ munging to make storage access appear to be little-endian, but the
+ data is stored in a different format that is unsuitable to be
+ accessed by other agents not running in this mode.
+
+6. The HWCAPs exposed in AT_HWCAP2
+----------------------------------
+
+PPC_FEATURE2_ARCH_2_07
+ The processor supports the v2.07 userlevel architecture. Processors
+ supporting later architectures also set this feature.
+
+PPC_FEATURE2_HTM
+ Transactional Memory feature is available.
+
+PPC_FEATURE2_DSCR
+ DSCR facility is available.
+
+PPC_FEATURE2_EBB
+ EBB facility is available.
+
+PPC_FEATURE2_ISEL
+ isel instruction is available. This is superseded by ARCH_2_07 and
+ later.
+
+PPC_FEATURE2_TAR
+ TAR facility is available.
+
+PPC_FEATURE2_VEC_CRYPTO
+ v2.07 crypto instructions are available.
+
+PPC_FEATURE2_HTM_NOSC
+ System calls fail if called in a transactional state, see
+ Documentation/powerpc/syscall64-abi.rst
+
+PPC_FEATURE2_ARCH_3_00
+ The processor supports the v3.0B / v3.0C userlevel architecture. Processors
+ supporting later architectures also set this feature.
+
+PPC_FEATURE2_HAS_IEEE128
+ IEEE 128-bit binary floating point is supported with VSX
+ quad-precision instructions and data types.
+
+PPC_FEATURE2_DARN
+ darn instruction is available.
+
+PPC_FEATURE2_SCV
+ The scv 0 instruction may be used for system calls, see
+ Documentation/powerpc/syscall64-abi.rst.
+
+PPC_FEATURE2_HTM_NO_SUSPEND
+ A limited Transactional Memory facility that does not support suspend is
+ available, see Documentation/powerpc/transactional_memory.rst.
+
+PPC_FEATURE2_ARCH_3_1
+ The processor supports the v3.1 userlevel architecture. Processors
+ supporting later architectures also set this feature.
+
+PPC_FEATURE2_MMA
+ MMA facility is available.
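+
+As with AT_HWCAP, userspace can test AT_HWCAP2 flags via getauxval(). For
+example (an illustrative sketch mirroring the AT_HWCAP example above)::
+
+	bool isa_3_1_is_present(void)
+	{
+		unsigned long HWCAP2s = getauxval(AT_HWCAP2);
+		if (HWCAP2s & PPC_FEATURE2_ARCH_3_1)
+			return true;
+
+		return false;
+	}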
diff --git a/Documentation/powerpc/index.rst b/Documentation/powerpc/index.rst
index 0f7d3c495693..85e80e30160b 100644
--- a/Documentation/powerpc/index.rst
+++ b/Documentation/powerpc/index.rst
@@ -17,6 +17,7 @@ powerpc
dawr-power9
dscr
eeh-pci-error-recovery
+ elf_hwcaps
elfnote
firmware-assisted-dump
hvcs
diff --git a/Documentation/process/kernel-docs.rst b/Documentation/process/kernel-docs.rst
index 502289d63385..306ad373a002 100644
--- a/Documentation/process/kernel-docs.rst
+++ b/Documentation/process/kernel-docs.rst
@@ -116,7 +116,7 @@ On-line docs
* Title: **Writing an ALSA Driver**
:Author: Takashi Iwai <tiwai@suse.de>
- :URL: http://www.alsa-project.org/~iwai/writing-an-alsa-driver/index.html
+ :URL: https://www.kernel.org/doc/html/latest/sound/kernel-api/writing-an-alsa-driver.html
:Date: 2005
:Keywords: ALSA, sound, soundcard, driver, lowlevel, hardware.
:Description: Advanced Linux Sound Architecture for developers,
diff --git a/Documentation/s390/index.rst b/Documentation/s390/index.rst
index b10ca9192557..73c79bf586fd 100644
--- a/Documentation/s390/index.rst
+++ b/Documentation/s390/index.rst
@@ -12,6 +12,7 @@ s390 Architecture
qeth
s390dbf
vfio-ap
+ vfio-ap-locking
vfio-ccw
zfcpdump
common_io
diff --git a/Documentation/s390/vfio-ap-locking.rst b/Documentation/s390/vfio-ap-locking.rst
new file mode 100644
index 000000000000..0dfcdb562e21
--- /dev/null
+++ b/Documentation/s390/vfio-ap-locking.rst
@@ -0,0 +1,115 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+======================
+VFIO AP Locks Overview
+======================
+This document describes the locks that are pertinent to the secure operation
+of the vfio_ap device driver. Throughout this document, the following variables
+will be used to denote instances of the structures herein described:
+
+.. code-block:: c
+
+ struct ap_matrix_dev *matrix_dev;
+ struct ap_matrix_mdev *matrix_mdev;
+ struct kvm *kvm;
+
+The Matrix Devices Lock (drivers/s390/crypto/vfio_ap_private.h)
+---------------------------------------------------------------
+
+.. code-block:: c
+
+ struct ap_matrix_dev {
+ ...
+ struct list_head mdev_list;
+ struct mutex mdevs_lock;
+ ...
+ }
+
+The Matrix Devices Lock (matrix_dev->mdevs_lock) is implemented as a global
+mutex contained within the single object of struct ap_matrix_dev. This lock
+controls access to all fields contained within each matrix_mdev
+(matrix_dev->mdev_list). This lock must be held while reading from, writing to
+or using the data from a field contained within a matrix_mdev instance
+representing one of the vfio_ap device driver's mediated devices.
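+
+For example, the basic usage pattern is (an illustrative sketch, not verbatim
+driver code):
+
+.. code-block:: c
+
+	mutex_lock(&matrix_dev->mdevs_lock);
+	/* read from or write to matrix_mdev fields here */
+	mutex_unlock(&matrix_dev->mdevs_lock);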
+
+The KVM Lock (include/linux/kvm_host.h)
+---------------------------------------
+
+.. code-block:: c
+
+ struct kvm {
+ ...
+ struct mutex lock;
+ ...
+ }
+
+The KVM Lock (kvm->lock) controls access to the state data for a KVM guest. This
+lock must be held by the vfio_ap device driver while one or more AP adapters,
+domains or control domains are being plugged into or unplugged from the guest.
+
+The KVM pointer is stored in the matrix_mdev instance
+(matrix_mdev->kvm = kvm) containing the state of the mediated device that has
+been attached to the KVM guest.
+
+The Guests Lock (drivers/s390/crypto/vfio_ap_private.h)
+-----------------------------------------------------------
+
+.. code-block:: c
+
+ struct ap_matrix_dev {
+ ...
+ struct list_head mdev_list;
+ struct mutex guests_lock;
+ ...
+ }
+
+The Guests Lock (matrix_dev->guests_lock) controls access to the
+matrix_mdev instances (matrix_dev->mdev_list) that hold the state for the
+mediated devices that have been attached to a KVM guest. This lock must be
+held:
+
+1. To control access to the KVM pointer (matrix_mdev->kvm) while the vfio_ap
+ device driver is using it to plug/unplug AP devices passed through to the KVM
+ guest.
+
+2. To add matrix_mdev instances to or remove them from matrix_dev->mdev_list.
+ This is necessary to ensure the proper locking order when the list is perused
+ to find an ap_matrix_mdev instance for the purpose of plugging/unplugging
+ AP devices passed through to a KVM guest.
+
+ For example, when a queue device is removed from the vfio_ap device driver,
+ if the adapter is passed through to a KVM guest, it will have to be
+ unplugged. In order to figure out whether the adapter is passed through,
+ the matrix_mdev object to which the queue is assigned will have to be
+ found. The KVM pointer (matrix_mdev->kvm) can then be used to determine if
+ the mediated device is passed through (matrix_mdev->kvm != NULL) and if so,
+ to unplug the adapter.
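+
+For illustration only, the sketch below shows the pattern described in item 1
+above for plugging an adapter into a running guest. The helper name and the
+simplified flow are assumptions made for this example, not driver code:
+
+.. code-block:: c
+
+   /* Illustrative sketch, not actual driver code. */
+   static void plug_adapter_into_guest(struct ap_matrix_mdev *matrix_mdev,
+                                       unsigned long apid)
+   {
+           /* Guests Lock: protects use of matrix_mdev->kvm for plugging. */
+           mutex_lock(&matrix_dev->guests_lock);
+           if (matrix_mdev->kvm) {
+                   /* KVM Lock: held while the guest's state is updated. */
+                   mutex_lock(&matrix_mdev->kvm->lock);
+                   /* ... set the APM bit for apid in the guest's APCB ... */
+                   mutex_unlock(&matrix_mdev->kvm->lock);
+           }
+           mutex_unlock(&matrix_dev->guests_lock);
+   }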
+
+It is not necessary to take the Guests Lock to access the KVM pointer if the
+pointer is not used to plug/unplug devices passed through to the KVM guest;
+however, in this case, the Matrix Devices Lock (matrix_dev->mdevs_lock) must be
+held in order to access the KVM pointer since it is set and cleared under the
+protection of the Matrix Devices Lock. A case in point is the function that
+handles interception of the PQAP(AQIC) instruction sub-function. This handler
+needs to access the KVM pointer only for the purposes of setting or clearing IRQ
+resources, so only the matrix_dev->mdevs_lock needs to be held.
+
+The PQAP Hook Lock (arch/s390/include/asm/kvm_host.h)
+-----------------------------------------------------
+
+.. code-block:: c
+
+ typedef int (*crypto_hook)(struct kvm_vcpu *vcpu);
+
+ struct kvm_s390_crypto {
+ ...
+ struct rw_semaphore pqap_hook_rwsem;
+ crypto_hook *pqap_hook;
+ ...
+ };
+
+The PQAP Hook Lock is a r/w semaphore that controls access to the function
+pointer of the handler ``(*kvm->arch.crypto.pqap_hook)`` to invoke when the
+PQAP(AQIC) instruction sub-function is intercepted by the host. The lock must be
+held in write mode when the pqap_hook value is set, and in read mode when the
+pqap_hook function is called.
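+
+For illustration only, the read-side pattern looks roughly like the sketch
+below; the handler body is simplified and is not taken from the kernel
+sources:
+
+.. code-block:: c
+
+   /* Illustrative sketch, not actual kernel code. */
+   static int handle_pqap_aqic(struct kvm_vcpu *vcpu)
+   {
+           struct kvm *kvm = vcpu->kvm;
+           crypto_hook pqap_hook;
+           int ret = -EOPNOTSUPP;
+
+           down_read(&kvm->arch.crypto.pqap_hook_rwsem);
+           if (kvm->arch.crypto.pqap_hook) {
+                   pqap_hook = *kvm->arch.crypto.pqap_hook;
+                   ret = pqap_hook(vcpu);
+           }
+           up_read(&kvm->arch.crypto.pqap_hook_rwsem);
+
+           return ret;
+   }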
diff --git a/Documentation/s390/vfio-ap.rst b/Documentation/s390/vfio-ap.rst
index f57ae621f33e..61a0a3c6c7b4 100644
--- a/Documentation/s390/vfio-ap.rst
+++ b/Documentation/s390/vfio-ap.rst
@@ -123,27 +123,24 @@ Let's now take a look at how AP instructions executed on a guest are interpreted
by the hardware.
A satellite control block called the Crypto Control Block (CRYCB) is attached to
-our main hardware virtualization control block. The CRYCB contains three fields
-to identify the adapters, usage domains and control domains assigned to the KVM
-guest:
+our main hardware virtualization control block. The CRYCB contains an AP Control
+Block (APCB) that has three fields to identify the adapters, usage domains and
+control domains assigned to the KVM guest:
* The AP Mask (APM) field is a bit mask that identifies the AP adapters assigned
- to the KVM guest. Each bit in the mask, from left to right (i.e. from most
- significant to least significant bit in big endian order), corresponds to
+ to the KVM guest. Each bit in the mask, from left to right, corresponds to
an APID from 0-255. If a bit is set, the corresponding adapter is valid for
use by the KVM guest.
* The AP Queue Mask (AQM) field is a bit mask identifying the AP usage domains
- assigned to the KVM guest. Each bit in the mask, from left to right (i.e. from
- most significant to least significant bit in big endian order), corresponds to
- an AP queue index (APQI) from 0-255. If a bit is set, the corresponding queue
- is valid for use by the KVM guest.
+ assigned to the KVM guest. Each bit in the mask, from left to right,
+ corresponds to an AP queue index (APQI) from 0-255. If a bit is set, the
+ corresponding queue is valid for use by the KVM guest.
* The AP Domain Mask field is a bit mask that identifies the AP control domains
assigned to the KVM guest. The ADM bit mask controls which domains can be
changed by an AP command-request message sent to a usage domain from the
- guest. Each bit in the mask, from left to right (i.e. from most significant to
- least significant bit in big endian order), corresponds to a domain from
+ guest. Each bit in the mask, from left to right, corresponds to a domain from
0-255. If a bit is set, the corresponding domain can be modified by an AP
command-request message sent to a usage domain.
@@ -151,10 +148,10 @@ If you recall from the description of an AP Queue, AP instructions include
an APQN to identify the AP queue to which an AP command-request message is to be
sent (NQAP and PQAP instructions), or from which a command-reply message is to
be received (DQAP instruction). The validity of an APQN is defined by the matrix
-calculated from the APM and AQM; it is the cross product of all assigned adapter
-numbers (APM) with all assigned queue indexes (AQM). For example, if adapters 1
-and 2 and usage domains 5 and 6 are assigned to a guest, the APQNs (1,5), (1,6),
-(2,5) and (2,6) will be valid for the guest.
+calculated from the APM and AQM; it is the Cartesian product of all assigned
+adapter numbers (APM) with all assigned queue indexes (AQM). For example, if
+adapters 1 and 2 and usage domains 5 and 6 are assigned to a guest, the APQNs
+(1,5), (1,6), (2,5) and (2,6) will be valid for the guest.
The APQNs can provide secure key functionality - i.e., a private key is stored
on the adapter card for each of its domains - so each APQN must be assigned to
@@ -192,7 +189,7 @@ The design introduces three new objects:
1. AP matrix device
2. VFIO AP device driver (vfio_ap.ko)
-3. VFIO AP mediated matrix pass-through device
+3. VFIO AP mediated pass-through device
The VFIO AP device driver
-------------------------
@@ -200,12 +197,13 @@ The VFIO AP (vfio_ap) device driver serves the following purposes:
1. Provides the interfaces to secure APQNs for exclusive use of KVM guests.
-2. Sets up the VFIO mediated device interfaces to manage a mediated matrix
+2. Sets up the VFIO mediated device interfaces to manage a vfio_ap mediated
device and creates the sysfs interfaces for assigning adapters, usage
domains, and control domains comprising the matrix for a KVM guest.
-3. Configures the APM, AQM and ADM in the CRYCB referenced by a KVM guest's
- SIE state description to grant the guest access to a matrix of AP devices
+3. Configures the APM, AQM and ADM in the APCB contained in the CRYCB referenced
+ by a KVM guest's SIE state description to grant the guest access to a matrix
+ of AP devices
Reserve APQNs for exclusive use of KVM guests
---------------------------------------------
@@ -235,10 +233,10 @@ reserved::
| | 8 probe | |
+--------^---------+ +--^--^------------+
6 edit | | |
- apmask | +-----------------------------+ | 9 mdev create
+ apmask | +-----------------------------+ | 11 mdev create
aqmask | | 1 modprobe |
+--------+-----+---+ +----------------+-+ +----------------+
- | | | |8 create | mediated |
+ | | | |10 create| mediated |
| admin | | VFIO device core |---------> matrix |
| + | | | device |
+------+-+---------+ +--------^---------+ +--------^-------+
@@ -246,14 +244,14 @@ reserved::
| | 9 create vfio_ap-passthrough | |
| +------------------------------+ |
+-------------------------------------------------------------+
- 10 assign adapter/domain/control domain
+ 12 assign adapter/domain/control domain
The process for reserving an AP queue for use by a KVM guest is:
1. The administrator loads the vfio_ap device driver
2. The vfio-ap driver during its initialization will register a single 'matrix'
device with the device core. This will serve as the parent device for
- all mediated matrix devices used to configure an AP matrix for a guest.
+ all vfio_ap mediated devices used to configure an AP matrix for a guest.
3. The /sys/devices/vfio_ap/matrix device is created by the device core
4. The vfio_ap device driver will register with the AP bus for AP queue devices
of type 10 and higher (CEX4 and newer). The driver will provide the vfio_ap
@@ -269,24 +267,24 @@ The process for reserving an AP queue for use by a KVM guest is:
default zcrypt cex4queue driver.
8. The AP bus probes the vfio_ap device driver to bind the queues reserved for
it.
-9. The administrator creates a passthrough type mediated matrix device to be
+9. The administrator creates a passthrough type vfio_ap mediated device to be
used by a guest
10. The administrator assigns the adapters, usage domains and control domains
to be exclusively used by a guest.
Set up the VFIO mediated device interfaces
------------------------------------------
-The VFIO AP device driver utilizes the common interface of the VFIO mediated
+The VFIO AP device driver utilizes the common interfaces of the VFIO mediated
device core driver to:
-* Register an AP mediated bus driver to add a mediated matrix device to and
+* Register an AP mediated bus driver to add a vfio_ap mediated device to and
remove it from a VFIO group.
-* Create and destroy a mediated matrix device
-* Add a mediated matrix device to and remove it from the AP mediated bus driver
-* Add a mediated matrix device to and remove it from an IOMMU group
+* Create and destroy a vfio_ap mediated device
+* Add a vfio_ap mediated device to and remove it from the AP mediated bus driver
+* Add a vfio_ap mediated device to and remove it from an IOMMU group
The following high-level block diagram shows the main components and interfaces
-of the VFIO AP mediated matrix device driver::
+of the VFIO AP mediated device driver::
+-------------+
| |
@@ -343,7 +341,7 @@ matrix device.
* device_api:
the mediated device type's API
* available_instances:
- the number of mediated matrix passthrough devices
+ the number of vfio_ap mediated passthrough devices
that can be created
* device_api:
specifies the VFIO API
@@ -351,29 +349,37 @@ matrix device.
This attribute group identifies the user-defined sysfs attributes of the
mediated device. When a device is registered with the VFIO mediated device
framework, the sysfs attribute files identified in the 'mdev_attr_groups'
- structure will be created in the mediated matrix device's directory. The
- sysfs attributes for a mediated matrix device are:
+ structure will be created in the vfio_ap mediated device's directory. The
+ sysfs attributes for a vfio_ap mediated device are:
assign_adapter / unassign_adapter:
Write-only attributes for assigning/unassigning an AP adapter to/from the
- mediated matrix device. To assign/unassign an adapter, the APID of the
- adapter is echoed to the respective attribute file.
+ vfio_ap mediated device. To assign/unassign an adapter, the APID of the
+ adapter is echoed into the respective attribute file.
assign_domain / unassign_domain:
Write-only attributes for assigning/unassigning an AP usage domain to/from
- the mediated matrix device. To assign/unassign a domain, the domain
- number of the usage domain is echoed to the respective attribute
+ the vfio_ap mediated device. To assign/unassign a domain, the domain
+ number of the usage domain is echoed into the respective attribute
file.
matrix:
- A read-only file for displaying the APQNs derived from the cross product
- of the adapter and domain numbers assigned to the mediated matrix device.
+ A read-only file for displaying the APQNs derived from the Cartesian
+ product of the adapter and domain numbers assigned to the vfio_ap mediated
+ device.
+ guest_matrix:
+ A read-only file for displaying the APQNs derived from the Cartesian
+ product of the adapter and domain numbers assigned to the APM and AQM
+      fields respectively of the KVM guest's CRYCB. This may differ from the
+      APQNs assigned to the vfio_ap mediated device if any APQN does not
+ reference a queue device bound to the vfio_ap device driver (i.e., the
+ queue is not in the host's AP configuration).
assign_control_domain / unassign_control_domain:
Write-only attributes for assigning/unassigning an AP control domain
- to/from the mediated matrix device. To assign/unassign a control domain,
- the ID of the domain to be assigned/unassigned is echoed to the respective
- attribute file.
+ to/from the vfio_ap mediated device. To assign/unassign a control domain,
+ the ID of the domain to be assigned/unassigned is echoed into the
+ respective attribute file.
control_domains:
A read-only file for displaying the control domain numbers assigned to the
- mediated matrix device.
+ vfio_ap mediated device.
* functions:
@@ -383,45 +389,75 @@ matrix device.
* Store the reference to the KVM structure for the guest using the mdev
* Store the AP matrix configuration for the adapters, domains, and control
domains assigned via the corresponding sysfs attributes files
+ * Store the AP matrix configuration for the adapters, domains and control
+ domains available to a guest. A guest may not be provided access to APQNs
+ referencing queue devices that do not exist, or are not bound to the
+ vfio_ap device driver.
remove:
- deallocates the mediated matrix device's ap_matrix_mdev structure. This will
- be allowed only if a running guest is not using the mdev.
+ deallocates the vfio_ap mediated device's ap_matrix_mdev structure.
+ This will be allowed only if a running guest is not using the mdev.
* callback interfaces
- open:
+ open_device:
The vfio_ap driver uses this callback to register a
- VFIO_GROUP_NOTIFY_SET_KVM notifier callback function for the mdev matrix
- device. The open is invoked when QEMU connects the VFIO iommu group
- for the mdev matrix device to the MDEV bus. Access to the KVM structure used
- to configure the KVM guest is provided via this callback. The KVM structure,
- is used to configure the guest's access to the AP matrix defined via the
- mediated matrix device's sysfs attribute files.
- release:
+ VFIO_GROUP_NOTIFY_SET_KVM notifier callback function for the matrix mdev
+ devices. The open_device callback is invoked by userspace to connect the
+ VFIO iommu group for the matrix mdev device to the MDEV bus. Access to the
+ KVM structure used to configure the KVM guest is provided via this callback.
+   The KVM structure is used to configure the guest's access to the AP matrix
+ defined via the vfio_ap mediated device's sysfs attribute files.
+
+ close_device:
unregisters the VFIO_GROUP_NOTIFY_SET_KVM notifier callback function for the
- mdev matrix device and deconfigures the guest's AP matrix.
+ matrix mdev device and deconfigures the guest's AP matrix.
-Configure the APM, AQM and ADM in the CRYCB
--------------------------------------------
-Configuring the AP matrix for a KVM guest will be performed when the
+ ioctl:
+ this callback handles the VFIO_DEVICE_GET_INFO and VFIO_DEVICE_RESET ioctls
+ defined by the vfio framework.
+
+Configure the guest's AP resources
+----------------------------------
+Configuring the AP resources for a KVM guest will be performed when the
VFIO_GROUP_NOTIFY_SET_KVM notifier callback is invoked. The notifier
-function is called when QEMU connects to KVM. The guest's AP matrix is
-configured via it's CRYCB by:
+function is called when userspace connects to KVM. The guest's AP resources are
+configured via its APCB by:
* Setting the bits in the APM corresponding to the APIDs assigned to the
- mediated matrix device via its 'assign_adapter' interface.
+ vfio_ap mediated device via its 'assign_adapter' interface.
* Setting the bits in the AQM corresponding to the domains assigned to the
- mediated matrix device via its 'assign_domain' interface.
+ vfio_ap mediated device via its 'assign_domain' interface.
* Setting the bits in the ADM corresponding to the domain dIDs assigned to the
- mediated matrix device via its 'assign_control_domains' interface.
+ vfio_ap mediated device via its 'assign_control_domains' interface.
+
+The linux device model precludes passing a device through to a KVM guest that
+is not bound to the device driver facilitating its pass-through. Consequently,
+an APQN that does not reference a queue device bound to the vfio_ap device
+driver will not be assigned to a KVM guest's matrix. The AP architecture,
+however, does not provide a means to filter individual APQNs from the guest's
+matrix, so the adapters, domains and control domains assigned to the vfio_ap
+mediated device via its sysfs 'assign_adapter', 'assign_domain' and
+'assign_control_domain' interfaces will be filtered before providing the AP
+configuration to a guest:
+
+* The APIDs of the adapters, the APQIs of the domains and the domain numbers of
+ the control domains assigned to the matrix mdev that are not also assigned to
+ the host's AP configuration will be filtered.
+
+* Each APQN derived from the Cartesian product of the APIDs and APQIs assigned
+ to the vfio_ap mdev is examined and if any one of them does not reference a
+ queue device bound to the vfio_ap device driver, the adapter will not be
+ plugged into the guest (i.e., the bit corresponding to its APID will not be
+ set in the APM of the guest's APCB).
The CPU model features for AP
-----------------------------
-The AP stack relies on the presence of the AP instructions as well as two
-facilities: The AP Facilities Test (APFT) facility; and the AP Query
-Configuration Information (QCI) facility. These features/facilities are made
-available to a KVM guest via the following CPU model features:
+The AP stack relies on the presence of the AP instructions as well as three
+facilities: The AP Facilities Test (APFT) facility; the AP Query
+Configuration Information (QCI) facility; and the AP Queue Interruption Control
+facility. These features/facilities are made available to a KVM guest via the
+following CPU model features:
1. ap: Indicates whether the AP instructions are installed on the guest. This
feature will be enabled by KVM only if the AP instructions are installed
@@ -435,24 +471,28 @@ available to a KVM guest via the following CPU model features:
can be made available to the guest only if it is available on the host (i.e.,
facility bit 12 is set).
+4. apqi: Indicates the AP Queue Interruption Control facility is available on
+   the guest. This facility can be made available to the guest only if it is
+   available on the host (i.e., facility bit 65 is set).
+
Note: If the user chooses to specify a CPU model different than the 'host'
model to QEMU, the CPU model features and facilities need to be turned on
explicitly; for example::
- /usr/bin/qemu-system-s390x ... -cpu z13,ap=on,apqci=on,apft=on
+ /usr/bin/qemu-system-s390x ... -cpu z13,ap=on,apqci=on,apft=on,apqi=on
A guest can be precluded from using AP features/facilities by turning them off
explicitly; for example::
- /usr/bin/qemu-system-s390x ... -cpu host,ap=off,apqci=off,apft=off
+ /usr/bin/qemu-system-s390x ... -cpu host,ap=off,apqci=off,apft=off,apqi=off
Note: If the APFT facility is turned off (apft=off) for the guest, the guest
-will not see any AP devices. The zcrypt device drivers that register for type 10
-and newer AP devices - i.e., the cex4card and cex4queue device drivers - need
-the APFT facility to ascertain the facilities installed on a given AP device. If
-the APFT facility is not installed on the guest, then the probe of device
-drivers will fail since only type 10 and newer devices can be configured for
-guest use.
+will not see any AP devices. The zcrypt device drivers on the guest that
+register for type 10 and newer AP devices - i.e., the cex4card and cex4queue
+device drivers - need the APFT facility to ascertain the facilities installed on
+a given AP device. If the APFT facility is not installed on the guest, then no
+adapter or domain devices will get created by the AP bus running on the
+guest because only type 10 and newer devices can be configured for guest use.
Example
=======
@@ -471,7 +511,7 @@ CARD.DOMAIN TYPE MODE
05.00ab CEX5C CCA-Coproc
06 CEX5A Accelerator
06.0004 CEX5A Accelerator
-06.00ab CEX5C CCA-Coproc
+06.00ab CEX5A Accelerator
=========== ===== ============
Guest2
@@ -479,9 +519,9 @@ Guest2
=========== ===== ============
CARD.DOMAIN TYPE MODE
=========== ===== ============
-05 CEX5A Accelerator
-05.0047 CEX5A Accelerator
-05.00ff CEX5A Accelerator
+05 CEX5C CCA-Coproc
+05.0047 CEX5C CCA-Coproc
+05.00ff CEX5C CCA-Coproc
=========== ===== ============
Guest3
@@ -529,40 +569,56 @@ These are the steps:
2. Secure the AP queues to be used by the three guests so that the host can not
access them. To secure them, there are two sysfs files that specify
- bitmasks marking a subset of the APQN range as 'usable by the default AP
- queue device drivers' or 'not usable by the default device drivers' and thus
- available for use by the vfio_ap device driver'. The location of the sysfs
- files containing the masks are::
+ bitmasks marking a subset of the APQN range as usable only by the default AP
+ queue device drivers. All remaining APQNs are available for use by
+ any other device driver. The vfio_ap device driver is currently the only
+ non-default device driver. The location of the sysfs files containing the
+ masks are::
/sys/bus/ap/apmask
/sys/bus/ap/aqmask
The 'apmask' is a 256-bit mask that identifies a set of AP adapter IDs
- (APID). Each bit in the mask, from left to right (i.e., from most significant
- to least significant bit in big endian order), corresponds to an APID from
- 0-255. If a bit is set, the APID is marked as usable only by the default AP
- queue device drivers; otherwise, the APID is usable by the vfio_ap
- device driver.
+ (APID). Each bit in the mask, from left to right, corresponds to an APID from
+ 0-255. If a bit is set, the APID belongs to the subset of APQNs marked as
+ available only to the default AP queue device drivers.
The 'aqmask' is a 256-bit mask that identifies a set of AP queue indexes
- (APQI). Each bit in the mask, from left to right (i.e., from most significant
- to least significant bit in big endian order), corresponds to an APQI from
- 0-255. If a bit is set, the APQI is marked as usable only by the default AP
- queue device drivers; otherwise, the APQI is usable by the vfio_ap device
- driver.
+ (APQI). Each bit in the mask, from left to right, corresponds to an APQI from
+ 0-255. If a bit is set, the APQI belongs to the subset of APQNs marked as
+ available only to the default AP queue device drivers.
+
+ The Cartesian product of the APIDs corresponding to the bits set in the
+ apmask and the APQIs corresponding to the bits set in the aqmask comprise
+ the subset of APQNs that can be used only by the host default device drivers.
+ All other APQNs are available to the non-default device drivers such as the
+ vfio_ap driver.
+
+ Take, for example, the following masks::
+
+ apmask:
+ 0x7d00000000000000000000000000000000000000000000000000000000000000
- Take, for example, the following mask::
+ aqmask:
+ 0x8000000000000000000000000000000000000000000000000000000000000000
- 0x7dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ The masks indicate:
- It indicates:
+ * Adapters 1, 2, 3, 4, 5, and 7 are available for use by the host default
+ device drivers.
- 1, 2, 3, 4, 5, and 7-255 belong to the default drivers' pool, and 0 and 6
- belong to the vfio_ap device driver's pool.
+ * Domain 0 is available for use by the host default device drivers
+
+ * The subset of APQNs available for use only by the default host device
+ drivers are:
+
+     (1,0), (2,0), (3,0), (4,0), (5,0) and (7,0)
+
+ * All other APQNs are available for use by the non-default device drivers.
The APQN of each AP queue device assigned to the linux host is checked by the
- AP bus against the set of APQNs derived from the cross product of APIDs
- and APQIs marked as usable only by the default AP queue device drivers. If a
+ AP bus against the set of APQNs derived from the Cartesian product of APIDs
+ and APQIs marked as available to the default AP queue device drivers. If a
match is detected, only the default AP queue device drivers will be probed;
otherwise, the vfio_ap device driver will be probed.
@@ -579,8 +635,7 @@ These are the steps:
0x4100000000000000000000000000000000000000000000000000000000000000
- Keep in mind that the mask reads from left to right (i.e., most
- significant to least significant bit in big endian order), so the mask
+ Keep in mind that the mask reads from left to right, so the mask
above identifies device numbers 1 and 7 (01000001).
If the string is longer than the mask, the operation is terminated with
@@ -626,11 +681,22 @@ These are the steps:
default drivers pool: adapter 0-15, domain 1
alternate drivers pool: adapter 16-255, domains 0, 2-255
+ **Note:**
+ Changing a mask such that one or more APQNs will be taken from a vfio_ap
+ mediated device (see below) will fail with an error (EBUSY). A message
+ is logged to the kernel ring buffer which can be viewed with the 'dmesg'
+   command. The output identifies each APQN flagged as 'in use' and the
+   vfio_ap mediated device to which it is assigned; for example::
+
+ Userspace may not re-assign queue 05.0054 already assigned to 62177883-f1bb-47f0-914d-32a22e3a8804
+ Userspace may not re-assign queue 04.0054 already assigned to cef03c3c-903d-4ecc-9a83-40694cb8aee4
+
Securing the APQNs for our example
----------------------------------
To secure the AP queues 05.0004, 05.0047, 05.00ab, 05.00ff, 06.0004, 06.0047,
06.00ab, and 06.00ff for use by the vfio_ap device driver, the corresponding
- APQNs can either be removed from the default masks::
+ APQNs can be removed from the default masks using either of the following
+ commands::
echo -5,-6 > /sys/bus/ap/apmask
@@ -683,7 +749,7 @@ Securing the APQNs for our example
/sys/devices/vfio_ap/matrix/
--- [mdev_supported_types]
- ------ [vfio_ap-passthrough] (passthrough mediated matrix device type)
+ ------ [vfio_ap-passthrough] (passthrough vfio_ap mediated device type)
--------- create
--------- [devices]
@@ -734,6 +800,9 @@ Securing the APQNs for our example
----------------unassign_control_domain
----------------unassign_domain
+ Note *****: The vfio_ap mdevs do not persist across reboots unless the
+ mdevctl tool is used to create and persist them.
+
4. The administrator now needs to configure the matrixes for the mediated
devices $uuid1 (for Guest1), $uuid2 (for Guest2) and $uuid3 (for Guest3).
@@ -755,6 +824,10 @@ Securing the APQNs for our example
cat matrix
+ To display the matrix that is or will be assigned to Guest1::
+
+ cat guest_matrix
+
This is how the matrix is configured for Guest2::
echo 5 > assign_adapter
@@ -774,17 +847,24 @@ Securing the APQNs for our example
higher than the maximum is specified, the operation will terminate with
an error (ENODEV).
- * All APQNs that can be derived from the adapter ID and the IDs of
- the previously assigned domains must be bound to the vfio_ap device
- driver. If no domains have yet been assigned, then there must be at least
- one APQN with the specified APID bound to the vfio_ap driver. If no such
- APQNs are bound to the driver, the operation will terminate with an
- error (EADDRNOTAVAIL).
+ Note: The maximum adapter number can be obtained via the sysfs
+ /sys/bus/ap/ap_max_adapter_id attribute file.
+
+ * Each APQN derived from the Cartesian product of the APID of the adapter
+ being assigned and the APQIs of the domains previously assigned:
+
+ - Must only be available to the vfio_ap device driver as specified in the
+ sysfs /sys/bus/ap/apmask and /sys/bus/ap/aqmask attribute files. If even
+ one APQN is reserved for use by the host device driver, the operation
+ will terminate with an error (EADDRNOTAVAIL).
+
+ - Must NOT be assigned to another vfio_ap mediated device. If even one APQN
+ is assigned to another vfio_ap mediated device, the operation will
+ terminate with an error (EBUSY).
- No APQN that can be derived from the adapter ID and the IDs of the
- previously assigned domains can be assigned to another mediated matrix
- device. If an APQN is assigned to another mediated matrix device, the
- operation will terminate with an error (EADDRINUSE).
+ - Must NOT be assigned while the sysfs /sys/bus/ap/apmask and
+       /sys/bus/ap/aqmask attribute files are being edited or the operation may
+ terminate with an error (EBUSY).
In order to successfully assign a domain:
@@ -793,41 +873,50 @@ Securing the APQNs for our example
higher than the maximum is specified, the operation will terminate with
an error (ENODEV).
- * All APQNs that can be derived from the domain ID and the IDs of
- the previously assigned adapters must be bound to the vfio_ap device
- driver. If no domains have yet been assigned, then there must be at least
- one APQN with the specified APQI bound to the vfio_ap driver. If no such
- APQNs are bound to the driver, the operation will terminate with an
- error (EADDRNOTAVAIL).
+ Note: The maximum domain number can be obtained via the sysfs
+ /sys/bus/ap/ap_max_domain_id attribute file.
+
+ * Each APQN derived from the Cartesian product of the APQI of the domain
+ being assigned and the APIDs of the adapters previously assigned:
+
+ - Must only be available to the vfio_ap device driver as specified in the
+ sysfs /sys/bus/ap/apmask and /sys/bus/ap/aqmask attribute files. If even
+ one APQN is reserved for use by the host device driver, the operation
+ will terminate with an error (EADDRNOTAVAIL).
+
+ - Must NOT be assigned to another vfio_ap mediated device. If even one APQN
+ is assigned to another vfio_ap mediated device, the operation will
+ terminate with an error (EBUSY).
- No APQN that can be derived from the domain ID and the IDs of the
- previously assigned adapters can be assigned to another mediated matrix
- device. If an APQN is assigned to another mediated matrix device, the
- operation will terminate with an error (EADDRINUSE).
+ - Must NOT be assigned while the sysfs /sys/bus/ap/apmask and
+       /sys/bus/ap/aqmask attribute files are being edited or the operation may
+ terminate with an error (EBUSY).
- In order to successfully assign a control domain, the domain number
- specified must represent a value from 0 up to the maximum domain number
- configured for the system. If a control domain number higher than the maximum
- is specified, the operation will terminate with an error (ENODEV).
+ In order to successfully assign a control domain:
+
+ * The domain number specified must represent a value from 0 up to the maximum
+ domain number configured for the system. If a control domain number higher
+ than the maximum is specified, the operation will terminate with an
+ error (ENODEV).
5. Start Guest1::
- /usr/bin/qemu-system-s390x ... -cpu host,ap=on,apqci=on,apft=on \
+ /usr/bin/qemu-system-s390x ... -cpu host,ap=on,apqci=on,apft=on,apqi=on \
-device vfio-ap,sysfsdev=/sys/devices/vfio_ap/matrix/$uuid1 ...
7. Start Guest2::
- /usr/bin/qemu-system-s390x ... -cpu host,ap=on,apqci=on,apft=on \
+ /usr/bin/qemu-system-s390x ... -cpu host,ap=on,apqci=on,apft=on,apqi=on \
-device vfio-ap,sysfsdev=/sys/devices/vfio_ap/matrix/$uuid2 ...
7. Start Guest3::
- /usr/bin/qemu-system-s390x ... -cpu host,ap=on,apqci=on,apft=on \
+ /usr/bin/qemu-system-s390x ... -cpu host,ap=on,apqci=on,apft=on,apqi=on \
-device vfio-ap,sysfsdev=/sys/devices/vfio_ap/matrix/$uuid3 ...
-When the guest is shut down, the mediated matrix devices may be removed.
+When the guest is shut down, the vfio_ap mediated devices may be removed.
-Using our example again, to remove the mediated matrix device $uuid1::
+Using our example again, to remove the vfio_ap mediated device $uuid1::
/sys/devices/vfio_ap/matrix/
--- [mdev_supported_types]
@@ -840,26 +929,143 @@ Using our example again, to remove the mediated matrix device $uuid1::
echo 1 > remove
-This will remove all of the mdev matrix device's sysfs structures including
-the mdev device itself. To recreate and reconfigure the mdev matrix device,
+This will remove all of the matrix mdev device's sysfs structures including
+the mdev device itself. To recreate and reconfigure the matrix mdev device,
all of the steps starting with step 3 will have to be performed again. Note
-that the remove will fail if a guest using the mdev is still running.
+that the remove will fail if a guest using the vfio_ap mdev is still running.
-It is not necessary to remove an mdev matrix device, but one may want to
+It is not necessary to remove a vfio_ap mdev, but one may want to
remove it if no guest will use it during the remaining lifetime of the linux
-host. If the mdev matrix device is removed, one may want to also reconfigure
+host. If the vfio_ap mdev is removed, one may want to also reconfigure
the pool of adapters and queues reserved for use by the default drivers.
+Hot plug/unplug support:
+========================
+An adapter, domain or control domain may be hot plugged into a running KVM
+guest by assigning it to the vfio_ap mediated device being used by the guest if
+the following conditions are met:
+
+* The adapter, domain or control domain must also be assigned to the host's
+ AP configuration.
+
+* To hot plug an adapter, each APQN derived from the Cartesian product of the
+  APID of the adapter being assigned and the APQIs of the domains already
+  assigned must reference a queue device bound to the vfio_ap device driver.
+
+* To hot plug a domain, each APQN derived from the Cartesian product
+ comprised of the APQI of the domain being assigned and the APIDs of the
+ adapters assigned must reference a queue device bound to the vfio_ap device
+ driver.
+
+An adapter, domain or control domain may be hot unplugged from a running KVM
+guest by unassigning it from the vfio_ap mediated device being used by the
+guest.
+
+Over-provisioning of AP queues for a KVM guest:
+===============================================
+Over-provisioning is defined herein as the assignment of adapters or domains to
+a vfio_ap mediated device that do not reference AP devices in the host's AP
+configuration. The idea here is that when the adapter or domain becomes
+available, it will be automatically hot-plugged into the KVM guest using
+the vfio_ap mediated device to which it is assigned as long as each new APQN
+resulting from plugging it in references a queue device bound to the vfio_ap
+device driver.
+
Limitations
===========
-* The KVM/kernel interfaces do not provide a way to prevent restoring an APQN
- to the default drivers pool of a queue that is still assigned to a mediated
- device in use by a guest. It is incumbent upon the administrator to
- ensure there is no mediated device in use by a guest to which the APQN is
- assigned lest the host be given access to the private data of the AP queue
- device such as a private key configured specifically for the guest.
+Live guest migration is not supported for guests using AP devices without
+intervention by a system administrator. Before a KVM guest can be migrated,
+the vfio_ap mediated device must be removed. Unfortunately, it cannot be
+removed manually (i.e., echo 1 > /sys/devices/vfio_ap/matrix/$UUID/remove) while
+the mdev is in use by a KVM guest. If the guest is being emulated by QEMU,
+its mdev can be hot unplugged from the guest in one of two ways:
+
+1. If the KVM guest was started with libvirt, you can hot unplug the mdev via
+ the following commands:
+
+ virsh detach-device <guestname> <path-to-device-xml>
+
+ For example, to hot unplug mdev 62177883-f1bb-47f0-914d-32a22e3a8804 from
+ the guest named 'my-guest':
+
+ virsh detach-device my-guest ~/config/my-guest-hostdev.xml
+
+ The contents of my-guest-hostdev.xml:
+
+.. code-block:: xml
+
+ <hostdev mode='subsystem' type='mdev' managed='no' model='vfio-ap'>
+ <source>
+ <address uuid='62177883-f1bb-47f0-914d-32a22e3a8804'/>
+ </source>
+ </hostdev>
+
+
+   virsh qemu-monitor-command <guest-name> --hmp "device_del <device-id>"
+
+ For example, to hot unplug the vfio_ap mediated device identified on the
+ qemu command line with 'id=hostdev0' from the guest named 'my-guest':
+
+.. code-block:: sh
+
+ virsh qemu-monitor-command my-guest --hmp "device_del hostdev0"
+
+2. A vfio_ap mediated device can be hot unplugged by attaching the qemu monitor
+ to the guest and using the following qemu monitor command:
+
+      (QEMU) device_del id=<device-id>
+
+ For example, to hot unplug the vfio_ap mediated device that was specified
+ on the qemu command line with 'id=hostdev0' when the guest was started:
+
+      (QEMU) device_del id=hostdev0
+
+After live migration of the KVM guest completes, an AP configuration can be
+restored to the KVM guest by hot plugging a vfio_ap mediated device on the target
+system into the guest in one of two ways:
+
+1. If the KVM guest was started with libvirt, you can hot plug a matrix mediated
+ device into the guest via the following virsh commands:
+
+ virsh attach-device <guestname> <path-to-device-xml>
+
+ For example, to hot plug mdev 62177883-f1bb-47f0-914d-32a22e3a8804 into
+ the guest named 'my-guest':
+
+ virsh attach-device my-guest ~/config/my-guest-hostdev.xml
+
+ The contents of my-guest-hostdev.xml:
+
+.. code-block:: xml
+
+ <hostdev mode='subsystem' type='mdev' managed='no' model='vfio-ap'>
+ <source>
+ <address uuid='62177883-f1bb-47f0-914d-32a22e3a8804'/>
+ </source>
+ </hostdev>
+
+
+ virsh qemu-monitor-command <guest-name> --hmp \
+ "device_add vfio-ap,sysfsdev=<path-to-mdev>,id=<device-id>"
+
+ For example, to hot plug the vfio_ap mediated device
+ 62177883-f1bb-47f0-914d-32a22e3a8804 into the guest named 'my-guest' with
+ device-id hostdev0:
+
+ virsh qemu-monitor-command my-guest --hmp \
+ "device_add vfio-ap,\
+ sysfsdev=/sys/devices/vfio_ap/matrix/62177883-f1bb-47f0-914d-32a22e3a8804,\
+ id=hostdev0"
+
+2. A vfio_ap mediated device can be hot plugged by attaching the qemu monitor
+ to the guest and using the following qemu monitor command:
+
+ (qemu) device_add "vfio-ap,sysfsdev=<path-to-mdev>,id=<device-id>"
-* Dynamically modifying the AP matrix for a running guest (which would amount to
- hot(un)plug of AP devices for the guest) is currently not supported
+ For example, to plug the vfio_ap mediated device
+ 62177883-f1bb-47f0-914d-32a22e3a8804 into the guest with the device-id
+ hostdev0:
-* Live guest migration is not supported for guests using AP devices.
+      (QEMU) device_add "vfio-ap,\
+      sysfsdev=/sys/devices/vfio_ap/matrix/62177883-f1bb-47f0-914d-32a22e3a8804,\
+      id=hostdev0"
diff --git a/Documentation/scsi/ufs.rst b/Documentation/scsi/ufs.rst
index fbac745b783c..885b1a736e3f 100644
--- a/Documentation/scsi/ufs.rst
+++ b/Documentation/scsi/ufs.rst
@@ -17,6 +17,8 @@ Universal Flash Storage
3.2 UTP Transfer requests
3.3 UFS error handling
3.4 SCSI Error handling
+ 4. BSG Support
+ 5. UFS Reference Clock Frequency configuration
1. Overview
@@ -193,3 +195,16 @@ UFS specifications can be found at:
- UFS - http://www.jedec.org/sites/default/files/docs/JESD220.pdf
- UFSHCI - http://www.jedec.org/sites/default/files/docs/JESD223.pdf
+
+5. UFS Reference Clock Frequency configuration
+==============================================
+
+Devicetree can define a clock named "ref_clk" under the UFS controller node
+to specify the intended reference clock frequency for the UFS storage
+parts. ACPI-based systems can specify the frequency using an ACPI
+Device-Specific Data property named "ref-clk-freq". In both cases the value
+is interpreted as a frequency in Hz and must match one of the values given in
+the UFS specification. The UFS subsystem will attempt to read the value when
+executing common controller initialization. If the value is available, the UFS
+subsystem will ensure the bRefClkFreq attribute of the UFS storage device is
+set accordingly and will modify it if there is a mismatch.
diff --git a/Documentation/sound/soc/codec.rst b/Documentation/sound/soc/codec.rst
index 57df149acafc..af973c4cac93 100644
--- a/Documentation/sound/soc/codec.rst
+++ b/Documentation/sound/soc/codec.rst
@@ -132,7 +132,7 @@ The codec driver also supports the following ALSA PCM operations:-
};
Please refer to the ALSA driver PCM documentation for details.
-http://www.alsa-project.org/~iwai/writing-an-alsa-driver/
+https://www.kernel.org/doc/html/latest/sound/kernel-api/writing-an-alsa-driver.html
DAPM description
diff --git a/Documentation/sound/soc/platform.rst b/Documentation/sound/soc/platform.rst
index c1badea53d3d..7036630eaf01 100644
--- a/Documentation/sound/soc/platform.rst
+++ b/Documentation/sound/soc/platform.rst
@@ -46,7 +46,7 @@ snd_soc_component_driver:-
};
Please refer to the ALSA driver documentation for details of audio DMA.
-http://www.alsa-project.org/~iwai/writing-an-alsa-driver/
+https://www.kernel.org/doc/html/latest/sound/kernel-api/writing-an-alsa-driver.html
An example DMA driver is soc/pxa/pxa2xx-pcm.c
diff --git a/Documentation/sphinx/kerneldoc-preamble.sty b/Documentation/sphinx/kerneldoc-preamble.sty
index 2a29cbe51396..9707e033c8c4 100644
--- a/Documentation/sphinx/kerneldoc-preamble.sty
+++ b/Documentation/sphinx/kerneldoc-preamble.sty
@@ -70,8 +70,16 @@
% Translations have Asian (CJK) characters which are only displayed if
% xeCJK is used
+\usepackage{ifthen}
+\newboolean{enablecjk}
+\setboolean{enablecjk}{false}
\IfFontExistsTF{Noto Sans CJK SC}{
- % Load xeCJK when CJK font is available
+ \IfFileExists{xeCJK.sty}{
+ \setboolean{enablecjk}{true}
+ }{}
+}{}
+\ifthenelse{\boolean{enablecjk}}{
+ % Load xeCJK when both the Noto Sans CJK font and xeCJK.sty are available.
\usepackage{xeCJK}
% Noto CJK fonts don't provide slant shape. [AutoFakeSlant] permits
% its emulation.
@@ -196,7 +204,7 @@
% Inactivate CJK after tableofcontents
\apptocmd{\sphinxtableofcontents}{\kerneldocCJKoff}{}{}
\xeCJKsetup{CJKspace = true}% For inter-phrase space of Korean TOC
-}{ % No CJK font found
+}{ % Don't enable CJK
% Custom macros to on/off CJK and switch CJK fonts (Dummy)
\newcommand{\kerneldocCJKon}{}
\newcommand{\kerneldocCJKoff}{}
@@ -204,14 +212,16 @@
%% and ignore the argument (#1) in their definitions, whole contents of
%% CJK chapters can be ignored.
\newcommand{\kerneldocBeginSC}[1]{%
- %% Put a note on missing CJK fonts in place of zh_CN translation.
- \begin{sphinxadmonition}{note}{Note on missing fonts:}
+ %% Put a note on missing CJK fonts or the xecjk package in place of
+ %% zh_CN translation.
+ \begin{sphinxadmonition}{note}{Note on missing fonts and a package:}
Translations of Simplified Chinese (zh\_CN), Traditional Chinese
(zh\_TW), Korean (ko\_KR), and Japanese (ja\_JP) were skipped
- due to the lack of suitable font families.
+ due to the lack of suitable font families and/or the texlive-xecjk
+ package.
If you want them, please install ``Noto Sans CJK'' font families
- by following instructions from
+ along with the texlive-xecjk package by following instructions from
\sphinxcode{./scripts/sphinx-pre-install}.
Having optional ``Noto Serif CJK'' font families will improve
the looks of those translations.
diff --git a/Documentation/tools/rtla/rtla-timerlat-hist.rst b/Documentation/tools/rtla/rtla-timerlat-hist.rst
index e12eae1f3301..6bf7f0ca4556 100644
--- a/Documentation/tools/rtla/rtla-timerlat-hist.rst
+++ b/Documentation/tools/rtla/rtla-timerlat-hist.rst
@@ -33,7 +33,7 @@ EXAMPLE
=======
In the example below, **rtla timerlat hist** is set to run for *10* minutes,
in the cpus *0-4*, *skipping zero* only lines. Moreover, **rtla timerlat
-hist** will change the priority of the *timelat* threads to run under
+hist** will change the priority of the *timerlat* threads to run under
*SCHED_DEADLINE* priority, with a *10us* runtime every *1ms* period. The
*1ms* period is also passed to the *timerlat* tracer::
diff --git a/Documentation/trace/index.rst b/Documentation/trace/index.rst
index f9b7bcb5a630..2d73e8697523 100644
--- a/Documentation/trace/index.rst
+++ b/Documentation/trace/index.rst
@@ -32,3 +32,4 @@ Linux Tracing Technologies
sys-t
coresight/index
user_events
+ rv/index
diff --git a/Documentation/trace/kprobetrace.rst b/Documentation/trace/kprobetrace.rst
index b175d88f31eb..4274cc6a2f94 100644
--- a/Documentation/trace/kprobetrace.rst
+++ b/Documentation/trace/kprobetrace.rst
@@ -28,10 +28,10 @@ Synopsis of kprobe_events
-------------------------
::
- p[:[GRP/]EVENT] [MOD:]SYM[+offs]|MEMADDR [FETCHARGS] : Set a probe
- r[MAXACTIVE][:[GRP/]EVENT] [MOD:]SYM[+0] [FETCHARGS] : Set a return probe
- p:[GRP/]EVENT] [MOD:]SYM[+0]%return [FETCHARGS] : Set a return probe
- -:[GRP/]EVENT : Clear a probe
+ p[:[GRP/][EVENT]] [MOD:]SYM[+offs]|MEMADDR [FETCHARGS] : Set a probe
+ r[MAXACTIVE][:[GRP/][EVENT]] [MOD:]SYM[+0] [FETCHARGS] : Set a return probe
+ p[:[GRP/][EVENT]] [MOD:]SYM[+0]%return [FETCHARGS] : Set a return probe
+ -:[GRP/][EVENT] : Clear a probe
GRP : Group name. If omitted, use "kprobes" for it.
EVENT : Event name. If omitted, the event name is generated
diff --git a/Documentation/trace/rv/da_monitor_instrumentation.rst b/Documentation/trace/rv/da_monitor_instrumentation.rst
new file mode 100644
index 000000000000..6c67c7b57811
--- /dev/null
+++ b/Documentation/trace/rv/da_monitor_instrumentation.rst
@@ -0,0 +1,171 @@
+Deterministic Automata Instrumentation
+======================================
+
+The RV monitor file created by dot2k, with the name "$MODEL_NAME.c",
+includes a section dedicated to instrumentation.
+
+In the example of the wip.dot monitor created in [1], it will look like::
+
+ /*
+ * This is the instrumentation part of the monitor.
+ *
+ * This is the section where manual work is required. Here the kernel events
+ * are translated into model's event.
+ *
+ */
+ static void handle_preempt_disable(void *data, /* XXX: fill header */)
+ {
+ da_handle_event_wip(preempt_disable_wip);
+ }
+
+ static void handle_preempt_enable(void *data, /* XXX: fill header */)
+ {
+ da_handle_event_wip(preempt_enable_wip);
+ }
+
+ static void handle_sched_waking(void *data, /* XXX: fill header */)
+ {
+ da_handle_event_wip(sched_waking_wip);
+ }
+
+ static int enable_wip(void)
+ {
+ int retval;
+
+ retval = da_monitor_init_wip();
+ if (retval)
+ return retval;
+
+ rv_attach_trace_probe("wip", /* XXX: tracepoint */, handle_preempt_disable);
+ rv_attach_trace_probe("wip", /* XXX: tracepoint */, handle_preempt_enable);
+ rv_attach_trace_probe("wip", /* XXX: tracepoint */, handle_sched_waking);
+
+ return 0;
+ }
+
+The comment at the top of the section explains the general idea: the
+instrumentation section translates *kernel events* into the *model's
+event*.
+
+Tracing callback functions
+--------------------------
+
+The first three functions are the starting points of the callback *handler
+functions*, one for each of the three events from the wip model. The developer
+does not necessarily need to use them: they are just starting points.
+
+Using the example of::
+
+ void handle_preempt_disable(void *data, /* XXX: fill header */)
+ {
+ da_handle_event_wip(preempt_disable_wip);
+ }
+
+The preempt_disable event from the model connects directly to the
+preemptirq:preempt_disable tracepoint. The preemptirq:preempt_disable event
+has the following signature, from include/trace/events/preemptirq.h::
+
+ TP_PROTO(unsigned long ip, unsigned long parent_ip)
+
+Hence, the handle_preempt_disable() function will look like::
+
+ void handle_preempt_disable(void *data, unsigned long ip, unsigned long parent_ip)
+
+In this case, the kernel event translates one to one with the automata
+event, and indeed, no other change is required for this function.
+
+The next handler function, handle_preempt_enable(), has the same argument
+list as handle_preempt_disable(). The difference is that the
+preempt_enable event will be used to synchronize the system to the model.
+
+Initially, the *model* is placed in the initial state. However, the *system*
+might or might not be in the initial state. The monitor cannot start
+processing events until it knows that the system has reached the initial state.
+Otherwise, the monitor and the system could be out-of-sync.
+
+Looking at the automata definition, it is possible to see that the system
+and the model are expected to return to the initial state after the
+preempt_enable execution. Hence, it can be used to synchronize the
+system and the model at the initialization of the monitoring section.
+
+This synchronization point is reported to the monitor via a special handle
+function, "da_handle_start_event_$(MONITOR_NAME)(event)", in this case::
+
+ da_handle_start_event_wip(preempt_enable_wip);
+
+So, the callback function will look like::
+
+ void handle_preempt_enable(void *data, unsigned long ip, unsigned long parent_ip)
+ {
+ da_handle_start_event_wip(preempt_enable_wip);
+ }
+
+Finally, the "handle_sched_waking()" will look like::
+
+ void handle_sched_waking(void *data, struct task_struct *task)
+ {
+ da_handle_event_wip(sched_waking_wip);
+ }
+
+And the explanation is left for the reader as an exercise.
+
+enable and disable functions
+----------------------------
+
+dot2k automatically creates two special functions::
+
+ enable_$(MONITOR_NAME)()
+ disable_$(MONITOR_NAME)()
+
+These functions are called when the monitor is enabled and disabled,
+respectively.
+
+They should be used to *attach* and *detach* the instrumentation to the running
+system. The developer must add to the respective function whatever is needed
+to *attach* the monitor to, and *detach* it from, the system.
+
+For the wip case, these functions were named::
+
+ enable_wip()
+ disable_wip()
+
+No change was required, however, because by default these functions already
+*attach* and *detach* the tracepoints listed in tracepoints_to_attach, which
+was enough for this case.
+
+Instrumentation helpers
+-----------------------
+
+To complete the instrumentation, the *handler functions* need to be attached
+to a kernel event at the monitoring enable phase.
+
+The RV interface also facilitates this step. For example, the macro
+"rv_attach_trace_probe()" is used to connect the wip model events to the
+relevant kernel events. dot2k automatically adds an "rv_attach_trace_probe()"
+call for each model event in the enable phase, as a suggestion.
+
+For example, from the wip sample model::
+
+ static int enable_wip(void)
+ {
+ int retval;
+
+ retval = da_monitor_init_wip();
+ if (retval)
+ return retval;
+
+ rv_attach_trace_probe("wip", /* XXX: tracepoint */, handle_preempt_enable);
+ rv_attach_trace_probe("wip", /* XXX: tracepoint */, handle_sched_waking);
+ rv_attach_trace_probe("wip", /* XXX: tracepoint */, handle_preempt_disable);
+
+ return 0;
+ }
+
+The probes then need to be detached at the disable phase.
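+
+As an illustration only, a disable function for the wip monitor could look
+like the sketch below. The tracepoint names remain placeholders, and
+da_monitor_destroy_wip() is assumed to be the counterpart generated for
+da_monitor_init_wip()::
+
+   static void disable_wip(void)
+   {
+           rv_detach_trace_probe("wip", /* XXX: tracepoint */, handle_preempt_disable);
+           rv_detach_trace_probe("wip", /* XXX: tracepoint */, handle_preempt_enable);
+           rv_detach_trace_probe("wip", /* XXX: tracepoint */, handle_sched_waking);
+
+           da_monitor_destroy_wip();
+   }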
+
+[1] The wip model is presented in::
+
+ Documentation/trace/rv/deterministic_automata.rst
+
+The wip monitor is presented in::
+
+ Documentation/trace/rv/da_monitor_synthesis.rst
diff --git a/Documentation/trace/rv/da_monitor_synthesis.rst b/Documentation/trace/rv/da_monitor_synthesis.rst
new file mode 100644
index 000000000000..0dbdcd1e62b9
--- /dev/null
+++ b/Documentation/trace/rv/da_monitor_synthesis.rst
@@ -0,0 +1,147 @@
+Deterministic Automata Monitor Synthesis
+========================================
+
+The starting point for the application of runtime verification (RV) techniques
+is the *specification* or *modeling* of the desired (or undesired) behavior
+of the system under scrutiny.
+
+The formal representation then needs to be *synthesized* into a *monitor*
+that can then be used in the analysis of the trace of the system. The
+*monitor* connects to the system via an *instrumentation* that converts
+the events from the *system* to the events of the *specification*.
+
+
+In Linux terms, the runtime verification monitors are encapsulated inside
+the *RV monitor* abstraction. The RV monitor includes a set of instances
+of the monitor (per-cpu monitor, per-task monitor, and so on), the helper
+functions that glue the monitor to the system reference model, and the
+trace output as a reaction to event parsing and exceptions, as depicted
+below::
+
+ Linux +----- RV Monitor ----------------------------------+ Formal
+ Realm | | Realm
+ +-------------------+ +----------------+ +-----------------+
+ | Linux kernel | | Monitor | | Reference |
+ | Tracing | -> | Instance(s) | <- | Model |
+ | (instrumentation) | | (verification) | | (specification) |
+ +-------------------+ +----------------+ +-----------------+
+ | | |
+ | V |
+ | +----------+ |
+ | | Reaction | |
+ | +--+--+--+-+ |
+ | | | | |
+ | | | +-> trace output ? |
+ +------------------------|--|----------------------+
+ | +----> panic ?
+ +-------> <user-specified>
+
+DA monitor synthesis
+--------------------
+
+The synthesis of automata-based models into the Linux *RV monitor* abstraction
+is automated by the dot2k tool and the rv/da_monitor.h header file that
+contains a set of macros that automatically generate the monitor's code.
+
+dot2k
+-----
+
+The dot2k utility leverages dot2c by converting an automaton model in
+the DOT format into the C representation [1] and creating the skeleton of
+a kernel monitor in C.
+
+For example, it is possible to transform the wip.dot model present in
+[1] into a per-cpu monitor with the following command::
+
+ $ dot2k -d wip.dot -t per_cpu
+
+This will create a directory named wip/ with the following files:
+
+- wip.h: the wip model in C
+- wip.c: the RV monitor
+
+The wip.c file contains the monitor declaration and the starting point for
+the system instrumentation.
+
+Monitor macros
+--------------
+
+The rv/da_monitor.h enables automatic code generation for the *Monitor
+Instance(s)* using C macros.
+
+The benefits of using macros for monitor synthesis are threefold, as they:
+
+- Reduce code duplication;
+- Facilitate bug fixes and improvements;
+- Avoid developers changing the core of the monitor code to manipulate the
+  model in a non-standard way.
+
+This initial implementation presents three different types of monitor instances:
+
+- ``#define DECLARE_DA_MON_GLOBAL(name, type)``
+- ``#define DECLARE_DA_MON_PER_CPU(name, type)``
+- ``#define DECLARE_DA_MON_PER_TASK(name, type)``
+
+The first declares the functions for a global deterministic automata monitor,
+the second for monitors with per-cpu instances, and the third with per-task
+instances.
+
+In all cases, the 'name' argument is a string that identifies the monitor, and
+the 'type' argument is the data type used by dot2k for the representation of
+the model in C.
+
+For example, the wip model with two states and three events can be
+stored in an 'unsigned char' type. Considering that the preemption control
+is a per-cpu behavior, the monitor declaration in the 'wip.c' file is::
+
+ DECLARE_DA_MON_PER_CPU(wip, unsigned char);
+
+The monitor is executed by sending events to be processed via the functions
+presented below::
+
+ da_handle_event_$(MONITOR_NAME)($(event from event enum));
+ da_handle_start_event_$(MONITOR_NAME)($(event from event enum));
+ da_handle_start_run_event_$(MONITOR_NAME)($(event from event enum));
+
+The function ``da_handle_event_$(MONITOR_NAME)()`` is the regular case where
+the event will be processed if the monitor is processing events.
+
+When a monitor is enabled, it is placed in the initial state of the automata.
+However, the monitor does not know if the system is in the *initial state*.
+
+The ``da_handle_start_event_$(MONITOR_NAME)()`` function is used to notify the
+monitor that the system is returning to the initial state, so the monitor can
+start monitoring the next event.
+
+The ``da_handle_start_run_event_$(MONITOR_NAME)()`` function is used to notify
+the monitor that the system is known to be in the initial state, so the
+monitor can start monitoring, processing the current event as well.
+
+Using the wip model as an example, the events "preempt_disable" and
+"sched_waking" should be sent to the monitor, respectively, via [2]::
+
+ da_handle_event_wip(preempt_disable_wip);
+ da_handle_event_wip(sched_waking_wip);
+
+While the event "preempt_enable" will use::
+
+ da_handle_start_event_wip(preempt_enable_wip);
+
+This notifies the monitor that the system will be returning to the initial
+state, so that the system and the monitor stay in sync.
+
+Final remarks
+-------------
+
+With the monitor synthesis in place using the rv/da_monitor.h and
+dot2k, the developer's work should be limited to the instrumentation
+of the system, increasing the confidence in the overall approach.
+
+[1] For details about deterministic automata format and the translation
+from one representation to another, see::
+
+ Documentation/trace/rv/deterministic_automata.rst
+
+[2] dot2k appends the monitor's name as a suffix to the event enums to
+avoid conflicting variables when exporting the global vmlinux.h
+used by BPF programs.
diff --git a/Documentation/trace/rv/deterministic_automata.rst b/Documentation/trace/rv/deterministic_automata.rst
new file mode 100644
index 000000000000..d0638f95a455
--- /dev/null
+++ b/Documentation/trace/rv/deterministic_automata.rst
@@ -0,0 +1,184 @@
+Deterministic Automata
+======================
+
+Formally, a deterministic automaton, denoted by G, is defined as a quintuple:
+
+ *G* = { *X*, *E*, *f*, x\ :subscript:`0`, X\ :subscript:`m` }
+
+where:
+
+- *X* is the set of states;
+- *E* is the finite set of events;
+- x\ :subscript:`0` is the initial state;
+- X\ :subscript:`m` (subset of *X*) is the set of marked (or final) states;
+- *f* : *X* x *E* -> *X* is the transition function. It defines the state
+  transition in the occurrence of an event from *E* in the state *X*. In the
+  special case of deterministic automata, the occurrence of the event in *E*
+  in a state in *X* has a deterministic next state from *X*.
+
+For example, a given automaton named 'wip' (wakeup in preemptive) can
+be defined as:
+
+- *X* = { ``preemptive``, ``non_preemptive``}
+- *E* = { ``preempt_enable``, ``preempt_disable``, ``sched_waking``}
+- x\ :subscript:`0` = ``preemptive``
+- X\ :subscript:`m` = {``preemptive``}
+- *f* =
+ - *f*\ (``preemptive``, ``preempt_disable``) = ``non_preemptive``
+ - *f*\ (``non_preemptive``, ``sched_waking``) = ``non_preemptive``
+ - *f*\ (``non_preemptive``, ``preempt_enable``) = ``preemptive``
+
+One of the benefits of this formal definition is that the same automaton can
+be presented in multiple formats without any loss of information. For example,
+it can be drawn as a *graphical representation*, using vertices (nodes) and
+edges, which is very intuitive for *operating system* practitioners.
+
+The previous 'wip' automaton can also be represented as::
+
+ preempt_enable
+ +---------------------------------+
+ v |
+ #============# preempt_disable +------------------+
+ --> H preemptive H -----------------> | non_preemptive |
+ #============# +------------------+
+ ^ |
+ | sched_waking |
+ +--------------+
+
+Deterministic Automaton in C
+----------------------------
+
+In the paper "Efficient formal verification for the Linux kernel",
+the authors present a simple way to represent an automaton in C that can
+be used as regular code in the Linux kernel.
+
+For example, the 'wip' automaton can be presented as (augmented with comments)::
+
+ /* enum representation of X (set of states) to be used as index */
+ enum states {
+ preemptive = 0,
+ non_preemptive,
+ state_max
+ };
+
+ #define INVALID_STATE state_max
+
+ /* enum representation of E (set of events) to be used as index */
+ enum events {
+ preempt_disable = 0,
+ preempt_enable,
+ sched_waking,
+ event_max
+ };
+
+ struct automaton {
+ char *state_names[state_max]; // X: the set of states
+ char *event_names[event_max]; // E: the finite set of events
+ unsigned char function[state_max][event_max]; // f: transition function
+ unsigned char initial_state; // x_0: the initial state
+ bool final_states[state_max]; // X_m: the set of marked states
+ };
+
+ struct automaton aut = {
+ .state_names = {
+ "preemptive",
+ "non_preemptive"
+ },
+ .event_names = {
+ "preempt_disable",
+ "preempt_enable",
+ "sched_waking"
+ },
+ .function = {
+ { non_preemptive, INVALID_STATE, INVALID_STATE },
+ { INVALID_STATE, preemptive, non_preemptive },
+ },
+ .initial_state = preemptive,
+ .final_states = { 1, 0 },
+ };
+
+The *transition function* is represented as a matrix of states (rows) and
+events (columns), and so the function *f* : *X* x *E* -> *X* can be
+evaluated in O(1). For example::
+
+ next_state = aut.function[curr_state][event];
+
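+Building on this lookup, a small helper can both advance the automaton and
+detect an event that the model does not allow. The helper below is only a
+sketch with illustrative names; in the kernel, the equivalent helpers are
+generated automatically (see the monitor synthesis documentation)::
+
+ static bool process_event(enum states *curr_state, enum events event)
+ {
+         unsigned char next_state = aut.function[*curr_state][event];
+
+         /* the model has no transition for this event in this state */
+         if (next_state == INVALID_STATE)
+                 return false;
+
+         *curr_state = next_state;
+         return true;
+ }
+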
+Graphviz .dot format
+--------------------
+
+The Graphviz open-source tool can produce the graphical representation
+of an automaton using the (textual) DOT language as the source code.
+The DOT format is widely used and can be converted to many other formats.
+
+For example, this is the 'wip' model in DOT::
+
+ digraph state_automaton {
+ {node [shape = circle] "non_preemptive"};
+ {node [shape = plaintext, style=invis, label=""] "__init_preemptive"};
+ {node [shape = doublecircle] "preemptive"};
+ {node [shape = circle] "preemptive"};
+ "__init_preemptive" -> "preemptive";
+ "non_preemptive" [label = "non_preemptive"];
+ "non_preemptive" -> "non_preemptive" [ label = "sched_waking" ];
+ "non_preemptive" -> "preemptive" [ label = "preempt_enable" ];
+ "preemptive" [label = "preemptive"];
+ "preemptive" -> "non_preemptive" [ label = "preempt_disable" ];
+ { rank = min ;
+ "__init_preemptive";
+ "preemptive";
+ }
+ }
+
+This DOT format can be transformed into a bitmap or vector image using the
+dot utility, or into ASCII art using graph-easy. For instance::
+
+ $ dot -Tsvg -o wip.svg wip.dot
+ $ graph-easy wip.dot > wip.txt
+
+dot2c
+-----
+
+dot2c is a utility that can parse a .dot file containing an automaton as
+in the example above and automatically convert it to the C representation
+presented in [3].
+
+For example, with the previous 'wip' model saved in a file named 'wip.dot',
+the following command will transform the .dot file into the C representation
+(previously shown), stored in the 'wip.h' file::
+
+ $ dot2c wip.dot > wip.h
+
+The 'wip.h' content is the code sample in section 'Deterministic Automaton
+in C'.
+
+Remarks
+-------
+
+The automata formalism allows modeling discrete event systems (DES) in
+multiple formats, suitable for different applications/users.
+
+For example, the formal description using set theory is better suited for
+automata operations, the graphical format for human interpretation, and
+computer languages for machine execution.
+
+References
+----------
+
+Many textbooks cover automata formalism. For a brief introduction see::
+
+ O'Regan, Gerard. Concise guide to software engineering. Springer,
+ Cham, 2017.
+
+For a detailed description, including operations and applications to Discrete
+Event Systems (DES), see::
+
+ Cassandras, Christos G., and Stephane Lafortune, eds. Introduction to discrete
+ event systems. Boston, MA: Springer US, 2008.
+
+For the C representation used in the kernel, see::
+
+ De Oliveira, Daniel Bristot; Cucinotta, Tommaso; De Oliveira, Romulo
+ Silva. Efficient formal verification for the Linux kernel. In:
+ International Conference on Software Engineering and Formal Methods.
+ Springer, Cham, 2019. p. 315-332.
diff --git a/Documentation/trace/rv/index.rst b/Documentation/trace/rv/index.rst
new file mode 100644
index 000000000000..15fa966102c0
--- /dev/null
+++ b/Documentation/trace/rv/index.rst
@@ -0,0 +1,14 @@
+====================
+Runtime Verification
+====================
+
+.. toctree::
+ :maxdepth: 2
+ :glob:
+
+ runtime-verification.rst
+ deterministic_automata.rst
+ da_monitor_synthesis.rst
+ da_monitor_instrumentation.rst
+ monitor_wip.rst
+ monitor_wwnr.rst
diff --git a/Documentation/trace/rv/monitor_wip.rst b/Documentation/trace/rv/monitor_wip.rst
new file mode 100644
index 000000000000..a95763438c48
--- /dev/null
+++ b/Documentation/trace/rv/monitor_wip.rst
@@ -0,0 +1,55 @@
+Monitor wip
+===========
+
+- Name: wip - wakeup in preemptive
+- Type: per-cpu deterministic automaton
+- Author: Daniel Bristot de Oliveira <bristot@kernel.org>
+
+Description
+-----------
+
+The wakeup in preemptive (wip) monitor is a sample per-cpu monitor
+that verifies whether wakeup events always take place with
+preemption disabled::
+
+ |
+ |
+ v
+ #==================#
+ H preemptive H <+
+ #==================# |
+ | |
+ | preempt_disable | preempt_enable
+ v |
+ sched_waking +------------------+ |
+ +--------------- | | |
+ | | non_preemptive | |
+ +--------------> | | -+
+ +------------------+
+
+The wakeup event always takes place with preemption disabled because
+of the scheduler synchronization. However, because the preempt_count
+and its trace event are not atomic with regard to interrupts, some
+inconsistencies might happen. For example::
+
+ preempt_disable() {
+ __preempt_count_add(1)
+ -------> smp_apic_timer_interrupt() {
+ preempt_disable()
+ do not trace (preempt count >= 1)
+
+ wake up a thread
+
+ preempt_enable()
+ do not trace (preempt count >= 1)
+ }
+ <------
+ trace_preempt_disable();
+ }
+
+This problem was reported and discussed here:
+ https://lore.kernel.org/r/cover.1559051152.git.bristot@redhat.com/
+
+Specification
+-------------
+
+Graphviz Dot file in tools/verification/models/wip.dot
diff --git a/Documentation/trace/rv/monitor_wwnr.rst b/Documentation/trace/rv/monitor_wwnr.rst
new file mode 100644
index 000000000000..80f1777b85aa
--- /dev/null
+++ b/Documentation/trace/rv/monitor_wwnr.rst
@@ -0,0 +1,45 @@
+Monitor wwnr
+============
+
+- Name: wwnr - wakeup while not running
+- Type: per-task deterministic automaton
+- Author: Daniel Bristot de Oliveira <bristot@kernel.org>
+
+Description
+-----------
+
+This is a per-task sample monitor, with the following
+definition::
+
+ |
+ |
+ v
+ wakeup +-------------+
+ +--------- | |
+ | | not_running |
+ +--------> | | <+
+ +-------------+ |
+ | |
+ | switch_in | switch_out
+ v |
+ +-------------+ |
+ | running | -+
+ +-------------+
+
+This model is broken: the reason is that a task can be running on the
+processor without being set as RUNNABLE. Think about a task about to
+sleep::
+
+ 1: set_current_state(TASK_UNINTERRUPTIBLE);
+ 2: schedule();
+
+Now imagine an IRQ happening between lines one and two, waking the task up.
+BOOM, the wakeup will happen while the task is running.
+
+- Why do we need this model, then?
+- To test the reactors.
+
+Specification
+-------------
+
+Graphviz Dot file in tools/verification/models/wwnr.dot
diff --git a/Documentation/trace/rv/runtime-verification.rst b/Documentation/trace/rv/runtime-verification.rst
new file mode 100644
index 000000000000..c46b6149470e
--- /dev/null
+++ b/Documentation/trace/rv/runtime-verification.rst
@@ -0,0 +1,231 @@
+====================
+Runtime Verification
+====================
+
+Runtime Verification (RV) is a lightweight (yet rigorous) method that
+complements classical exhaustive verification techniques (such as *model
+checking* and *theorem proving*) with a more practical approach for complex
+systems.
+
+Instead of relying on a fine-grained model of a system (e.g., a
+re-implementation at the instruction level), RV works by analyzing the trace
+of the system's actual execution, comparing it against a formal specification
+of the system behavior.
+
+The main advantage is that RV can give precise information on the runtime
+behavior of the monitored system, without the pitfalls of developing models
+that require a re-implementation of the entire system in a modeling language.
+Moreover, given an efficient monitoring method, it is possible to execute an
+*online* verification of a system, enabling a *reaction* to unexpected
+events, avoiding, for example, the propagation of a failure in safety-critical
+systems.
+
+Runtime Monitors and Reactors
+=============================
+
+A monitor is the central part of the runtime verification of a system. The
+monitor stands in between the formal specification of the desired (or
+undesired) behavior, and the trace of the actual system.
+
+In Linux terms, the runtime verification monitors are encapsulated inside the
+*RV monitor* abstraction. An *RV monitor* includes a reference model of the
+system, a set of instances of the monitor (per-cpu monitor, per-task monitor,
+and so on), and the helper functions that glue the monitor to the system via
+trace, as depicted below::
+
+ Linux +---- RV Monitor ----------------------------------+ Formal
+ Realm | | Realm
+ +-------------------+ +----------------+ +-----------------+
+ | Linux kernel | | Monitor | | Reference |
+ | Tracing | -> | Instance(s) | <- | Model |
+ | (instrumentation) | | (verification) | | (specification) |
+ +-------------------+ +----------------+ +-----------------+
+ | | |
+ | V |
+ | +----------+ |
+ | | Reaction | |
+ | +--+--+--+-+ |
+ | | | | |
+ | | | +-> trace output ? |
+ +------------------------|--|----------------------+
+ | +----> panic ?
+ +-------> <user-specified>
+
+In addition to the verification and monitoring of the system, a monitor can
+react to an unexpected event. The forms of reaction can vary from logging the
+event occurrence, to enforcing the correct behavior, to the extreme action of
+taking the system down to avoid the propagation of a failure.
+
+In Linux terms, a *reactor* is a reaction method available to *RV monitors*.
+By default, all monitors should provide a trace output of their actions,
+which is already a reaction. In addition, other reactions will be available
+so the user can enable them as needed.
+
+For further information about the principles of runtime verification and
+RV applied to Linux:
+
+ Bartocci, Ezio, et al. *Introduction to runtime verification.* In: Lectures on
+ Runtime Verification. Springer, Cham, 2018. p. 1-33.
+
+ Falcone, Ylies, et al. *A taxonomy for classifying runtime verification tools.*
+ In: International Conference on Runtime Verification. Springer, Cham, 2018. p.
+ 241-262.
+
+ De Oliveira, Daniel Bristot. *Automata-based formal analysis and
+ verification of the real-time Linux kernel.* Ph.D. Thesis, 2020.
+
+Online RV monitors
+==================
+
+Monitors can be classified as *offline* and *online* monitors. *Offline*
+monitors process the traces generated by a system after the events, generally
+by reading the execution trace from permanent storage. *Online* monitors
+process the trace during the execution of the system. Online monitors are said
+to be *synchronous* if the processing of an event is attached to the system
+execution, blocking the system during the event monitoring. On the other hand,
+an *asynchronous* monitor has its execution detached from the system. Each type
+of monitor has its own advantages. For example, *offline* monitors can be
+executed on different machines but require saving the log to a
+file. In contrast, the *synchronous online* method can react at the exact
+moment a violation occurs.
+
+Another important aspect regarding monitors is the overhead associated with
+the event analysis. If the system generates events at a higher frequency than
+the monitor's ability to process them on the same system, only the *offline*
+methods are viable. On the other hand, if tracing the events incurs
+higher overhead than the simple handling of an event by a monitor, then
+*synchronous online* monitors will incur lower overhead.
+
+Indeed, the research presented in:
+
+ De Oliveira, Daniel Bristot; Cucinotta, Tommaso; De Oliveira, Romulo Silva.
+ *Efficient formal verification for the Linux kernel.* In: International
+ Conference on Software Engineering and Formal Methods. Springer, Cham, 2019.
+ p. 315-332.
+
+shows that for Deterministic Automata models, the synchronous processing of
+events in-kernel causes lower overhead than saving the same events to the
+trace buffer, even without considering the cost of collecting the trace for
+user-space analysis.
+This motivated the development of an in-kernel interface for online monitors.
+
+For further information about modeling of Linux kernel behavior using automata,
+see:
+
+ De Oliveira, Daniel B.; De Oliveira, Romulo S.; Cucinotta, Tommaso. *A thread
+ synchronization model for the PREEMPT_RT Linux kernel.* Journal of Systems
+ Architecture, 2020, 107: 101729.
+
+The user interface
+==================
+
+The user interface resembles the tracing interface (on purpose). It is
+currently at "/sys/kernel/tracing/rv/".
+
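+For example (a sketch; the exact listing may vary between kernel versions)::
+
+ # ls /sys/kernel/tracing/rv/
+ available_monitors  available_reactors  enabled_monitors
+ monitoring_on  monitors  reacting_on
+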
+The following files/folders are currently available:
+
+**available_monitors**
+
+- Reading lists the available monitors, one per line
+
+For example::
+
+ # cat available_monitors
+ wip
+ wwnr
+
+**available_reactors**
+
+- Reading shows the available reactors, one per line.
+
+For example::
+
+ # cat available_reactors
+ nop
+ panic
+ printk
+
+**enabled_monitors**
+
+- Reading lists the enabled monitors, one per line
+- Writing to it enables a given monitor
+- Writing a monitor name with a '!' prefix disables it
+- Truncating the file disables all enabled monitors
+
+For example::
+
+ # cat enabled_monitors
+ # echo wip > enabled_monitors
+ # echo wwnr >> enabled_monitors
+ # cat enabled_monitors
+ wip
+ wwnr
+ # echo '!wip' >> enabled_monitors
+ # cat enabled_monitors
+ wwnr
+ # echo > enabled_monitors
+ # cat enabled_monitors
+ #
+
+Note that it is possible to enable more than one monitor concurrently.
+
+**monitoring_on**
+
+This is a general on/off switch for monitoring. It resembles the
+"tracing_on" switch in the tracing interface.
+
+- Writing "0" stops the monitoring
+- Writing "1" continues the monitoring
+- Reading returns the current status of the monitoring
+
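+For example, assuming monitoring is currently on::
+
+ # cat monitoring_on
+ 1
+ # echo 0 > monitoring_on
+ # cat monitoring_on
+ 0
+ # echo 1 > monitoring_on
+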
+Note that it does not disable the enabled monitors; it only stops the
+per-entity monitor instances from processing the events received from the
+system.
+
+**reacting_on**
+
+- Writing "0" prevents reactions for happening
+- Writing "1" enable reactions
+- Reading returns the current status of the reaction
+
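+For example, assuming reactions are currently enabled::
+
+ # cat reacting_on
+ 1
+ # echo 0 > reacting_on
+ # cat reacting_on
+ 0
+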
+**monitors/**
+
+Each monitor will have its own directory inside "monitors/". There the
+monitor-specific files will be presented. The "monitors/" directory resembles
+the "events" directory on tracefs.
+
+For example::
+
+ # cd monitors/wip/
+ # ls
+ desc enable
+ # cat desc
+ wakeup in preemptive per-cpu testing monitor.
+ # cat enable
+ 0
+
+**monitors/MONITOR/desc**
+
+- Reading shows a description of the monitor *MONITOR*
+
+**monitors/MONITOR/enable**
+
+- Writing "0" disables the *MONITOR*
+- Writing "1" enables the *MONITOR*
+- Reading returns the current status of the *MONITOR*
+
+**monitors/MONITOR/reactors**
+
+- Reading lists the available reactors, with the reactor selected for the
+  given *MONITOR* inside "[]". The default one is the nop (no operation)
+  reactor.
+- Writing the name of a reactor enables it for the given MONITOR.
+
+For example::
+
+ # cat monitors/wip/reactors
+ [nop]
+ panic
+ printk
+ # echo panic > monitors/wip/reactors
+ # cat monitors/wip/reactors
+ nop
+ [panic]
+ printk
diff --git a/Documentation/trace/uprobetracer.rst b/Documentation/trace/uprobetracer.rst
index a8e5938f609e..3a1797d707f4 100644
--- a/Documentation/trace/uprobetracer.rst
+++ b/Documentation/trace/uprobetracer.rst
@@ -26,10 +26,10 @@ Synopsis of uprobe_tracer
-------------------------
::
- p[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a uprobe
- r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a return uprobe (uretprobe)
- p[:[GRP/]EVENT] PATH:OFFSET%return [FETCHARGS] : Set a return uprobe (uretprobe)
- -:[GRP/]EVENT : Clear uprobe or uretprobe event
+ p[:[GRP/][EVENT]] PATH:OFFSET [FETCHARGS] : Set a uprobe
+ r[:[GRP/][EVENT]] PATH:OFFSET [FETCHARGS] : Set a return uprobe (uretprobe)
+ p[:[GRP/][EVENT]] PATH:OFFSET%return [FETCHARGS] : Set a return uprobe (uretprobe)
+ -:[GRP/][EVENT] : Clear uprobe or uretprobe event
GRP : Group name. If omitted, "uprobes" is the default value.
EVENT : Event name. If omitted, the event name is generated based
diff --git a/Documentation/translations/ja_JP/SubmittingPatches b/Documentation/translations/ja_JP/SubmittingPatches
index 66ce0d8b0526..04deb77b20c6 100644
--- a/Documentation/translations/ja_JP/SubmittingPatches
+++ b/Documentation/translations/ja_JP/SubmittingPatches
@@ -35,8 +35,7 @@ Linux カーãƒãƒ«ã«å¤‰æ›´ã‚’加ãˆãŸã„ã¨æ€ã£ã¦ã„る個人åˆã¯ä¼šç¤¾ã
ã¦ã‚‚らãˆã‚„ã™ãã™ã‚‹æ案を集ã‚ãŸã‚‚ã®ã§ã™ã€‚
コードを投稿ã™ã‚‹å‰ã«ã€Documentation/process/submit-checklist.rst ã®é …目リストã«ç›®
-を通ã—ã¦ãƒã‚§ãƒƒã‚¯ã—ã¦ãã ã•ã„。もã—ã‚ãªãŸãŒãƒ‰ãƒ©ã‚¤ãƒãƒ¼ã‚’投稿ã—よã†ã¨ã—
-ã¦ã„ã‚‹ãªã‚‰ã€Documentation/process/submitting-drivers.rst ã«ã‚‚目を通ã—ã¦ãã ã•ã„。
+を通ã—ã¦ãƒã‚§ãƒƒã‚¯ã—ã¦ãã ã•ã„。
--------------------------------------------
セクション1 パッãƒã®ä½œã‚Šæ–¹ã¨é€ã‚Šæ–¹
diff --git a/Documentation/translations/zh_CN/admin-guide/mm/damon/index.rst b/Documentation/translations/zh_CN/admin-guide/mm/damon/index.rst
index 0c8276109fc0..30c69e1f44fe 100644
--- a/Documentation/translations/zh_CN/admin-guide/mm/damon/index.rst
+++ b/Documentation/translations/zh_CN/admin-guide/mm/damon/index.rst
@@ -13,7 +13,7 @@
监测数æ®è®¿é—®
============
-:doc:`DAMON </vm/damon/index>` å…许轻é‡çº§çš„æ•°æ®è®¿é—®ç›‘测。使用DAMON,
+:doc:`DAMON </mm/damon/index>` å…许轻é‡çº§çš„æ•°æ®è®¿é—®ç›‘测。使用DAMON,
用户å¯ä»¥åˆ†æžä»–们系统的内存访问模å¼ï¼Œå¹¶ä¼˜åŒ–它们。
.. toctree::
diff --git a/Documentation/translations/zh_CN/admin-guide/mm/damon/reclaim.rst b/Documentation/translations/zh_CN/admin-guide/mm/damon/reclaim.rst
index 1500bdbf338a..c976f3e33ffd 100644
--- a/Documentation/translations/zh_CN/admin-guide/mm/damon/reclaim.rst
+++ b/Documentation/translations/zh_CN/admin-guide/mm/damon/reclaim.rst
@@ -229,4 +229,4 @@ DAMON_RECLAIMå†æ¬¡ä»€ä¹ˆéƒ½ä¸åšï¼Œè¿™æ ·æˆ‘们就å¯ä»¥é€€å›žåˆ°åŸºäºŽLRU列è
.. [1] https://research.google/pubs/pub48551/
.. [2] https://lwn.net/Articles/787611/
-.. [3] https://www.kernel.org/doc/html/latest/vm/free_page_reporting.html
+.. [3] https://www.kernel.org/doc/html/latest/mm/free_page_reporting.html
diff --git a/Documentation/translations/zh_CN/admin-guide/mm/damon/usage.rst b/Documentation/translations/zh_CN/admin-guide/mm/damon/usage.rst
index 2c7d9106e399..aeae2ab65dd8 100644
--- a/Documentation/translations/zh_CN/admin-guide/mm/damon/usage.rst
+++ b/Documentation/translations/zh_CN/admin-guide/mm/damon/usage.rst
@@ -33,9 +33,9 @@ DAMON 为ä¸åŒçš„用户æ供了下é¢è¿™äº›æŽ¥å£ã€‚
å£ç›¸åŒã€‚这将在下一个LTS内核å‘布åŽè¢«ç§»é™¤ï¼Œæ‰€ä»¥ç”¨æˆ·åº”该转移到
:ref:`sysfs interface <sysfs_interface>`。
- *内核空间编程接å£ã€‚*
- :doc:`è¿™ </vm/damon/api>` 这是为内核空间程åºå‘˜å‡†å¤‡çš„。使用它,用户å¯ä»¥é€šè¿‡ä¸ºä½ ç¼–写内
+ :doc:`è¿™ </mm/damon/api>` 这是为内核空间程åºå‘˜å‡†å¤‡çš„。使用它,用户å¯ä»¥é€šè¿‡ä¸ºä½ ç¼–写内
核空间的DAMON应用程åºï¼Œæœ€çµæ´»æœ‰æ•ˆåœ°åˆ©ç”¨DAMONçš„æ¯ä¸€ä¸ªåŠŸèƒ½ã€‚你甚至å¯ä»¥ä¸ºå„ç§åœ°å€ç©ºé—´æ‰©å±•DAMON。
- 详细情况请å‚è€ƒæŽ¥å£ :doc:`文件 </vm/damon/api>`。
+ 详细情况请å‚è€ƒæŽ¥å£ :doc:`文件 </mm/damon/api>`。
sysfs接å£
=========
@@ -148,7 +148,7 @@ contexts/<N>/monitoring_attrs/
在 ``nr_regions`` 目录下,有两个文件分别用于DAMON监测区域的下é™å’Œä¸Šé™ï¼ˆ``min`` å’Œ ``max`` ),
这两个文件控制ç€ç›‘测的开销。你å¯ä»¥é€šè¿‡å‘这些文件的写入和读出æ¥è®¾ç½®å’ŒèŽ·å–这些值。
-关于间隔和监测区域范围的更多细节,请å‚考设计文件 (:doc:`/vm/damon/design`)。
+关于间隔和监测区域范围的更多细节,请å‚考设计文件 (:doc:`/mm/damon/design`)。
contexts/<N>/targets/
---------------------
@@ -320,7 +320,7 @@ DAMON导出了八个文件, ``attrs``, ``target_ids``, ``init_regions``,
----
用户å¯ä»¥é€šè¿‡è¯»å–和写入 ``attrs`` 文件获得和设置 ``采样间隔`` 〠``èšé›†é—´éš”`` 〠``æ›´æ–°é—´éš”``
-以åŠç›‘测目标区域的最å°/最大数é‡ã€‚è¦è¯¦ç»†äº†è§£ç›‘测属性,请å‚考 `:doc:/vm/damon/design` 。例如,
+以åŠç›‘测目标区域的最å°/最大数é‡ã€‚è¦è¯¦ç»†äº†è§£ç›‘测属性,请å‚考 `:doc:/mm/damon/design` 。例如,
下é¢çš„命令将这些值设置为5msã€100msã€1000msã€10å’Œ1000,然åŽå†æ¬¡æ£€æŸ¥::
# cd <debugfs>/damon
diff --git a/Documentation/translations/zh_CN/core-api/index.rst b/Documentation/translations/zh_CN/core-api/index.rst
index 7ca44629860c..8a94ad87465d 100644
--- a/Documentation/translations/zh_CN/core-api/index.rst
+++ b/Documentation/translations/zh_CN/core-api/index.rst
@@ -53,7 +53,6 @@ Todolist:
circular-buffers
generic-radix-tree
packing
- bus-virt-phys-mapping
this_cpu_ops
timekeeping
errseq
@@ -102,7 +101,7 @@ Todolist:
========
如何在内核中分é…和使用内存。请注æ„,在
-:doc:`/vm/index` 中有更多的内存管ç†æ–‡æ¡£ã€‚
+:doc:`/mm/index` 中有更多的内存管ç†æ–‡æ¡£ã€‚
.. toctree::
:maxdepth: 1
diff --git a/Documentation/translations/zh_CN/index.rst b/Documentation/translations/zh_CN/index.rst
index ad7bb8c17562..bf85baca8b3e 100644
--- a/Documentation/translations/zh_CN/index.rst
+++ b/Documentation/translations/zh_CN/index.rst
@@ -118,7 +118,7 @@ TODOList:
sound/index
filesystems/index
scheduler/index
- vm/index
+ mm/index
peci/index
TODOList:
diff --git a/Documentation/translations/zh_CN/loongarch/introduction.rst b/Documentation/translations/zh_CN/loongarch/introduction.rst
index 11686ee0caeb..128878f5bb70 100644
--- a/Documentation/translations/zh_CN/loongarch/introduction.rst
+++ b/Documentation/translations/zh_CN/loongarch/introduction.rst
@@ -190,8 +190,8 @@ I26 Opcode + I26L + I26H
=========== ==========================
Opcode是指令æ“作ç ï¼ŒRjå’ŒRk是æºæ“作数(寄存器),Rd是目标æ“作数(寄存器),Ra是
-4R-typeæ ¼å¼ç‰¹æœ‰çš„附加æ“作数(寄存器)。I8/I12/I16/I21/I26分别是8ä½/12ä½/16ä½/
-21ä½/26ä½çš„ç«‹å³æ•°ã€‚其中较长的21ä½å’Œ26ä½ç«‹å³æ•°åœ¨æŒ‡ä»¤å­—中被分割为高ä½éƒ¨åˆ†ä¸Žä½Žä½
+4R-typeæ ¼å¼ç‰¹æœ‰çš„附加æ“作数(寄存器)。I8/I12/I14/I16/I21/I26分别是8ä½/12ä½/14ä½/
+16ä½/21ä½/26ä½çš„ç«‹å³æ•°ã€‚其中较长的21ä½å’Œ26ä½ç«‹å³æ•°åœ¨æŒ‡ä»¤å­—中被分割为高ä½éƒ¨åˆ†ä¸Žä½Žä½
部分,所以你们在这里的格å¼æ述中能够看到I21L/I21Hå’ŒI26L/I26H这样带åŽç¼€çš„表述。
指令列表
diff --git a/Documentation/translations/zh_CN/vm/active_mm.rst b/Documentation/translations/zh_CN/mm/active_mm.rst
index 366609ea4f37..c2816f523bd7 100644
--- a/Documentation/translations/zh_CN/vm/active_mm.rst
+++ b/Documentation/translations/zh_CN/mm/active_mm.rst
@@ -1,6 +1,6 @@
.. include:: ../disclaimer-zh_CN.rst
-:Original: Documentation/vm/active_mm.rst
+:Original: Documentation/mm/active_mm.rst
:翻译:
diff --git a/Documentation/translations/zh_CN/vm/balance.rst b/Documentation/translations/zh_CN/mm/balance.rst
index e98a47ef24a8..6fd79209c307 100644
--- a/Documentation/translations/zh_CN/vm/balance.rst
+++ b/Documentation/translations/zh_CN/mm/balance.rst
@@ -1,6 +1,6 @@
.. include:: ../disclaimer-zh_CN.rst
-:Original: Documentation/vm/balance.rst
+:Original: Documentation/mm/balance.rst
:翻译:
diff --git a/Documentation/translations/zh_CN/vm/damon/api.rst b/Documentation/translations/zh_CN/mm/damon/api.rst
index 21143eea4ebe..5593a83c86bc 100644
--- a/Documentation/translations/zh_CN/vm/damon/api.rst
+++ b/Documentation/translations/zh_CN/mm/damon/api.rst
@@ -1,6 +1,6 @@
.. SPDX-License-Identifier: GPL-2.0
-:Original: Documentation/vm/damon/api.rst
+:Original: Documentation/mm/damon/api.rst
:翻译:
diff --git a/Documentation/translations/zh_CN/vm/damon/design.rst b/Documentation/translations/zh_CN/mm/damon/design.rst
index 46128b77c2b3..16e3db34a7dd 100644
--- a/Documentation/translations/zh_CN/vm/damon/design.rst
+++ b/Documentation/translations/zh_CN/mm/damon/design.rst
@@ -1,6 +1,6 @@
.. SPDX-License-Identifier: GPL-2.0
-:Original: Documentation/vm/damon/design.rst
+:Original: Documentation/mm/damon/design.rst
:翻译:
diff --git a/Documentation/translations/zh_CN/vm/damon/faq.rst b/Documentation/translations/zh_CN/mm/damon/faq.rst
index 07b4ac19407d..de4be417494a 100644
--- a/Documentation/translations/zh_CN/vm/damon/faq.rst
+++ b/Documentation/translations/zh_CN/mm/damon/faq.rst
@@ -1,6 +1,6 @@
.. SPDX-License-Identifier: GPL-2.0
-:Original: Documentation/vm/damon/faq.rst
+:Original: Documentation/mm/damon/faq.rst
:翻译:
diff --git a/Documentation/translations/zh_CN/vm/damon/index.rst b/Documentation/translations/zh_CN/mm/damon/index.rst
index 84d36d90c9b0..b03bf307204f 100644
--- a/Documentation/translations/zh_CN/vm/damon/index.rst
+++ b/Documentation/translations/zh_CN/mm/damon/index.rst
@@ -1,6 +1,6 @@
.. SPDX-License-Identifier: GPL-2.0
-:Original: Documentation/vm/damon/index.rst
+:Original: Documentation/mm/damon/index.rst
:翻译:
@@ -14,7 +14,7 @@ DAMON:æ•°æ®è®¿é—®ç›‘视器
==========================
DAMON是Linux内核的一个数æ®è®¿é—®ç›‘控框架å­ç³»ç»Ÿã€‚DAMON的核心机制使其æˆä¸º
-(该核心机制详è§(Documentation/translations/zh_CN/vm/damon/design.rst))
+(该核心机制详è§(Documentation/translations/zh_CN/mm/damon/design.rst))
- *准确度* (监测输出对DRAM级别的内存管ç†è¶³å¤Ÿæœ‰ç”¨ï¼›ä½†å¯èƒ½ä¸é€‚åˆCPU Cache级别),
- *è½»é‡çº§* (监控开销低到å¯ä»¥åœ¨çº¿åº”用),以åŠ
@@ -30,4 +30,3 @@ DAMON是Linux内核的一个数æ®è®¿é—®ç›‘控框架å­ç³»ç»Ÿã€‚DAMON的核心æœ
faq
design
api
-
diff --git a/Documentation/translations/zh_CN/vm/free_page_reporting.rst b/Documentation/translations/zh_CN/mm/free_page_reporting.rst
index 14336a3aa5f4..5bfd58014c94 100644
--- a/Documentation/translations/zh_CN/vm/free_page_reporting.rst
+++ b/Documentation/translations/zh_CN/mm/free_page_reporting.rst
@@ -1,6 +1,6 @@
.. include:: ../disclaimer-zh_CN.rst
-:Original: Documentation/vm/free_page_reporting.rst
+:Original: Documentation/mm/free_page_reporting.rst
:翻译:
diff --git a/Documentation/translations/zh_CN/vm/frontswap.rst b/Documentation/translations/zh_CN/mm/frontswap.rst
index 98aa6f581ea7..434975390b48 100644
--- a/Documentation/translations/zh_CN/vm/frontswap.rst
+++ b/Documentation/translations/zh_CN/mm/frontswap.rst
@@ -1,4 +1,4 @@
-:Original: Documentation/vm/free_page_reporting.rst
+:Original: Documentation/mm/frontswap.rst
:翻译:
diff --git a/Documentation/translations/zh_CN/vm/highmem.rst b/Documentation/translations/zh_CN/mm/highmem.rst
index 200321774646..f74800a6d9a7 100644
--- a/Documentation/translations/zh_CN/vm/highmem.rst
+++ b/Documentation/translations/zh_CN/mm/highmem.rst
@@ -1,6 +1,6 @@
.. include:: ../disclaimer-zh_CN.rst
-:Original: Documentation/vm/highmem.rst
+:Original: Documentation/mm/highmem.rst
:翻译:
diff --git a/Documentation/translations/zh_CN/vm/hmm.rst b/Documentation/translations/zh_CN/mm/hmm.rst
index 2379df95aa58..5024a8a15516 100644
--- a/Documentation/translations/zh_CN/vm/hmm.rst
+++ b/Documentation/translations/zh_CN/mm/hmm.rst
@@ -1,6 +1,6 @@
.. include:: ../disclaimer-zh_CN.rst
-:Original: Documentation/vm/hmm.rst
+:Original: Documentation/mm/hmm.rst
:翻译:
diff --git a/Documentation/translations/zh_CN/vm/hugetlbfs_reserv.rst b/Documentation/translations/zh_CN/mm/hugetlbfs_reserv.rst
index c6d471ce2131..752e5696cd47 100644
--- a/Documentation/translations/zh_CN/vm/hugetlbfs_reserv.rst
+++ b/Documentation/translations/zh_CN/mm/hugetlbfs_reserv.rst
@@ -1,6 +1,6 @@
.. include:: ../disclaimer-zh_CN.rst
-:Original: Documentation/vm/hugetlbfs_reserv.rst
+:Original: Documentation/mm/hugetlbfs_reserv.rst
:翻译:
diff --git a/Documentation/translations/zh_CN/vm/hwpoison.rst b/Documentation/translations/zh_CN/mm/hwpoison.rst
index c6e1e7bdb05b..310862edc937 100644
--- a/Documentation/translations/zh_CN/vm/hwpoison.rst
+++ b/Documentation/translations/zh_CN/mm/hwpoison.rst
@@ -1,5 +1,5 @@
-:Original: Documentation/vm/hwpoison.rst
+:Original: Documentation/mm/hwpoison.rst
:翻译:
diff --git a/Documentation/translations/zh_CN/vm/index.rst b/Documentation/translations/zh_CN/mm/index.rst
index c77a56553845..2f53e37b8049 100644
--- a/Documentation/translations/zh_CN/vm/index.rst
+++ b/Documentation/translations/zh_CN/mm/index.rst
@@ -1,6 +1,6 @@
.. include:: ../disclaimer-zh_CN.rst
-:Original: Documentation/vm/index.rst
+:Original: Documentation/mm/index.rst
:翻译:
diff --git a/Documentation/translations/zh_CN/vm/ksm.rst b/Documentation/translations/zh_CN/mm/ksm.rst
index 83b0c73984da..d1f82e857ad7 100644
--- a/Documentation/translations/zh_CN/vm/ksm.rst
+++ b/Documentation/translations/zh_CN/mm/ksm.rst
@@ -1,6 +1,6 @@
.. include:: ../disclaimer-zh_CN.rst
-:Original: Documentation/vm/ksm.rst
+:Original: Documentation/mm/ksm.rst
:翻译:
diff --git a/Documentation/translations/zh_CN/vm/memory-model.rst b/Documentation/translations/zh_CN/mm/memory-model.rst
index 013e30c88d72..77ec149a970c 100644
--- a/Documentation/translations/zh_CN/vm/memory-model.rst
+++ b/Documentation/translations/zh_CN/mm/memory-model.rst
@@ -1,6 +1,6 @@
.. SPDX-License-Identifier: GPL-2.0
-:Original: Documentation/vm/memory-model.rst
+:Original: Documentation/mm/memory-model.rst
:翻译:
@@ -129,7 +129,7 @@ ZONE_DEVICE
* pmem: 通过DAX映射将平å°æŒä¹…性内存作为直接I/O目标使用。
* hmm: 用 `->page_fault()` 和 `->page_free()` 事件回调扩展 `ZONE_DEVICE` ,
- 以å…许设备驱动程åºå调与设备内存相关的内存管ç†äº‹ä»¶ï¼Œé€šå¸¸æ˜¯GPU内存。å‚è§/vm/hmm.rst。
+ 以å…许设备驱动程åºå调与设备内存相关的内存管ç†äº‹ä»¶ï¼Œé€šå¸¸æ˜¯GPU内存。å‚è§Documentation/mm/hmm.rst。
* p2pdma: 创建 `struct page` 对象,å…许PCI/E拓扑结构中的peer设备å调它们之间的
直接DMAæ“作,å³ç»•è¿‡ä¸»æœºå†…存。
diff --git a/Documentation/translations/zh_CN/vm/mmu_notifier.rst b/Documentation/translations/zh_CN/mm/mmu_notifier.rst
index b29a37b33628..ce3664d1a410 100644
--- a/Documentation/translations/zh_CN/vm/mmu_notifier.rst
+++ b/Documentation/translations/zh_CN/mm/mmu_notifier.rst
@@ -1,4 +1,4 @@
-:Original: Documentation/vm/mmu_notifier.rst
+:Original: Documentation/mm/mmu_notifier.rst
:翻译:
diff --git a/Documentation/translations/zh_CN/vm/numa.rst b/Documentation/translations/zh_CN/mm/numa.rst
index 6af412b924ad..b15cfeeb6dfb 100644
--- a/Documentation/translations/zh_CN/vm/numa.rst
+++ b/Documentation/translations/zh_CN/mm/numa.rst
@@ -1,4 +1,4 @@
-:Original: Documentation/vm/numa.rst
+:Original: Documentation/mm/numa.rst
:翻译:
diff --git a/Documentation/translations/zh_CN/vm/overcommit-accounting.rst b/Documentation/translations/zh_CN/mm/overcommit-accounting.rst
index 8765cb118f24..d8452d8b7fbb 100644
--- a/Documentation/translations/zh_CN/vm/overcommit-accounting.rst
+++ b/Documentation/translations/zh_CN/mm/overcommit-accounting.rst
@@ -1,4 +1,4 @@
-:Original: Documentation/vm/overcommit-accounting.rst
+:Original: Documentation/mm/overcommit-accounting.rst
:翻译:
diff --git a/Documentation/translations/zh_CN/vm/page_frags.rst b/Documentation/translations/zh_CN/mm/page_frags.rst
index 38ecddb9e1c0..20bd3fafdc8c 100644
--- a/Documentation/translations/zh_CN/vm/page_frags.rst
+++ b/Documentation/translations/zh_CN/mm/page_frags.rst
@@ -1,4 +1,4 @@
-:Original: Documentation/vm/page_frags.rst
+:Original: Documentation/mm/page_frags.rst
:翻译:
diff --git a/Documentation/translations/zh_CN/vm/page_migration.rst b/Documentation/translations/zh_CN/mm/page_migration.rst
index 566880a41ea0..076081dc1635 100644
--- a/Documentation/translations/zh_CN/vm/page_migration.rst
+++ b/Documentation/translations/zh_CN/mm/page_migration.rst
@@ -1,6 +1,6 @@
.. include:: ../disclaimer-zh_CN.rst
-:Original: Documentation/vm/index.rst
+:Original: Documentation/mm/page_migration.rst
:翻译:
diff --git a/Documentation/translations/zh_CN/vm/page_owner.rst b/Documentation/translations/zh_CN/mm/page_owner.rst
index 7bd740bc5bf4..b7f81d7a6589 100644
--- a/Documentation/translations/zh_CN/vm/page_owner.rst
+++ b/Documentation/translations/zh_CN/mm/page_owner.rst
@@ -1,4 +1,4 @@
-:Original: Documentation/vm/page_owner.rst
+:Original: Documentation/mm/page_owner.rst
:翻译:
diff --git a/Documentation/translations/zh_CN/vm/page_table_check.rst b/Documentation/translations/zh_CN/mm/page_table_check.rst
index a29fc1b360e6..e8077310a76c 100644
--- a/Documentation/translations/zh_CN/vm/page_table_check.rst
+++ b/Documentation/translations/zh_CN/mm/page_table_check.rst
@@ -1,6 +1,6 @@
.. SPDX-License-Identifier: GPL-2.0
-:Original: Documentation/vm/page_table_check.rst
+:Original: Documentation/mm/page_table_check.rst
:翻译:
diff --git a/Documentation/translations/zh_CN/vm/remap_file_pages.rst b/Documentation/translations/zh_CN/mm/remap_file_pages.rst
index af6b7e28af23..31e0c54dc36f 100644
--- a/Documentation/translations/zh_CN/vm/remap_file_pages.rst
+++ b/Documentation/translations/zh_CN/mm/remap_file_pages.rst
@@ -1,4 +1,4 @@
-:Original: Documentation/vm/remap_file_pages.rst
+:Original: Documentation/mm/remap_file_pages.rst
:翻译:
diff --git a/Documentation/translations/zh_CN/vm/split_page_table_lock.rst b/Documentation/translations/zh_CN/mm/split_page_table_lock.rst
index 50694d97c426..4fb7aa666037 100644
--- a/Documentation/translations/zh_CN/vm/split_page_table_lock.rst
+++ b/Documentation/translations/zh_CN/mm/split_page_table_lock.rst
@@ -1,4 +1,4 @@
-:Original: Documentation/vm/split_page_table_lock.rst
+:Original: Documentation/mm/split_page_table_lock.rst
:翻译:
diff --git a/Documentation/translations/zh_CN/vm/vmalloced-kernel-stacks.rst b/Documentation/translations/zh_CN/mm/vmalloced-kernel-stacks.rst
index ad23f274f6d7..d02a23f7f07e 100644
--- a/Documentation/translations/zh_CN/vm/vmalloced-kernel-stacks.rst
+++ b/Documentation/translations/zh_CN/mm/vmalloced-kernel-stacks.rst
@@ -1,7 +1,7 @@
.. SPDX-License-Identifier: GPL-2.0
.. include:: ../disclaimer-zh_CN.rst
-:Original: Documentation/vm/vmalloced-kernel-stacks.rst
+:Original: Documentation/mm/vmalloced-kernel-stacks.rst
:翻译:
diff --git a/Documentation/translations/zh_CN/vm/z3fold.rst b/Documentation/translations/zh_CN/mm/z3fold.rst
index 57204aa08caa..9569a6d88270 100644
--- a/Documentation/translations/zh_CN/vm/z3fold.rst
+++ b/Documentation/translations/zh_CN/mm/z3fold.rst
@@ -1,4 +1,4 @@
-:Original: Documentation/vm/z3fold.rst
+:Original: Documentation/mm/z3fold.rst
:翻译:
diff --git a/Documentation/translations/zh_CN/vm/zsmalloc.rst b/Documentation/translations/zh_CN/mm/zsmalloc.rst
index 45a9b7ab2a51..4c8c9b1006a9 100644
--- a/Documentation/translations/zh_CN/vm/zsmalloc.rst
+++ b/Documentation/translations/zh_CN/mm/zsmalloc.rst
@@ -1,4 +1,4 @@
-:Original: Documentation/vm/zsmalloc.rst
+:Original: Documentation/mm/zsmalloc.rst
:翻译:
diff --git a/Documentation/translations/zh_TW/index.rst b/Documentation/translations/zh_TW/index.rst
index e1ce9d8c06f8..e97d7d578751 100644
--- a/Documentation/translations/zh_TW/index.rst
+++ b/Documentation/translations/zh_TW/index.rst
@@ -128,7 +128,7 @@ TODOList:
* security/index
* sound/index
* crypto/index
-* vm/index
+* mm/index
* bpf/index
* usb/index
* PCI/index
diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst
index fcab013e47c9..3b985b19f39d 100644
--- a/Documentation/userspace-api/ioctl/ioctl-number.rst
+++ b/Documentation/userspace-api/ioctl/ioctl-number.rst
@@ -120,7 +120,7 @@ Code Seq# Include File Comments
'C' 01-2F linux/capi.h conflict!
'C' F0-FF drivers/net/wan/cosa.h conflict!
'D' all arch/s390/include/asm/dasd.h
-'D' 40-5F drivers/scsi/dpt/dtpi_ioctl.h
+'D' 40-5F drivers/scsi/dpt/dtpi_ioctl.h Dead since 2022
'D' 05 drivers/scsi/pmcraid.h
'E' all linux/input.h conflict!
'E' 00-0F xen/evtchn.h conflict!
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index 9788b19f9ff7..abd7c32126ce 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -8262,15 +8262,15 @@ dump related UV data. Also the vcpu ioctl `KVM_S390_PV_CPU_COMMAND` is
available and supports the `KVM_PV_DUMP_CPU` subcommand.
8.38 KVM_CAP_VM_DISABLE_NX_HUGE_PAGES
----------------------------
+-------------------------------------
-:Capability KVM_CAP_VM_DISABLE_NX_HUGE_PAGES
+:Capability: KVM_CAP_VM_DISABLE_NX_HUGE_PAGES
:Architectures: x86
:Type: vm
:Parameters: arg[0] must be 0.
-:Returns 0 on success, -EPERM if the userspace process does not
- have CAP_SYS_BOOT, -EINVAL if args[0] is not 0 or any vCPUs have been
- created.
+:Returns: 0 on success, -EPERM if the userspace process does not
+ have CAP_SYS_BOOT, -EINVAL if args[0] is not 0 or any vCPUs have been
+ created.
This capability disables the NX huge pages mitigation for iTLB MULTIHIT.
diff --git a/Documentation/watchdog/watchdog-parameters.rst b/Documentation/watchdog/watchdog-parameters.rst
index 223c99361a30..29153eed6689 100644
--- a/Documentation/watchdog/watchdog-parameters.rst
+++ b/Documentation/watchdog/watchdog-parameters.rst
@@ -425,6 +425,18 @@ pnx833x_wdt:
-------------------------------------------------
+pseries-wdt:
+ action:
+ Action taken when watchdog expires: 0 (power off), 1 (restart),
+ 2 (dump and restart). (default=1)
+ timeout:
+ Initial watchdog timeout in seconds. (default=60)
+ nowayout:
+ Watchdog cannot be stopped once started.
+ (default=kernel config parameter)
+
+-------------------------------------------------
+
rc32434_wdt:
timeout:
Watchdog timeout value, in seconds (default=20)
diff --git a/Documentation/x86/sgx.rst b/Documentation/x86/sgx.rst
index 265568a9292c..2bcbffacbed5 100644
--- a/Documentation/x86/sgx.rst
+++ b/Documentation/x86/sgx.rst
@@ -100,6 +100,21 @@ pages and establish enclave page permissions.
sgx_ioc_enclave_init
sgx_ioc_enclave_provision
+Enclave runtime management
+--------------------------
+
+Systems supporting SGX2 additionally support changes to initialized
+enclaves: modifying enclave page permissions and type, and dynamically
+adding and removing of enclave pages. When an enclave accesses an address
+within its address range that does not have a backing page then a new
+regular page will be dynamically added to the enclave. The enclave is
+still required to run EACCEPT on the new page before it can be used.
+
+.. kernel-doc:: arch/x86/kernel/cpu/sgx/ioctl.c
+ :functions: sgx_ioc_enclave_restrict_permissions
+ sgx_ioc_enclave_modify_types
+ sgx_ioc_enclave_remove_pages
+
Enclave vDSO
------------
diff --git a/Documentation/x86/x86_64/boot-options.rst b/Documentation/x86/x86_64/boot-options.rst
index 03ec9cf01181..cbd14124a667 100644
--- a/Documentation/x86/x86_64/boot-options.rst
+++ b/Documentation/x86/x86_64/boot-options.rst
@@ -287,11 +287,13 @@ iommu options only relevant to the AMD GART hardware IOMMU:
iommu options only relevant to the software bounce buffering (SWIOTLB) IOMMU
implementation:
- swiotlb=<pages>[,force]
- <pages>
- Prereserve that many 128K pages for the software IO bounce buffering.
+ swiotlb=<slots>[,force,noforce]
+ <slots>
+ Prereserve that many 2K slots for the software IO bounce buffering.
force
Force all IO through the software TLB.
+ noforce
+ Do not initialize the software TLB.
Miscellaneous
diff --git a/MAINTAINERS b/MAINTAINERS
index a9f77648c107..589517372408 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -264,6 +264,11 @@ W: http://www.adaptec.com/
F: Documentation/scsi/aacraid.rst
F: drivers/scsi/aacraid/
+AB8500 BATTERY AND CHARGER DRIVERS
+M: Linus Walleij <linus.walleij@linaro.org>
+F: Documentation/devicetree/bindings/power/supply/*ab8500*
+F: drivers/power/supply/*ab8500*
+
ABI/API
L: linux-api@vger.kernel.org
F: include/linux/syscalls.h
@@ -736,6 +741,14 @@ S: Maintained
F: Documentation/i2c/busses/i2c-ali1563.rst
F: drivers/i2c/busses/i2c-ali1563.c
+ALIBABA ELASTIC RDMA DRIVER
+M: Cheng Xu <chengyou@linux.alibaba.com>
+M: Kai Shen <kaishen@linux.alibaba.com>
+L: linux-rdma@vger.kernel.org
+S: Supported
+F: drivers/infiniband/hw/erdma
+F: include/uapi/rdma/erdma-abi.h
+
ALIENWARE WMI DRIVER
L: Dell.Client.Kernel@dell.com
S: Maintained
@@ -808,7 +821,7 @@ S: Maintained
F: drivers/staging/media/sunxi/cedrus/
ALPHA PORT
-M: Richard Henderson <rth@twiddle.net>
+M: Richard Henderson <richard.henderson@linaro.org>
M: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
M: Matt Turner <mattst88@gmail.com>
L: linux-alpha@vger.kernel.org
@@ -1007,7 +1020,7 @@ AMD PMC DRIVER
M: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
L: platform-driver-x86@vger.kernel.org
S: Maintained
-F: drivers/platform/x86/amd-pmc.*
+F: drivers/platform/x86/amd/pmc.c
AMD HSMP DRIVER
M: Naveen Krishna Chatradhi <naveenkrishna.chatradhi@amd.com>
@@ -1017,7 +1030,7 @@ S: Maintained
F: Documentation/x86/amd_hsmp.rst
F: arch/x86/include/asm/amd_hsmp.h
F: arch/x86/include/uapi/asm/amd_hsmp.h
-F: drivers/platform/x86/amd_hsmp.c
+F: drivers/platform/x86/amd/hsmp.c
AMD POWERPLAY AND SWSMU
M: Evan Quan <evan.quan@amd.com>
@@ -1382,10 +1395,14 @@ F: include/uapi/linux/apm_bios.h
APPARMOR SECURITY MODULE
M: John Johansen <john.johansen@canonical.com>
-L: apparmor@lists.ubuntu.com (subscribers-only, general discussion)
+M: John Johansen <john@apparmor.net>
+L: apparmor@lists.ubuntu.com (moderated for non-subscribers)
S: Supported
-W: wiki.apparmor.net
+W: apparmor.net
+B: https://gitlab.com/apparmor/apparmor-kernel
+C: irc://irc.oftc.net/apparmor
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jj/linux-apparmor
+T: https://gitlab.com/apparmor/apparmor-kernel.git
F: Documentation/admin-guide/LSM/apparmor.rst
F: security/apparmor/
@@ -1842,6 +1859,7 @@ ARM/APPLE MACHINE SUPPORT
M: Hector Martin <marcan@marcan.st>
M: Sven Peter <sven@svenpeter.dev>
R: Alyssa Rosenzweig <alyssa@rosenzweig.io>
+L: asahi@lists.linux.dev
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
W: https://asahilinux.org
@@ -1851,6 +1869,7 @@ T: git https://github.com/AsahiLinux/linux.git
F: Documentation/devicetree/bindings/arm/apple.yaml
F: Documentation/devicetree/bindings/arm/apple/*
F: Documentation/devicetree/bindings/clock/apple,nco.yaml
+F: Documentation/devicetree/bindings/dma/apple,admac.yaml
F: Documentation/devicetree/bindings/i2c/apple,i2c.yaml
F: Documentation/devicetree/bindings/interrupt-controller/apple,*
F: Documentation/devicetree/bindings/iommu/apple,dart.yaml
@@ -1864,6 +1883,7 @@ F: Documentation/devicetree/bindings/power/apple*
F: Documentation/devicetree/bindings/watchdog/apple,wdt.yaml
F: arch/arm64/boot/dts/apple/
F: drivers/clk/clk-apple-nco.c
+F: drivers/dma/apple-admac.c
F: drivers/i2c/busses/i2c-pasemi-core.c
F: drivers/i2c/busses/i2c-pasemi-platform.c
F: drivers/iommu/apple-dart.c
@@ -2158,7 +2178,7 @@ M: Jean-Marie Verdun <verdun@hpe.com>
M: Nick Hawkins <nick.hawkins@hpe.com>
S: Maintained
F: Documentation/devicetree/bindings/arm/hpe,gxp.yaml
-F: Documentation/devicetree/bindings/spi/hpe,gxp-spi.yaml
+F: Documentation/devicetree/bindings/spi/hpe,gxp-spifi.yaml
F: Documentation/devicetree/bindings/timer/hpe,gxp-timer.yaml
F: arch/arm/boot/dts/hpe-bmc*
F: arch/arm/boot/dts/hpe-gxp*
@@ -2466,11 +2486,13 @@ S: Supported
F: Documentation/devicetree/bindings/*/*/*npcm*
F: Documentation/devicetree/bindings/*/*npcm*
F: Documentation/devicetree/bindings/arm/npcm/*
+F: Documentation/devicetree/bindings/rtc/nuvoton,nct3018y.yaml
F: arch/arm/boot/dts/nuvoton-npcm*
F: arch/arm/mach-npcm/
F: arch/arm64/boot/dts/nuvoton/
F: drivers/*/*npcm*
F: drivers/*/*/*npcm*
+F: drivers/rtc/rtc-nct3018y.c
F: include/dt-bindings/clock/nuvoton,npcm7xx-clock.h
F: include/dt-bindings/clock/nuvoton,npcm845-clk.h
@@ -2609,7 +2631,7 @@ L: linux-unisoc@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/arm/rda.yaml
F: Documentation/devicetree/bindings/gpio/gpio-rda.yaml
-F: Documentation/devicetree/bindings/interrupt-controller/rda,8810pl-intc.txt
+F: Documentation/devicetree/bindings/interrupt-controller/rda,8810pl-intc.yaml
F: Documentation/devicetree/bindings/serial/rda,8810pl-uart.yaml
F: Documentation/devicetree/bindings/timer/rda,8810pl-timer.yaml
F: arch/arm/boot/dts/rda8810pl-*
@@ -2689,6 +2711,7 @@ B: mailto:linux-samsung-soc@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/krzk/linux.git
F: Documentation/arm/samsung/
F: Documentation/devicetree/bindings/arm/samsung/
+F: Documentation/devicetree/bindings/hwinfo/samsung,*
F: Documentation/devicetree/bindings/power/pd-samsung.yaml
F: Documentation/devicetree/bindings/soc/samsung/
F: arch/arm/boot/dts/exynos*
@@ -2941,6 +2964,7 @@ M: Tero Kristo <kristo@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
F: Documentation/devicetree/bindings/arm/ti/k3.yaml
+F: Documentation/devicetree/bindings/hwinfo/ti,k3-socinfo.yaml
F: arch/arm64/boot/dts/ti/Makefile
F: arch/arm64/boot/dts/ti/k3-*
F: include/dt-bindings/pinctrl/k3.h
@@ -3488,7 +3512,7 @@ W: https://wireless.wiki.kernel.org/en/users/Drivers/b43
F: drivers/net/wireless/broadcom/b43legacy/
BACKLIGHT CLASS/SUBSYSTEM
-M: Lee Jones <lee.jones@linaro.org>
+M: Lee Jones <lee@kernel.org>
M: Daniel Thompson <daniel.thompson@linaro.org>
M: Jingoo Han <jingoohan1@gmail.com>
L: dri-devel@lists.freedesktop.org
@@ -3588,9 +3612,9 @@ F: include/linux/find.h
F: include/linux/nodemask.h
F: lib/bitmap.c
F: lib/cpumask.c
+F: lib/cpumask_kunit.c
F: lib/find_bit.c
F: lib/find_bit_benchmark.c
-F: lib/nodemask.c
F: lib/test_bitmap.c
F: tools/include/linux/bitmap.h
F: tools/include/linux/find.h
@@ -3656,6 +3680,7 @@ F: Documentation/networking/bonding.rst
F: drivers/net/bonding/
F: include/net/bond*
F: include/uapi/linux/if_bonding.h
+F: tools/testing/selftests/drivers/net/bonding/
BOSCH SENSORTEC BMA400 ACCELEROMETER IIO DRIVER
M: Dan Robertson <dan@dlrobertson.com>
@@ -4771,7 +4796,6 @@ L: keyrings@vger.kernel.org
S: Maintained
F: Documentation/admin-guide/module-signing.rst
F: certs/
-F: scripts/check-blacklist-hashes.awk
F: scripts/sign-file.c
F: tools/certs/
@@ -4899,6 +4923,7 @@ S: Maintained
F: Documentation/devicetree/bindings/sound/cirrus,cs*
F: include/dt-bindings/sound/cs*
F: sound/pci/hda/cs*
+F: sound/pci/hda/hda_cs_dsp_ctl.*
F: sound/soc/codecs/cs*
CIRRUS LOGIC DSP FIRMWARE DRIVER
@@ -5109,16 +5134,20 @@ F: include/linux/clk/
F: include/linux/of_clk.h
X: drivers/clk/clkdev.c
-COMMON INTERNET FILE SYSTEM CLIENT (CIFS)
+COMMON INTERNET FILE SYSTEM CLIENT (CIFS and SMB3)
M: Steve French <sfrench@samba.org>
+R: Paulo Alcantara <pc@cjr.nz> (DFS, global name space)
+R: Ronnie Sahlberg <lsahlber@redhat.com> (directory leases, sparse files)
+R: Shyam Prasad N <sprasad@microsoft.com> (multichannel)
L: linux-cifs@vger.kernel.org
L: samba-technical@lists.samba.org (moderated for non-subscribers)
S: Supported
-W: http://linux-cifs.samba.org/
+W: https://wiki.samba.org/index.php/LinuxCIFS
T: git git://git.samba.org/sfrench/cifs-2.6.git
F: Documentation/admin-guide/cifs/
F: fs/cifs/
F: fs/smbfs_common/
+F: include/uapi/linux/cifs
COMPACTPCI HOTPLUG CORE
M: Scott Murray <scott@spiteful.org>
@@ -5507,7 +5536,7 @@ W: http://www.chelsio.com
F: drivers/net/ethernet/chelsio/cxgb3/
CXGB3 ISCSI DRIVER (CXGB3I)
-M: Karen Xie <kxie@chelsio.com>
+M: Varun Prakash <varun@chelsio.com>
L: linux-scsi@vger.kernel.org
S: Supported
W: http://www.chelsio.com
@@ -5539,7 +5568,7 @@ W: http://www.chelsio.com
F: drivers/net/ethernet/chelsio/cxgb4/
CXGB4 ISCSI DRIVER (CXGB4I)
-M: Karen Xie <kxie@chelsio.com>
+M: Varun Prakash <varun@chelsio.com>
L: linux-scsi@vger.kernel.org
S: Supported
W: http://www.chelsio.com
@@ -5655,7 +5684,7 @@ L: linux-mm@kvack.org
S: Maintained
F: Documentation/ABI/testing/sysfs-kernel-mm-damon
F: Documentation/admin-guide/mm/damon/
-F: Documentation/vm/damon/
+F: Documentation/mm/damon/
F: include/linux/damon.h
F: include/trace/events/damon.h
F: mm/damon/
@@ -5985,6 +6014,7 @@ W: http://www.dialog-semiconductor.com/products
F: Documentation/devicetree/bindings/input/da90??-onkey.txt
F: Documentation/devicetree/bindings/input/dlg,da72??.txt
F: Documentation/devicetree/bindings/mfd/da90*.txt
+F: Documentation/devicetree/bindings/mfd/da90*.yaml
F: Documentation/devicetree/bindings/regulator/dlg,da9*.yaml
F: Documentation/devicetree/bindings/regulator/da92*.txt
F: Documentation/devicetree/bindings/regulator/slg51000.txt
@@ -6100,6 +6130,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine.git
F: Documentation/devicetree/bindings/dma/
F: Documentation/driver-api/dmaengine/
F: drivers/dma/
+F: include/dt-bindings/dma/
F: include/linux/dma/
F: include/linux/dmaengine.h
F: include/linux/of_dma.h
@@ -6270,14 +6301,6 @@ F: Documentation/networking/device_drivers/ethernet/freescale/dpaa2/switch-drive
F: drivers/net/ethernet/freescale/dpaa2/dpaa2-switch*
F: drivers/net/ethernet/freescale/dpaa2/dpsw*
-DPT_I2O SCSI RAID DRIVER
-M: Adaptec OEM Raid Solutions <aacraid@microsemi.com>
-L: linux-scsi@vger.kernel.org
-S: Maintained
-W: http://www.adaptec.com/
-F: drivers/scsi/dpt*
-F: drivers/scsi/dpt/
-
DRBD DRIVER
M: Philipp Reisner <philipp.reisner@linbit.com>
M: Lars Ellenberg <lars.ellenberg@linbit.com>
@@ -7680,6 +7703,7 @@ M: Namjae Jeon <linkinjeon@kernel.org>
M: Sungjong Seo <sj1557.seo@samsung.com>
L: linux-fsdevel@vger.kernel.org
S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/linkinjeon/exfat.git
F: fs/exfat/
EXT2 FILE SYSTEM
@@ -8131,7 +8155,6 @@ L: linux-fbdev@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: drivers/video/fbdev/imxfb.c
-F: include/linux/platform_data/video-imxfb.h
FREESCALE IMX DDR PMU DRIVER
M: Frank Li <Frank.li@nxp.com>
@@ -9246,7 +9269,7 @@ HMM - Heterogeneous Memory Management
M: Jérôme Glisse <jglisse@redhat.com>
L: linux-mm@kvack.org
S: Maintained
-F: Documentation/vm/hmm.rst
+F: Documentation/mm/hmm.rst
F: include/linux/hmm*
F: lib/test_hmm*
F: mm/hmm*
@@ -9344,8 +9367,8 @@ L: linux-mm@kvack.org
S: Maintained
F: Documentation/ABI/testing/sysfs-kernel-mm-hugepages
F: Documentation/admin-guide/mm/hugetlbpage.rst
-F: Documentation/vm/hugetlbfs_reserv.rst
-F: Documentation/vm/vmemmap_dedup.rst
+F: Documentation/mm/hugetlbfs_reserv.rst
+F: Documentation/mm/vmemmap_dedup.rst
F: fs/hugetlbfs/
F: include/linux/hugetlb.h
F: mm/hugetlb.c
@@ -9681,7 +9704,7 @@ F: arch/powerpc/platforms/powernv/copy-paste.h
F: arch/powerpc/platforms/powernv/vas*
IBM Power Virtual Ethernet Device Driver
-M: Cristobal Forno <cforno12@linux.ibm.com>
+M: Nick Child <nnac123@linux.ibm.com>
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/ibm/ibmveth.*
@@ -9760,7 +9783,7 @@ M: Christian Brauner <brauner@kernel.org>
M: Seth Forshee <sforshee@kernel.org>
L: linux-fsdevel@vger.kernel.org
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/brauner/linux.git
+T: git://git.kernel.org/pub/scm/linux/kernel/git/vfs/idmapping.git
F: Documentation/filesystems/idmappings.rst
F: tools/testing/selftests/mount_setattr/
F: include/linux/mnt_idmapping.h
@@ -10043,6 +10066,13 @@ L: linux-fbdev@vger.kernel.org
S: Maintained
F: drivers/video/fbdev/i810/
+INTEL 8255 GPIO DRIVER
+M: William Breathitt Gray <william.gray@linaro.org>
+L: linux-gpio@vger.kernel.org
+S: Maintained
+F: drivers/gpio/gpio-i8255.c
+F: drivers/gpio/gpio-i8255.h
+
INTEL ASoC DRIVERS
M: Cezary Rojewski <cezary.rojewski@intel.com>
M: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
@@ -10179,7 +10209,8 @@ S: Supported
Q: https://patchwork.kernel.org/project/linux-dmaengine/list/
F: drivers/dma/ioat*
-INTEL IADX DRIVER
+INTEL IDXD DRIVER
+M: Fenghua Yu <fenghua.yu@intel.com>
M: Dave Jiang <dave.jiang@intel.com>
L: dmaengine@vger.kernel.org
S: Supported
@@ -10217,7 +10248,6 @@ L: iommu@lists.linux.dev
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
F: drivers/iommu/intel/
-F: include/linux/intel-iommu.h
F: include/linux/intel-svm.h
INTEL IOP-ADMA DMA DRIVER
@@ -10373,7 +10403,7 @@ F: drivers/gpio/gpio-*cove.c
INTEL PMIC MULTIFUNCTION DEVICE DRIVERS
M: Andy Shevchenko <andy@kernel.org>
-S: Maintained
+S: Supported
F: drivers/mfd/intel_soc_pmic*
F: include/linux/mfd/intel_soc_pmic*
@@ -10589,9 +10619,20 @@ T: git git://git.kernel.org/pub/scm/fs/xfs/xfs-linux.git
F: fs/iomap/
F: include/linux/iomap.h
-IOMMU DRIVERS
+IOMMU DMA-API LAYER
+M: Robin Murphy <robin.murphy@arm.com>
+L: iommu@lists.linux.dev
+S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
+F: drivers/iommu/dma-iommu.c
+F: drivers/iommu/iova.c
+F: include/linux/dma-iommu.h
+F: include/linux/iova.h
+
+IOMMU SUBSYSTEM
M: Joerg Roedel <joro@8bytes.org>
M: Will Deacon <will@kernel.org>
+R: Robin Murphy <robin.murphy@arm.com>
L: iommu@lists.linux.dev
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
@@ -10619,6 +10660,7 @@ T: git git://git.kernel.dk/linux-block
T: git git://git.kernel.dk/liburing
F: io_uring/
F: include/linux/io_uring.h
+F: include/linux/io_uring_types.h
F: include/uapi/linux/io_uring.h
F: tools/io_uring/
@@ -11035,11 +11077,13 @@ R: Sergey Senozhatsky <senozhatsky@chromium.org>
L: linux-cifs@vger.kernel.org
S: Maintained
T: git git://git.samba.org/ksmbd.git
+F: Documentation/filesystems/cifs/ksmbd.rst
F: fs/ksmbd/
F: fs/smbfs_common/
KERNEL UNIT TESTING FRAMEWORK (KUnit)
M: Brendan Higgins <brendanhiggins@google.com>
+M: David Gow <davidgow@google.com>
L: linux-kselftest@vger.kernel.org
L: kunit-dev@googlegroups.com
S: Maintained
@@ -11602,8 +11646,8 @@ F: drivers/macintosh/
LINUX FOR POWERPC (32-BIT AND 64-BIT)
M: Michael Ellerman <mpe@ellerman.id.au>
-R: Benjamin Herrenschmidt <benh@kernel.crashing.org>
-R: Paul Mackerras <paulus@samba.org>
+R: Nicholas Piggin <npiggin@gmail.com>
+R: Christophe Leroy <christophe.leroy@csgroup.eu>
L: linuxppc-dev@lists.ozlabs.org
S: Supported
W: https://github.com/linuxppc/wiki/wiki
@@ -12386,7 +12430,6 @@ F: Documentation/devicetree/bindings/*/maxim,max77686.yaml
F: Documentation/devicetree/bindings/*/maxim,max77693.yaml
F: Documentation/devicetree/bindings/*/maxim,max77843.yaml
F: Documentation/devicetree/bindings/clock/maxim,max77686.txt
-F: Documentation/devicetree/bindings/mfd/max77693.txt
F: drivers/*/*max77843.c
F: drivers/*/max14577*.c
F: drivers/*/max77686*.c
@@ -12811,7 +12854,7 @@ F: Documentation/devicetree/bindings/net/wireless/mediatek,mt76.yaml
F: drivers/net/wireless/mediatek/mt76/
MEDIATEK MT7601U WIRELESS LAN DRIVER
-M: Jakub Kicinski <kubakici@wp.pl>
+M: Jakub Kicinski <kuba@kernel.org>
L: linux-wireless@vger.kernel.org
S: Maintained
F: drivers/net/wireless/mediatek/mt7601u/
@@ -13108,6 +13151,7 @@ W: http://www.linux-mm.org
T: git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
T: quilt git://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new
F: include/linux/gfp.h
+F: include/linux/gfp_types.h
F: include/linux/memory_hotplug.h
F: include/linux/mm.h
F: include/linux/mmzone.h
@@ -13465,6 +13509,12 @@ F: drivers/scsi/smartpqi/smartpqi*.[ch]
F: include/linux/cciss*.h
F: include/uapi/linux/cciss*.h
+MICROSOFT SURFACE AGGREGATOR TABLET-MODE SWITCH
+M: Maximilian Luz <luzmaximilian@gmail.com>
+L: platform-driver-x86@vger.kernel.org
+S: Maintained
+F: drivers/platform/surface/surface_aggregator_tabletsw.c
+
MICROSOFT SURFACE BATTERY AND AC DRIVERS
M: Maximilian Luz <luzmaximilian@gmail.com>
L: linux-pm@vger.kernel.org
@@ -13536,6 +13586,12 @@ F: include/linux/surface_acpi_notify.h
F: include/linux/surface_aggregator/
F: include/uapi/linux/surface_aggregator/
+MICROSOFT SURFACE SYSTEM AGGREGATOR HUB DRIVER
+M: Maximilian Luz <luzmaximilian@gmail.com>
+L: platform-driver-x86@vger.kernel.org
+S: Maintained
+F: drivers/platform/surface/surface_aggregator_hub.c
+
MICROTEK X6 SCANNER
M: Oliver Neukum <oliver@neukum.org>
S: Maintained
@@ -13576,6 +13632,7 @@ F: Documentation/devicetree/bindings/mips/
F: Documentation/mips/
F: arch/mips/
F: drivers/platform/mips/
+F: include/dt-bindings/mips/
MIPS BOSTON DEVELOPMENT BOARD
M: Paul Burton <paulburton@kernel.org>
@@ -13703,6 +13760,7 @@ S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mcgrof/linux.git modules-next
F: include/linux/module.h
F: kernel/module/
+F: scripts/module*
MONOLITHIC POWER SYSTEM PMIC DRIVER
M: Saravanan Sekar <sravanhome@gmail.com>
@@ -13845,7 +13903,7 @@ F: Documentation/devicetree/bindings/media/i2c/aptina,mt9v111.yaml
F: drivers/media/i2c/mt9v111.c
MULTIFUNCTION DEVICES (MFD)
-M: Lee Jones <lee.jones@linaro.org>
+M: Lee Jones <lee@kernel.org>
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lee/mfd.git
F: Documentation/devicetree/bindings/mfd/
@@ -14414,6 +14472,7 @@ W: https://github.com/jonmason/ntb/wiki
T: git git://github.com/jonmason/ntb.git
F: drivers/net/ntb_netdev.c
F: drivers/ntb/
+F: drivers/pci/endpoint/functions/pci-epf-*ntb.c
F: include/linux/ntb.h
F: include/linux/ntb_transport.h
F: tools/testing/selftests/ntb/
@@ -14482,7 +14541,8 @@ S: Supported
W: http://git.infradead.org/nvme.git
T: git://git.infradead.org/nvme.git
F: drivers/nvme/host/
-F: include/linux/nvme.h
+F: drivers/nvme/common/
+F: include/linux/nvme*
F: include/uapi/linux/nvme_ioctl.h
NVM EXPRESS FC TRANSPORT DRIVERS
@@ -15311,7 +15371,7 @@ M: Pasha Tatashin <pasha.tatashin@soleen.com>
M: Andrew Morton <akpm@linux-foundation.org>
L: linux-mm@kvack.org
S: Maintained
-F: Documentation/vm/page_table_check.rst
+F: Documentation/mm/page_table_check.rst
F: include/linux/page_table_check.h
F: mm/page_table_check.c
@@ -15837,6 +15897,14 @@ L: linux-pci@vger.kernel.org
S: Maintained
F: drivers/pci/controller/dwc/*spear*
+PCI DRIVER FOR XILINX VERSAL CPM
+M: Bharat Kumar Gogada <bharat.kumar.gogada@amd.com>
+M: Michal Simek <michal.simek@amd.com>
+L: linux-pci@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml
+F: drivers/pci/controller/pcie-xilinx-cpm.c
+
PCMCIA SUBSYSTEM
M: Dominik Brodowski <linux@dominikbrodowski.net>
S: Odd Fixes
@@ -16005,6 +16073,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git
F: Documentation/devicetree/bindings/pinctrl/
F: Documentation/driver-api/pin-control.rst
F: drivers/pinctrl/
+F: include/dt-bindings/pinctrl/
F: include/linux/pinctrl/
PIN CONTROLLER - AMD
@@ -16489,7 +16558,7 @@ L: linux-pwm@vger.kernel.org
S: Maintained
Q: https://patchwork.ozlabs.org/project/linux-pwm/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/thierry.reding/linux-pwm.git
-F: Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
+F: Documentation/devicetree/bindings/gpio/gpio-mvebu.yaml
F: Documentation/devicetree/bindings/pwm/
F: Documentation/driver-api/pwm.rst
F: drivers/gpio/gpio-mvebu.c
@@ -16544,6 +16613,9 @@ M: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
M: Banajit Goswami <bgoswami@quicinc.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Supported
+F: include/dt-bindings/sound/qcom,wcd9335.h
+F: sound/soc/codecs/lpass-rx-macro.*
+F: sound/soc/codecs/lpass-tx-macro.*
F: sound/soc/codecs/lpass-va-macro.c
F: sound/soc/codecs/lpass-wsa-macro.*
F: sound/soc/codecs/msm8916-wcd-analog.c
@@ -16551,7 +16623,9 @@ F: sound/soc/codecs/msm8916-wcd-digital.c
F: sound/soc/codecs/wcd9335.*
F: sound/soc/codecs/wcd934x.c
F: sound/soc/codecs/wcd-clsh-v2.*
+F: sound/soc/codecs/wcd-mbhc-v2.*
F: sound/soc/codecs/wsa881x.c
+F: sound/soc/codecs/wsa883x.c
F: sound/soc/qcom/
QCOM EMBEDDED USB DEBUGGER (EUD)
@@ -16817,7 +16891,7 @@ M: Robert Foss <robert.foss@linaro.org>
L: linux-i2c@vger.kernel.org
L: linux-arm-msm@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/i2c/i2c-qcom-cci.txt
+F: Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml
F: drivers/i2c/busses/i2c-qcom-cci.c
QUALCOMM INTERCONNECT BWMON DRIVER
@@ -17462,6 +17536,7 @@ F: drivers/char/hw_random/mpfs-rng.c
F: drivers/clk/microchip/clk-mpfs.c
F: drivers/mailbox/mailbox-mpfs.c
F: drivers/pci/controller/pcie-microchip-host.c
+F: drivers/rtc/rtc-mpfs.c
F: drivers/soc/microchip/
F: drivers/spi/spi-microchip-core.c
F: drivers/usb/musb/mpfs.c
@@ -17756,7 +17831,7 @@ M: Jason Herne <jjherne@linux.ibm.com>
L: linux-s390@vger.kernel.org
S: Supported
W: http://www.ibm.com/developerworks/linux/linux390/
-F: Documentation/s390/vfio-ap.rst
+F: Documentation/s390/vfio-ap*
F: drivers/s390/crypto/vfio_ap*
S390 VFIO-CCW DRIVER
@@ -18805,6 +18880,7 @@ SOFTWARE RAID (Multiple Disks) SUPPORT
M: Song Liu <song@kernel.org>
L: linux-raid@vger.kernel.org
S: Supported
+Q: https://patchwork.kernel.org/project/linux-raid/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/song/md.git
F: drivers/md/Kconfig
F: drivers/md/Makefile
@@ -19094,7 +19170,7 @@ F: drivers/pinctrl/spear/
SPI NOR SUBSYSTEM
M: Tudor Ambarus <tudor.ambarus@microchip.com>
-M: Pratyush Yadav <p.yadav@ti.com>
+M: Pratyush Yadav <pratyush@kernel.org>
R: Michael Walle <michael@walle.cc>
L: linux-mtd@lists.infradead.org
S: Maintained
@@ -19565,8 +19641,9 @@ F: Documentation/devicetree/bindings/gpio/snps,creg-gpio.txt
F: drivers/gpio/gpio-creg-snps.c
SYNOPSYS DESIGNWARE 8250 UART DRIVER
+M: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
-S: Maintained
+S: Supported
F: drivers/tty/serial/8250/8250_dw.c
F: drivers/tty/serial/8250/8250_dwlib.*
F: drivers/tty/serial/8250/8250_lpss.c
@@ -19647,7 +19724,7 @@ S: Maintained
F: drivers/mmc/host/sdhci-pci-dwc-mshc.c
SYSTEM CONFIGURATION (SYSCON)
-M: Lee Jones <lee.jones@linaro.org>
+M: Lee Jones <lee@kernel.org>
M: Arnd Bergmann <arnd@arndb.de>
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lee/mfd.git
@@ -20778,6 +20855,13 @@ L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: drivers/ufs/host/ufs-mediatek*
+UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER RENESAS HOOKS
+M: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+L: linux-renesas-soc@vger.kernel.org
+L: linux-scsi@vger.kernel.org
+S: Maintained
+F: drivers/ufs/host/ufs-renesas.c
+
UNSORTED BLOCK IMAGES (UBI)
M: Richard Weinberger <richard@nod.at>
L: linux-mtd@lists.infradead.org
@@ -21218,6 +21302,7 @@ M: OGAWA Hirofumi <hirofumi@mail.parknet.co.jp>
S: Maintained
F: Documentation/filesystems/vfat.rst
F: fs/fat/
+F: tools/testing/selftests/filesystems/fat/
VFIO DRIVER
M: Alex Williamson <alex.williamson@redhat.com>
@@ -22119,12 +22204,14 @@ F: drivers/*/xen-*front.c
F: drivers/xen/
F: include/uapi/xen/
F: include/xen/
+F: kernel/configs/xen.config
XEN HYPERVISOR X86
M: Juergen Gross <jgross@suse.com>
R: Boris Ostrovsky <boris.ostrovsky@oracle.com>
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
S: Supported
+F: arch/x86/configs/xen.config
F: arch/x86/include/asm/pvclock-abi.h
F: arch/x86/include/asm/xen/
F: arch/x86/platform/pvh/
@@ -22437,7 +22524,7 @@ M: Nitin Gupta <ngupta@vflare.org>
R: Sergey Senozhatsky <senozhatsky@chromium.org>
L: linux-mm@kvack.org
S: Maintained
-F: Documentation/vm/zsmalloc.rst
+F: Documentation/mm/zsmalloc.rst
F: include/linux/zsmalloc.h
F: mm/zsmalloc.c
diff --git a/Makefile b/Makefile
index dc6295f91263..952d354069a4 100644
--- a/Makefile
+++ b/Makefile
@@ -1,9 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
-VERSION = 5
-PATCHLEVEL = 19
+VERSION = 6
+PATCHLEVEL = 0
SUBLEVEL = 0
-EXTRAVERSION =
-NAME = Superb Owl
+EXTRAVERSION = -rc3
+NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
@@ -129,6 +129,9 @@ endif
$(if $(word 2, $(KBUILD_EXTMOD)), \
$(error building multiple external modules is not supported))
+$(foreach x, % :, $(if $(findstring $x, $(KBUILD_EXTMOD)), \
+ $(error module directory path cannot contain '$x')))
+
# Remove trailing slashes
ifneq ($(filter %/, $(KBUILD_EXTMOD)),)
KBUILD_EXTMOD := $(shell dirname $(KBUILD_EXTMOD).)
@@ -755,8 +758,6 @@ KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
ifdef CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE
KBUILD_CFLAGS += -O2
-else ifdef CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3
-KBUILD_CFLAGS += -O3
else ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
KBUILD_CFLAGS += -Os
endif
@@ -1033,6 +1034,11 @@ KBUILD_CFLAGS += $(KCFLAGS)
KBUILD_LDFLAGS_MODULE += --build-id=sha1
LDFLAGS_vmlinux += --build-id=sha1
+KBUILD_LDFLAGS += -z noexecstack
+ifeq ($(CONFIG_LD_IS_BFD),y)
+KBUILD_LDFLAGS += $(call ld-option,--no-warn-rwx-segments)
+endif
+
ifeq ($(CONFIG_STRIP_ASM_SYMS),y)
LDFLAGS_vmlinux += $(call ld-option, -X,)
endif
@@ -1107,13 +1113,11 @@ vmlinux-alldirs := $(sort $(vmlinux-dirs) Documentation \
$(patsubst %/,%,$(filter %/, $(core-) \
$(drivers-) $(libs-))))
-subdir-modorder := $(addsuffix modules.order,$(filter %/, \
- $(core-y) $(core-m) $(libs-y) $(libs-m) \
- $(drivers-y) $(drivers-m)))
-
build-dirs := $(vmlinux-dirs)
clean-dirs := $(vmlinux-alldirs)
+subdir-modorder := $(addsuffix /modules.order, $(build-dirs))
+
# Externally visible symbols (used by link-vmlinux.sh)
KBUILD_VMLINUX_OBJS := $(head-y) $(patsubst %/,%/built-in.a, $(core-y))
KBUILD_VMLINUX_OBJS += $(addsuffix built-in.a, $(filter %/, $(libs-y)))
@@ -1370,16 +1374,21 @@ endif
ifneq ($(dtstree),)
-%.dtb: include/config/kernel.release scripts_dtc
+%.dtb: dtbs_prepare
$(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@
-%.dtbo: include/config/kernel.release scripts_dtc
+%.dtbo: dtbs_prepare
$(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@
-PHONY += dtbs dtbs_install dtbs_check
-dtbs: include/config/kernel.release scripts_dtc
+PHONY += dtbs dtbs_prepare dtbs_install dtbs_check
+dtbs: dtbs_prepare
$(Q)$(MAKE) $(build)=$(dtstree)
+# include/config/kernel.release is actually needed when installing DTBs because
+# INSTALL_DTBS_PATH contains $(KERNELRELEASE). However, we do not want to make
+# dtbs_install depend on it as dtbs_install may run as root.
+dtbs_prepare: include/config/kernel.release scripts_dtc
+
ifneq ($(filter dtbs_check, $(MAKECMDGOALS)),)
export CHECK_DTBS=y
dtbs: dt_binding_check
diff --git a/arch/Kconfig b/arch/Kconfig
index 761d169ba7fd..5dbf11a5ba4e 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -53,7 +53,6 @@ config KPROBES
config JUMP_LABEL
bool "Optimize very unlikely/likely branches"
depends on HAVE_ARCH_JUMP_LABEL
- depends on CC_HAS_ASM_GOTO
select OBJTOOL if HAVE_JUMP_LABEL_HACK
help
This option enables a transparent branch optimization that
@@ -1361,7 +1360,7 @@ config HAVE_PREEMPT_DYNAMIC_CALL
config HAVE_PREEMPT_DYNAMIC_KEY
bool
- depends on HAVE_ARCH_JUMP_LABEL && CC_HAS_ASM_GOTO
+ depends on HAVE_ARCH_JUMP_LABEL
select HAVE_PREEMPT_DYNAMIC
help
An architecture should select this if it can handle the preemption
@@ -1406,6 +1405,9 @@ config ARCH_HAS_ELFCORE_COMPAT
config ARCH_HAS_PARANOID_L1D_FLUSH
bool
+config ARCH_HAVE_TRACE_MMIO_ACCESS
+ bool
+
config DYNAMIC_SIGFRAME
bool
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 7d0d26b5b3f5..97fce7386b00 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -17,7 +17,6 @@ config ALPHA
select HAVE_PERF_EVENTS
select NEED_DMA_MAP_STATE
select NEED_SG_DMA_LENGTH
- select VIRT_TO_BUS
select GENERIC_IRQ_PROBE
select GENERIC_PCI_IOMAP
select AUTO_IRQ_AFFINITY if SMP
diff --git a/arch/alpha/include/asm/bitops.h b/arch/alpha/include/asm/bitops.h
index e1d8483a45f2..bafb1c1f0fdc 100644
--- a/arch/alpha/include/asm/bitops.h
+++ b/arch/alpha/include/asm/bitops.h
@@ -46,8 +46,8 @@ set_bit(unsigned long nr, volatile void * addr)
/*
* WARNING: non atomic version.
*/
-static inline void
-__set_bit(unsigned long nr, volatile void * addr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
{
int *m = ((int *) addr) + (nr >> 5);
@@ -82,8 +82,8 @@ clear_bit_unlock(unsigned long nr, volatile void * addr)
/*
* WARNING: non atomic version.
*/
-static __inline__ void
-__clear_bit(unsigned long nr, volatile void * addr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
int *m = ((int *) addr) + (nr >> 5);
@@ -94,7 +94,7 @@ static inline void
__clear_bit_unlock(unsigned long nr, volatile void * addr)
{
smp_mb();
- __clear_bit(nr, addr);
+ arch___clear_bit(nr, addr);
}
static inline void
@@ -118,8 +118,8 @@ change_bit(unsigned long nr, volatile void * addr)
/*
* WARNING: non atomic version.
*/
-static __inline__ void
-__change_bit(unsigned long nr, volatile void * addr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
{
int *m = ((int *) addr) + (nr >> 5);
@@ -186,8 +186,8 @@ test_and_set_bit_lock(unsigned long nr, volatile void *addr)
/*
* WARNING: non atomic version.
*/
-static inline int
-__test_and_set_bit(unsigned long nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long mask = 1 << (nr & 0x1f);
int *m = ((int *) addr) + (nr >> 5);
@@ -230,8 +230,8 @@ test_and_clear_bit(unsigned long nr, volatile void * addr)
/*
* WARNING: non atomic version.
*/
-static inline int
-__test_and_clear_bit(unsigned long nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long mask = 1 << (nr & 0x1f);
int *m = ((int *) addr) + (nr >> 5);
@@ -272,8 +272,8 @@ test_and_change_bit(unsigned long nr, volatile void * addr)
/*
* WARNING: non atomic version.
*/
-static __inline__ int
-__test_and_change_bit(unsigned long nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long mask = 1 << (nr & 0x1f);
int *m = ((int *) addr) + (nr >> 5);
@@ -283,11 +283,8 @@ __test_and_change_bit(unsigned long nr, volatile void * addr)
return (old & mask) != 0;
}
-static inline int
-test_bit(int nr, const volatile void * addr)
-{
- return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
-}
+#define arch_test_bit generic_test_bit
+#define arch_test_bit_acquire generic_test_bit_acquire
/*
* ffz = Find First Zero in word. Undefined if no zero exists,
@@ -450,6 +447,8 @@ sched_find_first_bit(const unsigned long b[2])
return __ffs(tmp) + ofs;
}
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
+
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>
diff --git a/arch/alpha/include/asm/dma.h b/arch/alpha/include/asm/dma.h
index 28610ea7786d..a04d76b96089 100644
--- a/arch/alpha/include/asm/dma.h
+++ b/arch/alpha/include/asm/dma.h
@@ -365,13 +365,4 @@ extern void free_dma(unsigned int dmanr); /* release it again */
#define KERNEL_HAVE_CHECK_DMA
extern int check_dma(unsigned int dmanr);
-/* From PCI */
-
-#ifdef CONFIG_PCI
-extern int isa_dma_bridge_buggy;
-#else
-#define isa_dma_bridge_buggy (0)
-#endif
-
-
#endif /* _ASM_DMA_H */
diff --git a/arch/alpha/include/asm/floppy.h b/arch/alpha/include/asm/floppy.h
index 588758685439..64b42d9591fc 100644
--- a/arch/alpha/include/asm/floppy.h
+++ b/arch/alpha/include/asm/floppy.h
@@ -20,7 +20,7 @@
#define fd_free_dma() free_dma(FLOPPY_DMA)
#define fd_clear_dma_ff() clear_dma_ff(FLOPPY_DMA)
#define fd_set_dma_mode(mode) set_dma_mode(FLOPPY_DMA,mode)
-#define fd_set_dma_addr(addr) set_dma_addr(FLOPPY_DMA,virt_to_bus(addr))
+#define fd_set_dma_addr(addr) set_dma_addr(FLOPPY_DMA,isa_virt_to_bus(addr))
#define fd_set_dma_count(count) set_dma_count(FLOPPY_DMA,count)
#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
index c9cb554fbe54..d277189b2677 100644
--- a/arch/alpha/include/asm/io.h
+++ b/arch/alpha/include/asm/io.h
@@ -106,15 +106,15 @@ static inline void * phys_to_virt(unsigned long address)
extern unsigned long __direct_map_base;
extern unsigned long __direct_map_size;
-static inline unsigned long __deprecated virt_to_bus(volatile void *address)
+static inline unsigned long __deprecated isa_virt_to_bus(volatile void *address)
{
unsigned long phys = virt_to_phys(address);
unsigned long bus = phys + __direct_map_base;
return phys <= __direct_map_size ? bus : 0;
}
-#define isa_virt_to_bus virt_to_bus
+#define isa_virt_to_bus isa_virt_to_bus
-static inline void * __deprecated bus_to_virt(unsigned long address)
+static inline void * __deprecated isa_bus_to_virt(unsigned long address)
{
void *virt;
@@ -125,7 +125,7 @@ static inline void * __deprecated bus_to_virt(unsigned long address)
virt = phys_to_virt(address);
return (long)address <= 0 ? NULL : virt;
}
-#define isa_bus_to_virt bus_to_virt
+#define isa_bus_to_virt isa_bus_to_virt
/*
* There are different chipsets to interface the Alpha CPUs to the world.
diff --git a/arch/alpha/include/asm/pci.h b/arch/alpha/include/asm/pci.h
index cf6bc1e64d66..6312656279d7 100644
--- a/arch/alpha/include/asm/pci.h
+++ b/arch/alpha/include/asm/pci.h
@@ -56,12 +56,6 @@ struct pci_controller {
/* IOMMU controls. */
-/* TODO: integrate with include/asm-generic/pci.h ? */
-static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
-{
- return channel ? 15 : 14;
-}
-
#define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index
static inline int pci_proc_domain(struct pci_bus *bus)
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index 170451fde043..3ea9661c09ff 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -116,23 +116,6 @@ struct vm_area_struct;
* arch/alpha/mm/fault.c)
*/
/* xwr */
-#define __P000 _PAGE_P(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
-#define __P001 _PAGE_P(_PAGE_FOE | _PAGE_FOW)
-#define __P010 _PAGE_P(_PAGE_FOE)
-#define __P011 _PAGE_P(_PAGE_FOE)
-#define __P100 _PAGE_P(_PAGE_FOW | _PAGE_FOR)
-#define __P101 _PAGE_P(_PAGE_FOW)
-#define __P110 _PAGE_P(0)
-#define __P111 _PAGE_P(0)
-
-#define __S000 _PAGE_S(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
-#define __S001 _PAGE_S(_PAGE_FOE | _PAGE_FOW)
-#define __S010 _PAGE_S(_PAGE_FOE)
-#define __S011 _PAGE_S(_PAGE_FOE)
-#define __S100 _PAGE_S(_PAGE_FOW | _PAGE_FOR)
-#define __S101 _PAGE_S(_PAGE_FOW)
-#define __S110 _PAGE_S(0)
-#define __S111 _PAGE_S(0)
/*
* pgprot_noncached() is only for infiniband pci support, and a real
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index cb64e4797d2a..f4e20f75438f 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -497,12 +497,6 @@ smp_cpus_done(unsigned int max_cpus)
((bogosum + 2500) / (5000/HZ)) % 100);
}
-int
-setup_profiling_timer(unsigned int multiplier)
-{
- return -EINVAL;
-}
-
static void
send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index ec20c1004abf..ef427a6bdd1a 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -155,6 +155,10 @@ retry:
if (fault_signal_pending(fault, regs))
return;
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
index 7511723b7669..a155180d7a83 100644
--- a/arch/alpha/mm/init.c
+++ b/arch/alpha/mm/init.c
@@ -280,3 +280,25 @@ mem_init(void)
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
memblock_free_all();
}
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = _PAGE_P(_PAGE_FOE | _PAGE_FOW |
+ _PAGE_FOR),
+ [VM_READ] = _PAGE_P(_PAGE_FOE | _PAGE_FOW),
+ [VM_WRITE] = _PAGE_P(_PAGE_FOE),
+ [VM_WRITE | VM_READ] = _PAGE_P(_PAGE_FOE),
+ [VM_EXEC] = _PAGE_P(_PAGE_FOW | _PAGE_FOR),
+ [VM_EXEC | VM_READ] = _PAGE_P(_PAGE_FOW),
+ [VM_EXEC | VM_WRITE] = _PAGE_P(0),
+ [VM_EXEC | VM_WRITE | VM_READ] = _PAGE_P(0),
+ [VM_SHARED] = _PAGE_S(_PAGE_FOE | _PAGE_FOW |
+ _PAGE_FOR),
+ [VM_SHARED | VM_READ] = _PAGE_S(_PAGE_FOE | _PAGE_FOW),
+ [VM_SHARED | VM_WRITE] = _PAGE_S(_PAGE_FOE),
+ [VM_SHARED | VM_WRITE | VM_READ] = _PAGE_S(_PAGE_FOE),
+ [VM_SHARED | VM_EXEC] = _PAGE_S(_PAGE_FOW | _PAGE_FOR),
+ [VM_SHARED | VM_EXEC | VM_READ] = _PAGE_S(_PAGE_FOW),
+ [VM_SHARED | VM_EXEC | VM_WRITE] = _PAGE_S(0),
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = _PAGE_S(0)
+};
+DECLARE_VM_GET_PAGE_PROT
diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
index 0016149f9583..e31a8ebc3ecc 100644
--- a/arch/arc/configs/axs101_defconfig
+++ b/arch/arc/configs/axs101_defconfig
@@ -9,7 +9,6 @@ CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
-CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
# CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
index 5b031582a1cf..e0e8567f0d75 100644
--- a/arch/arc/configs/axs103_defconfig
+++ b/arch/arc/configs/axs103_defconfig
@@ -9,7 +9,6 @@ CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
-CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
# CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
index d4eec39e0112..fcbc952bc75b 100644
--- a/arch/arc/configs/axs103_smp_defconfig
+++ b/arch/arc/configs/axs103_smp_defconfig
@@ -9,7 +9,6 @@ CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
-CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
# CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/arc/configs/haps_hs_defconfig b/arch/arc/configs/haps_hs_defconfig
index 7337cdf4ffdd..d87ad7e88d62 100644
--- a/arch/arc/configs/haps_hs_defconfig
+++ b/arch/arc/configs/haps_hs_defconfig
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
-CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y
CONFIG_EXPERT=y
CONFIG_PERF_EVENTS=y
# CONFIG_COMPAT_BRK is not set
diff --git a/arch/arc/configs/haps_hs_smp_defconfig b/arch/arc/configs/haps_hs_smp_defconfig
index bc927221afc0..8d82cdb7f86a 100644
--- a/arch/arc/configs/haps_hs_smp_defconfig
+++ b/arch/arc/configs/haps_hs_smp_defconfig
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
-CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
# CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig
index aa000075a575..f856b03e0fb5 100644
--- a/arch/arc/configs/hsdk_defconfig
+++ b/arch/arc/configs/hsdk_defconfig
@@ -9,7 +9,6 @@ CONFIG_NAMESPACES=y
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_BLK_DEV_RAM=y
-CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
# CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig
index 326f6cde7826..a1ce12bf5b16 100644
--- a/arch/arc/configs/nsim_700_defconfig
+++ b/arch/arc/configs/nsim_700_defconfig
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
-CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
index bf39a0091679..ca10f4a2c823 100644
--- a/arch/arc/configs/nsimosci_defconfig
+++ b/arch/arc/configs/nsimosci_defconfig
@@ -10,7 +10,6 @@ CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
-CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig
index 7121bd71c543..31b6ec3683c6 100644
--- a/arch/arc/configs/nsimosci_hs_defconfig
+++ b/arch/arc/configs/nsimosci_hs_defconfig
@@ -10,7 +10,6 @@ CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
-CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig
index f9863b294a70..41a0037f48a5 100644
--- a/arch/arc/configs/nsimosci_hs_smp_defconfig
+++ b/arch/arc/configs/nsimosci_hs_smp_defconfig
@@ -8,7 +8,6 @@ CONFIG_IKCONFIG_PROC=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
-CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y
CONFIG_PERF_EVENTS=y
# CONFIG_COMPAT_BRK is not set
CONFIG_KPROBES=y
diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig
index a12656ec0072..d93b65008d4a 100644
--- a/arch/arc/configs/tb10x_defconfig
+++ b/arch/arc/configs/tb10x_defconfig
@@ -14,7 +14,6 @@ CONFIG_INITRAMFS_SOURCE="../tb10x-rootfs.cpio"
CONFIG_INITRAMFS_ROOT_UID=2100
CONFIG_INITRAMFS_ROOT_GID=501
# CONFIG_RD_GZIP is not set
-CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y
CONFIG_KALLSYMS_ALL=y
# CONFIG_AIO is not set
CONFIG_EMBEDDED=y
diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig
index d7c858df520c..0c3b21416819 100644
--- a/arch/arc/configs/vdk_hs38_defconfig
+++ b/arch/arc/configs/vdk_hs38_defconfig
@@ -4,7 +4,6 @@ CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
# CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig
index 015c1d43889e..f9ad9d3ee702 100644
--- a/arch/arc/configs/vdk_hs38_smp_defconfig
+++ b/arch/arc/configs/vdk_hs38_smp_defconfig
@@ -4,7 +4,6 @@ CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
# CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/arc/include/asm/dma.h b/arch/arc/include/asm/dma.h
index 5b744f4b10a7..02431027ed2f 100644
--- a/arch/arc/include/asm/dma.h
+++ b/arch/arc/include/asm/dma.h
@@ -7,10 +7,5 @@
#define ASM_ARC_DMA_H
#define MAX_DMA_ADDRESS 0xC0000000
-#ifdef CONFIG_PCI
-extern int isa_dma_bridge_buggy;
-#else
-#define isa_dma_bridge_buggy 0
-#endif
#endif
diff --git a/arch/arc/include/asm/pgtable-bits-arcv2.h b/arch/arc/include/asm/pgtable-bits-arcv2.h
index 183d23bc1e00..b23be557403e 100644
--- a/arch/arc/include/asm/pgtable-bits-arcv2.h
+++ b/arch/arc/include/asm/pgtable-bits-arcv2.h
@@ -72,24 +72,6 @@
* This is to enable COW mechanism
*/
/* xwr */
-#define __P000 PAGE_U_NONE
-#define __P001 PAGE_U_R
-#define __P010 PAGE_U_R /* Pvt-W => !W */
-#define __P011 PAGE_U_R /* Pvt-W => !W */
-#define __P100 PAGE_U_X_R /* X => R */
-#define __P101 PAGE_U_X_R
-#define __P110 PAGE_U_X_R /* Pvt-W => !W and X => R */
-#define __P111 PAGE_U_X_R /* Pvt-W => !W */
-
-#define __S000 PAGE_U_NONE
-#define __S001 PAGE_U_R
-#define __S010 PAGE_U_W_R /* W => R */
-#define __S011 PAGE_U_W_R
-#define __S100 PAGE_U_X_R /* X => R */
-#define __S101 PAGE_U_X_R
-#define __S110 PAGE_U_X_W_R /* X => R */
-#define __S111 PAGE_U_X_W_R
-
#ifndef __ASSEMBLY__
#define pte_write(pte) (pte_val(pte) & _PAGE_WRITE)
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index d947473f1e6d..ab9e75e90f72 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -232,14 +232,6 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
return 0;
}
-/*
- * not supported here
- */
-int setup_profiling_timer(unsigned int multiplier)
-{
- return -EINVAL;
-}
-
/*****************************************************************************/
/* Inter Processor Interrupt Handling */
/*****************************************************************************/
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index dad27e4d69ff..5ca59a482632 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -146,6 +146,10 @@ retry:
return;
}
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
/*
* Fault retry nuances, mmap_lock already relinquished by core mm
*/
diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c
index 722d26b94307..fce5fa2b4f52 100644
--- a/arch/arc/mm/mmap.c
+++ b/arch/arc/mm/mmap.c
@@ -74,3 +74,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
info.align_offset = pgoff << PAGE_SHIFT;
return vm_unmapped_area(&info);
}
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_U_NONE,
+ [VM_READ] = PAGE_U_R,
+ [VM_WRITE] = PAGE_U_R,
+ [VM_WRITE | VM_READ] = PAGE_U_R,
+ [VM_EXEC] = PAGE_U_X_R,
+ [VM_EXEC | VM_READ] = PAGE_U_X_R,
+ [VM_EXEC | VM_WRITE] = PAGE_U_X_R,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_U_X_R,
+ [VM_SHARED] = PAGE_U_NONE,
+ [VM_SHARED | VM_READ] = PAGE_U_R,
+ [VM_SHARED | VM_WRITE] = PAGE_U_W_R,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_U_W_R,
+ [VM_SHARED | VM_EXEC] = PAGE_U_X_R,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_U_X_R,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_U_X_W_R,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_U_X_W_R
+};
+DECLARE_VM_GET_PAGE_PROT
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 53e6a1da9af5..87badeae3181 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -15,13 +15,12 @@ config ARM
select ARCH_HAS_MEMBARRIER_SYNC_CORE
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
select ARCH_HAS_PTE_SPECIAL if ARM_LPAE
- select ARCH_HAS_PHYS_TO_DMA
select ARCH_HAS_SETUP_DMA_OPS
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
select ARCH_HAS_STRICT_MODULE_RWX if MMU
- select ARCH_HAS_SYNC_DMA_FOR_DEVICE if SWIOTLB || !MMU
- select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB || !MMU
+ select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+ select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_TEARDOWN_DMA_OPS if MMU
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAVE_CUSTOM_GPIO_H
diff --git a/arch/arm/boot/dts/imxrt1170-pinfunc.h b/arch/arm/boot/dts/imxrt1170-pinfunc.h
new file mode 100644
index 000000000000..3b9fff2f08e1
--- /dev/null
+++ b/arch/arm/boot/dts/imxrt1170-pinfunc.h
@@ -0,0 +1,1561 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2021
+ * Author(s): Jesse Taube <Mr.Bossman075@gmail.com>
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_IMXRT1170_PINFUNC_H
+#define _DT_BINDINGS_PINCTRL_IMXRT1170_PINFUNC_H
+
+#define IMX_PAD_SION 0x40000000
+
+/*
+ * The pin function ID is a tuple of
+ * <mux_reg conf_reg input_reg mux_mode input_val>
+ */
+
+#define IOMUXC_GPIO_LPSR_00_FLEXCAN3_TX 0x000 0x040 0x0 0x0 0x0
+#define IOMUXC_GPIO_LPSR_00_MIC_CLK 0x000 0x040 0x0 0x1 0x0
+#define IOMUXC_GPIO_LPSR_00_MQS_RIGHT 0x000 0x040 0x0 0x2 0x0
+#define IOMUXC_GPIO_LPSR_00_ARM_CM4_EVENTO 0x000 0x040 0x0 0x3 0x0
+#define IOMUXC_GPIO_LPSR_00_GPIO_MUX6_IO00 0x000 0x040 0x0 0x5 0x0
+#define IOMUXC_GPIO_LPSR_00_LPUART12_TXD 0x000 0x040 0x0B0 0x6 0x0
+#define IOMUXC_GPIO_LPSR_00_SAI4_MCLK 0x000 0x040 0x0C8 0x7 0x0
+#define IOMUXC_GPIO_LPSR_00_GPIO12_IO00 0x000 0x040 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_LPSR_01_FLEXCAN3_RX 0x004 0x044 0x080 0x0 0x0
+#define IOMUXC_GPIO_LPSR_01_MIC_BITSTREAM0 0x004 0x044 0x0B4 0x1 0x0
+#define IOMUXC_GPIO_LPSR_01_MQS_LEFT 0x004 0x044 0x0 0x2 0x0
+#define IOMUXC_GPIO_LPSR_01_ARM_CM4_EVENTI 0x004 0x044 0x0 0x3 0x0
+#define IOMUXC_GPIO_LPSR_01_GPIO_MUX6_IO01 0x004 0x044 0x0 0x5 0x0
+#define IOMUXC_GPIO_LPSR_01_LPUART12_RXD 0x004 0x044 0x0AC 0x6 0x0
+#define IOMUXC_GPIO_LPSR_01_GPIO12_IO01 0x004 0x044 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_LPSR_02_GPIO12_IO02 0x008 0x048 0x0 0xA 0x0
+#define IOMUXC_GPIO_LPSR_02_SRC_BOOT_MODE00 0x008 0x048 0x0 0x0 0x0
+#define IOMUXC_GPIO_LPSR_02_LPSPI5_SCK 0x008 0x048 0x098 0x1 0x0
+#define IOMUXC_GPIO_LPSR_02_SAI4_TX_DATA 0x008 0x048 0x0 0x2 0x0
+#define IOMUXC_GPIO_LPSR_02_MQS_RIGHT 0x008 0x048 0x0 0x3 0x0
+#define IOMUXC_GPIO_LPSR_02_GPIO_MUX6_IO02 0x008 0x048 0x0 0x5 0x0
+
+#define IOMUXC_GPIO_LPSR_03_SRC_BOOT_MODE01 0x00C 0x04C 0x0 0x0 0x0
+#define IOMUXC_GPIO_LPSR_03_LPSPI5_PCS0 0x00C 0x04C 0x094 0x1 0x0
+#define IOMUXC_GPIO_LPSR_03_SAI4_TX_SYNC 0x00C 0x04C 0x0DC 0x2 0x0
+#define IOMUXC_GPIO_LPSR_03_MQS_LEFT 0x00C 0x04C 0x0 0x3 0x0
+#define IOMUXC_GPIO_LPSR_03_GPIO_MUX6_IO03 0x00C 0x04C 0x0 0x5 0x0
+#define IOMUXC_GPIO_LPSR_03_GPIO12_IO03 0x00C 0x04C 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_LPSR_04_LPI2C5_SDA 0x010 0x050 0x088 0x0 0x0
+#define IOMUXC_GPIO_LPSR_04_LPSPI5_SOUT 0x010 0x050 0x0A0 0x1 0x0
+#define IOMUXC_GPIO_LPSR_04_SAI4_TX_BCLK 0x010 0x050 0x0D8 0x2 0x0
+#define IOMUXC_GPIO_LPSR_04_LPUART12_RTS_B 0x010 0x050 0x0 0x3 0x0
+#define IOMUXC_GPIO_LPSR_04_GPIO_MUX6_IO04 0x010 0x050 0x0 0x5 0x0
+#define IOMUXC_GPIO_LPSR_04_LPUART11_TXD 0x010 0x050 0x0A8 0x6 0x0
+#define IOMUXC_GPIO_LPSR_04_GPIO12_IO04 0x010 0x050 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_LPSR_05_GPIO12_IO05 0x014 0x054 0x0 0xA 0x0
+#define IOMUXC_GPIO_LPSR_05_LPI2C5_SCL 0x014 0x054 0x084 0x0 0x0
+#define IOMUXC_GPIO_LPSR_05_LPSPI5_SIN 0x014 0x054 0x09C 0x1 0x0
+#define IOMUXC_GPIO_LPSR_05_SAI4_MCLK 0x014 0x054 0x0C8 0x2 0x1
+#define IOMUXC_GPIO_LPSR_05_LPUART12_CTS_B 0x014 0x054 0x0 0x3 0x0
+#define IOMUXC_GPIO_LPSR_05_GPIO_MUX6_IO05 0x014 0x054 0x0 0x5 0x0
+#define IOMUXC_GPIO_LPSR_05_LPUART11_RXD 0x014 0x054 0x0A4 0x6 0x0
+#define IOMUXC_GPIO_LPSR_05_NMI_GLUE_NMI 0x014 0x054 0x0C4 0x7 0x0
+
+#define IOMUXC_GPIO_LPSR_06_LPI2C6_SDA 0x018 0x058 0x090 0x0 0x0
+#define IOMUXC_GPIO_LPSR_06_SAI4_RX_DATA 0x018 0x058 0x0D0 0x2 0x0
+#define IOMUXC_GPIO_LPSR_06_LPUART12_TXD 0x018 0x058 0x0B0 0x3 0x1
+#define IOMUXC_GPIO_LPSR_06_LPSPI6_PCS3 0x018 0x058 0x0 0x4 0x0
+#define IOMUXC_GPIO_LPSR_06_GPIO_MUX6_IO06 0x018 0x058 0x0 0x5 0x0
+#define IOMUXC_GPIO_LPSR_06_FLEXCAN3_TX 0x018 0x058 0x0 0x6 0x0
+#define IOMUXC_GPIO_LPSR_06_PIT2_TRIGGER3 0x018 0x058 0x0 0x7 0x0
+#define IOMUXC_GPIO_LPSR_06_LPSPI5_PCS1 0x018 0x058 0x0 0x8 0x0
+#define IOMUXC_GPIO_LPSR_06_GPIO12_IO06 0x018 0x058 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_LPSR_07_LPI2C6_SCL 0x01C 0x05C 0x08C 0x0 0x0
+#define IOMUXC_GPIO_LPSR_07_SAI4_RX_BCLK 0x01C 0x05C 0x0CC 0x2 0x0
+#define IOMUXC_GPIO_LPSR_07_LPUART12_RXD 0x01C 0x05C 0x0AC 0x3 0x1
+#define IOMUXC_GPIO_LPSR_07_LPSPI6_PCS2 0x01C 0x05C 0x0 0x4 0x0
+#define IOMUXC_GPIO_LPSR_07_GPIO_MUX6_IO07 0x01C 0x05C 0x0 0x5 0x0
+#define IOMUXC_GPIO_LPSR_07_FLEXCAN3_RX 0x01C 0x05C 0x080 0x6 0x1
+#define IOMUXC_GPIO_LPSR_07_PIT2_TRIGGER2 0x01C 0x05C 0x0 0x7 0x0
+#define IOMUXC_GPIO_LPSR_07_LPSPI5_PCS2 0x01C 0x05C 0x0 0x8 0x0
+#define IOMUXC_GPIO_LPSR_07_GPIO12_IO07 0x01C 0x05C 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_LPSR_08_GPIO12_IO08 0x020 0x060 0x0 0xA 0x0
+#define IOMUXC_GPIO_LPSR_08_LPUART11_TXD 0x020 0x060 0x0A8 0x0 0x1
+#define IOMUXC_GPIO_LPSR_08_FLEXCAN3_TX 0x020 0x060 0x0 0x1 0x0
+#define IOMUXC_GPIO_LPSR_08_SAI4_RX_SYNC 0x020 0x060 0x0D4 0x2 0x0
+#define IOMUXC_GPIO_LPSR_08_MIC_CLK 0x020 0x060 0x0 0x3 0x0
+#define IOMUXC_GPIO_LPSR_08_LPSPI6_PCS1 0x020 0x060 0x0 0x4 0x0
+#define IOMUXC_GPIO_LPSR_08_GPIO_MUX6_IO08 0x020 0x060 0x0 0x5 0x0
+#define IOMUXC_GPIO_LPSR_08_LPI2C5_SDA 0x020 0x060 0x088 0x6 0x1
+#define IOMUXC_GPIO_LPSR_08_PIT2_TRIGGER1 0x020 0x060 0x0 0x7 0x0
+#define IOMUXC_GPIO_LPSR_08_LPSPI5_PCS3 0x020 0x060 0x0 0x8 0x0
+
+#define IOMUXC_GPIO_LPSR_09_GPIO12_IO09 0x024 0x064 0x0 0xA 0x0
+#define IOMUXC_GPIO_LPSR_09_LPUART11_RXD 0x024 0x064 0x0A4 0x0 0x1
+#define IOMUXC_GPIO_LPSR_09_FLEXCAN3_RX 0x024 0x064 0x080 0x1 0x2
+#define IOMUXC_GPIO_LPSR_09_PIT2_TRIGGER0 0x024 0x064 0x0 0x2 0x0
+#define IOMUXC_GPIO_LPSR_09_MIC_BITSTREAM0 0x024 0x064 0x0B4 0x3 0x1
+#define IOMUXC_GPIO_LPSR_09_LPSPI6_PCS0 0x024 0x064 0x0 0x4 0x0
+#define IOMUXC_GPIO_LPSR_09_GPIO_MUX6_IO09 0x024 0x064 0x0 0x5 0x0
+#define IOMUXC_GPIO_LPSR_09_LPI2C5_SCL 0x024 0x064 0x084 0x6 0x1
+#define IOMUXC_GPIO_LPSR_09_SAI4_TX_DATA 0x024 0x064 0x0 0x7 0x0
+
+#define IOMUXC_GPIO_LPSR_10_GPIO12_IO10 0x028 0x068 0x0 0xA 0x0
+#define IOMUXC_GPIO_LPSR_10_JTAG_MUX_TRSTB 0x028 0x068 0x0 0x0 0x0
+#define IOMUXC_GPIO_LPSR_10_LPUART11_CTS_B 0x028 0x068 0x0 0x1 0x0
+#define IOMUXC_GPIO_LPSR_10_LPI2C6_SDA 0x028 0x068 0x090 0x2 0x1
+#define IOMUXC_GPIO_LPSR_10_MIC_BITSTREAM1 0x028 0x068 0x0B8 0x3 0x0
+#define IOMUXC_GPIO_LPSR_10_LPSPI6_SCK 0x028 0x068 0x0 0x4 0x0
+#define IOMUXC_GPIO_LPSR_10_GPIO_MUX6_IO10 0x028 0x068 0x0 0x5 0x0
+#define IOMUXC_GPIO_LPSR_10_LPI2C5_SCLS 0x028 0x068 0x0 0x6 0x0
+#define IOMUXC_GPIO_LPSR_10_SAI4_TX_SYNC 0x028 0x068 0x0DC 0x7 0x1
+#define IOMUXC_GPIO_LPSR_10_LPUART12_TXD 0x028 0x068 0x0B0 0x8 0x2
+
+#define IOMUXC_GPIO_LPSR_11_JTAG_MUX_TDO 0x02C 0x06C 0x0 0x0 0x0
+#define IOMUXC_GPIO_LPSR_11_LPUART11_RTS_B 0x02C 0x06C 0x0 0x1 0x0
+#define IOMUXC_GPIO_LPSR_11_LPI2C6_SCL 0x02C 0x06C 0x08C 0x2 0x1
+#define IOMUXC_GPIO_LPSR_11_MIC_BITSTREAM2 0x02C 0x06C 0x0BC 0x3 0x0
+#define IOMUXC_GPIO_LPSR_11_LPSPI6_SOUT 0x02C 0x06C 0x0 0x4 0x0
+#define IOMUXC_GPIO_LPSR_11_GPIO_MUX6_IO11 0x02C 0x06C 0x0 0x5 0x0
+#define IOMUXC_GPIO_LPSR_11_LPI2C5_SDAS 0x02C 0x06C 0x0 0x6 0x0
+#define IOMUXC_GPIO_LPSR_11_ARM_TRACE_SWO 0x02C 0x06C 0x0 0x7 0x0
+#define IOMUXC_GPIO_LPSR_11_LPUART12_RXD 0x02C 0x06C 0x0AC 0x8 0x2
+#define IOMUXC_GPIO_LPSR_11_GPIO12_IO11 0x02C 0x06C 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_LPSR_12_GPIO12_IO12 0x030 0x070 0x0 0xA 0x0
+#define IOMUXC_GPIO_LPSR_12_JTAG_MUX_TDI 0x030 0x070 0x0 0x0 0x0
+#define IOMUXC_GPIO_LPSR_12_PIT2_TRIGGER0 0x030 0x070 0x0 0x1 0x0
+#define IOMUXC_GPIO_LPSR_12_MIC_BITSTREAM3 0x030 0x070 0x0C0 0x3 0x0
+#define IOMUXC_GPIO_LPSR_12_LPSPI6_SIN 0x030 0x070 0x0 0x4 0x0
+#define IOMUXC_GPIO_LPSR_12_GPIO_MUX6_IO12 0x030 0x070 0x0 0x5 0x0
+#define IOMUXC_GPIO_LPSR_12_LPI2C5_HREQ 0x030 0x070 0x0 0x6 0x0
+#define IOMUXC_GPIO_LPSR_12_SAI4_TX_BCLK 0x030 0x070 0x0D8 0x7 0x1
+#define IOMUXC_GPIO_LPSR_12_LPSPI5_SCK 0x030 0x070 0x098 0x8 0x1
+
+#define IOMUXC_GPIO_LPSR_13_GPIO12_IO13 0x034 0x074 0x0 0xA 0x0
+#define IOMUXC_GPIO_LPSR_13_JTAG_MUX_MOD 0x034 0x074 0x0 0x0 0x0
+#define IOMUXC_GPIO_LPSR_13_MIC_BITSTREAM1 0x034 0x074 0x0B8 0x1 0x1
+#define IOMUXC_GPIO_LPSR_13_PIT2_TRIGGER1 0x034 0x074 0x0 0x2 0x0
+#define IOMUXC_GPIO_LPSR_13_GPIO_MUX6_IO13 0x034 0x074 0x0 0x5 0x0
+#define IOMUXC_GPIO_LPSR_13_SAI4_RX_DATA 0x034 0x074 0x0D0 0x7 0x1
+#define IOMUXC_GPIO_LPSR_13_LPSPI5_PCS0 0x034 0x074 0x094 0x8 0x1
+
+#define IOMUXC_GPIO_LPSR_14_JTAG_MUX_TCK 0x038 0x078 0x0 0x0 0x0
+#define IOMUXC_GPIO_LPSR_14_MIC_BITSTREAM2 0x038 0x078 0x0BC 0x1 0x1
+#define IOMUXC_GPIO_LPSR_14_PIT2_TRIGGER2 0x038 0x078 0x0 0x2 0x0
+#define IOMUXC_GPIO_LPSR_14_GPIO_MUX6_IO14 0x038 0x078 0x0 0x5 0x0
+#define IOMUXC_GPIO_LPSR_14_SAI4_RX_BCLK 0x038 0x078 0x0CC 0x7 0x1
+#define IOMUXC_GPIO_LPSR_14_LPSPI5_SOUT 0x038 0x078 0x0A0 0x8 0x1
+#define IOMUXC_GPIO_LPSR_14_GPIO12_IO14 0x038 0x078 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_LPSR_15_GPIO12_IO15 0x03C 0x07C 0x0 0xA 0x0
+#define IOMUXC_GPIO_LPSR_15_JTAG_MUX_TMS 0x03C 0x07C 0x0 0x0 0x0
+#define IOMUXC_GPIO_LPSR_15_MIC_BITSTREAM3 0x03C 0x07C 0x0C0 0x1 0x1
+#define IOMUXC_GPIO_LPSR_15_PIT2_TRIGGER3 0x03C 0x07C 0x0 0x2 0x0
+#define IOMUXC_GPIO_LPSR_15_GPIO_MUX6_IO15 0x03C 0x07C 0x0 0x5 0x0
+#define IOMUXC_GPIO_LPSR_15_SAI4_RX_SYNC 0x03C 0x07C 0x0D4 0x7 0x1
+#define IOMUXC_GPIO_LPSR_15_LPSPI5_SIN 0x03C 0x07C 0x09C 0x8 0x1
+
+#define IOMUXC_WAKEUP_DIG_GPIO13_IO00 0x40C94000 0x40C94040 0x0 0x5 0x0
+#define IOMUXC_WAKEUP_DIG_NMI_GLUE_NMI 0x40C94000 0x40C94040 0x0C4 0x7 0x1
+
+#define IOMUXC_PMIC_ON_REQ_DIG_SNVS_LP_PMIC_ON_REQ 0x40C94004 0x40C94044 0x0 0x0 0x0
+#define IOMUXC_PMIC_ON_REQ_DIG_GPIO13_IO01 0x40C94004 0x40C94044 0x0 0x5 0x0
+
+#define IOMUXC_PMIC_STBY_REQ_DIG_CCM_PMIC_VSTBY_REQ 0x40C94008 0x40C94048 0x0 0x0 0x0
+#define IOMUXC_PMIC_STBY_REQ_DIG_GPIO13_IO02 0x40C94008 0x40C94048 0x0 0x5 0x0
+
+#define IOMUXC_GPIO_SNVS_00_DIG_SNVS_TAMPER0 0x40C9400C 0x40C9404C 0x0 0x0 0x0
+#define IOMUXC_GPIO_SNVS_00_DIG_GPIO13_IO03 0x40C9400C 0x40C9404C 0x0 0x5 0x0
+
+#define IOMUXC_GPIO_SNVS_01_DIG_SNVS_TAMPER1 0x40C94010 0x40C94050 0x0 0x0 0x0
+#define IOMUXC_GPIO_SNVS_01_DIG_GPIO13_IO04 0x40C94010 0x40C94050 0x0 0x5 0x0
+
+#define IOMUXC_GPIO_SNVS_02_DIG_SNVS_TAMPER2 0x40C94014 0x40C94054 0x0 0x0 0x0
+#define IOMUXC_GPIO_SNVS_02_DIG_GPIO13_IO05 0x40C94014 0x40C94054 0x0 0x5 0x0
+
+#define IOMUXC_GPIO_SNVS_03_DIG_SNVS_TAMPER3 0x40C94018 0x40C94058 0x0 0x0 0x0
+#define IOMUXC_GPIO_SNVS_03_DIG_GPIO13_IO06 0x40C94018 0x40C94058 0x0 0x5 0x0
+
+#define IOMUXC_GPIO_SNVS_04_DIG_SNVS_TAMPER4 0x40C9401C 0x40C9405C 0x0 0x0 0x0
+#define IOMUXC_GPIO_SNVS_04_DIG_GPIO13_IO07 0x40C9401C 0x40C9405C 0x0 0x5 0x0
+
+#define IOMUXC_GPIO_SNVS_05_DIG_SNVS_TAMPER5 0x40C94020 0x40C94060 0x0 0x0 0x0
+#define IOMUXC_GPIO_SNVS_05_DIG_GPIO13_IO08 0x40C94020 0x40C94060 0x0 0x5 0x0
+
+#define IOMUXC_GPIO_SNVS_06_DIG_SNVS_TAMPER6 0x40C94024 0x40C94064 0x0 0x0 0x0
+#define IOMUXC_GPIO_SNVS_06_DIG_GPIO13_IO09 0x40C94024 0x40C94064 0x0 0x5 0x0
+
+#define IOMUXC_GPIO_SNVS_07_DIG_SNVS_TAMPER7 0x40C94028 0x40C94068 0x0 0x0 0x0
+#define IOMUXC_GPIO_SNVS_07_DIG_GPIO13_IO10 0x40C94028 0x40C94068 0x0 0x5 0x0
+
+#define IOMUXC_GPIO_SNVS_08_DIG_SNVS_TAMPER8 0x40C9402C 0x40C9406C 0x0 0x0 0x0
+#define IOMUXC_GPIO_SNVS_08_DIG_GPIO13_IO11 0x40C9402C 0x40C9406C 0x0 0x5 0x0
+
+#define IOMUXC_GPIO_SNVS_09_DIG_SNVS_TAMPER9 0x40C94030 0x40C94070 0x0 0x0 0x0
+#define IOMUXC_GPIO_SNVS_09_DIG_GPIO13_IO12 0x40C94030 0x40C94070 0x0 0x5 0x0
+
+#define IOMUXC_TEST_MODE_DIG 0x0 0x40C94034 0x0 0x0 0x0
+
+#define IOMUXC_POR_B_DIG 0x0 0x40C94038 0x0 0x0 0x0
+
+#define IOMUXC_ONOFF_DIG 0x0 0x40C9403C 0x0 0x0 0x0
+
+#define IOMUXC_GPIO_EMC_B1_00_SEMC_DATA00 0x010 0x254 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_00_FLEXPWM4_PWM0_A 0x010 0x254 0x0 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_00_GPIO_MUX1_IO00 0x010 0x254 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B1_00_FLEXIO1_D00 0x010 0x254 0x0 0x8 0x0
+#define IOMUXC_GPIO_EMC_B1_00_GPIO7_IO00 0x010 0x254 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_EMC_B1_01_GPIO7_IO01 0x014 0x258 0x0 0xA 0x0
+#define IOMUXC_GPIO_EMC_B1_01_SEMC_DATA01 0x014 0x258 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_01_FLEXPWM4_PWM0_B 0x014 0x258 0x0 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_01_GPIO_MUX1_IO01 0x014 0x258 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B1_01_FLEXIO1_D01 0x014 0x258 0x0 0x8 0x0
+
+#define IOMUXC_GPIO_EMC_B1_02_SEMC_DATA02 0x018 0x25C 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_02_FLEXPWM4_PWM1_A 0x018 0x25C 0x0 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_02_GPIO_MUX1_IO02 0x018 0x25C 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B1_02_FLEXIO1_D02 0x018 0x25C 0x0 0x8 0x0
+#define IOMUXC_GPIO_EMC_B1_02_GPIO7_IO02 0x018 0x25C 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_EMC_B1_03_SEMC_DATA03 0x01C 0x260 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_03_FLEXPWM4_PWM1_B 0x01C 0x260 0x0 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_03_GPIO_MUX1_IO03 0x01C 0x260 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B1_03_FLEXIO1_D03 0x01C 0x260 0x0 0x8 0x0
+#define IOMUXC_GPIO_EMC_B1_03_GPIO7_IO03 0x01C 0x260 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_EMC_B1_04_GPIO7_IO04 0x020 0x264 0x0 0xA 0x0
+#define IOMUXC_GPIO_EMC_B1_04_SEMC_DATA04 0x020 0x264 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_04_FLEXPWM4_PWM2_A 0x020 0x264 0x0 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_04_GPIO_MUX1_IO04 0x020 0x264 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B1_04_FLEXIO1_D04 0x020 0x264 0x0 0x8 0x0
+
+#define IOMUXC_GPIO_EMC_B1_05_SEMC_DATA05 0x024 0x268 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_05_FLEXPWM4_PWM2_B 0x024 0x268 0x0 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_05_GPIO_MUX1_IO05 0x024 0x268 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B1_05_FLEXIO1_D05 0x024 0x268 0x0 0x8 0x0
+#define IOMUXC_GPIO_EMC_B1_05_GPIO7_IO05 0x024 0x268 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_EMC_B1_06_SEMC_DATA06 0x028 0x26C 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_06_FLEXPWM2_PWM0_A 0x028 0x26C 0x518 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_06_GPIO_MUX1_IO06 0x028 0x26C 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B1_06_FLEXIO1_D06 0x028 0x26C 0x0 0x8 0x0
+#define IOMUXC_GPIO_EMC_B1_06_GPIO7_IO06 0x028 0x26C 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_EMC_B1_07_GPIO7_IO07 0x02C 0x270 0x0 0xA 0x0
+#define IOMUXC_GPIO_EMC_B1_07_SEMC_DATA07 0x02C 0x270 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_07_FLEXPWM2_PWM0_B 0x02C 0x270 0x524 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_07_GPIO_MUX1_IO07 0x02C 0x270 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B1_07_FLEXIO1_D07 0x02C 0x270 0x0 0x8 0x0
+
+#define IOMUXC_GPIO_EMC_B1_08_SEMC_DM00 0x030 0x274 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_08_FLEXPWM2_PWM1_A 0x030 0x274 0x51C 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_08_GPIO_MUX1_IO08 0x030 0x274 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B1_08_FLEXIO1_D08 0x030 0x274 0x0 0x8 0x0
+#define IOMUXC_GPIO_EMC_B1_08_GPIO7_IO08 0x030 0x274 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_EMC_B1_09_SEMC_ADDR00 0x034 0x278 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_09_FLEXPWM2_PWM1_B 0x034 0x278 0x528 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_09_GPT5_CAPTURE1 0x034 0x278 0x0 0x2 0x0
+#define IOMUXC_GPIO_EMC_B1_09_GPIO_MUX1_IO09 0x034 0x278 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B1_09_FLEXIO1_D09 0x034 0x278 0x0 0x8 0x0
+#define IOMUXC_GPIO_EMC_B1_09_GPIO7_IO09 0x034 0x278 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_EMC_B1_10_SEMC_ADDR01 0x038 0x27C 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_10_FLEXPWM2_PWM2_A 0x038 0x27C 0x520 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_10_GPT5_CAPTURE2 0x038 0x27C 0x0 0x2 0x0
+#define IOMUXC_GPIO_EMC_B1_10_GPIO_MUX1_IO10 0x038 0x27C 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B1_10_FLEXIO1_D10 0x038 0x27C 0x0 0x8 0x0
+#define IOMUXC_GPIO_EMC_B1_10_GPIO7_IO10 0x038 0x27C 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_EMC_B1_11_GPIO7_IO11 0x03C 0x280 0x0 0xA 0x0
+#define IOMUXC_GPIO_EMC_B1_11_SEMC_ADDR02 0x03C 0x280 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_11_FLEXPWM2_PWM2_B 0x03C 0x280 0x52C 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_11_GPT5_COMPARE1 0x03C 0x280 0x0 0x2 0x0
+#define IOMUXC_GPIO_EMC_B1_11_GPIO_MUX1_IO11 0x03C 0x280 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B1_11_FLEXIO1_D11 0x03C 0x280 0x0 0x8 0x0
+
+#define IOMUXC_GPIO_EMC_B1_12_SEMC_ADDR03 0x040 0x284 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_12_XBAR1_INOUT04 0x040 0x284 0x0 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_12_GPT5_COMPARE2 0x040 0x284 0x0 0x2 0x0
+#define IOMUXC_GPIO_EMC_B1_12_GPIO_MUX1_IO12 0x040 0x284 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B1_12_FLEXIO1_D12 0x040 0x284 0x0 0x8 0x0
+#define IOMUXC_GPIO_EMC_B1_12_GPIO7_IO12 0x040 0x284 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_EMC_B1_13_SEMC_ADDR04 0x044 0x288 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_13_XBAR1_INOUT05 0x044 0x288 0x0 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_13_GPT5_COMPARE3 0x044 0x288 0x0 0x2 0x0
+#define IOMUXC_GPIO_EMC_B1_13_GPIO_MUX1_IO13 0x044 0x288 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B1_13_FLEXIO1_D13 0x044 0x288 0x0 0x8 0x0
+#define IOMUXC_GPIO_EMC_B1_13_GPIO7_IO13 0x044 0x288 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_EMC_B1_14_GPIO7_IO14 0x048 0x28C 0x0 0xA 0x0
+#define IOMUXC_GPIO_EMC_B1_14_SEMC_ADDR05 0x048 0x28C 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_14_XBAR1_INOUT06 0x048 0x28C 0x0 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_14_GPT5_CLK 0x048 0x28C 0x0 0x2 0x0
+#define IOMUXC_GPIO_EMC_B1_14_GPIO_MUX1_IO14 0x048 0x28C 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B1_14_FLEXIO1_D14 0x048 0x28C 0x0 0x8 0x0
+
+#define IOMUXC_GPIO_EMC_B1_15_SEMC_ADDR06 0x04C 0x290 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_15_XBAR1_INOUT07 0x04C 0x290 0x0 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_15_GPIO_MUX1_IO15 0x04C 0x290 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B1_15_FLEXIO1_D15 0x04C 0x290 0x0 0x8 0x0
+#define IOMUXC_GPIO_EMC_B1_15_GPIO7_IO15 0x04C 0x290 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_EMC_B1_16_SEMC_ADDR07 0x050 0x294 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_16_XBAR1_INOUT08 0x050 0x294 0x0 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_16_GPIO_MUX1_IO16 0x050 0x294 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B1_16_FLEXIO1_D16 0x050 0x294 0x0 0x8 0x0
+#define IOMUXC_GPIO_EMC_B1_16_GPIO7_IO16 0x050 0x294 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_EMC_B1_17_GPIO7_IO17 0x054 0x298 0x0 0xA 0x0
+#define IOMUXC_GPIO_EMC_B1_17_SEMC_ADDR08 0x054 0x298 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_17_FLEXPWM4_PWM3_A 0x054 0x298 0x0 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_17_TMR1_TIMER0 0x054 0x298 0x63C 0x2 0x0
+#define IOMUXC_GPIO_EMC_B1_17_GPIO_MUX1_IO17 0x054 0x298 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B1_17_FLEXIO1_D17 0x054 0x298 0x0 0x8 0x0
+
+#define IOMUXC_GPIO_EMC_B1_18_SEMC_ADDR09 0x058 0x29C 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_18_FLEXPWM4_PWM3_B 0x058 0x29C 0x0 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_18_TMR2_TIMER0 0x058 0x29C 0x648 0x2 0x0
+#define IOMUXC_GPIO_EMC_B1_18_GPIO_MUX1_IO18 0x058 0x29C 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B1_18_FLEXIO1_D18 0x058 0x29C 0x0 0x8 0x0
+#define IOMUXC_GPIO_EMC_B1_18_GPIO7_IO18 0x058 0x29C 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_EMC_B1_19_SEMC_ADDR11 0x05C 0x2A0 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_19_FLEXPWM2_PWM3_A 0x05C 0x2A0 0x0 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_19_TMR3_TIMER0 0x05C 0x2A0 0x654 0x2 0x0
+#define IOMUXC_GPIO_EMC_B1_19_GPIO_MUX1_IO19 0x05C 0x2A0 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B1_19_FLEXIO1_D19 0x05C 0x2A0 0x0 0x8 0x0
+#define IOMUXC_GPIO_EMC_B1_19_GPIO7_IO19 0x05C 0x2A0 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_EMC_B1_20_SEMC_ADDR12 0x060 0x2A4 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_20_FLEXPWM2_PWM3_B 0x060 0x2A4 0x0 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_20_TMR4_TIMER0 0x060 0x2A4 0x660 0x2 0x0
+#define IOMUXC_GPIO_EMC_B1_20_GPIO_MUX1_IO20 0x060 0x2A4 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B1_20_FLEXIO1_D20 0x060 0x2A4 0x0 0x8 0x0
+#define IOMUXC_GPIO_EMC_B1_20_GPIO7_IO20 0x060 0x2A4 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_EMC_B1_21_GPIO7_IO21 0x064 0x2A8 0x0 0xA 0x0
+#define IOMUXC_GPIO_EMC_B1_21_SEMC_BA0 0x064 0x2A8 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_21_FLEXPWM3_PWM3_A 0x064 0x2A8 0x53C 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_21_GPIO_MUX1_IO21 0x064 0x2A8 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B1_21_FLEXIO1_D21 0x064 0x2A8 0x0 0x8 0x0
+
+#define IOMUXC_GPIO_EMC_B1_22_GPIO7_IO22 0x068 0x2AC 0x0 0xA 0x0
+#define IOMUXC_GPIO_EMC_B1_22_SEMC_BA1 0x068 0x2AC 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_22_FLEXPWM3_PWM3_B 0x068 0x2AC 0x54C 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_22_GPIO_MUX1_IO22 0x068 0x2AC 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B1_22_FLEXIO1_D22 0x068 0x2AC 0x0 0x8 0x0
+
+#define IOMUXC_GPIO_EMC_B1_23_SEMC_ADDR10 0x06C 0x2B0 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_23_FLEXPWM1_PWM0_A 0x06C 0x2B0 0x500 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_23_GPIO_MUX1_IO23 0x06C 0x2B0 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B1_23_FLEXIO1_D23 0x06C 0x2B0 0x0 0x8 0x0
+#define IOMUXC_GPIO_EMC_B1_23_GPIO7_IO23 0x06C 0x2B0 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_EMC_B1_24_GPIO7_IO24 0x070 0x2B4 0x0 0xA 0x0
+#define IOMUXC_GPIO_EMC_B1_24_SEMC_CAS 0x070 0x2B4 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_24_FLEXPWM1_PWM0_B 0x070 0x2B4 0x50C 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_24_GPIO_MUX1_IO24 0x070 0x2B4 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B1_24_FLEXIO1_D24 0x070 0x2B4 0x0 0x8 0x0
+
+#define IOMUXC_GPIO_EMC_B1_25_GPIO7_IO25 0x074 0x2B8 0x0 0xA 0x0
+#define IOMUXC_GPIO_EMC_B1_25_SEMC_RAS 0x074 0x2B8 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_25_FLEXPWM1_PWM1_A 0x074 0x2B8 0x504 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_25_GPIO_MUX1_IO25 0x074 0x2B8 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B1_25_FLEXIO1_D25 0x074 0x2B8 0x0 0x8 0x0
+
+#define IOMUXC_GPIO_EMC_B1_26_SEMC_CLK 0x078 0x2BC 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_26_FLEXPWM1_PWM1_B 0x078 0x2BC 0x510 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_26_GPIO_MUX1_IO26 0x078 0x2BC 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B1_26_FLEXIO1_D26 0x078 0x2BC 0x0 0x8 0x0
+#define IOMUXC_GPIO_EMC_B1_26_GPIO7_IO26 0x078 0x2BC 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_EMC_B1_27_GPIO7_IO27 0x07C 0x2C0 0x0 0xA 0x0
+#define IOMUXC_GPIO_EMC_B1_27_SEMC_CKE 0x07C 0x2C0 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_27_FLEXPWM1_PWM2_A 0x07C 0x2C0 0x508 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_27_GPIO_MUX1_IO27 0x07C 0x2C0 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B1_27_FLEXIO1_D27 0x07C 0x2C0 0x0 0x8 0x0
+
+#define IOMUXC_GPIO_EMC_B1_28_GPIO7_IO28 0x080 0x2C4 0x0 0xA 0x0
+#define IOMUXC_GPIO_EMC_B1_28_SEMC_WE 0x080 0x2C4 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_28_FLEXPWM1_PWM2_B 0x080 0x2C4 0x514 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_28_GPIO_MUX1_IO28 0x080 0x2C4 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B1_28_FLEXIO1_D28 0x080 0x2C4 0x0 0x8 0x0
+
+#define IOMUXC_GPIO_EMC_B1_29_SEMC_CS0 0x084 0x2C8 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_29_FLEXPWM3_PWM0_A 0x084 0x2C8 0x530 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_29_GPIO_MUX1_IO29 0x084 0x2C8 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B1_29_FLEXIO1_D29 0x084 0x2C8 0x0 0x8 0x0
+#define IOMUXC_GPIO_EMC_B1_29_GPIO7_IO29 0x084 0x2C8 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_EMC_B1_30_SEMC_DATA08 0x088 0x2CC 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_30_FLEXPWM3_PWM0_B 0x088 0x2CC 0x540 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_30_GPIO_MUX1_IO30 0x088 0x2CC 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B1_30_FLEXIO1_D30 0x088 0x2CC 0x0 0x8 0x0
+#define IOMUXC_GPIO_EMC_B1_30_GPIO7_IO30 0x088 0x2CC 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_EMC_B1_31_GPIO7_IO31 0x08C 0x2D0 0x0 0xA 0x0
+#define IOMUXC_GPIO_EMC_B1_31_SEMC_DATA09 0x08C 0x2D0 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_31_FLEXPWM3_PWM1_A 0x08C 0x2D0 0x534 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_31_GPIO_MUX1_IO31 0x08C 0x2D0 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B1_31_FLEXIO1_D31 0x08C 0x2D0 0x0 0x8 0x0
+
+#define IOMUXC_GPIO_EMC_B1_32_GPIO8_IO00 0x090 0x2D4 0x0 0xA 0x0
+#define IOMUXC_GPIO_EMC_B1_32_SEMC_DATA10 0x090 0x2D4 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_32_FLEXPWM3_PWM1_B 0x090 0x2D4 0x544 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_32_GPIO_MUX2_IO00 0x090 0x2D4 0x0 0x5 0x0
+
+#define IOMUXC_GPIO_EMC_B1_33_SEMC_DATA11 0x094 0x2D8 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_33_FLEXPWM3_PWM2_A 0x094 0x2D8 0x538 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_33_GPIO_MUX2_IO01 0x094 0x2D8 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B1_33_GPIO8_IO01 0x094 0x2D8 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_EMC_B1_34_GPIO8_IO02 0x098 0x2DC 0x0 0xA 0x0
+#define IOMUXC_GPIO_EMC_B1_34_SEMC_DATA12 0x098 0x2DC 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_34_FLEXPWM3_PWM2_B 0x098 0x2DC 0x548 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_34_GPIO_MUX2_IO02 0x098 0x2DC 0x0 0x5 0x0
+
+#define IOMUXC_GPIO_EMC_B1_35_GPIO8_IO03 0x09C 0x2E0 0x0 0xA 0x0
+#define IOMUXC_GPIO_EMC_B1_35_SEMC_DATA13 0x09C 0x2E0 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_35_XBAR1_INOUT09 0x09C 0x2E0 0x0 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_35_GPIO_MUX2_IO03 0x09C 0x2E0 0x0 0x5 0x0
+
+#define IOMUXC_GPIO_EMC_B1_36_SEMC_DATA14 0x0A0 0x2E4 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_36_XBAR1_INOUT10 0x0A0 0x2E4 0x0 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_36_GPIO_MUX2_IO04 0x0A0 0x2E4 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B1_36_GPIO8_IO04 0x0A0 0x2E4 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_EMC_B1_37_GPIO8_IO05 0x0A4 0x2E8 0x0 0xA 0x0
+#define IOMUXC_GPIO_EMC_B1_37_SEMC_DATA15 0x0A4 0x2E8 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_37_XBAR1_INOUT11 0x0A4 0x2E8 0x0 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_37_GPIO_MUX2_IO05 0x0A4 0x2E8 0x0 0x5 0x0
+
+#define IOMUXC_GPIO_EMC_B1_38_GPIO8_IO06 0x0A8 0x2EC 0x0 0xA 0x0
+#define IOMUXC_GPIO_EMC_B1_38_SEMC_DM01 0x0A8 0x2EC 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_38_FLEXPWM1_PWM3_A 0x0A8 0x2EC 0x0 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_38_TMR1_TIMER1 0x0A8 0x2EC 0x640 0x2 0x0
+#define IOMUXC_GPIO_EMC_B1_38_GPIO_MUX2_IO06 0x0A8 0x2EC 0x0 0x5 0x0
+
+#define IOMUXC_GPIO_EMC_B1_39_SEMC_DQS 0x0AC 0x2F0 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_39_FLEXPWM1_PWM3_B 0x0AC 0x2F0 0x0 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_39_TMR2_TIMER1 0x0AC 0x2F0 0x64C 0x2 0x0
+#define IOMUXC_GPIO_EMC_B1_39_GPIO_MUX2_IO07 0x0AC 0x2F0 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B1_39_GPIO8_IO07 0x0AC 0x2F0 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_EMC_B1_40_SEMC_RDY 0x0B0 0x2F4 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_40_XBAR1_INOUT12 0x0B0 0x2F4 0x0 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_40_MQS_RIGHT 0x0B0 0x2F4 0x0 0x2 0x0
+#define IOMUXC_GPIO_EMC_B1_40_LPUART6_TXD 0x0B0 0x2F4 0x0 0x3 0x0
+#define IOMUXC_GPIO_EMC_B1_40_GPIO_MUX2_IO08 0x0B0 0x2F4 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B1_40_ENET_1G_MDC 0x0B0 0x2F4 0x0 0x7 0x0
+#define IOMUXC_GPIO_EMC_B1_40_CCM_CLKO1 0x0B0 0x2F4 0x0 0x9 0x0
+#define IOMUXC_GPIO_EMC_B1_40_GPIO8_IO08 0x0B0 0x2F4 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_EMC_B1_41_GPIO8_IO09 0x0B4 0x2F8 0x0 0xA 0x0
+#define IOMUXC_GPIO_EMC_B1_41_SEMC_CSX00 0x0B4 0x2F8 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B1_41_XBAR1_INOUT13 0x0B4 0x2F8 0x0 0x1 0x0
+#define IOMUXC_GPIO_EMC_B1_41_MQS_LEFT 0x0B4 0x2F8 0x0 0x2 0x0
+#define IOMUXC_GPIO_EMC_B1_41_LPUART6_RXD 0x0B4 0x2F8 0x0 0x3 0x0
+#define IOMUXC_GPIO_EMC_B1_41_FLEXSPI2_B_DATA07 0x0B4 0x2F8 0x0 0x4 0x0
+#define IOMUXC_GPIO_EMC_B1_41_GPIO_MUX2_IO09 0x0B4 0x2F8 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B1_41_ENET_1G_MDIO 0x0B4 0x2F8 0x4C8 0x7 0x0
+#define IOMUXC_GPIO_EMC_B1_41_CCM_CLKO2 0x0B4 0x2F8 0x0 0x9 0x0
+
+#define IOMUXC_GPIO_EMC_B2_00_SEMC_DATA16 0x0B8 0x2FC 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B2_00_CCM_ENET_REF_CLK_25M 0x0B8 0x2FC 0x0 0x1 0x0
+#define IOMUXC_GPIO_EMC_B2_00_TMR3_TIMER1 0x0B8 0x2FC 0x658 0x2 0x0
+#define IOMUXC_GPIO_EMC_B2_00_LPUART6_CTS_B 0x0B8 0x2FC 0x0 0x3 0x0
+#define IOMUXC_GPIO_EMC_B2_00_FLEXSPI2_B_DATA06 0x0B8 0x2FC 0x0 0x4 0x0
+#define IOMUXC_GPIO_EMC_B2_00_GPIO_MUX2_IO10 0x0B8 0x2FC 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B2_00_XBAR1_INOUT20 0x0B8 0x2FC 0x6D8 0x6 0x0
+#define IOMUXC_GPIO_EMC_B2_00_ENET_QOS_1588_EVENT1_OUT 0x0B8 0x2FC 0x0 0x7 0x0
+#define IOMUXC_GPIO_EMC_B2_00_LPSPI1_SCK 0x0B8 0x2FC 0x5D0 0x8 0x0
+#define IOMUXC_GPIO_EMC_B2_00_LPI2C2_SCL 0x0B8 0x2FC 0x5B4 0x9 0x0
+#define IOMUXC_GPIO_EMC_B2_00_GPIO8_IO10 0x0B8 0x2FC 0x0 0xA 0x0
+#define IOMUXC_GPIO_EMC_B2_00_FLEXPWM3_PWM0_A 0x0B8 0x2FC 0x530 0xB 0x1
+
+#define IOMUXC_GPIO_EMC_B2_01_SEMC_DATA17 0x0BC 0x300 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B2_01_USDHC2_CD_B 0x0BC 0x300 0x6D0 0x1 0x0
+#define IOMUXC_GPIO_EMC_B2_01_TMR4_TIMER1 0x0BC 0x300 0x664 0x2 0x0
+#define IOMUXC_GPIO_EMC_B2_01_LPUART6_RTS_B 0x0BC 0x300 0x0 0x3 0x0
+#define IOMUXC_GPIO_EMC_B2_01_FLEXSPI2_B_DATA05 0x0BC 0x300 0x0 0x4 0x0
+#define IOMUXC_GPIO_EMC_B2_01_GPIO_MUX2_IO11 0x0BC 0x300 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B2_01_XBAR1_INOUT21 0x0BC 0x300 0x6DC 0x6 0x0
+#define IOMUXC_GPIO_EMC_B2_01_ENET_QOS_1588_EVENT1_IN 0x0BC 0x300 0x0 0x7 0x0
+#define IOMUXC_GPIO_EMC_B2_01_LPSPI1_PCS0 0x0BC 0x300 0x5CC 0x8 0x0
+#define IOMUXC_GPIO_EMC_B2_01_LPI2C2_SDA 0x0BC 0x300 0x5B8 0x9 0x0
+#define IOMUXC_GPIO_EMC_B2_01_GPIO8_IO11 0x0BC 0x300 0x0 0xA 0x0
+#define IOMUXC_GPIO_EMC_B2_01_FLEXPWM3_PWM0_B 0x0BC 0x300 0x540 0xB 0x1
+
+#define IOMUXC_GPIO_EMC_B2_02_SEMC_DATA18 0x0C0 0x304 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B2_02_USDHC2_WP 0x0C0 0x304 0x6D4 0x1 0x0
+#define IOMUXC_GPIO_EMC_B2_02_VIDEO_MUX_CSI_DATA23 0x0C0 0x304 0x0 0x3 0x0
+#define IOMUXC_GPIO_EMC_B2_02_FLEXSPI2_B_DATA04 0x0C0 0x304 0x0 0x4 0x0
+#define IOMUXC_GPIO_EMC_B2_02_GPIO_MUX2_IO12 0x0C0 0x304 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B2_02_XBAR1_INOUT22 0x0C0 0x304 0x6E0 0x6 0x0
+#define IOMUXC_GPIO_EMC_B2_02_ENET_QOS_1588_EVENT1_AUX_IN 0x0C0 0x304 0x0 0x7 0x0
+#define IOMUXC_GPIO_EMC_B2_02_LPSPI1_SOUT 0x0C0 0x304 0x5D8 0x8 0x0
+#define IOMUXC_GPIO_EMC_B2_02_GPIO8_IO12 0x0C0 0x304 0x0 0xA 0x0
+#define IOMUXC_GPIO_EMC_B2_02_FLEXPWM3_PWM1_A 0x0C0 0x304 0x534 0xB 0x1
+
+#define IOMUXC_GPIO_EMC_B2_03_SEMC_DATA19 0x0C4 0x308 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B2_03_USDHC2_VSELECT 0x0C4 0x308 0x0 0x1 0x0
+#define IOMUXC_GPIO_EMC_B2_03_VIDEO_MUX_CSI_DATA22 0x0C4 0x308 0x0 0x3 0x0
+#define IOMUXC_GPIO_EMC_B2_03_FLEXSPI2_B_DATA03 0x0C4 0x308 0x0 0x4 0x0
+#define IOMUXC_GPIO_EMC_B2_03_GPIO_MUX2_IO13 0x0C4 0x308 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B2_03_XBAR1_INOUT23 0x0C4 0x308 0x6E4 0x6 0x0
+#define IOMUXC_GPIO_EMC_B2_03_ENET_1G_TX_DATA03 0x0C4 0x308 0x0 0x7 0x0
+#define IOMUXC_GPIO_EMC_B2_03_LPSPI1_SIN 0x0C4 0x308 0x5D4 0x8 0x0
+#define IOMUXC_GPIO_EMC_B2_03_GPIO8_IO13 0x0C4 0x308 0x0 0xA 0x0
+#define IOMUXC_GPIO_EMC_B2_03_FLEXPWM3_PWM1_B 0x0C4 0x308 0x544 0xB 0x1
+
+#define IOMUXC_GPIO_EMC_B2_04_SEMC_DATA20 0x0C8 0x30C 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B2_04_USDHC2_RESET_B 0x0C8 0x30C 0x0 0x1 0x0
+#define IOMUXC_GPIO_EMC_B2_04_SAI2_MCLK 0x0C8 0x30C 0x0 0x2 0x0
+#define IOMUXC_GPIO_EMC_B2_04_VIDEO_MUX_CSI_DATA21 0x0C8 0x30C 0x0 0x3 0x0
+#define IOMUXC_GPIO_EMC_B2_04_FLEXSPI2_B_DATA02 0x0C8 0x30C 0x0 0x4 0x0
+#define IOMUXC_GPIO_EMC_B2_04_GPIO_MUX2_IO14 0x0C8 0x30C 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B2_04_XBAR1_INOUT24 0x0C8 0x30C 0x6E8 0x6 0x0
+#define IOMUXC_GPIO_EMC_B2_04_ENET_1G_TX_DATA02 0x0C8 0x30C 0x0 0x7 0x0
+#define IOMUXC_GPIO_EMC_B2_04_LPSPI3_SCK 0x0C8 0x30C 0x600 0x8 0x0
+#define IOMUXC_GPIO_EMC_B2_04_GPIO8_IO14 0x0C8 0x30C 0x0 0xA 0x0
+#define IOMUXC_GPIO_EMC_B2_04_FLEXPWM3_PWM2_A 0x0C8 0x30C 0x538 0xB 0x1
+
+#define IOMUXC_GPIO_EMC_B2_05_SEMC_DATA21 0x0CC 0x310 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B2_05_GPT3_CLK 0x0CC 0x310 0x598 0x1 0x0
+#define IOMUXC_GPIO_EMC_B2_05_SAI2_RX_SYNC 0x0CC 0x310 0x0 0x2 0x0
+#define IOMUXC_GPIO_EMC_B2_05_VIDEO_MUX_CSI_DATA20 0x0CC 0x310 0x0 0x3 0x0
+#define IOMUXC_GPIO_EMC_B2_05_FLEXSPI2_B_DATA01 0x0CC 0x310 0x0 0x4 0x0
+#define IOMUXC_GPIO_EMC_B2_05_GPIO_MUX2_IO15 0x0CC 0x310 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B2_05_XBAR1_INOUT25 0x0CC 0x310 0x6EC 0x6 0x0
+#define IOMUXC_GPIO_EMC_B2_05_ENET_1G_RX_CLK 0x0CC 0x310 0x4CC 0x7 0x0
+#define IOMUXC_GPIO_EMC_B2_05_LPSPI3_PCS0 0x0CC 0x310 0x5F0 0x8 0x0
+#define IOMUXC_GPIO_EMC_B2_05_PIT1_TRIGGER0 0x0CC 0x310 0x0 0x9 0x0
+#define IOMUXC_GPIO_EMC_B2_05_GPIO8_IO15 0x0CC 0x310 0x0 0xA 0x0
+#define IOMUXC_GPIO_EMC_B2_05_FLEXPWM3_PWM2_B 0x0CC 0x310 0x548 0xB 0x1
+
+#define IOMUXC_GPIO_EMC_B2_06_SEMC_DATA22 0x0D0 0x314 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B2_06_GPT3_CAPTURE1 0x0D0 0x314 0x590 0x1 0x0
+#define IOMUXC_GPIO_EMC_B2_06_GPIO8_IO16 0x0D0 0x314 0x0 0xA 0x0
+#define IOMUXC_GPIO_EMC_B2_06_SAI2_RX_BCLK 0x0D0 0x314 0x0 0x2 0x0
+#define IOMUXC_GPIO_EMC_B2_06_FLEXPWM3_PWM3_A 0x0D0 0x314 0x53C 0xB 0x1
+#define IOMUXC_GPIO_EMC_B2_06_VIDEO_MUX_CSI_DATA19 0x0D0 0x314 0x0 0x3 0x0
+#define IOMUXC_GPIO_EMC_B2_06_FLEXSPI2_B_DATA00 0x0D0 0x314 0x0 0x4 0x0
+#define IOMUXC_GPIO_EMC_B2_06_GPIO_MUX2_IO16 0x0D0 0x314 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B2_06_XBAR1_INOUT26 0x0D0 0x314 0x6F0 0x6 0x0
+#define IOMUXC_GPIO_EMC_B2_06_ENET_1G_TX_ER 0x0D0 0x314 0x0 0x7 0x0
+#define IOMUXC_GPIO_EMC_B2_06_LPSPI3_SOUT 0x0D0 0x314 0x608 0x8 0x0
+#define IOMUXC_GPIO_EMC_B2_06_PIT1_TRIGGER1 0x0D0 0x314 0x0 0x9 0x0
+
+#define IOMUXC_GPIO_EMC_B2_07_SEMC_DATA23 0x0D4 0x318 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B2_07_GPT3_CAPTURE2 0x0D4 0x318 0x594 0x1 0x0
+#define IOMUXC_GPIO_EMC_B2_07_SAI2_RX_DATA 0x0D4 0x318 0x0 0x2 0x0
+#define IOMUXC_GPIO_EMC_B2_07_VIDEO_MUX_CSI_DATA18 0x0D4 0x318 0x0 0x3 0x0
+#define IOMUXC_GPIO_EMC_B2_07_FLEXSPI2_B_DQS 0x0D4 0x318 0x0 0x4 0x0
+#define IOMUXC_GPIO_EMC_B2_07_GPIO_MUX2_IO17 0x0D4 0x318 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B2_07_XBAR1_INOUT27 0x0D4 0x318 0x6F4 0x6 0x0
+#define IOMUXC_GPIO_EMC_B2_07_ENET_1G_RX_DATA03 0x0D4 0x318 0x4DC 0x7 0x0
+#define IOMUXC_GPIO_EMC_B2_07_LPSPI3_SIN 0x0D4 0x318 0x604 0x8 0x0
+#define IOMUXC_GPIO_EMC_B2_07_PIT1_TRIGGER2 0x0D4 0x318 0x0 0x9 0x0
+#define IOMUXC_GPIO_EMC_B2_07_GPIO8_IO17 0x0D4 0x318 0x0 0xA 0x0
+#define IOMUXC_GPIO_EMC_B2_07_FLEXPWM3_PWM3_B 0x0D4 0x318 0x54C 0xB 0x1
+
+#define IOMUXC_GPIO_EMC_B2_08_SEMC_DM02 0x0D8 0x31C 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B2_08_GPT3_COMPARE1 0x0D8 0x31C 0x0 0x1 0x0
+#define IOMUXC_GPIO_EMC_B2_08_SAI2_TX_DATA 0x0D8 0x31C 0x0 0x2 0x0
+#define IOMUXC_GPIO_EMC_B2_08_VIDEO_MUX_CSI_DATA17 0x0D8 0x31C 0x0 0x3 0x0
+#define IOMUXC_GPIO_EMC_B2_08_FLEXSPI2_B_SS0_B 0x0D8 0x31C 0x0 0x4 0x0
+#define IOMUXC_GPIO_EMC_B2_08_GPIO_MUX2_IO18 0x0D8 0x31C 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B2_08_XBAR1_INOUT28 0x0D8 0x31C 0x6F8 0x6 0x0
+#define IOMUXC_GPIO_EMC_B2_08_ENET_1G_RX_DATA02 0x0D8 0x31C 0x4D8 0x7 0x0
+#define IOMUXC_GPIO_EMC_B2_08_LPSPI3_PCS1 0x0D8 0x31C 0x5F4 0x8 0x0
+#define IOMUXC_GPIO_EMC_B2_08_PIT1_TRIGGER3 0x0D8 0x31C 0x0 0x9 0x0
+#define IOMUXC_GPIO_EMC_B2_08_GPIO8_IO18 0x0D8 0x31C 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_EMC_B2_09_GPIO8_IO19 0x0DC 0x320 0x0 0xA 0x0
+#define IOMUXC_GPIO_EMC_B2_09_SEMC_DATA24 0x0DC 0x320 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B2_09_GPT3_COMPARE2 0x0DC 0x320 0x0 0x1 0x0
+#define IOMUXC_GPIO_EMC_B2_09_SAI2_TX_BCLK 0x0DC 0x320 0x0 0x2 0x0
+#define IOMUXC_GPIO_EMC_B2_09_VIDEO_MUX_CSI_DATA16 0x0DC 0x320 0x0 0x3 0x0
+#define IOMUXC_GPIO_EMC_B2_09_FLEXSPI2_B_SCLK 0x0DC 0x320 0x0 0x4 0x0
+#define IOMUXC_GPIO_EMC_B2_09_GPIO_MUX2_IO19 0x0DC 0x320 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B2_09_XBAR1_INOUT29 0x0DC 0x320 0x6FC 0x6 0x0
+#define IOMUXC_GPIO_EMC_B2_09_ENET_1G_CRS 0x0DC 0x320 0x0 0x7 0x0
+#define IOMUXC_GPIO_EMC_B2_09_LPSPI3_PCS2 0x0DC 0x320 0x5F8 0x8 0x0
+#define IOMUXC_GPIO_EMC_B2_09_TMR1_TIMER0 0x0DC 0x320 0x63C 0x9 0x1
+
+#define IOMUXC_GPIO_EMC_B2_10_GPIO8_IO20 0x0E0 0x324 0x0 0xA 0x0
+#define IOMUXC_GPIO_EMC_B2_10_SEMC_DATA25 0x0E0 0x324 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B2_10_GPT3_COMPARE3 0x0E0 0x324 0x0 0x1 0x0
+#define IOMUXC_GPIO_EMC_B2_10_SAI2_TX_SYNC 0x0E0 0x324 0x0 0x2 0x0
+#define IOMUXC_GPIO_EMC_B2_10_VIDEO_MUX_CSI_FIELD 0x0E0 0x324 0x0 0x3 0x0
+#define IOMUXC_GPIO_EMC_B2_10_FLEXSPI2_A_SCLK 0x0E0 0x324 0x58C 0x4 0x0
+#define IOMUXC_GPIO_EMC_B2_10_GPIO_MUX2_IO20 0x0E0 0x324 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B2_10_XBAR1_INOUT30 0x0E0 0x324 0x700 0x6 0x0
+#define IOMUXC_GPIO_EMC_B2_10_ENET_1G_COL 0x0E0 0x324 0x0 0x7 0x0
+#define IOMUXC_GPIO_EMC_B2_10_LPSPI3_PCS3 0x0E0 0x324 0x5FC 0x8 0x0
+#define IOMUXC_GPIO_EMC_B2_10_TMR1_TIMER1 0x0E0 0x324 0x640 0x9 0x1
+
+#define IOMUXC_GPIO_EMC_B2_11_SEMC_DATA26 0x0E4 0x328 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B2_11_SPDIF_IN 0x0E4 0x328 0x6B4 0x1 0x0
+#define IOMUXC_GPIO_EMC_B2_11_ENET_1G_TX_DATA00 0x0E4 0x328 0x0 0x2 0x0
+#define IOMUXC_GPIO_EMC_B2_11_SAI3_RX_SYNC 0x0E4 0x328 0x0 0x3 0x0
+#define IOMUXC_GPIO_EMC_B2_11_FLEXSPI2_A_SS0_B 0x0E4 0x328 0x0 0x4 0x0
+#define IOMUXC_GPIO_EMC_B2_11_GPIO_MUX2_IO21 0x0E4 0x328 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B2_11_XBAR1_INOUT31 0x0E4 0x328 0x704 0x6 0x0
+#define IOMUXC_GPIO_EMC_B2_11_EMVSIM1_IO 0x0E4 0x328 0x69C 0x8 0x0
+#define IOMUXC_GPIO_EMC_B2_11_TMR1_TIMER2 0x0E4 0x328 0x644 0x9 0x0
+#define IOMUXC_GPIO_EMC_B2_11_GPIO8_IO21 0x0E4 0x328 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_EMC_B2_12_SEMC_DATA27 0x0E8 0x32C 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B2_12_SPDIF_OUT 0x0E8 0x32C 0x0 0x1 0x0
+#define IOMUXC_GPIO_EMC_B2_12_ENET_1G_TX_DATA01 0x0E8 0x32C 0x0 0x2 0x0
+#define IOMUXC_GPIO_EMC_B2_12_SAI3_RX_BCLK 0x0E8 0x32C 0x0 0x3 0x0
+#define IOMUXC_GPIO_EMC_B2_12_FLEXSPI2_A_DQS 0x0E8 0x32C 0x0 0x4 0x0
+#define IOMUXC_GPIO_EMC_B2_12_GPIO_MUX2_IO22 0x0E8 0x32C 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B2_12_XBAR1_INOUT32 0x0E8 0x32C 0x708 0x6 0x0
+#define IOMUXC_GPIO_EMC_B2_12_EMVSIM1_CLK 0x0E8 0x32C 0x0 0x8 0x0
+#define IOMUXC_GPIO_EMC_B2_12_TMR1_TIMER3 0x0E8 0x32C 0x0 0x9 0x0
+#define IOMUXC_GPIO_EMC_B2_12_GPIO8_IO22 0x0E8 0x32C 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_EMC_B2_13_GPIO8_IO23 0x0EC 0x330 0x0 0xA 0x0
+#define IOMUXC_GPIO_EMC_B2_13_SEMC_DATA28 0x0EC 0x330 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B2_13_ENET_1G_TX_EN 0x0EC 0x330 0x0 0x2 0x0
+#define IOMUXC_GPIO_EMC_B2_13_SAI3_RX_DATA 0x0EC 0x330 0x0 0x3 0x0
+#define IOMUXC_GPIO_EMC_B2_13_FLEXSPI2_A_DATA00 0x0EC 0x330 0x57C 0x4 0x0
+#define IOMUXC_GPIO_EMC_B2_13_GPIO_MUX2_IO23 0x0EC 0x330 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B2_13_XBAR1_INOUT33 0x0EC 0x330 0x70C 0x6 0x0
+#define IOMUXC_GPIO_EMC_B2_13_EMVSIM1_RST 0x0EC 0x330 0x0 0x8 0x0
+#define IOMUXC_GPIO_EMC_B2_13_TMR2_TIMER0 0x0EC 0x330 0x648 0x9 0x1
+
+#define IOMUXC_GPIO_EMC_B2_14_SEMC_DATA29 0x0F0 0x334 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B2_14_ENET_1G_TX_CLK_IO 0x0F0 0x334 0x4E8 0x2 0x0
+#define IOMUXC_GPIO_EMC_B2_14_SAI3_TX_DATA 0x0F0 0x334 0x0 0x3 0x0
+#define IOMUXC_GPIO_EMC_B2_14_FLEXSPI2_A_DATA01 0x0F0 0x334 0x580 0x4 0x0
+#define IOMUXC_GPIO_EMC_B2_14_GPIO_MUX2_IO24 0x0F0 0x334 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B2_14_XBAR1_INOUT34 0x0F0 0x334 0x710 0x6 0x0
+#define IOMUXC_GPIO_EMC_B2_14_SFA_ipp_do_atx_clk_under_test 0x0F0 0x334 0x0 0x7 0x0
+#define IOMUXC_GPIO_EMC_B2_14_EMVSIM1_SVEN 0x0F0 0x334 0x0 0x8 0x0
+#define IOMUXC_GPIO_EMC_B2_14_TMR2_TIMER1 0x0F0 0x334 0x64C 0x9 0x1
+#define IOMUXC_GPIO_EMC_B2_14_GPIO8_IO24 0x0F0 0x334 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_EMC_B2_15_SEMC_DATA30 0x0F4 0x338 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B2_15_ENET_1G_RX_DATA00 0x0F4 0x338 0x4D0 0x2 0x0
+#define IOMUXC_GPIO_EMC_B2_15_SAI3_TX_BCLK 0x0F4 0x338 0x0 0x3 0x0
+#define IOMUXC_GPIO_EMC_B2_15_FLEXSPI2_A_DATA02 0x0F4 0x338 0x584 0x4 0x0
+#define IOMUXC_GPIO_EMC_B2_15_GPIO_MUX2_IO25 0x0F4 0x338 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B2_15_XBAR1_INOUT35 0x0F4 0x338 0x714 0x6 0x0
+#define IOMUXC_GPIO_EMC_B2_15_EMVSIM1_PD 0x0F4 0x338 0x6A0 0x8 0x0
+#define IOMUXC_GPIO_EMC_B2_15_TMR2_TIMER2 0x0F4 0x338 0x650 0x9 0x0
+#define IOMUXC_GPIO_EMC_B2_15_GPIO8_IO25 0x0F4 0x338 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_EMC_B2_16_GPIO8_IO26 0x0F8 0x33C 0x0 0xA 0x0
+#define IOMUXC_GPIO_EMC_B2_16_SEMC_DATA31 0x0F8 0x33C 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B2_16_XBAR1_INOUT14 0x0F8 0x33C 0x0 0x1 0x0
+#define IOMUXC_GPIO_EMC_B2_16_ENET_1G_RX_DATA01 0x0F8 0x33C 0x4D4 0x2 0x0
+#define IOMUXC_GPIO_EMC_B2_16_SAI3_TX_SYNC 0x0F8 0x33C 0x0 0x3 0x0
+#define IOMUXC_GPIO_EMC_B2_16_FLEXSPI2_A_DATA03 0x0F8 0x33C 0x588 0x4 0x0
+#define IOMUXC_GPIO_EMC_B2_16_GPIO_MUX2_IO26 0x0F8 0x33C 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B2_16_EMVSIM1_POWER_FAIL 0x0F8 0x33C 0x6A4 0x8 0x0
+#define IOMUXC_GPIO_EMC_B2_16_TMR2_TIMER3 0x0F8 0x33C 0x0 0x9 0x0
+
+#define IOMUXC_GPIO_EMC_B2_17_SEMC_DM03 0x0FC 0x340 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B2_17_XBAR1_INOUT15 0x0FC 0x340 0x0 0x1 0x0
+#define IOMUXC_GPIO_EMC_B2_17_ENET_1G_RX_EN 0x0FC 0x340 0x4E0 0x2 0x0
+#define IOMUXC_GPIO_EMC_B2_17_SAI3_MCLK 0x0FC 0x340 0x0 0x3 0x0
+#define IOMUXC_GPIO_EMC_B2_17_FLEXSPI2_A_DATA04 0x0FC 0x340 0x0 0x4 0x0
+#define IOMUXC_GPIO_EMC_B2_17_GPIO_MUX2_IO27 0x0FC 0x340 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B2_17_WDOG1_ANY 0x0FC 0x340 0x0 0x8 0x0
+#define IOMUXC_GPIO_EMC_B2_17_TMR3_TIMER0 0x0FC 0x340 0x654 0x9 0x1
+#define IOMUXC_GPIO_EMC_B2_17_GPIO8_IO27 0x0FC 0x340 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_EMC_B2_18_SEMC_DQS4 0x100 0x344 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B2_18_XBAR1_INOUT16 0x100 0x344 0x0 0x1 0x0
+#define IOMUXC_GPIO_EMC_B2_18_ENET_1G_RX_ER 0x100 0x344 0x4E4 0x2 0x0
+#define IOMUXC_GPIO_EMC_B2_18_EWM_OUT_B 0x100 0x344 0x0 0x3 0x0
+#define IOMUXC_GPIO_EMC_B2_18_FLEXSPI2_A_DATA05 0x100 0x344 0x0 0x4 0x0
+#define IOMUXC_GPIO_EMC_B2_18_GPIO_MUX2_IO28 0x100 0x344 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B2_18_FLEXSPI1_A_DQS 0x100 0x344 0x550 0x6 0x0
+#define IOMUXC_GPIO_EMC_B2_18_WDOG1_B 0x100 0x344 0x0 0x8 0x0
+#define IOMUXC_GPIO_EMC_B2_18_TMR3_TIMER1 0x100 0x344 0x658 0x9 0x1
+#define IOMUXC_GPIO_EMC_B2_18_GPIO8_IO28 0x100 0x344 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_EMC_B2_19_GPIO8_IO29 0x104 0x348 0x0 0xA 0x0
+#define IOMUXC_GPIO_EMC_B2_19_SEMC_CLKX00 0x104 0x348 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B2_19_ENET_MDC 0x104 0x348 0x0 0x1 0x0
+#define IOMUXC_GPIO_EMC_B2_19_ENET_1G_MDC 0x104 0x348 0x0 0x2 0x0
+#define IOMUXC_GPIO_EMC_B2_19_ENET_1G_REF_CLK 0x104 0x348 0x4C4 0x3 0x0
+#define IOMUXC_GPIO_EMC_B2_19_FLEXSPI2_A_DATA06 0x104 0x348 0x0 0x4 0x0
+#define IOMUXC_GPIO_EMC_B2_19_GPIO_MUX2_IO29 0x104 0x348 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B2_19_ENET_QOS_MDC 0x104 0x348 0x0 0x8 0x0
+#define IOMUXC_GPIO_EMC_B2_19_TMR3_TIMER2 0x104 0x348 0x65C 0x9 0x0
+
+#define IOMUXC_GPIO_EMC_B2_20_GPIO8_IO30 0x108 0x34C 0x0 0xA 0x0
+#define IOMUXC_GPIO_EMC_B2_20_SEMC_CLKX01 0x108 0x34C 0x0 0x0 0x0
+#define IOMUXC_GPIO_EMC_B2_20_ENET_MDIO 0x108 0x34C 0x4AC 0x1 0x0
+#define IOMUXC_GPIO_EMC_B2_20_ENET_1G_MDIO 0x108 0x34C 0x4C8 0x2 0x1
+#define IOMUXC_GPIO_EMC_B2_20_ENET_QOS_REF_CLK 0x108 0x34C 0x4A0 0x3 0x0
+#define IOMUXC_GPIO_EMC_B2_20_FLEXSPI2_A_DATA07 0x108 0x34C 0x0 0x4 0x0
+#define IOMUXC_GPIO_EMC_B2_20_GPIO_MUX2_IO30 0x108 0x34C 0x0 0x5 0x0
+#define IOMUXC_GPIO_EMC_B2_20_ENET_QOS_MDIO 0x108 0x34C 0x4EC 0x8 0x0
+#define IOMUXC_GPIO_EMC_B2_20_TMR3_TIMER3 0x108 0x34C 0x0 0x9 0x0
+
+#define IOMUXC_GPIO_AD_00_GPIO8_IO31 0x10C 0x350 0x0 0xA 0x0
+#define IOMUXC_GPIO_AD_00_EMVSIM1_IO 0x10C 0x350 0x69C 0x0 0x1
+#define IOMUXC_GPIO_AD_00_FLEXCAN2_TX 0x10C 0x350 0x0 0x1 0x0
+#define IOMUXC_GPIO_AD_00_ENET_1G_1588_EVENT1_IN 0x10C 0x350 0x0 0x2 0x0
+#define IOMUXC_GPIO_AD_00_GPT2_CAPTURE1 0x10C 0x350 0x0 0x3 0x0
+#define IOMUXC_GPIO_AD_00_FLEXPWM1_PWM0_A 0x10C 0x350 0x500 0x4 0x1
+#define IOMUXC_GPIO_AD_00_GPIO_MUX2_IO31 0x10C 0x350 0x0 0x5 0x0
+#define IOMUXC_GPIO_AD_00_LPUART7_TXD 0x10C 0x350 0x630 0x6 0x0
+#define IOMUXC_GPIO_AD_00_FLEXIO2_D00 0x10C 0x350 0x0 0x8 0x0
+#define IOMUXC_GPIO_AD_00_FLEXSPI2_B_SS1_B 0x10C 0x350 0x0 0x9 0x0
+
+#define IOMUXC_GPIO_AD_01_GPIO9_IO00 0x110 0x354 0x0 0xA 0x0
+#define IOMUXC_GPIO_AD_01_EMVSIM1_CLK 0x110 0x354 0x0 0x0 0x0
+#define IOMUXC_GPIO_AD_01_FLEXCAN2_RX 0x110 0x354 0x49C 0x1 0x0
+#define IOMUXC_GPIO_AD_01_ENET_1G_1588_EVENT1_OUT 0x110 0x354 0x0 0x2 0x0
+#define IOMUXC_GPIO_AD_01_GPT2_CAPTURE2 0x110 0x354 0x0 0x3 0x0
+#define IOMUXC_GPIO_AD_01_FLEXPWM1_PWM0_B 0x110 0x354 0x50C 0x4 0x1
+#define IOMUXC_GPIO_AD_01_GPIO_MUX3_IO00 0x110 0x354 0x0 0x5 0x0
+#define IOMUXC_GPIO_AD_01_LPUART7_RXD 0x110 0x354 0x62C 0x6 0x0
+#define IOMUXC_GPIO_AD_01_FLEXIO2_D01 0x110 0x354 0x0 0x8 0x0
+#define IOMUXC_GPIO_AD_01_FLEXSPI2_A_SS1_B 0x110 0x354 0x0 0x9 0x0
+
+#define IOMUXC_GPIO_AD_02_GPIO9_IO01 0x114 0x358 0x0 0xA 0x0
+#define IOMUXC_GPIO_AD_02_EMVSIM1_RST 0x114 0x358 0x0 0x0 0x0
+#define IOMUXC_GPIO_AD_02_LPUART7_CTS_B 0x114 0x358 0x0 0x1 0x0
+#define IOMUXC_GPIO_AD_02_ENET_1G_1588_EVENT2_IN 0x114 0x358 0x0 0x2 0x0
+#define IOMUXC_GPIO_AD_02_GPT2_COMPARE1 0x114 0x358 0x0 0x3 0x0
+#define IOMUXC_GPIO_AD_02_FLEXPWM1_PWM1_A 0x114 0x358 0x504 0x4 0x1
+#define IOMUXC_GPIO_AD_02_GPIO_MUX3_IO01 0x114 0x358 0x0 0x5 0x0
+#define IOMUXC_GPIO_AD_02_LPUART8_TXD 0x114 0x358 0x638 0x6 0x0
+#define IOMUXC_GPIO_AD_02_FLEXIO2_D02 0x114 0x358 0x0 0x8 0x0
+#define IOMUXC_GPIO_AD_02_VIDEO_MUX_EXT_DCIC1 0x114 0x358 0x0 0x9 0x0
+
+#define IOMUXC_GPIO_AD_03_GPIO9_IO02 0x118 0x35C 0x0 0xA 0x0
+#define IOMUXC_GPIO_AD_03_EMVSIM1_SVEN 0x118 0x35C 0x0 0x0 0x0
+#define IOMUXC_GPIO_AD_03_LPUART7_RTS_B 0x118 0x35C 0x0 0x1 0x0
+#define IOMUXC_GPIO_AD_03_ENET_1G_1588_EVENT2_OUT 0x118 0x35C 0x0 0x2 0x0
+#define IOMUXC_GPIO_AD_03_GPT2_COMPARE2 0x118 0x35C 0x0 0x3 0x0
+#define IOMUXC_GPIO_AD_03_FLEXPWM1_PWM1_B 0x118 0x35C 0x510 0x4 0x1
+#define IOMUXC_GPIO_AD_03_GPIO_MUX3_IO02 0x118 0x35C 0x0 0x5 0x0
+#define IOMUXC_GPIO_AD_03_LPUART8_RXD 0x118 0x35C 0x634 0x6 0x0
+#define IOMUXC_GPIO_AD_03_FLEXIO2_D03 0x118 0x35C 0x0 0x8 0x0
+#define IOMUXC_GPIO_AD_03_VIDEO_MUX_EXT_DCIC2 0x118 0x35C 0x0 0x9 0x0
+
+#define IOMUXC_GPIO_AD_04_EMVSIM1_PD 0x11C 0x360 0x6A0 0x0 0x1
+#define IOMUXC_GPIO_AD_04_LPUART8_CTS_B 0x11C 0x360 0x0 0x1 0x0
+#define IOMUXC_GPIO_AD_04_ENET_1G_1588_EVENT3_IN 0x11C 0x360 0x0 0x2 0x0
+#define IOMUXC_GPIO_AD_04_GPT2_COMPARE3 0x11C 0x360 0x0 0x3 0x0
+#define IOMUXC_GPIO_AD_04_FLEXPWM1_PWM2_A 0x11C 0x360 0x508 0x4 0x1
+#define IOMUXC_GPIO_AD_04_GPIO_MUX3_IO03 0x11C 0x360 0x0 0x5 0x0
+#define IOMUXC_GPIO_AD_04_WDOG1_B 0x11C 0x360 0x0 0x6 0x0
+#define IOMUXC_GPIO_AD_04_FLEXIO2_D04 0x11C 0x360 0x0 0x8 0x0
+#define IOMUXC_GPIO_AD_04_TMR4_TIMER0 0x11C 0x360 0x660 0x9 0x1
+#define IOMUXC_GPIO_AD_04_GPIO9_IO03 0x11C 0x360 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_AD_05_EMVSIM1_POWER_FAIL 0x120 0x364 0x6A4 0x0 0x1
+#define IOMUXC_GPIO_AD_05_LPUART8_RTS_B 0x120 0x364 0x0 0x1 0x0
+#define IOMUXC_GPIO_AD_05_ENET_1G_1588_EVENT3_OUT 0x120 0x364 0x0 0x2 0x0
+#define IOMUXC_GPIO_AD_05_GPT2_CLK 0x120 0x364 0x0 0x3 0x0
+#define IOMUXC_GPIO_AD_05_FLEXPWM1_PWM2_B 0x120 0x364 0x514 0x4 0x1
+#define IOMUXC_GPIO_AD_05_GPIO_MUX3_IO04 0x120 0x364 0x0 0x5 0x0
+#define IOMUXC_GPIO_AD_05_WDOG2_B 0x120 0x364 0x0 0x6 0x0
+#define IOMUXC_GPIO_AD_05_FLEXIO2_D05 0x120 0x364 0x0 0x8 0x0
+#define IOMUXC_GPIO_AD_05_TMR4_TIMER1 0x120 0x364 0x664 0x9 0x1
+#define IOMUXC_GPIO_AD_05_GPIO9_IO04 0x120 0x364 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_AD_06_USB_OTG2_OC 0x124 0x368 0x6B8 0x0 0x0
+#define IOMUXC_GPIO_AD_06_FLEXCAN1_TX 0x124 0x368 0x0 0x1 0x0
+#define IOMUXC_GPIO_AD_06_EMVSIM2_IO 0x124 0x368 0x6A8 0x2 0x0
+#define IOMUXC_GPIO_AD_06_GPT3_CAPTURE1 0x124 0x368 0x590 0x3 0x1
+#define IOMUXC_GPIO_AD_06_VIDEO_MUX_CSI_DATA15 0x124 0x368 0x0 0x4 0x0
+#define IOMUXC_GPIO_AD_06_GPIO_MUX3_IO05 0x124 0x368 0x0 0x5 0x0
+#define IOMUXC_GPIO_AD_06_ENET_1588_EVENT1_IN 0x124 0x368 0x0 0x6 0x0
+#define IOMUXC_GPIO_AD_06_FLEXIO2_D06 0x124 0x368 0x0 0x8 0x0
+#define IOMUXC_GPIO_AD_06_TMR4_TIMER2 0x124 0x368 0x668 0x9 0x0
+#define IOMUXC_GPIO_AD_06_GPIO9_IO05 0x124 0x368 0x0 0xA 0x0
+#define IOMUXC_GPIO_AD_06_FLEXPWM1_PWM0_X 0x124 0x368 0x0 0xB 0x0
+
+#define IOMUXC_GPIO_AD_07_USB_OTG2_PWR 0x128 0x36C 0x0 0x0 0x0
+#define IOMUXC_GPIO_AD_07_FLEXCAN1_RX 0x128 0x36C 0x498 0x1 0x0
+#define IOMUXC_GPIO_AD_07_EMVSIM2_CLK 0x128 0x36C 0x0 0x2 0x0
+#define IOMUXC_GPIO_AD_07_GPT3_CAPTURE2 0x128 0x36C 0x594 0x3 0x1
+#define IOMUXC_GPIO_AD_07_VIDEO_MUX_CSI_DATA14 0x128 0x36C 0x0 0x4 0x0
+#define IOMUXC_GPIO_AD_07_GPIO_MUX3_IO06 0x128 0x36C 0x0 0x5 0x0
+#define IOMUXC_GPIO_AD_07_ENET_1588_EVENT1_OUT 0x128 0x36C 0x0 0x6 0x0
+#define IOMUXC_GPIO_AD_07_FLEXIO2_D07 0x128 0x36C 0x0 0x8 0x0
+#define IOMUXC_GPIO_AD_07_TMR4_TIMER3 0x128 0x36C 0x0 0x9 0x0
+#define IOMUXC_GPIO_AD_07_GPIO9_IO06 0x128 0x36C 0x0 0xA 0x0
+#define IOMUXC_GPIO_AD_07_FLEXPWM1_PWM1_X 0x128 0x36C 0x0 0xB 0x0
+
+#define IOMUXC_GPIO_AD_08_USBPHY2_OTG_ID 0x12C 0x370 0x6C4 0x0 0x0
+#define IOMUXC_GPIO_AD_08_LPI2C1_SCL 0x12C 0x370 0x5AC 0x1 0x0
+#define IOMUXC_GPIO_AD_08_EMVSIM2_RST 0x12C 0x370 0x0 0x2 0x0
+#define IOMUXC_GPIO_AD_08_GPT3_COMPARE1 0x12C 0x370 0x0 0x3 0x0
+#define IOMUXC_GPIO_AD_08_VIDEO_MUX_CSI_DATA13 0x12C 0x370 0x0 0x4 0x0
+#define IOMUXC_GPIO_AD_08_GPIO_MUX3_IO07 0x12C 0x370 0x0 0x5 0x0
+#define IOMUXC_GPIO_AD_08_ENET_1588_EVENT2_IN 0x12C 0x370 0x0 0x6 0x0
+#define IOMUXC_GPIO_AD_08_FLEXIO2_D08 0x12C 0x370 0x0 0x8 0x0
+#define IOMUXC_GPIO_AD_08_GPIO9_IO07 0x12C 0x370 0x0 0xA 0x0
+#define IOMUXC_GPIO_AD_08_FLEXPWM1_PWM2_X 0x12C 0x370 0x0 0xB 0x0
+
+#define IOMUXC_GPIO_AD_09_USBPHY1_OTG_ID 0x130 0x374 0x6C0 0x0 0x0
+#define IOMUXC_GPIO_AD_09_LPI2C1_SDA 0x130 0x374 0x5B0 0x1 0x0
+#define IOMUXC_GPIO_AD_09_EMVSIM2_SVEN 0x130 0x374 0x0 0x2 0x0
+#define IOMUXC_GPIO_AD_09_GPT3_COMPARE2 0x130 0x374 0x0 0x3 0x0
+#define IOMUXC_GPIO_AD_09_VIDEO_MUX_CSI_DATA12 0x130 0x374 0x0 0x4 0x0
+#define IOMUXC_GPIO_AD_09_GPIO_MUX3_IO08 0x130 0x374 0x0 0x5 0x0
+#define IOMUXC_GPIO_AD_09_ENET_1588_EVENT2_OUT 0x130 0x374 0x0 0x6 0x0
+#define IOMUXC_GPIO_AD_09_FLEXIO2_D09 0x130 0x374 0x0 0x8 0x0
+#define IOMUXC_GPIO_AD_09_GPIO9_IO08 0x130 0x374 0x0 0xA 0x0
+#define IOMUXC_GPIO_AD_09_FLEXPWM1_PWM3_X 0x130 0x374 0x0 0xB 0x0
+
+#define IOMUXC_GPIO_AD_10_USB_OTG1_PWR 0x134 0x378 0x0 0x0 0x0
+#define IOMUXC_GPIO_AD_10_LPI2C1_SCLS 0x134 0x378 0x0 0x1 0x0
+#define IOMUXC_GPIO_AD_10_EMVSIM2_PD 0x134 0x378 0x6AC 0x2 0x0
+#define IOMUXC_GPIO_AD_10_GPT3_COMPARE3 0x134 0x378 0x0 0x3 0x0
+#define IOMUXC_GPIO_AD_10_VIDEO_MUX_CSI_DATA11 0x134 0x378 0x0 0x4 0x0
+#define IOMUXC_GPIO_AD_10_GPIO_MUX3_IO09 0x134 0x378 0x0 0x5 0x0
+#define IOMUXC_GPIO_AD_10_ENET_1588_EVENT3_IN 0x134 0x378 0x0 0x6 0x0
+#define IOMUXC_GPIO_AD_10_FLEXIO2_D10 0x134 0x378 0x0 0x8 0x0
+#define IOMUXC_GPIO_AD_10_GPIO9_IO09 0x134 0x378 0x0 0xA 0x0
+#define IOMUXC_GPIO_AD_10_FLEXPWM2_PWM0_X 0x134 0x378 0x0 0xB 0x0
+
+#define IOMUXC_GPIO_AD_11_USB_OTG1_OC 0x138 0x37C 0x6BC 0x0 0x0
+#define IOMUXC_GPIO_AD_11_LPI2C1_SDAS 0x138 0x37C 0x0 0x1 0x0
+#define IOMUXC_GPIO_AD_11_EMVSIM2_POWER_FAIL 0x138 0x37C 0x6B0 0x2 0x0
+#define IOMUXC_GPIO_AD_11_GPT3_CLK 0x138 0x37C 0x598 0x3 0x1
+#define IOMUXC_GPIO_AD_11_VIDEO_MUX_CSI_DATA10 0x138 0x37C 0x0 0x4 0x0
+#define IOMUXC_GPIO_AD_11_GPIO_MUX3_IO10 0x138 0x37C 0x0 0x5 0x0
+#define IOMUXC_GPIO_AD_11_ENET_1588_EVENT3_OUT 0x138 0x37C 0x0 0x6 0x0
+#define IOMUXC_GPIO_AD_11_FLEXIO2_D11 0x138 0x37C 0x0 0x8 0x0
+#define IOMUXC_GPIO_AD_11_GPIO9_IO10 0x138 0x37C 0x0 0xA 0x0
+#define IOMUXC_GPIO_AD_11_FLEXPWM2_PWM1_X 0x138 0x37C 0x0 0xB 0x0
+
+#define IOMUXC_GPIO_AD_12_SPDIF_LOCK 0x13C 0x380 0x0 0x0 0x0
+#define IOMUXC_GPIO_AD_12_LPI2C1_HREQ 0x13C 0x380 0x0 0x1 0x0
+#define IOMUXC_GPIO_AD_12_GPT1_CAPTURE1 0x13C 0x380 0x0 0x2 0x0
+#define IOMUXC_GPIO_AD_12_FLEXSPI1_B_DATA03 0x13C 0x380 0x570 0x3 0x0
+#define IOMUXC_GPIO_AD_12_VIDEO_MUX_CSI_PIXCLK 0x13C 0x380 0x0 0x4 0x0
+#define IOMUXC_GPIO_AD_12_GPIO_MUX3_IO11 0x13C 0x380 0x0 0x5 0x0
+#define IOMUXC_GPIO_AD_12_ENET_TX_DATA03 0x13C 0x380 0x0 0x6 0x0
+#define IOMUXC_GPIO_AD_12_FLEXIO2_D12 0x13C 0x380 0x0 0x8 0x0
+#define IOMUXC_GPIO_AD_12_EWM_OUT_B 0x13C 0x380 0x0 0x9 0x0
+#define IOMUXC_GPIO_AD_12_GPIO9_IO11 0x13C 0x380 0x0 0xA 0x0
+#define IOMUXC_GPIO_AD_12_FLEXPWM2_PWM2_X 0x13C 0x380 0x0 0xB 0x0
+
+#define IOMUXC_GPIO_AD_13_SPDIF_SR_CLK 0x140 0x384 0x0 0x0 0x0
+#define IOMUXC_GPIO_AD_13_PIT1_TRIGGER0 0x140 0x384 0x0 0x1 0x0
+#define IOMUXC_GPIO_AD_13_GPT1_CAPTURE2 0x140 0x384 0x0 0x2 0x0
+#define IOMUXC_GPIO_AD_13_FLEXSPI1_B_DATA02 0x140 0x384 0x56C 0x3 0x0
+#define IOMUXC_GPIO_AD_13_VIDEO_MUX_CSI_MCLK 0x140 0x384 0x0 0x4 0x0
+#define IOMUXC_GPIO_AD_13_GPIO_MUX3_IO12 0x140 0x384 0x0 0x5 0x0
+#define IOMUXC_GPIO_AD_13_ENET_TX_DATA02 0x140 0x384 0x0 0x6 0x0
+#define IOMUXC_GPIO_AD_13_FLEXIO2_D13 0x140 0x384 0x0 0x8 0x0
+#define IOMUXC_GPIO_AD_13_REF_CLK_32K 0x140 0x384 0x0 0x9 0x0
+#define IOMUXC_GPIO_AD_13_GPIO9_IO12 0x140 0x384 0x0 0xA 0x0
+#define IOMUXC_GPIO_AD_13_FLEXPWM2_PWM3_X 0x140 0x384 0x0 0xB 0x0
+
+#define IOMUXC_GPIO_AD_14_SPDIF_EXT_CLK 0x144 0x388 0x0 0x0 0x0
+#define IOMUXC_GPIO_AD_14_REF_CLK_24M 0x144 0x388 0x0 0x1 0x0
+#define IOMUXC_GPIO_AD_14_GPT1_COMPARE1 0x144 0x388 0x0 0x2 0x0
+#define IOMUXC_GPIO_AD_14_FLEXSPI1_B_DATA01 0x144 0x388 0x568 0x3 0x0
+#define IOMUXC_GPIO_AD_14_VIDEO_MUX_CSI_VSYNC 0x144 0x388 0x0 0x4 0x0
+#define IOMUXC_GPIO_AD_14_GPIO_MUX3_IO13 0x144 0x388 0x0 0x5 0x0
+#define IOMUXC_GPIO_AD_14_ENET_RX_CLK 0x144 0x388 0x0 0x6 0x0
+#define IOMUXC_GPIO_AD_14_FLEXIO2_D14 0x144 0x388 0x0 0x8 0x0
+#define IOMUXC_GPIO_AD_14_CCM_ENET_REF_CLK_25M 0x144 0x388 0x0 0x9 0x0
+#define IOMUXC_GPIO_AD_14_GPIO9_IO13 0x144 0x388 0x0 0xA 0x0
+#define IOMUXC_GPIO_AD_14_FLEXPWM3_PWM0_X 0x144 0x388 0x0 0xB 0x0
+
+#define IOMUXC_GPIO_AD_15_GPIO9_IO14 0x148 0x38C 0x0 0xA 0x0
+#define IOMUXC_GPIO_AD_15_FLEXPWM3_PWM1_X 0x148 0x38C 0x0 0xB 0x0
+#define IOMUXC_GPIO_AD_15_SPDIF_IN 0x148 0x38C 0x6B4 0x0 0x1
+#define IOMUXC_GPIO_AD_15_LPUART10_TXD 0x148 0x38C 0x628 0x1 0x0
+#define IOMUXC_GPIO_AD_15_GPT1_COMPARE2 0x148 0x38C 0x0 0x2 0x0
+#define IOMUXC_GPIO_AD_15_FLEXSPI1_B_DATA00 0x148 0x38C 0x564 0x3 0x0
+#define IOMUXC_GPIO_AD_15_VIDEO_MUX_CSI_HSYNC 0x148 0x38C 0x0 0x4 0x0
+#define IOMUXC_GPIO_AD_15_GPIO_MUX3_IO14 0x148 0x38C 0x0 0x5 0x0
+#define IOMUXC_GPIO_AD_15_ENET_TX_ER 0x148 0x38C 0x0 0x6 0x0
+#define IOMUXC_GPIO_AD_15_FLEXIO2_D15 0x148 0x38C 0x0 0x8 0x0
+
+#define IOMUXC_GPIO_AD_16_SPDIF_OUT 0x14C 0x390 0x0 0x0 0x0
+#define IOMUXC_GPIO_AD_16_LPUART10_RXD 0x14C 0x390 0x624 0x1 0x0
+#define IOMUXC_GPIO_AD_16_GPT1_COMPARE3 0x14C 0x390 0x0 0x2 0x0
+#define IOMUXC_GPIO_AD_16_FLEXSPI1_B_SCLK 0x14C 0x390 0x578 0x3 0x0
+#define IOMUXC_GPIO_AD_16_VIDEO_MUX_CSI_DATA09 0x14C 0x390 0x0 0x4 0x0
+#define IOMUXC_GPIO_AD_16_GPIO_MUX3_IO15 0x14C 0x390 0x0 0x5 0x0
+#define IOMUXC_GPIO_AD_16_ENET_RX_DATA03 0x14C 0x390 0x0 0x6 0x0
+#define IOMUXC_GPIO_AD_16_FLEXIO2_D16 0x14C 0x390 0x0 0x8 0x0
+#define IOMUXC_GPIO_AD_16_ENET_1G_MDC 0x14C 0x390 0x0 0x9 0x0
+#define IOMUXC_GPIO_AD_16_GPIO9_IO15 0x14C 0x390 0x0 0xA 0x0
+#define IOMUXC_GPIO_AD_16_FLEXPWM3_PWM2_X 0x14C 0x390 0x0 0xB 0x0
+
+#define IOMUXC_GPIO_AD_17_SAI1_MCLK 0x150 0x394 0x66C 0x0 0x0
+#define IOMUXC_GPIO_AD_17_ACMP1_OUT 0x150 0x394 0x0 0x1 0x0
+#define IOMUXC_GPIO_AD_17_GPT1_CLK 0x150 0x394 0x0 0x2 0x0
+#define IOMUXC_GPIO_AD_17_FLEXSPI1_A_DQS 0x150 0x394 0x550 0x3 0x1
+#define IOMUXC_GPIO_AD_17_VIDEO_MUX_CSI_DATA08 0x150 0x394 0x0 0x4 0x0
+#define IOMUXC_GPIO_AD_17_GPIO_MUX3_IO16 0x150 0x394 0x0 0x5 0x0
+#define IOMUXC_GPIO_AD_17_ENET_RX_DATA02 0x150 0x394 0x0 0x6 0x0
+#define IOMUXC_GPIO_AD_17_FLEXIO2_D17 0x150 0x394 0x0 0x8 0x0
+#define IOMUXC_GPIO_AD_17_ENET_1G_MDIO 0x150 0x394 0x4C8 0x9 0x2
+#define IOMUXC_GPIO_AD_17_GPIO9_IO16 0x150 0x394 0x0 0xA 0x0
+#define IOMUXC_GPIO_AD_17_FLEXPWM3_PWM3_X 0x150 0x394 0x0 0xB 0x0
+
+#define IOMUXC_GPIO_AD_18_GPIO9_IO17 0x154 0x398 0x0 0xA 0x0
+#define IOMUXC_GPIO_AD_18_FLEXPWM4_PWM0_X 0x154 0x398 0x0 0xB 0x0
+#define IOMUXC_GPIO_AD_18_SAI1_RX_SYNC 0x154 0x398 0x678 0x0 0x0
+#define IOMUXC_GPIO_AD_18_ACMP2_OUT 0x154 0x398 0x0 0x1 0x0
+#define IOMUXC_GPIO_AD_18_LPSPI1_PCS1 0x154 0x398 0x0 0x2 0x0
+#define IOMUXC_GPIO_AD_18_FLEXSPI1_A_SS0_B 0x154 0x398 0x0 0x3 0x0
+#define IOMUXC_GPIO_AD_18_VIDEO_MUX_CSI_DATA07 0x154 0x398 0x0 0x4 0x0
+#define IOMUXC_GPIO_AD_18_GPIO_MUX3_IO17 0x154 0x398 0x0 0x5 0x0
+#define IOMUXC_GPIO_AD_18_ENET_CRS 0x154 0x398 0x0 0x6 0x0
+#define IOMUXC_GPIO_AD_18_FLEXIO2_D18 0x154 0x398 0x0 0x8 0x0
+#define IOMUXC_GPIO_AD_18_LPI2C2_SCL 0x154 0x398 0x5B4 0x9 0x1
+
+#define IOMUXC_GPIO_AD_19_SAI1_RX_BCLK 0x158 0x39C 0x670 0x0 0x0
+#define IOMUXC_GPIO_AD_19_ACMP3_OUT 0x158 0x39C 0x0 0x1 0x0
+#define IOMUXC_GPIO_AD_19_LPSPI1_PCS2 0x158 0x39C 0x0 0x2 0x0
+#define IOMUXC_GPIO_AD_19_FLEXSPI1_A_SCLK 0x158 0x39C 0x574 0x3 0x0
+#define IOMUXC_GPIO_AD_19_VIDEO_MUX_CSI_DATA06 0x158 0x39C 0x0 0x4 0x0
+#define IOMUXC_GPIO_AD_19_GPIO_MUX3_IO18 0x158 0x39C 0x0 0x5 0x0
+#define IOMUXC_GPIO_AD_19_ENET_COL 0x158 0x39C 0x0 0x6 0x0
+#define IOMUXC_GPIO_AD_19_FLEXIO2_D19 0x158 0x39C 0x0 0x8 0x0
+#define IOMUXC_GPIO_AD_19_LPI2C2_SDA 0x158 0x39C 0x5B8 0x9 0x1
+#define IOMUXC_GPIO_AD_19_GPIO9_IO18 0x158 0x39C 0x0 0xA 0x0
+#define IOMUXC_GPIO_AD_19_FLEXPWM4_PWM1_X 0x158 0x39C 0x0 0xB 0x0
+
+#define IOMUXC_GPIO_AD_20_SAI1_RX_DATA00 0x15C 0x3A0 0x674 0x0 0x0
+#define IOMUXC_GPIO_AD_20_ACMP4_OUT 0x15C 0x3A0 0x0 0x1 0x0
+#define IOMUXC_GPIO_AD_20_LPSPI1_PCS3 0x15C 0x3A0 0x0 0x2 0x0
+#define IOMUXC_GPIO_AD_20_FLEXSPI1_A_DATA00 0x15C 0x3A0 0x554 0x3 0x0
+#define IOMUXC_GPIO_AD_20_VIDEO_MUX_CSI_DATA05 0x15C 0x3A0 0x0 0x4 0x0
+#define IOMUXC_GPIO_AD_20_GPIO_MUX3_IO19 0x15C 0x3A0 0x0 0x5 0x0
+#define IOMUXC_GPIO_AD_20_KPP_ROW07 0x15C 0x3A0 0x5A8 0x6 0x0
+#define IOMUXC_GPIO_AD_20_FLEXIO2_D20 0x15C 0x3A0 0x0 0x8 0x0
+#define IOMUXC_GPIO_AD_20_ENET_QOS_1588_EVENT2_OUT 0x15C 0x3A0 0x0 0x9 0x0
+#define IOMUXC_GPIO_AD_20_GPIO9_IO19 0x15C 0x3A0 0x0 0xA 0x0
+#define IOMUXC_GPIO_AD_20_FLEXPWM4_PWM2_X 0x15C 0x3A0 0x0 0xB 0x0
+
+#define IOMUXC_GPIO_AD_21_SAI1_TX_DATA00 0x160 0x3A4 0x0 0x0 0x0
+#define IOMUXC_GPIO_AD_21_LPSPI2_PCS1 0x160 0x3A4 0x5E0 0x2 0x0
+#define IOMUXC_GPIO_AD_21_FLEXSPI1_A_DATA01 0x160 0x3A4 0x558 0x3 0x0
+#define IOMUXC_GPIO_AD_21_VIDEO_MUX_CSI_DATA04 0x160 0x3A4 0x0 0x4 0x0
+#define IOMUXC_GPIO_AD_21_GPIO_MUX3_IO20 0x160 0x3A4 0x0 0x5 0x0
+#define IOMUXC_GPIO_AD_21_KPP_COL07 0x160 0x3A4 0x5A0 0x6 0x0
+#define IOMUXC_GPIO_AD_21_FLEXIO2_D21 0x160 0x3A4 0x0 0x8 0x0
+#define IOMUXC_GPIO_AD_21_ENET_QOS_1588_EVENT2_IN 0x160 0x3A4 0x0 0x9 0x0
+#define IOMUXC_GPIO_AD_21_GPIO9_IO20 0x160 0x3A4 0x0 0xA 0x0
+#define IOMUXC_GPIO_AD_21_FLEXPWM4_PWM3_X 0x160 0x3A4 0x0 0xB 0x0
+
+#define IOMUXC_GPIO_AD_22_GPIO9_IO21 0x164 0x3A8 0x0 0xA 0x0
+#define IOMUXC_GPIO_AD_22_SAI1_TX_BCLK 0x164 0x3A8 0x67C 0x0 0x0
+#define IOMUXC_GPIO_AD_22_LPSPI2_PCS2 0x164 0x3A8 0x0 0x2 0x0
+#define IOMUXC_GPIO_AD_22_FLEXSPI1_A_DATA02 0x164 0x3A8 0x55C 0x3 0x0
+#define IOMUXC_GPIO_AD_22_VIDEO_MUX_CSI_DATA03 0x164 0x3A8 0x0 0x4 0x0
+#define IOMUXC_GPIO_AD_22_GPIO_MUX3_IO21 0x164 0x3A8 0x0 0x5 0x0
+#define IOMUXC_GPIO_AD_22_KPP_ROW06 0x164 0x3A8 0x5A4 0x6 0x0
+#define IOMUXC_GPIO_AD_22_FLEXIO2_D22 0x164 0x3A8 0x0 0x8 0x0
+#define IOMUXC_GPIO_AD_22_ENET_QOS_1588_EVENT3_OUT 0x164 0x3A8 0x0 0x9 0x0
+
+#define IOMUXC_GPIO_AD_23_SAI1_TX_SYNC 0x168 0x3AC 0x680 0x0 0x0
+#define IOMUXC_GPIO_AD_23_LPSPI2_PCS3 0x168 0x3AC 0x0 0x2 0x0
+#define IOMUXC_GPIO_AD_23_FLEXSPI1_A_DATA03 0x168 0x3AC 0x560 0x3 0x0
+#define IOMUXC_GPIO_AD_23_VIDEO_MUX_CSI_DATA02 0x168 0x3AC 0x0 0x4 0x0
+#define IOMUXC_GPIO_AD_23_GPIO_MUX3_IO22 0x168 0x3AC 0x0 0x5 0x0
+#define IOMUXC_GPIO_AD_23_KPP_COL06 0x168 0x3AC 0x59C 0x6 0x0
+#define IOMUXC_GPIO_AD_23_FLEXIO2_D23 0x168 0x3AC 0x0 0x8 0x0
+#define IOMUXC_GPIO_AD_23_ENET_QOS_1588_EVENT3_IN 0x168 0x3AC 0x0 0x9 0x0
+#define IOMUXC_GPIO_AD_23_GPIO9_IO22 0x168 0x3AC 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_AD_24_LPUART1_TXD 0x16C 0x3B0 0x620 0x0 0x0
+#define IOMUXC_GPIO_AD_24_LPSPI2_SCK 0x16C 0x3B0 0x5E4 0x1 0x0
+#define IOMUXC_GPIO_AD_24_VIDEO_MUX_CSI_DATA00 0x16C 0x3B0 0x0 0x2 0x0
+#define IOMUXC_GPIO_AD_24_ENET_RX_EN 0x16C 0x3B0 0x4B8 0x3 0x0
+#define IOMUXC_GPIO_AD_24_FLEXPWM2_PWM0_A 0x16C 0x3B0 0x518 0x4 0x1
+#define IOMUXC_GPIO_AD_24_GPIO_MUX3_IO23 0x16C 0x3B0 0x0 0x5 0x0
+#define IOMUXC_GPIO_AD_24_KPP_ROW05 0x16C 0x3B0 0x0 0x6 0x0
+#define IOMUXC_GPIO_AD_24_FLEXIO2_D24 0x16C 0x3B0 0x0 0x8 0x0
+#define IOMUXC_GPIO_AD_24_LPI2C4_SCL 0x16C 0x3B0 0x5C4 0x9 0x0
+#define IOMUXC_GPIO_AD_24_GPIO9_IO23 0x16C 0x3B0 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_AD_25_GPIO9_IO24 0x170 0x3B4 0x0 0xA 0x0
+#define IOMUXC_GPIO_AD_25_LPUART1_RXD 0x170 0x3B4 0x61C 0x0 0x0
+#define IOMUXC_GPIO_AD_25_LPSPI2_PCS0 0x170 0x3B4 0x5DC 0x1 0x0
+#define IOMUXC_GPIO_AD_25_VIDEO_MUX_CSI_DATA01 0x170 0x3B4 0x0 0x2 0x0
+#define IOMUXC_GPIO_AD_25_ENET_RX_ER 0x170 0x3B4 0x4BC 0x3 0x0
+#define IOMUXC_GPIO_AD_25_FLEXPWM2_PWM0_B 0x170 0x3B4 0x524 0x4 0x1
+#define IOMUXC_GPIO_AD_25_GPIO_MUX3_IO24 0x170 0x3B4 0x0 0x5 0x0
+#define IOMUXC_GPIO_AD_25_KPP_COL05 0x170 0x3B4 0x0 0x6 0x0
+#define IOMUXC_GPIO_AD_25_FLEXIO2_D25 0x170 0x3B4 0x0 0x8 0x0
+#define IOMUXC_GPIO_AD_25_LPI2C4_SDA 0x170 0x3B4 0x5C8 0x9 0x0
+
+#define IOMUXC_GPIO_AD_26_LPUART1_CTS_B 0x174 0x3B8 0x0 0x0 0x0
+#define IOMUXC_GPIO_AD_26_LPSPI2_SOUT 0x174 0x3B8 0x5EC 0x1 0x0
+#define IOMUXC_GPIO_AD_26_SEMC_CSX01 0x174 0x3B8 0x0 0x2 0x0
+#define IOMUXC_GPIO_AD_26_ENET_RX_DATA00 0x174 0x3B8 0x4B0 0x3 0x0
+#define IOMUXC_GPIO_AD_26_FLEXPWM2_PWM1_A 0x174 0x3B8 0x51C 0x4 0x1
+#define IOMUXC_GPIO_AD_26_GPIO_MUX3_IO25 0x174 0x3B8 0x0 0x5 0x0
+#define IOMUXC_GPIO_AD_26_KPP_ROW04 0x174 0x3B8 0x0 0x6 0x0
+#define IOMUXC_GPIO_AD_26_FLEXIO2_D26 0x174 0x3B8 0x0 0x8 0x0
+#define IOMUXC_GPIO_AD_26_ENET_QOS_MDC 0x174 0x3B8 0x0 0x9 0x0
+#define IOMUXC_GPIO_AD_26_GPIO9_IO25 0x174 0x3B8 0x0 0xA 0x0
+#define IOMUXC_GPIO_AD_26_USDHC2_CD_B 0x174 0x3B8 0x6D0 0xB 0x1
+
+#define IOMUXC_GPIO_AD_27_LPUART1_RTS_B 0x178 0x3BC 0x0 0x0 0x0
+#define IOMUXC_GPIO_AD_27_LPSPI2_SIN 0x178 0x3BC 0x5E8 0x1 0x0
+#define IOMUXC_GPIO_AD_27_SEMC_CSX02 0x178 0x3BC 0x0 0x2 0x0
+#define IOMUXC_GPIO_AD_27_ENET_RX_DATA01 0x178 0x3BC 0x4B4 0x3 0x0
+#define IOMUXC_GPIO_AD_27_FLEXPWM2_PWM1_B 0x178 0x3BC 0x528 0x4 0x1
+#define IOMUXC_GPIO_AD_27_GPIO_MUX3_IO26 0x178 0x3BC 0x0 0x5 0x0
+#define IOMUXC_GPIO_AD_27_KPP_COL04 0x178 0x3BC 0x0 0x6 0x0
+#define IOMUXC_GPIO_AD_27_FLEXIO2_D27 0x178 0x3BC 0x0 0x8 0x0
+#define IOMUXC_GPIO_AD_27_ENET_QOS_MDIO 0x178 0x3BC 0x4EC 0x9 0x1
+#define IOMUXC_GPIO_AD_27_GPIO9_IO26 0x178 0x3BC 0x0 0xA 0x0
+#define IOMUXC_GPIO_AD_27_USDHC2_WP 0x178 0x3BC 0x6D4 0xB 0x1
+
+#define IOMUXC_GPIO_AD_28_GPIO9_IO27 0x17C 0x3C0 0x0 0xA 0x0
+#define IOMUXC_GPIO_AD_28_USDHC2_VSELECT 0x17C 0x3C0 0x0 0xB 0x0
+#define IOMUXC_GPIO_AD_28_LPSPI1_SCK 0x17C 0x3C0 0x5D0 0x0 0x1
+#define IOMUXC_GPIO_AD_28_LPUART5_TXD 0x17C 0x3C0 0x0 0x1 0x0
+#define IOMUXC_GPIO_AD_28_SEMC_CSX03 0x17C 0x3C0 0x0 0x2 0x0
+#define IOMUXC_GPIO_AD_28_ENET_TX_EN 0x17C 0x3C0 0x0 0x3 0x0
+#define IOMUXC_GPIO_AD_28_FLEXPWM2_PWM2_A 0x17C 0x3C0 0x520 0x4 0x1
+#define IOMUXC_GPIO_AD_28_GPIO_MUX3_IO27 0x17C 0x3C0 0x0 0x5 0x0
+#define IOMUXC_GPIO_AD_28_KPP_ROW03 0x17C 0x3C0 0x0 0x6 0x0
+#define IOMUXC_GPIO_AD_28_FLEXIO2_D28 0x17C 0x3C0 0x0 0x8 0x0
+#define IOMUXC_GPIO_AD_28_VIDEO_MUX_EXT_DCIC1 0x17C 0x3C0 0x0 0x9 0x0
+
+#define IOMUXC_GPIO_AD_29_LPSPI1_PCS0 0x180 0x3C4 0x5CC 0x0 0x1
+#define IOMUXC_GPIO_AD_29_LPUART5_RXD 0x180 0x3C4 0x0 0x1 0x0
+#define IOMUXC_GPIO_AD_29_ENET_REF_CLK 0x180 0x3C4 0x4A8 0x2 0x0
+#define IOMUXC_GPIO_AD_29_ENET_TX_CLK 0x180 0x3C4 0x4C0 0x3 0x0
+#define IOMUXC_GPIO_AD_29_FLEXPWM2_PWM2_B 0x180 0x3C4 0x52C 0x4 0x1
+#define IOMUXC_GPIO_AD_29_GPIO_MUX3_IO28 0x180 0x3C4 0x0 0x5 0x0
+#define IOMUXC_GPIO_AD_29_KPP_COL03 0x180 0x3C4 0x0 0x6 0x0
+#define IOMUXC_GPIO_AD_29_FLEXIO2_D29 0x180 0x3C4 0x0 0x8 0x0
+#define IOMUXC_GPIO_AD_29_VIDEO_MUX_EXT_DCIC2 0x180 0x3C4 0x0 0x9 0x0
+#define IOMUXC_GPIO_AD_29_GPIO9_IO28 0x180 0x3C4 0x0 0xA 0x0
+#define IOMUXC_GPIO_AD_29_USDHC2_RESET_B 0x180 0x3C4 0x0 0xB 0x0
+
+#define IOMUXC_GPIO_AD_30_LPSPI1_SOUT 0x184 0x3C8 0x5D8 0x0 0x1
+#define IOMUXC_GPIO_AD_30_USB_OTG2_OC 0x184 0x3C8 0x6B8 0x1 0x1
+#define IOMUXC_GPIO_AD_30_FLEXCAN2_TX 0x184 0x3C8 0x0 0x2 0x0
+#define IOMUXC_GPIO_AD_30_ENET_TX_DATA00 0x184 0x3C8 0x0 0x3 0x0
+#define IOMUXC_GPIO_AD_30_LPUART3_TXD 0x184 0x3C8 0x0 0x4 0x0
+#define IOMUXC_GPIO_AD_30_GPIO_MUX3_IO29 0x184 0x3C8 0x0 0x5 0x0
+#define IOMUXC_GPIO_AD_30_KPP_ROW02 0x184 0x3C8 0x0 0x6 0x0
+#define IOMUXC_GPIO_AD_30_FLEXIO2_D30 0x184 0x3C8 0x0 0x8 0x0
+#define IOMUXC_GPIO_AD_30_WDOG2_RESET_B_DEB 0x184 0x3C8 0x0 0x9 0x0
+#define IOMUXC_GPIO_AD_30_GPIO9_IO29 0x184 0x3C8 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_AD_31_LPSPI1_SIN 0x188 0x3CC 0x5D4 0x0 0x1
+#define IOMUXC_GPIO_AD_31_USB_OTG2_PWR 0x188 0x3CC 0x0 0x1 0x0
+#define IOMUXC_GPIO_AD_31_FLEXCAN2_RX 0x188 0x3CC 0x49C 0x2 0x1
+#define IOMUXC_GPIO_AD_31_ENET_TX_DATA01 0x188 0x3CC 0x0 0x3 0x0
+#define IOMUXC_GPIO_AD_31_LPUART3_RXD 0x188 0x3CC 0x0 0x4 0x0
+#define IOMUXC_GPIO_AD_31_GPIO_MUX3_IO30 0x188 0x3CC 0x0 0x5 0x0
+#define IOMUXC_GPIO_AD_31_KPP_COL02 0x188 0x3CC 0x0 0x6 0x0
+#define IOMUXC_GPIO_AD_31_FLEXIO2_D31 0x188 0x3CC 0x0 0x8 0x0
+#define IOMUXC_GPIO_AD_31_WDOG1_RESET_B_DEB 0x188 0x3CC 0x0 0x9 0x0
+#define IOMUXC_GPIO_AD_31_GPIO9_IO30 0x188 0x3CC 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_AD_32_GPIO9_IO31 0x18C 0x3D0 0x0 0xA 0x0
+#define IOMUXC_GPIO_AD_32_LPI2C1_SCL 0x18C 0x3D0 0x5AC 0x0 0x1
+#define IOMUXC_GPIO_AD_32_USBPHY2_OTG_ID 0x18C 0x3D0 0x6C4 0x1 0x1
+#define IOMUXC_GPIO_AD_32_PGMC_PMIC_RDY 0x18C 0x3D0 0x0 0x2 0x0
+#define IOMUXC_GPIO_AD_32_ENET_MDC 0x18C 0x3D0 0x0 0x3 0x0
+#define IOMUXC_GPIO_AD_32_USDHC1_CD_B 0x18C 0x3D0 0x6C8 0x4 0x0
+#define IOMUXC_GPIO_AD_32_GPIO_MUX3_IO31 0x18C 0x3D0 0x0 0x5 0x0
+#define IOMUXC_GPIO_AD_32_KPP_ROW01 0x18C 0x3D0 0x0 0x6 0x0
+#define IOMUXC_GPIO_AD_32_LPUART10_TXD 0x18C 0x3D0 0x628 0x8 0x1
+#define IOMUXC_GPIO_AD_32_ENET_1G_MDC 0x18C 0x3D0 0x0 0x9 0x0
+
+#define IOMUXC_GPIO_AD_33_LPI2C1_SDA 0x190 0x3D4 0x5B0 0x0 0x1
+#define IOMUXC_GPIO_AD_33_USBPHY1_OTG_ID 0x190 0x3D4 0x6C0 0x1 0x1
+#define IOMUXC_GPIO_AD_33_XBAR1_INOUT17 0x190 0x3D4 0x0 0x2 0x0
+#define IOMUXC_GPIO_AD_33_ENET_MDIO 0x190 0x3D4 0x4AC 0x3 0x1
+#define IOMUXC_GPIO_AD_33_USDHC1_WP 0x190 0x3D4 0x6CC 0x4 0x0
+#define IOMUXC_GPIO_AD_33_GPIO_MUX4_IO00 0x190 0x3D4 0x0 0x5 0x0
+#define IOMUXC_GPIO_AD_33_KPP_COL01 0x190 0x3D4 0x0 0x6 0x0
+#define IOMUXC_GPIO_AD_33_LPUART10_RXD 0x190 0x3D4 0x624 0x8 0x1
+#define IOMUXC_GPIO_AD_33_ENET_1G_MDIO 0x190 0x3D4 0x4C8 0x9 0x3
+#define IOMUXC_GPIO_AD_33_GPIO10_IO00 0x190 0x3D4 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_AD_34_ENET_1G_1588_EVENT0_IN 0x194 0x3D8 0x0 0x0 0x0
+#define IOMUXC_GPIO_AD_34_USB_OTG1_PWR 0x194 0x3D8 0x0 0x1 0x0
+#define IOMUXC_GPIO_AD_34_XBAR1_INOUT18 0x194 0x3D8 0x0 0x2 0x0
+#define IOMUXC_GPIO_AD_34_ENET_1588_EVENT0_IN 0x194 0x3D8 0x0 0x3 0x0
+#define IOMUXC_GPIO_AD_34_USDHC1_VSELECT 0x194 0x3D8 0x0 0x4 0x0
+#define IOMUXC_GPIO_AD_34_GPIO_MUX4_IO01 0x194 0x3D8 0x0 0x5 0x0
+#define IOMUXC_GPIO_AD_34_KPP_ROW00 0x194 0x3D8 0x0 0x6 0x0
+#define IOMUXC_GPIO_AD_34_LPUART10_CTS_B 0x194 0x3D8 0x0 0x8 0x0
+#define IOMUXC_GPIO_AD_34_WDOG1_ANY 0x194 0x3D8 0x0 0x9 0x0
+#define IOMUXC_GPIO_AD_34_GPIO10_IO01 0x194 0x3D8 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_AD_35_GPIO10_IO02 0x198 0x3DC 0x0 0xA 0x0
+#define IOMUXC_GPIO_AD_35_ENET_1G_1588_EVENT0_OUT 0x198 0x3DC 0x0 0x0 0x0
+#define IOMUXC_GPIO_AD_35_USB_OTG1_OC 0x198 0x3DC 0x6BC 0x1 0x1
+#define IOMUXC_GPIO_AD_35_XBAR1_INOUT19 0x198 0x3DC 0x0 0x2 0x0
+#define IOMUXC_GPIO_AD_35_ENET_1588_EVENT0_OUT 0x198 0x3DC 0x0 0x3 0x0
+#define IOMUXC_GPIO_AD_35_USDHC1_RESET_B 0x198 0x3DC 0x0 0x4 0x0
+#define IOMUXC_GPIO_AD_35_GPIO_MUX4_IO02 0x198 0x3DC 0x0 0x5 0x0
+#define IOMUXC_GPIO_AD_35_KPP_COL00 0x198 0x3DC 0x0 0x6 0x0
+#define IOMUXC_GPIO_AD_35_LPUART10_RTS_B 0x198 0x3DC 0x0 0x8 0x0
+#define IOMUXC_GPIO_AD_35_FLEXSPI1_B_SS1_B 0x198 0x3DC 0x0 0x9 0x0
+
+#define IOMUXC_GPIO_SD_B1_00_USDHC1_CMD 0x19C 0x3E0 0x0 0x0 0x0
+#define IOMUXC_GPIO_SD_B1_00_XBAR1_INOUT20 0x19C 0x3E0 0x6D8 0x2 0x1
+#define IOMUXC_GPIO_SD_B1_00_GPT4_CAPTURE1 0x19C 0x3E0 0x0 0x3 0x0
+#define IOMUXC_GPIO_SD_B1_00_GPIO_MUX4_IO03 0x19C 0x3E0 0x0 0x5 0x0
+#define IOMUXC_GPIO_SD_B1_00_FLEXSPI2_A_SS0_B 0x19C 0x3E0 0x0 0x6 0x0
+#define IOMUXC_GPIO_SD_B1_00_KPP_ROW07 0x19C 0x3E0 0x5A8 0x8 0x1
+#define IOMUXC_GPIO_SD_B1_00_GPIO10_IO03 0x19C 0x3E0 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_SD_B1_01_USDHC1_CLK 0x1A0 0x3E4 0x0 0x0 0x0
+#define IOMUXC_GPIO_SD_B1_01_XBAR1_INOUT21 0x1A0 0x3E4 0x6DC 0x2 0x1
+#define IOMUXC_GPIO_SD_B1_01_GPT4_CAPTURE2 0x1A0 0x3E4 0x0 0x3 0x0
+#define IOMUXC_GPIO_SD_B1_01_GPIO_MUX4_IO04 0x1A0 0x3E4 0x0 0x5 0x0
+#define IOMUXC_GPIO_SD_B1_01_FLEXSPI2_A_SCLK 0x1A0 0x3E4 0x58C 0x6 0x1
+#define IOMUXC_GPIO_SD_B1_01_KPP_COL07 0x1A0 0x3E4 0x5A0 0x8 0x1
+#define IOMUXC_GPIO_SD_B1_01_GPIO10_IO04 0x1A0 0x3E4 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_SD_B1_02_GPIO10_IO05 0x1A4 0x3E8 0x0 0xA 0x0
+#define IOMUXC_GPIO_SD_B1_02_USDHC1_DATA0 0x1A4 0x3E8 0x0 0x0 0x0
+#define IOMUXC_GPIO_SD_B1_02_XBAR1_INOUT22 0x1A4 0x3E8 0x6E0 0x2 0x1
+#define IOMUXC_GPIO_SD_B1_02_GPT4_COMPARE1 0x1A4 0x3E8 0x0 0x3 0x0
+#define IOMUXC_GPIO_SD_B1_02_GPIO_MUX4_IO05 0x1A4 0x3E8 0x0 0x5 0x0
+#define IOMUXC_GPIO_SD_B1_02_FLEXSPI2_A_DATA00 0x1A4 0x3E8 0x57C 0x6 0x1
+#define IOMUXC_GPIO_SD_B1_02_KPP_ROW06 0x1A4 0x3E8 0x5A4 0x8 0x1
+#define IOMUXC_GPIO_SD_B1_02_FLEXSPI1_A_SS1_B 0x1A4 0x3E8 0x0 0x9 0x0
+
+#define IOMUXC_GPIO_SD_B1_03_USDHC1_DATA1 0x1A8 0x3EC 0x0 0x0 0x0
+#define IOMUXC_GPIO_SD_B1_03_XBAR1_INOUT23 0x1A8 0x3EC 0x6E4 0x2 0x1
+#define IOMUXC_GPIO_SD_B1_03_GPT4_COMPARE2 0x1A8 0x3EC 0x0 0x3 0x0
+#define IOMUXC_GPIO_SD_B1_03_GPIO_MUX4_IO06 0x1A8 0x3EC 0x0 0x5 0x0
+#define IOMUXC_GPIO_SD_B1_03_FLEXSPI2_A_DATA01 0x1A8 0x3EC 0x580 0x6 0x1
+#define IOMUXC_GPIO_SD_B1_03_KPP_COL06 0x1A8 0x3EC 0x59C 0x8 0x1
+#define IOMUXC_GPIO_SD_B1_03_FLEXSPI1_B_SS1_B 0x1A8 0x3EC 0x0 0x9 0x0
+#define IOMUXC_GPIO_SD_B1_03_GPIO10_IO06 0x1A8 0x3EC 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_SD_B1_04_USDHC1_DATA2 0x1AC 0x3F0 0x0 0x0 0x0
+#define IOMUXC_GPIO_SD_B1_04_XBAR1_INOUT24 0x1AC 0x3F0 0x6E8 0x2 0x1
+#define IOMUXC_GPIO_SD_B1_04_GPT4_COMPARE3 0x1AC 0x3F0 0x0 0x3 0x0
+#define IOMUXC_GPIO_SD_B1_04_GPIO_MUX4_IO07 0x1AC 0x3F0 0x0 0x5 0x0
+#define IOMUXC_GPIO_SD_B1_04_FLEXSPI2_A_DATA02 0x1AC 0x3F0 0x584 0x6 0x1
+#define IOMUXC_GPIO_SD_B1_04_FLEXSPI1_B_SS0_B 0x1AC 0x3F0 0x0 0x8 0x0
+#define IOMUXC_GPIO_SD_B1_04_ENET_QOS_1588_EVENT2_AUX_IN 0x1AC 0x3F0 0x0 0x9 0x0
+#define IOMUXC_GPIO_SD_B1_04_GPIO10_IO07 0x1AC 0x3F0 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_SD_B1_05_GPIO10_IO08 0x1B0 0x3F4 0x0 0xA 0x0
+#define IOMUXC_GPIO_SD_B1_05_USDHC1_DATA3 0x1B0 0x3F4 0x0 0x0 0x0
+#define IOMUXC_GPIO_SD_B1_05_XBAR1_INOUT25 0x1B0 0x3F4 0x6EC 0x2 0x1
+#define IOMUXC_GPIO_SD_B1_05_GPT4_CLK 0x1B0 0x3F4 0x0 0x3 0x0
+#define IOMUXC_GPIO_SD_B1_05_GPIO_MUX4_IO08 0x1B0 0x3F4 0x0 0x5 0x0
+#define IOMUXC_GPIO_SD_B1_05_FLEXSPI2_A_DATA03 0x1B0 0x3F4 0x588 0x6 0x1
+#define IOMUXC_GPIO_SD_B1_05_FLEXSPI1_B_DQS 0x1B0 0x3F4 0x0 0x8 0x0
+#define IOMUXC_GPIO_SD_B1_05_ENET_QOS_1588_EVENT3_AUX_IN 0x1B0 0x3F4 0x0 0x9 0x0
+
+#define IOMUXC_GPIO_SD_B2_00_GPIO10_IO09 0x1B4 0x3F8 0x0 0xA 0x0
+#define IOMUXC_GPIO_SD_B2_00_USDHC2_DATA3 0x1B4 0x3F8 0x0 0x0 0x0
+#define IOMUXC_GPIO_SD_B2_00_FLEXSPI1_B_DATA03 0x1B4 0x3F8 0x570 0x1 0x1
+#define IOMUXC_GPIO_SD_B2_00_ENET_1G_RX_EN 0x1B4 0x3F8 0x4E0 0x2 0x1
+#define IOMUXC_GPIO_SD_B2_00_LPUART9_TXD 0x1B4 0x3F8 0x0 0x3 0x0
+#define IOMUXC_GPIO_SD_B2_00_LPSPI4_SCK 0x1B4 0x3F8 0x610 0x4 0x0
+#define IOMUXC_GPIO_SD_B2_00_GPIO_MUX4_IO09 0x1B4 0x3F8 0x0 0x5 0x0
+
+#define IOMUXC_GPIO_SD_B2_01_USDHC2_DATA2 0x1B8 0x3FC 0x0 0x0 0x0
+#define IOMUXC_GPIO_SD_B2_01_FLEXSPI1_B_DATA02 0x1B8 0x3FC 0x56C 0x1 0x1
+#define IOMUXC_GPIO_SD_B2_01_ENET_1G_RX_CLK 0x1B8 0x3FC 0x4CC 0x2 0x1
+#define IOMUXC_GPIO_SD_B2_01_LPUART9_RXD 0x1B8 0x3FC 0x0 0x3 0x0
+#define IOMUXC_GPIO_SD_B2_01_LPSPI4_PCS0 0x1B8 0x3FC 0x60C 0x4 0x0
+#define IOMUXC_GPIO_SD_B2_01_GPIO_MUX4_IO10 0x1B8 0x3FC 0x0 0x5 0x0
+#define IOMUXC_GPIO_SD_B2_01_GPIO10_IO10 0x1B8 0x3FC 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_SD_B2_02_GPIO10_IO11 0x1BC 0x400 0x0 0xA 0x0
+#define IOMUXC_GPIO_SD_B2_02_USDHC2_DATA1 0x1BC 0x400 0x0 0x0 0x0
+#define IOMUXC_GPIO_SD_B2_02_FLEXSPI1_B_DATA01 0x1BC 0x400 0x568 0x1 0x1
+#define IOMUXC_GPIO_SD_B2_02_ENET_1G_RX_DATA00 0x1BC 0x400 0x4D0 0x2 0x1
+#define IOMUXC_GPIO_SD_B2_02_LPUART9_CTS_B 0x1BC 0x400 0x0 0x3 0x0
+#define IOMUXC_GPIO_SD_B2_02_LPSPI4_SOUT 0x1BC 0x400 0x618 0x4 0x0
+#define IOMUXC_GPIO_SD_B2_02_GPIO_MUX4_IO11 0x1BC 0x400 0x0 0x5 0x0
+
+#define IOMUXC_GPIO_SD_B2_03_GPIO10_IO12 0x1C0 0x404 0x0 0xA 0x0
+#define IOMUXC_GPIO_SD_B2_03_USDHC2_DATA0 0x1C0 0x404 0x0 0x0 0x0
+#define IOMUXC_GPIO_SD_B2_03_FLEXSPI1_B_DATA00 0x1C0 0x404 0x564 0x1 0x1
+#define IOMUXC_GPIO_SD_B2_03_ENET_1G_RX_DATA01 0x1C0 0x404 0x4D4 0x2 0x1
+#define IOMUXC_GPIO_SD_B2_03_LPUART9_RTS_B 0x1C0 0x404 0x0 0x3 0x0
+#define IOMUXC_GPIO_SD_B2_03_LPSPI4_SIN 0x1C0 0x404 0x614 0x4 0x0
+#define IOMUXC_GPIO_SD_B2_03_GPIO_MUX4_IO12 0x1C0 0x404 0x0 0x5 0x0
+
+#define IOMUXC_GPIO_SD_B2_04_USDHC2_CLK 0x1C4 0x408 0x0 0x0 0x0
+#define IOMUXC_GPIO_SD_B2_04_FLEXSPI1_B_SCLK 0x1C4 0x408 0x578 0x1 0x1
+#define IOMUXC_GPIO_SD_B2_04_ENET_1G_RX_DATA02 0x1C4 0x408 0x4D8 0x2 0x1
+#define IOMUXC_GPIO_SD_B2_04_FLEXSPI1_A_SS1_B 0x1C4 0x408 0x0 0x3 0x0
+#define IOMUXC_GPIO_SD_B2_04_LPSPI4_PCS1 0x1C4 0x408 0x0 0x4 0x0
+#define IOMUXC_GPIO_SD_B2_04_GPIO_MUX4_IO13 0x1C4 0x408 0x0 0x5 0x0
+#define IOMUXC_GPIO_SD_B2_04_GPIO10_IO13 0x1C4 0x408 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_SD_B2_05_GPIO10_IO14 0x1C8 0x40C 0x0 0xA 0x0
+#define IOMUXC_GPIO_SD_B2_05_USDHC2_CMD 0x1C8 0x40C 0x0 0x0 0x0
+#define IOMUXC_GPIO_SD_B2_05_FLEXSPI1_A_DQS 0x1C8 0x40C 0x550 0x1 0x2
+#define IOMUXC_GPIO_SD_B2_05_ENET_1G_RX_DATA03 0x1C8 0x40C 0x4DC 0x2 0x1
+#define IOMUXC_GPIO_SD_B2_05_FLEXSPI1_B_SS0_B 0x1C8 0x40C 0x0 0x3 0x0
+#define IOMUXC_GPIO_SD_B2_05_LPSPI4_PCS2 0x1C8 0x40C 0x0 0x4 0x0
+#define IOMUXC_GPIO_SD_B2_05_GPIO_MUX4_IO14 0x1C8 0x40C 0x0 0x5 0x0
+
+#define IOMUXC_GPIO_SD_B2_06_GPIO10_IO15 0x1CC 0x410 0x0 0xA 0x0
+#define IOMUXC_GPIO_SD_B2_06_USDHC2_RESET_B 0x1CC 0x410 0x0 0x0 0x0
+#define IOMUXC_GPIO_SD_B2_06_FLEXSPI1_A_SS0_B 0x1CC 0x410 0x0 0x1 0x0
+#define IOMUXC_GPIO_SD_B2_06_ENET_1G_TX_DATA03 0x1CC 0x410 0x0 0x2 0x0
+#define IOMUXC_GPIO_SD_B2_06_LPSPI4_PCS3 0x1CC 0x410 0x0 0x3 0x0
+#define IOMUXC_GPIO_SD_B2_06_GPT6_CAPTURE1 0x1CC 0x410 0x0 0x4 0x0
+#define IOMUXC_GPIO_SD_B2_06_GPIO_MUX4_IO15 0x1CC 0x410 0x0 0x5 0x0
+
+#define IOMUXC_GPIO_SD_B2_07_USDHC2_STROBE 0x1D0 0x414 0x0 0x0 0x0
+#define IOMUXC_GPIO_SD_B2_07_FLEXSPI1_A_SCLK 0x1D0 0x414 0x574 0x1 0x1
+#define IOMUXC_GPIO_SD_B2_07_ENET_1G_TX_DATA02 0x1D0 0x414 0x0 0x2 0x0
+#define IOMUXC_GPIO_SD_B2_07_LPUART3_CTS_B 0x1D0 0x414 0x0 0x3 0x0
+#define IOMUXC_GPIO_SD_B2_07_GPT6_CAPTURE2 0x1D0 0x414 0x0 0x4 0x0
+#define IOMUXC_GPIO_SD_B2_07_GPIO_MUX4_IO16 0x1D0 0x414 0x0 0x5 0x0
+#define IOMUXC_GPIO_SD_B2_07_LPSPI2_SCK 0x1D0 0x414 0x5E4 0x6 0x1
+#define IOMUXC_GPIO_SD_B2_07_ENET_TX_ER 0x1D0 0x414 0x0 0x8 0x0
+#define IOMUXC_GPIO_SD_B2_07_ENET_QOS_REF_CLK 0x1D0 0x414 0x4A0 0x9 0x1
+#define IOMUXC_GPIO_SD_B2_07_GPIO10_IO16 0x1D0 0x414 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_SD_B2_08_GPIO10_IO17 0x1D4 0x418 0x0 0xA 0x0
+#define IOMUXC_GPIO_SD_B2_08_USDHC2_DATA4 0x1D4 0x418 0x0 0x0 0x0
+#define IOMUXC_GPIO_SD_B2_08_FLEXSPI1_A_DATA00 0x1D4 0x418 0x554 0x1 0x1
+#define IOMUXC_GPIO_SD_B2_08_ENET_1G_TX_DATA01 0x1D4 0x418 0x0 0x2 0x0
+#define IOMUXC_GPIO_SD_B2_08_LPUART3_RTS_B 0x1D4 0x418 0x0 0x3 0x0
+#define IOMUXC_GPIO_SD_B2_08_GPT6_COMPARE1 0x1D4 0x418 0x0 0x4 0x0
+#define IOMUXC_GPIO_SD_B2_08_GPIO_MUX4_IO17 0x1D4 0x418 0x0 0x5 0x0
+#define IOMUXC_GPIO_SD_B2_08_LPSPI2_PCS0 0x1D4 0x418 0x5DC 0x6 0x1
+
+#define IOMUXC_GPIO_SD_B2_09_GPIO10_IO18 0x1D8 0x41C 0x0 0xA 0x0
+#define IOMUXC_GPIO_SD_B2_09_USDHC2_DATA5 0x1D8 0x41C 0x0 0x0 0x0
+#define IOMUXC_GPIO_SD_B2_09_FLEXSPI1_A_DATA01 0x1D8 0x41C 0x558 0x1 0x1
+#define IOMUXC_GPIO_SD_B2_09_ENET_1G_TX_DATA00 0x1D8 0x41C 0x0 0x2 0x0
+#define IOMUXC_GPIO_SD_B2_09_LPUART5_CTS_B 0x1D8 0x41C 0x0 0x3 0x0
+#define IOMUXC_GPIO_SD_B2_09_GPT6_COMPARE2 0x1D8 0x41C 0x0 0x4 0x0
+#define IOMUXC_GPIO_SD_B2_09_GPIO_MUX4_IO18 0x1D8 0x41C 0x0 0x5 0x0
+#define IOMUXC_GPIO_SD_B2_09_LPSPI2_SOUT 0x1D8 0x41C 0x5EC 0x6 0x1
+
+#define IOMUXC_GPIO_SD_B2_10_GPIO10_IO19 0x1DC 0x420 0x0 0xA 0x0
+#define IOMUXC_GPIO_SD_B2_10_USDHC2_DATA6 0x1DC 0x420 0x0 0x0 0x0
+#define IOMUXC_GPIO_SD_B2_10_FLEXSPI1_A_DATA02 0x1DC 0x420 0x55C 0x1 0x1
+#define IOMUXC_GPIO_SD_B2_10_ENET_1G_TX_EN 0x1DC 0x420 0x0 0x2 0x0
+#define IOMUXC_GPIO_SD_B2_10_LPUART5_RTS_B 0x1DC 0x420 0x0 0x3 0x0
+#define IOMUXC_GPIO_SD_B2_10_GPT6_COMPARE3 0x1DC 0x420 0x0 0x4 0x0
+#define IOMUXC_GPIO_SD_B2_10_GPIO_MUX4_IO19 0x1DC 0x420 0x0 0x5 0x0
+#define IOMUXC_GPIO_SD_B2_10_LPSPI2_SIN 0x1DC 0x420 0x5E8 0x6 0x1
+
+#define IOMUXC_GPIO_SD_B2_11_USDHC2_DATA7 0x1E0 0x424 0x0 0x0 0x0
+#define IOMUXC_GPIO_SD_B2_11_FLEXSPI1_A_DATA03 0x1E0 0x424 0x560 0x1 0x1
+#define IOMUXC_GPIO_SD_B2_11_ENET_1G_TX_CLK_IO 0x1E0 0x424 0x4E8 0x2 0x1
+#define IOMUXC_GPIO_SD_B2_11_ENET_1G_REF_CLK 0x1E0 0x424 0x4C4 0x3 0x1
+#define IOMUXC_GPIO_SD_B2_11_GPT6_CLK 0x1E0 0x424 0x0 0x4 0x0
+#define IOMUXC_GPIO_SD_B2_11_GPIO_MUX4_IO20 0x1E0 0x424 0x0 0x5 0x0
+#define IOMUXC_GPIO_SD_B2_11_LPSPI2_PCS1 0x1E0 0x424 0x5E0 0x6 0x1
+#define IOMUXC_GPIO_SD_B2_11_GPIO10_IO20 0x1E0 0x424 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_DISP_B1_00_VIDEO_MUX_LCDIF_CLK 0x1E4 0x428 0x0 0x0 0x0
+#define IOMUXC_GPIO_DISP_B1_00_ENET_1G_RX_EN 0x1E4 0x428 0x4E0 0x1 0x2
+#define IOMUXC_GPIO_DISP_B1_00_TMR1_TIMER0 0x1E4 0x428 0x63C 0x3 0x2
+#define IOMUXC_GPIO_DISP_B1_00_XBAR1_INOUT26 0x1E4 0x428 0x6F0 0x4 0x1
+#define IOMUXC_GPIO_DISP_B1_00_GPIO_MUX4_IO21 0x1E4 0x428 0x0 0x5 0x0
+#define IOMUXC_GPIO_DISP_B1_00_ENET_QOS_RX_EN 0x1E4 0x428 0x4F8 0x8 0x0
+#define IOMUXC_GPIO_DISP_B1_00_GPIO10_IO21 0x1E4 0x428 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_DISP_B1_01_VIDEO_MUX_LCDIF_ENABLE 0x1E8 0x42C 0x0 0x0 0x0
+#define IOMUXC_GPIO_DISP_B1_01_ENET_1G_RX_CLK 0x1E8 0x42C 0x4CC 0x1 0x2
+#define IOMUXC_GPIO_DISP_B1_01_ENET_1G_RX_ER 0x1E8 0x42C 0x4E4 0x2 0x1
+#define IOMUXC_GPIO_DISP_B1_01_TMR1_TIMER1 0x1E8 0x42C 0x640 0x3 0x2
+#define IOMUXC_GPIO_DISP_B1_01_XBAR1_INOUT27 0x1E8 0x42C 0x6F4 0x4 0x1
+#define IOMUXC_GPIO_DISP_B1_01_GPIO_MUX4_IO22 0x1E8 0x42C 0x0 0x5 0x0
+#define IOMUXC_GPIO_DISP_B1_01_ENET_QOS_RX_CLK 0x1E8 0x42C 0x0 0x8 0x0
+#define IOMUXC_GPIO_DISP_B1_01_ENET_QOS_RX_ER 0x1E8 0x42C 0x4FC 0x9 0x0
+#define IOMUXC_GPIO_DISP_B1_01_GPIO10_IO22 0x1E8 0x42C 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_DISP_B1_02_GPIO10_IO23 0x1EC 0x430 0x0 0xA 0x0
+#define IOMUXC_GPIO_DISP_B1_02_VIDEO_MUX_LCDIF_HSYNC 0x1EC 0x430 0x0 0x0 0x0
+#define IOMUXC_GPIO_DISP_B1_02_ENET_1G_RX_DATA00 0x1EC 0x430 0x4D0 0x1 0x2
+#define IOMUXC_GPIO_DISP_B1_02_LPI2C3_SCL 0x1EC 0x430 0x5BC 0x2 0x0
+#define IOMUXC_GPIO_DISP_B1_02_TMR1_TIMER2 0x1EC 0x430 0x644 0x3 0x1
+#define IOMUXC_GPIO_DISP_B1_02_XBAR1_INOUT28 0x1EC 0x430 0x6F8 0x4 0x1
+#define IOMUXC_GPIO_DISP_B1_02_GPIO_MUX4_IO23 0x1EC 0x430 0x0 0x5 0x0
+#define IOMUXC_GPIO_DISP_B1_02_ENET_QOS_RX_DATA00 0x1EC 0x430 0x4F0 0x8 0x0
+#define IOMUXC_GPIO_DISP_B1_02_LPUART1_TXD 0x1EC 0x430 0x620 0x9 0x1
+
+#define IOMUXC_GPIO_DISP_B1_03_VIDEO_MUX_LCDIF_VSYNC 0x1F0 0x434 0x0 0x0 0x0
+#define IOMUXC_GPIO_DISP_B1_03_ENET_1G_RX_DATA01 0x1F0 0x434 0x4D4 0x1 0x2
+#define IOMUXC_GPIO_DISP_B1_03_LPI2C3_SDA 0x1F0 0x434 0x5C0 0x2 0x0
+#define IOMUXC_GPIO_DISP_B1_03_TMR2_TIMER0 0x1F0 0x434 0x648 0x3 0x2
+#define IOMUXC_GPIO_DISP_B1_03_XBAR1_INOUT29 0x1F0 0x434 0x6FC 0x4 0x1
+#define IOMUXC_GPIO_DISP_B1_03_GPIO_MUX4_IO24 0x1F0 0x434 0x0 0x5 0x0
+#define IOMUXC_GPIO_DISP_B1_03_ENET_QOS_RX_DATA01 0x1F0 0x434 0x4F4 0x8 0x0
+#define IOMUXC_GPIO_DISP_B1_03_LPUART1_RXD 0x1F0 0x434 0x61C 0x9 0x1
+#define IOMUXC_GPIO_DISP_B1_03_GPIO10_IO24 0x1F0 0x434 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_DISP_B1_04_VIDEO_MUX_LCDIF_DATA00 0x1F4 0x438 0x0 0x0 0x0
+#define IOMUXC_GPIO_DISP_B1_04_ENET_1G_RX_DATA02 0x1F4 0x438 0x4D8 0x1 0x2
+#define IOMUXC_GPIO_DISP_B1_04_LPUART4_RXD 0x1F4 0x438 0x0 0x2 0x0
+#define IOMUXC_GPIO_DISP_B1_04_TMR2_TIMER1 0x1F4 0x438 0x64C 0x3 0x2
+#define IOMUXC_GPIO_DISP_B1_04_XBAR1_INOUT30 0x1F4 0x438 0x700 0x4 0x1
+#define IOMUXC_GPIO_DISP_B1_04_GPIO_MUX4_IO25 0x1F4 0x438 0x0 0x5 0x0
+#define IOMUXC_GPIO_DISP_B1_04_ENET_QOS_RX_DATA02 0x1F4 0x438 0x0 0x8 0x0
+#define IOMUXC_GPIO_DISP_B1_04_LPSPI3_SCK 0x1F4 0x438 0x600 0x9 0x1
+#define IOMUXC_GPIO_DISP_B1_04_GPIO10_IO25 0x1F4 0x438 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_DISP_B1_05_GPIO10_IO26 0x1F8 0x43C 0x0 0xA 0x0
+#define IOMUXC_GPIO_DISP_B1_05_VIDEO_MUX_LCDIF_DATA01 0x1F8 0x43C 0x0 0x0 0x0
+#define IOMUXC_GPIO_DISP_B1_05_ENET_1G_RX_DATA03 0x1F8 0x43C 0x4DC 0x1 0x2
+#define IOMUXC_GPIO_DISP_B1_05_LPUART4_CTS_B 0x1F8 0x43C 0x0 0x2 0x0
+#define IOMUXC_GPIO_DISP_B1_05_TMR2_TIMER2 0x1F8 0x43C 0x650 0x3 0x1
+#define IOMUXC_GPIO_DISP_B1_05_XBAR1_INOUT31 0x1F8 0x43C 0x704 0x4 0x1
+#define IOMUXC_GPIO_DISP_B1_05_GPIO_MUX4_IO26 0x1F8 0x43C 0x0 0x5 0x0
+#define IOMUXC_GPIO_DISP_B1_05_ENET_QOS_RX_DATA03 0x1F8 0x43C 0x0 0x8 0x0
+#define IOMUXC_GPIO_DISP_B1_05_LPSPI3_SIN 0x1F8 0x43C 0x604 0x9 0x1
+
+#define IOMUXC_GPIO_DISP_B1_06_VIDEO_MUX_LCDIF_DATA02 0x1FC 0x440 0x0 0x0 0x0
+#define IOMUXC_GPIO_DISP_B1_06_ENET_1G_TX_DATA03 0x1FC 0x440 0x0 0x1 0x0
+#define IOMUXC_GPIO_DISP_B1_06_LPUART4_TXD 0x1FC 0x440 0x0 0x2 0x0
+#define IOMUXC_GPIO_DISP_B1_06_TMR3_TIMER0 0x1FC 0x440 0x654 0x3 0x2
+#define IOMUXC_GPIO_DISP_B1_06_XBAR1_INOUT32 0x1FC 0x440 0x708 0x4 0x1
+#define IOMUXC_GPIO_DISP_B1_06_GPIO_MUX4_IO27 0x1FC 0x440 0x0 0x5 0x0
+#define IOMUXC_GPIO_DISP_B1_06_SRC_BT_CFG00 0x1FC 0x440 0x0 0x6 0x0
+#define IOMUXC_GPIO_DISP_B1_06_ENET_QOS_TX_DATA03 0x1FC 0x440 0x0 0x8 0x0
+#define IOMUXC_GPIO_DISP_B1_06_LPSPI3_SOUT 0x1FC 0x440 0x608 0x9 0x1
+#define IOMUXC_GPIO_DISP_B1_06_GPIO10_IO27 0x1FC 0x440 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_DISP_B1_07_VIDEO_MUX_LCDIF_DATA03 0x200 0x444 0x0 0x0 0x0
+#define IOMUXC_GPIO_DISP_B1_07_ENET_1G_TX_DATA02 0x200 0x444 0x0 0x1 0x0
+#define IOMUXC_GPIO_DISP_B1_07_LPUART4_RTS_B 0x200 0x444 0x0 0x2 0x0
+#define IOMUXC_GPIO_DISP_B1_07_TMR3_TIMER1 0x200 0x444 0x658 0x3 0x2
+#define IOMUXC_GPIO_DISP_B1_07_XBAR1_INOUT33 0x200 0x444 0x70C 0x4 0x1
+#define IOMUXC_GPIO_DISP_B1_07_GPIO_MUX4_IO28 0x200 0x444 0x0 0x5 0x0
+#define IOMUXC_GPIO_DISP_B1_07_SRC_BT_CFG01 0x200 0x444 0x0 0x6 0x0
+#define IOMUXC_GPIO_DISP_B1_07_ENET_QOS_TX_DATA02 0x200 0x444 0x0 0x8 0x0
+#define IOMUXC_GPIO_DISP_B1_07_LPSPI3_PCS0 0x200 0x444 0x5F0 0x9 0x1
+#define IOMUXC_GPIO_DISP_B1_07_GPIO10_IO28 0x200 0x444 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_DISP_B1_08_GPIO10_IO29 0x204 0x448 0x0 0xA 0x0
+#define IOMUXC_GPIO_DISP_B1_08_VIDEO_MUX_LCDIF_DATA04 0x204 0x448 0x0 0x0 0x0
+#define IOMUXC_GPIO_DISP_B1_08_ENET_1G_TX_DATA01 0x204 0x448 0x0 0x1 0x0
+#define IOMUXC_GPIO_DISP_B1_08_USDHC1_CD_B 0x204 0x448 0x6C8 0x2 0x1
+#define IOMUXC_GPIO_DISP_B1_08_TMR3_TIMER2 0x204 0x448 0x65C 0x3 0x1
+#define IOMUXC_GPIO_DISP_B1_08_XBAR1_INOUT34 0x204 0x448 0x710 0x4 0x1
+#define IOMUXC_GPIO_DISP_B1_08_GPIO_MUX4_IO29 0x204 0x448 0x0 0x5 0x0
+#define IOMUXC_GPIO_DISP_B1_08_SRC_BT_CFG02 0x204 0x448 0x0 0x6 0x0
+#define IOMUXC_GPIO_DISP_B1_08_ENET_QOS_TX_DATA01 0x204 0x448 0x0 0x8 0x0
+#define IOMUXC_GPIO_DISP_B1_08_LPSPI3_PCS1 0x204 0x448 0x5F4 0x9 0x1
+
+#define IOMUXC_GPIO_DISP_B1_09_VIDEO_MUX_LCDIF_DATA05 0x208 0x44C 0x0 0x0 0x0
+#define IOMUXC_GPIO_DISP_B1_09_ENET_1G_TX_DATA00 0x208 0x44C 0x0 0x1 0x0
+#define IOMUXC_GPIO_DISP_B1_09_USDHC1_WP 0x208 0x44C 0x6CC 0x2 0x1
+#define IOMUXC_GPIO_DISP_B1_09_TMR4_TIMER0 0x208 0x44C 0x660 0x3 0x2
+#define IOMUXC_GPIO_DISP_B1_09_XBAR1_INOUT35 0x208 0x44C 0x714 0x4 0x1
+#define IOMUXC_GPIO_DISP_B1_09_GPIO_MUX4_IO30 0x208 0x44C 0x0 0x5 0x0
+#define IOMUXC_GPIO_DISP_B1_09_SRC_BT_CFG03 0x208 0x44C 0x0 0x6 0x0
+#define IOMUXC_GPIO_DISP_B1_09_ENET_QOS_TX_DATA00 0x208 0x44C 0x0 0x8 0x0
+#define IOMUXC_GPIO_DISP_B1_09_LPSPI3_PCS2 0x208 0x44C 0x5F8 0x9 0x1
+#define IOMUXC_GPIO_DISP_B1_09_GPIO10_IO30 0x208 0x44C 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_DISP_B1_10_VIDEO_MUX_LCDIF_DATA06 0x20C 0x450 0x0 0x0 0x0
+#define IOMUXC_GPIO_DISP_B1_10_ENET_1G_TX_EN 0x20C 0x450 0x0 0x1 0x0
+#define IOMUXC_GPIO_DISP_B1_10_USDHC1_RESET_B 0x20C 0x450 0x0 0x2 0x0
+#define IOMUXC_GPIO_DISP_B1_10_TMR4_TIMER1 0x20C 0x450 0x664 0x3 0x2
+#define IOMUXC_GPIO_DISP_B1_10_XBAR1_INOUT36 0x20C 0x450 0x0 0x4 0x0
+#define IOMUXC_GPIO_DISP_B1_10_GPIO_MUX4_IO31 0x20C 0x450 0x0 0x5 0x0
+#define IOMUXC_GPIO_DISP_B1_10_SRC_BT_CFG04 0x20C 0x450 0x0 0x6 0x0
+#define IOMUXC_GPIO_DISP_B1_10_ENET_QOS_TX_EN 0x20C 0x450 0x0 0x8 0x0
+#define IOMUXC_GPIO_DISP_B1_10_LPSPI3_PCS3 0x20C 0x450 0x5FC 0x9 0x1
+#define IOMUXC_GPIO_DISP_B1_10_GPIO10_IO31 0x20C 0x450 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_DISP_B1_11_VIDEO_MUX_LCDIF_DATA07 0x210 0x454 0x0 0x0 0x0
+#define IOMUXC_GPIO_DISP_B1_11_ENET_1G_TX_CLK_IO 0x210 0x454 0x4E8 0x1 0x2
+#define IOMUXC_GPIO_DISP_B1_11_ENET_1G_REF_CLK 0x210 0x454 0x4C4 0x2 0x2
+#define IOMUXC_GPIO_DISP_B1_11_TMR4_TIMER2 0x210 0x454 0x668 0x3 0x1
+#define IOMUXC_GPIO_DISP_B1_11_XBAR1_INOUT37 0x210 0x454 0x0 0x4 0x0
+#define IOMUXC_GPIO_DISP_B1_11_GPIO_MUX5_IO00 0x210 0x454 0x0 0x5 0x0
+#define IOMUXC_GPIO_DISP_B1_11_SRC_BT_CFG05 0x210 0x454 0x0 0x6 0x0
+#define IOMUXC_GPIO_DISP_B1_11_ENET_QOS_TX_CLK 0x210 0x454 0x4A4 0x8 0x0
+#define IOMUXC_GPIO_DISP_B1_11_ENET_QOS_REF_CLK 0x210 0x454 0x4A0 0x9 0x2
+#define IOMUXC_GPIO_DISP_B1_11_GPIO11_IO00 0x210 0x454 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_DISP_B2_00_GPIO11_IO01 0x214 0x458 0x0 0xA 0x0
+#define IOMUXC_GPIO_DISP_B2_00_VIDEO_MUX_LCDIF_DATA08 0x214 0x458 0x0 0x0 0x0
+#define IOMUXC_GPIO_DISP_B2_00_WDOG1_B 0x214 0x458 0x0 0x1 0x0
+#define IOMUXC_GPIO_DISP_B2_00_MQS_RIGHT 0x214 0x458 0x0 0x2 0x0
+#define IOMUXC_GPIO_DISP_B2_00_ENET_1G_TX_ER 0x214 0x458 0x0 0x3 0x0
+#define IOMUXC_GPIO_DISP_B2_00_SAI1_TX_DATA03 0x214 0x458 0x0 0x4 0x0
+#define IOMUXC_GPIO_DISP_B2_00_GPIO_MUX5_IO01 0x214 0x458 0x0 0x5 0x0
+#define IOMUXC_GPIO_DISP_B2_00_SRC_BT_CFG06 0x214 0x458 0x0 0x6 0x0
+#define IOMUXC_GPIO_DISP_B2_00_ENET_QOS_TX_ER 0x214 0x458 0x0 0x8 0x0
+
+#define IOMUXC_GPIO_DISP_B2_01_VIDEO_MUX_LCDIF_DATA09 0x218 0x45C 0x0 0x0 0x0
+#define IOMUXC_GPIO_DISP_B2_01_USDHC1_VSELECT 0x218 0x45C 0x0 0x1 0x0
+#define IOMUXC_GPIO_DISP_B2_01_MQS_LEFT 0x218 0x45C 0x0 0x2 0x0
+#define IOMUXC_GPIO_DISP_B2_01_WDOG2_B 0x218 0x45C 0x0 0x3 0x0
+#define IOMUXC_GPIO_DISP_B2_01_SAI1_TX_DATA02 0x218 0x45C 0x0 0x4 0x0
+#define IOMUXC_GPIO_DISP_B2_01_GPIO_MUX5_IO02 0x218 0x45C 0x0 0x5 0x0
+#define IOMUXC_GPIO_DISP_B2_01_SRC_BT_CFG07 0x218 0x45C 0x0 0x6 0x0
+#define IOMUXC_GPIO_DISP_B2_01_EWM_OUT_B 0x218 0x45C 0x0 0x8 0x0
+#define IOMUXC_GPIO_DISP_B2_01_CCM_ENET_REF_CLK_25M 0x218 0x45C 0x0 0x9 0x0
+#define IOMUXC_GPIO_DISP_B2_01_GPIO11_IO02 0x218 0x45C 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_DISP_B2_02_GPIO11_IO03 0x21C 0x460 0x0 0xA 0x0
+#define IOMUXC_GPIO_DISP_B2_02_VIDEO_MUX_LCDIF_DATA10 0x21C 0x460 0x0 0x0 0x0
+#define IOMUXC_GPIO_DISP_B2_02_ENET_TX_DATA00 0x21C 0x460 0x0 0x1 0x0
+#define IOMUXC_GPIO_DISP_B2_02_PIT1_TRIGGER3 0x21C 0x460 0x0 0x2 0x0
+#define IOMUXC_GPIO_DISP_B2_02_ARM_TRACE00 0x21C 0x460 0x0 0x3 0x0
+#define IOMUXC_GPIO_DISP_B2_02_SAI1_TX_DATA01 0x21C 0x460 0x0 0x4 0x0
+#define IOMUXC_GPIO_DISP_B2_02_GPIO_MUX5_IO03 0x21C 0x460 0x0 0x5 0x0
+#define IOMUXC_GPIO_DISP_B2_02_SRC_BT_CFG08 0x21C 0x460 0x0 0x6 0x0
+#define IOMUXC_GPIO_DISP_B2_02_ENET_QOS_TX_DATA00 0x21C 0x460 0x0 0x8 0x0
+
+#define IOMUXC_GPIO_DISP_B2_03_GPIO11_IO04 0x220 0x464 0x0 0xA 0x0
+#define IOMUXC_GPIO_DISP_B2_03_VIDEO_MUX_LCDIF_DATA11 0x220 0x464 0x0 0x0 0x0
+#define IOMUXC_GPIO_DISP_B2_03_ENET_TX_DATA01 0x220 0x464 0x0 0x1 0x0
+#define IOMUXC_GPIO_DISP_B2_03_PIT1_TRIGGER2 0x220 0x464 0x0 0x2 0x0
+#define IOMUXC_GPIO_DISP_B2_03_ARM_TRACE01 0x220 0x464 0x0 0x3 0x0
+#define IOMUXC_GPIO_DISP_B2_03_SAI1_MCLK 0x220 0x464 0x66C 0x4 0x1
+#define IOMUXC_GPIO_DISP_B2_03_GPIO_MUX5_IO04 0x220 0x464 0x0 0x5 0x0
+#define IOMUXC_GPIO_DISP_B2_03_SRC_BT_CFG09 0x220 0x464 0x0 0x6 0x0
+#define IOMUXC_GPIO_DISP_B2_03_ENET_QOS_TX_DATA01 0x220 0x464 0x0 0x8 0x0
+
+#define IOMUXC_GPIO_DISP_B2_04_VIDEO_MUX_LCDIF_DATA12 0x224 0x468 0x0 0x0 0x0
+#define IOMUXC_GPIO_DISP_B2_04_ENET_TX_EN 0x224 0x468 0x0 0x1 0x0
+#define IOMUXC_GPIO_DISP_B2_04_PIT1_TRIGGER1 0x224 0x468 0x0 0x2 0x0
+#define IOMUXC_GPIO_DISP_B2_04_ARM_TRACE02 0x224 0x468 0x0 0x3 0x0
+#define IOMUXC_GPIO_DISP_B2_04_SAI1_RX_SYNC 0x224 0x468 0x678 0x4 0x1
+#define IOMUXC_GPIO_DISP_B2_04_GPIO_MUX5_IO05 0x224 0x468 0x0 0x5 0x0
+#define IOMUXC_GPIO_DISP_B2_04_SRC_BT_CFG10 0x224 0x468 0x0 0x6 0x0
+#define IOMUXC_GPIO_DISP_B2_04_ENET_QOS_TX_EN 0x224 0x468 0x0 0x8 0x0
+#define IOMUXC_GPIO_DISP_B2_04_GPIO11_IO05 0x224 0x468 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_DISP_B2_05_GPIO11_IO06 0x228 0x46C 0x0 0xA 0x0
+#define IOMUXC_GPIO_DISP_B2_05_VIDEO_MUX_LCDIF_DATA13 0x228 0x46C 0x0 0x0 0x0
+#define IOMUXC_GPIO_DISP_B2_05_ENET_TX_CLK 0x228 0x46C 0x4C0 0x1 0x1
+#define IOMUXC_GPIO_DISP_B2_05_ENET_REF_CLK 0x228 0x46C 0x4A8 0x2 0x1
+#define IOMUXC_GPIO_DISP_B2_05_ARM_TRACE03 0x228 0x46C 0x0 0x3 0x0
+#define IOMUXC_GPIO_DISP_B2_05_SAI1_RX_BCLK 0x228 0x46C 0x670 0x4 0x1
+#define IOMUXC_GPIO_DISP_B2_05_GPIO_MUX5_IO06 0x228 0x46C 0x0 0x5 0x0
+#define IOMUXC_GPIO_DISP_B2_05_SRC_BT_CFG11 0x228 0x46C 0x0 0x6 0x0
+#define IOMUXC_GPIO_DISP_B2_05_ENET_QOS_TX_CLK 0x228 0x46C 0x4A4 0x8 0x1
+
+#define IOMUXC_GPIO_DISP_B2_06_GPIO11_IO07 0x22C 0x470 0x0 0xA 0x0
+#define IOMUXC_GPIO_DISP_B2_06_VIDEO_MUX_LCDIF_DATA14 0x22C 0x470 0x0 0x0 0x0
+#define IOMUXC_GPIO_DISP_B2_06_ENET_RX_DATA00 0x22C 0x470 0x4B0 0x1 0x1
+#define IOMUXC_GPIO_DISP_B2_06_LPUART7_TXD 0x22C 0x470 0x630 0x2 0x1
+#define IOMUXC_GPIO_DISP_B2_06_ARM_TRACE_CLK 0x22C 0x470 0x0 0x3 0x0
+#define IOMUXC_GPIO_DISP_B2_06_SAI1_RX_DATA00 0x22C 0x470 0x674 0x4 0x1
+#define IOMUXC_GPIO_DISP_B2_06_GPIO_MUX5_IO07 0x22C 0x470 0x0 0x5 0x0
+#define IOMUXC_GPIO_DISP_B2_06_ENET_QOS_RX_DATA00 0x22C 0x470 0x4F0 0x8 0x1
+
+#define IOMUXC_GPIO_DISP_B2_07_VIDEO_MUX_LCDIF_DATA15 0x230 0x474 0x0 0x0 0x0
+#define IOMUXC_GPIO_DISP_B2_07_ENET_RX_DATA01 0x230 0x474 0x4B4 0x1 0x1
+#define IOMUXC_GPIO_DISP_B2_07_LPUART7_RXD 0x230 0x474 0x62C 0x2 0x1
+#define IOMUXC_GPIO_DISP_B2_07_ARM_TRACE_SWO 0x230 0x474 0x0 0x3 0x0
+#define IOMUXC_GPIO_DISP_B2_07_SAI1_TX_DATA00 0x230 0x474 0x0 0x4 0x0
+#define IOMUXC_GPIO_DISP_B2_07_GPIO_MUX5_IO08 0x230 0x474 0x0 0x5 0x0
+#define IOMUXC_GPIO_DISP_B2_07_ENET_QOS_RX_DATA01 0x230 0x474 0x4F4 0x8 0x1
+#define IOMUXC_GPIO_DISP_B2_07_GPIO11_IO08 0x230 0x474 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_DISP_B2_08_GPIO11_IO09 0x234 0x478 0x0 0xA 0x0
+#define IOMUXC_GPIO_DISP_B2_08_VIDEO_MUX_LCDIF_DATA16 0x234 0x478 0x0 0x0 0x0
+#define IOMUXC_GPIO_DISP_B2_08_ENET_RX_EN 0x234 0x478 0x4B8 0x1 0x1
+#define IOMUXC_GPIO_DISP_B2_08_LPUART8_TXD 0x234 0x478 0x638 0x2 0x1
+#define IOMUXC_GPIO_DISP_B2_08_ARM_CM7_EVENTO 0x234 0x478 0x0 0x3 0x0
+#define IOMUXC_GPIO_DISP_B2_08_SAI1_TX_BCLK 0x234 0x478 0x67C 0x4 0x1
+#define IOMUXC_GPIO_DISP_B2_08_GPIO_MUX5_IO09 0x234 0x478 0x0 0x5 0x0
+#define IOMUXC_GPIO_DISP_B2_08_ENET_QOS_RX_EN 0x234 0x478 0x4F8 0x8 0x1
+#define IOMUXC_GPIO_DISP_B2_08_LPUART1_TXD 0x234 0x478 0x620 0x9 0x2
+
+#define IOMUXC_GPIO_DISP_B2_09_GPIO11_IO10 0x238 0x47C 0x0 0xA 0x0
+#define IOMUXC_GPIO_DISP_B2_09_VIDEO_MUX_LCDIF_DATA17 0x238 0x47C 0x0 0x0 0x0
+#define IOMUXC_GPIO_DISP_B2_09_ENET_RX_ER 0x238 0x47C 0x4BC 0x1 0x1
+#define IOMUXC_GPIO_DISP_B2_09_LPUART8_RXD 0x238 0x47C 0x634 0x2 0x1
+#define IOMUXC_GPIO_DISP_B2_09_ARM_CM7_EVENTI 0x238 0x47C 0x0 0x3 0x0
+#define IOMUXC_GPIO_DISP_B2_09_SAI1_TX_SYNC 0x238 0x47C 0x680 0x4 0x1
+#define IOMUXC_GPIO_DISP_B2_09_GPIO_MUX5_IO10 0x238 0x47C 0x0 0x5 0x0
+#define IOMUXC_GPIO_DISP_B2_09_ENET_QOS_RX_ER 0x238 0x47C 0x4FC 0x8 0x1
+#define IOMUXC_GPIO_DISP_B2_09_LPUART1_RXD 0x238 0x47C 0x61C 0x9 0x2
+
+#define IOMUXC_GPIO_DISP_B2_10_GPIO11_IO11 0x23C 0x480 0x0 0xA 0x0
+#define IOMUXC_GPIO_DISP_B2_10_VIDEO_MUX_LCDIF_DATA18 0x23C 0x480 0x0 0x0 0x0
+#define IOMUXC_GPIO_DISP_B2_10_EMVSIM2_IO 0x23C 0x480 0x6A8 0x1 0x1
+#define IOMUXC_GPIO_DISP_B2_10_LPUART2_TXD 0x23C 0x480 0x0 0x2 0x0
+#define IOMUXC_GPIO_DISP_B2_10_WDOG2_RESET_B_DEB 0x23C 0x480 0x0 0x3 0x0
+#define IOMUXC_GPIO_DISP_B2_10_XBAR1_INOUT38 0x23C 0x480 0x0 0x4 0x0
+#define IOMUXC_GPIO_DISP_B2_10_GPIO_MUX5_IO11 0x23C 0x480 0x0 0x5 0x0
+#define IOMUXC_GPIO_DISP_B2_10_LPI2C3_SCL 0x23C 0x480 0x5BC 0x6 0x1
+#define IOMUXC_GPIO_DISP_B2_10_ENET_QOS_RX_ER 0x23C 0x480 0x4FC 0x8 0x2
+#define IOMUXC_GPIO_DISP_B2_10_SPDIF_IN 0x23C 0x480 0x6B4 0x9 0x2
+
+#define IOMUXC_GPIO_DISP_B2_11_VIDEO_MUX_LCDIF_DATA19 0x240 0x484 0x0 0x0 0x0
+#define IOMUXC_GPIO_DISP_B2_11_EMVSIM2_CLK 0x240 0x484 0x0 0x1 0x0
+#define IOMUXC_GPIO_DISP_B2_11_LPUART2_RXD 0x240 0x484 0x0 0x2 0x0
+#define IOMUXC_GPIO_DISP_B2_11_WDOG1_RESET_B_DEB 0x240 0x484 0x0 0x3 0x0
+#define IOMUXC_GPIO_DISP_B2_11_XBAR1_INOUT39 0x240 0x484 0x0 0x4 0x0
+#define IOMUXC_GPIO_DISP_B2_11_GPIO_MUX5_IO12 0x240 0x484 0x0 0x5 0x0
+#define IOMUXC_GPIO_DISP_B2_11_LPI2C3_SDA 0x240 0x484 0x5C0 0x6 0x1
+#define IOMUXC_GPIO_DISP_B2_11_ENET_QOS_CRS 0x240 0x484 0x0 0x8 0x0
+#define IOMUXC_GPIO_DISP_B2_11_SPDIF_OUT 0x240 0x484 0x0 0x9 0x0
+#define IOMUXC_GPIO_DISP_B2_11_GPIO11_IO12 0x240 0x484 0x0 0xA 0x0
+
+#define IOMUXC_GPIO_DISP_B2_12_GPIO11_IO13 0x244 0x488 0x0 0xA 0x0
+#define IOMUXC_GPIO_DISP_B2_12_VIDEO_MUX_LCDIF_DATA20 0x244 0x488 0x0 0x0 0x0
+#define IOMUXC_GPIO_DISP_B2_12_EMVSIM2_RST 0x244 0x488 0x0 0x1 0x0
+#define IOMUXC_GPIO_DISP_B2_12_FLEXCAN1_TX 0x244 0x488 0x0 0x2 0x0
+#define IOMUXC_GPIO_DISP_B2_12_LPUART2_CTS_B 0x244 0x488 0x0 0x3 0x0
+#define IOMUXC_GPIO_DISP_B2_12_XBAR1_INOUT40 0x244 0x488 0x0 0x4 0x0
+#define IOMUXC_GPIO_DISP_B2_12_GPIO_MUX5_IO13 0x244 0x488 0x0 0x5 0x0
+#define IOMUXC_GPIO_DISP_B2_12_LPI2C4_SCL 0x244 0x488 0x5C4 0x6 0x1
+#define IOMUXC_GPIO_DISP_B2_12_ENET_QOS_COL 0x244 0x488 0x0 0x8 0x0
+#define IOMUXC_GPIO_DISP_B2_12_LPSPI4_SCK 0x244 0x488 0x610 0x9 0x1
+
+#define IOMUXC_GPIO_DISP_B2_13_GPIO11_IO14 0x248 0x48C 0x0 0xA 0x0
+#define IOMUXC_GPIO_DISP_B2_13_VIDEO_MUX_LCDIF_DATA21 0x248 0x48C 0x0 0x0 0x0
+#define IOMUXC_GPIO_DISP_B2_13_EMVSIM2_SVEN 0x248 0x48C 0x0 0x1 0x0
+#define IOMUXC_GPIO_DISP_B2_13_FLEXCAN1_RX 0x248 0x48C 0x498 0x2 0x1
+#define IOMUXC_GPIO_DISP_B2_13_LPUART2_RTS_B 0x248 0x48C 0x0 0x3 0x0
+#define IOMUXC_GPIO_DISP_B2_13_ENET_REF_CLK 0x248 0x48C 0x4A8 0x4 0x2
+#define IOMUXC_GPIO_DISP_B2_13_GPIO_MUX5_IO14 0x248 0x48C 0x0 0x5 0x0
+#define IOMUXC_GPIO_DISP_B2_13_LPI2C4_SDA 0x248 0x48C 0x5C8 0x6 0x1
+#define IOMUXC_GPIO_DISP_B2_13_ENET_QOS_1588_EVENT0_OUT 0x248 0x48C 0x0 0x8 0x0
+#define IOMUXC_GPIO_DISP_B2_13_LPSPI4_SIN 0x248 0x48C 0x614 0x9 0x1
+
+#define IOMUXC_GPIO_DISP_B2_14_GPIO_MUX5_IO15 0x24C 0x490 0x0 0x5 0x0
+#define IOMUXC_GPIO_DISP_B2_14_FLEXCAN1_TX 0x24C 0x490 0x0 0x6 0x0
+#define IOMUXC_GPIO_DISP_B2_14_ENET_QOS_1588_EVENT0_IN 0x24C 0x490 0x0 0x8 0x0
+#define IOMUXC_GPIO_DISP_B2_14_LPSPI4_SOUT 0x24C 0x490 0x618 0x9 0x1
+#define IOMUXC_GPIO_DISP_B2_14_GPIO11_IO15 0x24C 0x490 0x0 0xA 0x0
+#define IOMUXC_GPIO_DISP_B2_14_VIDEO_MUX_LCDIF_DATA22 0x24C 0x490 0x0 0x0 0x0
+#define IOMUXC_GPIO_DISP_B2_14_EMVSIM2_PD 0x24C 0x490 0x6AC 0x1 0x1
+#define IOMUXC_GPIO_DISP_B2_14_WDOG2_B 0x24C 0x490 0x0 0x2 0x0
+#define IOMUXC_GPIO_DISP_B2_14_VIDEO_MUX_EXT_DCIC1 0x24C 0x490 0x0 0x3 0x0
+#define IOMUXC_GPIO_DISP_B2_14_ENET_1G_REF_CLK 0x24C 0x490 0x4C4 0x4 0x3
+
+#define IOMUXC_GPIO_DISP_B2_15_VIDEO_MUX_LCDIF_DATA23 0x250 0x494 0x0 0x0 0x0
+#define IOMUXC_GPIO_DISP_B2_15_EMVSIM2_POWER_FAIL 0x250 0x494 0x6B0 0x1 0x1
+#define IOMUXC_GPIO_DISP_B2_15_WDOG1_B 0x250 0x494 0x0 0x2 0x0
+#define IOMUXC_GPIO_DISP_B2_15_VIDEO_MUX_EXT_DCIC2 0x250 0x494 0x0 0x3 0x0
+#define IOMUXC_GPIO_DISP_B2_15_PIT1_TRIGGER0 0x250 0x494 0x0 0x4 0x0
+#define IOMUXC_GPIO_DISP_B2_15_GPIO_MUX5_IO16 0x250 0x494 0x0 0x5 0x0
+#define IOMUXC_GPIO_DISP_B2_15_FLEXCAN1_RX 0x250 0x494 0x498 0x6 0x2
+#define IOMUXC_GPIO_DISP_B2_15_ENET_QOS_1588_EVENT0_AUX_IN 0x250 0x494 0x0 0x8 0x0
+#define IOMUXC_GPIO_DISP_B2_15_LPSPI4_PCS0 0x250 0x494 0x60C 0x9 0x1
+#define IOMUXC_GPIO_DISP_B2_15_GPIO11_IO16 0x250 0x494 0x0 0xA 0x0
+
+#endif /* _DT_BINDINGS_PINCTRL_IMXRT1170_PINFUNC_H */
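
For context, each pin-function macro above packs five cells (the pad's mux register offset, pad-configuration register offset, input select register offset, mux mode and input select value) and is normally consumed from a board device tree's iomuxc pin group together with a sixth pad-configuration cell. A minimal, hypothetical sketch, not part of this patch (the group name, the pads chosen and the 0x02 pad settings are illustrative only):

&iomuxc {
	/* Hypothetical pin group routing two DISP_B2 pads to LPUART7. */
	pinctrl_lpuart7: lpuart7grp {
		fsl,pins = <
			IOMUXC_GPIO_DISP_B2_06_LPUART7_TXD	0x02
			IOMUXC_GPIO_DISP_B2_07_LPUART7_RXD	0x02
		>;
	};
};
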
diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
index bfaef45bdd04..2459f3cd7dd9 100644
--- a/arch/arm/boot/dts/socfpga.dtsi
+++ b/arch/arm/boot/dts/socfpga.dtsi
@@ -561,6 +561,12 @@
interrupts = <0 175 4>;
};
+ socfpga_axi_setup: stmmac-axi-config {
+ snps,wr_osr_lmt = <0xf>;
+ snps,rd_osr_lmt = <0xf>;
+ snps,blen = <0 0 0 0 16 0 0>;
+ };
+
gmac0: ethernet@ff700000 {
compatible = "altr,socfpga-stmmac", "snps,dwmac-3.70a", "snps,dwmac";
altr,sysmgr-syscon = <&sysmgr 0x60 0>;
@@ -576,6 +582,7 @@
snps,perfect-filter-entries = <128>;
tx-fifo-depth = <4096>;
rx-fifo-depth = <4096>;
+ snps,axi-config = <&socfpga_axi_setup>;
status = "disabled";
};
@@ -594,6 +601,7 @@
snps,perfect-filter-entries = <128>;
tx-fifo-depth = <4096>;
rx-fifo-depth = <4096>;
+ snps,axi-config = <&socfpga_axi_setup>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index 70e634b37aae..6cdadba6a3ac 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -1389,7 +1389,6 @@
<GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&apb0_gates 0>, <&osc24M>, <&rtc CLK_OSC32K>;
clock-names = "apb", "hosc", "losc";
- resets = <&apb0_rst 0>;
gpio-controller;
interrupt-controller;
#interrupt-cells = <3>;
diff --git a/arch/arm/boot/dts/sun8i-a23-a33.dtsi b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
index 1a262a05fdcb..f630ab55bb6a 100644
--- a/arch/arm/boot/dts/sun8i-a23-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
@@ -814,7 +814,6 @@
interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&apb0_gates 0>, <&osc24M>, <&rtc CLK_OSC32K>;
clock-names = "apb", "hosc", "losc";
- resets = <&apb0_rst 0>;
gpio-controller;
interrupt-controller;
#interrupt-cells = <3>;
diff --git a/arch/arm/boot/dts/sun9i-a80.dtsi b/arch/arm/boot/dts/sun9i-a80.dtsi
index ce4fa6706d06..7d3f3300f431 100644
--- a/arch/arm/boot/dts/sun9i-a80.dtsi
+++ b/arch/arm/boot/dts/sun9i-a80.dtsi
@@ -1218,7 +1218,6 @@
<GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&apbs_gates 0>, <&osc24M>, <&osc32k>;
clock-names = "apb", "hosc", "losc";
- resets = <&apbs_rst 0>;
gpio-controller;
interrupt-controller;
#interrupt-cells = <3>;
diff --git a/arch/arm/boot/dts/uniphier-pxs2.dtsi b/arch/arm/boot/dts/uniphier-pxs2.dtsi
index e81e5937a60a..03301ddb3403 100644
--- a/arch/arm/boot/dts/uniphier-pxs2.dtsi
+++ b/arch/arm/boot/dts/uniphier-pxs2.dtsi
@@ -597,8 +597,8 @@
compatible = "socionext,uniphier-dwc3", "snps,dwc3";
status = "disabled";
reg = <0x65a00000 0xcd00>;
- interrupt-names = "host", "peripheral";
- interrupts = <0 134 4>, <0 135 4>;
+ interrupt-names = "dwc_usb3";
+ interrupts = <0 134 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb0>, <&pinctrl_usb2>;
clock-names = "ref", "bus_early", "suspend";
@@ -693,8 +693,8 @@
compatible = "socionext,uniphier-dwc3", "snps,dwc3";
status = "disabled";
reg = <0x65c00000 0xcd00>;
- interrupt-names = "host", "peripheral";
- interrupts = <0 137 4>, <0 138 4>;
+ interrupt-names = "dwc_usb3";
+ interrupts = <0 137 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb1>, <&pinctrl_usb3>;
clock-names = "ref", "bus_early", "suspend";
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index c8e198631d41..d2fdb1796f48 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -1,11 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config SA1111
bool
- select DMABOUNCE if !ARCH_PXA
-
-config DMABOUNCE
- bool
- select ZONE_DMA
+ select ZONE_DMA if ARCH_SA1100
config KRAIT_L2_ACCESSORS
bool
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index 8cd574be94cf..7bae8cbaafe7 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -6,7 +6,6 @@
obj-y += firmware.o
obj-$(CONFIG_SA1111) += sa1111.o
-obj-$(CONFIG_DMABOUNCE) += dmabounce.o
obj-$(CONFIG_KRAIT_L2_ACCESSORS) += krait-l2-accessors.o
obj-$(CONFIG_SHARP_LOCOMO) += locomo.o
obj-$(CONFIG_SHARP_PARAM) += sharpsl_param.o
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
deleted file mode 100644
index 7996c04393d5..000000000000
--- a/arch/arm/common/dmabounce.c
+++ /dev/null
@@ -1,582 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * arch/arm/common/dmabounce.c
- *
- * Special dma_{map/unmap/dma_sync}_* routines for systems that have
- * limited DMA windows. These functions utilize bounce buffers to
- * copy data to/from buffers located outside the DMA region. This
- * only works for systems in which DMA memory is at the bottom of
- * RAM, the remainder of memory is at the top and the DMA memory
- * can be marked as ZONE_DMA. Anything beyond that such as discontiguous
- * DMA windows will require custom implementations that reserve memory
- * areas at early bootup.
- *
- * Original version by Brad Parker (brad@heeltoe.com)
- * Re-written by Christopher Hoover <ch@murgatroid.com>
- * Made generic by Deepak Saxena <dsaxena@plexity.net>
- *
- * Copyright (C) 2002 Hewlett Packard Company.
- * Copyright (C) 2004 MontaVista Software, Inc.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/page-flags.h>
-#include <linux/device.h>
-#include <linux/dma-direct.h>
-#include <linux/dma-map-ops.h>
-#include <linux/dmapool.h>
-#include <linux/list.h>
-#include <linux/scatterlist.h>
-
-#include <asm/cacheflush.h>
-#include <asm/dma-iommu.h>
-
-#undef STATS
-
-#ifdef STATS
-#define DO_STATS(X) do { X ; } while (0)
-#else
-#define DO_STATS(X) do { } while (0)
-#endif
-
-/* ************************************************** */
-
-struct safe_buffer {
- struct list_head node;
-
- /* original request */
- void *ptr;
- size_t size;
- int direction;
-
- /* safe buffer info */
- struct dmabounce_pool *pool;
- void *safe;
- dma_addr_t safe_dma_addr;
-};
-
-struct dmabounce_pool {
- unsigned long size;
- struct dma_pool *pool;
-#ifdef STATS
- unsigned long allocs;
-#endif
-};
-
-struct dmabounce_device_info {
- struct device *dev;
- struct list_head safe_buffers;
-#ifdef STATS
- unsigned long total_allocs;
- unsigned long map_op_count;
- unsigned long bounce_count;
- int attr_res;
-#endif
- struct dmabounce_pool small;
- struct dmabounce_pool large;
-
- rwlock_t lock;
-
- int (*needs_bounce)(struct device *, dma_addr_t, size_t);
-};
-
-#ifdef STATS
-static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
- return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
- device_info->small.allocs,
- device_info->large.allocs,
- device_info->total_allocs - device_info->small.allocs -
- device_info->large.allocs,
- device_info->total_allocs,
- device_info->map_op_count,
- device_info->bounce_count);
-}
-
-static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
-#endif
-
-
-/* allocate a 'safe' buffer and keep track of it */
-static inline struct safe_buffer *
-alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
- size_t size, enum dma_data_direction dir)
-{
- struct safe_buffer *buf;
- struct dmabounce_pool *pool;
- struct device *dev = device_info->dev;
- unsigned long flags;
-
- dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
- __func__, ptr, size, dir);
-
- if (size <= device_info->small.size) {
- pool = &device_info->small;
- } else if (size <= device_info->large.size) {
- pool = &device_info->large;
- } else {
- pool = NULL;
- }
-
- buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
- if (buf == NULL) {
- dev_warn(dev, "%s: kmalloc failed\n", __func__);
- return NULL;
- }
-
- buf->ptr = ptr;
- buf->size = size;
- buf->direction = dir;
- buf->pool = pool;
-
- if (pool) {
- buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
- &buf->safe_dma_addr);
- } else {
- buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
- GFP_ATOMIC);
- }
-
- if (buf->safe == NULL) {
- dev_warn(dev,
- "%s: could not alloc dma memory (size=%d)\n",
- __func__, size);
- kfree(buf);
- return NULL;
- }
-
-#ifdef STATS
- if (pool)
- pool->allocs++;
- device_info->total_allocs++;
-#endif
-
- write_lock_irqsave(&device_info->lock, flags);
- list_add(&buf->node, &device_info->safe_buffers);
- write_unlock_irqrestore(&device_info->lock, flags);
-
- return buf;
-}
-
-/* determine if a buffer is from our "safe" pool */
-static inline struct safe_buffer *
-find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
-{
- struct safe_buffer *b, *rb = NULL;
- unsigned long flags;
-
- read_lock_irqsave(&device_info->lock, flags);
-
- list_for_each_entry(b, &device_info->safe_buffers, node)
- if (b->safe_dma_addr <= safe_dma_addr &&
- b->safe_dma_addr + b->size > safe_dma_addr) {
- rb = b;
- break;
- }
-
- read_unlock_irqrestore(&device_info->lock, flags);
- return rb;
-}
-
-static inline void
-free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
-{
- unsigned long flags;
-
- dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);
-
- write_lock_irqsave(&device_info->lock, flags);
-
- list_del(&buf->node);
-
- write_unlock_irqrestore(&device_info->lock, flags);
-
- if (buf->pool)
- dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
- else
- dma_free_coherent(device_info->dev, buf->size, buf->safe,
- buf->safe_dma_addr);
-
- kfree(buf);
-}
-
-/* ************************************************** */
-
-static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
- dma_addr_t dma_addr, const char *where)
-{
- if (!dev || !dev->archdata.dmabounce)
- return NULL;
- if (dma_mapping_error(dev, dma_addr)) {
- dev_err(dev, "Trying to %s invalid mapping\n", where);
- return NULL;
- }
- return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
-}
-
-static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
-{
- if (!dev || !dev->archdata.dmabounce)
- return 0;
-
- if (dev->dma_mask) {
- unsigned long limit, mask = *dev->dma_mask;
-
- limit = (mask + 1) & ~mask;
- if (limit && size > limit) {
- dev_err(dev, "DMA mapping too big (requested %#x "
- "mask %#Lx)\n", size, *dev->dma_mask);
- return -E2BIG;
- }
-
- /* Figure out if we need to bounce from the DMA mask. */
- if ((dma_addr | (dma_addr + size - 1)) & ~mask)
- return 1;
- }
-
- return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size);
-}
-
-static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
- struct safe_buffer *buf;
-
- if (device_info)
- DO_STATS ( device_info->map_op_count++ );
-
- buf = alloc_safe_buffer(device_info, ptr, size, dir);
- if (buf == NULL) {
- dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
- __func__, ptr);
- return DMA_MAPPING_ERROR;
- }
-
- dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
- __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
- buf->safe, buf->safe_dma_addr);
-
- if ((dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) &&
- !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
- dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
- __func__, ptr, buf->safe, size);
- memcpy(buf->safe, ptr, size);
- }
-
- return buf->safe_dma_addr;
-}
-
-static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- BUG_ON(buf->size != size);
- BUG_ON(buf->direction != dir);
-
- dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
- __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
- buf->safe, buf->safe_dma_addr);
-
- DO_STATS(dev->archdata.dmabounce->bounce_count++);
-
- if ((dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) &&
- !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
- void *ptr = buf->ptr;
-
- dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
- __func__, buf->safe, ptr, size);
- memcpy(ptr, buf->safe, size);
-
- /*
- * Since we may have written to a page cache page,
- * we need to ensure that the data will be coherent
- * with user mappings.
- */
- __cpuc_flush_dcache_area(ptr, size);
- }
- free_safe_buffer(dev->archdata.dmabounce, buf);
-}
-
-/* ************************************************** */
-
-/*
- * see if a buffer address is in an 'unsafe' range. if it is
- * allocate a 'safe' buffer and copy the unsafe buffer into it.
- * substitute the safe buffer for the unsafe one.
- * (basically move the buffer from an unsafe area to a safe one)
- */
-static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- dma_addr_t dma_addr;
- int ret;
-
- dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
- __func__, page, offset, size, dir);
-
- dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset;
-
- ret = needs_bounce(dev, dma_addr, size);
- if (ret < 0)
- return DMA_MAPPING_ERROR;
-
- if (ret == 0) {
- arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
- return dma_addr;
- }
-
- if (PageHighMem(page)) {
- dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
- return DMA_MAPPING_ERROR;
- }
-
- return map_single(dev, page_address(page) + offset, size, dir, attrs);
-}
-
-/*
- * see if a mapped address was really a "safe" buffer and if so, copy
- * the data from the safe buffer back to the unsafe buffer and free up
- * the safe buffer. (basically return things back to the way they
- * should be)
- */
-static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction dir, unsigned long attrs)
-{
- struct safe_buffer *buf;
-
- dev_dbg(dev, "%s(dma=%#x,size=%d,dir=%x)\n",
- __func__, dma_addr, size, dir);
-
- buf = find_safe_buffer_dev(dev, dma_addr, __func__);
- if (!buf) {
- arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir);
- return;
- }
-
- unmap_single(dev, buf, size, dir, attrs);
-}
-
-static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
- size_t sz, enum dma_data_direction dir)
-{
- struct safe_buffer *buf;
- unsigned long off;
-
- dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
- __func__, addr, sz, dir);
-
- buf = find_safe_buffer_dev(dev, addr, __func__);
- if (!buf)
- return 1;
-
- off = addr - buf->safe_dma_addr;
-
- BUG_ON(buf->direction != dir);
-
- dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
- __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
- buf->safe, buf->safe_dma_addr);
-
- DO_STATS(dev->archdata.dmabounce->bounce_count++);
-
- if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
- dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
- __func__, buf->safe + off, buf->ptr + off, sz);
- memcpy(buf->ptr + off, buf->safe + off, sz);
- }
- return 0;
-}
-
-static void dmabounce_sync_for_cpu(struct device *dev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
- if (!__dmabounce_sync_for_cpu(dev, handle, size, dir))
- return;
-
- arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir);
-}
-
-static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
- size_t sz, enum dma_data_direction dir)
-{
- struct safe_buffer *buf;
- unsigned long off;
-
- dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
- __func__, addr, sz, dir);
-
- buf = find_safe_buffer_dev(dev, addr, __func__);
- if (!buf)
- return 1;
-
- off = addr - buf->safe_dma_addr;
-
- BUG_ON(buf->direction != dir);
-
- dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
- __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
- buf->safe, buf->safe_dma_addr);
-
- DO_STATS(dev->archdata.dmabounce->bounce_count++);
-
- if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
- dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %d\n",
- __func__,buf->ptr + off, buf->safe + off, sz);
- memcpy(buf->safe + off, buf->ptr + off, sz);
- }
- return 0;
-}
-
-static void dmabounce_sync_for_device(struct device *dev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
- if (!__dmabounce_sync_for_device(dev, handle, size, dir))
- return;
-
- arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
-}
-
-static int dmabounce_dma_supported(struct device *dev, u64 dma_mask)
-{
- if (dev->archdata.dmabounce)
- return 0;
-
- return arm_dma_ops.dma_supported(dev, dma_mask);
-}
-
-static const struct dma_map_ops dmabounce_ops = {
- .alloc = arm_dma_alloc,
- .free = arm_dma_free,
- .mmap = arm_dma_mmap,
- .get_sgtable = arm_dma_get_sgtable,
- .map_page = dmabounce_map_page,
- .unmap_page = dmabounce_unmap_page,
- .sync_single_for_cpu = dmabounce_sync_for_cpu,
- .sync_single_for_device = dmabounce_sync_for_device,
- .map_sg = arm_dma_map_sg,
- .unmap_sg = arm_dma_unmap_sg,
- .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
- .sync_sg_for_device = arm_dma_sync_sg_for_device,
- .dma_supported = dmabounce_dma_supported,
-};
-
-static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
- const char *name, unsigned long size)
-{
- pool->size = size;
- DO_STATS(pool->allocs = 0);
- pool->pool = dma_pool_create(name, dev, size,
- 0 /* byte alignment */,
- 0 /* no page-crossing issues */);
-
- return pool->pool ? 0 : -ENOMEM;
-}
-
-int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
- unsigned long large_buffer_size,
- int (*needs_bounce_fn)(struct device *, dma_addr_t, size_t))
-{
- struct dmabounce_device_info *device_info;
- int ret;
-
- device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
- if (!device_info) {
- dev_err(dev,
- "Could not allocated dmabounce_device_info\n");
- return -ENOMEM;
- }
-
- ret = dmabounce_init_pool(&device_info->small, dev,
- "small_dmabounce_pool", small_buffer_size);
- if (ret) {
- dev_err(dev,
- "dmabounce: could not allocate DMA pool for %ld byte objects\n",
- small_buffer_size);
- goto err_free;
- }
-
- if (large_buffer_size) {
- ret = dmabounce_init_pool(&device_info->large, dev,
- "large_dmabounce_pool",
- large_buffer_size);
- if (ret) {
- dev_err(dev,
- "dmabounce: could not allocate DMA pool for %ld byte objects\n",
- large_buffer_size);
- goto err_destroy;
- }
- }
-
- device_info->dev = dev;
- INIT_LIST_HEAD(&device_info->safe_buffers);
- rwlock_init(&device_info->lock);
- device_info->needs_bounce = needs_bounce_fn;
-
-#ifdef STATS
- device_info->total_allocs = 0;
- device_info->map_op_count = 0;
- device_info->bounce_count = 0;
- device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
-#endif
-
- dev->archdata.dmabounce = device_info;
- set_dma_ops(dev, &dmabounce_ops);
-
- dev_info(dev, "dmabounce: registered device\n");
-
- return 0;
-
- err_destroy:
- dma_pool_destroy(device_info->small.pool);
- err_free:
- kfree(device_info);
- return ret;
-}
-EXPORT_SYMBOL(dmabounce_register_dev);
-
-void dmabounce_unregister_dev(struct device *dev)
-{
- struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
-
- dev->archdata.dmabounce = NULL;
- set_dma_ops(dev, NULL);
-
- if (!device_info) {
- dev_warn(dev,
- "Never registered with dmabounce but attempting"
- "to unregister!\n");
- return;
- }
-
- if (!list_empty(&device_info->safe_buffers)) {
- dev_err(dev,
- "Removing from dmabounce with pending buffers!\n");
- BUG();
- }
-
- if (device_info->small.pool)
- dma_pool_destroy(device_info->small.pool);
- if (device_info->large.pool)
- dma_pool_destroy(device_info->large.pool);
-
-#ifdef STATS
- if (device_info->attr_res == 0)
- device_remove_file(dev, &dev_attr_dmabounce_stats);
-#endif
-
- kfree(device_info);
-
- dev_info(dev, "dmabounce: device unregistered\n");
-}
-EXPORT_SYMBOL(dmabounce_unregister_dev);
-
-MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
-MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
-MODULE_LICENSE("GPL");
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
index 2343e2b6214d..f5e6990b8856 100644
--- a/arch/arm/common/sa1111.c
+++ b/arch/arm/common/sa1111.c
@@ -1389,70 +1389,9 @@ void sa1111_driver_unregister(struct sa1111_driver *driver)
}
EXPORT_SYMBOL(sa1111_driver_unregister);
-#ifdef CONFIG_DMABOUNCE
-/*
- * According to the "Intel StrongARM SA-1111 Microprocessor Companion
- * Chip Specification Update" (June 2000), erratum #7, there is a
- * significant bug in the SA1111 SDRAM shared memory controller. If
- * an access to a region of memory above 1MB relative to the bank base,
- * it is important that address bit 10 _NOT_ be asserted. Depending
- * on the configuration of the RAM, bit 10 may correspond to one
- * of several different (processor-relative) address bits.
- *
- * This routine only identifies whether or not a given DMA address
- * is susceptible to the bug.
- *
- * This should only get called for sa1111_device types due to the
- * way we configure our device dma_masks.
- */
-static int sa1111_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
-{
- /*
- * Section 4.6 of the "Intel StrongARM SA-1111 Development Module
- * User's Guide" mentions that jumpers R51 and R52 control the
- * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or
- * SDRAM bank 1 on Neponset). The default configuration selects
- * Assabet, so any address in bank 1 is necessarily invalid.
- */
- return (machine_is_assabet() || machine_is_pfs168()) &&
- (addr >= 0xc8000000 || (addr + size) >= 0xc8000000);
-}
-
-static int sa1111_notifier_call(struct notifier_block *n, unsigned long action,
- void *data)
-{
- struct sa1111_dev *dev = to_sa1111_device(data);
-
- switch (action) {
- case BUS_NOTIFY_ADD_DEVICE:
- if (dev->dev.dma_mask && dev->dma_mask < 0xffffffffUL) {
- int ret = dmabounce_register_dev(&dev->dev, 1024, 4096,
- sa1111_needs_bounce);
- if (ret)
- dev_err(&dev->dev, "failed to register with dmabounce: %d\n", ret);
- }
- break;
-
- case BUS_NOTIFY_DEL_DEVICE:
- if (dev->dev.dma_mask && dev->dma_mask < 0xffffffffUL)
- dmabounce_unregister_dev(&dev->dev);
- break;
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block sa1111_bus_notifier = {
- .notifier_call = sa1111_notifier_call,
-};
-#endif
-
static int __init sa1111_init(void)
{
int ret = bus_register(&sa1111_bus_type);
-#ifdef CONFIG_DMABOUNCE
- if (ret == 0)
- bus_register_notifier(&sa1111_bus_type, &sa1111_bus_notifier);
-#endif
if (ret == 0)
platform_driver_register(&sa1111_device_driver);
return ret;
@@ -1461,9 +1400,6 @@ static int __init sa1111_init(void)
static void __exit sa1111_exit(void)
{
platform_driver_unregister(&sa1111_device_driver);
-#ifdef CONFIG_DMABOUNCE
- bus_unregister_notifier(&sa1111_bus_type, &sa1111_bus_notifier);
-#endif
bus_unregister(&sa1111_bus_type);
}
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index 8e94fe7ab5eb..714440fa2fc6 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -160,18 +160,20 @@ extern int _test_and_change_bit(int nr, volatile unsigned long * p);
/*
* Little endian assembly bitops. nr = 0 -> byte 0 bit 0.
*/
-extern int _find_first_zero_bit_le(const unsigned long *p, unsigned size);
-extern int _find_next_zero_bit_le(const unsigned long *p, int size, int offset);
-extern int _find_first_bit_le(const unsigned long *p, unsigned size);
-extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
+unsigned long _find_first_zero_bit_le(const unsigned long *p, unsigned long size);
+unsigned long _find_next_zero_bit_le(const unsigned long *p,
+ unsigned long size, unsigned long offset);
+unsigned long _find_first_bit_le(const unsigned long *p, unsigned long size);
+unsigned long _find_next_bit_le(const unsigned long *p, unsigned long size, unsigned long offset);
/*
* Big endian assembly bitops. nr = 0 -> byte 3 bit 0.
*/
-extern int _find_first_zero_bit_be(const unsigned long *p, unsigned size);
-extern int _find_next_zero_bit_be(const unsigned long *p, int size, int offset);
-extern int _find_first_bit_be(const unsigned long *p, unsigned size);
-extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
+unsigned long _find_first_zero_bit_be(const unsigned long *p, unsigned long size);
+unsigned long _find_next_zero_bit_be(const unsigned long *p,
+ unsigned long size, unsigned long offset);
+unsigned long _find_first_bit_be(const unsigned long *p, unsigned long size);
+unsigned long _find_next_bit_be(const unsigned long *p, unsigned long size, unsigned long offset);
#ifndef CONFIG_SMP
/*
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index be666f58bf7a..8754c0f5fc90 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -6,9 +6,6 @@
#define ASMARM_DEVICE_H
struct dev_archdata {
-#ifdef CONFIG_DMABOUNCE
- struct dmabounce_device_info *dmabounce;
-#endif
#ifdef CONFIG_ARM_DMA_USE_IOMMU
struct dma_iommu_mapping *mapping;
#endif
diff --git a/arch/arm/include/asm/dma-direct.h b/arch/arm/include/asm/dma-direct.h
index 77fcb7ee5ec9..4f7bcde03abb 100644
--- a/arch/arm/include/asm/dma-direct.h
+++ b/arch/arm/include/asm/dma-direct.h
@@ -1,48 +1 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ASM_ARM_DMA_DIRECT_H
-#define ASM_ARM_DMA_DIRECT_H 1
-
-#include <asm/memory.h>
-
-/*
- * dma_to_pfn/pfn_to_dma/virt_to_dma are architecture private
- * functions used internally by the DMA-mapping API to provide DMA
- * addresses. They must not be used by drivers.
- */
-static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
-{
- if (dev && dev->dma_range_map)
- pfn = PFN_DOWN(translate_phys_to_dma(dev, PFN_PHYS(pfn)));
- return (dma_addr_t)__pfn_to_bus(pfn);
-}
-
-static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
-{
- unsigned long pfn = __bus_to_pfn(addr);
-
- if (dev && dev->dma_range_map)
- pfn = PFN_DOWN(translate_dma_to_phys(dev, PFN_PHYS(pfn)));
- return pfn;
-}
-
-static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
-{
- if (dev)
- return pfn_to_dma(dev, virt_to_pfn(addr));
-
- return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
-}
-
-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
- unsigned int offset = paddr & ~PAGE_MASK;
- return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
-}
-
-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
-{
- unsigned int offset = dev_addr & ~PAGE_MASK;
- return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
-}
-
-#endif /* ASM_ARM_DMA_DIRECT_H */
+#include <mach/dma-direct.h>
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
deleted file mode 100644
index 77082246a5e1..000000000000
--- a/arch/arm/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ASMARM_DMA_MAPPING_H
-#define ASMARM_DMA_MAPPING_H
-
-#ifdef __KERNEL__
-
-#include <linux/mm_types.h>
-#include <linux/scatterlist.h>
-
-#include <xen/xen.h>
-#include <asm/xen/hypervisor.h>
-
-extern const struct dma_map_ops arm_dma_ops;
-extern const struct dma_map_ops arm_coherent_dma_ops;
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
- if (IS_ENABLED(CONFIG_MMU) && !IS_ENABLED(CONFIG_ARM_LPAE))
- return &arm_dma_ops;
- return NULL;
-}
-
-/**
- * arm_dma_alloc - allocate consistent memory for DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @size: required memory size
- * @handle: bus-specific DMA address
- * @attrs: optional attributes that specify mapping properties
- *
- * Allocate some memory for a device for performing DMA. This function
- * allocates pages, and will return the CPU-viewed address, and sets @handle
- * to be the device-viewed address.
- */
-extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
- gfp_t gfp, unsigned long attrs);
-
-/**
- * arm_dma_free - free memory allocated by arm_dma_alloc
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @size: size of memory originally requested in dma_alloc_coherent
- * @cpu_addr: CPU-view address returned from dma_alloc_coherent
- * @handle: device-view address returned from dma_alloc_coherent
- * @attrs: optional attributes that specify mapping properties
- *
- * Free (and unmap) a DMA buffer previously allocated by
- * arm_dma_alloc().
- *
- * References to memory and mappings associated with cpu_addr/handle
- * during and after this call executing are illegal.
- */
-extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t handle, unsigned long attrs);
-
-/**
- * arm_dma_mmap - map a coherent DMA allocation into user space
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @vma: vm_area_struct describing requested user mapping
- * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
- * @handle: device-view address returned from dma_alloc_coherent
- * @size: size of memory originally requested in dma_alloc_coherent
- * @attrs: optional attributes that specify mapping properties
- *
- * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
- * into user space. The coherent DMA buffer must not be freed by the
- * driver until the user space mapping has been released.
- */
-extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- unsigned long attrs);
-
-/*
- * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
- * and utilize bounce buffers as needed to work around limited DMA windows.
- *
- * On the SA-1111, a bug limits DMA to only certain regions of RAM.
- * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
- * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
- *
- * The following are helper functions used by the dmabounce subsystem
- *
- */
-
-/**
- * dmabounce_register_dev
- *
- * @dev: valid struct device pointer
- * @small_buf_size: size of buffers to use with small buffer pool
- * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
- * @needs_bounce_fn: called to determine whether buffer needs bouncing
- *
- * This function should be called by low-level platform code to register
- * a device as requiring DMA buffer bouncing. The function will allocate
- * appropriate DMA pools for the device.
- */
-extern int dmabounce_register_dev(struct device *, unsigned long,
- unsigned long, int (*)(struct device *, dma_addr_t, size_t));
-
-/**
- * dmabounce_unregister_dev
- *
- * @dev: valid struct device pointer
- *
- * This function should be called by low-level platform code when device
- * that was previously registered with dmabounce_register_dev is removed
- * from the system.
- *
- */
-extern void dmabounce_unregister_dev(struct device *);
-
-
-
-/*
- * The scatter list versions of the above methods.
- */
-extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
- enum dma_data_direction, unsigned long attrs);
-extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
- enum dma_data_direction, unsigned long attrs);
-extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
- enum dma_data_direction);
-extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
- enum dma_data_direction);
-extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- unsigned long attrs);
-
-#endif /* __KERNEL__ */
-#endif
diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h
index 45180a2cc47c..05f29a72150b 100644
--- a/arch/arm/include/asm/dma.h
+++ b/arch/arm/include/asm/dma.h
@@ -143,10 +143,4 @@ extern int get_dma_residue(unsigned int chan);
#endif /* CONFIG_ISA_DMA_API */
-#ifdef CONFIG_PCI
-extern int isa_dma_bridge_buggy;
-#else
-#define isa_dma_bridge_buggy (0)
-#endif
-
#endif /* __ASM_ARM_DMA_H */
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index f673e13e0f94..a55a9038abc8 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -378,8 +378,6 @@ static inline unsigned long __virt_to_idmap(unsigned long x)
#ifndef __virt_to_bus
#define __virt_to_bus __virt_to_phys
#define __bus_to_virt __phys_to_virt
-#define __pfn_to_bus(x) __pfn_to_phys(x)
-#define __bus_to_pfn(x) __phys_to_pfn(x)
#endif
/*
diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h
index 68e6f25784a4..5916b88d4c94 100644
--- a/arch/arm/include/asm/pci.h
+++ b/arch/arm/include/asm/pci.h
@@ -22,11 +22,6 @@ static inline int pci_proc_domain(struct pci_bus *bus)
#define HAVE_PCI_MMAP
#define ARCH_GENERIC_PCI_MMAP_RESOURCE
-static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
-{
- return channel ? 15 : 14;
-}
-
extern void pcibios_report_status(unsigned int status_mask, int warn);
#endif /* __KERNEL__ */
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index cd1f84bb40ae..78a532068fec 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -137,23 +137,6 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
* 2) If we could do execute protection, then read is implied
* 3) write implies read permissions
*/
-#define __P000 __PAGE_NONE
-#define __P001 __PAGE_READONLY
-#define __P010 __PAGE_COPY
-#define __P011 __PAGE_COPY
-#define __P100 __PAGE_READONLY_EXEC
-#define __P101 __PAGE_READONLY_EXEC
-#define __P110 __PAGE_COPY_EXEC
-#define __P111 __PAGE_COPY_EXEC
-
-#define __S000 __PAGE_NONE
-#define __S001 __PAGE_READONLY
-#define __S010 __PAGE_SHARED
-#define __S011 __PAGE_SHARED
-#define __S100 __PAGE_READONLY_EXEC
-#define __S101 __PAGE_READONLY_EXEC
-#define __S110 __PAGE_SHARED_EXEC
-#define __S111 __PAGE_SHARED_EXEC
#ifndef __ASSEMBLY__
/*
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 500612d3da2e..29e2900178a1 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -38,10 +38,10 @@
#ifdef CONFIG_ARM_LPAE
/* LPAE requires an additional page for the PGD */
#define PG_DIR_SIZE 0x5000
-#define PMD_ORDER 3
+#define PMD_ENTRY_ORDER 3 /* PMD entry size is 2^PMD_ENTRY_ORDER */
#else
#define PG_DIR_SIZE 0x4000
-#define PMD_ORDER 2
+#define PMD_ENTRY_ORDER 2
#endif
.globl swapper_pg_dir
@@ -240,7 +240,7 @@ __create_page_tables:
mov r6, r6, lsr #SECTION_SHIFT
1: orr r3, r7, r5, lsl #SECTION_SHIFT @ flags + kernel base
- str r3, [r4, r5, lsl #PMD_ORDER] @ identity mapping
+ str r3, [r4, r5, lsl #PMD_ENTRY_ORDER] @ identity mapping
cmp r5, r6
addlo r5, r5, #1 @ next section
blo 1b
@@ -250,7 +250,7 @@ __create_page_tables:
* set two variables to indicate the physical start and end of the
* kernel.
*/
- add r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
+ add r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
ldr r6, =(_end - 1)
adr_l r5, kernel_sec_start @ _pa(kernel_sec_start)
#if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
@@ -259,8 +259,8 @@ __create_page_tables:
str r8, [r5] @ Save physical start of kernel (LE)
#endif
orr r3, r8, r7 @ Add the MMU flags
- add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
-1: str r3, [r0], #1 << PMD_ORDER
+ add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ENTRY_ORDER)
+1: str r3, [r0], #1 << PMD_ENTRY_ORDER
add r3, r3, #1 << SECTION_SHIFT
cmp r0, r6
bls 1b
@@ -280,14 +280,14 @@ __create_page_tables:
mov r3, pc
mov r3, r3, lsr #SECTION_SHIFT
orr r3, r7, r3, lsl #SECTION_SHIFT
- add r0, r4, #(XIP_START & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER)
- str r3, [r0, #((XIP_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ORDER]!
+ add r0, r4, #(XIP_START & 0xff000000) >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
+ str r3, [r0, #((XIP_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ENTRY_ORDER]!
ldr r6, =(_edata_loc - 1)
- add r0, r0, #1 << PMD_ORDER
- add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
+ add r0, r0, #1 << PMD_ENTRY_ORDER
+ add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ENTRY_ORDER)
1: cmp r0, r6
add r3, r3, #1 << SECTION_SHIFT
- strls r3, [r0], #1 << PMD_ORDER
+ strls r3, [r0], #1 << PMD_ENTRY_ORDER
bls 1b
#endif
@@ -297,10 +297,10 @@ __create_page_tables:
*/
mov r0, r2, lsr #SECTION_SHIFT
cmp r2, #0
- ldrne r3, =FDT_FIXED_BASE >> (SECTION_SHIFT - PMD_ORDER)
+ ldrne r3, =FDT_FIXED_BASE >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
addne r3, r3, r4
orrne r6, r7, r0, lsl #SECTION_SHIFT
- strne r6, [r3], #1 << PMD_ORDER
+ strne r6, [r3], #1 << PMD_ENTRY_ORDER
addne r6, r6, #1 << SECTION_SHIFT
strne r6, [r3]
@@ -319,7 +319,7 @@ __create_page_tables:
addruart r7, r3, r0
mov r3, r3, lsr #SECTION_SHIFT
- mov r3, r3, lsl #PMD_ORDER
+ mov r3, r3, lsl #PMD_ENTRY_ORDER
add r0, r4, r3
mov r3, r7, lsr #SECTION_SHIFT
@@ -349,7 +349,7 @@ __create_page_tables:
* If we're using the NetWinder or CATS, we also need to map
* in the 16550-type serial port for the debug messages
*/
- add r0, r4, #0xff000000 >> (SECTION_SHIFT - PMD_ORDER)
+ add r0, r4, #0xff000000 >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
orr r3, r7, #0x7c000000
str r3, [r0]
#endif
@@ -359,10 +359,10 @@ __create_page_tables:
* Similar reasons here - for debug. This is
* only for Acorn RiscPC architectures.
*/
- add r0, r4, #0x02000000 >> (SECTION_SHIFT - PMD_ORDER)
+ add r0, r4, #0x02000000 >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
orr r3, r7, #0x02000000
str r3, [r0]
- add r0, r4, #0xd8000000 >> (SECTION_SHIFT - PMD_ORDER)
+ add r0, r4, #0xd8000000 >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
str r3, [r0]
#endif
#endif
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 5c6f8d11a3ce..034cb48c9eeb 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -70,6 +70,7 @@ static void __init init_irq_stacks(void)
}
}
+#ifndef CONFIG_PREEMPT_RT
static void ____do_softirq(void *arg)
{
__do_softirq();
@@ -80,7 +81,7 @@ void do_softirq_own_stack(void)
call_with_stack(____do_softirq, NULL,
__this_cpu_read(irq_stack_ptr));
}
-
+#endif
#endif
int arch_show_interrupts(struct seq_file *p, int prec)
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 73fc645fc4c7..978db2d96b44 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -787,14 +787,6 @@ void panic_smp_self_stop(void)
cpu_relax();
}
-/*
- * not supported here
- */
-int setup_profiling_timer(unsigned int multiplier)
-{
- return -EINVAL;
-}
-
#ifdef CONFIG_CPU_FREQ
static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index c30b689bec2e..14eecaaf295f 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -237,7 +237,7 @@ static int __init test_size_treshold(void)
if (!dst_page)
goto no_dst;
kernel_ptr = page_address(src_page);
- user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__P010));
+ user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__PAGE_COPY));
if (!user_ptr)
goto no_vmap;
diff --git a/arch/arm/mach-footbridge/Kconfig b/arch/arm/mach-footbridge/Kconfig
index bcd4e4ca34f7..acc10b1caa69 100644
--- a/arch/arm/mach-footbridge/Kconfig
+++ b/arch/arm/mach-footbridge/Kconfig
@@ -61,6 +61,7 @@ endmenu
# Footbridge support
config FOOTBRIDGE
+ select ARCH_HAS_PHYS_TO_DMA
bool
# Footbridge in host mode
diff --git a/arch/arm/mach-footbridge/common.c b/arch/arm/mach-footbridge/common.c
index 322495df271d..5020eb96b025 100644
--- a/arch/arm/mach-footbridge/common.c
+++ b/arch/arm/mach-footbridge/common.c
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/spinlock.h>
+#include <linux/dma-direct.h>
#include <video/vga.h>
#include <asm/page.h>
@@ -335,17 +336,19 @@ unsigned long __bus_to_virt(unsigned long res)
return res;
}
EXPORT_SYMBOL(__bus_to_virt);
-
-unsigned long __pfn_to_bus(unsigned long pfn)
+#else
+static inline unsigned long fb_bus_sdram_offset(void)
{
- return __pfn_to_phys(pfn) + (fb_bus_sdram_offset() - PHYS_OFFSET);
+ return BUS_OFFSET;
}
-EXPORT_SYMBOL(__pfn_to_bus);
+#endif /* CONFIG_FOOTBRIDGE_ADDIN */
-unsigned long __bus_to_pfn(unsigned long bus)
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
- return __phys_to_pfn(bus - (fb_bus_sdram_offset() - PHYS_OFFSET));
+ return paddr + (fb_bus_sdram_offset() - PHYS_OFFSET);
}
-EXPORT_SYMBOL(__bus_to_pfn);
-#endif
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
+{
+ return dev_addr - (fb_bus_sdram_offset() - PHYS_OFFSET);
+}
diff --git a/arch/arm/mach-footbridge/include/mach/dma-direct.h b/arch/arm/mach-footbridge/include/mach/dma-direct.h
new file mode 100644
index 000000000000..01f9e8367c00
--- /dev/null
+++ b/arch/arm/mach-footbridge/include/mach/dma-direct.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef MACH_FOOTBRIDGE_DMA_DIRECT_H
+#define MACH_FOOTBRIDGE_DMA_DIRECT_H 1
+
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr);
+
+#endif /* MACH_FOOTBRIDGE_DMA_DIRECT_H */
diff --git a/arch/arm/mach-footbridge/include/mach/memory.h b/arch/arm/mach-footbridge/include/mach/memory.h
index 46fd4a8872b9..3a5d2638c18f 100644
--- a/arch/arm/mach-footbridge/include/mach/memory.h
+++ b/arch/arm/mach-footbridge/include/mach/memory.h
@@ -26,8 +26,6 @@
#ifndef __ASSEMBLY__
extern unsigned long __virt_to_bus(unsigned long);
extern unsigned long __bus_to_virt(unsigned long);
-extern unsigned long __pfn_to_bus(unsigned long);
-extern unsigned long __bus_to_pfn(unsigned long);
#endif
#define __virt_to_bus __virt_to_bus
#define __bus_to_virt __bus_to_virt
@@ -42,8 +40,6 @@ extern unsigned long __bus_to_pfn(unsigned long);
#define BUS_OFFSET 0xe0000000
#define __virt_to_bus(x) ((x) + (BUS_OFFSET - PAGE_OFFSET))
#define __bus_to_virt(x) ((x) - (BUS_OFFSET - PAGE_OFFSET))
-#define __pfn_to_bus(x) (__pfn_to_phys(x) + (BUS_OFFSET - PHYS_OFFSET))
-#define __bus_to_pfn(x) __phys_to_pfn((x) - (BUS_OFFSET - PHYS_OFFSET))
#else
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index db607955a7e4..5d4f977ac7d2 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -98,7 +98,7 @@ static int highbank_platform_notifier(struct notifier_block *nb,
if (of_property_read_bool(dev->of_node, "dma-coherent")) {
val = readl(sregs_base + reg);
writel(val | 0xff01, sregs_base + reg);
- set_dma_ops(dev, &arm_coherent_dma_ops);
+ dev->dma_coherent = true;
}
return NOTIFY_OK;
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index 883dab1b54f3..a6b621ff0b87 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -95,7 +95,7 @@ static int mvebu_hwcc_notifier(struct notifier_block *nb,
if (event != BUS_NOTIFY_ADD_DEVICE)
return NOTIFY_DONE;
- set_dma_ops(dev, &arm_coherent_dma_ops);
+ dev->dma_coherent = true;
return NOTIFY_OK;
}
diff --git a/arch/arm/mach-pxa/eseries.c b/arch/arm/mach-pxa/eseries.c
index 2c1e7bc5bac5..2e4daeab6278 100644
--- a/arch/arm/mach-pxa/eseries.c
+++ b/arch/arm/mach-pxa/eseries.c
@@ -81,11 +81,10 @@ int eseries_tmio_enable(struct platform_device *dev)
return 0;
}
-int eseries_tmio_disable(struct platform_device *dev)
+void eseries_tmio_disable(struct platform_device *dev)
{
gpio_set_value(GPIO_ESERIES_TMIO_SUSPEND, 0);
gpio_set_value(GPIO_ESERIES_TMIO_PCLR, 0);
- return 0;
}
int eseries_tmio_suspend(struct platform_device *dev)
@@ -134,7 +133,6 @@ static void __init __maybe_unused eseries_register_clks(void)
static struct tc6387xb_platform_data e330_tc6387xb_info = {
.enable = &eseries_tmio_enable,
- .disable = &eseries_tmio_disable,
.suspend = &eseries_tmio_suspend,
.resume = &eseries_tmio_resume,
};
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
index 6af8bc404825..d41641d6cfcd 100644
--- a/arch/arm/mach-pxa/tosa.c
+++ b/arch/arm/mach-pxa/tosa.c
@@ -678,13 +678,11 @@ err_req_pclr:
return rc;
}
-static int tosa_tc6393xb_disable(struct platform_device *dev)
+static void tosa_tc6393xb_disable(struct platform_device *dev)
{
gpio_free(TOSA_GPIO_TC6393XB_L3V_ON);
gpio_free(TOSA_GPIO_TC6393XB_SUSPEND);
gpio_free(TOSA_GPIO_TC6393XB_REST_IN);
-
- return 0;
}
static int tosa_tc6393xb_resume(struct platform_device *dev)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1483b6a4319d..089c9c644cce 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -103,139 +103,6 @@ static struct arm_dma_buffer *arm_dma_buffer_find(void *virt)
* before transfers and delay cache invalidation until transfer completion.
*
*/
-static void __dma_page_cpu_to_dev(struct page *, unsigned long,
- size_t, enum dma_data_direction);
-static void __dma_page_dev_to_cpu(struct page *, unsigned long,
- size_t, enum dma_data_direction);
-
-/**
- * arm_dma_map_page - map a portion of a page for streaming DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @page: page that buffer resides in
- * @offset: offset into page for start of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Ensure that any data held in the cache is appropriately discarded
- * or written back.
- *
- * The device owns this memory once this call has completed. The CPU
- * can regain ownership by calling dma_unmap_page().
- */
-static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
- __dma_page_cpu_to_dev(page, offset, size, dir);
- return pfn_to_dma(dev, page_to_pfn(page)) + offset;
-}
-
-static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- return pfn_to_dma(dev, page_to_pfn(page)) + offset;
-}
-
-/**
- * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @size: size of buffer (same as passed to dma_map_page)
- * @dir: DMA transfer direction (same as passed to dma_map_page)
- *
- * Unmap a page streaming mode DMA translation. The handle and size
- * must match what was provided in the previous dma_map_page() call.
- * All other usages are undefined.
- *
- * After this call, reads by the CPU to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
- if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
- __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
- handle & ~PAGE_MASK, size, dir);
-}
-
-static void arm_dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
- unsigned int offset = handle & (PAGE_SIZE - 1);
- struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
- __dma_page_dev_to_cpu(page, offset, size, dir);
-}
-
-static void arm_dma_sync_single_for_device(struct device *dev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
- unsigned int offset = handle & (PAGE_SIZE - 1);
- struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
- __dma_page_cpu_to_dev(page, offset, size, dir);
-}
-
-/*
- * Return whether the given device DMA address mask can be supported
- * properly. For example, if your device can only drive the low 24-bits
- * during bus mastering, then you would pass 0x00ffffff as the mask
- * to this function.
- */
-static int arm_dma_supported(struct device *dev, u64 mask)
-{
- unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit);
-
- /*
- * Translate the device's DMA mask to a PFN limit. This
- * PFN number includes the page which we can DMA to.
- */
- return dma_to_pfn(dev, mask) >= max_dma_pfn;
-}
-
-const struct dma_map_ops arm_dma_ops = {
- .alloc = arm_dma_alloc,
- .free = arm_dma_free,
- .alloc_pages = dma_direct_alloc_pages,
- .free_pages = dma_direct_free_pages,
- .mmap = arm_dma_mmap,
- .get_sgtable = arm_dma_get_sgtable,
- .map_page = arm_dma_map_page,
- .unmap_page = arm_dma_unmap_page,
- .map_sg = arm_dma_map_sg,
- .unmap_sg = arm_dma_unmap_sg,
- .map_resource = dma_direct_map_resource,
- .sync_single_for_cpu = arm_dma_sync_single_for_cpu,
- .sync_single_for_device = arm_dma_sync_single_for_device,
- .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
- .sync_sg_for_device = arm_dma_sync_sg_for_device,
- .dma_supported = arm_dma_supported,
- .get_required_mask = dma_direct_get_required_mask,
-};
-EXPORT_SYMBOL(arm_dma_ops);
-
-static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp, unsigned long attrs);
-static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t handle, unsigned long attrs);
-static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- unsigned long attrs);
-
-const struct dma_map_ops arm_coherent_dma_ops = {
- .alloc = arm_coherent_dma_alloc,
- .free = arm_coherent_dma_free,
- .alloc_pages = dma_direct_alloc_pages,
- .free_pages = dma_direct_free_pages,
- .mmap = arm_coherent_dma_mmap,
- .get_sgtable = arm_dma_get_sgtable,
- .map_page = arm_coherent_dma_map_page,
- .map_sg = arm_dma_map_sg,
- .map_resource = dma_direct_map_resource,
- .dma_supported = arm_dma_supported,
- .get_required_mask = dma_direct_get_required_mask,
-};
-EXPORT_SYMBOL(arm_coherent_dma_ops);
static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
{
@@ -725,7 +592,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
if (page) {
unsigned long flags;
- *handle = pfn_to_dma(dev, page_to_pfn(page));
+ *handle = phys_to_dma(dev, page_to_phys(page));
buf->virt = args.want_vaddr ? addr : page;
spin_lock_irqsave(&arm_dma_bufs_lock, flags);
@@ -739,74 +606,13 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
}
/*
- * Allocate DMA-coherent memory space and return both the kernel remapped
- * virtual and bus address for that space.
- */
-void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
- gfp_t gfp, unsigned long attrs)
-{
- pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
-
- return __dma_alloc(dev, size, handle, gfp, prot, false,
- attrs, __builtin_return_address(0));
-}
-
-static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
-{
- return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
- attrs, __builtin_return_address(0));
-}
-
-static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- unsigned long attrs)
-{
- int ret = -ENXIO;
- unsigned long nr_vma_pages = vma_pages(vma);
- unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
- unsigned long pfn = dma_to_pfn(dev, dma_addr);
- unsigned long off = vma->vm_pgoff;
-
- if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
- return ret;
-
- if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
- ret = remap_pfn_range(vma, vma->vm_start,
- pfn + off,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
- }
-
- return ret;
-}
-
-/*
- * Create userspace mapping for the DMA-coherent memory.
- */
-static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- unsigned long attrs)
-{
- return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
-}
-
-int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- unsigned long attrs)
-{
- vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
- return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
-}
-
-/*
* Free a buffer as defined by the above mapping.
*/
static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t handle, unsigned long attrs,
bool is_coherent)
{
- struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
+ struct page *page = phys_to_page(dma_to_phys(dev, handle));
struct arm_dma_buffer *buf;
struct arm_dma_free_args args = {
.dev = dev,
@@ -824,40 +630,6 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
kfree(buf);
}
-void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t handle, unsigned long attrs)
-{
- __arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
-}
-
-static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t handle, unsigned long attrs)
-{
- __arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
-}
-
-int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t handle, size_t size,
- unsigned long attrs)
-{
- unsigned long pfn = dma_to_pfn(dev, handle);
- struct page *page;
- int ret;
-
- /* If the PFN is not valid, we do not have a struct page */
- if (!pfn_valid(pfn))
- return -ENXIO;
-
- page = pfn_to_page(pfn);
-
- ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
- if (unlikely(ret))
- return ret;
-
- sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
- return 0;
-}
-
static void dma_cache_maint_page(struct page *page, unsigned long offset,
size_t size, enum dma_data_direction dir,
void (*op)(const void *, size_t, int))
@@ -907,8 +679,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
/*
* Make an area consistent for devices.
- * Note: Drivers should NOT use this function directly, as it will break
- * platforms with CONFIG_DMABOUNCE.
+ * Note: Drivers should NOT use this function directly.
* Use the driver DMA support - see dma-mapping.h (dma_sync_*)
*/
static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
@@ -961,122 +732,6 @@ static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
}
}
-/**
- * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Map a set of buffers described by scatterlist in streaming mode for DMA.
- * This is the scatter-gather version of the dma_map_single interface.
- * Here the scatter gather list elements are each tagged with the
- * appropriate dma address and length. They are obtained via
- * sg_dma_{address,length}.
- *
- * Device ownership issues as mentioned for dma_map_single are the same
- * here.
- */
-int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, unsigned long attrs)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
- struct scatterlist *s;
- int i, j, ret;
-
- for_each_sg(sg, s, nents, i) {
-#ifdef CONFIG_NEED_SG_DMA_LENGTH
- s->dma_length = s->length;
-#endif
- s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
- s->length, dir, attrs);
- if (dma_mapping_error(dev, s->dma_address)) {
- ret = -EIO;
- goto bad_mapping;
- }
- }
- return nents;
-
- bad_mapping:
- for_each_sg(sg, s, i, j)
- ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
- return ret;
-}
-
-/**
- * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @sg: list of buffers
- * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
- * @dir: DMA transfer direction (same as was passed to dma_map_sg)
- *
- * Unmap a set of streaming mode DMA translations. Again, CPU access
- * rules concerning calls here are the same as for dma_unmap_single().
- */
-void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, unsigned long attrs)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
- struct scatterlist *s;
-
- int i;
-
- for_each_sg(sg, s, nents, i)
- ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
-}
-
-/**
- * arm_dma_sync_sg_for_cpu
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @sg: list of buffers
- * @nents: number of buffers to map (returned from dma_map_sg)
- * @dir: DMA transfer direction (same as was passed to dma_map_sg)
- */
-void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
- struct scatterlist *s;
- int i;
-
- for_each_sg(sg, s, nents, i)
- ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
- dir);
-}
-
-/**
- * arm_dma_sync_sg_for_device
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @sg: list of buffers
- * @nents: number of buffers to map (returned from dma_map_sg)
- * @dir: DMA transfer direction (same as was passed to dma_map_sg)
- */
-void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
- struct scatterlist *s;
- int i;
-
- for_each_sg(sg, s, nents, i)
- ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
- dir);
-}
-
-static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
-{
- /*
- * When CONFIG_ARM_LPAE is set, physical address can extend above
- * 32-bits, which then can't be addressed by devices that only support
- * 32-bit DMA.
- * Use the generic dma-direct / swiotlb ops code in that case, as that
- * handles bounce buffering for us.
- */
- if (IS_ENABLED(CONFIG_ARM_LPAE))
- return NULL;
- return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
-}
-
#ifdef CONFIG_ARM_DMA_USE_IOMMU
static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
@@ -1423,13 +1078,13 @@ static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
__free_from_pool(cpu_addr, size);
}
-static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp, unsigned long attrs,
- int coherent_flag)
+static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
struct page **pages;
void *addr = NULL;
+ int coherent_flag = dev->dma_coherent ? COHERENT : NORMAL;
*handle = DMA_MAPPING_ERROR;
size = PAGE_ALIGN(size);
@@ -1472,19 +1127,7 @@ err_buffer:
return NULL;
}
-static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
-{
- return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL);
-}
-
-static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
-{
- return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT);
-}
-
-static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
{
@@ -1498,35 +1141,24 @@ static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma
if (vma->vm_pgoff >= nr_pages)
return -ENXIO;
+ if (!dev->dma_coherent)
+ vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+
err = vm_map_pages(vma, pages, nr_pages);
if (err)
pr_err("Remapping memory failed: %d\n", err);
return err;
}
-static int arm_iommu_mmap_attrs(struct device *dev,
- struct vm_area_struct *vma, void *cpu_addr,
- dma_addr_t dma_addr, size_t size, unsigned long attrs)
-{
- vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
-
- return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
-}
-
-static int arm_coherent_iommu_mmap_attrs(struct device *dev,
- struct vm_area_struct *vma, void *cpu_addr,
- dma_addr_t dma_addr, size_t size, unsigned long attrs)
-{
- return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
-}
/*
* free a page as defined by the above mapping.
* Must not be called with IRQs disabled.
*/
-static void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t handle, unsigned long attrs, int coherent_flag)
+static void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle, unsigned long attrs)
{
+ int coherent_flag = dev->dma_coherent ? COHERENT : NORMAL;
struct page **pages;
size = PAGE_ALIGN(size);
@@ -1548,19 +1180,6 @@ static void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_ad
__iommu_free_buffer(dev, pages, size, attrs);
}
-static void arm_iommu_free_attrs(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t handle,
- unsigned long attrs)
-{
- __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL);
-}
-
-static void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t handle, unsigned long attrs)
-{
- __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT);
-}
-
static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr,
size_t size, unsigned long attrs)
@@ -1580,8 +1199,7 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
*/
static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
size_t size, dma_addr_t *handle,
- enum dma_data_direction dir, unsigned long attrs,
- bool is_coherent)
+ enum dma_data_direction dir, unsigned long attrs)
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t iova, iova_base;
@@ -1601,7 +1219,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
phys_addr_t phys = page_to_phys(sg_page(s));
unsigned int len = PAGE_ALIGN(s->offset + s->length);
- if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+ if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
prot = __dma_info_to_prot(dir, attrs);
@@ -1621,9 +1239,20 @@ fail:
return ret;
}
-static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, unsigned long attrs,
- bool is_coherent)
+/**
+ * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * The scatter gather list elements are merged together (if possible) and
+ * tagged with the appropriate dma address and length. They are obtained via
+ * sg_dma_{address,length}.
+ */
+static int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir, unsigned long attrs)
{
struct scatterlist *s = sg, *dma = sg, *start = sg;
int i, count = 0, ret;
@@ -1638,8 +1267,7 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
ret = __map_sg_chunk(dev, start, size,
- &dma->dma_address, dir, attrs,
- is_coherent);
+ &dma->dma_address, dir, attrs);
if (ret < 0)
goto bad_mapping;
@@ -1653,8 +1281,7 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
}
size += s->length;
}
- ret = __map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
- is_coherent);
+ ret = __map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs);
if (ret < 0)
goto bad_mapping;
@@ -1672,44 +1299,19 @@ bad_mapping:
}
/**
- * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
- * @dev: valid struct device pointer
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Map a set of i/o coherent buffers described by scatterlist in streaming
- * mode for DMA. The scatter gather list elements are merged together (if
- * possible) and tagged with the appropriate dma address and length. They are
- * obtained via sg_dma_{address,length}.
- */
-static int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, unsigned long attrs)
-{
- return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
-}
-
-/**
- * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
+ * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
* @dev: valid struct device pointer
* @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
+ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
*
- * Map a set of buffers described by scatterlist in streaming mode for DMA.
- * The scatter gather list elements are merged together (if possible) and
- * tagged with the appropriate dma address and length. They are obtained via
- * sg_dma_{address,length}.
+ * Unmap a set of streaming mode DMA translations. Again, CPU access
+ * rules concerning calls here are the same as for dma_unmap_single().
*/
-static int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, unsigned long attrs)
-{
- return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
-}
-
-static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir,
- unsigned long attrs, bool is_coherent)
+static void arm_iommu_unmap_sg(struct device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction dir,
+ unsigned long attrs)
{
struct scatterlist *s;
int i;
@@ -1718,48 +1320,13 @@ static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
if (sg_dma_len(s))
__iommu_remove_mapping(dev, sg_dma_address(s),
sg_dma_len(s));
- if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+ if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
__dma_page_dev_to_cpu(sg_page(s), s->offset,
s->length, dir);
}
}
/**
- * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
- * @dev: valid struct device pointer
- * @sg: list of buffers
- * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
- * @dir: DMA transfer direction (same as was passed to dma_map_sg)
- *
- * Unmap a set of streaming mode DMA translations. Again, CPU access
- * rules concerning calls here are the same as for dma_unmap_single().
- */
-static void arm_coherent_iommu_unmap_sg(struct device *dev,
- struct scatterlist *sg, int nents, enum dma_data_direction dir,
- unsigned long attrs)
-{
- __iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
-}
-
-/**
- * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
- * @dev: valid struct device pointer
- * @sg: list of buffers
- * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
- * @dir: DMA transfer direction (same as was passed to dma_map_sg)
- *
- * Unmap a set of streaming mode DMA translations. Again, CPU access
- * rules concerning calls here are the same as for dma_unmap_single().
- */
-static void arm_iommu_unmap_sg(struct device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- __iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
-}
-
-/**
* arm_iommu_sync_sg_for_cpu
* @dev: valid struct device pointer
* @sg: list of buffers
@@ -1773,6 +1340,9 @@ static void arm_iommu_sync_sg_for_cpu(struct device *dev,
struct scatterlist *s;
int i;
+ if (dev->dma_coherent)
+ return;
+
for_each_sg(sg, s, nents, i)
__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
@@ -1792,22 +1362,24 @@ static void arm_iommu_sync_sg_for_device(struct device *dev,
struct scatterlist *s;
int i;
+ if (dev->dma_coherent)
+ return;
+
for_each_sg(sg, s, nents, i)
__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
}
-
/**
- * arm_coherent_iommu_map_page
+ * arm_iommu_map_page
* @dev: valid struct device pointer
* @page: page that buffer resides in
* @offset: offset into page for start of buffer
* @size: size of buffer to map
* @dir: DMA transfer direction
*
- * Coherent IOMMU aware version of arm_dma_map_page()
+ * IOMMU aware version of arm_dma_map_page()
*/
-static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
+static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
@@ -1815,6 +1387,9 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
dma_addr_t dma_addr;
int ret, prot, len = PAGE_ALIGN(size + offset);
+ if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ __dma_page_cpu_to_dev(page, offset, size, dir);
+
dma_addr = __alloc_iova(mapping, len);
if (dma_addr == DMA_MAPPING_ERROR)
return dma_addr;
@@ -1832,50 +1407,6 @@ fail:
}
/**
- * arm_iommu_map_page
- * @dev: valid struct device pointer
- * @page: page that buffer resides in
- * @offset: offset into page for start of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * IOMMU aware version of arm_dma_map_page()
- */
-static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
- __dma_page_cpu_to_dev(page, offset, size, dir);
-
- return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
-}
-
-/**
- * arm_coherent_iommu_unmap_page
- * @dev: valid struct device pointer
- * @handle: DMA address of buffer
- * @size: size of buffer (same as passed to dma_map_page)
- * @dir: DMA transfer direction (same as passed to dma_map_page)
- *
- * Coherent IOMMU aware version of arm_dma_unmap_page()
- */
-static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
- struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
- dma_addr_t iova = handle & PAGE_MASK;
- int offset = handle & ~PAGE_MASK;
- int len = PAGE_ALIGN(size + offset);
-
- if (!iova)
- return;
-
- iommu_unmap(mapping->domain, iova, len);
- __free_iova(mapping, iova, len);
-}
-
-/**
* arm_iommu_unmap_page
* @dev: valid struct device pointer
* @handle: DMA address of buffer
@@ -1889,15 +1420,17 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t iova = handle & PAGE_MASK;
- struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+ struct page *page;
int offset = handle & ~PAGE_MASK;
int len = PAGE_ALIGN(size + offset);
if (!iova)
return;
- if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+ if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
+ page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
__dma_page_dev_to_cpu(page, offset, size, dir);
+ }
iommu_unmap(mapping->domain, iova, len);
__free_iova(mapping, iova, len);
@@ -1965,12 +1498,13 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev,
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t iova = handle & PAGE_MASK;
- struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+ struct page *page;
unsigned int offset = handle & ~PAGE_MASK;
- if (!iova)
+ if (dev->dma_coherent || !iova)
return;
+ page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
__dma_page_dev_to_cpu(page, offset, size, dir);
}
@@ -1979,12 +1513,13 @@ static void arm_iommu_sync_single_for_device(struct device *dev,
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t iova = handle & PAGE_MASK;
- struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+ struct page *page;
unsigned int offset = handle & ~PAGE_MASK;
- if (!iova)
+ if (dev->dma_coherent || !iova)
return;
+ page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
__dma_page_cpu_to_dev(page, offset, size, dir);
}
@@ -2006,26 +1541,6 @@ static const struct dma_map_ops iommu_ops = {
.map_resource = arm_iommu_map_resource,
.unmap_resource = arm_iommu_unmap_resource,
-
- .dma_supported = arm_dma_supported,
-};
-
-static const struct dma_map_ops iommu_coherent_ops = {
- .alloc = arm_coherent_iommu_alloc_attrs,
- .free = arm_coherent_iommu_free_attrs,
- .mmap = arm_coherent_iommu_mmap_attrs,
- .get_sgtable = arm_iommu_get_sgtable,
-
- .map_page = arm_coherent_iommu_map_page,
- .unmap_page = arm_coherent_iommu_unmap_page,
-
- .map_sg = arm_coherent_iommu_map_sg,
- .unmap_sg = arm_coherent_iommu_unmap_sg,
-
- .map_resource = arm_iommu_map_resource,
- .unmap_resource = arm_iommu_unmap_resource,
-
- .dma_supported = arm_dma_supported,
};
/**
@@ -2201,40 +1716,32 @@ void arm_iommu_detach_device(struct device *dev)
iommu_detach_device(mapping->domain, dev);
kref_put(&mapping->kref, release_iommu_mapping);
to_dma_iommu_mapping(dev) = NULL;
- set_dma_ops(dev, arm_get_dma_map_ops(dev->archdata.dma_coherent));
+ set_dma_ops(dev, NULL);
pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
}
EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
-static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
-{
- return coherent ? &iommu_coherent_ops : &iommu_ops;
-}
-
-static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
- const struct iommu_ops *iommu)
+static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ const struct iommu_ops *iommu, bool coherent)
{
struct dma_iommu_mapping *mapping;
- if (!iommu)
- return false;
-
mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
if (IS_ERR(mapping)) {
pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
size, dev_name(dev));
- return false;
+ return;
}
if (__arm_iommu_attach_device(dev, mapping)) {
pr_warn("Failed to attached device %s to IOMMU_mapping\n",
dev_name(dev));
arm_iommu_release_mapping(mapping);
- return false;
+ return;
}
- return true;
+ set_dma_ops(dev, &iommu_ops);
}
static void arm_teardown_iommu_dma_ops(struct device *dev)
@@ -2250,27 +1757,20 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
#else
-static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
- const struct iommu_ops *iommu)
+static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ const struct iommu_ops *iommu, bool coherent)
{
- return false;
}
static void arm_teardown_iommu_dma_ops(struct device *dev) { }
-#define arm_get_iommu_dma_map_ops arm_get_dma_map_ops
-
#endif /* CONFIG_ARM_DMA_USE_IOMMU */
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent)
{
- const struct dma_map_ops *dma_ops;
-
dev->archdata.dma_coherent = coherent;
-#ifdef CONFIG_SWIOTLB
dev->dma_coherent = coherent;
-#endif
/*
* Don't override the dma_ops if they have already been set. Ideally
@@ -2280,12 +1780,8 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
if (dev->dma_ops)
return;
- if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
- dma_ops = arm_get_iommu_dma_map_ops(coherent);
- else
- dma_ops = arm_get_dma_map_ops(coherent);
-
- set_dma_ops(dev, dma_ops);
+ if (iommu)
+ arm_setup_iommu_dma_ops(dev, dma_base, size, iommu, coherent);
xen_setup_dma_ops(dev);
dev->archdata.dma_ops_setup = true;
@@ -2301,7 +1797,6 @@ void arch_teardown_dma_ops(struct device *dev)
set_dma_ops(dev, NULL);
}
-#ifdef CONFIG_SWIOTLB
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
@@ -2329,4 +1824,3 @@ void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
{
__arm_dma_free(dev, size, cpu_addr, dma_handle, attrs, false);
}
-#endif /* CONFIG_SWIOTLB */
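The dma-mapping.c hunks above collapse the separate coherent and streaming arm_dma_ops/arm_coherent_dma_ops tables into a single implementation that branches on dev->dma_coherent. A minimal userspace sketch of that pattern follows; every name in it (fake_device, fake_map_page, cpu_sync_for_device) is an illustrative stand-in, not a real kernel symbol.

/* Sketch: one set of ops keyed on a per-device coherency flag, where the
 * coherent case simply skips the cache maintenance step. */
#include <stdbool.h>
#include <stdio.h>

struct fake_device {
	bool dma_coherent;		/* set once at setup, like dev->dma_coherent */
};

static void cpu_sync_for_device(void *buf, size_t len)
{
	/* stands in for __dma_page_cpu_to_dev(): cache clean before DMA */
	(void)buf;
	printf("syncing %zu bytes for device\n", len);
}

static unsigned long fake_map_page(struct fake_device *dev, void *buf, size_t len)
{
	if (!dev->dma_coherent)
		cpu_sync_for_device(buf, len);
	return (unsigned long)buf;	/* pretend bus address == CPU address */
}

int main(void)
{
	char buf[64];
	struct fake_device coherent = { .dma_coherent = true };
	struct fake_device streaming = { .dma_coherent = false };

	fake_map_page(&coherent, buf, sizeof(buf));	/* no sync needed */
	fake_map_page(&streaming, buf, sizeof(buf));	/* syncs first */
	return 0;
}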
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index a062e07516dd..46cccd6bf705 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -322,6 +322,10 @@ retry:
return 0;
}
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return 0;
+
if (!(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_RETRY) {
flags |= FAULT_FLAG_TRIED;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index cd17e324aa51..a49f0b9c0f75 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -412,6 +412,26 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
}
+static pgprot_t protection_map[16] __ro_after_init = {
+ [VM_NONE] = __PAGE_NONE,
+ [VM_READ] = __PAGE_READONLY,
+ [VM_WRITE] = __PAGE_COPY,
+ [VM_WRITE | VM_READ] = __PAGE_COPY,
+ [VM_EXEC] = __PAGE_READONLY_EXEC,
+ [VM_EXEC | VM_READ] = __PAGE_READONLY_EXEC,
+ [VM_EXEC | VM_WRITE] = __PAGE_COPY_EXEC,
+ [VM_EXEC | VM_WRITE | VM_READ] = __PAGE_COPY_EXEC,
+ [VM_SHARED] = __PAGE_NONE,
+ [VM_SHARED | VM_READ] = __PAGE_READONLY,
+ [VM_SHARED | VM_WRITE] = __PAGE_SHARED,
+ [VM_SHARED | VM_WRITE | VM_READ] = __PAGE_SHARED,
+ [VM_SHARED | VM_EXEC] = __PAGE_READONLY_EXEC,
+ [VM_SHARED | VM_EXEC | VM_READ] = __PAGE_READONLY_EXEC,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = __PAGE_SHARED_EXEC,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __PAGE_SHARED_EXEC
+};
+DECLARE_VM_GET_PAGE_PROT
+
/*
* Adjust the PMD section entries according to the CPU in use.
*/
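The mmu.c hunk above replaces the old __P000/__S000 macros with an explicit protection_map[16] table plus DECLARE_VM_GET_PAGE_PROT. The sketch below shows how such a table is indexed by the low VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits; it is a userspace illustration under the assumption that vm_get_page_prot() is essentially a masked table lookup, and the strings merely stand in for the arch __PAGE_* pgprot values.

#include <stdio.h>

#define VM_NONE   0x0
#define VM_READ   0x1
#define VM_WRITE  0x2
#define VM_EXEC   0x4
#define VM_SHARED 0x8

static const char *protection_map[16] = {
	[VM_NONE]                                  = "__PAGE_NONE",
	[VM_READ]                                  = "__PAGE_READONLY",
	[VM_WRITE]                                 = "__PAGE_COPY",
	[VM_WRITE | VM_READ]                       = "__PAGE_COPY",
	[VM_EXEC]                                  = "__PAGE_READONLY_EXEC",
	[VM_EXEC | VM_READ]                        = "__PAGE_READONLY_EXEC",
	[VM_EXEC | VM_WRITE]                       = "__PAGE_COPY_EXEC",
	[VM_EXEC | VM_WRITE | VM_READ]             = "__PAGE_COPY_EXEC",
	[VM_SHARED]                                = "__PAGE_NONE",
	[VM_SHARED | VM_READ]                      = "__PAGE_READONLY",
	[VM_SHARED | VM_WRITE]                     = "__PAGE_SHARED",
	[VM_SHARED | VM_WRITE | VM_READ]           = "__PAGE_SHARED",
	[VM_SHARED | VM_EXEC]                      = "__PAGE_READONLY_EXEC",
	[VM_SHARED | VM_EXEC | VM_READ]            = "__PAGE_READONLY_EXEC",
	[VM_SHARED | VM_EXEC | VM_WRITE]           = "__PAGE_SHARED_EXEC",
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = "__PAGE_SHARED_EXEC",
};

/* roughly what DECLARE_VM_GET_PAGE_PROT generates: a masked table lookup */
static const char *vm_get_page_prot(unsigned long vm_flags)
{
	return protection_map[vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}

int main(void)
{
	printf("%s\n", vm_get_page_prot(VM_READ | VM_WRITE));             /* __PAGE_COPY */
	printf("%s\n", vm_get_page_prot(VM_SHARED | VM_READ | VM_WRITE)); /* __PAGE_SHARED */
	return 0;
}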
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index e05fc9743767..9fb9fff08c94 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -45,10 +45,10 @@ config ARM64
select ARCH_HAS_SYSCALL_WRAPPER
select ARCH_HAS_TEARDOWN_DMA_OPS if IOMMU_SUPPORT
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
- select ARCH_HAS_VM_GET_PAGE_PROT
select ARCH_HAS_ZONE_DMA_SET if EXPERT
select ARCH_HAVE_ELF_PROT
select ARCH_HAVE_NMI_SAFE_CMPXCHG
+ select ARCH_HAVE_TRACE_MMIO_ACCESS
select ARCH_INLINE_READ_LOCK if !PREEMPTION
select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION
select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPTION
@@ -917,6 +917,23 @@ config ARM64_ERRATUM_1902691
If unsure, say Y.
+config ARM64_ERRATUM_2457168
+ bool "Cortex-A510: 2457168: workaround for AMEVCNTR01 incrementing incorrectly"
+ depends on ARM64_AMU_EXTN
+ default y
+ help
+ This option adds the workaround for ARM Cortex-A510 erratum 2457168.
+
+ The AMU counter AMEVCNTR01 (constant counter) should increment at the same rate
+ as the system counter. On affected Cortex-A510 cores AMEVCNTR01 increments
+ incorrectly giving a significantly higher output value.
+
+ Work around this problem by returning 0 when reading the affected counter in
+ key locations, which results in disabling all users of this counter. The effect
+ is the same as firmware disabling the affected counters.
+
+ If unsure, say Y.
+
config CAVIUM_ERRATUM_22375
bool "Cavium erratum 22375, 24313"
default y
diff --git a/arch/arm64/boot/dts/apple/t8103-pmgr.dtsi b/arch/arm64/boot/dts/apple/t8103-pmgr.dtsi
index fc51bc872468..a6dbb1f485d8 100644
--- a/arch/arm64/boot/dts/apple/t8103-pmgr.dtsi
+++ b/arch/arm64/boot/dts/apple/t8103-pmgr.dtsi
@@ -725,11 +725,6 @@
#power-domain-cells = <0>;
#reset-cells = <0>;
label = "ans2";
- /*
- * The ADT makes ps_apcie_st depend on ps_ans2 instead, but this
- * doesn't make much sense since ANS2 uses APCIE_ST.
- */
- power-domains = <&ps_apcie_st>;
};
ps_gfx: power-controller@3f8 {
@@ -836,7 +831,7 @@
#power-domain-cells = <0>;
#reset-cells = <0>;
label = "apcie_st";
- power-domains = <&ps_apcie>;
+ power-domains = <&ps_apcie>, <&ps_ans2>;
};
ps_ane_sys: power-controller@470 {
diff --git a/arch/arm64/boot/dts/apple/t8103.dtsi b/arch/arm64/boot/dts/apple/t8103.dtsi
index 9f8f4145db88..51a63b29d404 100644
--- a/arch/arm64/boot/dts/apple/t8103.dtsi
+++ b/arch/arm64/boot/dts/apple/t8103.dtsi
@@ -378,6 +378,40 @@
<AIC_IRQ 274 IRQ_TYPE_LEVEL_HIGH>;
};
+ ans_mbox: mbox@277408000 {
+ compatible = "apple,t8103-asc-mailbox", "apple,asc-mailbox-v4";
+ reg = <0x2 0x77408000 0x0 0x4000>;
+ interrupt-parent = <&aic>;
+ interrupts = <AIC_IRQ 583 IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_IRQ 584 IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_IRQ 585 IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_IRQ 586 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "send-empty", "send-not-empty",
+ "recv-empty", "recv-not-empty";
+ #mbox-cells = <0>;
+ power-domains = <&ps_ans2>;
+ };
+
+ sart: iommu@27bc50000 {
+ compatible = "apple,t8103-sart";
+ reg = <0x2 0x7bc50000 0x0 0x10000>;
+ power-domains = <&ps_ans2>;
+ };
+
+ nvme@27bcc0000 {
+ compatible = "apple,t8103-nvme-ans2", "apple,nvme-ans2";
+ reg = <0x2 0x7bcc0000 0x0 0x40000>,
+ <0x2 0x77400000 0x0 0x4000>;
+ reg-names = "nvme", "ans";
+ interrupt-parent = <&aic>;
+ interrupts = <AIC_IRQ 590 IRQ_TYPE_LEVEL_HIGH>;
+ mboxes = <&ans_mbox>;
+ apple,sart = <&sart>;
+ power-domains = <&ps_ans2>, <&ps_apcie_st>;
+ power-domain-names = "ans", "apcie0";
+ resets = <&ps_ans2>;
+ };
+
pcie0_dart_0: dart@681008000 {
compatible = "apple,t8103-dart";
reg = <0x6 0x81008000 0x0 0x4000>;
diff --git a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
index be97da132258..ba75adedbf79 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
@@ -599,8 +599,8 @@
compatible = "socionext,uniphier-dwc3", "snps,dwc3";
status = "disabled";
reg = <0x65a00000 0xcd00>;
- interrupt-names = "host", "peripheral";
- interrupts = <0 134 4>, <0 135 4>;
+ interrupt-names = "dwc_usb3";
+ interrupts = <0 134 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb0>, <&pinctrl_usb2>;
clock-names = "ref", "bus_early", "suspend";
@@ -701,8 +701,8 @@
compatible = "socionext,uniphier-dwc3", "snps,dwc3";
status = "disabled";
reg = <0x65c00000 0xcd00>;
- interrupt-names = "host", "peripheral";
- interrupts = <0 137 4>, <0 138 4>;
+ interrupt-names = "dwc_usb3";
+ interrupts = <0 137 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb1>, <&pinctrl_usb3>;
clock-names = "ref", "bus_early", "suspend";
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index ca9b487112cc..34256bda0da9 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -71,7 +71,7 @@ static __always_inline int icache_is_vpipt(void)
static inline u32 cache_type_cwg(void)
{
- return (read_cpuid_cachetype() >> CTR_EL0_CWG_SHIFT) & CTR_EL0_CWG_MASK;
+ return SYS_FIELD_GET(CTR_EL0, CWG, read_cpuid_cachetype());
}
#define __read_mostly __section(".data..read_mostly")
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index 9bb1873f5295..6f86b7ab6c28 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -153,7 +153,7 @@ struct vl_info {
#ifdef CONFIG_ARM64_SVE
-extern void sve_alloc(struct task_struct *task);
+extern void sve_alloc(struct task_struct *task, bool flush);
extern void fpsimd_release_task(struct task_struct *task);
extern void fpsimd_sync_to_sve(struct task_struct *task);
extern void fpsimd_force_sync_to_sve(struct task_struct *task);
@@ -256,7 +256,7 @@ size_t sve_state_size(struct task_struct const *task);
#else /* ! CONFIG_ARM64_SVE */
-static inline void sve_alloc(struct task_struct *task) { }
+static inline void sve_alloc(struct task_struct *task, bool flush) { }
static inline void fpsimd_release_task(struct task_struct *task) { }
static inline void sve_sync_to_fpsimd(struct task_struct *task) { }
static inline void sve_sync_from_fpsimd_zeropad(struct task_struct *task) { }
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index 1fd2846dbefe..d20f5da2d76f 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -46,9 +46,6 @@ extern void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_GET
extern pte_t huge_ptep_get(pte_t *ptep);
-extern void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte, unsigned long sz);
-#define set_huge_swap_pte_at set_huge_swap_pte_at
void __init arm64_hugetlb_cma_reserve(void);
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 87dd42d74afe..877495a0fd0c 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -91,7 +91,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
}
/* IO barriers */
-#define __iormb(v) \
+#define __io_ar(v) \
({ \
unsigned long tmp; \
\
@@ -108,39 +108,14 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
: "memory"); \
})
-#define __io_par(v) __iormb(v)
-#define __iowmb() dma_wmb()
-#define __iomb() dma_mb()
-
-/*
- * Relaxed I/O memory access primitives. These follow the Device memory
- * ordering rules but do not guarantee any ordering relative to Normal memory
- * accesses.
- */
-#define readb_relaxed(c) ({ u8 __r = __raw_readb(c); __r; })
-#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16)__raw_readw(c)); __r; })
-#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })
-#define readq_relaxed(c) ({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; })
+#define __io_bw() dma_wmb()
+#define __io_br(v)
+#define __io_aw(v)
-#define writeb_relaxed(v,c) ((void)__raw_writeb((v),(c)))
-#define writew_relaxed(v,c) ((void)__raw_writew((__force u16)cpu_to_le16(v),(c)))
-#define writel_relaxed(v,c) ((void)__raw_writel((__force u32)cpu_to_le32(v),(c)))
-#define writeq_relaxed(v,c) ((void)__raw_writeq((__force u64)cpu_to_le64(v),(c)))
-
-/*
- * I/O memory access primitives. Reads are ordered relative to any
- * following Normal memory access. Writes are ordered relative to any prior
- * Normal memory access.
- */
-#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(__v); __v; })
-#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(__v); __v; })
-#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(__v); __v; })
-#define readq(c) ({ u64 __v = readq_relaxed(c); __iormb(__v); __v; })
-
-#define writeb(v,c) ({ __iowmb(); writeb_relaxed((v),(c)); })
-#define writew(v,c) ({ __iowmb(); writew_relaxed((v),(c)); })
-#define writel(v,c) ({ __iowmb(); writel_relaxed((v),(c)); })
-#define writeq(v,c) ({ __iowmb(); writeq_relaxed((v),(c)); })
+/* arm64-specific, don't use in portable drivers */
+#define __iormb(v) __io_ar(v)
+#define __iowmb() __io_bw()
+#define __iomb() dma_mb()
/*
* I/O port access primitives.
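The io.h hunk above stops defining readl()/writel() locally and instead exposes the __io_ar()/__io_bw() barrier hooks so the generic I/O header can compose the ordered accessors from the relaxed ones. A hedged userspace sketch of that composition pattern (GCC/Clang statement expressions, fake register and barrier() as stand-ins for the real memory barriers):

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t fake_mmio_reg;		/* pretend device register */

#define barrier()	__asm__ __volatile__("" ::: "memory")

/* arch-provided pieces: relaxed accessors plus ordering hooks */
#define readl_relaxed(addr)	(*(volatile uint32_t *)(addr))
#define writel_relaxed(v, addr)	(*(volatile uint32_t *)(addr) = (v))
#define __io_ar(v)	barrier()	/* barrier after the MMIO read */
#define __io_bw()	barrier()	/* barrier before the MMIO write */

/* generic layer builds the fully ordered accessors from those pieces */
#define readl(addr)	({ uint32_t __v = readl_relaxed(addr); __io_ar(__v); __v; })
#define writel(v, addr)	({ __io_bw(); writel_relaxed((v), (addr)); })

int main(void)
{
	writel(0x1234, &fake_mmio_reg);
	printf("read back 0x%x\n", readl(&fake_mmio_reg));
	return 0;
}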
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index f38ef299f13b..e9c9388ccc02 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -929,6 +929,10 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
(system_supports_mte() && \
test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &(kvm)->arch.flags))
+#define kvm_supports_32bit_el0() \
+ (system_supports_32bit_el0() && \
+ !static_branch_unlikely(&arm64_mismatched_32bit_el0))
+
int kvm_trng_call(struct kvm_vcpu *vcpu);
#ifdef CONFIG_KVM
extern phys_addr_t hyp_mem_base;
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 4e4c171f070b..9dd08cd339c3 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -381,6 +381,15 @@ static inline bool defer_reserve_crashkernel(void)
# define INIT_MEMBLOCK_RESERVED_REGIONS (INIT_MEMBLOCK_REGIONS + NR_CPUS + 1)
#endif
+/*
+ * Memory regions marked with the MEMBLOCK_NOMAP flag (for example, memory
+ * of the EFI_UNUSABLE_MEMORY type) may split a contiguous memory block into
+ * multiple parts. As a result, the number of memory regions can be large.

+ */
+#ifdef CONFIG_EFI
+#define INIT_MEMBLOCK_MEMORY_REGIONS (INIT_MEMBLOCK_REGIONS * 8)
+#endif
+
#include <asm-generic/memory_model.h>
#endif /* __ASM_MEMORY_H */
diff --git a/arch/arm64/include/asm/pci.h b/arch/arm64/include/asm/pci.h
index b33ca260e3c9..016eb6b46dc0 100644
--- a/arch/arm64/include/asm/pci.h
+++ b/arch/arm64/include/asm/pci.h
@@ -9,7 +9,6 @@
#include <asm/io.h>
#define PCIBIOS_MIN_IO 0x1000
-#define PCIBIOS_MIN_MEM 0
/*
* Set to 1 if the kernel should re-assign all PCI bus numbers
@@ -18,21 +17,8 @@
(pci_has_flag(PCI_REASSIGN_ALL_BUS))
#define arch_can_pci_mmap_wc() 1
-#define ARCH_GENERIC_PCI_MMAP_RESOURCE 1
-extern int isa_dma_bridge_buggy;
-
-#ifdef CONFIG_PCI
-static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
-{
- /* no legacy IRQ on arm64 */
- return -ENODEV;
-}
-
-static inline int pci_proc_domain(struct pci_bus *bus)
-{
- return 1;
-}
-#endif /* CONFIG_PCI */
+/* Generic PCI */
+#include <asm-generic/pci.h>
#endif /* __ASM_PCI_H */
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 62e0ebeed720..9b165117a454 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -89,24 +89,6 @@ extern bool arm64_use_ng_mappings;
#define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
#define PAGE_EXECONLY __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_READONLY
-#define __P011 PAGE_READONLY
-#define __P100 PAGE_READONLY_EXEC /* PAGE_EXECONLY if Enhanced PAN */
-#define __P101 PAGE_READONLY_EXEC
-#define __P110 PAGE_READONLY_EXEC
-#define __P111 PAGE_READONLY_EXEC
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY_EXEC /* PAGE_EXECONLY if Enhanced PAN */
-#define __S101 PAGE_READONLY_EXEC
-#define __S110 PAGE_SHARED_EXEC
-#define __S111 PAGE_SHARED_EXEC
-
#endif /* __ASSEMBLY__ */
#endif /* __ASM_PGTABLE_PROT_H */
diff --git a/arch/arm64/include/asm/setup.h b/arch/arm64/include/asm/setup.h
index 6437df661700..f4af547ef54c 100644
--- a/arch/arm64/include/asm/setup.h
+++ b/arch/arm64/include/asm/setup.h
@@ -3,6 +3,8 @@
#ifndef __ARM64_ASM_SETUP_H
#define __ARM64_ASM_SETUP_H
+#include <linux/string.h>
+
#include <uapi/asm/setup.h>
void *get_early_fdt_ptr(void);
@@ -14,4 +16,19 @@ void early_fdt_map(u64 dt_phys);
extern phys_addr_t __fdt_pointer __initdata;
extern u64 __cacheline_aligned boot_args[4];
+static inline bool arch_parse_debug_rodata(char *arg)
+{
+ extern bool rodata_enabled;
+ extern bool rodata_full;
+
+ if (arg && !strcmp(arg, "full")) {
+ rodata_enabled = true;
+ rodata_full = true;
+ return true;
+ }
+
+ return false;
+}
+#define arch_parse_debug_rodata arch_parse_debug_rodata
+
#endif
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 7c71358d44c4..818df938a7ad 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -1116,6 +1116,7 @@
#else
+#include <linux/bitfield.h>
#include <linux/build_bug.h>
#include <linux/types.h>
#include <asm/alternative.h>
@@ -1209,8 +1210,6 @@
par; \
})
-#endif
-
#define SYS_FIELD_GET(reg, field, val) \
FIELD_GET(reg##_##field##_MASK, val)
@@ -1220,4 +1219,6 @@
#define SYS_FIELD_PREP_ENUM(reg, field, val) \
FIELD_PREP(reg##_##field##_MASK, reg##_##field##_##val)
+#endif
+
#endif /* __ASM_SYSREG_H */
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 3bb134355874..316917b98707 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -75,9 +75,11 @@ struct kvm_regs {
/* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
#define KVM_ARM_DEVICE_TYPE_SHIFT 0
-#define KVM_ARM_DEVICE_TYPE_MASK (0xffff << KVM_ARM_DEVICE_TYPE_SHIFT)
+#define KVM_ARM_DEVICE_TYPE_MASK GENMASK(KVM_ARM_DEVICE_TYPE_SHIFT + 15, \
+ KVM_ARM_DEVICE_TYPE_SHIFT)
#define KVM_ARM_DEVICE_ID_SHIFT 16
-#define KVM_ARM_DEVICE_ID_MASK (0xffff << KVM_ARM_DEVICE_ID_SHIFT)
+#define KVM_ARM_DEVICE_ID_MASK GENMASK(KVM_ARM_DEVICE_ID_SHIFT + 15, \
+ KVM_ARM_DEVICE_ID_SHIFT)
/* Supported device IDs */
#define KVM_ARM_DEVICE_VGIC_V2 0
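The UAPI hunk above moves the device type/ID masks from hand-written shifts of 0xffff to GENMASK(), which keeps the arithmetic in unsigned long and avoids shifting a signed int constant into the sign bit. A small userspace check of the intended mask, using a simplified restatement of GENMASK for illustration rather than the verbatim kernel macro:

#include <stdio.h>

#define BITS_PER_LONG	(8 * (int)sizeof(unsigned long))
#define GENMASK(h, l) \
	((~0UL - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

#define ID_SHIFT	16

int main(void)
{
	unsigned long id_mask = GENMASK(ID_SHIFT + 15, ID_SHIFT);

	printf("GENMASK(31, 16) = 0x%lx\n", id_mask);	/* prints 0xffff0000 */
	return 0;
}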
diff --git a/arch/arm64/kernel/cacheinfo.c b/arch/arm64/kernel/cacheinfo.c
index 587543c6c51c..97c42be71338 100644
--- a/arch/arm64/kernel/cacheinfo.c
+++ b/arch/arm64/kernel/cacheinfo.c
@@ -45,7 +45,8 @@ static void ci_leaf_init(struct cacheinfo *this_leaf,
int init_cache_level(unsigned int cpu)
{
- unsigned int ctype, level, leaves, fw_level;
+ unsigned int ctype, level, leaves;
+ int fw_level;
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
for (level = 1, leaves = 0; level <= MAX_CACHE_LEVEL; level++) {
@@ -63,6 +64,9 @@ int init_cache_level(unsigned int cpu)
else
fw_level = acpi_find_last_cache_level(cpu);
+ if (fw_level < 0)
+ return fw_level;
+
if (level < fw_level) {
/*
* some external caches not specified in CLIDR_EL1
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 7e6289e709fc..53b973b6059f 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -208,6 +208,8 @@ static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_1286807
{
ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
+ },
+ {
/* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */
ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
},
@@ -654,6 +656,16 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
},
#endif
+#ifdef CONFIG_ARM64_ERRATUM_2457168
+ {
+ .desc = "ARM erratum 2457168",
+ .capability = ARM64_WORKAROUND_2457168,
+ .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
+
+ /* Cortex-A510 r0p0-r1p1 */
+ CAP_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1)
+ },
+#endif
#ifdef CONFIG_ARM64_ERRATUM_2038923
{
.desc = "ARM erratum 2038923",
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 907401e4fffb..af4de817d712 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1870,7 +1870,10 @@ static void cpu_amu_enable(struct arm64_cpu_capabilities const *cap)
pr_info("detected CPU%d: Activity Monitors Unit (AMU)\n",
smp_processor_id());
cpumask_set_cpu(smp_processor_id(), &amu_cpus);
- update_freq_counters_refs();
+
+ /* 0 reference values signal broken/disabled counters */
+ if (!this_cpu_has_cap(ARM64_WORKAROUND_2457168))
+ update_freq_counters_refs();
}
}
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 254fe31c03a0..2d73b3e793b2 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -502,7 +502,7 @@ tsk .req x28 // current thread_info
SYM_CODE_START(vectors)
kernel_ventry 1, t, 64, sync // Synchronous EL1t
kernel_ventry 1, t, 64, irq // IRQ EL1t
- kernel_ventry 1, t, 64, fiq // FIQ EL1h
+ kernel_ventry 1, t, 64, fiq // FIQ EL1t
kernel_ventry 1, t, 64, error // Error EL1t
kernel_ventry 1, h, 64, sync // Synchronous EL1h
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index dd63ffc3a2fa..23834d96d1e7 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -715,10 +715,12 @@ size_t sve_state_size(struct task_struct const *task)
* do_sve_acc() case, there is no ABI requirement to hide stale data
 * written previously by the task.
*/
-void sve_alloc(struct task_struct *task)
+void sve_alloc(struct task_struct *task, bool flush)
{
if (task->thread.sve_state) {
- memset(task->thread.sve_state, 0, sve_state_size(task));
+ if (flush)
+ memset(task->thread.sve_state, 0,
+ sve_state_size(task));
return;
}
@@ -1388,7 +1390,7 @@ void do_sve_acc(unsigned long esr, struct pt_regs *regs)
return;
}
- sve_alloc(current);
+ sve_alloc(current, true);
if (!current->thread.sve_state) {
force_sig(SIGKILL);
return;
@@ -1439,7 +1441,7 @@ void do_sme_acc(unsigned long esr, struct pt_regs *regs)
return;
}
- sve_alloc(current);
+ sve_alloc(current, false);
sme_alloc(current);
if (!current->thread.sve_state || !current->thread.za_state) {
force_sig(SIGKILL);
@@ -1460,17 +1462,6 @@ void do_sme_acc(unsigned long esr, struct pt_regs *regs)
fpsimd_bind_task_to_cpu();
}
- /*
- * If SVE was not already active initialise the SVE registers,
- * any non-shared state between the streaming and regular SVE
- * registers is architecturally guaranteed to be zeroed when
- * we enter streaming mode. We do not need to initialize ZA
- * since ZA must be disabled at this point and enabling ZA is
- * architecturally defined to zero ZA.
- */
- if (system_supports_sve() && !test_thread_flag(TIF_SVE))
- sve_init_regs();
-
put_cpu_fpsimd_context();
}
diff --git a/arch/arm64/kernel/pi/kaslr_early.c b/arch/arm64/kernel/pi/kaslr_early.c
index 6c3855e69395..17bff6e399e4 100644
--- a/arch/arm64/kernel/pi/kaslr_early.c
+++ b/arch/arm64/kernel/pi/kaslr_early.c
@@ -94,11 +94,9 @@ asmlinkage u64 kaslr_early_init(void *fdt)
seed = get_kaslr_seed(fdt);
if (!seed) {
-#ifdef CONFIG_ARCH_RANDOM
- if (!__early_cpu_has_rndr() ||
- !__arm64_rndr((unsigned long *)&seed))
-#endif
- return 0;
+ if (!__early_cpu_has_rndr() ||
+ !__arm64_rndr((unsigned long *)&seed))
+ return 0;
}
/*
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 21da83187a60..eb7c08dfb834 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -882,7 +882,7 @@ static int sve_set_common(struct task_struct *target,
* state and ensure there's storage.
*/
if (target->thread.svcr != old_svcr)
- sve_alloc(target);
+ sve_alloc(target, true);
}
/* Registers: FPSIMD-only case */
@@ -912,7 +912,7 @@ static int sve_set_common(struct task_struct *target,
goto out;
}
- sve_alloc(target);
+ sve_alloc(target, true);
if (!target->thread.sve_state) {
ret = -ENOMEM;
clear_tsk_thread_flag(target, TIF_SVE);
@@ -1082,7 +1082,7 @@ static int za_set(struct task_struct *target,
/* Ensure there is some SVE storage for streaming mode */
if (!target->thread.sve_state) {
- sve_alloc(target);
+ sve_alloc(target, false);
if (!target->thread.sve_state) {
clear_thread_flag(TIF_SME);
ret = -ENOMEM;
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 3e6d0352d7d3..9ad911f1647c 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -91,7 +91,7 @@ static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
* not taken into account. This limit is not a guarantee and is
* NOT ABI.
*/
-#define SIGFRAME_MAXSZ SZ_64K
+#define SIGFRAME_MAXSZ SZ_256K
static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
unsigned long *offset, size_t size, bool extend)
@@ -310,7 +310,7 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
fpsimd_flush_task_state(current);
/* From now, fpsimd_thread_switch() won't touch thread.sve_state */
- sve_alloc(current);
+ sve_alloc(current, true);
if (!current->thread.sve_state) {
clear_thread_flag(TIF_SVE);
return -ENOMEM;
@@ -926,6 +926,16 @@ static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
/* Signal handlers are invoked with ZA and streaming mode disabled */
if (system_supports_sme()) {
+ /*
+ * If we were in streaming mode the saved register
+ * state was SVE but we will exit SM and use the
+ * FPSIMD register state - flush the saved FPSIMD
+ * register state in case it gets loaded.
+ */
+ if (current->thread.svcr & SVCR_SM_MASK)
+ memset(&current->thread.uw.fpsimd_state, 0,
+ sizeof(current->thread.uw.fpsimd_state));
+
current->thread.svcr &= ~(SVCR_ZA_MASK |
SVCR_SM_MASK);
sme_smstop();
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 62ed361a4376..ffc5d76cf695 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -1078,14 +1078,6 @@ bool smp_crash_stop_failed(void)
}
#endif
-/*
- * not supported here
- */
-int setup_profiling_timer(unsigned int multiplier)
-{
- return -EINVAL;
-}
-
static bool have_cpu_die(void)
{
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 869ffc4d4484..ad2bfc794257 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -296,12 +296,25 @@ core_initcall(init_amu_fie);
static void cpu_read_corecnt(void *val)
{
+ /*
+ * A value of 0 can be returned if the current CPU does not support AMUs
+ * or if the counter is disabled for this CPU. A return value of 0 at
+ * counter read is properly handled as an error case by the users of the
+ * counter.
+ */
*(u64 *)val = read_corecnt();
}
static void cpu_read_constcnt(void *val)
{
- *(u64 *)val = read_constcnt();
+ /*
+ * Return 0 if the current CPU is affected by erratum 2457168. A value
+ * of 0 is also returned if the current CPU does not support AMUs or if
+ * the counter is disabled. A return value of 0 at counter read is
+ * properly handled as an error case by the users of the counter.
+ */
+ *(u64 *)val = this_cpu_has_cap(ARM64_WORKAROUND_2457168) ?
+ 0UL : read_constcnt();
}
static inline
@@ -328,7 +341,22 @@ int counters_read_on_cpu(int cpu, smp_call_func_t func, u64 *val)
*/
bool cpc_ffh_supported(void)
{
- return freq_counters_valid(get_cpu_with_amu_feat());
+ int cpu = get_cpu_with_amu_feat();
+
+ /*
+ * FFH is considered supported if there is at least one present CPU that
+ * supports AMUs. Using FFH to read core and reference counters for CPUs
+ * that do not support AMUs, have counters disabled, or are affected
+ * by errata will result in a return value of 0.
+ *
+ * This is done to allow any enabled and valid counters to be read
+ * through FFH, knowing that potentially returning 0 as counter value is
+ * properly handled by the users of these counters.
+ */
+ if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask))
+ return false;
+
+ return true;
}
int cpc_read_ffh(int cpu, struct cpc_reg *reg, u64 *val)
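The topology.c hunks above rely on the convention spelled out in the new comments: a counter read of 0 is treated by callers as "counter unavailable", so the erratum 2457168 workaround can simply force the affected constant counter to read as 0. A tiny userspace sketch of that convention, with illustrative names only (cpu_is_affected, read_constcnt_sketch), not kernel symbols:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool cpu_is_affected = true;	/* models this_cpu_has_cap(ARM64_WORKAROUND_2457168) */

static uint64_t read_constcnt_sketch(void)
{
	return cpu_is_affected ? 0 : 12345678ULL;	/* real code reads AMEVCNTR01 */
}

int main(void)
{
	uint64_t val = read_constcnt_sketch();

	if (!val) {
		fprintf(stderr, "constant counter unusable, falling back\n");
		return 1;
	}
	printf("constcnt=%llu\n", (unsigned long long)val);
	return 0;
}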
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 986cee6fbc7f..2ff0ef62abad 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -757,8 +757,7 @@ static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
if (likely(!vcpu_mode_is_32bit(vcpu)))
return false;
- return !system_supports_32bit_el0() ||
- static_branch_unlikely(&arm64_mismatched_32bit_el0);
+ return !kvm_supports_32bit_el0();
}
/**
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 8c607199cad1..f802a3b3f8db 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -242,7 +242,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
switch (mode) {
case PSR_AA32_MODE_USR:
- if (!system_supports_32bit_el0())
+ if (!kvm_supports_32bit_el0())
return -EINVAL;
break;
case PSR_AA32_MODE_FIQ:
diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile
index ed5d222e2826..b5c5119c7396 100644
--- a/arch/arm64/kvm/hyp/nvhe/Makefile
+++ b/arch/arm64/kvm/hyp/nvhe/Makefile
@@ -4,7 +4,12 @@
#
asflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS
-ccflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS
+
+# Tracepoint and MMIO logging symbols should not be visible at nVHE KVM as
+# there is no way to execute them and any such MMIO access from nVHE KVM
+# will explode instantly (Words of Marc Zyngier). So introduce a generic flag
+# __DISABLE_TRACE_MMIO__ to disable MMIO tracing for nVHE KVM.
+ccflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS -D__DISABLE_TRACE_MMIO__
hostprogs := gen-hyprel
HOST_EXTRACFLAGS += -I$(objtree)/include
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 87f1cd0df36e..c9a13e487187 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -993,7 +993,7 @@ transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
* THP doesn't start to split while we are adjusting the
* refcounts.
*
- * We are sure this doesn't happen, because mmu_notifier_retry
+ * We are sure this doesn't happen, because mmu_invalidate_retry
* was successful and we are holding the mmu_lock, so if this
* THP is trying to split, it will be blocked in the mmu
* notifier before touching any of the pages, specifically
@@ -1188,9 +1188,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return ret;
}
- mmu_seq = vcpu->kvm->mmu_notifier_seq;
+ mmu_seq = vcpu->kvm->mmu_invalidate_seq;
/*
- * Ensure the read of mmu_notifier_seq happens before we call
+ * Ensure the read of mmu_invalidate_seq happens before we call
* gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
* the page we just got a reference to gets unmapped before we have a
* chance to grab the mmu_lock, which ensure that if the page gets
@@ -1246,7 +1246,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
else
write_lock(&kvm->mmu_lock);
pgt = vcpu->arch.hw_mmu->pgt;
- if (mmu_notifier_retry(kvm, mmu_seq))
+ if (mmu_invalidate_retry(kvm, mmu_seq))
goto out_unlock;
/*
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index c059b259aea6..3234f50b8c4b 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -652,7 +652,7 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
*/
val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
| (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
- if (!system_supports_32bit_el0())
+ if (!kvm_supports_32bit_el0())
val |= ARMV8_PMU_PMCR_LC;
__vcpu_sys_reg(vcpu, r->reg) = val;
}
@@ -701,7 +701,7 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
val = __vcpu_sys_reg(vcpu, PMCR_EL0);
val &= ~ARMV8_PMU_PMCR_MASK;
val |= p->regval & ARMV8_PMU_PMCR_MASK;
- if (!system_supports_32bit_el0())
+ if (!kvm_supports_32bit_el0())
val |= ARMV8_PMU_PMCR_LC;
__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
kvm_pmu_handle_pmcr(vcpu, val);
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index cdf3ffa0c223..c33f1fad2745 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -608,6 +608,10 @@ retry:
return 0;
}
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return 0;
+
if (fault & VM_FAULT_RETRY) {
mm_flags |= FAULT_FLAG_TRIED;
goto retry;
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index fc4f710e9820..5f9379b3c8c8 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -76,17 +76,10 @@ EXPORT_SYMBOL_GPL(__sync_icache_dcache);
void flush_dcache_page(struct page *page)
{
/*
- * Only the head page's flags of HugeTLB can be cleared since the tail
- * vmemmap pages associated with each HugeTLB page are mapped with
- * read-only when CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP is enabled (more
- * details can refer to vmemmap_remap_pte()). Although
- * __sync_icache_dcache() only set PG_dcache_clean flag on the head
- * page struct, there is more than one page struct with PG_dcache_clean
- * associated with the HugeTLB page since the head vmemmap page frame
- * is reused (more details can refer to the comments above
- * page_fixed_fake_head()).
+ * HugeTLB pages are always fully mapped and only head page will be
+ * set PG_dcache_clean (see comments in __sync_icache_dcache()).
*/
- if (hugetlb_optimize_vmemmap_enabled() && PageHuge(page))
+ if (PageHuge(page))
page = compound_head(page);
if (test_bit(PG_dcache_clean, &page->flags))
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 5307ffdefb8d..0795028f017c 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -241,6 +241,13 @@ static void clear_flush(struct mm_struct *mm,
flush_tlb_range(&vma, saddr, addr);
}
+static inline struct folio *hugetlb_swap_entry_to_folio(swp_entry_t entry)
+{
+ VM_BUG_ON(!is_migration_entry(entry) && !is_hwpoison_entry(entry));
+
+ return page_folio(pfn_to_page(swp_offset(entry)));
+}
+
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
@@ -250,11 +257,16 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
unsigned long pfn, dpfn;
pgprot_t hugeprot;
- /*
- * Code needs to be expanded to handle huge swap and migration
- * entries. Needed for HUGETLB and MEMORY_FAILURE.
- */
- WARN_ON(!pte_present(pte));
+ if (!pte_present(pte)) {
+ struct folio *folio;
+
+ folio = hugetlb_swap_entry_to_folio(pte_to_swp_entry(pte));
+ ncontig = num_contig_ptes(folio_size(folio), &pgsize);
+
+ for (i = 0; i < ncontig; i++, ptep++)
+ set_pte_at(mm, addr, ptep, pte);
+ return;
+ }
if (!pte_cont(pte)) {
set_pte_at(mm, addr, ptep, pte);
@@ -272,18 +284,6 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
}
-void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte, unsigned long sz)
-{
- int i, ncontig;
- size_t pgsize;
-
- ncontig = num_contig_ptes(sz, &pgsize);
-
- for (i = 0; i < ncontig; i++, ptep++)
- set_pte(ptep, pte);
-}
-
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, unsigned long sz)
{
@@ -371,6 +371,28 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
return NULL;
}
+unsigned long hugetlb_mask_last_page(struct hstate *h)
+{
+ unsigned long hp_size = huge_page_size(h);
+
+ switch (hp_size) {
+#ifndef __PAGETABLE_PMD_FOLDED
+ case PUD_SIZE:
+ return PGDIR_SIZE - PUD_SIZE;
+#endif
+ case CONT_PMD_SIZE:
+ return PUD_SIZE - CONT_PMD_SIZE;
+ case PMD_SIZE:
+ return PUD_SIZE - PMD_SIZE;
+ case CONT_PTE_SIZE:
+ return PMD_SIZE - CONT_PTE_SIZE;
+ default:
+ break;
+ }
+
+ return 0UL;
+}
+
pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
size_t pagesize = 1UL << shift;
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index 78e9490f748d..8f5b7ce857ed 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -13,6 +13,27 @@
#include <asm/cpufeature.h>
#include <asm/page.h>
+static pgprot_t protection_map[16] __ro_after_init = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READONLY,
+ [VM_WRITE] = PAGE_READONLY,
+ [VM_WRITE | VM_READ] = PAGE_READONLY,
+ /* PAGE_EXECONLY if Enhanced PAN */
+ [VM_EXEC] = PAGE_READONLY_EXEC,
+ [VM_EXEC | VM_READ] = PAGE_READONLY_EXEC,
+ [VM_EXEC | VM_WRITE] = PAGE_READONLY_EXEC,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_READONLY_EXEC,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READONLY,
+ [VM_SHARED | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
+ /* PAGE_EXECONLY if Enhanced PAN */
+ [VM_SHARED | VM_EXEC] = PAGE_READONLY_EXEC,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_EXEC,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_EXEC,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_EXEC
+};
+
/*
* You really shouldn't be using read() or write() on /dev/mem. This might go
* away in the future.
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index db7c4e6ae57b..e7ad44585f40 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -642,24 +642,6 @@ static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
vm_area_add_early(vma);
}
-static int __init parse_rodata(char *arg)
-{
- int ret = strtobool(arg, &rodata_enabled);
- if (!ret) {
- rodata_full = false;
- return 0;
- }
-
- /* permit 'full' in addition to boolean options */
- if (strcmp(arg, "full"))
- return -EINVAL;
-
- rodata_enabled = true;
- rodata_full = true;
- return 0;
-}
-early_param("rodata", parse_rodata);
-
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static int __init map_entry_trampoline(void)
{
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 7ca8779ae34f..389623ae5a91 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -1496,7 +1496,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
memset(&ctx, 0, sizeof(ctx));
ctx.prog = prog;
- ctx.offset = kcalloc(prog->len + 1, sizeof(int), GFP_KERNEL);
+ ctx.offset = kvcalloc(prog->len + 1, sizeof(int), GFP_KERNEL);
if (ctx.offset == NULL) {
prog = orig_prog;
goto out_off;
@@ -1601,7 +1601,7 @@ skip_init_ctx:
ctx.offset[i] *= AARCH64_INSN_SIZE;
bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);
out_off:
- kfree(ctx.offset);
+ kvfree(ctx.offset);
kfree(jit_data);
prog->aux->jit_data = NULL;
}
@@ -1643,7 +1643,7 @@ static void invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l,
int args_off, int retval_off, int run_ctx_off,
bool save_ret)
{
- u32 *branch;
+ __le32 *branch;
u64 enter_prog;
u64 exit_prog;
struct bpf_prog *p = l->link.prog;
@@ -1698,7 +1698,7 @@ static void invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l,
if (ctx->image) {
int offset = &ctx->image[ctx->idx] - branch;
- *branch = A64_CBZ(1, A64_R(0), offset);
+ *branch = cpu_to_le32(A64_CBZ(1, A64_R(0), offset));
}
/* arg1: prog */
@@ -1713,7 +1713,7 @@ static void invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l,
static void invoke_bpf_mod_ret(struct jit_ctx *ctx, struct bpf_tramp_links *tl,
int args_off, int retval_off, int run_ctx_off,
- u32 **branches)
+ __le32 **branches)
{
int i;
@@ -1784,7 +1784,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
bool save_ret;
- u32 **branches = NULL;
+ __le32 **branches = NULL;
/* trampoline stack layout:
* [ parent ip ]
@@ -1892,7 +1892,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
flags & BPF_TRAMP_F_RET_FENTRY_RET);
if (fmod_ret->nr_links) {
- branches = kcalloc(fmod_ret->nr_links, sizeof(u32 *),
+ branches = kcalloc(fmod_ret->nr_links, sizeof(__le32 *),
GFP_KERNEL);
if (!branches)
return -ENOMEM;
@@ -1916,7 +1916,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
/* update the branches saved in invoke_bpf_mod_ret with cbnz */
for (i = 0; i < fmod_ret->nr_links && ctx->image != NULL; i++) {
int offset = &ctx->image[ctx->idx] - branches[i];
- *branches[i] = A64_CBNZ(1, A64_R(10), offset);
+ *branches[i] = cpu_to_le32(A64_CBNZ(1, A64_R(10), offset));
}
for (i = 0; i < fexit->nr_links; i++)
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index 779653771507..63b2484ce6c3 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -67,6 +67,7 @@ WORKAROUND_1902691
WORKAROUND_2038923
WORKAROUND_2064142
WORKAROUND_2077057
+WORKAROUND_2457168
WORKAROUND_TRBE_OVERWRITE_FILL_MODE
WORKAROUND_TSB_FLUSH_FAILURE
WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
diff --git a/arch/csky/include/asm/pci.h b/arch/csky/include/asm/pci.h
index ebc765b1f78b..42724c630d30 100644
--- a/arch/csky/include/asm/pci.h
+++ b/arch/csky/include/asm/pci.h
@@ -9,26 +9,7 @@
#include <asm/io.h>
-#define PCIBIOS_MIN_IO 0
-#define PCIBIOS_MIN_MEM 0
-
-/* C-SKY shim does not initialize PCI bus */
-#define pcibios_assign_all_busses() 1
-
-extern int isa_dma_bridge_buggy;
-
-#ifdef CONFIG_PCI
-static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
-{
- /* no legacy IRQ on csky */
- return -ENODEV;
-}
-
-static inline int pci_proc_domain(struct pci_bus *bus)
-{
- /* always show the domain in /proc */
- return 1;
-}
-#endif /* CONFIG_PCI */
+/* Generic PCI */
+#include <asm-generic/pci.h>
#endif /* __ASM_CSKY_PCI_H */
diff --git a/arch/csky/include/asm/pgalloc.h b/arch/csky/include/asm/pgalloc.h
index bbbd0698b397..7d57e5da0914 100644
--- a/arch/csky/include/asm/pgalloc.h
+++ b/arch/csky/include/asm/pgalloc.h
@@ -44,7 +44,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
pgd_t *ret;
pgd_t *init;
- ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
+ ret = (pgd_t *) __get_free_page(GFP_KERNEL);
if (ret) {
init = pgd_offset(&init_mm, 0UL);
pgd_init((unsigned long *)ret);
diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h
index bbe245117777..c3d9b92cbe61 100644
--- a/arch/csky/include/asm/pgtable.h
+++ b/arch/csky/include/asm/pgtable.h
@@ -18,12 +18,10 @@
/*
* C-SKY is two-level paging structure:
*/
-#define PGD_ORDER 0
-#define PTE_ORDER 0
-#define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
+#define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t))
#define PTRS_PER_PMD 1
-#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
+#define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t))
#define pte_ERROR(e) \
pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
@@ -77,24 +75,6 @@
#define MAX_SWAPFILES_CHECK() \
BUILD_BUG_ON(MAX_SWAPFILES_SHIFT != 5)
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READ
-#define __P010 PAGE_READ
-#define __P011 PAGE_READ
-#define __P100 PAGE_READ
-#define __P101 PAGE_READ
-#define __P110 PAGE_READ
-#define __P111 PAGE_READ
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READ
-#define __S010 PAGE_WRITE
-#define __S011 PAGE_WRITE
-#define __S100 PAGE_READ
-#define __S101 PAGE_READ
-#define __S110 PAGE_WRITE
-#define __S111 PAGE_WRITE
-
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c
index 6bb38bc2f39b..4b605aa2e1d6 100644
--- a/arch/csky/kernel/smp.c
+++ b/arch/csky/kernel/smp.c
@@ -243,11 +243,6 @@ void __init smp_cpus_done(unsigned int max_cpus)
{
}
-int setup_profiling_timer(unsigned int multiplier)
-{
- return -EINVAL;
-}
-
void csky_start_secondary(void)
{
struct mm_struct *mm = &init_mm;
diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c
index 7215a46b6b8e..e15f736cca4b 100644
--- a/arch/csky/mm/fault.c
+++ b/arch/csky/mm/fault.c
@@ -285,6 +285,10 @@ good_area:
return;
}
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
if (unlikely((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY))) {
flags |= FAULT_FLAG_TRIED;
diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c
index bf2004aa811a..bde7cabd23df 100644
--- a/arch/csky/mm/init.c
+++ b/arch/csky/mm/init.c
@@ -197,3 +197,23 @@ void __init fixaddr_init(void)
vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
fixrange_init(vaddr, vaddr + PMD_SIZE, swapper_pg_dir);
}
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READ,
+ [VM_WRITE] = PAGE_READ,
+ [VM_WRITE | VM_READ] = PAGE_READ,
+ [VM_EXEC] = PAGE_READ,
+ [VM_EXEC | VM_READ] = PAGE_READ,
+ [VM_EXEC | VM_WRITE] = PAGE_READ,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_READ,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READ,
+ [VM_SHARED | VM_WRITE] = PAGE_WRITE,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_WRITE,
+ [VM_SHARED | VM_EXEC] = PAGE_READ,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READ,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_WRITE,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_WRITE
+};
+DECLARE_VM_GET_PAGE_PROT
diff --git a/arch/hexagon/include/asm/bitops.h b/arch/hexagon/include/asm/bitops.h
index 75d6ba3643b8..160d8f37fa1a 100644
--- a/arch/hexagon/include/asm/bitops.h
+++ b/arch/hexagon/include/asm/bitops.h
@@ -127,38 +127,45 @@ static inline void change_bit(int nr, volatile void *addr)
* be atomic, particularly for things like slab_lock and slab_unlock.
*
*/
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
test_and_clear_bit(nr, addr);
}
-static inline void __set_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
{
test_and_set_bit(nr, addr);
}
-static inline void __change_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
{
test_and_change_bit(nr, addr);
}
/* Apparently, at least some of these are allowed to be non-atomic */
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
return test_and_clear_bit(nr, addr);
}
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
return test_and_set_bit(nr, addr);
}
-static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
return test_and_change_bit(nr, addr);
}
-static inline int __test_bit(int nr, const volatile unsigned long *addr)
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
int retval;
@@ -172,7 +179,20 @@ static inline int __test_bit(int nr, const volatile unsigned long *addr)
return retval;
}
-#define test_bit(nr, addr) __test_bit(nr, addr)
+static __always_inline bool
+arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+ int retval;
+
+ asm volatile(
+ "{P0 = tstbit(%1,%2); if (P0.new) %0 = #1; if (!P0.new) %0 = #0;}\n"
+ : "=&r" (retval)
+ : "r" (addr[BIT_WORD(nr)]), "r" (nr % BITS_PER_LONG)
+ : "p0", "memory"
+ );
+
+ return retval;
+}
/*
* ffz - find first zero in word.
@@ -271,6 +291,7 @@ static inline unsigned long __fls(unsigned long word)
}
#include <asm-generic/bitops/lock.h>
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
diff --git a/arch/hexagon/include/asm/pgtable.h b/arch/hexagon/include/asm/pgtable.h
index 0610724d6a28..f7048c18b6f9 100644
--- a/arch/hexagon/include/asm/pgtable.h
+++ b/arch/hexagon/include/asm/pgtable.h
@@ -126,33 +126,6 @@ extern unsigned long _dflt_cache_att;
*/
#define CACHEDEF (CACHE_DEFAULT << 6)
-/* Private (copy-on-write) page protections. */
-#define __P000 __pgprot(_PAGE_PRESENT | _PAGE_USER | CACHEDEF)
-#define __P001 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | CACHEDEF)
-#define __P010 __P000 /* Write-only copy-on-write */
-#define __P011 __P001 /* Read/Write copy-on-write */
-#define __P100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
- _PAGE_EXECUTE | CACHEDEF)
-#define __P101 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_EXECUTE | \
- _PAGE_READ | CACHEDEF)
-#define __P110 __P100 /* Write/execute copy-on-write */
-#define __P111 __P101 /* Read/Write/Execute, copy-on-write */
-
-/* Shared page protections. */
-#define __S000 __P000
-#define __S001 __P001
-#define __S010 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
- _PAGE_WRITE | CACHEDEF)
-#define __S011 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
- _PAGE_WRITE | CACHEDEF)
-#define __S100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
- _PAGE_EXECUTE | CACHEDEF)
-#define __S101 __P101
-#define __S110 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
- _PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF)
-#define __S111 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
- _PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF)
-
extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* located in head.S */
/* HUGETLB not working currently */
diff --git a/arch/hexagon/kernel/smp.c b/arch/hexagon/kernel/smp.c
index 619c56420aa0..4ba93e59370c 100644
--- a/arch/hexagon/kernel/smp.c
+++ b/arch/hexagon/kernel/smp.c
@@ -240,11 +240,6 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
send_ipi(mask, IPI_CALL_FUNC);
}
-int setup_profiling_timer(unsigned int multiplier)
-{
- return -EINVAL;
-}
-
void smp_start_cpus(void)
{
int i;
diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c
index 3167a3b5c97b..146115c9de61 100644
--- a/arch/hexagon/mm/init.c
+++ b/arch/hexagon/mm/init.c
@@ -234,3 +234,45 @@ void __init setup_arch_memory(void)
* which is called by start_kernel() later on in the process
*/
}
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ CACHEDEF),
+ [VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_READ | CACHEDEF),
+ [VM_WRITE] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ CACHEDEF),
+ [VM_WRITE | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_READ | CACHEDEF),
+ [VM_EXEC] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_EXECUTE | CACHEDEF),
+ [VM_EXEC | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_EXECUTE | _PAGE_READ |
+ CACHEDEF),
+ [VM_EXEC | VM_WRITE] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_EXECUTE | CACHEDEF),
+ [VM_EXEC | VM_WRITE | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_EXECUTE | _PAGE_READ |
+ CACHEDEF),
+ [VM_SHARED] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ CACHEDEF),
+ [VM_SHARED | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_READ | CACHEDEF),
+ [VM_SHARED | VM_WRITE] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_WRITE | CACHEDEF),
+ [VM_SHARED | VM_WRITE | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_READ | _PAGE_WRITE |
+ CACHEDEF),
+ [VM_SHARED | VM_EXEC] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_EXECUTE | CACHEDEF),
+ [VM_SHARED | VM_EXEC | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_EXECUTE | _PAGE_READ |
+ CACHEDEF),
+ [VM_SHARED | VM_EXEC | VM_WRITE] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_EXECUTE | _PAGE_WRITE |
+ CACHEDEF),
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_READ | _PAGE_EXECUTE |
+ _PAGE_WRITE | CACHEDEF)
+};
+DECLARE_VM_GET_PAGE_PROT
diff --git a/arch/hexagon/mm/vm_fault.c b/arch/hexagon/mm/vm_fault.c
index 4fac4b9eb316..f73c7cbfe326 100644
--- a/arch/hexagon/mm/vm_fault.c
+++ b/arch/hexagon/mm/vm_fault.c
@@ -96,6 +96,10 @@ good_area:
if (fault_signal_pending(fault, regs))
return;
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
/* The most common case -- we are done. */
if (likely(!(fault & VM_FAULT_ERROR))) {
if (fault & VM_FAULT_RETRY) {
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index cb93769a9f2a..26ac8ea15a9e 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -39,7 +39,6 @@ config IA64
select HAVE_FUNCTION_DESCRIPTORS
select HAVE_VIRT_CPU_ACCOUNTING
select HUGETLB_PAGE_SIZE_VARIABLE if HUGETLB_PAGE
- select VIRT_TO_BUS
select GENERIC_IRQ_PROBE
select GENERIC_PENDING_IRQ if SMP
select GENERIC_IRQ_SHOW
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
index 577be93c0818..1accb7842f58 100644
--- a/arch/ia64/include/asm/bitops.h
+++ b/arch/ia64/include/asm/bitops.h
@@ -53,7 +53,7 @@ set_bit (int nr, volatile void *addr)
}
/**
- * __set_bit - Set a bit in memory
+ * arch___set_bit - Set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
@@ -61,8 +61,8 @@ set_bit (int nr, volatile void *addr)
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
-static __inline__ void
-__set_bit (int nr, volatile void *addr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
{
*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
}
@@ -135,7 +135,7 @@ __clear_bit_unlock(int nr, void *addr)
}
/**
- * __clear_bit - Clears a bit in memory (non-atomic version)
+ * arch___clear_bit - Clears a bit in memory (non-atomic version)
* @nr: the bit to clear
* @addr: the address to start counting from
*
@@ -143,8 +143,8 @@ __clear_bit_unlock(int nr, void *addr)
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
-static __inline__ void
-__clear_bit (int nr, volatile void *addr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
*((__u32 *) addr + (nr >> 5)) &= ~(1 << (nr & 31));
}
@@ -175,7 +175,7 @@ change_bit (int nr, volatile void *addr)
}
/**
- * __change_bit - Toggle a bit in memory
+ * arch___change_bit - Toggle a bit in memory
* @nr: the bit to toggle
* @addr: the address to start counting from
*
@@ -183,8 +183,8 @@ change_bit (int nr, volatile void *addr)
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
-static __inline__ void
-__change_bit (int nr, volatile void *addr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
{
*((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
}
@@ -224,7 +224,7 @@ test_and_set_bit (int nr, volatile void *addr)
#define test_and_set_bit_lock test_and_set_bit
/**
- * __test_and_set_bit - Set a bit and return its old value
+ * arch___test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
@@ -232,8 +232,8 @@ test_and_set_bit (int nr, volatile void *addr)
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
-static __inline__ int
-__test_and_set_bit (int nr, volatile void *addr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
__u32 *p = (__u32 *) addr + (nr >> 5);
__u32 m = 1 << (nr & 31);
@@ -269,7 +269,7 @@ test_and_clear_bit (int nr, volatile void *addr)
}
/**
- * __test_and_clear_bit - Clear a bit and return its old value
+ * arch___test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
@@ -277,8 +277,8 @@ test_and_clear_bit (int nr, volatile void *addr)
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
-static __inline__ int
-__test_and_clear_bit(int nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
__u32 *p = (__u32 *) addr + (nr >> 5);
__u32 m = 1 << (nr & 31);
@@ -314,14 +314,14 @@ test_and_change_bit (int nr, volatile void *addr)
}
/**
- * __test_and_change_bit - Change a bit and return its old value
+ * arch___test_and_change_bit - Change a bit and return its old value
* @nr: Bit to change
* @addr: Address to count from
*
* This operation is non-atomic and can be reordered.
*/
-static __inline__ int
-__test_and_change_bit (int nr, void *addr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
__u32 old, bit = (1 << (nr & 31));
__u32 *m = (__u32 *) addr + (nr >> 5);
@@ -331,11 +331,8 @@ __test_and_change_bit (int nr, void *addr)
return (old & bit) != 0;
}
-static __inline__ int
-test_bit (int nr, const volatile void *addr)
-{
- return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
-}
+#define arch_test_bit generic_test_bit
+#define arch_test_bit_acquire generic_test_bit_acquire
/**
* ffz - find the first zero bit in a long word
@@ -443,6 +440,8 @@ static __inline__ unsigned long __arch_hweight64(unsigned long x)
#ifdef __KERNEL__
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
+
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>
diff --git a/arch/ia64/include/asm/dma.h b/arch/ia64/include/asm/dma.h
index 59625e9c1f9c..eaed2626ffda 100644
--- a/arch/ia64/include/asm/dma.h
+++ b/arch/ia64/include/asm/dma.h
@@ -12,8 +12,6 @@
extern unsigned long MAX_DMA_ADDRESS;
-extern int isa_dma_bridge_buggy;
-
#define free_dma(x)
#endif /* _ASM_IA64_DMA_H */
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index 6d93b923b379..ce66dfc0e719 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -96,14 +96,6 @@ extern u64 kern_mem_attribute (unsigned long phys_addr, unsigned long size);
extern int valid_phys_addr_range (phys_addr_t addr, size_t count); /* efi.c */
extern int valid_mmap_phys_addr_range (unsigned long pfn, size_t count);
-/*
- * The following two macros are deprecated and scheduled for removal.
- * Please use the PCI-DMA interface defined in <asm/pci.h> instead.
- */
-#define bus_to_virt phys_to_virt
-#define virt_to_bus virt_to_phys
-#define page_to_bus page_to_phys
-
# endif /* KERNEL */
/*
diff --git a/arch/ia64/include/asm/mmu_context.h b/arch/ia64/include/asm/mmu_context.h
index 87a0d5bc11ef..06257e355d00 100644
--- a/arch/ia64/include/asm/mmu_context.h
+++ b/arch/ia64/include/asm/mmu_context.h
@@ -124,9 +124,12 @@ reload_context (nv_mm_context_t context)
{
unsigned long rid;
unsigned long rid_incr = 0;
- unsigned long rr0, rr1, rr2, rr3, rr4, old_rr4;
+ unsigned long rr0, rr1, rr2, rr3, rr4;
+#ifdef CONFIG_HUGETLB_PAGE
+ unsigned long old_rr4;
old_rr4 = ia64_get_rr(RGN_BASE(RGN_HPAGE));
+#endif
rid = context << 3; /* make space for encoding the region number */
rid_incr = 1 << 8;
diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h
index 8c163d1d0189..fa8f545c24c9 100644
--- a/arch/ia64/include/asm/pci.h
+++ b/arch/ia64/include/asm/pci.h
@@ -63,10 +63,4 @@ static inline int pci_proc_domain(struct pci_bus *bus)
return (pci_domain_nr(bus) != 0);
}
-#define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
-static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
-{
- return channel ? isa_irq_to_vector(15) : isa_irq_to_vector(14);
-}
-
#endif /* _ASM_IA64_PCI_H */
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 7aa8f2330fb1..6925e28ae61d 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -161,24 +161,6 @@
* attempts to write to the page.
*/
/* xwr */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_READONLY /* write to priv pg -> copy & make writable */
-#define __P011 PAGE_READONLY /* ditto */
-#define __P100 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
-#define __P101 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
-#define __P110 PAGE_COPY_EXEC
-#define __P111 PAGE_COPY_EXEC
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED /* we don't have (and don't need) write-only */
-#define __S011 PAGE_SHARED
-#define __S100 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
-#define __S101 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
-#define __S110 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
-#define __S111 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
-
#define pgd_ERROR(e) printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
#if CONFIG_PGTABLE_LEVELS == 4
#define pud_ERROR(e) printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index 7cbce290f4e5..757c2f6d8d4b 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -538,7 +538,7 @@ ia64_get_irr(unsigned int vector)
{
unsigned int reg = vector / 64;
unsigned int bit = vector % 64;
- u64 irr;
+ unsigned long irr;
switch (reg) {
case 0: irr = ia64_getreg(_IA64_REG_CR_IRR0); break;
diff --git a/arch/ia64/include/uapi/asm/cmpxchg.h b/arch/ia64/include/uapi/asm/cmpxchg.h
index 2c2f3cfeaa77..ca2e02685343 100644
--- a/arch/ia64/include/uapi/asm/cmpxchg.h
+++ b/arch/ia64/include/uapi/asm/cmpxchg.h
@@ -33,24 +33,24 @@ extern void ia64_xchg_called_with_bad_pointer(void);
\
switch (size) { \
case 1: \
- __xchg_result = ia64_xchg1((__u8 *)ptr, x); \
+ __xchg_result = ia64_xchg1((__u8 __force *)ptr, x); \
break; \
\
case 2: \
- __xchg_result = ia64_xchg2((__u16 *)ptr, x); \
+ __xchg_result = ia64_xchg2((__u16 __force *)ptr, x); \
break; \
\
case 4: \
- __xchg_result = ia64_xchg4((__u32 *)ptr, x); \
+ __xchg_result = ia64_xchg4((__u32 __force *)ptr, x); \
break; \
\
case 8: \
- __xchg_result = ia64_xchg8((__u64 *)ptr, x); \
+ __xchg_result = ia64_xchg8((__u64 __force *)ptr, x); \
break; \
default: \
ia64_xchg_called_with_bad_pointer(); \
} \
- __xchg_result; \
+ (__typeof__ (*(ptr)) __force) __xchg_result; \
})
#ifndef __KERNEL__
@@ -76,42 +76,42 @@ extern long ia64_cmpxchg_called_with_bad_pointer(void);
\
switch (size) { \
case 1: \
- _o_ = (__u8) (long) (old); \
+ _o_ = (__u8) (long __force) (old); \
break; \
case 2: \
- _o_ = (__u16) (long) (old); \
+ _o_ = (__u16) (long __force) (old); \
break; \
case 4: \
- _o_ = (__u32) (long) (old); \
+ _o_ = (__u32) (long __force) (old); \
break; \
case 8: \
- _o_ = (__u64) (long) (old); \
+ _o_ = (__u64) (long __force) (old); \
break; \
default: \
break; \
} \
switch (size) { \
case 1: \
- _r_ = ia64_cmpxchg1_##sem((__u8 *) ptr, new, _o_); \
+ _r_ = ia64_cmpxchg1_##sem((__u8 __force *) ptr, new, _o_); \
break; \
\
case 2: \
- _r_ = ia64_cmpxchg2_##sem((__u16 *) ptr, new, _o_); \
+ _r_ = ia64_cmpxchg2_##sem((__u16 __force *) ptr, new, _o_); \
break; \
\
case 4: \
- _r_ = ia64_cmpxchg4_##sem((__u32 *) ptr, new, _o_); \
+ _r_ = ia64_cmpxchg4_##sem((__u32 __force *) ptr, new, _o_); \
break; \
\
case 8: \
- _r_ = ia64_cmpxchg8_##sem((__u64 *) ptr, new, _o_); \
+ _r_ = ia64_cmpxchg8_##sem((__u64 __force *) ptr, new, _o_); \
break; \
\
default: \
_r_ = ia64_cmpxchg_called_with_bad_pointer(); \
break; \
} \
- (__typeof__(old)) _r_; \
+ (__typeof__(old) __force) _r_; \
})
#define cmpxchg_acq(ptr, o, n) \
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 7b7b64eb3129..e2cc59db86bc 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -333,9 +333,3 @@ smp_send_stop (void)
{
send_IPI_allbutself(IPI_CPU_STOP);
}
-
-int
-setup_profiling_timer (unsigned int multiplier)
-{
- return -EINVAL;
-}
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 07379d1a227f..ef78c2d66cdd 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -139,6 +139,10 @@ retry:
if (fault_signal_pending(fault, regs))
return;
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
/*
* We ran out of memory, or some other thing happened
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 855d949d81df..fc4e4217e87f 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -273,7 +273,7 @@ static int __init gate_vma_init(void)
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
- gate_vma.vm_page_prot = __P101;
+ gate_vma.vm_page_prot = __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX);
return 0;
}
@@ -490,3 +490,29 @@ void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
__remove_pages(start_pfn, nr_pages, altmap);
}
#endif
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READONLY,
+ [VM_WRITE] = PAGE_READONLY,
+ [VM_WRITE | VM_READ] = PAGE_READONLY,
+ [VM_EXEC] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
+ _PAGE_AR_X_RX),
+ [VM_EXEC | VM_READ] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
+ _PAGE_AR_RX),
+ [VM_EXEC | VM_WRITE] = PAGE_COPY_EXEC,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_EXEC,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READONLY,
+ [VM_SHARED | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
+ _PAGE_AR_X_RX),
+ [VM_SHARED | VM_EXEC | VM_READ] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
+ _PAGE_AR_RX),
+ [VM_SHARED | VM_EXEC | VM_WRITE] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
+ _PAGE_AR_RWX),
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
+ _PAGE_AR_RWX)
+};
+DECLARE_VM_GET_PAGE_PROT
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index fc2465892a60..26aeb1408e56 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -2,7 +2,9 @@
config LOONGARCH
bool
default y
+ select ACPI
select ACPI_GENERIC_GSI if ACPI
+ select ACPI_MCFG if ACPI
select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
select ARCH_BINFMT_ELF_STATE
select ARCH_ENABLE_MEMORY_HOTPLUG
@@ -40,6 +42,7 @@ config LOONGARCH
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select ARCH_SPARSEMEM_ENABLE
+ select ARCH_STACKWALK
select ARCH_SUPPORTS_ACPI
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_HUGETLBFS
@@ -51,6 +54,7 @@ config LOONGARCH
select ARCH_WANTS_NO_INSTR
select BUILDTIME_TABLE_SORT
select COMMON_CLK
+ select EFI
select GENERIC_CLOCKEVENTS
select GENERIC_CMOS_UPDATE
select GENERIC_CPU_AUTOPROBE
@@ -86,6 +90,7 @@ config LOONGARCH
select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
+ select HAVE_PCI
select HAVE_PERF_EVENTS
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RSEQ
@@ -95,20 +100,28 @@ config LOONGARCH
select HAVE_VIRT_CPU_ACCOUNTING_GEN if !SMP
select IRQ_FORCED_THREADING
select IRQ_LOONGARCH_CPU
+ select MMU_GATHER_MERGE_VMAS if MMU
select MODULES_USE_ELF_RELA if MODULES
select NEED_PER_CPU_EMBED_FIRST_CHUNK
select NEED_PER_CPU_PAGE_FIRST_CHUNK
select OF
select OF_EARLY_FLATTREE
+ select PCI
+ select PCI_DOMAINS_GENERIC
+ select PCI_ECAM if ACPI
+ select PCI_LOONGSON
+ select PCI_MSI_ARCH_FALLBACKS
+ select PCI_QUIRKS
select PERF_USE_VMALLOC
select RTC_LIB
+ select SMP
select SPARSE_IRQ
select SYSCTL_EXCEPTION_TRACE
select SWIOTLB
select TRACE_IRQFLAGS_SUPPORT
select USE_PERCPU_NUMA_NODE_ID
+ select USER_STACKTRACE_SUPPORT
select ZONE_DMA32
- select MMU_GATHER_MERGE_VMAS if MMU
config 32BIT
bool
@@ -141,6 +154,10 @@ config LOCKDEP_SUPPORT
bool
default y
+config STACKTRACE_SUPPORT
+ bool
+ default y
+
# MACH_LOONGSON32 and MACH_LOONGSON64 are deliberately carried over from the
# MIPS Loongson code, to preserve Loongson-specific code paths in drivers that
# are shared between architectures, and specifically expecting the symbols.
@@ -407,7 +424,7 @@ config ARCH_SPARSEMEM_ENABLE
Say Y to support efficient handling of sparse physical memory,
for architectures which are either NUMA (Non-Uniform Memory Access)
or have huge holes in the physical address space for other reasons.
- See <file:Documentation/vm/numa.rst> for more.
+ See <file:Documentation/mm/numa.rst> for more.
config ARCH_ENABLE_THP_MIGRATION
def_bool y
diff --git a/arch/loongarch/Kconfig.debug b/arch/loongarch/Kconfig.debug
index e69de29bb2d1..8d36aab53008 100644
--- a/arch/loongarch/Kconfig.debug
+++ b/arch/loongarch/Kconfig.debug
@@ -0,0 +1,29 @@
+choice
+ prompt "Choose kernel unwinder"
+ default UNWINDER_PROLOGUE if KALLSYMS
+ help
+ This determines which method will be used for unwinding kernel stack
+ traces for panics, oopses, bugs, warnings, perf, /proc/<pid>/stack,
+ lockdep, and more.
+
+config UNWINDER_GUESS
+ bool "Guess unwinder"
+ help
+ This option enables the "guess" unwinder for unwinding kernel stack
+ traces. It scans the stack and reports every kernel text address it
+ finds. Some of the addresses it reports may be incorrect.
+
+ While this option often produces false positives, it can still be
+ useful in many cases.
+
+config UNWINDER_PROLOGUE
+ bool "Prologue unwinder"
+ depends on KALLSYMS
+ help
+ This option enables the "prologue" unwinder for unwinding kernel stack
+ traces. It unwinds the stack frame based on prologue code analysis. Symbol
+ information is needed, at least the address and length of each function.
+ Some of the addresses it reports may be incorrect (but better than the
+ Guess unwinder).
+
+endchoice
diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
index fbe4277e6404..ec3de6191276 100644
--- a/arch/loongarch/Makefile
+++ b/arch/loongarch/Makefile
@@ -47,6 +47,8 @@ cflags-y += $(call cc-option, -mno-check-zero-division)
load-y = 0x9000000000200000
bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y)
+drivers-$(CONFIG_PCI) += arch/loongarch/pci/
+
KBUILD_AFLAGS += $(cflags-y)
KBUILD_CFLAGS += $(cflags-y)
KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y)
diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig
index eb9149786b6b..3712552e18d3 100644
--- a/arch/loongarch/configs/loongson3_defconfig
+++ b/arch/loongarch/configs/loongson3_defconfig
@@ -278,6 +278,8 @@ CONFIG_NET_ACT_IPT=m
CONFIG_NET_ACT_NAT=m
CONFIG_NET_ACT_BPF=m
CONFIG_OPENVSWITCH=m
+CONFIG_VSOCKETS=m
+CONFIG_VIRTIO_VSOCKETS=m
CONFIG_NETLINK_DIAG=y
CONFIG_CGROUP_NET_PRIO=y
CONFIG_BT=m
@@ -289,6 +291,7 @@ CONFIG_MAC80211=m
CONFIG_RFKILL=m
CONFIG_RFKILL_INPUT=y
CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
CONFIG_CEPH_LIB=m
CONFIG_PCIEPORTBUS=y
CONFIG_HOTPLUG_PCI_PCIE=y
@@ -308,6 +311,8 @@ CONFIG_RAPIDIO_MPORT_CDEV=m
CONFIG_UEVENT_HELPER=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_FW_LOADER_COMPRESS=y
+CONFIG_FW_LOADER_COMPRESS_ZSTD=y
CONFIG_MTD=m
CONFIG_MTD_BLOCK=m
CONFIG_MTD_CFI=m
@@ -328,8 +333,19 @@ CONFIG_BLK_DEV_CRYPTOLOOP=y
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_VIRTIO_BLK=y
CONFIG_BLK_DEV_RBD=m
CONFIG_BLK_DEV_NVME=y
+CONFIG_NVME_MULTIPATH=y
+CONFIG_NVME_RDMA=m
+CONFIG_NVME_FC=m
+CONFIG_NVME_TCP=m
+CONFIG_NVME_TARGET=m
+CONFIG_NVME_TARGET_PASSTHRU=y
+CONFIG_NVME_TARGET_LOOP=m
+CONFIG_NVME_TARGET_RDMA=m
+CONFIG_NVME_TARGET_FC=m
+CONFIG_NVME_TARGET_TCP=m
CONFIG_EEPROM_AT24=m
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
@@ -359,6 +375,7 @@ CONFIG_SCSI_QLA_FC=m
CONFIG_TCM_QLA2XXX=m
CONFIG_SCSI_QLA_ISCSI=m
CONFIG_SCSI_LPFC=m
+CONFIG_SCSI_VIRTIO=m
CONFIG_ATA=y
CONFIG_SATA_AHCI=y
CONFIG_SATA_AHCI_PLATFORM=y
@@ -403,6 +420,7 @@ CONFIG_VXLAN=y
CONFIG_RIONET=m
CONFIG_TUN=m
CONFIG_VETH=m
+CONFIG_VIRTIO_NET=m
# CONFIG_NET_VENDOR_3COM is not set
# CONFIG_NET_VENDOR_ADAPTEC is not set
# CONFIG_NET_VENDOR_AGERE is not set
@@ -527,10 +545,12 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y
CONFIG_SERIAL_8250_RSA=y
CONFIG_SERIAL_NONSTANDARD=y
CONFIG_PRINTER=m
+CONFIG_VIRTIO_CONSOLE=y
CONFIG_IPMI_HANDLER=m
CONFIG_IPMI_DEVICE_INTERFACE=m
CONFIG_IPMI_SI=m
CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_PIIX4=y
CONFIG_I2C_GPIO=y
@@ -568,6 +588,8 @@ CONFIG_DRM_AMDGPU_SI=y
CONFIG_DRM_AMDGPU_CIK=y
CONFIG_DRM_AMDGPU_USERPTR=y
CONFIG_DRM_AST=y
+CONFIG_DRM_QXL=m
+CONFIG_DRM_VIRTIO_GPU=m
CONFIG_FB=y
CONFIG_FB_EFI=y
CONFIG_FB_RADEON=y
@@ -637,7 +659,16 @@ CONFIG_UIO=m
CONFIG_UIO_PDRV_GENIRQ=m
CONFIG_UIO_DMEM_GENIRQ=m
CONFIG_UIO_PCI_GENERIC=m
-# CONFIG_VIRTIO_MENU is not set
+CONFIG_VFIO=m
+CONFIG_VFIO_PCI=m
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=m
+CONFIG_VIRTIO_INPUT=m
+CONFIG_VIRTIO_MMIO=m
+CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
+CONFIG_VHOST_NET=m
+CONFIG_VHOST_SCSI=m
+CONFIG_VHOST_VSOCK=m
CONFIG_COMEDI=m
CONFIG_COMEDI_PCI_DRIVERS=m
CONFIG_COMEDI_8255_PCI=m
@@ -762,6 +793,7 @@ CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_PRINTK_TIME=y
CONFIG_STRIP_ASM_SYMS=y
CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/loongarch/include/asm/addrspace.h b/arch/loongarch/include/asm/addrspace.h
index b91e0733b2e5..d342935e5a72 100644
--- a/arch/loongarch/include/asm/addrspace.h
+++ b/arch/loongarch/include/asm/addrspace.h
@@ -109,4 +109,20 @@ extern unsigned long vm_map_base;
*/
#define PHYSADDR(a) ((_ACAST64_(a)) & TO_PHYS_MASK)
+/*
+ * On LoongArch, the I/O port mapping is as follows:
+ *
+ * | .... |
+ * |-----------------------|
+ * | pci io ports(16K~32M) |
+ * |-----------------------|
+ * | isa io ports(0 ~16K) |
+ * PCI_IOBASE ->|-----------------------|
+ * | .... |
+ */
+#define PCI_IOBASE ((void __iomem *)(vm_map_base + (2 * PAGE_SIZE)))
+#define PCI_IOSIZE SZ_32M
+#define ISA_IOSIZE SZ_16K
+#define IO_SPACE_LIMIT (PCI_IOSIZE - 1)
+
#endif /* _ASM_ADDRSPACE_H */
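A small standalone sketch of the window layout documented above: ISA ports occupy the first 16K of the window and PCI I/O ports the rest of it up to 32M, all addressed as offsets from PCI_IOBASE. The base value below is a stand-in, not the real mapping.

#include <stdio.h>

#define ISA_IOSIZE	(16UL * 1024)
#define PCI_IOSIZE	(32UL * 1024 * 1024)

int main(void)
{
	unsigned long iobase = 0x1000000000UL;	/* stand-in for PCI_IOBASE */
	unsigned long isa_port = 0x60;		/* classic ISA port, below 16K */
	unsigned long pci_port = 0x5000;	/* PCI I/O port, inside the 32M window */

	printf("isa %#lx -> %#lx, pci %#lx -> %#lx, limit %#lx\n",
	       isa_port, iobase + isa_port,
	       pci_port, iobase + pci_port, PCI_IOSIZE - 1);

	return (isa_port < ISA_IOSIZE && pci_port < PCI_IOSIZE) ? 0 : 1;
}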
diff --git a/arch/loongarch/include/asm/bootinfo.h b/arch/loongarch/include/asm/bootinfo.h
index 9b8d49d9e61b..e02ac4af7f6e 100644
--- a/arch/loongarch/include/asm/bootinfo.h
+++ b/arch/loongarch/include/asm/bootinfo.h
@@ -28,10 +28,10 @@ struct loongson_board_info {
struct loongson_system_configuration {
int nr_cpus;
int nr_nodes;
- int nr_io_pics;
int boot_cpu_id;
int cores_per_node;
int cores_per_package;
+ unsigned long cores_io_master;
const char *cpuname;
};
diff --git a/arch/loongarch/include/asm/cmpxchg.h b/arch/loongarch/include/asm/cmpxchg.h
index 0a9b0fac1eee..ae19e33c7754 100644
--- a/arch/loongarch/include/asm/cmpxchg.h
+++ b/arch/loongarch/include/asm/cmpxchg.h
@@ -5,8 +5,9 @@
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H
-#include <asm/barrier.h>
+#include <linux/bits.h>
#include <linux/build_bug.h>
+#include <asm/barrier.h>
#define __xchg_asm(amswap_db, m, val) \
({ \
@@ -21,10 +22,53 @@
__ret; \
})
+static inline unsigned int __xchg_small(volatile void *ptr, unsigned int val,
+ unsigned int size)
+{
+ unsigned int shift;
+ u32 old32, mask, temp;
+ volatile u32 *ptr32;
+
+ /* Mask value to the correct size. */
+ mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
+ val &= mask;
+
+ /*
+ * Calculate a shift & mask that correspond to the value we wish to
+ * exchange within the naturally aligned 4 byte integer that includes
+ * it.
+ */
+ shift = (unsigned long)ptr & 0x3;
+ shift *= BITS_PER_BYTE;
+ mask <<= shift;
+
+ /*
+ * Calculate a pointer to the naturally aligned 4 byte integer that
+ * includes our byte of interest, and load its value.
+ */
+ ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
+
+ asm volatile (
+ "1: ll.w %0, %3 \n"
+ " andn %1, %0, %z4 \n"
+ " or %1, %1, %z5 \n"
+ " sc.w %1, %2 \n"
+ " beqz %1, 1b \n"
+ : "=&r" (old32), "=&r" (temp), "=ZC" (*ptr32)
+ : "ZC" (*ptr32), "Jr" (mask), "Jr" (val << shift)
+ : "memory");
+
+ return (old32 & mask) >> shift;
+}
+
static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
int size)
{
switch (size) {
+ case 1:
+ case 2:
+ return __xchg_small(ptr, x, size);
+
case 4:
return __xchg_asm("amswap_db.w", (volatile u32 *)ptr, (u32)x);
@@ -67,10 +111,62 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
__ret; \
})
+static inline unsigned int __cmpxchg_small(volatile void *ptr, unsigned int old,
+ unsigned int new, unsigned int size)
+{
+ unsigned int shift;
+ u32 old32, mask, temp;
+ volatile u32 *ptr32;
+
+ /* Mask inputs to the correct size. */
+ mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
+ old &= mask;
+ new &= mask;
+
+ /*
+ * Calculate a shift & mask that correspond to the value we wish to
+ * compare & exchange within the naturally aligned 4 byte integer
+ * that includes it.
+ */
+ shift = (unsigned long)ptr & 0x3;
+ shift *= BITS_PER_BYTE;
+ old <<= shift;
+ new <<= shift;
+ mask <<= shift;
+
+ /*
+ * Calculate a pointer to the naturally aligned 4 byte integer that
+ * includes our byte of interest, and load its value.
+ */
+ ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
+
+ asm volatile (
+ "1: ll.w %0, %3 \n"
+ " and %1, %0, %z4 \n"
+ " bne %1, %z5, 2f \n"
+ " andn %1, %0, %z4 \n"
+ " or %1, %1, %z6 \n"
+ " sc.w %1, %2 \n"
+ " beqz %1, 1b \n"
+ " b 3f \n"
+ "2: \n"
+ __WEAK_LLSC_MB
+ "3: \n"
+ : "=&r" (old32), "=&r" (temp), "=ZC" (*ptr32)
+ : "ZC" (*ptr32), "Jr" (mask), "Jr" (old), "Jr" (new)
+ : "memory");
+
+ return (old32 & mask) >> shift;
+}
+
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
unsigned long new, unsigned int size)
{
switch (size) {
+ case 1:
+ case 2:
+ return __cmpxchg_small(ptr, old, new, size);
+
case 4:
return __cmpxchg_asm("ll.w", "sc.w", (volatile u32 *)ptr,
(u32)old, new);
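The shift-and-mask arithmetic that both __xchg_small() and __cmpxchg_small() rely on can be checked in isolation. This standalone little-endian sketch (hypothetical values, sizes 1 or 2 only) mirrors the comments above: find the aligned 32-bit container, derive the bit offset of the sub-word value, then extract and re-insert through the mask.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t word = 0x11223344;			/* aligned 32-bit container */
	unsigned char *p = (unsigned char *)&word + 1;	/* sub-word byte of interest */
	unsigned int size = 1;				/* 1- or 2-byte access */

	unsigned int shift = ((uintptr_t)p & 0x3) * 8;	/* bit offset inside container */
	uint32_t mask = ((1u << (size * 8)) - 1) << shift;

	uint32_t old = (word & mask) >> shift;		/* extract: 0x33 on little-endian */
	word = (word & ~mask) | (0xaau << shift);	/* insert a replacement byte */

	printf("old=%#x, container now %#x\n", old, word);
	return 0;
}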
diff --git a/arch/loongarch/include/asm/dma.h b/arch/loongarch/include/asm/dma.h
new file mode 100644
index 000000000000..1a8866319fe2
--- /dev/null
+++ b/arch/loongarch/include/asm/dma.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_DMA_H
+#define __ASM_DMA_H
+
+#define MAX_DMA_ADDRESS PAGE_OFFSET
+#define MAX_DMA32_PFN (1UL << (32 - PAGE_SHIFT))
+
+#endif
diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h
index 575d1bb66ffb..7b07cbb3188c 100644
--- a/arch/loongarch/include/asm/inst.h
+++ b/arch/loongarch/include/asm/inst.h
@@ -23,12 +23,33 @@ enum reg1i20_op {
lu32id_op = 0x0b,
};
+enum reg1i21_op {
+ beqz_op = 0x10,
+ bnez_op = 0x11,
+};
+
enum reg2i12_op {
+ addiw_op = 0x0a,
+ addid_op = 0x0b,
lu52id_op = 0x0c,
+ ldb_op = 0xa0,
+ ldh_op = 0xa1,
+ ldw_op = 0xa2,
+ ldd_op = 0xa3,
+ stb_op = 0xa4,
+ sth_op = 0xa5,
+ stw_op = 0xa6,
+ std_op = 0xa7,
};
enum reg2i16_op {
jirl_op = 0x13,
+ beq_op = 0x16,
+ bne_op = 0x17,
+ blt_op = 0x18,
+ bge_op = 0x19,
+ bltu_op = 0x1a,
+ bgeu_op = 0x1b,
};
struct reg0i26_format {
@@ -110,6 +131,37 @@ enum loongarch_gpr {
LOONGARCH_GPR_MAX
};
+#define is_imm12_negative(val) is_imm_negative(val, 12)
+
+static inline bool is_imm_negative(unsigned long val, unsigned int bit)
+{
+ return val & (1UL << (bit - 1));
+}
+
+static inline bool is_branch_ins(union loongarch_instruction *ip)
+{
+ return ip->reg1i21_format.opcode >= beqz_op &&
+ ip->reg1i21_format.opcode <= bgeu_op;
+}
+
+static inline bool is_ra_save_ins(union loongarch_instruction *ip)
+{
+ /* st.d $ra, $sp, offset */
+ return ip->reg2i12_format.opcode == std_op &&
+ ip->reg2i12_format.rj == LOONGARCH_GPR_SP &&
+ ip->reg2i12_format.rd == LOONGARCH_GPR_RA &&
+ !is_imm12_negative(ip->reg2i12_format.immediate);
+}
+
+static inline bool is_stack_alloc_ins(union loongarch_instruction *ip)
+{
+ /* addi.d $sp, $sp, -imm */
+ return ip->reg2i12_format.opcode == addid_op &&
+ ip->reg2i12_format.rj == LOONGARCH_GPR_SP &&
+ ip->reg2i12_format.rd == LOONGARCH_GPR_SP &&
+ is_imm12_negative(ip->reg2i12_format.immediate);
+}
+
u32 larch_insn_gen_lu32id(enum loongarch_gpr rd, int imm);
u32 larch_insn_gen_lu52id(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm);
u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, unsigned long pc, unsigned long dest);
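As a cross-check on the prologue-unwinder helpers above, here is a standalone sketch that matches the same two instructions (stack allocation and $ra save) from raw instruction words, assuming the standard LoongArch reg2i12 layout (10-bit opcode in bits 31..22, si12 in 21..10, rj in 9..5, rd in 4..0) with $ra = r1 and $sp = r3. The encodings built in main() are hypothetical test values, not taken from the patch.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define OPC(i)	((i) >> 22)
#define SI12(i)	(((i) >> 10) & 0xfff)
#define RJ(i)	(((i) >> 5) & 0x1f)
#define RD(i)	((i) & 0x1f)

static bool is_stack_alloc(uint32_t insn)	/* addi.d $sp, $sp, -imm */
{
	return OPC(insn) == 0x0b && RJ(insn) == 3 && RD(insn) == 3 &&
	       (SI12(insn) & 0x800);		/* si12 is negative */
}

static bool is_ra_save(uint32_t insn)		/* st.d $ra, $sp, off (off >= 0) */
{
	return OPC(insn) == 0xa7 && RJ(insn) == 3 && RD(insn) == 1 &&
	       !(SI12(insn) & 0x800);
}

int main(void)
{
	uint32_t alloc = (0x0bu << 22) | (0xff0u << 10) | (3 << 5) | 3;	/* si12 = -16 */
	uint32_t save  = (0xa7u << 22) | (8u << 10) | (3 << 5) | 1;	/* off = 8 */

	printf("stack alloc: %d, ra save: %d\n", is_stack_alloc(alloc), is_ra_save(save));
	return 0;
}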
diff --git a/arch/loongarch/include/asm/io.h b/arch/loongarch/include/asm/io.h
index 884599739b36..999944ea1cea 100644
--- a/arch/loongarch/include/asm/io.h
+++ b/arch/loongarch/include/asm/io.h
@@ -7,35 +7,16 @@
#define ARCH_HAS_IOREMAP_WC
-#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/addrspace.h>
-#include <asm/bug.h>
-#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#include <asm/string.h>
/*
- * On LoongArch, I/O ports mappring is following:
- *
- * | .... |
- * |-----------------------|
- * | pci io ports(64K~32M) |
- * |-----------------------|
- * | isa io ports(0 ~16K) |
- * PCI_IOBASE ->|-----------------------|
- * | .... |
- */
-#define PCI_IOBASE ((void __iomem *)(vm_map_base + (2 * PAGE_SIZE)))
-#define PCI_IOSIZE SZ_32M
-#define ISA_IOSIZE SZ_16K
-#define IO_SPACE_LIMIT (PCI_IOSIZE - 1)
-
-/*
* Change "struct page" to physical address.
*/
#define page_to_phys(page) ((phys_addr_t)page_to_pfn(page) << PAGE_SHIFT)
diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/irq.h
index 149b2123e7f4..d06d4542b634 100644
--- a/arch/loongarch/include/asm/irq.h
+++ b/arch/loongarch/include/asm/irq.h
@@ -81,9 +81,6 @@ extern struct acpi_vector_group msi_group[MAX_IO_PICS];
#define GSI_MIN_PCH_IRQ LOONGSON_PCH_IRQ_BASE
#define GSI_MAX_PCH_IRQ (LOONGSON_PCH_IRQ_BASE + 256 - 1)
-extern int find_pch_pic(u32 gsi);
-extern int eiointc_get_node(int id);
-
struct acpi_madt_lio_pic;
struct acpi_madt_eio_pic;
struct acpi_madt_ht_pic;
@@ -100,16 +97,8 @@ struct irq_domain *htvec_acpi_init(struct irq_domain *parent,
struct acpi_madt_ht_pic *acpi_htvec);
int pch_lpc_acpi_init(struct irq_domain *parent,
struct acpi_madt_lpc_pic *acpi_pchlpc);
-#if IS_ENABLED(CONFIG_LOONGSON_PCH_MSI)
int pch_msi_acpi_init(struct irq_domain *parent,
struct acpi_madt_msi_pic *acpi_pchmsi);
-#else
-static inline int pch_msi_acpi_init(struct irq_domain *parent,
- struct acpi_madt_msi_pic *acpi_pchmsi)
-{
- return 0;
-}
-#endif
int pch_pic_acpi_init(struct irq_domain *parent,
struct acpi_madt_bio_pic *acpi_pchpic);
int find_pch_pic(u32 gsi);
diff --git a/arch/loongarch/include/asm/page.h b/arch/loongarch/include/asm/page.h
index dc47fc724fa1..53f284a96182 100644
--- a/arch/loongarch/include/asm/page.h
+++ b/arch/loongarch/include/asm/page.h
@@ -33,8 +33,6 @@
#include <linux/kernel.h>
#include <linux/pfn.h>
-#define MAX_DMA32_PFN (1UL << (32 - PAGE_SHIFT))
-
/*
* It's normally defined only for FLATMEM config but it's
* used in our early mem init code for all memory models.
@@ -97,7 +95,7 @@ static inline int pfn_valid(unsigned long pfn)
#endif
-#define virt_to_pfn(kaddr) PFN_DOWN(virt_to_phys((void *)(kaddr)))
+#define virt_to_pfn(kaddr) PFN_DOWN(PHYSADDR(kaddr))
#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
extern int __virt_addr_valid(volatile void *kaddr);
diff --git a/arch/loongarch/include/asm/pci.h b/arch/loongarch/include/asm/pci.h
new file mode 100644
index 000000000000..846909d7e831
--- /dev/null
+++ b/arch/loongarch/include/asm/pci.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_PCI_H
+#define _ASM_PCI_H
+
+#include <linux/ioport.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <asm/io.h>
+
+#define PCIBIOS_MIN_IO 0x4000
+#define PCIBIOS_MIN_MEM 0x20000000
+#define PCIBIOS_MIN_CARDBUS_IO 0x4000
+
+#define HAVE_PCI_MMAP
+#define pcibios_assign_all_busses() 0
+
+extern phys_addr_t mcfg_addr_init(int node);
+
+/* generic pci stuff */
+#include <asm-generic/pci.h>
+
+#endif /* _ASM_PCI_H */
diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/asm/percpu.h
index e6569f18c6dd..0bd6b0110198 100644
--- a/arch/loongarch/include/asm/percpu.h
+++ b/arch/loongarch/include/asm/percpu.h
@@ -123,6 +123,10 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
int size)
{
switch (size) {
+ case 1:
+ case 2:
+ return __xchg_small((volatile void *)ptr, val, size);
+
case 4:
return __xchg_asm("amswap.w", (volatile u32 *)ptr, (u32)val);
@@ -204,9 +208,13 @@ do { \
#define this_cpu_write_4(pcp, val) _percpu_write(pcp, val)
#define this_cpu_write_8(pcp, val) _percpu_write(pcp, val)
+#define this_cpu_xchg_1(pcp, val) _percpu_xchg(pcp, val)
+#define this_cpu_xchg_2(pcp, val) _percpu_xchg(pcp, val)
#define this_cpu_xchg_4(pcp, val) _percpu_xchg(pcp, val)
#define this_cpu_xchg_8(pcp, val) _percpu_xchg(pcp, val)
+#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
diff --git a/arch/loongarch/include/asm/pgalloc.h b/arch/loongarch/include/asm/pgalloc.h
index b0a57b25c131..4bfeb3c9c9ac 100644
--- a/arch/loongarch/include/asm/pgalloc.h
+++ b/arch/loongarch/include/asm/pgalloc.h
@@ -66,12 +66,12 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
pmd_t *pmd;
struct page *pg;
- pg = alloc_pages(GFP_KERNEL_ACCOUNT, PMD_ORDER);
+ pg = alloc_page(GFP_KERNEL_ACCOUNT);
if (!pg)
return NULL;
if (!pgtable_pmd_page_ctor(pg)) {
- __free_pages(pg, PMD_ORDER);
+ __free_page(pg);
return NULL;
}
@@ -90,7 +90,7 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
pud_t *pud;
- pud = (pud_t *) __get_free_pages(GFP_KERNEL, PUD_ORDER);
+ pud = (pud_t *) __get_free_page(GFP_KERNEL);
if (pud)
pud_init((unsigned long)pud, (unsigned long)invalid_pmd_table);
return pud;
diff --git a/arch/loongarch/include/asm/pgtable-bits.h b/arch/loongarch/include/asm/pgtable-bits.h
index 3badd112d9ab..9ca147a29bab 100644
--- a/arch/loongarch/include/asm/pgtable-bits.h
+++ b/arch/loongarch/include/asm/pgtable-bits.h
@@ -83,25 +83,6 @@
_PAGE_GLOBAL | _PAGE_KERN | _CACHE_SUC)
#define PAGE_KERNEL_WUC __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
_PAGE_GLOBAL | _PAGE_KERN | _CACHE_WUC)
-
-#define __P000 __pgprot(_CACHE_CC | _PAGE_USER | _PAGE_PROTNONE | _PAGE_NO_EXEC | _PAGE_NO_READ)
-#define __P001 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC)
-#define __P010 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC)
-#define __P011 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC)
-#define __P100 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT)
-#define __P101 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT)
-#define __P110 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT)
-#define __P111 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT)
-
-#define __S000 __pgprot(_CACHE_CC | _PAGE_USER | _PAGE_PROTNONE | _PAGE_NO_EXEC | _PAGE_NO_READ)
-#define __S001 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC)
-#define __S010 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE)
-#define __S011 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE)
-#define __S100 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT)
-#define __S101 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT)
-#define __S110 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_WRITE)
-#define __S111 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_WRITE)
-
#ifndef __ASSEMBLY__
#define pgprot_noncached pgprot_noncached
diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
index d9e86cfa53e2..8ea57e2f0e04 100644
--- a/arch/loongarch/include/asm/pgtable.h
+++ b/arch/loongarch/include/asm/pgtable.h
@@ -21,41 +21,36 @@
#include <asm-generic/pgtable-nop4d.h>
#endif
-#define PGD_ORDER 0
-#define PUD_ORDER 0
-#define PMD_ORDER 0
-#define PTE_ORDER 0
-
#if CONFIG_PGTABLE_LEVELS == 2
-#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
+#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3))
#elif CONFIG_PGTABLE_LEVELS == 3
-#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
+#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
-#define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
+#define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT - 3))
#elif CONFIG_PGTABLE_LEVELS == 4
-#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
+#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
-#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
+#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT - 3))
#define PUD_SIZE (1UL << PUD_SHIFT)
#define PUD_MASK (~(PUD_SIZE-1))
-#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT + PUD_ORDER - 3))
+#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT - 3))
#endif
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
-#define VA_BITS (PGDIR_SHIFT + (PAGE_SHIFT + PGD_ORDER - 3))
+#define VA_BITS (PGDIR_SHIFT + (PAGE_SHIFT - 3))
-#define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) >> 3)
+#define PTRS_PER_PGD (PAGE_SIZE >> 3)
#if CONFIG_PGTABLE_LEVELS > 3
-#define PTRS_PER_PUD ((PAGE_SIZE << PUD_ORDER) >> 3)
+#define PTRS_PER_PUD (PAGE_SIZE >> 3)
#endif
#if CONFIG_PGTABLE_LEVELS > 2
-#define PTRS_PER_PMD ((PAGE_SIZE << PMD_ORDER) >> 3)
+#define PTRS_PER_PMD (PAGE_SIZE >> 3)
#endif
-#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) >> 3)
+#define PTRS_PER_PTE (PAGE_SIZE >> 3)
#define USER_PTRS_PER_PGD ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1)
@@ -64,7 +59,6 @@
#include <linux/mm_types.h>
#include <linux/mmzone.h>
#include <asm/fixmap.h>
-#include <asm/io.h>
struct mm_struct;
struct vm_area_struct;
@@ -150,7 +144,7 @@ static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
*p4d = p4dval;
}
-#define p4d_phys(p4d) virt_to_phys((void *)p4d_val(p4d))
+#define p4d_phys(p4d) PHYSADDR(p4d_val(p4d))
#define p4d_page(p4d) (pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT))
#endif
@@ -193,7 +187,7 @@ static inline pmd_t *pud_pgtable(pud_t pud)
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)
-#define pud_phys(pud) virt_to_phys((void *)pud_val(pud))
+#define pud_phys(pud) PHYSADDR(pud_val(pud))
#define pud_page(pud) (pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))
#endif
@@ -226,7 +220,7 @@ static inline void pmd_clear(pmd_t *pmdp)
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)
-#define pmd_phys(pmd) virt_to_phys((void *)pmd_val(pmd))
+#define pmd_phys(pmd) PHYSADDR(pmd_val(pmd))
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
diff --git a/arch/loongarch/include/asm/processor.h b/arch/loongarch/include/asm/processor.h
index 57ec45aa078e..1c4b4308378d 100644
--- a/arch/loongarch/include/asm/processor.h
+++ b/arch/loongarch/include/asm/processor.h
@@ -101,6 +101,10 @@ struct thread_struct {
unsigned long reg23, reg24, reg25, reg26; /* s0-s3 */
unsigned long reg27, reg28, reg29, reg30, reg31; /* s4-s8 */
+ /* __schedule() return address / call frame address */
+ unsigned long sched_ra;
+ unsigned long sched_cfa;
+
/* CSR registers */
unsigned long csr_prmd;
unsigned long csr_crmd;
@@ -129,6 +133,9 @@ struct thread_struct {
struct loongarch_fpu fpu FPU_ALIGN;
};
+#define thread_saved_ra(tsk) (tsk->thread.sched_ra)
+#define thread_saved_fp(tsk) (tsk->thread.sched_cfa)
+
#define INIT_THREAD { \
/* \
* Main processor registers \
@@ -145,6 +152,8 @@ struct thread_struct {
.reg29 = 0, \
.reg30 = 0, \
.reg31 = 0, \
+ .sched_ra = 0, \
+ .sched_cfa = 0, \
.csr_crmd = 0, \
.csr_prmd = 0, \
.csr_euen = 0, \
diff --git a/arch/loongarch/include/asm/reboot.h b/arch/loongarch/include/asm/reboot.h
deleted file mode 100644
index 51151749d8f0..000000000000
--- a/arch/loongarch/include/asm/reboot.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
- */
-#ifndef _ASM_REBOOT_H
-#define _ASM_REBOOT_H
-
-extern void (*pm_restart)(void);
-
-#endif /* _ASM_REBOOT_H */
diff --git a/arch/loongarch/include/asm/stacktrace.h b/arch/loongarch/include/asm/stacktrace.h
index 6b5c2a7aa706..f23adb15f418 100644
--- a/arch/loongarch/include/asm/stacktrace.h
+++ b/arch/loongarch/include/asm/stacktrace.h
@@ -10,6 +10,26 @@
#include <asm/loongarch.h>
#include <linux/stringify.h>
+enum stack_type {
+ STACK_TYPE_UNKNOWN,
+ STACK_TYPE_IRQ,
+ STACK_TYPE_TASK,
+};
+
+struct stack_info {
+ enum stack_type type;
+ unsigned long begin, end, next_sp;
+};
+
+struct stack_frame {
+ unsigned long fp;
+ unsigned long ra;
+};
+
+bool in_irq_stack(unsigned long stack, struct stack_info *info);
+bool in_task_stack(unsigned long stack, struct task_struct *task, struct stack_info *info);
+int get_stack_info(unsigned long stack, struct task_struct *task, struct stack_info *info);
+
#define STR_LONG_L __stringify(LONG_L)
#define STR_LONG_S __stringify(LONG_S)
#define STR_LONGSIZE __stringify(LONGSIZE)
diff --git a/arch/loongarch/include/asm/switch_to.h b/arch/loongarch/include/asm/switch_to.h
index 2a8d04375574..43a5ab162d38 100644
--- a/arch/loongarch/include/asm/switch_to.h
+++ b/arch/loongarch/include/asm/switch_to.h
@@ -15,12 +15,15 @@ struct task_struct;
* @prev: The task previously executed.
* @next: The task to begin executing.
* @next_ti: task_thread_info(next).
+ * @sched_ra: __schedule return address.
+ * @sched_cfa: __schedule call frame address.
*
* This function is used whilst scheduling to save the context of prev & load
* the context of next. Returns prev.
*/
extern asmlinkage struct task_struct *__switch_to(struct task_struct *prev,
- struct task_struct *next, struct thread_info *next_ti);
+ struct task_struct *next, struct thread_info *next_ti,
+ void *sched_ra, void *sched_cfa);
/*
* For newly created kernel threads switch_to() will return to
@@ -28,10 +31,11 @@ extern asmlinkage struct task_struct *__switch_to(struct task_struct *prev,
* That is, everything following __switch_to() will be skipped for new threads.
* So everything that matters to new threads should be placed before __switch_to().
*/
-#define switch_to(prev, next, last) \
-do { \
- lose_fpu_inatomic(1, prev); \
- (last) = __switch_to(prev, next, task_thread_info(next)); \
+#define switch_to(prev, next, last) \
+do { \
+ lose_fpu_inatomic(1, prev); \
+ (last) = __switch_to(prev, next, task_thread_info(next), \
+ __builtin_return_address(0), __builtin_frame_address(0)); \
} while (0)
#endif /* _ASM_SWITCH_TO_H */
diff --git a/arch/loongarch/include/asm/uaccess.h b/arch/loongarch/include/asm/uaccess.h
index 2b44edc604a2..a8ae2af4025a 100644
--- a/arch/loongarch/include/asm/uaccess.h
+++ b/arch/loongarch/include/asm/uaccess.h
@@ -229,13 +229,13 @@ extern unsigned long __copy_user(void *to, const void *from, __kernel_size_t n);
static inline unsigned long __must_check
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
- return __copy_user(to, from, n);
+ return __copy_user(to, (__force const void *)from, n);
}
static inline unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
- return __copy_user(to, from, n);
+ return __copy_user((__force void *)to, from, n);
}
#define INLINE_COPY_FROM_USER
diff --git a/arch/loongarch/include/asm/unwind.h b/arch/loongarch/include/asm/unwind.h
new file mode 100644
index 000000000000..6af4718bdf01
--- /dev/null
+++ b/arch/loongarch/include/asm/unwind.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Most of these ideas come from x86.
+ *
+ * Copyright (C) 2022 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_UNWIND_H
+#define _ASM_UNWIND_H
+
+#include <linux/sched.h>
+
+#include <asm/stacktrace.h>
+
+enum unwinder_type {
+ UNWINDER_GUESS,
+ UNWINDER_PROLOGUE,
+};
+
+struct unwind_state {
+ char type; /* UNWINDER_XXX */
+ struct stack_info stack_info;
+ struct task_struct *task;
+ bool first, error;
+ unsigned long sp, pc, ra;
+};
+
+void unwind_start(struct unwind_state *state,
+ struct task_struct *task, struct pt_regs *regs);
+bool unwind_next_frame(struct unwind_state *state);
+unsigned long unwind_get_return_address(struct unwind_state *state);
+
+static inline bool unwind_done(struct unwind_state *state)
+{
+ return state->stack_info.type == STACK_TYPE_UNKNOWN;
+}
+
+static inline bool unwind_error(struct unwind_state *state)
+{
+ return state->error;
+}
+
+#endif /* _ASM_UNWIND_H */
diff --git a/arch/loongarch/include/asm/vdso.h b/arch/loongarch/include/asm/vdso.h
index 8f8a0f9a4953..d3ba35eb23e7 100644
--- a/arch/loongarch/include/asm/vdso.h
+++ b/arch/loongarch/include/asm/vdso.h
@@ -7,6 +7,7 @@
#ifndef __ASM_VDSO_H
#define __ASM_VDSO_H
+#include <linux/mm.h>
#include <linux/mm_types.h>
#include <vdso/datapage.h>
diff --git a/arch/loongarch/include/asm/vdso/vdso.h b/arch/loongarch/include/asm/vdso/vdso.h
index 5a01643a65b3..3b55d32a0619 100644
--- a/arch/loongarch/include/asm/vdso/vdso.h
+++ b/arch/loongarch/include/asm/vdso/vdso.h
@@ -8,6 +8,18 @@
#include <asm/asm.h>
#include <asm/page.h>
+#include <asm/vdso.h>
+
+struct vdso_pcpu_data {
+ u32 node;
+} ____cacheline_aligned_in_smp;
+
+struct loongarch_vdso_data {
+ struct vdso_pcpu_data pdata[NR_CPUS];
+ struct vdso_data data[CS_BASES]; /* Arch-independent data */
+};
+
+#define VDSO_DATA_SIZE PAGE_ALIGN(sizeof(struct loongarch_vdso_data))
static inline unsigned long get_vdso_base(void)
{
@@ -24,7 +36,8 @@ static inline unsigned long get_vdso_base(void)
static inline const struct vdso_data *get_vdso_data(void)
{
- return (const struct vdso_data *)(get_vdso_base() - PAGE_SIZE);
+ return (const struct vdso_data *)(get_vdso_base()
+ - VDSO_DATA_SIZE + SMP_CACHE_BYTES * NR_CPUS);
}
#endif /* __ASSEMBLY__ */
diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
index 940de9173542..e5be17009fe8 100644
--- a/arch/loongarch/kernel/Makefile
+++ b/arch/loongarch/kernel/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_EFI) += efi.o
obj-$(CONFIG_CPU_HAS_FPU) += fpu.o
obj-$(CONFIG_MODULES) += module.o module-sections.o
+obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_PROC_FS) += proc.o
@@ -22,4 +23,7 @@ obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_NUMA) += numa.o
+obj-$(CONFIG_UNWINDER_GUESS) += unwind_guess.o
+obj-$(CONFIG_UNWINDER_PROLOGUE) += unwind_prologue.o
+
CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)
diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c
index 03aa14581d0a..f1c928648a4a 100644
--- a/arch/loongarch/kernel/acpi.c
+++ b/arch/loongarch/kernel/acpi.c
@@ -104,6 +104,39 @@ static int set_processor_mask(u32 id, u32 flags)
}
#endif
+static int __init
+acpi_parse_processor(union acpi_subtable_headers *header, const unsigned long end)
+{
+ struct acpi_madt_core_pic *processor = NULL;
+
+ processor = (struct acpi_madt_core_pic *)header;
+ if (BAD_MADT_ENTRY(processor, end))
+ return -EINVAL;
+
+ acpi_table_print_madt_entry(&header->common);
+#ifdef CONFIG_SMP
+ set_processor_mask(processor->core_id, processor->flags);
+#endif
+
+ return 0;
+}
+
+static int __init
+acpi_parse_eio_master(union acpi_subtable_headers *header, const unsigned long end)
+{
+ static int core = 0;
+ struct acpi_madt_eio_pic *eiointc = NULL;
+
+ eiointc = (struct acpi_madt_eio_pic *)header;
+ if (BAD_MADT_ENTRY(eiointc, end))
+ return -EINVAL;
+
+ core = eiointc->node * CORES_PER_EIO_NODE;
+ set_bit(core, &(loongson_sysconf.cores_io_master));
+
+ return 0;
+}
+
static void __init acpi_process_madt(void)
{
#ifdef CONFIG_SMP
@@ -114,6 +147,11 @@ static void __init acpi_process_madt(void)
__cpu_logical_map[i] = -1;
}
#endif
+ acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC,
+ acpi_parse_processor, MAX_CORE_PIC);
+
+ acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC,
+ acpi_parse_eio_master, MAX_IO_PICS);
loongson_sysconf.nr_cpus = num_processors;
}
diff --git a/arch/loongarch/kernel/asm-offsets.c b/arch/loongarch/kernel/asm-offsets.c
index 20cd9e16a95a..bdd88eda9513 100644
--- a/arch/loongarch/kernel/asm-offsets.c
+++ b/arch/loongarch/kernel/asm-offsets.c
@@ -103,6 +103,8 @@ void output_thread_defines(void)
OFFSET(THREAD_REG29, task_struct, thread.reg29);
OFFSET(THREAD_REG30, task_struct, thread.reg30);
OFFSET(THREAD_REG31, task_struct, thread.reg31);
+ OFFSET(THREAD_SCHED_RA, task_struct, thread.sched_ra);
+ OFFSET(THREAD_SCHED_CFA, task_struct, thread.sched_cfa);
OFFSET(THREAD_CSRCRMD, task_struct,
thread.csr_crmd);
OFFSET(THREAD_CSRPRMD, task_struct,
@@ -189,12 +191,6 @@ void output_mm_defines(void)
#endif
DEFINE(_PTE_T_LOG2, PTE_T_LOG2);
BLANK();
- DEFINE(_PGD_ORDER, PGD_ORDER);
-#ifndef __PAGETABLE_PMD_FOLDED
- DEFINE(_PMD_ORDER, PMD_ORDER);
-#endif
- DEFINE(_PTE_ORDER, PTE_ORDER);
- BLANK();
DEFINE(_PMD_SHIFT, PMD_SHIFT);
DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT);
BLANK();
diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S
index 7062cdf0e33e..c60eb66793e3 100644
--- a/arch/loongarch/kernel/head.S
+++ b/arch/loongarch/kernel/head.S
@@ -21,6 +21,12 @@ SYM_CODE_START(kernel_entry) # kernel entry point
csrwr t0, LOONGARCH_CSR_DMWIN0
li.d t0, CSR_DMW1_INIT # CA, PLV0, 0x9000 xxxx xxxx xxxx
csrwr t0, LOONGARCH_CSR_DMWIN1
+
+ /* We might not get launched at the address the kernel is linked to,
+ so we jump there. */
+ la.abs t0, 0f
+ jr t0
+0:
/* Enable PG */
li.w t0, 0xb0 # PLV=0, IE=0, PG=1
csrwr t0, LOONGARCH_CSR_CRMD
@@ -29,11 +35,6 @@ SYM_CODE_START(kernel_entry) # kernel entry point
li.w t0, 0x00 # FPE=0, SXE=0, ASXE=0, BTE=0
csrwr t0, LOONGARCH_CSR_EUEN
- /* We might not get launched at the address the kernel is linked to,
- so we jump there. */
- la.abs t0, 0f
- jr t0
-0:
la t0, __bss_start # clear .bss
st.d zero, t0, 0
la t1, __bss_stop - LONGSIZE
@@ -74,6 +75,11 @@ SYM_CODE_START(smpboot_entry)
csrwr t0, LOONGARCH_CSR_DMWIN0
li.d t0, CSR_DMW1_INIT # CA, PLV0
csrwr t0, LOONGARCH_CSR_DMWIN1
+
+ la.abs t0, 0f
+ jr t0
+0:
+ /* Enable PG */
li.w t0, 0xb0 # PLV=0, IE=0, PG=1
csrwr t0, LOONGARCH_CSR_CRMD
li.w t0, 0x04 # PLV=0, PIE=1, PWE=0
@@ -85,9 +91,6 @@ SYM_CODE_START(smpboot_entry)
ld.d sp, t0, CPU_BOOT_STACK
ld.d tp, t0, CPU_BOOT_TINFO
- la.abs t0, 0f
- jr t0
-0:
bl start_secondary
SYM_CODE_END(smpboot_entry)
diff --git a/arch/loongarch/kernel/proc.c b/arch/loongarch/kernel/proc.c
index 1effc73850fe..5c67cc4fd56d 100644
--- a/arch/loongarch/kernel/proc.c
+++ b/arch/loongarch/kernel/proc.c
@@ -106,7 +106,7 @@ static void *c_start(struct seq_file *m, loff_t *pos)
{
unsigned long i = *pos;
- return i < NR_CPUS ? (void *)(i + 1) : NULL;
+ return i < nr_cpu_ids ? (void *)(i + 1) : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c
index bfa0dfe8b7d7..660492f064e7 100644
--- a/arch/loongarch/kernel/process.c
+++ b/arch/loongarch/kernel/process.c
@@ -44,6 +44,7 @@
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/reg.h>
+#include <asm/unwind.h>
#include <asm/vdso.h>
/*
@@ -134,6 +135,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
childregs = (struct pt_regs *) childksp - 1;
/* Put the stack after the struct pt_regs. */
childksp = (unsigned long) childregs;
+ p->thread.sched_cfa = 0;
p->thread.csr_euen = 0;
p->thread.csr_crmd = csr_read32(LOONGARCH_CSR_CRMD);
p->thread.csr_prmd = csr_read32(LOONGARCH_CSR_PRMD);
@@ -144,6 +146,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
p->thread.reg23 = (unsigned long)args->fn;
p->thread.reg24 = (unsigned long)args->fn_arg;
p->thread.reg01 = (unsigned long)ret_from_kernel_thread;
+ p->thread.sched_ra = (unsigned long)ret_from_kernel_thread;
memset(childregs, 0, sizeof(struct pt_regs));
childregs->csr_euen = p->thread.csr_euen;
childregs->csr_crmd = p->thread.csr_crmd;
@@ -160,6 +163,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
p->thread.reg03 = (unsigned long) childregs;
p->thread.reg01 = (unsigned long) ret_from_fork;
+ p->thread.sched_ra = (unsigned long) ret_from_fork;
/*
* New tasks lose permission to use the fpu. This accelerates context
@@ -180,7 +184,91 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
unsigned long __get_wchan(struct task_struct *task)
{
- return 0;
+ unsigned long pc;
+ struct unwind_state state;
+
+ if (!try_get_task_stack(task))
+ return 0;
+
+ unwind_start(&state, task, NULL);
+ state.sp = thread_saved_fp(task);
+ get_stack_info(state.sp, state.task, &state.stack_info);
+ state.pc = thread_saved_ra(task);
+#ifdef CONFIG_UNWINDER_PROLOGUE
+ state.type = UNWINDER_PROLOGUE;
+#endif
+ for (; !unwind_done(&state); unwind_next_frame(&state)) {
+ pc = unwind_get_return_address(&state);
+ if (!pc)
+ break;
+ if (in_sched_functions(pc))
+ continue;
+ break;
+ }
+
+ put_task_stack(task);
+
+ return pc;
+}
+
+bool in_irq_stack(unsigned long stack, struct stack_info *info)
+{
+ unsigned long nextsp;
+ unsigned long begin = (unsigned long)this_cpu_read(irq_stack);
+ unsigned long end = begin + IRQ_STACK_START;
+
+ if (stack < begin || stack >= end)
+ return false;
+
+ nextsp = *(unsigned long *)end;
+ if (nextsp & (SZREG - 1))
+ return false;
+
+ info->begin = begin;
+ info->end = end;
+ info->next_sp = nextsp;
+ info->type = STACK_TYPE_IRQ;
+
+ return true;
+}
+
+bool in_task_stack(unsigned long stack, struct task_struct *task,
+ struct stack_info *info)
+{
+ unsigned long begin = (unsigned long)task_stack_page(task);
+ unsigned long end = begin + THREAD_SIZE - 32;
+
+ if (stack < begin || stack >= end)
+ return false;
+
+ info->begin = begin;
+ info->end = end;
+ info->next_sp = 0;
+ info->type = STACK_TYPE_TASK;
+
+ return true;
+}
+
+int get_stack_info(unsigned long stack, struct task_struct *task,
+ struct stack_info *info)
+{
+ task = task ? : current;
+
+ if (!stack || stack & (SZREG - 1))
+ goto unknown;
+
+ if (in_task_stack(stack, task, info))
+ return 0;
+
+ if (task != current)
+ goto unknown;
+
+ if (in_irq_stack(stack, info))
+ return 0;
+
+unknown:
+ info->type = STACK_TYPE_UNKNOWN;
+ return -EINVAL;
}
unsigned long stack_top(void)
diff --git a/arch/loongarch/kernel/reset.c b/arch/loongarch/kernel/reset.c
index 800c965a17ea..8c82021eb2f4 100644
--- a/arch/loongarch/kernel/reset.c
+++ b/arch/loongarch/kernel/reset.c
@@ -15,10 +15,16 @@
#include <acpi/reboot.h>
#include <asm/idle.h>
#include <asm/loongarch.h>
-#include <asm/reboot.h>
-static void default_halt(void)
+void (*pm_power_off)(void);
+EXPORT_SYMBOL(pm_power_off);
+
+void machine_halt(void)
{
+#ifdef CONFIG_SMP
+ preempt_disable();
+ smp_send_stop();
+#endif
local_irq_disable();
clear_csr_ecfg(ECFG0_IM);
@@ -30,18 +36,29 @@ static void default_halt(void)
}
}
-static void default_poweroff(void)
+void machine_power_off(void)
{
+#ifdef CONFIG_SMP
+ preempt_disable();
+ smp_send_stop();
+#endif
+ do_kernel_power_off();
#ifdef CONFIG_EFI
efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL);
#endif
+
while (true) {
__arch_cpu_idle();
}
}
-static void default_restart(void)
+void machine_restart(char *command)
{
+#ifdef CONFIG_SMP
+ preempt_disable();
+ smp_send_stop();
+#endif
+ do_kernel_restart(command);
#ifdef CONFIG_EFI
if (efi_capsule_pending(NULL))
efi_reboot(REBOOT_WARM, NULL);
@@ -55,47 +72,3 @@ static void default_restart(void)
__arch_cpu_idle();
}
}
-
-void (*pm_restart)(void);
-EXPORT_SYMBOL(pm_restart);
-
-void (*pm_power_off)(void);
-EXPORT_SYMBOL(pm_power_off);
-
-void machine_halt(void)
-{
-#ifdef CONFIG_SMP
- preempt_disable();
- smp_send_stop();
-#endif
- default_halt();
-}
-
-void machine_power_off(void)
-{
-#ifdef CONFIG_SMP
- preempt_disable();
- smp_send_stop();
-#endif
- pm_power_off();
-}
-
-void machine_restart(char *command)
-{
-#ifdef CONFIG_SMP
- preempt_disable();
- smp_send_stop();
-#endif
- do_kernel_restart(command);
- pm_restart();
-}
-
-static int __init loongarch_reboot_setup(void)
-{
- pm_restart = default_restart;
- pm_power_off = default_poweroff;
-
- return 0;
-}
-
-arch_initcall(loongarch_reboot_setup);
diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
index 09743103d9b3..b5fab308dcf2 100644
--- a/arch/loongarch/kernel/smp.c
+++ b/arch/loongarch/kernel/smp.c
@@ -242,10 +242,7 @@ void loongson3_smp_finish(void)
static bool io_master(int cpu)
{
- if (cpu == 0)
- return true;
-
- return false;
+ return test_bit(cpu, &loongson_sysconf.cores_io_master);
}
int loongson3_cpu_disable(void)
diff --git a/arch/loongarch/kernel/stacktrace.c b/arch/loongarch/kernel/stacktrace.c
new file mode 100644
index 000000000000..3a690f96f00c
--- /dev/null
+++ b/arch/loongarch/kernel/stacktrace.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Stack trace management functions
+ *
+ * Copyright (C) 2022 Loongson Technology Corporation Limited
+ */
+#include <linux/sched.h>
+#include <linux/stacktrace.h>
+#include <linux/uaccess.h>
+
+#include <asm/stacktrace.h>
+#include <asm/unwind.h>
+
+void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+ struct task_struct *task, struct pt_regs *regs)
+{
+ unsigned long addr;
+ struct pt_regs dummyregs;
+ struct unwind_state state;
+
+ regs = &dummyregs;
+
+ if (task == current) {
+ regs->regs[3] = (unsigned long)__builtin_frame_address(0);
+ regs->csr_era = (unsigned long)__builtin_return_address(0);
+ } else {
+ regs->regs[3] = thread_saved_fp(task);
+ regs->csr_era = thread_saved_ra(task);
+ }
+
+ regs->regs[1] = 0;
+ for (unwind_start(&state, task, regs);
+ !unwind_done(&state); unwind_next_frame(&state)) {
+ addr = unwind_get_return_address(&state);
+ if (!addr || !consume_entry(cookie, addr))
+ break;
+ }
+}
+
+static int
+copy_stack_frame(unsigned long fp, struct stack_frame *frame)
+{
+ int ret = 1;
+ unsigned long err;
+ unsigned long __user *user_frame_tail;
+
+ user_frame_tail = (unsigned long *)(fp - sizeof(struct stack_frame));
+ if (!access_ok(user_frame_tail, sizeof(*frame)))
+ return 0;
+
+ pagefault_disable();
+ err = (__copy_from_user_inatomic(frame, user_frame_tail, sizeof(*frame)));
+ if (err || (unsigned long)user_frame_tail >= frame->fp)
+ ret = 0;
+ pagefault_enable();
+
+ return ret;
+}
+
+void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
+ const struct pt_regs *regs)
+{
+ unsigned long fp = regs->regs[22];
+
+ while (fp && !((unsigned long)fp & 0xf)) {
+ struct stack_frame frame;
+
+ frame.fp = 0;
+ frame.ra = 0;
+ if (!copy_stack_frame(fp, &frame))
+ break;
+ if (!frame.ra)
+ break;
+ if (!consume_entry(cookie, frame.ra))
+ break;
+ fp = frame.fp;
+ }
+}
diff --git a/arch/loongarch/kernel/switch.S b/arch/loongarch/kernel/switch.S
index 37e84ac8ffc2..43ebbc3990f7 100644
--- a/arch/loongarch/kernel/switch.S
+++ b/arch/loongarch/kernel/switch.S
@@ -21,6 +21,8 @@ SYM_FUNC_START(__switch_to)
cpu_save_nonscratch a0
stptr.d ra, a0, THREAD_REG01
+ stptr.d a3, a0, THREAD_SCHED_RA
+ stptr.d a4, a0, THREAD_SCHED_CFA
move tp, a2
cpu_restore_nonscratch a1
diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c
index 79dc5eddf504..786735dcc8d6 100644
--- a/arch/loongarch/kernel/time.c
+++ b/arch/loongarch/kernel/time.c
@@ -135,7 +135,7 @@ static int get_timer_irq(void)
int constant_clockevent_init(void)
{
- unsigned int irq;
+ int irq;
unsigned int cpu = smp_processor_id();
unsigned long min_delta = 0x600;
unsigned long max_delta = (1UL << 48) - 1;
diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c
index 1bf58c65e2bf..aa1c95aaf595 100644
--- a/arch/loongarch/kernel/traps.c
+++ b/arch/loongarch/kernel/traps.c
@@ -43,6 +43,7 @@
#include <asm/stacktrace.h>
#include <asm/tlb.h>
#include <asm/types.h>
+#include <asm/unwind.h>
#include "access-helper.h"
@@ -64,19 +65,20 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
const char *loglvl, bool user)
{
unsigned long addr;
- unsigned long *sp = (unsigned long *)(regs->regs[3] & ~3);
+ struct unwind_state state;
+ struct pt_regs *pregs = (struct pt_regs *)regs;
+
+ if (!task)
+ task = current;
+
+ if (user_mode(regs))
+ state.type = UNWINDER_GUESS;
printk("%sCall Trace:", loglvl);
-#ifdef CONFIG_KALLSYMS
- printk("%s\n", loglvl);
-#endif
- while (!kstack_end(sp)) {
- if (__get_addr(&addr, sp++, user)) {
- printk("%s (Bad stack address)", loglvl);
- break;
- }
- if (__kernel_text_address(addr))
- print_ip_sym(loglvl, addr);
+ for (unwind_start(&state, task, pregs);
+ !unwind_done(&state); unwind_next_frame(&state)) {
+ addr = unwind_get_return_address(&state);
+ print_ip_sym(loglvl, addr);
}
printk("%s\n", loglvl);
}
diff --git a/arch/loongarch/kernel/unwind_guess.c b/arch/loongarch/kernel/unwind_guess.c
new file mode 100644
index 000000000000..5afa6064d73e
--- /dev/null
+++ b/arch/loongarch/kernel/unwind_guess.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Loongson Technology Corporation Limited
+ */
+#include <linux/kernel.h>
+
+#include <asm/unwind.h>
+
+unsigned long unwind_get_return_address(struct unwind_state *state)
+{
+ if (unwind_done(state))
+ return 0;
+ else if (state->first)
+ return state->pc;
+
+ return *(unsigned long *)(state->sp);
+}
+EXPORT_SYMBOL_GPL(unwind_get_return_address);
+
+void unwind_start(struct unwind_state *state, struct task_struct *task,
+ struct pt_regs *regs)
+{
+ memset(state, 0, sizeof(*state));
+
+ if (regs) {
+ state->sp = regs->regs[3];
+ state->pc = regs->csr_era;
+ }
+
+ state->task = task;
+ state->first = true;
+
+ get_stack_info(state->sp, state->task, &state->stack_info);
+
+ if (!unwind_done(state) && !__kernel_text_address(state->pc))
+ unwind_next_frame(state);
+}
+EXPORT_SYMBOL_GPL(unwind_start);
+
+bool unwind_next_frame(struct unwind_state *state)
+{
+ struct stack_info *info = &state->stack_info;
+ unsigned long addr;
+
+ if (unwind_done(state))
+ return false;
+
+ if (state->first)
+ state->first = false;
+
+ do {
+ for (state->sp += sizeof(unsigned long);
+ state->sp < info->end;
+ state->sp += sizeof(unsigned long)) {
+ addr = *(unsigned long *)(state->sp);
+
+ if (__kernel_text_address(addr))
+ return true;
+ }
+
+ state->sp = info->next_sp;
+
+ } while (!get_stack_info(state->sp, state->task, info));
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(unwind_next_frame);
diff --git a/arch/loongarch/kernel/unwind_prologue.c b/arch/loongarch/kernel/unwind_prologue.c
new file mode 100644
index 000000000000..b206d9159205
--- /dev/null
+++ b/arch/loongarch/kernel/unwind_prologue.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Loongson Technology Corporation Limited
+ */
+#include <linux/kallsyms.h>
+
+#include <asm/inst.h>
+#include <asm/ptrace.h>
+#include <asm/unwind.h>
+
+unsigned long unwind_get_return_address(struct unwind_state *state)
+{
+
+ if (unwind_done(state))
+ return 0;
+ else if (state->type)
+ return state->pc;
+ else if (state->first)
+ return state->pc;
+
+ return *(unsigned long *)(state->sp);
+
+}
+EXPORT_SYMBOL_GPL(unwind_get_return_address);
+
+static bool unwind_by_guess(struct unwind_state *state)
+{
+ struct stack_info *info = &state->stack_info;
+ unsigned long addr;
+
+ for (state->sp += sizeof(unsigned long);
+ state->sp < info->end;
+ state->sp += sizeof(unsigned long)) {
+ addr = *(unsigned long *)(state->sp);
+ if (__kernel_text_address(addr))
+ return true;
+ }
+
+ return false;
+}
+
+static bool unwind_by_prologue(struct unwind_state *state)
+{
+ struct stack_info *info = &state->stack_info;
+ union loongarch_instruction *ip, *ip_end;
+ unsigned long frame_size = 0, frame_ra = -1;
+ unsigned long size, offset, pc = state->pc;
+
+ if (state->sp >= info->end || state->sp < info->begin)
+ return false;
+
+ if (!kallsyms_lookup_size_offset(pc, &size, &offset))
+ return false;
+
+ ip = (union loongarch_instruction *)(pc - offset);
+ ip_end = (union loongarch_instruction *)pc;
+
+ while (ip < ip_end) {
+ if (is_stack_alloc_ins(ip)) {
+ frame_size = (1 << 12) - ip->reg2i12_format.immediate;
+ ip++;
+ break;
+ }
+ ip++;
+ }
+
+ if (!frame_size) {
+ if (state->first)
+ goto first;
+
+ return false;
+ }
+
+ while (ip < ip_end) {
+ if (is_ra_save_ins(ip)) {
+ frame_ra = ip->reg2i12_format.immediate;
+ break;
+ }
+ if (is_branch_ins(ip))
+ break;
+ ip++;
+ }
+
+ if (frame_ra < 0) {
+ if (state->first) {
+ state->sp = state->sp + frame_size;
+ goto first;
+ }
+ return false;
+ }
+
+ if (state->first)
+ state->first = false;
+
+ state->pc = *(unsigned long *)(state->sp + frame_ra);
+ state->sp = state->sp + frame_size;
+ return !!__kernel_text_address(state->pc);
+
+first:
+ state->first = false;
+ if (state->pc == state->ra)
+ return false;
+
+ state->pc = state->ra;
+
+ return !!__kernel_text_address(state->ra);
+}
+
+void unwind_start(struct unwind_state *state, struct task_struct *task,
+ struct pt_regs *regs)
+{
+ memset(state, 0, sizeof(*state));
+
+ if (regs && __kernel_text_address(regs->csr_era)) {
+ state->pc = regs->csr_era;
+ state->sp = regs->regs[3];
+ state->ra = regs->regs[1];
+ state->type = UNWINDER_PROLOGUE;
+ }
+
+ state->task = task;
+ state->first = true;
+
+ get_stack_info(state->sp, state->task, &state->stack_info);
+
+ if (!unwind_done(state) && !__kernel_text_address(state->pc))
+ unwind_next_frame(state);
+}
+EXPORT_SYMBOL_GPL(unwind_start);
+
+bool unwind_next_frame(struct unwind_state *state)
+{
+ struct stack_info *info = &state->stack_info;
+ struct pt_regs *regs;
+ unsigned long pc;
+
+ if (unwind_done(state))
+ return false;
+
+ do {
+ switch (state->type) {
+ case UNWINDER_GUESS:
+ state->first = false;
+ if (unwind_by_guess(state))
+ return true;
+ break;
+
+ case UNWINDER_PROLOGUE:
+ if (unwind_by_prologue(state))
+ return true;
+
+ if (info->type == STACK_TYPE_IRQ &&
+ info->end == state->sp) {
+ regs = (struct pt_regs *)info->next_sp;
+ pc = regs->csr_era;
+
+ if (user_mode(regs) || !__kernel_text_address(pc))
+ return false;
+
+ state->pc = pc;
+ state->sp = regs->regs[3];
+ state->ra = regs->regs[1];
+ state->first = true;
+ get_stack_info(state->sp, state->task, info);
+
+ return true;
+ }
+ }
+
+ state->sp = info->next_sp;
+
+ } while (!get_stack_info(state->sp, state->task, info));
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(unwind_next_frame);
diff --git a/arch/loongarch/kernel/vdso.c b/arch/loongarch/kernel/vdso.c
index e20c8ca87473..f32c38abd791 100644
--- a/arch/loongarch/kernel/vdso.c
+++ b/arch/loongarch/kernel/vdso.c
@@ -25,12 +25,14 @@
extern char vdso_start[], vdso_end[];
/* Kernel-provided data used by the VDSO. */
-static union loongarch_vdso_data {
- u8 page[PAGE_SIZE];
- struct vdso_data data[CS_BASES];
+static union {
+ u8 page[VDSO_DATA_SIZE];
+ struct loongarch_vdso_data vdata;
} loongarch_vdso_data __page_aligned_data;
-struct vdso_data *vdso_data = loongarch_vdso_data.data;
+
static struct page *vdso_pages[] = { NULL };
+struct vdso_data *vdso_data = loongarch_vdso_data.vdata.data;
+struct vdso_pcpu_data *vdso_pdata = loongarch_vdso_data.vdata.pdata;
static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
{
@@ -55,11 +57,14 @@ struct loongarch_vdso_info vdso_info = {
static int __init init_vdso(void)
{
- unsigned long i, pfn;
+ unsigned long i, cpu, pfn;
BUG_ON(!PAGE_ALIGNED(vdso_info.vdso));
BUG_ON(!PAGE_ALIGNED(vdso_info.size));
+ for_each_possible_cpu(cpu)
+ vdso_pdata[cpu].node = cpu_to_node(cpu);
+
pfn = __phys_to_pfn(__pa_symbol(vdso_info.vdso));
for (i = 0; i < vdso_info.size / PAGE_SIZE; i++)
vdso_info.code_mapping.pages[i] = pfn_to_page(pfn + i);
@@ -93,9 +98,9 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
/*
* Determine total area size. This includes the VDSO data itself
- * and the data page.
+ * and the data pages.
*/
- vvar_size = PAGE_SIZE;
+ vvar_size = VDSO_DATA_SIZE;
size = vvar_size + info->size;
data_addr = get_unmapped_area(NULL, vdso_base(), size, 0, 0);
@@ -103,7 +108,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
ret = data_addr;
goto out;
}
- vdso_addr = data_addr + PAGE_SIZE;
+ vdso_addr = data_addr + VDSO_DATA_SIZE;
vma = _install_special_mapping(mm, data_addr, vvar_size,
VM_READ | VM_MAYREAD,
@@ -115,8 +120,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
/* Map VDSO data page. */
ret = remap_pfn_range(vma, data_addr,
- virt_to_phys(vdso_data) >> PAGE_SHIFT,
- PAGE_SIZE, PAGE_READONLY);
+ virt_to_phys(&loongarch_vdso_data) >> PAGE_SHIFT,
+ vvar_size, PAGE_READONLY);
if (ret)
goto out;
diff --git a/arch/loongarch/mm/cache.c b/arch/loongarch/mm/cache.c
index 9e5ce5aa73f7..e8c68dcf6ab2 100644
--- a/arch/loongarch/mm/cache.c
+++ b/arch/loongarch/mm/cache.c
@@ -139,3 +139,49 @@ void cpu_cache_init(void)
shm_align_mask = PAGE_SIZE - 1;
}
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = __pgprot(_CACHE_CC | _PAGE_USER |
+ _PAGE_PROTNONE | _PAGE_NO_EXEC |
+ _PAGE_NO_READ),
+ [VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT |
+ _PAGE_NO_EXEC),
+ [VM_WRITE] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT |
+ _PAGE_NO_EXEC),
+ [VM_WRITE | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT |
+ _PAGE_NO_EXEC),
+ [VM_EXEC] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT),
+ [VM_EXEC | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT),
+ [VM_EXEC | VM_WRITE] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT),
+ [VM_EXEC | VM_WRITE | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT),
+ [VM_SHARED] = __pgprot(_CACHE_CC | _PAGE_USER |
+ _PAGE_PROTNONE | _PAGE_NO_EXEC |
+ _PAGE_NO_READ),
+ [VM_SHARED | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT |
+ _PAGE_NO_EXEC),
+ [VM_SHARED | VM_WRITE] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT |
+ _PAGE_NO_EXEC | _PAGE_WRITE),
+ [VM_SHARED | VM_WRITE | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT |
+ _PAGE_NO_EXEC | _PAGE_WRITE),
+ [VM_SHARED | VM_EXEC] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT),
+ [VM_SHARED | VM_EXEC | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT),
+ [VM_SHARED | VM_EXEC | VM_WRITE] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT |
+ _PAGE_WRITE),
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT |
+ _PAGE_WRITE)
+};
+DECLARE_VM_GET_PAGE_PROT
diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c
index 605579b19a00..1ccd53655cab 100644
--- a/arch/loongarch/mm/fault.c
+++ b/arch/loongarch/mm/fault.c
@@ -216,6 +216,10 @@ good_area:
return;
}
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
if (unlikely(fault & VM_FAULT_RETRY)) {
flags |= FAULT_FLAG_TRIED;
diff --git a/arch/loongarch/mm/mmap.c b/arch/loongarch/mm/mmap.c
index 52e40f0ba732..381a569635a9 100644
--- a/arch/loongarch/mm/mmap.c
+++ b/arch/loongarch/mm/mmap.c
@@ -2,16 +2,9 @@
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
-#include <linux/compiler.h>
-#include <linux/elf-randomize.h>
-#include <linux/errno.h>
+#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mman.h>
-#include <linux/export.h>
-#include <linux/personality.h>
-#include <linux/random.h>
-#include <linux/sched/signal.h>
-#include <linux/sched/mm.h>
unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
EXPORT_SYMBOL(shm_align_mask);
@@ -120,6 +113,6 @@ int __virt_addr_valid(volatile void *kaddr)
if ((vaddr < PAGE_OFFSET) || (vaddr >= vm_map_base))
return 0;
- return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
+ return pfn_valid(PFN_DOWN(PHYSADDR(kaddr)));
}
EXPORT_SYMBOL_GPL(__virt_addr_valid);
diff --git a/arch/loongarch/mm/pgtable.c b/arch/loongarch/mm/pgtable.c
index 0569647152e9..ee179ccd3e3f 100644
--- a/arch/loongarch/mm/pgtable.c
+++ b/arch/loongarch/mm/pgtable.c
@@ -13,7 +13,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *ret, *init;
- ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
+ ret = (pgd_t *) __get_free_page(GFP_KERNEL);
if (ret) {
init = pgd_offset(&init_mm, 0UL);
pgd_init((unsigned long)ret);
diff --git a/arch/loongarch/mm/tlbex.S b/arch/loongarch/mm/tlbex.S
index de19fa2d7f0d..39743337999e 100644
--- a/arch/loongarch/mm/tlbex.S
+++ b/arch/loongarch/mm/tlbex.S
@@ -83,7 +83,7 @@ vmalloc_done_load:
bnez t0, tlb_huge_update_load
csrrd t0, LOONGARCH_CSR_BADV
- srli.d t0, t0, (PAGE_SHIFT + PTE_ORDER)
+ srli.d t0, t0, PAGE_SHIFT
andi t0, t0, (PTRS_PER_PTE - 1)
slli.d t0, t0, _PTE_T_LOG2
add.d t1, ra, t0
@@ -247,7 +247,7 @@ vmalloc_done_store:
bnez t0, tlb_huge_update_store
csrrd t0, LOONGARCH_CSR_BADV
- srli.d t0, t0, (PAGE_SHIFT + PTE_ORDER)
+ srli.d t0, t0, PAGE_SHIFT
andi t0, t0, (PTRS_PER_PTE - 1)
slli.d t0, t0, _PTE_T_LOG2
add.d t1, ra, t0
@@ -414,7 +414,7 @@ vmalloc_done_modify:
bnez t0, tlb_huge_update_modify
csrrd t0, LOONGARCH_CSR_BADV
- srli.d t0, t0, (PAGE_SHIFT + PTE_ORDER)
+ srli.d t0, t0, PAGE_SHIFT
andi t0, t0, (PTRS_PER_PTE - 1)
slli.d t0, t0, _PTE_T_LOG2
add.d t1, ra, t0
diff --git a/arch/loongarch/pci/acpi.c b/arch/loongarch/pci/acpi.c
new file mode 100644
index 000000000000..bf921487333c
--- /dev/null
+++ b/arch/loongarch/pci/acpi.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
+
+#include <asm/pci.h>
+#include <asm/numa.h>
+#include <asm/loongson.h>
+
+struct pci_root_info {
+ struct acpi_pci_root_info common;
+ struct pci_config_window *cfg;
+};
+
+void pcibios_add_bus(struct pci_bus *bus)
+{
+ acpi_pci_add_bus(bus);
+}
+
+int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
+{
+ struct pci_config_window *cfg = bridge->bus->sysdata;
+ struct acpi_device *adev = to_acpi_device(cfg->parent);
+ struct device *bus_dev = &bridge->bus->dev;
+
+ ACPI_COMPANION_SET(&bridge->dev, adev);
+ set_dev_node(bus_dev, pa_to_nid(cfg->res.start));
+
+ return 0;
+}
+
+int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ struct acpi_device *adev = to_acpi_device(cfg->parent);
+ struct acpi_pci_root *root = acpi_driver_data(adev);
+
+ return root->segment;
+}
+
+static void acpi_release_root_info(struct acpi_pci_root_info *ci)
+{
+ struct pci_root_info *info;
+
+ info = container_of(ci, struct pci_root_info, common);
+ pci_ecam_free(info->cfg);
+ kfree(ci->ops);
+ kfree(info);
+}
+
+static int acpi_prepare_root_resources(struct acpi_pci_root_info *ci)
+{
+ int status;
+ struct resource_entry *entry, *tmp;
+ struct acpi_device *device = ci->bridge;
+
+ status = acpi_pci_probe_root_resources(ci);
+ if (status > 0) {
+ resource_list_for_each_entry_safe(entry, tmp, &ci->resources) {
+ if (entry->res->flags & IORESOURCE_MEM) {
+ entry->offset = ci->root->mcfg_addr & GENMASK_ULL(63, 40);
+ entry->res->start |= entry->offset;
+ entry->res->end |= entry->offset;
+ }
+ }
+ return status;
+ }
+
+ resource_list_for_each_entry_safe(entry, tmp, &ci->resources) {
+ dev_dbg(&device->dev,
+ "host bridge window %pR (ignored)\n", entry->res);
+ resource_list_destroy_entry(entry);
+ }
+
+ return 0;
+}
+
+/*
+ * Lookup the bus range for the domain in MCFG, and set up config space
+ * mapping.
+ */
+static struct pci_config_window *
+pci_acpi_setup_ecam_mapping(struct acpi_pci_root *root)
+{
+ int ret, bus_shift;
+ u16 seg = root->segment;
+ struct device *dev = &root->device->dev;
+ struct resource cfgres;
+ struct resource *bus_res = &root->secondary;
+ struct pci_config_window *cfg;
+ const struct pci_ecam_ops *ecam_ops;
+
+ ret = pci_mcfg_lookup(root, &cfgres, &ecam_ops);
+ if (ret < 0) {
+ dev_err(dev, "%04x:%pR ECAM region not found, use default value\n", seg, bus_res);
+ ecam_ops = &loongson_pci_ecam_ops;
+ root->mcfg_addr = mcfg_addr_init(0);
+ }
+
+ bus_shift = ecam_ops->bus_shift ? : 20;
+
+ cfgres.start = root->mcfg_addr + (bus_res->start << bus_shift);
+ cfgres.end = cfgres.start + (resource_size(bus_res) << bus_shift) - 1;
+ cfgres.flags = IORESOURCE_MEM;
+
+ cfg = pci_ecam_create(dev, &cfgres, bus_res, ecam_ops);
+ if (IS_ERR(cfg)) {
+ dev_err(dev, "%04x:%pR error %ld mapping ECAM\n", seg, bus_res, PTR_ERR(cfg));
+ return NULL;
+ }
+
+ return cfg;
+}
+
+struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
+{
+ struct pci_bus *bus;
+ struct pci_root_info *info;
+ struct acpi_pci_root_ops *root_ops;
+ int domain = root->segment;
+ int busnum = root->secondary.start;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ pr_warn("pci_bus %04x:%02x: ignored (out of memory)\n", domain, busnum);
+ return NULL;
+ }
+
+ root_ops = kzalloc(sizeof(*root_ops), GFP_KERNEL);
+ if (!root_ops) {
+ kfree(info);
+ return NULL;
+ }
+
+ info->cfg = pci_acpi_setup_ecam_mapping(root);
+ if (!info->cfg) {
+ kfree(info);
+ kfree(root_ops);
+ return NULL;
+ }
+
+ root_ops->release_info = acpi_release_root_info;
+ root_ops->prepare_resources = acpi_prepare_root_resources;
+ root_ops->pci_ops = (struct pci_ops *)&info->cfg->ops->pci_ops;
+
+ bus = pci_find_bus(domain, busnum);
+ if (bus) {
+ memcpy(bus->sysdata, info->cfg, sizeof(struct pci_config_window));
+ kfree(info);
+ } else {
+ struct pci_bus *child;
+
+ bus = acpi_pci_root_create(root, root_ops,
+ &info->common, info->cfg);
+ if (!bus) {
+ kfree(info);
+ kfree(root_ops);
+ return NULL;
+ }
+
+ pci_bus_size_bridges(bus);
+ pci_bus_assign_resources(bus);
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+ }
+
+ return bus;
+}
diff --git a/arch/loongarch/pci/pci.c b/arch/loongarch/pci/pci.c
new file mode 100644
index 000000000000..e9b7c34d9b6d
--- /dev/null
+++ b/arch/loongarch/pci/pci.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/vgaarb.h>
+#include <asm/loongson.h>
+
+#define PCI_DEVICE_ID_LOONGSON_HOST 0x7a00
+#define PCI_DEVICE_ID_LOONGSON_DC1 0x7a06
+#define PCI_DEVICE_ID_LOONGSON_DC2 0x7a36
+
+int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
+ int reg, int len, u32 *val)
+{
+ struct pci_bus *bus_tmp = pci_find_bus(domain, bus);
+
+ if (bus_tmp)
+ return bus_tmp->ops->read(bus_tmp, devfn, reg, len, val);
+ return -EINVAL;
+}
+
+int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
+ int reg, int len, u32 val)
+{
+ struct pci_bus *bus_tmp = pci_find_bus(domain, bus);
+
+ if (bus_tmp)
+ return bus_tmp->ops->write(bus_tmp, devfn, reg, len, val);
+ return -EINVAL;
+}
+
+phys_addr_t mcfg_addr_init(int node)
+{
+ return (((u64)node << 44) | MCFG_EXT_PCICFG_BASE);
+}
+
+static int __init pcibios_init(void)
+{
+ unsigned int lsize;
+
+ /*
+ * Set PCI cacheline size to that of the highest level in the
+ * cache hierarchy.
+ */
+ lsize = cpu_dcache_line_size();
+ lsize = cpu_vcache_line_size() ? : lsize;
+ lsize = cpu_scache_line_size() ? : lsize;
+
+ BUG_ON(!lsize);
+
+ pci_dfl_cache_line_size = lsize >> 2;
+
+ pr_debug("PCI: pci_cache_line_size set to %d bytes\n", lsize);
+
+ return 0;
+}
+
+subsys_initcall(pcibios_init);
+
+int pcibios_device_add(struct pci_dev *dev)
+{
+ int id;
+ struct irq_domain *dom;
+
+ id = pci_domain_nr(dev->bus);
+ dom = irq_find_matching_fwnode(get_pch_msi_handle(id), DOMAIN_BUS_PCI_MSI);
+ dev_set_msi_domain(&dev->dev, dom);
+
+ return 0;
+}
+
+int pcibios_alloc_irq(struct pci_dev *dev)
+{
+ if (acpi_disabled)
+ return 0;
+ if (pci_dev_msi_enabled(dev))
+ return 0;
+ return acpi_pci_irq_enable(dev);
+}
+
+static void pci_fixup_vgadev(struct pci_dev *pdev)
+{
+ struct pci_dev *devp = NULL;
+
+ while ((devp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, devp))) {
+ if (devp->vendor != PCI_VENDOR_ID_LOONGSON) {
+ vga_set_default_device(devp);
+ dev_info(&pdev->dev,
+ "Overriding boot device as %X:%X\n",
+ devp->vendor, devp->device);
+ }
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_DC1, pci_fixup_vgadev);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_DC2, pci_fixup_vgadev);
diff --git a/arch/loongarch/vdso/Makefile b/arch/loongarch/vdso/Makefile
index 92e404032257..d89e2ac75f7b 100644
--- a/arch/loongarch/vdso/Makefile
+++ b/arch/loongarch/vdso/Makefile
@@ -6,7 +6,7 @@
ARCH_REL_TYPE_ABS := R_LARCH_32|R_LARCH_64|R_LARCH_MARK_LA|R_LARCH_JUMP_SLOT
include $(srctree)/lib/vdso/Makefile
-obj-vdso-y := elf.o vgettimeofday.o sigreturn.o
+obj-vdso-y := elf.o vgetcpu.o vgettimeofday.o sigreturn.o
# Common compiler flags between ABIs.
ccflags-vdso := \
diff --git a/arch/loongarch/vdso/vdso.lds.S b/arch/loongarch/vdso/vdso.lds.S
index 955f02de4a2d..56ad855896de 100644
--- a/arch/loongarch/vdso/vdso.lds.S
+++ b/arch/loongarch/vdso/vdso.lds.S
@@ -58,6 +58,7 @@ VERSION
{
LINUX_5.10 {
global:
+ __vdso_getcpu;
__vdso_clock_getres;
__vdso_clock_gettime;
__vdso_gettimeofday;
diff --git a/arch/loongarch/vdso/vgetcpu.c b/arch/loongarch/vdso/vgetcpu.c
new file mode 100644
index 000000000000..e02e775f5360
--- /dev/null
+++ b/arch/loongarch/vdso/vgetcpu.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Fast user context implementation of getcpu()
+ */
+
+#include <asm/vdso.h>
+#include <linux/getcpu.h>
+
+static __always_inline int read_cpu_id(void)
+{
+ int cpu_id;
+
+ __asm__ __volatile__(
+ " rdtime.d $zero, %0\n"
+ : "=r" (cpu_id)
+ :
+ : "memory");
+
+ return cpu_id;
+}
+
+static __always_inline const struct vdso_pcpu_data *get_pcpu_data(void)
+{
+ return (struct vdso_pcpu_data *)(get_vdso_base() - VDSO_DATA_SIZE);
+}
+
+extern
+int __vdso_getcpu(unsigned int *cpu, unsigned int *node, struct getcpu_cache *unused);
+int __vdso_getcpu(unsigned int *cpu, unsigned int *node, struct getcpu_cache *unused)
+{
+ int cpu_id;
+ const struct vdso_pcpu_data *data;
+
+ cpu_id = read_cpu_id();
+
+ if (cpu)
+ *cpu = cpu_id;
+
+ if (node) {
+ data = get_pcpu_data();
+ *node = data[cpu_id].node;
+ }
+
+ return 0;
+}
diff --git a/arch/loongarch/vdso/vgettimeofday.c b/arch/loongarch/vdso/vgettimeofday.c
index b1f4548dae92..8f22863bd7ea 100644
--- a/arch/loongarch/vdso/vgettimeofday.c
+++ b/arch/loongarch/vdso/vgettimeofday.c
@@ -6,20 +6,23 @@
*/
#include <linux/types.h>
-int __vdso_clock_gettime(clockid_t clock,
- struct __kernel_timespec *ts)
+extern
+int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts);
+int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
{
return __cvdso_clock_gettime(clock, ts);
}
-int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
- struct timezone *tz)
+extern
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz);
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
{
return __cvdso_gettimeofday(tv, tz);
}
-int __vdso_clock_getres(clockid_t clock_id,
- struct __kernel_timespec *res)
+extern
+int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res);
+int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res)
{
return __cvdso_clock_getres(clock_id, res);
}
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 936cce42ae9a..b06faf6c0b27 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -30,7 +30,6 @@ config M68K
select OLD_SIGACTION
select OLD_SIGSUSPEND3
select UACCESS_MEMCPY if !MMU
- select VIRT_TO_BUS
select ZONE_DMA
config CPU_BIG_ENDIAN
diff --git a/arch/m68k/coldfire/device.c b/arch/m68k/coldfire/device.c
index 4218750414bb..7dab46728aed 100644
--- a/arch/m68k/coldfire/device.c
+++ b/arch/m68k/coldfire/device.c
@@ -581,7 +581,7 @@ static struct platform_device mcf_esdhc = {
};
#endif /* MCFSDHC_BASE */
-#if IS_ENABLED(CONFIG_CAN_FLEXCAN)
+#ifdef MCFFLEXCAN_SIZE
#include <linux/can/platform/flexcan.h>
@@ -620,7 +620,7 @@ static struct platform_device mcf_flexcan0 = {
.resource = mcf5441x_flexcan0_resource,
.dev.platform_data = &mcf5441x_flexcan_info,
};
-#endif /* IS_ENABLED(CONFIG_CAN_FLEXCAN) */
+#endif /* MCFFLEXCAN_SIZE */
static struct platform_device *mcf_devices[] __initdata = {
&mcf_uart,
@@ -657,7 +657,7 @@ static struct platform_device *mcf_devices[] __initdata = {
#ifdef MCFSDHC_BASE
&mcf_esdhc,
#endif
-#if IS_ENABLED(CONFIG_CAN_FLEXCAN)
+#ifdef MCFFLEXCAN_SIZE
&mcf_flexcan0,
#endif
};
diff --git a/arch/m68k/coldfire/intc-2.c b/arch/m68k/coldfire/intc-2.c
index 995093357c59..f74f0e473119 100644
--- a/arch/m68k/coldfire/intc-2.c
+++ b/arch/m68k/coldfire/intc-2.c
@@ -7,7 +7,7 @@
* family, the 5270, 5271, 5274, 5275, and the 528x family which have two such
* controllers, and the 547x and 548x families which have only one of them.
*
- * The external 7 fixed interrupts are part the the Edge Port unit of these
+ * The external 7 fixed interrupts are part of the Edge Port unit of these
* ColdFire parts. They can be configured as level or edge triggered.
*
* (C) Copyright 2009-2011, Greg Ungerer <gerg@snapgear.com>
diff --git a/arch/m68k/coldfire/m523x.c b/arch/m68k/coldfire/m523x.c
index 193c178162c1..83a997313393 100644
--- a/arch/m68k/coldfire/m523x.c
+++ b/arch/m68k/coldfire/m523x.c
@@ -28,7 +28,7 @@
DEFINE_CLK(pll, "pll.0", MCF_CLK);
DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
-struct clk_lookup m523x_clk_lookup[] = {
+static struct clk_lookup m523x_clk_lookup[] = {
CLKDEV_INIT(NULL, "pll.0", &clk_pll),
CLKDEV_INIT(NULL, "sys.0", &clk_sys),
CLKDEV_INIT("mcfpit.0", NULL, &clk_pll),
diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h
index 87c2cd66a9ce..e984af71df6b 100644
--- a/arch/m68k/include/asm/bitops.h
+++ b/arch/m68k/include/asm/bitops.h
@@ -65,8 +65,11 @@ static inline void bfset_mem_set_bit(int nr, volatile unsigned long *vaddr)
bfset_mem_set_bit(nr, vaddr))
#endif
-#define __set_bit(nr, vaddr) set_bit(nr, vaddr)
-
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ set_bit(nr, addr);
+}
static inline void bclr_reg_clear_bit(int nr, volatile unsigned long *vaddr)
{
@@ -105,8 +108,11 @@ static inline void bfclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
bfclr_mem_clear_bit(nr, vaddr))
#endif
-#define __clear_bit(nr, vaddr) clear_bit(nr, vaddr)
-
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ clear_bit(nr, addr);
+}
static inline void bchg_reg_change_bit(int nr, volatile unsigned long *vaddr)
{
@@ -145,14 +151,14 @@ static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
bfchg_mem_change_bit(nr, vaddr))
#endif
-#define __change_bit(nr, vaddr) change_bit(nr, vaddr)
-
-
-static inline int test_bit(int nr, const volatile unsigned long *vaddr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
{
- return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
+ change_bit(nr, addr);
}
+#define arch_test_bit generic_test_bit
+#define arch_test_bit_acquire generic_test_bit_acquire
static inline int bset_reg_test_and_set_bit(int nr,
volatile unsigned long *vaddr)
@@ -201,8 +207,11 @@ static inline int bfset_mem_test_and_set_bit(int nr,
bfset_mem_test_and_set_bit(nr, vaddr))
#endif
-#define __test_and_set_bit(nr, vaddr) test_and_set_bit(nr, vaddr)
-
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ return test_and_set_bit(nr, addr);
+}
static inline int bclr_reg_test_and_clear_bit(int nr,
volatile unsigned long *vaddr)
@@ -251,8 +260,11 @@ static inline int bfclr_mem_test_and_clear_bit(int nr,
bfclr_mem_test_and_clear_bit(nr, vaddr))
#endif
-#define __test_and_clear_bit(nr, vaddr) test_and_clear_bit(nr, vaddr)
-
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ return test_and_clear_bit(nr, addr);
+}
static inline int bchg_reg_test_and_change_bit(int nr,
volatile unsigned long *vaddr)
@@ -301,8 +313,11 @@ static inline int bfchg_mem_test_and_change_bit(int nr,
bfchg_mem_test_and_change_bit(nr, vaddr))
#endif
-#define __test_and_change_bit(nr, vaddr) test_and_change_bit(nr, vaddr)
-
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ return test_and_change_bit(nr, addr);
+}
/*
* The true 68020 and more advanced processors support the "bfffo"
@@ -522,6 +537,7 @@ static inline unsigned long __fls(unsigned long x)
#define clear_bit_unlock clear_bit
#define __clear_bit_unlock clear_bit_unlock
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
diff --git a/arch/m68k/include/asm/dma.h b/arch/m68k/include/asm/dma.h
index f6c5e0dfb4e5..1c8d9c5bc2fa 100644
--- a/arch/m68k/include/asm/dma.h
+++ b/arch/m68k/include/asm/dma.h
@@ -6,10 +6,4 @@
bootmem allocator (but this should do it for this) */
#define MAX_DMA_ADDRESS PAGE_OFFSET
-#ifdef CONFIG_PCI
-extern int isa_dma_bridge_buggy;
-#else
-#define isa_dma_bridge_buggy (0)
-#endif
-
#endif /* _M68K_DMA_H */
diff --git a/arch/m68k/include/asm/mcf_pgtable.h b/arch/m68k/include/asm/mcf_pgtable.h
index 94f38d76e278..b619b22823f8 100644
--- a/arch/m68k/include/asm/mcf_pgtable.h
+++ b/arch/m68k/include/asm/mcf_pgtable.h
@@ -86,65 +86,6 @@
| CF_PAGE_READABLE \
| CF_PAGE_DIRTY)
-/*
- * Page protections for initialising protection_map. See mm/mmap.c
- * for use. In general, the bit positions are xwr, and P-items are
- * private, the S-items are shared.
- */
-#define __P000 PAGE_NONE
-#define __P001 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_READABLE)
-#define __P010 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_WRITABLE)
-#define __P011 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_READABLE \
- | CF_PAGE_WRITABLE)
-#define __P100 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_EXEC)
-#define __P101 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_READABLE \
- | CF_PAGE_EXEC)
-#define __P110 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_WRITABLE \
- | CF_PAGE_EXEC)
-#define __P111 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_READABLE \
- | CF_PAGE_WRITABLE \
- | CF_PAGE_EXEC)
-
-#define __S000 PAGE_NONE
-#define __S001 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_READABLE)
-#define __S010 PAGE_SHARED
-#define __S011 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_SHARED \
- | CF_PAGE_READABLE)
-#define __S100 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_EXEC)
-#define __S101 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_READABLE \
- | CF_PAGE_EXEC)
-#define __S110 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_SHARED \
- | CF_PAGE_EXEC)
-#define __S111 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_SHARED \
- | CF_PAGE_READABLE \
- | CF_PAGE_EXEC)
-
#define PTE_MASK PAGE_MASK
#define CF_PAGE_CHG_MASK (PTE_MASK | CF_PAGE_ACCESSED | CF_PAGE_DIRTY)
diff --git a/arch/m68k/include/asm/motorola_pgtable.h b/arch/m68k/include/asm/motorola_pgtable.h
index 7c9b56e2a750..7ac3d64c6b33 100644
--- a/arch/m68k/include/asm/motorola_pgtable.h
+++ b/arch/m68k/include/asm/motorola_pgtable.h
@@ -76,35 +76,6 @@ extern unsigned long mm_cachebits;
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED | mm_cachebits)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | mm_cachebits)
-/* Alternate definitions that are compile time constants, for
- initializing protection_map. The cachebits are fixed later. */
-#define PAGE_NONE_C __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
-#define PAGE_SHARED_C __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
-#define PAGE_COPY_C __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)
-#define PAGE_READONLY_C __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)
-
-/*
- * The m68k can't do page protection for execute, and considers that the same are read.
- * Also, write permissions imply read permissions. This is the closest we can get..
- */
-#define __P000 PAGE_NONE_C
-#define __P001 PAGE_READONLY_C
-#define __P010 PAGE_COPY_C
-#define __P011 PAGE_COPY_C
-#define __P100 PAGE_READONLY_C
-#define __P101 PAGE_READONLY_C
-#define __P110 PAGE_COPY_C
-#define __P111 PAGE_COPY_C
-
-#define __S000 PAGE_NONE_C
-#define __S001 PAGE_READONLY_C
-#define __S010 PAGE_SHARED_C
-#define __S011 PAGE_SHARED_C
-#define __S100 PAGE_READONLY_C
-#define __S101 PAGE_READONLY_C
-#define __S110 PAGE_SHARED_C
-#define __S111 PAGE_SHARED_C
-
#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
/*
diff --git a/arch/m68k/include/asm/pci.h b/arch/m68k/include/asm/pci.h
index 5a4bc223743b..ccdfa0dc8413 100644
--- a/arch/m68k/include/asm/pci.h
+++ b/arch/m68k/include/asm/pci.h
@@ -2,8 +2,6 @@
#ifndef _ASM_M68K_PCI_H
#define _ASM_M68K_PCI_H
-#include <asm-generic/pci.h>
-
#define pcibios_assign_all_busses() 1
#define PCIBIOS_MIN_IO 0x00000100
diff --git a/arch/m68k/include/asm/sun3_pgtable.h b/arch/m68k/include/asm/sun3_pgtable.h
index 5e4e753f0d24..90d57e537eb1 100644
--- a/arch/m68k/include/asm/sun3_pgtable.h
+++ b/arch/m68k/include/asm/sun3_pgtable.h
@@ -66,29 +66,6 @@
| SUN3_PAGE_SYSTEM \
| SUN3_PAGE_NOCACHE)
-/*
- * Page protections for initialising protection_map. The sun3 has only two
- * protection settings, valid (implying read and execute) and writeable. These
- * are as close as we can get...
- */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY
-#define __P101 PAGE_READONLY
-#define __P110 PAGE_COPY
-#define __P111 PAGE_COPY
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY
-#define __S101 PAGE_READONLY
-#define __S110 PAGE_SHARED
-#define __S111 PAGE_SHARED
-
/* Use these fake page-protections on PMDs. */
#define SUN3_PMD_VALID (0x00000001)
#define SUN3_PMD_MASK (0x0000003F)
diff --git a/arch/m68k/include/asm/virtconvert.h b/arch/m68k/include/asm/virtconvert.h
index ca91b32dc6ef..0a27905b0036 100644
--- a/arch/m68k/include/asm/virtconvert.h
+++ b/arch/m68k/include/asm/virtconvert.h
@@ -33,9 +33,11 @@ static inline void *phys_to_virt(unsigned long address)
/*
* IO bus memory addresses are 1:1 with the physical address,
+ * deprecated globally but still used on two machines.
*/
+#if defined(CONFIG_AMIGA) || defined(CONFIG_VME)
#define virt_to_bus virt_to_phys
-#define bus_to_virt phys_to_virt
+#endif
#endif
#endif
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index 71aa9f6315dc..4d2837eb3e2a 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -141,6 +141,10 @@ good_area:
if (fault_signal_pending(fault, regs))
return 0;
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return 0;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
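[editor note] The VM_FAULT_COMPLETED check added here (and again in the microblaze fault handler below) follows the pattern core mm introduced when handle_mm_fault() was allowed to finish a fault and drop mmap_lock itself: the arch handler must bail out before it tries to unlock or retry. A minimal sketch of the resulting arch-side sequence, assuming the usual good_area path; the handle_error label is a placeholder, not taken from either file:

	fault = handle_mm_fault(vma, address, flags, regs);

	if (fault_signal_pending(fault, regs))
		return;

	/* handle_mm_fault() already released mmap_lock; nothing left to do */
	if (fault & VM_FAULT_COMPLETED)
		return;

	if (unlikely(fault & VM_FAULT_ERROR))
		goto handle_error;	/* OOM / SIGBUS / SIGSEGV handling */

	mmap_read_unlock(mm);		/* only reached while we still hold the lock */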
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
index 6f1f25125294..70aa0979e027 100644
--- a/arch/m68k/mm/mcfmmu.c
+++ b/arch/m68k/mm/mcfmmu.c
@@ -234,3 +234,58 @@ void steal_context(void)
destroy_context(mm);
}
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = __pgprot(CF_PAGE_VALID |
+ CF_PAGE_ACCESSED |
+ CF_PAGE_READABLE),
+ [VM_WRITE] = __pgprot(CF_PAGE_VALID |
+ CF_PAGE_ACCESSED |
+ CF_PAGE_WRITABLE),
+ [VM_WRITE | VM_READ] = __pgprot(CF_PAGE_VALID |
+ CF_PAGE_ACCESSED |
+ CF_PAGE_READABLE |
+ CF_PAGE_WRITABLE),
+ [VM_EXEC] = __pgprot(CF_PAGE_VALID |
+ CF_PAGE_ACCESSED |
+ CF_PAGE_EXEC),
+ [VM_EXEC | VM_READ] = __pgprot(CF_PAGE_VALID |
+ CF_PAGE_ACCESSED |
+ CF_PAGE_READABLE |
+ CF_PAGE_EXEC),
+ [VM_EXEC | VM_WRITE] = __pgprot(CF_PAGE_VALID |
+ CF_PAGE_ACCESSED |
+ CF_PAGE_WRITABLE |
+ CF_PAGE_EXEC),
+ [VM_EXEC | VM_WRITE | VM_READ] = __pgprot(CF_PAGE_VALID |
+ CF_PAGE_ACCESSED |
+ CF_PAGE_READABLE |
+ CF_PAGE_WRITABLE |
+ CF_PAGE_EXEC),
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = __pgprot(CF_PAGE_VALID |
+ CF_PAGE_ACCESSED |
+ CF_PAGE_READABLE),
+ [VM_SHARED | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_WRITE | VM_READ] = __pgprot(CF_PAGE_VALID |
+ CF_PAGE_ACCESSED |
+ CF_PAGE_READABLE |
+ CF_PAGE_SHARED),
+ [VM_SHARED | VM_EXEC] = __pgprot(CF_PAGE_VALID |
+ CF_PAGE_ACCESSED |
+ CF_PAGE_EXEC),
+ [VM_SHARED | VM_EXEC | VM_READ] = __pgprot(CF_PAGE_VALID |
+ CF_PAGE_ACCESSED |
+ CF_PAGE_READABLE |
+ CF_PAGE_EXEC),
+ [VM_SHARED | VM_EXEC | VM_WRITE] = __pgprot(CF_PAGE_VALID |
+ CF_PAGE_ACCESSED |
+ CF_PAGE_SHARED |
+ CF_PAGE_EXEC),
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __pgprot(CF_PAGE_VALID |
+ CF_PAGE_ACCESSED |
+ CF_PAGE_READABLE |
+ CF_PAGE_SHARED |
+ CF_PAGE_EXEC)
+};
+DECLARE_VM_GET_PAGE_PROT
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index df7f797c908a..2a375637e007 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -383,6 +383,35 @@ static void __init map_node(int node)
}
/*
+ * Alternate definitions that are compile time constants, for
+ * initializing protection_map. The cachebits are fixed later.
+ */
+#define PAGE_NONE_C __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
+#define PAGE_SHARED_C __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
+#define PAGE_COPY_C __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)
+#define PAGE_READONLY_C __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)
+
+static pgprot_t protection_map[16] __ro_after_init = {
+ [VM_NONE] = PAGE_NONE_C,
+ [VM_READ] = PAGE_READONLY_C,
+ [VM_WRITE] = PAGE_COPY_C,
+ [VM_WRITE | VM_READ] = PAGE_COPY_C,
+ [VM_EXEC] = PAGE_READONLY_C,
+ [VM_EXEC | VM_READ] = PAGE_READONLY_C,
+ [VM_EXEC | VM_WRITE] = PAGE_COPY_C,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_C,
+ [VM_SHARED] = PAGE_NONE_C,
+ [VM_SHARED | VM_READ] = PAGE_READONLY_C,
+ [VM_SHARED | VM_WRITE] = PAGE_SHARED_C,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED_C,
+ [VM_SHARED | VM_EXEC] = PAGE_READONLY_C,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_C,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_C,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_C
+};
+DECLARE_VM_GET_PAGE_PROT
+
+/*
* paging_init() continues the virtual memory environment setup which
* was begun by the code in arch/head.S.
*/
diff --git a/arch/m68k/mm/sun3mmu.c b/arch/m68k/mm/sun3mmu.c
index dad494224497..b619d0d4319c 100644
--- a/arch/m68k/mm/sun3mmu.c
+++ b/arch/m68k/mm/sun3mmu.c
@@ -95,3 +95,23 @@ void __init paging_init(void)
}
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READONLY,
+ [VM_WRITE] = PAGE_COPY,
+ [VM_WRITE | VM_READ] = PAGE_COPY,
+ [VM_EXEC] = PAGE_READONLY,
+ [VM_EXEC | VM_READ] = PAGE_READONLY,
+ [VM_EXEC | VM_WRITE] = PAGE_COPY,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READONLY,
+ [VM_SHARED | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC] = PAGE_READONLY,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED
+};
+DECLARE_VM_GET_PAGE_PROT
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 8cf429ad1c84..996132a5ef35 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -38,14 +38,12 @@ config MICROBLAZE
select OF_EARLY_FLATTREE
select PCI_DOMAINS_GENERIC if PCI
select PCI_SYSCALL if PCI
- select VIRT_TO_BUS
select CPU_NO_EFFICIENT_FFS
select MMU_GATHER_NO_RANGE
select SPARSE_IRQ
select ZONE_DMA
select TRACE_IRQFLAGS_SUPPORT
select GENERIC_IRQ_MULTI_HANDLER
- select HANDLE_DOMAIN_IRQ
# Endianness selection
choice
diff --git a/arch/microblaze/include/asm/dma.h b/arch/microblaze/include/asm/dma.h
index f801582be912..7484c9eb66c4 100644
--- a/arch/microblaze/include/asm/dma.h
+++ b/arch/microblaze/include/asm/dma.h
@@ -9,10 +9,4 @@
/* Virtual address corresponding to last available physical memory address. */
#define MAX_DMA_ADDRESS (CONFIG_KERNEL_START + memory_size - 1)
-#ifdef CONFIG_PCI
-extern int isa_dma_bridge_buggy;
-#else
-#define isa_dma_bridge_buggy (0)
-#endif
-
#endif /* _ASM_MICROBLAZE_DMA_H */
diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h
index b6a57f8468f0..c1d78b8977a6 100644
--- a/arch/microblaze/include/asm/io.h
+++ b/arch/microblaze/include/asm/io.h
@@ -30,8 +30,6 @@ extern resource_size_t isa_mem_base;
#define PCI_IOBASE ((void __iomem *)_IO_BASE)
#define IO_SPACE_LIMIT (0xFFFFFFFF)
-#define page_to_bus(page) (page_to_phys(page))
-
extern void iounmap(volatile void __iomem *addr);
extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index 0c72646370e1..ba348e997dbb 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -204,23 +204,6 @@ extern pte_t *va_to_pte(unsigned long address);
* We consider execute permission the same as read.
* Also, write permissions imply read permissions.
*/
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY_X
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY_X
-#define __P100 PAGE_READONLY
-#define __P101 PAGE_READONLY_X
-#define __P110 PAGE_COPY
-#define __P111 PAGE_COPY_X
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY_X
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED_X
-#define __S100 PAGE_READONLY
-#define __S101 PAGE_READONLY_X
-#define __S110 PAGE_SHARED
-#define __S111 PAGE_SHARED_X
#ifndef __ASSEMBLY__
/*
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
index b179f8f6d287..d875a0c01032 100644
--- a/arch/microblaze/kernel/entry.S
+++ b/arch/microblaze/kernel/entry.S
@@ -456,7 +456,7 @@ TRAP_return: /* Make global symbol for debugging */
/* This the initial entry point for a new child thread, with an appropriate
- stack in place that makes it look the the child is in the middle of an
+ stack in place that makes it look like the child is in the middle of a
syscall. This function is actually `returned to' from switch_thread
(copy_thread makes ret_from_fork the return address in each new thread's
saved context). */
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index a9626e6a68af..5c40c3ebe52f 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -222,6 +222,10 @@ good_area:
if (fault_signal_pending(fault, regs))
return;
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index f4e503461d24..353fabdfcbc5 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -285,3 +285,23 @@ void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
return p;
}
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READONLY_X,
+ [VM_WRITE] = PAGE_COPY,
+ [VM_WRITE | VM_READ] = PAGE_COPY_X,
+ [VM_EXEC] = PAGE_READONLY,
+ [VM_EXEC | VM_READ] = PAGE_READONLY_X,
+ [VM_EXEC | VM_WRITE] = PAGE_COPY,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_X,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READONLY_X,
+ [VM_SHARED | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED_X,
+ [VM_SHARED | VM_EXEC] = PAGE_READONLY,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_X,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_X
+};
+DECLARE_VM_GET_PAGE_PROT
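[editor note] These m68k and microblaze hunks are part of the tree-wide move away from the __P000...__S111 macro sets: each architecture now supplies its own protection_map[16], indexed by the VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits, and lets DECLARE_VM_GET_PAGE_PROT emit the standard vm_get_page_prot(). A rough sketch of what that macro expands to in the generic case (not quoted from include/linux/mm.h; arch-specific fixups such as the motorola cachebits are omitted):

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	/* the low four VM_* bits select one of the 16 precomputed protections */
	return protection_map[vm_flags &
			      (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}
EXPORT_SYMBOL(vm_get_page_prot);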
diff --git a/arch/mips/Kbuild.platforms b/arch/mips/Kbuild.platforms
index 1bc4282af064..5d04438ee12e 100644
--- a/arch/mips/Kbuild.platforms
+++ b/arch/mips/Kbuild.platforms
@@ -33,7 +33,6 @@ platform-$(CONFIG_SIBYTE_BCM1x55) += sibyte/
platform-$(CONFIG_SIBYTE_BCM1x80) += sibyte/
platform-$(CONFIG_SNI_RM) += sni/
platform-$(CONFIG_MACH_TX49XX) += txx9/
-platform-$(CONFIG_MACH_VR41XX) += vr41xx/
# include the platform specific files
include $(patsubst %/, $(srctree)/arch/mips/%/Platform, $(platform-y))
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 9457894db237..ec21f8999249 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -100,7 +100,6 @@ config MIPS
select RTC_LIB
select SYSCTL_EXCEPTION_TRACE
select TRACE_IRQFLAGS_SUPPORT
- select VIRT_TO_BUS
select ARCH_HAS_ELFCORE_COMPAT
select HAVE_ARCH_KCSAN if 64BIT
@@ -294,6 +293,7 @@ config BMIPS_GENERIC
select HARDIRQS_SW_RESEND
select HAVE_PCI
select PCI_DRIVERS_GENERIC
+ select FW_CFE
help
Build a generic DT-based kernel image that boots on select
BCM33xx cable modem chips, BCM63xx DSL chips, and BCM7xxx set-top
@@ -588,14 +588,6 @@ config MACH_PIC32
Microchip PIC32 is a family of general-purpose 32 bit MIPS core
microcontrollers.
-config MACH_VR41XX
- bool "NEC VR4100 series based machines"
- select CEVT_R4K
- select CSRC_R4K
- select SYS_HAS_CPU_VR41XX
- select SYS_SUPPORTS_MIPS16
- select GPIOLIB
-
config MACH_NINTENDO64
bool "Nintendo 64 console"
select CEVT_R4K
@@ -1012,7 +1004,6 @@ source "arch/mips/ralink/Kconfig"
source "arch/mips/sgi-ip27/Kconfig"
source "arch/mips/sibyte/Kconfig"
source "arch/mips/txx9/Kconfig"
-source "arch/mips/vr41xx/Kconfig"
source "arch/mips/cavium-octeon/Kconfig"
source "arch/mips/loongson2ef/Kconfig"
source "arch/mips/loongson32/Kconfig"
@@ -1579,17 +1570,6 @@ config CPU_R3000
might be a safe bet. If the resulting kernel does not work,
try to recompile with R3000.
-config CPU_VR41XX
- bool "R41xx"
- depends on SYS_HAS_CPU_VR41XX
- select CPU_SUPPORTS_32BIT_KERNEL
- select CPU_SUPPORTS_64BIT_KERNEL
- help
- The options selects support for the NEC VR4100 series of processors.
- Only choose this option if you have one of these processors as a
- kernel built with this option will not run on any other type of
- processor or vice versa.
-
config CPU_R4300
bool "R4300"
depends on SYS_HAS_CPU_R4300
@@ -1905,9 +1885,6 @@ config SYS_HAS_CPU_P5600
config SYS_HAS_CPU_R3000
bool
-config SYS_HAS_CPU_VR41XX
- bool
-
config SYS_HAS_CPU_R4300
bool
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index bb236de13133..4d2a3e73fc45 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -159,7 +159,6 @@ cflags-y += $(call as-option,-Wa$(comma)-mno-fix-loongson3-llsc,)
#
cflags-$(CONFIG_CPU_R3000) += -march=r3000
cflags-$(CONFIG_CPU_R4300) += -march=r4300 -Wa,--trap
-cflags-$(CONFIG_CPU_VR41XX) += -march=r4100 -Wa,--trap
cflags-$(CONFIG_CPU_R4X00) += -march=r4600 -Wa,--trap
cflags-$(CONFIG_CPU_TX49XX) += -march=r4600 -Wa,--trap
cflags-$(CONFIG_CPU_MIPS32_R1) += -march=mips32 -Wa,--trap
diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c
index 73c77814687a..b17a0d199851 100644
--- a/arch/mips/alchemy/devboards/pm.c
+++ b/arch/mips/alchemy/devboards/pm.c
@@ -17,7 +17,7 @@
* Generic suspend userspace interface for Alchemy development boards.
* This code exports a few sysfs nodes under /sys/power/db1x/ which
* can be used by userspace to en/disable all au1x-provided wakeup
- * sources and configure the timeout after which the the TOYMATCH2 irq
+ * sources and configure the timeout after which the TOYMATCH2 irq
* is to trigger a wakeup.
*/
diff --git a/arch/mips/ath79/early_printk.c b/arch/mips/ath79/early_printk.c
index 8751d067f98f..34c4dfdf46b4 100644
--- a/arch/mips/ath79/early_printk.c
+++ b/arch/mips/ath79/early_printk.c
@@ -8,6 +8,7 @@
#include <linux/io.h>
#include <linux/errno.h>
+#include <linux/serial.h>
#include <linux/serial_reg.h>
#include <asm/addrspace.h>
#include <asm/setup.h>
@@ -18,38 +19,34 @@
static void (*_prom_putchar)(char);
-static inline void prom_putchar_wait(void __iomem *reg, u32 mask, u32 val)
+static inline void prom_putchar_wait(void __iomem *reg, u32 val)
{
u32 t;
do {
t = __raw_readl(reg);
- if ((t & mask) == val)
+ if ((t & val) == val)
break;
} while (1);
}
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
static void prom_putchar_ar71xx(char ch)
{
void __iomem *base = (void __iomem *)(KSEG1ADDR(AR71XX_UART_BASE));
- prom_putchar_wait(base + UART_LSR * 4, BOTH_EMPTY, BOTH_EMPTY);
+ prom_putchar_wait(base + UART_LSR * 4, UART_LSR_BOTH_EMPTY);
__raw_writel((unsigned char)ch, base + UART_TX * 4);
- prom_putchar_wait(base + UART_LSR * 4, BOTH_EMPTY, BOTH_EMPTY);
+ prom_putchar_wait(base + UART_LSR * 4, UART_LSR_BOTH_EMPTY);
}
static void prom_putchar_ar933x(char ch)
{
void __iomem *base = (void __iomem *)(KSEG1ADDR(AR933X_UART_BASE));
- prom_putchar_wait(base + AR933X_UART_DATA_REG, AR933X_UART_DATA_TX_CSR,
- AR933X_UART_DATA_TX_CSR);
+ prom_putchar_wait(base + AR933X_UART_DATA_REG, AR933X_UART_DATA_TX_CSR);
__raw_writel(AR933X_UART_DATA_TX_CSR | (unsigned char)ch,
base + AR933X_UART_DATA_REG);
- prom_putchar_wait(base + AR933X_UART_DATA_REG, AR933X_UART_DATA_TX_CSR,
- AR933X_UART_DATA_TX_CSR);
+ prom_putchar_wait(base + AR933X_UART_DATA_REG, AR933X_UART_DATA_TX_CSR);
}
static void prom_putchar_dummy(char ch)
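[editor note] The early-console change above folds the separate mask/value pair into a single bitmask: the helper now spins until every bit in val is set, which is all either caller needed, and the open-coded BOTH_EMPTY is replaced by UART_LSR_BOTH_EMPTY from <linux/serial.h>. A stripped-down sketch of the polling pattern; wait_for_bits() and putchar_example() are hypothetical names, not the driver's:

static inline void wait_for_bits(void __iomem *reg, u32 bits)
{
	/* busy-wait until all requested status bits are set */
	while ((__raw_readl(reg) & bits) != bits)
		;
}

static void putchar_example(void __iomem *uart_base, char ch)
{
	/* wait for transmitter FIFO and shift register to drain, then send */
	wait_for_bits(uart_base + UART_LSR * 4, UART_LSR_TEMT | UART_LSR_THRE);
	__raw_writel((unsigned char)ch, uart_base + UART_TX * 4);
}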
diff --git a/arch/mips/bcm47xx/board.c b/arch/mips/bcm47xx/board.c
index 87dc76a1f941..8ef002471b9c 100644
--- a/arch/mips/bcm47xx/board.c
+++ b/arch/mips/bcm47xx/board.c
@@ -181,6 +181,7 @@ struct bcm47xx_board_type_list1 bcm47xx_board_list_board_id[] __initconst = {
{{BCM47XX_BOARD_NETGEAR_WNR1000_V3, "Netgear WNR1000 V3"}, "U12H139T50_NETGEAR"},
{{BCM47XX_BOARD_NETGEAR_WNR2000, "Netgear WNR2000"}, "U12H114T00_NETGEAR"},
{{BCM47XX_BOARD_NETGEAR_WNR3500L, "Netgear WNR3500L"}, "U12H136T99_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WNR3500L_V2, "Netgear WNR3500L V2"}, "U12H172T00_NETGEAR"},
{{BCM47XX_BOARD_NETGEAR_WNR3500U, "Netgear WNR3500U"}, "U12H136T00_NETGEAR"},
{{BCM47XX_BOARD_NETGEAR_WNR3500V2, "Netgear WNR3500 V2"}, "U12H127T00_NETGEAR"},
{{BCM47XX_BOARD_NETGEAR_WNR3500V2VC, "Netgear WNR3500 V2vc"}, "U12H127T70_NETGEAR"},
@@ -195,6 +196,7 @@ struct bcm47xx_board_type_list3 bcm47xx_board_list_board[] __initconst = {
{{BCM47XX_BOARD_PHICOMM_M1, "Phicomm M1"}, "0x0590", "80", "0x1104"},
{{BCM47XX_BOARD_ZTE_H218N, "ZTE H218N"}, "0x053d", "1234", "0x1305"},
{{BCM47XX_BOARD_NETGEAR_WNR3500L, "Netgear WNR3500L"}, "0x04CF", "3500", "02"},
+ {{BCM47XX_BOARD_NETGEAR_WNR3500L_V2, "Netgear WNR3500L V2"}, "0x052b", "3500L", "02"},
{{BCM47XX_BOARD_LINKSYS_WRT54G_TYPE_0101, "Linksys WRT54G/GS/GL"}, "0x0101", "42", "0x10"},
{{BCM47XX_BOARD_LINKSYS_WRT54G_TYPE_0467, "Linksys WRT54G/GS/GL"}, "0x0467", "42", "0x10"},
{{BCM47XX_BOARD_LINKSYS_WRT54G_TYPE_0708, "Linksys WRT54G/GS/GL"}, "0x0708", "42", "0x10"},
diff --git a/arch/mips/bcm47xx/buttons.c b/arch/mips/bcm47xx/buttons.c
index 36f0b1aafaa2..38e4a9cbcf4e 100644
--- a/arch/mips/bcm47xx/buttons.c
+++ b/arch/mips/bcm47xx/buttons.c
@@ -460,6 +460,13 @@ bcm47xx_buttons_netgear_wnr3500lv1[] __initconst = {
};
static const struct gpio_keys_button
+bcm47xx_buttons_netgear_wnr3500lv2[] __initconst = {
+ BCM47XX_GPIO_KEY(4, KEY_RESTART),
+ BCM47XX_GPIO_KEY(6, KEY_WPS_BUTTON),
+ BCM47XX_GPIO_KEY(8, KEY_RFKILL),
+};
+
+static const struct gpio_keys_button
bcm47xx_buttons_netgear_wnr834bv2[] __initconst = {
BCM47XX_GPIO_KEY(6, KEY_RESTART),
};
@@ -736,6 +743,9 @@ int __init bcm47xx_buttons_register(void)
case BCM47XX_BOARD_NETGEAR_WNR3500L:
err = bcm47xx_copy_bdata(bcm47xx_buttons_netgear_wnr3500lv1);
break;
+ case BCM47XX_BOARD_NETGEAR_WNR3500L_V2:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_netgear_wnr3500lv2);
+ break;
case BCM47XX_BOARD_NETGEAR_WNR834BV2:
err = bcm47xx_copy_bdata(bcm47xx_buttons_netgear_wnr834bv2);
break;
diff --git a/arch/mips/bcm47xx/leds.c b/arch/mips/bcm47xx/leds.c
index 4648a302a3c0..8e257d0896d2 100644
--- a/arch/mips/bcm47xx/leds.c
+++ b/arch/mips/bcm47xx/leds.c
@@ -528,6 +528,14 @@ bcm47xx_leds_netgear_wnr3500lv1[] __initconst = {
};
static const struct gpio_led
+bcm47xx_leds_netgear_wnr3500lv2[] __initconst = {
+ BCM47XX_GPIO_LED(0, "blue", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(1, "green", "wps", 0, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(3, "green", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+ BCM47XX_GPIO_LED(7, "amber", "power", 0, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
bcm47xx_leds_netgear_wnr834bv2[] __initconst = {
BCM47XX_GPIO_LED(2, "green", "power", 0, LEDS_GPIO_DEFSTATE_ON),
BCM47XX_GPIO_LED(3, "amber", "power", 0, LEDS_GPIO_DEFSTATE_OFF),
@@ -791,6 +799,9 @@ void __init bcm47xx_leds_register(void)
case BCM47XX_BOARD_NETGEAR_WNR3500L:
bcm47xx_set_pdata(bcm47xx_leds_netgear_wnr3500lv1);
break;
+ case BCM47XX_BOARD_NETGEAR_WNR3500L_V2:
+ bcm47xx_set_pdata(bcm47xx_leds_netgear_wnr3500lv2);
+ break;
case BCM47XX_BOARD_NETGEAR_WNR834BV2:
bcm47xx_set_pdata(bcm47xx_leds_netgear_wnr834bv2);
break;
diff --git a/arch/mips/bcm47xx/prom.c b/arch/mips/bcm47xx/prom.c
index 0a63721d0fbf..ab203e66ba0d 100644
--- a/arch/mips/bcm47xx/prom.c
+++ b/arch/mips/bcm47xx/prom.c
@@ -69,7 +69,7 @@ static __init void prom_init_mem(void)
* call them at the beginning of the boot.
*
* BCM47XX uses 128MB for addressing the ram, if the system contains
- * less that that amount of ram it remaps the ram more often into the
+ * less than that amount of ram it remaps the ram more often into the
* available space.
*/
diff --git a/arch/mips/bcm47xx/workarounds.c b/arch/mips/bcm47xx/workarounds.c
index 0ab95dd431b3..745c6228eb2c 100644
--- a/arch/mips/bcm47xx/workarounds.c
+++ b/arch/mips/bcm47xx/workarounds.c
@@ -22,6 +22,7 @@ void __init bcm47xx_workarounds(void)
switch (board) {
case BCM47XX_BOARD_NETGEAR_WNR3500L:
+ case BCM47XX_BOARD_NETGEAR_WNR3500L_V2:
bcm47xx_workarounds_enable_usb_power(12);
break;
case BCM47XX_BOARD_NETGEAR_WNDR3400V2:
diff --git a/arch/mips/bmips/setup.c b/arch/mips/bmips/setup.c
index 31bcfa4e08b9..e95b3f78e7cd 100644
--- a/arch/mips/bmips/setup.c
+++ b/arch/mips/bmips/setup.c
@@ -28,6 +28,7 @@
#include <asm/smp-ops.h>
#include <asm/time.h>
#include <asm/traps.h>
+#include <asm/fw/cfe/cfe_api.h>
#define RELO_NORMAL_VEC BIT(18)
@@ -123,8 +124,19 @@ static const struct bmips_quirk bmips_quirk_list[] = {
{ },
};
+static void __init bmips_init_cfe(void)
+{
+ cfe_seal = fw_arg3;
+
+ if (cfe_seal != CFE_EPTSEAL)
+ return;
+
+ cfe_init(fw_arg0, fw_arg2);
+}
+
void __init prom_init(void)
{
+ bmips_init_cfe();
bmips_cpu_setup();
register_bmips_smp_ops();
}
@@ -165,7 +177,7 @@ void __init plat_mem_setup(void)
dtb = get_fdt();
if (!dtb)
- panic("no dtb found");
+ cfe_die("no dtb found");
__dt_setup_arch(dtb);
diff --git a/arch/mips/boot/dts/img/pistachio_marduk.dts b/arch/mips/boot/dts/img/pistachio_marduk.dts
index a8708783f04b..a8da2f992b1a 100644
--- a/arch/mips/boot/dts/img/pistachio_marduk.dts
+++ b/arch/mips/boot/dts/img/pistachio_marduk.dts
@@ -59,12 +59,12 @@
keys {
compatible = "gpio-keys";
- button@1 {
+ button-1 {
label = "Button 1";
linux,code = <0x101>; /* BTN_1 */
gpios = <&gpio3 6 GPIO_ACTIVE_LOW>;
};
- button@2 {
+ button-2 {
label = "Button 2";
linux,code = <0x102>; /* BTN_2 */
gpios = <&gpio2 14 GPIO_ACTIVE_LOW>;
diff --git a/arch/mips/boot/dts/ingenic/ci20.dts b/arch/mips/boot/dts/ingenic/ci20.dts
index ab6e3dc0bc1d..37c46720c719 100644
--- a/arch/mips/boot/dts/ingenic/ci20.dts
+++ b/arch/mips/boot/dts/ingenic/ci20.dts
@@ -31,7 +31,7 @@
gpio-keys {
compatible = "gpio-keys";
- sw1 {
+ switch {
label = "ci20:sw1";
linux,code = <KEY_F13>;
gpios = <&gpd 17 GPIO_ACTIVE_HIGH>;
diff --git a/arch/mips/boot/dts/ingenic/gcw0.dts b/arch/mips/boot/dts/ingenic/gcw0.dts
index 4abb0318416c..5d33f26fd28c 100644
--- a/arch/mips/boot/dts/ingenic/gcw0.dts
+++ b/arch/mips/boot/dts/ingenic/gcw0.dts
@@ -130,89 +130,86 @@
gpio-keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
-
autorepeat;
- button@0 {
+ button-0 {
label = "D-pad up";
linux,code = <KEY_UP>;
linux,can-disable;
gpios = <&gpe 21 GPIO_ACTIVE_LOW>;
};
- button@1 {
+ button-1 {
label = "D-pad down";
linux,code = <KEY_DOWN>;
linux,can-disable;
gpios = <&gpe 25 GPIO_ACTIVE_LOW>;
};
- button@2 {
+ button-2 {
label = "D-pad left";
linux,code = <KEY_LEFT>;
linux,can-disable;
gpios = <&gpe 23 GPIO_ACTIVE_LOW>;
};
- button@3 {
+ button-3 {
label = "D-pad right";
linux,code = <KEY_RIGHT>;
linux,can-disable;
gpios = <&gpe 24 GPIO_ACTIVE_LOW>;
};
- button@4 {
+ button-4 {
label = "Button A";
linux,code = <KEY_LEFTCTRL>;
linux,can-disable;
gpios = <&gpe 29 GPIO_ACTIVE_LOW>;
};
- button@5 {
+ button-5 {
label = "Button B";
linux,code = <KEY_LEFTALT>;
linux,can-disable;
gpios = <&gpe 20 GPIO_ACTIVE_LOW>;
};
- button@6 {
+ button-6 {
label = "Button Y";
linux,code = <KEY_SPACE>;
linux,can-disable;
gpios = <&gpe 27 GPIO_ACTIVE_LOW>;
};
- button@7 {
+ button-7 {
label = "Button X";
linux,code = <KEY_LEFTSHIFT>;
linux,can-disable;
gpios = <&gpe 28 GPIO_ACTIVE_LOW>;
};
- button@8 {
+ button-8 {
label = "Left shoulder button";
linux,code = <KEY_TAB>;
linux,can-disable;
gpios = <&gpb 20 GPIO_ACTIVE_LOW>;
};
- button@9 {
+ button-9 {
label = "Right shoulder button";
linux,code = <KEY_BACKSPACE>;
linux,can-disable;
gpios = <&gpe 26 GPIO_ACTIVE_LOW>;
};
- button@10 {
+ button-10 {
label = "Start button";
linux,code = <KEY_ENTER>;
linux,can-disable;
gpios = <&gpb 21 GPIO_ACTIVE_LOW>;
};
- button@11 {
+ button-11 {
label = "Select button";
linux,code = <KEY_ESC>;
linux,can-disable;
@@ -223,7 +220,7 @@
gpios = <&gpd 18 GPIO_ACTIVE_HIGH>;
};
- button@12 {
+ button-12 {
label = "Power slider";
linux,code = <KEY_POWER>;
linux,can-disable;
@@ -231,7 +228,7 @@
wakeup-source;
};
- button@13 {
+ button-13 {
label = "Power hold";
linux,code = <KEY_PAUSE>;
linux,can-disable;
diff --git a/arch/mips/boot/dts/ingenic/rs90.dts b/arch/mips/boot/dts/ingenic/rs90.dts
index 74fee7f01352..e8df70dd42bf 100644
--- a/arch/mips/boot/dts/ingenic/rs90.dts
+++ b/arch/mips/boot/dts/ingenic/rs90.dts
@@ -52,53 +52,51 @@
keys@0 {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
- key@0 {
+ key-0 {
label = "D-pad up";
linux,code = <KEY_UP>;
gpios = <&gpc 10 GPIO_ACTIVE_LOW>;
};
- key@1 {
+ key-1 {
label = "D-pad down";
linux,code = <KEY_DOWN>;
gpios = <&gpc 11 GPIO_ACTIVE_LOW>;
};
- key@2 {
+ key-2 {
label = "D-pad left";
linux,code = <KEY_LEFT>;
gpios = <&gpb 31 GPIO_ACTIVE_LOW>;
};
- key@3 {
+ key-3 {
label = "D-pad right";
linux,code = <KEY_RIGHT>;
gpios = <&gpd 21 GPIO_ACTIVE_LOW>;
};
- key@4 {
+ key-4 {
label = "Button A";
linux,code = <KEY_LEFTCTRL>;
gpios = <&gpc 31 GPIO_ACTIVE_LOW>;
};
- key@5 {
+ key-5 {
label = "Button B";
linux,code = <KEY_LEFTALT>;
gpios = <&gpc 30 GPIO_ACTIVE_LOW>;
};
- key@6 {
+ key-6 {
label = "Right shoulder button";
linux,code = <KEY_BACKSPACE>;
gpios = <&gpc 12 GPIO_ACTIVE_LOW>;
debounce-interval = <10>;
};
- key@7 {
+ key-7 {
label = "Start button";
linux,code = <KEY_ENTER>;
gpios = <&gpd 17 GPIO_ACTIVE_LOW>;
diff --git a/arch/mips/boot/dts/mscc/ocelot.dtsi b/arch/mips/boot/dts/mscc/ocelot.dtsi
index cfc219a72bdd..6bd8a1ad94da 100644
--- a/arch/mips/boot/dts/mscc/ocelot.dtsi
+++ b/arch/mips/boot/dts/mscc/ocelot.dtsi
@@ -136,13 +136,14 @@
<0x1880000 0x10000>,
<0x1040000 0x10000>,
<0x1050000 0x10000>,
- <0x1060000 0x10000>;
+ <0x1060000 0x10000>,
+ <0x1a0 0x1c4>;
reg-names = "sys", "rew", "qs", "ptp", "port0", "port1",
"port2", "port3", "port4", "port5", "port6",
"port7", "port8", "port9", "port10", "qsys",
- "ana", "s0", "s1", "s2";
- interrupts = <18 21 22>;
- interrupt-names = "ptp_rdy", "xtr", "inj";
+ "ana", "s0", "s1", "s2", "fdma";
+ interrupts = <18 21 22 16>;
+ interrupt-names = "ptp_rdy", "xtr", "inj", "fdma";
ethernet-ports {
#address-cells = <1>;
diff --git a/arch/mips/boot/dts/pic32/pic32mzda_sk.dts b/arch/mips/boot/dts/pic32/pic32mzda_sk.dts
index d7fa5d55dbf3..ab70637bbec5 100644
--- a/arch/mips/boot/dts/pic32/pic32mzda_sk.dts
+++ b/arch/mips/boot/dts/pic32/pic32mzda_sk.dts
@@ -52,22 +52,19 @@
pinctrl-0 = <&user_buttons_s0>;
pinctrl-names = "default";
- #address-cells = <1>;
- #size-cells = <0>;
-
- button@sw1 {
+ button-1 {
label = "ESC";
linux,code = <1>;
gpios = <&gpio1 12 0>;
};
- button@sw2 {
+ button-2 {
label = "Home";
linux,code = <102>;
gpios = <&gpio1 13 0>;
};
- button@sw3 {
+ button-3 {
label = "Menu";
linux,code = <139>;
gpios = <&gpio1 14 0>;
diff --git a/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts b/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts
index 7fccf6357225..f3dff4009ab5 100644
--- a/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts
+++ b/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts
@@ -23,17 +23,15 @@
gpio-keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
- button@0 {
+ button-0 {
label = "reset";
linux,code = <KEY_RESTART>;
gpios = <&gpio 3 GPIO_ACTIVE_LOW>;
debounce-interval = <60>;
};
- button@1 {
+ button-1 {
label = "qss";
linux,code = <KEY_WPS_BUTTON>;
gpios = <&gpio 7 GPIO_ACTIVE_LOW>;
diff --git a/arch/mips/boot/dts/qca/ar9331_dpt_module.dts b/arch/mips/boot/dts/qca/ar9331_dpt_module.dts
index 7695d326df11..c857cd22f7db 100644
--- a/arch/mips/boot/dts/qca/ar9331_dpt_module.dts
+++ b/arch/mips/boot/dts/qca/ar9331_dpt_module.dts
@@ -33,10 +33,8 @@
gpio-keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
- button@0 {
+ button-0 {
label = "reset";
linux,code = <KEY_RESTART>;
gpios = <&gpio 11 GPIO_ACTIVE_LOW>;
diff --git a/arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts b/arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts
index d38aa73f1a2e..40e4c5da0e65 100644
--- a/arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts
+++ b/arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts
@@ -49,16 +49,14 @@
gpio-keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
- button@0 {
+ button-0 {
label = "jumpstart";
linux,code = <KEY_WPS_BUTTON>;
gpios = <&gpio 11 GPIO_ACTIVE_LOW>;
};
- button@1 {
+ button-1 {
label = "reset";
linux,code = <KEY_RESTART>;
gpios = <&gpio 12 GPIO_ACTIVE_LOW>;
diff --git a/arch/mips/boot/dts/qca/ar9331_omega.dts b/arch/mips/boot/dts/qca/ar9331_omega.dts
index 11778abacf66..ed184d861d5f 100644
--- a/arch/mips/boot/dts/qca/ar9331_omega.dts
+++ b/arch/mips/boot/dts/qca/ar9331_omega.dts
@@ -31,10 +31,8 @@
gpio-keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
- button@0 {
+ button-0 {
label = "reset";
linux,code = <KEY_RESTART>;
gpios = <&gpio 11 GPIO_ACTIVE_HIGH>;
diff --git a/arch/mips/boot/dts/qca/ar9331_openembed_som9331_board.dts b/arch/mips/boot/dts/qca/ar9331_openembed_som9331_board.dts
index e6622f8e8c2b..dc65ebd60bbc 100644
--- a/arch/mips/boot/dts/qca/ar9331_openembed_som9331_board.dts
+++ b/arch/mips/boot/dts/qca/ar9331_openembed_som9331_board.dts
@@ -33,10 +33,8 @@
gpio-keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
- button@0 {
+ button-0 {
label = "reset";
linux,code = <KEY_RESTART>;
gpios = <&gpio 11 GPIO_ACTIVE_HIGH>;
diff --git a/arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts b/arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts
index c8290d36cfbe..5f424c2cd781 100644
--- a/arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts
+++ b/arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts
@@ -49,22 +49,20 @@
gpio-keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
- button@0 {
+ button-0 {
label = "wps";
linux,code = <KEY_WPS_BUTTON>;
gpios = <&gpio 11 GPIO_ACTIVE_HIGH>;
};
- button@1 {
+ button-1 {
label = "sw1";
linux,code = <BTN_0>;
gpios = <&gpio 18 GPIO_ACTIVE_HIGH>;
};
- button@2 {
+ button-2 {
label = "sw2";
linux,code = <BTN_1>;
gpios = <&gpio 20 GPIO_ACTIVE_HIGH>;
diff --git a/arch/mips/boot/dts/ralink/gardena_smart_gateway_mt7688.dts b/arch/mips/boot/dts/ralink/gardena_smart_gateway_mt7688.dts
index 826e91b840a3..179558161f85 100644
--- a/arch/mips/boot/dts/ralink/gardena_smart_gateway_mt7688.dts
+++ b/arch/mips/boot/dts/ralink/gardena_smart_gateway_mt7688.dts
@@ -26,7 +26,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinmux_gpio_gpio>; /* GPIO11 */
- user_btn1 {
+ button {
label = "USER_BTN1";
gpios = <&gpio 11 GPIO_ACTIVE_LOW>;
linux,code =<KEY_PROG1> ;
diff --git a/arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc1.dts b/arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc1.dts
index 37037e4f3c3b..24eebc5a85b1 100644
--- a/arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc1.dts
+++ b/arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc1.dts
@@ -29,7 +29,7 @@
gpio-keys {
compatible = "gpio-keys";
- reset {
+ key-reset {
label = "reset";
gpios = <&gpio 18 GPIO_ACTIVE_HIGH>;
linux,code = <KEY_RESTART>;
diff --git a/arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc2.dts b/arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc2.dts
index a6201a119a1f..34006e667780 100644
--- a/arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc2.dts
+++ b/arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc2.dts
@@ -29,7 +29,7 @@
gpio-keys {
compatible = "gpio-keys";
- reset {
+ key-reset {
label = "reset";
gpios = <&gpio 18 GPIO_ACTIVE_HIGH>;
linux,code = <KEY_RESTART>;
diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig
index 4984e462be30..c1899f109e19 100644
--- a/arch/mips/cavium-octeon/Kconfig
+++ b/arch/mips/cavium-octeon/Kconfig
@@ -67,6 +67,18 @@ config CAVIUM_OCTEON_LOCK_L2_MEMCPY
help
Lock the kernel's implementation of memcpy() into L2.
+config CAVIUM_RESERVE32
+ int "Memory to reserve for user processes shared region (MB)"
+ range 0 1536
+ default "0"
+ help
+ Reserve a shared memory region for user processes to use for hardware
+ memory buffers. This is required for 32bit applications to be able to
+ send and receive packets directly. Applications access this memory by
+ memory mapping /dev/mem for the addresses in /proc/octeon_info. For
+ optimal performance with HugeTLBs, keep this size an even number of
+ megabytes.
+
config OCTEON_ILM
tristate "Module to measure interrupt latency using Octeon CIU Timer"
help
diff --git a/arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c b/arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c
index 20189e9ad94d..bf13e35871b2 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c
@@ -57,14 +57,27 @@ EXPORT_SYMBOL_GPL(__cvmx_cmd_queue_state_ptr);
static cvmx_cmd_queue_result_t __cvmx_cmd_queue_init_state_ptr(void)
{
char *alloc_name = "cvmx_cmd_queues";
+#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
+ extern uint64_t octeon_reserve32_memory;
+#endif
if (likely(__cvmx_cmd_queue_state_ptr))
return CVMX_CMD_QUEUE_SUCCESS;
- __cvmx_cmd_queue_state_ptr =
- cvmx_bootmem_alloc_named(sizeof(*__cvmx_cmd_queue_state_ptr),
- 128,
- alloc_name);
+#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
+ if (octeon_reserve32_memory)
+ __cvmx_cmd_queue_state_ptr =
+ cvmx_bootmem_alloc_named_range(sizeof(*__cvmx_cmd_queue_state_ptr),
+ octeon_reserve32_memory,
+ octeon_reserve32_memory +
+ (CONFIG_CAVIUM_RESERVE32 <<
+ 20) - 1, 128, alloc_name);
+ else
+#endif
+ __cvmx_cmd_queue_state_ptr =
+ cvmx_bootmem_alloc_named(sizeof(*__cvmx_cmd_queue_state_ptr),
+ 128,
+ alloc_name);
if (__cvmx_cmd_queue_state_ptr)
memset(__cvmx_cmd_queue_state_ptr, 0,
sizeof(*__cvmx_cmd_queue_state_ptr));
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
index 1daa0c6b6f4e..d09d0769f549 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
@@ -282,9 +282,9 @@ union cvmx_helper_link_info __cvmx_helper_board_link_get(int ipd_port)
* support and should return the number of actual ports on the
* board.
*
- * This function must be modifed for every new Octeon board.
+ * This function must be modified for every new Octeon board.
* Internally it uses switch statements based on the cvmx_sysinfo
- * data to determine board types and revisions. It relys on the
+ * data to determine board types and revisions. It relies on the
* fact that every Octeon board receives a unique board type
* enumeration from the bootloader.
*
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index a994022e32c9..ce05c0dd3acd 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -86,11 +86,12 @@ static void octeon2_usb_clocks_start(struct device *dev)
"refclk-frequency", &clock_rate);
if (i) {
dev_err(dev, "No UCTL \"refclk-frequency\"\n");
+ of_node_put(uctl_node);
goto exit;
}
i = of_property_read_string(uctl_node,
"refclk-type", &clock_type);
-
+ of_node_put(uctl_node);
if (!i && strcmp("crystal", clock_type) == 0)
is_crystal_clock = true;
}
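[editor note] The two of_node_put() calls above close a device-tree node reference leak: the uctl_node obtained earlier in this function carries a raised refcount, and every exit path has to drop it. The general pattern, sketched here with a hypothetical phandle property name rather than the driver's actual lookup:

	struct device_node *np;
	u32 freq;

	np = of_parse_phandle(dev->of_node, "example-phandle", 0);	/* takes a reference */
	if (np) {
		int err = of_property_read_u32(np, "refclk-frequency", &freq);

		of_node_put(np);	/* drop the reference on success and error alike */
		if (err)
			dev_err(dev, "missing refclk-frequency\n");
	}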
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index 00bf269763cf..cbd83205518d 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -284,6 +284,11 @@ void octeon_crash_smp_send_stop(void)
#endif /* CONFIG_KEXEC */
+#ifdef CONFIG_CAVIUM_RESERVE32
+uint64_t octeon_reserve32_memory;
+EXPORT_SYMBOL(octeon_reserve32_memory);
+#endif
+
#ifdef CONFIG_KEXEC
/* crashkernel cmdline parameter is parsed _after_ memory setup
* we also parse it here (workaround for EHB5200) */
@@ -661,7 +666,9 @@ void __init prom_init(void)
int i;
u64 t;
int argc;
-
+#ifdef CONFIG_CAVIUM_RESERVE32
+ int64_t addr = -1;
+#endif
/*
* The bootloader passes a pointer to the boot descriptor in
* $a3, this is available as fw_arg3.
@@ -776,6 +783,25 @@ void __init prom_init(void)
cvmx_write_csr(CVMX_LED_UDD_DATX(1), 0);
cvmx_write_csr(CVMX_LED_EN, 1);
}
+#ifdef CONFIG_CAVIUM_RESERVE32
+ /*
+ * We need to temporarily allocate all memory in the reserve32
+ * region. This makes sure the kernel doesn't allocate this
+ * memory when it is getting memory from the
+ * bootloader. Later, after the memory allocations are
+ * complete, the reserve32 will be freed.
+ *
+ * Allocate memory for RESERVED32 aligned on 2MB boundary. This
+ * is in case we later use hugetlb entries with it.
+ */
+ addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20,
+ 0, 0, 2 << 20,
+ "CAVIUM_RESERVE32", 0);
+ if (addr < 0)
+ pr_err("Failed to allocate CAVIUM_RESERVE32 memory area\n");
+ else
+ octeon_reserve32_memory = addr;
+#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2
if (cvmx_read_csr(CVMX_L2D_FUS3) & (3ull << 34)) {
@@ -1053,6 +1079,16 @@ void __init plat_mem_setup(void)
cvmx_bootmem_unlock();
#endif /* CONFIG_CRASH_DUMP */
+#ifdef CONFIG_CAVIUM_RESERVE32
+ /*
+ * Now that we've allocated the kernel memory it is safe to
+ * free the reserved region. We free it here so that builtin
+ * drivers can use the memory.
+ */
+ if (octeon_reserve32_memory)
+ cvmx_bootmem_free_named("CAVIUM_RESERVE32");
+#endif /* CONFIG_CAVIUM_RESERVE32 */
+
if (total == 0)
panic("Unable to allocate memory from "
"cvmx_bootmem_phy_alloc");
diff --git a/arch/mips/configs/capcella_defconfig b/arch/mips/configs/capcella_defconfig
deleted file mode 100644
index 7bf8971af53b..000000000000
--- a/arch/mips/configs/capcella_defconfig
+++ /dev/null
@@ -1,91 +0,0 @@
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EXPERT=y
-CONFIG_SLAB=y
-CONFIG_MACH_VR41XX=y
-CONFIG_ZAO_CAPCELLA=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_MODULE_SRCVERSION_ALL=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=m
-CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_INET_XFRM_MODE_TRANSPORT=m
-CONFIG_INET_XFRM_MODE_TUNNEL=m
-CONFIG_INET_XFRM_MODE_BEET=m
-CONFIG_TCP_MD5SIG=y
-# CONFIG_IPV6 is not set
-CONFIG_NETWORK_SECMARK=y
-CONFIG_IP_SCTP=m
-CONFIG_FW_LOADER=m
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_ATA=y
-CONFIG_PATA_LEGACY=y
-CONFIG_NETDEVICES=y
-CONFIG_8139TOO=y
-CONFIG_PHYLIB=m
-CONFIG_CICADA_PHY=m
-CONFIG_DAVICOM_PHY=m
-CONFIG_LXT_PHY=m
-CONFIG_MARVELL_PHY=m
-CONFIG_QSEMI_PHY=m
-CONFIG_SMSC_PHY=m
-CONFIG_VITESSE_PHY=m
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-CONFIG_VT_HW_CONSOLE_BINDING=y
-CONFIG_SERIAL_VR41XX=y
-CONFIG_SERIAL_VR41XX_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
-CONFIG_GPIO_VR41XX=y
-# CONFIG_HWMON is not set
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_USB_SUPPORT is not set
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_VR41XX=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-CONFIG_AUTOFS4_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
-CONFIG_CRYPTO_CBC=m
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_LRW=m
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_TGR192=m
-CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_ARC4=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_CAMELLIA=m
-CONFIG_CRYPTO_CAST5=m
-CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_DES=m
-CONFIG_CRYPTO_FCRYPT=m
-CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_DEFLATE=m
-# CONFIG_CRYPTO_HW is not set
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="mem=32M console=ttyVR0,38400"
diff --git a/arch/mips/configs/e55_defconfig b/arch/mips/configs/e55_defconfig
deleted file mode 100644
index fd82b858a8f0..000000000000
--- a/arch/mips/configs/e55_defconfig
+++ /dev/null
@@ -1,37 +0,0 @@
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EXPERT=y
-CONFIG_SLAB=y
-CONFIG_MACH_VR41XX=y
-CONFIG_CASIO_E55=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_MODULE_SRCVERSION_ALL=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_ATA=y
-CONFIG_PATA_LEGACY=y
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-CONFIG_VT_HW_CONSOLE_BINDING=y
-CONFIG_SERIAL_VR41XX=y
-CONFIG_SERIAL_VR41XX_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
-CONFIG_GPIO_VR41XX=y
-# CONFIG_HWMON is not set
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_USB_SUPPORT is not set
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_VR41XX=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-CONFIG_AUTOFS4_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttyVR0,19200 ide0=0x1f0,0x3f6,40 mem=8M"
diff --git a/arch/mips/configs/mpc30x_defconfig b/arch/mips/configs/mpc30x_defconfig
deleted file mode 100644
index d4e038802510..000000000000
--- a/arch/mips/configs/mpc30x_defconfig
+++ /dev/null
@@ -1,53 +0,0 @@
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
-CONFIG_EXPERT=y
-CONFIG_SLAB=y
-CONFIG_MACH_VR41XX=y
-CONFIG_VICTOR_MPC30X=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_MODULE_SRCVERSION_ALL=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=m
-CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_INET_XFRM_MODE_TRANSPORT=m
-CONFIG_INET_XFRM_MODE_TUNNEL=m
-CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_IPV6 is not set
-CONFIG_NETWORK_SECMARK=y
-CONFIG_CONNECTOR=m
-CONFIG_ATA_OVER_ETH=m
-CONFIG_BLK_DEV_SD=y
-CONFIG_ATA=y
-CONFIG_PATA_LEGACY=y
-CONFIG_NETDEVICES=y
-CONFIG_USB_PEGASUS=m
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-CONFIG_VT_HW_CONSOLE_BINDING=y
-CONFIG_SERIAL_VR41XX=y
-CONFIG_SERIAL_VR41XX_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
-CONFIG_GPIO_VR41XX=y
-# CONFIG_HWMON is not set
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_USB=m
-CONFIG_USB_OHCI_HCD=m
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_VR41XX=y
-CONFIG_EXT2_FS=y
-CONFIG_AUTOFS4_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_CONFIGFS_FS=m
-CONFIG_NFS_FS=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="mem=32M console=ttyVR0,19200 ide0=0x170,0x376,73"
diff --git a/arch/mips/configs/tb0219_defconfig b/arch/mips/configs/tb0219_defconfig
deleted file mode 100644
index c56d8ab14ba6..000000000000
--- a/arch/mips/configs/tb0219_defconfig
+++ /dev/null
@@ -1,76 +0,0 @@
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EXPERT=y
-CONFIG_SLAB=y
-CONFIG_MACH_VR41XX=y
-CONFIG_TANBAC_TB0219=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_MODULE_SRCVERSION_ALL=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_MULTIPLE_TABLES=y
-CONFIG_IP_ROUTE_MULTIPATH=y
-CONFIG_IP_ROUTE_VERBOSE=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_NET_IPIP=m
-CONFIG_SYN_COOKIES=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_IPV6 is not set
-CONFIG_NETWORK_SECMARK=y
-CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_RAM=y
-CONFIG_NETDEVICES=y
-CONFIG_8139TOO=y
-CONFIG_R8169=y
-CONFIG_VIA_RHINE=y
-CONFIG_VIA_RHINE_MMIO=y
-CONFIG_VIA_VELOCITY=y
-CONFIG_CICADA_PHY=m
-CONFIG_DAVICOM_PHY=m
-CONFIG_LXT_PHY=m
-CONFIG_MARVELL_PHY=m
-CONFIG_QSEMI_PHY=m
-CONFIG_SMSC_PHY=m
-CONFIG_VITESSE_PHY=m
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-CONFIG_VT_HW_CONSOLE_BINDING=y
-CONFIG_SERIAL_VR41XX=y
-CONFIG_SERIAL_VR41XX_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
-CONFIG_GPIO_TB0219=y
-# CONFIG_HWMON is not set
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_USB=m
-CONFIG_USB_MON=m
-CONFIG_USB_EHCI_HCD=m
-# CONFIG_USB_EHCI_TT_NEWSCHED is not set
-CONFIG_USB_OHCI_HCD=m
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_VR41XX=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-CONFIG_AUTOFS4_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_CRAMFS=m
-CONFIG_ROMFS_FS=m
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="cca=3 mem=64M console=ttyVR0,115200 ip=any root=/dev/nfs"
diff --git a/arch/mips/configs/tb0226_defconfig b/arch/mips/configs/tb0226_defconfig
deleted file mode 100644
index 6e1423428f02..000000000000
--- a/arch/mips/configs/tb0226_defconfig
+++ /dev/null
@@ -1,71 +0,0 @@
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EXPERT=y
-CONFIG_SLAB=y
-CONFIG_MACH_VR41XX=y
-CONFIG_TANBAC_TB0226=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_MODULE_SRCVERSION_ALL=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_MULTIPLE_TABLES=y
-CONFIG_IP_ROUTE_MULTIPATH=y
-CONFIG_IP_ROUTE_VERBOSE=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_SYN_COOKIES=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_IPV6 is not set
-CONFIG_NETWORK_SECMARK=y
-CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_RAM=y
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_SCSI_SCAN_ASYNC=y
-CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_LOWLEVEL is not set
-CONFIG_NETDEVICES=y
-CONFIG_E100=y
-CONFIG_USB_CATC=m
-CONFIG_USB_KAWETH=m
-CONFIG_USB_PEGASUS=m
-CONFIG_USB_RTL8150=m
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-CONFIG_VT_HW_CONSOLE_BINDING=y
-CONFIG_SERIAL_VR41XX=y
-CONFIG_SERIAL_VR41XX_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
-# CONFIG_HWMON is not set
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_USB=y
-CONFIG_USB_EHCI_HCD=y
-# CONFIG_USB_EHCI_TT_NEWSCHED is not set
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_ACM=y
-CONFIG_USB_STORAGE=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_VR41XX=y
-CONFIG_EXT2_FS=y
-CONFIG_AUTOFS4_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_CRAMFS=m
-CONFIG_ROMFS_FS=m
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=m
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="cca=3 mem=32M console=ttyVR0,115200"
diff --git a/arch/mips/configs/tb0287_defconfig b/arch/mips/configs/tb0287_defconfig
deleted file mode 100644
index cf65a0879ece..000000000000
--- a/arch/mips/configs/tb0287_defconfig
+++ /dev/null
@@ -1,84 +0,0 @@
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EXPERT=y
-CONFIG_SLAB=y
-CONFIG_MACH_VR41XX=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_MODULE_SRCVERSION_ALL=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_MULTIPLE_TABLES=y
-CONFIG_IP_ROUTE_MULTIPATH=y
-CONFIG_IP_ROUTE_VERBOSE=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_NET_IPIP=m
-CONFIG_SYN_COOKIES=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-CONFIG_TCP_CONG_ADVANCED=y
-CONFIG_TCP_CONG_BIC=y
-CONFIG_TCP_CONG_CUBIC=m
-# CONFIG_IPV6 is not set
-CONFIG_NETWORK_SECMARK=y
-CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_SCSI_SCAN_ASYNC=y
-# CONFIG_SCSI_LOWLEVEL is not set
-CONFIG_ATA=y
-CONFIG_PATA_SIL680=y
-CONFIG_NETDEVICES=y
-CONFIG_8139TOO=y
-CONFIG_R8169=y
-CONFIG_VIA_RHINE=y
-CONFIG_VIA_RHINE_MMIO=y
-CONFIG_VIA_VELOCITY=y
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-CONFIG_SERIAL_VR41XX=y
-CONFIG_SERIAL_VR41XX_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
-CONFIG_GPIO_VR41XX=y
-# CONFIG_HWMON is not set
-CONFIG_MFD_SM501=y
-CONFIG_FB=y
-CONFIG_FB_SM501=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_USB=m
-CONFIG_USB_MON=m
-CONFIG_USB_EHCI_HCD=m
-# CONFIG_USB_EHCI_TT_NEWSCHED is not set
-CONFIG_USB_OHCI_HCD=m
-CONFIG_USB_STORAGE=m
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-CONFIG_XFS_FS=y
-CONFIG_XFS_QUOTA=y
-CONFIG_XFS_POSIX_ACL=y
-CONFIG_AUTOFS4_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_CRAMFS=m
-CONFIG_ROMFS_FS=m
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=m
-CONFIG_FONTS=y
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="cca=3 mem=64M console=ttyVR0,115200 ip=any root=/dev/nfs"
diff --git a/arch/mips/configs/workpad_defconfig b/arch/mips/configs/workpad_defconfig
deleted file mode 100644
index 7e16da0bde8c..000000000000
--- a/arch/mips/configs/workpad_defconfig
+++ /dev/null
@@ -1,67 +0,0 @@
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EXPERT=y
-CONFIG_SLAB=y
-CONFIG_MACH_VR41XX=y
-CONFIG_IBM_WORKPAD=y
-CONFIG_PCCARD=y
-CONFIG_PCMCIA_VRC4171=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_MODULE_SRCVERSION_ALL=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=m
-CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_IPV6 is not set
-CONFIG_NETWORK_SECMARK=y
-CONFIG_BLK_DEV_RAM=m
-# CONFIG_SCSI_PROC_FS is not set
-# CONFIG_SCSI_LOWLEVEL is not set
-CONFIG_ATA=y
-# CONFIG_ATA_VERBOSE_ERROR is not set
-# CONFIG_ATA_FORCE is not set
-# CONFIG_ATA_BMDMA is not set
-CONFIG_NETDEVICES=y
-CONFIG_PCMCIA_3C574=m
-CONFIG_PCMCIA_3C589=m
-CONFIG_PCMCIA_NMCLAN=m
-CONFIG_PCMCIA_FMVJ18X=m
-CONFIG_PCMCIA_AXNET=m
-CONFIG_PCMCIA_PCNET=m
-CONFIG_PCMCIA_SMC91C92=m
-CONFIG_PCMCIA_XIRC2PS=m
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-CONFIG_VT_HW_CONSOLE_BINDING=y
-CONFIG_SERIAL_VR41XX=y
-CONFIG_SERIAL_VR41XX_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
-CONFIG_GPIO_VR41XX=y
-# CONFIG_HWMON is not set
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_USB_SUPPORT is not set
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_VR41XX=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT3_FS=y
-CONFIG_AUTOFS4_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_NFS_FS=m
-CONFIG_NFSD=m
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttyVR0,19200 ide0=0x170,0x376,49 mem=16M"
diff --git a/arch/mips/fw/cfe/cfe_api.c b/arch/mips/fw/cfe/cfe_api.c
index 0c9c97ab291e..dcdfd962dbde 100644
--- a/arch/mips/fw/cfe/cfe_api.c
+++ b/arch/mips/fw/cfe/cfe_api.c
@@ -13,10 +13,15 @@
*
* Authors: Mitch Lichtenberg, Chris Demetriou
*/
-
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <asm/mipsregs.h>
#include <asm/fw/cfe/cfe_api.h>
#include "cfe_api_int.h"
+unsigned long __initdata cfe_seal;
+
/* Cast from a native pointer to a cfe_xptr_t and back. */
#define XPTR_FROM_NATIVE(n) ((cfe_xptr_t) (intptr_t) (n))
#define NATIVE_FROM_XPTR(x) ((void *) (intptr_t) (x))
@@ -412,3 +417,64 @@ int cfe_writeblk(int handle, s64 offset, const char *buffer, int length)
return xiocb.xiocb_status;
return xiocb.plist.xiocb_buffer.buf_retlen;
}
+
+void __init cfe_die(char *fmt, ...)
+{
+ unsigned int prid, __maybe_unused rev;
+ char msg[128];
+ va_list ap;
+ int handle;
+ unsigned int count;
+
+ va_start(ap, fmt);
+ vsprintf(msg, fmt, ap);
+ strcat(msg, "\r\n");
+
+ if (cfe_seal != CFE_EPTSEAL)
+ goto no_cfe;
+
+ prid = read_c0_prid();
+ if ((prid & PRID_COMP_MASK) != PRID_COMP_BROADCOM)
+ goto no_cfe;
+
+ rev = prid & PRID_REV_MASK;
+
+ /* disable XKS01 so that CFE can access the registers */
+ switch (prid & PRID_IMP_MASK) {
+#ifdef CONFIG_CPU_BMIPS4380
+ case PRID_IMP_BMIPS43XX:
+ if (rev >= PRID_REV_BMIPS4380_LO &&
+ rev <= PRID_REV_BMIPS4380_HI)
+ __write_32bit_c0_register($22, 3,
+ __read_32bit_c0_register($22, 3) & ~BIT(12));
+ break;
+#endif
+#ifdef CONFIG_CPU_BMIPS5000
+ case PRID_IMP_BMIPS5000:
+ case PRID_IMP_BMIPS5200:
+ __write_32bit_c0_register($22, 5,
+ __read_32bit_c0_register($22, 5) & ~BIT(8));
+ break;
+#endif
+ default:
+ break;
+ }
+
+ handle = cfe_getstdhandle(CFE_STDHANDLE_CONSOLE);
+ if (handle < 0)
+ goto no_cfe;
+
+ cfe_write(handle, msg, strlen(msg));
+
+ for (count = 0; count < 0x7fffffff; count++)
+ mb();
+ cfe_exit(0, 1);
+ while (1)
+ ;
+
+no_cfe:
+ /* probably won't print anywhere useful */
+ panic("%s", msg);
+
+ va_end(ap);
+}
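[editor note] cfe_die() gives BMIPS boards a last-resort console before the kernel console exists: if prom_init() recorded a valid CFE seal, the message goes out through the firmware's console handle and control returns to CFE; otherwise it falls back to panic(). The plat_mem_setup() hunk above shows the intended use; since the declaration below is printf-checked, format arguments are also fine, e.g. (hypothetical call site, and note cfe_die() appends its own line ending):

	if (!dtb)
		cfe_die("no dtb found (fw_arg3=%lx)", fw_arg3);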
diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h
index 5582ff0c247e..a4a66bd93748 100644
--- a/arch/mips/include/asm/cpu-type.h
+++ b/arch/mips/include/asm/cpu-type.h
@@ -105,17 +105,6 @@ static inline int __pure __get_cpu_type(const int cpu_type)
case CPU_R3081E:
#endif
-#ifdef CONFIG_SYS_HAS_CPU_VR41XX
- case CPU_VR41XX:
- case CPU_VR4111:
- case CPU_VR4121:
- case CPU_VR4122:
- case CPU_VR4131:
- case CPU_VR4133:
- case CPU_VR4181:
- case CPU_VR4181A:
-#endif
-
#ifdef CONFIG_SYS_HAS_CPU_R4300
case CPU_R4300:
case CPU_R4310:
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index 00a3fc7d778d..ecb9854cb432 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -305,8 +305,7 @@ enum cpu_type_enum {
CPU_R4000PC, CPU_R4000SC, CPU_R4000MC, CPU_R4200, CPU_R4300, CPU_R4310,
CPU_R4400PC, CPU_R4400SC, CPU_R4400MC, CPU_R4600, CPU_R4640, CPU_R4650,
CPU_R4700, CPU_R5000, CPU_R5500, CPU_NEVADA, CPU_R10000,
- CPU_R12000, CPU_R14000, CPU_R16000, CPU_VR41XX, CPU_VR4111, CPU_VR4121,
- CPU_VR4122, CPU_VR4131, CPU_VR4133, CPU_VR4181, CPU_VR4181A, CPU_RM7000,
+ CPU_R12000, CPU_R14000, CPU_R16000, CPU_RM7000,
CPU_SR71000, CPU_TX49XX,
/*
diff --git a/arch/mips/include/asm/dma.h b/arch/mips/include/asm/dma.h
index be726b943530..d6186e6bea7e 100644
--- a/arch/mips/include/asm/dma.h
+++ b/arch/mips/include/asm/dma.h
@@ -307,12 +307,4 @@ static __inline__ int get_dma_residue(unsigned int dmanr)
extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */
extern void free_dma(unsigned int dmanr); /* release it again */
-/* From PCI */
-
-#ifdef CONFIG_PCI
-extern int isa_dma_bridge_buggy;
-#else
-#define isa_dma_bridge_buggy (0)
-#endif
-
#endif /* _ASM_DMA_H */
diff --git a/arch/mips/include/asm/fw/cfe/cfe_api.h b/arch/mips/include/asm/fw/cfe/cfe_api.h
index 6457f36897a2..25df2f4deb31 100644
--- a/arch/mips/include/asm/fw/cfe/cfe_api.h
+++ b/arch/mips/include/asm/fw/cfe/cfe_api.h
@@ -105,5 +105,7 @@ int cfe_setenv(char *name, char *val);
int cfe_write(int handle, const char *buffer, int length);
int cfe_writeblk(int handle, int64_t offset, const char *buffer,
int length);
+extern unsigned long cfe_seal;
+__printf(1, 2) void cfe_die(char *fmt, ...);
#endif /* CFE_API_H */
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 6f5c86d2bab4..e6d5ccaa309e 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -131,7 +131,7 @@ static inline phys_addr_t virt_to_phys(const volatile void *x)
*/
static inline void * phys_to_virt(unsigned long address)
{
- return (void *)(address + PAGE_OFFSET - PHYS_OFFSET);
+ return __va(address);
}
/*
@@ -148,15 +148,6 @@ static inline void *isa_bus_to_virt(unsigned long address)
}
/*
- * However PCI ones are not necessarily 1:1 and therefore these interfaces
- * are forbidden in portable PCI drivers.
- *
- * Allow them for x86 for legacy drivers, though.
- */
-#define virt_to_bus virt_to_phys
-#define bus_to_virt phys_to_virt
-
-/*
* Change "struct page" to physical address.
*/
#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 717716cc51c5..5cedb28e8a40 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -84,8 +84,6 @@
#define KVM_MAX_VCPUS 16
-/* memory slots that does not exposed to userspace */
-#define KVM_PRIVATE_MEM_SLOTS 0
#define KVM_HALT_POLL_NS_DEFAULT 500000
diff --git a/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h
index 6583639fe760..30f4114ab872 100644
--- a/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h
+++ b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h
@@ -118,6 +118,7 @@ enum bcm47xx_board {
BCM47XX_BOARD_NETGEAR_WNR1000_V3,
BCM47XX_BOARD_NETGEAR_WNR2000,
BCM47XX_BOARD_NETGEAR_WNR3500L,
+ BCM47XX_BOARD_NETGEAR_WNR3500L_V2,
BCM47XX_BOARD_NETGEAR_WNR3500U,
BCM47XX_BOARD_NETGEAR_WNR3500V2,
BCM47XX_BOARD_NETGEAR_WNR3500V2VC,
diff --git a/arch/mips/include/asm/mach-vr41xx/irq.h b/arch/mips/include/asm/mach-vr41xx/irq.h
deleted file mode 100644
index 4281b2b9344d..000000000000
--- a/arch/mips/include/asm/mach-vr41xx/irq.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_MACH_VR41XX_IRQ_H
-#define __ASM_MACH_VR41XX_IRQ_H
-
-#include <asm/vr41xx/irq.h> /* for MIPS_CPU_IRQ_BASE */
-
-#include <asm/mach-generic/irq.h>
-
-#endif /* __ASM_MACH_VR41XX_IRQ_H */
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 305651af15b3..99eeafe6dcab 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -164,18 +164,6 @@
/*
* Values for PageMask register
*/
-#ifdef CONFIG_CPU_VR41XX
-
-/* Why doesn't stupidity hurt ... */
-
-#define PM_1K 0x00000000
-#define PM_4K 0x00001800
-#define PM_16K 0x00007800
-#define PM_64K 0x0001f800
-#define PM_256K 0x0007f800
-
-#else
-
#define PM_4K 0x00000000
#define PM_8K 0x00002000
#define PM_16K 0x00006000
@@ -194,8 +182,6 @@
#define PM_256M 0x1fffe000
#define PM_1G 0x7fffe000
-#endif
-
/*
* Default page size for a given kernel configuration
*/
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-board.h b/arch/mips/include/asm/octeon/cvmx-helper-board.h
index ce52aafe7a8d..cfe9c256a918 100644
--- a/arch/mips/include/asm/octeon/cvmx-helper-board.h
+++ b/arch/mips/include/asm/octeon/cvmx-helper-board.h
@@ -63,9 +63,9 @@ typedef enum {
* connected to this port. On chips supporting multiple MII
* busses the bus number is encoded in bits <15:8>.
*
- * This function must be modifed for every new Octeon board.
+ * This function must be modified for every new Octeon board.
* Internally it uses switch statements based on the cvmx_sysinfo
- * data to determine board types and revisions. It relys on the
+ * data to determine board types and revisions. It relies on the
* fact that every Octeon board receives a unique board type
* enumeration from the bootloader.
*
@@ -81,9 +81,9 @@ extern int cvmx_helper_board_get_mii_address(int ipd_port);
* and are handled by the fall through case. This function must be
* updated for boards that don't have the normal Marvell PHYs.
*
- * This function must be modifed for every new Octeon board.
+ * This function must be modified for every new Octeon board.
* Internally it uses switch statements based on the cvmx_sysinfo
- * data to determine board types and revisions. It relys on the
+ * data to determine board types and revisions. It relies on the
* fact that every Octeon board receives a unique board type
* enumeration from the bootloader.
*
@@ -103,9 +103,9 @@ extern union cvmx_helper_link_info __cvmx_helper_board_link_get(int ipd_port);
* support and should return the number of actual ports on the
* board.
*
- * This function must be modifed for every new Octeon board.
+ * This function must be modified for every new Octeon board.
* Internally it uses switch statements based on the cvmx_sysinfo
- * data to determine board types and revisions. It relys on the
+ * data to determine board types and revisions. It relies on the
* fact that every Octeon board receives a unique board type
* enumeration from the bootloader.
*
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index 9ffc8192adae..3fd6e22c108b 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -139,10 +139,4 @@ static inline int pci_proc_domain(struct pci_bus *bus)
/* Do platform specific device initialization at pci_enable_device() time */
extern int pcibios_plat_dev_init(struct pci_dev *dev);
-/* Chances are this interrupt is wired PC-style ... */
-static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
-{
- return channel ? 15 : 14;
-}
-
#endif /* _ASM_PCI_H */
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index 867e9c3db76e..796035784c73 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -51,7 +51,7 @@ extern pgd_t *pgd_alloc(struct mm_struct *mm);
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
- free_pages((unsigned long)pgd, PGD_ORDER);
+ free_pages((unsigned long)pgd, PGD_TABLE_ORDER);
}
#define __pte_free_tlb(tlb,pte,address) \
@@ -67,12 +67,12 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
pmd_t *pmd;
struct page *pg;
- pg = alloc_pages(GFP_KERNEL_ACCOUNT, PMD_ORDER);
+ pg = alloc_pages(GFP_KERNEL_ACCOUNT, PMD_TABLE_ORDER);
if (!pg)
return NULL;
if (!pgtable_pmd_page_ctor(pg)) {
- __free_pages(pg, PMD_ORDER);
+ __free_pages(pg, PMD_TABLE_ORDER);
return NULL;
}
@@ -91,7 +91,7 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
pud_t *pud;
- pud = (pud_t *) __get_free_pages(GFP_KERNEL, PUD_ORDER);
+ pud = (pud_t *) __get_free_pages(GFP_KERNEL, PUD_TABLE_ORDER);
if (pud)
pud_init((unsigned long)pud, (unsigned long)invalid_pmd_table);
return pud;
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index 95df9c293d8d..b40a0e69fccc 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -62,9 +62,9 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
/* PGDIR_SHIFT determines what a third-level page table entry can map */
#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
-# define PGDIR_SHIFT (2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2 - 1)
+# define PGDIR_SHIFT (2 * PAGE_SHIFT - PTE_T_LOG2 - 1)
#else
-# define PGDIR_SHIFT (2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2)
+# define PGDIR_SHIFT (2 * PAGE_SHIFT - PTE_T_LOG2)
#endif
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
@@ -75,21 +75,20 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
* we don't really have any PUD/PMD directory physically.
*/
#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
-# define __PGD_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2 + 1)
+# define __PGD_TABLE_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2 + 1)
#else
-# define __PGD_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2)
+# define __PGD_TABLE_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2)
#endif
-#define PGD_ORDER (__PGD_ORDER >= 0 ? __PGD_ORDER : 0)
-#define PUD_ORDER aieeee_attempt_to_allocate_pud
-#define PMD_ORDER aieeee_attempt_to_allocate_pmd
-#define PTE_ORDER 0
+#define PGD_TABLE_ORDER (__PGD_TABLE_ORDER >= 0 ? __PGD_TABLE_ORDER : 0)
+#define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud
+#define PMD_TABLE_ORDER aieeee_attempt_to_allocate_pmd
#define PTRS_PER_PGD (USER_PTRS_PER_PGD * 2)
#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
-# define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t) / 2)
+# define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t) / 2)
#else
-# define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
+# define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t))
#endif
#define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE)
@@ -185,14 +184,9 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
#else
#define MAX_POSSIBLE_PHYSMEM_BITS 32
-#ifdef CONFIG_CPU_VR41XX
-#define pte_pfn(x) ((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
-#define pfn_pte(pfn, prot) __pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
-#else
#define pte_pfn(x) ((unsigned long)((x).pte >> _PFN_SHIFT))
#define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << _PFN_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot) __pmd(((unsigned long long)(pfn) << _PFN_SHIFT) | pgprot_val(prot))
-#endif
#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */
#define pte_page(x) pfn_to_page(pte_pfn(x))
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index 41921acdc9d8..436c29d698fa 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -42,24 +42,24 @@
/* PGDIR_SHIFT determines what a third-level page table entry can map */
#ifdef __PAGETABLE_PMD_FOLDED
-#define PGDIR_SHIFT (PAGE_SHIFT + PAGE_SHIFT + PTE_ORDER - 3)
+#define PGDIR_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3)
#else
/* PMD_SHIFT determines the size of the area a second-level page table can map */
-#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
+#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
# ifdef __PAGETABLE_PUD_FOLDED
-# define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
+# define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_TABLE_ORDER - 3))
# endif
#endif
#ifndef __PAGETABLE_PUD_FOLDED
-#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
+#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_TABLE_ORDER - 3))
#define PUD_SIZE (1UL << PUD_SHIFT)
#define PUD_MASK (~(PUD_SIZE-1))
-#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT + PUD_ORDER - 3))
+#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT + PUD_TABLE_ORDER - 3))
#endif
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
@@ -85,56 +85,51 @@
*/
#ifdef CONFIG_PAGE_SIZE_4KB
# ifdef CONFIG_MIPS_VA_BITS_48
-# define PGD_ORDER 0
-# define PUD_ORDER 0
+# define PGD_TABLE_ORDER 0
+# define PUD_TABLE_ORDER 0
# else
-# define PGD_ORDER 1
-# define PUD_ORDER aieeee_attempt_to_allocate_pud
+# define PGD_TABLE_ORDER 1
+# define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud
# endif
-#define PMD_ORDER 0
-#define PTE_ORDER 0
+#define PMD_TABLE_ORDER 0
#endif
#ifdef CONFIG_PAGE_SIZE_8KB
-#define PGD_ORDER 0
-#define PUD_ORDER aieeee_attempt_to_allocate_pud
-#define PMD_ORDER 0
-#define PTE_ORDER 0
+#define PGD_TABLE_ORDER 0
+#define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud
+#define PMD_TABLE_ORDER 0
#endif
#ifdef CONFIG_PAGE_SIZE_16KB
#ifdef CONFIG_MIPS_VA_BITS_48
-#define PGD_ORDER 1
+#define PGD_TABLE_ORDER 1
#else
-#define PGD_ORDER 0
+#define PGD_TABLE_ORDER 0
#endif
-#define PUD_ORDER aieeee_attempt_to_allocate_pud
-#define PMD_ORDER 0
-#define PTE_ORDER 0
+#define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud
+#define PMD_TABLE_ORDER 0
#endif
#ifdef CONFIG_PAGE_SIZE_32KB
-#define PGD_ORDER 0
-#define PUD_ORDER aieeee_attempt_to_allocate_pud
-#define PMD_ORDER 0
-#define PTE_ORDER 0
+#define PGD_TABLE_ORDER 0
+#define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud
+#define PMD_TABLE_ORDER 0
#endif
#ifdef CONFIG_PAGE_SIZE_64KB
-#define PGD_ORDER 0
-#define PUD_ORDER aieeee_attempt_to_allocate_pud
+#define PGD_TABLE_ORDER 0
+#define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud
#ifdef CONFIG_MIPS_VA_BITS_48
-#define PMD_ORDER 0
+#define PMD_TABLE_ORDER 0
#else
-#define PMD_ORDER aieeee_attempt_to_allocate_pmd
+#define PMD_TABLE_ORDER aieeee_attempt_to_allocate_pmd
#endif
-#define PTE_ORDER 0
#endif
-#define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
+#define PTRS_PER_PGD ((PAGE_SIZE << PGD_TABLE_ORDER) / sizeof(pgd_t))
#ifndef __PAGETABLE_PUD_FOLDED
-#define PTRS_PER_PUD ((PAGE_SIZE << PUD_ORDER) / sizeof(pud_t))
+#define PTRS_PER_PUD ((PAGE_SIZE << PUD_TABLE_ORDER) / sizeof(pud_t))
#endif
#ifndef __PAGETABLE_PMD_FOLDED
-#define PTRS_PER_PMD ((PAGE_SIZE << PMD_ORDER) / sizeof(pmd_t))
+#define PTRS_PER_PMD ((PAGE_SIZE << PMD_TABLE_ORDER) / sizeof(pmd_t))
#endif
-#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
+#define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t))
#define USER_PTRS_PER_PGD ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1)
@@ -303,14 +298,9 @@ static inline void pud_clear(pud_t *pudp)
#define pte_page(x) pfn_to_page(pte_pfn(x))
-#ifdef CONFIG_CPU_VR41XX
-#define pte_pfn(x) ((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
-#define pfn_pte(pfn, prot) __pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
-#else
#define pte_pfn(x) ((unsigned long)((x).pte >> _PFN_SHIFT))
#define pfn_pte(pfn, prot) __pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot) __pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
-#endif
#ifndef __PAGETABLE_PMD_FOLDED
static inline pmd_t *pud_pgtable(pud_t pud)
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 374c6322775d..6caec386ad2f 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -41,28 +41,6 @@ struct vm_area_struct;
* by reasonable means..
*/
-/*
- * Dummy values to fill the table in mmap.c
- * The real values will be generated at runtime
- */
-#define __P000 __pgprot(0)
-#define __P001 __pgprot(0)
-#define __P010 __pgprot(0)
-#define __P011 __pgprot(0)
-#define __P100 __pgprot(0)
-#define __P101 __pgprot(0)
-#define __P110 __pgprot(0)
-#define __P111 __pgprot(0)
-
-#define __S000 __pgprot(0)
-#define __S001 __pgprot(0)
-#define __S010 __pgprot(0)
-#define __S011 __pgprot(0)
-#define __S100 __pgprot(0)
-#define __S101 __pgprot(0)
-#define __S110 __pgprot(0)
-#define __S111 __pgprot(0)
-
extern unsigned long _page_cachable_default;
extern void __update_cache(unsigned long address, pte_t pte);
diff --git a/arch/mips/include/asm/vermagic.h b/arch/mips/include/asm/vermagic.h
index 1c33922eb945..7645f77c8272 100644
--- a/arch/mips/include/asm/vermagic.h
+++ b/arch/mips/include/asm/vermagic.h
@@ -22,8 +22,6 @@
#define MODULE_PROC_FAMILY "MIPS64_R6 "
#elif defined CONFIG_CPU_R3000
#define MODULE_PROC_FAMILY "R3000 "
-#elif defined CONFIG_CPU_VR41XX
-#define MODULE_PROC_FAMILY "VR41XX "
#elif defined CONFIG_CPU_R4300
#define MODULE_PROC_FAMILY "R4300 "
#elif defined CONFIG_CPU_R4X00
diff --git a/arch/mips/include/asm/vr41xx/capcella.h b/arch/mips/include/asm/vr41xx/capcella.h
deleted file mode 100644
index d45a33969951..000000000000
--- a/arch/mips/include/asm/vr41xx/capcella.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * capcella.h, Include file for ZAO Networks Capcella.
- *
- * Copyright (C) 2002-2004 Yoichi Yuasa <yuasa@linux-mips.org>
- */
-#ifndef __ZAO_CAPCELLA_H
-#define __ZAO_CAPCELLA_H
-
-#include <asm/vr41xx/irq.h>
-
-/*
- * General-Purpose I/O Pin Number
- */
-#define PC104PLUS_INTA_PIN 2
-#define PC104PLUS_INTB_PIN 3
-#define PC104PLUS_INTC_PIN 4
-#define PC104PLUS_INTD_PIN 5
-
-/*
- * Interrupt Number
- */
-#define RTL8139_1_IRQ GIU_IRQ(PC104PLUS_INTC_PIN)
-#define RTL8139_2_IRQ GIU_IRQ(PC104PLUS_INTD_PIN)
-#define PC104PLUS_INTA_IRQ GIU_IRQ(PC104PLUS_INTA_PIN)
-#define PC104PLUS_INTB_IRQ GIU_IRQ(PC104PLUS_INTB_PIN)
-#define PC104PLUS_INTC_IRQ GIU_IRQ(PC104PLUS_INTC_PIN)
-#define PC104PLUS_INTD_IRQ GIU_IRQ(PC104PLUS_INTD_PIN)
-
-#endif /* __ZAO_CAPCELLA_H */
diff --git a/arch/mips/include/asm/vr41xx/giu.h b/arch/mips/include/asm/vr41xx/giu.h
deleted file mode 100644
index 0211fa89897a..000000000000
--- a/arch/mips/include/asm/vr41xx/giu.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Include file for NEC VR4100 series General-purpose I/O Unit.
- *
- * Copyright (C) 2005-2009 Yoichi Yuasa <yuasa@linux-mips.org>
- */
-#ifndef __NEC_VR41XX_GIU_H
-#define __NEC_VR41XX_GIU_H
-
-/*
- * NEC VR4100 series GIU platform device IDs.
- */
-enum {
- GPIO_50PINS_PULLUPDOWN,
- GPIO_36PINS,
- GPIO_48PINS_EDGE_SELECT,
-};
-
-typedef enum {
- IRQ_TRIGGER_LEVEL,
- IRQ_TRIGGER_EDGE,
- IRQ_TRIGGER_EDGE_FALLING,
- IRQ_TRIGGER_EDGE_RISING,
-} irq_trigger_t;
-
-typedef enum {
- IRQ_SIGNAL_THROUGH,
- IRQ_SIGNAL_HOLD,
-} irq_signal_t;
-
-extern void vr41xx_set_irq_trigger(unsigned int pin, irq_trigger_t trigger,
- irq_signal_t signal);
-
-typedef enum {
- IRQ_LEVEL_LOW,
- IRQ_LEVEL_HIGH,
-} irq_level_t;
-
-extern void vr41xx_set_irq_level(unsigned int pin, irq_level_t level);
-
-#endif /* __NEC_VR41XX_GIU_H */
diff --git a/arch/mips/include/asm/vr41xx/irq.h b/arch/mips/include/asm/vr41xx/irq.h
deleted file mode 100644
index 2f3d552f9566..000000000000
--- a/arch/mips/include/asm/vr41xx/irq.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * include/asm-mips/vr41xx/irq.h
- *
- * Interrupt numbers for NEC VR4100 series.
- *
- * Copyright (C) 1999 Michael Klar
- * Copyright (C) 2001, 2002 Paul Mundt
- * Copyright (C) 2002 MontaVista Software, Inc.
- * Copyright (C) 2002 TimeSys Corp.
- * Copyright (C) 2003-2006 Yoichi Yuasa <yuasa@linux-mips.org>
- */
-#ifndef __NEC_VR41XX_IRQ_H
-#define __NEC_VR41XX_IRQ_H
-
-/*
- * CPU core Interrupt Numbers
- */
-#define MIPS_CPU_IRQ_BASE 0
-#define MIPS_CPU_IRQ(x) (MIPS_CPU_IRQ_BASE + (x))
-#define MIPS_SOFTINT0_IRQ MIPS_CPU_IRQ(0)
-#define MIPS_SOFTINT1_IRQ MIPS_CPU_IRQ(1)
-#define INT0_IRQ MIPS_CPU_IRQ(2)
-#define INT1_IRQ MIPS_CPU_IRQ(3)
-#define INT2_IRQ MIPS_CPU_IRQ(4)
-#define INT3_IRQ MIPS_CPU_IRQ(5)
-#define INT4_IRQ MIPS_CPU_IRQ(6)
-#define TIMER_IRQ MIPS_CPU_IRQ(7)
-
-/*
- * SYINT1 Interrupt Numbers
- */
-#define SYSINT1_IRQ_BASE 8
-#define SYSINT1_IRQ(x) (SYSINT1_IRQ_BASE + (x))
-#define BATTRY_IRQ SYSINT1_IRQ(0)
-#define POWER_IRQ SYSINT1_IRQ(1)
-#define RTCLONG1_IRQ SYSINT1_IRQ(2)
-#define ELAPSEDTIME_IRQ SYSINT1_IRQ(3)
-/* RFU */
-#define PIU_IRQ SYSINT1_IRQ(5)
-#define AIU_IRQ SYSINT1_IRQ(6)
-#define KIU_IRQ SYSINT1_IRQ(7)
-#define GIUINT_IRQ SYSINT1_IRQ(8)
-#define SIU_IRQ SYSINT1_IRQ(9)
-#define BUSERR_IRQ SYSINT1_IRQ(10)
-#define SOFTINT_IRQ SYSINT1_IRQ(11)
-#define CLKRUN_IRQ SYSINT1_IRQ(12)
-#define DOZEPIU_IRQ SYSINT1_IRQ(13)
-#define SYSINT1_IRQ_LAST DOZEPIU_IRQ
-
-/*
- * SYSINT2 Interrupt Numbers
- */
-#define SYSINT2_IRQ_BASE 24
-#define SYSINT2_IRQ(x) (SYSINT2_IRQ_BASE + (x))
-#define RTCLONG2_IRQ SYSINT2_IRQ(0)
-#define LED_IRQ SYSINT2_IRQ(1)
-#define HSP_IRQ SYSINT2_IRQ(2)
-#define TCLOCK_IRQ SYSINT2_IRQ(3)
-#define FIR_IRQ SYSINT2_IRQ(4)
-#define CEU_IRQ SYSINT2_IRQ(4) /* same number as FIR_IRQ */
-#define DSIU_IRQ SYSINT2_IRQ(5)
-#define PCI_IRQ SYSINT2_IRQ(6)
-#define SCU_IRQ SYSINT2_IRQ(7)
-#define CSI_IRQ SYSINT2_IRQ(8)
-#define BCU_IRQ SYSINT2_IRQ(9)
-#define ETHERNET_IRQ SYSINT2_IRQ(10)
-#define SYSINT2_IRQ_LAST ETHERNET_IRQ
-
-/*
- * GIU Interrupt Numbers
- */
-#define GIU_IRQ_BASE 40
-#define GIU_IRQ(x) (GIU_IRQ_BASE + (x)) /* IRQ 40-71 */
-#define GIU_IRQ_LAST GIU_IRQ(31)
-
-/*
- * VRC4173 Interrupt Numbers
- */
-#define VRC4173_IRQ_BASE 72
-#define VRC4173_IRQ(x) (VRC4173_IRQ_BASE + (x))
-#define VRC4173_USB_IRQ VRC4173_IRQ(0)
-#define VRC4173_PCMCIA2_IRQ VRC4173_IRQ(1)
-#define VRC4173_PCMCIA1_IRQ VRC4173_IRQ(2)
-#define VRC4173_PS2CH2_IRQ VRC4173_IRQ(3)
-#define VRC4173_PS2CH1_IRQ VRC4173_IRQ(4)
-#define VRC4173_PIU_IRQ VRC4173_IRQ(5)
-#define VRC4173_AIU_IRQ VRC4173_IRQ(6)
-#define VRC4173_KIU_IRQ VRC4173_IRQ(7)
-#define VRC4173_GIU_IRQ VRC4173_IRQ(8)
-#define VRC4173_AC97_IRQ VRC4173_IRQ(9)
-#define VRC4173_AC97INT1_IRQ VRC4173_IRQ(10)
-/* RFU */
-#define VRC4173_DOZEPIU_IRQ VRC4173_IRQ(13)
-#define VRC4173_IRQ_LAST VRC4173_DOZEPIU_IRQ
-
-#endif /* __NEC_VR41XX_IRQ_H */
diff --git a/arch/mips/include/asm/vr41xx/mpc30x.h b/arch/mips/include/asm/vr41xx/mpc30x.h
deleted file mode 100644
index 9f977e18d72f..000000000000
--- a/arch/mips/include/asm/vr41xx/mpc30x.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * mpc30x.h, Include file for Victor MP-C303/304.
- *
- * Copyright (C) 2002-2004 Yoichi Yuasa <yuasa@linux-mips.org>
- */
-#ifndef __VICTOR_MPC30X_H
-#define __VICTOR_MPC30X_H
-
-#include <asm/vr41xx/irq.h>
-
-/*
- * General-Purpose I/O Pin Number
- */
-#define VRC4173_PIN 1
-#define MQ200_PIN 4
-
-/*
- * Interrupt Number
- */
-#define VRC4173_CASCADE_IRQ GIU_IRQ(VRC4173_PIN)
-#define MQ200_IRQ GIU_IRQ(MQ200_PIN)
-
-#endif /* __VICTOR_MPC30X_H */
diff --git a/arch/mips/include/asm/vr41xx/pci.h b/arch/mips/include/asm/vr41xx/pci.h
deleted file mode 100644
index ad93b5e89017..000000000000
--- a/arch/mips/include/asm/vr41xx/pci.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Include file for NEC VR4100 series PCI Control Unit.
- *
- * Copyright (C) 2004-2005 Yoichi Yuasa <yuasa@linux-mips.org>
- */
-#ifndef __NEC_VR41XX_PCI_H
-#define __NEC_VR41XX_PCI_H
-
-#define PCI_MASTER_ADDRESS_MASK 0x7fffffffU
-
-struct pci_master_address_conversion {
- uint32_t bus_base_address;
- uint32_t address_mask;
- uint32_t pci_base_address;
-};
-
-struct pci_target_address_conversion {
- uint32_t address_mask;
- uint32_t bus_base_address;
-};
-
-typedef enum {
- CANNOT_LOCK_FROM_DEVICE,
- CAN_LOCK_FROM_DEVICE,
-} pci_exclusive_access_t;
-
-struct pci_mailbox_address {
- uint32_t base_address;
-};
-
-struct pci_target_address_window {
- uint32_t base_address;
-};
-
-typedef enum {
- PCI_ARBITRATION_MODE_FAIR,
- PCI_ARBITRATION_MODE_ALTERNATE_0,
- PCI_ARBITRATION_MODE_ALTERNATE_B,
-} pci_arbiter_priority_control_t;
-
-typedef enum {
- PCI_TAKE_AWAY_GNT_DISABLE,
- PCI_TAKE_AWAY_GNT_ENABLE,
-} pci_take_away_gnt_mode_t;
-
-struct pci_controller_unit_setup {
- struct pci_master_address_conversion *master_memory1;
- struct pci_master_address_conversion *master_memory2;
-
- struct pci_target_address_conversion *target_memory1;
- struct pci_target_address_conversion *target_memory2;
-
- struct pci_master_address_conversion *master_io;
-
- pci_exclusive_access_t exclusive_access;
-
- uint32_t pci_clock_max;
- uint8_t wait_time_limit_from_irdy_to_trdy; /* Only VR4122 is supported */
-
- struct pci_mailbox_address *mailbox;
- struct pci_target_address_window *target_window1;
- struct pci_target_address_window *target_window2;
-
- uint8_t master_latency_timer;
- uint8_t retry_limit;
-
- pci_arbiter_priority_control_t arbiter_priority_control;
- pci_take_away_gnt_mode_t take_away_gnt_mode;
-
- struct resource *mem_resource;
- struct resource *io_resource;
-};
-
-extern void vr41xx_pciu_setup(struct pci_controller_unit_setup *setup);
-
-#endif /* __NEC_VR41XX_PCI_H */
diff --git a/arch/mips/include/asm/vr41xx/siu.h b/arch/mips/include/asm/vr41xx/siu.h
deleted file mode 100644
index e920cd2cf8b2..000000000000
--- a/arch/mips/include/asm/vr41xx/siu.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Include file for NEC VR4100 series Serial Interface Unit.
- *
- * Copyright (C) 2005-2008 Yoichi Yuasa <yuasa@linux-mips.org>
- */
-#ifndef __NEC_VR41XX_SIU_H
-#define __NEC_VR41XX_SIU_H
-
-#define SIU_PORTS_MAX 2
-
-typedef enum {
- SIU_INTERFACE_RS232C,
- SIU_INTERFACE_IRDA,
-} siu_interface_t;
-
-extern void vr41xx_select_siu_interface(siu_interface_t interface);
-
-typedef enum {
- SIU_USE_IRDA,
- FIR_USE_IRDA,
-} irda_use_t;
-
-extern void vr41xx_use_irda(irda_use_t use);
-
-typedef enum {
- SHARP_IRDA,
- TEMIC_IRDA,
- HP_IRDA,
-} irda_module_t;
-
-typedef enum {
- IRDA_TX_1_5MBPS,
- IRDA_TX_4MBPS,
-} irda_speed_t;
-
-extern void vr41xx_select_irda_module(irda_module_t module, irda_speed_t speed);
-
-#ifdef CONFIG_SERIAL_VR41XX_CONSOLE
-extern void vr41xx_siu_early_setup(struct uart_port *port);
-#else
-static inline void vr41xx_siu_early_setup(struct uart_port *port) {}
-#endif
-
-#endif /* __NEC_VR41XX_SIU_H */
diff --git a/arch/mips/include/asm/vr41xx/tb0219.h b/arch/mips/include/asm/vr41xx/tb0219.h
deleted file mode 100644
index 01e96d6c2dbd..000000000000
--- a/arch/mips/include/asm/vr41xx/tb0219.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * tb0219.h, Include file for TANBAC TB0219.
- *
- * Copyright (C) 2002-2004 Yoichi Yuasa <yuasa@linux-mips.org>
- *
- * Modified for TANBAC TB0219:
- * Copyright (C) 2003 Megasolution Inc. <matsu@megasolution.jp>
- */
-#ifndef __TANBAC_TB0219_H
-#define __TANBAC_TB0219_H
-
-#include <asm/vr41xx/irq.h>
-
-/*
- * General-Purpose I/O Pin Number
- */
-#define TB0219_PCI_SLOT1_PIN 2
-#define TB0219_PCI_SLOT2_PIN 3
-#define TB0219_PCI_SLOT3_PIN 4
-
-/*
- * Interrupt Number
- */
-#define TB0219_PCI_SLOT1_IRQ GIU_IRQ(TB0219_PCI_SLOT1_PIN)
-#define TB0219_PCI_SLOT2_IRQ GIU_IRQ(TB0219_PCI_SLOT2_PIN)
-#define TB0219_PCI_SLOT3_IRQ GIU_IRQ(TB0219_PCI_SLOT3_PIN)
-
-#endif /* __TANBAC_TB0219_H */
diff --git a/arch/mips/include/asm/vr41xx/tb0226.h b/arch/mips/include/asm/vr41xx/tb0226.h
deleted file mode 100644
index 64993d14916d..000000000000
--- a/arch/mips/include/asm/vr41xx/tb0226.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * tb0226.h, Include file for TANBAC TB0226.
- *
- * Copyright (C) 2002-2004 Yoichi Yuasa <yuasa@linux-mips.org>
- */
-#ifndef __TANBAC_TB0226_H
-#define __TANBAC_TB0226_H
-
-#include <asm/vr41xx/irq.h>
-
-/*
- * General-Purpose I/O Pin Number
- */
-#define GD82559_1_PIN 2
-#define GD82559_2_PIN 3
-#define UPD720100_INTA_PIN 4
-#define UPD720100_INTB_PIN 8
-#define UPD720100_INTC_PIN 13
-
-/*
- * Interrupt Number
- */
-#define GD82559_1_IRQ GIU_IRQ(GD82559_1_PIN)
-#define GD82559_2_IRQ GIU_IRQ(GD82559_2_PIN)
-#define UPD720100_INTA_IRQ GIU_IRQ(UPD720100_INTA_PIN)
-#define UPD720100_INTB_IRQ GIU_IRQ(UPD720100_INTB_PIN)
-#define UPD720100_INTC_IRQ GIU_IRQ(UPD720100_INTC_PIN)
-
-#endif /* __TANBAC_TB0226_H */
diff --git a/arch/mips/include/asm/vr41xx/tb0287.h b/arch/mips/include/asm/vr41xx/tb0287.h
deleted file mode 100644
index 3ddc913860d5..000000000000
--- a/arch/mips/include/asm/vr41xx/tb0287.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * tb0287.h, Include file for TANBAC TB0287 mini-ITX board.
- *
- * Copyright (C) 2005 Media Lab Inc. <ito@mlb.co.jp>
- *
- * This code is largely based on tb0219.h.
- */
-#ifndef __TANBAC_TB0287_H
-#define __TANBAC_TB0287_H
-
-#include <asm/vr41xx/irq.h>
-
-/*
- * General-Purpose I/O Pin Number
- */
-#define TB0287_PCI_SLOT_PIN 2
-#define TB0287_SM501_PIN 3
-#define TB0287_SIL680A_PIN 8
-#define TB0287_RTL8110_PIN 13
-
-/*
- * Interrupt Number
- */
-#define TB0287_PCI_SLOT_IRQ GIU_IRQ(TB0287_PCI_SLOT_PIN)
-#define TB0287_SM501_IRQ GIU_IRQ(TB0287_SM501_PIN)
-#define TB0287_SIL680A_IRQ GIU_IRQ(TB0287_SIL680A_PIN)
-#define TB0287_RTL8110_IRQ GIU_IRQ(TB0287_RTL8110_PIN)
-
-#endif /* __TANBAC_TB0287_H */
diff --git a/arch/mips/include/asm/vr41xx/vr41xx.h b/arch/mips/include/asm/vr41xx/vr41xx.h
deleted file mode 100644
index 9a4b36b756e2..000000000000
--- a/arch/mips/include/asm/vr41xx/vr41xx.h
+++ /dev/null
@@ -1,148 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * include/asm-mips/vr41xx/vr41xx.h
- *
- * Include file for NEC VR4100 series.
- *
- * Copyright (C) 1999 Michael Klar
- * Copyright (C) 2001, 2002 Paul Mundt
- * Copyright (C) 2002 MontaVista Software, Inc.
- * Copyright (C) 2002 TimeSys Corp.
- * Copyright (C) 2003-2008 Yoichi Yuasa <yuasa@linux-mips.org>
- */
-#ifndef __NEC_VR41XX_H
-#define __NEC_VR41XX_H
-
-#include <linux/interrupt.h>
-
-/*
- * CPU Revision
- */
-/* VR4122 0x00000c70-0x00000c72 */
-#define PRID_VR4122_REV1_0 0x00000c70
-#define PRID_VR4122_REV2_0 0x00000c70
-#define PRID_VR4122_REV2_1 0x00000c70
-#define PRID_VR4122_REV3_0 0x00000c71
-#define PRID_VR4122_REV3_1 0x00000c72
-
-/* VR4181A 0x00000c73-0x00000c7f */
-#define PRID_VR4181A_REV1_0 0x00000c73
-#define PRID_VR4181A_REV1_1 0x00000c74
-
-/* VR4131 0x00000c80-0x00000c83 */
-#define PRID_VR4131_REV1_2 0x00000c80
-#define PRID_VR4131_REV2_0 0x00000c81
-#define PRID_VR4131_REV2_1 0x00000c82
-#define PRID_VR4131_REV2_2 0x00000c83
-
-/* VR4133 0x00000c84- */
-#define PRID_VR4133 0x00000c84
-
-/*
- * Bus Control Uint
- */
-extern unsigned long vr41xx_calculate_clock_frequency(void);
-extern unsigned long vr41xx_get_vtclock_frequency(void);
-extern unsigned long vr41xx_get_tclock_frequency(void);
-
-/*
- * Clock Mask Unit
- */
-typedef enum {
- PIU_CLOCK,
- SIU_CLOCK,
- AIU_CLOCK,
- KIU_CLOCK,
- FIR_CLOCK,
- DSIU_CLOCK,
- CSI_CLOCK,
- PCIU_CLOCK,
- HSP_CLOCK,
- PCI_CLOCK,
- CEU_CLOCK,
- ETHER0_CLOCK,
- ETHER1_CLOCK
-} vr41xx_clock_t;
-
-extern void vr41xx_supply_clock(vr41xx_clock_t clock);
-extern void vr41xx_mask_clock(vr41xx_clock_t clock);
-
-/*
- * Interrupt Control Unit
- */
-extern int vr41xx_set_intassign(unsigned int irq, unsigned char intassign);
-extern int cascade_irq(unsigned int irq, int (*get_irq)(unsigned int));
-
-#define PIUINT_COMMAND 0x0040
-#define PIUINT_DATA 0x0020
-#define PIUINT_PAGE1 0x0010
-#define PIUINT_PAGE0 0x0008
-#define PIUINT_DATALOST 0x0004
-#define PIUINT_STATUSCHANGE 0x0001
-
-extern void vr41xx_enable_piuint(uint16_t mask);
-extern void vr41xx_disable_piuint(uint16_t mask);
-
-#define AIUINT_INPUT_DMAEND 0x0800
-#define AIUINT_INPUT_DMAHALT 0x0400
-#define AIUINT_INPUT_DATALOST 0x0200
-#define AIUINT_INPUT_DATA 0x0100
-#define AIUINT_OUTPUT_DMAEND 0x0008
-#define AIUINT_OUTPUT_DMAHALT 0x0004
-#define AIUINT_OUTPUT_NODATA 0x0002
-
-extern void vr41xx_enable_aiuint(uint16_t mask);
-extern void vr41xx_disable_aiuint(uint16_t mask);
-
-#define KIUINT_DATALOST 0x0004
-#define KIUINT_DATAREADY 0x0002
-#define KIUINT_SCAN 0x0001
-
-extern void vr41xx_enable_kiuint(uint16_t mask);
-extern void vr41xx_disable_kiuint(uint16_t mask);
-
-#define DSIUINT_CTS 0x0800
-#define DSIUINT_RXERR 0x0400
-#define DSIUINT_RX 0x0200
-#define DSIUINT_TX 0x0100
-#define DSIUINT_ALL 0x0f00
-
-extern void vr41xx_enable_dsiuint(uint16_t mask);
-extern void vr41xx_disable_dsiuint(uint16_t mask);
-
-#define FIRINT_UNIT 0x0010
-#define FIRINT_RX_DMAEND 0x0008
-#define FIRINT_RX_DMAHALT 0x0004
-#define FIRINT_TX_DMAEND 0x0002
-#define FIRINT_TX_DMAHALT 0x0001
-
-extern void vr41xx_enable_firint(uint16_t mask);
-extern void vr41xx_disable_firint(uint16_t mask);
-
-extern void vr41xx_enable_pciint(void);
-extern void vr41xx_disable_pciint(void);
-
-extern void vr41xx_enable_scuint(void);
-extern void vr41xx_disable_scuint(void);
-
-#define CSIINT_TX_DMAEND 0x0040
-#define CSIINT_TX_DMAHALT 0x0020
-#define CSIINT_TX_DATA 0x0010
-#define CSIINT_TX_FIFOEMPTY 0x0008
-#define CSIINT_RX_DMAEND 0x0004
-#define CSIINT_RX_DMAHALT 0x0002
-#define CSIINT_RX_FIFOEMPTY 0x0001
-
-extern void vr41xx_enable_csiint(uint16_t mask);
-extern void vr41xx_disable_csiint(uint16_t mask);
-
-extern void vr41xx_enable_bcuint(void);
-extern void vr41xx_disable_bcuint(void);
-
-#ifdef CONFIG_SERIAL_VR41XX_CONSOLE
-extern void vr41xx_siu_setup(void);
-#else
-static inline void vr41xx_siu_setup(void) {}
-#endif
-
-#endif /* __NEC_VR41XX_H */
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 04ca75278f02..c4501897b870 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -196,11 +196,6 @@ void output_mm_defines(void)
#endif
DEFINE(_PTE_T_LOG2, PTE_T_LOG2);
BLANK();
- DEFINE(_PGD_ORDER, PGD_ORDER);
-#ifndef __PAGETABLE_PMD_FOLDED
- DEFINE(_PMD_ORDER, PMD_ORDER);
-#endif
- DEFINE(_PTE_ORDER, PTE_ORDER);
BLANK();
DEFINE(_PMD_SHIFT, PMD_SHIFT);
DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT);
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index d510f628ee03..7ddf07f255f3 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -1115,46 +1115,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
- case PRID_IMP_VR41XX:
- set_isa(c, MIPS_CPU_ISA_III);
- c->fpu_msk31 |= FPU_CSR_CONDX;
- c->options = R4K_OPTS;
- c->tlbsize = 32;
- switch (c->processor_id & 0xf0) {
- case PRID_REV_VR4111:
- c->cputype = CPU_VR4111;
- __cpu_name[cpu] = "NEC VR4111";
- break;
- case PRID_REV_VR4121:
- c->cputype = CPU_VR4121;
- __cpu_name[cpu] = "NEC VR4121";
- break;
- case PRID_REV_VR4122:
- if ((c->processor_id & 0xf) < 0x3) {
- c->cputype = CPU_VR4122;
- __cpu_name[cpu] = "NEC VR4122";
- } else {
- c->cputype = CPU_VR4181A;
- __cpu_name[cpu] = "NEC VR4181A";
- }
- break;
- case PRID_REV_VR4130:
- if ((c->processor_id & 0xf) < 0x4) {
- c->cputype = CPU_VR4131;
- __cpu_name[cpu] = "NEC VR4131";
- } else {
- c->cputype = CPU_VR4133;
- c->options |= MIPS_CPU_LLSC;
- __cpu_name[cpu] = "NEC VR4133";
- }
- break;
- default:
- printk(KERN_INFO "Unexpected CPU of NEC VR4100 series\n");
- c->cputype = CPU_VR41XX;
- __cpu_name[cpu] = "NEC Vr41xx";
- break;
- }
- break;
case PRID_IMP_R4300:
c->cputype = CPU_R4300;
__cpu_name[cpu] = "R4300";
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
index d5f7362e8c24..dc023a979803 100644
--- a/arch/mips/kernel/mips-mt.c
+++ b/arch/mips/kernel/mips-mt.c
@@ -230,7 +230,7 @@ void mips_mt_set_cpuoptions(void)
struct class *mt_class;
-static int __init mt_init(void)
+static int __init mips_mt_init(void)
{
struct class *mtc;
@@ -243,4 +243,4 @@ static int __init mt_init(void)
return 0;
}
-subsys_initcall(mt_init);
+subsys_initcall(mips_mt_init);
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index bb43bf850314..8eba5a1ed664 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -311,7 +311,7 @@ static void *c_start(struct seq_file *m, loff_t *pos)
{
unsigned long i = *pos;
- return i < NR_CPUS ? (void *) (i + 1) : NULL;
+ return i < nr_cpu_ids ? (void *) (i + 1) : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index 3d0cf471f2fe..b2cc2c2dd4bf 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -159,7 +159,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
/* Map GIC user page. */
if (gic_size) {
gic_base = (unsigned long)mips_gic_base + MIPS_GIC_USER_OFS;
- gic_pfn = virt_to_phys((void *)gic_base) >> PAGE_SHIFT;
+ gic_pfn = PFN_DOWN(__pa(gic_base));
ret = io_remap_pfn_range(vma, base, gic_pfn, gic_size,
pgprot_noncached(vma->vm_page_prot));
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 1bfd1b501d82..74cd64a24d05 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -80,7 +80,7 @@ pgd_t *kvm_pgd_alloc(void)
{
pgd_t *ret;
- ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGD_ORDER);
+ ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGD_TABLE_ORDER);
if (ret)
kvm_pgd_init(ret);
@@ -615,17 +615,17 @@ retry:
* Used to check for invalidations in progress, of the pfn that is
* returned by pfn_to_pfn_prot below.
*/
- mmu_seq = kvm->mmu_notifier_seq;
+ mmu_seq = kvm->mmu_invalidate_seq;
/*
- * Ensure the read of mmu_notifier_seq isn't reordered with PTE reads in
- * gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
+ * Ensure the read of mmu_invalidate_seq isn't reordered with PTE reads
+ * in gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
* risk the page we get a reference to getting unmapped before we have a
- * chance to grab the mmu_lock without mmu_notifier_retry() noticing.
+ * chance to grab the mmu_lock without mmu_invalidate_retry() noticing.
*
* This smp_rmb() pairs with the effective smp_wmb() of the combination
* of the pte_unmap_unlock() after the PTE is zapped, and the
* spin_lock() in kvm_mmu_notifier_invalidate_<page|range_end>() before
- * mmu_notifier_seq is incremented.
+ * mmu_invalidate_seq is incremented.
*/
smp_rmb();
@@ -638,7 +638,7 @@ retry:
spin_lock(&kvm->mmu_lock);
/* Check if an invalidation has taken place since we got pfn */
- if (mmu_notifier_retry(kvm, mmu_seq)) {
+ if (mmu_invalidate_retry(kvm, mmu_seq)) {
/*
* This can happen when mappings are changed asynchronously, but
* also synchronously if a COW is triggered by
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
index 4256423632c4..8ce97b8741fd 100644
--- a/arch/mips/lib/dump_tlb.c
+++ b/arch/mips/lib/dump_tlb.c
@@ -59,14 +59,6 @@ static inline const char *msk2str(unsigned int mask)
case PM_8M: return "8Mb";
case PM_32M: return "32Mb";
#endif
-#ifndef CONFIG_CPU_VR41XX
- case PM_1M: return "1Mb";
- case PM_4M: return "4Mb";
- case PM_16M: return "16Mb";
- case PM_64M: return "64Mb";
- case PM_256M: return "256Mb";
- case PM_1G: return "1Gb";
-#endif
}
return "";
}
diff --git a/arch/mips/loongson64/numa.c b/arch/mips/loongson64/numa.c
index 69a533148efd..8f61e93c0c5b 100644
--- a/arch/mips/loongson64/numa.c
+++ b/arch/mips/loongson64/numa.c
@@ -196,7 +196,6 @@ void __init prom_init_numa_memory(void)
pr_info("CP0_PageGrain: CP0 5.1 (0x%x)\n", read_c0_pagegrain());
prom_meminit();
}
-EXPORT_SYMBOL(prom_init_numa_memory);
pg_data_t * __init arch_alloc_nodedata(int nid)
{
diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c
index e2d46cb93ca9..e02bd20b60a6 100644
--- a/arch/mips/math-emu/dsemul.c
+++ b/arch/mips/math-emu/dsemul.c
@@ -82,11 +82,8 @@ retry:
/* Ensure we have an allocation bitmap */
if (!mm_ctx->bd_emupage_allocmap) {
- mm_ctx->bd_emupage_allocmap =
- kcalloc(BITS_TO_LONGS(emupage_frame_count),
- sizeof(unsigned long),
- GFP_ATOMIC);
-
+ mm_ctx->bd_emupage_allocmap = bitmap_zalloc(emupage_frame_count,
+ GFP_ATOMIC);
if (!mm_ctx->bd_emupage_allocmap) {
idx = BD_EMUFRAME_NONE;
goto out_unlock;
@@ -206,7 +203,7 @@ void dsemul_mm_cleanup(struct mm_struct *mm)
{
mm_context_t *mm_ctx = &mm->context;
- kfree(mm_ctx->bd_emupage_allocmap);
+ bitmap_free(mm_ctx->bd_emupage_allocmap);
}
int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index ccb9e47322b0..a549fa98c2f4 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -1194,50 +1194,6 @@ static void probe_pcache(void)
c->options |= MIPS_CPU_PREFETCH;
break;
- case CPU_VR4133:
- write_c0_config(config & ~VR41_CONF_P4K);
- fallthrough;
- case CPU_VR4131:
- /* Workaround for cache instruction bug of VR4131 */
- if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
- c->processor_id == 0x0c82U) {
- config |= 0x00400000U;
- if (c->processor_id == 0x0c80U)
- config |= VR41_CONF_BP;
- write_c0_config(config);
- } else
- c->options |= MIPS_CPU_CACHE_CDEX_P;
-
- icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
- c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
- c->icache.ways = 2;
- c->icache.waybit = __ffs(icache_size/2);
-
- dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
- c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
- c->dcache.ways = 2;
- c->dcache.waybit = __ffs(dcache_size/2);
- break;
-
- case CPU_VR41XX:
- case CPU_VR4111:
- case CPU_VR4121:
- case CPU_VR4122:
- case CPU_VR4181:
- case CPU_VR4181A:
- icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
- c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
- c->icache.ways = 1;
- c->icache.waybit = 0; /* doesn't matter */
-
- dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
- c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
- c->dcache.ways = 1;
- c->dcache.waybit = 0; /* does not matter */
-
- c->options |= MIPS_CPU_CACHE_CDEX_P;
- break;
-
case CPU_RM7000:
rm7k_erratum31();
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 7be7240f7703..11b3e7ddafd5 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -159,6 +159,9 @@ EXPORT_SYMBOL(_page_cachable_default);
#define PM(p) __pgprot(_page_cachable_default | (p))
+static pgprot_t protection_map[16] __ro_after_init;
+DECLARE_VM_GET_PAGE_PROT
+
static inline void setup_protection_map(void)
{
protection_map[0] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
diff --git a/arch/mips/mm/context.c b/arch/mips/mm/context.c
index b25564090939..966f40066f03 100644
--- a/arch/mips/mm/context.c
+++ b/arch/mips/mm/context.c
@@ -67,7 +67,7 @@ static void flush_context(void)
int cpu;
/* Update the list of reserved MMIDs and the MMID bitmap */
- bitmap_clear(mmid_map, 0, num_mmids);
+ bitmap_zero(mmid_map, num_mmids);
/* Reserve an MMID for kmap/wired entries */
__set_bit(MMID_KERNEL_WIRED, mmid_map);
@@ -277,8 +277,7 @@ static int mmid_init(void)
WARN_ON(num_mmids <= num_possible_cpus());
atomic64_set(&mmid_version, asid_first_version(0));
- mmid_map = kcalloc(BITS_TO_LONGS(num_mmids), sizeof(*mmid_map),
- GFP_KERNEL);
+ mmid_map = bitmap_zalloc(num_mmids, GFP_KERNEL);
if (!mmid_map)
panic("Failed to allocate bitmap for %u MMIDs\n", num_mmids);
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index b08bc556d30d..a27045f5a556 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -162,6 +162,10 @@ good_area:
return;
}
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
diff --git a/arch/mips/mm/pgtable.c b/arch/mips/mm/pgtable.c
index 05560b042d82..3b7590660a04 100644
--- a/arch/mips/mm/pgtable.c
+++ b/arch/mips/mm/pgtable.c
@@ -12,7 +12,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *ret, *init;
- ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
+ ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_TABLE_ORDER);
if (ret) {
init = pgd_offset(&init_mm, 0UL);
pgd_init((unsigned long)ret);
diff --git a/arch/mips/mm/physaddr.c b/arch/mips/mm/physaddr.c
index a1ced5e44951..f9b8c85e9843 100644
--- a/arch/mips/mm/physaddr.c
+++ b/arch/mips/mm/physaddr.c
@@ -5,6 +5,7 @@
#include <linux/mmdebug.h>
#include <linux/mm.h>
+#include <asm/addrspace.h>
#include <asm/sections.h>
#include <asm/io.h>
#include <asm/page.h>
@@ -12,15 +13,6 @@
static inline bool __debug_virt_addr_valid(unsigned long x)
{
- /* high_memory does not get immediately defined, and there
- * are early callers of __pa() against PAGE_OFFSET
- */
- if (!high_memory && x >= PAGE_OFFSET)
- return true;
-
- if (high_memory && x >= PAGE_OFFSET && x < (unsigned long)high_memory)
- return true;
-
/*
* MAX_DMA_ADDRESS is a virtual address that may not correspond to an
* actual physical address. Enough code relies on
@@ -30,7 +22,9 @@ static inline bool __debug_virt_addr_valid(unsigned long x)
if (x == MAX_DMA_ADDRESS)
return true;
- return false;
+ return x >= PAGE_OFFSET && (KSEGX(x) < KSEG2 ||
+ IS_ENABLED(CONFIG_EVA) ||
+ !IS_ENABLED(CONFIG_HIGHMEM));
}
phys_addr_t __virt_to_phys(volatile const void *x)
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 8dbbd99fc7e8..80e05ee98d62 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -586,25 +586,6 @@ void build_tlb_write_entry(u32 **p, struct uasm_label **l,
tlbw(p);
break;
- case CPU_VR4111:
- case CPU_VR4121:
- case CPU_VR4122:
- case CPU_VR4181:
- case CPU_VR4181A:
- uasm_i_nop(p);
- uasm_i_nop(p);
- tlbw(p);
- uasm_i_nop(p);
- uasm_i_nop(p);
- break;
-
- case CPU_VR4131:
- case CPU_VR4133:
- uasm_i_nop(p);
- uasm_i_nop(p);
- tlbw(p);
- break;
-
case CPU_XBURST:
tlbw(p);
uasm_i_nop(p);
@@ -626,7 +607,7 @@ static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
return;
}
- if (cpu_has_rixi && !!_PAGE_NO_EXEC) {
+ if (cpu_has_rixi && _PAGE_NO_EXEC != 0) {
if (fill_includes_sw_bits) {
UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
} else {
@@ -818,7 +799,7 @@ void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
* everything but the lower xuseg addresses goes down
* the module_alloc/vmalloc path.
*/
- uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+ uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3);
uasm_il_bnez(p, r, ptr, label_vmalloc);
} else {
uasm_il_bltz(p, r, tmp, label_vmalloc);
@@ -995,22 +976,6 @@ static void build_adjust_context(u32 **p, unsigned int ctx)
unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
- switch (current_cpu_type()) {
- case CPU_VR41XX:
- case CPU_VR4111:
- case CPU_VR4121:
- case CPU_VR4122:
- case CPU_VR4131:
- case CPU_VR4181:
- case CPU_VR4181A:
- case CPU_VR4133:
- shift += 2;
- break;
-
- default:
- break;
- }
-
if (shift)
UASM_i_SRL(p, ctx, ctx, shift);
uasm_i_andi(p, ctx, ctx, mask);
@@ -1127,7 +1092,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
uasm_i_dsrl_safe(p, scratch, tmp,
- PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+ PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3);
uasm_il_bnez(p, r, scratch, label_vmalloc);
if (pgd_reg == -1) {
@@ -1493,12 +1458,12 @@ static void setup_pw(void)
#endif
pgd_i = PGDIR_SHIFT; /* 1st level PGD */
#ifndef __PAGETABLE_PMD_FOLDED
- pgd_w = PGDIR_SHIFT - PMD_SHIFT + PGD_ORDER;
+ pgd_w = PGDIR_SHIFT - PMD_SHIFT + PGD_TABLE_ORDER;
pmd_i = PMD_SHIFT; /* 2nd level PMD */
pmd_w = PMD_SHIFT - PAGE_SHIFT;
#else
- pgd_w = PGDIR_SHIFT - PAGE_SHIFT + PGD_ORDER;
+ pgd_w = PGDIR_SHIFT - PAGE_SHIFT + PGD_TABLE_ORDER;
#endif
pt_i = PAGE_SHIFT; /* 3rd level PTE */
@@ -1536,7 +1501,7 @@ static void build_loongson3_tlb_refill_handler(void)
if (check_for_high_segbits) {
uasm_i_dmfc0(&p, K0, C0_BADVADDR);
- uasm_i_dsrl_safe(&p, K1, K0, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+ uasm_i_dsrl_safe(&p, K1, K0, PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3);
uasm_il_beqz(&p, &r, K1, label_vmalloc);
uasm_i_nop(&p);
@@ -2065,7 +2030,7 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
UASM_i_MFC0(p, wr.r1, C0_BADVADDR);
UASM_i_LW(p, wr.r2, 0, wr.r2);
- UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
+ UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT - PTE_T_LOG2);
uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1);
@@ -2565,7 +2530,7 @@ static void check_pabits(void)
unsigned long entry;
unsigned pabits, fillbits;
- if (!cpu_has_rixi || !_PAGE_NO_EXEC) {
+ if (!cpu_has_rixi || _PAGE_NO_EXEC == 0) {
/*
* We'll only be making use of the fact that we can rotate bits
* into the fill if the CPU supports RIXI, so don't bother
@@ -2611,7 +2576,7 @@ void build_tlb_refill_handler(void)
check_pabits();
#ifdef CONFIG_64BIT
- check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+ check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3);
#endif
if (cpu_has_3kex) {
diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile
index ed0388485a15..a6e9785b537e 100644
--- a/arch/mips/pci/Makefile
+++ b/arch/mips/pci/Makefile
@@ -13,7 +13,6 @@ obj-$(CONFIG_PCI_DRIVERS_GENERIC)+= pci-generic.o
obj-$(CONFIG_MIPS_BONITO64) += ops-bonito64.o
obj-$(CONFIG_PCI_GT64XXX_PCI0) += ops-gt64xxx_pci0.o
obj-$(CONFIG_MIPS_MSC) += ops-msc.o
-obj-$(CONFIG_PCI_VR41XX) += ops-vr41xx.o pci-vr41xx.o
obj-$(CONFIG_PCI_TX4927) += ops-tx4927.o
obj-$(CONFIG_BCM47XX) += pci-bcm47xx.o
obj-$(CONFIG_BCM63XX) += pci-bcm63xx.o fixup-bcm63xx.o \
@@ -42,14 +41,9 @@ obj-$(CONFIG_PCI_LANTIQ) += pci-lantiq.o ops-lantiq.o
obj-$(CONFIG_SOC_MT7620) += pci-mt7620.o
obj-$(CONFIG_SOC_RT288X) += pci-rt2880.o
obj-$(CONFIG_SOC_RT3883) += pci-rt3883.o
-obj-$(CONFIG_TANBAC_TB0219) += fixup-tb0219.o
-obj-$(CONFIG_TANBAC_TB0226) += fixup-tb0226.o
-obj-$(CONFIG_TANBAC_TB0287) += fixup-tb0287.o
obj-$(CONFIG_SOC_TX4927) += pci-tx4927.o
obj-$(CONFIG_SOC_TX4938) += pci-tx4938.o
obj-$(CONFIG_TOSHIBA_RBTX4927) += fixup-rbtx4927.o
-obj-$(CONFIG_VICTOR_MPC30X) += fixup-mpc30x.o
-obj-$(CONFIG_ZAO_CAPCELLA) += fixup-capcella.o
obj-$(CONFIG_MIKROTIK_RB532) += pci-rc32434.o ops-rc32434.o fixup-rc32434.o
obj-$(CONFIG_CAVIUM_OCTEON_SOC) += pci-octeon.o pcie-octeon.o
diff --git a/arch/mips/pci/fixup-capcella.c b/arch/mips/pci/fixup-capcella.c
deleted file mode 100644
index dc8cd98a1761..000000000000
--- a/arch/mips/pci/fixup-capcella.c
+++ /dev/null
@@ -1,37 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * fixup-cappcela.c, The ZAO Networks Capcella specific PCI fixups.
- *
- * Copyright (C) 2002,2004 Yoichi Yuasa <yuasa@linux-mips.org>
- */
-#include <linux/init.h>
-#include <linux/pci.h>
-
-#include <asm/vr41xx/capcella.h>
-
-/*
- * Shortcuts
- */
-#define INT1 RTL8139_1_IRQ
-#define INT2 RTL8139_2_IRQ
-#define INTA PC104PLUS_INTA_IRQ
-#define INTB PC104PLUS_INTB_IRQ
-#define INTC PC104PLUS_INTC_IRQ
-#define INTD PC104PLUS_INTD_IRQ
-
-static char irq_tab_capcella[][5] = {
- [11] = { -1, INT1, INT1, INT1, INT1 },
- [12] = { -1, INT2, INT2, INT2, INT2 },
- [14] = { -1, INTA, INTB, INTC, INTD }
-};
-
-int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
- return irq_tab_capcella[slot][pin];
-}
-
-/* Do platform specific device initialization at pci_enable_device() time */
-int pcibios_plat_dev_init(struct pci_dev *dev)
-{
- return 0;
-}
diff --git a/arch/mips/pci/fixup-lemote2f.c b/arch/mips/pci/fixup-lemote2f.c
index 632ff2daa338..afafda03ed4e 100644
--- a/arch/mips/pci/fixup-lemote2f.c
+++ b/arch/mips/pci/fixup-lemote2f.c
@@ -80,7 +80,7 @@ int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
}
return dev->irq;
} else {
- printk(KERN_INFO " strange pci slot number.\n");
+ printk(KERN_INFO "strange PCI slot number.\n");
return 0;
}
}
diff --git a/arch/mips/pci/fixup-mpc30x.c b/arch/mips/pci/fixup-mpc30x.c
deleted file mode 100644
index 27c75f268c4c..000000000000
--- a/arch/mips/pci/fixup-mpc30x.c
+++ /dev/null
@@ -1,36 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * fixup-mpc30x.c, The Victor MP-C303/304 specific PCI fixups.
- *
- * Copyright (C) 2002,2004 Yoichi Yuasa <yuasa@linux-mips.org>
- */
-#include <linux/init.h>
-#include <linux/pci.h>
-
-#include <asm/vr41xx/mpc30x.h>
-
-static const int internal_func_irqs[] = {
- VRC4173_CASCADE_IRQ,
- VRC4173_AC97_IRQ,
- VRC4173_USB_IRQ,
-};
-
-static const int irq_tab_mpc30x[] = {
- [12] = VRC4173_PCMCIA1_IRQ,
- [13] = VRC4173_PCMCIA2_IRQ,
- [29] = MQ200_IRQ,
-};
-
-int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
- if (slot == 30)
- return internal_func_irqs[PCI_FUNC(dev->devfn)];
-
- return irq_tab_mpc30x[slot];
-}
-
-/* Do platform specific device initialization at pci_enable_device() time */
-int pcibios_plat_dev_init(struct pci_dev *dev)
-{
- return 0;
-}
diff --git a/arch/mips/pci/fixup-tb0219.c b/arch/mips/pci/fixup-tb0219.c
deleted file mode 100644
index 43942998599b..000000000000
--- a/arch/mips/pci/fixup-tb0219.c
+++ /dev/null
@@ -1,38 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * fixup-tb0219.c, The TANBAC TB0219 specific PCI fixups.
- *
- * Copyright (C) 2003 Megasolution Inc. <matsu@megasolution.jp>
- * Copyright (C) 2004-2005 Yoichi Yuasa <yuasa@linux-mips.org>
- */
-#include <linux/init.h>
-#include <linux/pci.h>
-
-#include <asm/vr41xx/tb0219.h>
-
-int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
- int irq = -1;
-
- switch (slot) {
- case 12:
- irq = TB0219_PCI_SLOT1_IRQ;
- break;
- case 13:
- irq = TB0219_PCI_SLOT2_IRQ;
- break;
- case 14:
- irq = TB0219_PCI_SLOT3_IRQ;
- break;
- default:
- break;
- }
-
- return irq;
-}
-
-/* Do platform specific device initialization at pci_enable_device() time */
-int pcibios_plat_dev_init(struct pci_dev *dev)
-{
- return 0;
-}
diff --git a/arch/mips/pci/fixup-tb0226.c b/arch/mips/pci/fixup-tb0226.c
deleted file mode 100644
index a4d1efadfd4a..000000000000
--- a/arch/mips/pci/fixup-tb0226.c
+++ /dev/null
@@ -1,73 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * fixup-tb0226.c, The TANBAC TB0226 specific PCI fixups.
- *
- * Copyright (C) 2002-2005 Yoichi Yuasa <yuasa@linux-mips.org>
- */
-#include <linux/init.h>
-#include <linux/pci.h>
-
-#include <asm/vr41xx/giu.h>
-#include <asm/vr41xx/tb0226.h>
-
-int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
- int irq = -1;
-
- switch (slot) {
- case 12:
- vr41xx_set_irq_trigger(GD82559_1_PIN,
- IRQ_TRIGGER_LEVEL,
- IRQ_SIGNAL_THROUGH);
- vr41xx_set_irq_level(GD82559_1_PIN, IRQ_LEVEL_LOW);
- irq = GD82559_1_IRQ;
- break;
- case 13:
- vr41xx_set_irq_trigger(GD82559_2_PIN,
- IRQ_TRIGGER_LEVEL,
- IRQ_SIGNAL_THROUGH);
- vr41xx_set_irq_level(GD82559_2_PIN, IRQ_LEVEL_LOW);
- irq = GD82559_2_IRQ;
- break;
- case 14:
- switch (pin) {
- case 1:
- vr41xx_set_irq_trigger(UPD720100_INTA_PIN,
- IRQ_TRIGGER_LEVEL,
- IRQ_SIGNAL_THROUGH);
- vr41xx_set_irq_level(UPD720100_INTA_PIN,
- IRQ_LEVEL_LOW);
- irq = UPD720100_INTA_IRQ;
- break;
- case 2:
- vr41xx_set_irq_trigger(UPD720100_INTB_PIN,
- IRQ_TRIGGER_LEVEL,
- IRQ_SIGNAL_THROUGH);
- vr41xx_set_irq_level(UPD720100_INTB_PIN,
- IRQ_LEVEL_LOW);
- irq = UPD720100_INTB_IRQ;
- break;
- case 3:
- vr41xx_set_irq_trigger(UPD720100_INTC_PIN,
- IRQ_TRIGGER_LEVEL,
- IRQ_SIGNAL_THROUGH);
- vr41xx_set_irq_level(UPD720100_INTC_PIN,
- IRQ_LEVEL_LOW);
- irq = UPD720100_INTC_IRQ;
- break;
- default:
- break;
- }
- break;
- default:
- break;
- }
-
- return irq;
-}
-
-/* Do platform specific device initialization at pci_enable_device() time */
-int pcibios_plat_dev_init(struct pci_dev *dev)
-{
- return 0;
-}
diff --git a/arch/mips/pci/fixup-tb0287.c b/arch/mips/pci/fixup-tb0287.c
deleted file mode 100644
index 721ec9ac1c76..000000000000
--- a/arch/mips/pci/fixup-tb0287.c
+++ /dev/null
@@ -1,52 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * fixup-tb0287.c, The TANBAC TB0287 specific PCI fixups.
- *
- * Copyright (C) 2005 Yoichi Yuasa <yuasa@linux-mips.org>
- */
-#include <linux/init.h>
-#include <linux/pci.h>
-
-#include <asm/vr41xx/tb0287.h>
-
-int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
- unsigned char bus;
- int irq = -1;
-
- bus = dev->bus->number;
- if (bus == 0) {
- switch (slot) {
- case 16:
- irq = TB0287_SM501_IRQ;
- break;
- case 17:
- irq = TB0287_SIL680A_IRQ;
- break;
- default:
- break;
- }
- } else if (bus == 1) {
- switch (PCI_SLOT(dev->devfn)) {
- case 0:
- irq = TB0287_PCI_SLOT_IRQ;
- break;
- case 2:
- case 3:
- irq = TB0287_RTL8110_IRQ;
- break;
- default:
- break;
- }
- } else if (bus > 1) {
- irq = TB0287_PCI_SLOT_IRQ;
- }
-
- return irq;
-}
-
-/* Do platform specific device initialization at pci_enable_device() time */
-int pcibios_plat_dev_init(struct pci_dev *dev)
-{
- return 0;
-}
diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c
index c2860ebbd863..abc3b61bff9f 100644
--- a/arch/mips/pci/msi-octeon.c
+++ b/arch/mips/pci/msi-octeon.c
@@ -46,16 +46,17 @@ static DEFINE_SPINLOCK(msi_free_irq_bitmask_lock);
static int msi_irq_size;
/**
- * Called when a driver request MSI interrupts instead of the
+ * arch_setup_msi_irq() - setup MSI IRQs for a device
+ * @dev: Device requesting MSI interrupts
+ * @desc: MSI descriptor
+ *
+ * Called when a driver requests MSI interrupts instead of the
* legacy INT A-D. This routine will allocate multiple interrupts
* for MSI devices that support them. A device can override this by
* programming the MSI control bits [6:4] before calling
* pci_enable_msi().
*
- * @dev: Device requesting MSI interrupts
- * @desc: MSI descriptor
- *
- * Returns 0 on success.
+ * Return: %0 on success, non-%0 on error.
*/
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
@@ -186,10 +187,11 @@ msi_irq_allocated:
}
/**
+ * arch_teardown_msi_irq() - release MSI IRQs for a device
+ * @irq: The devices first irq number. There may be multiple in sequence.
+ *
* Called when a device no longer needs its MSI interrupts. All
* MSI interrupts for the device are freed.
- *
- * @irq: The devices first irq number. There may be multple in sequence.
*/
void arch_teardown_msi_irq(unsigned int irq)
{
diff --git a/arch/mips/pci/ops-vr41xx.c b/arch/mips/pci/ops-vr41xx.c
deleted file mode 100644
index 7b7709aa14c7..000000000000
--- a/arch/mips/pci/ops-vr41xx.c
+++ /dev/null
@@ -1,113 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * ops-vr41xx.c, PCI configuration routines for the PCIU of NEC VR4100 series.
- *
- * Copyright (C) 2001-2003 MontaVista Software Inc.
- * Author: Yoichi Yuasa <source@mvista.com>
- * Copyright (C) 2004-2005 Yoichi Yuasa <yuasa@linux-mips.org>
- */
-/*
- * Changes:
- * MontaVista Software Inc. <source@mvista.com>
- * - New creation, NEC VR4122 and VR4131 are supported.
- */
-#include <linux/pci.h>
-#include <linux/types.h>
-
-#include <asm/io.h>
-
-#define PCICONFDREG (void __iomem *)KSEG1ADDR(0x0f000c14)
-#define PCICONFAREG (void __iomem *)KSEG1ADDR(0x0f000c18)
-
-static inline int set_pci_configuration_address(unsigned char number,
- unsigned int devfn, int where)
-{
- if (number == 0) {
- /*
- * Type 0 configuration
- */
- if (PCI_SLOT(devfn) < 11 || where > 0xff)
- return -EINVAL;
-
- writel((1U << PCI_SLOT(devfn)) | (PCI_FUNC(devfn) << 8) |
- (where & 0xfc), PCICONFAREG);
- } else {
- /*
- * Type 1 configuration
- */
- if (where > 0xff)
- return -EINVAL;
-
- writel(((uint32_t)number << 16) | ((devfn & 0xff) << 8) |
- (where & 0xfc) | 1U, PCICONFAREG);
- }
-
- return 0;
-}
-
-static int pci_config_read(struct pci_bus *bus, unsigned int devfn, int where,
- int size, uint32_t *val)
-{
- uint32_t data;
-
- *val = 0xffffffffU;
- if (set_pci_configuration_address(bus->number, devfn, where) < 0)
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- data = readl(PCICONFDREG);
-
- switch (size) {
- case 1:
- *val = (data >> ((where & 3) << 3)) & 0xffU;
- break;
- case 2:
- *val = (data >> ((where & 2) << 3)) & 0xffffU;
- break;
- case 4:
- *val = data;
- break;
- default:
- return PCIBIOS_FUNC_NOT_SUPPORTED;
- }
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int pci_config_write(struct pci_bus *bus, unsigned int devfn, int where,
- int size, uint32_t val)
-{
- uint32_t data;
- int shift;
-
- if (set_pci_configuration_address(bus->number, devfn, where) < 0)
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- data = readl(PCICONFDREG);
-
- switch (size) {
- case 1:
- shift = (where & 3) << 3;
- data &= ~(0xffU << shift);
- data |= ((val & 0xffU) << shift);
- break;
- case 2:
- shift = (where & 2) << 3;
- data &= ~(0xffffU << shift);
- data |= ((val & 0xffffU) << shift);
- break;
- case 4:
- data = val;
- break;
- default:
- return PCIBIOS_FUNC_NOT_SUPPORTED;
- }
-
- writel(data, PCICONFDREG);
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-struct pci_ops vr41xx_pci_ops = {
- .read = pci_config_read,
- .write = pci_config_write,
-};
diff --git a/arch/mips/pci/pci-vr41xx.c b/arch/mips/pci/pci-vr41xx.c
deleted file mode 100644
index 4f250c55b6e6..000000000000
--- a/arch/mips/pci/pci-vr41xx.c
+++ /dev/null
@@ -1,309 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * pci-vr41xx.c, PCI Control Unit routines for the NEC VR4100 series.
- *
- * Copyright (C) 2001-2003 MontaVista Software Inc.
- * Author: Yoichi Yuasa <source@mvista.com>
- * Copyright (C) 2004-2008 Yoichi Yuasa <yuasa@linux-mips.org>
- * Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org)
- */
-/*
- * Changes:
- * MontaVista Software Inc. <source@mvista.com>
- * - New creation, NEC VR4122 and VR4131 are supported.
- */
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/types.h>
-
-#include <asm/cpu.h>
-#include <asm/io.h>
-#include <asm/vr41xx/pci.h>
-#include <asm/vr41xx/vr41xx.h>
-
-#include "pci-vr41xx.h"
-
-extern struct pci_ops vr41xx_pci_ops;
-
-static void __iomem *pciu_base;
-
-#define pciu_read(offset) readl(pciu_base + (offset))
-#define pciu_write(offset, value) writel((value), pciu_base + (offset))
-
-static struct pci_master_address_conversion pci_master_memory1 = {
- .bus_base_address = PCI_MASTER_MEM1_BUS_BASE_ADDRESS,
- .address_mask = PCI_MASTER_MEM1_ADDRESS_MASK,
- .pci_base_address = PCI_MASTER_MEM1_PCI_BASE_ADDRESS,
-};
-
-static struct pci_target_address_conversion pci_target_memory1 = {
- .address_mask = PCI_TARGET_MEM1_ADDRESS_MASK,
- .bus_base_address = PCI_TARGET_MEM1_BUS_BASE_ADDRESS,
-};
-
-static struct pci_master_address_conversion pci_master_io = {
- .bus_base_address = PCI_MASTER_IO_BUS_BASE_ADDRESS,
- .address_mask = PCI_MASTER_IO_ADDRESS_MASK,
- .pci_base_address = PCI_MASTER_IO_PCI_BASE_ADDRESS,
-};
-
-static struct pci_mailbox_address pci_mailbox = {
- .base_address = PCI_MAILBOX_BASE_ADDRESS,
-};
-
-static struct pci_target_address_window pci_target_window1 = {
- .base_address = PCI_TARGET_WINDOW1_BASE_ADDRESS,
-};
-
-static struct resource pci_mem_resource = {
- .name = "PCI Memory resources",
- .start = PCI_MEM_RESOURCE_START,
- .end = PCI_MEM_RESOURCE_END,
- .flags = IORESOURCE_MEM,
-};
-
-static struct resource pci_io_resource = {
- .name = "PCI I/O resources",
- .start = PCI_IO_RESOURCE_START,
- .end = PCI_IO_RESOURCE_END,
- .flags = IORESOURCE_IO,
-};
-
-static struct pci_controller_unit_setup vr41xx_pci_controller_unit_setup = {
- .master_memory1 = &pci_master_memory1,
- .target_memory1 = &pci_target_memory1,
- .master_io = &pci_master_io,
- .exclusive_access = CANNOT_LOCK_FROM_DEVICE,
- .wait_time_limit_from_irdy_to_trdy = 0,
- .mailbox = &pci_mailbox,
- .target_window1 = &pci_target_window1,
- .master_latency_timer = 0x80,
- .retry_limit = 0,
- .arbiter_priority_control = PCI_ARBITRATION_MODE_FAIR,
- .take_away_gnt_mode = PCI_TAKE_AWAY_GNT_DISABLE,
-};
-
-static struct pci_controller vr41xx_pci_controller = {
- .pci_ops = &vr41xx_pci_ops,
- .mem_resource = &pci_mem_resource,
- .io_resource = &pci_io_resource,
-};
-
-void __init vr41xx_pciu_setup(struct pci_controller_unit_setup *setup)
-{
- vr41xx_pci_controller_unit_setup = *setup;
-}
-
-static int __init vr41xx_pciu_init(void)
-{
- struct pci_controller_unit_setup *setup;
- struct pci_master_address_conversion *master;
- struct pci_target_address_conversion *target;
- struct pci_mailbox_address *mailbox;
- struct pci_target_address_window *window;
- unsigned long vtclock, pci_clock_max;
- uint32_t val;
-
- setup = &vr41xx_pci_controller_unit_setup;
-
- if (request_mem_region(PCIU_BASE, PCIU_SIZE, "PCIU") == NULL)
- return -EBUSY;
-
- pciu_base = ioremap(PCIU_BASE, PCIU_SIZE);
- if (pciu_base == NULL) {
- release_mem_region(PCIU_BASE, PCIU_SIZE);
- return -EBUSY;
- }
-
- /* Disable PCI interrupt */
- vr41xx_disable_pciint();
-
- /* Supply VTClock to PCIU */
- vr41xx_supply_clock(PCIU_CLOCK);
-
- /* Dummy write, waiting for supply of VTClock. */
- vr41xx_disable_pciint();
-
- /* Select PCI clock */
- if (setup->pci_clock_max != 0)
- pci_clock_max = setup->pci_clock_max;
- else
- pci_clock_max = PCI_CLOCK_MAX;
- vtclock = vr41xx_get_vtclock_frequency();
- if (vtclock < pci_clock_max)
- pciu_write(PCICLKSELREG, EQUAL_VTCLOCK);
- else if ((vtclock / 2) < pci_clock_max)
- pciu_write(PCICLKSELREG, HALF_VTCLOCK);
- else if (current_cpu_data.processor_id >= PRID_VR4131_REV2_1 &&
- (vtclock / 3) < pci_clock_max)
- pciu_write(PCICLKSELREG, ONE_THIRD_VTCLOCK);
- else if ((vtclock / 4) < pci_clock_max)
- pciu_write(PCICLKSELREG, QUARTER_VTCLOCK);
- else {
- printk(KERN_ERR "PCI Clock is over 33MHz.\n");
- iounmap(pciu_base);
- return -EINVAL;
- }
-
- /* Supply PCI clock by PCI bus */
- vr41xx_supply_clock(PCI_CLOCK);
-
- if (setup->master_memory1 != NULL) {
- master = setup->master_memory1;
- val = IBA(master->bus_base_address) |
- MASTER_MSK(master->address_mask) |
- WINEN |
- PCIA(master->pci_base_address);
- pciu_write(PCIMMAW1REG, val);
- } else {
- val = pciu_read(PCIMMAW1REG);
- val &= ~WINEN;
- pciu_write(PCIMMAW1REG, val);
- }
-
- if (setup->master_memory2 != NULL) {
- master = setup->master_memory2;
- val = IBA(master->bus_base_address) |
- MASTER_MSK(master->address_mask) |
- WINEN |
- PCIA(master->pci_base_address);
- pciu_write(PCIMMAW2REG, val);
- } else {
- val = pciu_read(PCIMMAW2REG);
- val &= ~WINEN;
- pciu_write(PCIMMAW2REG, val);
- }
-
- if (setup->target_memory1 != NULL) {
- target = setup->target_memory1;
- val = TARGET_MSK(target->address_mask) |
- WINEN |
- ITA(target->bus_base_address);
- pciu_write(PCITAW1REG, val);
- } else {
- val = pciu_read(PCITAW1REG);
- val &= ~WINEN;
- pciu_write(PCITAW1REG, val);
- }
-
- if (setup->target_memory2 != NULL) {
- target = setup->target_memory2;
- val = TARGET_MSK(target->address_mask) |
- WINEN |
- ITA(target->bus_base_address);
- pciu_write(PCITAW2REG, val);
- } else {
- val = pciu_read(PCITAW2REG);
- val &= ~WINEN;
- pciu_write(PCITAW2REG, val);
- }
-
- if (setup->master_io != NULL) {
- master = setup->master_io;
- val = IBA(master->bus_base_address) |
- MASTER_MSK(master->address_mask) |
- WINEN |
- PCIIA(master->pci_base_address);
- pciu_write(PCIMIOAWREG, val);
- } else {
- val = pciu_read(PCIMIOAWREG);
- val &= ~WINEN;
- pciu_write(PCIMIOAWREG, val);
- }
-
- if (setup->exclusive_access == CANNOT_LOCK_FROM_DEVICE)
- pciu_write(PCIEXACCREG, UNLOCK);
- else
- pciu_write(PCIEXACCREG, 0);
-
- if (current_cpu_type() == CPU_VR4122)
- pciu_write(PCITRDYVREG, TRDYV(setup->wait_time_limit_from_irdy_to_trdy));
-
- pciu_write(LATTIMEREG, MLTIM(setup->master_latency_timer));
-
- if (setup->mailbox != NULL) {
- mailbox = setup->mailbox;
- val = MBADD(mailbox->base_address) | TYPE_32BITSPACE |
- MSI_MEMORY | PREF_APPROVAL;
- pciu_write(MAILBAREG, val);
- }
-
- if (setup->target_window1) {
- window = setup->target_window1;
- val = PMBA(window->base_address) | TYPE_32BITSPACE |
- MSI_MEMORY | PREF_APPROVAL;
- pciu_write(PCIMBA1REG, val);
- }
-
- if (setup->target_window2) {
- window = setup->target_window2;
- val = PMBA(window->base_address) | TYPE_32BITSPACE |
- MSI_MEMORY | PREF_APPROVAL;
- pciu_write(PCIMBA2REG, val);
- }
-
- val = pciu_read(RETVALREG);
- val &= ~RTYVAL_MASK;
- val |= RTYVAL(setup->retry_limit);
- pciu_write(RETVALREG, val);
-
- val = pciu_read(PCIAPCNTREG);
- val &= ~(TKYGNT | PAPC);
-
- switch (setup->arbiter_priority_control) {
- case PCI_ARBITRATION_MODE_ALTERNATE_0:
- val |= PAPC_ALTERNATE_0;
- break;
- case PCI_ARBITRATION_MODE_ALTERNATE_B:
- val |= PAPC_ALTERNATE_B;
- break;
- default:
- val |= PAPC_FAIR;
- break;
- }
-
- if (setup->take_away_gnt_mode == PCI_TAKE_AWAY_GNT_ENABLE)
- val |= TKYGNT_ENABLE;
-
- pciu_write(PCIAPCNTREG, val);
-
- pciu_write(COMMANDREG, PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
- PCI_COMMAND_MASTER | PCI_COMMAND_PARITY |
- PCI_COMMAND_SERR);
-
- /* Clear bus error */
- pciu_read(BUSERRADREG);
-
- pciu_write(PCIENREG, PCIU_CONFIG_DONE);
-
- if (setup->mem_resource != NULL)
- vr41xx_pci_controller.mem_resource = setup->mem_resource;
-
- if (setup->io_resource != NULL) {
- vr41xx_pci_controller.io_resource = setup->io_resource;
- } else {
- set_io_port_base(IO_PORT_BASE);
- ioport_resource.start = IO_PORT_RESOURCE_START;
- ioport_resource.end = IO_PORT_RESOURCE_END;
- }
-
- if (setup->master_io) {
- void __iomem *io_map_base;
- struct resource *res = vr41xx_pci_controller.io_resource;
- master = setup->master_io;
- io_map_base = ioremap(master->bus_base_address,
- resource_size(res));
- if (!io_map_base) {
- iounmap(pciu_base);
- return -EBUSY;
- }
-
- vr41xx_pci_controller.io_map_base = (unsigned long)io_map_base;
- }
-
- register_pci_controller(&vr41xx_pci_controller);
-
- return 0;
-}
-
-arch_initcall(vr41xx_pciu_init);
diff --git a/arch/mips/pci/pci-vr41xx.h b/arch/mips/pci/pci-vr41xx.h
deleted file mode 100644
index 5595e4a39b2a..000000000000
--- a/arch/mips/pci/pci-vr41xx.h
+++ /dev/null
@@ -1,141 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * pci-vr41xx.h, Include file for PCI Control Unit of the NEC VR4100 series.
- *
- * Copyright (C) 2002 MontaVista Software Inc.
- * Author: Yoichi Yuasa <source@mvista.com>
- * Copyright (C) 2004-2005 Yoichi Yuasa <yuasa@linux-mips.org>
- */
-#ifndef __PCI_VR41XX_H
-#define __PCI_VR41XX_H
-
-#define PCIU_BASE 0x0f000c00UL
-#define PCIU_SIZE 0x200UL
-
-#define PCIMMAW1REG 0x00
-#define PCIMMAW2REG 0x04
-#define PCITAW1REG 0x08
-#define PCITAW2REG 0x0c
-#define PCIMIOAWREG 0x10
- #define IBA(addr) ((addr) & 0xff000000U)
- #define MASTER_MSK(mask) (((mask) >> 11) & 0x000fe000U)
- #define PCIA(addr) (((addr) >> 24) & 0x000000ffU)
- #define TARGET_MSK(mask) (((mask) >> 8) & 0x000fe000U)
- #define ITA(addr) (((addr) >> 24) & 0x000000ffU)
- #define PCIIA(addr) (((addr) >> 24) & 0x000000ffU)
- #define WINEN 0x1000U
-#define PCICONFDREG 0x14
-#define PCICONFAREG 0x18
-#define PCIMAILREG 0x1c
-#define BUSERRADREG 0x24
- #define EA(reg) ((reg) &0xfffffffc)
-
-#define INTCNTSTAREG 0x28
- #define MABTCLR 0x80000000U
- #define TRDYCLR 0x40000000U
- #define PARCLR 0x20000000U
- #define MBCLR 0x10000000U
- #define SERRCLR 0x08000000U
- #define RTYCLR 0x04000000U
- #define MABCLR 0x02000000U
- #define TABCLR 0x01000000U
- /* RFU */
- #define MABTMSK 0x00008000U
- #define TRDYMSK 0x00004000U
- #define PARMSK 0x00002000U
- #define MBMSK 0x00001000U
- #define SERRMSK 0x00000800U
- #define RTYMSK 0x00000400U
- #define MABMSK 0x00000200U
- #define TABMSK 0x00000100U
- #define IBAMABT 0x00000080U
- #define TRDYRCH 0x00000040U
- #define PAR 0x00000020U
- #define MB 0x00000010U
- #define PCISERR 0x00000008U
- #define RTYRCH 0x00000004U
- #define MABORT 0x00000002U
- #define TABORT 0x00000001U
-
-#define PCIEXACCREG 0x2c
- #define UNLOCK 0x2U
- #define EAREQ 0x1U
-#define PCIRECONTREG 0x30
- #define RTRYCNT(reg) ((reg) & 0x000000ffU)
-#define PCIENREG 0x34
- #define PCIU_CONFIG_DONE 0x4U
-#define PCICLKSELREG 0x38
- #define EQUAL_VTCLOCK 0x2U
- #define HALF_VTCLOCK 0x0U
- #define ONE_THIRD_VTCLOCK 0x3U
- #define QUARTER_VTCLOCK 0x1U
-#define PCITRDYVREG 0x3c
- #define TRDYV(val) ((uint32_t)(val) & 0xffU)
-#define PCICLKRUNREG 0x60
-
-#define VENDORIDREG 0x100
-#define DEVICEIDREG 0x100
-#define COMMANDREG 0x104
-#define STATUSREG 0x104
-#define REVIDREG 0x108
-#define CLASSREG 0x108
-#define CACHELSREG 0x10c
-#define LATTIMEREG 0x10c
- #define MLTIM(val) (((uint32_t)(val) << 7) & 0xff00U)
-#define MAILBAREG 0x110
-#define PCIMBA1REG 0x114
-#define PCIMBA2REG 0x118
- #define MBADD(base) ((base) & 0xfffff800U)
- #define PMBA(base) ((base) & 0xffe00000U)
- #define PREF 0x8U
- #define PREF_APPROVAL 0x8U
- #define PREF_DISAPPROVAL 0x0U
- #define TYPE 0x6U
- #define TYPE_32BITSPACE 0x0U
- #define MSI 0x1U
- #define MSI_MEMORY 0x0U
-#define INTLINEREG 0x13c
-#define INTPINREG 0x13c
-#define RETVALREG 0x140
-#define PCIAPCNTREG 0x140
- #define TKYGNT 0x04000000U
- #define TKYGNT_ENABLE 0x04000000U
- #define TKYGNT_DISABLE 0x00000000U
- #define PAPC 0x03000000U
- #define PAPC_ALTERNATE_B 0x02000000U
- #define PAPC_ALTERNATE_0 0x01000000U
- #define PAPC_FAIR 0x00000000U
- #define RTYVAL(val) (((uint32_t)(val) << 7) & 0xff00U)
- #define RTYVAL_MASK 0xff00U
-
-#define PCI_CLOCK_MAX 33333333U
-
-/*
- * Default setup
- */
-#define PCI_MASTER_MEM1_BUS_BASE_ADDRESS 0x10000000U
-#define PCI_MASTER_MEM1_ADDRESS_MASK 0x7c000000U
-#define PCI_MASTER_MEM1_PCI_BASE_ADDRESS 0x10000000U
-
-#define PCI_TARGET_MEM1_ADDRESS_MASK 0x08000000U
-#define PCI_TARGET_MEM1_BUS_BASE_ADDRESS 0x00000000U
-
-#define PCI_MASTER_IO_BUS_BASE_ADDRESS 0x16000000U
-#define PCI_MASTER_IO_ADDRESS_MASK 0x7e000000U
-#define PCI_MASTER_IO_PCI_BASE_ADDRESS 0x00000000U
-
-#define PCI_MAILBOX_BASE_ADDRESS 0x00000000U
-
-#define PCI_TARGET_WINDOW1_BASE_ADDRESS 0x00000000U
-
-#define IO_PORT_BASE KSEG1ADDR(PCI_MASTER_IO_BUS_BASE_ADDRESS)
-#define IO_PORT_RESOURCE_START PCI_MASTER_IO_PCI_BASE_ADDRESS
-#define IO_PORT_RESOURCE_END (~PCI_MASTER_IO_ADDRESS_MASK & PCI_MASTER_ADDRESS_MASK)
-
-#define PCI_IO_RESOURCE_START 0x01000000UL
-#define PCI_IO_RESOURCE_END 0x01ffffffUL
-
-#define PCI_MEM_RESOURCE_START 0x11000000UL
-#define PCI_MEM_RESOURCE_END 0x13ffffffUL
-
-#endif /* __PCI_VR41XX_H */
diff --git a/arch/mips/sgi-ip22/ip22-gio.c b/arch/mips/sgi-ip22/ip22-gio.c
index 38d12f417e48..8686e8c1c4e5 100644
--- a/arch/mips/sgi-ip22/ip22-gio.c
+++ b/arch/mips/sgi-ip22/ip22-gio.c
@@ -148,7 +148,7 @@ static void gio_device_remove(struct device *dev)
struct gio_device *gio_dev = to_gio_device(dev);
struct gio_driver *drv = to_gio_driver(dev->driver);
- if (dev->driver && drv->remove)
+ if (drv->remove)
drv->remove(gio_dev);
}
diff --git a/arch/mips/vr41xx/Kconfig b/arch/mips/vr41xx/Kconfig
deleted file mode 100644
index e0b651db371d..000000000000
--- a/arch/mips/vr41xx/Kconfig
+++ /dev/null
@@ -1,104 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-choice
- prompt "Machine type"
- depends on MACH_VR41XX
- default TANBAC_TB022X
-
-config CASIO_E55
- bool "CASIO CASSIOPEIA E-10/15/55/65"
- select CEVT_R4K
- select CSRC_R4K
- select DMA_NONCOHERENT
- select IRQ_MIPS_CPU
- select ISA
- select SYS_SUPPORTS_32BIT_KERNEL
- select SYS_SUPPORTS_LITTLE_ENDIAN
-
-config IBM_WORKPAD
- bool "IBM WorkPad z50"
- select CEVT_R4K
- select CSRC_R4K
- select DMA_NONCOHERENT
- select IRQ_MIPS_CPU
- select ISA
- select SYS_SUPPORTS_32BIT_KERNEL
- select SYS_SUPPORTS_LITTLE_ENDIAN
-
-config TANBAC_TB022X
- bool "TANBAC VR4131 multichip module and TANBAC VR4131DIMM"
- select CEVT_R4K
- select CSRC_R4K
- select DMA_NONCOHERENT
- select IRQ_MIPS_CPU
- select HAVE_PCI
- select SYS_SUPPORTS_32BIT_KERNEL
- select SYS_SUPPORTS_LITTLE_ENDIAN
- help
- The TANBAC VR4131 multichip module(TB0225) and
- the TANBAC VR4131DIMM(TB0229) are MIPS-based platforms
- manufactured by TANBAC.
- Please refer to <http://www.tanbac.co.jp/>
- about VR4131 multichip module and VR4131DIMM.
-
-config VICTOR_MPC30X
- bool "Victor MP-C303/304"
- select CEVT_R4K
- select CSRC_R4K
- select DMA_NONCOHERENT
- select IRQ_MIPS_CPU
- select HAVE_PCI
- select PCI_VR41XX
- select SYS_SUPPORTS_32BIT_KERNEL
- select SYS_SUPPORTS_LITTLE_ENDIAN
-
-config ZAO_CAPCELLA
- bool "ZAO Networks Capcella"
- select CEVT_R4K
- select CSRC_R4K
- select DMA_NONCOHERENT
- select IRQ_MIPS_CPU
- select HAVE_PCI
- select PCI_VR41XX
- select SYS_SUPPORTS_32BIT_KERNEL
- select SYS_SUPPORTS_LITTLE_ENDIAN
-
-endchoice
-
-choice
- prompt "Base board type"
- depends on TANBAC_TB022X
- default TANBAC_TB0287
-
-config TANBAC_TB0219
- bool "TANBAC DIMM Evaluation Kit(TB0219)"
- select GPIO_VR41XX
- select PCI_VR41XX
- help
- The TANBAC DIMM Evaluation Kit(TB0219) is a MIPS-based platform
- manufactured by TANBAC.
- Please refer to <http://www.tanbac.co.jp/> about DIMM Evaluation Kit.
-
-config TANBAC_TB0226
- bool "TANBAC Mbase(TB0226)"
- select GPIO_VR41XX
- select PCI_VR41XX
- help
- The TANBAC Mbase(TB0226) is a MIPS-based platform
- manufactured by TANBAC.
- Please refer to <http://www.tanbac.co.jp/> about Mbase.
-
-config TANBAC_TB0287
- bool "TANBAC Mini-ITX DIMM base(TB0287)"
- select PCI_VR41XX
- help
- The TANBAC Mini-ITX DIMM base(TB0287) is a MIPS-based platform
- manufactured by TANBAC.
- Please refer to <http://www.tanbac.co.jp/> about Mini-ITX DIMM base.
-
-endchoice
-
-config PCI_VR41XX
- bool "Add PCI control unit support of NEC VR4100 series"
- depends on MACH_VR41XX && HAVE_PCI
- default y
- select PCI
diff --git a/arch/mips/vr41xx/Makefile b/arch/mips/vr41xx/Makefile
deleted file mode 100644
index 765020d5ee4d..000000000000
--- a/arch/mips/vr41xx/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-obj-$(CONFIG_MACH_VR41XX) += common/
-obj-$(CONFIG_CASIO_E55) += casio-e55/
-obj-$(CONFIG_IBM_WORKPAD) += ibm-workpad/
diff --git a/arch/mips/vr41xx/Platform b/arch/mips/vr41xx/Platform
deleted file mode 100644
index 3f593a3e5678..000000000000
--- a/arch/mips/vr41xx/Platform
+++ /dev/null
@@ -1,29 +0,0 @@
-#
-# NEC VR4100 series based machines
-#
-cflags-$(CONFIG_MACH_VR41XX) += -I$(srctree)/arch/mips/include/asm/mach-vr41xx
-
-#
-# CASIO CASSIPEIA E-55/65 (VR4111)
-#
-load-$(CONFIG_CASIO_E55) += 0xffffffff80004000
-
-#
-# IBM WorkPad z50 (VR4121)
-#
-load-$(CONFIG_IBM_WORKPAD) += 0xffffffff80004000
-
-#
-# TANBAC VR4131 multichip module(TB0225) and TANBAC VR4131DIMM(TB0229) (VR4131)
-#
-load-$(CONFIG_TANBAC_TB022X) += 0xffffffff80000000
-
-#
-# Victor MP-C303/304 (VR4122)
-#
-load-$(CONFIG_VICTOR_MPC30X) += 0xffffffff80001000
-
-#
-# ZAO Networks Capcella (VR4131)
-#
-load-$(CONFIG_ZAO_CAPCELLA) += 0xffffffff80000000
diff --git a/arch/mips/vr41xx/casio-e55/Makefile b/arch/mips/vr41xx/casio-e55/Makefile
deleted file mode 100644
index 65d30d7c86a9..000000000000
--- a/arch/mips/vr41xx/casio-e55/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for the CASIO CASSIOPEIA E-55/65 specific parts of the kernel
-#
-
-obj-y += setup.o
diff --git a/arch/mips/vr41xx/casio-e55/setup.c b/arch/mips/vr41xx/casio-e55/setup.c
deleted file mode 100644
index 25ea7f19f891..000000000000
--- a/arch/mips/vr41xx/casio-e55/setup.c
+++ /dev/null
@@ -1,27 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * setup.c, Setup for the CASIO CASSIOPEIA E-11/15/55/65.
- *
- * Copyright (C) 2002-2006 Yoichi Yuasa <yuasa@linux-mips.org>
- */
-#include <linux/init.h>
-#include <linux/ioport.h>
-
-#include <asm/io.h>
-
-#define E55_ISA_IO_BASE 0x1400c000
-#define E55_ISA_IO_SIZE 0x03ff4000
-#define E55_ISA_IO_START 0
-#define E55_ISA_IO_END (E55_ISA_IO_SIZE - 1)
-#define E55_IO_PORT_BASE KSEG1ADDR(E55_ISA_IO_BASE)
-
-static int __init casio_e55_setup(void)
-{
- set_io_port_base(E55_IO_PORT_BASE);
- ioport_resource.start = E55_ISA_IO_START;
- ioport_resource.end = E55_ISA_IO_END;
-
- return 0;
-}
-
-arch_initcall(casio_e55_setup);
diff --git a/arch/mips/vr41xx/common/Makefile b/arch/mips/vr41xx/common/Makefile
deleted file mode 100644
index 57d3eee29d5f..000000000000
--- a/arch/mips/vr41xx/common/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for common code of the NEC VR4100 series.
-#
-
-obj-y += bcu.o cmu.o giu.o icu.o init.o irq.o pmu.o rtc.o siu.o type.o
diff --git a/arch/mips/vr41xx/common/bcu.c b/arch/mips/vr41xx/common/bcu.c
deleted file mode 100644
index 0677d17796f6..000000000000
--- a/arch/mips/vr41xx/common/bcu.c
+++ /dev/null
@@ -1,210 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * bcu.c, Bus Control Unit routines for the NEC VR4100 series.
- *
- * Copyright (C) 2002 MontaVista Software Inc.
- * Author: Yoichi Yuasa <source@mvista.com>
- * Copyright (C) 2003-2005 Yoichi Yuasa <yuasa@linux-mips.org>
- */
-/*
- * Changes:
- * MontaVista Software Inc. <source@mvista.com>
- * - New creation, NEC VR4122 and VR4131 are supported.
- * - Added support for NEC VR4111 and VR4121.
- *
- * Yoichi Yuasa <yuasa@linux-mips.org>
- * - Added support for NEC VR4133.
- */
-#include <linux/export.h>
-#include <linux/kernel.h>
-#include <linux/smp.h>
-#include <linux/types.h>
-
-#include <asm/cpu-type.h>
-#include <asm/cpu.h>
-#include <asm/io.h>
-
-#define CLKSPEEDREG_TYPE1 (void __iomem *)KSEG1ADDR(0x0b000014)
-#define CLKSPEEDREG_TYPE2 (void __iomem *)KSEG1ADDR(0x0f000014)
- #define CLKSP(x) ((x) & 0x001f)
- #define CLKSP_VR4133(x) ((x) & 0x0007)
-
- #define DIV2B 0x8000
- #define DIV3B 0x4000
- #define DIV4B 0x2000
-
- #define DIVT(x) (((x) & 0xf000) >> 12)
- #define DIVVT(x) (((x) & 0x0f00) >> 8)
-
- #define TDIVMODE(x) (2 << (((x) & 0x1000) >> 12))
- #define VTDIVMODE(x) (((x) & 0x0700) >> 8)
-
-static unsigned long vr41xx_vtclock;
-static unsigned long vr41xx_tclock;
-
-unsigned long vr41xx_get_vtclock_frequency(void)
-{
- return vr41xx_vtclock;
-}
-
-EXPORT_SYMBOL_GPL(vr41xx_get_vtclock_frequency);
-
-unsigned long vr41xx_get_tclock_frequency(void)
-{
- return vr41xx_tclock;
-}
-
-EXPORT_SYMBOL_GPL(vr41xx_get_tclock_frequency);
-
-static inline uint16_t read_clkspeed(void)
-{
- switch (current_cpu_type()) {
- case CPU_VR4111:
- case CPU_VR4121: return readw(CLKSPEEDREG_TYPE1);
- case CPU_VR4122:
- case CPU_VR4131:
- case CPU_VR4133: return readw(CLKSPEEDREG_TYPE2);
- default:
- printk(KERN_INFO "Unexpected CPU of NEC VR4100 series\n");
- break;
- }
-
- return 0;
-}
-
-static inline unsigned long calculate_pclock(uint16_t clkspeed)
-{
- unsigned long pclock = 0;
-
- switch (current_cpu_type()) {
- case CPU_VR4111:
- case CPU_VR4121:
- pclock = 18432000 * 64;
- pclock /= CLKSP(clkspeed);
- break;
- case CPU_VR4122:
- pclock = 18432000 * 98;
- pclock /= CLKSP(clkspeed);
- break;
- case CPU_VR4131:
- pclock = 18432000 * 108;
- pclock /= CLKSP(clkspeed);
- break;
- case CPU_VR4133:
- switch (CLKSP_VR4133(clkspeed)) {
- case 0:
- pclock = 133000000;
- break;
- case 1:
- pclock = 149000000;
- break;
- case 2:
- pclock = 165900000;
- break;
- case 3:
- pclock = 199100000;
- break;
- case 4:
- pclock = 265900000;
- break;
- default:
- printk(KERN_INFO "Unknown PClock speed for NEC VR4133\n");
- break;
- }
- break;
- default:
- printk(KERN_INFO "Unexpected CPU of NEC VR4100 series\n");
- break;
- }
-
- printk(KERN_INFO "PClock: %ldHz\n", pclock);
-
- return pclock;
-}
-
-static inline unsigned long calculate_vtclock(uint16_t clkspeed, unsigned long pclock)
-{
- unsigned long vtclock = 0;
-
- switch (current_cpu_type()) {
- case CPU_VR4111:
- /* The NEC VR4111 doesn't have the VTClock. */
- break;
- case CPU_VR4121:
- vtclock = pclock;
- /* DIVVT == 9 Divide by 1.5 . VTClock = (PClock * 6) / 9 */
- if (DIVVT(clkspeed) == 9)
- vtclock = pclock * 6;
- /* DIVVT == 10 Divide by 2.5 . VTClock = (PClock * 4) / 10 */
- else if (DIVVT(clkspeed) == 10)
- vtclock = pclock * 4;
- vtclock /= DIVVT(clkspeed);
- printk(KERN_INFO "VTClock: %ldHz\n", vtclock);
- break;
- case CPU_VR4122:
- if(VTDIVMODE(clkspeed) == 7)
- vtclock = pclock / 1;
- else if(VTDIVMODE(clkspeed) == 1)
- vtclock = pclock / 2;
- else
- vtclock = pclock / VTDIVMODE(clkspeed);
- printk(KERN_INFO "VTClock: %ldHz\n", vtclock);
- break;
- case CPU_VR4131:
- case CPU_VR4133:
- vtclock = pclock / VTDIVMODE(clkspeed);
- printk(KERN_INFO "VTClock: %ldHz\n", vtclock);
- break;
- default:
- printk(KERN_INFO "Unexpected CPU of NEC VR4100 series\n");
- break;
- }
-
- return vtclock;
-}
-
-static inline unsigned long calculate_tclock(uint16_t clkspeed, unsigned long pclock,
- unsigned long vtclock)
-{
- unsigned long tclock = 0;
-
- switch (current_cpu_type()) {
- case CPU_VR4111:
- if (!(clkspeed & DIV2B))
- tclock = pclock / 2;
- else if (!(clkspeed & DIV3B))
- tclock = pclock / 3;
- else if (!(clkspeed & DIV4B))
- tclock = pclock / 4;
- break;
- case CPU_VR4121:
- tclock = pclock / DIVT(clkspeed);
- break;
- case CPU_VR4122:
- case CPU_VR4131:
- case CPU_VR4133:
- tclock = vtclock / TDIVMODE(clkspeed);
- break;
- default:
- printk(KERN_INFO "Unexpected CPU of NEC VR4100 series\n");
- break;
- }
-
- printk(KERN_INFO "TClock: %ldHz\n", tclock);
-
- return tclock;
-}
-
-void vr41xx_calculate_clock_frequency(void)
-{
- unsigned long pclock;
- uint16_t clkspeed;
-
- clkspeed = read_clkspeed();
-
- pclock = calculate_pclock(clkspeed);
- vr41xx_vtclock = calculate_vtclock(clkspeed, pclock);
- vr41xx_tclock = calculate_tclock(clkspeed, pclock, vr41xx_vtclock);
-}
-
-EXPORT_SYMBOL_GPL(vr41xx_calculate_clock_frequency);
diff --git a/arch/mips/vr41xx/common/cmu.c b/arch/mips/vr41xx/common/cmu.c
deleted file mode 100644
index e4cbe116b26d..000000000000
--- a/arch/mips/vr41xx/common/cmu.c
+++ /dev/null
@@ -1,242 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * cmu.c, Clock Mask Unit routines for the NEC VR4100 series.
- *
- * Copyright (C) 2001-2002 MontaVista Software Inc.
- * Author: Yoichi Yuasa <source@mvista.com>
- * Copyright (C) 2003-2005 Yoichi Yuasa <yuasa@linux-mips.org>
- */
-/*
- * Changes:
- * MontaVista Software Inc. <source@mvista.com>
- * - New creation, NEC VR4122 and VR4131 are supported.
- * - Added support for NEC VR4111 and VR4121.
- *
- * Yoichi Yuasa <yuasa@linux-mips.org>
- * - Added support for NEC VR4133.
- */
-#include <linux/export.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/smp.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-
-#include <asm/cpu.h>
-#include <asm/io.h>
-#include <asm/vr41xx/vr41xx.h>
-
-#define CMU_TYPE1_BASE 0x0b000060UL
-#define CMU_TYPE1_SIZE 0x4
-
-#define CMU_TYPE2_BASE 0x0f000060UL
-#define CMU_TYPE2_SIZE 0x4
-
-#define CMU_TYPE3_BASE 0x0f000060UL
-#define CMU_TYPE3_SIZE 0x8
-
-#define CMUCLKMSK 0x0
- #define MSKPIU 0x0001
- #define MSKSIU 0x0002
- #define MSKAIU 0x0004
- #define MSKKIU 0x0008
- #define MSKFIR 0x0010
- #define MSKDSIU 0x0820
- #define MSKCSI 0x0040
- #define MSKPCIU 0x0080
- #define MSKSSIU 0x0100
- #define MSKSHSP 0x0200
- #define MSKFFIR 0x0400
- #define MSKSCSI 0x1000
- #define MSKPPCIU 0x2000
-#define CMUCLKMSK2 0x4
- #define MSKCEU 0x0001
- #define MSKMAC0 0x0002
- #define MSKMAC1 0x0004
-
-static void __iomem *cmu_base;
-static uint16_t cmuclkmsk, cmuclkmsk2;
-static DEFINE_SPINLOCK(cmu_lock);
-
-#define cmu_read(offset) readw(cmu_base + (offset))
-#define cmu_write(offset, value) writew((value), cmu_base + (offset))
-
-void vr41xx_supply_clock(vr41xx_clock_t clock)
-{
- spin_lock_irq(&cmu_lock);
-
- switch (clock) {
- case PIU_CLOCK:
- cmuclkmsk |= MSKPIU;
- break;
- case SIU_CLOCK:
- cmuclkmsk |= MSKSIU | MSKSSIU;
- break;
- case AIU_CLOCK:
- cmuclkmsk |= MSKAIU;
- break;
- case KIU_CLOCK:
- cmuclkmsk |= MSKKIU;
- break;
- case FIR_CLOCK:
- cmuclkmsk |= MSKFIR | MSKFFIR;
- break;
- case DSIU_CLOCK:
- if (current_cpu_type() == CPU_VR4111 ||
- current_cpu_type() == CPU_VR4121)
- cmuclkmsk |= MSKDSIU;
- else
- cmuclkmsk |= MSKSIU | MSKDSIU;
- break;
- case CSI_CLOCK:
- cmuclkmsk |= MSKCSI | MSKSCSI;
- break;
- case PCIU_CLOCK:
- cmuclkmsk |= MSKPCIU;
- break;
- case HSP_CLOCK:
- cmuclkmsk |= MSKSHSP;
- break;
- case PCI_CLOCK:
- cmuclkmsk |= MSKPPCIU;
- break;
- case CEU_CLOCK:
- cmuclkmsk2 |= MSKCEU;
- break;
- case ETHER0_CLOCK:
- cmuclkmsk2 |= MSKMAC0;
- break;
- case ETHER1_CLOCK:
- cmuclkmsk2 |= MSKMAC1;
- break;
- default:
- break;
- }
-
- if (clock == CEU_CLOCK || clock == ETHER0_CLOCK ||
- clock == ETHER1_CLOCK)
- cmu_write(CMUCLKMSK2, cmuclkmsk2);
- else
- cmu_write(CMUCLKMSK, cmuclkmsk);
-
- spin_unlock_irq(&cmu_lock);
-}
-
-EXPORT_SYMBOL_GPL(vr41xx_supply_clock);
-
-void vr41xx_mask_clock(vr41xx_clock_t clock)
-{
- spin_lock_irq(&cmu_lock);
-
- switch (clock) {
- case PIU_CLOCK:
- cmuclkmsk &= ~MSKPIU;
- break;
- case SIU_CLOCK:
- if (current_cpu_type() == CPU_VR4111 ||
- current_cpu_type() == CPU_VR4121) {
- cmuclkmsk &= ~(MSKSIU | MSKSSIU);
- } else {
- if (cmuclkmsk & MSKDSIU)
- cmuclkmsk &= ~MSKSSIU;
- else
- cmuclkmsk &= ~(MSKSIU | MSKSSIU);
- }
- break;
- case AIU_CLOCK:
- cmuclkmsk &= ~MSKAIU;
- break;
- case KIU_CLOCK:
- cmuclkmsk &= ~MSKKIU;
- break;
- case FIR_CLOCK:
- cmuclkmsk &= ~(MSKFIR | MSKFFIR);
- break;
- case DSIU_CLOCK:
- if (current_cpu_type() == CPU_VR4111 ||
- current_cpu_type() == CPU_VR4121) {
- cmuclkmsk &= ~MSKDSIU;
- } else {
- if (cmuclkmsk & MSKSSIU)
- cmuclkmsk &= ~MSKDSIU;
- else
- cmuclkmsk &= ~(MSKSIU | MSKDSIU);
- }
- break;
- case CSI_CLOCK:
- cmuclkmsk &= ~(MSKCSI | MSKSCSI);
- break;
- case PCIU_CLOCK:
- cmuclkmsk &= ~MSKPCIU;
- break;
- case HSP_CLOCK:
- cmuclkmsk &= ~MSKSHSP;
- break;
- case PCI_CLOCK:
- cmuclkmsk &= ~MSKPPCIU;
- break;
- case CEU_CLOCK:
- cmuclkmsk2 &= ~MSKCEU;
- break;
- case ETHER0_CLOCK:
- cmuclkmsk2 &= ~MSKMAC0;
- break;
- case ETHER1_CLOCK:
- cmuclkmsk2 &= ~MSKMAC1;
- break;
- default:
- break;
- }
-
- if (clock == CEU_CLOCK || clock == ETHER0_CLOCK ||
- clock == ETHER1_CLOCK)
- cmu_write(CMUCLKMSK2, cmuclkmsk2);
- else
- cmu_write(CMUCLKMSK, cmuclkmsk);
-
- spin_unlock_irq(&cmu_lock);
-}
-
-EXPORT_SYMBOL_GPL(vr41xx_mask_clock);
-
-static int __init vr41xx_cmu_init(void)
-{
- unsigned long start, size;
-
- switch (current_cpu_type()) {
- case CPU_VR4111:
- case CPU_VR4121:
- start = CMU_TYPE1_BASE;
- size = CMU_TYPE1_SIZE;
- break;
- case CPU_VR4122:
- case CPU_VR4131:
- start = CMU_TYPE2_BASE;
- size = CMU_TYPE2_SIZE;
- break;
- case CPU_VR4133:
- start = CMU_TYPE3_BASE;
- size = CMU_TYPE3_SIZE;
- break;
- default:
- panic("Unexpected CPU of NEC VR4100 series");
- break;
- }
-
- if (request_mem_region(start, size, "CMU") == NULL)
- return -EBUSY;
-
- cmu_base = ioremap(start, size);
- if (cmu_base == NULL) {
- release_mem_region(start, size);
- return -EBUSY;
- }
-
- cmuclkmsk = cmu_read(CMUCLKMSK);
- if (current_cpu_type() == CPU_VR4133)
- cmuclkmsk2 = cmu_read(CMUCLKMSK2);
-
- return 0;
-}
-
-core_initcall(vr41xx_cmu_init);
diff --git a/arch/mips/vr41xx/common/giu.c b/arch/mips/vr41xx/common/giu.c
deleted file mode 100644
index 398c626411f8..000000000000
--- a/arch/mips/vr41xx/common/giu.c
+++ /dev/null
@@ -1,110 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * NEC VR4100 series GIU platform device.
- *
- * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
- */
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/ioport.h>
-#include <linux/platform_device.h>
-
-#include <asm/cpu.h>
-#include <asm/vr41xx/giu.h>
-#include <asm/vr41xx/irq.h>
-
-static struct resource giu_50pins_pullupdown_resource[] __initdata = {
- {
- .start = 0x0b000100,
- .end = 0x0b00011f,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = 0x0b0002e0,
- .end = 0x0b0002e3,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = GIUINT_IRQ,
- .end = GIUINT_IRQ,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct resource giu_36pins_resource[] __initdata = {
- {
- .start = 0x0f000140,
- .end = 0x0f00015f,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = GIUINT_IRQ,
- .end = GIUINT_IRQ,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct resource giu_48pins_resource[] __initdata = {
- {
- .start = 0x0f000140,
- .end = 0x0f000167,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = GIUINT_IRQ,
- .end = GIUINT_IRQ,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static int __init vr41xx_giu_add(void)
-{
- struct platform_device *pdev;
- struct resource *res;
- unsigned int num;
- int retval;
-
- pdev = platform_device_alloc("GIU", -1);
- if (!pdev)
- return -ENOMEM;
-
- switch (current_cpu_type()) {
- case CPU_VR4111:
- case CPU_VR4121:
- pdev->id = GPIO_50PINS_PULLUPDOWN;
- res = giu_50pins_pullupdown_resource;
- num = ARRAY_SIZE(giu_50pins_pullupdown_resource);
- break;
- case CPU_VR4122:
- case CPU_VR4131:
- pdev->id = GPIO_36PINS;
- res = giu_36pins_resource;
- num = ARRAY_SIZE(giu_36pins_resource);
- break;
- case CPU_VR4133:
- pdev->id = GPIO_48PINS_EDGE_SELECT;
- res = giu_48pins_resource;
- num = ARRAY_SIZE(giu_48pins_resource);
- break;
- default:
- retval = -ENODEV;
- goto err_free_device;
- }
-
- retval = platform_device_add_resources(pdev, res, num);
- if (retval)
- goto err_free_device;
-
- retval = platform_device_add(pdev);
- if (retval)
- goto err_free_device;
-
- return 0;
-
-err_free_device:
- platform_device_put(pdev);
-
- return retval;
-}
-device_initcall(vr41xx_giu_add);
diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
deleted file mode 100644
index 9240bcdbe74e..000000000000
--- a/arch/mips/vr41xx/common/icu.c
+++ /dev/null
@@ -1,714 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * icu.c, Interrupt Control Unit routines for the NEC VR4100 series.
- *
- * Copyright (C) 2001-2002 MontaVista Software Inc.
- * Author: Yoichi Yuasa <source@mvista.com>
- * Copyright (C) 2003-2006 Yoichi Yuasa <yuasa@linux-mips.org>
- */
-/*
- * Changes:
- * MontaVista Software Inc. <source@mvista.com>
- * - New creation, NEC VR4122 and VR4131 are supported.
- * - Added support for NEC VR4111 and VR4121.
- *
- * Yoichi Yuasa <yuasa@linux-mips.org>
- * - Coped with INTASSIGN of NEC VR4133.
- */
-#include <linux/errno.h>
-#include <linux/export.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/irq.h>
-#include <linux/smp.h>
-#include <linux/types.h>
-
-#include <asm/cpu.h>
-#include <asm/io.h>
-#include <asm/vr41xx/irq.h>
-#include <asm/vr41xx/vr41xx.h>
-
-static void __iomem *icu1_base;
-static void __iomem *icu2_base;
-
-static unsigned char sysint1_assign[16] = {
- 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-static unsigned char sysint2_assign[16] = {
- 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-
-#define ICU1_TYPE1_BASE 0x0b000080UL
-#define ICU2_TYPE1_BASE 0x0b000200UL
-
-#define ICU1_TYPE2_BASE 0x0f000080UL
-#define ICU2_TYPE2_BASE 0x0f0000a0UL
-
-#define ICU1_SIZE 0x20
-#define ICU2_SIZE 0x1c
-
-#define SYSINT1REG 0x00
-#define PIUINTREG 0x02
-#define INTASSIGN0 0x04
-#define INTASSIGN1 0x06
-#define GIUINTLREG 0x08
-#define DSIUINTREG 0x0a
-#define MSYSINT1REG 0x0c
-#define MPIUINTREG 0x0e
-#define MAIUINTREG 0x10
-#define MKIUINTREG 0x12
-#define MMACINTREG 0x12
-#define MGIUINTLREG 0x14
-#define MDSIUINTREG 0x16
-#define NMIREG 0x18
-#define SOFTREG 0x1a
-#define INTASSIGN2 0x1c
-#define INTASSIGN3 0x1e
-
-#define SYSINT2REG 0x00
-#define GIUINTHREG 0x02
-#define FIRINTREG 0x04
-#define MSYSINT2REG 0x06
-#define MGIUINTHREG 0x08
-#define MFIRINTREG 0x0a
-#define PCIINTREG 0x0c
- #define PCIINT0 0x0001
-#define SCUINTREG 0x0e
- #define SCUINT0 0x0001
-#define CSIINTREG 0x10
-#define MPCIINTREG 0x12
-#define MSCUINTREG 0x14
-#define MCSIINTREG 0x16
-#define BCUINTREG 0x18
- #define BCUINTR 0x0001
-#define MBCUINTREG 0x1a
-
-#define SYSINT1_IRQ_TO_PIN(x) ((x) - SYSINT1_IRQ_BASE) /* Pin 0-15 */
-#define SYSINT2_IRQ_TO_PIN(x) ((x) - SYSINT2_IRQ_BASE) /* Pin 0-15 */
-
-#define INT_TO_IRQ(x) ((x) + 2) /* Int0-4 -> IRQ2-6 */
-
-#define icu1_read(offset) readw(icu1_base + (offset))
-#define icu1_write(offset, value) writew((value), icu1_base + (offset))
-
-#define icu2_read(offset) readw(icu2_base + (offset))
-#define icu2_write(offset, value) writew((value), icu2_base + (offset))
-
-#define INTASSIGN_MAX 4
-#define INTASSIGN_MASK 0x0007
-
-static inline uint16_t icu1_set(uint8_t offset, uint16_t set)
-{
- uint16_t data;
-
- data = icu1_read(offset);
- data |= set;
- icu1_write(offset, data);
-
- return data;
-}
-
-static inline uint16_t icu1_clear(uint8_t offset, uint16_t clear)
-{
- uint16_t data;
-
- data = icu1_read(offset);
- data &= ~clear;
- icu1_write(offset, data);
-
- return data;
-}
-
-static inline uint16_t icu2_set(uint8_t offset, uint16_t set)
-{
- uint16_t data;
-
- data = icu2_read(offset);
- data |= set;
- icu2_write(offset, data);
-
- return data;
-}
-
-static inline uint16_t icu2_clear(uint8_t offset, uint16_t clear)
-{
- uint16_t data;
-
- data = icu2_read(offset);
- data &= ~clear;
- icu2_write(offset, data);
-
- return data;
-}
-
-void vr41xx_enable_piuint(uint16_t mask)
-{
- struct irq_desc *desc = irq_to_desc(PIU_IRQ);
- unsigned long flags;
-
- if (current_cpu_type() == CPU_VR4111 ||
- current_cpu_type() == CPU_VR4121) {
- raw_spin_lock_irqsave(&desc->lock, flags);
- icu1_set(MPIUINTREG, mask);
- raw_spin_unlock_irqrestore(&desc->lock, flags);
- }
-}
-
-EXPORT_SYMBOL(vr41xx_enable_piuint);
-
-void vr41xx_disable_piuint(uint16_t mask)
-{
- struct irq_desc *desc = irq_to_desc(PIU_IRQ);
- unsigned long flags;
-
- if (current_cpu_type() == CPU_VR4111 ||
- current_cpu_type() == CPU_VR4121) {
- raw_spin_lock_irqsave(&desc->lock, flags);
- icu1_clear(MPIUINTREG, mask);
- raw_spin_unlock_irqrestore(&desc->lock, flags);
- }
-}
-
-EXPORT_SYMBOL(vr41xx_disable_piuint);
-
-void vr41xx_enable_aiuint(uint16_t mask)
-{
- struct irq_desc *desc = irq_to_desc(AIU_IRQ);
- unsigned long flags;
-
- if (current_cpu_type() == CPU_VR4111 ||
- current_cpu_type() == CPU_VR4121) {
- raw_spin_lock_irqsave(&desc->lock, flags);
- icu1_set(MAIUINTREG, mask);
- raw_spin_unlock_irqrestore(&desc->lock, flags);
- }
-}
-
-EXPORT_SYMBOL(vr41xx_enable_aiuint);
-
-void vr41xx_disable_aiuint(uint16_t mask)
-{
- struct irq_desc *desc = irq_to_desc(AIU_IRQ);
- unsigned long flags;
-
- if (current_cpu_type() == CPU_VR4111 ||
- current_cpu_type() == CPU_VR4121) {
- raw_spin_lock_irqsave(&desc->lock, flags);
- icu1_clear(MAIUINTREG, mask);
- raw_spin_unlock_irqrestore(&desc->lock, flags);
- }
-}
-
-EXPORT_SYMBOL(vr41xx_disable_aiuint);
-
-void vr41xx_enable_kiuint(uint16_t mask)
-{
- struct irq_desc *desc = irq_to_desc(KIU_IRQ);
- unsigned long flags;
-
- if (current_cpu_type() == CPU_VR4111 ||
- current_cpu_type() == CPU_VR4121) {
- raw_spin_lock_irqsave(&desc->lock, flags);
- icu1_set(MKIUINTREG, mask);
- raw_spin_unlock_irqrestore(&desc->lock, flags);
- }
-}
-
-EXPORT_SYMBOL(vr41xx_enable_kiuint);
-
-void vr41xx_disable_kiuint(uint16_t mask)
-{
- struct irq_desc *desc = irq_to_desc(KIU_IRQ);
- unsigned long flags;
-
- if (current_cpu_type() == CPU_VR4111 ||
- current_cpu_type() == CPU_VR4121) {
- raw_spin_lock_irqsave(&desc->lock, flags);
- icu1_clear(MKIUINTREG, mask);
- raw_spin_unlock_irqrestore(&desc->lock, flags);
- }
-}
-
-EXPORT_SYMBOL(vr41xx_disable_kiuint);
-
-void vr41xx_enable_macint(uint16_t mask)
-{
- struct irq_desc *desc = irq_to_desc(ETHERNET_IRQ);
- unsigned long flags;
-
- raw_spin_lock_irqsave(&desc->lock, flags);
- icu1_set(MMACINTREG, mask);
- raw_spin_unlock_irqrestore(&desc->lock, flags);
-}
-
-EXPORT_SYMBOL(vr41xx_enable_macint);
-
-void vr41xx_disable_macint(uint16_t mask)
-{
- struct irq_desc *desc = irq_to_desc(ETHERNET_IRQ);
- unsigned long flags;
-
- raw_spin_lock_irqsave(&desc->lock, flags);
- icu1_clear(MMACINTREG, mask);
- raw_spin_unlock_irqrestore(&desc->lock, flags);
-}
-
-EXPORT_SYMBOL(vr41xx_disable_macint);
-
-void vr41xx_enable_dsiuint(uint16_t mask)
-{
- struct irq_desc *desc = irq_to_desc(DSIU_IRQ);
- unsigned long flags;
-
- raw_spin_lock_irqsave(&desc->lock, flags);
- icu1_set(MDSIUINTREG, mask);
- raw_spin_unlock_irqrestore(&desc->lock, flags);
-}
-
-EXPORT_SYMBOL(vr41xx_enable_dsiuint);
-
-void vr41xx_disable_dsiuint(uint16_t mask)
-{
- struct irq_desc *desc = irq_to_desc(DSIU_IRQ);
- unsigned long flags;
-
- raw_spin_lock_irqsave(&desc->lock, flags);
- icu1_clear(MDSIUINTREG, mask);
- raw_spin_unlock_irqrestore(&desc->lock, flags);
-}
-
-EXPORT_SYMBOL(vr41xx_disable_dsiuint);
-
-void vr41xx_enable_firint(uint16_t mask)
-{
- struct irq_desc *desc = irq_to_desc(FIR_IRQ);
- unsigned long flags;
-
- raw_spin_lock_irqsave(&desc->lock, flags);
- icu2_set(MFIRINTREG, mask);
- raw_spin_unlock_irqrestore(&desc->lock, flags);
-}
-
-EXPORT_SYMBOL(vr41xx_enable_firint);
-
-void vr41xx_disable_firint(uint16_t mask)
-{
- struct irq_desc *desc = irq_to_desc(FIR_IRQ);
- unsigned long flags;
-
- raw_spin_lock_irqsave(&desc->lock, flags);
- icu2_clear(MFIRINTREG, mask);
- raw_spin_unlock_irqrestore(&desc->lock, flags);
-}
-
-EXPORT_SYMBOL(vr41xx_disable_firint);
-
-void vr41xx_enable_pciint(void)
-{
- struct irq_desc *desc = irq_to_desc(PCI_IRQ);
- unsigned long flags;
-
- if (current_cpu_type() == CPU_VR4122 ||
- current_cpu_type() == CPU_VR4131 ||
- current_cpu_type() == CPU_VR4133) {
- raw_spin_lock_irqsave(&desc->lock, flags);
- icu2_write(MPCIINTREG, PCIINT0);
- raw_spin_unlock_irqrestore(&desc->lock, flags);
- }
-}
-
-EXPORT_SYMBOL(vr41xx_enable_pciint);
-
-void vr41xx_disable_pciint(void)
-{
- struct irq_desc *desc = irq_to_desc(PCI_IRQ);
- unsigned long flags;
-
- if (current_cpu_type() == CPU_VR4122 ||
- current_cpu_type() == CPU_VR4131 ||
- current_cpu_type() == CPU_VR4133) {
- raw_spin_lock_irqsave(&desc->lock, flags);
- icu2_write(MPCIINTREG, 0);
- raw_spin_unlock_irqrestore(&desc->lock, flags);
- }
-}
-
-EXPORT_SYMBOL(vr41xx_disable_pciint);
-
-void vr41xx_enable_scuint(void)
-{
- struct irq_desc *desc = irq_to_desc(SCU_IRQ);
- unsigned long flags;
-
- if (current_cpu_type() == CPU_VR4122 ||
- current_cpu_type() == CPU_VR4131 ||
- current_cpu_type() == CPU_VR4133) {
- raw_spin_lock_irqsave(&desc->lock, flags);
- icu2_write(MSCUINTREG, SCUINT0);
- raw_spin_unlock_irqrestore(&desc->lock, flags);
- }
-}
-
-EXPORT_SYMBOL(vr41xx_enable_scuint);
-
-void vr41xx_disable_scuint(void)
-{
- struct irq_desc *desc = irq_to_desc(SCU_IRQ);
- unsigned long flags;
-
- if (current_cpu_type() == CPU_VR4122 ||
- current_cpu_type() == CPU_VR4131 ||
- current_cpu_type() == CPU_VR4133) {
- raw_spin_lock_irqsave(&desc->lock, flags);
- icu2_write(MSCUINTREG, 0);
- raw_spin_unlock_irqrestore(&desc->lock, flags);
- }
-}
-
-EXPORT_SYMBOL(vr41xx_disable_scuint);
-
-void vr41xx_enable_csiint(uint16_t mask)
-{
- struct irq_desc *desc = irq_to_desc(CSI_IRQ);
- unsigned long flags;
-
- if (current_cpu_type() == CPU_VR4122 ||
- current_cpu_type() == CPU_VR4131 ||
- current_cpu_type() == CPU_VR4133) {
- raw_spin_lock_irqsave(&desc->lock, flags);
- icu2_set(MCSIINTREG, mask);
- raw_spin_unlock_irqrestore(&desc->lock, flags);
- }
-}
-
-EXPORT_SYMBOL(vr41xx_enable_csiint);
-
-void vr41xx_disable_csiint(uint16_t mask)
-{
- struct irq_desc *desc = irq_to_desc(CSI_IRQ);
- unsigned long flags;
-
- if (current_cpu_type() == CPU_VR4122 ||
- current_cpu_type() == CPU_VR4131 ||
- current_cpu_type() == CPU_VR4133) {
- raw_spin_lock_irqsave(&desc->lock, flags);
- icu2_clear(MCSIINTREG, mask);
- raw_spin_unlock_irqrestore(&desc->lock, flags);
- }
-}
-
-EXPORT_SYMBOL(vr41xx_disable_csiint);
-
-void vr41xx_enable_bcuint(void)
-{
- struct irq_desc *desc = irq_to_desc(BCU_IRQ);
- unsigned long flags;
-
- if (current_cpu_type() == CPU_VR4122 ||
- current_cpu_type() == CPU_VR4131 ||
- current_cpu_type() == CPU_VR4133) {
- raw_spin_lock_irqsave(&desc->lock, flags);
- icu2_write(MBCUINTREG, BCUINTR);
- raw_spin_unlock_irqrestore(&desc->lock, flags);
- }
-}
-
-EXPORT_SYMBOL(vr41xx_enable_bcuint);
-
-void vr41xx_disable_bcuint(void)
-{
- struct irq_desc *desc = irq_to_desc(BCU_IRQ);
- unsigned long flags;
-
- if (current_cpu_type() == CPU_VR4122 ||
- current_cpu_type() == CPU_VR4131 ||
- current_cpu_type() == CPU_VR4133) {
- raw_spin_lock_irqsave(&desc->lock, flags);
- icu2_write(MBCUINTREG, 0);
- raw_spin_unlock_irqrestore(&desc->lock, flags);
- }
-}
-
-EXPORT_SYMBOL(vr41xx_disable_bcuint);
-
-static void disable_sysint1_irq(struct irq_data *d)
-{
- icu1_clear(MSYSINT1REG, 1 << SYSINT1_IRQ_TO_PIN(d->irq));
-}
-
-static void enable_sysint1_irq(struct irq_data *d)
-{
- icu1_set(MSYSINT1REG, 1 << SYSINT1_IRQ_TO_PIN(d->irq));
-}
-
-static struct irq_chip sysint1_irq_type = {
- .name = "SYSINT1",
- .irq_mask = disable_sysint1_irq,
- .irq_unmask = enable_sysint1_irq,
-};
-
-static void disable_sysint2_irq(struct irq_data *d)
-{
- icu2_clear(MSYSINT2REG, 1 << SYSINT2_IRQ_TO_PIN(d->irq));
-}
-
-static void enable_sysint2_irq(struct irq_data *d)
-{
- icu2_set(MSYSINT2REG, 1 << SYSINT2_IRQ_TO_PIN(d->irq));
-}
-
-static struct irq_chip sysint2_irq_type = {
- .name = "SYSINT2",
- .irq_mask = disable_sysint2_irq,
- .irq_unmask = enable_sysint2_irq,
-};
-
-static inline int set_sysint1_assign(unsigned int irq, unsigned char assign)
-{
- struct irq_desc *desc = irq_to_desc(irq);
- uint16_t intassign0, intassign1;
- unsigned int pin;
-
- pin = SYSINT1_IRQ_TO_PIN(irq);
-
- raw_spin_lock_irq(&desc->lock);
-
- intassign0 = icu1_read(INTASSIGN0);
- intassign1 = icu1_read(INTASSIGN1);
-
- switch (pin) {
- case 0:
- intassign0 &= ~INTASSIGN_MASK;
- intassign0 |= (uint16_t)assign;
- break;
- case 1:
- intassign0 &= ~(INTASSIGN_MASK << 3);
- intassign0 |= (uint16_t)assign << 3;
- break;
- case 2:
- intassign0 &= ~(INTASSIGN_MASK << 6);
- intassign0 |= (uint16_t)assign << 6;
- break;
- case 3:
- intassign0 &= ~(INTASSIGN_MASK << 9);
- intassign0 |= (uint16_t)assign << 9;
- break;
- case 8:
- intassign0 &= ~(INTASSIGN_MASK << 12);
- intassign0 |= (uint16_t)assign << 12;
- break;
- case 9:
- intassign1 &= ~INTASSIGN_MASK;
- intassign1 |= (uint16_t)assign;
- break;
- case 11:
- intassign1 &= ~(INTASSIGN_MASK << 6);
- intassign1 |= (uint16_t)assign << 6;
- break;
- case 12:
- intassign1 &= ~(INTASSIGN_MASK << 9);
- intassign1 |= (uint16_t)assign << 9;
- break;
- default:
- raw_spin_unlock_irq(&desc->lock);
- return -EINVAL;
- }
-
- sysint1_assign[pin] = assign;
- icu1_write(INTASSIGN0, intassign0);
- icu1_write(INTASSIGN1, intassign1);
-
- raw_spin_unlock_irq(&desc->lock);
-
- return 0;
-}
-
-static inline int set_sysint2_assign(unsigned int irq, unsigned char assign)
-{
- struct irq_desc *desc = irq_to_desc(irq);
- uint16_t intassign2, intassign3;
- unsigned int pin;
-
- pin = SYSINT2_IRQ_TO_PIN(irq);
-
- raw_spin_lock_irq(&desc->lock);
-
- intassign2 = icu1_read(INTASSIGN2);
- intassign3 = icu1_read(INTASSIGN3);
-
- switch (pin) {
- case 0:
- intassign2 &= ~INTASSIGN_MASK;
- intassign2 |= (uint16_t)assign;
- break;
- case 1:
- intassign2 &= ~(INTASSIGN_MASK << 3);
- intassign2 |= (uint16_t)assign << 3;
- break;
- case 3:
- intassign2 &= ~(INTASSIGN_MASK << 6);
- intassign2 |= (uint16_t)assign << 6;
- break;
- case 4:
- intassign2 &= ~(INTASSIGN_MASK << 9);
- intassign2 |= (uint16_t)assign << 9;
- break;
- case 5:
- intassign2 &= ~(INTASSIGN_MASK << 12);
- intassign2 |= (uint16_t)assign << 12;
- break;
- case 6:
- intassign3 &= ~INTASSIGN_MASK;
- intassign3 |= (uint16_t)assign;
- break;
- case 7:
- intassign3 &= ~(INTASSIGN_MASK << 3);
- intassign3 |= (uint16_t)assign << 3;
- break;
- case 8:
- intassign3 &= ~(INTASSIGN_MASK << 6);
- intassign3 |= (uint16_t)assign << 6;
- break;
- case 9:
- intassign3 &= ~(INTASSIGN_MASK << 9);
- intassign3 |= (uint16_t)assign << 9;
- break;
- case 10:
- intassign3 &= ~(INTASSIGN_MASK << 12);
- intassign3 |= (uint16_t)assign << 12;
- break;
- default:
- raw_spin_unlock_irq(&desc->lock);
- return -EINVAL;
- }
-
- sysint2_assign[pin] = assign;
- icu1_write(INTASSIGN2, intassign2);
- icu1_write(INTASSIGN3, intassign3);
-
- raw_spin_unlock_irq(&desc->lock);
-
- return 0;
-}
-
-int vr41xx_set_intassign(unsigned int irq, unsigned char intassign)
-{
- int retval = -EINVAL;
-
- if (current_cpu_type() != CPU_VR4133)
- return -EINVAL;
-
- if (intassign > INTASSIGN_MAX)
- return -EINVAL;
-
- if (irq >= SYSINT1_IRQ_BASE && irq <= SYSINT1_IRQ_LAST)
- retval = set_sysint1_assign(irq, intassign);
- else if (irq >= SYSINT2_IRQ_BASE && irq <= SYSINT2_IRQ_LAST)
- retval = set_sysint2_assign(irq, intassign);
-
- return retval;
-}
-
-EXPORT_SYMBOL(vr41xx_set_intassign);
-
-static int icu_get_irq(unsigned int irq)
-{
- uint16_t pend1, pend2;
- uint16_t mask1, mask2;
- int i;
-
- pend1 = icu1_read(SYSINT1REG);
- mask1 = icu1_read(MSYSINT1REG);
-
- pend2 = icu2_read(SYSINT2REG);
- mask2 = icu2_read(MSYSINT2REG);
-
- mask1 &= pend1;
- mask2 &= pend2;
-
- if (mask1) {
- for (i = 0; i < 16; i++) {
- if (irq == INT_TO_IRQ(sysint1_assign[i]) && (mask1 & (1 << i)))
- return SYSINT1_IRQ(i);
- }
- }
-
- if (mask2) {
- for (i = 0; i < 16; i++) {
- if (irq == INT_TO_IRQ(sysint2_assign[i]) && (mask2 & (1 << i)))
- return SYSINT2_IRQ(i);
- }
- }
-
- printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
-
- return -1;
-}
-
-static int __init vr41xx_icu_init(void)
-{
- unsigned long icu1_start, icu2_start;
- int i;
-
- switch (current_cpu_type()) {
- case CPU_VR4111:
- case CPU_VR4121:
- icu1_start = ICU1_TYPE1_BASE;
- icu2_start = ICU2_TYPE1_BASE;
- break;
- case CPU_VR4122:
- case CPU_VR4131:
- case CPU_VR4133:
- icu1_start = ICU1_TYPE2_BASE;
- icu2_start = ICU2_TYPE2_BASE;
- break;
- default:
- printk(KERN_ERR "ICU: Unexpected CPU of NEC VR4100 series\n");
- return -ENODEV;
- }
-
- if (request_mem_region(icu1_start, ICU1_SIZE, "ICU") == NULL)
- return -EBUSY;
-
- if (request_mem_region(icu2_start, ICU2_SIZE, "ICU") == NULL) {
- release_mem_region(icu1_start, ICU1_SIZE);
- return -EBUSY;
- }
-
- icu1_base = ioremap(icu1_start, ICU1_SIZE);
- if (icu1_base == NULL) {
- release_mem_region(icu1_start, ICU1_SIZE);
- release_mem_region(icu2_start, ICU2_SIZE);
- return -ENOMEM;
- }
-
- icu2_base = ioremap(icu2_start, ICU2_SIZE);
- if (icu2_base == NULL) {
- iounmap(icu1_base);
- release_mem_region(icu1_start, ICU1_SIZE);
- release_mem_region(icu2_start, ICU2_SIZE);
- return -ENOMEM;
- }
-
- icu1_write(MSYSINT1REG, 0);
- icu1_write(MGIUINTLREG, 0xffff);
-
- icu2_write(MSYSINT2REG, 0);
- icu2_write(MGIUINTHREG, 0xffff);
-
- for (i = SYSINT1_IRQ_BASE; i <= SYSINT1_IRQ_LAST; i++)
- irq_set_chip_and_handler(i, &sysint1_irq_type,
- handle_level_irq);
-
- for (i = SYSINT2_IRQ_BASE; i <= SYSINT2_IRQ_LAST; i++)
- irq_set_chip_and_handler(i, &sysint2_irq_type,
- handle_level_irq);
-
- cascade_irq(INT0_IRQ, icu_get_irq);
- cascade_irq(INT1_IRQ, icu_get_irq);
- cascade_irq(INT2_IRQ, icu_get_irq);
- cascade_irq(INT3_IRQ, icu_get_irq);
- cascade_irq(INT4_IRQ, icu_get_irq);
-
- return 0;
-}
-
-core_initcall(vr41xx_icu_init);
diff --git a/arch/mips/vr41xx/common/init.c b/arch/mips/vr41xx/common/init.c
deleted file mode 100644
index 628dddf79a05..000000000000
--- a/arch/mips/vr41xx/common/init.c
+++ /dev/null
@@ -1,60 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * init.c, Common initialization routines for NEC VR4100 series.
- *
- * Copyright (C) 2003-2009 Yoichi Yuasa <yuasa@linux-mips.org>
- */
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/irq.h>
-#include <linux/string.h>
-
-#include <asm/bootinfo.h>
-#include <asm/time.h>
-#include <asm/vr41xx/irq.h>
-#include <asm/vr41xx/vr41xx.h>
-
-#define IO_MEM_RESOURCE_START 0UL
-#define IO_MEM_RESOURCE_END 0x1fffffffUL
-
-static void __init iomem_resource_init(void)
-{
- iomem_resource.start = IO_MEM_RESOURCE_START;
- iomem_resource.end = IO_MEM_RESOURCE_END;
-}
-
-void __init plat_time_init(void)
-{
- unsigned long tclock;
-
- vr41xx_calculate_clock_frequency();
-
- tclock = vr41xx_get_tclock_frequency();
- if (current_cpu_data.processor_id == PRID_VR4131_REV2_0 ||
- current_cpu_data.processor_id == PRID_VR4131_REV2_1)
- mips_hpt_frequency = tclock / 2;
- else
- mips_hpt_frequency = tclock / 4;
-}
-
-void __init plat_mem_setup(void)
-{
- iomem_resource_init();
-
- vr41xx_siu_setup();
-}
-
-void __init prom_init(void)
-{
- int argc, i;
- char **argv;
-
- argc = fw_arg0;
- argv = (char **)fw_arg1;
-
- for (i = 1; i < argc; i++) {
- strlcat(arcs_cmdline, argv[i], COMMAND_LINE_SIZE);
- if (i < (argc - 1))
- strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
- }
-}
diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
deleted file mode 100644
index 8f68446ff2d9..000000000000
--- a/arch/mips/vr41xx/common/irq.c
+++ /dev/null
@@ -1,106 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Interrupt handing routines for NEC VR4100 series.
- *
- * Copyright (C) 2005-2007 Yoichi Yuasa <yuasa@linux-mips.org>
- */
-#include <linux/export.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-
-#include <asm/irq_cpu.h>
-#include <asm/vr41xx/irq.h>
-
-typedef struct irq_cascade {
- int (*get_irq)(unsigned int);
-} irq_cascade_t;
-
-static irq_cascade_t irq_cascade[NR_IRQS] __cacheline_aligned;
-
-int cascade_irq(unsigned int irq, int (*get_irq)(unsigned int))
-{
- int retval = 0;
-
- if (irq >= NR_IRQS)
- return -EINVAL;
-
- if (irq_cascade[irq].get_irq != NULL)
- free_irq(irq, NULL);
-
- irq_cascade[irq].get_irq = get_irq;
-
- if (get_irq != NULL) {
- retval = request_irq(irq, no_action, IRQF_NO_THREAD,
- "cascade", NULL);
- if (retval < 0)
- irq_cascade[irq].get_irq = NULL;
- }
-
- return retval;
-}
-
-EXPORT_SYMBOL_GPL(cascade_irq);
-
-static void irq_dispatch(unsigned int irq)
-{
- irq_cascade_t *cascade;
-
- if (irq >= NR_IRQS) {
- atomic_inc(&irq_err_count);
- return;
- }
-
- cascade = irq_cascade + irq;
- if (cascade->get_irq != NULL) {
- struct irq_desc *desc = irq_to_desc(irq);
- struct irq_data *idata = irq_desc_get_irq_data(desc);
- struct irq_chip *chip = irq_desc_get_chip(desc);
- int ret;
-
- if (chip->irq_mask_ack)
- chip->irq_mask_ack(idata);
- else {
- chip->irq_mask(idata);
- chip->irq_ack(idata);
- }
- ret = cascade->get_irq(irq);
- irq = ret;
- if (ret < 0)
- atomic_inc(&irq_err_count);
- else
- irq_dispatch(irq);
- if (!irqd_irq_disabled(idata) && chip->irq_unmask)
- chip->irq_unmask(idata);
- } else
- do_IRQ(irq);
-}
-
-asmlinkage void plat_irq_dispatch(void)
-{
- unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM;
-
- if (pending & CAUSEF_IP7)
- do_IRQ(TIMER_IRQ);
- else if (pending & 0x7800) {
- if (pending & CAUSEF_IP3)
- irq_dispatch(INT1_IRQ);
- else if (pending & CAUSEF_IP4)
- irq_dispatch(INT2_IRQ);
- else if (pending & CAUSEF_IP5)
- irq_dispatch(INT3_IRQ);
- else if (pending & CAUSEF_IP6)
- irq_dispatch(INT4_IRQ);
- } else if (pending & CAUSEF_IP2)
- irq_dispatch(INT0_IRQ);
- else if (pending & CAUSEF_IP0)
- do_IRQ(MIPS_SOFTINT0_IRQ);
- else if (pending & CAUSEF_IP1)
- do_IRQ(MIPS_SOFTINT1_IRQ);
- else
- spurious_interrupt();
-}
-
-void __init arch_init_irq(void)
-{
- mips_cpu_irq_init();
-}
diff --git a/arch/mips/vr41xx/common/pmu.c b/arch/mips/vr41xx/common/pmu.c
deleted file mode 100644
index 93cc7e0b30b1..000000000000
--- a/arch/mips/vr41xx/common/pmu.c
+++ /dev/null
@@ -1,123 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * pmu.c, Power Management Unit routines for NEC VR4100 series.
- *
- * Copyright (C) 2003-2007 Yoichi Yuasa <yuasa@linux-mips.org>
- */
-#include <linux/cpu.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/pm.h>
-#include <linux/sched.h>
-#include <linux/types.h>
-
-#include <asm/cacheflush.h>
-#include <asm/cpu.h>
-#include <asm/idle.h>
-#include <asm/io.h>
-#include <asm/processor.h>
-#include <asm/reboot.h>
-
-#define PMU_TYPE1_BASE 0x0b0000a0UL
-#define PMU_TYPE1_SIZE 0x0eUL
-
-#define PMU_TYPE2_BASE 0x0f0000c0UL
-#define PMU_TYPE2_SIZE 0x10UL
-
-#define PMUCNT2REG 0x06
- #define SOFTRST 0x0010
-
-static void __iomem *pmu_base;
-
-#define pmu_read(offset) readw(pmu_base + (offset))
-#define pmu_write(offset, value) writew((value), pmu_base + (offset))
-
-static void __cpuidle vr41xx_cpu_wait(void)
-{
- local_irq_disable();
- if (!need_resched())
- /*
- * "standby" sets IE bit of the CP0_STATUS to 1.
- */
- __asm__("standby;\n");
- else
- local_irq_enable();
-}
-
-static inline void software_reset(void)
-{
- uint16_t pmucnt2;
-
- switch (current_cpu_type()) {
- case CPU_VR4122:
- case CPU_VR4131:
- case CPU_VR4133:
- pmucnt2 = pmu_read(PMUCNT2REG);
- pmucnt2 |= SOFTRST;
- pmu_write(PMUCNT2REG, pmucnt2);
- break;
- default:
- set_c0_status(ST0_BEV | ST0_ERL);
- change_c0_config(CONF_CM_CMASK, CONF_CM_UNCACHED);
- __flush_cache_all();
- write_c0_wired(0);
- __asm__("jr %0"::"r"(0xbfc00000));
- break;
- }
-}
-
-static void vr41xx_restart(char *command)
-{
- local_irq_disable();
- software_reset();
- while (1) ;
-}
-
-static void vr41xx_halt(void)
-{
- local_irq_disable();
- printk(KERN_NOTICE "\nYou can turn off the power supply\n");
- __asm__("hibernate;\n");
-}
-
-static int __init vr41xx_pmu_init(void)
-{
- unsigned long start, size;
-
- switch (current_cpu_type()) {
- case CPU_VR4111:
- case CPU_VR4121:
- start = PMU_TYPE1_BASE;
- size = PMU_TYPE1_SIZE;
- break;
- case CPU_VR4122:
- case CPU_VR4131:
- case CPU_VR4133:
- start = PMU_TYPE2_BASE;
- size = PMU_TYPE2_SIZE;
- break;
- default:
- printk("Unexpected CPU of NEC VR4100 series\n");
- return -ENODEV;
- }
-
- if (request_mem_region(start, size, "PMU") == NULL)
- return -EBUSY;
-
- pmu_base = ioremap(start, size);
- if (pmu_base == NULL) {
- release_mem_region(start, size);
- return -EBUSY;
- }
-
- cpu_wait = vr41xx_cpu_wait;
- _machine_restart = vr41xx_restart;
- _machine_halt = vr41xx_halt;
- pm_power_off = vr41xx_halt;
-
- return 0;
-}
-
-core_initcall(vr41xx_pmu_init);
diff --git a/arch/mips/vr41xx/common/rtc.c b/arch/mips/vr41xx/common/rtc.c
deleted file mode 100644
index 5ce668317fe6..000000000000
--- a/arch/mips/vr41xx/common/rtc.c
+++ /dev/null
@@ -1,105 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * NEC VR4100 series RTC platform device.
- *
- * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
- */
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/ioport.h>
-#include <linux/platform_device.h>
-
-#include <asm/cpu.h>
-#include <asm/vr41xx/irq.h>
-
-static struct resource rtc_type1_resource[] __initdata = {
- {
- .start = 0x0b0000c0,
- .end = 0x0b0000df,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = 0x0b0001c0,
- .end = 0x0b0001df,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = ELAPSEDTIME_IRQ,
- .end = ELAPSEDTIME_IRQ,
- .flags = IORESOURCE_IRQ,
- },
- {
- .start = RTCLONG1_IRQ,
- .end = RTCLONG1_IRQ,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct resource rtc_type2_resource[] __initdata = {
- {
- .start = 0x0f000100,
- .end = 0x0f00011f,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = 0x0f000120,
- .end = 0x0f00013f,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = ELAPSEDTIME_IRQ,
- .end = ELAPSEDTIME_IRQ,
- .flags = IORESOURCE_IRQ,
- },
- {
- .start = RTCLONG1_IRQ,
- .end = RTCLONG1_IRQ,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static int __init vr41xx_rtc_add(void)
-{
- struct platform_device *pdev;
- struct resource *res;
- unsigned int num;
- int retval;
-
- pdev = platform_device_alloc("RTC", -1);
- if (!pdev)
- return -ENOMEM;
-
- switch (current_cpu_type()) {
- case CPU_VR4111:
- case CPU_VR4121:
- res = rtc_type1_resource;
- num = ARRAY_SIZE(rtc_type1_resource);
- break;
- case CPU_VR4122:
- case CPU_VR4131:
- case CPU_VR4133:
- res = rtc_type2_resource;
- num = ARRAY_SIZE(rtc_type2_resource);
- break;
- default:
- retval = -ENODEV;
- goto err_free_device;
- }
-
- retval = platform_device_add_resources(pdev, res, num);
- if (retval)
- goto err_free_device;
-
- retval = platform_device_add(pdev);
- if (retval)
- goto err_free_device;
-
- return 0;
-
-err_free_device:
- platform_device_put(pdev);
-
- return retval;
-}
-device_initcall(vr41xx_rtc_add);
diff --git a/arch/mips/vr41xx/common/siu.c b/arch/mips/vr41xx/common/siu.c
deleted file mode 100644
index b37a791541bd..000000000000
--- a/arch/mips/vr41xx/common/siu.c
+++ /dev/null
@@ -1,142 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * NEC VR4100 series SIU platform device.
- *
- * Copyright (C) 2007-2008 Yoichi Yuasa <yuasa@linux-mips.org>
- */
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/platform_device.h>
-#include <linux/serial_core.h>
-#include <linux/irq.h>
-
-#include <asm/cpu.h>
-#include <asm/vr41xx/siu.h>
-
-static unsigned int siu_type1_ports[SIU_PORTS_MAX] __initdata = {
- PORT_VR41XX_SIU,
- PORT_UNKNOWN,
-};
-
-static struct resource siu_type1_resource[] __initdata = {
- {
- .start = 0x0c000000,
- .end = 0x0c00000a,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = SIU_IRQ,
- .end = SIU_IRQ,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static unsigned int siu_type2_ports[SIU_PORTS_MAX] __initdata = {
- PORT_VR41XX_SIU,
- PORT_VR41XX_DSIU,
-};
-
-static struct resource siu_type2_resource[] __initdata = {
- {
- .start = 0x0f000800,
- .end = 0x0f00080a,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = 0x0f000820,
- .end = 0x0f000829,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = SIU_IRQ,
- .end = SIU_IRQ,
- .flags = IORESOURCE_IRQ,
- },
- {
- .start = DSIU_IRQ,
- .end = DSIU_IRQ,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static int __init vr41xx_siu_add(void)
-{
- struct platform_device *pdev;
- struct resource *res;
- unsigned int num;
- int retval;
-
- pdev = platform_device_alloc("SIU", -1);
- if (!pdev)
- return -ENOMEM;
-
- switch (current_cpu_type()) {
- case CPU_VR4111:
- case CPU_VR4121:
- pdev->dev.platform_data = siu_type1_ports;
- res = siu_type1_resource;
- num = ARRAY_SIZE(siu_type1_resource);
- break;
- case CPU_VR4122:
- case CPU_VR4131:
- case CPU_VR4133:
- pdev->dev.platform_data = siu_type2_ports;
- res = siu_type2_resource;
- num = ARRAY_SIZE(siu_type2_resource);
- break;
- default:
- retval = -ENODEV;
- goto err_free_device;
- }
-
- retval = platform_device_add_resources(pdev, res, num);
- if (retval)
- goto err_free_device;
-
- retval = platform_device_add(pdev);
- if (retval)
- goto err_free_device;
-
- return 0;
-
-err_free_device:
- platform_device_put(pdev);
-
- return retval;
-}
-device_initcall(vr41xx_siu_add);
-
-void __init vr41xx_siu_setup(void)
-{
- struct uart_port port;
- struct resource *res;
- unsigned int *type;
- int i;
-
- switch (current_cpu_type()) {
- case CPU_VR4111:
- case CPU_VR4121:
- type = siu_type1_ports;
- res = siu_type1_resource;
- break;
- case CPU_VR4122:
- case CPU_VR4131:
- case CPU_VR4133:
- type = siu_type2_ports;
- res = siu_type2_resource;
- break;
- default:
- return;
- }
-
- for (i = 0; i < SIU_PORTS_MAX; i++) {
- port.line = i;
- port.type = type[i];
- if (port.type == PORT_UNKNOWN)
- break;
- port.mapbase = res[i].start;
- port.membase = (unsigned char __iomem *)KSEG1ADDR(res[i].start);
- vr41xx_siu_early_setup(&port);
- }
-}
diff --git a/arch/mips/vr41xx/common/type.c b/arch/mips/vr41xx/common/type.c
deleted file mode 100644
index dddcf1eaa912..000000000000
--- a/arch/mips/vr41xx/common/type.c
+++ /dev/null
@@ -1,11 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * type.c, System type for NEC VR4100 series.
- *
- * Copyright (C) 2005 Yoichi Yuasa <yuasa@linux-mips.org>
- */
-
-const char *get_system_type(void)
-{
- return "NEC VR4100 series";
-}
diff --git a/arch/mips/vr41xx/ibm-workpad/Makefile b/arch/mips/vr41xx/ibm-workpad/Makefile
deleted file mode 100644
index c7be704e7b81..000000000000
--- a/arch/mips/vr41xx/ibm-workpad/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for the IBM WorkPad z50 specific parts of the kernel
-#
-
-obj-y += setup.o
diff --git a/arch/mips/vr41xx/ibm-workpad/setup.c b/arch/mips/vr41xx/ibm-workpad/setup.c
deleted file mode 100644
index 7e14d65c5d2d..000000000000
--- a/arch/mips/vr41xx/ibm-workpad/setup.c
+++ /dev/null
@@ -1,27 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * setup.c, Setup for the IBM WorkPad z50.
- *
- * Copyright (C) 2002-2006 Yoichi Yuasa <yuasa@linux-mips.org>
- */
-#include <linux/init.h>
-#include <linux/ioport.h>
-
-#include <asm/io.h>
-
-#define WORKPAD_ISA_IO_BASE 0x15000000
-#define WORKPAD_ISA_IO_SIZE 0x03000000
-#define WORKPAD_ISA_IO_START 0
-#define WORKPAD_ISA_IO_END (WORKPAD_ISA_IO_SIZE - 1)
-#define WORKPAD_IO_PORT_BASE KSEG1ADDR(WORKPAD_ISA_IO_BASE)
-
-static int __init ibm_workpad_setup(void)
-{
- set_io_port_base(WORKPAD_IO_PORT_BASE);
- ioport_resource.start = WORKPAD_ISA_IO_START;
- ioport_resource.end = WORKPAD_ISA_IO_END;
-
- return 0;
-}
-
-arch_initcall(ibm_workpad_setup);
diff --git a/arch/nios2/include/asm/entry.h b/arch/nios2/include/asm/entry.h
index cf37f55efbc2..bafb7b2ca59f 100644
--- a/arch/nios2/include/asm/entry.h
+++ b/arch/nios2/include/asm/entry.h
@@ -50,7 +50,8 @@
stw r13, PT_R13(sp)
stw r14, PT_R14(sp)
stw r15, PT_R15(sp)
- stw r2, PT_ORIG_R2(sp)
+ movi r24, -1
+ stw r24, PT_ORIG_R2(sp)
stw r7, PT_ORIG_R7(sp)
stw ra, PT_RA(sp)
diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h
index 262d0609268c..b3d45e815295 100644
--- a/arch/nios2/include/asm/pgtable.h
+++ b/arch/nios2/include/asm/pgtable.h
@@ -40,24 +40,8 @@ struct mm_struct;
*/
/* Remove W bit on private pages for COW support */
-#define __P000 MKP(0, 0, 0)
-#define __P001 MKP(0, 0, 1)
-#define __P010 MKP(0, 0, 0) /* COW */
-#define __P011 MKP(0, 0, 1) /* COW */
-#define __P100 MKP(1, 0, 0)
-#define __P101 MKP(1, 0, 1)
-#define __P110 MKP(1, 0, 0) /* COW */
-#define __P111 MKP(1, 0, 1) /* COW */
/* Shared pages can have exact HW mapping */
-#define __S000 MKP(0, 0, 0)
-#define __S001 MKP(0, 0, 1)
-#define __S010 MKP(0, 1, 0)
-#define __S011 MKP(0, 1, 1)
-#define __S100 MKP(1, 0, 0)
-#define __S101 MKP(1, 0, 1)
-#define __S110 MKP(1, 1, 0)
-#define __S111 MKP(1, 1, 1)
/* Used all over the kernel */
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_CACHED | _PAGE_READ | \
@@ -68,11 +52,8 @@ struct mm_struct;
#define PAGE_COPY MKP(0, 0, 1)
-#define PGD_ORDER 0
-#define PTE_ORDER 0
-
-#define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
-#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
+#define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t))
+#define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t))
#define USER_PTRS_PER_PGD \
(CONFIG_NIOS2_KERNEL_MMU_REGION_BASE / PGDIR_SIZE)
diff --git a/arch/nios2/include/asm/ptrace.h b/arch/nios2/include/asm/ptrace.h
index 642462144872..9da34c3022a2 100644
--- a/arch/nios2/include/asm/ptrace.h
+++ b/arch/nios2/include/asm/ptrace.h
@@ -74,6 +74,8 @@ extern void show_regs(struct pt_regs *);
((struct pt_regs *)((unsigned long)current_thread_info() + THREAD_SIZE)\
- 1)
+#define force_successful_syscall_return() (current_pt_regs()->orig_r2 = -1)
+
int do_syscall_trace_enter(void);
void do_syscall_trace_exit(void);
#endif /* __ASSEMBLY__ */
diff --git a/arch/nios2/kernel/entry.S b/arch/nios2/kernel/entry.S
index 0794cd7803df..99f0a65e6234 100644
--- a/arch/nios2/kernel/entry.S
+++ b/arch/nios2/kernel/entry.S
@@ -185,6 +185,7 @@ ENTRY(handle_system_call)
ldw r5, PT_R5(sp)
local_restart:
+ stw r2, PT_ORIG_R2(sp)
/* Check that the requested system call is within limits */
movui r1, __NR_syscalls
bgeu r2, r1, ret_invsyscall
@@ -192,7 +193,6 @@ local_restart:
movhi r11, %hiadj(sys_call_table)
add r1, r1, r11
ldw r1, %lo(sys_call_table)(r1)
- beq r1, r0, ret_invsyscall
/* Check if we are being traced */
GET_THREAD_INFO r11
@@ -213,6 +213,9 @@ local_restart:
translate_rc_and_ret:
movi r1, 0
bge r2, zero, 3f
+ ldw r1, PT_ORIG_R2(sp)
+ addi r1, r1, 1
+ beq r1, zero, 3f
sub r2, zero, r2
movi r1, 1
3:
@@ -255,9 +258,9 @@ traced_system_call:
ldw r6, PT_R6(sp)
ldw r7, PT_R7(sp)
- /* Fetch the syscall function, we don't need to check the boundaries
- * since this is already done.
- */
+ /* Fetch the syscall function. */
+ movui r1, __NR_syscalls
+ bgeu r2, r1, traced_invsyscall
slli r1, r2, 2
movhi r11,%hiadj(sys_call_table)
add r1, r1, r11
@@ -276,6 +279,9 @@ traced_system_call:
translate_rc_and_ret2:
movi r1, 0
bge r2, zero, 4f
+ ldw r1, PT_ORIG_R2(sp)
+ addi r1, r1, 1
+ beq r1, zero, 4f
sub r2, zero, r2
movi r1, 1
4:
@@ -287,6 +293,11 @@ end_translate_rc_and_ret2:
RESTORE_SWITCH_STACK
br ret_from_exception
+ /* If the syscall number was invalid return ENOSYS */
+traced_invsyscall:
+ movi r2, -ENOSYS
+ br translate_rc_and_ret2
+
Luser_return:
GET_THREAD_INFO r11 /* get thread_info pointer */
ldw r10, TI_FLAGS(r11) /* get thread_info->flags */
@@ -336,9 +347,6 @@ external_interrupt:
/* skip if no interrupt is pending */
beq r12, r0, ret_from_interrupt
- movi r24, -1
- stw r24, PT_ORIG_R2(sp)
-
/*
* Process an external hardware interrupt.
*/
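In C terms, the return-value translation that the two translate_rc_and_ret hunks above now perform is roughly the following; this is a hedged paraphrase of the assembly, not code that exists in the tree, and the helper name is illustrative.

/* Hedged paraphrase of translate_rc_and_ret: a negative syscall result is
 * converted to a positive errno plus an error flag, unless the syscall forced
 * success by writing -1 into orig_r2 (force_successful_syscall_return()). */
static void translate_rc(long *r2, long *error_flag, long orig_r2)
{
	*error_flag = 0;			/* assume success */
	if (*r2 < 0 && orig_r2 != -1) {		/* real failure, not a forced success */
		*r2 = -*r2;			/* hand back a positive errno value */
		*error_flag = 1;		/* and mark the result as an error */
	}
}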
diff --git a/arch/nios2/kernel/signal.c b/arch/nios2/kernel/signal.c
index cb0b91589cf2..a5b93a30c6eb 100644
--- a/arch/nios2/kernel/signal.c
+++ b/arch/nios2/kernel/signal.c
@@ -242,7 +242,7 @@ static int do_signal(struct pt_regs *regs)
/*
* If we were from a system call, check for system call restarting...
*/
- if (regs->orig_r2 >= 0) {
+ if (regs->orig_r2 >= 0 && regs->r1) {
continue_addr = regs->ea;
restart_addr = continue_addr - 4;
retval = regs->r2;
@@ -264,6 +264,7 @@ static int do_signal(struct pt_regs *regs)
regs->ea = restart_addr;
break;
}
+ regs->orig_r2 = -1;
}
if (get_signal(&ksig)) {
diff --git a/arch/nios2/kernel/syscall_table.c b/arch/nios2/kernel/syscall_table.c
index 6176d63023c1..c2875a6dd5a4 100644
--- a/arch/nios2/kernel/syscall_table.c
+++ b/arch/nios2/kernel/syscall_table.c
@@ -13,5 +13,6 @@
#define __SYSCALL(nr, call) [nr] = (call),
void *sys_call_table[__NR_syscalls] = {
+ [0 ... __NR_syscalls-1] = sys_ni_syscall,
#include <asm/unistd.h>
};
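The sys_call_table hunk above leans on GCC's range designated initializers: every slot is first filled with sys_ni_syscall, and the generated entries from <asm/unistd.h> then override the implemented ones. A minimal standalone sketch of that idiom, with purely illustrative names (demo_table, default_handler, real_handler):

/* Range designated initializers (a GNU C extension, also accepted by clang):
 * fill the whole table with a stub, then let later designators win. */
#include <stdio.h>

#define NR_ENTRIES 8

static int default_handler(void) { return -38; }	/* stand-in for sys_ni_syscall (-ENOSYS) */
static int real_handler(void) { return 0; }

static int (*demo_table[NR_ENTRIES])(void) = {
	[0 ... NR_ENTRIES - 1] = default_handler,	/* default every slot */
	[3] = real_handler,				/* later initializer overrides slot 3 */
};

int main(void)
{
	for (int i = 0; i < NR_ENTRIES; i++)
		printf("slot %d -> %d\n", i, demo_table[i]());
	return 0;
}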
diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c
index a32f14cd72f2..edaca0a6c1c1 100644
--- a/arch/nios2/mm/fault.c
+++ b/arch/nios2/mm/fault.c
@@ -139,6 +139,10 @@ good_area:
if (fault_signal_pending(fault, regs))
return;
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c
index 613fcaa5988a..7bc82ee889c9 100644
--- a/arch/nios2/mm/init.c
+++ b/arch/nios2/mm/init.c
@@ -78,9 +78,8 @@ void __init mmu_init(void)
flush_tlb_all();
}
-#define __page_aligned(order) __aligned(PAGE_SIZE << (order))
-pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned(PGD_ORDER);
-pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE);
+pte_t invalid_pte_table[PTRS_PER_PTE] __aligned(PAGE_SIZE);
static struct page *kuser_page[1];
static int alloc_kuser_page(void)
@@ -124,3 +123,23 @@ const char *arch_vma_name(struct vm_area_struct *vma)
{
return (vma->vm_start == KUSER_BASE) ? "[kuser]" : NULL;
}
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = MKP(0, 0, 0),
+ [VM_READ] = MKP(0, 0, 1),
+ [VM_WRITE] = MKP(0, 0, 0),
+ [VM_WRITE | VM_READ] = MKP(0, 0, 1),
+ [VM_EXEC] = MKP(1, 0, 0),
+ [VM_EXEC | VM_READ] = MKP(1, 0, 1),
+ [VM_EXEC | VM_WRITE] = MKP(1, 0, 0),
+ [VM_EXEC | VM_WRITE | VM_READ] = MKP(1, 0, 1),
+ [VM_SHARED] = MKP(0, 0, 0),
+ [VM_SHARED | VM_READ] = MKP(0, 0, 1),
+ [VM_SHARED | VM_WRITE] = MKP(0, 1, 0),
+ [VM_SHARED | VM_WRITE | VM_READ] = MKP(0, 1, 1),
+ [VM_SHARED | VM_EXEC] = MKP(1, 0, 0),
+ [VM_SHARED | VM_EXEC | VM_READ] = MKP(1, 0, 1),
+ [VM_SHARED | VM_EXEC | VM_WRITE] = MKP(1, 1, 0),
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = MKP(1, 1, 1)
+};
+DECLARE_VM_GET_PAGE_PROT
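The protection_map tables added here (and in the openrisc and parisc hunks below) are indexed by the low VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits of a VMA's flags, and DECLARE_VM_GET_PAGE_PROT supplies the generic vm_get_page_prot() that performs the lookup. A hedged, self-contained sketch of that lookup; the flag values and table contents below are illustrative stand-ins, not the kernel's definitions:

/* Sketch only: models how a 16-entry protection_map is consulted. */
#include <stdio.h>

#define VM_NONE   0x0UL
#define VM_READ   0x1UL
#define VM_WRITE  0x2UL
#define VM_EXEC   0x4UL
#define VM_SHARED 0x8UL

static const char *demo_protection_map[16] = {
	[VM_NONE]			 = "PAGE_NONE",
	[VM_READ]			 = "PAGE_READONLY",
	[VM_WRITE]			 = "PAGE_COPY",		/* private write => copy-on-write */
	[VM_WRITE | VM_READ]		 = "PAGE_COPY",
	[VM_SHARED | VM_WRITE]		 = "PAGE_SHARED",	/* shared write really writes */
	[VM_SHARED | VM_WRITE | VM_READ] = "PAGE_SHARED",
	/* remaining slots omitted for brevity */
};

/* Roughly what DECLARE_VM_GET_PAGE_PROT provides: mask the flags down to the
 * four permission bits and index the table. */
static const char *demo_vm_get_page_prot(unsigned long vm_flags)
{
	return demo_protection_map[vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}

int main(void)
{
	printf("%s\n", demo_vm_get_page_prot(VM_READ | VM_WRITE));		/* private RW -> COW */
	printf("%s\n", demo_vm_get_page_prot(VM_SHARED | VM_READ | VM_WRITE));	/* shared RW -> shared */
	return 0;
}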
diff --git a/arch/nios2/mm/pgtable.c b/arch/nios2/mm/pgtable.c
index 9b587fd592dd..7c76e8a7447a 100644
--- a/arch/nios2/mm/pgtable.c
+++ b/arch/nios2/mm/pgtable.c
@@ -54,7 +54,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *ret, *init;
- ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
+ ret = (pgd_t *) __get_free_page(GFP_KERNEL);
if (ret) {
init = pgd_offset(&init_mm, 0UL);
pgd_init(ret);
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index e814df4c483c..c7f282f60f64 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -20,8 +20,9 @@ config OPENRISC
select GENERIC_IRQ_CHIP
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
- select GENERIC_IOMAP
+ select GENERIC_PCI_IOMAP
select GENERIC_CPU_DEVICES
+ select HAVE_PCI
select HAVE_UID16
select GENERIC_ATOMIC64
select GENERIC_CLOCKEVENTS_BROADCAST
@@ -32,6 +33,8 @@ config OPENRISC
select CPU_NO_EFFICIENT_FFS if !OPENRISC_HAVE_INST_FF1
select ARCH_USE_QUEUED_RWLOCKS
select OMPIC if SMP
+ select PCI_DOMAINS_GENERIC if PCI
+ select PCI_MSI if PCI
select ARCH_WANT_FRAME_POINTERS
select GENERIC_IRQ_MULTI_HANDLER
select MMU_GATHER_NO_RANGE if MMU
diff --git a/arch/openrisc/configs/virt_defconfig b/arch/openrisc/configs/virt_defconfig
new file mode 100644
index 000000000000..c1b69166c500
--- /dev/null
+++ b/arch/openrisc/configs/virt_defconfig
@@ -0,0 +1,108 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_OPENRISC_HAVE_INST_CMOV=y
+CONFIG_OPENRISC_HAVE_INST_ROR=y
+CONFIG_OPENRISC_HAVE_INST_RORI=y
+CONFIG_OPENRISC_HAVE_INST_SEXT=y
+CONFIG_NR_CPUS=8
+CONFIG_SMP=y
+CONFIG_HZ_100=y
+# CONFIG_OPENRISC_NO_SPR_SR_DSX is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+# CONFIG_INET_DIAG is not set
+CONFIG_TCP_CONG_ADVANCED=y
+# CONFIG_TCP_CONG_BIC is not set
+# CONFIG_TCP_CONG_CUBIC is not set
+# CONFIG_TCP_CONG_WESTWOOD is not set
+# CONFIG_TCP_CONG_HTCP is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_PCI=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=y
+CONFIG_VIRTIO_BLK=y
+CONFIG_NETDEVICES=y
+CONFIG_VIRTIO_NET=y
+CONFIG_ETHOC=y
+CONFIG_MICREL_PHY=y
+# CONFIG_WLAN is not set
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_VIRTIO_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_POWER_RESET_SYSCON_POWEROFF=y
+CONFIG_SYSCON_REBOOT_MODE=y
+# CONFIG_HWMON is not set
+CONFIG_DRM=y
+# CONFIG_DRM_FBDEV_EMULATION is not set
+CONFIG_DRM_VIRTIO_GPU=y
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_LOGO=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_INTEL8X0=y
+CONFIG_SND_INTEL8X0M=y
+CONFIG_SND_SOC=y
+CONFIG_SND_VIRTIO=y
+# CONFIG_HID_A4TECH is not set
+# CONFIG_HID_BELKIN is not set
+# CONFIG_HID_CHERRY is not set
+# CONFIG_HID_CHICONY is not set
+# CONFIG_HID_CYPRESS is not set
+# CONFIG_HID_EZKEY is not set
+# CONFIG_HID_ITE is not set
+# CONFIG_HID_KENSINGTON is not set
+# CONFIG_HID_REDRAGON is not set
+# CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MONTEREY is not set
+CONFIG_USB=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_PLATFORM=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_SERIAL=y
+CONFIG_USB_GADGET=y
+CONFIG_TYPEC=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GOLDFISH=y
+CONFIG_VIRT_DRIVERS=y
+CONFIG_VIRTIO_PCI=y
+# CONFIG_VIRTIO_PCI_LEGACY is not set
+CONFIG_VIRTIO_INPUT=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+# CONFIG_DNOTIFY is not set
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_EXFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_UNICODE=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_FTRACE=y
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index 3386b9c1c073..c8c99b554ca4 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
generic-y += extable.h
generic-y += kvm_para.h
+generic-y += parport.h
generic-y += spinlock_types.h
generic-y += spinlock.h
generic-y += qrwlock_types.h
diff --git a/arch/openrisc/include/asm/io.h b/arch/openrisc/include/asm/io.h
index c298061c70a7..ee6043a03173 100644
--- a/arch/openrisc/include/asm/io.h
+++ b/arch/openrisc/include/asm/io.h
@@ -17,7 +17,7 @@
#include <linux/types.h>
/*
- * PCI: can we really do 0 here if we have no port IO?
+ * PCI: We do not use IO ports in OpenRISC
*/
#define IO_SPACE_LIMIT 0
@@ -31,7 +31,7 @@
void __iomem *ioremap(phys_addr_t offset, unsigned long size);
#define iounmap iounmap
-extern void iounmap(void __iomem *addr);
+extern void iounmap(volatile void __iomem *addr);
#include <asm-generic/io.h>
diff --git a/arch/openrisc/include/asm/pgtable.h b/arch/openrisc/include/asm/pgtable.h
index c3abbf71e09f..dcae8aea132f 100644
--- a/arch/openrisc/include/asm/pgtable.h
+++ b/arch/openrisc/include/asm/pgtable.h
@@ -176,24 +176,6 @@ extern void paging_init(void);
__pgprot(_PAGE_ALL | _PAGE_SRE | _PAGE_SWE \
| _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC | _PAGE_CI)
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY_X
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY_X
-#define __P100 PAGE_READONLY
-#define __P101 PAGE_READONLY_X
-#define __P110 PAGE_COPY
-#define __P111 PAGE_COPY_X
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY_X
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED_X
-#define __S100 PAGE_READONLY
-#define __S101 PAGE_READONLY_X
-#define __S110 PAGE_SHARED
-#define __S111 PAGE_SHARED_X
-
/* zero page used for uninitialized stuff */
extern unsigned long empty_zero_page[2048];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
diff --git a/arch/openrisc/kernel/smp.c b/arch/openrisc/kernel/smp.c
index 27041db2c8b0..e1419095a6f0 100644
--- a/arch/openrisc/kernel/smp.c
+++ b/arch/openrisc/kernel/smp.c
@@ -197,12 +197,6 @@ void smp_send_stop(void)
smp_call_function(stop_this_cpu, NULL, 0);
}
-/* not supported, yet */
-int setup_profiling_timer(unsigned int multiplier)
-{
- return -EINVAL;
-}
-
void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
smp_cross_call = fn;
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
index 53b760af3bb7..b4762d66e9ef 100644
--- a/arch/openrisc/mm/fault.c
+++ b/arch/openrisc/mm/fault.c
@@ -165,6 +165,10 @@ good_area:
if (fault_signal_pending(fault, regs))
return;
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
index 3a021ab6f1ae..d531ab82be12 100644
--- a/arch/openrisc/mm/init.c
+++ b/arch/openrisc/mm/init.c
@@ -208,3 +208,23 @@ void __init mem_init(void)
mem_init_done = 1;
return;
}
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READONLY_X,
+ [VM_WRITE] = PAGE_COPY,
+ [VM_WRITE | VM_READ] = PAGE_COPY_X,
+ [VM_EXEC] = PAGE_READONLY,
+ [VM_EXEC | VM_READ] = PAGE_READONLY_X,
+ [VM_EXEC | VM_WRITE] = PAGE_COPY,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_X,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READONLY_X,
+ [VM_SHARED | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED_X,
+ [VM_SHARED | VM_EXEC] = PAGE_READONLY,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_X,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_X
+};
+DECLARE_VM_GET_PAGE_PROT
diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c
index daae13a76743..8ec0dafecf25 100644
--- a/arch/openrisc/mm/ioremap.c
+++ b/arch/openrisc/mm/ioremap.c
@@ -77,7 +77,7 @@ void __iomem *__ref ioremap(phys_addr_t addr, unsigned long size)
}
EXPORT_SYMBOL(ioremap);
-void iounmap(void __iomem *addr)
+void iounmap(volatile void __iomem *addr)
{
/* If the page is from the fixmap pool then we just clear out
* the fixmap mapping.
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index fa400055b2d5..9aede2447011 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -44,7 +44,6 @@ config PARISC
select SYSCTL_ARCH_UNALIGN_ALLOW
select SYSCTL_EXCEPTION_TRACE
select HAVE_MOD_ARCH_SPECIFIC
- select VIRT_TO_BUS
select MODULES_USE_ELF_RELA
select CLONE_BACKWARDS
select TTY # Needed for pdc_cons.c
@@ -147,10 +146,10 @@ menu "Processor type and features"
choice
prompt "Processor type"
- default PA7000
+ default PA7000 if "$(ARCH)" = "parisc"
config PA7000
- bool "PA7000/PA7100"
+ bool "PA7000/PA7100" if "$(ARCH)" = "parisc"
help
This is the processor type of your CPU. This information is
used for optimizing purposes. In order to compile a kernel
@@ -161,21 +160,21 @@ config PA7000
which is required on some machines.
config PA7100LC
- bool "PA7100LC"
+ bool "PA7100LC" if "$(ARCH)" = "parisc"
help
Select this option for the PCX-L processor, as used in the
712, 715/64, 715/80, 715/100, 715/100XC, 725/100, 743, 748,
D200, D210, D300, D310 and E-class
config PA7200
- bool "PA7200"
+ bool "PA7200" if "$(ARCH)" = "parisc"
help
Select this option for the PCX-T' processor, as used in the
C100, C110, J100, J110, J210XC, D250, D260, D350, D360,
K100, K200, K210, K220, K400, K410 and K420
config PA7300LC
- bool "PA7300LC"
+ bool "PA7300LC" if "$(ARCH)" = "parisc"
help
Select this option for the PCX-L2 processor, as used in the
744, A180, B132L, B160L, B180L, C132L, C160L, C180L,
@@ -225,17 +224,8 @@ config MLONGCALLS
Enabling this option will probably slow down your kernel.
config 64BIT
- bool "64-bit kernel"
+ def_bool "$(ARCH)" = "parisc64"
depends on PA8X00
- help
- Enable this if you want to support 64bit kernel on PA-RISC platform.
-
- At the moment, only people willing to use more than 2GB of RAM,
- or having a 64bit-only capable PA-RISC machine should say Y here.
-
- Since there is no 64bit userland on PA-RISC, there is no point to
- enable this option otherwise. The 64bit kernel is significantly bigger
- and slower than the 32bit one.
choice
prompt "Kernel page size"
diff --git a/arch/parisc/include/asm/bitops.h b/arch/parisc/include/asm/bitops.h
index 56ffd260c669..0ec9cfc5131f 100644
--- a/arch/parisc/include/asm/bitops.h
+++ b/arch/parisc/include/asm/bitops.h
@@ -12,14 +12,6 @@
#include <asm/barrier.h>
#include <linux/atomic.h>
-/* compiler build environment sanity checks: */
-#if !defined(CONFIG_64BIT) && defined(__LP64__)
-#error "Please use 'ARCH=parisc' to build the 32-bit kernel."
-#endif
-#if defined(CONFIG_64BIT) && !defined(__LP64__)
-#error "Please use 'ARCH=parisc64' to build the 64-bit kernel."
-#endif
-
/* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
* on use of volatile and __*_bit() (set/clear/change):
* *_bit() want use of volatile.
diff --git a/arch/parisc/include/asm/dma.h b/arch/parisc/include/asm/dma.h
index eea80ed34e6d..9e8c101de902 100644
--- a/arch/parisc/include/asm/dma.h
+++ b/arch/parisc/include/asm/dma.h
@@ -176,10 +176,4 @@ static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
#define free_dma(dmanr)
-#ifdef CONFIG_PCI
-extern int isa_dma_bridge_buggy;
-#else
-#define isa_dma_bridge_buggy (0)
-#endif
-
#endif /* _ASM_DMA_H */
diff --git a/arch/parisc/include/asm/floppy.h b/arch/parisc/include/asm/floppy.h
index 762cfe7778c0..b318a7df52f6 100644
--- a/arch/parisc/include/asm/floppy.h
+++ b/arch/parisc/include/asm/floppy.h
@@ -179,7 +179,7 @@ static void _fd_chose_dma_mode(char *addr, unsigned long size)
{
if(can_use_virtual_dma == 2) {
if((unsigned int) addr >= (unsigned int) high_memory ||
- virt_to_bus(addr) >= 0x1000000 ||
+ virt_to_phys(addr) >= 0x1000000 ||
_CROSS_64KB(addr, size, 0))
use_virtual_dma = 1;
else
@@ -215,7 +215,7 @@ static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
doing_pdma = 0;
clear_dma_ff(FLOPPY_DMA);
set_dma_mode(FLOPPY_DMA,mode);
- set_dma_addr(FLOPPY_DMA,virt_to_bus(addr));
+ set_dma_addr(FLOPPY_DMA,virt_to_phys(addr));
set_dma_count(FLOPPY_DMA,size);
enable_dma(FLOPPY_DMA);
return 0;
diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h
index 837ddddbac6a..42ffb60a6ea9 100644
--- a/arch/parisc/include/asm/io.h
+++ b/arch/parisc/include/asm/io.h
@@ -7,8 +7,6 @@
#define virt_to_phys(a) ((unsigned long)__pa(a))
#define phys_to_virt(a) __va(a)
-#define virt_to_bus virt_to_phys
-#define bus_to_virt phys_to_virt
static inline unsigned long isa_bus_to_virt(unsigned long addr) {
BUG();
diff --git a/arch/parisc/include/asm/pci.h b/arch/parisc/include/asm/pci.h
index f14465b84de4..127ed5021ae3 100644
--- a/arch/parisc/include/asm/pci.h
+++ b/arch/parisc/include/asm/pci.h
@@ -162,11 +162,6 @@ extern void pcibios_init_bridge(struct pci_dev *);
#define PCIBIOS_MIN_IO 0x10
#define PCIBIOS_MIN_MEM 0x1000 /* NBPG - but pci/setup-res.c dies */
-static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
-{
- return channel ? 15 : 14;
-}
-
#define HAVE_PCI_MMAP
#define ARCH_GENERIC_PCI_MMAP_RESOURCE
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
index 54b63374579b..e3e142b1c5c5 100644
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -20,18 +20,18 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *pgd;
- pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
+ pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_TABLE_ORDER);
if (unlikely(pgd == NULL))
return NULL;
- memset(pgd, 0, PAGE_SIZE << PGD_ORDER);
+ memset(pgd, 0, PAGE_SIZE << PGD_TABLE_ORDER);
return pgd;
}
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
- free_pages((unsigned long)pgd, PGD_ORDER);
+ free_pages((unsigned long)pgd, PGD_TABLE_ORDER);
}
#if CONFIG_PGTABLE_LEVELS == 3
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 69765a6dbe89..df7b931865d2 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -118,9 +118,9 @@ extern void __update_cache(pte_t pte);
#if CONFIG_PGTABLE_LEVELS == 3
#define PMD_TABLE_ORDER 1
-#define PGD_ORDER 0
+#define PGD_TABLE_ORDER 0
#else
-#define PGD_ORDER 1
+#define PGD_TABLE_ORDER 1
#endif
/* Definitions for 3rd level (we use PLD here for Page Lower directory
@@ -144,10 +144,10 @@ extern void __update_cache(pte_t pte);
/* Definitions for 1st level */
#define PGDIR_SHIFT (PLD_SHIFT + BITS_PER_PTE + BITS_PER_PMD)
-#if (PGDIR_SHIFT + PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY) > BITS_PER_LONG
+#if (PGDIR_SHIFT + PAGE_SHIFT + PGD_TABLE_ORDER - BITS_PER_PGD_ENTRY) > BITS_PER_LONG
#define BITS_PER_PGD (BITS_PER_LONG - PGDIR_SHIFT)
#else
-#define BITS_PER_PGD (PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY)
+#define BITS_PER_PGD (PAGE_SHIFT + PGD_TABLE_ORDER - BITS_PER_PGD_ENTRY)
#endif
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
@@ -271,24 +271,6 @@ extern void __update_cache(pte_t pte);
*/
/*xwr*/
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 __P000 /* copy on write */
-#define __P011 __P001 /* copy on write */
-#define __P100 PAGE_EXECREAD
-#define __P101 PAGE_EXECREAD
-#define __P110 __P100 /* copy on write */
-#define __P111 __P101 /* copy on write */
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_WRITEONLY
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_EXECREAD
-#define __S101 PAGE_EXECREAD
-#define __S110 PAGE_RWX
-#define __S111 PAGE_RWX
-
extern pgd_t swapper_pg_dir[]; /* declared in init_task.c */
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 993999a65e54..3feb7694e0ca 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -50,9 +50,6 @@ void flush_instruction_cache_local(void); /* flushes local code-cache only */
*/
DEFINE_SPINLOCK(pa_tlb_flush_lock);
-/* Swapper page setup lock. */
-DEFINE_SPINLOCK(pa_swapper_pg_lock);
-
#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
int pa_serialize_tlb_flushes __ro_after_init;
#endif
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index 776d624a7207..d126e78e101a 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -520,7 +520,6 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path)
dev->id.hversion_rev = iodc_data[1] & 0x0f;
dev->id.sversion = ((iodc_data[4] & 0x0f) << 16) |
(iodc_data[5] << 8) | iodc_data[6];
- dev->hpa.name = parisc_pathname(dev);
dev->hpa.start = hpa;
/* This is awkward. The STI spec says that gfx devices may occupy
* 32MB or 64MB. Unfortunately, we don't know how to tell whether
@@ -534,10 +533,10 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path)
dev->hpa.end = hpa + 0xfff;
}
dev->hpa.flags = IORESOURCE_MEM;
- name = parisc_hardware_description(&dev->id);
- if (name) {
- strlcpy(dev->name, name, sizeof(dev->name));
- }
+ dev->hpa.name = dev->name;
+ name = parisc_hardware_description(&dev->id) ? : "unknown";
+ snprintf(dev->name, sizeof(dev->name), "%s [%s]",
+ name, parisc_pathname(dev));
/* Silently fail things like mouse ports which are subsumed within
* the keyboard controller
diff --git a/arch/parisc/kernel/hardware.c b/arch/parisc/kernel/hardware.c
index 17161e72ea29..357d9cdab7ce 100644
--- a/arch/parisc/kernel/hardware.c
+++ b/arch/parisc/kernel/hardware.c
@@ -23,9 +23,6 @@
* HP PARISC Hardware Database
* Access to this database is only possible during bootup
* so don't reference this table after starting the init process
- *
- * NOTE: Product names which are listed here and ends with a '?'
- * are guessed. If you know the correct name, please let us know.
*/
static struct hp_hardware hp_hardware_list[] __initdata = {
@@ -212,7 +209,7 @@ static struct hp_hardware hp_hardware_list[] __initdata = {
{HPHW_NPROC,0x5DD,0x4,0x81,"Duet W2"},
{HPHW_NPROC,0x5DE,0x4,0x81,"Piccolo W+"},
{HPHW_NPROC,0x5DF,0x4,0x81,"Cantata W2"},
- {HPHW_NPROC,0x5DF,0x0,0x00,"Marcato W+ (rp5470)?"},
+ {HPHW_NPROC,0x5DF,0x0,0x00,"Marcato W+ (rp5470)"},
{HPHW_NPROC,0x5E0,0x4,0x91,"Cantata DC- W2"},
{HPHW_NPROC,0x5E1,0x4,0x91,"Crescendo DC- W2"},
{HPHW_NPROC,0x5E2,0x4,0x91,"Crescendo 650 W2"},
@@ -266,11 +263,11 @@ static struct hp_hardware hp_hardware_list[] __initdata = {
{HPHW_NPROC,0x888,0x4,0x91,"Storm Peak Fast DC-"},
{HPHW_NPROC,0x889,0x4,0x91,"Storm Peak Fast"},
{HPHW_NPROC,0x88A,0x4,0x91,"Crestone Peak Slow"},
- {HPHW_NPROC,0x88B,0x4,0x91,"Crestone Peak Fast?"},
+ {HPHW_NPROC,0x88B,0x4,0x91,"Crestone Peak Fast"},
{HPHW_NPROC,0x88C,0x4,0x91,"Orca Mako+"},
{HPHW_NPROC,0x88D,0x4,0x91,"Rainier/Medel Mako+ Slow"},
{HPHW_NPROC,0x88E,0x4,0x91,"Rainier/Medel Mako+ Fast"},
- {HPHW_NPROC,0x892,0x4,0x91,"Mt. Hamilton Slow Mako+?"},
+ {HPHW_NPROC,0x892,0x4,0x91,"Mt. Hamilton Slow Mako+"},
{HPHW_NPROC,0x894,0x4,0x91,"Mt. Hamilton Fast Mako+"},
{HPHW_NPROC,0x895,0x4,0x91,"Storm Peak Slow Mako+"},
{HPHW_NPROC,0x896,0x4,0x91,"Storm Peak Fast Mako+"},
@@ -1198,7 +1195,7 @@ static struct hp_hardware hp_hardware_list[] __initdata = {
{HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"},
{HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"},
{HPHW_FIO, 0x076, 0x000AD, 0x0, "Crestone Peak Core RS-232"},
- {HPHW_FIO, 0x077, 0x000AD, 0x0, "Crestone Peak Fast? Core RS-232"},
+ {HPHW_FIO, 0x077, 0x000AD, 0x0, "Crestone Peak Fast Core RS-232"},
{HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"},
{HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"},
{HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"},
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index e0a9e9657622..fd15fd4bbb61 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -22,7 +22,7 @@
#include <linux/init.h>
#include <linux/pgtable.h>
- .level PA_ASM_LEVEL
+ .level 1.1
__INITDATA
ENTRY(boot_args)
@@ -70,6 +70,47 @@ $bss_loop:
stw,ma %arg2,4(%r1)
stw,ma %arg3,4(%r1)
+#if !defined(CONFIG_64BIT) && defined(CONFIG_PA20)
+ /* This 32-bit kernel was compiled for PA2.0 CPUs. Check current CPU
+ * and halt kernel if we detect a PA1.x CPU. */
+ ldi 32,%r10
+ mtctl %r10,%cr11
+ .level 2.0
+ mfctl,w %cr11,%r10
+ .level 1.1
+ comib,<>,n 0,%r10,$cpu_ok
+
+ load32 PA(msg1),%arg0
+ ldi msg1_end-msg1,%arg1
+$iodc_panic:
+ copy %arg0, %r10
+ copy %arg1, %r11
+ load32 PA(init_stack),%sp
+#define MEM_CONS 0x3A0
+ ldw MEM_CONS+32(%r0),%arg0 // HPA
+ ldi ENTRY_IO_COUT,%arg1
+ ldw MEM_CONS+36(%r0),%arg2 // SPA
+ ldw MEM_CONS+8(%r0),%arg3 // layers
+ load32 PA(__bss_start),%r1
+ stw %r1,-52(%sp) // arg4
+ stw %r0,-56(%sp) // arg5
+ stw %r10,-60(%sp) // arg6 = ptr to text
+ stw %r11,-64(%sp) // arg7 = len
+ stw %r0,-68(%sp) // arg8
+ load32 PA(.iodc_panic_ret), %rp
+ ldw MEM_CONS+40(%r0),%r1 // ENTRY_IODC
+ bv,n (%r1)
+.iodc_panic_ret:
+ b . /* wait endless with ... */
+ or %r10,%r10,%r10 /* qemu idle sleep */
+msg1: .ascii "Can't boot kernel which was built for PA8x00 CPUs on this machine.\r\n"
+msg1_end:
+
+$cpu_ok:
+#endif
+
+ .level PA_ASM_LEVEL
+
/* Initialize startup VM. Just map first 16/32 MB of memory */
load32 PA(swapper_pg_dir),%r4
mtctl %r4,%cr24 /* Initialize kernel root pointer */
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 5ebb1771b4ab..fbb882cb8dbb 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -480,10 +480,12 @@ static void execute_on_irq_stack(void *func, unsigned long param1)
*irq_stack_in_use = 1;
}
+#ifndef CONFIG_PREEMPT_RT
void do_softirq_own_stack(void)
{
execute_on_irq_stack(__do_softirq, 0);
}
+#endif
#endif /* CONFIG_IRQSTACKS */
/* ONLY called from entry.S:intr_extint() */
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index 160996f2198e..ba87f791323b 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -36,8 +36,8 @@
#include <asm/tlbflush.h> /* for purge_tlb_*() macros */
static struct proc_dir_entry * proc_gsc_root __read_mostly = NULL;
-static unsigned long pcxl_used_bytes __read_mostly = 0;
-static unsigned long pcxl_used_pages __read_mostly = 0;
+static unsigned long pcxl_used_bytes __read_mostly;
+static unsigned long pcxl_used_pages __read_mostly;
extern unsigned long pcxl_dma_start; /* Start of pcxl dma mapping area */
static DEFINE_SPINLOCK(pcxl_res_lock);
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 24d0744c3b3a..7dbd92cafae3 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -513,10 +513,3 @@ void __cpu_die(unsigned int cpu)
pdc_cpu_rendezvous_unlock();
}
-
-#ifdef CONFIG_PROC_FS
-int setup_profiling_timer(unsigned int multiplier)
-{
- return -EINVAL;
-}
-#endif
diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl
index 68b46fe2f17c..8a99c998da9b 100644
--- a/arch/parisc/kernel/syscalls/syscall.tbl
+++ b/arch/parisc/kernel/syscalls/syscall.tbl
@@ -413,7 +413,7 @@
412 32 utimensat_time64 sys_utimensat sys_utimensat
413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
-416 32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents
+416 32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
418 32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend
419 32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive
diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
index bac581b5ecfc..e8a4d77cff53 100644
--- a/arch/parisc/kernel/unaligned.c
+++ b/arch/parisc/kernel/unaligned.c
@@ -93,7 +93,7 @@
#define R1(i) (((i)>>21)&0x1f)
#define R2(i) (((i)>>16)&0x1f)
#define R3(i) ((i)&0x1f)
-#define FR3(i) ((((i)<<1)&0x1f)|(((i)>>6)&1))
+#define FR3(i) ((((i)&0x1f)<<1)|(((i)>>6)&1))
#define IM(i,n) (((i)>>1&((1<<(n-1))-1))|((i)&1?((0-1L)<<(n-1)):0))
#define IM5_2(i) IM((i)>>16,5)
#define IM5_3(i) IM((i),5)
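The FR3() change above is easy to check by hand: the old form shifted the instruction word before masking, so bit 4 of the 5-bit register field was lost whenever the field value was 16 or more. A small standalone check using only the two macro bodies from this hunk, applied to an illustrative instruction word:

/* 0x5f is just an example word with all five field bits and bit 6 set. */
#include <stdio.h>

#define FR3_OLD(i) ((((i) << 1) & 0x1f) | (((i) >> 6) & 1))	/* pre-fix */
#define FR3_NEW(i) ((((i) & 0x1f) << 1) | (((i) >> 6) & 1))	/* post-fix */

int main(void)
{
	unsigned int insn = 0x5f;	/* low 5 bits = 0x1f, bit 6 = 1 */

	printf("old: 0x%x\n", FR3_OLD(insn));	/* prints 0x1f - top bit of the field lost */
	printf("new: 0x%x\n", FR3_NEW(insn));	/* prints 0x3f - full field preserved */
	return 0;
}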
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 84bc437be5cd..869204e97ec9 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -38,7 +38,7 @@ int show_unhandled_signals = 1;
/*
* parisc_acctyp(unsigned int inst) --
* Given a PA-RISC memory access instruction, determine if the
- * the instruction would perform a memory read or memory write
+ * instruction would perform a memory read or memory write
* operation.
*
* This function assumes that the given instruction is a memory access
@@ -311,6 +311,10 @@ good_area:
if (fault_signal_pending(fault, regs))
return;
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
/*
* We hit a shared mapping outside of the file, or some
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 0a81499dd35e..b0c43f3b0a5f 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -722,7 +722,7 @@ static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
static unsigned long dirty_space_id[SID_ARRAY_SIZE];
static unsigned long space_id_index;
static unsigned long free_space_ids = NR_SPACE_IDS - 1;
-static unsigned long dirty_space_ids = 0;
+static unsigned long dirty_space_ids;
static DEFINE_SPINLOCK(sid_lock);
@@ -871,3 +871,23 @@ void flush_tlb_all(void)
spin_unlock(&sid_lock);
}
#endif
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READONLY,
+ [VM_WRITE] = PAGE_NONE,
+ [VM_WRITE | VM_READ] = PAGE_READONLY,
+ [VM_EXEC] = PAGE_EXECREAD,
+ [VM_EXEC | VM_READ] = PAGE_EXECREAD,
+ [VM_EXEC | VM_WRITE] = PAGE_EXECREAD,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_EXECREAD,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READONLY,
+ [VM_SHARED | VM_WRITE] = PAGE_WRITEONLY,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC] = PAGE_EXECREAD,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_EXECREAD,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_RWX,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_RWX
+};
+DECLARE_VM_GET_PAGE_PROT
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index a4f8a5276e5c..4c466acdc70d 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -11,7 +11,7 @@ config 64BIT
config LIVEPATCH_64
def_bool PPC64
- depends on LIVEPATCH
+ depends on LIVEPATCH
config MMU
bool
@@ -140,7 +140,6 @@ config PPC
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UACCESS_FLUSHCACHE
select ARCH_HAS_UBSAN_SANITIZE_ALL
- select ARCH_HAS_VM_GET_PAGE_PROT if PPC_BOOK3S_64
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_KEEP_MEMBLOCK
select ARCH_MIGHT_HAVE_PC_PARPORT
@@ -193,8 +192,10 @@ config PPC
select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KASAN if PPC32 && PPC_PAGE_SHIFT <= 14
select HAVE_ARCH_KASAN if PPC_RADIX_MMU
+ select HAVE_ARCH_KASAN if PPC_BOOK3E_64
select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN
select HAVE_ARCH_KFENCE if PPC_BOOK3S_32 || PPC_8xx || 40x
+ select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
@@ -254,6 +255,7 @@ config PPC
select IOMMU_HELPER if PPC64
select IRQ_DOMAIN
select IRQ_FORCED_THREADING
+ select KASAN_VMALLOC if KASAN && MODULES
select MMU_GATHER_PAGE_SIZE
select MMU_GATHER_RCU_TABLE_FREE
select MMU_GATHER_MERGE_VMAS
@@ -277,7 +279,6 @@ config PPC
select SYSCTL_EXCEPTION_TRACE
select THREAD_INFO_IN_TASK
select TRACE_IRQFLAGS_SUPPORT
- select VIRT_TO_BUS if !PPC64
#
# Please keep this list sorted alphabetically.
#
@@ -378,6 +379,17 @@ config PPC_DCR
depends on PPC_DCR_NATIVE || PPC_DCR_MMIO
default y
+config PPC_PCI_BUS_NUM_DOMAIN_DEPENDENT
+ depends on PPC32
+ depends on !PPC_PMAC && !PPC_CHRP
+ bool "Assign PCI bus numbers from zero individually for each PCI domain"
+ help
+ By default on PPC32, PCI bus numbers are unique across all PCI domains,
+ so a system can have at most 256 PCI buses in total, regardless of how
+ many PCI domains are available. When this option is enabled, PCI bus
+ numbers become PCI domain dependent, so each PCI controller in its own
+ domain can have up to 256 PCI buses, as on other Linux architectures.
+
config PPC_OF_PLATFORM_PCI
bool
depends on PCI
@@ -454,7 +466,7 @@ choice
default MATH_EMULATION_FULL
depends on MATH_EMULATION
-config MATH_EMULATION_FULL
+config MATH_EMULATION_FULL
bool "Emulate all the floating point instructions"
help
Select this option will enable the kernel to support to emulate
@@ -556,7 +568,6 @@ config KEXEC_FILE
bool "kexec file based system call"
select KEXEC_CORE
select HAVE_IMA_KEXEC if IMA
- select BUILD_BIN2C
select KEXEC_ELF
depends on PPC64
depends on CRYPTO=y
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 9f363c143d86..ae727d4218b9 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -305,7 +305,6 @@ config PPC_EARLY_DEBUG_OPAL
def_bool y
depends on PPC_EARLY_DEBUG_OPAL_RAW || PPC_EARLY_DEBUG_OPAL_HVSI
-
config PPC_EARLY_DEBUG_HVSI_VTERMNO
hex "vterm number to use with early debug HVSI"
depends on PPC_EARLY_DEBUG_LPAR_HVSI
@@ -375,4 +374,5 @@ config KASAN_SHADOW_OFFSET
hex
depends on KASAN
default 0xe0000000 if PPC32
- default 0xa80e000000000000 if PPC64
+ default 0xa80e000000000000 if PPC_BOOK3S_64
+ default 0xa8001c0000000000 if PPC_BOOK3E_64
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index a0cd70712061..02742facf895 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -15,23 +15,6 @@ HAS_BIARCH := $(call cc-option-yn, -m32)
# Set default 32 bits cross compilers for vdso and boot wrapper
CROSS32_COMPILE ?=
-ifeq ($(HAS_BIARCH),y)
-ifeq ($(CROSS32_COMPILE),)
-ifdef CONFIG_PPC32
-# These options will be overridden by any -mcpu option that the CPU
-# or platform code sets later on the command line, but they are needed
-# to set a sane 32-bit cpu target for the 64-bit cross compiler which
-# may default to the wrong ISA.
-KBUILD_CFLAGS += -mcpu=powerpc
-KBUILD_AFLAGS += -mcpu=powerpc
-endif
-endif
-endif
-
-ifdef CONFIG_PPC_BOOK3S_32
-KBUILD_CFLAGS += -mcpu=powerpc
-endif
-
# If we're on a ppc/ppc64/ppc64le machine use that defconfig, otherwise just use
# ppc64_defconfig because we have nothing better to go on.
uname := $(shell uname -m)
@@ -183,24 +166,11 @@ endif
endif
CFLAGS-$(CONFIG_TARGET_CPU_BOOL) += $(call cc-option,-mcpu=$(CONFIG_TARGET_CPU))
+AFLAGS-$(CONFIG_TARGET_CPU_BOOL) += $(call cc-option,-mcpu=$(CONFIG_TARGET_CPU))
-# Altivec option not allowed with e500mc64 in GCC.
-ifdef CONFIG_ALTIVEC
-E5500_CPU := -mcpu=powerpc64
-else
-E5500_CPU := $(call cc-option,-mcpu=e500mc64,-mcpu=powerpc64)
-endif
-CFLAGS-$(CONFIG_E5500_CPU) += $(E5500_CPU)
+CFLAGS-$(CONFIG_E5500_CPU) += $(call cc-option,-mcpu=e500mc64,-mcpu=powerpc64)
CFLAGS-$(CONFIG_E6500_CPU) += $(call cc-option,-mcpu=e6500,$(E5500_CPU))
-ifdef CONFIG_PPC32
-ifdef CONFIG_PPC_E500MC
-CFLAGS-y += $(call cc-option,-mcpu=e500mc,-mcpu=powerpc)
-else
-CFLAGS-$(CONFIG_E500) += $(call cc-option,-mcpu=8540 -msoft-float,-mcpu=powerpc)
-endif
-endif
-
asinstr := $(call as-instr,lis 9$(comma)foo@high,-DHAVE_AS_ATHIGH=1)
KBUILD_CPPFLAGS += -I $(srctree)/arch/$(ARCH) $(asinstr)
diff --git a/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi b/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi
index 7a590c92fe56..81b9ab2119be 100644
--- a/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi
@@ -48,6 +48,7 @@
bus-range = <0 255>;
clock-frequency = <33333333>;
interrupts = <26 2 0 0>;
+ law_trgt_if = <2>;
pcie@0 {
reg = <0 0 0 0 0>;
@@ -76,6 +77,7 @@
bus-range = <0 255>;
clock-frequency = <33333333>;
interrupts = <25 2 0 0>;
+ law_trgt_if = <1>;
pcie@0 {
reg = <0 0 0 0 0>;
@@ -105,6 +107,7 @@
bus-range = <0 255>;
clock-frequency = <33333333>;
interrupts = <24 2 0 0>;
+ law_trgt_if = <0>;
pcie@0 {
reg = <0 0 0 0 0>;
diff --git a/arch/powerpc/boot/dts/turris1x.dts b/arch/powerpc/boot/dts/turris1x.dts
new file mode 100644
index 000000000000..12e08271e61f
--- /dev/null
+++ b/arch/powerpc/boot/dts/turris1x.dts
@@ -0,0 +1,483 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Turris 1.x Device Tree Source
+ *
+ * Copyright 2013 - 2022 CZ.NIC z.s.p.o. (http://www.nic.cz/)
+ *
+ * Pinout, Schematics and Altium hardware design files are open source
+ * and available at: https://docs.turris.cz/hw/turris-1x/turris-1x/
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/leds/common.h>
+/include/ "fsl/p2020si-pre.dtsi"
+
+/ {
+ model = "Turris 1.x";
+ compatible = "cznic,turris1x", "fsl,P2020RDB-PC"; /* fsl,P2020RDB-PC is required for booting Linux */
+
+ aliases {
+ ethernet0 = &enet0;
+ ethernet1 = &enet1;
+ ethernet2 = &enet2;
+ serial0 = &serial0;
+ serial1 = &serial1;
+ pci0 = &pci0;
+ pci1 = &pci1;
+ pci2 = &pci2;
+ spi0 = &spi0;
+ };
+
+ memory {
+ device_type = "memory";
+ };
+
+ soc: soc@ffe00000 {
+ ranges = <0x0 0x0 0xffe00000 0x00100000>;
+
+ i2c@3000 {
+ /* PCA9557PW GPIO controller for boot config */
+ gpio-controller@18 {
+ compatible = "nxp,pca9557";
+ label = "bootcfg";
+ reg = <0x18>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ polarity = <0x00>;
+ };
+
+ /* STM32F030R8T6 MCU for power control */
+ power-control@2a {
+ /*
+ * Turris Power Control firmware runs on STM32F0 MCU.
+ * This firmware is open source and available at:
+ * https://gitlab.nic.cz/turris/hw/turris_power_control
+ */
+ reg = <0x2a>;
+ };
+
+ /* DDR3 SPD/EEPROM PSWP instruction */
+ eeprom@32 {
+ reg = <0x32>;
+ };
+
+ /* SA56004ED temperature control */
+ temperature-sensor@4c {
+ compatible = "nxp,sa56004";
+ reg = <0x4c>;
+ interrupt-parent = <&gpio>;
+ interrupts = <12 IRQ_TYPE_LEVEL_LOW>, /* GPIO12 - ALERT pin */
+ <13 IRQ_TYPE_LEVEL_LOW>; /* GPIO13 - CRIT pin */
+ };
+
+ /* DDR3 SPD/EEPROM */
+ eeprom@52 {
+ compatible = "atmel,spd";
+ reg = <0x52>;
+ };
+
+ /* MCP79402-I/ST Protected EEPROM */
+ eeprom@57 {
+ reg = <0x57>;
+ };
+
+ /* ATSHA204-TH-DA-T crypto module */
+ crypto@64 {
+ compatible = "atmel,atsha204";
+ reg = <0x64>;
+ };
+
+ /* IDT6V49205BNLGI clock generator */
+ clock-generator@69 {
+ compatible = "idt,6v49205b";
+ reg = <0x69>;
+ };
+
+ /* MCP79402-I/ST RTC */
+ rtc@6f {
+ compatible = "microchip,mcp7940x";
+ reg = <0x6f>;
+ interrupt-parent = <&gpio>;
+ interrupts = <14 0>; /* GPIO14 - MFP pin */
+ };
+ };
+
+ /* SPI on connector P1 */
+ spi0: spi@7000 {
+ };
+
+ gpio: gpio-controller@fc00 {
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ };
+
+ /* Connected to SMSC USB2412-DZK 2-Port USB 2.0 Hub Controller */
+ usb@22000 {
+ phy_type = "ulpi";
+ dr_mode = "host";
+ };
+
+ enet0: ethernet@24000 {
+ /* Connected to port 6 of QCA8337N-AL3C switch */
+ phy-connection-type = "rgmii-id";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+
+ mdio@24520 {
+ /* KSZ9031RNXCA ethernet phy for WAN port */
+ phy: ethernet-phy@7 {
+ interrupts = <3 1 0 0>;
+ reg = <0x7>;
+ };
+
+ /* QCA8337N-AL3C switch with integrated ethernet PHYs for LAN ports */
+ switch@10 {
+ compatible = "qca,qca8337";
+ interrupts = <2 1 0 0>;
+ reg = <0x10>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "cpu1";
+ ethernet = <&enet1>;
+ phy-mode = "rgmii-id";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan5";
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan4";
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "lan3";
+ };
+
+ port@4 {
+ reg = <4>;
+ label = "lan2";
+ };
+
+ port@5 {
+ reg = <5>;
+ label = "lan1";
+ };
+
+ port@6 {
+ reg = <6>;
+ label = "cpu0";
+ ethernet = <&enet0>;
+ phy-mode = "rgmii-id";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ };
+ };
+ };
+
+ ptp_clock@24e00 {
+ fsl,tclk-period = <5>;
+ fsl,tmr-prsc = <200>;
+ fsl,tmr-add = <0xcccccccd>;
+ fsl,tmr-fiper1 = <0x3b9ac9fb>;
+ fsl,tmr-fiper2 = <0x0001869b>;
+ fsl,max-adj = <249999999>;
+ };
+
+ enet1: ethernet@25000 {
+ /* Connected to port 0 of QCA8337N-AL3C switch */
+ phy-connection-type = "rgmii-id";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+
+ mdio@25520 {
+ status = "disabled";
+ };
+
+ enet2: ethernet@26000 {
+ /* Connected to KSZ9031RNXCA ethernet phy (WAN port) */
+ label = "wan";
+ phy-handle = <&phy>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ mdio@26520 {
+ status = "disabled";
+ };
+
+ sdhc@2e000 {
+ bus-width = <4>;
+ cd-gpios = <&gpio 8 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ lbc: localbus@ffe05000 {
+ reg = <0 0xffe05000 0 0x1000>;
+
+ ranges = <0x0 0x0 0x0 0xef000000 0x01000000>, /* NOR */
+ <0x1 0x0 0x0 0xff800000 0x00040000>, /* NAND */
+ <0x3 0x0 0x0 0xffa00000 0x00020000>; /* CPLD */
+
+ /* S29GL128P90TFIR10 NOR */
+ nor@0,0 {
+ compatible = "cfi-flash";
+ reg = <0x0 0x0 0x01000000>;
+ bank-width = <2>;
+ device-width = <1>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ /* 128 kB for Device Tree Blob */
+ reg = <0x00000000 0x00020000>;
+ label = "dtb";
+ };
+
+ partition@20000 {
+ /* 1.7 MB for Rescue Linux Kernel Image */
+ reg = <0x00020000 0x001a0000>;
+ label = "rescue-kernel";
+ };
+
+ partition@1c0000 {
+ /* 1.5 MB for Rescue JFFS2 Root File System */
+ reg = <0x001c0000 0x00180000>;
+ label = "rescue-rootfs";
+ };
+
+ partition@340000 {
+ /* 11 MB for TAR.XZ Backup with content of NAND Root File System */
+ reg = <0x00340000 0x00b00000>;
+ label = "backup-rootfs";
+ };
+
+ partition@e40000 {
+ /* 768 kB for Certificates JFFS2 File System */
+ reg = <0x00e40000 0x000c0000>;
+ label = "certificates";
+ };
+
+ /* free unused space 0x00f00000-0x00f20000 */
+
+ partition@f20000 {
+ /* 128 kB for U-Boot Environment Variables */
+ reg = <0x00f20000 0x00020000>;
+ label = "u-boot-env";
+ };
+
+ partition@f40000 {
+ /* 768 kB for U-Boot Bootloader Image */
+ reg = <0x00f40000 0x000c0000>;
+ label = "u-boot";
+ };
+ };
+ };
+
+ /* MT29F2G08ABAEAWP:E NAND */
+ nand@1,0 {
+ compatible = "fsl,p2020-fcm-nand", "fsl,elbc-fcm-nand";
+ reg = <0x1 0x0 0x00040000>;
+ nand-ecc-mode = "soft";
+ nand-ecc-algo = "bch";
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ /* 256 MB for UBI with one volume: UBIFS Root File System */
+ reg = <0x00000000 0x10000000>;
+ label = "rootfs";
+ };
+ };
+ };
+
+ /* LCMXO1200C-3FTN256C FPGA */
+ cpld@3,0 {
+ /*
+ * The Turris CPLD firmware which runs on this Lattice FPGA
+ * is an extended version of the P1021RDB-PC CPLD v4.1 firmware.
+ * It is backward compatible with its original version
+ * and the only extension is support for Turris LEDs.
+ * Turris CPLD firmware is open source and available at:
+ * https://gitlab.nic.cz/turris/hw/turris_cpld/-/blob/master/CZ_NIC_Router_CPLD.v
+ */
+ compatible = "cznic,turris1x-cpld", "fsl,p1021rdb-pc-cpld", "simple-bus", "syscon";
+ reg = <0x3 0x0 0x30>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x3 0x0 0x00020000>;
+
+ /* MAX6370KA+T watchdog */
+ watchdog@2 {
+ /*
+ * CPLD firmware maps SET0, SET1 and SET2
+ * input logic of MAX6370KA+T chip to CPLD
+ * memory space at byte offset 0x2. WDI
+ * input logic is outside of the CPLD and
+ * connected via external GPIO.
+ */
+ compatible = "maxim,max6370";
+ reg = <0x02 0x01>;
+ gpios = <&gpio 11 GPIO_ACTIVE_LOW>;
+ };
+
+ reboot@d {
+ compatible = "syscon-reboot";
+ reg = <0x0d 0x01>;
+ offset = <0x0d>;
+ mask = <0x01>;
+ value = <0x01>;
+ };
+
+ led-controller@13 {
+ /*
+ * LEDs are controlled by CPLD firmware.
+ * All five LAN LEDs share common RGB settings
+ * and so it is not possible to set different
+ * colors on different LAN ports.
+ */
+ compatible = "cznic,turris1x-leds";
+ reg = <0x13 0x1d>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ multi-led@0 {
+ reg = <0x0>;
+ color = <LED_COLOR_ID_RGB>;
+ function = LED_FUNCTION_WAN;
+ };
+
+ multi-led@1 {
+ reg = <0x1>;
+ color = <LED_COLOR_ID_RGB>;
+ function = LED_FUNCTION_LAN;
+ function-enumerator = <5>;
+ };
+
+ multi-led@2 {
+ reg = <0x2>;
+ color = <LED_COLOR_ID_RGB>;
+ function = LED_FUNCTION_LAN;
+ function-enumerator = <4>;
+ };
+
+ multi-led@3 {
+ reg = <0x3>;
+ color = <LED_COLOR_ID_RGB>;
+ function = LED_FUNCTION_LAN;
+ function-enumerator = <3>;
+ };
+
+ multi-led@4 {
+ reg = <0x4>;
+ color = <LED_COLOR_ID_RGB>;
+ function = LED_FUNCTION_LAN;
+ function-enumerator = <2>;
+ };
+
+ multi-led@5 {
+ reg = <0x5>;
+ color = <LED_COLOR_ID_RGB>;
+ function = LED_FUNCTION_LAN;
+ function-enumerator = <1>;
+ };
+
+ multi-led@6 {
+ reg = <0x6>;
+ color = <LED_COLOR_ID_RGB>;
+ function = LED_FUNCTION_WLAN;
+ };
+
+ multi-led@7 {
+ reg = <0x7>;
+ color = <LED_COLOR_ID_RGB>;
+ function = LED_FUNCTION_POWER;
+ };
+ };
+ };
+ };
+
+ pci2: pcie@ffe08000 {
+ /*
+ * PCIe bus for on-board TUSB7340RKM USB 3.0 xHCI controller.
+ * This xHCI controller is available only on Turris 1.1 boards.
+ * Turris 1.0 boards have nothing connected to this PCIe bus,
+ * so the system would see only the PCIe Root Port of this PCIe
+ * Root Complex. The TUSB7340RKM xHCI controller has four SuperSpeed
+ * channels. Channel 0 is connected to the front USB 3.0 port,
+ * channel 1 (but only USB 2.0 subset) to USB 2.0 pins on mPCIe
+ * slot 1 (CN5), channels 2 and 3 to connector P600.
+ *
+ * P2020 PCIe Root Port uses 1MB of PCIe MEM and xHCI controller
+ * uses 64kB + 8kB of PCIe MEM. No PCIe IO is used or required.
+ * So allocate 2MB of PCIe MEM for this PCIe bus.
+ */
+ reg = <0 0xffe08000 0 0x1000>;
+ ranges = <0x02000000 0x0 0xc0000000 0 0xc0000000 0x0 0x00200000>, /* MEM */
+ <0x01000000 0x0 0x00000000 0 0xffc20000 0x0 0x00010000>; /* IO */
+
+ pcie@0 {
+ ranges;
+ };
+ };
+
+ pci1: pcie@ffe09000 {
+ /* PCIe bus on mPCIe slot 2 (CN6) for expansion mPCIe card */
+ reg = <0 0xffe09000 0 0x1000>;
+ ranges = <0x02000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000>, /* MEM */
+ <0x01000000 0x0 0x00000000 0 0xffc10000 0x0 0x00010000>; /* IO */
+
+ pcie@0 {
+ ranges;
+ };
+ };
+
+ pci0: pcie@ffe0a000 {
+ /*
+ * PCIe bus on mPCIe slot 1 (CN5) for expansion mPCIe card.
+ * On Turris 1.1 boards this mPCIe slot also carries additional USB 2.0
+ * pins via channel 1 of the TUSB7340RKM xHCI controller and an
+ * additional SIM card slot, both for USB-based WWAN cards.
+ */
+ reg = <0 0xffe0a000 0 0x1000>;
+ ranges = <0x02000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000>, /* MEM */
+ <0x01000000 0x0 0x00000000 0 0xffc00000 0x0 0x00010000>; /* IO */
+
+ pcie@0 {
+ ranges;
+ };
+ };
+};
+
+/include/ "fsl/p2020si-post.dtsi"
diff --git a/arch/powerpc/configs/44x/akebono_defconfig b/arch/powerpc/configs/44x/akebono_defconfig
index 4bc549c6edc5..fde4824f235e 100644
--- a/arch/powerpc/configs/44x/akebono_defconfig
+++ b/arch/powerpc/configs/44x/akebono_defconfig
@@ -118,7 +118,7 @@ CONFIG_CRAMFS=y
CONFIG_NLS_DEFAULT="n"
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_XMON=y
diff --git a/arch/powerpc/configs/44x/currituck_defconfig b/arch/powerpc/configs/44x/currituck_defconfig
index 717827219921..7283b7d4a1a5 100644
--- a/arch/powerpc/configs/44x/currituck_defconfig
+++ b/arch/powerpc/configs/44x/currituck_defconfig
@@ -73,7 +73,7 @@ CONFIG_NFS_FS=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_NLS_DEFAULT="n"
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_XMON=y
diff --git a/arch/powerpc/configs/44x/fsp2_defconfig b/arch/powerpc/configs/44x/fsp2_defconfig
index 8da316e61a08..3fdfbb29b854 100644
--- a/arch/powerpc/configs/44x/fsp2_defconfig
+++ b/arch/powerpc/configs/44x/fsp2_defconfig
@@ -110,7 +110,7 @@ CONFIG_XZ_DEC=y
CONFIG_PRINTK_TIME=y
CONFIG_MESSAGE_LOGLEVEL_DEFAULT=3
CONFIG_DYNAMIC_DEBUG=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_CRYPTO_CBC=y
diff --git a/arch/powerpc/configs/44x/iss476-smp_defconfig b/arch/powerpc/configs/44x/iss476-smp_defconfig
index c11e777b2f3d..0f6380e1e612 100644
--- a/arch/powerpc/configs/44x/iss476-smp_defconfig
+++ b/arch/powerpc/configs/44x/iss476-smp_defconfig
@@ -56,7 +56,7 @@ CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
# CONFIG_NETWORK_FILESYSTEMS is not set
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_PPC_EARLY_DEBUG=y
diff --git a/arch/powerpc/configs/44x/warp_defconfig b/arch/powerpc/configs/44x/warp_defconfig
index 47252c2d7669..20891c413149 100644
--- a/arch/powerpc/configs/44x/warp_defconfig
+++ b/arch/powerpc/configs/44x/warp_defconfig
@@ -88,7 +88,7 @@ CONFIG_NLS_UTF8=y
CONFIG_CRC_CCITT=y
CONFIG_CRC_T10DIF=y
CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
diff --git a/arch/powerpc/configs/52xx/lite5200b_defconfig b/arch/powerpc/configs/52xx/lite5200b_defconfig
index 63368e677506..7db479dcbc0c 100644
--- a/arch/powerpc/configs/52xx/lite5200b_defconfig
+++ b/arch/powerpc/configs/52xx/lite5200b_defconfig
@@ -58,6 +58,6 @@ CONFIG_NFS_FS=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
diff --git a/arch/powerpc/configs/52xx/motionpro_defconfig b/arch/powerpc/configs/52xx/motionpro_defconfig
index 72762da94846..6186ead1e105 100644
--- a/arch/powerpc/configs/52xx/motionpro_defconfig
+++ b/arch/powerpc/configs/52xx/motionpro_defconfig
@@ -84,7 +84,7 @@ CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_CRYPTO_ECB=y
diff --git a/arch/powerpc/configs/52xx/tqm5200_defconfig b/arch/powerpc/configs/52xx/tqm5200_defconfig
index a3c8ca74032c..e6735b945327 100644
--- a/arch/powerpc/configs/52xx/tqm5200_defconfig
+++ b/arch/powerpc/configs/52xx/tqm5200_defconfig
@@ -85,7 +85,7 @@ CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_CRYPTO_ECB=y
diff --git a/arch/powerpc/configs/adder875_defconfig b/arch/powerpc/configs/adder875_defconfig
index 5326bc739279..7f35d5bc1229 100644
--- a/arch/powerpc/configs/adder875_defconfig
+++ b/arch/powerpc/configs/adder875_defconfig
@@ -45,7 +45,7 @@ CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
CONFIG_CRC32_SLICEBY4=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
diff --git a/arch/powerpc/configs/ep8248e_defconfig b/arch/powerpc/configs/ep8248e_defconfig
index 00d69965f898..8df6d3a293e3 100644
--- a/arch/powerpc/configs/ep8248e_defconfig
+++ b/arch/powerpc/configs/ep8248e_defconfig
@@ -59,7 +59,7 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_BDI_SWITCH=y
diff --git a/arch/powerpc/configs/ep88xc_defconfig b/arch/powerpc/configs/ep88xc_defconfig
index f5c3e72da719..a98ef6a4abef 100644
--- a/arch/powerpc/configs/ep88xc_defconfig
+++ b/arch/powerpc/configs/ep88xc_defconfig
@@ -48,6 +48,6 @@ CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
CONFIG_CRC32_SLICEBY4=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
diff --git a/arch/powerpc/configs/fsl-emb-nonhw.config b/arch/powerpc/configs/fsl-emb-nonhw.config
index df37efed0aec..f14c6dbd7346 100644
--- a/arch/powerpc/configs/fsl-emb-nonhw.config
+++ b/arch/powerpc/configs/fsl-emb-nonhw.config
@@ -24,7 +24,7 @@ CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_SHIRQ=y
CONFIG_DETECT_HUNG_TASK=y
diff --git a/arch/powerpc/configs/mgcoge_defconfig b/arch/powerpc/configs/mgcoge_defconfig
index dcc8dccf54f3..498d35db7833 100644
--- a/arch/powerpc/configs/mgcoge_defconfig
+++ b/arch/powerpc/configs/mgcoge_defconfig
@@ -73,7 +73,7 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
# CONFIG_SCHED_DEBUG is not set
diff --git a/arch/powerpc/configs/mpc5200_defconfig b/arch/powerpc/configs/mpc5200_defconfig
index 83d801307178..c0fe5e76604a 100644
--- a/arch/powerpc/configs/mpc5200_defconfig
+++ b/arch/powerpc/configs/mpc5200_defconfig
@@ -122,6 +122,6 @@ CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
diff --git a/arch/powerpc/configs/mpc8272_ads_defconfig b/arch/powerpc/configs/mpc8272_ads_defconfig
index 00a4d2bf43b2..4145ef5689ca 100644
--- a/arch/powerpc/configs/mpc8272_ads_defconfig
+++ b/arch/powerpc/configs/mpc8272_ads_defconfig
@@ -67,7 +67,7 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_BDI_SWITCH=y
diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig
index c74dc76b1d0d..700115d85d6f 100644
--- a/arch/powerpc/configs/mpc885_ads_defconfig
+++ b/arch/powerpc/configs/mpc885_ads_defconfig
@@ -71,7 +71,7 @@ CONFIG_ROOT_NFS=y
CONFIG_CRYPTO=y
CONFIG_CRYPTO_DEV_TALITOS=y
CONFIG_CRC32_SLICEBY4=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_VM_PGTABLE=y
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index b622ecd73286..91967824272e 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -1065,7 +1065,7 @@ CONFIG_NLS_ISO8859_14=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_HEADERS_INSTALL=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
diff --git a/arch/powerpc/configs/pq2fads_defconfig b/arch/powerpc/configs/pq2fads_defconfig
index 9d8a76857c6f..9d63e2e65211 100644
--- a/arch/powerpc/configs/pq2fads_defconfig
+++ b/arch/powerpc/configs/pq2fads_defconfig
@@ -68,7 +68,7 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_SCHED_DEBUG is not set
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index 7c95fab4b920..2d9ac233da68 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -153,7 +153,7 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_CRC_CCITT=m
CONFIG_CRC_T10DIF=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_DEBUG_STACKOVERFLOW=y
diff --git a/arch/powerpc/configs/tqm8xx_defconfig b/arch/powerpc/configs/tqm8xx_defconfig
index 77857d513022..083c2e57520a 100644
--- a/arch/powerpc/configs/tqm8xx_defconfig
+++ b/arch/powerpc/configs/tqm8xx_defconfig
@@ -55,6 +55,6 @@ CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
CONFIG_CRC32_SLICEBY4=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
diff --git a/arch/powerpc/include/asm/archrandom.h b/arch/powerpc/include/asm/archrandom.h
index 0e365c5b2396..51b093f67528 100644
--- a/arch/powerpc/include/asm/archrandom.h
+++ b/arch/powerpc/include/asm/archrandom.h
@@ -2,27 +2,15 @@
#ifndef _ASM_POWERPC_ARCHRANDOM_H
#define _ASM_POWERPC_ARCHRANDOM_H
-#include <asm/machdep.h>
-
static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)
{
return 0;
}
-static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
-{
- if (max_longs && ppc_md.get_random_seed && ppc_md.get_random_seed(v))
- return 1;
- return 0;
-}
+size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs);
#ifdef CONFIG_PPC_POWERNV
-int powernv_hwrng_present(void);
-int powernv_get_random_long(unsigned long *v);
-int powernv_get_random_real_mode(unsigned long *v);
-#else
-static inline int powernv_hwrng_present(void) { return 0; }
-static inline int powernv_get_random_real_mode(unsigned long *v) { return 0; }
+int pnv_get_random_long(unsigned long *v);
#endif
#endif /* _ASM_POWERPC_ARCHRANDOM_H */
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index d995c65d18ab..81631e64dbeb 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -2,8 +2,9 @@
#ifndef _ASM_POWERPC_ASM_PROTOTYPES_H
#define _ASM_POWERPC_ASM_PROTOTYPES_H
/*
- * This file is for prototypes of C functions that are only called
- * from asm, and any associated variables.
+ * This file is for C prototypes of asm symbols that are EXPORTed.
+ * It allows the modversions logic to see their prototypes and
+ * generate proper CRCs for them.
*
* Copyright 2016, Daniel Axtens, IBM Corporation.
*/
@@ -34,12 +35,6 @@ int64_t __opal_call(int64_t a0, int64_t a1, int64_t a2, int64_t a3,
int64_t a4, int64_t a5, int64_t a6, int64_t a7,
int64_t opcode, uint64_t msr);
-/* prom_init (OpenFirmware) */
-unsigned long __init prom_init(unsigned long r3, unsigned long r4,
- unsigned long pp,
- unsigned long r6, unsigned long r7,
- unsigned long kbase);
-
/* misc runtime */
extern u64 __bswapdi2(u64);
extern s64 __lshrdi3(s64, int);
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 853dc86864f4..486ab7889121 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -140,9 +140,10 @@ static __always_inline bool
arch_atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
{
int r, o = *old;
+ unsigned int eh = IS_ENABLED(CONFIG_PPC64);
__asm__ __volatile__ (
-"1: lwarx %0,0,%2,%5 # atomic_try_cmpxchg_acquire \n"
+"1: lwarx %0,0,%2,%[eh] # atomic_try_cmpxchg_acquire \n"
" cmpw 0,%0,%3 \n"
" bne- 2f \n"
" stwcx. %4,0,%2 \n"
@@ -150,7 +151,7 @@ arch_atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
"\t" PPC_ACQUIRE_BARRIER " \n"
"2: \n"
: "=&r" (r), "+m" (v->counter)
- : "r" (&v->counter), "r" (o), "r" (new), "i" (IS_ENABLED(CONFIG_PPC64) ? 1 : 0)
+ : "r" (&v->counter), "r" (o), "r" (new), [eh] "n" (eh)
: "cr0", "memory");
if (unlikely(r != o))
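
The hunk above swaps the positional immediate operand for a named one when passing the lwarx EH hint. Below is a condensed sketch of the same pattern with illustrative names (kernel context assumed; like the kernel code it relies on the compiler folding IS_ENABLED() to a constant so the "n" constraint is satisfied, and the hint is only meaningful on 64-bit parts):

static inline int trylock_sketch(unsigned int *lock, unsigned int token)
{
	unsigned int tmp;
	/* e500v1/v2 treat a larx with EH set as illegal, so only hint on PPC64 */
	unsigned int eh = IS_ENABLED(CONFIG_PPC64);

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2,%[eh]\n"	/* load-reserve, named EH operand */
"	cmpwi	0,%0,0\n"		/* already held? */
"	bne-	2f\n"
"	stwcx.	%1,0,%2\n"		/* try to store the lock token */
"	bne-	1b\n"			/* reservation lost, retry */
"	isync\n"			/* acquire barrier (simplified) */
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (lock), [eh] "n" (eh)
	: "cr0", "memory");

	return tmp == 0;		/* 0 means the lock was free */
}
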
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index f0e687236484..ef2d8b15eaab 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -42,6 +42,8 @@
/* The sub-arch has lwsync */
#if defined(CONFIG_PPC64) || defined(CONFIG_PPC_E500MC)
# define SMPWMB LWSYNC
+#elif defined(CONFIG_BOOKE)
+# define SMPWMB mbar
#else
# define SMPWMB eieio
#endif
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index 344fba3b16eb..7e0f0322912b 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -163,7 +163,7 @@ static inline unsigned long fn( \
"bne- 1b\n" \
postfix \
: "=&r" (old), "=&r" (t) \
- : "rK" (mask), "r" (p), "i" (IS_ENABLED(CONFIG_PPC64) ? eh : 0) \
+ : "rK" (mask), "r" (p), "n" (eh) \
: "cc", "memory"); \
return (old & mask); \
}
@@ -171,7 +171,7 @@ static inline unsigned long fn( \
DEFINE_TESTOP(test_and_set_bits, or, PPC_ATOMIC_ENTRY_BARRIER,
PPC_ATOMIC_EXIT_BARRIER, 0)
DEFINE_TESTOP(test_and_set_bits_lock, or, "",
- PPC_ACQUIRE_BARRIER, 1)
+ PPC_ACQUIRE_BARRIER, IS_ENABLED(CONFIG_PPC64))
DEFINE_TESTOP(test_and_change_bits, xor, PPC_ATOMIC_ENTRY_BARRIER,
PPC_ATOMIC_EXIT_BARRIER, 0)
diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h
index b37a28f62cf6..aa1c67c8bfc8 100644
--- a/arch/powerpc/include/asm/book3s/64/hugetlb.h
+++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h
@@ -1,6 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_HUGETLB_H
#define _ASM_POWERPC_BOOK3S_64_HUGETLB_H
+
+#include <asm/firmware.h>
+
/*
* For radix we want generic code to handle hugetlb. But then if we want
* both hash and radix to be enabled together we need to workaround the
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index cb9d5fd39d7f..392ff48f77df 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1273,7 +1273,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
* should return true.
* We should not call this on a hugetlb entry. We should check for HugeTLB
* entry using vma->vm_flags
- * The page table walk rule is explained in Documentation/vm/transhuge.rst
+ * The page table walk rule is explained in Documentation/mm/transhuge.rst
*/
static inline int pmd_trans_huge(pmd_t pmd)
{
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
index d2e80f178b6d..206f920fe5b9 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
@@ -138,9 +138,29 @@ static inline void flush_all_mm(struct mm_struct *mm)
static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
unsigned long address)
{
- /* See ptep_set_access_flags comment */
- if (atomic_read(&vma->vm_mm->context.copros) > 0)
- flush_tlb_page(vma, address);
+ /*
+ * Book3S 64 does not require spurious fault flushes because the PTE
+ * must be re-fetched in case of an access permission problem. So the
+ * only reason for a spurious fault should be concurrent modification
+ * to the PTE, in which case the PTE will eventually be re-fetched by
+ * the MMU when it attempts the access again.
+ *
+ * See: Power ISA Version 3.1B, 6.10.1.2 Modifying a Translation Table
+ * Entry, Setting a Reference or Change Bit or Upgrading Access
+ * Authority (PTE Subject to Atomic Hardware Updates):
+ *
+ * "If the only change being made to a valid PTE that is subject to
+ * atomic hardware updates is to set the Reference or Change bit to
+ * 1 or to upgrade access authority, a simpler sequence suffices
+ * because the translation hardware will refetch the PTE if an
+ * access is attempted for which the only problems were reference
+ * and/or change bits needing to be set or insufficient access
+ * authority."
+ *
+ * The nest MMU in POWER9 does not perform this PTE re-fetch, but
+ * it avoids the spurious fault problem by flushing the TLB before
+ * upgrading PTE permissions, see radix__ptep_set_access_flags.
+ */
}
extern bool tlbie_capable;
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 549eb6dd146f..ae8c3e13cfce 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -70,9 +70,6 @@ struct cpu_spec {
/* Used to restore cpu setup on secondary processors and at resume */
cpu_restore_t cpu_restore;
- /* Used by oprofile userspace to select the right counters */
- char *oprofile_cpu_type;
-
/* Name of processor class, for the ELF AT_PLATFORM entry */
char *platform;
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index 504f7fe6711a..6d2b27997492 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -19,6 +19,7 @@
#include <asm/div64.h>
#include <asm/time.h>
#include <asm/param.h>
+#include <asm/firmware.h>
typedef u64 __nocast cputime_t;
typedef u64 __nocast cputime64_t;
diff --git a/arch/powerpc/include/asm/dma.h b/arch/powerpc/include/asm/dma.h
index 6161a9596196..d97c66d9ae34 100644
--- a/arch/powerpc/include/asm/dma.h
+++ b/arch/powerpc/include/asm/dma.h
@@ -340,11 +340,5 @@ extern int request_dma(unsigned int dmanr, const char *device_id);
/* release it again */
extern void free_dma(unsigned int dmanr);
-#ifdef CONFIG_PCI
-extern int isa_dma_bridge_buggy;
-#else
-#define isa_dma_bridge_buggy (0)
-#endif
-
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_DMA_H */
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index 8dddd34b8ecf..398e0b5e485f 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -55,6 +55,7 @@
#define FW_FEATURE_RPT_INVALIDATE ASM_CONST(0x0000010000000000)
#define FW_FEATURE_FORM2_AFFINITY ASM_CONST(0x0000020000000000)
#define FW_FEATURE_ENERGY_SCALE_INFO ASM_CONST(0x0000040000000000)
+#define FW_FEATURE_WATCHDOG ASM_CONST(0x0000080000000000)
#ifndef __ASSEMBLY__
@@ -76,7 +77,7 @@ enum {
FW_FEATURE_DRC_INFO | FW_FEATURE_BLOCK_REMOVE |
FW_FEATURE_PAPR_SCM | FW_FEATURE_ULTRAVISOR |
FW_FEATURE_RPT_INVALIDATE | FW_FEATURE_FORM2_AFFINITY |
- FW_FEATURE_ENERGY_SCALE_INFO,
+ FW_FEATURE_ENERGY_SCALE_INFO | FW_FEATURE_WATCHDOG,
FW_FEATURE_PSERIES_ALWAYS = 0,
FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_ULTRAVISOR,
FW_FEATURE_POWERNV_ALWAYS = 0,
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index d92a20a85395..8abae463f6c1 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -79,6 +79,7 @@
#define H_NOT_ENOUGH_RESOURCES -44
#define H_R_STATE -45
#define H_RESCINDED -46
+#define H_P1 -54
#define H_P2 -55
#define H_P3 -56
#define H_P4 -57
@@ -87,6 +88,7 @@
#define H_P7 -60
#define H_P8 -61
#define H_P9 -62
+#define H_NOOP -63
#define H_TOO_BIG -64
#define H_UNSUPPORTED -67
#define H_OVERLAP -68
@@ -97,6 +99,8 @@
#define H_OP_MODE -73
#define H_COP_HW -74
#define H_STATE -75
+#define H_IN_USE -77
+#define H_ABORTED -78
#define H_UNSUPPORTED_FLAG_START -256
#define H_UNSUPPORTED_FLAG_END -511
#define H_MULTI_THREADS_ACTIVE -9005
@@ -321,10 +325,19 @@
#define H_SCM_UNBIND_ALL 0x3FC
#define H_SCM_HEALTH 0x400
#define H_SCM_PERFORMANCE_STATS 0x418
+#define H_PKS_GET_CONFIG 0x41C
+#define H_PKS_SET_PASSWORD 0x420
+#define H_PKS_GEN_PASSWORD 0x424
+#define H_PKS_WRITE_OBJECT 0x42C
+#define H_PKS_GEN_KEY 0x430
+#define H_PKS_READ_OBJECT 0x434
+#define H_PKS_REMOVE_OBJECT 0x438
+#define H_PKS_CONFIRM_OBJECT_FLUSHED 0x43C
#define H_RPT_INVALIDATE 0x448
#define H_SCM_FLUSH 0x44C
#define H_GET_ENERGY_SCALE_INFO 0x450
-#define MAX_HCALL_OPCODE H_GET_ENERGY_SCALE_INFO
+#define H_WATCHDOG 0x45C
+#define MAX_HCALL_OPCODE H_WATCHDOG
/* Scope args for H_SCM_UNBIND_ALL */
#define H_UNBIND_SCOPE_ALL (0x1)
@@ -350,6 +363,14 @@
/* Platform specific hcalls, used by KVM */
#define H_RTAS 0xf000
+/*
+ * Platform specific hcalls, used by QEMU/SLOF. These are ignored by
+ * KVM and only kept here so we can identify them during tracing.
+ */
+#define H_LOGICAL_MEMOP 0xF001
+#define H_CAS 0XF002
+#define H_UPDATE_DT 0XF003
+
/* "Platform specific hcalls", provided by PHYP */
#define H_GET_24X7_CATALOG_PAGE 0xF078
#define H_GET_24X7_DATA 0xF07C
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 674e5aaafcbd..26ede09c521d 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -113,14 +113,7 @@ static inline void __hard_RI_enable(void)
static inline notrace unsigned long irq_soft_mask_return(void)
{
- unsigned long flags;
-
- asm volatile(
- "lbz %0,%1(13)"
- : "=r" (flags)
- : "i" (offsetof(struct paca_struct, irq_soft_mask)));
-
- return flags;
+ return READ_ONCE(local_paca->irq_soft_mask);
}
/*
@@ -130,7 +123,6 @@ static inline notrace unsigned long irq_soft_mask_return(void)
*/
static inline notrace void irq_soft_mask_set(unsigned long mask)
{
-#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
/*
* The irq mask must always include the STD bit if any are set.
*
@@ -145,49 +137,27 @@ static inline notrace void irq_soft_mask_set(unsigned long mask)
* unmasks to be replayed, among other things. For now, take
* the simple approach.
*/
- WARN_ON(mask && !(mask & IRQS_DISABLED));
-#endif
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON(mask && !(mask & IRQS_DISABLED));
- asm volatile(
- "stb %0,%1(13)"
- :
- : "r" (mask),
- "i" (offsetof(struct paca_struct, irq_soft_mask))
- : "memory");
+ WRITE_ONCE(local_paca->irq_soft_mask, mask);
+ barrier();
}
static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask)
{
- unsigned long flags;
-
-#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
- WARN_ON(mask && !(mask & IRQS_DISABLED));
-#endif
+ unsigned long flags = irq_soft_mask_return();
- asm volatile(
- "lbz %0,%1(13); stb %2,%1(13)"
- : "=&r" (flags)
- : "i" (offsetof(struct paca_struct, irq_soft_mask)),
- "r" (mask)
- : "memory");
+ irq_soft_mask_set(mask);
return flags;
}
static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
{
- unsigned long flags, tmp;
-
- asm volatile(
- "lbz %0,%2(13); or %1,%0,%3; stb %1,%2(13)"
- : "=&r" (flags), "=r" (tmp)
- : "i" (offsetof(struct paca_struct, irq_soft_mask)),
- "r" (mask)
- : "memory");
+ unsigned long flags = irq_soft_mask_return();
-#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
- WARN_ON((mask | flags) && !((mask | flags) & IRQS_DISABLED));
-#endif
+ irq_soft_mask_set(flags | mask);
return flags;
}
@@ -312,9 +282,7 @@ static inline bool pmi_irq_pending(void)
flags = irq_soft_mask_set_return(IRQS_ALL_DISABLED); \
local_paca->irq_happened |= PACA_IRQ_HARD_DIS; \
if (!arch_irqs_disabled_flags(flags)) { \
- asm ("stdx %%r1, 0, %1 ;" \
- : "=m" (local_paca->saved_r1) \
- : "b" (&local_paca->saved_r1)); \
+ WRITE_ONCE(local_paca->saved_r1, current_stack_pointer);\
trace_hardirqs_off(); \
} \
} while(0)
@@ -353,11 +321,13 @@ bool power_pmu_wants_prompt_pmi(void);
*/
static inline bool should_hard_irq_enable(void)
{
-#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
- WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
- WARN_ON(mfmsr() & MSR_EE);
-#endif
-#ifdef CONFIG_PERF_EVENTS
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
+ WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
+ WARN_ON(mfmsr() & MSR_EE);
+ }
+
+ if (!IS_ENABLED(CONFIG_PERF_EVENTS))
+ return false;
/*
* If the PMU is not running, there is not much reason to enable
* MSR[EE] in irq handlers because any interrupts would just be
@@ -372,9 +342,6 @@ static inline bool should_hard_irq_enable(void)
return false;
return true;
-#else
- return false;
-#endif
}
/*
@@ -382,11 +349,11 @@ static inline bool should_hard_irq_enable(void)
*/
static inline void do_hard_irq_enable(void)
{
-#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
- WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
- WARN_ON(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK);
- WARN_ON(mfmsr() & MSR_EE);
-#endif
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
+ WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
+ WARN_ON(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK);
+ WARN_ON(mfmsr() & MSR_EE);
+ }
/*
* This allows PMI interrupts (and watchdog soft-NMIs) through.
* There is no other reason to enable this way.
diff --git a/arch/powerpc/include/asm/inst.h b/arch/powerpc/include/asm/inst.h
index b49aae9f6f27..684d3f453282 100644
--- a/arch/powerpc/include/asm/inst.h
+++ b/arch/powerpc/include/asm/inst.h
@@ -139,25 +139,6 @@ static inline void ppc_inst_write(u32 *ptr, ppc_inst_t x)
*(u64 *)ptr = ppc_inst_as_ulong(x);
}
-#define PPC_INST_STR_LEN sizeof("00000000 00000000")
-
-static inline char *__ppc_inst_as_str(char str[PPC_INST_STR_LEN], ppc_inst_t x)
-{
- if (ppc_inst_prefixed(x))
- sprintf(str, "%08x %08x", ppc_inst_val(x), ppc_inst_suffix(x));
- else
- sprintf(str, "%08x", ppc_inst_val(x));
-
- return str;
-}
-
-#define ppc_inst_as_str(x) \
-({ \
- char __str[PPC_INST_STR_LEN]; \
- __ppc_inst_as_str(__str, x); \
- __str; \
-})
-
static inline int __copy_inst_from_kernel_nofault(ppc_inst_t *inst, u32 *src)
{
unsigned int val, suffix;
diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
index b14f54d789d2..8069dbc4b8d1 100644
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -69,6 +69,7 @@
#include <linux/context_tracking.h>
#include <linux/hardirq.h>
#include <asm/cputime.h>
+#include <asm/firmware.h>
#include <asm/ftrace.h>
#include <asm/kprobes.h>
#include <asm/runlatch.h>
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index c5a5f7c9b231..fc112a91d0c2 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -33,7 +33,6 @@ extern struct pci_dev *isa_bridge_pcidev;
#include <asm/delay.h>
#include <asm/mmiowb.h>
#include <asm/mmu.h>
-#include <asm/ppc_asm.h>
#define SIO_CONFIG_RA 0x398
#define SIO_CONFIG_RD 0x399
@@ -985,8 +984,6 @@ static inline void * bus_to_virt(unsigned long address)
}
#define bus_to_virt bus_to_virt
-#define page_to_bus(page) (page_to_phys(page) + PCI_DRAM_OFFSET)
-
#endif /* CONFIG_PPC32 */
/* access ports */
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index 13f0409dd617..5c1516a5ba8f 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -54,7 +54,6 @@ extern void *softirq_ctx[NR_CPUS];
void __do_IRQ(struct pt_regs *regs);
extern void __init init_IRQ(void);
-extern void __do_irq(struct pt_regs *regs);
int irq_choose_cpu(const struct cpumask *mask);
diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h
index a6be4025cba2..92a968202ba7 100644
--- a/arch/powerpc/include/asm/kasan.h
+++ b/arch/powerpc/include/asm/kasan.h
@@ -19,7 +19,7 @@
#define KASAN_SHADOW_SCALE_SHIFT 3
-#ifdef CONFIG_MODULES
+#if defined(CONFIG_MODULES) && defined(CONFIG_PPC32)
#define KASAN_KERN_START ALIGN_DOWN(PAGE_OFFSET - SZ_256M, SZ_256M)
#else
#define KASAN_KERN_START PAGE_OFFSET
@@ -39,6 +39,17 @@
* c00e000000000000 << 3 + a80e000000000000 = c00fc00000000000
*/
#define KASAN_SHADOW_END 0xc00fc00000000000UL
+
+#else
+
+/*
+ * The shadow ends before the highest accessible address
+ * because we don't need a shadow for the shadow.
+ * But it doesn't hurt to have a shadow for the shadow, and
+ * keeping the shadow end aligned eases things.
+ */
+#define KASAN_SHADOW_END 0xc000200000000000UL
+
#endif
#ifdef CONFIG_KASAN
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index d6f4edfe4737..f8d122d16af4 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -83,6 +83,7 @@ extern void default_machine_crash_shutdown(struct pt_regs *regs);
extern int crash_shutdown_register(crash_shutdown_t handler);
extern int crash_shutdown_unregister(crash_shutdown_t handler);
+extern void crash_kexec_prepare(void);
extern void crash_kexec_secondary(struct pt_regs *regs);
int __init overlaps_crashkernel(unsigned long start, unsigned long size);
extern void reserve_crashkernel(void);
diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h
index bab364152b29..c8e4b4fd4e33 100644
--- a/arch/powerpc/include/asm/kprobes.h
+++ b/arch/powerpc/include/asm/kprobes.h
@@ -29,7 +29,7 @@
struct pt_regs;
struct kprobe;
-typedef ppc_opcode_t kprobe_opcode_t;
+typedef u32 kprobe_opcode_t;
extern kprobe_opcode_t optinsn_slot;
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 91c9f937edcd..bbf5e2c5fe09 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -280,9 +280,6 @@ extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);
long kvmppc_read_intr(void);
-void kvmppc_bad_interrupt(struct pt_regs *regs);
-void kvmhv_p9_set_lpcr(struct kvm_split_mode *sip);
-void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip);
void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr);
void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 4def2bd17b9b..d49065af08e9 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -666,7 +666,7 @@ static inline pte_t *find_kvm_host_pte(struct kvm *kvm, unsigned long mmu_seq,
VM_WARN(!spin_is_locked(&kvm->mmu_lock),
"%s called with kvm mmu_lock not held \n", __func__);
- if (mmu_notifier_retry(kvm, mmu_seq))
+ if (mmu_invalidate_retry(kvm, mmu_seq))
return NULL;
pte = __find_linux_pte(kvm->mm->pgd, ea, NULL, hshift);
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 2909a88acd16..c2b003550dc9 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -523,7 +523,11 @@ struct kvm_vcpu_arch {
struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
#endif
- struct pt_regs regs;
+ /*
+ * This is passed along to the HV via H_ENTER_NESTED. Align to
+ * prevent it crossing a real 4K page.
+ */
+ struct pt_regs regs __aligned(512);
struct thread_fp_state fp;
@@ -830,11 +834,21 @@ struct kvm_vcpu_arch {
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
struct kvmhv_tb_accumulator *cur_activity; /* What we're timing */
u64 cur_tb_start; /* when it started */
+#ifdef CONFIG_KVM_BOOK3S_HV_P9_TIMING
+ struct kvmhv_tb_accumulator vcpu_entry;
+ struct kvmhv_tb_accumulator vcpu_exit;
+ struct kvmhv_tb_accumulator in_guest;
+ struct kvmhv_tb_accumulator hcall;
+ struct kvmhv_tb_accumulator pg_fault;
+ struct kvmhv_tb_accumulator guest_entry;
+ struct kvmhv_tb_accumulator guest_exit;
+#else
struct kvmhv_tb_accumulator rm_entry; /* real-mode entry code */
struct kvmhv_tb_accumulator rm_intr; /* real-mode intr handling */
struct kvmhv_tb_accumulator rm_exit; /* real-mode exit code */
struct kvmhv_tb_accumulator guest_time; /* guest execution */
struct kvmhv_tb_accumulator cede_time; /* time napping inside guest */
+#endif
#endif /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
};
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 6c1002043367..8cb83600c434 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -8,8 +8,6 @@
#include <linux/dma-mapping.h>
#include <linux/export.h>
-#include <asm/setup.h>
-
struct pt_regs;
struct pci_bus;
struct device_node;
diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
index 1b024e64c8ec..17a77d47ed6d 100644
--- a/arch/powerpc/include/asm/mman.h
+++ b/arch/powerpc/include/asm/mman.h
@@ -12,6 +12,7 @@
#include <linux/mm.h>
#include <linux/pkeys.h>
#include <asm/cpu_has_feature.h>
+#include <asm/firmware.h>
static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
unsigned long pkey)
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 5f41565a1e5d..860d0290ca4d 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -96,15 +96,6 @@
*/
#define MMU_FTR_NEED_DTLB_SW_LRU ASM_CONST(0x00200000)
-/* Enable use of TLB reservation. Processor should support tlbsrx.
- * instruction and MAS0[WQ].
- */
-#define MMU_FTR_USE_TLBRSRV ASM_CONST(0x00800000)
-
-/* Use paired MAS registers (MAS7||MAS3, etc.)
- */
-#define MMU_FTR_USE_PAIRED_MAS ASM_CONST(0x01000000)
-
/* Doesn't support the B bit (1T segment) in SLBIE
*/
#define MMU_FTR_NO_SLBIE_B ASM_CONST(0x02000000)
@@ -180,9 +171,6 @@ enum {
#ifdef CONFIG_PPC_83xx
MMU_FTR_NEED_DTLB_SW_LRU |
#endif
-#ifdef CONFIG_PPC_BOOK3E_64
- MMU_FTR_USE_TLBRSRV | MMU_FTR_USE_PAIRED_MAS |
-#endif
#ifdef CONFIG_PPC_BOOK3S_64
MMU_FTR_KERNEL_RO |
#ifdef CONFIG_PPC_64S_HASH_MMU
diff --git a/arch/powerpc/include/asm/mpc52xx.h b/arch/powerpc/include/asm/mpc52xx.h
index ce1e0aabaa64..5ea16a71c2f0 100644
--- a/arch/powerpc/include/asm/mpc52xx.h
+++ b/arch/powerpc/include/asm/mpc52xx.h
@@ -15,7 +15,6 @@
#ifndef __ASSEMBLY__
#include <asm/types.h>
-#include <asm/prom.h>
#include <asm/mpc5xxx.h>
#endif /* __ASSEMBLY__ */
@@ -268,13 +267,14 @@ struct mpc52xx_intr {
#ifndef __ASSEMBLY__
+struct device_node;
+
/* mpc52xx_common.c */
extern void mpc5200_setup_xlb_arbiter(void);
extern void mpc52xx_declare_of_platform_devices(void);
extern int mpc5200_psc_ac97_gpio_reset(int psc_number);
extern void mpc52xx_map_common_devices(void);
extern int mpc52xx_set_psc_clkdiv(int psc_id, int clkdiv);
-extern unsigned int mpc52xx_get_xtal_freq(struct device_node *node);
extern void __noreturn mpc52xx_restart(char *cmd);
/* mpc52xx_gpt.c */
diff --git a/arch/powerpc/include/asm/mpc5xxx.h b/arch/powerpc/include/asm/mpc5xxx.h
index 2f60f5c5461b..44db26380435 100644
--- a/arch/powerpc/include/asm/mpc5xxx.h
+++ b/arch/powerpc/include/asm/mpc5xxx.h
@@ -11,7 +11,14 @@
#ifndef __ASM_POWERPC_MPC5xxx_H__
#define __ASM_POWERPC_MPC5xxx_H__
-extern unsigned long mpc5xxx_get_bus_frequency(struct device_node *node);
+#include <linux/property.h>
+
+unsigned long mpc5xxx_fwnode_get_bus_frequency(struct fwnode_handle *fwnode);
+
+static inline unsigned long mpc5xxx_get_bus_frequency(struct device *dev)
+{
+ return mpc5xxx_fwnode_get_bus_frequency(dev_fwnode(dev));
+}
#endif /* __ASM_POWERPC_MPC5xxx_H__ */
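
mpc5xxx_get_bus_frequency() now takes the struct device itself and resolves the fwnode internally. A minimal sketch of a hypothetical caller (the probe function and its messages are illustrative, not part of this patch):

#include <linux/platform_device.h>
#include <asm/mpc5xxx.h>

static int demo_probe(struct platform_device *pdev)
{
	unsigned long ipb_freq;

	/* No need to look up the device_node any more; the helper
	 * derives the fwnode from the device itself. */
	ipb_freq = mpc5xxx_get_bus_frequency(&pdev->dev);
	if (!ipb_freq)
		return -ENODEV;

	dev_info(&pdev->dev, "bus frequency: %lu Hz\n", ipb_freq);
	return 0;
}
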
diff --git a/arch/powerpc/include/asm/nmi.h b/arch/powerpc/include/asm/nmi.h
index ea0e487f87b1..c3c7adef74de 100644
--- a/arch/powerpc/include/asm/nmi.h
+++ b/arch/powerpc/include/asm/nmi.h
@@ -5,8 +5,10 @@
#ifdef CONFIG_PPC_WATCHDOG
extern void arch_touch_nmi_watchdog(void);
long soft_nmi_interrupt(struct pt_regs *regs);
+void watchdog_nmi_set_timeout_pct(u64 pct);
#else
static inline void arch_touch_nmi_watchdog(void) {}
+static inline void watchdog_nmi_set_timeout_pct(u64 pct) {}
#endif
#ifdef CONFIG_NMI_IPI
diff --git a/arch/powerpc/include/asm/nohash/64/pgalloc.h b/arch/powerpc/include/asm/nohash/64/pgalloc.h
index 668aee6017e7..e50b211becb3 100644
--- a/arch/powerpc/include/asm/nohash/64/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/64/pgalloc.h
@@ -15,7 +15,10 @@ struct vmemmap_backing {
};
extern struct vmemmap_backing *vmemmap_list;
-#define p4d_populate(MM, P4D, PUD) p4d_set(P4D, (unsigned long)PUD)
+static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
+{
+ p4d_set(p4d, (unsigned long)pud);
+}
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index 57083f95e82b..599921cc257e 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -25,7 +25,7 @@
/*
* Define the address range of the kernel non-linear virtual area
*/
-#define KERN_VIRT_START ASM_CONST(0x8000000000000000)
+#define KERN_VIRT_START ASM_CONST(0xc000100000000000)
#define KERN_VIRT_SIZE ASM_CONST(0x0000100000000000)
/*
@@ -38,15 +38,16 @@
#define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE)
/*
- * The second half of the kernel virtual space is used for IO mappings,
+ * The third quarter of the kernel virtual space is used for IO mappings,
* it's itself carved into the PIO region (ISA and PHB IO space) and
* the ioremap space
*
* ISA_IO_BASE = KERN_IO_START, 64K reserved area
* PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
- * IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
+ * IOREMAP_BASE = ISA_IO_BASE + 2G to KERN_IO_START + KERN_IO_SIZE
*/
#define KERN_IO_START (KERN_VIRT_START + (KERN_VIRT_SIZE >> 1))
+#define KERN_IO_SIZE (KERN_VIRT_SIZE >> 2)
#define FULL_IO_SIZE 0x80000000ul
#define ISA_IO_BASE (KERN_IO_START)
#define ISA_IO_END (KERN_IO_START + 0x10000ul)
@@ -54,21 +55,9 @@
#define PHB_IO_END (KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE (PHB_IO_END)
#define IOREMAP_START (ioremap_bot)
-#define IOREMAP_END (KERN_VIRT_START + KERN_VIRT_SIZE - FIXADDR_SIZE)
+#define IOREMAP_END (KERN_IO_START + KERN_IO_SIZE - FIXADDR_SIZE)
#define FIXADDR_SIZE SZ_32M
-
-/*
- * Region IDs
- */
-#define REGION_SHIFT 60UL
-#define REGION_MASK (0xfUL << REGION_SHIFT)
-#define REGION_ID(ea) (((unsigned long)(ea)) >> REGION_SHIFT)
-
-#define VMALLOC_REGION_ID (REGION_ID(VMALLOC_START))
-#define KERNEL_REGION_ID (REGION_ID(PAGE_OFFSET))
-#define USER_REGION_ID (0UL)
-
/*
* Defines the address of the vmemmap area, in its own region
* after the vmalloc space on Book3E
@@ -83,8 +72,6 @@
*/
#include <asm/nohash/pte-book3e.h>
-#define _PAGE_SAO 0
-
#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
/*
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index ac75f4ab0dba..b499da6c1a99 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -193,7 +193,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
if (IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_PTE_64BIT) && !percpu) {
__asm__ __volatile__("\
stw%X0 %2,%0\n\
- eieio\n\
+ mbar\n\
stw%X1 %L2,%1"
: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
: "r" (pte) : "memory");
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index c85f901227c9..e18c95f4e1d4 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -170,11 +170,15 @@ static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus)
return bus->sysdata;
}
+#ifdef CONFIG_PPC_PMAC
extern int pci_device_from_OF_node(struct device_node *node,
u8 *bus, u8 *devfn);
+#endif
#ifndef CONFIG_PPC64
+#ifdef CONFIG_PPC_CHRP
extern void pci_create_OF_bus_map(void);
+#endif
#else /* CONFIG_PPC64 */
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index 915d6ee4b40a..289f1ec85bc5 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -14,7 +14,6 @@
#include <asm/machdep.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/pci-bridge.h>
/* Return values for pci_controller_ops.probe_mode function */
@@ -39,7 +38,6 @@
#define pcibios_assign_all_busses() \
(pci_has_flag(PCI_REASSIGN_ALL_BUS))
-#define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
if (ppc_md.pci_get_legacy_ide_irq)
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index d564d0ecd4cd..33f4bf8d22b0 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -20,25 +20,6 @@ struct mm_struct;
#include <asm/nohash/pgtable.h>
#endif /* !CONFIG_PPC_BOOK3S */
-/* Note due to the way vm flags are laid out, the bits are XWR */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY_X
-#define __P101 PAGE_READONLY_X
-#define __P110 PAGE_COPY_X
-#define __P111 PAGE_COPY_X
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY_X
-#define __S101 PAGE_READONLY_X
-#define __S110 PAGE_SHARED_X
-#define __S111 PAGE_SHARED_X
-
#ifndef __ASSEMBLY__
#ifndef MAX_PTRS_PER_PGD
@@ -79,6 +60,7 @@ extern void paging_init(void);
void poking_init(void);
extern unsigned long ioremap_bot;
+extern const pgprot_t protection_map[16];
/*
* kern_addr_valid is intended to indicate whether an address is a valid
diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h
index 83e0f701ebc6..8239c0af5eb2 100644
--- a/arch/powerpc/include/asm/plpar_wrappers.h
+++ b/arch/powerpc/include/asm/plpar_wrappers.h
@@ -43,11 +43,10 @@ static inline long extended_cede_processor(unsigned long latency_hint)
set_cede_latency_hint(latency_hint);
rc = cede_processor();
-#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+
/* Ensure that H_CEDE returns with IRQs on */
- if (WARN_ON(!(mfmsr() & MSR_EE)))
+ if (WARN_ON(IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && !(mfmsr() & MSR_EE)))
__hard_irq_enable();
-#endif
set_cede_latency_hint(old_latency_hint);
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 89beabf5325c..c6d724104ed1 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -290,7 +290,6 @@
#define PPC_INST_STRING 0x7c00042a
#define PPC_INST_STRING_MASK 0xfc0007fe
#define PPC_INST_STRING_GEN_MASK 0xfc00067e
-#define PPC_INST_SETB 0x7c000100
#define PPC_INST_STSWI 0x7c0005aa
#define PPC_INST_STSWX 0x7c00052a
#define PPC_INST_TRECHKPT 0x7c0007dd
@@ -344,6 +343,7 @@
#define __PPC_SPR(r) ((((r) & 0x1f) << 16) | ((((r) >> 5) & 0x1f) << 11))
#define __PPC_RC21 (0x1 << 10)
#define __PPC_PRFX_R(r) (((r) & 0x1) << 20)
+#define __PPC_EH(eh) (((eh) & 0x1) << 0)
/*
* Both low and high 16 bits are added as SIGNED additions, so if low 16 bits
@@ -360,16 +360,6 @@
#define PPC_LI_MASK 0x03fffffc
#define PPC_LI(v) ((v) & PPC_LI_MASK)
-/*
- * Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a
- * larx with EH set as an illegal instruction.
- */
-#ifdef CONFIG_PPC64
-#define __PPC_EH(eh) (((eh) & 0x1) << 0)
-#else
-#define __PPC_EH(eh) 0
-#endif
-
/* Base instruction encoding */
#define PPC_RAW_CP_ABORT (0x7c00068c)
#define PPC_RAW_COPY(a, b) (PPC_INST_COPY | ___PPC_RA(a) | ___PPC_RB(b))
@@ -581,6 +571,9 @@
#define PPC_RAW_BRANCH(offset) (0x48000000 | PPC_LI(offset))
#define PPC_RAW_BL(offset) (0x48000001 | PPC_LI(offset))
+#define PPC_RAW_TW(t0, a, b) (0x7c000008 | ___PPC_RS(t0) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_TRAP() PPC_RAW_TW(31, 0, 0)
+#define PPC_RAW_SETB(t, bfa) (0x7c000100 | ___PPC_RT(t) | ___PPC_RA((bfa) << 2))
/* Deal with instructions that older assemblers aren't aware of */
#define PPC_BCCTR_FLUSH stringify_in_c(.long PPC_INST_BCCTR_FLUSH)
diff --git a/arch/powerpc/include/asm/probes.h b/arch/powerpc/include/asm/probes.h
index 6f66e358aa37..e77a2ed7d938 100644
--- a/arch/powerpc/include/asm/probes.h
+++ b/arch/powerpc/include/asm/probes.h
@@ -9,9 +9,9 @@
*/
#include <linux/types.h>
#include <asm/disassemble.h>
+#include <asm/ppc-opcode.h>
-typedef u32 ppc_opcode_t;
-#define BREAKPOINT_INSTRUCTION 0x7fe00008 /* trap */
+#define BREAKPOINT_INSTRUCTION PPC_RAW_TRAP() /* trap */
/* Trap definitions per ISA */
#define IS_TW(instr) (((instr) & 0xfc0007fe) == 0x7c000008)
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index 5c80152e8f18..2e82820fbd64 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -12,15 +12,10 @@
* Updates for PPC64 by Peter Bergner & David Engebretsen, IBM Corp.
*/
#include <linux/types.h>
-#include <asm/irq.h>
-#include <linux/atomic.h>
-
-/* These includes should be removed once implicit includes are cleaned up. */
-#include <linux/of.h>
-#include <linux/of_fdt.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/platform_device.h>
+#include <asm/firmware.h>
+
+struct device_node;
+struct property;
#define OF_DT_BEGIN_NODE 0x1 /* Start of node, full name */
#define OF_DT_END_NODE 0x2 /* End node */
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index 8fa37ef5da4d..d8c28902cf59 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -12,7 +12,6 @@ extern unsigned long long memory_limit;
extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
struct device_node;
-extern void note_scsi_host(struct device_node *, void *);
/* Used in very early kernel initialization. */
extern unsigned long reloc_offset(void);
@@ -85,6 +84,11 @@ void __init machine_init(u64 dt_ptr);
void __init early_setup(unsigned long dt_ptr);
void early_setup_secondary(void);
+/* prom_init (OpenFirmware) */
+unsigned long __init prom_init(unsigned long r3, unsigned long r4,
+ unsigned long pp, unsigned long r6,
+ unsigned long r7, unsigned long kbase);
+
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_POWERPC_SETUP_H */
diff --git a/arch/powerpc/include/asm/simple_spinlock.h b/arch/powerpc/include/asm/simple_spinlock.h
index 7ae6aeef8464..9dcc7e9993b9 100644
--- a/arch/powerpc/include/asm/simple_spinlock.h
+++ b/arch/powerpc/include/asm/simple_spinlock.h
@@ -48,10 +48,11 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lock)
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned long tmp, token;
+ unsigned int eh = IS_ENABLED(CONFIG_PPC64);
token = LOCK_TOKEN;
__asm__ __volatile__(
-"1: lwarx %0,0,%2,1\n\
+"1: lwarx %0,0,%2,%[eh]\n\
cmpwi 0,%0,0\n\
bne- 2f\n\
stwcx. %1,0,%2\n\
@@ -59,7 +60,7 @@ static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
PPC_ACQUIRE_BARRIER
"2:"
: "=&r" (tmp)
- : "r" (token), "r" (&lock->slock)
+ : "r" (token), "r" (&lock->slock), [eh] "n" (eh)
: "cr0", "memory");
return tmp;
@@ -156,9 +157,10 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
long tmp;
+ unsigned int eh = IS_ENABLED(CONFIG_PPC64);
__asm__ __volatile__(
-"1: lwarx %0,0,%1,1\n"
+"1: lwarx %0,0,%1,%[eh]\n"
__DO_SIGN_EXTEND
" addic. %0,%0,1\n\
ble- 2f\n"
@@ -166,7 +168,7 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
bne- 1b\n"
PPC_ACQUIRE_BARRIER
"2:" : "=&r" (tmp)
- : "r" (&rw->lock)
+ : "r" (&rw->lock), [eh] "n" (eh)
: "cr0", "xer", "memory");
return tmp;
@@ -179,17 +181,18 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
long tmp, token;
+ unsigned int eh = IS_ENABLED(CONFIG_PPC64);
token = WRLOCK_TOKEN;
__asm__ __volatile__(
-"1: lwarx %0,0,%2,1\n\
+"1: lwarx %0,0,%2,%[eh]\n\
cmpwi 0,%0,0\n\
bne- 2f\n"
" stwcx. %1,0,%2\n\
bne- 1b\n"
PPC_ACQUIRE_BARRIER
"2:" : "=&r" (tmp)
- : "r" (token), "r" (&rw->lock)
+ : "r" (token), "r" (&rw->lock), [eh] "n" (eh)
: "cr0", "memory");
return tmp;
diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
index 1d67bc8d7bc6..7130176d8cb8 100644
--- a/arch/powerpc/include/asm/synch.h
+++ b/arch/powerpc/include/asm/synch.h
@@ -14,7 +14,10 @@ extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
static inline void eieio(void)
{
- __asm__ __volatile__ ("eieio" : : : "memory");
+ if (IS_ENABLED(CONFIG_BOOKE))
+ __asm__ __volatile__ ("mbar" : : : "memory");
+ else
+ __asm__ __volatile__ ("eieio" : : : "memory");
}
static inline void isync(void)
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 105f200b1e31..3ddc65c63a49 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -2,7 +2,6 @@
#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H
-#include <asm/ppc_asm.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/extable.h>
diff --git a/arch/powerpc/include/asm/uprobes.h b/arch/powerpc/include/asm/uprobes.h
index a7ae1860115a..4fea116d3d37 100644
--- a/arch/powerpc/include/asm/uprobes.h
+++ b/arch/powerpc/include/asm/uprobes.h
@@ -12,7 +12,7 @@
#include <linux/notifier.h>
#include <asm/probes.h>
-typedef ppc_opcode_t uprobe_opcode_t;
+typedef u32 uprobe_opcode_t;
#define MAX_UINSN_BYTES 8
#define UPROBE_XOL_SLOT_BYTES (MAX_UINSN_BYTES)
diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h
index f3f4710d4ff5..46c31fb8748d 100644
--- a/arch/powerpc/include/asm/word-at-a-time.h
+++ b/arch/powerpc/include/asm/word-at-a-time.h
@@ -7,7 +7,7 @@
#include <linux/kernel.h>
#include <asm/asm-compat.h>
-#include <asm/ppc_asm.h>
+#include <asm/extable.h>
#ifdef __BIG_ENDIAN__
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index c8cf924bf9c0..06d2d1f78f71 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -54,6 +54,13 @@ CFLAGS_cputable.o += -DDISABLE_BRANCH_PROFILING
CFLAGS_btext.o += -DDISABLE_BRANCH_PROFILING
endif
+#ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET
+# Remove stack protector to avoid triggering unneeded stack canary
+# checks due to randomize_kstack_offset.
+CFLAGS_REMOVE_syscall.o = -fstack-protector -fstack-protector-strong
+CFLAGS_syscall.o += -fno-stack-protector
+#endif
+
obj-y := cputable.o syscalls.o \
irq.o align.o signal_$(BITS).o pmc.o vdso.o \
process.o systbl.o idle.o \
@@ -62,9 +69,9 @@ obj-y := cputable.o syscalls.o \
udbg.o misc.o io.o misc_$(BITS).o \
of_platform.o prom_parse.o firmware.o \
hw_breakpoint_constraints.o interrupt.o \
- kdebugfs.o stacktrace.o
+ kdebugfs.o stacktrace.o syscall.o
obj-y += ptrace/
-obj-$(CONFIG_PPC64) += setup_64.o \
+obj-$(CONFIG_PPC64) += setup_64.o irq_64.o\
paca.o nvram_64.o note.o
obj-$(CONFIG_COMPAT) += sys_ppc32.o signal_32.o
obj-$(CONFIG_VDSO32) += vdso32_wrapper.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index eec536aef83a..8c10f536e478 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -379,7 +379,7 @@ int main(void)
OFFSET(VCPU_SPRG2, kvm_vcpu, arch.shregs.sprg2);
OFFSET(VCPU_SPRG3, kvm_vcpu, arch.shregs.sprg3);
#endif
-#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+#ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING
OFFSET(VCPU_TB_RMENTRY, kvm_vcpu, arch.rm_entry);
OFFSET(VCPU_TB_RMINTR, kvm_vcpu, arch.rm_intr);
OFFSET(VCPU_TB_RMEXIT, kvm_vcpu, arch.rm_exit);
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index 8f69bb07e500..2769889219bf 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -73,7 +73,7 @@ static inline void rmci_maybe_off(void)
* the display during identify_machine() and MMU_Init()
*
* The display is mapped to virtual address 0xD0000000, rather
- * than 1:1, because some some CHRP machines put the frame buffer
+ * than 1:1, because some CHRP machines put the frame buffer
* in the region starting at 0xC0000000 (PAGE_OFFSET).
* This mapping is temporary and will disappear as soon as the
* setup done by MMU_Init() is applied.
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index a5dbfccd2047..d8e42ef750f1 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -149,7 +149,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_IBM,
.cpu_setup = __setup_cpu_ppc970,
.cpu_restore = __restore_cpu_ppc970,
- .oprofile_cpu_type = "ppc64/970",
.platform = "ppc970",
},
{ /* PPC970FX */
@@ -166,7 +165,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_IBM,
.cpu_setup = __setup_cpu_ppc970,
.cpu_restore = __restore_cpu_ppc970,
- .oprofile_cpu_type = "ppc64/970",
.platform = "ppc970",
},
{ /* PPC970MP DD1.0 - no DEEPNAP, use regular 970 init */
@@ -183,7 +181,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_IBM,
.cpu_setup = __setup_cpu_ppc970,
.cpu_restore = __restore_cpu_ppc970,
- .oprofile_cpu_type = "ppc64/970MP",
.platform = "ppc970",
},
{ /* PPC970MP */
@@ -200,7 +197,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_IBM,
.cpu_setup = __setup_cpu_ppc970MP,
.cpu_restore = __restore_cpu_ppc970,
- .oprofile_cpu_type = "ppc64/970MP",
.platform = "ppc970",
},
{ /* PPC970GX */
@@ -216,7 +212,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 8,
.pmc_type = PPC_PMC_IBM,
.cpu_setup = __setup_cpu_ppc970,
- .oprofile_cpu_type = "ppc64/970",
.platform = "ppc970",
},
{ /* Power5 GR */
@@ -230,7 +225,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power5",
.platform = "power5",
},
{ /* Power5++ */
@@ -243,7 +237,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
- .oprofile_cpu_type = "ppc64/power5++",
.platform = "power5+",
},
{ /* Power5 GS */
@@ -257,7 +250,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power5+",
.platform = "power5+",
},
{ /* POWER6 in P5+ mode; 2.04-compliant processor */
@@ -269,7 +261,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.mmu_features = MMU_FTRS_POWER5,
.icache_bsize = 128,
.dcache_bsize = 128,
- .oprofile_cpu_type = "ppc64/ibm-compat-v1",
.platform = "power5+",
},
{ /* Power6 */
@@ -284,7 +275,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power6",
.platform = "power6x",
},
{ /* 2.05-compliant processor, i.e. Power6 "architected" mode */
@@ -296,7 +286,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.mmu_features = MMU_FTRS_POWER6,
.icache_bsize = 128,
.dcache_bsize = 128,
- .oprofile_cpu_type = "ppc64/ibm-compat-v1",
.platform = "power6",
},
{ /* 2.06-compliant processor, i.e. Power7 "architected" mode */
@@ -309,7 +298,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.mmu_features = MMU_FTRS_POWER7,
.icache_bsize = 128,
.dcache_bsize = 128,
- .oprofile_cpu_type = "ppc64/ibm-compat-v1",
.cpu_setup = __setup_cpu_power7,
.cpu_restore = __restore_cpu_power7,
.machine_check_early = __machine_check_early_realmode_p7,
@@ -325,7 +313,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.mmu_features = MMU_FTRS_POWER8,
.icache_bsize = 128,
.dcache_bsize = 128,
- .oprofile_cpu_type = "ppc64/ibm-compat-v1",
.cpu_setup = __setup_cpu_power8,
.cpu_restore = __restore_cpu_power8,
.machine_check_early = __machine_check_early_realmode_p8,
@@ -341,7 +328,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.mmu_features = MMU_FTRS_POWER9,
.icache_bsize = 128,
.dcache_bsize = 128,
- .oprofile_cpu_type = "ppc64/ibm-compat-v1",
.cpu_setup = __setup_cpu_power9,
.cpu_restore = __restore_cpu_power9,
.platform = "power9",
@@ -356,7 +342,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.mmu_features = MMU_FTRS_POWER10,
.icache_bsize = 128,
.dcache_bsize = 128,
- .oprofile_cpu_type = "ppc64/ibm-compat-v1",
.cpu_setup = __setup_cpu_power10,
.cpu_restore = __restore_cpu_power10,
.platform = "power10",
@@ -373,7 +358,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power7",
.cpu_setup = __setup_cpu_power7,
.cpu_restore = __restore_cpu_power7,
.machine_check_early = __machine_check_early_realmode_p7,
@@ -391,7 +375,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power7",
.cpu_setup = __setup_cpu_power7,
.cpu_restore = __restore_cpu_power7,
.machine_check_early = __machine_check_early_realmode_p7,
@@ -409,7 +392,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power8",
.cpu_setup = __setup_cpu_power8,
.cpu_restore = __restore_cpu_power8,
.machine_check_early = __machine_check_early_realmode_p8,
@@ -427,7 +409,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power8",
.cpu_setup = __setup_cpu_power8,
.cpu_restore = __restore_cpu_power8,
.machine_check_early = __machine_check_early_realmode_p8,
@@ -445,7 +426,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power8",
.cpu_setup = __setup_cpu_power8,
.cpu_restore = __restore_cpu_power8,
.machine_check_early = __machine_check_early_realmode_p8,
@@ -463,7 +443,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power9",
.cpu_setup = __setup_cpu_power9,
.cpu_restore = __restore_cpu_power9,
.machine_check_early = __machine_check_early_realmode_p9,
@@ -481,7 +460,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power9",
.cpu_setup = __setup_cpu_power9,
.cpu_restore = __restore_cpu_power9,
.machine_check_early = __machine_check_early_realmode_p9,
@@ -499,7 +477,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power9",
.cpu_setup = __setup_cpu_power9,
.cpu_restore = __restore_cpu_power9,
.machine_check_early = __machine_check_early_realmode_p9,
@@ -517,7 +494,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power9",
.cpu_setup = __setup_cpu_power9,
.cpu_restore = __restore_cpu_power9,
.machine_check_early = __machine_check_early_realmode_p9,
@@ -535,7 +511,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power10",
.cpu_setup = __setup_cpu_power10,
.cpu_restore = __restore_cpu_power10,
.machine_check_early = __machine_check_early_realmode_p10,
@@ -554,7 +529,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 4,
.pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/cell-be",
.platform = "ppc-cell-be",
},
{ /* PA Semi PA6T */
@@ -570,7 +544,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_PA6T,
.cpu_setup = __setup_cpu_pa6t,
.cpu_restore = __restore_cpu_pa6t,
- .oprofile_cpu_type = "ppc64/pa6t",
.platform = "pa6t",
},
{ /* default match */
@@ -734,7 +707,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_750,
.machine_check = machine_check_generic,
.platform = "ppc750",
- .oprofile_cpu_type = "ppc/750",
},
{ /* 745/755 */
.pvr_mask = 0xfffff000,
@@ -765,7 +737,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_750,
.machine_check = machine_check_generic,
.platform = "ppc750",
- .oprofile_cpu_type = "ppc/750",
},
{ /* 750FX rev 2.0 must disable HID0[DPM] */
.pvr_mask = 0xffffffff,
@@ -781,7 +752,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_750,
.machine_check = machine_check_generic,
.platform = "ppc750",
- .oprofile_cpu_type = "ppc/750",
},
{ /* 750FX (All revs except 2.0) */
.pvr_mask = 0xffff0000,
@@ -797,7 +767,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_750fx,
.machine_check = machine_check_generic,
.platform = "ppc750",
- .oprofile_cpu_type = "ppc/750",
},
{ /* 750GX */
.pvr_mask = 0xffff0000,
@@ -813,7 +782,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_750fx,
.machine_check = machine_check_generic,
.platform = "ppc750",
- .oprofile_cpu_type = "ppc/750",
},
{ /* 740/750 (L2CR bit need fixup for 740) */
.pvr_mask = 0xffff0000,
@@ -891,7 +859,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
- .oprofile_cpu_type = "ppc/7450",
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -908,7 +875,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
- .oprofile_cpu_type = "ppc/7450",
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -925,7 +891,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
- .oprofile_cpu_type = "ppc/7450",
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -942,7 +907,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
- .oprofile_cpu_type = "ppc/7450",
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -959,7 +923,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
- .oprofile_cpu_type = "ppc/7450",
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -976,7 +939,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
- .oprofile_cpu_type = "ppc/7450",
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -993,7 +955,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
- .oprofile_cpu_type = "ppc/7450",
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -1010,7 +971,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
- .oprofile_cpu_type = "ppc/7450",
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -1026,7 +986,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
- .oprofile_cpu_type = "ppc/7450",
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -1043,7 +1002,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
- .oprofile_cpu_type = "ppc/7450",
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -1060,7 +1018,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
- .oprofile_cpu_type = "ppc/7450",
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -1172,7 +1129,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_603,
.machine_check = machine_check_83xx,
.num_pmcs = 4,
- .oprofile_cpu_type = "ppc/e300",
.platform = "ppc603",
},
{ /* e300c4 (e300c1, plus one IU) */
@@ -1188,7 +1144,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_603,
.machine_check = machine_check_83xx,
.num_pmcs = 4,
- .oprofile_cpu_type = "ppc/e300",
.platform = "ppc603",
},
#endif
@@ -1884,7 +1839,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
- .oprofile_cpu_type = "ppc/e500",
.cpu_setup = __setup_cpu_e500v1,
.machine_check = machine_check_e500,
.platform = "ppc8540",
@@ -1903,7 +1857,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
- .oprofile_cpu_type = "ppc/e500",
.cpu_setup = __setup_cpu_e500v2,
.machine_check = machine_check_e500,
.platform = "ppc8548",
@@ -1922,7 +1875,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 64,
.dcache_bsize = 64,
.num_pmcs = 4,
- .oprofile_cpu_type = "ppc/e500mc",
.cpu_setup = __setup_cpu_e500mc,
.machine_check = machine_check_e500mc,
.platform = "ppce500mc",
@@ -1943,7 +1895,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 64,
.dcache_bsize = 64,
.num_pmcs = 4,
- .oprofile_cpu_type = "ppc/e500mc",
.cpu_setup = __setup_cpu_e5500,
#ifndef CONFIG_PPC32
.cpu_restore = __restore_cpu_e5500,
@@ -1965,7 +1916,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 64,
.dcache_bsize = 64,
.num_pmcs = 6,
- .oprofile_cpu_type = "ppc/e6500",
.cpu_setup = __setup_cpu_e6500,
#ifndef CONFIG_PPC32
.cpu_restore = __restore_cpu_e6500,
@@ -2033,23 +1983,10 @@ static struct cpu_spec * __init setup_cpu_spec(unsigned long offset,
t->pmc_type = old.pmc_type;
/*
- * If we have passed through this logic once before and
- * have pulled the default case because the real PVR was
- * not found inside cpu_specs[], then we are possibly
- * running in compatibility mode. In that case, let the
- * oprofiler know which set of compatibility counters to
- * pull from by making sure the oprofile_cpu_type string
- * is set to that of compatibility mode. If the
- * oprofile_cpu_type already has a value, then we are
- * possibly overriding a real PVR with a logical one,
- * and, in that case, keep the current value for
- * oprofile_cpu_type. Furthermore, let's ensure that the
+ * Let's ensure that the
* fix for the PMAO bug is enabled on compatibility mode.
*/
- if (old.oprofile_cpu_type != NULL) {
- t->oprofile_cpu_type = old.oprofile_cpu_type;
- t->cpu_features |= old.cpu_features & CPU_FTR_PMAO_BUG;
- }
+ t->cpu_features |= old.cpu_features & CPU_FTR_PMAO_BUG;
}
*PTRRELOC(&cur_cpu_spec) = &the_cpu_spec;
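
For context on the cputable.c hunks above: once oprofile_cpu_type is gone, the only state that still has to be carried over from the real-PVR entry when a logical ("architected") PVR match wins is the PMU description and the PMAO-bug workaround bit. A minimal sketch of that merge step, assuming the field and flag names visible in the hunk (in-tree they come from <asm/cputable.h>):

	/* Sketch only: keep PMU info and the PMAO erratum flag when a
	 * compatibility-mode (logical PVR) entry overrides a real one. */
	static void __init merge_compat_spec(struct cpu_spec *t,
					     const struct cpu_spec *old)
	{
		t->num_pmcs = old->num_pmcs;
		t->pmc_type = old->pmc_type;
		t->cpu_features |= old->cpu_features & CPU_FTR_PMAO_BUG;
	}
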
diff --git a/arch/powerpc/kernel/dawr.c b/arch/powerpc/kernel/dawr.c
index 30d4eca88d17..909a05cd2809 100644
--- a/arch/powerpc/kernel/dawr.c
+++ b/arch/powerpc/kernel/dawr.c
@@ -11,6 +11,7 @@
#include <linux/debugfs.h>
#include <asm/machdep.h>
#include <asm/hvcall.h>
+#include <asm/firmware.h>
bool dawr_force_enable;
EXPORT_SYMBOL_GPL(dawr_force_enable);
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index 2ad365c21afa..fc800a9fb2c4 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -102,7 +102,6 @@ static struct cpu_spec __initdata base_cpu_spec = {
.dcache_bsize = 32, /* cache info init. */
.num_pmcs = 0,
.pmc_type = PPC_PMC_DEFAULT,
- .oprofile_cpu_type = NULL,
.cpu_setup = NULL,
.cpu_restore = __restore_cpu_cpufeatures,
.machine_check_early = NULL,
@@ -387,7 +386,6 @@ static int __init feat_enable_pmu_power8(struct dt_cpu_feature *f)
cur_cpu_spec->num_pmcs = 6;
cur_cpu_spec->pmc_type = PPC_PMC_IBM;
- cur_cpu_spec->oprofile_cpu_type = "ppc64/power8";
return 1;
}
@@ -423,7 +421,6 @@ static int __init feat_enable_pmu_power9(struct dt_cpu_feature *f)
cur_cpu_spec->num_pmcs = 6;
cur_cpu_spec->pmc_type = PPC_PMC_IBM;
- cur_cpu_spec->oprofile_cpu_type = "ppc64/power9";
return 1;
}
@@ -449,7 +446,6 @@ static int __init feat_enable_pmu_power10(struct dt_cpu_feature *f)
cur_cpu_spec->num_pmcs = 6;
cur_cpu_spec->pmc_type = PPC_PMC_IBM;
- cur_cpu_spec->oprofile_cpu_type = "ppc64/power10";
return 1;
}
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 260273e56431..f279295179bd 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -750,7 +750,7 @@ static void eeh_pe_cleanup(struct eeh_pe *pe)
* @pdev: pci_dev to check
*
* This function may return a false positive if we can't determine the slot's
- * presence state. This might happen for for PCIe slots if the PE containing
+ * presence state. This might happen for PCIe slots if the PE containing
* the upstream bridge is also frozen, or the bridge is part of the same PE
* as the device.
*
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index b66dd6f775a4..3d0dc133a9ae 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -2779,7 +2779,7 @@ EXC_COMMON_BEGIN(soft_nmi_common)
/*
* An interrupt came in while soft-disabled. We set paca->irq_happened, then:
- * - If it was a decrementer interrupt, we bump the dec to max and and return.
+ * - If it was a decrementer interrupt, we bump the dec to max and return.
* - If it was a doorbell we return immediately since doorbells are edge
* triggered and won't automatically refire.
* - If it was a HMI we return immediately since we handled it in realmode
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index d3eea633d11a..cf2c08902c05 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -965,6 +965,9 @@ start_here_multiplatform:
* and SLB setup before we turn on relocation.
*/
+#ifdef CONFIG_KASAN
+ bl kasan_early_init
+#endif
/* Restore parameters passed from prom_init/kexec */
mr r3,r31
LOAD_REG_ADDR(r12, DOTSYM(early_setup))
diff --git a/arch/powerpc/kernel/head_book3s_32.S b/arch/powerpc/kernel/head_book3s_32.S
index 6c739beb938c..519b60695167 100644
--- a/arch/powerpc/kernel/head_book3s_32.S
+++ b/arch/powerpc/kernel/head_book3s_32.S
@@ -418,14 +418,14 @@ InstructionTLBMiss:
*/
/* Get PTE (linux-style) and check access */
mfspr r3,SPRN_IMISS
-#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
+#ifdef CONFIG_MODULES
lis r1, TASK_SIZE@h /* check if kernel address */
cmplw 0,r1,r3
#endif
mfspr r2, SPRN_SDR1
li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC | _PAGE_USER
rlwinm r2, r2, 28, 0xfffff000
-#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
+#ifdef CONFIG_MODULES
bgt- 112f
lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
index 784ea3289c84..0e75cb03244a 100644
--- a/arch/powerpc/kernel/interrupt.c
+++ b/arch/powerpc/kernel/interrupt.c
@@ -24,8 +24,6 @@
unsigned long global_dbcr0[NR_CPUS];
#endif
-typedef long (*syscall_fn)(long, long, long, long, long, long);
-
#ifdef CONFIG_PPC_BOOK3S_64
DEFINE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);
static inline bool exit_must_hard_disable(void)
@@ -73,165 +71,6 @@ static notrace __always_inline bool prep_irq_for_enabled_exit(bool restartable)
return true;
}
-/* Has to run notrace because it is entered not completely "reconciled" */
-notrace long system_call_exception(long r3, long r4, long r5,
- long r6, long r7, long r8,
- unsigned long r0, struct pt_regs *regs)
-{
- syscall_fn f;
-
- kuap_lock();
-
- regs->orig_gpr3 = r3;
-
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
- BUG_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
-
- trace_hardirqs_off(); /* finish reconciling */
-
- CT_WARN_ON(ct_state() == CONTEXT_KERNEL);
- user_exit_irqoff();
-
- BUG_ON(regs_is_unrecoverable(regs));
- BUG_ON(!(regs->msr & MSR_PR));
- BUG_ON(arch_irq_disabled_regs(regs));
-
-#ifdef CONFIG_PPC_PKEY
- if (mmu_has_feature(MMU_FTR_PKEY)) {
- unsigned long amr, iamr;
- bool flush_needed = false;
- /*
- * When entering from userspace we mostly have the AMR/IAMR
- * different from kernel default values. Hence don't compare.
- */
- amr = mfspr(SPRN_AMR);
- iamr = mfspr(SPRN_IAMR);
- regs->amr = amr;
- regs->iamr = iamr;
- if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
- mtspr(SPRN_AMR, AMR_KUAP_BLOCKED);
- flush_needed = true;
- }
- if (mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
- mtspr(SPRN_IAMR, AMR_KUEP_BLOCKED);
- flush_needed = true;
- }
- if (flush_needed)
- isync();
- } else
-#endif
- kuap_assert_locked();
-
- booke_restore_dbcr0();
-
- account_cpu_user_entry();
-
- account_stolen_time();
-
- /*
- * This is not required for the syscall exit path, but makes the
- * stack frame look nicer. If this was initialised in the first stack
- * frame, or if the unwinder was taught the first stack frame always
- * returns to user with IRQS_ENABLED, this store could be avoided!
- */
- irq_soft_mask_regs_set_state(regs, IRQS_ENABLED);
-
- /*
- * If system call is called with TM active, set _TIF_RESTOREALL to
- * prevent RFSCV being used to return to userspace, because POWER9
- * TM implementation has problems with this instruction returning to
- * transactional state. Final register values are not relevant because
- * the transaction will be aborted upon return anyway. Or in the case
- * of unsupported_scv SIGILL fault, the return state does not much
- * matter because it's an edge case.
- */
- if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
- unlikely(MSR_TM_TRANSACTIONAL(regs->msr)))
- set_bits(_TIF_RESTOREALL, &current_thread_info()->flags);
-
- /*
- * If the system call was made with a transaction active, doom it and
- * return without performing the system call. Unless it was an
- * unsupported scv vector, in which case it's treated like an illegal
- * instruction.
- */
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- if (unlikely(MSR_TM_TRANSACTIONAL(regs->msr)) &&
- !trap_is_unsupported_scv(regs)) {
- /* Enable TM in the kernel, and disable EE (for scv) */
- hard_irq_disable();
- mtmsr(mfmsr() | MSR_TM);
-
- /* tabort, this dooms the transaction, nothing else */
- asm volatile(".long 0x7c00071d | ((%0) << 16)"
- :: "r"(TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT));
-
- /*
- * Userspace will never see the return value. Execution will
- * resume after the tbegin. of the aborted transaction with the
- * checkpointed register state. A context switch could occur
- * or signal delivered to the process before resuming the
- * doomed transaction context, but that should all be handled
- * as expected.
- */
- return -ENOSYS;
- }
-#endif // CONFIG_PPC_TRANSACTIONAL_MEM
-
- local_irq_enable();
-
- if (unlikely(read_thread_flags() & _TIF_SYSCALL_DOTRACE)) {
- if (unlikely(trap_is_unsupported_scv(regs))) {
- /* Unsupported scv vector */
- _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
- return regs->gpr[3];
- }
- /*
- * We use the return value of do_syscall_trace_enter() as the
- * syscall number. If the syscall was rejected for any reason
- * do_syscall_trace_enter() returns an invalid syscall number
- * and the test against NR_syscalls will fail and the return
- * value to be used is in regs->gpr[3].
- */
- r0 = do_syscall_trace_enter(regs);
- if (unlikely(r0 >= NR_syscalls))
- return regs->gpr[3];
- r3 = regs->gpr[3];
- r4 = regs->gpr[4];
- r5 = regs->gpr[5];
- r6 = regs->gpr[6];
- r7 = regs->gpr[7];
- r8 = regs->gpr[8];
-
- } else if (unlikely(r0 >= NR_syscalls)) {
- if (unlikely(trap_is_unsupported_scv(regs))) {
- /* Unsupported scv vector */
- _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
- return regs->gpr[3];
- }
- return -ENOSYS;
- }
-
- /* May be faster to do array_index_nospec? */
- barrier_nospec();
-
- if (unlikely(is_compat_task())) {
- f = (void *)compat_sys_call_table[r0];
-
- r3 &= 0x00000000ffffffffULL;
- r4 &= 0x00000000ffffffffULL;
- r5 &= 0x00000000ffffffffULL;
- r6 &= 0x00000000ffffffffULL;
- r7 &= 0x00000000ffffffffULL;
- r8 &= 0x00000000ffffffffULL;
-
- } else {
- f = (void *)sys_call_table[r0];
- }
-
- return f(r3, r4, r5, r6, r7, r8);
-}
-
static notrace void booke_load_dbcr0(void)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
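
The large removal from interrupt.c above takes system_call_exception() out of this file (the series moves syscall entry handling elsewhere; the destination is not part of this excerpt). Its core dispatch step is still worth keeping in view: validate the syscall number, emit a speculation barrier, then index the native or compat table, narrowing compat arguments to 32 bits. A condensed sketch using the names from the removed code:

	typedef long (*syscall_fn)(long, long, long, long, long, long);

	/* Condensed sketch of the dispatch core of the removed function. */
	static long dispatch_syscall(unsigned long r0, long r3, long r4, long r5,
				     long r6, long r7, long r8)
	{
		syscall_fn f;

		if (unlikely(r0 >= NR_syscalls))
			return -ENOSYS;

		barrier_nospec();	/* r0 is user-controlled, fence speculation */

		if (unlikely(is_compat_task())) {
			f = (void *)compat_sys_call_table[r0];
			/* compat arguments are 32-bit, drop the upper halves */
			r3 &= 0xffffffffULL; r4 &= 0xffffffffULL; r5 &= 0xffffffffULL;
			r6 &= 0xffffffffULL; r7 &= 0xffffffffULL; r8 &= 0xffffffffULL;
		} else {
			f = (void *)sys_call_table[r0];
		}

		return f(r3, r4, r5, r6, r7, r8);
	}
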
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 7e56ddb3e0b9..caebe1431596 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -775,6 +775,11 @@ bool iommu_table_in_use(struct iommu_table *tbl)
/* ignore reserved bit0 */
if (tbl->it_offset == 0)
start = 1;
+
+ /* Simple case with no reserved MMIO32 region */
+ if (!tbl->it_reserved_start && !tbl->it_reserved_end)
+ return find_next_bit(tbl->it_map, tbl->it_size, start) != tbl->it_size;
+
end = tbl->it_reserved_start - tbl->it_offset;
if (find_next_bit(tbl->it_map, end, start) != end)
return true;
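
The iommu.c hunk adds a fast path to iommu_table_in_use(): when the table reserves no window at all, a single bitmap scan decides whether any entry is allocated. A standalone sketch of the whole check, using the it_* fields named in the hunk; the scan above the reserved window is reconstructed from context and is an assumption, not quoted from the patch:

	/* Sketch: is any TCE entry allocated, ignoring the reserved window? */
	static bool table_in_use_sketch(struct iommu_table *tbl)
	{
		unsigned long start = 0, end;

		if (tbl->it_offset == 0)
			start = 1;	/* entry 0 is reserved, skip it */

		/* Fast path: no reserved MMIO32 window, scan the whole map. */
		if (!tbl->it_reserved_start && !tbl->it_reserved_end)
			return find_next_bit(tbl->it_map, tbl->it_size, start) != tbl->it_size;

		/* Scan below the reserved window ... */
		end = tbl->it_reserved_start - tbl->it_offset;
		if (find_next_bit(tbl->it_map, end, start) != end)
			return true;

		/* ... and above it (assumed tail, see note above). */
		start = tbl->it_reserved_end - tbl->it_offset;
		return find_next_bit(tbl->it_map, tbl->it_size, start) != tbl->it_size;
	}
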
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index dd09919c3c66..0f17268c1f0b 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -65,13 +65,8 @@
#include <asm/smp.h>
#include <asm/hw_irq.h>
#include <asm/softirq_stack.h>
+#include <asm/ppc_asm.h>
-#ifdef CONFIG_PPC64
-#include <asm/paca.h>
-#include <asm/firmware.h>
-#include <asm/lv1call.h>
-#include <asm/dbell.h>
-#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>
#include <asm/cpu_has_feature.h>
@@ -88,411 +83,6 @@ u32 tau_interrupts(unsigned long cpu);
#endif
#endif /* CONFIG_PPC32 */
-#ifdef CONFIG_PPC64
-
-int distribute_irqs = 1;
-
-static inline notrace unsigned long get_irq_happened(void)
-{
- unsigned long happened;
-
- __asm__ __volatile__("lbz %0,%1(13)"
- : "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened)));
-
- return happened;
-}
-
-void replay_soft_interrupts(void)
-{
- struct pt_regs regs;
-
- /*
- * Be careful here, calling these interrupt handlers can cause
- * softirqs to be raised, which they may run when calling irq_exit,
- * which will cause local_irq_enable() to be run, which can then
- * recurse into this function. Don't keep any state across
- * interrupt handler calls which may change underneath us.
- *
- * We use local_paca rather than get_paca() to avoid all the
- * debug_smp_processor_id() business in this low level function.
- */
-
- ppc_save_regs(&regs);
- regs.softe = IRQS_ENABLED;
- regs.msr |= MSR_EE;
-
-again:
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
- WARN_ON_ONCE(mfmsr() & MSR_EE);
-
- /*
- * Force the delivery of pending soft-disabled interrupts on PS3.
- * Any HV call will have this side effect.
- */
- if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
- u64 tmp, tmp2;
- lv1_get_version_info(&tmp, &tmp2);
- }
-
- /*
- * Check if an hypervisor Maintenance interrupt happened.
- * This is a higher priority interrupt than the others, so
- * replay it first.
- */
- if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_HMI)) {
- local_paca->irq_happened &= ~PACA_IRQ_HMI;
- regs.trap = INTERRUPT_HMI;
- handle_hmi_exception(&regs);
- if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
- hard_irq_disable();
- }
-
- if (local_paca->irq_happened & PACA_IRQ_DEC) {
- local_paca->irq_happened &= ~PACA_IRQ_DEC;
- regs.trap = INTERRUPT_DECREMENTER;
- timer_interrupt(&regs);
- if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
- hard_irq_disable();
- }
-
- if (local_paca->irq_happened & PACA_IRQ_EE) {
- local_paca->irq_happened &= ~PACA_IRQ_EE;
- regs.trap = INTERRUPT_EXTERNAL;
- do_IRQ(&regs);
- if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
- hard_irq_disable();
- }
-
- if (IS_ENABLED(CONFIG_PPC_DOORBELL) && (local_paca->irq_happened & PACA_IRQ_DBELL)) {
- local_paca->irq_happened &= ~PACA_IRQ_DBELL;
- regs.trap = INTERRUPT_DOORBELL;
- doorbell_exception(&regs);
- if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
- hard_irq_disable();
- }
-
- /* Book3E does not support soft-masking PMI interrupts */
- if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_PMI)) {
- local_paca->irq_happened &= ~PACA_IRQ_PMI;
- regs.trap = INTERRUPT_PERFMON;
- performance_monitor_exception(&regs);
- if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
- hard_irq_disable();
- }
-
- if (local_paca->irq_happened & ~PACA_IRQ_HARD_DIS) {
- /*
- * We are responding to the next interrupt, so interrupt-off
- * latencies should be reset here.
- */
- trace_hardirqs_on();
- trace_hardirqs_off();
- goto again;
- }
-}
-
-#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_KUAP)
-static inline void replay_soft_interrupts_irqrestore(void)
-{
- unsigned long kuap_state = get_kuap();
-
- /*
- * Check if anything calls local_irq_enable/restore() when KUAP is
- * disabled (user access enabled). We handle that case here by saving
- * and re-locking AMR but we shouldn't get here in the first place,
- * hence the warning.
- */
- kuap_assert_locked();
-
- if (kuap_state != AMR_KUAP_BLOCKED)
- set_kuap(AMR_KUAP_BLOCKED);
-
- replay_soft_interrupts();
-
- if (kuap_state != AMR_KUAP_BLOCKED)
- set_kuap(kuap_state);
-}
-#else
-#define replay_soft_interrupts_irqrestore() replay_soft_interrupts()
-#endif
-
-notrace void arch_local_irq_restore(unsigned long mask)
-{
- unsigned char irq_happened;
-
- /* Write the new soft-enabled value if it is a disable */
- if (mask) {
- irq_soft_mask_set(mask);
- return;
- }
-
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
- WARN_ON_ONCE(in_nmi() || in_hardirq());
-
- /*
- * After the stb, interrupts are unmasked and there are no interrupts
- * pending replay. The restart sequence makes this atomic with
- * respect to soft-masked interrupts. If this was just a simple code
- * sequence, a soft-masked interrupt could become pending right after
- * the comparison and before the stb.
- *
- * This allows interrupts to be unmasked without hard disabling, and
- * also without new hard interrupts coming in ahead of pending ones.
- */
- asm_volatile_goto(
-"1: \n"
-" lbz 9,%0(13) \n"
-" cmpwi 9,0 \n"
-" bne %l[happened] \n"
-" stb 9,%1(13) \n"
-"2: \n"
- RESTART_TABLE(1b, 2b, 1b)
- : : "i" (offsetof(struct paca_struct, irq_happened)),
- "i" (offsetof(struct paca_struct, irq_soft_mask))
- : "cr0", "r9"
- : happened);
-
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
- WARN_ON_ONCE(!(mfmsr() & MSR_EE));
-
- return;
-
-happened:
- irq_happened = get_irq_happened();
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
- WARN_ON_ONCE(!irq_happened);
-
- if (irq_happened == PACA_IRQ_HARD_DIS) {
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
- WARN_ON_ONCE(mfmsr() & MSR_EE);
- irq_soft_mask_set(IRQS_ENABLED);
- local_paca->irq_happened = 0;
- __hard_irq_enable();
- return;
- }
-
- /* Have interrupts to replay, need to hard disable first */
- if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
- if (!(mfmsr() & MSR_EE)) {
- /*
- * An interrupt could have come in and cleared
- * MSR[EE] and set IRQ_HARD_DIS, so check
- * IRQ_HARD_DIS again and warn if it is still
- * clear.
- */
- irq_happened = get_irq_happened();
- WARN_ON_ONCE(!(irq_happened & PACA_IRQ_HARD_DIS));
- }
- }
- __hard_irq_disable();
- local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
- } else {
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
- if (WARN_ON_ONCE(mfmsr() & MSR_EE))
- __hard_irq_disable();
- }
- }
-
- /*
- * Disable preempt here, so that the below preempt_enable will
- * perform resched if required (a replayed interrupt may set
- * need_resched).
- */
- preempt_disable();
- irq_soft_mask_set(IRQS_ALL_DISABLED);
- trace_hardirqs_off();
-
- replay_soft_interrupts_irqrestore();
- local_paca->irq_happened = 0;
-
- trace_hardirqs_on();
- irq_soft_mask_set(IRQS_ENABLED);
- __hard_irq_enable();
- preempt_enable();
-}
-EXPORT_SYMBOL(arch_local_irq_restore);
-
-/*
- * This is a helper to use when about to go into idle low-power
- * when the latter has the side effect of re-enabling interrupts
- * (such as calling H_CEDE under pHyp).
- *
- * You call this function with interrupts soft-disabled (this is
- * already the case when ppc_md.power_save is called). The function
- * will return whether to enter power save or just return.
- *
- * In the former case, it will have notified lockdep of interrupts
- * being re-enabled and generally sanitized the lazy irq state,
- * and in the latter case it will leave with interrupts hard
- * disabled and marked as such, so the local_irq_enable() call
- * in arch_cpu_idle() will properly re-enable everything.
- */
-bool prep_irq_for_idle(void)
-{
- /*
- * First we need to hard disable to ensure no interrupt
- * occurs before we effectively enter the low power state
- */
- __hard_irq_disable();
- local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
-
- /*
- * If anything happened while we were soft-disabled,
- * we return now and do not enter the low power state.
- */
- if (lazy_irq_pending())
- return false;
-
- /* Tell lockdep we are about to re-enable */
- trace_hardirqs_on();
-
- /*
- * Mark interrupts as soft-enabled and clear the
- * PACA_IRQ_HARD_DIS from the pending mask since we
- * are about to hard enable as well as a side effect
- * of entering the low power state.
- */
- local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
- irq_soft_mask_set(IRQS_ENABLED);
-
- /* Tell the caller to enter the low power state */
- return true;
-}
-
-#ifdef CONFIG_PPC_BOOK3S
-/*
- * This is for idle sequences that return with IRQs off, but the
- * idle state itself wakes on interrupt. Tell the irq tracer that
- * IRQs are enabled for the duration of idle so it does not get long
- * off times. Must be paired with fini_irq_for_idle_irqsoff.
- */
-bool prep_irq_for_idle_irqsoff(void)
-{
- WARN_ON(!irqs_disabled());
-
- /*
- * First we need to hard disable to ensure no interrupt
- * occurs before we effectively enter the low power state
- */
- __hard_irq_disable();
- local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
-
- /*
- * If anything happened while we were soft-disabled,
- * we return now and do not enter the low power state.
- */
- if (lazy_irq_pending())
- return false;
-
- /* Tell lockdep we are about to re-enable */
- trace_hardirqs_on();
-
- return true;
-}
-
-/*
- * Take the SRR1 wakeup reason, index into this table to find the
- * appropriate irq_happened bit.
- *
- * Sytem reset exceptions taken in idle state also come through here,
- * but they are NMI interrupts so do not need to wait for IRQs to be
- * restored, and should be taken as early as practical. These are marked
- * with 0xff in the table. The Power ISA specifies 0100b as the system
- * reset interrupt reason.
- */
-#define IRQ_SYSTEM_RESET 0xff
-
-static const u8 srr1_to_lazyirq[0x10] = {
- 0, 0, 0,
- PACA_IRQ_DBELL,
- IRQ_SYSTEM_RESET,
- PACA_IRQ_DBELL,
- PACA_IRQ_DEC,
- 0,
- PACA_IRQ_EE,
- PACA_IRQ_EE,
- PACA_IRQ_HMI,
- 0, 0, 0, 0, 0 };
-
-void replay_system_reset(void)
-{
- struct pt_regs regs;
-
- ppc_save_regs(&regs);
- regs.trap = 0x100;
- get_paca()->in_nmi = 1;
- system_reset_exception(&regs);
- get_paca()->in_nmi = 0;
-}
-EXPORT_SYMBOL_GPL(replay_system_reset);
-
-void irq_set_pending_from_srr1(unsigned long srr1)
-{
- unsigned int idx = (srr1 & SRR1_WAKEMASK_P8) >> 18;
- u8 reason = srr1_to_lazyirq[idx];
-
- /*
- * Take the system reset now, which is immediately after registers
- * are restored from idle. It's an NMI, so interrupts need not be
- * re-enabled before it is taken.
- */
- if (unlikely(reason == IRQ_SYSTEM_RESET)) {
- replay_system_reset();
- return;
- }
-
- if (reason == PACA_IRQ_DBELL) {
- /*
- * When doorbell triggers a system reset wakeup, the message
- * is not cleared, so if the doorbell interrupt is replayed
- * and the IPI handled, the doorbell interrupt would still
- * fire when EE is enabled.
- *
- * To avoid taking the superfluous doorbell interrupt,
- * execute a msgclr here before the interrupt is replayed.
- */
- ppc_msgclr(PPC_DBELL_MSGTYPE);
- }
-
- /*
- * The 0 index (SRR1[42:45]=b0000) must always evaluate to 0,
- * so this can be called unconditionally with the SRR1 wake
- * reason as returned by the idle code, which uses 0 to mean no
- * interrupt.
- *
- * If a future CPU was to designate this as an interrupt reason,
- * then a new index for no interrupt must be assigned.
- */
- local_paca->irq_happened |= reason;
-}
-#endif /* CONFIG_PPC_BOOK3S */
-
-/*
- * Force a replay of the external interrupt handler on this CPU.
- */
-void force_external_irq_replay(void)
-{
- /*
- * This must only be called with interrupts soft-disabled,
- * the replay will happen when re-enabling.
- */
- WARN_ON(!arch_irqs_disabled());
-
- /*
- * Interrupts must always be hard disabled before irq_happened is
- * modified (to prevent lost update in case of interrupt between
- * load and store).
- */
- __hard_irq_disable();
- local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
-
- /* Indicate in the PACA that we have an interrupt to replay */
- local_paca->irq_happened |= PACA_IRQ_EE;
-}
-
-#endif /* CONFIG_PPC64 */
-
int arch_show_interrupts(struct seq_file *p, int prec)
{
int j;
@@ -595,22 +185,21 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
return sum;
}
-static inline void check_stack_overflow(void)
+static inline void check_stack_overflow(unsigned long sp)
{
- long sp;
-
if (!IS_ENABLED(CONFIG_DEBUG_STACKOVERFLOW))
return;
- sp = current_stack_pointer & (THREAD_SIZE - 1);
+ sp &= THREAD_SIZE - 1;
- /* check for stack overflow: is there less than 2KB free? */
- if (unlikely(sp < 2048)) {
+ /* check for stack overflow: is there less than 1/4th free? */
+ if (unlikely(sp < THREAD_SIZE / 4)) {
pr_err("do_IRQ: stack overflow: %ld\n", sp);
dump_stack();
}
}
+#ifndef CONFIG_PREEMPT_RT
static __always_inline void call_do_softirq(const void *sp)
{
/* Temporarily switch r1 to sp, call __do_softirq() then restore r1. */
@@ -629,37 +218,18 @@ static __always_inline void call_do_softirq(const void *sp)
"r11", "r12"
);
}
-
-static __always_inline void call_do_irq(struct pt_regs *regs, void *sp)
-{
- register unsigned long r3 asm("r3") = (unsigned long)regs;
-
- /* Temporarily switch r1 to sp, call __do_irq() then restore r1. */
- asm volatile (
- PPC_STLU " %%r1, %[offset](%[sp]) ;"
- "mr %%r1, %[sp] ;"
- "bl %[callee] ;"
- PPC_LL " %%r1, 0(%%r1) ;"
- : // Outputs
- "+r" (r3)
- : // Inputs
- [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_OVERHEAD),
- [callee] "i" (__do_irq)
- : // Clobbers
- "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6",
- "cr7", "r0", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
- "r11", "r12"
- );
-}
+#endif
DEFINE_STATIC_CALL_RET0(ppc_get_irq, *ppc_md.get_irq);
-void __do_irq(struct pt_regs *regs)
+static void __do_irq(struct pt_regs *regs, unsigned long oldsp)
{
unsigned int irq;
trace_irq_entry(regs);
+ check_stack_overflow(oldsp);
+
/*
* Query the platform PIC for the interrupt & ack it.
*
@@ -680,6 +250,29 @@ void __do_irq(struct pt_regs *regs)
trace_irq_exit(regs);
}
+static __always_inline void call_do_irq(struct pt_regs *regs, void *sp)
+{
+ register unsigned long r3 asm("r3") = (unsigned long)regs;
+
+ /* Temporarily switch r1 to sp, call __do_irq() then restore r1. */
+ asm volatile (
+ PPC_STLU " %%r1, %[offset](%[sp]) ;"
+ "mr %%r4, %%r1 ;"
+ "mr %%r1, %[sp] ;"
+ "bl %[callee] ;"
+ PPC_LL " %%r1, 0(%%r1) ;"
+ : // Outputs
+ "+r" (r3)
+ : // Inputs
+ [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_OVERHEAD),
+ [callee] "i" (__do_irq)
+ : // Clobbers
+ "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6",
+ "cr7", "r0", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
+ "r11", "r12"
+ );
+}
+
void __do_IRQ(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
@@ -690,16 +283,11 @@ void __do_IRQ(struct pt_regs *regs)
irqsp = hardirq_ctx[raw_smp_processor_id()];
sirqsp = softirq_ctx[raw_smp_processor_id()];
- check_stack_overflow();
-
- /* Already there ? */
- if (unlikely(cursp == irqsp || cursp == sirqsp)) {
- __do_irq(regs);
- set_irq_regs(old_regs);
- return;
- }
- /* Switch stack and call */
- call_do_irq(regs, irqsp);
+	/* Already there? If not, switch stack and call */
+ if (unlikely(cursp == irqsp || cursp == sirqsp))
+ __do_irq(regs, current_stack_pointer);
+ else
+ call_do_irq(regs, irqsp);
set_irq_regs(old_regs);
}
@@ -747,10 +335,12 @@ void *mcheckirq_ctx[NR_CPUS] __read_mostly;
void *softirq_ctx[NR_CPUS] __read_mostly;
void *hardirq_ctx[NR_CPUS] __read_mostly;
+#ifndef CONFIG_PREEMPT_RT
void do_softirq_own_stack(void)
{
call_do_softirq(softirq_ctx[smp_processor_id()]);
}
+#endif
irq_hw_number_t virq_to_hw(unsigned int virq)
{
@@ -794,13 +384,3 @@ int irq_choose_cpu(const struct cpumask *mask)
return hard_smp_processor_id();
}
#endif
-
-#ifdef CONFIG_PPC64
-static int __init setup_noirqdistrib(char *str)
-{
- distribute_irqs = 0;
- return 1;
-}
-
-__setup("noirqdistrib", setup_noirqdistrib);
-#endif /* CONFIG_PPC64 */
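
The irq.c rework above changes check_stack_overflow() in two ways: it now receives the pre-switch stack pointer from __do_irq() instead of reading current_stack_pointer itself, and the warning threshold becomes a quarter of THREAD_SIZE rather than a fixed 2KB. Because powerpc stacks grow down and THREAD_SIZE is a power of two, masking the stack pointer with THREAD_SIZE - 1 yields exactly the number of bytes still free. A self-contained restatement of that arithmetic:

	/* Sketch: warn when less than THREAD_SIZE/4 of the interrupted stack is left. */
	static inline void check_stack_overflow_sketch(unsigned long sp)
	{
		if (!IS_ENABLED(CONFIG_DEBUG_STACKOVERFLOW))
			return;

		/* The stack grows down, so the offset from the bottom of the
		 * THREAD_SIZE-aligned region is the space still unused. */
		sp &= THREAD_SIZE - 1;

		if (unlikely(sp < THREAD_SIZE / 4)) {
			pr_err("do_IRQ: stack overflow: %ld\n", sp);
			dump_stack();
		}
	}
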
diff --git a/arch/powerpc/kernel/irq_64.c b/arch/powerpc/kernel/irq_64.c
new file mode 100644
index 000000000000..01645e03e9f0
--- /dev/null
+++ b/arch/powerpc/kernel/irq_64.c
@@ -0,0 +1,466 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Derived from arch/i386/kernel/irq.c
+ * Copyright (C) 1992 Linus Torvalds
+ * Adapted from arch/i386 by Gary Thomas
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ * Updated and modified by Cort Dougan <cort@fsmlabs.com>
+ * Copyright (C) 1996-2001 Cort Dougan
+ * Adapted for Power Macintosh by Paul Mackerras
+ * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
+ *
+ * This file contains the code used by various IRQ handling routines:
+ * asking for different IRQ's should be done through these routines
+ * instead of just grabbing them. Thus setups with different IRQ numbers
+ * shouldn't result in any weird surprises, and installing new handlers
+ * should be easier.
+ */
+
+#undef DEBUG
+
+#include <linux/export.h>
+#include <linux/threads.h>
+#include <linux/kernel_stat.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/timex.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/seq_file.h>
+#include <linux/cpumask.h>
+#include <linux/profile.h>
+#include <linux/bitops.h>
+#include <linux/list.h>
+#include <linux/radix-tree.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/debugfs.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/vmalloc.h>
+#include <linux/pgtable.h>
+#include <linux/static_call.h>
+
+#include <linux/uaccess.h>
+#include <asm/interrupt.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/cache.h>
+#include <asm/ptrace.h>
+#include <asm/machdep.h>
+#include <asm/udbg.h>
+#include <asm/smp.h>
+#include <asm/hw_irq.h>
+#include <asm/softirq_stack.h>
+#include <asm/ppc_asm.h>
+
+#include <asm/paca.h>
+#include <asm/firmware.h>
+#include <asm/lv1call.h>
+#include <asm/dbell.h>
+#include <asm/trace.h>
+#include <asm/cpu_has_feature.h>
+
+int distribute_irqs = 1;
+
+void replay_soft_interrupts(void)
+{
+ struct pt_regs regs;
+
+ /*
+ * Be careful here, calling these interrupt handlers can cause
+ * softirqs to be raised, which they may run when calling irq_exit,
+ * which will cause local_irq_enable() to be run, which can then
+ * recurse into this function. Don't keep any state across
+ * interrupt handler calls which may change underneath us.
+ *
+ * We use local_paca rather than get_paca() to avoid all the
+ * debug_smp_processor_id() business in this low level function.
+ */
+
+ ppc_save_regs(&regs);
+ regs.softe = IRQS_ENABLED;
+ regs.msr |= MSR_EE;
+
+again:
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON_ONCE(mfmsr() & MSR_EE);
+
+ /*
+ * Force the delivery of pending soft-disabled interrupts on PS3.
+ * Any HV call will have this side effect.
+ */
+ if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
+ u64 tmp, tmp2;
+ lv1_get_version_info(&tmp, &tmp2);
+ }
+
+ /*
+	 * Check if a hypervisor Maintenance interrupt happened.
+ * This is a higher priority interrupt than the others, so
+ * replay it first.
+ */
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_HMI)) {
+ local_paca->irq_happened &= ~PACA_IRQ_HMI;
+ regs.trap = INTERRUPT_HMI;
+ handle_hmi_exception(&regs);
+ if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
+ hard_irq_disable();
+ }
+
+ if (local_paca->irq_happened & PACA_IRQ_DEC) {
+ local_paca->irq_happened &= ~PACA_IRQ_DEC;
+ regs.trap = INTERRUPT_DECREMENTER;
+ timer_interrupt(&regs);
+ if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
+ hard_irq_disable();
+ }
+
+ if (local_paca->irq_happened & PACA_IRQ_EE) {
+ local_paca->irq_happened &= ~PACA_IRQ_EE;
+ regs.trap = INTERRUPT_EXTERNAL;
+ do_IRQ(&regs);
+ if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
+ hard_irq_disable();
+ }
+
+ if (IS_ENABLED(CONFIG_PPC_DOORBELL) && (local_paca->irq_happened & PACA_IRQ_DBELL)) {
+ local_paca->irq_happened &= ~PACA_IRQ_DBELL;
+ regs.trap = INTERRUPT_DOORBELL;
+ doorbell_exception(&regs);
+ if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
+ hard_irq_disable();
+ }
+
+ /* Book3E does not support soft-masking PMI interrupts */
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_PMI)) {
+ local_paca->irq_happened &= ~PACA_IRQ_PMI;
+ regs.trap = INTERRUPT_PERFMON;
+ performance_monitor_exception(&regs);
+ if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
+ hard_irq_disable();
+ }
+
+ if (local_paca->irq_happened & ~PACA_IRQ_HARD_DIS) {
+ /*
+ * We are responding to the next interrupt, so interrupt-off
+ * latencies should be reset here.
+ */
+ trace_hardirqs_on();
+ trace_hardirqs_off();
+ goto again;
+ }
+}
+
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_KUAP)
+static inline void replay_soft_interrupts_irqrestore(void)
+{
+ unsigned long kuap_state = get_kuap();
+
+ /*
+ * Check if anything calls local_irq_enable/restore() when KUAP is
+ * disabled (user access enabled). We handle that case here by saving
+ * and re-locking AMR but we shouldn't get here in the first place,
+ * hence the warning.
+ */
+ kuap_assert_locked();
+
+ if (kuap_state != AMR_KUAP_BLOCKED)
+ set_kuap(AMR_KUAP_BLOCKED);
+
+ replay_soft_interrupts();
+
+ if (kuap_state != AMR_KUAP_BLOCKED)
+ set_kuap(kuap_state);
+}
+#else
+#define replay_soft_interrupts_irqrestore() replay_soft_interrupts()
+#endif
+
+notrace void arch_local_irq_restore(unsigned long mask)
+{
+ unsigned char irq_happened;
+
+ /* Write the new soft-enabled value if it is a disable */
+ if (mask) {
+ irq_soft_mask_set(mask);
+ return;
+ }
+
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON_ONCE(in_nmi() || in_hardirq());
+
+ /*
+ * After the stb, interrupts are unmasked and there are no interrupts
+ * pending replay. The restart sequence makes this atomic with
+ * respect to soft-masked interrupts. If this was just a simple code
+ * sequence, a soft-masked interrupt could become pending right after
+ * the comparison and before the stb.
+ *
+ * This allows interrupts to be unmasked without hard disabling, and
+ * also without new hard interrupts coming in ahead of pending ones.
+ */
+ asm_volatile_goto(
+"1: \n"
+" lbz 9,%0(13) \n"
+" cmpwi 9,0 \n"
+" bne %l[happened] \n"
+" stb 9,%1(13) \n"
+"2: \n"
+ RESTART_TABLE(1b, 2b, 1b)
+ : : "i" (offsetof(struct paca_struct, irq_happened)),
+ "i" (offsetof(struct paca_struct, irq_soft_mask))
+ : "cr0", "r9"
+ : happened);
+
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON_ONCE(!(mfmsr() & MSR_EE));
+
+ return;
+
+happened:
+ irq_happened = READ_ONCE(local_paca->irq_happened);
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON_ONCE(!irq_happened);
+
+ if (irq_happened == PACA_IRQ_HARD_DIS) {
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON_ONCE(mfmsr() & MSR_EE);
+ irq_soft_mask_set(IRQS_ENABLED);
+ local_paca->irq_happened = 0;
+ __hard_irq_enable();
+ return;
+ }
+
+ /* Have interrupts to replay, need to hard disable first */
+ if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
+ if (!(mfmsr() & MSR_EE)) {
+ /*
+ * An interrupt could have come in and cleared
+ * MSR[EE] and set IRQ_HARD_DIS, so check
+ * IRQ_HARD_DIS again and warn if it is still
+ * clear.
+ */
+ irq_happened = READ_ONCE(local_paca->irq_happened);
+ WARN_ON_ONCE(!(irq_happened & PACA_IRQ_HARD_DIS));
+ }
+ }
+ __hard_irq_disable();
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+ } else {
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
+ if (WARN_ON_ONCE(mfmsr() & MSR_EE))
+ __hard_irq_disable();
+ }
+ }
+
+ /*
+ * Disable preempt here, so that the below preempt_enable will
+ * perform resched if required (a replayed interrupt may set
+ * need_resched).
+ */
+ preempt_disable();
+ irq_soft_mask_set(IRQS_ALL_DISABLED);
+ trace_hardirqs_off();
+
+ replay_soft_interrupts_irqrestore();
+ local_paca->irq_happened = 0;
+
+ trace_hardirqs_on();
+ irq_soft_mask_set(IRQS_ENABLED);
+ __hard_irq_enable();
+ preempt_enable();
+}
+EXPORT_SYMBOL(arch_local_irq_restore);
+
+/*
+ * This is a helper to use when about to go into idle low-power
+ * when the latter has the side effect of re-enabling interrupts
+ * (such as calling H_CEDE under pHyp).
+ *
+ * You call this function with interrupts soft-disabled (this is
+ * already the case when ppc_md.power_save is called). The function
+ * will return whether to enter power save or just return.
+ *
+ * In the former case, it will have notified lockdep of interrupts
+ * being re-enabled and generally sanitized the lazy irq state,
+ * and in the latter case it will leave with interrupts hard
+ * disabled and marked as such, so the local_irq_enable() call
+ * in arch_cpu_idle() will properly re-enable everything.
+ */
+bool prep_irq_for_idle(void)
+{
+ /*
+ * First we need to hard disable to ensure no interrupt
+ * occurs before we effectively enter the low power state
+ */
+ __hard_irq_disable();
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
+ /*
+ * If anything happened while we were soft-disabled,
+ * we return now and do not enter the low power state.
+ */
+ if (lazy_irq_pending())
+ return false;
+
+ /* Tell lockdep we are about to re-enable */
+ trace_hardirqs_on();
+
+ /*
+ * Mark interrupts as soft-enabled and clear the
+ * PACA_IRQ_HARD_DIS from the pending mask since we
+ * are about to hard enable as well as a side effect
+ * of entering the low power state.
+ */
+ local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
+ irq_soft_mask_set(IRQS_ENABLED);
+
+ /* Tell the caller to enter the low power state */
+ return true;
+}
+
+#ifdef CONFIG_PPC_BOOK3S
+/*
+ * This is for idle sequences that return with IRQs off, but the
+ * idle state itself wakes on interrupt. Tell the irq tracer that
+ * IRQs are enabled for the duration of idle so it does not get long
+ * off times. Must be paired with fini_irq_for_idle_irqsoff.
+ */
+bool prep_irq_for_idle_irqsoff(void)
+{
+ WARN_ON(!irqs_disabled());
+
+ /*
+ * First we need to hard disable to ensure no interrupt
+ * occurs before we effectively enter the low power state
+ */
+ __hard_irq_disable();
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
+ /*
+ * If anything happened while we were soft-disabled,
+ * we return now and do not enter the low power state.
+ */
+ if (lazy_irq_pending())
+ return false;
+
+ /* Tell lockdep we are about to re-enable */
+ trace_hardirqs_on();
+
+ return true;
+}
+
+/*
+ * Take the SRR1 wakeup reason, index into this table to find the
+ * appropriate irq_happened bit.
+ *
+ * System reset exceptions taken in idle state also come through here,
+ * but they are NMI interrupts so do not need to wait for IRQs to be
+ * restored, and should be taken as early as practical. These are marked
+ * with 0xff in the table. The Power ISA specifies 0100b as the system
+ * reset interrupt reason.
+ */
+#define IRQ_SYSTEM_RESET 0xff
+
+static const u8 srr1_to_lazyirq[0x10] = {
+ 0, 0, 0,
+ PACA_IRQ_DBELL,
+ IRQ_SYSTEM_RESET,
+ PACA_IRQ_DBELL,
+ PACA_IRQ_DEC,
+ 0,
+ PACA_IRQ_EE,
+ PACA_IRQ_EE,
+ PACA_IRQ_HMI,
+ 0, 0, 0, 0, 0 };
+
+void replay_system_reset(void)
+{
+ struct pt_regs regs;
+
+ ppc_save_regs(&regs);
+ regs.trap = 0x100;
+ get_paca()->in_nmi = 1;
+ system_reset_exception(&regs);
+ get_paca()->in_nmi = 0;
+}
+EXPORT_SYMBOL_GPL(replay_system_reset);
+
+void irq_set_pending_from_srr1(unsigned long srr1)
+{
+ unsigned int idx = (srr1 & SRR1_WAKEMASK_P8) >> 18;
+ u8 reason = srr1_to_lazyirq[idx];
+
+ /*
+ * Take the system reset now, which is immediately after registers
+ * are restored from idle. It's an NMI, so interrupts need not be
+ * re-enabled before it is taken.
+ */
+ if (unlikely(reason == IRQ_SYSTEM_RESET)) {
+ replay_system_reset();
+ return;
+ }
+
+ if (reason == PACA_IRQ_DBELL) {
+ /*
+ * When doorbell triggers a system reset wakeup, the message
+ * is not cleared, so if the doorbell interrupt is replayed
+ * and the IPI handled, the doorbell interrupt would still
+ * fire when EE is enabled.
+ *
+ * To avoid taking the superfluous doorbell interrupt,
+ * execute a msgclr here before the interrupt is replayed.
+ */
+ ppc_msgclr(PPC_DBELL_MSGTYPE);
+ }
+
+ /*
+ * The 0 index (SRR1[42:45]=b0000) must always evaluate to 0,
+ * so this can be called unconditionally with the SRR1 wake
+ * reason as returned by the idle code, which uses 0 to mean no
+ * interrupt.
+ *
+ * If a future CPU was to designate this as an interrupt reason,
+ * then a new index for no interrupt must be assigned.
+ */
+ local_paca->irq_happened |= reason;
+}
+#endif /* CONFIG_PPC_BOOK3S */
+
+/*
+ * Force a replay of the external interrupt handler on this CPU.
+ */
+void force_external_irq_replay(void)
+{
+ /*
+ * This must only be called with interrupts soft-disabled,
+ * the replay will happen when re-enabling.
+ */
+ WARN_ON(!arch_irqs_disabled());
+
+ /*
+ * Interrupts must always be hard disabled before irq_happened is
+ * modified (to prevent lost update in case of interrupt between
+ * load and store).
+ */
+ __hard_irq_disable();
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
+ /* Indicate in the PACA that we have an interrupt to replay */
+ local_paca->irq_happened |= PACA_IRQ_EE;
+}
+
+static int __init setup_noirqdistrib(char *str)
+{
+ distribute_irqs = 0;
+ return 1;
+}
+
+__setup("noirqdistrib", setup_noirqdistrib);
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 1c97c0f177ae..912d4f8a13be 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -269,7 +269,7 @@ static int try_to_emulate(struct kprobe *p, struct pt_regs *regs)
* So, we should never get here... but, its still
* good to catch them, just in case...
*/
- printk("Can't step on instruction %s\n", ppc_inst_as_str(insn));
+ printk("Can't step on instruction %08lx\n", ppc_inst_as_ulong(insn));
BUG();
} else {
/*
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index 18173199b79d..6c5d30fba766 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -756,7 +756,7 @@ void __init mce_init(void)
mce_info = memblock_alloc_try_nid(sizeof(*mce_info),
__alignof__(*mce_info),
MEMBLOCK_LOW_LIMIT,
- limit, cpu_to_node(i));
+ limit, early_cpu_to_node(i));
if (!mce_info)
goto err;
paca_ptrs[i]->mce_info = mce_info;
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 068410cd54a3..31de91c8359c 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -39,6 +39,7 @@
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/eeh.h>
+#include <asm/setup.h>
#include "../../../drivers/pci/pci.h"
@@ -67,23 +68,35 @@ void __init set_pci_dma_ops(const struct dma_map_ops *dma_ops)
pci_dma_ops = dma_ops;
}
-/*
- * This function should run under locking protection, specifically
- * hose_spinlock.
- */
static int get_phb_number(struct device_node *dn)
{
int ret, phb_id = -1;
- u32 prop_32;
u64 prop;
/*
* Try fixed PHB numbering first, by checking archs and reading
- * the respective device-tree properties. Firstly, try powernv by
- * reading "ibm,opal-phbid", only present in OPAL environment.
+ * the respective device-tree properties. Firstly, try reading
+ * standard "linux,pci-domain", then try reading "ibm,opal-phbid"
+ * (only present in powernv OPAL environment), then try device-tree
+	 * alias and, as a last resort, use the lower bits of the "reg" property.
*/
- ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop);
+ ret = of_get_pci_domain_nr(dn);
+ if (ret >= 0) {
+ prop = ret;
+ ret = 0;
+ }
+ if (ret)
+ ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop);
+
if (ret) {
+ ret = of_alias_get_id(dn, "pci");
+ if (ret >= 0) {
+ prop = ret;
+ ret = 0;
+ }
+ }
+ if (ret) {
+ u32 prop_32;
ret = of_property_read_u32_index(dn, "reg", 1, &prop_32);
prop = prop_32;
}
@@ -91,18 +104,20 @@ static int get_phb_number(struct device_node *dn)
if (!ret)
phb_id = (int)(prop & (MAX_PHBS - 1));
+ spin_lock(&hose_spinlock);
+
/* We need to be sure to not use the same PHB number twice. */
if ((phb_id >= 0) && !test_and_set_bit(phb_id, phb_bitmap))
- return phb_id;
+ goto out_unlock;
- /*
- * If not pseries nor powernv, or if fixed PHB numbering tried to add
- * the same PHB number twice, then fallback to dynamic PHB numbering.
- */
+ /* If everything fails then fallback to dynamic PHB numbering. */
phb_id = find_first_zero_bit(phb_bitmap, MAX_PHBS);
BUG_ON(phb_id >= MAX_PHBS);
set_bit(phb_id, phb_bitmap);
+out_unlock:
+ spin_unlock(&hose_spinlock);
+
return phb_id;
}
@@ -113,10 +128,13 @@ struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
if (phb == NULL)
return NULL;
- spin_lock(&hose_spinlock);
+
phb->global_number = get_phb_number(dev);
+
+ spin_lock(&hose_spinlock);
list_add_tail(&phb->list_node, &hose_list);
spin_unlock(&hose_spinlock);
+
phb->dn = dev;
phb->is_dynamic = slab_is_available();
#ifdef CONFIG_PPC64
@@ -1087,7 +1105,7 @@ void pcibios_fixup_bus(struct pci_bus *bus)
*/
pci_read_bridge_bases(bus);
- /* Now fixup the bus bus */
+ /* Now fixup the bus */
pcibios_setup_bus_self(bus);
}
EXPORT_SYMBOL(pcibios_fixup_bus);
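
The get_phb_number() rework above does two things: it takes hose_spinlock only around the bitmap update instead of requiring callers to hold it, and it extends fixed PHB numbering to an ordered list of device-tree sources: the standard "linux,pci-domain" property, then "ibm,opal-phbid" (powernv/OPAL only), then a "pci" alias, and finally the low bits of "reg". A compressed sketch of just that lookup order, using the OF helpers named in the hunk:

	/* Sketch: resolve a fixed PHB/domain number from the device tree. */
	static int phb_number_from_dt(struct device_node *dn, u64 *prop)
	{
		u32 prop_32;
		int ret;

		ret = of_get_pci_domain_nr(dn);		/* "linux,pci-domain" */
		if (ret >= 0) {
			*prop = ret;
			return 0;
		}

		if (!of_property_read_u64(dn, "ibm,opal-phbid", prop))
			return 0;			/* powernv / OPAL firmware */

		ret = of_alias_get_id(dn, "pci");	/* /aliases "pciN" entry */
		if (ret >= 0) {
			*prop = ret;
			return 0;
		}

		ret = of_property_read_u32_index(dn, "reg", 1, &prop_32);
		if (!ret)
			*prop = prop_32;		/* last resort: low "reg" bits */
		return ret;
	}
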
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 5a174936c9a0..433965bf37b4 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -36,18 +36,13 @@ int pcibios_assign_bus_offset = 1;
EXPORT_SYMBOL(isa_io_base);
EXPORT_SYMBOL(pci_dram_offset);
-void __init pcibios_make_OF_bus_map(void);
-
static void fixup_cpc710_pci64(struct pci_dev* dev);
-static u8* pci_to_OF_bus_map;
/* By default, we don't re-assign bus numbers. We do this only on
* some pmacs
*/
static int pci_assign_all_buses;
-static int pci_bus_count;
-
/* This will remain NULL for now, until isa-bridge.c is made common
* to both 32-bit and 64-bit.
*/
@@ -67,6 +62,11 @@ fixup_cpc710_pci64(struct pci_dev* dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CPC710_PCI64, fixup_cpc710_pci64);
+#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_CHRP)
+
+static u8* pci_to_OF_bus_map;
+static int pci_bus_count;
+
/*
* Functions below are used on OpenFirmware machines.
*/
@@ -108,7 +108,7 @@ make_one_node_map(struct device_node* node, u8 pci_bus)
}
}
-void __init
+static void __init
pcibios_make_OF_bus_map(void)
{
int i;
@@ -154,6 +154,7 @@ pcibios_make_OF_bus_map(void)
}
+#ifdef CONFIG_PPC_PMAC
/*
* Returns the PCI device matching a given OF node
*/
@@ -193,7 +194,9 @@ int pci_device_from_OF_node(struct device_node *node, u8 *bus, u8 *devfn)
return -ENODEV;
}
EXPORT_SYMBOL(pci_device_from_OF_node);
+#endif
+#ifdef CONFIG_PPC_CHRP
/* We create the "pci-OF-bus-map" property now so it appears in the
* /proc device tree
*/
@@ -218,6 +221,9 @@ pci_create_OF_bus_map(void)
of_node_put(dn);
}
}
+#endif
+
+#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_CHRP) */
void pcibios_setup_phb_io_space(struct pci_controller *hose)
{
@@ -233,7 +239,9 @@ void pcibios_setup_phb_io_space(struct pci_controller *hose)
static int __init pcibios_init(void)
{
struct pci_controller *hose, *tmp;
+#ifndef CONFIG_PPC_PCI_BUS_NUM_DOMAIN_DEPENDENT
int next_busno = 0;
+#endif
printk(KERN_INFO "PCI: Probing PCI hardware\n");
@@ -242,14 +250,20 @@ static int __init pcibios_init(void)
/* Scan all of the recorded PCI controllers. */
list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
+#ifndef CONFIG_PPC_PCI_BUS_NUM_DOMAIN_DEPENDENT
if (pci_assign_all_buses)
hose->first_busno = next_busno;
+#endif
hose->last_busno = 0xff;
pcibios_scan_phb(hose);
pci_bus_add_devices(hose->bus);
+#ifndef CONFIG_PPC_PCI_BUS_NUM_DOMAIN_DEPENDENT
if (pci_assign_all_buses || next_busno <= hose->last_busno)
next_busno = hose->last_busno + pcibios_assign_bus_offset;
+#endif
}
+
+#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_CHRP)
pci_bus_count = next_busno;
/* OpenFirmware based machines need a map of OF bus
@@ -258,6 +272,7 @@ static int __init pcibios_init(void)
*/
if (pci_assign_all_buses)
pcibios_make_OF_bus_map();
+#endif
/* Call common code to handle resource allocation */
pcibios_resource_survey();
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 19b03ddf5631..0c7cfb9fab04 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -286,6 +286,7 @@ int pcibus_to_node(struct pci_bus *bus)
EXPORT_SYMBOL(pcibus_to_node);
#endif
+#ifdef CONFIG_PPC_PMAC
int pci_device_from_OF_node(struct device_node *np, u8 *bus, u8 *devfn)
{
if (!PCI_DN(np))
@@ -294,3 +295,4 @@ int pci_device_from_OF_node(struct device_node *np, u8 *bus, u8 *devfn)
*devfn = PCI_DN(np)->devfn;
return 0;
}
+#endif
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index 938ab8838ab5..7a35fc25a304 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -259,7 +259,7 @@ void remove_sriov_vf_pdns(struct pci_dev *pdev)
if (edev) {
/*
* We allocate pci_dn's for the totalvfs count,
- * but only only the vfs that were activated
+ * but only the vfs that were activated
* have a configured PE.
*/
if (edev->pe)
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index feae8509b59c..a730b951b64b 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -44,7 +44,7 @@
#include <asm/iommu.h>
#include <asm/btext.h>
#include <asm/sections.h>
-#include <asm/machdep.h>
+#include <asm/setup.h>
#include <asm/pci-bridge.h>
#include <asm/kexec.h>
#include <asm/opal.h>
@@ -54,6 +54,7 @@
#include <asm/dt_cpu_ftrs.h>
#include <asm/drmem.h>
#include <asm/ultravisor.h>
+#include <asm/prom.h>
#include <mm/mmu_decl.h>
@@ -751,6 +752,13 @@ void __init early_init_devtree(void *params)
early_init_dt_scan_root();
early_init_dt_scan_memory_ppc();
+ /*
+ * As generic code authors expect to be able to use static keys
+ * in early_param() handlers, we initialize the static keys just
+ * before parsing early params (it's fine to call jump_label_init()
+ * more than once).
+ */
+ jump_label_init();
parse_early_param();
/* make sure we've parsed cmdline for mem= before this */
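
The prom.c hunk calls jump_label_init() immediately before parse_early_param() because generic early_param() handlers are allowed to flip static keys, and static_branch_enable() is only safe once the jump-label machinery is initialised (calling jump_label_init() more than once is harmless). A hypothetical handler showing the pattern this enables; the parameter name and key are invented for illustration:

	/* Hypothetical example, not an existing kernel parameter. */
	DEFINE_STATIC_KEY_FALSE(example_fastpath_key);

	static int __init parse_example_fastpath(char *arg)
	{
		/* Safe only because jump_label_init() has already run. */
		static_branch_enable(&example_fastpath_key);
		return 0;
	}
	early_param("example_fastpath", parse_example_fastpath);
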
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 13d6cb188835..a6669c40c1db 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -42,7 +42,7 @@
#include <asm/iommu.h>
#include <asm/btext.h>
#include <asm/sections.h>
-#include <asm/machdep.h>
+#include <asm/setup.h>
#include <asm/asm-prototypes.h>
#include <asm/ultravisor-api.h>
diff --git a/arch/powerpc/kernel/ptrace/ptrace-vsx.c b/arch/powerpc/kernel/ptrace/ptrace-vsx.c
index 1da4303128ef..7df08004c47d 100644
--- a/arch/powerpc/kernel/ptrace/ptrace-vsx.c
+++ b/arch/powerpc/kernel/ptrace/ptrace-vsx.c
@@ -71,7 +71,7 @@ int fpr_set(struct task_struct *target, const struct user_regset *regset,
}
/*
- * Currently to set and and get all the vsx state, you need to call
+ * Currently to set and get all the vsx state, you need to call
* the fp and VMX calls as well. This only get/sets the lower 32
* 128bit VSX registers.
*/
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 1a02629ec70b..dd98f43bd685 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -171,6 +171,14 @@ EXPORT_SYMBOL_GPL(machine_power_off);
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);
+size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
+{
+ if (max_longs && ppc_md.get_random_seed && ppc_md.get_random_seed(v))
+ return 1;
+ return 0;
+}
+EXPORT_SYMBOL(arch_get_random_seed_longs);
+
void machine_halt(void)
{
machine_shutdown();
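A hedged usage sketch for the arch_get_random_seed_longs() helper added above. The return value is the number of longs actually filled, so a caller can fall back cleanly when ppc_md.get_random_seed is absent; the caller name and the <asm/archrandom.h> include are assumptions for illustration:

#include <linux/types.h>
#include <asm/archrandom.h>

static unsigned long example_get_seed(void)
{
	unsigned long seed = 0;

	/* Ask for at most one long; 0 filled means no platform entropy hook. */
	if (arch_get_random_seed_longs(&seed, 1) != 1)
		seed = 0;
	return seed;
}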
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 5761f08dae95..2b2d0b0fbb30 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -113,7 +113,6 @@ void __init setup_tlb_core_data(void)
* Should we panic instead?
*/
WARN_ONCE(smt_enabled_at_boot >= 2 &&
- !mmu_has_feature(MMU_FTR_USE_TLBRSRV) &&
book3e_htw_mode != PPC_HTW_E6500,
"%s: unsupported MMU configuration\n", __func__);
}
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 472596a109e2..86bb5bb4c143 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -377,9 +377,12 @@ static long notrace __unsafe_restore_sigcontext(struct task_struct *tsk, sigset_
unsafe_get_user(set->sig[0], &sc->oldmask, efault_out);
/*
- * Force reload of FP/VEC.
- * This has to be done before copying stuff into tsk->thread.fpr/vr
- * for the reasons explained in the previous comment.
+ * Force reload of FP/VEC/VSX so userspace sees any changes.
+ * Clear these bits from the user process' MSR before copying into the
+ * thread struct. If we are rescheduled or preempted and another task
+ * uses FP/VEC/VSX, and this process has the MSR bits set, then the
+ * context switch code will save the current CPU state into the
+ * thread_struct - possibly overwriting the data we are updating here.
*/
regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX));
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index bcefab484ea6..169703fead57 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -35,6 +35,7 @@
#include <linux/stackprotector.h>
#include <linux/pgtable.h>
#include <linux/clockchips.h>
+#include <linux/kexec.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
@@ -55,7 +56,6 @@
#endif
#include <asm/vdso.h>
#include <asm/debug.h>
-#include <asm/kexec.h>
#include <asm/cpu_has_feature.h>
#include <asm/ftrace.h>
#include <asm/kup.h>
@@ -619,20 +619,6 @@ void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
}
#endif
-#ifdef CONFIG_NMI_IPI
-static void crash_stop_this_cpu(struct pt_regs *regs)
-#else
-static void crash_stop_this_cpu(void *dummy)
-#endif
-{
- /*
- * Just busy wait here and avoid marking CPU as offline to ensure
- * register data is captured appropriately.
- */
- while (1)
- cpu_relax();
-}
-
void crash_smp_send_stop(void)
{
static bool stopped = false;
@@ -651,11 +637,14 @@ void crash_smp_send_stop(void)
stopped = true;
-#ifdef CONFIG_NMI_IPI
- smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_stop_this_cpu, 1000000);
-#else
- smp_call_function(crash_stop_this_cpu, NULL, 0);
-#endif /* CONFIG_NMI_IPI */
+#ifdef CONFIG_KEXEC_CORE
+ if (kexec_crash_image) {
+ crash_kexec_prepare();
+ return;
+ }
+#endif
+
+ smp_send_stop();
}
#ifdef CONFIG_NMI_IPI
@@ -1674,13 +1663,6 @@ void start_secondary(void *unused)
BUG();
}
-#ifdef CONFIG_PROFILING
-int setup_profiling_timer(unsigned int multiplier)
-{
- return 0;
-}
-#endif
-
static void __init fixup_topology(void)
{
int i;
diff --git a/arch/powerpc/kernel/syscall.c b/arch/powerpc/kernel/syscall.c
new file mode 100644
index 000000000000..81ace9e8b72b
--- /dev/null
+++ b/arch/powerpc/kernel/syscall.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/compat.h>
+#include <linux/context_tracking.h>
+#include <linux/randomize_kstack.h>
+
+#include <asm/interrupt.h>
+#include <asm/kup.h>
+#include <asm/syscall.h>
+#include <asm/time.h>
+#include <asm/tm.h>
+#include <asm/unistd.h>
+
+
+typedef long (*syscall_fn)(long, long, long, long, long, long);
+
+/* Has to run notrace because it is entered not completely "reconciled" */
+notrace long system_call_exception(long r3, long r4, long r5,
+ long r6, long r7, long r8,
+ unsigned long r0, struct pt_regs *regs)
+{
+ long ret;
+ syscall_fn f;
+
+ kuap_lock();
+
+ add_random_kstack_offset();
+ regs->orig_gpr3 = r3;
+
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ BUG_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
+
+ trace_hardirqs_off(); /* finish reconciling */
+
+ CT_WARN_ON(ct_state() == CONTEXT_KERNEL);
+ user_exit_irqoff();
+
+ BUG_ON(regs_is_unrecoverable(regs));
+ BUG_ON(!(regs->msr & MSR_PR));
+ BUG_ON(arch_irq_disabled_regs(regs));
+
+#ifdef CONFIG_PPC_PKEY
+ if (mmu_has_feature(MMU_FTR_PKEY)) {
+ unsigned long amr, iamr;
+ bool flush_needed = false;
+ /*
+ * When entering from userspace we mostly have the AMR/IAMR
+ * different from kernel default values. Hence don't compare.
+ */
+ amr = mfspr(SPRN_AMR);
+ iamr = mfspr(SPRN_IAMR);
+ regs->amr = amr;
+ regs->iamr = iamr;
+ if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
+ mtspr(SPRN_AMR, AMR_KUAP_BLOCKED);
+ flush_needed = true;
+ }
+ if (mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
+ mtspr(SPRN_IAMR, AMR_KUEP_BLOCKED);
+ flush_needed = true;
+ }
+ if (flush_needed)
+ isync();
+ } else
+#endif
+ kuap_assert_locked();
+
+ booke_restore_dbcr0();
+
+ account_cpu_user_entry();
+
+ account_stolen_time();
+
+ /*
+ * This is not required for the syscall exit path, but makes the
+ * stack frame look nicer. If this was initialised in the first stack
+ * frame, or if the unwinder was taught the first stack frame always
+ * returns to user with IRQS_ENABLED, this store could be avoided!
+ */
+ irq_soft_mask_regs_set_state(regs, IRQS_ENABLED);
+
+ /*
+ * If system call is called with TM active, set _TIF_RESTOREALL to
+ * prevent RFSCV being used to return to userspace, because POWER9
+ * TM implementation has problems with this instruction returning to
+ * transactional state. Final register values are not relevant because
+ * the transaction will be aborted upon return anyway. Or in the case
+ * of unsupported_scv SIGILL fault, the return state does not much
+ * matter because it's an edge case.
+ */
+ if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
+ unlikely(MSR_TM_TRANSACTIONAL(regs->msr)))
+ set_bits(_TIF_RESTOREALL, &current_thread_info()->flags);
+
+ /*
+ * If the system call was made with a transaction active, doom it and
+ * return without performing the system call. Unless it was an
+ * unsupported scv vector, in which case it's treated like an illegal
+ * instruction.
+ */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (unlikely(MSR_TM_TRANSACTIONAL(regs->msr)) &&
+ !trap_is_unsupported_scv(regs)) {
+ /* Enable TM in the kernel, and disable EE (for scv) */
+ hard_irq_disable();
+ mtmsr(mfmsr() | MSR_TM);
+
+ /* tabort, this dooms the transaction, nothing else */
+ asm volatile(".long 0x7c00071d | ((%0) << 16)"
+ :: "r"(TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT));
+
+ /*
+ * Userspace will never see the return value. Execution will
+ * resume after the tbegin. of the aborted transaction with the
+ * checkpointed register state. A context switch could occur
+ * or signal delivered to the process before resuming the
+ * doomed transaction context, but that should all be handled
+ * as expected.
+ */
+ return -ENOSYS;
+ }
+#endif // CONFIG_PPC_TRANSACTIONAL_MEM
+
+ local_irq_enable();
+
+ if (unlikely(read_thread_flags() & _TIF_SYSCALL_DOTRACE)) {
+ if (unlikely(trap_is_unsupported_scv(regs))) {
+ /* Unsupported scv vector */
+ _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
+ return regs->gpr[3];
+ }
+ /*
+ * We use the return value of do_syscall_trace_enter() as the
+ * syscall number. If the syscall was rejected for any reason
+ * do_syscall_trace_enter() returns an invalid syscall number
+ * and the test against NR_syscalls will fail and the return
+ * value to be used is in regs->gpr[3].
+ */
+ r0 = do_syscall_trace_enter(regs);
+ if (unlikely(r0 >= NR_syscalls))
+ return regs->gpr[3];
+ r3 = regs->gpr[3];
+ r4 = regs->gpr[4];
+ r5 = regs->gpr[5];
+ r6 = regs->gpr[6];
+ r7 = regs->gpr[7];
+ r8 = regs->gpr[8];
+
+ } else if (unlikely(r0 >= NR_syscalls)) {
+ if (unlikely(trap_is_unsupported_scv(regs))) {
+ /* Unsupported scv vector */
+ _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
+ return regs->gpr[3];
+ }
+ return -ENOSYS;
+ }
+
+ /* May be faster to do array_index_nospec? */
+ barrier_nospec();
+
+ if (unlikely(is_compat_task())) {
+ f = (void *)compat_sys_call_table[r0];
+
+ r3 &= 0x00000000ffffffffULL;
+ r4 &= 0x00000000ffffffffULL;
+ r5 &= 0x00000000ffffffffULL;
+ r6 &= 0x00000000ffffffffULL;
+ r7 &= 0x00000000ffffffffULL;
+ r8 &= 0x00000000ffffffffULL;
+
+ } else {
+ f = (void *)sys_call_table[r0];
+ }
+
+ ret = f(r3, r4, r5, r6, r7, r8);
+
+ /*
+ * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
+ * so the maximum stack offset is 1k bytes (10 bits).
+ *
+ * The actual entropy will be further reduced by the compiler when
+ * applying stack alignment constraints: the powerpc architecture
+ * may have two kinds of stack alignment (16-bytes and 8-bytes).
+ *
+ * So the resulting 6 or 7 bits of entropy is seen in SP[9:4] or SP[9:3].
+ */
+ choose_random_kstack_offset(mftb());
+
+ return ret;
+}
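To make the entropy comment above concrete, a small stand-alone sketch of the arithmetic (plain C, not kernel code; the 0x3ff mask mirrors the 10-bit limit the comment attributes to KSTACK_OFFSET_MAX()):

#include <stdio.h>

int main(void)
{
	unsigned long tb = 0x123456789abcdefUL;   /* stand-in for mftb() */
	unsigned long ten_bits = tb & 0x3ff;      /* at most 1k bytes of offset */
	unsigned long sp16 = ten_bits & ~0xfUL;   /* 16-byte alignment keeps SP[9:4], 6 bits */
	unsigned long sp8  = ten_bits & ~0x7UL;   /* 8-byte alignment keeps SP[9:3], 7 bits */

	printf("tb=%#lx ten_bits=%#lx sp16=%#lx sp8=%#lx\n", tb, ten_bits, sp16, sp8);
	return 0;
}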
diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
index 2a893e06e4f1..7b85c3b460a3 100644
--- a/arch/powerpc/kernel/trace/ftrace.c
+++ b/arch/powerpc/kernel/trace/ftrace.c
@@ -69,8 +69,8 @@ ftrace_modify_code(unsigned long ip, ppc_inst_t old, ppc_inst_t new)
/* Make sure it is what we expect it to be */
if (!ppc_inst_equal(replaced, old)) {
- pr_err("%p: replaced (%s) != old (%s)",
- (void *)ip, ppc_inst_as_str(replaced), ppc_inst_as_str(old));
+ pr_err("%p: replaced (%08lx) != old (%08lx)", (void *)ip,
+ ppc_inst_as_ulong(replaced), ppc_inst_as_ulong(old));
return -EINVAL;
}
@@ -125,9 +125,9 @@ __ftrace_make_nop(struct module *mod,
return -EFAULT;
}
- /* Make sure that that this is still a 24bit jump */
+ /* Make sure that this is still a 24bit jump */
if (!is_bl_op(op)) {
- pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op));
+ pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op));
return -EINVAL;
}
@@ -159,8 +159,8 @@ __ftrace_make_nop(struct module *mod,
/* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_MFLR(_R0))) &&
!ppc_inst_equal(op, ppc_inst(PPC_INST_STD_LR))) {
- pr_err("Unexpected instruction %s around bl _mcount\n",
- ppc_inst_as_str(op));
+ pr_err("Unexpected instruction %08lx around bl _mcount\n",
+ ppc_inst_as_ulong(op));
return -EINVAL;
}
} else if (IS_ENABLED(CONFIG_PPC64)) {
@@ -174,7 +174,8 @@ __ftrace_make_nop(struct module *mod,
}
if (!ppc_inst_equal(op, ppc_inst(PPC_INST_LD_TOC))) {
- pr_err("Expected %08lx found %s\n", PPC_INST_LD_TOC, ppc_inst_as_str(op));
+ pr_err("Expected %08lx found %08lx\n", PPC_INST_LD_TOC,
+ ppc_inst_as_ulong(op));
return -EINVAL;
}
}
@@ -310,9 +311,9 @@ static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr)
return -EFAULT;
}
- /* Make sure that that this is still a 24bit jump */
+ /* Make sure that this is still a 24bit jump */
if (!is_bl_op(op)) {
- pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op));
+ pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op));
return -EINVAL;
}
@@ -392,11 +393,11 @@ int ftrace_make_nop(struct module *mod,
*/
static bool expected_nop_sequence(void *ip, ppc_inst_t op0, ppc_inst_t op1)
{
- if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1))
+ if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
+ return ppc_inst_equal(op0, ppc_inst(PPC_RAW_NOP()));
+ else
return ppc_inst_equal(op0, ppc_inst(PPC_RAW_BRANCH(8))) &&
ppc_inst_equal(op1, ppc_inst(PPC_INST_LD_TOC));
- else
- return ppc_inst_equal(op0, ppc_inst(PPC_RAW_NOP()));
}
static int
@@ -411,13 +412,13 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
if (copy_inst_from_kernel_nofault(op, ip))
return -EFAULT;
- if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1) &&
+ if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) &&
copy_inst_from_kernel_nofault(op + 1, ip + 4))
return -EFAULT;
if (!expected_nop_sequence(ip, op[0], op[1])) {
- pr_err("Unexpected call sequence at %p: %s %s\n",
- ip, ppc_inst_as_str(op[0]), ppc_inst_as_str(op[1]));
+ pr_err("Unexpected call sequence at %p: %08lx %08lx\n", ip,
+ ppc_inst_as_ulong(op[0]), ppc_inst_as_ulong(op[1]));
return -EINVAL;
}
@@ -486,7 +487,8 @@ static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr)
}
if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_NOP()))) {
- pr_err("Unexpected call sequence at %p: %s\n", ip, ppc_inst_as_str(op));
+ pr_err("Unexpected call sequence at %p: %08lx\n",
+ ip, ppc_inst_as_ulong(op));
return -EINVAL;
}
@@ -562,9 +564,9 @@ __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
return -EFAULT;
}
- /* Make sure that that this is still a 24bit jump */
+ /* Make sure that this is still a 24bit jump */
if (!is_bl_op(op)) {
- pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op));
+ pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op));
return -EINVAL;
}
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 3aaa50e5c72f..dadfcef5d6db 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1676,7 +1676,7 @@ DEFINE_INTERRUPT_HANDLER(vsx_unavailable_exception)
die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S_64
static void tm_unavailable(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
diff --git a/arch/powerpc/kernel/vdso/cacheflush.S b/arch/powerpc/kernel/vdso/cacheflush.S
index d4e43ab2d5df..0085ae464dac 100644
--- a/arch/powerpc/kernel/vdso/cacheflush.S
+++ b/arch/powerpc/kernel/vdso/cacheflush.S
@@ -91,6 +91,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
3:
crclr cr0*4+so
sync
+ icbi 0,r1
isync
li r3,0
blr
diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
index 7d28b9553654..dbcc4a793f0b 100644
--- a/arch/powerpc/kernel/watchdog.c
+++ b/arch/powerpc/kernel/watchdog.c
@@ -91,6 +91,10 @@ static cpumask_t wd_smp_cpus_pending;
static cpumask_t wd_smp_cpus_stuck;
static u64 wd_smp_last_reset_tb;
+#ifdef CONFIG_PPC_PSERIES
+static u64 wd_timeout_pct;
+#endif
+
/*
* Try to take the exclusive watchdog action / NMI IPI / printing lock.
* wd_smp_lock must be held. If this fails, we should return and wait
@@ -353,7 +357,7 @@ static void watchdog_timer_interrupt(int cpu)
if (__wd_nmi_output && xchg(&__wd_nmi_output, 0)) {
/*
* Something has called printk from NMI context. It might be
- * stuck, so this this triggers a flush that will get that
+ * stuck, so this triggers a flush that will get that
* printk output to the console.
*
* See wd_lockup_ipi.
@@ -527,7 +531,13 @@ static int stop_watchdog_on_cpu(unsigned int cpu)
static void watchdog_calc_timeouts(void)
{
- wd_panic_timeout_tb = watchdog_thresh * ppc_tb_freq;
+ u64 threshold = watchdog_thresh;
+
+#ifdef CONFIG_PPC_PSERIES
+ threshold += (READ_ONCE(wd_timeout_pct) * threshold) / 100;
+#endif
+
+ wd_panic_timeout_tb = threshold * ppc_tb_freq;
/* Have the SMP detector trigger a bit later */
wd_smp_panic_timeout_tb = wd_panic_timeout_tb * 3 / 2;
@@ -570,3 +580,12 @@ int __init watchdog_nmi_probe(void)
}
return 0;
}
+
+#ifdef CONFIG_PPC_PSERIES
+void watchdog_nmi_set_timeout_pct(u64 pct)
+{
+ pr_info("Set the NMI watchdog timeout factor to %llu%%\n", pct);
+ WRITE_ONCE(wd_timeout_pct, pct);
+ lockup_detector_reconfigure();
+}
+#endif
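A sketch of the scaled-timeout arithmetic introduced above; the helper name is invented, and the real code updates the wd_* globals in place. With watchdog_thresh at 30 seconds and a 25% factor, integer division gives 30 + 7 = 37 seconds worth of timebase ticks:

#include <linux/types.h>

static inline u64 example_scaled_timeout_tb(u64 thresh_s, u64 pct, u64 tb_freq)
{
	u64 threshold = thresh_s;

	threshold += (pct * threshold) / 100;	/* e.g. 30 + (25 * 30) / 100 = 37 */
	return threshold * tb_freq;		/* seconds -> timebase ticks */
}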
diff --git a/arch/powerpc/kexec/core.c b/arch/powerpc/kexec/core.c
index 7ab4980fe13a..cf84bfe9e27e 100644
--- a/arch/powerpc/kexec/core.c
+++ b/arch/powerpc/kexec/core.c
@@ -19,6 +19,8 @@
#include <asm/machdep.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/firmware.h>
void machine_kexec_mask_interrupts(void) {
unsigned int i;
diff --git a/arch/powerpc/kexec/crash.c b/arch/powerpc/kexec/crash.c
index 80f54723cf6d..252724ed666a 100644
--- a/arch/powerpc/kexec/crash.c
+++ b/arch/powerpc/kexec/crash.c
@@ -40,6 +40,14 @@
#define REAL_MODE_TIMEOUT 10000
static int time_to_dump;
+
+/*
+ * In case of system reset, secondary CPUs enter crash_kexec_secondary without
+ * having to send an IPI explicitly. So, indicate if the crash is via
+ * system reset to avoid sending another IPI.
+ */
+static int is_via_system_reset;
+
/*
* crash_wake_offline should be set to 1 by platforms that intend to wake
* up offline cpus prior to jumping to a kdump kernel. Currently powernv
@@ -101,7 +109,7 @@ void crash_ipi_callback(struct pt_regs *regs)
/* NOTREACHED */
}
-static void crash_kexec_prepare_cpus(int cpu)
+static void crash_kexec_prepare_cpus(void)
{
unsigned int msecs;
volatile unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
@@ -113,7 +121,15 @@ static void crash_kexec_prepare_cpus(int cpu)
if (crash_wake_offline)
ncpus = num_present_cpus() - 1;
- crash_send_ipi(crash_ipi_callback);
+ /*
+ * If we came in via system reset, secondaries enter via crash_kexec_secondary().
+ * So, wait a while for the secondary CPUs to enter for that case.
+ * Else, send IPI to all other CPUs.
+ */
+ if (is_via_system_reset)
+ mdelay(PRIMARY_TIMEOUT);
+ else
+ crash_send_ipi(crash_ipi_callback);
smp_wmb();
again:
@@ -202,7 +218,7 @@ void crash_kexec_secondary(struct pt_regs *regs)
#else /* ! CONFIG_SMP */
-static void crash_kexec_prepare_cpus(int cpu)
+static void crash_kexec_prepare_cpus(void)
{
/*
* move the secondaries to us so that we can copy
@@ -248,6 +264,32 @@ noinstr static void __maybe_unused crash_kexec_wait_realmode(int cpu)
static inline void crash_kexec_wait_realmode(int cpu) {}
#endif /* CONFIG_SMP && CONFIG_PPC64 */
+void crash_kexec_prepare(void)
+{
+ /* Avoid hardlocking with irresponsive CPU holding logbuf_lock */
+ printk_deferred_enter();
+
+ /*
+ * This function is only called after the system
+ * has panicked or is otherwise in a critical state.
+ * The minimum amount of code to allow a kexec'd kernel
+ * to run successfully needs to happen here.
+ *
+ * In practice this means stopping other cpus in
+ * an SMP system.
+ * The kernel is broken so disable interrupts.
+ */
+ hard_irq_disable();
+
+ /*
+ * Make a note of crashing cpu. Will be used in machine_kexec
+ * such that another IPI will not be sent.
+ */
+ crashing_cpu = smp_processor_id();
+
+ crash_kexec_prepare_cpus();
+}
+
/*
* Register a function to be called on shutdown. Only use this if you
* can't reset your device in the second kernel.
@@ -311,35 +353,10 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
unsigned int i;
int (*old_handler)(struct pt_regs *regs);
- /* Avoid hardlocking with irresponsive CPU holding logbuf_lock */
- printk_deferred_enter();
-
- /*
- * This function is only called after the system
- * has panicked or is otherwise in a critical state.
- * The minimum amount of code to allow a kexec'd kernel
- * to run successfully needs to happen here.
- *
- * In practice this means stopping other cpus in
- * an SMP system.
- * The kernel is broken so disable interrupts.
- */
- hard_irq_disable();
-
- /*
- * Make a note of crashing cpu. Will be used in machine_kexec
- * such that another IPI will not be sent.
- */
- crashing_cpu = smp_processor_id();
-
- /*
- * If we came in via system reset, wait a while for the secondary
- * CPUs to enter.
- */
if (TRAP(regs) == INTERRUPT_SYSTEM_RESET)
- mdelay(PRIMARY_TIMEOUT);
+ is_via_system_reset = 1;
- crash_kexec_prepare_cpus(crashing_cpu);
+ crash_smp_send_stop();
crash_save_cpu(regs, crashing_cpu);
diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c
index b4981b651d9a..349a781cea0b 100644
--- a/arch/powerpc/kexec/file_load_64.c
+++ b/arch/powerpc/kexec/file_load_64.c
@@ -23,6 +23,7 @@
#include <linux/vmalloc.h>
#include <asm/setup.h>
#include <asm/drmem.h>
+#include <asm/firmware.h>
#include <asm/kexec_ranges.h>
#include <asm/crashdump-ppc64.h>
@@ -1038,6 +1039,48 @@ out:
return ret;
}
+static int copy_property(void *fdt, int node_offset, const struct device_node *dn,
+ const char *propname)
+{
+ const void *prop, *fdtprop;
+ int len = 0, fdtlen = 0;
+
+ prop = of_get_property(dn, propname, &len);
+ fdtprop = fdt_getprop(fdt, node_offset, propname, &fdtlen);
+
+ if (fdtprop && !prop)
+ return fdt_delprop(fdt, node_offset, propname);
+ else if (prop)
+ return fdt_setprop(fdt, node_offset, propname, prop, len);
+ else
+ return -FDT_ERR_NOTFOUND;
+}
+
+static int update_pci_dma_nodes(void *fdt, const char *dmapropname)
+{
+ struct device_node *dn;
+ int pci_offset, root_offset, ret = 0;
+
+ if (!firmware_has_feature(FW_FEATURE_LPAR))
+ return 0;
+
+ root_offset = fdt_path_offset(fdt, "/");
+ for_each_node_with_property(dn, dmapropname) {
+ pci_offset = fdt_subnode_offset(fdt, root_offset, of_node_full_name(dn));
+ if (pci_offset < 0)
+ continue;
+
+ ret = copy_property(fdt, pci_offset, dn, "ibm,dma-window");
+ if (ret < 0)
+ break;
+ ret = copy_property(fdt, pci_offset, dn, dmapropname);
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
+}
+
/**
* setup_new_fdt_ppc64 - Update the flattend device-tree of the kernel
* being loaded.
@@ -1099,6 +1142,18 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
if (ret < 0)
goto out;
+#define DIRECT64_PROPNAME "linux,direct64-ddr-window-info"
+#define DMA64_PROPNAME "linux,dma64-ddr-window-info"
+ ret = update_pci_dma_nodes(fdt, DIRECT64_PROPNAME);
+ if (ret < 0)
+ goto out;
+
+ ret = update_pci_dma_nodes(fdt, DMA64_PROPNAME);
+ if (ret < 0)
+ goto out;
+#undef DMA64_PROPNAME
+#undef DIRECT64_PROPNAME
+
/* Update memory reserve map */
ret = get_reserved_memory_ranges(&rmem);
if (ret)
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index ddd88179110a..dcb398d5e009 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -128,9 +128,26 @@ config KVM_BOOK3S_64_PR
and system calls on the host.
config KVM_BOOK3S_HV_EXIT_TIMING
- bool "Detailed timing for hypervisor real-mode code"
+ bool
+
+config KVM_BOOK3S_HV_P9_TIMING
+ bool "Detailed timing for the P9 entry point"
+ select KVM_BOOK3S_HV_EXIT_TIMING
depends on KVM_BOOK3S_HV_POSSIBLE && DEBUG_FS
help
+ Calculate time taken for each vcpu during vcpu entry and
+ exit, time spent inside the guest and time spent handling
+ hypercalls and page faults. The total, minimum and maximum
+ times in nanoseconds together with the number of executions
+ are reported in debugfs in kvm/vm#/vcpu#/timings.
+
+ If unsure, say N.
+
+config KVM_BOOK3S_HV_P8_TIMING
+ bool "Detailed timing for hypervisor real-mode code (for POWER8)"
+ select KVM_BOOK3S_HV_EXIT_TIMING
+ depends on KVM_BOOK3S_HV_POSSIBLE && DEBUG_FS && !KVM_BOOK3S_HV_P9_TIMING
+ help
Calculate time taken for each vcpu in the real-mode guest entry,
exit, and interrupt handling code, plus time spent in the guest
and in nap mode due to idle (cede) while other threads are still
@@ -149,7 +166,7 @@ config KVM_BOOK3S_HV_NESTED_PMU_WORKAROUND
Old nested HV capable Linux guests have a bug where they don't
reflect the PMU in-use status of their L2 guest to the L0 host
while the L2 PMU registers are live. This can result in loss
- of L2 PMU register state, causing perf to not work correctly in
+ of L2 PMU register state, causing perf to not work correctly in
L2 guests.
Selecting this option for the L0 host implements a workaround for
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 0cd23ce07d68..5319d889b184 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -86,6 +86,7 @@ kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
book3s_hv_rm_mmu.o \
book3s_hv_ras.o \
book3s_hv_builtin.o \
+ book3s_hv_p9_perf.o \
$(kvm-book3s_64-builtin-tm-objs-y) \
$(kvm-book3s_64-builtin-xics-objs-y)
endif
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 1ae09992c9ea..bc6a381b5346 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -90,7 +90,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
unsigned long pfn;
/* used to check for invalidations in progress */
- mmu_seq = kvm->mmu_notifier_seq;
+ mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();
/* Get host physical address for gpa */
@@ -151,7 +151,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
cpte = kvmppc_mmu_hpte_cache_next(vcpu);
spin_lock(&kvm->mmu_lock);
- if (!cpte || mmu_notifier_retry(kvm, mmu_seq)) {
+ if (!cpte || mmu_invalidate_retry(kvm, mmu_seq)) {
r = -EAGAIN;
goto out_unlock;
}
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 514fd45c1994..e9744b41a226 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -578,7 +578,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
return -EFAULT;
/* used to check for invalidations in progress */
- mmu_seq = kvm->mmu_notifier_seq;
+ mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();
ret = -EFAULT;
@@ -693,7 +693,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
/* Check if we might have been invalidated; let the guest retry if so */
ret = RESUME_GUEST;
- if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) {
+ if (mmu_invalidate_retry(vcpu->kvm, mmu_seq)) {
unlock_rmap(rmap);
goto out_unlock;
}
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 42851c32ff3b..5d5e12f3bf86 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -22,6 +22,7 @@
#include <asm/ultravisor.h>
#include <asm/kvm_book3s_uvmem.h>
#include <asm/plpar_wrappers.h>
+#include <asm/firmware.h>
/*
* Supported radix tree geometry.
@@ -639,7 +640,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
/* Check if we might have been invalidated; let the guest retry if so */
spin_lock(&kvm->mmu_lock);
ret = -EAGAIN;
- if (mmu_notifier_retry(kvm, mmu_seq))
+ if (mmu_invalidate_retry(kvm, mmu_seq))
goto out_unlock;
/* Now traverse again under the lock and change the tree */
@@ -829,7 +830,7 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
bool large_enable;
/* used to check for invalidations in progress */
- mmu_seq = kvm->mmu_notifier_seq;
+ mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();
/*
@@ -1190,7 +1191,7 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm,
* Increase the mmu notifier sequence number to prevent any page
* fault that read the memslot earlier from writing a PTE.
*/
- kvm->mmu_notifier_seq++;
+ kvm->mmu_invalidate_seq++;
spin_unlock(&kvm->mmu_lock);
}
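The rename above (mmu_notifier_seq to mmu_invalidate_seq, mmu_notifier_retry() to mmu_invalidate_retry()) keeps the fault-handler pattern unchanged. A condensed sketch of that pattern, with an invented function name and comment placeholders standing in for the real translation work:

#include <linux/kvm_host.h>

static int example_map_under_invalidate_check(struct kvm *kvm)
{
	unsigned long mmu_seq;

	/* Snapshot the invalidation sequence before doing unlocked work. */
	mmu_seq = kvm->mmu_invalidate_seq;
	smp_rmb();

	/* ... translate the guest address and prepare the new PTE ... */

	spin_lock(&kvm->mmu_lock);
	if (mmu_invalidate_retry(kvm, mmu_seq)) {
		/* An invalidation raced with us; caller should retry. */
		spin_unlock(&kvm->mmu_lock);
		return -EAGAIN;
	}
	/* ... install the PTE while holding mmu_lock ... */
	spin_unlock(&kvm->mmu_lock);
	return 0;
}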
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index d6589c4fe889..40864373ef87 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -307,7 +307,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
return ret;
ret = -ENOMEM;
- stt = kzalloc(struct_size(stt, pages, npages), GFP_KERNEL);
+ stt = kzalloc(struct_size(stt, pages, npages), GFP_KERNEL | __GFP_NOWARN);
if (!stt)
goto fail_acct;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 631062cde6b4..57d0835e56fd 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2660,11 +2660,21 @@ static struct debugfs_timings_element {
const char *name;
size_t offset;
} timings[] = {
+#ifdef CONFIG_KVM_BOOK3S_HV_P9_TIMING
+ {"vcpu_entry", offsetof(struct kvm_vcpu, arch.vcpu_entry)},
+ {"guest_entry", offsetof(struct kvm_vcpu, arch.guest_entry)},
+ {"in_guest", offsetof(struct kvm_vcpu, arch.in_guest)},
+ {"guest_exit", offsetof(struct kvm_vcpu, arch.guest_exit)},
+ {"vcpu_exit", offsetof(struct kvm_vcpu, arch.vcpu_exit)},
+ {"hypercall", offsetof(struct kvm_vcpu, arch.hcall)},
+ {"page_fault", offsetof(struct kvm_vcpu, arch.pg_fault)},
+#else
{"rm_entry", offsetof(struct kvm_vcpu, arch.rm_entry)},
{"rm_intr", offsetof(struct kvm_vcpu, arch.rm_intr)},
{"rm_exit", offsetof(struct kvm_vcpu, arch.rm_exit)},
{"guest", offsetof(struct kvm_vcpu, arch.guest_time)},
{"cede", offsetof(struct kvm_vcpu, arch.cede_time)},
+#endif
};
#define N_TIMINGS (ARRAY_SIZE(timings))
@@ -2783,8 +2793,9 @@ static const struct file_operations debugfs_timings_ops = {
/* Create a debugfs directory for the vcpu */
static int kvmppc_arch_create_vcpu_debugfs_hv(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry)
{
- debugfs_create_file("timings", 0444, debugfs_dentry, vcpu,
- &debugfs_timings_ops);
+ if (cpu_has_feature(CPU_FTR_ARCH_300) == IS_ENABLED(CONFIG_KVM_BOOK3S_HV_P9_TIMING))
+ debugfs_create_file("timings", 0444, debugfs_dentry, vcpu,
+ &debugfs_timings_ops);
return 0;
}
@@ -4005,8 +4016,10 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
switch_pmu_to_guest(vcpu, &host_os_sprs);
+ accumulate_time(vcpu, &vcpu->arch.in_guest);
trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs),
__pa(&vcpu->arch.regs));
+ accumulate_time(vcpu, &vcpu->arch.guest_exit);
kvmhv_restore_hv_return_state(vcpu, &hvregs);
switch_pmu_to_host(vcpu, &host_os_sprs);
vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
@@ -4694,6 +4707,8 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
struct kvm *kvm;
unsigned long msr;
+ start_timing(vcpu, &vcpu->arch.vcpu_entry);
+
if (!vcpu->arch.sane) {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return -EINVAL;
@@ -4759,6 +4774,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
do {
+ accumulate_time(vcpu, &vcpu->arch.guest_entry);
if (cpu_has_feature(CPU_FTR_ARCH_300))
r = kvmhv_run_single_vcpu(vcpu, ~(u64)0,
vcpu->arch.vcore->lpcr);
@@ -4766,6 +4782,8 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
r = kvmppc_run_vcpu(vcpu);
if (run->exit_reason == KVM_EXIT_PAPR_HCALL) {
+ accumulate_time(vcpu, &vcpu->arch.hcall);
+
if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_PR)) {
/*
* These should have been caught reflected
@@ -4781,6 +4799,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
trace_kvm_hcall_exit(vcpu, r);
kvmppc_core_prepare_to_enter(vcpu);
} else if (r == RESUME_PAGE_FAULT) {
+ accumulate_time(vcpu, &vcpu->arch.pg_fault);
srcu_idx = srcu_read_lock(&kvm->srcu);
r = kvmppc_book3s_hv_page_fault(vcpu,
vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
@@ -4792,12 +4811,15 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
r = kvmppc_xics_rm_complete(vcpu, 0);
}
} while (is_kvmppc_resume_guest(r));
+ accumulate_time(vcpu, &vcpu->arch.vcpu_exit);
vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
atomic_dec(&kvm->arch.vcpus_running);
srr_regs_clobbered();
+ end_timing(vcpu);
+
return r;
}
@@ -5643,7 +5665,7 @@ static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
else
kvmppc_xics_clr_mapped(kvm, guest_gsi, pimap->mapped[i].r_hwirq);
- /* invalidate the entry (what do do on error from the above ?) */
+ /* invalidate the entry (what to do on error from the above ?) */
pimap->mapped[i].r_hwirq = 0;
/*
diff --git a/arch/powerpc/kvm/book3s_hv.h b/arch/powerpc/kvm/book3s_hv.h
index 6b7f07d9026b..2f2e59d7d433 100644
--- a/arch/powerpc/kvm/book3s_hv.h
+++ b/arch/powerpc/kvm/book3s_hv.h
@@ -40,3 +40,13 @@ void switch_pmu_to_guest(struct kvm_vcpu *vcpu,
struct p9_host_os_sprs *host_os_sprs);
void switch_pmu_to_host(struct kvm_vcpu *vcpu,
struct p9_host_os_sprs *host_os_sprs);
+
+#ifdef CONFIG_KVM_BOOK3S_HV_P9_TIMING
+void accumulate_time(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator *next);
+#define start_timing(vcpu, next) accumulate_time(vcpu, next)
+#define end_timing(vcpu) accumulate_time(vcpu, NULL)
+#else
+#define accumulate_time(vcpu, next) do {} while (0)
+#define start_timing(vcpu, next) do {} while (0)
+#define end_timing(vcpu) do {} while (0)
+#endif
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 88a8f6473c4e..da85f046377a 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -19,7 +19,7 @@
#include <asm/interrupt.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
-#include <asm/archrandom.h>
+#include <asm/machdep.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/dbell.h>
@@ -176,13 +176,14 @@ EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);
int kvmppc_hwrng_present(void)
{
- return powernv_hwrng_present();
+ return ppc_md.get_random_seed != NULL;
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);
long kvmppc_rm_h_random(struct kvm_vcpu *vcpu)
{
- if (powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]))
+ if (ppc_md.get_random_seed &&
+ ppc_md.get_random_seed(&vcpu->arch.regs.gpr[4]))
return H_SUCCESS;
return H_HARDWARE;
@@ -489,24 +490,6 @@ static long kvmppc_read_one_intr(bool *again)
return kvmppc_check_passthru(xisr, xirr, again);
}
-void kvmppc_bad_interrupt(struct pt_regs *regs)
-{
- /*
- * 100 could happen at any time, 200 can happen due to invalid real
- * address access for example (or any time due to a hardware problem).
- */
- if (TRAP(regs) == 0x100) {
- get_paca()->in_nmi++;
- system_reset_exception(regs);
- get_paca()->in_nmi--;
- } else if (TRAP(regs) == 0x200) {
- machine_check_exception(regs);
- } else {
- die("Bad interrupt in KVM entry/exit code", regs, SIGABRT);
- }
- panic("Bad KVM trap");
-}
-
static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
{
vcpu->arch.ceded = 0;
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index 0644732d1a25..5a64a1341e6f 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -20,6 +20,7 @@
#include <asm/pte-walk.h>
#include <asm/reg.h>
#include <asm/plpar_wrappers.h>
+#include <asm/firmware.h>
static struct patb_entry *pseries_partition_tb;
@@ -1579,7 +1580,7 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
/* 2. Find the host pte for this L1 guest real address */
/* Used to check for invalidations in progress */
- mmu_seq = kvm->mmu_notifier_seq;
+ mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();
/* See if can find translation in our partition scoped tables for L1 */
diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c
index 112a09b33328..34f1db212824 100644
--- a/arch/powerpc/kvm/book3s_hv_p9_entry.c
+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -3,231 +3,10 @@
#include <linux/kvm_host.h>
#include <asm/asm-prototypes.h>
#include <asm/dbell.h>
-#include <asm/kvm_ppc.h>
-#include <asm/pmc.h>
#include <asm/ppc-opcode.h>
#include "book3s_hv.h"
-static void freeze_pmu(unsigned long mmcr0, unsigned long mmcra)
-{
- if (!(mmcr0 & MMCR0_FC))
- goto do_freeze;
- if (mmcra & MMCRA_SAMPLE_ENABLE)
- goto do_freeze;
- if (cpu_has_feature(CPU_FTR_ARCH_31)) {
- if (!(mmcr0 & MMCR0_PMCCEXT))
- goto do_freeze;
- if (!(mmcra & MMCRA_BHRB_DISABLE))
- goto do_freeze;
- }
- return;
-
-do_freeze:
- mmcr0 = MMCR0_FC;
- mmcra = 0;
- if (cpu_has_feature(CPU_FTR_ARCH_31)) {
- mmcr0 |= MMCR0_PMCCEXT;
- mmcra = MMCRA_BHRB_DISABLE;
- }
-
- mtspr(SPRN_MMCR0, mmcr0);
- mtspr(SPRN_MMCRA, mmcra);
- isync();
-}
-
-void switch_pmu_to_guest(struct kvm_vcpu *vcpu,
- struct p9_host_os_sprs *host_os_sprs)
-{
- struct lppaca *lp;
- int load_pmu = 1;
-
- lp = vcpu->arch.vpa.pinned_addr;
- if (lp)
- load_pmu = lp->pmcregs_in_use;
-
- /* Save host */
- if (ppc_get_pmu_inuse()) {
- /*
- * It might be better to put PMU handling (at least for the
- * host) in the perf subsystem because it knows more about what
- * is being used.
- */
-
- /* POWER9, POWER10 do not implement HPMC or SPMC */
-
- host_os_sprs->mmcr0 = mfspr(SPRN_MMCR0);
- host_os_sprs->mmcra = mfspr(SPRN_MMCRA);
-
- freeze_pmu(host_os_sprs->mmcr0, host_os_sprs->mmcra);
-
- host_os_sprs->pmc1 = mfspr(SPRN_PMC1);
- host_os_sprs->pmc2 = mfspr(SPRN_PMC2);
- host_os_sprs->pmc3 = mfspr(SPRN_PMC3);
- host_os_sprs->pmc4 = mfspr(SPRN_PMC4);
- host_os_sprs->pmc5 = mfspr(SPRN_PMC5);
- host_os_sprs->pmc6 = mfspr(SPRN_PMC6);
- host_os_sprs->mmcr1 = mfspr(SPRN_MMCR1);
- host_os_sprs->mmcr2 = mfspr(SPRN_MMCR2);
- host_os_sprs->sdar = mfspr(SPRN_SDAR);
- host_os_sprs->siar = mfspr(SPRN_SIAR);
- host_os_sprs->sier1 = mfspr(SPRN_SIER);
-
- if (cpu_has_feature(CPU_FTR_ARCH_31)) {
- host_os_sprs->mmcr3 = mfspr(SPRN_MMCR3);
- host_os_sprs->sier2 = mfspr(SPRN_SIER2);
- host_os_sprs->sier3 = mfspr(SPRN_SIER3);
- }
- }
-
-#ifdef CONFIG_PPC_PSERIES
- /* After saving PMU, before loading guest PMU, flip pmcregs_in_use */
- if (kvmhv_on_pseries()) {
- barrier();
- get_lppaca()->pmcregs_in_use = load_pmu;
- barrier();
- }
-#endif
-
- /*
- * Load guest. If the VPA said the PMCs are not in use but the guest
- * tried to access them anyway, HFSCR[PM] will be set by the HFAC
- * fault so we can make forward progress.
- */
- if (load_pmu || (vcpu->arch.hfscr & HFSCR_PM)) {
- mtspr(SPRN_PMC1, vcpu->arch.pmc[0]);
- mtspr(SPRN_PMC2, vcpu->arch.pmc[1]);
- mtspr(SPRN_PMC3, vcpu->arch.pmc[2]);
- mtspr(SPRN_PMC4, vcpu->arch.pmc[3]);
- mtspr(SPRN_PMC5, vcpu->arch.pmc[4]);
- mtspr(SPRN_PMC6, vcpu->arch.pmc[5]);
- mtspr(SPRN_MMCR1, vcpu->arch.mmcr[1]);
- mtspr(SPRN_MMCR2, vcpu->arch.mmcr[2]);
- mtspr(SPRN_SDAR, vcpu->arch.sdar);
- mtspr(SPRN_SIAR, vcpu->arch.siar);
- mtspr(SPRN_SIER, vcpu->arch.sier[0]);
-
- if (cpu_has_feature(CPU_FTR_ARCH_31)) {
- mtspr(SPRN_MMCR3, vcpu->arch.mmcr[3]);
- mtspr(SPRN_SIER2, vcpu->arch.sier[1]);
- mtspr(SPRN_SIER3, vcpu->arch.sier[2]);
- }
-
- /* Set MMCRA then MMCR0 last */
- mtspr(SPRN_MMCRA, vcpu->arch.mmcra);
- mtspr(SPRN_MMCR0, vcpu->arch.mmcr[0]);
- /* No isync necessary because we're starting counters */
-
- if (!vcpu->arch.nested &&
- (vcpu->arch.hfscr_permitted & HFSCR_PM))
- vcpu->arch.hfscr |= HFSCR_PM;
- }
-}
-EXPORT_SYMBOL_GPL(switch_pmu_to_guest);
-
-void switch_pmu_to_host(struct kvm_vcpu *vcpu,
- struct p9_host_os_sprs *host_os_sprs)
-{
- struct lppaca *lp;
- int save_pmu = 1;
-
- lp = vcpu->arch.vpa.pinned_addr;
- if (lp)
- save_pmu = lp->pmcregs_in_use;
- if (IS_ENABLED(CONFIG_KVM_BOOK3S_HV_NESTED_PMU_WORKAROUND)) {
- /*
- * Save pmu if this guest is capable of running nested guests.
- * This is option is for old L1s that do not set their
- * lppaca->pmcregs_in_use properly when entering their L2.
- */
- save_pmu |= nesting_enabled(vcpu->kvm);
- }
-
- if (save_pmu) {
- vcpu->arch.mmcr[0] = mfspr(SPRN_MMCR0);
- vcpu->arch.mmcra = mfspr(SPRN_MMCRA);
-
- freeze_pmu(vcpu->arch.mmcr[0], vcpu->arch.mmcra);
-
- vcpu->arch.pmc[0] = mfspr(SPRN_PMC1);
- vcpu->arch.pmc[1] = mfspr(SPRN_PMC2);
- vcpu->arch.pmc[2] = mfspr(SPRN_PMC3);
- vcpu->arch.pmc[3] = mfspr(SPRN_PMC4);
- vcpu->arch.pmc[4] = mfspr(SPRN_PMC5);
- vcpu->arch.pmc[5] = mfspr(SPRN_PMC6);
- vcpu->arch.mmcr[1] = mfspr(SPRN_MMCR1);
- vcpu->arch.mmcr[2] = mfspr(SPRN_MMCR2);
- vcpu->arch.sdar = mfspr(SPRN_SDAR);
- vcpu->arch.siar = mfspr(SPRN_SIAR);
- vcpu->arch.sier[0] = mfspr(SPRN_SIER);
-
- if (cpu_has_feature(CPU_FTR_ARCH_31)) {
- vcpu->arch.mmcr[3] = mfspr(SPRN_MMCR3);
- vcpu->arch.sier[1] = mfspr(SPRN_SIER2);
- vcpu->arch.sier[2] = mfspr(SPRN_SIER3);
- }
-
- } else if (vcpu->arch.hfscr & HFSCR_PM) {
- /*
- * The guest accessed PMC SPRs without specifying they should
- * be preserved, or it cleared pmcregs_in_use after the last
- * access. Just ensure they are frozen.
- */
- freeze_pmu(mfspr(SPRN_MMCR0), mfspr(SPRN_MMCRA));
-
- /*
- * Demand-fault PMU register access in the guest.
- *
- * This is used to grab the guest's VPA pmcregs_in_use value
- * and reflect it into the host's VPA in the case of a nested
- * hypervisor.
- *
- * It also avoids having to zero-out SPRs after each guest
- * exit to avoid side-channels when.
- *
- * This is cleared here when we exit the guest, so later HFSCR
- * interrupt handling can add it back to run the guest with
- * PM enabled next time.
- */
- if (!vcpu->arch.nested)
- vcpu->arch.hfscr &= ~HFSCR_PM;
- } /* otherwise the PMU should still be frozen */
-
-#ifdef CONFIG_PPC_PSERIES
- if (kvmhv_on_pseries()) {
- barrier();
- get_lppaca()->pmcregs_in_use = ppc_get_pmu_inuse();
- barrier();
- }
-#endif
-
- if (ppc_get_pmu_inuse()) {
- mtspr(SPRN_PMC1, host_os_sprs->pmc1);
- mtspr(SPRN_PMC2, host_os_sprs->pmc2);
- mtspr(SPRN_PMC3, host_os_sprs->pmc3);
- mtspr(SPRN_PMC4, host_os_sprs->pmc4);
- mtspr(SPRN_PMC5, host_os_sprs->pmc5);
- mtspr(SPRN_PMC6, host_os_sprs->pmc6);
- mtspr(SPRN_MMCR1, host_os_sprs->mmcr1);
- mtspr(SPRN_MMCR2, host_os_sprs->mmcr2);
- mtspr(SPRN_SDAR, host_os_sprs->sdar);
- mtspr(SPRN_SIAR, host_os_sprs->siar);
- mtspr(SPRN_SIER, host_os_sprs->sier1);
-
- if (cpu_has_feature(CPU_FTR_ARCH_31)) {
- mtspr(SPRN_MMCR3, host_os_sprs->mmcr3);
- mtspr(SPRN_SIER2, host_os_sprs->sier2);
- mtspr(SPRN_SIER3, host_os_sprs->sier3);
- }
-
- /* Set MMCRA then MMCR0 last */
- mtspr(SPRN_MMCRA, host_os_sprs->mmcra);
- mtspr(SPRN_MMCR0, host_os_sprs->mmcr0);
- isync();
- }
-}
-EXPORT_SYMBOL_GPL(switch_pmu_to_host);
-
static void load_spr_state(struct kvm_vcpu *vcpu,
struct p9_host_os_sprs *host_os_sprs)
{
@@ -437,17 +216,8 @@ void restore_p9_host_os_sprs(struct kvm_vcpu *vcpu,
}
EXPORT_SYMBOL_GPL(restore_p9_host_os_sprs);
-#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
-static void __start_timing(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator *next)
-{
- struct kvmppc_vcore *vc = vcpu->arch.vcore;
- u64 tb = mftb() - vc->tb_offset_applied;
-
- vcpu->arch.cur_activity = next;
- vcpu->arch.cur_tb_start = tb;
-}
-
-static void __accumulate_time(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator *next)
+#ifdef CONFIG_KVM_BOOK3S_HV_P9_TIMING
+void accumulate_time(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator *next)
{
struct kvmppc_vcore *vc = vcpu->arch.vcore;
struct kvmhv_tb_accumulator *curr;
@@ -477,14 +247,7 @@ static void __accumulate_time(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator
smp_wmb();
curr->seqcount = seq + 2;
}
-
-#define start_timing(vcpu, next) __start_timing(vcpu, next)
-#define end_timing(vcpu) __start_timing(vcpu, NULL)
-#define accumulate_time(vcpu, next) __accumulate_time(vcpu, next)
-#else
-#define start_timing(vcpu, next) do {} while (0)
-#define end_timing(vcpu) do {} while (0)
-#define accumulate_time(vcpu, next) do {} while (0)
+EXPORT_SYMBOL_GPL(accumulate_time);
#endif
static inline u64 mfslbv(unsigned int idx)
@@ -795,8 +558,6 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_HV);
WARN_ON_ONCE(!(vcpu->arch.shregs.msr & MSR_ME));
- start_timing(vcpu, &vcpu->arch.rm_entry);
-
vcpu->arch.ceded = 0;
/* Save MSR for restore, with EE clear. */
@@ -957,13 +718,13 @@ tm_return_to_guest:
mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0);
mtspr(SPRN_SRR1, vcpu->arch.shregs.srr1);
- accumulate_time(vcpu, &vcpu->arch.guest_time);
-
switch_pmu_to_guest(vcpu, &host_os_sprs);
+ accumulate_time(vcpu, &vcpu->arch.in_guest);
+
kvmppc_p9_enter_guest(vcpu);
- switch_pmu_to_host(vcpu, &host_os_sprs);
- accumulate_time(vcpu, &vcpu->arch.rm_intr);
+ accumulate_time(vcpu, &vcpu->arch.guest_exit);
+ switch_pmu_to_host(vcpu, &host_os_sprs);
/* XXX: Could get these from r11/12 and paca exsave instead */
vcpu->arch.shregs.srr0 = mfspr(SPRN_SRR0);
@@ -1058,8 +819,6 @@ tm_return_to_guest:
#endif
}
- accumulate_time(vcpu, &vcpu->arch.rm_exit);
-
/* Advance host PURR/SPURR by the amount used by guest */
purr = mfspr(SPRN_PURR);
spurr = mfspr(SPRN_SPURR);
@@ -1166,8 +925,6 @@ tm_return_to_guest:
asm volatile(PPC_CP_ABORT);
out:
- end_timing(vcpu);
-
return trap;
}
EXPORT_SYMBOL_GPL(kvmhv_vcpu_entry_p9);
diff --git a/arch/powerpc/kvm/book3s_hv_p9_perf.c b/arch/powerpc/kvm/book3s_hv_p9_perf.c
new file mode 100644
index 000000000000..44d24cca3df1
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_p9_perf.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <asm/kvm_ppc.h>
+#include <asm/pmc.h>
+
+#include "book3s_hv.h"
+
+static void freeze_pmu(unsigned long mmcr0, unsigned long mmcra)
+{
+ if (!(mmcr0 & MMCR0_FC))
+ goto do_freeze;
+ if (mmcra & MMCRA_SAMPLE_ENABLE)
+ goto do_freeze;
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ if (!(mmcr0 & MMCR0_PMCCEXT))
+ goto do_freeze;
+ if (!(mmcra & MMCRA_BHRB_DISABLE))
+ goto do_freeze;
+ }
+ return;
+
+do_freeze:
+ mmcr0 = MMCR0_FC;
+ mmcra = 0;
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ mmcr0 |= MMCR0_PMCCEXT;
+ mmcra = MMCRA_BHRB_DISABLE;
+ }
+
+ mtspr(SPRN_MMCR0, mmcr0);
+ mtspr(SPRN_MMCRA, mmcra);
+ isync();
+}
+
+void switch_pmu_to_guest(struct kvm_vcpu *vcpu,
+ struct p9_host_os_sprs *host_os_sprs)
+{
+ struct lppaca *lp;
+ int load_pmu = 1;
+
+ lp = vcpu->arch.vpa.pinned_addr;
+ if (lp)
+ load_pmu = lp->pmcregs_in_use;
+
+ /* Save host */
+ if (ppc_get_pmu_inuse()) {
+ /* POWER9, POWER10 do not implement HPMC or SPMC */
+
+ host_os_sprs->mmcr0 = mfspr(SPRN_MMCR0);
+ host_os_sprs->mmcra = mfspr(SPRN_MMCRA);
+
+ freeze_pmu(host_os_sprs->mmcr0, host_os_sprs->mmcra);
+
+ host_os_sprs->pmc1 = mfspr(SPRN_PMC1);
+ host_os_sprs->pmc2 = mfspr(SPRN_PMC2);
+ host_os_sprs->pmc3 = mfspr(SPRN_PMC3);
+ host_os_sprs->pmc4 = mfspr(SPRN_PMC4);
+ host_os_sprs->pmc5 = mfspr(SPRN_PMC5);
+ host_os_sprs->pmc6 = mfspr(SPRN_PMC6);
+ host_os_sprs->mmcr1 = mfspr(SPRN_MMCR1);
+ host_os_sprs->mmcr2 = mfspr(SPRN_MMCR2);
+ host_os_sprs->sdar = mfspr(SPRN_SDAR);
+ host_os_sprs->siar = mfspr(SPRN_SIAR);
+ host_os_sprs->sier1 = mfspr(SPRN_SIER);
+
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ host_os_sprs->mmcr3 = mfspr(SPRN_MMCR3);
+ host_os_sprs->sier2 = mfspr(SPRN_SIER2);
+ host_os_sprs->sier3 = mfspr(SPRN_SIER3);
+ }
+ }
+
+#ifdef CONFIG_PPC_PSERIES
+ /* After saving PMU, before loading guest PMU, flip pmcregs_in_use */
+ if (kvmhv_on_pseries()) {
+ barrier();
+ get_lppaca()->pmcregs_in_use = load_pmu;
+ barrier();
+ }
+#endif
+
+ /*
+ * Load guest. If the VPA said the PMCs are not in use but the guest
+ * tried to access them anyway, HFSCR[PM] will be set by the HFAC
+ * fault so we can make forward progress.
+ */
+ if (load_pmu || (vcpu->arch.hfscr & HFSCR_PM)) {
+ mtspr(SPRN_PMC1, vcpu->arch.pmc[0]);
+ mtspr(SPRN_PMC2, vcpu->arch.pmc[1]);
+ mtspr(SPRN_PMC3, vcpu->arch.pmc[2]);
+ mtspr(SPRN_PMC4, vcpu->arch.pmc[3]);
+ mtspr(SPRN_PMC5, vcpu->arch.pmc[4]);
+ mtspr(SPRN_PMC6, vcpu->arch.pmc[5]);
+ mtspr(SPRN_MMCR1, vcpu->arch.mmcr[1]);
+ mtspr(SPRN_MMCR2, vcpu->arch.mmcr[2]);
+ mtspr(SPRN_SDAR, vcpu->arch.sdar);
+ mtspr(SPRN_SIAR, vcpu->arch.siar);
+ mtspr(SPRN_SIER, vcpu->arch.sier[0]);
+
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ mtspr(SPRN_MMCR3, vcpu->arch.mmcr[3]);
+ mtspr(SPRN_SIER2, vcpu->arch.sier[1]);
+ mtspr(SPRN_SIER3, vcpu->arch.sier[2]);
+ }
+
+ /* Set MMCRA then MMCR0 last */
+ mtspr(SPRN_MMCRA, vcpu->arch.mmcra);
+ mtspr(SPRN_MMCR0, vcpu->arch.mmcr[0]);
+ /* No isync necessary because we're starting counters */
+
+ if (!vcpu->arch.nested &&
+ (vcpu->arch.hfscr_permitted & HFSCR_PM))
+ vcpu->arch.hfscr |= HFSCR_PM;
+ }
+}
+EXPORT_SYMBOL_GPL(switch_pmu_to_guest);
+
+void switch_pmu_to_host(struct kvm_vcpu *vcpu,
+ struct p9_host_os_sprs *host_os_sprs)
+{
+ struct lppaca *lp;
+ int save_pmu = 1;
+
+ lp = vcpu->arch.vpa.pinned_addr;
+ if (lp)
+ save_pmu = lp->pmcregs_in_use;
+ if (IS_ENABLED(CONFIG_KVM_BOOK3S_HV_NESTED_PMU_WORKAROUND)) {
+ /*
+ * Save pmu if this guest is capable of running nested guests.
+ * This option is for old L1s that do not set their
+ * lppaca->pmcregs_in_use properly when entering their L2.
+ */
+ save_pmu |= nesting_enabled(vcpu->kvm);
+ }
+
+ if (save_pmu) {
+ vcpu->arch.mmcr[0] = mfspr(SPRN_MMCR0);
+ vcpu->arch.mmcra = mfspr(SPRN_MMCRA);
+
+ freeze_pmu(vcpu->arch.mmcr[0], vcpu->arch.mmcra);
+
+ vcpu->arch.pmc[0] = mfspr(SPRN_PMC1);
+ vcpu->arch.pmc[1] = mfspr(SPRN_PMC2);
+ vcpu->arch.pmc[2] = mfspr(SPRN_PMC3);
+ vcpu->arch.pmc[3] = mfspr(SPRN_PMC4);
+ vcpu->arch.pmc[4] = mfspr(SPRN_PMC5);
+ vcpu->arch.pmc[5] = mfspr(SPRN_PMC6);
+ vcpu->arch.mmcr[1] = mfspr(SPRN_MMCR1);
+ vcpu->arch.mmcr[2] = mfspr(SPRN_MMCR2);
+ vcpu->arch.sdar = mfspr(SPRN_SDAR);
+ vcpu->arch.siar = mfspr(SPRN_SIAR);
+ vcpu->arch.sier[0] = mfspr(SPRN_SIER);
+
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ vcpu->arch.mmcr[3] = mfspr(SPRN_MMCR3);
+ vcpu->arch.sier[1] = mfspr(SPRN_SIER2);
+ vcpu->arch.sier[2] = mfspr(SPRN_SIER3);
+ }
+
+ } else if (vcpu->arch.hfscr & HFSCR_PM) {
+ /*
+ * The guest accessed PMC SPRs without specifying they should
+ * be preserved, or it cleared pmcregs_in_use after the last
+ * access. Just ensure they are frozen.
+ */
+ freeze_pmu(mfspr(SPRN_MMCR0), mfspr(SPRN_MMCRA));
+
+ /*
+ * Demand-fault PMU register access in the guest.
+ *
+ * This is used to grab the guest's VPA pmcregs_in_use value
+ * and reflect it into the host's VPA in the case of a nested
+ * hypervisor.
+ *
+ * It also avoids having to zero-out SPRs after each guest
+ * exit to avoid side-channels.
+ *
+ * This is cleared here when we exit the guest, so later HFSCR
+ * interrupt handling can add it back to run the guest with
+ * PM enabled next time.
+ */
+ if (!vcpu->arch.nested)
+ vcpu->arch.hfscr &= ~HFSCR_PM;
+ } /* otherwise the PMU should still be frozen */
+
+#ifdef CONFIG_PPC_PSERIES
+ if (kvmhv_on_pseries()) {
+ barrier();
+ get_lppaca()->pmcregs_in_use = ppc_get_pmu_inuse();
+ barrier();
+ }
+#endif
+
+ if (ppc_get_pmu_inuse()) {
+ mtspr(SPRN_PMC1, host_os_sprs->pmc1);
+ mtspr(SPRN_PMC2, host_os_sprs->pmc2);
+ mtspr(SPRN_PMC3, host_os_sprs->pmc3);
+ mtspr(SPRN_PMC4, host_os_sprs->pmc4);
+ mtspr(SPRN_PMC5, host_os_sprs->pmc5);
+ mtspr(SPRN_PMC6, host_os_sprs->pmc6);
+ mtspr(SPRN_MMCR1, host_os_sprs->mmcr1);
+ mtspr(SPRN_MMCR2, host_os_sprs->mmcr2);
+ mtspr(SPRN_SDAR, host_os_sprs->sdar);
+ mtspr(SPRN_SIAR, host_os_sprs->siar);
+ mtspr(SPRN_SIER, host_os_sprs->sier1);
+
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ mtspr(SPRN_MMCR3, host_os_sprs->mmcr3);
+ mtspr(SPRN_SIER2, host_os_sprs->sier2);
+ mtspr(SPRN_SIER3, host_os_sprs->sier3);
+ }
+
+ /* Set MMCRA then MMCR0 last */
+ mtspr(SPRN_MMCRA, host_os_sprs->mmcra);
+ mtspr(SPRN_MMCR0, host_os_sprs->mmcr0);
+ isync();
+ }
+}
+EXPORT_SYMBOL_GPL(switch_pmu_to_host);
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 2257fb18cb72..5a05953ae13f 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -219,7 +219,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
g_ptel = ptel;
/* used later to detect if we might have been invalidated */
- mmu_seq = kvm->mmu_notifier_seq;
+ mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();
/* Find the memslot (if any) for this address */
@@ -366,7 +366,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
rmap = real_vmalloc_addr(rmap);
lock_rmap(rmap);
/* Check for pending invalidations under the rmap chain lock */
- if (mmu_notifier_retry(kvm, mmu_seq)) {
+ if (mmu_invalidate_retry(kvm, mmu_seq)) {
/* inval in progress, write a non-present HPTE */
pteh |= HPTE_V_ABSENT;
pteh &= ~HPTE_V_VALID;
@@ -932,7 +932,7 @@ static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu,
int i;
/* Used later to detect if we might have been invalidated */
- mmu_seq = kvm->mmu_notifier_seq;
+ mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();
arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
@@ -960,7 +960,7 @@ static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu,
long ret = H_SUCCESS;
/* Used later to detect if we might have been invalidated */
- mmu_seq = kvm->mmu_notifier_seq;
+ mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();
arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 0fc0e68d20d0..7ded202bf995 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -237,14 +237,14 @@ kvm_novcpu_wakeup:
cmpdi r4, 0
beq kvmppc_primary_no_guest
-#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+#ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING
addi r3, r4, VCPU_TB_RMENTRY
bl kvmhv_start_timing
#endif
b kvmppc_got_guest
kvm_novcpu_exit:
-#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+#ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING
ld r4, HSTATE_KVM_VCPU(r13)
cmpdi r4, 0
beq 13f
@@ -523,7 +523,7 @@ kvmppc_hv_entry:
li r6, KVM_GUEST_MODE_HOST_HV
stb r6, HSTATE_IN_GUEST(r13)
-#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+#ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING
/* Store initial timestamp */
cmpdi r4, 0
beq 1f
@@ -894,7 +894,7 @@ fast_guest_return:
li r9, KVM_GUEST_MODE_GUEST_HV
stb r9, HSTATE_IN_GUEST(r13)
-#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+#ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING
/* Accumulate timing */
addi r3, r4, VCPU_TB_GUEST
bl kvmhv_accumulate_time
@@ -945,7 +945,7 @@ secondary_too_late:
cmpdi r4, 0
beq 11f
stw r12, VCPU_TRAP(r4)
-#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+#ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING
addi r3, r4, VCPU_TB_RMEXIT
bl kvmhv_accumulate_time
#endif
@@ -959,7 +959,7 @@ hdec_soon:
li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
12: stw r12, VCPU_TRAP(r4)
mr r9, r4
-#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+#ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING
addi r3, r4, VCPU_TB_RMEXIT
bl kvmhv_accumulate_time
#endif
@@ -1056,7 +1056,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
li r0, MSR_RI
mtmsrd r0, 1
-#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+#ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING
addi r3, r9, VCPU_TB_RMINTR
mr r4, r9
bl kvmhv_accumulate_time
@@ -1135,7 +1135,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
-#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+#ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING
addi r3, r9, VCPU_TB_RMEXIT
mr r4, r9
bl kvmhv_accumulate_time
@@ -1495,7 +1495,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
mtspr SPRN_LPCR,r8
isync
-#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+#ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING
/* Finish timing, if we have a vcpu */
ld r4, HSTATE_KVM_VCPU(r13)
cmpdi r4, 0
@@ -2153,7 +2153,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_TM)
ld r4, HSTATE_KVM_VCPU(r13)
std r3, VCPU_DEC_EXPIRES(r4)
-#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+#ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING
ld r4, HSTATE_KVM_VCPU(r13)
addi r3, r4, VCPU_TB_CEDE
bl kvmhv_accumulate_time
@@ -2221,7 +2221,7 @@ kvm_end_cede:
/* get vcpu pointer */
ld r4, HSTATE_KVM_VCPU(r13)
-#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+#ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING
addi r3, r4, VCPU_TB_RMINTR
bl kvmhv_accumulate_time
#endif
@@ -2961,7 +2961,7 @@ kvmppc_fix_pmao:
isync
blr
-#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+#ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING
/*
* Start timing an activity
* r3 = pointer to time accumulation struct, r4 = vcpu
diff --git a/arch/powerpc/kvm/book3s_xics.h b/arch/powerpc/kvm/book3s_xics.h
index 8e4c79e2fcd8..08fb0843faf5 100644
--- a/arch/powerpc/kvm/book3s_xics.h
+++ b/arch/powerpc/kvm/book3s_xics.h
@@ -143,6 +143,7 @@ static inline struct kvmppc_ics *kvmppc_xics_find_ics(struct kvmppc_xics *xics,
}
extern unsigned long xics_rm_h_xirr(struct kvm_vcpu *vcpu);
+extern unsigned long xics_rm_h_xirr_x(struct kvm_vcpu *vcpu);
extern int xics_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
unsigned long mfrr);
extern int xics_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 7f16afc331ef..05668e964140 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -339,7 +339,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
unsigned long flags;
/* used to check for invalidations in progress */
- mmu_seq = kvm->mmu_notifier_seq;
+ mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();
/*
@@ -460,7 +460,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
}
spin_lock(&kvm->mmu_lock);
- if (mmu_notifier_retry(kvm, mmu_seq)) {
+ if (mmu_invalidate_retry(kvm, mmu_seq)) {
ret = -EAGAIN;
goto out;
}
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 191992fcb2c2..fb1490761c87 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -33,6 +33,7 @@
#include <asm/plpar_wrappers.h>
#endif
#include <asm/ultravisor.h>
+#include <asm/setup.h>
#include "timing.h"
#include "irq.h"
diff --git a/arch/powerpc/kvm/trace_hv.h b/arch/powerpc/kvm/trace_hv.h
index 32e2cb5811cc..8d57c8428531 100644
--- a/arch/powerpc/kvm/trace_hv.h
+++ b/arch/powerpc/kvm/trace_hv.h
@@ -94,6 +94,7 @@
{H_GET_HCA_INFO, "H_GET_HCA_INFO"}, \
{H_GET_PERF_COUNT, "H_GET_PERF_COUNT"}, \
{H_MANAGE_TRACE, "H_MANAGE_TRACE"}, \
+ {H_GET_CPU_CHARACTERISTICS, "H_GET_CPU_CHARACTERISTICS"}, \
{H_FREE_LOGICAL_LAN_BUFFER, "H_FREE_LOGICAL_LAN_BUFFER"}, \
{H_QUERY_INT_STATE, "H_QUERY_INT_STATE"}, \
{H_POLL_PENDING, "H_POLL_PENDING"}, \
@@ -125,7 +126,25 @@
{H_COP, "H_COP"}, \
{H_GET_MPP_X, "H_GET_MPP_X"}, \
{H_SET_MODE, "H_SET_MODE"}, \
- {H_RTAS, "H_RTAS"}
+ {H_REGISTER_PROC_TBL, "H_REGISTER_PROC_TBL"}, \
+ {H_QUERY_VAS_CAPABILITIES, "H_QUERY_VAS_CAPABILITIES"}, \
+ {H_INT_GET_SOURCE_INFO, "H_INT_GET_SOURCE_INFO"}, \
+ {H_INT_SET_SOURCE_CONFIG, "H_INT_SET_SOURCE_CONFIG"}, \
+ {H_INT_GET_QUEUE_INFO, "H_INT_GET_QUEUE_INFO"}, \
+ {H_INT_SET_QUEUE_CONFIG, "H_INT_SET_QUEUE_CONFIG"}, \
+ {H_INT_ESB, "H_INT_ESB"}, \
+ {H_INT_RESET, "H_INT_RESET"}, \
+ {H_RPT_INVALIDATE, "H_RPT_INVALIDATE"}, \
+ {H_RTAS, "H_RTAS"}, \
+ {H_LOGICAL_MEMOP, "H_LOGICAL_MEMOP"}, \
+ {H_CAS, "H_CAS"}, \
+ {H_UPDATE_DT, "H_UPDATE_DT"}, \
+ {H_GET_PERF_COUNTER_INFO, "H_GET_PERF_COUNTER_INFO"}, \
+ {H_SET_PARTITION_TABLE, "H_SET_PARTITION_TABLE"}, \
+ {H_ENTER_NESTED, "H_ENTER_NESTED"}, \
+ {H_TLB_INVALIDATE, "H_TLB_INVALIDATE"}, \
+ {H_COPY_TOFROM_GUEST, "H_COPY_TOFROM_GUEST"}
+
#define kvm_trace_symbol_kvmret \
{RESUME_GUEST, "RESUME_GUEST"}, \
diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c
index 4f141daafcff..23c7805fb7b3 100644
--- a/arch/powerpc/lib/test_emulate_step.c
+++ b/arch/powerpc/lib/test_emulate_step.c
@@ -53,9 +53,6 @@
ppc_inst_prefix(PPC_PREFIX_MLS | __PPC_PRFX_R(pr) | IMM_H(i), \
PPC_RAW_ADDI(t, a, i))
-#define TEST_SETB(t, bfa) ppc_inst(PPC_INST_SETB | ___PPC_RT(t) | ___PPC_RA((bfa & 0x7) << 2))
-
-
static void __init init_pt_regs(struct pt_regs *regs)
{
static unsigned long msr;
@@ -935,21 +932,21 @@ static struct compute_test compute_tests[] = {
.subtests = {
{
.descr = "BFA = 1, CR = GT",
- .instr = TEST_SETB(20, 1),
+ .instr = ppc_inst(PPC_RAW_SETB(20, 1)),
.regs = {
.ccr = 0x4000000,
}
},
{
.descr = "BFA = 4, CR = LT",
- .instr = TEST_SETB(20, 4),
+ .instr = ppc_inst(PPC_RAW_SETB(20, 4)),
.regs = {
.ccr = 0x8000,
}
},
{
.descr = "BFA = 5, CR = EQ",
- .instr = TEST_SETB(20, 5),
+ .instr = ppc_inst(PPC_RAW_SETB(20, 5)),
.regs = {
.ccr = 0x200,
}
@@ -1616,11 +1613,11 @@ static int __init emulate_compute_instr(struct pt_regs *regs,
if (analysed != 1 || GETTYPE(op.type) != COMPUTE) {
if (negative)
return -EFAULT;
- pr_info("emulation failed, instruction = %s\n", ppc_inst_as_str(instr));
+ pr_info("emulation failed, instruction = %08lx\n", ppc_inst_as_ulong(instr));
return -EFAULT;
}
if (analysed == 1 && negative)
- pr_info("negative test failed, instruction = %s\n", ppc_inst_as_str(instr));
+ pr_info("negative test failed, instruction = %08lx\n", ppc_inst_as_ulong(instr));
if (!negative)
emulate_update_regs(regs, &op);
return 0;
@@ -1637,7 +1634,7 @@ static int __init execute_compute_instr(struct pt_regs *regs,
/* Patch the NOP with the actual instruction */
patch_instruction_site(&patch__exec_instr, instr);
if (exec_instr(regs)) {
- pr_info("execution failed, instruction = %s\n", ppc_inst_as_str(instr));
+ pr_info("execution failed, instruction = %08lx\n", ppc_inst_as_ulong(instr));
return -EFAULT;
}
diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
index 49a737fbbd18..a96b73006dfb 100644
--- a/arch/powerpc/mm/book3s32/mmu.c
+++ b/arch/powerpc/mm/book3s32/mmu.c
@@ -159,9 +159,12 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
unsigned long done;
unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET;
+ unsigned long size;
+ size = roundup_pow_of_two((unsigned long)_einittext - PAGE_OFFSET);
+ setibat(0, PAGE_OFFSET, 0, size, PAGE_KERNEL_X);
- if (debug_pagealloc_enabled_or_kfence() || __map_without_bats) {
+ if (debug_pagealloc_enabled_or_kfence()) {
pr_debug_once("Read-Write memory mapped without BATs\n");
if (base >= border)
return base;
@@ -245,10 +248,9 @@ void mmu_mark_rodata_ro(void)
}
/*
- * Set up one of the I/D BAT (block address translation) register pairs.
+ * Set up one of the D BAT (block address translation) register pairs.
* The parameters are not checked; in particular size must be a power
* of 2 between 128k and 256M.
- * On 603+, only set IBAT when _PAGE_EXEC is set
*/
void __init setbat(int index, unsigned long virt, phys_addr_t phys,
unsigned int size, pgprot_t prot)
@@ -284,10 +286,6 @@ void __init setbat(int index, unsigned long virt, phys_addr_t phys,
/* G bit must be zero in IBATs */
flags &= ~_PAGE_EXEC;
}
- if (flags & _PAGE_EXEC)
- bat[0] = bat[1];
- else
- bat[0].batu = bat[0].batl = 0;
bat_addrs[index].start = virt;
bat_addrs[index].limit = virt + ((bl + 1) << 17) - 1;
diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c b/arch/powerpc/mm/book3s64/hash_pgtable.c
index 2e0cad5817ba..ae008b9df0e6 100644
--- a/arch/powerpc/mm/book3s64/hash_pgtable.c
+++ b/arch/powerpc/mm/book3s64/hash_pgtable.c
@@ -13,6 +13,7 @@
#include <asm/sections.h>
#include <asm/mmu.h>
#include <asm/tlb.h>
+#include <asm/firmware.h>
#include <mm/mmu_decl.h>
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index fc92613dc2bf..363a9447d63d 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -408,7 +408,7 @@ repeat:
ssize);
if (ret == -1) {
/*
- * Try to to keep bolted entries in primary.
+ * Try to keep bolted entries in primary.
* Remove non bolted entries and try insert again
*/
ret = mmu_hash_ops.hpte_remove(hpteg);
diff --git a/arch/powerpc/mm/book3s64/pkeys.c b/arch/powerpc/mm/book3s64/pkeys.c
index 753e62ba67af..1d2675ab6711 100644
--- a/arch/powerpc/mm/book3s64/pkeys.c
+++ b/arch/powerpc/mm/book3s64/pkeys.c
@@ -10,6 +10,7 @@
#include <asm/mmu.h>
#include <asm/setup.h>
#include <asm/smp.h>
+#include <asm/firmware.h>
#include <linux/pkeys.h>
#include <linux/of_fdt.h>
diff --git a/arch/powerpc/mm/book3s64/radix_hugetlbpage.c b/arch/powerpc/mm/book3s64/radix_hugetlbpage.c
index d2fb776febb4..5e3195568525 100644
--- a/arch/powerpc/mm/book3s64/radix_hugetlbpage.c
+++ b/arch/powerpc/mm/book3s64/radix_hugetlbpage.c
@@ -48,11 +48,13 @@ void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
struct mm_struct *mm = vma->vm_mm;
/*
- * To avoid NMMU hang while relaxing access we need to flush the tlb before
- * we set the new value.
+ * POWER9 NMMU must flush the TLB after clearing the PTE before
+ * installing a PTE with more relaxed access permissions, see
+ * radix__ptep_set_access_flags.
*/
- if (is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) &&
- (atomic_read(&mm->context.copros) > 0))
+ if (!cpu_has_feature(CPU_FTR_ARCH_31) &&
+ is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) &&
+ atomic_read(&mm->context.copros) > 0)
radix__flush_hugetlb_page(vma, addr);
set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index db2f3d193448..698274109c91 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -1018,16 +1018,21 @@ void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
unsigned long change = pte_val(entry) ^ pte_val(*ptep);
/*
- * To avoid NMMU hang while relaxing access, we need mark
- * the pte invalid in between.
+ * On POWER9, the NMMU is not able to relax PTE access permissions
+ * for a translation with a TLB. The PTE must be invalidated, TLB
+ * flushed before the new PTE is installed.
+ *
+ * This only needs to be done for radix, because hash translation does
+ * flush when updating the linux pte (and we don't support NMMU
+ * accelerators on HPT on POWER9 anyway XXX: do we?).
+ *
+ * POWER10 (and P9P) NMMU does behave as per ISA.
*/
- if ((change & _PAGE_RW) && atomic_read(&mm->context.copros) > 0) {
+ if (!cpu_has_feature(CPU_FTR_ARCH_31) && (change & _PAGE_RW) &&
+ atomic_read(&mm->context.copros) > 0) {
unsigned long old_pte, new_pte;
old_pte = __radix_pte_update(ptep, _PAGE_PRESENT, _PAGE_INVALID);
- /*
- * new value of pte
- */
new_pte = old_pte | set;
radix__flush_tlb_page_psize(mm, address, psize);
__radix_pte_update(ptep, _PAGE_INVALID, new_pte);
@@ -1035,9 +1040,12 @@ void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
__radix_pte_update(ptep, 0, set);
/*
* Book3S does not require a TLB flush when relaxing access
- * restrictions when the address space is not attached to a
- * NMMU, because the core MMU will reload the pte after taking
- * an access fault, which is defined by the architecture.
+ * restrictions when the address space (modulo the POWER9 nest
+ * MMU issue above) because the MMU will reload the PTE after
+ * taking an access fault, as defined by the architecture. See
+ * "Setting a Reference or Change Bit or Upgrading Access
+ * Authority (PTE Subject to Atomic Hardware Updates)" in
+ * Power ISA Version 3.1B.
*/
}
/* See ptesync comment in radix__set_pte_at */
@@ -1050,11 +1058,12 @@ void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
struct mm_struct *mm = vma->vm_mm;
/*
- * To avoid NMMU hang while relaxing access we need to flush the tlb before
- * we set the new value. We need to do this only for radix, because hash
- * translation does flush when updating the linux pte.
+ * POWER9 NMMU must flush the TLB after clearing the PTE before
+ * installing a PTE with more relaxed access permissions, see
+ * radix__ptep_set_access_flags.
*/
- if (is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) &&
+ if (!cpu_has_feature(CPU_FTR_ARCH_31) &&
+ is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) &&
(atomic_read(&mm->context.copros) > 0))
radix__flush_tlb_page(vma, addr);
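
A minimal C sketch (annotation, not part of the diff) restating the ordering the new comments describe for a POWER9 nest MMU: invalidate the PTE, flush the TLB, and only then install the entry with relaxed permissions. It simply mirrors the sequence already visible in radix__ptep_set_access_flags above.

/* Sketch only: mirrors the sequence in radix__ptep_set_access_flags. */
static void relax_pte_for_nmmu(struct mm_struct *mm, pte_t *ptep,
			       unsigned long address, unsigned long set,
			       int psize)
{
	unsigned long old_pte, new_pte;

	/* 1. Mark the PTE invalid so no new translation can be cached. */
	old_pte = __radix_pte_update(ptep, _PAGE_PRESENT, _PAGE_INVALID);
	new_pte = old_pte | set;
	/* 2. Flush the stale translation before the NMMU can reuse it. */
	radix__flush_tlb_page_psize(mm, address, psize);
	/* 3. Only now install the PTE with the relaxed access permissions. */
	__radix_pte_update(ptep, _PAGE_INVALID, new_pte);
}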
diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
index dda51fef2d2e..4e29b619578c 100644
--- a/arch/powerpc/mm/book3s64/radix_tlb.c
+++ b/arch/powerpc/mm/book3s64/radix_tlb.c
@@ -755,10 +755,18 @@ EXPORT_SYMBOL(radix__local_flush_tlb_page);
static bool mm_needs_flush_escalation(struct mm_struct *mm)
{
/*
- * P9 nest MMU has issues with the page walk cache
- * caching PTEs and not flushing them properly when
- * RIC = 0 for a PID/LPID invalidate
+ * The P9 nest MMU has issues with the page walk cache caching PTEs
+ * and not flushing them when RIC = 0 for a PID/LPID invalidate.
+ *
+ * This may have been fixed in shipping firmware (by disabling PWC
+ * or preventing it from caching PTEs), but until that is confirmed,
+ * this workaround is required - escalate all RIC=0 IS=1/2/3 flushes
+ * to RIC=2.
+ *
+ * POWER10 (and P9P) does not have this problem.
*/
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ return false;
if (atomic_read(&mm->context.copros) > 0)
return true;
return false;
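
A short sketch (annotation, not part of the diff; the helper _tlbie_pid and the RIC_* constants are assumed from elsewhere in radix_tlb.c and are not shown here) of how the escalation decision above is typically consumed: a RIC=0 (TLB-only) flush is promoted to RIC=2 (flush everything, including the page walk cache) when a P9 nest MMU is attached.

/* Sketch only: assumed caller of mm_needs_flush_escalation(). */
static void flush_mm_pid(struct mm_struct *mm, unsigned long pid)
{
	if (mm_needs_flush_escalation(mm))
		_tlbie_pid(pid, RIC_FLUSH_ALL);	/* RIC=2: PWC + TLB */
	else
		_tlbie_pid(pid, RIC_FLUSH_TLB);	/* RIC=0: TLB only */
}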
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index c1cb21a00884..7c507fb48182 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -65,6 +65,11 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
ret = 0;
*flt = handle_mm_fault(vma, ea, is_write ? FAULT_FLAG_WRITE : 0, NULL);
+
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (*flt & VM_FAULT_COMPLETED)
+ return 0;
+
if (unlikely(*flt & VM_FAULT_ERROR)) {
if (*flt & VM_FAULT_OOM) {
ret = -ENOMEM;
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index d53fed4eccbd..014005428687 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -511,6 +511,10 @@ retry:
if (fault_signal_pending(fault, regs))
return user_mode(regs) ? 0 : SIGBUS;
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ goto out;
+
/*
* Handle the retry right now, the mmap_lock has been released in that
* case.
@@ -525,6 +529,7 @@ retry:
if (unlikely(fault & VM_FAULT_ERROR))
return mm_fault_error(regs, address, fault);
+out:
/*
* Major/minor page fault accounting.
*/
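
A simplified sketch (annotation, not the real handler) of how both hunks above consume the VM_FAULT_COMPLETED bit: when it is set, handle_mm_fault() has already released the mmap lock, so the caller must skip both the retry and the unlock paths.

/* Sketch only: reduced fault-handling flow around VM_FAULT_COMPLETED. */
static int fault_example(struct vm_area_struct *vma, unsigned long addr,
			 unsigned int flags, struct pt_regs *regs)
{
	vm_fault_t fault = handle_mm_fault(vma, addr, flags, regs);

	if (fault & VM_FAULT_COMPLETED)
		return 0;		/* mmap lock already dropped */
	if (fault & VM_FAULT_RETRY)
		return -EAGAIN;		/* lock dropped, caller retries */
	if (fault & VM_FAULT_ERROR)
		return -EFAULT;
	mmap_read_unlock(vma->vm_mm);	/* normal path unlocks here */
	return 0;
}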
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index b282af39fcf6..bc84a594ca62 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -24,6 +24,7 @@
#include <asm/setup.h>
#include <asm/hugetlb.h>
#include <asm/pte-walk.h>
+#include <asm/firmware.h>
bool hugetlb_disabled = false;
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 693a3a7a9463..62d9af6606cd 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -69,44 +69,10 @@ EXPORT_SYMBOL(agp_special_page);
void MMU_init(void);
-/*
- * this tells the system to map all of ram with the segregs
- * (i.e. page tables) instead of the bats.
- * -- Cort
- */
-int __map_without_bats;
-int __map_without_ltlbs;
-
/* max amount of low RAM to map in */
unsigned long __max_low_memory = MAX_LOW_MEM;
/*
- * Check for command-line options that affect what MMU_init will do.
- */
-static void __init MMU_setup(void)
-{
- /* Check for nobats option (used in mapin_ram). */
- if (strstr(boot_command_line, "nobats")) {
- __map_without_bats = 1;
- }
-
- if (strstr(boot_command_line, "noltlbs")) {
- __map_without_ltlbs = 1;
- }
- if (IS_ENABLED(CONFIG_PPC_8xx))
- return;
-
- if (IS_ENABLED(CONFIG_KFENCE))
- __map_without_ltlbs = 1;
-
- if (debug_pagealloc_enabled())
- __map_without_ltlbs = 1;
-
- if (strict_kernel_rwx_enabled())
- __map_without_ltlbs = 1;
-}
-
-/*
* MMU_init sets up the basic memory mappings for the kernel,
* including both RAM and possibly some I/O regions,
* and sets up the page tables and the MMU hardware ready to go.
@@ -116,9 +82,6 @@ void __init MMU_init(void)
if (ppc_md.progress)
ppc_md.progress("MMU:enter", 0x111);
- /* parse args from command line */
- MMU_setup();
-
/*
* Reserve gigantic pages for hugetlb. This MUST occur before
* lowmem_end_addr is initialized below.
diff --git a/arch/powerpc/mm/kasan/Makefile b/arch/powerpc/mm/kasan/Makefile
index 4999aadb1867..699eeffd9f55 100644
--- a/arch/powerpc/mm/kasan/Makefile
+++ b/arch/powerpc/mm/kasan/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_PPC32) += init_32.o
obj-$(CONFIG_PPC_8xx) += 8xx.o
obj-$(CONFIG_PPC_BOOK3S_32) += book3s_32.o
obj-$(CONFIG_PPC_BOOK3S_64) += init_book3s_64.o
+obj-$(CONFIG_PPC_BOOK3E_64) += init_book3e_64.o
diff --git a/arch/powerpc/mm/kasan/init_32.c b/arch/powerpc/mm/kasan/init_32.c
index f3e4d069e0ba..a70828a6d935 100644
--- a/arch/powerpc/mm/kasan/init_32.c
+++ b/arch/powerpc/mm/kasan/init_32.c
@@ -25,7 +25,7 @@ static void __init kasan_populate_pte(pte_t *ptep, pgprot_t prot)
int i;
for (i = 0; i < PTRS_PER_PTE; i++, ptep++)
- __set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
+ __set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 1);
}
int __init kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end)
diff --git a/arch/powerpc/mm/kasan/init_book3e_64.c b/arch/powerpc/mm/kasan/init_book3e_64.c
new file mode 100644
index 000000000000..11519e88dc6b
--- /dev/null
+++ b/arch/powerpc/mm/kasan/init_book3e_64.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KASAN for 64-bit Book3e powerpc
+ *
+ * Copyright 2022, Christophe Leroy, CS GROUP France
+ */
+
+#define DISABLE_BRANCH_PROFILING
+
+#include <linux/kasan.h>
+#include <linux/printk.h>
+#include <linux/memblock.h>
+#include <linux/set_memory.h>
+
+#include <asm/pgalloc.h>
+
+static inline bool kasan_pud_table(p4d_t p4d)
+{
+ return p4d_page(p4d) == virt_to_page(lm_alias(kasan_early_shadow_pud));
+}
+
+static inline bool kasan_pmd_table(pud_t pud)
+{
+ return pud_page(pud) == virt_to_page(lm_alias(kasan_early_shadow_pmd));
+}
+
+static inline bool kasan_pte_table(pmd_t pmd)
+{
+ return pmd_page(pmd) == virt_to_page(lm_alias(kasan_early_shadow_pte));
+}
+
+static int __init kasan_map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
+{
+ pgd_t *pgdp;
+ p4d_t *p4dp;
+ pud_t *pudp;
+ pmd_t *pmdp;
+ pte_t *ptep;
+
+ pgdp = pgd_offset_k(ea);
+ p4dp = p4d_offset(pgdp, ea);
+ if (kasan_pud_table(*p4dp)) {
+ pudp = memblock_alloc(PUD_TABLE_SIZE, PUD_TABLE_SIZE);
+ memcpy(pudp, kasan_early_shadow_pud, PUD_TABLE_SIZE);
+ p4d_populate(&init_mm, p4dp, pudp);
+ }
+ pudp = pud_offset(p4dp, ea);
+ if (kasan_pmd_table(*pudp)) {
+ pmdp = memblock_alloc(PMD_TABLE_SIZE, PMD_TABLE_SIZE);
+ memcpy(pmdp, kasan_early_shadow_pmd, PMD_TABLE_SIZE);
+ pud_populate(&init_mm, pudp, pmdp);
+ }
+ pmdp = pmd_offset(pudp, ea);
+ if (kasan_pte_table(*pmdp)) {
+ ptep = memblock_alloc(PTE_TABLE_SIZE, PTE_TABLE_SIZE);
+ memcpy(ptep, kasan_early_shadow_pte, PTE_TABLE_SIZE);
+ pmd_populate_kernel(&init_mm, pmdp, ptep);
+ }
+ ptep = pte_offset_kernel(pmdp, ea);
+
+ __set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot), 0);
+
+ return 0;
+}
+
+static void __init kasan_init_phys_region(void *start, void *end)
+{
+ unsigned long k_start, k_end, k_cur;
+ void *va;
+
+ if (start >= end)
+ return;
+
+ k_start = ALIGN_DOWN((unsigned long)kasan_mem_to_shadow(start), PAGE_SIZE);
+ k_end = ALIGN((unsigned long)kasan_mem_to_shadow(end), PAGE_SIZE);
+
+ va = memblock_alloc(k_end - k_start, PAGE_SIZE);
+ for (k_cur = k_start; k_cur < k_end; k_cur += PAGE_SIZE, va += PAGE_SIZE)
+ kasan_map_kernel_page(k_cur, __pa(va), PAGE_KERNEL);
+}
+
+void __init kasan_early_init(void)
+{
+ int i;
+ unsigned long addr;
+ pgd_t *pgd = pgd_offset_k(KASAN_SHADOW_START);
+ pte_t zero_pte = pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL);
+
+ BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
+ BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
+
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ __set_pte_at(&init_mm, (unsigned long)kasan_early_shadow_page,
+ &kasan_early_shadow_pte[i], zero_pte, 0);
+
+ for (i = 0; i < PTRS_PER_PMD; i++)
+ pmd_populate_kernel(&init_mm, &kasan_early_shadow_pmd[i],
+ kasan_early_shadow_pte);
+
+ for (i = 0; i < PTRS_PER_PUD; i++)
+ pud_populate(&init_mm, &kasan_early_shadow_pud[i],
+ kasan_early_shadow_pmd);
+
+ for (addr = KASAN_SHADOW_START; addr != KASAN_SHADOW_END; addr += PGDIR_SIZE)
+ p4d_populate(&init_mm, p4d_offset(pgd++, addr), kasan_early_shadow_pud);
+}
+
+void __init kasan_init(void)
+{
+ phys_addr_t start, end;
+ u64 i;
+ pte_t zero_pte = pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL_RO);
+
+ for_each_mem_range(i, &start, &end)
+ kasan_init_phys_region((void *)start, (void *)end);
+
+ if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
+ kasan_remove_zero_shadow((void *)VMALLOC_START, VMALLOC_SIZE);
+
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ __set_pte_at(&init_mm, (unsigned long)kasan_early_shadow_page,
+ &kasan_early_shadow_pte[i], zero_pte, 0);
+
+ flush_tlb_kernel_range(KASAN_SHADOW_START, KASAN_SHADOW_END);
+
+ memset(kasan_early_shadow_page, 0, PAGE_SIZE);
+
+ /* Enable error messages */
+ init_task.kasan_depth = 0;
+ pr_info("KASAN init done\n");
+}
+
+void __init kasan_late_init(void) { }
diff --git a/arch/powerpc/mm/kasan/init_book3s_64.c b/arch/powerpc/mm/kasan/init_book3s_64.c
index 0da5566d6b84..9300d641cf9a 100644
--- a/arch/powerpc/mm/kasan/init_book3s_64.c
+++ b/arch/powerpc/mm/kasan/init_book3s_64.c
@@ -99,4 +99,6 @@ void __init kasan_init(void)
pr_info("KASAN init done\n");
}
+void __init kasan_early_init(void) { }
+
void __init kasan_late_init(void) { }
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index a97128a48817..01772e79fd93 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -25,6 +25,7 @@
#include <asm/mmzone.h>
#include <asm/ftrace.h>
#include <asm/code-patching.h>
+#include <asm/setup.h>
#include <mm/mmu_decl.h>
@@ -54,6 +55,7 @@ int memory_add_physaddr_to_nid(u64 start)
{
return hot_add_scn_to_nid(start);
}
+EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
int __weak create_section_mapping(unsigned long start, unsigned long end,
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 63c4b1a4d435..229c72e49198 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -92,7 +92,6 @@ extern void mapin_ram(void);
extern void setbat(int index, unsigned long virt, phys_addr_t phys,
unsigned int size, pgprot_t prot);
-extern int __map_without_bats;
extern unsigned int rtas_data, rtas_size;
struct hash_pte;
diff --git a/arch/powerpc/mm/nohash/40x.c b/arch/powerpc/mm/nohash/40x.c
index b32e465a3d52..3684d6e570fb 100644
--- a/arch/powerpc/mm/nohash/40x.c
+++ b/arch/powerpc/mm/nohash/40x.c
@@ -43,7 +43,6 @@
#include <mm/mmu_decl.h>
-extern int __map_without_ltlbs;
/*
* MMU_init_hw does the chip-specific initialization of the MMU hardware.
*/
@@ -94,7 +93,13 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
p = 0;
s = total_lowmem;
- if (__map_without_ltlbs)
+ if (IS_ENABLED(CONFIG_KFENCE))
+ return 0;
+
+ if (debug_pagealloc_enabled())
+ return 0;
+
+ if (strict_kernel_rwx_enabled())
return 0;
while (s >= LARGE_PAGE_SIZE_16M) {
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index 27f9186ae374..dbbfe897455d 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -14,8 +14,6 @@
#define IMMR_SIZE (FIX_IMMR_SIZE << PAGE_SHIFT)
-extern int __map_without_ltlbs;
-
static unsigned long block_mapped_ram;
/*
@@ -28,8 +26,6 @@ phys_addr_t v_block_mapped(unsigned long va)
if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE)
return p + va - VIRT_IMMR_BASE;
- if (__map_without_ltlbs)
- return 0;
if (va >= PAGE_OFFSET && va < PAGE_OFFSET + block_mapped_ram)
return __pa(va);
return 0;
@@ -45,8 +41,6 @@ unsigned long p_block_mapped(phys_addr_t pa)
if (pa >= p && pa < p + IMMR_SIZE)
return VIRT_IMMR_BASE + pa - p;
- if (__map_without_ltlbs)
- return 0;
if (pa < block_mapped_ram)
return (unsigned long)__va(pa);
return 0;
@@ -153,9 +147,6 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
mmu_mapin_immr();
- if (__map_without_ltlbs)
- return 0;
-
mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, true);
if (debug_pagealloc_enabled_or_kfence()) {
top = boundary;
@@ -179,8 +170,8 @@ void mmu_mark_initmem_nx(void)
unsigned long boundary = strict_kernel_rwx_enabled() ? sinittext : etext8;
unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);
- mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, false);
- mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false);
+ if (!debug_pagealloc_enabled_or_kfence())
+ mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false);
mmu_pin_tlb(block_mapped_ram, false);
}
diff --git a/arch/powerpc/mm/nohash/book3e_hugetlbpage.c b/arch/powerpc/mm/nohash/book3e_hugetlbpage.c
index 307ca919d393..c7d4b317a823 100644
--- a/arch/powerpc/mm/nohash/book3e_hugetlbpage.c
+++ b/arch/powerpc/mm/nohash/book3e_hugetlbpage.c
@@ -103,21 +103,11 @@ static inline int book3e_tlb_exists(unsigned long ea, unsigned long pid)
int found = 0;
mtspr(SPRN_MAS6, pid << 16);
- if (mmu_has_feature(MMU_FTR_USE_TLBRSRV)) {
- asm volatile(
- "li %0,0\n"
- "tlbsx. 0,%1\n"
- "bne 1f\n"
- "li %0,1\n"
- "1:\n"
- : "=&r"(found) : "r"(ea));
- } else {
- asm volatile(
- "tlbsx 0,%1\n"
- "mfspr %0,0x271\n"
- "srwi %0,%0,31\n"
- : "=&r"(found) : "r"(ea));
- }
+ asm volatile(
+ "tlbsx 0,%1\n"
+ "mfspr %0,0x271\n"
+ "srwi %0,%0,31\n"
+ : "=&r"(found) : "r"(ea));
return found;
}
@@ -169,13 +159,9 @@ book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, pte_t pte)
mtspr(SPRN_MAS1, mas1);
mtspr(SPRN_MAS2, mas2);
- if (mmu_has_feature(MMU_FTR_USE_PAIRED_MAS)) {
- mtspr(SPRN_MAS7_MAS3, mas7_3);
- } else {
- if (mmu_has_feature(MMU_FTR_BIG_PHYS))
- mtspr(SPRN_MAS7, upper_32_bits(mas7_3));
- mtspr(SPRN_MAS3, lower_32_bits(mas7_3));
- }
+ if (mmu_has_feature(MMU_FTR_BIG_PHYS))
+ mtspr(SPRN_MAS7, upper_32_bits(mas7_3));
+ mtspr(SPRN_MAS3, lower_32_bits(mas7_3));
asm volatile ("tlbwe");
diff --git a/arch/powerpc/mm/nohash/tlb_low.S b/arch/powerpc/mm/nohash/tlb_low.S
index dd39074de9af..d62b613a0d5d 100644
--- a/arch/powerpc/mm/nohash/tlb_low.S
+++ b/arch/powerpc/mm/nohash/tlb_low.S
@@ -186,7 +186,7 @@ _GLOBAL(_tlbivax_bcast)
isync
PPC_TLBIVAX(0, R3)
isync
- eieio
+ mbar
tlbsync
BEGIN_FTR_SECTION
b 1f
@@ -355,7 +355,7 @@ _GLOBAL(_tlbivax_bcast)
rlwimi r4,r6,MAS6_SIND_SHIFT,MAS6_SIND
1: mtspr SPRN_MAS6,r4 /* assume AS=0 for now */
PPC_TLBIVAX(0,R3)
- eieio
+ mbar
tlbsync
sync
wrtee r10
diff --git a/arch/powerpc/mm/nohash/tlb_low_64e.S b/arch/powerpc/mm/nohash/tlb_low_64e.S
index 8b97c4acfebf..68ffbfdba894 100644
--- a/arch/powerpc/mm/nohash/tlb_low_64e.S
+++ b/arch/powerpc/mm/nohash/tlb_low_64e.S
@@ -152,16 +152,7 @@ tlb_miss_common_bolted:
clrrdi r15,r15,3
beq tlb_miss_fault_bolted /* No PGDIR, bail */
-BEGIN_MMU_FTR_SECTION
- /* Set the TLB reservation and search for existing entry. Then load
- * the entry.
- */
- PPC_TLBSRX_DOT(0,R16)
ldx r14,r14,r15 /* grab pgd entry */
- beq tlb_miss_done_bolted /* tlb exists already, bail */
-MMU_FTR_SECTION_ELSE
- ldx r14,r14,r15 /* grab pgd entry */
-ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)
rldicl r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3
clrrdi r15,r15,3
@@ -222,10 +213,11 @@ itlb_miss_kernel_bolted:
tlb_miss_kernel_bolted:
mfspr r10,SPRN_MAS1
ld r14,PACA_KERNELPGD(r13)
- cmpldi cr0,r15,8 /* Check for vmalloc region */
+ srdi r15,r16,44 /* get kernel region */
+ andi. r15,r15,1 /* Check for vmalloc region */
rlwinm r10,r10,0,16,1 /* Clear TID */
mtspr SPRN_MAS1,r10
- beq+ tlb_miss_common_bolted
+ bne+ tlb_miss_common_bolted
tlb_miss_fault_bolted:
/* We need to check if it was an instruction miss */
@@ -507,7 +499,9 @@ tlb_miss_huge_e6500:
tlb_miss_kernel_e6500:
ld r14,PACA_KERNELPGD(r13)
- cmpldi cr1,r15,8 /* Check for vmalloc region */
+ srdi r15,r16,44 /* get kernel region */
+ xoris r15,r15,0xc /* Check for vmalloc region */
+ cmplwi cr1,r15,1
beq+ cr1,tlb_miss_common_e6500
tlb_miss_fault_e6500:
@@ -541,16 +535,18 @@ itlb_miss_fault_e6500:
*/
mfspr r14,SPRN_ESR
mfspr r16,SPRN_DEAR /* get faulting address */
- srdi r15,r16,60 /* get region */
- cmpldi cr0,r15,0xc /* linear mapping ? */
+ srdi r15,r16,44 /* get region */
+ xoris r15,r15,0xc
+ cmpldi cr0,r15,0 /* linear mapping ? */
beq tlb_load_linear /* yes -> go to linear map load */
+ cmpldi cr1,r15,1 /* vmalloc mapping ? */
/* The page tables are mapped virtually linear. At this point, though,
* we don't know whether we are trying to fault in a first level
* virtual address or a virtual page table address. We can get that
* from bit 0x1 of the region ID which we have set for a page table
*/
- andi. r10,r15,0x1
+ andis. r10,r15,0x1
bne- virt_page_table_tlb_miss
std r14,EX_TLB_ESR(r12); /* save ESR */
@@ -562,7 +558,7 @@ itlb_miss_fault_e6500:
/* We do the user/kernel test for the PID here along with the RW test
*/
- cmpldi cr0,r15,0 /* Check for user region */
+ srdi. r15,r16,60 /* Check for user region */
/* We pre-test some combination of permissions to avoid double
* faults:
@@ -583,13 +579,12 @@ itlb_miss_fault_e6500:
*/
rlwimi r11,r14,32-19,27,27
rlwimi r11,r14,32-16,19,19
- beq normal_tlb_miss
+ beq normal_tlb_miss_user
/* XXX replace the RMW cycles with immediate loads + writes */
1: mfspr r10,SPRN_MAS1
- cmpldi cr0,r15,8 /* Check for vmalloc region */
rlwinm r10,r10,0,16,1 /* Clear TID */
mtspr SPRN_MAS1,r10
- beq+ normal_tlb_miss
+ beq+ cr1,normal_tlb_miss
/* We got a crappy address, just fault with whatever DEAR and ESR
* are here
@@ -615,27 +610,28 @@ itlb_miss_fault_e6500:
*
* Faulting address is SRR0 which is already in r16
*/
- srdi r15,r16,60 /* get region */
- cmpldi cr0,r15,0xc /* linear mapping ? */
+ srdi r15,r16,44 /* get region */
+ xoris r15,r15,0xc
+ cmpldi cr0,r15,0 /* linear mapping ? */
beq tlb_load_linear /* yes -> go to linear map load */
+ cmpldi cr1,r15,1 /* vmalloc mapping ? */
/* We do the user/kernel test for the PID here along with the RW test
*/
li r11,_PAGE_PRESENT|_PAGE_BAP_UX /* Base perm */
oris r11,r11,_PAGE_ACCESSED@h
- cmpldi cr0,r15,0 /* Check for user region */
+ srdi. r15,r16,60 /* Check for user region */
std r14,EX_TLB_ESR(r12) /* write crazy -1 to frame */
- beq normal_tlb_miss
+ beq normal_tlb_miss_user
li r11,_PAGE_PRESENT|_PAGE_BAP_SX /* Base perm */
oris r11,r11,_PAGE_ACCESSED@h
/* XXX replace the RMW cycles with immediate loads + writes */
mfspr r10,SPRN_MAS1
- cmpldi cr0,r15,8 /* Check for vmalloc region */
rlwinm r10,r10,0,16,1 /* Clear TID */
mtspr SPRN_MAS1,r10
- beq+ normal_tlb_miss
+ beq+ cr1,normal_tlb_miss
/* We got a crappy address, just fault */
TLB_MISS_EPILOG_ERROR
@@ -653,6 +649,12 @@ itlb_miss_fault_e6500:
* r11 = PTE permission mask
* r10 = crap (free to use)
*/
+normal_tlb_miss_user:
+#ifdef CONFIG_PPC_KUAP
+ mfspr r14,SPRN_MAS1
+ rlwinm. r14,r14,0,0x3fff0000
+ beq- normal_tlb_miss_access_fault /* KUAP fault */
+#endif
normal_tlb_miss:
/* So we first construct the page table address. We do that by
* shifting the bottom of the address (not the region ID) by
@@ -662,32 +664,19 @@ normal_tlb_miss:
* NOTE: For 64K pages, we do things slightly differently in
* order to handle the weird page table format used by linux
*/
- ori r10,r15,0x1
+ srdi r15,r16,44
+ oris r10,r15,0x1
rldicl r14,r16,64-(PAGE_SHIFT-3),PAGE_SHIFT-3+4
- sldi r15,r10,60
- clrrdi r14,r14,3
+ sldi r15,r10,44
+ clrrdi r14,r14,19
or r10,r15,r14
-BEGIN_MMU_FTR_SECTION
- /* Set the TLB reservation and search for existing entry. Then load
- * the entry.
- */
- PPC_TLBSRX_DOT(0,R16)
- ld r14,0(r10)
- beq normal_tlb_miss_done
-MMU_FTR_SECTION_ELSE
ld r14,0(r10)
-ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)
finish_normal_tlb_miss:
/* Check if required permissions are met */
andc. r15,r11,r14
bne- normal_tlb_miss_access_fault
-#ifdef CONFIG_PPC_KUAP
- mfspr r11,SPRN_MAS1
- rlwinm. r10,r11,0,0x3fff0000
- beq- normal_tlb_miss_access_fault /* KUAP fault */
-#endif
/* Now we build the MAS:
*
@@ -709,9 +698,7 @@ finish_normal_tlb_miss:
rldicl r10,r14,64-8,64-8
cmpldi cr0,r10,BOOK3E_PAGESZ_4K
beq- 1f
-#ifndef CONFIG_PPC_KUAP
mfspr r11,SPRN_MAS1
-#endif
rlwimi r11,r14,31,21,24
rlwinm r11,r11,0,21,19
mtspr SPRN_MAS1,r11
@@ -728,13 +715,9 @@ finish_normal_tlb_miss:
li r11,MAS3_SW|MAS3_UW
andc r15,r15,r11
1:
-BEGIN_MMU_FTR_SECTION
srdi r16,r15,32
mtspr SPRN_MAS3,r15
mtspr SPRN_MAS7,r16
-MMU_FTR_SECTION_ELSE
- mtspr SPRN_MAS7_MAS3,r15
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)
tlbwe
@@ -786,6 +769,7 @@ normal_tlb_miss_access_fault:
*/
virt_page_table_tlb_miss:
/* Are we hitting a kernel page table ? */
+ srdi r15,r16,60
andi. r10,r15,0x8
/* The cool thing now is that r10 contains 0 for user and 8 for kernel,
@@ -810,18 +794,12 @@ virt_page_table_tlb_miss:
#else
1:
#endif
-BEGIN_MMU_FTR_SECTION
- /* Search if we already have a TLB entry for that virtual address, and
- * if we do, bail out.
- */
- PPC_TLBSRX_DOT(0,R16)
- beq virt_page_table_tlb_miss_done
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV)
/* Now, we need to walk the page tables. First check if we are in
* range.
*/
- rldicl. r10,r16,64-(VPTE_INDEX_SIZE+3),VPTE_INDEX_SIZE+3+4
+ rldicl r10,r16,64-(VPTE_INDEX_SIZE+3),VPTE_INDEX_SIZE+3+4
+ cmpldi r10,0x80
bne- virt_page_table_tlb_miss_fault
/* Get the PGD pointer */
@@ -867,41 +845,12 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV)
clrldi r11,r15,4 /* remove region ID from RPN */
ori r10,r11,1 /* Or-in SR */
-BEGIN_MMU_FTR_SECTION
srdi r16,r10,32
mtspr SPRN_MAS3,r10
mtspr SPRN_MAS7,r16
-MMU_FTR_SECTION_ELSE
- mtspr SPRN_MAS7_MAS3,r10
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)
tlbwe
-BEGIN_MMU_FTR_SECTION
-virt_page_table_tlb_miss_done:
-
- /* We have overridden MAS2:EPN but currently our primary TLB miss
- * handler will always restore it so that should not be an issue,
- * if we ever optimize the primary handler to not write MAS2 on
- * some cases, we'll have to restore MAS2:EPN here based on the
- * original fault's DEAR. If we do that we have to modify the
- * ITLB miss handler to also store SRR0 in the exception frame
- * as DEAR.
- *
- * However, one nasty thing we did is we cleared the reservation
- * (well, potentially we did). We do a trick here thus if we
- * are not a level 0 exception (we interrupted the TLB miss) we
- * offset the return address by -4 in order to replay the tlbsrx
- * instruction there
- */
- subf r10,r13,r12
- cmpldi cr0,r10,PACA_EXTLB+EX_TLB_SIZE
- bne- 1f
- ld r11,PACA_EXTLB+EX_TLB_SIZE+EX_TLB_SRR0(r13)
- addi r10,r11,-4
- std r10,PACA_EXTLB+EX_TLB_SIZE+EX_TLB_SRR0(r13)
-1:
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV)
/* Return to caller, normal case */
TLB_MISS_EPILOG_SUCCESS
rfi
@@ -969,23 +918,24 @@ virt_page_table_tlb_miss_whacko_fault:
*/
mfspr r14,SPRN_ESR
mfspr r16,SPRN_DEAR /* get faulting address */
- srdi r11,r16,60 /* get region */
- cmpldi cr0,r11,0xc /* linear mapping ? */
+ srdi r11,r16,44 /* get region */
+ xoris r11,r11,0xc
+ cmpldi cr0,r11,0 /* linear mapping ? */
beq tlb_load_linear /* yes -> go to linear map load */
+ cmpldi cr1,r11,1 /* vmalloc mapping ? */
/* We do the user/kernel test for the PID here along with the RW test
*/
- cmpldi cr0,r11,0 /* Check for user region */
+ srdi. r11,r16,60 /* Check for user region */
ld r15,PACAPGD(r13) /* Load user pgdir */
beq htw_tlb_miss
/* XXX replace the RMW cycles with immediate loads + writes */
1: mfspr r10,SPRN_MAS1
- cmpldi cr0,r11,8 /* Check for vmalloc region */
rlwinm r10,r10,0,16,1 /* Clear TID */
mtspr SPRN_MAS1,r10
ld r15,PACA_KERNELPGD(r13) /* Load kernel pgdir */
- beq+ htw_tlb_miss
+ beq+ cr1,htw_tlb_miss
/* We got a crappy address, just fault with whatever DEAR and ESR
* are here
@@ -1011,19 +961,20 @@ virt_page_table_tlb_miss_whacko_fault:
*
* Faulting address is SRR0 which is already in r16
*/
- srdi r11,r16,60 /* get region */
- cmpldi cr0,r11,0xc /* linear mapping ? */
+ srdi r11,r16,44 /* get region */
+ xoris r11,r11,0xc
+ cmpldi cr0,r11,0 /* linear mapping ? */
beq tlb_load_linear /* yes -> go to linear map load */
+ cmpldi cr1,r11,1 /* vmalloc mapping ? */
/* We do the user/kernel test for the PID here along with the RW test
*/
- cmpldi cr0,r11,0 /* Check for user region */
+ srdi. r11,r16,60 /* Check for user region */
ld r15,PACAPGD(r13) /* Load user pgdir */
beq htw_tlb_miss
/* XXX replace the RMW cycles with immediate loads + writes */
1: mfspr r10,SPRN_MAS1
- cmpldi cr0,r11,8 /* Check for vmalloc region */
rlwinm r10,r10,0,16,1 /* Clear TID */
mtspr SPRN_MAS1,r10
ld r15,PACA_KERNELPGD(r13) /* Load kernel pgdir */
@@ -1116,13 +1067,9 @@ htw_tlb_miss:
*/
ori r10,r15,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT)
-BEGIN_MMU_FTR_SECTION
srdi r16,r10,32
mtspr SPRN_MAS3,r10
mtspr SPRN_MAS7,r16
-MMU_FTR_SECTION_ELSE
- mtspr SPRN_MAS7_MAS3,r10
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)
tlbwe
@@ -1203,13 +1150,9 @@ tlb_load_linear:
clrldi r10,r10,4 /* clear region bits */
ori r10,r10,MAS3_SR|MAS3_SW|MAS3_SX
-BEGIN_MMU_FTR_SECTION
srdi r16,r10,32
mtspr SPRN_MAS3,r10
mtspr SPRN_MAS7,r16
-MMU_FTR_SECTION_ELSE
- mtspr SPRN_MAS7_MAS3,r10
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)
tlbwe
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index e6166b71d36d..cb2dcdb18f8e 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -472,3 +472,27 @@ out:
return ret_pte;
}
EXPORT_SYMBOL_GPL(__find_linux_pte);
+
+/* Note due to the way vm flags are laid out, the bits are XWR */
+const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READONLY,
+ [VM_WRITE] = PAGE_COPY,
+ [VM_WRITE | VM_READ] = PAGE_COPY,
+ [VM_EXEC] = PAGE_READONLY_X,
+ [VM_EXEC | VM_READ] = PAGE_READONLY_X,
+ [VM_EXEC | VM_WRITE] = PAGE_COPY_X,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_X,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READONLY,
+ [VM_SHARED | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC] = PAGE_READONLY_X,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_X,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_X,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_X
+};
+
+#ifndef CONFIG_PPC_BOOK3S_64
+DECLARE_VM_GET_PAGE_PROT
+#endif
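
For reference, DECLARE_VM_GET_PAGE_PROT used above expands to roughly the following generic lookup (a sketch of the common helper, not code from this diff), which is why the table only needs to be indexed by the low read/write/exec/shared bits of vm_flags.

/* Sketch only: approximate expansion of DECLARE_VM_GET_PAGE_PROT. */
pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return protection_map[vm_flags &
			      (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}
EXPORT_SYMBOL(vm_get_page_prot);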
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index a56ade39dc68..3ac73f9fb5d5 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -135,9 +135,9 @@ void mark_initmem_nx(void)
unsigned long numpages = PFN_UP((unsigned long)_einittext) -
PFN_DOWN((unsigned long)_sinittext);
- if (v_block_mapped((unsigned long)_sinittext)) {
- mmu_mark_initmem_nx();
- } else {
+ mmu_mark_initmem_nx();
+
+ if (!v_block_mapped((unsigned long)_sinittext)) {
set_memory_nx((unsigned long)_sinittext, numpages);
set_memory_rw((unsigned long)_sinittext, numpages);
}
diff --git a/arch/powerpc/mm/ptdump/shared.c b/arch/powerpc/mm/ptdump/shared.c
index 03607ab90c66..f884760ca5cf 100644
--- a/arch/powerpc/mm/ptdump/shared.c
+++ b/arch/powerpc/mm/ptdump/shared.c
@@ -17,9 +17,9 @@ static const struct flag_info flag_array[] = {
.clear = " ",
}, {
.mask = _PAGE_RW,
- .val = _PAGE_RW,
- .set = "rw",
- .clear = "r ",
+ .val = 0,
+ .set = "r ",
+ .clear = "rw",
}, {
.mask = _PAGE_EXEC,
.val = _PAGE_EXEC,
diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
index e46ed1e8c6ca..43f1c76d48ce 100644
--- a/arch/powerpc/net/bpf_jit_comp32.c
+++ b/arch/powerpc/net/bpf_jit_comp32.c
@@ -294,8 +294,10 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
u32 dst_reg_h = dst_reg - 1;
u32 src_reg = bpf_to_ppc(insn[i].src_reg);
u32 src_reg_h = src_reg - 1;
+ u32 ax_reg = bpf_to_ppc(BPF_REG_AX);
u32 tmp_reg = bpf_to_ppc(TMP_REG);
u32 size = BPF_SIZE(code);
+ u32 save_reg, ret_reg;
s16 off = insn[i].off;
s32 imm = insn[i].imm;
bool func_addr_fixed;
@@ -798,25 +800,71 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
* BPF_STX ATOMIC (atomic ops)
*/
case BPF_STX | BPF_ATOMIC | BPF_W:
- if (imm != BPF_ADD) {
- pr_err_ratelimited("eBPF filter atomic op code %02x (@%d) unsupported\n",
- code, i);
- return -ENOTSUPP;
- }
-
- /* *(u32 *)(dst + off) += src */
+ save_reg = _R0;
+ ret_reg = src_reg;
bpf_set_seen_register(ctx, tmp_reg);
+ bpf_set_seen_register(ctx, ax_reg);
+
/* Get offset into TMP_REG */
EMIT(PPC_RAW_LI(tmp_reg, off));
+ tmp_idx = ctx->idx * 4;
/* load value from memory into r0 */
EMIT(PPC_RAW_LWARX(_R0, tmp_reg, dst_reg, 0));
- /* add value from src_reg into this */
- EMIT(PPC_RAW_ADD(_R0, _R0, src_reg));
- /* store result back */
- EMIT(PPC_RAW_STWCX(_R0, tmp_reg, dst_reg));
+
+ /* Save old value in BPF_REG_AX */
+ if (imm & BPF_FETCH)
+ EMIT(PPC_RAW_MR(ax_reg, _R0));
+
+ switch (imm) {
+ case BPF_ADD:
+ case BPF_ADD | BPF_FETCH:
+ EMIT(PPC_RAW_ADD(_R0, _R0, src_reg));
+ break;
+ case BPF_AND:
+ case BPF_AND | BPF_FETCH:
+ EMIT(PPC_RAW_AND(_R0, _R0, src_reg));
+ break;
+ case BPF_OR:
+ case BPF_OR | BPF_FETCH:
+ EMIT(PPC_RAW_OR(_R0, _R0, src_reg));
+ break;
+ case BPF_XOR:
+ case BPF_XOR | BPF_FETCH:
+ EMIT(PPC_RAW_XOR(_R0, _R0, src_reg));
+ break;
+ case BPF_CMPXCHG:
+ /*
+ * Return old value in BPF_REG_0 for BPF_CMPXCHG &
+ * in src_reg for other cases.
+ */
+ ret_reg = bpf_to_ppc(BPF_REG_0);
+
+ /* Compare with old value in BPF_REG_0 */
+ EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), _R0));
+ /* Don't set if different from old value */
+ PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
+ fallthrough;
+ case BPF_XCHG:
+ save_reg = src_reg;
+ break;
+ default:
+ pr_err_ratelimited("eBPF filter atomic op code %02x (@%d) unsupported\n",
+ code, i);
+ return -EOPNOTSUPP;
+ }
+
+ /* store new value */
+ EMIT(PPC_RAW_STWCX(save_reg, tmp_reg, dst_reg));
/* we're done if this succeeded */
- PPC_BCC_SHORT(COND_NE, (ctx->idx - 3) * 4);
+ PPC_BCC_SHORT(COND_NE, tmp_idx);
+
+ /* For the BPF_FETCH variant, get old data into src_reg */
+ if (imm & BPF_FETCH) {
+ EMIT(PPC_RAW_MR(ret_reg, ax_reg));
+ if (!fp->aux->verifier_zext)
+ EMIT(PPC_RAW_LI(ret_reg - 1, 0)); /* higher 32-bit */
+ }
break;
case BPF_STX | BPF_ATOMIC | BPF_DW: /* *(u64 *)(dst + off) += src */
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 594c54931e20..29ee306d6302 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -360,6 +360,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
u32 size = BPF_SIZE(code);
u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
+ u32 save_reg, ret_reg;
s16 off = insn[i].off;
s32 imm = insn[i].imm;
bool func_addr_fixed;
@@ -777,42 +778,83 @@ emit_clear:
* BPF_STX ATOMIC (atomic ops)
*/
case BPF_STX | BPF_ATOMIC | BPF_W:
- if (imm != BPF_ADD) {
- pr_err_ratelimited(
- "eBPF filter atomic op code %02x (@%d) unsupported\n",
- code, i);
- return -ENOTSUPP;
- }
-
- /* *(u32 *)(dst + off) += src */
+ case BPF_STX | BPF_ATOMIC | BPF_DW:
+ save_reg = tmp2_reg;
+ ret_reg = src_reg;
- /* Get EA into TMP_REG_1 */
- EMIT(PPC_RAW_ADDI(tmp1_reg, dst_reg, off));
+ /* Get offset into TMP_REG_1 */
+ EMIT(PPC_RAW_LI(tmp1_reg, off));
tmp_idx = ctx->idx * 4;
/* load value from memory into TMP_REG_2 */
- EMIT(PPC_RAW_LWARX(tmp2_reg, 0, tmp1_reg, 0));
- /* add value from src_reg into this */
- EMIT(PPC_RAW_ADD(tmp2_reg, tmp2_reg, src_reg));
- /* store result back */
- EMIT(PPC_RAW_STWCX(tmp2_reg, 0, tmp1_reg));
- /* we're done if this succeeded */
- PPC_BCC_SHORT(COND_NE, tmp_idx);
- break;
- case BPF_STX | BPF_ATOMIC | BPF_DW:
- if (imm != BPF_ADD) {
+ if (size == BPF_DW)
+ EMIT(PPC_RAW_LDARX(tmp2_reg, tmp1_reg, dst_reg, 0));
+ else
+ EMIT(PPC_RAW_LWARX(tmp2_reg, tmp1_reg, dst_reg, 0));
+
+ /* Save old value in _R0 */
+ if (imm & BPF_FETCH)
+ EMIT(PPC_RAW_MR(_R0, tmp2_reg));
+
+ switch (imm) {
+ case BPF_ADD:
+ case BPF_ADD | BPF_FETCH:
+ EMIT(PPC_RAW_ADD(tmp2_reg, tmp2_reg, src_reg));
+ break;
+ case BPF_AND:
+ case BPF_AND | BPF_FETCH:
+ EMIT(PPC_RAW_AND(tmp2_reg, tmp2_reg, src_reg));
+ break;
+ case BPF_OR:
+ case BPF_OR | BPF_FETCH:
+ EMIT(PPC_RAW_OR(tmp2_reg, tmp2_reg, src_reg));
+ break;
+ case BPF_XOR:
+ case BPF_XOR | BPF_FETCH:
+ EMIT(PPC_RAW_XOR(tmp2_reg, tmp2_reg, src_reg));
+ break;
+ case BPF_CMPXCHG:
+ /*
+ * Return old value in BPF_REG_0 for BPF_CMPXCHG &
+ * in src_reg for other cases.
+ */
+ ret_reg = bpf_to_ppc(BPF_REG_0);
+
+ /* Compare with old value in BPF_R0 */
+ if (size == BPF_DW)
+ EMIT(PPC_RAW_CMPD(bpf_to_ppc(BPF_REG_0), tmp2_reg));
+ else
+ EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), tmp2_reg));
+ /* Don't set if different from old value */
+ PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
+ fallthrough;
+ case BPF_XCHG:
+ save_reg = src_reg;
+ break;
+ default:
pr_err_ratelimited(
"eBPF filter atomic op code %02x (@%d) unsupported\n",
code, i);
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
- /* *(u64 *)(dst + off) += src */
- EMIT(PPC_RAW_ADDI(tmp1_reg, dst_reg, off));
- tmp_idx = ctx->idx * 4;
- EMIT(PPC_RAW_LDARX(tmp2_reg, 0, tmp1_reg, 0));
- EMIT(PPC_RAW_ADD(tmp2_reg, tmp2_reg, src_reg));
- EMIT(PPC_RAW_STDCX(tmp2_reg, 0, tmp1_reg));
+ /* store new value */
+ if (size == BPF_DW)
+ EMIT(PPC_RAW_STDCX(save_reg, tmp1_reg, dst_reg));
+ else
+ EMIT(PPC_RAW_STWCX(save_reg, tmp1_reg, dst_reg));
+ /* we're done if this succeeded */
PPC_BCC_SHORT(COND_NE, tmp_idx);
+
+ if (imm & BPF_FETCH) {
+ EMIT(PPC_RAW_MR(ret_reg, _R0));
+ /*
+ * Skip unnecessary zero-extension for 32-bit cmpxchg.
+ * For context, see commit 39491867ace5.
+ */
+ if (size != BPF_DW && imm == BPF_CMPXCHG &&
+ insn_is_zext(&insn[i + 1]))
+ addrs[++i] = ctx->idx * 4;
+ }
break;
/*
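
A C-level sketch (illustrative semantics only, not the emitted machine code) of the larx/stcx. retry loop the 32-bit and 64-bit JITs now generate for BPF_ATOMIC, shown here for BPF_ADD | BPF_FETCH on a 32-bit word; the old value that this returns is what ends up back in src_reg.

#include <stdint.h>

/* Sketch only: semantics of the JIT's lwarx/stwcx. loop for ADD|FETCH. */
static uint32_t bpf_atomic_fetch_add_w(uint32_t *addr, uint32_t src)
{
	uint32_t oldval, newval;

	do {
		oldval = __atomic_load_n(addr, __ATOMIC_RELAXED);	/* lwarx  */
		newval = oldval + src;					/* add    */
	} while (!__atomic_compare_exchange_n(addr, &oldval, newval, 0,
					      __ATOMIC_RELAXED,
					      __ATOMIC_RELAXED));	/* stwcx. */
	return oldval;	/* BPF_FETCH: old value returned to src_reg */
}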
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 140502a7fdf8..13919eb96931 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1349,27 +1349,22 @@ static void power_pmu_disable(struct pmu *pmu)
* a PMI happens during interrupt replay and perf counter
* values are cleared by PMU callbacks before replay.
*
- * If any PMC corresponding to the active PMU events are
- * overflown, disable the interrupt by clearing the paca
- * bit for PMI since we are disabling the PMU now.
- * Otherwise provide a warning if there is PMI pending, but
- * no counter is found overflown.
+ * Disable the interrupt by clearing the paca bit for PMI
+ * since we are disabling the PMU now. Otherwise provide a
+ * warning if there is PMI pending, but no counter is found
+ * overflown.
+ *
+ * Since power_pmu_disable runs under local_irq_save, it
+ * could happen that code hits a PMC overflow without PMI
+ * pending in paca. Hence only clear PMI pending if it was
+ * set.
+ *
+ * If a PMI is pending, then MSR[EE] must be disabled (because
+ * the masked PMI handler disabling EE). So it is safe to
+ * call clear_pmi_irq_pending().
*/
- if (any_pmc_overflown(cpuhw)) {
- /*
- * Since power_pmu_disable runs under local_irq_save, it
- * could happen that code hits a PMC overflow without PMI
- * pending in paca. Hence only clear PMI pending if it was
- * set.
- *
- * If a PMI is pending, then MSR[EE] must be disabled (because
- * the masked PMI handler disabling EE). So it is safe to
- * call clear_pmi_irq_pending().
- */
- if (pmi_irq_pending())
- clear_pmi_irq_pending();
- } else
- WARN_ON(pmi_irq_pending());
+ if (pmi_irq_pending())
+ clear_pmi_irq_pending();
val = mmcra = cpuhw->mmcr.mmcra;
@@ -2488,6 +2483,33 @@ static int power_pmu_prepare_cpu(unsigned int cpu)
return 0;
}
+static ssize_t pmu_name_show(struct device *cdev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ if (ppmu)
+ return sysfs_emit(buf, "%s", ppmu->name);
+
+ return 0;
+}
+
+static DEVICE_ATTR_RO(pmu_name);
+
+static struct attribute *pmu_caps_attrs[] = {
+ &dev_attr_pmu_name.attr,
+ NULL
+};
+
+static const struct attribute_group pmu_caps_group = {
+ .name = "caps",
+ .attrs = pmu_caps_attrs,
+};
+
+static const struct attribute_group *pmu_caps_groups[] = {
+ &pmu_caps_group,
+ NULL,
+};
+
int __init register_power_pmu(struct power_pmu *pmu)
{
if (ppmu)
@@ -2498,6 +2520,10 @@ int __init register_power_pmu(struct power_pmu *pmu)
pmu->name);
power_pmu.attr_groups = ppmu->attr_groups;
+
+ if (ppmu->flags & PPMU_ARCH_207S)
+ power_pmu.attr_update = pmu_caps_groups;
+
power_pmu.capabilities |= (ppmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS);
#ifdef MSR_HV
diff --git a/arch/powerpc/perf/e500-pmu.c b/arch/powerpc/perf/e500-pmu.c
index a59c33bed32a..e3e1a68eb1d5 100644
--- a/arch/powerpc/perf/e500-pmu.c
+++ b/arch/powerpc/perf/e500-pmu.c
@@ -118,12 +118,13 @@ static struct fsl_emb_pmu e500_pmu = {
static int init_e500_pmu(void)
{
- if (!cur_cpu_spec->oprofile_cpu_type)
- return -ENODEV;
+ unsigned int pvr = mfspr(SPRN_PVR);
- if (!strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/e500mc"))
+ /* ec500mc */
+ if (PVR_VER(pvr) == PVR_VER_E500MC || PVR_VER(pvr) == PVR_VER_E5500)
num_events = 256;
- else if (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/e500"))
+ /* e500 */
+ else if (PVR_VER(pvr) != PVR_VER_E500V1 && PVR_VER(pvr) != PVR_VER_E500V2)
return -ENODEV;
return register_fsl_emb_pmu(&e500_pmu);
diff --git a/arch/powerpc/perf/e6500-pmu.c b/arch/powerpc/perf/e6500-pmu.c
index 44ad65da82ed..bd779a2338f8 100644
--- a/arch/powerpc/perf/e6500-pmu.c
+++ b/arch/powerpc/perf/e6500-pmu.c
@@ -107,8 +107,9 @@ static struct fsl_emb_pmu e6500_pmu = {
static int init_e6500_pmu(void)
{
- if (!cur_cpu_spec->oprofile_cpu_type ||
- strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/e6500"))
+ unsigned int pvr = mfspr(SPRN_PVR);
+
+ if (PVR_VER(pvr) != PVR_VER_E6500)
return -ENODEV;
return register_fsl_emb_pmu(&e6500_pmu);
diff --git a/arch/powerpc/perf/generic-compat-pmu.c b/arch/powerpc/perf/generic-compat-pmu.c
index f3db88aee4dd..b5c414876ed5 100644
--- a/arch/powerpc/perf/generic-compat-pmu.c
+++ b/arch/powerpc/perf/generic-compat-pmu.c
@@ -151,9 +151,19 @@ static const struct attribute_group generic_compat_pmu_format_group = {
.attrs = generic_compat_pmu_format_attr,
};
+static struct attribute *generic_compat_pmu_caps_attrs[] = {
+ NULL
+};
+
+static struct attribute_group generic_compat_pmu_caps_group = {
+ .name = "caps",
+ .attrs = generic_compat_pmu_caps_attrs,
+};
+
static const struct attribute_group *generic_compat_pmu_attr_groups[] = {
&generic_compat_pmu_format_group,
&generic_compat_pmu_events_group,
+ &generic_compat_pmu_caps_group,
NULL,
};
@@ -292,7 +302,7 @@ static int generic_compute_mmcr(u64 event[], int n_ev,
}
static struct power_pmu generic_compat_pmu = {
- .name = "GENERIC_COMPAT",
+ .name = "ISAv3",
.n_counter = MAX_PMU_COUNTERS,
.add_fields = ISA207_ADD_FIELDS,
.test_adder = ISA207_TEST_ADDER,
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index cf5406b31e27..33c23225fd54 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -1718,16 +1718,16 @@ static int hv_24x7_init(void)
{
int r;
unsigned long hret;
+ unsigned int pvr = mfspr(SPRN_PVR);
struct hv_perf_caps caps;
if (!firmware_has_feature(FW_FEATURE_LPAR)) {
pr_debug("not a virtualized system, not enabling\n");
return -ENODEV;
- } else if (!cur_cpu_spec->oprofile_cpu_type)
- return -ENODEV;
+ }
/* POWER8 only supports v1, while POWER9 only supports v2. */
- if (!strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power8"))
+ if (PVR_VER(pvr) == PVR_POWER8)
interface_version = 1;
else {
interface_version = 2;
diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
index 42abbcfc73da..56301b2bc8ae 100644
--- a/arch/powerpc/perf/isa207-common.c
+++ b/arch/powerpc/perf/isa207-common.c
@@ -686,6 +686,9 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
mmcr2 |= MMCR2_FCS(pmc);
}
+ if (pevents[i]->attr.exclude_idle)
+ mmcr2 |= MMCR2_FCWAIT(pmc);
+
if (cpu_has_feature(CPU_FTR_ARCH_31)) {
if (pmc <= 4) {
val = (event[i] >> p10_EVENT_MMCR3_SHIFT) &
diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h
index ff122603989b..f594fa6580d1 100644
--- a/arch/powerpc/perf/isa207-common.h
+++ b/arch/powerpc/perf/isa207-common.h
@@ -249,6 +249,7 @@
/* Bits in MMCR2 for PowerISA v2.07 */
#define MMCR2_FCS(pmc) (1ull << (63 - (((pmc) - 1) * 9)))
#define MMCR2_FCP(pmc) (1ull << (62 - (((pmc) - 1) * 9)))
+#define MMCR2_FCWAIT(pmc) (1ull << (58 - (((pmc) - 1) * 9)))
#define MMCR2_FCH(pmc) (1ull << (57 - (((pmc) - 1) * 9)))
#define MAX_ALT 2
diff --git a/arch/powerpc/perf/mpc7450-pmu.c b/arch/powerpc/perf/mpc7450-pmu.c
index e39b15b79a83..552d51a925d3 100644
--- a/arch/powerpc/perf/mpc7450-pmu.c
+++ b/arch/powerpc/perf/mpc7450-pmu.c
@@ -417,8 +417,9 @@ struct power_pmu mpc7450_pmu = {
static int __init init_mpc7450_pmu(void)
{
- if (!cur_cpu_spec->oprofile_cpu_type ||
- strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/7450"))
+ unsigned int pvr = mfspr(SPRN_PVR);
+
+ if (PVR_VER(pvr) != PVR_7450)
return -ENODEV;
return register_power_pmu(&mpc7450_pmu);
diff --git a/arch/powerpc/perf/power10-pmu.c b/arch/powerpc/perf/power10-pmu.c
index c6d51e7093cf..9b5133e361a7 100644
--- a/arch/powerpc/perf/power10-pmu.c
+++ b/arch/powerpc/perf/power10-pmu.c
@@ -258,15 +258,26 @@ static const struct attribute_group power10_pmu_format_group = {
.attrs = power10_pmu_format_attr,
};
+static struct attribute *power10_pmu_caps_attrs[] = {
+ NULL
+};
+
+static struct attribute_group power10_pmu_caps_group = {
+ .name = "caps",
+ .attrs = power10_pmu_caps_attrs,
+};
+
static const struct attribute_group *power10_pmu_attr_groups_dd1[] = {
&power10_pmu_format_group,
&power10_pmu_events_group_dd1,
+ &power10_pmu_caps_group,
NULL,
};
static const struct attribute_group *power10_pmu_attr_groups[] = {
&power10_pmu_format_group,
&power10_pmu_events_group,
+ &power10_pmu_caps_group,
NULL,
};
@@ -597,12 +608,10 @@ int __init init_power10_pmu(void)
unsigned int pvr;
int rc;
- /* Comes from cpu_specs[] */
- if (!cur_cpu_spec->oprofile_cpu_type ||
- strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power10"))
+ pvr = mfspr(SPRN_PVR);
+ if (PVR_VER(pvr) != PVR_POWER10)
return -ENODEV;
- pvr = mfspr(SPRN_PVR);
/* Add the ppmu flag for power10 DD1 */
if ((PVR_CFG(pvr) == 1))
power10_pmu.flags |= PPMU_P10_DD1;
diff --git a/arch/powerpc/perf/power5+-pmu.c b/arch/powerpc/perf/power5+-pmu.c
index 753b4740ef64..b4708ab73145 100644
--- a/arch/powerpc/perf/power5+-pmu.c
+++ b/arch/powerpc/perf/power5+-pmu.c
@@ -679,9 +679,9 @@ static struct power_pmu power5p_pmu = {
int __init init_power5p_pmu(void)
{
- if (!cur_cpu_spec->oprofile_cpu_type ||
- (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5+")
- && strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5++")))
+ unsigned int pvr = mfspr(SPRN_PVR);
+
+ if (PVR_VER(pvr) != PVR_POWER5p)
return -ENODEV;
return register_power_pmu(&power5p_pmu);
diff --git a/arch/powerpc/perf/power5-pmu.c b/arch/powerpc/perf/power5-pmu.c
index 1f83c4cba0aa..c6aefd0a1cc8 100644
--- a/arch/powerpc/perf/power5-pmu.c
+++ b/arch/powerpc/perf/power5-pmu.c
@@ -620,8 +620,9 @@ static struct power_pmu power5_pmu = {
int __init init_power5_pmu(void)
{
- if (!cur_cpu_spec->oprofile_cpu_type ||
- strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5"))
+ unsigned int pvr = mfspr(SPRN_PVR);
+
+ if (PVR_VER(pvr) != PVR_POWER5)
return -ENODEV;
return register_power_pmu(&power5_pmu);
diff --git a/arch/powerpc/perf/power6-pmu.c b/arch/powerpc/perf/power6-pmu.c
index aec746f86804..5729b6e059de 100644
--- a/arch/powerpc/perf/power6-pmu.c
+++ b/arch/powerpc/perf/power6-pmu.c
@@ -541,8 +541,9 @@ static struct power_pmu power6_pmu = {
int __init init_power6_pmu(void)
{
- if (!cur_cpu_spec->oprofile_cpu_type ||
- strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power6"))
+ unsigned int pvr = mfspr(SPRN_PVR);
+
+ if (PVR_VER(pvr) != PVR_POWER6)
return -ENODEV;
return register_power_pmu(&power6_pmu);
diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c
index a74211410b8d..c95ccf2e28da 100644
--- a/arch/powerpc/perf/power7-pmu.c
+++ b/arch/powerpc/perf/power7-pmu.c
@@ -447,11 +447,12 @@ static struct power_pmu power7_pmu = {
int __init init_power7_pmu(void)
{
- if (!cur_cpu_spec->oprofile_cpu_type ||
- strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power7"))
+ unsigned int pvr = mfspr(SPRN_PVR);
+
+ if (PVR_VER(pvr) != PVR_POWER7 && PVR_VER(pvr) != PVR_POWER7p)
return -ENODEV;
- if (pvr_version_is(PVR_POWER7p))
+ if (PVR_VER(pvr) == PVR_POWER7p)
power7_pmu.flags |= PPMU_SIAR_VALID;
return register_power_pmu(&power7_pmu);
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index e37b1e714d2b..ef9685065aaf 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -187,9 +187,19 @@ static const struct attribute_group power8_pmu_events_group = {
.attrs = power8_events_attr,
};
+static struct attribute *power8_pmu_caps_attrs[] = {
+ NULL
+};
+
+static struct attribute_group power8_pmu_caps_group = {
+ .name = "caps",
+ .attrs = power8_pmu_caps_attrs,
+};
+
static const struct attribute_group *power8_pmu_attr_groups[] = {
&isa207_pmu_format_group,
&power8_pmu_events_group,
+ &power8_pmu_caps_group,
NULL,
};
@@ -381,9 +391,10 @@ static struct power_pmu power8_pmu = {
int __init init_power8_pmu(void)
{
int rc;
+ unsigned int pvr = mfspr(SPRN_PVR);
- if (!cur_cpu_spec->oprofile_cpu_type ||
- strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power8"))
+ if (PVR_VER(pvr) != PVR_POWER8E && PVR_VER(pvr) != PVR_POWER8NVL &&
+ PVR_VER(pvr) != PVR_POWER8)
return -ENODEV;
rc = register_power_pmu(&power8_pmu);
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index 3ad40ffb9256..cb6a7dc02dd7 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -258,9 +258,19 @@ static const struct attribute_group power9_pmu_format_group = {
.attrs = power9_pmu_format_attr,
};
+static struct attribute *power9_pmu_caps_attrs[] = {
+ NULL
+};
+
+static struct attribute_group power9_pmu_caps_group = {
+ .name = "caps",
+ .attrs = power9_pmu_caps_attrs,
+};
+
static const struct attribute_group *power9_pmu_attr_groups[] = {
&power9_pmu_format_group,
&power9_pmu_events_group,
+ &power9_pmu_caps_group,
NULL,
};
@@ -457,9 +467,7 @@ int __init init_power9_pmu(void)
int rc = 0;
unsigned int pvr = mfspr(SPRN_PVR);
- /* Comes from cpu_specs[] */
- if (!cur_cpu_spec->oprofile_cpu_type ||
- strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power9"))
+ if (PVR_VER(pvr) != PVR_POWER9)
return -ENODEV;
/* Blacklist events */
diff --git a/arch/powerpc/perf/ppc970-pmu.c b/arch/powerpc/perf/ppc970-pmu.c
index 09802482ba72..762676fb839e 100644
--- a/arch/powerpc/perf/ppc970-pmu.c
+++ b/arch/powerpc/perf/ppc970-pmu.c
@@ -491,9 +491,10 @@ static struct power_pmu ppc970_pmu = {
int __init init_ppc970_pmu(void)
{
- if (!cur_cpu_spec->oprofile_cpu_type ||
- (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/970")
- && strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/970MP")))
+ unsigned int pvr = mfspr(SPRN_PVR);
+
+ if (PVR_VER(pvr) != PVR_970 && PVR_VER(pvr) != PVR_970MP &&
+ PVR_VER(pvr) != PVR_970FX && PVR_VER(pvr) != PVR_970GX)
return -ENODEV;
return register_power_pmu(&ppc970_pmu);
diff --git a/arch/powerpc/platforms/4xx/cpm.c b/arch/powerpc/platforms/4xx/cpm.c
index 1d3bc35ee1a7..182e12855c27 100644
--- a/arch/powerpc/platforms/4xx/cpm.c
+++ b/arch/powerpc/platforms/4xx/cpm.c
@@ -63,7 +63,7 @@ static unsigned int cpm_set(unsigned int cpm_reg, unsigned int mask)
* known as class 1, 2 and 3. For class 1 units, they are
* unconditionally put to sleep when the corresponding CPM bit is
* set. For class 2 and 3 units this is not case; if they can be
- * put to to sleep, they will. Here we do not verify, we just
+ * put to sleep, they will. Here we do not verify, we just
* set them and expect them to eventually go off when they can.
*/
value = dcr_read(cpm.dcr_host, cpm.dcr_offset[cpm_reg]);
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_common.c b/arch/powerpc/platforms/52xx/mpc52xx_common.c
index 4348506d667d..409c0ec06265 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_common.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_common.c
@@ -204,43 +204,6 @@ int mpc52xx_set_psc_clkdiv(int psc_id, int clkdiv)
EXPORT_SYMBOL(mpc52xx_set_psc_clkdiv);
/**
- * mpc52xx_get_xtal_freq - Get SYS_XTAL_IN frequency for a device
- *
- * @node: device node
- *
- * Returns the frequency of the external oscillator clock connected
- * to the SYS_XTAL_IN pin, or 0 if it cannot be determined.
- */
-unsigned int mpc52xx_get_xtal_freq(struct device_node *node)
-{
- u32 val;
- unsigned int freq;
-
- if (!mpc52xx_cdm)
- return 0;
-
- freq = mpc5xxx_get_bus_frequency(node);
- if (!freq)
- return 0;
-
- if (in_8(&mpc52xx_cdm->ipb_clk_sel) & 0x1)
- freq *= 2;
-
- val = in_be32(&mpc52xx_cdm->rstcfg);
- if (val & (1 << 5))
- freq *= 8;
- else
- freq *= 4;
- if (val & (1 << 6))
- freq /= 12;
- else
- freq /= 16;
-
- return freq;
-}
-EXPORT_SYMBOL(mpc52xx_get_xtal_freq);
-
-/**
* mpc52xx_restart: ppc_md->restart hook for mpc5200 using the watchdog timer
*/
void __noreturn mpc52xx_restart(char *cmd)
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
index 968f5b727273..e43e08d991ea 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
@@ -60,6 +60,7 @@
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/kernel.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/watchdog.h>
@@ -316,17 +317,15 @@ mpc52xx_gpt_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
return 0;
}
-static void
-mpc52xx_gpt_gpio_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node)
+static void mpc52xx_gpt_gpio_setup(struct mpc52xx_gpt_priv *gpt)
{
int rc;
- /* Only setup GPIO if the device tree claims the GPT is
- * a GPIO controller */
- if (!of_find_property(node, "gpio-controller", NULL))
+ /* Only setup GPIO if the device claims the GPT is a GPIO controller */
+ if (!device_property_present(gpt->dev, "gpio-controller"))
return;
- gpt->gc.label = kasprintf(GFP_KERNEL, "%pOF", node);
+ gpt->gc.label = kasprintf(GFP_KERNEL, "%pfw", dev_fwnode(gpt->dev));
if (!gpt->gc.label) {
dev_err(gpt->dev, "out of memory\n");
return;
@@ -338,7 +337,7 @@ mpc52xx_gpt_gpio_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node)
gpt->gc.get = mpc52xx_gpt_gpio_get;
gpt->gc.set = mpc52xx_gpt_gpio_set;
gpt->gc.base = -1;
- gpt->gc.of_node = node;
+ gpt->gc.parent = gpt->dev;
/* Setup external pin in GPIO mode */
clrsetbits_be32(&gpt->regs->mode, MPC52xx_GPT_MODE_MS_MASK,
@@ -351,8 +350,7 @@ mpc52xx_gpt_gpio_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node)
dev_dbg(gpt->dev, "%s() complete.\n", __func__);
}
#else /* defined(CONFIG_GPIOLIB) */
-static void
-mpc52xx_gpt_gpio_setup(struct mpc52xx_gpt_priv *p, struct device_node *np) { }
+static void mpc52xx_gpt_gpio_setup(struct mpc52xx_gpt_priv *gpt) { }
#endif /* defined(CONFIG_GPIOLIB) */
/***********************************************************************
@@ -722,14 +720,14 @@ static int mpc52xx_gpt_probe(struct platform_device *ofdev)
raw_spin_lock_init(&gpt->lock);
gpt->dev = &ofdev->dev;
- gpt->ipb_freq = mpc5xxx_get_bus_frequency(ofdev->dev.of_node);
+ gpt->ipb_freq = mpc5xxx_get_bus_frequency(&ofdev->dev);
gpt->regs = of_iomap(ofdev->dev.of_node, 0);
if (!gpt->regs)
return -ENOMEM;
dev_set_drvdata(&ofdev->dev, gpt);
- mpc52xx_gpt_gpio_setup(gpt, ofdev->dev.of_node);
+ mpc52xx_gpt_gpio_setup(gpt);
mpc52xx_gpt_irq_setup(gpt, ofdev->dev.of_node);
mutex_lock(&mpc52xx_gpt_list_mutex);
@@ -755,11 +753,6 @@ static int mpc52xx_gpt_probe(struct platform_device *ofdev)
return 0;
}
-static int mpc52xx_gpt_remove(struct platform_device *ofdev)
-{
- return -EBUSY;
-}
-
static const struct of_device_id mpc52xx_gpt_match[] = {
{ .compatible = "fsl,mpc5200-gpt", },
@@ -772,10 +765,10 @@ static const struct of_device_id mpc52xx_gpt_match[] = {
static struct platform_driver mpc52xx_gpt_driver = {
.driver = {
.name = "mpc52xx-gpt",
+ .suppress_bind_attrs = true,
.of_match_table = mpc52xx_gpt_match,
},
.probe = mpc52xx_gpt_probe,
- .remove = mpc52xx_gpt_remove,
};
static int __init mpc52xx_gpt_init(void)
diff --git a/arch/powerpc/platforms/83xx/misc.c b/arch/powerpc/platforms/83xx/misc.c
index 3285dabcf923..2fb2a85d131f 100644
--- a/arch/powerpc/platforms/83xx/misc.c
+++ b/arch/powerpc/platforms/83xx/misc.c
@@ -121,17 +121,15 @@ void __init mpc83xx_setup_pci(void)
void __init mpc83xx_setup_arch(void)
{
+ phys_addr_t immrbase = get_immrbase();
+ int immrsize = IS_ALIGNED(immrbase, SZ_2M) ? SZ_2M : SZ_1M;
+ unsigned long va = fix_to_virt(FIX_IMMR_BASE);
+
if (ppc_md.progress)
ppc_md.progress("mpc83xx_setup_arch()", 0);
- if (!__map_without_bats) {
- phys_addr_t immrbase = get_immrbase();
- int immrsize = IS_ALIGNED(immrbase, SZ_2M) ? SZ_2M : SZ_1M;
- unsigned long va = fix_to_virt(FIX_IMMR_BASE);
-
- setbat(-1, va, immrbase, immrsize, PAGE_KERNEL_NCG);
- update_bats();
- }
+ setbat(-1, va, immrbase, immrsize, PAGE_KERNEL_NCG);
+ update_bats();
}
int machine_check_83xx(struct pt_regs *regs)
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
index 6d47a5b81485..3fa8979ac8a6 100644
--- a/arch/powerpc/platforms/83xx/suspend.c
+++ b/arch/powerpc/platforms/83xx/suspend.c
@@ -100,7 +100,6 @@ struct pmc_type {
int has_deep_sleep;
};
-static struct platform_device *pmc_dev;
static int has_deep_sleep, deep_sleeping;
static int pmc_irq;
static struct mpc83xx_pmc __iomem *pmc_regs;
@@ -319,7 +318,27 @@ static const struct platform_suspend_ops mpc83xx_suspend_ops = {
.end = mpc83xx_suspend_end,
};
-static const struct of_device_id pmc_match[];
+static struct pmc_type pmc_types[] = {
+ {
+ .has_deep_sleep = 1,
+ },
+ {
+ .has_deep_sleep = 0,
+ }
+};
+
+static const struct of_device_id pmc_match[] = {
+ {
+ .compatible = "fsl,mpc8313-pmc",
+ .data = &pmc_types[0],
+ },
+ {
+ .compatible = "fsl,mpc8349-pmc",
+ .data = &pmc_types[1],
+ },
+ {}
+};
+
static int pmc_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
@@ -336,7 +355,6 @@ static int pmc_probe(struct platform_device *ofdev)
has_deep_sleep = type->has_deep_sleep;
immrbase = get_immrbase();
- pmc_dev = ofdev;
is_pci_agent = mpc83xx_is_pci_agent();
if (is_pci_agent < 0)
@@ -401,39 +419,13 @@ out:
return ret;
}
-static int pmc_remove(struct platform_device *ofdev)
-{
- return -EPERM;
-};
-
-static struct pmc_type pmc_types[] = {
- {
- .has_deep_sleep = 1,
- },
- {
- .has_deep_sleep = 0,
- }
-};
-
-static const struct of_device_id pmc_match[] = {
- {
- .compatible = "fsl,mpc8313-pmc",
- .data = &pmc_types[0],
- },
- {
- .compatible = "fsl,mpc8349-pmc",
- .data = &pmc_types[1],
- },
- {}
-};
-
static struct platform_driver pmc_driver = {
.driver = {
.name = "mpc83xx-pmc",
.of_match_table = pmc_match,
+ .suppress_bind_attrs = true,
},
.probe = pmc_probe,
- .remove = pmc_remove
};
builtin_platform_driver(pmc_driver);
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index 2be17ffe8714..be16eba0f704 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -62,13 +62,13 @@ config MPC85xx_CDS
This option enables support for the MPC85xx CDS board
config MPC85xx_MDS
- bool "Freescale MPC85xx MDS"
+ bool "Freescale MPC8568 MDS / MPC8569 MDS / P1021 MDS"
select DEFAULT_UIMAGE
select PHYLIB if NETDEVICES
select HAVE_RAPIDIO
select SWIOTLB
help
- This option enables support for the MPC85xx MDS board
+ This option enables support for the MPC8568 MDS, MPC8569 MDS and P1021 MDS boards
config MPC8536_DS
bool "Freescale MPC8536 DS"
@@ -78,28 +78,30 @@ config MPC8536_DS
This option enables support for the MPC8536 DS board
config MPC85xx_DS
- bool "Freescale MPC85xx DS"
+ bool "Freescale MPC8544 DS / MPC8572 DS / P2020 DS"
select PPC_I8259
select DEFAULT_UIMAGE
select FSL_ULI1575 if PCI
select SWIOTLB
help
- This option enables support for the MPC85xx DS (MPC8544 DS) board
+ This option enables support for the MPC8544 DS, MPC8572 DS and P2020 DS boards
config MPC85xx_RDB
- bool "Freescale MPC85xx RDB"
+ bool "Freescale P102x MBG/UTM/RDB and P2020 RDB"
select PPC_I8259
select DEFAULT_UIMAGE
select FSL_ULI1575 if PCI
select SWIOTLB
help
- This option enables support for the MPC85xx RDB (P2020 RDB) board
+ This option enables support for the P1020 MBG PC, P1020 UTM PC,
+ P1020 RDB PC, P1020 RDB PD, P1020 RDB, P1021 RDB PC, P1024 RDB,
+ P1025 RDB, P2020 RDB and P2020 RDB PC boards
config P1010_RDB
- bool "Freescale P1010RDB"
+ bool "Freescale P1010 RDB"
select DEFAULT_UIMAGE
help
- This option enables support for the MPC85xx RDB (P1010 RDB) board
+ This option enables support for the P1010 RDB board
P1010RDB contains P1010Si, which provides CPU performance up to 800
MHz and 1600 DMIPS, additional functionality and faster interfaces
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 9e2df4b66478..5185d942b455 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -2,7 +2,6 @@
config PPC32
bool
default y if !PPC64
- select KASAN_VMALLOC if KASAN && MODULES
config PPC64
bool "64-bit kernel"
@@ -127,18 +126,18 @@ choice
config GENERIC_CPU
bool "Generic (POWER4 and above)"
- depends on PPC64 && !CPU_LITTLE_ENDIAN
- select PPC_64S_HASH_MMU if PPC_BOOK3S_64
+ depends on PPC_BOOK3S_64 && !CPU_LITTLE_ENDIAN
+ select PPC_64S_HASH_MMU
config GENERIC_CPU
bool "Generic (POWER8 and above)"
- depends on PPC64 && CPU_LITTLE_ENDIAN
+ depends on PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN
select ARCH_HAS_FAST_MULTIPLIER
select PPC_64S_HASH_MMU
-config GENERIC_CPU
+config POWERPC_CPU
bool "Generic 32 bits powerpc"
- depends on PPC32 && !PPC_8xx
+ depends on PPC_BOOK3S_32
config CELL_CPU
bool "Cell Broadband Engine"
@@ -174,11 +173,27 @@ config POWER9_CPU
config E5500_CPU
bool "Freescale e5500"
- depends on E500
+ depends on PPC64 && E500
config E6500_CPU
bool "Freescale e6500"
- depends on E500
+ depends on PPC64 && E500
+
+config 405_CPU
+ bool "40x family"
+ depends on 40x
+
+config 440_CPU
+ bool "440 (44x family)"
+ depends on 44x
+
+config 464_CPU
+ bool "464 (44x family)"
+ depends on 44x
+
+config 476_CPU
+ bool "476 (47x family)"
+ depends on PPC_47x
config 860_CPU
bool "8xx family"
@@ -197,11 +212,23 @@ config G4_CPU
depends on PPC_BOOK3S_32
select ALTIVEC
+config E500_CPU
+ bool "e500 (8540)"
+ depends on PPC_85xx && !PPC_E500MC
+
+config E500MC_CPU
+ bool "e500mc"
+ depends on PPC_85xx && PPC_E500MC
+
+config TOOLCHAIN_DEFAULT_CPU
+ bool "Rely on the toolchain's implicit default CPU"
+ depends on PPC32
+
endchoice
config TARGET_CPU_BOOL
bool
- default !GENERIC_CPU
+ default !GENERIC_CPU && !TOOLCHAIN_DEFAULT_CPU
config TARGET_CPU
string
@@ -212,10 +239,17 @@ config TARGET_CPU
default "power7" if POWER7_CPU
default "power8" if POWER8_CPU
default "power9" if POWER9_CPU
+ default "405" if 405_CPU
+ default "440" if 440_CPU
+ default "464" if 464_CPU
+ default "476" if 476_CPU
default "860" if 860_CPU
default "e300c2" if E300C2_CPU
default "e300c3" if E300C3_CPU
default "G4" if G4_CPU
+ default "8540" if E500_CPU
+ default "e500mc" if E500MC_CPU
+ default "powerpc" if POWERPC_CPU
config PPC_BOOK3S
def_bool y
@@ -324,7 +358,7 @@ config PHYS_64BIT
config ALTIVEC
bool "AltiVec Support"
- depends on PPC_BOOK3S_32 || PPC_BOOK3S_64 || (PPC_E500MC && PPC64)
+ depends on PPC_BOOK3S || (PPC_E500MC && PPC64 && !E5500_CPU)
select PPC_FPU
help
This option enables kernel support for the Altivec extensions to the
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index f3291e957a19..5b012abca773 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -223,6 +223,7 @@ static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg)
if (!prop) {
dev_dbg(&dev->dev,
"axon_msi: no msi-address-(32|64) properties found\n");
+ of_node_put(dn);
return -ENOENT;
}
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 34334c32b7f5..320008528edd 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -660,6 +660,7 @@ spufs_init_isolated_loader(void)
return;
loader = of_get_property(dn, "loader", &size);
+ of_node_put(dn);
if (!loader)
return;
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index afc1d6604d12..23c6799cfa5a 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -76,7 +76,7 @@ struct spu_context {
struct address_space *mss; /* 'mss' area mappings. */
struct address_space *psmap; /* 'psmap' area mappings. */
struct mutex mapping_lock;
- u64 object_id; /* user space pointer for oprofile */
+ u64 object_id; /* user space pointer for GNU Debugger */
enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state;
struct mutex state_mutex;
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index f71735ec449f..04daa7f0a03c 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -320,13 +320,6 @@ static void __init pmac_setup_arch(void)
#endif /* CONFIG_ADB */
}
-#ifdef CONFIG_SCSI
-void note_scsi_host(struct device_node *node, void *host)
-{
-}
-EXPORT_SYMBOL(note_scsi_host);
-#endif
-
static int initializing = 1;
static int pmac_late_init(void)
diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig
index e1a05c5a9004..ae248a161b43 100644
--- a/arch/powerpc/platforms/powernv/Kconfig
+++ b/arch/powerpc/platforms/powernv/Kconfig
@@ -19,7 +19,7 @@ config PPC_POWERNV
default y
config OPAL_PRD
- tristate 'OPAL PRD driver'
+ tristate "OPAL PRD driver"
depends on PPC_POWERNV
help
This enables the opal-prd driver, a facility to run processor
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index c8cf2728031a..9de9b2fb163d 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -1609,6 +1609,7 @@ found:
tbl->it_ops = &pnv_ioda1_iommu_ops;
pe->table_group.tce32_start = tbl->it_offset << tbl->it_page_shift;
pe->table_group.tce32_size = tbl->it_size << tbl->it_page_shift;
+ tbl->it_index = (phb->hose->global_number << 16) | pe->pe_number;
if (!iommu_init_table(tbl, phb->hose->node, 0, 0))
panic("Failed to initialize iommu table");
@@ -1779,6 +1780,7 @@ static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe)
res_end = min(window_size, SZ_4G) >> tbl->it_page_shift;
}
+ tbl->it_index = (pe->phb->hose->global_number << 16) | pe->pe_number;
if (iommu_init_table(tbl, pe->phb->hose->node, res_start, res_end))
rc = pnv_pci_ioda2_set_window(&pe->table_group, 0, tbl);
else
diff --git a/arch/powerpc/platforms/powernv/rng.c b/arch/powerpc/platforms/powernv/rng.c
index 3805ad13b8f3..196aa70fe043 100644
--- a/arch/powerpc/platforms/powernv/rng.c
+++ b/arch/powerpc/platforms/powernv/rng.c
@@ -21,24 +21,15 @@
#define DARN_ERR 0xFFFFFFFFFFFFFFFFul
-struct powernv_rng {
+struct pnv_rng {
void __iomem *regs;
void __iomem *regs_real;
unsigned long mask;
};
-static DEFINE_PER_CPU(struct powernv_rng *, powernv_rng);
+static DEFINE_PER_CPU(struct pnv_rng *, pnv_rng);
-int powernv_hwrng_present(void)
-{
- struct powernv_rng *rng;
-
- rng = get_cpu_var(powernv_rng);
- put_cpu_var(rng);
- return rng != NULL;
-}
-
-static unsigned long rng_whiten(struct powernv_rng *rng, unsigned long val)
+static unsigned long rng_whiten(struct pnv_rng *rng, unsigned long val)
{
unsigned long parity;
@@ -58,18 +49,7 @@ static unsigned long rng_whiten(struct powernv_rng *rng, unsigned long val)
return val;
}
-int powernv_get_random_real_mode(unsigned long *v)
-{
- struct powernv_rng *rng;
-
- rng = raw_cpu_read(powernv_rng);
-
- *v = rng_whiten(rng, __raw_rm_readq(rng->regs_real));
-
- return 1;
-}
-
-static int powernv_get_random_darn(unsigned long *v)
+static int pnv_get_random_darn(unsigned long *v)
{
unsigned long val;
@@ -93,29 +73,31 @@ static int __init initialise_darn(void)
return -ENODEV;
for (i = 0; i < 10; i++) {
- if (powernv_get_random_darn(&val)) {
- ppc_md.get_random_seed = powernv_get_random_darn;
+ if (pnv_get_random_darn(&val)) {
+ ppc_md.get_random_seed = pnv_get_random_darn;
return 0;
}
}
return -EIO;
}
-int powernv_get_random_long(unsigned long *v)
+int pnv_get_random_long(unsigned long *v)
{
- struct powernv_rng *rng;
-
- rng = get_cpu_var(powernv_rng);
-
- *v = rng_whiten(rng, in_be64(rng->regs));
-
- put_cpu_var(rng);
-
+ struct pnv_rng *rng;
+
+ if (mfmsr() & MSR_DR) {
+ rng = get_cpu_var(pnv_rng);
+ *v = rng_whiten(rng, in_be64(rng->regs));
+ put_cpu_var(rng);
+ } else {
+ rng = raw_cpu_read(pnv_rng);
+ *v = rng_whiten(rng, __raw_rm_readq(rng->regs_real));
+ }
return 1;
}
-EXPORT_SYMBOL_GPL(powernv_get_random_long);
+EXPORT_SYMBOL_GPL(pnv_get_random_long);
-static __init void rng_init_per_cpu(struct powernv_rng *rng,
+static __init void rng_init_per_cpu(struct pnv_rng *rng,
struct device_node *dn)
{
int chip_id, cpu;
@@ -125,16 +107,16 @@ static __init void rng_init_per_cpu(struct powernv_rng *rng,
pr_warn("No ibm,chip-id found for %pOF.\n", dn);
for_each_possible_cpu(cpu) {
- if (per_cpu(powernv_rng, cpu) == NULL ||
+ if (per_cpu(pnv_rng, cpu) == NULL ||
cpu_to_chip_id(cpu) == chip_id) {
- per_cpu(powernv_rng, cpu) = rng;
+ per_cpu(pnv_rng, cpu) = rng;
}
}
}
static __init int rng_create(struct device_node *dn)
{
- struct powernv_rng *rng;
+ struct pnv_rng *rng;
struct resource res;
unsigned long val;
@@ -160,7 +142,7 @@ static __init int rng_create(struct device_node *dn)
rng_init_per_cpu(rng, dn);
- ppc_md.get_random_seed = powernv_get_random_long;
+ ppc_md.get_random_seed = pnv_get_random_long;
return 0;
}
@@ -208,7 +190,7 @@ static int __init pnv_rng_late_init(void)
if (ppc_md.get_random_seed == pnv_get_random_long_early)
pnv_get_random_long_early(&v);
- if (ppc_md.get_random_seed == powernv_get_random_long) {
+ if (ppc_md.get_random_seed == pnv_get_random_long) {
for_each_compatible_node(dn, NULL, "ibm,power-rng")
of_platform_device_create(dn, NULL, NULL);
}
diff --git a/arch/powerpc/platforms/powernv/vas-fault.c b/arch/powerpc/platforms/powernv/vas-fault.c
index c1bfad56447d..2b47d5a86328 100644
--- a/arch/powerpc/platforms/powernv/vas-fault.c
+++ b/arch/powerpc/platforms/powernv/vas-fault.c
@@ -77,7 +77,7 @@ irqreturn_t vas_fault_thread_fn(int irq, void *data)
/*
* VAS can interrupt with multiple page faults. So process all
* valid CRBs within fault FIFO until reaches invalid CRB.
- * We use CCW[0] and pswid to validate validate CRBs:
+ * We use CCW[0] and pswid to validate CRBs:
*
* CCW[0] Reserved bit. When NX pastes CRB, CCW[0]=0
* OS sets this bit to 1 after reading CRB.
diff --git a/arch/powerpc/platforms/ps3/Kconfig b/arch/powerpc/platforms/ps3/Kconfig
index 610682caabc4..a44869e5ea70 100644
--- a/arch/powerpc/platforms/ps3/Kconfig
+++ b/arch/powerpc/platforms/ps3/Kconfig
@@ -165,7 +165,7 @@ config PS3_LPM
If you intend to use the advanced performance monitoring and
profiling support of the Cell processor with programs like
- oprofile and perfmon2, then say Y or M, otherwise say N.
+ perfmon2, then say Y or M, otherwise say N.
config PS3GELIC_UDBG
bool "PS3 udbg output via UDP broadcasts on Ethernet"
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index f4a647c1f0b2..fb6499977f99 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -141,6 +141,19 @@ config IBMEBUS
help
Bus device driver for GX bus based adapters.
+config PSERIES_PLPKS
+ depends on PPC_PSERIES
+ bool "Support for the Platform Key Storage"
+ help
+ PowerVM provides an isolated Platform Keystore (PKS) storage
+ allocation for each LPAR with individually managed access
+ controls to store sensitive information securely. It can be
+ used to store asymmetric public keys or secrets as required
+ by different use cases. Select this config to enable the
+ operating system interface to the hypervisor to access this space.
+
+ If unsure, select N.
+
config PAPR_SCM
depends on PPC_PSERIES && MEMORY_HOTPLUG && LIBNVDIMM
tristate "Support for the PAPR Storage Class Memory interface"
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 7aaff5323544..14e143b946a3 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_PAPR_SCM) += papr_scm.o
obj-$(CONFIG_PPC_SPLPAR) += vphn.o
obj-$(CONFIG_PPC_SVM) += svm.o
obj-$(CONFIG_FA_DUMP) += rtas-fadump.o
+obj-$(CONFIG_PSERIES_PLPKS) += plpks.o
obj-$(CONFIG_SUSPEND) += suspend.o
obj-$(CONFIG_PPC_VAS) += vas.o vas-sysfs.o
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index 1b0c901a6f3b..8e40ccac0f44 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -71,7 +71,7 @@ static void pseries_pcibios_bus_add_device(struct pci_dev *pdev)
if (pdev->is_virtfn) {
/*
* FIXME: This really should be handled by choosing the right
- * parent PE in in pseries_eeh_init_edev().
+ * parent PE in pseries_eeh_init_edev().
*/
struct eeh_pe *physfn_pe = pci_dev_to_eeh_dev(pdev->physfn)->pe;
struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
diff --git a/arch/powerpc/platforms/pseries/firmware.c b/arch/powerpc/platforms/pseries/firmware.c
index 09c119b2f623..080108d129ed 100644
--- a/arch/powerpc/platforms/pseries/firmware.c
+++ b/arch/powerpc/platforms/pseries/firmware.c
@@ -67,6 +67,7 @@ hypertas_fw_features_table[] = {
{FW_FEATURE_PAPR_SCM, "hcall-scm"},
{FW_FEATURE_RPT_INVALIDATE, "hcall-rpt-invalidate"},
{FW_FEATURE_ENERGY_SCALE_INFO, "hcall-energy-scale-info"},
+ {FW_FEATURE_WATCHDOG, "hcall-watchdog"},
};
/* Build up the firmware features bitmask using the contents of
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index fba64304e859..561adac69022 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -700,6 +700,33 @@ struct iommu_table_ops iommu_table_lpar_multi_ops = {
.get = tce_get_pSeriesLP
};
+/*
+ * Find nearest ibm,dma-window (default DMA window) or direct DMA window or
+ * dynamic 64bit DMA window, walking up the device tree.
+ */
+static struct device_node *pci_dma_find(struct device_node *dn,
+ const __be32 **dma_window)
+{
+ const __be32 *dw = NULL;
+
+ for ( ; dn && PCI_DN(dn); dn = dn->parent) {
+ dw = of_get_property(dn, "ibm,dma-window", NULL);
+ if (dw) {
+ if (dma_window)
+ *dma_window = dw;
+ return dn;
+ }
+ dw = of_get_property(dn, DIRECT64_PROPNAME, NULL);
+ if (dw)
+ return dn;
+ dw = of_get_property(dn, DMA64_PROPNAME, NULL);
+ if (dw)
+ return dn;
+ }
+
+ return NULL;
+}
+
static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
{
struct iommu_table *tbl;
@@ -712,20 +739,10 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
pr_debug("pci_dma_bus_setup_pSeriesLP: setting up bus %pOF\n",
dn);
- /*
- * Find nearest ibm,dma-window (default DMA window), walking up the
- * device tree
- */
- for (pdn = dn; pdn != NULL; pdn = pdn->parent) {
- dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
- if (dma_window != NULL)
- break;
- }
+ pdn = pci_dma_find(dn, &dma_window);
- if (dma_window == NULL) {
+ if (dma_window == NULL)
pr_debug(" no ibm,dma-window property !\n");
- return;
- }
ppci = PCI_DN(pdn);
@@ -735,11 +752,13 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
if (!ppci->table_group) {
ppci->table_group = iommu_pseries_alloc_group(ppci->phb->node);
tbl = ppci->table_group->tables[0];
- iommu_table_setparms_lpar(ppci->phb, pdn, tbl,
- ppci->table_group, dma_window);
+ if (dma_window) {
+ iommu_table_setparms_lpar(ppci->phb, pdn, tbl,
+ ppci->table_group, dma_window);
- if (!iommu_init_table(tbl, ppci->phb->node, 0, 0))
- panic("Failed to initialize iommu table");
+ if (!iommu_init_table(tbl, ppci->phb->node, 0, 0))
+ panic("Failed to initialize iommu table");
+ }
iommu_register_group(ppci->table_group,
pci_domain_nr(bus), 0);
pr_debug(" created table: %p\n", ppci->table_group);
@@ -1021,9 +1040,6 @@ static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
ret = rtas_call(ddw_avail[DDW_QUERY_PE_DMA_WIN], 3, out_sz, query_out,
cfg_addr, BUID_HI(buid), BUID_LO(buid));
- dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x returned %d\n",
- ddw_avail[DDW_QUERY_PE_DMA_WIN], cfg_addr, BUID_HI(buid),
- BUID_LO(buid), ret);
switch (out_sz) {
case 5:
@@ -1041,6 +1057,11 @@ static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
break;
}
+ dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x returned %d, lb=%llx ps=%x wn=%d\n",
+ ddw_avail[DDW_QUERY_PE_DMA_WIN], cfg_addr, BUID_HI(buid),
+ BUID_LO(buid), ret, query->largest_available_block,
+ query->page_size, query->windows_available);
+
return ret;
}
@@ -1232,7 +1253,7 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
bool default_win_removed = false, direct_mapping = false;
bool pmem_present;
struct pci_dn *pci = PCI_DN(pdn);
- struct iommu_table *tbl = pci->table_group->tables[0];
+ struct property *default_win = NULL;
dn = of_find_node_by_type(NULL, "ibm,pmemory");
pmem_present = dn != NULL;
@@ -1289,11 +1310,10 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
* for extensions presence.
*/
if (query.windows_available == 0) {
- struct property *default_win;
int reset_win_ext;
/* DDW + IOMMU on single window may fail if there is any allocation */
- if (iommu_table_in_use(tbl)) {
+ if (iommu_table_in_use(pci->table_group->tables[0])) {
dev_warn(&dev->dev, "current IOMMU table in use, can't be replaced.\n");
goto out_failed;
}
@@ -1429,16 +1449,18 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
pci->table_group->tables[1] = newtbl;
- /* Keep default DMA window struct if removed */
- if (default_win_removed) {
- tbl->it_size = 0;
- vfree(tbl->it_map);
- tbl->it_map = NULL;
- }
-
set_iommu_table_base(&dev->dev, newtbl);
}
+ if (default_win_removed) {
+ iommu_tce_table_put(pci->table_group->tables[0]);
+ pci->table_group->tables[0] = NULL;
+
+ /* default_win is valid here because default_win_removed == true */
+ of_remove_property(pdn, default_win);
+ dev_info(&dev->dev, "Removed default DMA window for %pOF\n", pdn);
+ }
+
spin_lock(&dma_win_list_lock);
list_add(&window->list, &dma_win_list);
spin_unlock(&dma_win_list_lock);
@@ -1503,13 +1525,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
dn = pci_device_to_OF_node(dev);
pr_debug(" node is %pOF\n", dn);
- for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->table_group;
- pdn = pdn->parent) {
- dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
- if (dma_window)
- break;
- }
-
+ pdn = pci_dma_find(dn, &dma_window);
if (!pdn || !PCI_DN(pdn)) {
printk(KERN_WARNING "pci_dma_dev_setup_pSeriesLP: "
"no DMA window found for pci dev=%s dn=%pOF\n",
@@ -1540,7 +1556,6 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask)
{
struct device_node *dn = pci_device_to_OF_node(pdev), *pdn;
- const __be32 *dma_window = NULL;
/* only attempt to use a new window if 64-bit DMA is requested */
if (dma_mask < DMA_BIT_MASK(64))
@@ -1554,13 +1569,7 @@ static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask)
* search upwards in the tree until we either hit a dma-window
* property, OR find a parent with a table already allocated.
*/
- for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->table_group;
- pdn = pdn->parent) {
- dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
- if (dma_window)
- break;
- }
-
+ pdn = pci_dma_find(dn, NULL);
if (pdn && PCI_DN(pdn))
return enable_ddw(pdev, pdn);
diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c
index ab6cdbebb35e..096d09ed89f6 100644
--- a/arch/powerpc/platforms/pseries/kexec.c
+++ b/arch/powerpc/platforms/pseries/kexec.c
@@ -6,7 +6,7 @@
#include <linux/kernel.h>
#include <linux/interrupt.h>
-#include <asm/machdep.h>
+#include <asm/setup.h>
#include <asm/page.h>
#include <asm/firmware.h>
#include <asm/kexec.h>
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 937f9c010b22..e6c117fb6491 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -27,7 +27,7 @@
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/page.h>
-#include <asm/machdep.h>
+#include <asm/setup.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
#include <asm/tlb.h>
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index 78f3f74c7056..3d36a8955eaf 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -48,6 +48,39 @@ struct update_props_workarea {
#define MIGRATION_SCOPE (1)
#define PRRN_SCOPE -2
+#ifdef CONFIG_PPC_WATCHDOG
+static unsigned int nmi_wd_lpm_factor = 200;
+
+#ifdef CONFIG_SYSCTL
+static struct ctl_table nmi_wd_lpm_factor_ctl_table[] = {
+ {
+ .procname = "nmi_wd_lpm_factor",
+ .data = &nmi_wd_lpm_factor,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_douintvec_minmax,
+ },
+ {}
+};
+static struct ctl_table nmi_wd_lpm_factor_sysctl_root[] = {
+ {
+ .procname = "kernel",
+ .mode = 0555,
+ .child = nmi_wd_lpm_factor_ctl_table,
+ },
+ {}
+};
+
+static int __init register_nmi_wd_lpm_factor_sysctl(void)
+{
+ register_sysctl_table(nmi_wd_lpm_factor_sysctl_root);
+
+ return 0;
+}
+device_initcall(register_nmi_wd_lpm_factor_sysctl);
+#endif /* CONFIG_SYSCTL */
+#endif /* CONFIG_PPC_WATCHDOG */
+
static int mobility_rtas_call(int token, char *buf, s32 scope)
{
int rc;
@@ -427,6 +460,43 @@ static int wait_for_vasi_session_suspending(u64 handle)
return ret;
}
+static void wait_for_vasi_session_completed(u64 handle)
+{
+ unsigned long state = 0;
+ int ret;
+
+ pr_info("waiting for memory transfer to complete...\n");
+
+ /*
+ * Wait for transition from H_VASI_RESUMED to H_VASI_COMPLETED.
+ */
+ while (true) {
+ ret = poll_vasi_state(handle, &state);
+
+ /*
+ * If the memory transfer is already complete and the migration
+ * has been cleaned up by the hypervisor, H_PARAMETER is returned,
+ * which is translated to EINVAL by poll_vasi_state().
+ */
+ if (ret == -EINVAL || (!ret && state == H_VASI_COMPLETED)) {
+ pr_info("memory transfer completed.\n");
+ break;
+ }
+
+ if (ret) {
+ pr_err("H_VASI_STATE return error (%d)\n", ret);
+ break;
+ }
+
+ if (state != H_VASI_RESUMED) {
+ pr_err("unexpected H_VASI_STATE result %lu\n", state);
+ break;
+ }
+
+ msleep(500);
+ }
+}
+
static void prod_single(unsigned int target_cpu)
{
long hvrc;
@@ -665,19 +735,36 @@ static int pseries_suspend(u64 handle)
static int pseries_migrate_partition(u64 handle)
{
int ret;
+ unsigned int factor = 0;
+#ifdef CONFIG_PPC_WATCHDOG
+ factor = nmi_wd_lpm_factor;
+#endif
ret = wait_for_vasi_session_suspending(handle);
if (ret)
return ret;
vas_migration_handler(VAS_SUSPEND);
+ if (factor)
+ watchdog_nmi_set_timeout_pct(factor);
+
ret = pseries_suspend(handle);
- if (ret == 0)
+ if (ret == 0) {
post_mobility_fixup();
- else
+ /*
+ * Wait until the memory transfer is complete, so that the user
+ * space process returns from the syscall after the transfer is
+ * complete. This allows the user hooks to be executed at the
+ * right time.
+ */
+ wait_for_vasi_session_completed(handle);
+ } else
pseries_cancel_migration(handle, ret);
+ if (factor)
+ watchdog_nmi_set_timeout_pct(0);
+
vas_migration_handler(VAS_RESUME);
return ret;
diff --git a/arch/powerpc/platforms/pseries/papr_platform_attributes.c b/arch/powerpc/platforms/pseries/papr_platform_attributes.c
index 515150417bb3..526c621b098b 100644
--- a/arch/powerpc/platforms/pseries/papr_platform_attributes.c
+++ b/arch/powerpc/platforms/pseries/papr_platform_attributes.c
@@ -22,6 +22,7 @@
#include <asm/hvcall.h>
#include <asm/machdep.h>
+#include <asm/firmware.h>
#include "pseries.h"
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
index 82cae08976bc..20f6ed813bff 100644
--- a/arch/powerpc/platforms/pseries/papr_scm.c
+++ b/arch/powerpc/platforms/pseries/papr_scm.c
@@ -29,7 +29,7 @@
(1ul << ND_CMD_SET_CONFIG_DATA) | \
(1ul << ND_CMD_CALL))
-/* DIMM health bitmap bitmap indicators */
+/* DIMM health bitmap indicators */
/* SCM device is unable to persist memory contents */
#define PAPR_PMEM_UNARMED (1ULL << (63 - 0))
/* SCM device failed to persist memory contents */
@@ -354,7 +354,7 @@ static int papr_scm_pmu_get_value(struct perf_event *event, struct device *dev,
{
struct papr_scm_perf_stat *stat;
struct papr_scm_perf_stats *stats;
- struct papr_scm_priv *p = (struct papr_scm_priv *)dev->driver_data;
+ struct papr_scm_priv *p = dev_get_drvdata(dev);
int rc, size;
/* Allocate request buffer enough to hold single performance stat */
diff --git a/arch/powerpc/platforms/pseries/plpks.c b/arch/powerpc/platforms/pseries/plpks.c
new file mode 100644
index 000000000000..52aaa2894606
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/plpks.c
@@ -0,0 +1,460 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * POWER LPAR Platform KeyStore (PLPKS)
+ * Copyright (C) 2022 IBM Corporation
+ * Author: Nayna Jain <nayna@linux.ibm.com>
+ *
+ * Provides access to variables stored in the Power LPAR Platform KeyStore (PLPKS).
+ */
+
+#define pr_fmt(fmt) "plpks: " fmt
+
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <asm/hvcall.h>
+
+#include "plpks.h"
+
+#define PKS_FW_OWNER 0x1
+#define PKS_BOOTLOADER_OWNER 0x2
+#define PKS_OS_OWNER 0x3
+
+#define LABEL_VERSION 0
+#define MAX_LABEL_ATTR_SIZE 16
+#define MAX_NAME_SIZE 239
+#define MAX_DATA_SIZE 4000
+
+#define PKS_FLUSH_MAX_TIMEOUT 5000 //msec
+#define PKS_FLUSH_SLEEP 10 //msec
+#define PKS_FLUSH_SLEEP_RANGE 400
+
+static u8 *ospassword;
+static u16 ospasswordlength;
+
+// Retrieved with H_PKS_GET_CONFIG
+static u16 maxpwsize;
+static u16 maxobjsize;
+
+struct plpks_auth {
+ u8 version;
+ u8 consumer;
+ __be64 rsvd0;
+ __be32 rsvd1;
+ __be16 passwordlength;
+ u8 password[];
+} __packed __aligned(16);
+
+struct label_attr {
+ u8 prefix[8];
+ u8 version;
+ u8 os;
+ u8 length;
+ u8 reserved[5];
+};
+
+struct label {
+ struct label_attr attr;
+ u8 name[MAX_NAME_SIZE];
+ size_t size;
+};
+
+static int pseries_status_to_err(int rc)
+{
+ int err;
+
+ switch (rc) {
+ case H_SUCCESS:
+ err = 0;
+ break;
+ case H_FUNCTION:
+ err = -ENXIO;
+ break;
+ case H_P1:
+ case H_P2:
+ case H_P3:
+ case H_P4:
+ case H_P5:
+ case H_P6:
+ err = -EINVAL;
+ break;
+ case H_NOT_FOUND:
+ err = -ENOENT;
+ break;
+ case H_BUSY:
+ err = -EBUSY;
+ break;
+ case H_AUTHORITY:
+ err = -EPERM;
+ break;
+ case H_NO_MEM:
+ err = -ENOMEM;
+ break;
+ case H_RESOURCE:
+ err = -EEXIST;
+ break;
+ case H_TOO_BIG:
+ err = -EFBIG;
+ break;
+ case H_STATE:
+ err = -EIO;
+ break;
+ case H_R_STATE:
+ err = -EIO;
+ break;
+ case H_IN_USE:
+ err = -EEXIST;
+ break;
+ case H_ABORTED:
+ err = -EINTR;
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int plpks_gen_password(void)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE] = { 0 };
+ u8 *password, consumer = PKS_OS_OWNER;
+ int rc;
+
+ password = kzalloc(maxpwsize, GFP_KERNEL);
+ if (!password)
+ return -ENOMEM;
+
+ rc = plpar_hcall(H_PKS_GEN_PASSWORD, retbuf, consumer, 0,
+ virt_to_phys(password), maxpwsize);
+
+ if (!rc) {
+ ospasswordlength = maxpwsize;
+ ospassword = kzalloc(maxpwsize, GFP_KERNEL);
+ if (!ospassword) {
+ kfree(password);
+ return -ENOMEM;
+ }
+ memcpy(ospassword, password, ospasswordlength);
+ } else {
+ if (rc == H_IN_USE) {
+ pr_warn("Password is already set for POWER LPAR Platform KeyStore\n");
+ rc = 0;
+ } else {
+ goto out;
+ }
+ }
+out:
+ kfree(password);
+
+ return pseries_status_to_err(rc);
+}
+
+static struct plpks_auth *construct_auth(u8 consumer)
+{
+ struct plpks_auth *auth;
+
+ if (consumer > PKS_OS_OWNER)
+ return ERR_PTR(-EINVAL);
+
+ auth = kmalloc(struct_size(auth, password, maxpwsize), GFP_KERNEL);
+ if (!auth)
+ return ERR_PTR(-ENOMEM);
+
+ auth->version = 1;
+ auth->consumer = consumer;
+ auth->rsvd0 = 0;
+ auth->rsvd1 = 0;
+
+ if (consumer == PKS_FW_OWNER || consumer == PKS_BOOTLOADER_OWNER) {
+ auth->passwordlength = 0;
+ return auth;
+ }
+
+ memcpy(auth->password, ospassword, ospasswordlength);
+
+ auth->passwordlength = cpu_to_be16(ospasswordlength);
+
+ return auth;
+}
+
+/**
+ * A label is a combination of label attributes + name.
+ * Label attributes are used internally by the kernel and are not exposed to the user.
+ */
+static struct label *construct_label(char *component, u8 varos, u8 *name,
+ u16 namelen)
+{
+ struct label *label;
+ size_t slen;
+
+ if (!name || namelen > MAX_NAME_SIZE)
+ return ERR_PTR(-EINVAL);
+
+ slen = component ? strlen(component) : 0;
+ if (slen > sizeof(label->attr.prefix))
+ return ERR_PTR(-EINVAL);
+
+ label = kzalloc(sizeof(*label), GFP_KERNEL);
+ if (!label)
+ return ERR_PTR(-ENOMEM);
+
+ if (component)
+ memcpy(&label->attr.prefix, component, slen);
+
+ label->attr.version = LABEL_VERSION;
+ label->attr.os = varos;
+ label->attr.length = MAX_LABEL_ATTR_SIZE;
+ memcpy(&label->name, name, namelen);
+
+ label->size = sizeof(struct label_attr) + namelen;
+
+ return label;
+}
+
+static int _plpks_get_config(void)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE] = { 0 };
+ struct {
+ u8 version;
+ u8 flags;
+ __be32 rsvd0;
+ __be16 maxpwsize;
+ __be16 maxobjlabelsize;
+ __be16 maxobjsize;
+ __be32 totalsize;
+ __be32 usedspace;
+ __be32 supportedpolicies;
+ __be64 rsvd1;
+ } __packed config;
+ size_t size;
+ int rc;
+
+ size = sizeof(config);
+
+ rc = plpar_hcall(H_PKS_GET_CONFIG, retbuf, virt_to_phys(&config), size);
+
+ if (rc != H_SUCCESS)
+ return pseries_status_to_err(rc);
+
+ maxpwsize = be16_to_cpu(config.maxpwsize);
+ maxobjsize = be16_to_cpu(config.maxobjsize);
+
+ return 0;
+}
+
+static int plpks_confirm_object_flushed(struct label *label,
+ struct plpks_auth *auth)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE] = { 0 };
+ u64 timeout = 0;
+ u8 status;
+ int rc;
+
+ do {
+ rc = plpar_hcall(H_PKS_CONFIRM_OBJECT_FLUSHED, retbuf,
+ virt_to_phys(auth), virt_to_phys(label),
+ label->size);
+
+ status = retbuf[0];
+ if (rc) {
+ if (rc == H_NOT_FOUND && status == 1)
+ rc = 0;
+ break;
+ }
+
+ if (!rc && status == 1)
+ break;
+
+ usleep_range(PKS_FLUSH_SLEEP,
+ PKS_FLUSH_SLEEP + PKS_FLUSH_SLEEP_RANGE);
+ timeout = timeout + PKS_FLUSH_SLEEP;
+ } while (timeout < PKS_FLUSH_MAX_TIMEOUT);
+
+ rc = pseries_status_to_err(rc);
+
+ return rc;
+}
+
+int plpks_write_var(struct plpks_var var)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE] = { 0 };
+ struct plpks_auth *auth;
+ struct label *label;
+ int rc;
+
+ if (!var.component || !var.data || var.datalen <= 0 ||
+ var.namelen > MAX_NAME_SIZE || var.datalen > MAX_DATA_SIZE)
+ return -EINVAL;
+
+ if (var.policy & SIGNEDUPDATE)
+ return -EINVAL;
+
+ auth = construct_auth(PKS_OS_OWNER);
+ if (IS_ERR(auth))
+ return PTR_ERR(auth);
+
+ label = construct_label(var.component, var.os, var.name, var.namelen);
+ if (IS_ERR(label)) {
+ rc = PTR_ERR(label);
+ goto out;
+ }
+
+ rc = plpar_hcall(H_PKS_WRITE_OBJECT, retbuf, virt_to_phys(auth),
+ virt_to_phys(label), label->size, var.policy,
+ virt_to_phys(var.data), var.datalen);
+
+ if (!rc)
+ rc = plpks_confirm_object_flushed(label, auth);
+
+ if (rc)
+ pr_err("Failed to write variable %s for component %s with error %d\n",
+ var.name, var.component, rc);
+
+ rc = pseries_status_to_err(rc);
+ kfree(label);
+out:
+ kfree(auth);
+
+ return rc;
+}
+
+int plpks_remove_var(char *component, u8 varos, struct plpks_var_name vname)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE] = { 0 };
+ struct plpks_auth *auth;
+ struct label *label;
+ int rc;
+
+ if (!component || vname.namelen > MAX_NAME_SIZE)
+ return -EINVAL;
+
+ auth = construct_auth(PKS_OS_OWNER);
+ if (IS_ERR(auth))
+ return PTR_ERR(auth);
+
+ label = construct_label(component, varos, vname.name, vname.namelen);
+ if (IS_ERR(label)) {
+ rc = PTR_ERR(label);
+ goto out;
+ }
+
+ rc = plpar_hcall(H_PKS_REMOVE_OBJECT, retbuf, virt_to_phys(auth),
+ virt_to_phys(label), label->size);
+
+ if (!rc)
+ rc = plpks_confirm_object_flushed(label, auth);
+
+ if (rc)
+ pr_err("Failed to remove variable %s for component %s with error %d\n",
+ vname.name, component, rc);
+
+ rc = pseries_status_to_err(rc);
+ kfree(label);
+out:
+ kfree(auth);
+
+ return rc;
+}
+
+static int plpks_read_var(u8 consumer, struct plpks_var *var)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE] = { 0 };
+ struct plpks_auth *auth;
+ struct label *label;
+ u8 *output;
+ int rc;
+
+ if (var->namelen > MAX_NAME_SIZE)
+ return -EINVAL;
+
+ auth = construct_auth(PKS_OS_OWNER);
+ if (IS_ERR(auth))
+ return PTR_ERR(auth);
+
+ label = construct_label(var->component, var->os, var->name,
+ var->namelen);
+ if (IS_ERR(label)) {
+ rc = PTR_ERR(label);
+ goto out_free_auth;
+ }
+
+ output = kzalloc(maxobjsize, GFP_KERNEL);
+ if (!output) {
+ rc = -ENOMEM;
+ goto out_free_label;
+ }
+
+ rc = plpar_hcall(H_PKS_READ_OBJECT, retbuf, virt_to_phys(auth),
+ virt_to_phys(label), label->size, virt_to_phys(output),
+ maxobjsize);
+
+ if (rc != H_SUCCESS) {
+ pr_err("Failed to read variable %s for component %s with error %d\n",
+ var->name, var->component, rc);
+ rc = pseries_status_to_err(rc);
+ goto out_free_output;
+ }
+
+ if (var->datalen == 0 || var->datalen > retbuf[0])
+ var->datalen = retbuf[0];
+
+ var->data = kzalloc(var->datalen, GFP_KERNEL);
+ if (!var->data) {
+ rc = -ENOMEM;
+ goto out_free_output;
+ }
+ var->policy = retbuf[1];
+
+ memcpy(var->data, output, var->datalen);
+ rc = 0;
+
+out_free_output:
+ kfree(output);
+out_free_label:
+ kfree(label);
+out_free_auth:
+ kfree(auth);
+
+ return rc;
+}
+
+int plpks_read_os_var(struct plpks_var *var)
+{
+ return plpks_read_var(PKS_OS_OWNER, var);
+}
+
+int plpks_read_fw_var(struct plpks_var *var)
+{
+ return plpks_read_var(PKS_FW_OWNER, var);
+}
+
+int plpks_read_bootloader_var(struct plpks_var *var)
+{
+ return plpks_read_var(PKS_BOOTLOADER_OWNER, var);
+}
+
+static __init int pseries_plpks_init(void)
+{
+ int rc;
+
+ rc = _plpks_get_config();
+
+ if (rc) {
+ pr_err("POWER LPAR Platform KeyStore is not supported or enabled\n");
+ return rc;
+ }
+
+ rc = plpks_gen_password();
+ if (rc)
+ pr_err("Failed setting POWER LPAR Platform KeyStore Password\n");
+ else
+ pr_info("POWER LPAR Platform KeyStore initialized successfully\n");
+
+ return rc;
+}
+arch_initcall(pseries_plpks_init);
diff --git a/arch/powerpc/platforms/pseries/plpks.h b/arch/powerpc/platforms/pseries/plpks.h
new file mode 100644
index 000000000000..c6a291367bb1
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/plpks.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022 IBM Corporation
+ * Author: Nayna Jain <nayna@linux.ibm.com>
+ *
+ * Platform keystore for pseries LPAR (PLPKS).
+ */
+
+#ifndef _PSERIES_PLPKS_H
+#define _PSERIES_PLPKS_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+
+#define OSSECBOOTAUDIT 0x40000000
+#define OSSECBOOTENFORCE 0x20000000
+#define WORLDREADABLE 0x08000000
+#define SIGNEDUPDATE 0x01000000
+
+#define PLPKS_VAR_LINUX 0x01
+#define PLPKS_VAR_COMMON 0x04
+
+struct plpks_var {
+ char *component;
+ u8 *name;
+ u8 *data;
+ u32 policy;
+ u16 namelen;
+ u16 datalen;
+ u8 os;
+};
+
+struct plpks_var_name {
+ u8 *name;
+ u16 namelen;
+};
+
+struct plpks_var_name_list {
+ u32 varcount;
+ struct plpks_var_name varlist[];
+};
+
+/**
+ * Writes the specified var and its data to PKS.
+ * Any caller of the PKS driver should present a valid component type for
+ * its variable.
+ */
+int plpks_write_var(struct plpks_var var);
+
+/**
+ * Removes the specified var and its data from PKS.
+ */
+int plpks_remove_var(char *component, u8 varos,
+ struct plpks_var_name vname);
+
+/**
+ * Returns the data for the specified os variable.
+ */
+int plpks_read_os_var(struct plpks_var *var);
+
+/**
+ * Returns the data for the specified firmware variable.
+ */
+int plpks_read_fw_var(struct plpks_var *var);
+
+/**
+ * Returns the data for the specified bootloader variable.
+ */
+int plpks_read_bootloader_var(struct plpks_var *var);
+
+#endif
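
For orientation, here is a minimal sketch (not part of the patch) of how an in-kernel consumer might use the interface declared above, assuming CONFIG_PSERIES_PLPKS is enabled; the component string, variable name, payload and policy flag are purely illustrative and chosen by the caller, not taken from this series:

	#include <linux/slab.h>
	#include "plpks.h"

	static int example_store_and_read(void)
	{
		char component[] = "example";		/* caller-chosen prefix, fits the 8-byte label prefix */
		u8 name[] = "example-var";		/* illustrative variable name */
		u8 payload[] = { 0xde, 0xad, 0xbe, 0xef };
		struct plpks_var var = {
			.component = component,
			.name      = name,
			.namelen   = sizeof(name) - 1,
			.data      = payload,
			.datalen   = sizeof(payload),
			.os        = PLPKS_VAR_LINUX,
			.policy    = WORLDREADABLE,
		};
		int rc;

		rc = plpks_write_var(var);		/* store the OS-owned object in the keystore */
		if (rc)
			return rc;

		var.data = NULL;			/* plpks_read_os_var() allocates the buffer */
		var.datalen = 0;
		rc = plpks_read_os_var(&var);		/* read it back; var.data is kmalloc'ed on success */
		if (!rc)
			kfree(var.data);
		return rc;
	}

The write path requires a non-NULL component, a non-empty payload within MAX_DATA_SIZE, and a policy without SIGNEDUPDATE, per the checks in plpks_write_var() above.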
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index ee4f1db49515..489f4c4df468 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -14,6 +14,7 @@
#include <linux/cpu.h>
#include <linux/errno.h>
+#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
@@ -72,6 +73,7 @@
#include <asm/svm.h>
#include <asm/dtl.h>
#include <asm/hvconsole.h>
+#include <asm/setup.h>
#include "pseries.h"
@@ -169,6 +171,18 @@ static void __init fwnmi_init(void)
#endif
}
+/*
+ * Affix a device for the first timer to the platform bus if
+ * we have firmware support for the H_WATCHDOG hypercall.
+ */
+static __init int pseries_wdt_init(void)
+{
+ if (firmware_has_feature(FW_FEATURE_WATCHDOG))
+ platform_device_register_simple("pseries-wdt", 0, NULL, 0);
+ return 0;
+}
+machine_subsys_initcall(pseries, pseries_wdt_init);
+
static void pseries_8259_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -802,9 +816,8 @@ static void __init pSeries_setup_arch(void)
fwnmi_init();
pseries_setup_security_mitigations();
-#ifdef CONFIG_PPC_64S_HASH_MMU
- pseries_lpar_read_hblkrm_characteristics();
-#endif
+ if (!radix_enabled())
+ pseries_lpar_read_hblkrm_characteristics();
/* By default, only probe PCI (can be overridden by rtas_pci) */
pci_add_flags(PCI_PROBE_ONLY);
diff --git a/arch/powerpc/platforms/pseries/vas.c b/arch/powerpc/platforms/pseries/vas.c
index 500a1fc4a1d7..7e6e6dd2e33e 100644
--- a/arch/powerpc/platforms/pseries/vas.c
+++ b/arch/powerpc/platforms/pseries/vas.c
@@ -16,6 +16,7 @@
#include <asm/machdep.h>
#include <asm/hvcall.h>
#include <asm/plpar_wrappers.h>
+#include <asm/firmware.h>
#include <asm/vas.h>
#include "vas.h"
@@ -803,7 +804,7 @@ int vas_reconfig_capabilties(u8 type, int new_nr_creds)
* The total number of available credits may be decreased or
* increased with DLPAR operation. Means some windows have to be
* closed / reopened. Hold the vas_pseries_mutex so that the
- * the user space can not open new windows.
+ * user space can not open new windows.
*/
if (old_nr_creds < new_nr_creds) {
/*
diff --git a/arch/powerpc/purgatory/.gitignore b/arch/powerpc/purgatory/.gitignore
index b8dc6ff34254..5e40575c1f2b 100644
--- a/arch/powerpc/purgatory/.gitignore
+++ b/arch/powerpc/purgatory/.gitignore
@@ -1,3 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
-kexec-purgatory.c
purgatory.ro
diff --git a/arch/powerpc/purgatory/Makefile b/arch/powerpc/purgatory/Makefile
index 348f59581052..a81d155b89ae 100644
--- a/arch/powerpc/purgatory/Makefile
+++ b/arch/powerpc/purgatory/Makefile
@@ -2,17 +2,13 @@
KASAN_SANITIZE := n
-targets += trampoline_$(BITS).o purgatory.ro kexec-purgatory.c
+targets += trampoline_$(BITS).o purgatory.ro
LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined
$(obj)/purgatory.ro: $(obj)/trampoline_$(BITS).o FORCE
$(call if_changed,ld)
-quiet_cmd_bin2c = BIN2C $@
- cmd_bin2c = $(objtree)/scripts/bin2c kexec_purgatory < $< > $@
-
-$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro FORCE
- $(call if_changed,bin2c)
+$(obj)/kexec-purgatory.o: $(obj)/purgatory.ro
obj-y += kexec-purgatory.o
diff --git a/arch/powerpc/purgatory/kexec-purgatory.S b/arch/powerpc/purgatory/kexec-purgatory.S
new file mode 100644
index 000000000000..f494fd5a0526
--- /dev/null
+++ b/arch/powerpc/purgatory/kexec-purgatory.S
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+ .section .rodata, "a"
+
+ .align 8
+kexec_purgatory:
+ .globl kexec_purgatory
+ .incbin "arch/powerpc/purgatory/purgatory.ro"
+.Lkexec_purgatory_end:
+
+ .align 8
+kexec_purgatory_size:
+ .globl kexec_purgatory_size
+ .quad .Lkexec_purgatory_end - kexec_purgatory
diff --git a/arch/powerpc/sysdev/cpm2.c b/arch/powerpc/sysdev/cpm2.c
index 3f130312b6e9..915f4d3991c3 100644
--- a/arch/powerpc/sysdev/cpm2.c
+++ b/arch/powerpc/sysdev/cpm2.c
@@ -107,7 +107,7 @@ EXPORT_SYMBOL(cpm_command);
* memory mapped space.
* The baud rate clock is the system clock divided by something.
* It was set up long ago during the initial boot phase and is
- * is given to us.
+ * given to us.
* Baud rate clocks are zero-based in the driver code (as that maps
* to port numbers). Documentation uses 1-based numbering.
*/
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 1011cfea2e32..af6c8ca824d3 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -38,6 +38,7 @@
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/swiotlb.h>
+#include <asm/setup.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
@@ -521,6 +522,7 @@ int fsl_add_bridge(struct platform_device *pdev, int is_primary)
struct resource rsrc;
const int *bus_range;
u8 hdr_type, progif;
+ u32 class_code;
struct device_node *dev;
struct ccsr_pci __iomem *pci;
u16 temp;
@@ -594,6 +596,13 @@ int fsl_add_bridge(struct platform_device *pdev, int is_primary)
PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS;
if (fsl_pcie_check_link(hose))
hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
+ /* Fix Class Code to PCI_CLASS_BRIDGE_PCI_NORMAL for pre-3.0 controller */
+ if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0) {
+ early_read_config_dword(hose, 0, 0, PCIE_FSL_CSR_CLASSCODE, &class_code);
+ class_code &= 0xff;
+ class_code |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
+ early_write_config_dword(hose, 0, 0, PCIE_FSL_CSR_CLASSCODE, class_code);
+ }
} else {
/*
* Set PBFR(PCI Bus Function Register)[10] = 1 to
diff --git a/arch/powerpc/sysdev/fsl_pci.h b/arch/powerpc/sysdev/fsl_pci.h
index cdbde2e0c96e..093a875d7d1e 100644
--- a/arch/powerpc/sysdev/fsl_pci.h
+++ b/arch/powerpc/sysdev/fsl_pci.h
@@ -18,6 +18,7 @@ struct platform_device;
#define PCIE_LTSSM 0x0404 /* PCIE Link Training and Status */
#define PCIE_LTSSM_L0 0x16 /* L0 state */
+#define PCIE_FSL_CSR_CLASSCODE 0x474 /* FSL GPEX CSR */
#define PCIE_IP_REV_2_2 0x02080202 /* PCIE IP block version Rev2.2 */
#define PCIE_IP_REV_3_0 0x02080300 /* PCIE IP block version Rev3.0 */
#define PIWAR_EN 0x80000000 /* Enable */
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 1bfc9afa8a1a..4647c6074f3b 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -69,10 +69,10 @@
static DEFINE_SPINLOCK(fsl_rio_config_lock);
-#define __fsl_read_rio_config(x, addr, err, op) \
+#define ___fsl_read_rio_config(x, addr, err, op, barrier) \
__asm__ __volatile__( \
"1: "op" %1,0(%2)\n" \
- " eieio\n" \
+ " "barrier"\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: li %1,-1\n" \
@@ -83,6 +83,14 @@ static DEFINE_SPINLOCK(fsl_rio_config_lock);
: "=r" (err), "=r" (x) \
: "b" (addr), "i" (-EFAULT), "0" (err))
+#ifdef CONFIG_BOOKE
+#define __fsl_read_rio_config(x, addr, err, op) \
+ ___fsl_read_rio_config(x, addr, err, op, "mbar")
+#else
+#define __fsl_read_rio_config(x, addr, err, op) \
+ ___fsl_read_rio_config(x, addr, err, op, "eieio")
+#endif
+
void __iomem *rio_regs_win;
void __iomem *rmu_regs_win;
resource_size_t rio_law_start;
diff --git a/arch/powerpc/sysdev/mpc5xxx_clocks.c b/arch/powerpc/sysdev/mpc5xxx_clocks.c
index 834a6d7fbd88..c5bf7e1b3780 100644
--- a/arch/powerpc/sysdev/mpc5xxx_clocks.c
+++ b/arch/powerpc/sysdev/mpc5xxx_clocks.c
@@ -1,31 +1,34 @@
// SPDX-License-Identifier: GPL-2.0
-/**
- * mpc5xxx_get_bus_frequency - Find the bus frequency for a device
- * @node: device node
- *
- * Returns bus frequency (IPS on MPC512x, IPB on MPC52xx),
- * or 0 if the bus frequency cannot be found.
- */
#include <linux/kernel.h>
-#include <linux/of_platform.h>
#include <linux/export.h>
+#include <linux/property.h>
+
#include <asm/mpc5xxx.h>
-unsigned long mpc5xxx_get_bus_frequency(struct device_node *node)
+/**
+ * mpc5xxx_fwnode_get_bus_frequency - Find the bus frequency for a firmware node
+ * @fwnode: firmware node
+ *
+ * Returns bus frequency (IPS on MPC512x, IPB on MPC52xx),
+ * or 0 if the bus frequency cannot be found.
+ */
+unsigned long mpc5xxx_fwnode_get_bus_frequency(struct fwnode_handle *fwnode)
{
- const unsigned int *p_bus_freq = NULL;
+ struct fwnode_handle *parent;
+ u32 bus_freq;
+ int ret;
- of_node_get(node);
- while (node) {
- p_bus_freq = of_get_property(node, "bus-frequency", NULL);
- if (p_bus_freq)
- break;
+ ret = fwnode_property_read_u32(fwnode, "bus-frequency", &bus_freq);
+ if (!ret)
+ return bus_freq;
- node = of_get_next_parent(node);
+ fwnode_for_each_parent_node(fwnode, parent) {
+ ret = fwnode_property_read_u32(parent, "bus-frequency", &bus_freq);
+ if (!ret)
+ return bus_freq;
}
- of_node_put(node);
- return p_bus_freq ? *p_bus_freq : 0;
+ return 0;
}
-EXPORT_SYMBOL(mpc5xxx_get_bus_frequency);
+EXPORT_SYMBOL(mpc5xxx_fwnode_get_bus_frequency);
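
To show how the reworked helper connects with the mpc52xx_gpt.c hunk earlier in this series (which now passes a struct device), here is a hedged sketch of a device-based wrapper forwarding to the new fwnode variant; the wrapper name and placement are assumptions for illustration, not a quote from the patch:

	#include <linux/device.h>
	#include <linux/property.h>
	#include <asm/mpc5xxx.h>

	/* Hypothetical wrapper: resolve the device's fwnode and reuse the new helper. */
	static inline unsigned long example_get_bus_frequency(struct device *dev)
	{
		return mpc5xxx_fwnode_get_bus_frequency(dev_fwnode(dev));
	}

	/* Usage from a probe() path, mirroring the mpc52xx_gpt.c change above:
	 *	gpt->ipb_freq = example_get_bus_frequency(&ofdev->dev);
	 */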
diff --git a/arch/powerpc/sysdev/of_rtc.c b/arch/powerpc/sysdev/of_rtc.c
index 1f408d34a6a7..420f949b7485 100644
--- a/arch/powerpc/sysdev/of_rtc.c
+++ b/arch/powerpc/sysdev/of_rtc.c
@@ -11,6 +11,8 @@
#include <linux/of_platform.h>
#include <linux/slab.h>
+#include <asm/prom.h>
+
static __initdata struct {
const char *compatible;
char *plat_name;
diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c
index d02911e78cfc..e2c8f93b535b 100644
--- a/arch/powerpc/sysdev/xive/spapr.c
+++ b/arch/powerpc/sysdev/xive/spapr.c
@@ -718,6 +718,7 @@ static bool __init xive_get_max_prio(u8 *max_prio)
}
reg = of_get_property(rootdn, "ibm,plat-res-int-priorities", &len);
+ of_node_put(rootdn);
if (!reg) {
pr_err("Failed to read 'ibm,plat-res-int-priorities' property\n");
return false;
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 3d9782ea3fa7..26ef3388c24c 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -116,7 +116,7 @@ struct bpt {
static struct bpt bpts[NBPTS];
static struct bpt dabr[HBP_NUM_MAX];
static struct bpt *iabr;
-static unsigned bpinstr = 0x7fe00008; /* trap */
+static unsigned int bpinstr = PPC_RAW_TRAP();
#define BP_NUM(bp) ((bp) - bpts + 1)
@@ -3047,7 +3047,7 @@ generic_inst_dump(unsigned long adr, long count, int praddr,
dotted = 0;
last_inst = inst;
if (praddr)
- printf(REG" %s", adr, ppc_inst_as_str(inst));
+ printf(REG" %08lx", adr, ppc_inst_as_ulong(inst));
printf("\t");
if (!ppc_inst_prefixed(inst))
dump_func(ppc_inst_val(inst), adr);
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 51713e03c934..ed66c31e4655 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -113,6 +113,7 @@ config RISCV
select MODULES_USE_ELF_RELA if MODULES
select MODULE_SECTIONS if MODULES
select OF
+ select OF_DMA_DEFAULT_COHERENT
select OF_EARLY_FLATTREE
select OF_IRQ
select PCI_DOMAINS_GENERIC if PCI
@@ -218,11 +219,34 @@ config PGTABLE_LEVELS
config LOCKDEP_SUPPORT
def_bool y
+config RISCV_DMA_NONCOHERENT
+ bool
+ select ARCH_HAS_DMA_PREP_COHERENT
+ select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+ select ARCH_HAS_SYNC_DMA_FOR_CPU
+ select ARCH_HAS_SETUP_DMA_OPS
+ select DMA_DIRECT_REMAP
+
source "arch/riscv/Kconfig.socs"
source "arch/riscv/Kconfig.erratas"
menu "Platform type"
+config NONPORTABLE
+ bool "Allow configurations that result in non-portable kernels"
+ help
+ RISC-V kernel binaries are compatible between all known systems
+ whenever possible, but there are some use cases that can only be
+ satisfied by configurations that result in kernel binaries that are
+ not portable between systems.
+
+ Selecting N does not guarantee kernels will be portable to all known
+ systems. Selecting any of the options guarded by NONPORTABLE will
+ result in kernel binaries that are unlikely to be portable between
+ systems.
+
+ If unsure, say N.
+
choice
prompt "Base ISA"
default ARCH_RV64I
@@ -232,6 +256,7 @@ choice
config ARCH_RV32I
bool "RV32I"
+ depends on NONPORTABLE
select 32BIT
select GENERIC_LIB_ASHLDI3
select GENERIC_LIB_ASHRDI3
@@ -352,11 +377,11 @@ config RISCV_ISA_C
bool "Emit compressed instructions when building Linux"
default y
help
- Adds "C" to the ISA subsets that the toolchain is allowed to emit
- when building Linux, which results in compressed instructions in the
- Linux binary.
+ Adds "C" to the ISA subsets that the toolchain is allowed to emit
+ when building Linux, which results in compressed instructions in the
+ Linux binary.
- If you don't know what to do here, say Y.
+ If you don't know what to do here, say Y.
config RISCV_ISA_SVPBMT
bool "SVPBMT extension support"
@@ -376,6 +401,28 @@ config RISCV_ISA_SVPBMT
If you don't know what to do here, say Y.
+config CC_HAS_ZICBOM
+ bool
+ default y if 64BIT && $(cc-option,-mabi=lp64 -march=rv64ima_zicbom)
+ default y if 32BIT && $(cc-option,-mabi=ilp32 -march=rv32ima_zicbom)
+
+config RISCV_ISA_ZICBOM
+ bool "Zicbom extension support for non-coherent DMA operation"
+ depends on CC_HAS_ZICBOM
+ depends on !XIP_KERNEL && MMU
+ select RISCV_DMA_NONCOHERENT
+ select RISCV_ALTERNATIVE
+ default y
+ help
+ Adds support to dynamically detect the presence of the ZICBOM
+ extension (Cache Block Management Operations) and enable its
+ usage.
+
+ The Zicbom extension can be used, for example, to provide
+ non-coherent DMA support on devices that need it.
+
+ If you don't know what to do here, say Y.
+
config FPU
bool "FPU support"
default y
@@ -385,7 +432,7 @@ config FPU
If you don't know what to do here, say Y.
-endmenu
+endmenu # "Platform type"
menu "Kernel features"
@@ -447,7 +494,6 @@ config KEXEC_FILE
config ARCH_HAS_KEXEC_PURGATORY
def_bool KEXEC_FILE
- select BUILD_BIN2C
depends on CRYPTO=y
depends on CRYPTO_SHA256=y
@@ -474,7 +520,7 @@ config COMPAT
If you want to execute 32-bit userspace applications, say Y.
-endmenu
+endmenu # "Kernel features"
menu "Boot options"
@@ -510,7 +556,6 @@ config CMDLINE_EXTEND
cases where the provided arguments are insufficient and
you don't want to or cannot modify them.
-
config CMDLINE_FORCE
bool "Always use the default kernel command string"
help
@@ -553,6 +598,7 @@ config STACKPROTECTOR_PER_TASK
config PHYS_RAM_BASE_FIXED
bool "Explicitly specified physical RAM address"
+ depends on NONPORTABLE
default n
config PHYS_RAM_BASE
@@ -566,7 +612,7 @@ config PHYS_RAM_BASE
config XIP_KERNEL
bool "Kernel Execute-In-Place from ROM"
- depends on MMU && SPARSEMEM
+ depends on MMU && SPARSEMEM && NONPORTABLE
# This prevents XIP from being enabled by all{yes,mod}config, which
# fail to build since XIP doesn't support large kernels.
depends on !COMPILE_TEST
@@ -602,23 +648,30 @@ config XIP_PHYS_ADDR
be linked for and stored to. This address is dependent on your
own flash usage.
-endmenu
+endmenu # "Boot options"
config BUILTIN_DTB
bool
- depends on OF
+ depends on OF && NONPORTABLE
default y if XIP_KERNEL
+config PORTABLE
+ bool
+ default !NONPORTABLE
+ select EFI
+ select OF
+ select MMU
+
menu "Power management options"
source "kernel/power/Kconfig"
-endmenu
+endmenu # "Power management options"
menu "CPU Power Management"
source "drivers/cpuidle/Kconfig"
-endmenu
+endmenu # "CPU Power Management"
source "arch/riscv/kvm/Kconfig"
diff --git a/arch/riscv/Kconfig.erratas b/arch/riscv/Kconfig.erratas
index 457ac72c9b36..6850e9389930 100644
--- a/arch/riscv/Kconfig.erratas
+++ b/arch/riscv/Kconfig.erratas
@@ -55,4 +55,15 @@ config ERRATA_THEAD_PBMT
If you don't know what to do here, say "Y".
-endmenu
+config ERRATA_THEAD_CMO
+ bool "Apply T-Head cache management errata"
+ depends on ERRATA_THEAD
+ select RISCV_DMA_NONCOHERENT
+ default y
+ help
+ This will apply the cache management errata to handle the
+ non-standard cache operations used for non-coherent DMA on T-Head SoCs.
+
+ If you don't know what to do here, say "Y".
+
+endmenu # "CPU errata selection"
diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs
index 85670dc9fe95..69774bb362d6 100644
--- a/arch/riscv/Kconfig.socs
+++ b/arch/riscv/Kconfig.socs
@@ -78,6 +78,6 @@ config SOC_CANAAN_K210_DTB_SOURCE
for the DTS file that will be used to produce the DTB linked into the
kernel.
-endif
+endif # SOC_CANAAN
-endmenu
+endmenu # "SoC selection"
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index 81029d40a672..3fa8ef336822 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -56,6 +56,14 @@ riscv-march-$(CONFIG_RISCV_ISA_C) := $(riscv-march-y)c
toolchain-need-zicsr-zifencei := $(call cc-option-yn, -march=$(riscv-march-y)_zicsr_zifencei)
riscv-march-$(toolchain-need-zicsr-zifencei) := $(riscv-march-y)_zicsr_zifencei
+# Check if the toolchain supports Zicbom extension
+toolchain-supports-zicbom := $(call cc-option-yn, -march=$(riscv-march-y)_zicbom)
+riscv-march-$(toolchain-supports-zicbom) := $(riscv-march-y)_zicbom
+
+# Check if the toolchain supports Zihintpause extension
+toolchain-supports-zihintpause := $(call cc-option-yn, -march=$(riscv-march-y)_zihintpause)
+riscv-march-$(toolchain-supports-zihintpause) := $(riscv-march-y)_zihintpause
+
KBUILD_CFLAGS += -march=$(subst fd,,$(riscv-march-y))
KBUILD_AFLAGS += -march=$(riscv-march-y)
diff --git a/arch/riscv/boot/dts/canaan/Makefile b/arch/riscv/boot/dts/canaan/Makefile
index c61b08ac8554..befe4eb7527b 100644
--- a/arch/riscv/boot/dts/canaan/Makefile
+++ b/arch/riscv/boot/dts/canaan/Makefile
@@ -1,3 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
-dtb-$(CONFIG_SOC_CANAAN_K210_DTB_BUILTIN) += $(addsuffix .dtb, $(CONFIG_SOC_CANAAN_K210_DTB_SOURCE))
-obj-$(CONFIG_SOC_CANAAN_K210_DTB_BUILTIN) += $(addsuffix .o, $(dtb-y))
+dtb-$(CONFIG_SOC_CANAAN) += canaan_kd233.dtb
+dtb-$(CONFIG_SOC_CANAAN) += k210_generic.dtb
+dtb-$(CONFIG_SOC_CANAAN) += sipeed_maix_bit.dtb
+dtb-$(CONFIG_SOC_CANAAN) += sipeed_maix_dock.dtb
+dtb-$(CONFIG_SOC_CANAAN) += sipeed_maix_go.dtb
+dtb-$(CONFIG_SOC_CANAAN) += sipeed_maixduino.dtb
+
+obj-$(CONFIG_SOC_CANAAN_K210_DTB_BUILTIN) += $(addsuffix .dtb.o, $(CONFIG_SOC_CANAAN_K210_DTB_SOURCE))
diff --git a/arch/riscv/boot/dts/canaan/canaan_kd233.dts b/arch/riscv/boot/dts/canaan/canaan_kd233.dts
index f72540bd14a3..8df4cf3656f2 100644
--- a/arch/riscv/boot/dts/canaan/canaan_kd233.dts
+++ b/arch/riscv/boot/dts/canaan/canaan_kd233.dts
@@ -127,10 +127,10 @@
cs-gpios = <&gpio0 20 GPIO_ACTIVE_HIGH>;
panel@0 {
- compatible = "ilitek,ili9341";
+ compatible = "canaan,kd233-tft", "ilitek,ili9341";
reg = <0>;
dc-gpios = <&gpio0 21 GPIO_ACTIVE_HIGH>;
- spi-max-frequency = <15000000>;
+ spi-max-frequency = <10000000>;
status = "disabled";
};
};
@@ -142,7 +142,7 @@
cs-gpios = <&gpio0 16 GPIO_ACTIVE_LOW>;
status = "okay";
- slot@0 {
+ mmc@0 {
compatible = "mmc-spi-slot";
reg = <0>;
voltage-ranges = <3300 3300>;
diff --git a/arch/riscv/boot/dts/canaan/k210.dtsi b/arch/riscv/boot/dts/canaan/k210.dtsi
index 44d338514761..07e2e2649604 100644
--- a/arch/riscv/boot/dts/canaan/k210.dtsi
+++ b/arch/riscv/boot/dts/canaan/k210.dtsi
@@ -65,15 +65,29 @@
compatible = "riscv,cpu-intc";
};
};
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&cpu0>;
+ };
+
+ core1 {
+ cpu = <&cpu1>;
+ };
+ };
+ };
};
sram: memory@80000000 {
device_type = "memory";
+ reg = <0x80000000 0x400000>, /* sram0 4 MiB */
+ <0x80400000 0x200000>, /* sram1 2 MiB */
+ <0x80600000 0x200000>; /* aisram 2 MiB */
+ };
+
+ sram_controller: memory-controller {
compatible = "canaan,k210-sram";
- reg = <0x80000000 0x400000>,
- <0x80400000 0x200000>,
- <0x80600000 0x200000>;
- reg-names = "sram0", "sram1", "aisram";
clocks = <&sysclk K210_CLK_SRAM0>,
<&sysclk K210_CLK_SRAM1>,
<&sysclk K210_CLK_AI>;
@@ -161,7 +175,7 @@
#address-cells = <1>;
#size-cells = <1>;
compatible = "simple-pm-bus";
- ranges;
+ ranges = <0x50200000 0x50200000 0x200000>;
clocks = <&sysclk K210_CLK_APB0>;
gpio1: gpio@50200000 {
@@ -249,7 +263,7 @@
};
i2s0: i2s@50250000 {
- compatible = "snps,designware-i2s";
+ compatible = "canaan,k210-i2s", "snps,designware-i2s";
reg = <0x50250000 0x200>;
interrupts = <5>;
clocks = <&sysclk K210_CLK_I2S0>;
@@ -258,7 +272,7 @@
};
i2s1: i2s@50260000 {
- compatible = "snps,designware-i2s";
+ compatible = "canaan,k210-i2s", "snps,designware-i2s";
reg = <0x50260000 0x200>;
interrupts = <6>;
clocks = <&sysclk K210_CLK_I2S1>;
@@ -267,7 +281,7 @@
};
i2s2: i2s@50270000 {
- compatible = "snps,designware-i2s";
+ compatible = "canaan,k210-i2s", "snps,designware-i2s";
reg = <0x50270000 0x200>;
interrupts = <7>;
clocks = <&sysclk K210_CLK_I2S2>;
@@ -317,28 +331,58 @@
timer0: timer@502d0000 {
compatible = "snps,dw-apb-timer";
- reg = <0x502D0000 0x100>;
- interrupts = <14>, <15>;
+ reg = <0x502D0000 0x14>;
+ interrupts = <14>;
clocks = <&sysclk K210_CLK_TIMER0>,
<&sysclk K210_CLK_APB0>;
clock-names = "timer", "pclk";
resets = <&sysrst K210_RST_TIMER0>;
};
- timer1: timer@502e0000 {
+ timer1: timer@502d0014 {
+ compatible = "snps,dw-apb-timer";
+ reg = <0x502D0014 0x14>;
+ interrupts = <15>;
+ clocks = <&sysclk K210_CLK_TIMER0>,
+ <&sysclk K210_CLK_APB0>;
+ clock-names = "timer", "pclk";
+ resets = <&sysrst K210_RST_TIMER0>;
+ };
+
+ timer2: timer@502e0000 {
+ compatible = "snps,dw-apb-timer";
+ reg = <0x502E0000 0x14>;
+ interrupts = <16>;
+ clocks = <&sysclk K210_CLK_TIMER1>,
+ <&sysclk K210_CLK_APB0>;
+ clock-names = "timer", "pclk";
+ resets = <&sysrst K210_RST_TIMER1>;
+ };
+
+ timer3: timer@502e0014 {
compatible = "snps,dw-apb-timer";
- reg = <0x502E0000 0x100>;
- interrupts = <16>, <17>;
+ reg = <0x502E0014 0x14>;
+ interrupts = <17>;
clocks = <&sysclk K210_CLK_TIMER1>,
<&sysclk K210_CLK_APB0>;
clock-names = "timer", "pclk";
resets = <&sysrst K210_RST_TIMER1>;
};
- timer2: timer@502f0000 {
+ timer4: timer@502f0000 {
+ compatible = "snps,dw-apb-timer";
+ reg = <0x502F0000 0x14>;
+ interrupts = <18>;
+ clocks = <&sysclk K210_CLK_TIMER2>,
+ <&sysclk K210_CLK_APB0>;
+ clock-names = "timer", "pclk";
+ resets = <&sysrst K210_RST_TIMER2>;
+ };
+
+ timer5: timer@502f0014 {
compatible = "snps,dw-apb-timer";
- reg = <0x502F0000 0x100>;
- interrupts = <18>, <19>;
+ reg = <0x502F0014 0x14>;
+ interrupts = <19>;
clocks = <&sysclk K210_CLK_TIMER2>,
<&sysclk K210_CLK_APB0>;
clock-names = "timer", "pclk";
@@ -350,7 +394,7 @@
#address-cells = <1>;
#size-cells = <1>;
compatible = "simple-pm-bus";
- ranges;
+ ranges = <0x50400000 0x50400000 0x40100>;
clocks = <&sysclk K210_CLK_APB1>;
wdt0: watchdog@50400000 {
@@ -405,7 +449,7 @@
#address-cells = <1>;
#size-cells = <1>;
compatible = "simple-pm-bus";
- ranges;
+ ranges = <0x52000000 0x52000000 0x2000200>;
clocks = <&sysclk K210_CLK_APB2>;
spi0: spi@52000000 {
@@ -419,7 +463,6 @@
clock-names = "ssi_clk", "pclk";
resets = <&sysrst K210_RST_SPI0>;
reset-names = "spi";
- spi-max-frequency = <25000000>;
num-cs = <4>;
reg-io-width = <4>;
};
@@ -435,7 +478,6 @@
clock-names = "ssi_clk", "pclk";
resets = <&sysrst K210_RST_SPI1>;
reset-names = "spi";
- spi-max-frequency = <25000000>;
num-cs = <4>;
reg-io-width = <4>;
};
@@ -451,8 +493,7 @@
clock-names = "ssi_clk", "pclk";
resets = <&sysrst K210_RST_SPI3>;
reset-names = "spi";
- /* Could possibly go up to 200 MHz */
- spi-max-frequency = <100000000>;
+
num-cs = <4>;
reg-io-width = <4>;
};
diff --git a/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts b/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts
index 8abdbe26a1d0..6d25bf07481a 100644
--- a/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts
+++ b/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts
@@ -189,7 +189,7 @@
cs-gpios = <&gpio0 13 GPIO_ACTIVE_LOW>;
status = "okay";
- slot@0 {
+ mmc@0 {
compatible = "mmc-spi-slot";
reg = <0>;
voltage-ranges = <3300 3300>;
diff --git a/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts b/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts
index 3c6df1ecf76f..f4f4d8d5e8b8 100644
--- a/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts
+++ b/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts
@@ -191,7 +191,7 @@
cs-gpios = <&gpio0 13 GPIO_ACTIVE_LOW>;
status = "okay";
- slot@0 {
+ mmc@0 {
compatible = "mmc-spi-slot";
reg = <0>;
voltage-ranges = <3300 3300>;
diff --git a/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts b/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts
index 03c9843d503e..0d86df47e1ed 100644
--- a/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts
+++ b/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts
@@ -199,7 +199,7 @@
cs-gpios = <&gpio0 13 GPIO_ACTIVE_LOW>;
status = "okay";
- slot@0 {
+ mmc@0 {
compatible = "mmc-spi-slot";
reg = <0>;
voltage-ranges = <3300 3300>;
diff --git a/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts b/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts
index 7164ad063178..5c05c498e2b8 100644
--- a/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts
+++ b/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts
@@ -164,7 +164,7 @@
cs-gpios = <&gpio1_0 2 GPIO_ACTIVE_LOW>;
status = "okay";
- slot@0 {
+ mmc@0 {
compatible = "mmc-spi-slot";
reg = <0>;
voltage-ranges = <3300 3300>;
diff --git a/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts b/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts
index 044982a11df5..f3f87ed2007f 100644
--- a/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts
+++ b/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts
@@ -84,12 +84,10 @@
phy1: ethernet-phy@9 {
reg = <9>;
- ti,fifo-depth = <0x1>;
};
phy0: ethernet-phy@8 {
reg = <8>;
- ti,fifo-depth = <0x1>;
};
};
@@ -102,7 +100,6 @@
disable-wp;
cap-sd-highspeed;
cap-mmc-highspeed;
- card-detect-delay = <200>;
mmc-ddr-1_8v;
mmc-hs200-1_8v;
sd-uhs-sdr12;
diff --git a/arch/riscv/boot/dts/microchip/mpfs-polarberry.dts b/arch/riscv/boot/dts/microchip/mpfs-polarberry.dts
index 82c93c8f5c17..c87cc2d8fe29 100644
--- a/arch/riscv/boot/dts/microchip/mpfs-polarberry.dts
+++ b/arch/riscv/boot/dts/microchip/mpfs-polarberry.dts
@@ -54,12 +54,10 @@
phy1: ethernet-phy@5 {
reg = <5>;
- ti,fifo-depth = <0x01>;
};
phy0: ethernet-phy@4 {
reg = <4>;
- ti,fifo-depth = <0x01>;
};
};
@@ -72,7 +70,6 @@
disable-wp;
cap-sd-highspeed;
cap-mmc-highspeed;
- card-detect-delay = <200>;
mmc-ddr-1_8v;
mmc-hs200-1_8v;
sd-uhs-sdr12;
diff --git a/arch/riscv/boot/dts/microchip/mpfs.dtsi b/arch/riscv/boot/dts/microchip/mpfs.dtsi
index 3c83e98e82e4..74493344ea41 100644
--- a/arch/riscv/boot/dts/microchip/mpfs.dtsi
+++ b/arch/riscv/boot/dts/microchip/mpfs.dtsi
@@ -142,6 +142,30 @@
interrupt-controller;
};
};
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&cpu0>;
+ };
+
+ core1 {
+ cpu = <&cpu1>;
+ };
+
+ core2 {
+ cpu = <&cpu2>;
+ };
+
+ core3 {
+ cpu = <&cpu3>;
+ };
+
+ core4 {
+ cpu = <&cpu4>;
+ };
+ };
+ };
};
refclk: mssrefclk {
@@ -169,7 +193,7 @@
cache-size = <2097152>;
cache-unified;
interrupt-parent = <&plic>;
- interrupts = <1>, <2>, <3>;
+ interrupts = <1>, <3>, <4>, <2>;
};
clint: clint@2000000 {
@@ -291,7 +315,6 @@
interrupt-parent = <&plic>;
interrupts = <54>;
clocks = <&clkcfg CLK_SPI0>;
- spi-max-frequency = <25000000>;
status = "disabled";
};
@@ -303,7 +326,6 @@
interrupt-parent = <&plic>;
interrupts = <55>;
clocks = <&clkcfg CLK_SPI1>;
- spi-max-frequency = <25000000>;
status = "disabled";
};
@@ -315,7 +337,6 @@
interrupt-parent = <&plic>;
interrupts = <85>;
clocks = <&clkcfg CLK_QSPI>;
- spi-max-frequency = <25000000>;
status = "disabled";
};
@@ -464,9 +485,8 @@
ranges = <0x3000000 0x0 0x8000000 0x20 0x8000000 0x0 0x80000000>;
msi-parent = <&pcie>;
msi-controller;
- microchip,axi-m-atr0 = <0x10 0x0>;
status = "disabled";
- pcie_intc: legacy-interrupt-controller {
+ pcie_intc: interrupt-controller {
#address-cells = <0>;
#interrupt-cells = <1>;
interrupt-controller;
diff --git a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
index e3172d0ffac4..24bba83bec77 100644
--- a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
+++ b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
@@ -133,6 +133,30 @@
interrupt-controller;
};
};
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&cpu0>;
+ };
+
+ core1 {
+ cpu = <&cpu1>;
+ };
+
+ core2 {
+ cpu = <&cpu2>;
+ };
+
+ core3 {
+ cpu = <&cpu3>;
+ };
+
+ core4 {
+ cpu = <&cpu4>;
+ };
+ };
+ };
};
soc {
#address-cells = <2>;
diff --git a/arch/riscv/boot/dts/sifive/fu740-c000.dtsi b/arch/riscv/boot/dts/sifive/fu740-c000.dtsi
index 7b77c13496d8..43bed6c0a84f 100644
--- a/arch/riscv/boot/dts/sifive/fu740-c000.dtsi
+++ b/arch/riscv/boot/dts/sifive/fu740-c000.dtsi
@@ -134,6 +134,30 @@
interrupt-controller;
};
};
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&cpu0>;
+ };
+
+ core1 {
+ cpu = <&cpu1>;
+ };
+
+ core2 {
+ cpu = <&cpu2>;
+ };
+
+ core3 {
+ cpu = <&cpu3>;
+ };
+
+ core4 {
+ cpu = <&cpu4>;
+ };
+ };
+ };
};
soc {
#address-cells = <2>;
diff --git a/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
index c4ed9efdff03..07387f9c135c 100644
--- a/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
+++ b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
@@ -4,6 +4,8 @@
#include "fu740-c000.dtsi"
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/pwm/pwm.h>
/* Clock frequency (in Hz) of the PCB crystal for rtcclk */
#define RTCCLK_FREQ 1000000
@@ -44,6 +46,46 @@
compatible = "gpio-poweroff";
gpios = <&gpio 2 GPIO_ACTIVE_LOW>;
};
+
+ led-controller-1 {
+ compatible = "pwm-leds";
+
+ led-d12 {
+ pwms = <&pwm0 0 7812500 PWM_POLARITY_INVERTED>;
+ active-low;
+ color = <LED_COLOR_ID_GREEN>;
+ max-brightness = <255>;
+ label = "d12";
+ };
+ };
+
+ led-controller-2 {
+ compatible = "pwm-leds-multicolor";
+
+ multi-led {
+ color = <LED_COLOR_ID_RGB>;
+ max-brightness = <255>;
+ label = "d2";
+
+ led-red {
+ pwms = <&pwm0 2 7812500 PWM_POLARITY_INVERTED>;
+ active-low;
+ color = <LED_COLOR_ID_RED>;
+ };
+
+ led-green {
+ pwms = <&pwm0 1 7812500 PWM_POLARITY_INVERTED>;
+ active-low;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led-blue {
+ pwms = <&pwm0 3 7812500 PWM_POLARITY_INVERTED>;
+ active-low;
+ color = <LED_COLOR_ID_BLUE>;
+ };
+ };
+ };
};
&uart0 {
@@ -90,7 +132,7 @@
compatible = "dlg,da9063-rtc";
};
- wdt {
+ watchdog {
compatible = "dlg,da9063-watchdog";
};
diff --git a/arch/riscv/boot/dts/starfive/jh7100.dtsi b/arch/riscv/boot/dts/starfive/jh7100.dtsi
index 69f22f9aad9d..000447482aca 100644
--- a/arch/riscv/boot/dts/starfive/jh7100.dtsi
+++ b/arch/riscv/boot/dts/starfive/jh7100.dtsi
@@ -17,7 +17,7 @@
#address-cells = <1>;
#size-cells = <0>;
- cpu@0 {
+ U74_0: cpu@0 {
compatible = "sifive,u74-mc", "riscv";
reg = <0>;
d-cache-block-size = <64>;
@@ -42,7 +42,7 @@
};
};
- cpu@1 {
+ U74_1: cpu@1 {
compatible = "sifive,u74-mc", "riscv";
reg = <1>;
d-cache-block-size = <64>;
@@ -66,6 +66,18 @@
#interrupt-cells = <1>;
};
};
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&U74_0>;
+ };
+
+ core1 {
+ cpu = <&U74_1>;
+ };
+ };
+ };
};
osc_sys: osc_sys {
@@ -118,7 +130,7 @@
interrupt-controller;
#address-cells = <0>;
#interrupt-cells = <1>;
- riscv,ndev = <127>;
+ riscv,ndev = <133>;
};
clkgen: clock-controller@11800000 {
diff --git a/arch/riscv/configs/32-bit.config b/arch/riscv/configs/32-bit.config
index 43f41323b67e..f6af0f708df4 100644
--- a/arch/riscv/configs/32-bit.config
+++ b/arch/riscv/configs/32-bit.config
@@ -1,2 +1,4 @@
CONFIG_ARCH_RV32I=y
CONFIG_32BIT=y
+# CONFIG_PORTABLE is not set
+CONFIG_NONPORTABLE=y
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index 0cc17db8aaba..aed332a9d4ea 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -6,8 +6,17 @@ CONFIG_BPF_SYSCALL=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_CGROUPS=y
+CONFIG_MEMCG=y
CONFIG_CGROUP_SCHED=y
CONFIG_CFS_BANDWIDTH=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
CONFIG_CGROUP_BPF=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
@@ -18,6 +27,7 @@ CONFIG_EXPERT=y
CONFIG_PROFILING=y
CONFIG_SOC_MICROCHIP_POLARFIRE=y
CONFIG_SOC_SIFIVE=y
+CONFIG_SOC_STARFIVE=y
CONFIG_SOC_VIRT=y
CONFIG_SMP=y
CONFIG_HOTPLUG_CPU=y
@@ -28,9 +38,11 @@ CONFIG_KVM=m
CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
+CONFIG_BLK_DEV_THROTTLING=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
@@ -38,7 +50,43 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
+CONFIG_INET_ESP=m
+CONFIG_NETFILTER=y
+CONFIG_BRIDGE_NETFILTER=m
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_MARK=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_IPVS=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_NFCT=y
+CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_NF_LOG_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_BRIDGE=m
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_VLAN_8021Q=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_CLS_CGROUP=m
CONFIG_NETLINK_DIAG=y
+CONFIG_CGROUP_NET_PRIO=y
CONFIG_NET_9P=y
CONFIG_NET_9P_VIRTIO=y
CONFIG_PCI=y
@@ -57,7 +105,15 @@ CONFIG_SCSI_VIRTIO=y
CONFIG_ATA=y
CONFIG_SATA_AHCI=y
CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_THIN_PROVISIONING=m
CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+CONFIG_MACVLAN=m
+CONFIG_IPVLAN=m
+CONFIG_VXLAN=m
+CONFIG_VETH=m
CONFIG_VIRTIO_NET=y
CONFIG_MACB=y
CONFIG_E1000E=y
@@ -105,7 +161,11 @@ CONFIG_RPMSG_CTRL=y
CONFIG_RPMSG_VIRTIO=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_AUTOFS4_FS=y
+CONFIG_OVERLAY_FS=m
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
@@ -119,6 +179,10 @@ CONFIG_ROOT_NFS=y
CONFIG_9P_FS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=m
+CONFIG_SECURITY=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_APPARMOR=y
+CONFIG_DEFAULT_SECURITY_DAC=y
CONFIG_CRYPTO_USER_API_HASH=y
CONFIG_CRYPTO_DEV_VIRTIO=y
CONFIG_PRINTK_TIME=y
@@ -137,7 +201,6 @@ CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_DEBUG_RWSEMS=y
CONFIG_DEBUG_ATOMIC_SLEEP=y
-CONFIG_STACKTRACE=y
CONFIG_DEBUG_LIST=y
CONFIG_DEBUG_PLIST=y
CONFIG_DEBUG_SG=y
diff --git a/arch/riscv/configs/nommu_k210_defconfig b/arch/riscv/configs/nommu_k210_defconfig
index 2438fa39f8ae..96fe8def644c 100644
--- a/arch/riscv/configs/nommu_k210_defconfig
+++ b/arch/riscv/configs/nommu_k210_defconfig
@@ -28,6 +28,7 @@ CONFIG_EMBEDDED=y
CONFIG_SLOB=y
# CONFIG_MMU is not set
CONFIG_SOC_CANAAN=y
+CONFIG_NONPORTABLE=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
CONFIG_CMDLINE="earlycon console=ttySIF0"
diff --git a/arch/riscv/configs/nommu_k210_sdcard_defconfig b/arch/riscv/configs/nommu_k210_sdcard_defconfig
index 9a133e63ae5b..379740654373 100644
--- a/arch/riscv/configs/nommu_k210_sdcard_defconfig
+++ b/arch/riscv/configs/nommu_k210_sdcard_defconfig
@@ -20,6 +20,7 @@ CONFIG_EMBEDDED=y
CONFIG_SLOB=y
# CONFIG_MMU is not set
CONFIG_SOC_CANAAN=y
+CONFIG_NONPORTABLE=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
CONFIG_CMDLINE="earlycon console=ttySIF0 root=/dev/mmcblk0p1 rootwait ro"
diff --git a/arch/riscv/configs/nommu_virt_defconfig b/arch/riscv/configs/nommu_virt_defconfig
index 5269fbb6b4fc..1a56eda5ce46 100644
--- a/arch/riscv/configs/nommu_virt_defconfig
+++ b/arch/riscv/configs/nommu_virt_defconfig
@@ -25,6 +25,7 @@ CONFIG_EXPERT=y
CONFIG_SLOB=y
# CONFIG_MMU is not set
CONFIG_SOC_VIRT=y
+CONFIG_NONPORTABLE=y
CONFIG_SMP=y
CONFIG_CMDLINE="root=/dev/vda rw earlycon=uart8250,mmio,0x10000000,115200n8 console=ttyS0"
CONFIG_CMDLINE_FORCE=y
diff --git a/arch/riscv/configs/rv32_defconfig b/arch/riscv/configs/rv32_defconfig
index 6cd9d84d3e13..38760e4296cf 100644
--- a/arch/riscv/configs/rv32_defconfig
+++ b/arch/riscv/configs/rv32_defconfig
@@ -18,6 +18,7 @@ CONFIG_EXPERT=y
CONFIG_PROFILING=y
CONFIG_SOC_SIFIVE=y
CONFIG_SOC_VIRT=y
+CONFIG_NONPORTABLE=y
CONFIG_ARCH_RV32I=y
CONFIG_SMP=y
CONFIG_HOTPLUG_CPU=y
diff --git a/arch/riscv/errata/thead/errata.c b/arch/riscv/errata/thead/errata.c
index e5d75270b99c..202c83f677b2 100644
--- a/arch/riscv/errata/thead/errata.c
+++ b/arch/riscv/errata/thead/errata.c
@@ -14,40 +14,46 @@
#include <asm/patch.h>
#include <asm/vendorid_list.h>
-struct errata_info {
- char name[ERRATA_STRING_LENGTH_MAX];
- bool (*check_func)(unsigned long arch_id, unsigned long impid);
- unsigned int stage;
-};
+static bool errata_probe_pbmt(unsigned int stage,
+ unsigned long arch_id, unsigned long impid)
+{
+ if (arch_id != 0 || impid != 0)
+ return false;
+
+ if (stage == RISCV_ALTERNATIVES_EARLY_BOOT ||
+ stage == RISCV_ALTERNATIVES_MODULE)
+ return true;
+
+ return false;
+}
-static bool errata_mt_check_func(unsigned long arch_id, unsigned long impid)
+static bool errata_probe_cmo(unsigned int stage,
+ unsigned long arch_id, unsigned long impid)
{
+#ifdef CONFIG_ERRATA_THEAD_CMO
if (arch_id != 0 || impid != 0)
return false;
+
+ if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
+ return false;
+
+ riscv_noncoherent_supported();
return true;
+#else
+ return false;
+#endif
}
-static const struct errata_info errata_list[ERRATA_THEAD_NUMBER] = {
- {
- .name = "memory-types",
- .stage = RISCV_ALTERNATIVES_EARLY_BOOT,
- .check_func = errata_mt_check_func
- },
-};
-
-static u32 thead_errata_probe(unsigned int stage, unsigned long archid, unsigned long impid)
+static u32 thead_errata_probe(unsigned int stage,
+ unsigned long archid, unsigned long impid)
{
- const struct errata_info *info;
u32 cpu_req_errata = 0;
- int idx;
- for (idx = 0; idx < ERRATA_THEAD_NUMBER; idx++) {
- info = &errata_list[idx];
+ if (errata_probe_pbmt(stage, archid, impid))
+ cpu_req_errata |= (1U << ERRATA_THEAD_PBMT);
- if ((stage == RISCV_ALTERNATIVES_MODULE ||
- info->stage == stage) && info->check_func(archid, impid))
- cpu_req_errata |= (1U << idx);
- }
+ if (errata_probe_cmo(stage, archid, impid))
+ cpu_req_errata |= (1U << ERRATA_THEAD_CMO);
return cpu_req_errata;
}
diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
index 618d7c5af1a2..1b471ff73178 100644
--- a/arch/riscv/include/asm/asm.h
+++ b/arch/riscv/include/asm/asm.h
@@ -67,4 +67,19 @@
#error "Unexpected __SIZEOF_SHORT__"
#endif
+#ifdef __ASSEMBLY__
+
+/* Common assembly source macros */
+
+/*
+ * NOP sequence
+ */
+.macro nops, num
+ .rept \num
+ nop
+ .endr
+.endm
+
+#endif /* __ASSEMBLY__ */
+
#endif /* _ASM_RISCV_ASM_H */
diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h
index d0e24aaa2aa0..110752594228 100644
--- a/arch/riscv/include/asm/barrier.h
+++ b/arch/riscv/include/asm/barrier.h
@@ -13,6 +13,8 @@
#ifndef __ASSEMBLY__
#define nop() __asm__ __volatile__ ("nop")
+#define __nops(n) ".rept " #n "\nnop\n.endr\n"
+#define nops(n) __asm__ __volatile__ (__nops(n))
#define RISCV_FENCE(p, s) \
__asm__ __volatile__ ("fence " #p "," #s : : : "memory")
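
The new nops assembler macro and the matching __nops()/nops() C macros emit a run of NOP instructions, mainly so call sites can reserve space for the alternatives framework to patch later. A minimal sketch of the C side, assuming <asm/barrier.h> is included:

/* Emit four NOPs at this call site; an alternative may overwrite them. */
static inline void example_patchable_pad(void)
{
	nops(4);
}
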
diff --git a/arch/riscv/include/asm/cache.h b/arch/riscv/include/asm/cache.h
index 9b58b104559e..d3036df23ccb 100644
--- a/arch/riscv/include/asm/cache.h
+++ b/arch/riscv/include/asm/cache.h
@@ -11,6 +11,10 @@
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#ifdef CONFIG_RISCV_DMA_NONCOHERENT
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+#endif
+
/*
* RISC-V requires the stack pointer to be 16-byte aligned, so ensure that
* the flat loader aligns it accordingly.
diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
index 23ff70350992..a60acaecfeda 100644
--- a/arch/riscv/include/asm/cacheflush.h
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -42,6 +42,16 @@ void flush_icache_mm(struct mm_struct *mm, bool local);
#endif /* CONFIG_SMP */
+#ifdef CONFIG_RISCV_ISA_ZICBOM
+void riscv_init_cbom_blocksize(void);
+#else
+static inline void riscv_init_cbom_blocksize(void) { }
+#endif
+
+#ifdef CONFIG_RISCV_DMA_NONCOHERENT
+void riscv_noncoherent_supported(void);
+#endif
+
/*
* Bits in sys_riscv_flush_icache()'s flags argument.
*/
diff --git a/arch/riscv/include/asm/cpu_ops.h b/arch/riscv/include/asm/cpu_ops.h
index 134590f1b843..aa128466c4d4 100644
--- a/arch/riscv/include/asm/cpu_ops.h
+++ b/arch/riscv/include/asm/cpu_ops.h
@@ -38,6 +38,7 @@ struct cpu_operations {
#endif
};
+extern const struct cpu_operations cpu_ops_spinwait;
extern const struct cpu_operations *cpu_ops[NR_CPUS];
void __init cpu_set_ops(int cpu);
diff --git a/arch/riscv/include/asm/cpu_ops_sbi.h b/arch/riscv/include/asm/cpu_ops_sbi.h
index 56e4b76d09ff..d6e4665b3195 100644
--- a/arch/riscv/include/asm/cpu_ops_sbi.h
+++ b/arch/riscv/include/asm/cpu_ops_sbi.h
@@ -10,6 +10,8 @@
#include <linux/sched.h>
#include <linux/threads.h>
+extern const struct cpu_operations cpu_ops_sbi;
+
/**
* struct sbi_hart_boot_data - Hart specific boot used during booting and
* cpu hotplug.
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
index 17516afc389a..0e571f6483d9 100644
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -247,6 +247,9 @@
#define CSR_SIP 0x144
#define CSR_SATP 0x180
+#define CSR_STIMECMP 0x14D
+#define CSR_STIMECMPH 0x15D
+
#define CSR_VSSTATUS 0x200
#define CSR_VSIE 0x204
#define CSR_VSTVEC 0x205
@@ -256,6 +259,8 @@
#define CSR_VSTVAL 0x243
#define CSR_VSIP 0x244
#define CSR_VSATP 0x280
+#define CSR_VSTIMECMP 0x24D
+#define CSR_VSTIMECMPH 0x25D
#define CSR_HSTATUS 0x600
#define CSR_HEDELEG 0x602
diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h
index 416ead0f9a65..19a771085781 100644
--- a/arch/riscv/include/asm/errata_list.h
+++ b/arch/riscv/include/asm/errata_list.h
@@ -16,11 +16,13 @@
#ifdef CONFIG_ERRATA_THEAD
#define ERRATA_THEAD_PBMT 0
-#define ERRATA_THEAD_NUMBER 1
+#define ERRATA_THEAD_CMO 1
+#define ERRATA_THEAD_NUMBER 2
#endif
#define CPUFEATURE_SVPBMT 0
-#define CPUFEATURE_NUMBER 1
+#define CPUFEATURE_ZICBOM 1
+#define CPUFEATURE_NUMBER 2
#ifdef __ASSEMBLY__
@@ -68,13 +70,7 @@ asm(ALTERNATIVE_2("li %0, 0\t\nnop", \
*/
#define ALT_THEAD_PMA(_val) \
asm volatile(ALTERNATIVE( \
- "nop\n\t" \
- "nop\n\t" \
- "nop\n\t" \
- "nop\n\t" \
- "nop\n\t" \
- "nop\n\t" \
- "nop", \
+ __nops(7), \
"li t3, %1\n\t" \
"slli t3, t3, %3\n\t" \
"and t3, %0, t3\n\t" \
@@ -93,6 +89,59 @@ asm volatile(ALTERNATIVE( \
#define ALT_THEAD_PMA(_val)
#endif
+/*
+ * dcache.ipa rs1 (invalidate, physical address)
+ * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
+ * 0000001 01010 rs1 000 00000 0001011
+ * dcache.iva rs1 (invalidate, virtual address)
+ * 0000001 00110 rs1 000 00000 0001011
+ *
+ * dcache.cpa rs1 (clean, physical address)
+ * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
+ * 0000001 01001 rs1 000 00000 0001011
+ * dcache.cva rs1 (clean, virtual address)
+ * 0000001 00100 rs1 000 00000 0001011
+ *
+ * dcache.cipa rs1 (clean then invalidate, physical address)
+ * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
+ * 0000001 01011 rs1 000 00000 0001011
+ * dcache.civa rs1 (... virtual address)
+ * 0000001 00111 rs1 000 00000 0001011
+ *
+ * sync.s (make sure all cache operations finished)
+ * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
+ * 0000000 11001 00000 000 00000 0001011
+ */
+#define THEAD_inval_A0 ".long 0x0265000b"
+#define THEAD_clean_A0 ".long 0x0245000b"
+#define THEAD_flush_A0 ".long 0x0275000b"
+#define THEAD_SYNC_S ".long 0x0190000b"
+
+#define ALT_CMO_OP(_op, _start, _size, _cachesize) \
+asm volatile(ALTERNATIVE_2( \
+ __nops(6), \
+ "mv a0, %1\n\t" \
+ "j 2f\n\t" \
+ "3:\n\t" \
+ "cbo." __stringify(_op) " (a0)\n\t" \
+ "add a0, a0, %0\n\t" \
+ "2:\n\t" \
+ "bltu a0, %2, 3b\n\t" \
+ "nop", 0, CPUFEATURE_ZICBOM, CONFIG_RISCV_ISA_ZICBOM, \
+ "mv a0, %1\n\t" \
+ "j 2f\n\t" \
+ "3:\n\t" \
+ THEAD_##_op##_A0 "\n\t" \
+ "add a0, a0, %0\n\t" \
+ "2:\n\t" \
+ "bltu a0, %2, 3b\n\t" \
+ THEAD_SYNC_S, THEAD_VENDOR_ID, \
+ ERRATA_THEAD_CMO, CONFIG_ERRATA_THEAD_CMO) \
+ : : "r"(_cachesize), \
+ "r"((unsigned long)(_start) & ~((_cachesize) - 1UL)), \
+ "r"((unsigned long)(_start) + (_size)) \
+ : "a0")
+
#endif /* __ASSEMBLY__ */
#endif
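
ALT_CMO_OP() patches in either the standard Zicbom cbo.<op> loop or the T-Head vendor dcache.<op> loop over a cache-block-aligned range. A rough sketch of how a DMA sync path might invoke it, assuming a riscv_cbom_block_size variable probed elsewhere; the actual arch code may differ:

#include <linux/types.h>
#include <asm/errata_list.h>

/* Assumption: probed from the CPU nodes' "riscv,cbom-block-size" property. */
extern unsigned int riscv_cbom_block_size;

static void example_sync_for_device(void *vaddr, size_t size)
{
	/* Write dirty lines back before the device reads the buffer. */
	ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
}
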
diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h
index 4e2486881840..6f59ec64175e 100644
--- a/arch/riscv/include/asm/hwcap.h
+++ b/arch/riscv/include/asm/hwcap.h
@@ -8,10 +8,12 @@
#ifndef _ASM_RISCV_HWCAP_H
#define _ASM_RISCV_HWCAP_H
+#include <asm/errno.h>
#include <linux/bits.h>
#include <uapi/asm/hwcap.h>
#ifndef __ASSEMBLY__
+#include <linux/jump_label.h>
/*
* This yields a mask that user programs can use to figure out what
* instruction set this cpu supports.
@@ -53,9 +55,23 @@ extern unsigned long elf_hwcap;
enum riscv_isa_ext_id {
RISCV_ISA_EXT_SSCOFPMF = RISCV_ISA_EXT_BASE,
RISCV_ISA_EXT_SVPBMT,
+ RISCV_ISA_EXT_ZICBOM,
+ RISCV_ISA_EXT_ZIHINTPAUSE,
+ RISCV_ISA_EXT_SSTC,
RISCV_ISA_EXT_ID_MAX = RISCV_ISA_EXT_MAX,
};
+/*
+ * This enum represents the logical ID for each RISC-V ISA extension static
+ * keys. We can use static key to optimize code path if some ISA extensions
+ * are available.
+ */
+enum riscv_isa_ext_key {
+ RISCV_ISA_EXT_KEY_FPU, /* For 'F' and 'D' */
+ RISCV_ISA_EXT_KEY_ZIHINTPAUSE,
+ RISCV_ISA_EXT_KEY_MAX,
+};
+
struct riscv_isa_ext_data {
/* Name of the extension displayed to userspace via /proc/cpuinfo */
char uprop[RISCV_ISA_EXT_NAME_LEN_MAX];
@@ -63,6 +79,22 @@ struct riscv_isa_ext_data {
unsigned int isa_ext_id;
};
+extern struct static_key_false riscv_isa_ext_keys[RISCV_ISA_EXT_KEY_MAX];
+
+static __always_inline int riscv_isa_ext2key(int num)
+{
+ switch (num) {
+ case RISCV_ISA_EXT_f:
+ return RISCV_ISA_EXT_KEY_FPU;
+ case RISCV_ISA_EXT_d:
+ return RISCV_ISA_EXT_KEY_FPU;
+ case RISCV_ISA_EXT_ZIHINTPAUSE:
+ return RISCV_ISA_EXT_KEY_ZIHINTPAUSE;
+ default:
+ return -EINVAL;
+ }
+}
+
unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap);
#define riscv_isa_extension_mask(ext) BIT_MASK(RISCV_ISA_EXT_##ext)
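
riscv_isa_ext_keys[] gives each mapped extension a static key so that hot paths can branch on its presence without a load. A sketch of a helper in the style of the has_fpu() change later in this series; purely illustrative, since cpu_relax() ends up testing the Zihintpause key directly:

#include <linux/jump_label.h>
#include <asm/hwcap.h>

static __always_inline bool example_has_zihintpause(void)
{
	return static_branch_likely(&riscv_isa_ext_keys[RISCV_ISA_EXT_KEY_ZIHINTPAUSE]);
}
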
diff --git a/arch/riscv/include/asm/kvm_vcpu_timer.h b/arch/riscv/include/asm/kvm_vcpu_timer.h
index 50138e2eb91b..0d8fdb8ec63a 100644
--- a/arch/riscv/include/asm/kvm_vcpu_timer.h
+++ b/arch/riscv/include/asm/kvm_vcpu_timer.h
@@ -28,6 +28,11 @@ struct kvm_vcpu_timer {
u64 next_cycles;
/* Underlying hrtimer instance */
struct hrtimer hrt;
+
+ /* Flag to check if sstc is enabled or not */
+ bool sstc_enabled;
+ /* A function pointer to switch between stimecmp and hrtimer at runtime */
+ int (*timer_next_event)(struct kvm_vcpu *vcpu, u64 ncycles);
};
int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles);
@@ -40,5 +45,7 @@ int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu);
void kvm_riscv_guest_timer_init(struct kvm *kvm);
+void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu);
+bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu);
#endif
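
The two new fields let each VCPU timer choose at run time between the hrtimer path and programming the Sstc stimecmp CSRs directly. A rough sketch of what an Sstc-based ->timer_next_event callback could look like; the real implementation must also apply the guest time delta, which is omitted here, and the function name is hypothetical:

#include <linux/kvm_host.h>
#include <asm/csr.h>

static int example_sstc_next_event(struct kvm_vcpu *vcpu, u64 ncycles)
{
#if defined(CONFIG_32BIT)
	csr_write(CSR_VSTIMECMP, ncycles & 0xFFFFFFFF);
	csr_write(CSR_VSTIMECMPH, ncycles >> 32);
#else
	csr_write(CSR_VSTIMECMP, ncycles);
#endif
	return 0;
}
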
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index 1526e410e802..ac70b0fd9a9a 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -167,7 +167,6 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x);
#define page_to_virt(page) (pfn_to_virt(page_to_pfn(page)))
#define page_to_phys(page) (pfn_to_phys(page_to_pfn(page)))
-#define page_to_bus(page) (page_to_phys(page))
#define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr)))
#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x))
diff --git a/arch/riscv/include/asm/pci.h b/arch/riscv/include/asm/pci.h
index 7fd52a30e605..cc2a184cfc2e 100644
--- a/arch/riscv/include/asm/pci.h
+++ b/arch/riscv/include/asm/pci.h
@@ -12,31 +12,10 @@
#include <asm/io.h>
-#define PCIBIOS_MIN_IO 0
-#define PCIBIOS_MIN_MEM 0
-
-/* RISC-V shim does not initialize PCI bus */
-#define pcibios_assign_all_busses() 1
-
-#define ARCH_GENERIC_PCI_MMAP_RESOURCE 1
-
-extern int isa_dma_bridge_buggy;
-
-#ifdef CONFIG_PCI
-static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
-{
- /* no legacy IRQ on risc-v */
- return -ENODEV;
-}
-
-static inline int pci_proc_domain(struct pci_bus *bus)
-{
- /* always show the domain in /proc */
- return 1;
-}
-
-#ifdef CONFIG_NUMA
+#define PCIBIOS_MIN_IO 4
+#define PCIBIOS_MIN_MEM 16
+#if defined(CONFIG_PCI) && defined(CONFIG_NUMA)
static inline int pcibus_to_node(struct pci_bus *bus)
{
return dev_to_node(&bus->dev);
@@ -46,8 +25,9 @@ static inline int pcibus_to_node(struct pci_bus *bus)
cpu_all_mask : \
cpumask_of_node(pcibus_to_node(bus)))
#endif
-#endif /* CONFIG_NUMA */
+#endif /* defined(CONFIG_PCI) && defined(CONFIG_NUMA) */
-#endif /* CONFIG_PCI */
+/* Generic PCI */
+#include <asm-generic/pci.h>
#endif /* _ASM_RISCV_PCI_H */
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 5dbd6610729b..7ec936910a96 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -186,26 +186,6 @@ extern struct pt_alloc_ops pt_ops __initdata;
extern pgd_t swapper_pg_dir[];
-/* MAP_PRIVATE permissions: xwr (copy-on-write) */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READ
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_EXEC
-#define __P101 PAGE_READ_EXEC
-#define __P110 PAGE_COPY_EXEC
-#define __P111 PAGE_COPY_READ_EXEC
-
-/* MAP_SHARED permissions: xwr */
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READ
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_EXEC
-#define __S101 PAGE_READ_EXEC
-#define __S110 PAGE_SHARED_EXEC
-#define __S111 PAGE_SHARED_EXEC
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_present(pmd_t pmd)
{
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index 21c8072dce17..19eedd4af4cd 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -79,8 +79,8 @@ static inline void wait_for_interrupt(void)
}
struct device_node;
-int riscv_of_processor_hartid(struct device_node *node);
-int riscv_of_parent_hartid(struct device_node *node);
+int riscv_of_processor_hartid(struct device_node *node, unsigned long *hartid);
+int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid);
extern void riscv_fill_hwcap(void);
extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
index 9e3c2cf1edaf..2a0ef738695e 100644
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -122,7 +122,21 @@ enum sbi_ext_pmu_fid {
SBI_EXT_PMU_COUNTER_FW_READ,
};
-#define RISCV_PMU_RAW_EVENT_MASK GENMASK_ULL(55, 0)
+union sbi_pmu_ctr_info {
+ unsigned long value;
+ struct {
+ unsigned long csr:12;
+ unsigned long width:6;
+#if __riscv_xlen == 32
+ unsigned long reserved:13;
+#else
+ unsigned long reserved:45;
+#endif
+ unsigned long type:1;
+ };
+};
+
+#define RISCV_PMU_RAW_EVENT_MASK GENMASK_ULL(47, 0)
#define RISCV_PMU_RAW_EVENT_IDX 0x20000
/** General pmu event codes specified in SBI PMU extension */
@@ -189,12 +203,26 @@ enum sbi_pmu_ctr_type {
SBI_PMU_CTR_TYPE_FW,
};
+/* Helper macros to decode event idx */
+#define SBI_PMU_EVENT_IDX_OFFSET 20
+#define SBI_PMU_EVENT_IDX_MASK 0xFFFFF
+#define SBI_PMU_EVENT_IDX_CODE_MASK 0xFFFF
+#define SBI_PMU_EVENT_IDX_TYPE_MASK 0xF0000
+#define SBI_PMU_EVENT_RAW_IDX 0x20000
+#define SBI_PMU_FIXED_CTR_MASK 0x07
+
+#define SBI_PMU_EVENT_CACHE_ID_CODE_MASK 0xFFF8
+#define SBI_PMU_EVENT_CACHE_OP_ID_CODE_MASK 0x06
+#define SBI_PMU_EVENT_CACHE_RESULT_ID_CODE_MASK 0x01
+
+#define SBI_PMU_EVENT_IDX_INVALID 0xFFFFFFFF
+
/* Flags defined for config matching function */
#define SBI_PMU_CFG_FLAG_SKIP_MATCH (1 << 0)
#define SBI_PMU_CFG_FLAG_CLEAR_VALUE (1 << 1)
#define SBI_PMU_CFG_FLAG_AUTO_START (1 << 2)
#define SBI_PMU_CFG_FLAG_SET_VUINH (1 << 3)
-#define SBI_PMU_CFG_FLAG_SET_VSNH (1 << 4)
+#define SBI_PMU_CFG_FLAG_SET_VSINH (1 << 4)
#define SBI_PMU_CFG_FLAG_SET_UINH (1 << 5)
#define SBI_PMU_CFG_FLAG_SET_SINH (1 << 6)
#define SBI_PMU_CFG_FLAG_SET_MINH (1 << 7)
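
union sbi_pmu_ctr_info packs the counter description returned by the SBI PMU counter-get-info call into bitfields, with the width field holding one less than the counter width in bits. A small decoding sketch; how the raw value is obtained from the SBI call is assumed, not shown:

#include <linux/printk.h>
#include <asm/sbi.h>

static void example_decode_ctr_info(unsigned long raw)
{
	union sbi_pmu_ctr_info info = { .value = raw };

	pr_info("counter csr=0x%x, %u bits, type=%u\n",
		(unsigned int)info.csr,
		(unsigned int)info.width + 1,
		(unsigned int)info.type);
}
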
diff --git a/arch/riscv/include/asm/signal.h b/arch/riscv/include/asm/signal.h
new file mode 100644
index 000000000000..532c29ef0376
--- /dev/null
+++ b/arch/riscv/include/asm/signal.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_SIGNAL_H
+#define __ASM_SIGNAL_H
+
+#include <uapi/asm/signal.h>
+#include <uapi/asm/ptrace.h>
+
+asmlinkage __visible
+void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags);
+
+#endif
diff --git a/arch/riscv/include/asm/smp.h b/arch/riscv/include/asm/smp.h
index 23170c933d73..d3443be7eedc 100644
--- a/arch/riscv/include/asm/smp.h
+++ b/arch/riscv/include/asm/smp.h
@@ -42,7 +42,7 @@ void arch_send_call_function_ipi_mask(struct cpumask *mask);
/* Hook for the generic smp_call_function_single() routine. */
void arch_send_call_function_single_ipi(int cpu);
-int riscv_hartid_to_cpuid(int hartid);
+int riscv_hartid_to_cpuid(unsigned long hartid);
/* Set custom IPI operations */
void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops);
@@ -70,7 +70,7 @@ static inline void show_ipi_stats(struct seq_file *p, int prec)
{
}
-static inline int riscv_hartid_to_cpuid(int hartid)
+static inline int riscv_hartid_to_cpuid(unsigned long hartid)
{
if (hartid == boot_cpu_hartid)
return 0;
diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h
index 0a3f4f95c555..11463489fec6 100644
--- a/arch/riscv/include/asm/switch_to.h
+++ b/arch/riscv/include/asm/switch_to.h
@@ -8,6 +8,7 @@
#include <linux/jump_label.h>
#include <linux/sched/task_stack.h>
+#include <asm/hwcap.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/csr.h>
@@ -56,10 +57,9 @@ static inline void __switch_to_aux(struct task_struct *prev,
fstate_restore(next, task_pt_regs(next));
}
-extern struct static_key_false cpu_hwcap_fpu;
static __always_inline bool has_fpu(void)
{
- return static_branch_likely(&cpu_hwcap_fpu);
+ return static_branch_likely(&riscv_isa_ext_keys[RISCV_ISA_EXT_KEY_FPU]);
}
#else
static __always_inline bool has_fpu(void) { return false; }
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index 78933ac04995..67322f878e0d 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -42,6 +42,8 @@
#ifndef __ASSEMBLY__
+extern long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE / sizeof(long)];
+
#include <asm/processor.h>
#include <asm/csr.h>
diff --git a/arch/riscv/include/asm/vdso/processor.h b/arch/riscv/include/asm/vdso/processor.h
index 134388cbaaa1..1e4f8b4aef79 100644
--- a/arch/riscv/include/asm/vdso/processor.h
+++ b/arch/riscv/include/asm/vdso/processor.h
@@ -4,15 +4,30 @@
#ifndef __ASSEMBLY__
+#include <linux/jump_label.h>
#include <asm/barrier.h>
+#include <asm/hwcap.h>
static inline void cpu_relax(void)
{
+ if (!static_branch_likely(&riscv_isa_ext_keys[RISCV_ISA_EXT_KEY_ZIHINTPAUSE])) {
#ifdef __riscv_muldiv
- int dummy;
- /* In lieu of a halt instruction, induce a long-latency stall. */
- __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
+ int dummy;
+ /* In lieu of a halt instruction, induce a long-latency stall. */
+ __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
#endif
+ } else {
+ /*
+ * Reduce instruction retirement.
+ * This assumes the PC changes.
+ */
+#ifdef __riscv_zihintpause
+ __asm__ __volatile__ ("pause");
+#else
+ /* Encoding of the pause instruction */
+ __asm__ __volatile__ (".4byte 0x100000F");
+#endif
+ }
barrier();
}
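
With the Zihintpause static key set, cpu_relax() now emits the pause hint instead of the divide-by-zero stall. Its usual consumer is a polling loop of the following shape; a sketch only, and the include is illustrative:

#include <linux/compiler.h>
#include <asm/processor.h>

static void example_wait_for_flag(int *flag)
{
	while (!READ_ONCE(*flag))
		cpu_relax();
}
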
diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h
index 24b2a6e27698..7351417afd62 100644
--- a/arch/riscv/include/uapi/asm/kvm.h
+++ b/arch/riscv/include/uapi/asm/kvm.h
@@ -97,6 +97,7 @@ enum KVM_RISCV_ISA_EXT_ID {
KVM_RISCV_ISA_EXT_I,
KVM_RISCV_ISA_EXT_M,
KVM_RISCV_ISA_EXT_SVPBMT,
+ KVM_RISCV_ISA_EXT_SSTC,
KVM_RISCV_ISA_EXT_MAX,
};
diff --git a/arch/riscv/kernel/alternative.c b/arch/riscv/kernel/alternative.c
index c9d0d3c53223..a7d26a00beea 100644
--- a/arch/riscv/kernel/alternative.c
+++ b/arch/riscv/kernel/alternative.c
@@ -20,7 +20,7 @@ struct cpu_manufacturer_info_t {
unsigned long vendor_id;
unsigned long arch_id;
unsigned long imp_id;
- void (*vendor_patch_func)(struct alt_entry *begin, struct alt_entry *end,
+ void (*patch_func)(struct alt_entry *begin, struct alt_entry *end,
unsigned long archid, unsigned long impid,
unsigned int stage);
};
@@ -40,16 +40,16 @@ static void __init_or_module riscv_fill_cpu_mfr_info(struct cpu_manufacturer_inf
switch (cpu_mfr_info->vendor_id) {
#ifdef CONFIG_ERRATA_SIFIVE
case SIFIVE_VENDOR_ID:
- cpu_mfr_info->vendor_patch_func = sifive_errata_patch_func;
+ cpu_mfr_info->patch_func = sifive_errata_patch_func;
break;
#endif
#ifdef CONFIG_ERRATA_THEAD
case THEAD_VENDOR_ID:
- cpu_mfr_info->vendor_patch_func = thead_errata_patch_func;
+ cpu_mfr_info->patch_func = thead_errata_patch_func;
break;
#endif
default:
- cpu_mfr_info->vendor_patch_func = NULL;
+ cpu_mfr_info->patch_func = NULL;
}
}
@@ -68,13 +68,13 @@ static void __init_or_module _apply_alternatives(struct alt_entry *begin,
riscv_cpufeature_patch_func(begin, end, stage);
- if (!cpu_mfr_info.vendor_patch_func)
+ if (!cpu_mfr_info.patch_func)
return;
- cpu_mfr_info.vendor_patch_func(begin, end,
- cpu_mfr_info.arch_id,
- cpu_mfr_info.imp_id,
- stage);
+ cpu_mfr_info.patch_func(begin, end,
+ cpu_mfr_info.arch_id,
+ cpu_mfr_info.imp_id,
+ stage);
}
void __init apply_boot_alternatives(void)
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index fba9e9f46a8c..0be8a2403212 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -14,37 +14,36 @@
* Returns the hart ID of the given device tree node, or -ENODEV if the node
* isn't an enabled and valid RISC-V hart node.
*/
-int riscv_of_processor_hartid(struct device_node *node)
+int riscv_of_processor_hartid(struct device_node *node, unsigned long *hart)
{
const char *isa;
- u32 hart;
if (!of_device_is_compatible(node, "riscv")) {
pr_warn("Found incompatible CPU\n");
return -ENODEV;
}
- hart = of_get_cpu_hwid(node, 0);
- if (hart == ~0U) {
+ *hart = (unsigned long) of_get_cpu_hwid(node, 0);
+ if (*hart == ~0UL) {
pr_warn("Found CPU without hart ID\n");
return -ENODEV;
}
if (!of_device_is_available(node)) {
- pr_info("CPU with hartid=%d is not available\n", hart);
+ pr_info("CPU with hartid=%lu is not available\n", *hart);
return -ENODEV;
}
if (of_property_read_string(node, "riscv,isa", &isa)) {
- pr_warn("CPU with hartid=%d has no \"riscv,isa\" property\n", hart);
+ pr_warn("CPU with hartid=%lu has no \"riscv,isa\" property\n", *hart);
return -ENODEV;
}
if (isa[0] != 'r' || isa[1] != 'v') {
- pr_warn("CPU with hartid=%d has an invalid ISA of \"%s\"\n", hart, isa);
+ pr_warn("CPU with hartid=%lu has an invalid ISA of \"%s\"\n", *hart, isa);
return -ENODEV;
}
- return hart;
+ return 0;
}
/*
@@ -53,11 +52,16 @@ int riscv_of_processor_hartid(struct device_node *node)
* To achieve this, we walk up the DT tree until we find an active
* RISC-V core (HART) node and extract the cpuid from it.
*/
-int riscv_of_parent_hartid(struct device_node *node)
+int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid)
{
+ int rc;
+
for (; node; node = node->parent) {
- if (of_device_is_compatible(node, "riscv"))
- return riscv_of_processor_hartid(node);
+ if (of_device_is_compatible(node, "riscv")) {
+ rc = riscv_of_processor_hartid(node, hartid);
+ if (!rc)
+ return 0;
+ }
}
return -1;
@@ -89,6 +93,9 @@ int riscv_of_parent_hartid(struct device_node *node)
static struct riscv_isa_ext_data isa_ext_arr[] = {
__RISCV_ISA_EXT_DATA(sscofpmf, RISCV_ISA_EXT_SSCOFPMF),
__RISCV_ISA_EXT_DATA(svpbmt, RISCV_ISA_EXT_SVPBMT),
+ __RISCV_ISA_EXT_DATA(zicbom, RISCV_ISA_EXT_ZICBOM),
+ __RISCV_ISA_EXT_DATA(zihintpause, RISCV_ISA_EXT_ZIHINTPAUSE),
+ __RISCV_ISA_EXT_DATA(sstc, RISCV_ISA_EXT_SSTC),
__RISCV_ISA_EXT_DATA("", RISCV_ISA_EXT_MAX),
};
diff --git a/arch/riscv/kernel/cpu_ops.c b/arch/riscv/kernel/cpu_ops.c
index 170d07e57721..8275f237a59d 100644
--- a/arch/riscv/kernel/cpu_ops.c
+++ b/arch/riscv/kernel/cpu_ops.c
@@ -9,15 +9,14 @@
#include <linux/string.h>
#include <linux/sched.h>
#include <asm/cpu_ops.h>
+#include <asm/cpu_ops_sbi.h>
#include <asm/sbi.h>
#include <asm/smp.h>
const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init;
extern const struct cpu_operations cpu_ops_sbi;
-#ifdef CONFIG_RISCV_BOOT_SPINWAIT
-extern const struct cpu_operations cpu_ops_spinwait;
-#else
+#ifndef CONFIG_RISCV_BOOT_SPINWAIT
const struct cpu_operations cpu_ops_spinwait = {
.name = "",
.cpu_prepare = NULL,
diff --git a/arch/riscv/kernel/cpu_ops_sbi.c b/arch/riscv/kernel/cpu_ops_sbi.c
index 4f5a6f84e2a4..efa0f0816634 100644
--- a/arch/riscv/kernel/cpu_ops_sbi.c
+++ b/arch/riscv/kernel/cpu_ops_sbi.c
@@ -65,7 +65,7 @@ static int sbi_hsm_hart_get_status(unsigned long hartid)
static int sbi_cpu_start(unsigned int cpuid, struct task_struct *tidle)
{
unsigned long boot_addr = __pa_symbol(secondary_start_sbi);
- int hartid = cpuid_to_hartid_map(cpuid);
+ unsigned long hartid = cpuid_to_hartid_map(cpuid);
unsigned long hsm_data;
struct sbi_hart_boot_data *bdata = &per_cpu(boot_data, cpuid);
@@ -107,7 +107,7 @@ static void sbi_cpu_stop(void)
static int sbi_cpu_is_stopped(unsigned int cpuid)
{
int rc;
- int hartid = cpuid_to_hartid_map(cpuid);
+ unsigned long hartid = cpuid_to_hartid_map(cpuid);
rc = sbi_hsm_hart_get_status(hartid);
diff --git a/arch/riscv/kernel/cpu_ops_spinwait.c b/arch/riscv/kernel/cpu_ops_spinwait.c
index 346847f6c41c..d98d19226b5f 100644
--- a/arch/riscv/kernel/cpu_ops_spinwait.c
+++ b/arch/riscv/kernel/cpu_ops_spinwait.c
@@ -11,6 +11,8 @@
#include <asm/sbi.h>
#include <asm/smp.h>
+#include "head.h"
+
const struct cpu_operations cpu_ops_spinwait;
void *__cpu_spinwait_stack_pointer[NR_CPUS] __section(".data");
void *__cpu_spinwait_task_pointer[NR_CPUS] __section(".data");
@@ -18,7 +20,7 @@ void *__cpu_spinwait_task_pointer[NR_CPUS] __section(".data");
static void cpu_update_secondary_bootdata(unsigned int cpuid,
struct task_struct *tidle)
{
- int hartid = cpuid_to_hartid_map(cpuid);
+ unsigned long hartid = cpuid_to_hartid_map(cpuid);
/*
* The hartid must be less than NR_CPUS to avoid out-of-bound access
@@ -27,7 +29,7 @@ static void cpu_update_secondary_bootdata(unsigned int cpuid,
* spinwait booting is not the recommended approach for any platforms
* booting Linux in S-mode and can be disabled in the future.
*/
- if (hartid == INVALID_HARTID || hartid >= NR_CPUS)
+ if (hartid == INVALID_HARTID || hartid >= (unsigned long) NR_CPUS)
return;
/* Make sure tidle is updated */
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index 12b05ce164bb..3b5583db9d80 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <asm/alternative.h>
+#include <asm/cacheflush.h>
#include <asm/errata_list.h>
#include <asm/hwcap.h>
#include <asm/patch.h>
@@ -27,9 +28,8 @@ unsigned long elf_hwcap __read_mostly;
/* Host ISA bitmap */
static DECLARE_BITMAP(riscv_isa, RISCV_ISA_EXT_MAX) __read_mostly;
-#ifdef CONFIG_FPU
-__ro_after_init DEFINE_STATIC_KEY_FALSE(cpu_hwcap_fpu);
-#endif
+DEFINE_STATIC_KEY_ARRAY_FALSE(riscv_isa_ext_keys, RISCV_ISA_EXT_KEY_MAX);
+EXPORT_SYMBOL(riscv_isa_ext_keys);
/**
* riscv_isa_extension_base() - Get base extension word
@@ -73,8 +73,9 @@ void __init riscv_fill_hwcap(void)
struct device_node *node;
const char *isa;
char print_str[NUM_ALPHA_EXTS + 1];
- int i, j;
+ int i, j, rc;
static unsigned long isa2hwcap[256] = {0};
+ unsigned long hartid;
isa2hwcap['i'] = isa2hwcap['I'] = COMPAT_HWCAP_ISA_I;
isa2hwcap['m'] = isa2hwcap['M'] = COMPAT_HWCAP_ISA_M;
@@ -92,7 +93,8 @@ void __init riscv_fill_hwcap(void)
DECLARE_BITMAP(this_isa, RISCV_ISA_EXT_MAX);
const char *temp;
- if (riscv_of_processor_hartid(node) < 0)
+ rc = riscv_of_processor_hartid(node, &hartid);
+ if (rc < 0)
continue;
if (of_property_read_string(node, "riscv,isa", &isa)) {
@@ -199,6 +201,9 @@ void __init riscv_fill_hwcap(void)
} else {
SET_ISA_EXT_MAP("sscofpmf", RISCV_ISA_EXT_SSCOFPMF);
SET_ISA_EXT_MAP("svpbmt", RISCV_ISA_EXT_SVPBMT);
+ SET_ISA_EXT_MAP("zicbom", RISCV_ISA_EXT_ZICBOM);
+ SET_ISA_EXT_MAP("zihintpause", RISCV_ISA_EXT_ZIHINTPAUSE);
+ SET_ISA_EXT_MAP("sstc", RISCV_ISA_EXT_SSTC);
}
#undef SET_ISA_EXT_MAP
}
@@ -238,19 +243,15 @@ void __init riscv_fill_hwcap(void)
print_str[j++] = (char)('a' + i);
pr_info("riscv: ELF capabilities %s\n", print_str);
-#ifdef CONFIG_FPU
- if (elf_hwcap & (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D))
- static_branch_enable(&cpu_hwcap_fpu);
-#endif
+ for_each_set_bit(i, riscv_isa, RISCV_ISA_EXT_MAX) {
+ j = riscv_isa_ext2key(i);
+ if (j >= 0)
+ static_branch_enable(&riscv_isa_ext_keys[j]);
+ }
}
#ifdef CONFIG_RISCV_ALTERNATIVE
-struct cpufeature_info {
- char name[ERRATA_STRING_LENGTH_MAX];
- bool (*check_func)(unsigned int stage);
-};
-
-static bool __init_or_module cpufeature_svpbmt_check_func(unsigned int stage)
+static bool __init_or_module cpufeature_probe_svpbmt(unsigned int stage)
{
#ifdef CONFIG_RISCV_ISA_SVPBMT
switch (stage) {
@@ -264,26 +265,41 @@ static bool __init_or_module cpufeature_svpbmt_check_func(unsigned int stage)
return false;
}
-static const struct cpufeature_info __initdata_or_module
-cpufeature_list[CPUFEATURE_NUMBER] = {
- {
- .name = "svpbmt",
- .check_func = cpufeature_svpbmt_check_func
- },
-};
+static bool __init_or_module cpufeature_probe_zicbom(unsigned int stage)
+{
+#ifdef CONFIG_RISCV_ISA_ZICBOM
+ switch (stage) {
+ case RISCV_ALTERNATIVES_EARLY_BOOT:
+ return false;
+ default:
+ if (riscv_isa_extension_available(NULL, ZICBOM)) {
+ riscv_noncoherent_supported();
+ return true;
+ } else {
+ return false;
+ }
+ }
+#endif
+
+ return false;
+}
+/*
+ * Probe presence of individual extensions.
+ *
+ * This code may also be executed before kernel relocation, so we cannot use
+ * addresses generated by the address-of operator as they won't be valid in
+ * this context.
+ */
static u32 __init_or_module cpufeature_probe(unsigned int stage)
{
- const struct cpufeature_info *info;
u32 cpu_req_feature = 0;
- int idx;
- for (idx = 0; idx < CPUFEATURE_NUMBER; idx++) {
- info = &cpufeature_list[idx];
+ if (cpufeature_probe_svpbmt(stage))
+ cpu_req_feature |= (1U << CPUFEATURE_SVPBMT);
- if (info->check_func(stage))
- cpu_req_feature |= (1U << idx);
- }
+ if (cpufeature_probe_zicbom(stage))
+ cpu_req_feature |= (1U << CPUFEATURE_ZICBOM);
return cpu_req_feature;
}
diff --git a/arch/riscv/kernel/crash_save_regs.S b/arch/riscv/kernel/crash_save_regs.S
index 7832fb763aba..b2a1908c0463 100644
--- a/arch/riscv/kernel/crash_save_regs.S
+++ b/arch/riscv/kernel/crash_save_regs.S
@@ -44,7 +44,7 @@ SYM_CODE_START(riscv_crash_save_regs)
REG_S t6, PT_T6(a0) /* x31 */
csrr t1, CSR_STATUS
- csrr t2, CSR_EPC
+ auipc t2, 0x0
csrr t3, CSR_TVAL
csrr t4, CSR_CAUSE
diff --git a/arch/riscv/kernel/machine_kexec.c b/arch/riscv/kernel/machine_kexec.c
index df8e24559035..ee79e6839b86 100644
--- a/arch/riscv/kernel/machine_kexec.c
+++ b/arch/riscv/kernel/machine_kexec.c
@@ -138,19 +138,37 @@ void machine_shutdown(void)
#endif
}
+/* Override the weak function in kernel/panic.c */
+void crash_smp_send_stop(void)
+{
+ static int cpus_stopped;
+
+ /*
+ * This function can be called twice in panic path, but obviously
+ * we execute this only once.
+ */
+ if (cpus_stopped)
+ return;
+
+ smp_send_stop();
+ cpus_stopped = 1;
+}
+
/*
* machine_crash_shutdown - Prepare to kexec after a kernel crash
*
* This function is called by crash_kexec just before machine_kexec
- * below and its goal is similar to machine_shutdown, but in case of
- * a kernel crash. Since we don't handle such cases yet, this function
- * is empty.
+ * and its goal is to shut down non-crashing CPUs and save registers.
*/
void
machine_crash_shutdown(struct pt_regs *regs)
{
+ local_irq_disable();
+
+ /* shutdown non-crashing cpus */
+ crash_smp_send_stop();
+
crash_save_cpu(regs, smp_processor_id());
- machine_shutdown();
pr_info("Starting crashdump kernel...\n");
}
@@ -171,7 +189,7 @@ machine_kexec(struct kimage *image)
struct kimage_arch *internal = &image->arch;
unsigned long jump_addr = (unsigned long) image->start;
unsigned long first_ind_entry = (unsigned long) &image->head;
- unsigned long this_cpu_id = smp_processor_id();
+ unsigned long this_cpu_id = __smp_processor_id();
unsigned long this_hart_id = cpuid_to_hartid_map(this_cpu_id);
unsigned long fdt_addr = internal->fdt_addr;
void *control_code_buffer = page_address(image->control_code_page);
diff --git a/arch/riscv/kernel/probes/uprobes.c b/arch/riscv/kernel/probes/uprobes.c
index 7a057b5f0adc..c976a21cd4bd 100644
--- a/arch/riscv/kernel/probes/uprobes.c
+++ b/arch/riscv/kernel/probes/uprobes.c
@@ -59,8 +59,6 @@ int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
instruction_pointer_set(regs, utask->xol_vaddr);
- regs->status &= ~SR_SPIE;
-
return 0;
}
@@ -72,8 +70,6 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
instruction_pointer_set(regs, utask->vaddr + auprobe->insn_size);
- regs->status |= SR_SPIE;
-
return 0;
}
@@ -111,8 +107,6 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
* address.
*/
instruction_pointer_set(regs, utask->vaddr);
-
- regs->status &= ~SR_SPIE;
}
bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index f0f36a4a0e9b..95ef6e2bf45c 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -22,6 +22,7 @@
#include <linux/crash_dump.h>
#include <asm/alternative.h>
+#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>
#include <asm/early_ioremap.h>
#include <asm/pgtable.h>
@@ -296,6 +297,7 @@ void __init setup_arch(char **cmdline_p)
#endif
riscv_fill_hwcap();
+ riscv_init_cbom_blocksize();
apply_boot_alternatives();
}
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
index 38b05ca6fe66..5a2de6b6f882 100644
--- a/arch/riscv/kernel/signal.c
+++ b/arch/riscv/kernel/signal.c
@@ -15,6 +15,7 @@
#include <asm/ucontext.h>
#include <asm/vdso.h>
+#include <asm/signal.h>
#include <asm/signal32.h>
#include <asm/switch_to.h>
#include <asm/csr.h>
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index b5d30ea92292..760a64518c58 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -47,7 +47,7 @@ static struct {
unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;
-int riscv_hartid_to_cpuid(int hartid)
+int riscv_hartid_to_cpuid(unsigned long hartid)
{
int i;
@@ -55,7 +55,7 @@ int riscv_hartid_to_cpuid(int hartid)
if (cpuid_to_hartid_map(i) == hartid)
return i;
- pr_err("Couldn't find cpu id for hartid [%d]\n", hartid);
+ pr_err("Couldn't find cpu id for hartid [%lu]\n", hartid);
return -ENOENT;
}
@@ -64,12 +64,6 @@ bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
return phys_id == cpuid_to_hartid_map(cpu);
}
-/* Unsupported */
-int setup_profiling_timer(unsigned int multiplier)
-{
- return -EINVAL;
-}
-
static void ipi_stop(void)
{
set_cpu_online(smp_processor_id(), false);
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index f1e4948a4b52..a752c7b41683 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -72,15 +72,16 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
void __init setup_smp(void)
{
struct device_node *dn;
- int hart;
+ unsigned long hart;
bool found_boot_cpu = false;
int cpuid = 1;
+ int rc;
cpu_set_ops(0);
for_each_of_cpu_node(dn) {
- hart = riscv_of_processor_hartid(dn);
- if (hart < 0)
+ rc = riscv_of_processor_hartid(dn, &hart);
+ if (rc < 0)
continue;
if (hart == cpuid_to_hartid_map(0)) {
@@ -90,7 +91,7 @@ void __init setup_smp(void)
continue;
}
if (cpuid >= NR_CPUS) {
- pr_warn("Invalid cpuid [%d] for hartid [%d]\n",
+ pr_warn("Invalid cpuid [%d] for hartid [%lu]\n",
cpuid, hart);
continue;
}
diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c
index 9c0194f176fc..571556bb9261 100644
--- a/arch/riscv/kernel/sys_riscv.c
+++ b/arch/riscv/kernel/sys_riscv.c
@@ -18,9 +18,8 @@ static long riscv_sys_mmap(unsigned long addr, unsigned long len,
if (unlikely(offset & (~PAGE_MASK >> page_shift_offset)))
return -EINVAL;
- if ((prot & PROT_WRITE) && (prot & PROT_EXEC))
- if (unlikely(!(prot & PROT_READ)))
- return -EINVAL;
+ if (unlikely((prot & PROT_WRITE) && !(prot & PROT_READ)))
+ return -EINVAL;
return ksys_mmap_pgoff(addr, len, prot, flags, fd,
offset >> (PAGE_SHIFT - page_shift_offset));
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index b40426509244..635e6ec26938 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -16,12 +16,14 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/irq.h>
+#include <linux/kexec.h>
#include <asm/asm-prototypes.h>
#include <asm/bug.h>
+#include <asm/csr.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
-#include <asm/csr.h>
+#include <asm/thread_info.h>
int show_unhandled_signals = 1;
@@ -44,6 +46,9 @@ void die(struct pt_regs *regs, const char *str)
ret = notify_die(DIE_OOPS, str, regs, 0, regs->cause, SIGSEGV);
+ if (regs && kexec_should_crash(current))
+ crash_kexec(regs);
+
bust_spinlocks(0);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
spin_unlock_irq(&die_lock);
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index 46c4dafe3ba0..378f5b151443 100644
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -7,6 +7,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/irq.h>
+#include <linux/stringify.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
@@ -150,9 +151,6 @@
#define PRECISION_S 0
#define PRECISION_D 1
-#define STR(x) XSTR(x)
-#define XSTR(x) #x
-
#define DECLARE_UNPRIVILEGED_LOAD_FUNCTION(type, insn) \
static inline type load_##type(const type *addr) \
{ \
@@ -207,9 +205,9 @@ static inline ulong get_insn(ulong mepc)
asm ("and %[tmp], %[addr], 2\n"
"bnez %[tmp], 1f\n"
#if defined(CONFIG_64BIT)
- STR(LWU) " %[insn], (%[addr])\n"
+ __stringify(LWU) " %[insn], (%[addr])\n"
#else
- STR(LW) " %[insn], (%[addr])\n"
+ __stringify(LW) " %[insn], (%[addr])\n"
#endif
"and %[tmp], %[insn], %[rvc_mask]\n"
"beq %[tmp], %[rvc_mask], 2f\n"
diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
index 3a35b2d95697..3620ecac2fa1 100644
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -666,7 +666,7 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
return ret;
}
- mmu_seq = kvm->mmu_notifier_seq;
+ mmu_seq = kvm->mmu_invalidate_seq;
hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writable);
if (hfn == KVM_PFN_ERR_HWPOISON) {
@@ -686,7 +686,7 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
spin_lock(&kvm->mmu_lock);
- if (mmu_notifier_retry(kvm, mmu_seq))
+ if (mmu_invalidate_retry(kvm, mmu_seq))
goto out_unlock;
if (writable) {
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index 5d271b597613..d0f08d5b4282 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -52,6 +52,7 @@ static const unsigned long kvm_isa_ext_arr[] = {
RISCV_ISA_EXT_i,
RISCV_ISA_EXT_m,
RISCV_ISA_EXT_SVPBMT,
+ RISCV_ISA_EXT_SSTC,
};
static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
@@ -85,6 +86,7 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
case KVM_RISCV_ISA_EXT_C:
case KVM_RISCV_ISA_EXT_I:
case KVM_RISCV_ISA_EXT_M:
+ case KVM_RISCV_ISA_EXT_SSTC:
return false;
default:
break;
@@ -203,7 +205,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
- return kvm_riscv_vcpu_has_interrupts(vcpu, 1UL << IRQ_VS_TIMER);
+ return kvm_riscv_vcpu_timer_pending(vcpu);
}
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
@@ -785,6 +787,8 @@ static void kvm_riscv_vcpu_update_config(const unsigned long *isa)
if (__riscv_isa_extension_available(isa, RISCV_ISA_EXT_SVPBMT))
henvcfg |= ENVCFG_PBMTE;
+ if (__riscv_isa_extension_available(isa, RISCV_ISA_EXT_SSTC))
+ henvcfg |= ENVCFG_STCE;
csr_write(CSR_HENVCFG, henvcfg);
#ifdef CONFIG_32BIT
csr_write(CSR_HENVCFGH, henvcfg >> 32);
@@ -828,6 +832,8 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
vcpu->arch.isa);
kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);
+ kvm_riscv_vcpu_timer_save(vcpu);
+
csr->vsstatus = csr_read(CSR_VSSTATUS);
csr->vsie = csr_read(CSR_VSIE);
csr->vstvec = csr_read(CSR_VSTVEC);
diff --git a/arch/riscv/kvm/vcpu_timer.c b/arch/riscv/kvm/vcpu_timer.c
index 595043857049..16f50c46ba39 100644
--- a/arch/riscv/kvm/vcpu_timer.c
+++ b/arch/riscv/kvm/vcpu_timer.c
@@ -69,7 +69,18 @@ static int kvm_riscv_vcpu_timer_cancel(struct kvm_vcpu_timer *t)
return 0;
}
-int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles)
+static int kvm_riscv_vcpu_update_vstimecmp(struct kvm_vcpu *vcpu, u64 ncycles)
+{
+#if defined(CONFIG_32BIT)
+ csr_write(CSR_VSTIMECMP, ncycles & 0xFFFFFFFF);
+ csr_write(CSR_VSTIMECMPH, ncycles >> 32);
+#else
+ csr_write(CSR_VSTIMECMP, ncycles);
+#endif
+ return 0;
+}
+
+static int kvm_riscv_vcpu_update_hrtimer(struct kvm_vcpu *vcpu, u64 ncycles)
{
struct kvm_vcpu_timer *t = &vcpu->arch.timer;
struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
@@ -88,6 +99,65 @@ int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles)
return 0;
}
+int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles)
+{
+ struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+
+ return t->timer_next_event(vcpu, ncycles);
+}
+
+static enum hrtimer_restart kvm_riscv_vcpu_vstimer_expired(struct hrtimer *h)
+{
+ u64 delta_ns;
+ struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt);
+ struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer);
+ struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
+
+ if (kvm_riscv_current_cycles(gt) < t->next_cycles) {
+ delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
+ hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns));
+ return HRTIMER_RESTART;
+ }
+
+ t->next_set = false;
+ kvm_vcpu_kick(vcpu);
+
+ return HRTIMER_NORESTART;
+}
+
+bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+ struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
+
+ if (!kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t) ||
+ kvm_riscv_vcpu_has_interrupts(vcpu, 1UL << IRQ_VS_TIMER))
+ return true;
+ else
+ return false;
+}
+
+static void kvm_riscv_vcpu_timer_blocking(struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+ struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
+ u64 delta_ns;
+
+ if (!t->init_done)
+ return;
+
+ delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
+ if (delta_ns) {
+ hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
+ t->next_set = true;
+ }
+}
+
+static void kvm_riscv_vcpu_timer_unblocking(struct kvm_vcpu *vcpu)
+{
+ kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
+}
+
int kvm_riscv_vcpu_get_reg_timer(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
@@ -180,10 +250,20 @@ int kvm_riscv_vcpu_timer_init(struct kvm_vcpu *vcpu)
return -EINVAL;
hrtimer_init(&t->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- t->hrt.function = kvm_riscv_vcpu_hrtimer_expired;
t->init_done = true;
t->next_set = false;
+ /* Enable sstc for every vcpu if available in hardware */
+ if (riscv_isa_extension_available(NULL, SSTC)) {
+ t->sstc_enabled = true;
+ t->hrt.function = kvm_riscv_vcpu_vstimer_expired;
+ t->timer_next_event = kvm_riscv_vcpu_update_vstimecmp;
+ } else {
+ t->sstc_enabled = false;
+ t->hrt.function = kvm_riscv_vcpu_hrtimer_expired;
+ t->timer_next_event = kvm_riscv_vcpu_update_hrtimer;
+ }
+
return 0;
}
@@ -199,21 +279,73 @@ int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu)
int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu)
{
+ struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+
+ t->next_cycles = -1ULL;
return kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
}
-void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
+static void kvm_riscv_vcpu_update_timedelta(struct kvm_vcpu *vcpu)
{
struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
-#ifdef CONFIG_64BIT
- csr_write(CSR_HTIMEDELTA, gt->time_delta);
-#else
+#if defined(CONFIG_32BIT)
csr_write(CSR_HTIMEDELTA, (u32)(gt->time_delta));
csr_write(CSR_HTIMEDELTAH, (u32)(gt->time_delta >> 32));
+#else
+ csr_write(CSR_HTIMEDELTA, gt->time_delta);
#endif
}
+void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu_csr *csr;
+ struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+
+ kvm_riscv_vcpu_update_timedelta(vcpu);
+
+ if (!t->sstc_enabled)
+ return;
+
+ csr = &vcpu->arch.guest_csr;
+#if defined(CONFIG_32BIT)
+ csr_write(CSR_VSTIMECMP, (u32)t->next_cycles);
+ csr_write(CSR_VSTIMECMPH, (u32)(t->next_cycles >> 32));
+#else
+ csr_write(CSR_VSTIMECMP, t->next_cycles);
+#endif
+
+ /* timer should be enabled for the remaining operations */
+ if (unlikely(!t->init_done))
+ return;
+
+ kvm_riscv_vcpu_timer_unblocking(vcpu);
+}
+
+void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu_csr *csr;
+ struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+
+ if (!t->sstc_enabled)
+ return;
+
+ csr = &vcpu->arch.guest_csr;
+ t = &vcpu->arch.timer;
+#if defined(CONFIG_32BIT)
+ t->next_cycles = csr_read(CSR_VSTIMECMP);
+ t->next_cycles |= (u64)csr_read(CSR_VSTIMECMPH) << 32;
+#else
+ t->next_cycles = csr_read(CSR_VSTIMECMP);
+#endif
+ /* timer should be enabled for the remaining operations */
+ if (unlikely(!t->init_done))
+ return;
+
+ if (kvm_vcpu_is_blocking(vcpu))
+ kvm_riscv_vcpu_timer_blocking(vcpu);
+}
+
void kvm_riscv_guest_timer_init(struct kvm *kvm)
{
struct kvm_guest_timer *gt = &kvm->arch.timer;
diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S
index 8c475f4da308..ec486e5369d9 100644
--- a/arch/riscv/lib/uaccess.S
+++ b/arch/riscv/lib/uaccess.S
@@ -175,7 +175,7 @@ ENTRY(__asm_copy_from_user)
/* Exception fixup code */
10:
/* Disable access to user memory */
- csrs CSR_STATUS, t6
+ csrc CSR_STATUS, t6
mv a0, t5
ret
ENDPROC(__asm_copy_to_user)
@@ -227,7 +227,7 @@ ENTRY(__clear_user)
/* Exception fixup code */
11:
/* Disable access to user memory */
- csrs CSR_STATUS, t6
+ csrc CSR_STATUS, t6
mv a0, a1
ret
ENDPROC(__clear_user)
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index ac7a25298a04..d76aabf4b94d 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -30,3 +30,4 @@ endif
endif
obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o
+obj-$(CONFIG_RISCV_DMA_NONCOHERENT) += dma-noncoherent.o
diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c
new file mode 100644
index 000000000000..cd2225304c82
--- /dev/null
+++ b/arch/riscv/mm/dma-noncoherent.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * RISC-V specific functions to support DMA for non-coherent devices
+ *
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ */
+
+#include <linux/dma-direct.h>
+#include <linux/dma-map-ops.h>
+#include <linux/mm.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <asm/cacheflush.h>
+
+static unsigned int riscv_cbom_block_size = L1_CACHE_BYTES;
+static bool noncoherent_supported;
+
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
+{
+ void *vaddr = phys_to_virt(paddr);
+
+ switch (dir) {
+ case DMA_TO_DEVICE:
+ ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
+ break;
+ case DMA_FROM_DEVICE:
+ ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
+ break;
+ case DMA_BIDIRECTIONAL:
+ ALT_CMO_OP(flush, vaddr, size, riscv_cbom_block_size);
+ break;
+ default:
+ break;
+ }
+}
+
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
+{
+ void *vaddr = phys_to_virt(paddr);
+
+ switch (dir) {
+ case DMA_TO_DEVICE:
+ break;
+ case DMA_FROM_DEVICE:
+ case DMA_BIDIRECTIONAL:
+ ALT_CMO_OP(flush, vaddr, size, riscv_cbom_block_size);
+ break;
+ default:
+ break;
+ }
+}
+
+void arch_dma_prep_coherent(struct page *page, size_t size)
+{
+ void *flush_addr = page_address(page);
+
+ ALT_CMO_OP(flush, flush_addr, size, riscv_cbom_block_size);
+}
+
+void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ const struct iommu_ops *iommu, bool coherent)
+{
+ WARN_TAINT(!coherent && riscv_cbom_block_size > ARCH_DMA_MINALIGN,
+ TAINT_CPU_OUT_OF_SPEC,
+ "%s %s: ARCH_DMA_MINALIGN smaller than riscv,cbom-block-size (%d < %d)",
+ dev_driver_string(dev), dev_name(dev),
+ ARCH_DMA_MINALIGN, riscv_cbom_block_size);
+
+ WARN_TAINT(!coherent && !noncoherent_supported, TAINT_CPU_OUT_OF_SPEC,
+ "%s %s: device non-coherent but no non-coherent operations supported",
+ dev_driver_string(dev), dev_name(dev));
+
+ dev->dma_coherent = coherent;
+}
+
+#ifdef CONFIG_RISCV_ISA_ZICBOM
+void riscv_init_cbom_blocksize(void)
+{
+ struct device_node *node;
+ int ret;
+ u32 val;
+
+ for_each_of_cpu_node(node) {
+ unsigned long hartid;
+ int cbom_hartid;
+
+ ret = riscv_of_processor_hartid(node, &hartid);
+ if (ret)
+ continue;
+
+ if (hartid < 0)
+ continue;
+
+ /* set block-size for cbom extension if available */
+ ret = of_property_read_u32(node, "riscv,cbom-block-size", &val);
+ if (ret)
+ continue;
+
+ if (!riscv_cbom_block_size) {
+ riscv_cbom_block_size = val;
+ cbom_hartid = hartid;
+ } else {
+ if (riscv_cbom_block_size != val)
+ pr_warn("cbom-block-size mismatched between harts %d and %lu\n",
+ cbom_hartid, hartid);
+ }
+ }
+}
+#endif
+
+void riscv_noncoherent_supported(void)
+{
+ noncoherent_supported = true;
+}
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 40694f0cab9e..f2fbd1400b7c 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -326,6 +326,10 @@ good_area:
if (fault_signal_pending(fault, regs))
return;
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
if (unlikely(fault & VM_FAULT_RETRY)) {
flags |= FAULT_FLAG_TRIED;
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index d466ec670e1f..b56a0a75533f 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -135,6 +135,10 @@ static void __init print_vm_layout(void)
(unsigned long)VMEMMAP_END);
print_ml("vmalloc", (unsigned long)VMALLOC_START,
(unsigned long)VMALLOC_END);
+#ifdef CONFIG_64BIT
+ print_ml("modules", (unsigned long)MODULES_VADDR,
+ (unsigned long)MODULES_END);
+#endif
print_ml("lowmem", (unsigned long)PAGE_OFFSET,
(unsigned long)high_memory);
if (IS_ENABLED(CONFIG_64BIT)) {
@@ -288,6 +292,26 @@ static pmd_t __maybe_unused early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAG
#define early_pg_dir ((pgd_t *)XIP_FIXUP(early_pg_dir))
#endif /* CONFIG_XIP_KERNEL */
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READ,
+ [VM_WRITE] = PAGE_COPY,
+ [VM_WRITE | VM_READ] = PAGE_COPY,
+ [VM_EXEC] = PAGE_EXEC,
+ [VM_EXEC | VM_READ] = PAGE_READ_EXEC,
+ [VM_EXEC | VM_WRITE] = PAGE_COPY_EXEC,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_READ_EXEC,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READ,
+ [VM_SHARED | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC] = PAGE_EXEC,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READ_EXEC,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_EXEC,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_EXEC
+};
+DECLARE_VM_GET_PAGE_PROT
+
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
unsigned long addr = __fix_to_virt(idx);
diff --git a/arch/riscv/purgatory/.gitignore b/arch/riscv/purgatory/.gitignore
index 38d7d1bda4d7..6e4dfb024ad2 100644
--- a/arch/riscv/purgatory/.gitignore
+++ b/arch/riscv/purgatory/.gitignore
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
purgatory.chk
purgatory.ro
-kexec-purgatory.c
diff --git a/arch/riscv/purgatory/Makefile b/arch/riscv/purgatory/Makefile
index d4df200f7edf..dd58e1d99397 100644
--- a/arch/riscv/purgatory/Makefile
+++ b/arch/riscv/purgatory/Makefile
@@ -84,12 +84,6 @@ $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
$(obj)/purgatory.chk: $(obj)/purgatory.ro FORCE
$(call if_changed,ld)
-targets += kexec-purgatory.c
+$(obj)/kexec-purgatory.o: $(obj)/purgatory.ro $(obj)/purgatory.chk
-quiet_cmd_bin2c = BIN2C $@
- cmd_bin2c = $(objtree)/scripts/bin2c kexec_purgatory < $< > $@
-
-$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro $(obj)/purgatory.chk FORCE
- $(call if_changed,bin2c)
-
-obj-$(CONFIG_ARCH_HAS_KEXEC_PURGATORY) += kexec-purgatory.o
+obj-y += kexec-purgatory.o
diff --git a/arch/riscv/purgatory/kexec-purgatory.S b/arch/riscv/purgatory/kexec-purgatory.S
new file mode 100644
index 000000000000..0e9188815718
--- /dev/null
+++ b/arch/riscv/purgatory/kexec-purgatory.S
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+ .section .rodata, "a"
+
+ .align 8
+kexec_purgatory:
+ .globl kexec_purgatory
+ .incbin "arch/riscv/purgatory/purgatory.ro"
+.Lkexec_purgatory_end:
+
+ .align 8
+kexec_purgatory_size:
+ .globl kexec_purgatory_size
+ .quad .Lkexec_purgatory_end - kexec_purgatory
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index 863e6bcaa5a1..bc48fe82d949 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -152,6 +152,7 @@ static void setup_kernel_memory_layout(void)
unsigned long vmemmap_start;
unsigned long rte_size;
unsigned long pages;
+ unsigned long vmax;
pages = ident_map_size / PAGE_SIZE;
/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
@@ -163,10 +164,10 @@ static void setup_kernel_memory_layout(void)
vmalloc_size > _REGION2_SIZE ||
vmemmap_start + vmemmap_size + vmalloc_size + MODULES_LEN >
_REGION2_SIZE) {
- MODULES_END = _REGION1_SIZE;
+ vmax = _REGION1_SIZE;
rte_size = _REGION2_SIZE;
} else {
- MODULES_END = _REGION2_SIZE;
+ vmax = _REGION2_SIZE;
rte_size = _REGION3_SIZE;
}
/*
@@ -174,11 +175,12 @@ static void setup_kernel_memory_layout(void)
* secure storage limit, so that any vmalloc allocation
* we do could be used to back secure guest storage.
*/
- adjust_to_uv_max(&MODULES_END);
+ vmax = adjust_to_uv_max(vmax);
#ifdef CONFIG_KASAN
/* force vmalloc and modules below kasan shadow */
- MODULES_END = min(MODULES_END, KASAN_SHADOW_START);
+ vmax = min(vmax, KASAN_SHADOW_START);
#endif
+ MODULES_END = vmax;
MODULES_VADDR = MODULES_END - MODULES_LEN;
VMALLOC_END = MODULES_VADDR;
diff --git a/arch/s390/boot/uv.c b/arch/s390/boot/uv.c
index a5fa667160b2..0a077c0a2056 100644
--- a/arch/s390/boot/uv.c
+++ b/arch/s390/boot/uv.c
@@ -57,10 +57,11 @@ void uv_query_info(void)
}
#if IS_ENABLED(CONFIG_KVM)
-void adjust_to_uv_max(unsigned long *vmax)
+unsigned long adjust_to_uv_max(unsigned long limit)
{
if (is_prot_virt_host() && uv_info.max_sec_stor_addr)
- *vmax = min_t(unsigned long, *vmax, uv_info.max_sec_stor_addr);
+ limit = min_t(unsigned long, limit, uv_info.max_sec_stor_addr);
+ return limit;
}
static int is_prot_virt_host_capable(void)
diff --git a/arch/s390/boot/uv.h b/arch/s390/boot/uv.h
index 690ce019af5a..0f3070856f8d 100644
--- a/arch/s390/boot/uv.h
+++ b/arch/s390/boot/uv.h
@@ -3,10 +3,13 @@
#define BOOT_UV_H
#if IS_ENABLED(CONFIG_KVM)
-void adjust_to_uv_max(unsigned long *vmax);
+unsigned long adjust_to_uv_max(unsigned long limit);
void sanitize_prot_virt_host(void);
#else
-static inline void adjust_to_uv_max(unsigned long *vmax) {}
+static inline unsigned long adjust_to_uv_max(unsigned long limit)
+{
+ return limit;
+}
static inline void sanitize_prot_virt_host(void) {}
#endif
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 1023e9d43d44..526c3f40f6a2 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -1049,7 +1049,7 @@ out_err:
return ret;
}
-module_cpu_feature_match(MSA, aes_s390_init);
+module_cpu_feature_match(S390_CPU_FEATURE_MSA, aes_s390_init);
module_exit(aes_s390_fini);
MODULE_ALIAS_CRYPTO("aes-all");
diff --git a/arch/s390/crypto/chacha-glue.c b/arch/s390/crypto/chacha-glue.c
index 2ec51f339cec..7752bd314558 100644
--- a/arch/s390/crypto/chacha-glue.c
+++ b/arch/s390/crypto/chacha-glue.c
@@ -121,7 +121,7 @@ static void __exit chacha_mod_fini(void)
crypto_unregister_skciphers(chacha_algs, ARRAY_SIZE(chacha_algs));
}
-module_cpu_feature_match(VXRS, chacha_mod_init);
+module_cpu_feature_match(S390_CPU_FEATURE_VXRS, chacha_mod_init);
module_exit(chacha_mod_fini);
MODULE_DESCRIPTION("ChaCha20 stream cipher");
diff --git a/arch/s390/crypto/crc32-vx.c b/arch/s390/crypto/crc32-vx.c
index fafecad20752..017143e9cef7 100644
--- a/arch/s390/crypto/crc32-vx.c
+++ b/arch/s390/crypto/crc32-vx.c
@@ -298,7 +298,7 @@ static void __exit crc_vx_mod_exit(void)
crypto_unregister_shashes(crc32_vx_algs, ARRAY_SIZE(crc32_vx_algs));
}
-module_cpu_feature_match(VXRS, crc_vx_mod_init);
+module_cpu_feature_match(S390_CPU_FEATURE_VXRS, crc_vx_mod_init);
module_exit(crc_vx_mod_exit);
MODULE_AUTHOR("Hendrik Brueckner <brueckner@linux.vnet.ibm.com>");
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index e013088b5115..8e75b83a5ddc 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -492,7 +492,7 @@ out_err:
return ret;
}
-module_cpu_feature_match(MSA, des_s390_init);
+module_cpu_feature_match(S390_CPU_FEATURE_MSA, des_s390_init);
module_exit(des_s390_exit);
MODULE_ALIAS_CRYPTO("des");
diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
index 6b07a2f1ce8a..0800a2a5799f 100644
--- a/arch/s390/crypto/ghash_s390.c
+++ b/arch/s390/crypto/ghash_s390.c
@@ -145,7 +145,7 @@ static void __exit ghash_mod_exit(void)
crypto_unregister_shash(&ghash_alg);
}
-module_cpu_feature_match(MSA, ghash_mod_init);
+module_cpu_feature_match(S390_CPU_FEATURE_MSA, ghash_mod_init);
module_exit(ghash_mod_exit);
MODULE_ALIAS_CRYPTO("ghash");
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index ae382bafc772..a077087bc6cc 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -907,5 +907,5 @@ static void __exit prng_exit(void)
}
}
-module_cpu_feature_match(MSA, prng_init);
+module_cpu_feature_match(S390_CPU_FEATURE_MSA, prng_init);
module_exit(prng_exit);
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index a3fabf310a38..bc3a22704e09 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -95,7 +95,7 @@ static void __exit sha1_s390_fini(void)
crypto_unregister_shash(&alg);
}
-module_cpu_feature_match(MSA, sha1_s390_init);
+module_cpu_feature_match(S390_CPU_FEATURE_MSA, sha1_s390_init);
module_exit(sha1_s390_fini);
MODULE_ALIAS_CRYPTO("sha1");
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index 24983f175676..6f1ccdf93d3e 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -134,7 +134,7 @@ static void __exit sha256_s390_fini(void)
crypto_unregister_shash(&sha256_alg);
}
-module_cpu_feature_match(MSA, sha256_s390_init);
+module_cpu_feature_match(S390_CPU_FEATURE_MSA, sha256_s390_init);
module_exit(sha256_s390_fini);
MODULE_ALIAS_CRYPTO("sha256");
diff --git a/arch/s390/crypto/sha3_256_s390.c b/arch/s390/crypto/sha3_256_s390.c
index 30ac49b635bf..e1350e033a32 100644
--- a/arch/s390/crypto/sha3_256_s390.c
+++ b/arch/s390/crypto/sha3_256_s390.c
@@ -137,7 +137,7 @@ static void __exit sha3_256_s390_fini(void)
crypto_unregister_shash(&sha3_256_alg);
}
-module_cpu_feature_match(MSA, sha3_256_s390_init);
+module_cpu_feature_match(S390_CPU_FEATURE_MSA, sha3_256_s390_init);
module_exit(sha3_256_s390_fini);
MODULE_ALIAS_CRYPTO("sha3-256");
diff --git a/arch/s390/crypto/sha3_512_s390.c b/arch/s390/crypto/sha3_512_s390.c
index e70d50f7620f..06c142ed9bb1 100644
--- a/arch/s390/crypto/sha3_512_s390.c
+++ b/arch/s390/crypto/sha3_512_s390.c
@@ -147,7 +147,7 @@ static void __exit fini(void)
crypto_unregister_shash(&sha3_384_alg);
}
-module_cpu_feature_match(MSA, init);
+module_cpu_feature_match(S390_CPU_FEATURE_MSA, init);
module_exit(fini);
MODULE_LICENSE("GPL");
diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c
index 43ce4956df73..04f11c407763 100644
--- a/arch/s390/crypto/sha512_s390.c
+++ b/arch/s390/crypto/sha512_s390.c
@@ -142,7 +142,7 @@ static void __exit fini(void)
crypto_unregister_shash(&sha384_alg);
}
-module_cpu_feature_match(MSA, init);
+module_cpu_feature_match(S390_CPU_FEATURE_MSA, init);
module_exit(fini);
MODULE_LICENSE("GPL");
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index f0bc4dc3e9bf..6511d15ace45 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -437,7 +437,7 @@ __init int hypfs_diag_init(void)
int rc;
if (diag204_probe()) {
- pr_err("The hardware system does not support hypfs\n");
+ pr_info("The hardware system does not support hypfs\n");
return -ENODATA;
}
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 5c97f48cea91..ee919bfc8186 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -496,9 +496,9 @@ fail_hypfs_sprp_exit:
hypfs_vm_exit();
fail_hypfs_diag_exit:
hypfs_diag_exit();
+ pr_err("Initialization of hypfs failed with rc=%i\n", rc);
fail_dbfs_exit:
hypfs_dbfs_exit();
- pr_err("Initialization of hypfs failed with rc=%i\n", rc);
return rc;
}
device_initcall(hypfs_init)
diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h
index b515cfa62bd9..f508f5025e38 100644
--- a/arch/s390/include/asm/ap.h
+++ b/arch/s390/include/asm/ap.h
@@ -227,13 +227,13 @@ struct ap_qirq_ctrl {
* ap_aqic(): Control interruption for a specific AP.
* @qid: The AP queue number
* @qirqctrl: struct ap_qirq_ctrl (64 bit value)
- * @ind: The notification indicator byte
+ * @pa_ind: Physical address of the notification indicator byte
*
* Returns AP queue status.
*/
static inline struct ap_queue_status ap_aqic(ap_qid_t qid,
struct ap_qirq_ctrl qirqctrl,
- void *ind)
+ phys_addr_t pa_ind)
{
unsigned long reg0 = qid | (3UL << 24); /* fc 3UL is AQIC */
union {
@@ -241,7 +241,7 @@ static inline struct ap_queue_status ap_aqic(ap_qid_t qid,
struct ap_qirq_ctrl qirqctrl;
struct ap_queue_status status;
} reg1;
- unsigned long reg2 = virt_to_phys(ind);
+ unsigned long reg2 = pa_ind;
reg1.qirqctrl = qirqctrl;
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 191dc7898b0f..2de74fcd0578 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -113,76 +113,71 @@ static inline bool arch_test_and_change_bit(unsigned long nr,
return old & mask;
}
-static inline void arch___set_bit(unsigned long nr, volatile unsigned long *ptr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
{
- unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long *p = __bitops_word(nr, addr);
unsigned long mask = __bitops_mask(nr);
- *addr |= mask;
+ *p |= mask;
}
-static inline void arch___clear_bit(unsigned long nr,
- volatile unsigned long *ptr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
- unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long *p = __bitops_word(nr, addr);
unsigned long mask = __bitops_mask(nr);
- *addr &= ~mask;
+ *p &= ~mask;
}
-static inline void arch___change_bit(unsigned long nr,
- volatile unsigned long *ptr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
{
- unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long *p = __bitops_word(nr, addr);
unsigned long mask = __bitops_mask(nr);
- *addr ^= mask;
+ *p ^= mask;
}
-static inline bool arch___test_and_set_bit(unsigned long nr,
- volatile unsigned long *ptr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
- unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long *p = __bitops_word(nr, addr);
unsigned long mask = __bitops_mask(nr);
unsigned long old;
- old = *addr;
- *addr |= mask;
+ old = *p;
+ *p |= mask;
return old & mask;
}
-static inline bool arch___test_and_clear_bit(unsigned long nr,
- volatile unsigned long *ptr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
- unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long *p = __bitops_word(nr, addr);
unsigned long mask = __bitops_mask(nr);
unsigned long old;
- old = *addr;
- *addr &= ~mask;
+ old = *p;
+ *p &= ~mask;
return old & mask;
}
-static inline bool arch___test_and_change_bit(unsigned long nr,
- volatile unsigned long *ptr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
- unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long *p = __bitops_word(nr, addr);
unsigned long mask = __bitops_mask(nr);
unsigned long old;
- old = *addr;
- *addr ^= mask;
+ old = *p;
+ *p ^= mask;
return old & mask;
}
-static inline bool arch_test_bit(unsigned long nr,
- const volatile unsigned long *ptr)
-{
- const volatile unsigned long *addr = __bitops_word(nr, ptr);
- unsigned long mask = __bitops_mask(nr);
-
- return *addr & mask;
-}
+#define arch_test_bit generic_test_bit
+#define arch_test_bit_acquire generic_test_bit_acquire
static inline bool arch_test_and_set_bit_lock(unsigned long nr,
volatile unsigned long *ptr)
diff --git a/arch/s390/include/asm/cpufeature.h b/arch/s390/include/asm/cpufeature.h
index 14cfd48d598e..931204613753 100644
--- a/arch/s390/include/asm/cpufeature.h
+++ b/arch/s390/include/asm/cpufeature.h
@@ -2,28 +2,21 @@
/*
* Module interface for CPU features
*
- * Copyright IBM Corp. 2015
+ * Copyright IBM Corp. 2015, 2022
* Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
*/
#ifndef __ASM_S390_CPUFEATURE_H
#define __ASM_S390_CPUFEATURE_H
-#include <asm/elf.h>
+enum {
+ S390_CPU_FEATURE_MSA,
+ S390_CPU_FEATURE_VXRS,
+ S390_CPU_FEATURE_UV,
+ MAX_CPU_FEATURES
+};
-/* Hardware features on Linux on z Systems are indicated by facility bits that
- * are mapped to the so-called machine flags. Particular machine flags are
- * then used to define ELF hardware capabilities; most notably hardware flags
- * that are essential for user space / glibc.
- *
- * Restrict the set of exposed CPU features to ELF hardware capabilities for
- * now. Additional machine flags can be indicated by values larger than
- * MAX_ELF_HWCAP_FEATURES.
- */
-#define MAX_ELF_HWCAP_FEATURES (8 * sizeof(elf_hwcap))
-#define MAX_CPU_FEATURES MAX_ELF_HWCAP_FEATURES
-
-#define cpu_feature(feat) ilog2(HWCAP_ ## feat)
+#define cpu_feature(feature) (feature)
int cpu_have_feature(unsigned int nr);
diff --git a/arch/s390/include/asm/dma.h b/arch/s390/include/asm/dma.h
index 6f26f35d4a71..dec1c4ce628c 100644
--- a/arch/s390/include/asm/dma.h
+++ b/arch/s390/include/asm/dma.h
@@ -11,10 +11,4 @@
*/
#define MAX_DMA_ADDRESS 0x80000000
-#ifdef CONFIG_PCI
-extern int isa_dma_bridge_buggy;
-#else
-#define isa_dma_bridge_buggy (0)
-#endif
-
#endif /* _ASM_S390_DMA_H */
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 1572b3634cdd..829d68e2c685 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -42,18 +42,4 @@ typedef struct {
.context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \
.context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list),
-static inline int tprot(unsigned long addr)
-{
- int rc = -EFAULT;
-
- asm volatile(
- " tprot 0(%1),0\n"
- "0: ipm %0\n"
- " srl %0,28\n"
- "1:\n"
- EX_TABLE(0b,1b)
- : "+d" (rc) : "a" (addr) : "cc");
- return rc;
-}
-
#endif
diff --git a/arch/s390/include/asm/os_info.h b/arch/s390/include/asm/os_info.h
index 147a8d547ef9..85248d8fee0c 100644
--- a/arch/s390/include/asm/os_info.h
+++ b/arch/s390/include/asm/os_info.h
@@ -8,6 +8,8 @@
#ifndef _ASM_S390_OS_INFO_H
#define _ASM_S390_OS_INFO_H
+#include <linux/uio.h>
+
#define OS_INFO_VERSION_MAJOR 1
#define OS_INFO_VERSION_MINOR 1
#define OS_INFO_MAGIC 0x4f53494e464f535aULL /* OSINFOSZ */
@@ -39,7 +41,20 @@ u32 os_info_csum(struct os_info *os_info);
#ifdef CONFIG_CRASH_DUMP
void *os_info_old_entry(int nr, unsigned long *size);
-int copy_oldmem_kernel(void *dst, unsigned long src, size_t count);
+size_t copy_oldmem_iter(struct iov_iter *iter, unsigned long src, size_t count);
+
+static inline int copy_oldmem_kernel(void *dst, unsigned long src, size_t count)
+{
+ struct iov_iter iter;
+ struct kvec kvec;
+
+ kvec.iov_base = dst;
+ kvec.iov_len = count;
+ iov_iter_kvec(&iter, WRITE, &kvec, 1, count);
+ if (copy_oldmem_iter(&iter, src, count) < count)
+ return -EFAULT;
+ return 0;
+}
#else
static inline void *os_info_old_entry(int nr, unsigned long *size)
{
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 85eb0ef9d4c3..7b4cdadbc023 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -6,7 +6,6 @@
#include <linux/mutex.h>
#include <linux/iommu.h>
#include <linux/pci_hotplug.h>
-#include <asm-generic/pci.h>
#include <asm/pci_clp.h>
#include <asm/pci_debug.h>
#include <asm/pci_insn.h>
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index cf81acf3879c..f019df19884d 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -424,23 +424,6 @@ static inline int is_module_addr(void *addr)
* implies read permission.
*/
/*xwr*/
-#define __P000 PAGE_NONE
-#define __P001 PAGE_RO
-#define __P010 PAGE_RO
-#define __P011 PAGE_RO
-#define __P100 PAGE_RX
-#define __P101 PAGE_RX
-#define __P110 PAGE_RX
-#define __P111 PAGE_RX
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_RO
-#define __S010 PAGE_RW
-#define __S011 PAGE_RW
-#define __S100 PAGE_RX
-#define __S101 PAGE_RX
-#define __S110 PAGE_RWX
-#define __S111 PAGE_RWX
/*
* Segment entry (large page) protection definitions.
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index addefe8ccdba..9d4c7f71e070 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -17,6 +17,7 @@
#define EXT_SCCB_READ_CPU (3 * PAGE_SIZE)
#ifndef __ASSEMBLY__
+#include <linux/uio.h>
#include <asm/chpid.h>
#include <asm/cpu.h>
@@ -146,8 +147,7 @@ int sclp_pci_deconfigure(u32 fid);
int sclp_ap_configure(u32 apid);
int sclp_ap_deconfigure(u32 apid);
int sclp_pci_report(struct zpci_report_error_header *report, u32 fh, u32 fid);
-int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count);
-int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count);
+size_t memcpy_hsa_iter(struct iov_iter *iter, unsigned long src, size_t count);
void sclp_ocf_cpc_name_copy(char *dst);
static inline int sclp_get_core_info(struct sclp_core_info *info, int early)
diff --git a/arch/s390/include/asm/softirq_stack.h b/arch/s390/include/asm/softirq_stack.h
index fd17f25704bd..af68d6c1d584 100644
--- a/arch/s390/include/asm/softirq_stack.h
+++ b/arch/s390/include/asm/softirq_stack.h
@@ -5,9 +5,10 @@
#include <asm/lowcore.h>
#include <asm/stacktrace.h>
+#ifndef CONFIG_PREEMPT_RT
static inline void do_softirq_own_stack(void)
{
call_on_stack(0, S390_lowcore.async_stack, void, __do_softirq);
}
-
+#endif
#endif /* __ASM_S390_SOFTIRQ_STACK_H */
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index c2c9995466e0..f7038b800cc3 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -285,7 +285,6 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
return __clear_user(to, n);
}
-int copy_to_user_real(void __user *dest, unsigned long src, unsigned long count);
void *s390_kernel_write(void *dst, const void *src, size_t size);
int __noreturn __put_kernel_bad(void);
diff --git a/arch/s390/include/asm/unwind.h b/arch/s390/include/asm/unwind.h
index 0bf06f1682d8..02462e7100c1 100644
--- a/arch/s390/include/asm/unwind.h
+++ b/arch/s390/include/asm/unwind.h
@@ -47,7 +47,7 @@ struct unwind_state {
static inline unsigned long unwind_recover_ret_addr(struct unwind_state *state,
unsigned long ip)
{
- ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, ip, NULL);
+ ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, ip, (void *)state->sp);
if (is_kretprobe_trampoline(ip))
ip = kretprobe_find_ret_addr(state->task, (void *)state->sp, &state->kr_cur);
return ip;
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 27d6b3c7aa06..3cbfa9fddd9a 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -35,7 +35,7 @@ CFLAGS_unwind_bc.o += -fno-optimize-sibling-calls
obj-y := traps.o time.o process.o earlypgm.o early.o setup.o idle.o vtime.o
obj-y += processor.o syscall.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
-obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o
+obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o cpufeature.o
obj-y += sysinfo.o lgr.o os_info.o machine_kexec.o
obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o
diff --git a/arch/s390/kernel/cpufeature.c b/arch/s390/kernel/cpufeature.c
new file mode 100644
index 000000000000..1b2ae42a0c15
--- /dev/null
+++ b/arch/s390/kernel/cpufeature.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2022
+ */
+
+#include <linux/cpufeature.h>
+#include <linux/bug.h>
+#include <asm/elf.h>
+
+enum {
+ TYPE_HWCAP,
+ TYPE_FACILITY,
+};
+
+struct s390_cpu_feature {
+ unsigned int type : 4;
+ unsigned int num : 28;
+};
+
+static struct s390_cpu_feature s390_cpu_features[MAX_CPU_FEATURES] = {
+ [S390_CPU_FEATURE_MSA] = {.type = TYPE_HWCAP, .num = HWCAP_NR_MSA},
+ [S390_CPU_FEATURE_VXRS] = {.type = TYPE_HWCAP, .num = HWCAP_NR_VXRS},
+ [S390_CPU_FEATURE_UV] = {.type = TYPE_FACILITY, .num = 158},
+};
+
+/*
+ * cpu_have_feature - Test CPU features on module initialization
+ */
+int cpu_have_feature(unsigned int num)
+{
+ struct s390_cpu_feature *feature;
+
+ if (WARN_ON_ONCE(num >= MAX_CPU_FEATURES))
+ return 0;
+ feature = &s390_cpu_features[num];
+ switch (feature->type) {
+ case TYPE_HWCAP:
+ return !!(elf_hwcap & BIT(feature->num));
+ case TYPE_FACILITY:
+ return test_facility(feature->num);
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+}
+EXPORT_SYMBOL(cpu_have_feature);
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index 28124d0fa1d5..bad8f47fc5d6 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -53,6 +53,8 @@ struct save_area {
};
static LIST_HEAD(dump_save_areas);
+static DEFINE_MUTEX(memcpy_real_mutex);
+static char memcpy_real_buf[PAGE_SIZE];
/*
* Allocate a save area
@@ -63,7 +65,7 @@ struct save_area * __init save_area_alloc(bool is_boot_cpu)
sa = memblock_alloc(sizeof(*sa), 8);
if (!sa)
- panic("Failed to allocate save area\n");
+ return NULL;
if (is_boot_cpu)
list_add(&sa->list, &dump_save_areas);
@@ -114,80 +116,35 @@ void __init save_area_add_vxrs(struct save_area *sa, __vector128 *vxrs)
memcpy(sa->vxrs_high, vxrs + 16, 16 * sizeof(__vector128));
}
-/*
- * Return physical address for virtual address
- */
-static inline void *load_real_addr(void *addr)
-{
- unsigned long real_addr;
-
- asm volatile(
- " lra %0,0(%1)\n"
- " jz 0f\n"
- " la %0,0\n"
- "0:"
- : "=a" (real_addr) : "a" (addr) : "cc");
- return (void *)real_addr;
-}
-
-/*
- * Copy memory of the old, dumped system to a kernel space virtual address
- */
-int copy_oldmem_kernel(void *dst, unsigned long src, size_t count)
+static size_t copy_to_iter_real(struct iov_iter *iter, unsigned long src, size_t count)
{
- unsigned long len;
- void *ra;
- int rc;
+ size_t len, copied, res = 0;
+ mutex_lock(&memcpy_real_mutex);
while (count) {
- if (!oldmem_data.start && src < sclp.hsa_size) {
- /* Copy from zfcp/nvme dump HSA area */
- len = min(count, sclp.hsa_size - src);
- rc = memcpy_hsa_kernel(dst, src, len);
- if (rc)
- return rc;
- } else {
- /* Check for swapped kdump oldmem areas */
- if (oldmem_data.start && src - oldmem_data.start < oldmem_data.size) {
- src -= oldmem_data.start;
- len = min(count, oldmem_data.size - src);
- } else if (oldmem_data.start && src < oldmem_data.size) {
- len = min(count, oldmem_data.size - src);
- src += oldmem_data.start;
- } else {
- len = count;
- }
- if (is_vmalloc_or_module_addr(dst)) {
- ra = load_real_addr(dst);
- len = min(PAGE_SIZE - offset_in_page(ra), len);
- } else {
- ra = dst;
- }
- if (memcpy_real(ra, src, len))
- return -EFAULT;
- }
- dst += len;
- src += len;
- count -= len;
+ len = min(PAGE_SIZE, count);
+ if (memcpy_real(memcpy_real_buf, src, len))
+ break;
+ copied = copy_to_iter(memcpy_real_buf, len, iter);
+ count -= copied;
+ src += copied;
+ res += copied;
+ if (copied < len)
+ break;
}
- return 0;
+ mutex_unlock(&memcpy_real_mutex);
+ return res;
}
-/*
- * Copy memory of the old, dumped system to a user space virtual address
- */
-static int copy_oldmem_user(void __user *dst, unsigned long src, size_t count)
+size_t copy_oldmem_iter(struct iov_iter *iter, unsigned long src, size_t count)
{
- unsigned long len;
- int rc;
+ size_t len, copied, res = 0;
while (count) {
if (!oldmem_data.start && src < sclp.hsa_size) {
/* Copy from zfcp/nvme dump HSA area */
len = min(count, sclp.hsa_size - src);
- rc = memcpy_hsa_user(dst, src, len);
- if (rc)
- return rc;
+ copied = memcpy_hsa_iter(iter, src, len);
} else {
/* Check for swapped kdump oldmem areas */
if (oldmem_data.start && src - oldmem_data.start < oldmem_data.size) {
@@ -199,15 +156,15 @@ static int copy_oldmem_user(void __user *dst, unsigned long src, size_t count)
} else {
len = count;
}
- rc = copy_to_user_real(dst, src, count);
- if (rc)
- return rc;
+ copied = copy_to_iter_real(iter, src, len);
}
- dst += len;
- src += len;
- count -= len;
+ count -= copied;
+ src += copied;
+ res += copied;
+ if (copied < len)
+ break;
}
- return 0;
+ return res;
}
/*
@@ -217,26 +174,9 @@ ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, size_t csize,
unsigned long offset)
{
unsigned long src;
- int rc;
- if (!(iter_is_iovec(iter) || iov_iter_is_kvec(iter)))
- return -EINVAL;
- /* Multi-segment iterators are not supported */
- if (iter->nr_segs > 1)
- return -EINVAL;
- if (!csize)
- return 0;
src = pfn_to_phys(pfn) + offset;
-
- /* XXX: pass the iov_iter down to a common function */
- if (iter_is_iovec(iter))
- rc = copy_oldmem_user(iter->iov->iov_base, src, csize);
- else
- rc = copy_oldmem_kernel(iter->kvec->iov_base, src, csize);
- if (rc < 0)
- return rc;
- iov_iter_advance(iter, csize);
- return csize;
+ return copy_oldmem_iter(iter, src, csize);
}
/*
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 53ed3884fe64..60ac66aab163 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -11,6 +11,7 @@
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/errno.h>
+#include <linux/entry-common.h>
#include <linux/hardirq.h>
#include <linux/log2.h>
#include <linux/kprobes.h>
@@ -397,11 +398,12 @@ int notrace s390_do_machine_check(struct pt_regs *regs)
static unsigned long long last_ipd;
struct mcck_struct *mcck;
unsigned long long tmp;
+ irqentry_state_t irq_state;
union mci mci;
unsigned long mcck_dam_code;
int mcck_pending = 0;
- nmi_enter();
+ irq_state = irqentry_nmi_enter(regs);
if (user_mode(regs))
update_timer_mcck();
@@ -504,14 +506,14 @@ int notrace s390_do_machine_check(struct pt_regs *regs)
clear_cpu_flag(CIF_MCCK_GUEST);
if (user_mode(regs) && mcck_pending) {
- nmi_exit();
+ irqentry_nmi_exit(regs, irq_state);
return 1;
}
if (mcck_pending)
schedule_mcck_handler();
- nmi_exit();
+ irqentry_nmi_exit(regs, irq_state);
return 0;
}
NOKPROBE_SYMBOL(s390_do_machine_check);
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 89949b9f3cf8..d5119e039d85 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -91,6 +91,18 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
memcpy(dst, src, arch_task_struct_size);
dst->thread.fpu.regs = dst->thread.fpu.fprs;
+
+ /*
+ * Don't transfer over the runtime instrumentation or the guarded
+ * storage control block pointers. These fields are cleared here instead
+ * of in copy_thread() to avoid premature freeing of associated memory
+ * on fork() failure. Wait to clear the RI flag because ->stack still
+ * refers to the source thread.
+ */
+ dst->thread.ri_cb = NULL;
+ dst->thread.gs_cb = NULL;
+ dst->thread.gs_bc_cb = NULL;
+
return 0;
}
@@ -150,13 +162,11 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
frame->childregs.flags = 0;
if (new_stackp)
frame->childregs.gprs[15] = new_stackp;
-
- /* Don't copy runtime instrumentation info */
- p->thread.ri_cb = NULL;
+ /*
+ * Clear the runtime instrumentation flag after the above childregs
+ * copy. The CB pointer was already cleared in arch_dup_task_struct().
+ */
frame->childregs.psw.mask &= ~PSW_MASK_RI;
- /* Don't copy guarded storage control block */
- p->thread.gs_cb = NULL;
- p->thread.gs_bc_cb = NULL;
/* Set a new TLS ? */
if (clone_flags & CLONE_SETTLS) {
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index aa0e0e7fc773..a194611ba88c 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -8,7 +8,6 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/stop_machine.h>
-#include <linux/cpufeature.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/random.h>
@@ -96,15 +95,6 @@ void cpu_init(void)
enter_lazy_tlb(&init_mm, current);
}
-/*
- * cpu_have_feature - Test CPU features on module initialization
- */
-int cpu_have_feature(unsigned int num)
-{
- return elf_hwcap & (1UL << num);
-}
-EXPORT_SYMBOL(cpu_have_feature);
-
static void show_facilities(struct seq_file *m)
{
unsigned int bit;
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index ebad41afe355..ed4fbbbdd1b0 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -474,19 +474,18 @@ static void __init setup_lowcore_dat_off(void)
lc->restart_data = 0;
lc->restart_source = -1U;
- mcck_stack = (unsigned long)memblock_alloc(THREAD_SIZE, THREAD_SIZE);
- if (!mcck_stack)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, THREAD_SIZE, THREAD_SIZE);
- lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET;
-
- /* Setup absolute zero lowcore */
put_abs_lowcore(restart_stack, lc->restart_stack);
put_abs_lowcore(restart_fn, lc->restart_fn);
put_abs_lowcore(restart_data, lc->restart_data);
put_abs_lowcore(restart_source, lc->restart_source);
put_abs_lowcore(restart_psw, lc->restart_psw);
+ mcck_stack = (unsigned long)memblock_alloc(THREAD_SIZE, THREAD_SIZE);
+ if (!mcck_stack)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, THREAD_SIZE, THREAD_SIZE);
+ lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET;
+
lc->spinlock_lockval = arch_spin_lockval(0);
lc->spinlock_index = 0;
arch_spin_lock_setup(0);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index ee7871f770fb..09b6e756d521 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -379,7 +379,9 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
flags = FAULT_FLAG_DEFAULT;
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
- if (access == VM_WRITE || is_write)
+ if (is_write)
+ access = VM_WRITE;
+ if (access == VM_WRITE)
flags |= FAULT_FLAG_WRITE;
mmap_read_lock(mm);
@@ -433,6 +435,17 @@ retry:
goto out_up;
goto out;
}
+
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED) {
+ if (gmap) {
+ mmap_read_lock(mm);
+ goto out_gmap;
+ }
+ fault = 0;
+ goto out;
+ }
+
if (unlikely(fault & VM_FAULT_ERROR))
goto out_up;
@@ -452,6 +465,7 @@ retry:
mmap_read_lock(mm);
goto retry;
}
+out_gmap:
if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
address = __gmap_link(gmap, current->thread.gmap_addr,
address);
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 421efa46946b..d6d84e02f35a 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -172,32 +172,6 @@ void memcpy_absolute(void *dest, void *src, size_t count)
}
/*
- * Copy memory from kernel (real) to user (virtual)
- */
-int copy_to_user_real(void __user *dest, unsigned long src, unsigned long count)
-{
- int offs = 0, size, rc;
- char *buf;
-
- buf = (char *) __get_free_page(GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- rc = -EFAULT;
- while (offs < count) {
- size = min(PAGE_SIZE, count - offs);
- if (memcpy_real(buf, src + offs, size))
- goto out;
- if (copy_to_user(dest + offs, buf, size))
- goto out;
- offs += size;
- }
- rc = 0;
-out:
- free_page((unsigned long) buf);
- return rc;
-}
-
-/*
* Check if physical address is within prefix or zero page
*/
static int is_swapped(phys_addr_t addr)
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index d545f5c39f7e..5980ce348832 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -188,3 +188,23 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
}
}
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_RO,
+ [VM_WRITE] = PAGE_RO,
+ [VM_WRITE | VM_READ] = PAGE_RO,
+ [VM_EXEC] = PAGE_RX,
+ [VM_EXEC | VM_READ] = PAGE_RX,
+ [VM_EXEC | VM_WRITE] = PAGE_RX,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_RX,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_RO,
+ [VM_SHARED | VM_WRITE] = PAGE_RW,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_RW,
+ [VM_SHARED | VM_EXEC] = PAGE_RX,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_RX,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_RWX,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_RWX
+};
+DECLARE_VM_GET_PAGE_PROT
diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c
index 5d77acbd1c87..6a8da1b742ae 100644
--- a/arch/s390/pci/pci_bus.c
+++ b/arch/s390/pci/pci_bus.c
@@ -145,9 +145,6 @@ int zpci_bus_scan_bus(struct zpci_bus *zbus)
struct zpci_dev *zdev;
int devfn, rc, ret = 0;
- if (!zbus->function[0])
- return 0;
-
for (devfn = 0; devfn < ZPCI_FUNCTIONS_PER_BUS; devfn++) {
zdev = zbus->function[devfn];
if (zdev && zdev->state == ZPCI_FN_STATE_CONFIGURED) {
@@ -184,26 +181,26 @@ void zpci_bus_scan_busses(void)
/* zpci_bus_create_pci_bus - Create the PCI bus associated with this zbus
* @zbus: the zbus holding the zdevices
- * @f0: function 0 of the bus
+ * @fr: PCI root function that will determine the bus's domain and bus speed
* @ops: the pci operations
*
- * Function zero is taken as a parameter as this is used to determine the
- * domain, multifunction property and maximum bus speed of the entire bus.
+ * The PCI function @fr determines the domain (its UID), multifunction property
+ * and maximum bus speed of the entire bus.
*
* Return: 0 on success, an error code otherwise
*/
-static int zpci_bus_create_pci_bus(struct zpci_bus *zbus, struct zpci_dev *f0, struct pci_ops *ops)
+static int zpci_bus_create_pci_bus(struct zpci_bus *zbus, struct zpci_dev *fr, struct pci_ops *ops)
{
struct pci_bus *bus;
int domain;
- domain = zpci_alloc_domain((u16)f0->uid);
+ domain = zpci_alloc_domain((u16)fr->uid);
if (domain < 0)
return domain;
zbus->domain_nr = domain;
- zbus->multifunction = f0->rid_available;
- zbus->max_bus_speed = f0->max_bus_speed;
+ zbus->multifunction = fr->rid_available;
+ zbus->max_bus_speed = fr->max_bus_speed;
/*
* Note that the zbus->resources are taken over and zbus->resources
@@ -303,47 +300,6 @@ void pcibios_bus_add_device(struct pci_dev *pdev)
}
}
-/* zpci_bus_create_hotplug_slots - Add hotplug slot(s) for device added to bus
- * @zdev: the zPCI device that was newly added
- *
- * Add the hotplug slot(s) for the newly added PCI function. Normally this is
- * simply the slot for the function itself. If however we are adding the
- * function 0 on a zbus, it might be that we already registered functions on
- * that zbus but could not create their hotplug slots yet so add those now too.
- *
- * Return: 0 on success, an error code otherwise
- */
-static int zpci_bus_create_hotplug_slots(struct zpci_dev *zdev)
-{
- struct zpci_bus *zbus = zdev->zbus;
- int devfn, rc = 0;
-
- rc = zpci_init_slot(zdev);
- if (rc)
- return rc;
- zdev->has_hp_slot = 1;
-
- if (zdev->devfn == 0 && zbus->multifunction) {
- /* Now that function 0 is there we can finally create the
- * hotplug slots for those functions with devfn != 0 that have
- * been parked in zbus->function[] waiting for us to be able to
- * create the PCI bus.
- */
- for (devfn = 1; devfn < ZPCI_FUNCTIONS_PER_BUS; devfn++) {
- zdev = zbus->function[devfn];
- if (zdev && !zdev->has_hp_slot) {
- rc = zpci_init_slot(zdev);
- if (rc)
- return rc;
- zdev->has_hp_slot = 1;
- }
- }
-
- }
-
- return rc;
-}
-
static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)
{
int rc = -EINVAL;
@@ -352,21 +308,19 @@ static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)
pr_err("devfn %04x is already assigned\n", zdev->devfn);
return rc;
}
+
zdev->zbus = zbus;
zbus->function[zdev->devfn] = zdev;
zpci_nb_devices++;
- if (zbus->bus) {
- if (zbus->multifunction && !zdev->rid_available) {
- WARN_ONCE(1, "rid_available not set for multifunction\n");
- goto error;
- }
-
- zpci_bus_create_hotplug_slots(zdev);
- } else {
- /* Hotplug slot will be created once function 0 appears */
- zbus->multifunction = 1;
+ if (zbus->multifunction && !zdev->rid_available) {
+ WARN_ONCE(1, "rid_available not set for multifunction\n");
+ goto error;
}
+ rc = zpci_init_slot(zdev);
+ if (rc)
+ goto error;
+ zdev->has_hp_slot = 1;
return 0;
@@ -400,7 +354,11 @@ int zpci_bus_device_register(struct zpci_dev *zdev, struct pci_ops *ops)
return -ENOMEM;
}
- if (zdev->devfn == 0) {
+ if (!zbus->bus) {
+ /* The UID of the first PCI function registered with a zpci_bus
+ * is used as the domain number for that bus. Currently there
+ * is exactly one zpci_bus per domain.
+ */
rc = zpci_bus_create_pci_bus(zbus, zdev, ops);
if (rc)
goto error;
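Taken together, the s390 hunks above mean neither bus creation nor hotplug-slot creation waits for function 0 any more: the first function registered creates the PCI bus (its UID becomes the domain), and every function gets its slot as it is added. A simplified, hypothetical sketch of the resulting flow (not the literal driver code):

/* Sketch only: condensed view of the new registration order. */
static int sketch_register_function(struct zpci_bus *zbus,
				    struct zpci_dev *zdev,
				    struct pci_ops *ops)
{
	int rc;

	if (!zbus->bus) {
		/* First function seen on this zbus: create the PCI bus,
		 * using this function's UID as the domain number. */
		rc = zpci_bus_create_pci_bus(zbus, zdev, ops);
		if (rc)
			return rc;
	}

	/* Every function now gets its hotplug slot immediately. */
	return zpci_bus_add_device(zbus, zdev);
}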
diff --git a/arch/sh/include/asm/bitops-op32.h b/arch/sh/include/asm/bitops-op32.h
index cfe5465acce7..5ace89b46507 100644
--- a/arch/sh/include/asm/bitops-op32.h
+++ b/arch/sh/include/asm/bitops-op32.h
@@ -2,6 +2,8 @@
#ifndef __ASM_SH_BITOPS_OP32_H
#define __ASM_SH_BITOPS_OP32_H
+#include <linux/bits.h>
+
/*
* The bit modifying instructions on SH-2A are only capable of working
* with a 3-bit immediate, which signifies the shift position for the bit
@@ -16,7 +18,8 @@
#define BYTE_OFFSET(nr) ((nr) % BITS_PER_BYTE)
#endif
-static inline void __set_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
{
if (__builtin_constant_p(nr)) {
__asm__ __volatile__ (
@@ -33,7 +36,8 @@ static inline void __set_bit(int nr, volatile unsigned long *addr)
}
}
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
if (__builtin_constant_p(nr)) {
__asm__ __volatile__ (
@@ -52,7 +56,7 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr)
}
/**
- * __change_bit - Toggle a bit in memory
+ * arch___change_bit - Toggle a bit in memory
* @nr: the bit to change
* @addr: the address to start counting from
*
@@ -60,7 +64,8 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr)
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
-static inline void __change_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
{
if (__builtin_constant_p(nr)) {
__asm__ __volatile__ (
@@ -79,7 +84,7 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
}
/**
- * __test_and_set_bit - Set a bit and return its old value
+ * arch___test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
@@ -87,7 +92,8 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -98,7 +104,7 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
}
/**
- * __test_and_clear_bit - Clear a bit and return its old value
+ * arch___test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
@@ -106,7 +112,8 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -117,8 +124,8 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
}
/* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr,
- volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -128,14 +135,9 @@ static inline int __test_and_change_bit(int nr,
return (old & mask) != 0;
}
-/**
- * test_bit - Determine whether a bit is set
- * @nr: bit number to test
- * @addr: Address to start counting from
- */
-static inline int test_bit(int nr, const volatile unsigned long *addr)
-{
- return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
-}
+#define arch_test_bit generic_test_bit
+#define arch_test_bit_acquire generic_test_bit_acquire
+
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
#endif /* __ASM_SH_BITOPS_OP32_H */
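All of the arch___*_bit() helpers above reduce a bit number to a word index plus an in-word mask via BIT_WORD()/BIT_MASK() from <linux/bits.h>. A standalone user-space sketch of that arithmetic (illustrative only, with SKETCH_-prefixed stand-ins for the kernel macros):

#include <stdio.h>

#define SKETCH_BITS_PER_LONG (8 * sizeof(unsigned long))
#define SKETCH_BIT_WORD(nr)  ((nr) / SKETCH_BITS_PER_LONG)           /* which long */
#define SKETCH_BIT_MASK(nr)  (1UL << ((nr) % SKETCH_BITS_PER_LONG))  /* bit inside it */

int main(void)
{
	unsigned long bitmap[2] = { 0, 0 };
	unsigned long nr = 70;	/* lands in bitmap[1] on a 64-bit host */

	bitmap[SKETCH_BIT_WORD(nr)] |= SKETCH_BIT_MASK(nr);	/* non-atomic set */
	printf("word %lu, mask %#lx\n",
	       (unsigned long)SKETCH_BIT_WORD(nr), SKETCH_BIT_MASK(nr));
	return 0;
}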
diff --git a/arch/sh/include/asm/dma.h b/arch/sh/include/asm/dma.h
index 17d23ae98c77..c8bee3f985a2 100644
--- a/arch/sh/include/asm/dma.h
+++ b/arch/sh/include/asm/dma.h
@@ -137,10 +137,4 @@ extern int register_chan_caps(const char *dmac, struct dma_chan_caps *capslist);
extern int dma_create_sysfs_files(struct dma_channel *, struct dma_info *);
extern void dma_remove_sysfs_files(struct dma_channel *, struct dma_info *);
-#ifdef CONFIG_PCI
-extern int isa_dma_bridge_buggy;
-#else
-#define isa_dma_bridge_buggy (0)
-#endif
-
#endif /* __ASM_SH_DMA_H */
diff --git a/arch/sh/include/asm/pci.h b/arch/sh/include/asm/pci.h
index ad22e88c6657..54c30126ea17 100644
--- a/arch/sh/include/asm/pci.h
+++ b/arch/sh/include/asm/pci.h
@@ -88,10 +88,4 @@ static inline int pci_proc_domain(struct pci_bus *bus)
return hose->need_domain_info;
}
-/* Chances are this interrupt is wired PC-style ... */
-static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
-{
- return channel ? 15 : 14;
-}
-
#endif /* __ASM_SH_PCI_H */
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index d7ddb1ec86a0..6fb9ec54cf9b 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -89,23 +89,6 @@ static inline unsigned long phys_addr_mask(void)
* completely separate permission bits for user and kernel space.
*/
/*xwr*/
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_EXECREAD
-#define __P101 PAGE_EXECREAD
-#define __P110 PAGE_COPY
-#define __P111 PAGE_COPY
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_WRITEONLY
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_EXECREAD
-#define __S101 PAGE_EXECREAD
-#define __S110 PAGE_RWX
-#define __S111 PAGE_RWX
typedef pte_t *pte_addr_t;
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 56269c2c3414..909276738078 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -149,6 +149,7 @@ void irq_ctx_exit(int cpu)
hardirq_ctx[cpu] = NULL;
}
+#ifndef CONFIG_PREEMPT_RT
void do_softirq_own_stack(void)
{
struct thread_info *curctx;
@@ -176,6 +177,7 @@ void do_softirq_own_stack(void)
"r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
);
}
+#endif
#else
static inline void handle_one_irq(unsigned int irq)
{
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index e175667b1363..acd2f5e50bfc 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -485,6 +485,10 @@ good_area:
if (mm_fault_error(regs, error_code, address, fault))
return;
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
if (fault & VM_FAULT_RETRY) {
flags |= FAULT_FLAG_TRIED;
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index 6a1a1297baae..b82199878b45 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -19,6 +19,26 @@ unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
EXPORT_SYMBOL(shm_align_mask);
#ifdef CONFIG_MMU
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READONLY,
+ [VM_WRITE] = PAGE_COPY,
+ [VM_WRITE | VM_READ] = PAGE_COPY,
+ [VM_EXEC] = PAGE_EXECREAD,
+ [VM_EXEC | VM_READ] = PAGE_EXECREAD,
+ [VM_EXEC | VM_WRITE] = PAGE_COPY,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READONLY,
+ [VM_SHARED | VM_WRITE] = PAGE_WRITEONLY,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC] = PAGE_EXECREAD,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_EXECREAD,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_RWX,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_RWX
+};
+DECLARE_VM_GET_PAGE_PROT
+
/*
* To avoid cache aliases, we map the shared page with same color.
*/
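DECLARE_VM_GET_PAGE_PROT supplies the generic vm_get_page_prot() that consumes this table; it roughly expands to the following (shown as a sketch, not the verbatim macro):

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	/* Only the low R/W/X/SHARED flag bits index protection_map[16]. */
	return protection_map[vm_flags &
			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}
EXPORT_SYMBOL(vm_get_page_prot);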
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 02f0a6084f04..1c852bb530ec 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -86,7 +86,6 @@ config SPARC64
select PERF_USE_VMALLOC
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select HAVE_C_RECORDMCOUNT
- select ARCH_HAS_VM_GET_PAGE_PROT
select HAVE_ARCH_AUDITSYSCALL
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_DEBUG_PAGEALLOC
diff --git a/arch/sparc/include/asm/bitops_32.h b/arch/sparc/include/asm/bitops_32.h
index 889afa9f990f..3448c191b484 100644
--- a/arch/sparc/include/asm/bitops_32.h
+++ b/arch/sparc/include/asm/bitops_32.h
@@ -19,9 +19,9 @@
#error only <linux/bitops.h> can be included directly
#endif
-unsigned long ___set_bit(unsigned long *addr, unsigned long mask);
-unsigned long ___clear_bit(unsigned long *addr, unsigned long mask);
-unsigned long ___change_bit(unsigned long *addr, unsigned long mask);
+unsigned long sp32___set_bit(unsigned long *addr, unsigned long mask);
+unsigned long sp32___clear_bit(unsigned long *addr, unsigned long mask);
+unsigned long sp32___change_bit(unsigned long *addr, unsigned long mask);
/*
* Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0'
@@ -36,7 +36,7 @@ static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *add
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
- return ___set_bit(ADDR, mask) != 0;
+ return sp32___set_bit(ADDR, mask) != 0;
}
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
@@ -46,7 +46,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
- (void) ___set_bit(ADDR, mask);
+ (void) sp32___set_bit(ADDR, mask);
}
static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
@@ -56,7 +56,7 @@ static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *a
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
- return ___clear_bit(ADDR, mask) != 0;
+ return sp32___clear_bit(ADDR, mask) != 0;
}
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
@@ -66,7 +66,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
- (void) ___clear_bit(ADDR, mask);
+ (void) sp32___clear_bit(ADDR, mask);
}
static inline int test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
@@ -76,7 +76,7 @@ static inline int test_and_change_bit(unsigned long nr, volatile unsigned long *
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
- return ___change_bit(ADDR, mask) != 0;
+ return sp32___change_bit(ADDR, mask) != 0;
}
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
@@ -86,7 +86,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
- (void) ___change_bit(ADDR, mask);
+ (void) sp32___change_bit(ADDR, mask);
}
#include <asm-generic/bitops/non-atomic.h>
diff --git a/arch/sparc/include/asm/dma.h b/arch/sparc/include/asm/dma.h
index 462e7c794a09..08043f35b110 100644
--- a/arch/sparc/include/asm/dma.h
+++ b/arch/sparc/include/asm/dma.h
@@ -82,14 +82,6 @@
#define DMA_BURST64 0x40
#define DMA_BURSTBITS 0x7f
-/* From PCI */
-
-#ifdef CONFIG_PCI
-extern int isa_dma_bridge_buggy;
-#else
-#define isa_dma_bridge_buggy (0)
-#endif
-
#ifdef CONFIG_SPARC32
struct device;
diff --git a/arch/sparc/include/asm/pci.h b/arch/sparc/include/asm/pci.h
index 4deddf430e5d..1419aa84df71 100644
--- a/arch/sparc/include/asm/pci.h
+++ b/arch/sparc/include/asm/pci.h
@@ -37,16 +37,8 @@ static inline int pci_proc_domain(struct pci_bus *bus)
#define HAVE_PCI_MMAP
#define arch_can_pci_mmap_io() 1
#define HAVE_ARCH_PCI_GET_UNMAPPED_AREA
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
#define get_pci_unmapped_area get_fb_unmapped_area
#endif /* CONFIG_SPARC64 */
-#if defined(CONFIG_SPARC64) || defined(CONFIG_LEON_PCI)
-static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
-{
- return PCI_IRQ_NONE;
-}
-#else
-#include <asm-generic/pci.h>
-#endif
-
#endif /* ___ASM_SPARC_PCI_H */
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index 4866625da314..8ff549004fac 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -64,25 +64,6 @@ void paging_init(void);
extern unsigned long ptr_in_current_pgd;
-/* xwr */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY
-#define __P101 PAGE_READONLY
-#define __P110 PAGE_COPY
-#define __P111 PAGE_COPY
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY
-#define __S101 PAGE_READONLY
-#define __S110 PAGE_SHARED
-#define __S111 PAGE_SHARED
-
/* First physical page can be anywhere, the following is needed so that
* va-->pa and vice versa conversions work properly without performance
* hit for all __pa()/__va() operations.
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 4679e45c8348..a779418ceba9 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -187,25 +187,6 @@ bool kern_addr_valid(unsigned long addr);
#define _PAGE_SZHUGE_4U _PAGE_SZ4MB_4U
#define _PAGE_SZHUGE_4V _PAGE_SZ4MB_4V
-/* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */
-#define __P000 __pgprot(0)
-#define __P001 __pgprot(0)
-#define __P010 __pgprot(0)
-#define __P011 __pgprot(0)
-#define __P100 __pgprot(0)
-#define __P101 __pgprot(0)
-#define __P110 __pgprot(0)
-#define __P111 __pgprot(0)
-
-#define __S000 __pgprot(0)
-#define __S001 __pgprot(0)
-#define __S010 __pgprot(0)
-#define __S011 __pgprot(0)
-#define __S100 __pgprot(0)
-#define __S101 __pgprot(0)
-#define __S110 __pgprot(0)
-#define __S111 __pgprot(0)
-
#ifndef __ASSEMBLY__
pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long);
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index c8848bb681a1..41fa1be980a3 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -855,6 +855,7 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs)
set_irq_regs(old_regs);
}
+#ifndef CONFIG_PREEMPT_RT
void do_softirq_own_stack(void)
{
void *orig_sp, *sp = softirq_stack[smp_processor_id()];
@@ -869,6 +870,7 @@ void do_softirq_own_stack(void)
__asm__ __volatile__("mov %0, %%sp"
: : "r" (orig_sp));
}
+#endif
#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 31b0c1983286..cb1ef25116e9 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -751,156 +751,15 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
}
/* Platform support for /proc/bus/pci/X/Y mmap()s. */
-
-/* If the user uses a host-bridge as the PCI device, he may use
- * this to perform a raw mmap() of the I/O or MEM space behind
- * that controller.
- *
- * This can be useful for execution of x86 PCI bios initialization code
- * on a PCI card, like the xfree86 int10 stuff does.
- */
-static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state)
+int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma)
{
struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
- unsigned long space_size, user_offset, user_size;
-
- if (mmap_state == pci_mmap_io) {
- space_size = resource_size(&pbm->io_space);
- } else {
- space_size = resource_size(&pbm->mem_space);
- }
-
- /* Make sure the request is in range. */
- user_offset = vma->vm_pgoff << PAGE_SHIFT;
- user_size = vma->vm_end - vma->vm_start;
-
- if (user_offset >= space_size ||
- (user_offset + user_size) > space_size)
- return -EINVAL;
-
- if (mmap_state == pci_mmap_io) {
- vma->vm_pgoff = (pbm->io_space.start +
- user_offset) >> PAGE_SHIFT;
- } else {
- vma->vm_pgoff = (pbm->mem_space.start +
- user_offset) >> PAGE_SHIFT;
- }
-
- return 0;
-}
-
-/* Adjust vm_pgoff of VMA such that it is the physical page offset
- * corresponding to the 32-bit pci bus offset for DEV requested by the user.
- *
- * Basically, the user finds the base address for his device which he wishes
- * to mmap. They read the 32-bit value from the config space base register,
- * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
- * offset parameter of mmap on /proc/bus/pci/XXX for that device.
- *
- * Returns negative error code on failure, zero on success.
- */
-static int __pci_mmap_make_offset(struct pci_dev *pdev,
- struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state)
-{
- unsigned long user_paddr, user_size;
- int i, err;
-
- /* First compute the physical address in vma->vm_pgoff,
- * making sure the user offset is within range in the
- * appropriate PCI space.
- */
- err = __pci_mmap_make_offset_bus(pdev, vma, mmap_state);
- if (err)
- return err;
-
- /* If this is a mapping on a host bridge, any address
- * is OK.
- */
- if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
- return err;
-
- /* Otherwise make sure it's in the range for one of the
- * device's resources.
- */
- user_paddr = vma->vm_pgoff << PAGE_SHIFT;
- user_size = vma->vm_end - vma->vm_start;
+ resource_size_t ioaddr = pci_resource_start(pdev, bar);
- for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
- struct resource *rp = &pdev->resource[i];
- resource_size_t aligned_end;
-
- /* Active? */
- if (!rp->flags)
- continue;
-
- /* Same type? */
- if (i == PCI_ROM_RESOURCE) {
- if (mmap_state != pci_mmap_mem)
- continue;
- } else {
- if ((mmap_state == pci_mmap_io &&
- (rp->flags & IORESOURCE_IO) == 0) ||
- (mmap_state == pci_mmap_mem &&
- (rp->flags & IORESOURCE_MEM) == 0))
- continue;
- }
-
- /* Align the resource end to the next page address.
- * PAGE_SIZE intentionally added instead of (PAGE_SIZE - 1),
- * because actually we need the address of the next byte
- * after rp->end.
- */
- aligned_end = (rp->end + PAGE_SIZE) & PAGE_MASK;
-
- if ((rp->start <= user_paddr) &&
- (user_paddr + user_size) <= aligned_end)
- break;
- }
-
- if (i > PCI_ROM_RESOURCE)
+ if (!pbm)
return -EINVAL;
- return 0;
-}
-
-/* Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
- * device mapping.
- */
-static void __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state)
-{
- /* Our io_remap_pfn_range takes care of this, do nothing. */
-}
-
-/* Perform the actual remap of the pages for a PCI device mapping, as appropriate
- * for this architecture. The region in the process to map is described by vm_start
- * and vm_end members of VMA, the base physical address is found in vm_pgoff.
- * The pci device structure is provided so that architectures may make mapping
- * decisions on a per-device or per-bus basis.
- *
- * Returns a negative error code on failure, zero on success.
- */
-int pci_mmap_page_range(struct pci_dev *dev, int bar,
- struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine)
-{
- int ret;
-
- ret = __pci_mmap_make_offset(dev, vma, mmap_state);
- if (ret < 0)
- return ret;
-
- __pci_mmap_set_pgprot(dev, vma, mmap_state);
-
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- ret = io_remap_pfn_range(vma, vma->vm_start,
- vma->vm_pgoff,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
- if (ret)
- return ret;
+ vma->vm_pgoff += (ioaddr + pbm->io_space.start) >> PAGE_SHIFT;
return 0;
}
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index 22b148e5a5f8..ad8094d955eb 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -174,11 +174,6 @@ void smp_call_function_interrupt(void)
irq_exit();
}
-int setup_profiling_timer(unsigned int multiplier)
-{
- return -EINVAL;
-}
-
void __init smp_prepare_cpus(unsigned int max_cpus)
{
int i, cpuid, extra;
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index a1f78e9ddaf3..a55295d1b924 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1186,12 +1186,6 @@ void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
preempt_enable();
}
-/* /proc/profile writes can call this, don't __init it please. */
-int setup_profiling_timer(unsigned int multiplier)
-{
- return -EINVAL;
-}
-
void __init smp_prepare_cpus(unsigned int max_cpus)
{
}
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index 8b81d0f00c97..cf80d1ae352b 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -120,7 +120,7 @@ void arch_atomic_set(atomic_t *v, int i)
}
EXPORT_SYMBOL(arch_atomic_set);
-unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
+unsigned long sp32___set_bit(unsigned long *addr, unsigned long mask)
{
unsigned long old, flags;
@@ -131,9 +131,9 @@ unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
return old & mask;
}
-EXPORT_SYMBOL(___set_bit);
+EXPORT_SYMBOL(sp32___set_bit);
-unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)
+unsigned long sp32___clear_bit(unsigned long *addr, unsigned long mask)
{
unsigned long old, flags;
@@ -144,9 +144,9 @@ unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)
return old & mask;
}
-EXPORT_SYMBOL(___clear_bit);
+EXPORT_SYMBOL(sp32___clear_bit);
-unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
+unsigned long sp32___change_bit(unsigned long *addr, unsigned long mask)
{
unsigned long old, flags;
@@ -157,7 +157,7 @@ unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
return old & mask;
}
-EXPORT_SYMBOL(___change_bit);
+EXPORT_SYMBOL(sp32___change_bit);
unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
{
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index ad569d9bd124..91259f291c54 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -190,6 +190,10 @@ good_area:
if (fault_signal_pending(fault, regs))
return;
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 253e07043298..4acc12eafbf5 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -427,6 +427,10 @@ good_area:
if (fault_signal_pending(fault, regs))
goto exit_exception;
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ goto lock_released;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
@@ -449,6 +453,7 @@ good_area:
}
mmap_read_unlock(mm);
+lock_released:
mm_rss = get_mm_rss(mm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
mm_rss -= (mm->context.thp_pte_count * (HPAGE_SIZE / PAGE_SIZE));
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index 1e9f577f084d..d88e774c8eb4 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -302,3 +302,23 @@ void sparc_flush_page_to_ram(struct page *page)
__flush_page_to_ram(vaddr);
}
EXPORT_SYMBOL(sparc_flush_page_to_ram);
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READONLY,
+ [VM_WRITE] = PAGE_COPY,
+ [VM_WRITE | VM_READ] = PAGE_COPY,
+ [VM_EXEC] = PAGE_READONLY,
+ [VM_EXEC | VM_READ] = PAGE_READONLY,
+ [VM_EXEC | VM_WRITE] = PAGE_COPY,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READONLY,
+ [VM_SHARED | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC] = PAGE_READONLY,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED
+};
+DECLARE_VM_GET_PAGE_PROT
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index f6174df2d5af..d6faee23c77d 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2634,6 +2634,9 @@ void vmemmap_free(unsigned long start, unsigned long end,
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+/* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */
+static pgprot_t protection_map[16] __ro_after_init;
+
static void prot_init_common(unsigned long page_none,
unsigned long page_shared,
unsigned long page_copy,
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index 4ec22e156a2e..78de31ac1da7 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -12,6 +12,8 @@ config UML
select ARCH_HAS_STRNLEN_USER
select ARCH_NO_PREEMPT
select HAVE_ARCH_AUDITSYSCALL
+ select HAVE_ARCH_KASAN if X86_64
+ select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ASM_MODVERSIONS
select HAVE_UID16
@@ -82,7 +84,7 @@ config ARCH_HAS_CACHE_LINE_SIZE
source "arch/$(HEADER_ARCH)/um/Kconfig"
config MAY_HAVE_RUNTIME_DEPS
- bool
+ bool
config STATIC_LINK
bool "Force a static link"
@@ -219,6 +221,19 @@ config UML_TIME_TRAVEL_SUPPORT
It is safe to say Y, but you probably don't need this.
+config KASAN_SHADOW_OFFSET
+ hex
+ depends on KASAN
+ default 0x100000000000
+ help
+ This is the offset at which the ~16TB of shadow memory is
+ mapped and used by KASAN for memory debugging. This can be any
+ address that has at least KASAN_SHADOW_SIZE (total address space divided
+ by 8) amount of space so that the KASAN shadow memory does not conflict
+ with anything. The default is 0x100000000000, which works even if mem is
+ set to a large value. On low-memory systems, try 0x7fff8000, as it fits
+ into the immediate of most instructions, improving performance.
+
endmenu
source "arch/um/drivers/Kconfig"
diff --git a/arch/um/drivers/Kconfig b/arch/um/drivers/Kconfig
index 914da774bd39..5903e2b598aa 100644
--- a/arch/um/drivers/Kconfig
+++ b/arch/um/drivers/Kconfig
@@ -251,37 +251,37 @@ config UML_NET_VECTOR
depends on UML_NET
select MAY_HAVE_RUNTIME_DEPS
help
- This User-Mode Linux network driver uses multi-message send
- and receive functions. The host running the UML guest must have
- a linux kernel version above 3.0 and a libc version > 2.13.
- This driver provides tap, raw, gre and l2tpv3 network transports
- with up to 4 times higher network throughput than the UML network
- drivers.
+ This User-Mode Linux network driver uses multi-message send
+ and receive functions. The host running the UML guest must have
+ a linux kernel version above 3.0 and a libc version > 2.13.
+ This driver provides tap, raw, gre and l2tpv3 network transports
+ with up to 4 times higher network throughput than the UML network
+ drivers.
config UML_NET_VDE
bool "VDE transport (obsolete)"
depends on UML_NET
select MAY_HAVE_RUNTIME_DEPS
help
- This User-Mode Linux network transport allows one or more running
- UMLs on a single host to communicate with each other and also
- with the rest of the world using Virtual Distributed Ethernet,
- an improved fork of uml_switch.
+ This User-Mode Linux network transport allows one or more running
+ UMLs on a single host to communicate with each other and also
+ with the rest of the world using Virtual Distributed Ethernet,
+ an improved fork of uml_switch.
- You must have libvdeplug installed in order to build the vde
- transport into UML.
+ You must have libvdeplug installed in order to build the vde
+ transport into UML.
- To use this form of networking, you will need to run vde_switch
- on the host.
+ To use this form of networking, you will need to run vde_switch
+ on the host.
- For more information, see <http://wiki.virtualsquare.org/>
- That site has a good overview of what VDE is and also examples
- of the UML command line to use to enable VDE networking.
+ For more information, see <http://wiki.virtualsquare.org/>
+ That site has a good overview of what VDE is and also examples
+ of the UML command line to use to enable VDE networking.
- NOTE: THIS TRANSPORT IS DEPRECATED AND WILL BE REMOVED SOON!!! Please
- migrate to UML_NET_VECTOR.
+ NOTE: THIS TRANSPORT IS DEPRECATED AND WILL BE REMOVED SOON!!! Please
+ migrate to UML_NET_VECTOR.
- If unsure, say N.
+ If unsure, say N.
config UML_NET_MCAST
bool "Multicast transport (obsolete)"
@@ -311,19 +311,19 @@ config UML_NET_PCAP
depends on UML_NET
select MAY_HAVE_RUNTIME_DEPS
help
- The pcap transport makes a pcap packet stream on the host look
- like an ethernet device inside UML. This is useful for making
- UML act as a network monitor for the host. You must have libcap
- installed in order to build the pcap transport into UML.
+ The pcap transport makes a pcap packet stream on the host look
+ like an ethernet device inside UML. This is useful for making
+ UML act as a network monitor for the host. You must have libcap
+ installed in order to build the pcap transport into UML.
For more information, see
<http://user-mode-linux.sourceforge.net/old/networking.html> That site
has examples of the UML command line to use to enable this option.
- NOTE: THIS TRANSPORT IS DEPRECATED AND WILL BE REMOVED SOON!!! Please
- migrate to UML_NET_VECTOR.
+ NOTE: THIS TRANSPORT IS DEPRECATED AND WILL BE REMOVED SOON!!! Please
+ migrate to UML_NET_VECTOR.
- If unsure, say N.
+ If unsure, say N.
config UML_NET_SLIRP
bool "SLiRP transport (obsolete)"
diff --git a/arch/um/drivers/random.c b/arch/um/drivers/random.c
index 433a3f8f2ef3..32b3341fe970 100644
--- a/arch/um/drivers/random.c
+++ b/arch/um/drivers/random.c
@@ -28,7 +28,7 @@
* protects against a module being loaded twice at the same time.
*/
static int random_fd = -1;
-static struct hwrng hwrng = { 0, };
+static struct hwrng hwrng;
static DECLARE_COMPLETION(have_data);
static int rng_dev_read(struct hwrng *rng, void *buf, size_t max, bool block)
diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c
index 82ff3785bf69..e719af8bdf56 100644
--- a/arch/um/drivers/virtio_uml.c
+++ b/arch/um/drivers/virtio_uml.c
@@ -958,6 +958,7 @@ static struct virtqueue *vu_setup_vq(struct virtio_device *vdev,
goto error_create;
}
vq->priv = info;
+ vq->num_max = num;
num = virtqueue_get_vring_size(vq);
if (vu_dev->protocol_features &
diff --git a/arch/um/include/asm/common.lds.S b/arch/um/include/asm/common.lds.S
index eca6c452a41b..fd481ac371de 100644
--- a/arch/um/include/asm/common.lds.S
+++ b/arch/um/include/asm/common.lds.S
@@ -83,6 +83,8 @@
}
.init_array : {
__init_array_start = .;
+ *(.kasan_init)
+ *(.init_array.*)
*(.init_array)
__init_array_end = .;
}
diff --git a/arch/um/include/asm/cpufeature.h b/arch/um/include/asm/cpufeature.h
index 19cd7ed6ec3c..4b6d1b526bc1 100644
--- a/arch/um/include/asm/cpufeature.h
+++ b/arch/um/include/asm/cpufeature.h
@@ -65,20 +65,6 @@ extern void setup_clear_cpu_cap(unsigned int bit);
#define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit)
-#if defined(__clang__) && !defined(CONFIG_CC_HAS_ASM_GOTO)
-
-/*
- * Workaround for the sake of BPF compilation which utilizes kernel
- * headers, but clang does not support ASM GOTO and fails the build.
- */
-#ifndef __BPF_TRACING__
-#warning "Compiler lacks ASM_GOTO support. Add -D __BPF_TRACING__ to your compiler arguments"
-#endif
-
-#define static_cpu_has(bit) boot_cpu_has(bit)
-
-#else
-
/*
* Static testing of CPU features. Used the same as boot_cpu_has(). It
* statically patches the target code for additional performance. Use
@@ -137,7 +123,6 @@ t_no:
boot_cpu_has(bit) : \
_static_cpu_has(bit) \
)
-#endif
#define cpu_has_bug(c, bit) cpu_has(c, (bit))
#define set_cpu_bug(c, bit) set_cpu_cap(c, (bit))
diff --git a/arch/um/include/asm/kasan.h b/arch/um/include/asm/kasan.h
new file mode 100644
index 000000000000..0d6547f4ec85
--- /dev/null
+++ b/arch/um/include/asm/kasan.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_UM_KASAN_H
+#define __ASM_UM_KASAN_H
+
+#include <linux/init.h>
+#include <linux/const.h>
+
+#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
+
+/* used in kasan_mem_to_shadow to divide by 8 */
+#define KASAN_SHADOW_SCALE_SHIFT 3
+
+#ifdef CONFIG_X86_64
+#define KASAN_HOST_USER_SPACE_END_ADDR 0x00007fffffffffffUL
+/* KASAN_SHADOW_SIZE is the size of total address space divided by 8 */
+#define KASAN_SHADOW_SIZE ((KASAN_HOST_USER_SPACE_END_ADDR + 1) >> \
+ KASAN_SHADOW_SCALE_SHIFT)
+#else
+#error "KASAN_SHADOW_SIZE is not defined for this sub-architecture"
+#endif /* CONFIG_X86_64 */
+
+#define KASAN_SHADOW_START (KASAN_SHADOW_OFFSET)
+#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
+
+#ifdef CONFIG_KASAN
+void kasan_init(void);
+void kasan_map_memory(void *start, unsigned long len);
+extern int kasan_um_is_ready;
+
+#ifdef CONFIG_STATIC_LINK
+#define kasan_arch_is_ready() (kasan_um_is_ready)
+#endif
+#else
+static inline void kasan_init(void) { }
+#endif /* CONFIG_KASAN */
+
+#endif /* __ASM_UM_KASAN_H */
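As a sanity check on the arithmetic in this header: the 47-bit user address space is 0x8000_0000_0000 bytes (128 TiB), and one shadow byte per eight bytes gives 16 TiB of shadow, matching the "~16TB" mentioned in the Kconfig help. An illustrative compile-time check:

/* Illustrative only: shadow-size arithmetic for the x86-64 sub-arch. */
_Static_assert(((0x00007fffffffffffUL + 1) >> 3) == 0x100000000000UL,
	       "UML/x86-64 KASAN shadow is 16 TiB");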
diff --git a/arch/um/include/asm/pci.h b/arch/um/include/asm/pci.h
index da13fd5519ef..34fe4921b5fa 100644
--- a/arch/um/include/asm/pci.h
+++ b/arch/um/include/asm/pci.h
@@ -4,28 +4,8 @@
#include <linux/types.h>
#include <asm/io.h>
-#define PCIBIOS_MIN_IO 0
-#define PCIBIOS_MIN_MEM 0
-
-#define pcibios_assign_all_busses() 1
-
-extern int isa_dma_bridge_buggy;
-
-#ifdef CONFIG_PCI
-static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
-{
- /* no legacy IRQs */
- return -ENODEV;
-}
-#endif
-
-#ifdef CONFIG_PCI_DOMAINS
-static inline int pci_proc_domain(struct pci_bus *bus)
-{
- /* always show the domain in /proc */
- return 1;
-}
-#endif /* CONFIG_PCI */
+/* Generic PCI */
+#include <asm-generic/pci.h>
#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
/*
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index 167e236d9bb8..66bc3f99d9be 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -68,23 +68,6 @@ extern unsigned long end_iomem;
* Also, write permissions imply read permissions. This is the closest we can
* get..
*/
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY
-#define __P101 PAGE_READONLY
-#define __P110 PAGE_COPY
-#define __P111 PAGE_COPY
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY
-#define __S101 PAGE_READONLY
-#define __S110 PAGE_SHARED
-#define __S111 PAGE_SHARED
/*
* ZERO_PAGE is a global shared page that is always zero: used
diff --git a/arch/um/include/asm/processor-generic.h b/arch/um/include/asm/processor-generic.h
index 6a4fe8b4e686..d0fc1862da95 100644
--- a/arch/um/include/asm/processor-generic.h
+++ b/arch/um/include/asm/processor-generic.h
@@ -59,11 +59,6 @@ static inline void release_thread(struct task_struct *task)
{
}
-static inline void mm_copy_segments(struct mm_struct *from_mm,
- struct mm_struct *new_mm)
-{
-}
-
/*
* User space process size: 3GB (default).
*/
diff --git a/arch/um/include/asm/xor.h b/arch/um/include/asm/xor.h
index 22b39de73c24..647fae200c5d 100644
--- a/arch/um/include/asm/xor.h
+++ b/arch/um/include/asm/xor.h
@@ -18,7 +18,7 @@
#undef XOR_SELECT_TEMPLATE
/* pick an arbitrary one - measuring isn't possible with inf-cpu */
#define XOR_SELECT_TEMPLATE(x) \
- (time_travel_mode == TT_MODE_INFCPU ? TT_CPU_INF_XOR_DEFAULT : x))
+ (time_travel_mode == TT_MODE_INFCPU ? TT_CPU_INF_XOR_DEFAULT : x)
#endif
#endif
diff --git a/arch/um/include/shared/user.h b/arch/um/include/shared/user.h
index dd4badffdeb3..bda66e5a9d4e 100644
--- a/arch/um/include/shared/user.h
+++ b/arch/um/include/shared/user.h
@@ -16,11 +16,12 @@
*/
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-/* This is to get size_t */
+/* This is to get size_t and NULL */
#ifndef __UM_HOST__
#include <linux/types.h>
#else
#include <stddef.h>
+#include <sys/types.h>
#endif
extern void panic(const char *fmt, ...)
diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S
index 2f2a8ce92f1e..2b7fc5b54164 100644
--- a/arch/um/kernel/dyn.lds.S
+++ b/arch/um/kernel/dyn.lds.S
@@ -109,7 +109,11 @@ SECTIONS
be empty, which isn't pretty. */
. = ALIGN(32 / 8);
.preinit_array : { *(.preinit_array) }
- .init_array : { *(.init_array) }
+ .init_array : {
+ *(.kasan_init)
+ *(.init_array.*)
+ *(.init_array)
+ }
.fini_array : { *(.fini_array) }
.data : {
INIT_TASK_DATA(KERNEL_STACK_SIZE)
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 15295c3237a0..38d5a71a579b 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -18,6 +18,25 @@
#include <kern_util.h>
#include <mem_user.h>
#include <os.h>
+#include <linux/sched/task.h>
+
+#ifdef CONFIG_KASAN
+int kasan_um_is_ready;
+void kasan_init(void)
+{
+ /*
+ * kasan_map_memory will map all of the required address space and
+ * the host machine will allocate physical memory as necessary.
+ */
+ kasan_map_memory((void *)KASAN_SHADOW_START, KASAN_SHADOW_SIZE);
+ init_task.kasan_depth = 0;
+ kasan_um_is_ready = true;
+}
+
+static void (*kasan_init_ptr)(void)
+__section(".kasan_init") __used
+= kasan_init;
+#endif
/* allocated in paging_init, zeroed in mem_init, and unchanged thereafter */
unsigned long *empty_zero_page = NULL;
@@ -197,3 +216,23 @@ void *uml_kmalloc(int size, int flags)
{
return kmalloc(size, flags);
}
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READONLY,
+ [VM_WRITE] = PAGE_COPY,
+ [VM_WRITE | VM_READ] = PAGE_COPY,
+ [VM_EXEC] = PAGE_READONLY,
+ [VM_EXEC | VM_READ] = PAGE_READONLY,
+ [VM_EXEC | VM_WRITE] = PAGE_COPY,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READONLY,
+ [VM_SHARED | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC] = PAGE_READONLY,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED
+};
+DECLARE_VM_GET_PAGE_PROT
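The kasan_init_ptr constructor earlier in this file works only because the linker-script hunks in this series place *(.kasan_init) at the head of .init_array, so it runs before any instrumented constructors. A standalone sketch of that mechanism, with a hypothetical early_setup() standing in for kasan_init():

/* Sketch: register an early "constructor" via a dedicated section. */
static void early_setup(void)
{
	/* runs before the ordinary .init_array constructors */
}

static void (*early_setup_ptr)(void)
	__attribute__((section(".kasan_init"), used)) = early_setup;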
diff --git a/arch/um/kernel/stacktrace.c b/arch/um/kernel/stacktrace.c
index 86df52168bd9..fd3b61b3d4d2 100644
--- a/arch/um/kernel/stacktrace.c
+++ b/arch/um/kernel/stacktrace.c
@@ -27,7 +27,7 @@ void dump_trace(struct task_struct *tsk,
frame = (struct stack_frame *)bp;
while (((long) sp & (THREAD_SIZE-1)) != 0) {
- addr = *sp;
+ addr = READ_ONCE_NOCHECK(*sp);
if (__kernel_text_address(addr)) {
reliable = 0;
if ((unsigned long) sp == bp + sizeof(long)) {
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index d1d5d0be0308..d3ce21c4ca32 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -76,6 +76,10 @@ good_area:
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
goto out_nosemaphore;
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return 0;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM) {
goto out_of_memory;
diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S
index 7a8e2b123e29..71a59b8adbdc 100644
--- a/arch/um/kernel/uml.lds.S
+++ b/arch/um/kernel/uml.lds.S
@@ -95,6 +95,7 @@ SECTIONS
}
.got : { *(.got.plt) *(.got) }
+ .eh_frame : { KEEP (*(.eh_frame)) }
.dynamic : { *(.dynamic) }
.tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
.tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
diff --git a/arch/um/os-Linux/mem.c b/arch/um/os-Linux/mem.c
index 3c1b77474d2d..8530b2e08604 100644
--- a/arch/um/os-Linux/mem.c
+++ b/arch/um/os-Linux/mem.c
@@ -17,6 +17,28 @@
#include <init.h>
#include <os.h>
+/*
+ * kasan_map_memory - maps memory from @start with a size of @len.
+ * The allocated memory is filled with zeroes upon success.
+ * @start: the start address of the memory to be mapped
+ * @len: the length of the memory to be mapped
+ *
+ * This function is used to map shadow memory for KASAN in uml
+ */
+void kasan_map_memory(void *start, size_t len)
+{
+ if (mmap(start,
+ len,
+ PROT_READ|PROT_WRITE,
+ MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE,
+ -1,
+ 0) == MAP_FAILED) {
+ os_info("Couldn't allocate shadow memory: %s\n.",
+ strerror(errno));
+ exit(1);
+ }
+}
+
/* Set by make_tempfile() during early boot. */
static char *tempdir = NULL;
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index c316c993a949..b24db6017ded 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -5,6 +5,7 @@
*/
#include <stdlib.h>
+#include <stdbool.h>
#include <unistd.h>
#include <sched.h>
#include <errno.h>
@@ -707,10 +708,24 @@ void halt_skas(void)
UML_LONGJMP(&initial_jmpbuf, INIT_JMP_HALT);
}
+static bool noreboot;
+
+static int __init noreboot_cmd_param(char *str, int *add)
+{
+ noreboot = true;
+ return 0;
+}
+
+__uml_setup("noreboot", noreboot_cmd_param,
+"noreboot\n"
+" Rather than rebooting, exit always, akin to QEMU's -no-reboot option.\n"
+" This is useful if you're using CONFIG_PANIC_TIMEOUT in order to catch\n"
+" crashes in CI\n");
+
void reboot_skas(void)
{
block_signals_trace();
- UML_LONGJMP(&initial_jmpbuf, INIT_JMP_REBOOT);
+ UML_LONGJMP(&initial_jmpbuf, noreboot ? INIT_JMP_HALT : INIT_JMP_REBOOT);
}
void __switch_mm(struct mm_id *mm_idp)
diff --git a/arch/um/os-Linux/umid.c b/arch/um/os-Linux/umid.c
index a3dd61521d24..7a1abb829930 100644
--- a/arch/um/os-Linux/umid.c
+++ b/arch/um/os-Linux/umid.c
@@ -136,7 +136,7 @@ out:
static inline int is_umdir_used(char *dir)
{
char pid[sizeof("nnnnnnnnn")], *end, *file;
- int dead, fd, p, n, err;
+ int fd, p, n, err;
size_t filelen = strlen(dir) + sizeof("/pid") + 1;
file = malloc(filelen);
@@ -145,7 +145,6 @@ static inline int is_umdir_used(char *dir)
snprintf(file, filelen, "%s/pid", dir);
- dead = 0;
fd = open(file, O_RDONLY);
if (fd < 0) {
fd = -errno;
diff --git a/arch/um/os-Linux/user_syms.c b/arch/um/os-Linux/user_syms.c
index 715594fe5719..cb667c9225ab 100644
--- a/arch/um/os-Linux/user_syms.c
+++ b/arch/um/os-Linux/user_syms.c
@@ -27,10 +27,10 @@ EXPORT_SYMBOL(strstr);
#ifndef __x86_64__
extern void *memcpy(void *, const void *, size_t);
EXPORT_SYMBOL(memcpy);
-#endif
-
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memset);
+#endif
+
EXPORT_SYMBOL(printf);
/* Here, instead, I can provide a fake prototype. Yes, someone cares: genksyms.
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index fb5900e2c29a..f9920f1341c8 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -94,7 +94,6 @@ config X86
select ARCH_HAS_SYNC_CORE_BEFORE_USERMODE
select ARCH_HAS_SYSCALL_WRAPPER
select ARCH_HAS_UBSAN_SANITIZE_ALL
- select ARCH_HAS_VM_GET_PAGE_PROT
select ARCH_HAS_DEBUG_WX
select ARCH_HAS_ZONE_DMA_SET if EXPERT
select ARCH_HAVE_NMI_SAFE_CMPXCHG
@@ -280,7 +279,6 @@ config X86
select TRACE_IRQFLAGS_SUPPORT
select TRACE_IRQFLAGS_NMI_SUPPORT
select USER_STACKTRACE_SUPPORT
- select VIRT_TO_BUS
select HAVE_ARCH_KCSAN if X86_64
select X86_FEATURE_NAMES if PROC_FS
select PROC_PID_ARCH_STATUS if PROC_FS
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 7854685c5f25..bafbd905e6e7 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -286,10 +286,6 @@ vdso_install:
archprepare: checkbin
checkbin:
-ifndef CONFIG_CC_HAS_ASM_GOTO
- @echo Compiler lacks asm-goto support.
- @exit 1
-endif
ifdef CONFIG_RETPOLINE
ifeq ($(RETPOLINE_CFLAGS),)
@echo "You are building kernel with non-retpoline compiler." >&2
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index b5aecb524a8a..ffec8bb01ba8 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -103,7 +103,7 @@ $(obj)/zoffset.h: $(obj)/compressed/vmlinux FORCE
AFLAGS_header.o += -I$(objtree)/$(obj)
$(obj)/header.o: $(obj)/zoffset.h
-LDFLAGS_setup.elf := -m elf_i386 -T
+LDFLAGS_setup.elf := -m elf_i386 -z noexecstack -T
$(obj)/setup.elf: $(src)/setup.ld $(SETUP_OBJS) FORCE
$(call if_changed,ld)
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 19e1905dcbf6..35ce1a64068b 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -69,6 +69,10 @@ LDFLAGS_vmlinux := -pie $(call ld-option, --no-dynamic-linker)
ifdef CONFIG_LD_ORPHAN_WARN
LDFLAGS_vmlinux += --orphan-handling=warn
endif
+LDFLAGS_vmlinux += -z noexecstack
+ifeq ($(CONFIG_LD_IS_BFD),y)
+LDFLAGS_vmlinux += $(call ld-option,--no-warn-rwx-segments)
+endif
LDFLAGS_vmlinux += -T
hostprogs := mkpiggy
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index 4910bf230d7b..62208ec04ca4 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -132,7 +132,17 @@ void snp_set_page_private(unsigned long paddr);
void snp_set_page_shared(unsigned long paddr);
void sev_prep_identity_maps(unsigned long top_level_pgt);
#else
-static inline void sev_enable(struct boot_params *bp) { }
+static inline void sev_enable(struct boot_params *bp)
+{
+ /*
+ * bp->cc_blob_address should only be set by boot/compressed kernel.
+ * Initialize it to 0 unconditionally (thus here in this stub too) to
+ * ensure that uninitialized values from buggy bootloaders aren't
+ * propagated.
+ */
+ if (bp)
+ bp->cc_blob_address = 0;
+}
static inline void sev_es_shutdown_ghcb(void) { }
static inline bool sev_es_check_ghcb_fault(unsigned long address)
{
diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
index 52f989f6acc2..c93930d5ccbd 100644
--- a/arch/x86/boot/compressed/sev.c
+++ b/arch/x86/boot/compressed/sev.c
@@ -277,6 +277,14 @@ void sev_enable(struct boot_params *bp)
bool snp;
/*
+ * bp->cc_blob_address should only be set by boot/compressed kernel.
+ * Initialize it to 0 to ensure that uninitialized values from
+ * buggy bootloaders aren't propagated.
+ */
+ if (bp)
+ bp->cc_blob_address = 0;
+
+ /*
* Setup/preliminary detection of SNP. This will be sanity-checked
* against CPUID/MSR values later.
*/
diff --git a/arch/x86/configs/xen.config b/arch/x86/configs/xen.config
index d9fc7139fd46..581296255b39 100644
--- a/arch/x86/configs/xen.config
+++ b/arch/x86/configs/xen.config
@@ -14,7 +14,6 @@ CONFIG_CPU_FREQ=y
# x86 xen specific config options
CONFIG_XEN_PVH=y
-CONFIG_XEN_MAX_DOMAIN_MEMORY=500
CONFIG_XEN_SAVE_RESTORE=y
# CONFIG_XEN_DEBUG_FS is not set
CONFIG_XEN_MCE_LOG=y
diff --git a/arch/x86/entry/Makefile b/arch/x86/entry/Makefile
index eeadbd7d92cc..ca2fe186994b 100644
--- a/arch/x86/entry/Makefile
+++ b/arch/x86/entry/Makefile
@@ -11,12 +11,13 @@ CFLAGS_REMOVE_common.o = $(CC_FLAGS_FTRACE)
CFLAGS_common.o += -fno-stack-protector
-obj-y := entry.o entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
+obj-y := entry.o entry_$(BITS).o syscall_$(BITS).o
obj-y += common.o
obj-y += vdso/
obj-y += vsyscall/
+obj-$(CONFIG_PREEMPTION) += thunk_$(BITS).o
obj-$(CONFIG_IA32_EMULATION) += entry_64_compat.o syscall_32.o
obj-$(CONFIG_X86_X32_ABI) += syscall_x32.o
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index 682338e7e2a3..4dd19819053a 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -311,7 +311,7 @@ SYM_CODE_START(entry_INT80_compat)
* Interrupts are off on entry.
*/
ASM_CLAC /* Do this early to minimize exposure */
- SWAPGS
+ ALTERNATIVE "swapgs", "", X86_FEATURE_XENPV
/*
* User tracing code (ptrace or signal handlers) might assume that
diff --git a/arch/x86/entry/thunk_32.S b/arch/x86/entry/thunk_32.S
index 7591bab060f7..ff6e7003da97 100644
--- a/arch/x86/entry/thunk_32.S
+++ b/arch/x86/entry/thunk_32.S
@@ -29,10 +29,8 @@ SYM_CODE_START_NOALIGN(\name)
SYM_CODE_END(\name)
.endm
-#ifdef CONFIG_PREEMPTION
THUNK preempt_schedule_thunk, preempt_schedule
THUNK preempt_schedule_notrace_thunk, preempt_schedule_notrace
EXPORT_SYMBOL(preempt_schedule_thunk)
EXPORT_SYMBOL(preempt_schedule_notrace_thunk)
-#endif
diff --git a/arch/x86/entry/thunk_64.S b/arch/x86/entry/thunk_64.S
index 505b488fcc65..f38b07d2768b 100644
--- a/arch/x86/entry/thunk_64.S
+++ b/arch/x86/entry/thunk_64.S
@@ -31,14 +31,11 @@ SYM_FUNC_END(\name)
_ASM_NOKPROBE(\name)
.endm
-#ifdef CONFIG_PREEMPTION
THUNK preempt_schedule_thunk, preempt_schedule
THUNK preempt_schedule_notrace_thunk, preempt_schedule_notrace
EXPORT_SYMBOL(preempt_schedule_thunk)
EXPORT_SYMBOL(preempt_schedule_notrace_thunk)
-#endif
-#ifdef CONFIG_PREEMPTION
SYM_CODE_START_LOCAL_NOALIGN(__thunk_restore)
popq %r11
popq %r10
@@ -53,4 +50,3 @@ SYM_CODE_START_LOCAL_NOALIGN(__thunk_restore)
RET
_ASM_NOKPROBE(__thunk_restore)
SYM_CODE_END(__thunk_restore)
-#endif
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index 76cd790ed0bd..12f6c4d714cd 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -180,7 +180,7 @@ quiet_cmd_vdso = VDSO $@
sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
VDSO_LDFLAGS = -shared --hash-style=both --build-id=sha1 \
- $(call ld-option, --eh-frame-hdr) -Bsymbolic
+ $(call ld-option, --eh-frame-hdr) -Bsymbolic -z noexecstack
GCOV_PROFILE := n
quiet_cmd_vdso_and_check = VDSO $@
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 2db93498ff71..cb98a05ee743 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -6291,10 +6291,8 @@ __init int intel_pmu_init(void)
x86_pmu.pebs_aliases = NULL;
x86_pmu.pebs_prec_dist = true;
x86_pmu.pebs_block = true;
- x86_pmu.pebs_capable = ~0ULL;
x86_pmu.flags |= PMU_FL_HAS_RSP_1;
x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
- x86_pmu.flags |= PMU_FL_PEBS_ALL;
x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
@@ -6337,10 +6335,8 @@ __init int intel_pmu_init(void)
x86_pmu.pebs_aliases = NULL;
x86_pmu.pebs_prec_dist = true;
x86_pmu.pebs_block = true;
- x86_pmu.pebs_capable = ~0ULL;
x86_pmu.flags |= PMU_FL_HAS_RSP_1;
x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
- x86_pmu.flags |= PMU_FL_PEBS_ALL;
x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
x86_pmu.lbr_pt_coexist = true;
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index ba60427caa6d..de1f55d51784 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -291,6 +291,7 @@ static u64 load_latency_data(struct perf_event *event, u64 status)
static u64 store_latency_data(struct perf_event *event, u64 status)
{
union intel_x86_pebs_dse dse;
+ union perf_mem_data_src src;
u64 val;
dse.val = status;
@@ -304,7 +305,14 @@ static u64 store_latency_data(struct perf_event *event, u64 status)
val |= P(BLK, NA);
- return val;
+ /*
+ * the pebs_data_source table is only for loads
+ * so override the mem_op to say STORE instead
+ */
+ src.val = val;
+ src.mem_op = P(OP,STORE);
+
+ return src.val;
}
struct pebs_record_core {
@@ -822,7 +830,7 @@ struct event_constraint intel_glm_pebs_event_constraints[] = {
struct event_constraint intel_grt_pebs_event_constraints[] = {
/* Allow all events as PEBS with no flags */
- INTEL_HYBRID_LAT_CONSTRAINT(0x5d0, 0xf),
+ INTEL_HYBRID_LAT_CONSTRAINT(0x5d0, 0x3),
INTEL_HYBRID_LAT_CONSTRAINT(0x6d0, 0xf),
EVENT_CONSTRAINT_END
};
@@ -2262,6 +2270,7 @@ void __init intel_ds_init(void)
PERF_SAMPLE_BRANCH_STACK |
PERF_SAMPLE_TIME;
x86_pmu.flags |= PMU_FL_PEBS_ALL;
+ x86_pmu.pebs_capable = ~0ULL;
pebs_qual = "-baseline";
x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_EXTENDED_REGS;
} else {
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 4f70fb6c2c1e..47fca6a7a8bc 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -1097,6 +1097,14 @@ static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
if (static_cpu_has(X86_FEATURE_ARCH_LBR)) {
reg->config = mask;
+
+ /*
+ * The Arch LBR HW can retrieve the common branch types
+ * from the LBR_INFO. It doesn't require the high overhead
+ * SW disassemble.
+ * Enable the branch type by default for the Arch LBR.
+ */
+ reg->reg |= X86_BR_TYPE_SAVE;
return 0;
}
diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
index ce440011cc4e..1ef4f7861e2e 100644
--- a/arch/x86/events/intel/uncore_snb.c
+++ b/arch/x86/events/intel/uncore_snb.c
@@ -841,6 +841,22 @@ int snb_pci2phy_map_init(int devid)
return 0;
}
+static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ /*
+ * SNB IMC counters are 32-bit and are laid out back to back
+ * in MMIO space. Therefore we must use a 32-bit accessor function;
+ * using readq() from uncore_mmio_read_counter() causes problems
+ * because it reads 64 bits at a time. This is okay for the
+ * uncore_perf_event_update() function, because it drops the upper
+ * 32 bits, but not okay for plain uncore_read_counter() as invoked
+ * in uncore_pmu_event_start().
+ */
+ return (u64)readl(box->io_addr + hwc->event_base);
+}
+
static struct pmu snb_uncore_imc_pmu = {
.task_ctx_nr = perf_invalid_context,
.event_init = snb_uncore_imc_event_init,
@@ -860,7 +876,7 @@ static struct intel_uncore_ops snb_uncore_imc_ops = {
.disable_event = snb_uncore_imc_disable_event,
.enable_event = snb_uncore_imc_enable_event,
.hw_config = snb_uncore_imc_hw_config,
- .read_counter = uncore_mmio_read_counter,
+ .read_counter = snb_uncore_imc_read_counter,
};
static struct intel_uncore_type snb_uncore_imc = {
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index bd8ae0a7010a..3415321c8240 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -98,8 +98,6 @@ static inline bool apic_from_smp_config(void)
#include <asm/paravirt.h>
#endif
-extern int setup_profiling_timer(unsigned int);
-
static inline void native_apic_mem_write(u32 reg, u32 v)
{
volatile u32 *addr = (volatile u32 *)(APIC_BASE + reg);
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index a288ecd230ab..0fe9de58af31 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -63,7 +63,7 @@ arch_set_bit(long nr, volatile unsigned long *addr)
}
static __always_inline void
-arch___set_bit(long nr, volatile unsigned long *addr)
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
{
asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}
@@ -89,7 +89,7 @@ arch_clear_bit_unlock(long nr, volatile unsigned long *addr)
}
static __always_inline void
-arch___clear_bit(long nr, volatile unsigned long *addr)
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}
@@ -114,7 +114,7 @@ arch___clear_bit_unlock(long nr, volatile unsigned long *addr)
}
static __always_inline void
-arch___change_bit(long nr, volatile unsigned long *addr)
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
{
asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}
@@ -145,7 +145,7 @@ arch_test_and_set_bit_lock(long nr, volatile unsigned long *addr)
}
static __always_inline bool
-arch___test_and_set_bit(long nr, volatile unsigned long *addr)
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
bool oldbit;
@@ -171,7 +171,7 @@ arch_test_and_clear_bit(long nr, volatile unsigned long *addr)
* this without also updating arch/x86/kernel/kvm.c
*/
static __always_inline bool
-arch___test_and_clear_bit(long nr, volatile unsigned long *addr)
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
bool oldbit;
@@ -183,7 +183,7 @@ arch___test_and_clear_bit(long nr, volatile unsigned long *addr)
}
static __always_inline bool
-arch___test_and_change_bit(long nr, volatile unsigned long *addr)
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
bool oldbit;
@@ -207,6 +207,20 @@ static __always_inline bool constant_test_bit(long nr, const volatile unsigned l
(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
}
+static __always_inline bool constant_test_bit_acquire(long nr, const volatile unsigned long *addr)
+{
+ bool oldbit;
+
+ asm volatile("testb %2,%1"
+ CC_SET(nz)
+ : CC_OUT(nz) (oldbit)
+ : "m" (((unsigned char *)addr)[nr >> 3]),
+ "i" (1 << (nr & 7))
+ :"memory");
+
+ return oldbit;
+}
+
static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
{
bool oldbit;
@@ -219,10 +233,19 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l
return oldbit;
}
-#define arch_test_bit(nr, addr) \
- (__builtin_constant_p((nr)) \
- ? constant_test_bit((nr), (addr)) \
- : variable_test_bit((nr), (addr)))
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
+{
+ return __builtin_constant_p(nr) ? constant_test_bit(nr, addr) :
+ variable_test_bit(nr, addr);
+}
+
+static __always_inline bool
+arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+ return __builtin_constant_p(nr) ? constant_test_bit_acquire(nr, addr) :
+ variable_test_bit(nr, addr);
+}
/**
* __ffs - find first set bit in word
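Editor's note: the bitops.h hunk above converts arch_test_bit() from a macro into an inline function while keeping the split between a compile-time-constant bit number and a runtime one. The following is a hedged, user-space illustration of that dispatch idea only; demo_test_bit, demo_constant and demo_variable are hypothetical names, not kernel interfaces.

#include <stdbool.h>
#include <stdio.h>

/* Sketch of the constant/variable dispatch used by arch_test_bit(): a
 * compile-time-constant bit number can use a cheap byte-granular test,
 * anything else falls back to a generic word test. */
static inline bool demo_constant(unsigned long nr, const unsigned long *addr)
{
	/* byte-granular test, stands in for the "testb" path */
	return ((const unsigned char *)addr)[nr >> 3] & (1u << (nr & 7));
}

static inline bool demo_variable(unsigned long nr, const unsigned long *addr)
{
	/* generic word test, stands in for the "bt" instruction path */
	return (addr[nr / (8 * sizeof(long))] >> (nr % (8 * sizeof(long)))) & 1;
}

static inline bool demo_test_bit(unsigned long nr, const unsigned long *addr)
{
	return __builtin_constant_p(nr) ? demo_constant(nr, addr)
					: demo_variable(nr, addr);
}

int main(void)
{
	unsigned long bits[2] = { 0 };
	unsigned long nr = 8 * sizeof(long) + 3;	/* a bit in the second word */

	bits[1] |= 1UL << 3;
	printf("%d %d\n", demo_test_bit(nr, bits), demo_test_bit(5, bits));
	return 0;
}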
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index ea34cc31b047..1a85e1fb0922 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -155,20 +155,6 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
#define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit)
-#if defined(__clang__) && !defined(CONFIG_CC_HAS_ASM_GOTO)
-
-/*
- * Workaround for the sake of BPF compilation which utilizes kernel
- * headers, but clang does not support ASM GOTO and fails the build.
- */
-#ifndef __BPF_TRACING__
-#warning "Compiler lacks ASM_GOTO support. Add -D __BPF_TRACING__ to your compiler arguments"
-#endif
-
-#define static_cpu_has(bit) boot_cpu_has(bit)
-
-#else
-
/*
* Static testing of CPU features. Used the same as boot_cpu_has(). It
* statically patches the target code for additional performance. Use
@@ -208,7 +194,6 @@ t_no:
boot_cpu_has(bit) : \
_static_cpu_has(bit) \
)
-#endif
#define cpu_has_bug(c, bit) cpu_has(c, (bit))
#define set_cpu_bug(c, bit) set_cpu_cap(c, (bit))
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 14ed039dff55..ef4775c6db01 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -303,6 +303,7 @@
#define X86_FEATURE_RETHUNK (11*32+14) /* "" Use REturn THUNK */
#define X86_FEATURE_UNRET (11*32+15) /* "" AMD BTB untrain return */
#define X86_FEATURE_USE_IBPB_FW (11*32+16) /* "" Use IBPB during runtime firmware calls */
+#define X86_FEATURE_RSB_VMEXIT_LITE (11*32+17) /* "" Fill RSB on VM exit when EIBRS is enabled */
/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
#define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */
@@ -456,6 +457,8 @@
#define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
#define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
#define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
-#define X86_BUG_RETBLEED X86_BUG(26) /* CPU is affected by RETBleed */
+#define X86_BUG_MMIO_UNKNOWN X86_BUG(26) /* CPU is too old and its MMIO Stale Data status is unknown */
+#define X86_BUG_RETBLEED X86_BUG(27) /* CPU is affected by RETBleed */
+#define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/dma.h b/arch/x86/include/asm/dma.h
index 8e95aa4b0d17..8ae6e0e11b8b 100644
--- a/arch/x86/include/asm/dma.h
+++ b/arch/x86/include/asm/dma.h
@@ -307,12 +307,4 @@ extern int request_dma(unsigned int dmanr, const char *device_id);
extern void free_dma(unsigned int dmanr);
#endif
-/* From PCI */
-
-#ifdef CONFIG_PCI
-extern int isa_dma_bridge_buggy;
-#else
-#define isa_dma_bridge_buggy (0)
-#endif
-
#endif /* _ASM_X86_DMA_H */
diff --git a/arch/x86/include/asm/extable_fixup_types.h b/arch/x86/include/asm/extable_fixup_types.h
index 503622627400..991e31cfde94 100644
--- a/arch/x86/include/asm/extable_fixup_types.h
+++ b/arch/x86/include/asm/extable_fixup_types.h
@@ -64,4 +64,6 @@
#define EX_TYPE_UCOPY_LEN4 (EX_TYPE_UCOPY_LEN | EX_DATA_IMM(4))
#define EX_TYPE_UCOPY_LEN8 (EX_TYPE_UCOPY_LEN | EX_DATA_IMM(8))
+#define EX_TYPE_ZEROPAD 20 /* longword load with zeropad on fault */
+
#endif
diff --git a/arch/x86/include/asm/ibt.h b/arch/x86/include/asm/ibt.h
index 689880eca9ba..9b08082a5d9f 100644
--- a/arch/x86/include/asm/ibt.h
+++ b/arch/x86/include/asm/ibt.h
@@ -31,6 +31,16 @@
#define __noendbr __attribute__((nocf_check))
+/*
+ * Create a dummy function pointer reference to prevent objtool from marking
+ * the function as needing to be "sealed" (i.e. ENDBR converted to NOP by
+ * apply_ibt_endbr()).
+ */
+#define IBT_NOSEAL(fname) \
+ ".pushsection .discard.ibt_endbr_noseal\n\t" \
+ _ASM_PTR fname "\n\t" \
+ ".popsection\n\t"
+
static inline __attribute_const__ u32 gen_endbr(void)
{
u32 endbr;
@@ -84,6 +94,7 @@ extern __noendbr void ibt_restore(u64 save);
#ifndef __ASSEMBLY__
#define ASM_ENDBR
+#define IBT_NOSEAL(name)
#define __noendbr
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index def6ca121111..aeb38023a703 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -27,6 +27,7 @@
* _X - regular server parts
* _D - micro server parts
* _N,_P - other mobile parts
+ * _S - other client parts
*
* Historical OPTDIFFs:
*
@@ -112,6 +113,7 @@
#define INTEL_FAM6_RAPTORLAKE 0xB7
#define INTEL_FAM6_RAPTORLAKE_P 0xBA
+#define INTEL_FAM6_RAPTORLAKE_S 0xBF
/* "Small Core" Processors (Atom) */
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 1870b99c3356..e9025640f634 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -170,15 +170,6 @@ static inline unsigned int isa_virt_to_bus(volatile void *address)
#define isa_bus_to_virt phys_to_virt
/*
- * However PCI ones are not necessarily 1:1 and therefore these interfaces
- * are forbidden in portable PCI drivers.
- *
- * Allow them on x86 for legacy drivers, though.
- */
-#define virt_to_bus virt_to_phys
-#define bus_to_virt phys_to_virt
-
-/*
* The default ioremap() behavior is non-cached; if you need something
* else, you probably want one of the following.
*/
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index e8281d64a431..2c96c43c313a 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -53,7 +53,7 @@
#define KVM_MAX_VCPU_IDS (KVM_MAX_VCPUS * KVM_VCPU_ID_RATIO)
/* memory slots that are not exposed to userspace */
-#define KVM_PRIVATE_MEM_SLOTS 3
+#define KVM_INTERNAL_MEM_SLOTS 3
#define KVM_HALT_POLL_NS_DEFAULT 200000
@@ -1704,7 +1704,7 @@ static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
#define kvm_arch_pmi_in_guest(vcpu) \
((vcpu) && (vcpu)->arch.handling_intr_from_guest)
-void kvm_mmu_x86_module_init(void);
+void __init kvm_mmu_x86_module_init(void);
int kvm_mmu_vendor_module_init(void);
void kvm_mmu_vendor_module_exit(void);
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 88ceaf3648b3..72ca90552b6a 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -89,6 +89,8 @@ static inline void mem_encrypt_free_decrypted_mem(void) { }
/* Architecture __weak replacement functions */
void __init mem_encrypt_init(void);
+void add_encrypt_protection_map(void);
+
/*
* The __sme_pa() and __sme_pa_nodebug() macros are meant for use when
* writing to or comparing values from the cr3 register. Having the
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 182b2a1f71fe..6674bdb096f3 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -150,6 +150,10 @@
* are restricted to targets in
* kernel.
*/
+#define ARCH_CAP_PBRSB_NO BIT(24) /*
+ * Not susceptible to Post-Barrier
+ * Return Stack Buffer Predictions.
+ */
#define MSR_IA32_FLUSH_CMD 0x0000010b
#define L1D_FLUSH BIT(0) /*
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index cba942006ffe..c936ce9f0c47 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -35,32 +35,57 @@
#define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */
/*
+ * Common helper for __FILL_RETURN_BUFFER and __FILL_ONE_RETURN.
+ */
+#define __FILL_RETURN_SLOT \
+ ANNOTATE_INTRA_FUNCTION_CALL; \
+ call 772f; \
+ int3; \
+772:
+
+/*
+ * Stuff the entire RSB.
+ *
* Google experimented with loop-unrolling and this turned out to be
* the optimal version - two calls, each with their own speculation
* trap should their return address end up getting used, in a loop.
*/
-#define __FILL_RETURN_BUFFER(reg, nr, sp) \
- mov $(nr/2), reg; \
-771: \
- ANNOTATE_INTRA_FUNCTION_CALL; \
- call 772f; \
-773: /* speculation trap */ \
- UNWIND_HINT_EMPTY; \
- pause; \
- lfence; \
- jmp 773b; \
-772: \
- ANNOTATE_INTRA_FUNCTION_CALL; \
- call 774f; \
-775: /* speculation trap */ \
- UNWIND_HINT_EMPTY; \
- pause; \
- lfence; \
- jmp 775b; \
-774: \
- add $(BITS_PER_LONG/8) * 2, sp; \
- dec reg; \
- jnz 771b;
+#ifdef CONFIG_X86_64
+#define __FILL_RETURN_BUFFER(reg, nr) \
+ mov $(nr/2), reg; \
+771: \
+ __FILL_RETURN_SLOT \
+ __FILL_RETURN_SLOT \
+ add $(BITS_PER_LONG/8) * 2, %_ASM_SP; \
+ dec reg; \
+ jnz 771b; \
+ /* barrier for jnz misprediction */ \
+ lfence;
+#else
+/*
+ * i386 doesn't unconditionally have LFENCE, as such it can't
+ * do a loop.
+ */
+#define __FILL_RETURN_BUFFER(reg, nr) \
+ .rept nr; \
+ __FILL_RETURN_SLOT; \
+ .endr; \
+ add $(BITS_PER_LONG/8) * nr, %_ASM_SP;
+#endif
+
+/*
+ * Stuff a single RSB slot.
+ *
+ * To mitigate Post-Barrier RSB speculation, one CALL instruction must be
+ * forced to retire before letting a RET instruction execute.
+ *
+ * On PBRSB-vulnerable CPUs, it is not safe for a RET to be executed
+ * before this point.
+ */
+#define __FILL_ONE_RETURN \
+ __FILL_RETURN_SLOT \
+ add $(BITS_PER_LONG/8), %_ASM_SP; \
+ lfence;
#ifdef __ASSEMBLY__
@@ -134,9 +159,11 @@
* A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
* monstrosity above, manually.
*/
-.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
- ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr
- __FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)
+.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2=ALT_NOT(X86_FEATURE_ALWAYS)
+ ALTERNATIVE_2 "jmp .Lskip_rsb_\@", \
+ __stringify(__FILL_RETURN_BUFFER(\reg,\nr)), \ftr, \
+ __stringify(__FILL_ONE_RETURN), \ftr2
+
.Lskip_rsb_\@:
.endm
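Editor's note: the rewritten __FILL_RETURN_BUFFER / __FILL_ONE_RETURN macros above stuff the return stack buffer with CALLs whose return addresses are never consumed architecturally. Below is a hedged, user-space illustration of the same two-calls-per-iteration pattern in GNU C inline asm (x86-64 only); rsb_fill_sketch is a hypothetical name and this is not the kernel sequence itself.

/* Purely illustrative: 16 iterations of two forward CALLs each (32 RSB
 * entries), discarding the pushed return addresses each time, ending with
 * LFENCE. The int3 after each call is the speculation trap and is never
 * reached architecturally. Compile with GCC or Clang on x86-64. */
static void rsb_fill_sketch(void)
{
	unsigned long loops = 16;

	asm volatile(
		"1:\n\t"
		"call 2f\n\t"
		"int3\n\t"
		"2:\n\t"
		"call 3f\n\t"
		"int3\n\t"
		"3:\n\t"
		"add $16, %%rsp\n\t"	/* drop the two return addresses */
		"dec %0\n\t"
		"jnz 1b\n\t"
		"lfence"
		: "+r" (loops)
		:
		: "cc", "memory");
}

int main(void)
{
	rsb_fill_sketch();
	return 0;
}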
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index f3fd5928bcbb..736793d65bcb 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -105,9 +105,6 @@ static inline void early_quirks(void) { }
extern void pci_iommu_alloc(void);
-/* generic pci stuff */
-#include <asm-generic/pci.h>
-
#ifdef CONFIG_NUMA
/* Returns the node based on pci bus */
static inline int __pcibus_to_node(const struct pci_bus *bus)
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index bdaf8391e2e0..aa174fed3a71 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -230,25 +230,6 @@ enum page_cache_mode {
#endif /* __ASSEMBLY__ */
-/* xwr */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY_EXEC
-#define __P101 PAGE_READONLY_EXEC
-#define __P110 PAGE_COPY_EXEC
-#define __P111 PAGE_COPY_EXEC
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY_EXEC
-#define __S101 PAGE_READONLY_EXEC
-#define __S110 PAGE_SHARED_EXEC
-#define __S111 PAGE_SHARED_EXEC
-
/*
* early identity mapping pte attrib macros.
*/
diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
index 8a9eba191516..7fa611216417 100644
--- a/arch/x86/include/asm/rmwcc.h
+++ b/arch/x86/include/asm/rmwcc.h
@@ -11,7 +11,7 @@
#define __CLOBBERS_MEM(clb...) "memory", ## clb
-#if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CONFIG_CC_HAS_ASM_GOTO)
+#ifndef __GCC_ASM_FLAG_OUTPUTS__
/* Use asm goto */
@@ -27,7 +27,7 @@ cc_label: c = true; \
c; \
})
-#else /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CONFIG_CC_HAS_ASM_GOTO) */
+#else /* defined(__GCC_ASM_FLAG_OUTPUTS__) */
/* Use flags output or a set instruction */
@@ -40,7 +40,7 @@ cc_label: c = true; \
c; \
})
-#endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CONFIG_CC_HAS_ASM_GOTO) */
+#endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) */
#define GEN_UNARY_RMWcc_4(op, var, cc, arg0) \
__GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM())
diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index 4a23e52fe0ee..ebc271bb6d8e 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -195,7 +195,7 @@ void snp_set_memory_shared(unsigned long vaddr, unsigned int npages);
void snp_set_memory_private(unsigned long vaddr, unsigned int npages);
void snp_set_wakeup_secondary_cpu(void);
bool snp_init(struct boot_params *bp);
-void snp_abort(void);
+void __init __noreturn snp_abort(void);
int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned long *fw_err);
#else
static inline void sev_es_ist_enter(struct pt_regs *regs) { }
diff --git a/arch/x86/include/asm/sgx.h b/arch/x86/include/asm/sgx.h
index 3f9334ef67cd..eae20fa52b93 100644
--- a/arch/x86/include/asm/sgx.h
+++ b/arch/x86/include/asm/sgx.h
@@ -65,17 +65,22 @@ enum sgx_encls_function {
/**
* enum sgx_return_code - The return code type for ENCLS, ENCLU and ENCLV
+ * %SGX_EPC_PAGE_CONFLICT: Page is being written by another ENCLS function.
* %SGX_NOT_TRACKED: Previous ETRACK's shootdown sequence has not
* been completed yet.
* %SGX_CHILD_PRESENT: SECS has child pages present in the EPC.
* %SGX_INVALID_EINITTOKEN: EINITTOKEN is invalid and enclave signer's
* public key does not match IA32_SGXLEPUBKEYHASH.
+ * %SGX_PAGE_NOT_MODIFIABLE: The EPC page cannot be modified because it
+ * is in the PENDING or MODIFIED state.
* %SGX_UNMASKED_EVENT: An unmasked event, e.g. INTR, was received
*/
enum sgx_return_code {
+ SGX_EPC_PAGE_CONFLICT = 7,
SGX_NOT_TRACKED = 11,
SGX_CHILD_PRESENT = 13,
SGX_INVALID_EINITTOKEN = 16,
+ SGX_PAGE_NOT_MODIFIABLE = 20,
SGX_UNMASKED_EVENT = 128,
};
@@ -234,6 +239,9 @@ struct sgx_pageinfo {
* %SGX_PAGE_TYPE_REG: a regular page
* %SGX_PAGE_TYPE_VA: a VA page
* %SGX_PAGE_TYPE_TRIM: a page in trimmed state
+ *
+ * Make sure when making changes to this enum that its values can still fit
+ * in the bitfield within &struct sgx_encl_page
*/
enum sgx_page_type {
SGX_PAGE_TYPE_SECS,
diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
index 8338b0432b50..46b4f1f7f354 100644
--- a/arch/x86/include/asm/word-at-a-time.h
+++ b/arch/x86/include/asm/word-at-a-time.h
@@ -77,58 +77,18 @@ static inline unsigned long find_zero(unsigned long mask)
* and the next page not being mapped, take the exception and
* return zeroes in the non-existing part.
*/
-#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
-
static inline unsigned long load_unaligned_zeropad(const void *addr)
{
- unsigned long offset, data;
unsigned long ret;
- asm_volatile_goto(
+ asm volatile(
"1: mov %[mem], %[ret]\n"
-
- _ASM_EXTABLE(1b, %l[do_exception])
-
- : [ret] "=r" (ret)
- : [mem] "m" (*(unsigned long *)addr)
- : : do_exception);
-
- return ret;
-
-do_exception:
- offset = (unsigned long)addr & (sizeof(long) - 1);
- addr = (void *)((unsigned long)addr & ~(sizeof(long) - 1));
- data = *(unsigned long *)addr;
- ret = data >> offset * 8;
-
- return ret;
-}
-
-#else /* !CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
-
-static inline unsigned long load_unaligned_zeropad(const void *addr)
-{
- unsigned long offset, data;
- unsigned long ret, err = 0;
-
- asm( "1: mov %[mem], %[ret]\n"
"2:\n"
-
- _ASM_EXTABLE_FAULT(1b, 2b)
-
- : [ret] "=&r" (ret), "+a" (err)
+ _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_ZEROPAD)
+ : [ret] "=r" (ret)
: [mem] "m" (*(unsigned long *)addr));
- if (unlikely(err)) {
- offset = (unsigned long)addr & (sizeof(long) - 1);
- addr = (void *)((unsigned long)addr & ~(sizeof(long) - 1));
- data = *(unsigned long *)addr;
- ret = data >> offset * 8;
- }
-
return ret;
}
-#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
-
#endif /* _ASM_WORD_AT_A_TIME_H */
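Editor's note: the new load_unaligned_zeropad() relies on a single EX_TYPE_ZEROPAD extable entry, so the alignment-and-shift fixup that used to live in the C fallback moves into the exception handler. A hedged, plain-C illustration of that fixup follows; zeropad_fixup is a hypothetical name, and real faults are of course handled by the kernel's extable code, not by C.

#include <stdio.h>
#include <string.h>

/* Illustration only: given an address whose tail would fault, re-read the
 * aligned word that is mapped and shift out the bytes before 'addr', so the
 * missing tail reads back as zeroes (little-endian assumed). The kernel
 * dereferences the aligned address directly; memcpy keeps this user-space
 * sketch free of alignment/aliasing issues. */
static unsigned long zeropad_fixup(const void *addr)
{
	unsigned long offset = (unsigned long)addr & (sizeof(long) - 1);
	const void *aligned = (const void *)((unsigned long)addr &
					     ~(sizeof(long) - 1));
	unsigned long data;

	memcpy(&data, aligned, sizeof(data));
	return data >> (offset * 8);	/* low bytes survive, rest is zero */
}

int main(void)
{
	/* 16 bytes; pretend everything past buf[11] were unmapped. */
	unsigned char buf[16] = "ABCDEFGHIJKL";
	unsigned long v = zeropad_fixup(buf + 9);

	printf("%#lx\n", v);	/* bytes 'J','K','L' followed by zero padding */
	return 0;
}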
diff --git a/arch/x86/include/asm/xen/cpuid.h b/arch/x86/include/asm/xen/cpuid.h
index 78e667a31d6c..6daa9b0c8d11 100644
--- a/arch/x86/include/asm/xen/cpuid.h
+++ b/arch/x86/include/asm/xen/cpuid.h
@@ -107,6 +107,8 @@
* ID field from 8 to 15 bits, allowing to target APIC IDs up to 32768.
*/
#define XEN_HVM_CPUID_EXT_DEST_ID (1u << 5)
+/* Per-vCPU event channel upcalls */
+#define XEN_HVM_CPUID_UPCALL_VECTOR (1u << 6)
/*
* Leaf 6 (0x40000x05)
diff --git a/arch/x86/include/asm/xen/events.h b/arch/x86/include/asm/xen/events.h
index 068d9b067c83..62bdceb594f1 100644
--- a/arch/x86/include/asm/xen/events.h
+++ b/arch/x86/include/asm/xen/events.h
@@ -23,7 +23,7 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
/* No need for a barrier -- XCHG is a barrier on x86. */
#define xchg_xen_ulong(ptr, val) xchg((ptr), (val))
-extern int xen_have_vector_callback;
+extern bool xen_have_vector_callback;
/*
* Events delivered via platform PCI interrupts are always
@@ -34,4 +34,5 @@ static inline bool xen_support_evtchn_rebind(void)
return (!xen_hvm_domain() || xen_have_vector_callback);
}
+extern bool xen_percpu_upcall;
#endif /* _ASM_X86_XEN_EVENTS_H */
diff --git a/arch/x86/include/uapi/asm/sgx.h b/arch/x86/include/uapi/asm/sgx.h
index f4b81587e90b..2dd35bbdc822 100644
--- a/arch/x86/include/uapi/asm/sgx.h
+++ b/arch/x86/include/uapi/asm/sgx.h
@@ -29,6 +29,12 @@ enum sgx_page_flags {
_IOW(SGX_MAGIC, 0x03, struct sgx_enclave_provision)
#define SGX_IOC_VEPC_REMOVE_ALL \
_IO(SGX_MAGIC, 0x04)
+#define SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS \
+ _IOWR(SGX_MAGIC, 0x05, struct sgx_enclave_restrict_permissions)
+#define SGX_IOC_ENCLAVE_MODIFY_TYPES \
+ _IOWR(SGX_MAGIC, 0x06, struct sgx_enclave_modify_types)
+#define SGX_IOC_ENCLAVE_REMOVE_PAGES \
+ _IOWR(SGX_MAGIC, 0x07, struct sgx_enclave_remove_pages)
/**
* struct sgx_enclave_create - parameter structure for the
@@ -76,6 +82,62 @@ struct sgx_enclave_provision {
__u64 fd;
};
+/**
+ * struct sgx_enclave_restrict_permissions - parameters for ioctl
+ * %SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS
+ * @offset: starting page offset (page aligned relative to enclave base
+ * address defined in SECS)
+ * @length: length of memory (multiple of the page size)
+ * @permissions:new permission bits for pages in range described by @offset
+ * and @length
+ * @result: (output) SGX result code of ENCLS[EMODPR] function
+ * @count: (output) bytes successfully changed (multiple of page size)
+ */
+struct sgx_enclave_restrict_permissions {
+ __u64 offset;
+ __u64 length;
+ __u64 permissions;
+ __u64 result;
+ __u64 count;
+};
+
+/**
+ * struct sgx_enclave_modify_types - parameters for ioctl
+ * %SGX_IOC_ENCLAVE_MODIFY_TYPES
+ * @offset: starting page offset (page aligned relative to enclave base
+ * address defined in SECS)
+ * @length: length of memory (multiple of the page size)
+ * @page_type: new type for pages in range described by @offset and @length
+ * @result: (output) SGX result code of ENCLS[EMODT] function
+ * @count: (output) bytes successfully changed (multiple of page size)
+ */
+struct sgx_enclave_modify_types {
+ __u64 offset;
+ __u64 length;
+ __u64 page_type;
+ __u64 result;
+ __u64 count;
+};
+
+/**
+ * struct sgx_enclave_remove_pages - %SGX_IOC_ENCLAVE_REMOVE_PAGES parameters
+ * @offset: starting page offset (page aligned relative to enclave base
+ * address defined in SECS)
+ * @length: length of memory (multiple of the page size)
+ * @count: (output) bytes successfully changed (multiple of page size)
+ *
+ * Regular (PT_REG) or TCS (PT_TCS) can be removed from an initialized
+ * enclave if the system supports SGX2. First, the %SGX_IOC_ENCLAVE_MODIFY_TYPES
+ * ioctl() should be used to change the page type to PT_TRIM. After that
+ * succeeds ENCLU[EACCEPT] should be run from within the enclave and then
+ * %SGX_IOC_ENCLAVE_REMOVE_PAGES can be used to complete the page removal.
+ */
+struct sgx_enclave_remove_pages {
+ __u64 offset;
+ __u64 length;
+ __u64 count;
+};
+
struct sgx_enclave_run;
/**
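Editor's note: the kernel-doc for struct sgx_enclave_remove_pages above spells out the SGX2 removal flow: change the page type to PT_TRIM with SGX_IOC_ENCLAVE_MODIFY_TYPES, have the enclave run ENCLU[EACCEPT] on each page, then finish with SGX_IOC_ENCLAVE_REMOVE_PAGES. A hedged user-space sketch of the two ioctl steps follows; the enclave fd (opened from /dev/sgx_enclave), the offsets, the SGX_PAGE_TYPE_TRIM value and the in-enclave EACCEPT step are assumptions, not shown by this patch.

#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <asm/sgx.h>		/* SGX_IOC_* and the parameter structs */

#ifndef SGX_PAGE_TYPE_TRIM
#define SGX_PAGE_TYPE_TRIM 4	/* assumed value, matching enum sgx_page_type */
#endif

/*
 * Hypothetical helper: trim and remove 'length' bytes of enclave pages
 * starting at 'offset' (relative to the enclave base) from an already
 * initialized enclave whose fd is 'encl_fd'. The ENCLU[EACCEPT] of each
 * trimmed page must be done by code running inside the enclave between
 * the two ioctls; that step is not shown here.
 */
static int remove_enclave_pages(int encl_fd, uint64_t offset, uint64_t length)
{
	struct sgx_enclave_modify_types mt = {
		.offset = offset,
		.length = length,
		.page_type = SGX_PAGE_TYPE_TRIM,
	};
	struct sgx_enclave_remove_pages rm = {
		.offset = offset,
		.length = length,
	};

	if (ioctl(encl_fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &mt) < 0)
		return -1;

	/* ... enclave runs EACCEPT on every page in [offset, offset+length) ... */

	if (ioctl(encl_fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &rm) < 0)
		return -1;

	printf("removed %llu bytes\n", (unsigned long long)rm.count);
	return 0;
}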
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index a4347605ab00..6d303d1d276c 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1115,11 +1115,6 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_apic_timer_interrupt)
set_irq_regs(old_regs);
}
-int setup_profiling_timer(unsigned int multiplier)
-{
- return -EINVAL;
-}
-
/*
* Local APIC start and shutdown
*/
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 6761668100b9..da7c361f47e0 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -152,7 +152,7 @@ void __init check_bugs(void)
/*
* spectre_v2_user_select_mitigation() relies on the state set by
* retbleed_select_mitigation(); specifically the STIBP selection is
- * forced for UNRET.
+ * forced for UNRET or IBPB.
*/
spectre_v2_user_select_mitigation();
ssb_select_mitigation();
@@ -433,7 +433,8 @@ static void __init mmio_select_mitigation(void)
u64 ia32_cap;
if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
- cpu_mitigations_off()) {
+ boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
+ cpu_mitigations_off()) {
mmio_mitigation = MMIO_MITIGATION_OFF;
return;
}
@@ -538,6 +539,8 @@ out:
pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
+ else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
+ pr_info("MMIO Stale Data: Unknown: No mitigations\n");
}
static void __init md_clear_select_mitigation(void)
@@ -1179,7 +1182,8 @@ spectre_v2_user_select_mitigation(void)
boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
mode = SPECTRE_V2_USER_STRICT_PREFERRED;
- if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET) {
+ if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
+ retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
if (mode != SPECTRE_V2_USER_STRICT &&
mode != SPECTRE_V2_USER_STRICT_PREFERRED)
pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n");
@@ -1335,6 +1339,53 @@ static void __init spec_ctrl_disable_kernel_rrsba(void)
}
}
+static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
+{
+ /*
+ * Similar to context switches, there are two types of RSB attacks
+ * after VM exit:
+ *
+ * 1) RSB underflow
+ *
+ * 2) Poisoned RSB entry
+ *
+ * When retpoline is enabled, both are mitigated by filling/clearing
+ * the RSB.
+ *
+ * When IBRS is enabled, while #1 would be mitigated by the IBRS branch
+ * prediction isolation protections, RSB still needs to be cleared
+ * because of #2. Note that SMEP provides no protection here, unlike
+ * user-space-poisoned RSB entries.
+ *
+ * eIBRS should protect against RSB poisoning, but if the EIBRS_PBRSB
+ * bug is present then a LITE version of RSB protection is required,
+ * just a single call needs to retire before a RET is executed.
+ */
+ switch (mode) {
+ case SPECTRE_V2_NONE:
+ return;
+
+ case SPECTRE_V2_EIBRS_LFENCE:
+ case SPECTRE_V2_EIBRS:
+ if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
+ setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
+ pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
+ }
+ return;
+
+ case SPECTRE_V2_EIBRS_RETPOLINE:
+ case SPECTRE_V2_RETPOLINE:
+ case SPECTRE_V2_LFENCE:
+ case SPECTRE_V2_IBRS:
+ setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
+ pr_info("Spectre v2 / SpectreRSB : Filling RSB on VMEXIT\n");
+ return;
+ }
+
+ pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit");
+ dump_stack();
+}
+
static void __init spectre_v2_select_mitigation(void)
{
enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@@ -1485,28 +1536,7 @@ static void __init spectre_v2_select_mitigation(void)
setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
- /*
- * Similar to context switches, there are two types of RSB attacks
- * after vmexit:
- *
- * 1) RSB underflow
- *
- * 2) Poisoned RSB entry
- *
- * When retpoline is enabled, both are mitigated by filling/clearing
- * the RSB.
- *
- * When IBRS is enabled, while #1 would be mitigated by the IBRS branch
- * prediction isolation protections, RSB still needs to be cleared
- * because of #2. Note that SMEP provides no protection here, unlike
- * user-space-poisoned RSB entries.
- *
- * eIBRS, on the other hand, has RSB-poisoning protections, so it
- * doesn't need RSB clearing after vmexit.
- */
- if (boot_cpu_has(X86_FEATURE_RETPOLINE) ||
- boot_cpu_has(X86_FEATURE_KERNEL_IBRS))
- setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
+ spectre_v2_determine_rsb_fill_type_at_vmexit(mode);
/*
* Retpoline protects the kernel, but doesn't protect firmware. IBRS
@@ -2248,6 +2278,9 @@ static ssize_t tsx_async_abort_show_state(char *buf)
static ssize_t mmio_stale_data_show_state(char *buf)
{
+ if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
+ return sysfs_emit(buf, "Unknown: No mitigations\n");
+
if (mmio_mitigation == MMIO_MITIGATION_OFF)
return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);
@@ -2292,6 +2325,19 @@ static char *ibpb_state(void)
return "";
}
+static char *pbrsb_eibrs_state(void)
+{
+ if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
+ if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) ||
+ boot_cpu_has(X86_FEATURE_RSB_VMEXIT))
+ return ", PBRSB-eIBRS: SW sequence";
+ else
+ return ", PBRSB-eIBRS: Vulnerable";
+ } else {
+ return ", PBRSB-eIBRS: Not affected";
+ }
+}
+
static ssize_t spectre_v2_show_state(char *buf)
{
if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
@@ -2304,12 +2350,13 @@ static ssize_t spectre_v2_show_state(char *buf)
spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
- return sprintf(buf, "%s%s%s%s%s%s\n",
+ return sprintf(buf, "%s%s%s%s%s%s%s\n",
spectre_v2_strings[spectre_v2_enabled],
ibpb_state(),
boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
stibp_state(),
boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
+ pbrsb_eibrs_state(),
spectre_v2_module_string());
}
@@ -2320,10 +2367,11 @@ static ssize_t srbds_show_state(char *buf)
static ssize_t retbleed_show_state(char *buf)
{
- if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET) {
+ if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
+ retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
- return sprintf(buf, "Vulnerable: untrained return thunk on non-Zen uarch\n");
+ return sprintf(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n");
return sprintf(buf, "%s; SMT %s\n",
retbleed_strings[retbleed_mitigation],
@@ -2379,6 +2427,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
return srbds_show_state(buf);
case X86_BUG_MMIO_STALE_DATA:
+ case X86_BUG_MMIO_UNKNOWN:
return mmio_stale_data_show_state(buf);
case X86_BUG_RETBLEED:
@@ -2438,7 +2487,10 @@ ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *
ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
{
- return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
+ if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
+ return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN);
+ else
+ return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
}
ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
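Editor's note: the bugs.c changes above surface two new strings through sysfs: a ", PBRSB-eIBRS: ..." suffix on spectre_v2 and "Unknown: No mitigations" for mmio_stale_data on X86_BUG_MMIO_UNKNOWN parts. A minimal user-space check of those files (standard sysfs paths; nothing here is specific to this patch beyond the strings it emits):

#include <stdio.h>

/* Print the mitigation strings that the patched cpu_show_common() emits. */
int main(void)
{
	static const char * const files[] = {
		"/sys/devices/system/cpu/vulnerabilities/spectre_v2",
		"/sys/devices/system/cpu/vulnerabilities/mmio_stale_data",
	};
	char line[256];

	for (unsigned int i = 0; i < 2; i++) {
		FILE *f = fopen(files[i], "r");

		if (!f) {
			perror(files[i]);
			continue;
		}
		if (fgets(line, sizeof(line), f))
			printf("%s: %s", files[i], line);
		fclose(f);
	}
	return 0;
}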
diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
index fe98a1465be6..66556833d7af 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -29,6 +29,12 @@
#define LVL_3 4
#define LVL_TRACE 5
+/* Shared last level cache maps */
+DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
+
+/* Shared L2 cache maps */
+DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_l2c_shared_map);
+
struct _cache_table {
unsigned char descriptor;
char cache_type;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 736262a76a12..3e508f239098 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1135,6 +1135,8 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
#define NO_SWAPGS BIT(6)
#define NO_ITLB_MULTIHIT BIT(7)
#define NO_SPECTRE_V2 BIT(8)
+#define NO_MMIO BIT(9)
+#define NO_EIBRS_PBRSB BIT(10)
#define VULNWL(vendor, family, model, whitelist) \
X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, whitelist)
@@ -1157,6 +1159,11 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
VULNWL(VORTEX, 6, X86_MODEL_ANY, NO_SPECULATION),
/* Intel Family 6 */
+ VULNWL_INTEL(TIGERLAKE, NO_MMIO),
+ VULNWL_INTEL(TIGERLAKE_L, NO_MMIO),
+ VULNWL_INTEL(ALDERLAKE, NO_MMIO),
+ VULNWL_INTEL(ALDERLAKE_L, NO_MMIO),
+
VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION | NO_ITLB_MULTIHIT),
VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION | NO_ITLB_MULTIHIT),
VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT),
@@ -1175,9 +1182,9 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
VULNWL_INTEL(ATOM_AIRMONT_NP, NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
- VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
- VULNWL_INTEL(ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
- VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+ VULNWL_INTEL(ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+ VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
/*
* Technically, swapgs isn't serializing on AMD (despite it previously
@@ -1187,21 +1194,23 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
* good enough for our purposes.
*/
- VULNWL_INTEL(ATOM_TREMONT_D, NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_TREMONT, NO_EIBRS_PBRSB),
+ VULNWL_INTEL(ATOM_TREMONT_L, NO_EIBRS_PBRSB),
+ VULNWL_INTEL(ATOM_TREMONT_D, NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
/* AMD Family 0xf - 0x12 */
- VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
- VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
- VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
- VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+ VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+ VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+ VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
- VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
- VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+ VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
/* Zhaoxin Family 7 */
- VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS),
- VULNWL(ZHAOXIN, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS),
+ VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO),
+ VULNWL(ZHAOXIN, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO),
{}
};
@@ -1355,16 +1364,27 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
* Affected CPU list is generally enough to enumerate the vulnerability,
* but for virtualization case check for ARCH_CAP MSR bits also, VMM may
* not want the guest to enumerate the bug.
+ *
+ * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist,
+ * nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits.
*/
- if (cpu_matches(cpu_vuln_blacklist, MMIO) &&
- !arch_cap_mmio_immune(ia32_cap))
- setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
+ if (!arch_cap_mmio_immune(ia32_cap)) {
+ if (cpu_matches(cpu_vuln_blacklist, MMIO))
+ setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
+ else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO))
+ setup_force_cpu_bug(X86_BUG_MMIO_UNKNOWN);
+ }
if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))
setup_force_cpu_bug(X86_BUG_RETBLEED);
}
+ if (cpu_has(c, X86_FEATURE_IBRS_ENHANCED) &&
+ !cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
+ !(ia32_cap & ARCH_CAP_PBRSB_NO))
+ setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
+
if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
return;
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 7227c15299d0..9651275aecd1 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/bitops.h>
#include <linux/delay.h>
+#include <linux/isa-dma.h>
#include <linux/pci.h>
#include <asm/dma.h>
#include <linux/io.h>
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 663f6e6dd288..2d7ea5480ec3 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -1216,22 +1216,23 @@ static void bus_lock_init(void)
{
u64 val;
- /*
- * Warn and fatal are handled by #AC for split lock if #AC for
- * split lock is supported.
- */
- if (!boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT) ||
- (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) &&
- (sld_state == sld_warn || sld_state == sld_fatal)) ||
- sld_state == sld_off)
+ if (!boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
return;
- /*
- * Enable #DB for bus lock. All bus locks are handled in #DB except
- * split locks are handled in #AC in the fatal case.
- */
rdmsrl(MSR_IA32_DEBUGCTLMSR, val);
- val |= DEBUGCTLMSR_BUS_LOCK_DETECT;
+
+ if ((boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) &&
+ (sld_state == sld_warn || sld_state == sld_fatal)) ||
+ sld_state == sld_off) {
+ /*
+ * Warn and fatal are handled by #AC for split lock if #AC for
+ * split lock is supported.
+ */
+ val &= ~DEBUGCTLMSR_BUS_LOCK_DETECT;
+ } else {
+ val |= DEBUGCTLMSR_BUS_LOCK_DETECT;
+ }
+
wrmsrl(MSR_IA32_DEBUGCTLMSR, val);
}
diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
index 19876ebfb504..24c1bb8eb196 100644
--- a/arch/x86/kernel/cpu/sgx/encl.c
+++ b/arch/x86/kernel/cpu/sgx/encl.c
@@ -232,25 +232,10 @@ static struct sgx_epc_page *sgx_encl_eldu(struct sgx_encl_page *encl_page,
return epc_page;
}
-static struct sgx_encl_page *sgx_encl_load_page(struct sgx_encl *encl,
- unsigned long addr,
- unsigned long vm_flags)
+static struct sgx_encl_page *__sgx_encl_load_page(struct sgx_encl *encl,
+ struct sgx_encl_page *entry)
{
- unsigned long vm_prot_bits = vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
struct sgx_epc_page *epc_page;
- struct sgx_encl_page *entry;
-
- entry = xa_load(&encl->page_array, PFN_DOWN(addr));
- if (!entry)
- return ERR_PTR(-EFAULT);
-
- /*
- * Verify that the faulted page has equal or higher build time
- * permissions than the VMA permissions (i.e. the subset of {VM_READ,
- * VM_WRITE, VM_EXECUTE} in vma->vm_flags).
- */
- if ((entry->vm_max_prot_bits & vm_prot_bits) != vm_prot_bits)
- return ERR_PTR(-EFAULT);
/* Entry successfully located. */
if (entry->epc_page) {
@@ -276,6 +261,146 @@ static struct sgx_encl_page *sgx_encl_load_page(struct sgx_encl *encl,
return entry;
}
+static struct sgx_encl_page *sgx_encl_load_page_in_vma(struct sgx_encl *encl,
+ unsigned long addr,
+ unsigned long vm_flags)
+{
+ unsigned long vm_prot_bits = vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
+ struct sgx_encl_page *entry;
+
+ entry = xa_load(&encl->page_array, PFN_DOWN(addr));
+ if (!entry)
+ return ERR_PTR(-EFAULT);
+
+ /*
+ * Verify that the page has equal or higher build time
+ * permissions than the VMA permissions (i.e. the subset of {VM_READ,
+ * VM_WRITE, VM_EXECUTE} in vma->vm_flags).
+ */
+ if ((entry->vm_max_prot_bits & vm_prot_bits) != vm_prot_bits)
+ return ERR_PTR(-EFAULT);
+
+ return __sgx_encl_load_page(encl, entry);
+}
+
+struct sgx_encl_page *sgx_encl_load_page(struct sgx_encl *encl,
+ unsigned long addr)
+{
+ struct sgx_encl_page *entry;
+
+ entry = xa_load(&encl->page_array, PFN_DOWN(addr));
+ if (!entry)
+ return ERR_PTR(-EFAULT);
+
+ return __sgx_encl_load_page(encl, entry);
+}
+
+/**
+ * sgx_encl_eaug_page() - Dynamically add page to initialized enclave
+ * @vma: VMA obtained from fault info from where page is accessed
+ * @encl: enclave accessing the page
+ * @addr: address that triggered the page fault
+ *
+ * When an initialized enclave accesses a page with no backing EPC page
+ * on an SGX2 system, an EPC page can be added dynamically via the SGX2
+ * ENCLS[EAUG] instruction.
+ *
+ * Returns: Appropriate vm_fault_t: VM_FAULT_NOPAGE when PTE was installed
+ * successfully, VM_FAULT_SIGBUS or VM_FAULT_OOM as error otherwise.
+ */
+static vm_fault_t sgx_encl_eaug_page(struct vm_area_struct *vma,
+ struct sgx_encl *encl, unsigned long addr)
+{
+ vm_fault_t vmret = VM_FAULT_SIGBUS;
+ struct sgx_pageinfo pginfo = {0};
+ struct sgx_encl_page *encl_page;
+ struct sgx_epc_page *epc_page;
+ struct sgx_va_page *va_page;
+ unsigned long phys_addr;
+ u64 secinfo_flags;
+ int ret;
+
+ if (!test_bit(SGX_ENCL_INITIALIZED, &encl->flags))
+ return VM_FAULT_SIGBUS;
+
+ /*
+ * Ignore internal permission checking for dynamically added pages.
+ * They matter only for data added during the pre-initialization
+ * phase. The enclave decides the permissions by the means of
+ * EACCEPT, EACCEPTCOPY and EMODPE.
+ */
+ secinfo_flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_X;
+ encl_page = sgx_encl_page_alloc(encl, addr - encl->base, secinfo_flags);
+ if (IS_ERR(encl_page))
+ return VM_FAULT_OOM;
+
+ mutex_lock(&encl->lock);
+
+ epc_page = sgx_alloc_epc_page(encl_page, false);
+ if (IS_ERR(epc_page)) {
+ if (PTR_ERR(epc_page) == -EBUSY)
+ vmret = VM_FAULT_NOPAGE;
+ goto err_out_unlock;
+ }
+
+ va_page = sgx_encl_grow(encl, false);
+ if (IS_ERR(va_page))
+ goto err_out_epc;
+
+ if (va_page)
+ list_add(&va_page->list, &encl->va_pages);
+
+ ret = xa_insert(&encl->page_array, PFN_DOWN(encl_page->desc),
+ encl_page, GFP_KERNEL);
+ /*
+ * If ret == -EBUSY then page was created in another flow while
+ * running without encl->lock
+ */
+ if (ret)
+ goto err_out_shrink;
+
+ pginfo.secs = (unsigned long)sgx_get_epc_virt_addr(encl->secs.epc_page);
+ pginfo.addr = encl_page->desc & PAGE_MASK;
+ pginfo.metadata = 0;
+
+ ret = __eaug(&pginfo, sgx_get_epc_virt_addr(epc_page));
+ if (ret)
+ goto err_out;
+
+ encl_page->encl = encl;
+ encl_page->epc_page = epc_page;
+ encl_page->type = SGX_PAGE_TYPE_REG;
+ encl->secs_child_cnt++;
+
+ sgx_mark_page_reclaimable(encl_page->epc_page);
+
+ phys_addr = sgx_get_epc_phys_addr(epc_page);
+ /*
+ * Do not undo everything when creating PTE entry fails - next #PF
+ * would find page ready for a PTE.
+ */
+ vmret = vmf_insert_pfn(vma, addr, PFN_DOWN(phys_addr));
+ if (vmret != VM_FAULT_NOPAGE) {
+ mutex_unlock(&encl->lock);
+ return VM_FAULT_SIGBUS;
+ }
+ mutex_unlock(&encl->lock);
+ return VM_FAULT_NOPAGE;
+
+err_out:
+ xa_erase(&encl->page_array, PFN_DOWN(encl_page->desc));
+
+err_out_shrink:
+ sgx_encl_shrink(encl, va_page);
+err_out_epc:
+ sgx_encl_free_epc_page(epc_page);
+err_out_unlock:
+ mutex_unlock(&encl->lock);
+ kfree(encl_page);
+
+ return vmret;
+}
+
static vm_fault_t sgx_vma_fault(struct vm_fault *vmf)
{
unsigned long addr = (unsigned long)vmf->address;
@@ -295,9 +420,20 @@ static vm_fault_t sgx_vma_fault(struct vm_fault *vmf)
if (unlikely(!encl))
return VM_FAULT_SIGBUS;
+ /*
+ * The page_array keeps track of all enclave pages, whether they
+ * are swapped out or not. If there is no entry for this page and
+ * the system supports SGX2 then it is possible to dynamically add
+ * a new enclave page. This is only possible for an initialized
+ * enclave, which is checked right away.
+ */
+ if (cpu_feature_enabled(X86_FEATURE_SGX2) &&
+ (!xa_load(&encl->page_array, PFN_DOWN(addr))))
+ return sgx_encl_eaug_page(vma, encl, addr);
+
mutex_lock(&encl->lock);
- entry = sgx_encl_load_page(encl, addr, vma->vm_flags);
+ entry = sgx_encl_load_page_in_vma(encl, addr, vma->vm_flags);
if (IS_ERR(entry)) {
mutex_unlock(&encl->lock);
@@ -367,6 +503,11 @@ int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
XA_STATE(xas, &encl->page_array, PFN_DOWN(start));
+ /* Disallow mapping outside enclave's address range. */
+ if (test_bit(SGX_ENCL_INITIALIZED, &encl->flags) &&
+ (start < encl->base || end > encl->base + encl->size))
+ return -EACCES;
+
/*
* Disallow READ_IMPLIES_EXEC tasks as their VMA permissions might
* conflict with the enclave page permissions.
@@ -445,7 +586,7 @@ static struct sgx_encl_page *sgx_encl_reserve_page(struct sgx_encl *encl,
for ( ; ; ) {
mutex_lock(&encl->lock);
- entry = sgx_encl_load_page(encl, addr, vm_flags);
+ entry = sgx_encl_load_page_in_vma(encl, addr, vm_flags);
if (PTR_ERR(entry) != -EBUSY)
break;
@@ -687,7 +828,7 @@ int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm)
spin_lock(&encl->mm_lock);
list_add_rcu(&encl_mm->list, &encl->mm_list);
- /* Pairs with smp_rmb() in sgx_reclaimer_block(). */
+ /* Pairs with smp_rmb() in sgx_zap_enclave_ptes(). */
smp_wmb();
encl->mm_list_version++;
spin_unlock(&encl->mm_lock);
@@ -695,6 +836,73 @@ int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm)
return 0;
}
+/**
+ * sgx_encl_cpumask() - Query which CPUs might be accessing the enclave
+ * @encl: the enclave
+ *
+ * Some SGX functions require that no cached linear-to-physical address
+ * mappings are present before they can succeed. For example, ENCLS[EWB]
+ * copies a page from the enclave page cache to regular main memory but
+ * it fails if it cannot ensure that there are no cached
+ * linear-to-physical address mappings referring to the page.
+ *
+ * SGX hardware flushes all cached linear-to-physical mappings on a CPU
+ * when an enclave is exited via ENCLU[EEXIT] or an Asynchronous Enclave
+ * Exit (AEX). Exiting an enclave will thus ensure cached linear-to-physical
+ * address mappings are cleared but coordination with the tracking done within
+ * the SGX hardware is needed to support the SGX functions that depend on this
+ * cache clearing.
+ *
+ * When the ENCLS[ETRACK] function is issued on an enclave the hardware
+ * tracks threads operating inside the enclave at that time. The SGX
+ * hardware tracking requires that all the identified threads have
+ * exited the enclave in order to flush the mappings before a function such
+ * as ENCLS[EWB] will be permitted.
+ *
+ * The following flow is used to support SGX functions that require that
+ * no cached linear-to-physical address mappings are present:
+ * 1) Execute ENCLS[ETRACK] to initiate hardware tracking.
+ * 2) Use this function (sgx_encl_cpumask()) to query which CPUs might be
+ * accessing the enclave.
+ * 3) Send IPI to identified CPUs, kicking them out of the enclave and
+ * thus flushing all locally cached linear-to-physical address mappings.
+ * 4) Execute SGX function.
+ *
+ * Context: It is required to call this function after ENCLS[ETRACK].
+ * This will ensure that if any new mm appears (racing with
+ * sgx_encl_mm_add()) then the new mm will enter into the
+ * enclave with fresh linear-to-physical address mappings.
+ *
+ * It is required that all IPIs are completed before a new
+ * ENCLS[ETRACK] is issued so be sure to protect steps 1 to 3
+ * of the above flow with the enclave's mutex.
+ *
+ * Return: cpumask of CPUs that might be accessing @encl
+ */
+const cpumask_t *sgx_encl_cpumask(struct sgx_encl *encl)
+{
+ cpumask_t *cpumask = &encl->cpumask;
+ struct sgx_encl_mm *encl_mm;
+ int idx;
+
+ cpumask_clear(cpumask);
+
+ idx = srcu_read_lock(&encl->srcu);
+
+ list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
+ if (!mmget_not_zero(encl_mm->mm))
+ continue;
+
+ cpumask_or(cpumask, cpumask, mm_cpumask(encl_mm->mm));
+
+ mmput_async(encl_mm->mm);
+ }
+
+ srcu_read_unlock(&encl->srcu, idx);
+
+ return cpumask;
+}
+
static struct page *sgx_encl_get_backing_page(struct sgx_encl *encl,
pgoff_t index)
{
@@ -735,7 +943,6 @@ static int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
return PTR_ERR(pcmd);
}
- backing->page_index = page_index;
backing->contents = contents;
backing->pcmd = pcmd;
backing->pcmd_offset = page_pcmd_off & (PAGE_SIZE - 1);
@@ -902,8 +1109,85 @@ int sgx_encl_test_and_clear_young(struct mm_struct *mm,
return ret;
}
+struct sgx_encl_page *sgx_encl_page_alloc(struct sgx_encl *encl,
+ unsigned long offset,
+ u64 secinfo_flags)
+{
+ struct sgx_encl_page *encl_page;
+ unsigned long prot;
+
+ encl_page = kzalloc(sizeof(*encl_page), GFP_KERNEL);
+ if (!encl_page)
+ return ERR_PTR(-ENOMEM);
+
+ encl_page->desc = encl->base + offset;
+ encl_page->encl = encl;
+
+ prot = _calc_vm_trans(secinfo_flags, SGX_SECINFO_R, PROT_READ) |
+ _calc_vm_trans(secinfo_flags, SGX_SECINFO_W, PROT_WRITE) |
+ _calc_vm_trans(secinfo_flags, SGX_SECINFO_X, PROT_EXEC);
+
+ /*
+ * TCS pages must always have RW set for CPU access while the SECINFO
+ * permissions are *always* zero - the CPU ignores the user provided
+ * values and silently overwrites them with zero permissions.
+ */
+ if ((secinfo_flags & SGX_SECINFO_PAGE_TYPE_MASK) == SGX_SECINFO_TCS)
+ prot |= PROT_READ | PROT_WRITE;
+
+ /* Calculate maximum of the VM flags for the page. */
+ encl_page->vm_max_prot_bits = calc_vm_prot_bits(prot, 0);
+
+ return encl_page;
+}
+
+/**
+ * sgx_zap_enclave_ptes() - remove PTEs mapping the address from enclave
+ * @encl: the enclave
+ * @addr: page aligned pointer to single page for which PTEs will be removed
+ *
+ * Multiple VMAs may have an enclave page mapped. Remove the PTE mapping
+ * @addr from each VMA. Ensure that page fault handler is ready to handle
+ * new mappings of @addr before calling this function.
+ */
+void sgx_zap_enclave_ptes(struct sgx_encl *encl, unsigned long addr)
+{
+ unsigned long mm_list_version;
+ struct sgx_encl_mm *encl_mm;
+ struct vm_area_struct *vma;
+ int idx, ret;
+
+ do {
+ mm_list_version = encl->mm_list_version;
+
+ /* Pairs with smp_wmb() in sgx_encl_mm_add(). */
+ smp_rmb();
+
+ idx = srcu_read_lock(&encl->srcu);
+
+ list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
+ if (!mmget_not_zero(encl_mm->mm))
+ continue;
+
+ mmap_read_lock(encl_mm->mm);
+
+ ret = sgx_encl_find(encl_mm->mm, addr, &vma);
+ if (!ret && encl == vma->vm_private_data)
+ zap_vma_ptes(vma, addr, PAGE_SIZE);
+
+ mmap_read_unlock(encl_mm->mm);
+
+ mmput_async(encl_mm->mm);
+ }
+
+ srcu_read_unlock(&encl->srcu, idx);
+ } while (unlikely(encl->mm_list_version != mm_list_version));
+}
+
/**
* sgx_alloc_va_page() - Allocate a Version Array (VA) page
+ * @reclaim: Reclaim EPC pages directly if none available. Enclave
+ * mutex should not be held if this is set.
*
* Allocate a free EPC page and convert it to a Version Array (VA) page.
*
@@ -911,12 +1195,12 @@ int sgx_encl_test_and_clear_young(struct mm_struct *mm,
* a VA page,
* -errno otherwise
*/
-struct sgx_epc_page *sgx_alloc_va_page(void)
+struct sgx_epc_page *sgx_alloc_va_page(bool reclaim)
{
struct sgx_epc_page *epc_page;
int ret;
- epc_page = sgx_alloc_epc_page(NULL, true);
+ epc_page = sgx_alloc_epc_page(NULL, reclaim);
if (IS_ERR(epc_page))
return ERR_CAST(epc_page);
diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h
index 332ef3568267..a65a952116fd 100644
--- a/arch/x86/kernel/cpu/sgx/encl.h
+++ b/arch/x86/kernel/cpu/sgx/encl.h
@@ -27,7 +27,8 @@
struct sgx_encl_page {
unsigned long desc;
- unsigned long vm_max_prot_bits;
+ unsigned long vm_max_prot_bits:8;
+ enum sgx_page_type type:16;
struct sgx_epc_page *epc_page;
struct sgx_encl *encl;
struct sgx_va_page *va_page;
@@ -78,7 +79,6 @@ struct sgx_va_page {
};
struct sgx_backing {
- pgoff_t page_index;
struct page *contents;
struct page *pcmd;
unsigned long pcmd_offset;
@@ -106,6 +106,7 @@ int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
bool current_is_ksgxd(void);
void sgx_encl_release(struct kref *ref);
int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm);
+const cpumask_t *sgx_encl_cpumask(struct sgx_encl *encl);
int sgx_encl_lookup_backing(struct sgx_encl *encl, unsigned long page_index,
struct sgx_backing *backing);
int sgx_encl_alloc_backing(struct sgx_encl *encl, unsigned long page_index,
@@ -113,11 +114,18 @@ int sgx_encl_alloc_backing(struct sgx_encl *encl, unsigned long page_index,
void sgx_encl_put_backing(struct sgx_backing *backing);
int sgx_encl_test_and_clear_young(struct mm_struct *mm,
struct sgx_encl_page *page);
-
-struct sgx_epc_page *sgx_alloc_va_page(void);
+struct sgx_encl_page *sgx_encl_page_alloc(struct sgx_encl *encl,
+ unsigned long offset,
+ u64 secinfo_flags);
+void sgx_zap_enclave_ptes(struct sgx_encl *encl, unsigned long addr);
+struct sgx_epc_page *sgx_alloc_va_page(bool reclaim);
unsigned int sgx_alloc_va_slot(struct sgx_va_page *va_page);
void sgx_free_va_slot(struct sgx_va_page *va_page, unsigned int offset);
bool sgx_va_page_full(struct sgx_va_page *va_page);
void sgx_encl_free_epc_page(struct sgx_epc_page *page);
+struct sgx_encl_page *sgx_encl_load_page(struct sgx_encl *encl,
+ unsigned long addr);
+struct sgx_va_page *sgx_encl_grow(struct sgx_encl *encl, bool reclaim);
+void sgx_encl_shrink(struct sgx_encl *encl, struct sgx_va_page *va_page);
#endif /* _X86_ENCL_H */
diff --git a/arch/x86/kernel/cpu/sgx/encls.h b/arch/x86/kernel/cpu/sgx/encls.h
index fa04a73daf9c..99004b02e2ed 100644
--- a/arch/x86/kernel/cpu/sgx/encls.h
+++ b/arch/x86/kernel/cpu/sgx/encls.h
@@ -136,57 +136,71 @@ static inline bool encls_failed(int ret)
ret; \
})
+/* Initialize an EPC page into an SGX Enclave Control Structure (SECS) page. */
static inline int __ecreate(struct sgx_pageinfo *pginfo, void *secs)
{
return __encls_2(ECREATE, pginfo, secs);
}
+/* Hash a 256 byte region of an enclave page to SECS:MRENCLAVE. */
static inline int __eextend(void *secs, void *addr)
{
return __encls_2(EEXTEND, secs, addr);
}
+/*
+ * Associate an EPC page to an enclave either as a REG or TCS page
+ * populated with the provided data.
+ */
static inline int __eadd(struct sgx_pageinfo *pginfo, void *addr)
{
return __encls_2(EADD, pginfo, addr);
}
+/* Finalize enclave build, initialize enclave for user code execution. */
static inline int __einit(void *sigstruct, void *token, void *secs)
{
return __encls_ret_3(EINIT, sigstruct, secs, token);
}
+/* Disassociate EPC page from its enclave and mark it as unused. */
static inline int __eremove(void *addr)
{
return __encls_ret_1(EREMOVE, addr);
}
+/* Copy data to an EPC page belonging to a debug enclave. */
static inline int __edbgwr(void *addr, unsigned long *data)
{
return __encls_2(EDGBWR, *data, addr);
}
+/* Copy data from an EPC page belonging to a debug enclave. */
static inline int __edbgrd(void *addr, unsigned long *data)
{
return __encls_1_1(EDGBRD, *data, addr);
}
+/* Track that software has completed the required TLB address clears. */
static inline int __etrack(void *addr)
{
return __encls_ret_1(ETRACK, addr);
}
+/* Load, verify, and unblock an EPC page. */
static inline int __eldu(struct sgx_pageinfo *pginfo, void *addr,
void *va)
{
return __encls_ret_3(ELDU, pginfo, addr, va);
}
+/* Make EPC page inaccessible to enclave, ready to be written to memory. */
static inline int __eblock(void *addr)
{
return __encls_ret_1(EBLOCK, addr);
}
+/* Initialize an EPC page into a Version Array (VA) page. */
static inline int __epa(void *addr)
{
unsigned long rbx = SGX_PAGE_TYPE_VA;
@@ -194,10 +208,29 @@ static inline int __epa(void *addr)
return __encls_2(EPA, rbx, addr);
}
+/* Invalidate an EPC page and write it out to main memory. */
static inline int __ewb(struct sgx_pageinfo *pginfo, void *addr,
void *va)
{
return __encls_ret_3(EWB, pginfo, addr, va);
}
+/* Restrict the EPCM permissions of an EPC page. */
+static inline int __emodpr(struct sgx_secinfo *secinfo, void *addr)
+{
+ return __encls_ret_2(EMODPR, secinfo, addr);
+}
+
+/* Change the type of an EPC page. */
+static inline int __emodt(struct sgx_secinfo *secinfo, void *addr)
+{
+ return __encls_ret_2(EMODT, secinfo, addr);
+}
+
+/* Zero a page of EPC memory and add it to an initialized enclave. */
+static inline int __eaug(struct sgx_pageinfo *pginfo, void *addr)
+{
+ return __encls_2(EAUG, pginfo, addr);
+}
+
#endif /* _X86_ENCLS_H */
diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c
index 83df20e3e633..ebe79d60619f 100644
--- a/arch/x86/kernel/cpu/sgx/ioctl.c
+++ b/arch/x86/kernel/cpu/sgx/ioctl.c
@@ -17,7 +17,7 @@
#include "encl.h"
#include "encls.h"
-static struct sgx_va_page *sgx_encl_grow(struct sgx_encl *encl)
+struct sgx_va_page *sgx_encl_grow(struct sgx_encl *encl, bool reclaim)
{
struct sgx_va_page *va_page = NULL;
void *err;
@@ -30,7 +30,7 @@ static struct sgx_va_page *sgx_encl_grow(struct sgx_encl *encl)
if (!va_page)
return ERR_PTR(-ENOMEM);
- va_page->epc_page = sgx_alloc_va_page();
+ va_page->epc_page = sgx_alloc_va_page(reclaim);
if (IS_ERR(va_page->epc_page)) {
err = ERR_CAST(va_page->epc_page);
kfree(va_page);
@@ -43,7 +43,7 @@ static struct sgx_va_page *sgx_encl_grow(struct sgx_encl *encl)
return va_page;
}
-static void sgx_encl_shrink(struct sgx_encl *encl, struct sgx_va_page *va_page)
+void sgx_encl_shrink(struct sgx_encl *encl, struct sgx_va_page *va_page)
{
encl->page_cnt--;
@@ -64,7 +64,7 @@ static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs)
struct file *backing;
long ret;
- va_page = sgx_encl_grow(encl);
+ va_page = sgx_encl_grow(encl, true);
if (IS_ERR(va_page))
return PTR_ERR(va_page);
else if (va_page)
@@ -107,6 +107,7 @@ static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs)
set_bit(SGX_ENCL_DEBUG, &encl->flags);
encl->secs.encl = encl;
+ encl->secs.type = SGX_PAGE_TYPE_SECS;
encl->base = secs->base;
encl->size = secs->size;
encl->attributes = secs->attributes;
@@ -168,38 +169,6 @@ static long sgx_ioc_enclave_create(struct sgx_encl *encl, void __user *arg)
return ret;
}
-static struct sgx_encl_page *sgx_encl_page_alloc(struct sgx_encl *encl,
- unsigned long offset,
- u64 secinfo_flags)
-{
- struct sgx_encl_page *encl_page;
- unsigned long prot;
-
- encl_page = kzalloc(sizeof(*encl_page), GFP_KERNEL);
- if (!encl_page)
- return ERR_PTR(-ENOMEM);
-
- encl_page->desc = encl->base + offset;
- encl_page->encl = encl;
-
- prot = _calc_vm_trans(secinfo_flags, SGX_SECINFO_R, PROT_READ) |
- _calc_vm_trans(secinfo_flags, SGX_SECINFO_W, PROT_WRITE) |
- _calc_vm_trans(secinfo_flags, SGX_SECINFO_X, PROT_EXEC);
-
- /*
- * TCS pages must always RW set for CPU access while the SECINFO
- * permissions are *always* zero - the CPU ignores the user provided
- * values and silently overwrites them with zero permissions.
- */
- if ((secinfo_flags & SGX_SECINFO_PAGE_TYPE_MASK) == SGX_SECINFO_TCS)
- prot |= PROT_READ | PROT_WRITE;
-
- /* Calculate maximum of the VM flags for the page. */
- encl_page->vm_max_prot_bits = calc_vm_prot_bits(prot, 0);
-
- return encl_page;
-}
-
static int sgx_validate_secinfo(struct sgx_secinfo *secinfo)
{
u64 perm = secinfo->flags & SGX_SECINFO_PERMISSION_MASK;
@@ -306,7 +275,7 @@ static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long src,
return PTR_ERR(epc_page);
}
- va_page = sgx_encl_grow(encl);
+ va_page = sgx_encl_grow(encl, true);
if (IS_ERR(va_page)) {
ret = PTR_ERR(va_page);
goto err_out_free;
@@ -344,6 +313,7 @@ static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long src,
*/
encl_page->encl = encl;
encl_page->epc_page = epc_page;
+ encl_page->type = (secinfo->flags & SGX_SECINFO_PAGE_TYPE_MASK) >> 8;
encl->secs_child_cnt++;
if (flags & SGX_PAGE_MEASURE) {
@@ -372,6 +342,26 @@ err_out_free:
return ret;
}
+/*
+ * Ensure user provided offset and length values are valid for
+ * an enclave.
+ */
+static int sgx_validate_offset_length(struct sgx_encl *encl,
+ unsigned long offset,
+ unsigned long length)
+{
+ if (!IS_ALIGNED(offset, PAGE_SIZE))
+ return -EINVAL;
+
+ if (!length || !IS_ALIGNED(length, PAGE_SIZE))
+ return -EINVAL;
+
+ if (offset + length - PAGE_SIZE >= encl->size)
+ return -EINVAL;
+
+ return 0;
+}
+
/**
* sgx_ioc_enclave_add_pages() - The handler for %SGX_IOC_ENCLAVE_ADD_PAGES
* @encl: an enclave pointer
@@ -425,14 +415,10 @@ static long sgx_ioc_enclave_add_pages(struct sgx_encl *encl, void __user *arg)
if (copy_from_user(&add_arg, arg, sizeof(add_arg)))
return -EFAULT;
- if (!IS_ALIGNED(add_arg.offset, PAGE_SIZE) ||
- !IS_ALIGNED(add_arg.src, PAGE_SIZE))
- return -EINVAL;
-
- if (!add_arg.length || add_arg.length & (PAGE_SIZE - 1))
+ if (!IS_ALIGNED(add_arg.src, PAGE_SIZE))
return -EINVAL;
- if (add_arg.offset + add_arg.length - PAGE_SIZE >= encl->size)
+ if (sgx_validate_offset_length(encl, add_arg.offset, add_arg.length))
return -EINVAL;
if (copy_from_user(&secinfo, (void __user *)add_arg.secinfo,
@@ -674,6 +660,565 @@ static long sgx_ioc_enclave_provision(struct sgx_encl *encl, void __user *arg)
return sgx_set_attribute(&encl->attributes_mask, params.fd);
}
+/*
+ * Ensure enclave is ready for SGX2 functions. Readiness is checked
+ * by ensuring the hardware supports SGX2 and the enclave is initialized
+ * and thus able to handle requests to modify pages within it.
+ */
+static int sgx_ioc_sgx2_ready(struct sgx_encl *encl)
+{
+ if (!(cpu_feature_enabled(X86_FEATURE_SGX2)))
+ return -ENODEV;
+
+ if (!test_bit(SGX_ENCL_INITIALIZED, &encl->flags))
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * Some SGX functions require that no cached linear-to-physical address
+ * mappings are present before they can succeed. Collaborate with
+ * hardware via ENCLS[ETRACK] to ensure that all cached
+ * linear-to-physical address mappings belonging to all threads of
+ * the enclave are cleared. See sgx_encl_cpumask() for details.
+ *
+ * Must be called with enclave's mutex held from the time the
+ * SGX function requiring that no cached linear-to-physical mappings
+ * are present is executed until this ETRACK flow is complete.
+ */
+static int sgx_enclave_etrack(struct sgx_encl *encl)
+{
+ void *epc_virt;
+ int ret;
+
+ epc_virt = sgx_get_epc_virt_addr(encl->secs.epc_page);
+ ret = __etrack(epc_virt);
+ if (ret) {
+ /*
+ * ETRACK only fails when there is an OS issue. For
+ * example, two consecutive ETRACKs were sent without a
+ * completed IPI in between.
+ */
+ pr_err_once("ETRACK returned %d (0x%x)", ret, ret);
+ /*
+ * Send IPIs to kick CPUs out of the enclave and
+ * try ETRACK again.
+ */
+ on_each_cpu_mask(sgx_encl_cpumask(encl), sgx_ipi_cb, NULL, 1);
+ ret = __etrack(epc_virt);
+ if (ret) {
+ pr_err_once("ETRACK repeat returned %d (0x%x)",
+ ret, ret);
+ return -EFAULT;
+ }
+ }
+ on_each_cpu_mask(sgx_encl_cpumask(encl), sgx_ipi_cb, NULL, 1);
+
+ return 0;
+}
+
+/**
+ * sgx_enclave_restrict_permissions() - Restrict EPCM permissions
+ * @encl: Enclave to which the pages belong.
+ * @modp: Checked parameters from user on which pages need modifying and
+ * their new permissions.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: Otherwise.
+ */
+static long
+sgx_enclave_restrict_permissions(struct sgx_encl *encl,
+ struct sgx_enclave_restrict_permissions *modp)
+{
+ struct sgx_encl_page *entry;
+ struct sgx_secinfo secinfo;
+ unsigned long addr;
+ unsigned long c;
+ void *epc_virt;
+ int ret;
+
+ memset(&secinfo, 0, sizeof(secinfo));
+ secinfo.flags = modp->permissions & SGX_SECINFO_PERMISSION_MASK;
+
+ for (c = 0 ; c < modp->length; c += PAGE_SIZE) {
+ addr = encl->base + modp->offset + c;
+
+ sgx_reclaim_direct();
+
+ mutex_lock(&encl->lock);
+
+ entry = sgx_encl_load_page(encl, addr);
+ if (IS_ERR(entry)) {
+ ret = PTR_ERR(entry) == -EBUSY ? -EAGAIN : -EFAULT;
+ goto out_unlock;
+ }
+
+ /*
+ * Changing EPCM permissions is only supported on regular
+ * SGX pages. Attempting this change on other pages will
+ * result in #PF.
+ */
+ if (entry->type != SGX_PAGE_TYPE_REG) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ /*
+ * Apart from ensuring that read-access remains, do not verify
+ * the permission bits requested. The kernel has no control over
+ * how EPCM permissions can be relaxed from within the enclave.
+ * ENCLS[EMODPR] can only remove existing EPCM permissions;
+ * an attempt to set new permissions will be ignored by the
+ * hardware.
+ */
+
+ /* Change EPCM permissions. */
+ epc_virt = sgx_get_epc_virt_addr(entry->epc_page);
+ ret = __emodpr(&secinfo, epc_virt);
+ if (encls_faulted(ret)) {
+ /*
+ * All possible faults should be avoidable:
+ * parameters have been checked, only the permissions of a
+ * regular page are changed, and there are no concurrent
+ * SGX1/SGX2 ENCLS instructions since they are protected by
+ * the enclave's mutex.
+ */
+ pr_err_once("EMODPR encountered exception %d\n",
+ ENCLS_TRAPNR(ret));
+ ret = -EFAULT;
+ goto out_unlock;
+ }
+ if (encls_failed(ret)) {
+ modp->result = ret;
+ ret = -EFAULT;
+ goto out_unlock;
+ }
+
+ ret = sgx_enclave_etrack(encl);
+ if (ret) {
+ ret = -EFAULT;
+ goto out_unlock;
+ }
+
+ mutex_unlock(&encl->lock);
+ }
+
+ ret = 0;
+ goto out;
+
+out_unlock:
+ mutex_unlock(&encl->lock);
+out:
+ modp->count = c;
+
+ return ret;
+}
+
+/**
+ * sgx_ioc_enclave_restrict_permissions() - handler for
+ * %SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS
+ * @encl: an enclave pointer
+ * @arg: userspace pointer to a &struct sgx_enclave_restrict_permissions
+ * instance
+ *
+ * SGX2 distinguishes between relaxing and restricting the enclave page
+ * permissions maintained by the hardware (EPCM permissions) of pages
+ * belonging to an initialized enclave (after SGX_IOC_ENCLAVE_INIT).
+ *
+ * EPCM permissions cannot be restricted from within the enclave; the enclave
+ * requires the kernel to run the privileged level 0 instructions ENCLS[EMODPR]
+ * and ENCLS[ETRACK]. An attempt to relax EPCM permissions with this call
+ * will be ignored by the hardware.
+ *
+ * Return:
+ * - 0: Success
+ * - -errno: Otherwise
+ */
+static long sgx_ioc_enclave_restrict_permissions(struct sgx_encl *encl,
+ void __user *arg)
+{
+ struct sgx_enclave_restrict_permissions params;
+ long ret;
+
+ ret = sgx_ioc_sgx2_ready(encl);
+ if (ret)
+ return ret;
+
+ if (copy_from_user(&params, arg, sizeof(params)))
+ return -EFAULT;
+
+ if (sgx_validate_offset_length(encl, params.offset, params.length))
+ return -EINVAL;
+
+ if (params.permissions & ~SGX_SECINFO_PERMISSION_MASK)
+ return -EINVAL;
+
+ /*
+ * Fail early if invalid permissions requested to prevent ENCLS[EMODPR]
+ * from faulting later when the CPU does the same check.
+ */
+ if ((params.permissions & SGX_SECINFO_W) &&
+ !(params.permissions & SGX_SECINFO_R))
+ return -EINVAL;
+
+ if (params.result || params.count)
+ return -EINVAL;
+
+ ret = sgx_enclave_restrict_permissions(encl, &params);
+
+ if (copy_to_user(arg, &params, sizeof(params)))
+ return -EFAULT;
+
+ return ret;
+}
+
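/*
 * Illustrative userspace sketch (editor's addition, not part of this patch):
 * one way the SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS ioctl() above might be
 * driven. The struct and ioctl number are taken from the installed uapi
 * header <asm/sgx.h>; SGX_SECINFO_R is defined locally because the SECINFO
 * bits live in the kernel-internal asm/sgx.h. 'encl_fd', 'offset' and
 * 'length' are hypothetical and must cover PAGE_SIZE-aligned, already-added
 * pages. The enclave still has to run ENCLU[EACCEPT] on each page afterwards.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <asm/sgx.h>

#ifndef SGX_SECINFO_R
#define SGX_SECINFO_R 0x01UL	/* read permission bit, mirrors the kernel */
#endif

static int restrict_to_read_only(int encl_fd, unsigned long offset,
				 unsigned long length)
{
	struct sgx_enclave_restrict_permissions params = {
		.offset = offset,
		.length = length,
		.permissions = SGX_SECINFO_R,	/* drop W and X in the EPCM */
	};
	int ret = ioctl(encl_fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS, &params);

	if (ret)
		fprintf(stderr, "restrict failed after %llu bytes, EPCM result %llu\n",
			(unsigned long long)params.count,
			(unsigned long long)params.result);
	return ret;
}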
+/**
+ * sgx_enclave_modify_types() - Modify type of SGX enclave pages
+ * @encl: Enclave to which the pages belong.
+ * @modt: Checked parameters from user about which pages need modifying
+ * and their new page type.
+ *
+ * Return:
+ * - 0: Success
+ * - -errno: Otherwise
+ */
+static long sgx_enclave_modify_types(struct sgx_encl *encl,
+ struct sgx_enclave_modify_types *modt)
+{
+ unsigned long max_prot_restore;
+ enum sgx_page_type page_type;
+ struct sgx_encl_page *entry;
+ struct sgx_secinfo secinfo;
+ unsigned long prot;
+ unsigned long addr;
+ unsigned long c;
+ void *epc_virt;
+ int ret;
+
+ page_type = modt->page_type & SGX_PAGE_TYPE_MASK;
+
+ /*
+ * The only new page types allowed by hardware are PT_TCS and PT_TRIM.
+ */
+ if (page_type != SGX_PAGE_TYPE_TCS && page_type != SGX_PAGE_TYPE_TRIM)
+ return -EINVAL;
+
+ memset(&secinfo, 0, sizeof(secinfo));
+
+ secinfo.flags = page_type << 8;
+
+ for (c = 0 ; c < modt->length; c += PAGE_SIZE) {
+ addr = encl->base + modt->offset + c;
+
+ sgx_reclaim_direct();
+
+ mutex_lock(&encl->lock);
+
+ entry = sgx_encl_load_page(encl, addr);
+ if (IS_ERR(entry)) {
+ ret = PTR_ERR(entry) == -EBUSY ? -EAGAIN : -EFAULT;
+ goto out_unlock;
+ }
+
+ /*
+ * Borrow the logic from the Intel SDM. Regular pages
+ * (SGX_PAGE_TYPE_REG) can change type to SGX_PAGE_TYPE_TCS
+ * or SGX_PAGE_TYPE_TRIM but TCS pages can only be trimmed.
+ * CET pages are not yet supported.
+ */
+ if (!(entry->type == SGX_PAGE_TYPE_REG ||
+ (entry->type == SGX_PAGE_TYPE_TCS &&
+ page_type == SGX_PAGE_TYPE_TRIM))) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ max_prot_restore = entry->vm_max_prot_bits;
+
+ /*
+ * Once a regular page becomes a TCS page it cannot be
+ * changed back. So the maximum allowed protection reflects
+ * the TCS page that is always RW from the kernel's perspective
+ * but inaccessible from within the enclave. Before doing
+ * so, make sure that the new page type continues to
+ * respect the originally vetted page permissions.
+ */
+ if (entry->type == SGX_PAGE_TYPE_REG &&
+ page_type == SGX_PAGE_TYPE_TCS) {
+ if (~entry->vm_max_prot_bits & (VM_READ | VM_WRITE)) {
+ ret = -EPERM;
+ goto out_unlock;
+ }
+ prot = PROT_READ | PROT_WRITE;
+ entry->vm_max_prot_bits = calc_vm_prot_bits(prot, 0);
+
+ /*
+ * Prevent page from being reclaimed while mutex
+ * is released.
+ */
+ if (sgx_unmark_page_reclaimable(entry->epc_page)) {
+ ret = -EAGAIN;
+ goto out_entry_changed;
+ }
+
+ /*
+ * Do not keep encl->lock because of dependency on
+ * mmap_lock acquired in sgx_zap_enclave_ptes().
+ */
+ mutex_unlock(&encl->lock);
+
+ sgx_zap_enclave_ptes(encl, addr);
+
+ mutex_lock(&encl->lock);
+
+ sgx_mark_page_reclaimable(entry->epc_page);
+ }
+
+ /* Change EPC type */
+ epc_virt = sgx_get_epc_virt_addr(entry->epc_page);
+ ret = __emodt(&secinfo, epc_virt);
+ if (encls_faulted(ret)) {
+ /*
+ * All possible faults should be avoidable:
+ * parameters have been checked, only valid page types are
+ * changed, and there are no concurrent SGX1/SGX2 ENCLS
+ * instructions since they are protected by the enclave's
+ * mutex.
+ */
+ pr_err_once("EMODT encountered exception %d\n",
+ ENCLS_TRAPNR(ret));
+ ret = -EFAULT;
+ goto out_entry_changed;
+ }
+ if (encls_failed(ret)) {
+ modt->result = ret;
+ ret = -EFAULT;
+ goto out_entry_changed;
+ }
+
+ ret = sgx_enclave_etrack(encl);
+ if (ret) {
+ ret = -EFAULT;
+ goto out_unlock;
+ }
+
+ entry->type = page_type;
+
+ mutex_unlock(&encl->lock);
+ }
+
+ ret = 0;
+ goto out;
+
+out_entry_changed:
+ entry->vm_max_prot_bits = max_prot_restore;
+out_unlock:
+ mutex_unlock(&encl->lock);
+out:
+ modt->count = c;
+
+ return ret;
+}
+
+/**
+ * sgx_ioc_enclave_modify_types() - handler for %SGX_IOC_ENCLAVE_MODIFY_TYPES
+ * @encl: an enclave pointer
+ * @arg: userspace pointer to a &struct sgx_enclave_modify_types instance
+ *
+ * Ability to change the enclave page type supports the following use cases:
+ *
+ * * It is possible to add TCS pages to an enclave by changing the type of
+ * regular pages (%SGX_PAGE_TYPE_REG) to TCS (%SGX_PAGE_TYPE_TCS) pages.
+ * With this support the number of threads supported by an initialized
+ * enclave can be increased dynamically.
+ *
+ * * Regular or TCS pages can dynamically be removed from an initialized
+ * enclave by changing the page type to %SGX_PAGE_TYPE_TRIM. Changing the
+ * page type to %SGX_PAGE_TYPE_TRIM marks the page for removal with actual
+ * removal done by handler of %SGX_IOC_ENCLAVE_REMOVE_PAGES ioctl() called
+ * after ENCLU[EACCEPT] is run on %SGX_PAGE_TYPE_TRIM page from within the
+ * enclave.
+ *
+ * Return:
+ * - 0: Success
+ * - -errno: Otherwise
+ */
+static long sgx_ioc_enclave_modify_types(struct sgx_encl *encl,
+ void __user *arg)
+{
+ struct sgx_enclave_modify_types params;
+ long ret;
+
+ ret = sgx_ioc_sgx2_ready(encl);
+ if (ret)
+ return ret;
+
+ if (copy_from_user(&params, arg, sizeof(params)))
+ return -EFAULT;
+
+ if (sgx_validate_offset_length(encl, params.offset, params.length))
+ return -EINVAL;
+
+ if (params.page_type & ~SGX_PAGE_TYPE_MASK)
+ return -EINVAL;
+
+ if (params.result || params.count)
+ return -EINVAL;
+
+ ret = sgx_enclave_modify_types(encl, &params);
+
+ if (copy_to_user(arg, &params, sizeof(params)))
+ return -EFAULT;
+
+ return ret;
+}
+
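/*
 * Illustrative userspace sketch (editor's addition, not part of this patch):
 * converting an already-added regular page into a TCS page so that an
 * initialized enclave gains one more entry point, as described in the
 * kernel-doc above. The page-type value mirrors the kernel's enum
 * sgx_page_type (an assumption, since that enum is not in the installed
 * uapi header); 'encl_fd' and 'tcs_offset' are hypothetical. The enclave
 * must run ENCLU[EACCEPT] on the page before it can be used as a TCS.
 */
#include <sys/ioctl.h>
#include <asm/sgx.h>

#ifndef SGX_PAGE_TYPE_TCS
#define SGX_PAGE_TYPE_TCS 1	/* mirrors enum sgx_page_type */
#endif

static int add_dynamic_tcs(int encl_fd, unsigned long tcs_offset)
{
	struct sgx_enclave_modify_types params = {
		.offset = tcs_offset,
		.length = 4096,		/* exactly one page */
		.page_type = SGX_PAGE_TYPE_TCS,
	};

	return ioctl(encl_fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &params);
}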
+/**
+ * sgx_encl_remove_pages() - Remove trimmed pages from SGX enclave
+ * @encl: Enclave to which the pages belong
+ * @params: Checked parameters from user on which pages need to be removed
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: Otherwise.
+ */
+static long sgx_encl_remove_pages(struct sgx_encl *encl,
+ struct sgx_enclave_remove_pages *params)
+{
+ struct sgx_encl_page *entry;
+ struct sgx_secinfo secinfo;
+ unsigned long addr;
+ unsigned long c;
+ void *epc_virt;
+ int ret;
+
+ memset(&secinfo, 0, sizeof(secinfo));
+ secinfo.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_X;
+
+ for (c = 0 ; c < params->length; c += PAGE_SIZE) {
+ addr = encl->base + params->offset + c;
+
+ sgx_reclaim_direct();
+
+ mutex_lock(&encl->lock);
+
+ entry = sgx_encl_load_page(encl, addr);
+ if (IS_ERR(entry)) {
+ ret = PTR_ERR(entry) == -EBUSY ? -EAGAIN : -EFAULT;
+ goto out_unlock;
+ }
+
+ if (entry->type != SGX_PAGE_TYPE_TRIM) {
+ ret = -EPERM;
+ goto out_unlock;
+ }
+
+ /*
+ * ENCLS[EMODPR] is used here as a no-op to determine whether
+ * ENCLU[EACCEPT] was run from within the enclave. If
+ * ENCLS[EMODPR] is run with RWX on a trimmed page that has
+ * not yet been accepted, it returns
+ * %SGX_PAGE_NOT_MODIFIABLE; after the trimmed page is
+ * accepted the instruction will encounter a page fault.
+ */
+ epc_virt = sgx_get_epc_virt_addr(entry->epc_page);
+ ret = __emodpr(&secinfo, epc_virt);
+ if (!encls_faulted(ret) || ENCLS_TRAPNR(ret) != X86_TRAP_PF) {
+ ret = -EPERM;
+ goto out_unlock;
+ }
+
+ if (sgx_unmark_page_reclaimable(entry->epc_page)) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ /*
+ * Do not keep encl->lock because of dependency on
+ * mmap_lock acquired in sgx_zap_enclave_ptes().
+ */
+ mutex_unlock(&encl->lock);
+
+ sgx_zap_enclave_ptes(encl, addr);
+
+ mutex_lock(&encl->lock);
+
+ sgx_encl_free_epc_page(entry->epc_page);
+ encl->secs_child_cnt--;
+ entry->epc_page = NULL;
+ xa_erase(&encl->page_array, PFN_DOWN(entry->desc));
+ sgx_encl_shrink(encl, NULL);
+ kfree(entry);
+
+ mutex_unlock(&encl->lock);
+ }
+
+ ret = 0;
+ goto out;
+
+out_unlock:
+ mutex_unlock(&encl->lock);
+out:
+ params->count = c;
+
+ return ret;
+}
+
+/**
+ * sgx_ioc_enclave_remove_pages() - handler for %SGX_IOC_ENCLAVE_REMOVE_PAGES
+ * @encl: an enclave pointer
+ * @arg: userspace pointer to &struct sgx_enclave_remove_pages instance
+ *
+ * Final step of the flow removing pages from an initialized enclave. The
+ * complete flow is:
+ *
+ * 1) User changes the type of the pages to be removed to %SGX_PAGE_TYPE_TRIM
+ * using the %SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl().
+ * 2) User approves the page removal by running ENCLU[EACCEPT] from within
+ * the enclave.
+ * 3) User initiates actual page removal using the
+ * %SGX_IOC_ENCLAVE_REMOVE_PAGES ioctl() that is handled here.
+ *
+ * First remove any page table entries pointing to the page and then proceed
+ * with the actual removal of the enclave page and data in support of it.
+ *
+ * VA pages are not affected by this removal. It is thus possible that the
+ * enclave may end up with more VA pages than needed to support all its
+ * pages.
+ *
+ * Return:
+ * - 0: Success
+ * - -errno: Otherwise
+ */
+static long sgx_ioc_enclave_remove_pages(struct sgx_encl *encl,
+ void __user *arg)
+{
+ struct sgx_enclave_remove_pages params;
+ long ret;
+
+ ret = sgx_ioc_sgx2_ready(encl);
+ if (ret)
+ return ret;
+
+ if (copy_from_user(&params, arg, sizeof(params)))
+ return -EFAULT;
+
+ if (sgx_validate_offset_length(encl, params.offset, params.length))
+ return -EINVAL;
+
+ if (params.count)
+ return -EINVAL;
+
+ ret = sgx_encl_remove_pages(encl, &params);
+
+ if (copy_to_user(arg, &params, sizeof(params)))
+ return -EFAULT;
+
+ return ret;
+}
+
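/*
 * Illustrative userspace sketch (editor's addition, not part of this patch)
 * of the three-step removal flow described above. Step 2, ENCLU[EACCEPT] on
 * every trimmed page, must happen inside the enclave and is only marked by
 * a placeholder comment here. SGX_PAGE_TYPE_TRIM mirrors the kernel's enum
 * sgx_page_type (an assumption); 'encl_fd', 'offset' and 'length' are
 * hypothetical, page-aligned values.
 */
#include <sys/ioctl.h>
#include <asm/sgx.h>

#ifndef SGX_PAGE_TYPE_TRIM
#define SGX_PAGE_TYPE_TRIM 4	/* mirrors enum sgx_page_type */
#endif

static int remove_enclave_pages(int encl_fd, unsigned long offset,
				unsigned long length)
{
	struct sgx_enclave_modify_types modt = {
		.offset = offset,
		.length = length,
		.page_type = SGX_PAGE_TYPE_TRIM,	/* step 1: mark for removal */
	};
	struct sgx_enclave_remove_pages remove = {
		.offset = offset,
		.length = length,
	};
	int ret;

	ret = ioctl(encl_fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt);
	if (ret)
		return ret;

	/* Step 2: the enclave runs ENCLU[EACCEPT] on each trimmed page. */

	return ioctl(encl_fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove);	/* step 3 */
}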
long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
struct sgx_encl *encl = filep->private_data;
@@ -695,6 +1240,16 @@ long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
case SGX_IOC_ENCLAVE_PROVISION:
ret = sgx_ioc_enclave_provision(encl, (void __user *)arg);
break;
+ case SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS:
+ ret = sgx_ioc_enclave_restrict_permissions(encl,
+ (void __user *)arg);
+ break;
+ case SGX_IOC_ENCLAVE_MODIFY_TYPES:
+ ret = sgx_ioc_enclave_modify_types(encl, (void __user *)arg);
+ break;
+ case SGX_IOC_ENCLAVE_REMOVE_PAGES:
+ ret = sgx_ioc_enclave_remove_pages(encl, (void __user *)arg);
+ break;
default:
ret = -ENOIOCTLCMD;
break;
diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
index a78652d43e61..515e2a5f25bb 100644
--- a/arch/x86/kernel/cpu/sgx/main.c
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -137,36 +137,9 @@ static void sgx_reclaimer_block(struct sgx_epc_page *epc_page)
struct sgx_encl_page *page = epc_page->owner;
unsigned long addr = page->desc & PAGE_MASK;
struct sgx_encl *encl = page->encl;
- unsigned long mm_list_version;
- struct sgx_encl_mm *encl_mm;
- struct vm_area_struct *vma;
- int idx, ret;
-
- do {
- mm_list_version = encl->mm_list_version;
-
- /* Pairs with smp_rmb() in sgx_encl_mm_add(). */
- smp_rmb();
-
- idx = srcu_read_lock(&encl->srcu);
-
- list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
- if (!mmget_not_zero(encl_mm->mm))
- continue;
-
- mmap_read_lock(encl_mm->mm);
-
- ret = sgx_encl_find(encl_mm->mm, addr, &vma);
- if (!ret && encl == vma->vm_private_data)
- zap_vma_ptes(vma, addr, PAGE_SIZE);
-
- mmap_read_unlock(encl_mm->mm);
-
- mmput_async(encl_mm->mm);
- }
+ int ret;
- srcu_read_unlock(&encl->srcu, idx);
- } while (unlikely(encl->mm_list_version != mm_list_version));
+ sgx_zap_enclave_ptes(encl, addr);
mutex_lock(&encl->lock);
@@ -201,39 +174,10 @@ static int __sgx_encl_ewb(struct sgx_epc_page *epc_page, void *va_slot,
return ret;
}
-static void sgx_ipi_cb(void *info)
+void sgx_ipi_cb(void *info)
{
}
-static const cpumask_t *sgx_encl_ewb_cpumask(struct sgx_encl *encl)
-{
- cpumask_t *cpumask = &encl->cpumask;
- struct sgx_encl_mm *encl_mm;
- int idx;
-
- /*
- * Can race with sgx_encl_mm_add(), but ETRACK has already been
- * executed, which means that the CPUs running in the new mm will enter
- * into the enclave with a fresh epoch.
- */
- cpumask_clear(cpumask);
-
- idx = srcu_read_lock(&encl->srcu);
-
- list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
- if (!mmget_not_zero(encl_mm->mm))
- continue;
-
- cpumask_or(cpumask, cpumask, mm_cpumask(encl_mm->mm));
-
- mmput_async(encl_mm->mm);
- }
-
- srcu_read_unlock(&encl->srcu, idx);
-
- return cpumask;
-}
-
/*
* Swap page to the regular memory transformed to the blocked state by using
* EBLOCK, which means that it can no longer be referenced (no new TLB entries).
@@ -280,7 +224,7 @@ static void sgx_encl_ewb(struct sgx_epc_page *epc_page,
* miss cpus that entered the enclave between
* generating the mask and incrementing epoch.
*/
- on_each_cpu_mask(sgx_encl_ewb_cpumask(encl),
+ on_each_cpu_mask(sgx_encl_cpumask(encl),
sgx_ipi_cb, NULL, 1);
ret = __sgx_encl_ewb(epc_page, va_slot, backing);
}
@@ -431,6 +375,17 @@ static bool sgx_should_reclaim(unsigned long watermark)
!list_empty(&sgx_active_page_list);
}
+/*
+ * sgx_reclaim_direct() should be called (without enclave's mutex held)
+ * in locations where SGX memory resources might be low and might be
+ * needed in order to make forward progress.
+ */
+void sgx_reclaim_direct(void)
+{
+ if (sgx_should_reclaim(SGX_NR_LOW_PAGES))
+ sgx_reclaim_pages();
+}
+
static int ksgxd(void *p)
{
set_freezable();
diff --git a/arch/x86/kernel/cpu/sgx/sgx.h b/arch/x86/kernel/cpu/sgx/sgx.h
index 0f17def9fe6f..0f2020653fba 100644
--- a/arch/x86/kernel/cpu/sgx/sgx.h
+++ b/arch/x86/kernel/cpu/sgx/sgx.h
@@ -86,10 +86,13 @@ static inline void *sgx_get_epc_virt_addr(struct sgx_epc_page *page)
struct sgx_epc_page *__sgx_alloc_epc_page(void);
void sgx_free_epc_page(struct sgx_epc_page *page);
+void sgx_reclaim_direct(void);
void sgx_mark_page_reclaimable(struct sgx_epc_page *page);
int sgx_unmark_page_reclaimable(struct sgx_epc_page *page);
struct sgx_epc_page *sgx_alloc_epc_page(void *owner, bool reclaim);
+void sgx_ipi_cb(void *info);
+
#ifdef CONFIG_X86_SGX_KVM
int __init sgx_vepc_init(void);
#else
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 24b9fa89aa27..bd165004776d 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -91,6 +91,7 @@ static int ftrace_verify_code(unsigned long ip, const char *old_code)
/* Make sure it is what we expect it to be */
if (memcmp(cur_code, old_code, MCOUNT_INSN_SIZE) != 0) {
+ ftrace_expected = old_code;
WARN_ON(1);
return -EINVAL;
}
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 7c4ab8870da4..4c3c27b6aea3 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -505,7 +505,7 @@ static void kprobe_emulate_jcc(struct kprobe *p, struct pt_regs *regs)
match = ((regs->flags & X86_EFLAGS_SF) >> X86_EFLAGS_SF_BIT) ^
((regs->flags & X86_EFLAGS_OF) >> X86_EFLAGS_OF_BIT);
if (p->ainsn.jcc.type >= 0xe)
- match = match && (regs->flags & X86_EFLAGS_ZF);
+ match = match || (regs->flags & X86_EFLAGS_ZF);
}
__kprobe_emulate_jmp(p, regs, (match && !invert) || (!match && invert));
}
@@ -814,16 +814,20 @@ set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
static void kprobe_post_process(struct kprobe *cur, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
- if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
- kcb->kprobe_status = KPROBE_HIT_SSDONE;
- cur->post_handler(cur, regs, 0);
- }
-
/* Restore back the original saved kprobes variables and continue. */
- if (kcb->kprobe_status == KPROBE_REENTER)
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ /* This will restore both kcb and current_kprobe */
restore_previous_kprobe(kcb);
- else
+ } else {
+ /*
+ * Always update the kcb status because
+ * reset_current_kprobe() doesn't update kcb.
+ */
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ if (cur->post_handler)
+ cur->post_handler(cur, regs, 0);
reset_current_kprobe();
+ }
}
NOKPROBE_SYMBOL(kprobe_post_process);
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index 63dc626627a0..a428c62330d3 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -701,7 +701,13 @@ e_term:
void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
unsigned int npages)
{
- if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+ /*
+ * This can be invoked in early boot while running identity mapped, so
+ * use an open coded check for SNP instead of using cc_platform_has().
+ * This eliminates worries about jump tables or checking boot_cpu_data
+ * in the cc_platform_has() function.
+ */
+ if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
return;
/*
@@ -717,7 +723,13 @@ void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long padd
void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
unsigned int npages)
{
- if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+ /*
+ * This can be invoked in early boot while running identity mapped, so
+ * use an open coded check for SNP instead of using cc_platform_has().
+ * This eliminates worries about jump tables or checking boot_cpu_data
+ * in the cc_platform_has() function.
+ */
+ if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
return;
/* Invalidate the memory pages before they are marked shared in the RMP table. */
@@ -2100,7 +2112,7 @@ bool __init snp_init(struct boot_params *bp)
return true;
}
-void __init snp_abort(void)
+void __init __noreturn snp_abort(void)
{
sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
}
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 5e7f9532a10d..f24227bc3220 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -95,10 +95,6 @@ EXPORT_PER_CPU_SYMBOL(cpu_core_map);
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_die_map);
EXPORT_PER_CPU_SYMBOL(cpu_die_map);
-DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
-
-DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_l2c_shared_map);
-
/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index 0c1154a1c403..3bacd935f840 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -6,7 +6,6 @@
* Copyright (c) 2006-2009, Intel Corporation
*/
-#include <linux/intel-iommu.h>
#include <linux/init_task.h>
#include <linux/spinlock.h>
#include <linux/export.h>
@@ -516,17 +515,3 @@ struct acpi_table_header *tboot_get_dmar_table(struct acpi_table_header *dmar_tb
return dmar_tbl;
}
-
-int tboot_force_iommu(void)
-{
- if (!tboot_enabled())
- return 0;
-
- if (no_iommu || dmar_disabled)
- pr_warn("Forcing Intel-IOMMU to enabled\n");
-
- dmar_disabled = 0;
- no_iommu = 0;
-
- return 1;
-}
diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
index 38185aedf7d1..0ea57da92940 100644
--- a/arch/x86/kernel/unwind_orc.c
+++ b/arch/x86/kernel/unwind_orc.c
@@ -93,22 +93,27 @@ static struct orc_entry *orc_find(unsigned long ip);
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
struct ftrace_ops *ops;
- unsigned long caller;
+ unsigned long tramp_addr, offset;
ops = ftrace_ops_trampoline(ip);
if (!ops)
return NULL;
+ /* Set tramp_addr to the start of the code copied by the trampoline */
if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
- caller = (unsigned long)ftrace_regs_call;
+ tramp_addr = (unsigned long)ftrace_regs_caller;
else
- caller = (unsigned long)ftrace_call;
+ tramp_addr = (unsigned long)ftrace_caller;
+
+ /* Now move tramp_addr to the location within the trampoline that ip is at */
+ offset = ip - ops->trampoline;
+ tramp_addr += offset;
/* Prevent unlikely recursion */
- if (ip == caller)
+ if (ip == tramp_addr)
return NULL;
- return orc_find(caller);
+ return orc_find(tramp_addr);
}
#else
static struct orc_entry *orc_ftrace_find(unsigned long ip)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 047c583596bb..d5ec3a2ed5a4 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -326,7 +326,8 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
".align " __stringify(FASTOP_SIZE) " \n\t" \
".type " name ", @function \n\t" \
name ":\n\t" \
- ASM_ENDBR
+ ASM_ENDBR \
+ IBT_NOSEAL(name)
#define FOP_FUNC(name) \
__FOP_FUNC(#name)
@@ -446,27 +447,12 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
FOP_END
/* Special case for SETcc - 1 instruction per cc */
-
-/*
- * Depending on .config the SETcc functions look like:
- *
- * ENDBR [4 bytes; CONFIG_X86_KERNEL_IBT]
- * SETcc %al [3 bytes]
- * RET | JMP __x86_return_thunk [1,5 bytes; CONFIG_RETHUNK]
- * INT3 [1 byte; CONFIG_SLS]
- */
-#define SETCC_ALIGN 16
-
#define FOP_SETCC(op) \
- ".align " __stringify(SETCC_ALIGN) " \n\t" \
- ".type " #op ", @function \n\t" \
- #op ": \n\t" \
- ASM_ENDBR \
+ FOP_FUNC(op) \
#op " %al \n\t" \
- __FOP_RET(#op) \
- ".skip " __stringify(SETCC_ALIGN) " - (.-" #op "), 0xcc \n\t"
+ FOP_RET(op)
-__FOP_START(setcc, SETCC_ALIGN)
+FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
@@ -493,7 +479,7 @@ FOP_END;
/*
* XXX: inoutclob user must know where the argument is being expanded.
- * Relying on CONFIG_CC_HAS_ASM_GOTO would allow us to remove _fault.
+ * Using asm goto would allow us to remove _fault.
*/
#define asm_safe(insn, inoutclob...) \
({ \
@@ -1079,7 +1065,7 @@ static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
{
u8 rc;
- void (*fop)(void) = (void *)em_setcc + SETCC_ALIGN * (condition & 0xf);
+ void (*fop)(void) = (void *)em_setcc + FASTOP_SIZE * (condition & 0xf);
flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
asm("push %[flags]; popf; " CALL_NOSPEC
@@ -4578,6 +4564,10 @@ static const struct mode_dual mode_dual_63 = {
N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
};
+static const struct instr_dual instr_dual_8d = {
+ D(DstReg | SrcMem | ModRM | NoAccess), N
+};
+
static const struct opcode opcode_table[256] = {
/* 0x00 - 0x07 */
F6ALU(Lock, em_add),
@@ -4634,7 +4624,7 @@ static const struct opcode opcode_table[256] = {
I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
- D(ModRM | SrcMem | NoAccess | DstReg),
+ ID(0, &instr_dual_8d),
I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
G(0, group1A),
/* 0x90 - 0x97 */
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index e2ce3556915e..9dda989a1cf0 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -2284,10 +2284,12 @@ void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
struct kvm_lapic *apic = vcpu->arch.apic;
u64 val;
- if (apic_x2apic_mode(apic))
- kvm_lapic_msr_read(apic, offset, &val);
- else
+ if (apic_x2apic_mode(apic)) {
+ if (KVM_BUG_ON(kvm_lapic_msr_read(apic, offset, &val), vcpu->kvm))
+ return;
+ } else {
val = kvm_lapic_get_reg(apic, offset);
+ }
/*
* ICR is a single 64-bit register when x2APIC is enabled. For legacy
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index a99acec925eb..6bdaacb6faa0 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -6,6 +6,8 @@
#include "kvm_cache_regs.h"
#include "cpuid.h"
+extern bool __read_mostly enable_mmio_caching;
+
#define PT_WRITABLE_SHIFT 1
#define PT_USER_SHIFT 2
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 4236a28b9be5..126fa9aec64c 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2914,7 +2914,7 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
* If addresses are being invalidated, skip prefetching to avoid
* accidentally prefetching those addresses.
*/
- if (unlikely(vcpu->kvm->mmu_notifier_count))
+ if (unlikely(vcpu->kvm->mmu_invalidate_in_progress))
return;
__direct_pte_prefetch(vcpu, sp, sptep);
@@ -2928,7 +2928,7 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
*
* There are several ways to safely use this helper:
*
- * - Check mmu_notifier_retry_hva() after grabbing the mapping level, before
+ * - Check mmu_invalidate_retry_hva() after grabbing the mapping level, before
* consuming it. In this case, mmu_lock doesn't need to be held during the
* lookup, but it does need to be held while checking the MMU notifier.
*
@@ -3056,7 +3056,7 @@ void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
return;
/*
- * mmu_notifier_retry() was successful and mmu_lock is held, so
+ * mmu_invalidate_retry() was successful and mmu_lock is held, so
* the pmd can't be split from under us.
*/
fault->goal_level = fault->req_level;
@@ -4164,7 +4164,7 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
if (!fault->prefetch && kvm_can_do_async_pf(vcpu)) {
trace_kvm_try_async_get_page(fault->addr, fault->gfn);
if (kvm_find_async_pf_gfn(vcpu, fault->gfn)) {
- trace_kvm_async_pf_doublefault(fault->addr, fault->gfn);
+ trace_kvm_async_pf_repeated_fault(fault->addr, fault->gfn);
kvm_make_request(KVM_REQ_APF_HALT, vcpu);
return RET_PF_RETRY;
} else if (kvm_arch_setup_async_pf(vcpu, fault->addr, fault->gfn)) {
@@ -4203,7 +4203,7 @@ static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
return true;
return fault->slot &&
- mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva);
+ mmu_invalidate_retry_hva(vcpu->kvm, mmu_seq, fault->hva);
}
static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
@@ -4227,7 +4227,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
if (r)
return r;
- mmu_seq = vcpu->kvm->mmu_notifier_seq;
+ mmu_seq = vcpu->kvm->mmu_invalidate_seq;
smp_rmb();
r = kvm_faultin_pfn(vcpu, fault);
@@ -6055,7 +6055,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
write_lock(&kvm->mmu_lock);
- kvm_inc_notifier_count(kvm, gfn_start, gfn_end);
+ kvm_mmu_invalidate_begin(kvm, gfn_start, gfn_end);
flush = kvm_rmap_zap_gfn_range(kvm, gfn_start, gfn_end);
@@ -6069,7 +6069,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
gfn_end - gfn_start);
- kvm_dec_notifier_count(kvm, gfn_start, gfn_end);
+ kvm_mmu_invalidate_end(kvm, gfn_start, gfn_end);
write_unlock(&kvm->mmu_lock);
}
@@ -6697,11 +6697,15 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
/*
* nx_huge_pages needs to be resolved to true/false when kvm.ko is loaded, as
* its default value of -1 is technically undefined behavior for a boolean.
+ * Forward the module init call to SPTE code so that it too can handle module
+ * params that need to be resolved/snapshot.
*/
-void kvm_mmu_x86_module_init(void)
+void __init kvm_mmu_x86_module_init(void)
{
if (nx_huge_pages == -1)
__set_nx_huge_pages(get_nx_auto_mode());
+
+ kvm_mmu_spte_module_init();
}
/*
@@ -6740,7 +6744,7 @@ int kvm_mmu_vendor_module_init(void)
if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
goto out;
- ret = register_shrinker(&mmu_shrinker);
+ ret = register_shrinker(&mmu_shrinker, "x86-mmu");
if (ret)
goto out;
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index f5958071220c..39e0205e7300 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -589,7 +589,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
* If addresses are being invalidated, skip prefetching to avoid
* accidentally prefetching those addresses.
*/
- if (unlikely(vcpu->kvm->mmu_notifier_count))
+ if (unlikely(vcpu->kvm->mmu_invalidate_in_progress))
return;
if (sp->role.direct)
@@ -838,7 +838,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
else
fault->max_level = walker.level;
- mmu_seq = vcpu->kvm->mmu_notifier_seq;
+ mmu_seq = vcpu->kvm->mmu_invalidate_seq;
smp_rmb();
r = kvm_faultin_pfn(vcpu, fault);
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index 7314d27d57a4..2e08b2a45361 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -20,7 +20,9 @@
#include <asm/vmx.h>
bool __read_mostly enable_mmio_caching = true;
+static bool __ro_after_init allow_mmio_caching;
module_param_named(mmio_caching, enable_mmio_caching, bool, 0444);
+EXPORT_SYMBOL_GPL(enable_mmio_caching);
u64 __read_mostly shadow_host_writable_mask;
u64 __read_mostly shadow_mmu_writable_mask;
@@ -43,6 +45,18 @@ u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;
u8 __read_mostly shadow_phys_bits;
+void __init kvm_mmu_spte_module_init(void)
+{
+ /*
+ * Snapshot userspace's desire to allow MMIO caching. Whether or not
+ * KVM can actually enable MMIO caching depends on vendor-specific
+ * hardware capabilities and other module params that can't be resolved
+ * until the vendor module is loaded, i.e. enable_mmio_caching can and
+ * will change when the vendor module is (re)loaded.
+ */
+ allow_mmio_caching = enable_mmio_caching;
+}
+
static u64 generation_mmio_spte_mask(u64 gen)
{
u64 mask;
@@ -340,10 +354,24 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
BUG_ON((u64)(unsigned)access_mask != access_mask);
WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);
+ /*
+ * Reset to the original module param value to honor userspace's desire
+ * to (dis)allow MMIO caching. Update the param itself so that
+ * userspace can see whether or not KVM is actually using MMIO caching.
+ */
+ enable_mmio_caching = allow_mmio_caching;
if (!enable_mmio_caching)
mmio_value = 0;
/*
+ * The mask must contain only bits that are carved out specifically for
+ * the MMIO SPTE mask, e.g. to ensure there's no overlap with the MMIO
+ * generation.
+ */
+ if (WARN_ON(mmio_mask & ~SPTE_MMIO_ALLOWED_MASK))
+ mmio_value = 0;
+
+ /*
* Disable MMIO caching if the MMIO value collides with the bits that
* are used to hold the relocated GFN when the L1TF mitigation is
* enabled. This should never fire as there is no known hardware that
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index cabe3fbb4f39..f3744eea45f5 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -5,8 +5,6 @@
#include "mmu_internal.h"
-extern bool __read_mostly enable_mmio_caching;
-
/*
* A MMU present SPTE is backed by actual memory and may or may not be present
* in hardware. E.g. MMIO SPTEs are not considered present. Use bit 11, as it
@@ -125,6 +123,20 @@ static_assert(!(EPT_SPTE_MMU_WRITABLE & SHADOW_ACC_TRACK_SAVED_MASK));
static_assert(!(SPTE_MMU_PRESENT_MASK &
(MMIO_SPTE_GEN_LOW_MASK | MMIO_SPTE_GEN_HIGH_MASK)));
+/*
+ * The SPTE MMIO mask must NOT overlap the MMIO generation bits or the
+ * MMU-present bit. The generation obviously co-exists with the magic MMIO
+ * mask/value, and MMIO SPTEs are considered !MMU-present.
+ *
+ * The SPTE MMIO mask is allowed to use hardware "present" bits (i.e. all EPT
+ * RWX bits), all physical address bits (legal PA bits are used for "fast" MMIO
+ * and so they're off-limits for generation; additional checks ensure the mask
+ * doesn't overlap legal PA bits), and bit 63 (carved out for future usage).
+ */
+#define SPTE_MMIO_ALLOWED_MASK (BIT_ULL(63) | GENMASK_ULL(51, 12) | GENMASK_ULL(2, 0))
+static_assert(!(SPTE_MMIO_ALLOWED_MASK &
+ (SPTE_MMU_PRESENT_MASK | MMIO_SPTE_GEN_LOW_MASK | MMIO_SPTE_GEN_HIGH_MASK)));
+
#define MMIO_SPTE_GEN_LOW_BITS (MMIO_SPTE_GEN_LOW_END - MMIO_SPTE_GEN_LOW_START + 1)
#define MMIO_SPTE_GEN_HIGH_BITS (MMIO_SPTE_GEN_HIGH_END - MMIO_SPTE_GEN_HIGH_START + 1)
@@ -450,6 +462,7 @@ static inline u64 restore_acc_track_spte(u64 spte)
u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn);
+void __init kvm_mmu_spte_module_init(void);
void kvm_mmu_reset_all_pte_masks(void);
#endif
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index b0e793e7d85c..28064060413a 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -22,6 +22,7 @@
#include <asm/trapnr.h>
#include <asm/fpu/xcr.h>
+#include "mmu.h"
#include "x86.h"
#include "svm.h"
#include "svm_ops.h"
@@ -2221,6 +2222,15 @@ void __init sev_hardware_setup(void)
if (!sev_es_enabled)
goto out;
+ /*
+ * SEV-ES requires MMIO caching as KVM doesn't have access to the guest
+ * instruction stream, i.e. can't emulate in response to a #NPF and
+ * instead relies on #NPF(RSVD) being reflected into the guest as #VC
+ * (the guest can then do a #VMGEXIT to request MMIO emulation).
+ */
+ if (!enable_mmio_caching)
+ goto out;
+
/* Does the CPU support SEV-ES? */
if (!boot_cpu_has(X86_FEATURE_SEV_ES))
goto out;
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 38f873cb6f2c..f3813dbacb9f 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -5034,13 +5034,16 @@ static __init int svm_hardware_setup(void)
/* Setup shadow_me_value and shadow_me_mask */
kvm_mmu_set_me_spte_mask(sme_me_mask, sme_me_mask);
- /* Note, SEV setup consumes npt_enabled. */
+ svm_adjust_mmio_mask();
+
+ /*
+ * Note, SEV setup consumes npt_enabled and enable_mmio_caching (which
+ * may be modified by svm_adjust_mmio_mask()).
+ */
sev_hardware_setup();
svm_hv_hardware_setup();
- svm_adjust_mmio_mask();
-
for_each_possible_cpu(cpu) {
r = svm_cpu_init(cpu);
if (r)
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 862c1a4d971b..c399637a3a79 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -171,13 +171,6 @@ static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
return get_gp_pmc(pmu, msr, MSR_IA32_PMC0);
}
-bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
-{
- struct x86_pmu_lbr *lbr = vcpu_to_lbr_records(vcpu);
-
- return lbr->nr && (vcpu_get_perf_capabilities(vcpu) & PMU_CAP_LBR_FMT);
-}
-
static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
{
struct x86_pmu_lbr *records = vcpu_to_lbr_records(vcpu);
@@ -592,7 +585,9 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
bitmap_set(pmu->all_valid_pmc_idx,
INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);
- if (cpuid_model_is_consistent(vcpu))
+ perf_capabilities = vcpu_get_perf_capabilities(vcpu);
+ if (cpuid_model_is_consistent(vcpu) &&
+ (perf_capabilities & PMU_CAP_LBR_FMT))
x86_perf_get_lbr(&lbr_desc->records);
else
lbr_desc->records.nr = 0;
@@ -600,7 +595,6 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
if (lbr_desc->records.nr)
bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_IDX_FIXED_VLBR, 1);
- perf_capabilities = vcpu_get_perf_capabilities(vcpu);
if (perf_capabilities & PERF_CAP_PEBS_FORMAT) {
if (perf_capabilities & PERF_CAP_PEBS_BASELINE) {
pmu->pebs_enable_mask = counter_mask;
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index 4182c7ffc909..6de96b943804 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -227,11 +227,13 @@ SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL)
* entries and (in some cases) RSB underflow.
*
* eIBRS has its own protection against poisoned RSB, so it doesn't
- * need the RSB filling sequence. But it does need to be enabled
- * before the first unbalanced RET.
+ * need the RSB filling sequence. But it does need to be enabled, and a
+ * single call to retire, before the first unbalanced RET.
*/
- FILL_RETURN_BUFFER %_ASM_CX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT
+ FILL_RETURN_BUFFER %_ASM_CX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT,\
+ X86_FEATURE_RSB_VMEXIT_LITE
+
pop %_ASM_ARG2 /* @flags */
pop %_ASM_ARG1 /* @vmx */
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index fb8e3480a9d7..24d58c2ffaa3 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -6,6 +6,7 @@
#include <asm/kvm.h>
#include <asm/intel_pt.h>
+#include <asm/perf_event.h>
#include "capabilities.h"
#include "../kvm_cache_regs.h"
@@ -104,15 +105,6 @@ static inline bool intel_pmu_has_perf_global_ctrl(struct kvm_pmu *pmu)
return pmu->version > 1;
}
-#define vcpu_to_lbr_desc(vcpu) (&to_vmx(vcpu)->lbr_desc)
-#define vcpu_to_lbr_records(vcpu) (&to_vmx(vcpu)->lbr_desc.records)
-
-void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu);
-bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu);
-
-int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu);
-void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu);
-
struct lbr_desc {
/* Basic info about guest LBR records. */
struct x86_pmu_lbr records;
@@ -542,6 +534,25 @@ static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
return container_of(vcpu, struct vcpu_vmx, vcpu);
}
+static inline struct lbr_desc *vcpu_to_lbr_desc(struct kvm_vcpu *vcpu)
+{
+ return &to_vmx(vcpu)->lbr_desc;
+}
+
+static inline struct x86_pmu_lbr *vcpu_to_lbr_records(struct kvm_vcpu *vcpu)
+{
+ return &vcpu_to_lbr_desc(vcpu)->records;
+}
+
+static inline bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
+{
+ return !!vcpu_to_lbr_records(vcpu)->nr;
+}
+
+void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu);
+int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu);
+void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu);
+
static inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 33560bfa0cac..205ebdc2b11b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -41,7 +41,6 @@
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
-#include <linux/intel-iommu.h>
#include <linux/cpufreq.h>
#include <linux/user-return-notifier.h>
#include <linux/srcu.h>
@@ -3414,6 +3413,7 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache;
struct kvm_steal_time __user *st;
struct kvm_memslots *slots;
+ gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS;
u64 steal;
u32 version;
@@ -3431,13 +3431,12 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
slots = kvm_memslots(vcpu->kvm);
if (unlikely(slots->generation != ghc->generation ||
+ gpa != ghc->gpa ||
kvm_is_error_hva(ghc->hva) || !ghc->memslot)) {
- gfn_t gfn = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS;
-
/* We rely on the fact that it fits in a single page. */
BUILD_BUG_ON((sizeof(*st) - 1) & KVM_STEAL_VALID_BITS);
- if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gfn, sizeof(*st)) ||
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st)) ||
kvm_is_error_hva(ghc->hva) || !ghc->memslot)
return;
}
@@ -3546,9 +3545,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 1;
vcpu->arch.perf_capabilities = data;
-
+ kvm_pmu_refresh(vcpu);
return 0;
- }
+ }
case MSR_EFER:
return set_efer(vcpu, msr_info);
case MSR_K7_HWCR:
@@ -4715,6 +4714,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
struct kvm_steal_time __user *st;
struct kvm_memslots *slots;
static const u8 preempted = KVM_VCPU_PREEMPTED;
+ gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS;
/*
* The vCPU can be marked preempted if and only if the VM-Exit was on
@@ -4742,6 +4742,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
slots = kvm_memslots(vcpu->kvm);
if (unlikely(slots->generation != ghc->generation ||
+ gpa != ghc->gpa ||
kvm_is_error_hva(ghc->hva) || !ghc->memslot))
return;
@@ -13020,6 +13021,7 @@ void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_c
fault.error_code = error_code;
fault.nested_page_fault = false;
fault.address = gva;
+ fault.async_page_fault = false;
}
vcpu->arch.walk_mmu->inject_page_fault(vcpu, &fault);
}
diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index a0c05ccbf4b1..280cb5dc7341 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -707,23 +707,24 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
break;
case KVM_XEN_VCPU_ATTR_TYPE_TIMER:
- if (data->u.timer.port) {
- if (data->u.timer.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL) {
- r = -EINVAL;
- break;
- }
- vcpu->arch.xen.timer_virq = data->u.timer.port;
+ if (data->u.timer.port &&
+ data->u.timer.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL) {
+ r = -EINVAL;
+ break;
+ }
+
+ if (!vcpu->arch.xen.timer.function)
kvm_xen_init_timer(vcpu);
- /* Restart the timer if it's set */
- if (data->u.timer.expires_ns)
- kvm_xen_start_timer(vcpu, data->u.timer.expires_ns,
- data->u.timer.expires_ns -
- get_kvmclock_ns(vcpu->kvm));
- } else if (kvm_xen_timer_enabled(vcpu)) {
- kvm_xen_stop_timer(vcpu);
- vcpu->arch.xen.timer_virq = 0;
- }
+ /* Stop the timer (if it's running) before changing the vector */
+ kvm_xen_stop_timer(vcpu);
+ vcpu->arch.xen.timer_virq = data->u.timer.port;
+
+ /* Start the timer if the new value has a valid vector+expiry. */
+ if (data->u.timer.port && data->u.timer.expires_ns)
+ kvm_xen_start_timer(vcpu, data->u.timer.expires_ns,
+ data->u.timer.expires_ns -
+ get_kvmclock_ns(vcpu->kvm));
r = 0;
break;
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 331310c29349..60814e110a54 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -41,6 +41,59 @@ static bool ex_handler_default(const struct exception_table_entry *e,
return true;
}
+/*
+ * This is the *very* rare case where we do a "load_unaligned_zeropad()"
+ * and it's a page crosser into a non-existent page.
+ *
+ * This happens when we optimistically load a pathname a word-at-a-time
+ * and the name is less than the full word and the next page is not
+ * mapped. Typically that only happens for CONFIG_DEBUG_PAGEALLOC.
+ *
+ * NOTE! The faulting instruction is always a 'mov mem,reg' load of size
+ * 'long', and the exception fixup must always point to right
+ * after the instruction.
+ */
+static bool ex_handler_zeropad(const struct exception_table_entry *e,
+ struct pt_regs *regs,
+ unsigned long fault_addr)
+{
+ struct insn insn;
+ const unsigned long mask = sizeof(long) - 1;
+ unsigned long offset, addr, next_ip, len;
+ unsigned long *reg;
+
+ next_ip = ex_fixup_addr(e);
+ len = next_ip - regs->ip;
+ if (len > MAX_INSN_SIZE)
+ return false;
+
+ if (insn_decode(&insn, (void *) regs->ip, len, INSN_MODE_KERN))
+ return false;
+ if (insn.length != len)
+ return false;
+
+ if (insn.opcode.bytes[0] != 0x8b)
+ return false;
+ if (insn.opnd_bytes != sizeof(long))
+ return false;
+
+ addr = (unsigned long) insn_get_addr_ref(&insn, regs);
+ if (addr == ~0ul)
+ return false;
+
+ offset = addr & mask;
+ addr = addr & ~mask;
+ if (fault_addr != addr + sizeof(long))
+ return false;
+
+ reg = insn_get_modrm_reg_ptr(&insn, regs);
+ if (!reg)
+ return false;
+
+ *reg = *(unsigned long *)addr >> (offset * 8);
+ return ex_handler_default(e, regs);
+}
+
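/*
 * Illustrative sketch (editor's addition, not part of this patch): the kind
 * of word-at-a-time consumer the comment above describes, assuming
 * <asm/word-at-a-time.h> is available. The load may run past the end of a
 * short name and cross into an unmapped page; ex_handler_zeropad() then
 * makes load_unaligned_zeropad() return the mapped bytes with the remainder
 * zero-filled instead of taking an unhandled fault.
 */
#include <asm/word-at-a-time.h>

static inline unsigned long first_word_of_name(const char *name)
{
	/* May read up to sizeof(long)-1 bytes past the end of the mapping. */
	return load_unaligned_zeropad(name);
}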
static bool ex_handler_fault(const struct exception_table_entry *fixup,
struct pt_regs *regs, int trapnr)
{
@@ -217,6 +270,8 @@ int fixup_exception(struct pt_regs *regs, int trapnr, unsigned long error_code,
return ex_handler_sgx(e, regs, trapnr);
case EX_TYPE_UCOPY_LEN:
return ex_handler_ucopy_len(e, regs, trapnr, reg, imm);
+ case EX_TYPE_ZEROPAD:
+ return ex_handler_zeropad(e, regs, fault_addr);
}
BUG();
}
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 971977c438fc..fa71a5d12e87 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1408,6 +1408,10 @@ good_area:
return;
}
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
/*
* If we need to retry the mmap_lock has already been released,
* and if there is a fatal signal pending there is no guarantee
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index a0d023cb4292..6b3033845c6d 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -19,44 +19,6 @@
#include <asm/tlbflush.h>
#include <asm/elf.h>
-#if 0 /* This is just for testing */
-struct page *
-follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
-{
- unsigned long start = address;
- int length = 1;
- int nr;
- struct page *page;
- struct vm_area_struct *vma;
-
- vma = find_vma(mm, addr);
- if (!vma || !is_vm_hugetlb_page(vma))
- return ERR_PTR(-EINVAL);
-
- pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
-
- /* hugetlb should be locked, and hence, prefaulted */
- WARN_ON(!pte || pte_none(*pte));
-
- page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];
-
- WARN_ON(!PageHead(page));
-
- return page;
-}
-
-int pmd_huge(pmd_t pmd)
-{
- return 0;
-}
-
-int pud_huge(pud_t pud)
-{
- return 0;
-}
-
-#else
-
/*
* pmd_huge() returns 1 if @pmd is hugetlb related entry, that is normal
* hugetlb entry or non-present (migration or hwpoisoned) hugetlb entry.
@@ -68,11 +30,16 @@ int pmd_huge(pmd_t pmd)
(pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
}
+/*
+ * pud_huge() returns 1 if @pud is hugetlb related entry, that is normal
+ * hugetlb entry or non-present (migration or hwpoisoned) hugetlb entry.
+ * Otherwise, returns 0.
+ */
int pud_huge(pud_t pud)
{
- return !!(pud_val(pud) & _PAGE_PSE);
+ return !pud_none(pud) &&
+ (pud_val(pud) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
}
-#endif
#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 39c5246964a9..0fe690ebc269 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -645,7 +645,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
pages++;
spin_lock(&init_mm.page_table_lock);
- prot = __pgprot(pgprot_val(prot) | __PAGE_KERNEL_LARGE);
+ prot = __pgprot(pgprot_val(prot) | _PAGE_PSE);
set_pte_init((pte_t *)pud,
pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
index 97452688f99f..9c4d8dbcb129 100644
--- a/arch/x86/mm/mem_encrypt_amd.c
+++ b/arch/x86/mm/mem_encrypt_amd.c
@@ -26,6 +26,7 @@
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
+#include <asm/mem_encrypt.h>
#include <asm/bootparam.h>
#include <asm/set_memory.h>
#include <asm/cacheflush.h>
@@ -486,8 +487,6 @@ void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, boo
void __init sme_early_init(void)
{
- unsigned int i;
-
if (!sme_me_mask)
return;
@@ -496,8 +495,7 @@ void __init sme_early_init(void)
__supported_pte_mask = __sme_set(__supported_pte_mask);
/* Update the protection map with memory encryption mask */
- for (i = 0; i < ARRAY_SIZE(protection_map); i++)
- protection_map[i] = pgprot_encrypted(protection_map[i]);
+ add_encrypt_protection_map();
x86_platform.guest.enc_status_change_prepare = amd_enc_status_change_prepare;
x86_platform.guest.enc_status_change_finish = amd_enc_status_change_finish;
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index e8b061557887..2aadb2019b4f 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -867,7 +867,7 @@ void debug_cpumask_set_cpu(int cpu, int node, bool enable)
return;
}
mask = node_to_cpumask_map[node];
- if (!mask) {
+ if (!cpumask_available(mask)) {
pr_err("node_to_cpumask_map[%i] NULL\n", node);
dump_stack();
return;
@@ -913,7 +913,7 @@ const struct cpumask *cpumask_of_node(int node)
dump_stack();
return cpu_none_mask;
}
- if (node_to_cpumask_map[node] == NULL) {
+ if (!cpumask_available(node_to_cpumask_map[node])) {
printk(KERN_WARNING
"cpumask_of_node(%d): no node_to_cpumask_map!\n",
node);
diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
index d5ef64ddd35e..66a209f7eb86 100644
--- a/arch/x86/mm/pat/memtype.c
+++ b/arch/x86/mm/pat/memtype.c
@@ -62,6 +62,7 @@
static bool __read_mostly pat_bp_initialized;
static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT);
+static bool __initdata pat_force_disabled = !IS_ENABLED(CONFIG_X86_PAT);
static bool __read_mostly pat_bp_enabled;
static bool __read_mostly pat_cm_initialized;
@@ -86,6 +87,7 @@ void pat_disable(const char *msg_reason)
static int __init nopat(char *str)
{
pat_disable("PAT support disabled via boot option.");
+ pat_force_disabled = true;
return 0;
}
early_param("nopat", nopat);
@@ -272,7 +274,7 @@ static void pat_ap_init(u64 pat)
wrmsrl(MSR_IA32_CR_PAT, pat);
}
-void init_cache_modes(void)
+void __init init_cache_modes(void)
{
u64 pat = 0;
@@ -313,6 +315,12 @@ void init_cache_modes(void)
*/
pat = PAT(0, WB) | PAT(1, WT) | PAT(2, UC_MINUS) | PAT(3, UC) |
PAT(4, WB) | PAT(5, WT) | PAT(6, UC_MINUS) | PAT(7, UC);
+ } else if (!pat_force_disabled && cpu_feature_enabled(X86_FEATURE_HYPERVISOR)) {
+ /*
+ * Clearly PAT is enabled underneath. Allow pat_enabled() to
+ * reflect this.
+ */
+ pat_bp_enabled = true;
}
__init_cache_modes(pat);
diff --git a/arch/x86/mm/pgprot.c b/arch/x86/mm/pgprot.c
index 763742782286..c84bd9540b16 100644
--- a/arch/x86/mm/pgprot.c
+++ b/arch/x86/mm/pgprot.c
@@ -3,6 +3,34 @@
#include <linux/export.h>
#include <linux/mm.h>
#include <asm/pgtable.h>
+#include <asm/mem_encrypt.h>
+
+static pgprot_t protection_map[16] __ro_after_init = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READONLY,
+ [VM_WRITE] = PAGE_COPY,
+ [VM_WRITE | VM_READ] = PAGE_COPY,
+ [VM_EXEC] = PAGE_READONLY_EXEC,
+ [VM_EXEC | VM_READ] = PAGE_READONLY_EXEC,
+ [VM_EXEC | VM_WRITE] = PAGE_COPY_EXEC,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_EXEC,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READONLY,
+ [VM_SHARED | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC] = PAGE_READONLY_EXEC,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_EXEC,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_EXEC,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_EXEC
+};
+
+void add_encrypt_protection_map(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(protection_map); i++)
+ protection_map[i] = pgprot_encrypted(protection_map[i]);
+}
pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
diff --git a/arch/x86/platform/olpc/olpc-xo1-sci.c b/arch/x86/platform/olpc/olpc-xo1-sci.c
index f03a6883dcc6..89f25af4b3c3 100644
--- a/arch/x86/platform/olpc/olpc-xo1-sci.c
+++ b/arch/x86/platform/olpc/olpc-xo1-sci.c
@@ -80,7 +80,7 @@ static void send_ebook_state(void)
return;
}
- if (!!test_bit(SW_TABLET_MODE, ebook_switch_idev->sw) == state)
+ if (test_bit(SW_TABLET_MODE, ebook_switch_idev->sw) == !!state)
return; /* Nothing new to report. */
input_report_switch(ebook_switch_idev, SW_TABLET_MODE, state);
diff --git a/arch/x86/um/Kconfig b/arch/x86/um/Kconfig
index 1bcd42c53039..186f13268401 100644
--- a/arch/x86/um/Kconfig
+++ b/arch/x86/um/Kconfig
@@ -32,12 +32,12 @@ config 3_LEVEL_PGTABLES
bool "Three-level pagetables" if !64BIT
default 64BIT
help
- Three-level pagetables will let UML have more than 4G of physical
- memory. All the memory that can't be mapped directly will be treated
- as high memory.
+ Three-level pagetables will let UML have more than 4G of physical
+ memory. All the memory that can't be mapped directly will be treated
+ as high memory.
- However, this it experimental on 32-bit architectures, so if unsure say
- N (on x86-64 it's automatically enabled, instead, as it's safe there).
+ However, this is experimental on 32-bit architectures, so if unsure say
+ N (on x86-64 it's automatically enabled, instead, as it's safe there).
config ARCH_HAS_SC_SIGNALS
def_bool !64BIT
diff --git a/arch/x86/um/Makefile b/arch/x86/um/Makefile
index ba5789c35809..3d5cd2e57820 100644
--- a/arch/x86/um/Makefile
+++ b/arch/x86/um/Makefile
@@ -28,7 +28,9 @@ else
obj-y += syscalls_64.o vdso/
-subarch-y = ../lib/csum-partial_64.o ../lib/memcpy_64.o ../entry/thunk_64.o
+subarch-y = ../lib/csum-partial_64.o ../lib/memcpy_64.o \
+ ../lib/memmove_64.o ../lib/memset_64.o
+subarch-$(CONFIG_PREEMPTION) += ../entry/thunk_64.o
endif
diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c
index 19c5dbd46770..cafd01f730da 100644
--- a/arch/x86/um/mem_32.c
+++ b/arch/x86/um/mem_32.c
@@ -17,7 +17,7 @@ static int __init gate_vma_init(void)
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
- gate_vma.vm_page_prot = __P101;
+ gate_vma.vm_page_prot = PAGE_READONLY;
return 0;
}
diff --git a/arch/x86/um/shared/sysdep/stub_64.h b/arch/x86/um/shared/sysdep/stub_64.h
index e9c4b2b38803..92ea1670cf1c 100644
--- a/arch/x86/um/shared/sysdep/stub_64.h
+++ b/arch/x86/um/shared/sysdep/stub_64.h
@@ -8,6 +8,7 @@
#include <sysdep/ptrace_user.h>
#include <generated/asm-offsets.h>
+#include <linux/stddef.h>
#define STUB_MMAP_NR __NR_mmap
#define MMAP_OFFSET(o) (o)
diff --git a/arch/x86/um/sysrq_64.c b/arch/x86/um/sysrq_64.c
index 903ad91b624f..ef1eb4f4f612 100644
--- a/arch/x86/um/sysrq_64.c
+++ b/arch/x86/um/sysrq_64.c
@@ -19,8 +19,8 @@ void show_regs(struct pt_regs *regs)
print_modules();
printk(KERN_INFO "Pid: %d, comm: %.20s %s %s\n", task_pid_nr(current),
current->comm, print_tainted(), init_utsname()->release);
- printk(KERN_INFO "RIP: %04lx:[<%016lx>]\n", PT_REGS_CS(regs) & 0xffff,
- PT_REGS_IP(regs));
+ printk(KERN_INFO "RIP: %04lx:%pS\n", PT_REGS_CS(regs) & 0xffff,
+ (void *)PT_REGS_IP(regs));
printk(KERN_INFO "RSP: %016lx EFLAGS: %08lx\n", PT_REGS_SP(regs),
PT_REGS_EFLAGS(regs));
printk(KERN_INFO "RAX: %016lx RBX: %016lx RCX: %016lx\n",
diff --git a/arch/x86/um/vdso/Makefile b/arch/x86/um/vdso/Makefile
index 5943387e3f35..8c0396fd0e6f 100644
--- a/arch/x86/um/vdso/Makefile
+++ b/arch/x86/um/vdso/Makefile
@@ -3,6 +3,9 @@
# Building vDSO images for x86.
#
+# Do not instrument the vDSO because KASAN is not compatible with user mode
+KASAN_SANITIZE := n
+
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
KCOV_INSTRUMENT := n
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 30c6e986a6cd..b8db2148c07d 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -51,7 +51,7 @@ EXPORT_SYMBOL_GPL(xen_start_info);
struct shared_info xen_dummy_shared_info;
-__read_mostly int xen_have_vector_callback;
+__read_mostly bool xen_have_vector_callback = true;
EXPORT_SYMBOL_GPL(xen_have_vector_callback);
/*
diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c
index 28762f800596..1c1ac418484b 100644
--- a/arch/x86/xen/enlighten_hvm.c
+++ b/arch/x86/xen/enlighten_hvm.c
@@ -8,6 +8,8 @@
#include <xen/features.h>
#include <xen/events.h>
+#include <xen/hvm.h>
+#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/memory.h>
#include <asm/apic.h>
@@ -31,6 +33,9 @@
static unsigned long shared_info_pfn;
+__ro_after_init bool xen_percpu_upcall;
+EXPORT_SYMBOL_GPL(xen_percpu_upcall);
+
void xen_hvm_init_shared_info(void)
{
struct xen_add_to_physmap xatp;
@@ -126,6 +131,9 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_xen_hvm_callback)
{
struct pt_regs *old_regs = set_irq_regs(regs);
+ if (xen_percpu_upcall)
+ ack_APIC_irq();
+
inc_irq_stat(irq_hv_callback_count);
xen_hvm_evtchn_do_upcall();
@@ -169,6 +177,15 @@ static int xen_cpu_up_prepare_hvm(unsigned int cpu)
if (!xen_have_vector_callback)
return 0;
+ if (xen_percpu_upcall) {
+ rc = xen_set_upcall_vector(cpu);
+ if (rc) {
+ WARN(1, "HVMOP_set_evtchn_upcall_vector"
+ " for CPU %d failed: %d\n", cpu, rc);
+ return rc;
+ }
+ }
+
if (xen_feature(XENFEAT_hvm_safe_pvclock))
xen_setup_timer(cpu);
@@ -189,8 +206,6 @@ static int xen_cpu_dead_hvm(unsigned int cpu)
return 0;
}
-static bool no_vector_callback __initdata;
-
static void __init xen_hvm_guest_init(void)
{
if (xen_pv_domain())
@@ -213,9 +228,6 @@ static void __init xen_hvm_guest_init(void)
xen_panic_handler_init();
- if (!no_vector_callback && xen_feature(XENFEAT_hvm_callback_vector))
- xen_have_vector_callback = 1;
-
xen_hvm_smp_init();
WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_hvm, xen_cpu_dead_hvm));
xen_unplug_emulated_devices();
@@ -241,7 +253,7 @@ early_param("xen_nopv", xen_parse_nopv);
static __init int xen_parse_no_vector_callback(char *arg)
{
- no_vector_callback = true;
+ xen_have_vector_callback = false;
return 0;
}
early_param("xen_no_vector_callback", xen_parse_no_vector_callback);
diff --git a/arch/x86/xen/suspend_hvm.c b/arch/x86/xen/suspend_hvm.c
index 9d548b0c772f..0c4f7554b7cc 100644
--- a/arch/x86/xen/suspend_hvm.c
+++ b/arch/x86/xen/suspend_hvm.c
@@ -5,6 +5,7 @@
#include <xen/hvm.h>
#include <xen/features.h>
#include <xen/interface/features.h>
+#include <xen/events.h>
#include "xen-ops.h"
@@ -14,6 +15,13 @@ void xen_hvm_post_suspend(int suspend_cancelled)
xen_hvm_init_shared_info();
xen_vcpu_restore();
}
- xen_setup_callback_vector();
+ if (xen_percpu_upcall) {
+ unsigned int cpu;
+
+ for_each_online_cpu(cpu)
+ BUG_ON(xen_set_upcall_vector(cpu));
+ } else {
+ xen_setup_callback_vector();
+ }
xen_unplug_emulated_devices();
}
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 14e547ca2af8..12ac277282ba 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -52,7 +52,6 @@ config XTENSA
select MODULES_USE_ELF_RELA
select PERF_USE_VMALLOC
select TRACE_IRQFLAGS_SUPPORT
- select VIRT_TO_BUS
help
Xtensa processors are 32-bit RISC machines designed by Tensilica
primarily for embedded systems. These processors are both
diff --git a/arch/xtensa/include/asm/dma.h b/arch/xtensa/include/asm/dma.h
index bb099a373b5a..172644539032 100644
--- a/arch/xtensa/include/asm/dma.h
+++ b/arch/xtensa/include/asm/dma.h
@@ -52,11 +52,4 @@
extern int request_dma(unsigned int dmanr, const char * device_id);
extern void free_dma(unsigned int dmanr);
-#ifdef CONFIG_PCI
-extern int isa_dma_bridge_buggy;
-#else
-#define isa_dma_bridge_buggy (0)
-#endif
-
-
#endif
diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h
index 54188e69b988..a5b707e1c0f4 100644
--- a/arch/xtensa/include/asm/io.h
+++ b/arch/xtensa/include/asm/io.h
@@ -63,9 +63,6 @@ static inline void iounmap(volatile void __iomem *addr)
xtensa_iounmap(addr);
}
-#define virt_to_bus virt_to_phys
-#define bus_to_virt phys_to_virt
-
#endif /* CONFIG_MMU */
#include <asm-generic/io.h>
diff --git a/arch/xtensa/include/asm/pci.h b/arch/xtensa/include/asm/pci.h
index 8e2b48a268db..b56de9635b6c 100644
--- a/arch/xtensa/include/asm/pci.h
+++ b/arch/xtensa/include/asm/pci.h
@@ -43,7 +43,4 @@
#define ARCH_GENERIC_PCI_MMAP_RESOURCE 1
#define arch_can_pci_mmap_io() 1
-/* Generic PCI */
-#include <asm-generic/pci.h>
-
#endif /* _XTENSA_PCI_H */
diff --git a/arch/xtensa/include/asm/pgalloc.h b/arch/xtensa/include/asm/pgalloc.h
index eeb2de3a89e5..7fc0f9126dd3 100644
--- a/arch/xtensa/include/asm/pgalloc.h
+++ b/arch/xtensa/include/asm/pgalloc.h
@@ -29,7 +29,7 @@
static inline pgd_t*
pgd_alloc(struct mm_struct *mm)
{
- return (pgd_t*) __get_free_pages(GFP_KERNEL | __GFP_ZERO, PGD_ORDER);
+ return (pgd_t*) __get_free_page(GFP_KERNEL | __GFP_ZERO);
}
static inline void ptes_clear(pte_t *ptep)
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index 0a91376131c5..54f577c13afa 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -57,7 +57,6 @@
#define PTRS_PER_PTE 1024
#define PTRS_PER_PTE_SHIFT 10
#define PTRS_PER_PGD 1024
-#define PGD_ORDER 0
#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT)
@@ -200,24 +199,6 @@
* What follows is the closest we can get by reasonable means..
* See linux/mm/mmap.c for protection_map[] array that uses these definitions.
*/
-#define __P000 PAGE_NONE /* private --- */
-#define __P001 PAGE_READONLY /* private --r */
-#define __P010 PAGE_COPY /* private -w- */
-#define __P011 PAGE_COPY /* private -wr */
-#define __P100 PAGE_READONLY_EXEC /* private x-- */
-#define __P101 PAGE_READONLY_EXEC /* private x-r */
-#define __P110 PAGE_COPY_EXEC /* private xw- */
-#define __P111 PAGE_COPY_EXEC /* private xwr */
-
-#define __S000 PAGE_NONE /* shared --- */
-#define __S001 PAGE_READONLY /* shared --r */
-#define __S010 PAGE_SHARED /* shared -w- */
-#define __S011 PAGE_SHARED /* shared -wr */
-#define __S100 PAGE_READONLY_EXEC /* shared x-- */
-#define __S101 PAGE_READONLY_EXEC /* shared x-r */
-#define __S110 PAGE_SHARED_EXEC /* shared xw- */
-#define __S111 PAGE_SHARED_EXEC /* shared xwr */
-
#ifndef __ASSEMBLY__
#define pte_ERROR(e) \
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index 16f0a5ff5799..8c781b05c0bd 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -172,6 +172,10 @@ good_area:
return;
}
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 6a32b2cf2718..b2587a1a7c46 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -216,3 +216,25 @@ static int __init parse_memmap_opt(char *str)
return 0;
}
early_param("memmap", parse_memmap_opt);
+
+#ifdef CONFIG_MMU
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READONLY,
+ [VM_WRITE] = PAGE_COPY,
+ [VM_WRITE | VM_READ] = PAGE_COPY,
+ [VM_EXEC] = PAGE_READONLY_EXEC,
+ [VM_EXEC | VM_READ] = PAGE_READONLY_EXEC,
+ [VM_EXEC | VM_WRITE] = PAGE_COPY_EXEC,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_EXEC,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READONLY,
+ [VM_SHARED | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC] = PAGE_READONLY_EXEC,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_EXEC,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_EXEC,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_EXEC
+};
+DECLARE_VM_GET_PAGE_PROT
+#endif
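
For context, the DECLARE_VM_GET_PAGE_PROT used above is the generic helper the per-arch protection_map[] conversions rely on; it is expected to expand to the plain table lookup below (a sketch based on the generic pgtable header, not part of this hunk):

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return protection_map[vm_flags &
			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}
EXPORT_SYMBOL(vm_get_page_prot);
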
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 32929c89ba8a..3f5685c00e36 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -134,7 +134,7 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
iv = bip->bip_vec + bip->bip_vcnt;
if (bip->bip_vcnt &&
- bvec_gap_to_prev(bdev_get_queue(bio->bi_bdev),
+ bvec_gap_to_prev(&bdev_get_queue(bio->bi_bdev)->limits,
&bip->bip_vec[bip->bip_vcnt - 1], offset))
return 0;
diff --git a/block/bio.c b/block/bio.c
index 6f9f883f9a65..3d3a2678fea2 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -965,7 +965,7 @@ int bio_add_hw_page(struct request_queue *q, struct bio *bio,
* would create a gap, disallow it.
*/
bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
- if (bvec_gap_to_prev(q, bvec, offset))
+ if (bvec_gap_to_prev(&q->limits, bvec, offset))
return 0;
}
@@ -1151,22 +1151,12 @@ void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
bio_set_flag(bio, BIO_CLONED);
}
-static void bio_put_pages(struct page **pages, size_t size, size_t off)
-{
- size_t i, nr = DIV_ROUND_UP(size + (off & ~PAGE_MASK), PAGE_SIZE);
-
- for (i = 0; i < nr; i++)
- put_page(pages[i]);
-}
-
static int bio_iov_add_page(struct bio *bio, struct page *page,
unsigned int len, unsigned int offset)
{
bool same_page = false;
if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
- if (WARN_ON_ONCE(bio_full(bio, len)))
- return -EINVAL;
__bio_add_page(bio, page, len, offset);
return 0;
}
@@ -1209,8 +1199,9 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
struct page **pages = (struct page **)bv;
ssize_t size, left;
- unsigned len, i;
- size_t offset;
+ unsigned len, i = 0;
+ size_t offset, trim;
+ int ret = 0;
/*
* Move page array up in the allocated memory for the bio vecs as far as
@@ -1227,32 +1218,43 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
* result to ensure the bio's total size is correct. The remainder of
* the iov data will be picked up in the next bio iteration.
*/
- size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
- if (size > 0)
- size = ALIGN_DOWN(size, bdev_logical_block_size(bio->bi_bdev));
+ size = iov_iter_get_pages2(iter, pages, UINT_MAX - bio->bi_iter.bi_size,
+ nr_pages, &offset);
if (unlikely(size <= 0))
return size ? size : -EFAULT;
+ nr_pages = DIV_ROUND_UP(offset + size, PAGE_SIZE);
+
+ trim = size & (bdev_logical_block_size(bio->bi_bdev) - 1);
+ iov_iter_revert(iter, trim);
+
+ size -= trim;
+ if (unlikely(!size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
for (left = size, i = 0; left > 0; left -= len, i++) {
struct page *page = pages[i];
- int ret;
len = min_t(size_t, PAGE_SIZE - offset, left);
- if (bio_op(bio) == REQ_OP_ZONE_APPEND)
+ if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
ret = bio_iov_add_zone_append_page(bio, page, len,
offset);
- else
- ret = bio_iov_add_page(bio, page, len, offset);
+ if (ret)
+ break;
+ } else
+ bio_iov_add_page(bio, page, len, offset);
- if (ret) {
- bio_put_pages(pages + i, left, offset);
- return ret;
- }
offset = 0;
}
- iov_iter_advance(iter, size);
- return 0;
+ iov_iter_revert(iter, left);
+out:
+ while (i < nr_pages)
+ put_page(pages[i++]);
+
+ return ret;
}
/**
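
The move from iov_iter_get_pages() to iov_iter_get_pages2() above also changes the calling convention: the new helper advances the iterator by the number of bytes it pinned, so callers give back any unused tail with iov_iter_revert() rather than calling iov_iter_advance() afterwards. A minimal sketch of that pattern, with illustrative names for the buffer, limits, and block size:

/* Sketch of the iov_iter_get_pages2() pattern (names are illustrative). */
ssize_t n = iov_iter_get_pages2(iter, pages, maxsize, maxpages, &offset);
if (n > 0) {
	size_t trim = n & (lbs - 1);	/* lbs: logical block size */

	iov_iter_revert(iter, trim);	/* give back the unaligned tail */
	n -= trim;
}
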
diff --git a/block/blk-core.c b/block/blk-core.c
index 3d286a256d3d..a0d1104c5590 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -377,7 +377,6 @@ static void blk_timeout_work(struct work_struct *work)
struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
{
struct request_queue *q;
- int ret;
q = kmem_cache_alloc_node(blk_get_queue_kmem_cache(alloc_srcu),
GFP_KERNEL | __GFP_ZERO, node_id);
@@ -396,13 +395,9 @@ struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
if (q->id < 0)
goto fail_srcu;
- ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, 0);
- if (ret)
- goto fail_id;
-
q->stats = blk_alloc_queue_stats();
if (!q->stats)
- goto fail_split;
+ goto fail_id;
q->node = node_id;
@@ -439,8 +434,6 @@ struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
fail_stats:
blk_free_queue_stats(q->stats);
-fail_split:
- bioset_exit(&q->bio_split);
fail_id:
ida_free(&blk_queue_ida, q->id);
fail_srcu:
diff --git a/block/blk-map.c b/block/blk-map.c
index df8b066cd548..7196a6b64c80 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -254,7 +254,7 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
size_t offs, added = 0;
int npages;
- bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
+ bytes = iov_iter_get_pages_alloc2(iter, &pages, LONG_MAX, &offs);
if (unlikely(bytes <= 0)) {
ret = bytes ? bytes : -EFAULT;
goto out_unmap;
@@ -284,7 +284,6 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
bytes -= n;
offs = 0;
}
- iov_iter_advance(iter, added);
}
/*
* release the pages we didn't map into the bio, if any
@@ -293,8 +292,10 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
put_page(pages[j++]);
kvfree(pages);
/* couldn't stuff something into bio? */
- if (bytes)
+ if (bytes) {
+ iov_iter_revert(iter, bytes);
break;
+ }
}
ret = blk_rq_append_bio(rq, bio);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 4c8a699754c9..ff04e9290715 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -82,7 +82,7 @@ static inline bool bio_will_gap(struct request_queue *q,
bio_get_first_bvec(next, &nb);
if (biovec_phys_mergeable(q, &pb, &nb))
return false;
- return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
+ return __bvec_gap_to_prev(&q->limits, &pb, nb.bv_offset);
}
static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
@@ -95,23 +95,30 @@ static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
return bio_will_gap(req->q, NULL, bio, req->bio);
}
-static struct bio *blk_bio_discard_split(struct request_queue *q,
- struct bio *bio,
- struct bio_set *bs,
- unsigned *nsegs)
+/*
+ * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
+ * is defined as 'unsigned int', and it also has to be aligned to the
+ * logical block size, which is the minimum unit accepted by hardware.
+ */
+static unsigned int bio_allowed_max_sectors(struct queue_limits *lim)
+{
+ return round_down(UINT_MAX, lim->logical_block_size) >> SECTOR_SHIFT;
+}
+
+static struct bio *bio_split_discard(struct bio *bio, struct queue_limits *lim,
+ unsigned *nsegs, struct bio_set *bs)
{
unsigned int max_discard_sectors, granularity;
- int alignment;
sector_t tmp;
unsigned split_sectors;
*nsegs = 1;
/* Zero-sector (unknown) and one-sector granularities are the same. */
- granularity = max(q->limits.discard_granularity >> 9, 1U);
+ granularity = max(lim->discard_granularity >> 9, 1U);
- max_discard_sectors = min(q->limits.max_discard_sectors,
- bio_allowed_max_sectors(q));
+ max_discard_sectors =
+ min(lim->max_discard_sectors, bio_allowed_max_sectors(lim));
max_discard_sectors -= max_discard_sectors % granularity;
if (unlikely(!max_discard_sectors)) {
@@ -128,9 +135,8 @@ static struct bio *blk_bio_discard_split(struct request_queue *q,
* If the next starting sector would be misaligned, stop the discard at
* the previous aligned sector.
*/
- alignment = (q->limits.discard_alignment >> 9) % granularity;
-
- tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
+ tmp = bio->bi_iter.bi_sector + split_sectors -
+ ((lim->discard_alignment >> 9) % granularity);
tmp = sector_div(tmp, granularity);
if (split_sectors > tmp)
@@ -139,18 +145,15 @@ static struct bio *blk_bio_discard_split(struct request_queue *q,
return bio_split(bio, split_sectors, GFP_NOIO, bs);
}
-static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
- struct bio *bio, struct bio_set *bs, unsigned *nsegs)
+static struct bio *bio_split_write_zeroes(struct bio *bio,
+ struct queue_limits *lim, unsigned *nsegs, struct bio_set *bs)
{
*nsegs = 0;
-
- if (!q->limits.max_write_zeroes_sectors)
+ if (!lim->max_write_zeroes_sectors)
return NULL;
-
- if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
+ if (bio_sectors(bio) <= lim->max_write_zeroes_sectors)
return NULL;
-
- return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
+ return bio_split(bio, lim->max_write_zeroes_sectors, GFP_NOIO, bs);
}
/*
@@ -161,17 +164,17 @@ static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
* requests that are submitted to a block device if the start of a bio is not
* aligned to a physical block boundary.
*/
-static inline unsigned get_max_io_size(struct request_queue *q,
- struct bio *bio)
+static inline unsigned get_max_io_size(struct bio *bio,
+ struct queue_limits *lim)
{
- unsigned pbs = queue_physical_block_size(q) >> SECTOR_SHIFT;
- unsigned lbs = queue_logical_block_size(q) >> SECTOR_SHIFT;
- unsigned max_sectors = queue_max_sectors(q), start, end;
+ unsigned pbs = lim->physical_block_size >> SECTOR_SHIFT;
+ unsigned lbs = lim->logical_block_size >> SECTOR_SHIFT;
+ unsigned max_sectors = lim->max_sectors, start, end;
- if (q->limits.chunk_sectors) {
+ if (lim->chunk_sectors) {
max_sectors = min(max_sectors,
blk_chunk_sectors_left(bio->bi_iter.bi_sector,
- q->limits.chunk_sectors));
+ lim->chunk_sectors));
}
start = bio->bi_iter.bi_sector & (pbs - 1);
@@ -181,11 +184,10 @@ static inline unsigned get_max_io_size(struct request_queue *q,
return max_sectors & ~(lbs - 1);
}
-static inline unsigned get_max_segment_size(const struct request_queue *q,
- struct page *start_page,
- unsigned long offset)
+static inline unsigned get_max_segment_size(struct queue_limits *lim,
+ struct page *start_page, unsigned long offset)
{
- unsigned long mask = queue_segment_boundary(q);
+ unsigned long mask = lim->seg_boundary_mask;
offset = mask & (page_to_phys(start_page) + offset);
@@ -194,12 +196,12 @@ static inline unsigned get_max_segment_size(const struct request_queue *q,
* on 32bit arch, use queue's max segment size when that happens.
*/
return min_not_zero(mask - offset + 1,
- (unsigned long)queue_max_segment_size(q));
+ (unsigned long)lim->max_segment_size);
}
/**
* bvec_split_segs - verify whether or not a bvec should be split in the middle
- * @q: [in] request queue associated with the bio associated with @bv
+ * @lim: [in] queue limits to split based on
* @bv: [in] bvec to examine
* @nsegs: [in,out] Number of segments in the bio being built. Incremented
* by the number of segments from @bv that may be appended to that
@@ -217,10 +219,9 @@ static inline unsigned get_max_segment_size(const struct request_queue *q,
* *@nsegs segments and *@sectors sectors would make that bio unacceptable for
* the block driver.
*/
-static bool bvec_split_segs(const struct request_queue *q,
- const struct bio_vec *bv, unsigned *nsegs,
- unsigned *bytes, unsigned max_segs,
- unsigned max_bytes)
+static bool bvec_split_segs(struct queue_limits *lim, const struct bio_vec *bv,
+ unsigned *nsegs, unsigned *bytes, unsigned max_segs,
+ unsigned max_bytes)
{
unsigned max_len = min(max_bytes, UINT_MAX) - *bytes;
unsigned len = min(bv->bv_len, max_len);
@@ -228,7 +229,7 @@ static bool bvec_split_segs(const struct request_queue *q,
unsigned seg_size = 0;
while (len && *nsegs < max_segs) {
- seg_size = get_max_segment_size(q, bv->bv_page,
+ seg_size = get_max_segment_size(lim, bv->bv_page,
bv->bv_offset + total_len);
seg_size = min(seg_size, len);
@@ -236,7 +237,7 @@ static bool bvec_split_segs(const struct request_queue *q,
total_len += seg_size;
len -= seg_size;
- if ((bv->bv_offset + total_len) & queue_virt_boundary(q))
+ if ((bv->bv_offset + total_len) & lim->virt_boundary_mask)
break;
}
@@ -247,16 +248,17 @@ static bool bvec_split_segs(const struct request_queue *q,
}
/**
- * blk_bio_segment_split - split a bio in two bios
- * @q: [in] request queue pointer
+ * bio_split_rw - split a bio in two bios
* @bio: [in] bio to be split
- * @bs: [in] bio set to allocate the clone from
+ * @lim: [in] queue limits to split based on
* @segs: [out] number of segments in the bio with the first half of the sectors
+ * @bs: [in] bio set to allocate the clone from
+ * @max_bytes: [in] maximum number of bytes per bio
*
* Clone @bio, update the bi_iter of the clone to represent the first sectors
* of @bio and update @bio->bi_iter to represent the remaining sectors. The
* following is guaranteed for the cloned bio:
- * - That it has at most get_max_io_size(@q, @bio) sectors.
+ * - That it has at most @max_bytes worth of data
* - That it has at most queue_max_segments(@q) segments.
*
* Except for discard requests the cloned bio will point at the bi_io_vec of
@@ -265,33 +267,30 @@ static bool bvec_split_segs(const struct request_queue *q,
* responsible for ensuring that @bs is only destroyed after processing of the
* split bio has finished.
*/
-static struct bio *blk_bio_segment_split(struct request_queue *q,
- struct bio *bio,
- struct bio_set *bs,
- unsigned *segs)
+static struct bio *bio_split_rw(struct bio *bio, struct queue_limits *lim,
+ unsigned *segs, struct bio_set *bs, unsigned max_bytes)
{
struct bio_vec bv, bvprv, *bvprvp = NULL;
struct bvec_iter iter;
unsigned nsegs = 0, bytes = 0;
- const unsigned max_bytes = get_max_io_size(q, bio) << 9;
- const unsigned max_segs = queue_max_segments(q);
bio_for_each_bvec(bv, bio, iter) {
/*
* If the queue doesn't support SG gaps and adding this
* offset would create a gap, disallow it.
*/
- if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
+ if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv.bv_offset))
goto split;
- if (nsegs < max_segs &&
+ if (nsegs < lim->max_segments &&
bytes + bv.bv_len <= max_bytes &&
bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
nsegs++;
bytes += bv.bv_len;
- } else if (bvec_split_segs(q, &bv, &nsegs, &bytes, max_segs,
- max_bytes)) {
- goto split;
+ } else {
+ if (bvec_split_segs(lim, &bv, &nsegs, &bytes,
+ lim->max_segments, max_bytes))
+ goto split;
}
bvprv = bv;
@@ -308,7 +307,7 @@ split:
* split size so that each bio is properly block size aligned, even if
* we do not use the full hardware limits.
*/
- bytes = ALIGN_DOWN(bytes, queue_logical_block_size(q));
+ bytes = ALIGN_DOWN(bytes, lim->logical_block_size);
/*
* Bio splitting may cause subtle trouble such as hang when doing sync
@@ -320,34 +319,35 @@ split:
}
/**
- * __blk_queue_split - split a bio and submit the second half
- * @q: [in] request_queue new bio is being queued at
- * @bio: [in, out] bio to be split
- * @nr_segs: [out] number of segments in the first bio
+ * __bio_split_to_limits - split a bio to fit the queue limits
+ * @bio: bio to be split
+ * @lim: queue limits to split based on
+ * @nr_segs: returns the number of segments in the returned bio
+ *
+ * Check if @bio needs splitting based on the queue limits, and if so split off
+ * a bio fitting the limits from the beginning of @bio and return it. @bio is
+ * shortened to the remainder and re-submitted.
*
- * Split a bio into two bios, chain the two bios, submit the second half and
- * store a pointer to the first half in *@bio. If the second bio is still too
- * big it will be split by a recursive call to this function. Since this
- * function may allocate a new bio from q->bio_split, it is the responsibility
- * of the caller to ensure that q->bio_split is only released after processing
- * of the split bio has finished.
+ * The split bio is allocated from @bio->bi_bdev->bd_disk->bio_split, which is
+ * provided by the block layer.
*/
-void __blk_queue_split(struct request_queue *q, struct bio **bio,
+struct bio *__bio_split_to_limits(struct bio *bio, struct queue_limits *lim,
unsigned int *nr_segs)
{
- struct bio *split = NULL;
+ struct bio_set *bs = &bio->bi_bdev->bd_disk->bio_split;
+ struct bio *split;
- switch (bio_op(*bio)) {
+ switch (bio_op(bio)) {
case REQ_OP_DISCARD:
case REQ_OP_SECURE_ERASE:
- split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs);
+ split = bio_split_discard(bio, lim, nr_segs, bs);
break;
case REQ_OP_WRITE_ZEROES:
- split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split,
- nr_segs);
+ split = bio_split_write_zeroes(bio, lim, nr_segs, bs);
break;
default:
- split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
+ split = bio_split_rw(bio, lim, nr_segs, bs,
+ get_max_io_size(bio, lim) << SECTOR_SHIFT);
break;
}
@@ -356,32 +356,35 @@ void __blk_queue_split(struct request_queue *q, struct bio **bio,
split->bi_opf |= REQ_NOMERGE;
blkcg_bio_issue_init(split);
- bio_chain(split, *bio);
- trace_block_split(split, (*bio)->bi_iter.bi_sector);
- submit_bio_noacct(*bio);
- *bio = split;
+ bio_chain(split, bio);
+ trace_block_split(split, bio->bi_iter.bi_sector);
+ submit_bio_noacct(bio);
+ return split;
}
+ return bio;
}
/**
- * blk_queue_split - split a bio and submit the second half
- * @bio: [in, out] bio to be split
+ * bio_split_to_limits - split a bio to fit the queue limits
+ * @bio: bio to be split
+ *
+ * Check if @bio needs splitting based on the queue limits of @bio->bi_bdev, and
+ * if so split off a bio fitting the limits from the beginning of @bio and
+ * return it. @bio is shortened to the remainder and re-submitted.
*
- * Split a bio into two bios, chains the two bios, submit the second half and
- * store a pointer to the first half in *@bio. Since this function may allocate
- * a new bio from q->bio_split, it is the responsibility of the caller to ensure
- * that q->bio_split is only released after processing of the split bio has
- * finished.
+ * The split bio is allocated from @bio->bi_bdev->bd_disk->bio_split, which is
+ * provided by the block layer.
*/
-void blk_queue_split(struct bio **bio)
+struct bio *bio_split_to_limits(struct bio *bio)
{
- struct request_queue *q = bdev_get_queue((*bio)->bi_bdev);
+ struct queue_limits *lim = &bdev_get_queue(bio->bi_bdev)->limits;
unsigned int nr_segs;
- if (blk_may_split(q, *bio))
- __blk_queue_split(q, bio, &nr_segs);
+ if (bio_may_exceed_limits(bio, lim))
+ return __bio_split_to_limits(bio, lim, &nr_segs);
+ return bio;
}
-EXPORT_SYMBOL(blk_queue_split);
+EXPORT_SYMBOL(bio_split_to_limits);
unsigned int blk_recalc_rq_segments(struct request *rq)
{
@@ -411,7 +414,7 @@ unsigned int blk_recalc_rq_segments(struct request *rq)
}
rq_for_each_bvec(bv, rq, iter)
- bvec_split_segs(rq->q, &bv, &nr_phys_segs, &bytes,
+ bvec_split_segs(&rq->q->limits, &bv, &nr_phys_segs, &bytes,
UINT_MAX, UINT_MAX);
return nr_phys_segs;
}
@@ -442,8 +445,8 @@ static unsigned blk_bvec_map_sg(struct request_queue *q,
while (nbytes > 0) {
unsigned offset = bvec->bv_offset + total;
- unsigned len = min(get_max_segment_size(q, bvec->bv_page,
- offset), nbytes);
+ unsigned len = min(get_max_segment_size(&q->limits,
+ bvec->bv_page, offset), nbytes);
struct page *page = bvec->bv_page;
/*
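
With the interface change above, callers no longer pass a request_queue or a bio double pointer. A hedged sketch of how a bio-based driver's ->submit_bio() handler would be expected to use the new helper (the driver and function names are hypothetical):

/* Sketch: hypothetical bio-based driver adapting to bio_split_to_limits(). */
static void my_submit_bio(struct bio *bio)
{
	/* Split against the limits of bio->bi_bdev; the remainder, if any,
	 * is re-submitted by the block layer and only the fitting head is
	 * returned here. */
	bio = bio_split_to_limits(bio);

	/* ... continue processing the (possibly shortened) bio ... */
}
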
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 70177ee74295..c96c8c4f751b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1931,7 +1931,8 @@ out:
/* If we didn't flush the entire list, we could have told the driver
* there was more coming, but that turned out to be a lie.
*/
- if ((!list_empty(list) || errors) && q->mq_ops->commit_rqs && queued)
+ if ((!list_empty(list) || errors || needs_resource ||
+ ret == BLK_STS_DEV_RESOURCE) && q->mq_ops->commit_rqs && queued)
q->mq_ops->commit_rqs(hctx);
/*
* Any items that need requeuing? Stuff them into hctx->dispatch,
@@ -2229,26 +2230,6 @@ void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
}
EXPORT_SYMBOL(blk_mq_delay_run_hw_queues);
-/**
- * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
- * @q: request queue.
- *
- * The caller is responsible for serializing this function against
- * blk_mq_{start,stop}_hw_queue().
- */
-bool blk_mq_queue_stopped(struct request_queue *q)
-{
- struct blk_mq_hw_ctx *hctx;
- unsigned long i;
-
- queue_for_each_hw_ctx(q, hctx, i)
- if (blk_mq_hctx_stopped(hctx))
- return true;
-
- return false;
-}
-EXPORT_SYMBOL(blk_mq_queue_stopped);
-
/*
* This function is often used for pausing .queue_rq() by driver when
* there isn't enough resource or some conditions aren't satisfied, and
@@ -2570,7 +2551,7 @@ static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule)
break;
case BLK_STS_RESOURCE:
case BLK_STS_DEV_RESOURCE:
- blk_mq_request_bypass_insert(rq, false, last);
+ blk_mq_request_bypass_insert(rq, false, true);
blk_mq_commit_rqs(hctx, &queued, from_schedule);
return;
default:
@@ -2680,6 +2661,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
list_del_init(&rq->queuelist);
ret = blk_mq_request_issue_directly(rq, list_empty(list));
if (ret != BLK_STS_OK) {
+ errors++;
if (ret == BLK_STS_RESOURCE ||
ret == BLK_STS_DEV_RESOURCE) {
blk_mq_request_bypass_insert(rq, false,
@@ -2687,7 +2669,6 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
break;
}
blk_mq_end_request(rq, ret);
- errors++;
} else
queued++;
}
@@ -2815,9 +2796,9 @@ void blk_mq_submit_bio(struct bio *bio)
unsigned int nr_segs = 1;
blk_status_t ret;
- blk_queue_bounce(q, &bio);
- if (blk_may_split(q, bio))
- __blk_queue_split(q, &bio, &nr_segs);
+ bio = blk_queue_bounce(bio, q);
+ if (bio_may_exceed_limits(bio, &q->limits))
+ bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
if (!bio_integrity_prep(bio))
return;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index c0303026752d..e1f009aba6fd 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -779,8 +779,6 @@ static void blk_release_queue(struct kobject *kobj)
if (queue_is_mq(q))
blk_mq_release(q);
- bioset_exit(&q->bio_split);
-
if (blk_queue_has_srcu(q))
cleanup_srcu_struct(q->srcu);
diff --git a/block/blk.h b/block/blk.h
index 1d83b1d41cd1..d7142c4d2fef 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -97,23 +97,23 @@ static inline bool biovec_phys_mergeable(struct request_queue *q,
return true;
}
-static inline bool __bvec_gap_to_prev(struct request_queue *q,
+static inline bool __bvec_gap_to_prev(struct queue_limits *lim,
struct bio_vec *bprv, unsigned int offset)
{
- return (offset & queue_virt_boundary(q)) ||
- ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
+ return (offset & lim->virt_boundary_mask) ||
+ ((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask);
}
/*
* Check if adding a bio_vec after bprv with offset would create a gap in
* the SG list. Most drivers don't care about this, but some do.
*/
-static inline bool bvec_gap_to_prev(struct request_queue *q,
+static inline bool bvec_gap_to_prev(struct queue_limits *lim,
struct bio_vec *bprv, unsigned int offset)
{
- if (!queue_virt_boundary(q))
+ if (!lim->virt_boundary_mask)
return false;
- return __bvec_gap_to_prev(q, bprv, offset);
+ return __bvec_gap_to_prev(lim, bprv, offset);
}
static inline bool rq_mergeable(struct request *rq)
@@ -189,7 +189,8 @@ static inline bool integrity_req_gap_back_merge(struct request *req,
struct bio_integrity_payload *bip = bio_integrity(req->bio);
struct bio_integrity_payload *bip_next = bio_integrity(next);
- return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
+ return bvec_gap_to_prev(&req->q->limits,
+ &bip->bip_vec[bip->bip_vcnt - 1],
bip_next->bip_vec[0].bv_offset);
}
@@ -199,7 +200,8 @@ static inline bool integrity_req_gap_front_merge(struct request *req,
struct bio_integrity_payload *bip = bio_integrity(bio);
struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
- return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
+ return bvec_gap_to_prev(&req->q->limits,
+ &bip->bip_vec[bip->bip_vcnt - 1],
bip_next->bip_vec[0].bv_offset);
}
@@ -288,7 +290,8 @@ ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
const char *, size_t);
-static inline bool blk_may_split(struct request_queue *q, struct bio *bio)
+static inline bool bio_may_exceed_limits(struct bio *bio,
+ struct queue_limits *lim)
{
switch (bio_op(bio)) {
case REQ_OP_DISCARD:
@@ -307,12 +310,12 @@ static inline bool blk_may_split(struct request_queue *q, struct bio *bio)
* to the performance impact of cloned bios themselves the loop below
* doesn't matter anyway.
*/
- return q->limits.chunk_sectors || bio->bi_vcnt != 1 ||
+ return lim->chunk_sectors || bio->bi_vcnt != 1 ||
bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
}
-void __blk_queue_split(struct request_queue *q, struct bio **bio,
- unsigned int *nr_segs);
+struct bio *__bio_split_to_limits(struct bio *bio, struct queue_limits *lim,
+ unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio,
unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
@@ -345,16 +348,6 @@ static inline void req_set_nomerge(struct request_queue *q, struct request *req)
}
/*
- * The max size one bio can handle is UINT_MAX becasue bvec_iter.bi_size
- * is defined as 'unsigned int', meantime it has to aligned to with logical
- * block size which is the minimum accepted unit by hardware.
- */
-static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
-{
- return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
-}
-
-/*
* Internal io_context interface
*/
struct io_cq *ioc_find_get_icq(struct request_queue *q);
@@ -378,7 +371,7 @@ static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif
-void __blk_queue_bounce(struct request_queue *q, struct bio **bio);
+struct bio *__blk_queue_bounce(struct bio *bio, struct request_queue *q);
static inline bool blk_queue_may_bounce(struct request_queue *q)
{
@@ -387,10 +380,12 @@ static inline bool blk_queue_may_bounce(struct request_queue *q)
max_low_pfn >= max_pfn;
}
-static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
+static inline struct bio *blk_queue_bounce(struct bio *bio,
+ struct request_queue *q)
{
- if (unlikely(blk_queue_may_bounce(q) && bio_has_data(*bio)))
- __blk_queue_bounce(q, bio);
+ if (unlikely(blk_queue_may_bounce(q) && bio_has_data(bio)))
+ return __blk_queue_bounce(bio, q);
+ return bio;
}
#ifdef CONFIG_BLK_CGROUP_IOLATENCY
diff --git a/block/bounce.c b/block/bounce.c
index c8f487af7be3..7cfcb242f9a1 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -199,24 +199,24 @@ err_put:
return NULL;
}
-void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
+struct bio *__blk_queue_bounce(struct bio *bio_orig, struct request_queue *q)
{
struct bio *bio;
- int rw = bio_data_dir(*bio_orig);
+ int rw = bio_data_dir(bio_orig);
struct bio_vec *to, from;
struct bvec_iter iter;
unsigned i = 0, bytes = 0;
bool bounce = false;
int sectors;
- bio_for_each_segment(from, *bio_orig, iter) {
+ bio_for_each_segment(from, bio_orig, iter) {
if (i++ < BIO_MAX_VECS)
bytes += from.bv_len;
if (PageHighMem(from.bv_page))
bounce = true;
}
if (!bounce)
- return;
+ return bio_orig;
/*
* Individual bvecs might not be logical block aligned. Round down
@@ -225,13 +225,13 @@ void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
*/
sectors = ALIGN_DOWN(bytes, queue_logical_block_size(q)) >>
SECTOR_SHIFT;
- if (sectors < bio_sectors(*bio_orig)) {
- bio = bio_split(*bio_orig, sectors, GFP_NOIO, &bounce_bio_split);
- bio_chain(bio, *bio_orig);
- submit_bio_noacct(*bio_orig);
- *bio_orig = bio;
+ if (sectors < bio_sectors(bio_orig)) {
+ bio = bio_split(bio_orig, sectors, GFP_NOIO, &bounce_bio_split);
+ bio_chain(bio, bio_orig);
+ submit_bio_noacct(bio_orig);
+ bio_orig = bio;
}
- bio = bounce_clone_bio(*bio_orig);
+ bio = bounce_clone_bio(bio_orig);
/*
* Bvec table can't be updated by bio_for_each_segment_all(),
@@ -254,7 +254,7 @@ void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
to->bv_page = bounce_page;
}
- trace_block_bio_bounce(*bio_orig);
+ trace_block_bio_bounce(bio_orig);
bio->bi_flags |= (1 << BIO_BOUNCED);
@@ -263,6 +263,6 @@ void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
else
bio->bi_end_io = bounce_end_io_write;
- bio->bi_private = *bio_orig;
- *bio_orig = bio;
+ bio->bi_private = bio_orig;
+ return bio;
}
diff --git a/block/bsg.c b/block/bsg.c
index 882f56bff14f..2ab1351eb082 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -169,7 +169,7 @@ static void bsg_device_release(struct device *dev)
{
struct bsg_device *bd = container_of(dev, struct bsg_device, device);
- ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt));
+ ida_free(&bsg_minor_ida, MINOR(bd->device.devt));
kfree(bd);
}
@@ -196,7 +196,7 @@ struct bsg_device *bsg_register_queue(struct request_queue *q,
bd->queue = q;
bd->sg_io_fn = sg_io_fn;
- ret = ida_simple_get(&bsg_minor_ida, 0, BSG_MAX_DEVS, GFP_KERNEL);
+ ret = ida_alloc_max(&bsg_minor_ida, BSG_MAX_DEVS - 1, GFP_KERNEL);
if (ret < 0) {
if (ret == -ENOSPC)
dev_err(parent, "bsg: too many bsg devices\n");
diff --git a/block/fops.c b/block/fops.c
index a564cd81340c..b90742595317 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -75,7 +75,7 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
if (iov_iter_rw(iter) == READ) {
bio_init(&bio, bdev, vecs, nr_pages, REQ_OP_READ);
- if (iter_is_iovec(iter))
+ if (user_backed_iter(iter))
should_dirty = true;
} else {
bio_init(&bio, bdev, vecs, nr_pages, dio_bio_write_op(iocb));
@@ -204,7 +204,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
}
dio->size = 0;
- if (is_read && iter_is_iovec(iter))
+ if (is_read && user_backed_iter(iter))
dio->flags |= DIO_SHOULD_DIRTY;
blk_start_plug(&plug);
@@ -335,7 +335,7 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
dio->size = bio->bi_iter.bi_size;
if (is_read) {
- if (iter_is_iovec(iter)) {
+ if (user_backed_iter(iter)) {
dio->flags |= DIO_SHOULD_DIRTY;
bio_set_pages_dirty(bio);
}
diff --git a/block/genhd.c b/block/genhd.c
index e1d5b10ac193..d36fabf0abc1 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1151,6 +1151,7 @@ static void disk_release(struct device *dev)
blk_mq_exit_queue(disk->queue);
blkcg_exit_queue(disk->queue);
+ bioset_exit(&disk->bio_split);
disk_release_events(disk);
kfree(disk->random);
@@ -1340,11 +1341,14 @@ struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
disk = kzalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
if (!disk)
- goto out_put_queue;
+ return NULL;
+
+ if (bioset_init(&disk->bio_split, BIO_POOL_SIZE, 0, 0))
+ goto out_free_disk;
disk->bdi = bdi_alloc(node_id);
if (!disk->bdi)
- goto out_free_disk;
+ goto out_free_bioset;
/* bdev_alloc() might need the queue, set before the first call */
disk->queue = q;
@@ -1382,10 +1386,10 @@ out_destroy_part_tbl:
iput(disk->part0->bd_inode);
out_free_bdi:
bdi_put(disk->bdi);
+out_free_bioset:
+ bioset_exit(&disk->bio_split);
out_free_disk:
kfree(disk);
-out_put_queue:
- blk_put_queue(q);
return NULL;
}
diff --git a/certs/Makefile b/certs/Makefile
index 88a73b28d254..9486ed924731 100644
--- a/certs/Makefile
+++ b/certs/Makefile
@@ -4,24 +4,22 @@
#
obj-$(CONFIG_SYSTEM_TRUSTED_KEYRING) += system_keyring.o system_certificates.o
-obj-$(CONFIG_SYSTEM_BLACKLIST_KEYRING) += blacklist.o
+obj-$(CONFIG_SYSTEM_BLACKLIST_KEYRING) += blacklist.o blacklist_hashes.o
obj-$(CONFIG_SYSTEM_REVOCATION_LIST) += revocation_certificates.o
-ifneq ($(CONFIG_SYSTEM_BLACKLIST_HASH_LIST),)
$(obj)/blacklist_hashes.o: $(obj)/blacklist_hash_list
CFLAGS_blacklist_hashes.o := -I $(obj)
quiet_cmd_check_and_copy_blacklist_hash_list = GEN $@
cmd_check_and_copy_blacklist_hash_list = \
- $(AWK) -f $(srctree)/scripts/check-blacklist-hashes.awk $(CONFIG_SYSTEM_BLACKLIST_HASH_LIST) >&2; \
- cat $(CONFIG_SYSTEM_BLACKLIST_HASH_LIST) > $@
+ $(if $(CONFIG_SYSTEM_BLACKLIST_HASH_LIST), \
+ $(AWK) -f $(srctree)/$(src)/check-blacklist-hashes.awk $(CONFIG_SYSTEM_BLACKLIST_HASH_LIST) >&2; \
+ { cat $(CONFIG_SYSTEM_BLACKLIST_HASH_LIST); echo $(comma) NULL; } > $@, \
+ echo NULL > $@)
$(obj)/blacklist_hash_list: $(CONFIG_SYSTEM_BLACKLIST_HASH_LIST) FORCE
$(call if_changed,check_and_copy_blacklist_hash_list)
-obj-$(CONFIG_SYSTEM_BLACKLIST_KEYRING) += blacklist_hashes.o
-else
-obj-$(CONFIG_SYSTEM_BLACKLIST_KEYRING) += blacklist_nohashes.o
-endif
+
targets += blacklist_hash_list
quiet_cmd_extract_certs = CERT $@
diff --git a/certs/blacklist_hashes.c b/certs/blacklist_hashes.c
index 86d66fe11348..0c5476abebd9 100644
--- a/certs/blacklist_hashes.c
+++ b/certs/blacklist_hashes.c
@@ -3,5 +3,4 @@
const char __initconst *const blacklist_hashes[] = {
#include "blacklist_hash_list"
- , NULL
};
diff --git a/certs/blacklist_nohashes.c b/certs/blacklist_nohashes.c
deleted file mode 100644
index 753b703ef0ef..000000000000
--- a/certs/blacklist_nohashes.c
+++ /dev/null
@@ -1,6 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include "blacklist.h"
-
-const char __initconst *const blacklist_hashes[] = {
- NULL
-};
diff --git a/scripts/check-blacklist-hashes.awk b/certs/check-blacklist-hashes.awk
index 107c1d3204d4..107c1d3204d4 100755
--- a/scripts/check-blacklist-hashes.awk
+++ b/certs/check-blacklist-hashes.awk
diff --git a/crypto/Makefile b/crypto/Makefile
index 167c004dbf4f..a6f94e04e1da 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -84,6 +84,7 @@ obj-$(CONFIG_CRYPTO_STREEBOG) += streebog_generic.o
obj-$(CONFIG_CRYPTO_WP512) += wp512.o
CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
obj-$(CONFIG_CRYPTO_BLAKE2B) += blake2b_generic.o
+CFLAGS_blake2b_generic.o := -Wframe-larger-than=4096 # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105930
obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o
obj-$(CONFIG_CRYPTO_ECB) += ecb.o
obj-$(CONFIG_CRYPTO_CBC) += cbc.o
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index c8289b7a85ba..e893c0f6c879 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -404,7 +404,7 @@ int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len)
ssize_t n;
int npages, i;
- n = iov_iter_get_pages(iter, sgl->pages, len, ALG_MAX_PAGES, &off);
+ n = iov_iter_get_pages2(iter, sgl->pages, len, ALG_MAX_PAGES, &off);
if (n < 0)
return n;
@@ -1191,7 +1191,6 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
len += err;
atomic_add(err, &ctx->rcvused);
rsgl->sg_num_bytes = err;
- iov_iter_advance(&msg->msg_iter, err);
}
*outlen = len;
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 50f7b22f1b48..1d017ec5c63c 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -102,11 +102,12 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
err = crypto_wait_req(crypto_ahash_update(&ctx->req),
&ctx->wait);
af_alg_free_sg(&ctx->sgl);
- if (err)
+ if (err) {
+ iov_iter_revert(&msg->msg_iter, len);
goto unlock;
+ }
copied += len;
- iov_iter_advance(&msg->msg_iter, len);
}
err = 0;
diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c
index 6592279d839a..277482bb1777 100644
--- a/crypto/asymmetric_keys/pkcs7_parser.c
+++ b/crypto/asymmetric_keys/pkcs7_parser.c
@@ -248,6 +248,15 @@ int pkcs7_sig_note_digest_algo(void *context, size_t hdrlen,
case OID_sha224:
ctx->sinfo->sig->hash_algo = "sha224";
break;
+ case OID_sm3:
+ ctx->sinfo->sig->hash_algo = "sm3";
+ break;
+ case OID_gost2012Digest256:
+ ctx->sinfo->sig->hash_algo = "streebog256";
+ break;
+ case OID_gost2012Digest512:
+ ctx->sinfo->sig->hash_algo = "streebog512";
+ break;
default:
printk("Unsupported digest algo: %u\n", ctx->last_oid);
return -ENOPKG;
@@ -277,6 +286,15 @@ int pkcs7_sig_note_pkey_algo(void *context, size_t hdrlen,
ctx->sinfo->sig->pkey_algo = "ecdsa";
ctx->sinfo->sig->encoding = "x962";
break;
+ case OID_SM2_with_SM3:
+ ctx->sinfo->sig->pkey_algo = "sm2";
+ ctx->sinfo->sig->encoding = "raw";
+ break;
+ case OID_gost2012PKey256:
+ case OID_gost2012PKey512:
+ ctx->sinfo->sig->pkey_algo = "ecrdsa";
+ ctx->sinfo->sig->encoding = "raw";
+ break;
default:
printk("Unsupported pkey algo: %u\n", ctx->last_oid);
return -ENOPKG;
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index 7c9e6be35c30..2f8352e88860 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -304,6 +304,10 @@ static int cert_sig_digest_update(const struct public_key_signature *sig,
BUG_ON(!sig->data);
+ /* SM2 signatures always use the SM3 hash algorithm */
+ if (!sig->hash_algo || strcmp(sig->hash_algo, "sm3") != 0)
+ return -EINVAL;
+
ret = sm2_compute_z_digest(tfm_pkey, SM2_DEFAULT_USERID,
SM2_DEFAULT_USERID_LEN, dgst);
if (ret)
@@ -414,8 +418,7 @@ int public_key_verify_signature(const struct public_key *pkey,
if (ret)
goto error_free_key;
- if (sig->pkey_algo && strcmp(sig->pkey_algo, "sm2") == 0 &&
- sig->data_size) {
+ if (strcmp(pkey->pkey_algo, "sm2") == 0 && sig->data_size) {
ret = cert_sig_digest_update(sig, tfm);
if (ret)
goto error_free_key;
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index 2899ed80bb18..7a9b084e2043 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -508,6 +508,9 @@ int x509_extract_key_data(void *context, size_t hdrlen,
case OID_gost2012PKey512:
ctx->cert->pub->pkey_algo = "ecrdsa";
break;
+ case OID_sm2:
+ ctx->cert->pub->pkey_algo = "sm2";
+ break;
case OID_id_ecPublicKey:
if (parse_OID(ctx->params, ctx->params_size, &oid) != 0)
return -EBADMSG;
diff --git a/crypto/kpp.c b/crypto/kpp.c
index 7aa6ba4b60a4..678e871ce418 100644
--- a/crypto/kpp.c
+++ b/crypto/kpp.c
@@ -104,6 +104,12 @@ int crypto_grab_kpp(struct crypto_kpp_spawn *spawn,
}
EXPORT_SYMBOL_GPL(crypto_grab_kpp);
+int crypto_has_kpp(const char *alg_name, u32 type, u32 mask)
+{
+ return crypto_type_has_alg(alg_name, &crypto_kpp_type, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_has_kpp);
+
static void kpp_prepare_alg(struct kpp_alg *alg)
{
struct crypto_alg *base = &alg->base;
diff --git a/crypto/shash.c b/crypto/shash.c
index 0a0a50cb694f..4c88e63b3350 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -521,6 +521,12 @@ struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type,
}
EXPORT_SYMBOL_GPL(crypto_alloc_shash);
+int crypto_has_shash(const char *alg_name, u32 type, u32 mask)
+{
+ return crypto_type_has_alg(alg_name, &crypto_shash_type, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_has_shash);
+
static int shash_prepare_alg(struct shash_alg *alg)
{
struct crypto_alg *base = &alg->base;
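
The new crypto_has_kpp() and crypto_has_shash() helpers follow the existing crypto_has_alg() pattern; a minimal sketch of a caller probing for availability before allocating transforms ("ecdh" and "sha256" are just example algorithm names, and error handling is omitted):

/* Sketch: probe availability first, then allocate (illustrative only). */
if (crypto_has_kpp("ecdh", 0, 0) && crypto_has_shash("sha256", 0, 0)) {
	struct crypto_kpp *kpp = crypto_alloc_kpp("ecdh", 0, 0);
	struct crypto_shash *hash = crypto_alloc_shash("sha256", 0, 0);

	/* ... use the transforms ... */

	crypto_free_shash(hash);
	crypto_free_kpp(kpp);
}
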
diff --git a/drivers/accessibility/braille/braille_console.c b/drivers/accessibility/braille/braille_console.c
index fdc6b593f500..c4d54a5326b1 100644
--- a/drivers/accessibility/braille/braille_console.c
+++ b/drivers/accessibility/braille/braille_console.c
@@ -131,7 +131,7 @@ static void vc_refresh(struct vc_data *vc)
for (i = 0; i < WIDTH; i++) {
u16 glyph = screen_glyph(vc,
2 * (vc_x + i) + vc_y * vc->vc_size_row);
- buf[i] = inverse_translate(vc, glyph, 1);
+ buf[i] = inverse_translate(vc, glyph, true);
}
braille_write(buf);
}
diff --git a/drivers/accessibility/speakup/main.c b/drivers/accessibility/speakup/main.c
index d726537fa16c..f52265293482 100644
--- a/drivers/accessibility/speakup/main.c
+++ b/drivers/accessibility/speakup/main.c
@@ -470,7 +470,7 @@ static u16 get_char(struct vc_data *vc, u16 *pos, u_char *attribs)
c |= 0x100;
}
- ch = inverse_translate(vc, c, 1);
+ ch = inverse_translate(vc, c, true);
*attribs = (w & 0xff00) >> 8;
}
return ch;
diff --git a/drivers/accessibility/speakup/serialio.h b/drivers/accessibility/speakup/serialio.h
index 6f8f86f161bb..b4f9a1925b81 100644
--- a/drivers/accessibility/speakup/serialio.h
+++ b/drivers/accessibility/speakup/serialio.h
@@ -33,9 +33,8 @@ struct old_serial_port {
#define NUM_DISABLE_TIMEOUTS 3
/* buffer timeout in ms */
#define SPK_TIMEOUT 100
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
#define spk_serial_tx_busy() \
- ((inb(speakup_info.port_tts + UART_LSR) & BOTH_EMPTY) != BOTH_EMPTY)
+ (!uart_lsr_tx_empty(inb(speakup_info.port_tts + UART_LSR)))
#endif
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index f2f8f05662de..ca2aed86b540 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -788,6 +788,294 @@ void acpi_configure_pmsi_domain(struct device *dev)
}
#ifdef CONFIG_IOMMU_API
+static void iort_rmr_free(struct device *dev,
+ struct iommu_resv_region *region)
+{
+ struct iommu_iort_rmr_data *rmr_data;
+
+ rmr_data = container_of(region, struct iommu_iort_rmr_data, rr);
+ kfree(rmr_data->sids);
+ kfree(rmr_data);
+}
+
+static struct iommu_iort_rmr_data *iort_rmr_alloc(
+ struct acpi_iort_rmr_desc *rmr_desc,
+ int prot, enum iommu_resv_type type,
+ u32 *sids, u32 num_sids)
+{
+ struct iommu_iort_rmr_data *rmr_data;
+ struct iommu_resv_region *region;
+ u32 *sids_copy;
+ u64 addr = rmr_desc->base_address, size = rmr_desc->length;
+
+ rmr_data = kmalloc(sizeof(*rmr_data), GFP_KERNEL);
+ if (!rmr_data)
+ return NULL;
+
+ /* Create a copy of SIDs array to associate with this rmr_data */
+ sids_copy = kmemdup(sids, num_sids * sizeof(*sids), GFP_KERNEL);
+ if (!sids_copy) {
+ kfree(rmr_data);
+ return NULL;
+ }
+ rmr_data->sids = sids_copy;
+ rmr_data->num_sids = num_sids;
+
+ if (!IS_ALIGNED(addr, SZ_64K) || !IS_ALIGNED(size, SZ_64K)) {
+ /* PAGE align base addr and size */
+ addr &= PAGE_MASK;
+ size = PAGE_ALIGN(size + offset_in_page(rmr_desc->base_address));
+
+ pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] not aligned to 64K, continue with [0x%llx - 0x%llx]\n",
+ rmr_desc->base_address,
+ rmr_desc->base_address + rmr_desc->length - 1,
+ addr, addr + size - 1);
+ }
+
+ region = &rmr_data->rr;
+ INIT_LIST_HEAD(&region->list);
+ region->start = addr;
+ region->length = size;
+ region->prot = prot;
+ region->type = type;
+ region->free = iort_rmr_free;
+
+ return rmr_data;
+}
+
+static void iort_rmr_desc_check_overlap(struct acpi_iort_rmr_desc *desc,
+ u32 count)
+{
+ int i, j;
+
+ for (i = 0; i < count; i++) {
+ u64 end, start = desc[i].base_address, length = desc[i].length;
+
+ if (!length) {
+ pr_err(FW_BUG "RMR descriptor[0x%llx] with zero length, continue anyway\n",
+ start);
+ continue;
+ }
+
+ end = start + length - 1;
+
+ /* Check for address overlap */
+ for (j = i + 1; j < count; j++) {
+ u64 e_start = desc[j].base_address;
+ u64 e_end = e_start + desc[j].length - 1;
+
+ if (start <= e_end && end >= e_start)
+ pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] overlaps, continue anyway\n",
+ start, end);
+ }
+ }
+}
+
+/*
+ * Please note, we will keep the already allocated RMR reserved
+ * regions in case of a memory allocation failure.
+ */
+static void iort_get_rmrs(struct acpi_iort_node *node,
+ struct acpi_iort_node *smmu,
+ u32 *sids, u32 num_sids,
+ struct list_head *head)
+{
+ struct acpi_iort_rmr *rmr = (struct acpi_iort_rmr *)node->node_data;
+ struct acpi_iort_rmr_desc *rmr_desc;
+ int i;
+
+ rmr_desc = ACPI_ADD_PTR(struct acpi_iort_rmr_desc, node,
+ rmr->rmr_offset);
+
+ iort_rmr_desc_check_overlap(rmr_desc, rmr->rmr_count);
+
+ for (i = 0; i < rmr->rmr_count; i++, rmr_desc++) {
+ struct iommu_iort_rmr_data *rmr_data;
+ enum iommu_resv_type type;
+ int prot = IOMMU_READ | IOMMU_WRITE;
+
+ if (rmr->flags & ACPI_IORT_RMR_REMAP_PERMITTED)
+ type = IOMMU_RESV_DIRECT_RELAXABLE;
+ else
+ type = IOMMU_RESV_DIRECT;
+
+ if (rmr->flags & ACPI_IORT_RMR_ACCESS_PRIVILEGE)
+ prot |= IOMMU_PRIV;
+
+ /* Attributes 0x00 - 0x03 represent device memory */
+ if (ACPI_IORT_RMR_ACCESS_ATTRIBUTES(rmr->flags) <=
+ ACPI_IORT_RMR_ATTR_DEVICE_GRE)
+ prot |= IOMMU_MMIO;
+ else if (ACPI_IORT_RMR_ACCESS_ATTRIBUTES(rmr->flags) ==
+ ACPI_IORT_RMR_ATTR_NORMAL_IWB_OWB)
+ prot |= IOMMU_CACHE;
+
+ rmr_data = iort_rmr_alloc(rmr_desc, prot, type,
+ sids, num_sids);
+ if (!rmr_data)
+ return;
+
+ list_add_tail(&rmr_data->rr.list, head);
+ }
+}
+
+static u32 *iort_rmr_alloc_sids(u32 *sids, u32 count, u32 id_start,
+ u32 new_count)
+{
+ u32 *new_sids;
+ u32 total_count = count + new_count;
+ int i;
+
+ new_sids = krealloc_array(sids, count + new_count,
+ sizeof(*new_sids), GFP_KERNEL);
+ if (!new_sids)
+ return NULL;
+
+ for (i = count; i < total_count; i++)
+ new_sids[i] = id_start++;
+
+ return new_sids;
+}
+
+static bool iort_rmr_has_dev(struct device *dev, u32 id_start,
+ u32 id_count)
+{
+ int i;
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+
+ /*
+ * Make sure the kernel has preserved the boot firmware PCIe
+ * configuration. This is required to ensure that the RMR PCIe
+ * StreamIDs are still valid (Refer: ARM DEN 0049E.d Section 3.1.1.5).
+ */
+ if (dev_is_pci(dev)) {
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_host_bridge *host = pci_find_host_bridge(pdev->bus);
+
+ if (!host->preserve_config)
+ return false;
+ }
+
+ for (i = 0; i < fwspec->num_ids; i++) {
+ if (fwspec->ids[i] >= id_start &&
+ fwspec->ids[i] <= id_start + id_count)
+ return true;
+ }
+
+ return false;
+}
+
+static void iort_node_get_rmr_info(struct acpi_iort_node *node,
+ struct acpi_iort_node *iommu,
+ struct device *dev, struct list_head *head)
+{
+ struct acpi_iort_node *smmu = NULL;
+ struct acpi_iort_rmr *rmr;
+ struct acpi_iort_id_mapping *map;
+ u32 *sids = NULL;
+ u32 num_sids = 0;
+ int i;
+
+ if (!node->mapping_offset || !node->mapping_count) {
+ pr_err(FW_BUG "Invalid ID mapping, skipping RMR node %p\n",
+ node);
+ return;
+ }
+
+ rmr = (struct acpi_iort_rmr *)node->node_data;
+ if (!rmr->rmr_offset || !rmr->rmr_count)
+ return;
+
+ map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
+ node->mapping_offset);
+
+ /*
+ * Go through the ID mappings and see if we have a match for SMMU
+ * and dev (if !NULL). If found, get the SIDs for the node.
+ * Please note, id_count is equal to the number of IDs in the
+ * range minus one.
+ */
+ for (i = 0; i < node->mapping_count; i++, map++) {
+ struct acpi_iort_node *parent;
+
+ if (!map->id_count)
+ continue;
+
+ parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
+ map->output_reference);
+ if (parent != iommu)
+ continue;
+
+ /* If dev is valid, check RMR node corresponds to the dev SID */
+ if (dev && !iort_rmr_has_dev(dev, map->output_base,
+ map->id_count))
+ continue;
+
+ /* Retrieve SIDs associated with the Node. */
+ sids = iort_rmr_alloc_sids(sids, num_sids, map->output_base,
+ map->id_count + 1);
+ if (!sids)
+ return;
+
+ num_sids += map->id_count + 1;
+ }
+
+ if (!sids)
+ return;
+
+ iort_get_rmrs(node, smmu, sids, num_sids, head);
+ kfree(sids);
+}
+
+static void iort_find_rmrs(struct acpi_iort_node *iommu, struct device *dev,
+ struct list_head *head)
+{
+ struct acpi_table_iort *iort;
+ struct acpi_iort_node *iort_node, *iort_end;
+ int i;
+
+ /* Only supports ARM DEN 0049E.d onwards */
+ if (iort_table->revision < 5)
+ return;
+
+ iort = (struct acpi_table_iort *)iort_table;
+
+ iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
+ iort->node_offset);
+ iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
+ iort_table->length);
+
+ for (i = 0; i < iort->node_count; i++) {
+ if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
+ "IORT node pointer overflows, bad table!\n"))
+ return;
+
+ if (iort_node->type == ACPI_IORT_NODE_RMR)
+ iort_node_get_rmr_info(iort_node, iommu, dev, head);
+
+ iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
+ iort_node->length);
+ }
+}
+
+/*
+ * Populate the RMR list associated with a given IOMMU and dev (if provided).
+ * If dev is NULL, the function populates all the RMRs associated with the
+ * given IOMMU.
+ */
+static void iort_iommu_rmr_get_resv_regions(struct fwnode_handle *iommu_fwnode,
+ struct device *dev,
+ struct list_head *head)
+{
+ struct acpi_iort_node *iommu;
+
+ iommu = iort_get_iort_node(iommu_fwnode);
+ if (!iommu)
+ return;
+
+ iort_find_rmrs(iommu, dev, head);
+}
+
static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev)
{
struct acpi_iort_node *iommu;
@@ -806,27 +1094,22 @@ static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev)
return NULL;
}
-/**
- * iort_iommu_msi_get_resv_regions - Reserved region driver helper
- * @dev: Device from iommu_get_resv_regions()
- * @head: Reserved region list from iommu_get_resv_regions()
- *
- * Returns: Number of msi reserved regions on success (0 if platform
- * doesn't require the reservation or no associated msi regions),
- * appropriate error value otherwise. The ITS interrupt translation
- * spaces (ITS_base + SZ_64K, SZ_64K) associated with the device
- * are the msi reserved regions.
+/*
+ * Retrieve platform specific HW MSI reserve regions.
+ * The ITS interrupt translation spaces (ITS_base + SZ_64K, SZ_64K)
+ * associated with the device are the HW MSI reserved regions.
*/
-int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
+static void iort_iommu_msi_get_resv_regions(struct device *dev,
+ struct list_head *head)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct acpi_iort_its_group *its;
struct acpi_iort_node *iommu_node, *its_node = NULL;
- int i, resv = 0;
+ int i;
iommu_node = iort_get_msi_resv_iommu(dev);
if (!iommu_node)
- return 0;
+ return;
/*
* Current logic to reserve ITS regions relies on HW topologies
@@ -846,7 +1129,7 @@ int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
}
if (!its_node)
- return 0;
+ return;
/* Move to ITS specific data */
its = (struct acpi_iort_its_group *)its_node->node_data;
@@ -860,15 +1143,52 @@ int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
region = iommu_alloc_resv_region(base + SZ_64K, SZ_64K,
prot, IOMMU_RESV_MSI);
- if (region) {
+ if (region)
list_add_tail(&region->list, head);
- resv++;
- }
}
}
+}
+
+/**
+ * iort_iommu_get_resv_regions - Generic helper to retrieve reserved regions.
+ * @dev: Device from iommu_get_resv_regions()
+ * @head: Reserved region list from iommu_get_resv_regions()
+ */
+void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head)
+{
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+
+ iort_iommu_msi_get_resv_regions(dev, head);
+ iort_iommu_rmr_get_resv_regions(fwspec->iommu_fwnode, dev, head);
+}
+
+/**
+ * iort_get_rmr_sids - Retrieve IORT RMR node reserved regions with
+ * associated StreamID information.
+ * @iommu_fwnode: fwnode associated with IOMMU
+ * @head: Reserved region list
+ */
+void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode,
+ struct list_head *head)
+{
+ iort_iommu_rmr_get_resv_regions(iommu_fwnode, NULL, head);
+}
+EXPORT_SYMBOL_GPL(iort_get_rmr_sids);
+
+/**
+ * iort_put_rmr_sids - Free memory allocated for RMR reserved regions.
+ * @iommu_fwnode: fwnode associated with IOMMU
+ * @head: Reserved region list
+ */
+void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode,
+ struct list_head *head)
+{
+ struct iommu_resv_region *entry, *next;
- return (resv == its->its_count) ? resv : -ENODEV;
+ list_for_each_entry_safe(entry, next, head, list)
+ entry->free(NULL, entry);
}
+EXPORT_SYMBOL_GPL(iort_put_rmr_sids);
static inline bool iort_iommu_driver_enabled(u8 type)
{
@@ -1034,8 +1354,8 @@ int iort_iommu_configure_id(struct device *dev, const u32 *id_in)
}
#else
-int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
-{ return 0; }
+void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head)
+{ }
int iort_iommu_configure_id(struct device *dev, const u32 *input_id)
{ return -ENODEV; }
#endif
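
As a quick illustration of the new exports above, here is a minimal, purely illustrative sketch (not taken from the patch) of how a caller that already holds an IOMMU fwnode might fetch and release the RMR reserved-region list; my_smmu_scan_rmrs() is a hypothetical name and assumes the usual linux/acpi_iort.h and linux/iommu.h declarations.

static void my_smmu_scan_rmrs(struct fwnode_handle *iommu_fwnode)
{
	struct iommu_resv_region *rmr;
	LIST_HEAD(rmr_list);

	/* Collect all RMR regions published for this SMMU instance. */
	iort_get_rmr_sids(iommu_fwnode, &rmr_list);

	list_for_each_entry(rmr, &rmr_list, list)
		pr_debug("RMR [%pa + %zx]\n", &rmr->start, rmr->length);

	/* Free the regions allocated by the lookup above. */
	iort_put_rmr_sids(iommu_fwnode, &rmr_list);
}
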
diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c
index 53cab975f612..860014b89b8e 100644
--- a/drivers/acpi/pci_mcfg.c
+++ b/drivers/acpi/pci_mcfg.c
@@ -41,6 +41,8 @@ struct mcfg_fixup {
static struct mcfg_fixup mcfg_quirks[] = {
/* { OEM_ID, OEM_TABLE_ID, REV, SEGMENT, BUS_RANGE, ops, cfgres }, */
+#ifdef CONFIG_ARM64
+
#define AL_ECAM(table_id, rev, seg, ops) \
{ "AMAZON", table_id, rev, seg, MCFG_BUS_ANY, ops }
@@ -169,6 +171,17 @@ static struct mcfg_fixup mcfg_quirks[] = {
ALTRA_ECAM_QUIRK(1, 13),
ALTRA_ECAM_QUIRK(1, 14),
ALTRA_ECAM_QUIRK(1, 15),
+#endif /* ARM64 */
+
+#ifdef CONFIG_LOONGARCH
+#define LOONGSON_ECAM_MCFG(table_id, seg) \
+ { "LOONGS", table_id, 1, seg, MCFG_BUS_ANY, &loongson_pci_ecam_ops }
+
+ LOONGSON_ECAM_MCFG("\0", 0),
+ LOONGSON_ECAM_MCFG("LOONGSON", 0),
+ LOONGSON_ECAM_MCFG("\0", 1),
+ LOONGSON_ECAM_MCFG("LOONGSON", 1),
+#endif /* LOONGARCH */
};
static char mcfg_oem_id[ACPI_OEM_ID_SIZE];
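
To make the quirk-table layout explicit (this is an editor's aside, not part of the patch): assuming the field order documented in the table comment above ({ OEM_ID, OEM_TABLE_ID, REV, SEGMENT, BUS_RANGE, ops, cfgres }), LOONGSON_ECAM_MCFG("LOONGSON", 1) expands roughly to:

	{ "LOONGS", "LOONGSON", 1, 1, MCFG_BUS_ANY, &loongson_pci_ecam_ops },
	/* cfgres is left zero-initialized by the shorter initializer */
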
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index db6ac540e924..e534fd49a67e 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -151,7 +151,7 @@ void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
unsigned int cpu;
for_each_cpu(cpu, policy->related_cpus) {
- struct acpi_processor *pr = per_cpu(processors, policy->cpu);
+ struct acpi_processor *pr = per_cpu(processors, cpu);
if (pr)
freq_qos_remove_request(&pr->thermal_req);
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index e764f9ac9cf8..d4c168ce428c 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -55,14 +55,19 @@ static const guid_t ads_guid =
GUID_INIT(0xdbb8e3e6, 0x5886, 0x4ba6,
0x87, 0x95, 0x13, 0x19, 0xf5, 0x2a, 0x96, 0x6b);
+static const guid_t buffer_prop_guid =
+ GUID_INIT(0xedb12dd0, 0x363d, 0x4085,
+ 0xa3, 0xd2, 0x49, 0x52, 0x2c, 0xa1, 0x60, 0xc4);
+
static bool acpi_enumerate_nondev_subnodes(acpi_handle scope,
- const union acpi_object *desc,
+ union acpi_object *desc,
struct acpi_device_data *data,
struct fwnode_handle *parent);
-static bool acpi_extract_properties(const union acpi_object *desc,
+static bool acpi_extract_properties(acpi_handle handle,
+ union acpi_object *desc,
struct acpi_device_data *data);
-static bool acpi_nondev_subnode_extract(const union acpi_object *desc,
+static bool acpi_nondev_subnode_extract(union acpi_object *desc,
acpi_handle handle,
const union acpi_object *link,
struct list_head *list,
@@ -81,7 +86,7 @@ static bool acpi_nondev_subnode_extract(const union acpi_object *desc,
INIT_LIST_HEAD(&dn->data.properties);
INIT_LIST_HEAD(&dn->data.subnodes);
- result = acpi_extract_properties(desc, &dn->data);
+ result = acpi_extract_properties(handle, desc, &dn->data);
if (handle) {
acpi_handle scope;
@@ -155,16 +160,16 @@ static bool acpi_nondev_subnode_ok(acpi_handle scope,
return acpi_nondev_subnode_data_ok(handle, link, list, parent);
}
-static int acpi_add_nondev_subnodes(acpi_handle scope,
- const union acpi_object *links,
- struct list_head *list,
- struct fwnode_handle *parent)
+static bool acpi_add_nondev_subnodes(acpi_handle scope,
+ union acpi_object *links,
+ struct list_head *list,
+ struct fwnode_handle *parent)
{
bool ret = false;
int i;
for (i = 0; i < links->package.count; i++) {
- const union acpi_object *link, *desc;
+ union acpi_object *link, *desc;
acpi_handle handle;
bool result;
@@ -204,7 +209,7 @@ static int acpi_add_nondev_subnodes(acpi_handle scope,
}
static bool acpi_enumerate_nondev_subnodes(acpi_handle scope,
- const union acpi_object *desc,
+ union acpi_object *desc,
struct acpi_device_data *data,
struct fwnode_handle *parent)
{
@@ -212,7 +217,8 @@ static bool acpi_enumerate_nondev_subnodes(acpi_handle scope,
/* Look for the ACPI data subnodes GUID. */
for (i = 0; i < desc->package.count; i += 2) {
- const union acpi_object *guid, *links;
+ const union acpi_object *guid;
+ union acpi_object *links;
guid = &desc->package.elements[i];
links = &desc->package.elements[i + 1];
@@ -325,7 +331,7 @@ static bool acpi_is_property_guid(const guid_t *guid)
struct acpi_device_properties *
acpi_data_add_props(struct acpi_device_data *data, const guid_t *guid,
- const union acpi_object *properties)
+ union acpi_object *properties)
{
struct acpi_device_properties *props;
@@ -340,7 +346,141 @@ acpi_data_add_props(struct acpi_device_data *data, const guid_t *guid,
return props;
}
-static bool acpi_extract_properties(const union acpi_object *desc,
+static void acpi_nondev_subnode_tag(acpi_handle handle, void *context)
+{
+}
+
+static void acpi_untie_nondev_subnodes(struct acpi_device_data *data)
+{
+ struct acpi_data_node *dn;
+
+ list_for_each_entry(dn, &data->subnodes, sibling) {
+ acpi_detach_data(dn->handle, acpi_nondev_subnode_tag);
+
+ acpi_untie_nondev_subnodes(&dn->data);
+ }
+}
+
+static bool acpi_tie_nondev_subnodes(struct acpi_device_data *data)
+{
+ struct acpi_data_node *dn;
+
+ list_for_each_entry(dn, &data->subnodes, sibling) {
+ acpi_status status;
+ bool ret;
+
+ status = acpi_attach_data(dn->handle, acpi_nondev_subnode_tag, dn);
+ if (ACPI_FAILURE(status) && status != AE_ALREADY_EXISTS) {
+ acpi_handle_err(dn->handle, "Can't tag data node\n");
+ return false;
+ }
+
+ ret = acpi_tie_nondev_subnodes(&dn->data);
+ if (!ret)
+ return ret;
+ }
+
+ return true;
+}
+
+static void acpi_data_add_buffer_props(acpi_handle handle,
+ struct acpi_device_data *data,
+ union acpi_object *properties)
+{
+ struct acpi_device_properties *props;
+ union acpi_object *package;
+ size_t alloc_size;
+ unsigned int i;
+ u32 *count;
+
+ if (check_mul_overflow((size_t)properties->package.count,
+ sizeof(*package) + sizeof(void *),
+ &alloc_size) ||
+ check_add_overflow(sizeof(*props) + sizeof(*package), alloc_size,
+ &alloc_size)) {
+ acpi_handle_warn(handle,
+ "can't allocate memory for %u buffer props",
+ properties->package.count);
+ return;
+ }
+
+ props = kvzalloc(alloc_size, GFP_KERNEL);
+ if (!props)
+ return;
+
+ props->guid = &buffer_prop_guid;
+ props->bufs = (void *)(props + 1);
+ props->properties = (void *)(props->bufs + properties->package.count);
+
+ /* Outer package */
+ package = props->properties;
+ package->type = ACPI_TYPE_PACKAGE;
+ package->package.elements = package + 1;
+ count = &package->package.count;
+ *count = 0;
+
+ /* Inner packages */
+ package++;
+
+ for (i = 0; i < properties->package.count; i++) {
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
+ union acpi_object *property = &properties->package.elements[i];
+ union acpi_object *prop, *obj, *buf_obj;
+ acpi_status status;
+
+ if (property->type != ACPI_TYPE_PACKAGE ||
+ property->package.count != 2) {
+ acpi_handle_warn(handle,
+ "buffer property %u has %u entries\n",
+ i, property->package.count);
+ continue;
+ }
+
+ prop = &property->package.elements[0];
+ obj = &property->package.elements[1];
+
+ if (prop->type != ACPI_TYPE_STRING ||
+ obj->type != ACPI_TYPE_STRING) {
+ acpi_handle_warn(handle,
+ "wrong object types %u and %u\n",
+ prop->type, obj->type);
+ continue;
+ }
+
+ status = acpi_evaluate_object_typed(handle, obj->string.pointer,
+ NULL, &buf,
+ ACPI_TYPE_BUFFER);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_warn(handle,
+ "can't evaluate \"%*pE\" as buffer\n",
+ obj->string.length,
+ obj->string.pointer);
+ continue;
+ }
+
+ package->type = ACPI_TYPE_PACKAGE;
+ package->package.elements = prop;
+ package->package.count = 2;
+
+ buf_obj = buf.pointer;
+
+ /* Replace the string object with a buffer object */
+ obj->type = ACPI_TYPE_BUFFER;
+ obj->buffer.length = buf_obj->buffer.length;
+ obj->buffer.pointer = buf_obj->buffer.pointer;
+
+ props->bufs[i] = buf.pointer;
+ package++;
+ (*count)++;
+ }
+
+ if (*count)
+ list_add(&props->list, &data->properties);
+ else
+ kvfree(props);
+}
+
+static bool acpi_extract_properties(acpi_handle scope, union acpi_object *desc,
struct acpi_device_data *data)
{
int i;
@@ -350,7 +490,8 @@ static bool acpi_extract_properties(const union acpi_object *desc,
/* Look for the device properties GUID. */
for (i = 0; i < desc->package.count; i += 2) {
- const union acpi_object *guid, *properties;
+ const union acpi_object *guid;
+ union acpi_object *properties;
guid = &desc->package.elements[i];
properties = &desc->package.elements[i + 1];
@@ -364,6 +505,12 @@ static bool acpi_extract_properties(const union acpi_object *desc,
properties->type != ACPI_TYPE_PACKAGE)
break;
+ if (guid_equal((guid_t *)guid->buffer.pointer,
+ &buffer_prop_guid)) {
+ acpi_data_add_buffer_props(scope, data, properties);
+ continue;
+ }
+
if (!acpi_is_property_guid((guid_t *)guid->buffer.pointer))
continue;
@@ -410,7 +557,7 @@ void acpi_init_properties(struct acpi_device *adev)
if (ACPI_FAILURE(status))
goto out;
- if (acpi_extract_properties(buf.pointer, &adev->data)) {
+ if (acpi_extract_properties(adev->handle, buf.pointer, &adev->data)) {
adev->data.pointer = buf.pointer;
if (acpi_of)
acpi_init_of_compatible(adev);
@@ -422,6 +569,9 @@ void acpi_init_properties(struct acpi_device *adev)
if (!adev->data.pointer) {
acpi_handle_debug(adev->handle, "Invalid _DSD data, skipping\n");
ACPI_FREE(buf.pointer);
+ } else {
+ if (!acpi_tie_nondev_subnodes(&adev->data))
+ acpi_untie_nondev_subnodes(&adev->data);
}
out:
@@ -438,8 +588,14 @@ static void acpi_free_device_properties(struct list_head *list)
struct acpi_device_properties *props, *tmp;
list_for_each_entry_safe(props, tmp, list, list) {
+ u32 i;
+
list_del(&props->list);
- kfree(props);
+ /* Buffer data properties were separately allocated */
+ if (props->bufs)
+ for (i = 0; i < props->properties->package.count; i++)
+ ACPI_FREE(props->bufs[i]);
+ kvfree(props);
}
}
@@ -462,6 +618,7 @@ static void acpi_destroy_nondev_subnodes(struct list_head *list)
void acpi_free_properties(struct acpi_device *adev)
{
+ acpi_untie_nondev_subnodes(&adev->data);
acpi_destroy_nondev_subnodes(&adev->data.subnodes);
ACPI_FREE((void *)adev->data.pointer);
adev->data.of_compatible = NULL;
@@ -633,6 +790,58 @@ acpi_fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
return NULL;
}
+static int acpi_get_ref_args(struct fwnode_reference_args *args,
+ struct fwnode_handle *ref_fwnode,
+ const union acpi_object **element,
+ const union acpi_object *end, size_t num_args)
+{
+ u32 nargs = 0, i;
+
+ /*
+ * Find the referred data extension node under the
+ * referred device node.
+ */
+ for (; *element < end && (*element)->type == ACPI_TYPE_STRING;
+ (*element)++) {
+ const char *child_name = (*element)->string.pointer;
+
+ ref_fwnode = acpi_fwnode_get_named_child_node(ref_fwnode, child_name);
+ if (!ref_fwnode)
+ return -EINVAL;
+ }
+
+ /*
+ * Assume the following integer elements are all args. Stop counting on
+ * the first reference or at the end of the package arguments. If an
+ * element is neither a reference nor an integer, return an error; we
+ * can't parse it.
+ */
+ for (i = 0; (*element) + i < end && i < num_args; i++) {
+ acpi_object_type type = (*element)[i].type;
+
+ if (type == ACPI_TYPE_LOCAL_REFERENCE)
+ break;
+
+ if (type == ACPI_TYPE_INTEGER)
+ nargs++;
+ else
+ return -EINVAL;
+ }
+
+ if (nargs > NR_FWNODE_REFERENCE_ARGS)
+ return -EINVAL;
+
+ if (args) {
+ args->fwnode = ref_fwnode;
+ args->nargs = nargs;
+ for (i = 0; i < nargs; i++)
+ args->args[i] = (*element)[i].integer.value;
+ }
+
+ (*element) += nargs;
+
+ return 0;
+}
+
/**
* __acpi_node_get_property_reference - returns handle to the referenced object
* @fwnode: Firmware node to get the property from
@@ -686,11 +895,9 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
if (ret)
return ret == -EINVAL ? -ENOENT : -EINVAL;
- /*
- * The simplest case is when the value is a single reference. Just
- * return that reference then.
- */
- if (obj->type == ACPI_TYPE_LOCAL_REFERENCE) {
+ switch (obj->type) {
+ case ACPI_TYPE_LOCAL_REFERENCE:
+ /* Plain single reference without arguments. */
if (index)
return -ENOENT;
@@ -701,19 +908,21 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
args->fwnode = acpi_fwnode_handle(device);
args->nargs = 0;
return 0;
+ case ACPI_TYPE_PACKAGE:
+ /*
+ * If it is not a single reference, then it is a package of
+ * references followed by number of ints as follows:
+ *
+ * Package () { REF, INT, REF, INT, INT }
+ *
+ * The index argument is then used to determine which reference
+ * the caller wants (along with the arguments).
+ */
+ break;
+ default:
+ return -EINVAL;
}
- /*
- * If it is not a single reference, then it is a package of
- * references followed by number of ints as follows:
- *
- * Package () { REF, INT, REF, INT, INT }
- *
- * The index argument is then used to determine which reference
- * the caller wants (along with the arguments).
- */
- if (obj->type != ACPI_TYPE_PACKAGE)
- return -EINVAL;
if (index >= obj->package.count)
return -ENOENT;
@@ -721,66 +930,30 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
end = element + obj->package.count;
while (element < end) {
- u32 nargs, i;
-
- if (element->type == ACPI_TYPE_LOCAL_REFERENCE) {
- struct fwnode_handle *ref_fwnode;
-
+ switch (element->type) {
+ case ACPI_TYPE_LOCAL_REFERENCE:
device = acpi_fetch_acpi_dev(element->reference.handle);
if (!device)
return -EINVAL;
- nargs = 0;
element++;
- /*
- * Find the referred data extension node under the
- * referred device node.
- */
- for (ref_fwnode = acpi_fwnode_handle(device);
- element < end && element->type == ACPI_TYPE_STRING;
- element++) {
- ref_fwnode = acpi_fwnode_get_named_child_node(
- ref_fwnode, element->string.pointer);
- if (!ref_fwnode)
- return -EINVAL;
- }
-
- /*
- * Assume the following integer elements are all args.
- * Stop counting on the first reference or end of the
- * package arguments. In case of neither reference,
- * nor integer, return an error, we can't parse it.
- */
- for (i = 0; element + i < end && i < num_args; i++) {
- int type = element[i].type;
-
- if (type == ACPI_TYPE_LOCAL_REFERENCE)
- break;
- if (type == ACPI_TYPE_INTEGER)
- nargs++;
- else
- return -EINVAL;
- }
-
- if (nargs > NR_FWNODE_REFERENCE_ARGS)
- return -EINVAL;
-
- if (idx == index) {
- args->fwnode = ref_fwnode;
- args->nargs = nargs;
- for (i = 0; i < nargs; i++)
- args->args[i] = element[i].integer.value;
+ ret = acpi_get_ref_args(idx == index ? args : NULL,
+ acpi_fwnode_handle(device),
+ &element, end, num_args);
+ if (ret < 0)
+ return ret;
+ if (idx == index)
return 0;
- }
- element += nargs;
- } else if (element->type == ACPI_TYPE_INTEGER) {
+ break;
+ case ACPI_TYPE_INTEGER:
if (idx == index)
return -ENOENT;
element++;
- } else {
+ break;
+ default:
return -EINVAL;
}
@@ -852,67 +1025,36 @@ static int acpi_data_prop_read_single(const struct acpi_device_data *data,
return ret;
}
-static int acpi_copy_property_array_u8(const union acpi_object *items, u8 *val,
- size_t nval)
-{
- int i;
-
- for (i = 0; i < nval; i++) {
- if (items[i].type != ACPI_TYPE_INTEGER)
- return -EPROTO;
- if (items[i].integer.value > U8_MAX)
- return -EOVERFLOW;
-
- val[i] = items[i].integer.value;
- }
- return 0;
-}
-
-static int acpi_copy_property_array_u16(const union acpi_object *items,
- u16 *val, size_t nval)
-{
- int i;
-
- for (i = 0; i < nval; i++) {
- if (items[i].type != ACPI_TYPE_INTEGER)
- return -EPROTO;
- if (items[i].integer.value > U16_MAX)
- return -EOVERFLOW;
-
- val[i] = items[i].integer.value;
- }
- return 0;
-}
-
-static int acpi_copy_property_array_u32(const union acpi_object *items,
- u32 *val, size_t nval)
-{
- int i;
-
- for (i = 0; i < nval; i++) {
- if (items[i].type != ACPI_TYPE_INTEGER)
- return -EPROTO;
- if (items[i].integer.value > U32_MAX)
- return -EOVERFLOW;
-
- val[i] = items[i].integer.value;
- }
- return 0;
-}
-
-static int acpi_copy_property_array_u64(const union acpi_object *items,
- u64 *val, size_t nval)
-{
- int i;
-
- for (i = 0; i < nval; i++) {
- if (items[i].type != ACPI_TYPE_INTEGER)
- return -EPROTO;
-
- val[i] = items[i].integer.value;
- }
- return 0;
-}
+#define acpi_copy_property_array_uint(items, val, nval) \
+ ({ \
+ typeof(items) __items = items; \
+ typeof(val) __val = val; \
+ typeof(nval) __nval = nval; \
+ size_t i; \
+ int ret = 0; \
+ \
+ for (i = 0; i < __nval; i++) { \
+ if (__items->type == ACPI_TYPE_BUFFER) { \
+ __val[i] = __items->buffer.pointer[i]; \
+ continue; \
+ } \
+ if (__items[i].type != ACPI_TYPE_INTEGER) { \
+ ret = -EPROTO; \
+ break; \
+ } \
+ if (__items[i].integer.value > _Generic(__val, \
+ u8 *: U8_MAX, \
+ u16 *: U16_MAX, \
+ u32 *: U32_MAX, \
+ u64 *: U64_MAX)) { \
+ ret = -EOVERFLOW; \
+ break; \
+ } \
+ \
+ __val[i] = __items[i].integer.value; \
+ } \
+ ret; \
+ })
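/*
 * Editor's aside, not part of the patch: a standalone sketch of the
 * _Generic dispatch used in the macro above. The upper bound is chosen
 * from the *pointer type* of the destination at compile time; the helper
 * name copy_one_u16() is hypothetical.
 */
static int copy_one_u16(const union acpi_object *item, u16 *dst)
{
	if (item->type != ACPI_TYPE_INTEGER)
		return -EPROTO;
	if (item->integer.value > _Generic(dst, u8 *: U8_MAX, u16 *: U16_MAX,
					   u32 *: U32_MAX, u64 *: U64_MAX))
		return -EOVERFLOW;	/* U16_MAX is selected here */
	*dst = item->integer.value;
	return 0;
}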
static int acpi_copy_property_array_string(const union acpi_object *items,
char **val, size_t nval)
@@ -954,31 +1096,54 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
}
ret = acpi_data_get_property_array(data, propname, ACPI_TYPE_ANY, &obj);
+ if (ret && proptype >= DEV_PROP_U8 && proptype <= DEV_PROP_U64)
+ ret = acpi_data_get_property(data, propname, ACPI_TYPE_BUFFER,
+ &obj);
if (ret)
return ret;
- if (!val)
+ if (!val) {
+ if (obj->type == ACPI_TYPE_BUFFER)
+ return obj->buffer.length;
+
return obj->package.count;
+ }
- if (proptype != DEV_PROP_STRING && nval > obj->package.count)
- return -EOVERFLOW;
+ switch (proptype) {
+ case DEV_PROP_STRING:
+ break;
+ case DEV_PROP_U8 ... DEV_PROP_U64:
+ if (obj->type == ACPI_TYPE_BUFFER) {
+ if (nval > obj->buffer.length)
+ return -EOVERFLOW;
+ break;
+ }
+ fallthrough;
+ default:
+ if (nval > obj->package.count)
+ return -EOVERFLOW;
+ break;
+ }
if (nval == 0)
return -EINVAL;
- items = obj->package.elements;
+ if (obj->type != ACPI_TYPE_BUFFER)
+ items = obj->package.elements;
+ else
+ items = obj;
switch (proptype) {
case DEV_PROP_U8:
- ret = acpi_copy_property_array_u8(items, (u8 *)val, nval);
+ ret = acpi_copy_property_array_uint(items, (u8 *)val, nval);
break;
case DEV_PROP_U16:
- ret = acpi_copy_property_array_u16(items, (u16 *)val, nval);
+ ret = acpi_copy_property_array_uint(items, (u16 *)val, nval);
break;
case DEV_PROP_U32:
- ret = acpi_copy_property_array_u32(items, (u32 *)val, nval);
+ ret = acpi_copy_property_array_uint(items, (u32 *)val, nval);
break;
case DEV_PROP_U64:
- ret = acpi_copy_property_array_u64(items, (u64 *)val, nval);
+ ret = acpi_copy_property_array_uint(items, (u64 *)val, nval);
break;
case DEV_PROP_STRING:
ret = acpi_copy_property_array_string(
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index b100e6ca9bb4..42cec8120f18 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1722,6 +1722,7 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
{"INT3515", },
/* Non-conforming _HID for Cirrus Logic already released */
{"CLSA0100", },
+ {"CLSA0101", },
/*
* Some ACPI devs contain SerialBus resources even though they are not
* attached to a serial bus at all.
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 3a9773a09e19..5a7b8065e77f 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -291,6 +291,44 @@ int acpi_get_local_address(acpi_handle handle, u32 *addr)
}
EXPORT_SYMBOL(acpi_get_local_address);
+#define ACPI_MAX_SUB_BUF_SIZE 9
+
+const char *acpi_get_subsystem_id(acpi_handle handle)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ acpi_status status;
+ const char *sub;
+ size_t len;
+
+ status = acpi_evaluate_object(handle, METHOD_NAME__SUB, NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_debug(handle, "Reading ACPI _SUB failed: %#x\n", status);
+ return ERR_PTR(-ENODATA);
+ }
+
+ obj = buffer.pointer;
+ if (obj->type == ACPI_TYPE_STRING) {
+ len = strlen(obj->string.pointer);
+ if (len < ACPI_MAX_SUB_BUF_SIZE && len > 0) {
+ sub = kstrdup(obj->string.pointer, GFP_KERNEL);
+ if (!sub)
+ sub = ERR_PTR(-ENOMEM);
+ } else {
+ acpi_handle_err(handle, "ACPI _SUB Length %zu is Invalid\n", len);
+ sub = ERR_PTR(-ENODATA);
+ }
+ } else {
+ acpi_handle_warn(handle, "Warning ACPI _SUB did not return a string\n");
+ sub = ERR_PTR(-ENODATA);
+ }
+
+ acpi_os_free(buffer.pointer);
+
+ return sub;
+}
+EXPORT_SYMBOL_GPL(acpi_get_subsystem_id);
+
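+/*
+ * Editor's sketch of a caller (assumed, not part of this patch): the
+ * returned string is allocated with kstrdup(), so the caller owns it and
+ * must kfree() it; errors come back as ERR_PTR() values. The function
+ * name my_driver_read_sub() is hypothetical.
+ *
+ *	static int my_driver_read_sub(struct acpi_device *adev)
+ *	{
+ *		const char *sub = acpi_get_subsystem_id(adev->handle);
+ *
+ *		if (IS_ERR(sub))
+ *			return PTR_ERR(sub);
+ *
+ *		dev_info(&adev->dev, "subsystem id: %s\n", sub);
+ *		kfree(sub);
+ *		return 0;
+ *	}
+ */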
acpi_status
acpi_evaluate_reference(acpi_handle handle,
acpi_string pathname,
diff --git a/drivers/acpi/viot.c b/drivers/acpi/viot.c
index 647f11cf165d..6132092dab2a 100644
--- a/drivers/acpi/viot.c
+++ b/drivers/acpi/viot.c
@@ -88,7 +88,7 @@ static int __init viot_get_pci_iommu_fwnode(struct viot_iommu *viommu,
return -ENODEV;
}
- fwnode = pdev->dev.fwnode;
+ fwnode = dev_fwnode(&pdev->dev);
if (!fwnode) {
/*
* PCI devices aren't necessarily described by ACPI. Create a
@@ -101,7 +101,7 @@ static int __init viot_get_pci_iommu_fwnode(struct viot_iommu *viommu,
}
set_primary_fwnode(&pdev->dev, fwnode);
}
- viommu->fwnode = pdev->dev.fwnode;
+ viommu->fwnode = dev_fwnode(&pdev->dev);
pci_dev_put(pdev);
return 0;
}
@@ -314,7 +314,7 @@ static int viot_dev_iommu_init(struct device *dev, struct viot_iommu *viommu,
return -ENODEV;
/* We're not translating ourself */
- if (viommu->fwnode == dev->fwnode)
+ if (device_match_fwnode(dev, viommu->fwnode))
return -EINVAL;
ops = iommu_ops_from_fwnode(viommu->fwnode);
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 5649a0371a1f..51f4e1c5cd01 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -213,7 +213,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
if (mm) {
mmap_read_lock(mm);
- vma = alloc->vma;
+ vma = vma_lookup(mm, alloc->vma_addr);
}
if (!vma && need_mm) {
@@ -313,16 +313,22 @@ err_no_vma:
static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
struct vm_area_struct *vma)
{
- if (vma)
- alloc->vma_vm_mm = vma->vm_mm;
+ unsigned long vm_start = 0;
+
/*
- * If we see alloc->vma is not NULL, buffer data structures set up
- * completely. Look at smp_rmb side binder_alloc_get_vma.
- * We also want to guarantee new alloc->vma_vm_mm is always visible
- * if alloc->vma is set.
+ * Allow clearing the vma while holding just the read lock, so that
+ * munmap can downgrade the write lock before freeing and closing the
+ * file using binder_alloc_vma_close().
*/
- smp_wmb();
- alloc->vma = vma;
+ if (vma) {
+ vm_start = vma->vm_start;
+ alloc->vma_vm_mm = vma->vm_mm;
+ mmap_assert_write_locked(alloc->vma_vm_mm);
+ } else {
+ mmap_assert_locked(alloc->vma_vm_mm);
+ }
+
+ alloc->vma_addr = vm_start;
}
static inline struct vm_area_struct *binder_alloc_get_vma(
@@ -330,11 +336,9 @@ static inline struct vm_area_struct *binder_alloc_get_vma(
{
struct vm_area_struct *vma = NULL;
- if (alloc->vma) {
- /* Look at description in binder_alloc_set_vma */
- smp_rmb();
- vma = alloc->vma;
- }
+ if (alloc->vma_addr)
+ vma = vma_lookup(alloc->vma_vm_mm, alloc->vma_addr);
+
return vma;
}
@@ -398,12 +402,15 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
size_t size, data_offsets_size;
int ret;
+ mmap_read_lock(alloc->vma_vm_mm);
if (!binder_alloc_get_vma(alloc)) {
+ mmap_read_unlock(alloc->vma_vm_mm);
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
"%d: binder_alloc_buf, no vma\n",
alloc->pid);
return ERR_PTR(-ESRCH);
}
+ mmap_read_unlock(alloc->vma_vm_mm);
data_offsets_size = ALIGN(data_size, sizeof(void *)) +
ALIGN(offsets_size, sizeof(void *));
@@ -817,7 +824,8 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
buffers = 0;
mutex_lock(&alloc->mutex);
- BUG_ON(alloc->vma);
+ BUG_ON(alloc->vma_addr &&
+ vma_lookup(alloc->vma_vm_mm, alloc->vma_addr));
while ((n = rb_first(&alloc->allocated_buffers))) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -924,17 +932,25 @@ void binder_alloc_print_pages(struct seq_file *m,
* Make sure the binder_alloc is fully initialized, otherwise we might
* read inconsistent state.
*/
- if (binder_alloc_get_vma(alloc) != NULL) {
- for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
- page = &alloc->pages[i];
- if (!page->page_ptr)
- free++;
- else if (list_empty(&page->lru))
- active++;
- else
- lru++;
- }
+
+ mmap_read_lock(alloc->vma_vm_mm);
+ if (binder_alloc_get_vma(alloc) == NULL) {
+ mmap_read_unlock(alloc->vma_vm_mm);
+ goto uninitialized;
}
+
+ mmap_read_unlock(alloc->vma_vm_mm);
+ for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+ page = &alloc->pages[i];
+ if (!page->page_ptr)
+ free++;
+ else if (list_empty(&page->lru))
+ active++;
+ else
+ lru++;
+ }
+
+uninitialized:
mutex_unlock(&alloc->mutex);
seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
@@ -1084,7 +1100,7 @@ int binder_alloc_shrinker_init(void)
int ret = list_lru_init(&binder_alloc_lru);
if (ret == 0) {
- ret = register_shrinker(&binder_shrinker);
+ ret = register_shrinker(&binder_shrinker, "android-binder");
if (ret)
list_lru_destroy(&binder_alloc_lru);
}
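
For clarity, a minimal sketch (editor's illustration, not part of the patch) of the lookup pattern binder now relies on: instead of dereferencing a cached vm_area_struct pointer, the mm is locked for read and the vma is re-resolved from the recorded start address each time it is needed. The helper name is hypothetical.

static bool my_alloc_vma_still_mapped(struct binder_alloc *alloc)
{
	struct vm_area_struct *vma = NULL;

	mmap_read_lock(alloc->vma_vm_mm);
	if (alloc->vma_addr)
		vma = vma_lookup(alloc->vma_vm_mm, alloc->vma_addr);
	mmap_read_unlock(alloc->vma_vm_mm);

	return vma != NULL;
}
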
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 7dea57a84c79..1e4fd37af5e0 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -100,7 +100,7 @@ struct binder_lru_page {
*/
struct binder_alloc {
struct mutex mutex;
- struct vm_area_struct *vma;
+ unsigned long vma_addr;
struct mm_struct *vma_vm_mm;
void __user *buffer;
struct list_head buffers;
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
index c2b323bc3b3a..43a881073a42 100644
--- a/drivers/android/binder_alloc_selftest.c
+++ b/drivers/android/binder_alloc_selftest.c
@@ -287,7 +287,7 @@ void binder_selftest_alloc(struct binder_alloc *alloc)
if (!binder_selftest_run)
return;
mutex_lock(&binder_selftest_lock);
- if (!binder_selftest_run || !alloc->vma)
+ if (!binder_selftest_run || !alloc->vma_addr)
goto done;
pr_info("STARTED\n");
binder_selftest_alloc_offset(alloc, end_offset, 0);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index ef4508d72c02..7c128c89b454 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2122,6 +2122,7 @@ const char *ata_get_cmd_name(u8 command)
{ ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
{ ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" },
{ ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" },
+ { ATA_CMD_NCQ_NON_DATA, "NCQ NON-DATA" },
{ ATA_CMD_FPDMA_SEND, "SEND FPDMA QUEUED" },
{ ATA_CMD_FPDMA_RECV, "RECEIVE FPDMA QUEUED" },
{ ATA_CMD_PIO_READ, "READ SECTOR(S)" },
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 9b999c0e8c37..29e2f55c6faa 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1060,6 +1060,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
dev->flags |= ATA_DFLAG_NO_UNLOAD;
/* configure max sectors */
+ dev->max_sectors = min(dev->max_sectors, sdev->host->max_sectors);
blk_queue_max_hw_sectors(q, dev->max_sectors);
if (dev->class == ATA_DEV_ATAPI) {
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 03b6ae37a578..6559b606736d 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -683,7 +683,7 @@ static int mpc52xx_ata_probe(struct platform_device *op)
struct bcom_task *dmatsk;
/* Get ipb frequency */
- ipb_freq = mpc5xxx_get_bus_frequency(op->dev.of_node);
+ ipb_freq = mpc5xxx_get_bus_frequency(&op->dev);
if (!ipb_freq) {
dev_err(&op->dev, "could not determine IPB bus frequency\n");
return -ENODEV;
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 81ce81a75fc6..681cb3786794 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -3752,6 +3752,7 @@ static void __exit idt77252_exit(void)
card = idt77252_chain;
dev = card->atmdev;
idt77252_chain = card->next;
+ del_timer_sync(&card->tst_timer);
if (dev->phy->stop)
dev->phy->stop(dev);
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index e19fcab016ba..db1b4b202646 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -248,15 +248,6 @@ config BLK_DEV_NBD
If unsure, say N.
-config BLK_DEV_SX8
- tristate "Promise SATA SX8 support"
- depends on PCI
- help
- Saying Y or M here will enable support for the
- Promise SATA SX8 controllers.
-
- Use devices /dev/sx8/$N and /dev/sx8/$Np$M.
-
config BLK_DEV_RAM
tristate "RAM block device support"
help
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index be631352567e..101612cba303 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -26,8 +26,6 @@ obj-$(CONFIG_SUNVDC) += sunvdc.o
obj-$(CONFIG_BLK_DEV_NBD) += nbd.o
obj-$(CONFIG_VIRTIO_BLK) += virtio_blk.o
-obj-$(CONFIG_BLK_DEV_SX8) += sx8.o
-
obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o
obj-$(CONFIG_XEN_BLKDEV_BACKEND) += xen-blkback/
obj-$(CONFIG_BLK_DEV_DRBD) += drbd/
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 603f6828dd79..7d9db33363de 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -974,25 +974,58 @@ static void drbd_bm_endio(struct bio *bio)
}
}
+/* For the layout, see comment above drbd_md_set_sector_offsets(). */
+static inline sector_t drbd_md_last_bitmap_sector(struct drbd_backing_dev *bdev)
+{
+ switch (bdev->md.meta_dev_idx) {
+ case DRBD_MD_INDEX_INTERNAL:
+ case DRBD_MD_INDEX_FLEX_INT:
+ return bdev->md.md_offset + bdev->md.al_offset - 1;
+ case DRBD_MD_INDEX_FLEX_EXT:
+ default:
+ return bdev->md.md_offset + bdev->md.md_size_sect - 1;
+ }
+}
+
static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_hold(local)
{
struct drbd_device *device = ctx->device;
enum req_op op = ctx->flags & BM_AIO_READ ? REQ_OP_READ : REQ_OP_WRITE;
- struct bio *bio = bio_alloc_bioset(device->ldev->md_bdev, 1, op,
- GFP_NOIO, &drbd_md_io_bio_set);
struct drbd_bitmap *b = device->bitmap;
+ struct bio *bio;
struct page *page;
+ sector_t last_bm_sect;
+ sector_t first_bm_sect;
+ sector_t on_disk_sector;
unsigned int len;
- sector_t on_disk_sector =
- device->ldev->md.md_offset + device->ldev->md.bm_offset;
- on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9);
+ first_bm_sect = device->ldev->md.md_offset + device->ldev->md.bm_offset;
+ on_disk_sector = first_bm_sect + (((sector_t)page_nr) << (PAGE_SHIFT-SECTOR_SHIFT));
/* this might happen with very small
* flexible external meta data device,
* or with PAGE_SIZE > 4k */
- len = min_t(unsigned int, PAGE_SIZE,
- (drbd_md_last_sector(device->ldev) - on_disk_sector + 1)<<9);
+ last_bm_sect = drbd_md_last_bitmap_sector(device->ldev);
+ if (first_bm_sect <= on_disk_sector && last_bm_sect >= on_disk_sector) {
+ sector_t len_sect = last_bm_sect - on_disk_sector + 1;
+ if (len_sect < PAGE_SIZE / SECTOR_SIZE)
+ len = (unsigned int)len_sect * SECTOR_SIZE;
+ else
+ len = PAGE_SIZE;
+ } else {
+ if (__ratelimit(&drbd_ratelimit_state)) {
+ drbd_err(device, "Invalid offset during on-disk bitmap access: "
+ "page idx %u, sector %llu\n", page_nr, on_disk_sector);
+ }
+ ctx->error = -EIO;
+ bm_set_page_io_err(b->bm_pages[page_nr]);
+ if (atomic_dec_and_test(&ctx->in_flight)) {
+ ctx->done = 1;
+ wake_up(&device->misc_wait);
+ kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy);
+ }
+ return;
+ }
/* serialize IO on this page */
bm_page_lock_io(device, page_nr);
@@ -1007,6 +1040,8 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
bm_store_page_idx(page, page_nr);
} else
page = b->bm_pages[page_nr];
+ bio = bio_alloc_bioset(device->ldev->md_bdev, 1, op, GFP_NOIO,
+ &drbd_md_io_bio_set);
bio->bi_iter.bi_sector = on_disk_sector;
/* bio_add_page of a single page to an empty bio will always succeed,
* according to api. Do we want to assert that? */
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 6d8dd14458c6..8f7f144e54f3 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -1608,7 +1608,7 @@ void drbd_submit_bio(struct bio *bio)
{
struct drbd_device *device = bio->bi_bdev->bd_disk->private_data;
- blk_queue_split(&bio);
+ bio = bio_split_to_limits(bio);
/*
* what we "blindly" assume:
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index e3c0ba93c1a3..ad92192c7d61 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -979,6 +979,11 @@ loop_set_status_from_info(struct loop_device *lo,
lo->lo_offset = info->lo_offset;
lo->lo_sizelimit = info->lo_sizelimit;
+
+ /* loff_t vars have been assigned __u64 */
+ if (lo->lo_offset < 0 || lo->lo_sizelimit < 0)
+ return -EOVERFLOW;
+
memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
lo->lo_file_name[LO_NAME_SIZE-1] = 0;
lo->lo_flags = info->lo_flags;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index f5d098a148cb..2a709daefbc4 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -11,6 +11,8 @@
* (part of code stolen from loop.c)
*/
+#define pr_fmt(fmt) "nbd: " fmt
+
#include <linux/major.h>
#include <linux/blkdev.h>
@@ -1950,7 +1952,7 @@ again:
test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) ||
!refcount_inc_not_zero(&nbd->refs)) {
mutex_unlock(&nbd_index_mutex);
- pr_err("nbd: device at index %d is going down\n",
+ pr_err("device at index %d is going down\n",
index);
return -EINVAL;
}
@@ -1961,7 +1963,7 @@ again:
if (!nbd) {
nbd = nbd_dev_add(index, 2);
if (IS_ERR(nbd)) {
- pr_err("nbd: failed to add new device\n");
+ pr_err("failed to add new device\n");
return PTR_ERR(nbd);
}
}
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 8b224ede2e33..c451c477978f 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -201,6 +201,22 @@ static bool g_use_per_node_hctx;
module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, 0444);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
+static bool g_memory_backed;
+module_param_named(memory_backed, g_memory_backed, bool, 0444);
+MODULE_PARM_DESC(memory_backed, "Create a memory-backed block device. Default: false");
+
+static bool g_discard;
+module_param_named(discard, g_discard, bool, 0444);
+MODULE_PARM_DESC(discard, "Support discard operations (requires memory-backed null_blk device). Default: false");
+
+static unsigned long g_cache_size;
+module_param_named(cache_size, g_cache_size, ulong, 0444);
+MODULE_PARM_DESC(mbps, "Cache size in MiB for memory-backed device. Default: 0 (none)");
+
+static unsigned int g_mbps;
+module_param_named(mbps, g_mbps, uint, 0444);
+MODULE_PARM_DESC(mbps, "Limit maximum bandwidth (in MiB/s). Default: 0 (no limit)");
+
static bool g_zoned;
module_param_named(zoned, g_zoned, bool, S_IRUGO);
MODULE_PARM_DESC(zoned, "Make device as a host-managed zoned block device. Default: false");
@@ -409,6 +425,8 @@ NULLB_DEVICE_ATTR(zone_nr_conv, uint, NULL);
NULLB_DEVICE_ATTR(zone_max_open, uint, NULL);
NULLB_DEVICE_ATTR(zone_max_active, uint, NULL);
NULLB_DEVICE_ATTR(virt_boundary, bool, NULL);
+NULLB_DEVICE_ATTR(no_sched, bool, NULL);
+NULLB_DEVICE_ATTR(shared_tag_bitmap, bool, NULL);
static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
@@ -532,6 +550,8 @@ static struct configfs_attribute *nullb_device_attrs[] = {
&nullb_device_attr_zone_max_open,
&nullb_device_attr_zone_max_active,
&nullb_device_attr_virt_boundary,
+ &nullb_device_attr_no_sched,
+ &nullb_device_attr_shared_tag_bitmap,
NULL,
};
@@ -588,7 +608,13 @@ nullb_group_drop_item(struct config_group *group, struct config_item *item)
static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
return snprintf(page, PAGE_SIZE,
- "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size,zone_capacity,zone_nr_conv,zone_max_open,zone_max_active,blocksize,max_sectors,virt_boundary\n");
+ "badblocks,blocking,blocksize,cache_size,"
+ "completion_nsec,discard,home_node,hw_queue_depth,"
+ "irqmode,max_sectors,mbps,memory_backed,no_sched,"
+ "poll_queues,power,queue_mode,shared_tag_bitmap,size,"
+ "submit_queues,use_per_node_hctx,virt_boundary,zoned,"
+ "zone_capacity,zone_max_active,zone_max_open,"
+ "zone_nr_conv,zone_size\n");
}
CONFIGFS_ATTR_RO(memb_group_, features);
@@ -650,6 +676,10 @@ static struct nullb_device *null_alloc_dev(void)
dev->irqmode = g_irqmode;
dev->hw_queue_depth = g_hw_queue_depth;
dev->blocking = g_blocking;
+ dev->memory_backed = g_memory_backed;
+ dev->discard = g_discard;
+ dev->cache_size = g_cache_size;
+ dev->mbps = g_mbps;
dev->use_per_node_hctx = g_use_per_node_hctx;
dev->zoned = g_zoned;
dev->zone_size = g_zone_size;
@@ -658,6 +688,8 @@ static struct nullb_device *null_alloc_dev(void)
dev->zone_max_open = g_zone_max_open;
dev->zone_max_active = g_zone_max_active;
dev->virt_boundary = g_virt_boundary;
+ dev->no_sched = g_no_sched;
+ dev->shared_tag_bitmap = g_shared_tag_bitmap;
return dev;
}
@@ -1655,7 +1687,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
static void cleanup_queue(struct nullb_queue *nq)
{
- kfree(nq->tag_map);
+ bitmap_free(nq->tag_map);
kfree(nq->cmds);
}
@@ -1782,14 +1814,13 @@ static const struct block_device_operations null_rq_ops = {
static int setup_commands(struct nullb_queue *nq)
{
struct nullb_cmd *cmd;
- int i, tag_size;
+ int i;
nq->cmds = kcalloc(nq->queue_depth, sizeof(*cmd), GFP_KERNEL);
if (!nq->cmds)
return -ENOMEM;
- tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
- nq->tag_map = kcalloc(tag_size, sizeof(unsigned long), GFP_KERNEL);
+ nq->tag_map = bitmap_zalloc(nq->queue_depth, GFP_KERNEL);
if (!nq->tag_map) {
kfree(nq->cmds);
return -ENOMEM;
@@ -1866,31 +1897,48 @@ static int null_gendisk_register(struct nullb *nullb)
static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
{
+ unsigned int flags = BLK_MQ_F_SHOULD_MERGE;
+ int hw_queues, numa_node;
+ unsigned int queue_depth;
int poll_queues;
+ if (nullb) {
+ hw_queues = nullb->dev->submit_queues;
+ poll_queues = nullb->dev->poll_queues;
+ queue_depth = nullb->dev->hw_queue_depth;
+ numa_node = nullb->dev->home_node;
+ if (nullb->dev->no_sched)
+ flags |= BLK_MQ_F_NO_SCHED;
+ if (nullb->dev->shared_tag_bitmap)
+ flags |= BLK_MQ_F_TAG_HCTX_SHARED;
+ if (nullb->dev->blocking)
+ flags |= BLK_MQ_F_BLOCKING;
+ } else {
+ hw_queues = g_submit_queues;
+ poll_queues = g_poll_queues;
+ queue_depth = g_hw_queue_depth;
+ numa_node = g_home_node;
+ if (g_no_sched)
+ flags |= BLK_MQ_F_NO_SCHED;
+ if (g_shared_tag_bitmap)
+ flags |= BLK_MQ_F_TAG_HCTX_SHARED;
+ if (g_blocking)
+ flags |= BLK_MQ_F_BLOCKING;
+ }
+
set->ops = &null_mq_ops;
- set->nr_hw_queues = nullb ? nullb->dev->submit_queues :
- g_submit_queues;
- poll_queues = nullb ? nullb->dev->poll_queues : g_poll_queues;
- if (poll_queues)
- set->nr_hw_queues += poll_queues;
- set->queue_depth = nullb ? nullb->dev->hw_queue_depth :
- g_hw_queue_depth;
- set->numa_node = nullb ? nullb->dev->home_node : g_home_node;
set->cmd_size = sizeof(struct nullb_cmd);
- set->flags = BLK_MQ_F_SHOULD_MERGE;
- if (g_no_sched)
- set->flags |= BLK_MQ_F_NO_SCHED;
- if (g_shared_tag_bitmap)
- set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
+ set->flags = flags;
set->driver_data = nullb;
- if (poll_queues)
+ set->nr_hw_queues = hw_queues;
+ set->queue_depth = queue_depth;
+ set->numa_node = numa_node;
+ if (poll_queues) {
+ set->nr_hw_queues += poll_queues;
set->nr_maps = 3;
- else
+ } else {
set->nr_maps = 1;
-
- if ((nullb && nullb->dev->blocking) || g_blocking)
- set->flags |= BLK_MQ_F_BLOCKING;
+ }
return blk_mq_alloc_tag_set(set);
}
@@ -2042,8 +2090,13 @@ static int null_add_dev(struct nullb_device *dev)
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q);
mutex_lock(&lock);
- nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
- dev->index = nullb->index;
+ rv = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
+ if (rv < 0) {
+ mutex_unlock(&lock);
+ goto out_cleanup_zone;
+ }
+ nullb->index = rv;
+ dev->index = rv;
mutex_unlock(&lock);
blk_queue_logical_block_size(nullb->q, dev->blocksize);
@@ -2069,7 +2122,7 @@ static int null_add_dev(struct nullb_device *dev)
rv = null_gendisk_register(nullb);
if (rv)
- goto out_cleanup_zone;
+ goto out_ida_free;
mutex_lock(&lock);
list_add_tail(&nullb->list, &nullb_list);
@@ -2078,6 +2131,9 @@ static int null_add_dev(struct nullb_device *dev)
pr_info("disk %s created\n", nullb->disk_name);
return 0;
+
+out_ida_free:
+ ida_free(&nullb_indexes, nullb->index);
out_cleanup_zone:
null_free_zoned_dev(dev);
out_cleanup_disk:
diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h
index 6fbf0a1b2622..94ff68052b1e 100644
--- a/drivers/block/null_blk/null_blk.h
+++ b/drivers/block/null_blk/null_blk.h
@@ -113,6 +113,8 @@ struct nullb_device {
bool discard; /* if support discard */
bool zoned; /* if device is zoned */
bool virt_boundary; /* virtual boundary on/off for the device */
+ bool no_sched; /* no IO scheduler for the device */
+ bool shared_tag_bitmap; /* use hostwide shared tags */
};
struct nullb {
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 01a15dbd9cde..4cea3b08087e 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2399,7 +2399,7 @@ static void pkt_submit_bio(struct bio *bio)
struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->queue->queuedata;
struct bio *split;
- blk_queue_split(&bio);
+ bio = bio_split_to_limits(bio);
pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
(unsigned long long)bio->bi_iter.bi_sector,
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index d1e0fefec90b..e1d080f680ed 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -586,7 +586,7 @@ static void ps3vram_submit_bio(struct bio *bio)
dev_dbg(&dev->core, "%s\n", __func__);
- blk_queue_split(&bio);
+ bio = bio_split_to_limits(bio);
spin_lock_irq(&priv->lock);
busy = !bio_list_empty(&priv->list);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 0d8ec2fe5740..f9e39301c4af 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1297,7 +1297,7 @@ static void rbd_osd_submit(struct ceph_osd_request *osd_req)
dout("%s osd_req %p for obj_req %p objno %llu %llu~%llu\n",
__func__, osd_req, obj_req, obj_req->ex.oe_objno,
obj_req->ex.oe_off, obj_req->ex.oe_len);
- ceph_osdc_start_request(osd_req->r_osdc, osd_req, false);
+ ceph_osdc_start_request(osd_req->r_osdc, osd_req);
}
/*
@@ -2081,7 +2081,7 @@ static int rbd_object_map_update(struct rbd_obj_request *obj_req, u64 snap_id,
if (ret)
return ret;
- ceph_osdc_start_request(osdc, req, false);
+ ceph_osdc_start_request(osdc, req);
return 0;
}
@@ -4768,7 +4768,7 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
if (ret)
goto out_req;
- ceph_osdc_start_request(osdc, req, false);
+ ceph_osdc_start_request(osdc, req);
ret = ceph_osdc_wait_request(osdc, req);
if (ret >= 0)
ceph_copy_from_page_vector(pages, buf, 0, ret);
diff --git a/drivers/block/rnbd/rnbd-clt-sysfs.c b/drivers/block/rnbd/rnbd-clt-sysfs.c
index 2be5d87a3ca6..e7c7d9a68168 100644
--- a/drivers/block/rnbd/rnbd-clt-sysfs.c
+++ b/drivers/block/rnbd/rnbd-clt-sysfs.c
@@ -376,7 +376,7 @@ static ssize_t rnbd_clt_resize_dev_store(struct kobject *kobj,
if (ret)
return ret;
- ret = rnbd_clt_resize_disk(dev, (size_t)sectors);
+ ret = rnbd_clt_resize_disk(dev, sectors);
if (ret)
return ret;
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index b8d9e2824d9c..04da33a22ef4 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -68,39 +68,18 @@ static inline bool rnbd_clt_get_dev(struct rnbd_clt_dev *dev)
return refcount_inc_not_zero(&dev->refcount);
}
-static int rnbd_clt_set_dev_attr(struct rnbd_clt_dev *dev,
- const struct rnbd_msg_open_rsp *rsp)
+static void rnbd_clt_change_capacity(struct rnbd_clt_dev *dev,
+ sector_t new_nsectors)
{
- struct rnbd_clt_session *sess = dev->sess;
-
- if (!rsp->logical_block_size)
- return -EINVAL;
-
- dev->device_id = le32_to_cpu(rsp->device_id);
- dev->nsectors = le64_to_cpu(rsp->nsectors);
- dev->logical_block_size = le16_to_cpu(rsp->logical_block_size);
- dev->physical_block_size = le16_to_cpu(rsp->physical_block_size);
- dev->max_discard_sectors = le32_to_cpu(rsp->max_discard_sectors);
- dev->discard_granularity = le32_to_cpu(rsp->discard_granularity);
- dev->discard_alignment = le32_to_cpu(rsp->discard_alignment);
- dev->secure_discard = le16_to_cpu(rsp->secure_discard);
- dev->wc = !!(rsp->cache_policy & RNBD_WRITEBACK);
- dev->fua = !!(rsp->cache_policy & RNBD_FUA);
-
- dev->max_hw_sectors = sess->max_io_size / SECTOR_SIZE;
- dev->max_segments = sess->max_segments;
-
- return 0;
-}
+ if (get_capacity(dev->gd) == new_nsectors)
+ return;
-static int rnbd_clt_change_capacity(struct rnbd_clt_dev *dev,
- size_t new_nsectors)
-{
- rnbd_clt_info(dev, "Device size changed from %zu to %zu sectors\n",
- dev->nsectors, new_nsectors);
- dev->nsectors = new_nsectors;
- set_capacity_and_notify(dev->gd, dev->nsectors);
- return 0;
+ /*
+ * If the size changed, we need to revalidate it
+ */
+ rnbd_clt_info(dev, "Device size changed from %llu to %llu sectors\n",
+ get_capacity(dev->gd), new_nsectors);
+ set_capacity_and_notify(dev->gd, new_nsectors);
}
static int process_msg_open_rsp(struct rnbd_clt_dev *dev,
@@ -119,19 +98,16 @@ static int process_msg_open_rsp(struct rnbd_clt_dev *dev,
if (dev->dev_state == DEV_STATE_MAPPED_DISCONNECTED) {
u64 nsectors = le64_to_cpu(rsp->nsectors);
- /*
- * If the device was remapped and the size changed in the
- * meantime we need to revalidate it
- */
- if (dev->nsectors != nsectors)
- rnbd_clt_change_capacity(dev, nsectors);
+ rnbd_clt_change_capacity(dev, nsectors);
gd_kobj = &disk_to_dev(dev->gd)->kobj;
kobject_uevent(gd_kobj, KOBJ_ONLINE);
rnbd_clt_info(dev, "Device online, device remapped successfully\n");
}
- err = rnbd_clt_set_dev_attr(dev, rsp);
- if (err)
+ if (!rsp->logical_block_size) {
+ err = -EINVAL;
goto out;
+ }
+ dev->device_id = le32_to_cpu(rsp->device_id);
dev->dev_state = DEV_STATE_MAPPED;
out:
@@ -140,7 +116,7 @@ out:
return err;
}
-int rnbd_clt_resize_disk(struct rnbd_clt_dev *dev, size_t newsize)
+int rnbd_clt_resize_disk(struct rnbd_clt_dev *dev, sector_t newsize)
{
int ret = 0;
@@ -150,7 +126,7 @@ int rnbd_clt_resize_disk(struct rnbd_clt_dev *dev, size_t newsize)
ret = -ENOENT;
goto out;
}
- ret = rnbd_clt_change_capacity(dev, newsize);
+ rnbd_clt_change_capacity(dev, newsize);
out:
mutex_unlock(&dev->lock);
@@ -507,6 +483,11 @@ static void msg_open_conf(struct work_struct *work)
struct rnbd_msg_open_rsp *rsp = iu->buf;
struct rnbd_clt_dev *dev = iu->dev;
int errno = iu->errno;
+ bool from_map = false;
+
+ /* INIT state is only triggered from rnbd_clt_map_device */
+ if (dev->dev_state == DEV_STATE_INIT)
+ from_map = true;
if (errno) {
rnbd_clt_err(dev,
@@ -523,7 +504,9 @@ static void msg_open_conf(struct work_struct *work)
send_msg_close(dev, device_id, RTRS_PERMIT_NOWAIT);
}
}
- kfree(rsp);
+ /* In the map scenario, rsp is freed by rnbd_clt_map_device */
+ if (!from_map)
+ kfree(rsp);
wake_up_iu_comp(iu, errno);
rnbd_put_iu(dev->sess, iu);
rnbd_clt_put_dev(dev);
@@ -942,7 +925,7 @@ static int rnbd_client_open(struct block_device *block_device, fmode_t mode)
{
struct rnbd_clt_dev *dev = block_device->bd_disk->private_data;
- if (dev->read_only && (mode & FMODE_WRITE))
+ if (get_disk_ro(dev->gd) && (mode & FMODE_WRITE))
return -EPERM;
if (dev->dev_state == DEV_STATE_UNMAPPED ||
@@ -963,10 +946,10 @@ static int rnbd_client_getgeo(struct block_device *block_device,
struct hd_geometry *geo)
{
u64 size;
- struct rnbd_clt_dev *dev;
+ struct rnbd_clt_dev *dev = block_device->bd_disk->private_data;
+ struct queue_limits *limit = &dev->queue->limits;
- dev = block_device->bd_disk->private_data;
- size = dev->size * (dev->logical_block_size / SECTOR_SIZE);
+ size = dev->size * (limit->logical_block_size / SECTOR_SIZE);
geo->cylinders = size >> 6; /* size/64 */
geo->heads = 4;
geo->sectors = 16;
@@ -1350,11 +1333,15 @@ static void rnbd_init_mq_hw_queues(struct rnbd_clt_dev *dev)
}
}
-static void setup_request_queue(struct rnbd_clt_dev *dev)
+static void setup_request_queue(struct rnbd_clt_dev *dev,
+ struct rnbd_msg_open_rsp *rsp)
{
- blk_queue_logical_block_size(dev->queue, dev->logical_block_size);
- blk_queue_physical_block_size(dev->queue, dev->physical_block_size);
- blk_queue_max_hw_sectors(dev->queue, dev->max_hw_sectors);
+ blk_queue_logical_block_size(dev->queue,
+ le16_to_cpu(rsp->logical_block_size));
+ blk_queue_physical_block_size(dev->queue,
+ le16_to_cpu(rsp->physical_block_size));
+ blk_queue_max_hw_sectors(dev->queue,
+ dev->sess->max_io_size / SECTOR_SIZE);
/*
* we don't support discards to "discontiguous" segments
@@ -1362,21 +1349,27 @@ static void setup_request_queue(struct rnbd_clt_dev *dev)
*/
blk_queue_max_discard_segments(dev->queue, 1);
- blk_queue_max_discard_sectors(dev->queue, dev->max_discard_sectors);
- dev->queue->limits.discard_granularity = dev->discard_granularity;
- dev->queue->limits.discard_alignment = dev->discard_alignment;
- if (dev->secure_discard)
+ blk_queue_max_discard_sectors(dev->queue,
+ le32_to_cpu(rsp->max_discard_sectors));
+ dev->queue->limits.discard_granularity =
+ le32_to_cpu(rsp->discard_granularity);
+ dev->queue->limits.discard_alignment =
+ le32_to_cpu(rsp->discard_alignment);
+ if (le16_to_cpu(rsp->secure_discard))
blk_queue_max_secure_erase_sectors(dev->queue,
- dev->max_discard_sectors);
+ le32_to_cpu(rsp->max_discard_sectors));
blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, dev->queue);
blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, dev->queue);
- blk_queue_max_segments(dev->queue, dev->max_segments);
+ blk_queue_max_segments(dev->queue, dev->sess->max_segments);
blk_queue_io_opt(dev->queue, dev->sess->max_io_size);
blk_queue_virt_boundary(dev->queue, SZ_4K - 1);
- blk_queue_write_cache(dev->queue, dev->wc, dev->fua);
+ blk_queue_write_cache(dev->queue,
+ !!(rsp->cache_policy & RNBD_WRITEBACK),
+ !!(rsp->cache_policy & RNBD_FUA));
}
-static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx)
+static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev,
+ struct rnbd_msg_open_rsp *rsp, int idx)
{
int err;
@@ -1388,19 +1381,15 @@ static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx)
dev->gd->private_data = dev;
snprintf(dev->gd->disk_name, sizeof(dev->gd->disk_name), "rnbd%d",
idx);
- pr_debug("disk_name=%s, capacity=%zu\n",
+ pr_debug("disk_name=%s, capacity=%llu\n",
dev->gd->disk_name,
- dev->nsectors * (dev->logical_block_size / SECTOR_SIZE)
- );
+ le64_to_cpu(rsp->nsectors) *
+ (le16_to_cpu(rsp->logical_block_size) / SECTOR_SIZE));
- set_capacity(dev->gd, dev->nsectors);
+ set_capacity(dev->gd, le64_to_cpu(rsp->nsectors));
- if (dev->access_mode == RNBD_ACCESS_RO) {
- dev->read_only = true;
+ if (dev->access_mode == RNBD_ACCESS_RO)
set_disk_ro(dev->gd, true);
- } else {
- dev->read_only = false;
- }
/*
* Network device does not need rotational
@@ -1413,11 +1402,13 @@ static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx)
return err;
}
-static int rnbd_client_setup_device(struct rnbd_clt_dev *dev)
+static int rnbd_client_setup_device(struct rnbd_clt_dev *dev,
+ struct rnbd_msg_open_rsp *rsp)
{
int idx = dev->clt_device_id;
- dev->size = dev->nsectors * dev->logical_block_size;
+ dev->size = le64_to_cpu(rsp->nsectors) *
+ le16_to_cpu(rsp->logical_block_size);
dev->gd = blk_mq_alloc_disk(&dev->sess->tag_set, dev);
if (IS_ERR(dev->gd))
@@ -1425,8 +1416,8 @@ static int rnbd_client_setup_device(struct rnbd_clt_dev *dev)
dev->queue = dev->gd->queue;
rnbd_init_mq_hw_queues(dev);
- setup_request_queue(dev);
- return rnbd_clt_setup_gen_disk(dev, idx);
+ setup_request_queue(dev, rsp);
+ return rnbd_clt_setup_gen_disk(dev, rsp, idx);
}
static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
@@ -1562,7 +1553,14 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
{
struct rnbd_clt_session *sess;
struct rnbd_clt_dev *dev;
- int ret;
+ int ret, errno;
+ struct rnbd_msg_open_rsp *rsp;
+ struct rnbd_msg_open msg;
+ struct rnbd_iu *iu;
+ struct kvec vec = {
+ .iov_base = &msg,
+ .iov_len = sizeof(msg)
+ };
if (exists_devpath(pathname, sessname))
return ERR_PTR(-EEXIST);
@@ -1582,17 +1580,47 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
ret = -EEXIST;
goto put_dev;
}
- ret = send_msg_open(dev, RTRS_PERMIT_WAIT);
+
+ rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
+ if (!rsp) {
+ ret = -ENOMEM;
+ goto del_dev;
+ }
+
+ iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT);
+ if (!iu) {
+ ret = -ENOMEM;
+ kfree(rsp);
+ goto del_dev;
+ }
+ iu->buf = rsp;
+ iu->dev = dev;
+ sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp));
+
+ msg.hdr.type = cpu_to_le16(RNBD_MSG_OPEN);
+ msg.access_mode = dev->access_mode;
+ strscpy(msg.dev_name, dev->pathname, sizeof(msg.dev_name));
+
+ WARN_ON(!rnbd_clt_get_dev(dev));
+ ret = send_usr_msg(sess->rtrs, READ, iu,
+ &vec, sizeof(*rsp), iu->sgt.sgl, 1,
+ msg_open_conf, &errno, RTRS_PERMIT_WAIT);
+ if (ret) {
+ rnbd_clt_put_dev(dev);
+ rnbd_put_iu(sess, iu);
+ } else {
+ ret = errno;
+ }
if (ret) {
rnbd_clt_err(dev,
"map_device: failed, can't open remote device, err: %d\n",
ret);
- goto del_dev;
+ goto put_iu;
}
mutex_lock(&dev->lock);
pr_debug("Opened remote device: session=%s, path='%s'\n",
sess->sessname, pathname);
- ret = rnbd_client_setup_device(dev);
+ ret = rnbd_client_setup_device(dev, rsp);
if (ret) {
rnbd_clt_err(dev,
"map_device: Failed to configure device, err: %d\n",
@@ -1602,21 +1630,30 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
}
rnbd_clt_info(dev,
- "map_device: Device mapped as %s (nsectors: %zu, logical_block_size: %d, physical_block_size: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, wc: %d, fua: %d)\n",
- dev->gd->disk_name, dev->nsectors,
- dev->logical_block_size, dev->physical_block_size,
- dev->max_discard_sectors,
- dev->discard_granularity, dev->discard_alignment,
- dev->secure_discard, dev->max_segments,
- dev->max_hw_sectors, dev->wc, dev->fua);
+ "map_device: Device mapped as %s (nsectors: %llu, logical_block_size: %d, physical_block_size: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, wc: %d, fua: %d)\n",
+ dev->gd->disk_name, le64_to_cpu(rsp->nsectors),
+ le16_to_cpu(rsp->logical_block_size),
+ le16_to_cpu(rsp->physical_block_size),
+ le32_to_cpu(rsp->max_discard_sectors),
+ le32_to_cpu(rsp->discard_granularity),
+ le32_to_cpu(rsp->discard_alignment),
+ le16_to_cpu(rsp->secure_discard),
+ sess->max_segments, sess->max_io_size / SECTOR_SIZE,
+ !!(rsp->cache_policy & RNBD_WRITEBACK),
+ !!(rsp->cache_policy & RNBD_FUA));
mutex_unlock(&dev->lock);
+ kfree(rsp);
+ rnbd_put_iu(sess, iu);
rnbd_clt_put_sess(sess);
return dev;
send_close:
send_msg_close(dev, dev->device_id, RTRS_PERMIT_WAIT);
+put_iu:
+ kfree(rsp);
+ rnbd_put_iu(sess, iu);
del_dev:
delete_dev(dev);
put_dev:
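
The client-side refactor above stops mirroring the device geometry in struct rnbd_clt_dev and instead reads it straight out of the wire-format struct rnbd_msg_open_rsp, which is why each access now goes through le64_to_cpu()/le16_to_cpu(). As a rough userspace sketch of that decode pattern (illustrative only; the struct layout and values below are invented, not the RNBD wire format):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* hypothetical little-endian on-wire reply, loosely modelled on the patch */
struct open_rsp {
	uint64_t nsectors;            /* __le64 on the wire */
	uint16_t logical_block_size;  /* __le16 on the wire */
};

int main(void)
{
	struct open_rsp rsp = {
		.nsectors = htole64(2097152),          /* 1 GiB in 512 B sectors */
		.logical_block_size = htole16(512),
	};
	/* convert at the point of use, as the driver now does */
	uint64_t nsectors = le64toh(rsp.nsectors);
	uint64_t bytes = nsectors * le16toh(rsp.logical_block_size);

	printf("capacity: %llu sectors, %llu bytes\n",
	       (unsigned long long)nsectors, (unsigned long long)bytes);
	return 0;
}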
diff --git a/drivers/block/rnbd/rnbd-clt.h b/drivers/block/rnbd/rnbd-clt.h
index 2e2e8c4a85c1..a48e040abe63 100644
--- a/drivers/block/rnbd/rnbd-clt.h
+++ b/drivers/block/rnbd/rnbd-clt.h
@@ -106,6 +106,7 @@ struct rnbd_queue {
};
struct rnbd_clt_dev {
+ struct kobject kobj;
struct rnbd_clt_session *sess;
struct request_queue *queue;
struct rnbd_queue *hw_queues;
@@ -114,27 +115,14 @@ struct rnbd_clt_dev {
u32 clt_device_id;
struct mutex lock;
enum rnbd_clt_dev_state dev_state;
+ refcount_t refcount;
char *pathname;
enum rnbd_access_mode access_mode;
u32 nr_poll_queues;
- bool read_only;
- bool wc;
- bool fua;
- u32 max_hw_sectors;
- u32 max_discard_sectors;
- u32 discard_granularity;
- u32 discard_alignment;
- u16 secure_discard;
- u16 physical_block_size;
- u16 logical_block_size;
- u16 max_segments;
- size_t nsectors;
u64 size; /* device size in bytes */
struct list_head list;
struct gendisk *gd;
- struct kobject kobj;
char *blk_symlink_name;
- refcount_t refcount;
struct work_struct unmap_on_rmmod_work;
};
@@ -150,7 +138,7 @@ int rnbd_clt_unmap_device(struct rnbd_clt_dev *dev, bool force,
const struct attribute *sysfs_self);
int rnbd_clt_remap_device(struct rnbd_clt_dev *dev);
-int rnbd_clt_resize_disk(struct rnbd_clt_dev *dev, size_t newsize);
+int rnbd_clt_resize_disk(struct rnbd_clt_dev *dev, sector_t newsize);
/* rnbd-clt-sysfs.c */
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index 0713014bf423..5e08da277ddf 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -224,7 +224,6 @@ void rnbd_destroy_sess_dev(struct rnbd_srv_sess_dev *sess_dev, bool keep_id)
wait_for_completion(&dc); /* wait for inflights to drop to zero */
rnbd_dev_close(sess_dev->rnbd_dev);
- list_del(&sess_dev->sess_list);
mutex_lock(&sess_dev->dev->lock);
list_del(&sess_dev->dev_list);
if (sess_dev->open_flags & FMODE_WRITE)
@@ -239,14 +238,14 @@ void rnbd_destroy_sess_dev(struct rnbd_srv_sess_dev *sess_dev, bool keep_id)
static void destroy_sess(struct rnbd_srv_session *srv_sess)
{
- struct rnbd_srv_sess_dev *sess_dev, *tmp;
+ struct rnbd_srv_sess_dev *sess_dev;
+ unsigned long index;
- if (list_empty(&srv_sess->sess_dev_list))
+ if (xa_empty(&srv_sess->index_idr))
goto out;
mutex_lock(&srv_sess->lock);
- list_for_each_entry_safe(sess_dev, tmp, &srv_sess->sess_dev_list,
- sess_list)
+ xa_for_each(&srv_sess->index_idr, index, sess_dev)
rnbd_srv_destroy_dev_session_sysfs(sess_dev);
mutex_unlock(&srv_sess->lock);
@@ -281,7 +280,6 @@ static int create_sess(struct rtrs_srv_sess *rtrs)
srv_sess->queue_depth = rtrs_srv_get_queue_depth(rtrs);
xa_init_flags(&srv_sess->index_idr, XA_FLAGS_ALLOC);
- INIT_LIST_HEAD(&srv_sess->sess_dev_list);
mutex_init(&srv_sess->lock);
mutex_lock(&sess_lock);
list_add(&srv_sess->list, &sess_list);
@@ -323,10 +321,11 @@ void rnbd_srv_sess_dev_force_close(struct rnbd_srv_sess_dev *sess_dev,
{
struct rnbd_srv_session *sess = sess_dev->sess;
- sess_dev->keep_id = true;
/* It is already started to close by client's close message. */
if (!mutex_trylock(&sess->lock))
return;
+
+ sess_dev->keep_id = true;
/* first remove sysfs itself to avoid deadlock */
sysfs_remove_file_self(&sess_dev->kobj, &attr->attr);
rnbd_srv_destroy_dev_session_sysfs(sess_dev);
@@ -666,11 +665,12 @@ static struct rnbd_srv_sess_dev *
find_srv_sess_dev(struct rnbd_srv_session *srv_sess, const char *dev_name)
{
struct rnbd_srv_sess_dev *sess_dev;
+ unsigned long index;
- if (list_empty(&srv_sess->sess_dev_list))
+ if (xa_empty(&srv_sess->index_idr))
return NULL;
- list_for_each_entry(sess_dev, &srv_sess->sess_dev_list, sess_list)
+ xa_for_each(&srv_sess->index_idr, index, sess_dev)
if (!strcmp(sess_dev->pathname, dev_name))
return sess_dev;
@@ -780,8 +780,6 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
list_add(&srv_sess_dev->dev_list, &srv_dev->sess_dev_list);
mutex_unlock(&srv_dev->lock);
- list_add(&srv_sess_dev->sess_list, &srv_sess->sess_dev_list);
-
rnbd_srv_info(srv_sess_dev, "Opened device '%s'\n", srv_dev->id);
kfree(full_path);
diff --git a/drivers/block/rnbd/rnbd-srv.h b/drivers/block/rnbd/rnbd-srv.h
index 6926f9069dc4..081bceaf4ae9 100644
--- a/drivers/block/rnbd/rnbd-srv.h
+++ b/drivers/block/rnbd/rnbd-srv.h
@@ -25,8 +25,6 @@ struct rnbd_srv_session {
int queue_depth;
struct xarray index_idr;
- /* List of struct rnbd_srv_sess_dev */
- struct list_head sess_dev_list;
struct mutex lock;
u8 ver;
};
@@ -48,8 +46,6 @@ struct rnbd_srv_dev {
struct rnbd_srv_sess_dev {
/* Entry inside rnbd_srv_dev struct */
struct list_head dev_list;
- /* Entry inside rnbd_srv_session struct */
- struct list_head sess_list;
struct rnbd_dev *rnbd_dev;
struct rnbd_srv_session *sess;
struct rnbd_srv_dev *dev;
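
On the server side the per-session sess_dev_list is dropped entirely; every place that walked it now iterates the index_idr xarray that already tracks the same sess_devs. A minimal kernel-style sketch of that iteration pattern (illustrative only, mirrors destroy_sess() above and does not build outside the tree):

#include <linux/mutex.h>
#include <linux/xarray.h>

static void close_all_sess_devs(struct rnbd_srv_session *srv_sess)
{
	struct rnbd_srv_sess_dev *sess_dev;
	unsigned long index;

	if (xa_empty(&srv_sess->index_idr))
		return;

	mutex_lock(&srv_sess->lock);
	/* xa_for_each() visits every present entry; index receives its id */
	xa_for_each(&srv_sess->index_idr, index, sess_dev)
		rnbd_srv_destroy_dev_session_sysfs(sess_dev);
	mutex_unlock(&srv_sess->lock);
}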
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
deleted file mode 100644
index 0e1a484cab0b..000000000000
--- a/drivers/block/sx8.c
+++ /dev/null
@@ -1,1582 +0,0 @@
-/*
- * sx8.c: Driver for Promise SATA SX8 looks-like-I2O hardware
- *
- * Copyright 2004-2005 Red Hat, Inc.
- *
- * Author/maintainer: Jeff Garzik <jgarzik@pobox.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/blk-mq.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/compiler.h>
-#include <linux/workqueue.h>
-#include <linux/bitops.h>
-#include <linux/delay.h>
-#include <linux/ktime.h>
-#include <linux/hdreg.h>
-#include <linux/dma-mapping.h>
-#include <linux/completion.h>
-#include <linux/scatterlist.h>
-#include <asm/io.h>
-#include <linux/uaccess.h>
-
-#if 0
-#define CARM_DEBUG
-#define CARM_VERBOSE_DEBUG
-#else
-#undef CARM_DEBUG
-#undef CARM_VERBOSE_DEBUG
-#endif
-#undef CARM_NDEBUG
-
-#define DRV_NAME "sx8"
-#define DRV_VERSION "1.0"
-#define PFX DRV_NAME ": "
-
-MODULE_AUTHOR("Jeff Garzik");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Promise SATA SX8 block driver");
-MODULE_VERSION(DRV_VERSION);
-
-/*
- * SX8 hardware has a single message queue for all ATA ports.
- * When this driver was written, the hardware (firmware?) would
- * corrupt data eventually, if more than one request was outstanding.
- * As one can imagine, having 8 ports bottlenecking on a single
- * command hurts performance.
- *
- * Based on user reports, later versions of the hardware (firmware?)
- * seem to be able to survive with more than one command queued.
- *
- * Therefore, we default to the safe option -- 1 command -- but
- * allow the user to increase this.
- *
- * SX8 should be able to support up to ~60 queued commands (CARM_MAX_REQ),
- * but problems seem to occur when you exceed ~30, even on newer hardware.
- */
-static int max_queue = 1;
-module_param(max_queue, int, 0444);
-MODULE_PARM_DESC(max_queue, "Maximum number of queued commands. (min==1, max==30, safe==1)");
-
-
-#define NEXT_RESP(idx) ((idx + 1) % RMSG_Q_LEN)
-
-/* 0xf is just arbitrary, non-zero noise; this is sorta like poisoning */
-#define TAG_ENCODE(tag) (((tag) << 16) | 0xf)
-#define TAG_DECODE(tag) (((tag) >> 16) & 0x1f)
-#define TAG_VALID(tag) ((((tag) & 0xf) == 0xf) && (TAG_DECODE(tag) < 32))
-
-/* note: prints function name for you */
-#ifdef CARM_DEBUG
-#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
-#ifdef CARM_VERBOSE_DEBUG
-#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
-#else
-#define VPRINTK(fmt, args...)
-#endif /* CARM_VERBOSE_DEBUG */
-#else
-#define DPRINTK(fmt, args...)
-#define VPRINTK(fmt, args...)
-#endif /* CARM_DEBUG */
-
-#ifdef CARM_NDEBUG
-#define assert(expr)
-#else
-#define assert(expr) \
- if(unlikely(!(expr))) { \
- printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
- #expr, __FILE__, __func__, __LINE__); \
- }
-#endif
-
-/* defines only for the constants which don't work well as enums */
-struct carm_host;
-
-enum {
- /* adapter-wide limits */
- CARM_MAX_PORTS = 8,
- CARM_SHM_SIZE = (4096 << 7),
- CARM_MINORS_PER_MAJOR = 256 / CARM_MAX_PORTS,
- CARM_MAX_WAIT_Q = CARM_MAX_PORTS + 1,
-
- /* command message queue limits */
- CARM_MAX_REQ = 64, /* max command msgs per host */
- CARM_MSG_LOW_WATER = (CARM_MAX_REQ / 4), /* refill mark */
-
- /* S/G limits, host-wide and per-request */
- CARM_MAX_REQ_SG = 32, /* max s/g entries per request */
- CARM_MAX_HOST_SG = 600, /* max s/g entries per host */
- CARM_SG_LOW_WATER = (CARM_MAX_HOST_SG / 4), /* re-fill mark */
-
- /* hardware registers */
- CARM_IHQP = 0x1c,
- CARM_INT_STAT = 0x10, /* interrupt status */
- CARM_INT_MASK = 0x14, /* interrupt mask */
- CARM_HMUC = 0x18, /* host message unit control */
- RBUF_ADDR_LO = 0x20, /* response msg DMA buf low 32 bits */
- RBUF_ADDR_HI = 0x24, /* response msg DMA buf high 32 bits */
- RBUF_BYTE_SZ = 0x28,
- CARM_RESP_IDX = 0x2c,
- CARM_CMS0 = 0x30, /* command message size reg 0 */
- CARM_LMUC = 0x48,
- CARM_HMPHA = 0x6c,
- CARM_INITC = 0xb5,
-
- /* bits in CARM_INT_{STAT,MASK} */
- INT_RESERVED = 0xfffffff0,
- INT_WATCHDOG = (1 << 3), /* watchdog timer */
- INT_Q_OVERFLOW = (1 << 2), /* cmd msg q overflow */
- INT_Q_AVAILABLE = (1 << 1), /* cmd msg q has free space */
- INT_RESPONSE = (1 << 0), /* response msg available */
- INT_ACK_MASK = INT_WATCHDOG | INT_Q_OVERFLOW,
- INT_DEF_MASK = INT_RESERVED | INT_Q_OVERFLOW |
- INT_RESPONSE,
-
- /* command messages, and related register bits */
- CARM_HAVE_RESP = 0x01,
- CARM_MSG_READ = 1,
- CARM_MSG_WRITE = 2,
- CARM_MSG_VERIFY = 3,
- CARM_MSG_GET_CAPACITY = 4,
- CARM_MSG_FLUSH = 5,
- CARM_MSG_IOCTL = 6,
- CARM_MSG_ARRAY = 8,
- CARM_MSG_MISC = 9,
- CARM_CME = (1 << 2),
- CARM_RME = (1 << 1),
- CARM_WZBC = (1 << 0),
- CARM_RMI = (1 << 0),
- CARM_Q_FULL = (1 << 3),
- CARM_MSG_SIZE = 288,
- CARM_Q_LEN = 48,
-
- /* CARM_MSG_IOCTL messages */
- CARM_IOC_SCAN_CHAN = 5, /* scan channels for devices */
- CARM_IOC_GET_TCQ = 13, /* get tcq/ncq depth */
- CARM_IOC_SET_TCQ = 14, /* set tcq/ncq depth */
-
- IOC_SCAN_CHAN_NODEV = 0x1f,
- IOC_SCAN_CHAN_OFFSET = 0x40,
-
- /* CARM_MSG_ARRAY messages */
- CARM_ARRAY_INFO = 0,
-
- ARRAY_NO_EXIST = (1 << 31),
-
- /* response messages */
- RMSG_SZ = 8, /* sizeof(struct carm_response) */
- RMSG_Q_LEN = 48, /* resp. msg list length */
- RMSG_OK = 1, /* bit indicating msg was successful */
- /* length of entire resp. msg buffer */
- RBUF_LEN = RMSG_SZ * RMSG_Q_LEN,
-
- PDC_SHM_SIZE = (4096 << 7), /* length of entire h/w buffer */
-
- /* CARM_MSG_MISC messages */
- MISC_GET_FW_VER = 2,
- MISC_ALLOC_MEM = 3,
- MISC_SET_TIME = 5,
-
- /* MISC_GET_FW_VER feature bits */
- FW_VER_4PORT = (1 << 2), /* 1=4 ports, 0=8 ports */
- FW_VER_NON_RAID = (1 << 1), /* 1=non-RAID firmware, 0=RAID */
- FW_VER_ZCR = (1 << 0), /* zero channel RAID (whatever that is) */
-
- /* carm_host flags */
- FL_NON_RAID = FW_VER_NON_RAID,
- FL_4PORT = FW_VER_4PORT,
- FL_FW_VER_MASK = (FW_VER_NON_RAID | FW_VER_4PORT),
- FL_DYN_MAJOR = (1 << 17),
-};
-
-enum {
- CARM_SG_BOUNDARY = 0xffffUL, /* s/g segment boundary */
-};
-
-enum scatter_gather_types {
- SGT_32BIT = 0,
- SGT_64BIT = 1,
-};
-
-enum host_states {
- HST_INVALID, /* invalid state; never used */
- HST_ALLOC_BUF, /* setting up master SHM area */
- HST_ERROR, /* we never leave here */
- HST_PORT_SCAN, /* start dev scan */
- HST_DEV_SCAN_START, /* start per-device probe */
- HST_DEV_SCAN, /* continue per-device probe */
- HST_DEV_ACTIVATE, /* activate devices we found */
- HST_PROBE_FINISHED, /* probe is complete */
- HST_PROBE_START, /* initiate probe */
- HST_SYNC_TIME, /* tell firmware what time it is */
- HST_GET_FW_VER, /* get firmware version, adapter port cnt */
-};
-
-#ifdef CARM_DEBUG
-static const char *state_name[] = {
- "HST_INVALID",
- "HST_ALLOC_BUF",
- "HST_ERROR",
- "HST_PORT_SCAN",
- "HST_DEV_SCAN_START",
- "HST_DEV_SCAN",
- "HST_DEV_ACTIVATE",
- "HST_PROBE_FINISHED",
- "HST_PROBE_START",
- "HST_SYNC_TIME",
- "HST_GET_FW_VER",
-};
-#endif
-
-struct carm_port {
- unsigned int port_no;
- struct gendisk *disk;
- struct carm_host *host;
-
- /* attached device characteristics */
- u64 capacity;
- char name[41];
- u16 dev_geom_head;
- u16 dev_geom_sect;
- u16 dev_geom_cyl;
-};
-
-struct carm_request {
- int n_elem;
- unsigned int msg_type;
- unsigned int msg_subtype;
- unsigned int msg_bucket;
- struct scatterlist sg[CARM_MAX_REQ_SG];
-};
-
-struct carm_host {
- unsigned long flags;
- void __iomem *mmio;
- void *shm;
- dma_addr_t shm_dma;
-
- int major;
- int id;
- char name[32];
-
- spinlock_t lock;
- struct pci_dev *pdev;
- unsigned int state;
- u32 fw_ver;
-
- struct blk_mq_tag_set tag_set;
- struct request_queue *oob_q;
- unsigned int n_oob;
-
- unsigned int hw_sg_used;
-
- unsigned int resp_idx;
-
- unsigned int wait_q_prod;
- unsigned int wait_q_cons;
- struct request_queue *wait_q[CARM_MAX_WAIT_Q];
-
- void *msg_base;
- dma_addr_t msg_dma;
-
- int cur_scan_dev;
- unsigned long dev_active;
- unsigned long dev_present;
- struct carm_port port[CARM_MAX_PORTS];
-
- struct work_struct fsm_task;
-
- int probe_err;
- struct completion probe_comp;
-};
-
-struct carm_response {
- __le32 ret_handle;
- __le32 status;
-} __attribute__((packed));
-
-struct carm_msg_sg {
- __le32 start;
- __le32 len;
-} __attribute__((packed));
-
-struct carm_msg_rw {
- u8 type;
- u8 id;
- u8 sg_count;
- u8 sg_type;
- __le32 handle;
- __le32 lba;
- __le16 lba_count;
- __le16 lba_high;
- struct carm_msg_sg sg[32];
-} __attribute__((packed));
-
-struct carm_msg_allocbuf {
- u8 type;
- u8 subtype;
- u8 n_sg;
- u8 sg_type;
- __le32 handle;
- __le32 addr;
- __le32 len;
- __le32 evt_pool;
- __le32 n_evt;
- __le32 rbuf_pool;
- __le32 n_rbuf;
- __le32 msg_pool;
- __le32 n_msg;
- struct carm_msg_sg sg[8];
-} __attribute__((packed));
-
-struct carm_msg_ioctl {
- u8 type;
- u8 subtype;
- u8 array_id;
- u8 reserved1;
- __le32 handle;
- __le32 data_addr;
- u32 reserved2;
-} __attribute__((packed));
-
-struct carm_msg_sync_time {
- u8 type;
- u8 subtype;
- u16 reserved1;
- __le32 handle;
- u32 reserved2;
- __le32 timestamp;
-} __attribute__((packed));
-
-struct carm_msg_get_fw_ver {
- u8 type;
- u8 subtype;
- u16 reserved1;
- __le32 handle;
- __le32 data_addr;
- u32 reserved2;
-} __attribute__((packed));
-
-struct carm_fw_ver {
- __le32 version;
- u8 features;
- u8 reserved1;
- u16 reserved2;
-} __attribute__((packed));
-
-struct carm_array_info {
- __le32 size;
-
- __le16 size_hi;
- __le16 stripe_size;
-
- __le32 mode;
-
- __le16 stripe_blk_sz;
- __le16 reserved1;
-
- __le16 cyl;
- __le16 head;
-
- __le16 sect;
- u8 array_id;
- u8 reserved2;
-
- char name[40];
-
- __le32 array_status;
-
- /* device list continues beyond this point? */
-} __attribute__((packed));
-
-static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
-static void carm_remove_one (struct pci_dev *pdev);
-static int carm_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo);
-
-static const struct pci_device_id carm_pci_tbl[] = {
- { PCI_VENDOR_ID_PROMISE, 0x8000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
- { PCI_VENDOR_ID_PROMISE, 0x8002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
- { } /* terminate list */
-};
-MODULE_DEVICE_TABLE(pci, carm_pci_tbl);
-
-static struct pci_driver carm_driver = {
- .name = DRV_NAME,
- .id_table = carm_pci_tbl,
- .probe = carm_init_one,
- .remove = carm_remove_one,
-};
-
-static const struct block_device_operations carm_bd_ops = {
- .owner = THIS_MODULE,
- .getgeo = carm_bdev_getgeo,
-};
-
-static unsigned int carm_host_id;
-static unsigned long carm_major_alloc;
-
-
-
-static int carm_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
-{
- struct carm_port *port = bdev->bd_disk->private_data;
-
- geo->heads = (u8) port->dev_geom_head;
- geo->sectors = (u8) port->dev_geom_sect;
- geo->cylinders = port->dev_geom_cyl;
- return 0;
-}
-
-static const u32 msg_sizes[] = { 32, 64, 128, CARM_MSG_SIZE };
-
-static inline int carm_lookup_bucket(u32 msg_size)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(msg_sizes); i++)
- if (msg_size <= msg_sizes[i])
- return i;
-
- return -ENOENT;
-}
-
-static void carm_init_buckets(void __iomem *mmio)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(msg_sizes); i++)
- writel(msg_sizes[i], mmio + CARM_CMS0 + (4 * i));
-}
-
-static inline void *carm_ref_msg(struct carm_host *host,
- unsigned int msg_idx)
-{
- return host->msg_base + (msg_idx * CARM_MSG_SIZE);
-}
-
-static inline dma_addr_t carm_ref_msg_dma(struct carm_host *host,
- unsigned int msg_idx)
-{
- return host->msg_dma + (msg_idx * CARM_MSG_SIZE);
-}
-
-static int carm_send_msg(struct carm_host *host,
- struct carm_request *crq, unsigned tag)
-{
- void __iomem *mmio = host->mmio;
- u32 msg = (u32) carm_ref_msg_dma(host, tag);
- u32 cm_bucket = crq->msg_bucket;
- u32 tmp;
- int rc = 0;
-
- VPRINTK("ENTER\n");
-
- tmp = readl(mmio + CARM_HMUC);
- if (tmp & CARM_Q_FULL) {
-#if 0
- tmp = readl(mmio + CARM_INT_MASK);
- tmp |= INT_Q_AVAILABLE;
- writel(tmp, mmio + CARM_INT_MASK);
- readl(mmio + CARM_INT_MASK); /* flush */
-#endif
- DPRINTK("host msg queue full\n");
- rc = -EBUSY;
- } else {
- writel(msg | (cm_bucket << 1), mmio + CARM_IHQP);
- readl(mmio + CARM_IHQP); /* flush */
- }
-
- return rc;
-}
-
-static int carm_array_info (struct carm_host *host, unsigned int array_idx)
-{
- struct carm_msg_ioctl *ioc;
- u32 msg_data;
- dma_addr_t msg_dma;
- struct carm_request *crq;
- struct request *rq;
- int rc;
-
- rq = blk_mq_alloc_request(host->oob_q, REQ_OP_DRV_OUT, 0);
- if (IS_ERR(rq)) {
- rc = -ENOMEM;
- goto err_out;
- }
- crq = blk_mq_rq_to_pdu(rq);
-
- ioc = carm_ref_msg(host, rq->tag);
- msg_dma = carm_ref_msg_dma(host, rq->tag);
- msg_data = (u32) (msg_dma + sizeof(struct carm_array_info));
-
- crq->msg_type = CARM_MSG_ARRAY;
- crq->msg_subtype = CARM_ARRAY_INFO;
- rc = carm_lookup_bucket(sizeof(struct carm_msg_ioctl) +
- sizeof(struct carm_array_info));
- BUG_ON(rc < 0);
- crq->msg_bucket = (u32) rc;
-
- memset(ioc, 0, sizeof(*ioc));
- ioc->type = CARM_MSG_ARRAY;
- ioc->subtype = CARM_ARRAY_INFO;
- ioc->array_id = (u8) array_idx;
- ioc->handle = cpu_to_le32(TAG_ENCODE(rq->tag));
- ioc->data_addr = cpu_to_le32(msg_data);
-
- spin_lock_irq(&host->lock);
- assert(host->state == HST_DEV_SCAN_START ||
- host->state == HST_DEV_SCAN);
- spin_unlock_irq(&host->lock);
-
- DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
- blk_execute_rq_nowait(rq, true);
-
- return 0;
-
-err_out:
- spin_lock_irq(&host->lock);
- host->state = HST_ERROR;
- spin_unlock_irq(&host->lock);
- return rc;
-}
-
-typedef unsigned int (*carm_sspc_t)(struct carm_host *, unsigned int, void *);
-
-static int carm_send_special (struct carm_host *host, carm_sspc_t func)
-{
- struct request *rq;
- struct carm_request *crq;
- struct carm_msg_ioctl *ioc;
- void *mem;
- unsigned int msg_size;
- int rc;
-
- rq = blk_mq_alloc_request(host->oob_q, REQ_OP_DRV_OUT, 0);
- if (IS_ERR(rq))
- return -ENOMEM;
- crq = blk_mq_rq_to_pdu(rq);
-
- mem = carm_ref_msg(host, rq->tag);
-
- msg_size = func(host, rq->tag, mem);
-
- ioc = mem;
- crq->msg_type = ioc->type;
- crq->msg_subtype = ioc->subtype;
- rc = carm_lookup_bucket(msg_size);
- BUG_ON(rc < 0);
- crq->msg_bucket = (u32) rc;
-
- DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
- blk_execute_rq_nowait(rq, true);
-
- return 0;
-}
-
-static unsigned int carm_fill_sync_time(struct carm_host *host,
- unsigned int idx, void *mem)
-{
- struct carm_msg_sync_time *st = mem;
-
- time64_t tv = ktime_get_real_seconds();
-
- memset(st, 0, sizeof(*st));
- st->type = CARM_MSG_MISC;
- st->subtype = MISC_SET_TIME;
- st->handle = cpu_to_le32(TAG_ENCODE(idx));
- st->timestamp = cpu_to_le32(tv);
-
- return sizeof(struct carm_msg_sync_time);
-}
-
-static unsigned int carm_fill_alloc_buf(struct carm_host *host,
- unsigned int idx, void *mem)
-{
- struct carm_msg_allocbuf *ab = mem;
-
- memset(ab, 0, sizeof(*ab));
- ab->type = CARM_MSG_MISC;
- ab->subtype = MISC_ALLOC_MEM;
- ab->handle = cpu_to_le32(TAG_ENCODE(idx));
- ab->n_sg = 1;
- ab->sg_type = SGT_32BIT;
- ab->addr = cpu_to_le32(host->shm_dma + (PDC_SHM_SIZE >> 1));
- ab->len = cpu_to_le32(PDC_SHM_SIZE >> 1);
- ab->evt_pool = cpu_to_le32(host->shm_dma + (16 * 1024));
- ab->n_evt = cpu_to_le32(1024);
- ab->rbuf_pool = cpu_to_le32(host->shm_dma);
- ab->n_rbuf = cpu_to_le32(RMSG_Q_LEN);
- ab->msg_pool = cpu_to_le32(host->shm_dma + RBUF_LEN);
- ab->n_msg = cpu_to_le32(CARM_Q_LEN);
- ab->sg[0].start = cpu_to_le32(host->shm_dma + (PDC_SHM_SIZE >> 1));
- ab->sg[0].len = cpu_to_le32(65536);
-
- return sizeof(struct carm_msg_allocbuf);
-}
-
-static unsigned int carm_fill_scan_channels(struct carm_host *host,
- unsigned int idx, void *mem)
-{
- struct carm_msg_ioctl *ioc = mem;
- u32 msg_data = (u32) (carm_ref_msg_dma(host, idx) +
- IOC_SCAN_CHAN_OFFSET);
-
- memset(ioc, 0, sizeof(*ioc));
- ioc->type = CARM_MSG_IOCTL;
- ioc->subtype = CARM_IOC_SCAN_CHAN;
- ioc->handle = cpu_to_le32(TAG_ENCODE(idx));
- ioc->data_addr = cpu_to_le32(msg_data);
-
- /* fill output data area with "no device" default values */
- mem += IOC_SCAN_CHAN_OFFSET;
- memset(mem, IOC_SCAN_CHAN_NODEV, CARM_MAX_PORTS);
-
- return IOC_SCAN_CHAN_OFFSET + CARM_MAX_PORTS;
-}
-
-static unsigned int carm_fill_get_fw_ver(struct carm_host *host,
- unsigned int idx, void *mem)
-{
- struct carm_msg_get_fw_ver *ioc = mem;
- u32 msg_data = (u32) (carm_ref_msg_dma(host, idx) + sizeof(*ioc));
-
- memset(ioc, 0, sizeof(*ioc));
- ioc->type = CARM_MSG_MISC;
- ioc->subtype = MISC_GET_FW_VER;
- ioc->handle = cpu_to_le32(TAG_ENCODE(idx));
- ioc->data_addr = cpu_to_le32(msg_data);
-
- return sizeof(struct carm_msg_get_fw_ver) +
- sizeof(struct carm_fw_ver);
-}
-
-static inline void carm_push_q (struct carm_host *host, struct request_queue *q)
-{
- unsigned int idx = host->wait_q_prod % CARM_MAX_WAIT_Q;
-
- blk_mq_stop_hw_queues(q);
- VPRINTK("STOPPED QUEUE %p\n", q);
-
- host->wait_q[idx] = q;
- host->wait_q_prod++;
- BUG_ON(host->wait_q_prod == host->wait_q_cons); /* overrun */
-}
-
-static inline struct request_queue *carm_pop_q(struct carm_host *host)
-{
- unsigned int idx;
-
- if (host->wait_q_prod == host->wait_q_cons)
- return NULL;
-
- idx = host->wait_q_cons % CARM_MAX_WAIT_Q;
- host->wait_q_cons++;
-
- return host->wait_q[idx];
-}
-
-static inline void carm_round_robin(struct carm_host *host)
-{
- struct request_queue *q = carm_pop_q(host);
- if (q) {
- blk_mq_start_hw_queues(q);
- VPRINTK("STARTED QUEUE %p\n", q);
- }
-}
-
-static inline enum dma_data_direction carm_rq_dir(struct request *rq)
-{
- return op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
-}
-
-static blk_status_t carm_queue_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *bd)
-{
- struct request_queue *q = hctx->queue;
- struct request *rq = bd->rq;
- struct carm_port *port = q->queuedata;
- struct carm_host *host = port->host;
- struct carm_request *crq = blk_mq_rq_to_pdu(rq);
- struct carm_msg_rw *msg;
- struct scatterlist *sg;
- int i, n_elem = 0, rc;
- unsigned int msg_size;
- u32 tmp;
-
- crq->n_elem = 0;
- sg_init_table(crq->sg, CARM_MAX_REQ_SG);
-
- blk_mq_start_request(rq);
-
- spin_lock_irq(&host->lock);
- if (req_op(rq) == REQ_OP_DRV_OUT)
- goto send_msg;
-
- /* get scatterlist from block layer */
- sg = &crq->sg[0];
- n_elem = blk_rq_map_sg(q, rq, sg);
- if (n_elem <= 0)
- goto out_ioerr;
-
- /* map scatterlist to PCI bus addresses */
- n_elem = dma_map_sg(&host->pdev->dev, sg, n_elem, carm_rq_dir(rq));
- if (n_elem <= 0)
- goto out_ioerr;
-
- /* obey global hardware limit on S/G entries */
- if (host->hw_sg_used >= CARM_MAX_HOST_SG - n_elem)
- goto out_resource;
-
- crq->n_elem = n_elem;
- host->hw_sg_used += n_elem;
-
- /*
- * build read/write message
- */
-
- VPRINTK("build msg\n");
- msg = (struct carm_msg_rw *) carm_ref_msg(host, rq->tag);
-
- if (rq_data_dir(rq) == WRITE) {
- msg->type = CARM_MSG_WRITE;
- crq->msg_type = CARM_MSG_WRITE;
- } else {
- msg->type = CARM_MSG_READ;
- crq->msg_type = CARM_MSG_READ;
- }
-
- msg->id = port->port_no;
- msg->sg_count = n_elem;
- msg->sg_type = SGT_32BIT;
- msg->handle = cpu_to_le32(TAG_ENCODE(rq->tag));
- msg->lba = cpu_to_le32(blk_rq_pos(rq) & 0xffffffff);
- tmp = (blk_rq_pos(rq) >> 16) >> 16;
- msg->lba_high = cpu_to_le16( (u16) tmp );
- msg->lba_count = cpu_to_le16(blk_rq_sectors(rq));
-
- msg_size = sizeof(struct carm_msg_rw) - sizeof(msg->sg);
- for (i = 0; i < n_elem; i++) {
- struct carm_msg_sg *carm_sg = &msg->sg[i];
- carm_sg->start = cpu_to_le32(sg_dma_address(&crq->sg[i]));
- carm_sg->len = cpu_to_le32(sg_dma_len(&crq->sg[i]));
- msg_size += sizeof(struct carm_msg_sg);
- }
-
- rc = carm_lookup_bucket(msg_size);
- BUG_ON(rc < 0);
- crq->msg_bucket = (u32) rc;
-send_msg:
- /*
- * queue read/write message to hardware
- */
- VPRINTK("send msg, tag == %u\n", rq->tag);
- rc = carm_send_msg(host, crq, rq->tag);
- if (rc) {
- host->hw_sg_used -= n_elem;
- goto out_resource;
- }
-
- spin_unlock_irq(&host->lock);
- return BLK_STS_OK;
-out_resource:
- dma_unmap_sg(&host->pdev->dev, &crq->sg[0], n_elem, carm_rq_dir(rq));
- carm_push_q(host, q);
- spin_unlock_irq(&host->lock);
- return BLK_STS_DEV_RESOURCE;
-out_ioerr:
- carm_round_robin(host);
- spin_unlock_irq(&host->lock);
- return BLK_STS_IOERR;
-}
-
-static void carm_handle_array_info(struct carm_host *host,
- struct carm_request *crq, u8 *mem,
- blk_status_t error)
-{
- struct carm_port *port;
- u8 *msg_data = mem + sizeof(struct carm_array_info);
- struct carm_array_info *desc = (struct carm_array_info *) msg_data;
- u64 lo, hi;
- int cur_port;
- size_t slen;
-
- DPRINTK("ENTER\n");
-
- if (error)
- goto out;
- if (le32_to_cpu(desc->array_status) & ARRAY_NO_EXIST)
- goto out;
-
- cur_port = host->cur_scan_dev;
-
- /* should never occur */
- if ((cur_port < 0) || (cur_port >= CARM_MAX_PORTS)) {
- printk(KERN_ERR PFX "BUG: cur_scan_dev==%d, array_id==%d\n",
- cur_port, (int) desc->array_id);
- goto out;
- }
-
- port = &host->port[cur_port];
-
- lo = (u64) le32_to_cpu(desc->size);
- hi = (u64) le16_to_cpu(desc->size_hi);
-
- port->capacity = lo | (hi << 32);
- port->dev_geom_head = le16_to_cpu(desc->head);
- port->dev_geom_sect = le16_to_cpu(desc->sect);
- port->dev_geom_cyl = le16_to_cpu(desc->cyl);
-
- host->dev_active |= (1 << cur_port);
-
- strncpy(port->name, desc->name, sizeof(port->name));
- port->name[sizeof(port->name) - 1] = 0;
- slen = strlen(port->name);
- while (slen && (port->name[slen - 1] == ' ')) {
- port->name[slen - 1] = 0;
- slen--;
- }
-
- printk(KERN_INFO DRV_NAME "(%s): port %u device %Lu sectors\n",
- pci_name(host->pdev), port->port_no,
- (unsigned long long) port->capacity);
- printk(KERN_INFO DRV_NAME "(%s): port %u device \"%s\"\n",
- pci_name(host->pdev), port->port_no, port->name);
-
-out:
- assert(host->state == HST_DEV_SCAN);
- schedule_work(&host->fsm_task);
-}
-
-static void carm_handle_scan_chan(struct carm_host *host,
- struct carm_request *crq, u8 *mem,
- blk_status_t error)
-{
- u8 *msg_data = mem + IOC_SCAN_CHAN_OFFSET;
- unsigned int i, dev_count = 0;
- int new_state = HST_DEV_SCAN_START;
-
- DPRINTK("ENTER\n");
-
- if (error) {
- new_state = HST_ERROR;
- goto out;
- }
-
- /* TODO: scan and support non-disk devices */
- for (i = 0; i < 8; i++)
- if (msg_data[i] == 0) { /* direct-access device (disk) */
- host->dev_present |= (1 << i);
- dev_count++;
- }
-
- printk(KERN_INFO DRV_NAME "(%s): found %u interesting devices\n",
- pci_name(host->pdev), dev_count);
-
-out:
- assert(host->state == HST_PORT_SCAN);
- host->state = new_state;
- schedule_work(&host->fsm_task);
-}
-
-static void carm_handle_generic(struct carm_host *host,
- struct carm_request *crq, blk_status_t error,
- int cur_state, int next_state)
-{
- DPRINTK("ENTER\n");
-
- assert(host->state == cur_state);
- if (error)
- host->state = HST_ERROR;
- else
- host->state = next_state;
- schedule_work(&host->fsm_task);
-}
-
-static inline void carm_handle_resp(struct carm_host *host,
- __le32 ret_handle_le, u32 status)
-{
- u32 handle = le32_to_cpu(ret_handle_le);
- unsigned int msg_idx;
- struct request *rq;
- struct carm_request *crq;
- blk_status_t error = (status == RMSG_OK) ? 0 : BLK_STS_IOERR;
- u8 *mem;
-
- VPRINTK("ENTER, handle == 0x%x\n", handle);
-
- if (unlikely(!TAG_VALID(handle))) {
- printk(KERN_ERR DRV_NAME "(%s): BUG: invalid tag 0x%x\n",
- pci_name(host->pdev), handle);
- return;
- }
-
- msg_idx = TAG_DECODE(handle);
- VPRINTK("tag == %u\n", msg_idx);
-
- rq = blk_mq_tag_to_rq(host->tag_set.tags[0], msg_idx);
- crq = blk_mq_rq_to_pdu(rq);
-
- /* fast path */
- if (likely(crq->msg_type == CARM_MSG_READ ||
- crq->msg_type == CARM_MSG_WRITE)) {
- dma_unmap_sg(&host->pdev->dev, &crq->sg[0], crq->n_elem,
- carm_rq_dir(rq));
- goto done;
- }
-
- mem = carm_ref_msg(host, msg_idx);
-
- switch (crq->msg_type) {
- case CARM_MSG_IOCTL: {
- switch (crq->msg_subtype) {
- case CARM_IOC_SCAN_CHAN:
- carm_handle_scan_chan(host, crq, mem, error);
- goto done;
- default:
- /* unknown / invalid response */
- goto err_out;
- }
- break;
- }
-
- case CARM_MSG_MISC: {
- switch (crq->msg_subtype) {
- case MISC_ALLOC_MEM:
- carm_handle_generic(host, crq, error,
- HST_ALLOC_BUF, HST_SYNC_TIME);
- goto done;
- case MISC_SET_TIME:
- carm_handle_generic(host, crq, error,
- HST_SYNC_TIME, HST_GET_FW_VER);
- goto done;
- case MISC_GET_FW_VER: {
- struct carm_fw_ver *ver = (struct carm_fw_ver *)
- (mem + sizeof(struct carm_msg_get_fw_ver));
- if (!error) {
- host->fw_ver = le32_to_cpu(ver->version);
- host->flags |= (ver->features & FL_FW_VER_MASK);
- }
- carm_handle_generic(host, crq, error,
- HST_GET_FW_VER, HST_PORT_SCAN);
- goto done;
- }
- default:
- /* unknown / invalid response */
- goto err_out;
- }
- break;
- }
-
- case CARM_MSG_ARRAY: {
- switch (crq->msg_subtype) {
- case CARM_ARRAY_INFO:
- carm_handle_array_info(host, crq, mem, error);
- break;
- default:
- /* unknown / invalid response */
- goto err_out;
- }
- break;
- }
-
- default:
- /* unknown / invalid response */
- goto err_out;
- }
-
- return;
-
-err_out:
- printk(KERN_WARNING DRV_NAME "(%s): BUG: unhandled message type %d/%d\n",
- pci_name(host->pdev), crq->msg_type, crq->msg_subtype);
- error = BLK_STS_IOERR;
-done:
- host->hw_sg_used -= crq->n_elem;
- blk_mq_end_request(blk_mq_rq_from_pdu(crq), error);
-
- if (host->hw_sg_used <= CARM_SG_LOW_WATER)
- carm_round_robin(host);
-}
-
-static inline void carm_handle_responses(struct carm_host *host)
-{
- void __iomem *mmio = host->mmio;
- struct carm_response *resp = (struct carm_response *) host->shm;
- unsigned int work = 0;
- unsigned int idx = host->resp_idx % RMSG_Q_LEN;
-
- while (1) {
- u32 status = le32_to_cpu(resp[idx].status);
-
- if (status == 0xffffffff) {
- VPRINTK("ending response on index %u\n", idx);
- writel(idx << 3, mmio + CARM_RESP_IDX);
- break;
- }
-
- /* response to a message we sent */
- else if ((status & (1 << 31)) == 0) {
- VPRINTK("handling msg response on index %u\n", idx);
- carm_handle_resp(host, resp[idx].ret_handle, status);
- resp[idx].status = cpu_to_le32(0xffffffff);
- }
-
- /* asynchronous events the hardware throws our way */
- else if ((status & 0xff000000) == (1 << 31)) {
- u8 *evt_type_ptr = (u8 *) &resp[idx];
- u8 evt_type = *evt_type_ptr;
- printk(KERN_WARNING DRV_NAME "(%s): unhandled event type %d\n",
- pci_name(host->pdev), (int) evt_type);
- resp[idx].status = cpu_to_le32(0xffffffff);
- }
-
- idx = NEXT_RESP(idx);
- work++;
- }
-
- VPRINTK("EXIT, work==%u\n", work);
- host->resp_idx += work;
-}
-
-static irqreturn_t carm_interrupt(int irq, void *__host)
-{
- struct carm_host *host = __host;
- void __iomem *mmio;
- u32 mask;
- int handled = 0;
- unsigned long flags;
-
- if (!host) {
- VPRINTK("no host\n");
- return IRQ_NONE;
- }
-
- spin_lock_irqsave(&host->lock, flags);
-
- mmio = host->mmio;
-
- /* reading should also clear interrupts */
- mask = readl(mmio + CARM_INT_STAT);
-
- if (mask == 0 || mask == 0xffffffff) {
- VPRINTK("no work, mask == 0x%x\n", mask);
- goto out;
- }
-
- if (mask & INT_ACK_MASK)
- writel(mask, mmio + CARM_INT_STAT);
-
- if (unlikely(host->state == HST_INVALID)) {
- VPRINTK("not initialized yet, mask = 0x%x\n", mask);
- goto out;
- }
-
- if (mask & CARM_HAVE_RESP) {
- handled = 1;
- carm_handle_responses(host);
- }
-
-out:
- spin_unlock_irqrestore(&host->lock, flags);
- VPRINTK("EXIT\n");
- return IRQ_RETVAL(handled);
-}
-
-static void carm_fsm_task (struct work_struct *work)
-{
- struct carm_host *host =
- container_of(work, struct carm_host, fsm_task);
- unsigned long flags;
- unsigned int state;
- int rc, i, next_dev;
- int reschedule = 0;
- int new_state = HST_INVALID;
-
- spin_lock_irqsave(&host->lock, flags);
- state = host->state;
- spin_unlock_irqrestore(&host->lock, flags);
-
- DPRINTK("ENTER, state == %s\n", state_name[state]);
-
- switch (state) {
- case HST_PROBE_START:
- new_state = HST_ALLOC_BUF;
- reschedule = 1;
- break;
-
- case HST_ALLOC_BUF:
- rc = carm_send_special(host, carm_fill_alloc_buf);
- if (rc) {
- new_state = HST_ERROR;
- reschedule = 1;
- }
- break;
-
- case HST_SYNC_TIME:
- rc = carm_send_special(host, carm_fill_sync_time);
- if (rc) {
- new_state = HST_ERROR;
- reschedule = 1;
- }
- break;
-
- case HST_GET_FW_VER:
- rc = carm_send_special(host, carm_fill_get_fw_ver);
- if (rc) {
- new_state = HST_ERROR;
- reschedule = 1;
- }
- break;
-
- case HST_PORT_SCAN:
- rc = carm_send_special(host, carm_fill_scan_channels);
- if (rc) {
- new_state = HST_ERROR;
- reschedule = 1;
- }
- break;
-
- case HST_DEV_SCAN_START:
- host->cur_scan_dev = -1;
- new_state = HST_DEV_SCAN;
- reschedule = 1;
- break;
-
- case HST_DEV_SCAN:
- next_dev = -1;
- for (i = host->cur_scan_dev + 1; i < CARM_MAX_PORTS; i++)
- if (host->dev_present & (1 << i)) {
- next_dev = i;
- break;
- }
-
- if (next_dev >= 0) {
- host->cur_scan_dev = next_dev;
- rc = carm_array_info(host, next_dev);
- if (rc) {
- new_state = HST_ERROR;
- reschedule = 1;
- }
- } else {
- new_state = HST_DEV_ACTIVATE;
- reschedule = 1;
- }
- break;
-
- case HST_DEV_ACTIVATE: {
- int activated = 0;
- for (i = 0; i < CARM_MAX_PORTS; i++)
- if (host->dev_active & (1 << i)) {
- struct carm_port *port = &host->port[i];
- struct gendisk *disk = port->disk;
-
- set_capacity(disk, port->capacity);
- host->probe_err = add_disk(disk);
- if (!host->probe_err)
- activated++;
- else
- break;
- }
-
- printk(KERN_INFO DRV_NAME "(%s): %d ports activated\n",
- pci_name(host->pdev), activated);
-
- new_state = HST_PROBE_FINISHED;
- reschedule = 1;
- break;
- }
- case HST_PROBE_FINISHED:
- complete(&host->probe_comp);
- break;
- case HST_ERROR:
- /* FIXME: TODO */
- break;
-
- default:
- /* should never occur */
- printk(KERN_ERR PFX "BUG: unknown state %d\n", state);
- assert(0);
- break;
- }
-
- if (new_state != HST_INVALID) {
- spin_lock_irqsave(&host->lock, flags);
- host->state = new_state;
- spin_unlock_irqrestore(&host->lock, flags);
- }
- if (reschedule)
- schedule_work(&host->fsm_task);
-}
-
-static int carm_init_wait(void __iomem *mmio, u32 bits, unsigned int test_bit)
-{
- unsigned int i;
-
- for (i = 0; i < 50000; i++) {
- u32 tmp = readl(mmio + CARM_LMUC);
- udelay(100);
-
- if (test_bit) {
- if ((tmp & bits) == bits)
- return 0;
- } else {
- if ((tmp & bits) == 0)
- return 0;
- }
-
- cond_resched();
- }
-
- printk(KERN_ERR PFX "carm_init_wait timeout, bits == 0x%x, test_bit == %s\n",
- bits, test_bit ? "yes" : "no");
- return -EBUSY;
-}
-
-static void carm_init_responses(struct carm_host *host)
-{
- void __iomem *mmio = host->mmio;
- unsigned int i;
- struct carm_response *resp = (struct carm_response *) host->shm;
-
- for (i = 0; i < RMSG_Q_LEN; i++)
- resp[i].status = cpu_to_le32(0xffffffff);
-
- writel(0, mmio + CARM_RESP_IDX);
-}
-
-static int carm_init_host(struct carm_host *host)
-{
- void __iomem *mmio = host->mmio;
- u32 tmp;
- u8 tmp8;
- int rc;
-
- DPRINTK("ENTER\n");
-
- writel(0, mmio + CARM_INT_MASK);
-
- tmp8 = readb(mmio + CARM_INITC);
- if (tmp8 & 0x01) {
- tmp8 &= ~0x01;
- writeb(tmp8, mmio + CARM_INITC);
- readb(mmio + CARM_INITC); /* flush */
-
- DPRINTK("snooze...\n");
- msleep(5000);
- }
-
- tmp = readl(mmio + CARM_HMUC);
- if (tmp & CARM_CME) {
- DPRINTK("CME bit present, waiting\n");
- rc = carm_init_wait(mmio, CARM_CME, 1);
- if (rc) {
- DPRINTK("EXIT, carm_init_wait 1 failed\n");
- return rc;
- }
- }
- if (tmp & CARM_RME) {
- DPRINTK("RME bit present, waiting\n");
- rc = carm_init_wait(mmio, CARM_RME, 1);
- if (rc) {
- DPRINTK("EXIT, carm_init_wait 2 failed\n");
- return rc;
- }
- }
-
- tmp &= ~(CARM_RME | CARM_CME);
- writel(tmp, mmio + CARM_HMUC);
- readl(mmio + CARM_HMUC); /* flush */
-
- rc = carm_init_wait(mmio, CARM_RME | CARM_CME, 0);
- if (rc) {
- DPRINTK("EXIT, carm_init_wait 3 failed\n");
- return rc;
- }
-
- carm_init_buckets(mmio);
-
- writel(host->shm_dma & 0xffffffff, mmio + RBUF_ADDR_LO);
- writel((host->shm_dma >> 16) >> 16, mmio + RBUF_ADDR_HI);
- writel(RBUF_LEN, mmio + RBUF_BYTE_SZ);
-
- tmp = readl(mmio + CARM_HMUC);
- tmp |= (CARM_RME | CARM_CME | CARM_WZBC);
- writel(tmp, mmio + CARM_HMUC);
- readl(mmio + CARM_HMUC); /* flush */
-
- rc = carm_init_wait(mmio, CARM_RME | CARM_CME, 1);
- if (rc) {
- DPRINTK("EXIT, carm_init_wait 4 failed\n");
- return rc;
- }
-
- writel(0, mmio + CARM_HMPHA);
- writel(INT_DEF_MASK, mmio + CARM_INT_MASK);
-
- carm_init_responses(host);
-
- /* start initialization, probing state machine */
- spin_lock_irq(&host->lock);
- assert(host->state == HST_INVALID);
- host->state = HST_PROBE_START;
- spin_unlock_irq(&host->lock);
- schedule_work(&host->fsm_task);
-
- DPRINTK("EXIT\n");
- return 0;
-}
-
-static const struct blk_mq_ops carm_mq_ops = {
- .queue_rq = carm_queue_rq,
-};
-
-static int carm_init_disk(struct carm_host *host, unsigned int port_no)
-{
- struct carm_port *port = &host->port[port_no];
- struct gendisk *disk;
-
- port->host = host;
- port->port_no = port_no;
-
- disk = blk_mq_alloc_disk(&host->tag_set, port);
- if (IS_ERR(disk))
- return PTR_ERR(disk);
-
- port->disk = disk;
- sprintf(disk->disk_name, DRV_NAME "/%u",
- (unsigned int)host->id * CARM_MAX_PORTS + port_no);
- disk->major = host->major;
- disk->first_minor = port_no * CARM_MINORS_PER_MAJOR;
- disk->minors = CARM_MINORS_PER_MAJOR;
- disk->fops = &carm_bd_ops;
- disk->private_data = port;
-
- blk_queue_max_segments(disk->queue, CARM_MAX_REQ_SG);
- blk_queue_segment_boundary(disk->queue, CARM_SG_BOUNDARY);
- return 0;
-}
-
-static void carm_free_disk(struct carm_host *host, unsigned int port_no)
-{
- struct carm_port *port = &host->port[port_no];
- struct gendisk *disk = port->disk;
-
- if (!disk)
- return;
-
- if (host->state > HST_DEV_ACTIVATE)
- del_gendisk(disk);
- put_disk(disk);
-}
-
-static int carm_init_shm(struct carm_host *host)
-{
- host->shm = dma_alloc_coherent(&host->pdev->dev, CARM_SHM_SIZE,
- &host->shm_dma, GFP_KERNEL);
- if (!host->shm)
- return -ENOMEM;
-
- host->msg_base = host->shm + RBUF_LEN;
- host->msg_dma = host->shm_dma + RBUF_LEN;
-
- memset(host->shm, 0xff, RBUF_LEN);
- memset(host->msg_base, 0, PDC_SHM_SIZE - RBUF_LEN);
-
- return 0;
-}
-
-static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- struct carm_host *host;
- int rc;
- struct request_queue *q;
- unsigned int i;
-
- printk_once(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
-
- rc = pci_enable_device(pdev);
- if (rc)
- return rc;
-
- rc = pci_request_regions(pdev, DRV_NAME);
- if (rc)
- goto err_out;
-
- rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (rc) {
- printk(KERN_ERR DRV_NAME "(%s): DMA mask failure\n",
- pci_name(pdev));
- goto err_out_regions;
- }
-
- host = kzalloc(sizeof(*host), GFP_KERNEL);
- if (!host) {
- rc = -ENOMEM;
- goto err_out_regions;
- }
-
- host->pdev = pdev;
- spin_lock_init(&host->lock);
- INIT_WORK(&host->fsm_task, carm_fsm_task);
- init_completion(&host->probe_comp);
-
- host->mmio = ioremap(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
- if (!host->mmio) {
- printk(KERN_ERR DRV_NAME "(%s): MMIO alloc failure\n",
- pci_name(pdev));
- rc = -ENOMEM;
- goto err_out_kfree;
- }
-
- rc = carm_init_shm(host);
- if (rc) {
- printk(KERN_ERR DRV_NAME "(%s): DMA SHM alloc failure\n",
- pci_name(pdev));
- goto err_out_iounmap;
- }
-
- memset(&host->tag_set, 0, sizeof(host->tag_set));
- host->tag_set.ops = &carm_mq_ops;
- host->tag_set.cmd_size = sizeof(struct carm_request);
- host->tag_set.nr_hw_queues = 1;
- host->tag_set.nr_maps = 1;
- host->tag_set.queue_depth = max_queue;
- host->tag_set.numa_node = NUMA_NO_NODE;
- host->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
-
- rc = blk_mq_alloc_tag_set(&host->tag_set);
- if (rc)
- goto err_out_dma_free;
-
- q = blk_mq_init_queue(&host->tag_set);
- if (IS_ERR(q)) {
- rc = PTR_ERR(q);
- blk_mq_free_tag_set(&host->tag_set);
- goto err_out_dma_free;
- }
-
- host->oob_q = q;
- q->queuedata = host;
-
- /*
- * Figure out which major to use: 160, 161, or dynamic
- */
- if (!test_and_set_bit(0, &carm_major_alloc))
- host->major = 160;
- else if (!test_and_set_bit(1, &carm_major_alloc))
- host->major = 161;
- else
- host->flags |= FL_DYN_MAJOR;
-
- host->id = carm_host_id;
- sprintf(host->name, DRV_NAME "%d", carm_host_id);
-
- rc = register_blkdev(host->major, host->name);
- if (rc < 0)
- goto err_out_free_majors;
- if (host->flags & FL_DYN_MAJOR)
- host->major = rc;
-
- for (i = 0; i < CARM_MAX_PORTS; i++) {
- rc = carm_init_disk(host, i);
- if (rc)
- goto err_out_blkdev_disks;
- }
-
- pci_set_master(pdev);
-
- rc = request_irq(pdev->irq, carm_interrupt, IRQF_SHARED, DRV_NAME, host);
- if (rc) {
- printk(KERN_ERR DRV_NAME "(%s): irq alloc failure\n",
- pci_name(pdev));
- goto err_out_blkdev_disks;
- }
-
- rc = carm_init_host(host);
- if (rc)
- goto err_out_free_irq;
-
- DPRINTK("waiting for probe_comp\n");
- host->probe_err = -ENODEV;
- wait_for_completion(&host->probe_comp);
- if (host->probe_err) {
- rc = host->probe_err;
- goto err_out_free_irq;
- }
-
- printk(KERN_INFO "%s: pci %s, ports %d, io %llx, irq %u, major %d\n",
- host->name, pci_name(pdev), (int) CARM_MAX_PORTS,
- (unsigned long long)pci_resource_start(pdev, 0),
- pdev->irq, host->major);
-
- carm_host_id++;
- pci_set_drvdata(pdev, host);
- return 0;
-
-err_out_free_irq:
- free_irq(pdev->irq, host);
-err_out_blkdev_disks:
- for (i = 0; i < CARM_MAX_PORTS; i++)
- carm_free_disk(host, i);
- unregister_blkdev(host->major, host->name);
-err_out_free_majors:
- if (host->major == 160)
- clear_bit(0, &carm_major_alloc);
- else if (host->major == 161)
- clear_bit(1, &carm_major_alloc);
- blk_mq_destroy_queue(host->oob_q);
- blk_mq_free_tag_set(&host->tag_set);
-err_out_dma_free:
- dma_free_coherent(&pdev->dev, CARM_SHM_SIZE, host->shm, host->shm_dma);
-err_out_iounmap:
- iounmap(host->mmio);
-err_out_kfree:
- kfree(host);
-err_out_regions:
- pci_release_regions(pdev);
-err_out:
- pci_disable_device(pdev);
- return rc;
-}
-
-static void carm_remove_one (struct pci_dev *pdev)
-{
- struct carm_host *host = pci_get_drvdata(pdev);
- unsigned int i;
-
- if (!host) {
- printk(KERN_ERR PFX "BUG: no host data for PCI(%s)\n",
- pci_name(pdev));
- return;
- }
-
- free_irq(pdev->irq, host);
- for (i = 0; i < CARM_MAX_PORTS; i++)
- carm_free_disk(host, i);
- unregister_blkdev(host->major, host->name);
- if (host->major == 160)
- clear_bit(0, &carm_major_alloc);
- else if (host->major == 161)
- clear_bit(1, &carm_major_alloc);
- blk_mq_destroy_queue(host->oob_q);
- blk_mq_free_tag_set(&host->tag_set);
- dma_free_coherent(&pdev->dev, CARM_SHM_SIZE, host->shm, host->shm_dma);
- iounmap(host->mmio);
- kfree(host);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
-}
-
-module_pci_driver(carm_driver);
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 3f1906965ac8..6a4a94b4cdf4 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -47,7 +47,12 @@
#define UBLK_MINORS (1U << MINORBITS)
/* All UBLK_F_* have to be included into UBLK_F_ALL */
-#define UBLK_F_ALL (UBLK_F_SUPPORT_ZERO_COPY | UBLK_F_URING_CMD_COMP_IN_TASK)
+#define UBLK_F_ALL (UBLK_F_SUPPORT_ZERO_COPY \
+ | UBLK_F_URING_CMD_COMP_IN_TASK \
+ | UBLK_F_NEED_GET_DATA)
+
+/* All UBLK_PARAM_TYPE_* should be included here */
+#define UBLK_PARAM_TYPE_ALL (UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DISCARD)
struct ublk_rq_data {
struct callback_head work;
@@ -86,6 +91,15 @@ struct ublk_uring_cmd_pdu {
*/
#define UBLK_IO_FLAG_ABORTED 0x04
+/*
+ * UBLK_IO_FLAG_NEED_GET_DATA is set when the IO command needs to get the
+ * data buffer address from ublksrv.
+ *
+ * Then, bio data could be copied into this data buffer for a WRITE request
+ * after the IO command is issued again and UBLK_IO_FLAG_NEED_GET_DATA is unset.
+ */
+#define UBLK_IO_FLAG_NEED_GET_DATA 0x08
+
struct ublk_io {
/* userspace buffer address from io cmd */
__u64 addr;
@@ -119,7 +133,6 @@ struct ublk_device {
char *__queues;
unsigned short queue_size;
- unsigned short bs_shift;
struct ublksrv_ctrl_dev_info dev_info;
struct blk_mq_tag_set tag_set;
@@ -137,6 +150,8 @@ struct ublk_device {
spinlock_t mm_lock;
struct mm_struct *mm;
+ struct ublk_params params;
+
struct completion completion;
unsigned int nr_queues_ready;
atomic_t nr_aborted_queues;
@@ -149,6 +164,12 @@ struct ublk_device {
struct work_struct stop_work;
};
+/* header of ublk_params */
+struct ublk_params_header {
+ __u32 len;
+ __u32 types;
+};
+
static dev_t ublk_chr_devt;
static struct class *ublk_chr_class;
@@ -160,6 +181,90 @@ static DEFINE_MUTEX(ublk_ctl_mutex);
static struct miscdevice ublk_misc;
+static void ublk_dev_param_basic_apply(struct ublk_device *ub)
+{
+ struct request_queue *q = ub->ub_disk->queue;
+ const struct ublk_param_basic *p = &ub->params.basic;
+
+ blk_queue_logical_block_size(q, 1 << p->logical_bs_shift);
+ blk_queue_physical_block_size(q, 1 << p->physical_bs_shift);
+ blk_queue_io_min(q, 1 << p->io_min_shift);
+ blk_queue_io_opt(q, 1 << p->io_opt_shift);
+
+ blk_queue_write_cache(q, p->attrs & UBLK_ATTR_VOLATILE_CACHE,
+ p->attrs & UBLK_ATTR_FUA);
+ if (p->attrs & UBLK_ATTR_ROTATIONAL)
+ blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
+ else
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+
+ blk_queue_max_hw_sectors(q, p->max_sectors);
+ blk_queue_chunk_sectors(q, p->chunk_sectors);
+ blk_queue_virt_boundary(q, p->virt_boundary_mask);
+
+ if (p->attrs & UBLK_ATTR_READ_ONLY)
+ set_disk_ro(ub->ub_disk, true);
+
+ set_capacity(ub->ub_disk, p->dev_sectors);
+}
+
+static void ublk_dev_param_discard_apply(struct ublk_device *ub)
+{
+ struct request_queue *q = ub->ub_disk->queue;
+ const struct ublk_param_discard *p = &ub->params.discard;
+
+ q->limits.discard_alignment = p->discard_alignment;
+ q->limits.discard_granularity = p->discard_granularity;
+ blk_queue_max_discard_sectors(q, p->max_discard_sectors);
+ blk_queue_max_write_zeroes_sectors(q,
+ p->max_write_zeroes_sectors);
+ blk_queue_max_discard_segments(q, p->max_discard_segments);
+}
+
+static int ublk_validate_params(const struct ublk_device *ub)
+{
+ /* basic param is the only one which must be set */
+ if (ub->params.types & UBLK_PARAM_TYPE_BASIC) {
+ const struct ublk_param_basic *p = &ub->params.basic;
+
+ if (p->logical_bs_shift > PAGE_SHIFT)
+ return -EINVAL;
+
+ if (p->logical_bs_shift > p->physical_bs_shift)
+ return -EINVAL;
+
+ if (p->max_sectors > (ub->dev_info.max_io_buf_bytes >> 9))
+ return -EINVAL;
+ } else
+ return -EINVAL;
+
+ if (ub->params.types & UBLK_PARAM_TYPE_DISCARD) {
+ const struct ublk_param_discard *p = &ub->params.discard;
+
+ /* So far, only support single segment discard */
+ if (p->max_discard_sectors && p->max_discard_segments != 1)
+ return -EINVAL;
+
+ if (!p->discard_granularity)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ublk_apply_params(struct ublk_device *ub)
+{
+ if (!(ub->params.types & UBLK_PARAM_TYPE_BASIC))
+ return -EINVAL;
+
+ ublk_dev_param_basic_apply(ub);
+
+ if (ub->params.types & UBLK_PARAM_TYPE_DISCARD)
+ ublk_dev_param_discard_apply(ub);
+
+ return 0;
+}
+
static inline bool ublk_can_use_task_work(const struct ublk_queue *ubq)
{
if (IS_BUILTIN(CONFIG_BLK_DEV_UBLK) &&
@@ -168,6 +273,13 @@ static inline bool ublk_can_use_task_work(const struct ublk_queue *ubq)
return false;
}
+static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
+{
+ if (ubq->flags & UBLK_F_NEED_GET_DATA)
+ return true;
+ return false;
+}
+
static struct ublk_device *ublk_get_device(struct ublk_device *ub)
{
if (kobject_get_unless_zero(&ub->cdev_dev.kobj))
@@ -443,7 +555,7 @@ static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
return (struct ublk_uring_cmd_pdu *)&ioucmd->pdu;
}
-static bool ubq_daemon_is_dying(struct ublk_queue *ubq)
+static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
{
return ubq->ubq_daemon->flags & PF_EXITING;
}
@@ -493,8 +605,9 @@ static void ublk_complete_rq(struct request *req)
}
/*
- * __ublk_fail_req() may be called from abort context or ->ubq_daemon
- * context during exiting, so lock is required.
+ * Since __ublk_rq_task_work always fails requests immediately during
+ * exiting, __ublk_fail_req() is only called from abort context during
+ * exiting. So lock is unnecessary.
*
* Also aborting may not be started yet, keep in mind that one failed
* request may be issued by block layer again.
@@ -509,6 +622,21 @@ static void __ublk_fail_req(struct ublk_io *io, struct request *req)
}
}
+static void ubq_complete_io_cmd(struct ublk_io *io, int res)
+{
+ /* mark this cmd owned by ublksrv */
+ io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV;
+
+ /*
+ * clear ACTIVE since we are done with this sqe/cmd slot
+ * We can only accept io cmd in case of being not active.
+ */
+ io->flags &= ~UBLK_IO_FLAG_ACTIVE;
+
+ /* tell ublksrv one io request is coming */
+ io_uring_cmd_done(io->cmd, res, 0);
+}
+
#define UBLK_REQUEUE_DELAY_MS 3
static inline void __ublk_rq_task_work(struct request *req)
@@ -517,8 +645,7 @@ static inline void __ublk_rq_task_work(struct request *req)
struct ublk_device *ub = ubq->dev;
int tag = req->tag;
struct ublk_io *io = &ubq->ios[tag];
- bool task_exiting = current != ubq->ubq_daemon ||
- (current->flags & PF_EXITING);
+ bool task_exiting = current != ubq->ubq_daemon || ubq_daemon_is_dying(ubq);
unsigned int mapped_bytes;
pr_devel("%s: complete: op %d, qid %d tag %d io_flags %x addr %llx\n",
@@ -531,6 +658,35 @@ static inline void __ublk_rq_task_work(struct request *req)
return;
}
+ if (ublk_need_get_data(ubq) &&
+ (req_op(req) == REQ_OP_WRITE ||
+ req_op(req) == REQ_OP_FLUSH)) {
+ /*
+ * We have not handled the UBLK_IO_NEED_GET_DATA command yet,
+ * so immediately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv
+ * and notify it.
+ */
+ if (!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA)) {
+ io->flags |= UBLK_IO_FLAG_NEED_GET_DATA;
+ pr_devel("%s: need get data. op %d, qid %d tag %d io_flags %x\n",
+ __func__, io->cmd->cmd_op, ubq->q_id,
+ req->tag, io->flags);
+ ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA);
+ return;
+ }
+ /*
+ * We have handled UBLK_IO_NEED_GET_DATA command,
+ * so clear UBLK_IO_FLAG_NEED_GET_DATA now and just
+ * do the copy work.
+ */
+ io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA;
+ /* update iod->addr because ublksrv may have passed a new io buffer */
+ ublk_get_iod(ubq, req->tag)->addr = io->addr;
+ pr_devel("%s: update iod->addr: op %d, qid %d tag %d io_flags %x addr %llx\n",
+ __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
+ ublk_get_iod(ubq, req->tag)->addr);
+ }
+
mapped_bytes = ublk_map_io(ubq, req, io);
/* partially mapped, update io descriptor */
@@ -553,17 +709,7 @@ static inline void __ublk_rq_task_work(struct request *req)
mapped_bytes >> 9;
}
- /* mark this cmd owned by ublksrv */
- io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV;
-
- /*
- * clear ACTIVE since we are done with this sqe/cmd slot
- * We can only accept io cmd in case of being not active.
- */
- io->flags &= ~UBLK_IO_FLAG_ACTIVE;
-
- /* tell ublksrv one io request is coming */
- io_uring_cmd_done(io->cmd, UBLK_IO_RES_OK, 0);
+ ubq_complete_io_cmd(io, UBLK_IO_RES_OK);
}
static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd)
@@ -610,9 +756,25 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
if (task_work_add(ubq->ubq_daemon, &data->work, notify_mode))
goto fail;
} else {
- struct io_uring_cmd *cmd = ubq->ios[rq->tag].cmd;
+ struct ublk_io *io = &ubq->ios[rq->tag];
+ struct io_uring_cmd *cmd = io->cmd;
struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+ /*
+ * If the check pass, we know that this is a re-issued request aborted
+ * previously in monitor_work because the ubq_daemon (cmd's task) is
+ * PF_EXITING. We cannot call io_uring_cmd_complete_in_task() anymore
+ * because this ioucmd's io_uring context may be freed now if no inflight
+ * ioucmd exists. Otherwise we may cause null-deref in ctx->fallback_work.
+ *
+ * Note: monitor_work sets UBLK_IO_FLAG_ABORTED and ends this request (releasing
+ * the tag). Then the request is re-started (allocating the tag) and we are here.
+ * Since releasing/allocating a tag implies smp_mb(), finding UBLK_IO_FLAG_ABORTED
+ * guarantees that here is a re-issued request aborted previously.
+ */
+ if ((io->flags & UBLK_IO_FLAG_ABORTED))
+ goto fail;
+
pdu->req = rq;
io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb);
}
@@ -788,16 +950,27 @@ static void ublk_daemon_monitor_work(struct work_struct *work)
UBLK_DAEMON_MONITOR_PERIOD);
}
+static inline bool ublk_queue_ready(struct ublk_queue *ubq)
+{
+ return ubq->nr_io_ready == ubq->q_depth;
+}
+
static void ublk_cancel_queue(struct ublk_queue *ubq)
{
int i;
+ if (!ublk_queue_ready(ubq))
+ return;
+
for (i = 0; i < ubq->q_depth; i++) {
struct ublk_io *io = &ubq->ios[i];
if (io->flags & UBLK_IO_FLAG_ACTIVE)
io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0);
}
+
+ /* all io commands are canceled */
+ ubq->nr_io_ready = 0;
}
/* Cancel all pending commands, must be called after del_gendisk() returns */
@@ -818,19 +991,14 @@ static void ublk_stop_dev(struct ublk_device *ub)
del_gendisk(ub->ub_disk);
ub->dev_info.state = UBLK_S_DEV_DEAD;
ub->dev_info.ublksrv_pid = -1;
- ublk_cancel_dev(ub);
put_disk(ub->ub_disk);
ub->ub_disk = NULL;
unlock:
+ ublk_cancel_dev(ub);
mutex_unlock(&ub->mutex);
cancel_delayed_work_sync(&ub->monitor_work);
}
-static inline bool ublk_queue_ready(struct ublk_queue *ubq)
-{
- return ubq->nr_io_ready == ubq->q_depth;
-}
-
/* device can only be started after all IOs are ready */
static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
{
@@ -846,6 +1014,25 @@ static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
mutex_unlock(&ub->mutex);
}
+static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
+ int tag, struct io_uring_cmd *cmd)
+{
+ struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
+ struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);
+
+ if (ublk_can_use_task_work(ubq)) {
+ struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+
+ /* should not fail since we only call it from ubq->ubq_daemon */
+ task_work_add(ubq->ubq_daemon, &data->work, TWA_SIGNAL_NO_IPI);
+ } else {
+ struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+
+ pdu->req = req;
+ io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb);
+ }
+}
+
static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
struct ublksrv_io_cmd *ub_cmd = (struct ublksrv_io_cmd *)cmd->cmd;
@@ -884,6 +1071,14 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
goto out;
}
+ /*
+	 * ensure that the user issues UBLK_IO_NEED_GET_DATA
+	 * iff the driver has set UBLK_IO_FLAG_NEED_GET_DATA.
+ */
+ if ((!!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA))
+ ^ (cmd_op == UBLK_IO_NEED_GET_DATA))
+ goto out;
+
switch (cmd_op) {
case UBLK_IO_FETCH_REQ:
/* UBLK_IO_FETCH_REQ is only allowed before queue is setup */
@@ -917,6 +1112,14 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
io->cmd = cmd;
ublk_commit_completion(ub, ub_cmd);
break;
+ case UBLK_IO_NEED_GET_DATA:
+ if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
+ goto out;
+ io->addr = ub_cmd->addr;
+ io->cmd = cmd;
+ io->flags |= UBLK_IO_FLAG_ACTIVE;
+ ublk_handle_need_get_data(ub, ub_cmd->q_id, ub_cmd->tag, cmd);
+ break;
default:
goto out;
}
@@ -1083,13 +1286,13 @@ static void ublk_stop_work_fn(struct work_struct *work)
ublk_stop_dev(ub);
}
-/* align maximum I/O size to PAGE_SIZE */
+/* align max io buffer size with PAGE_SIZE */
static void ublk_align_max_io_size(struct ublk_device *ub)
{
- unsigned int max_rq_bytes = ub->dev_info.rq_max_blocks << ub->bs_shift;
+ unsigned int max_io_bytes = ub->dev_info.max_io_buf_bytes;
- ub->dev_info.rq_max_blocks =
- round_down(max_rq_bytes, PAGE_SIZE) >> ub->bs_shift;
+ ub->dev_info.max_io_buf_bytes =
+ round_down(max_io_bytes, PAGE_SIZE);
}
static int ublk_add_tag_set(struct ublk_device *ub)
@@ -1132,7 +1335,6 @@ static int ublk_ctrl_start_dev(struct io_uring_cmd *cmd)
{
struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
int ublksrv_pid = (int)header->data[0];
- unsigned long dev_blocks = header->data[1];
struct ublk_device *ub;
struct gendisk *disk;
int ret = -EINVAL;
@@ -1155,10 +1357,6 @@ static int ublk_ctrl_start_dev(struct io_uring_cmd *cmd)
goto out_unlock;
}
- /* We may get disk size updated */
- if (dev_blocks)
- ub->dev_info.dev_blocks = dev_blocks;
-
disk = blk_mq_alloc_disk(&ub->tag_set, ub);
if (IS_ERR(disk)) {
ret = PTR_ERR(disk);
@@ -1168,27 +1366,28 @@ static int ublk_ctrl_start_dev(struct io_uring_cmd *cmd)
disk->fops = &ub_fops;
disk->private_data = ub;
- blk_queue_logical_block_size(disk->queue, ub->dev_info.block_size);
- blk_queue_physical_block_size(disk->queue, ub->dev_info.block_size);
- blk_queue_io_min(disk->queue, ub->dev_info.block_size);
- blk_queue_max_hw_sectors(disk->queue,
- ub->dev_info.rq_max_blocks << (ub->bs_shift - 9));
- disk->queue->limits.discard_granularity = PAGE_SIZE;
- blk_queue_max_discard_sectors(disk->queue, UINT_MAX >> 9);
- blk_queue_max_write_zeroes_sectors(disk->queue, UINT_MAX >> 9);
-
- set_capacity(disk, ub->dev_info.dev_blocks << (ub->bs_shift - 9));
-
ub->dev_info.ublksrv_pid = ublksrv_pid;
ub->ub_disk = disk;
+
+ ret = ublk_apply_params(ub);
+ if (ret)
+ goto out_put_disk;
+
get_device(&ub->cdev_dev);
ret = add_disk(disk);
if (ret) {
- put_disk(disk);
- goto out_unlock;
+ /*
+ * Has to drop the reference since ->free_disk won't be
+ * called in case of add_disk failure.
+ */
+ ublk_put_device(ub);
+ goto out_put_disk;
}
set_bit(UB_STATE_USED, &ub->state);
ub->dev_info.state = UBLK_S_DEV_LIVE;
+out_put_disk:
+ if (ret)
+ put_disk(disk);
out_unlock:
mutex_unlock(&ub->mutex);
ublk_put_device(ub);
@@ -1250,9 +1449,8 @@ static inline void ublk_dump_dev_info(struct ublksrv_ctrl_dev_info *info)
{
pr_devel("%s: dev id %d flags %llx\n", __func__,
info->dev_id, info->flags);
- pr_devel("\t nr_hw_queues %d queue_depth %d block size %d dev_capacity %lld\n",
- info->nr_hw_queues, info->queue_depth,
- info->block_size, info->dev_blocks);
+ pr_devel("\t nr_hw_queues %d queue_depth %d\n",
+ info->nr_hw_queues, info->queue_depth);
}
static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
@@ -1312,7 +1510,6 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
/* We are not ready to support zero copy */
ub->dev_info.flags &= ~UBLK_F_SUPPORT_ZERO_COPY;
- ub->bs_shift = ilog2(ub->dev_info.block_size);
ub->dev_info.nr_hw_queues = min_t(unsigned int,
ub->dev_info.nr_hw_queues, nr_cpu_ids);
ublk_align_max_io_size(ub);
@@ -1436,6 +1633,82 @@ static int ublk_ctrl_get_dev_info(struct io_uring_cmd *cmd)
return ret;
}
+static int ublk_ctrl_get_params(struct io_uring_cmd *cmd)
+{
+ struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ void __user *argp = (void __user *)(unsigned long)header->addr;
+ struct ublk_params_header ph;
+ struct ublk_device *ub;
+ int ret;
+
+ if (header->len <= sizeof(ph) || !header->addr)
+ return -EINVAL;
+
+ if (copy_from_user(&ph, argp, sizeof(ph)))
+ return -EFAULT;
+
+ if (ph.len > header->len || !ph.len)
+ return -EINVAL;
+
+ if (ph.len > sizeof(struct ublk_params))
+ ph.len = sizeof(struct ublk_params);
+
+ ub = ublk_get_device_from_id(header->dev_id);
+ if (!ub)
+ return -EINVAL;
+
+ mutex_lock(&ub->mutex);
+ if (copy_to_user(argp, &ub->params, ph.len))
+ ret = -EFAULT;
+ else
+ ret = 0;
+ mutex_unlock(&ub->mutex);
+
+ ublk_put_device(ub);
+ return ret;
+}
+
+static int ublk_ctrl_set_params(struct io_uring_cmd *cmd)
+{
+ struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ void __user *argp = (void __user *)(unsigned long)header->addr;
+ struct ublk_params_header ph;
+ struct ublk_device *ub;
+ int ret = -EFAULT;
+
+ if (header->len <= sizeof(ph) || !header->addr)
+ return -EINVAL;
+
+ if (copy_from_user(&ph, argp, sizeof(ph)))
+ return -EFAULT;
+
+ if (ph.len > header->len || !ph.len || !ph.types)
+ return -EINVAL;
+
+ if (ph.len > sizeof(struct ublk_params))
+ ph.len = sizeof(struct ublk_params);
+
+ ub = ublk_get_device_from_id(header->dev_id);
+ if (!ub)
+ return -EINVAL;
+
+ /* parameters can only be changed when device isn't live */
+ mutex_lock(&ub->mutex);
+ if (ub->dev_info.state == UBLK_S_DEV_LIVE) {
+ ret = -EACCES;
+ } else if (copy_from_user(&ub->params, argp, ph.len)) {
+ ret = -EFAULT;
+ } else {
+ /* clear all we don't support yet */
+ ub->params.types &= UBLK_PARAM_TYPE_ALL;
+ ret = ublk_validate_params(ub);
+ }
+ mutex_unlock(&ub->mutex);
+ ublk_put_device(ub);
+
+ return ret;
+}
+
static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
unsigned int issue_flags)
{
@@ -1471,6 +1744,12 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
case UBLK_CMD_GET_QUEUE_AFFINITY:
ret = ublk_ctrl_get_queue_affinity(cmd);
break;
+ case UBLK_CMD_GET_PARAMS:
+ ret = ublk_ctrl_get_params(cmd);
+ break;
+ case UBLK_CMD_SET_PARAMS:
+ ret = ublk_ctrl_set_params(cmd);
+ break;
default:
break;
}
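
The NEED_GET_DATA handling above pairs a per-io flag with a dedicated command opcode. Below is a minimal userspace sketch of that consistency rule, included only as illustration; the constant values are placeholders, not the real UAPI definitions.

/* Standalone sketch: models the XOR-style check so UBLK_IO_NEED_GET_DATA is
 * accepted iff the driver set UBLK_IO_FLAG_NEED_GET_DATA on the io slot. */
#include <stdbool.h>
#include <stdio.h>

#define UBLK_IO_FLAG_NEED_GET_DATA	0x08	/* placeholder value */
#define UBLK_IO_NEED_GET_DATA		0x23	/* placeholder value */
#define UBLK_IO_FETCH_REQ		0x20	/* placeholder value */

static bool need_get_data_consistent(unsigned int io_flags, unsigned int cmd_op)
{
	/* reject when exactly one of the two conditions holds */
	return !((!!(io_flags & UBLK_IO_FLAG_NEED_GET_DATA)) ^
		 (cmd_op == UBLK_IO_NEED_GET_DATA));
}

int main(void)
{
	/* allowed: flag set and the matching command issued */
	printf("%d\n", need_get_data_consistent(UBLK_IO_FLAG_NEED_GET_DATA,
						UBLK_IO_NEED_GET_DATA));
	/* rejected: command issued without the flag */
	printf("%d\n", need_get_data_consistent(0, UBLK_IO_NEED_GET_DATA));
	/* rejected: flag set but a different command issued */
	printf("%d\n", need_get_data_consistent(UBLK_IO_FLAG_NEED_GET_DATA,
						UBLK_IO_FETCH_REQ));
	return 0;
}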
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index d7d72e8f6e55..30255fcaf181 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -101,6 +101,14 @@ static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
}
}
+static inline struct virtio_blk_vq *get_virtio_blk_vq(struct blk_mq_hw_ctx *hctx)
+{
+ struct virtio_blk *vblk = hctx->queue->queuedata;
+ struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
+
+ return vq;
+}
+
static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr)
{
struct scatterlist hdr, status, *sgs[3];
@@ -416,7 +424,7 @@ static void virtio_queue_rqs(struct request **rqlist)
struct request *requeue_list = NULL;
rq_list_for_each_safe(rqlist, req, next) {
- struct virtio_blk_vq *vq = req->mq_hctx->driver_data;
+ struct virtio_blk_vq *vq = get_virtio_blk_vq(req->mq_hctx);
bool kick;
if (!virtblk_prep_rq_batch(req)) {
@@ -837,7 +845,7 @@ static void virtblk_complete_batch(struct io_comp_batch *iob)
static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
struct virtio_blk *vblk = hctx->queue->queuedata;
- struct virtio_blk_vq *vq = hctx->driver_data;
+ struct virtio_blk_vq *vq = get_virtio_blk_vq(hctx);
struct virtblk_req *vbr;
unsigned long flags;
unsigned int len;
@@ -862,22 +870,10 @@ static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
return found;
}
-static int virtblk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
- unsigned int hctx_idx)
-{
- struct virtio_blk *vblk = data;
- struct virtio_blk_vq *vq = &vblk->vqs[hctx_idx];
-
- WARN_ON(vblk->tag_set.tags[hctx_idx] != hctx->tags);
- hctx->driver_data = vq;
- return 0;
-}
-
static const struct blk_mq_ops virtio_mq_ops = {
.queue_rq = virtio_queue_rq,
.queue_rqs = virtio_queue_rqs,
.commit_rqs = virtio_commit_rqs,
- .init_hctx = virtblk_init_hctx,
.complete = virtblk_request_done,
.map_queues = virtblk_map_queues,
.poll = virtblk_poll,
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 97de13b14175..ee7ad2fb432d 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -157,6 +157,11 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
return 0;
}
+/* Enable the persistent grants feature. */
+static bool feature_persistent = true;
+module_param(feature_persistent, bool, 0644);
+MODULE_PARM_DESC(feature_persistent, "Enables the persistent grants feature");
+
static struct xen_blkif *xen_blkif_alloc(domid_t domid)
{
struct xen_blkif *blkif;
@@ -472,12 +477,6 @@ static void xen_vbd_free(struct xen_vbd *vbd)
vbd->bdev = NULL;
}
-/* Enable the persistent grants feature. */
-static bool feature_persistent = true;
-module_param(feature_persistent, bool, 0644);
-MODULE_PARM_DESC(feature_persistent,
- "Enables the persistent grants feature");
-
static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
unsigned major, unsigned minor, int readonly,
int cdrom)
@@ -520,8 +519,6 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
if (bdev_max_secure_erase_sectors(bdev))
vbd->discard_secure = true;
- vbd->feature_gnt_persistent = feature_persistent;
-
pr_debug("Successful creation of handle=%04x (dom=%u)\n",
handle, blkif->domid);
return 0;
@@ -1087,10 +1084,9 @@ static int connect_ring(struct backend_info *be)
xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
return -ENOSYS;
}
- if (blkif->vbd.feature_gnt_persistent)
- blkif->vbd.feature_gnt_persistent =
- xenbus_read_unsigned(dev->otherend,
- "feature-persistent", 0);
+
+ blkif->vbd.feature_gnt_persistent = feature_persistent &&
+ xenbus_read_unsigned(dev->otherend, "feature-persistent", 0);
blkif->vbd.overflow_max_grants = 0;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index dc48298225a6..8e56e69fb4c4 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1988,8 +1988,6 @@ static int blkfront_probe(struct xenbus_device *dev,
info->vdevice = vdevice;
info->connected = BLKIF_STATE_DISCONNECTED;
- info->feature_persistent = feature_persistent;
-
/* Front end dir is a number, which is used as the id. */
info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
dev_set_drvdata(&dev->dev, info);
@@ -2283,7 +2281,7 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
if (xenbus_read_unsigned(info->xbdev->otherend, "feature-discard", 0))
blkfront_setup_discard(info);
- if (info->feature_persistent)
+ if (feature_persistent)
info->feature_persistent =
!!xenbus_read_unsigned(info->xbdev->otherend,
"feature-persistent", 0);
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index 052aa3f65514..0916de952e09 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -63,12 +63,6 @@ static int zcomp_strm_init(struct zcomp_strm *zstrm, struct zcomp *comp)
bool zcomp_available_algorithm(const char *comp)
{
- int i;
-
- i = sysfs_match_string(backends, comp);
- if (i >= 0)
- return true;
-
/*
* Crypto does not ignore a trailing new line symbol,
* so make sure you don't supply a string containing
@@ -217,6 +211,11 @@ struct zcomp *zcomp_create(const char *compress)
struct zcomp *comp;
int error;
+ /*
+	 * The Crypto API will execute /sbin/modprobe if the compression module
+	 * is not loaded yet. Resolve the algorithm here, otherwise we would end
+	 * up calling /sbin/modprobe under the CPU hot-plug lock.
+ */
if (!zcomp_available_algorithm(compress))
return ERR_PTR(-EINVAL);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 4abeb261b833..226ea76cc819 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -52,7 +52,9 @@ static unsigned int num_devices = 1;
static size_t huge_class_size;
static const struct block_device_operations zram_devops;
+#ifdef CONFIG_ZRAM_WRITEBACK
static const struct block_device_operations zram_wb_devops;
+#endif
static void zram_free_page(struct zram *zram, size_t index);
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
@@ -1144,14 +1146,15 @@ static ssize_t bd_stat_show(struct device *dev,
static ssize_t debug_stat_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int version = 2;
+ int version = 1;
struct zram *zram = dev_to_zram(dev);
ssize_t ret;
down_read(&zram->init_lock);
ret = scnprintf(buf, PAGE_SIZE,
- "version: %d\n%8llu\n",
+ "version: %d\n%8llu %8llu\n",
version,
+ (u64)atomic64_read(&zram->stats.writestall),
(u64)atomic64_read(&zram->stats.miss_free));
up_read(&zram->init_lock);
@@ -1349,7 +1352,7 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
{
int ret = 0;
unsigned long alloced_pages;
- unsigned long handle = 0;
+ unsigned long handle = -ENOMEM;
unsigned int comp_len = 0;
void *src, *dst, *mem;
struct zcomp_strm *zstrm;
@@ -1367,6 +1370,7 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
}
kunmap_atomic(mem);
+compress_again:
zstrm = zcomp_stream_get(zram->comp);
src = kmap_atomic(page);
ret = zcomp_compress(zstrm, src, &comp_len);
@@ -1375,21 +1379,40 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
if (unlikely(ret)) {
zcomp_stream_put(zram->comp);
pr_err("Compression failed! err=%d\n", ret);
+ zs_free(zram->mem_pool, handle);
return ret;
}
if (comp_len >= huge_class_size)
comp_len = PAGE_SIZE;
-
- handle = zs_malloc(zram->mem_pool, comp_len,
- __GFP_KSWAPD_RECLAIM |
- __GFP_NOWARN |
- __GFP_HIGHMEM |
- __GFP_MOVABLE);
-
- if (unlikely(!handle)) {
+ /*
+	 * Handle allocation has two paths:
+	 * a) the fast path is executed with preemption disabled (for
+	 *    per-cpu streams) and has the __GFP_DIRECT_RECLAIM bit clear,
+	 *    since we can't sleep;
+	 * b) the slow path enables preemption and attempts to allocate
+	 *    the page with the __GFP_DIRECT_RECLAIM bit set. We have to
+	 *    put the per-cpu compression stream and, thus, re-do the
+	 *    compression once the handle is allocated.
+	 *
+	 * If we have a valid (non-error) handle here then we are coming
+	 * from the slow path and the handle has already been allocated.
+ */
+ if (IS_ERR((void *)handle))
+ handle = zs_malloc(zram->mem_pool, comp_len,
+ __GFP_KSWAPD_RECLAIM |
+ __GFP_NOWARN |
+ __GFP_HIGHMEM |
+ __GFP_MOVABLE);
+ if (IS_ERR((void *)handle)) {
zcomp_stream_put(zram->comp);
- return -ENOMEM;
+ atomic64_inc(&zram->stats.writestall);
+ handle = zs_malloc(zram->mem_pool, comp_len,
+ GFP_NOIO | __GFP_HIGHMEM |
+ __GFP_MOVABLE);
+ if (!IS_ERR((void *)handle))
+ goto compress_again;
+ return PTR_ERR((void *)handle);
}
alloced_pages = zs_get_total_pages(zram->mem_pool);
@@ -1946,6 +1969,7 @@ static int zram_add(void)
if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue);
ret = device_add_disk(NULL, zram->disk, zram_disk_groups);
if (ret)
goto out_cleanup_disk;
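
The __zram_bvec_write() comment above describes a two-pass handle allocation. The stand-alone, stubbed sketch below mirrors only that control flow; every helper in it is a made-up stand-in, not a zram or zsmalloc API.

/* Stubbed sketch of the fast/slow allocation flow: try a non-sleeping
 * allocation while the (simulated) per-CPU stream is held, and only after
 * dropping the stream retry with a sleeping allocation and re-compress. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool alloc_handle(size_t len, bool may_sleep)
{
	/* pretend the non-sleeping attempt fails to force the slow path */
	return may_sleep;
}

static void compress_page(const char *src) { printf("compressed %s\n", src); }
static void stream_get(void) { printf("stream_get\n"); }
static void stream_put(void) { printf("stream_put\n"); }

int main(void)
{
	bool have_handle = false;

again:
	stream_get();
	compress_page("page");
	if (!have_handle)
		have_handle = alloc_handle(4096, false);  /* fast path, no reclaim */
	if (!have_handle) {
		stream_put();                             /* drop per-CPU stream */
		have_handle = alloc_handle(4096, true);   /* slow path, may sleep */
		if (have_handle)
			goto again;                       /* re-do the compression */
		return 1;
	}
	stream_put();
	return 0;
}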
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 158c91e54850..80c3b43b4828 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -81,6 +81,7 @@ struct zram_stats {
atomic64_t huge_pages_since; /* no. of huge pages since zram set up */
atomic64_t pages_stored; /* no. of pages currently stored */
atomic_long_t max_used_pages; /* no. of maximum pages stored */
+ atomic64_t writestall; /* no. of write slow paths */
atomic64_t miss_free; /* no. of missed free */
#ifdef CONFIG_ZRAM_WRITEBACK
atomic64_t bd_count; /* no. of pages in backing device */
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index fe7e2105e766..bf6716ff863b 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -20,7 +20,7 @@
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/agp_backend.h>
-#include <linux/intel-iommu.h>
+#include <linux/iommu.h>
#include <linux/delay.h>
#include <asm/smp.h>
#include "agp.h"
@@ -573,18 +573,15 @@ static void intel_gtt_cleanup(void)
*/
static inline int needs_ilk_vtd_wa(void)
{
-#ifdef CONFIG_INTEL_IOMMU
const unsigned short gpu_devid = intel_private.pcidev->device;
- /* Query intel_iommu to see if we need the workaround. Presumably that
- * was loaded first.
+ /*
+ * Query iommu subsystem to see if we need the workaround. Presumably
+ * that was loaded first.
*/
- if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG ||
- gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
- intel_iommu_gfx_mapped)
- return 1;
-#endif
- return 0;
+ return ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG ||
+ gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
+ device_iommu_mapped(&intel_private.pcidev->dev));
}
static bool intel_gtt_can_wc(void)
diff --git a/drivers/char/hw_random/powernv-rng.c b/drivers/char/hw_random/powernv-rng.c
index 8da1d7917bdc..429e956f34e1 100644
--- a/drivers/char/hw_random/powernv-rng.c
+++ b/drivers/char/hw_random/powernv-rng.c
@@ -23,7 +23,7 @@ static int powernv_rng_read(struct hwrng *rng, void *data, size_t max, bool wait
buf = (unsigned long *)data;
for (i = 0; i < len; i++)
- powernv_get_random_long(buf++);
+ pnv_get_random_long(buf++);
return len * sizeof(unsigned long);
}
diff --git a/drivers/char/hw_random/s390-trng.c b/drivers/char/hw_random/s390-trng.c
index 488808dc17a2..795853dfc46b 100644
--- a/drivers/char/hw_random/s390-trng.c
+++ b/drivers/char/hw_random/s390-trng.c
@@ -252,5 +252,5 @@ static void __exit trng_exit(void)
trng_debug_exit();
}
-module_cpu_feature_match(MSA, trng_init);
+module_cpu_feature_match(S390_CPU_FEATURE_MSA, trng_init);
module_exit(trng_exit);
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 84ca98ed1dad..32a932a065a6 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -480,6 +480,11 @@ static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}
+static int uring_cmd_null(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
+{
+ return 0;
+}
+
static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
size_t written = 0;
@@ -663,6 +668,7 @@ static const struct file_operations null_fops = {
.read_iter = read_iter_null,
.write_iter = write_iter_null,
.splice_write = splice_write_null,
+ .uring_cmd = uring_cmd_null,
};
static const struct file_operations __maybe_unused port_fops = {
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 4a5516406c22..927088b2c3d3 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -74,6 +74,18 @@ config TCG_TIS_SPI_CR50
If you have a H1 secure module running Cr50 firmware on SPI bus,
say Yes and it will be accessible from within Linux.
+config TCG_TIS_I2C
+ tristate "TPM Interface Specification 1.3 Interface / TPM 2.0 FIFO Interface - (I2C - generic)"
+ depends on I2C
+ select CRC_CCITT
+ select TCG_TIS_CORE
+ help
+ If you have a TPM security chip, compliant with the TCG TPM PTP
+ (I2C interface) specification and connected to an I2C bus master,
+ say Yes and it will be accessible from within Linux.
+ To compile this driver as a module, choose M here;
+ the module will be called tpm_tis_i2c.
+
config TCG_TIS_SYNQUACER
tristate "TPM Interface Specification 1.2 Interface / TPM 2.0 FIFO Interface (MMIO - SynQuacer)"
depends on ARCH_SYNQUACER || COMPILE_TEST
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index 66d39ea6bd10..0222b1ddb310 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -29,6 +29,7 @@ tpm_tis_spi-$(CONFIG_TCG_TIS_SPI_CR50) += tpm_tis_spi_cr50.o
obj-$(CONFIG_TCG_TIS_I2C_CR50) += tpm_tis_i2c_cr50.o
+obj-$(CONFIG_TCG_TIS_I2C) += tpm_tis_i2c.o
obj-$(CONFIG_TCG_TIS_I2C_ATMEL) += tpm_i2c_atmel.o
obj-$(CONFIG_TCG_TIS_I2C_INFINEON) += tpm_i2c_infineon.o
obj-$(CONFIG_TCG_TIS_I2C_NUVOTON) += tpm_i2c_nuvoton.o
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 2163c6ee0d36..24ee4e1cc452 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -55,6 +55,7 @@ enum tpm_addr {
#define TPM_WARN_DOING_SELFTEST 0x802
#define TPM_ERR_DEACTIVATED 0x6
#define TPM_ERR_DISABLED 0x7
+#define TPM_ERR_FAILEDSELFTEST 0x1C
#define TPM_ERR_INVALID_POSTINIT 38
#define TPM_TAG_RQU_COMMAND 193
diff --git a/drivers/char/tpm/tpm1-cmd.c b/drivers/char/tpm/tpm1-cmd.c
index f7dc986fa4a0..cf64c7385105 100644
--- a/drivers/char/tpm/tpm1-cmd.c
+++ b/drivers/char/tpm/tpm1-cmd.c
@@ -709,7 +709,12 @@ int tpm1_auto_startup(struct tpm_chip *chip)
if (rc)
goto out;
rc = tpm1_do_selftest(chip);
- if (rc) {
+ if (rc == TPM_ERR_FAILEDSELFTEST) {
+ dev_warn(&chip->dev, "TPM self test failed, switching to the firmware upgrade mode\n");
+ /* A TPM in this state possibly allows or needs a firmware upgrade */
+ chip->flags |= TPM_CHIP_FLAG_FIRMWARE_UPGRADE;
+ return 0;
+ } else if (rc) {
dev_err(&chip->dev, "TPM self test failed\n");
goto out;
}
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index c1eb5d223839..65d03867e114 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -752,6 +752,12 @@ int tpm2_auto_startup(struct tpm_chip *chip)
}
rc = tpm2_get_cc_attrs_tbl(chip);
+ if (rc == TPM2_RC_FAILURE || (rc < 0 && rc != -ENOMEM)) {
+ dev_info(&chip->dev,
+ "TPM in field failure mode, requires firmware upgrade\n");
+ chip->flags |= TPM_CHIP_FLAG_FIRMWARE_UPGRADE;
+ rc = 0;
+ }
out:
/*
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index dc56b976d816..757623bacfd5 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -289,6 +289,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
int size = 0;
int status;
u32 expected;
+ int rc;
if (count < TPM_HEADER_SIZE) {
size = -EIO;
@@ -328,6 +329,13 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
goto out;
}
+ rc = tpm_tis_verify_crc(priv, (size_t)size, buf);
+ if (rc < 0) {
+ dev_err(&chip->dev, "CRC mismatch for response.\n");
+ size = rc;
+ goto out;
+ }
+
out:
tpm_tis_ready(chip);
return size;
@@ -443,6 +451,12 @@ static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len)
if (rc < 0)
return rc;
+ rc = tpm_tis_verify_crc(priv, len, buf);
+ if (rc < 0) {
+ dev_err(&chip->dev, "CRC mismatch for command.\n");
+ return rc;
+ }
+
/* go and do it */
rc = tpm_tis_write8(priv, TPM_STS(priv->locality), TPM_STS_GO);
if (rc < 0)
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index 6c203f36b8a1..66a5a13cd1df 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -121,6 +121,8 @@ struct tpm_tis_phy_ops {
u8 *result, enum tpm_tis_io_mode mode);
int (*write_bytes)(struct tpm_tis_data *data, u32 addr, u16 len,
const u8 *value, enum tpm_tis_io_mode mode);
+ int (*verify_crc)(struct tpm_tis_data *data, size_t len,
+ const u8 *value);
};
static inline int tpm_tis_read_bytes(struct tpm_tis_data *data, u32 addr,
@@ -188,6 +190,14 @@ static inline int tpm_tis_write32(struct tpm_tis_data *data, u32 addr,
return rc;
}
+static inline int tpm_tis_verify_crc(struct tpm_tis_data *data, size_t len,
+ const u8 *value)
+{
+ if (!data->phy_ops->verify_crc)
+ return 0;
+ return data->phy_ops->verify_crc(data, len, value);
+}
+
static inline bool is_bsw(void)
{
#ifdef CONFIG_X86
diff --git a/drivers/char/tpm/tpm_tis_i2c.c b/drivers/char/tpm/tpm_tis_i2c.c
new file mode 100644
index 000000000000..ba0911b1d1ff
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis_i2c.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2021 Nuvoton Technology corporation
+ * Copyright (C) 2019-2022 Infineon Technologies AG
+ *
+ * This device driver implements the TPM interface as defined in the TCG PC
+ * Client Platform TPM Profile (PTP) Specification for TPM 2.0 v1.04
+ * Revision 14.
+ *
+ * It is based on the tpm_tis_spi device driver.
+ */
+
+#include <linux/i2c.h>
+#include <linux/crc-ccitt.h>
+#include "tpm_tis_core.h"
+
+/* TPM registers */
+#define TPM_I2C_LOC_SEL 0x00
+#define TPM_I2C_ACCESS 0x04
+#define TPM_I2C_INTERFACE_CAPABILITY 0x30
+#define TPM_I2C_DEVICE_ADDRESS 0x38
+#define TPM_I2C_DATA_CSUM_ENABLE 0x40
+#define TPM_DATA_CSUM 0x44
+#define TPM_I2C_DID_VID 0x48
+#define TPM_I2C_RID 0x4C
+
+/* TIS-compatible register address to avoid clash with TPM_ACCESS (0x00) */
+#define TPM_LOC_SEL 0x0FFF
+
+/* Mask to extract the I2C register from TIS register addresses */
+#define TPM_TIS_REGISTER_MASK 0x0FFF
+
+/* Default Guard Time of 250µs until interface capability register is read */
+#define GUARD_TIME_DEFAULT_MIN 250
+#define GUARD_TIME_DEFAULT_MAX 300
+
+/* Guard Time of 250µs after I2C slave NACK */
+#define GUARD_TIME_ERR_MIN 250
+#define GUARD_TIME_ERR_MAX 300
+
+/* Guard Time bit masks; SR is repeated start, RW is read then write, etc. */
+#define TPM_GUARD_TIME_SR_MASK 0x40000000
+#define TPM_GUARD_TIME_RR_MASK 0x00100000
+#define TPM_GUARD_TIME_RW_MASK 0x00080000
+#define TPM_GUARD_TIME_WR_MASK 0x00040000
+#define TPM_GUARD_TIME_WW_MASK 0x00020000
+#define TPM_GUARD_TIME_MIN_MASK 0x0001FE00
+#define TPM_GUARD_TIME_MIN_SHIFT 9
+
+/* Masks with bits that must be read zero */
+#define TPM_ACCESS_READ_ZERO 0x48
+#define TPM_INT_ENABLE_ZERO 0x7FFFFF6
+#define TPM_STS_READ_ZERO 0x23
+#define TPM_INTF_CAPABILITY_ZERO 0x0FFFF000
+#define TPM_I2C_INTERFACE_CAPABILITY_ZERO 0x80000000
+
+struct tpm_tis_i2c_phy {
+ struct tpm_tis_data priv;
+ struct i2c_client *i2c_client;
+ bool guard_time_read;
+ bool guard_time_write;
+ u16 guard_time_min;
+ u16 guard_time_max;
+ u8 *io_buf;
+};
+
+static inline struct tpm_tis_i2c_phy *
+to_tpm_tis_i2c_phy(struct tpm_tis_data *data)
+{
+ return container_of(data, struct tpm_tis_i2c_phy, priv);
+}
+
+/*
+ * tpm_tis_core uses the register addresses as defined in Table 19 "Allocation
+ * of Register Space for FIFO TPM Access" of the TCG PC Client PTP
+ * Specification. In order for this code to work together with tpm_tis_core,
+ * those addresses need to be mapped to the registers defined for I2C TPMs in
+ * Table 51 "I2C-TPM Register Overview".
+ *
+ * For most addresses this can be done by simply stripping off the locality
+ * information from the address. A few addresses need to be mapped explicitly,
+ * since the corresponding I2C registers have been moved around. TPM_LOC_SEL is
+ * only defined for I2C TPMs and is also mapped explicitly here to distinguish
+ * it from TPM_ACCESS(0).
+ *
+ * Locality information is ignored, since this driver assumes exclusive access
+ * to the TPM and always uses locality 0.
+ */
+static u8 tpm_tis_i2c_address_to_register(u32 addr)
+{
+ addr &= TPM_TIS_REGISTER_MASK;
+
+ switch (addr) {
+ case TPM_ACCESS(0):
+ return TPM_I2C_ACCESS;
+ case TPM_LOC_SEL:
+ return TPM_I2C_LOC_SEL;
+ case TPM_DID_VID(0):
+ return TPM_I2C_DID_VID;
+ case TPM_RID(0):
+ return TPM_I2C_RID;
+ default:
+ return addr;
+ }
+}
+
+static int tpm_tis_i2c_retry_transfer_until_ack(struct tpm_tis_data *data,
+ struct i2c_msg *msg)
+{
+ struct tpm_tis_i2c_phy *phy = to_tpm_tis_i2c_phy(data);
+ bool guard_time;
+ int i = 0;
+ int ret;
+
+ if (msg->flags & I2C_M_RD)
+ guard_time = phy->guard_time_read;
+ else
+ guard_time = phy->guard_time_write;
+
+ do {
+ ret = i2c_transfer(phy->i2c_client->adapter, msg, 1);
+ if (ret < 0)
+ usleep_range(GUARD_TIME_ERR_MIN, GUARD_TIME_ERR_MAX);
+ else if (guard_time)
+ usleep_range(phy->guard_time_min, phy->guard_time_max);
+ /* retry on TPM NACK */
+ } while (ret < 0 && i++ < TPM_RETRY);
+
+ return ret;
+}
+
+/* Check that bits which must be read zero are not set */
+static int tpm_tis_i2c_sanity_check_read(u8 reg, u16 len, u8 *buf)
+{
+ u32 zero_mask;
+ u32 value;
+
+ switch (len) {
+ case sizeof(u8):
+ value = buf[0];
+ break;
+ case sizeof(u16):
+ value = le16_to_cpup((__le16 *)buf);
+ break;
+ case sizeof(u32):
+ value = le32_to_cpup((__le32 *)buf);
+ break;
+ default:
+ /* unknown length, skip check */
+ return 0;
+ }
+
+ switch (reg) {
+ case TPM_I2C_ACCESS:
+ zero_mask = TPM_ACCESS_READ_ZERO;
+ break;
+ case TPM_INT_ENABLE(0) & TPM_TIS_REGISTER_MASK:
+ zero_mask = TPM_INT_ENABLE_ZERO;
+ break;
+ case TPM_STS(0) & TPM_TIS_REGISTER_MASK:
+ zero_mask = TPM_STS_READ_ZERO;
+ break;
+ case TPM_INTF_CAPS(0) & TPM_TIS_REGISTER_MASK:
+ zero_mask = TPM_INTF_CAPABILITY_ZERO;
+ break;
+ case TPM_I2C_INTERFACE_CAPABILITY:
+ zero_mask = TPM_I2C_INTERFACE_CAPABILITY_ZERO;
+ break;
+ default:
+ /* unknown register, skip check */
+ return 0;
+ }
+
+ if (unlikely((value & zero_mask) != 0x00)) {
+ pr_debug("TPM I2C read of register 0x%02x failed sanity check: 0x%x\n", reg, value);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int tpm_tis_i2c_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
+ u8 *result, enum tpm_tis_io_mode io_mode)
+{
+ struct tpm_tis_i2c_phy *phy = to_tpm_tis_i2c_phy(data);
+ struct i2c_msg msg = { .addr = phy->i2c_client->addr };
+ u8 reg = tpm_tis_i2c_address_to_register(addr);
+ int i;
+ int ret;
+
+ for (i = 0; i < TPM_RETRY; i++) {
+ /* write register */
+ msg.len = sizeof(reg);
+ msg.buf = &reg;
+ msg.flags = 0;
+ ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
+ if (ret < 0)
+ return ret;
+
+ /* read data */
+ msg.buf = result;
+ msg.len = len;
+ msg.flags = I2C_M_RD;
+ ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
+ if (ret < 0)
+ return ret;
+
+ ret = tpm_tis_i2c_sanity_check_read(reg, len, result);
+ if (ret == 0)
+ return 0;
+
+ usleep_range(GUARD_TIME_ERR_MIN, GUARD_TIME_ERR_MAX);
+ }
+
+ return ret;
+}
+
+static int tpm_tis_i2c_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
+ const u8 *value,
+ enum tpm_tis_io_mode io_mode)
+{
+ struct tpm_tis_i2c_phy *phy = to_tpm_tis_i2c_phy(data);
+ struct i2c_msg msg = { .addr = phy->i2c_client->addr };
+ u8 reg = tpm_tis_i2c_address_to_register(addr);
+ int ret;
+
+ if (len > TPM_BUFSIZE - 1)
+ return -EIO;
+
+ /* write register and data in one go */
+ phy->io_buf[0] = reg;
+ memcpy(phy->io_buf + sizeof(reg), value, len);
+
+ msg.len = sizeof(reg) + len;
+ msg.buf = phy->io_buf;
+ ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int tpm_tis_i2c_verify_crc(struct tpm_tis_data *data, size_t len,
+ const u8 *value)
+{
+ u16 crc_tpm, crc_host;
+ int rc;
+
+ rc = tpm_tis_read16(data, TPM_DATA_CSUM, &crc_tpm);
+ if (rc < 0)
+ return rc;
+
+ /* reflect crc result, regardless of host endianness */
+ crc_host = swab16(crc_ccitt(0, value, len));
+ if (crc_tpm != crc_host)
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * Guard Time:
+ * After each I2C operation, the TPM might require the master to wait.
+ * The time period is vendor-specific and must be read from the
+ * TPM_I2C_INTERFACE_CAPABILITY register.
+ *
+ * Before the Guard Time is read (or after the TPM has NACKed an I2C transfer),
+ * a Guard Time of 250µs applies.
+ *
+ * Various flags in the same register indicate if a guard time is needed:
+ * - SR: <I2C read with repeated start> <guard time> <I2C read>
+ * - RR: <I2C read> <guard time> <I2C read>
+ * - RW: <I2C read> <guard time> <I2C write>
+ * - WR: <I2C write> <guard time> <I2C read>
+ * - WW: <I2C write> <guard time> <I2C write>
+ *
+ * See TCG PC Client PTP Specification v1.04, 8.1.10 GUARD_TIME
+ */
+static int tpm_tis_i2c_init_guard_time(struct tpm_tis_i2c_phy *phy)
+{
+ u32 i2c_caps;
+ int ret;
+
+ phy->guard_time_read = true;
+ phy->guard_time_write = true;
+ phy->guard_time_min = GUARD_TIME_DEFAULT_MIN;
+ phy->guard_time_max = GUARD_TIME_DEFAULT_MAX;
+
+ ret = tpm_tis_i2c_read_bytes(&phy->priv, TPM_I2C_INTERFACE_CAPABILITY,
+ sizeof(i2c_caps), (u8 *)&i2c_caps,
+ TPM_TIS_PHYS_32);
+ if (ret)
+ return ret;
+
+ phy->guard_time_read = (i2c_caps & TPM_GUARD_TIME_RR_MASK) ||
+ (i2c_caps & TPM_GUARD_TIME_RW_MASK);
+ phy->guard_time_write = (i2c_caps & TPM_GUARD_TIME_WR_MASK) ||
+ (i2c_caps & TPM_GUARD_TIME_WW_MASK);
+ phy->guard_time_min = (i2c_caps & TPM_GUARD_TIME_MIN_MASK) >>
+ TPM_GUARD_TIME_MIN_SHIFT;
+ /* guard_time_max = guard_time_min * 1.2 */
+ phy->guard_time_max = phy->guard_time_min + phy->guard_time_min / 5;
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
+
+static const struct tpm_tis_phy_ops tpm_i2c_phy_ops = {
+ .read_bytes = tpm_tis_i2c_read_bytes,
+ .write_bytes = tpm_tis_i2c_write_bytes,
+ .verify_crc = tpm_tis_i2c_verify_crc,
+};
+
+static int tpm_tis_i2c_probe(struct i2c_client *dev,
+ const struct i2c_device_id *id)
+{
+ struct tpm_tis_i2c_phy *phy;
+ const u8 crc_enable = 1;
+ const u8 locality = 0;
+ int ret;
+
+ phy = devm_kzalloc(&dev->dev, sizeof(struct tpm_tis_i2c_phy),
+ GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ phy->io_buf = devm_kzalloc(&dev->dev, TPM_BUFSIZE, GFP_KERNEL);
+ if (!phy->io_buf)
+ return -ENOMEM;
+
+ phy->i2c_client = dev;
+
+ /* must precede all communication with the tpm */
+ ret = tpm_tis_i2c_init_guard_time(phy);
+ if (ret)
+ return ret;
+
+ ret = tpm_tis_i2c_write_bytes(&phy->priv, TPM_LOC_SEL, sizeof(locality),
+ &locality, TPM_TIS_PHYS_8);
+ if (ret)
+ return ret;
+
+ ret = tpm_tis_i2c_write_bytes(&phy->priv, TPM_I2C_DATA_CSUM_ENABLE,
+ sizeof(crc_enable), &crc_enable,
+ TPM_TIS_PHYS_8);
+ if (ret)
+ return ret;
+
+ return tpm_tis_core_init(&dev->dev, &phy->priv, -1, &tpm_i2c_phy_ops,
+ NULL);
+}
+
+static int tpm_tis_i2c_remove(struct i2c_client *client)
+{
+ struct tpm_chip *chip = i2c_get_clientdata(client);
+
+ tpm_chip_unregister(chip);
+ tpm_tis_remove(chip);
+ return 0;
+}
+
+static const struct i2c_device_id tpm_tis_i2c_id[] = {
+ { "tpm_tis_i2c", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, tpm_tis_i2c_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_tis_i2c_match[] = {
+ { .compatible = "infineon,slb9673", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_tis_i2c_match);
+#endif
+
+static struct i2c_driver tpm_tis_i2c_driver = {
+ .driver = {
+ .name = "tpm_tis_i2c",
+ .pm = &tpm_tis_pm,
+ .of_match_table = of_match_ptr(of_tis_i2c_match),
+ },
+ .probe = tpm_tis_i2c_probe,
+ .remove = tpm_tis_i2c_remove,
+ .id_table = tpm_tis_i2c_id,
+};
+module_i2c_driver(tpm_tis_i2c_driver);
+
+MODULE_DESCRIPTION("TPM Driver for native I2C access");
+MODULE_LICENSE("GPL");
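
tpm_tis_i2c_init_guard_time() above decodes the vendor-specific guard time from the interface capability register. A small stand-alone sketch of that decoding follows; the mask values mirror the driver's #defines, and the sample i2c_caps word is invented purely for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Masks copied from the driver above (TPM_I2C_INTERFACE_CAPABILITY layout). */
#define TPM_GUARD_TIME_RR_MASK   0x00100000
#define TPM_GUARD_TIME_RW_MASK   0x00080000
#define TPM_GUARD_TIME_WR_MASK   0x00040000
#define TPM_GUARD_TIME_WW_MASK   0x00020000
#define TPM_GUARD_TIME_MIN_MASK  0x0001FE00
#define TPM_GUARD_TIME_MIN_SHIFT 9

int main(void)
{
	/* invented sample: RR and WR guard times required, 100us minimum */
	uint32_t i2c_caps = TPM_GUARD_TIME_RR_MASK | TPM_GUARD_TIME_WR_MASK |
			    (100u << TPM_GUARD_TIME_MIN_SHIFT);
	bool guard_time_read = (i2c_caps & TPM_GUARD_TIME_RR_MASK) ||
			       (i2c_caps & TPM_GUARD_TIME_RW_MASK);
	bool guard_time_write = (i2c_caps & TPM_GUARD_TIME_WR_MASK) ||
				(i2c_caps & TPM_GUARD_TIME_WW_MASK);
	unsigned int min = (i2c_caps & TPM_GUARD_TIME_MIN_MASK) >>
			   TPM_GUARD_TIME_MIN_SHIFT;
	unsigned int max = min + min / 5;	/* roughly min * 1.2 */

	printf("read=%d write=%d min=%uus max=%uus\n",
	       guard_time_read, guard_time_write, min, max);
	return 0;
}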
diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c
index f9d5b7334341..4fb4fd4b06bd 100644
--- a/drivers/clk/clk-devres.c
+++ b/drivers/clk/clk-devres.c
@@ -4,42 +4,101 @@
#include <linux/export.h>
#include <linux/gfp.h>
+struct devm_clk_state {
+ struct clk *clk;
+ void (*exit)(struct clk *clk);
+};
+
static void devm_clk_release(struct device *dev, void *res)
{
- clk_put(*(struct clk **)res);
+ struct devm_clk_state *state = res;
+
+ if (state->exit)
+ state->exit(state->clk);
+
+ clk_put(state->clk);
}
-struct clk *devm_clk_get(struct device *dev, const char *id)
+static struct clk *__devm_clk_get(struct device *dev, const char *id,
+ struct clk *(*get)(struct device *dev, const char *id),
+ int (*init)(struct clk *clk),
+ void (*exit)(struct clk *clk))
{
- struct clk **ptr, *clk;
+ struct devm_clk_state *state;
+ struct clk *clk;
+ int ret;
- ptr = devres_alloc(devm_clk_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
+ state = devres_alloc(devm_clk_release, sizeof(*state), GFP_KERNEL);
+ if (!state)
return ERR_PTR(-ENOMEM);
- clk = clk_get(dev, id);
- if (!IS_ERR(clk)) {
- *ptr = clk;
- devres_add(dev, ptr);
- } else {
- devres_free(ptr);
+ clk = get(dev, id);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto err_clk_get;
}
+ if (init) {
+ ret = init(clk);
+ if (ret)
+ goto err_clk_init;
+ }
+
+ state->clk = clk;
+ state->exit = exit;
+
+ devres_add(dev, state);
+
return clk;
+
+err_clk_init:
+
+ clk_put(clk);
+err_clk_get:
+
+ devres_free(state);
+ return ERR_PTR(ret);
+}
+
+struct clk *devm_clk_get(struct device *dev, const char *id)
+{
+ return __devm_clk_get(dev, id, clk_get, NULL, NULL);
}
EXPORT_SYMBOL(devm_clk_get);
-struct clk *devm_clk_get_optional(struct device *dev, const char *id)
+struct clk *devm_clk_get_prepared(struct device *dev, const char *id)
{
- struct clk *clk = devm_clk_get(dev, id);
+ return __devm_clk_get(dev, id, clk_get, clk_prepare, clk_unprepare);
+}
+EXPORT_SYMBOL_GPL(devm_clk_get_prepared);
- if (clk == ERR_PTR(-ENOENT))
- return NULL;
+struct clk *devm_clk_get_enabled(struct device *dev, const char *id)
+{
+ return __devm_clk_get(dev, id, clk_get,
+ clk_prepare_enable, clk_disable_unprepare);
+}
+EXPORT_SYMBOL_GPL(devm_clk_get_enabled);
- return clk;
+struct clk *devm_clk_get_optional(struct device *dev, const char *id)
+{
+ return __devm_clk_get(dev, id, clk_get_optional, NULL, NULL);
}
EXPORT_SYMBOL(devm_clk_get_optional);
+struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id)
+{
+ return __devm_clk_get(dev, id, clk_get_optional,
+ clk_prepare, clk_unprepare);
+}
+EXPORT_SYMBOL_GPL(devm_clk_get_optional_prepared);
+
+struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id)
+{
+ return __devm_clk_get(dev, id, clk_get_optional,
+ clk_prepare_enable, clk_disable_unprepare);
+}
+EXPORT_SYMBOL_GPL(devm_clk_get_optional_enabled);
+
struct clk_bulk_devres {
struct clk_bulk_data *clks;
int num_clks;
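
The new devm_clk_get_enabled()/devm_clk_get_prepared() helpers fold clk_get() plus prepare/enable and the matching cleanup into one managed call. A hypothetical probe() sketch is shown below; the "foo" clock name and driver are made up.

#include <linux/clk.h>
#include <linux/platform_device.h>

/* Hypothetical consumer: "foo" is an invented clock name. */
static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/*
	 * Previously: devm_clk_get() + clk_prepare_enable() plus an error
	 * path and a remove-time clk_disable_unprepare(). The managed
	 * helper does all of that and undoes it on driver detach.
	 */
	clk = devm_clk_get_enabled(&pdev->dev, "foo");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* the clock is prepared and enabled from here on */
	return 0;
}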
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index 54942d758ee6..f734e34735a9 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -78,7 +78,8 @@ static void devm_clk_hw_register_fixed_factor_release(struct device *dev, void *
static struct clk_hw *
__clk_hw_register_fixed_factor(struct device *dev, struct device_node *np,
- const char *name, const char *parent_name, int index,
+ const char *name, const char *parent_name,
+ const struct clk_hw *parent_hw, int index,
unsigned long flags, unsigned int mult, unsigned int div,
bool devm)
{
@@ -110,6 +111,8 @@ __clk_hw_register_fixed_factor(struct device *dev, struct device_node *np,
init.flags = flags;
if (parent_name)
init.parent_names = &parent_name;
+ else if (parent_hw)
+ init.parent_hws = &parent_hw;
else
init.parent_data = &pdata;
init.num_parents = 1;
@@ -148,16 +151,48 @@ struct clk_hw *devm_clk_hw_register_fixed_factor_index(struct device *dev,
const char *name, unsigned int index, unsigned long flags,
unsigned int mult, unsigned int div)
{
- return __clk_hw_register_fixed_factor(dev, NULL, name, NULL, index,
+ return __clk_hw_register_fixed_factor(dev, NULL, name, NULL, NULL, index,
flags, mult, div, true);
}
EXPORT_SYMBOL_GPL(devm_clk_hw_register_fixed_factor_index);
+/**
+ * devm_clk_hw_register_fixed_factor_parent_hw - Register a fixed factor clock with
+ * pointer to parent clock
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_hw: pointer to parent clk
+ * @flags: fixed factor flags
+ * @mult: multiplier
+ * @div: divider
+ *
+ * Return: Pointer to fixed factor clk_hw structure that was registered or
+ * an error pointer.
+ */
+struct clk_hw *devm_clk_hw_register_fixed_factor_parent_hw(struct device *dev,
+ const char *name, const struct clk_hw *parent_hw,
+ unsigned long flags, unsigned int mult, unsigned int div)
+{
+ return __clk_hw_register_fixed_factor(dev, NULL, name, NULL, parent_hw,
+ -1, flags, mult, div, true);
+}
+EXPORT_SYMBOL_GPL(devm_clk_hw_register_fixed_factor_parent_hw);
+
+struct clk_hw *clk_hw_register_fixed_factor_parent_hw(struct device *dev,
+ const char *name, const struct clk_hw *parent_hw,
+ unsigned long flags, unsigned int mult, unsigned int div)
+{
+ return __clk_hw_register_fixed_factor(dev, NULL, name, NULL,
+ parent_hw, -1, flags, mult, div,
+ false);
+}
+EXPORT_SYMBOL_GPL(clk_hw_register_fixed_factor_parent_hw);
+
struct clk_hw *clk_hw_register_fixed_factor(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
unsigned int mult, unsigned int div)
{
- return __clk_hw_register_fixed_factor(dev, NULL, name, parent_name, -1,
+ return __clk_hw_register_fixed_factor(dev, NULL, name, parent_name, NULL, -1,
flags, mult, div, false);
}
EXPORT_SYMBOL_GPL(clk_hw_register_fixed_factor);
@@ -204,22 +239,16 @@ struct clk_hw *devm_clk_hw_register_fixed_factor(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
unsigned int mult, unsigned int div)
{
- return __clk_hw_register_fixed_factor(dev, NULL, name, parent_name, -1,
+ return __clk_hw_register_fixed_factor(dev, NULL, name, parent_name, NULL, -1,
flags, mult, div, true);
}
EXPORT_SYMBOL_GPL(devm_clk_hw_register_fixed_factor);
#ifdef CONFIG_OF
-static const struct of_device_id set_rate_parent_matches[] = {
- { .compatible = "allwinner,sun4i-a10-pll3-2x-clk" },
- { /* Sentinel */ },
-};
-
static struct clk_hw *_of_fixed_factor_clk_setup(struct device_node *node)
{
struct clk_hw *hw;
const char *clk_name = node->name;
- unsigned long flags = 0;
u32 div, mult;
int ret;
@@ -237,11 +266,8 @@ static struct clk_hw *_of_fixed_factor_clk_setup(struct device_node *node)
of_property_read_string(node, "clock-output-names", &clk_name);
- if (of_match_node(set_rate_parent_matches, node))
- flags |= CLK_SET_RATE_PARENT;
-
- hw = __clk_hw_register_fixed_factor(NULL, node, clk_name, NULL, 0,
- flags, mult, div, false);
+ hw = __clk_hw_register_fixed_factor(NULL, node, clk_name, NULL, NULL, 0,
+ 0, mult, div, false);
if (IS_ERR(hw)) {
/*
* Clear OF_POPULATED flag so that clock registration can be
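
The *_parent_hw variants added above let a fixed-factor clock be registered straight from a parent clk_hw pointer instead of a parent name lookup. A hypothetical sketch follows; the "foo_div2" name and wrapper function are invented.

#include <linux/clk-provider.h>

/* Hypothetical provider helper: registers a fixed /2 child of parent_hw. */
static struct clk_hw *foo_register_div2(struct device *dev,
					struct clk_hw *parent_hw)
{
	/* name "foo_div2", no flags, mult = 1, div = 2 */
	return devm_clk_hw_register_fixed_factor_parent_hw(dev, "foo_div2",
							    parent_hw, 0, 1, 2);
}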
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index f00d4c1158d7..7fc191c15507 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -4279,54 +4279,6 @@ int devm_clk_hw_register(struct device *dev, struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(devm_clk_hw_register);
-static int devm_clk_match(struct device *dev, void *res, void *data)
-{
- struct clk *c = res;
- if (WARN_ON(!c))
- return 0;
- return c == data;
-}
-
-static int devm_clk_hw_match(struct device *dev, void *res, void *data)
-{
- struct clk_hw *hw = res;
-
- if (WARN_ON(!hw))
- return 0;
- return hw == data;
-}
-
-/**
- * devm_clk_unregister - resource managed clk_unregister()
- * @dev: device that is unregistering the clock data
- * @clk: clock to unregister
- *
- * Deallocate a clock allocated with devm_clk_register(). Normally
- * this function will not need to be called and the resource management
- * code will ensure that the resource is freed.
- */
-void devm_clk_unregister(struct device *dev, struct clk *clk)
-{
- WARN_ON(devres_release(dev, devm_clk_unregister_cb, devm_clk_match, clk));
-}
-EXPORT_SYMBOL_GPL(devm_clk_unregister);
-
-/**
- * devm_clk_hw_unregister - resource managed clk_hw_unregister()
- * @dev: device that is unregistering the hardware-specific clock data
- * @hw: link to hardware-specific clock data
- *
- * Unregister a clk_hw registered with devm_clk_hw_register(). Normally
- * this function will not need to be called and the resource management
- * code will ensure that the resource is freed.
- */
-void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw)
-{
- WARN_ON(devres_release(dev, devm_clk_hw_unregister_cb, devm_clk_hw_match,
- hw));
-}
-EXPORT_SYMBOL_GPL(devm_clk_hw_unregister);
-
static void devm_clk_release(struct device *dev, void *res)
{
clk_put(*(struct clk **)res);
diff --git a/drivers/clk/imx/clk-fracn-gppll.c b/drivers/clk/imx/clk-fracn-gppll.c
index 71c102d950ab..a2aaa14fc1ae 100644
--- a/drivers/clk/imx/clk-fracn-gppll.c
+++ b/drivers/clk/imx/clk-fracn-gppll.c
@@ -64,10 +64,13 @@ struct clk_fracn_gppll {
* Fout = Fvco / (rdiv * odiv)
*/
static const struct imx_fracn_gppll_rate_table fracn_tbl[] = {
- PLL_FRACN_GP(650000000U, 81, 0, 0, 0, 3),
- PLL_FRACN_GP(594000000U, 198, 0, 0, 0, 8),
- PLL_FRACN_GP(560000000U, 70, 0, 0, 0, 3),
- PLL_FRACN_GP(400000000U, 50, 0, 0, 0, 3),
+ PLL_FRACN_GP(650000000U, 81, 0, 1, 0, 3),
+ PLL_FRACN_GP(594000000U, 198, 0, 1, 0, 8),
+ PLL_FRACN_GP(560000000U, 70, 0, 1, 0, 3),
+ PLL_FRACN_GP(498000000U, 83, 0, 1, 0, 4),
+ PLL_FRACN_GP(484000000U, 121, 0, 1, 0, 6),
+ PLL_FRACN_GP(445333333U, 167, 0, 1, 0, 9),
+ PLL_FRACN_GP(400000000U, 50, 0, 1, 0, 3),
PLL_FRACN_GP(393216000U, 81, 92, 100, 0, 5)
};
@@ -131,18 +134,7 @@ static unsigned long clk_fracn_gppll_recalc_rate(struct clk_hw *hw, unsigned lon
mfi = FIELD_GET(PLL_MFI_MASK, pll_div);
rdiv = FIELD_GET(PLL_RDIV_MASK, pll_div);
- rdiv = rdiv + 1;
odiv = FIELD_GET(PLL_ODIV_MASK, pll_div);
- switch (odiv) {
- case 0:
- odiv = 2;
- break;
- case 1:
- odiv = 3;
- break;
- default:
- break;
- }
/*
* Sometimes, the recalculated rate has deviation due to
@@ -160,6 +152,20 @@ static unsigned long clk_fracn_gppll_recalc_rate(struct clk_hw *hw, unsigned lon
if (rate)
return (unsigned long)rate;
+ if (!rdiv)
+ rdiv = rdiv + 1;
+
+ switch (odiv) {
+ case 0:
+ odiv = 2;
+ break;
+ case 1:
+ odiv = 3;
+ break;
+ default:
+ break;
+ }
+
/* Fvco = Fref * (MFI + MFN / MFD) */
fvco = fvco * mfi * mfd + fvco * mfn;
do_div(fvco, mfd * rdiv * odiv);
diff --git a/drivers/clk/imx/clk-imx93.c b/drivers/clk/imx/clk-imx93.c
index edcc87661d1f..f5c9fa40491c 100644
--- a/drivers/clk/imx/clk-imx93.c
+++ b/drivers/clk/imx/clk-imx93.c
@@ -150,7 +150,7 @@ static const struct imx93_clk_ccgr {
{ IMX93_CLK_A55_GATE, "a55", "a55_root", 0x8000, },
/* M33 critical clk for system run */
{ IMX93_CLK_CM33_GATE, "cm33", "m33_root", 0x8040, CLK_IS_CRITICAL },
- { IMX93_CLK_ADC1_GATE, "adc1", "osc_24m", 0x82c0, },
+ { IMX93_CLK_ADC1_GATE, "adc1", "adc_root", 0x82c0, },
{ IMX93_CLK_WDOG1_GATE, "wdog1", "osc_24m", 0x8300, },
{ IMX93_CLK_WDOG2_GATE, "wdog2", "osc_24m", 0x8340, },
{ IMX93_CLK_WDOG3_GATE, "wdog3", "osc_24m", 0x8380, },
@@ -160,7 +160,7 @@ static const struct imx93_clk_ccgr {
{ IMX93_CLK_SEMA2_GATE, "sema2", "bus_wakeup_root", 0x8480, },
{ IMX93_CLK_MU_A_GATE, "mu_a", "bus_aon_root", 0x84c0, },
{ IMX93_CLK_MU_B_GATE, "mu_b", "bus_aon_root", 0x8500, },
- { IMX93_CLK_EDMA1_GATE, "edma1", "wakeup_axi_root", 0x8540, },
+ { IMX93_CLK_EDMA1_GATE, "edma1", "m33_root", 0x8540, },
{ IMX93_CLK_EDMA2_GATE, "edma2", "wakeup_axi_root", 0x8580, },
{ IMX93_CLK_FLEXSPI1_GATE, "flexspi", "flexspi_root", 0x8640, },
{ IMX93_CLK_GPIO1_GATE, "gpio1", "m33_root", 0x8880, },
@@ -219,7 +219,7 @@ static const struct imx93_clk_ccgr {
{ IMX93_CLK_LCDIF_GATE, "lcdif", "media_apb_root", 0x9640, },
{ IMX93_CLK_PXP_GATE, "pxp", "media_apb_root", 0x9680, },
{ IMX93_CLK_ISI_GATE, "isi", "media_apb_root", 0x96c0, },
- { IMX93_CLK_NIC_MEDIA_GATE, "nic_media", "media_apb_root", 0x9700, },
+ { IMX93_CLK_NIC_MEDIA_GATE, "nic_media", "media_axi_root", 0x9700, },
{ IMX93_CLK_USB_CONTROLLER_GATE, "usb_controller", "hsio_root", 0x9a00, },
{ IMX93_CLK_USB_TEST_60M_GATE, "usb_test_60m", "hsio_usb_test_60m_root", 0x9a40, },
{ IMX93_CLK_HSIO_TROUT_24M_GATE, "hsio_trout_24m", "osc_24m", 0x9a80, },
diff --git a/drivers/clk/mediatek/clk-mt2701-eth.c b/drivers/clk/mediatek/clk-mt2701-eth.c
index 47c2289f3d1d..edf1e2ed2b59 100644
--- a/drivers/clk/mediatek/clk-mt2701-eth.c
+++ b/drivers/clk/mediatek/clk-mt2701-eth.c
@@ -36,6 +36,14 @@ static const struct mtk_gate eth_clks[] = {
GATE_ETH(CLK_ETHSYS_CRYPTO, "crypto_clk", "ethif_sel", 29),
};
+static u16 rst_ofs[] = { 0x34, };
+
+static const struct mtk_clk_rst_desc clk_rst_desc = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(rst_ofs),
+};
+
static const struct of_device_id of_match_clk_mt2701_eth[] = {
{ .compatible = "mediatek,mt2701-ethsys", },
{}
@@ -58,7 +66,7 @@ static int clk_mt2701_eth_probe(struct platform_device *pdev)
"could not register clock provider: %s: %d\n",
pdev->name, r);
- mtk_register_reset_controller(node, 1, 0x34);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
return r;
}
diff --git a/drivers/clk/mediatek/clk-mt2701-g3d.c b/drivers/clk/mediatek/clk-mt2701-g3d.c
index 79929ed37f83..1458109d99d9 100644
--- a/drivers/clk/mediatek/clk-mt2701-g3d.c
+++ b/drivers/clk/mediatek/clk-mt2701-g3d.c
@@ -35,6 +35,14 @@ static const struct mtk_gate g3d_clks[] = {
GATE_G3D(CLK_G3DSYS_CORE, "g3d_core", "mfg_sel", 0),
};
+static u16 rst_ofs[] = { 0xc, };
+
+static const struct mtk_clk_rst_desc clk_rst_desc = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(rst_ofs),
+};
+
static int clk_mt2701_g3dsys_init(struct platform_device *pdev)
{
struct clk_hw_onecell_data *clk_data;
@@ -52,7 +60,7 @@ static int clk_mt2701_g3dsys_init(struct platform_device *pdev)
"could not register clock provider: %s: %d\n",
pdev->name, r);
- mtk_register_reset_controller(node, 1, 0xc);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
return r;
}
diff --git a/drivers/clk/mediatek/clk-mt2701-hif.c b/drivers/clk/mediatek/clk-mt2701-hif.c
index 1aa36cb93ad0..434cbbe8c037 100644
--- a/drivers/clk/mediatek/clk-mt2701-hif.c
+++ b/drivers/clk/mediatek/clk-mt2701-hif.c
@@ -33,6 +33,14 @@ static const struct mtk_gate hif_clks[] = {
GATE_HIF(CLK_HIFSYS_PCIE2, "pcie2_clk", "ethpll_500m_ck", 26),
};
+static u16 rst_ofs[] = { 0x34, };
+
+static const struct mtk_clk_rst_desc clk_rst_desc = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(rst_ofs),
+};
+
static const struct of_device_id of_match_clk_mt2701_hif[] = {
{ .compatible = "mediatek,mt2701-hifsys", },
{}
@@ -57,7 +65,7 @@ static int clk_mt2701_hif_probe(struct platform_device *pdev)
return r;
}
- mtk_register_reset_controller(node, 1, 0x34);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
return 0;
}
diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c
index 04ba356db2d7..9b442af37e67 100644
--- a/drivers/clk/mediatek/clk-mt2701.c
+++ b/drivers/clk/mediatek/clk-mt2701.c
@@ -735,6 +735,24 @@ static const struct mtk_fixed_factor infra_fixed_divs[] = {
FACTOR(CLK_INFRA_CLK_13M, "clk13m", "clk26m", 1, 2),
};
+static u16 infrasys_rst_ofs[] = { 0x30, 0x34, };
+static u16 pericfg_rst_ofs[] = { 0x0, 0x4, };
+
+static const struct mtk_clk_rst_desc clk_rst_desc[] = {
+ /* infrasys */
+ {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = infrasys_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(infrasys_rst_ofs),
+ },
+ /* pericfg */
+ {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = pericfg_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(pericfg_rst_ofs),
+ },
+};
+
static struct clk_hw_onecell_data *infra_clk_data;
static void __init mtk_infrasys_init_early(struct device_node *node)
@@ -787,7 +805,7 @@ static int mtk_infrasys_init(struct platform_device *pdev)
if (r)
return r;
- mtk_register_reset_controller(node, 2, 0x30);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc[0]);
return 0;
}
@@ -910,7 +928,7 @@ static int mtk_pericfg_init(struct platform_device *pdev)
if (r)
return r;
- mtk_register_reset_controller(node, 2, 0x0);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc[1]);
return 0;
}
diff --git a/drivers/clk/mediatek/clk-mt2712.c b/drivers/clk/mediatek/clk-mt2712.c
index 410b059727ea..56980dd6c2ea 100644
--- a/drivers/clk/mediatek/clk-mt2712.c
+++ b/drivers/clk/mediatek/clk-mt2712.c
@@ -1258,6 +1258,24 @@ static const struct mtk_pll_data plls[] = {
0, 31, 0x0300, 4, 0, 0, 0, 0x0304, 0),
};
+static u16 infrasys_rst_ofs[] = { 0x30, 0x34, };
+static u16 pericfg_rst_ofs[] = { 0x0, 0x4, };
+
+static const struct mtk_clk_rst_desc clk_rst_desc[] = {
+ /* infra */
+ {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = infrasys_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(infrasys_rst_ofs),
+ },
+ /* peri */
+ {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = pericfg_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(pericfg_rst_ofs),
+ },
+};
+
static int clk_mt2712_apmixed_probe(struct platform_device *pdev)
{
struct clk_hw_onecell_data *clk_data;
@@ -1361,7 +1379,7 @@ static int clk_mt2712_infra_probe(struct platform_device *pdev)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
- mtk_register_reset_controller(node, 2, 0x30);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc[0]);
return r;
}
@@ -1383,7 +1401,7 @@ static int clk_mt2712_peri_probe(struct platform_device *pdev)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
- mtk_register_reset_controller(node, 2, 0);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc[1]);
return r;
}
diff --git a/drivers/clk/mediatek/clk-mt7622-eth.c b/drivers/clk/mediatek/clk-mt7622-eth.c
index b12d48705496..43de0477d5d9 100644
--- a/drivers/clk/mediatek/clk-mt7622-eth.c
+++ b/drivers/clk/mediatek/clk-mt7622-eth.c
@@ -65,6 +65,14 @@ static const struct mtk_gate sgmii_clks[] = {
"ssusb_cdr_fb", 5),
};
+static u16 rst_ofs[] = { 0x34, };
+
+static const struct mtk_clk_rst_desc clk_rst_desc = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(rst_ofs),
+};
+
static int clk_mt7622_ethsys_init(struct platform_device *pdev)
{
struct clk_hw_onecell_data *clk_data;
@@ -82,7 +90,7 @@ static int clk_mt7622_ethsys_init(struct platform_device *pdev)
"could not register clock provider: %s: %d\n",
pdev->name, r);
- mtk_register_reset_controller(node, 1, 0x34);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
return r;
}
diff --git a/drivers/clk/mediatek/clk-mt7622-hif.c b/drivers/clk/mediatek/clk-mt7622-hif.c
index 58728e35e80a..67e96231dd25 100644
--- a/drivers/clk/mediatek/clk-mt7622-hif.c
+++ b/drivers/clk/mediatek/clk-mt7622-hif.c
@@ -76,6 +76,14 @@ static const struct mtk_gate pcie_clks[] = {
GATE_PCIE(CLK_SATA_PM_EN, "sata_pm_en", "univpll2_d4", 30),
};
+static u16 rst_ofs[] = { 0x34, };
+
+static const struct mtk_clk_rst_desc clk_rst_desc = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(rst_ofs),
+};
+
static int clk_mt7622_ssusbsys_init(struct platform_device *pdev)
{
struct clk_hw_onecell_data *clk_data;
@@ -93,7 +101,7 @@ static int clk_mt7622_ssusbsys_init(struct platform_device *pdev)
"could not register clock provider: %s: %d\n",
pdev->name, r);
- mtk_register_reset_controller(node, 1, 0x34);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
return r;
}
@@ -115,7 +123,7 @@ static int clk_mt7622_pciesys_init(struct platform_device *pdev)
"could not register clock provider: %s: %d\n",
pdev->name, r);
- mtk_register_reset_controller(node, 1, 0x34);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
return r;
}
diff --git a/drivers/clk/mediatek/clk-mt7622.c b/drivers/clk/mediatek/clk-mt7622.c
index e4a5e5230861..3b55f8641fae 100644
--- a/drivers/clk/mediatek/clk-mt7622.c
+++ b/drivers/clk/mediatek/clk-mt7622.c
@@ -610,6 +610,24 @@ static struct mtk_composite peri_muxes[] = {
MUX(CLK_PERIBUS_SEL, "peribus_ck_sel", peribus_ck_parents, 0x05C, 0, 1),
};
+static u16 infrasys_rst_ofs[] = { 0x30, };
+static u16 pericfg_rst_ofs[] = { 0x0, 0x4, };
+
+static const struct mtk_clk_rst_desc clk_rst_desc[] = {
+ /* infrasys */
+ {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = infrasys_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(infrasys_rst_ofs),
+ },
+ /* pericfg */
+ {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = pericfg_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(pericfg_rst_ofs),
+ },
+};
+
static int mtk_topckgen_init(struct platform_device *pdev)
{
struct clk_hw_onecell_data *clk_data;
@@ -663,7 +681,7 @@ static int mtk_infrasys_init(struct platform_device *pdev)
if (r)
return r;
- mtk_register_reset_controller(node, 1, 0x30);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc[0]);
return 0;
}
@@ -714,7 +732,7 @@ static int mtk_pericfg_init(struct platform_device *pdev)
clk_prepare_enable(clk_data->hws[CLK_PERI_UART0_PD]->clk);
- mtk_register_reset_controller(node, 2, 0x0);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc[1]);
return 0;
}
diff --git a/drivers/clk/mediatek/clk-mt7629-eth.c b/drivers/clk/mediatek/clk-mt7629-eth.c
index c49fd732c9b2..282dd6559465 100644
--- a/drivers/clk/mediatek/clk-mt7629-eth.c
+++ b/drivers/clk/mediatek/clk-mt7629-eth.c
@@ -76,6 +76,14 @@ static const struct mtk_gate sgmii_clks[2][4] = {
}
};
+static u16 rst_ofs[] = { 0x34, };
+
+static const struct mtk_clk_rst_desc clk_rst_desc = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(rst_ofs),
+};
+
static int clk_mt7629_ethsys_init(struct platform_device *pdev)
{
struct clk_hw_onecell_data *clk_data;
@@ -92,7 +100,7 @@ static int clk_mt7629_ethsys_init(struct platform_device *pdev)
"could not register clock provider: %s: %d\n",
pdev->name, r);
- mtk_register_reset_controller(node, 1, 0x34);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
return r;
}
diff --git a/drivers/clk/mediatek/clk-mt7629-hif.c b/drivers/clk/mediatek/clk-mt7629-hif.c
index acaa97fda331..0c8b9e139789 100644
--- a/drivers/clk/mediatek/clk-mt7629-hif.c
+++ b/drivers/clk/mediatek/clk-mt7629-hif.c
@@ -71,6 +71,14 @@ static const struct mtk_gate pcie_clks[] = {
GATE_PCIE(CLK_PCIE_P0_PIPE_EN, "pcie_p0_pipe_en", "pcie0_pipe_en", 23),
};
+static u16 rst_ofs[] = { 0x34, };
+
+static const struct mtk_clk_rst_desc clk_rst_desc = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(rst_ofs),
+};
+
static int clk_mt7629_ssusbsys_init(struct platform_device *pdev)
{
struct clk_hw_onecell_data *clk_data;
@@ -88,7 +96,7 @@ static int clk_mt7629_ssusbsys_init(struct platform_device *pdev)
"could not register clock provider: %s: %d\n",
pdev->name, r);
- mtk_register_reset_controller(node, 1, 0x34);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
return r;
}
@@ -110,7 +118,7 @@ static int clk_mt7629_pciesys_init(struct platform_device *pdev)
"could not register clock provider: %s: %d\n",
pdev->name, r);
- mtk_register_reset_controller(node, 1, 0x34);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
return r;
}
diff --git a/drivers/clk/mediatek/clk-mt8135.c b/drivers/clk/mediatek/clk-mt8135.c
index 9ef524b44862..b68888a034c4 100644
--- a/drivers/clk/mediatek/clk-mt8135.c
+++ b/drivers/clk/mediatek/clk-mt8135.c
@@ -514,6 +514,24 @@ static const struct mtk_composite peri_clks[] __initconst = {
MUX(CLK_PERI_UART3_SEL, "uart3_ck_sel", uart_ck_sel_parents, 0x40c, 3, 1),
};
+static u16 infrasys_rst_ofs[] = { 0x30, 0x34, };
+static u16 pericfg_rst_ofs[] = { 0x0, 0x4, };
+
+static const struct mtk_clk_rst_desc clk_rst_desc[] = {
+ /* infrasys */
+ {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = infrasys_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(infrasys_rst_ofs),
+ },
+ /* pericfg */
+ {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = pericfg_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(pericfg_rst_ofs),
+ }
+};
+
static void __init mtk_topckgen_init(struct device_node *node)
{
struct clk_hw_onecell_data *clk_data;
@@ -559,7 +577,7 @@ static void __init mtk_infrasys_init(struct device_node *node)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
- mtk_register_reset_controller(node, 2, 0x30);
+ mtk_register_reset_controller(node, &clk_rst_desc[0]);
}
CLK_OF_DECLARE(mtk_infrasys, "mediatek,mt8135-infracfg", mtk_infrasys_init);
@@ -587,7 +605,7 @@ static void __init mtk_pericfg_init(struct device_node *node)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
- mtk_register_reset_controller(node, 2, 0);
+ mtk_register_reset_controller(node, &clk_rst_desc[1]);
}
CLK_OF_DECLARE(mtk_pericfg, "mediatek,mt8135-pericfg", mtk_pericfg_init);
diff --git a/drivers/clk/mediatek/clk-mt8173.c b/drivers/clk/mediatek/clk-mt8173.c
index 0929db330852..b8529ee7199d 100644
--- a/drivers/clk/mediatek/clk-mt8173.c
+++ b/drivers/clk/mediatek/clk-mt8173.c
@@ -819,6 +819,24 @@ static const struct mtk_gate venclt_clks[] __initconst = {
GATE_VENCLT(CLK_VENCLT_CKE1, "venclt_cke1", "venclt_sel", 4),
};
+static u16 infrasys_rst_ofs[] = { 0x30, 0x34, };
+static u16 pericfg_rst_ofs[] = { 0x0, 0x4, };
+
+static const struct mtk_clk_rst_desc clk_rst_desc[] = {
+ /* infrasys */
+ {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = infrasys_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(infrasys_rst_ofs),
+ },
+ /* pericfg */
+ {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = pericfg_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(pericfg_rst_ofs),
+ }
+};
+
static struct clk_hw_onecell_data *mt8173_top_clk_data __initdata;
static struct clk_hw_onecell_data *mt8173_pll_clk_data __initdata;
@@ -882,7 +900,7 @@ static void __init mtk_infrasys_init(struct device_node *node)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
- mtk_register_reset_controller(node, 2, 0x30);
+ mtk_register_reset_controller(node, &clk_rst_desc[0]);
}
CLK_OF_DECLARE(mtk_infrasys, "mediatek,mt8173-infracfg", mtk_infrasys_init);
@@ -910,7 +928,7 @@ static void __init mtk_pericfg_init(struct device_node *node)
pr_err("%s(): could not register clock provider: %d\n",
__func__, r);
- mtk_register_reset_controller(node, 2, 0);
+ mtk_register_reset_controller(node, &clk_rst_desc[1]);
}
CLK_OF_DECLARE(mtk_pericfg, "mediatek,mt8173-pericfg", mtk_pericfg_init);
diff --git a/drivers/clk/mediatek/clk-mt8183.c b/drivers/clk/mediatek/clk-mt8183.c
index b5c17988c337..8512101e1189 100644
--- a/drivers/clk/mediatek/clk-mt8183.c
+++ b/drivers/clk/mediatek/clk-mt8183.c
@@ -18,9 +18,6 @@
#include <dt-bindings/clock/mt8183-clk.h>
-/* Infra global controller reset set register */
-#define INFRA_RST0_SET_OFFSET 0x120
-
static DEFINE_SPINLOCK(mt8183_clk_lock);
static const struct mtk_fixed_clk top_fixed_clks[] = {
@@ -1153,6 +1150,19 @@ static const struct mtk_pll_data plls[] = {
0, 0, 32, 8, 0x02B4, 1, 0x02BC, 0x0014, 1, 0x02B8, 0, 0x02B4),
};
+static u16 infra_rst_ofs[] = {
+ INFRA_RST0_SET_OFFSET,
+ INFRA_RST1_SET_OFFSET,
+ INFRA_RST2_SET_OFFSET,
+ INFRA_RST3_SET_OFFSET,
+};
+
+static const struct mtk_clk_rst_desc clk_rst_desc = {
+ .version = MTK_RST_SET_CLR,
+ .rst_bank_ofs = infra_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(infra_rst_ofs),
+};
+
static int clk_mt8183_apmixed_probe(struct platform_device *pdev)
{
struct clk_hw_onecell_data *clk_data;
@@ -1240,7 +1250,7 @@ static int clk_mt8183_infra_probe(struct platform_device *pdev)
return r;
}
- mtk_register_reset_controller_set_clr(node, 4, INFRA_RST0_SET_OFFSET);
+ mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
return r;
}
diff --git a/drivers/clk/mediatek/clk-mt8186-infra_ao.c b/drivers/clk/mediatek/clk-mt8186-infra_ao.c
index 2a7adc25abaa..df2a6bd1aefa 100644
--- a/drivers/clk/mediatek/clk-mt8186-infra_ao.c
+++ b/drivers/clk/mediatek/clk-mt8186-infra_ao.c
@@ -6,6 +6,7 @@
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
#include <dt-bindings/clock/mt8186-clk.h>
+#include <dt-bindings/reset/mt8186-resets.h>
#include "clk-gate.h"
#include "clk-mtk.h"
@@ -191,9 +192,31 @@ static const struct mtk_gate infra_ao_clks[] = {
GATE_INFRA_AO3(CLK_INFRA_AO_FLASHIF_66M, "infra_ao_flashif_66m", "top_axi", 29),
};
+static u16 infra_ao_rst_ofs[] = {
+ INFRA_RST0_SET_OFFSET,
+ INFRA_RST1_SET_OFFSET,
+ INFRA_RST2_SET_OFFSET,
+ INFRA_RST3_SET_OFFSET,
+ INFRA_RST4_SET_OFFSET,
+};
+
+static u16 infra_ao_idx_map[] = {
+ [MT8186_INFRA_THERMAL_CTRL_RST] = 0 * RST_NR_PER_BANK + 0,
+ [MT8186_INFRA_PTP_CTRL_RST] = 1 * RST_NR_PER_BANK + 0,
+};
+
+static struct mtk_clk_rst_desc infra_ao_rst_desc = {
+ .version = MTK_RST_SET_CLR,
+ .rst_bank_ofs = infra_ao_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(infra_ao_rst_ofs),
+ .rst_idx_map = infra_ao_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(infra_ao_idx_map),
+};
+
static const struct mtk_clk_desc infra_ao_desc = {
.clks = infra_ao_clks,
.num_clks = ARRAY_SIZE(infra_ao_clks),
+ .rst_desc = &infra_ao_rst_desc,
};
static const struct of_device_id of_match_clk_mt8186_infra_ao[] = {
diff --git a/drivers/clk/mediatek/clk-mt8192-msdc.c b/drivers/clk/mediatek/clk-mt8192-msdc.c
index 87c3b79b79cf..635f7a0b629a 100644
--- a/drivers/clk/mediatek/clk-mt8192-msdc.c
+++ b/drivers/clk/mediatek/clk-mt8192-msdc.c
@@ -12,28 +12,15 @@
#include <dt-bindings/clock/mt8192-clk.h>
-static const struct mtk_gate_regs msdc_cg_regs = {
- .set_ofs = 0xb4,
- .clr_ofs = 0xb4,
- .sta_ofs = 0xb4,
-};
-
static const struct mtk_gate_regs msdc_top_cg_regs = {
.set_ofs = 0x0,
.clr_ofs = 0x0,
.sta_ofs = 0x0,
};
-#define GATE_MSDC(_id, _name, _parent, _shift) \
- GATE_MTK(_id, _name, _parent, &msdc_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
-
#define GATE_MSDC_TOP(_id, _name, _parent, _shift) \
GATE_MTK(_id, _name, _parent, &msdc_top_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
-static const struct mtk_gate msdc_clks[] = {
- GATE_MSDC(CLK_MSDC_AXI_WRAP, "msdc_axi_wrap", "axi_sel", 22),
-};
-
static const struct mtk_gate msdc_top_clks[] = {
GATE_MSDC_TOP(CLK_MSDC_TOP_AES_0P, "msdc_top_aes_0p", "aes_msdcfde_sel", 0),
GATE_MSDC_TOP(CLK_MSDC_TOP_SRC_0P, "msdc_top_src_0p", "infra_msdc0_src", 1),
@@ -52,11 +39,6 @@ static const struct mtk_gate msdc_top_clks[] = {
GATE_MSDC_TOP(CLK_MSDC_TOP_AHB2AXI_BRG_AXI, "msdc_top_ahb2axi_brg_axi", "axi_sel", 14),
};
-static const struct mtk_clk_desc msdc_desc = {
- .clks = msdc_clks,
- .num_clks = ARRAY_SIZE(msdc_clks),
-};
-
static const struct mtk_clk_desc msdc_top_desc = {
.clks = msdc_top_clks,
.num_clks = ARRAY_SIZE(msdc_top_clks),
@@ -64,9 +46,6 @@ static const struct mtk_clk_desc msdc_top_desc = {
static const struct of_device_id of_match_clk_mt8192_msdc[] = {
{
- .compatible = "mediatek,mt8192-msdc",
- .data = &msdc_desc,
- }, {
.compatible = "mediatek,mt8192-msdc_top",
.data = &msdc_top_desc,
}, {
diff --git a/drivers/clk/mediatek/clk-mt8192.c b/drivers/clk/mediatek/clk-mt8192.c
index dda211b7a745..ebbd2798d9a3 100644
--- a/drivers/clk/mediatek/clk-mt8192.c
+++ b/drivers/clk/mediatek/clk-mt8192.c
@@ -18,6 +18,7 @@
#include "clk-pll.h"
#include <dt-bindings/clock/mt8192-clk.h>
+#include <dt-bindings/reset/mt8192-resets.h>
static DEFINE_SPINLOCK(mt8192_clk_lock);
@@ -1114,6 +1115,30 @@ static const struct mtk_gate top_clks[] = {
GATE_TOP(CLK_TOP_SSUSB_PHY_REF, "ssusb_phy_ref", "clk26m", 25),
};
+static u16 infra_ao_rst_ofs[] = {
+ INFRA_RST0_SET_OFFSET,
+ INFRA_RST1_SET_OFFSET,
+ INFRA_RST2_SET_OFFSET,
+ INFRA_RST3_SET_OFFSET,
+ INFRA_RST4_SET_OFFSET,
+};
+
+static u16 infra_ao_idx_map[] = {
+ [MT8192_INFRA_RST0_THERM_CTRL_SWRST] = 0 * RST_NR_PER_BANK + 0,
+ [MT8192_INFRA_RST2_PEXTP_PHY_SWRST] = 2 * RST_NR_PER_BANK + 15,
+ [MT8192_INFRA_RST3_THERM_CTRL_PTP_SWRST] = 3 * RST_NR_PER_BANK + 5,
+ [MT8192_INFRA_RST4_PCIE_TOP_SWRST] = 4 * RST_NR_PER_BANK + 1,
+ [MT8192_INFRA_RST4_THERM_CTRL_MCU_SWRST] = 4 * RST_NR_PER_BANK + 12,
+};
+
+static const struct mtk_clk_rst_desc clk_rst_desc = {
+ .version = MTK_RST_SET_CLR,
+ .rst_bank_ofs = infra_ao_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(infra_ao_rst_ofs),
+ .rst_idx_map = infra_ao_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(infra_ao_idx_map),
+};
+
#define MT8192_PLL_FMAX (3800UL * MHZ)
#define MT8192_PLL_FMIN (1500UL * MHZ)
#define MT8192_INTEGER_BITS 8
@@ -1240,6 +1265,10 @@ static int clk_mt8192_infra_probe(struct platform_device *pdev)
if (r)
goto free_clk_data;
+ r = mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
+ if (r)
+ goto free_clk_data;
+
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
goto free_clk_data;
diff --git a/drivers/clk/mediatek/clk-mt8195-infra_ao.c b/drivers/clk/mediatek/clk-mt8195-infra_ao.c
index 8ebe3b9415c4..97657f255618 100644
--- a/drivers/clk/mediatek/clk-mt8195-infra_ao.c
+++ b/drivers/clk/mediatek/clk-mt8195-infra_ao.c
@@ -7,6 +7,7 @@
#include "clk-mtk.h"
#include <dt-bindings/clock/mt8195-clk.h>
+#include <dt-bindings/reset/mt8195-resets.h>
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
@@ -182,9 +183,32 @@ static const struct mtk_gate infra_ao_clks[] = {
GATE_INFRA_AO4(CLK_INFRA_AO_PERI_UFS_MEM_SUB, "infra_ao_peri_ufs_mem_sub", "mem_466m", 31),
};
+static u16 infra_ao_rst_ofs[] = {
+ INFRA_RST0_SET_OFFSET,
+ INFRA_RST1_SET_OFFSET,
+ INFRA_RST2_SET_OFFSET,
+ INFRA_RST3_SET_OFFSET,
+ INFRA_RST4_SET_OFFSET,
+};
+
+static u16 infra_ao_idx_map[] = {
+ [MT8195_INFRA_RST0_THERM_CTRL_SWRST] = 0 * RST_NR_PER_BANK + 0,
+ [MT8195_INFRA_RST3_THERM_CTRL_PTP_SWRST] = 3 * RST_NR_PER_BANK + 5,
+ [MT8195_INFRA_RST4_THERM_CTRL_MCU_SWRST] = 4 * RST_NR_PER_BANK + 10,
+};
+
+static struct mtk_clk_rst_desc infra_ao_rst_desc = {
+ .version = MTK_RST_SET_CLR,
+ .rst_bank_ofs = infra_ao_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(infra_ao_rst_ofs),
+ .rst_idx_map = infra_ao_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(infra_ao_idx_map),
+};
+
static const struct mtk_clk_desc infra_ao_desc = {
.clks = infra_ao_clks,
.num_clks = ARRAY_SIZE(infra_ao_clks),
+ .rst_desc = &infra_ao_rst_desc,
};
static const struct of_device_id of_match_clk_mt8195_infra_ao[] = {
diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
index b9188000ab3c..05a188c62119 100644
--- a/drivers/clk/mediatek/clk-mtk.c
+++ b/drivers/clk/mediatek/clk-mtk.c
@@ -444,6 +444,13 @@ int mtk_clk_simple_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, clk_data);
+ if (mcd->rst_desc) {
+ r = mtk_register_reset_controller_with_dev(&pdev->dev,
+ mcd->rst_desc);
+ if (r)
+ goto unregister_clks;
+ }
+
return r;
unregister_clks:
diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
index adb1304d35d4..1b95c484d5aa 100644
--- a/drivers/clk/mediatek/clk-mtk.h
+++ b/drivers/clk/mediatek/clk-mtk.h
@@ -13,6 +13,8 @@
#include <linux/spinlock.h>
#include <linux/types.h>
+#include "reset.h"
+
#define MAX_MUX_GATE_BIT 31
#define INVALID_MUX_GATE_BIT (MAX_MUX_GATE_BIT + 1)
@@ -187,15 +189,10 @@ void mtk_free_clk_data(struct clk_hw_onecell_data *clk_data);
struct clk_hw *mtk_clk_register_ref2usb_tx(const char *name,
const char *parent_name, void __iomem *reg);
-void mtk_register_reset_controller(struct device_node *np,
- unsigned int num_regs, int regofs);
-
-void mtk_register_reset_controller_set_clr(struct device_node *np,
- unsigned int num_regs, int regofs);
-
struct mtk_clk_desc {
const struct mtk_gate *clks;
size_t num_clks;
+ const struct mtk_clk_rst_desc *rst_desc;
};
int mtk_clk_simple_probe(struct platform_device *pdev);
diff --git a/drivers/clk/mediatek/reset.c b/drivers/clk/mediatek/reset.c
index bcec4b89f449..179505549a7c 100644
--- a/drivers/clk/mediatek/reset.c
+++ b/drivers/clk/mediatek/reset.c
@@ -8,55 +8,39 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include <linux/reset-controller.h>
#include <linux/slab.h>
-#include "clk-mtk.h"
+#include "reset.h"
-struct mtk_reset {
- struct regmap *regmap;
- int regofs;
- struct reset_controller_dev rcdev;
-};
-
-static int mtk_reset_assert_set_clr(struct reset_controller_dev *rcdev,
- unsigned long id)
+static inline struct mtk_clk_rst_data *to_mtk_clk_rst_data(struct reset_controller_dev *rcdev)
{
- struct mtk_reset *data = container_of(rcdev, struct mtk_reset, rcdev);
- unsigned int reg = data->regofs + ((id / 32) << 4);
-
- return regmap_write(data->regmap, reg, 1);
+ return container_of(rcdev, struct mtk_clk_rst_data, rcdev);
}
-static int mtk_reset_deassert_set_clr(struct reset_controller_dev *rcdev,
- unsigned long id)
+static int mtk_reset_update(struct reset_controller_dev *rcdev,
+ unsigned long id, bool deassert)
{
- struct mtk_reset *data = container_of(rcdev, struct mtk_reset, rcdev);
- unsigned int reg = data->regofs + ((id / 32) << 4) + 0x4;
+ struct mtk_clk_rst_data *data = to_mtk_clk_rst_data(rcdev);
+ unsigned int val = deassert ? 0 : ~0;
- return regmap_write(data->regmap, reg, 1);
+ return regmap_update_bits(data->regmap,
+ data->desc->rst_bank_ofs[id / RST_NR_PER_BANK],
+ BIT(id % RST_NR_PER_BANK), val);
}
static int mtk_reset_assert(struct reset_controller_dev *rcdev,
- unsigned long id)
+ unsigned long id)
{
- struct mtk_reset *data = container_of(rcdev, struct mtk_reset, rcdev);
-
- return regmap_update_bits(data->regmap, data->regofs + ((id / 32) << 2),
- BIT(id % 32), ~0);
+ return mtk_reset_update(rcdev, id, false);
}
static int mtk_reset_deassert(struct reset_controller_dev *rcdev,
- unsigned long id)
+ unsigned long id)
{
- struct mtk_reset *data = container_of(rcdev, struct mtk_reset, rcdev);
-
- return regmap_update_bits(data->regmap, data->regofs + ((id / 32) << 2),
- BIT(id % 32), 0);
+ return mtk_reset_update(rcdev, id, true);
}
-static int mtk_reset(struct reset_controller_dev *rcdev,
- unsigned long id)
+static int mtk_reset(struct reset_controller_dev *rcdev, unsigned long id)
{
int ret;
@@ -67,8 +51,32 @@ static int mtk_reset(struct reset_controller_dev *rcdev,
return mtk_reset_deassert(rcdev, id);
}
+static int mtk_reset_update_set_clr(struct reset_controller_dev *rcdev,
+ unsigned long id, bool deassert)
+{
+ struct mtk_clk_rst_data *data = to_mtk_clk_rst_data(rcdev);
+ unsigned int deassert_ofs = deassert ? 0x4 : 0;
+
+ return regmap_write(data->regmap,
+ data->desc->rst_bank_ofs[id / RST_NR_PER_BANK] +
+ deassert_ofs,
+ BIT(id % RST_NR_PER_BANK));
+}
+
+static int mtk_reset_assert_set_clr(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return mtk_reset_update_set_clr(rcdev, id, false);
+}
+
+static int mtk_reset_deassert_set_clr(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return mtk_reset_update_set_clr(rcdev, id, true);
+}
+
static int mtk_reset_set_clr(struct reset_controller_dev *rcdev,
- unsigned long id)
+ unsigned long id)
{
int ret;
@@ -90,51 +98,135 @@ static const struct reset_control_ops mtk_reset_ops_set_clr = {
.reset = mtk_reset_set_clr,
};
-static void mtk_register_reset_controller_common(struct device_node *np,
- unsigned int num_regs, int regofs,
- const struct reset_control_ops *reset_ops)
+static int reset_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ struct mtk_clk_rst_data *data = to_mtk_clk_rst_data(rcdev);
+
+ if (reset_spec->args[0] >= rcdev->nr_resets ||
+ reset_spec->args[0] >= data->desc->rst_idx_map_nr)
+ return -EINVAL;
+
+ return data->desc->rst_idx_map[reset_spec->args[0]];
+}
+
+int mtk_register_reset_controller(struct device_node *np,
+ const struct mtk_clk_rst_desc *desc)
{
- struct mtk_reset *data;
- int ret;
struct regmap *regmap;
+ const struct reset_control_ops *rcops = NULL;
+ struct mtk_clk_rst_data *data;
+ int ret;
+
+ if (!desc) {
+ pr_err("mtk clock reset desc is NULL\n");
+ return -EINVAL;
+ }
+
+ switch (desc->version) {
+ case MTK_RST_SIMPLE:
+ rcops = &mtk_reset_ops;
+ break;
+ case MTK_RST_SET_CLR:
+ rcops = &mtk_reset_ops_set_clr;
+ break;
+ default:
+ pr_err("Unknown reset version %d\n", desc->version);
+ return -EINVAL;
+ }
regmap = device_node_to_regmap(np);
if (IS_ERR(regmap)) {
pr_err("Cannot find regmap for %pOF: %pe\n", np, regmap);
- return;
+ return -EINVAL;
}
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
- return;
+ return -ENOMEM;
+ data->desc = desc;
data->regmap = regmap;
- data->regofs = regofs;
data->rcdev.owner = THIS_MODULE;
- data->rcdev.nr_resets = num_regs * 32;
- data->rcdev.ops = reset_ops;
+ data->rcdev.ops = rcops;
data->rcdev.of_node = np;
+ if (data->desc->rst_idx_map_nr > 0) {
+ data->rcdev.of_reset_n_cells = 1;
+ data->rcdev.nr_resets = desc->rst_idx_map_nr;
+ data->rcdev.of_xlate = reset_xlate;
+ } else {
+ data->rcdev.nr_resets = desc->rst_bank_nr * RST_NR_PER_BANK;
+ }
+
ret = reset_controller_register(&data->rcdev);
if (ret) {
pr_err("could not register reset controller: %d\n", ret);
kfree(data);
- return;
+ return ret;
}
-}
-void mtk_register_reset_controller(struct device_node *np,
- unsigned int num_regs, int regofs)
-{
- mtk_register_reset_controller_common(np, num_regs, regofs,
- &mtk_reset_ops);
+ return 0;
}
-void mtk_register_reset_controller_set_clr(struct device_node *np,
- unsigned int num_regs, int regofs)
+int mtk_register_reset_controller_with_dev(struct device *dev,
+ const struct mtk_clk_rst_desc *desc)
{
- mtk_register_reset_controller_common(np, num_regs, regofs,
- &mtk_reset_ops_set_clr);
+ struct device_node *np = dev->of_node;
+ struct regmap *regmap;
+ const struct reset_control_ops *rcops = NULL;
+ struct mtk_clk_rst_data *data;
+ int ret;
+
+ if (!desc) {
+ dev_err(dev, "mtk clock reset desc is NULL\n");
+ return -EINVAL;
+ }
+
+ switch (desc->version) {
+ case MTK_RST_SIMPLE:
+ rcops = &mtk_reset_ops;
+ break;
+ case MTK_RST_SET_CLR:
+ rcops = &mtk_reset_ops_set_clr;
+ break;
+ default:
+ dev_err(dev, "Unknown reset version %d\n", desc->version);
+ return -EINVAL;
+ }
+
+ regmap = device_node_to_regmap(np);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Cannot find regmap %pe\n", regmap);
+ return -EINVAL;
+ }
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->desc = desc;
+ data->regmap = regmap;
+ data->rcdev.owner = THIS_MODULE;
+ data->rcdev.ops = rcops;
+ data->rcdev.of_node = np;
+ data->rcdev.dev = dev;
+
+ if (data->desc->rst_idx_map_nr > 0) {
+ data->rcdev.of_reset_n_cells = 1;
+ data->rcdev.nr_resets = desc->rst_idx_map_nr;
+ data->rcdev.of_xlate = reset_xlate;
+ } else {
+ data->rcdev.nr_resets = desc->rst_bank_nr * RST_NR_PER_BANK;
+ }
+
+ ret = devm_reset_controller_register(dev, &data->rcdev);
+ if (ret) {
+ dev_err(dev, "could not register reset controller: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
}
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/reset.h b/drivers/clk/mediatek/reset.h
new file mode 100644
index 000000000000..6a58a3d59165
--- /dev/null
+++ b/drivers/clk/mediatek/reset.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ */
+
+#ifndef __DRV_CLK_MTK_RESET_H
+#define __DRV_CLK_MTK_RESET_H
+
+#include <linux/reset-controller.h>
+#include <linux/types.h>
+
+#define RST_NR_PER_BANK 32
+
+/* Infra global controller reset set register */
+#define INFRA_RST0_SET_OFFSET 0x120
+#define INFRA_RST1_SET_OFFSET 0x130
+#define INFRA_RST2_SET_OFFSET 0x140
+#define INFRA_RST3_SET_OFFSET 0x150
+#define INFRA_RST4_SET_OFFSET 0x730
+
+/**
+ * enum mtk_reset_version - Version of MediaTek clock reset controller.
+ * @MTK_RST_SIMPLE: Use the same registers for bit set and clear.
+ * @MTK_RST_SET_CLR: Use separate registers for bit set and clear.
+ * @MTK_RST_MAX: Total number of versions of the MediaTek clock reset controller.
+ */
+enum mtk_reset_version {
+ MTK_RST_SIMPLE = 0,
+ MTK_RST_SET_CLR,
+ MTK_RST_MAX,
+};
+
+/**
+ * struct mtk_clk_rst_desc - Description of MediaTek clock reset.
+ * @version: Reset version which is defined in enum mtk_reset_version.
+ * @rst_bank_ofs: Pointer to an array containing the base offsets of the reset registers.
+ * @rst_bank_nr: Number of reset banks.
+ * @rst_idx_map: Pointer to an array mapping index arguments to reset IDs.
+ *		This array is only needed when the input argument is an index.
+ * @rst_idx_map_nr: Number of entries in the reset index map.
+ */
+struct mtk_clk_rst_desc {
+ enum mtk_reset_version version;
+ u16 *rst_bank_ofs;
+ u32 rst_bank_nr;
+ u16 *rst_idx_map;
+ u32 rst_idx_map_nr;
+};
+
+/**
+ * struct mtk_clk_rst_data - Data of MediaTek clock reset controller.
+ * @regmap: Pointer to the regmap of the reset registers.
+ * @rcdev: Reset controller device.
+ * @desc: Pointer to description of the reset controller.
+ */
+struct mtk_clk_rst_data {
+ struct regmap *regmap;
+ struct reset_controller_dev rcdev;
+ const struct mtk_clk_rst_desc *desc;
+};
+
+/**
+ * mtk_register_reset_controller - Register MediaTek clock reset controller
+ * @np: Pointer to device node.
+ * @desc: Constant pointer to description of clock reset.
+ *
+ * Return: 0 on success or a negative errno otherwise.
+ */
+int mtk_register_reset_controller(struct device_node *np,
+ const struct mtk_clk_rst_desc *desc);
+
+/**
+ * mtk_register_reset_controller_with_dev - Register MediaTek clock reset controller with device
+ * @dev: Pointer to the device.
+ * @desc: Constant pointer to description of clock reset.
+ *
+ * Return: 0 on success or a negative errno otherwise.
+ */
+int mtk_register_reset_controller_with_dev(struct device *dev,
+ const struct mtk_clk_rst_desc *desc);
+
+#endif /* __DRV_CLK_MTK_RESET_H */
diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c
index bfe36bd41339..5016682e47c8 100644
--- a/drivers/clk/meson/axg-audio.c
+++ b/drivers/clk/meson/axg-audio.c
@@ -1657,35 +1657,6 @@ static struct clk_regmap *const sm1_clk_regmaps[] = {
&sm1_sysclk_b_en,
};
-static int devm_clk_get_enable(struct device *dev, char *id)
-{
- struct clk *clk;
- int ret;
-
- clk = devm_clk_get(dev, id);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- dev_err_probe(dev, ret, "failed to get %s", id);
- return ret;
- }
-
- ret = clk_prepare_enable(clk);
- if (ret) {
- dev_err(dev, "failed to enable %s", id);
- return ret;
- }
-
- ret = devm_add_action_or_reset(dev,
- (void(*)(void *))clk_disable_unprepare,
- clk);
- if (ret) {
- dev_err(dev, "failed to add reset action on %s", id);
- return ret;
- }
-
- return 0;
-}
-
struct axg_audio_reset_data {
struct reset_controller_dev rstc;
struct regmap *map;
@@ -1787,6 +1758,7 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
struct regmap *map;
void __iomem *regs;
struct clk_hw *hw;
+ struct clk *clk;
int ret, i;
data = of_device_get_match_data(dev);
@@ -1804,9 +1776,9 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
}
/* Get the mandatory peripheral clock */
- ret = devm_clk_get_enable(dev, "pclk");
- if (ret)
- return ret;
+ clk = devm_clk_get_enabled(dev, "pclk");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
ret = device_reset(dev);
if (ret) {
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index bc4dcf356d82..1cf1ef70e347 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -166,6 +166,7 @@ config IPQ_LCC_806X
config IPQ_GCC_8074
tristate "IPQ8074 Global Clock Controller"
+ select QCOM_GDSC
help
Support for global clock controller on ipq8074 devices.
Say Y if you want to use peripheral devices such as UART, SPI,
@@ -608,6 +609,13 @@ config SM_CAMCC_8250
Support for the camera clock controller on SM8250 devices.
Say Y if you want to support camera devices and camera functionality.
+config SM_CAMCC_8450
+ tristate "SM8450 Camera Clock Controller"
+ select SM_GCC_8450
+ help
+ Support for the camera clock controller on SM8450 devices.
+ Say Y if you want to support camera devices and camera functionality.
+
config SM_DISPCC_6125
tristate "SM6125 Display Clock Controller"
depends on SM_GCC_6125
@@ -618,11 +626,11 @@ config SM_DISPCC_6125
splash screen
config SM_DISPCC_8250
- tristate "SM8150 and SM8250 Display Clock Controller"
- depends on SM_GCC_8150 || SM_GCC_8250
+ tristate "SM8150/SM8250/SM8350 Display Clock Controller"
+ depends on SM_GCC_8150 || SM_GCC_8250 || SM_GCC_8350
help
Support for the display clock controller on Qualcomm Technologies, Inc
- SM8150 and SM8250 devices.
+ SM8150/SM8250/SM8350 devices.
Say Y if you want to support display devices and functionality such as
splash screen.
@@ -712,6 +720,14 @@ config SM_GPUCC_8250
Say Y if you want to support graphics controller devices and
functionality such as 3D graphics.
+config SM_GPUCC_8350
+ tristate "SM8350 Graphics Clock Controller"
+ select SM_GCC_8350
+ help
+ Support for the graphics clock controller on SM8350 devices.
+ Say Y if you want to support graphics controller devices and
+ functionality such as 3D graphics.
+
config SM_VIDEOCC_8150
tristate "SM8150 Video Clock Controller"
select SM_GCC_8150
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 36789f5233ef..fbcf04073f07 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -11,6 +11,7 @@ clk-qcom-y += clk-branch.o
clk-qcom-y += clk-regmap-divider.o
clk-qcom-y += clk-regmap-mux.o
clk-qcom-y += clk-regmap-mux-div.o
+clk-qcom-y += clk-regmap-phy-mux.o
clk-qcom-$(CONFIG_KRAIT_CLOCKS) += clk-krait.o
clk-qcom-y += clk-hfpll.o
clk-qcom-y += reset.o
@@ -88,6 +89,7 @@ obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o
obj-$(CONFIG_SDX_GCC_55) += gcc-sdx55.o
obj-$(CONFIG_SDX_GCC_65) += gcc-sdx65.o
obj-$(CONFIG_SM_CAMCC_8250) += camcc-sm8250.o
+obj-$(CONFIG_SM_CAMCC_8450) += camcc-sm8450.o
obj-$(CONFIG_SM_DISPCC_6125) += dispcc-sm6125.o
obj-$(CONFIG_SM_DISPCC_6350) += dispcc-sm6350.o
obj-$(CONFIG_SM_DISPCC_8250) += dispcc-sm8250.o
@@ -101,6 +103,7 @@ obj-$(CONFIG_SM_GCC_8450) += gcc-sm8450.o
obj-$(CONFIG_SM_GPUCC_6350) += gpucc-sm6350.o
obj-$(CONFIG_SM_GPUCC_8150) += gpucc-sm8150.o
obj-$(CONFIG_SM_GPUCC_8250) += gpucc-sm8250.o
+obj-$(CONFIG_SM_GPUCC_8350) += gpucc-sm8350.o
obj-$(CONFIG_SM_VIDEOCC_8150) += videocc-sm8150.o
obj-$(CONFIG_SM_VIDEOCC_8250) += videocc-sm8250.o
obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o
diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c
index be3f95326965..27d44188a7ab 100644
--- a/drivers/clk/qcom/camcc-sdm845.c
+++ b/drivers/clk/qcom/camcc-sdm845.c
@@ -1534,6 +1534,8 @@ static struct clk_branch cam_cc_sys_tmr_clk = {
},
};
+static struct gdsc titan_top_gdsc;
+
static struct gdsc bps_gdsc = {
.gdscr = 0x6004,
.pd = {
@@ -1567,6 +1569,7 @@ static struct gdsc ife_0_gdsc = {
.name = "ife_0_gdsc",
},
.flags = POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -1576,6 +1579,7 @@ static struct gdsc ife_1_gdsc = {
.name = "ife_1_gdsc",
},
.flags = POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
diff --git a/drivers/clk/qcom/camcc-sm8250.c b/drivers/clk/qcom/camcc-sm8250.c
index 439eaafdcc86..9b32c56a5bc5 100644
--- a/drivers/clk/qcom/camcc-sm8250.c
+++ b/drivers/clk/qcom/camcc-sm8250.c
@@ -2205,6 +2205,8 @@ static struct clk_branch cam_cc_sleep_clk = {
},
};
+static struct gdsc titan_top_gdsc;
+
static struct gdsc bps_gdsc = {
.gdscr = 0x7004,
.pd = {
@@ -2238,6 +2240,7 @@ static struct gdsc ife_0_gdsc = {
.name = "ife_0_gdsc",
},
.flags = POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -2247,6 +2250,7 @@ static struct gdsc ife_1_gdsc = {
.name = "ife_1_gdsc",
},
.flags = POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -2440,17 +2444,7 @@ static struct platform_driver cam_cc_sm8250_driver = {
},
};
-static int __init cam_cc_sm8250_init(void)
-{
- return platform_driver_register(&cam_cc_sm8250_driver);
-}
-subsys_initcall(cam_cc_sm8250_init);
-
-static void __exit cam_cc_sm8250_exit(void)
-{
- platform_driver_unregister(&cam_cc_sm8250_driver);
-}
-module_exit(cam_cc_sm8250_exit);
+module_platform_driver(cam_cc_sm8250_driver);
MODULE_DESCRIPTION("QTI CAMCC SM8250 Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/camcc-sm8450.c b/drivers/clk/qcom/camcc-sm8450.c
new file mode 100644
index 000000000000..e3c09471dadf
--- /dev/null
+++ b/drivers/clk/qcom/camcc-sm8450.c
@@ -0,0 +1,2856 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm8450-camcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_IFACE,
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_SLEEP_CLK
+};
+
+enum {
+ P_BI_TCXO,
+ P_CAM_CC_PLL0_OUT_EVEN,
+ P_CAM_CC_PLL0_OUT_MAIN,
+ P_CAM_CC_PLL0_OUT_ODD,
+ P_CAM_CC_PLL1_OUT_EVEN,
+ P_CAM_CC_PLL2_OUT_EVEN,
+ P_CAM_CC_PLL2_OUT_MAIN,
+ P_CAM_CC_PLL3_OUT_EVEN,
+ P_CAM_CC_PLL4_OUT_EVEN,
+ P_CAM_CC_PLL5_OUT_EVEN,
+ P_CAM_CC_PLL6_OUT_EVEN,
+ P_CAM_CC_PLL7_OUT_EVEN,
+ P_CAM_CC_PLL8_OUT_EVEN,
+ P_SLEEP_CLK,
+};
+
+static const struct pll_vco lucid_evo_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+static const struct pll_vco rivian_evo_vco[] = {
+ { 864000000, 1056000000, 0 },
+};
+
+static const struct clk_parent_data pll_parent_data_tcxo = { .index = DT_BI_TCXO };
+
+static const struct alpha_pll_config cam_cc_pll0_config = {
+ .l = 0x3e,
+ .alpha = 0x8000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00008400,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll cam_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0",
+ .parent_data = &pll_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0_out_even",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_odd[] = {
+ { 0x2, 3 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_odd = {
+ .offset = 0x0,
+ .post_div_shift = 14,
+ .post_div_table = post_div_table_cam_cc_pll0_out_odd,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_odd),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0_out_odd",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll1_config = {
+ .l = 0x25,
+ .alpha = 0xeaaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll cam_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll1",
+ .parent_data = &pll_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll1_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll1_out_even = {
+ .offset = 0x1000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll1_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll1_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll1_out_even",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_pll1.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll2_config = {
+ .l = 0x32,
+ .alpha = 0x0,
+ .config_ctl_val = 0x90008820,
+ .config_ctl_hi_val = 0x00890263,
+ .config_ctl_hi1_val = 0x00000217,
+};
+
+static struct clk_alpha_pll cam_cc_pll2 = {
+ .offset = 0x2000,
+ .vco_table = rivian_evo_vco,
+ .num_vco = ARRAY_SIZE(rivian_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_RIVIAN_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll2",
+ .parent_data = &pll_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_rivian_evo_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll3_config = {
+ .l = 0x2d,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll cam_cc_pll3 = {
+ .offset = 0x3000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll3",
+ .parent_data = &pll_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll3_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = {
+ .offset = 0x3000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll3_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll3_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll3_out_even",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_pll3.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll4_config = {
+ .l = 0x2d,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll cam_cc_pll4 = {
+ .offset = 0x4000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll4",
+ .parent_data = &pll_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll4_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll4_out_even = {
+ .offset = 0x4000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll4_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll4_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll4_out_even",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_pll4.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll5_config = {
+ .l = 0x2d,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll cam_cc_pll5 = {
+ .offset = 0x5000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll5",
+ .parent_data = &pll_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll5_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll5_out_even = {
+ .offset = 0x5000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll5_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll5_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll5_out_even",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_pll5.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll6_config = {
+ .l = 0x2d,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll cam_cc_pll6 = {
+ .offset = 0x6000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll6",
+ .parent_data = &pll_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll6_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll6_out_even = {
+ .offset = 0x6000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll6_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll6_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll6_out_even",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_pll6.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll7_config = {
+ .l = 0x2d,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll cam_cc_pll7 = {
+ .offset = 0x7000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll7",
+ .parent_data = &pll_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll7_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll7_out_even = {
+ .offset = 0x7000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll7_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll7_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll7_out_even",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_pll7.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll8_config = {
+ .l = 0x32,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll cam_cc_pll8 = {
+ .offset = 0x8000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll8",
+ .parent_data = &pll_parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll8_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll8_out_even = {
+ .offset = 0x8000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll8_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll8_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll8_out_even",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_pll8.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static const struct parent_map cam_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 2 },
+ { P_CAM_CC_PLL0_OUT_ODD, 3 },
+ { P_CAM_CC_PLL8_OUT_EVEN, 5 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll8_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL2_OUT_EVEN, 3 },
+ { P_CAM_CC_PLL2_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll2.clkr.hw },
+ { .hw = &cam_cc_pll2.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL3_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll3_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL4_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll4_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL5_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll5_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL1_OUT_EVEN, 4 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll1_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL6_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll6_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL7_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll7_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_8[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_8[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map cam_cc_parent_map_9[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_9_ao[] = {
+ { .index = DT_BI_TCXO_AO, .name = "bi_tcxo_ao" },
+};
+
+static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_ODD, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL8_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_bps_clk_src = {
+ .cmd_rcgr = 0x10050,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_bps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_camnoc_axi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_camnoc_axi_clk_src = {
+ .cmd_rcgr = 0x13194,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_camnoc_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cci_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_CAM_CC_PLL0_OUT_EVEN, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cci_0_clk_src = {
+ .cmd_rcgr = 0x1312c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_0_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_cci_1_clk_src = {
+ .cmd_rcgr = 0x13148,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_1_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(480000000, P_CAM_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
+ .cmd_rcgr = 0x1104c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cphy_rx_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_csi0phytimer_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x150e0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x15104,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
+ .cmd_rcgr = 0x15124,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
+ .cmd_rcgr = 0x1514c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi3phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi4phytimer_clk_src = {
+ .cmd_rcgr = 0x1516c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi4phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi5phytimer_clk_src = {
+ .cmd_rcgr = 0x1518c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi5phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_csid_clk_src[] = {
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(480000000, P_CAM_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_csid_clk_src = {
+ .cmd_rcgr = 0x13174,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csid_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_MAIN, 4, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
+ .cmd_rcgr = 0x10018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fast_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_fast_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_icp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL8_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_icp_clk_src = {
+ .cmd_rcgr = 0x13108,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_icp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(432000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(594000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(675000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(727000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_clk_src = {
+ .cmd_rcgr = 0x11018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_1_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(432000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(594000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(675000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(727000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_1_clk_src = {
+ .cmd_rcgr = 0x12018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_ife_1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_2_clk_src[] = {
+ F(432000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(594000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(675000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(727000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_2_clk_src = {
+ .cmd_rcgr = 0x12064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_4,
+ .freq_tbl = ftbl_cam_cc_ife_2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_2_clk_src",
+ .parent_data = cam_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_lite_clk_src[] = {
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL8_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_clk_src = {
+ .cmd_rcgr = 0x13000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_lite_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = {
+ .cmd_rcgr = 0x13024,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_lite_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ipe_nps_clk_src[] = {
+ F(364000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(500000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(700000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ipe_nps_clk_src = {
+ .cmd_rcgr = 0x1008c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_5,
+ .freq_tbl = ftbl_cam_cc_ipe_nps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_nps_clk_src",
+ .parent_data = cam_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_jpeg_clk_src = {
+ .cmd_rcgr = 0x130dc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_bps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_jpeg_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(24000000, P_CAM_CC_PLL2_OUT_MAIN, 10, 1, 4),
+ F(68571429, P_CAM_CC_PLL2_OUT_MAIN, 14, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_mclk0_clk_src = {
+ .cmd_rcgr = 0x15000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk1_clk_src = {
+ .cmd_rcgr = 0x1501c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk2_clk_src = {
+ .cmd_rcgr = 0x15038,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk3_clk_src = {
+ .cmd_rcgr = 0x15054,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk4_clk_src = {
+ .cmd_rcgr = 0x15070,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk4_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk5_clk_src = {
+ .cmd_rcgr = 0x1508c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk5_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk6_clk_src = {
+ .cmd_rcgr = 0x150a8,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk6_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk7_clk_src = {
+ .cmd_rcgr = 0x150c4,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk7_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_qdss_debug_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(75000000, P_CAM_CC_PLL0_OUT_EVEN, 8, 0, 0),
+ F(150000000, P_CAM_CC_PLL0_OUT_EVEN, 4, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_qdss_debug_clk_src = {
+ .cmd_rcgr = 0x131bc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_qdss_debug_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_qdss_debug_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_sfe_0_clk_src[] = {
+ F(432000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(594000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(675000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(727000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_sfe_0_clk_src = {
+ .cmd_rcgr = 0x13064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_6,
+ .freq_tbl = ftbl_cam_cc_sfe_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_0_clk_src",
+ .parent_data = cam_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_sfe_1_clk_src[] = {
+ F(432000000, P_CAM_CC_PLL7_OUT_EVEN, 1, 0, 0),
+ F(594000000, P_CAM_CC_PLL7_OUT_EVEN, 1, 0, 0),
+ F(675000000, P_CAM_CC_PLL7_OUT_EVEN, 1, 0, 0),
+ F(727000000, P_CAM_CC_PLL7_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_sfe_1_clk_src = {
+ .cmd_rcgr = 0x130ac,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_7,
+ .freq_tbl = ftbl_cam_cc_sfe_1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_1_clk_src",
+ .parent_data = cam_cc_parent_data_7,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_sleep_clk_src = {
+ .cmd_rcgr = 0x13210,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_8,
+ .freq_tbl = ftbl_cam_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sleep_clk_src",
+ .parent_data = cam_cc_parent_data_8,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_slow_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(80000000, P_CAM_CC_PLL0_OUT_EVEN, 7.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
+ .cmd_rcgr = 0x10034,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_slow_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_slow_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_xo_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_xo_clk_src = {
+ .cmd_rcgr = 0x131f4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_9,
+ .freq_tbl = ftbl_cam_cc_xo_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_xo_clk_src",
+ .parent_data = cam_cc_parent_data_9_ao,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_9_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch cam_cc_gdsc_clk = {
+ .halt_reg = 0x1320c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1320c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_gdsc_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_ahb_clk = {
+ .halt_reg = 0x1004c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1004c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_clk = {
+ .halt_reg = 0x10068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_bps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_fast_ahb_clk = {
+ .halt_reg = 0x10030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_fast_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_axi_clk = {
+ .halt_reg = 0x131ac,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x131ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_dcd_xo_clk = {
+ .halt_reg = 0x131b4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x131b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_dcd_xo_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_0_clk = {
+ .halt_reg = 0x13144,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13144,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_cci_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_1_clk = {
+ .halt_reg = 0x13160,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13160,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_cci_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_core_ahb_clk = {
+ .halt_reg = 0x131f0,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x131f0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_core_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ahb_clk = {
+ .halt_reg = 0x13164,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13164,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_bps_clk = {
+ .halt_reg = 0x10070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_bps_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_bps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_fast_ahb_clk = {
+ .halt_reg = 0x1316c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1316c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_fast_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ife_0_clk = {
+ .halt_reg = 0x11038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x11038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ife_0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ife_1_clk = {
+ .halt_reg = 0x12038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ife_1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ife_2_clk = {
+ .halt_reg = 0x12084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ife_2_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ife_lite_clk = {
+ .halt_reg = 0x13020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ife_lite_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_lite_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ipe_nps_clk = {
+ .halt_reg = 0x100ac,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x100ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ipe_nps_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ipe_nps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_sbi_clk = {
+ .halt_reg = 0x100ec,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x100ec,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_sbi_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_sfe_0_clk = {
+ .halt_reg = 0x13084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_sfe_0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_sfe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_sfe_1_clk = {
+ .halt_reg = 0x130cc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x130cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_sfe_1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_sfe_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi0phytimer_clk = {
+ .halt_reg = 0x150f8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x150f8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_csi0phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi1phytimer_clk = {
+ .halt_reg = 0x1511c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1511c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_csi1phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi2phytimer_clk = {
+ .halt_reg = 0x1513c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1513c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_csi2phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi3phytimer_clk = {
+ .halt_reg = 0x15164,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15164,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi3phytimer_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_csi3phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi4phytimer_clk = {
+ .halt_reg = 0x15184,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15184,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi4phytimer_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_csi4phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi5phytimer_clk = {
+ .halt_reg = 0x151a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x151a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi5phytimer_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_csi5phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csid_clk = {
+ .halt_reg = 0x1318c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1318c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csid_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csid_csiphy_rx_clk = {
+ .halt_reg = 0x15100,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15100,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csid_csiphy_rx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy0_clk = {
+ .halt_reg = 0x150fc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x150fc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy1_clk = {
+ .halt_reg = 0x15120,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15120,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy2_clk = {
+ .halt_reg = 0x15140,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15140,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy2_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy3_clk = {
+ .halt_reg = 0x15168,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15168,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy3_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy4_clk = {
+ .halt_reg = 0x15188,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15188,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy4_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy5_clk = {
+ .halt_reg = 0x151a8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x151a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy5_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_ahb_clk = {
+ .halt_reg = 0x13128,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13128,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_clk = {
+ .halt_reg = 0x13120,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13120,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_icp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_clk = {
+ .halt_reg = 0x11030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x11030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_dsp_clk = {
+ .halt_reg = 0x1103c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1103c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_dsp_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_fast_ahb_clk = {
+ .halt_reg = 0x11048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x11048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_fast_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_clk = {
+ .halt_reg = 0x12030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_dsp_clk = {
+ .halt_reg = 0x1203c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1203c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_dsp_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_fast_ahb_clk = {
+ .halt_reg = 0x12048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_fast_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_2_clk = {
+ .halt_reg = 0x1207c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1207c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_2_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_2_dsp_clk = {
+ .halt_reg = 0x12088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_2_dsp_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_2_fast_ahb_clk = {
+ .halt_reg = 0x12094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12094,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_2_fast_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_ahb_clk = {
+ .halt_reg = 0x13048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_clk = {
+ .halt_reg = 0x13018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_lite_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_cphy_rx_clk = {
+ .halt_reg = 0x13044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_cphy_rx_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_csid_clk = {
+ .halt_reg = 0x1303c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1303c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_csid_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_lite_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_nps_ahb_clk = {
+ .halt_reg = 0x100c0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x100c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_nps_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_nps_clk = {
+ .halt_reg = 0x100a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x100a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_nps_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ipe_nps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_nps_fast_ahb_clk = {
+ .halt_reg = 0x100c4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x100c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_nps_fast_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_pps_clk = {
+ .halt_reg = 0x100b0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x100b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_pps_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ipe_nps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_pps_fast_ahb_clk = {
+ .halt_reg = 0x100c8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x100c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_pps_fast_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_jpeg_clk = {
+ .halt_reg = 0x130f4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x130f4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_jpeg_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_jpeg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk0_clk = {
+ .halt_reg = 0x15018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk1_clk = {
+ .halt_reg = 0x15034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_mclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk2_clk = {
+ .halt_reg = 0x15050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_mclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk3_clk = {
+ .halt_reg = 0x1506c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1506c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_mclk3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk4_clk = {
+ .halt_reg = 0x15088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk4_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_mclk4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk5_clk = {
+ .halt_reg = 0x150a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x150a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk5_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_mclk5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk6_clk = {
+ .halt_reg = 0x150c0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x150c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk6_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_mclk6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk7_clk = {
+ .halt_reg = 0x150dc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x150dc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk7_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_mclk7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_qdss_debug_clk = {
+ .halt_reg = 0x131d4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x131d4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_qdss_debug_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_qdss_debug_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_qdss_debug_xo_clk = {
+ .halt_reg = 0x131d8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x131d8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_qdss_debug_xo_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sbi_ahb_clk = {
+ .halt_reg = 0x100f0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x100f0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sbi_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sbi_clk = {
+ .halt_reg = 0x100e4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x100e4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sbi_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sfe_0_clk = {
+ .halt_reg = 0x1307c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1307c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_sfe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sfe_0_fast_ahb_clk = {
+ .halt_reg = 0x13090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_0_fast_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sfe_1_clk = {
+ .halt_reg = 0x130c4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x130c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_sfe_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sfe_1_fast_ahb_clk = {
+ .halt_reg = 0x130d8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x130d8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_1_fast_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sleep_clk = {
+ .halt_reg = 0x13228,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13228,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sleep_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &cam_cc_sleep_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap *cam_cc_sm8450_clocks[] = {
+ [CAM_CC_BPS_AHB_CLK] = &cam_cc_bps_ahb_clk.clkr,
+ [CAM_CC_BPS_CLK] = &cam_cc_bps_clk.clkr,
+ [CAM_CC_BPS_CLK_SRC] = &cam_cc_bps_clk_src.clkr,
+ [CAM_CC_BPS_FAST_AHB_CLK] = &cam_cc_bps_fast_ahb_clk.clkr,
+ [CAM_CC_CAMNOC_AXI_CLK] = &cam_cc_camnoc_axi_clk.clkr,
+ [CAM_CC_CAMNOC_AXI_CLK_SRC] = &cam_cc_camnoc_axi_clk_src.clkr,
+ [CAM_CC_CAMNOC_DCD_XO_CLK] = &cam_cc_camnoc_dcd_xo_clk.clkr,
+ [CAM_CC_CCI_0_CLK] = &cam_cc_cci_0_clk.clkr,
+ [CAM_CC_CCI_0_CLK_SRC] = &cam_cc_cci_0_clk_src.clkr,
+ [CAM_CC_CCI_1_CLK] = &cam_cc_cci_1_clk.clkr,
+ [CAM_CC_CCI_1_CLK_SRC] = &cam_cc_cci_1_clk_src.clkr,
+ [CAM_CC_CORE_AHB_CLK] = &cam_cc_core_ahb_clk.clkr,
+ [CAM_CC_CPAS_AHB_CLK] = &cam_cc_cpas_ahb_clk.clkr,
+ [CAM_CC_CPAS_BPS_CLK] = &cam_cc_cpas_bps_clk.clkr,
+ [CAM_CC_CPAS_FAST_AHB_CLK] = &cam_cc_cpas_fast_ahb_clk.clkr,
+ [CAM_CC_CPAS_IFE_0_CLK] = &cam_cc_cpas_ife_0_clk.clkr,
+ [CAM_CC_CPAS_IFE_1_CLK] = &cam_cc_cpas_ife_1_clk.clkr,
+ [CAM_CC_CPAS_IFE_2_CLK] = &cam_cc_cpas_ife_2_clk.clkr,
+ [CAM_CC_CPAS_IFE_LITE_CLK] = &cam_cc_cpas_ife_lite_clk.clkr,
+ [CAM_CC_CPAS_IPE_NPS_CLK] = &cam_cc_cpas_ipe_nps_clk.clkr,
+ [CAM_CC_CPAS_SBI_CLK] = &cam_cc_cpas_sbi_clk.clkr,
+ [CAM_CC_CPAS_SFE_0_CLK] = &cam_cc_cpas_sfe_0_clk.clkr,
+ [CAM_CC_CPAS_SFE_1_CLK] = &cam_cc_cpas_sfe_1_clk.clkr,
+ [CAM_CC_CPHY_RX_CLK_SRC] = &cam_cc_cphy_rx_clk_src.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK] = &cam_cc_csi0phytimer_clk.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK_SRC] = &cam_cc_csi0phytimer_clk_src.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK] = &cam_cc_csi1phytimer_clk.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK_SRC] = &cam_cc_csi1phytimer_clk_src.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK] = &cam_cc_csi2phytimer_clk.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK_SRC] = &cam_cc_csi2phytimer_clk_src.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK] = &cam_cc_csi3phytimer_clk.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK_SRC] = &cam_cc_csi3phytimer_clk_src.clkr,
+ [CAM_CC_CSI4PHYTIMER_CLK] = &cam_cc_csi4phytimer_clk.clkr,
+ [CAM_CC_CSI4PHYTIMER_CLK_SRC] = &cam_cc_csi4phytimer_clk_src.clkr,
+ [CAM_CC_CSI5PHYTIMER_CLK] = &cam_cc_csi5phytimer_clk.clkr,
+ [CAM_CC_CSI5PHYTIMER_CLK_SRC] = &cam_cc_csi5phytimer_clk_src.clkr,
+ [CAM_CC_CSID_CLK] = &cam_cc_csid_clk.clkr,
+ [CAM_CC_CSID_CLK_SRC] = &cam_cc_csid_clk_src.clkr,
+ [CAM_CC_CSID_CSIPHY_RX_CLK] = &cam_cc_csid_csiphy_rx_clk.clkr,
+ [CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr,
+ [CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr,
+ [CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr,
+ [CAM_CC_CSIPHY3_CLK] = &cam_cc_csiphy3_clk.clkr,
+ [CAM_CC_CSIPHY4_CLK] = &cam_cc_csiphy4_clk.clkr,
+ [CAM_CC_CSIPHY5_CLK] = &cam_cc_csiphy5_clk.clkr,
+ [CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr,
+ [CAM_CC_GDSC_CLK] = &cam_cc_gdsc_clk.clkr,
+ [CAM_CC_ICP_AHB_CLK] = &cam_cc_icp_ahb_clk.clkr,
+ [CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr,
+ [CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr,
+ [CAM_CC_IFE_0_CLK] = &cam_cc_ife_0_clk.clkr,
+ [CAM_CC_IFE_0_CLK_SRC] = &cam_cc_ife_0_clk_src.clkr,
+ [CAM_CC_IFE_0_DSP_CLK] = &cam_cc_ife_0_dsp_clk.clkr,
+ [CAM_CC_IFE_0_FAST_AHB_CLK] = &cam_cc_ife_0_fast_ahb_clk.clkr,
+ [CAM_CC_IFE_1_CLK] = &cam_cc_ife_1_clk.clkr,
+ [CAM_CC_IFE_1_CLK_SRC] = &cam_cc_ife_1_clk_src.clkr,
+ [CAM_CC_IFE_1_DSP_CLK] = &cam_cc_ife_1_dsp_clk.clkr,
+ [CAM_CC_IFE_1_FAST_AHB_CLK] = &cam_cc_ife_1_fast_ahb_clk.clkr,
+ [CAM_CC_IFE_2_CLK] = &cam_cc_ife_2_clk.clkr,
+ [CAM_CC_IFE_2_CLK_SRC] = &cam_cc_ife_2_clk_src.clkr,
+ [CAM_CC_IFE_2_DSP_CLK] = &cam_cc_ife_2_dsp_clk.clkr,
+ [CAM_CC_IFE_2_FAST_AHB_CLK] = &cam_cc_ife_2_fast_ahb_clk.clkr,
+ [CAM_CC_IFE_LITE_AHB_CLK] = &cam_cc_ife_lite_ahb_clk.clkr,
+ [CAM_CC_IFE_LITE_CLK] = &cam_cc_ife_lite_clk.clkr,
+ [CAM_CC_IFE_LITE_CLK_SRC] = &cam_cc_ife_lite_clk_src.clkr,
+ [CAM_CC_IFE_LITE_CPHY_RX_CLK] = &cam_cc_ife_lite_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_LITE_CSID_CLK] = &cam_cc_ife_lite_csid_clk.clkr,
+ [CAM_CC_IFE_LITE_CSID_CLK_SRC] = &cam_cc_ife_lite_csid_clk_src.clkr,
+ [CAM_CC_IPE_NPS_AHB_CLK] = &cam_cc_ipe_nps_ahb_clk.clkr,
+ [CAM_CC_IPE_NPS_CLK] = &cam_cc_ipe_nps_clk.clkr,
+ [CAM_CC_IPE_NPS_CLK_SRC] = &cam_cc_ipe_nps_clk_src.clkr,
+ [CAM_CC_IPE_NPS_FAST_AHB_CLK] = &cam_cc_ipe_nps_fast_ahb_clk.clkr,
+ [CAM_CC_IPE_PPS_CLK] = &cam_cc_ipe_pps_clk.clkr,
+ [CAM_CC_IPE_PPS_FAST_AHB_CLK] = &cam_cc_ipe_pps_fast_ahb_clk.clkr,
+ [CAM_CC_JPEG_CLK] = &cam_cc_jpeg_clk.clkr,
+ [CAM_CC_JPEG_CLK_SRC] = &cam_cc_jpeg_clk_src.clkr,
+ [CAM_CC_MCLK0_CLK] = &cam_cc_mclk0_clk.clkr,
+ [CAM_CC_MCLK0_CLK_SRC] = &cam_cc_mclk0_clk_src.clkr,
+ [CAM_CC_MCLK1_CLK] = &cam_cc_mclk1_clk.clkr,
+ [CAM_CC_MCLK1_CLK_SRC] = &cam_cc_mclk1_clk_src.clkr,
+ [CAM_CC_MCLK2_CLK] = &cam_cc_mclk2_clk.clkr,
+ [CAM_CC_MCLK2_CLK_SRC] = &cam_cc_mclk2_clk_src.clkr,
+ [CAM_CC_MCLK3_CLK] = &cam_cc_mclk3_clk.clkr,
+ [CAM_CC_MCLK3_CLK_SRC] = &cam_cc_mclk3_clk_src.clkr,
+ [CAM_CC_MCLK4_CLK] = &cam_cc_mclk4_clk.clkr,
+ [CAM_CC_MCLK4_CLK_SRC] = &cam_cc_mclk4_clk_src.clkr,
+ [CAM_CC_MCLK5_CLK] = &cam_cc_mclk5_clk.clkr,
+ [CAM_CC_MCLK5_CLK_SRC] = &cam_cc_mclk5_clk_src.clkr,
+ [CAM_CC_MCLK6_CLK] = &cam_cc_mclk6_clk.clkr,
+ [CAM_CC_MCLK6_CLK_SRC] = &cam_cc_mclk6_clk_src.clkr,
+ [CAM_CC_MCLK7_CLK] = &cam_cc_mclk7_clk.clkr,
+ [CAM_CC_MCLK7_CLK_SRC] = &cam_cc_mclk7_clk_src.clkr,
+ [CAM_CC_PLL0] = &cam_cc_pll0.clkr,
+ [CAM_CC_PLL0_OUT_EVEN] = &cam_cc_pll0_out_even.clkr,
+ [CAM_CC_PLL0_OUT_ODD] = &cam_cc_pll0_out_odd.clkr,
+ [CAM_CC_PLL1] = &cam_cc_pll1.clkr,
+ [CAM_CC_PLL1_OUT_EVEN] = &cam_cc_pll1_out_even.clkr,
+ [CAM_CC_PLL2] = &cam_cc_pll2.clkr,
+ [CAM_CC_PLL3] = &cam_cc_pll3.clkr,
+ [CAM_CC_PLL3_OUT_EVEN] = &cam_cc_pll3_out_even.clkr,
+ [CAM_CC_PLL4] = &cam_cc_pll4.clkr,
+ [CAM_CC_PLL4_OUT_EVEN] = &cam_cc_pll4_out_even.clkr,
+ [CAM_CC_PLL5] = &cam_cc_pll5.clkr,
+ [CAM_CC_PLL5_OUT_EVEN] = &cam_cc_pll5_out_even.clkr,
+ [CAM_CC_PLL6] = &cam_cc_pll6.clkr,
+ [CAM_CC_PLL6_OUT_EVEN] = &cam_cc_pll6_out_even.clkr,
+ [CAM_CC_PLL7] = &cam_cc_pll7.clkr,
+ [CAM_CC_PLL7_OUT_EVEN] = &cam_cc_pll7_out_even.clkr,
+ [CAM_CC_PLL8] = &cam_cc_pll8.clkr,
+ [CAM_CC_PLL8_OUT_EVEN] = &cam_cc_pll8_out_even.clkr,
+ [CAM_CC_QDSS_DEBUG_CLK] = &cam_cc_qdss_debug_clk.clkr,
+ [CAM_CC_QDSS_DEBUG_CLK_SRC] = &cam_cc_qdss_debug_clk_src.clkr,
+ [CAM_CC_QDSS_DEBUG_XO_CLK] = &cam_cc_qdss_debug_xo_clk.clkr,
+ [CAM_CC_SBI_AHB_CLK] = &cam_cc_sbi_ahb_clk.clkr,
+ [CAM_CC_SBI_CLK] = &cam_cc_sbi_clk.clkr,
+ [CAM_CC_SFE_0_CLK] = &cam_cc_sfe_0_clk.clkr,
+ [CAM_CC_SFE_0_CLK_SRC] = &cam_cc_sfe_0_clk_src.clkr,
+ [CAM_CC_SFE_0_FAST_AHB_CLK] = &cam_cc_sfe_0_fast_ahb_clk.clkr,
+ [CAM_CC_SFE_1_CLK] = &cam_cc_sfe_1_clk.clkr,
+ [CAM_CC_SFE_1_CLK_SRC] = &cam_cc_sfe_1_clk_src.clkr,
+ [CAM_CC_SFE_1_FAST_AHB_CLK] = &cam_cc_sfe_1_fast_ahb_clk.clkr,
+ [CAM_CC_SLEEP_CLK] = &cam_cc_sleep_clk.clkr,
+ [CAM_CC_SLEEP_CLK_SRC] = &cam_cc_sleep_clk_src.clkr,
+ [CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr,
+ [CAM_CC_XO_CLK_SRC] = &cam_cc_xo_clk_src.clkr,
+};
+
+static const struct qcom_reset_map cam_cc_sm8450_resets[] = {
+ [CAM_CC_BPS_BCR] = { 0x10000 },
+ [CAM_CC_ICP_BCR] = { 0x13104 },
+ [CAM_CC_IFE_0_BCR] = { 0x11000 },
+ [CAM_CC_IFE_1_BCR] = { 0x12000 },
+ [CAM_CC_IFE_2_BCR] = { 0x1204c },
+ [CAM_CC_IPE_0_BCR] = { 0x10074 },
+ [CAM_CC_QDSS_DEBUG_BCR] = { 0x131b8 },
+ [CAM_CC_SBI_BCR] = { 0x100cc },
+ [CAM_CC_SFE_0_BCR] = { 0x1304c },
+ [CAM_CC_SFE_1_BCR] = { 0x13094 },
+};
+
+static const struct regmap_config cam_cc_sm8450_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1601c,
+ .fast_io = true,
+};
+
+static struct gdsc titan_top_gdsc;
+
+static struct gdsc bps_gdsc = {
+ .gdscr = 0x10004,
+ .pd = {
+ .name = "bps_gdsc",
+ },
+ .flags = HW_CTRL | POLL_CFG_GDSCR,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ipe_0_gdsc = {
+ .gdscr = 0x10078,
+ .pd = {
+ .name = "ipe_0_gdsc",
+ },
+ .flags = HW_CTRL | POLL_CFG_GDSCR,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc sbi_gdsc = {
+ .gdscr = 0x100d0,
+ .pd = {
+ .name = "sbi_gdsc",
+ },
+ .flags = POLL_CFG_GDSCR,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ife_0_gdsc = {
+ .gdscr = 0x11004,
+ .pd = {
+ .name = "ife_0_gdsc",
+ },
+ .flags = POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ife_1_gdsc = {
+ .gdscr = 0x12004,
+ .pd = {
+ .name = "ife_1_gdsc",
+ },
+ .flags = POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ife_2_gdsc = {
+ .gdscr = 0x12050,
+ .pd = {
+ .name = "ife_2_gdsc",
+ },
+ .flags = POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc sfe_0_gdsc = {
+ .gdscr = 0x13050,
+ .pd = {
+ .name = "sfe_0_gdsc",
+ },
+ .flags = POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc sfe_1_gdsc = {
+ .gdscr = 0x13098,
+ .pd = {
+ .name = "sfe_1_gdsc",
+ },
+ .flags = POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc titan_top_gdsc = {
+ .gdscr = 0x131dc,
+ .pd = {
+ .name = "titan_top_gdsc",
+ },
+ .flags = POLL_CFG_GDSCR,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc *cam_cc_sm8450_gdscs[] = {
+ [BPS_GDSC] = &bps_gdsc,
+ [IPE_0_GDSC] = &ipe_0_gdsc,
+ [SBI_GDSC] = &sbi_gdsc,
+ [IFE_0_GDSC] = &ife_0_gdsc,
+ [IFE_1_GDSC] = &ife_1_gdsc,
+ [IFE_2_GDSC] = &ife_2_gdsc,
+ [SFE_0_GDSC] = &sfe_0_gdsc,
+ [SFE_1_GDSC] = &sfe_1_gdsc,
+ [TITAN_TOP_GDSC] = &titan_top_gdsc,
+};
+
+static const struct qcom_cc_desc cam_cc_sm8450_desc = {
+ .config = &cam_cc_sm8450_regmap_config,
+ .clks = cam_cc_sm8450_clocks,
+ .num_clks = ARRAY_SIZE(cam_cc_sm8450_clocks),
+ .resets = cam_cc_sm8450_resets,
+ .num_resets = ARRAY_SIZE(cam_cc_sm8450_resets),
+ .gdscs = cam_cc_sm8450_gdscs,
+ .num_gdscs = ARRAY_SIZE(cam_cc_sm8450_gdscs),
+};
+
+static const struct of_device_id cam_cc_sm8450_match_table[] = {
+ { .compatible = "qcom,sm8450-camcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cam_cc_sm8450_match_table);
+
+static int cam_cc_sm8450_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &cam_cc_sm8450_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_lucid_evo_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
+ clk_rivian_evo_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll5, regmap, &cam_cc_pll5_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll6, regmap, &cam_cc_pll6_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll7, regmap, &cam_cc_pll7_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll8, regmap, &cam_cc_pll8_config);
+
+ return qcom_cc_really_probe(pdev, &cam_cc_sm8450_desc, regmap);
+}
+
+static struct platform_driver cam_cc_sm8450_driver = {
+ .probe = cam_cc_sm8450_probe,
+ .driver = {
+ .name = "camcc-sm8450",
+ .of_match_table = cam_cc_sm8450_match_table,
+ },
+};
+
+module_platform_driver(cam_cc_sm8450_driver);
+
+MODULE_DESCRIPTION("QCOM CAMCC SM8450 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index 4406cf609aae..b42684703fbb 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -154,6 +154,18 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL_U] = 0x30,
[PLL_OFF_TEST_CTL_U1] = 0x34,
},
+ [CLK_ALPHA_PLL_TYPE_RIVIAN_EVO] = {
+ [PLL_OFF_OPMODE] = 0x04,
+ [PLL_OFF_STATUS] = 0x0c,
+ [PLL_OFF_L_VAL] = 0x10,
+ [PLL_OFF_USER_CTL] = 0x14,
+ [PLL_OFF_USER_CTL_U] = 0x18,
+ [PLL_OFF_CONFIG_CTL] = 0x1c,
+ [PLL_OFF_CONFIG_CTL_U] = 0x20,
+ [PLL_OFF_CONFIG_CTL_U1] = 0x24,
+ [PLL_OFF_TEST_CTL] = 0x28,
+ [PLL_OFF_TEST_CTL_U] = 0x2c,
+ },
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
@@ -191,8 +203,10 @@ EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
#define LUCID_5LPE_ENABLE_VOTE_RUN BIT(21)
/* LUCID EVO PLL specific settings and offsets */
+#define LUCID_EVO_PCAL_NOT_DONE BIT(8)
#define LUCID_EVO_ENABLE_VOTE_RUN BIT(25)
#define LUCID_EVO_PLL_L_VAL_MASK GENMASK(15, 0)
+#define LUCID_EVO_PLL_CAL_L_VAL_SHIFT 16
/* ZONDA PLL specific */
#define ZONDA_PLL_OUT_MASK 0xf
@@ -1439,7 +1453,7 @@ const struct clk_ops clk_alpha_pll_postdiv_fabia_ops = {
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_fabia_ops);
/**
- * clk_lucid_pll_configure - configure the lucid pll
+ * clk_trion_pll_configure - configure the trion pll
*
* @pll: clk alpha pll
* @regmap: register map
@@ -1823,7 +1837,7 @@ const struct clk_ops clk_alpha_pll_lucid_5lpe_ops = {
.round_rate = clk_alpha_pll_round_rate,
.set_rate = alpha_pll_lucid_5lpe_set_rate,
};
-EXPORT_SYMBOL(clk_alpha_pll_lucid_5lpe_ops);
+EXPORT_SYMBOL_GPL(clk_alpha_pll_lucid_5lpe_ops);
const struct clk_ops clk_alpha_pll_fixed_lucid_5lpe_ops = {
.enable = alpha_pll_lucid_5lpe_enable,
@@ -1832,14 +1846,14 @@ const struct clk_ops clk_alpha_pll_fixed_lucid_5lpe_ops = {
.recalc_rate = clk_trion_pll_recalc_rate,
.round_rate = clk_alpha_pll_round_rate,
};
-EXPORT_SYMBOL(clk_alpha_pll_fixed_lucid_5lpe_ops);
+EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_lucid_5lpe_ops);
const struct clk_ops clk_alpha_pll_postdiv_lucid_5lpe_ops = {
.recalc_rate = clk_alpha_pll_postdiv_fabia_recalc_rate,
.round_rate = clk_alpha_pll_postdiv_fabia_round_rate,
.set_rate = clk_lucid_5lpe_pll_postdiv_set_rate,
};
-EXPORT_SYMBOL(clk_alpha_pll_postdiv_lucid_5lpe_ops);
+EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_lucid_5lpe_ops);
void clk_zonda_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config)
@@ -1992,7 +2006,33 @@ const struct clk_ops clk_alpha_pll_zonda_ops = {
.round_rate = clk_alpha_pll_round_rate,
.set_rate = clk_zonda_pll_set_rate,
};
-EXPORT_SYMBOL(clk_alpha_pll_zonda_ops);
+EXPORT_SYMBOL_GPL(clk_alpha_pll_zonda_ops);
+
+void clk_lucid_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config)
+{
+ u32 lval = config->l;
+
+ lval |= TRION_PLL_CAL_VAL << LUCID_EVO_PLL_CAL_L_VAL_SHIFT;
+ clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), lval);
+ clk_alpha_pll_write_config(regmap, PLL_ALPHA_VAL(pll), config->alpha);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL(pll), config->config_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U(pll), config->config_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U1(pll), config->config_ctl_hi1_val);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL(pll), config->user_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U(pll), config->user_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL(pll), config->test_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U(pll), config->test_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U1(pll), config->test_ctl_hi1_val);
+
+ /* Disable PLL output */
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
+
+ /* Set operation mode to STANDBY and de-assert the reset */
+ regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY);
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N);
+}
+EXPORT_SYMBOL_GPL(clk_lucid_evo_pll_configure);
static int alpha_pll_lucid_evo_enable(struct clk_hw *hw)
{
@@ -2079,6 +2119,31 @@ static void alpha_pll_lucid_evo_disable(struct clk_hw *hw)
regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY);
}
+static int alpha_pll_lucid_evo_prepare(struct clk_hw *hw)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ struct clk_hw *p;
+ u32 val = 0;
+ int ret;
+
+ /* Return early if calibration is not needed. */
+ regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+ if (!(val & LUCID_EVO_PCAL_NOT_DONE))
+ return 0;
+
+ p = clk_hw_get_parent(hw);
+ if (!p)
+ return -EINVAL;
+
+ ret = alpha_pll_lucid_evo_enable(hw);
+ if (ret)
+ return ret;
+
+ alpha_pll_lucid_evo_disable(hw);
+
+ return 0;
+}
+
static unsigned long alpha_pll_lucid_evo_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -2114,3 +2179,72 @@ const struct clk_ops clk_alpha_pll_postdiv_lucid_evo_ops = {
.set_rate = clk_lucid_evo_pll_postdiv_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_lucid_evo_ops);
+
+const struct clk_ops clk_alpha_pll_lucid_evo_ops = {
+ .prepare = alpha_pll_lucid_evo_prepare,
+ .enable = alpha_pll_lucid_evo_enable,
+ .disable = alpha_pll_lucid_evo_disable,
+ .is_enabled = clk_trion_pll_is_enabled,
+ .recalc_rate = alpha_pll_lucid_evo_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+ .set_rate = alpha_pll_lucid_5lpe_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_lucid_evo_ops);
+
+void clk_rivian_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config)
+{
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL(pll), config->config_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U(pll), config->config_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U1(pll), config->config_ctl_hi1_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL(pll), config->test_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U(pll), config->test_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), config->l);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL(pll), config->user_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U(pll), config->user_ctl_hi_val);
+
+ regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY);
+
+ regmap_update_bits(regmap, PLL_MODE(pll),
+ PLL_RESET_N | PLL_BYPASSNL | PLL_OUTCTRL,
+ PLL_RESET_N | PLL_BYPASSNL);
+}
+EXPORT_SYMBOL_GPL(clk_rivian_evo_pll_configure);
+
+static unsigned long clk_rivian_evo_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 l;
+
+ regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
+
+ return parent_rate * l;
+}
+
+static long clk_rivian_evo_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ unsigned long min_freq, max_freq;
+ u32 l;
+ u64 a;
+
+ rate = alpha_pll_round_rate(rate, *prate, &l, &a, 0);
+ if (!pll->vco_table || alpha_pll_find_vco(pll, rate))
+ return rate;
+
+ min_freq = pll->vco_table[0].min_freq;
+ max_freq = pll->vco_table[pll->num_vco - 1].max_freq;
+
+ return clamp(rate, min_freq, max_freq);
+}
+
+const struct clk_ops clk_alpha_pll_rivian_evo_ops = {
+ .enable = alpha_pll_lucid_5lpe_enable,
+ .disable = alpha_pll_lucid_5lpe_disable,
+ .is_enabled = clk_trion_pll_is_enabled,
+ .recalc_rate = clk_rivian_evo_pll_recalc_rate,
+ .round_rate = clk_rivian_evo_pll_round_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_rivian_evo_ops);
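A worked example of the CAL_L packing in clk_lucid_evo_pll_configure() above, assuming the driver's TRION_PLL_CAL_VAL of 0x44 and a LUCID_EVO_PLL_CAL_L_VAL_SHIFT of 16 (both defined elsewhere in this file) and an illustrative config->l of 0x1e:

    lval = 0x1e | (0x44 << 16) = 0x0044001e

so a single write to PLL_L_VAL carries the operating L value in bits 15:0 and the calibration L value in bits 31:16.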
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index 6e9907deaf30..447efb82fe59 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -18,6 +18,7 @@ enum {
CLK_ALPHA_PLL_TYPE_AGERA,
CLK_ALPHA_PLL_TYPE_ZONDA,
CLK_ALPHA_PLL_TYPE_LUCID_EVO,
+ CLK_ALPHA_PLL_TYPE_RIVIAN_EVO,
CLK_ALPHA_PLL_TYPE_MAX,
};
@@ -152,9 +153,14 @@ extern const struct clk_ops clk_alpha_pll_postdiv_lucid_5lpe_ops;
extern const struct clk_ops clk_alpha_pll_zonda_ops;
#define clk_alpha_pll_postdiv_zonda_ops clk_alpha_pll_postdiv_fabia_ops
+
+extern const struct clk_ops clk_alpha_pll_lucid_evo_ops;
extern const struct clk_ops clk_alpha_pll_fixed_lucid_evo_ops;
extern const struct clk_ops clk_alpha_pll_postdiv_lucid_evo_ops;
+extern const struct clk_ops clk_alpha_pll_rivian_evo_ops;
+#define clk_alpha_pll_postdiv_rivian_evo_ops clk_alpha_pll_postdiv_fabia_ops
+
void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
void clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
@@ -168,6 +174,9 @@ void clk_agera_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
void clk_zonda_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
-
+void clk_lucid_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config);
+void clk_rivian_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config);
#endif
diff --git a/drivers/clk/qcom/clk-hfpll.c b/drivers/clk/qcom/clk-hfpll.c
index e847d586a73a..7dd17c184b69 100644
--- a/drivers/clk/qcom/clk-hfpll.c
+++ b/drivers/clk/qcom/clk-hfpll.c
@@ -72,13 +72,16 @@ static void __clk_hfpll_enable(struct clk_hw *hw)
regmap_update_bits(regmap, hd->mode_reg, PLL_RESET_N, PLL_RESET_N);
/* Wait for PLL to lock. */
- if (hd->status_reg) {
- do {
- regmap_read(regmap, hd->status_reg, &val);
- } while (!(val & BIT(hd->lock_bit)));
- } else {
+ if (hd->status_reg)
+ /*
+ * Busy-wait for the PLL to lock. This should never time out; the
+ * timeout only guards against an indefinite stall.
+ */
+ regmap_read_poll_timeout(regmap, hd->status_reg, val,
+ val & BIT(hd->lock_bit), 0,
+ 100 * USEC_PER_MSEC);
+ else
udelay(60);
- }
/* Enable PLL output. */
regmap_update_bits(regmap, hd->mode_reg, PLL_OUTCTRL, PLL_OUTCTRL);
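For reference, regmap_read_poll_timeout() returns 0 as soon as its condition argument evaluates true, and -ETIMEDOUT (or a read error) otherwise, so the condition expresses the exit condition of the open-coded loop it replaces. A minimal sketch, with hypothetical register offset and lock-bit names standing in for the hfpll data:

    #include <linux/bits.h>
    #include <linux/regmap.h>

    #define PLL_STATUS_REG  0x1c            /* hypothetical status register offset */
    #define PLL_LOCK        BIT(16)         /* hypothetical lock bit */

    /* Poll PLL_STATUS_REG until PLL_LOCK is set, giving up after 100 ms. */
    static int wait_for_pll_lock(struct regmap *regmap)
    {
            u32 val;

            return regmap_read_poll_timeout(regmap, PLL_STATUS_REG, val,
                                            val & PLL_LOCK, 0, 100000);
    }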
diff --git a/drivers/clk/qcom/clk-krait.c b/drivers/clk/qcom/clk-krait.c
index 59f1af415b58..45da736bd5f4 100644
--- a/drivers/clk/qcom/clk-krait.c
+++ b/drivers/clk/qcom/clk-krait.c
@@ -18,13 +18,23 @@
static DEFINE_SPINLOCK(krait_clock_reg_lock);
#define LPL_SHIFT 8
+#define SECCLKAGD BIT(4)
+
static void __krait_mux_set_sel(struct krait_mux_clk *mux, int sel)
{
unsigned long flags;
u32 regval;
spin_lock_irqsave(&krait_clock_reg_lock, flags);
+
regval = krait_get_l2_indirect_reg(mux->offset);
+
+ /* apq/ipq8064 Errata: disable sec_src clock gating during switch. */
+ if (mux->disable_sec_src_gating) {
+ regval |= SECCLKAGD;
+ krait_set_l2_indirect_reg(mux->offset, regval);
+ }
+
regval &= ~(mux->mask << mux->shift);
regval |= (sel & mux->mask) << mux->shift;
if (mux->lpl) {
@@ -32,11 +42,22 @@ static void __krait_mux_set_sel(struct krait_mux_clk *mux, int sel)
regval |= (sel & mux->mask) << (mux->shift + LPL_SHIFT);
}
krait_set_l2_indirect_reg(mux->offset, regval);
- spin_unlock_irqrestore(&krait_clock_reg_lock, flags);
+
+ /* apq/ipq8064 Errata: re-enable sec_src clock gating. */
+ if (mux->disable_sec_src_gating) {
+ regval &= ~SECCLKAGD;
+ krait_set_l2_indirect_reg(mux->offset, regval);
+ }
/* Wait for switch to complete. */
mb();
udelay(1);
+
+ /*
+ * Unlock now to make sure the mux register is not
+ * modified while switching to the new parent.
+ */
+ spin_unlock_irqrestore(&krait_clock_reg_lock, flags);
}
static int krait_mux_set_parent(struct clk_hw *hw, u8 index)
diff --git a/drivers/clk/qcom/clk-krait.h b/drivers/clk/qcom/clk-krait.h
index 9120bd2f5297..f930538c539e 100644
--- a/drivers/clk/qcom/clk-krait.h
+++ b/drivers/clk/qcom/clk-krait.h
@@ -15,6 +15,7 @@ struct krait_mux_clk {
u8 safe_sel;
u8 old_index;
bool reparent;
+ bool disable_sec_src_gating;
struct clk_hw hw;
struct notifier_block clk_nb;
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 8e5dce09d162..28019edd2a50 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -13,6 +13,7 @@
#include <linux/rational.h>
#include <linux/regmap.h>
#include <linux/math64.h>
+#include <linux/minmax.h>
#include <linux/slab.h>
#include <asm/div64.h>
@@ -437,7 +438,7 @@ static int clk_rcg2_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
static int clk_rcg2_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
- u32 notn_m, n, m, d, not2d, mask, duty_per;
+ u32 notn_m, n, m, d, not2d, mask, duty_per, cfg;
int ret;
/* Duty-cycle cannot be modified for non-MND RCGs */
@@ -448,6 +449,11 @@ static int clk_rcg2_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m);
regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
+ regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
+
+ /* Duty-cycle cannot be modified if MND divider is in bypass mode. */
+ if (!(cfg & CFG_MODE_MASK))
+ return -EINVAL;
n = (~(notn_m) + m) & mask;
@@ -456,9 +462,11 @@ static int clk_rcg2_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
/* Calculate 2d value */
d = DIV_ROUND_CLOSEST(n * duty_per * 2, 100);
- /* Check bit widths of 2d. If D is too big reduce duty cycle. */
- if (d > mask)
- d = mask;
+ /*
+ * Check the bit width of 2d. If D is too big, reduce the duty cycle.
+ * Also make sure it is never zero.
+ */
+ d = clamp_val(d, 1, mask);
if ((d / 2) > (n - m))
d = (n - m) * 2;
diff --git a/drivers/clk/qcom/clk-regmap-phy-mux.c b/drivers/clk/qcom/clk-regmap-phy-mux.c
new file mode 100644
index 000000000000..7b7243b7107d
--- /dev/null
+++ b/drivers/clk/qcom/clk-regmap-phy-mux.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022, Linaro Ltd.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/bitfield.h>
+#include <linux/regmap.h>
+#include <linux/export.h>
+
+#include "clk-regmap.h"
+#include "clk-regmap-phy-mux.h"
+
+#define PHY_MUX_MASK GENMASK(1, 0)
+#define PHY_MUX_PHY_SRC 0
+#define PHY_MUX_REF_SRC 2
+
+static inline struct clk_regmap_phy_mux *to_clk_regmap_phy_mux(struct clk_regmap *clkr)
+{
+ return container_of(clkr, struct clk_regmap_phy_mux, clkr);
+}
+
+static int phy_mux_is_enabled(struct clk_hw *hw)
+{
+ struct clk_regmap *clkr = to_clk_regmap(hw);
+ struct clk_regmap_phy_mux *phy_mux = to_clk_regmap_phy_mux(clkr);
+ unsigned int val;
+
+ regmap_read(clkr->regmap, phy_mux->reg, &val);
+ val = FIELD_GET(PHY_MUX_MASK, val);
+
+ WARN_ON(val != PHY_MUX_PHY_SRC && val != PHY_MUX_REF_SRC);
+
+ return val == PHY_MUX_PHY_SRC;
+}
+
+static int phy_mux_enable(struct clk_hw *hw)
+{
+ struct clk_regmap *clkr = to_clk_regmap(hw);
+ struct clk_regmap_phy_mux *phy_mux = to_clk_regmap_phy_mux(clkr);
+
+ return regmap_update_bits(clkr->regmap, phy_mux->reg,
+ PHY_MUX_MASK,
+ FIELD_PREP(PHY_MUX_MASK, PHY_MUX_PHY_SRC));
+}
+
+static void phy_mux_disable(struct clk_hw *hw)
+{
+ struct clk_regmap *clkr = to_clk_regmap(hw);
+ struct clk_regmap_phy_mux *phy_mux = to_clk_regmap_phy_mux(clkr);
+
+ regmap_update_bits(clkr->regmap, phy_mux->reg,
+ PHY_MUX_MASK,
+ FIELD_PREP(PHY_MUX_MASK, PHY_MUX_REF_SRC));
+}
+
+const struct clk_ops clk_regmap_phy_mux_ops = {
+ .enable = phy_mux_enable,
+ .disable = phy_mux_disable,
+ .is_enabled = phy_mux_is_enabled,
+};
+EXPORT_SYMBOL_GPL(clk_regmap_phy_mux_ops);
diff --git a/drivers/clk/qcom/clk-regmap-phy-mux.h b/drivers/clk/qcom/clk-regmap-phy-mux.h
new file mode 100644
index 000000000000..614dd384695c
--- /dev/null
+++ b/drivers/clk/qcom/clk-regmap-phy-mux.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022, Linaro Ltd.
+ */
+
+#ifndef __QCOM_CLK_REGMAP_PHY_MUX_H__
+#define __QCOM_CLK_REGMAP_PHY_MUX_H__
+
+#include "clk-regmap.h"
+
+/*
+ * A clock implementation for PHY pipe and symbol clock muxes.
+ *
+ * If the clock is running off the from-PHY source, report it as enabled;
+ * otherwise (when it is running off the reference source) report it as
+ * disabled.
+ *
+ * This way the PHY will disable the pipe clock before turning off the GDSC,
+ * which in turn leads to the corresponding pipe_clk_src being disabled (and
+ * thus parked to a safe reference clock source). And vice versa: after
+ * enabling the GDSC the PHY enables the pipe clock, which causes
+ * pipe_clk_src to be switched from the safe source to the working one.
+ *
+ * On some platforms this should be used for the UFS symbol_clk_src clocks
+ * too.
+ */
+struct clk_regmap_phy_mux {
+ u32 reg;
+ struct clk_regmap clkr;
+};
+
+extern const struct clk_ops clk_regmap_phy_mux_ops;
+
+#endif
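As a usage sketch (this mirrors the gcc-sc7280.c conversion further down in this diff), a clock provider that includes "clk-regmap-phy-mux.h" describes such a clock with only the register offset and its single PHY parent, and points the ops at clk_regmap_phy_mux_ops:

    static struct clk_regmap_phy_mux gcc_pcie_0_pipe_clk_src = {
            .reg = 0x6b054,                 /* offset taken from the sc7280 hunk below */
            .clkr = {
                    .hw.init = &(struct clk_init_data){
                            .name = "gcc_pcie_0_pipe_clk_src",
                            .parent_data = &(const struct clk_parent_data){
                                    .fw_name = "pcie_0_pipe_clk",
                                    .name = "pcie_0_pipe_clk",
                            },
                            .num_parents = 1,
                            .ops = &clk_regmap_phy_mux_ops,
                    },
            },
    };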
diff --git a/drivers/clk/qcom/clk-rpm.c b/drivers/clk/qcom/clk-rpm.c
index a18811c38018..747c473b0b5e 100644
--- a/drivers/clk/qcom/clk-rpm.c
+++ b/drivers/clk/qcom/clk-rpm.c
@@ -23,6 +23,14 @@
#define QCOM_RPM_SCALING_ENABLE_ID 0x2
#define QCOM_RPM_XO_MODE_ON 0x2
+static const struct clk_parent_data gcc_pxo[] = {
+ { .fw_name = "pxo", .name = "pxo_board" },
+};
+
+static const struct clk_parent_data gcc_cxo[] = {
+ { .fw_name = "cxo", .name = "cxo_board" },
+};
+
#define DEFINE_CLK_RPM(_platform, _name, _active, r_id) \
static struct clk_rpm _platform##_##_active; \
static struct clk_rpm _platform##_##_name = { \
@@ -32,8 +40,8 @@
.hw.init = &(struct clk_init_data){ \
.ops = &clk_rpm_ops, \
.name = #_name, \
- .parent_names = (const char *[]){ "pxo_board" }, \
- .num_parents = 1, \
+ .parent_data = gcc_pxo, \
+ .num_parents = ARRAY_SIZE(gcc_pxo), \
}, \
}; \
static struct clk_rpm _platform##_##_active = { \
@@ -44,8 +52,8 @@
.hw.init = &(struct clk_init_data){ \
.ops = &clk_rpm_ops, \
.name = #_active, \
- .parent_names = (const char *[]){ "pxo_board" }, \
- .num_parents = 1, \
+ .parent_data = gcc_pxo, \
+ .num_parents = ARRAY_SIZE(gcc_pxo), \
}, \
}
@@ -56,8 +64,8 @@
.hw.init = &(struct clk_init_data){ \
.ops = &clk_rpm_xo_ops, \
.name = #_name, \
- .parent_names = (const char *[]){ "cxo_board" }, \
- .num_parents = 1, \
+ .parent_data = gcc_cxo, \
+ .num_parents = ARRAY_SIZE(gcc_cxo), \
}, \
}
@@ -68,8 +76,8 @@
.hw.init = &(struct clk_init_data){ \
.ops = &clk_rpm_fixed_ops, \
.name = #_name, \
- .parent_names = (const char *[]){ "pxo" }, \
- .num_parents = 1, \
+ .parent_data = gcc_pxo, \
+ .num_parents = ARRAY_SIZE(gcc_pxo), \
}, \
}
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index aed907982344..c07cab6905cb 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -274,6 +274,11 @@ static int clk_rpmh_bcm_send_cmd(struct clk_rpmh *c, bool enable)
cmd.addr = c->res_addr;
cmd.data = BCM_TCS_CMD(1, enable, 0, cmd_state);
+ /*
+ * Send only an ACTIVE_ONLY state request. RPMh continues to
+ * use the active state while in sleep/wake, as long as the
+ * sleep/wake state has never been set.
+ */
ret = clk_rpmh_send(c, RPMH_ACTIVE_ONLY_STATE, &cmd, enable);
if (ret) {
dev_err(c->dev, "set active state of %s failed: (%d)\n",
diff --git a/drivers/clk/qcom/dispcc-sm8250.c b/drivers/clk/qcom/dispcc-sm8250.c
index db9379634fb2..709076f0f9d7 100644
--- a/drivers/clk/qcom/dispcc-sm8250.c
+++ b/drivers/clk/qcom/dispcc-sm8250.c
@@ -43,6 +43,10 @@ static struct pll_vco vco_table[] = {
{ 249600000, 2000000000, 0 },
};
+static struct pll_vco lucid_5lpe_vco[] = {
+ { 249600000, 1750000000, 0 },
+};
+
static struct alpha_pll_config disp_cc_pll0_config = {
.l = 0x47,
.alpha = 0xE000,
@@ -1134,7 +1138,6 @@ static struct gdsc mdss_gdsc = {
},
.pwrsts = PWRSTS_OFF_ON,
.flags = HW_CTRL,
- .supply = "mmcx",
};
static struct clk_regmap *disp_cc_sm8250_clocks[] = {
@@ -1228,6 +1231,7 @@ static const struct of_device_id disp_cc_sm8250_match_table[] = {
{ .compatible = "qcom,sc8180x-dispcc" },
{ .compatible = "qcom,sm8150-dispcc" },
{ .compatible = "qcom,sm8250-dispcc" },
+ { .compatible = "qcom,sm8350-dispcc" },
{ }
};
MODULE_DEVICE_TABLE(of, disp_cc_sm8250_match_table);
@@ -1258,7 +1262,7 @@ static int disp_cc_sm8250_probe(struct platform_device *pdev)
return PTR_ERR(regmap);
}
- /* note: trion == lucid, except for the prepare() op */
+ /* Apply differences for SM8150 and SM8350 */
BUILD_BUG_ON(CLK_ALPHA_PLL_TYPE_TRION != CLK_ALPHA_PLL_TYPE_LUCID);
if (of_device_is_compatible(pdev->dev.of_node, "qcom,sc8180x-dispcc") ||
of_device_is_compatible(pdev->dev.of_node, "qcom,sm8150-dispcc")) {
@@ -1270,6 +1274,62 @@ static int disp_cc_sm8250_probe(struct platform_device *pdev)
disp_cc_pll1_config.config_ctl_hi1_val = 0x00000024;
disp_cc_pll1_config.user_ctl_hi1_val = 0x000000D0;
disp_cc_pll1_init.ops = &clk_alpha_pll_trion_ops;
+ } else if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8350-dispcc")) {
+ static struct clk_rcg2 * const rcgs[] = {
+ &disp_cc_mdss_byte0_clk_src,
+ &disp_cc_mdss_byte1_clk_src,
+ &disp_cc_mdss_dp_aux1_clk_src,
+ &disp_cc_mdss_dp_aux_clk_src,
+ &disp_cc_mdss_dp_link1_clk_src,
+ &disp_cc_mdss_dp_link_clk_src,
+ &disp_cc_mdss_dp_pixel1_clk_src,
+ &disp_cc_mdss_dp_pixel2_clk_src,
+ &disp_cc_mdss_dp_pixel_clk_src,
+ &disp_cc_mdss_esc0_clk_src,
+ &disp_cc_mdss_mdp_clk_src,
+ &disp_cc_mdss_pclk0_clk_src,
+ &disp_cc_mdss_pclk1_clk_src,
+ &disp_cc_mdss_rot_clk_src,
+ &disp_cc_mdss_vsync_clk_src,
+ };
+ static struct clk_regmap_div * const divs[] = {
+ &disp_cc_mdss_byte0_div_clk_src,
+ &disp_cc_mdss_byte1_div_clk_src,
+ &disp_cc_mdss_dp_link1_div_clk_src,
+ &disp_cc_mdss_dp_link_div_clk_src,
+ };
+ unsigned int i;
+ static bool offset_applied;
+
+ /*
+ * note: trion == lucid, except for the prepare() op
+ * only apply the offsets once (in case of deferred probe)
+ */
+ if (!offset_applied) {
+ for (i = 0; i < ARRAY_SIZE(rcgs); i++)
+ rcgs[i]->cmd_rcgr -= 4;
+
+ for (i = 0; i < ARRAY_SIZE(divs); i++) {
+ divs[i]->reg -= 4;
+ divs[i]->width = 4;
+ }
+
+ disp_cc_mdss_ahb_clk.halt_reg -= 4;
+ disp_cc_mdss_ahb_clk.clkr.enable_reg -= 4;
+
+ offset_applied = true;
+ }
+
+ disp_cc_mdss_ahb_clk_src.cmd_rcgr = 0x22a0;
+
+ disp_cc_pll0_config.config_ctl_hi1_val = 0x2a9a699c;
+ disp_cc_pll0_config.test_ctl_hi1_val = 0x01800000;
+ disp_cc_pll0_init.ops = &clk_alpha_pll_lucid_5lpe_ops;
+ disp_cc_pll0.vco_table = lucid_5lpe_vco;
+ disp_cc_pll1_config.config_ctl_hi1_val = 0x2a9a699c;
+ disp_cc_pll1_config.test_ctl_hi1_val = 0x01800000;
+ disp_cc_pll1_init.ops = &clk_alpha_pll_lucid_5lpe_ops;
+ disp_cc_pll1.vco_table = lucid_5lpe_vco;
}
clk_lucid_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c
index 541016db3c4b..42d185fe19c8 100644
--- a/drivers/clk/qcom/gcc-ipq8074.c
+++ b/drivers/clk/qcom/gcc-ipq8074.c
@@ -22,6 +22,7 @@
#include "clk-alpha-pll.h"
#include "clk-regmap-divider.h"
#include "clk-regmap-mux.h"
+#include "gdsc.h"
#include "reset.h"
enum {
@@ -662,6 +663,7 @@ static struct clk_branch gcc_sleep_clk_src = {
},
.num_parents = 1,
.ops = &clk_branch2_ops,
+ .flags = CLK_IS_CRITICAL,
},
},
};
@@ -1788,8 +1790,10 @@ static struct clk_regmap_div nss_port4_tx_div_clk_src = {
static const struct freq_tbl ftbl_nss_port5_rx_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(25000000, P_UNIPHY1_RX, 12.5, 0, 0),
+ F(25000000, P_UNIPHY0_RX, 5, 0, 0),
F(78125000, P_UNIPHY1_RX, 4, 0, 0),
F(125000000, P_UNIPHY1_RX, 2.5, 0, 0),
+ F(125000000, P_UNIPHY0_RX, 1, 0, 0),
F(156250000, P_UNIPHY1_RX, 2, 0, 0),
F(312500000, P_UNIPHY1_RX, 1, 0, 0),
{ }
@@ -1828,8 +1832,10 @@ static struct clk_regmap_div nss_port5_rx_div_clk_src = {
static const struct freq_tbl ftbl_nss_port5_tx_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(25000000, P_UNIPHY1_TX, 12.5, 0, 0),
+ F(25000000, P_UNIPHY0_TX, 5, 0, 0),
F(78125000, P_UNIPHY1_TX, 4, 0, 0),
F(125000000, P_UNIPHY1_TX, 2.5, 0, 0),
+ F(125000000, P_UNIPHY0_TX, 1, 0, 0),
F(156250000, P_UNIPHY1_TX, 2, 0, 0),
F(312500000, P_UNIPHY1_TX, 1, 0, 0),
{ }
@@ -1867,8 +1873,10 @@ static struct clk_regmap_div nss_port5_tx_div_clk_src = {
static const struct freq_tbl ftbl_nss_port6_rx_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
+ F(25000000, P_UNIPHY2_RX, 5, 0, 0),
F(25000000, P_UNIPHY2_RX, 12.5, 0, 0),
F(78125000, P_UNIPHY2_RX, 4, 0, 0),
+ F(125000000, P_UNIPHY2_RX, 1, 0, 0),
F(125000000, P_UNIPHY2_RX, 2.5, 0, 0),
F(156250000, P_UNIPHY2_RX, 2, 0, 0),
F(312500000, P_UNIPHY2_RX, 1, 0, 0),
@@ -1907,8 +1915,10 @@ static struct clk_regmap_div nss_port6_rx_div_clk_src = {
static const struct freq_tbl ftbl_nss_port6_tx_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
+ F(25000000, P_UNIPHY2_TX, 5, 0, 0),
F(25000000, P_UNIPHY2_TX, 12.5, 0, 0),
F(78125000, P_UNIPHY2_TX, 4, 0, 0),
+ F(125000000, P_UNIPHY2_TX, 1, 0, 0),
F(125000000, P_UNIPHY2_TX, 2.5, 0, 0),
F(156250000, P_UNIPHY2_TX, 2, 0, 0),
F(312500000, P_UNIPHY2_TX, 1, 0, 0),
@@ -3174,6 +3184,24 @@ static struct clk_branch gcc_nss_ptp_ref_clk = {
},
};
+static struct clk_branch gcc_crypto_ppe_clk = {
+ .halt_reg = 0x68310,
+ .halt_bit = 31,
+ .clkr = {
+ .enable_reg = 0x68310,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_crypto_ppe_clk",
+ .parent_names = (const char *[]){
+ "nss_ppe_clk_src"
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_nssnoc_ce_apb_clk = {
.halt_reg = 0x6830c,
.clkr = {
@@ -3346,6 +3374,7 @@ static struct clk_branch gcc_nssnoc_ubi1_ahb_clk = {
static struct clk_branch gcc_ubi0_ahb_clk = {
.halt_reg = 0x6820c,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x6820c,
.enable_mask = BIT(0),
@@ -3363,6 +3392,7 @@ static struct clk_branch gcc_ubi0_ahb_clk = {
static struct clk_branch gcc_ubi0_axi_clk = {
.halt_reg = 0x68200,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68200,
.enable_mask = BIT(0),
@@ -3380,6 +3410,7 @@ static struct clk_branch gcc_ubi0_axi_clk = {
static struct clk_branch gcc_ubi0_nc_axi_clk = {
.halt_reg = 0x68204,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68204,
.enable_mask = BIT(0),
@@ -3397,6 +3428,7 @@ static struct clk_branch gcc_ubi0_nc_axi_clk = {
static struct clk_branch gcc_ubi0_core_clk = {
.halt_reg = 0x68210,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68210,
.enable_mask = BIT(0),
@@ -3414,6 +3446,7 @@ static struct clk_branch gcc_ubi0_core_clk = {
static struct clk_branch gcc_ubi0_mpt_clk = {
.halt_reg = 0x68208,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68208,
.enable_mask = BIT(0),
@@ -3431,6 +3464,7 @@ static struct clk_branch gcc_ubi0_mpt_clk = {
static struct clk_branch gcc_ubi1_ahb_clk = {
.halt_reg = 0x6822c,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x6822c,
.enable_mask = BIT(0),
@@ -3448,6 +3482,7 @@ static struct clk_branch gcc_ubi1_ahb_clk = {
static struct clk_branch gcc_ubi1_axi_clk = {
.halt_reg = 0x68220,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68220,
.enable_mask = BIT(0),
@@ -3465,6 +3500,7 @@ static struct clk_branch gcc_ubi1_axi_clk = {
static struct clk_branch gcc_ubi1_nc_axi_clk = {
.halt_reg = 0x68224,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68224,
.enable_mask = BIT(0),
@@ -3482,6 +3518,7 @@ static struct clk_branch gcc_ubi1_nc_axi_clk = {
static struct clk_branch gcc_ubi1_core_clk = {
.halt_reg = 0x68230,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68230,
.enable_mask = BIT(0),
@@ -3499,6 +3536,7 @@ static struct clk_branch gcc_ubi1_core_clk = {
static struct clk_branch gcc_ubi1_mpt_clk = {
.halt_reg = 0x68228,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x68228,
.enable_mask = BIT(0),
@@ -4371,6 +4409,49 @@ static struct clk_branch gcc_pcie0_axi_s_bridge_clk = {
},
};
+static struct gdsc usb0_gdsc = {
+ .gdscr = 0x3e078,
+ .pd = {
+ .name = "usb0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc usb1_gdsc = {
+ .gdscr = 0x3f078,
+ .pd = {
+ .name = "usb1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static const struct alpha_pll_config ubi32_pll_config = {
+ .l = 0x4e,
+ .config_ctl_val = 0x200d4aa8,
+ .config_ctl_hi_val = 0x3c2,
+ .main_output_mask = BIT(0),
+ .aux_output_mask = BIT(1),
+ .pre_div_val = 0x0,
+ .pre_div_mask = BIT(12),
+ .post_div_val = 0x0,
+ .post_div_mask = GENMASK(9, 8),
+};
+
+static const struct alpha_pll_config nss_crypto_pll_config = {
+ .l = 0x3e,
+ .alpha = 0x0,
+ .alpha_hi = 0x80,
+ .config_ctl_val = 0x4001055b,
+ .main_output_mask = BIT(0),
+ .pre_div_val = 0x0,
+ .pre_div_mask = GENMASK(14, 12),
+ .post_div_val = 0x1 << 8,
+ .post_div_mask = GENMASK(11, 8),
+ .vco_mask = GENMASK(21, 20),
+ .vco_val = 0x0,
+ .alpha_en_mask = BIT(24),
+};
+
static struct clk_hw *gcc_ipq8074_hws[] = {
&gpll0_out_main_div2.hw,
&gpll6_out_main_div2.hw,
@@ -4609,6 +4690,7 @@ static struct clk_regmap *gcc_ipq8074_clks[] = {
[GCC_PCIE0_RCHNG_CLK_SRC] = &pcie0_rchng_clk_src.clkr,
[GCC_PCIE0_RCHNG_CLK] = &gcc_pcie0_rchng_clk.clkr,
[GCC_PCIE0_AXI_S_BRIDGE_CLK] = &gcc_pcie0_axi_s_bridge_clk.clkr,
+ [GCC_CRYPTO_PPE_CLK] = &gcc_crypto_ppe_clk.clkr,
};
static const struct qcom_reset_map gcc_ipq8074_resets[] = {
@@ -4746,6 +4828,11 @@ static const struct qcom_reset_map gcc_ipq8074_resets[] = {
[GCC_PCIE1_AXI_MASTER_STICKY_ARES] = { 0x76040, 6 },
};
+static struct gdsc *gcc_ipq8074_gdscs[] = {
+ [USB0_GDSC] = &usb0_gdsc,
+ [USB1_GDSC] = &usb1_gdsc,
+};
+
static const struct of_device_id gcc_ipq8074_match_table[] = {
{ .compatible = "qcom,gcc-ipq8074" },
{ }
@@ -4768,11 +4855,26 @@ static const struct qcom_cc_desc gcc_ipq8074_desc = {
.num_resets = ARRAY_SIZE(gcc_ipq8074_resets),
.clk_hws = gcc_ipq8074_hws,
.num_clk_hws = ARRAY_SIZE(gcc_ipq8074_hws),
+ .gdscs = gcc_ipq8074_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_ipq8074_gdscs),
};
static int gcc_ipq8074_probe(struct platform_device *pdev)
{
- return qcom_cc_probe(pdev, &gcc_ipq8074_desc);
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gcc_ipq8074_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /* SW Workaround for UBI32 Huayra PLL */
+ regmap_update_bits(regmap, 0x2501c, BIT(26), BIT(26));
+
+ clk_alpha_pll_configure(&ubi32_pll_main, regmap, &ubi32_pll_config);
+ clk_alpha_pll_configure(&nss_crypto_pll_main, regmap,
+ &nss_crypto_pll_config);
+
+ return qcom_cc_really_probe(pdev, &gcc_ipq8074_desc, regmap);
}
static struct platform_driver gcc_ipq8074_driver = {
diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
index 17e4a5a2a9fd..9a46794f6eb8 100644
--- a/drivers/clk/qcom/gcc-msm8916.c
+++ b/drivers/clk/qcom/gcc-msm8916.c
@@ -765,7 +765,20 @@ static struct clk_rcg2 cci_clk_src = {
},
};
+/*
+ * This is a frequency table for "General Purpose" clocks.
+ * These clocks can be muxed to the SoC pins and may be used by
+ * external devices. They are often used as a PWM source.
+ *
+ * See comment at ftbl_gcc_gp1_3_clk.
+ */
static const struct freq_tbl ftbl_gcc_camss_gp0_1_clk[] = {
+ F(10000, P_XO, 16, 1, 120),
+ F(100000, P_XO, 16, 1, 12),
+ F(500000, P_GPLL0, 16, 1, 100),
+ F(1000000, P_GPLL0, 16, 1, 50),
+ F(2500000, P_GPLL0, 16, 1, 20),
+ F(5000000, P_GPLL0, 16, 1, 10),
F(100000000, P_GPLL0, 8, 0, 0),
F(200000000, P_GPLL0, 4, 0, 0),
{ }
@@ -927,7 +940,29 @@ static struct clk_rcg2 crypto_clk_src = {
},
};
+/*
+ * This is a frequency table for "General Purpose" clocks.
+ * These clocks can be muxed to the SoC pins and may be used by
+ * external devices. They are often used as a PWM source.
+ *
+ * Please note that the MND divider must be enabled for duty-cycle
+ * control to be possible (M != N). Also, since the D register is
+ * programmed with a value multiplied by 2, the duty cycle is
+ * calculated as
+ *
+ *                  (2 * D) % 2^W
+ *   DutyCycle = -------------------
+ *                 2 * (N % 2^W)
+ *
+ * (where W = .mnd_width)
+ * N must be half the maximum register value or less, otherwise
+ * duty-cycle control is limited (e.g. for an 8-bit MND, N should be
+ * less than 128).
+ */
static const struct freq_tbl ftbl_gcc_gp1_3_clk[] = {
+ F(10000, P_XO, 16, 1, 120),
+ F(100000, P_XO, 16, 1, 12),
+ F(500000, P_GPLL0, 16, 1, 100),
+ F(1000000, P_GPLL0, 16, 1, 50),
+ F(2500000, P_GPLL0, 16, 1, 20),
+ F(5000000, P_GPLL0, 16, 1, 10),
F(19200000, P_XO, 1, 0, 0),
{ }
};
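To make the N constraint above concrete (a rough reading of clk_rcg2_set_duty_cycle(), modified earlier in this diff, assuming an 8-bit mnd_width and ignoring the additional N - M cap the code applies): the register stores 2*D, so a duty request of P percent needs roughly 2*N*P/100 to fit in the 8-bit field (at most 255). For the F(..., 1, 120) entries above, N = 120 < 128, so even a 100% request (2*120 = 240) still fits. For a hypothetical N = 200, a 100% request would need 400; clamp_val() caps it at 255, limiting the achievable duty cycle to about 255 / (2 * 200), i.e. roughly 64%.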
diff --git a/drivers/clk/qcom/gcc-msm8939.c b/drivers/clk/qcom/gcc-msm8939.c
index 39ebb443ae3d..8e2d9fb98ad5 100644
--- a/drivers/clk/qcom/gcc-msm8939.c
+++ b/drivers/clk/qcom/gcc-msm8939.c
@@ -632,7 +632,7 @@ static struct clk_rcg2 system_noc_bfdcd_clk_src = {
};
static struct clk_rcg2 bimc_ddr_clk_src = {
- .cmd_rcgr = 0x32004,
+ .cmd_rcgr = 0x32024,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_bimc_map,
.clkr.hw.init = &(struct clk_init_data){
@@ -644,6 +644,18 @@ static struct clk_rcg2 bimc_ddr_clk_src = {
},
};
+static struct clk_rcg2 system_mm_noc_bfdcd_clk_src = {
+ .cmd_rcgr = 0x2600c,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll6a_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "system_mm_noc_bfdcd_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll6a_parent_data,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static const struct freq_tbl ftbl_gcc_camss_ahb_clk[] = {
F(40000000, P_GPLL0, 10, 1, 2),
F(80000000, P_GPLL0, 10, 0, 0),
@@ -1002,7 +1014,8 @@ static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
};
static const struct freq_tbl ftbl_gcc_camss_cci_clk[] = {
- F(19200000, P_XO, 1, 0, 0),
+ F(19200000, P_XO, 1, 0, 0),
+ F(37500000, P_GPLL0, 1, 3, 64),
{ }
};
@@ -1142,6 +1155,9 @@ static struct clk_rcg2 csi1phytimer_clk_src = {
static const struct freq_tbl ftbl_gcc_camss_cpp_clk[] = {
F(160000000, P_GPLL0, 5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(228570000, P_GPLL0, 3.5, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
F(320000000, P_GPLL0, 2.5, 0, 0),
F(465000000, P_GPLL2, 2, 0, 0),
{ }
@@ -1290,6 +1306,8 @@ static const struct freq_tbl ftbl_gcc_mdss_mdp_clk[] = {
F(50000000, P_GPLL0_AUX, 16, 0, 0),
F(80000000, P_GPLL0_AUX, 10, 0, 0),
F(100000000, P_GPLL0_AUX, 8, 0, 0),
+ F(145500000, P_GPLL0_AUX, 5.5, 0, 0),
+ F(153600000, P_GPLL0, 4, 0, 0),
F(160000000, P_GPLL0_AUX, 5, 0, 0),
F(177780000, P_GPLL0_AUX, 4.5, 0, 0),
F(200000000, P_GPLL0_AUX, 4, 0, 0),
@@ -1462,7 +1480,9 @@ static struct clk_rcg2 bimc_gpu_clk_src = {
};
static const struct freq_tbl ftbl_gcc_usb_hs_system_clk[] = {
+ F(57140000, P_GPLL0, 14, 0, 0),
F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
{ }
};
@@ -1823,9 +1843,9 @@ static struct clk_branch gcc_ultaudio_pcnoc_sway_clk = {
};
static const struct freq_tbl ftbl_gcc_venus0_vcodec0_clk[] = {
- F(100000000, P_GPLL0, 8, 0, 0),
- F(160000000, P_GPLL0, 5, 0, 0),
- F(228570000, P_GPLL0, 3.5, 0, 0),
+ F(133330000, P_GPLL0, 6, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
{ }
};
@@ -2441,7 +2461,7 @@ static struct clk_branch gcc_camss_jpeg_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_jpeg_axi_clk",
.parent_data = &(const struct clk_parent_data){
- .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2645,7 +2665,7 @@ static struct clk_branch gcc_camss_vfe_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_vfe_axi_clk",
.parent_data = &(const struct clk_parent_data){
- .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2801,7 +2821,7 @@ static struct clk_branch gcc_mdss_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_axi_clk",
.parent_data = &(const struct clk_parent_data){
- .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3193,7 +3213,7 @@ static struct clk_branch gcc_mdp_tbu_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_mdp_tbu_clk",
.parent_data = &(const struct clk_parent_data){
- .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3211,7 +3231,7 @@ static struct clk_branch gcc_venus_tbu_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_venus_tbu_clk",
.parent_data = &(const struct clk_parent_data){
- .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3229,7 +3249,7 @@ static struct clk_branch gcc_vfe_tbu_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_vfe_tbu_clk",
.parent_data = &(const struct clk_parent_data){
- .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3247,7 +3267,7 @@ static struct clk_branch gcc_jpeg_tbu_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_jpeg_tbu_clk",
.parent_data = &(const struct clk_parent_data){
- .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3484,7 +3504,7 @@ static struct clk_branch gcc_venus0_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_venus0_axi_clk",
.parent_data = &(const struct clk_parent_data){
- .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3623,6 +3643,7 @@ static struct clk_regmap *gcc_msm8939_clocks[] = {
[GPLL2_VOTE] = &gpll2_vote,
[PCNOC_BFDCD_CLK_SRC] = &pcnoc_bfdcd_clk_src.clkr,
[SYSTEM_NOC_BFDCD_CLK_SRC] = &system_noc_bfdcd_clk_src.clkr,
+ [SYSTEM_MM_NOC_BFDCD_CLK_SRC] = &system_mm_noc_bfdcd_clk_src.clkr,
[CAMSS_AHB_CLK_SRC] = &camss_ahb_clk_src.clkr,
[APSS_AHB_CLK_SRC] = &apss_ahb_clk_src.clkr,
[CSI0_CLK_SRC] = &csi0_clk_src.clkr,
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index 051745ef99c8..a6e13b91e4c8 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -3641,6 +3641,9 @@ static int gcc_msm8960_probe(struct platform_device *pdev)
hfpll_l2.d = &hfpll_l2_8064_data;
}
+ if (of_get_available_child_count(pdev->dev.of_node) != 0)
+ return devm_of_platform_populate(&pdev->dev);
+
tsens = platform_device_register_data(&pdev->dev, "qcom-tsens", -1,
NULL, 0);
if (IS_ERR(tsens))
@@ -3655,7 +3658,8 @@ static int gcc_msm8960_remove(struct platform_device *pdev)
{
struct platform_device *tsens = platform_get_drvdata(pdev);
- platform_device_unregister(tsens);
+ if (tsens)
+ platform_device_unregister(tsens);
return 0;
}
diff --git a/drivers/clk/qcom/gcc-msm8994.c b/drivers/clk/qcom/gcc-msm8994.c
index 6b702cdacbf2..0f52c48e89d8 100644
--- a/drivers/clk/qcom/gcc-msm8994.c
+++ b/drivers/clk/qcom/gcc-msm8994.c
@@ -52,7 +52,9 @@ static struct clk_alpha_pll_postdiv gpll0 = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0",
- .parent_names = (const char *[]) { "gpll0_early" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll0_early.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
},
@@ -81,7 +83,9 @@ static struct clk_alpha_pll_postdiv gpll4 = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll4",
- .parent_names = (const char *[]) { "gpll4_early" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll4_early.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
},
diff --git a/drivers/clk/qcom/gcc-sc7280.c b/drivers/clk/qcom/gcc-sc7280.c
index 423627d49719..7ff64d4d5920 100644
--- a/drivers/clk/qcom/gcc-sc7280.c
+++ b/drivers/clk/qcom/gcc-sc7280.c
@@ -17,6 +17,7 @@
#include "clk-rcg.h"
#include "clk-regmap-divider.h"
#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
#include "common.h"
#include "gdsc.h"
#include "reset.h"
@@ -255,26 +256,6 @@ static const struct clk_parent_data gcc_parent_data_5[] = {
{ .hw = &gcc_gpll0_out_even.clkr.hw },
};
-static const struct parent_map gcc_parent_map_6[] = {
- { P_PCIE_0_PIPE_CLK, 0 },
- { P_BI_TCXO, 2 },
-};
-
-static const struct clk_parent_data gcc_parent_data_6[] = {
- { .fw_name = "pcie_0_pipe_clk", .name = "pcie_0_pipe_clk" },
- { .fw_name = "bi_tcxo" },
-};
-
-static const struct parent_map gcc_parent_map_7[] = {
- { P_PCIE_1_PIPE_CLK, 0 },
- { P_BI_TCXO, 2 },
-};
-
-static const struct clk_parent_data gcc_parent_data_7[] = {
- { .fw_name = "pcie_1_pipe_clk", .name = "pcie_1_pipe_clk" },
- { .fw_name = "bi_tcxo" },
-};
-
static const struct parent_map gcc_parent_map_8[] = {
{ P_BI_TCXO, 0 },
{ P_GCC_GPLL0_OUT_MAIN, 1 },
@@ -369,32 +350,32 @@ static const struct clk_parent_data gcc_parent_data_15[] = {
{ .hw = &gcc_mss_gpll0_main_div_clk_src.clkr.hw },
};
-static struct clk_regmap_mux gcc_pcie_0_pipe_clk_src = {
+static struct clk_regmap_phy_mux gcc_pcie_0_pipe_clk_src = {
.reg = 0x6b054,
- .shift = 0,
- .width = 2,
- .parent_map = gcc_parent_map_6,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_pipe_clk_src",
- .parent_data = gcc_parent_data_6,
- .num_parents = ARRAY_SIZE(gcc_parent_data_6),
- .ops = &clk_regmap_mux_closest_ops,
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "pcie_0_pipe_clk",
+ .name = "pcie_0_pipe_clk",
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
},
},
};
-static struct clk_regmap_mux gcc_pcie_1_pipe_clk_src = {
+static struct clk_regmap_phy_mux gcc_pcie_1_pipe_clk_src = {
.reg = 0x8d054,
- .shift = 0,
- .width = 2,
- .parent_map = gcc_parent_map_7,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_pipe_clk_src",
- .parent_data = gcc_parent_data_7,
- .num_parents = ARRAY_SIZE(gcc_parent_data_7),
- .ops = &clk_regmap_mux_closest_ops,
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "pcie_1_pipe_clk",
+ .name = "pcie_1_pipe_clk",
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
},
},
};
diff --git a/drivers/clk/qcom/gcc-sc8280xp.c b/drivers/clk/qcom/gcc-sc8280xp.c
index 4b894442fdf5..a2f3ffcc5849 100644
--- a/drivers/clk/qcom/gcc-sc8280xp.c
+++ b/drivers/clk/qcom/gcc-sc8280xp.c
@@ -20,6 +20,7 @@
#include "clk-regmap.h"
#include "clk-regmap-divider.h"
#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
#include "common.h"
#include "gdsc.h"
#include "reset.h"
@@ -82,11 +83,6 @@ enum {
P_GCC_USB4_PHY_PCIE_PIPEGMUX_CLK_SRC,
P_GCC_USB4_PHY_PIPEGMUX_CLK_SRC,
P_GCC_USB4_PHY_SYS_PIPEGMUX_CLK_SRC,
- P_PCIE_2A_PIPE_CLK,
- P_PCIE_2B_PIPE_CLK,
- P_PCIE_3A_PIPE_CLK,
- P_PCIE_3B_PIPE_CLK,
- P_PCIE_4_PIPE_CLK,
P_QUSB4PHY_1_GCC_USB4_RX0_CLK,
P_QUSB4PHY_1_GCC_USB4_RX1_CLK,
P_QUSB4PHY_GCC_USB4_RX0_CLK,
@@ -351,56 +347,6 @@ static const struct clk_parent_data gcc_parent_data_9[] = {
{ .hw = &gcc_gpll0_out_even.clkr.hw },
};
-static const struct parent_map gcc_parent_map_10[] = {
- { P_PCIE_2A_PIPE_CLK, 0 },
- { P_BI_TCXO, 2 },
-};
-
-static const struct clk_parent_data gcc_parent_data_10[] = {
- { .index = DT_PCIE_2A_PIPE_CLK },
- { .index = DT_BI_TCXO },
-};
-
-static const struct parent_map gcc_parent_map_11[] = {
- { P_PCIE_2B_PIPE_CLK, 0 },
- { P_BI_TCXO, 2 },
-};
-
-static const struct clk_parent_data gcc_parent_data_11[] = {
- { .index = DT_PCIE_2B_PIPE_CLK },
- { .index = DT_BI_TCXO },
-};
-
-static const struct parent_map gcc_parent_map_12[] = {
- { P_PCIE_3A_PIPE_CLK, 0 },
- { P_BI_TCXO, 2 },
-};
-
-static const struct clk_parent_data gcc_parent_data_12[] = {
- { .index = DT_PCIE_3A_PIPE_CLK },
- { .index = DT_BI_TCXO },
-};
-
-static const struct parent_map gcc_parent_map_13[] = {
- { P_PCIE_3B_PIPE_CLK, 0 },
- { P_BI_TCXO, 2 },
-};
-
-static const struct clk_parent_data gcc_parent_data_13[] = {
- { .index = DT_PCIE_3B_PIPE_CLK },
- { .index = DT_BI_TCXO },
-};
-
-static const struct parent_map gcc_parent_map_14[] = {
- { P_PCIE_4_PIPE_CLK, 0 },
- { P_BI_TCXO, 2 },
-};
-
-static const struct clk_parent_data gcc_parent_data_14[] = {
- { .index = DT_PCIE_4_PIPE_CLK },
- { .index = DT_BI_TCXO },
-};
-
static const struct parent_map gcc_parent_map_15[] = {
{ P_BI_TCXO, 0 },
{ P_GCC_GPLL0_OUT_MAIN, 1 },
@@ -741,77 +687,72 @@ static const struct clk_parent_data gcc_parent_data_41[] = {
{ .index = DT_USB4_PHY_GCC_USB4_PCIE_PIPE_CLK },
};
-static struct clk_regmap_mux gcc_pcie_2a_pipe_clk_src = {
+static struct clk_regmap_phy_mux gcc_pcie_2a_pipe_clk_src = {
.reg = 0x9d05c,
- .shift = 0,
- .width = 2,
- .parent_map = gcc_parent_map_10,
.clkr = {
.hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie_2a_pipe_clk_src",
- .parent_data = gcc_parent_data_10,
- .num_parents = ARRAY_SIZE(gcc_parent_data_10),
- .ops = &clk_regmap_mux_closest_ops,
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_2A_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
},
},
};
-static struct clk_regmap_mux gcc_pcie_2b_pipe_clk_src = {
+static struct clk_regmap_phy_mux gcc_pcie_2b_pipe_clk_src = {
.reg = 0x9e05c,
- .shift = 0,
- .width = 2,
- .parent_map = gcc_parent_map_11,
.clkr = {
.hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie_2b_pipe_clk_src",
- .parent_data = gcc_parent_data_11,
- .num_parents = ARRAY_SIZE(gcc_parent_data_11),
- .ops = &clk_regmap_mux_closest_ops,
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_2B_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
},
},
};
-static struct clk_regmap_mux gcc_pcie_3a_pipe_clk_src = {
+static struct clk_regmap_phy_mux gcc_pcie_3a_pipe_clk_src = {
.reg = 0xa005c,
- .shift = 0,
- .width = 2,
- .parent_map = gcc_parent_map_12,
.clkr = {
.hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie_3a_pipe_clk_src",
- .parent_data = gcc_parent_data_12,
- .num_parents = ARRAY_SIZE(gcc_parent_data_12),
- .ops = &clk_regmap_mux_closest_ops,
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_3A_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
},
},
};
-static struct clk_regmap_mux gcc_pcie_3b_pipe_clk_src = {
+static struct clk_regmap_phy_mux gcc_pcie_3b_pipe_clk_src = {
.reg = 0xa205c,
- .shift = 0,
- .width = 2,
- .parent_map = gcc_parent_map_13,
.clkr = {
.hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie_3b_pipe_clk_src",
- .parent_data = gcc_parent_data_13,
- .num_parents = ARRAY_SIZE(gcc_parent_data_13),
- .ops = &clk_regmap_mux_closest_ops,
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_3B_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
},
},
};
-static struct clk_regmap_mux gcc_pcie_4_pipe_clk_src = {
+static struct clk_regmap_phy_mux gcc_pcie_4_pipe_clk_src = {
.reg = 0x6b05c,
- .shift = 0,
- .width = 2,
- .parent_map = gcc_parent_map_14,
.clkr = {
.hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie_4_pipe_clk_src",
- .parent_data = gcc_parent_data_14,
- .num_parents = ARRAY_SIZE(gcc_parent_data_14),
- .ops = &clk_regmap_mux_closest_ops,
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_4_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
},
},
};
@@ -6807,58 +6748,79 @@ static struct clk_branch gcc_video_vcodec_throttle_clk = {
static struct gdsc pcie_0_tunnel_gdsc = {
.gdscr = 0xa4004,
+ .collapse_ctrl = 0x52128,
+ .collapse_mask = BIT(0),
.pd = {
.name = "pcie_0_tunnel_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc pcie_1_tunnel_gdsc = {
.gdscr = 0x8d004,
+ .collapse_ctrl = 0x52128,
+ .collapse_mask = BIT(1),
.pd = {
.name = "pcie_1_tunnel_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc pcie_2a_gdsc = {
.gdscr = 0x9d004,
+ .collapse_ctrl = 0x52128,
+ .collapse_mask = BIT(2),
.pd = {
.name = "pcie_2a_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc pcie_2b_gdsc = {
.gdscr = 0x9e004,
+ .collapse_ctrl = 0x52128,
+ .collapse_mask = BIT(3),
.pd = {
.name = "pcie_2b_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc pcie_3a_gdsc = {
.gdscr = 0xa0004,
+ .collapse_ctrl = 0x52128,
+ .collapse_mask = BIT(4),
.pd = {
.name = "pcie_3a_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc pcie_3b_gdsc = {
.gdscr = 0xa2004,
+ .collapse_ctrl = 0x52128,
+ .collapse_mask = BIT(5),
.pd = {
.name = "pcie_3b_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc pcie_4_gdsc = {
.gdscr = 0x6b004,
+ .collapse_ctrl = 0x52128,
+ .collapse_mask = BIT(6),
.pd = {
.name = "pcie_4_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc ufs_card_gdsc = {
diff --git a/drivers/clk/qcom/gcc-sm6350.c b/drivers/clk/qcom/gcc-sm6350.c
index a4f7fba70393..69412400efa4 100644
--- a/drivers/clk/qcom/gcc-sm6350.c
+++ b/drivers/clk/qcom/gcc-sm6350.c
@@ -2558,7 +2558,7 @@ static int gcc_sm6350_probe(struct platform_device *pdev)
if (ret)
return ret;
- return qcom_cc_really_probe(pdev, &gcc_sm6350_desc, regmap);;
+ return qcom_cc_really_probe(pdev, &gcc_sm6350_desc, regmap);
}
static struct platform_driver gcc_sm6350_driver = {
diff --git a/drivers/clk/qcom/gcc-sm8450.c b/drivers/clk/qcom/gcc-sm8450.c
index 593a195467ff..666efa5ff978 100644
--- a/drivers/clk/qcom/gcc-sm8450.c
+++ b/drivers/clk/qcom/gcc-sm8450.c
@@ -17,6 +17,7 @@
#include "clk-regmap.h"
#include "clk-regmap-divider.h"
#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
#include "gdsc.h"
#include "reset.h"
@@ -26,9 +27,7 @@ enum {
P_GCC_GPLL0_OUT_MAIN,
P_GCC_GPLL4_OUT_MAIN,
P_GCC_GPLL9_OUT_MAIN,
- P_PCIE_0_PIPE_CLK,
P_PCIE_1_PHY_AUX_CLK,
- P_PCIE_1_PIPE_CLK,
P_SLEEP_CLK,
P_UFS_PHY_RX_SYMBOL_0_CLK,
P_UFS_PHY_RX_SYMBOL_1_CLK,
@@ -153,16 +152,6 @@ static const struct clk_parent_data gcc_parent_data_3[] = {
{ .fw_name = "bi_tcxo" },
};
-static const struct parent_map gcc_parent_map_4[] = {
- { P_PCIE_0_PIPE_CLK, 0 },
- { P_BI_TCXO, 2 },
-};
-
-static const struct clk_parent_data gcc_parent_data_4[] = {
- { .fw_name = "pcie_0_pipe_clk", },
- { .fw_name = "bi_tcxo", },
-};
-
static const struct parent_map gcc_parent_map_5[] = {
{ P_PCIE_1_PHY_AUX_CLK, 0 },
{ P_BI_TCXO, 2 },
@@ -173,16 +162,6 @@ static const struct clk_parent_data gcc_parent_data_5[] = {
{ .fw_name = "bi_tcxo" },
};
-static const struct parent_map gcc_parent_map_6[] = {
- { P_PCIE_1_PIPE_CLK, 0 },
- { P_BI_TCXO, 2 },
-};
-
-static const struct clk_parent_data gcc_parent_data_6[] = {
- { .fw_name = "pcie_1_pipe_clk" },
- { .fw_name = "bi_tcxo" },
-};
-
static const struct parent_map gcc_parent_map_7[] = {
{ P_BI_TCXO, 0 },
{ P_GCC_GPLL0_OUT_MAIN, 1 },
@@ -239,17 +218,16 @@ static const struct clk_parent_data gcc_parent_data_11[] = {
{ .fw_name = "bi_tcxo" },
};
-static struct clk_regmap_mux gcc_pcie_0_pipe_clk_src = {
+static struct clk_regmap_phy_mux gcc_pcie_0_pipe_clk_src = {
.reg = 0x7b060,
- .shift = 0,
- .width = 2,
- .parent_map = gcc_parent_map_4,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_pipe_clk_src",
- .parent_data = gcc_parent_data_4,
- .num_parents = ARRAY_SIZE(gcc_parent_data_4),
- .ops = &clk_regmap_mux_closest_ops,
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "pcie_0_pipe_clk",
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
},
},
};
@@ -269,17 +247,16 @@ static struct clk_regmap_mux gcc_pcie_1_phy_aux_clk_src = {
},
};
-static struct clk_regmap_mux gcc_pcie_1_pipe_clk_src = {
+static struct clk_regmap_phy_mux gcc_pcie_1_pipe_clk_src = {
.reg = 0x9d064,
- .shift = 0,
- .width = 2,
- .parent_map = gcc_parent_map_6,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_pipe_clk_src",
- .parent_data = gcc_parent_data_6,
- .num_parents = ARRAY_SIZE(gcc_parent_data_6),
- .ops = &clk_regmap_mux_closest_ops,
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "pcie_1_pipe_clk",
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
},
},
};
diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
index 44520efc6c72..d3244006c661 100644
--- a/drivers/clk/qcom/gdsc.c
+++ b/drivers/clk/qcom/gdsc.c
@@ -132,10 +132,29 @@ static int gdsc_poll_status(struct gdsc *sc, enum gdsc_status status)
return -ETIMEDOUT;
}
+static int gdsc_update_collapse_bit(struct gdsc *sc, bool val)
+{
+ u32 reg, mask;
+ int ret;
+
+ if (sc->collapse_mask) {
+ reg = sc->collapse_ctrl;
+ mask = sc->collapse_mask;
+ } else {
+ reg = sc->gdscr;
+ mask = SW_COLLAPSE_MASK;
+ }
+
+ ret = regmap_update_bits(sc->regmap, reg, mask, val ? mask : 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static int gdsc_toggle_logic(struct gdsc *sc, enum gdsc_status status)
{
int ret;
- u32 val = (status == GDSC_ON) ? 0 : SW_COLLAPSE_MASK;
if (status == GDSC_ON && sc->rsupply) {
ret = regulator_enable(sc->rsupply);
@@ -143,9 +162,7 @@ static int gdsc_toggle_logic(struct gdsc *sc, enum gdsc_status status)
return ret;
}
- ret = regmap_update_bits(sc->regmap, sc->gdscr, SW_COLLAPSE_MASK, val);
- if (ret)
- return ret;
+ ret = gdsc_update_collapse_bit(sc, status == GDSC_OFF);
/* If disabling votable gdscs, don't poll on status */
if ((sc->flags & VOTABLE) && status == GDSC_OFF) {
@@ -420,13 +437,20 @@ static int gdsc_init(struct gdsc *sc)
return ret;
}
+ /* ...and the power-domain */
+ ret = gdsc_pm_runtime_get(sc);
+ if (ret) {
+ if (sc->rsupply)
+ regulator_disable(sc->rsupply);
+ return ret;
+ }
+
/*
* Votable GDSCs can be ON due to Vote from other masters.
* If a Votable GDSC is ON, make sure we have a Vote.
*/
if (sc->flags & VOTABLE) {
- ret = regmap_update_bits(sc->regmap, sc->gdscr,
- SW_COLLAPSE_MASK, val);
+ ret = gdsc_update_collapse_bit(sc, false);
if (ret)
return ret;
}
diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h
index ad313d7210bd..5de48c9439b2 100644
--- a/drivers/clk/qcom/gdsc.h
+++ b/drivers/clk/qcom/gdsc.h
@@ -18,6 +18,8 @@ struct reset_controller_dev;
* @pd: generic power domain
* @regmap: regmap for MMIO accesses
* @gdscr: gsdc control register
+ * @collapse_ctrl: APCS collapse-vote register
+ * @collapse_mask: APCS collapse-vote mask
* @gds_hw_ctrl: gds_hw_ctrl register
* @cxcs: offsets of branch registers to toggle mem/periph bits in
* @cxc_count: number of @cxcs
@@ -35,6 +37,8 @@ struct gdsc {
struct generic_pm_domain *parent;
struct regmap *regmap;
unsigned int gdscr;
+ unsigned int collapse_ctrl;
+ unsigned int collapse_mask;
unsigned int gds_hw_ctrl;
unsigned int clamp_io_ctrl;
unsigned int *cxcs;
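For reference, the new collapse-vote fields are used like the PCIe GDSCs in the gcc-sc8280xp.c hunk above: a votable GDSC points collapse_ctrl at the shared APCS vote register and claims one bit of it (sketch, field values copied from that hunk):

    static struct gdsc pcie_0_tunnel_gdsc = {
            .gdscr = 0xa4004,
            .collapse_ctrl = 0x52128,       /* shared APCS collapse-vote register */
            .collapse_mask = BIT(0),        /* this domain's vote bit */
            .pd = {
                    .name = "pcie_0_tunnel_gdsc",
            },
            .pwrsts = PWRSTS_OFF_ON,
            .flags = VOTABLE,
    };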
diff --git a/drivers/clk/qcom/gpucc-sm8350.c b/drivers/clk/qcom/gpucc-sm8350.c
new file mode 100644
index 000000000000..5367ce654ac9
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-sm8350.c
@@ -0,0 +1,637 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Linaro Limited
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gpucc-sm8350.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "clk-regmap-mux.h"
+#include "clk-regmap-divider.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ P_BI_TCXO,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_MAIN_DIV,
+ P_GPU_CC_PLL0_OUT_MAIN,
+ P_GPU_CC_PLL1_OUT_MAIN,
+};
+
+static struct pll_vco lucid_5lpe_vco[] = {
+ { 249600000, 1750000000, 0 },
+};
+
+static const struct alpha_pll_config gpu_cc_pll0_config = {
+ .l = 0x18,
+ .alpha = 0x6000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x2a9a699c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000000,
+ .test_ctl_hi1_val = 0x01800000,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static const struct clk_parent_data gpu_cc_parent = {
+ .fw_name = "bi_tcxo",
+};
+
+static struct clk_alpha_pll gpu_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_5lpe_vco,
+ .num_vco = ARRAY_SIZE(lucid_5lpe_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_pll0",
+ .parent_data = &gpu_cc_parent,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_5lpe_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config gpu_cc_pll1_config = {
+ .l = 0x1a,
+ .alpha = 0xaaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x2a9a699c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000000,
+ .test_ctl_hi1_val = 0x01800000,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll gpu_cc_pll1 = {
+ .offset = 0x100,
+ .vco_table = lucid_5lpe_vco,
+ .num_vco = ARRAY_SIZE(lucid_5lpe_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_pll1",
+ .parent_data = &gpu_cc_parent,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_5lpe_ops,
+ },
+ },
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL0_OUT_MAIN, 1 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_0[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpu_cc_pll0.clkr.hw },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .fw_name = "gcc_gpu_gpll0_clk_src" },
+ { .fw_name = "gcc_gpu_gpll0_div_clk_src" },
+};
+
+static const struct parent_map gpu_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_1[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .fw_name = "gcc_gpu_gpll0_clk_src" },
+ { .fw_name = "gcc_gpu_gpll0_div_clk_src" },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN_DIV, 1.5, 0, 0),
+ F(500000000, P_GPU_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+ .cmd_rcgr = 0x1120,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_0,
+ .freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gmu_clk_src",
+ .parent_data = gpu_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_hub_clk_src[] = {
+ F(150000000, P_GPLL0_OUT_MAIN_DIV, 2, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_hub_clk_src = {
+ .cmd_rcgr = 0x117c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_1,
+ .freq_tbl = ftbl_gpu_cc_hub_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_hub_clk_src",
+ .parent_data = gpu_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div gpu_cc_hub_ahb_div_clk_src = {
+ .reg = 0x11c0,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gpu_cc_hub_ahb_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gpu_cc_hub_cx_int_div_clk_src = {
+ .reg = 0x11bc,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gpu_cc_hub_cx_int_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch gpu_cc_ahb_clk = {
+ .halt_reg = 0x1078,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1078,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_ahb_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cb_clk = {
+ .halt_reg = 0x1170,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1170,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+ .halt_reg = 0x107c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x107c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_crc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_ahb_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_apb_clk = {
+ .halt_reg = 0x1088,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1088,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_apb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+ .halt_reg = 0x1098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_qdss_at_clk = {
+ .halt_reg = 0x1080,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_qdss_at_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_qdss_trig_clk = {
+ .halt_reg = 0x1094,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1094,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_qdss_trig_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_qdss_tsctr_clk = {
+ .halt_reg = 0x1084,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_qdss_tsctr_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_snoc_dvm_clk = {
+ .halt_reg = 0x108c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x108c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_snoc_dvm_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_aon_clk = {
+ .halt_reg = 0x1004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cxo_aon_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+ .halt_reg = 0x109c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x109c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cxo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_freq_measure_clk = {
+ .halt_reg = 0x120c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x120c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_freq_measure_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gmu_clk = {
+ .halt_reg = 0x1064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_qdss_tsctr_clk = {
+ .halt_reg = 0x105c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x105c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gx_qdss_tsctr_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_vsense_clk = {
+ .halt_reg = 0x1058,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gx_vsense_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hlos1_vote_gpu_smmu_clk = {
+ .halt_reg = 0x5000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_hlos1_vote_gpu_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_aon_clk = {
+ .halt_reg = 0x1178,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1178,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_hub_aon_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_cx_int_clk = {
+ .halt_reg = 0x1204,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1204,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_hub_cx_int_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_cx_int_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_mnd1x_0_gfx3d_clk = {
+ .halt_reg = 0x802c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x802c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_mnd1x_0_gfx3d_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_mnd1x_1_gfx3d_clk = {
+ .halt_reg = 0x8030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_mnd1x_1_gfx3d_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_sleep_clk = {
+ .halt_reg = 0x1090,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc gpu_cx_gdsc = {
+ .gdscr = 0x106c,
+ .gds_hw_ctrl = 0x1540,
+ .pd = {
+ .name = "gpu_cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc gpu_gx_gdsc = {
+ .gdscr = 0x100c,
+ .clamp_io_ctrl = 0x1508,
+ .pd = {
+ .name = "gpu_gx_gdsc",
+ .power_on = gdsc_gx_do_nothing_enable,
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = CLAMP_IO | AON_RESET | POLL_CFG_GDSCR,
+};
+
+static struct clk_regmap *gpu_cc_sm8350_clocks[] = {
+ [GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
+ [GPU_CC_CB_CLK] = &gpu_cc_cb_clk.clkr,
+ [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+ [GPU_CC_CX_APB_CLK] = &gpu_cc_cx_apb_clk.clkr,
+ [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+ [GPU_CC_CX_QDSS_AT_CLK] = &gpu_cc_cx_qdss_at_clk.clkr,
+ [GPU_CC_CX_QDSS_TRIG_CLK] = &gpu_cc_cx_qdss_trig_clk.clkr,
+ [GPU_CC_CX_QDSS_TSCTR_CLK] = &gpu_cc_cx_qdss_tsctr_clk.clkr,
+ [GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr,
+ [GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr,
+ [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+ [GPU_CC_FREQ_MEASURE_CLK] = &gpu_cc_freq_measure_clk.clkr,
+ [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+ [GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
+ [GPU_CC_GX_QDSS_TSCTR_CLK] = &gpu_cc_gx_qdss_tsctr_clk.clkr,
+ [GPU_CC_GX_VSENSE_CLK] = &gpu_cc_gx_vsense_clk.clkr,
+ [GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK] = &gpu_cc_hlos1_vote_gpu_smmu_clk.clkr,
+ [GPU_CC_HUB_AHB_DIV_CLK_SRC] = &gpu_cc_hub_ahb_div_clk_src.clkr,
+ [GPU_CC_HUB_AON_CLK] = &gpu_cc_hub_aon_clk.clkr,
+ [GPU_CC_HUB_CLK_SRC] = &gpu_cc_hub_clk_src.clkr,
+ [GPU_CC_HUB_CX_INT_CLK] = &gpu_cc_hub_cx_int_clk.clkr,
+ [GPU_CC_HUB_CX_INT_DIV_CLK_SRC] = &gpu_cc_hub_cx_int_div_clk_src.clkr,
+ [GPU_CC_MND1X_0_GFX3D_CLK] = &gpu_cc_mnd1x_0_gfx3d_clk.clkr,
+ [GPU_CC_MND1X_1_GFX3D_CLK] = &gpu_cc_mnd1x_1_gfx3d_clk.clkr,
+ [GPU_CC_PLL0] = &gpu_cc_pll0.clkr,
+ [GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
+ [GPU_CC_SLEEP_CLK] = &gpu_cc_sleep_clk.clkr,
+};
+
+static const struct qcom_reset_map gpu_cc_sm8350_resets[] = {
+ [GPUCC_GPU_CC_ACD_BCR] = { 0x1160 },
+ [GPUCC_GPU_CC_CB_BCR] = { 0x116c },
+ [GPUCC_GPU_CC_CX_BCR] = { 0x1068 },
+ [GPUCC_GPU_CC_FAST_HUB_BCR] = { 0x1174 },
+ [GPUCC_GPU_CC_GFX3D_AON_BCR] = { 0x10a0 },
+ [GPUCC_GPU_CC_GMU_BCR] = { 0x111c },
+ [GPUCC_GPU_CC_GX_BCR] = { 0x1008 },
+ [GPUCC_GPU_CC_XO_BCR] = { 0x1000 },
+};
+
+static struct gdsc *gpu_cc_sm8350_gdscs[] = {
+ [GPU_CX_GDSC] = &gpu_cx_gdsc,
+ [GPU_GX_GDSC] = &gpu_gx_gdsc,
+};
+
+static const struct regmap_config gpu_cc_sm8350_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x8030,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gpu_cc_sm8350_desc = {
+ .config = &gpu_cc_sm8350_regmap_config,
+ .clks = gpu_cc_sm8350_clocks,
+ .num_clks = ARRAY_SIZE(gpu_cc_sm8350_clocks),
+ .resets = gpu_cc_sm8350_resets,
+ .num_resets = ARRAY_SIZE(gpu_cc_sm8350_resets),
+ .gdscs = gpu_cc_sm8350_gdscs,
+ .num_gdscs = ARRAY_SIZE(gpu_cc_sm8350_gdscs),
+};
+
+static int gpu_cc_sm8350_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gpu_cc_sm8350_desc);
+ if (IS_ERR(regmap)) {
+ dev_err(&pdev->dev, "Failed to map gpu cc registers\n");
+ return PTR_ERR(regmap);
+ }
+
+ clk_lucid_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
+ clk_lucid_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+
+ return qcom_cc_really_probe(pdev, &gpu_cc_sm8350_desc, regmap);
+}
+
+static const struct of_device_id gpu_cc_sm8350_match_table[] = {
+ { .compatible = "qcom,sm8350-gpucc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_sm8350_match_table);
+
+static struct platform_driver gpu_cc_sm8350_driver = {
+ .probe = gpu_cc_sm8350_probe,
+ .driver = {
+ .name = "sm8350-gpucc",
+ .of_match_table = gpu_cc_sm8350_match_table,
+ },
+};
+
+static int __init gpu_cc_sm8350_init(void)
+{
+ return platform_driver_register(&gpu_cc_sm8350_driver);
+}
+subsys_initcall(gpu_cc_sm8350_init);
+
+static void __exit gpu_cc_sm8350_exit(void)
+{
+ platform_driver_unregister(&gpu_cc_sm8350_driver);
+}
+module_exit(gpu_cc_sm8350_exit);
+
+MODULE_DESCRIPTION("QTI GPU_CC SM8350 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/krait-cc.c b/drivers/clk/qcom/krait-cc.c
index 4d4b657d33c3..cfd961d5cc45 100644
--- a/drivers/clk/qcom/krait-cc.c
+++ b/drivers/clk/qcom/krait-cc.c
@@ -139,6 +139,14 @@ krait_add_sec_mux(struct device *dev, int id, const char *s,
mux->hw.init = &init;
mux->safe_sel = 0;
+ /* Checking for qcom,krait-cc-v1 or qcom,krait-cc-v2 is not
+ * enough to limit this to apq/ipq8064. Directly check machine
+ * compatible to correctly handle this errata.
+ */
+ if (of_machine_is_compatible("qcom,ipq8064") ||
+ of_machine_is_compatible("qcom,apq8064"))
+ mux->disable_sec_src_gating = true;
+
init.name = kasprintf(GFP_KERNEL, "krait%s_sec_mux", s);
if (!init.name)
return -ENOMEM;
diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c
index 24843e4f2599..80330dab4d81 100644
--- a/drivers/clk/qcom/mmcc-msm8996.c
+++ b/drivers/clk/qcom/mmcc-msm8996.c
@@ -45,194 +45,14 @@ enum {
P_MMPLL4,
};
-static const struct parent_map mmss_xo_hdmi_map[] = {
- { P_XO, 0 },
- { P_HDMIPLL, 1 }
-};
-
-static const char * const mmss_xo_hdmi[] = {
- "xo",
- "hdmipll"
-};
-
-static const struct parent_map mmss_xo_dsi0pll_dsi1pll_map[] = {
- { P_XO, 0 },
- { P_DSI0PLL, 1 },
- { P_DSI1PLL, 2 }
-};
-
-static const char * const mmss_xo_dsi0pll_dsi1pll[] = {
- "xo",
- "dsi0pll",
- "dsi1pll"
-};
-
-static const struct parent_map mmss_xo_gpll0_gpll0_div_map[] = {
- { P_XO, 0 },
- { P_GPLL0, 5 },
- { P_GPLL0_DIV, 6 }
-};
-
-static const char * const mmss_xo_gpll0_gpll0_div[] = {
- "xo",
- "gpll0",
- "gpll0_div"
-};
-
-static const struct parent_map mmss_xo_dsibyte_map[] = {
- { P_XO, 0 },
- { P_DSI0PLL_BYTE, 1 },
- { P_DSI1PLL_BYTE, 2 }
-};
-
-static const char * const mmss_xo_dsibyte[] = {
- "xo",
- "dsi0pllbyte",
- "dsi1pllbyte"
-};
-
-static const struct parent_map mmss_xo_mmpll0_gpll0_gpll0_div_map[] = {
- { P_XO, 0 },
- { P_MMPLL0, 1 },
- { P_GPLL0, 5 },
- { P_GPLL0_DIV, 6 }
-};
-
-static const char * const mmss_xo_mmpll0_gpll0_gpll0_div[] = {
- "xo",
- "mmpll0",
- "gpll0",
- "gpll0_div"
-};
-
-static const struct parent_map mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div_map[] = {
- { P_XO, 0 },
- { P_MMPLL0, 1 },
- { P_MMPLL1, 2 },
- { P_GPLL0, 5 },
- { P_GPLL0_DIV, 6 }
-};
-
-static const char * const mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div[] = {
- "xo",
- "mmpll0",
- "mmpll1",
- "gpll0",
- "gpll0_div"
-};
-
-static const struct parent_map mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div_map[] = {
- { P_XO, 0 },
- { P_MMPLL0, 1 },
- { P_MMPLL3, 3 },
- { P_GPLL0, 5 },
- { P_GPLL0_DIV, 6 }
-};
-
-static const char * const mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div[] = {
- "xo",
- "mmpll0",
- "mmpll3",
- "gpll0",
- "gpll0_div"
-};
-
-static const struct parent_map mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div_map[] = {
- { P_XO, 0 },
- { P_MMPLL0, 1 },
- { P_MMPLL5, 2 },
- { P_GPLL0, 5 },
- { P_GPLL0_DIV, 6 }
-};
-
-static const char * const mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div[] = {
- "xo",
- "mmpll0",
- "mmpll5",
- "gpll0",
- "gpll0_div"
-};
-
-static const struct parent_map mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div_map[] = {
- { P_XO, 0 },
- { P_MMPLL0, 1 },
- { P_MMPLL4, 3 },
- { P_GPLL0, 5 },
- { P_GPLL0_DIV, 6 }
-};
-
-static const char * const mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div[] = {
- "xo",
- "mmpll0",
- "mmpll4",
- "gpll0",
- "gpll0_div"
-};
-
-static const struct parent_map mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_map[] = {
- { P_XO, 0 },
- { P_MMPLL0, 1 },
- { P_MMPLL9, 2 },
- { P_MMPLL2, 3 },
- { P_MMPLL8, 4 },
- { P_GPLL0, 5 }
-};
-
-static const char * const mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0[] = {
- "xo",
- "mmpll0",
- "mmpll9",
- "mmpll2",
- "mmpll8",
- "gpll0"
-};
-
-static const struct parent_map mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_gpll0_div_map[] = {
- { P_XO, 0 },
- { P_MMPLL0, 1 },
- { P_MMPLL9, 2 },
- { P_MMPLL2, 3 },
- { P_MMPLL8, 4 },
- { P_GPLL0, 5 },
- { P_GPLL0_DIV, 6 }
-};
-
-static const char * const mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_gpll0_div[] = {
- "xo",
- "mmpll0",
- "mmpll9",
- "mmpll2",
- "mmpll8",
- "gpll0",
- "gpll0_div"
-};
-
-static const struct parent_map mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map[] = {
- { P_XO, 0 },
- { P_MMPLL0, 1 },
- { P_MMPLL1, 2 },
- { P_MMPLL4, 3 },
- { P_MMPLL3, 4 },
- { P_GPLL0, 5 },
- { P_GPLL0_DIV, 6 }
-};
-
-static const char * const mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div[] = {
- "xo",
- "mmpll0",
- "mmpll1",
- "mmpll4",
- "mmpll3",
- "gpll0",
- "gpll0_div"
-};
-
static struct clk_fixed_factor gpll0_div = {
.mult = 1,
.div = 2,
.hw.init = &(struct clk_init_data){
.name = "gpll0_div",
- .parent_names = (const char *[]){ "gpll0" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "gpll0", .name = "gpll0" },
+ },
.num_parents = 1,
.ops = &clk_fixed_factor_ops,
},
@@ -265,7 +85,9 @@ static struct clk_alpha_pll mmpll0_early = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmpll0_early",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
},
@@ -278,7 +100,9 @@ static struct clk_alpha_pll_postdiv mmpll0 = {
.width = 4,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll0",
- .parent_names = (const char *[]){ "mmpll0_early" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mmpll0_early.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -295,7 +119,9 @@ static struct clk_alpha_pll mmpll1_early = {
.enable_mask = BIT(1),
.hw.init = &(struct clk_init_data){
.name = "mmpll1_early",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
}
@@ -308,7 +134,9 @@ static struct clk_alpha_pll_postdiv mmpll1 = {
.width = 4,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll1",
- .parent_names = (const char *[]){ "mmpll1_early" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mmpll1_early.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -322,7 +150,9 @@ static struct clk_alpha_pll mmpll2_early = {
.num_vco = ARRAY_SIZE(mmpll_gfx_vco),
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll2_early",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
},
@@ -334,7 +164,9 @@ static struct clk_alpha_pll_postdiv mmpll2 = {
.width = 4,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll2",
- .parent_names = (const char *[]){ "mmpll2_early" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mmpll2_early.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -348,7 +180,9 @@ static struct clk_alpha_pll mmpll3_early = {
.num_vco = ARRAY_SIZE(mmpll_p_vco),
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll3_early",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
},
@@ -360,7 +194,9 @@ static struct clk_alpha_pll_postdiv mmpll3 = {
.width = 4,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll3",
- .parent_names = (const char *[]){ "mmpll3_early" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mmpll3_early.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -374,7 +210,9 @@ static struct clk_alpha_pll mmpll4_early = {
.num_vco = ARRAY_SIZE(mmpll_t_vco),
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll4_early",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
},
@@ -386,7 +224,9 @@ static struct clk_alpha_pll_postdiv mmpll4 = {
.width = 2,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll4",
- .parent_names = (const char *[]){ "mmpll4_early" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mmpll4_early.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -400,7 +240,9 @@ static struct clk_alpha_pll mmpll5_early = {
.num_vco = ARRAY_SIZE(mmpll_p_vco),
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll5_early",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
},
@@ -412,7 +254,9 @@ static struct clk_alpha_pll_postdiv mmpll5 = {
.width = 4,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll5",
- .parent_names = (const char *[]){ "mmpll5_early" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mmpll5_early.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -426,7 +270,9 @@ static struct clk_alpha_pll mmpll8_early = {
.num_vco = ARRAY_SIZE(mmpll_gfx_vco),
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll8_early",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
},
@@ -438,7 +284,9 @@ static struct clk_alpha_pll_postdiv mmpll8 = {
.width = 4,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll8",
- .parent_names = (const char *[]){ "mmpll8_early" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mmpll8_early.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -452,7 +300,9 @@ static struct clk_alpha_pll mmpll9_early = {
.num_vco = ARRAY_SIZE(mmpll_t_vco),
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll9_early",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
},
@@ -464,13 +314,197 @@ static struct clk_alpha_pll_postdiv mmpll9 = {
.width = 2,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll9",
- .parent_names = (const char *[]){ "mmpll9_early" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mmpll9_early.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.flags = CLK_SET_RATE_PARENT,
},
};
+static const struct parent_map mmss_xo_hdmi_map[] = {
+ { P_XO, 0 },
+ { P_HDMIPLL, 1 }
+};
+
+static const struct clk_parent_data mmss_xo_hdmi[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .fw_name = "hdmipll", .name = "hdmipll" }
+};
+
+static const struct parent_map mmss_xo_dsi0pll_dsi1pll_map[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL, 1 },
+ { P_DSI1PLL, 2 }
+};
+
+static const struct clk_parent_data mmss_xo_dsi0pll_dsi1pll[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .fw_name = "dsi0pll", .name = "dsi0pll" },
+ { .fw_name = "dsi1pll", .name = "dsi1pll" }
+};
+
+static const struct parent_map mmss_xo_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 }
+};
+
+static const struct clk_parent_data mmss_xo_gpll0_gpll0_div[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .fw_name = "gpll0", .name = "gpll0" },
+ { .hw = &gpll0_div.hw }
+};
+
+static const struct parent_map mmss_xo_dsibyte_map[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL_BYTE, 1 },
+ { P_DSI1PLL_BYTE, 2 }
+};
+
+static const struct clk_parent_data mmss_xo_dsibyte[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .fw_name = "dsi0pllbyte", .name = "dsi0pllbyte" },
+ { .fw_name = "dsi1pllbyte", .name = "dsi1pllbyte" }
+};
+
+static const struct parent_map mmss_xo_mmpll0_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_gpll0_gpll0_div[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &mmpll0.clkr.hw },
+ { .fw_name = "gpll0", .name = "gpll0" },
+ { .hw = &gpll0_div.hw }
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL1, 2 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll1.clkr.hw },
+ { .fw_name = "gpll0", .name = "gpll0" },
+ { .hw = &gpll0_div.hw }
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL3, 3 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll3.clkr.hw },
+ { .fw_name = "gpll0", .name = "gpll0" },
+ { .hw = &gpll0_div.hw }
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL5, 2 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll5.clkr.hw },
+ { .fw_name = "gpll0", .name = "gpll0" },
+ { .hw = &gpll0_div.hw }
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL4, 3 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll4.clkr.hw },
+ { .fw_name = "gpll0", .name = "gpll0" },
+ { .hw = &gpll0_div.hw }
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL9, 2 },
+ { P_MMPLL2, 3 },
+ { P_MMPLL8, 4 },
+ { P_GPLL0, 5 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll9.clkr.hw },
+ { .hw = &mmpll2.clkr.hw },
+ { .hw = &mmpll8.clkr.hw },
+ { .fw_name = "gpll0", .name = "gpll0" },
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL9, 2 },
+ { P_MMPLL2, 3 },
+ { P_MMPLL8, 4 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_gpll0_div[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll9.clkr.hw },
+ { .hw = &mmpll2.clkr.hw },
+ { .hw = &mmpll8.clkr.hw },
+ { .fw_name = "gpll0", .name = "gpll0" },
+ { .hw = &gpll0_div.hw }
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL1, 2 },
+ { P_MMPLL4, 3 },
+ { P_MMPLL3, 4 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 }
+};
+
+static const struct clk_parent_data mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &mmpll0.clkr.hw },
+ { .hw = &mmpll1.clkr.hw },
+ { .hw = &mmpll4.clkr.hw },
+ { .hw = &mmpll3.clkr.hw },
+ { .fw_name = "gpll0", .name = "gpll0" },
+ { .hw = &gpll0_div.hw }
+};
+
static const struct freq_tbl ftbl_ahb_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(40000000, P_GPLL0_DIV, 7.5, 0, 0),
@@ -485,8 +519,8 @@ static struct clk_rcg2 ahb_clk_src = {
.freq_tbl = ftbl_ahb_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "ahb_clk_src",
- .parent_names = mmss_xo_mmpll0_gpll0_gpll0_div,
- .num_parents = 4,
+ .parent_data = mmss_xo_mmpll0_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -509,8 +543,8 @@ static struct clk_rcg2 axi_clk_src = {
.freq_tbl = ftbl_axi_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "axi_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -522,8 +556,8 @@ static struct clk_rcg2 maxi_clk_src = {
.freq_tbl = ftbl_axi_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "maxi_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -535,8 +569,8 @@ static struct clk_rcg2_gfx3d gfx3d_clk_src = {
.parent_map = mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "gfx3d_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0,
- .num_parents = 6,
+ .parent_data = mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0),
.ops = &clk_gfx3d_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -560,8 +594,8 @@ static struct clk_rcg2 rbbmtimer_clk_src = {
.freq_tbl = ftbl_rbbmtimer_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "rbbmtimer_clk_src",
- .parent_names = mmss_xo_mmpll0_gpll0_gpll0_div,
- .num_parents = 4,
+ .parent_data = mmss_xo_mmpll0_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -572,8 +606,8 @@ static struct clk_rcg2 isense_clk_src = {
.parent_map = mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_gpll0_div_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "isense_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -591,8 +625,8 @@ static struct clk_rcg2 rbcpr_clk_src = {
.freq_tbl = ftbl_rbcpr_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "rbcpr_clk_src",
- .parent_names = mmss_xo_mmpll0_gpll0_gpll0_div,
- .num_parents = 4,
+ .parent_data = mmss_xo_mmpll0_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -613,8 +647,8 @@ static struct clk_rcg2 video_core_clk_src = {
.freq_tbl = ftbl_video_core_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "video_core_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -627,8 +661,8 @@ static struct clk_rcg2 video_subcore0_clk_src = {
.freq_tbl = ftbl_video_core_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "video_subcore0_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -641,8 +675,8 @@ static struct clk_rcg2 video_subcore1_clk_src = {
.freq_tbl = ftbl_video_core_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "video_subcore1_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -654,8 +688,8 @@ static struct clk_rcg2 pclk0_clk_src = {
.parent_map = mmss_xo_dsi0pll_dsi1pll_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "pclk0_clk_src",
- .parent_names = mmss_xo_dsi0pll_dsi1pll,
- .num_parents = 3,
+ .parent_data = mmss_xo_dsi0pll_dsi1pll,
+ .num_parents = ARRAY_SIZE(mmss_xo_dsi0pll_dsi1pll),
.ops = &clk_pixel_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -668,8 +702,8 @@ static struct clk_rcg2 pclk1_clk_src = {
.parent_map = mmss_xo_dsi0pll_dsi1pll_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "pclk1_clk_src",
- .parent_names = mmss_xo_dsi0pll_dsi1pll,
- .num_parents = 3,
+ .parent_data = mmss_xo_dsi0pll_dsi1pll,
+ .num_parents = ARRAY_SIZE(mmss_xo_dsi0pll_dsi1pll),
.ops = &clk_pixel_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -695,8 +729,8 @@ static struct clk_rcg2 mdp_clk_src = {
.freq_tbl = ftbl_mdp_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "mdp_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -713,8 +747,8 @@ static struct clk_rcg2 extpclk_clk_src = {
.freq_tbl = extpclk_freq_tbl,
.clkr.hw.init = &(struct clk_init_data){
.name = "extpclk_clk_src",
- .parent_names = mmss_xo_hdmi,
- .num_parents = 2,
+ .parent_data = mmss_xo_hdmi,
+ .num_parents = ARRAY_SIZE(mmss_xo_hdmi),
.ops = &clk_byte_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -732,8 +766,8 @@ static struct clk_rcg2 vsync_clk_src = {
.freq_tbl = ftbl_mdss_vsync_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "vsync_clk_src",
- .parent_names = mmss_xo_gpll0_gpll0_div,
- .num_parents = 3,
+ .parent_data = mmss_xo_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -750,8 +784,8 @@ static struct clk_rcg2 hdmi_clk_src = {
.freq_tbl = ftbl_mdss_hdmi_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "hdmi_clk_src",
- .parent_names = mmss_xo_gpll0_gpll0_div,
- .num_parents = 3,
+ .parent_data = mmss_xo_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -762,8 +796,8 @@ static struct clk_rcg2 byte0_clk_src = {
.parent_map = mmss_xo_dsibyte_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "byte0_clk_src",
- .parent_names = mmss_xo_dsibyte,
- .num_parents = 3,
+ .parent_data = mmss_xo_dsibyte,
+ .num_parents = ARRAY_SIZE(mmss_xo_dsibyte),
.ops = &clk_byte2_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -775,8 +809,8 @@ static struct clk_rcg2 byte1_clk_src = {
.parent_map = mmss_xo_dsibyte_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "byte1_clk_src",
- .parent_names = mmss_xo_dsibyte,
- .num_parents = 3,
+ .parent_data = mmss_xo_dsibyte,
+ .num_parents = ARRAY_SIZE(mmss_xo_dsibyte),
.ops = &clk_byte2_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -794,8 +828,8 @@ static struct clk_rcg2 esc0_clk_src = {
.freq_tbl = ftbl_mdss_esc0_1_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "esc0_clk_src",
- .parent_names = mmss_xo_dsibyte,
- .num_parents = 3,
+ .parent_data = mmss_xo_dsibyte,
+ .num_parents = ARRAY_SIZE(mmss_xo_dsibyte),
.ops = &clk_rcg2_ops,
},
};
@@ -807,8 +841,8 @@ static struct clk_rcg2 esc1_clk_src = {
.freq_tbl = ftbl_mdss_esc0_1_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "esc1_clk_src",
- .parent_names = mmss_xo_dsibyte,
- .num_parents = 3,
+ .parent_data = mmss_xo_dsibyte,
+ .num_parents = ARRAY_SIZE(mmss_xo_dsibyte),
.ops = &clk_rcg2_ops,
},
};
@@ -831,8 +865,8 @@ static struct clk_rcg2 camss_gp0_clk_src = {
.freq_tbl = ftbl_camss_gp0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "camss_gp0_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -845,8 +879,8 @@ static struct clk_rcg2 camss_gp1_clk_src = {
.freq_tbl = ftbl_camss_gp0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "camss_gp1_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -873,8 +907,8 @@ static struct clk_rcg2 mclk0_clk_src = {
.freq_tbl = ftbl_mclk0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk0_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -887,8 +921,8 @@ static struct clk_rcg2 mclk1_clk_src = {
.freq_tbl = ftbl_mclk0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk1_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -901,8 +935,8 @@ static struct clk_rcg2 mclk2_clk_src = {
.freq_tbl = ftbl_mclk0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk2_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -915,8 +949,8 @@ static struct clk_rcg2 mclk3_clk_src = {
.freq_tbl = ftbl_mclk0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk3_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -937,8 +971,8 @@ static struct clk_rcg2 cci_clk_src = {
.freq_tbl = ftbl_cci_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "cci_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -957,8 +991,8 @@ static struct clk_rcg2 csi0phytimer_clk_src = {
.freq_tbl = ftbl_csi0phytimer_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "csi0phytimer_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -970,8 +1004,8 @@ static struct clk_rcg2 csi1phytimer_clk_src = {
.freq_tbl = ftbl_csi0phytimer_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "csi1phytimer_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -983,8 +1017,8 @@ static struct clk_rcg2 csi2phytimer_clk_src = {
.freq_tbl = ftbl_csi0phytimer_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "csi2phytimer_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1004,8 +1038,8 @@ static struct clk_rcg2 csiphy0_3p_clk_src = {
.freq_tbl = ftbl_csiphy0_3p_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "csiphy0_3p_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1017,8 +1051,8 @@ static struct clk_rcg2 csiphy1_3p_clk_src = {
.freq_tbl = ftbl_csiphy0_3p_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "csiphy1_3p_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1030,8 +1064,8 @@ static struct clk_rcg2 csiphy2_3p_clk_src = {
.freq_tbl = ftbl_csiphy0_3p_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "csiphy2_3p_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1053,8 +1087,8 @@ static struct clk_rcg2 jpeg0_clk_src = {
.freq_tbl = ftbl_jpeg0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "jpeg0_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1075,8 +1109,8 @@ static struct clk_rcg2 jpeg2_clk_src = {
.freq_tbl = ftbl_jpeg2_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "jpeg2_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1088,8 +1122,8 @@ static struct clk_rcg2 jpeg_dma_clk_src = {
.freq_tbl = ftbl_jpeg0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "jpeg_dma_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1111,8 +1145,8 @@ static struct clk_rcg2 vfe0_clk_src = {
.freq_tbl = ftbl_vfe0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "vfe0_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1124,8 +1158,8 @@ static struct clk_rcg2 vfe1_clk_src = {
.freq_tbl = ftbl_vfe0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "vfe1_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1146,8 +1180,8 @@ static struct clk_rcg2 cpp_clk_src = {
.freq_tbl = ftbl_cpp_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "cpp_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1168,8 +1202,8 @@ static struct clk_rcg2 csi0_clk_src = {
.freq_tbl = ftbl_csi0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "csi0_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1181,8 +1215,8 @@ static struct clk_rcg2 csi1_clk_src = {
.freq_tbl = ftbl_csi0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "csi1_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1194,8 +1228,8 @@ static struct clk_rcg2 csi2_clk_src = {
.freq_tbl = ftbl_csi0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "csi2_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1207,8 +1241,8 @@ static struct clk_rcg2 csi3_clk_src = {
.freq_tbl = ftbl_csi0_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "csi3_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
- .num_parents = 7,
+ .parent_data = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1227,8 +1261,8 @@ static struct clk_rcg2 fd_core_clk_src = {
.freq_tbl = ftbl_fd_core_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "fd_core_clk_src",
- .parent_names = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
- .num_parents = 5,
+ .parent_data = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1240,7 +1274,9 @@ static struct clk_branch mmss_mmagic_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmss_mmagic_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
@@ -1255,7 +1291,9 @@ static struct clk_branch mmss_mmagic_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmss_mmagic_cfg_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
@@ -1270,7 +1308,9 @@ static struct clk_branch mmss_misc_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmss_misc_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1285,7 +1325,9 @@ static struct clk_branch mmss_misc_cxo_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmss_misc_cxo_clk",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
.num_parents = 1,
.ops = &clk_branch2_ops,
},
@@ -1299,7 +1341,9 @@ static struct clk_branch mmss_mmagic_maxi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmss_mmagic_maxi_clk",
- .parent_names = (const char *[]){ "maxi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &maxi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1314,7 +1358,9 @@ static struct clk_branch mmagic_camss_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmagic_camss_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
@@ -1329,7 +1375,9 @@ static struct clk_branch mmagic_camss_noc_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmagic_camss_noc_cfg_ahb_clk",
- .parent_names = (const char *[]){ "gcc_mmss_noc_cfg_ahb_clk" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "gcc_mmss_noc_cfg_ahb_clk", .name = "gcc_mmss_noc_cfg_ahb_clk" },
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
@@ -1344,7 +1392,9 @@ static struct clk_branch smmu_vfe_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "smmu_vfe_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1359,7 +1409,9 @@ static struct clk_branch smmu_vfe_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "smmu_vfe_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1374,7 +1426,9 @@ static struct clk_branch smmu_cpp_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "smmu_cpp_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1389,7 +1443,9 @@ static struct clk_branch smmu_cpp_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "smmu_cpp_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1404,7 +1460,9 @@ static struct clk_branch smmu_jpeg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "smmu_jpeg_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1419,7 +1477,9 @@ static struct clk_branch smmu_jpeg_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "smmu_jpeg_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1434,7 +1494,9 @@ static struct clk_branch mmagic_mdss_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmagic_mdss_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
@@ -1449,7 +1511,9 @@ static struct clk_branch mmagic_mdss_noc_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmagic_mdss_noc_cfg_ahb_clk",
- .parent_names = (const char *[]){ "gcc_mmss_noc_cfg_ahb_clk" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "gcc_mmss_noc_cfg_ahb_clk", .name = "gcc_mmss_noc_cfg_ahb_clk" },
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
@@ -1464,7 +1528,9 @@ static struct clk_branch smmu_rot_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "smmu_rot_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1479,7 +1545,9 @@ static struct clk_branch smmu_rot_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "smmu_rot_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1494,7 +1562,9 @@ static struct clk_branch smmu_mdp_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "smmu_mdp_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1509,7 +1579,9 @@ static struct clk_branch smmu_mdp_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "smmu_mdp_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1524,7 +1596,9 @@ static struct clk_branch mmagic_video_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmagic_video_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
@@ -1539,7 +1613,9 @@ static struct clk_branch mmagic_video_noc_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmagic_video_noc_cfg_ahb_clk",
- .parent_names = (const char *[]){ "gcc_mmss_noc_cfg_ahb_clk" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "gcc_mmss_noc_cfg_ahb_clk", .name = "gcc_mmss_noc_cfg_ahb_clk" },
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
@@ -1554,7 +1630,9 @@ static struct clk_branch smmu_video_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "smmu_video_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1569,7 +1647,9 @@ static struct clk_branch smmu_video_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "smmu_video_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1584,7 +1664,9 @@ static struct clk_branch mmagic_bimc_noc_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmagic_bimc_noc_cfg_ahb_clk",
- .parent_names = (const char *[]){ "gcc_mmss_noc_cfg_ahb_clk" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "gcc_mmss_noc_cfg_ahb_clk", .name = "gcc_mmss_noc_cfg_ahb_clk" },
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1599,7 +1681,9 @@ static struct clk_branch gpu_gx_gfx3d_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gpu_gx_gfx3d_clk",
- .parent_names = (const char *[]){ "gfx3d_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gfx3d_clk_src.rcg.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1614,7 +1698,9 @@ static struct clk_branch gpu_gx_rbbmtimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gpu_gx_rbbmtimer_clk",
- .parent_names = (const char *[]){ "rbbmtimer_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &rbbmtimer_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1629,7 +1715,9 @@ static struct clk_branch gpu_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gpu_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1644,7 +1732,9 @@ static struct clk_branch gpu_aon_isense_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gpu_aon_isense_clk",
- .parent_names = (const char *[]){ "isense_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &isense_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1659,7 +1749,9 @@ static struct clk_branch vmem_maxi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "vmem_maxi_clk",
- .parent_names = (const char *[]){ "maxi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &maxi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1674,7 +1766,9 @@ static struct clk_branch vmem_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "vmem_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1689,7 +1783,9 @@ static struct clk_branch mmss_rbcpr_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmss_rbcpr_clk",
- .parent_names = (const char *[]){ "rbcpr_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &rbcpr_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1704,7 +1800,9 @@ static struct clk_branch mmss_rbcpr_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mmss_rbcpr_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1719,7 +1817,9 @@ static struct clk_branch video_core_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "video_core_clk",
- .parent_names = (const char *[]){ "video_core_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &video_core_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1734,7 +1834,9 @@ static struct clk_branch video_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "video_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1749,7 +1851,9 @@ static struct clk_branch video_maxi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "video_maxi_clk",
- .parent_names = (const char *[]){ "maxi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &maxi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1764,7 +1868,9 @@ static struct clk_branch video_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "video_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1779,7 +1885,9 @@ static struct clk_branch video_subcore0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "video_subcore0_clk",
- .parent_names = (const char *[]){ "video_subcore0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &video_subcore0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1794,7 +1902,9 @@ static struct clk_branch video_subcore1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "video_subcore1_clk",
- .parent_names = (const char *[]){ "video_subcore1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &video_subcore1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1809,7 +1919,9 @@ static struct clk_branch mdss_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1824,7 +1936,9 @@ static struct clk_branch mdss_hdmi_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_hdmi_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1839,7 +1953,9 @@ static struct clk_branch mdss_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1854,7 +1970,9 @@ static struct clk_branch mdss_pclk0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_pclk0_clk",
- .parent_names = (const char *[]){ "pclk0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &pclk0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1869,7 +1987,9 @@ static struct clk_branch mdss_pclk1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_pclk1_clk",
- .parent_names = (const char *[]){ "pclk1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &pclk1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1884,7 +2004,9 @@ static struct clk_branch mdss_mdp_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_mdp_clk",
- .parent_names = (const char *[]){ "mdp_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mdp_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1899,7 +2021,9 @@ static struct clk_branch mdss_extpclk_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_extpclk_clk",
- .parent_names = (const char *[]){ "extpclk_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &extpclk_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1914,7 +2038,9 @@ static struct clk_branch mdss_vsync_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_vsync_clk",
- .parent_names = (const char *[]){ "vsync_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &vsync_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1929,7 +2055,9 @@ static struct clk_branch mdss_hdmi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_hdmi_clk",
- .parent_names = (const char *[]){ "hdmi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &hdmi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1944,7 +2072,9 @@ static struct clk_branch mdss_byte0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_byte0_clk",
- .parent_names = (const char *[]){ "byte0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &byte0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1959,7 +2089,9 @@ static struct clk_branch mdss_byte1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_byte1_clk",
- .parent_names = (const char *[]){ "byte1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &byte1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1974,7 +2106,9 @@ static struct clk_branch mdss_esc0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_esc0_clk",
- .parent_names = (const char *[]){ "esc0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &esc0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1989,7 +2123,9 @@ static struct clk_branch mdss_esc1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdss_esc1_clk",
- .parent_names = (const char *[]){ "esc1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &esc1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2004,7 +2140,9 @@ static struct clk_branch camss_top_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_top_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2019,7 +2157,9 @@ static struct clk_branch camss_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2034,7 +2174,9 @@ static struct clk_branch camss_micro_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_micro_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2049,7 +2191,9 @@ static struct clk_branch camss_gp0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_gp0_clk",
- .parent_names = (const char *[]){ "camss_gp0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_gp0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2064,7 +2208,9 @@ static struct clk_branch camss_gp1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_gp1_clk",
- .parent_names = (const char *[]){ "camss_gp1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_gp1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2079,7 +2225,9 @@ static struct clk_branch camss_mclk0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_mclk0_clk",
- .parent_names = (const char *[]){ "mclk0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mclk0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2094,7 +2242,9 @@ static struct clk_branch camss_mclk1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_mclk1_clk",
- .parent_names = (const char *[]){ "mclk1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mclk1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2109,7 +2259,9 @@ static struct clk_branch camss_mclk2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_mclk2_clk",
- .parent_names = (const char *[]){ "mclk2_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mclk2_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2124,7 +2276,9 @@ static struct clk_branch camss_mclk3_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_mclk3_clk",
- .parent_names = (const char *[]){ "mclk3_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mclk3_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2139,7 +2293,9 @@ static struct clk_branch camss_cci_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_cci_clk",
- .parent_names = (const char *[]){ "cci_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &cci_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2154,7 +2310,9 @@ static struct clk_branch camss_cci_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_cci_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2169,7 +2327,9 @@ static struct clk_branch camss_csi0phytimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi0phytimer_clk",
- .parent_names = (const char *[]){ "csi0phytimer_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0phytimer_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2184,7 +2344,9 @@ static struct clk_branch camss_csi1phytimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi1phytimer_clk",
- .parent_names = (const char *[]){ "csi1phytimer_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1phytimer_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2199,7 +2361,9 @@ static struct clk_branch camss_csi2phytimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi2phytimer_clk",
- .parent_names = (const char *[]){ "csi2phytimer_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi2phytimer_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2214,7 +2378,9 @@ static struct clk_branch camss_csiphy0_3p_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csiphy0_3p_clk",
- .parent_names = (const char *[]){ "csiphy0_3p_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csiphy0_3p_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2229,7 +2395,9 @@ static struct clk_branch camss_csiphy1_3p_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csiphy1_3p_clk",
- .parent_names = (const char *[]){ "csiphy1_3p_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csiphy1_3p_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2244,7 +2412,9 @@ static struct clk_branch camss_csiphy2_3p_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csiphy2_3p_clk",
- .parent_names = (const char *[]){ "csiphy2_3p_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csiphy2_3p_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2259,7 +2429,9 @@ static struct clk_branch camss_jpeg0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_jpeg0_clk",
- .parent_names = (const char *[]){ "jpeg0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &jpeg0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2274,7 +2446,9 @@ static struct clk_branch camss_jpeg2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_jpeg2_clk",
- .parent_names = (const char *[]){ "jpeg2_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &jpeg2_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2289,7 +2463,9 @@ static struct clk_branch camss_jpeg_dma_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_jpeg_dma_clk",
- .parent_names = (const char *[]){ "jpeg_dma_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &jpeg_dma_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2304,7 +2480,9 @@ static struct clk_branch camss_jpeg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_jpeg_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2319,7 +2497,9 @@ static struct clk_branch camss_jpeg_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_jpeg_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2334,7 +2514,9 @@ static struct clk_branch camss_vfe_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_vfe_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2349,7 +2531,9 @@ static struct clk_branch camss_vfe_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_vfe_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2364,7 +2548,9 @@ static struct clk_branch camss_vfe0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_vfe0_clk",
- .parent_names = (const char *[]){ "vfe0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2379,7 +2565,9 @@ static struct clk_branch camss_vfe0_stream_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_vfe0_stream_clk",
- .parent_names = (const char *[]){ "vfe0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2394,7 +2582,9 @@ static struct clk_branch camss_vfe0_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_vfe0_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2409,7 +2599,9 @@ static struct clk_branch camss_vfe1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_vfe1_clk",
- .parent_names = (const char *[]){ "vfe1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2424,7 +2616,9 @@ static struct clk_branch camss_vfe1_stream_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_vfe1_stream_clk",
- .parent_names = (const char *[]){ "vfe1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2439,7 +2633,9 @@ static struct clk_branch camss_vfe1_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_vfe1_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2454,7 +2650,9 @@ static struct clk_branch camss_csi_vfe0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi_vfe0_clk",
- .parent_names = (const char *[]){ "vfe0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2469,7 +2667,9 @@ static struct clk_branch camss_csi_vfe1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi_vfe1_clk",
- .parent_names = (const char *[]){ "vfe1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2484,7 +2684,9 @@ static struct clk_branch camss_cpp_vbif_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_cpp_vbif_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2499,7 +2701,9 @@ static struct clk_branch camss_cpp_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_cpp_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &axi_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2514,7 +2718,9 @@ static struct clk_branch camss_cpp_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_cpp_clk",
- .parent_names = (const char *[]){ "cpp_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &cpp_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2529,7 +2735,9 @@ static struct clk_branch camss_cpp_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_cpp_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2544,7 +2752,9 @@ static struct clk_branch camss_csi0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi0_clk",
- .parent_names = (const char *[]){ "csi0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2559,7 +2769,9 @@ static struct clk_branch camss_csi0_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi0_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2574,7 +2786,9 @@ static struct clk_branch camss_csi0phy_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi0phy_clk",
- .parent_names = (const char *[]){ "csi0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2589,7 +2803,9 @@ static struct clk_branch camss_csi0rdi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi0rdi_clk",
- .parent_names = (const char *[]){ "csi0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2604,7 +2820,9 @@ static struct clk_branch camss_csi0pix_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi0pix_clk",
- .parent_names = (const char *[]){ "csi0_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2619,7 +2837,9 @@ static struct clk_branch camss_csi1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi1_clk",
- .parent_names = (const char *[]){ "csi1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2634,7 +2854,9 @@ static struct clk_branch camss_csi1_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi1_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2649,7 +2871,9 @@ static struct clk_branch camss_csi1phy_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi1phy_clk",
- .parent_names = (const char *[]){ "csi1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2664,7 +2888,9 @@ static struct clk_branch camss_csi1rdi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi1rdi_clk",
- .parent_names = (const char *[]){ "csi1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2679,7 +2905,9 @@ static struct clk_branch camss_csi1pix_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi1pix_clk",
- .parent_names = (const char *[]){ "csi1_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2694,7 +2922,9 @@ static struct clk_branch camss_csi2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi2_clk",
- .parent_names = (const char *[]){ "csi2_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi2_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2709,7 +2939,9 @@ static struct clk_branch camss_csi2_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi2_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2724,7 +2956,9 @@ static struct clk_branch camss_csi2phy_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi2phy_clk",
- .parent_names = (const char *[]){ "csi2_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi2_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2739,7 +2973,9 @@ static struct clk_branch camss_csi2rdi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi2rdi_clk",
- .parent_names = (const char *[]){ "csi2_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi2_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2754,7 +2990,9 @@ static struct clk_branch camss_csi2pix_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi2pix_clk",
- .parent_names = (const char *[]){ "csi2_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi2_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2769,7 +3007,9 @@ static struct clk_branch camss_csi3_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi3_clk",
- .parent_names = (const char *[]){ "csi3_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi3_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2784,7 +3024,9 @@ static struct clk_branch camss_csi3_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi3_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2799,7 +3041,9 @@ static struct clk_branch camss_csi3phy_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi3phy_clk",
- .parent_names = (const char *[]){ "csi3_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi3_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2814,7 +3058,9 @@ static struct clk_branch camss_csi3rdi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi3rdi_clk",
- .parent_names = (const char *[]){ "csi3_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi3_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2829,7 +3075,9 @@ static struct clk_branch camss_csi3pix_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_csi3pix_clk",
- .parent_names = (const char *[]){ "csi3_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi3_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2844,7 +3092,9 @@ static struct clk_branch camss_ispif_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camss_ispif_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2859,7 +3109,9 @@ static struct clk_branch fd_core_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "fd_core_clk",
- .parent_names = (const char *[]){ "fd_core_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &fd_core_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2874,7 +3126,9 @@ static struct clk_branch fd_core_uar_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "fd_core_uar_clk",
- .parent_names = (const char *[]){ "fd_core_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &fd_core_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2889,7 +3143,9 @@ static struct clk_branch fd_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "fd_ahb_clk",
- .parent_names = (const char *[]){ "ahb_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ahb_clk_src.clkr.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
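These mmcc-msm8996 hunks all make the same conversion: the legacy string-based .parent_names lookup is replaced by .parent_hws, which points straight at the parent's struct clk_hw so the clock framework resolves the parent without a global name search. A minimal sketch of the pattern, using a hypothetical example_clk_src parent that is not part of this patch:

static struct clk_branch example_branch_clk = {
	.halt_reg = 0x1000,			/* hypothetical offsets */
	.clkr = {
		.enable_reg = 0x1000,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "example_branch_clk",
			/* was: .parent_names = (const char *[]){ "example_clk_src" }, */
			.parent_hws = (const struct clk_hw*[]){
				&example_clk_src.clkr.hw
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

Only the parent reference changes; .num_parents, the flags and the ops keep their existing values.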
diff --git a/drivers/clk/qcom/videocc-sm8250.c b/drivers/clk/qcom/videocc-sm8250.c
index 8617454e4a77..f28f2cb051d7 100644
--- a/drivers/clk/qcom/videocc-sm8250.c
+++ b/drivers/clk/qcom/videocc-sm8250.c
@@ -277,7 +277,6 @@ static struct gdsc mvs0c_gdsc = {
},
.flags = 0,
.pwrsts = PWRSTS_OFF_ON,
- .supply = "mmcx",
};
static struct gdsc mvs1c_gdsc = {
@@ -287,7 +286,6 @@ static struct gdsc mvs1c_gdsc = {
},
.flags = 0,
.pwrsts = PWRSTS_OFF_ON,
- .supply = "mmcx",
};
static struct gdsc mvs0_gdsc = {
@@ -297,7 +295,6 @@ static struct gdsc mvs0_gdsc = {
},
.flags = HW_CTRL,
.pwrsts = PWRSTS_OFF_ON,
- .supply = "mmcx",
};
static struct gdsc mvs1_gdsc = {
@@ -307,7 +304,6 @@ static struct gdsc mvs1_gdsc = {
},
.flags = HW_CTRL,
.pwrsts = PWRSTS_OFF_ON,
- .supply = "mmcx",
};
static struct clk_regmap *video_cc_sm8250_clocks[] = {
diff --git a/drivers/clk/renesas/clk-r8a73a4.c b/drivers/clk/renesas/clk-r8a73a4.c
index cfed11c659d9..f45c2c45808b 100644
--- a/drivers/clk/renesas/clk-r8a73a4.c
+++ b/drivers/clk/renesas/clk-r8a73a4.c
@@ -18,7 +18,6 @@
struct r8a73a4_cpg {
struct clk_onecell_data data;
spinlock_t lock;
- void __iomem *reg;
};
#define CPG_CKSCR 0xc0
@@ -59,7 +58,7 @@ static const struct clk_div_table div4_div_table[] = {
static struct clk * __init
r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg,
- const char *name)
+ void __iomem *base, const char *name)
{
const struct clk_div_table *table = NULL;
const char *parent_name;
@@ -69,7 +68,7 @@ r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg,
if (!strcmp(name, "main")) {
- u32 ckscr = readl(cpg->reg + CPG_CKSCR);
+ u32 ckscr = readl(base + CPG_CKSCR);
switch ((ckscr >> 28) & 3) {
case 0: /* extal1 */
@@ -93,14 +92,14 @@ r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg,
* clock implementation and we currently have no need to change
* the multiplier value.
*/
- u32 value = readl(cpg->reg + CPG_PLL0CR);
+ u32 value = readl(base + CPG_PLL0CR);
parent_name = "main";
mult = ((value >> 24) & 0x7f) + 1;
if (value & BIT(20))
div = 2;
} else if (!strcmp(name, "pll1")) {
- u32 value = readl(cpg->reg + CPG_PLL1CR);
+ u32 value = readl(base + CPG_PLL1CR);
parent_name = "main";
/* XXX: enable bit? */
@@ -123,7 +122,7 @@ r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg,
default:
return ERR_PTR(-EINVAL);
}
- value = readl(cpg->reg + cr);
+ value = readl(base + cr);
switch ((value >> 5) & 7) {
case 0:
parent_name = "main";
@@ -159,7 +158,7 @@ r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg,
shift = 0;
}
div *= 32;
- mult = 0x20 - ((readl(cpg->reg + CPG_FRQCRC) >> shift) & 0x1f);
+ mult = 0x20 - ((readl(base + CPG_FRQCRC) >> shift) & 0x1f);
} else {
struct div4_clk *c;
@@ -181,7 +180,7 @@ r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg,
mult, div);
} else {
return clk_register_divider_table(NULL, name, parent_name, 0,
- cpg->reg + reg, shift, 4, 0,
+ base + reg, shift, 4, 0,
table, &cpg->lock);
}
}
@@ -189,6 +188,7 @@ r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg,
static void __init r8a73a4_cpg_clocks_init(struct device_node *np)
{
struct r8a73a4_cpg *cpg;
+ void __iomem *base;
struct clk **clks;
unsigned int i;
int num_clks;
@@ -213,8 +213,8 @@ static void __init r8a73a4_cpg_clocks_init(struct device_node *np)
cpg->data.clks = clks;
cpg->data.clk_num = num_clks;
- cpg->reg = of_iomap(np, 0);
- if (WARN_ON(cpg->reg == NULL))
+ base = of_iomap(np, 0);
+ if (WARN_ON(base == NULL))
return;
for (i = 0; i < num_clks; ++i) {
@@ -224,7 +224,7 @@ static void __init r8a73a4_cpg_clocks_init(struct device_node *np)
of_property_read_string_index(np, "clock-output-names", i,
&name);
- clk = r8a73a4_cpg_register_clock(np, cpg, name);
+ clk = r8a73a4_cpg_register_clock(np, cpg, base, name);
if (IS_ERR(clk))
pr_err("%s: failed to register %pOFn %s clock (%ld)\n",
__func__, np, name, PTR_ERR(clk));
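The r8a73a4 conversion above, and the r8a7740 and sh73a0 ones below, share one idea: the ioremapped CPG window is only needed while the clocks are registered at init time, so the of_iomap() result is kept in a local variable and passed to the per-clock helper instead of being stored in the private structure (which still carries the onecell data and the spinlock used by divider clocks). A sketch of the resulting shape, with hypothetical register and type names:

static struct clk * __init
example_cpg_register_clock(struct device_node *np, struct example_cpg *cpg,
			   void __iomem *base, const char *name)
{
	/* configuration is read from the mapping handed in by the init code */
	u32 value = readl(base + EXAMPLE_PLLCR);	/* hypothetical register */

	return clk_register_fixed_factor(NULL, name,
					 of_clk_get_parent_name(np, 0), 0,
					 ((value >> 24) & 0x7f) + 1, 1);
}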
diff --git a/drivers/clk/renesas/clk-r8a7740.c b/drivers/clk/renesas/clk-r8a7740.c
index d8190f007a81..3ee3f57e4e9a 100644
--- a/drivers/clk/renesas/clk-r8a7740.c
+++ b/drivers/clk/renesas/clk-r8a7740.c
@@ -18,7 +18,6 @@
struct r8a7740_cpg {
struct clk_onecell_data data;
spinlock_t lock;
- void __iomem *reg;
};
#define CPG_FRQCRA 0x00
@@ -61,7 +60,7 @@ static u32 cpg_mode __initdata;
static struct clk * __init
r8a7740_cpg_register_clock(struct device_node *np, struct r8a7740_cpg *cpg,
- const char *name)
+ void __iomem *base, const char *name)
{
const struct clk_div_table *table = NULL;
const char *parent_name;
@@ -96,20 +95,20 @@ r8a7740_cpg_register_clock(struct device_node *np, struct r8a7740_cpg *cpg,
* clock implementation and we currently have no need to change
* the multiplier value.
*/
- u32 value = readl(cpg->reg + CPG_FRQCRC);
+ u32 value = readl(base + CPG_FRQCRC);
parent_name = "system";
mult = ((value >> 24) & 0x7f) + 1;
} else if (!strcmp(name, "pllc1")) {
- u32 value = readl(cpg->reg + CPG_FRQCRA);
+ u32 value = readl(base + CPG_FRQCRA);
parent_name = "system";
mult = ((value >> 24) & 0x7f) + 1;
div = 2;
} else if (!strcmp(name, "pllc2")) {
- u32 value = readl(cpg->reg + CPG_PLLC2CR);
+ u32 value = readl(base + CPG_PLLC2CR);
parent_name = "system";
mult = ((value >> 24) & 0x3f) + 1;
} else if (!strcmp(name, "usb24s")) {
- u32 value = readl(cpg->reg + CPG_USBCKCR);
+ u32 value = readl(base + CPG_USBCKCR);
if (value & BIT(7))
/* extal2 */
parent_name = of_clk_get_parent_name(np, 1);
@@ -137,7 +136,7 @@ r8a7740_cpg_register_clock(struct device_node *np, struct r8a7740_cpg *cpg,
mult, div);
} else {
return clk_register_divider_table(NULL, name, parent_name, 0,
- cpg->reg + reg, shift, 4, 0,
+ base + reg, shift, 4, 0,
table, &cpg->lock);
}
}
@@ -145,6 +144,7 @@ r8a7740_cpg_register_clock(struct device_node *np, struct r8a7740_cpg *cpg,
static void __init r8a7740_cpg_clocks_init(struct device_node *np)
{
struct r8a7740_cpg *cpg;
+ void __iomem *base;
struct clk **clks;
unsigned int i;
int num_clks;
@@ -172,8 +172,8 @@ static void __init r8a7740_cpg_clocks_init(struct device_node *np)
cpg->data.clks = clks;
cpg->data.clk_num = num_clks;
- cpg->reg = of_iomap(np, 0);
- if (WARN_ON(cpg->reg == NULL))
+ base = of_iomap(np, 0);
+ if (WARN_ON(base == NULL))
return;
for (i = 0; i < num_clks; ++i) {
@@ -183,7 +183,7 @@ static void __init r8a7740_cpg_clocks_init(struct device_node *np)
of_property_read_string_index(np, "clock-output-names", i,
&name);
- clk = r8a7740_cpg_register_clock(np, cpg, name);
+ clk = r8a7740_cpg_register_clock(np, cpg, base, name);
if (IS_ERR(clk))
pr_err("%s: failed to register %pOFn %s clock (%ld)\n",
__func__, np, name, PTR_ERR(clk));
diff --git a/drivers/clk/renesas/clk-r8a7778.c b/drivers/clk/renesas/clk-r8a7778.c
index 3ccc53685bdd..797556259370 100644
--- a/drivers/clk/renesas/clk-r8a7778.c
+++ b/drivers/clk/renesas/clk-r8a7778.c
@@ -11,12 +11,6 @@
#include <linux/slab.h>
#include <linux/soc/renesas/rcar-rst.h>
-struct r8a7778_cpg {
- struct clk_onecell_data data;
- spinlock_t lock;
- void __iomem *reg;
-};
-
/* PLL multipliers per bits 11, 12, and 18 of MODEMR */
static const struct {
unsigned long plla_mult;
@@ -47,8 +41,7 @@ static u32 cpg_mode_rates __initdata;
static u32 cpg_mode_divs __initdata;
static struct clk * __init
-r8a7778_cpg_register_clock(struct device_node *np, struct r8a7778_cpg *cpg,
- const char *name)
+r8a7778_cpg_register_clock(struct device_node *np, const char *name)
{
if (!strcmp(name, "plla")) {
return clk_register_fixed_factor(NULL, "plla",
@@ -77,7 +70,7 @@ r8a7778_cpg_register_clock(struct device_node *np, struct r8a7778_cpg *cpg,
static void __init r8a7778_cpg_clocks_init(struct device_node *np)
{
- struct r8a7778_cpg *cpg;
+ struct clk_onecell_data *data;
struct clk **clks;
unsigned int i;
int num_clks;
@@ -100,23 +93,17 @@ static void __init r8a7778_cpg_clocks_init(struct device_node *np)
return;
}
- cpg = kzalloc(sizeof(*cpg), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
clks = kcalloc(num_clks, sizeof(*clks), GFP_KERNEL);
- if (cpg == NULL || clks == NULL) {
+ if (data == NULL || clks == NULL) {
/* We're leaking memory on purpose, there's no point in cleaning
* up as the system won't boot anyway.
*/
return;
}
- spin_lock_init(&cpg->lock);
-
- cpg->data.clks = clks;
- cpg->data.clk_num = num_clks;
-
- cpg->reg = of_iomap(np, 0);
- if (WARN_ON(cpg->reg == NULL))
- return;
+ data->clks = clks;
+ data->clk_num = num_clks;
for (i = 0; i < num_clks; ++i) {
const char *name;
@@ -125,15 +112,15 @@ static void __init r8a7778_cpg_clocks_init(struct device_node *np)
of_property_read_string_index(np, "clock-output-names", i,
&name);
- clk = r8a7778_cpg_register_clock(np, cpg, name);
+ clk = r8a7778_cpg_register_clock(np, name);
if (IS_ERR(clk))
pr_err("%s: failed to register %pOFn %s clock (%ld)\n",
__func__, np, name, PTR_ERR(clk));
else
- cpg->data.clks[i] = clk;
+ data->clks[i] = clk;
}
- of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data);
+ of_clk_add_provider(np, of_clk_src_onecell_get, data);
cpg_mstp_add_clk_domain(np);
}
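r8a7778 goes a step further, and r8a7779 and clk-rz below do the same: its private struct held only the clk_onecell_data plus a register pointer and lock that nothing used, so the patch allocates the clk_onecell_data directly. Roughly, the reduced init path looks like this (hypothetical names, error handling as in the driver):

static void __init example_cpg_clocks_init(struct device_node *np)
{
	struct clk_onecell_data *data;
	struct clk **clks;
	int num_clks;

	num_clks = of_property_count_strings(np, "clock-output-names");
	if (num_clks < 1)
		return;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	clks = kcalloc(num_clks, sizeof(*clks), GFP_KERNEL);
	if (!data || !clks)
		return;	/* leaked on purpose, the system cannot boot anyway */

	data->clks = clks;
	data->clk_num = num_clks;
	/* each clock is registered and stored in data->clks[i] here */
	of_clk_add_provider(np, of_clk_src_onecell_get, data);
}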
diff --git a/drivers/clk/renesas/clk-r8a7779.c b/drivers/clk/renesas/clk-r8a7779.c
index 9f3b5522eef5..9a2fea8cf4d7 100644
--- a/drivers/clk/renesas/clk-r8a7779.c
+++ b/drivers/clk/renesas/clk-r8a7779.c
@@ -21,12 +21,6 @@
#define CPG_NUM_CLOCKS (R8A7779_CLK_OUT + 1)
-struct r8a7779_cpg {
- struct clk_onecell_data data;
- spinlock_t lock;
- void __iomem *reg;
-};
-
/* -----------------------------------------------------------------------------
* CPG Clock Data
*/
@@ -87,7 +81,7 @@ static const unsigned int cpg_plla_mult[4] __initconst = { 42, 48, 56, 64 };
*/
static struct clk * __init
-r8a7779_cpg_register_clock(struct device_node *np, struct r8a7779_cpg *cpg,
+r8a7779_cpg_register_clock(struct device_node *np,
const struct cpg_clk_config *config,
unsigned int plla_mult, const char *name)
{
@@ -119,7 +113,7 @@ r8a7779_cpg_register_clock(struct device_node *np, struct r8a7779_cpg *cpg,
static void __init r8a7779_cpg_clocks_init(struct device_node *np)
{
const struct cpg_clk_config *config;
- struct r8a7779_cpg *cpg;
+ struct clk_onecell_data *data;
struct clk **clks;
unsigned int i, plla_mult;
int num_clks;
@@ -134,19 +128,17 @@ static void __init r8a7779_cpg_clocks_init(struct device_node *np)
return;
}
- cpg = kzalloc(sizeof(*cpg), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
clks = kcalloc(CPG_NUM_CLOCKS, sizeof(*clks), GFP_KERNEL);
- if (cpg == NULL || clks == NULL) {
+ if (data == NULL || clks == NULL) {
/* We're leaking memory on purpose, there's no point in cleaning
* up as the system won't boot anyway.
*/
return;
}
- spin_lock_init(&cpg->lock);
-
- cpg->data.clks = clks;
- cpg->data.clk_num = num_clks;
+ data->clks = clks;
+ data->clk_num = num_clks;
config = &cpg_clk_configs[CPG_CLK_CONFIG_INDEX(mode)];
plla_mult = cpg_plla_mult[CPG_PLLA_MULT_INDEX(mode)];
@@ -158,16 +150,15 @@ static void __init r8a7779_cpg_clocks_init(struct device_node *np)
of_property_read_string_index(np, "clock-output-names", i,
&name);
- clk = r8a7779_cpg_register_clock(np, cpg, config,
- plla_mult, name);
+ clk = r8a7779_cpg_register_clock(np, config, plla_mult, name);
if (IS_ERR(clk))
pr_err("%s: failed to register %pOFn %s clock (%ld)\n",
__func__, np, name, PTR_ERR(clk));
else
- cpg->data.clks[i] = clk;
+ data->clks[i] = clk;
}
- of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data);
+ of_clk_add_provider(np, of_clk_src_onecell_get, data);
cpg_mstp_add_clk_domain(np);
}
diff --git a/drivers/clk/renesas/clk-rz.c b/drivers/clk/renesas/clk-rz.c
index 7b703f14e20b..e770f09a27ed 100644
--- a/drivers/clk/renesas/clk-rz.c
+++ b/drivers/clk/renesas/clk-rz.c
@@ -15,11 +15,6 @@
#include <linux/of_address.h>
#include <linux/slab.h>
-struct rz_cpg {
- struct clk_onecell_data data;
- void __iomem *reg;
-};
-
#define CPG_FRQCR 0x10
#define CPG_FRQCR2 0x14
@@ -49,7 +44,8 @@ static u16 __init rz_cpg_read_mode_pins(void)
}
static struct clk * __init
-rz_cpg_register_clock(struct device_node *np, struct rz_cpg *cpg, const char *name)
+rz_cpg_register_clock(struct device_node *np, void __iomem *base,
+ const char *name)
{
u32 val;
unsigned mult;
@@ -65,7 +61,7 @@ rz_cpg_register_clock(struct device_node *np, struct rz_cpg *cpg, const char *na
}
/* If mapping regs failed, skip non-pll clocks. System will boot anyhow */
- if (!cpg->reg)
+ if (!base)
return ERR_PTR(-ENXIO);
/* FIXME:"i" and "g" are variable clocks with non-integer dividers (e.g. 2/3)
@@ -73,9 +69,9 @@ rz_cpg_register_clock(struct device_node *np, struct rz_cpg *cpg, const char *na
* let them run at fixed current speed and implement the details later.
*/
if (strcmp(name, "i") == 0)
- val = (readl(cpg->reg + CPG_FRQCR) >> 8) & 3;
+ val = (readl(base + CPG_FRQCR) >> 8) & 3;
else if (strcmp(name, "g") == 0)
- val = readl(cpg->reg + CPG_FRQCR2) & 3;
+ val = readl(base + CPG_FRQCR2) & 3;
else
return ERR_PTR(-EINVAL);
@@ -85,8 +81,9 @@ rz_cpg_register_clock(struct device_node *np, struct rz_cpg *cpg, const char *na
static void __init rz_cpg_clocks_init(struct device_node *np)
{
- struct rz_cpg *cpg;
+ struct clk_onecell_data *data;
struct clk **clks;
+ void __iomem *base;
unsigned i;
int num_clks;
@@ -94,14 +91,14 @@ static void __init rz_cpg_clocks_init(struct device_node *np)
if (WARN(num_clks <= 0, "can't count CPG clocks\n"))
return;
- cpg = kzalloc(sizeof(*cpg), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
clks = kcalloc(num_clks, sizeof(*clks), GFP_KERNEL);
- BUG_ON(!cpg || !clks);
+ BUG_ON(!data || !clks);
- cpg->data.clks = clks;
- cpg->data.clk_num = num_clks;
+ data->clks = clks;
+ data->clk_num = num_clks;
- cpg->reg = of_iomap(np, 0);
+ base = of_iomap(np, 0);
for (i = 0; i < num_clks; ++i) {
const char *name;
@@ -109,15 +106,15 @@ static void __init rz_cpg_clocks_init(struct device_node *np)
of_property_read_string_index(np, "clock-output-names", i, &name);
- clk = rz_cpg_register_clock(np, cpg, name);
+ clk = rz_cpg_register_clock(np, base, name);
if (IS_ERR(clk))
pr_err("%s: failed to register %pOFn %s clock (%ld)\n",
__func__, np, name, PTR_ERR(clk));
else
- cpg->data.clks[i] = clk;
+ data->clks[i] = clk;
}
- of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data);
+ of_clk_add_provider(np, of_clk_src_onecell_get, data);
cpg_mstp_add_clk_domain(np);
}
diff --git a/drivers/clk/renesas/clk-sh73a0.c b/drivers/clk/renesas/clk-sh73a0.c
index 4146c1d717b9..8c51090f13e1 100644
--- a/drivers/clk/renesas/clk-sh73a0.c
+++ b/drivers/clk/renesas/clk-sh73a0.c
@@ -18,7 +18,6 @@
struct sh73a0_cpg {
struct clk_onecell_data data;
spinlock_t lock;
- void __iomem *reg;
};
#define CPG_FRQCRA 0x00
@@ -73,7 +72,7 @@ static const struct clk_div_table z_div_table[] = {
static struct clk * __init
sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg,
- const char *name)
+ void __iomem *base, const char *name)
{
const struct clk_div_table *table = NULL;
unsigned int shift, reg, width;
@@ -83,12 +82,12 @@ sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg,
if (!strcmp(name, "main")) {
/* extal1, extal1_div2, extal2, extal2_div2 */
- u32 parent_idx = (readl(cpg->reg + CPG_CKSCR) >> 28) & 3;
+ u32 parent_idx = (readl(base + CPG_CKSCR) >> 28) & 3;
parent_name = of_clk_get_parent_name(np, parent_idx >> 1);
div = (parent_idx & 1) + 1;
} else if (!strncmp(name, "pll", 3)) {
- void __iomem *enable_reg = cpg->reg;
+ void __iomem *enable_reg = base;
u32 enable_bit = name[3] - '0';
parent_name = "main";
@@ -108,7 +107,7 @@ sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg,
default:
return ERR_PTR(-EINVAL);
}
- if (readl(cpg->reg + CPG_PLLECR) & BIT(enable_bit)) {
+ if (readl(base + CPG_PLLECR) & BIT(enable_bit)) {
mult = ((readl(enable_reg) >> 24) & 0x3f) + 1;
/* handle CFG bit for PLL1 and PLL2 */
if (enable_bit == 1 || enable_bit == 2)
@@ -117,7 +116,7 @@ sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg,
}
} else if (!strcmp(name, "dsi0phy") || !strcmp(name, "dsi1phy")) {
u32 phy_no = name[3] - '0';
- void __iomem *dsi_reg = cpg->reg +
+ void __iomem *dsi_reg = base +
(phy_no ? CPG_DSI1PHYCR : CPG_DSI0PHYCR);
parent_name = phy_no ? "dsi1pck" : "dsi0pck";
@@ -154,7 +153,7 @@ sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg,
mult, div);
} else {
return clk_register_divider_table(NULL, name, parent_name, 0,
- cpg->reg + reg, shift, width, 0,
+ base + reg, shift, width, 0,
table, &cpg->lock);
}
}
@@ -162,6 +161,7 @@ sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg,
static void __init sh73a0_cpg_clocks_init(struct device_node *np)
{
struct sh73a0_cpg *cpg;
+ void __iomem *base;
struct clk **clks;
unsigned int i;
int num_clks;
@@ -186,14 +186,14 @@ static void __init sh73a0_cpg_clocks_init(struct device_node *np)
cpg->data.clks = clks;
cpg->data.clk_num = num_clks;
- cpg->reg = of_iomap(np, 0);
- if (WARN_ON(cpg->reg == NULL))
+ base = of_iomap(np, 0);
+ if (WARN_ON(base == NULL))
return;
/* Set SDHI clocks to a known state */
- writel(0x108, cpg->reg + CPG_SD0CKCR);
- writel(0x108, cpg->reg + CPG_SD1CKCR);
- writel(0x108, cpg->reg + CPG_SD2CKCR);
+ writel(0x108, base + CPG_SD0CKCR);
+ writel(0x108, base + CPG_SD1CKCR);
+ writel(0x108, base + CPG_SD2CKCR);
for (i = 0; i < num_clks; ++i) {
const char *name;
@@ -202,7 +202,7 @@ static void __init sh73a0_cpg_clocks_init(struct device_node *np)
of_property_read_string_index(np, "clock-output-names", i,
&name);
- clk = sh73a0_cpg_register_clock(np, cpg, name);
+ clk = sh73a0_cpg_register_clock(np, cpg, base, name);
if (IS_ERR(clk))
pr_err("%s: failed to register %pOFn %s clock (%ld)\n",
__func__, np, name, PTR_ERR(clk));
diff --git a/drivers/clk/renesas/r8a779f0-cpg-mssr.c b/drivers/clk/renesas/r8a779f0-cpg-mssr.c
index c17ebe6b5992..cd80b6084ece 100644
--- a/drivers/clk/renesas/r8a779f0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779f0-cpg-mssr.c
@@ -77,6 +77,8 @@ static const struct cpg_core_clk r8a779f0_core_clks[] __initconst = {
DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN4_RPCSRC, CLK_PLL5),
/* Core Clock Outputs */
+ DEF_GEN4_Z("z0", R8A779F0_CLK_Z0, CLK_TYPE_GEN4_Z, CLK_PLL2, 2, 0),
+ DEF_GEN4_Z("z1", R8A779F0_CLK_Z1, CLK_TYPE_GEN4_Z, CLK_PLL2, 2, 8),
DEF_FIXED("s0d2", R8A779F0_CLK_S0D2, CLK_S0, 2, 1),
DEF_FIXED("s0d3", R8A779F0_CLK_S0D3, CLK_S0, 3, 1),
DEF_FIXED("s0d4", R8A779F0_CLK_S0D4, CLK_S0, 4, 1),
@@ -118,20 +120,28 @@ static const struct cpg_core_clk r8a779f0_core_clks[] __initconst = {
};
static const struct mssr_mod_clk r8a779f0_mod_clks[] __initconst = {
+ DEF_MOD("hscif0", 514, R8A779F0_CLK_S0D3),
+ DEF_MOD("hscif1", 515, R8A779F0_CLK_S0D3),
+ DEF_MOD("hscif2", 516, R8A779F0_CLK_S0D3),
+ DEF_MOD("hscif3", 517, R8A779F0_CLK_S0D3),
DEF_MOD("i2c0", 518, R8A779F0_CLK_S0D6_PER),
DEF_MOD("i2c1", 519, R8A779F0_CLK_S0D6_PER),
DEF_MOD("i2c2", 520, R8A779F0_CLK_S0D6_PER),
DEF_MOD("i2c3", 521, R8A779F0_CLK_S0D6_PER),
DEF_MOD("i2c4", 522, R8A779F0_CLK_S0D6_PER),
DEF_MOD("i2c5", 523, R8A779F0_CLK_S0D6_PER),
+ DEF_MOD("pcie0", 624, R8A779F0_CLK_S0D2),
+ DEF_MOD("pcie1", 625, R8A779F0_CLK_S0D2),
DEF_MOD("scif0", 702, R8A779F0_CLK_S0D12_PER),
DEF_MOD("scif1", 703, R8A779F0_CLK_S0D12_PER),
DEF_MOD("scif3", 704, R8A779F0_CLK_S0D12_PER),
DEF_MOD("scif4", 705, R8A779F0_CLK_S0D12_PER),
+ DEF_MOD("sdhi0", 706, R8A779F0_CLK_SD0),
DEF_MOD("sys-dmac0", 709, R8A779F0_CLK_S0D3_PER),
DEF_MOD("sys-dmac1", 710, R8A779F0_CLK_S0D3_PER),
DEF_MOD("wdt", 907, R8A779F0_CLK_R),
DEF_MOD("pfc0", 915, R8A779F0_CLK_CL16M),
+ DEF_MOD("tsc", 919, R8A779F0_CLK_CL16M),
DEF_MOD("ufs", 1514, R8A779F0_CLK_S0D4_HSC),
};
diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c
index 35ffc462af1a..1488c9d6e639 100644
--- a/drivers/clk/renesas/r9a06g032-clocks.c
+++ b/drivers/clk/renesas/r9a06g032-clocks.c
@@ -51,11 +51,9 @@ struct r9a06g032_clkdesc {
struct {
u16 div, mul;
};
- unsigned int factor;
- unsigned int frequency;
/* for dual gate */
struct {
- uint16_t group : 1, index: 3;
+ uint16_t group : 1;
u16 sel, g1, r1, g2, r2;
} dual;
};
@@ -85,10 +83,10 @@ struct r9a06g032_clkdesc {
.source = 1 + R9A06G032_##_src, .name = _n, \
.reg = _reg, .div_min = _min, .div_max = _max, \
.div_table = { __VA_ARGS__ } }
-#define D_UGATE(_idx, _n, _src, _g, _gi, _g1, _r1, _g2, _r2) \
+#define D_UGATE(_idx, _n, _src, _g, _g1, _r1, _g2, _r2) \
{ .type = K_DUALGATE, .index = R9A06G032_##_idx, \
.source = 1 + R9A06G032_##_src, .name = _n, \
- .dual = { .group = _g, .index = _gi, \
+ .dual = { .group = _g, \
.g1 = _g1, .r1 = _r1, .g2 = _g2, .r2 = _r2 }, }
enum { K_GATE = 0, K_FFC, K_DIV, K_BITSEL, K_DUALGATE };
@@ -290,8 +288,8 @@ static const struct r9a06g032_clkdesc r9a06g032_clocks[] = {
.name = "uart_group_012",
.type = K_BITSEL,
.source = 1 + R9A06G032_DIV_UART,
- /* R9A06G032_SYSCTRL_REG_PWRCTRL_PG1_PR2 */
- .dual.sel = ((0xec / 4) << 5) | 24,
+ /* R9A06G032_SYSCTRL_REG_PWRCTRL_PG0_0 */
+ .dual.sel = ((0x34 / 4) << 5) | 30,
.dual.group = 0,
},
{
@@ -299,18 +297,18 @@ static const struct r9a06g032_clkdesc r9a06g032_clocks[] = {
.name = "uart_group_34567",
.type = K_BITSEL,
.source = 1 + R9A06G032_DIV_P2_PG,
- /* R9A06G032_SYSCTRL_REG_PWRCTRL_PG0_0 */
- .dual.sel = ((0x34 / 4) << 5) | 30,
+ /* R9A06G032_SYSCTRL_REG_PWRCTRL_PG1_PR2 */
+ .dual.sel = ((0xec / 4) << 5) | 24,
.dual.group = 1,
},
- D_UGATE(CLK_UART0, "clk_uart0", UART_GROUP_012, 0, 0, 0x1b2, 0x1b3, 0x1b4, 0x1b5),
- D_UGATE(CLK_UART1, "clk_uart1", UART_GROUP_012, 0, 1, 0x1b6, 0x1b7, 0x1b8, 0x1b9),
- D_UGATE(CLK_UART2, "clk_uart2", UART_GROUP_012, 0, 2, 0x1ba, 0x1bb, 0x1bc, 0x1bd),
- D_UGATE(CLK_UART3, "clk_uart3", UART_GROUP_34567, 1, 0, 0x760, 0x761, 0x762, 0x763),
- D_UGATE(CLK_UART4, "clk_uart4", UART_GROUP_34567, 1, 1, 0x764, 0x765, 0x766, 0x767),
- D_UGATE(CLK_UART5, "clk_uart5", UART_GROUP_34567, 1, 2, 0x768, 0x769, 0x76a, 0x76b),
- D_UGATE(CLK_UART6, "clk_uart6", UART_GROUP_34567, 1, 3, 0x76c, 0x76d, 0x76e, 0x76f),
- D_UGATE(CLK_UART7, "clk_uart7", UART_GROUP_34567, 1, 4, 0x770, 0x771, 0x772, 0x773),
+ D_UGATE(CLK_UART0, "clk_uart0", UART_GROUP_012, 0, 0x1b2, 0x1b3, 0x1b4, 0x1b5),
+ D_UGATE(CLK_UART1, "clk_uart1", UART_GROUP_012, 0, 0x1b6, 0x1b7, 0x1b8, 0x1b9),
+ D_UGATE(CLK_UART2, "clk_uart2", UART_GROUP_012, 0, 0x1ba, 0x1bb, 0x1bc, 0x1bd),
+ D_UGATE(CLK_UART3, "clk_uart3", UART_GROUP_34567, 1, 0x760, 0x761, 0x762, 0x763),
+ D_UGATE(CLK_UART4, "clk_uart4", UART_GROUP_34567, 1, 0x764, 0x765, 0x766, 0x767),
+ D_UGATE(CLK_UART5, "clk_uart5", UART_GROUP_34567, 1, 0x768, 0x769, 0x76a, 0x76b),
+ D_UGATE(CLK_UART6, "clk_uart6", UART_GROUP_34567, 1, 0x76c, 0x76d, 0x76e, 0x76f),
+ D_UGATE(CLK_UART7, "clk_uart7", UART_GROUP_34567, 1, 0x770, 0x771, 0x772, 0x773),
};
struct r9a06g032_priv {
diff --git a/drivers/clk/renesas/r9a07g043-cpg.c b/drivers/clk/renesas/r9a07g043-cpg.c
index 33c2bd8df2e5..37475465100d 100644
--- a/drivers/clk/renesas/r9a07g043-cpg.c
+++ b/drivers/clk/renesas/r9a07g043-cpg.c
@@ -36,9 +36,11 @@ enum clk_ids {
CLK_PLL3_DIV2_4_2,
CLK_SEL_PLL3_3,
CLK_DIV_PLL3_C,
+#ifdef CONFIG_ARM64
CLK_PLL5,
CLK_PLL5_500,
CLK_PLL5_250,
+#endif
CLK_PLL6,
CLK_PLL6_250,
CLK_P1_DIV2,
@@ -100,9 +102,11 @@ static const struct cpg_core_clk r9a07g043_core_clks[] __initconst = {
DEF_FIXED(".pll3_533", CLK_PLL3_533, CLK_PLL3, 1, 3),
DEF_MUX_RO(".sel_pll3_3", CLK_SEL_PLL3_3, SEL_PLL3_3, sel_pll3_3),
DEF_DIV("divpl3c", CLK_DIV_PLL3_C, CLK_SEL_PLL3_3, DIVPL3C, dtable_1_32),
+#ifdef CONFIG_ARM64
DEF_FIXED(".pll5", CLK_PLL5, CLK_EXTAL, 125, 1),
DEF_FIXED(".pll5_500", CLK_PLL5_500, CLK_PLL5, 1, 6),
DEF_FIXED(".pll5_250", CLK_PLL5_250, CLK_PLL5_500, 1, 2),
+#endif
DEF_FIXED(".pll6", CLK_PLL6, CLK_EXTAL, 125, 6),
DEF_FIXED(".pll6_250", CLK_PLL6_250, CLK_PLL6, 1, 2),
@@ -126,12 +130,20 @@ static const struct cpg_core_clk r9a07g043_core_clks[] __initconst = {
};
static struct rzg2l_mod_clk r9a07g043_mod_clks[] = {
+#ifdef CONFIG_ARM64
DEF_MOD("gic", R9A07G043_GIC600_GICCLK, R9A07G043_CLK_P1,
0x514, 0),
DEF_MOD("ia55_pclk", R9A07G043_IA55_PCLK, R9A07G043_CLK_P2,
0x518, 0),
DEF_MOD("ia55_clk", R9A07G043_IA55_CLK, R9A07G043_CLK_P1,
0x518, 1),
+#endif
+#ifdef CONFIG_RISCV
+ DEF_MOD("iax45_pclk", R9A07G043_IAX45_PCLK, R9A07G043_CLK_P2,
+ 0x518, 0),
+ DEF_MOD("iax45_clk", R9A07G043_IAX45_CLK, R9A07G043_CLK_P1,
+ 0x518, 1),
+#endif
DEF_MOD("dmac_aclk", R9A07G043_DMAC_ACLK, R9A07G043_CLK_P1,
0x52c, 0),
DEF_MOD("dmac_pclk", R9A07G043_DMAC_PCLK, CLK_P1_DIV2,
@@ -243,9 +255,14 @@ static struct rzg2l_mod_clk r9a07g043_mod_clks[] = {
};
static struct rzg2l_reset r9a07g043_resets[] = {
+#ifdef CONFIG_ARM64
DEF_RST(R9A07G043_GIC600_GICRESET_N, 0x814, 0),
DEF_RST(R9A07G043_GIC600_DBG_GICRESET_N, 0x814, 1),
DEF_RST(R9A07G043_IA55_RESETN, 0x818, 0),
+#endif
+#ifdef CONFIG_RISCV
+ DEF_RST(R9A07G043_IAX45_RESETN, 0x818, 0),
+#endif
DEF_RST(R9A07G043_DMAC_ARESETN, 0x82c, 0),
DEF_RST(R9A07G043_DMAC_RST_ASYNC, 0x82c, 1),
DEF_RST(R9A07G043_OSTM0_PRESETZ, 0x834, 0),
@@ -291,8 +308,13 @@ static struct rzg2l_reset r9a07g043_resets[] = {
};
static const unsigned int r9a07g043_crit_mod_clks[] __initconst = {
+#ifdef CONFIG_ARM64
MOD_CLK_BASE + R9A07G043_GIC600_GICCLK,
MOD_CLK_BASE + R9A07G043_IA55_CLK,
+#endif
+#ifdef CONFIG_RISCV
+ MOD_CLK_BASE + R9A07G043_IAX45_CLK,
+#endif
MOD_CLK_BASE + R9A07G043_DMAC_ACLK,
};
@@ -310,11 +332,21 @@ const struct rzg2l_cpg_info r9a07g043_cpg_info = {
/* Module Clocks */
.mod_clks = r9a07g043_mod_clks,
.num_mod_clks = ARRAY_SIZE(r9a07g043_mod_clks),
+#ifdef CONFIG_ARM64
.num_hw_mod_clks = R9A07G043_TSU_PCLK + 1,
+#endif
+#ifdef CONFIG_RISCV
+ .num_hw_mod_clks = R9A07G043_IAX45_PCLK + 1,
+#endif
/* Resets */
.resets = r9a07g043_resets,
+#ifdef CONFIG_ARM64
.num_resets = R9A07G043_TSU_PRESETN + 1, /* Last reset ID + 1 */
+#endif
+#ifdef CONFIG_RISCV
+ .num_resets = R9A07G043_IAX45_RESETN + 1, /* Last reset ID + 1 */
+#endif
.has_clk_mon_regs = true,
};
diff --git a/drivers/clk/renesas/r9a07g044-cpg.c b/drivers/clk/renesas/r9a07g044-cpg.c
index b288897852c7..fd7c4eecd398 100644
--- a/drivers/clk/renesas/r9a07g044-cpg.c
+++ b/drivers/clk/renesas/r9a07g044-cpg.c
@@ -182,7 +182,7 @@ static const struct {
};
static const struct {
- struct rzg2l_mod_clk common[71];
+ struct rzg2l_mod_clk common[76];
#ifdef CONFIG_CLK_R9A07G054
struct rzg2l_mod_clk drp[0];
#endif
@@ -204,6 +204,16 @@ static const struct {
0x534, 1),
DEF_MOD("ostm2_pclk", R9A07G044_OSTM2_PCLK, R9A07G044_CLK_P0,
0x534, 2),
+ DEF_MOD("gpt_pclk", R9A07G044_GPT_PCLK, R9A07G044_CLK_P0,
+ 0x540, 0),
+ DEF_MOD("poeg_a_clkp", R9A07G044_POEG_A_CLKP, R9A07G044_CLK_P0,
+ 0x544, 0),
+ DEF_MOD("poeg_b_clkp", R9A07G044_POEG_B_CLKP, R9A07G044_CLK_P0,
+ 0x544, 1),
+ DEF_MOD("poeg_c_clkp", R9A07G044_POEG_C_CLKP, R9A07G044_CLK_P0,
+ 0x544, 2),
+ DEF_MOD("poeg_d_clkp", R9A07G044_POEG_D_CLKP, R9A07G044_CLK_P0,
+ 0x544, 3),
DEF_MOD("wdt0_pclk", R9A07G044_WDT0_PCLK, R9A07G044_CLK_P0,
0x548, 0),
DEF_MOD("wdt0_clk", R9A07G044_WDT0_CLK, R9A07G044_OSCCLK,
@@ -346,6 +356,11 @@ static struct rzg2l_reset r9a07g044_resets[] = {
DEF_RST(R9A07G044_OSTM0_PRESETZ, 0x834, 0),
DEF_RST(R9A07G044_OSTM1_PRESETZ, 0x834, 1),
DEF_RST(R9A07G044_OSTM2_PRESETZ, 0x834, 2),
+ DEF_RST(R9A07G044_GPT_RST_C, 0x840, 0),
+ DEF_RST(R9A07G044_POEG_A_RST, 0x844, 0),
+ DEF_RST(R9A07G044_POEG_B_RST, 0x844, 1),
+ DEF_RST(R9A07G044_POEG_C_RST, 0x844, 2),
+ DEF_RST(R9A07G044_POEG_D_RST, 0x844, 3),
DEF_RST(R9A07G044_WDT0_PRESETN, 0x848, 0),
DEF_RST(R9A07G044_WDT1_PRESETN, 0x848, 1),
DEF_RST(R9A07G044_WDT2_PRESETN, 0x848, 2),
diff --git a/drivers/clk/renesas/r9a09g011-cpg.c b/drivers/clk/renesas/r9a09g011-cpg.c
index 40693bb85b80..b21915cf6648 100644
--- a/drivers/clk/renesas/r9a09g011-cpg.c
+++ b/drivers/clk/renesas/r9a09g011-cpg.c
@@ -126,19 +126,24 @@ static const struct cpg_core_clk r9a09g011_core_clks[] __initconst = {
};
static const struct rzg2l_mod_clk r9a09g011_mod_clks[] __initconst = {
+ DEF_MOD("pfc", R9A09G011_PFC_PCLK, CLK_MAIN, 0x400, 2),
DEF_MOD("gic", R9A09G011_GIC_CLK, CLK_SEL_B_D2, 0x400, 5),
DEF_COUPLED("eth_axi", R9A09G011_ETH0_CLK_AXI, CLK_PLL2_200, 0x40c, 8),
DEF_COUPLED("eth_chi", R9A09G011_ETH0_CLK_CHI, CLK_PLL2_100, 0x40c, 8),
DEF_MOD("eth_clk_gptp", R9A09G011_ETH0_GPTP_EXT, CLK_PLL2_100, 0x40c, 9),
DEF_MOD("syc_cnt_clk", R9A09G011_SYC_CNT_CLK, CLK_MAIN_24, 0x41c, 12),
+ DEF_MOD("wdt0_pclk", R9A09G011_WDT0_PCLK, CLK_SEL_E, 0x428, 12),
+ DEF_MOD("wdt0_clk", R9A09G011_WDT0_CLK, CLK_MAIN, 0x428, 13),
DEF_MOD("urt_pclk", R9A09G011_URT_PCLK, CLK_SEL_E, 0x438, 4),
DEF_MOD("urt0_clk", R9A09G011_URT0_CLK, CLK_SEL_W0, 0x438, 5),
DEF_MOD("ca53", R9A09G011_CA53_CLK, CLK_DIV_A, 0x448, 0),
};
static const struct rzg2l_reset r9a09g011_resets[] = {
+ DEF_RST(R9A09G011_PFC_PRESETN, 0x600, 2),
DEF_RST_MON(R9A09G011_ETH0_RST_HW_N, 0x608, 11, 11),
DEF_RST_MON(R9A09G011_SYC_RST_N, 0x610, 9, 13),
+ DEF_RST_MON(R9A09G011_WDT0_PRESETN, 0x614, 12, 19),
};
static const unsigned int r9a09g011_crit_mod_clks[] __initconst = {
diff --git a/drivers/clk/renesas/rcar-gen4-cpg.c b/drivers/clk/renesas/rcar-gen4-cpg.c
index c7ed43d6aa67..e27832e5114f 100644
--- a/drivers/clk/renesas/rcar-gen4-cpg.c
+++ b/drivers/clk/renesas/rcar-gen4-cpg.c
@@ -23,7 +23,7 @@
#include "rcar-gen4-cpg.h"
#include "rcar-cpg-lib.h"
-static const struct rcar_gen4_cpg_pll_config *cpg_pll_config __initconst;
+static const struct rcar_gen4_cpg_pll_config *cpg_pll_config __initdata;
static unsigned int cpg_clk_extalr __initdata;
static u32 cpg_mode __initdata;
diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
index e2999ab2b53c..3ff6ecd61756 100644
--- a/drivers/clk/renesas/rzg2l-cpg.c
+++ b/drivers/clk/renesas/rzg2l-cpg.c
@@ -1180,7 +1180,7 @@ static int rzg2l_cpg_status(struct reset_controller_dev *rcdev,
s8 monbit = info->resets[id].monbit;
if (info->has_clk_mon_regs) {
- return !(readl(priv->base + CLK_MRST_R(reg)) & bitmask);
+ return !!(readl(priv->base + CLK_MRST_R(reg)) & bitmask);
} else if (monbit >= 0) {
u32 monbitmask = BIT(monbit);
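The one-line rzg2l-cpg change fixes the polarity of the reset controller's .status callback: the CLK_MRST monitor bit reads 1 while the module reset is asserted, and reset_control_status() is expected to return a positive value in exactly that case, so the read must not be inverted. A sketch of the corrected test, wrapped in a hypothetical helper for clarity:

static int example_reset_status(void __iomem *base, u32 offset, u32 bitmask)
{
	/*
	 * The monitor bit is set while the reset is held, so report the
	 * line as asserted (positive return) rather than negating it.
	 */
	return !!(readl(base + offset) & bitmask);
}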
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
index b7962e5149a5..001582ea71ba 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
@@ -143,17 +143,6 @@ static struct ccu_common *sun50i_h6_r_ccu_clks[] = {
&w1_clk.common,
};
-static struct ccu_common *sun50i_h616_r_ccu_clks[] = {
- &r_apb1_clk.common,
- &r_apb2_clk.common,
- &r_apb1_twd_clk.common,
- &r_apb2_i2c_clk.common,
- &r_apb2_rsb_clk.common,
- &r_apb1_ir_clk.common,
- &r_apb1_rtc_clk.common,
- &ir_clk.common,
-};
-
static struct clk_hw_onecell_data sun50i_h6_r_hw_clks = {
.hws = {
[CLK_AR100] = &ar100_clk.common.hw,
@@ -219,8 +208,8 @@ static const struct sunxi_ccu_desc sun50i_h6_r_ccu_desc = {
};
static const struct sunxi_ccu_desc sun50i_h616_r_ccu_desc = {
- .ccu_clks = sun50i_h616_r_ccu_clks,
- .num_ccu_clks = ARRAY_SIZE(sun50i_h616_r_ccu_clks),
+ .ccu_clks = sun50i_h6_r_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun50i_h6_r_ccu_clks),
.hw_clks = &sun50i_h616_r_hw_clks,
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
index 1a5e418923f6..30056da3e0af 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
@@ -95,13 +95,13 @@ static struct ccu_nkmp pll_periph1_clk = {
},
};
+/* For GPU PLL, using an output divider for DFS causes system to fail */
#define SUN50I_H6_PLL_GPU_REG 0x030
static struct ccu_nkmp pll_gpu_clk = {
.enable = BIT(31),
.lock = BIT(28),
.n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
.m = _SUNXI_CCU_DIV(1, 1), /* input divider */
- .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
.common = {
.reg = 0x030,
.hw.init = CLK_HW_INIT("pll-gpu", "osc24M",
@@ -294,9 +294,9 @@ static SUNXI_CCU_M_WITH_MUX_GATE(deinterlace_clk, "deinterlace",
static SUNXI_CCU_GATE(bus_deinterlace_clk, "bus-deinterlace", "psi-ahb1-ahb2",
0x62c, BIT(0), 0);
+/* Keep GPU_CLK divider const to avoid DFS instability. */
static const char * const gpu_parents[] = { "pll-gpu" };
-static SUNXI_CCU_M_WITH_MUX_GATE(gpu_clk, "gpu", gpu_parents, 0x670,
- 0, 3, /* M */
+static SUNXI_CCU_MUX_WITH_GATE(gpu_clk, "gpu", gpu_parents, 0x670,
24, 1, /* mux */
BIT(31), /* gate */
CLK_SET_RATE_PARENT);
@@ -1191,6 +1191,16 @@ static int sun50i_h6_ccu_probe(struct platform_device *pdev)
if (IS_ERR(reg))
return PTR_ERR(reg);
+ /* Force PLL_GPU output divider bits to 0 */
+ val = readl(reg + SUN50I_H6_PLL_GPU_REG);
+ val &= ~BIT(0);
+ writel(val, reg + SUN50I_H6_PLL_GPU_REG);
+
+ /* Force GPU_CLK divider bits to 0 */
+ val = readl(reg + gpu_clk.common.reg);
+ val &= ~GENMASK(3, 0);
+ writel(val, reg + gpu_clk.common.reg);
+
/* Enable the lock bits on all PLLs */
for (i = 0; i < ARRAY_SIZE(pll_regs); i++) {
val = readl(reg + pll_regs[i]);
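The sun50i-h6 hunks form one workaround: GPU DFS misbehaves when the output dividers take part in rate changes, so the PLL_GPU post-divider and the GPU_CLK M divider are dropped from the clock model and, at probe, both fields are forced to 0 so the modelled rate keeps matching the hardware. The probe code uses the usual read-modify-write; a hypothetical helper capturing the same pattern:

static void example_clear_div(void __iomem *reg, unsigned long offset, u32 mask)
{
	u32 val = readl(reg + offset);		/* force divider bits to 0 */

	val &= ~mask;
	writel(val, reg + offset);
}

e.g. example_clear_div(reg, SUN50I_H6_PLL_GPU_REG, BIT(0)) for the PLL output divider and example_clear_div(reg, 0x670, GENMASK(3, 0)) for the GPU_CLK M field, mirroring the two blocks added above.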
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
index e7e3ddf4a227..2f6f02f00be2 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
@@ -53,65 +53,26 @@ static SUNXI_CCU_M(wb_div_a83_clk, "wb-div", "pll-de", 0x0c, 8, 4,
static SUNXI_CCU_M(rot_div_a83_clk, "rot-div", "pll-de", 0x0c, 0x0c, 4,
CLK_SET_RATE_PARENT);
-static struct ccu_common *sun8i_a83t_de2_clks[] = {
+static struct ccu_common *sun8i_de2_ccu_clks[] = {
&mixer0_clk.common,
&mixer1_clk.common,
&wb_clk.common,
-
- &bus_mixer0_clk.common,
- &bus_mixer1_clk.common,
- &bus_wb_clk.common,
-
- &mixer0_div_a83_clk.common,
- &mixer1_div_a83_clk.common,
- &wb_div_a83_clk.common,
-
- &bus_rot_clk.common,
&rot_clk.common,
- &rot_div_a83_clk.common,
-};
-
-static struct ccu_common *sun8i_h3_de2_clks[] = {
- &mixer0_clk.common,
- &mixer1_clk.common,
- &wb_clk.common,
-
- &bus_mixer0_clk.common,
- &bus_mixer1_clk.common,
- &bus_wb_clk.common,
-
- &mixer0_div_clk.common,
- &mixer1_div_clk.common,
- &wb_div_clk.common,
-};
-
-static struct ccu_common *sun8i_v3s_de2_clks[] = {
- &mixer0_clk.common,
- &wb_clk.common,
-
- &bus_mixer0_clk.common,
- &bus_wb_clk.common,
-
- &mixer0_div_clk.common,
- &wb_div_clk.common,
-};
-
-static struct ccu_common *sun50i_a64_de2_clks[] = {
- &mixer0_clk.common,
- &mixer1_clk.common,
- &wb_clk.common,
&bus_mixer0_clk.common,
&bus_mixer1_clk.common,
&bus_wb_clk.common,
+ &bus_rot_clk.common,
&mixer0_div_clk.common,
&mixer1_div_clk.common,
&wb_div_clk.common,
-
- &bus_rot_clk.common,
- &rot_clk.common,
&rot_div_clk.common,
+
+ &mixer0_div_a83_clk.common,
+ &mixer1_div_a83_clk.common,
+ &wb_div_a83_clk.common,
+ &rot_div_a83_clk.common,
};
static struct clk_hw_onecell_data sun8i_a83t_de2_hw_clks = {
@@ -219,8 +180,8 @@ static struct ccu_reset_map sun50i_h5_de2_resets[] = {
};
static const struct sunxi_ccu_desc sun8i_a83t_de2_clk_desc = {
- .ccu_clks = sun8i_a83t_de2_clks,
- .num_ccu_clks = ARRAY_SIZE(sun8i_a83t_de2_clks),
+ .ccu_clks = sun8i_de2_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_de2_ccu_clks),
.hw_clks = &sun8i_a83t_de2_hw_clks,
@@ -229,8 +190,8 @@ static const struct sunxi_ccu_desc sun8i_a83t_de2_clk_desc = {
};
static const struct sunxi_ccu_desc sun8i_h3_de2_clk_desc = {
- .ccu_clks = sun8i_h3_de2_clks,
- .num_ccu_clks = ARRAY_SIZE(sun8i_h3_de2_clks),
+ .ccu_clks = sun8i_de2_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_de2_ccu_clks),
.hw_clks = &sun8i_h3_de2_hw_clks,
@@ -239,8 +200,8 @@ static const struct sunxi_ccu_desc sun8i_h3_de2_clk_desc = {
};
static const struct sunxi_ccu_desc sun8i_r40_de2_clk_desc = {
- .ccu_clks = sun50i_a64_de2_clks,
- .num_ccu_clks = ARRAY_SIZE(sun50i_a64_de2_clks),
+ .ccu_clks = sun8i_de2_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_de2_ccu_clks),
.hw_clks = &sun50i_a64_de2_hw_clks,
@@ -249,8 +210,8 @@ static const struct sunxi_ccu_desc sun8i_r40_de2_clk_desc = {
};
static const struct sunxi_ccu_desc sun8i_v3s_de2_clk_desc = {
- .ccu_clks = sun8i_v3s_de2_clks,
- .num_ccu_clks = ARRAY_SIZE(sun8i_v3s_de2_clks),
+ .ccu_clks = sun8i_de2_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_de2_ccu_clks),
.hw_clks = &sun8i_v3s_de2_hw_clks,
@@ -259,8 +220,8 @@ static const struct sunxi_ccu_desc sun8i_v3s_de2_clk_desc = {
};
static const struct sunxi_ccu_desc sun50i_a64_de2_clk_desc = {
- .ccu_clks = sun50i_a64_de2_clks,
- .num_ccu_clks = ARRAY_SIZE(sun50i_a64_de2_clks),
+ .ccu_clks = sun8i_de2_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_de2_ccu_clks),
.hw_clks = &sun50i_a64_de2_hw_clks,
@@ -269,8 +230,8 @@ static const struct sunxi_ccu_desc sun50i_a64_de2_clk_desc = {
};
static const struct sunxi_ccu_desc sun50i_h5_de2_clk_desc = {
- .ccu_clks = sun8i_h3_de2_clks,
- .num_ccu_clks = ARRAY_SIZE(sun8i_h3_de2_clks),
+ .ccu_clks = sun8i_de2_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_de2_ccu_clks),
.hw_clks = &sun8i_h3_de2_hw_clks,
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
index e058cf691aea..d3fcb983c17c 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
@@ -562,6 +562,7 @@ static struct ccu_common *sun8i_h3_ccu_clks[] = {
&bus_uart2_clk.common,
&bus_uart3_clk.common,
&bus_scr0_clk.common,
+ &bus_scr1_clk.common,
&bus_ephy_clk.common,
&bus_dbg_clk.common,
&ths_clk.common,
@@ -612,114 +613,6 @@ static struct ccu_common *sun8i_h3_ccu_clks[] = {
&gpu_clk.common,
};
-static struct ccu_common *sun50i_h5_ccu_clks[] = {
- &pll_cpux_clk.common,
- &pll_audio_base_clk.common,
- &pll_video_clk.common,
- &pll_ve_clk.common,
- &pll_ddr_clk.common,
- &pll_periph0_clk.common,
- &pll_gpu_clk.common,
- &pll_periph1_clk.common,
- &pll_de_clk.common,
- &cpux_clk.common,
- &axi_clk.common,
- &ahb1_clk.common,
- &apb1_clk.common,
- &apb2_clk.common,
- &ahb2_clk.common,
- &bus_ce_clk.common,
- &bus_dma_clk.common,
- &bus_mmc0_clk.common,
- &bus_mmc1_clk.common,
- &bus_mmc2_clk.common,
- &bus_nand_clk.common,
- &bus_dram_clk.common,
- &bus_emac_clk.common,
- &bus_ts_clk.common,
- &bus_hstimer_clk.common,
- &bus_spi0_clk.common,
- &bus_spi1_clk.common,
- &bus_otg_clk.common,
- &bus_ehci0_clk.common,
- &bus_ehci1_clk.common,
- &bus_ehci2_clk.common,
- &bus_ehci3_clk.common,
- &bus_ohci0_clk.common,
- &bus_ohci1_clk.common,
- &bus_ohci2_clk.common,
- &bus_ohci3_clk.common,
- &bus_ve_clk.common,
- &bus_tcon0_clk.common,
- &bus_tcon1_clk.common,
- &bus_deinterlace_clk.common,
- &bus_csi_clk.common,
- &bus_tve_clk.common,
- &bus_hdmi_clk.common,
- &bus_de_clk.common,
- &bus_gpu_clk.common,
- &bus_msgbox_clk.common,
- &bus_spinlock_clk.common,
- &bus_codec_clk.common,
- &bus_spdif_clk.common,
- &bus_pio_clk.common,
- &bus_ths_clk.common,
- &bus_i2s0_clk.common,
- &bus_i2s1_clk.common,
- &bus_i2s2_clk.common,
- &bus_i2c0_clk.common,
- &bus_i2c1_clk.common,
- &bus_i2c2_clk.common,
- &bus_uart0_clk.common,
- &bus_uart1_clk.common,
- &bus_uart2_clk.common,
- &bus_uart3_clk.common,
- &bus_scr0_clk.common,
- &bus_scr1_clk.common,
- &bus_ephy_clk.common,
- &bus_dbg_clk.common,
- &ths_clk.common,
- &nand_clk.common,
- &mmc0_clk.common,
- &mmc1_clk.common,
- &mmc2_clk.common,
- &ts_clk.common,
- &ce_clk.common,
- &spi0_clk.common,
- &spi1_clk.common,
- &i2s0_clk.common,
- &i2s1_clk.common,
- &i2s2_clk.common,
- &spdif_clk.common,
- &usb_phy0_clk.common,
- &usb_phy1_clk.common,
- &usb_phy2_clk.common,
- &usb_phy3_clk.common,
- &usb_ohci0_clk.common,
- &usb_ohci1_clk.common,
- &usb_ohci2_clk.common,
- &usb_ohci3_clk.common,
- &dram_clk.common,
- &dram_ve_clk.common,
- &dram_csi_clk.common,
- &dram_deinterlace_clk.common,
- &dram_ts_clk.common,
- &de_clk.common,
- &tcon_clk.common,
- &tve_clk.common,
- &deinterlace_clk.common,
- &csi_misc_clk.common,
- &csi_sclk_clk.common,
- &csi_mclk_clk.common,
- &ve_clk.common,
- &ac_dig_clk.common,
- &avs_clk.common,
- &hdmi_clk.common,
- &hdmi_ddc_clk.common,
- &mbus_clk.common,
- &gpu_clk.common,
-};
-
static const struct clk_hw *clk_parent_pll_audio[] = {
&pll_audio_base_clk.common.hw
};
@@ -1116,8 +1009,8 @@ static const struct sunxi_ccu_desc sun8i_h3_ccu_desc = {
};
static const struct sunxi_ccu_desc sun50i_h5_ccu_desc = {
- .ccu_clks = sun50i_h5_ccu_clks,
- .num_ccu_clks = ARRAY_SIZE(sun50i_h5_ccu_clks),
+ .ccu_clks = sun8i_h3_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_h3_ccu_clks),
.hw_clks = &sun50i_h5_hw_clks,
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r.c b/drivers/clk/sunxi-ng/ccu-sun8i-r.c
index 5b7fab832a52..4221649b311f 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r.c
@@ -114,32 +114,7 @@ static struct ccu_mp a83t_ir_clk = {
},
};
-static struct ccu_common *sun8i_a83t_r_ccu_clks[] = {
- &ar100_clk.common,
- &apb0_clk.common,
- &apb0_pio_clk.common,
- &apb0_ir_clk.common,
- &apb0_timer_clk.common,
- &apb0_rsb_clk.common,
- &apb0_uart_clk.common,
- &apb0_i2c_clk.common,
- &apb0_twd_clk.common,
- &a83t_ir_clk.common,
-};
-
-static struct ccu_common *sun8i_h3_r_ccu_clks[] = {
- &ar100_clk.common,
- &apb0_clk.common,
- &apb0_pio_clk.common,
- &apb0_ir_clk.common,
- &apb0_timer_clk.common,
- &apb0_uart_clk.common,
- &apb0_i2c_clk.common,
- &apb0_twd_clk.common,
- &ir_clk.common,
-};
-
-static struct ccu_common *sun50i_a64_r_ccu_clks[] = {
+static struct ccu_common *sun8i_r_ccu_clks[] = {
&ar100_clk.common,
&apb0_clk.common,
&apb0_pio_clk.common,
@@ -150,6 +125,7 @@ static struct ccu_common *sun50i_a64_r_ccu_clks[] = {
&apb0_i2c_clk.common,
&apb0_twd_clk.common,
&ir_clk.common,
+ &a83t_ir_clk.common,
};
static struct clk_hw_onecell_data sun8i_a83t_r_hw_clks = {
@@ -226,8 +202,8 @@ static struct ccu_reset_map sun50i_a64_r_ccu_resets[] = {
};
static const struct sunxi_ccu_desc sun8i_a83t_r_ccu_desc = {
- .ccu_clks = sun8i_a83t_r_ccu_clks,
- .num_ccu_clks = ARRAY_SIZE(sun8i_a83t_r_ccu_clks),
+ .ccu_clks = sun8i_r_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_r_ccu_clks),
.hw_clks = &sun8i_a83t_r_hw_clks,
@@ -236,8 +212,8 @@ static const struct sunxi_ccu_desc sun8i_a83t_r_ccu_desc = {
};
static const struct sunxi_ccu_desc sun8i_h3_r_ccu_desc = {
- .ccu_clks = sun8i_h3_r_ccu_clks,
- .num_ccu_clks = ARRAY_SIZE(sun8i_h3_r_ccu_clks),
+ .ccu_clks = sun8i_r_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_r_ccu_clks),
.hw_clks = &sun8i_h3_r_hw_clks,
@@ -246,8 +222,8 @@ static const struct sunxi_ccu_desc sun8i_h3_r_ccu_desc = {
};
static const struct sunxi_ccu_desc sun50i_a64_r_ccu_desc = {
- .ccu_clks = sun50i_a64_r_ccu_clks,
- .num_ccu_clks = ARRAY_SIZE(sun50i_a64_r_ccu_clks),
+ .ccu_clks = sun8i_r_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_r_ccu_clks),
.hw_clks = &sun50i_a64_r_hw_clks,
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
index 87f87d6ea3ad..fbb3529f0d3e 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
@@ -421,6 +421,7 @@ static struct ccu_common *sun8i_v3s_ccu_clks[] = {
&bus_de_clk.common,
&bus_codec_clk.common,
&bus_pio_clk.common,
+ &bus_i2s0_clk.common,
&bus_i2c0_clk.common,
&bus_i2c1_clk.common,
&bus_uart0_clk.common,
@@ -439,6 +440,7 @@ static struct ccu_common *sun8i_v3s_ccu_clks[] = {
&mmc2_output_clk.common,
&ce_clk.common,
&spi0_clk.common,
+ &i2s0_clk.common,
&usb_phy0_clk.common,
&usb_ohci0_clk.common,
&dram_clk.common,
@@ -463,80 +465,6 @@ static const struct clk_hw *clk_parent_pll_audio[] = {
&pll_audio_base_clk.common.hw
};
-static struct ccu_common *sun8i_v3_ccu_clks[] = {
- &pll_cpu_clk.common,
- &pll_audio_base_clk.common,
- &pll_video_clk.common,
- &pll_ve_clk.common,
- &pll_ddr0_clk.common,
- &pll_periph0_clk.common,
- &pll_isp_clk.common,
- &pll_periph1_clk.common,
- &pll_ddr1_clk.common,
- &cpu_clk.common,
- &axi_clk.common,
- &ahb1_clk.common,
- &apb1_clk.common,
- &apb2_clk.common,
- &ahb2_clk.common,
- &bus_ce_clk.common,
- &bus_dma_clk.common,
- &bus_mmc0_clk.common,
- &bus_mmc1_clk.common,
- &bus_mmc2_clk.common,
- &bus_dram_clk.common,
- &bus_emac_clk.common,
- &bus_hstimer_clk.common,
- &bus_spi0_clk.common,
- &bus_otg_clk.common,
- &bus_ehci0_clk.common,
- &bus_ohci0_clk.common,
- &bus_ve_clk.common,
- &bus_tcon0_clk.common,
- &bus_csi_clk.common,
- &bus_de_clk.common,
- &bus_codec_clk.common,
- &bus_pio_clk.common,
- &bus_i2s0_clk.common,
- &bus_i2c0_clk.common,
- &bus_i2c1_clk.common,
- &bus_uart0_clk.common,
- &bus_uart1_clk.common,
- &bus_uart2_clk.common,
- &bus_ephy_clk.common,
- &bus_dbg_clk.common,
- &mmc0_clk.common,
- &mmc0_sample_clk.common,
- &mmc0_output_clk.common,
- &mmc1_clk.common,
- &mmc1_sample_clk.common,
- &mmc1_output_clk.common,
- &mmc2_clk.common,
- &mmc2_sample_clk.common,
- &mmc2_output_clk.common,
- &ce_clk.common,
- &spi0_clk.common,
- &i2s0_clk.common,
- &usb_phy0_clk.common,
- &usb_ohci0_clk.common,
- &dram_clk.common,
- &dram_ve_clk.common,
- &dram_csi_clk.common,
- &dram_ohci_clk.common,
- &dram_ehci_clk.common,
- &de_clk.common,
- &tcon_clk.common,
- &csi_misc_clk.common,
- &csi0_mclk_clk.common,
- &csi1_sclk_clk.common,
- &csi1_mclk_clk.common,
- &ve_clk.common,
- &ac_dig_clk.common,
- &avs_clk.common,
- &mbus_clk.common,
- &mipi_csi_clk.common,
-};
-
/* We hardcode the divider to 1 for SDM support */
static CLK_FIXED_FACTOR_HWS(pll_audio_clk, "pll-audio",
clk_parent_pll_audio,
@@ -798,8 +726,8 @@ static const struct sunxi_ccu_desc sun8i_v3s_ccu_desc = {
};
static const struct sunxi_ccu_desc sun8i_v3_ccu_desc = {
- .ccu_clks = sun8i_v3_ccu_clks,
- .num_ccu_clks = ARRAY_SIZE(sun8i_v3_ccu_clks),
+ .ccu_clks = sun8i_v3s_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_v3s_ccu_clks),
.hw_clks = &sun8i_v3_hw_clks,
diff --git a/drivers/clk/sunxi/Kconfig b/drivers/clk/sunxi/Kconfig
index 3fba3d3ac9a2..1c4e543366dd 100644
--- a/drivers/clk/sunxi/Kconfig
+++ b/drivers/clk/sunxi/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig CLK_SUNXI
bool "Legacy clock support for Allwinner SoCs"
- depends on ARCH_SUNXI || COMPILE_TEST
+ depends on (ARM && ARCH_SUNXI) || COMPILE_TEST
default y
if CLK_SUNXI
@@ -19,7 +19,6 @@ config CLK_SUNXI_CLOCKS
config CLK_SUNXI_PRCM_SUN6I
bool "Legacy A31 PRCM driver"
- select MFD_SUN6I_PRCM
default y
help
Legacy clock driver for the A31 PRCM clocks. Those are
@@ -27,7 +26,6 @@ config CLK_SUNXI_PRCM_SUN6I
config CLK_SUNXI_PRCM_SUN8I
bool "Legacy sun8i PRCM driver"
- select MFD_SUN6I_PRCM
default y
help
Legacy clock driver for the sun8i family PRCM clocks.
diff --git a/drivers/clk/ti/clk-44xx.c b/drivers/clk/ti/clk-44xx.c
index d078e5d73ed9..868bc7af21b0 100644
--- a/drivers/clk/ti/clk-44xx.c
+++ b/drivers/clk/ti/clk-44xx.c
@@ -56,7 +56,7 @@ static const struct omap_clkctrl_bit_data omap4_aess_bit_data[] __initconst = {
};
static const char * const omap4_func_dmic_abe_gfclk_parents[] __initconst = {
- "abe_cm:clk:0018:26",
+ "abe-clkctrl:0018:26",
"pad_clks_ck",
"slimbus_clk",
NULL,
@@ -76,7 +76,7 @@ static const struct omap_clkctrl_bit_data omap4_dmic_bit_data[] __initconst = {
};
static const char * const omap4_func_mcasp_abe_gfclk_parents[] __initconst = {
- "abe_cm:clk:0020:26",
+ "abe-clkctrl:0020:26",
"pad_clks_ck",
"slimbus_clk",
NULL,
@@ -89,7 +89,7 @@ static const struct omap_clkctrl_bit_data omap4_mcasp_bit_data[] __initconst = {
};
static const char * const omap4_func_mcbsp1_gfclk_parents[] __initconst = {
- "abe_cm:clk:0028:26",
+ "abe-clkctrl:0028:26",
"pad_clks_ck",
"slimbus_clk",
NULL,
@@ -102,7 +102,7 @@ static const struct omap_clkctrl_bit_data omap4_mcbsp1_bit_data[] __initconst =
};
static const char * const omap4_func_mcbsp2_gfclk_parents[] __initconst = {
- "abe_cm:clk:0030:26",
+ "abe-clkctrl:0030:26",
"pad_clks_ck",
"slimbus_clk",
NULL,
@@ -115,7 +115,7 @@ static const struct omap_clkctrl_bit_data omap4_mcbsp2_bit_data[] __initconst =
};
static const char * const omap4_func_mcbsp3_gfclk_parents[] __initconst = {
- "abe_cm:clk:0038:26",
+ "abe-clkctrl:0038:26",
"pad_clks_ck",
"slimbus_clk",
NULL,
@@ -183,18 +183,18 @@ static const struct omap_clkctrl_bit_data omap4_timer8_bit_data[] __initconst =
static const struct omap_clkctrl_reg_data omap4_abe_clkctrl_regs[] __initconst = {
{ OMAP4_L4_ABE_CLKCTRL, NULL, 0, "ocp_abe_iclk" },
- { OMAP4_AESS_CLKCTRL, omap4_aess_bit_data, CLKF_SW_SUP, "abe_cm:clk:0008:24" },
+ { OMAP4_AESS_CLKCTRL, omap4_aess_bit_data, CLKF_SW_SUP, "abe-clkctrl:0008:24" },
{ OMAP4_MCPDM_CLKCTRL, NULL, CLKF_SW_SUP, "pad_clks_ck" },
- { OMAP4_DMIC_CLKCTRL, omap4_dmic_bit_data, CLKF_SW_SUP, "abe_cm:clk:0018:24" },
- { OMAP4_MCASP_CLKCTRL, omap4_mcasp_bit_data, CLKF_SW_SUP, "abe_cm:clk:0020:24" },
- { OMAP4_MCBSP1_CLKCTRL, omap4_mcbsp1_bit_data, CLKF_SW_SUP, "abe_cm:clk:0028:24" },
- { OMAP4_MCBSP2_CLKCTRL, omap4_mcbsp2_bit_data, CLKF_SW_SUP, "abe_cm:clk:0030:24" },
- { OMAP4_MCBSP3_CLKCTRL, omap4_mcbsp3_bit_data, CLKF_SW_SUP, "abe_cm:clk:0038:24" },
- { OMAP4_SLIMBUS1_CLKCTRL, omap4_slimbus1_bit_data, CLKF_SW_SUP, "abe_cm:clk:0040:8" },
- { OMAP4_TIMER5_CLKCTRL, omap4_timer5_bit_data, CLKF_SW_SUP, "abe_cm:clk:0048:24" },
- { OMAP4_TIMER6_CLKCTRL, omap4_timer6_bit_data, CLKF_SW_SUP, "abe_cm:clk:0050:24" },
- { OMAP4_TIMER7_CLKCTRL, omap4_timer7_bit_data, CLKF_SW_SUP, "abe_cm:clk:0058:24" },
- { OMAP4_TIMER8_CLKCTRL, omap4_timer8_bit_data, CLKF_SW_SUP, "abe_cm:clk:0060:24" },
+ { OMAP4_DMIC_CLKCTRL, omap4_dmic_bit_data, CLKF_SW_SUP, "abe-clkctrl:0018:24" },
+ { OMAP4_MCASP_CLKCTRL, omap4_mcasp_bit_data, CLKF_SW_SUP, "abe-clkctrl:0020:24" },
+ { OMAP4_MCBSP1_CLKCTRL, omap4_mcbsp1_bit_data, CLKF_SW_SUP, "abe-clkctrl:0028:24" },
+ { OMAP4_MCBSP2_CLKCTRL, omap4_mcbsp2_bit_data, CLKF_SW_SUP, "abe-clkctrl:0030:24" },
+ { OMAP4_MCBSP3_CLKCTRL, omap4_mcbsp3_bit_data, CLKF_SW_SUP, "abe-clkctrl:0038:24" },
+ { OMAP4_SLIMBUS1_CLKCTRL, omap4_slimbus1_bit_data, CLKF_SW_SUP, "abe-clkctrl:0040:8" },
+ { OMAP4_TIMER5_CLKCTRL, omap4_timer5_bit_data, CLKF_SW_SUP, "abe-clkctrl:0048:24" },
+ { OMAP4_TIMER6_CLKCTRL, omap4_timer6_bit_data, CLKF_SW_SUP, "abe-clkctrl:0050:24" },
+ { OMAP4_TIMER7_CLKCTRL, omap4_timer7_bit_data, CLKF_SW_SUP, "abe-clkctrl:0058:24" },
+ { OMAP4_TIMER8_CLKCTRL, omap4_timer8_bit_data, CLKF_SW_SUP, "abe-clkctrl:0060:24" },
{ OMAP4_WD_TIMER3_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
{ 0 },
};
@@ -287,7 +287,7 @@ static const struct omap_clkctrl_bit_data omap4_fdif_bit_data[] __initconst = {
static const struct omap_clkctrl_reg_data omap4_iss_clkctrl_regs[] __initconst = {
{ OMAP4_ISS_CLKCTRL, omap4_iss_bit_data, CLKF_SW_SUP, "ducati_clk_mux_ck" },
- { OMAP4_FDIF_CLKCTRL, omap4_fdif_bit_data, CLKF_SW_SUP, "iss_cm:clk:0008:24" },
+ { OMAP4_FDIF_CLKCTRL, omap4_fdif_bit_data, CLKF_SW_SUP, "iss-clkctrl:0008:24" },
{ 0 },
};
@@ -320,7 +320,7 @@ static const struct omap_clkctrl_bit_data omap4_dss_core_bit_data[] __initconst
};
static const struct omap_clkctrl_reg_data omap4_l3_dss_clkctrl_regs[] __initconst = {
- { OMAP4_DSS_CORE_CLKCTRL, omap4_dss_core_bit_data, CLKF_SW_SUP, "l3_dss_cm:clk:0000:8" },
+ { OMAP4_DSS_CORE_CLKCTRL, omap4_dss_core_bit_data, CLKF_SW_SUP, "l3-dss-clkctrl:0000:8" },
{ 0 },
};
@@ -336,7 +336,7 @@ static const struct omap_clkctrl_bit_data omap4_gpu_bit_data[] __initconst = {
};
static const struct omap_clkctrl_reg_data omap4_l3_gfx_clkctrl_regs[] __initconst = {
- { OMAP4_GPU_CLKCTRL, omap4_gpu_bit_data, CLKF_SW_SUP, "l3_gfx_cm:clk:0000:24" },
+ { OMAP4_GPU_CLKCTRL, omap4_gpu_bit_data, CLKF_SW_SUP, "l3-gfx-clkctrl:0000:24" },
{ 0 },
};
@@ -372,12 +372,12 @@ static const struct omap_clkctrl_bit_data omap4_hsi_bit_data[] __initconst = {
};
static const char * const omap4_usb_host_hs_utmi_p1_clk_parents[] __initconst = {
- "l3_init_cm:clk:0038:24",
+ "l3-init-clkctrl:0038:24",
NULL,
};
static const char * const omap4_usb_host_hs_utmi_p2_clk_parents[] __initconst = {
- "l3_init_cm:clk:0038:25",
+ "l3-init-clkctrl:0038:25",
NULL,
};
@@ -418,7 +418,7 @@ static const struct omap_clkctrl_bit_data omap4_usb_host_hs_bit_data[] __initcon
};
static const char * const omap4_usb_otg_hs_xclk_parents[] __initconst = {
- "l3_init_cm:clk:0040:24",
+ "l3-init-clkctrl:0040:24",
NULL,
};
@@ -452,14 +452,14 @@ static const struct omap_clkctrl_bit_data omap4_ocp2scp_usb_phy_bit_data[] __ini
};
static const struct omap_clkctrl_reg_data omap4_l3_init_clkctrl_regs[] __initconst = {
- { OMAP4_MMC1_CLKCTRL, omap4_mmc1_bit_data, CLKF_SW_SUP, "l3_init_cm:clk:0008:24" },
- { OMAP4_MMC2_CLKCTRL, omap4_mmc2_bit_data, CLKF_SW_SUP, "l3_init_cm:clk:0010:24" },
- { OMAP4_HSI_CLKCTRL, omap4_hsi_bit_data, CLKF_HW_SUP, "l3_init_cm:clk:0018:24" },
+ { OMAP4_MMC1_CLKCTRL, omap4_mmc1_bit_data, CLKF_SW_SUP, "l3-init-clkctrl:0008:24" },
+ { OMAP4_MMC2_CLKCTRL, omap4_mmc2_bit_data, CLKF_SW_SUP, "l3-init-clkctrl:0010:24" },
+ { OMAP4_HSI_CLKCTRL, omap4_hsi_bit_data, CLKF_HW_SUP, "l3-init-clkctrl:0018:24" },
{ OMAP4_USB_HOST_HS_CLKCTRL, omap4_usb_host_hs_bit_data, CLKF_SW_SUP, "init_60m_fclk" },
{ OMAP4_USB_OTG_HS_CLKCTRL, omap4_usb_otg_hs_bit_data, CLKF_HW_SUP, "l3_div_ck" },
{ OMAP4_USB_TLL_HS_CLKCTRL, omap4_usb_tll_hs_bit_data, CLKF_HW_SUP, "l4_div_ck" },
{ OMAP4_USB_HOST_FS_CLKCTRL, NULL, CLKF_SW_SUP, "func_48mc_fclk" },
- { OMAP4_OCP2SCP_USB_PHY_CLKCTRL, omap4_ocp2scp_usb_phy_bit_data, CLKF_HW_SUP, "l3_init_cm:clk:00c0:8" },
+ { OMAP4_OCP2SCP_USB_PHY_CLKCTRL, omap4_ocp2scp_usb_phy_bit_data, CLKF_HW_SUP, "l3-init-clkctrl:00c0:8" },
{ 0 },
};
@@ -530,7 +530,7 @@ static const struct omap_clkctrl_bit_data omap4_gpio6_bit_data[] __initconst = {
};
static const char * const omap4_per_mcbsp4_gfclk_parents[] __initconst = {
- "l4_per_cm:clk:00c0:26",
+ "l4-per-clkctrl:00c0:26",
"pad_clks_ck",
NULL,
};
@@ -570,12 +570,12 @@ static const struct omap_clkctrl_bit_data omap4_slimbus2_bit_data[] __initconst
};
static const struct omap_clkctrl_reg_data omap4_l4_per_clkctrl_regs[] __initconst = {
- { OMAP4_TIMER10_CLKCTRL, omap4_timer10_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:0008:24" },
- { OMAP4_TIMER11_CLKCTRL, omap4_timer11_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:0010:24" },
- { OMAP4_TIMER2_CLKCTRL, omap4_timer2_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:0018:24" },
- { OMAP4_TIMER3_CLKCTRL, omap4_timer3_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:0020:24" },
- { OMAP4_TIMER4_CLKCTRL, omap4_timer4_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:0028:24" },
- { OMAP4_TIMER9_CLKCTRL, omap4_timer9_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:0030:24" },
+ { OMAP4_TIMER10_CLKCTRL, omap4_timer10_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0008:24" },
+ { OMAP4_TIMER11_CLKCTRL, omap4_timer11_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0010:24" },
+ { OMAP4_TIMER2_CLKCTRL, omap4_timer2_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0018:24" },
+ { OMAP4_TIMER3_CLKCTRL, omap4_timer3_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0020:24" },
+ { OMAP4_TIMER4_CLKCTRL, omap4_timer4_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0028:24" },
+ { OMAP4_TIMER9_CLKCTRL, omap4_timer9_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0030:24" },
{ OMAP4_ELM_CLKCTRL, NULL, 0, "l4_div_ck" },
{ OMAP4_GPIO2_CLKCTRL, omap4_gpio2_bit_data, CLKF_HW_SUP, "l4_div_ck" },
{ OMAP4_GPIO3_CLKCTRL, omap4_gpio3_bit_data, CLKF_HW_SUP, "l4_div_ck" },
@@ -588,14 +588,14 @@ static const struct omap_clkctrl_reg_data omap4_l4_per_clkctrl_regs[] __initcons
{ OMAP4_I2C3_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
{ OMAP4_I2C4_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
{ OMAP4_L4_PER_CLKCTRL, NULL, 0, "l4_div_ck" },
- { OMAP4_MCBSP4_CLKCTRL, omap4_mcbsp4_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:00c0:24" },
+ { OMAP4_MCBSP4_CLKCTRL, omap4_mcbsp4_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:00c0:24" },
{ OMAP4_MCSPI1_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
{ OMAP4_MCSPI2_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
{ OMAP4_MCSPI3_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
{ OMAP4_MCSPI4_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
{ OMAP4_MMC3_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
{ OMAP4_MMC4_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
- { OMAP4_SLIMBUS2_CLKCTRL, omap4_slimbus2_bit_data, CLKF_SW_SUP, "l4_per_cm:clk:0118:8" },
+ { OMAP4_SLIMBUS2_CLKCTRL, omap4_slimbus2_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0118:8" },
{ OMAP4_UART1_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
{ OMAP4_UART2_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
{ OMAP4_UART3_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
@@ -630,7 +630,7 @@ static const struct omap_clkctrl_reg_data omap4_l4_wkup_clkctrl_regs[] __initcon
{ OMAP4_L4_WKUP_CLKCTRL, NULL, 0, "l4_wkup_clk_mux_ck" },
{ OMAP4_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
{ OMAP4_GPIO1_CLKCTRL, omap4_gpio1_bit_data, CLKF_HW_SUP, "l4_wkup_clk_mux_ck" },
- { OMAP4_TIMER1_CLKCTRL, omap4_timer1_bit_data, CLKF_SW_SUP, "l4_wkup_cm:clk:0020:24" },
+ { OMAP4_TIMER1_CLKCTRL, omap4_timer1_bit_data, CLKF_SW_SUP, "l4-wkup-clkctrl:0020:24" },
{ OMAP4_COUNTER_32K_CLKCTRL, NULL, 0, "sys_32k_ck" },
{ OMAP4_KBD_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
{ 0 },
@@ -644,7 +644,7 @@ static const char * const omap4_pmd_stm_clock_mux_ck_parents[] __initconst = {
};
static const char * const omap4_trace_clk_div_div_ck_parents[] __initconst = {
- "emu_sys_cm:clk:0000:22",
+ "emu-sys-clkctrl:0000:22",
NULL,
};
@@ -662,7 +662,7 @@ static const struct omap_clkctrl_div_data omap4_trace_clk_div_div_ck_data __init
};
static const char * const omap4_stm_clk_div_ck_parents[] __initconst = {
- "emu_sys_cm:clk:0000:20",
+ "emu-sys-clkctrl:0000:20",
NULL,
};
@@ -716,73 +716,73 @@ static struct ti_dt_clk omap44xx_clks[] = {
* hwmod support. Once hwmod is removed, these can be removed
* also.
*/
- DT_CLK(NULL, "aess_fclk", "abe_cm:0008:24"),
- DT_CLK(NULL, "cm2_dm10_mux", "l4_per_cm:0008:24"),
- DT_CLK(NULL, "cm2_dm11_mux", "l4_per_cm:0010:24"),
- DT_CLK(NULL, "cm2_dm2_mux", "l4_per_cm:0018:24"),
- DT_CLK(NULL, "cm2_dm3_mux", "l4_per_cm:0020:24"),
- DT_CLK(NULL, "cm2_dm4_mux", "l4_per_cm:0028:24"),
- DT_CLK(NULL, "cm2_dm9_mux", "l4_per_cm:0030:24"),
- DT_CLK(NULL, "dmic_sync_mux_ck", "abe_cm:0018:26"),
- DT_CLK(NULL, "dmt1_clk_mux", "l4_wkup_cm:0020:24"),
- DT_CLK(NULL, "dss_48mhz_clk", "l3_dss_cm:0000:9"),
- DT_CLK(NULL, "dss_dss_clk", "l3_dss_cm:0000:8"),
- DT_CLK(NULL, "dss_sys_clk", "l3_dss_cm:0000:10"),
- DT_CLK(NULL, "dss_tv_clk", "l3_dss_cm:0000:11"),
- DT_CLK(NULL, "fdif_fck", "iss_cm:0008:24"),
- DT_CLK(NULL, "func_dmic_abe_gfclk", "abe_cm:0018:24"),
- DT_CLK(NULL, "func_mcasp_abe_gfclk", "abe_cm:0020:24"),
- DT_CLK(NULL, "func_mcbsp1_gfclk", "abe_cm:0028:24"),
- DT_CLK(NULL, "func_mcbsp2_gfclk", "abe_cm:0030:24"),
- DT_CLK(NULL, "func_mcbsp3_gfclk", "abe_cm:0038:24"),
- DT_CLK(NULL, "gpio1_dbclk", "l4_wkup_cm:0018:8"),
- DT_CLK(NULL, "gpio2_dbclk", "l4_per_cm:0040:8"),
- DT_CLK(NULL, "gpio3_dbclk", "l4_per_cm:0048:8"),
- DT_CLK(NULL, "gpio4_dbclk", "l4_per_cm:0050:8"),
- DT_CLK(NULL, "gpio5_dbclk", "l4_per_cm:0058:8"),
- DT_CLK(NULL, "gpio6_dbclk", "l4_per_cm:0060:8"),
- DT_CLK(NULL, "hsi_fck", "l3_init_cm:0018:24"),
- DT_CLK(NULL, "hsmmc1_fclk", "l3_init_cm:0008:24"),
- DT_CLK(NULL, "hsmmc2_fclk", "l3_init_cm:0010:24"),
- DT_CLK(NULL, "iss_ctrlclk", "iss_cm:0000:8"),
- DT_CLK(NULL, "mcasp_sync_mux_ck", "abe_cm:0020:26"),
- DT_CLK(NULL, "mcbsp1_sync_mux_ck", "abe_cm:0028:26"),
- DT_CLK(NULL, "mcbsp2_sync_mux_ck", "abe_cm:0030:26"),
- DT_CLK(NULL, "mcbsp3_sync_mux_ck", "abe_cm:0038:26"),
- DT_CLK(NULL, "mcbsp4_sync_mux_ck", "l4_per_cm:00c0:26"),
- DT_CLK(NULL, "ocp2scp_usb_phy_phy_48m", "l3_init_cm:00c0:8"),
- DT_CLK(NULL, "otg_60m_gfclk", "l3_init_cm:0040:24"),
- DT_CLK(NULL, "per_mcbsp4_gfclk", "l4_per_cm:00c0:24"),
- DT_CLK(NULL, "pmd_stm_clock_mux_ck", "emu_sys_cm:0000:20"),
- DT_CLK(NULL, "pmd_trace_clk_mux_ck", "emu_sys_cm:0000:22"),
- DT_CLK(NULL, "sgx_clk_mux", "l3_gfx_cm:0000:24"),
- DT_CLK(NULL, "slimbus1_fclk_0", "abe_cm:0040:8"),
- DT_CLK(NULL, "slimbus1_fclk_1", "abe_cm:0040:9"),
- DT_CLK(NULL, "slimbus1_fclk_2", "abe_cm:0040:10"),
- DT_CLK(NULL, "slimbus1_slimbus_clk", "abe_cm:0040:11"),
- DT_CLK(NULL, "slimbus2_fclk_0", "l4_per_cm:0118:8"),
- DT_CLK(NULL, "slimbus2_fclk_1", "l4_per_cm:0118:9"),
- DT_CLK(NULL, "slimbus2_slimbus_clk", "l4_per_cm:0118:10"),
- DT_CLK(NULL, "stm_clk_div_ck", "emu_sys_cm:0000:27"),
- DT_CLK(NULL, "timer5_sync_mux", "abe_cm:0048:24"),
- DT_CLK(NULL, "timer6_sync_mux", "abe_cm:0050:24"),
- DT_CLK(NULL, "timer7_sync_mux", "abe_cm:0058:24"),
- DT_CLK(NULL, "timer8_sync_mux", "abe_cm:0060:24"),
- DT_CLK(NULL, "trace_clk_div_div_ck", "emu_sys_cm:0000:24"),
- DT_CLK(NULL, "usb_host_hs_func48mclk", "l3_init_cm:0038:15"),
- DT_CLK(NULL, "usb_host_hs_hsic480m_p1_clk", "l3_init_cm:0038:13"),
- DT_CLK(NULL, "usb_host_hs_hsic480m_p2_clk", "l3_init_cm:0038:14"),
- DT_CLK(NULL, "usb_host_hs_hsic60m_p1_clk", "l3_init_cm:0038:11"),
- DT_CLK(NULL, "usb_host_hs_hsic60m_p2_clk", "l3_init_cm:0038:12"),
- DT_CLK(NULL, "usb_host_hs_utmi_p1_clk", "l3_init_cm:0038:8"),
- DT_CLK(NULL, "usb_host_hs_utmi_p2_clk", "l3_init_cm:0038:9"),
- DT_CLK(NULL, "usb_host_hs_utmi_p3_clk", "l3_init_cm:0038:10"),
- DT_CLK(NULL, "usb_otg_hs_xclk", "l3_init_cm:0040:8"),
- DT_CLK(NULL, "usb_tll_hs_usb_ch0_clk", "l3_init_cm:0048:8"),
- DT_CLK(NULL, "usb_tll_hs_usb_ch1_clk", "l3_init_cm:0048:9"),
- DT_CLK(NULL, "usb_tll_hs_usb_ch2_clk", "l3_init_cm:0048:10"),
- DT_CLK(NULL, "utmi_p1_gfclk", "l3_init_cm:0038:24"),
- DT_CLK(NULL, "utmi_p2_gfclk", "l3_init_cm:0038:25"),
+ DT_CLK(NULL, "aess_fclk", "abe-clkctrl:0008:24"),
+ DT_CLK(NULL, "cm2_dm10_mux", "l4-per-clkctrl:0008:24"),
+ DT_CLK(NULL, "cm2_dm11_mux", "l4-per-clkctrl:0010:24"),
+ DT_CLK(NULL, "cm2_dm2_mux", "l4-per-clkctrl:0018:24"),
+ DT_CLK(NULL, "cm2_dm3_mux", "l4-per-clkctrl:0020:24"),
+ DT_CLK(NULL, "cm2_dm4_mux", "l4-per-clkctrl:0028:24"),
+ DT_CLK(NULL, "cm2_dm9_mux", "l4-per-clkctrl:0030:24"),
+ DT_CLK(NULL, "dmic_sync_mux_ck", "abe-clkctrl:0018:26"),
+ DT_CLK(NULL, "dmt1_clk_mux", "l4-wkup-clkctrl:0020:24"),
+ DT_CLK(NULL, "dss_48mhz_clk", "l3-dss-clkctrl:0000:9"),
+ DT_CLK(NULL, "dss_dss_clk", "l3-dss-clkctrl:0000:8"),
+ DT_CLK(NULL, "dss_sys_clk", "l3-dss-clkctrl:0000:10"),
+ DT_CLK(NULL, "dss_tv_clk", "l3-dss-clkctrl:0000:11"),
+ DT_CLK(NULL, "fdif_fck", "iss-clkctrl:0008:24"),
+ DT_CLK(NULL, "func_dmic_abe_gfclk", "abe-clkctrl:0018:24"),
+ DT_CLK(NULL, "func_mcasp_abe_gfclk", "abe-clkctrl:0020:24"),
+ DT_CLK(NULL, "func_mcbsp1_gfclk", "abe-clkctrl:0028:24"),
+ DT_CLK(NULL, "func_mcbsp2_gfclk", "abe-clkctrl:0030:24"),
+ DT_CLK(NULL, "func_mcbsp3_gfclk", "abe-clkctrl:0038:24"),
+ DT_CLK(NULL, "gpio1_dbclk", "l4-wkup-clkctrl:0018:8"),
+ DT_CLK(NULL, "gpio2_dbclk", "l4-per-clkctrl:0040:8"),
+ DT_CLK(NULL, "gpio3_dbclk", "l4-per-clkctrl:0048:8"),
+ DT_CLK(NULL, "gpio4_dbclk", "l4-per-clkctrl:0050:8"),
+ DT_CLK(NULL, "gpio5_dbclk", "l4-per-clkctrl:0058:8"),
+ DT_CLK(NULL, "gpio6_dbclk", "l4-per-clkctrl:0060:8"),
+ DT_CLK(NULL, "hsi_fck", "l3-init-clkctrl:0018:24"),
+ DT_CLK(NULL, "hsmmc1_fclk", "l3-init-clkctrl:0008:24"),
+ DT_CLK(NULL, "hsmmc2_fclk", "l3-init-clkctrl:0010:24"),
+ DT_CLK(NULL, "iss_ctrlclk", "iss-clkctrl:0000:8"),
+ DT_CLK(NULL, "mcasp_sync_mux_ck", "abe-clkctrl:0020:26"),
+ DT_CLK(NULL, "mcbsp1_sync_mux_ck", "abe-clkctrl:0028:26"),
+ DT_CLK(NULL, "mcbsp2_sync_mux_ck", "abe-clkctrl:0030:26"),
+ DT_CLK(NULL, "mcbsp3_sync_mux_ck", "abe-clkctrl:0038:26"),
+ DT_CLK(NULL, "mcbsp4_sync_mux_ck", "l4-per-clkctrl:00c0:26"),
+ DT_CLK(NULL, "ocp2scp_usb_phy_phy_48m", "l3-init-clkctrl:00c0:8"),
+ DT_CLK(NULL, "otg_60m_gfclk", "l3-init-clkctrl:0040:24"),
+ DT_CLK(NULL, "per_mcbsp4_gfclk", "l4-per-clkctrl:00c0:24"),
+ DT_CLK(NULL, "pmd_stm_clock_mux_ck", "emu-sys-clkctrl:0000:20"),
+ DT_CLK(NULL, "pmd_trace_clk_mux_ck", "emu-sys-clkctrl:0000:22"),
+ DT_CLK(NULL, "sgx_clk_mux", "l3-gfx-clkctrl:0000:24"),
+ DT_CLK(NULL, "slimbus1_fclk_0", "abe-clkctrl:0040:8"),
+ DT_CLK(NULL, "slimbus1_fclk_1", "abe-clkctrl:0040:9"),
+ DT_CLK(NULL, "slimbus1_fclk_2", "abe-clkctrl:0040:10"),
+ DT_CLK(NULL, "slimbus1_slimbus_clk", "abe-clkctrl:0040:11"),
+ DT_CLK(NULL, "slimbus2_fclk_0", "l4-per-clkctrl:0118:8"),
+ DT_CLK(NULL, "slimbus2_fclk_1", "l4-per-clkctrl:0118:9"),
+ DT_CLK(NULL, "slimbus2_slimbus_clk", "l4-per-clkctrl:0118:10"),
+ DT_CLK(NULL, "stm_clk_div_ck", "emu-sys-clkctrl:0000:27"),
+ DT_CLK(NULL, "timer5_sync_mux", "abe-clkctrl:0048:24"),
+ DT_CLK(NULL, "timer6_sync_mux", "abe-clkctrl:0050:24"),
+ DT_CLK(NULL, "timer7_sync_mux", "abe-clkctrl:0058:24"),
+ DT_CLK(NULL, "timer8_sync_mux", "abe-clkctrl:0060:24"),
+ DT_CLK(NULL, "trace_clk_div_div_ck", "emu-sys-clkctrl:0000:24"),
+ DT_CLK(NULL, "usb_host_hs_func48mclk", "l3-init-clkctrl:0038:15"),
+ DT_CLK(NULL, "usb_host_hs_hsic480m_p1_clk", "l3-init-clkctrl:0038:13"),
+ DT_CLK(NULL, "usb_host_hs_hsic480m_p2_clk", "l3-init-clkctrl:0038:14"),
+ DT_CLK(NULL, "usb_host_hs_hsic60m_p1_clk", "l3-init-clkctrl:0038:11"),
+ DT_CLK(NULL, "usb_host_hs_hsic60m_p2_clk", "l3-init-clkctrl:0038:12"),
+ DT_CLK(NULL, "usb_host_hs_utmi_p1_clk", "l3-init-clkctrl:0038:8"),
+ DT_CLK(NULL, "usb_host_hs_utmi_p2_clk", "l3-init-clkctrl:0038:9"),
+ DT_CLK(NULL, "usb_host_hs_utmi_p3_clk", "l3_init-clkctrl:0038:10"),
+ DT_CLK(NULL, "usb_otg_hs_xclk", "l3-init-clkctrl:0040:8"),
+ DT_CLK(NULL, "usb_tll_hs_usb_ch0_clk", "l3-init-clkctrl:0048:8"),
+ DT_CLK(NULL, "usb_tll_hs_usb_ch1_clk", "l3-init-clkctrl:0048:9"),
+ DT_CLK(NULL, "usb_tll_hs_usb_ch2_clk", "l3-init-clkctrl:0048:10"),
+ DT_CLK(NULL, "utmi_p1_gfclk", "l3-init-clkctrl:0038:24"),
+ DT_CLK(NULL, "utmi_p2_gfclk", "l3-init-clkctrl:0038:25"),
{ .node_name = NULL },
};
diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c
index 90e0a9ea6351..b4aff76eb373 100644
--- a/drivers/clk/ti/clk-54xx.c
+++ b/drivers/clk/ti/clk-54xx.c
@@ -50,7 +50,7 @@ static const struct omap_clkctrl_bit_data omap5_aess_bit_data[] __initconst = {
};
static const char * const omap5_dmic_gfclk_parents[] __initconst = {
- "abe_cm:clk:0018:26",
+ "abe-clkctrl:0018:26",
"pad_clks_ck",
"slimbus_clk",
NULL,
@@ -70,7 +70,7 @@ static const struct omap_clkctrl_bit_data omap5_dmic_bit_data[] __initconst = {
};
static const char * const omap5_mcbsp1_gfclk_parents[] __initconst = {
- "abe_cm:clk:0028:26",
+ "abe-clkctrl:0028:26",
"pad_clks_ck",
"slimbus_clk",
NULL,
@@ -83,7 +83,7 @@ static const struct omap_clkctrl_bit_data omap5_mcbsp1_bit_data[] __initconst =
};
static const char * const omap5_mcbsp2_gfclk_parents[] __initconst = {
- "abe_cm:clk:0030:26",
+ "abe-clkctrl:0030:26",
"pad_clks_ck",
"slimbus_clk",
NULL,
@@ -96,7 +96,7 @@ static const struct omap_clkctrl_bit_data omap5_mcbsp2_bit_data[] __initconst =
};
static const char * const omap5_mcbsp3_gfclk_parents[] __initconst = {
- "abe_cm:clk:0038:26",
+ "abe-clkctrl:0038:26",
"pad_clks_ck",
"slimbus_clk",
NULL,
@@ -136,16 +136,16 @@ static const struct omap_clkctrl_bit_data omap5_timer8_bit_data[] __initconst =
static const struct omap_clkctrl_reg_data omap5_abe_clkctrl_regs[] __initconst = {
{ OMAP5_L4_ABE_CLKCTRL, NULL, 0, "abe_iclk" },
- { OMAP5_AESS_CLKCTRL, omap5_aess_bit_data, CLKF_SW_SUP, "abe_cm:clk:0008:24" },
+ { OMAP5_AESS_CLKCTRL, omap5_aess_bit_data, CLKF_SW_SUP, "abe-clkctrl:0008:24" },
{ OMAP5_MCPDM_CLKCTRL, NULL, CLKF_SW_SUP, "pad_clks_ck" },
- { OMAP5_DMIC_CLKCTRL, omap5_dmic_bit_data, CLKF_SW_SUP, "abe_cm:clk:0018:24" },
- { OMAP5_MCBSP1_CLKCTRL, omap5_mcbsp1_bit_data, CLKF_SW_SUP, "abe_cm:clk:0028:24" },
- { OMAP5_MCBSP2_CLKCTRL, omap5_mcbsp2_bit_data, CLKF_SW_SUP, "abe_cm:clk:0030:24" },
- { OMAP5_MCBSP3_CLKCTRL, omap5_mcbsp3_bit_data, CLKF_SW_SUP, "abe_cm:clk:0038:24" },
- { OMAP5_TIMER5_CLKCTRL, omap5_timer5_bit_data, CLKF_SW_SUP, "abe_cm:clk:0048:24" },
- { OMAP5_TIMER6_CLKCTRL, omap5_timer6_bit_data, CLKF_SW_SUP, "abe_cm:clk:0050:24" },
- { OMAP5_TIMER7_CLKCTRL, omap5_timer7_bit_data, CLKF_SW_SUP, "abe_cm:clk:0058:24" },
- { OMAP5_TIMER8_CLKCTRL, omap5_timer8_bit_data, CLKF_SW_SUP, "abe_cm:clk:0060:24" },
+ { OMAP5_DMIC_CLKCTRL, omap5_dmic_bit_data, CLKF_SW_SUP, "abe-clkctrl:0018:24" },
+ { OMAP5_MCBSP1_CLKCTRL, omap5_mcbsp1_bit_data, CLKF_SW_SUP, "abe-clkctrl:0028:24" },
+ { OMAP5_MCBSP2_CLKCTRL, omap5_mcbsp2_bit_data, CLKF_SW_SUP, "abe-clkctrl:0030:24" },
+ { OMAP5_MCBSP3_CLKCTRL, omap5_mcbsp3_bit_data, CLKF_SW_SUP, "abe-clkctrl:0038:24" },
+ { OMAP5_TIMER5_CLKCTRL, omap5_timer5_bit_data, CLKF_SW_SUP, "abe-clkctrl:0048:24" },
+ { OMAP5_TIMER6_CLKCTRL, omap5_timer6_bit_data, CLKF_SW_SUP, "abe-clkctrl:0050:24" },
+ { OMAP5_TIMER7_CLKCTRL, omap5_timer7_bit_data, CLKF_SW_SUP, "abe-clkctrl:0058:24" },
+ { OMAP5_TIMER8_CLKCTRL, omap5_timer8_bit_data, CLKF_SW_SUP, "abe-clkctrl:0060:24" },
{ 0 },
};
@@ -268,12 +268,12 @@ static const struct omap_clkctrl_bit_data omap5_gpio8_bit_data[] __initconst = {
};
static const struct omap_clkctrl_reg_data omap5_l4per_clkctrl_regs[] __initconst = {
- { OMAP5_TIMER10_CLKCTRL, omap5_timer10_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0008:24" },
- { OMAP5_TIMER11_CLKCTRL, omap5_timer11_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0010:24" },
- { OMAP5_TIMER2_CLKCTRL, omap5_timer2_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0018:24" },
- { OMAP5_TIMER3_CLKCTRL, omap5_timer3_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0020:24" },
- { OMAP5_TIMER4_CLKCTRL, omap5_timer4_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0028:24" },
- { OMAP5_TIMER9_CLKCTRL, omap5_timer9_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0030:24" },
+ { OMAP5_TIMER10_CLKCTRL, omap5_timer10_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0008:24" },
+ { OMAP5_TIMER11_CLKCTRL, omap5_timer11_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0010:24" },
+ { OMAP5_TIMER2_CLKCTRL, omap5_timer2_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0018:24" },
+ { OMAP5_TIMER3_CLKCTRL, omap5_timer3_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0020:24" },
+ { OMAP5_TIMER4_CLKCTRL, omap5_timer4_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0028:24" },
+ { OMAP5_TIMER9_CLKCTRL, omap5_timer9_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0030:24" },
{ OMAP5_GPIO2_CLKCTRL, omap5_gpio2_bit_data, CLKF_HW_SUP, "l4_root_clk_div" },
{ OMAP5_GPIO3_CLKCTRL, omap5_gpio3_bit_data, CLKF_HW_SUP, "l4_root_clk_div" },
{ OMAP5_GPIO4_CLKCTRL, omap5_gpio4_bit_data, CLKF_HW_SUP, "l4_root_clk_div" },
@@ -345,7 +345,7 @@ static const struct omap_clkctrl_bit_data omap5_dss_core_bit_data[] __initconst
};
static const struct omap_clkctrl_reg_data omap5_dss_clkctrl_regs[] __initconst = {
- { OMAP5_DSS_CORE_CLKCTRL, omap5_dss_core_bit_data, CLKF_SW_SUP, "dss_cm:clk:0000:8" },
+ { OMAP5_DSS_CORE_CLKCTRL, omap5_dss_core_bit_data, CLKF_SW_SUP, "dss-clkctrl:0000:8" },
{ 0 },
};
@@ -378,7 +378,7 @@ static const struct omap_clkctrl_bit_data omap5_gpu_core_bit_data[] __initconst
};
static const struct omap_clkctrl_reg_data omap5_gpu_clkctrl_regs[] __initconst = {
- { OMAP5_GPU_CLKCTRL, omap5_gpu_core_bit_data, CLKF_SW_SUP, "gpu_cm:clk:0000:24" },
+ { OMAP5_GPU_CLKCTRL, omap5_gpu_core_bit_data, CLKF_SW_SUP, "gpu-clkctrl:0000:24" },
{ 0 },
};
@@ -389,7 +389,7 @@ static const char * const omap5_mmc1_fclk_mux_parents[] __initconst = {
};
static const char * const omap5_mmc1_fclk_parents[] __initconst = {
- "l3init_cm:clk:0008:24",
+ "l3init-clkctrl:0008:24",
NULL,
};
@@ -405,7 +405,7 @@ static const struct omap_clkctrl_bit_data omap5_mmc1_bit_data[] __initconst = {
};
static const char * const omap5_mmc2_fclk_parents[] __initconst = {
- "l3init_cm:clk:0010:24",
+ "l3init-clkctrl:0010:24",
NULL,
};
@@ -430,12 +430,12 @@ static const char * const omap5_usb_host_hs_hsic480m_p3_clk_parents[] __initcons
};
static const char * const omap5_usb_host_hs_utmi_p1_clk_parents[] __initconst = {
- "l3init_cm:clk:0038:24",
+ "l3init-clkctrl:0038:24",
NULL,
};
static const char * const omap5_usb_host_hs_utmi_p2_clk_parents[] __initconst = {
- "l3init_cm:clk:0038:25",
+ "l3init-clkctrl:0038:25",
NULL,
};
@@ -494,8 +494,8 @@ static const struct omap_clkctrl_bit_data omap5_usb_otg_ss_bit_data[] __initcons
};
static const struct omap_clkctrl_reg_data omap5_l3init_clkctrl_regs[] __initconst = {
- { OMAP5_MMC1_CLKCTRL, omap5_mmc1_bit_data, CLKF_SW_SUP, "l3init_cm:clk:0008:25" },
- { OMAP5_MMC2_CLKCTRL, omap5_mmc2_bit_data, CLKF_SW_SUP, "l3init_cm:clk:0010:25" },
+ { OMAP5_MMC1_CLKCTRL, omap5_mmc1_bit_data, CLKF_SW_SUP, "l3init-clkctrl:0008:25" },
+ { OMAP5_MMC2_CLKCTRL, omap5_mmc2_bit_data, CLKF_SW_SUP, "l3init-clkctrl:0010:25" },
{ OMAP5_USB_HOST_HS_CLKCTRL, omap5_usb_host_hs_bit_data, CLKF_SW_SUP, "l3init_60m_fclk" },
{ OMAP5_USB_TLL_HS_CLKCTRL, omap5_usb_tll_hs_bit_data, CLKF_HW_SUP, "l4_root_clk_div" },
{ OMAP5_SATA_CLKCTRL, omap5_sata_bit_data, CLKF_SW_SUP, "func_48m_fclk" },
@@ -519,7 +519,7 @@ static const struct omap_clkctrl_reg_data omap5_wkupaon_clkctrl_regs[] __initcon
{ OMAP5_L4_WKUP_CLKCTRL, NULL, 0, "wkupaon_iclk_mux" },
{ OMAP5_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
{ OMAP5_GPIO1_CLKCTRL, omap5_gpio1_bit_data, CLKF_HW_SUP, "wkupaon_iclk_mux" },
- { OMAP5_TIMER1_CLKCTRL, omap5_timer1_bit_data, CLKF_SW_SUP, "wkupaon_cm:clk:0020:24" },
+ { OMAP5_TIMER1_CLKCTRL, omap5_timer1_bit_data, CLKF_SW_SUP, "wkupaon-clkctrl:0020:24" },
{ OMAP5_COUNTER_32K_CLKCTRL, NULL, 0, "wkupaon_iclk_mux" },
{ OMAP5_KBD_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
{ 0 },
@@ -549,58 +549,58 @@ const struct omap_clkctrl_data omap5_clkctrl_data[] __initconst = {
static struct ti_dt_clk omap54xx_clks[] = {
DT_CLK(NULL, "timer_32k_ck", "sys_32k_ck"),
DT_CLK(NULL, "sys_clkin_ck", "sys_clkin"),
- DT_CLK(NULL, "dmic_gfclk", "abe_cm:0018:24"),
- DT_CLK(NULL, "dmic_sync_mux_ck", "abe_cm:0018:26"),
- DT_CLK(NULL, "dss_32khz_clk", "dss_cm:0000:11"),
- DT_CLK(NULL, "dss_48mhz_clk", "dss_cm:0000:9"),
- DT_CLK(NULL, "dss_dss_clk", "dss_cm:0000:8"),
- DT_CLK(NULL, "dss_sys_clk", "dss_cm:0000:10"),
- DT_CLK(NULL, "gpio1_dbclk", "wkupaon_cm:0018:8"),
- DT_CLK(NULL, "gpio2_dbclk", "l4per_cm:0040:8"),
- DT_CLK(NULL, "gpio3_dbclk", "l4per_cm:0048:8"),
- DT_CLK(NULL, "gpio4_dbclk", "l4per_cm:0050:8"),
- DT_CLK(NULL, "gpio5_dbclk", "l4per_cm:0058:8"),
- DT_CLK(NULL, "gpio6_dbclk", "l4per_cm:0060:8"),
- DT_CLK(NULL, "gpio7_dbclk", "l4per_cm:00f0:8"),
- DT_CLK(NULL, "gpio8_dbclk", "l4per_cm:00f8:8"),
- DT_CLK(NULL, "mcbsp1_gfclk", "abe_cm:0028:24"),
- DT_CLK(NULL, "mcbsp1_sync_mux_ck", "abe_cm:0028:26"),
- DT_CLK(NULL, "mcbsp2_gfclk", "abe_cm:0030:24"),
- DT_CLK(NULL, "mcbsp2_sync_mux_ck", "abe_cm:0030:26"),
- DT_CLK(NULL, "mcbsp3_gfclk", "abe_cm:0038:24"),
- DT_CLK(NULL, "mcbsp3_sync_mux_ck", "abe_cm:0038:26"),
- DT_CLK(NULL, "mmc1_32khz_clk", "l3init_cm:0008:8"),
- DT_CLK(NULL, "mmc1_fclk", "l3init_cm:0008:25"),
- DT_CLK(NULL, "mmc1_fclk_mux", "l3init_cm:0008:24"),
- DT_CLK(NULL, "mmc2_fclk", "l3init_cm:0010:25"),
- DT_CLK(NULL, "mmc2_fclk_mux", "l3init_cm:0010:24"),
- DT_CLK(NULL, "sata_ref_clk", "l3init_cm:0068:8"),
- DT_CLK(NULL, "timer10_gfclk_mux", "l4per_cm:0008:24"),
- DT_CLK(NULL, "timer11_gfclk_mux", "l4per_cm:0010:24"),
- DT_CLK(NULL, "timer1_gfclk_mux", "wkupaon_cm:0020:24"),
- DT_CLK(NULL, "timer2_gfclk_mux", "l4per_cm:0018:24"),
- DT_CLK(NULL, "timer3_gfclk_mux", "l4per_cm:0020:24"),
- DT_CLK(NULL, "timer4_gfclk_mux", "l4per_cm:0028:24"),
- DT_CLK(NULL, "timer5_gfclk_mux", "abe_cm:0048:24"),
- DT_CLK(NULL, "timer6_gfclk_mux", "abe_cm:0050:24"),
- DT_CLK(NULL, "timer7_gfclk_mux", "abe_cm:0058:24"),
- DT_CLK(NULL, "timer8_gfclk_mux", "abe_cm:0060:24"),
- DT_CLK(NULL, "timer9_gfclk_mux", "l4per_cm:0030:24"),
- DT_CLK(NULL, "usb_host_hs_hsic480m_p1_clk", "l3init_cm:0038:13"),
- DT_CLK(NULL, "usb_host_hs_hsic480m_p2_clk", "l3init_cm:0038:14"),
- DT_CLK(NULL, "usb_host_hs_hsic480m_p3_clk", "l3init_cm:0038:7"),
- DT_CLK(NULL, "usb_host_hs_hsic60m_p1_clk", "l3init_cm:0038:11"),
- DT_CLK(NULL, "usb_host_hs_hsic60m_p2_clk", "l3init_cm:0038:12"),
- DT_CLK(NULL, "usb_host_hs_hsic60m_p3_clk", "l3init_cm:0038:6"),
- DT_CLK(NULL, "usb_host_hs_utmi_p1_clk", "l3init_cm:0038:8"),
- DT_CLK(NULL, "usb_host_hs_utmi_p2_clk", "l3init_cm:0038:9"),
- DT_CLK(NULL, "usb_host_hs_utmi_p3_clk", "l3init_cm:0038:10"),
- DT_CLK(NULL, "usb_otg_ss_refclk960m", "l3init_cm:00d0:8"),
- DT_CLK(NULL, "usb_tll_hs_usb_ch0_clk", "l3init_cm:0048:8"),
- DT_CLK(NULL, "usb_tll_hs_usb_ch1_clk", "l3init_cm:0048:9"),
- DT_CLK(NULL, "usb_tll_hs_usb_ch2_clk", "l3init_cm:0048:10"),
- DT_CLK(NULL, "utmi_p1_gfclk", "l3init_cm:0038:24"),
- DT_CLK(NULL, "utmi_p2_gfclk", "l3init_cm:0038:25"),
+ DT_CLK(NULL, "dmic_gfclk", "abe-clkctrl:0018:24"),
+ DT_CLK(NULL, "dmic_sync_mux_ck", "abe-clkctrl:0018:26"),
+ DT_CLK(NULL, "dss_32khz_clk", "dss-clkctrl:0000:11"),
+ DT_CLK(NULL, "dss_48mhz_clk", "dss-clkctrl:0000:9"),
+ DT_CLK(NULL, "dss_dss_clk", "dss-clkctrl:0000:8"),
+ DT_CLK(NULL, "dss_sys_clk", "dss-clkctrl:0000:10"),
+ DT_CLK(NULL, "gpio1_dbclk", "wkupaon-clkctrl:0018:8"),
+ DT_CLK(NULL, "gpio2_dbclk", "l4per-clkctrl:0040:8"),
+ DT_CLK(NULL, "gpio3_dbclk", "l4per-clkctrl:0048:8"),
+ DT_CLK(NULL, "gpio4_dbclk", "l4per-clkctrl:0050:8"),
+ DT_CLK(NULL, "gpio5_dbclk", "l4per-clkctrl:0058:8"),
+ DT_CLK(NULL, "gpio6_dbclk", "l4per-clkctrl:0060:8"),
+ DT_CLK(NULL, "gpio7_dbclk", "l4per-clkctrl:00f0:8"),
+ DT_CLK(NULL, "gpio8_dbclk", "l4per-clkctrl:00f8:8"),
+ DT_CLK(NULL, "mcbsp1_gfclk", "abe-clkctrl:0028:24"),
+ DT_CLK(NULL, "mcbsp1_sync_mux_ck", "abe-clkctrl:0028:26"),
+ DT_CLK(NULL, "mcbsp2_gfclk", "abe-clkctrl:0030:24"),
+ DT_CLK(NULL, "mcbsp2_sync_mux_ck", "abe-clkctrl:0030:26"),
+ DT_CLK(NULL, "mcbsp3_gfclk", "abe-clkctrl:0038:24"),
+ DT_CLK(NULL, "mcbsp3_sync_mux_ck", "abe-clkctrl:0038:26"),
+ DT_CLK(NULL, "mmc1_32khz_clk", "l3init-clkctrl:0008:8"),
+ DT_CLK(NULL, "mmc1_fclk", "l3init-clkctrl:0008:25"),
+ DT_CLK(NULL, "mmc1_fclk_mux", "l3init-clkctrl:0008:24"),
+ DT_CLK(NULL, "mmc2_fclk", "l3init-clkctrl:0010:25"),
+ DT_CLK(NULL, "mmc2_fclk_mux", "l3init-clkctrl:0010:24"),
+ DT_CLK(NULL, "sata_ref_clk", "l3init-clkctrl:0068:8"),
+ DT_CLK(NULL, "timer10_gfclk_mux", "l4per-clkctrl:0008:24"),
+ DT_CLK(NULL, "timer11_gfclk_mux", "l4per-clkctrl:0010:24"),
+ DT_CLK(NULL, "timer1_gfclk_mux", "wkupaon-clkctrl:0020:24"),
+ DT_CLK(NULL, "timer2_gfclk_mux", "l4per-clkctrl:0018:24"),
+ DT_CLK(NULL, "timer3_gfclk_mux", "l4per-clkctrl:0020:24"),
+ DT_CLK(NULL, "timer4_gfclk_mux", "l4per-clkctrl:0028:24"),
+ DT_CLK(NULL, "timer5_gfclk_mux", "abe-clkctrl:0048:24"),
+ DT_CLK(NULL, "timer6_gfclk_mux", "abe-clkctrl:0050:24"),
+ DT_CLK(NULL, "timer7_gfclk_mux", "abe-clkctrl:0058:24"),
+ DT_CLK(NULL, "timer8_gfclk_mux", "abe-clkctrl:0060:24"),
+ DT_CLK(NULL, "timer9_gfclk_mux", "l4per-clkctrl:0030:24"),
+ DT_CLK(NULL, "usb_host_hs_hsic480m_p1_clk", "l3init-clkctrl:0038:13"),
+ DT_CLK(NULL, "usb_host_hs_hsic480m_p2_clk", "l3init-clkctrl:0038:14"),
+ DT_CLK(NULL, "usb_host_hs_hsic480m_p3_clk", "l3init-clkctrl:0038:7"),
+ DT_CLK(NULL, "usb_host_hs_hsic60m_p1_clk", "l3init-clkctrl:0038:11"),
+ DT_CLK(NULL, "usb_host_hs_hsic60m_p2_clk", "l3init-clkctrl:0038:12"),
+ DT_CLK(NULL, "usb_host_hs_hsic60m_p3_clk", "l3init-clkctrl:0038:6"),
+ DT_CLK(NULL, "usb_host_hs_utmi_p1_clk", "l3init-clkctrl:0038:8"),
+ DT_CLK(NULL, "usb_host_hs_utmi_p2_clk", "l3init-clkctrl:0038:9"),
+ DT_CLK(NULL, "usb_host_hs_utmi_p3_clk", "l3init-clkctrl:0038:10"),
+ DT_CLK(NULL, "usb_otg_ss_refclk960m", "l3init-clkctrl:00d0:8"),
+ DT_CLK(NULL, "usb_tll_hs_usb_ch0_clk", "l3init-clkctrl:0048:8"),
+ DT_CLK(NULL, "usb_tll_hs_usb_ch1_clk", "l3init-clkctrl:0048:9"),
+ DT_CLK(NULL, "usb_tll_hs_usb_ch2_clk", "l3init-clkctrl:0048:10"),
+ DT_CLK(NULL, "utmi_p1_gfclk", "l3init-clkctrl:0038:24"),
+ DT_CLK(NULL, "utmi_p2_gfclk", "l3init-clkctrl:0038:25"),
{ .node_name = NULL },
};
diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
index 3b5ebae3740f..ae5862879417 100644
--- a/drivers/clk/ti/clkctrl.c
+++ b/drivers/clk/ti/clkctrl.c
@@ -520,10 +520,6 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
char *c;
u16 soc_mask = 0;
- if (!(ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT) &&
- of_node_name_eq(node, "clk"))
- ti_clk_features.flags |= TI_CLK_CLKCTRL_COMPAT;
-
addrp = of_get_address(node, 0, NULL, NULL);
addr = (u32)of_translate_address(node, addrp);
diff --git a/drivers/clk/x86/Makefile b/drivers/clk/x86/Makefile
index 1244c4e568ff..c2088b3c4081 100644
--- a/drivers/clk/x86/Makefile
+++ b/drivers/clk/x86/Makefile
@@ -1,6 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_PMC_ATOM) += clk-pmc-atom.o
obj-$(CONFIG_X86_AMD_PLATFORM_DEVICE) += clk-fch.o
-clk-x86-lpss-y := clk-lpss-atom.o
-obj-$(CONFIG_X86_INTEL_LPSS) += clk-x86-lpss.o
+obj-$(CONFIG_X86_INTEL_LPSS) += clk-lpss-atom.o clk-pmc-atom.o
obj-$(CONFIG_CLK_LGM_CGU) += clk-cgu.o clk-cgu-pll.o clk-lgm.o
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index 593d5a957b69..969a552da8d2 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -7,6 +7,9 @@
* either be read from the "time" and "timeh" CSRs, and can use the SBI to
* setup events, or directly accessed using MMIO registers.
*/
+
+#define pr_fmt(fmt) "riscv-timer: " fmt
+
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
@@ -20,14 +23,28 @@
#include <linux/of_irq.h>
#include <clocksource/timer-riscv.h>
#include <asm/smp.h>
+#include <asm/hwcap.h>
#include <asm/sbi.h>
#include <asm/timex.h>
+static DEFINE_STATIC_KEY_FALSE(riscv_sstc_available);
+
static int riscv_clock_next_event(unsigned long delta,
struct clock_event_device *ce)
{
+ u64 next_tval = get_cycles64() + delta;
+
csr_set(CSR_IE, IE_TIE);
- sbi_set_timer(get_cycles64() + delta);
+ if (static_branch_likely(&riscv_sstc_available)) {
+#if defined(CONFIG_32BIT)
+ csr_write(CSR_STIMECMP, next_tval & 0xFFFFFFFF);
+ csr_write(CSR_STIMECMPH, next_tval >> 32);
+#else
+ csr_write(CSR_STIMECMP, next_tval);
+#endif
+ } else
+ sbi_set_timer(next_tval);
+
return 0;
}
@@ -101,20 +118,21 @@ static irqreturn_t riscv_timer_interrupt(int irq, void *dev_id)
static int __init riscv_timer_init_dt(struct device_node *n)
{
- int cpuid, hartid, error;
+ int cpuid, error;
+ unsigned long hartid;
struct device_node *child;
struct irq_domain *domain;
- hartid = riscv_of_processor_hartid(n);
- if (hartid < 0) {
- pr_warn("Not valid hartid for node [%pOF] error = [%d]\n",
+ error = riscv_of_processor_hartid(n, &hartid);
+ if (error < 0) {
+ pr_warn("Not valid hartid for node [%pOF] error = [%lu]\n",
n, hartid);
- return hartid;
+ return error;
}
cpuid = riscv_hartid_to_cpuid(hartid);
if (cpuid < 0) {
- pr_warn("Invalid cpuid for hartid [%d]\n", hartid);
+ pr_warn("Invalid cpuid for hartid [%lu]\n", hartid);
return cpuid;
}
@@ -140,7 +158,7 @@ static int __init riscv_timer_init_dt(struct device_node *n)
return -ENODEV;
}
- pr_info("%s: Registering clocksource cpuid [%d] hartid [%d]\n",
+ pr_info("%s: Registering clocksource cpuid [%d] hartid [%lu]\n",
__func__, cpuid, hartid);
error = clocksource_register_hz(&riscv_clocksource, riscv_timebase);
if (error) {
@@ -165,6 +183,12 @@ static int __init riscv_timer_init_dt(struct device_node *n)
if (error)
pr_err("cpu hp setup state failed for RISCV timer [%d]\n",
error);
+
+ if (riscv_isa_extension_available(NULL, SSTC)) {
+ pr_info("Timer interrupt in S-mode is available via sstc extension\n");
+ static_branch_enable(&riscv_sstc_available);
+ }
+
return error;
}
diff --git a/drivers/comedi/drivers/comedi_isadma.c b/drivers/comedi/drivers/comedi_isadma.c
index 700982464c53..020b3d1e1ac0 100644
--- a/drivers/comedi/drivers/comedi_isadma.c
+++ b/drivers/comedi/drivers/comedi_isadma.c
@@ -8,7 +8,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
-#include <asm/dma.h>
+#include <linux/isa-dma.h>
#include <linux/comedi/comedidev.h>
#include <linux/comedi/comedi_isadma.h>
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 8fcaba541539..d69d13a26414 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -29,9 +29,9 @@ struct private_data {
cpumask_var_t cpus;
struct device *cpu_dev;
- struct opp_table *opp_table;
struct cpufreq_frequency_table *freq_table;
bool have_static_opps;
+ int opp_token;
};
static LIST_HEAD(priv_list);
@@ -193,7 +193,7 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu)
struct private_data *priv;
struct device *cpu_dev;
bool fallback = false;
- const char *reg_name;
+ const char *reg_name[] = { NULL, NULL };
int ret;
/* Check if this CPU is already covered by some other policy */
@@ -218,12 +218,11 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu)
* OPP layer will be taking care of regulators now, but it needs to know
* the name of the regulator first.
*/
- reg_name = find_supply_name(cpu_dev);
- if (reg_name) {
- priv->opp_table = dev_pm_opp_set_regulators(cpu_dev, &reg_name,
- 1);
- if (IS_ERR(priv->opp_table)) {
- ret = PTR_ERR(priv->opp_table);
+ reg_name[0] = find_supply_name(cpu_dev);
+ if (reg_name[0]) {
+ priv->opp_token = dev_pm_opp_set_regulators(cpu_dev, reg_name);
+ if (priv->opp_token < 0) {
+ ret = priv->opp_token;
if (ret != -EPROBE_DEFER)
dev_err(cpu_dev, "failed to set regulators: %d\n",
ret);
@@ -295,7 +294,7 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu)
out:
if (priv->have_static_opps)
dev_pm_opp_of_cpumask_remove_table(priv->cpus);
- dev_pm_opp_put_regulators(priv->opp_table);
+ dev_pm_opp_put_regulators(priv->opp_token);
free_cpumask:
free_cpumask_var(priv->cpus);
return ret;
@@ -309,7 +308,7 @@ static void dt_cpufreq_release(void)
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &priv->freq_table);
if (priv->have_static_opps)
dev_pm_opp_of_cpumask_remove_table(priv->cpus);
- dev_pm_opp_put_regulators(priv->opp_table);
+ dev_pm_opp_put_regulators(priv->opp_token);
free_cpumask_var(priv->cpus);
list_del(&priv->node);
}
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 954eef26685f..69b3d61852ac 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -532,7 +532,7 @@ static unsigned int __resolve_freq(struct cpufreq_policy *policy,
target_freq = clamp_val(target_freq, policy->min, policy->max);
- if (!cpufreq_driver->target_index)
+ if (!policy->freq_table)
return target_freq;
idx = cpufreq_frequency_table_target(policy, target_freq, relation);
@@ -1348,15 +1348,15 @@ static int cpufreq_online(unsigned int cpu)
}
if (!new_policy && cpufreq_driver->online) {
+ /* Recover policy->cpus using related_cpus */
+ cpumask_copy(policy->cpus, policy->related_cpus);
+
ret = cpufreq_driver->online(policy);
if (ret) {
pr_debug("%s: %d: initialization failed\n", __func__,
__LINE__);
goto out_exit_policy;
}
-
- /* Recover policy->cpus using related_cpus */
- cpumask_copy(policy->cpus, policy->related_cpus);
} else {
cpumask_copy(policy->cpus, cpumask_of(cpu));
diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c
index 3fe9125156b4..76e553af2071 100644
--- a/drivers/cpufreq/imx-cpufreq-dt.c
+++ b/drivers/cpufreq/imx-cpufreq-dt.c
@@ -31,8 +31,8 @@
/* cpufreq-dt device registered by imx-cpufreq-dt */
static struct platform_device *cpufreq_dt_pdev;
-static struct opp_table *cpufreq_opp_table;
static struct device *cpu_dev;
+static int cpufreq_opp_token;
enum IMX7ULP_CPUFREQ_CLKS {
ARM,
@@ -153,9 +153,9 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "cpu speed grade %d mkt segment %d supported-hw %#x %#x\n",
speed_grade, mkt_segment, supported_hw[0], supported_hw[1]);
- cpufreq_opp_table = dev_pm_opp_set_supported_hw(cpu_dev, supported_hw, 2);
- if (IS_ERR(cpufreq_opp_table)) {
- ret = PTR_ERR(cpufreq_opp_table);
+ cpufreq_opp_token = dev_pm_opp_set_supported_hw(cpu_dev, supported_hw, 2);
+ if (cpufreq_opp_token < 0) {
+ ret = cpufreq_opp_token;
dev_err(&pdev->dev, "Failed to set supported opp: %d\n", ret);
return ret;
}
@@ -163,7 +163,7 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
cpufreq_dt_pdev = platform_device_register_data(
&pdev->dev, "cpufreq-dt", -1, NULL, 0);
if (IS_ERR(cpufreq_dt_pdev)) {
- dev_pm_opp_put_supported_hw(cpufreq_opp_table);
+ dev_pm_opp_put_supported_hw(cpufreq_opp_token);
ret = PTR_ERR(cpufreq_dt_pdev);
dev_err(&pdev->dev, "Failed to register cpufreq-dt: %d\n", ret);
return ret;
@@ -176,7 +176,7 @@ static int imx_cpufreq_dt_remove(struct platform_device *pdev)
{
platform_device_unregister(cpufreq_dt_pdev);
if (!of_machine_is_compatible("fsl,imx7ulp"))
- dev_pm_opp_put_supported_hw(cpufreq_opp_table);
+ dev_pm_opp_put_supported_hw(cpufreq_opp_token);
else
clk_bulk_put(ARRAY_SIZE(imx7ulp_clks), imx7ulp_clks);
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index 76f6b3884e6b..7f2680bc9a0f 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -478,6 +478,7 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
if (info->soc_data->ccifreq_supported) {
info->vproc_on_boot = regulator_get_voltage(info->proc_reg);
if (info->vproc_on_boot < 0) {
+ ret = info->vproc_on_boot;
dev_err(info->cpu_dev,
"invalid Vproc value: %d\n", info->vproc_on_boot);
goto out_disable_inter_clock;
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 36c79580fba2..d5ef3c66c762 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -15,6 +15,7 @@
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/units.h>
#define LUT_MAX_ENTRIES 40U
#define LUT_SRC GENMASK(31, 30)
@@ -26,8 +27,6 @@
#define GT_IRQ_STATUS BIT(2)
-#define HZ_PER_KHZ 1000
-
struct qcom_cpufreq_soc_data {
u32 reg_enable;
u32 reg_domain_state;
@@ -428,7 +427,7 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
return 0;
}
- ret = irq_set_affinity_hint(data->throttle_irq, policy->cpus);
+ ret = irq_set_affinity_and_hint(data->throttle_irq, policy->cpus);
if (ret)
dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
data->irq_name, data->throttle_irq);
@@ -445,7 +444,11 @@ static int qcom_cpufreq_hw_cpu_online(struct cpufreq_policy *policy)
if (data->throttle_irq <= 0)
return 0;
- ret = irq_set_affinity_hint(data->throttle_irq, policy->cpus);
+ mutex_lock(&data->throttle_lock);
+ data->cancel_throttle = false;
+ mutex_unlock(&data->throttle_lock);
+
+ ret = irq_set_affinity_and_hint(data->throttle_irq, policy->cpus);
if (ret)
dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
data->irq_name, data->throttle_irq);
@@ -465,7 +468,8 @@ static int qcom_cpufreq_hw_cpu_offline(struct cpufreq_policy *policy)
mutex_unlock(&data->throttle_lock);
cancel_delayed_work_sync(&data->throttle_work);
- irq_set_affinity_hint(data->throttle_irq, NULL);
+ irq_set_affinity_and_hint(data->throttle_irq, NULL);
+ disable_irq_nosync(data->throttle_irq);
return 0;
}
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
index 6dfa86971a75..863548f59c3e 100644
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -55,9 +55,7 @@ struct qcom_cpufreq_match_data {
};
struct qcom_cpufreq_drv {
- struct opp_table **names_opp_tables;
- struct opp_table **hw_opp_tables;
- struct opp_table **genpd_opp_tables;
+ int *opp_tokens;
u32 versions;
const struct qcom_cpufreq_match_data *data;
};
@@ -315,72 +313,43 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
}
of_node_put(np);
- drv->names_opp_tables = kcalloc(num_possible_cpus(),
- sizeof(*drv->names_opp_tables),
+ drv->opp_tokens = kcalloc(num_possible_cpus(), sizeof(*drv->opp_tokens),
GFP_KERNEL);
- if (!drv->names_opp_tables) {
+ if (!drv->opp_tokens) {
ret = -ENOMEM;
goto free_drv;
}
- drv->hw_opp_tables = kcalloc(num_possible_cpus(),
- sizeof(*drv->hw_opp_tables),
- GFP_KERNEL);
- if (!drv->hw_opp_tables) {
- ret = -ENOMEM;
- goto free_opp_names;
- }
-
- drv->genpd_opp_tables = kcalloc(num_possible_cpus(),
- sizeof(*drv->genpd_opp_tables),
- GFP_KERNEL);
- if (!drv->genpd_opp_tables) {
- ret = -ENOMEM;
- goto free_opp;
- }
for_each_possible_cpu(cpu) {
+ struct dev_pm_opp_config config = {
+ .supported_hw = NULL,
+ };
+
cpu_dev = get_cpu_device(cpu);
if (NULL == cpu_dev) {
ret = -ENODEV;
- goto free_genpd_opp;
+ goto free_opp;
}
if (drv->data->get_version) {
+ config.supported_hw = &drv->versions;
+ config.supported_hw_count = 1;
- if (pvs_name) {
- drv->names_opp_tables[cpu] = dev_pm_opp_set_prop_name(
- cpu_dev,
- pvs_name);
- if (IS_ERR(drv->names_opp_tables[cpu])) {
- ret = PTR_ERR(drv->names_opp_tables[cpu]);
- dev_err(cpu_dev, "Failed to add OPP name %s\n",
- pvs_name);
- goto free_opp;
- }
- }
-
- drv->hw_opp_tables[cpu] = dev_pm_opp_set_supported_hw(
- cpu_dev, &drv->versions, 1);
- if (IS_ERR(drv->hw_opp_tables[cpu])) {
- ret = PTR_ERR(drv->hw_opp_tables[cpu]);
- dev_err(cpu_dev,
- "Failed to set supported hardware\n");
- goto free_genpd_opp;
- }
+ if (pvs_name)
+ config.prop_name = pvs_name;
}
if (drv->data->genpd_names) {
- drv->genpd_opp_tables[cpu] =
- dev_pm_opp_attach_genpd(cpu_dev,
- drv->data->genpd_names,
- NULL);
- if (IS_ERR(drv->genpd_opp_tables[cpu])) {
- ret = PTR_ERR(drv->genpd_opp_tables[cpu]);
- if (ret != -EPROBE_DEFER)
- dev_err(cpu_dev,
- "Could not attach to pm_domain: %d\n",
- ret);
- goto free_genpd_opp;
+ config.genpd_names = drv->data->genpd_names;
+ config.virt_devs = NULL;
+ }
+
+ if (config.supported_hw || config.genpd_names) {
+ drv->opp_tokens[cpu] = dev_pm_opp_set_config(cpu_dev, &config);
+ if (drv->opp_tokens[cpu] < 0) {
+ ret = drv->opp_tokens[cpu];
+ dev_err(cpu_dev, "Failed to set OPP config\n");
+ goto free_opp;
}
}
}
@@ -395,27 +364,10 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
ret = PTR_ERR(cpufreq_dt_pdev);
dev_err(cpu_dev, "Failed to register platform device\n");
-free_genpd_opp:
- for_each_possible_cpu(cpu) {
- if (IS_ERR(drv->genpd_opp_tables[cpu]))
- break;
- dev_pm_opp_detach_genpd(drv->genpd_opp_tables[cpu]);
- }
- kfree(drv->genpd_opp_tables);
free_opp:
- for_each_possible_cpu(cpu) {
- if (IS_ERR(drv->names_opp_tables[cpu]))
- break;
- dev_pm_opp_put_prop_name(drv->names_opp_tables[cpu]);
- }
- for_each_possible_cpu(cpu) {
- if (IS_ERR(drv->hw_opp_tables[cpu]))
- break;
- dev_pm_opp_put_supported_hw(drv->hw_opp_tables[cpu]);
- }
- kfree(drv->hw_opp_tables);
-free_opp_names:
- kfree(drv->names_opp_tables);
+ for_each_possible_cpu(cpu)
+ dev_pm_opp_clear_config(drv->opp_tokens[cpu]);
+ kfree(drv->opp_tokens);
free_drv:
kfree(drv);
@@ -429,15 +381,10 @@ static int qcom_cpufreq_remove(struct platform_device *pdev)
platform_device_unregister(cpufreq_dt_pdev);
- for_each_possible_cpu(cpu) {
- dev_pm_opp_put_supported_hw(drv->names_opp_tables[cpu]);
- dev_pm_opp_put_supported_hw(drv->hw_opp_tables[cpu]);
- dev_pm_opp_detach_genpd(drv->genpd_opp_tables[cpu]);
- }
+ for_each_possible_cpu(cpu)
+ dev_pm_opp_clear_config(drv->opp_tokens[cpu]);
- kfree(drv->names_opp_tables);
- kfree(drv->hw_opp_tables);
- kfree(drv->genpd_opp_tables);
+ kfree(drv->opp_tokens);
kfree(drv);
return 0;
diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c
index fdb0a722d881..a67df90848c2 100644
--- a/drivers/cpufreq/sti-cpufreq.c
+++ b/drivers/cpufreq/sti-cpufreq.c
@@ -156,9 +156,13 @@ static int sti_cpufreq_set_opp_info(void)
unsigned int hw_info_offset;
unsigned int version[VERSION_ELEMENTS];
int pcode, substrate, major, minor;
- int ret;
+ int opp_token, ret;
char name[MAX_PCODE_NAME_LEN];
- struct opp_table *opp_table;
+ struct dev_pm_opp_config config = {
+ .supported_hw = version,
+ .supported_hw_count = ARRAY_SIZE(version),
+ .prop_name = name,
+ };
reg_fields = sti_cpufreq_match();
if (!reg_fields) {
@@ -210,21 +214,14 @@ use_defaults:
snprintf(name, MAX_PCODE_NAME_LEN, "pcode%d", pcode);
- opp_table = dev_pm_opp_set_prop_name(dev, name);
- if (IS_ERR(opp_table)) {
- dev_err(dev, "Failed to set prop name\n");
- return PTR_ERR(opp_table);
- }
-
version[0] = BIT(major);
version[1] = BIT(minor);
version[2] = BIT(substrate);
- opp_table = dev_pm_opp_set_supported_hw(dev, version, VERSION_ELEMENTS);
- if (IS_ERR(opp_table)) {
- dev_err(dev, "Failed to set supported hardware\n");
- ret = PTR_ERR(opp_table);
- goto err_put_prop_name;
+ opp_token = dev_pm_opp_set_config(dev, &config);
+ if (opp_token < 0) {
+ dev_err(dev, "Failed to set OPP config\n");
+ return opp_token;
}
dev_dbg(dev, "pcode: %d major: %d minor: %d substrate: %d\n",
@@ -233,10 +230,6 @@ use_defaults:
version[0], version[1], version[2]);
return 0;
-
-err_put_prop_name:
- dev_pm_opp_put_prop_name(opp_table);
- return ret;
}
static int sti_cpufreq_fetch_syscon_registers(void)
diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
index 75e1bf3a08f7..a4922580ce06 100644
--- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c
+++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
@@ -86,20 +86,20 @@ static int sun50i_cpufreq_get_efuse(u32 *versions)
static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
{
- struct opp_table **opp_tables;
+ int *opp_tokens;
char name[MAX_NAME_LEN];
unsigned int cpu;
u32 speed = 0;
int ret;
- opp_tables = kcalloc(num_possible_cpus(), sizeof(*opp_tables),
+ opp_tokens = kcalloc(num_possible_cpus(), sizeof(*opp_tokens),
GFP_KERNEL);
- if (!opp_tables)
+ if (!opp_tokens)
return -ENOMEM;
ret = sun50i_cpufreq_get_efuse(&speed);
if (ret) {
- kfree(opp_tables);
+ kfree(opp_tokens);
return ret;
}
@@ -113,9 +113,9 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
goto free_opp;
}
- opp_tables[cpu] = dev_pm_opp_set_prop_name(cpu_dev, name);
- if (IS_ERR(opp_tables[cpu])) {
- ret = PTR_ERR(opp_tables[cpu]);
+ opp_tokens[cpu] = dev_pm_opp_set_prop_name(cpu_dev, name);
+ if (opp_tokens[cpu] < 0) {
+ ret = opp_tokens[cpu];
pr_err("Failed to set prop name\n");
goto free_opp;
}
@@ -124,7 +124,7 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1,
NULL, 0);
if (!IS_ERR(cpufreq_dt_pdev)) {
- platform_set_drvdata(pdev, opp_tables);
+ platform_set_drvdata(pdev, opp_tokens);
return 0;
}
@@ -132,27 +132,24 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
pr_err("Failed to register platform device\n");
free_opp:
- for_each_possible_cpu(cpu) {
- if (IS_ERR_OR_NULL(opp_tables[cpu]))
- break;
- dev_pm_opp_put_prop_name(opp_tables[cpu]);
- }
- kfree(opp_tables);
+ for_each_possible_cpu(cpu)
+ dev_pm_opp_put_prop_name(opp_tokens[cpu]);
+ kfree(opp_tokens);
return ret;
}
static int sun50i_cpufreq_nvmem_remove(struct platform_device *pdev)
{
- struct opp_table **opp_tables = platform_get_drvdata(pdev);
+ int *opp_tokens = platform_get_drvdata(pdev);
unsigned int cpu;
platform_device_unregister(cpufreq_dt_pdev);
for_each_possible_cpu(cpu)
- dev_pm_opp_put_prop_name(opp_tables[cpu]);
+ dev_pm_opp_put_prop_name(opp_tokens[cpu]);
- kfree(opp_tables);
+ kfree(opp_tokens);
return 0;
}
diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
index 2a6a98764a8c..1216046cf4c2 100644
--- a/drivers/cpufreq/tegra194-cpufreq.c
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -162,7 +162,7 @@ static struct tegra_cpufreq_ops tegra234_cpufreq_ops = {
.set_cpu_ndiv = tegra234_set_cpu_ndiv,
};
-const struct tegra_cpufreq_soc tegra234_cpufreq_soc = {
+static const struct tegra_cpufreq_soc tegra234_cpufreq_soc = {
.ops = &tegra234_cpufreq_ops,
.actmon_cntr_base = 0x9000,
.maxcpus_per_cluster = 4,
@@ -430,7 +430,7 @@ static struct tegra_cpufreq_ops tegra194_cpufreq_ops = {
.set_cpu_ndiv = tegra194_set_cpu_ndiv,
};
-const struct tegra_cpufreq_soc tegra194_cpufreq_soc = {
+static const struct tegra_cpufreq_soc tegra194_cpufreq_soc = {
.ops = &tegra194_cpufreq_ops,
.maxcpus_per_cluster = 2,
};
diff --git a/drivers/cpufreq/tegra20-cpufreq.c b/drivers/cpufreq/tegra20-cpufreq.c
index e8db3d75be25..ab7ac7df9e62 100644
--- a/drivers/cpufreq/tegra20-cpufreq.c
+++ b/drivers/cpufreq/tegra20-cpufreq.c
@@ -32,9 +32,9 @@ static bool cpu0_node_has_opp_v2_prop(void)
return ret;
}
-static void tegra20_cpufreq_put_supported_hw(void *opp_table)
+static void tegra20_cpufreq_put_supported_hw(void *opp_token)
{
- dev_pm_opp_put_supported_hw(opp_table);
+ dev_pm_opp_put_supported_hw((unsigned long) opp_token);
}
static void tegra20_cpufreq_dt_unregister(void *cpufreq_dt)
@@ -45,7 +45,6 @@ static void tegra20_cpufreq_dt_unregister(void *cpufreq_dt)
static int tegra20_cpufreq_probe(struct platform_device *pdev)
{
struct platform_device *cpufreq_dt;
- struct opp_table *opp_table;
struct device *cpu_dev;
u32 versions[2];
int err;
@@ -71,16 +70,15 @@ static int tegra20_cpufreq_probe(struct platform_device *pdev)
if (WARN_ON(!cpu_dev))
return -ENODEV;
- opp_table = dev_pm_opp_set_supported_hw(cpu_dev, versions, 2);
- err = PTR_ERR_OR_ZERO(opp_table);
- if (err) {
+ err = dev_pm_opp_set_supported_hw(cpu_dev, versions, 2);
+ if (err < 0) {
dev_err(&pdev->dev, "failed to set supported hw: %d\n", err);
return err;
}
err = devm_add_action_or_reset(&pdev->dev,
tegra20_cpufreq_put_supported_hw,
- opp_table);
+ (void *)((unsigned long) err));
if (err)
return err;
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index 8f9fdd864391..df85a77d476b 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -60,7 +60,6 @@ struct ti_cpufreq_data {
struct device_node *opp_node;
struct regmap *syscon;
const struct ti_cpufreq_soc_data *soc_data;
- struct opp_table *opp_table;
};
static unsigned long amx3_efuse_xlate(struct ti_cpufreq_data *opp_data,
@@ -173,7 +172,7 @@ static struct ti_cpufreq_soc_data omap34xx_soc_data = {
* seems to always read as 0).
*/
-static const char * const omap3_reg_names[] = {"cpu0", "vbb"};
+static const char * const omap3_reg_names[] = {"cpu0", "vbb", NULL};
static struct ti_cpufreq_soc_data omap36xx_soc_data = {
.reg_names = omap3_reg_names,
@@ -324,10 +323,13 @@ static int ti_cpufreq_probe(struct platform_device *pdev)
{
u32 version[VERSION_COUNT];
const struct of_device_id *match;
- struct opp_table *ti_opp_table;
struct ti_cpufreq_data *opp_data;
- const char * const default_reg_names[] = {"vdd", "vbb"};
+ const char * const default_reg_names[] = {"vdd", "vbb", NULL};
int ret;
+ struct dev_pm_opp_config config = {
+ .supported_hw = version,
+ .supported_hw_count = ARRAY_SIZE(version),
+ };
match = dev_get_platdata(&pdev->dev);
if (!match)
@@ -370,33 +372,21 @@ static int ti_cpufreq_probe(struct platform_device *pdev)
if (ret)
goto fail_put_node;
- ti_opp_table = dev_pm_opp_set_supported_hw(opp_data->cpu_dev,
- version, VERSION_COUNT);
- if (IS_ERR(ti_opp_table)) {
- dev_err(opp_data->cpu_dev,
- "Failed to set supported hardware\n");
- ret = PTR_ERR(ti_opp_table);
- goto fail_put_node;
- }
-
- opp_data->opp_table = ti_opp_table;
-
if (opp_data->soc_data->multi_regulator) {
- const char * const *reg_names = default_reg_names;
-
if (opp_data->soc_data->reg_names)
- reg_names = opp_data->soc_data->reg_names;
- ti_opp_table = dev_pm_opp_set_regulators(opp_data->cpu_dev,
- reg_names,
- ARRAY_SIZE(default_reg_names));
- if (IS_ERR(ti_opp_table)) {
- dev_pm_opp_put_supported_hw(opp_data->opp_table);
- ret = PTR_ERR(ti_opp_table);
- goto fail_put_node;
- }
+ config.regulator_names = opp_data->soc_data->reg_names;
+ else
+ config.regulator_names = default_reg_names;
+ }
+
+ ret = dev_pm_opp_set_config(opp_data->cpu_dev, &config);
+ if (ret < 0) {
+ dev_err(opp_data->cpu_dev, "Failed to set OPP config\n");
+ goto fail_put_node;
}
of_node_put(opp_data->opp_node);
+
register_cpufreq_dt:
platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 62dd956025f3..6eceb1988243 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -8,6 +8,7 @@
* This code is licenced under the GPL.
*/
+#include "linux/percpu-defs.h"
#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
@@ -279,6 +280,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
/* Shallower states are enabled, so update. */
dev->states_usage[entered_state].above++;
+ trace_cpu_idle_miss(dev->cpu, entered_state, false);
break;
}
} else if (diff > delay) {
@@ -290,8 +292,10 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
* Update if a deeper state would have been a
* better match for the observed idle duration.
*/
- if (diff - delay >= drv->states[i].target_residency_ns)
+ if (diff - delay >= drv->states[i].target_residency_ns) {
dev->states_usage[entered_state].below++;
+ trace_cpu_idle_miss(dev->cpu, entered_state, true);
+ }
break;
}
diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig
index f64e3984689f..768ced3d6fe8 100644
--- a/drivers/cxl/Kconfig
+++ b/drivers/cxl/Kconfig
@@ -2,6 +2,7 @@
menuconfig CXL_BUS
tristate "CXL (Compute Express Link) Devices Support"
depends on PCI
+ select PCI_DOE
help
CXL is a bus that is electrically compatible with PCI Express, but
layers three protocols on that signalling (CXL.io, CXL.cache, and
@@ -102,4 +103,12 @@ config CXL_SUSPEND
def_bool y
depends on SUSPEND && CXL_MEM
+config CXL_REGION
+ bool
+ default CXL_BUS
+ # For MAX_PHYSMEM_BITS
+ depends on SPARSEMEM
+ select MEMREGION
+ select GET_FREE_REGION
+
endif
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index 40286f5df812..fb649683dd3a 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -9,10 +9,6 @@
#include "cxlpci.h"
#include "cxl.h"
-/* Encode defined in CXL 2.0 8.2.5.12.7 HDM Decoder Control Register */
-#define CFMWS_INTERLEAVE_WAYS(x) (1 << (x)->interleave_ways)
-#define CFMWS_INTERLEAVE_GRANULARITY(x) ((x)->granularity + 8)
-
static unsigned long cfmws_to_decoder_flags(int restrictions)
{
unsigned long flags = CXL_DECODER_F_ENABLE;
@@ -34,7 +30,8 @@ static unsigned long cfmws_to_decoder_flags(int restrictions)
static int cxl_acpi_cfmws_verify(struct device *dev,
struct acpi_cedt_cfmws *cfmws)
{
- int expected_len;
+ int rc, expected_len;
+ unsigned int ways;
if (cfmws->interleave_arithmetic != ACPI_CEDT_CFMWS_ARITHMETIC_MODULO) {
dev_err(dev, "CFMWS Unsupported Interleave Arithmetic\n");
@@ -51,14 +48,14 @@ static int cxl_acpi_cfmws_verify(struct device *dev,
return -EINVAL;
}
- if (CFMWS_INTERLEAVE_WAYS(cfmws) > CXL_DECODER_MAX_INTERLEAVE) {
- dev_err(dev, "CFMWS Interleave Ways (%d) too large\n",
- CFMWS_INTERLEAVE_WAYS(cfmws));
+ rc = cxl_to_ways(cfmws->interleave_ways, &ways);
+ if (rc) {
+ dev_err(dev, "CFMWS Interleave Ways (%d) invalid\n",
+ cfmws->interleave_ways);
return -EINVAL;
}
- expected_len = struct_size((cfmws), interleave_targets,
- CFMWS_INTERLEAVE_WAYS(cfmws));
+ expected_len = struct_size(cfmws, interleave_targets, ways);
if (cfmws->header.length < expected_len) {
dev_err(dev, "CFMWS length %d less than expected %d\n",
@@ -76,6 +73,8 @@ static int cxl_acpi_cfmws_verify(struct device *dev,
struct cxl_cfmws_context {
struct device *dev;
struct cxl_port *root_port;
+ struct resource *cxl_res;
+ int id;
};
static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
@@ -84,10 +83,14 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
int target_map[CXL_DECODER_MAX_INTERLEAVE];
struct cxl_cfmws_context *ctx = arg;
struct cxl_port *root_port = ctx->root_port;
+ struct resource *cxl_res = ctx->cxl_res;
+ struct cxl_root_decoder *cxlrd;
struct device *dev = ctx->dev;
struct acpi_cedt_cfmws *cfmws;
struct cxl_decoder *cxld;
- int rc, i;
+ unsigned int ways, i, ig;
+ struct resource *res;
+ int rc;
cfmws = (struct acpi_cedt_cfmws *) header;
@@ -99,19 +102,51 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
return 0;
}
- for (i = 0; i < CFMWS_INTERLEAVE_WAYS(cfmws); i++)
+ rc = cxl_to_ways(cfmws->interleave_ways, &ways);
+ if (rc)
+ return rc;
+ rc = cxl_to_granularity(cfmws->granularity, &ig);
+ if (rc)
+ return rc;
+ for (i = 0; i < ways; i++)
target_map[i] = cfmws->interleave_targets[i];
- cxld = cxl_root_decoder_alloc(root_port, CFMWS_INTERLEAVE_WAYS(cfmws));
- if (IS_ERR(cxld))
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ res->name = kasprintf(GFP_KERNEL, "CXL Window %d", ctx->id++);
+ if (!res->name)
+ goto err_name;
+
+ res->start = cfmws->base_hpa;
+ res->end = cfmws->base_hpa + cfmws->window_size - 1;
+ res->flags = IORESOURCE_MEM;
+
+ /* add to the local resource tracking to establish a sort order */
+ rc = insert_resource(cxl_res, res);
+ if (rc)
+ goto err_insert;
+
+ cxlrd = cxl_root_decoder_alloc(root_port, ways);
+ if (IS_ERR(cxlrd))
return 0;
+ cxld = &cxlrd->cxlsd.cxld;
cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
cxld->target_type = CXL_DECODER_EXPANDER;
- cxld->platform_res = (struct resource)DEFINE_RES_MEM(cfmws->base_hpa,
- cfmws->window_size);
- cxld->interleave_ways = CFMWS_INTERLEAVE_WAYS(cfmws);
- cxld->interleave_granularity = CFMWS_INTERLEAVE_GRANULARITY(cfmws);
+ cxld->hpa_range = (struct range) {
+ .start = res->start,
+ .end = res->end,
+ };
+ cxld->interleave_ways = ways;
+ /*
+ * Minimize the x1 granularity to advertise support for any
+ * valid region granularity
+ */
+ if (ways == 1)
+ ig = CXL_DECODER_MIN_GRANULARITY;
+ cxld->interleave_granularity = ig;
rc = cxl_decoder_add(cxld, target_map);
if (rc)
@@ -119,15 +154,22 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
else
rc = cxl_decoder_autoremove(dev, cxld);
if (rc) {
- dev_err(dev, "Failed to add decoder for %pr\n",
- &cxld->platform_res);
+ dev_err(dev, "Failed to add decode range [%#llx - %#llx]\n",
+ cxld->hpa_range.start, cxld->hpa_range.end);
return 0;
}
- dev_dbg(dev, "add: %s node: %d range %pr\n", dev_name(&cxld->dev),
- phys_to_target_node(cxld->platform_res.start),
- &cxld->platform_res);
+ dev_dbg(dev, "add: %s node: %d range [%#llx - %#llx]\n",
+ dev_name(&cxld->dev),
+ phys_to_target_node(cxld->hpa_range.start),
+ cxld->hpa_range.start, cxld->hpa_range.end);
return 0;
+
+err_insert:
+ kfree(res->name);
+err_name:
+ kfree(res);
+ return -ENOMEM;
}
__mock struct acpi_device *to_cxl_host_bridge(struct device *host,
@@ -175,8 +217,7 @@ static int add_host_bridge_uport(struct device *match, void *arg)
if (rc)
return rc;
- port = devm_cxl_add_port(host, match, dport->component_reg_phys,
- root_port);
+ port = devm_cxl_add_port(host, match, dport->component_reg_phys, dport);
if (IS_ERR(port))
return PTR_ERR(port);
dev_dbg(host, "%s: add: %s\n", dev_name(match), dev_name(&port->dev));
@@ -282,9 +323,127 @@ static void cxl_acpi_lock_reset_class(void *dev)
device_lock_reset_class(dev);
}
+static void del_cxl_resource(struct resource *res)
+{
+ kfree(res->name);
+ kfree(res);
+}
+
+static void cxl_set_public_resource(struct resource *priv, struct resource *pub)
+{
+ priv->desc = (unsigned long) pub;
+}
+
+static struct resource *cxl_get_public_resource(struct resource *priv)
+{
+ return (struct resource *) priv->desc;
+}
+
+static void remove_cxl_resources(void *data)
+{
+ struct resource *res, *next, *cxl = data;
+
+ for (res = cxl->child; res; res = next) {
+ struct resource *victim = cxl_get_public_resource(res);
+
+ next = res->sibling;
+ remove_resource(res);
+
+ if (victim) {
+ remove_resource(victim);
+ kfree(victim);
+ }
+
+ del_cxl_resource(res);
+ }
+}
+
+/**
+ * add_cxl_resources() - reflect CXL fixed memory windows in iomem_resource
+ * @cxl_res: A standalone resource tree where each CXL window is a sibling
+ *
+ * Walk each CXL window in @cxl_res and add it to iomem_resource potentially
+ * expanding its boundaries to ensure that any conflicting resources become
+ * children. If a window is expanded it may then conflict with another window
+ * entry and require the window to be truncated or trimmed. Consider this
+ * situation:
+ *
+ * |-- "CXL Window 0" --||----- "CXL Window 1" -----|
+ * |--------------- "System RAM" -------------|
+ *
+ * ...where platform firmware has established a "System RAM" resource across 2
+ * windows, but has left some portion of window 1 for dynamic CXL region
+ * provisioning. In this case "CXL Window 0" will span the entirety of the "System
+ * RAM" span, and "CXL Window 1" is truncated to the remaining tail past the end
+ * of that "System RAM" resource.
+ */
+static int add_cxl_resources(struct resource *cxl_res)
+{
+ struct resource *res, *new, *next;
+
+ for (res = cxl_res->child; res; res = next) {
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+ new->name = res->name;
+ new->start = res->start;
+ new->end = res->end;
+ new->flags = IORESOURCE_MEM;
+ new->desc = IORES_DESC_CXL;
+
+ /*
+ * Record the public resource in the private cxl_res tree for
+ * later removal.
+ */
+ cxl_set_public_resource(res, new);
+
+ insert_resource_expand_to_fit(&iomem_resource, new);
+
+ next = res->sibling;
+ while (next && resource_overlaps(new, next)) {
+ if (resource_contains(new, next)) {
+ struct resource *_next = next->sibling;
+
+ remove_resource(next);
+ del_cxl_resource(next);
+ next = _next;
+ } else
+ next->start = new->end + 1;
+ }
+ }
+ return 0;
+}
+
+static int pair_cxl_resource(struct device *dev, void *data)
+{
+ struct resource *cxl_res = data;
+ struct resource *p;
+
+ if (!is_root_decoder(dev))
+ return 0;
+
+ for (p = cxl_res->child; p; p = p->sibling) {
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
+ struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
+ struct resource res = {
+ .start = cxld->hpa_range.start,
+ .end = cxld->hpa_range.end,
+ .flags = IORESOURCE_MEM,
+ };
+
+ if (resource_contains(p, &res)) {
+ cxlrd->res = cxl_get_public_resource(p);
+ break;
+ }
+ }
+
+ return 0;
+}
+
static int cxl_acpi_probe(struct platform_device *pdev)
{
int rc;
+ struct resource *cxl_res;
struct cxl_port *root_port;
struct device *host = &pdev->dev;
struct acpi_device *adev = ACPI_COMPANION(host);
@@ -296,6 +455,14 @@ static int cxl_acpi_probe(struct platform_device *pdev)
if (rc)
return rc;
+ cxl_res = devm_kzalloc(host, sizeof(*cxl_res), GFP_KERNEL);
+ if (!cxl_res)
+ return -ENOMEM;
+ cxl_res->name = "CXL mem";
+ cxl_res->start = 0;
+ cxl_res->end = -1;
+ cxl_res->flags = IORESOURCE_MEM;
+
root_port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
if (IS_ERR(root_port))
return PTR_ERR(root_port);
@@ -306,11 +473,28 @@ static int cxl_acpi_probe(struct platform_device *pdev)
if (rc < 0)
return rc;
+ rc = devm_add_action_or_reset(host, remove_cxl_resources, cxl_res);
+ if (rc)
+ return rc;
+
ctx = (struct cxl_cfmws_context) {
.dev = host,
.root_port = root_port,
+ .cxl_res = cxl_res,
};
- acpi_table_parse_cedt(ACPI_CEDT_TYPE_CFMWS, cxl_parse_cfmws, &ctx);
+ rc = acpi_table_parse_cedt(ACPI_CEDT_TYPE_CFMWS, cxl_parse_cfmws, &ctx);
+ if (rc < 0)
+ return -ENXIO;
+
+ rc = add_cxl_resources(cxl_res);
+ if (rc)
+ return rc;
+
+ /*
+ * Populate the root decoders with their related iomem resource,
+ * if present
+ */
+ device_for_each_child(&root_port->dev, cxl_res, pair_cxl_resource);
/*
* Root level scanned with host-bridge as dports, now scan host-bridges
@@ -337,12 +521,19 @@ static const struct acpi_device_id cxl_acpi_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, cxl_acpi_ids);
+static const struct platform_device_id cxl_test_ids[] = {
+ { "cxl_acpi" },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, cxl_test_ids);
+
static struct platform_driver cxl_acpi_driver = {
.probe = cxl_acpi_probe,
.driver = {
.name = KBUILD_MODNAME,
.acpi_match_table = cxl_acpi_ids,
},
+ .id_table = cxl_test_ids,
};
module_platform_driver(cxl_acpi_driver);
diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile
index 9d35085d25af..79c7257f4107 100644
--- a/drivers/cxl/core/Makefile
+++ b/drivers/cxl/core/Makefile
@@ -10,3 +10,4 @@ cxl_core-y += memdev.o
cxl_core-y += mbox.o
cxl_core-y += pci.o
cxl_core-y += hdm.o
+cxl_core-$(CONFIG_CXL_REGION) += region.o
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index 1a50c0fc399c..1d8f87be283f 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -9,6 +9,36 @@ extern const struct device_type cxl_nvdimm_type;
extern struct attribute_group cxl_base_attribute_group;
+#ifdef CONFIG_CXL_REGION
+extern struct device_attribute dev_attr_create_pmem_region;
+extern struct device_attribute dev_attr_delete_region;
+extern struct device_attribute dev_attr_region;
+extern const struct device_type cxl_pmem_region_type;
+extern const struct device_type cxl_region_type;
+void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled);
+#define CXL_REGION_ATTR(x) (&dev_attr_##x.attr)
+#define CXL_REGION_TYPE(x) (&cxl_region_type)
+#define SET_CXL_REGION_ATTR(x) (&dev_attr_##x.attr),
+#define CXL_PMEM_REGION_TYPE(x) (&cxl_pmem_region_type)
+int cxl_region_init(void);
+void cxl_region_exit(void);
+#else
+static inline void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)
+{
+}
+static inline int cxl_region_init(void)
+{
+ return 0;
+}
+static inline void cxl_region_exit(void)
+{
+}
+#define CXL_REGION_ATTR(x) NULL
+#define CXL_REGION_TYPE(x) NULL
+#define SET_CXL_REGION_ATTR(x)
+#define CXL_PMEM_REGION_TYPE(x) NULL
+#endif
+
struct cxl_send_command;
struct cxl_mem_query_commands;
int cxl_query_cmd(struct cxl_memdev *cxlmd,
@@ -17,9 +47,28 @@ int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s);
void __iomem *devm_cxl_iomap_block(struct device *dev, resource_size_t addr,
resource_size_t length);
+struct dentry *cxl_debugfs_create_dir(const char *dir);
+int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
+ enum cxl_decoder_mode mode);
+int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size);
+int cxl_dpa_free(struct cxl_endpoint_decoder *cxled);
+resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled);
+resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled);
+extern struct rw_semaphore cxl_dpa_rwsem;
+
+bool is_switch_decoder(struct device *dev);
+struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev);
+static inline struct cxl_ep *cxl_ep_load(struct cxl_port *port,
+ struct cxl_memdev *cxlmd)
+{
+ if (!port)
+ return NULL;
+
+ return xa_load(&port->endpoints, (unsigned long)&cxlmd->dev);
+}
+
int cxl_memdev_init(void);
void cxl_memdev_exit(void);
void cxl_mbox_init(void);
-void cxl_mbox_exit(void);
#endif /* __CXL_CORE_H__ */
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index bfc8ee876278..d1d2caea5c62 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/delay.h>
@@ -16,6 +17,8 @@
* for enumerating these registers and capabilities.
*/
+DECLARE_RWSEM(cxl_dpa_rwsem);
+
static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
int *target_map)
{
@@ -46,20 +49,22 @@ static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
*/
int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
{
- struct cxl_decoder *cxld;
- struct cxl_dport *dport;
+ struct cxl_switch_decoder *cxlsd;
+ struct cxl_dport *dport = NULL;
int single_port_map[1];
+ unsigned long index;
- cxld = cxl_switch_decoder_alloc(port, 1);
- if (IS_ERR(cxld))
- return PTR_ERR(cxld);
+ cxlsd = cxl_switch_decoder_alloc(port, 1);
+ if (IS_ERR(cxlsd))
+ return PTR_ERR(cxlsd);
device_lock_assert(&port->dev);
- dport = list_first_entry(&port->dports, typeof(*dport), list);
+ xa_for_each(&port->dports, index, dport)
+ break;
single_port_map[0] = dport->port_id;
- return add_hdm_decoder(port, cxld, single_port_map);
+ return add_hdm_decoder(port, &cxlsd->cxld, single_port_map);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_passthrough_decoder, CXL);
@@ -124,47 +129,577 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port)
return ERR_PTR(-ENXIO);
}
+ dev_set_drvdata(dev, cxlhdm);
+
return cxlhdm;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, CXL);
-static int to_interleave_granularity(u32 ctrl)
+static void __cxl_dpa_debug(struct seq_file *file, struct resource *r, int depth)
+{
+ unsigned long long start = r->start, end = r->end;
+
+ seq_printf(file, "%*s%08llx-%08llx : %s\n", depth * 2, "", start, end,
+ r->name);
+}
+
+void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds)
+{
+ struct resource *p1, *p2;
+
+ down_read(&cxl_dpa_rwsem);
+ for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) {
+ __cxl_dpa_debug(file, p1, 0);
+ for (p2 = p1->child; p2; p2 = p2->sibling)
+ __cxl_dpa_debug(file, p2, 1);
+ }
+ up_read(&cxl_dpa_rwsem);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, CXL);
+
+/*
+ * Must be called in a context that synchronizes against this decoder's
+ * port ->remove() callback (like an endpoint decoder sysfs attribute)
+ */
+static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_port *port = cxled_to_port(cxled);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct resource *res = cxled->dpa_res;
+ resource_size_t skip_start;
+
+ lockdep_assert_held_write(&cxl_dpa_rwsem);
+
+ /* save @skip_start, before @res is released */
+ skip_start = res->start - cxled->skip;
+ __release_region(&cxlds->dpa_res, res->start, resource_size(res));
+ if (cxled->skip)
+ __release_region(&cxlds->dpa_res, skip_start, cxled->skip);
+ cxled->skip = 0;
+ cxled->dpa_res = NULL;
+ put_device(&cxled->cxld.dev);
+ port->hdm_end--;
+}
+
+static void cxl_dpa_release(void *cxled)
+{
+ down_write(&cxl_dpa_rwsem);
+ __cxl_dpa_release(cxled);
+ up_write(&cxl_dpa_rwsem);
+}
+
+/*
+ * Must be called from context that will not race port device
+ * unregistration, like decoder sysfs attribute methods
+ */
+static void devm_cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_port *port = cxled_to_port(cxled);
+
+ lockdep_assert_held_write(&cxl_dpa_rwsem);
+ devm_remove_action(&port->dev, cxl_dpa_release, cxled);
+ __cxl_dpa_release(cxled);
+}
+
+static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
+ resource_size_t base, resource_size_t len,
+ resource_size_t skipped)
+{
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_port *port = cxled_to_port(cxled);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct device *dev = &port->dev;
+ struct resource *res;
+
+ lockdep_assert_held_write(&cxl_dpa_rwsem);
+
+ if (!len)
+ goto success;
+
+ if (cxled->dpa_res) {
+ dev_dbg(dev, "decoder%d.%d: existing allocation %pr assigned\n",
+ port->id, cxled->cxld.id, cxled->dpa_res);
+ return -EBUSY;
+ }
+
+ if (port->hdm_end + 1 != cxled->cxld.id) {
+ /*
+ * Assumes alloc and commit order is always in hardware instance
+ * order per expectations from 8.2.5.12.20 Committing Decoder
+ * Programming that enforce decoder[m] committed before
+ * decoder[m+1] commit start.
+ */
+ dev_dbg(dev, "decoder%d.%d: expected decoder%d.%d\n", port->id,
+ cxled->cxld.id, port->id, port->hdm_end + 1);
+ return -EBUSY;
+ }
+
+ if (skipped) {
+ res = __request_region(&cxlds->dpa_res, base - skipped, skipped,
+ dev_name(&cxled->cxld.dev), 0);
+ if (!res) {
+ dev_dbg(dev,
+ "decoder%d.%d: failed to reserve skipped space\n",
+ port->id, cxled->cxld.id);
+ return -EBUSY;
+ }
+ }
+ res = __request_region(&cxlds->dpa_res, base, len,
+ dev_name(&cxled->cxld.dev), 0);
+ if (!res) {
+ dev_dbg(dev, "decoder%d.%d: failed to reserve allocation\n",
+ port->id, cxled->cxld.id);
+ if (skipped)
+ __release_region(&cxlds->dpa_res, base - skipped,
+ skipped);
+ return -EBUSY;
+ }
+ cxled->dpa_res = res;
+ cxled->skip = skipped;
+
+ if (resource_contains(&cxlds->pmem_res, res))
+ cxled->mode = CXL_DECODER_PMEM;
+ else if (resource_contains(&cxlds->ram_res, res))
+ cxled->mode = CXL_DECODER_RAM;
+ else {
+ dev_dbg(dev, "decoder%d.%d: %pr mixed\n", port->id,
+ cxled->cxld.id, cxled->dpa_res);
+ cxled->mode = CXL_DECODER_MIXED;
+ }
+
+success:
+ port->hdm_end++;
+ get_device(&cxled->cxld.dev);
+ return 0;
+}
+
+static int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
+ resource_size_t base, resource_size_t len,
+ resource_size_t skipped)
{
- int val = FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl);
+ struct cxl_port *port = cxled_to_port(cxled);
+ int rc;
+
+ down_write(&cxl_dpa_rwsem);
+ rc = __cxl_dpa_reserve(cxled, base, len, skipped);
+ up_write(&cxl_dpa_rwsem);
+
+ if (rc)
+ return rc;
- return 256 << val;
+ return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}
-static int to_interleave_ways(u32 ctrl)
+resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
{
- int val = FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl);
+ resource_size_t size = 0;
+
+ down_read(&cxl_dpa_rwsem);
+ if (cxled->dpa_res)
+ size = resource_size(cxled->dpa_res);
+ up_read(&cxl_dpa_rwsem);
- switch (val) {
- case 0 ... 4:
- return 1 << val;
- case 8 ... 10:
- return 3 << (val - 8);
+ return size;
+}
+
+resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
+{
+ resource_size_t base = -1;
+
+ down_read(&cxl_dpa_rwsem);
+ if (cxled->dpa_res)
+ base = cxled->dpa_res->start;
+ up_read(&cxl_dpa_rwsem);
+
+ return base;
+}
+
+int cxl_dpa_free(struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_port *port = cxled_to_port(cxled);
+ struct device *dev = &cxled->cxld.dev;
+ int rc;
+
+ down_write(&cxl_dpa_rwsem);
+ if (!cxled->dpa_res) {
+ rc = 0;
+ goto out;
+ }
+ if (cxled->cxld.region) {
+ dev_dbg(dev, "decoder assigned to: %s\n",
+ dev_name(&cxled->cxld.region->dev));
+ rc = -EBUSY;
+ goto out;
+ }
+ if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
+ dev_dbg(dev, "decoder enabled\n");
+ rc = -EBUSY;
+ goto out;
+ }
+ if (cxled->cxld.id != port->hdm_end) {
+ dev_dbg(dev, "expected decoder%d.%d\n", port->id,
+ port->hdm_end);
+ rc = -EBUSY;
+ goto out;
+ }
+ devm_cxl_dpa_release(cxled);
+ rc = 0;
+out:
+ up_write(&cxl_dpa_rwsem);
+ return rc;
+}
+
+int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
+ enum cxl_decoder_mode mode)
+{
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct device *dev = &cxled->cxld.dev;
+ int rc;
+
+ switch (mode) {
+ case CXL_DECODER_RAM:
+ case CXL_DECODER_PMEM:
+ break;
default:
+ dev_dbg(dev, "unsupported mode: %d\n", mode);
+ return -EINVAL;
+ }
+
+ down_write(&cxl_dpa_rwsem);
+ if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ /*
+ * Only allow modes that are supported by the current partition
+ * configuration
+ */
+ if (mode == CXL_DECODER_PMEM && !resource_size(&cxlds->pmem_res)) {
+ dev_dbg(dev, "no available pmem capacity\n");
+ rc = -ENXIO;
+ goto out;
+ }
+ if (mode == CXL_DECODER_RAM && !resource_size(&cxlds->ram_res)) {
+ dev_dbg(dev, "no available ram capacity\n");
+ rc = -ENXIO;
+ goto out;
+ }
+
+ cxled->mode = mode;
+ rc = 0;
+out:
+ up_write(&cxl_dpa_rwsem);
+
+ return rc;
+}
+
+int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
+{
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ resource_size_t free_ram_start, free_pmem_start;
+ struct cxl_port *port = cxled_to_port(cxled);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct device *dev = &cxled->cxld.dev;
+ resource_size_t start, avail, skip;
+ struct resource *p, *last;
+ int rc;
+
+ down_write(&cxl_dpa_rwsem);
+ if (cxled->cxld.region) {
+ dev_dbg(dev, "decoder attached to %s\n",
+ dev_name(&cxled->cxld.region->dev));
+ rc = -EBUSY;
+ goto out;
+ }
+
+ if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
+ dev_dbg(dev, "decoder enabled\n");
+ rc = -EBUSY;
+ goto out;
+ }
+
+ for (p = cxlds->ram_res.child, last = NULL; p; p = p->sibling)
+ last = p;
+ if (last)
+ free_ram_start = last->end + 1;
+ else
+ free_ram_start = cxlds->ram_res.start;
+
+ for (p = cxlds->pmem_res.child, last = NULL; p; p = p->sibling)
+ last = p;
+ if (last)
+ free_pmem_start = last->end + 1;
+ else
+ free_pmem_start = cxlds->pmem_res.start;
+
+ if (cxled->mode == CXL_DECODER_RAM) {
+ start = free_ram_start;
+ avail = cxlds->ram_res.end - start + 1;
+ skip = 0;
+ } else if (cxled->mode == CXL_DECODER_PMEM) {
+ resource_size_t skip_start, skip_end;
+
+ start = free_pmem_start;
+ avail = cxlds->pmem_res.end - start + 1;
+ skip_start = free_ram_start;
+
+ /*
+ * If some pmem is already allocated, then that allocation
+ * already handled the skip.
+ */
+ if (cxlds->pmem_res.child &&
+ skip_start == cxlds->pmem_res.child->start)
+ skip_end = skip_start - 1;
+ else
+ skip_end = start - 1;
+ skip = skip_end - skip_start + 1;
+ } else {
+ dev_dbg(dev, "mode not set\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (size > avail) {
+ dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size,
+ cxled->mode == CXL_DECODER_RAM ? "ram" : "pmem",
+ &avail);
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ rc = __cxl_dpa_reserve(cxled, start, size, skip);
+out:
+ up_write(&cxl_dpa_rwsem);
+
+ if (rc)
+ return rc;
+
+ return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
+}
+
+static void cxld_set_interleave(struct cxl_decoder *cxld, u32 *ctrl)
+{
+ u16 eig;
+ u8 eiw;
+
+ /*
+ * Input validation ensures these warnings never fire, but otherwise
+ * suppress uninitialized variable usage warnings.
+ */
+ if (WARN_ONCE(ways_to_cxl(cxld->interleave_ways, &eiw),
+ "invalid interleave_ways: %d\n", cxld->interleave_ways))
+ return;
+ if (WARN_ONCE(granularity_to_cxl(cxld->interleave_granularity, &eig),
+ "invalid interleave_granularity: %d\n",
+ cxld->interleave_granularity))
+ return;
+
+ u32p_replace_bits(ctrl, eig, CXL_HDM_DECODER0_CTRL_IG_MASK);
+ u32p_replace_bits(ctrl, eiw, CXL_HDM_DECODER0_CTRL_IW_MASK);
+ *ctrl |= CXL_HDM_DECODER0_CTRL_COMMIT;
+}
+
+static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl)
+{
+ u32p_replace_bits(ctrl, !!(cxld->target_type == 3),
+ CXL_HDM_DECODER0_CTRL_TYPE);
+}
+
+static int cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
+{
+ struct cxl_dport **t = &cxlsd->target[0];
+ int ways = cxlsd->cxld.interleave_ways;
+
+ if (dev_WARN_ONCE(&cxlsd->cxld.dev,
+ ways > 8 || ways > cxlsd->nr_targets,
+ "ways: %d overflows targets: %d\n", ways,
+ cxlsd->nr_targets))
+ return -ENXIO;
+
+ *tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id);
+ if (ways > 1)
+ *tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id);
+ if (ways > 2)
+ *tgt |= FIELD_PREP(GENMASK(23, 16), t[2]->port_id);
+ if (ways > 3)
+ *tgt |= FIELD_PREP(GENMASK(31, 24), t[3]->port_id);
+ if (ways > 4)
+ *tgt |= FIELD_PREP(GENMASK_ULL(39, 32), t[4]->port_id);
+ if (ways > 5)
+ *tgt |= FIELD_PREP(GENMASK_ULL(47, 40), t[5]->port_id);
+ if (ways > 6)
+ *tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id);
+ if (ways > 7)
+ *tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id);
+
+ return 0;
+}
+
+/*
+ * Per CXL 2.0 8.2.5.12.20 Committing Decoder Programming, hardware must set
+ * committed or error within 10ms, but just be generous with 20ms to account for
+ * clock skew and other marginal behavior
+ */
+#define COMMIT_TIMEOUT_MS 20
+static int cxld_await_commit(void __iomem *hdm, int id)
+{
+ u32 ctrl;
+ int i;
+
+ for (i = 0; i < COMMIT_TIMEOUT_MS; i++) {
+ ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
+ if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMIT_ERROR, ctrl)) {
+ ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
+ writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
+ return -EIO;
+ }
+ if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
+ return 0;
+ fsleep(1000);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int cxl_decoder_commit(struct cxl_decoder *cxld)
+{
+ struct cxl_port *port = to_cxl_port(cxld->dev.parent);
+ struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
+ void __iomem *hdm = cxlhdm->regs.hdm_decoder;
+ int id = cxld->id, rc;
+ u64 base, size;
+ u32 ctrl;
+
+ if (cxld->flags & CXL_DECODER_F_ENABLE)
+ return 0;
+
+ if (port->commit_end + 1 != id) {
+ dev_dbg(&port->dev,
+ "%s: out of order commit, expected decoder%d.%d\n",
+ dev_name(&cxld->dev), port->id, port->commit_end + 1);
+ return -EBUSY;
+ }
+
+ down_read(&cxl_dpa_rwsem);
+ /* common decoder settings */
+ ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
+ cxld_set_interleave(cxld, &ctrl);
+ cxld_set_type(cxld, &ctrl);
+ base = cxld->hpa_range.start;
+ size = range_len(&cxld->hpa_range);
+
+ writel(upper_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
+ writel(lower_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
+ writel(upper_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
+ writel(lower_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
+
+ if (is_switch_decoder(&cxld->dev)) {
+ struct cxl_switch_decoder *cxlsd =
+ to_cxl_switch_decoder(&cxld->dev);
+ void __iomem *tl_hi = hdm + CXL_HDM_DECODER0_TL_HIGH(id);
+ void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id);
+ u64 targets;
+
+ rc = cxlsd_set_targets(cxlsd, &targets);
+ if (rc) {
+ dev_dbg(&port->dev, "%s: target configuration error\n",
+ dev_name(&cxld->dev));
+ goto err;
+ }
+
+ writel(upper_32_bits(targets), tl_hi);
+ writel(lower_32_bits(targets), tl_lo);
+ } else {
+ struct cxl_endpoint_decoder *cxled =
+ to_cxl_endpoint_decoder(&cxld->dev);
+ void __iomem *sk_hi = hdm + CXL_HDM_DECODER0_SKIP_HIGH(id);
+ void __iomem *sk_lo = hdm + CXL_HDM_DECODER0_SKIP_LOW(id);
+
+ writel(upper_32_bits(cxled->skip), sk_hi);
+ writel(lower_32_bits(cxled->skip), sk_lo);
+ }
+
+ writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
+ up_read(&cxl_dpa_rwsem);
+
+ port->commit_end++;
+ rc = cxld_await_commit(hdm, cxld->id);
+err:
+ if (rc) {
+ dev_dbg(&port->dev, "%s: error %d committing decoder\n",
+ dev_name(&cxld->dev), rc);
+ cxld->reset(cxld);
+ return rc;
+ }
+ cxld->flags |= CXL_DECODER_F_ENABLE;
+
+ return 0;
+}
+
+static int cxl_decoder_reset(struct cxl_decoder *cxld)
+{
+ struct cxl_port *port = to_cxl_port(cxld->dev.parent);
+ struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
+ void __iomem *hdm = cxlhdm->regs.hdm_decoder;
+ int id = cxld->id;
+ u32 ctrl;
+
+ if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
return 0;
+
+ if (port->commit_end != id) {
+ dev_dbg(&port->dev,
+ "%s: out of order reset, expected decoder%d.%d\n",
+ dev_name(&cxld->dev), port->id, port->commit_end);
+ return -EBUSY;
}
+
+ down_read(&cxl_dpa_rwsem);
+ ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
+ ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
+ writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
+
+ writel(0, hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
+ writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
+ writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
+ writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
+ up_read(&cxl_dpa_rwsem);
+
+ port->commit_end--;
+ cxld->flags &= ~CXL_DECODER_F_ENABLE;
+
+ return 0;
}
static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
- int *target_map, void __iomem *hdm, int which)
+ int *target_map, void __iomem *hdm, int which,
+ u64 *dpa_base)
{
- u64 size, base;
+ struct cxl_endpoint_decoder *cxled = NULL;
+ u64 size, base, skip, dpa_size;
+ bool committed;
+ u32 remainder;
+ int i, rc;
u32 ctrl;
- int i;
union {
u64 value;
unsigned char target_id[8];
} target_list;
+ if (is_endpoint_decoder(&cxld->dev))
+ cxled = to_cxl_endpoint_decoder(&cxld->dev);
+
ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
base = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
size = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));
+ committed = !!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED);
+ cxld->commit = cxl_decoder_commit;
+ cxld->reset = cxl_decoder_reset;
- if (!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED))
+ if (!committed)
size = 0;
if (base == U64_MAX || size == U64_MAX) {
dev_warn(&port->dev, "decoder%d.%d: Invalid resource range\n",
@@ -172,39 +707,77 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
return -ENXIO;
}
- cxld->decoder_range = (struct range) {
+ cxld->hpa_range = (struct range) {
.start = base,
.end = base + size - 1,
};
- /* switch decoders are always enabled if committed */
- if (ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED) {
+ /* decoders are enabled if committed */
+ if (committed) {
cxld->flags |= CXL_DECODER_F_ENABLE;
if (ctrl & CXL_HDM_DECODER0_CTRL_LOCK)
cxld->flags |= CXL_DECODER_F_LOCK;
+ if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl))
+ cxld->target_type = CXL_DECODER_EXPANDER;
+ else
+ cxld->target_type = CXL_DECODER_ACCELERATOR;
+ if (cxld->id != port->commit_end + 1) {
+ dev_warn(&port->dev,
+ "decoder%d.%d: Committed out of order\n",
+ port->id, cxld->id);
+ return -ENXIO;
+ }
+ port->commit_end = cxld->id;
+ } else {
+ /* unless / until type-2 drivers arrive, assume type-3 */
+ if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl) == 0) {
+ ctrl |= CXL_HDM_DECODER0_CTRL_TYPE;
+ writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
+ }
+ cxld->target_type = CXL_DECODER_EXPANDER;
}
- cxld->interleave_ways = to_interleave_ways(ctrl);
- if (!cxld->interleave_ways) {
+ rc = cxl_to_ways(FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl),
+ &cxld->interleave_ways);
+ if (rc) {
dev_warn(&port->dev,
"decoder%d.%d: Invalid interleave ways (ctrl: %#x)\n",
port->id, cxld->id, ctrl);
- return -ENXIO;
+ return rc;
}
- cxld->interleave_granularity = to_interleave_granularity(ctrl);
+ rc = cxl_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl),
+ &cxld->interleave_granularity);
+ if (rc)
+ return rc;
- if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl))
- cxld->target_type = CXL_DECODER_EXPANDER;
- else
- cxld->target_type = CXL_DECODER_ACCELERATOR;
+ if (!cxled) {
+ target_list.value =
+ ioread64_hi_lo(hdm + CXL_HDM_DECODER0_TL_LOW(which));
+ for (i = 0; i < cxld->interleave_ways; i++)
+ target_map[i] = target_list.target_id[i];
- if (is_endpoint_decoder(&cxld->dev))
return 0;
+ }
- target_list.value =
- ioread64_hi_lo(hdm + CXL_HDM_DECODER0_TL_LOW(which));
- for (i = 0; i < cxld->interleave_ways; i++)
- target_map[i] = target_list.target_id[i];
+ if (!committed)
+ return 0;
+ dpa_size = div_u64_rem(size, cxld->interleave_ways, &remainder);
+ if (remainder) {
+ dev_err(&port->dev,
+ "decoder%d.%d: invalid committed configuration size: %#llx ways: %d\n",
+ port->id, cxld->id, size, cxld->interleave_ways);
+ return -ENXIO;
+ }
+ skip = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
+ rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip);
+ if (rc) {
+ dev_err(&port->dev,
+ "decoder%d.%d: Failed to reserve DPA range %#llx - %#llx\n (%d)",
+ port->id, cxld->id, *dpa_base,
+ *dpa_base + dpa_size + skip - 1, rc);
+ return rc;
+ }
+ *dpa_base += dpa_size + skip;
return 0;
}
@@ -216,7 +789,8 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
{
void __iomem *hdm = cxlhdm->regs.hdm_decoder;
struct cxl_port *port = cxlhdm->port;
- int i, committed, failed;
+ int i, committed;
+ u64 dpa_base = 0;
u32 ctrl;
/*
@@ -236,27 +810,37 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
if (committed != cxlhdm->decoder_count)
msleep(20);
- for (i = 0, failed = 0; i < cxlhdm->decoder_count; i++) {
+ for (i = 0; i < cxlhdm->decoder_count; i++) {
int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
int rc, target_count = cxlhdm->target_count;
struct cxl_decoder *cxld;
- if (is_cxl_endpoint(port))
- cxld = cxl_endpoint_decoder_alloc(port);
- else
- cxld = cxl_switch_decoder_alloc(port, target_count);
- if (IS_ERR(cxld)) {
- dev_warn(&port->dev,
- "Failed to allocate the decoder\n");
- return PTR_ERR(cxld);
+ if (is_cxl_endpoint(port)) {
+ struct cxl_endpoint_decoder *cxled;
+
+ cxled = cxl_endpoint_decoder_alloc(port);
+ if (IS_ERR(cxled)) {
+ dev_warn(&port->dev,
+ "Failed to allocate the decoder\n");
+ return PTR_ERR(cxled);
+ }
+ cxld = &cxled->cxld;
+ } else {
+ struct cxl_switch_decoder *cxlsd;
+
+ cxlsd = cxl_switch_decoder_alloc(port, target_count);
+ if (IS_ERR(cxlsd)) {
+ dev_warn(&port->dev,
+ "Failed to allocate the decoder\n");
+ return PTR_ERR(cxlsd);
+ }
+ cxld = &cxlsd->cxld;
}
- rc = init_hdm_decoder(port, cxld, target_map,
- cxlhdm->regs.hdm_decoder, i);
+ rc = init_hdm_decoder(port, cxld, target_map, hdm, i, &dpa_base);
if (rc) {
put_device(&cxld->dev);
- failed++;
- continue;
+ return rc;
}
rc = add_hdm_decoder(port, cxld, target_map);
if (rc) {
@@ -266,11 +850,6 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
}
}
- if (failed == cxlhdm->decoder_count) {
- dev_err(&port->dev, "No valid decoders found\n");
- return -ENXIO;
- }
-
return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_decoders, CXL);
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index cbf23beebebe..16176b9278b4 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -718,12 +718,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, CXL);
*/
static int cxl_mem_get_partition_info(struct cxl_dev_state *cxlds)
{
- struct cxl_mbox_get_partition_info {
- __le64 active_volatile_cap;
- __le64 active_persistent_cap;
- __le64 next_volatile_cap;
- __le64 next_persistent_cap;
- } __packed pi;
+ struct cxl_mbox_get_partition_info pi;
int rc;
rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_PARTITION_INFO, NULL, 0,
@@ -773,15 +768,6 @@ int cxl_dev_state_identify(struct cxl_dev_state *cxlds)
cxlds->partition_align_bytes =
le64_to_cpu(id.partition_align) * CXL_CAPACITY_MULTIPLIER;
- dev_dbg(cxlds->dev,
- "Identify Memory Device\n"
- " total_bytes = %#llx\n"
- " volatile_only_bytes = %#llx\n"
- " persistent_only_bytes = %#llx\n"
- " partition_align_bytes = %#llx\n",
- cxlds->total_bytes, cxlds->volatile_only_bytes,
- cxlds->persistent_only_bytes, cxlds->partition_align_bytes);
-
cxlds->lsa_size = le32_to_cpu(id.lsa_size);
memcpy(cxlds->firmware_version, id.fw_revision, sizeof(id.fw_revision));
@@ -789,42 +775,63 @@ int cxl_dev_state_identify(struct cxl_dev_state *cxlds)
}
EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL);
-int cxl_mem_create_range_info(struct cxl_dev_state *cxlds)
+static int add_dpa_res(struct device *dev, struct resource *parent,
+ struct resource *res, resource_size_t start,
+ resource_size_t size, const char *type)
{
int rc;
- if (cxlds->partition_align_bytes == 0) {
- cxlds->ram_range.start = 0;
- cxlds->ram_range.end = cxlds->volatile_only_bytes - 1;
- cxlds->pmem_range.start = cxlds->volatile_only_bytes;
- cxlds->pmem_range.end = cxlds->volatile_only_bytes +
- cxlds->persistent_only_bytes - 1;
+ res->name = type;
+ res->start = start;
+ res->end = start + size - 1;
+ res->flags = IORESOURCE_MEM;
+ if (resource_size(res) == 0) {
+ dev_dbg(dev, "DPA(%s): no capacity\n", res->name);
return 0;
}
-
- rc = cxl_mem_get_partition_info(cxlds);
+ rc = request_resource(parent, res);
if (rc) {
- dev_err(cxlds->dev, "Failed to query partition information\n");
+ dev_err(dev, "DPA(%s): failed to track %pr (%d)\n", res->name,
+ res, rc);
return rc;
}
- dev_dbg(cxlds->dev,
- "Get Partition Info\n"
- " active_volatile_bytes = %#llx\n"
- " active_persistent_bytes = %#llx\n"
- " next_volatile_bytes = %#llx\n"
- " next_persistent_bytes = %#llx\n",
- cxlds->active_volatile_bytes, cxlds->active_persistent_bytes,
- cxlds->next_volatile_bytes, cxlds->next_persistent_bytes);
+ dev_dbg(dev, "DPA(%s): %pr\n", res->name, res);
+
+ return 0;
+}
- cxlds->ram_range.start = 0;
- cxlds->ram_range.end = cxlds->active_volatile_bytes - 1;
+int cxl_mem_create_range_info(struct cxl_dev_state *cxlds)
+{
+ struct device *dev = cxlds->dev;
+ int rc;
- cxlds->pmem_range.start = cxlds->active_volatile_bytes;
- cxlds->pmem_range.end =
- cxlds->active_volatile_bytes + cxlds->active_persistent_bytes - 1;
+ cxlds->dpa_res =
+ (struct resource)DEFINE_RES_MEM(0, cxlds->total_bytes);
- return 0;
+ if (cxlds->partition_align_bytes == 0) {
+ rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
+ cxlds->volatile_only_bytes, "ram");
+ if (rc)
+ return rc;
+ return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
+ cxlds->volatile_only_bytes,
+ cxlds->persistent_only_bytes, "pmem");
+ }
+
+ rc = cxl_mem_get_partition_info(cxlds);
+ if (rc) {
+ dev_err(dev, "Failed to query partition information\n");
+ return rc;
+ }
+
+ rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
+ cxlds->active_volatile_bytes, "ram");
+ if (rc)
+ return rc;
+ return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
+ cxlds->active_volatile_bytes,
+ cxlds->active_persistent_bytes, "pmem");
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, CXL);
@@ -845,19 +852,11 @@ struct cxl_dev_state *cxl_dev_state_create(struct device *dev)
}
EXPORT_SYMBOL_NS_GPL(cxl_dev_state_create, CXL);
-static struct dentry *cxl_debugfs;
-
void __init cxl_mbox_init(void)
{
struct dentry *mbox_debugfs;
- cxl_debugfs = debugfs_create_dir("cxl", NULL);
- mbox_debugfs = debugfs_create_dir("mbox", cxl_debugfs);
+ mbox_debugfs = cxl_debugfs_create_dir("mbox");
debugfs_create_bool("raw_allow_all", 0600, mbox_debugfs,
&cxl_raw_allow_all);
}
-
-void cxl_mbox_exit(void)
-{
- debugfs_remove_recursive(cxl_debugfs);
-}
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index f7cdcd33504a..20ce488a7754 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -68,7 +68,7 @@ static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
- unsigned long long len = range_len(&cxlds->ram_range);
+ unsigned long long len = resource_size(&cxlds->ram_res);
return sysfs_emit(buf, "%#llx\n", len);
}
@@ -81,7 +81,7 @@ static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
- unsigned long long len = range_len(&cxlds->pmem_range);
+ unsigned long long len = resource_size(&cxlds->pmem_res);
return sysfs_emit(buf, "%#llx\n", len);
}
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index c4c99ff7b55e..9240df53ed87 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -4,6 +4,7 @@
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pci.h>
+#include <linux/pci-doe.h>
#include <cxlpci.h>
#include <cxlmem.h>
#include <cxl.h>
@@ -225,7 +226,6 @@ static int dvsec_range_allowed(struct device *dev, void *arg)
{
struct range *dev_range = arg;
struct cxl_decoder *cxld;
- struct range root_range;
if (!is_root_decoder(dev))
return 0;
@@ -237,12 +237,7 @@ static int dvsec_range_allowed(struct device *dev, void *arg)
if (!(cxld->flags & CXL_DECODER_F_RAM))
return 0;
- root_range = (struct range) {
- .start = cxld->platform_res.start,
- .end = cxld->platform_res.end,
- };
-
- return range_contains(&root_range, dev_range);
+ return range_contains(&cxld->hpa_range, dev_range);
}
static void disable_hdm(void *_cxlhdm)
@@ -458,3 +453,175 @@ hdm_init:
return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_hdm_decode_init, CXL);
+
+#define CXL_DOE_TABLE_ACCESS_REQ_CODE 0x000000ff
+#define CXL_DOE_TABLE_ACCESS_REQ_CODE_READ 0
+#define CXL_DOE_TABLE_ACCESS_TABLE_TYPE 0x0000ff00
+#define CXL_DOE_TABLE_ACCESS_TABLE_TYPE_CDATA 0
+#define CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE 0xffff0000
+#define CXL_DOE_TABLE_ACCESS_LAST_ENTRY 0xffff
+#define CXL_DOE_PROTOCOL_TABLE_ACCESS 2
+
+static struct pci_doe_mb *find_cdat_doe(struct device *uport)
+{
+ struct cxl_memdev *cxlmd;
+ struct cxl_dev_state *cxlds;
+ unsigned long index;
+ void *entry;
+
+ cxlmd = to_cxl_memdev(uport);
+ cxlds = cxlmd->cxlds;
+
+ xa_for_each(&cxlds->doe_mbs, index, entry) {
+ struct pci_doe_mb *cur = entry;
+
+ if (pci_doe_supports_prot(cur, PCI_DVSEC_VENDOR_ID_CXL,
+ CXL_DOE_PROTOCOL_TABLE_ACCESS))
+ return cur;
+ }
+
+ return NULL;
+}
+
+#define CDAT_DOE_REQ(entry_handle) \
+ (FIELD_PREP(CXL_DOE_TABLE_ACCESS_REQ_CODE, \
+ CXL_DOE_TABLE_ACCESS_REQ_CODE_READ) | \
+ FIELD_PREP(CXL_DOE_TABLE_ACCESS_TABLE_TYPE, \
+ CXL_DOE_TABLE_ACCESS_TABLE_TYPE_CDATA) | \
+ FIELD_PREP(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE, (entry_handle)))
+
+static void cxl_doe_task_complete(struct pci_doe_task *task)
+{
+ complete(task->private);
+}
+
+struct cdat_doe_task {
+ u32 request_pl;
+ u32 response_pl[32];
+ struct completion c;
+ struct pci_doe_task task;
+};
+
+#define DECLARE_CDAT_DOE_TASK(req, cdt) \
+struct cdat_doe_task cdt = { \
+ .c = COMPLETION_INITIALIZER_ONSTACK(cdt.c), \
+ .request_pl = req, \
+ .task = { \
+ .prot.vid = PCI_DVSEC_VENDOR_ID_CXL, \
+ .prot.type = CXL_DOE_PROTOCOL_TABLE_ACCESS, \
+ .request_pl = &cdt.request_pl, \
+ .request_pl_sz = sizeof(cdt.request_pl), \
+ .response_pl = cdt.response_pl, \
+ .response_pl_sz = sizeof(cdt.response_pl), \
+ .complete = cxl_doe_task_complete, \
+ .private = &cdt.c, \
+ } \
+}
+
+static int cxl_cdat_get_length(struct device *dev,
+ struct pci_doe_mb *cdat_doe,
+ size_t *length)
+{
+ DECLARE_CDAT_DOE_TASK(CDAT_DOE_REQ(0), t);
+ int rc;
+
+ rc = pci_doe_submit_task(cdat_doe, &t.task);
+ if (rc < 0) {
+ dev_err(dev, "DOE submit failed: %d", rc);
+ return rc;
+ }
+ wait_for_completion(&t.c);
+ if (t.task.rv < sizeof(u32))
+ return -EIO;
+
+ *length = t.response_pl[1];
+ dev_dbg(dev, "CDAT length %zu\n", *length);
+
+ return 0;
+}
+
+static int cxl_cdat_read_table(struct device *dev,
+ struct pci_doe_mb *cdat_doe,
+ struct cxl_cdat *cdat)
+{
+ size_t length = cdat->length;
+ u32 *data = cdat->table;
+ int entry_handle = 0;
+
+ do {
+ DECLARE_CDAT_DOE_TASK(CDAT_DOE_REQ(entry_handle), t);
+ size_t entry_dw;
+ u32 *entry;
+ int rc;
+
+ rc = pci_doe_submit_task(cdat_doe, &t.task);
+ if (rc < 0) {
+ dev_err(dev, "DOE submit failed: %d", rc);
+ return rc;
+ }
+ wait_for_completion(&t.c);
+ /* 1 DW header + 1 DW data min */
+ if (t.task.rv < (2 * sizeof(u32)))
+ return -EIO;
+
+ /* Get the CXL table access header entry handle */
+ entry_handle = FIELD_GET(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE,
+ t.response_pl[0]);
+ entry = t.response_pl + 1;
+ entry_dw = t.task.rv / sizeof(u32);
+ /* Skip Header */
+ entry_dw -= 1;
+ entry_dw = min(length / sizeof(u32), entry_dw);
+ /* Prevent length < 1 DW from causing a buffer overflow */
+ if (entry_dw) {
+ memcpy(data, entry, entry_dw * sizeof(u32));
+ length -= entry_dw * sizeof(u32);
+ data += entry_dw;
+ }
+ } while (entry_handle != CXL_DOE_TABLE_ACCESS_LAST_ENTRY);
+
+ return 0;
+}
+
+/**
+ * read_cdat_data - Read the CDAT data on this port
+ * @port: Port to read data from
+ *
+ * This call will sleep waiting for responses from the DOE mailbox.
+ */
+void read_cdat_data(struct cxl_port *port)
+{
+ struct pci_doe_mb *cdat_doe;
+ struct device *dev = &port->dev;
+ struct device *uport = port->uport;
+ size_t cdat_length;
+ int rc;
+
+ cdat_doe = find_cdat_doe(uport);
+ if (!cdat_doe) {
+ dev_dbg(dev, "No CDAT mailbox\n");
+ return;
+ }
+
+ port->cdat_available = true;
+
+ if (cxl_cdat_get_length(dev, cdat_doe, &cdat_length)) {
+ dev_dbg(dev, "No CDAT length\n");
+ return;
+ }
+
+ port->cdat.table = devm_kzalloc(dev, cdat_length, GFP_KERNEL);
+ if (!port->cdat.table)
+ return;
+
+ port->cdat.length = cdat_length;
+ rc = cxl_cdat_read_table(dev, cdat_doe, &port->cdat);
+ if (rc) {
+ /* Don't leave table data allocated on error */
+ devm_kfree(dev, port->cdat.table);
+ port->cdat.table = NULL;
+ port->cdat.length = 0;
+ dev_err(dev, "CDAT data read error\n");
+ }
+}
+EXPORT_SYMBOL_NS_GPL(read_cdat_data, CXL);
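
The hunk above retrieves the CDAT over the PCI DOE mailbox one entry at a time, chaining on the entry handle returned in each response header. A hedged sketch of how the request dword and the next-entry handle are packed and unpacked with the masks defined above (illustrative only; cdat_req_dw and cdat_next_handle are hypothetical helpers, not part of the patch):

/*
 * Illustrative sketch: compose a CDAT table-access request dword and
 * extract the next entry handle from a response header, using the
 * CXL_DOE_TABLE_ACCESS_* masks defined in the hunk above.
 */
#include <linux/bitfield.h>
#include <linux/types.h>

static u32 cdat_req_dw(u16 entry_handle)
{
	return FIELD_PREP(CXL_DOE_TABLE_ACCESS_REQ_CODE,
			  CXL_DOE_TABLE_ACCESS_REQ_CODE_READ) |
	       FIELD_PREP(CXL_DOE_TABLE_ACCESS_TABLE_TYPE,
			  CXL_DOE_TABLE_ACCESS_TABLE_TYPE_CDATA) |
	       FIELD_PREP(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE, entry_handle);
}

static u16 cdat_next_handle(u32 response_dw0)
{
	/* CXL_DOE_TABLE_ACCESS_LAST_ENTRY (0xffff) terminates the walk */
	return FIELD_GET(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE, response_dw0);
}

This is the same composition that CDAT_DOE_REQ() expands to; cxl_cdat_read_table() keeps submitting tasks until the returned handle equals CXL_DOE_TABLE_ACCESS_LAST_ENTRY.
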
diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c
index bec7cfb54ebf..1d12a8206444 100644
--- a/drivers/cxl/core/pmem.c
+++ b/drivers/cxl/core/pmem.c
@@ -62,9 +62,9 @@ static int match_nvdimm_bridge(struct device *dev, void *data)
return is_cxl_nvdimm_bridge(dev);
}
-struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_nvdimm *cxl_nvd)
+struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct device *start)
{
- struct cxl_port *port = find_cxl_root(&cxl_nvd->dev);
+ struct cxl_port *port = find_cxl_root(start);
struct device *dev;
if (!port)
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index dbce99bdffab..bffde862de0b 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/memregion.h>
#include <linux/workqueue.h>
+#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -42,6 +44,8 @@ static int cxl_device_id(struct device *dev)
return CXL_DEVICE_NVDIMM_BRIDGE;
if (dev->type == &cxl_nvdimm_type)
return CXL_DEVICE_NVDIMM;
+ if (dev->type == CXL_PMEM_REGION_TYPE())
+ return CXL_DEVICE_PMEM_REGION;
if (is_cxl_port(dev)) {
if (is_cxl_root(to_cxl_port(dev)))
return CXL_DEVICE_ROOT;
@@ -49,6 +53,8 @@ static int cxl_device_id(struct device *dev)
}
if (is_cxl_memdev(dev))
return CXL_DEVICE_MEMORY_EXPANDER;
+ if (dev->type == CXL_REGION_TYPE())
+ return CXL_DEVICE_REGION;
return 0;
}
@@ -73,14 +79,8 @@ static ssize_t start_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct cxl_decoder *cxld = to_cxl_decoder(dev);
- u64 start;
- if (is_root_decoder(dev))
- start = cxld->platform_res.start;
- else
- start = cxld->decoder_range.start;
-
- return sysfs_emit(buf, "%#llx\n", start);
+ return sysfs_emit(buf, "%#llx\n", cxld->hpa_range.start);
}
static DEVICE_ATTR_ADMIN_RO(start);
@@ -88,14 +88,8 @@ static ssize_t size_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct cxl_decoder *cxld = to_cxl_decoder(dev);
- u64 size;
-
- if (is_root_decoder(dev))
- size = resource_size(&cxld->platform_res);
- else
- size = range_len(&cxld->decoder_range);
- return sysfs_emit(buf, "%#llx\n", size);
+ return sysfs_emit(buf, "%#llx\n", range_len(&cxld->hpa_range));
}
static DEVICE_ATTR_RO(size);
@@ -131,20 +125,21 @@ static ssize_t target_type_show(struct device *dev,
}
static DEVICE_ATTR_RO(target_type);
-static ssize_t emit_target_list(struct cxl_decoder *cxld, char *buf)
+static ssize_t emit_target_list(struct cxl_switch_decoder *cxlsd, char *buf)
{
+ struct cxl_decoder *cxld = &cxlsd->cxld;
ssize_t offset = 0;
int i, rc = 0;
for (i = 0; i < cxld->interleave_ways; i++) {
- struct cxl_dport *dport = cxld->target[i];
+ struct cxl_dport *dport = cxlsd->target[i];
struct cxl_dport *next = NULL;
if (!dport)
break;
if (i + 1 < cxld->interleave_ways)
- next = cxld->target[i + 1];
+ next = cxlsd->target[i + 1];
rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id,
next ? "," : "");
if (rc < 0)
@@ -158,15 +153,15 @@ static ssize_t emit_target_list(struct cxl_decoder *cxld, char *buf)
static ssize_t target_list_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct cxl_decoder *cxld = to_cxl_decoder(dev);
+ struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);
ssize_t offset;
unsigned int seq;
int rc;
do {
- seq = read_seqbegin(&cxld->target_lock);
- rc = emit_target_list(cxld, buf);
- } while (read_seqretry(&cxld->target_lock, seq));
+ seq = read_seqbegin(&cxlsd->target_lock);
+ rc = emit_target_list(cxlsd, buf);
+ } while (read_seqretry(&cxlsd->target_lock, seq));
if (rc < 0)
return rc;
@@ -180,10 +175,121 @@ static ssize_t target_list_show(struct device *dev,
}
static DEVICE_ATTR_RO(target_list);
+static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
+
+ switch (cxled->mode) {
+ case CXL_DECODER_RAM:
+ return sysfs_emit(buf, "ram\n");
+ case CXL_DECODER_PMEM:
+ return sysfs_emit(buf, "pmem\n");
+ case CXL_DECODER_NONE:
+ return sysfs_emit(buf, "none\n");
+ case CXL_DECODER_MIXED:
+ default:
+ return sysfs_emit(buf, "mixed\n");
+ }
+}
+
+static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
+ enum cxl_decoder_mode mode;
+ ssize_t rc;
+
+ if (sysfs_streq(buf, "pmem"))
+ mode = CXL_DECODER_PMEM;
+ else if (sysfs_streq(buf, "ram"))
+ mode = CXL_DECODER_RAM;
+ else
+ return -EINVAL;
+
+ rc = cxl_dpa_set_mode(cxled, mode);
+ if (rc)
+ return rc;
+
+ return len;
+}
+static DEVICE_ATTR_RW(mode);
+
+static ssize_t dpa_resource_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
+ u64 base = cxl_dpa_resource_start(cxled);
+
+ return sysfs_emit(buf, "%#llx\n", base);
+}
+static DEVICE_ATTR_RO(dpa_resource);
+
+static ssize_t dpa_size_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
+ resource_size_t size = cxl_dpa_size(cxled);
+
+ return sysfs_emit(buf, "%pa\n", &size);
+}
+
+static ssize_t dpa_size_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
+ unsigned long long size;
+ ssize_t rc;
+
+ rc = kstrtoull(buf, 0, &size);
+ if (rc)
+ return rc;
+
+ if (!IS_ALIGNED(size, SZ_256M))
+ return -EINVAL;
+
+ rc = cxl_dpa_free(cxled);
+ if (rc)
+ return rc;
+
+ if (size == 0)
+ return len;
+
+ rc = cxl_dpa_alloc(cxled, size);
+ if (rc)
+ return rc;
+
+ return len;
+}
+static DEVICE_ATTR_RW(dpa_size);
+
+static ssize_t interleave_granularity_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_decoder *cxld = to_cxl_decoder(dev);
+
+ return sysfs_emit(buf, "%d\n", cxld->interleave_granularity);
+}
+
+static DEVICE_ATTR_RO(interleave_granularity);
+
+static ssize_t interleave_ways_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_decoder *cxld = to_cxl_decoder(dev);
+
+ return sysfs_emit(buf, "%d\n", cxld->interleave_ways);
+}
+
+static DEVICE_ATTR_RO(interleave_ways);
+
static struct attribute *cxl_decoder_base_attrs[] = {
&dev_attr_start.attr,
&dev_attr_size.attr,
&dev_attr_locked.attr,
+ &dev_attr_interleave_granularity.attr,
+ &dev_attr_interleave_ways.attr,
NULL,
};
@@ -197,11 +303,35 @@ static struct attribute *cxl_decoder_root_attrs[] = {
&dev_attr_cap_type2.attr,
&dev_attr_cap_type3.attr,
&dev_attr_target_list.attr,
+ SET_CXL_REGION_ATTR(create_pmem_region)
+ SET_CXL_REGION_ATTR(delete_region)
NULL,
};
+static bool can_create_pmem(struct cxl_root_decoder *cxlrd)
+{
+ unsigned long flags = CXL_DECODER_F_TYPE3 | CXL_DECODER_F_PMEM;
+
+ return (cxlrd->cxlsd.cxld.flags & flags) == flags;
+}
+
+static umode_t cxl_root_decoder_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
+
+ if (a == CXL_REGION_ATTR(create_pmem_region) && !can_create_pmem(cxlrd))
+ return 0;
+
+ if (a == CXL_REGION_ATTR(delete_region) && !can_create_pmem(cxlrd))
+ return 0;
+
+ return a->mode;
+}
+
static struct attribute_group cxl_decoder_root_attribute_group = {
.attrs = cxl_decoder_root_attrs,
+ .is_visible = cxl_root_decoder_visible,
};
static const struct attribute_group *cxl_decoder_root_attribute_groups[] = {
@@ -214,6 +344,7 @@ static const struct attribute_group *cxl_decoder_root_attribute_groups[] = {
static struct attribute *cxl_decoder_switch_attrs[] = {
&dev_attr_target_type.attr,
&dev_attr_target_list.attr,
+ SET_CXL_REGION_ATTR(region)
NULL,
};
@@ -230,6 +361,10 @@ static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = {
static struct attribute *cxl_decoder_endpoint_attrs[] = {
&dev_attr_target_type.attr,
+ &dev_attr_mode.attr,
+ &dev_attr_dpa_size.attr,
+ &dev_attr_dpa_resource.attr,
+ SET_CXL_REGION_ATTR(region)
NULL,
};
@@ -244,31 +379,64 @@ static const struct attribute_group *cxl_decoder_endpoint_attribute_groups[] = {
NULL,
};
-static void cxl_decoder_release(struct device *dev)
+static void __cxl_decoder_release(struct cxl_decoder *cxld)
{
- struct cxl_decoder *cxld = to_cxl_decoder(dev);
- struct cxl_port *port = to_cxl_port(dev->parent);
+ struct cxl_port *port = to_cxl_port(cxld->dev.parent);
ida_free(&port->decoder_ida, cxld->id);
- kfree(cxld);
put_device(&port->dev);
}
+static void cxl_endpoint_decoder_release(struct device *dev)
+{
+ struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
+
+ __cxl_decoder_release(&cxled->cxld);
+ kfree(cxled);
+}
+
+static void cxl_switch_decoder_release(struct device *dev)
+{
+ struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);
+
+ __cxl_decoder_release(&cxlsd->cxld);
+ kfree(cxlsd);
+}
+
+struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev)
+{
+ if (dev_WARN_ONCE(dev, !is_root_decoder(dev),
+ "not a cxl_root_decoder device\n"))
+ return NULL;
+ return container_of(dev, struct cxl_root_decoder, cxlsd.cxld.dev);
+}
+EXPORT_SYMBOL_NS_GPL(to_cxl_root_decoder, CXL);
+
+static void cxl_root_decoder_release(struct device *dev)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
+
+ if (atomic_read(&cxlrd->region_id) >= 0)
+ memregion_free(atomic_read(&cxlrd->region_id));
+ __cxl_decoder_release(&cxlrd->cxlsd.cxld);
+ kfree(cxlrd);
+}
+
static const struct device_type cxl_decoder_endpoint_type = {
.name = "cxl_decoder_endpoint",
- .release = cxl_decoder_release,
+ .release = cxl_endpoint_decoder_release,
.groups = cxl_decoder_endpoint_attribute_groups,
};
static const struct device_type cxl_decoder_switch_type = {
.name = "cxl_decoder_switch",
- .release = cxl_decoder_release,
+ .release = cxl_switch_decoder_release,
.groups = cxl_decoder_switch_attribute_groups,
};
static const struct device_type cxl_decoder_root_type = {
.name = "cxl_decoder_root",
- .release = cxl_decoder_release,
+ .release = cxl_root_decoder_release,
.groups = cxl_decoder_root_attribute_groups,
};
@@ -283,39 +451,63 @@ bool is_root_decoder(struct device *dev)
}
EXPORT_SYMBOL_NS_GPL(is_root_decoder, CXL);
-bool is_cxl_decoder(struct device *dev)
+bool is_switch_decoder(struct device *dev)
{
- return dev->type && dev->type->release == cxl_decoder_release;
+ return is_root_decoder(dev) || dev->type == &cxl_decoder_switch_type;
}
-EXPORT_SYMBOL_NS_GPL(is_cxl_decoder, CXL);
struct cxl_decoder *to_cxl_decoder(struct device *dev)
{
- if (dev_WARN_ONCE(dev, dev->type->release != cxl_decoder_release,
+ if (dev_WARN_ONCE(dev,
+ !is_switch_decoder(dev) && !is_endpoint_decoder(dev),
"not a cxl_decoder device\n"))
return NULL;
return container_of(dev, struct cxl_decoder, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_decoder, CXL);
+struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev)
+{
+ if (dev_WARN_ONCE(dev, !is_endpoint_decoder(dev),
+ "not a cxl_endpoint_decoder device\n"))
+ return NULL;
+ return container_of(dev, struct cxl_endpoint_decoder, cxld.dev);
+}
+EXPORT_SYMBOL_NS_GPL(to_cxl_endpoint_decoder, CXL);
+
+struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev)
+{
+ if (dev_WARN_ONCE(dev, !is_switch_decoder(dev),
+ "not a cxl_switch_decoder device\n"))
+ return NULL;
+ return container_of(dev, struct cxl_switch_decoder, cxld.dev);
+}
+
static void cxl_ep_release(struct cxl_ep *ep)
{
- if (!ep)
- return;
- list_del(&ep->list);
put_device(ep->ep);
kfree(ep);
}
+static void cxl_ep_remove(struct cxl_port *port, struct cxl_ep *ep)
+{
+ if (!ep)
+ return;
+ xa_erase(&port->endpoints, (unsigned long) ep->ep);
+ cxl_ep_release(ep);
+}
+
static void cxl_port_release(struct device *dev)
{
struct cxl_port *port = to_cxl_port(dev);
- struct cxl_ep *ep, *_e;
+ unsigned long index;
+ struct cxl_ep *ep;
- device_lock(dev);
- list_for_each_entry_safe(ep, _e, &port->endpoints, list)
- cxl_ep_release(ep);
- device_unlock(dev);
+ xa_for_each(&port->endpoints, index, ep)
+ cxl_ep_remove(port, ep);
+ xa_destroy(&port->endpoints);
+ xa_destroy(&port->dports);
+ xa_destroy(&port->regions);
ida_free(&cxl_port_ida, port->id);
kfree(port);
}
@@ -370,7 +562,7 @@ static void unregister_port(void *_port)
lock_dev = &parent->dev;
device_lock_assert(lock_dev);
- port->uport = NULL;
+ port->dead = true;
device_unregister(&port->dev);
}
@@ -395,7 +587,7 @@ static struct lock_class_key cxl_port_key;
static struct cxl_port *cxl_port_alloc(struct device *uport,
resource_size_t component_reg_phys,
- struct cxl_port *parent_port)
+ struct cxl_dport *parent_dport)
{
struct cxl_port *port;
struct device *dev;
@@ -409,6 +601,7 @@ static struct cxl_port *cxl_port_alloc(struct device *uport,
if (rc < 0)
goto err;
port->id = rc;
+ port->uport = uport;
/*
* The top-level cxl_port "cxl_root" does not have a cxl_port as
@@ -417,17 +610,37 @@ static struct cxl_port *cxl_port_alloc(struct device *uport,
* description.
*/
dev = &port->dev;
- if (parent_port) {
+ if (parent_dport) {
+ struct cxl_port *parent_port = parent_dport->port;
+ struct cxl_port *iter;
+
dev->parent = &parent_port->dev;
port->depth = parent_port->depth + 1;
+ port->parent_dport = parent_dport;
+
+ /*
+ * walk to the host bridge, or the first ancestor that knows
+ * the host bridge
+ */
+ iter = port;
+ while (!iter->host_bridge &&
+ !is_cxl_root(to_cxl_port(iter->dev.parent)))
+ iter = to_cxl_port(iter->dev.parent);
+ if (iter->host_bridge)
+ port->host_bridge = iter->host_bridge;
+ else
+ port->host_bridge = iter->uport;
+ dev_dbg(uport, "host-bridge: %s\n", dev_name(port->host_bridge));
} else
dev->parent = uport;
- port->uport = uport;
port->component_reg_phys = component_reg_phys;
ida_init(&port->decoder_ida);
- INIT_LIST_HEAD(&port->dports);
- INIT_LIST_HEAD(&port->endpoints);
+ port->hdm_end = -1;
+ port->commit_end = -1;
+ xa_init(&port->dports);
+ xa_init(&port->endpoints);
+ xa_init(&port->regions);
device_initialize(dev);
lockdep_set_class_and_subclass(&dev->mutex, &cxl_port_key, port->depth);
@@ -447,24 +660,24 @@ err:
* @host: host device for devm operations
* @uport: "physical" device implementing this upstream port
* @component_reg_phys: (optional) for configurable cxl_port instances
- * @parent_port: next hop up in the CXL memory decode hierarchy
+ * @parent_dport: next hop up in the CXL memory decode hierarchy
*/
struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
resource_size_t component_reg_phys,
- struct cxl_port *parent_port)
+ struct cxl_dport *parent_dport)
{
struct cxl_port *port;
struct device *dev;
int rc;
- port = cxl_port_alloc(uport, component_reg_phys, parent_port);
+ port = cxl_port_alloc(uport, component_reg_phys, parent_dport);
if (IS_ERR(port))
return port;
dev = &port->dev;
if (is_cxl_memdev(uport))
rc = dev_set_name(dev, "endpoint%d", port->id);
- else if (parent_port)
+ else if (parent_dport)
rc = dev_set_name(dev, "port%d", port->id);
else
rc = dev_set_name(dev, "root%d", port->id);
@@ -556,17 +769,13 @@ static int match_root_child(struct device *dev, const void *match)
return 0;
port = to_cxl_port(dev);
- device_lock(dev);
- list_for_each_entry(dport, &port->dports, list) {
- iter = match;
- while (iter) {
- if (iter == dport->dport)
- goto out;
- iter = iter->parent;
- }
+ iter = match;
+ while (iter) {
+ dport = cxl_find_dport_by_dev(port, iter);
+ if (dport)
+ break;
+ iter = iter->parent;
}
-out:
- device_unlock(dev);
return !!iter;
}
@@ -590,9 +799,10 @@ EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL);
static struct cxl_dport *find_dport(struct cxl_port *port, int id)
{
struct cxl_dport *dport;
+ unsigned long index;
device_lock_assert(&port->dev);
- list_for_each_entry (dport, &port->dports, list)
+ xa_for_each(&port->dports, index, dport)
if (dport->port_id == id)
return dport;
return NULL;
@@ -604,15 +814,15 @@ static int add_dport(struct cxl_port *port, struct cxl_dport *new)
device_lock_assert(&port->dev);
dup = find_dport(port, new->port_id);
- if (dup)
+ if (dup) {
dev_err(&port->dev,
"unable to add dport%d-%s non-unique port id (%s)\n",
new->port_id, dev_name(new->dport),
dev_name(dup->dport));
- else
- list_add_tail(&new->list, &port->dports);
-
- return dup ? -EEXIST : 0;
+ return -EBUSY;
+ }
+ return xa_insert(&port->dports, (unsigned long)new->dport, new,
+ GFP_KERNEL);
}
/*
@@ -639,10 +849,8 @@ static void cxl_dport_remove(void *data)
struct cxl_dport *dport = data;
struct cxl_port *port = dport->port;
+ xa_erase(&port->dports, (unsigned long) dport->dport);
put_device(dport->dport);
- cond_cxl_root_lock(port);
- list_del(&dport->list);
- cond_cxl_root_unlock(port);
}
static void cxl_dport_unlink(void *data)
@@ -694,7 +902,6 @@ struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
if (!dport)
return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&dport->list);
dport->dport = dport_dev;
dport->port_id = port_id;
dport->component_reg_phys = component_reg_phys;
@@ -723,44 +930,33 @@ struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport, CXL);
-static struct cxl_ep *find_ep(struct cxl_port *port, struct device *ep_dev)
-{
- struct cxl_ep *ep;
-
- device_lock_assert(&port->dev);
- list_for_each_entry(ep, &port->endpoints, list)
- if (ep->ep == ep_dev)
- return ep;
- return NULL;
-}
-
-static int add_ep(struct cxl_port *port, struct cxl_ep *new)
+static int add_ep(struct cxl_ep *new)
{
- struct cxl_ep *dup;
+ struct cxl_port *port = new->dport->port;
+ int rc;
device_lock(&port->dev);
if (port->dead) {
device_unlock(&port->dev);
return -ENXIO;
}
- dup = find_ep(port, new->ep);
- if (!dup)
- list_add_tail(&new->list, &port->endpoints);
+ rc = xa_insert(&port->endpoints, (unsigned long)new->ep, new,
+ GFP_KERNEL);
device_unlock(&port->dev);
- return dup ? -EEXIST : 0;
+ return rc;
}
/**
* cxl_add_ep - register an endpoint's interest in a port
- * @port: a port in the endpoint's topology ancestry
+ * @dport: the dport that routes to @ep_dev
* @ep_dev: device representing the endpoint
*
* Intermediate CXL ports are scanned based on the arrival of endpoints.
* When those endpoints depart, the port can be destroyed once all
* endpoints that care about that port have been removed.
*/
-static int cxl_add_ep(struct cxl_port *port, struct device *ep_dev)
+static int cxl_add_ep(struct cxl_dport *dport, struct device *ep_dev)
{
struct cxl_ep *ep;
int rc;
@@ -769,10 +965,10 @@ static int cxl_add_ep(struct cxl_port *port, struct device *ep_dev)
if (!ep)
return -ENOMEM;
- INIT_LIST_HEAD(&ep->list);
ep->ep = get_device(ep_dev);
+ ep->dport = dport;
- rc = add_ep(port, ep);
+ rc = add_ep(ep);
if (rc)
cxl_ep_release(ep);
return rc;
@@ -781,11 +977,13 @@ static int cxl_add_ep(struct cxl_port *port, struct device *ep_dev)
struct cxl_find_port_ctx {
const struct device *dport_dev;
const struct cxl_port *parent_port;
+ struct cxl_dport **dport;
};
static int match_port_by_dport(struct device *dev, const void *data)
{
const struct cxl_find_port_ctx *ctx = data;
+ struct cxl_dport *dport;
struct cxl_port *port;
if (!is_cxl_port(dev))
@@ -794,7 +992,10 @@ static int match_port_by_dport(struct device *dev, const void *data)
return 0;
port = to_cxl_port(dev);
- return cxl_find_dport_by_dev(port, ctx->dport_dev) != NULL;
+ dport = cxl_find_dport_by_dev(port, ctx->dport_dev);
+ if (ctx->dport)
+ *ctx->dport = dport;
+ return dport != NULL;
}
static struct cxl_port *__find_cxl_port(struct cxl_find_port_ctx *ctx)
@@ -810,24 +1011,32 @@ static struct cxl_port *__find_cxl_port(struct cxl_find_port_ctx *ctx)
return NULL;
}
-static struct cxl_port *find_cxl_port(struct device *dport_dev)
+static struct cxl_port *find_cxl_port(struct device *dport_dev,
+ struct cxl_dport **dport)
{
struct cxl_find_port_ctx ctx = {
.dport_dev = dport_dev,
+ .dport = dport,
};
+ struct cxl_port *port;
- return __find_cxl_port(&ctx);
+ port = __find_cxl_port(&ctx);
+ return port;
}
static struct cxl_port *find_cxl_port_at(struct cxl_port *parent_port,
- struct device *dport_dev)
+ struct device *dport_dev,
+ struct cxl_dport **dport)
{
struct cxl_find_port_ctx ctx = {
.dport_dev = dport_dev,
.parent_port = parent_port,
+ .dport = dport,
};
+ struct cxl_port *port;
- return __find_cxl_port(&ctx);
+ port = __find_cxl_port(&ctx);
+ return port;
}
/*
@@ -851,13 +1060,13 @@ static void delete_endpoint(void *data)
struct cxl_port *parent_port;
struct device *parent;
- parent_port = cxl_mem_find_port(cxlmd);
+ parent_port = cxl_mem_find_port(cxlmd, NULL);
if (!parent_port)
goto out;
parent = &parent_port->dev;
device_lock(parent);
- if (parent->driver && endpoint->uport) {
+ if (parent->driver && !endpoint->dead) {
devm_release_action(parent, cxl_unlink_uport, endpoint);
devm_release_action(parent, unregister_port, endpoint);
}
@@ -883,21 +1092,70 @@ EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, CXL);
* for a port to be unregistered is when all memdevs beneath that port have gone
* through ->remove(). This "bottom-up" removal selectively removes individual
* child ports manually. This depends on devm_cxl_add_port() to not change its
- * devm action registration order.
+ * devm action registration order, and for dports to have already been
+ * destroyed by reap_dports().
*/
-static void delete_switch_port(struct cxl_port *port, struct list_head *dports)
+static void delete_switch_port(struct cxl_port *port)
+{
+ devm_release_action(port->dev.parent, cxl_unlink_uport, port);
+ devm_release_action(port->dev.parent, unregister_port, port);
+}
+
+static void reap_dports(struct cxl_port *port)
{
- struct cxl_dport *dport, *_d;
+ struct cxl_dport *dport;
+ unsigned long index;
+
+ device_lock_assert(&port->dev);
- list_for_each_entry_safe(dport, _d, dports, list) {
+ xa_for_each(&port->dports, index, dport) {
devm_release_action(&port->dev, cxl_dport_unlink, dport);
devm_release_action(&port->dev, cxl_dport_remove, dport);
devm_kfree(&port->dev, dport);
}
- devm_release_action(port->dev.parent, cxl_unlink_uport, port);
- devm_release_action(port->dev.parent, unregister_port, port);
}
+int devm_cxl_add_endpoint(struct cxl_memdev *cxlmd,
+ struct cxl_dport *parent_dport)
+{
+ struct cxl_port *parent_port = parent_dport->port;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_port *endpoint, *iter, *down;
+ int rc;
+
+ /*
+ * Now that the path to the root is established, record all the
+ * intervening ports in the chain.
+ */
+ for (iter = parent_port, down = NULL; !is_cxl_root(iter);
+ down = iter, iter = to_cxl_port(iter->dev.parent)) {
+ struct cxl_ep *ep;
+
+ ep = cxl_ep_load(iter, cxlmd);
+ ep->next = down;
+ }
+
+ endpoint = devm_cxl_add_port(&parent_port->dev, &cxlmd->dev,
+ cxlds->component_reg_phys, parent_dport);
+ if (IS_ERR(endpoint))
+ return PTR_ERR(endpoint);
+
+ dev_dbg(&cxlmd->dev, "add: %s\n", dev_name(&endpoint->dev));
+
+ rc = cxl_endpoint_autoremove(cxlmd, endpoint);
+ if (rc)
+ return rc;
+
+ if (!endpoint->dev.driver) {
+ dev_err(&cxlmd->dev, "%s failed probe\n",
+ dev_name(&endpoint->dev));
+ return -ENXIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_add_endpoint, CXL);
+
static void cxl_detach_ep(void *data)
{
struct cxl_memdev *cxlmd = data;
@@ -906,13 +1164,13 @@ static void cxl_detach_ep(void *data)
for (iter = &cxlmd->dev; iter; iter = grandparent(iter)) {
struct device *dport_dev = grandparent(iter);
struct cxl_port *port, *parent_port;
- LIST_HEAD(reap_dports);
struct cxl_ep *ep;
+ bool died = false;
if (!dport_dev)
break;
- port = find_cxl_port(dport_dev);
+ port = find_cxl_port(dport_dev, NULL);
if (!port)
continue;
@@ -936,26 +1194,27 @@ static void cxl_detach_ep(void *data)
}
device_lock(&port->dev);
- ep = find_ep(port, &cxlmd->dev);
+ ep = cxl_ep_load(port, cxlmd);
dev_dbg(&cxlmd->dev, "disconnect %s from %s\n",
ep ? dev_name(ep->ep) : "", dev_name(&port->dev));
- cxl_ep_release(ep);
- if (ep && !port->dead && list_empty(&port->endpoints) &&
+ cxl_ep_remove(port, ep);
+ if (ep && !port->dead && xa_empty(&port->endpoints) &&
!is_cxl_root(parent_port)) {
/*
* This was the last ep attached to a dynamically
* enumerated port. Block new cxl_add_ep() and garbage
* collect the port.
*/
+ died = true;
port->dead = true;
- list_splice_init(&port->dports, &reap_dports);
+ reap_dports(port);
}
device_unlock(&port->dev);
- if (!list_empty(&reap_dports)) {
+ if (died) {
dev_dbg(&cxlmd->dev, "delete %s\n",
dev_name(&port->dev));
- delete_switch_port(port, &reap_dports);
+ delete_switch_port(port);
}
put_device(&port->dev);
device_unlock(&parent_port->dev);
@@ -986,6 +1245,7 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd,
{
struct device *dparent = grandparent(dport_dev);
struct cxl_port *port, *parent_port = NULL;
+ struct cxl_dport *dport, *parent_dport;
resource_size_t component_reg_phys;
int rc;
@@ -1000,7 +1260,7 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd,
return -ENXIO;
}
- parent_port = find_cxl_port(dparent);
+ parent_port = find_cxl_port(dparent, &parent_dport);
if (!parent_port) {
/* iterate to create this parent_port */
return -EAGAIN;
@@ -1015,13 +1275,14 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd,
goto out;
}
- port = find_cxl_port_at(parent_port, dport_dev);
+ port = find_cxl_port_at(parent_port, dport_dev, &dport);
if (!port) {
component_reg_phys = find_component_registers(uport_dev);
port = devm_cxl_add_port(&parent_port->dev, uport_dev,
- component_reg_phys, parent_port);
+ component_reg_phys, parent_dport);
+ /* retry find to pick up the new dport information */
if (!IS_ERR(port))
- get_device(&port->dev);
+ port = find_cxl_port_at(parent_port, dport_dev, &dport);
}
out:
device_unlock(&parent_port->dev);
@@ -1031,8 +1292,8 @@ out:
else {
dev_dbg(&cxlmd->dev, "add to new port %s:%s\n",
dev_name(&port->dev), dev_name(port->uport));
- rc = cxl_add_ep(port, &cxlmd->dev);
- if (rc == -EEXIST) {
+ rc = cxl_add_ep(dport, &cxlmd->dev);
+ if (rc == -EBUSY) {
/*
* "can't" happen, but this error code means
* something to the caller, so translate it.
@@ -1065,6 +1326,7 @@ retry:
for (iter = dev; iter; iter = grandparent(iter)) {
struct device *dport_dev = grandparent(iter);
struct device *uport_dev;
+ struct cxl_dport *dport;
struct cxl_port *port;
if (!dport_dev)
@@ -1080,12 +1342,12 @@ retry:
dev_dbg(dev, "scan: iter: %s dport_dev: %s parent: %s\n",
dev_name(iter), dev_name(dport_dev),
dev_name(uport_dev));
- port = find_cxl_port(dport_dev);
+ port = find_cxl_port(dport_dev, &dport);
if (port) {
dev_dbg(&cxlmd->dev,
"found already registered port %s:%s\n",
dev_name(&port->dev), dev_name(port->uport));
- rc = cxl_add_ep(port, &cxlmd->dev);
+ rc = cxl_add_ep(dport, &cxlmd->dev);
/*
* If the endpoint already exists in the port's list,
@@ -1094,7 +1356,7 @@ retry:
* the parent_port lock as the current port may be being
* reaped.
*/
- if (rc && rc != -EEXIST) {
+ if (rc && rc != -EBUSY) {
put_device(&port->dev);
return rc;
}
@@ -1124,30 +1386,14 @@ retry:
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_ports, CXL);
-struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd)
+struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd,
+ struct cxl_dport **dport)
{
- return find_cxl_port(grandparent(&cxlmd->dev));
+ return find_cxl_port(grandparent(&cxlmd->dev), dport);
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, CXL);
-struct cxl_dport *cxl_find_dport_by_dev(struct cxl_port *port,
- const struct device *dev)
-{
- struct cxl_dport *dport;
-
- device_lock(&port->dev);
- list_for_each_entry(dport, &port->dports, list)
- if (dport->dport == dev) {
- device_unlock(&port->dev);
- return dport;
- }
-
- device_unlock(&port->dev);
- return NULL;
-}
-EXPORT_SYMBOL_NS_GPL(cxl_find_dport_by_dev, CXL);
-
-static int decoder_populate_targets(struct cxl_decoder *cxld,
+static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd,
struct cxl_port *port, int *target_map)
{
int i, rc = 0;
@@ -1157,88 +1403,92 @@ static int decoder_populate_targets(struct cxl_decoder *cxld,
device_lock_assert(&port->dev);
- if (list_empty(&port->dports))
+ if (xa_empty(&port->dports))
return -EINVAL;
- write_seqlock(&cxld->target_lock);
- for (i = 0; i < cxld->nr_targets; i++) {
+ write_seqlock(&cxlsd->target_lock);
+ for (i = 0; i < cxlsd->nr_targets; i++) {
struct cxl_dport *dport = find_dport(port, target_map[i]);
if (!dport) {
rc = -ENXIO;
break;
}
- cxld->target[i] = dport;
+ cxlsd->target[i] = dport;
}
- write_sequnlock(&cxld->target_lock);
+ write_sequnlock(&cxlsd->target_lock);
return rc;
}
+static struct cxl_dport *cxl_hb_modulo(struct cxl_root_decoder *cxlrd, int pos)
+{
+ struct cxl_switch_decoder *cxlsd = &cxlrd->cxlsd;
+ struct cxl_decoder *cxld = &cxlsd->cxld;
+ int iw;
+
+ iw = cxld->interleave_ways;
+ if (dev_WARN_ONCE(&cxld->dev, iw != cxlsd->nr_targets,
+ "misconfigured root decoder\n"))
+ return NULL;
+
+ return cxlrd->cxlsd.target[pos % iw];
+}
+
static struct lock_class_key cxl_decoder_key;
/**
- * cxl_decoder_alloc - Allocate a new CXL decoder
+ * cxl_decoder_init - Common decoder setup / initialization
* @port: owning port of this decoder
- * @nr_targets: downstream targets accessible by this decoder. All upstream
- * ports and root ports must have at least 1 target. Endpoint
- * devices will have 0 targets. Callers wishing to register an
- * endpoint device should specify 0.
- *
- * A port should contain one or more decoders. Each of those decoders enable
- * some address space for CXL.mem utilization. A decoder is expected to be
- * configured by the caller before registering.
+ * @cxld: common decoder properties to initialize
*
- * Return: A new cxl decoder to be registered by cxl_decoder_add(). The decoder
- * is initialized to be a "passthrough" decoder.
+ * A port may contain one or more decoders. Each of those decoders
+ * enables some address space for CXL.mem utilization. A decoder is
+ * expected to be configured by the caller before registering via
+ * cxl_decoder_add()
*/
-static struct cxl_decoder *cxl_decoder_alloc(struct cxl_port *port,
- unsigned int nr_targets)
+static int cxl_decoder_init(struct cxl_port *port, struct cxl_decoder *cxld)
{
- struct cxl_decoder *cxld;
struct device *dev;
- int rc = 0;
-
- if (nr_targets > CXL_DECODER_MAX_INTERLEAVE)
- return ERR_PTR(-EINVAL);
-
- cxld = kzalloc(struct_size(cxld, target, nr_targets), GFP_KERNEL);
- if (!cxld)
- return ERR_PTR(-ENOMEM);
+ int rc;
rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
if (rc < 0)
- goto err;
+ return rc;
/* need parent to stick around to release the id */
get_device(&port->dev);
cxld->id = rc;
- cxld->nr_targets = nr_targets;
- seqlock_init(&cxld->target_lock);
dev = &cxld->dev;
device_initialize(dev);
lockdep_set_class(&dev->mutex, &cxl_decoder_key);
device_set_pm_not_required(dev);
dev->parent = &port->dev;
dev->bus = &cxl_bus_type;
- if (is_cxl_root(port))
- cxld->dev.type = &cxl_decoder_root_type;
- else if (is_cxl_endpoint(port))
- cxld->dev.type = &cxl_decoder_endpoint_type;
- else
- cxld->dev.type = &cxl_decoder_switch_type;
/* Pre initialize an "empty" decoder */
cxld->interleave_ways = 1;
cxld->interleave_granularity = PAGE_SIZE;
cxld->target_type = CXL_DECODER_EXPANDER;
- cxld->platform_res = (struct resource)DEFINE_RES_MEM(0, 0);
+ cxld->hpa_range = (struct range) {
+ .start = 0,
+ .end = -1,
+ };
- return cxld;
-err:
- kfree(cxld);
- return ERR_PTR(rc);
+ return 0;
+}
+
+static int cxl_switch_decoder_init(struct cxl_port *port,
+ struct cxl_switch_decoder *cxlsd,
+ int nr_targets)
+{
+ if (nr_targets > CXL_DECODER_MAX_INTERLEAVE)
+ return -EINVAL;
+
+ cxlsd->nr_targets = nr_targets;
+ seqlock_init(&cxlsd->target_lock);
+ return cxl_decoder_init(port, &cxlsd->cxld);
}
/**
@@ -1251,13 +1501,46 @@ err:
* firmware description of CXL resources into a CXL standard decode
* topology.
*/
-struct cxl_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
- unsigned int nr_targets)
+struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
+ unsigned int nr_targets)
{
+ struct cxl_root_decoder *cxlrd;
+ struct cxl_switch_decoder *cxlsd;
+ struct cxl_decoder *cxld;
+ int rc;
+
if (!is_cxl_root(port))
return ERR_PTR(-EINVAL);
- return cxl_decoder_alloc(port, nr_targets);
+ cxlrd = kzalloc(struct_size(cxlrd, cxlsd.target, nr_targets),
+ GFP_KERNEL);
+ if (!cxlrd)
+ return ERR_PTR(-ENOMEM);
+
+ cxlsd = &cxlrd->cxlsd;
+ rc = cxl_switch_decoder_init(port, cxlsd, nr_targets);
+ if (rc) {
+ kfree(cxlrd);
+ return ERR_PTR(rc);
+ }
+
+ cxlrd->calc_hb = cxl_hb_modulo;
+
+ cxld = &cxlsd->cxld;
+ cxld->dev.type = &cxl_decoder_root_type;
+ /*
+ * cxl_root_decoder_release() special cases negative ids to
+ * detect memregion_alloc() failures.
+ */
+ atomic_set(&cxlrd->region_id, -1);
+ rc = memregion_alloc(GFP_KERNEL);
+ if (rc < 0) {
+ put_device(&cxld->dev);
+ return ERR_PTR(rc);
+ }
+
+ atomic_set(&cxlrd->region_id, rc);
+ return cxlrd;
}
EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, CXL);
@@ -1272,13 +1555,29 @@ EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, CXL);
* that sit between Switch Upstream Ports / Switch Downstream Ports and
* Host Bridges / Root Ports.
*/
-struct cxl_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
- unsigned int nr_targets)
+struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
+ unsigned int nr_targets)
{
+ struct cxl_switch_decoder *cxlsd;
+ struct cxl_decoder *cxld;
+ int rc;
+
if (is_cxl_root(port) || is_cxl_endpoint(port))
return ERR_PTR(-EINVAL);
- return cxl_decoder_alloc(port, nr_targets);
+ cxlsd = kzalloc(struct_size(cxlsd, target, nr_targets), GFP_KERNEL);
+ if (!cxlsd)
+ return ERR_PTR(-ENOMEM);
+
+ rc = cxl_switch_decoder_init(port, cxlsd, nr_targets);
+ if (rc) {
+ kfree(cxlsd);
+ return ERR_PTR(rc);
+ }
+
+ cxld = &cxlsd->cxld;
+ cxld->dev.type = &cxl_decoder_switch_type;
+ return cxlsd;
}
EXPORT_SYMBOL_NS_GPL(cxl_switch_decoder_alloc, CXL);
@@ -1288,18 +1587,35 @@ EXPORT_SYMBOL_NS_GPL(cxl_switch_decoder_alloc, CXL);
*
* Return: A new cxl decoder to be registered by cxl_decoder_add()
*/
-struct cxl_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port)
+struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port)
{
+ struct cxl_endpoint_decoder *cxled;
+ struct cxl_decoder *cxld;
+ int rc;
+
if (!is_cxl_endpoint(port))
return ERR_PTR(-EINVAL);
- return cxl_decoder_alloc(port, 0);
+ cxled = kzalloc(sizeof(*cxled), GFP_KERNEL);
+ if (!cxled)
+ return ERR_PTR(-ENOMEM);
+
+ cxled->pos = -1;
+ cxld = &cxled->cxld;
+ rc = cxl_decoder_init(port, cxld);
+ if (rc) {
+ kfree(cxled);
+ return ERR_PTR(rc);
+ }
+
+ cxld->dev.type = &cxl_decoder_endpoint_type;
+ return cxled;
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, CXL);
/**
* cxl_decoder_add_locked - Add a decoder with targets
- * @cxld: The cxl decoder allocated by cxl_decoder_alloc()
+ * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
* @target_map: A list of downstream ports that this decoder can direct memory
* traffic to. These numbers should correspond with the port number
* in the PCIe Link Capabilities structure.
@@ -1335,7 +1651,9 @@ int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map)
port = to_cxl_port(cxld->dev.parent);
if (!is_endpoint_decoder(dev)) {
- rc = decoder_populate_targets(cxld, port, target_map);
+ struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);
+
+ rc = decoder_populate_targets(cxlsd, port, target_map);
if (rc && (cxld->flags & CXL_DECODER_F_ENABLE)) {
dev_err(&port->dev,
"Failed to populate active decoder targets\n");
@@ -1347,20 +1665,13 @@ int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map)
if (rc)
return rc;
- /*
- * Platform decoder resources should show up with a reasonable name. All
- * other resources are just sub ranges within the main decoder resource.
- */
- if (is_root_decoder(dev))
- cxld->platform_res.name = dev_name(dev);
-
return device_add(dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, CXL);
/**
* cxl_decoder_add - Add a decoder with targets
- * @cxld: The cxl decoder allocated by cxl_decoder_alloc()
+ * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
* @target_map: A list of downstream ports that this decoder can direct memory
* traffic to. These numbers should correspond with the port number
* in the PCIe Link Capabilities structure.
@@ -1394,6 +1705,13 @@ EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL);
static void cxld_unregister(void *dev)
{
+ struct cxl_endpoint_decoder *cxled;
+
+ if (is_endpoint_decoder(dev)) {
+ cxled = to_cxl_endpoint_decoder(dev);
+ cxl_decoder_kill_region(cxled);
+ }
+
device_unregister(dev);
}
@@ -1521,10 +1839,20 @@ struct bus_type cxl_bus_type = {
};
EXPORT_SYMBOL_NS_GPL(cxl_bus_type, CXL);
+static struct dentry *cxl_debugfs;
+
+struct dentry *cxl_debugfs_create_dir(const char *dir)
+{
+ return debugfs_create_dir(dir, cxl_debugfs);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_debugfs_create_dir, CXL);
+
static __init int cxl_core_init(void)
{
int rc;
+ cxl_debugfs = debugfs_create_dir("cxl", NULL);
+
cxl_mbox_init();
rc = cxl_memdev_init();
@@ -1541,22 +1869,28 @@ static __init int cxl_core_init(void)
if (rc)
goto err_bus;
+ rc = cxl_region_init();
+ if (rc)
+ goto err_region;
+
return 0;
+err_region:
+ bus_unregister(&cxl_bus_type);
err_bus:
destroy_workqueue(cxl_bus_wq);
err_wq:
cxl_memdev_exit();
- cxl_mbox_exit();
return rc;
}
static void cxl_core_exit(void)
{
+ cxl_region_exit();
bus_unregister(&cxl_bus_type);
destroy_workqueue(cxl_bus_wq);
cxl_memdev_exit();
- cxl_mbox_exit();
+ debugfs_remove_recursive(cxl_debugfs);
}
module_init(cxl_core_init);
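
The port.c changes above split the old catch-all struct cxl_decoder into endpoint, switch, and root variants. A simplified sketch of the nesting that the container_of() conversions rely on (the sketch_* names are placeholders, not the exact cxl.h layout):

/*
 * Simplified sketch, not the exact cxl.h definitions: a root decoder
 * embeds a switch decoder, which embeds the common decoder that owns
 * the struct device registered on the CXL bus.
 */
#include <linux/container_of.h>
#include <linux/device.h>
#include <linux/atomic.h>

struct sketch_decoder {
	struct device dev;		/* start, size, interleave_* attrs */
};

struct sketch_switch_decoder {
	struct sketch_decoder cxld;	/* &cxld.dev is what gets registered */
	int nr_targets;
};

struct sketch_root_decoder {
	struct sketch_switch_decoder cxlsd;
	atomic_t region_id;
};

/* device -> root decoder, mirroring to_cxl_root_decoder() in the hunk */
static struct sketch_root_decoder *to_sketch_root_decoder(struct device *dev)
{
	return container_of(dev, struct sketch_root_decoder, cxlsd.cxld.dev);
}

Because each release() callback now knows its concrete type, cxl_root_decoder_release() can free the region_id and kfree() the outermost object, rather than going through the old single cxl_decoder_release().
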
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
new file mode 100644
index 000000000000..401148016978
--- /dev/null
+++ b/drivers/cxl/core/region.c
@@ -0,0 +1,1896 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
+#include <linux/memregion.h>
+#include <linux/genalloc.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+#include <linux/idr.h>
+#include <cxlmem.h>
+#include <cxl.h>
+#include "core.h"
+
+/**
+ * DOC: cxl core region
+ *
+ * CXL Regions represent mapped memory capacity in system physical address
+ * space. Whereas the CXL Root Decoders identify the bounds of potential CXL
+ * Memory ranges, Regions represent the capacity actively mapped by the HDM
+ * Decoder Capability structures throughout the Host Bridges, Switches, and
+ * Endpoints in the topology.
+ *
+ * Region configuration has ordering constraints. UUID may be set at any time
+ * but is only visible for persistent regions.
+ * 1. Interleave granularity
+ * 2. Interleave size
+ * 3. Decoder targets
+ */
+
+/*
+ * All changes to the interleave configuration occur with this lock held
+ * for write.
+ */
+static DECLARE_RWSEM(cxl_region_rwsem);
+
+static struct cxl_region *to_cxl_region(struct device *dev);
+
+static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ ssize_t rc;
+
+ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ rc = sysfs_emit(buf, "%pUb\n", &p->uuid);
+ up_read(&cxl_region_rwsem);
+
+ return rc;
+}
+
+static int is_dup(struct device *match, void *data)
+{
+ struct cxl_region_params *p;
+ struct cxl_region *cxlr;
+ uuid_t *uuid = data;
+
+ if (!is_cxl_region(match))
+ return 0;
+
+ lockdep_assert_held(&cxl_region_rwsem);
+ cxlr = to_cxl_region(match);
+ p = &cxlr->params;
+
+ if (uuid_equal(&p->uuid, uuid)) {
+ dev_dbg(match, "already has uuid: %pUb\n", uuid);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static ssize_t uuid_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ uuid_t temp;
+ ssize_t rc;
+
+ if (len != UUID_STRING_LEN + 1)
+ return -EINVAL;
+
+ rc = uuid_parse(buf, &temp);
+ if (rc)
+ return rc;
+
+ if (uuid_is_null(&temp))
+ return -EINVAL;
+
+ rc = down_write_killable(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+
+ if (uuid_equal(&p->uuid, &temp))
+ goto out;
+
+ rc = -EBUSY;
+ if (p->state >= CXL_CONFIG_ACTIVE)
+ goto out;
+
+ rc = bus_for_each_dev(&cxl_bus_type, NULL, &temp, is_dup);
+ if (rc < 0)
+ goto out;
+
+ uuid_copy(&p->uuid, &temp);
+out:
+ up_write(&cxl_region_rwsem);
+
+ if (rc)
+ return rc;
+ return len;
+}
+static DEVICE_ATTR_RW(uuid);
+
+static struct cxl_region_ref *cxl_rr_load(struct cxl_port *port,
+ struct cxl_region *cxlr)
+{
+ return xa_load(&port->regions, (unsigned long)cxlr);
+}
+
+static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ int i;
+
+ for (i = count - 1; i >= 0; i--) {
+ struct cxl_endpoint_decoder *cxled = p->targets[i];
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_port *iter = cxled_to_port(cxled);
+ struct cxl_ep *ep;
+ int rc;
+
+ while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
+ iter = to_cxl_port(iter->dev.parent);
+
+ for (ep = cxl_ep_load(iter, cxlmd); iter;
+ iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
+ struct cxl_region_ref *cxl_rr;
+ struct cxl_decoder *cxld;
+
+ cxl_rr = cxl_rr_load(iter, cxlr);
+ cxld = cxl_rr->decoder;
+ rc = cxld->reset(cxld);
+ if (rc)
+ return rc;
+ }
+
+ rc = cxled->cxld.reset(&cxled->cxld);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int cxl_region_decode_commit(struct cxl_region *cxlr)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ int i, rc = 0;
+
+ for (i = 0; i < p->nr_targets; i++) {
+ struct cxl_endpoint_decoder *cxled = p->targets[i];
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_region_ref *cxl_rr;
+ struct cxl_decoder *cxld;
+ struct cxl_port *iter;
+ struct cxl_ep *ep;
+
+ /* commit bottom up */
+ for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
+ iter = to_cxl_port(iter->dev.parent)) {
+ cxl_rr = cxl_rr_load(iter, cxlr);
+ cxld = cxl_rr->decoder;
+ rc = cxld->commit(cxld);
+ if (rc)
+ break;
+ }
+
+ if (rc) {
+ /* programming @iter failed, teardown */
+ for (ep = cxl_ep_load(iter, cxlmd); ep && iter;
+ iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
+ cxl_rr = cxl_rr_load(iter, cxlr);
+ cxld = cxl_rr->decoder;
+ cxld->reset(cxld);
+ }
+
+ cxled->cxld.reset(&cxled->cxld);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ /* undo the targets that were successfully committed */
+ cxl_region_decode_reset(cxlr, i);
+ return rc;
+}
+
+static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ bool commit;
+ ssize_t rc;
+
+ rc = kstrtobool(buf, &commit);
+ if (rc)
+ return rc;
+
+ rc = down_write_killable(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+
+ /* Already in the requested state? */
+ if (commit && p->state >= CXL_CONFIG_COMMIT)
+ goto out;
+ if (!commit && p->state < CXL_CONFIG_COMMIT)
+ goto out;
+
+ /* Not ready to commit? */
+ if (commit && p->state < CXL_CONFIG_ACTIVE) {
+ rc = -ENXIO;
+ goto out;
+ }
+
+ if (commit)
+ rc = cxl_region_decode_commit(cxlr);
+ else {
+ p->state = CXL_CONFIG_RESET_PENDING;
+ up_write(&cxl_region_rwsem);
+ device_release_driver(&cxlr->dev);
+ down_write(&cxl_region_rwsem);
+
+ /*
+ * The lock was dropped, so need to revalidate that the reset is
+ * still pending.
+ */
+ if (p->state == CXL_CONFIG_RESET_PENDING)
+ rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
+ }
+
+ if (rc)
+ goto out;
+
+ if (commit)
+ p->state = CXL_CONFIG_COMMIT;
+ else if (p->state == CXL_CONFIG_RESET_PENDING)
+ p->state = CXL_CONFIG_ACTIVE;
+
+out:
+ up_write(&cxl_region_rwsem);
+
+ if (rc)
+ return rc;
+ return len;
+}
+
+static ssize_t commit_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ ssize_t rc;
+
+ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ rc = sysfs_emit(buf, "%d\n", p->state >= CXL_CONFIG_COMMIT);
+ up_read(&cxl_region_rwsem);
+
+ return rc;
+}
+static DEVICE_ATTR_RW(commit);
+
+static umode_t cxl_region_visible(struct kobject *kobj, struct attribute *a,
+ int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cxl_region *cxlr = to_cxl_region(dev);
+
+ if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_DECODER_PMEM)
+ return 0;
+ return a->mode;
+}
+
+static ssize_t interleave_ways_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ ssize_t rc;
+
+ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ rc = sysfs_emit(buf, "%d\n", p->interleave_ways);
+ up_read(&cxl_region_rwsem);
+
+ return rc;
+}
+
+static const struct attribute_group *get_cxl_region_target_group(void);
+
+static ssize_t interleave_ways_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
+ struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ unsigned int val, save;
+ int rc;
+ u8 iw;
+
+ rc = kstrtouint(buf, 0, &val);
+ if (rc)
+ return rc;
+
+ rc = ways_to_cxl(val, &iw);
+ if (rc)
+ return rc;
+
+ /*
+ * Even for x3, x9, and x12 interleaves, the region interleave must be a
+ * power of 2 multiple of the host bridge interleave.
+ */
+ if (!is_power_of_2(val / cxld->interleave_ways) ||
+ (val % cxld->interleave_ways)) {
+ dev_dbg(&cxlr->dev, "invalid interleave: %d\n", val);
+ return -EINVAL;
+ }
+
+ rc = down_write_killable(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ save = p->interleave_ways;
+ p->interleave_ways = val;
+ rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group());
+ if (rc)
+ p->interleave_ways = save;
+out:
+ up_write(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ return len;
+}
+static DEVICE_ATTR_RW(interleave_ways);
+
+static ssize_t interleave_granularity_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ ssize_t rc;
+
+ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ rc = sysfs_emit(buf, "%d\n", p->interleave_granularity);
+ up_read(&cxl_region_rwsem);
+
+ return rc;
+}
+
+static ssize_t interleave_granularity_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
+ struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ int rc, val;
+ u16 ig;
+
+ rc = kstrtoint(buf, 0, &val);
+ if (rc)
+ return rc;
+
+ rc = granularity_to_cxl(val, &ig);
+ if (rc)
+ return rc;
+
+ /*
+ * When the host-bridge is interleaved, disallow region granularity !=
+ * root granularity. Regions with a granularity less than the root
+ * interleave result in needing multiple endpoints to support a single
+ * slot in the interleave (possible to support in the future). Regions
+ * with a granularity greater than the root interleave result in invalid
+ * DPA translations (invalid to support).
+ */
+ if (cxld->interleave_ways > 1 && val != cxld->interleave_granularity)
+ return -EINVAL;
+
+ rc = down_write_killable(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ p->interleave_granularity = val;
+out:
+ up_write(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ return len;
+}
+static DEVICE_ATTR_RW(interleave_granularity);
+
+static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ u64 resource = -1ULL;
+ ssize_t rc;
+
+ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ if (p->res)
+ resource = p->res->start;
+ rc = sysfs_emit(buf, "%#llx\n", resource);
+ up_read(&cxl_region_rwsem);
+
+ return rc;
+}
+static DEVICE_ATTR_RO(resource);
+
+static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
+ struct cxl_region_params *p = &cxlr->params;
+ struct resource *res;
+ u32 remainder = 0;
+
+ lockdep_assert_held_write(&cxl_region_rwsem);
+
+ /* Nothing to do... */
+ if (p->res && resource_size(p->res) == size)
+ return 0;
+
+ /* To change size the old size must be freed first */
+ if (p->res)
+ return -EBUSY;
+
+ if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE)
+ return -EBUSY;
+
+ /* ways, granularity and uuid (if PMEM) need to be set before HPA */
+ if (!p->interleave_ways || !p->interleave_granularity ||
+ (cxlr->mode == CXL_DECODER_PMEM && uuid_is_null(&p->uuid)))
+ return -ENXIO;
+
+ div_u64_rem(size, SZ_256M * p->interleave_ways, &remainder);
+ if (remainder)
+ return -EINVAL;
+
+ res = alloc_free_mem_region(cxlrd->res, size, SZ_256M,
+ dev_name(&cxlr->dev));
+ if (IS_ERR(res)) {
+ dev_dbg(&cxlr->dev, "failed to allocate HPA: %ld\n",
+ PTR_ERR(res));
+ return PTR_ERR(res);
+ }
+
+ p->res = res;
+ p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;
+
+ return 0;
+}
+
+static void cxl_region_iomem_release(struct cxl_region *cxlr)
+{
+ struct cxl_region_params *p = &cxlr->params;
+
+ if (device_is_registered(&cxlr->dev))
+ lockdep_assert_held_write(&cxl_region_rwsem);
+ if (p->res) {
+ remove_resource(p->res);
+ kfree(p->res);
+ p->res = NULL;
+ }
+}
+
+static int free_hpa(struct cxl_region *cxlr)
+{
+ struct cxl_region_params *p = &cxlr->params;
+
+ lockdep_assert_held_write(&cxl_region_rwsem);
+
+ if (!p->res)
+ return 0;
+
+ if (p->state >= CXL_CONFIG_ACTIVE)
+ return -EBUSY;
+
+ cxl_region_iomem_release(cxlr);
+ p->state = CXL_CONFIG_IDLE;
+ return 0;
+}
+
+static ssize_t size_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ u64 val;
+ int rc;
+
+ rc = kstrtou64(buf, 0, &val);
+ if (rc)
+ return rc;
+
+ rc = down_write_killable(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+
+ if (val)
+ rc = alloc_hpa(cxlr, val);
+ else
+ rc = free_hpa(cxlr);
+ up_write(&cxl_region_rwsem);
+
+ if (rc)
+ return rc;
+
+ return len;
+}
+
+static ssize_t size_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ u64 size = 0;
+ ssize_t rc;
+
+ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+ if (p->res)
+ size = resource_size(p->res);
+ rc = sysfs_emit(buf, "%#llx\n", size);
+ up_read(&cxl_region_rwsem);
+
+ return rc;
+}
+static DEVICE_ATTR_RW(size);
+
+static struct attribute *cxl_region_attrs[] = {
+ &dev_attr_uuid.attr,
+ &dev_attr_commit.attr,
+ &dev_attr_interleave_ways.attr,
+ &dev_attr_interleave_granularity.attr,
+ &dev_attr_resource.attr,
+ &dev_attr_size.attr,
+ NULL,
+};
+
+static const struct attribute_group cxl_region_group = {
+ .attrs = cxl_region_attrs,
+ .is_visible = cxl_region_visible,
+};
+
+static size_t show_targetN(struct cxl_region *cxlr, char *buf, int pos)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_endpoint_decoder *cxled;
+ int rc;
+
+ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+
+ if (pos >= p->interleave_ways) {
+ dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
+ p->interleave_ways);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ cxled = p->targets[pos];
+ if (!cxled)
+ rc = sysfs_emit(buf, "\n");
+ else
+ rc = sysfs_emit(buf, "%s\n", dev_name(&cxled->cxld.dev));
+out:
+ up_read(&cxl_region_rwsem);
+
+ return rc;
+}
+
+static int match_free_decoder(struct device *dev, void *data)
+{
+ struct cxl_decoder *cxld;
+ int *id = data;
+
+ if (!is_switch_decoder(dev))
+ return 0;
+
+ cxld = to_cxl_decoder(dev);
+
+ /* enforce ordered allocation */
+ if (cxld->id != *id)
+ return 0;
+
+ if (!cxld->region)
+ return 1;
+
+ (*id)++;
+
+ return 0;
+}
+
+static struct cxl_decoder *cxl_region_find_decoder(struct cxl_port *port,
+ struct cxl_region *cxlr)
+{
+ struct device *dev;
+ int id = 0;
+
+ dev = device_find_child(&port->dev, &id, match_free_decoder);
+ if (!dev)
+ return NULL;
+ /*
+ * This decoder stays registered for as long as the endpoint decoder is
+ * registered, and endpoint decoder unregistration holds the
+ * cxl_region_rwsem over unregister events, so no need to hold on to
+ * this extra reference.
+ */
+ put_device(dev);
+ return to_cxl_decoder(dev);
+}
+
+static struct cxl_region_ref *alloc_region_ref(struct cxl_port *port,
+ struct cxl_region *cxlr)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_region_ref *cxl_rr, *iter;
+ unsigned long index;
+ int rc;
+
+ xa_for_each(&port->regions, index, iter) {
+ struct cxl_region_params *ip = &iter->region->params;
+
+ if (ip->res->start > p->res->start) {
+ dev_dbg(&cxlr->dev,
+ "%s: HPA order violation %s:%pr vs %pr\n",
+ dev_name(&port->dev),
+ dev_name(&iter->region->dev), ip->res, p->res);
+ return ERR_PTR(-EBUSY);
+ }
+ }
+
+ cxl_rr = kzalloc(sizeof(*cxl_rr), GFP_KERNEL);
+ if (!cxl_rr)
+ return ERR_PTR(-ENOMEM);
+ cxl_rr->port = port;
+ cxl_rr->region = cxlr;
+ cxl_rr->nr_targets = 1;
+ xa_init(&cxl_rr->endpoints);
+
+ rc = xa_insert(&port->regions, (unsigned long)cxlr, cxl_rr, GFP_KERNEL);
+ if (rc) {
+ dev_dbg(&cxlr->dev,
+ "%s: failed to track region reference: %d\n",
+ dev_name(&port->dev), rc);
+ kfree(cxl_rr);
+ return ERR_PTR(rc);
+ }
+
+ return cxl_rr;
+}
+
+static void free_region_ref(struct cxl_region_ref *cxl_rr)
+{
+ struct cxl_port *port = cxl_rr->port;
+ struct cxl_region *cxlr = cxl_rr->region;
+ struct cxl_decoder *cxld = cxl_rr->decoder;
+
+ dev_WARN_ONCE(&cxlr->dev, cxld->region != cxlr, "region mismatch\n");
+ if (cxld->region == cxlr) {
+ cxld->region = NULL;
+ put_device(&cxlr->dev);
+ }
+
+ xa_erase(&port->regions, (unsigned long)cxlr);
+ xa_destroy(&cxl_rr->endpoints);
+ kfree(cxl_rr);
+}
+
+static int cxl_rr_ep_add(struct cxl_region_ref *cxl_rr,
+ struct cxl_endpoint_decoder *cxled)
+{
+ int rc;
+ struct cxl_port *port = cxl_rr->port;
+ struct cxl_region *cxlr = cxl_rr->region;
+ struct cxl_decoder *cxld = cxl_rr->decoder;
+ struct cxl_ep *ep = cxl_ep_load(port, cxled_to_memdev(cxled));
+
+ if (ep) {
+ rc = xa_insert(&cxl_rr->endpoints, (unsigned long)cxled, ep,
+ GFP_KERNEL);
+ if (rc)
+ return rc;
+ }
+ cxl_rr->nr_eps++;
+
+ if (!cxld->region) {
+ cxld->region = cxlr;
+ get_device(&cxlr->dev);
+ }
+
+ return 0;
+}
+
+/**
+ * cxl_port_attach_region() - track a region's interest in a port by endpoint
+ * @port: port to add a new region reference 'struct cxl_region_ref'
+ * @cxlr: region to attach to @port
+ * @cxled: endpoint decoder used to create or further pin a region reference
+ * @pos: interleave position of @cxled in @cxlr
+ *
+ * The attach event is an opportunity to validate CXL decode setup
+ * constraints and record metadata needed for programming HDM decoders,
+ * in particular decoder target lists.
+ *
+ * The steps are:
+ *
+ * - validate that there are no other regions with a higher HPA already
+ * associated with @port
+ * - establish a region reference if one is not already present
+ *
+ * - additionally allocate a decoder instance that will host @cxlr on
+ * @port
+ *
+ * - pin the region reference by the endpoint
+ * - account for how many entries in @port's target list are needed to
+ * cover all of the added endpoints.
+ */
+static int cxl_port_attach_region(struct cxl_port *port,
+ struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled, int pos)
+{
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_ep *ep = cxl_ep_load(port, cxlmd);
+ struct cxl_region_ref *cxl_rr;
+ bool nr_targets_inc = false;
+ struct cxl_decoder *cxld;
+ unsigned long index;
+ int rc = -EBUSY;
+
+ lockdep_assert_held_write(&cxl_region_rwsem);
+
+ cxl_rr = cxl_rr_load(port, cxlr);
+ if (cxl_rr) {
+ struct cxl_ep *ep_iter;
+ int found = 0;
+
+ /*
+ * Walk the existing endpoints that have been attached to
+ * @cxlr at @port and see if they share the same 'next' port
+ * in the downstream direction, i.e. endpoints that share a
+ * common upstream switch.
+ */
+ xa_for_each(&cxl_rr->endpoints, index, ep_iter) {
+ if (ep_iter == ep)
+ continue;
+ if (ep_iter->next == ep->next) {
+ found++;
+ break;
+ }
+ }
+
+ /*
+ * New target port, or @port is an endpoint port that always
+ * accounts its own local decode as a target.
+ */
+ if (!found || !ep->next) {
+ cxl_rr->nr_targets++;
+ nr_targets_inc = true;
+ }
+
+ /*
+ * The decoder for @cxlr was allocated when the region was first
+ * attached to @port.
+ */
+ cxld = cxl_rr->decoder;
+ } else {
+ cxl_rr = alloc_region_ref(port, cxlr);
+ if (IS_ERR(cxl_rr)) {
+ dev_dbg(&cxlr->dev,
+ "%s: failed to allocate region reference\n",
+ dev_name(&port->dev));
+ return PTR_ERR(cxl_rr);
+ }
+ nr_targets_inc = true;
+
+ if (port == cxled_to_port(cxled))
+ cxld = &cxled->cxld;
+ else
+ cxld = cxl_region_find_decoder(port, cxlr);
+ if (!cxld) {
+ dev_dbg(&cxlr->dev, "%s: no decoder available\n",
+ dev_name(&port->dev));
+ goto out_erase;
+ }
+
+ if (cxld->region) {
+ dev_dbg(&cxlr->dev, "%s: %s already attached to %s\n",
+ dev_name(&port->dev), dev_name(&cxld->dev),
+ dev_name(&cxld->region->dev));
+ rc = -EBUSY;
+ goto out_erase;
+ }
+
+ cxl_rr->decoder = cxld;
+ }
+
+ rc = cxl_rr_ep_add(cxl_rr, cxled);
+ if (rc) {
+ dev_dbg(&cxlr->dev,
+ "%s: failed to track endpoint %s:%s reference\n",
+ dev_name(&port->dev), dev_name(&cxlmd->dev),
+ dev_name(&cxld->dev));
+ goto out_erase;
+ }
+
+ dev_dbg(&cxlr->dev,
+ "%s:%s %s add: %s:%s @ %d next: %s nr_eps: %d nr_targets: %d\n",
+ dev_name(port->uport), dev_name(&port->dev),
+ dev_name(&cxld->dev), dev_name(&cxlmd->dev),
+ dev_name(&cxled->cxld.dev), pos,
+ ep ? ep->next ? dev_name(ep->next->uport) :
+ dev_name(&cxlmd->dev) :
+ "none",
+ cxl_rr->nr_eps, cxl_rr->nr_targets);
+
+ return 0;
+out_erase:
+ if (nr_targets_inc)
+ cxl_rr->nr_targets--;
+ if (cxl_rr->nr_eps == 0)
+ free_region_ref(cxl_rr);
+ return rc;
+}
+
+static void cxl_port_detach_region(struct cxl_port *port,
+ struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_region_ref *cxl_rr;
+ struct cxl_ep *ep = NULL;
+
+ lockdep_assert_held_write(&cxl_region_rwsem);
+
+ cxl_rr = cxl_rr_load(port, cxlr);
+ if (!cxl_rr)
+ return;
+
+ /*
+ * Endpoint ports do not carry cxl_ep references, and by
+ * definition they never target more than one endpoint.
+ */
+ if (cxl_rr->decoder == &cxled->cxld)
+ cxl_rr->nr_eps--;
+ else
+ ep = xa_erase(&cxl_rr->endpoints, (unsigned long)cxled);
+ if (ep) {
+ struct cxl_ep *ep_iter;
+ unsigned long index;
+ int found = 0;
+
+ cxl_rr->nr_eps--;
+ xa_for_each(&cxl_rr->endpoints, index, ep_iter) {
+ if (ep_iter->next == ep->next) {
+ found++;
+ break;
+ }
+ }
+ if (!found)
+ cxl_rr->nr_targets--;
+ }
+
+ if (cxl_rr->nr_eps == 0)
+ free_region_ref(cxl_rr);
+}
+
+static int check_last_peer(struct cxl_endpoint_decoder *cxled,
+ struct cxl_ep *ep, struct cxl_region_ref *cxl_rr,
+ int distance)
+{
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_region *cxlr = cxl_rr->region;
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_endpoint_decoder *cxled_peer;
+ struct cxl_port *port = cxl_rr->port;
+ struct cxl_memdev *cxlmd_peer;
+ struct cxl_ep *ep_peer;
+ int pos = cxled->pos;
+
+ /*
+ * If this position wants to share a dport with the last endpoint
+ * mapped, then that endpoint, at index 'position - distance', must
+ * also be mapped by this dport.
+ */
+ if (pos < distance) {
+ dev_dbg(&cxlr->dev, "%s:%s: cannot host %s:%s at %d\n",
+ dev_name(port->uport), dev_name(&port->dev),
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
+ return -ENXIO;
+ }
+ cxled_peer = p->targets[pos - distance];
+ cxlmd_peer = cxled_to_memdev(cxled_peer);
+ ep_peer = cxl_ep_load(port, cxlmd_peer);
+ if (ep->dport != ep_peer->dport) {
+ dev_dbg(&cxlr->dev,
+ "%s:%s: %s:%s pos %d mismatched peer %s:%s\n",
+ dev_name(port->uport), dev_name(&port->dev),
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos,
+ dev_name(&cxlmd_peer->dev),
+ dev_name(&cxled_peer->cxld.dev));
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int cxl_port_setup_targets(struct cxl_port *port,
+ struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
+ int parent_iw, parent_ig, ig, iw, rc, inc = 0, pos = cxled->pos;
+ struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
+ struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_ep *ep = cxl_ep_load(port, cxlmd);
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_decoder *cxld = cxl_rr->decoder;
+ struct cxl_switch_decoder *cxlsd;
+ u16 eig, peig;
+ u8 eiw, peiw;
+
+ /*
+ * While root level decoders support x3, x6, x12, switch level
+ * decoders only support powers of 2 up to x16.
+ */
+ if (!is_power_of_2(cxl_rr->nr_targets)) {
+ dev_dbg(&cxlr->dev, "%s:%s: invalid target count %d\n",
+ dev_name(port->uport), dev_name(&port->dev),
+ cxl_rr->nr_targets);
+ return -EINVAL;
+ }
+
+ cxlsd = to_cxl_switch_decoder(&cxld->dev);
+ if (cxl_rr->nr_targets_set) {
+ int i, distance;
+
+ distance = p->nr_targets / cxl_rr->nr_targets;
+ for (i = 0; i < cxl_rr->nr_targets_set; i++)
+ if (ep->dport == cxlsd->target[i]) {
+ rc = check_last_peer(cxled, ep, cxl_rr,
+ distance);
+ if (rc)
+ return rc;
+ goto out_target_set;
+ }
+ goto add_target;
+ }
+
+ if (is_cxl_root(parent_port)) {
+ parent_ig = cxlrd->cxlsd.cxld.interleave_granularity;
+ parent_iw = cxlrd->cxlsd.cxld.interleave_ways;
+ /*
+ * For purposes of address bit routing, use power-of-2 math for
+ * switch ports.
+ */
+ if (!is_power_of_2(parent_iw))
+ parent_iw /= 3;
+ } else {
+ struct cxl_region_ref *parent_rr;
+ struct cxl_decoder *parent_cxld;
+
+ parent_rr = cxl_rr_load(parent_port, cxlr);
+ parent_cxld = parent_rr->decoder;
+ parent_ig = parent_cxld->interleave_granularity;
+ parent_iw = parent_cxld->interleave_ways;
+ }
+
+ rc = granularity_to_cxl(parent_ig, &peig);
+ if (rc) {
+ dev_dbg(&cxlr->dev, "%s:%s: invalid parent granularity: %d\n",
+ dev_name(parent_port->uport),
+ dev_name(&parent_port->dev), parent_ig);
+ return rc;
+ }
+
+ rc = ways_to_cxl(parent_iw, &peiw);
+ if (rc) {
+ dev_dbg(&cxlr->dev, "%s:%s: invalid parent interleave: %d\n",
+ dev_name(parent_port->uport),
+ dev_name(&parent_port->dev), parent_iw);
+ return rc;
+ }
+
+ iw = cxl_rr->nr_targets;
+ rc = ways_to_cxl(iw, &eiw);
+ if (rc) {
+ dev_dbg(&cxlr->dev, "%s:%s: invalid port interleave: %d\n",
+ dev_name(port->uport), dev_name(&port->dev), iw);
+ return rc;
+ }
+
+ /*
+ * If @parent_port is masking address bits, pick the next unused address
+ * bit to route @port's targets.
+ */
+ if (parent_iw > 1 && cxl_rr->nr_targets > 1) {
+ u32 address_bit = max(peig + peiw, eiw + peig);
+
+ eig = address_bit - eiw + 1;
+ } else {
+ eiw = peiw;
+ eig = peig;
+ }
+
+ rc = cxl_to_granularity(eig, &ig);
+ if (rc) {
+ dev_dbg(&cxlr->dev, "%s:%s: invalid interleave: %d\n",
+ dev_name(port->uport), dev_name(&port->dev),
+ 256 << eig);
+ return rc;
+ }
+
+ cxld->interleave_ways = iw;
+ cxld->interleave_granularity = ig;
+ cxld->hpa_range = (struct range) {
+ .start = p->res->start,
+ .end = p->res->end,
+ };
+ dev_dbg(&cxlr->dev, "%s:%s iw: %d ig: %d\n", dev_name(port->uport),
+ dev_name(&port->dev), iw, ig);
+add_target:
+ if (cxl_rr->nr_targets_set == cxl_rr->nr_targets) {
+ dev_dbg(&cxlr->dev,
+ "%s:%s: targets full trying to add %s:%s at %d\n",
+ dev_name(port->uport), dev_name(&port->dev),
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
+ return -ENXIO;
+ }
+ cxlsd->target[cxl_rr->nr_targets_set] = ep->dport;
+ inc = 1;
+out_target_set:
+ cxl_rr->nr_targets_set += inc;
+ dev_dbg(&cxlr->dev, "%s:%s target[%d] = %s for %s:%s @ %d\n",
+ dev_name(port->uport), dev_name(&port->dev),
+ cxl_rr->nr_targets_set - 1, dev_name(ep->dport->dport),
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
+
+ return 0;
+}
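A small worked example of the 'distance' rule that cxl_port_setup_targets() and check_last_peer() apply above (topology and counts are illustrative):

/*
 * Example: a region with p->nr_targets = 4 endpoints that reaches this
 * switch port through cxl_rr->nr_targets = 2 downstream ports gives
 * distance = 4 / 2 = 2. The endpoint at position 2 must then route
 * through the same dport as the endpoint at position 0, and position 3
 * through the same dport as position 1; any mismatch fails the attach
 * with -ENXIO via check_last_peer().
 */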
+
+static void cxl_port_reset_targets(struct cxl_port *port,
+ struct cxl_region *cxlr)
+{
+ struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);
+ struct cxl_decoder *cxld;
+
+ /*
+ * After the last endpoint has been detached, the entire cxl_rr may
+ * now be gone.
+ */
+ if (!cxl_rr)
+ return;
+ cxl_rr->nr_targets_set = 0;
+
+ cxld = cxl_rr->decoder;
+ cxld->hpa_range = (struct range) {
+ .start = 0,
+ .end = -1,
+ };
+}
+
+static void cxl_region_teardown_targets(struct cxl_region *cxlr)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_endpoint_decoder *cxled;
+ struct cxl_memdev *cxlmd;
+ struct cxl_port *iter;
+ struct cxl_ep *ep;
+ int i;
+
+ for (i = 0; i < p->nr_targets; i++) {
+ cxled = p->targets[i];
+ cxlmd = cxled_to_memdev(cxled);
+
+ iter = cxled_to_port(cxled);
+ while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
+ iter = to_cxl_port(iter->dev.parent);
+
+ for (ep = cxl_ep_load(iter, cxlmd); iter;
+ iter = ep->next, ep = cxl_ep_load(iter, cxlmd))
+ cxl_port_reset_targets(iter, cxlr);
+ }
+}
+
+static int cxl_region_setup_targets(struct cxl_region *cxlr)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_endpoint_decoder *cxled;
+ struct cxl_memdev *cxlmd;
+ struct cxl_port *iter;
+ struct cxl_ep *ep;
+ int i, rc;
+
+ for (i = 0; i < p->nr_targets; i++) {
+ cxled = p->targets[i];
+ cxlmd = cxled_to_memdev(cxled);
+
+ iter = cxled_to_port(cxled);
+ while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
+ iter = to_cxl_port(iter->dev.parent);
+
+ /*
+ * Descend the topology tree programming targets while
+ * looking for conflicts.
+ */
+ for (ep = cxl_ep_load(iter, cxlmd); iter;
+ iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
+ rc = cxl_port_setup_targets(iter, cxlr, cxled);
+ if (rc) {
+ cxl_region_teardown_targets(cxlr);
+ return rc;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int cxl_region_attach(struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled, int pos)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_port *ep_port, *root_port, *iter;
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_dport *dport;
+ int i, rc = -ENXIO;
+
+ if (cxled->mode == CXL_DECODER_DEAD) {
+ dev_dbg(&cxlr->dev, "%s dead\n", dev_name(&cxled->cxld.dev));
+ return -ENODEV;
+ }
+
+ /* already full of members, or interleave config not established? */
+ if (p->state > CXL_CONFIG_INTERLEAVE_ACTIVE) {
+ dev_dbg(&cxlr->dev, "region already active\n");
+ return -EBUSY;
+ } else if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) {
+ dev_dbg(&cxlr->dev, "interleave config missing\n");
+ return -ENXIO;
+ }
+
+ if (pos < 0 || pos >= p->interleave_ways) {
+ dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
+ p->interleave_ways);
+ return -ENXIO;
+ }
+
+ if (p->targets[pos] == cxled)
+ return 0;
+
+ if (p->targets[pos]) {
+ struct cxl_endpoint_decoder *cxled_target = p->targets[pos];
+ struct cxl_memdev *cxlmd_target = cxled_to_memdev(cxled_target);
+
+ dev_dbg(&cxlr->dev, "position %d already assigned to %s:%s\n",
+ pos, dev_name(&cxlmd_target->dev),
+ dev_name(&cxled_target->cxld.dev));
+ return -EBUSY;
+ }
+
+ for (i = 0; i < p->interleave_ways; i++) {
+ struct cxl_endpoint_decoder *cxled_target;
+ struct cxl_memdev *cxlmd_target;
+
+ cxled_target = p->targets[pos];
+ if (!cxled_target)
+ continue;
+
+ cxlmd_target = cxled_to_memdev(cxled_target);
+ if (cxlmd_target == cxlmd) {
+ dev_dbg(&cxlr->dev,
+ "%s already specified at position %d via: %s\n",
+ dev_name(&cxlmd->dev), pos,
+ dev_name(&cxled_target->cxld.dev));
+ return -EBUSY;
+ }
+ }
+
+ ep_port = cxled_to_port(cxled);
+ root_port = cxlrd_to_port(cxlrd);
+ dport = cxl_find_dport_by_dev(root_port, ep_port->host_bridge);
+ if (!dport) {
+ dev_dbg(&cxlr->dev, "%s:%s invalid target for %s\n",
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
+ dev_name(cxlr->dev.parent));
+ return -ENXIO;
+ }
+
+ if (cxlrd->calc_hb(cxlrd, pos) != dport) {
+ dev_dbg(&cxlr->dev, "%s:%s invalid target position for %s\n",
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
+ dev_name(&cxlrd->cxlsd.cxld.dev));
+ return -ENXIO;
+ }
+
+ if (cxled->cxld.target_type != cxlr->type) {
+ dev_dbg(&cxlr->dev, "%s:%s type mismatch: %d vs %d\n",
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
+ cxled->cxld.target_type, cxlr->type);
+ return -ENXIO;
+ }
+
+ if (!cxled->dpa_res) {
+ dev_dbg(&cxlr->dev, "%s:%s: missing DPA allocation.\n",
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev));
+ return -ENXIO;
+ }
+
+ if (resource_size(cxled->dpa_res) * p->interleave_ways !=
+ resource_size(p->res)) {
+ dev_dbg(&cxlr->dev,
+ "%s:%s: decoder-size-%#llx * ways-%d != region-size-%#llx\n",
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
+ (u64)resource_size(cxled->dpa_res), p->interleave_ways,
+ (u64)resource_size(p->res));
+ return -EINVAL;
+ }
+
+ for (iter = ep_port; !is_cxl_root(iter);
+ iter = to_cxl_port(iter->dev.parent)) {
+ rc = cxl_port_attach_region(iter, cxlr, cxled, pos);
+ if (rc)
+ goto err;
+ }
+
+ p->targets[pos] = cxled;
+ cxled->pos = pos;
+ p->nr_targets++;
+
+ if (p->nr_targets == p->interleave_ways) {
+ rc = cxl_region_setup_targets(cxlr);
+ if (rc)
+ goto err_decrement;
+ p->state = CXL_CONFIG_ACTIVE;
+ }
+
+ cxled->cxld.interleave_ways = p->interleave_ways;
+ cxled->cxld.interleave_granularity = p->interleave_granularity;
+ cxled->cxld.hpa_range = (struct range) {
+ .start = p->res->start,
+ .end = p->res->end,
+ };
+
+ return 0;
+
+err_decrement:
+ p->nr_targets--;
+err:
+ for (iter = ep_port; !is_cxl_root(iter);
+ iter = to_cxl_port(iter->dev.parent))
+ cxl_port_detach_region(iter, cxlr, cxled);
+ return rc;
+}
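A worked instance of the size invariant enforced in cxl_region_attach() above (sizes are illustrative):

/*
 * Example: a 2-way region whose HPA window (p->res) spans 512 MiB
 * requires each endpoint decoder to carry a 256 MiB DPA allocation:
 *
 *   resource_size(cxled->dpa_res) * p->interleave_ways == resource_size(p->res)
 *              256 MiB            *         2          ==       512 MiB
 *
 * otherwise the attach is rejected with -EINVAL.
 */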
+
+static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_port *iter, *ep_port = cxled_to_port(cxled);
+ struct cxl_region *cxlr = cxled->cxld.region;
+ struct cxl_region_params *p;
+ int rc = 0;
+
+ lockdep_assert_held_write(&cxl_region_rwsem);
+
+ if (!cxlr)
+ return 0;
+
+ p = &cxlr->params;
+ get_device(&cxlr->dev);
+
+ if (p->state > CXL_CONFIG_ACTIVE) {
+ /*
+ * TODO: tear down all impacted regions if a device is
+ * removed out of order
+ */
+ rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
+ if (rc)
+ goto out;
+ p->state = CXL_CONFIG_ACTIVE;
+ }
+
+ for (iter = ep_port; !is_cxl_root(iter);
+ iter = to_cxl_port(iter->dev.parent))
+ cxl_port_detach_region(iter, cxlr, cxled);
+
+ if (cxled->pos < 0 || cxled->pos >= p->interleave_ways ||
+ p->targets[cxled->pos] != cxled) {
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+
+ dev_WARN_ONCE(&cxlr->dev, 1, "expected %s:%s at position %d\n",
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
+ cxled->pos);
+ goto out;
+ }
+
+ if (p->state == CXL_CONFIG_ACTIVE) {
+ p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;
+ cxl_region_teardown_targets(cxlr);
+ }
+ p->targets[cxled->pos] = NULL;
+ p->nr_targets--;
+ cxled->cxld.hpa_range = (struct range) {
+ .start = 0,
+ .end = -1,
+ };
+
+ /* notify the region driver that one of its targets has departed */
+ up_write(&cxl_region_rwsem);
+ device_release_driver(&cxlr->dev);
+ down_write(&cxl_region_rwsem);
+out:
+ put_device(&cxlr->dev);
+ return rc;
+}
+
+void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)
+{
+ down_write(&cxl_region_rwsem);
+ cxled->mode = CXL_DECODER_DEAD;
+ cxl_region_detach(cxled);
+ up_write(&cxl_region_rwsem);
+}
+
+static int attach_target(struct cxl_region *cxlr, const char *decoder, int pos)
+{
+ struct device *dev;
+ int rc;
+
+ dev = bus_find_device_by_name(&cxl_bus_type, NULL, decoder);
+ if (!dev)
+ return -ENODEV;
+
+ if (!is_endpoint_decoder(dev)) {
+ put_device(dev);
+ return -EINVAL;
+ }
+
+ rc = down_write_killable(&cxl_region_rwsem);
+ if (rc)
+ goto out;
+ down_read(&cxl_dpa_rwsem);
+ rc = cxl_region_attach(cxlr, to_cxl_endpoint_decoder(dev), pos);
+ up_read(&cxl_dpa_rwsem);
+ up_write(&cxl_region_rwsem);
+out:
+ put_device(dev);
+ return rc;
+}
+
+static int detach_target(struct cxl_region *cxlr, int pos)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ int rc;
+
+ rc = down_write_killable(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+
+ if (pos >= p->interleave_ways) {
+ dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
+ p->interleave_ways);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ if (!p->targets[pos]) {
+ rc = 0;
+ goto out;
+ }
+
+ rc = cxl_region_detach(p->targets[pos]);
+out:
+ up_write(&cxl_region_rwsem);
+ return rc;
+}
+
+static size_t store_targetN(struct cxl_region *cxlr, const char *buf, int pos,
+ size_t len)
+{
+ int rc;
+
+ if (sysfs_streq(buf, "\n"))
+ rc = detach_target(cxlr, pos);
+ else
+ rc = attach_target(cxlr, buf, pos);
+
+ if (rc < 0)
+ return rc;
+ return len;
+}
+
+#define TARGET_ATTR_RW(n) \
+static ssize_t target##n##_show( \
+ struct device *dev, struct device_attribute *attr, char *buf) \
+{ \
+ return show_targetN(to_cxl_region(dev), buf, (n)); \
+} \
+static ssize_t target##n##_store(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ return store_targetN(to_cxl_region(dev), buf, (n), len); \
+} \
+static DEVICE_ATTR_RW(target##n)
+
+TARGET_ATTR_RW(0);
+TARGET_ATTR_RW(1);
+TARGET_ATTR_RW(2);
+TARGET_ATTR_RW(3);
+TARGET_ATTR_RW(4);
+TARGET_ATTR_RW(5);
+TARGET_ATTR_RW(6);
+TARGET_ATTR_RW(7);
+TARGET_ATTR_RW(8);
+TARGET_ATTR_RW(9);
+TARGET_ATTR_RW(10);
+TARGET_ATTR_RW(11);
+TARGET_ATTR_RW(12);
+TARGET_ATTR_RW(13);
+TARGET_ATTR_RW(14);
+TARGET_ATTR_RW(15);
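For reference, a sketch of what the TARGET_ATTR_RW() macro above expands to for n = 0 (formatting aside):

static ssize_t target0_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return show_targetN(to_cxl_region(dev), buf, (0));
}
static ssize_t target0_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	return store_targetN(to_cxl_region(dev), buf, (0), len);
}
static DEVICE_ATTR_RW(target0);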
+
+static struct attribute *target_attrs[] = {
+ &dev_attr_target0.attr,
+ &dev_attr_target1.attr,
+ &dev_attr_target2.attr,
+ &dev_attr_target3.attr,
+ &dev_attr_target4.attr,
+ &dev_attr_target5.attr,
+ &dev_attr_target6.attr,
+ &dev_attr_target7.attr,
+ &dev_attr_target8.attr,
+ &dev_attr_target9.attr,
+ &dev_attr_target10.attr,
+ &dev_attr_target11.attr,
+ &dev_attr_target12.attr,
+ &dev_attr_target13.attr,
+ &dev_attr_target14.attr,
+ &dev_attr_target15.attr,
+ NULL,
+};
+
+static umode_t cxl_region_target_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+
+ if (n < p->interleave_ways)
+ return a->mode;
+ return 0;
+}
+
+static const struct attribute_group cxl_region_target_group = {
+ .attrs = target_attrs,
+ .is_visible = cxl_region_target_visible,
+};
+
+static const struct attribute_group *get_cxl_region_target_group(void)
+{
+ return &cxl_region_target_group;
+}
+
+static const struct attribute_group *region_groups[] = {
+ &cxl_base_attribute_group,
+ &cxl_region_group,
+ &cxl_region_target_group,
+ NULL,
+};
+
+static void cxl_region_release(struct device *dev)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+
+ memregion_free(cxlr->id);
+ kfree(cxlr);
+}
+
+const struct device_type cxl_region_type = {
+ .name = "cxl_region",
+ .release = cxl_region_release,
+ .groups = region_groups
+};
+
+bool is_cxl_region(struct device *dev)
+{
+ return dev->type == &cxl_region_type;
+}
+EXPORT_SYMBOL_NS_GPL(is_cxl_region, CXL);
+
+static struct cxl_region *to_cxl_region(struct device *dev)
+{
+ if (dev_WARN_ONCE(dev, dev->type != &cxl_region_type,
+ "not a cxl_region device\n"))
+ return NULL;
+
+ return container_of(dev, struct cxl_region, dev);
+}
+
+static void unregister_region(void *dev)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+
+ device_del(dev);
+ cxl_region_iomem_release(cxlr);
+ put_device(dev);
+}
+
+static struct lock_class_key cxl_region_key;
+
+static struct cxl_region *cxl_region_alloc(struct cxl_root_decoder *cxlrd, int id)
+{
+ struct cxl_region *cxlr;
+ struct device *dev;
+
+ cxlr = kzalloc(sizeof(*cxlr), GFP_KERNEL);
+ if (!cxlr) {
+ memregion_free(id);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dev = &cxlr->dev;
+ device_initialize(dev);
+ lockdep_set_class(&dev->mutex, &cxl_region_key);
+ dev->parent = &cxlrd->cxlsd.cxld.dev;
+ device_set_pm_not_required(dev);
+ dev->bus = &cxl_bus_type;
+ dev->type = &cxl_region_type;
+ cxlr->id = id;
+
+ return cxlr;
+}
+
+/**
+ * devm_cxl_add_region - Adds a region to a decoder
+ * @cxlrd: root decoder
+ * @id: memregion id to create; the id is released via memregion_free() on failure
+ * @mode: mode for the endpoint decoders of this region
+ * @type: select whether this is an expander or accelerator (type-2 or type-3)
+ *
+ * This is the second step of region initialization. Regions exist within an
+ * address space which is mapped by a @cxlrd.
+ *
+ * Return: a new region added to @cxlrd on success, else an ERR_PTR() encoded
+ * error. The region will be named "regionZ" where Z is the unique region number.
+ */
+static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
+ int id,
+ enum cxl_decoder_mode mode,
+ enum cxl_decoder_type type)
+{
+ struct cxl_port *port = to_cxl_port(cxlrd->cxlsd.cxld.dev.parent);
+ struct cxl_region *cxlr;
+ struct device *dev;
+ int rc;
+
+ cxlr = cxl_region_alloc(cxlrd, id);
+ if (IS_ERR(cxlr))
+ return cxlr;
+ cxlr->mode = mode;
+ cxlr->type = type;
+
+ dev = &cxlr->dev;
+ rc = dev_set_name(dev, "region%d", id);
+ if (rc)
+ goto err;
+
+ rc = device_add(dev);
+ if (rc)
+ goto err;
+
+ rc = devm_add_action_or_reset(port->uport, unregister_region, cxlr);
+ if (rc)
+ return ERR_PTR(rc);
+
+ dev_dbg(port->uport, "%s: created %s\n",
+ dev_name(&cxlrd->cxlsd.cxld.dev), dev_name(dev));
+ return cxlr;
+
+err:
+ put_device(dev);
+ return ERR_PTR(rc);
+}
+
+static ssize_t create_pmem_region_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
+
+ return sysfs_emit(buf, "region%u\n", atomic_read(&cxlrd->region_id));
+}
+
+static ssize_t create_pmem_region_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
+ struct cxl_region *cxlr;
+ int id, rc;
+
+ rc = sscanf(buf, "region%d\n", &id);
+ if (rc != 1)
+ return -EINVAL;
+
+ rc = memregion_alloc(GFP_KERNEL);
+ if (rc < 0)
+ return rc;
+
+ if (atomic_cmpxchg(&cxlrd->region_id, id, rc) != id) {
+ memregion_free(rc);
+ return -EBUSY;
+ }
+
+ cxlr = devm_cxl_add_region(cxlrd, id, CXL_DECODER_PMEM,
+ CXL_DECODER_EXPANDER);
+ if (IS_ERR(cxlr))
+ return PTR_ERR(cxlr);
+
+ return len;
+}
+DEVICE_ATTR_RW(create_pmem_region);
+
+static ssize_t region_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_decoder *cxld = to_cxl_decoder(dev);
+ ssize_t rc;
+
+ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+
+ if (cxld->region)
+ rc = sysfs_emit(buf, "%s\n", dev_name(&cxld->region->dev));
+ else
+ rc = sysfs_emit(buf, "\n");
+ up_read(&cxl_region_rwsem);
+
+ return rc;
+}
+DEVICE_ATTR_RO(region);
+
+static struct cxl_region *
+cxl_find_region_by_name(struct cxl_root_decoder *cxlrd, const char *name)
+{
+ struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
+ struct device *region_dev;
+
+ region_dev = device_find_child_by_name(&cxld->dev, name);
+ if (!region_dev)
+ return ERR_PTR(-ENODEV);
+
+ return to_cxl_region(region_dev);
+}
+
+static ssize_t delete_region_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
+ struct cxl_port *port = to_cxl_port(dev->parent);
+ struct cxl_region *cxlr;
+
+ cxlr = cxl_find_region_by_name(cxlrd, buf);
+ if (IS_ERR(cxlr))
+ return PTR_ERR(cxlr);
+
+ devm_release_action(port->uport, unregister_region, cxlr);
+ put_device(&cxlr->dev);
+
+ return len;
+}
+DEVICE_ATTR_WO(delete_region);
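Taken together, these attributes make up the user-facing provisioning flow for a PMEM region. A minimal sketch of that flow, assuming a root decoder named decoder0.0, endpoint decoders decoder3.0/decoder4.0, and the usual /sys/bus/cxl/devices layout (names, sizes and the uuid step are illustrative):

/*
 *   cd /sys/bus/cxl/devices/decoder0.0
 *   region=$(cat create_pmem_region)      # e.g. "region0"
 *   echo $region > create_pmem_region     # instantiate the region device
 *   cd /sys/bus/cxl/devices/$region
 *   echo 2 > interleave_ways              # while CXL_CONFIG_IDLE
 *   uuidgen > uuid
 *   echo $((512 << 20)) > size            # allocate HPA -> INTERLEAVE_ACTIVE
 *   echo decoder3.0 > target0             # attach endpoint decoders ...
 *   echo decoder4.0 > target1             # ... region goes CXL_CONFIG_ACTIVE
 *   echo 1 > commit                       # program decoders -> CXL_CONFIG_COMMIT
 *   cd /sys/bus/cxl/devices/decoder0.0
 *   echo $region > delete_region          # tear the region back down
 */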
+
+static void cxl_pmem_region_release(struct device *dev)
+{
+ struct cxl_pmem_region *cxlr_pmem = to_cxl_pmem_region(dev);
+ int i;
+
+ for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
+ struct cxl_memdev *cxlmd = cxlr_pmem->mapping[i].cxlmd;
+
+ put_device(&cxlmd->dev);
+ }
+
+ kfree(cxlr_pmem);
+}
+
+static const struct attribute_group *cxl_pmem_region_attribute_groups[] = {
+ &cxl_base_attribute_group,
+ NULL,
+};
+
+const struct device_type cxl_pmem_region_type = {
+ .name = "cxl_pmem_region",
+ .release = cxl_pmem_region_release,
+ .groups = cxl_pmem_region_attribute_groups,
+};
+
+bool is_cxl_pmem_region(struct device *dev)
+{
+ return dev->type == &cxl_pmem_region_type;
+}
+EXPORT_SYMBOL_NS_GPL(is_cxl_pmem_region, CXL);
+
+struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
+{
+ if (dev_WARN_ONCE(dev, !is_cxl_pmem_region(dev),
+ "not a cxl_pmem_region device\n"))
+ return NULL;
+ return container_of(dev, struct cxl_pmem_region, dev);
+}
+EXPORT_SYMBOL_NS_GPL(to_cxl_pmem_region, CXL);
+
+static struct lock_class_key cxl_pmem_region_key;
+
+static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_pmem_region *cxlr_pmem;
+ struct device *dev;
+ int i;
+
+ down_read(&cxl_region_rwsem);
+ if (p->state != CXL_CONFIG_COMMIT) {
+ cxlr_pmem = ERR_PTR(-ENXIO);
+ goto out;
+ }
+
+ cxlr_pmem = kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets),
+ GFP_KERNEL);
+ if (!cxlr_pmem) {
+ cxlr_pmem = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ cxlr_pmem->hpa_range.start = p->res->start;
+ cxlr_pmem->hpa_range.end = p->res->end;
+
+ /* Snapshot the region configuration underneath the cxl_region_rwsem */
+ cxlr_pmem->nr_mappings = p->nr_targets;
+ for (i = 0; i < p->nr_targets; i++) {
+ struct cxl_endpoint_decoder *cxled = p->targets[i];
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
+
+ m->cxlmd = cxlmd;
+ get_device(&cxlmd->dev);
+ m->start = cxled->dpa_res->start;
+ m->size = resource_size(cxled->dpa_res);
+ m->position = i;
+ }
+
+ dev = &cxlr_pmem->dev;
+ cxlr_pmem->cxlr = cxlr;
+ device_initialize(dev);
+ lockdep_set_class(&dev->mutex, &cxl_pmem_region_key);
+ device_set_pm_not_required(dev);
+ dev->parent = &cxlr->dev;
+ dev->bus = &cxl_bus_type;
+ dev->type = &cxl_pmem_region_type;
+out:
+ up_read(&cxl_region_rwsem);
+
+ return cxlr_pmem;
+}
+
+static void cxlr_pmem_unregister(void *dev)
+{
+ device_unregister(dev);
+}
+
+/**
+ * devm_cxl_add_pmem_region() - add a cxl_region-to-nd_region bridge
+ * @cxlr: parent CXL region for this pmem region bridge device
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int devm_cxl_add_pmem_region(struct cxl_region *cxlr)
+{
+ struct cxl_pmem_region *cxlr_pmem;
+ struct device *dev;
+ int rc;
+
+ cxlr_pmem = cxl_pmem_region_alloc(cxlr);
+ if (IS_ERR(cxlr_pmem))
+ return PTR_ERR(cxlr_pmem);
+
+ dev = &cxlr_pmem->dev;
+ rc = dev_set_name(dev, "pmem_region%d", cxlr->id);
+ if (rc)
+ goto err;
+
+ rc = device_add(dev);
+ if (rc)
+ goto err;
+
+ dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent),
+ dev_name(dev));
+
+ return devm_add_action_or_reset(&cxlr->dev, cxlr_pmem_unregister, dev);
+
+err:
+ put_device(dev);
+ return rc;
+}
+
+static int cxl_region_probe(struct device *dev)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ int rc;
+
+ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc) {
+ dev_dbg(&cxlr->dev, "probe interrupted\n");
+ return rc;
+ }
+
+ if (p->state < CXL_CONFIG_COMMIT) {
+ dev_dbg(&cxlr->dev, "config state: %d\n", p->state);
+ rc = -ENXIO;
+ }
+
+ /*
+ * From this point on, any path that changes the region's state away
+ * from CXL_CONFIG_COMMIT is also responsible for releasing the driver.
+ */
+ up_read(&cxl_region_rwsem);
+
+ switch (cxlr->mode) {
+ case CXL_DECODER_PMEM:
+ return devm_cxl_add_pmem_region(cxlr);
+ default:
+ dev_dbg(&cxlr->dev, "unsupported region mode: %d\n",
+ cxlr->mode);
+ return -ENXIO;
+ }
+}
+
+static struct cxl_driver cxl_region_driver = {
+ .name = "cxl_region",
+ .probe = cxl_region_probe,
+ .id = CXL_DEVICE_REGION,
+};
+
+int cxl_region_init(void)
+{
+ return cxl_driver_register(&cxl_region_driver);
+}
+
+void cxl_region_exit(void)
+{
+ cxl_driver_unregister(&cxl_region_driver);
+}
+
+MODULE_IMPORT_NS(CXL);
+MODULE_ALIAS_CXL(CXL_DEVICE_REGION);
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 6799b27c7db2..f680450f0b16 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -7,6 +7,7 @@
#include <linux/libnvdimm.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
+#include <linux/log2.h>
#include <linux/io.h>
/**
@@ -53,9 +54,12 @@
#define CXL_HDM_DECODER0_CTRL_LOCK BIT(8)
#define CXL_HDM_DECODER0_CTRL_COMMIT BIT(9)
#define CXL_HDM_DECODER0_CTRL_COMMITTED BIT(10)
+#define CXL_HDM_DECODER0_CTRL_COMMIT_ERROR BIT(11)
#define CXL_HDM_DECODER0_CTRL_TYPE BIT(12)
#define CXL_HDM_DECODER0_TL_LOW(i) (0x20 * (i) + 0x24)
#define CXL_HDM_DECODER0_TL_HIGH(i) (0x20 * (i) + 0x28)
+#define CXL_HDM_DECODER0_SKIP_LOW(i) CXL_HDM_DECODER0_TL_LOW(i)
+#define CXL_HDM_DECODER0_SKIP_HIGH(i) CXL_HDM_DECODER0_TL_HIGH(i)
static inline int cxl_hdm_decoder_count(u32 cap_hdr)
{
@@ -64,6 +68,57 @@ static inline int cxl_hdm_decoder_count(u32 cap_hdr)
return val ? val * 2 : 1;
}
+/* Encode defined in CXL 2.0 8.2.5.12.7 HDM Decoder Control Register */
+static inline int cxl_to_granularity(u16 ig, unsigned int *val)
+{
+ if (ig > 6)
+ return -EINVAL;
+ *val = 256 << ig;
+ return 0;
+}
+
+/* Encode defined in CXL ECN "3, 6, 12 and 16-way memory Interleaving" */
+static inline int cxl_to_ways(u8 eniw, unsigned int *val)
+{
+ switch (eniw) {
+ case 0 ... 4:
+ *val = 1 << eniw;
+ break;
+ case 8 ... 10:
+ *val = 3 << (eniw - 8);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline int granularity_to_cxl(int g, u16 *ig)
+{
+ if (g > SZ_16K || g < 256 || !is_power_of_2(g))
+ return -EINVAL;
+ *ig = ilog2(g) - 8;
+ return 0;
+}
+
+static inline int ways_to_cxl(unsigned int ways, u8 *iw)
+{
+ if (ways > 16)
+ return -EINVAL;
+ if (is_power_of_2(ways)) {
+ *iw = ilog2(ways);
+ return 0;
+ }
+ if (ways % 3)
+ return -EINVAL;
+ ways /= 3;
+ if (!is_power_of_2(ways))
+ return -EINVAL;
+ *iw = ilog2(ways) + 8;
+ return 0;
+}
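Worked values for these encodings, traced by hand through the helpers above (granularity encoding per CXL 2.0, the x3/x6/x12 ways per the referenced ECN):

/*
 *   granularity_to_cxl(256)  -> eig = 0    cxl_to_granularity(0)  -> 256
 *   granularity_to_cxl(1024) -> eig = 2    cxl_to_granularity(2)  -> 1024
 *   ways_to_cxl(4)           -> eiw = 2    cxl_to_ways(2)         -> 4
 *   ways_to_cxl(6)           -> eiw = 9    cxl_to_ways(9)         -> 6
 *   ways_to_cxl(12)          -> eiw = 10   cxl_to_ways(10)        -> 12
 *   ways_to_cxl(5)           -> -EINVAL (neither a power of 2 nor 3 * 2^n)
 */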
+
/* CXL 2.0 8.2.8.1 Device Capabilities Array Register */
#define CXLDEV_CAP_ARRAY_OFFSET 0x0
#define CXLDEV_CAP_ARRAY_CAP_ID 0
@@ -193,31 +248,77 @@ enum cxl_decoder_type {
*/
#define CXL_DECODER_MAX_INTERLEAVE 16
+#define CXL_DECODER_MIN_GRANULARITY 256
+
/**
- * struct cxl_decoder - CXL address range decode configuration
+ * struct cxl_decoder - Common CXL HDM Decoder Attributes
* @dev: this decoder's device
* @id: kernel device name id
- * @platform_res: address space resources considered by root decoder
- * @decoder_range: address space resources considered by midlevel decoder
+ * @hpa_range: Host physical address range mapped by this decoder
* @interleave_ways: number of cxl_dports in this decode
* @interleave_granularity: data stride per dport
* @target_type: accelerator vs expander (type2 vs type3) selector
+ * @region: currently assigned region for this decoder
* @flags: memory type capabilities and locking
- * @target_lock: coordinate coherent reads of the target list
- * @nr_targets: number of elements in @target
- * @target: active ordered target list in current decoder configuration
- */
+ * @commit: device/decoder-type specific callback to commit settings to hw
+ * @reset: device/decoder-type specific callback to reset hw settings
+ */
struct cxl_decoder {
struct device dev;
int id;
- union {
- struct resource platform_res;
- struct range decoder_range;
- };
+ struct range hpa_range;
int interleave_ways;
int interleave_granularity;
enum cxl_decoder_type target_type;
+ struct cxl_region *region;
unsigned long flags;
+ int (*commit)(struct cxl_decoder *cxld);
+ int (*reset)(struct cxl_decoder *cxld);
+};
+
+/*
+ * CXL_DECODER_DEAD prevents endpoints from being reattached to regions
+ * while cxld_unregister() is running
+ */
+enum cxl_decoder_mode {
+ CXL_DECODER_NONE,
+ CXL_DECODER_RAM,
+ CXL_DECODER_PMEM,
+ CXL_DECODER_MIXED,
+ CXL_DECODER_DEAD,
+};
+
+/**
+ * struct cxl_endpoint_decoder - Endpoint / SPA to DPA decoder
+ * @cxld: base cxl_decoder_object
+ * @dpa_res: actively claimed DPA span of this decoder
+ * @skip: offset into @dpa_res where @cxld.hpa_range maps
+ * @mode: which memory type / access-mode-partition this decoder targets
+ * @pos: interleave position in @cxld.region
+ */
+struct cxl_endpoint_decoder {
+ struct cxl_decoder cxld;
+ struct resource *dpa_res;
+ resource_size_t skip;
+ enum cxl_decoder_mode mode;
+ int pos;
+};
+
+/**
+ * struct cxl_switch_decoder - Switch specific CXL HDM Decoder
+ * @cxld: base cxl_decoder object
+ * @target_lock: coordinate coherent reads of the target list
+ * @nr_targets: number of elements in @target
+ * @target: active ordered target list in current decoder configuration
+ *
+ * The 'switch' decoder type represents the decoder instances of cxl_port's that
+ * route from the root of a CXL memory decode topology to the endpoints. They
+ * come in two flavors: root-level decoders, statically defined by platform
+ * firmware, and mid-level decoders, where interleave-granularity,
+ * interleave-width, and the target list are mutable.
+ */
+struct cxl_switch_decoder {
+ struct cxl_decoder cxld;
seqlock_t target_lock;
int nr_targets;
struct cxl_dport *target[];
@@ -225,6 +326,76 @@ struct cxl_decoder {
/**
+ * struct cxl_root_decoder - Static platform CXL address decoder
+ * @res: host / parent resource for region allocations
+ * @region_id: region id for next region provisioning event
+ * @calc_hb: which host bridge covers the n'th position by granularity
+ * @cxlsd: base cxl switch decoder
+ */
+struct cxl_root_decoder {
+ struct resource *res;
+ atomic_t region_id;
+ struct cxl_dport *(*calc_hb)(struct cxl_root_decoder *cxlrd, int pos);
+ struct cxl_switch_decoder cxlsd;
+};
+
+/*
+ * enum cxl_config_state - State machine for region configuration
+ * @CXL_CONFIG_IDLE: Any sysfs attribute can be written freely
+ * @CXL_CONFIG_INTERLEAVE_ACTIVE: region size has been set, no more
+ * changes to interleave_ways or interleave_granularity
+ * @CXL_CONFIG_ACTIVE: All targets have been added and the region is now
+ * active
+ * @CXL_CONFIG_RESET_PENDING: see commit_store()
+ * @CXL_CONFIG_COMMIT: Soft-config has been committed to hardware
+ */
+enum cxl_config_state {
+ CXL_CONFIG_IDLE,
+ CXL_CONFIG_INTERLEAVE_ACTIVE,
+ CXL_CONFIG_ACTIVE,
+ CXL_CONFIG_RESET_PENDING,
+ CXL_CONFIG_COMMIT,
+};
+
+/**
+ * struct cxl_region_params - region settings
+ * @state: allow the driver to lockdown further parameter changes
+ * @uuid: unique id for persistent regions
+ * @interleave_ways: number of endpoints in the region
+ * @interleave_granularity: capacity each endpoint contributes to a stripe
+ * @res: allocated iomem capacity for this region
+ * @targets: active ordered targets in current decoder configuration
+ * @nr_targets: number of targets
+ *
+ * State transitions are protected by the cxl_region_rwsem
+ */
+struct cxl_region_params {
+ enum cxl_config_state state;
+ uuid_t uuid;
+ int interleave_ways;
+ int interleave_granularity;
+ struct resource *res;
+ struct cxl_endpoint_decoder *targets[CXL_DECODER_MAX_INTERLEAVE];
+ int nr_targets;
+};
+
+/**
+ * struct cxl_region - CXL region
+ * @dev: This region's device
+ * @id: This region's id. Id is globally unique across all regions
+ * @mode: Endpoint decoder allocation / access mode
+ * @type: Endpoint decoder target type
+ * @params: active + config params for the region
+ */
+struct cxl_region {
+ struct device dev;
+ int id;
+ enum cxl_decoder_mode mode;
+ enum cxl_decoder_type type;
+ struct cxl_region_params params;
+};
+
+/**
* enum cxl_nvdimm_brige_state - state machine for managing bus rescans
* @CXL_NVB_NEW: Set at bridge create and after cxl_pmem_wq is destroyed
* @CXL_NVB_DEAD: Set at brige unregistration to preclude async probing
@@ -251,7 +422,26 @@ struct cxl_nvdimm_bridge {
struct cxl_nvdimm {
struct device dev;
struct cxl_memdev *cxlmd;
- struct nvdimm *nvdimm;
+ struct cxl_nvdimm_bridge *bridge;
+ struct cxl_pmem_region *region;
+};
+
+struct cxl_pmem_region_mapping {
+ struct cxl_memdev *cxlmd;
+ struct cxl_nvdimm *cxl_nvd;
+ u64 start;
+ u64 size;
+ int position;
+};
+
+struct cxl_pmem_region {
+ struct device dev;
+ struct cxl_region *cxlr;
+ struct nd_region *nd_region;
+ struct cxl_nvdimm_bridge *bridge;
+ struct range hpa_range;
+ int nr_mappings;
+ struct cxl_pmem_region_mapping mapping[];
};
/**
@@ -260,50 +450,94 @@ struct cxl_nvdimm {
* decode hierarchy.
* @dev: this port's device
* @uport: PCI or platform device implementing the upstream port capability
+ * @host_bridge: Shortcut to the platform attach point for this port
* @id: id for port device-name
* @dports: cxl_dport instances referenced by decoders
* @endpoints: cxl_ep instances, endpoints that are a descendant of this port
+ * @regions: cxl_region_ref instances, regions mapped by this port
+ * @parent_dport: dport that points to this port in the parent
* @decoder_ida: allocator for decoder ids
+ * @hdm_end: track last allocated HDM decoder instance for allocation ordering
+ * @commit_end: cursor to track highest committed decoder for commit ordering
* @component_reg_phys: component register capability base address (optional)
* @dead: last ep has been removed, force port re-creation
* @depth: How deep this port is relative to the root. depth 0 is the root.
+ * @cdat: Cached CDAT data
+ * @cdat_available: Should a CDAT attribute be available in sysfs
*/
struct cxl_port {
struct device dev;
struct device *uport;
+ struct device *host_bridge;
int id;
- struct list_head dports;
- struct list_head endpoints;
+ struct xarray dports;
+ struct xarray endpoints;
+ struct xarray regions;
+ struct cxl_dport *parent_dport;
struct ida decoder_ida;
+ int hdm_end;
+ int commit_end;
resource_size_t component_reg_phys;
bool dead;
unsigned int depth;
+ struct cxl_cdat {
+ void *table;
+ size_t length;
+ } cdat;
+ bool cdat_available;
};
+static inline struct cxl_dport *
+cxl_find_dport_by_dev(struct cxl_port *port, const struct device *dport_dev)
+{
+ return xa_load(&port->dports, (unsigned long)dport_dev);
+}
+
/**
* struct cxl_dport - CXL downstream port
* @dport: PCI bridge or firmware device representing the downstream link
* @port_id: unique hardware identifier for dport in decoder target list
* @component_reg_phys: downstream port component registers
* @port: reference to cxl_port that contains this downstream port
- * @list: node for a cxl_port's list of cxl_dport instances
*/
struct cxl_dport {
struct device *dport;
int port_id;
resource_size_t component_reg_phys;
struct cxl_port *port;
- struct list_head list;
};
/**
* struct cxl_ep - track an endpoint's interest in a port
* @ep: device that hosts a generic CXL endpoint (expander or accelerator)
- * @list: node on port->endpoints list
+ * @dport: which dport routes to this endpoint on @port
+ * @next: cxl switch port across the link attached to @dport, NULL if
+ * attached to an endpoint
*/
struct cxl_ep {
struct device *ep;
- struct list_head list;
+ struct cxl_dport *dport;
+ struct cxl_port *next;
+};
+
+/**
+ * struct cxl_region_ref - track a region's interest in a port
+ * @port: point in topology to install this reference
+ * @decoder: decoder assigned for @region in @port
+ * @region: region for this reference
+ * @endpoints: cxl_ep references for region members beneath @port
+ * @nr_targets_set: track how many targets have been programmed during setup
+ * @nr_eps: number of endpoints beneath @port
+ * @nr_targets: number of distinct targets needed to reach @nr_eps
+ */
+struct cxl_region_ref {
+ struct cxl_port *port;
+ struct cxl_decoder *decoder;
+ struct cxl_region *region;
+ struct xarray endpoints;
+ int nr_targets_set;
+ int nr_eps;
+ int nr_targets;
};
/*
@@ -325,29 +559,31 @@ int devm_cxl_register_pci_bus(struct device *host, struct device *uport,
struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port);
struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
resource_size_t component_reg_phys,
- struct cxl_port *parent_port);
+ struct cxl_dport *parent_dport);
+int devm_cxl_add_endpoint(struct cxl_memdev *cxlmd,
+ struct cxl_dport *parent_dport);
struct cxl_port *find_cxl_root(struct device *dev);
int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd);
int cxl_bus_rescan(void);
-struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd);
+struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd,
+ struct cxl_dport **dport);
bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd);
struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
struct device *dport, int port_id,
resource_size_t component_reg_phys);
-struct cxl_dport *cxl_find_dport_by_dev(struct cxl_port *port,
- const struct device *dev);
struct cxl_decoder *to_cxl_decoder(struct device *dev);
+struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev);
+struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev);
bool is_root_decoder(struct device *dev);
bool is_endpoint_decoder(struct device *dev);
-bool is_cxl_decoder(struct device *dev);
-struct cxl_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
- unsigned int nr_targets);
-struct cxl_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
- unsigned int nr_targets);
+struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
+ unsigned int nr_targets);
+struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
+ unsigned int nr_targets);
int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map);
-struct cxl_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port);
+struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port);
int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map);
int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld);
int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint);
@@ -357,6 +593,8 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port);
int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm);
int devm_cxl_add_passthrough_decoder(struct cxl_port *port);
+bool is_cxl_region(struct device *dev);
+
extern struct bus_type cxl_bus_type;
struct cxl_driver {
@@ -385,6 +623,8 @@ void cxl_driver_unregister(struct cxl_driver *cxl_drv);
#define CXL_DEVICE_PORT 3
#define CXL_DEVICE_ROOT 4
#define CXL_DEVICE_MEMORY_EXPANDER 5
+#define CXL_DEVICE_REGION 6
+#define CXL_DEVICE_PMEM_REGION 7
#define MODULE_ALIAS_CXL(type) MODULE_ALIAS("cxl:t" __stringify(type) "*")
#define CXL_MODALIAS_FMT "cxl:t%d"
@@ -396,7 +636,21 @@ struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev);
bool is_cxl_nvdimm(struct device *dev);
bool is_cxl_nvdimm_bridge(struct device *dev);
int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd);
-struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_nvdimm *cxl_nvd);
+struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct device *dev);
+
+#ifdef CONFIG_CXL_REGION
+bool is_cxl_pmem_region(struct device *dev);
+struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev);
+#else
+static inline bool is_cxl_pmem_region(struct device *dev)
+{
+ return false;
+}
+static inline struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
+{
+ return NULL;
+}
+#endif
/*
* Unit test builds overrides this to __weak, find the 'strong' version
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 7df0b053373a..88e3a8e54b6a 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -50,6 +50,24 @@ static inline struct cxl_memdev *to_cxl_memdev(struct device *dev)
return container_of(dev, struct cxl_memdev, dev);
}
+static inline struct cxl_port *cxled_to_port(struct cxl_endpoint_decoder *cxled)
+{
+ return to_cxl_port(cxled->cxld.dev.parent);
+}
+
+static inline struct cxl_port *cxlrd_to_port(struct cxl_root_decoder *cxlrd)
+{
+ return to_cxl_port(cxlrd->cxlsd.cxld.dev.parent);
+}
+
+static inline struct cxl_memdev *
+cxled_to_memdev(struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_port *port = to_cxl_port(cxled->cxld.dev.parent);
+
+ return to_cxl_memdev(port->uport);
+}
+
bool is_cxl_memdev(struct device *dev);
static inline bool is_cxl_endpoint(struct cxl_port *port)
{
@@ -178,8 +196,9 @@ struct cxl_endpoint_dvsec_info {
* @firmware_version: Firmware version for the memory device.
* @enabled_cmds: Hardware commands found enabled in CEL.
* @exclusive_cmds: Commands that are kernel-internal only
- * @pmem_range: Active Persistent memory capacity configuration
- * @ram_range: Active Volatile memory capacity configuration
+ * @dpa_res: Overall DPA resource tree for the device
+ * @pmem_res: Active Persistent memory capacity configuration
+ * @ram_res: Active Volatile memory capacity configuration
* @total_bytes: sum of all possible capacities
* @volatile_only_bytes: hard volatile capacity
* @persistent_only_bytes: hard persistent capacity
@@ -191,6 +210,7 @@ struct cxl_endpoint_dvsec_info {
* @component_reg_phys: register base of component registers
* @info: Cached DVSEC information about the device.
* @serial: PCIe Device Serial Number
+ * @doe_mbs: PCI DOE mailbox array
* @mbox_send: @dev specific transport for transmitting mailbox commands
*
* See section 8.2.9.5.2 Capacity Configuration and Label Storage for
@@ -209,8 +229,9 @@ struct cxl_dev_state {
DECLARE_BITMAP(enabled_cmds, CXL_MEM_COMMAND_ID_MAX);
DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
- struct range pmem_range;
- struct range ram_range;
+ struct resource dpa_res;
+ struct resource pmem_res;
+ struct resource ram_res;
u64 total_bytes;
u64 volatile_only_bytes;
u64 persistent_only_bytes;
@@ -224,6 +245,8 @@ struct cxl_dev_state {
resource_size_t component_reg_phys;
u64 serial;
+ struct xarray doe_mbs;
+
int (*mbox_send)(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd);
};
@@ -299,6 +322,13 @@ struct cxl_mbox_identify {
u8 qos_telemetry_caps;
} __packed;
+struct cxl_mbox_get_partition_info {
+ __le64 active_volatile_cap;
+ __le64 active_persistent_cap;
+ __le64 next_volatile_cap;
+ __le64 next_persistent_cap;
+} __packed;
+
struct cxl_mbox_get_lsa {
__le32 offset;
__le32 length;
@@ -370,4 +400,8 @@ struct cxl_hdm {
unsigned int interleave_mask;
struct cxl_port *port;
};
+
+struct seq_file;
+struct dentry *cxl_debugfs_create_dir(const char *dir);
+void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds);
#endif /* __CXL_MEM_H__ */
diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h
index fce1c11729c2..eec597dbe763 100644
--- a/drivers/cxl/cxlpci.h
+++ b/drivers/cxl/cxlpci.h
@@ -74,4 +74,5 @@ static inline resource_size_t cxl_regmap_to_base(struct pci_dev *pdev,
int devm_cxl_port_enumerate_dports(struct cxl_port *port);
struct cxl_dev_state;
int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm);
+void read_cdat_data(struct cxl_port *port);
#endif /* __CXL_PCI_H__ */
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index a979d0b484d5..64ccf053d32c 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
+#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -24,42 +25,32 @@
* in higher level operations.
*/
-static int create_endpoint(struct cxl_memdev *cxlmd,
- struct cxl_port *parent_port)
+static void enable_suspend(void *data)
{
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
- struct cxl_port *endpoint;
- int rc;
+ cxl_mem_active_dec();
+}
- endpoint = devm_cxl_add_port(&parent_port->dev, &cxlmd->dev,
- cxlds->component_reg_phys, parent_port);
- if (IS_ERR(endpoint))
- return PTR_ERR(endpoint);
+static void remove_debugfs(void *dentry)
+{
+ debugfs_remove_recursive(dentry);
+}
- dev_dbg(&cxlmd->dev, "add: %s\n", dev_name(&endpoint->dev));
+static int cxl_mem_dpa_show(struct seq_file *file, void *data)
+{
+ struct device *dev = file->private;
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- rc = cxl_endpoint_autoremove(cxlmd, endpoint);
- if (rc)
- return rc;
-
- if (!endpoint->dev.driver) {
- dev_err(&cxlmd->dev, "%s failed probe\n",
- dev_name(&endpoint->dev));
- return -ENXIO;
- }
+ cxl_dpa_debug(file, cxlmd->cxlds);
return 0;
}
-static void enable_suspend(void *data)
-{
- cxl_mem_active_dec();
-}
-
static int cxl_mem_probe(struct device *dev)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_port *parent_port;
+ struct cxl_dport *dport;
+ struct dentry *dentry;
int rc;
/*
@@ -73,11 +64,17 @@ static int cxl_mem_probe(struct device *dev)
if (work_pending(&cxlmd->detach_work))
return -EBUSY;
+ dentry = cxl_debugfs_create_dir(dev_name(dev));
+ debugfs_create_devm_seqfile(dev, "dpamem", dentry, cxl_mem_dpa_show);
+ rc = devm_add_action_or_reset(dev, remove_debugfs, dentry);
+ if (rc)
+ return rc;
+
rc = devm_cxl_enumerate_ports(cxlmd);
if (rc)
return rc;
- parent_port = cxl_mem_find_port(cxlmd);
+ parent_port = cxl_mem_find_port(cxlmd, &dport);
if (!parent_port) {
dev_err(dev, "CXL port topology not found\n");
return -ENXIO;
@@ -91,7 +88,7 @@ static int cxl_mem_probe(struct device *dev)
goto unlock;
}
- rc = create_endpoint(cxlmd, parent_port);
+ rc = devm_cxl_add_endpoint(cxlmd, dport);
unlock:
device_unlock(&parent_port->dev);
put_device(&parent_port->dev);
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 5a0ae46d4989..faeb5d9d7a7a 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -8,6 +8,7 @@
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/pci.h>
+#include <linux/pci-doe.h>
#include <linux/io.h>
#include "cxlmem.h"
#include "cxlpci.h"
@@ -386,6 +387,47 @@ static int cxl_setup_regs(struct pci_dev *pdev, enum cxl_regloc_type type,
return rc;
}
+static void cxl_pci_destroy_doe(void *mbs)
+{
+ xa_destroy(mbs);
+}
+
+static void devm_cxl_pci_create_doe(struct cxl_dev_state *cxlds)
+{
+ struct device *dev = cxlds->dev;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ u16 off = 0;
+
+ xa_init(&cxlds->doe_mbs);
+ if (devm_add_action(&pdev->dev, cxl_pci_destroy_doe, &cxlds->doe_mbs)) {
+ dev_err(dev, "Failed to create XArray for DOE's\n");
+ return;
+ }
+
+ /*
+ * Mailbox creation is best effort. Higher layers must determine if
+ * the lack of a mailbox for their protocol is a device failure or not.
+ */
+ pci_doe_for_each_off(pdev, off) {
+ struct pci_doe_mb *doe_mb;
+
+ doe_mb = pcim_doe_create_mb(pdev, off);
+ if (IS_ERR(doe_mb)) {
+ dev_err(dev, "Failed to create MB object for MB @ %x\n",
+ off);
+ continue;
+ }
+
+ if (xa_insert(&cxlds->doe_mbs, off, doe_mb, GFP_KERNEL)) {
+ dev_err(dev, "xa_insert failed to insert MB @ %x\n",
+ off);
+ continue;
+ }
+
+ dev_dbg(dev, "Created DOE mailbox @%x\n", off);
+ }
+}
+
static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct cxl_register_map map;
@@ -434,6 +476,8 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
cxlds->component_reg_phys = cxl_regmap_to_base(pdev, &map);
+ devm_cxl_pci_create_doe(cxlds);
+
rc = cxl_pci_setup_mailbox(cxlds);
if (rc)
return rc;
@@ -454,7 +498,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (IS_ERR(cxlmd))
return PTR_ERR(cxlmd);
- if (range_len(&cxlds->pmem_range) && IS_ENABLED(CONFIG_CXL_PMEM))
+ if (resource_size(&cxlds->pmem_res) && IS_ENABLED(CONFIG_CXL_PMEM))
rc = devm_cxl_add_nvdimm(&pdev->dev, cxlmd);
return rc;
diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
index 0aaa70b4e0f7..7dc0a2fa1a6b 100644
--- a/drivers/cxl/pmem.c
+++ b/drivers/cxl/pmem.c
@@ -7,6 +7,7 @@
#include <linux/ndctl.h>
#include <linux/async.h>
#include <linux/slab.h>
+#include <linux/nd.h>
#include "cxlmem.h"
#include "cxl.h"
@@ -26,7 +27,23 @@ static void clear_exclusive(void *cxlds)
static void unregister_nvdimm(void *nvdimm)
{
+ struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
+ struct cxl_nvdimm_bridge *cxl_nvb = cxl_nvd->bridge;
+ struct cxl_pmem_region *cxlr_pmem;
+
+ device_lock(&cxl_nvb->dev);
+ cxlr_pmem = cxl_nvd->region;
+ dev_set_drvdata(&cxl_nvd->dev, NULL);
+ cxl_nvd->region = NULL;
+ device_unlock(&cxl_nvb->dev);
+
+ if (cxlr_pmem) {
+ device_release_driver(&cxlr_pmem->dev);
+ put_device(&cxlr_pmem->dev);
+ }
+
nvdimm_delete(nvdimm);
+ cxl_nvd->bridge = NULL;
}
static int cxl_nvdimm_probe(struct device *dev)
@@ -39,7 +56,7 @@ static int cxl_nvdimm_probe(struct device *dev)
struct nvdimm *nvdimm;
int rc;
- cxl_nvb = cxl_find_nvdimm_bridge(cxl_nvd);
+ cxl_nvb = cxl_find_nvdimm_bridge(dev);
if (!cxl_nvb)
return -ENXIO;
@@ -66,6 +83,7 @@ static int cxl_nvdimm_probe(struct device *dev)
}
dev_set_drvdata(dev, nvdimm);
+ cxl_nvd->bridge = cxl_nvb;
rc = devm_add_action_or_reset(dev, unregister_nvdimm, nvdimm);
out:
device_unlock(&cxl_nvb->dev);
@@ -204,15 +222,38 @@ static bool online_nvdimm_bus(struct cxl_nvdimm_bridge *cxl_nvb)
return cxl_nvb->nvdimm_bus != NULL;
}
-static int cxl_nvdimm_release_driver(struct device *dev, void *data)
+static int cxl_nvdimm_release_driver(struct device *dev, void *cxl_nvb)
{
+ struct cxl_nvdimm *cxl_nvd;
+
if (!is_cxl_nvdimm(dev))
return 0;
+
+ cxl_nvd = to_cxl_nvdimm(dev);
+ if (cxl_nvd->bridge != cxl_nvb)
+ return 0;
+
device_release_driver(dev);
return 0;
}
-static void offline_nvdimm_bus(struct nvdimm_bus *nvdimm_bus)
+static int cxl_pmem_region_release_driver(struct device *dev, void *cxl_nvb)
+{
+ struct cxl_pmem_region *cxlr_pmem;
+
+ if (!is_cxl_pmem_region(dev))
+ return 0;
+
+ cxlr_pmem = to_cxl_pmem_region(dev);
+ if (cxlr_pmem->bridge != cxl_nvb)
+ return 0;
+
+ device_release_driver(dev);
+ return 0;
+}
+
+static void offline_nvdimm_bus(struct cxl_nvdimm_bridge *cxl_nvb,
+ struct nvdimm_bus *nvdimm_bus)
{
if (!nvdimm_bus)
return;
@@ -222,7 +263,10 @@ static void offline_nvdimm_bus(struct nvdimm_bus *nvdimm_bus)
* nvdimm_bus_unregister() rips the nvdimm objects out from
* underneath them.
*/
- bus_for_each_dev(&cxl_bus_type, NULL, NULL, cxl_nvdimm_release_driver);
+ bus_for_each_dev(&cxl_bus_type, NULL, cxl_nvb,
+ cxl_pmem_region_release_driver);
+ bus_for_each_dev(&cxl_bus_type, NULL, cxl_nvb,
+ cxl_nvdimm_release_driver);
nvdimm_bus_unregister(nvdimm_bus);
}
@@ -260,7 +304,7 @@ static void cxl_nvb_update_state(struct work_struct *work)
dev_dbg(&cxl_nvb->dev, "rescan: %d\n", rc);
}
- offline_nvdimm_bus(victim_bus);
+ offline_nvdimm_bus(cxl_nvb, victim_bus);
put_device(&cxl_nvb->dev);
}
@@ -315,6 +359,203 @@ static struct cxl_driver cxl_nvdimm_bridge_driver = {
.id = CXL_DEVICE_NVDIMM_BRIDGE,
};
+static int match_cxl_nvdimm(struct device *dev, void *data)
+{
+ return is_cxl_nvdimm(dev);
+}
+
+static void unregister_nvdimm_region(void *nd_region)
+{
+ struct cxl_nvdimm_bridge *cxl_nvb;
+ struct cxl_pmem_region *cxlr_pmem;
+ int i;
+
+ cxlr_pmem = nd_region_provider_data(nd_region);
+ cxl_nvb = cxlr_pmem->bridge;
+ device_lock(&cxl_nvb->dev);
+ for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
+ struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
+ struct cxl_nvdimm *cxl_nvd = m->cxl_nvd;
+
+ if (cxl_nvd->region) {
+ put_device(&cxlr_pmem->dev);
+ cxl_nvd->region = NULL;
+ }
+ }
+ device_unlock(&cxl_nvb->dev);
+
+ nvdimm_region_delete(nd_region);
+}
+
+static void cxlr_pmem_remove_resource(void *res)
+{
+ remove_resource(res);
+}
+
+struct cxl_pmem_region_info {
+ u64 offset;
+ u64 serial;
+};
+
+static int cxl_pmem_region_probe(struct device *dev)
+{
+ struct nd_mapping_desc mappings[CXL_DECODER_MAX_INTERLEAVE];
+ struct cxl_pmem_region *cxlr_pmem = to_cxl_pmem_region(dev);
+ struct cxl_region *cxlr = cxlr_pmem->cxlr;
+ struct cxl_pmem_region_info *info = NULL;
+ struct cxl_nvdimm_bridge *cxl_nvb;
+ struct nd_interleave_set *nd_set;
+ struct nd_region_desc ndr_desc;
+ struct cxl_nvdimm *cxl_nvd;
+ struct nvdimm *nvdimm;
+ struct resource *res;
+ int rc, i = 0;
+
+ cxl_nvb = cxl_find_nvdimm_bridge(&cxlr_pmem->mapping[0].cxlmd->dev);
+ if (!cxl_nvb) {
+ dev_dbg(dev, "bridge not found\n");
+ return -ENXIO;
+ }
+ cxlr_pmem->bridge = cxl_nvb;
+
+ device_lock(&cxl_nvb->dev);
+ if (!cxl_nvb->nvdimm_bus) {
+ dev_dbg(dev, "nvdimm bus not found\n");
+ rc = -ENXIO;
+ goto err;
+ }
+
+ memset(&mappings, 0, sizeof(mappings));
+ memset(&ndr_desc, 0, sizeof(ndr_desc));
+
+ res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
+ if (!res) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ res->name = "Persistent Memory";
+ res->start = cxlr_pmem->hpa_range.start;
+ res->end = cxlr_pmem->hpa_range.end;
+ res->flags = IORESOURCE_MEM;
+ res->desc = IORES_DESC_PERSISTENT_MEMORY;
+
+ rc = insert_resource(&iomem_resource, res);
+ if (rc)
+ goto err;
+
+ rc = devm_add_action_or_reset(dev, cxlr_pmem_remove_resource, res);
+ if (rc)
+ goto err;
+
+ ndr_desc.res = res;
+ ndr_desc.provider_data = cxlr_pmem;
+
+ ndr_desc.numa_node = memory_add_physaddr_to_nid(res->start);
+ ndr_desc.target_node = phys_to_target_node(res->start);
+ if (ndr_desc.target_node == NUMA_NO_NODE) {
+ ndr_desc.target_node = ndr_desc.numa_node;
+ dev_dbg(&cxlr->dev, "changing target node from %d to %d",
+ NUMA_NO_NODE, ndr_desc.target_node);
+ }
+
+ nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
+ if (!nd_set) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ ndr_desc.memregion = cxlr->id;
+ set_bit(ND_REGION_CXL, &ndr_desc.flags);
+ set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc.flags);
+
+ info = kmalloc_array(cxlr_pmem->nr_mappings, sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
+ struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
+ struct cxl_memdev *cxlmd = m->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct device *d;
+
+ d = device_find_child(&cxlmd->dev, NULL, match_cxl_nvdimm);
+ if (!d) {
+ dev_dbg(dev, "[%d]: %s: no cxl_nvdimm found\n", i,
+ dev_name(&cxlmd->dev));
+ rc = -ENODEV;
+ goto err;
+ }
+
+ /* safe to drop ref now with bridge lock held */
+ put_device(d);
+
+ cxl_nvd = to_cxl_nvdimm(d);
+ nvdimm = dev_get_drvdata(&cxl_nvd->dev);
+ if (!nvdimm) {
+ dev_dbg(dev, "[%d]: %s: no nvdimm found\n", i,
+ dev_name(&cxlmd->dev));
+ rc = -ENODEV;
+ goto err;
+ }
+ cxl_nvd->region = cxlr_pmem;
+ get_device(&cxlr_pmem->dev);
+ m->cxl_nvd = cxl_nvd;
+ mappings[i] = (struct nd_mapping_desc) {
+ .nvdimm = nvdimm,
+ .start = m->start,
+ .size = m->size,
+ .position = i,
+ };
+ info[i].offset = m->start;
+ info[i].serial = cxlds->serial;
+ }
+ ndr_desc.num_mappings = cxlr_pmem->nr_mappings;
+ ndr_desc.mapping = mappings;
+
+ /*
+ * TODO enable CXL labels which skip the need for 'interleave-set cookie'
+ */
+ nd_set->cookie1 =
+ nd_fletcher64(info, sizeof(*info) * cxlr_pmem->nr_mappings, 0);
+ nd_set->cookie2 = nd_set->cookie1;
+ ndr_desc.nd_set = nd_set;
+
+ cxlr_pmem->nd_region =
+ nvdimm_pmem_region_create(cxl_nvb->nvdimm_bus, &ndr_desc);
+ if (!cxlr_pmem->nd_region) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ rc = devm_add_action_or_reset(dev, unregister_nvdimm_region,
+ cxlr_pmem->nd_region);
+out:
+ kfree(info);
+ device_unlock(&cxl_nvb->dev);
+ put_device(&cxl_nvb->dev);
+
+ return rc;
+
+err:
+ dev_dbg(dev, "failed to create nvdimm region\n");
+ for (i--; i >= 0; i--) {
+ nvdimm = mappings[i].nvdimm;
+ cxl_nvd = nvdimm_provider_data(nvdimm);
+ put_device(&cxl_nvd->region->dev);
+ cxl_nvd->region = NULL;
+ }
+ goto out;
+}
+
+static struct cxl_driver cxl_pmem_region_driver = {
+ .name = "cxl_pmem_region",
+ .probe = cxl_pmem_region_probe,
+ .id = CXL_DEVICE_PMEM_REGION,
+};
+
/*
* Return all bridges to the CXL_NVB_NEW state to invalidate any
* ->state_work referring to the now destroyed cxl_pmem_wq.
@@ -359,8 +600,14 @@ static __init int cxl_pmem_init(void)
if (rc)
goto err_nvdimm;
+ rc = cxl_driver_register(&cxl_pmem_region_driver);
+ if (rc)
+ goto err_region;
+
return 0;
+err_region:
+ cxl_driver_unregister(&cxl_nvdimm_driver);
err_nvdimm:
cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
err_bridge:
@@ -370,6 +617,7 @@ err_bridge:
static __exit void cxl_pmem_exit(void)
{
+ cxl_driver_unregister(&cxl_pmem_region_driver);
cxl_driver_unregister(&cxl_nvdimm_driver);
cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
destroy_cxl_pmem_wq();
@@ -381,3 +629,4 @@ module_exit(cxl_pmem_exit);
MODULE_IMPORT_NS(CXL);
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM_BRIDGE);
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM);
+MODULE_ALIAS_CXL(CXL_DEVICE_PMEM_REGION);
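
As context for the probe path above: the interleave-set cookie is derived by running nd_fletcher64() over the array of { offset, serial } records gathered per mapping. A simplified sketch of a fletcher64 checksum over data of that shape (illustration only, not the in-kernel helper, which additionally takes an endianness flag) could look like this:

#include <stdint.h>
#include <stddef.h>

/*
 * Simplified model of the fletcher64 checksum used to seed cookie1 from
 * the per-mapping { offset, serial } records built in cxl_pmem_region_probe().
 */
static uint64_t fletcher64(const void *addr, size_t len)
{
	const uint32_t *buf = addr;
	uint32_t lo32 = 0;
	uint64_t hi32 = 0;
	size_t i;

	for (i = 0; i < len / sizeof(uint32_t); i++) {
		lo32 += buf[i];
		hi32 += lo32;
	}

	return hi32 << 32 | lo32;
}
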
diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c
index 3cf308f114c4..5453771bf330 100644
--- a/drivers/cxl/port.c
+++ b/drivers/cxl/port.c
@@ -53,6 +53,9 @@ static int cxl_port_probe(struct device *dev)
struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ /* Cache the data early to ensure is_visible() works */
+ read_cdat_data(port);
+
get_device(&cxlmd->dev);
rc = devm_add_action_or_reset(dev, schedule_detach, cxlmd);
if (rc)
@@ -78,10 +81,60 @@ static int cxl_port_probe(struct device *dev)
return 0;
}
+static ssize_t CDAT_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t offset, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cxl_port *port = to_cxl_port(dev);
+
+ if (!port->cdat_available)
+ return -ENXIO;
+
+ if (!port->cdat.table)
+ return 0;
+
+ return memory_read_from_buffer(buf, count, &offset,
+ port->cdat.table,
+ port->cdat.length);
+}
+
+static BIN_ATTR_ADMIN_RO(CDAT, 0);
+
+static umode_t cxl_port_bin_attr_is_visible(struct kobject *kobj,
+ struct bin_attribute *attr, int i)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cxl_port *port = to_cxl_port(dev);
+
+ if ((attr == &bin_attr_CDAT) && port->cdat_available)
+ return attr->attr.mode;
+
+ return 0;
+}
+
+static struct bin_attribute *cxl_cdat_bin_attributes[] = {
+ &bin_attr_CDAT,
+ NULL,
+};
+
+static struct attribute_group cxl_cdat_attribute_group = {
+ .bin_attrs = cxl_cdat_bin_attributes,
+ .is_bin_visible = cxl_port_bin_attr_is_visible,
+};
+
+static const struct attribute_group *cxl_port_attribute_groups[] = {
+ &cxl_cdat_attribute_group,
+ NULL,
+};
+
static struct cxl_driver cxl_port_driver = {
.name = "cxl_port",
.probe = cxl_port_probe,
.id = CXL_DEVICE_PORT,
+ .drv = {
+ .dev_groups = cxl_port_attribute_groups,
+ },
};
module_cxl_driver(cxl_port_driver);
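
The CDAT table cached in cxl_port_probe() above becomes visible to userspace as a read-only binary attribute on the port device. A minimal sketch of dumping it from userspace, assuming a hypothetical /sys/bus/cxl/devices/port1 node (the actual portN name depends on the enumerated topology):

#include <stdio.h>

int main(void)
{
	/* illustrative path; adjust to the port of interest */
	FILE *f = fopen("/sys/bus/cxl/devices/port1/CDAT", "rb");
	unsigned char buf[4096];
	size_t n;

	if (!f) {
		perror("fopen");
		return 1;
	}
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		fwrite(buf, 1, n, stdout);	/* raw CDAT blob to stdout */
	fclose(f);
	return 0;
}
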
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 50a08b2ec247..9b5e2a5eb0ae 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -22,6 +22,8 @@
* @private: dax driver private data
* @flags: state and boolean properties
* @ops: operations for this device
+ * @holder_data: holder of a dax_device: could be a filesystem or a mapped device
+ * @holder_ops: operations for the inner holder
*/
struct dax_device {
struct inode inode;
@@ -29,6 +31,8 @@ struct dax_device {
void *private;
unsigned long flags;
const struct dax_operations *ops;
+ void *holder_data;
+ const struct dax_holder_operations *holder_ops;
};
static dev_t dax_devt;
@@ -71,8 +75,11 @@ EXPORT_SYMBOL_GPL(dax_remove_host);
* fs_dax_get_by_bdev() - temporary lookup mechanism for filesystem-dax
* @bdev: block device to find a dax_device for
* @start_off: returns the byte offset into the dax_device that @bdev starts
+ * @holder: filesystem or mapped device inside the dax_device
+ * @ops: operations for the inner holder
*/
-struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, u64 *start_off)
+struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, u64 *start_off,
+ void *holder, const struct dax_holder_operations *ops)
{
struct dax_device *dax_dev;
u64 part_size;
@@ -92,11 +99,26 @@ struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, u64 *start_off)
dax_dev = xa_load(&dax_hosts, (unsigned long)bdev->bd_disk);
if (!dax_dev || !dax_alive(dax_dev) || !igrab(&dax_dev->inode))
dax_dev = NULL;
+ else if (holder) {
+ if (!cmpxchg(&dax_dev->holder_data, NULL, holder))
+ dax_dev->holder_ops = ops;
+ else
+ dax_dev = NULL;
+ }
dax_read_unlock(id);
return dax_dev;
}
EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
+
+void fs_put_dax(struct dax_device *dax_dev, void *holder)
+{
+ if (dax_dev && holder &&
+ cmpxchg(&dax_dev->holder_data, holder, NULL) == holder)
+ dax_dev->holder_ops = NULL;
+ put_dax(dax_dev);
+}
+EXPORT_SYMBOL_GPL(fs_put_dax);
#endif /* CONFIG_BLOCK && CONFIG_FS_DAX */
enum dax_device_flags {
@@ -204,6 +226,29 @@ size_t dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
}
EXPORT_SYMBOL_GPL(dax_recovery_write);
+int dax_holder_notify_failure(struct dax_device *dax_dev, u64 off,
+ u64 len, int mf_flags)
+{
+ int rc, id;
+
+ id = dax_read_lock();
+ if (!dax_alive(dax_dev)) {
+ rc = -ENXIO;
+ goto out;
+ }
+
+ if (!dax_dev->holder_ops) {
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+
+ rc = dax_dev->holder_ops->notify_failure(dax_dev, off, len, mf_flags);
+out:
+ dax_read_unlock(id);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(dax_holder_notify_failure);
+
#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
@@ -277,8 +322,15 @@ void kill_dax(struct dax_device *dax_dev)
if (!dax_dev)
return;
+ if (dax_dev->holder_data != NULL)
+ dax_holder_notify_failure(dax_dev, 0, U64_MAX, 0);
+
clear_bit(DAXDEV_ALIVE, &dax_dev->flags);
synchronize_srcu(&dax_srcu);
+
+ /* clear holder data */
+ dax_dev->holder_ops = NULL;
+ dax_dev->holder_data = NULL;
}
EXPORT_SYMBOL_GPL(kill_dax);
@@ -421,6 +473,19 @@ void put_dax(struct dax_device *dax_dev)
EXPORT_SYMBOL_GPL(put_dax);
/**
+ * dax_holder() - obtain the holder of a dax device
+ * @dax_dev: a dax_device instance
+ *
+ * Return: the holder's data which represents the holder if registered,
+ * otherwise NULL.
+ */
+void *dax_holder(struct dax_device *dax_dev)
+{
+ return dax_dev->holder_data;
+}
+EXPORT_SYMBOL_GPL(dax_holder);
+
+/**
* inode_dax: convert a public inode into its dax_dev
* @inode: An inode with i_cdev pointing to a dax_dev
*
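
For context, a minimal sketch of how a filesystem could use the holder interface added above: it passes itself and a dax_holder_operations table to fs_dax_get_by_bdev() at mount time, receives ->notify_failure() callbacks when the backing device reports failures, and drops the claim with fs_put_dax() at unmount. The callback body and the my_fs_* names are placeholders, not part of this patch.

#include <linux/dax.h>
#include <linux/fs.h>

/*
 * Placeholder failure handler; a real filesystem would map the failed
 * dax range back to the files using it and notify the owners.
 */
static int my_fs_notify_failure(struct dax_device *dax_dev, u64 offset,
				u64 len, int mf_flags)
{
	return 0;
}

static const struct dax_holder_operations my_fs_dax_holder_ops = {
	.notify_failure	= my_fs_notify_failure,
};

static int my_fs_claim_dax(struct super_block *sb, struct block_device *bdev)
{
	struct dax_device *dax_dev;
	u64 start_off;

	/* claim the dax_device with the superblock as holder */
	dax_dev = fs_dax_get_by_bdev(bdev, &start_off, sb,
				     &my_fs_dax_holder_ops);
	if (!dax_dev)
		return -EOPNOTSUPP;

	sb->s_fs_info = dax_dev;	/* placeholder stash */
	return 0;
}

static void my_fs_release_dax(struct super_block *sb)
{
	fs_put_dax(sb->s_fs_info, sb);	/* drops holder and reference */
}
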
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
index f7dcc44f9414..027e8f336acc 100644
--- a/drivers/devfreq/exynos-bus.c
+++ b/drivers/devfreq/exynos-bus.c
@@ -33,7 +33,7 @@ struct exynos_bus {
unsigned long curr_freq;
- struct opp_table *opp_table;
+ int opp_token;
struct clk *clk;
unsigned int ratio;
};
@@ -161,8 +161,7 @@ static void exynos_bus_exit(struct device *dev)
dev_pm_opp_of_remove_table(dev);
clk_disable_unprepare(bus->clk);
- dev_pm_opp_put_regulators(bus->opp_table);
- bus->opp_table = NULL;
+ dev_pm_opp_put_regulators(bus->opp_token);
}
static void exynos_bus_passive_exit(struct device *dev)
@@ -179,18 +178,16 @@ static int exynos_bus_parent_parse_of(struct device_node *np,
struct exynos_bus *bus)
{
struct device *dev = bus->dev;
- struct opp_table *opp_table;
- const char *vdd = "vdd";
+ const char *supplies[] = { "vdd", NULL };
int i, ret, count, size;
- opp_table = dev_pm_opp_set_regulators(dev, &vdd, 1);
- if (IS_ERR(opp_table)) {
- ret = PTR_ERR(opp_table);
+ ret = dev_pm_opp_set_regulators(dev, supplies);
+ if (ret < 0) {
dev_err(dev, "failed to set regulators %d\n", ret);
return ret;
}
- bus->opp_table = opp_table;
+ bus->opp_token = ret;
/*
* Get the devfreq-event devices to get the current utilization of
@@ -236,8 +233,7 @@ static int exynos_bus_parent_parse_of(struct device_node *np,
return 0;
err_regulator:
- dev_pm_opp_put_regulators(bus->opp_table);
- bus->opp_table = NULL;
+ dev_pm_opp_put_regulators(bus->opp_token);
return ret;
}
@@ -459,8 +455,7 @@ err:
dev_pm_opp_of_remove_table(dev);
clk_disable_unprepare(bus->clk);
err_reg:
- dev_pm_opp_put_regulators(bus->opp_table);
- bus->opp_table = NULL;
+ dev_pm_opp_put_regulators(bus->opp_token);
return ret;
}
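
A minimal sketch of the token-based pattern this conversion adopts: dev_pm_opp_set_regulators() now takes a NULL-terminated array of supply names and returns a token (or a negative errno), and dev_pm_opp_put_regulators() takes that token rather than an opp_table pointer. The my_bus_* names below are illustrative.

#include <linux/pm_opp.h>

struct my_bus {
	int opp_token;
};

static int my_bus_setup_opp(struct device *dev, struct my_bus *bus)
{
	/* NULL-terminated supply list, as in the exynos-bus change above */
	const char *supplies[] = { "vdd", NULL };
	int ret;

	ret = dev_pm_opp_set_regulators(dev, supplies);
	if (ret < 0)
		return ret;

	bus->opp_token = ret;	/* token replaces the old opp_table pointer */
	return 0;
}

static void my_bus_teardown_opp(struct my_bus *bus)
{
	dev_pm_opp_put_regulators(bus->opp_token);
}
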
diff --git a/drivers/devfreq/tegra30-devfreq.c b/drivers/devfreq/tegra30-devfreq.c
index 585a95fe2bd6..503376b894b6 100644
--- a/drivers/devfreq/tegra30-devfreq.c
+++ b/drivers/devfreq/tegra30-devfreq.c
@@ -821,6 +821,15 @@ static int devm_tegra_devfreq_init_hw(struct device *dev,
return err;
}
+static int tegra_devfreq_config_clks_nop(struct device *dev,
+ struct opp_table *opp_table,
+ struct dev_pm_opp *opp, void *data,
+ bool scaling_down)
+{
+ /* We want to skip clk configuration via dev_pm_opp_set_opp() */
+ return 0;
+}
+
static int tegra_devfreq_probe(struct platform_device *pdev)
{
u32 hw_version = BIT(tegra_sku_info.soc_speedo_id);
@@ -830,6 +839,13 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
unsigned int i;
long rate;
int err;
+ const char *clk_names[] = { "actmon", NULL };
+ struct dev_pm_opp_config config = {
+ .supported_hw = &hw_version,
+ .supported_hw_count = 1,
+ .clk_names = clk_names,
+ .config_clks = tegra_devfreq_config_clks_nop,
+ };
tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
if (!tegra)
@@ -874,13 +890,13 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
return err;
}
- err = devm_pm_opp_set_supported_hw(&pdev->dev, &hw_version, 1);
+ err = devm_pm_opp_set_config(&pdev->dev, &config);
if (err) {
- dev_err(&pdev->dev, "Failed to set supported HW: %d\n", err);
+ dev_err(&pdev->dev, "Failed to set OPP config: %d\n", err);
return err;
}
- err = devm_pm_opp_of_add_table_noclk(&pdev->dev, 0);
+ err = devm_pm_opp_of_add_table_indexed(&pdev->dev, 0);
if (err) {
dev_err(&pdev->dev, "Failed to add OPP table: %d\n", err);
return err;
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 487ed4ddc3be..a06d2a7627aa 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -85,6 +85,14 @@ config AMCC_PPC440SPE_ADMA
help
Enable support for the AMCC PPC440SPe RAID engines.
+config APPLE_ADMAC
+ tristate "Apple ADMAC support"
+ depends on ARCH_APPLE || COMPILE_TEST
+ select DMA_ENGINE
+ default ARCH_APPLE
+ help
+ Enable support for Audio DMA Controller found on Apple Silicon SoCs.
+
config AT_HDMAC
tristate "Atmel AHB DMA support"
depends on ARCH_AT91
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 2f1b87ffd7ab..10f7d4241001 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_ALTERA_MSGDMA) += altera-msgdma.o
obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
obj-$(CONFIG_AMD_PTDMA) += ptdma/
+obj-$(CONFIG_APPLE_ADMAC) += apple-admac.o
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
obj-$(CONFIG_AXI_DMAC) += dma-axi-dmac.o
diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c
index 6f56dfd375e3..4153c2edb049 100644
--- a/drivers/dma/altera-msgdma.c
+++ b/drivers/dma/altera-msgdma.c
@@ -749,7 +749,7 @@ static irqreturn_t msgdma_irq_handler(int irq, void *data)
}
/**
- * msgdma_chan_remove - Channel remove function
+ * msgdma_dev_remove() - Device remove function
* @mdev: Pointer to the Altera mSGDMA device structure
*/
static void msgdma_dev_remove(struct msgdma_device *mdev)
@@ -918,7 +918,7 @@ fail:
}
/**
- * msgdma_dma_remove - Driver remove function
+ * msgdma_remove() - Driver remove function
* @pdev: Pointer to the platform_device structure
*
* Return: Always '0'
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index a4a794e62ac2..487a01aa207d 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -231,7 +231,7 @@ enum pl08x_dma_chan_state {
/**
* struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
- * @vc: wrappped virtual channel
+ * @vc: wrapped virtual channel
* @phychan: the physical channel utilized by this channel, if there is one
* @name: name of channel
* @cd: channel platform data
diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c
new file mode 100644
index 000000000000..d1f74a3aa999
--- /dev/null
+++ b/drivers/dma/apple-admac.c
@@ -0,0 +1,818 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for Audio DMA Controller (ADMAC) on t8103 (M1) and other Apple chips
+ *
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+
+#include "dmaengine.h"
+
+#define NCHANNELS_MAX 64
+#define IRQ_NOUTPUTS 4
+
+#define RING_WRITE_SLOT GENMASK(1, 0)
+#define RING_READ_SLOT GENMASK(5, 4)
+#define RING_FULL BIT(9)
+#define RING_EMPTY BIT(8)
+#define RING_ERR BIT(10)
+
+#define STATUS_DESC_DONE BIT(0)
+#define STATUS_ERR BIT(6)
+
+#define FLAG_DESC_NOTIFY BIT(16)
+
+#define REG_TX_START 0x0000
+#define REG_TX_STOP 0x0004
+#define REG_RX_START 0x0008
+#define REG_RX_STOP 0x000c
+
+#define REG_CHAN_CTL(ch) (0x8000 + (ch) * 0x200)
+#define REG_CHAN_CTL_RST_RINGS BIT(0)
+
+#define REG_DESC_RING(ch) (0x8070 + (ch) * 0x200)
+#define REG_REPORT_RING(ch) (0x8074 + (ch) * 0x200)
+
+#define REG_RESIDUE(ch) (0x8064 + (ch) * 0x200)
+
+#define REG_BUS_WIDTH(ch) (0x8040 + (ch) * 0x200)
+
+#define BUS_WIDTH_8BIT 0x00
+#define BUS_WIDTH_16BIT 0x01
+#define BUS_WIDTH_32BIT 0x02
+#define BUS_WIDTH_FRAME_2_WORDS 0x10
+#define BUS_WIDTH_FRAME_4_WORDS 0x20
+
+#define CHAN_BUFSIZE 0x8000
+
+#define REG_CHAN_FIFOCTL(ch) (0x8054 + (ch) * 0x200)
+#define CHAN_FIFOCTL_LIMIT GENMASK(31, 16)
+#define CHAN_FIFOCTL_THRESHOLD GENMASK(15, 0)
+
+#define REG_DESC_WRITE(ch) (0x10000 + ((ch) / 2) * 0x4 + ((ch) & 1) * 0x4000)
+#define REG_REPORT_READ(ch) (0x10100 + ((ch) / 2) * 0x4 + ((ch) & 1) * 0x4000)
+
+#define REG_TX_INTSTATE(idx) (0x0030 + (idx) * 4)
+#define REG_RX_INTSTATE(idx) (0x0040 + (idx) * 4)
+#define REG_CHAN_INTSTATUS(ch, idx) (0x8010 + (ch) * 0x200 + (idx) * 4)
+#define REG_CHAN_INTMASK(ch, idx) (0x8020 + (ch) * 0x200 + (idx) * 4)
+
+struct admac_data;
+struct admac_tx;
+
+struct admac_chan {
+ unsigned int no;
+ struct admac_data *host;
+ struct dma_chan chan;
+ struct tasklet_struct tasklet;
+
+ spinlock_t lock;
+ struct admac_tx *current_tx;
+ int nperiod_acks;
+
+ /*
+ * We maintain a 'submitted' and 'issued' list mainly for interface
+ * correctness. Typical use of the driver (per channel) will be
+ * prepping, submitting and issuing a single cyclic transaction which
+ * will stay current until terminate_all is called.
+ */
+ struct list_head submitted;
+ struct list_head issued;
+
+ struct list_head to_free;
+};
+
+struct admac_data {
+ struct dma_device dma;
+ struct device *dev;
+ __iomem void *base;
+
+ int irq_index;
+ int nchannels;
+ struct admac_chan channels[];
+};
+
+struct admac_tx {
+ struct dma_async_tx_descriptor tx;
+ bool cyclic;
+ dma_addr_t buf_addr;
+ dma_addr_t buf_end;
+ size_t buf_len;
+ size_t period_len;
+
+ size_t submitted_pos;
+ size_t reclaimed_pos;
+
+ struct list_head node;
+};
+
+static void admac_modify(struct admac_data *ad, int reg, u32 mask, u32 val)
+{
+ void __iomem *addr = ad->base + reg;
+ u32 curr = readl_relaxed(addr);
+
+ writel_relaxed((curr & ~mask) | (val & mask), addr);
+}
+
+static struct admac_chan *to_admac_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct admac_chan, chan);
+}
+
+static struct admac_tx *to_admac_tx(struct dma_async_tx_descriptor *tx)
+{
+ return container_of(tx, struct admac_tx, tx);
+}
+
+static enum dma_transfer_direction admac_chan_direction(int channo)
+{
+ /* Channel directions are hardwired */
+ return (channo & 1) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
+}
+
+static dma_cookie_t admac_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct admac_tx *adtx = to_admac_tx(tx);
+ struct admac_chan *adchan = to_admac_chan(tx->chan);
+ unsigned long flags;
+ dma_cookie_t cookie;
+
+ spin_lock_irqsave(&adchan->lock, flags);
+ cookie = dma_cookie_assign(tx);
+ list_add_tail(&adtx->node, &adchan->submitted);
+ spin_unlock_irqrestore(&adchan->lock, flags);
+
+ return cookie;
+}
+
+static int admac_desc_free(struct dma_async_tx_descriptor *tx)
+{
+ kfree(to_admac_tx(tx));
+
+ return 0;
+}
+
+static struct dma_async_tx_descriptor *admac_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct admac_chan *adchan = container_of(chan, struct admac_chan, chan);
+ struct admac_tx *adtx;
+
+ if (direction != admac_chan_direction(adchan->no))
+ return NULL;
+
+ adtx = kzalloc(sizeof(*adtx), GFP_NOWAIT);
+ if (!adtx)
+ return NULL;
+
+ adtx->cyclic = true;
+
+ adtx->buf_addr = buf_addr;
+ adtx->buf_len = buf_len;
+ adtx->buf_end = buf_addr + buf_len;
+ adtx->period_len = period_len;
+
+ adtx->submitted_pos = 0;
+ adtx->reclaimed_pos = 0;
+
+ dma_async_tx_descriptor_init(&adtx->tx, chan);
+ adtx->tx.tx_submit = admac_tx_submit;
+ adtx->tx.desc_free = admac_desc_free;
+
+ return &adtx->tx;
+}
+
+/*
+ * Write one hardware descriptor for a dmaengine cyclic transaction.
+ */
+static void admac_cyclic_write_one_desc(struct admac_data *ad, int channo,
+ struct admac_tx *tx)
+{
+ dma_addr_t addr;
+
+ addr = tx->buf_addr + (tx->submitted_pos % tx->buf_len);
+
+ /* If this happens, we have buggy code */
+ WARN_ON_ONCE(addr + tx->period_len > tx->buf_end);
+
+ dev_dbg(ad->dev, "ch%d descriptor: addr=0x%pad len=0x%zx flags=0x%lx\n",
+ channo, &addr, tx->period_len, FLAG_DESC_NOTIFY);
+
+ writel_relaxed(lower_32_bits(addr), ad->base + REG_DESC_WRITE(channo));
+ writel_relaxed(upper_32_bits(addr), ad->base + REG_DESC_WRITE(channo));
+ writel_relaxed(tx->period_len, ad->base + REG_DESC_WRITE(channo));
+ writel_relaxed(FLAG_DESC_NOTIFY, ad->base + REG_DESC_WRITE(channo));
+
+ tx->submitted_pos += tx->period_len;
+ tx->submitted_pos %= 2 * tx->buf_len;
+}
+
+/*
+ * Write all the hardware descriptors for a dmaengine cyclic
+ * transaction there is space for.
+ */
+static void admac_cyclic_write_desc(struct admac_data *ad, int channo,
+ struct admac_tx *tx)
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ if (readl_relaxed(ad->base + REG_DESC_RING(channo)) & RING_FULL)
+ break;
+ admac_cyclic_write_one_desc(ad, channo, tx);
+ }
+}
+
+static int admac_ring_noccupied_slots(int ringval)
+{
+ int wrslot = FIELD_GET(RING_WRITE_SLOT, ringval);
+ int rdslot = FIELD_GET(RING_READ_SLOT, ringval);
+
+ if (wrslot != rdslot) {
+ return (wrslot + 4 - rdslot) % 4;
+ } else {
+ WARN_ON((ringval & (RING_FULL | RING_EMPTY)) == 0);
+
+ if (ringval & RING_FULL)
+ return 4;
+ else
+ return 0;
+ }
+}
+
+/*
+ * Read from hardware the residue of a cyclic dmaengine transaction.
+ */
+static u32 admac_cyclic_read_residue(struct admac_data *ad, int channo,
+ struct admac_tx *adtx)
+{
+ u32 ring1, ring2;
+ u32 residue1, residue2;
+ int nreports;
+ size_t pos;
+
+ ring1 = readl_relaxed(ad->base + REG_REPORT_RING(channo));
+ residue1 = readl_relaxed(ad->base + REG_RESIDUE(channo));
+ ring2 = readl_relaxed(ad->base + REG_REPORT_RING(channo));
+ residue2 = readl_relaxed(ad->base + REG_RESIDUE(channo));
+
+ if (residue2 > residue1) {
+ /*
+ * Controller must have loaded next descriptor between
+ * the two residue reads
+ */
+ nreports = admac_ring_noccupied_slots(ring1) + 1;
+ } else {
+ /* No descriptor load between the two reads, ring2 is safe to use */
+ nreports = admac_ring_noccupied_slots(ring2);
+ }
+
+ pos = adtx->reclaimed_pos + adtx->period_len * (nreports + 1) - residue2;
+
+ return adtx->buf_len - pos % adtx->buf_len;
+}
+
+static enum dma_status admac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct admac_chan *adchan = to_admac_chan(chan);
+ struct admac_data *ad = adchan->host;
+ struct admac_tx *adtx;
+
+ enum dma_status ret;
+ size_t residue;
+ unsigned long flags;
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_COMPLETE || !txstate)
+ return ret;
+
+ spin_lock_irqsave(&adchan->lock, flags);
+ adtx = adchan->current_tx;
+
+ if (adtx && adtx->tx.cookie == cookie) {
+ ret = DMA_IN_PROGRESS;
+ residue = admac_cyclic_read_residue(ad, adchan->no, adtx);
+ } else {
+ ret = DMA_IN_PROGRESS;
+ residue = 0;
+ list_for_each_entry(adtx, &adchan->issued, node) {
+ if (adtx->tx.cookie == cookie) {
+ residue = adtx->buf_len;
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&adchan->lock, flags);
+
+ dma_set_residue(txstate, residue);
+ return ret;
+}
+
+static void admac_start_chan(struct admac_chan *adchan)
+{
+ struct admac_data *ad = adchan->host;
+ u32 startbit = 1 << (adchan->no / 2);
+
+ writel_relaxed(STATUS_DESC_DONE | STATUS_ERR,
+ ad->base + REG_CHAN_INTSTATUS(adchan->no, ad->irq_index));
+ writel_relaxed(STATUS_DESC_DONE | STATUS_ERR,
+ ad->base + REG_CHAN_INTMASK(adchan->no, ad->irq_index));
+
+ switch (admac_chan_direction(adchan->no)) {
+ case DMA_MEM_TO_DEV:
+ writel_relaxed(startbit, ad->base + REG_TX_START);
+ break;
+ case DMA_DEV_TO_MEM:
+ writel_relaxed(startbit, ad->base + REG_RX_START);
+ break;
+ default:
+ break;
+ }
+ dev_dbg(adchan->host->dev, "ch%d start\n", adchan->no);
+}
+
+static void admac_stop_chan(struct admac_chan *adchan)
+{
+ struct admac_data *ad = adchan->host;
+ u32 stopbit = 1 << (adchan->no / 2);
+
+ switch (admac_chan_direction(adchan->no)) {
+ case DMA_MEM_TO_DEV:
+ writel_relaxed(stopbit, ad->base + REG_TX_STOP);
+ break;
+ case DMA_DEV_TO_MEM:
+ writel_relaxed(stopbit, ad->base + REG_RX_STOP);
+ break;
+ default:
+ break;
+ }
+ dev_dbg(adchan->host->dev, "ch%d stop\n", adchan->no);
+}
+
+static void admac_reset_rings(struct admac_chan *adchan)
+{
+ struct admac_data *ad = adchan->host;
+
+ writel_relaxed(REG_CHAN_CTL_RST_RINGS,
+ ad->base + REG_CHAN_CTL(adchan->no));
+ writel_relaxed(0, ad->base + REG_CHAN_CTL(adchan->no));
+}
+
+static void admac_start_current_tx(struct admac_chan *adchan)
+{
+ struct admac_data *ad = adchan->host;
+ int ch = adchan->no;
+
+ admac_reset_rings(adchan);
+ writel_relaxed(0, ad->base + REG_CHAN_CTL(ch));
+
+ admac_cyclic_write_one_desc(ad, ch, adchan->current_tx);
+ admac_start_chan(adchan);
+ admac_cyclic_write_desc(ad, ch, adchan->current_tx);
+}
+
+static void admac_issue_pending(struct dma_chan *chan)
+{
+ struct admac_chan *adchan = to_admac_chan(chan);
+ struct admac_tx *tx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adchan->lock, flags);
+ list_splice_tail_init(&adchan->submitted, &adchan->issued);
+ if (!list_empty(&adchan->issued) && !adchan->current_tx) {
+ tx = list_first_entry(&adchan->issued, struct admac_tx, node);
+ list_del(&tx->node);
+
+ adchan->current_tx = tx;
+ adchan->nperiod_acks = 0;
+ admac_start_current_tx(adchan);
+ }
+ spin_unlock_irqrestore(&adchan->lock, flags);
+}
+
+static int admac_pause(struct dma_chan *chan)
+{
+ struct admac_chan *adchan = to_admac_chan(chan);
+
+ admac_stop_chan(adchan);
+
+ return 0;
+}
+
+static int admac_resume(struct dma_chan *chan)
+{
+ struct admac_chan *adchan = to_admac_chan(chan);
+
+ admac_start_chan(adchan);
+
+ return 0;
+}
+
+static int admac_terminate_all(struct dma_chan *chan)
+{
+ struct admac_chan *adchan = to_admac_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&adchan->lock, flags);
+ admac_stop_chan(adchan);
+ admac_reset_rings(adchan);
+
+ adchan->current_tx = NULL;
+ /*
+ * Descriptors can only be freed after the tasklet
+ * has been killed (in admac_synchronize).
+ */
+ list_splice_tail_init(&adchan->submitted, &adchan->to_free);
+ list_splice_tail_init(&adchan->issued, &adchan->to_free);
+ spin_unlock_irqrestore(&adchan->lock, flags);
+
+ return 0;
+}
+
+static void admac_synchronize(struct dma_chan *chan)
+{
+ struct admac_chan *adchan = to_admac_chan(chan);
+ struct admac_tx *adtx, *_adtx;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&adchan->lock, flags);
+ list_splice_tail_init(&adchan->to_free, &head);
+ spin_unlock_irqrestore(&adchan->lock, flags);
+
+ tasklet_kill(&adchan->tasklet);
+
+ list_for_each_entry_safe(adtx, _adtx, &head, node) {
+ list_del(&adtx->node);
+ admac_desc_free(&adtx->tx);
+ }
+}
+
+static int admac_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct admac_chan *adchan = to_admac_chan(chan);
+
+ dma_cookie_init(&adchan->chan);
+ return 0;
+}
+
+static void admac_free_chan_resources(struct dma_chan *chan)
+{
+ admac_terminate_all(chan);
+ admac_synchronize(chan);
+}
+
+static struct dma_chan *admac_dma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct admac_data *ad = (struct admac_data *) ofdma->of_dma_data;
+ unsigned int index;
+
+ if (dma_spec->args_count != 1)
+ return NULL;
+
+ index = dma_spec->args[0];
+
+ if (index >= ad->nchannels) {
+ dev_err(ad->dev, "channel index %u out of bounds\n", index);
+ return NULL;
+ }
+
+ return &ad->channels[index].chan;
+}
+
+static int admac_drain_reports(struct admac_data *ad, int channo)
+{
+ int count;
+
+ for (count = 0; count < 4; count++) {
+ u32 countval_hi, countval_lo, unk1, flags;
+
+ if (readl_relaxed(ad->base + REG_REPORT_RING(channo)) & RING_EMPTY)
+ break;
+
+ countval_lo = readl_relaxed(ad->base + REG_REPORT_READ(channo));
+ countval_hi = readl_relaxed(ad->base + REG_REPORT_READ(channo));
+ unk1 = readl_relaxed(ad->base + REG_REPORT_READ(channo));
+ flags = readl_relaxed(ad->base + REG_REPORT_READ(channo));
+
+ dev_dbg(ad->dev, "ch%d report: countval=0x%llx unk1=0x%x flags=0x%x\n",
+ channo, ((u64) countval_hi) << 32 | countval_lo, unk1, flags);
+ }
+
+ return count;
+}
+
+static void admac_handle_status_err(struct admac_data *ad, int channo)
+{
+ bool handled = false;
+
+ if (readl_relaxed(ad->base + REG_DESC_RING(channo)) & RING_ERR) {
+ writel_relaxed(RING_ERR, ad->base + REG_DESC_RING(channo));
+ dev_err_ratelimited(ad->dev, "ch%d descriptor ring error\n", channo);
+ handled = true;
+ }
+
+ if (readl_relaxed(ad->base + REG_REPORT_RING(channo)) & RING_ERR) {
+ writel_relaxed(RING_ERR, ad->base + REG_REPORT_RING(channo));
+ dev_err_ratelimited(ad->dev, "ch%d report ring error\n", channo);
+ handled = true;
+ }
+
+ if (unlikely(!handled)) {
+ dev_err(ad->dev, "ch%d unknown error, masking errors as cause of IRQs\n", channo);
+ admac_modify(ad, REG_CHAN_INTMASK(channo, ad->irq_index),
+ STATUS_ERR, 0);
+ }
+}
+
+static void admac_handle_status_desc_done(struct admac_data *ad, int channo)
+{
+ struct admac_chan *adchan = &ad->channels[channo];
+ unsigned long flags;
+ int nreports;
+
+ writel_relaxed(STATUS_DESC_DONE,
+ ad->base + REG_CHAN_INTSTATUS(channo, ad->irq_index));
+
+ spin_lock_irqsave(&adchan->lock, flags);
+ nreports = admac_drain_reports(ad, channo);
+
+ if (adchan->current_tx) {
+ struct admac_tx *tx = adchan->current_tx;
+
+ adchan->nperiod_acks += nreports;
+ tx->reclaimed_pos += nreports * tx->period_len;
+ tx->reclaimed_pos %= 2 * tx->buf_len;
+
+ admac_cyclic_write_desc(ad, channo, tx);
+ tasklet_schedule(&adchan->tasklet);
+ }
+ spin_unlock_irqrestore(&adchan->lock, flags);
+}
+
+static void admac_handle_chan_int(struct admac_data *ad, int no)
+{
+ u32 cause = readl_relaxed(ad->base + REG_CHAN_INTSTATUS(no, ad->irq_index));
+
+ if (cause & STATUS_ERR)
+ admac_handle_status_err(ad, no);
+
+ if (cause & STATUS_DESC_DONE)
+ admac_handle_status_desc_done(ad, no);
+}
+
+static irqreturn_t admac_interrupt(int irq, void *devid)
+{
+ struct admac_data *ad = devid;
+ u32 rx_intstate, tx_intstate;
+ int i;
+
+ rx_intstate = readl_relaxed(ad->base + REG_RX_INTSTATE(ad->irq_index));
+ tx_intstate = readl_relaxed(ad->base + REG_TX_INTSTATE(ad->irq_index));
+
+ if (!tx_intstate && !rx_intstate)
+ return IRQ_NONE;
+
+ for (i = 0; i < ad->nchannels; i += 2) {
+ if (tx_intstate & 1)
+ admac_handle_chan_int(ad, i);
+ tx_intstate >>= 1;
+ }
+
+ for (i = 1; i < ad->nchannels; i += 2) {
+ if (rx_intstate & 1)
+ admac_handle_chan_int(ad, i);
+ rx_intstate >>= 1;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void admac_chan_tasklet(struct tasklet_struct *t)
+{
+ struct admac_chan *adchan = from_tasklet(adchan, t, tasklet);
+ struct admac_tx *adtx;
+ struct dmaengine_desc_callback cb;
+ struct dmaengine_result tx_result;
+ int nacks;
+
+ spin_lock_irq(&adchan->lock);
+ adtx = adchan->current_tx;
+ nacks = adchan->nperiod_acks;
+ adchan->nperiod_acks = 0;
+ spin_unlock_irq(&adchan->lock);
+
+ if (!adtx || !nacks)
+ return;
+
+ tx_result.result = DMA_TRANS_NOERROR;
+ tx_result.residue = 0;
+
+ dmaengine_desc_get_callback(&adtx->tx, &cb);
+ while (nacks--)
+ dmaengine_desc_callback_invoke(&cb, &tx_result);
+}
+
+static int admac_device_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+{
+ struct admac_chan *adchan = to_admac_chan(chan);
+ struct admac_data *ad = adchan->host;
+ bool is_tx = admac_chan_direction(adchan->no) == DMA_MEM_TO_DEV;
+ int wordsize = 0;
+ u32 bus_width = 0;
+
+ switch (is_tx ? config->dst_addr_width : config->src_addr_width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ wordsize = 1;
+ bus_width |= BUS_WIDTH_8BIT;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ wordsize = 2;
+ bus_width |= BUS_WIDTH_16BIT;
+ break;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ wordsize = 4;
+ bus_width |= BUS_WIDTH_32BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * We take port_window_size to be the number of words in a frame.
+ *
+ * The controller has some means of out-of-band signalling, to the peripheral,
+ * of a word's position in a frame. That's where the importance of this control
+ * comes from.
+ */
+ switch (is_tx ? config->dst_port_window_size : config->src_port_window_size) {
+ case 0 ... 1:
+ break;
+ case 2:
+ bus_width |= BUS_WIDTH_FRAME_2_WORDS;
+ break;
+ case 4:
+ bus_width |= BUS_WIDTH_FRAME_4_WORDS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ writel_relaxed(bus_width, ad->base + REG_BUS_WIDTH(adchan->no));
+
+ /*
+ * By FIFOCTL_LIMIT we seem to set the maximal number of bytes allowed to be
+ * held in the controller's per-channel FIFO. Transfers seem to be triggered
+ * around the time FIFO occupancy touches FIFOCTL_THRESHOLD.
+ *
+ * The numbers we set are more or less arbitrary.
+ */
+ writel_relaxed(FIELD_PREP(CHAN_FIFOCTL_LIMIT, 0x30 * wordsize)
+ | FIELD_PREP(CHAN_FIFOCTL_THRESHOLD, 0x18 * wordsize),
+ ad->base + REG_CHAN_FIFOCTL(adchan->no));
+
+ return 0;
+}
+
+static int admac_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct admac_data *ad;
+ struct dma_device *dma;
+ int nchannels;
+ int err, irq, i;
+
+ err = of_property_read_u32(np, "dma-channels", &nchannels);
+ if (err || nchannels > NCHANNELS_MAX) {
+ dev_err(&pdev->dev, "missing or invalid dma-channels property\n");
+ return -EINVAL;
+ }
+
+ ad = devm_kzalloc(&pdev->dev, struct_size(ad, channels, nchannels), GFP_KERNEL);
+ if (!ad)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ad);
+ ad->dev = &pdev->dev;
+ ad->nchannels = nchannels;
+
+ /*
+ * The controller has 4 IRQ outputs. Try them all until
+ * we find one we can use.
+ */
+ for (i = 0; i < IRQ_NOUTPUTS; i++) {
+ irq = platform_get_irq_optional(pdev, i);
+ if (irq >= 0) {
+ ad->irq_index = i;
+ break;
+ }
+ }
+
+ if (irq < 0)
+ return dev_err_probe(&pdev->dev, irq, "no usable interrupt\n");
+
+ err = devm_request_irq(&pdev->dev, irq, admac_interrupt,
+ 0, dev_name(&pdev->dev), ad);
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+ "unable to register interrupt\n");
+
+ ad->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ad->base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ad->base),
+ "unable to obtain MMIO resource\n");
+
+ dma = &ad->dma;
+
+ dma_cap_set(DMA_PRIVATE, dma->cap_mask);
+ dma_cap_set(DMA_CYCLIC, dma->cap_mask);
+
+ dma->dev = &pdev->dev;
+ dma->device_alloc_chan_resources = admac_alloc_chan_resources;
+ dma->device_free_chan_resources = admac_free_chan_resources;
+ dma->device_tx_status = admac_tx_status;
+ dma->device_issue_pending = admac_issue_pending;
+ dma->device_terminate_all = admac_terminate_all;
+ dma->device_synchronize = admac_synchronize;
+ dma->device_prep_dma_cyclic = admac_prep_dma_cyclic;
+ dma->device_config = admac_device_config;
+ dma->device_pause = admac_pause;
+ dma->device_resume = admac_resume;
+
+ dma->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
+ dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+
+ INIT_LIST_HEAD(&dma->channels);
+ for (i = 0; i < nchannels; i++) {
+ struct admac_chan *adchan = &ad->channels[i];
+
+ adchan->host = ad;
+ adchan->no = i;
+ adchan->chan.device = &ad->dma;
+ spin_lock_init(&adchan->lock);
+ INIT_LIST_HEAD(&adchan->submitted);
+ INIT_LIST_HEAD(&adchan->issued);
+ INIT_LIST_HEAD(&adchan->to_free);
+ list_add_tail(&adchan->chan.device_node, &dma->channels);
+ tasklet_setup(&adchan->tasklet, admac_chan_tasklet);
+ }
+
+ err = dma_async_device_register(&ad->dma);
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "failed to register DMA device\n");
+
+ err = of_dma_controller_register(pdev->dev.of_node, admac_dma_of_xlate, ad);
+ if (err) {
+ dma_async_device_unregister(&ad->dma);
+ return dev_err_probe(&pdev->dev, err, "failed to register with OF\n");
+ }
+
+ return 0;
+}
+
+static int admac_remove(struct platform_device *pdev)
+{
+ struct admac_data *ad = platform_get_drvdata(pdev);
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&ad->dma);
+
+ return 0;
+}
+
+static const struct of_device_id admac_of_match[] = {
+ { .compatible = "apple,admac", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, admac_of_match);
+
+static struct platform_driver apple_admac_driver = {
+ .driver = {
+ .name = "apple-admac",
+ .of_match_table = admac_of_match,
+ },
+ .probe = admac_probe,
+ .remove = admac_remove,
+};
+module_platform_driver(apple_admac_driver);
+
+MODULE_AUTHOR("Martin Povišer <povik+lin@cutebit.org>");
+MODULE_DESCRIPTION("Driver for Audio DMA Controller (ADMAC) on Apple SoCs");
+MODULE_LICENSE("GPL");
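
For orientation, a hedged sketch of how an audio client might drive one of these hardwired channels through the generic dmaengine API; the consumer device, the "tx" request name and the buffer parameters are assumptions, not something this driver defines:

#include <linux/dmaengine.h>

static int start_playback_dma(struct device *dev, dma_addr_t buf,
			      size_t buf_len, size_t period_len)
{
	struct dma_slave_config cfg = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;

	/* channel comes from the one-cell "dmas" specifier via admac_dma_of_xlate() */
	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	dmaengine_slave_config(chan, &cfg);

	/* cyclic transfer; per-period callbacks come from the DESC_DONE path above */
	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_release_channel(chan);
		return -EINVAL;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}
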
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 7b3e6030f7b4..b102d8eb5d83 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -649,7 +649,7 @@ static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
}
/*
- * Only check that maxburst and addr width values are supported by the
+ * Only check that maxburst and addr width values are supported by
* the controller but not that the configuration is good to perform the
* transfer since we don't know the direction at this stage.
*/
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 5161b73c30c4..f30dabc99795 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
+#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -55,6 +56,9 @@
#define AXI_DMAC_DMA_DST_TYPE_GET(x) FIELD_GET(AXI_DMAC_DMA_DST_TYPE_MSK, x)
#define AXI_DMAC_DMA_DST_WIDTH_MSK GENMASK(3, 0)
#define AXI_DMAC_DMA_DST_WIDTH_GET(x) FIELD_GET(AXI_DMAC_DMA_DST_WIDTH_MSK, x)
+#define AXI_DMAC_REG_COHERENCY_DESC 0x14
+#define AXI_DMAC_DST_COHERENT_MSK BIT(0)
+#define AXI_DMAC_DST_COHERENT_GET(x) FIELD_GET(AXI_DMAC_DST_COHERENT_MSK, x)
#define AXI_DMAC_REG_IRQ_MASK 0x80
#define AXI_DMAC_REG_IRQ_PENDING 0x84
@@ -979,6 +983,18 @@ static int axi_dmac_probe(struct platform_device *pdev)
axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);
+ if (of_dma_is_coherent(pdev->dev.of_node)) {
+ ret = axi_dmac_read(dmac, AXI_DMAC_REG_COHERENCY_DESC);
+
+ if (version < ADI_AXI_PCORE_VER(4, 4, 'a') ||
+ !AXI_DMAC_DST_COHERENT_GET(ret)) {
+ dev_err(dmac->dma_dev.dev,
+ "Coherent DMA not supported in hardware");
+ ret = -EINVAL;
+ goto err_clk_disable;
+ }
+ }
+
ret = dma_async_device_register(dma_dev);
if (ret)
goto err_clk_disable;
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index e2ec540e6519..2a483802d9ee 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -388,7 +388,7 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
if (i != (sg_len - 1) &&
!(jzdma->soc_data->flags & JZ_SOC_DATA_BREAK_LINKS)) {
- /* Automatically proceeed to the next descriptor. */
+ /* Automatically proceed to the next descriptor. */
desc->desc[i].dcm |= JZ_DMA_DCM_LINK;
/*
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index e80feeea0e01..c741b6431958 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1153,13 +1153,6 @@ int dma_async_device_register(struct dma_device *device)
return -EIO;
}
- if (dma_has_cap(DMA_MEMCPY_SG, device->cap_mask) && !device->device_prep_dma_memcpy_sg) {
- dev_err(device->dev,
- "Device claims capability %s, but op is not defined\n",
- "DMA_MEMCPY_SG");
- return -EIO;
- }
-
if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
dev_err(device->dev,
"Device claims capability %s, but op is not defined\n",
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index f696246f57fd..9fe2ae794316 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -22,51 +22,50 @@
#include <linux/wait.h>
static unsigned int test_buf_size = 16384;
-module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
+module_param(test_buf_size, uint, 0644);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
static char test_device[32];
-module_param_string(device, test_device, sizeof(test_device),
- S_IRUGO | S_IWUSR);
+module_param_string(device, test_device, sizeof(test_device), 0644);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
static unsigned int threads_per_chan = 1;
-module_param(threads_per_chan, uint, S_IRUGO | S_IWUSR);
+module_param(threads_per_chan, uint, 0644);
MODULE_PARM_DESC(threads_per_chan,
"Number of threads to start per channel (default: 1)");
static unsigned int max_channels;
-module_param(max_channels, uint, S_IRUGO | S_IWUSR);
+module_param(max_channels, uint, 0644);
MODULE_PARM_DESC(max_channels,
"Maximum number of channels to use (default: all)");
static unsigned int iterations;
-module_param(iterations, uint, S_IRUGO | S_IWUSR);
+module_param(iterations, uint, 0644);
MODULE_PARM_DESC(iterations,
"Iterations before stopping test (default: infinite)");
static unsigned int dmatest;
-module_param(dmatest, uint, S_IRUGO | S_IWUSR);
+module_param(dmatest, uint, 0644);
MODULE_PARM_DESC(dmatest,
"dmatest 0-memcpy 1-memset (default: 0)");
static unsigned int xor_sources = 3;
-module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
+module_param(xor_sources, uint, 0644);
MODULE_PARM_DESC(xor_sources,
"Number of xor source buffers (default: 3)");
static unsigned int pq_sources = 3;
-module_param(pq_sources, uint, S_IRUGO | S_IWUSR);
+module_param(pq_sources, uint, 0644);
MODULE_PARM_DESC(pq_sources,
"Number of p+q source buffers (default: 3)");
static int timeout = 3000;
-module_param(timeout, int, S_IRUGO | S_IWUSR);
+module_param(timeout, int, 0644);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
"Pass -1 for infinite timeout");
static bool noverify;
-module_param(noverify, bool, S_IRUGO | S_IWUSR);
+module_param(noverify, bool, 0644);
MODULE_PARM_DESC(noverify, "Disable data verification (default: verify)");
static bool norandom;
@@ -74,7 +73,7 @@ module_param(norandom, bool, 0644);
MODULE_PARM_DESC(norandom, "Disable random offset setup (default: random)");
static bool verbose;
-module_param(verbose, bool, S_IRUGO | S_IWUSR);
+module_param(verbose, bool, 0644);
MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)");
static int alignment = -1;
@@ -86,7 +85,7 @@ module_param(transfer_size, uint, 0644);
MODULE_PARM_DESC(transfer_size, "Optional custom transfer size in bytes (default: not used (0))");
static bool polled;
-module_param(polled, bool, S_IRUGO | S_IWUSR);
+module_param(polled, bool, 0644);
MODULE_PARM_DESC(polled, "Use polling for completion instead of interrupts");
/**
@@ -154,7 +153,7 @@ static const struct kernel_param_ops run_ops = {
.get = dmatest_run_get,
};
static bool dmatest_run;
-module_param_cb(run, &run_ops, &dmatest_run, S_IRUGO | S_IWUSR);
+module_param_cb(run, &run_ops, &dmatest_run, 0644);
MODULE_PARM_DESC(run, "Run the test (default: false)");
static int dmatest_chan_set(const char *val, const struct kernel_param *kp);
@@ -290,7 +289,7 @@ static const struct kernel_param_ops wait_ops = {
.get = dmatest_wait_get,
.set = param_set_bool,
};
-module_param_cb(wait, &wait_ops, &wait, S_IRUGO);
+module_param_cb(wait, &wait_ops, &wait, 0444);
MODULE_PARM_DESC(wait, "Wait for tests to complete (default: false)");
static bool dmatest_match_channel(struct dmatest_params *params,
@@ -579,10 +578,10 @@ static int dmatest_func(void *data)
unsigned int total_tests = 0;
dma_cookie_t cookie;
enum dma_status status;
- enum dma_ctrl_flags flags;
+ enum dma_ctrl_flags flags;
u8 *pq_coefs = NULL;
int ret;
- unsigned int buf_size;
+ unsigned int buf_size;
struct dmatest_data *src;
struct dmatest_data *dst;
int i;
@@ -1095,8 +1094,8 @@ static void add_threaded_test(struct dmatest_info *info)
/* Copy test parameters */
params->buf_size = test_buf_size;
- strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
- strlcpy(params->device, strim(test_device), sizeof(params->device));
+ strscpy(params->channel, strim(test_channel), sizeof(params->channel));
+ strscpy(params->device, strim(test_device), sizeof(params->device));
params->threads_per_chan = threads_per_chan;
params->max_channels = max_channels;
params->iterations = iterations;
@@ -1240,7 +1239,7 @@ static int dmatest_chan_set(const char *val, const struct kernel_param *kp)
dtc = list_last_entry(&info->channels,
struct dmatest_chan,
node);
- strlcpy(chan_reset_val,
+ strscpy(chan_reset_val,
dma_chan_name(dtc->chan),
sizeof(chan_reset_val));
ret = -EBUSY;
@@ -1263,14 +1262,14 @@ static int dmatest_chan_set(const char *val, const struct kernel_param *kp)
if ((strcmp(dma_chan_name(dtc->chan), strim(test_channel)) != 0)
&& (strcmp("", strim(test_channel)) != 0)) {
ret = -EINVAL;
- strlcpy(chan_reset_val, dma_chan_name(dtc->chan),
+ strscpy(chan_reset_val, dma_chan_name(dtc->chan),
sizeof(chan_reset_val));
goto add_chan_err;
}
} else {
/* Clear test_channel if no channels were added successfully */
- strlcpy(chan_reset_val, "", sizeof(chan_reset_val));
+ strscpy(chan_reset_val, "", sizeof(chan_reset_val));
ret = -EBUSY;
goto add_chan_err;
}
@@ -1295,7 +1294,7 @@ static int dmatest_chan_get(char *val, const struct kernel_param *kp)
mutex_lock(&info->lock);
if (!is_threaded_test_run(info) && !is_threaded_test_pending(info)) {
stop_threaded_test(info);
- strlcpy(test_channel, "", sizeof(test_channel));
+ strscpy(test_channel, "", sizeof(test_channel));
}
mutex_unlock(&info->lock);
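
Two mechanical conversions run through this file: S_IRUGO | S_IWUSR is simply the octal 0644 (and S_IRUGO alone is 0444), which checkpatch prefers, and strlcpy() is replaced by strscpy(), whose return value makes truncation explicit. A small sketch of the latter, with placeholder names:

#include <linux/string.h>
#include <linux/printk.h>

static void copy_channel_name(char *dst, size_t dst_len, const char *src)
{
	/*
	 * strscpy() returns the number of bytes copied, or -E2BIG if the
	 * source had to be truncated to fit; strlcpy() instead returned
	 * strlen(src), which callers rarely checked.
	 */
	ssize_t ret = strscpy(dst, src, dst_len);

	if (ret == -E2BIG)
		pr_warn("channel name '%s' truncated\n", src);
}
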
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index c741da02b67e..a183d93bd7e2 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -982,6 +982,11 @@ static int dw_axi_dma_chan_slave_config(struct dma_chan *dchan,
static void axi_chan_dump_lli(struct axi_dma_chan *chan,
struct axi_dma_hw_desc *desc)
{
+ if (!desc->lli) {
+ dev_err(dchan2dev(&chan->vc.chan), "NULL LLI\n");
+ return;
+ }
+
dev_err(dchan2dev(&chan->vc.chan),
"SAR: 0x%llx DAR: 0x%llx LLP: 0x%llx BTS 0x%x CTL: 0x%x:%08x",
le64_to_cpu(desc->lli->sar),
@@ -1049,6 +1054,11 @@ static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
/* The completed descriptor currently is in the head of vc list */
vd = vchan_next_desc(&chan->vc);
+ if (!vd) {
+ dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
+ axi_chan_name(chan));
+ goto out;
+ }
if (chan->cyclic) {
desc = vd_to_axi_desc(vd);
@@ -1078,6 +1088,7 @@ static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
axi_chan_start_first_queued(chan);
}
+out:
spin_unlock_irqrestore(&chan->vc.lock, flags);
}
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index 468d1097a1ec..07f756479663 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -64,8 +64,8 @@ static struct dw_edma_burst *dw_edma_alloc_burst(struct dw_edma_chunk *chunk)
static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
{
+ struct dw_edma_chip *chip = desc->chan->dw->chip;
struct dw_edma_chan *chan = desc->chan;
- struct dw_edma *dw = chan->chip->dw;
struct dw_edma_chunk *chunk;
chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT);
@@ -82,11 +82,11 @@ static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
*/
chunk->cb = !(desc->chunks_alloc % 2);
if (chan->dir == EDMA_DIR_WRITE) {
- chunk->ll_region.paddr = dw->ll_region_wr[chan->id].paddr;
- chunk->ll_region.vaddr = dw->ll_region_wr[chan->id].vaddr;
+ chunk->ll_region.paddr = chip->ll_region_wr[chan->id].paddr;
+ chunk->ll_region.vaddr = chip->ll_region_wr[chan->id].vaddr;
} else {
- chunk->ll_region.paddr = dw->ll_region_rd[chan->id].paddr;
- chunk->ll_region.vaddr = dw->ll_region_rd[chan->id].vaddr;
+ chunk->ll_region.paddr = chip->ll_region_rd[chan->id].paddr;
+ chunk->ll_region.vaddr = chip->ll_region_rd[chan->id].vaddr;
}
if (desc->chunk) {
@@ -339,21 +339,40 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
if (!chan->configured)
return NULL;
- switch (chan->config.direction) {
- case DMA_DEV_TO_MEM: /* local DMA */
- if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_READ)
- break;
- return NULL;
- case DMA_MEM_TO_DEV: /* local DMA */
- if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_WRITE)
- break;
- return NULL;
- default: /* remote DMA */
- if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_READ)
- break;
- if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_WRITE)
- break;
- return NULL;
+ /*
+ * Local Root Port/End-point Remote End-point
+ * +-----------------------+ PCIe bus +----------------------+
+ * | | +-+ | |
+ * | DEV_TO_MEM Rx Ch <----+ +---+ Tx Ch DEV_TO_MEM |
+ * | | | | | |
+ * | MEM_TO_DEV Tx Ch +----+ +---> Rx Ch MEM_TO_DEV |
+ * | | +-+ | |
+ * +-----------------------+ +----------------------+
+ *
+ * 1. Normal logic:
+ * If eDMA is embedded into the DW PCIe RP/EP and controlled from the
+ * CPU/Application side, the Rx channel (EDMA_DIR_READ) will be used
+ * for the device read operations (DEV_TO_MEM) and the Tx channel
+ * (EDMA_DIR_WRITE) - for the write operations (MEM_TO_DEV).
+ *
+ * 2. Inverted logic:
+ * If eDMA is embedded into a Remote PCIe EP and is controlled by the
+ * MWr/MRd TLPs sent from the CPU's PCIe host controller, the Tx
+ * channel (EDMA_DIR_WRITE) will be used for the device read operations
+ * (DEV_TO_MEM) and the Rx channel (EDMA_DIR_READ) - for the write
+ * operations (MEM_TO_DEV).
+ *
+ * It is the client driver's responsibility to choose a proper channel
+ * for the DMA transfers.
+ */
+ if (chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
+ if ((chan->dir == EDMA_DIR_READ && dir != DMA_DEV_TO_MEM) ||
+ (chan->dir == EDMA_DIR_WRITE && dir != DMA_MEM_TO_DEV))
+ return NULL;
+ } else {
+ if ((chan->dir == EDMA_DIR_WRITE && dir != DMA_DEV_TO_MEM) ||
+ (chan->dir == EDMA_DIR_READ && dir != DMA_MEM_TO_DEV))
+ return NULL;
}
if (xfer->type == EDMA_XFER_CYCLIC) {
@@ -423,7 +442,7 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
chunk->ll_region.sz += burst->sz;
desc->alloc_sz += burst->sz;
- if (chan->dir == EDMA_DIR_WRITE) {
+ if (dir == DMA_DEV_TO_MEM) {
burst->sar = src_addr;
if (xfer->type == EDMA_XFER_CYCLIC) {
burst->dar = xfer->xfer.cyclic.paddr;
@@ -663,7 +682,7 @@ static int dw_edma_alloc_chan_resources(struct dma_chan *dchan)
if (chan->status != EDMA_ST_IDLE)
return -EBUSY;
- pm_runtime_get(chan->chip->dev);
+ pm_runtime_get(chan->dw->chip->dev);
return 0;
}
@@ -685,15 +704,15 @@ static void dw_edma_free_chan_resources(struct dma_chan *dchan)
cpu_relax();
}
- pm_runtime_put(chan->chip->dev);
+ pm_runtime_put(chan->dw->chip->dev);
}
-static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
+static int dw_edma_channel_setup(struct dw_edma *dw, bool write,
u32 wr_alloc, u32 rd_alloc)
{
+ struct dw_edma_chip *chip = dw->chip;
struct dw_edma_region *dt_region;
struct device *dev = chip->dev;
- struct dw_edma *dw = chip->dw;
struct dw_edma_chan *chan;
struct dw_edma_irq *irq;
struct dma_device *dma;
@@ -726,7 +745,7 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
chan->vc.chan.private = dt_region;
- chan->chip = chip;
+ chan->dw = dw;
chan->id = j;
chan->dir = write ? EDMA_DIR_WRITE : EDMA_DIR_READ;
chan->configured = false;
@@ -734,9 +753,9 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
chan->status = EDMA_ST_IDLE;
if (write)
- chan->ll_max = (dw->ll_region_wr[j].sz / EDMA_LL_SZ);
+ chan->ll_max = (chip->ll_region_wr[j].sz / EDMA_LL_SZ);
else
- chan->ll_max = (dw->ll_region_rd[j].sz / EDMA_LL_SZ);
+ chan->ll_max = (chip->ll_region_rd[j].sz / EDMA_LL_SZ);
chan->ll_max -= 1;
dev_vdbg(dev, "L. List:\tChannel %s[%u] max_cnt=%u\n",
@@ -766,13 +785,13 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
vchan_init(&chan->vc, dma);
if (write) {
- dt_region->paddr = dw->dt_region_wr[j].paddr;
- dt_region->vaddr = dw->dt_region_wr[j].vaddr;
- dt_region->sz = dw->dt_region_wr[j].sz;
+ dt_region->paddr = chip->dt_region_wr[j].paddr;
+ dt_region->vaddr = chip->dt_region_wr[j].vaddr;
+ dt_region->sz = chip->dt_region_wr[j].sz;
} else {
- dt_region->paddr = dw->dt_region_rd[j].paddr;
- dt_region->vaddr = dw->dt_region_rd[j].vaddr;
- dt_region->sz = dw->dt_region_rd[j].sz;
+ dt_region->paddr = chip->dt_region_rd[j].paddr;
+ dt_region->vaddr = chip->dt_region_rd[j].vaddr;
+ dt_region->sz = chip->dt_region_rd[j].sz;
}
dw_edma_v0_core_device_config(chan);
@@ -826,11 +845,11 @@ static inline void dw_edma_add_irq_mask(u32 *mask, u32 alloc, u16 cnt)
(*mask)++;
}
-static int dw_edma_irq_request(struct dw_edma_chip *chip,
+static int dw_edma_irq_request(struct dw_edma *dw,
u32 *wr_alloc, u32 *rd_alloc)
{
- struct device *dev = chip->dev;
- struct dw_edma *dw = chip->dw;
+ struct dw_edma_chip *chip = dw->chip;
+ struct device *dev = dw->chip->dev;
u32 wr_mask = 1;
u32 rd_mask = 1;
int i, err = 0;
@@ -839,12 +858,16 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,
ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;
- if (dw->nr_irqs < 1)
+ if (chip->nr_irqs < 1 || !chip->ops->irq_vector)
return -EINVAL;
- if (dw->nr_irqs == 1) {
+ dw->irq = devm_kcalloc(dev, chip->nr_irqs, sizeof(*dw->irq), GFP_KERNEL);
+ if (!dw->irq)
+ return -ENOMEM;
+
+ if (chip->nr_irqs == 1) {
/* Common IRQ shared among all channels */
- irq = dw->ops->irq_vector(dev, 0);
+ irq = chip->ops->irq_vector(dev, 0);
err = request_irq(irq, dw_edma_interrupt_common,
IRQF_SHARED, dw->name, &dw->irq[0]);
if (err) {
@@ -854,9 +877,11 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,
if (irq_get_msi_desc(irq))
get_cached_msi_msg(irq, &dw->irq[0].msi);
+
+ dw->nr_irqs = 1;
} else {
/* Distribute IRQs equally among all channels */
- int tmp = dw->nr_irqs;
+ int tmp = chip->nr_irqs;
while (tmp && (*wr_alloc + *rd_alloc) < ch_cnt) {
dw_edma_dec_irq_alloc(&tmp, wr_alloc, dw->wr_ch_cnt);
@@ -867,7 +892,7 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,
dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt);
for (i = 0; i < (*wr_alloc + *rd_alloc); i++) {
- irq = dw->ops->irq_vector(dev, i);
+ irq = chip->ops->irq_vector(dev, i);
err = request_irq(irq,
i < *wr_alloc ?
dw_edma_interrupt_write :
@@ -901,20 +926,22 @@ int dw_edma_probe(struct dw_edma_chip *chip)
return -EINVAL;
dev = chip->dev;
- if (!dev)
+ if (!dev || !chip->ops)
return -EINVAL;
- dw = chip->dw;
- if (!dw || !dw->irq || !dw->ops || !dw->ops->irq_vector)
- return -EINVAL;
+ dw = devm_kzalloc(dev, sizeof(*dw), GFP_KERNEL);
+ if (!dw)
+ return -ENOMEM;
+
+ dw->chip = chip;
raw_spin_lock_init(&dw->lock);
- dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt,
+ dw->wr_ch_cnt = min_t(u16, chip->ll_wr_cnt,
dw_edma_v0_core_ch_count(dw, EDMA_DIR_WRITE));
dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt, EDMA_MAX_WR_CH);
- dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt,
+ dw->rd_ch_cnt = min_t(u16, chip->ll_rd_cnt,
dw_edma_v0_core_ch_count(dw, EDMA_DIR_READ));
dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt, EDMA_MAX_RD_CH);
@@ -936,17 +963,17 @@ int dw_edma_probe(struct dw_edma_chip *chip)
dw_edma_v0_core_off(dw);
/* Request IRQs */
- err = dw_edma_irq_request(chip, &wr_alloc, &rd_alloc);
+ err = dw_edma_irq_request(dw, &wr_alloc, &rd_alloc);
if (err)
return err;
/* Setup write channels */
- err = dw_edma_channel_setup(chip, true, wr_alloc, rd_alloc);
+ err = dw_edma_channel_setup(dw, true, wr_alloc, rd_alloc);
if (err)
goto err_irq_free;
/* Setup read channels */
- err = dw_edma_channel_setup(chip, false, wr_alloc, rd_alloc);
+ err = dw_edma_channel_setup(dw, false, wr_alloc, rd_alloc);
if (err)
goto err_irq_free;
@@ -954,15 +981,15 @@ int dw_edma_probe(struct dw_edma_chip *chip)
pm_runtime_enable(dev);
/* Turn debugfs on */
- dw_edma_v0_core_debugfs_on(chip);
+ dw_edma_v0_core_debugfs_on(dw);
+
+ chip->dw = dw;
return 0;
err_irq_free:
for (i = (dw->nr_irqs - 1); i >= 0; i--)
- free_irq(dw->ops->irq_vector(dev, i), &dw->irq[i]);
-
- dw->nr_irqs = 0;
+ free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]);
return err;
}
@@ -980,7 +1007,7 @@ int dw_edma_remove(struct dw_edma_chip *chip)
/* Free irqs */
for (i = (dw->nr_irqs - 1); i >= 0; i--)
- free_irq(dw->ops->irq_vector(dev, i), &dw->irq[i]);
+ free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]);
/* Power management */
pm_runtime_disable(dev);
@@ -1001,7 +1028,7 @@ int dw_edma_remove(struct dw_edma_chip *chip)
}
/* Turn debugfs off */
- dw_edma_v0_core_debugfs_off(chip);
+ dw_edma_v0_core_debugfs_off(dw);
return 0;
}
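
With this rework the eDMA core allocates struct dw_edma itself in dw_edma_probe(), so a controller driver only describes the hardware through struct dw_edma_chip. Below is a minimal sketch of that registration flow; it uses only the chip fields visible in this diff (ops, mf, nr_irqs, reg_base, ll_wr_cnt/ll_rd_cnt, the ll/dt region arrays and the DW_EDMA_CHIP_LOCAL flag), while the my_* names, the platform-device IRQ lookup and the single write channel are hypothetical, and the ops structure is assumed to keep its dw_edma_core_ops name in <linux/dma/edma.h>.

#include <linux/dma/edma.h>
#include <linux/platform_device.h>

static int my_edma_irq_vector(struct device *dev, unsigned int nr)
{
	/* Hypothetical helper: map a logical vector index to a Linux IRQ. */
	return platform_get_irq(to_platform_device(dev), nr);
}

static const struct dw_edma_core_ops my_edma_ops = {
	.irq_vector = my_edma_irq_vector,
};

static int my_edma_register(struct device *dev, void __iomem *regs,
			    const struct dw_edma_region *ll_wr,
			    const struct dw_edma_region *dt_wr)
{
	struct dw_edma_chip *chip;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->dev = dev;
	chip->ops = &my_edma_ops;		/* must provide irq_vector() */
	chip->mf = EDMA_MF_EDMA_UNROLL;		/* register map format */
	chip->nr_irqs = 1;			/* core now sizes dw->irq from this */
	chip->reg_base = regs;			/* replaces the old dw->rg_region */
	chip->flags = DW_EDMA_CHIP_LOCAL;	/* suppress remote (RIE) interrupts */
	chip->ll_wr_cnt = 1;
	chip->ll_rd_cnt = 1;
	chip->ll_region_wr[0] = *ll_wr;
	chip->dt_region_wr[0] = *dt_wr;
	/* read-side ll/dt regions would be filled in the same way */

	return dw_edma_probe(chip);		/* on success the core sets chip->dw */
}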
diff --git a/drivers/dma/dw-edma/dw-edma-core.h b/drivers/dma/dw-edma/dw-edma-core.h
index 60316d408c3e..85df2d511907 100644
--- a/drivers/dma/dw-edma/dw-edma-core.h
+++ b/drivers/dma/dw-edma/dw-edma-core.h
@@ -15,20 +15,12 @@
#include "../virt-dma.h"
#define EDMA_LL_SZ 24
-#define EDMA_MAX_WR_CH 8
-#define EDMA_MAX_RD_CH 8
enum dw_edma_dir {
EDMA_DIR_WRITE = 0,
EDMA_DIR_READ
};
-enum dw_edma_map_format {
- EDMA_MF_EDMA_LEGACY = 0x0,
- EDMA_MF_EDMA_UNROLL = 0x1,
- EDMA_MF_HDMA_COMPAT = 0x5
-};
-
enum dw_edma_request {
EDMA_REQ_NONE = 0,
EDMA_REQ_STOP,
@@ -57,12 +49,6 @@ struct dw_edma_burst {
u32 sz;
};
-struct dw_edma_region {
- phys_addr_t paddr;
- void __iomem *vaddr;
- size_t sz;
-};
-
struct dw_edma_chunk {
struct list_head list;
struct dw_edma_chan *chan;
@@ -87,7 +73,7 @@ struct dw_edma_desc {
struct dw_edma_chan {
struct virt_dma_chan vc;
- struct dw_edma_chip *chip;
+ struct dw_edma *dw;
int id;
enum dw_edma_dir dir;
@@ -109,10 +95,6 @@ struct dw_edma_irq {
struct dw_edma *dw;
};
-struct dw_edma_core_ops {
- int (*irq_vector)(struct device *dev, unsigned int nr);
-};
-
struct dw_edma {
char name[20];
@@ -122,21 +104,14 @@ struct dw_edma {
struct dma_device rd_edma;
u16 rd_ch_cnt;
- struct dw_edma_region rg_region; /* Registers */
- struct dw_edma_region ll_region_wr[EDMA_MAX_WR_CH];
- struct dw_edma_region ll_region_rd[EDMA_MAX_RD_CH];
- struct dw_edma_region dt_region_wr[EDMA_MAX_WR_CH];
- struct dw_edma_region dt_region_rd[EDMA_MAX_RD_CH];
-
struct dw_edma_irq *irq;
int nr_irqs;
- enum dw_edma_map_format mf;
-
struct dw_edma_chan *chan;
- const struct dw_edma_core_ops *ops;
raw_spinlock_t lock; /* Only for legacy */
+
+ struct dw_edma_chip *chip;
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs;
#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c
index cee7aa231d7b..d6b5e2463884 100644
--- a/drivers/dma/dw-edma/dw-edma-pcie.c
+++ b/drivers/dma/dw-edma/dw-edma-pcie.c
@@ -148,7 +148,6 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
struct dw_edma_pcie_data vsec_data;
struct device *dev = &pdev->dev;
struct dw_edma_chip *chip;
- struct dw_edma *dw;
int err, nr_irqs;
int i, mask;
@@ -197,10 +196,6 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
if (!chip)
return -ENOMEM;
- dw = devm_kzalloc(dev, sizeof(*dw), GFP_KERNEL);
- if (!dw)
- return -ENOMEM;
-
/* IRQs allocation */
nr_irqs = pci_alloc_irq_vectors(pdev, 1, vsec_data.irqs,
PCI_IRQ_MSI | PCI_IRQ_MSIX);
@@ -211,29 +206,23 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
}
/* Data structure initialization */
- chip->dw = dw;
chip->dev = dev;
chip->id = pdev->devfn;
- chip->irq = pdev->irq;
- dw->mf = vsec_data.mf;
- dw->nr_irqs = nr_irqs;
- dw->ops = &dw_edma_pcie_core_ops;
- dw->wr_ch_cnt = vsec_data.wr_ch_cnt;
- dw->rd_ch_cnt = vsec_data.rd_ch_cnt;
+ chip->mf = vsec_data.mf;
+ chip->nr_irqs = nr_irqs;
+ chip->ops = &dw_edma_pcie_core_ops;
- dw->rg_region.vaddr = pcim_iomap_table(pdev)[vsec_data.rg.bar];
- if (!dw->rg_region.vaddr)
- return -ENOMEM;
+ chip->ll_wr_cnt = vsec_data.wr_ch_cnt;
+ chip->ll_rd_cnt = vsec_data.rd_ch_cnt;
- dw->rg_region.vaddr += vsec_data.rg.off;
- dw->rg_region.paddr = pdev->resource[vsec_data.rg.bar].start;
- dw->rg_region.paddr += vsec_data.rg.off;
- dw->rg_region.sz = vsec_data.rg.sz;
+ chip->reg_base = pcim_iomap_table(pdev)[vsec_data.rg.bar];
+ if (!chip->reg_base)
+ return -ENOMEM;
- for (i = 0; i < dw->wr_ch_cnt; i++) {
- struct dw_edma_region *ll_region = &dw->ll_region_wr[i];
- struct dw_edma_region *dt_region = &dw->dt_region_wr[i];
+ for (i = 0; i < chip->ll_wr_cnt; i++) {
+ struct dw_edma_region *ll_region = &chip->ll_region_wr[i];
+ struct dw_edma_region *dt_region = &chip->dt_region_wr[i];
struct dw_edma_block *ll_block = &vsec_data.ll_wr[i];
struct dw_edma_block *dt_block = &vsec_data.dt_wr[i];
@@ -256,9 +245,9 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
dt_region->sz = dt_block->sz;
}
- for (i = 0; i < dw->rd_ch_cnt; i++) {
- struct dw_edma_region *ll_region = &dw->ll_region_rd[i];
- struct dw_edma_region *dt_region = &dw->dt_region_rd[i];
+ for (i = 0; i < chip->ll_rd_cnt; i++) {
+ struct dw_edma_region *ll_region = &chip->ll_region_rd[i];
+ struct dw_edma_region *dt_region = &chip->dt_region_rd[i];
struct dw_edma_block *ll_block = &vsec_data.ll_rd[i];
struct dw_edma_block *dt_block = &vsec_data.dt_rd[i];
@@ -282,45 +271,45 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
}
/* Debug info */
- if (dw->mf == EDMA_MF_EDMA_LEGACY)
- pci_dbg(pdev, "Version:\teDMA Port Logic (0x%x)\n", dw->mf);
- else if (dw->mf == EDMA_MF_EDMA_UNROLL)
- pci_dbg(pdev, "Version:\teDMA Unroll (0x%x)\n", dw->mf);
- else if (dw->mf == EDMA_MF_HDMA_COMPAT)
- pci_dbg(pdev, "Version:\tHDMA Compatible (0x%x)\n", dw->mf);
+ if (chip->mf == EDMA_MF_EDMA_LEGACY)
+ pci_dbg(pdev, "Version:\teDMA Port Logic (0x%x)\n", chip->mf);
+ else if (chip->mf == EDMA_MF_EDMA_UNROLL)
+ pci_dbg(pdev, "Version:\teDMA Unroll (0x%x)\n", chip->mf);
+ else if (chip->mf == EDMA_MF_HDMA_COMPAT)
+ pci_dbg(pdev, "Version:\tHDMA Compatible (0x%x)\n", chip->mf);
else
- pci_dbg(pdev, "Version:\tUnknown (0x%x)\n", dw->mf);
+ pci_dbg(pdev, "Version:\tUnknown (0x%x)\n", chip->mf);
- pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
+ pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p)\n",
vsec_data.rg.bar, vsec_data.rg.off, vsec_data.rg.sz,
- dw->rg_region.vaddr, &dw->rg_region.paddr);
+ chip->reg_base);
- for (i = 0; i < dw->wr_ch_cnt; i++) {
+ for (i = 0; i < chip->ll_wr_cnt; i++) {
pci_dbg(pdev, "L. List:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
i, vsec_data.ll_wr[i].bar,
- vsec_data.ll_wr[i].off, dw->ll_region_wr[i].sz,
- dw->ll_region_wr[i].vaddr, &dw->ll_region_wr[i].paddr);
+ vsec_data.ll_wr[i].off, chip->ll_region_wr[i].sz,
+ chip->ll_region_wr[i].vaddr, &chip->ll_region_wr[i].paddr);
pci_dbg(pdev, "Data:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
i, vsec_data.dt_wr[i].bar,
- vsec_data.dt_wr[i].off, dw->dt_region_wr[i].sz,
- dw->dt_region_wr[i].vaddr, &dw->dt_region_wr[i].paddr);
+ vsec_data.dt_wr[i].off, chip->dt_region_wr[i].sz,
+ chip->dt_region_wr[i].vaddr, &chip->dt_region_wr[i].paddr);
}
- for (i = 0; i < dw->rd_ch_cnt; i++) {
+ for (i = 0; i < chip->ll_rd_cnt; i++) {
pci_dbg(pdev, "L. List:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
i, vsec_data.ll_rd[i].bar,
- vsec_data.ll_rd[i].off, dw->ll_region_rd[i].sz,
- dw->ll_region_rd[i].vaddr, &dw->ll_region_rd[i].paddr);
+ vsec_data.ll_rd[i].off, chip->ll_region_rd[i].sz,
+ chip->ll_region_rd[i].vaddr, &chip->ll_region_rd[i].paddr);
pci_dbg(pdev, "Data:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
i, vsec_data.dt_rd[i].bar,
- vsec_data.dt_rd[i].off, dw->dt_region_rd[i].sz,
- dw->dt_region_rd[i].vaddr, &dw->dt_region_rd[i].paddr);
+ vsec_data.dt_rd[i].off, chip->dt_region_rd[i].sz,
+ chip->dt_region_rd[i].vaddr, &chip->dt_region_rd[i].paddr);
}
- pci_dbg(pdev, "Nr. IRQs:\t%u\n", dw->nr_irqs);
+ pci_dbg(pdev, "Nr. IRQs:\t%u\n", chip->nr_irqs);
/* Validating if PCI interrupts were enabled */
if (!pci_dev_msi_enabled(pdev)) {
@@ -328,10 +317,6 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
return -EPERM;
}
- dw->irq = devm_kcalloc(dev, nr_irqs, sizeof(*dw->irq), GFP_KERNEL);
- if (!dw->irq)
- return -ENOMEM;
-
/* Starting eDMA driver */
err = dw_edma_probe(chip);
if (err) {
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c
index 33bc1e6c4cf2..77e6cfe52e0a 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-core.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.c
@@ -25,7 +25,7 @@ enum dw_edma_control {
static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
{
- return dw->rg_region.vaddr;
+ return dw->chip->reg_base;
}
#define SET_32(dw, name, value) \
@@ -96,7 +96,7 @@ static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
static inline struct dw_edma_v0_ch_regs __iomem *
__dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch)
{
- if (dw->mf == EDMA_MF_EDMA_LEGACY)
+ if (dw->chip->mf == EDMA_MF_EDMA_LEGACY)
return &(__dw_regs(dw)->type.legacy.ch);
if (dir == EDMA_DIR_WRITE)
@@ -108,7 +108,7 @@ __dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch)
static inline void writel_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
u32 value, void __iomem *addr)
{
- if (dw->mf == EDMA_MF_EDMA_LEGACY) {
+ if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
u32 viewport_sel;
unsigned long flags;
@@ -133,7 +133,7 @@ static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
{
u32 value;
- if (dw->mf == EDMA_MF_EDMA_LEGACY) {
+ if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
u32 viewport_sel;
unsigned long flags;
@@ -169,7 +169,7 @@ static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
static inline void writeq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
u64 value, void __iomem *addr)
{
- if (dw->mf == EDMA_MF_EDMA_LEGACY) {
+ if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
u32 viewport_sel;
unsigned long flags;
@@ -194,7 +194,7 @@ static inline u64 readq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
{
u32 value;
- if (dw->mf == EDMA_MF_EDMA_LEGACY) {
+ if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
u32 viewport_sel;
unsigned long flags;
@@ -256,7 +256,7 @@ u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan)
{
- struct dw_edma *dw = chan->chip->dw;
+ struct dw_edma *dw = chan->dw;
u32 tmp;
tmp = FIELD_GET(EDMA_V0_CH_STATUS_MASK,
@@ -272,7 +272,7 @@ enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan)
void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan)
{
- struct dw_edma *dw = chan->chip->dw;
+ struct dw_edma *dw = chan->dw;
SET_RW_32(dw, chan->dir, int_clear,
FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id)));
@@ -280,7 +280,7 @@ void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan)
void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan)
{
- struct dw_edma *dw = chan->chip->dw;
+ struct dw_edma *dw = chan->dw;
SET_RW_32(dw, chan->dir, int_clear,
FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id)));
@@ -301,6 +301,7 @@ u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir)
static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
{
struct dw_edma_burst *child;
+ struct dw_edma_chan *chan = chunk->chan;
struct dw_edma_v0_lli __iomem *lli;
struct dw_edma_v0_llp __iomem *llp;
u32 control = 0, i = 0;
@@ -314,9 +315,11 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
j = chunk->bursts_alloc;
list_for_each_entry(child, &chunk->burst->list, list) {
j--;
- if (!j)
- control |= (DW_EDMA_V0_LIE | DW_EDMA_V0_RIE);
-
+ if (!j) {
+ control |= DW_EDMA_V0_LIE;
+ if (!(chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
+ control |= DW_EDMA_V0_RIE;
+ }
/* Channel control */
SET_LL_32(&lli[i].control, control);
/* Transfer size */
@@ -357,7 +360,7 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
{
struct dw_edma_chan *chan = chunk->chan;
- struct dw_edma *dw = chan->chip->dw;
+ struct dw_edma *dw = chan->dw;
u32 tmp;
dw_edma_v0_core_write_chunk(chunk);
@@ -365,7 +368,7 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
if (first) {
/* Enable engine */
SET_RW_32(dw, chan->dir, engine_en, BIT(0));
- if (dw->mf == EDMA_MF_HDMA_COMPAT) {
+ if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) {
switch (chan->id) {
case 0:
SET_RW_COMPAT(dw, chan->dir, ch0_pwr_en,
@@ -414,19 +417,11 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
SET_CH_32(dw, chan->dir, chan->id, ch_control1,
(DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
/* Linked list */
-
- #ifdef CONFIG_64BIT
/* llp is not aligned on 64bit -> keep 32bit accesses */
SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
lower_32_bits(chunk->ll_region.paddr));
SET_CH_32(dw, chan->dir, chan->id, llp.msb,
upper_32_bits(chunk->ll_region.paddr));
- #else /* CONFIG_64BIT */
- SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
- lower_32_bits(chunk->ll_region.paddr));
- SET_CH_32(dw, chan->dir, chan->id, llp.msb,
- upper_32_bits(chunk->ll_region.paddr));
- #endif /* CONFIG_64BIT */
}
/* Doorbell */
SET_RW_32(dw, chan->dir, doorbell,
@@ -435,7 +430,7 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
int dw_edma_v0_core_device_config(struct dw_edma_chan *chan)
{
- struct dw_edma *dw = chan->chip->dw;
+ struct dw_edma *dw = chan->dw;
u32 tmp = 0;
/* MSI done addr - low, high */
@@ -505,12 +500,12 @@ int dw_edma_v0_core_device_config(struct dw_edma_chan *chan)
}
/* eDMA debugfs callbacks */
-void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip)
+void dw_edma_v0_core_debugfs_on(struct dw_edma *dw)
{
- dw_edma_v0_debugfs_on(chip);
+ dw_edma_v0_debugfs_on(dw);
}
-void dw_edma_v0_core_debugfs_off(struct dw_edma_chip *chip)
+void dw_edma_v0_core_debugfs_off(struct dw_edma *dw)
{
- dw_edma_v0_debugfs_off(chip);
+ dw_edma_v0_debugfs_off(dw);
}
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.h b/drivers/dma/dw-edma/dw-edma-v0-core.h
index 2afa626b8300..75aec6d31b21 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-core.h
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.h
@@ -22,7 +22,7 @@ u32 dw_edma_v0_core_status_abort_int(struct dw_edma *chan, enum dw_edma_dir dir)
void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first);
int dw_edma_v0_core_device_config(struct dw_edma_chan *chan);
/* eDMA debug fs callbacks */
-void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip);
-void dw_edma_v0_core_debugfs_off(struct dw_edma_chip *chip);
+void dw_edma_v0_core_debugfs_on(struct dw_edma *dw);
+void dw_edma_v0_core_debugfs_off(struct dw_edma *dw);
#endif /* _DW_EDMA_V0_CORE_H */
diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
index 4b3bcffd15ef..5226c9014703 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
@@ -54,7 +54,7 @@ struct debugfs_entries {
static int dw_edma_debugfs_u32_get(void *data, u64 *val)
{
void __iomem *reg = (void __force __iomem *)data;
- if (dw->mf == EDMA_MF_EDMA_LEGACY &&
+ if (dw->chip->mf == EDMA_MF_EDMA_LEGACY &&
reg >= (void __iomem *)&regs->type.legacy.ch) {
void __iomem *ptr = &regs->type.legacy.ch;
u32 viewport_sel = 0;
@@ -173,7 +173,7 @@ static void dw_edma_debugfs_regs_wr(struct dentry *dir)
nr_entries = ARRAY_SIZE(debugfs_regs);
dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
- if (dw->mf == EDMA_MF_HDMA_COMPAT) {
+ if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) {
nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
regs_dir);
@@ -242,7 +242,7 @@ static void dw_edma_debugfs_regs_rd(struct dentry *dir)
nr_entries = ARRAY_SIZE(debugfs_regs);
dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
- if (dw->mf == EDMA_MF_HDMA_COMPAT) {
+ if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) {
nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
regs_dir);
@@ -282,13 +282,13 @@ static void dw_edma_debugfs_regs(void)
dw_edma_debugfs_regs_rd(regs_dir);
}
-void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
+void dw_edma_v0_debugfs_on(struct dw_edma *_dw)
{
- dw = chip->dw;
+ dw = _dw;
if (!dw)
return;
- regs = dw->rg_region.vaddr;
+ regs = dw->chip->reg_base;
if (!regs)
return;
@@ -296,16 +296,16 @@ void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
if (!dw->debugfs)
return;
- debugfs_create_u32("mf", 0444, dw->debugfs, &dw->mf);
+ debugfs_create_u32("mf", 0444, dw->debugfs, &dw->chip->mf);
debugfs_create_u16("wr_ch_cnt", 0444, dw->debugfs, &dw->wr_ch_cnt);
debugfs_create_u16("rd_ch_cnt", 0444, dw->debugfs, &dw->rd_ch_cnt);
dw_edma_debugfs_regs();
}
-void dw_edma_v0_debugfs_off(struct dw_edma_chip *chip)
+void dw_edma_v0_debugfs_off(struct dw_edma *_dw)
{
- dw = chip->dw;
+ dw = _dw;
if (!dw)
return;
diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.h b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h
index d0ff25a9ea5c..3391b86edf5a 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-debugfs.h
+++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h
@@ -12,14 +12,14 @@
#include <linux/dma/edma.h>
#ifdef CONFIG_DEBUG_FS
-void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip);
-void dw_edma_v0_debugfs_off(struct dw_edma_chip *chip);
+void dw_edma_v0_debugfs_on(struct dw_edma *dw);
+void dw_edma_v0_debugfs_off(struct dw_edma *dw);
#else
-static inline void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
+static inline void dw_edma_v0_debugfs_on(struct dw_edma *dw)
{
}
-static inline void dw_edma_v0_debugfs_off(struct dw_edma_chip *chip)
+static inline void dw_edma_v0_debugfs_off(struct dw_edma *dw)
{
}
#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/dma/dw/rzn1-dmamux.c b/drivers/dma/dw/rzn1-dmamux.c
index 11d254e450b0..f9912c3dd4d7 100644
--- a/drivers/dma/dw/rzn1-dmamux.c
+++ b/drivers/dma/dw/rzn1-dmamux.c
@@ -102,10 +102,12 @@ free_map:
return ERR_PTR(ret);
}
+#ifdef CONFIG_OF
static const struct of_device_id rzn1_dmac_match[] = {
{ .compatible = "renesas,rzn1-dma" },
{}
};
+#endif
static int rzn1_dmamux_probe(struct platform_device *pdev)
{
@@ -140,6 +142,7 @@ static const struct of_device_id rzn1_dmamux_match[] = {
{ .compatible = "renesas,rzn1-dmamux" },
{}
};
+MODULE_DEVICE_TABLE(of, rzn1_dmamux_match);
static struct platform_driver rzn1_dmamux_driver = {
.driver = {
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 971ff5f9ae84..d19ea885c63e 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -1183,7 +1183,7 @@ fail:
*
* Synchronizes the DMA channel termination to the current context. When this
* function returns it is guaranteed that all transfers for previously issued
- * descriptors have stopped and and it is safe to free the memory associated
+ * descriptors have stopped and it is safe to free the memory associated
* with them. Furthermore it is guaranteed that all complete callback functions
* for a previously submitted descriptor have finished running and it is safe to
* free resources accessed from within the complete callbacks.
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index 3ae05d1446a5..a06a1575a2a5 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -559,9 +559,6 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
}
for_each_sg(sgl, sg, sg_len, i) {
- /* get next sg's physical address */
- last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
-
if (direction == DMA_MEM_TO_DEV) {
src_addr = sg_dma_address(sg);
dst_addr = fsl_chan->dma_dev_addr;
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 3bffe3ecbd1b..65c6094ce063 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -1047,7 +1047,7 @@ static int __init imxdma_probe(struct platform_device *pdev)
return -ENOMEM;
imxdma->dev = &pdev->dev;
- imxdma->devtype = (enum imx_dma_type)of_device_get_match_data(&pdev->dev);
+ imxdma->devtype = (uintptr_t)of_device_get_match_data(&pdev->dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
imxdma->base = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index f37a276f519e..fbea5f62dd98 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -183,12 +183,14 @@
BIT(DMA_DEV_TO_DEV))
#define SDMA_WATERMARK_LEVEL_N_FIFOS GENMASK(15, 12)
+#define SDMA_WATERMARK_LEVEL_OFF_FIFOS GENMASK(19, 16)
+#define SDMA_WATERMARK_LEVEL_WORDS_PER_FIFO GENMASK(31, 28)
#define SDMA_WATERMARK_LEVEL_SW_DONE BIT(23)
#define SDMA_DONE0_CONFIG_DONE_SEL BIT(7)
#define SDMA_DONE0_CONFIG_DONE_DIS BIT(6)
-/**
+/*
* struct sdma_script_start_addrs - SDMA script start pointers
*
* start addresses of the different functions in the physical
@@ -424,6 +426,14 @@ struct sdma_desc {
* @data: specific sdma interface structure
* @bd_pool: dma_pool for bd
* @terminate_worker: used to call back into terminate work function
+ * @terminated: terminated list
+ * @is_ram_script: flag for script in ram
+ * @n_fifos_src: number of source device fifos
+ * @n_fifos_dst: number of destination device fifos
+ * @sw_done: software done flag
+ * @stride_fifos_src: stride for source device FIFOs
+ * @stride_fifos_dst: stride for destination device FIFOs
+ * @words_per_fifo: number of words to copy at a time per FIFO
*/
struct sdma_channel {
struct virt_dma_chan vc;
@@ -451,6 +461,9 @@ struct sdma_channel {
bool is_ram_script;
unsigned int n_fifos_src;
unsigned int n_fifos_dst;
+ unsigned int stride_fifos_src;
+ unsigned int stride_fifos_dst;
+ unsigned int words_per_fifo;
bool sw_done;
};
@@ -1240,17 +1253,29 @@ static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
static void sdma_set_watermarklevel_for_sais(struct sdma_channel *sdmac)
{
unsigned int n_fifos;
+ unsigned int stride_fifos;
+ unsigned int words_per_fifo;
if (sdmac->sw_done)
sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SW_DONE;
- if (sdmac->direction == DMA_DEV_TO_MEM)
+ if (sdmac->direction == DMA_DEV_TO_MEM) {
n_fifos = sdmac->n_fifos_src;
- else
+ stride_fifos = sdmac->stride_fifos_src;
+ } else {
n_fifos = sdmac->n_fifos_dst;
+ stride_fifos = sdmac->stride_fifos_dst;
+ }
+
+ words_per_fifo = sdmac->words_per_fifo;
sdmac->watermark_level |=
FIELD_PREP(SDMA_WATERMARK_LEVEL_N_FIFOS, n_fifos);
+ sdmac->watermark_level |=
+ FIELD_PREP(SDMA_WATERMARK_LEVEL_OFF_FIFOS, stride_fifos);
+ if (words_per_fifo)
+ sdmac->watermark_level |=
+ FIELD_PREP(SDMA_WATERMARK_LEVEL_WORDS_PER_FIFO, (words_per_fifo - 1));
}
static int sdma_config_channel(struct dma_chan *chan)
@@ -1764,6 +1789,9 @@ static int sdma_config(struct dma_chan *chan,
}
sdmac->n_fifos_src = sdmacfg->n_fifos_src;
sdmac->n_fifos_dst = sdmacfg->n_fifos_dst;
+ sdmac->stride_fifos_src = sdmacfg->stride_fifos_src;
+ sdmac->stride_fifos_dst = sdmacfg->stride_fifos_dst;
+ sdmac->words_per_fifo = sdmacfg->words_per_fifo;
sdmac->sw_done = sdmacfg->sw_done;
}
@@ -2183,8 +2211,8 @@ static int sdma_probe(struct platform_device *pdev)
if (ret)
goto err_clk;
- ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma",
- sdma);
+ ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0,
+ dev_name(&pdev->dev), sdma);
if (ret)
goto err_irq;
diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c
index f8847c48ba03..9ae92b8940ef 100644
--- a/drivers/dma/mediatek/mtk-cqdma.c
+++ b/drivers/dma/mediatek/mtk-cqdma.c
@@ -373,7 +373,7 @@ static void mtk_cqdma_tasklet_cb(struct tasklet_struct *t)
/*
* free child CVD after completion.
- * the parent CVD would be freeed with desc_free by user.
+ * the parent CVD would be freed with desc_free by user.
*/
if (cvd->parent != cvd)
kfree(cvd);
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
index 9ebd9231f62f..f7717c44b887 100644
--- a/drivers/dma/mediatek/mtk-hsdma.c
+++ b/drivers/dma/mediatek/mtk-hsdma.c
@@ -138,7 +138,7 @@ struct mtk_hsdma_vdesc {
/**
* struct mtk_hsdma_cb - This is the struct holding extra info required for RX
- * ring to know what relevant VD the the PD is being
+ * ring to know what relevant VD the PD is being
* mapped to.
* @vd: Pointer to the relevant VD.
* @flag: Flag indicating what action should be taken when VD
@@ -761,7 +761,7 @@ static void mtk_hsdma_free_active_desc(struct dma_chan *c)
/*
* Once issue_synchronize is being set, which means once the hardware
* consumes all descriptors for the channel in the ring, the
- * synchronization must be be notified immediately it is completed.
+ * synchronization must be notified immediately it is completed.
*/
spin_lock(&hvc->vc.lock);
if (!list_empty(&hvc->desc_hw_processing)) {
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index f10b29034da1..f629ef6fd3c2 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -313,7 +313,7 @@ mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
"%s sw_desc %p: async_tx %p\n",
__func__, sw_desc, &sw_desc->async_tx);
- /* assign coookie */
+ /* assign cookie */
spin_lock_bh(&xor_dev->lock);
cookie = dma_cookie_assign(tx);
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index 1f0bbaed4643..95a462a1f511 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -193,7 +193,7 @@ struct owl_dma_pchan {
/**
* struct owl_dma_pchan - Wrapper for DMA ENGINE channel
- * @vc: wrappped virtual channel
+ * @vc: wrapped virtual channel
* @pchan: the physical channel utilized by this channel
* @txd: active transaction on this channel
* @cfg: slave configuration for this channel
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 8e14c72d03f0..f6ed7e889781 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -202,7 +202,7 @@ struct s3c24xx_dma_phy {
* struct s3c24xx_dma_chan - this structure wraps a DMA ENGINE channel
* @id: the id of the channel
* @name: name of the channel
- * @vc: wrappped virtual channel
+ * @vc: wrapped virtual channel
* @phy: the physical channel utilized by this channel, if there is one
* @runtime_addr: address for RX/TX according to the runtime config
* @at: active transaction on this channel
diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c
index db5a4ef76077..4f8b8498c5c6 100644
--- a/drivers/dma/sf-pdma/sf-pdma.c
+++ b/drivers/dma/sf-pdma/sf-pdma.c
@@ -52,16 +52,6 @@ static inline struct sf_pdma_desc *to_sf_pdma_desc(struct virt_dma_desc *vd)
static struct sf_pdma_desc *sf_pdma_alloc_desc(struct sf_pdma_chan *chan)
{
struct sf_pdma_desc *desc;
- unsigned long flags;
-
- spin_lock_irqsave(&chan->lock, flags);
-
- if (chan->desc && !chan->desc->in_use) {
- spin_unlock_irqrestore(&chan->lock, flags);
- return chan->desc;
- }
-
- spin_unlock_irqrestore(&chan->lock, flags);
desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
if (!desc)
@@ -111,7 +101,6 @@ sf_pdma_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dest, dma_addr_t src,
desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
spin_lock_irqsave(&chan->vchan.lock, iflags);
- chan->desc = desc;
sf_pdma_fill_desc(desc, dest, src, len);
spin_unlock_irqrestore(&chan->vchan.lock, iflags);
@@ -170,11 +159,17 @@ static size_t sf_pdma_desc_residue(struct sf_pdma_chan *chan,
unsigned long flags;
u64 residue = 0;
struct sf_pdma_desc *desc;
- struct dma_async_tx_descriptor *tx;
+ struct dma_async_tx_descriptor *tx = NULL;
spin_lock_irqsave(&chan->vchan.lock, flags);
- tx = &chan->desc->vdesc.tx;
+ list_for_each_entry(vd, &chan->vchan.desc_submitted, node)
+ if (vd->tx.cookie == cookie)
+ tx = &vd->tx;
+
+ if (!tx)
+ goto out;
+
if (cookie == tx->chan->completed_cookie)
goto out;
@@ -241,6 +236,19 @@ static void sf_pdma_enable_request(struct sf_pdma_chan *chan)
writel(v, regs->ctrl);
}
+static struct sf_pdma_desc *sf_pdma_get_first_pending_desc(struct sf_pdma_chan *chan)
+{
+ struct virt_dma_chan *vchan = &chan->vchan;
+ struct virt_dma_desc *vdesc;
+
+ if (list_empty(&vchan->desc_issued))
+ return NULL;
+
+ vdesc = list_first_entry(&vchan->desc_issued, struct virt_dma_desc, node);
+
+ return container_of(vdesc, struct sf_pdma_desc, vdesc);
+}
+
static void sf_pdma_xfer_desc(struct sf_pdma_chan *chan)
{
struct sf_pdma_desc *desc = chan->desc;
@@ -268,8 +276,11 @@ static void sf_pdma_issue_pending(struct dma_chan *dchan)
spin_lock_irqsave(&chan->vchan.lock, flags);
- if (vchan_issue_pending(&chan->vchan) && chan->desc)
+ if (!chan->desc && vchan_issue_pending(&chan->vchan)) {
+ /* vchan_issue_pending has made a check that desc is not NULL */
+ chan->desc = sf_pdma_get_first_pending_desc(chan);
sf_pdma_xfer_desc(chan);
+ }
spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
@@ -298,6 +309,11 @@ static void sf_pdma_donebh_tasklet(struct tasklet_struct *t)
spin_lock_irqsave(&chan->vchan.lock, flags);
list_del(&chan->desc->vdesc.node);
vchan_cookie_complete(&chan->desc->vdesc);
+
+ chan->desc = sf_pdma_get_first_pending_desc(chan);
+ if (chan->desc)
+ sf_pdma_xfer_desc(chan);
+
spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c
index ee2872e7d64c..476847a4916b 100644
--- a/drivers/dma/sh/rz-dmac.c
+++ b/drivers/dma/sh/rz-dmac.c
@@ -12,6 +12,7 @@
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
+#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -630,6 +631,21 @@ static void rz_dmac_virt_desc_free(struct virt_dma_desc *vd)
*/
}
+static void rz_dmac_device_synchronize(struct dma_chan *chan)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ u32 chstat;
+ int ret;
+
+ ret = read_poll_timeout(rz_dmac_ch_readl, chstat, !(chstat & CHSTAT_EN),
+ 100, 100000, false, channel, CHSTAT, 1);
+ if (ret < 0)
+ dev_warn(dmac->dev, "DMA Timeout");
+
+ rz_dmac_set_dmars_register(dmac, channel->index, 0);
+}
+
/*
* -----------------------------------------------------------------------------
* IRQ handling
@@ -909,6 +925,7 @@ static int rz_dmac_probe(struct platform_device *pdev)
engine->device_config = rz_dmac_config;
engine->device_terminate_all = rz_dmac_terminate_all;
engine->device_issue_pending = rz_dmac_issue_pending;
+ engine->device_synchronize = rz_dmac_device_synchronize;
engine->copy_align = DMAENGINE_ALIGN_1_BYTE;
dma_set_max_seg_size(engine->dev, U32_MAX);
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index 2138b80435ab..474d3ba8ec9f 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -1237,11 +1237,8 @@ static int sprd_dma_remove(struct platform_device *pdev)
{
struct sprd_dma_dev *sdev = platform_get_drvdata(pdev);
struct sprd_dma_chn *c, *cn;
- int ret;
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0)
- return ret;
+ pm_runtime_get_sync(&pdev->dev);
/* explicitly free the irq */
if (sdev->irq > 0)
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index e1827393143f..f093e08c23b1 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1970,7 +1970,7 @@ static int d40_config_memcpy(struct d40_chan *d40c)
dma_has_cap(DMA_SLAVE, cap)) {
d40c->dma_cfg = dma40_memcpy_conf_phy;
- /* Generate interrrupt at end of transfer or relink. */
+ /* Generate interrupt at end of transfer or relink. */
d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS);
/* Generate interrupt on error. */
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index caf0cce8f528..b11927ed4367 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -1328,12 +1328,7 @@ static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
return IRQ_NONE;
}
id = __ffs(status);
-
chan = &dmadev->chan[id];
- if (!chan) {
- dev_warn(mdma2dev(dmadev), "MDMA channel not initialized\n");
- return IRQ_NONE;
- }
/* Handle interrupt for the channel */
spin_lock(&chan->vchan.lock);
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index 93f1645ae928..f291b1b4db32 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -7,6 +7,7 @@
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/clk.h>
+#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
@@ -122,6 +123,15 @@
SUN4I_DDMA_PARA_DST_WAIT_CYCLES(2) | \
SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(2))
+/*
+ * Normal DMA supports individual transfers (segments) up to 128k.
+ * Dedicated DMA supports transfers up to 16M. We can only report
+ * one size limit, so we have to use the smaller value.
+ */
+#define SUN4I_NDMA_MAX_SEG_SIZE SZ_128K
+#define SUN4I_DDMA_MAX_SEG_SIZE SZ_16M
+#define SUN4I_DMA_MAX_SEG_SIZE SUN4I_NDMA_MAX_SEG_SIZE
+
struct sun4i_dma_pchan {
/* Register base of channel */
void __iomem *base;
@@ -155,7 +165,8 @@ struct sun4i_dma_contract {
struct virt_dma_desc vd;
struct list_head demands;
struct list_head completed_demands;
- int is_cyclic;
+ bool is_cyclic : 1;
+ bool use_half_int : 1;
};
struct sun4i_dma_dev {
@@ -372,7 +383,7 @@ static int __execute_vchan_pending(struct sun4i_dma_dev *priv,
if (promise) {
vchan->contract = contract;
vchan->pchan = pchan;
- set_pchan_interrupt(priv, pchan, contract->is_cyclic, 1);
+ set_pchan_interrupt(priv, pchan, contract->use_half_int, 1);
configure_pchan(pchan, promise);
}
@@ -735,12 +746,21 @@ sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
*
* Which requires half the engine programming for the same
* functionality.
+ *
+ * This only works if two periods fit in a single promise. That will
+ * always be the case for dedicated DMA, where the hardware has a much
+ * larger maximum transfer size than advertised to clients.
*/
- nr_periods = DIV_ROUND_UP(len / period_len, 2);
+ if (vchan->is_dedicated || period_len <= SUN4I_NDMA_MAX_SEG_SIZE / 2) {
+ period_len *= 2;
+ contract->use_half_int = 1;
+ }
+
+ nr_periods = DIV_ROUND_UP(len, period_len);
for (i = 0; i < nr_periods; i++) {
/* Calculate the offset in the buffer and the length needed */
- offset = i * period_len * 2;
- plength = min((len - offset), (period_len * 2));
+ offset = i * period_len;
+ plength = min((len - offset), period_len);
if (dir == DMA_MEM_TO_DEV)
src = buf + offset;
else
@@ -1149,6 +1169,8 @@ static int sun4i_dma_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
spin_lock_init(&priv->lock);
+ dma_set_max_seg_size(&pdev->dev, SUN4I_DMA_MAX_SEG_SIZE);
+
dma_cap_zero(priv->slave.cap_mask);
dma_cap_set(DMA_PRIVATE, priv->slave.cap_mask);
dma_cap_set(DMA_MEMCPY, priv->slave.cap_mask);
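
A rough worked example of the cyclic-transfer change in the sun4i-dma hunk above, with illustrative numbers: on normal DMA with len = 256 KiB and period_len = 32 KiB, the test period_len <= SUN4I_NDMA_MAX_SEG_SIZE / 2 (64 KiB) passes, so the period is doubled to 64 KiB, use_half_int is set, and nr_periods = DIV_ROUND_UP(256 KiB, 64 KiB) = 4 promises, each reporting both a half-done and an end interrupt. With period_len = 96 KiB the test fails (96 KiB > 64 KiB), so promises stay one per period and only the end interrupt is used.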
diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c
index 05cd451f541d..fa9bda4a2bc6 100644
--- a/drivers/dma/tegra186-gpc-dma.c
+++ b/drivers/dma/tegra186-gpc-dma.c
@@ -157,8 +157,8 @@
* If any burst is in flight and DMA paused then this is the time to complete
* in-flight burst and update DMA status register.
*/
-#define TEGRA_GPCDMA_BURST_COMPLETE_TIME 20
-#define TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT 100
+#define TEGRA_GPCDMA_BURST_COMPLETE_TIME 10
+#define TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT 5000 /* 5 msec */
/* Channel base address offset from GPCDMA base address */
#define TEGRA_GPCDMA_CHANNEL_BASE_ADD_OFFSET 0x20000
@@ -432,6 +432,17 @@ static int tegra_dma_device_resume(struct dma_chan *dc)
return 0;
}
+static inline int tegra_dma_pause_noerr(struct tegra_dma_channel *tdc)
+{
+ /* Return 0 irrespective of PAUSE status.
+ * This is useful to recover channels that can exit out of flush
+ * state when the channel is disabled.
+ */
+
+ tegra_dma_pause(tdc);
+ return 0;
+}
+
static void tegra_dma_disable(struct tegra_dma_channel *tdc)
{
u32 csr, status;
@@ -1292,6 +1303,14 @@ static const struct tegra_dma_chip_data tegra194_dma_chip_data = {
.terminate = tegra_dma_pause,
};
+static const struct tegra_dma_chip_data tegra234_dma_chip_data = {
+ .nr_channels = 31,
+ .channel_reg_size = SZ_64K,
+ .max_dma_count = SZ_1G,
+ .hw_support_pause = true,
+ .terminate = tegra_dma_pause_noerr,
+};
+
static const struct of_device_id tegra_dma_of_match[] = {
{
.compatible = "nvidia,tegra186-gpcdma",
@@ -1300,6 +1319,9 @@ static const struct of_device_id tegra_dma_of_match[] = {
.compatible = "nvidia,tegra194-gpcdma",
.data = &tegra194_dma_chip_data,
}, {
+ .compatible = "nvidia,tegra234-gpcdma",
+ .data = &tegra234_dma_chip_data,
+ }, {
},
};
MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
diff --git a/drivers/dma/ti/k3-psil-j721s2.c b/drivers/dma/ti/k3-psil-j721s2.c
index 4c4172a4d271..a488c2250623 100644
--- a/drivers/dma/ti/k3-psil-j721s2.c
+++ b/drivers/dma/ti/k3-psil-j721s2.c
@@ -112,6 +112,11 @@ static struct psil_ep j721s2_src_ep_map[] = {
PSIL_PDMA_XY_PKT(0x4707),
PSIL_PDMA_XY_PKT(0x4708),
PSIL_PDMA_XY_PKT(0x4709),
+ /* MAIN SA2UL */
+ PSIL_SA2UL(0x4a40, 0),
+ PSIL_SA2UL(0x4a41, 0),
+ PSIL_SA2UL(0x4a42, 0),
+ PSIL_SA2UL(0x4a43, 0),
/* CPSW0 */
PSIL_ETHERNET(0x7000),
/* MCU_PDMA0 (MCU_PDMA_MISC_G0) - SPI0 */
@@ -144,6 +149,9 @@ static struct psil_ep j721s2_src_ep_map[] = {
/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
static struct psil_ep j721s2_dst_ep_map[] = {
+ /* MAIN SA2UL */
+ PSIL_SA2UL(0xca40, 1),
+ PSIL_SA2UL(0xca41, 1),
/* CPSW0 */
PSIL_ETHERNET(0xf000),
PSIL_ETHERNET(0xf001),
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index cd62bbb50e8b..6276934d4d2b 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -2128,126 +2128,6 @@ error:
}
/**
- * xilinx_cdma_prep_memcpy_sg - prepare descriptors for a memcpy_sg transaction
- * @dchan: DMA channel
- * @dst_sg: Destination scatter list
- * @dst_sg_len: Number of entries in destination scatter list
- * @src_sg: Source scatter list
- * @src_sg_len: Number of entries in source scatter list
- * @flags: transfer ack flags
- *
- * Return: Async transaction descriptor on success and NULL on failure
- */
-static struct dma_async_tx_descriptor *xilinx_cdma_prep_memcpy_sg(
- struct dma_chan *dchan, struct scatterlist *dst_sg,
- unsigned int dst_sg_len, struct scatterlist *src_sg,
- unsigned int src_sg_len, unsigned long flags)
-{
- struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
- struct xilinx_dma_tx_descriptor *desc;
- struct xilinx_cdma_tx_segment *segment, *prev = NULL;
- struct xilinx_cdma_desc_hw *hw;
- size_t len, dst_avail, src_avail;
- dma_addr_t dma_dst, dma_src;
-
- if (unlikely(dst_sg_len == 0 || src_sg_len == 0))
- return NULL;
-
- if (unlikely(!dst_sg || !src_sg))
- return NULL;
-
- desc = xilinx_dma_alloc_tx_descriptor(chan);
- if (!desc)
- return NULL;
-
- dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
- desc->async_tx.tx_submit = xilinx_dma_tx_submit;
-
- dst_avail = sg_dma_len(dst_sg);
- src_avail = sg_dma_len(src_sg);
- /*
- * loop until there is either no more source or no more destination
- * scatterlist entry
- */
- while (true) {
- len = min_t(size_t, src_avail, dst_avail);
- len = min_t(size_t, len, chan->xdev->max_buffer_len);
- if (len == 0)
- goto fetch;
-
- /* Allocate the link descriptor from DMA pool */
- segment = xilinx_cdma_alloc_tx_segment(chan);
- if (!segment)
- goto error;
-
- dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
- dst_avail;
- dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
- src_avail;
- hw = &segment->hw;
- hw->control = len;
- hw->src_addr = dma_src;
- hw->dest_addr = dma_dst;
- if (chan->ext_addr) {
- hw->src_addr_msb = upper_32_bits(dma_src);
- hw->dest_addr_msb = upper_32_bits(dma_dst);
- }
-
- if (prev) {
- prev->hw.next_desc = segment->phys;
- if (chan->ext_addr)
- prev->hw.next_desc_msb =
- upper_32_bits(segment->phys);
- }
-
- prev = segment;
- dst_avail -= len;
- src_avail -= len;
- list_add_tail(&segment->node, &desc->segments);
-
-fetch:
- /* Fetch the next dst scatterlist entry */
- if (dst_avail == 0) {
- if (dst_sg_len == 0)
- break;
- dst_sg = sg_next(dst_sg);
- if (dst_sg == NULL)
- break;
- dst_sg_len--;
- dst_avail = sg_dma_len(dst_sg);
- }
- /* Fetch the next src scatterlist entry */
- if (src_avail == 0) {
- if (src_sg_len == 0)
- break;
- src_sg = sg_next(src_sg);
- if (src_sg == NULL)
- break;
- src_sg_len--;
- src_avail = sg_dma_len(src_sg);
- }
- }
-
- if (list_empty(&desc->segments)) {
- dev_err(chan->xdev->dev,
- "%s: Zero-size SG transfer requested\n", __func__);
- goto error;
- }
-
- /* Link the last hardware descriptor with the first. */
- segment = list_first_entry(&desc->segments,
- struct xilinx_cdma_tx_segment, node);
- desc->async_tx.phys = segment->phys;
- prev->hw.next_desc = segment->phys;
-
- return &desc->async_tx;
-
-error:
- xilinx_dma_free_tx_descriptor(chan, desc);
- return NULL;
-}
-
-/**
* xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
* @dchan: DMA channel
* @sgl: scatterlist to transfer to/from
@@ -3240,9 +3120,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
DMA_RESIDUE_GRANULARITY_SEGMENT;
} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
- dma_cap_set(DMA_MEMCPY_SG, xdev->common.cap_mask);
xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
- xdev->common.device_prep_dma_memcpy_sg = xilinx_cdma_prep_memcpy_sg;
/* Residue calculation is supported by only AXI DMA and CDMA */
xdev->common.residue_granularity =
DMA_RESIDUE_GRANULARITY_SEGMENT;
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index b0f4948b00a5..84dc5240a807 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -376,7 +376,7 @@ static ssize_t xilinx_dpdma_debugfs_read(struct file *f, char __user *buf,
if (ret < 0)
goto done;
} else {
- strlcpy(kern_buff, "No testcase executed",
+ strscpy(kern_buff, "No testcase executed",
XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE);
}
@@ -1652,10 +1652,8 @@ static int xilinx_dpdma_probe(struct platform_device *pdev)
dpdma_hw_init(xdev);
xdev->irq = platform_get_irq(pdev, 0);
- if (xdev->irq < 0) {
- dev_err(xdev->dev, "failed to get platform irq\n");
+ if (xdev->irq < 0)
return xdev->irq;
- }
ret = request_irq(xdev->irq, xilinx_dpdma_irq_handler, IRQF_SHARED,
dev_name(xdev->dev), xdev);
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index d3e2477948c8..17562cf1fe97 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -263,6 +263,7 @@ config EDAC_I10NM
config EDAC_PND2
tristate "Intel Pondicherry2"
depends on PCI && X86_64 && X86_MCE_INTEL
+ select P2SB if X86
help
Support for error detection and correction on the Intel
Pondicherry2 Integrated Memory Controller. This SoC IP is
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 5bf92298554d..e50d7928bf8f 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -24,6 +24,8 @@
#include <linux/of_platform.h>
#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include "edac_module.h"
#include "mpc85xx_edac.h"
#include "fsl_ddr_edac.h"
diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
index c94ca1f790c4..a20b299f1202 100644
--- a/drivers/edac/pnd2_edac.c
+++ b/drivers/edac/pnd2_edac.c
@@ -28,6 +28,8 @@
#include <linux/bitmap.h>
#include <linux/math64.h>
#include <linux/mod_devicetable.h>
+#include <linux/platform_data/x86/p2sb.h>
+
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/processor.h>
@@ -232,42 +234,14 @@ static u64 get_mem_ctrl_hub_base_addr(void)
return U64_LSHIFT(hi.base, 32) | U64_LSHIFT(lo.base, 15);
}
-static u64 get_sideband_reg_base_addr(void)
-{
- struct pci_dev *pdev;
- u32 hi, lo;
- u8 hidden;
-
- pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x19dd, NULL);
- if (pdev) {
- /* Unhide the P2SB device, if it's hidden */
- pci_read_config_byte(pdev, 0xe1, &hidden);
- if (hidden)
- pci_write_config_byte(pdev, 0xe1, 0);
-
- pci_read_config_dword(pdev, 0x10, &lo);
- pci_read_config_dword(pdev, 0x14, &hi);
- lo &= 0xfffffff0;
-
- /* Hide the P2SB device, if it was hidden before */
- if (hidden)
- pci_write_config_byte(pdev, 0xe1, hidden);
-
- pci_dev_put(pdev);
- return (U64_LSHIFT(hi, 32) | U64_LSHIFT(lo, 0));
- } else {
- return 0xfd000000;
- }
-}
-
#define DNV_MCHBAR_SIZE 0x8000
#define DNV_SB_PORT_SIZE 0x10000
static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
struct pci_dev *pdev;
- char *base;
- u64 addr;
- unsigned long size;
+ void __iomem *base;
+ struct resource r;
+ int ret;
if (op == 4) {
pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
@@ -279,26 +253,30 @@ static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *na
} else {
/* MMIO via memory controller hub base address */
if (op == 0 && port == 0x4c) {
- addr = get_mem_ctrl_hub_base_addr();
- if (!addr)
+ memset(&r, 0, sizeof(r));
+
+ r.start = get_mem_ctrl_hub_base_addr();
+ if (!r.start)
return -ENODEV;
- size = DNV_MCHBAR_SIZE;
+ r.end = r.start + DNV_MCHBAR_SIZE - 1;
} else {
/* MMIO via sideband register base address */
- addr = get_sideband_reg_base_addr();
- if (!addr)
- return -ENODEV;
- addr += (port << 16);
- size = DNV_SB_PORT_SIZE;
+ ret = p2sb_bar(NULL, 0, &r);
+ if (ret)
+ return ret;
+
+ r.start += (port << 16);
+ r.end = r.start + DNV_SB_PORT_SIZE - 1;
}
- base = ioremap((resource_size_t)addr, size);
+ base = ioremap(r.start, resource_size(&r));
if (!base)
return -ENODEV;
if (sz == 8)
- *(u32 *)(data + 4) = *(u32 *)(base + off + 4);
- *(u32 *)data = *(u32 *)(base + off);
+ *(u64 *)data = readq(base + off);
+ else
+ *(u32 *)data = readl(base + off);
iounmap(base);
}
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 6793f6d799e7..0bc670778c99 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -11,6 +11,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/types.h>
diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
index 7dad6f57d970..81cc3d0f6eec 100644
--- a/drivers/firmware/cirrus/cs_dsp.c
+++ b/drivers/firmware/cirrus/cs_dsp.c
@@ -2725,6 +2725,9 @@ void cs_dsp_stop(struct cs_dsp *dsp)
mutex_lock(&dsp->pwr_lock);
+ if (dsp->client_ops->pre_stop)
+ dsp->client_ops->pre_stop(dsp);
+
dsp->running = false;
if (dsp->ops->stop_core)
@@ -3177,6 +3180,110 @@ static const struct cs_dsp_ops cs_dsp_halo_ops = {
.stop_core = cs_dsp_halo_stop_core,
};
+/**
+ * cs_dsp_chunk_write() - Format data to a DSP memory chunk
+ * @ch: Pointer to the chunk structure
+ * @nbits: Number of bits to write
+ * @val: Value to write
+ *
+ * This function sequentially writes values into the format required for DSP
+ * memory; it handles both insertion of the padding bytes and conversion to
+ * big endian. Note that data is only committed to the chunk when a whole DSP
+ * word's worth of data is available.
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_chunk_write(struct cs_dsp_chunk *ch, int nbits, u32 val)
+{
+ int nwrite, i;
+
+ nwrite = min(CS_DSP_DATA_WORD_BITS - ch->cachebits, nbits);
+
+ ch->cache <<= nwrite;
+ ch->cache |= val >> (nbits - nwrite);
+ ch->cachebits += nwrite;
+ nbits -= nwrite;
+
+ if (ch->cachebits == CS_DSP_DATA_WORD_BITS) {
+ if (cs_dsp_chunk_end(ch))
+ return -ENOSPC;
+
+ ch->cache &= 0xFFFFFF;
+ for (i = 0; i < sizeof(ch->cache); i++, ch->cache <<= BITS_PER_BYTE)
+ *ch->data++ = (ch->cache & 0xFF000000) >> CS_DSP_DATA_WORD_BITS;
+
+ ch->bytes += sizeof(ch->cache);
+ ch->cachebits = 0;
+ }
+
+ if (nbits)
+ return cs_dsp_chunk_write(ch, nbits, val);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_chunk_write);
+
+/**
+ * cs_dsp_chunk_flush() - Pad remaining data with zero and commit to chunk
+ * @ch: Pointer to the chunk structure
+ *
+ * As cs_dsp_chunk_write only writes data when a whole DSP word is ready to
+ * be written out, it is possible that some data will remain in the cache; this
+ * function will pad that data with zeros up to a whole DSP word and write it out.
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_chunk_flush(struct cs_dsp_chunk *ch)
+{
+ if (!ch->cachebits)
+ return 0;
+
+ return cs_dsp_chunk_write(ch, CS_DSP_DATA_WORD_BITS - ch->cachebits, 0);
+}
+EXPORT_SYMBOL_GPL(cs_dsp_chunk_flush);
+
+/**
+ * cs_dsp_chunk_read() - Parse data from a DSP memory chunk
+ * @ch: Pointer to the chunk structure
+ * @nbits: Number of bits to read
+ *
+ * This function sequentially reads values from a DSP memory formatted buffer;
+ * it handles both removal of the padding bytes and conversion from big endian.
+ *
+ * Return: A negative number is returned on error, otherwise the read value.
+ */
+int cs_dsp_chunk_read(struct cs_dsp_chunk *ch, int nbits)
+{
+ int nread, i;
+ u32 result;
+
+ if (!ch->cachebits) {
+ if (cs_dsp_chunk_end(ch))
+ return -ENOSPC;
+
+ ch->cache = 0;
+ ch->cachebits = CS_DSP_DATA_WORD_BITS;
+
+ for (i = 0; i < sizeof(ch->cache); i++, ch->cache <<= BITS_PER_BYTE)
+ ch->cache |= *ch->data++;
+
+ ch->bytes += sizeof(ch->cache);
+ }
+
+ nread = min(ch->cachebits, nbits);
+ nbits -= nread;
+
+ result = ch->cache >> ((sizeof(ch->cache) * BITS_PER_BYTE) - nread);
+ ch->cache <<= nread;
+ ch->cachebits -= nread;
+
+ if (nbits)
+ result = (result << nbits) | cs_dsp_chunk_read(ch, nbits);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_chunk_read);
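
The three chunk helpers added above pair up as a small bit-packing API: cs_dsp_chunk_write() packs values MSB-first into 24-bit DSP words held in 4-byte big-endian containers, cs_dsp_chunk_flush() zero-pads a partial final word, and cs_dsp_chunk_read() unpacks the same stream. A minimal usage sketch follows; it assumes the cs_dsp_chunk() initialiser and the cs_dsp_chunk_end() helper these functions rely on are declared in the cs_dsp header.

	u8 buf[8] = { 0 };			/* room for two 24-bit DSP words */
	struct cs_dsp_chunk wr = cs_dsp_chunk(buf, sizeof(buf));
	struct cs_dsp_chunk rd = cs_dsp_chunk(buf, sizeof(buf));
	int a, b;

	cs_dsp_chunk_write(&wr, 16, 0x1234);	/* cached, nothing committed yet */
	cs_dsp_chunk_write(&wr, 12, 0xabc);	/* completes word 0, starts word 1 */
	cs_dsp_chunk_flush(&wr);		/* zero-pad the partial second word */

	a = cs_dsp_chunk_read(&rd, 16);		/* a == 0x1234 */
	b = cs_dsp_chunk_read(&rd, 12);		/* b == 0xabc */

Because packing is MSB-first, a value may straddle the 24-bit word boundary, as the 12-bit write above does; the helpers handle the carry and the leading pad byte of each 32-bit container transparently.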
+
MODULE_DESCRIPTION("Cirrus Logic DSP Support");
MODULE_AUTHOR("Simon Trimmer <simont@opensource.cirrus.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index f191a1f901ac..0eb6b617f709 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -630,7 +630,7 @@ static int __init dmi_smbios3_present(const u8 *buf)
{
if (memcmp(buf, "_SM3_", 5) == 0 &&
buf[6] < 32 && dmi_checksum(buf, buf[6])) {
- dmi_ver = get_unaligned_be32(buf + 6) & 0xFFFFFF;
+ dmi_ver = get_unaligned_be24(buf + 7);
dmi_num = 0; /* No longer specified */
dmi_len = get_unaligned_le32(buf + 12);
dmi_base = get_unaligned_le64(buf + 16);
diff --git a/drivers/firmware/efi/libstub/riscv-stub.c b/drivers/firmware/efi/libstub/riscv-stub.c
index 9e85e58d1f27..b450ebf95977 100644
--- a/drivers/firmware/efi/libstub/riscv-stub.c
+++ b/drivers/firmware/efi/libstub/riscv-stub.c
@@ -8,6 +8,7 @@
#include <asm/efi.h>
#include <asm/sections.h>
+#include <asm/unaligned.h>
#include "efistub.h"
@@ -29,7 +30,7 @@ static int get_boot_hartid_from_fdt(void)
{
const void *fdt;
int chosen_node, len;
- const fdt32_t *prop;
+ const void *prop;
fdt = get_efi_config_table(DEVICE_TREE_GUID);
if (!fdt)
@@ -40,10 +41,16 @@ static int get_boot_hartid_from_fdt(void)
return -EINVAL;
prop = fdt_getprop((void *)fdt, chosen_node, "boot-hartid", &len);
- if (!prop || len != sizeof(u32))
+ if (!prop)
+ return -EINVAL;
+
+ if (len == sizeof(u32))
+ hartid = (unsigned long) fdt32_to_cpu(*(fdt32_t *)prop);
+ else if (len == sizeof(u64))
+ hartid = (unsigned long) fdt64_to_cpu(__get_unaligned_t(fdt64_t, prop));
+ else
return -EINVAL;
- hartid = fdt32_to_cpu(*prop);
return 0;
}
diff --git a/drivers/firmware/mtk-adsp-ipc.c b/drivers/firmware/mtk-adsp-ipc.c
index cb255a99170c..3c071f814455 100644
--- a/drivers/firmware/mtk-adsp-ipc.c
+++ b/drivers/firmware/mtk-adsp-ipc.c
@@ -12,6 +12,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
+static const char * const adsp_mbox_ch_names[MTK_ADSP_MBOX_NUM] = { "rx", "tx" };
+
/*
* mtk_adsp_ipc_send - send ipc cmd to MTK ADSP
*
@@ -72,7 +74,6 @@ static int mtk_adsp_ipc_probe(struct platform_device *pdev)
struct mtk_adsp_ipc *adsp_ipc;
struct mtk_adsp_chan *adsp_chan;
struct mbox_client *cl;
- char *chan_name;
int ret;
int i, j;
@@ -83,12 +84,6 @@ static int mtk_adsp_ipc_probe(struct platform_device *pdev)
return -ENOMEM;
for (i = 0; i < MTK_ADSP_MBOX_NUM; i++) {
- chan_name = kasprintf(GFP_KERNEL, "mbox%d", i);
- if (!chan_name) {
- ret = -ENOMEM;
- goto out;
- }
-
adsp_chan = &adsp_ipc->chans[i];
cl = &adsp_chan->cl;
cl->dev = dev->parent;
@@ -99,17 +94,20 @@ static int mtk_adsp_ipc_probe(struct platform_device *pdev)
adsp_chan->ipc = adsp_ipc;
adsp_chan->idx = i;
- adsp_chan->ch = mbox_request_channel_byname(cl, chan_name);
+ adsp_chan->ch = mbox_request_channel_byname(cl, adsp_mbox_ch_names[i]);
if (IS_ERR(adsp_chan->ch)) {
ret = PTR_ERR(adsp_chan->ch);
if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to request mbox chan %d ret %d\n",
- i, ret);
- goto out_free;
- }
+ dev_err(dev, "Failed to request mbox chan %s ret %d\n",
+ adsp_mbox_ch_names[i], ret);
+
+ for (j = 0; j < i; j++) {
+ adsp_chan = &adsp_ipc->chans[j];
+ mbox_free_channel(adsp_chan->ch);
+ }
- dev_dbg(dev, "request mbox chan %s\n", chan_name);
- kfree(chan_name);
+ return ret;
+ }
}
adsp_ipc->dev = dev;
@@ -117,16 +115,6 @@ static int mtk_adsp_ipc_probe(struct platform_device *pdev)
dev_dbg(dev, "MTK ADSP IPC initialized\n");
return 0;
-
-out_free:
- kfree(chan_name);
-out:
- for (j = 0; j < i; j++) {
- adsp_chan = &adsp_ipc->chans[j];
- mbox_free_channel(adsp_chan->ch);
- }
-
- return ret;
}
static int mtk_adsp_ipc_remove(struct platform_device *pdev)
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index b01961999ced..0642f579196f 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -544,6 +544,7 @@ config GPIO_SAMA5D2_PIOBU
tristate "SAMA5D2 PIOBU GPIO support"
depends on MFD_SYSCON
depends on OF_GPIO
+ depends on ARCH_AT91 || COMPILE_TEST
select GPIO_SYSCON
help
Say yes here to use the PIOBU pins as GPIOs.
@@ -690,12 +691,6 @@ config GPIO_VISCONTI
help
Say yes here to support GPIO on Toshiba Visconti.
-config GPIO_VR41XX
- tristate "NEC VR4100 series General-purpose I/O Unit support"
- depends on CPU_VR41XX
- help
- Say yes here to support the NEC VR4100 series General-purpose I/O Unit.
-
config GPIO_VX855
tristate "VIA VX855/VX875 GPIO"
depends on (X86 || COMPILE_TEST) && PCI
@@ -829,11 +824,24 @@ endmenu
menu "Port-mapped I/O GPIO drivers"
depends on X86 # Unconditional I/O space access
+config GPIO_I8255
+ tristate
+ help
+ Enables support for the i8255 interface library functions. The i8255
+ interface library provides functions to facilitate communication with
+ interfaces compatible with the venerable Intel 8255 Programmable
+ Peripheral Interface (PPI). The Intel 8255 PPI chip was first released
+ in the early 1970s but compatible interfaces are nowadays typically
+ found embedded in larger VLSI processing chips and FPGA components.
+
+ If built as a module its name will be gpio-i8255.
+
config GPIO_104_DIO_48E
tristate "ACCES 104-DIO-48E GPIO support"
depends on PC104
select ISA_BUS_API
select GPIOLIB_IRQCHIP
+ select GPIO_I8255
help
Enables GPIO support for the ACCES 104-DIO-48E series (104-DIO-48E,
104-DIO-24E). The base port addresses for the devices may be
@@ -857,6 +865,7 @@ config GPIO_104_IDI_48
depends on PC104
select ISA_BUS_API
select GPIOLIB_IRQCHIP
+ select GPIO_I8255
help
Enables GPIO support for the ACCES 104-IDI-48 family (104-IDI-48A,
104-IDI-48AC, 104-IDI-48B, 104-IDI-48BC). The base port addresses for
@@ -877,6 +886,7 @@ config GPIO_GPIO_MM
tristate "Diamond Systems GPIO-MM GPIO support"
depends on PC104
select ISA_BUS_API
+ select GPIO_I8255
help
Enables GPIO support for the Diamond Systems GPIO-MM and GPIO-MM-12.
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 14352f6dfe8e..a0985d30f51b 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -67,6 +67,7 @@ obj-$(CONFIG_GPIO_GW_PLD) += gpio-gw-pld.o
obj-$(CONFIG_GPIO_HISI) += gpio-hisi.o
obj-$(CONFIG_GPIO_HLWD) += gpio-hlwd.o
obj-$(CONFIG_HTC_EGPIO) += gpio-htc-egpio.o
+obj-$(CONFIG_GPIO_I8255) += gpio-i8255.o
obj-$(CONFIG_GPIO_ICH) += gpio-ich.o
obj-$(CONFIG_GPIO_IDT3243X) += gpio-idt3243x.o
obj-$(CONFIG_GPIO_IOP) += gpio-iop.o
@@ -169,7 +170,6 @@ obj-$(CONFIG_GPIO_VF610) += gpio-vf610.o
obj-$(CONFIG_GPIO_VIPERBOARD) += gpio-viperboard.o
obj-$(CONFIG_GPIO_VIRTIO) += gpio-virtio.o
obj-$(CONFIG_GPIO_VISCONTI) += gpio-visconti.o
-obj-$(CONFIG_GPIO_VR41XX) += gpio-vr41xx.o
obj-$(CONFIG_GPIO_VX855) += gpio-vx855.o
obj-$(CONFIG_GPIO_WCD934X) += gpio-wcd934x.o
obj-$(CONFIG_GPIO_WHISKEY_COVE) += gpio-wcove.o
diff --git a/drivers/gpio/gpio-104-dio-48e.c b/drivers/gpio/gpio-104-dio-48e.c
index f118ad9bcd33..a41551870759 100644
--- a/drivers/gpio/gpio-104-dio-48e.c
+++ b/drivers/gpio/gpio-104-dio-48e.c
@@ -6,8 +6,7 @@
* This driver supports the following ACCES devices: 104-DIO-48E and
* 104-DIO-24E.
*/
-#include <linux/bitmap.h>
-#include <linux/bitops.h>
+#include <linux/bits.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gpio/driver.h>
@@ -20,6 +19,11 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "gpio-i8255.h"
+
+MODULE_IMPORT_NS(I8255);
#define DIO48E_EXTENT 16
#define MAX_NUM_DIO48E max_num_isa_dev(DIO48E_EXTENT)
@@ -33,34 +37,54 @@ static unsigned int irq[MAX_NUM_DIO48E];
module_param_hw_array(irq, uint, irq, NULL, 0);
MODULE_PARM_DESC(irq, "ACCES 104-DIO-48E interrupt line numbers");
+#define DIO48E_NUM_PPI 2
+
+/**
+ * struct dio48e_reg - device register structure
+ * @ppi: Programmable Peripheral Interface groups
+ * @enable_buffer: Enable/Disable Buffer groups
+ * @unused1: Unused
+ * @enable_interrupt: Write: Enable Interrupt
+ * Read: Disable Interrupt
+ * @unused2: Unused
+ * @enable_counter: Write: Enable Counter/Timer Addressing
+ * Read: Disable Counter/Timer Addressing
+ * @unused3: Unused
+ * @clear_interrupt: Clear Interrupt
+ */
+struct dio48e_reg {
+ struct i8255 ppi[DIO48E_NUM_PPI];
+ u8 enable_buffer[DIO48E_NUM_PPI];
+ u8 unused1;
+ u8 enable_interrupt;
+ u8 unused2;
+ u8 enable_counter;
+ u8 unused3;
+ u8 clear_interrupt;
+};
+
/**
* struct dio48e_gpio - GPIO device private data structure
- * @chip: instance of the gpio_chip
- * @io_state: bit I/O state (whether bit is set to input or output)
- * @out_state: output bits state
- * @control: Control registers state
- * @lock: synchronization lock to prevent I/O race conditions
- * @base: base port address of the GPIO device
- * @irq_mask: I/O bits affected by interrupts
+ * @chip: instance of the gpio_chip
+ * @ppi_state: PPI device states
+ * @lock: synchronization lock to prevent I/O race conditions
+ * @reg: I/O address offset for the device registers
+ * @irq_mask: I/O bits affected by interrupts
*/
struct dio48e_gpio {
struct gpio_chip chip;
- unsigned char io_state[6];
- unsigned char out_state[6];
- unsigned char control[2];
+ struct i8255_state ppi_state[DIO48E_NUM_PPI];
raw_spinlock_t lock;
- void __iomem *base;
+ struct dio48e_reg __iomem *reg;
unsigned char irq_mask;
};
static int dio48e_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
- const unsigned int port = offset / 8;
- const unsigned int mask = BIT(offset % 8);
- if (dio48egpio->io_state[port] & mask)
- return GPIO_LINE_DIRECTION_IN;
+ if (i8255_get_direction(dio48egpio->ppi_state, offset))
+ return GPIO_LINE_DIRECTION_IN;
return GPIO_LINE_DIRECTION_OUT;
}
@@ -68,38 +92,9 @@ static int dio48e_gpio_get_direction(struct gpio_chip *chip, unsigned int offset
static int dio48e_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
- const unsigned int io_port = offset / 8;
- const unsigned int control_port = io_port / 3;
- void __iomem *const control_addr = dio48egpio->base + 3 + control_port * 4;
- unsigned long flags;
- unsigned int control;
-
- raw_spin_lock_irqsave(&dio48egpio->lock, flags);
- /* Check if configuring Port C */
- if (io_port == 2 || io_port == 5) {
- /* Port C can be configured by nibble */
- if (offset % 8 > 3) {
- dio48egpio->io_state[io_port] |= 0xF0;
- dio48egpio->control[control_port] |= BIT(3);
- } else {
- dio48egpio->io_state[io_port] |= 0x0F;
- dio48egpio->control[control_port] |= BIT(0);
- }
- } else {
- dio48egpio->io_state[io_port] |= 0xFF;
- if (io_port == 0 || io_port == 3)
- dio48egpio->control[control_port] |= BIT(4);
- else
- dio48egpio->control[control_port] |= BIT(1);
- }
-
- control = BIT(7) | dio48egpio->control[control_port];
- iowrite8(control, control_addr);
- control &= ~BIT(7);
- iowrite8(control, control_addr);
-
- raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);
+ i8255_direction_input(dio48egpio->reg->ppi, dio48egpio->ppi_state,
+ offset);
return 0;
}
@@ -108,48 +103,9 @@ static int dio48e_gpio_direction_output(struct gpio_chip *chip, unsigned int off
int value)
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
- const unsigned int io_port = offset / 8;
- const unsigned int control_port = io_port / 3;
- const unsigned int mask = BIT(offset % 8);
- void __iomem *const control_addr = dio48egpio->base + 3 + control_port * 4;
- const unsigned int out_port = (io_port > 2) ? io_port + 1 : io_port;
- unsigned long flags;
- unsigned int control;
-
- raw_spin_lock_irqsave(&dio48egpio->lock, flags);
-
- /* Check if configuring Port C */
- if (io_port == 2 || io_port == 5) {
- /* Port C can be configured by nibble */
- if (offset % 8 > 3) {
- dio48egpio->io_state[io_port] &= 0x0F;
- dio48egpio->control[control_port] &= ~BIT(3);
- } else {
- dio48egpio->io_state[io_port] &= 0xF0;
- dio48egpio->control[control_port] &= ~BIT(0);
- }
- } else {
- dio48egpio->io_state[io_port] &= 0x00;
- if (io_port == 0 || io_port == 3)
- dio48egpio->control[control_port] &= ~BIT(4);
- else
- dio48egpio->control[control_port] &= ~BIT(1);
- }
-
- if (value)
- dio48egpio->out_state[io_port] |= mask;
- else
- dio48egpio->out_state[io_port] &= ~mask;
- control = BIT(7) | dio48egpio->control[control_port];
- iowrite8(control, control_addr);
-
- iowrite8(dio48egpio->out_state[io_port], dio48egpio->base + out_port);
-
- control &= ~BIT(7);
- iowrite8(control, control_addr);
-
- raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);
+ i8255_direction_output(dio48egpio->reg->ppi, dio48egpio->ppi_state,
+ offset, value);
return 0;
}
@@ -157,47 +113,16 @@ static int dio48e_gpio_direction_output(struct gpio_chip *chip, unsigned int off
static int dio48e_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
- const unsigned int port = offset / 8;
- const unsigned int mask = BIT(offset % 8);
- const unsigned int in_port = (port > 2) ? port + 1 : port;
- unsigned long flags;
- unsigned int port_state;
-
- raw_spin_lock_irqsave(&dio48egpio->lock, flags);
- /* ensure that GPIO is set for input */
- if (!(dio48egpio->io_state[port] & mask)) {
- raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);
- return -EINVAL;
- }
-
- port_state = ioread8(dio48egpio->base + in_port);
-
- raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);
-
- return !!(port_state & mask);
+ return i8255_get(dio48egpio->reg->ppi, offset);
}
-static const size_t ports[] = { 0, 1, 2, 4, 5, 6 };
-
static int dio48e_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
unsigned long *bits)
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
- unsigned long offset;
- unsigned long gpio_mask;
- void __iomem *port_addr;
- unsigned long port_state;
- /* clear bits array to a clean slate */
- bitmap_zero(bits, chip->ngpio);
-
- for_each_set_clump8(offset, gpio_mask, mask, ARRAY_SIZE(ports) * 8) {
- port_addr = dio48egpio->base + ports[offset / 8];
- port_state = ioread8(port_addr) & gpio_mask;
-
- bitmap_set_value8(bits, port_state, offset);
- }
+ i8255_get_multiple(dio48egpio->reg->ppi, mask, bits, chip->ngpio);
return 0;
}
@@ -205,49 +130,17 @@ static int dio48e_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
static void dio48e_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
- const unsigned int port = offset / 8;
- const unsigned int mask = BIT(offset % 8);
- const unsigned int out_port = (port > 2) ? port + 1 : port;
- unsigned long flags;
- raw_spin_lock_irqsave(&dio48egpio->lock, flags);
-
- if (value)
- dio48egpio->out_state[port] |= mask;
- else
- dio48egpio->out_state[port] &= ~mask;
-
- iowrite8(dio48egpio->out_state[port], dio48egpio->base + out_port);
-
- raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);
+ i8255_set(dio48egpio->reg->ppi, dio48egpio->ppi_state, offset, value);
}
static void dio48e_gpio_set_multiple(struct gpio_chip *chip,
unsigned long *mask, unsigned long *bits)
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
- unsigned long offset;
- unsigned long gpio_mask;
- size_t index;
- void __iomem *port_addr;
- unsigned long bitmask;
- unsigned long flags;
-
- for_each_set_clump8(offset, gpio_mask, mask, ARRAY_SIZE(ports) * 8) {
- index = offset / 8;
- port_addr = dio48egpio->base + ports[index];
-
- bitmask = bitmap_get_value8(bits, offset) & gpio_mask;
-
- raw_spin_lock_irqsave(&dio48egpio->lock, flags);
- /* update output state data and set device gpio register */
- dio48egpio->out_state[index] &= ~gpio_mask;
- dio48egpio->out_state[index] |= bitmask;
- iowrite8(dio48egpio->out_state[index], port_addr);
-
- raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);
- }
+ i8255_set_multiple(dio48egpio->reg->ppi, dio48egpio->ppi_state, mask,
+ bits, chip->ngpio);
}
static void dio48e_irq_ack(struct irq_data *data)
@@ -274,7 +167,7 @@ static void dio48e_irq_mask(struct irq_data *data)
if (!dio48egpio->irq_mask)
/* disable interrupts */
- ioread8(dio48egpio->base + 0xB);
+ ioread8(&dio48egpio->reg->enable_interrupt);
raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);
}
@@ -294,8 +187,8 @@ static void dio48e_irq_unmask(struct irq_data *data)
if (!dio48egpio->irq_mask) {
/* enable interrupts */
- iowrite8(0x00, dio48egpio->base + 0xF);
- iowrite8(0x00, dio48egpio->base + 0xB);
+ iowrite8(0x00, &dio48egpio->reg->clear_interrupt);
+ iowrite8(0x00, &dio48egpio->reg->enable_interrupt);
}
if (offset == 19)
@@ -341,7 +234,7 @@ static irqreturn_t dio48e_irq_handler(int irq, void *dev_id)
raw_spin_lock(&dio48egpio->lock);
- iowrite8(0x00, dio48egpio->base + 0xF);
+ iowrite8(0x00, &dio48egpio->reg->clear_interrupt);
raw_spin_unlock(&dio48egpio->lock);
@@ -373,11 +266,26 @@ static int dio48e_irq_init_hw(struct gpio_chip *gc)
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(gc);
/* Disable IRQ by default */
- ioread8(dio48egpio->base + 0xB);
+ ioread8(&dio48egpio->reg->enable_interrupt);
return 0;
}
+static void dio48e_init_ppi(struct i8255 __iomem *const ppi,
+ struct i8255_state *const ppi_state)
+{
+ const unsigned long ngpio = 24;
+ const unsigned long mask = GENMASK(ngpio - 1, 0);
+ const unsigned long bits = 0;
+ unsigned long i;
+
+ /* Initialize all GPIO to output 0 */
+ for (i = 0; i < DIO48E_NUM_PPI; i++) {
+ i8255_mode0_output(&ppi[i]);
+ i8255_set_multiple(&ppi[i], &ppi_state[i], &mask, &bits, ngpio);
+ }
+}
+
static int dio48e_probe(struct device *dev, unsigned int id)
{
struct dio48e_gpio *dio48egpio;
@@ -395,8 +303,8 @@ static int dio48e_probe(struct device *dev, unsigned int id)
return -EBUSY;
}
- dio48egpio->base = devm_ioport_map(dev, base[id], DIO48E_EXTENT);
- if (!dio48egpio->base)
+ dio48egpio->reg = devm_ioport_map(dev, base[id], DIO48E_EXTENT);
+ if (!dio48egpio->reg)
return -ENOMEM;
dio48egpio->chip.label = name;
@@ -425,17 +333,8 @@ static int dio48e_probe(struct device *dev, unsigned int id)
raw_spin_lock_init(&dio48egpio->lock);
- /* initialize all GPIO as output */
- iowrite8(0x80, dio48egpio->base + 3);
- iowrite8(0x00, dio48egpio->base);
- iowrite8(0x00, dio48egpio->base + 1);
- iowrite8(0x00, dio48egpio->base + 2);
- iowrite8(0x00, dio48egpio->base + 3);
- iowrite8(0x80, dio48egpio->base + 7);
- iowrite8(0x00, dio48egpio->base + 4);
- iowrite8(0x00, dio48egpio->base + 5);
- iowrite8(0x00, dio48egpio->base + 6);
- iowrite8(0x00, dio48egpio->base + 7);
+ i8255_state_init(dio48egpio->ppi_state, DIO48E_NUM_PPI);
+ dio48e_init_ppi(dio48egpio->reg->ppi, dio48egpio->ppi_state);
err = devm_gpiochip_add_data(dev, &dio48egpio->chip, dio48egpio);
if (err) {
diff --git a/drivers/gpio/gpio-104-idi-48.c b/drivers/gpio/gpio-104-idi-48.c
index 9521ece3ebef..40be76efeed7 100644
--- a/drivers/gpio/gpio-104-idi-48.c
+++ b/drivers/gpio/gpio-104-idi-48.c
@@ -6,8 +6,7 @@
* This driver supports the following ACCES devices: 104-IDI-48A,
* 104-IDI-48AC, 104-IDI-48B, and 104-IDI-48BC.
*/
-#include <linux/bitmap.h>
-#include <linux/bitops.h>
+#include <linux/bits.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gpio/driver.h>
@@ -20,6 +19,11 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "gpio-i8255.h"
+
+MODULE_IMPORT_NS(I8255);
#define IDI_48_EXTENT 8
#define MAX_NUM_IDI_48 max_num_isa_dev(IDI_48_EXTENT)
@@ -34,72 +38,61 @@ module_param_hw_array(irq, uint, irq, NULL, 0);
MODULE_PARM_DESC(irq, "ACCES 104-IDI-48 interrupt line numbers");
/**
+ * struct idi_48_reg - device register structure
+ * @port0: Port 0 Inputs
+ * @unused: Unused
+ * @port1: Port 1 Inputs
+ * @irq: Read: IRQ Status Register/IRQ Clear
+ * Write: IRQ Enable/Disable
+ */
+struct idi_48_reg {
+ u8 port0[3];
+ u8 unused;
+ u8 port1[3];
+ u8 irq;
+};
+
+/**
* struct idi_48_gpio - GPIO device private data structure
* @chip: instance of the gpio_chip
* @lock: synchronization lock to prevent I/O race conditions
- * @ack_lock: synchronization lock to prevent IRQ handler race conditions
* @irq_mask: input bits affected by interrupts
- * @base: base port address of the GPIO device
+ * @reg: I/O address offset for the device registers
* @cos_enb: Change-Of-State IRQ enable boundaries mask
*/
struct idi_48_gpio {
struct gpio_chip chip;
- raw_spinlock_t lock;
- spinlock_t ack_lock;
+ spinlock_t lock;
unsigned char irq_mask[6];
- void __iomem *base;
+ struct idi_48_reg __iomem *reg;
unsigned char cos_enb;
};
-static int idi_48_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+static int idi_48_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
return GPIO_LINE_DIRECTION_IN;
}
-static int idi_48_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+static int idi_48_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
{
return 0;
}
-static int idi_48_gpio_get(struct gpio_chip *chip, unsigned offset)
+static int idi_48_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct idi_48_gpio *const idi48gpio = gpiochip_get_data(chip);
- unsigned i;
- static const unsigned int register_offset[6] = { 0, 1, 2, 4, 5, 6 };
- void __iomem *port_addr;
- unsigned mask;
-
- for (i = 0; i < 48; i += 8)
- if (offset < i + 8) {
- port_addr = idi48gpio->base + register_offset[i / 8];
- mask = BIT(offset - i);
-
- return !!(ioread8(port_addr) & mask);
- }
+ void __iomem *const ppi = idi48gpio->reg;
- /* The following line should never execute since offset < 48 */
- return 0;
+ return i8255_get(ppi, offset);
}
static int idi_48_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
unsigned long *bits)
{
struct idi_48_gpio *const idi48gpio = gpiochip_get_data(chip);
- unsigned long offset;
- unsigned long gpio_mask;
- static const size_t ports[] = { 0, 1, 2, 4, 5, 6 };
- void __iomem *port_addr;
- unsigned long port_state;
-
- /* clear bits array to a clean slate */
- bitmap_zero(bits, chip->ngpio);
+ void __iomem *const ppi = idi48gpio->reg;
- for_each_set_clump8(offset, gpio_mask, mask, ARRAY_SIZE(ports) * 8) {
- port_addr = idi48gpio->base + ports[offset / 8];
- port_state = ioread8(port_addr) & gpio_mask;
-
- bitmap_set_value8(bits, port_state, offset);
- }
+ i8255_get_multiple(ppi, mask, bits, chip->ngpio);
return 0;
}
@@ -112,67 +105,56 @@ static void idi_48_irq_mask(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct idi_48_gpio *const idi48gpio = gpiochip_get_data(chip);
- const unsigned offset = irqd_to_hwirq(data);
- unsigned i;
- unsigned mask;
- unsigned boundary;
+ const unsigned int offset = irqd_to_hwirq(data);
+ const unsigned long boundary = offset / 8;
+ const unsigned long mask = BIT(offset % 8);
unsigned long flags;
- for (i = 0; i < 48; i += 8)
- if (offset < i + 8) {
- mask = BIT(offset - i);
- boundary = i / 8;
-
- idi48gpio->irq_mask[boundary] &= ~mask;
+ spin_lock_irqsave(&idi48gpio->lock, flags);
- if (!idi48gpio->irq_mask[boundary]) {
- idi48gpio->cos_enb &= ~BIT(boundary);
+ idi48gpio->irq_mask[boundary] &= ~mask;
- raw_spin_lock_irqsave(&idi48gpio->lock, flags);
+ /* Exit early if there are still input lines with IRQ unmasked */
+ if (idi48gpio->irq_mask[boundary])
+ goto exit;
- iowrite8(idi48gpio->cos_enb, idi48gpio->base + 7);
+ idi48gpio->cos_enb &= ~BIT(boundary);
- raw_spin_unlock_irqrestore(&idi48gpio->lock, flags);
- }
+ iowrite8(idi48gpio->cos_enb, &idi48gpio->reg->irq);
- return;
- }
+exit:
+ spin_unlock_irqrestore(&idi48gpio->lock, flags);
}
static void idi_48_irq_unmask(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct idi_48_gpio *const idi48gpio = gpiochip_get_data(chip);
- const unsigned offset = irqd_to_hwirq(data);
- unsigned i;
- unsigned mask;
- unsigned boundary;
- unsigned prev_irq_mask;
+ const unsigned int offset = irqd_to_hwirq(data);
+ const unsigned long boundary = offset / 8;
+ const unsigned long mask = BIT(offset % 8);
+ unsigned int prev_irq_mask;
unsigned long flags;
- for (i = 0; i < 48; i += 8)
- if (offset < i + 8) {
- mask = BIT(offset - i);
- boundary = i / 8;
- prev_irq_mask = idi48gpio->irq_mask[boundary];
+ spin_lock_irqsave(&idi48gpio->lock, flags);
- idi48gpio->irq_mask[boundary] |= mask;
+ prev_irq_mask = idi48gpio->irq_mask[boundary];
- if (!prev_irq_mask) {
- idi48gpio->cos_enb |= BIT(boundary);
+ idi48gpio->irq_mask[boundary] |= mask;
- raw_spin_lock_irqsave(&idi48gpio->lock, flags);
+ /* Exit early if IRQ was already unmasked for this boundary */
+ if (prev_irq_mask)
+ goto exit;
- iowrite8(idi48gpio->cos_enb, idi48gpio->base + 7);
+ idi48gpio->cos_enb |= BIT(boundary);
- raw_spin_unlock_irqrestore(&idi48gpio->lock, flags);
- }
+ iowrite8(idi48gpio->cos_enb, &idi48gpio->reg->irq);
- return;
- }
+exit:
+ spin_unlock_irqrestore(&idi48gpio->lock, flags);
}
-static int idi_48_irq_set_type(struct irq_data *data, unsigned flow_type)
+static int idi_48_irq_set_type(struct irq_data *data, unsigned int flow_type)
{
/* The only valid irq types are none and both-edges */
if (flow_type != IRQ_TYPE_NONE &&
@@ -200,17 +182,13 @@ static irqreturn_t idi_48_irq_handler(int irq, void *dev_id)
unsigned long gpio;
struct gpio_chip *const chip = &idi48gpio->chip;
- spin_lock(&idi48gpio->ack_lock);
-
- raw_spin_lock(&idi48gpio->lock);
-
- cos_status = ioread8(idi48gpio->base + 7);
+ spin_lock(&idi48gpio->lock);
- raw_spin_unlock(&idi48gpio->lock);
+ cos_status = ioread8(&idi48gpio->reg->irq);
/* IRQ Status (bit 6) is active low (0 = IRQ generated by device) */
if (cos_status & BIT(6)) {
- spin_unlock(&idi48gpio->ack_lock);
+ spin_unlock(&idi48gpio->lock);
return IRQ_NONE;
}
@@ -228,7 +206,7 @@ static irqreturn_t idi_48_irq_handler(int irq, void *dev_id)
}
}
- spin_unlock(&idi48gpio->ack_lock);
+ spin_unlock(&idi48gpio->lock);
return IRQ_HANDLED;
}
@@ -250,8 +228,8 @@ static int idi_48_irq_init_hw(struct gpio_chip *gc)
struct idi_48_gpio *const idi48gpio = gpiochip_get_data(gc);
/* Disable IRQ by default */
- iowrite8(0, idi48gpio->base + 7);
- ioread8(idi48gpio->base + 7);
+ iowrite8(0, &idi48gpio->reg->irq);
+ ioread8(&idi48gpio->reg->irq);
return 0;
}
@@ -273,8 +251,8 @@ static int idi_48_probe(struct device *dev, unsigned int id)
return -EBUSY;
}
- idi48gpio->base = devm_ioport_map(dev, base[id], IDI_48_EXTENT);
- if (!idi48gpio->base)
+ idi48gpio->reg = devm_ioport_map(dev, base[id], IDI_48_EXTENT);
+ if (!idi48gpio->reg)
return -ENOMEM;
idi48gpio->chip.label = name;
@@ -298,8 +276,7 @@ static int idi_48_probe(struct device *dev, unsigned int id)
girq->handler = handle_edge_irq;
girq->init_hw = idi_48_irq_init_hw;
- raw_spin_lock_init(&idi48gpio->lock);
- spin_lock_init(&idi48gpio->ack_lock);
+ spin_lock_init(&idi48gpio->lock);
err = devm_gpiochip_add_data(dev, &idi48gpio->chip, idi48gpio);
if (err) {
diff --git a/drivers/gpio/gpio-104-idio-16.c b/drivers/gpio/gpio-104-idio-16.c
index 45f7ad8573e1..65a5f581d981 100644
--- a/drivers/gpio/gpio-104-idio-16.c
+++ b/drivers/gpio/gpio-104-idio-16.c
@@ -6,7 +6,7 @@
* This driver supports the following ACCES devices: 104-IDIO-16,
* 104-IDIO-16E, 104-IDO-16, 104-IDIO-8, 104-IDIO-8E, and 104-IDO-8.
*/
-#include <linux/bitops.h>
+#include <linux/bits.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gpio/driver.h>
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
+#include <linux/types.h>
#define IDIO_16_EXTENT 8
#define MAX_NUM_IDIO_16 max_num_isa_dev(IDIO_16_EXTENT)
@@ -33,18 +34,41 @@ module_param_hw_array(irq, uint, irq, NULL, 0);
MODULE_PARM_DESC(irq, "ACCES 104-IDIO-16 interrupt line numbers");
/**
+ * struct idio_16_reg - device registers structure
+ * @out0_7: Read: N/A
+ * Write: FET Drive Outputs 0-7
+ * @in0_7: Read: Isolated Inputs 0-7
+ * Write: Clear Interrupt
+ * @irq_ctl: Read: Enable IRQ
+ * Write: Disable IRQ
+ * @unused: N/A
+ * @out8_15: Read: N/A
+ * Write: FET Drive Outputs 8-15
+ * @in8_15: Read: Isolated Inputs 8-15
+ * Write: N/A
+ */
+struct idio_16_reg {
+ u8 out0_7;
+ u8 in0_7;
+ u8 irq_ctl;
+ u8 unused;
+ u8 out8_15;
+ u8 in8_15;
+};
+
+/**
* struct idio_16_gpio - GPIO device private data structure
* @chip: instance of the gpio_chip
* @lock: synchronization lock to prevent I/O race conditions
* @irq_mask: I/O bits affected by interrupts
- * @base: base port address of the GPIO device
+ * @reg: I/O address offset for the device registers
* @out_state: output bits state
*/
struct idio_16_gpio {
struct gpio_chip chip;
raw_spinlock_t lock;
unsigned long irq_mask;
- void __iomem *base;
+ struct idio_16_reg __iomem *reg;
unsigned int out_state;
};
@@ -79,9 +103,9 @@ static int idio_16_gpio_get(struct gpio_chip *chip, unsigned int offset)
return -EINVAL;
if (offset < 24)
- return !!(ioread8(idio16gpio->base + 1) & mask);
+ return !!(ioread8(&idio16gpio->reg->in0_7) & mask);
- return !!(ioread8(idio16gpio->base + 5) & (mask>>8));
+ return !!(ioread8(&idio16gpio->reg->in8_15) & (mask>>8));
}
static int idio_16_gpio_get_multiple(struct gpio_chip *chip,
@@ -91,9 +115,9 @@ static int idio_16_gpio_get_multiple(struct gpio_chip *chip,
*bits = 0;
if (*mask & GENMASK(23, 16))
- *bits |= (unsigned long)ioread8(idio16gpio->base + 1) << 16;
+ *bits |= (unsigned long)ioread8(&idio16gpio->reg->in0_7) << 16;
if (*mask & GENMASK(31, 24))
- *bits |= (unsigned long)ioread8(idio16gpio->base + 5) << 24;
+ *bits |= (unsigned long)ioread8(&idio16gpio->reg->in8_15) << 24;
return 0;
}
@@ -116,9 +140,9 @@ static void idio_16_gpio_set(struct gpio_chip *chip, unsigned int offset,
idio16gpio->out_state &= ~mask;
if (offset > 7)
- iowrite8(idio16gpio->out_state >> 8, idio16gpio->base + 4);
+ iowrite8(idio16gpio->out_state >> 8, &idio16gpio->reg->out8_15);
else
- iowrite8(idio16gpio->out_state, idio16gpio->base);
+ iowrite8(idio16gpio->out_state, &idio16gpio->reg->out0_7);
raw_spin_unlock_irqrestore(&idio16gpio->lock, flags);
}
@@ -135,9 +159,9 @@ static void idio_16_gpio_set_multiple(struct gpio_chip *chip,
idio16gpio->out_state |= *mask & *bits;
if (*mask & 0xFF)
- iowrite8(idio16gpio->out_state, idio16gpio->base);
+ iowrite8(idio16gpio->out_state, &idio16gpio->reg->out0_7);
if ((*mask >> 8) & 0xFF)
- iowrite8(idio16gpio->out_state >> 8, idio16gpio->base + 4);
+ iowrite8(idio16gpio->out_state >> 8, &idio16gpio->reg->out8_15);
raw_spin_unlock_irqrestore(&idio16gpio->lock, flags);
}
@@ -158,7 +182,7 @@ static void idio_16_irq_mask(struct irq_data *data)
if (!idio16gpio->irq_mask) {
raw_spin_lock_irqsave(&idio16gpio->lock, flags);
- iowrite8(0, idio16gpio->base + 2);
+ iowrite8(0, &idio16gpio->reg->irq_ctl);
raw_spin_unlock_irqrestore(&idio16gpio->lock, flags);
}
@@ -177,7 +201,7 @@ static void idio_16_irq_unmask(struct irq_data *data)
if (!prev_irq_mask) {
raw_spin_lock_irqsave(&idio16gpio->lock, flags);
- ioread8(idio16gpio->base + 2);
+ ioread8(&idio16gpio->reg->irq_ctl);
raw_spin_unlock_irqrestore(&idio16gpio->lock, flags);
}
@@ -212,7 +236,7 @@ static irqreturn_t idio_16_irq_handler(int irq, void *dev_id)
raw_spin_lock(&idio16gpio->lock);
- iowrite8(0, idio16gpio->base + 1);
+ iowrite8(0, &idio16gpio->reg->in0_7);
raw_spin_unlock(&idio16gpio->lock);
@@ -232,8 +256,8 @@ static int idio_16_irq_init_hw(struct gpio_chip *gc)
struct idio_16_gpio *const idio16gpio = gpiochip_get_data(gc);
/* Disable IRQ by default */
- iowrite8(0, idio16gpio->base + 2);
- iowrite8(0, idio16gpio->base + 1);
+ iowrite8(0, &idio16gpio->reg->irq_ctl);
+ iowrite8(0, &idio16gpio->reg->in0_7);
return 0;
}
@@ -255,8 +279,8 @@ static int idio_16_probe(struct device *dev, unsigned int id)
return -EBUSY;
}
- idio16gpio->base = devm_ioport_map(dev, base[id], IDIO_16_EXTENT);
- if (!idio16gpio->base)
+ idio16gpio->reg = devm_ioport_map(dev, base[id], IDIO_16_EXTENT);
+ if (!idio16gpio->reg)
return -ENOMEM;
idio16gpio->chip.label = name;
diff --git a/drivers/gpio/gpio-74xx-mmio.c b/drivers/gpio/gpio-74xx-mmio.c
index 173e06758e6c..0464f1ecd20d 100644
--- a/drivers/gpio/gpio-74xx-mmio.c
+++ b/drivers/gpio/gpio-74xx-mmio.c
@@ -5,15 +5,17 @@
* Copyright (C) 2014 Alexander Shiyan <shc_work@mail.ru>
*/
+#include <linux/bits.h>
#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/gpio/driver.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
-#define MMIO_74XX_DIR_IN (0 << 8)
-#define MMIO_74XX_DIR_OUT (1 << 8)
-#define MMIO_74XX_BIT_CNT(x) ((x) & 0xff)
+#define MMIO_74XX_DIR_IN BIT(8)
+#define MMIO_74XX_DIR_OUT BIT(9)
+#define MMIO_74XX_BIT_CNT(x) ((x) & GENMASK(7, 0))
struct mmio_74xx_gpio_priv {
struct gpio_chip gc;
@@ -87,7 +89,10 @@ static int mmio_74xx_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
struct mmio_74xx_gpio_priv *priv = gpiochip_get_data(gc);
- return (priv->flags & MMIO_74XX_DIR_OUT) ? -ENOTSUPP : 0;
+ if (priv->flags & MMIO_74XX_DIR_IN)
+ return 0;
+
+ return -ENOTSUPP;
}
static int mmio_74xx_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
@@ -112,7 +117,7 @@ static int mmio_74xx_gpio_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- priv->flags = (uintptr_t)of_device_get_match_data(&pdev->dev);
+ priv->flags = (uintptr_t)device_get_match_data(&pdev->dev);
dat = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dat))
diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
index cc349d4e4973..a6439e3daff0 100644
--- a/drivers/gpio/gpio-adnp.c
+++ b/drivers/gpio/gpio-adnp.c
@@ -6,8 +6,9 @@
#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_irq.h>
+#include <linux/property.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -485,22 +486,17 @@ static int adnp_gpio_setup(struct adnp *adnp, unsigned int num_gpios,
return 0;
}
-static int adnp_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int adnp_i2c_probe(struct i2c_client *client)
{
- struct device_node *np = client->dev.of_node;
+ struct device *dev = &client->dev;
struct adnp *adnp;
u32 num_gpios;
int err;
- err = of_property_read_u32(np, "nr-gpios", &num_gpios);
+ err = device_property_read_u32(dev, "nr-gpios", &num_gpios);
if (err < 0)
return err;
- client->irq = irq_of_parse_and_map(np, 0);
- if (!client->irq)
- return -EPROBE_DEFER;
-
adnp = devm_kzalloc(&client->dev, sizeof(*adnp), GFP_KERNEL);
if (!adnp)
return -ENOMEM;
@@ -508,8 +504,7 @@ static int adnp_i2c_probe(struct i2c_client *client,
mutex_init(&adnp->i2c_lock);
adnp->client = client;
- err = adnp_gpio_setup(adnp, num_gpios,
- of_property_read_bool(np, "interrupt-controller"));
+ err = adnp_gpio_setup(adnp, num_gpios, device_property_read_bool(dev, "interrupt-controller"));
if (err)
return err;
@@ -535,7 +530,7 @@ static struct i2c_driver adnp_i2c_driver = {
.name = "gpio-adnp",
.of_match_table = adnp_of_match,
},
- .probe = adnp_i2c_probe,
+ .probe_new = adnp_i2c_probe,
.id_table = adnp_i2c_id,
};
module_i2c_driver(adnp_i2c_driver);
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index e388e75103f4..d49f12560cde 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -6,20 +6,18 @@
* Copyright 2009-2010 Analog Devices Inc.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/i2c.h>
#include <linux/gpio/driver.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/of_device.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/platform_data/adp5588.h>
-#define DRV_NAME "adp5588-gpio"
-
/*
* Early pre 4.0 Silicon required to delay readout by at least 25ms,
* since the Event Counter Register updated 25ms after the interrupt
@@ -422,23 +420,21 @@ static int adp5588_gpio_remove(struct i2c_client *client)
}
static const struct i2c_device_id adp5588_gpio_id[] = {
- {DRV_NAME, 0},
+ { "adp5588-gpio" },
{}
};
MODULE_DEVICE_TABLE(i2c, adp5588_gpio_id);
-#ifdef CONFIG_OF
static const struct of_device_id adp5588_gpio_of_id[] = {
- { .compatible = "adi," DRV_NAME, },
- {},
+ { .compatible = "adi,adp5588-gpio" },
+ {}
};
MODULE_DEVICE_TABLE(of, adp5588_gpio_of_id);
-#endif
static struct i2c_driver adp5588_gpio_driver = {
.driver = {
- .name = DRV_NAME,
- .of_match_table = of_match_ptr(adp5588_gpio_of_id),
+ .name = "adp5588-gpio",
+ .of_match_table = adp5588_gpio_of_id,
},
.probe_new = adp5588_gpio_probe,
.remove = adp5588_gpio_remove,
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index 85429dd6b3b6..c55b35da61a0 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -375,12 +375,7 @@ static int brcmstb_gpio_remove(struct platform_device *pdev)
{
struct brcmstb_gpio_priv *priv = platform_get_drvdata(pdev);
struct brcmstb_gpio_bank *bank;
- int offset, ret = 0, virq;
-
- if (!priv) {
- dev_err(&pdev->dev, "called %s without drvdata!\n", __func__);
- return -EFAULT;
- }
+ int offset, virq;
if (priv->parent_irq > 0)
irq_set_chained_handler_and_data(priv->parent_irq, NULL, NULL);
@@ -401,7 +396,7 @@ static int brcmstb_gpio_remove(struct platform_device *pdev)
list_for_each_entry(bank, &priv->bank_list, node)
gpiochip_remove(&bank->gc);
- return ret;
+ return 0;
}
static int brcmstb_gpio_of_xlate(struct gpio_chip *gc,
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index f960587f86a3..59c4c48d8296 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -22,6 +22,7 @@
#include <linux/platform_data/gpio-davinci.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/spinlock.h>
+#include <linux/pm_runtime.h>
#include <asm-generic/gpio.h>
@@ -62,6 +63,8 @@ struct davinci_gpio_controller {
void __iomem *regs[MAX_REGS_BANKS];
int gpio_unbanked;
int irqs[MAX_INT_PER_BANK];
+ struct davinci_gpio_regs context[MAX_REGS_BANKS];
+ u32 binten_context;
};
static inline u32 __gpio_mask(unsigned gpio)
@@ -622,6 +625,85 @@ done:
return 0;
}
+static void davinci_gpio_save_context(struct davinci_gpio_controller *chips,
+ u32 nbank)
+{
+ struct davinci_gpio_regs __iomem *g;
+ struct davinci_gpio_regs *context;
+ u32 bank;
+ void __iomem *base;
+
+ base = chips->regs[0] - offset_array[0];
+ chips->binten_context = readl_relaxed(base + BINTEN);
+
+ for (bank = 0; bank < nbank; bank++) {
+ g = chips->regs[bank];
+ context = &chips->context[bank];
+ context->dir = readl_relaxed(&g->dir);
+ context->set_data = readl_relaxed(&g->set_data);
+ context->set_rising = readl_relaxed(&g->set_rising);
+ context->set_falling = readl_relaxed(&g->set_falling);
+ }
+
+ /* Clear Bank interrupt enable bit */
+ writel_relaxed(0, base + BINTEN);
+
+ /* Clear all interrupt status registers */
+ writel_relaxed(GENMASK(31, 0), &g->intstat);
+}
+
+static void davinci_gpio_restore_context(struct davinci_gpio_controller *chips,
+ u32 nbank)
+{
+ struct davinci_gpio_regs __iomem *g;
+ struct davinci_gpio_regs *context;
+ u32 bank;
+ void __iomem *base;
+
+ base = chips->regs[0] - offset_array[0];
+
+ if (readl_relaxed(base + BINTEN) != chips->binten_context)
+ writel_relaxed(chips->binten_context, base + BINTEN);
+
+ for (bank = 0; bank < nbank; bank++) {
+ g = chips->regs[bank];
+ context = &chips->context[bank];
+ if (readl_relaxed(&g->dir) != context->dir)
+ writel_relaxed(context->dir, &g->dir);
+ if (readl_relaxed(&g->set_data) != context->set_data)
+ writel_relaxed(context->set_data, &g->set_data);
+ if (readl_relaxed(&g->set_rising) != context->set_rising)
+ writel_relaxed(context->set_rising, &g->set_rising);
+ if (readl_relaxed(&g->set_falling) != context->set_falling)
+ writel_relaxed(context->set_falling, &g->set_falling);
+ }
+}
+
+static int davinci_gpio_suspend(struct device *dev)
+{
+ struct davinci_gpio_controller *chips = dev_get_drvdata(dev);
+ struct davinci_gpio_platform_data *pdata = dev_get_platdata(dev);
+ u32 nbank = DIV_ROUND_UP(pdata->ngpio, 32);
+
+ davinci_gpio_save_context(chips, nbank);
+
+ return 0;
+}
+
+static int davinci_gpio_resume(struct device *dev)
+{
+ struct davinci_gpio_controller *chips = dev_get_drvdata(dev);
+ struct davinci_gpio_platform_data *pdata = dev_get_platdata(dev);
+ u32 nbank = DIV_ROUND_UP(pdata->ngpio, 32);
+
+ davinci_gpio_restore_context(chips, nbank);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_DEV_PM_OPS(davinci_gpio_dev_pm_ops, davinci_gpio_suspend,
+ davinci_gpio_resume);
+
static const struct of_device_id davinci_gpio_ids[] = {
{ .compatible = "ti,keystone-gpio", keystone_gpio_get_irq_chip},
{ .compatible = "ti,am654-gpio", keystone_gpio_get_irq_chip},
@@ -634,6 +716,7 @@ static struct platform_driver davinci_gpio_driver = {
.probe = davinci_gpio_probe,
.driver = {
.name = "davinci_gpio",
+ .pm = pm_sleep_ptr(&davinci_gpio_dev_pm_ops),
.of_match_table = of_match_ptr(davinci_gpio_ids),
},
};
diff --git a/drivers/gpio/gpio-gpio-mm.c b/drivers/gpio/gpio-gpio-mm.c
index 097a06463d01..2689671b6b01 100644
--- a/drivers/gpio/gpio-gpio-mm.c
+++ b/drivers/gpio/gpio-gpio-mm.c
@@ -6,8 +6,6 @@
* This driver supports the following Diamond Systems devices: GPIO-MM and
* GPIO-MM-12.
*/
-#include <linux/bitmap.h>
-#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gpio/driver.h>
@@ -17,7 +15,10 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/spinlock.h>
+
+#include "gpio-i8255.h"
+
+MODULE_IMPORT_NS(I8255);
#define GPIOMM_EXTENT 8
#define MAX_NUM_GPIOMM max_num_isa_dev(GPIOMM_EXTENT)
@@ -27,32 +28,26 @@ static unsigned int num_gpiomm;
module_param_hw_array(base, uint, ioport, &num_gpiomm, 0);
MODULE_PARM_DESC(base, "Diamond Systems GPIO-MM base addresses");
+#define GPIOMM_NUM_PPI 2
+
/**
* struct gpiomm_gpio - GPIO device private data structure
- * @chip: instance of the gpio_chip
- * @io_state: bit I/O state (whether bit is set to input or output)
- * @out_state: output bits state
- * @control: Control registers state
- * @lock: synchronization lock to prevent I/O race conditions
- * @base: base port address of the GPIO device
+ * @chip: instance of the gpio_chip
+ * @ppi_state: Programmable Peripheral Interface group states
+ * @ppi: Programmable Peripheral Interface groups
*/
struct gpiomm_gpio {
struct gpio_chip chip;
- unsigned char io_state[6];
- unsigned char out_state[6];
- unsigned char control[2];
- spinlock_t lock;
- void __iomem *base;
+ struct i8255_state ppi_state[GPIOMM_NUM_PPI];
+ struct i8255 __iomem *ppi;
};
static int gpiomm_gpio_get_direction(struct gpio_chip *chip,
unsigned int offset)
{
struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
- const unsigned int port = offset / 8;
- const unsigned int mask = BIT(offset % 8);
- if (gpiommgpio->io_state[port] & mask)
+ if (i8255_get_direction(gpiommgpio->ppi_state, offset))
return GPIO_LINE_DIRECTION_IN;
return GPIO_LINE_DIRECTION_OUT;
@@ -62,35 +57,8 @@ static int gpiomm_gpio_direction_input(struct gpio_chip *chip,
unsigned int offset)
{
struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
- const unsigned int io_port = offset / 8;
- const unsigned int control_port = io_port / 3;
- unsigned long flags;
- unsigned int control;
-
- spin_lock_irqsave(&gpiommgpio->lock, flags);
-
- /* Check if configuring Port C */
- if (io_port == 2 || io_port == 5) {
- /* Port C can be configured by nibble */
- if (offset % 8 > 3) {
- gpiommgpio->io_state[io_port] |= 0xF0;
- gpiommgpio->control[control_port] |= BIT(3);
- } else {
- gpiommgpio->io_state[io_port] |= 0x0F;
- gpiommgpio->control[control_port] |= BIT(0);
- }
- } else {
- gpiommgpio->io_state[io_port] |= 0xFF;
- if (io_port == 0 || io_port == 3)
- gpiommgpio->control[control_port] |= BIT(4);
- else
- gpiommgpio->control[control_port] |= BIT(1);
- }
- control = BIT(7) | gpiommgpio->control[control_port];
- iowrite8(control, gpiommgpio->base + 3 + control_port*4);
-
- spin_unlock_irqrestore(&gpiommgpio->lock, flags);
+ i8255_direction_input(gpiommgpio->ppi, gpiommgpio->ppi_state, offset);
return 0;
}
@@ -99,44 +67,9 @@ static int gpiomm_gpio_direction_output(struct gpio_chip *chip,
unsigned int offset, int value)
{
struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
- const unsigned int io_port = offset / 8;
- const unsigned int control_port = io_port / 3;
- const unsigned int mask = BIT(offset % 8);
- const unsigned int out_port = (io_port > 2) ? io_port + 1 : io_port;
- unsigned long flags;
- unsigned int control;
-
- spin_lock_irqsave(&gpiommgpio->lock, flags);
-
- /* Check if configuring Port C */
- if (io_port == 2 || io_port == 5) {
- /* Port C can be configured by nibble */
- if (offset % 8 > 3) {
- gpiommgpio->io_state[io_port] &= 0x0F;
- gpiommgpio->control[control_port] &= ~BIT(3);
- } else {
- gpiommgpio->io_state[io_port] &= 0xF0;
- gpiommgpio->control[control_port] &= ~BIT(0);
- }
- } else {
- gpiommgpio->io_state[io_port] &= 0x00;
- if (io_port == 0 || io_port == 3)
- gpiommgpio->control[control_port] &= ~BIT(4);
- else
- gpiommgpio->control[control_port] &= ~BIT(1);
- }
-
- if (value)
- gpiommgpio->out_state[io_port] |= mask;
- else
- gpiommgpio->out_state[io_port] &= ~mask;
-
- control = BIT(7) | gpiommgpio->control[control_port];
- iowrite8(control, gpiommgpio->base + 3 + control_port*4);
- iowrite8(gpiommgpio->out_state[io_port], gpiommgpio->base + out_port);
-
- spin_unlock_irqrestore(&gpiommgpio->lock, flags);
+ i8255_direction_output(gpiommgpio->ppi, gpiommgpio->ppi_state, offset,
+ value);
return 0;
}
@@ -144,47 +77,16 @@ static int gpiomm_gpio_direction_output(struct gpio_chip *chip,
static int gpiomm_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
- const unsigned int port = offset / 8;
- const unsigned int mask = BIT(offset % 8);
- const unsigned int in_port = (port > 2) ? port + 1 : port;
- unsigned long flags;
- unsigned int port_state;
-
- spin_lock_irqsave(&gpiommgpio->lock, flags);
-
- /* ensure that GPIO is set for input */
- if (!(gpiommgpio->io_state[port] & mask)) {
- spin_unlock_irqrestore(&gpiommgpio->lock, flags);
- return -EINVAL;
- }
-
- port_state = ioread8(gpiommgpio->base + in_port);
-
- spin_unlock_irqrestore(&gpiommgpio->lock, flags);
- return !!(port_state & mask);
+ return i8255_get(gpiommgpio->ppi, offset);
}
-static const size_t ports[] = { 0, 1, 2, 4, 5, 6 };
-
static int gpiomm_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
unsigned long *bits)
{
struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
- unsigned long offset;
- unsigned long gpio_mask;
- void __iomem *port_addr;
- unsigned long port_state;
-
- /* clear bits array to a clean slate */
- bitmap_zero(bits, chip->ngpio);
- for_each_set_clump8(offset, gpio_mask, mask, ARRAY_SIZE(ports) * 8) {
- port_addr = gpiommgpio->base + ports[offset / 8];
- port_state = ioread8(port_addr) & gpio_mask;
-
- bitmap_set_value8(bits, port_state, offset);
- }
+ i8255_get_multiple(gpiommgpio->ppi, mask, bits, chip->ngpio);
return 0;
}
@@ -193,49 +95,17 @@ static void gpiomm_gpio_set(struct gpio_chip *chip, unsigned int offset,
int value)
{
struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
- const unsigned int port = offset / 8;
- const unsigned int mask = BIT(offset % 8);
- const unsigned int out_port = (port > 2) ? port + 1 : port;
- unsigned long flags;
-
- spin_lock_irqsave(&gpiommgpio->lock, flags);
-
- if (value)
- gpiommgpio->out_state[port] |= mask;
- else
- gpiommgpio->out_state[port] &= ~mask;
-
- iowrite8(gpiommgpio->out_state[port], gpiommgpio->base + out_port);
- spin_unlock_irqrestore(&gpiommgpio->lock, flags);
+ i8255_set(gpiommgpio->ppi, gpiommgpio->ppi_state, offset, value);
}
static void gpiomm_gpio_set_multiple(struct gpio_chip *chip,
unsigned long *mask, unsigned long *bits)
{
struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
- unsigned long offset;
- unsigned long gpio_mask;
- size_t index;
- void __iomem *port_addr;
- unsigned long bitmask;
- unsigned long flags;
-
- for_each_set_clump8(offset, gpio_mask, mask, ARRAY_SIZE(ports) * 8) {
- index = offset / 8;
- port_addr = gpiommgpio->base + ports[index];
-
- bitmask = bitmap_get_value8(bits, offset) & gpio_mask;
-
- spin_lock_irqsave(&gpiommgpio->lock, flags);
- /* update output state data and set device gpio register */
- gpiommgpio->out_state[index] &= ~gpio_mask;
- gpiommgpio->out_state[index] |= bitmask;
- iowrite8(gpiommgpio->out_state[index], port_addr);
-
- spin_unlock_irqrestore(&gpiommgpio->lock, flags);
- }
+ i8255_set_multiple(gpiommgpio->ppi, gpiommgpio->ppi_state, mask, bits,
+ chip->ngpio);
}
#define GPIOMM_NGPIO 48
@@ -250,6 +120,21 @@ static const char *gpiomm_names[GPIOMM_NGPIO] = {
"Port 2C2", "Port 2C3", "Port 2C4", "Port 2C5", "Port 2C6", "Port 2C7",
};
+static void gpiomm_init_dio(struct i8255 __iomem *const ppi,
+ struct i8255_state *const ppi_state)
+{
+ const unsigned long ngpio = 24;
+ const unsigned long mask = GENMASK(ngpio - 1, 0);
+ const unsigned long bits = 0;
+ unsigned long i;
+
+ /* Initialize all GPIO to output 0 */
+ for (i = 0; i < GPIOMM_NUM_PPI; i++) {
+ i8255_mode0_output(&ppi[i]);
+ i8255_set_multiple(&ppi[i], &ppi_state[i], &mask, &bits, ngpio);
+ }
+}
+
static int gpiomm_probe(struct device *dev, unsigned int id)
{
struct gpiomm_gpio *gpiommgpio;
@@ -266,8 +151,8 @@ static int gpiomm_probe(struct device *dev, unsigned int id)
return -EBUSY;
}
- gpiommgpio->base = devm_ioport_map(dev, base[id], GPIOMM_EXTENT);
- if (!gpiommgpio->base)
+ gpiommgpio->ppi = devm_ioport_map(dev, base[id], GPIOMM_EXTENT);
+ if (!gpiommgpio->ppi)
return -ENOMEM;
gpiommgpio->chip.label = name;
@@ -284,7 +169,8 @@ static int gpiomm_probe(struct device *dev, unsigned int id)
gpiommgpio->chip.set = gpiomm_gpio_set;
gpiommgpio->chip.set_multiple = gpiomm_gpio_set_multiple;
- spin_lock_init(&gpiommgpio->lock);
+ i8255_state_init(gpiommgpio->ppi_state, GPIOMM_NUM_PPI);
+ gpiomm_init_dio(gpiommgpio->ppi, gpiommgpio->ppi_state);
err = devm_gpiochip_add_data(dev, &gpiommgpio->chip, gpiommgpio);
if (err) {
@@ -292,16 +178,6 @@ static int gpiomm_probe(struct device *dev, unsigned int id)
return err;
}
- /* initialize all GPIO as output */
- iowrite8(0x80, gpiommgpio->base + 3);
- iowrite8(0x00, gpiommgpio->base);
- iowrite8(0x00, gpiommgpio->base + 1);
- iowrite8(0x00, gpiommgpio->base + 2);
- iowrite8(0x80, gpiommgpio->base + 7);
- iowrite8(0x00, gpiommgpio->base + 4);
- iowrite8(0x00, gpiommgpio->base + 5);
- iowrite8(0x00, gpiommgpio->base + 6);
-
return 0;
}
diff --git a/drivers/gpio/gpio-i8255.c b/drivers/gpio/gpio-i8255.c
new file mode 100644
index 000000000000..9b97db418df1
--- /dev/null
+++ b/drivers/gpio/gpio-i8255.c
@@ -0,0 +1,287 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel 8255 Programmable Peripheral Interface
+ * Copyright (C) 2022 William Breathitt Gray
+ */
+#include <linux/bitmap.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "gpio-i8255.h"
+
+#define I8255_CONTROL_PORTC_LOWER_DIRECTION BIT(0)
+#define I8255_CONTROL_PORTB_DIRECTION BIT(1)
+#define I8255_CONTROL_PORTC_UPPER_DIRECTION BIT(3)
+#define I8255_CONTROL_PORTA_DIRECTION BIT(4)
+#define I8255_CONTROL_MODE_SET BIT(7)
+#define I8255_PORTA 0
+#define I8255_PORTB 1
+#define I8255_PORTC 2
+
+static int i8255_get_port(struct i8255 __iomem *const ppi,
+ const unsigned long io_port, const unsigned long mask)
+{
+ const unsigned long bank = io_port / 3;
+ const unsigned long ppi_port = io_port % 3;
+
+ return ioread8(&ppi[bank].port[ppi_port]) & mask;
+}
+
+static u8 i8255_direction_mask(const unsigned long offset)
+{
+ const unsigned long port_offset = offset % 8;
+ const unsigned long io_port = offset / 8;
+ const unsigned long ppi_port = io_port % 3;
+
+ switch (ppi_port) {
+ case I8255_PORTA:
+ return I8255_CONTROL_PORTA_DIRECTION;
+ case I8255_PORTB:
+ return I8255_CONTROL_PORTB_DIRECTION;
+ case I8255_PORTC:
+ /* Port C can be configured by nibble */
+ if (port_offset >= 4)
+ return I8255_CONTROL_PORTC_UPPER_DIRECTION;
+ return I8255_CONTROL_PORTC_LOWER_DIRECTION;
+ default:
+ /* Should never reach this path */
+ return 0;
+ }
+}
+
+static void i8255_set_port(struct i8255 __iomem *const ppi,
+ struct i8255_state *const state,
+ const unsigned long io_port,
+ const unsigned long mask, const unsigned long bits)
+{
+ const unsigned long bank = io_port / 3;
+ const unsigned long ppi_port = io_port % 3;
+ unsigned long flags;
+ unsigned long out_state;
+
+ spin_lock_irqsave(&state[bank].lock, flags);
+
+ out_state = ioread8(&ppi[bank].port[ppi_port]);
+ out_state = (out_state & ~mask) | (bits & mask);
+ iowrite8(out_state, &ppi[bank].port[ppi_port]);
+
+ spin_unlock_irqrestore(&state[bank].lock, flags);
+}
+
+/**
+ * i8255_direction_input - configure signal offset as input
+ * @ppi: Intel 8255 Programmable Peripheral Interface banks
+ * @state: devices states of the respective PPI banks
+ * @offset: signal offset to configure as input
+ *
+ * Configures a signal @offset as input for the respective Intel 8255
+ * Programmable Peripheral Interface (@ppi) banks. The @state control_state
+ * values are updated to reflect the new configuration.
+ */
+void i8255_direction_input(struct i8255 __iomem *const ppi,
+ struct i8255_state *const state,
+ const unsigned long offset)
+{
+ const unsigned long io_port = offset / 8;
+ const unsigned long bank = io_port / 3;
+ unsigned long flags;
+
+ spin_lock_irqsave(&state[bank].lock, flags);
+
+ state[bank].control_state |= I8255_CONTROL_MODE_SET;
+ state[bank].control_state |= i8255_direction_mask(offset);
+
+ iowrite8(state[bank].control_state, &ppi[bank].control);
+
+ spin_unlock_irqrestore(&state[bank].lock, flags);
+}
+EXPORT_SYMBOL_NS_GPL(i8255_direction_input, I8255);
+
+/**
+ * i8255_direction_output - configure signal offset as output
+ * @ppi: Intel 8255 Programmable Peripheral Interface banks
+ * @state: devices states of the respective PPI banks
+ * @offset: signal offset to configure as output
+ * @value: signal value to output
+ *
+ * Configures a signal @offset as output for the respective Intel 8255
+ * Programmable Peripheral Interface (@ppi) banks and sets the respective signal
+ * output to the desired @value. The @state control_state values are updated to
+ * reflect the new configuration.
+ */
+void i8255_direction_output(struct i8255 __iomem *const ppi,
+ struct i8255_state *const state,
+ const unsigned long offset,
+ const unsigned long value)
+{
+ const unsigned long io_port = offset / 8;
+ const unsigned long bank = io_port / 3;
+ unsigned long flags;
+
+ spin_lock_irqsave(&state[bank].lock, flags);
+
+ state[bank].control_state |= I8255_CONTROL_MODE_SET;
+ state[bank].control_state &= ~i8255_direction_mask(offset);
+
+ iowrite8(state[bank].control_state, &ppi[bank].control);
+
+ spin_unlock_irqrestore(&state[bank].lock, flags);
+
+ i8255_set(ppi, state, offset, value);
+}
+EXPORT_SYMBOL_NS_GPL(i8255_direction_output, I8255);
+
+/**
+ * i8255_get - get signal value at signal offset
+ * @ppi: Intel 8255 Programmable Peripheral Interface banks
+ * @offset: offset of signal to get
+ *
+ * Returns the signal value (0=low, 1=high) for the signal at @offset for the
+ * respective Intel 8255 Programmable Peripheral Interface (@ppi) banks.
+ */
+int i8255_get(struct i8255 __iomem *const ppi, const unsigned long offset)
+{
+ const unsigned long io_port = offset / 8;
+ const unsigned long offset_mask = BIT(offset % 8);
+
+ return !!i8255_get_port(ppi, io_port, offset_mask);
+}
+EXPORT_SYMBOL_NS_GPL(i8255_get, I8255);
+
+/**
+ * i8255_get_direction - get the I/O direction for a signal offset
+ * @state: devices states of the respective PPI banks
+ * @offset: offset of signal to get direction
+ *
+ * Returns the signal direction (0=output, 1=input) for the signal at @offset.
+ */
+int i8255_get_direction(const struct i8255_state *const state,
+ const unsigned long offset)
+{
+ const unsigned long io_port = offset / 8;
+ const unsigned long bank = io_port / 3;
+
+ return !!(state[bank].control_state & i8255_direction_mask(offset));
+}
+EXPORT_SYMBOL_NS_GPL(i8255_get_direction, I8255);
+
+/**
+ * i8255_get_multiple - get multiple signal values at multiple signal offsets
+ * @ppi: Intel 8255 Programmable Peripheral Interface banks
+ * @mask: mask of signals to get
+ * @bits: bitmap to store signal values
+ * @ngpio: number of GPIO signals of the respective PPI banks
+ *
+ * Stores in @bits the values (0=low, 1=high) for the signals defined by @mask
+ * for the respective Intel 8255 Programmable Peripheral Interface (@ppi) banks.
+ */
+void i8255_get_multiple(struct i8255 __iomem *const ppi,
+ const unsigned long *const mask,
+ unsigned long *const bits, const unsigned long ngpio)
+{
+ unsigned long offset;
+ unsigned long port_mask;
+ unsigned long io_port;
+ unsigned long port_state;
+
+ bitmap_zero(bits, ngpio);
+
+ for_each_set_clump8(offset, port_mask, mask, ngpio) {
+ io_port = offset / 8;
+ port_state = i8255_get_port(ppi, io_port, port_mask);
+
+ bitmap_set_value8(bits, port_state, offset);
+ }
+}
+EXPORT_SYMBOL_NS_GPL(i8255_get_multiple, I8255);
+
+/**
+ * i8255_mode0_output - configure all PPI ports to MODE 0 output mode
+ * @ppi: Intel 8255 Programmable Peripheral Interface bank
+ *
+ * Configures all Intel 8255 Programmable Peripheral Interface (@ppi) ports to
+ * MODE 0 (Basic Input/Output) output mode.
+ */
+void i8255_mode0_output(struct i8255 __iomem *const ppi)
+{
+ iowrite8(I8255_CONTROL_MODE_SET, &ppi->control);
+}
+EXPORT_SYMBOL_NS_GPL(i8255_mode0_output, I8255);
+
+/**
+ * i8255_set - set signal value at signal offset
+ * @ppi: Intel 8255 Programmable Peripheral Interface banks
+ * @state: devices states of the respective PPI banks
+ * @offset: offset of signal to set
+ * @value: value of signal to set
+ *
+ * Assigns output @value for the signal at @offset for the respective Intel 8255
+ * Programmable Peripheral Interface (@ppi) banks.
+ */
+void i8255_set(struct i8255 __iomem *const ppi, struct i8255_state *const state,
+ const unsigned long offset, const unsigned long value)
+{
+ const unsigned long io_port = offset / 8;
+ const unsigned long port_offset = offset % 8;
+ const unsigned long mask = BIT(port_offset);
+ const unsigned long bits = value << port_offset;
+
+ i8255_set_port(ppi, state, io_port, mask, bits);
+}
+EXPORT_SYMBOL_NS_GPL(i8255_set, I8255);
+
+/**
+ * i8255_set_multiple - set signal values at multiple signal offsets
+ * @ppi: Intel 8255 Programmable Peripheral Interface banks
+ * @state: devices states of the respective PPI banks
+ * @mask: mask of signals to set
+ * @bits: bitmap of signal output values
+ * @ngpio: number of GPIO signals of the respective PPI banks
+ *
+ * Assigns output values defined by @bits for the signals defined by @mask for
+ * the respective Intel 8255 Programmable Peripheral Interface (@ppi) banks.
+ */
+void i8255_set_multiple(struct i8255 __iomem *const ppi,
+ struct i8255_state *const state,
+ const unsigned long *const mask,
+ const unsigned long *const bits,
+ const unsigned long ngpio)
+{
+ unsigned long offset;
+ unsigned long port_mask;
+ unsigned long io_port;
+ unsigned long value;
+
+ for_each_set_clump8(offset, port_mask, mask, ngpio) {
+ io_port = offset / 8;
+ value = bitmap_get_value8(bits, offset);
+ i8255_set_port(ppi, state, io_port, port_mask, value);
+ }
+}
+EXPORT_SYMBOL_NS_GPL(i8255_set_multiple, I8255);
+
+/**
+ * i8255_state_init - initialize i8255_state structure
+ * @state: devices states of the respective PPI banks
+ * @nbanks: number of Intel 8255 Programmable Peripheral Interface banks
+ *
+ * Initializes the @state of each Intel 8255 Programmable Peripheral Interface
+ * bank for use in i8255 library functions.
+ */
+void i8255_state_init(struct i8255_state *const state,
+ const unsigned long nbanks)
+{
+ unsigned long bank;
+
+ for (bank = 0; bank < nbanks; bank++)
+ spin_lock_init(&state[bank].lock);
+}
+EXPORT_SYMBOL_NS_GPL(i8255_state_init, I8255);
+
+MODULE_AUTHOR("William Breathitt Gray");
+MODULE_DESCRIPTION("Intel 8255 Programmable Peripheral Interface");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-i8255.h b/drivers/gpio/gpio-i8255.h
new file mode 100644
index 000000000000..d9084aae9446
--- /dev/null
+++ b/drivers/gpio/gpio-i8255.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2022 William Breathitt Gray */
+#ifndef _I8255_H_
+#define _I8255_H_
+
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+/**
+ * struct i8255 - Intel 8255 register structure
+ * @port: Port A, B, and C
+ * @control: Control register
+ */
+struct i8255 {
+ u8 port[3];
+ u8 control;
+};
+
+/**
+ * struct i8255_state - Intel 8255 state structure
+ * @lock: synchronization lock for accessing device state
+ * @control_state: Control register state
+ */
+struct i8255_state {
+ spinlock_t lock;
+ u8 control_state;
+};
+
+void i8255_direction_input(struct i8255 __iomem *ppi, struct i8255_state *state,
+ unsigned long offset);
+void i8255_direction_output(struct i8255 __iomem *ppi,
+ struct i8255_state *state, unsigned long offset,
+ unsigned long value);
+int i8255_get(struct i8255 __iomem *ppi, unsigned long offset);
+int i8255_get_direction(const struct i8255_state *state, unsigned long offset);
+void i8255_get_multiple(struct i8255 __iomem *ppi, const unsigned long *mask,
+ unsigned long *bits, unsigned long ngpio);
+void i8255_mode0_output(struct i8255 __iomem *const ppi);
+void i8255_set(struct i8255 __iomem *ppi, struct i8255_state *state,
+ unsigned long offset, unsigned long value);
+void i8255_set_multiple(struct i8255 __iomem *ppi, struct i8255_state *state,
+ const unsigned long *mask, const unsigned long *bits,
+ unsigned long ngpio);
+void i8255_state_init(struct i8255_state *const state, unsigned long nbanks);
+
+#endif /* _I8255_H_ */
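Taken together, the converted drivers above all follow the same pattern: map the device registers onto an array of struct i8255, keep a parallel array of struct i8255_state, initialize the state locks with i8255_state_init(), force the chips into MODE 0 output, and then forward the gpio_chip callbacks to the library. A condensed sketch of that pattern, using only the functions declared in gpio-i8255.h (the foo_* names are illustrative and not part of the patch):

	#define FOO_NUM_PPI 2

	struct foo_gpio {
		struct gpio_chip chip;
		struct i8255_state ppi_state[FOO_NUM_PPI];
		struct i8255 __iomem *ppi;
	};

	static void foo_init_ppi(struct foo_gpio *foo)
	{
		unsigned long i;

		/* one spinlock per PPI bank */
		i8255_state_init(foo->ppi_state, FOO_NUM_PPI);

		/* MODE 0 output, as dio48e_init_ppi() and gpiomm_init_dio() do */
		for (i = 0; i < FOO_NUM_PPI; i++)
			i8255_mode0_output(&foo->ppi[i]);
	}

	static int foo_gpio_get(struct gpio_chip *chip, unsigned int offset)
	{
		struct foo_gpio *foo = gpiochip_get_data(chip);

		return i8255_get(foo->ppi, offset);
	}

	static void foo_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
	{
		struct foo_gpio *foo = gpiochip_get_data(chip);

		i8255_set(foo->ppi, foo->ppi_state, offset, value);
	}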
diff --git a/drivers/gpio/gpio-lp3943.c b/drivers/gpio/gpio-lp3943.c
index 8a30fb185aab..79edd5db49d2 100644
--- a/drivers/gpio/gpio-lp3943.c
+++ b/drivers/gpio/gpio-lp3943.c
@@ -42,7 +42,7 @@ struct lp3943_gpio {
u16 input_mask; /* 1 = GPIO is input direction, 0 = output */
};
-static int lp3943_gpio_request(struct gpio_chip *chip, unsigned offset)
+static int lp3943_gpio_request(struct gpio_chip *chip, unsigned int offset)
{
struct lp3943_gpio *lp3943_gpio = gpiochip_get_data(chip);
struct lp3943 *lp3943 = lp3943_gpio->lp3943;
@@ -54,7 +54,7 @@ static int lp3943_gpio_request(struct gpio_chip *chip, unsigned offset)
return 0;
}
-static void lp3943_gpio_free(struct gpio_chip *chip, unsigned offset)
+static void lp3943_gpio_free(struct gpio_chip *chip, unsigned int offset)
{
struct lp3943_gpio *lp3943_gpio = gpiochip_get_data(chip);
struct lp3943 *lp3943 = lp3943_gpio->lp3943;
@@ -72,7 +72,7 @@ static int lp3943_gpio_set_mode(struct lp3943_gpio *lp3943_gpio, u8 offset,
val << mux[offset].shift);
}
-static int lp3943_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+static int lp3943_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
{
struct lp3943_gpio *lp3943_gpio = gpiochip_get_data(chip);
@@ -82,7 +82,7 @@ static int lp3943_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
}
static int lp3943_get_gpio_in_status(struct lp3943_gpio *lp3943_gpio,
- struct gpio_chip *chip, unsigned offset)
+ struct gpio_chip *chip, unsigned int offset)
{
u8 addr, read;
int err;
@@ -107,7 +107,7 @@ static int lp3943_get_gpio_in_status(struct lp3943_gpio *lp3943_gpio,
}
static int lp3943_get_gpio_out_status(struct lp3943_gpio *lp3943_gpio,
- struct gpio_chip *chip, unsigned offset)
+ struct gpio_chip *chip, unsigned int offset)
{
struct lp3943 *lp3943 = lp3943_gpio->lp3943;
const struct lp3943_reg_cfg *mux = lp3943->mux_cfg;
@@ -128,7 +128,7 @@ static int lp3943_get_gpio_out_status(struct lp3943_gpio *lp3943_gpio,
return -EINVAL;
}
-static int lp3943_gpio_get(struct gpio_chip *chip, unsigned offset)
+static int lp3943_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct lp3943_gpio *lp3943_gpio = gpiochip_get_data(chip);
@@ -147,7 +147,7 @@ static int lp3943_gpio_get(struct gpio_chip *chip, unsigned offset)
return lp3943_get_gpio_out_status(lp3943_gpio, chip, offset);
}
-static void lp3943_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static void lp3943_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct lp3943_gpio *lp3943_gpio = gpiochip_get_data(chip);
u8 data;
@@ -160,7 +160,7 @@ static void lp3943_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
lp3943_gpio_set_mode(lp3943_gpio, offset, data);
}
-static int lp3943_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+static int lp3943_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
int value)
{
struct lp3943_gpio *lp3943_gpio = gpiochip_get_data(chip);
diff --git a/drivers/gpio/gpio-pca9570.c b/drivers/gpio/gpio-pca9570.c
index cb2b2f735c15..ab2a652964ec 100644
--- a/drivers/gpio/gpio-pca9570.c
+++ b/drivers/gpio/gpio-pca9570.c
@@ -121,12 +121,14 @@ static int pca9570_probe(struct i2c_client *client)
static const struct i2c_device_id pca9570_id_table[] = {
{ "pca9570", 4 },
+ { "pca9571", 8 },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, pca9570_id_table);
static const struct of_device_id pca9570_of_match_table[] = {
{ .compatible = "nxp,pca9570", .data = (void *)4 },
+ { .compatible = "nxp,pca9571", .data = (void *)8 },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, pca9570_of_match_table);
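
The new pca9571 entries reuse the existing driver unchanged; the only per-variant knob is the line count carried in the match data (4 vs. 8). A hedged sketch of how a probe routine can pick that value up through the generic firmware-property helper follows; the function and variable names are hypothetical.

#include <linux/i2c.h>
#include <linux/property.h>

static int example_probe(struct i2c_client *client)
{
	/* 4 for nxp,pca9570 and 8 for nxp,pca9571, per the OF table above */
	unsigned int ngpio = (uintptr_t)device_get_match_data(&client->dev);

	dev_info(&client->dev, "variant provides %u GPIO lines\n", ngpio);
	return 0;
}
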
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index 3a0bd8795741..ee37ecb615cb 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -37,6 +37,11 @@ struct pch_regs {
u32 reset;
};
+#define PCI_DEVICE_ID_INTEL_EG20T_PCH 0x8803
+#define PCI_DEVICE_ID_ROHM_ML7223m_IOH 0x8014
+#define PCI_DEVICE_ID_ROHM_ML7223n_IOH 0x8043
+#define PCI_DEVICE_ID_ROHM_EG20T_PCH 0x8803
+
enum pch_type_t {
INTEL_EG20T_PCH,
OKISEMI_ML7223m_IOH, /* LAPIS Semiconductor ML7223 IOH PCIe Bus-m */
@@ -357,16 +362,12 @@ static int pch_gpio_probe(struct pci_dev *pdev,
chip->dev = dev;
ret = pcim_enable_device(pdev);
- if (ret) {
- dev_err(dev, "pci_enable_device FAILED");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable PCI device\n");
ret = pcim_iomap_regions(pdev, BIT(1), KBUILD_MODNAME);
- if (ret) {
- dev_err(dev, "pci_request_regions FAILED-%d", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request and map PCI regions\n");
chip->base = pcim_iomap_table(pdev)[1];
chip->ioh = id->driver_data;
@@ -376,10 +377,8 @@ static int pch_gpio_probe(struct pci_dev *pdev,
pch_gpio_setup(chip);
ret = devm_gpiochip_add_data(dev, &chip->gpio, chip);
- if (ret) {
- dev_err(dev, "PCH gpio: Failed to register GPIO\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register GPIO\n");
irq_base = devm_irq_alloc_descs(dev, -1, 0,
gpio_pins[chip->ioh], NUMA_NO_NODE);
@@ -396,10 +395,8 @@ static int pch_gpio_probe(struct pci_dev *pdev,
ret = devm_request_irq(dev, pdev->irq, pch_gpio_handler,
IRQF_SHARED, KBUILD_MODNAME, chip);
- if (ret) {
- dev_err(dev, "request_irq failed\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request IRQ\n");
return pch_gpio_alloc_generic_chip(chip, irq_base, gpio_pins[chip->ioh]);
}
@@ -433,15 +430,11 @@ static int __maybe_unused pch_gpio_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(pch_gpio_pm_ops, pch_gpio_suspend, pch_gpio_resume);
static const struct pci_device_id pch_gpio_pcidev_id[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803),
- .driver_data = INTEL_EG20T_PCH },
- { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8014),
- .driver_data = OKISEMI_ML7223m_IOH },
- { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8043),
- .driver_data = OKISEMI_ML7223n_IOH },
- { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8803),
- .driver_data = INTEL_EG20T_PCH },
- { 0, }
+ { PCI_DEVICE_DATA(INTEL, EG20T_PCH, INTEL_EG20T_PCH) },
+ { PCI_DEVICE_DATA(ROHM, ML7223m_IOH, OKISEMI_ML7223m_IOH) },
+ { PCI_DEVICE_DATA(ROHM, ML7223n_IOH, OKISEMI_ML7223n_IOH) },
+ { PCI_DEVICE_DATA(ROHM, EG20T_PCH, INTEL_EG20T_PCH) },
+ { }
};
MODULE_DEVICE_TABLE(pci, pch_gpio_pcidev_id);
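
Two cleanups repeat through this hunk: the error paths collapse into dev_err_probe(), which logs the message (or quietly records it for -EPROBE_DEFER) and hands back the error code it was given, and the ID table switches to PCI_DEVICE_DATA(), which fills in the vendor/device IDs from the PCI_VENDOR_ID_* and PCI_DEVICE_ID_* constants added above and stores the enum value in driver_data. A hedged sketch of the error-path pattern, with a hypothetical helper name:

#include <linux/pci.h>

static int example_enable(struct pci_dev *pdev)
{
	int ret = pcim_enable_device(pdev);

	if (ret)
		/* prints the message and passes ret straight back */
		return dev_err_probe(&pdev->dev, ret,
				     "Failed to enable PCI device\n");
	return 0;
}
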
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
index e342a6dc4c6c..f91e876fd969 100644
--- a/drivers/gpio/gpio-rockchip.c
+++ b/drivers/gpio/gpio-rockchip.c
@@ -27,6 +27,7 @@
#define GPIO_TYPE_V1 (0) /* GPIO Version ID reserved */
#define GPIO_TYPE_V2 (0x01000C2B) /* GPIO Version ID 0x01000C2B */
+#define GPIO_TYPE_V2_1 (0x0101157C) /* GPIO Version ID 0x0101157C */
static const struct rockchip_gpio_regs gpio_regs_v1 = {
.port_dr = 0x00,
@@ -664,7 +665,7 @@ static int rockchip_get_bank_data(struct rockchip_pin_bank *bank)
id = readl(bank->reg_base + gpio_regs_v2.version_id);
/* If not gpio v2, that is default to v1. */
- if (id == GPIO_TYPE_V2) {
+ if (id == GPIO_TYPE_V2 || id == GPIO_TYPE_V2_1) {
bank->gpio_regs = &gpio_regs_v2;
bank->gpio_type = GPIO_TYPE_V2;
bank->db_clk = of_clk_get(bank->of_node, 1);
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index de249726230e..5046e51af8df 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -593,27 +593,13 @@ out:
/* Cannot use as gpio_twl4030_probe() calls us */
static int gpio_twl4030_remove(struct platform_device *pdev)
{
- struct twl4030_gpio_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct gpio_twl4030_priv *priv = platform_get_drvdata(pdev);
- int status;
-
- if (pdata && pdata->teardown) {
- status = pdata->teardown(&pdev->dev, priv->gpio_chip.base,
- TWL4030_GPIO_MAX);
- if (status) {
- dev_dbg(&pdev->dev, "teardown --> %d\n", status);
- return status;
- }
- }
gpiochip_remove(&priv->gpio_chip);
- if (is_module())
- return 0;
-
/* REVISIT no support yet for deregistering all the IRQs */
- WARN_ON(1);
- return -EIO;
+ WARN_ON(!is_module());
+ return 0;
}
static const struct of_device_id twl_gpio_match[] = {
diff --git a/drivers/gpio/gpio-ucb1400.c b/drivers/gpio/gpio-ucb1400.c
index d2a8644864c3..386e69300332 100644
--- a/drivers/gpio/gpio-ucb1400.c
+++ b/drivers/gpio/gpio-ucb1400.c
@@ -64,34 +64,14 @@ static int ucb1400_gpio_probe(struct platform_device *dev)
ucb->gc.can_sleep = true;
err = devm_gpiochip_add_data(&dev->dev, &ucb->gc, ucb);
- if (err)
- goto err;
-
- if (ucb->gpio_setup)
- err = ucb->gpio_setup(&dev->dev, ucb->gc.ngpio);
err:
return err;
}
-static int ucb1400_gpio_remove(struct platform_device *dev)
-{
- int err = 0;
- struct ucb1400_gpio *ucb = platform_get_drvdata(dev);
-
- if (ucb && ucb->gpio_teardown) {
- err = ucb->gpio_teardown(&dev->dev, ucb->gc.ngpio);
- if (err)
- return err;
- }
-
- return err;
-}
-
static struct platform_driver ucb1400_gpio_driver = {
.probe = ucb1400_gpio_probe,
- .remove = ucb1400_gpio_remove,
.driver = {
.name = "ucb1400_gpio"
},
diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
deleted file mode 100644
index 8d09b619c166..000000000000
--- a/drivers/gpio/gpio-vr41xx.c
+++ /dev/null
@@ -1,541 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Driver for NEC VR4100 series General-purpose I/O Unit.
- *
- * Copyright (C) 2002 MontaVista Software Inc.
- * Author: Yoichi Yuasa <source@mvista.com>
- * Copyright (C) 2003-2009 Yoichi Yuasa <yuasa@linux-mips.org>
- */
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/gpio/driver.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-
-#include <asm/vr41xx/giu.h>
-#include <asm/vr41xx/irq.h>
-#include <asm/vr41xx/vr41xx.h>
-
-MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
-MODULE_DESCRIPTION("NEC VR4100 series General-purpose I/O Unit driver");
-MODULE_LICENSE("GPL");
-
-#define GIUIOSELL 0x00
-#define GIUIOSELH 0x02
-#define GIUPIODL 0x04
-#define GIUPIODH 0x06
-#define GIUINTSTATL 0x08
-#define GIUINTSTATH 0x0a
-#define GIUINTENL 0x0c
-#define GIUINTENH 0x0e
-#define GIUINTTYPL 0x10
-#define GIUINTTYPH 0x12
-#define GIUINTALSELL 0x14
-#define GIUINTALSELH 0x16
-#define GIUINTHTSELL 0x18
-#define GIUINTHTSELH 0x1a
-#define GIUPODATL 0x1c
-#define GIUPODATEN 0x1c
-#define GIUPODATH 0x1e
- #define PIOEN0 0x0100
- #define PIOEN1 0x0200
-#define GIUPODAT 0x1e
-#define GIUFEDGEINHL 0x20
-#define GIUFEDGEINHH 0x22
-#define GIUREDGEINHL 0x24
-#define GIUREDGEINHH 0x26
-
-#define GIUUSEUPDN 0x1e0
-#define GIUTERMUPDN 0x1e2
-
-#define GPIO_HAS_PULLUPDOWN_IO 0x0001
-#define GPIO_HAS_OUTPUT_ENABLE 0x0002
-#define GPIO_HAS_INTERRUPT_EDGE_SELECT 0x0100
-
-enum {
- GPIO_INPUT,
- GPIO_OUTPUT,
-};
-
-static DEFINE_SPINLOCK(giu_lock);
-static unsigned long giu_flags;
-
-static void __iomem *giu_base;
-static struct gpio_chip vr41xx_gpio_chip;
-
-#define giu_read(offset) readw(giu_base + (offset))
-#define giu_write(offset, value) writew((value), giu_base + (offset))
-
-#define GPIO_PIN_OF_IRQ(irq) ((irq) - GIU_IRQ_BASE)
-#define GIUINT_HIGH_OFFSET 16
-#define GIUINT_HIGH_MAX 32
-
-static inline u16 giu_set(u16 offset, u16 set)
-{
- u16 data;
-
- data = giu_read(offset);
- data |= set;
- giu_write(offset, data);
-
- return data;
-}
-
-static inline u16 giu_clear(u16 offset, u16 clear)
-{
- u16 data;
-
- data = giu_read(offset);
- data &= ~clear;
- giu_write(offset, data);
-
- return data;
-}
-
-static void ack_giuint_low(struct irq_data *d)
-{
- giu_write(GIUINTSTATL, 1 << GPIO_PIN_OF_IRQ(d->irq));
-}
-
-static void mask_giuint_low(struct irq_data *d)
-{
- giu_clear(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(d->irq));
-}
-
-static void mask_ack_giuint_low(struct irq_data *d)
-{
- unsigned int pin;
-
- pin = GPIO_PIN_OF_IRQ(d->irq);
- giu_clear(GIUINTENL, 1 << pin);
- giu_write(GIUINTSTATL, 1 << pin);
-}
-
-static void unmask_giuint_low(struct irq_data *d)
-{
- giu_set(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(d->irq));
-}
-
-static unsigned int startup_giuint(struct irq_data *data)
-{
- int ret;
-
- ret = gpiochip_lock_as_irq(&vr41xx_gpio_chip, irqd_to_hwirq(data));
- if (ret) {
- dev_err(vr41xx_gpio_chip.parent,
- "unable to lock HW IRQ %lu for IRQ\n",
- data->hwirq);
- return ret;
- }
-
- /* Satisfy the .enable semantics by unmasking the line */
- unmask_giuint_low(data);
- return 0;
-}
-
-static void shutdown_giuint(struct irq_data *data)
-{
- mask_giuint_low(data);
- gpiochip_unlock_as_irq(&vr41xx_gpio_chip, data->hwirq);
-}
-
-static struct irq_chip giuint_low_irq_chip = {
- .name = "GIUINTL",
- .irq_ack = ack_giuint_low,
- .irq_mask = mask_giuint_low,
- .irq_mask_ack = mask_ack_giuint_low,
- .irq_unmask = unmask_giuint_low,
- .irq_startup = startup_giuint,
- .irq_shutdown = shutdown_giuint,
-};
-
-static void ack_giuint_high(struct irq_data *d)
-{
- giu_write(GIUINTSTATH,
- 1 << (GPIO_PIN_OF_IRQ(d->irq) - GIUINT_HIGH_OFFSET));
-}
-
-static void mask_giuint_high(struct irq_data *d)
-{
- giu_clear(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(d->irq) - GIUINT_HIGH_OFFSET));
-}
-
-static void mask_ack_giuint_high(struct irq_data *d)
-{
- unsigned int pin;
-
- pin = GPIO_PIN_OF_IRQ(d->irq) - GIUINT_HIGH_OFFSET;
- giu_clear(GIUINTENH, 1 << pin);
- giu_write(GIUINTSTATH, 1 << pin);
-}
-
-static void unmask_giuint_high(struct irq_data *d)
-{
- giu_set(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(d->irq) - GIUINT_HIGH_OFFSET));
-}
-
-static struct irq_chip giuint_high_irq_chip = {
- .name = "GIUINTH",
- .irq_ack = ack_giuint_high,
- .irq_mask = mask_giuint_high,
- .irq_mask_ack = mask_ack_giuint_high,
- .irq_unmask = unmask_giuint_high,
-};
-
-static int giu_get_irq(unsigned int irq)
-{
- u16 pendl, pendh, maskl, maskh;
- int i;
-
- pendl = giu_read(GIUINTSTATL);
- pendh = giu_read(GIUINTSTATH);
- maskl = giu_read(GIUINTENL);
- maskh = giu_read(GIUINTENH);
-
- maskl &= pendl;
- maskh &= pendh;
-
- if (maskl) {
- for (i = 0; i < 16; i++) {
- if (maskl & (1 << i))
- return GIU_IRQ(i);
- }
- } else if (maskh) {
- for (i = 0; i < 16; i++) {
- if (maskh & (1 << i))
- return GIU_IRQ(i + GIUINT_HIGH_OFFSET);
- }
- }
-
- printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
- maskl, pendl, maskh, pendh);
-
- return -EINVAL;
-}
-
-void vr41xx_set_irq_trigger(unsigned int pin, irq_trigger_t trigger,
- irq_signal_t signal)
-{
- u16 mask;
-
- if (pin < GIUINT_HIGH_OFFSET) {
- mask = 1 << pin;
- if (trigger != IRQ_TRIGGER_LEVEL) {
- giu_set(GIUINTTYPL, mask);
- if (signal == IRQ_SIGNAL_HOLD)
- giu_set(GIUINTHTSELL, mask);
- else
- giu_clear(GIUINTHTSELL, mask);
- if (giu_flags & GPIO_HAS_INTERRUPT_EDGE_SELECT) {
- switch (trigger) {
- case IRQ_TRIGGER_EDGE_FALLING:
- giu_set(GIUFEDGEINHL, mask);
- giu_clear(GIUREDGEINHL, mask);
- break;
- case IRQ_TRIGGER_EDGE_RISING:
- giu_clear(GIUFEDGEINHL, mask);
- giu_set(GIUREDGEINHL, mask);
- break;
- default:
- giu_set(GIUFEDGEINHL, mask);
- giu_set(GIUREDGEINHL, mask);
- break;
- }
- }
- irq_set_chip_and_handler(GIU_IRQ(pin),
- &giuint_low_irq_chip,
- handle_edge_irq);
- } else {
- giu_clear(GIUINTTYPL, mask);
- giu_clear(GIUINTHTSELL, mask);
- irq_set_chip_and_handler(GIU_IRQ(pin),
- &giuint_low_irq_chip,
- handle_level_irq);
- }
- giu_write(GIUINTSTATL, mask);
- } else if (pin < GIUINT_HIGH_MAX) {
- mask = 1 << (pin - GIUINT_HIGH_OFFSET);
- if (trigger != IRQ_TRIGGER_LEVEL) {
- giu_set(GIUINTTYPH, mask);
- if (signal == IRQ_SIGNAL_HOLD)
- giu_set(GIUINTHTSELH, mask);
- else
- giu_clear(GIUINTHTSELH, mask);
- if (giu_flags & GPIO_HAS_INTERRUPT_EDGE_SELECT) {
- switch (trigger) {
- case IRQ_TRIGGER_EDGE_FALLING:
- giu_set(GIUFEDGEINHH, mask);
- giu_clear(GIUREDGEINHH, mask);
- break;
- case IRQ_TRIGGER_EDGE_RISING:
- giu_clear(GIUFEDGEINHH, mask);
- giu_set(GIUREDGEINHH, mask);
- break;
- default:
- giu_set(GIUFEDGEINHH, mask);
- giu_set(GIUREDGEINHH, mask);
- break;
- }
- }
- irq_set_chip_and_handler(GIU_IRQ(pin),
- &giuint_high_irq_chip,
- handle_edge_irq);
- } else {
- giu_clear(GIUINTTYPH, mask);
- giu_clear(GIUINTHTSELH, mask);
- irq_set_chip_and_handler(GIU_IRQ(pin),
- &giuint_high_irq_chip,
- handle_level_irq);
- }
- giu_write(GIUINTSTATH, mask);
- }
-}
-EXPORT_SYMBOL_GPL(vr41xx_set_irq_trigger);
-
-void vr41xx_set_irq_level(unsigned int pin, irq_level_t level)
-{
- u16 mask;
-
- if (pin < GIUINT_HIGH_OFFSET) {
- mask = 1 << pin;
- if (level == IRQ_LEVEL_HIGH)
- giu_set(GIUINTALSELL, mask);
- else
- giu_clear(GIUINTALSELL, mask);
- giu_write(GIUINTSTATL, mask);
- } else if (pin < GIUINT_HIGH_MAX) {
- mask = 1 << (pin - GIUINT_HIGH_OFFSET);
- if (level == IRQ_LEVEL_HIGH)
- giu_set(GIUINTALSELH, mask);
- else
- giu_clear(GIUINTALSELH, mask);
- giu_write(GIUINTSTATH, mask);
- }
-}
-EXPORT_SYMBOL_GPL(vr41xx_set_irq_level);
-
-static int giu_set_direction(struct gpio_chip *chip, unsigned pin, int dir)
-{
- u16 offset, mask, reg;
- unsigned long flags;
-
- if (pin >= chip->ngpio)
- return -EINVAL;
-
- if (pin < 16) {
- offset = GIUIOSELL;
- mask = 1 << pin;
- } else if (pin < 32) {
- offset = GIUIOSELH;
- mask = 1 << (pin - 16);
- } else {
- if (giu_flags & GPIO_HAS_OUTPUT_ENABLE) {
- offset = GIUPODATEN;
- mask = 1 << (pin - 32);
- } else {
- switch (pin) {
- case 48:
- offset = GIUPODATH;
- mask = PIOEN0;
- break;
- case 49:
- offset = GIUPODATH;
- mask = PIOEN1;
- break;
- default:
- return -EINVAL;
- }
- }
- }
-
- spin_lock_irqsave(&giu_lock, flags);
-
- reg = giu_read(offset);
- if (dir == GPIO_OUTPUT)
- reg |= mask;
- else
- reg &= ~mask;
- giu_write(offset, reg);
-
- spin_unlock_irqrestore(&giu_lock, flags);
-
- return 0;
-}
-
-static int vr41xx_gpio_get(struct gpio_chip *chip, unsigned pin)
-{
- u16 reg, mask;
-
- if (pin >= chip->ngpio)
- return -EINVAL;
-
- if (pin < 16) {
- reg = giu_read(GIUPIODL);
- mask = 1 << pin;
- } else if (pin < 32) {
- reg = giu_read(GIUPIODH);
- mask = 1 << (pin - 16);
- } else if (pin < 48) {
- reg = giu_read(GIUPODATL);
- mask = 1 << (pin - 32);
- } else {
- reg = giu_read(GIUPODATH);
- mask = 1 << (pin - 48);
- }
-
- if (reg & mask)
- return 1;
-
- return 0;
-}
-
-static void vr41xx_gpio_set(struct gpio_chip *chip, unsigned pin,
- int value)
-{
- u16 offset, mask, reg;
- unsigned long flags;
-
- if (pin >= chip->ngpio)
- return;
-
- if (pin < 16) {
- offset = GIUPIODL;
- mask = 1 << pin;
- } else if (pin < 32) {
- offset = GIUPIODH;
- mask = 1 << (pin - 16);
- } else if (pin < 48) {
- offset = GIUPODATL;
- mask = 1 << (pin - 32);
- } else {
- offset = GIUPODATH;
- mask = 1 << (pin - 48);
- }
-
- spin_lock_irqsave(&giu_lock, flags);
-
- reg = giu_read(offset);
- if (value)
- reg |= mask;
- else
- reg &= ~mask;
- giu_write(offset, reg);
-
- spin_unlock_irqrestore(&giu_lock, flags);
-}
-
-
-static int vr41xx_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- return giu_set_direction(chip, offset, GPIO_INPUT);
-}
-
-static int vr41xx_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
- int value)
-{
- vr41xx_gpio_set(chip, offset, value);
-
- return giu_set_direction(chip, offset, GPIO_OUTPUT);
-}
-
-static int vr41xx_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
-{
- if (offset >= chip->ngpio)
- return -EINVAL;
-
- return GIU_IRQ_BASE + offset;
-}
-
-static struct gpio_chip vr41xx_gpio_chip = {
- .label = "vr41xx",
- .owner = THIS_MODULE,
- .direction_input = vr41xx_gpio_direction_input,
- .get = vr41xx_gpio_get,
- .direction_output = vr41xx_gpio_direction_output,
- .set = vr41xx_gpio_set,
- .to_irq = vr41xx_gpio_to_irq,
-};
-
-static int giu_probe(struct platform_device *pdev)
-{
- unsigned int trigger, i, pin;
- struct irq_chip *chip;
- int irq;
-
- switch (pdev->id) {
- case GPIO_50PINS_PULLUPDOWN:
- giu_flags = GPIO_HAS_PULLUPDOWN_IO;
- vr41xx_gpio_chip.ngpio = 50;
- break;
- case GPIO_36PINS:
- vr41xx_gpio_chip.ngpio = 36;
- break;
- case GPIO_48PINS_EDGE_SELECT:
- giu_flags = GPIO_HAS_INTERRUPT_EDGE_SELECT;
- vr41xx_gpio_chip.ngpio = 48;
- break;
- default:
- dev_err(&pdev->dev, "GIU: unknown ID %d\n", pdev->id);
- return -ENODEV;
- }
-
- giu_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(giu_base))
- return PTR_ERR(giu_base);
-
- vr41xx_gpio_chip.parent = &pdev->dev;
-
- if (gpiochip_add_data(&vr41xx_gpio_chip, NULL))
- return -ENODEV;
-
- giu_write(GIUINTENL, 0);
- giu_write(GIUINTENH, 0);
-
- trigger = giu_read(GIUINTTYPH) << 16;
- trigger |= giu_read(GIUINTTYPL);
- for (i = GIU_IRQ_BASE; i <= GIU_IRQ_LAST; i++) {
- pin = GPIO_PIN_OF_IRQ(i);
- if (pin < GIUINT_HIGH_OFFSET)
- chip = &giuint_low_irq_chip;
- else
- chip = &giuint_high_irq_chip;
-
- if (trigger & (1 << pin))
- irq_set_chip_and_handler(i, chip, handle_edge_irq);
- else
- irq_set_chip_and_handler(i, chip, handle_level_irq);
-
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0 || irq >= nr_irqs)
- return -EBUSY;
-
- return cascade_irq(irq, giu_get_irq);
-}
-
-static int giu_remove(struct platform_device *pdev)
-{
- if (giu_base) {
- giu_base = NULL;
- }
-
- return 0;
-}
-
-static struct platform_driver giu_device_driver = {
- .probe = giu_probe,
- .remove = giu_remove,
- .driver = {
- .name = "GIU",
- },
-};
-
-module_platform_driver(giu_device_driver);
diff --git a/drivers/gpio/gpio-ws16c48.c b/drivers/gpio/gpio-ws16c48.c
index 5078631d8014..b098f2dc196b 100644
--- a/drivers/gpio/gpio-ws16c48.c
+++ b/drivers/gpio/gpio-ws16c48.c
@@ -4,7 +4,6 @@
* Copyright (C) 2016 William Breathitt Gray
*/
#include <linux/bitmap.h>
-#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gpio/driver.h>
@@ -17,8 +16,9 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
+#include <linux/types.h>
-#define WS16C48_EXTENT 16
+#define WS16C48_EXTENT 10
#define MAX_NUM_WS16C48 max_num_isa_dev(WS16C48_EXTENT)
static unsigned int base[MAX_NUM_WS16C48];
@@ -31,6 +31,20 @@ module_param_hw_array(irq, uint, irq, NULL, 0);
MODULE_PARM_DESC(irq, "WinSystems WS16C48 interrupt line numbers");
/**
+ * struct ws16c48_reg - device register structure
+ * @port: Port 0 through 5 I/O
+ * @int_pending: Interrupt Pending
+ * @page_lock: Register page (Bits 7-6) and I/O port lock (Bits 5-0)
+ * @pol_enab_int_id: Interrupt polarity, enable, and ID
+ */
+struct ws16c48_reg {
+ u8 port[6];
+ u8 int_pending;
+ u8 page_lock;
+ u8 pol_enab_int_id[3];
+};
+
+/**
* struct ws16c48_gpio - GPIO device private data structure
* @chip: instance of the gpio_chip
* @io_state: bit I/O state (whether bit is set to input or output)
@@ -38,7 +52,7 @@ MODULE_PARM_DESC(irq, "WinSystems WS16C48 interrupt line numbers");
* @lock: synchronization lock to prevent I/O race conditions
* @irq_mask: I/O bits affected by interrupts
* @flow_mask: IRQ flow type mask for the respective I/O bits
- * @base: base port address of the GPIO device
+ * @reg: I/O address offset for the device registers
*/
struct ws16c48_gpio {
struct gpio_chip chip;
@@ -47,7 +61,7 @@ struct ws16c48_gpio {
raw_spinlock_t lock;
unsigned long irq_mask;
unsigned long flow_mask;
- void __iomem *base;
+ struct ws16c48_reg __iomem *reg;
};
static int ws16c48_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
@@ -73,7 +87,7 @@ static int ws16c48_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
ws16c48gpio->io_state[port] |= mask;
ws16c48gpio->out_state[port] &= ~mask;
- iowrite8(ws16c48gpio->out_state[port], ws16c48gpio->base + port);
+ iowrite8(ws16c48gpio->out_state[port], ws16c48gpio->reg->port + port);
raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
@@ -95,7 +109,7 @@ static int ws16c48_gpio_direction_output(struct gpio_chip *chip,
ws16c48gpio->out_state[port] |= mask;
else
ws16c48gpio->out_state[port] &= ~mask;
- iowrite8(ws16c48gpio->out_state[port], ws16c48gpio->base + port);
+ iowrite8(ws16c48gpio->out_state[port], ws16c48gpio->reg->port + port);
raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
@@ -118,7 +132,7 @@ static int ws16c48_gpio_get(struct gpio_chip *chip, unsigned offset)
return -EINVAL;
}
- port_state = ioread8(ws16c48gpio->base + port);
+ port_state = ioread8(ws16c48gpio->reg->port + port);
raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
@@ -131,14 +145,16 @@ static int ws16c48_gpio_get_multiple(struct gpio_chip *chip,
struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(chip);
unsigned long offset;
unsigned long gpio_mask;
- void __iomem *port_addr;
+ size_t index;
+ u8 __iomem *port_addr;
unsigned long port_state;
/* clear bits array to a clean slate */
bitmap_zero(bits, chip->ngpio);
for_each_set_clump8(offset, gpio_mask, mask, chip->ngpio) {
- port_addr = ws16c48gpio->base + offset / 8;
+ index = offset / 8;
+ port_addr = ws16c48gpio->reg->port + index;
port_state = ioread8(port_addr) & gpio_mask;
bitmap_set_value8(bits, port_state, offset);
@@ -166,7 +182,7 @@ static void ws16c48_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
ws16c48gpio->out_state[port] |= mask;
else
ws16c48gpio->out_state[port] &= ~mask;
- iowrite8(ws16c48gpio->out_state[port], ws16c48gpio->base + port);
+ iowrite8(ws16c48gpio->out_state[port], ws16c48gpio->reg->port + port);
raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
}
@@ -178,13 +194,13 @@ static void ws16c48_gpio_set_multiple(struct gpio_chip *chip,
unsigned long offset;
unsigned long gpio_mask;
size_t index;
- void __iomem *port_addr;
+ u8 __iomem *port_addr;
unsigned long bitmask;
unsigned long flags;
for_each_set_clump8(offset, gpio_mask, mask, chip->ngpio) {
index = offset / 8;
- port_addr = ws16c48gpio->base + index;
+ port_addr = ws16c48gpio->reg->port + index;
/* mask out GPIO configured for input */
gpio_mask &= ~ws16c48gpio->io_state[index];
@@ -219,10 +235,15 @@ static void ws16c48_irq_ack(struct irq_data *data)
port_state = ws16c48gpio->irq_mask >> (8*port);
- iowrite8(0x80, ws16c48gpio->base + 7);
- iowrite8(port_state & ~mask, ws16c48gpio->base + 8 + port);
- iowrite8(port_state | mask, ws16c48gpio->base + 8 + port);
- iowrite8(0xC0, ws16c48gpio->base + 7);
+ /* Select Register Page 2; Unlock all I/O ports */
+ iowrite8(0x80, &ws16c48gpio->reg->page_lock);
+
+ /* Clear pending interrupt */
+ iowrite8(port_state & ~mask, ws16c48gpio->reg->pol_enab_int_id + port);
+ iowrite8(port_state | mask, ws16c48gpio->reg->pol_enab_int_id + port);
+
+ /* Select Register Page 3; Unlock all I/O ports */
+ iowrite8(0xC0, &ws16c48gpio->reg->page_lock);
raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
}
@@ -235,6 +256,7 @@ static void ws16c48_irq_mask(struct irq_data *data)
const unsigned long mask = BIT(offset);
const unsigned port = offset / 8;
unsigned long flags;
+ unsigned long port_state;
/* only the first 3 ports support interrupts */
if (port > 2)
@@ -243,10 +265,16 @@ static void ws16c48_irq_mask(struct irq_data *data)
raw_spin_lock_irqsave(&ws16c48gpio->lock, flags);
ws16c48gpio->irq_mask &= ~mask;
+ port_state = ws16c48gpio->irq_mask >> (8 * port);
+
+ /* Select Register Page 2; Unlock all I/O ports */
+ iowrite8(0x80, &ws16c48gpio->reg->page_lock);
- iowrite8(0x80, ws16c48gpio->base + 7);
- iowrite8(ws16c48gpio->irq_mask >> (8*port), ws16c48gpio->base + 8 + port);
- iowrite8(0xC0, ws16c48gpio->base + 7);
+ /* Disable interrupt */
+ iowrite8(port_state, ws16c48gpio->reg->pol_enab_int_id + port);
+
+ /* Select Register Page 3; Unlock all I/O ports */
+ iowrite8(0xC0, &ws16c48gpio->reg->page_lock);
raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
}
@@ -259,6 +287,7 @@ static void ws16c48_irq_unmask(struct irq_data *data)
const unsigned long mask = BIT(offset);
const unsigned port = offset / 8;
unsigned long flags;
+ unsigned long port_state;
/* only the first 3 ports support interrupts */
if (port > 2)
@@ -267,10 +296,16 @@ static void ws16c48_irq_unmask(struct irq_data *data)
raw_spin_lock_irqsave(&ws16c48gpio->lock, flags);
ws16c48gpio->irq_mask |= mask;
+ port_state = ws16c48gpio->irq_mask >> (8 * port);
+
+ /* Select Register Page 2; Unlock all I/O ports */
+ iowrite8(0x80, &ws16c48gpio->reg->page_lock);
- iowrite8(0x80, ws16c48gpio->base + 7);
- iowrite8(ws16c48gpio->irq_mask >> (8*port), ws16c48gpio->base + 8 + port);
- iowrite8(0xC0, ws16c48gpio->base + 7);
+ /* Enable interrupt */
+ iowrite8(port_state, ws16c48gpio->reg->pol_enab_int_id + port);
+
+ /* Select Register Page 3; Unlock all I/O ports */
+ iowrite8(0xC0, &ws16c48gpio->reg->page_lock);
raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
}
@@ -283,6 +318,7 @@ static int ws16c48_irq_set_type(struct irq_data *data, unsigned flow_type)
const unsigned long mask = BIT(offset);
const unsigned port = offset / 8;
unsigned long flags;
+ unsigned long port_state;
/* only the first 3 ports support interrupts */
if (port > 2)
@@ -304,9 +340,16 @@ static int ws16c48_irq_set_type(struct irq_data *data, unsigned flow_type)
return -EINVAL;
}
- iowrite8(0x40, ws16c48gpio->base + 7);
- iowrite8(ws16c48gpio->flow_mask >> (8*port), ws16c48gpio->base + 8 + port);
- iowrite8(0xC0, ws16c48gpio->base + 7);
+ port_state = ws16c48gpio->flow_mask >> (8 * port);
+
+ /* Select Register Page 1; Unlock all I/O ports */
+ iowrite8(0x40, &ws16c48gpio->reg->page_lock);
+
+ /* Set interrupt polarity */
+ iowrite8(port_state, ws16c48gpio->reg->pol_enab_int_id + port);
+
+ /* Select Register Page 3; Unlock all I/O ports */
+ iowrite8(0xC0, &ws16c48gpio->reg->page_lock);
raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
@@ -325,25 +368,26 @@ static irqreturn_t ws16c48_irq_handler(int irq, void *dev_id)
{
struct ws16c48_gpio *const ws16c48gpio = dev_id;
struct gpio_chip *const chip = &ws16c48gpio->chip;
+ struct ws16c48_reg __iomem *const reg = ws16c48gpio->reg;
unsigned long int_pending;
unsigned long port;
unsigned long int_id;
unsigned long gpio;
- int_pending = ioread8(ws16c48gpio->base + 6) & 0x7;
+ int_pending = ioread8(&reg->int_pending) & 0x7;
if (!int_pending)
return IRQ_NONE;
/* loop until all pending interrupts are handled */
do {
for_each_set_bit(port, &int_pending, 3) {
- int_id = ioread8(ws16c48gpio->base + 8 + port);
+ int_id = ioread8(reg->pol_enab_int_id + port);
for_each_set_bit(gpio, &int_id, 8)
generic_handle_domain_irq(chip->irq.domain,
gpio + 8*port);
}
- int_pending = ioread8(ws16c48gpio->base + 6) & 0x7;
+ int_pending = ioread8(&reg->int_pending) & 0x7;
} while (int_pending);
return IRQ_HANDLED;
@@ -369,12 +413,16 @@ static int ws16c48_irq_init_hw(struct gpio_chip *gc)
{
struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(gc);
- /* Disable IRQ by default */
- iowrite8(0x80, ws16c48gpio->base + 7);
- iowrite8(0, ws16c48gpio->base + 8);
- iowrite8(0, ws16c48gpio->base + 9);
- iowrite8(0, ws16c48gpio->base + 10);
- iowrite8(0xC0, ws16c48gpio->base + 7);
+ /* Select Register Page 2; Unlock all I/O ports */
+ iowrite8(0x80, &ws16c48gpio->reg->page_lock);
+
+ /* Disable interrupts for all lines */
+ iowrite8(0, &ws16c48gpio->reg->pol_enab_int_id[0]);
+ iowrite8(0, &ws16c48gpio->reg->pol_enab_int_id[1]);
+ iowrite8(0, &ws16c48gpio->reg->pol_enab_int_id[2]);
+
+ /* Select Register Page 3; Unlock all I/O ports */
+ iowrite8(0xC0, &ws16c48gpio->reg->page_lock);
return 0;
}
@@ -396,8 +444,8 @@ static int ws16c48_probe(struct device *dev, unsigned int id)
return -EBUSY;
}
- ws16c48gpio->base = devm_ioport_map(dev, base[id], WS16C48_EXTENT);
- if (!ws16c48gpio->base)
+ ws16c48gpio->reg = devm_ioport_map(dev, base[id], WS16C48_EXTENT);
+ if (!ws16c48gpio->reg)
return -ENOMEM;
ws16c48gpio->chip.label = name;
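
The ws16c48 rework replaces raw base-plus-offset arithmetic with the struct ws16c48_reg layout declared above, so the page/lock register and the three polarity/enable/ID registers get named accesses. Per the kernel-doc comment, bits 7-6 of page_lock select the register page and bits 5-0 lock individual I/O ports; the constants used in the hunks (0x40, 0x80, 0xC0) select pages 1, 2 and 3 with every port left unlocked. A hedged helper sketch built on that layout, with hypothetical names and the struct taken from the diff above:

#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_PAGE(n)	((n) << 6)	/* bits 7-6 select the register page */

static void example_select_page(struct ws16c48_reg __iomem *reg,
				unsigned int page)
{
	/* leaving the low six bits at 0 keeps every I/O port unlocked */
	iowrite8(EXAMPLE_PAGE(page), &reg->page_lock);
}
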
diff --git a/drivers/gpio/gpio-xgs-iproc.c b/drivers/gpio/gpio-xgs-iproc.c
index 43ca52fa6f9a..fd88500399c6 100644
--- a/drivers/gpio/gpio-xgs-iproc.c
+++ b/drivers/gpio/gpio-xgs-iproc.c
@@ -281,11 +281,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
static int iproc_gpio_remove(struct platform_device *pdev)
{
- struct iproc_gpio_chip *chip;
-
- chip = platform_get_drvdata(pdev);
- if (!chip)
- return -ENODEV;
+ struct iproc_gpio_chip *chip = platform_get_drvdata(pdev);
if (chip->intr) {
u32 val;
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index 7f8e2fed2988..2fc6b6ff7f16 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -117,12 +117,14 @@ static inline int xgpio_regoffset(struct xgpio_instance *chip, int ch)
static void xgpio_read_ch(struct xgpio_instance *chip, int reg, int bit, unsigned long *a)
{
void __iomem *addr = chip->regs + reg + xgpio_regoffset(chip, bit / 32);
+
xgpio_set_value32(a, bit, xgpio_readreg(addr));
}
static void xgpio_write_ch(struct xgpio_instance *chip, int reg, int bit, unsigned long *a)
{
void __iomem *addr = chip->regs + reg + xgpio_regoffset(chip, bit / 32);
+
xgpio_writereg(addr, xgpio_get_value32(a, bit));
}
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index c2523ac26fac..9be1376f9a62 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -687,6 +687,9 @@ int acpi_gpio_update_gpiod_lookup_flags(unsigned long *lookupflags,
case ACPI_PIN_CONFIG_PULLDOWN:
*lookupflags |= GPIO_PULL_DOWN;
break;
+ case ACPI_PIN_CONFIG_NOPULL:
+ *lookupflags |= GPIO_PULL_DISABLE;
+ break;
default:
break;
}
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index b26e64338376..f8041d4898d1 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -434,12 +434,15 @@ struct line {
struct linereq *req;
unsigned int irq;
/*
- * eflags is set by edge_detector_setup(), edge_detector_stop() and
- * edge_detector_update(), which are themselves mutually exclusive,
- * and is accessed by edge_irq_thread() and debounce_work_func(),
- * which can both live with a slightly stale value.
+ * The flags for the active edge detector configuration.
+ *
+ * edflags is set by linereq_create(), linereq_free(), and
+ * linereq_set_config_unlocked(), which are themselves mutually
+ * exclusive, and is accessed by edge_irq_thread(),
+ * process_hw_ts_thread() and debounce_work_func(),
+ * which can all live with a slightly stale value.
*/
- u64 eflags;
+ u64 edflags;
/*
* timestamp_ns and req_seqno are accessed only by
* edge_irq_handler() and edge_irq_thread(), which are themselves
@@ -469,9 +472,7 @@ struct line {
* stale value.
*/
unsigned int level;
- /*
- * -- hte specific fields --
- */
+#ifdef CONFIG_HTE
struct hte_ts_desc hdesc;
/*
* HTE provider sets line level at the time of event. The valid
@@ -488,6 +489,7 @@ struct line {
* last sequence number before debounce period expires.
*/
u32 last_seqno;
+#endif /* CONFIG_HTE */
};
/**
@@ -545,6 +547,12 @@ struct linereq {
GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
GPIO_V2_LINE_BIAS_FLAGS)
+/* subset of flags relevant for edge detector configuration */
+#define GPIO_V2_LINE_EDGE_DETECTOR_FLAGS \
+ (GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
+ GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
+ GPIO_V2_LINE_EDGE_FLAGS)
+
static void linereq_put_event(struct linereq *lr,
struct gpio_v2_line_event *le)
{
@@ -567,19 +575,28 @@ static u64 line_event_timestamp(struct line *line)
{
if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &line->desc->flags))
return ktime_get_real_ns();
- else if (test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))
+ else if (IS_ENABLED(CONFIG_HTE) &&
+ test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))
return line->timestamp_ns;
return ktime_get_ns();
}
+static u32 line_event_id(int level)
+{
+ return level ? GPIO_V2_LINE_EVENT_RISING_EDGE :
+ GPIO_V2_LINE_EVENT_FALLING_EDGE;
+}
+
+#ifdef CONFIG_HTE
+
static enum hte_return process_hw_ts_thread(void *p)
{
struct line *line;
struct linereq *lr;
struct gpio_v2_line_event le;
+ u64 edflags;
int level;
- u64 eflags;
if (!p)
return HTE_CB_HANDLED;
@@ -590,29 +607,26 @@ static enum hte_return process_hw_ts_thread(void *p)
memset(&le, 0, sizeof(le));
le.timestamp_ns = line->timestamp_ns;
- eflags = READ_ONCE(line->eflags);
+ edflags = READ_ONCE(line->edflags);
- if (eflags == GPIO_V2_LINE_FLAG_EDGE_BOTH) {
- if (line->raw_level >= 0) {
- if (test_bit(FLAG_ACTIVE_LOW, &line->desc->flags))
- level = !line->raw_level;
- else
- level = line->raw_level;
- } else {
- level = gpiod_get_value_cansleep(line->desc);
- }
+ switch (edflags & GPIO_V2_LINE_EDGE_FLAGS) {
+ case GPIO_V2_LINE_FLAG_EDGE_BOTH:
+ level = (line->raw_level >= 0) ?
+ line->raw_level :
+ gpiod_get_raw_value_cansleep(line->desc);
- if (level)
- le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
- else
- le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
- } else if (eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) {
- /* Emit low-to-high event */
+ if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
+ level = !level;
+
+ le.id = line_event_id(level);
+ break;
+ case GPIO_V2_LINE_FLAG_EDGE_RISING:
le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
- } else if (eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) {
- /* Emit high-to-low event */
+ break;
+ case GPIO_V2_LINE_FLAG_EDGE_FALLING:
le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
- } else {
+ break;
+ default:
return HTE_CB_HANDLED;
}
le.line_seqno = line->line_seqno;
@@ -659,12 +673,47 @@ static enum hte_return process_hw_ts(struct hte_ts_data *ts, void *p)
return HTE_CB_HANDLED;
}
+static int hte_edge_setup(struct line *line, u64 eflags)
+{
+ int ret;
+ unsigned long flags = 0;
+ struct hte_ts_desc *hdesc = &line->hdesc;
+
+ if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
+ flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
+ HTE_FALLING_EDGE_TS :
+ HTE_RISING_EDGE_TS;
+ if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
+ flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
+ HTE_RISING_EDGE_TS :
+ HTE_FALLING_EDGE_TS;
+
+ line->total_discard_seq = 0;
+
+ hte_init_line_attr(hdesc, desc_to_gpio(line->desc), flags, NULL,
+ line->desc);
+
+ ret = hte_ts_get(NULL, hdesc, 0);
+ if (ret)
+ return ret;
+
+ return hte_request_ts_ns(hdesc, process_hw_ts, process_hw_ts_thread,
+ line);
+}
+
+#else
+
+static int hte_edge_setup(struct line *line, u64 eflags)
+{
+ return 0;
+}
+#endif /* CONFIG_HTE */
+
static irqreturn_t edge_irq_thread(int irq, void *p)
{
struct line *line = p;
struct linereq *lr = line->req;
struct gpio_v2_line_event le;
- u64 eflags;
/* Do not leak kernel stack to userspace */
memset(&le, 0, sizeof(le));
@@ -683,23 +732,17 @@ static irqreturn_t edge_irq_thread(int irq, void *p)
}
line->timestamp_ns = 0;
- eflags = READ_ONCE(line->eflags);
- if (eflags == GPIO_V2_LINE_FLAG_EDGE_BOTH) {
- int level = gpiod_get_value_cansleep(line->desc);
-
- if (level)
- /* Emit low-to-high event */
- le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
- else
- /* Emit high-to-low event */
- le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
- } else if (eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) {
- /* Emit low-to-high event */
+ switch (READ_ONCE(line->edflags) & GPIO_V2_LINE_EDGE_FLAGS) {
+ case GPIO_V2_LINE_FLAG_EDGE_BOTH:
+ le.id = line_event_id(gpiod_get_value_cansleep(line->desc));
+ break;
+ case GPIO_V2_LINE_FLAG_EDGE_RISING:
le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
- } else if (eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) {
- /* Emit high-to-low event */
+ break;
+ case GPIO_V2_LINE_FLAG_EDGE_FALLING:
le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
- } else {
+ break;
+ default:
return IRQ_NONE;
}
line->line_seqno++;
@@ -764,16 +807,16 @@ static void debounce_work_func(struct work_struct *work)
struct gpio_v2_line_event le;
struct line *line = container_of(work, struct line, work.work);
struct linereq *lr;
- int level, diff_seqno;
- u64 eflags;
+ u64 eflags, edflags = READ_ONCE(line->edflags);
+ int level = -1;
+#ifdef CONFIG_HTE
+ int diff_seqno;
- if (test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags)) {
+ if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
level = line->raw_level;
- if (level < 0)
- level = gpiod_get_raw_value_cansleep(line->desc);
- } else {
+#endif
+ if (level < 0)
level = gpiod_get_raw_value_cansleep(line->desc);
- }
if (level < 0) {
pr_debug_ratelimited("debouncer failed to read line value\n");
return;
@@ -785,12 +828,12 @@ static void debounce_work_func(struct work_struct *work)
WRITE_ONCE(line->level, level);
/* -- edge detection -- */
- eflags = READ_ONCE(line->eflags);
+ eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
if (!eflags)
return;
/* switch from physical level to logical - if they differ */
- if (test_bit(FLAG_ACTIVE_LOW, &line->desc->flags))
+ if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
level = !level;
/* ignore edges that are not being monitored */
@@ -804,7 +847,8 @@ static void debounce_work_func(struct work_struct *work)
lr = line->req;
le.timestamp_ns = line_event_timestamp(line);
le.offset = gpio_chip_hwgpio(line->desc);
- if (test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags)) {
+#ifdef CONFIG_HTE
+ if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) {
/* discard events except the last one */
line->total_discard_seq -= 1;
diff_seqno = line->last_seqno - line->total_discard_seq -
@@ -813,51 +857,21 @@ static void debounce_work_func(struct work_struct *work)
le.line_seqno = line->line_seqno;
le.seqno = (lr->num_lines == 1) ?
le.line_seqno : atomic_add_return(diff_seqno, &lr->seqno);
- } else {
+ } else
+#endif /* CONFIG_HTE */
+ {
line->line_seqno++;
le.line_seqno = line->line_seqno;
le.seqno = (lr->num_lines == 1) ?
le.line_seqno : atomic_inc_return(&lr->seqno);
}
- if (level)
- /* Emit low-to-high event */
- le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
- else
- /* Emit high-to-low event */
- le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
+ le.id = line_event_id(level);
linereq_put_event(lr, &le);
}
-static int hte_edge_setup(struct line *line, u64 eflags)
-{
- int ret;
- unsigned long flags = 0;
- struct hte_ts_desc *hdesc = &line->hdesc;
-
- if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
- flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
- HTE_FALLING_EDGE_TS : HTE_RISING_EDGE_TS;
- if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
- flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
- HTE_RISING_EDGE_TS : HTE_FALLING_EDGE_TS;
-
- line->total_discard_seq = 0;
-
- hte_init_line_attr(hdesc, desc_to_gpio(line->desc), flags,
- NULL, line->desc);
-
- ret = hte_ts_get(NULL, hdesc, 0);
- if (ret)
- return ret;
-
- return hte_request_ts_ns(hdesc, process_hw_ts,
- process_hw_ts_thread, line);
-}
-
-static int debounce_setup(struct line *line,
- unsigned int debounce_period_us, bool hte_req)
+static int debounce_setup(struct line *line, unsigned int debounce_period_us)
{
unsigned long irqflags;
int ret, level, irq;
@@ -877,7 +891,8 @@ static int debounce_setup(struct line *line,
if (level < 0)
return level;
- if (!hte_req) {
+ if (!(IS_ENABLED(CONFIG_HTE) &&
+ test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))) {
irq = gpiod_to_irq(line->desc);
if (irq < 0)
return -ENXIO;
@@ -889,9 +904,7 @@ static int debounce_setup(struct line *line,
return ret;
line->irq = irq;
} else {
- ret = hte_edge_setup(line,
- GPIO_V2_LINE_FLAG_EDGE_RISING |
- GPIO_V2_LINE_FLAG_EDGE_FALLING);
+ ret = hte_edge_setup(line, GPIO_V2_LINE_FLAG_EDGE_BOTH);
if (ret)
return ret;
}
@@ -930,19 +943,21 @@ static u32 gpio_v2_line_config_debounce_period(struct gpio_v2_line_config *lc,
return 0;
}
-static void edge_detector_stop(struct line *line, bool hte_en)
+static void edge_detector_stop(struct line *line)
{
- if (line->irq && !hte_en) {
+ if (line->irq) {
free_irq(line->irq, line);
line->irq = 0;
}
- if (hte_en)
+#ifdef CONFIG_HTE
+ if (READ_ONCE(line->edflags) & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
hte_ts_put(&line->hdesc);
+#endif
cancel_delayed_work_sync(&line->work);
WRITE_ONCE(line->sw_debounced, 0);
- WRITE_ONCE(line->eflags, 0);
+ WRITE_ONCE(line->edflags, 0);
if (line->desc)
WRITE_ONCE(line->desc->debounce_period_us, 0);
/* do not change line->level - see comment in debounced_value() */
@@ -950,23 +965,23 @@ static void edge_detector_stop(struct line *line, bool hte_en)
static int edge_detector_setup(struct line *line,
struct gpio_v2_line_config *lc,
- unsigned int line_idx,
- u64 eflags, bool hte_req)
+ unsigned int line_idx, u64 edflags)
{
u32 debounce_period_us;
unsigned long irqflags = 0;
+ u64 eflags;
int irq, ret;
+ eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
if (eflags && !kfifo_initialized(&line->req->events)) {
ret = kfifo_alloc(&line->req->events,
line->req->event_buffer_size, GFP_KERNEL);
if (ret)
return ret;
}
- WRITE_ONCE(line->eflags, eflags);
if (gpio_v2_line_config_debounced(lc, line_idx)) {
debounce_period_us = gpio_v2_line_config_debounce_period(lc, line_idx);
- ret = debounce_setup(line, debounce_period_us, hte_req);
+ ret = debounce_setup(line, debounce_period_us);
if (ret)
return ret;
WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
@@ -976,8 +991,9 @@ static int edge_detector_setup(struct line *line,
if (!eflags || READ_ONCE(line->sw_debounced))
return 0;
- if (hte_req)
- return hte_edge_setup(line, eflags);
+ if (IS_ENABLED(CONFIG_HTE) &&
+ (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
+ return hte_edge_setup(line, edflags);
irq = gpiod_to_irq(line->desc);
if (irq < 0)
@@ -1003,35 +1019,29 @@ static int edge_detector_setup(struct line *line,
static int edge_detector_update(struct line *line,
struct gpio_v2_line_config *lc,
- unsigned int line_idx,
- u64 flags, bool polarity_change,
- bool prev_hte_flag)
+ unsigned int line_idx, u64 edflags)
{
- u64 eflags = flags & GPIO_V2_LINE_EDGE_FLAGS;
+ u64 active_edflags = READ_ONCE(line->edflags);
unsigned int debounce_period_us =
gpio_v2_line_config_debounce_period(lc, line_idx);
- bool hte_change = (prev_hte_flag !=
- ((flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) != 0));
- if ((READ_ONCE(line->eflags) == eflags) && !polarity_change &&
- (READ_ONCE(line->desc->debounce_period_us) == debounce_period_us)
- && !hte_change)
+ if ((active_edflags == edflags) &&
+ (READ_ONCE(line->desc->debounce_period_us) == debounce_period_us))
return 0;
/* sw debounced and still will be...*/
if (debounce_period_us && READ_ONCE(line->sw_debounced)) {
- WRITE_ONCE(line->eflags, eflags);
WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
return 0;
}
/* reconfiguring edge detection or sw debounce being disabled */
- if ((line->irq && !READ_ONCE(line->sw_debounced)) || prev_hte_flag ||
+ if ((line->irq && !READ_ONCE(line->sw_debounced)) ||
+ (active_edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) ||
(!debounce_period_us && READ_ONCE(line->sw_debounced)))
- edge_detector_stop(line, prev_hte_flag);
+ edge_detector_stop(line);
- return edge_detector_setup(line, lc, line_idx, eflags,
- flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
+ return edge_detector_setup(line, lc, line_idx, edflags);
}
static u64 gpio_v2_line_config_flags(struct gpio_v2_line_config *lc,
@@ -1067,6 +1077,11 @@ static int gpio_v2_line_flags_validate(u64 flags)
/* Return an error if an unknown flag is set */
if (flags & ~GPIO_V2_LINE_VALID_FLAGS)
return -EINVAL;
+
+ if (!IS_ENABLED(CONFIG_HTE) &&
+ (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
+ return -EOPNOTSUPP;
+
/*
* Do not allow both INPUT and OUTPUT flags to be set as they are
* contradictory.
@@ -1076,7 +1091,8 @@ static int gpio_v2_line_flags_validate(u64 flags)
return -EINVAL;
/* Only allow one event clock source */
- if ((flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME) &&
+ if (IS_ENABLED(CONFIG_HTE) &&
+ (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME) &&
(flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
return -EINVAL;
@@ -1300,22 +1316,17 @@ static long linereq_set_config_unlocked(struct linereq *lr,
struct gpio_v2_line_config *lc)
{
struct gpio_desc *desc;
+ struct line *line;
unsigned int i;
- u64 flags;
- bool polarity_change;
- bool prev_hte_flag;
+ u64 flags, edflags;
int ret;
for (i = 0; i < lr->num_lines; i++) {
+ line = &lr->lines[i];
desc = lr->lines[i].desc;
flags = gpio_v2_line_config_flags(lc, i);
- polarity_change =
- (!!test_bit(FLAG_ACTIVE_LOW, &desc->flags) !=
- ((flags & GPIO_V2_LINE_FLAG_ACTIVE_LOW) != 0));
-
- prev_hte_flag = !!test_bit(FLAG_EVENT_CLOCK_HTE, &desc->flags);
-
gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
+ edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
/*
* Lines have to be requested explicitly for input
* or output, else the line will be treated "as is".
@@ -1323,7 +1334,7 @@ static long linereq_set_config_unlocked(struct linereq *lr,
if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
int val = gpio_v2_line_config_output_value(lc, i);
- edge_detector_stop(&lr->lines[i], prev_hte_flag);
+ edge_detector_stop(line);
ret = gpiod_direction_output(desc, val);
if (ret)
return ret;
@@ -1332,12 +1343,13 @@ static long linereq_set_config_unlocked(struct linereq *lr,
if (ret)
return ret;
- ret = edge_detector_update(&lr->lines[i], lc, i,
- flags, polarity_change, prev_hte_flag);
+ ret = edge_detector_update(line, lc, i, edflags);
if (ret)
return ret;
}
+ WRITE_ONCE(line->edflags, edflags);
+
blocking_notifier_call_chain(&desc->gdev->notifier,
GPIO_V2_LINE_CHANGED_CONFIG,
desc);
@@ -1464,15 +1476,12 @@ static ssize_t linereq_read(struct file *file,
static void linereq_free(struct linereq *lr)
{
unsigned int i;
- bool hte = false;
for (i = 0; i < lr->num_lines; i++) {
- if (lr->lines[i].desc)
- hte = !!test_bit(FLAG_EVENT_CLOCK_HTE,
- &lr->lines[i].desc->flags);
- edge_detector_stop(&lr->lines[i], hte);
- if (lr->lines[i].desc)
+ if (lr->lines[i].desc) {
+ edge_detector_stop(&lr->lines[i]);
gpiod_free(lr->lines[i].desc);
+ }
}
kfifo_free(&lr->events);
kfree(lr->label);
@@ -1506,7 +1515,7 @@ static int linereq_create(struct gpio_device *gdev, void __user *ip)
struct gpio_v2_line_config *lc;
struct linereq *lr;
struct file *file;
- u64 flags;
+ u64 flags, edflags;
unsigned int i;
int fd, ret;
@@ -1580,6 +1589,7 @@ static int linereq_create(struct gpio_device *gdev, void __user *ip)
if (ret < 0)
goto out_free_linereq;
+ edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
/*
* Lines have to be requested explicitly for input
* or output, else the line will be treated "as is".
@@ -1596,12 +1606,13 @@ static int linereq_create(struct gpio_device *gdev, void __user *ip)
goto out_free_linereq;
ret = edge_detector_setup(&lr->lines[i], lc, i,
- flags & GPIO_V2_LINE_EDGE_FLAGS,
- flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
+ edflags);
if (ret)
goto out_free_linereq;
}
+ lr->lines[i].edflags = edflags;
+
blocking_notifier_call_chain(&desc->gdev->notifier,
GPIO_V2_LINE_CHANGED_REQUESTED, desc);
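
The cdev rework folds the edge flags, the active-low flag and the HTE event-clock flag into a single edflags word that is stored and compared as one unit, so a reconfiguration only tears down and rebuilds the edge detector when one of those bits actually changes. A hedged illustration of how that word is derived; the mask below mirrors GPIO_V2_LINE_EDGE_DETECTOR_FLAGS from the hunk above, and the flag names come from the uapi GPIO header.

#include <linux/types.h>
#include <uapi/linux/gpio.h>

/* mirrors GPIO_V2_LINE_EDGE_DETECTOR_FLAGS introduced in the diff above */
#define EXAMPLE_EDGE_DETECTOR_FLAGS \
	(GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
	 GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
	 GPIO_V2_LINE_FLAG_EDGE_RISING | \
	 GPIO_V2_LINE_FLAG_EDGE_FALLING)

static u64 example_edflags(u64 flags)
{
	/* direction, bias and drive bits do not affect edge detection */
	return flags & EXAMPLE_EDGE_DETECTOR_FLAGS;
}
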
diff --git a/drivers/gpio/gpiolib-devres.c b/drivers/gpio/gpiolib-devres.c
index 79da85d17b71..16a696249229 100644
--- a/drivers/gpio/gpiolib-devres.c
+++ b/drivers/gpio/gpiolib-devres.c
@@ -375,9 +375,6 @@ void devm_gpiod_put_array(struct device *dev, struct gpio_descs *descs)
}
EXPORT_SYMBOL_GPL(devm_gpiod_put_array);
-
-
-
static void devm_gpio_release(struct device *dev, void *res)
{
unsigned *gpio = res;
@@ -385,13 +382,6 @@ static void devm_gpio_release(struct device *dev, void *res)
gpio_free(*gpio);
}
-static int devm_gpio_match(struct device *dev, void *res, void *data)
-{
- unsigned *this = res, *gpio = data;
-
- return *this == *gpio;
-}
-
/**
* devm_gpio_request - request a GPIO for a managed device
* @dev: device to request the GPIO for
@@ -402,11 +392,7 @@ static int devm_gpio_match(struct device *dev, void *res, void *data)
* same arguments and performs the same function as
* gpio_request(). GPIOs requested with this function will be
* automatically freed on driver detach.
- *
- * If an GPIO allocated with this function needs to be freed
- * separately, devm_gpio_free() must be used.
*/
-
int devm_gpio_request(struct device *dev, unsigned gpio, const char *label)
{
unsigned *dr;
@@ -459,24 +445,6 @@ int devm_gpio_request_one(struct device *dev, unsigned gpio,
}
EXPORT_SYMBOL_GPL(devm_gpio_request_one);
-/**
- * devm_gpio_free - free a GPIO
- * @dev: device to free GPIO for
- * @gpio: GPIO to free
- *
- * Except for the extra @dev argument, this function takes the
- * same arguments and performs the same function as gpio_free().
- * This function instead of gpio_free() should be used to manually
- * free GPIOs allocated with devm_gpio_request().
- */
-void devm_gpio_free(struct device *dev, unsigned int gpio)
-{
-
- WARN_ON(devres_release(dev, devm_gpio_release, devm_gpio_match,
- &gpio));
-}
-EXPORT_SYMBOL_GPL(devm_gpio_free);
-
static void devm_gpio_chip_release(void *data)
{
struct gpio_chip *gc = data;
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 3d6c3ffd5576..a037b50bef33 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -354,6 +354,9 @@ struct gpio_desc *gpiod_get_from_of_node(const struct device_node *node,
if (flags & OF_GPIO_PULL_DOWN)
lflags |= GPIO_PULL_DOWN;
+ if (flags & OF_GPIO_PULL_DISABLE)
+ lflags |= GPIO_PULL_DISABLE;
+
ret = gpiod_configure_flags(desc, propname, lflags, dflags);
if (ret < 0) {
gpiod_put(desc);
@@ -556,6 +559,8 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
*flags |= GPIO_PULL_UP;
if (of_flags & OF_GPIO_PULL_DOWN)
*flags |= GPIO_PULL_DOWN;
+ if (of_flags & OF_GPIO_PULL_DISABLE)
+ *flags |= GPIO_PULL_DISABLE;
return desc;
}
@@ -621,6 +626,8 @@ static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
*lflags |= GPIO_PULL_UP;
if (xlate_flags & OF_GPIO_PULL_DOWN)
*lflags |= GPIO_PULL_DOWN;
+ if (xlate_flags & OF_GPIO_PULL_DISABLE)
+ *lflags |= GPIO_PULL_DISABLE;
if (of_property_read_bool(np, "input"))
*dflags |= GPIOD_IN;
@@ -720,7 +727,7 @@ static void of_gpiochip_remove_hog(struct gpio_chip *chip,
static int of_gpiochip_match_node(struct gpio_chip *chip, void *data)
{
- return chip->gpiodev->dev.of_node == data;
+ return device_match_of_node(&chip->gpiodev->dev, data);
}
static struct gpio_chip *of_find_gpiochip_by_node(struct device_node *np)
@@ -860,7 +867,8 @@ int of_mm_gpiochip_add_data(struct device_node *np,
if (mm_gc->save_regs)
mm_gc->save_regs(mm_gc);
- mm_gc->gc.of_node = np;
+ of_node_put(mm_gc->gc.of_node);
+ mm_gc->gc.of_node = of_node_get(np);
ret = gpiochip_add_data(gc, data);
if (ret)
@@ -868,6 +876,7 @@ int of_mm_gpiochip_add_data(struct device_node *np,
return 0;
err2:
+ of_node_put(np);
iounmap(mm_gc->regs);
err1:
kfree(gc->label);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 68d9f95d7799..cc9c0a12259e 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -3942,9 +3942,11 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
if (lflags & GPIO_OPEN_SOURCE)
set_bit(FLAG_OPEN_SOURCE, &desc->flags);
- if ((lflags & GPIO_PULL_UP) && (lflags & GPIO_PULL_DOWN)) {
+ if (((lflags & GPIO_PULL_UP) && (lflags & GPIO_PULL_DOWN)) ||
+ ((lflags & GPIO_PULL_UP) && (lflags & GPIO_PULL_DISABLE)) ||
+ ((lflags & GPIO_PULL_DOWN) && (lflags & GPIO_PULL_DISABLE))) {
gpiod_err(desc,
- "both pull-up and pull-down enabled, invalid configuration\n");
+ "multiple pull-up, pull-down or pull-disable enabled, invalid configuration\n");
return -EINVAL;
}
@@ -3952,6 +3954,8 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
set_bit(FLAG_PULL_UP, &desc->flags);
else if (lflags & GPIO_PULL_DOWN)
set_bit(FLAG_PULL_DOWN, &desc->flags);
+ else if (lflags & GPIO_PULL_DISABLE)
+ set_bit(FLAG_BIAS_DISABLE, &desc->flags);
ret = gpiod_set_transitory(desc, (lflags & GPIO_TRANSITORY));
if (ret < 0)
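
GPIO_PULL_DISABLE joins GPIO_PULL_UP and GPIO_PULL_DOWN as a lookup flag: the DT and ACPI parsers above translate their "no pull" annotations into it, and gpiod_configure_flags() now rejects any combination of the three and maps the new flag to FLAG_BIAS_DISABLE. A hedged board-file sketch using the new flag through a machine lookup table, with hypothetical device and chip names:

#include <linux/gpio/machine.h>

static struct gpiod_lookup_table example_gpios = {
	.dev_id = "example-device",
	.table = {
		/* line 5 of gpiochip0, active low, bias explicitly disabled */
		GPIO_LOOKUP("gpiochip0", 5, "reset",
			    GPIO_ACTIVE_LOW | GPIO_PULL_DISABLE),
		{ }
	},
};

The table would be registered with gpiod_add_lookup_table() before the consumer device probes, at which point the requested descriptor carries the bias-disable bias.
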
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
index c6cc493a5486..2b97b8a96fb4 100644
--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
@@ -148,30 +148,22 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
struct amdgpu_reset_context *reset_context)
{
struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+ struct list_head *reset_device_list = reset_context->reset_device_list;
struct amdgpu_device *tmp_adev = NULL;
- struct list_head reset_device_list;
int r = 0;
dev_dbg(adev->dev, "aldebaran perform hw reset\n");
+
+ if (reset_device_list == NULL)
+ return -EINVAL;
+
if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2) &&
reset_context->hive == NULL) {
/* Wrong context, return error */
return -EINVAL;
}
- INIT_LIST_HEAD(&reset_device_list);
- if (reset_context->hive) {
- list_for_each_entry (tmp_adev,
- &reset_context->hive->device_list,
- gmc.xgmi.head)
- list_add_tail(&tmp_adev->reset_list,
- &reset_device_list);
- } else {
- list_add_tail(&reset_context->reset_req_dev->reset_list,
- &reset_device_list);
- }
-
- list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
mutex_lock(&tmp_adev->reset_cntl->reset_lock);
tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_MODE2;
}
@@ -179,7 +171,7 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
* Mode2 reset doesn't need any sync between nodes in XGMI hive, instead launch
* them together so that they can be completed asynchronously on multiple nodes
*/
- list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
/* For XGMI run all resets in parallel to speed up the process */
if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
if (!queue_work(system_unbound_wq,
@@ -197,7 +189,7 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
/* For XGMI wait for all resets to complete before proceed */
if (!r) {
- list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
flush_work(&tmp_adev->reset_cntl->reset_work);
r = tmp_adev->asic_reset_res;
@@ -207,7 +199,7 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
}
}
- list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
mutex_unlock(&tmp_adev->reset_cntl->reset_lock);
tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_NONE;
}
@@ -339,10 +331,13 @@ static int
aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
struct amdgpu_reset_context *reset_context)
{
+ struct list_head *reset_device_list = reset_context->reset_device_list;
struct amdgpu_device *tmp_adev = NULL;
- struct list_head reset_device_list;
int r;
+ if (reset_device_list == NULL)
+ return -EINVAL;
+
if (reset_context->reset_req_dev->ip_versions[MP1_HWIP][0] ==
IP_VERSION(13, 0, 2) &&
reset_context->hive == NULL) {
@@ -350,19 +345,7 @@ aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
return -EINVAL;
}
- INIT_LIST_HEAD(&reset_device_list);
- if (reset_context->hive) {
- list_for_each_entry (tmp_adev,
- &reset_context->hive->device_list,
- gmc.xgmi.head)
- list_add_tail(&tmp_adev->reset_list,
- &reset_device_list);
- } else {
- list_add_tail(&reset_context->reset_req_dev->reset_list,
- &reset_device_list);
- }
-
- list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
dev_info(tmp_adev->dev,
"GPU reset succeeded, trying to resume\n");
r = aldebaran_mode2_restore_ip(tmp_adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index e146810c700b..d597e2656c47 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -317,7 +317,7 @@ enum amdgpu_kiq_irq {
AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
AMDGPU_CP_KIQ_IRQ_LAST
};
-
+#define SRIOV_USEC_TIMEOUT 1200000 /* wait 12 * 100ms for SRIOV */
#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
#define MAX_KIQ_REG_TRY 1000
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 3c09dcc0986e..647220a8762d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -96,6 +96,7 @@ struct amdgpu_amdkfd_fence {
struct amdgpu_kfd_dev {
struct kfd_dev *dev;
uint64_t vram_used;
+ uint64_t vram_used_aligned;
bool init_complete;
struct work_struct reset_work;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index a699134a1e8c..cbd593f7d553 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -40,10 +40,10 @@
#define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
/*
- * Align VRAM allocations to 2MB to avoid fragmentation caused by 4K allocations in the tail 2MB
+ * Align VRAM availability to 2MB to avoid fragmentation caused by 4K allocations in the tail 2MB
* BO chunk
*/
-#define VRAM_ALLOCATION_ALIGN (1 << 21)
+#define VRAM_AVAILABLITY_ALIGN (1 << 21)
/* Impose limit on how much memory KFD can use */
static struct {
@@ -149,7 +149,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
* to avoid fragmentation caused by 4K allocations in the tail
* 2M BO chunk.
*/
- vram_needed = ALIGN(size, VRAM_ALLOCATION_ALIGN);
+ vram_needed = size;
} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
system_mem_needed = size;
} else if (!(alloc_flag &
@@ -182,8 +182,10 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
*/
WARN_ONCE(vram_needed && !adev,
"adev reference can't be null when vram is used");
- if (adev)
+ if (adev) {
adev->kfd.vram_used += vram_needed;
+ adev->kfd.vram_used_aligned += ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN);
+ }
kfd_mem_limit.system_mem_used += system_mem_needed;
kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
@@ -203,8 +205,10 @@ void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
WARN_ONCE(!adev,
"adev reference can't be null when alloc mem flags vram is set");
- if (adev)
- adev->kfd.vram_used -= ALIGN(size, VRAM_ALLOCATION_ALIGN);
+ if (adev) {
+ adev->kfd.vram_used -= size;
+ adev->kfd.vram_used_aligned -= ALIGN(size, VRAM_AVAILABLITY_ALIGN);
+ }
} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
kfd_mem_limit.system_mem_used -= size;
} else if (!(alloc_flag &
@@ -1608,15 +1612,14 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev)
uint64_t reserved_for_pt =
ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
size_t available;
-
spin_lock(&kfd_mem_limit.mem_limit_lock);
available = adev->gmc.real_vram_size
- - adev->kfd.vram_used
+ - adev->kfd.vram_used_aligned
- atomic64_read(&adev->vram_pin_size)
- reserved_for_pt;
spin_unlock(&kfd_mem_limit.mem_limit_lock);
- return ALIGN_DOWN(available, VRAM_ALLOCATION_ALIGN);
+ return ALIGN_DOWN(available, VRAM_AVAILABLITY_ALIGN);
}
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index fd8f3731758e..b81b77a9efa6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -314,7 +314,7 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
mem_channel_number = vram_info->v30.channel_num;
mem_channel_width = vram_info->v30.channel_width;
if (vram_width)
- *vram_width = mem_channel_number * mem_channel_width;
+ *vram_width = mem_channel_number * (1 << mem_channel_width);
break;
default:
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index d8f1335bc68f..b7bae833c804 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -837,16 +837,12 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
continue;
r = amdgpu_vm_bo_update(adev, bo_va, false);
- if (r) {
- mutex_unlock(&p->bo_list->bo_list_mutex);
+ if (r)
return r;
- }
r = amdgpu_sync_fence(&p->job->sync, bo_va->last_pt_update);
- if (r) {
- mutex_unlock(&p->bo_list->bo_list_mutex);
+ if (r)
return r;
- }
}
r = amdgpu_vm_handle_moved(adev, vm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index e2eec985adb3..cb00c7d6f50b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1705,7 +1705,7 @@ static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
{
struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
char reg_offset[11];
- uint32_t *new, *tmp = NULL;
+ uint32_t *new = NULL, *tmp = NULL;
int ret, i = 0, len = 0;
do {
@@ -1747,7 +1747,8 @@ static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
ret = size;
error_free:
- kfree(tmp);
+ if (tmp != new)
+ kfree(tmp);
kfree(new);
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index c4a6fe3070b6..f095a2513aff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2456,12 +2456,14 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
if (!hive->reset_domain ||
!amdgpu_reset_get_reset_domain(hive->reset_domain)) {
r = -ENOENT;
+ amdgpu_put_xgmi_hive(hive);
goto init_failed;
}
/* Drop the early temporary reset domain we created for device */
amdgpu_reset_put_reset_domain(adev->reset_domain);
adev->reset_domain = hive->reset_domain;
+ amdgpu_put_xgmi_hive(hive);
}
}
@@ -4413,8 +4415,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
retry:
amdgpu_amdkfd_pre_reset(adev);
- amdgpu_amdkfd_pre_reset(adev);
-
if (from_hypervisor)
r = amdgpu_virt_request_full_gpu(adev, true);
else
@@ -4742,6 +4742,8 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
reset_list);
amdgpu_reset_reg_dumps(tmp_adev);
+
+ reset_context->reset_device_list = device_list_handle;
r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
/* If reset handler not implemented, continue; otherwise return */
if (r == -ENOSYS)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 5071b96be982..b1099ee79c50 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -272,10 +272,6 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
/* Signal all jobs not yet scheduled */
for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
struct drm_sched_rq *rq = &sched->sched_rq[i];
-
- if (!rq)
- continue;
-
spin_lock(&rq->lock);
list_for_each_entry(s_entity, &rq->entities, list) {
while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index b067ce45d226..1036446abc30 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -2641,6 +2641,9 @@ static int psp_hw_fini(void *handle)
psp_rap_terminate(psp);
psp_dtm_terminate(psp);
psp_hdcp_terminate(psp);
+
+ if (adev->gmc.xgmi.num_physical_nodes > 1)
+ psp_xgmi_terminate(psp);
}
psp_asd_terminate(psp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
index 9e55a5d7a825..ffda1560c648 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
@@ -37,6 +37,7 @@ struct amdgpu_reset_context {
struct amdgpu_device *reset_req_dev;
struct amdgpu_job *job;
struct amdgpu_hive_info *hive;
+ struct list_head *reset_device_list;
unsigned long flags;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 3b4c19412625..134575a3893c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -637,6 +637,8 @@ struct amdgpu_ttm_tt {
#endif
};
+#define ttm_to_amdgpu_ttm_tt(ptr) container_of(ptr, struct amdgpu_ttm_tt, ttm)
+
#ifdef CONFIG_DRM_AMDGPU_USERPTR
/*
* amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
@@ -648,7 +650,7 @@ struct amdgpu_ttm_tt {
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
{
struct ttm_tt *ttm = bo->tbo.ttm;
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
unsigned long start = gtt->userptr;
struct vm_area_struct *vma;
struct mm_struct *mm;
@@ -702,7 +704,7 @@ out_unlock:
*/
bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
bool r = false;
if (!gtt || !gtt->userptr)
@@ -751,7 +753,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
enum dma_data_direction direction = write ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
@@ -788,7 +790,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
enum dma_data_direction direction = write ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
@@ -822,7 +824,7 @@ static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
{
struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
struct ttm_tt *ttm = tbo->ttm;
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
if (amdgpu_bo_encrypted(abo))
flags |= AMDGPU_PTE_TMZ;
@@ -860,7 +862,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
struct ttm_resource *bo_mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- struct amdgpu_ttm_tt *gtt = (void*)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
uint64_t flags;
int r;
@@ -927,7 +929,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_operation_ctx ctx = { false, false };
- struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
struct ttm_placement placement;
struct ttm_place placements;
struct ttm_resource *tmp;
@@ -998,7 +1000,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
/* if the pages have userptr pinning then clear that first */
if (gtt->userptr) {
@@ -1025,7 +1027,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
if (gtt->usertask)
put_task_struct(gtt->usertask);
@@ -1079,7 +1081,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
struct ttm_operation_ctx *ctx)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
pgoff_t i;
int ret;
@@ -1113,7 +1115,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
struct amdgpu_device *adev;
pgoff_t i;
@@ -1182,7 +1184,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
/* Set TTM_TT_FLAG_EXTERNAL before populate but after create. */
bo->ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;
- gtt = (void *)bo->ttm;
+ gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
gtt->userptr = addr;
gtt->userflags = flags;
@@ -1199,7 +1201,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
*/
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
if (gtt == NULL)
return NULL;
@@ -1218,7 +1220,7 @@ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
unsigned long end, unsigned long *userptr)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
unsigned long size;
if (gtt == NULL || !gtt->userptr)
@@ -1241,7 +1243,7 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
*/
bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
if (gtt == NULL || !gtt->userptr)
return false;
@@ -1254,7 +1256,7 @@ bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
*/
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
if (gtt == NULL)
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index 108e8e8a1a36..576849e95296 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -496,8 +496,7 @@ static int amdgpu_vkms_sw_init(void *handle)
adev_to_drm(adev)->mode_config.max_height = YRES_MAX;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- /* disable prefer shadow for now due to hibernation issues */
- adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 1b108d03e785..f2aebbf3fbe3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -742,7 +742,7 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
amdgpu_put_xgmi_hive(hive);
}
- return psp_xgmi_terminate(&adev->psp);
+ return 0;
}
static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c
index 33a8a7365aef..f0e235f98afb 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c
@@ -28,13 +28,44 @@
#include "navi10_enum.h"
#include "soc15_common.h"
+#define regATHUB_MISC_CNTL_V3_0_1 0x00d7
+#define regATHUB_MISC_CNTL_V3_0_1_BASE_IDX 0
+
+
+static uint32_t athub_v3_0_get_cg_cntl(struct amdgpu_device *adev)
+{
+ uint32_t data;
+
+ switch (adev->ip_versions[ATHUB_HWIP][0]) {
+ case IP_VERSION(3, 0, 1):
+ data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1);
+ break;
+ default:
+ data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
+ break;
+ }
+ return data;
+}
+
+static void athub_v3_0_set_cg_cntl(struct amdgpu_device *adev, uint32_t data)
+{
+ switch (adev->ip_versions[ATHUB_HWIP][0]) {
+ case IP_VERSION(3, 0, 1):
+ WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1, data);
+ break;
+ default:
+ WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data);
+ break;
+ }
+}
+
static void
athub_v3_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t def, data;
- def = data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
+ def = data = athub_v3_0_get_cg_cntl(adev);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ATHUB_MGCG))
data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
@@ -42,7 +73,7 @@ athub_v3_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;
if (def != data)
- WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data);
+ athub_v3_0_set_cg_cntl(adev, data);
}
static void
@@ -51,7 +82,7 @@ athub_v3_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
{
uint32_t def, data;
- def = data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
+ def = data = athub_v3_0_get_cg_cntl(adev);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ATHUB_LS))
data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
@@ -59,7 +90,7 @@ athub_v3_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
if (def != data)
- WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data);
+ athub_v3_0_set_cg_cntl(adev, data);
}
int athub_v3_0_set_clockgating(struct amdgpu_device *adev,
@@ -70,6 +101,7 @@ int athub_v3_0_set_clockgating(struct amdgpu_device *adev,
switch (adev->ip_versions[ATHUB_HWIP][0]) {
case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 1):
case IP_VERSION(3, 0, 2):
athub_v3_0_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
@@ -88,7 +120,7 @@ void athub_v3_0_get_clockgating(struct amdgpu_device *adev, u64 *flags)
int data;
/* AMD_CG_SUPPORT_ATHUB_MGCG */
- data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
+ data = athub_v3_0_get_cg_cntl(adev);
if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK)
*flags |= AMD_CG_SUPPORT_ATHUB_MGCG;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 9c964cd3b5d4..288fce7dc0ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2796,8 +2796,7 @@ static int dce_v10_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.max_height = 16384;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- /* disable prefer shadow for now due to hibernation issues */
- adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index e0ad9f27dc3f..cbe5250b31cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2914,8 +2914,7 @@ static int dce_v11_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.max_height = 16384;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- /* disable prefer shadow for now due to hibernation issues */
- adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 77f5e998a120..b1c44fab074f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2673,8 +2673,7 @@ static int dce_v6_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.max_width = 16384;
adev_to_drm(adev)->mode_config.max_height = 16384;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- /* disable prefer shadow for now due to hibernation issues */
- adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 802e5c753271..a22b45c92792 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2693,8 +2693,11 @@ static int dce_v8_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.max_height = 16384;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- /* disable prefer shadow for now due to hibernation issues */
- adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ if (adev->asic_type == CHIP_HAWAII)
+ /* disable prefer shadow for now due to hibernation issues */
+ adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ else
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index fafbad3cf08d..a2a4dc1844c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -4846,7 +4846,7 @@ static int gfx_v10_0_sw_init(void *handle)
case IP_VERSION(10, 3, 3):
case IP_VERSION(10, 3, 7):
adev->gfx.me.num_me = 1;
- adev->gfx.me.num_pipe_per_me = 2;
+ adev->gfx.me.num_pipe_per_me = 1;
adev->gfx.me.num_queue_per_pipe = 1;
adev->gfx.mec.num_mec = 2;
adev->gfx.mec.num_pipe_per_mec = 4;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 6fd71cb10e54..f6b1bb40e503 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -53,6 +53,7 @@
#define GFX11_MEC_HPD_SIZE 2048
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
+#define RLC_PG_DELAY_3_DEFAULT_GC_11_0_1 0x1388
#define regCGTT_WD_CLK_CTRL 0x5086
#define regCGTT_WD_CLK_CTRL_BASE_IDX 1
@@ -130,6 +131,8 @@ static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
bool all_hub, uint8_t dst_sel);
static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev);
static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev);
+static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev,
+ bool enable);
static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
@@ -1138,6 +1141,7 @@ static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = {
.read_wave_vgprs = &gfx_v11_0_read_wave_vgprs,
.select_me_pipe_q = &gfx_v11_0_select_me_pipe_q,
.init_spm_golden = &gfx_v11_0_init_spm_golden_registers,
+ .update_perfmon_mgcg = &gfx_v11_0_update_perf_clk,
};
static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
@@ -5181,9 +5185,12 @@ static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *ade
data = REG_SET_FIELD(data, SDMA0_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
- data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
- data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
- WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
+ /* Some ASICs only have one SDMA instance, no need to configure SDMA1 */
+ if (adev->sdma.num_instances > 1) {
+ data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
+ data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
+ WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
+ }
} else {
/* Program RLC_CGCG_CGLS_CTRL */
def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
@@ -5212,9 +5219,12 @@ static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *ade
data &= ~SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
- data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
- data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
- WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
+ /* Some ASICs only have one SDMA instance, no need to configure SDMA1 */
+ if (adev->sdma.num_instances > 1) {
+ data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
+ data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
+ WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
+ }
}
}
@@ -5279,6 +5289,38 @@ static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = {
.update_spm_vmid = gfx_v11_0_update_spm_vmid,
};
+static void gfx_v11_cntl_power_gating(struct amdgpu_device *adev, bool enable)
+{
+ u32 data = RREG32_SOC15(GC, 0, regRLC_PG_CNTL);
+
+ if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
+ data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
+ else
+ data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
+
+ WREG32_SOC15(GC, 0, regRLC_PG_CNTL, data);
+
+ // Program RLC_PG_DELAY3 for CGPG hysteresis
+ if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(11, 0, 1):
+ WREG32_SOC15(GC, 0, regRLC_PG_DELAY_3, RLC_PG_DELAY_3_DEFAULT_GC_11_0_1);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void gfx_v11_cntl_pg(struct amdgpu_device *adev, bool enable)
+{
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
+
+ gfx_v11_cntl_power_gating(adev, enable);
+
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
+}
+
static int gfx_v11_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
@@ -5293,6 +5335,10 @@ static int gfx_v11_0_set_powergating_state(void *handle,
case IP_VERSION(11, 0, 2):
amdgpu_gfx_off_ctrl(adev, enable);
break;
+ case IP_VERSION(11, 0, 1):
+ gfx_v11_cntl_pg(adev, enable);
+ amdgpu_gfx_off_ctrl(adev, enable);
+ break;
default:
break;
}
@@ -5310,6 +5356,7 @@ static int gfx_v11_0_set_clockgating_state(void *handle,
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
gfx_v11_0_update_gfx_clock_gating(adev,
state == AMD_CG_STATE_GATE);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index c6e0f9313a7f..fc9c1043244c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2587,7 +2587,8 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
gfx_v9_0_tiling_mode_table_init(adev);
- gfx_v9_0_setup_rb(adev);
+ if (adev->gfx.num_gfx_rings)
+ gfx_v9_0_setup_rb(adev);
gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 9ae8cdaa033e..f513e2c2e964 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -419,6 +419,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint32_t seq;
uint16_t queried_pasid;
bool ret;
+ u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
@@ -437,7 +438,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
- r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
+ r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
if (r < 1) {
dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
return -ETIME;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 22761a3bb818..4603653916f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -896,6 +896,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint32_t seq;
uint16_t queried_pasid;
bool ret;
+ u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
@@ -935,7 +936,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
- r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
+ r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
if (r < 1) {
dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
up_read(&adev->reset_domain->sem);
@@ -1624,12 +1625,15 @@ static int gmc_v9_0_sw_init(void *handle)
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
else
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
+ adev->gmc.translate_further = adev->vm_manager.num_level > 1;
break;
case IP_VERSION(9, 4, 1):
adev->num_vmhubs = 3;
/* Keep the vm size same with Vega20 */
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
+ adev->gmc.translate_further = adev->vm_manager.num_level > 1;
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
index 39a696cd45b5..29c3484ae1f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
@@ -40,6 +40,156 @@ static void hdp_v5_2_flush_hdp(struct amdgpu_device *adev,
0);
}
+static void hdp_v5_2_update_mem_power_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t hdp_clk_cntl;
+ uint32_t hdp_mem_pwr_cntl;
+
+ if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_HDP_DS |
+ AMD_CG_SUPPORT_HDP_SD)))
+ return;
+
+ hdp_clk_cntl = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
+ hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL);
+
+ /* Before doing the clock/power mode switch, force on the MEM clock */
+ hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+ ATOMIC_MEM_CLK_SOFT_OVERRIDE, 1);
+ hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+ RC_MEM_CLK_SOFT_OVERRIDE, 1);
+ WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
+
+ /* disable clock and power gating before any changing */
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_CTRL_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_LS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_DS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_SD_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_CTRL_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_LS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_DS_EN, 0);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_SD_EN, 0);
+ WREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
+
+ /* Already disabled above. The actions below are for "enabled" only */
+ if (enable) {
+ /* only one clock gating mode (LS/DS/SD) can be enabled */
+ if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_SD_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_SD_EN, 1);
+ } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_LS_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_LS_EN, 1);
+ } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_DS_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
+ HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_DS_EN, 1);
+ }
+
+ /* confirmed that ATOMIC/RC_MEM_POWER_CTRL_EN have to be set for SRAM LS/DS/SD */
+ if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
+ AMD_CG_SUPPORT_HDP_SD)) {
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ ATOMIC_MEM_POWER_CTRL_EN, 1);
+ hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
+ RC_MEM_POWER_CTRL_EN, 1);
+ WREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
+ }
+ }
+
+ /* disable MEM clock override after clock/power mode changing */
+ hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+ ATOMIC_MEM_CLK_SOFT_OVERRIDE, 0);
+ hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
+ RC_MEM_CLK_SOFT_OVERRIDE, 0);
+ WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
+}
+
+static void hdp_v5_2_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t hdp_clk_cntl;
+
+ if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
+ return;
+
+ hdp_clk_cntl = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
+
+ if (enable) {
+ hdp_clk_cntl &=
+ ~(uint32_t)
+ (HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
+ } else {
+ hdp_clk_cntl |= HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
+ }
+
+ WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
+}
+
+static void hdp_v5_2_get_clockgating_state(struct amdgpu_device *adev,
+ u64 *flags)
+{
+ uint32_t tmp;
+
+ /* AMD_CG_SUPPORT_HDP_MGCG */
+ tmp = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
+ if (!(tmp & (HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
+ HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
+ *flags |= AMD_CG_SUPPORT_HDP_MGCG;
+
+ /* AMD_CG_SUPPORT_HDP_LS/DS/SD */
+ tmp = RREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL);
+ if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_LS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_HDP_LS;
+ else if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_DS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_HDP_DS;
+ else if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_SD_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_HDP_SD;
+}
+
+static void hdp_v5_2_update_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ hdp_v5_2_update_mem_power_gating(adev, enable);
+ hdp_v5_2_update_medium_grain_clock_gating(adev, enable);
+}
+
const struct amdgpu_hdp_funcs hdp_v5_2_funcs = {
.flush_hdp = hdp_v5_2_flush_hdp,
+ .update_clock_gating = hdp_v5_2_update_clock_gating,
+ .get_clock_gating_state = hdp_v5_2_get_clockgating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
index 92dc60a9d209..085e613f3646 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
@@ -727,6 +727,7 @@ static const struct amd_ip_funcs ih_v6_0_ip_funcs = {
static const struct amdgpu_ih_funcs ih_v6_0_funcs = {
.get_wptr = ih_v6_0_get_wptr,
.decode_iv = amdgpu_ih_decode_iv_helper,
+ .decode_iv_ts = amdgpu_ih_decode_iv_ts_helper,
.set_rptr = ih_v6_0_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 3f44a099c52a..3e51e773f92b 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -176,6 +176,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp);
+ tmp = mmVM_L2_CNTL3_DEFAULT;
if (adev->gmc.translate_further) {
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
index cac72ced94c8..e8058edc1d10 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
@@ -518,18 +518,41 @@ static u64 mmhub_v3_0_1_get_mc_fb_offset(struct amdgpu_device *adev)
static void mmhub_v3_0_1_update_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
- //TODO
+ uint32_t def, data;
+
+ def = data = RREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG);
+
+ if (enable)
+ data |= MM_ATC_L2_MISC_CG__ENABLE_MASK;
+ else
+ data &= ~MM_ATC_L2_MISC_CG__ENABLE_MASK;
+
+ if (def != data)
+ WREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG, data);
}
static void mmhub_v3_0_1_update_medium_grain_light_sleep(struct amdgpu_device *adev,
bool enable)
{
- //TODO
+ uint32_t def, data;
+
+ def = data = RREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG);
+
+ if (enable)
+ data |= MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
+ else
+ data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
+
+ if (def != data)
+ WREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG, data);
}
static int mmhub_v3_0_1_set_clockgating(struct amdgpu_device *adev,
enum amd_clockgating_state state)
{
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
mmhub_v3_0_1_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
mmhub_v3_0_1_update_medium_grain_light_sleep(adev,
@@ -539,7 +562,20 @@ static int mmhub_v3_0_1_set_clockgating(struct amdgpu_device *adev,
static void mmhub_v3_0_1_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
- //TODO
+ int data;
+
+ if (amdgpu_sriov_vf(adev))
+ *flags = 0;
+
+ data = RREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG);
+
+ /* AMD_CG_SUPPORT_MC_MGCG */
+ if (data & MM_ATC_L2_MISC_CG__ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_MC_MGCG;
+
+ /* AMD_CG_SUPPORT_MC_LS */
+ if (data & MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_MC_LS;
}
const struct amdgpu_mmhub_funcs mmhub_v3_0_1_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index 6e0145b2b408..445cb06b9d26 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -295,9 +295,17 @@ static void mmhub_v9_4_disable_identity_aperture(struct amdgpu_device *adev,
static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ unsigned int num_level, block_size;
uint32_t tmp;
int i;
+ num_level = adev->vm_manager.num_level;
+ block_size = adev->vm_manager.block_size;
+ if (adev->gmc.translate_further)
+ num_level -= 1;
+ else
+ block_size -= 9;
+
for (i = 0; i <= 14; i++) {
tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT1_CNTL,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i);
@@ -305,7 +313,7 @@ static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid)
ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
PAGE_TABLE_DEPTH,
- adev->vm_manager.num_level);
+ num_level);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
@@ -323,7 +331,7 @@ static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid)
EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
PAGE_TABLE_BLOCK_SIZE,
- adev->vm_manager.block_size - 9);
+ block_size);
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index 4b5396d3e60f..eec13cb5bf75 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -409,9 +409,11 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
u32 wptr, tmp;
struct amdgpu_ih_regs *ih_regs;
- if (ih == &adev->irq.ih) {
+ if (ih == &adev->irq.ih || ih == &adev->irq.ih_soft) {
/* Only ring0 supports writeback. On other rings fall back
* to register-based code with overflow checking below.
+ * ih_soft ring doesn't have any backing hardware registers,
+ * update wptr and return.
*/
wptr = le32_to_cpu(*ih->wptr_cpu);
@@ -483,6 +485,9 @@ static void navi10_ih_set_rptr(struct amdgpu_device *adev,
{
struct amdgpu_ih_regs *ih_regs;
+ if (ih == &adev->irq.ih_soft)
+ return;
+
if (ih->use_doorbell) {
/* XXX check if swapping is necessary on BE */
*ih->rptr_cpu = ih->rptr;
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
index 01e8288d09a8..1dc95ef21da6 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
@@ -247,6 +247,81 @@ static void nbio_v7_7_init_registers(struct amdgpu_device *adev)
}
+static void nbio_v7_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data;
+
+ if (enable && !(adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG))
+ return;
+
+ def = data = RREG32_SOC15(NBIO, 0, regBIF0_CPM_CONTROL);
+ if (enable) {
+ data |= (BIF0_CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
+ } else {
+ data &= ~(BIF0_CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
+ BIF0_CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
+ }
+
+ if (def != data)
+ WREG32_SOC15(NBIO, 0, regBIF0_CPM_CONTROL, data);
+}
+
+static void nbio_v7_7_update_medium_grain_light_sleep(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data;
+
+ if (enable && !(adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
+ return;
+
+ def = data = RREG32_SOC15(NBIO, 0, regBIF0_PCIE_CNTL2);
+ if (enable)
+ data |= BIF0_PCIE_CNTL2__SLV_MEM_LS_EN_MASK;
+ else
+ data &= ~BIF0_PCIE_CNTL2__SLV_MEM_LS_EN_MASK;
+
+ if (def != data)
+ WREG32_SOC15(NBIO, 0, regBIF0_PCIE_CNTL2, data);
+
+ def = data = RREG32_SOC15(NBIO, 0, regBIF0_PCIE_TX_POWER_CTRL_1);
+ if (enable) {
+ data |= (BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK |
+ BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK);
+ } else {
+ data &= ~(BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK |
+ BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK);
+ }
+
+ if (def != data)
+ WREG32_SOC15(NBIO, 0, regBIF0_PCIE_TX_POWER_CTRL_1, data);
+}
+
+static void nbio_v7_7_get_clockgating_state(struct amdgpu_device *adev,
+ u64 *flags)
+{
+ uint32_t data;
+
+ /* AMD_CG_SUPPORT_BIF_MGCG */
+ data = RREG32_SOC15(NBIO, 0, regBIF0_CPM_CONTROL);
+ if (data & BIF0_CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_BIF_MGCG;
+
+ /* AMD_CG_SUPPORT_BIF_LS */
+ data = RREG32_SOC15(NBIO, 0, regBIF0_PCIE_CNTL2);
+ if (data & BIF0_PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_BIF_LS;
+}
+
const struct amdgpu_nbio_funcs nbio_v7_7_funcs = {
.get_hdp_flush_req_offset = nbio_v7_7_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v7_7_get_hdp_flush_done_offset,
@@ -262,6 +337,9 @@ const struct amdgpu_nbio_funcs nbio_v7_7_funcs = {
.enable_doorbell_aperture = nbio_v7_7_enable_doorbell_aperture,
.enable_doorbell_selfring_aperture = nbio_v7_7_enable_doorbell_selfring_aperture,
.ih_doorbell_range = nbio_v7_7_ih_doorbell_range,
+ .update_medium_grain_clock_gating = nbio_v7_7_update_medium_grain_clock_gating,
+ .update_medium_grain_light_sleep = nbio_v7_7_update_medium_grain_light_sleep,
+ .get_clockgating_state = nbio_v7_7_get_clockgating_state,
.ih_control = nbio_v7_7_ih_control,
.init_registers = nbio_v7_7_init_registers,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
index a2588200ea58..0b2ac418e4ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
@@ -101,6 +101,16 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
adev->psp.dtm_context.context.bin_desc.start_addr =
(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
le32_to_cpu(ta_hdr->dtm.offset_bytes);
+
+ if (adev->apu_flags & AMD_APU_IS_RENOIR) {
+ adev->psp.securedisplay_context.context.bin_desc.fw_version =
+ le32_to_cpu(ta_hdr->securedisplay.fw_version);
+ adev->psp.securedisplay_context.context.bin_desc.size_bytes =
+ le32_to_cpu(ta_hdr->securedisplay.size_bytes);
+ adev->psp.securedisplay_context.context.bin_desc.start_addr =
+ (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
+ le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
+ }
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index 726a5bba40b2..a75a286e1ecf 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -20,7 +20,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include <linux/dev_printk.h>
#include <drm/drm_drv.h>
#include <linux/vmalloc.h>
#include "amdgpu.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 52816de5e17b..55284b24f113 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -494,6 +494,20 @@ static void soc21_pre_asic_init(struct amdgpu_device *adev)
{
}
+static int soc21_update_umd_stable_pstate(struct amdgpu_device *adev,
+ bool enter)
+{
+ if (enter)
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
+ else
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
+
+ if (adev->gfx.funcs->update_perfmon_mgcg)
+ adev->gfx.funcs->update_perfmon_mgcg(adev, !enter);
+
+ return 0;
+}
+
static const struct amdgpu_asic_funcs soc21_asic_funcs =
{
.read_disabled_bios = &soc21_read_disabled_bios,
@@ -513,6 +527,7 @@ static const struct amdgpu_asic_funcs soc21_asic_funcs =
.supports_baco = &amdgpu_dpm_is_baco_supported,
.pre_asic_init = &soc21_pre_asic_init,
.query_video_codecs = &soc21_query_video_codecs,
+ .update_umd_stable_pstate = &soc21_update_umd_stable_pstate,
};
static int soc21_common_early_init(void *handle)
@@ -546,8 +561,10 @@ static int soc21_common_early_init(void *handle)
case IP_VERSION(11, 0, 0):
adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
+#if 0
AMD_CG_SUPPORT_GFX_3D_CGCG |
AMD_CG_SUPPORT_GFX_3D_CGLS |
+#endif
AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_REPEATER_FGCG |
AMD_CG_SUPPORT_GFX_FGCG |
@@ -575,7 +592,9 @@ static int soc21_common_early_init(void *handle)
AMD_CG_SUPPORT_VCN_MGCG |
AMD_CG_SUPPORT_JPEG_MGCG |
AMD_CG_SUPPORT_ATHUB_MGCG |
- AMD_CG_SUPPORT_ATHUB_LS;
+ AMD_CG_SUPPORT_ATHUB_LS |
+ AMD_CG_SUPPORT_IH_CG |
+ AMD_CG_SUPPORT_HDP_SD;
adev->pg_flags =
AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_VCN_DPG |
@@ -586,9 +605,25 @@ static int soc21_common_early_init(void *handle)
break;
case IP_VERSION(11, 0, 1):
adev->cg_flags =
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_FGCG |
+ AMD_CG_SUPPORT_REPEATER_FGCG |
+ AMD_CG_SUPPORT_GFX_PERF_CLK |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_HDP_MGCG |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_ATHUB_MGCG |
+ AMD_CG_SUPPORT_ATHUB_LS |
+ AMD_CG_SUPPORT_IH_CG |
+ AMD_CG_SUPPORT_BIF_MGCG |
+ AMD_CG_SUPPORT_BIF_LS |
AMD_CG_SUPPORT_VCN_MGCG |
AMD_CG_SUPPORT_JPEG_MGCG;
adev->pg_flags =
+ AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_JPEG;
adev->external_rev_id = adev->rev_id + 0x1;
break;
@@ -683,6 +718,8 @@ static int soc21_common_set_clockgating_state(void *handle,
switch (adev->ip_versions[NBIO_HWIP][0]) {
case IP_VERSION(4, 3, 0):
+ case IP_VERSION(4, 3, 1):
+ case IP_VERSION(7, 7, 0):
adev->nbio.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
adev->nbio.funcs->update_medium_grain_light_sleep(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index ca14c3ef742e..fb2d74f30448 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -1115,7 +1115,7 @@ static int vcn_v4_0_start(struct amdgpu_device *adev)
*
* Stop VCN block with dpg mode
*/
-static int vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+static void vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
uint32_t tmp;
@@ -1133,7 +1133,6 @@ static int vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
/* disable dynamic power gating mode */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
- return 0;
}
/**
@@ -1154,7 +1153,7 @@ static int vcn_v4_0_stop(struct amdgpu_device *adev)
fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- r = vcn_v4_0_stop_dpg_mode(adev, i);
+ vcn_v4_0_stop_dpg_mode(adev, i);
continue;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index cdd599a08125..03b7066471f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -334,9 +334,11 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
u32 wptr, tmp;
struct amdgpu_ih_regs *ih_regs;
- if (ih == &adev->irq.ih) {
+ if (ih == &adev->irq.ih || ih == &adev->irq.ih_soft) {
/* Only ring0 supports writeback. On other rings fall back
* to register-based code with overflow checking below.
+ * ih_soft ring doesn't have any backing hardware registers,
+ * update wptr and return.
*/
wptr = le32_to_cpu(*ih->wptr_cpu);
@@ -409,6 +411,9 @@ static void vega10_ih_set_rptr(struct amdgpu_device *adev,
{
struct amdgpu_ih_regs *ih_regs;
+ if (ih == &adev->irq.ih_soft)
+ return;
+
if (ih->use_doorbell) {
/* XXX check if swapping is necessary on BE */
*ih->rptr_cpu = ih->rptr;
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index 3b4eb8285943..2022ffbb8dba 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -385,9 +385,11 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
u32 wptr, tmp;
struct amdgpu_ih_regs *ih_regs;
- if (ih == &adev->irq.ih) {
+ if (ih == &adev->irq.ih || ih == &adev->irq.ih_soft) {
/* Only ring0 supports writeback. On other rings fall back
* to register-based code with overflow checking below.
+ * ih_soft ring doesn't have any backing hardware registers,
+ * update wptr and return.
*/
wptr = le32_to_cpu(*ih->wptr_cpu);
@@ -461,6 +463,9 @@ static void vega20_ih_set_rptr(struct amdgpu_device *adev,
{
struct amdgpu_ih_regs *ih_regs;
+ if (ih == &adev->irq.ih_soft)
+ return;
+
if (ih->use_doorbell) {
/* XXX check if swapping is necessary on BE */
*ih->rptr_cpu = ih->rptr;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 2b3d8bc8f0aa..dc774ddf3445 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -874,7 +874,7 @@ static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
err = kfd_wait_on_events(p, args->num_events,
(void __user *)args->events_ptr,
(args->wait_for_all != 0),
- args->timeout, &args->wait_result);
+ &args->timeout, &args->wait_result);
return err;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index f5853835f03a..22c0929d410b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -102,13 +102,18 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
switch (sdma_version) {
case IP_VERSION(6, 0, 0):
- case IP_VERSION(6, 0, 1):
case IP_VERSION(6, 0, 2):
/* Reserve 1 for paging and 1 for gfx */
kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
/* BIT(0)=engine-0 queue-0; BIT(1)=engine-1 queue-0; BIT(2)=engine-0 queue-1; ... */
kfd->device_info.reserved_sdma_queues_bitmap = 0xFULL;
break;
+ case IP_VERSION(6, 0, 1):
+ /* Reserve 1 for paging and 1 for gfx */
+ kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
+ /* BIT(0)=engine-0 queue-0; BIT(1)=engine-0 queue-1; ... */
+ kfd->device_info.reserved_sdma_queues_bitmap = 0x3ULL;
+ break;
default:
break;
}
@@ -377,12 +382,8 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
f2g = &gfx_v10_3_kfd2kgd;
break;
case IP_VERSION(10, 3, 6):
- gfx_target_version = 100306;
- if (!vf)
- f2g = &gfx_v10_3_kfd2kgd;
- break;
case IP_VERSION(10, 3, 7):
- gfx_target_version = 100307;
+ gfx_target_version = 100306;
if (!vf)
f2g = &gfx_v10_3_kfd2kgd;
break;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 3942a56c28bb..83e3ce9f6049 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -894,7 +894,8 @@ static long user_timeout_to_jiffies(uint32_t user_timeout_ms)
return msecs_to_jiffies(user_timeout_ms) + 1;
}
-static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
+static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters,
+ bool undo_auto_reset)
{
uint32_t i;
@@ -903,6 +904,9 @@ static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
spin_lock(&waiters[i].event->lock);
remove_wait_queue(&waiters[i].event->wq,
&waiters[i].wait);
+ if (undo_auto_reset && waiters[i].activated &&
+ waiters[i].event && waiters[i].event->auto_reset)
+ set_event(waiters[i].event);
spin_unlock(&waiters[i].event->lock);
}
@@ -911,7 +915,7 @@ static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
int kfd_wait_on_events(struct kfd_process *p,
uint32_t num_events, void __user *data,
- bool all, uint32_t user_timeout_ms,
+ bool all, uint32_t *user_timeout_ms,
uint32_t *wait_result)
{
struct kfd_event_data __user *events =
@@ -920,7 +924,7 @@ int kfd_wait_on_events(struct kfd_process *p,
int ret = 0;
struct kfd_event_waiter *event_waiters = NULL;
- long timeout = user_timeout_to_jiffies(user_timeout_ms);
+ long timeout = user_timeout_to_jiffies(*user_timeout_ms);
event_waiters = alloc_event_waiters(num_events);
if (!event_waiters) {
@@ -970,15 +974,11 @@ int kfd_wait_on_events(struct kfd_process *p,
}
if (signal_pending(current)) {
- /*
- * This is wrong when a nonzero, non-infinite timeout
- * is specified. We need to use
- * ERESTARTSYS_RESTARTBLOCK, but struct restart_block
- * contains a union with data for each user and it's
- * in generic kernel code that I don't want to
- * touch yet.
- */
ret = -ERESTARTSYS;
+ if (*user_timeout_ms != KFD_EVENT_TIMEOUT_IMMEDIATE &&
+ *user_timeout_ms != KFD_EVENT_TIMEOUT_INFINITE)
+ *user_timeout_ms = jiffies_to_msecs(
+ max(0l, timeout-1));
break;
}
@@ -1019,7 +1019,7 @@ int kfd_wait_on_events(struct kfd_process *p,
event_waiters, events);
out_unlock:
- free_waiters(num_events, event_waiters);
+ free_waiters(num_events, event_waiters, ret == -ERESTARTSYS);
mutex_unlock(&p->event_mutex);
out:
if (ret)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 373e5bfd4e91..b059a77b6081 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -685,13 +685,15 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
migrate.vma = vma;
migrate.start = start;
migrate.end = end;
- migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);
+ if (adev->gmc.xgmi.connected_to_cpu)
+ migrate.flags = MIGRATE_VMA_SELECT_DEVICE_COHERENT;
+ else
+ migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
buf = kvcalloc(npages,
2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
GFP_KERNEL);
-
if (!buf)
goto out;
@@ -974,7 +976,7 @@ int svm_migrate_init(struct amdgpu_device *adev)
{
struct kfd_dev *kfddev = adev->kfd.dev;
struct dev_pagemap *pgmap;
- struct resource *res;
+ struct resource *res = NULL;
unsigned long size;
void *r;
@@ -989,28 +991,34 @@ int svm_migrate_init(struct amdgpu_device *adev)
* should remove reserved size
*/
size = ALIGN(adev->gmc.real_vram_size, 2ULL << 20);
- res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
- if (IS_ERR(res))
- return -ENOMEM;
+ if (adev->gmc.xgmi.connected_to_cpu) {
+ pgmap->range.start = adev->gmc.aper_base;
+ pgmap->range.end = adev->gmc.aper_base + adev->gmc.aper_size - 1;
+ pgmap->type = MEMORY_DEVICE_COHERENT;
+ } else {
+ res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
+ if (IS_ERR(res))
+ return -ENOMEM;
+ pgmap->range.start = res->start;
+ pgmap->range.end = res->end;
+ pgmap->type = MEMORY_DEVICE_PRIVATE;
+ }
- pgmap->type = MEMORY_DEVICE_PRIVATE;
pgmap->nr_range = 1;
- pgmap->range.start = res->start;
- pgmap->range.end = res->end;
pgmap->ops = &svm_migrate_pgmap_ops;
pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev);
- pgmap->flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
-
+ pgmap->flags = 0;
/* Device manager releases device-specific resources, memory region and
* pgmap when driver disconnects from device.
*/
r = devm_memremap_pages(adev->dev, pgmap);
if (IS_ERR(r)) {
pr_err("failed to register HMM device memory\n");
-
/* Disable SVM support capability */
pgmap->type = 0;
- devm_release_mem_region(adev->dev, res->start, resource_size(res));
+ if (pgmap->type == MEMORY_DEVICE_PRIVATE)
+ devm_release_mem_region(adev->dev, res->start,
+ res->end - res->start + 1);
return PTR_ERR(r);
}
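
svm_migrate_init() now distinguishes coherently attached VRAM (adev->gmc.xgmi.connected_to_cpu) from discrete VRAM: the former maps the existing aperture as MEMORY_DEVICE_COHERENT, the latter still requests a free region and registers it as MEMORY_DEVICE_PRIVATE. A hedged stand-alone sketch of that two-way setup, using a toy struct in place of struct dev_pagemap:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_pgmap { uint64_t start, end; const char *type; };

/* Choose the pagemap range and type the way the reworked init path does. */
static void setup_pgmap(struct toy_pgmap *p, bool coherent_to_cpu,
                        uint64_t aper_base, uint64_t aper_size,
                        uint64_t free_start, uint64_t free_size)
{
        if (coherent_to_cpu) {
                p->start = aper_base;
                p->end = aper_base + aper_size - 1;
                p->type = "MEMORY_DEVICE_COHERENT";
        } else {
                p->start = free_start;
                p->end = free_start + free_size - 1;
                p->type = "MEMORY_DEVICE_PRIVATE";
        }
}

int main(void)
{
        struct toy_pgmap p;

        setup_pgmap(&p, true, 0x100000000ull, 1ull << 30, 0, 0);
        printf("%s [%llx-%llx]\n", p.type,
               (unsigned long long)p.start, (unsigned long long)p.end);
        return 0;
}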
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index d03a3b9c9c5d..bf610e3b683b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -1317,7 +1317,7 @@ void kfd_event_free_process(struct kfd_process *p);
int kfd_event_mmap(struct kfd_process *process, struct vm_area_struct *vma);
int kfd_wait_on_events(struct kfd_process *p,
uint32_t num_events, void __user *data,
- bool all, uint32_t user_timeout_ms,
+ bool all, uint32_t *user_timeout_ms,
uint32_t *wait_result);
void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
uint32_t valid_id_bits);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index a67ba8879a56..11074cc8c333 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -541,7 +541,6 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
kfree(svm_bo);
return -ESRCH;
}
- svm_bo->svms = prange->svms;
svm_bo->eviction_fence =
amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
mm,
@@ -3273,7 +3272,6 @@ int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
static void svm_range_evict_svm_bo_worker(struct work_struct *work)
{
struct svm_range_bo *svm_bo;
- struct kfd_process *p;
struct mm_struct *mm;
int r = 0;
@@ -3281,13 +3279,12 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
if (!svm_bo_ref_unless_zero(svm_bo))
return; /* svm_bo was freed while eviction was pending */
- /* svm_range_bo_release destroys this worker thread. So during
- * the lifetime of this thread, kfd_process and mm will be valid.
- */
- p = container_of(svm_bo->svms, struct kfd_process, svms);
- mm = p->mm;
- if (!mm)
+ if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
+ mm = svm_bo->eviction_fence->mm;
+ } else {
+ svm_range_bo_unref(svm_bo);
return;
+ }
mmap_read_lock(mm);
spin_lock(&svm_bo->list_lock);
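
The eviction worker above now pins the address space with mmget_not_zero() on the fence's mm and releases it with mmput() once migration is done, instead of reaching through kfd_process. A small sketch of the take-a-reference-or-bail pattern, with toy stand-ins for the kernel helpers:

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for struct mm_struct's user count. */
struct toy_mm { int users; };

static bool toy_mmget_not_zero(struct toy_mm *mm)
{
        /* Only succeed while somebody else still holds a reference. */
        if (mm->users == 0)
                return false;
        mm->users++;
        return true;
}

static void toy_mmput(struct toy_mm *mm) { mm->users--; }

static void worker(struct toy_mm *mm)
{
        if (!toy_mmget_not_zero(mm))
                return;         /* address space already torn down: bail out */
        /* ... migrate ranges while the mm is pinned ... */
        toy_mmput(mm);
}

int main(void)
{
        struct toy_mm mm = { .users = 1 };

        worker(&mm);
        printf("users=%d\n", mm.users); /* back to 1 */
        return 0;
}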
@@ -3305,8 +3302,7 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
mutex_lock(&prange->migrate_mutex);
do {
- r = svm_migrate_vram_to_ram(prange,
- svm_bo->eviction_fence->mm,
+ r = svm_migrate_vram_to_ram(prange, mm,
KFD_MIGRATE_TRIGGER_TTM_EVICTION);
} while (!r && prange->actual_loc && --retries);
@@ -3324,6 +3320,7 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
}
spin_unlock(&svm_bo->list_lock);
mmap_read_unlock(mm);
+ mmput(mm);
dma_fence_signal(&svm_bo->eviction_fence->base);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index 9156b041ef17..cfac13ad06ef 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -46,7 +46,6 @@ struct svm_range_bo {
spinlock_t list_lock;
struct amdgpu_amdkfd_fence *eviction_fence;
struct work_struct eviction_work;
- struct svm_range_list *svms;
uint32_t evicting;
struct work_struct release_work;
};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 25990bec600d..3f0a4a415907 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1392,8 +1392,8 @@ static int kfd_build_p2p_node_entry(struct kfd_topology_device *dev,
static int kfd_create_indirect_link_prop(struct kfd_topology_device *kdev, int gpu_node)
{
+ struct kfd_iolink_properties *gpu_link, *tmp_link, *cpu_link;
struct kfd_iolink_properties *props = NULL, *props2 = NULL;
- struct kfd_iolink_properties *gpu_link, *cpu_link;
struct kfd_topology_device *cpu_dev;
int ret = 0;
int i, num_cpu;
@@ -1416,16 +1416,19 @@ static int kfd_create_indirect_link_prop(struct kfd_topology_device *kdev, int g
continue;
/* find CPU <--> CPU links */
+ cpu_link = NULL;
cpu_dev = kfd_topology_device_by_proximity_domain(i);
if (cpu_dev) {
- list_for_each_entry(cpu_link,
+ list_for_each_entry(tmp_link,
&cpu_dev->io_link_props, list) {
- if (cpu_link->node_to == gpu_link->node_to)
+ if (tmp_link->node_to == gpu_link->node_to) {
+ cpu_link = tmp_link;
break;
+ }
}
}
- if (cpu_link->node_to != gpu_link->node_to)
+ if (!cpu_link)
return -ENOMEM;
/* CPU <--> CPU <--> GPU, GPU node*/
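
The topology fix stops dereferencing the list_for_each_entry() cursor after the loop: if nothing matched, that cursor is not a valid entry, so the search now walks with a temporary and only keeps a confirmed match in cpu_link. A stand-alone illustration of the same pattern over a plain array (the types here are made up):

#include <stddef.h>
#include <stdio.h>

struct link { int node_to; };

/* Return the matching entry or NULL; never rely on the walking cursor
 * after the search has run off the end. */
static struct link *find_link(struct link *links, size_t n, int node)
{
        struct link *match = NULL;
        size_t i;

        for (i = 0; i < n; i++) {
                if (links[i].node_to == node) {
                        match = &links[i];
                        break;
                }
        }
        return match;
}

int main(void)
{
        struct link links[] = { { 1 }, { 3 } };
        struct link *l = find_link(links, 2, 7);

        printf("%s\n", l ? "found" : "not found"); /* "not found", no bogus deref */
        return 0;
}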
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 8660d93cc405..5140d9c2bf3b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3825,8 +3825,11 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
adev_to_drm(adev)->mode_config.max_height = 16384;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- /* disable prefer shadow for now due to hibernation issues */
- adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ if (adev->asic_type == CHIP_HAWAII)
+ /* disable prefer shadow for now due to hibernation issues */
+ adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ else
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
/* indicates support for immediate flip */
adev_to_drm(adev)->mode_config.async_page_flip = true;
@@ -4135,6 +4138,7 @@ static void register_backlight_device(struct amdgpu_display_manager *dm,
}
}
+static void amdgpu_set_panel_orientation(struct drm_connector *connector);
/*
* In this architecture, the association
@@ -4326,6 +4330,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
adev_to_drm(adev)->vblank_disable_immediate = false;
}
}
+ amdgpu_set_panel_orientation(&aconnector->base);
}
/* Software is initialized. Now we can register interrupt handlers. */
@@ -6684,6 +6689,10 @@ static void amdgpu_set_panel_orientation(struct drm_connector *connector)
connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
return;
+ mutex_lock(&connector->dev->mode_config.mutex);
+ amdgpu_dm_connector_get_modes(connector);
+ mutex_unlock(&connector->dev->mode_config.mutex);
+
encoder = amdgpu_dm_connector_to_encoder(connector);
if (!encoder)
return;
@@ -6728,8 +6737,6 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
* restored here.
*/
amdgpu_dm_update_freesync_caps(connector, edid);
-
- amdgpu_set_panel_orientation(connector);
} else {
amdgpu_dm_connector->num_modes = 0;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index b841b8b0a9d8..987bde4dca3d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -34,6 +34,7 @@
#include "dal_asic_id.h"
#include "amdgpu_display.h"
#include "amdgpu_dm_trace.h"
+#include "amdgpu_dm_plane.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
@@ -149,12 +150,12 @@ static void add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_
*size += 1;
}
-bool modifier_has_dcc(uint64_t modifier)
+static bool modifier_has_dcc(uint64_t modifier)
{
return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
}
-unsigned modifier_gfx9_swizzle_mode(uint64_t modifier)
+static unsigned modifier_gfx9_swizzle_mode(uint64_t modifier)
{
if (modifier == DRM_FORMAT_MOD_LINEAR)
return 0;
@@ -660,7 +661,7 @@ static int get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_ty
add_gfx10_1_modifiers(adev, mods, &size, &capacity);
break;
case AMDGPU_FAMILY_GC_11_0_0:
- case AMDGPU_FAMILY_GC_11_0_2:
+ case AMDGPU_FAMILY_GC_11_0_1:
add_gfx11_modifiers(adev, mods, &size, &capacity);
break;
}
@@ -1412,7 +1413,7 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane,
}
break;
case AMDGPU_FAMILY_GC_11_0_0:
- case AMDGPU_FAMILY_GC_11_0_2:
+ case AMDGPU_FAMILY_GC_11_0_1:
switch (AMD_FMT_MOD_GET(TILE, modifier)) {
case AMD_FMT_MOD_TILE_GFX11_256K_R_X:
case AMD_FMT_MOD_TILE_GFX9_64K_R_X:
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
index 95168c2cfa6f..286981a2dd40 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
@@ -36,17 +36,9 @@ int fill_dc_scaling_info(struct amdgpu_device *adev,
const struct drm_plane_state *state,
struct dc_scaling_info *scaling_info);
-void get_min_max_dc_plane_scaling(struct drm_device *dev,
- struct drm_framebuffer *fb,
- int *min_downscale, int *max_upscale);
-
int dm_plane_helper_check_state(struct drm_plane_state *state,
struct drm_crtc_state *new_crtc_state);
-bool modifier_has_dcc(uint64_t modifier);
-
-unsigned int modifier_gfx9_swizzle_mode(uint64_t modifier);
-
int fill_plane_buffer_attributes(struct amdgpu_device *adev,
const struct amdgpu_framebuffer *afb,
const enum surface_pixel_format format,
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.c b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
index 6767fab55c26..352e9afb85c6 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/conversion.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
@@ -100,3 +100,24 @@ void convert_float_matrix(
matrix[i] = (uint16_t)reg_value;
}
}
+
+static uint32_t find_gcd(uint32_t a, uint32_t b)
+{
+ uint32_t remainder = 0;
+ while (b != 0) {
+ remainder = a % b;
+ a = b;
+ b = remainder;
+ }
+ return a;
+}
+
+void reduce_fraction(uint32_t num, uint32_t den,
+ uint32_t *out_num, uint32_t *out_den)
+{
+ uint32_t gcd = 0;
+
+ gcd = find_gcd(num, den);
+ *out_num = num / gcd;
+ *out_den = den / gcd;
+}
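
reduce_fraction() above divides numerator and denominator by their greatest common divisor, computed with the Euclidean algorithm. A quick user-space check of the same routine, compiled outside the driver:

#include <stdint.h>
#include <stdio.h>

static uint32_t find_gcd(uint32_t a, uint32_t b)
{
        while (b != 0) {
                uint32_t r = a % b;

                a = b;
                b = r;
        }
        return a;
}

static void reduce_fraction(uint32_t num, uint32_t den,
                            uint32_t *out_num, uint32_t *out_den)
{
        uint32_t gcd = find_gcd(num, den);

        *out_num = num / gcd;
        *out_den = den / gcd;
}

int main(void)
{
        uint32_t n, d;

        reduce_fraction(1080, 2160, &n, &d);
        printf("%u/%u\n", n, d); /* 1/2, the downscale factor later fed to SubVP */
        return 0;
}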
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.h b/drivers/gpu/drm/amd/display/dc/basics/conversion.h
index ade785c4fdc7..81da4e6f7a1a 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/conversion.h
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.h
@@ -38,6 +38,9 @@ void convert_float_matrix(
struct fixed31_32 *flt,
uint32_t buffer_size);
+void reduce_fraction(uint32_t num, uint32_t den,
+ uint32_t *out_num, uint32_t *out_den);
+
static inline unsigned int log_2(unsigned int num)
{
return ilog2(num);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 4c76091fd1f2..f276abb63bcd 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -337,7 +337,7 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
break;
}
- case AMDGPU_FAMILY_GC_11_0_2: {
+ case AMDGPU_FAMILY_GC_11_0_1: {
struct clk_mgr_dcn314 *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
if (clk_mgr == NULL) {
@@ -397,7 +397,7 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
dcn32_clk_mgr_destroy(clk_mgr);
break;
- case AMDGPU_FAMILY_GC_11_0_2:
+ case AMDGPU_FAMILY_GC_11_0_1:
dcn314_clk_mgr_destroy(clk_mgr);
break;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 0202dc682682..ca6dfd2d7561 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -24,10 +24,9 @@
*/
#include "dccg.h"
-#include "clk_mgr_internal.h"
+#include "rn_clk_mgr.h"
#include "dcn20/dcn20_clk_mgr.h"
-#include "rn_clk_mgr.h"
#include "dml/dcn20/dcn20_fpu.h"
#include "dce100/dce_clk_mgr.h"
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h
index 2e088c5171b2..f1319957e400 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h
@@ -28,6 +28,7 @@
#include "clk_mgr.h"
#include "dm_pp_smu.h"
+#include "clk_mgr_internal.h"
extern struct wm_table ddr4_wm_table_gs;
extern struct wm_table lpddr4_wm_table_gs;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
index ee99974b3b62..beb025cd3dc2 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
@@ -307,16 +307,6 @@ static void dcn314_enable_pme_wa(struct clk_mgr *clk_mgr_base)
dcn314_smu_enable_pme_wa(clk_mgr);
}
-void dcn314_init_clocks(struct clk_mgr *clk_mgr)
-{
- memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
- // Assumption is that boot state always supports pstate
- clk_mgr->clks.p_state_change_support = true;
- clk_mgr->clks.prev_p_state_change_support = true;
- clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
- clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
-}
-
bool dcn314_are_clock_states_equal(struct dc_clocks *a,
struct dc_clocks *b)
{
@@ -425,7 +415,7 @@ static struct wm_table lpddr5_wm_table = {
}
};
-static DpmClocks_t dummy_clocks;
+static DpmClocks314_t dummy_clocks;
static struct dcn314_watermarks dummy_wms = { 0 };
@@ -510,7 +500,7 @@ static void dcn314_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
static void dcn314_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
struct dcn314_smu_dpm_clks *smu_dpm_clks)
{
- DpmClocks_t *table = smu_dpm_clks->dpm_clks;
+ DpmClocks314_t *table = smu_dpm_clks->dpm_clks;
if (!clk_mgr->smu_ver)
return;
@@ -527,6 +517,26 @@ static void dcn314_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
dcn314_smu_transfer_dpm_table_smu_2_dram(clk_mgr);
}
+static inline bool is_valid_clock_value(uint32_t clock_value)
+{
+ return clock_value > 1 && clock_value < 100000;
+}
+
+static unsigned int convert_wck_ratio(uint8_t wck_ratio)
+{
+ switch (wck_ratio) {
+ case WCK_RATIO_1_2:
+ return 2;
+
+ case WCK_RATIO_1_4:
+ return 4;
+
+ default:
+ break;
+ }
+ return 1;
+}
+
static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
{
uint32_t max = 0;
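
The two helpers added above are self-contained: is_valid_clock_value() rejects anything outside the 1..100000 range (the SMU tables report frequencies in MHz), and convert_wck_ratio() maps the WCK ratio enum to a plain multiplier with a 1:1 fallback. A hedged sketch that mirrors them; the enum values below are assumed for illustration, the real WCK_RATIO_e lives in dcn314_smu.h:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { WCK_RATIO_1_1, WCK_RATIO_1_2, WCK_RATIO_1_4 }; /* illustrative only */

static bool is_valid_clock_value(uint32_t clock_value)
{
        return clock_value > 1 && clock_value < 100000;
}

static unsigned int convert_wck_ratio(uint8_t wck_ratio)
{
        switch (wck_ratio) {
        case WCK_RATIO_1_2:
                return 2;
        case WCK_RATIO_1_4:
                return 4;
        default:
                return 1; /* unknown ratios fall back to 1:1 */
        }
}

int main(void)
{
        printf("%d %d\n", is_valid_clock_value(0), is_valid_clock_value(1600)); /* 0 1 */
        printf("%u\n", convert_wck_ratio(WCK_RATIO_1_4));                       /* 4 */
        return 0;
}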
@@ -540,89 +550,127 @@ static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
return max;
}
-static unsigned int find_clk_for_voltage(
- const DpmClocks_t *clock_table,
- const uint32_t clocks[],
- unsigned int voltage)
-{
- int i;
- int max_voltage = 0;
- int clock = 0;
-
- for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++) {
- if (clock_table->SocVoltage[i] == voltage) {
- return clocks[i];
- } else if (clock_table->SocVoltage[i] >= max_voltage &&
- clock_table->SocVoltage[i] < voltage) {
- max_voltage = clock_table->SocVoltage[i];
- clock = clocks[i];
- }
- }
-
- ASSERT(clock);
- return clock;
-}
-
static void dcn314_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk_mgr,
struct integrated_info *bios_info,
- const DpmClocks_t *clock_table)
+ const DpmClocks314_t *clock_table)
{
- int i, j;
struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
- uint32_t max_dispclk = 0, max_dppclk = 0;
-
- j = -1;
-
- ASSERT(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL);
-
- /* Find lowest DPM, FCLK is filled in reverse order*/
+ struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1];
+ uint32_t max_pstate = 0, max_fclk = 0, min_pstate = 0, max_dispclk = 0, max_dppclk = 0;
+ int i;
- for (i = NUM_DF_PSTATE_LEVELS - 1; i >= 0; i--) {
- if (clock_table->DfPstateTable[i].FClk != 0) {
- j = i;
- break;
+ /* Find highest valid fclk pstate */
+ for (i = 0; i < clock_table->NumDfPstatesEnabled; i++) {
+ if (is_valid_clock_value(clock_table->DfPstateTable[i].FClk) &&
+ clock_table->DfPstateTable[i].FClk > max_fclk) {
+ max_fclk = clock_table->DfPstateTable[i].FClk;
+ max_pstate = i;
}
}
- if (j == -1) {
- /* clock table is all 0s, just use our own hardcode */
- ASSERT(0);
- return;
- }
-
- bw_params->clk_table.num_entries = j + 1;
+ /* We expect the table to contain at least one valid fclk entry. */
+ ASSERT(is_valid_clock_value(max_fclk));
- /* dispclk and dppclk can be max at any voltage, same number of levels for both */
+ /* Dispclk and dppclk can be max at any voltage, same number of levels for both */
if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS &&
clock_table->NumDispClkLevelsEnabled <= NUM_DPPCLK_DPM_LEVELS) {
max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled);
max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled);
} else {
+ /* Invalid number of entries in the table from PMFW. */
ASSERT(0);
}
- for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) {
- bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].FClk;
- bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].MemClk;
- bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].Voltage;
- switch (clock_table->DfPstateTable[j].WckRatio) {
- case WCK_RATIO_1_2:
- bw_params->clk_table.entries[i].wck_ratio = 2;
- break;
- case WCK_RATIO_1_4:
- bw_params->clk_table.entries[i].wck_ratio = 4;
- break;
- default:
- bw_params->clk_table.entries[i].wck_ratio = 1;
+ /* Base the clock table on dcfclk, need at least one entry regardless of pmfw table */
+ for (i = 0; i < clock_table->NumDcfClkLevelsEnabled; i++) {
+ uint32_t min_fclk = clock_table->DfPstateTable[0].FClk;
+ int j;
+
+ for (j = 1; j < clock_table->NumDfPstatesEnabled; j++) {
+ if (is_valid_clock_value(clock_table->DfPstateTable[j].FClk) &&
+ clock_table->DfPstateTable[j].FClk < min_fclk &&
+ clock_table->DfPstateTable[j].Voltage <= clock_table->SocVoltage[i]) {
+ min_fclk = clock_table->DfPstateTable[j].FClk;
+ min_pstate = j;
+ }
}
- bw_params->clk_table.entries[i].dcfclk_mhz = find_clk_for_voltage(clock_table, clock_table->DcfClocks, clock_table->DfPstateTable[j].Voltage);
- bw_params->clk_table.entries[i].socclk_mhz = find_clk_for_voltage(clock_table, clock_table->SocClocks, clock_table->DfPstateTable[j].Voltage);
+
+ /* First search defaults for the clocks we don't read using closest lower or equal default dcfclk */
+ for (j = bw_params->clk_table.num_entries - 1; j > 0; j--)
+ if (bw_params->clk_table.entries[j].dcfclk_mhz <= clock_table->DcfClocks[i])
+ break;
+
+ bw_params->clk_table.entries[i].phyclk_mhz = bw_params->clk_table.entries[j].phyclk_mhz;
+ bw_params->clk_table.entries[i].phyclk_d18_mhz = bw_params->clk_table.entries[j].phyclk_d18_mhz;
+ bw_params->clk_table.entries[i].dtbclk_mhz = bw_params->clk_table.entries[j].dtbclk_mhz;
+
+ /* Now update clocks we do read */
+ bw_params->clk_table.entries[i].fclk_mhz = min_fclk;
+ bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[min_pstate].MemClk;
+ bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[min_pstate].Voltage;
+ bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i];
+ bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i];
bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
+ bw_params->clk_table.entries[i].wck_ratio = convert_wck_ratio(
+ clock_table->DfPstateTable[min_pstate].WckRatio);
+ };
+
+ /* Make sure to include at least one entry at highest pstate */
+ if (max_pstate != min_pstate || i == 0) {
+ if (i > MAX_NUM_DPM_LVL - 1)
+ i = MAX_NUM_DPM_LVL - 1;
+
+ bw_params->clk_table.entries[i].fclk_mhz = max_fclk;
+ bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[max_pstate].MemClk;
+ bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[max_pstate].Voltage;
+ bw_params->clk_table.entries[i].dcfclk_mhz = find_max_clk_value(clock_table->DcfClocks, NUM_DCFCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].socclk_mhz = find_max_clk_value(clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
+ bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
+ bw_params->clk_table.entries[i].wck_ratio = convert_wck_ratio(
+ clock_table->DfPstateTable[max_pstate].WckRatio);
+ i++;
}
+ bw_params->clk_table.num_entries = i--;
+
+ /* Make sure all highest clocks are included*/
+ bw_params->clk_table.entries[i].socclk_mhz = find_max_clk_value(clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dispclk_mhz = find_max_clk_value(clock_table->DispClocks, NUM_DISPCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dppclk_mhz = find_max_clk_value(clock_table->DppClocks, NUM_DPPCLK_DPM_LEVELS);
+ ASSERT(clock_table->DcfClocks[i] == find_max_clk_value(clock_table->DcfClocks, NUM_DCFCLK_DPM_LEVELS));
+ bw_params->clk_table.entries[i].phyclk_mhz = def_max.phyclk_mhz;
+ bw_params->clk_table.entries[i].phyclk_d18_mhz = def_max.phyclk_d18_mhz;
+ bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz;
+ /*
+ * Set any 0 clocks to max default setting. Not an issue for
+ * power since we aren't doing switching in such case anyway
+ */
+ for (i = 0; i < bw_params->clk_table.num_entries; i++) {
+ if (!bw_params->clk_table.entries[i].fclk_mhz) {
+ bw_params->clk_table.entries[i].fclk_mhz = def_max.fclk_mhz;
+ bw_params->clk_table.entries[i].memclk_mhz = def_max.memclk_mhz;
+ bw_params->clk_table.entries[i].voltage = def_max.voltage;
+ }
+ if (!bw_params->clk_table.entries[i].dcfclk_mhz)
+ bw_params->clk_table.entries[i].dcfclk_mhz = def_max.dcfclk_mhz;
+ if (!bw_params->clk_table.entries[i].socclk_mhz)
+ bw_params->clk_table.entries[i].socclk_mhz = def_max.socclk_mhz;
+ if (!bw_params->clk_table.entries[i].dispclk_mhz)
+ bw_params->clk_table.entries[i].dispclk_mhz = def_max.dispclk_mhz;
+ if (!bw_params->clk_table.entries[i].dppclk_mhz)
+ bw_params->clk_table.entries[i].dppclk_mhz = def_max.dppclk_mhz;
+ if (!bw_params->clk_table.entries[i].phyclk_mhz)
+ bw_params->clk_table.entries[i].phyclk_mhz = def_max.phyclk_mhz;
+ if (!bw_params->clk_table.entries[i].phyclk_d18_mhz)
+ bw_params->clk_table.entries[i].phyclk_d18_mhz = def_max.phyclk_d18_mhz;
+ if (!bw_params->clk_table.entries[i].dtbclk_mhz)
+ bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz;
+ }
+ ASSERT(bw_params->clk_table.entries[i-1].dcfclk_mhz);
bw_params->vram_type = bios_info->memory_type;
- bw_params->num_channels = bios_info->ma_channel_number;
+ bw_params->num_channels = bios_info->ma_channel_number ? bios_info->ma_channel_number : 4;
for (i = 0; i < WM_SET_COUNT; i++) {
bw_params->wm_table.entries[i].wm_inst = i;
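
The rebuilt populate_bw_params() keys the table on dcfclk levels, picks the lowest fclk pstate whose voltage fits each level, appends one entry for the highest pstate, and finally back-fills any remaining zero clock with the previous default maximum (per the comment above, switching is not a concern for those entries). A tiny sketch of just the back-fill step, with a made-up entry type:

#include <stdint.h>
#include <stdio.h>

struct entry { uint32_t fclk_mhz, dcfclk_mhz; };

/* Replace zero clocks with the default maximum so no level reports 0 MHz. */
static void backfill_defaults(struct entry *tab, int n, const struct entry *def_max)
{
        for (int i = 0; i < n; i++) {
                if (!tab[i].fclk_mhz)
                        tab[i].fclk_mhz = def_max->fclk_mhz;
                if (!tab[i].dcfclk_mhz)
                        tab[i].dcfclk_mhz = def_max->dcfclk_mhz;
        }
}

int main(void)
{
        struct entry def_max = { 1600, 1200 };
        struct entry tab[2] = { { 800, 0 }, { 0, 600 } };

        backfill_defaults(tab, 2, &def_max);
        printf("%u %u / %u %u\n", tab[0].fclk_mhz, tab[0].dcfclk_mhz,
               tab[1].fclk_mhz, tab[1].dcfclk_mhz); /* 800 1200 / 1600 600 */
        return 0;
}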
@@ -641,7 +689,7 @@ static struct clk_mgr_funcs dcn314_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
.update_clocks = dcn314_update_clocks,
- .init_clocks = dcn314_init_clocks,
+ .init_clocks = dcn31_init_clocks,
.enable_pme_wa = dcn314_enable_pme_wa,
.are_clock_states_equal = dcn314_are_clock_states_equal,
.notify_wm_ranges = dcn314_notify_wm_ranges
@@ -681,10 +729,10 @@ void dcn314_clk_mgr_construct(
}
ASSERT(clk_mgr->smu_wm_set.wm_set);
- smu_dpm_clks.dpm_clks = (DpmClocks_t *)dm_helpers_allocate_gpu_mem(
+ smu_dpm_clks.dpm_clks = (DpmClocks314_t *)dm_helpers_allocate_gpu_mem(
clk_mgr->base.base.ctx,
DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
- sizeof(DpmClocks_t),
+ sizeof(DpmClocks314_t),
&smu_dpm_clks.mc_address.quad_part);
if (smu_dpm_clks.dpm_clks == NULL) {
@@ -729,7 +777,7 @@ void dcn314_clk_mgr_construct(
if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
dcn314_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);
- if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
+ if (ctx->dc_bios && ctx->dc_bios->integrated_info && ctx->dc->config.use_default_clock_table == false) {
dcn314_clk_mgr_helper_populate_bw_params(
&clk_mgr->base,
ctx->dc_bios->integrated_info,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
index c695a4498c50..171f84340eb2 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
@@ -42,7 +42,7 @@ struct clk_mgr_dcn314 {
bool dcn314_are_clock_states_equal(struct dc_clocks *a,
struct dc_clocks *b);
-void dcn314_init_clocks(struct clk_mgr *clk_mgr);
+
void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h
index a7958dc96581..047d19ea919c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h
@@ -36,6 +36,37 @@ typedef enum {
WCK_RATIO_MAX
} WCK_RATIO_e;
+typedef struct {
+ uint32_t FClk;
+ uint32_t MemClk;
+ uint32_t Voltage;
+ uint8_t WckRatio;
+ uint8_t Spare[3];
+} DfPstateTable314_t;
+
+//Freq in MHz
+//Voltage in milli volts with 2 fractional bits
+typedef struct {
+ uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS];
+ uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS];
+ uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS];
+ uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS];
+ uint32_t VClocks[NUM_VCN_DPM_LEVELS];
+ uint32_t DClocks[NUM_VCN_DPM_LEVELS];
+ uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS];
+ DfPstateTable314_t DfPstateTable[NUM_DF_PSTATE_LEVELS];
+
+ uint8_t NumDcfClkLevelsEnabled;
+ uint8_t NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk
+ uint8_t NumSocClkLevelsEnabled;
+ uint8_t VcnClkLevelsEnabled; //Applies to both Vclk and Dclk
+ uint8_t NumDfPstatesEnabled;
+ uint8_t spare[3];
+
+ uint32_t MinGfxClk;
+ uint32_t MaxGfxClk;
+} DpmClocks314_t;
+
struct dcn314_watermarks {
// Watermarks
WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES];
@@ -43,7 +74,7 @@ struct dcn314_watermarks {
};
struct dcn314_smu_dpm_clks {
- DpmClocks_t *dpm_clks;
+ DpmClocks314_t *dpm_clks;
union large_integer mc_address;
};
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index e42f44fc1c08..aeecca68dea7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1074,8 +1074,15 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
struct dc_stream_state *old_stream =
dc->current_state->res_ctx.pipe_ctx[i].stream;
bool should_disable = true;
- bool pipe_split_change =
- context->res_ctx.pipe_ctx[i].top_pipe != dc->current_state->res_ctx.pipe_ctx[i].top_pipe;
+ bool pipe_split_change = false;
+
+ if ((context->res_ctx.pipe_ctx[i].top_pipe) &&
+ (dc->current_state->res_ctx.pipe_ctx[i].top_pipe))
+ pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx !=
+ dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx;
+ else
+ pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe !=
+ dc->current_state->res_ctx.pipe_ctx[i].top_pipe;
for (j = 0; j < context->stream_count; j++) {
if (old_stream == context->streams[j]) {
@@ -3229,7 +3236,7 @@ static void commit_planes_for_stream(struct dc *dc,
odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
}
- if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) {
+ if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
if (top_pipe_to_program &&
top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
if (should_use_dmub_lock(stream->link)) {
@@ -3247,7 +3254,6 @@ static void commit_planes_for_stream(struct dc *dc,
top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
top_pipe_to_program->stream_res.tg);
}
- }
if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
if (dc->hwss.subvp_pipe_control_lock)
@@ -3466,7 +3472,7 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
}
- if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) {
+ if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
top_pipe_to_program->stream_res.tg,
@@ -3493,21 +3499,19 @@ static void commit_planes_for_stream(struct dc *dc,
top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
top_pipe_to_program->stream_res.tg);
}
- }
- if (update_type != UPDATE_TYPE_FAST) {
+ if (update_type != UPDATE_TYPE_FAST)
dc->hwss.post_unlock_program_front_end(dc, context);
- /* Since phantom pipe programming is moved to post_unlock_program_front_end,
- * move the SubVP lock to after the phantom pipes have been setup
- */
- if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
- if (dc->hwss.subvp_pipe_control_lock)
- dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
- } else {
- if (dc->hwss.subvp_pipe_control_lock)
- dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
- }
+ /* Since phantom pipe programming is moved to post_unlock_program_front_end,
+ * move the SubVP lock to after the phantom pipes have been setup
+ */
+ if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
+ } else {
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
}
// Fire manual trigger only when bottom plane is flipped
@@ -4292,7 +4296,7 @@ bool dc_is_dmub_outbox_supported(struct dc *dc)
!dc->debug.dpia_debug.bits.disable_dpia)
return true;
- if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_2 &&
+ if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1 &&
!dc->debug.dpia_debug.bits.disable_dpia)
return true;
@@ -4340,6 +4344,7 @@ void dc_enable_dmub_outbox(struct dc *dc)
struct dc_context *dc_ctx = dc->ctx;
dmub_enable_outbox_notification(dc_ctx->dmub_srv);
+ DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__);
}
/**
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 9e51338441d0..66d2ae7aacf5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -3372,7 +3372,7 @@ bool dc_link_setup_psr(struct dc_link *link,
switch(link->ctx->asic_id.chip_family) {
case FAMILY_YELLOW_CARP:
case AMDGPU_FAMILY_GC_10_3_6:
- case AMDGPU_FAMILY_GC_11_0_2:
+ case AMDGPU_FAMILY_GC_11_0_1:
if(!dc->debug.disable_z10)
psr_context->psr_level.bits.SKIP_CRTC_DISABLE = false;
break;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index ffc0f1c0ea93..7dbab15bfa68 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -169,7 +169,7 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
if (ASICREV_IS_GC_11_0_2(asic_id.hw_internal_rev))
dc_version = DCN_VERSION_3_21;
break;
- case AMDGPU_FAMILY_GC_11_0_2:
+ case AMDGPU_FAMILY_GC_11_0_1:
dc_version = DCN_VERSION_3_14;
break;
default:
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 8e1e40083ec8..5908b60db313 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -47,7 +47,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.196"
+#define DC_VER "3.2.198"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -213,6 +213,7 @@ struct dc_caps {
uint32_t cache_num_ways;
uint16_t subvp_fw_processing_delay_us;
uint16_t subvp_prefetch_end_to_mall_start_us;
+ uint8_t subvp_swath_height_margin_lines; // subvp start line must be aligned to 2 x swath height
uint16_t subvp_pstate_allow_width_us;
uint16_t subvp_vertical_int_margin_us;
bool seamless_odm;
@@ -352,6 +353,7 @@ struct dc_config {
bool use_pipe_ctx_sync_logic;
bool ignore_dpref_ss;
bool enable_mipi_converter_optimization;
+ bool use_default_clock_table;
};
enum visual_confirm {
@@ -609,6 +611,7 @@ struct dc_bounding_box_overrides {
int percent_of_ideal_drambw;
int dram_clock_change_latency_ns;
int dummy_clock_change_latency_ns;
+ int fclk_clock_change_latency_ns;
/* This forces a hard min on the DCFCLK we use
* for DML. Unlike the debug option for forcing
* DCFCLK, this override affects watermark calculations
@@ -751,6 +754,7 @@ struct dc_debug_options {
uint32_t mst_start_top_delay;
uint8_t psr_power_use_phy_fsm;
enum dml_hostvm_override_opts dml_hostvm_override;
+ bool dml_disallow_alternate_prefetch_modes;
bool use_legacy_soc_bb_mechanism;
bool exit_idle_opt_for_cursor_updates;
bool enable_single_display_2to1_odm_policy;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 2d61c2a91cee..09b304507bad 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -29,6 +29,7 @@
#include "dm_helpers.h"
#include "dc_hw_types.h"
#include "core_types.h"
+#include "../basics/conversion.h"
#define CTX dc_dmub_srv->ctx
#define DC_LOGGER CTX->logger
@@ -275,8 +276,7 @@ void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst)
union dmub_rb_cmd cmd = { 0 };
cmd.drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
- // TODO: Uncomment once FW headers are promoted
- //cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_SET_MANUAL_TRIGGER;
+ cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_SET_MANUAL_TRIGGER;
cmd.drr_update.dmub_optc_state_req.tg_inst = tg_inst;
cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);
@@ -601,6 +601,7 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
&cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index];
struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
struct dc_crtc_timing *phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
+ uint32_t out_num, out_den;
pipe_data->mode = SUBVP;
pipe_data->pipe_config.subvp_data.pix_clk_100hz = subvp_pipe->stream->timing.pix_clk_100hz;
@@ -612,6 +613,16 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
main_timing->v_total - main_timing->v_front_porch - main_timing->v_addressable;
pipe_data->pipe_config.subvp_data.mall_region_lines = phantom_timing->v_addressable;
pipe_data->pipe_config.subvp_data.main_pipe_index = subvp_pipe->pipe_idx;
+ pipe_data->pipe_config.subvp_data.is_drr = subvp_pipe->stream->ignore_msa_timing_param;
+
+ /* Calculate the scaling factor from the src and dst height.
+ * e.g. If 3840x2160 being downscaled to 1920x1080, the scaling factor is 1/2.
+ * Reduce the fraction 1080/2160 = 1/2 for the "scaling factor"
+ */
+ reduce_fraction(subvp_pipe->stream->src.height, subvp_pipe->stream->dst.height, &out_num, &out_den);
+ // TODO: Uncomment below lines once DMCUB include headers are promoted
+ //pipe_data->pipe_config.subvp_data.scale_factor_numerator = out_num;
+ //pipe_data->pipe_config.subvp_data.scale_factor_denominator = out_den;
// Prefetch lines is equal to VACTIVE + BP + VSYNC
pipe_data->pipe_config.subvp_data.prefetch_lines =
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index a0af0f6afeef..9544abf75e84 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -344,6 +344,7 @@ enum dc_detect_reason {
DETECT_REASON_HPDRX,
DETECT_REASON_FALLBACK,
DETECT_REASON_RETRAIN,
+ DETECT_REASON_TDR,
};
bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 213de8cabfad..165392380842 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -543,9 +543,11 @@ static void dce112_get_pix_clk_dividers_helper (
switch (pix_clk_params->color_depth) {
case COLOR_DEPTH_101010:
actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 5) >> 2;
+ actual_pixel_clock_100hz -= actual_pixel_clock_100hz % 10;
break;
case COLOR_DEPTH_121212:
actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 6) >> 2;
+ actual_pixel_clock_100hz -= actual_pixel_clock_100hz % 10;
break;
case COLOR_DEPTH_161616:
actual_pixel_clock_100hz = actual_pixel_clock_100hz * 2;
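
For 10- and 12-bit color depth the pixel clock is scaled by 5/4 or 6/4 and, with this change, then rounded down to a multiple of 10; since the value is kept in units of 100 Hz, that snaps it to a whole kHz. A small arithmetic check (helper name and inputs are illustrative):

#include <stdint.h>
#include <stdio.h>

static uint32_t scale_and_snap(uint32_t clk_100hz, uint32_t num, uint32_t shift)
{
        uint32_t v = (clk_100hz * num) >> shift; /* e.g. *5 >> 2 for 10 bpc */

        return v - (v % 10);                     /* drop the sub-kHz remainder */
}

int main(void)
{
        /* 148.5 MHz expressed in 100 Hz units; the second, slightly odd input
         * shows the snap down to a multiple of 10. */
        printf("%u\n", scale_and_snap(1485000, 5, 2)); /* 1856250 */
        printf("%u\n", scale_and_snap(1485001, 5, 2)); /* 1856250 */
        return 0;
}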
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index d4a6504dfe00..db7ca4b0cdb9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -361,8 +361,6 @@ void dpp1_cnv_setup (
select = INPUT_CSC_SELECT_ICSC;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- pixel_format = 22;
- break;
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
pixel_format = 26; /* ARGB16161616_UNORM */
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index b54c12400323..564e061ccb58 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -278,9 +278,6 @@ void hubp1_program_pixel_format(
SURFACE_PIXEL_FORMAT, 10);
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- REG_UPDATE(DCSURF_SURFACE_CONFIG,
- SURFACE_PIXEL_FORMAT, 22);
- break;
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: /*we use crossbar already*/
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 26); /* ARGB16161616_UNORM */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index bed783747f16..5b5d952b2b8c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -110,6 +110,7 @@ void dcn10_lock_all_pipes(struct dc *dc,
*/
if (pipe_ctx->top_pipe ||
!pipe_ctx->stream ||
+ !pipe_ctx->plane_state ||
!tg->funcs->is_tg_enabled(tg))
continue;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
index 769974375b4b..8e9384094f6d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -131,6 +131,12 @@ struct mpcc *mpc1_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
while (tmp_mpcc != NULL) {
if (tmp_mpcc->dpp_id == dpp_id)
return tmp_mpcc;
+
+ /* avoid circular linked list */
+ ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot);
+ if (tmp_mpcc == tmp_mpcc->mpcc_bot)
+ break;
+
tmp_mpcc = tmp_mpcc->mpcc_bot;
}
return NULL;
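
Both MPC tree walkers now assert and break out if an entry points at itself through mpcc_bot, turning a potential infinite loop over a corrupted list into a NULL return. A hedged stand-alone sketch of the same guard on a toy linked list:

#include <stdio.h>

struct node { int id; struct node *next; };

/* Walk the chain but stop if a node links back to itself. */
static struct node *find(struct node *head, int id)
{
        for (struct node *n = head; n; n = n->next) {
                if (n->id == id)
                        return n;
                if (n == n->next)     /* self-referencing entry: give up */
                        break;
        }
        return NULL;
}

int main(void)
{
        struct node c = { 3, NULL };
        struct node b = { 2, &c };
        struct node a = { 1, &b };

        b.next = &b;                  /* simulate the corruption */
        printf("%s\n", find(&a, 3) ? "found" : "not found"); /* "not found", no hang */
        return 0;
}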
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index e1a9a45b03b6..3fc300cd1ce9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -465,6 +465,11 @@ void optc1_enable_optc_clock(struct timing_generator *optc, bool enable)
OTG_CLOCK_ON, 1,
1, 1000);
} else {
+
+	//Last chance to clear underflow; otherwise it will stay set because the clock is off.
+ if (optc->funcs->is_optc_underflow_occurred(optc) == true)
+ optc->funcs->clear_optc_underflow(optc);
+
REG_UPDATE_2(OTG_CLOCK_CONTROL,
OTG_CLOCK_GATE_DIS, 0,
OTG_CLOCK_EN, 0);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
index ea1f14af0db7..eaa7032f0f1a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
@@ -166,8 +166,6 @@ static void dpp2_cnv_setup (
select = DCN2_ICSC_SELECT_ICSC_A;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- pixel_format = 22;
- break;
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
pixel_format = 26; /* ARGB16161616_UNORM */
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index 936af65381ef..9570c2118ccc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -463,9 +463,6 @@ void hubp2_program_pixel_format(
SURFACE_PIXEL_FORMAT, 10);
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- REG_UPDATE(DCSURF_SURFACE_CONFIG,
- SURFACE_PIXEL_FORMAT, 22);
- break;
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: /*we use crossbar already*/
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 26); /* ARGB16161616_UNORM */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
index 3d307dd58e9a..116f67a0b989 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
@@ -531,6 +531,12 @@ static struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
while (tmp_mpcc != NULL) {
if (tmp_mpcc->dpp_id == 0xf || tmp_mpcc->dpp_id == dpp_id)
return tmp_mpcc;
+
+ /* avoid circular linked list */
+ ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot);
+ if (tmp_mpcc == tmp_mpcc->mpcc_bot)
+ break;
+
tmp_mpcc = tmp_mpcc->mpcc_bot;
}
return NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
index c5e200d09038..5752271f22df 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
@@ -67,9 +67,15 @@ static uint32_t convert_and_clamp(
void dcn21_dchvm_init(struct hubbub *hubbub)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
- uint32_t riommu_active;
+ uint32_t riommu_active, prefetch_done;
int i;
+ REG_GET(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, &prefetch_done);
+
+ if (prefetch_done) {
+ hubbub->riommu_active = true;
+ return;
+ }
//Init DCHVM block
REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
index 77b00f86c216..4a668d6563df 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
@@ -244,8 +244,6 @@ void dpp3_cnv_setup (
select = INPUT_CSC_SELECT_ICSC;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- pixel_format = 22;
- break;
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
pixel_format = 26; /* ARGB16161616_UNORM */
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
index 6a4dcafb9bba..dc3e8df706b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
@@ -86,7 +86,7 @@ bool hubp3_program_surface_flip_and_addr(
VMID, address->vmid);
if (address->type == PLN_ADDR_TYPE_GRPH_STEREO) {
- REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0x1);
+ REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0);
REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, 0x1);
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
index 0a67f8a5656d..d97076648acb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
@@ -372,7 +372,7 @@ static struct stream_encoder *dcn303_stream_encoder_create(enum engine_id eng_id
int afmt_inst;
/* Mapping of VPG, AFMT, DME register blocks to DIO block instance */
- if (eng_id <= ENGINE_ID_DIGE) {
+ if (eng_id <= ENGINE_ID_DIGB) {
vpg_inst = eng_id;
afmt_inst = eng_id;
} else
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h
index 7c77c71591a0..82c3b3ac1f0d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h
@@ -162,7 +162,8 @@
SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, AIP_ENABLE, mask_sh),\
SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ACM_ENABLE, mask_sh),\
SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL, CRC_ENABLE, mask_sh),\
- SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL, CRC_CONT_MODE_ENABLE, mask_sh)
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL, CRC_CONT_MODE_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_HBLANK_CONTROL, HBLANK_MINIMUM_SYMBOL_WIDTH, mask_sh)
#define DCN3_1_HPO_DP_STREAM_ENC_REG_FIELD_LIST(type) \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index 468a893ff785..aedff18aff56 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -2153,7 +2153,7 @@ static bool dcn31_resource_construct(
pool->base.usb4_dpia_count = 4;
}
- if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_2)
+ if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1)
pool->base.usb4_dpia_count = 4;
/* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
index 41f8ec99da6b..901436591ed4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
@@ -32,7 +32,6 @@
container_of(pool, struct dcn31_resource_pool, base)
extern struct _vcs_dpi_ip_params_st dcn3_1_ip;
-extern struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc;
struct dcn31_resource_pool {
struct resource_pool base;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/Makefile b/drivers/gpu/drm/amd/display/dc/dcn314/Makefile
index e3b5a95e03b1..702c28c2560e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/Makefile
@@ -13,31 +13,6 @@
DCN314 = dcn314_resource.o dcn314_hwseq.o dcn314_init.o \
dcn314_dio_stream_encoder.o dcn314_dccg.o dcn314_optc.o
-ifdef CONFIG_X86
-CFLAGS_$(AMDDALPATH)/dc/dcn314/dcn314_resource.o := -mhard-float -msse
-endif
-
-ifdef CONFIG_PPC64
-CFLAGS_$(AMDDALPATH)/dc/dcn314/dcn314_resource.o := -mhard-float -maltivec
-endif
-
-ifdef CONFIG_CC_IS_GCC
-ifeq ($(call cc-ifversion, -lt, 0701, y), y)
-IS_OLD_GCC = 1
-endif
-endif
-
-ifdef CONFIG_X86
-ifdef IS_OLD_GCC
-# Stack alignment mismatch, proceed with caution.
-# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
-# (8B stack alignment).
-CFLAGS_$(AMDDALPATH)/dc/dcn314/dcn314_resource.o += -mpreferred-stack-boundary=4
-else
-CFLAGS_$(AMDDALPATH)/dc/dcn314/dcn314_resource.o += -msse2
-endif
-endif
-
AMD_DAL_DCN314 = $(addprefix $(AMDDALPATH)/dc/dcn314/,$(DCN314))
AMD_DISPLAY_FILES += $(AMD_DAL_DCN314)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
index 755c715ad8dc..39931d48f385 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
@@ -343,7 +343,10 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
{
struct dc_stream_state *stream = pipe_ctx->stream;
unsigned int odm_combine_factor = 0;
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+ bool two_pix_per_container = false;
+ two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
odm_combine_factor = get_odm_config(pipe_ctx, NULL);
if (is_dp_128b_132b_signal(pipe_ctx)) {
@@ -355,16 +358,13 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
else
*k2_div = PIXEL_RATE_DIV_BY_4;
} else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
- if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ if (two_pix_per_container) {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_2;
- } else if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) {
- *k1_div = PIXEL_RATE_DIV_BY_2;
- *k2_div = PIXEL_RATE_DIV_BY_2;
} else {
- if (odm_combine_factor == 1)
- *k2_div = PIXEL_RATE_DIV_BY_4;
- else if (odm_combine_factor == 2)
+ *k1_div = PIXEL_RATE_DIV_BY_1;
+ *k2_div = PIXEL_RATE_DIV_BY_4;
+ if ((odm_combine_factor == 2) || dc->debug.enable_dp_dig_pixel_rate_div_policy)
*k2_div = PIXEL_RATE_DIV_BY_2;
}
}
@@ -374,3 +374,31 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
return odm_combine_factor;
}
+
+void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx)
+{
+ uint32_t pix_per_cycle = 1;
+ uint32_t odm_combine_factor = 1;
+
+ if (!pipe_ctx || !pipe_ctx->stream || !pipe_ctx->stream_res.stream_enc)
+ return;
+
+ odm_combine_factor = get_odm_config(pipe_ctx, NULL);
+ if (optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing) || odm_combine_factor > 1
+ || dcn314_is_dp_dig_pixel_rate_div_policy(pipe_ctx))
+ pix_per_cycle = 2;
+
+ if (pipe_ctx->stream_res.stream_enc->funcs->set_input_mode)
+ pipe_ctx->stream_res.stream_enc->funcs->set_input_mode(pipe_ctx->stream_res.stream_enc,
+ pix_per_cycle);
+}
+
+bool dcn314_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
+{
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal) && !is_dp_128b_132b_signal(pipe_ctx) &&
+ dc->debug.enable_dp_dig_pixel_rate_div_policy)
+ return true;
+ return false;
+}
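
dcn314_set_pixels_per_cycle() above switches the stream encoder to two pixels per cycle whenever the timing packs two pixels per container, ODM combine is in use, or the new DP DIG pixel-rate divider policy applies. A sketch of just that decision, with the predicate inputs passed in explicitly since the real ones come from pipe and stream state:

#include <stdbool.h>
#include <stdio.h>

/* Pick 1 or 2 pixels per cycle from the three conditions the driver checks. */
static unsigned int pixels_per_cycle(bool two_pix_per_container,
                                     unsigned int odm_combine_factor,
                                     bool dp_dig_div_policy)
{
        if (two_pix_per_container || odm_combine_factor > 1 || dp_dig_div_policy)
                return 2;
        return 1;
}

int main(void)
{
        printf("%u\n", pixels_per_cycle(false, 1, false)); /* 1 */
        printf("%u\n", pixels_per_cycle(false, 2, false)); /* 2, ODM combine */
        printf("%u\n", pixels_per_cycle(false, 1, true));  /* 2, divider policy */
        return 0;
}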
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
index be0f5e4d48e1..d014580592ac 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
@@ -39,4 +39,8 @@ void dcn314_enable_power_gating_plane(struct dce_hwseq *hws, bool enable);
unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div);
+void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx);
+
+bool dcn314_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx);
+
#endif /* __DC_HWSS_DCN314_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
index b9debeb081fd..fcf67eb3478f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
@@ -145,6 +145,8 @@ static const struct hwseq_private_funcs dcn314_private_funcs = {
.set_shaper_3dlut = dcn20_set_shaper_3dlut,
.setup_hpo_hw_control = dcn31_setup_hpo_hw_control,
.calculate_dccg_k1_k2_values = dcn314_calculate_dccg_k1_k2_values,
+ .set_pixels_per_cycle = dcn314_set_pixels_per_cycle,
+ .is_dp_dig_pixel_rate_div_policy = dcn314_is_dp_dig_pixel_rate_div_policy,
};
void dcn314_hw_sequencer_construct(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
index 63861cdfb09f..3a9e3870b3a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
@@ -70,6 +70,7 @@
#include "dce110/dce110_resource.h"
#include "dml/display_mode_vba.h"
#include "dml/dcn31/dcn31_fpu.h"
+#include "dml/dcn314/dcn314_fpu.h"
#include "dcn314/dcn314_dccg.h"
#include "dcn10/dcn10_resource.h"
#include "dcn31/dcn31_panel_cntl.h"
@@ -132,155 +133,6 @@ static const struct IP_BASE DCN_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C
#define DC_LOGGER_INIT(logger)
-#define DCN3_14_DEFAULT_DET_SIZE 384
-#define DCN3_14_MAX_DET_SIZE 384
-#define DCN3_14_MIN_COMPBUF_SIZE_KB 128
-#define DCN3_14_CRB_SEGMENT_SIZE_KB 64
-struct _vcs_dpi_ip_params_st dcn3_14_ip = {
- .VBlankNomDefaultUS = 668,
- .gpuvm_enable = 1,
- .gpuvm_max_page_table_levels = 1,
- .hostvm_enable = 1,
- .hostvm_max_page_table_levels = 2,
- .rob_buffer_size_kbytes = 64,
- .det_buffer_size_kbytes = DCN3_14_DEFAULT_DET_SIZE,
- .config_return_buffer_size_in_kbytes = 1792,
- .compressed_buffer_segment_size_in_kbytes = 64,
- .meta_fifo_size_in_kentries = 32,
- .zero_size_buffer_entries = 512,
- .compbuf_reserved_space_64b = 256,
- .compbuf_reserved_space_zs = 64,
- .dpp_output_buffer_pixels = 2560,
- .opp_output_buffer_lines = 1,
- .pixel_chunk_size_kbytes = 8,
- .meta_chunk_size_kbytes = 2,
- .min_meta_chunk_size_bytes = 256,
- .writeback_chunk_size_kbytes = 8,
- .ptoi_supported = false,
- .num_dsc = 4,
- .maximum_dsc_bits_per_component = 10,
- .dsc422_native_support = false,
- .is_line_buffer_bpp_fixed = true,
- .line_buffer_fixed_bpp = 48,
- .line_buffer_size_bits = 789504,
- .max_line_buffer_lines = 12,
- .writeback_interface_buffer_size_kbytes = 90,
- .max_num_dpp = 4,
- .max_num_otg = 4,
- .max_num_hdmi_frl_outputs = 1,
- .max_num_wb = 1,
- .max_dchub_pscl_bw_pix_per_clk = 4,
- .max_pscl_lb_bw_pix_per_clk = 2,
- .max_lb_vscl_bw_pix_per_clk = 4,
- .max_vscl_hscl_bw_pix_per_clk = 4,
- .max_hscl_ratio = 6,
- .max_vscl_ratio = 6,
- .max_hscl_taps = 8,
- .max_vscl_taps = 8,
- .dpte_buffer_size_in_pte_reqs_luma = 64,
- .dpte_buffer_size_in_pte_reqs_chroma = 34,
- .dispclk_ramp_margin_percent = 1,
- .max_inter_dcn_tile_repeaters = 8,
- .cursor_buffer_size = 16,
- .cursor_chunk_size = 2,
- .writeback_line_buffer_buffer_size = 0,
- .writeback_min_hscl_ratio = 1,
- .writeback_min_vscl_ratio = 1,
- .writeback_max_hscl_ratio = 1,
- .writeback_max_vscl_ratio = 1,
- .writeback_max_hscl_taps = 1,
- .writeback_max_vscl_taps = 1,
- .dppclk_delay_subtotal = 46,
- .dppclk_delay_scl = 50,
- .dppclk_delay_scl_lb_only = 16,
- .dppclk_delay_cnvc_formatter = 27,
- .dppclk_delay_cnvc_cursor = 6,
- .dispclk_delay_subtotal = 119,
- .dynamic_metadata_vm_enabled = false,
- .odm_combine_4to1_supported = false,
- .dcc_supported = true,
-};
-
-struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc = {
- /*TODO: correct dispclk/dppclk voltage level determination*/
- .clock_limits = {
- {
- .state = 0,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 600.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 186.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 1,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 209.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 2,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 209.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 3,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 371.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 4,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 417.0,
- .dtbclk_mhz = 625.0,
- },
- },
- .num_states = 5,
- .sr_exit_time_us = 9.0,
- .sr_enter_plus_exit_time_us = 11.0,
- .sr_exit_z8_time_us = 442.0,
- .sr_enter_plus_exit_z8_time_us = 560.0,
- .writeback_latency_us = 12.0,
- .dram_channel_width_bytes = 4,
- .round_trip_ping_latency_dcfclk_cycles = 106,
- .urgent_latency_pixel_data_only_us = 4.0,
- .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
- .urgent_latency_vm_data_only_us = 4.0,
- .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
- .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
- .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
- .pct_ideal_sdp_bw_after_urgent = 80.0,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 65.0,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
- .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
- .max_avg_sdp_bw_use_normal_percent = 60.0,
- .max_avg_dram_bw_use_normal_percent = 60.0,
- .fabric_datapath_to_dcn_data_return_bytes = 32,
- .return_bus_width_bytes = 64,
- .downspread_percent = 0.38,
- .dcn_downspread_percent = 0.5,
- .gpuvm_min_page_size_bytes = 4096,
- .hostvm_min_page_size_bytes = 4096,
- .do_urgent_latency_adjustment = false,
- .urgent_latency_adjustment_fabric_clock_component_us = 0,
- .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
-};
-
enum dcn31_clk_src_array_id {
DCN31_CLK_SRC_PLL0,
DCN31_CLK_SRC_PLL1,
@@ -1402,7 +1254,7 @@ static struct stream_encoder *dcn314_stream_encoder_create(
int afmt_inst;
/* Mapping of VPG, AFMT, DME register blocks to DIO block instance */
- if (eng_id <= ENGINE_ID_DIGF) {
+ if (eng_id < ENGINE_ID_DIGF) {
vpg_inst = eng_id;
afmt_inst = eng_id;
} else
@@ -1447,7 +1299,8 @@ static struct hpo_dp_stream_encoder *dcn31_hpo_dp_stream_encoder_create(
* VPG[8] -> HPO_DP[2]
* VPG[9] -> HPO_DP[3]
*/
- vpg_inst = hpo_dp_inst + 6;
+ //Uses offset index 5-8, but actually maps to vpg_inst 6-9
+ vpg_inst = hpo_dp_inst + 5;
/* Mapping of APG register blocks to HPO DP block instance:
* APG[0] -> HPO_DP[0]
@@ -1793,109 +1646,16 @@ static struct clock_source *dcn31_clock_source_create(
return NULL;
}
-static bool is_dual_plane(enum surface_pixel_format format)
-{
- return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
-}
-
static int dcn314_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
bool fast_validate)
{
- int i, pipe_cnt;
- struct resource_context *res_ctx = &context->res_ctx;
- struct pipe_ctx *pipe;
- bool upscaled = false;
-
- dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
-
- for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
- struct dc_crtc_timing *timing;
-
- if (!res_ctx->pipe_ctx[i].stream)
- continue;
- pipe = &res_ctx->pipe_ctx[i];
- timing = &pipe->stream->timing;
-
- if (dc_extended_blank_supported(dc) && pipe->stream->adjust.v_total_max == pipe->stream->adjust.v_total_min
- && pipe->stream->adjust.v_total_min > timing->v_total)
- pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min;
-
- if (pipe->plane_state &&
- (pipe->plane_state->src_rect.height < pipe->plane_state->dst_rect.height ||
- pipe->plane_state->src_rect.width < pipe->plane_state->dst_rect.width))
- upscaled = true;
-
- /*
- * Immediate flip can be set dynamically after enabling the plane.
- * We need to require support for immediate flip or underflow can be
- * intermittently experienced depending on peak b/w requirements.
- */
- pipes[pipe_cnt].pipe.src.immediate_flip = true;
-
- pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
- pipes[pipe_cnt].pipe.src.hostvm = dc->res_pool->hubbub->riommu_active;
- pipes[pipe_cnt].pipe.src.gpuvm = true;
- pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
- pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
- pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
- pipes[pipe_cnt].pipe.src.dcc_rate = 3;
- pipes[pipe_cnt].dout.dsc_input_bpc = 0;
-
- if (pipes[pipe_cnt].dout.dsc_enable) {
- switch (timing->display_color_depth) {
- case COLOR_DEPTH_888:
- pipes[pipe_cnt].dout.dsc_input_bpc = 8;
- break;
- case COLOR_DEPTH_101010:
- pipes[pipe_cnt].dout.dsc_input_bpc = 10;
- break;
- case COLOR_DEPTH_121212:
- pipes[pipe_cnt].dout.dsc_input_bpc = 12;
- break;
- default:
- ASSERT(0);
- break;
- }
- }
-
- pipe_cnt++;
- }
- context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_14_DEFAULT_DET_SIZE;
-
- dc->config.enable_4to1MPC = false;
- if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
- if (is_dual_plane(pipe->plane_state->format)
- && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) {
- dc->config.enable_4to1MPC = true;
- } else if (!is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 5120) {
- /* Limit to 5k max to avoid forced pipe split when there is not enough detile for swath */
- context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
- pipes[0].pipe.src.unbounded_req_mode = true;
- }
- } else if (context->stream_count >= dc->debug.crb_alloc_policy_min_disp_count
- && dc->debug.crb_alloc_policy > DET_SIZE_DEFAULT) {
- context->bw_ctx.dml.ip.det_buffer_size_kbytes = dc->debug.crb_alloc_policy * 64;
- } else if (context->stream_count >= 3 && upscaled) {
- context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
- }
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- if (!pipe->stream)
- continue;
+ int pipe_cnt;
- if (pipe->stream->signal == SIGNAL_TYPE_EDP && dc->debug.seamless_boot_odm_combine &&
- pipe->stream->apply_seamless_boot_optimization) {
-
- if (pipe->stream->apply_boot_odm_mode == dm_odm_combine_policy_2to1) {
- context->bw_ctx.dml.vba.ODMCombinePolicy = dm_odm_combine_policy_2to1;
- break;
- }
- }
- }
+ DC_FP_START();
+ pipe_cnt = dcn314_populate_dml_pipes_from_context_fpu(dc, context, pipes, fast_validate);
+ DC_FP_END();
return pipe_cnt;
}
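
This wrapper is the shape of the whole refactor in this series: the resource file stays free of floating-point math and only brackets a call into the new dcn314_fpu.c entry points with DC_FP_START()/DC_FP_END(). A minimal sketch of the pattern, using hypothetical foo_*() names rather than the real DML entry points:

	/* Sketch only: foo_validate() and foo_validate_fpu() are hypothetical.
	 * All double-precision DML math stays inside the _fpu function, which
	 * is built with the DML-specific dml_ccflags (see the Makefile hunk
	 * further down) and may only run between DC_FP_START() and DC_FP_END().
	 */
	static int foo_validate(struct dc *dc, struct dc_state *context)
	{
		int result;

		DC_FP_START();
		result = foo_validate_fpu(dc, context);
		DC_FP_END();

		return result;
	}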
@@ -1906,88 +1666,9 @@ static struct dc_cap_funcs cap_funcs = {
static void dcn314_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
- struct clk_limit_table *clk_table = &bw_params->clk_table;
- struct _vcs_dpi_voltage_scaling_st *clock_tmp = dcn3_14_soc._clock_tmp;
- unsigned int i, closest_clk_lvl;
- int max_dispclk_mhz = 0, max_dppclk_mhz = 0;
- int j;
-
- // Default clock levels are used for diags, which may lead to overclocking.
- if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
-
- dcn3_14_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
- dcn3_14_ip.max_num_dpp = dc->res_pool->pipe_count;
-
- if (bw_params->num_channels > 0)
- dcn3_14_soc.num_chans = bw_params->num_channels;
-
- ASSERT(dcn3_14_soc.num_chans);
- ASSERT(clk_table->num_entries);
-
- /* Prepass to find max clocks independent of voltage level. */
- for (i = 0; i < clk_table->num_entries; ++i) {
- if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
- max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
- if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
- max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
- }
-
- for (i = 0; i < clk_table->num_entries; i++) {
- /* loop backwards*/
- for (closest_clk_lvl = 0, j = dcn3_14_soc.num_states - 1; j >= 0; j--) {
- if ((unsigned int) dcn3_14_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
- closest_clk_lvl = j;
- break;
- }
- }
- if (clk_table->num_entries == 1) {
- /*smu gives one DPM level, let's take the highest one*/
- closest_clk_lvl = dcn3_14_soc.num_states - 1;
- }
-
- clock_tmp[i].state = i;
-
- /* Clocks dependent on voltage level. */
- clock_tmp[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
- if (clk_table->num_entries == 1 &&
- clock_tmp[i].dcfclk_mhz < dcn3_14_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) {
- /*SMU fix not released yet*/
- clock_tmp[i].dcfclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dcfclk_mhz;
- }
- clock_tmp[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
- clock_tmp[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
-
- if (clk_table->entries[i].memclk_mhz && clk_table->entries[i].wck_ratio)
- clock_tmp[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2 * clk_table->entries[i].wck_ratio;
-
- /* Clocks independent of voltage level. */
- clock_tmp[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
- dcn3_14_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
-
- clock_tmp[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
- dcn3_14_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
-
- clock_tmp[i].dram_bw_per_chan_gbps = dcn3_14_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
- clock_tmp[i].dscclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
- clock_tmp[i].dtbclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
- clock_tmp[i].phyclk_d18_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
- clock_tmp[i].phyclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
- }
- for (i = 0; i < clk_table->num_entries; i++)
- dcn3_14_soc.clock_limits[i] = clock_tmp[i];
- if (clk_table->num_entries)
- dcn3_14_soc.num_states = clk_table->num_entries;
- }
-
- if (max_dispclk_mhz) {
- dcn3_14_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
- dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
- }
-
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31);
- else
- dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31_FPGA);
+ DC_FP_START();
+ dcn314_update_bw_bounding_box_fpu(dc, bw_params);
+ DC_FP_END();
}
static struct resource_funcs dcn314_res_pool_funcs = {
@@ -2069,6 +1750,7 @@ static bool dcn314_resource_construct(
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.dp_hpo = true;
+ dc->caps.dp_hdmi21_pcon_support = true;
dc->caps.edp_dsc_support = true;
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
index c41108847ce0..0dd3153aa5c1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
@@ -29,6 +29,9 @@
#include "core_types.h"
+extern struct _vcs_dpi_ip_params_st dcn3_14_ip;
+extern struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc;
+
#define TO_DCN314_RES_POOL(pool)\
container_of(pool, struct dcn314_resource_pool, base)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h
index 39929fa67a51..22849eaa6f24 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h
@@ -32,7 +32,6 @@
container_of(pool, struct dcn315_resource_pool, base)
extern struct _vcs_dpi_ip_params_st dcn3_15_ip;
-extern struct _vcs_dpi_ip_params_st dcn3_15_soc;
struct dcn315_resource_pool {
struct resource_pool base;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h
index 0dc5a6c13ae7..aba6d634131b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h
@@ -32,7 +32,6 @@
container_of(pool, struct dcn316_resource_pool, base)
extern struct _vcs_dpi_ip_params_st dcn3_16_ip;
-extern struct _vcs_dpi_ip_params_st dcn3_16_soc;
struct dcn316_resource_pool {
struct resource_pool base;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
index d38341f68b17..ebd3945c71f1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
@@ -250,6 +250,7 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
uint32_t total_lines = 0;
uint32_t lines_per_way = 0;
uint32_t num_ways = 0;
+ uint32_t prev_addr_low = 0;
for (i = 0; i < ctx->stream_count; i++) {
stream = ctx->streams[i];
@@ -267,10 +268,20 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
plane = ctx->stream_status[i].plane_states[j];
// Calculate total surface size
- surface_size = plane->plane_size.surface_pitch *
+ if (prev_addr_low != plane->address.grph.addr.u.low_part) {
+ /* if the plane address differs from the previous FB, userspace allocated separate FBs */
+ surface_size += plane->plane_size.surface_pitch *
plane->plane_size.surface_size.height *
(plane->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4);
+ prev_addr_low = plane->address.grph.addr.u.low_part;
+ } else {
+ /* We have the same fb for all the planes.
+ * Xorg always creates one giant fb that holds all surfaces,
+ * so allocating it once is sufficient.
+ */
+ continue;
+ }
// Convert surface size + starting address to number of cache lines required
// (alignment accounted for)
cache_lines_used += dcn32_cache_lines_for_surface(dc, surface_size,
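
The hunk above stops double-counting surfaces that share a framebuffer: a surface only adds to the CAB estimate when its base address differs from the previous plane's, on the assumption that Xorg allocates one large FB that backs every surface. A rough sketch of that dedupe idea as a standalone helper (sum_unique_surface_bytes() is hypothetical, not part of the patch):

	/* Hypothetical helper: surfaces that share a base address are assumed to
	 * live in one framebuffer and are sized only once.
	 */
	static uint32_t sum_unique_surface_bytes(struct dc_plane_state **planes, int count)
	{
		uint32_t total = 0, prev_addr_low = 0;
		int i;

		for (i = 0; i < count; i++) {
			uint32_t addr_low = planes[i]->address.grph.addr.u.low_part;

			if (addr_low == prev_addr_low)
				continue; /* same FB as the previous plane, already counted */

			total += planes[i]->plane_size.surface_pitch *
				 planes[i]->plane_size.surface_size.height *
				 (planes[i]->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4);
			prev_addr_low = addr_low;
		}

		return total;
	}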
@@ -320,7 +331,10 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
{
union dmub_rb_cmd cmd;
- uint8_t ways;
+ uint8_t ways, i;
+ int j;
+ bool stereo_in_use = false;
+ struct dc_plane_state *plane = NULL;
if (!dc->ctx->dmub_srv)
return false;
@@ -349,7 +363,23 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
* and configure HUBP's to fetch from MALL
*/
ways = dcn32_calculate_cab_allocation(dc, dc->current_state);
- if (ways <= dc->caps.cache_num_ways) {
+
+ /* MALL not supported with Stereo3D. If any plane is using stereo,
+ * don't try to enter MALL.
+ */
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
+ plane = dc->current_state->stream_status[i].plane_states[j];
+
+ if (plane->address.type == PLN_ADDR_TYPE_GRPH_STEREO) {
+ stereo_in_use = true;
+ break;
+ }
+ }
+ if (stereo_in_use)
+ break;
+ }
+ if (ways <= dc->caps.cache_num_ways && !stereo_in_use) {
memset(&cmd, 0, sizeof(cmd));
cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS;
cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB;
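
The stereo check added above walks every plane of every stream in the current state before the CAB/MALL command is sent. Factored out, the same walk might look like the hypothetical helper below (illustration only; the patch keeps it inline):

	/* Hypothetical helper: true if any committed plane uses a stereo
	 * (PLN_ADDR_TYPE_GRPH_STEREO) address, in which case MALL must stay off.
	 */
	static bool any_plane_uses_stereo(struct dc_state *state)
	{
		int i, j;

		for (i = 0; i < state->stream_count; i++) {
			for (j = 0; j < state->stream_status[i].plane_count; j++) {
				if (state->stream_status[i].plane_states[j]->address.type ==
				    PLN_ADDR_TYPE_GRPH_STEREO)
					return true;
			}
		}

		return false;
	}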
@@ -683,9 +713,11 @@ void dcn32_update_mall_sel(struct dc *dc, struct dc_state *context)
if (pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
hubp->funcs->hubp_update_mall_sel(hubp, 1, false);
} else {
+ // MALL not supported with Stereo3D
hubp->funcs->hubp_update_mall_sel(hubp,
num_ways <= dc->caps.cache_num_ways &&
- pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED ? 2 : 0,
+ pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED &&
+ pipe->plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO ? 2 : 0,
cache_cursor);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
index eff1f4e17689..1fad7b48bd5b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
@@ -281,7 +281,7 @@ static struct timing_generator_funcs dcn32_tg_funcs = {
.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
.enable_optc_clock = optc1_enable_optc_clock,
- .set_drr = optc31_set_drr, // TODO: Update to optc32_set_drr once FW headers are promoted
+ .set_drr = optc32_set_drr,
.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
.set_vtotal_min_max = optc3_set_vtotal_min_max,
.set_static_screen_control = optc1_set_static_screen_control,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index 9a26d24b579f..8b887b552f2c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -867,7 +867,7 @@ static const struct dc_debug_options debug_defaults_drv = {
}
},
.use_max_lb = true,
- .force_disable_subvp = true,
+ .force_disable_subvp = false,
.exit_idle_opt_for_cursor_updates = true,
.enable_single_display_2to1_odm_policy = true,
.enable_dp_dig_pixel_rate_div_policy = 1,
@@ -2051,6 +2051,7 @@ static bool dcn32_resource_construct(
dc->caps.max_cab_allocation_bytes = 67108864; // 64MB = 1024 * 1024 * 64
dc->caps.subvp_fw_processing_delay_us = 15;
dc->caps.subvp_prefetch_end_to_mall_start_us = 15;
+ dc->caps.subvp_swath_height_margin_lines = 16;
dc->caps.subvp_pstate_allow_width_us = 20;
dc->caps.subvp_vertical_int_margin_us = 30;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index b3f8503cea9c..955f52e6064d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -63,7 +63,7 @@ uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_stat
if (pipe->stream && pipe->plane_state && !pipe->top_pipe &&
pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4;
- mall_region_pixels = pipe->stream->timing.h_addressable * pipe->stream->timing.v_addressable;
+ mall_region_pixels = pipe->plane_state->plane_size.surface_pitch * pipe->stream->timing.v_addressable;
// For bytes required in MALL, calculate based on number of MBlks required
num_mblks = (mall_region_pixels * bytes_per_pixel +
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
index 8157e40d2c7e..c8b7d6ff38f4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
@@ -868,7 +868,7 @@ static const struct dc_debug_options debug_defaults_drv = {
}
},
.use_max_lb = true,
- .force_disable_subvp = true,
+ .force_disable_subvp = false,
.exit_idle_opt_for_cursor_updates = true,
.enable_single_display_2to1_odm_policy = true,
.enable_dp_dig_pixel_rate_div_policy = 1,
@@ -1662,8 +1662,9 @@ static bool dcn321_resource_construct(
dc->caps.max_cab_allocation_bytes = 33554432; // 32MB = 1024 * 1024 * 32
dc->caps.subvp_fw_processing_delay_us = 15;
dc->caps.subvp_prefetch_end_to_mall_start_us = 15;
+ dc->caps.subvp_swath_height_margin_lines = 16;
dc->caps.subvp_pstate_allow_width_us = 20;
-
+ dc->caps.subvp_vertical_int_margin_us = 30;
dc->caps.max_slave_planes = 1;
dc->caps.max_slave_yuv_planes = 1;
dc->caps.max_slave_rgb_planes = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 359f6e9a1da0..86a3b5bfd699 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -61,7 +61,6 @@ CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn10/dcn10_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/dcn20_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_ccflags)
@@ -71,6 +70,7 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(fram
CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn314/dcn314_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/dcn30_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/dcn32_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_32.o := $(dml_ccflags) $(frame_warn_flag)
@@ -82,7 +82,6 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn302/dcn302_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn303/dcn303_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dsc/rc_calc_fpu.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/calcs/dcn_calcs.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_auto.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_math.o := $(dml_ccflags) -Wno-tautological-compare
@@ -131,6 +130,7 @@ DML += dcn321/dcn321_fpu.o
DML += dcn301/dcn301_fpu.o
DML += dcn302/dcn302_fpu.o
DML += dcn303/dcn303_fpu.o
+DML += dcn314/dcn314_fpu.o
DML += dsc/rc_calc_fpu.o
DML += calcs/dcn_calcs.o calcs/dcn_calc_math.o calcs/dcn_calc_auto.o
endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index ca44df4fca74..d34e0f1314d9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -30,6 +30,7 @@
#include "dchubbub.h"
#include "dcn20/dcn20_resource.h"
#include "dcn21/dcn21_resource.h"
+#include "clk_mgr/dcn21/rn_clk_mgr.h"
#include "dcn20_fpu.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
index 7ef66e511ec8..d211cf6d234c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
@@ -26,6 +26,7 @@
#include "clk_mgr.h"
#include "dcn20/dcn20_resource.h"
#include "dcn301/dcn301_resource.h"
+#include "clk_mgr/dcn301/vg_clk_mgr.h"
#include "dml/dcn20/dcn20_fpu.h"
#include "dcn301_fpu.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
index e36cfa5985ea..149a1b17cdf3 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
@@ -25,6 +25,9 @@
#include "resource.h"
#include "clk_mgr.h"
+#include "dcn31/dcn31_resource.h"
+#include "dcn315/dcn315_resource.h"
+#include "dcn316/dcn316_resource.h"
#include "dml/dcn20/dcn20_fpu.h"
#include "dcn31_fpu.h"
@@ -114,7 +117,7 @@ struct _vcs_dpi_ip_params_st dcn3_1_ip = {
.dcc_supported = true,
};
-struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc = {
+static struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc = {
/*TODO: correct dispclk/dppclk voltage level determination*/
.clock_limits = {
{
@@ -259,7 +262,7 @@ struct _vcs_dpi_ip_params_st dcn3_15_ip = {
.dcc_supported = true,
};
-struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
+static struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
.sr_exit_time_us = 9.0,
.sr_enter_plus_exit_time_us = 11.0,
.sr_exit_z8_time_us = 50.0,
@@ -355,7 +358,7 @@ struct _vcs_dpi_ip_params_st dcn3_16_ip = {
.dcc_supported = true,
};
-struct _vcs_dpi_soc_bounding_box_st dcn3_16_soc = {
+static struct _vcs_dpi_soc_bounding_box_st dcn3_16_soc = {
/*TODO: correct dispclk/dppclk voltage level determination*/
.clock_limits = {
{
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index 3fab19134480..d63b4209b14c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -26,7 +26,7 @@
#include "dc.h"
#include "dc_link.h"
#include "../display_mode_lib.h"
-#include "dml/dcn30/display_mode_vba_30.h"
+#include "../dcn30/display_mode_vba_30.h"
#include "display_mode_vba_31.h"
#include "../dml_inline_defs.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
index 66b82e4f05c6..35d10b4d018b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
@@ -27,7 +27,7 @@
#include "../display_mode_vba.h"
#include "../dml_inline_defs.h"
#include "display_rq_dlg_calc_31.h"
-#include "dml/dcn30/display_mode_vba_30.h"
+#include "../dcn30/display_mode_vba_30.h"
static bool is_dual_plane(enum source_format_class source_format)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
new file mode 100644
index 000000000000..34a5d0f87b5f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "clk_mgr.h"
+#include "resource.h"
+#include "dcn31/dcn31_hubbub.h"
+#include "dcn314_fpu.h"
+#include "dml/dcn20/dcn20_fpu.h"
+#include "dml/display_mode_vba.h"
+
+struct _vcs_dpi_ip_params_st dcn3_14_ip = {
+ .VBlankNomDefaultUS = 668,
+ .gpuvm_enable = 1,
+ .gpuvm_max_page_table_levels = 1,
+ .hostvm_enable = 1,
+ .hostvm_max_page_table_levels = 2,
+ .rob_buffer_size_kbytes = 64,
+ .det_buffer_size_kbytes = DCN3_14_DEFAULT_DET_SIZE,
+ .config_return_buffer_size_in_kbytes = 1792,
+ .compressed_buffer_segment_size_in_kbytes = 64,
+ .meta_fifo_size_in_kentries = 32,
+ .zero_size_buffer_entries = 512,
+ .compbuf_reserved_space_64b = 256,
+ .compbuf_reserved_space_zs = 64,
+ .dpp_output_buffer_pixels = 2560,
+ .opp_output_buffer_lines = 1,
+ .pixel_chunk_size_kbytes = 8,
+ .meta_chunk_size_kbytes = 2,
+ .min_meta_chunk_size_bytes = 256,
+ .writeback_chunk_size_kbytes = 8,
+ .ptoi_supported = false,
+ .num_dsc = 4,
+ .maximum_dsc_bits_per_component = 10,
+ .dsc422_native_support = false,
+ .is_line_buffer_bpp_fixed = true,
+ .line_buffer_fixed_bpp = 48,
+ .line_buffer_size_bits = 789504,
+ .max_line_buffer_lines = 12,
+ .writeback_interface_buffer_size_kbytes = 90,
+ .max_num_dpp = 4,
+ .max_num_otg = 4,
+ .max_num_hdmi_frl_outputs = 1,
+ .max_num_wb = 1,
+ .max_dchub_pscl_bw_pix_per_clk = 4,
+ .max_pscl_lb_bw_pix_per_clk = 2,
+ .max_lb_vscl_bw_pix_per_clk = 4,
+ .max_vscl_hscl_bw_pix_per_clk = 4,
+ .max_hscl_ratio = 6,
+ .max_vscl_ratio = 6,
+ .max_hscl_taps = 8,
+ .max_vscl_taps = 8,
+ .dpte_buffer_size_in_pte_reqs_luma = 64,
+ .dpte_buffer_size_in_pte_reqs_chroma = 34,
+ .dispclk_ramp_margin_percent = 1,
+ .max_inter_dcn_tile_repeaters = 8,
+ .cursor_buffer_size = 16,
+ .cursor_chunk_size = 2,
+ .writeback_line_buffer_buffer_size = 0,
+ .writeback_min_hscl_ratio = 1,
+ .writeback_min_vscl_ratio = 1,
+ .writeback_max_hscl_ratio = 1,
+ .writeback_max_vscl_ratio = 1,
+ .writeback_max_hscl_taps = 1,
+ .writeback_max_vscl_taps = 1,
+ .dppclk_delay_subtotal = 46,
+ .dppclk_delay_scl = 50,
+ .dppclk_delay_scl_lb_only = 16,
+ .dppclk_delay_cnvc_formatter = 27,
+ .dppclk_delay_cnvc_cursor = 6,
+ .dispclk_delay_subtotal = 119,
+ .dynamic_metadata_vm_enabled = false,
+ .odm_combine_4to1_supported = false,
+ .dcc_supported = true,
+};
+
+struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc = {
+ /*TODO: correct dispclk/dppclk voltage level determination*/
+ .clock_limits = {
+ {
+ .state = 0,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 600.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 186.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 1,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 209.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 2,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 209.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 3,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 371.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 4,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 417.0,
+ .dtbclk_mhz = 600.0,
+ },
+ },
+ .num_states = 5,
+ .sr_exit_time_us = 9.0,
+ .sr_enter_plus_exit_time_us = 11.0,
+ .sr_exit_z8_time_us = 442.0,
+ .sr_enter_plus_exit_z8_time_us = 560.0,
+ .writeback_latency_us = 12.0,
+ .dram_channel_width_bytes = 4,
+ .round_trip_ping_latency_dcfclk_cycles = 106,
+ .urgent_latency_pixel_data_only_us = 4.0,
+ .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+ .urgent_latency_vm_data_only_us = 4.0,
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+ .pct_ideal_sdp_bw_after_urgent = 80.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 65.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
+ .max_avg_sdp_bw_use_normal_percent = 60.0,
+ .max_avg_dram_bw_use_normal_percent = 60.0,
+ .fabric_datapath_to_dcn_data_return_bytes = 32,
+ .return_bus_width_bytes = 64,
+ .downspread_percent = 0.38,
+ .dcn_downspread_percent = 0.5,
+ .gpuvm_min_page_size_bytes = 4096,
+ .hostvm_min_page_size_bytes = 4096,
+ .do_urgent_latency_adjustment = false,
+ .urgent_latency_adjustment_fabric_clock_component_us = 0,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+};
+
+void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params)
+{
+ struct clk_limit_table *clk_table = &bw_params->clk_table;
+ struct _vcs_dpi_voltage_scaling_st *clock_limits =
+ dcn3_14_soc.clock_limits;
+ unsigned int i, closest_clk_lvl;
+ int max_dispclk_mhz = 0, max_dppclk_mhz = 0;
+ int j;
+
+ dc_assert_fp_enabled();
+
+ // Default clock levels are used for diags, which may lead to overclocking.
+ if (!IS_DIAG_DC(dc->ctx->dce_environment) && dc->config.use_default_clock_table == false) {
+
+ dcn3_14_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
+ dcn3_14_ip.max_num_dpp = dc->res_pool->pipe_count;
+
+ if (bw_params->num_channels > 0)
+ dcn3_14_soc.num_chans = bw_params->num_channels;
+
+ ASSERT(dcn3_14_soc.num_chans);
+ ASSERT(clk_table->num_entries);
+
+ /* Prepass to find max clocks independent of voltage level. */
+ for (i = 0; i < clk_table->num_entries; ++i) {
+ if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
+ max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
+ if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
+ max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
+ }
+
+ for (i = 0; i < clk_table->num_entries; i++) {
+ /* loop backwards*/
+ for (closest_clk_lvl = 0, j = dcn3_14_soc.num_states - 1; j >= 0; j--) {
+ if ((unsigned int) dcn3_14_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
+ closest_clk_lvl = j;
+ break;
+ }
+ }
+ if (clk_table->num_entries == 1) {
+ /*smu gives one DPM level, let's take the highest one*/
+ closest_clk_lvl = dcn3_14_soc.num_states - 1;
+ }
+
+ clock_limits[i].state = i;
+
+ /* Clocks dependent on voltage level. */
+ clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ if (clk_table->num_entries == 1 &&
+ clock_limits[i].dcfclk_mhz < dcn3_14_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) {
+ /*SMU fix not released yet*/
+ clock_limits[i].dcfclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dcfclk_mhz;
+ }
+ clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+ clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+
+ if (clk_table->entries[i].memclk_mhz && clk_table->entries[i].wck_ratio)
+ clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2 * clk_table->entries[i].wck_ratio;
+
+ /* Clocks independent of voltage level. */
+ clock_limits[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
+ dcn3_14_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+
+ clock_limits[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
+ dcn3_14_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+
+ clock_limits[i].dram_bw_per_chan_gbps = dcn3_14_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+ clock_limits[i].dscclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+ clock_limits[i].dtbclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+ clock_limits[i].phyclk_d18_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+ clock_limits[i].phyclk_mhz = dcn3_14_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+ }
+ for (i = 0; i < clk_table->num_entries; i++)
+ dcn3_14_soc.clock_limits[i] = clock_limits[i];
+ if (clk_table->num_entries) {
+ dcn3_14_soc.num_states = clk_table->num_entries;
+ }
+ }
+
+ if (max_dispclk_mhz) {
+ dcn3_14_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
+ dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
+ }
+
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+ dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31);
+ else
+ dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31_FPGA);
+}
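
One detail worth calling out from the loop above: the effective DRAM speed is derived from the SMU clock table as memclk * 2 * wck_ratio. A worked example with made-up numbers (both inputs are invented here, not taken from a real clock table):

	/* Illustrative only: a 750 MHz memclk entry with a WCK ratio of 4 gives
	 * 750 * 2 * 4 = 6000 MT/s for dram_speed_mts.
	 */
	unsigned int memclk_mhz = 750;  /* hypothetical clk_table entry */
	unsigned int wck_ratio  = 4;    /* hypothetical clk_table entry */
	unsigned int dram_speed_mts = memclk_mhz * 2 * wck_ratio;  /* = 6000 */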
+
+static bool is_dual_plane(enum surface_pixel_format format)
+{
+ return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
+}
+
+int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ bool fast_validate)
+{
+ int i, pipe_cnt;
+ struct resource_context *res_ctx = &context->res_ctx;
+ struct pipe_ctx *pipe;
+ bool upscaled = false;
+
+ dc_assert_fp_enabled();
+
+ dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+
+ for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dc_crtc_timing *timing;
+
+ if (!res_ctx->pipe_ctx[i].stream)
+ continue;
+ pipe = &res_ctx->pipe_ctx[i];
+ timing = &pipe->stream->timing;
+
+ if (dc_extended_blank_supported(dc) && pipe->stream->adjust.v_total_max == pipe->stream->adjust.v_total_min
+ && pipe->stream->adjust.v_total_min > timing->v_total)
+ pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min;
+
+ if (pipe->plane_state &&
+ (pipe->plane_state->src_rect.height < pipe->plane_state->dst_rect.height ||
+ pipe->plane_state->src_rect.width < pipe->plane_state->dst_rect.width))
+ upscaled = true;
+
+ /*
+ * Immediate flip can be set dynamically after enabling the plane.
+ * We need to require support for immediate flip or underflow can be
+ * intermittently experienced depending on peak b/w requirements.
+ */
+ pipes[pipe_cnt].pipe.src.immediate_flip = true;
+
+ pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
+ pipes[pipe_cnt].pipe.src.hostvm = dc->res_pool->hubbub->riommu_active;
+ pipes[pipe_cnt].pipe.src.gpuvm = true;
+ pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
+ pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
+ pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
+ pipes[pipe_cnt].pipe.src.dcc_rate = 3;
+ pipes[pipe_cnt].dout.dsc_input_bpc = 0;
+
+ if (pipes[pipe_cnt].dout.dsc_enable) {
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_888:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 12;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ }
+
+ pipe_cnt++;
+ }
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_14_DEFAULT_DET_SIZE;
+
+ dc->config.enable_4to1MPC = false;
+ if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
+ if (is_dual_plane(pipe->plane_state->format)
+ && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) {
+ dc->config.enable_4to1MPC = true;
+ } else if (!is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 5120) {
+ /* Limit to 5k max to avoid forced pipe split when there is not enough detile for swath */
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
+ pipes[0].pipe.src.unbounded_req_mode = true;
+ }
+ } else if (context->stream_count >= dc->debug.crb_alloc_policy_min_disp_count
+ && dc->debug.crb_alloc_policy > DET_SIZE_DEFAULT) {
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = dc->debug.crb_alloc_policy * 64;
+ } else if (context->stream_count >= 3 && upscaled) {
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->stream)
+ continue;
+
+ if (pipe->stream->signal == SIGNAL_TYPE_EDP && dc->debug.seamless_boot_odm_combine &&
+ pipe->stream->apply_seamless_boot_optimization) {
+
+ if (pipe->stream->apply_boot_odm_mode == dm_odm_combine_policy_2to1) {
+ context->bw_ctx.dml.vba.ODMCombinePolicy = dm_odm_combine_policy_2to1;
+ break;
+ }
+ }
+ }
+
+ return pipe_cnt;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h
new file mode 100644
index 000000000000..d32c5bb99f4c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN314_FPU_H__
+#define __DCN314_FPU_H__
+
+#define DCN3_14_DEFAULT_DET_SIZE 384
+#define DCN3_14_MAX_DET_SIZE 384
+#define DCN3_14_MIN_COMPBUF_SIZE_KB 128
+#define DCN3_14_CRB_SEGMENT_SIZE_KB 64
+
+void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params);
+int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ bool fast_validate);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 66453546e24f..8118cfc5b405 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -473,8 +473,11 @@ void dcn32_set_phantom_stream_timing(struct dc *dc,
// DML calculation for MALL region doesn't take into account FW delay
// and required pstate allow width for multi-display cases
+ /* Add a 16-line margin to the MALL region because SUB_VP_START_LINE must be aligned
+ * to 2 swaths (i.e. 16 lines)
+ */
phantom_vactive = get_subviewport_lines_needed_in_mall(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx) +
- pstate_width_fw_delay_lines;
+ pstate_width_fw_delay_lines + dc->caps.subvp_swath_height_margin_lines;
// For backporch of phantom pipe, use vstartup of the main pipe
phantom_bp = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
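
With subvp_swath_height_margin_lines set to 16 in the dcn32/dcn321 resource constructors earlier in this diff, the phantom active region now grows by two swaths on top of the DML sub-viewport height and the firmware delay allowance. A worked example with made-up inputs:

	/* Illustrative numbers only; the first two values come from DML and
	 * dc->caps at runtime and are invented here.
	 */
	unsigned int subviewport_lines = 100; /* get_subviewport_lines_needed_in_mall() */
	unsigned int fw_delay_lines    = 20;  /* pstate_width_fw_delay_lines */
	unsigned int margin_lines      = 16;  /* dc->caps.subvp_swath_height_margin_lines */
	unsigned int phantom_vactive = subviewport_lines + fw_delay_lines + margin_lines; /* = 136 */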
@@ -490,6 +493,7 @@ void dcn32_set_phantom_stream_timing(struct dc *dc,
phantom_stream->timing.v_front_porch +
phantom_stream->timing.v_sync_width +
phantom_bp;
+ phantom_stream->timing.flags.DSC = 0; // Don't need DSC for phantom timing
}
/**
@@ -983,9 +987,15 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
* DML favors voltage over p-state, but we're more interested in
* supporting p-state over voltage. We can't support p-state in
* prefetch mode > 0 so try capping the prefetch mode to start.
+ * Override present for testing.
*/
- context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
+ if (dc->debug.dml_disallow_alternate_prefetch_modes)
+ context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
dm_prefetch_support_uclk_fclk_and_stutter;
+ else
+ context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
+ dm_prefetch_support_uclk_fclk_and_stutter_if_possible;
+
*vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
/* This may adjust vlevel and maxMpcComb */
if (*vlevel < context->bw_ctx.dml.soc.num_states)
@@ -1014,7 +1024,9 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
* will not allow for switch in VBLANK. The DRR display must have it's VBLANK stretched
* enough to support MCLK switching.
*/
- if (*vlevel == context->bw_ctx.dml.soc.num_states) {
+ if (*vlevel == context->bw_ctx.dml.soc.num_states &&
+ context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final ==
+ dm_prefetch_support_uclk_fclk_and_stutter) {
context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
dm_prefetch_support_stutter;
/* There are params (such as FabricClock) that need to be recalculated
@@ -1344,7 +1356,8 @@ bool dcn32_internal_validate_bw(struct dc *dc,
int split[MAX_PIPES] = { 0 };
bool merge[MAX_PIPES] = { false };
bool newly_split[MAX_PIPES] = { false };
- int pipe_cnt, i, pipe_idx, vlevel;
+ int pipe_cnt, i, pipe_idx;
+ int vlevel = context->bw_ctx.dml.soc.num_states;
struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
dc_assert_fp_enabled();
@@ -1373,17 +1386,22 @@ bool dcn32_internal_validate_bw(struct dc *dc,
DC_FP_END();
}
- if (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states ||
- vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) {
+ if (fast_validate ||
+ (dc->debug.dml_disallow_alternate_prefetch_modes &&
+ (vlevel == context->bw_ctx.dml.soc.num_states ||
+ vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported))) {
/*
- * If mode is unsupported or there's still no p-state support then
- * fall back to favoring voltage.
+ * If dml_disallow_alternate_prefetch_modes is false, then we have already
+ * tried alternate prefetch modes during full validation.
+ *
+ * If mode is unsupported or there is no p-state support, then
+ * fall back to favoring voltage.
*
- * If Prefetch mode 0 failed for this config, or passed with Max UCLK, try if
- * supported with Prefetch mode 1 (dm_prefetch_support_fclk_and_stutter == 2)
+ * If Prefetch mode 0 failed for this config, or passed with Max UCLK, then try
+ * Prefetch mode 1 (dm_prefetch_support_fclk_and_stutter == 2)
*/
context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
- dm_prefetch_support_fclk_and_stutter;
+ dm_prefetch_support_fclk_and_stutter;
vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
@@ -2098,6 +2116,13 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
}
+ if ((int)(dcn3_2_soc.fclk_change_latency_us * 1000)
+ != dc->bb_overrides.fclk_clock_change_latency_ns
+ && dc->bb_overrides.fclk_clock_change_latency_ns) {
+ dcn3_2_soc.fclk_change_latency_us =
+ dc->bb_overrides.fclk_clock_change_latency_ns / 1000;
+ }
+
if ((int)(dcn3_2_soc.dummy_pstate_latency_us * 1000)
!= dc->bb_overrides.dummy_clock_change_latency_ns
&& dc->bb_overrides.dummy_clock_change_latency_ns) {
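
The new fclk override follows the same convention as the neighboring dram and dummy-pstate overrides: bb_overrides values are stored in nanoseconds and only applied when they are non-zero and differ from the current microsecond value in the SoC bounding box. A generic sketch of that pattern (apply_latency_override_ns() is a hypothetical helper, not driver API):

	/* Hypothetical helper capturing the ns-to-us override pattern used for
	 * the bounding-box latency fields.
	 */
	static void apply_latency_override_ns(double *latency_us, int override_ns)
	{
		if (override_ns && (int)(*latency_us * 1000) != override_ns)
			*latency_us = override_ns / 1000.0;
	}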
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
index 890612db08dc..cb2025771646 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
@@ -221,7 +221,6 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
// VBA_DELTA
// Calculate DET size, swath height
dml32_CalculateSwathAndDETConfiguration(
- &v->dummy_vars.dml32_CalculateSwathAndDETConfiguration,
mode_lib->vba.DETSizeOverride,
mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.ConfigReturnBufferSizeInKByte,
@@ -461,7 +460,6 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
{
dml32_CalculateVMRowAndSwath(
- &v->dummy_vars.dml32_CalculateVMRowAndSwath,
mode_lib->vba.NumberOfActiveSurfaces,
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.SurfaceParameters,
v->SurfaceSizeInMALL,
@@ -757,9 +755,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.BytePerPixelY = v->BytePerPixelY[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.BytePerPixelC = v->BytePerPixelC[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.ProgressiveToInterlaceUnitInOPP = mode_lib->vba.ProgressiveToInterlaceUnitInOPP;
- v->ErrorResult[k] = dml32_CalculatePrefetchSchedule(
- &v->dummy_vars.dml32_CalculatePrefetchSchedule,
- v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.HostVMInefficiencyFactor,
+ v->ErrorResult[k] = dml32_CalculatePrefetchSchedule(v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.HostVMInefficiencyFactor,
&v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe, v->DSCDelay[k],
mode_lib->vba.DPPCLKDelaySubtotal + mode_lib->vba.DPPCLKDelayCNVCFormater,
mode_lib->vba.DPPCLKDelaySCL,
@@ -1167,7 +1163,6 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.mmSOCParameters.SMNLatency = mode_lib->vba.SMNLatency;
dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- &v->dummy_vars.dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport,
mode_lib->vba.USRRetrainingRequiredFinal,
mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.PrefetchModePerState[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb],
@@ -1952,7 +1947,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
dml32_CalculateSwathAndDETConfiguration(
- &v->dummy_vars.dml32_CalculateSwathAndDETConfiguration,
mode_lib->vba.DETSizeOverride,
mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.ConfigReturnBufferSizeInKByte,
@@ -2549,7 +2543,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
dml32_CalculateSwathAndDETConfiguration(
- &v->dummy_vars.dml32_CalculateSwathAndDETConfiguration,
mode_lib->vba.DETSizeOverride,
mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.ConfigReturnBufferSizeInKByte,
@@ -2749,7 +2742,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
{
dml32_CalculateVMRowAndSwath(
- &v->dummy_vars.dml32_CalculateVMRowAndSwath,
mode_lib->vba.NumberOfActiveSurfaces,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters,
mode_lib->vba.SurfaceSizeInMALL,
@@ -3266,7 +3258,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.NoTimeForPrefetch[i][j][k] =
dml32_CalculatePrefetchSchedule(
- &v->dummy_vars.dml32_CalculatePrefetchSchedule,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.HostVMInefficiencyFactor,
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe,
mode_lib->vba.DSCDelayPerState[i][k],
@@ -3566,7 +3557,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
{
dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- &v->dummy_vars.dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport,
mode_lib->vba.USRRetrainingRequiredFinal,
mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.PrefetchModePerState[i][j],
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
index 07f8f3b8626b..05fc14a47fba 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
@@ -391,7 +391,6 @@ void dml32_CalculateBytePerPixelAndBlockSizes(
} // CalculateBytePerPixelAndBlockSizes
void dml32_CalculateSwathAndDETConfiguration(
- struct dml32_CalculateSwathAndDETConfiguration *st_vars,
unsigned int DETSizeOverride[],
enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
unsigned int ConfigReturnBufferSizeInKByte,
@@ -456,10 +455,18 @@ void dml32_CalculateSwathAndDETConfiguration(
bool ViewportSizeSupportPerSurface[],
bool *ViewportSizeSupport)
{
+ unsigned int MaximumSwathHeightY[DC__NUM_DPP__MAX];
+ unsigned int MaximumSwathHeightC[DC__NUM_DPP__MAX];
+ unsigned int RoundedUpMaxSwathSizeBytesY[DC__NUM_DPP__MAX];
+ unsigned int RoundedUpMaxSwathSizeBytesC[DC__NUM_DPP__MAX];
+ unsigned int RoundedUpSwathSizeBytesY;
+ unsigned int RoundedUpSwathSizeBytesC;
+ double SwathWidthdoubleDPP[DC__NUM_DPP__MAX];
+ double SwathWidthdoubleDPPChroma[DC__NUM_DPP__MAX];
unsigned int k;
-
- st_vars->TotalActiveDPP = 0;
- st_vars->NoChromaSurfaces = true;
+ unsigned int TotalActiveDPP = 0;
+ bool NoChromaSurfaces = true;
+ unsigned int DETBufferSizeInKByteForSwathCalculation;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: ForceSingleDPP = %d\n", __func__, ForceSingleDPP);
@@ -494,43 +501,43 @@ void dml32_CalculateSwathAndDETConfiguration(
DPPPerSurface,
/* Output */
- st_vars->SwathWidthdoubleDPP,
- st_vars->SwathWidthdoubleDPPChroma,
+ SwathWidthdoubleDPP,
+ SwathWidthdoubleDPPChroma,
SwathWidth,
SwathWidthChroma,
- st_vars->MaximumSwathHeightY,
- st_vars->MaximumSwathHeightC,
+ MaximumSwathHeightY,
+ MaximumSwathHeightC,
swath_width_luma_ub,
swath_width_chroma_ub);
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- st_vars->RoundedUpMaxSwathSizeBytesY[k] = swath_width_luma_ub[k] * BytePerPixDETY[k] * st_vars->MaximumSwathHeightY[k];
- st_vars->RoundedUpMaxSwathSizeBytesC[k] = swath_width_chroma_ub[k] * BytePerPixDETC[k] * st_vars->MaximumSwathHeightC[k];
+ RoundedUpMaxSwathSizeBytesY[k] = swath_width_luma_ub[k] * BytePerPixDETY[k] * MaximumSwathHeightY[k];
+ RoundedUpMaxSwathSizeBytesC[k] = swath_width_chroma_ub[k] * BytePerPixDETC[k] * MaximumSwathHeightC[k];
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d DPPPerSurface = %d\n", __func__, k, DPPPerSurface[k]);
dml_print("DML::%s: k=%0d swath_width_luma_ub = %d\n", __func__, k, swath_width_luma_ub[k]);
dml_print("DML::%s: k=%0d BytePerPixDETY = %f\n", __func__, k, BytePerPixDETY[k]);
- dml_print("DML::%s: k=%0d MaximumSwathHeightY = %d\n", __func__, k, st_vars->MaximumSwathHeightY[k]);
+ dml_print("DML::%s: k=%0d MaximumSwathHeightY = %d\n", __func__, k, MaximumSwathHeightY[k]);
dml_print("DML::%s: k=%0d RoundedUpMaxSwathSizeBytesY = %d\n", __func__, k,
- st_vars->RoundedUpMaxSwathSizeBytesY[k]);
+ RoundedUpMaxSwathSizeBytesY[k]);
dml_print("DML::%s: k=%0d swath_width_chroma_ub = %d\n", __func__, k, swath_width_chroma_ub[k]);
dml_print("DML::%s: k=%0d BytePerPixDETC = %f\n", __func__, k, BytePerPixDETC[k]);
- dml_print("DML::%s: k=%0d MaximumSwathHeightC = %d\n", __func__, k, st_vars->MaximumSwathHeightC[k]);
+ dml_print("DML::%s: k=%0d MaximumSwathHeightC = %d\n", __func__, k, MaximumSwathHeightC[k]);
dml_print("DML::%s: k=%0d RoundedUpMaxSwathSizeBytesC = %d\n", __func__, k,
- st_vars->RoundedUpMaxSwathSizeBytesC[k]);
+ RoundedUpMaxSwathSizeBytesC[k]);
#endif
if (SourcePixelFormat[k] == dm_420_10) {
- st_vars->RoundedUpMaxSwathSizeBytesY[k] = dml_ceil((unsigned int) st_vars->RoundedUpMaxSwathSizeBytesY[k], 256);
- st_vars->RoundedUpMaxSwathSizeBytesC[k] = dml_ceil((unsigned int) st_vars->RoundedUpMaxSwathSizeBytesC[k], 256);
+ RoundedUpMaxSwathSizeBytesY[k] = dml_ceil((unsigned int) RoundedUpMaxSwathSizeBytesY[k], 256);
+ RoundedUpMaxSwathSizeBytesC[k] = dml_ceil((unsigned int) RoundedUpMaxSwathSizeBytesC[k], 256);
}
}
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- st_vars->TotalActiveDPP = st_vars->TotalActiveDPP + (ForceSingleDPP ? 1 : DPPPerSurface[k]);
+ TotalActiveDPP = TotalActiveDPP + (ForceSingleDPP ? 1 : DPPPerSurface[k]);
if (SourcePixelFormat[k] == dm_420_8 || SourcePixelFormat[k] == dm_420_10 ||
SourcePixelFormat[k] == dm_420_12 || SourcePixelFormat[k] == dm_rgbe_alpha) {
- st_vars->NoChromaSurfaces = false;
+ NoChromaSurfaces = false;
}
}
@@ -540,10 +547,10 @@ void dml32_CalculateSwathAndDETConfiguration(
// if unbounded req is enabled, program reserved space such that the ROB will not hold more than 8 swaths worth of data
// - assume worst-case compression rate of 4. [ROB size - 8 * swath_size / max_compression ratio]
// - assume for "narrow" vp case in which the ROB can fit 8 swaths, the DET should be big enough to do full size req
- *CompBufReservedSpaceNeedAdjustment = ((int) ROBSizeKBytes - (int) *CompBufReservedSpaceKBytes) > (int) (st_vars->RoundedUpMaxSwathSizeBytesY[0]/512);
+ *CompBufReservedSpaceNeedAdjustment = ((int) ROBSizeKBytes - (int) *CompBufReservedSpaceKBytes) > (int) (RoundedUpMaxSwathSizeBytesY[0]/512);
if (*CompBufReservedSpaceNeedAdjustment == 1) {
- *CompBufReservedSpaceKBytes = ROBSizeKBytes - st_vars->RoundedUpMaxSwathSizeBytesY[0]/512;
+ *CompBufReservedSpaceKBytes = ROBSizeKBytes - RoundedUpMaxSwathSizeBytesY[0]/512;
}
#ifdef __DML_VBA_DEBUG__
@@ -551,7 +558,7 @@ void dml32_CalculateSwathAndDETConfiguration(
dml_print("DML::%s: CompBufReservedSpaceNeedAdjustment = %d\n", __func__, *CompBufReservedSpaceNeedAdjustment);
#endif
- *UnboundedRequestEnabled = dml32_UnboundedRequest(UseUnboundedRequestingFinal, st_vars->TotalActiveDPP, st_vars->NoChromaSurfaces, Output[0], SurfaceTiling[0], *CompBufReservedSpaceNeedAdjustment, DisableUnboundRequestIfCompBufReservedSpaceNeedAdjustment);
+ *UnboundedRequestEnabled = dml32_UnboundedRequest(UseUnboundedRequestingFinal, TotalActiveDPP, NoChromaSurfaces, Output[0], SurfaceTiling[0], *CompBufReservedSpaceNeedAdjustment, DisableUnboundRequestIfCompBufReservedSpaceNeedAdjustment);
dml32_CalculateDETBufferSize(DETSizeOverride,
UseMALLForPStateChange,
@@ -566,8 +573,8 @@ void dml32_CalculateSwathAndDETConfiguration(
SourcePixelFormat,
ReadBandwidthLuma,
ReadBandwidthChroma,
- st_vars->RoundedUpMaxSwathSizeBytesY,
- st_vars->RoundedUpMaxSwathSizeBytesC,
+ RoundedUpMaxSwathSizeBytesY,
+ RoundedUpMaxSwathSizeBytesC,
DPPPerSurface,
/* Output */
@@ -575,7 +582,7 @@ void dml32_CalculateSwathAndDETConfiguration(
CompressedBufferSizeInkByte);
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: TotalActiveDPP = %d\n", __func__, st_vars->TotalActiveDPP);
+ dml_print("DML::%s: TotalActiveDPP = %d\n", __func__, TotalActiveDPP);
dml_print("DML::%s: nomDETInKByte = %d\n", __func__, nomDETInKByte);
dml_print("DML::%s: ConfigReturnBufferSizeInKByte = %d\n", __func__, ConfigReturnBufferSizeInKByte);
dml_print("DML::%s: UseUnboundedRequestingFinal = %d\n", __func__, UseUnboundedRequestingFinal);
@@ -586,42 +593,42 @@ void dml32_CalculateSwathAndDETConfiguration(
*ViewportSizeSupport = true;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- st_vars->DETBufferSizeInKByteForSwathCalculation = (UseMALLForPStateChange[k] ==
+ DETBufferSizeInKByteForSwathCalculation = (UseMALLForPStateChange[k] ==
dm_use_mall_pstate_change_phantom_pipe ? 1024 : DETBufferSizeInKByte[k]);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d DETBufferSizeInKByteForSwathCalculation = %d\n", __func__, k,
- st_vars->DETBufferSizeInKByteForSwathCalculation);
+ DETBufferSizeInKByteForSwathCalculation);
#endif
- if (st_vars->RoundedUpMaxSwathSizeBytesY[k] + st_vars->RoundedUpMaxSwathSizeBytesC[k] <=
- st_vars->DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
- SwathHeightY[k] = st_vars->MaximumSwathHeightY[k];
- SwathHeightC[k] = st_vars->MaximumSwathHeightC[k];
- st_vars->RoundedUpSwathSizeBytesY = st_vars->RoundedUpMaxSwathSizeBytesY[k];
- st_vars->RoundedUpSwathSizeBytesC = st_vars->RoundedUpMaxSwathSizeBytesC[k];
- } else if (st_vars->RoundedUpMaxSwathSizeBytesY[k] >= 1.5 * st_vars->RoundedUpMaxSwathSizeBytesC[k] &&
- st_vars->RoundedUpMaxSwathSizeBytesY[k] / 2 + st_vars->RoundedUpMaxSwathSizeBytesC[k] <=
- st_vars->DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
- SwathHeightY[k] = st_vars->MaximumSwathHeightY[k] / 2;
- SwathHeightC[k] = st_vars->MaximumSwathHeightC[k];
- st_vars->RoundedUpSwathSizeBytesY = st_vars->RoundedUpMaxSwathSizeBytesY[k] / 2;
- st_vars->RoundedUpSwathSizeBytesC = st_vars->RoundedUpMaxSwathSizeBytesC[k];
- } else if (st_vars->RoundedUpMaxSwathSizeBytesY[k] < 1.5 * st_vars->RoundedUpMaxSwathSizeBytesC[k] &&
- st_vars->RoundedUpMaxSwathSizeBytesY[k] + st_vars->RoundedUpMaxSwathSizeBytesC[k] / 2 <=
- st_vars->DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
- SwathHeightY[k] = st_vars->MaximumSwathHeightY[k];
- SwathHeightC[k] = st_vars->MaximumSwathHeightC[k] / 2;
- st_vars->RoundedUpSwathSizeBytesY = st_vars->RoundedUpMaxSwathSizeBytesY[k];
- st_vars->RoundedUpSwathSizeBytesC = st_vars->RoundedUpMaxSwathSizeBytesC[k] / 2;
+ if (RoundedUpMaxSwathSizeBytesY[k] + RoundedUpMaxSwathSizeBytesC[k] <=
+ DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
+ SwathHeightY[k] = MaximumSwathHeightY[k];
+ SwathHeightC[k] = MaximumSwathHeightC[k];
+ RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY[k];
+ RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC[k];
+ } else if (RoundedUpMaxSwathSizeBytesY[k] >= 1.5 * RoundedUpMaxSwathSizeBytesC[k] &&
+ RoundedUpMaxSwathSizeBytesY[k] / 2 + RoundedUpMaxSwathSizeBytesC[k] <=
+ DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
+ SwathHeightY[k] = MaximumSwathHeightY[k] / 2;
+ SwathHeightC[k] = MaximumSwathHeightC[k];
+ RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY[k] / 2;
+ RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC[k];
+ } else if (RoundedUpMaxSwathSizeBytesY[k] < 1.5 * RoundedUpMaxSwathSizeBytesC[k] &&
+ RoundedUpMaxSwathSizeBytesY[k] + RoundedUpMaxSwathSizeBytesC[k] / 2 <=
+ DETBufferSizeInKByteForSwathCalculation * 1024 / 2) {
+ SwathHeightY[k] = MaximumSwathHeightY[k];
+ SwathHeightC[k] = MaximumSwathHeightC[k] / 2;
+ RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY[k];
+ RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC[k] / 2;
} else {
- SwathHeightY[k] = st_vars->MaximumSwathHeightY[k] / 2;
- SwathHeightC[k] = st_vars->MaximumSwathHeightC[k] / 2;
- st_vars->RoundedUpSwathSizeBytesY = st_vars->RoundedUpMaxSwathSizeBytesY[k] / 2;
- st_vars->RoundedUpSwathSizeBytesC = st_vars->RoundedUpMaxSwathSizeBytesC[k] / 2;
+ SwathHeightY[k] = MaximumSwathHeightY[k] / 2;
+ SwathHeightC[k] = MaximumSwathHeightC[k] / 2;
+ RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY[k] / 2;
+ RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC[k] / 2;
}
- if ((st_vars->RoundedUpMaxSwathSizeBytesY[k] / 2 + st_vars->RoundedUpMaxSwathSizeBytesC[k] / 2 >
- st_vars->DETBufferSizeInKByteForSwathCalculation * 1024 / 2)
+ if ((RoundedUpMaxSwathSizeBytesY[k] / 2 + RoundedUpMaxSwathSizeBytesC[k] / 2 >
+ DETBufferSizeInKByteForSwathCalculation * 1024 / 2)
|| SwathWidth[k] > MaximumSwathWidthLuma[k] || (SwathHeightC[k] > 0 &&
SwathWidthChroma[k] > MaximumSwathWidthChroma[k])) {
*ViewportSizeSupport = false;
@@ -636,7 +643,7 @@ void dml32_CalculateSwathAndDETConfiguration(
#endif
DETBufferSizeY[k] = DETBufferSizeInKByte[k] * 1024;
DETBufferSizeC[k] = 0;
- } else if (st_vars->RoundedUpSwathSizeBytesY <= 1.5 * st_vars->RoundedUpSwathSizeBytesC) {
+ } else if (RoundedUpSwathSizeBytesY <= 1.5 * RoundedUpSwathSizeBytesC) {
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%0d Half DET for plane0, half for plane1\n", __func__, k);
#endif
@@ -654,11 +661,11 @@ void dml32_CalculateSwathAndDETConfiguration(
dml_print("DML::%s: k=%0d SwathHeightY = %d\n", __func__, k, SwathHeightY[k]);
dml_print("DML::%s: k=%0d SwathHeightC = %d\n", __func__, k, SwathHeightC[k]);
dml_print("DML::%s: k=%0d RoundedUpMaxSwathSizeBytesY = %d\n", __func__,
- k, st_vars->RoundedUpMaxSwathSizeBytesY[k]);
+ k, RoundedUpMaxSwathSizeBytesY[k]);
dml_print("DML::%s: k=%0d RoundedUpMaxSwathSizeBytesC = %d\n", __func__,
- k, st_vars->RoundedUpMaxSwathSizeBytesC[k]);
- dml_print("DML::%s: k=%0d RoundedUpSwathSizeBytesY = %d\n", __func__, k, st_vars->RoundedUpSwathSizeBytesY);
- dml_print("DML::%s: k=%0d RoundedUpSwathSizeBytesC = %d\n", __func__, k, st_vars->RoundedUpSwathSizeBytesC);
+ k, RoundedUpMaxSwathSizeBytesC[k]);
+ dml_print("DML::%s: k=%0d RoundedUpSwathSizeBytesY = %d\n", __func__, k, RoundedUpSwathSizeBytesY);
+ dml_print("DML::%s: k=%0d RoundedUpSwathSizeBytesC = %d\n", __func__, k, RoundedUpSwathSizeBytesC);
dml_print("DML::%s: k=%0d DETBufferSizeInKByte = %d\n", __func__, k, DETBufferSizeInKByte[k]);
dml_print("DML::%s: k=%0d DETBufferSizeY = %d\n", __func__, k, DETBufferSizeY[k]);
dml_print("DML::%s: k=%0d DETBufferSizeC = %d\n", __func__, k, DETBufferSizeC[k]);
@@ -1867,7 +1874,6 @@ void dml32_CalculateSurfaceSizeInMall(
} // CalculateSurfaceSizeInMall
void dml32_CalculateVMRowAndSwath(
- struct dml32_CalculateVMRowAndSwath *st_vars,
unsigned int NumberOfActiveSurfaces,
DmlPipe myPipe[],
unsigned int SurfaceSizeInMALL[],
@@ -1933,6 +1939,21 @@ void dml32_CalculateVMRowAndSwath(
unsigned int BIGK_FRAGMENT_SIZE[])
{
unsigned int k;
+ unsigned int PTEBufferSizeInRequestsForLuma[DC__NUM_DPP__MAX];
+ unsigned int PTEBufferSizeInRequestsForChroma[DC__NUM_DPP__MAX];
+ unsigned int PDEAndMetaPTEBytesFrameY;
+ unsigned int PDEAndMetaPTEBytesFrameC;
+ unsigned int MetaRowByteY[DC__NUM_DPP__MAX];
+ unsigned int MetaRowByteC[DC__NUM_DPP__MAX];
+ unsigned int PixelPTEBytesPerRowY[DC__NUM_DPP__MAX];
+ unsigned int PixelPTEBytesPerRowC[DC__NUM_DPP__MAX];
+ unsigned int PixelPTEBytesPerRowY_one_row_per_frame[DC__NUM_DPP__MAX];
+ unsigned int PixelPTEBytesPerRowC_one_row_per_frame[DC__NUM_DPP__MAX];
+ unsigned int dpte_row_width_luma_ub_one_row_per_frame[DC__NUM_DPP__MAX];
+ unsigned int dpte_row_height_luma_one_row_per_frame[DC__NUM_DPP__MAX];
+ unsigned int dpte_row_width_chroma_ub_one_row_per_frame[DC__NUM_DPP__MAX];
+ unsigned int dpte_row_height_chroma_one_row_per_frame[DC__NUM_DPP__MAX];
+ bool one_row_per_frame_fits_in_buffer[DC__NUM_DPP__MAX];
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if (HostVMEnable == true) {
@@ -1954,15 +1975,15 @@ void dml32_CalculateVMRowAndSwath(
myPipe[k].SourcePixelFormat == dm_rgbe_alpha) {
if ((myPipe[k].SourcePixelFormat == dm_420_10 || myPipe[k].SourcePixelFormat == dm_420_12) &&
!IsVertical(myPipe[k].SourceRotation)) {
- st_vars->PTEBufferSizeInRequestsForLuma[k] =
+ PTEBufferSizeInRequestsForLuma[k] =
(PTEBufferSizeInRequestsLuma + PTEBufferSizeInRequestsChroma) / 2;
- st_vars->PTEBufferSizeInRequestsForChroma[k] = st_vars->PTEBufferSizeInRequestsForLuma[k];
+ PTEBufferSizeInRequestsForChroma[k] = PTEBufferSizeInRequestsForLuma[k];
} else {
- st_vars->PTEBufferSizeInRequestsForLuma[k] = PTEBufferSizeInRequestsLuma;
- st_vars->PTEBufferSizeInRequestsForChroma[k] = PTEBufferSizeInRequestsChroma;
+ PTEBufferSizeInRequestsForLuma[k] = PTEBufferSizeInRequestsLuma;
+ PTEBufferSizeInRequestsForChroma[k] = PTEBufferSizeInRequestsChroma;
}
- st_vars->PDEAndMetaPTEBytesFrameC = dml32_CalculateVMAndRowBytes(
+ PDEAndMetaPTEBytesFrameC = dml32_CalculateVMAndRowBytes(
myPipe[k].ViewportStationary,
myPipe[k].DCCEnable,
myPipe[k].DPPPerSurface,
@@ -1982,21 +2003,21 @@ void dml32_CalculateVMRowAndSwath(
GPUVMMaxPageTableLevels,
GPUVMMinPageSizeKBytes[k],
HostVMMinPageSize,
- st_vars->PTEBufferSizeInRequestsForChroma[k],
+ PTEBufferSizeInRequestsForChroma[k],
myPipe[k].PitchC,
myPipe[k].DCCMetaPitchC,
myPipe[k].BlockWidthC,
myPipe[k].BlockHeightC,
/* Output */
- &st_vars->MetaRowByteC[k],
- &st_vars->PixelPTEBytesPerRowC[k],
+ &MetaRowByteC[k],
+ &PixelPTEBytesPerRowC[k],
&dpte_row_width_chroma_ub[k],
&dpte_row_height_chroma[k],
&dpte_row_height_linear_chroma[k],
- &st_vars->PixelPTEBytesPerRowC_one_row_per_frame[k],
- &st_vars->dpte_row_width_chroma_ub_one_row_per_frame[k],
- &st_vars->dpte_row_height_chroma_one_row_per_frame[k],
+ &PixelPTEBytesPerRowC_one_row_per_frame[k],
+ &dpte_row_width_chroma_ub_one_row_per_frame[k],
+ &dpte_row_height_chroma_one_row_per_frame[k],
&meta_req_width_chroma[k],
&meta_req_height_chroma[k],
&meta_row_width_chroma[k],
@@ -2024,19 +2045,19 @@ void dml32_CalculateVMRowAndSwath(
&VInitPreFillC[k],
&MaxNumSwathC[k]);
} else {
- st_vars->PTEBufferSizeInRequestsForLuma[k] = PTEBufferSizeInRequestsLuma + PTEBufferSizeInRequestsChroma;
- st_vars->PTEBufferSizeInRequestsForChroma[k] = 0;
- st_vars->PixelPTEBytesPerRowC[k] = 0;
- st_vars->PDEAndMetaPTEBytesFrameC = 0;
- st_vars->MetaRowByteC[k] = 0;
+ PTEBufferSizeInRequestsForLuma[k] = PTEBufferSizeInRequestsLuma + PTEBufferSizeInRequestsChroma;
+ PTEBufferSizeInRequestsForChroma[k] = 0;
+ PixelPTEBytesPerRowC[k] = 0;
+ PDEAndMetaPTEBytesFrameC = 0;
+ MetaRowByteC[k] = 0;
MaxNumSwathC[k] = 0;
PrefetchSourceLinesC[k] = 0;
- st_vars->dpte_row_height_chroma_one_row_per_frame[k] = 0;
- st_vars->dpte_row_width_chroma_ub_one_row_per_frame[k] = 0;
- st_vars->PixelPTEBytesPerRowC_one_row_per_frame[k] = 0;
+ dpte_row_height_chroma_one_row_per_frame[k] = 0;
+ dpte_row_width_chroma_ub_one_row_per_frame[k] = 0;
+ PixelPTEBytesPerRowC_one_row_per_frame[k] = 0;
}
- st_vars->PDEAndMetaPTEBytesFrameY = dml32_CalculateVMAndRowBytes(
+ PDEAndMetaPTEBytesFrameY = dml32_CalculateVMAndRowBytes(
myPipe[k].ViewportStationary,
myPipe[k].DCCEnable,
myPipe[k].DPPPerSurface,
@@ -2056,21 +2077,21 @@ void dml32_CalculateVMRowAndSwath(
GPUVMMaxPageTableLevels,
GPUVMMinPageSizeKBytes[k],
HostVMMinPageSize,
- st_vars->PTEBufferSizeInRequestsForLuma[k],
+ PTEBufferSizeInRequestsForLuma[k],
myPipe[k].PitchY,
myPipe[k].DCCMetaPitchY,
myPipe[k].BlockWidthY,
myPipe[k].BlockHeightY,
/* Output */
- &st_vars->MetaRowByteY[k],
- &st_vars->PixelPTEBytesPerRowY[k],
+ &MetaRowByteY[k],
+ &PixelPTEBytesPerRowY[k],
&dpte_row_width_luma_ub[k],
&dpte_row_height_luma[k],
&dpte_row_height_linear_luma[k],
- &st_vars->PixelPTEBytesPerRowY_one_row_per_frame[k],
- &st_vars->dpte_row_width_luma_ub_one_row_per_frame[k],
- &st_vars->dpte_row_height_luma_one_row_per_frame[k],
+ &PixelPTEBytesPerRowY_one_row_per_frame[k],
+ &dpte_row_width_luma_ub_one_row_per_frame[k],
+ &dpte_row_height_luma_one_row_per_frame[k],
&meta_req_width[k],
&meta_req_height[k],
&meta_row_width[k],
@@ -2098,19 +2119,19 @@ void dml32_CalculateVMRowAndSwath(
&VInitPreFillY[k],
&MaxNumSwathY[k]);
- PDEAndMetaPTEBytesFrame[k] = st_vars->PDEAndMetaPTEBytesFrameY + st_vars->PDEAndMetaPTEBytesFrameC;
- MetaRowByte[k] = st_vars->MetaRowByteY[k] + st_vars->MetaRowByteC[k];
+ PDEAndMetaPTEBytesFrame[k] = PDEAndMetaPTEBytesFrameY + PDEAndMetaPTEBytesFrameC;
+ MetaRowByte[k] = MetaRowByteY[k] + MetaRowByteC[k];
- if (st_vars->PixelPTEBytesPerRowY[k] <= 64 * st_vars->PTEBufferSizeInRequestsForLuma[k] &&
- st_vars->PixelPTEBytesPerRowC[k] <= 64 * st_vars->PTEBufferSizeInRequestsForChroma[k]) {
+ if (PixelPTEBytesPerRowY[k] <= 64 * PTEBufferSizeInRequestsForLuma[k] &&
+ PixelPTEBytesPerRowC[k] <= 64 * PTEBufferSizeInRequestsForChroma[k]) {
PTEBufferSizeNotExceeded[k] = true;
} else {
PTEBufferSizeNotExceeded[k] = false;
}
- st_vars->one_row_per_frame_fits_in_buffer[k] = (st_vars->PixelPTEBytesPerRowY_one_row_per_frame[k] <= 64 * 2 *
- st_vars->PTEBufferSizeInRequestsForLuma[k] &&
- st_vars->PixelPTEBytesPerRowC_one_row_per_frame[k] <= 64 * 2 * st_vars->PTEBufferSizeInRequestsForChroma[k]);
+ one_row_per_frame_fits_in_buffer[k] = (PixelPTEBytesPerRowY_one_row_per_frame[k] <= 64 * 2 *
+ PTEBufferSizeInRequestsForLuma[k] &&
+ PixelPTEBytesPerRowC_one_row_per_frame[k] <= 64 * 2 * PTEBufferSizeInRequestsForChroma[k]);
}
dml32_CalculateMALLUseForStaticScreen(
@@ -2118,7 +2139,7 @@ void dml32_CalculateVMRowAndSwath(
MALLAllocatedForDCN,
UseMALLForStaticScreen, // mode
SurfaceSizeInMALL,
- st_vars->one_row_per_frame_fits_in_buffer,
+ one_row_per_frame_fits_in_buffer,
/* Output */
				UsesMALLForStaticScreen); // boolean
@@ -2144,13 +2165,13 @@ void dml32_CalculateVMRowAndSwath(
!(UseMALLForPStateChange[k] == dm_use_mall_pstate_change_full_frame);
if (use_one_row_for_frame[k]) {
- dpte_row_height_luma[k] = st_vars->dpte_row_height_luma_one_row_per_frame[k];
- dpte_row_width_luma_ub[k] = st_vars->dpte_row_width_luma_ub_one_row_per_frame[k];
- st_vars->PixelPTEBytesPerRowY[k] = st_vars->PixelPTEBytesPerRowY_one_row_per_frame[k];
- dpte_row_height_chroma[k] = st_vars->dpte_row_height_chroma_one_row_per_frame[k];
- dpte_row_width_chroma_ub[k] = st_vars->dpte_row_width_chroma_ub_one_row_per_frame[k];
- st_vars->PixelPTEBytesPerRowC[k] = st_vars->PixelPTEBytesPerRowC_one_row_per_frame[k];
- PTEBufferSizeNotExceeded[k] = st_vars->one_row_per_frame_fits_in_buffer[k];
+ dpte_row_height_luma[k] = dpte_row_height_luma_one_row_per_frame[k];
+ dpte_row_width_luma_ub[k] = dpte_row_width_luma_ub_one_row_per_frame[k];
+ PixelPTEBytesPerRowY[k] = PixelPTEBytesPerRowY_one_row_per_frame[k];
+ dpte_row_height_chroma[k] = dpte_row_height_chroma_one_row_per_frame[k];
+ dpte_row_width_chroma_ub[k] = dpte_row_width_chroma_ub_one_row_per_frame[k];
+ PixelPTEBytesPerRowC[k] = PixelPTEBytesPerRowC_one_row_per_frame[k];
+ PTEBufferSizeNotExceeded[k] = one_row_per_frame_fits_in_buffer[k];
}
if (MetaRowByte[k] <= DCCMetaBufferSizeBytes)
@@ -2158,7 +2179,7 @@ void dml32_CalculateVMRowAndSwath(
else
DCCMetaBufferSizeNotExceeded[k] = false;
- PixelPTEBytesPerRow[k] = st_vars->PixelPTEBytesPerRowY[k] + st_vars->PixelPTEBytesPerRowC[k];
+ PixelPTEBytesPerRow[k] = PixelPTEBytesPerRowY[k] + PixelPTEBytesPerRowC[k];
if (use_one_row_for_frame[k])
PixelPTEBytesPerRow[k] = PixelPTEBytesPerRow[k] / 2;
@@ -2169,11 +2190,11 @@ void dml32_CalculateVMRowAndSwath(
myPipe[k].VRatioChroma,
myPipe[k].DCCEnable,
myPipe[k].HTotal / myPipe[k].PixelClock,
- st_vars->MetaRowByteY[k], st_vars->MetaRowByteC[k],
+ MetaRowByteY[k], MetaRowByteC[k],
meta_row_height[k],
meta_row_height_chroma[k],
- st_vars->PixelPTEBytesPerRowY[k],
- st_vars->PixelPTEBytesPerRowC[k],
+ PixelPTEBytesPerRowY[k],
+ PixelPTEBytesPerRowC[k],
dpte_row_height_luma[k],
dpte_row_height_chroma[k],
@@ -2189,12 +2210,12 @@ void dml32_CalculateVMRowAndSwath(
dml_print("DML::%s: k=%d, dpte_row_height_luma = %d\n", __func__, k, dpte_row_height_luma[k]);
dml_print("DML::%s: k=%d, dpte_row_width_luma_ub = %d\n",
__func__, k, dpte_row_width_luma_ub[k]);
- dml_print("DML::%s: k=%d, PixelPTEBytesPerRowY = %d\n", __func__, k, st_vars->PixelPTEBytesPerRowY[k]);
+ dml_print("DML::%s: k=%d, PixelPTEBytesPerRowY = %d\n", __func__, k, PixelPTEBytesPerRowY[k]);
dml_print("DML::%s: k=%d, dpte_row_height_chroma = %d\n",
__func__, k, dpte_row_height_chroma[k]);
dml_print("DML::%s: k=%d, dpte_row_width_chroma_ub = %d\n",
__func__, k, dpte_row_width_chroma_ub[k]);
- dml_print("DML::%s: k=%d, PixelPTEBytesPerRowC = %d\n", __func__, k, st_vars->PixelPTEBytesPerRowC[k]);
+ dml_print("DML::%s: k=%d, PixelPTEBytesPerRowC = %d\n", __func__, k, PixelPTEBytesPerRowC[k]);
dml_print("DML::%s: k=%d, PixelPTEBytesPerRow = %d\n", __func__, k, PixelPTEBytesPerRow[k]);
dml_print("DML::%s: k=%d, PTEBufferSizeNotExceeded = %d\n",
__func__, k, PTEBufferSizeNotExceeded[k]);
@@ -3342,7 +3363,6 @@ double dml32_CalculateExtraLatency(
} // CalculateExtraLatency
bool dml32_CalculatePrefetchSchedule(
- struct dml32_CalculatePrefetchSchedule *st_vars,
double HostVMInefficiencyFactor,
DmlPipe *myPipe,
unsigned int DSCDelay,
@@ -3406,18 +3426,45 @@ bool dml32_CalculatePrefetchSchedule(
double *VReadyOffsetPix)
{
bool MyError = false;
-
- st_vars->TimeForFetchingMetaPTE = 0;
- st_vars->TimeForFetchingRowInVBlank = 0;
- st_vars->LinesToRequestPrefetchPixelData = 0;
- st_vars->max_vratio_pre = __DML_MAX_VRATIO_PRE__;
- st_vars->Tsw_est1 = 0;
- st_vars->Tsw_est3 = 0;
+ unsigned int DPPCycles, DISPCLKCycles;
+ double DSTTotalPixelsAfterScaler;
+ double LineTime;
+ double dst_y_prefetch_equ;
+ double prefetch_bw_oto;
+ double Tvm_oto;
+ double Tr0_oto;
+ double Tvm_oto_lines;
+ double Tr0_oto_lines;
+ double dst_y_prefetch_oto;
+ double TimeForFetchingMetaPTE = 0;
+ double TimeForFetchingRowInVBlank = 0;
+ double LinesToRequestPrefetchPixelData = 0;
+ unsigned int HostVMDynamicLevelsTrips;
+ double trip_to_mem;
+ double Tvm_trips;
+ double Tr0_trips;
+ double Tvm_trips_rounded;
+ double Tr0_trips_rounded;
+ double Lsw_oto;
+ double Tpre_rounded;
+ double prefetch_bw_equ;
+ double Tvm_equ;
+ double Tr0_equ;
+ double Tdmbf;
+ double Tdmec;
+ double Tdmsks;
+ double prefetch_sw_bytes;
+ double bytes_pp;
+ double dep_bytes;
+ unsigned int max_vratio_pre = __DML_MAX_VRATIO_PRE__;
+ double min_Lsw;
+ double Tsw_est1 = 0;
+ double Tsw_est3 = 0;
if (GPUVMEnable == true && HostVMEnable == true)
- st_vars->HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
+ HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
else
- st_vars->HostVMDynamicLevelsTrips = 0;
+ HostVMDynamicLevelsTrips = 0;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: GPUVMEnable = %d\n", __func__, GPUVMEnable);
dml_print("DML::%s: GPUVMPageTableLevels = %d\n", __func__, GPUVMPageTableLevels);
@@ -3440,19 +3487,19 @@ bool dml32_CalculatePrefetchSchedule(
TSetup,
/* output */
- &st_vars->Tdmbf,
- &st_vars->Tdmec,
- &st_vars->Tdmsks,
+ &Tdmbf,
+ &Tdmec,
+ &Tdmsks,
VUpdateOffsetPix,
VUpdateWidthPix,
VReadyOffsetPix);
- st_vars->LineTime = myPipe->HTotal / myPipe->PixelClock;
- st_vars->trip_to_mem = UrgentLatency;
- st_vars->Tvm_trips = UrgentExtraLatency + st_vars->trip_to_mem * (GPUVMPageTableLevels * (st_vars->HostVMDynamicLevelsTrips + 1) - 1);
+ LineTime = myPipe->HTotal / myPipe->PixelClock;
+ trip_to_mem = UrgentLatency;
+ Tvm_trips = UrgentExtraLatency + trip_to_mem * (GPUVMPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1);
if (DynamicMetadataVMEnabled == true)
- *Tdmdl = TWait + st_vars->Tvm_trips + st_vars->trip_to_mem;
+ *Tdmdl = TWait + Tvm_trips + trip_to_mem;
else
*Tdmdl = TWait + UrgentExtraLatency;
@@ -3462,15 +3509,15 @@ bool dml32_CalculatePrefetchSchedule(
#endif
if (DynamicMetadataEnable == true) {
- if (VStartup * st_vars->LineTime < *TSetup + *Tdmdl + st_vars->Tdmbf + st_vars->Tdmec + st_vars->Tdmsks) {
+ if (VStartup * LineTime < *TSetup + *Tdmdl + Tdmbf + Tdmec + Tdmsks) {
*NotEnoughTimeForDynamicMetadata = true;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Not Enough Time for Dynamic Meta!\n", __func__);
dml_print("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n",
- __func__, st_vars->Tdmbf);
- dml_print("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, st_vars->Tdmec);
+ __func__, Tdmbf);
+ dml_print("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, Tdmec);
dml_print("DML::%s: Tdmsks: %fus - time before active dmd must complete transmission at dio\n",
- __func__, st_vars->Tdmsks);
+ __func__, Tdmsks);
dml_print("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd\n",
__func__, *Tdmdl);
#endif
@@ -3482,21 +3529,21 @@ bool dml32_CalculatePrefetchSchedule(
}
*Tdmdl_vm = (DynamicMetadataEnable == true && DynamicMetadataVMEnabled == true &&
- GPUVMEnable == true ? TWait + st_vars->Tvm_trips : 0);
+ GPUVMEnable == true ? TWait + Tvm_trips : 0);
if (myPipe->ScalerEnabled)
- st_vars->DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCL;
+ DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCL;
else
- st_vars->DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCLLBOnly;
+ DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCLLBOnly;
- st_vars->DPPCycles = st_vars->DPPCycles + myPipe->NumberOfCursors * DPPCLKDelayCNVCCursor;
+ DPPCycles = DPPCycles + myPipe->NumberOfCursors * DPPCLKDelayCNVCCursor;
- st_vars->DISPCLKCycles = DISPCLKDelaySubtotal;
+ DISPCLKCycles = DISPCLKDelaySubtotal;
if (myPipe->Dppclk == 0.0 || myPipe->Dispclk == 0.0)
return true;
- *DSTXAfterScaler = st_vars->DPPCycles * myPipe->PixelClock / myPipe->Dppclk + st_vars->DISPCLKCycles *
+ *DSTXAfterScaler = DPPCycles * myPipe->PixelClock / myPipe->Dppclk + DISPCLKCycles *
myPipe->PixelClock / myPipe->Dispclk + DSCDelay;
*DSTXAfterScaler = *DSTXAfterScaler + (myPipe->ODMMode != dm_odm_combine_mode_disabled ? 18 : 0)
@@ -3506,10 +3553,10 @@ bool dml32_CalculatePrefetchSchedule(
+ ((myPipe->ODMMode == dm_odm_mode_mso_1to4) ? myPipe->HActive * 3 / 4 : 0);
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: DPPCycles: %d\n", __func__, st_vars->DPPCycles);
+ dml_print("DML::%s: DPPCycles: %d\n", __func__, DPPCycles);
dml_print("DML::%s: PixelClock: %f\n", __func__, myPipe->PixelClock);
dml_print("DML::%s: Dppclk: %f\n", __func__, myPipe->Dppclk);
- dml_print("DML::%s: DISPCLKCycles: %d\n", __func__, st_vars->DISPCLKCycles);
+ dml_print("DML::%s: DISPCLKCycles: %d\n", __func__, DISPCLKCycles);
dml_print("DML::%s: DISPCLK: %f\n", __func__, myPipe->Dispclk);
dml_print("DML::%s: DSCDelay: %d\n", __func__, DSCDelay);
dml_print("DML::%s: ODMMode: %d\n", __func__, myPipe->ODMMode);
@@ -3522,9 +3569,9 @@ bool dml32_CalculatePrefetchSchedule(
else
*DSTYAfterScaler = 0;
- st_vars->DSTTotalPixelsAfterScaler = *DSTYAfterScaler * myPipe->HTotal + *DSTXAfterScaler;
- *DSTYAfterScaler = dml_floor(st_vars->DSTTotalPixelsAfterScaler / myPipe->HTotal, 1);
- *DSTXAfterScaler = st_vars->DSTTotalPixelsAfterScaler - ((double) (*DSTYAfterScaler * myPipe->HTotal));
+ DSTTotalPixelsAfterScaler = *DSTYAfterScaler * myPipe->HTotal + *DSTXAfterScaler;
+ *DSTYAfterScaler = dml_floor(DSTTotalPixelsAfterScaler / myPipe->HTotal, 1);
+ *DSTXAfterScaler = DSTTotalPixelsAfterScaler - ((double) (*DSTYAfterScaler * myPipe->HTotal));
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: DSTXAfterScaler: %d (final)\n", __func__, *DSTXAfterScaler);
dml_print("DML::%s: DSTYAfterScaler: %d (final)\n", __func__, *DSTYAfterScaler);
@@ -3532,132 +3579,132 @@ bool dml32_CalculatePrefetchSchedule(
MyError = false;
- st_vars->Tr0_trips = st_vars->trip_to_mem * (st_vars->HostVMDynamicLevelsTrips + 1);
+ Tr0_trips = trip_to_mem * (HostVMDynamicLevelsTrips + 1);
if (GPUVMEnable == true) {
- st_vars->Tvm_trips_rounded = dml_ceil(4.0 * st_vars->Tvm_trips / st_vars->LineTime, 1.0) / 4.0 * st_vars->LineTime;
- st_vars->Tr0_trips_rounded = dml_ceil(4.0 * st_vars->Tr0_trips / st_vars->LineTime, 1.0) / 4.0 * st_vars->LineTime;
+ Tvm_trips_rounded = dml_ceil(4.0 * Tvm_trips / LineTime, 1.0) / 4.0 * LineTime;
+ Tr0_trips_rounded = dml_ceil(4.0 * Tr0_trips / LineTime, 1.0) / 4.0 * LineTime;
if (GPUVMPageTableLevels >= 3) {
- *Tno_bw = UrgentExtraLatency + st_vars->trip_to_mem *
- (double) ((GPUVMPageTableLevels - 2) * (st_vars->HostVMDynamicLevelsTrips + 1) - 1);
+ *Tno_bw = UrgentExtraLatency + trip_to_mem *
+ (double) ((GPUVMPageTableLevels - 2) * (HostVMDynamicLevelsTrips + 1) - 1);
} else if (GPUVMPageTableLevels == 1 && myPipe->DCCEnable != true) {
- st_vars->Tr0_trips_rounded = dml_ceil(4.0 * UrgentExtraLatency / st_vars->LineTime, 1.0) /
- 4.0 * st_vars->LineTime; // VBA_ERROR
+ Tr0_trips_rounded = dml_ceil(4.0 * UrgentExtraLatency / LineTime, 1.0) /
+ 4.0 * LineTime; // VBA_ERROR
*Tno_bw = UrgentExtraLatency;
} else {
*Tno_bw = 0;
}
} else if (myPipe->DCCEnable == true) {
- st_vars->Tvm_trips_rounded = st_vars->LineTime / 4.0;
- st_vars->Tr0_trips_rounded = dml_ceil(4.0 * st_vars->Tr0_trips / st_vars->LineTime, 1.0) / 4.0 * st_vars->LineTime;
+ Tvm_trips_rounded = LineTime / 4.0;
+ Tr0_trips_rounded = dml_ceil(4.0 * Tr0_trips / LineTime, 1.0) / 4.0 * LineTime;
*Tno_bw = 0;
} else {
- st_vars->Tvm_trips_rounded = st_vars->LineTime / 4.0;
- st_vars->Tr0_trips_rounded = st_vars->LineTime / 2.0;
+ Tvm_trips_rounded = LineTime / 4.0;
+ Tr0_trips_rounded = LineTime / 2.0;
*Tno_bw = 0;
}
- st_vars->Tvm_trips_rounded = dml_max(st_vars->Tvm_trips_rounded, st_vars->LineTime / 4.0);
- st_vars->Tr0_trips_rounded = dml_max(st_vars->Tr0_trips_rounded, st_vars->LineTime / 4.0);
+ Tvm_trips_rounded = dml_max(Tvm_trips_rounded, LineTime / 4.0);
+ Tr0_trips_rounded = dml_max(Tr0_trips_rounded, LineTime / 4.0);
if (myPipe->SourcePixelFormat == dm_420_8 || myPipe->SourcePixelFormat == dm_420_10
|| myPipe->SourcePixelFormat == dm_420_12) {
- st_vars->bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC / 4;
+ bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC / 4;
} else {
- st_vars->bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC;
+ bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC;
}
- st_vars->prefetch_sw_bytes = PrefetchSourceLinesY * swath_width_luma_ub * myPipe->BytePerPixelY
+ prefetch_sw_bytes = PrefetchSourceLinesY * swath_width_luma_ub * myPipe->BytePerPixelY
+ PrefetchSourceLinesC * swath_width_chroma_ub * myPipe->BytePerPixelC;
- st_vars->prefetch_bw_oto = dml_max(st_vars->bytes_pp * myPipe->PixelClock / myPipe->DPPPerSurface,
- st_vars->prefetch_sw_bytes / (dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * st_vars->LineTime));
+ prefetch_bw_oto = dml_max(bytes_pp * myPipe->PixelClock / myPipe->DPPPerSurface,
+ prefetch_sw_bytes / (dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime));
- st_vars->min_Lsw = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) / st_vars->max_vratio_pre;
- st_vars->min_Lsw = dml_max(st_vars->min_Lsw, 1.0);
- st_vars->Lsw_oto = dml_ceil(4.0 * dml_max(st_vars->prefetch_sw_bytes / st_vars->prefetch_bw_oto / st_vars->LineTime, st_vars->min_Lsw), 1.0) / 4.0;
+ min_Lsw = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) / max_vratio_pre;
+ min_Lsw = dml_max(min_Lsw, 1.0);
+ Lsw_oto = dml_ceil(4.0 * dml_max(prefetch_sw_bytes / prefetch_bw_oto / LineTime, min_Lsw), 1.0) / 4.0;
if (GPUVMEnable == true) {
- st_vars->Tvm_oto = dml_max3(
- st_vars->Tvm_trips,
- *Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / st_vars->prefetch_bw_oto,
- st_vars->LineTime / 4.0);
+ Tvm_oto = dml_max3(
+ Tvm_trips,
+ *Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_oto,
+ LineTime / 4.0);
} else
- st_vars->Tvm_oto = st_vars->LineTime / 4.0;
+ Tvm_oto = LineTime / 4.0;
if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
- st_vars->Tr0_oto = dml_max4(
- st_vars->Tr0_trips,
- (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / st_vars->prefetch_bw_oto,
- (st_vars->LineTime - st_vars->Tvm_oto)/2.0,
- st_vars->LineTime / 4.0);
+ Tr0_oto = dml_max4(
+ Tr0_trips,
+ (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_oto,
+ (LineTime - Tvm_oto)/2.0,
+ LineTime / 4.0);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Tr0_oto max0 = %f\n", __func__,
- (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / st_vars->prefetch_bw_oto);
- dml_print("DML::%s: Tr0_oto max1 = %f\n", __func__, st_vars->Tr0_trips);
- dml_print("DML::%s: Tr0_oto max2 = %f\n", __func__, st_vars->LineTime - st_vars->Tvm_oto);
- dml_print("DML::%s: Tr0_oto max3 = %f\n", __func__, st_vars->LineTime / 4);
+ (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_oto);
+ dml_print("DML::%s: Tr0_oto max1 = %f\n", __func__, Tr0_trips);
+ dml_print("DML::%s: Tr0_oto max2 = %f\n", __func__, LineTime - Tvm_oto);
+ dml_print("DML::%s: Tr0_oto max3 = %f\n", __func__, LineTime / 4);
#endif
} else
- st_vars->Tr0_oto = (st_vars->LineTime - st_vars->Tvm_oto) / 2.0;
+ Tr0_oto = (LineTime - Tvm_oto) / 2.0;
- st_vars->Tvm_oto_lines = dml_ceil(4.0 * st_vars->Tvm_oto / st_vars->LineTime, 1) / 4.0;
- st_vars->Tr0_oto_lines = dml_ceil(4.0 * st_vars->Tr0_oto / st_vars->LineTime, 1) / 4.0;
- st_vars->dst_y_prefetch_oto = st_vars->Tvm_oto_lines + 2 * st_vars->Tr0_oto_lines + st_vars->Lsw_oto;
+ Tvm_oto_lines = dml_ceil(4.0 * Tvm_oto / LineTime, 1) / 4.0;
+ Tr0_oto_lines = dml_ceil(4.0 * Tr0_oto / LineTime, 1) / 4.0;
+ dst_y_prefetch_oto = Tvm_oto_lines + 2 * Tr0_oto_lines + Lsw_oto;
- st_vars->dst_y_prefetch_equ = VStartup - (*TSetup + dml_max(TWait + TCalc, *Tdmdl)) / st_vars->LineTime -
+ dst_y_prefetch_equ = VStartup - (*TSetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime -
(*DSTYAfterScaler + (double) *DSTXAfterScaler / (double) myPipe->HTotal);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: HTotal = %d\n", __func__, myPipe->HTotal);
- dml_print("DML::%s: min_Lsw = %f\n", __func__, st_vars->min_Lsw);
+ dml_print("DML::%s: min_Lsw = %f\n", __func__, min_Lsw);
dml_print("DML::%s: *Tno_bw = %f\n", __func__, *Tno_bw);
dml_print("DML::%s: UrgentExtraLatency = %f\n", __func__, UrgentExtraLatency);
- dml_print("DML::%s: trip_to_mem = %f\n", __func__, st_vars->trip_to_mem);
+ dml_print("DML::%s: trip_to_mem = %f\n", __func__, trip_to_mem);
dml_print("DML::%s: BytePerPixelY = %d\n", __func__, myPipe->BytePerPixelY);
dml_print("DML::%s: PrefetchSourceLinesY = %f\n", __func__, PrefetchSourceLinesY);
dml_print("DML::%s: swath_width_luma_ub = %d\n", __func__, swath_width_luma_ub);
dml_print("DML::%s: BytePerPixelC = %d\n", __func__, myPipe->BytePerPixelC);
dml_print("DML::%s: PrefetchSourceLinesC = %f\n", __func__, PrefetchSourceLinesC);
dml_print("DML::%s: swath_width_chroma_ub = %d\n", __func__, swath_width_chroma_ub);
- dml_print("DML::%s: prefetch_sw_bytes = %f\n", __func__, st_vars->prefetch_sw_bytes);
- dml_print("DML::%s: bytes_pp = %f\n", __func__, st_vars->bytes_pp);
+ dml_print("DML::%s: prefetch_sw_bytes = %f\n", __func__, prefetch_sw_bytes);
+ dml_print("DML::%s: bytes_pp = %f\n", __func__, bytes_pp);
dml_print("DML::%s: PDEAndMetaPTEBytesFrame = %d\n", __func__, PDEAndMetaPTEBytesFrame);
dml_print("DML::%s: MetaRowByte = %d\n", __func__, MetaRowByte);
dml_print("DML::%s: PixelPTEBytesPerRow = %d\n", __func__, PixelPTEBytesPerRow);
dml_print("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, HostVMInefficiencyFactor);
- dml_print("DML::%s: Tvm_trips = %f\n", __func__, st_vars->Tvm_trips);
- dml_print("DML::%s: Tr0_trips = %f\n", __func__, st_vars->Tr0_trips);
- dml_print("DML::%s: prefetch_bw_oto = %f\n", __func__, st_vars->prefetch_bw_oto);
- dml_print("DML::%s: Tr0_oto = %f\n", __func__, st_vars->Tr0_oto);
- dml_print("DML::%s: Tvm_oto = %f\n", __func__, st_vars->Tvm_oto);
- dml_print("DML::%s: Tvm_oto_lines = %f\n", __func__, st_vars->Tvm_oto_lines);
- dml_print("DML::%s: Tr0_oto_lines = %f\n", __func__, st_vars->Tr0_oto_lines);
- dml_print("DML::%s: Lsw_oto = %f\n", __func__, st_vars->Lsw_oto);
- dml_print("DML::%s: dst_y_prefetch_oto = %f\n", __func__, st_vars->dst_y_prefetch_oto);
- dml_print("DML::%s: dst_y_prefetch_equ = %f\n", __func__, st_vars->dst_y_prefetch_equ);
+ dml_print("DML::%s: Tvm_trips = %f\n", __func__, Tvm_trips);
+ dml_print("DML::%s: Tr0_trips = %f\n", __func__, Tr0_trips);
+ dml_print("DML::%s: prefetch_bw_oto = %f\n", __func__, prefetch_bw_oto);
+ dml_print("DML::%s: Tr0_oto = %f\n", __func__, Tr0_oto);
+ dml_print("DML::%s: Tvm_oto = %f\n", __func__, Tvm_oto);
+ dml_print("DML::%s: Tvm_oto_lines = %f\n", __func__, Tvm_oto_lines);
+ dml_print("DML::%s: Tr0_oto_lines = %f\n", __func__, Tr0_oto_lines);
+ dml_print("DML::%s: Lsw_oto = %f\n", __func__, Lsw_oto);
+ dml_print("DML::%s: dst_y_prefetch_oto = %f\n", __func__, dst_y_prefetch_oto);
+ dml_print("DML::%s: dst_y_prefetch_equ = %f\n", __func__, dst_y_prefetch_equ);
#endif
- st_vars->dst_y_prefetch_equ = dml_floor(4.0 * (st_vars->dst_y_prefetch_equ + 0.125), 1) / 4.0;
- st_vars->Tpre_rounded = st_vars->dst_y_prefetch_equ * st_vars->LineTime;
+ dst_y_prefetch_equ = dml_floor(4.0 * (dst_y_prefetch_equ + 0.125), 1) / 4.0;
+ Tpre_rounded = dst_y_prefetch_equ * LineTime;
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, st_vars->dst_y_prefetch_equ);
- dml_print("DML::%s: LineTime: %f\n", __func__, st_vars->LineTime);
+ dml_print("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, dst_y_prefetch_equ);
+ dml_print("DML::%s: LineTime: %f\n", __func__, LineTime);
dml_print("DML::%s: VStartup: %d\n", __func__, VStartup);
dml_print("DML::%s: Tvstartup: %fus - time between vstartup and first pixel of active\n",
- __func__, VStartup * st_vars->LineTime);
+ __func__, VStartup * LineTime);
dml_print("DML::%s: TSetup: %fus - time from vstartup to vready\n", __func__, *TSetup);
dml_print("DML::%s: TCalc: %fus - time for calculations in dchub starting at vready\n", __func__, TCalc);
- dml_print("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, st_vars->Tdmbf);
- dml_print("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, st_vars->Tdmec);
+ dml_print("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, Tdmbf);
+ dml_print("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, Tdmec);
dml_print("DML::%s: Tdmdl_vm: %fus - time for vm stages of dmd\n", __func__, *Tdmdl_vm);
dml_print("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd\n", __func__, *Tdmdl);
dml_print("DML::%s: DSTYAfterScaler: %d lines - number of lines of pipeline and buffer delay after scaler\n",
__func__, *DSTYAfterScaler);
#endif
- st_vars->dep_bytes = dml_max(PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor,
+ dep_bytes = dml_max(PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor,
MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor);
- if (st_vars->prefetch_sw_bytes < st_vars->dep_bytes)
- st_vars->prefetch_sw_bytes = 2 * st_vars->dep_bytes;
+ if (prefetch_sw_bytes < dep_bytes)
+ prefetch_sw_bytes = 2 * dep_bytes;
*PrefetchBandwidth = 0;
*DestinationLinesToRequestVMInVBlank = 0;
@@ -3665,61 +3712,61 @@ bool dml32_CalculatePrefetchSchedule(
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
- if (st_vars->dst_y_prefetch_equ > 1) {
+ if (dst_y_prefetch_equ > 1) {
double PrefetchBandwidth1;
double PrefetchBandwidth2;
double PrefetchBandwidth3;
double PrefetchBandwidth4;
- if (st_vars->Tpre_rounded - *Tno_bw > 0) {
+ if (Tpre_rounded - *Tno_bw > 0) {
PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte
+ 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor
- + st_vars->prefetch_sw_bytes) / (st_vars->Tpre_rounded - *Tno_bw);
- st_vars->Tsw_est1 = st_vars->prefetch_sw_bytes / PrefetchBandwidth1;
+ + prefetch_sw_bytes) / (Tpre_rounded - *Tno_bw);
+ Tsw_est1 = prefetch_sw_bytes / PrefetchBandwidth1;
} else
PrefetchBandwidth1 = 0;
- if (VStartup == MaxVStartup && (st_vars->Tsw_est1 / st_vars->LineTime < st_vars->min_Lsw)
- && st_vars->Tpre_rounded - st_vars->min_Lsw * st_vars->LineTime - 0.75 * st_vars->LineTime - *Tno_bw > 0) {
+ if (VStartup == MaxVStartup && (Tsw_est1 / LineTime < min_Lsw)
+ && Tpre_rounded - min_Lsw * LineTime - 0.75 * LineTime - *Tno_bw > 0) {
PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte
+ 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor)
- / (st_vars->Tpre_rounded - st_vars->min_Lsw * st_vars->LineTime - 0.75 * st_vars->LineTime - *Tno_bw);
+ / (Tpre_rounded - min_Lsw * LineTime - 0.75 * LineTime - *Tno_bw);
}
- if (st_vars->Tpre_rounded - *Tno_bw - 2 * st_vars->Tr0_trips_rounded > 0)
- PrefetchBandwidth2 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + st_vars->prefetch_sw_bytes) /
- (st_vars->Tpre_rounded - *Tno_bw - 2 * st_vars->Tr0_trips_rounded);
+ if (Tpre_rounded - *Tno_bw - 2 * Tr0_trips_rounded > 0)
+ PrefetchBandwidth2 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + prefetch_sw_bytes) /
+ (Tpre_rounded - *Tno_bw - 2 * Tr0_trips_rounded);
else
PrefetchBandwidth2 = 0;
- if (st_vars->Tpre_rounded - st_vars->Tvm_trips_rounded > 0) {
+ if (Tpre_rounded - Tvm_trips_rounded > 0) {
PrefetchBandwidth3 = (2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor
- + st_vars->prefetch_sw_bytes) / (st_vars->Tpre_rounded - st_vars->Tvm_trips_rounded);
- st_vars->Tsw_est3 = st_vars->prefetch_sw_bytes / PrefetchBandwidth3;
+ + prefetch_sw_bytes) / (Tpre_rounded - Tvm_trips_rounded);
+ Tsw_est3 = prefetch_sw_bytes / PrefetchBandwidth3;
} else
PrefetchBandwidth3 = 0;
if (VStartup == MaxVStartup &&
- (st_vars->Tsw_est3 / st_vars->LineTime < st_vars->min_Lsw) && st_vars->Tpre_rounded - st_vars->min_Lsw * st_vars->LineTime - 0.75 *
- st_vars->LineTime - st_vars->Tvm_trips_rounded > 0) {
+ (Tsw_est3 / LineTime < min_Lsw) && Tpre_rounded - min_Lsw * LineTime - 0.75 *
+ LineTime - Tvm_trips_rounded > 0) {
PrefetchBandwidth3 = (2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor)
- / (st_vars->Tpre_rounded - st_vars->min_Lsw * st_vars->LineTime - 0.75 * st_vars->LineTime - st_vars->Tvm_trips_rounded);
+ / (Tpre_rounded - min_Lsw * LineTime - 0.75 * LineTime - Tvm_trips_rounded);
}
- if (st_vars->Tpre_rounded - st_vars->Tvm_trips_rounded - 2 * st_vars->Tr0_trips_rounded > 0) {
- PrefetchBandwidth4 = st_vars->prefetch_sw_bytes /
- (st_vars->Tpre_rounded - st_vars->Tvm_trips_rounded - 2 * st_vars->Tr0_trips_rounded);
+ if (Tpre_rounded - Tvm_trips_rounded - 2 * Tr0_trips_rounded > 0) {
+ PrefetchBandwidth4 = prefetch_sw_bytes /
+ (Tpre_rounded - Tvm_trips_rounded - 2 * Tr0_trips_rounded);
} else {
PrefetchBandwidth4 = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: Tpre_rounded: %f\n", __func__, st_vars->Tpre_rounded);
+ dml_print("DML::%s: Tpre_rounded: %f\n", __func__, Tpre_rounded);
dml_print("DML::%s: Tno_bw: %f\n", __func__, *Tno_bw);
- dml_print("DML::%s: Tvm_trips_rounded: %f\n", __func__, st_vars->Tvm_trips_rounded);
- dml_print("DML::%s: Tsw_est1: %f\n", __func__, st_vars->Tsw_est1);
- dml_print("DML::%s: Tsw_est3: %f\n", __func__, st_vars->Tsw_est3);
+ dml_print("DML::%s: Tvm_trips_rounded: %f\n", __func__, Tvm_trips_rounded);
+ dml_print("DML::%s: Tsw_est1: %f\n", __func__, Tsw_est1);
+ dml_print("DML::%s: Tsw_est3: %f\n", __func__, Tsw_est3);
dml_print("DML::%s: PrefetchBandwidth1: %f\n", __func__, PrefetchBandwidth1);
dml_print("DML::%s: PrefetchBandwidth2: %f\n", __func__, PrefetchBandwidth2);
dml_print("DML::%s: PrefetchBandwidth3: %f\n", __func__, PrefetchBandwidth3);
@@ -3732,9 +3779,9 @@ bool dml32_CalculatePrefetchSchedule(
if (PrefetchBandwidth1 > 0) {
if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth1
- >= st_vars->Tvm_trips_rounded
+ >= Tvm_trips_rounded
&& (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor)
- / PrefetchBandwidth1 >= st_vars->Tr0_trips_rounded) {
+ / PrefetchBandwidth1 >= Tr0_trips_rounded) {
Case1OK = true;
} else {
Case1OK = false;
@@ -3745,9 +3792,9 @@ bool dml32_CalculatePrefetchSchedule(
if (PrefetchBandwidth2 > 0) {
if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth2
- >= st_vars->Tvm_trips_rounded
+ >= Tvm_trips_rounded
&& (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor)
- / PrefetchBandwidth2 < st_vars->Tr0_trips_rounded) {
+ / PrefetchBandwidth2 < Tr0_trips_rounded) {
Case2OK = true;
} else {
Case2OK = false;
@@ -3758,9 +3805,9 @@ bool dml32_CalculatePrefetchSchedule(
if (PrefetchBandwidth3 > 0) {
if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth3 <
- st_vars->Tvm_trips_rounded && (MetaRowByte + PixelPTEBytesPerRow *
+ Tvm_trips_rounded && (MetaRowByte + PixelPTEBytesPerRow *
HostVMInefficiencyFactor) / PrefetchBandwidth3 >=
- st_vars->Tr0_trips_rounded) {
+ Tr0_trips_rounded) {
Case3OK = true;
} else {
Case3OK = false;
@@ -3770,80 +3817,80 @@ bool dml32_CalculatePrefetchSchedule(
}
if (Case1OK)
- st_vars->prefetch_bw_equ = PrefetchBandwidth1;
+ prefetch_bw_equ = PrefetchBandwidth1;
else if (Case2OK)
- st_vars->prefetch_bw_equ = PrefetchBandwidth2;
+ prefetch_bw_equ = PrefetchBandwidth2;
else if (Case3OK)
- st_vars->prefetch_bw_equ = PrefetchBandwidth3;
+ prefetch_bw_equ = PrefetchBandwidth3;
else
- st_vars->prefetch_bw_equ = PrefetchBandwidth4;
+ prefetch_bw_equ = PrefetchBandwidth4;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: Case1OK: %d\n", __func__, Case1OK);
dml_print("DML::%s: Case2OK: %d\n", __func__, Case2OK);
dml_print("DML::%s: Case3OK: %d\n", __func__, Case3OK);
- dml_print("DML::%s: prefetch_bw_equ: %f\n", __func__, st_vars->prefetch_bw_equ);
+ dml_print("DML::%s: prefetch_bw_equ: %f\n", __func__, prefetch_bw_equ);
#endif
- if (st_vars->prefetch_bw_equ > 0) {
+ if (prefetch_bw_equ > 0) {
if (GPUVMEnable == true) {
- st_vars->Tvm_equ = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame *
- HostVMInefficiencyFactor / st_vars->prefetch_bw_equ,
- st_vars->Tvm_trips, st_vars->LineTime / 4);
+ Tvm_equ = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame *
+ HostVMInefficiencyFactor / prefetch_bw_equ,
+ Tvm_trips, LineTime / 4);
} else {
- st_vars->Tvm_equ = st_vars->LineTime / 4;
+ Tvm_equ = LineTime / 4;
}
if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
- st_vars->Tr0_equ = dml_max4((MetaRowByte + PixelPTEBytesPerRow *
- HostVMInefficiencyFactor) / st_vars->prefetch_bw_equ, st_vars->Tr0_trips,
- (st_vars->LineTime - st_vars->Tvm_equ) / 2, st_vars->LineTime / 4);
+ Tr0_equ = dml_max4((MetaRowByte + PixelPTEBytesPerRow *
+ HostVMInefficiencyFactor) / prefetch_bw_equ, Tr0_trips,
+ (LineTime - Tvm_equ) / 2, LineTime / 4);
} else {
- st_vars->Tr0_equ = (st_vars->LineTime - st_vars->Tvm_equ) / 2;
+ Tr0_equ = (LineTime - Tvm_equ) / 2;
}
} else {
- st_vars->Tvm_equ = 0;
- st_vars->Tr0_equ = 0;
+ Tvm_equ = 0;
+ Tr0_equ = 0;
#ifdef __DML_VBA_DEBUG__
dml_print("DML: prefetch_bw_equ equals 0! %s:%d\n", __FILE__, __LINE__);
#endif
}
}
- if (st_vars->dst_y_prefetch_oto < st_vars->dst_y_prefetch_equ) {
- *DestinationLinesForPrefetch = st_vars->dst_y_prefetch_oto;
- st_vars->TimeForFetchingMetaPTE = st_vars->Tvm_oto;
- st_vars->TimeForFetchingRowInVBlank = st_vars->Tr0_oto;
- *PrefetchBandwidth = st_vars->prefetch_bw_oto;
+ if (dst_y_prefetch_oto < dst_y_prefetch_equ) {
+ *DestinationLinesForPrefetch = dst_y_prefetch_oto;
+ TimeForFetchingMetaPTE = Tvm_oto;
+ TimeForFetchingRowInVBlank = Tr0_oto;
+ *PrefetchBandwidth = prefetch_bw_oto;
} else {
- *DestinationLinesForPrefetch = st_vars->dst_y_prefetch_equ;
- st_vars->TimeForFetchingMetaPTE = st_vars->Tvm_equ;
- st_vars->TimeForFetchingRowInVBlank = st_vars->Tr0_equ;
- *PrefetchBandwidth = st_vars->prefetch_bw_equ;
+ *DestinationLinesForPrefetch = dst_y_prefetch_equ;
+ TimeForFetchingMetaPTE = Tvm_equ;
+ TimeForFetchingRowInVBlank = Tr0_equ;
+ *PrefetchBandwidth = prefetch_bw_equ;
}
- *DestinationLinesToRequestVMInVBlank = dml_ceil(4.0 * st_vars->TimeForFetchingMetaPTE / st_vars->LineTime, 1.0) / 4.0;
+ *DestinationLinesToRequestVMInVBlank = dml_ceil(4.0 * TimeForFetchingMetaPTE / LineTime, 1.0) / 4.0;
*DestinationLinesToRequestRowInVBlank =
- dml_ceil(4.0 * st_vars->TimeForFetchingRowInVBlank / st_vars->LineTime, 1.0) / 4.0;
+ dml_ceil(4.0 * TimeForFetchingRowInVBlank / LineTime, 1.0) / 4.0;
- st_vars->LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch -
+ LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch -
*DestinationLinesToRequestVMInVBlank - 2 * *DestinationLinesToRequestRowInVBlank;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: DestinationLinesForPrefetch = %f\n", __func__, *DestinationLinesForPrefetch);
dml_print("DML::%s: DestinationLinesToRequestVMInVBlank = %f\n",
__func__, *DestinationLinesToRequestVMInVBlank);
- dml_print("DML::%s: TimeForFetchingRowInVBlank = %f\n", __func__, st_vars->TimeForFetchingRowInVBlank);
- dml_print("DML::%s: LineTime = %f\n", __func__, st_vars->LineTime);
+ dml_print("DML::%s: TimeForFetchingRowInVBlank = %f\n", __func__, TimeForFetchingRowInVBlank);
+ dml_print("DML::%s: LineTime = %f\n", __func__, LineTime);
dml_print("DML::%s: DestinationLinesToRequestRowInVBlank = %f\n",
__func__, *DestinationLinesToRequestRowInVBlank);
dml_print("DML::%s: PrefetchSourceLinesY = %f\n", __func__, PrefetchSourceLinesY);
- dml_print("DML::%s: LinesToRequestPrefetchPixelData = %f\n", __func__, st_vars->LinesToRequestPrefetchPixelData);
+ dml_print("DML::%s: LinesToRequestPrefetchPixelData = %f\n", __func__, LinesToRequestPrefetchPixelData);
#endif
- if (st_vars->LinesToRequestPrefetchPixelData >= 1 && st_vars->prefetch_bw_equ > 0) {
- *VRatioPrefetchY = (double) PrefetchSourceLinesY / st_vars->LinesToRequestPrefetchPixelData;
+ if (LinesToRequestPrefetchPixelData >= 1 && prefetch_bw_equ > 0) {
+ *VRatioPrefetchY = (double) PrefetchSourceLinesY / LinesToRequestPrefetchPixelData;
*VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: VRatioPrefetchY = %f\n", __func__, *VRatioPrefetchY);
@@ -3851,12 +3898,12 @@ bool dml32_CalculatePrefetchSchedule(
dml_print("DML::%s: VInitPreFillY = %d\n", __func__, VInitPreFillY);
#endif
if ((SwathHeightY > 4) && (VInitPreFillY > 3)) {
- if (st_vars->LinesToRequestPrefetchPixelData > (VInitPreFillY - 3.0) / 2.0) {
+ if (LinesToRequestPrefetchPixelData > (VInitPreFillY - 3.0) / 2.0) {
*VRatioPrefetchY =
dml_max((double) PrefetchSourceLinesY /
- st_vars->LinesToRequestPrefetchPixelData,
+ LinesToRequestPrefetchPixelData,
(double) MaxNumSwathY * SwathHeightY /
- (st_vars->LinesToRequestPrefetchPixelData -
+ (LinesToRequestPrefetchPixelData -
(VInitPreFillY - 3.0) / 2.0));
*VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
} else {
@@ -3870,7 +3917,7 @@ bool dml32_CalculatePrefetchSchedule(
#endif
}
- *VRatioPrefetchC = (double) PrefetchSourceLinesC / st_vars->LinesToRequestPrefetchPixelData;
+ *VRatioPrefetchC = (double) PrefetchSourceLinesC / LinesToRequestPrefetchPixelData;
*VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
#ifdef __DML_VBA_DEBUG__
@@ -3879,11 +3926,11 @@ bool dml32_CalculatePrefetchSchedule(
dml_print("DML::%s: VInitPreFillC = %d\n", __func__, VInitPreFillC);
#endif
if ((SwathHeightC > 4)) {
- if (st_vars->LinesToRequestPrefetchPixelData > (VInitPreFillC - 3.0) / 2.0) {
+ if (LinesToRequestPrefetchPixelData > (VInitPreFillC - 3.0) / 2.0) {
*VRatioPrefetchC =
dml_max(*VRatioPrefetchC,
(double) MaxNumSwathC * SwathHeightC /
- (st_vars->LinesToRequestPrefetchPixelData -
+ (LinesToRequestPrefetchPixelData -
(VInitPreFillC - 3.0) / 2.0));
*VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
} else {
@@ -3898,25 +3945,25 @@ bool dml32_CalculatePrefetchSchedule(
}
*RequiredPrefetchPixDataBWLuma = (double) PrefetchSourceLinesY
- / st_vars->LinesToRequestPrefetchPixelData * myPipe->BytePerPixelY * swath_width_luma_ub
- / st_vars->LineTime;
+ / LinesToRequestPrefetchPixelData * myPipe->BytePerPixelY * swath_width_luma_ub
+ / LineTime;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: BytePerPixelY = %d\n", __func__, myPipe->BytePerPixelY);
dml_print("DML::%s: swath_width_luma_ub = %d\n", __func__, swath_width_luma_ub);
- dml_print("DML::%s: LineTime = %f\n", __func__, st_vars->LineTime);
+ dml_print("DML::%s: LineTime = %f\n", __func__, LineTime);
dml_print("DML::%s: RequiredPrefetchPixDataBWLuma = %f\n",
__func__, *RequiredPrefetchPixDataBWLuma);
#endif
*RequiredPrefetchPixDataBWChroma = (double) PrefetchSourceLinesC /
- st_vars->LinesToRequestPrefetchPixelData
+ LinesToRequestPrefetchPixelData
* myPipe->BytePerPixelC
- * swath_width_chroma_ub / st_vars->LineTime;
+ * swath_width_chroma_ub / LineTime;
} else {
MyError = true;
#ifdef __DML_VBA_DEBUG__
dml_print("DML:%s: MyErr set. LinesToRequestPrefetchPixelData: %f, should be > 0\n",
- __func__, st_vars->LinesToRequestPrefetchPixelData);
+ __func__, LinesToRequestPrefetchPixelData);
#endif
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
@@ -3925,15 +3972,15 @@ bool dml32_CalculatePrefetchSchedule(
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML: Tpre: %fus - sum of time to request meta pte, 2 x data pte + meta data, swaths\n",
- (double)st_vars->LinesToRequestPrefetchPixelData * st_vars->LineTime +
- 2.0*st_vars->TimeForFetchingRowInVBlank + st_vars->TimeForFetchingMetaPTE);
- dml_print("DML: Tvm: %fus - time to fetch page tables for meta surface\n", st_vars->TimeForFetchingMetaPTE);
+ (double)LinesToRequestPrefetchPixelData * LineTime +
+ 2.0*TimeForFetchingRowInVBlank + TimeForFetchingMetaPTE);
+ dml_print("DML: Tvm: %fus - time to fetch page tables for meta surface\n", TimeForFetchingMetaPTE);
dml_print("DML: To: %fus - time for propagation from scaler to optc\n",
- (*DSTYAfterScaler + ((double) (*DSTXAfterScaler) / (double) myPipe->HTotal)) * st_vars->LineTime);
+ (*DSTYAfterScaler + ((double) (*DSTXAfterScaler) / (double) myPipe->HTotal)) * LineTime);
dml_print("DML: Tvstartup - TSetup - Tcalc - Twait - Tpre - To > 0\n");
- dml_print("DML: Tslack(pre): %fus - time left over in schedule\n", VStartup * st_vars->LineTime -
- st_vars->TimeForFetchingMetaPTE - 2*st_vars->TimeForFetchingRowInVBlank - (*DSTYAfterScaler +
- ((double) (*DSTXAfterScaler) / (double) myPipe->HTotal)) * st_vars->LineTime - TWait - TCalc - *TSetup);
+ dml_print("DML: Tslack(pre): %fus - time left over in schedule\n", VStartup * LineTime -
+ TimeForFetchingMetaPTE - 2*TimeForFetchingRowInVBlank - (*DSTYAfterScaler +
+ ((double) (*DSTXAfterScaler) / (double) myPipe->HTotal)) * LineTime - TWait - TCalc - *TSetup);
dml_print("DML: row_bytes = dpte_row_bytes (per_pipe) = PixelPTEBytesPerRow = : %d\n",
PixelPTEBytesPerRow);
#endif
@@ -3941,7 +3988,7 @@ bool dml32_CalculatePrefetchSchedule(
MyError = true;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: MyErr set, dst_y_prefetch_equ = %f (should be > 1)\n",
- __func__, st_vars->dst_y_prefetch_equ);
+ __func__, dst_y_prefetch_equ);
#endif
}
@@ -3957,10 +4004,10 @@ bool dml32_CalculatePrefetchSchedule(
dml_print("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, HostVMInefficiencyFactor);
dml_print("DML::%s: DestinationLinesToRequestVMInVBlank = %f\n",
__func__, *DestinationLinesToRequestVMInVBlank);
- dml_print("DML::%s: LineTime = %f\n", __func__, st_vars->LineTime);
+ dml_print("DML::%s: LineTime = %f\n", __func__, LineTime);
#endif
prefetch_vm_bw = PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor /
- (*DestinationLinesToRequestVMInVBlank * st_vars->LineTime);
+ (*DestinationLinesToRequestVMInVBlank * LineTime);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: prefetch_vm_bw = %f\n", __func__, prefetch_vm_bw);
#endif
@@ -3977,7 +4024,7 @@ bool dml32_CalculatePrefetchSchedule(
prefetch_row_bw = 0;
} else if (*DestinationLinesToRequestRowInVBlank > 0) {
prefetch_row_bw = (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) /
- (*DestinationLinesToRequestRowInVBlank * st_vars->LineTime);
+ (*DestinationLinesToRequestRowInVBlank * LineTime);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: MetaRowByte = %d\n", __func__, MetaRowByte);
@@ -4000,12 +4047,12 @@ bool dml32_CalculatePrefetchSchedule(
if (MyError) {
*PrefetchBandwidth = 0;
- st_vars->TimeForFetchingMetaPTE = 0;
- st_vars->TimeForFetchingRowInVBlank = 0;
+ TimeForFetchingMetaPTE = 0;
+ TimeForFetchingRowInVBlank = 0;
*DestinationLinesToRequestVMInVBlank = 0;
*DestinationLinesToRequestRowInVBlank = 0;
*DestinationLinesForPrefetch = 0;
- st_vars->LinesToRequestPrefetchPixelData = 0;
+ LinesToRequestPrefetchPixelData = 0;
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
@@ -4159,7 +4206,6 @@ void dml32_CalculateFlipSchedule(
} // CalculateFlipSchedule
void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- struct dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport *st_vars,
bool USRRetrainingRequiredFinal,
enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
unsigned int PrefetchMode,
@@ -4221,15 +4267,37 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
double ActiveDRAMClockChangeLatencyMargin[])
{
unsigned int i, j, k;
-
- st_vars->SurfaceWithMinActiveFCLKChangeMargin = 0;
- st_vars->DRAMClockChangeSupportNumber = 0;
- st_vars->DRAMClockChangeMethod = 0;
- st_vars->FoundFirstSurfaceWithMinActiveFCLKChangeMargin = false;
- st_vars->MinActiveFCLKChangeMargin = 0.;
- st_vars->SecondMinActiveFCLKChangeMarginOneDisplayInVBLank = 0.;
- st_vars->TotalPixelBW = 0.0;
- st_vars->TotalActiveWriteback = 0;
+ unsigned int SurfaceWithMinActiveFCLKChangeMargin = 0;
+ unsigned int DRAMClockChangeSupportNumber = 0;
+ unsigned int LastSurfaceWithoutMargin;
+ unsigned int DRAMClockChangeMethod = 0;
+ bool FoundFirstSurfaceWithMinActiveFCLKChangeMargin = false;
+ double MinActiveFCLKChangeMargin = 0.;
+ double SecondMinActiveFCLKChangeMarginOneDisplayInVBLank = 0.;
+ double ActiveClockChangeLatencyHidingY;
+ double ActiveClockChangeLatencyHidingC;
+ double ActiveClockChangeLatencyHiding;
+ double EffectiveDETBufferSizeY;
+ double ActiveFCLKChangeLatencyMargin[DC__NUM_DPP__MAX];
+ double USRRetrainingLatencyMargin[DC__NUM_DPP__MAX];
+ double TotalPixelBW = 0.0;
+ bool SynchronizedSurfaces[DC__NUM_DPP__MAX][DC__NUM_DPP__MAX];
+ double EffectiveLBLatencyHidingY;
+ double EffectiveLBLatencyHidingC;
+ double LinesInDETY[DC__NUM_DPP__MAX];
+ double LinesInDETC[DC__NUM_DPP__MAX];
+ unsigned int LinesInDETYRoundedDownToSwath[DC__NUM_DPP__MAX];
+ unsigned int LinesInDETCRoundedDownToSwath[DC__NUM_DPP__MAX];
+ double FullDETBufferingTimeY;
+ double FullDETBufferingTimeC;
+ double WritebackDRAMClockChangeLatencyMargin;
+ double WritebackFCLKChangeLatencyMargin;
+ double WritebackLatencyHiding;
+ bool SameTimingForFCLKChange;
+
+ unsigned int TotalActiveWriteback = 0;
+ unsigned int LBLatencyHidingSourceLinesY[DC__NUM_DPP__MAX];
+ unsigned int LBLatencyHidingSourceLinesC[DC__NUM_DPP__MAX];
Watermark->UrgentWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency;
Watermark->USRRetrainingWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency
@@ -4261,13 +4329,13 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
#endif
- st_vars->TotalActiveWriteback = 0;
+ TotalActiveWriteback = 0;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if (WritebackEnable[k] == true)
- st_vars->TotalActiveWriteback = st_vars->TotalActiveWriteback + 1;
+ TotalActiveWriteback = TotalActiveWriteback + 1;
}
- if (st_vars->TotalActiveWriteback <= 1) {
+ if (TotalActiveWriteback <= 1) {
Watermark->WritebackUrgentWatermark = mmSOCParameters.WritebackLatency;
} else {
Watermark->WritebackUrgentWatermark = mmSOCParameters.WritebackLatency
@@ -4277,7 +4345,7 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
Watermark->WritebackUrgentWatermark = Watermark->WritebackUrgentWatermark
+ mmSOCParameters.USRRetrainingLatency;
- if (st_vars->TotalActiveWriteback <= 1) {
+ if (TotalActiveWriteback <= 1) {
Watermark->WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency
+ mmSOCParameters.WritebackLatency;
Watermark->WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency
@@ -4307,14 +4375,14 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
#endif
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- st_vars->TotalPixelBW = st_vars->TotalPixelBW + DPPPerSurface[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] +
+ TotalPixelBW = TotalPixelBW + DPPPerSurface[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] +
SwathWidthC[k] * BytePerPixelDETC[k] * VRatioChroma[k]) / (HTotal[k] / PixelClock[k]);
}
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- st_vars->LBLatencyHidingSourceLinesY[k] = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (VTaps[k] - 1);
- st_vars->LBLatencyHidingSourceLinesC[k] = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTapsChroma[k] - 1);
+ LBLatencyHidingSourceLinesY[k] = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (VTaps[k] - 1);
+ LBLatencyHidingSourceLinesC[k] = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTapsChroma[k] - 1);
#ifdef __DML_VBA_DEBUG__
@@ -4325,72 +4393,72 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
dml_print("DML::%s: k=%d, VTaps = %d\n", __func__, k, VTaps[k]);
#endif
- st_vars->EffectiveLBLatencyHidingY = st_vars->LBLatencyHidingSourceLinesY[k] / VRatio[k] * (HTotal[k] / PixelClock[k]);
- st_vars->EffectiveLBLatencyHidingC = st_vars->LBLatencyHidingSourceLinesC[k] / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
- st_vars->EffectiveDETBufferSizeY = DETBufferSizeY[k];
+ EffectiveLBLatencyHidingY = LBLatencyHidingSourceLinesY[k] / VRatio[k] * (HTotal[k] / PixelClock[k]);
+ EffectiveLBLatencyHidingC = LBLatencyHidingSourceLinesC[k] / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
+ EffectiveDETBufferSizeY = DETBufferSizeY[k];
if (UnboundedRequestEnabled) {
- st_vars->EffectiveDETBufferSizeY = st_vars->EffectiveDETBufferSizeY
+ EffectiveDETBufferSizeY = EffectiveDETBufferSizeY
+ CompressedBufferSizeInkByte * 1024
* (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k])
- / (HTotal[k] / PixelClock[k]) / st_vars->TotalPixelBW;
+ / (HTotal[k] / PixelClock[k]) / TotalPixelBW;
}
- st_vars->LinesInDETY[k] = (double) st_vars->EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
- st_vars->LinesInDETYRoundedDownToSwath[k] = dml_floor(st_vars->LinesInDETY[k], SwathHeightY[k]);
- st_vars->FullDETBufferingTimeY = st_vars->LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
+ LinesInDETY[k] = (double) EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
+ LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
+ FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
- st_vars->ActiveClockChangeLatencyHidingY = st_vars->EffectiveLBLatencyHidingY + st_vars->FullDETBufferingTimeY
+ ActiveClockChangeLatencyHidingY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY
- (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k];
if (NumberOfActiveSurfaces > 1) {
- st_vars->ActiveClockChangeLatencyHidingY = st_vars->ActiveClockChangeLatencyHidingY
+ ActiveClockChangeLatencyHidingY = ActiveClockChangeLatencyHidingY
- (1 - 1 / NumberOfActiveSurfaces) * SwathHeightY[k] * HTotal[k]
/ PixelClock[k] / VRatio[k];
}
if (BytePerPixelDETC[k] > 0) {
- st_vars->LinesInDETC[k] = DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
- st_vars->LinesInDETCRoundedDownToSwath[k] = dml_floor(st_vars->LinesInDETC[k], SwathHeightC[k]);
- st_vars->FullDETBufferingTimeC = st_vars->LinesInDETCRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k])
+ LinesInDETC[k] = DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
+ LinesInDETCRoundedDownToSwath[k] = dml_floor(LinesInDETC[k], SwathHeightC[k]);
+ FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k])
/ VRatioChroma[k];
- st_vars->ActiveClockChangeLatencyHidingC = st_vars->EffectiveLBLatencyHidingC + st_vars->FullDETBufferingTimeC
+ ActiveClockChangeLatencyHidingC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC
- (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k]
/ PixelClock[k];
if (NumberOfActiveSurfaces > 1) {
- st_vars->ActiveClockChangeLatencyHidingC = st_vars->ActiveClockChangeLatencyHidingC
+ ActiveClockChangeLatencyHidingC = ActiveClockChangeLatencyHidingC
- (1 - 1 / NumberOfActiveSurfaces) * SwathHeightC[k] * HTotal[k]
/ PixelClock[k] / VRatioChroma[k];
}
- st_vars->ActiveClockChangeLatencyHiding = dml_min(st_vars->ActiveClockChangeLatencyHidingY,
- st_vars->ActiveClockChangeLatencyHidingC);
+ ActiveClockChangeLatencyHiding = dml_min(ActiveClockChangeLatencyHidingY,
+ ActiveClockChangeLatencyHidingC);
} else {
- st_vars->ActiveClockChangeLatencyHiding = st_vars->ActiveClockChangeLatencyHidingY;
+ ActiveClockChangeLatencyHiding = ActiveClockChangeLatencyHidingY;
}
- ActiveDRAMClockChangeLatencyMargin[k] = st_vars->ActiveClockChangeLatencyHiding - Watermark->UrgentWatermark
+ ActiveDRAMClockChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - Watermark->UrgentWatermark
- Watermark->DRAMClockChangeWatermark;
- st_vars->ActiveFCLKChangeLatencyMargin[k] = st_vars->ActiveClockChangeLatencyHiding - Watermark->UrgentWatermark
+ ActiveFCLKChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - Watermark->UrgentWatermark
- Watermark->FCLKChangeWatermark;
- st_vars->USRRetrainingLatencyMargin[k] = st_vars->ActiveClockChangeLatencyHiding - Watermark->USRRetrainingWatermark;
+ USRRetrainingLatencyMargin[k] = ActiveClockChangeLatencyHiding - Watermark->USRRetrainingWatermark;
if (WritebackEnable[k]) {
- st_vars->WritebackLatencyHiding = WritebackInterfaceBufferSize * 1024
+ WritebackLatencyHiding = WritebackInterfaceBufferSize * 1024
/ (WritebackDestinationWidth[k] * WritebackDestinationHeight[k]
/ (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4);
if (WritebackPixelFormat[k] == dm_444_64)
- st_vars->WritebackLatencyHiding = st_vars->WritebackLatencyHiding / 2;
+ WritebackLatencyHiding = WritebackLatencyHiding / 2;
- st_vars->WritebackDRAMClockChangeLatencyMargin = st_vars->WritebackLatencyHiding
+ WritebackDRAMClockChangeLatencyMargin = WritebackLatencyHiding
- Watermark->WritebackDRAMClockChangeWatermark;
- st_vars->WritebackFCLKChangeLatencyMargin = st_vars->WritebackLatencyHiding
+ WritebackFCLKChangeLatencyMargin = WritebackLatencyHiding
- Watermark->WritebackFCLKChangeWatermark;
ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMargin[k],
- st_vars->WritebackFCLKChangeLatencyMargin);
- st_vars->ActiveFCLKChangeLatencyMargin[k] = dml_min(st_vars->ActiveFCLKChangeLatencyMargin[k],
- st_vars->WritebackDRAMClockChangeLatencyMargin);
+ WritebackFCLKChangeLatencyMargin);
+ ActiveFCLKChangeLatencyMargin[k] = dml_min(ActiveFCLKChangeLatencyMargin[k],
+ WritebackDRAMClockChangeLatencyMargin);
}
MaxActiveDRAMClockChangeLatencySupported[k] =
(UseMALLForPStateChange[k] == dm_use_mall_pstate_change_phantom_pipe) ?
@@ -4409,41 +4477,41 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
HTotal[i] == HTotal[j] && VTotal[i] == VTotal[j] &&
VActive[i] == VActive[j]) || (SynchronizeDRRDisplaysForUCLKPStateChangeFinal &&
(DRRDisplay[i] || DRRDisplay[j]))) {
- st_vars->SynchronizedSurfaces[i][j] = true;
+ SynchronizedSurfaces[i][j] = true;
} else {
- st_vars->SynchronizedSurfaces[i][j] = false;
+ SynchronizedSurfaces[i][j] = false;
}
}
}
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
- (!st_vars->FoundFirstSurfaceWithMinActiveFCLKChangeMargin ||
- st_vars->ActiveFCLKChangeLatencyMargin[k] < st_vars->MinActiveFCLKChangeMargin)) {
- st_vars->FoundFirstSurfaceWithMinActiveFCLKChangeMargin = true;
- st_vars->MinActiveFCLKChangeMargin = st_vars->ActiveFCLKChangeLatencyMargin[k];
- st_vars->SurfaceWithMinActiveFCLKChangeMargin = k;
+ (!FoundFirstSurfaceWithMinActiveFCLKChangeMargin ||
+ ActiveFCLKChangeLatencyMargin[k] < MinActiveFCLKChangeMargin)) {
+ FoundFirstSurfaceWithMinActiveFCLKChangeMargin = true;
+ MinActiveFCLKChangeMargin = ActiveFCLKChangeLatencyMargin[k];
+ SurfaceWithMinActiveFCLKChangeMargin = k;
}
}
- *MinActiveFCLKChangeLatencySupported = st_vars->MinActiveFCLKChangeMargin + mmSOCParameters.FCLKChangeLatency;
+ *MinActiveFCLKChangeLatencySupported = MinActiveFCLKChangeMargin + mmSOCParameters.FCLKChangeLatency;
- st_vars->SameTimingForFCLKChange = true;
+ SameTimingForFCLKChange = true;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- if (!st_vars->SynchronizedSurfaces[k][st_vars->SurfaceWithMinActiveFCLKChangeMargin]) {
+ if (!SynchronizedSurfaces[k][SurfaceWithMinActiveFCLKChangeMargin]) {
if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
- (st_vars->SameTimingForFCLKChange ||
- st_vars->ActiveFCLKChangeLatencyMargin[k] <
- st_vars->SecondMinActiveFCLKChangeMarginOneDisplayInVBLank)) {
- st_vars->SecondMinActiveFCLKChangeMarginOneDisplayInVBLank = st_vars->ActiveFCLKChangeLatencyMargin[k];
+ (SameTimingForFCLKChange ||
+ ActiveFCLKChangeLatencyMargin[k] <
+ SecondMinActiveFCLKChangeMarginOneDisplayInVBLank)) {
+ SecondMinActiveFCLKChangeMarginOneDisplayInVBLank = ActiveFCLKChangeLatencyMargin[k];
}
- st_vars->SameTimingForFCLKChange = false;
+ SameTimingForFCLKChange = false;
}
}
- if (st_vars->MinActiveFCLKChangeMargin > 0) {
+ if (MinActiveFCLKChangeMargin > 0) {
*FCLKChangeSupport = dm_fclock_change_vactive;
- } else if ((st_vars->SameTimingForFCLKChange || st_vars->SecondMinActiveFCLKChangeMarginOneDisplayInVBLank > 0) &&
+ } else if ((SameTimingForFCLKChange || SecondMinActiveFCLKChangeMarginOneDisplayInVBLank > 0) &&
(PrefetchMode <= 1)) {
*FCLKChangeSupport = dm_fclock_change_vblank;
} else {
@@ -4453,7 +4521,7 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
*USRRetrainingSupport = true;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
- (st_vars->USRRetrainingLatencyMargin[k] < 0)) {
+ (USRRetrainingLatencyMargin[k] < 0)) {
*USRRetrainingSupport = false;
}
}
@@ -4464,42 +4532,42 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe &&
ActiveDRAMClockChangeLatencyMargin[k] < 0) {
if (PrefetchMode > 0) {
- st_vars->DRAMClockChangeSupportNumber = 2;
- } else if (st_vars->DRAMClockChangeSupportNumber == 0) {
- st_vars->DRAMClockChangeSupportNumber = 1;
- st_vars->LastSurfaceWithoutMargin = k;
- } else if (st_vars->DRAMClockChangeSupportNumber == 1 &&
- !st_vars->SynchronizedSurfaces[st_vars->LastSurfaceWithoutMargin][k]) {
- st_vars->DRAMClockChangeSupportNumber = 2;
+ DRAMClockChangeSupportNumber = 2;
+ } else if (DRAMClockChangeSupportNumber == 0) {
+ DRAMClockChangeSupportNumber = 1;
+ LastSurfaceWithoutMargin = k;
+ } else if (DRAMClockChangeSupportNumber == 1 &&
+ !SynchronizedSurfaces[LastSurfaceWithoutMargin][k]) {
+ DRAMClockChangeSupportNumber = 2;
}
}
}
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if (UseMALLForPStateChange[k] == dm_use_mall_pstate_change_full_frame)
- st_vars->DRAMClockChangeMethod = 1;
+ DRAMClockChangeMethod = 1;
else if (UseMALLForPStateChange[k] == dm_use_mall_pstate_change_sub_viewport)
- st_vars->DRAMClockChangeMethod = 2;
+ DRAMClockChangeMethod = 2;
}
- if (st_vars->DRAMClockChangeMethod == 0) {
- if (st_vars->DRAMClockChangeSupportNumber == 0)
+ if (DRAMClockChangeMethod == 0) {
+ if (DRAMClockChangeSupportNumber == 0)
*DRAMClockChangeSupport = dm_dram_clock_change_vactive;
- else if (st_vars->DRAMClockChangeSupportNumber == 1)
+ else if (DRAMClockChangeSupportNumber == 1)
*DRAMClockChangeSupport = dm_dram_clock_change_vblank;
else
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
- } else if (st_vars->DRAMClockChangeMethod == 1) {
- if (st_vars->DRAMClockChangeSupportNumber == 0)
+ } else if (DRAMClockChangeMethod == 1) {
+ if (DRAMClockChangeSupportNumber == 0)
*DRAMClockChangeSupport = dm_dram_clock_change_vactive_w_mall_full_frame;
- else if (st_vars->DRAMClockChangeSupportNumber == 1)
+ else if (DRAMClockChangeSupportNumber == 1)
*DRAMClockChangeSupport = dm_dram_clock_change_vblank_w_mall_full_frame;
else
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
} else {
- if (st_vars->DRAMClockChangeSupportNumber == 0)
+ if (DRAMClockChangeSupportNumber == 0)
*DRAMClockChangeSupport = dm_dram_clock_change_vactive_w_mall_sub_vp;
- else if (st_vars->DRAMClockChangeSupportNumber == 1)
+ else if (DRAMClockChangeSupportNumber == 1)
*DRAMClockChangeSupport = dm_dram_clock_change_vblank_w_mall_sub_vp;
else
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
@@ -4513,7 +4581,7 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
dst_y_pstate = dml_ceil((mmSOCParameters.DRAMClockChangeLatency + mmSOCParameters.UrgentLatency) / (HTotal[k] / PixelClock[k]), 1);
src_y_pstate_l = dml_ceil(dst_y_pstate * VRatio[k], SwathHeightY[k]);
- src_y_ahead_l = dml_floor(DETBufferSizeY[k] / BytePerPixelDETY[k] / SwathWidthY[k], SwathHeightY[k]) + st_vars->LBLatencyHidingSourceLinesY[k];
+ src_y_ahead_l = dml_floor(DETBufferSizeY[k] / BytePerPixelDETY[k] / SwathWidthY[k], SwathHeightY[k]) + LBLatencyHidingSourceLinesY[k];
sub_vp_lines_l = src_y_pstate_l + src_y_ahead_l + meta_row_height[k];
#ifdef __DML_VBA_DEBUG__
@@ -4521,7 +4589,7 @@ dml_print("DML::%s: k=%d, DETBufferSizeY = %d\n", __func__, k, DET
dml_print("DML::%s: k=%d, BytePerPixelDETY = %f\n", __func__, k, BytePerPixelDETY[k]);
dml_print("DML::%s: k=%d, SwathWidthY = %d\n", __func__, k, SwathWidthY[k]);
dml_print("DML::%s: k=%d, SwathHeightY = %d\n", __func__, k, SwathHeightY[k]);
-dml_print("DML::%s: k=%d, LBLatencyHidingSourceLinesY = %d\n", __func__, k, st_vars->LBLatencyHidingSourceLinesY[k]);
+dml_print("DML::%s: k=%d, LBLatencyHidingSourceLinesY = %d\n", __func__, k, LBLatencyHidingSourceLinesY[k]);
dml_print("DML::%s: k=%d, dst_y_pstate = %d\n", __func__, k, dst_y_pstate);
dml_print("DML::%s: k=%d, src_y_pstate_l = %d\n", __func__, k, src_y_pstate_l);
dml_print("DML::%s: k=%d, src_y_ahead_l = %d\n", __func__, k, src_y_ahead_l);
@@ -4532,7 +4600,7 @@ dml_print("DML::%s: k=%d, sub_vp_lines_l = %d\n", __func__, k, sub_vp_lines_l
if (BytePerPixelDETC[k] > 0) {
src_y_pstate_c = dml_ceil(dst_y_pstate * VRatioChroma[k], SwathHeightC[k]);
- src_y_ahead_c = dml_floor(DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k], SwathHeightC[k]) + st_vars->LBLatencyHidingSourceLinesC[k];
+ src_y_ahead_c = dml_floor(DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k], SwathHeightC[k]) + LBLatencyHidingSourceLinesC[k];
sub_vp_lines_c = src_y_pstate_c + src_y_ahead_c + meta_row_height_chroma[k];
SubViewportLinesNeededInMALL[k] = dml_max(sub_vp_lines_l, sub_vp_lines_c);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
index 37a314ce284b..d293856ba906 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
@@ -30,7 +30,6 @@
#include "os_types.h"
#include "../dc_features.h"
#include "../display_mode_structs.h"
-#include "dml/display_mode_vba.h"
unsigned int dml32_dscceComputeDelay(
unsigned int bpc,
@@ -82,7 +81,6 @@ void dml32_CalculateSinglePipeDPPCLKAndSCLThroughput(
double *DPPCLKUsingSingleDPP);
void dml32_CalculateSwathAndDETConfiguration(
- struct dml32_CalculateSwathAndDETConfiguration *st_vars,
unsigned int DETSizeOverride[],
enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
unsigned int ConfigReturnBufferSizeInKByte,
@@ -362,7 +360,6 @@ void dml32_CalculateSurfaceSizeInMall(
bool *ExceededMALLSize);
void dml32_CalculateVMRowAndSwath(
- struct dml32_CalculateVMRowAndSwath *st_vars,
unsigned int NumberOfActiveSurfaces,
DmlPipe myPipe[],
unsigned int SurfaceSizeInMALL[],
@@ -715,7 +712,6 @@ double dml32_CalculateExtraLatency(
unsigned int HostVMMaxNonCachedPageTableLevels);
bool dml32_CalculatePrefetchSchedule(
- struct dml32_CalculatePrefetchSchedule *st_vars,
double HostVMInefficiencyFactor,
DmlPipe *myPipe,
unsigned int DSCDelay,
@@ -811,7 +807,6 @@ void dml32_CalculateFlipSchedule(
bool *ImmediateFlipSupportedForPipe);
void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- struct dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport *st_vars,
bool USRRetrainingRequiredFinal,
enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
unsigned int PrefetchMode,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
index 84b4b00f29cb..c87091683b5d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
@@ -498,6 +498,13 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
}
+ if ((int)(dcn3_21_soc.fclk_change_latency_us * 1000)
+ != dc->bb_overrides.fclk_clock_change_latency_ns
+ && dc->bb_overrides.fclk_clock_change_latency_ns) {
+ dcn3_21_soc.fclk_change_latency_us =
+ dc->bb_overrides.fclk_clock_change_latency_ns / 1000;
+ }
+
if ((int)(dcn3_21_soc.dummy_pstate_latency_us * 1000)
!= dc->bb_overrides.dummy_clock_change_latency_ns
&& dc->bb_overrides.dummy_clock_change_latency_ns) {
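The dcn321_fpu.c hunk above adds an fclk-change-latency override: the nanosecond value from dc->bb_overrides replaces the microsecond SoC default only when it is non-zero and actually differs from the value already programmed. Below is a minimal standalone sketch of that guard-and-convert pattern; the function and variable names are hypothetical, not from the patch.

#include <stdio.h>

/* Apply a nanosecond override to a microsecond field only when it is set and
 * differs from the current value (mirrors the integer division in the hunk
 * above). Illustrative only. */
static void apply_latency_override(double *latency_us, int override_ns)
{
	if ((int)(*latency_us * 1000) != override_ns && override_ns)
		*latency_us = override_ns / 1000;
}

int main(void)
{
	double fclk_change_latency_us = 7.0;

	apply_latency_override(&fclk_change_latency_us, 25000); /* 25 us override */
	printf("%.1f us\n", fclk_change_latency_us);            /* prints 25.0 */
	return 0;
}
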
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
index 8460aefe7b6d..492aec634b68 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
@@ -182,108 +182,6 @@ void Calculate256BBlockSizes(
unsigned int *BlockWidth256BytesY,
unsigned int *BlockWidth256BytesC);
-struct dml32_CalculateSwathAndDETConfiguration {
- unsigned int MaximumSwathHeightY[DC__NUM_DPP__MAX];
- unsigned int MaximumSwathHeightC[DC__NUM_DPP__MAX];
- unsigned int RoundedUpMaxSwathSizeBytesY[DC__NUM_DPP__MAX];
- unsigned int RoundedUpMaxSwathSizeBytesC[DC__NUM_DPP__MAX];
- unsigned int RoundedUpSwathSizeBytesY;
- unsigned int RoundedUpSwathSizeBytesC;
- double SwathWidthdoubleDPP[DC__NUM_DPP__MAX];
- double SwathWidthdoubleDPPChroma[DC__NUM_DPP__MAX];
- unsigned int TotalActiveDPP;
- bool NoChromaSurfaces;
- unsigned int DETBufferSizeInKByteForSwathCalculation;
-};
-
-struct dml32_CalculateVMRowAndSwath {
- unsigned int PTEBufferSizeInRequestsForLuma[DC__NUM_DPP__MAX];
- unsigned int PTEBufferSizeInRequestsForChroma[DC__NUM_DPP__MAX];
- unsigned int PDEAndMetaPTEBytesFrameY;
- unsigned int PDEAndMetaPTEBytesFrameC;
- unsigned int MetaRowByteY[DC__NUM_DPP__MAX];
- unsigned int MetaRowByteC[DC__NUM_DPP__MAX];
- unsigned int PixelPTEBytesPerRowY[DC__NUM_DPP__MAX];
- unsigned int PixelPTEBytesPerRowC[DC__NUM_DPP__MAX];
- unsigned int PixelPTEBytesPerRowY_one_row_per_frame[DC__NUM_DPP__MAX];
- unsigned int PixelPTEBytesPerRowC_one_row_per_frame[DC__NUM_DPP__MAX];
- unsigned int dpte_row_width_luma_ub_one_row_per_frame[DC__NUM_DPP__MAX];
- unsigned int dpte_row_height_luma_one_row_per_frame[DC__NUM_DPP__MAX];
- unsigned int dpte_row_width_chroma_ub_one_row_per_frame[DC__NUM_DPP__MAX];
- unsigned int dpte_row_height_chroma_one_row_per_frame[DC__NUM_DPP__MAX];
- bool one_row_per_frame_fits_in_buffer[DC__NUM_DPP__MAX];
-};
-
-struct dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport {
- unsigned int SurfaceWithMinActiveFCLKChangeMargin;
- unsigned int DRAMClockChangeSupportNumber;
- unsigned int LastSurfaceWithoutMargin;
- unsigned int DRAMClockChangeMethod;
- bool FoundFirstSurfaceWithMinActiveFCLKChangeMargin;
- double MinActiveFCLKChangeMargin;
- double SecondMinActiveFCLKChangeMarginOneDisplayInVBLank;
- double ActiveClockChangeLatencyHidingY;
- double ActiveClockChangeLatencyHidingC;
- double ActiveClockChangeLatencyHiding;
- double EffectiveDETBufferSizeY;
- double ActiveFCLKChangeLatencyMargin[DC__NUM_DPP__MAX];
- double USRRetrainingLatencyMargin[DC__NUM_DPP__MAX];
- double TotalPixelBW;
- bool SynchronizedSurfaces[DC__NUM_DPP__MAX][DC__NUM_DPP__MAX];
- double EffectiveLBLatencyHidingY;
- double EffectiveLBLatencyHidingC;
- double LinesInDETY[DC__NUM_DPP__MAX];
- double LinesInDETC[DC__NUM_DPP__MAX];
- unsigned int LinesInDETYRoundedDownToSwath[DC__NUM_DPP__MAX];
- unsigned int LinesInDETCRoundedDownToSwath[DC__NUM_DPP__MAX];
- double FullDETBufferingTimeY;
- double FullDETBufferingTimeC;
- double WritebackDRAMClockChangeLatencyMargin;
- double WritebackFCLKChangeLatencyMargin;
- double WritebackLatencyHiding;
- bool SameTimingForFCLKChange;
- unsigned int TotalActiveWriteback;
- unsigned int LBLatencyHidingSourceLinesY[DC__NUM_DPP__MAX];
- unsigned int LBLatencyHidingSourceLinesC[DC__NUM_DPP__MAX];
-};
-
-struct dml32_CalculatePrefetchSchedule {
- unsigned int DPPCycles, DISPCLKCycles;
- double DSTTotalPixelsAfterScaler;
- double LineTime;
- double dst_y_prefetch_equ;
- double prefetch_bw_oto;
- double Tvm_oto;
- double Tr0_oto;
- double Tvm_oto_lines;
- double Tr0_oto_lines;
- double dst_y_prefetch_oto;
- double TimeForFetchingMetaPTE;
- double TimeForFetchingRowInVBlank;
- double LinesToRequestPrefetchPixelData;
- unsigned int HostVMDynamicLevelsTrips;
- double trip_to_mem;
- double Tvm_trips;
- double Tr0_trips;
- double Tvm_trips_rounded;
- double Tr0_trips_rounded;
- double Lsw_oto;
- double Tpre_rounded;
- double prefetch_bw_equ;
- double Tvm_equ;
- double Tr0_equ;
- double Tdmbf;
- double Tdmec;
- double Tdmsks;
- double prefetch_sw_bytes;
- double bytes_pp;
- double dep_bytes;
- unsigned int max_vratio_pre;
- double min_Lsw;
- double Tsw_est1;
- double Tsw_est3;
-};
-
struct DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation {
unsigned int dummy_integer_array[2][DC__NUM_DPP__MAX];
double dummy_single_array[2][DC__NUM_DPP__MAX];
@@ -355,10 +253,6 @@ struct dummy_vars {
struct DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation
DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation;
struct dml32_ModeSupportAndSystemConfigurationFull dml32_ModeSupportAndSystemConfigurationFull;
- struct dml32_CalculateSwathAndDETConfiguration dml32_CalculateSwathAndDETConfiguration;
- struct dml32_CalculateVMRowAndSwath dml32_CalculateVMRowAndSwath;
- struct dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport;
- struct dml32_CalculatePrefetchSchedule dml32_CalculatePrefetchSchedule;
};
struct vba_vars_st {
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index ab06c7fc7452..9f3558c0ef11 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -244,13 +244,15 @@ enum {
#define ASICREV_IS_GC_10_3_7(eChipRev) ((eChipRev >= GC_10_3_7_A0) && (eChipRev < GC_10_3_7_UNKNOWN))
#define AMDGPU_FAMILY_GC_11_0_0 145
-#define AMDGPU_FAMILY_GC_11_0_2 148
+#define AMDGPU_FAMILY_GC_11_0_1 148
#define GC_11_0_0_A0 0x1
#define GC_11_0_2_A0 0x10
+#define GC_11_0_3_A0 0x20
#define GC_11_UNKNOWN 0xFF
#define ASICREV_IS_GC_11_0_0(eChipRev) (eChipRev < GC_11_0_2_A0)
-#define ASICREV_IS_GC_11_0_2(eChipRev) (eChipRev >= GC_11_0_2_A0 && eChipRev < GC_11_UNKNOWN)
+#define ASICREV_IS_GC_11_0_2(eChipRev) (eChipRev >= GC_11_0_2_A0 && eChipRev < GC_11_0_3_A0)
+#define ASICREV_IS_GC_11_0_3(eChipRev) (eChipRev >= GC_11_0_3_A0 && eChipRev < GC_11_UNKNOWN)
/*
* ASIC chip ID
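The dal_asic_id.h hunk above renames the family define and splits the formerly open-ended GC_11_0_2 range, so revisions at or above GC_11_0_3_A0 now classify as GC_11_0_3. A small standalone check of the resulting half-open ranges; the macro values are copied from the hunk, everything else is illustrative.

#include <stdio.h>

#define GC_11_0_0_A0	0x1
#define GC_11_0_2_A0	0x10
#define GC_11_0_3_A0	0x20
#define GC_11_UNKNOWN	0xFF

#define ASICREV_IS_GC_11_0_0(rev) ((rev) < GC_11_0_2_A0)
#define ASICREV_IS_GC_11_0_2(rev) ((rev) >= GC_11_0_2_A0 && (rev) < GC_11_0_3_A0)
#define ASICREV_IS_GC_11_0_3(rev) ((rev) >= GC_11_0_3_A0 && (rev) < GC_11_UNKNOWN)

int main(void)
{
	unsigned int rev = 0x20;

	/* 0x20 now falls in the new GC_11_0_3 range rather than GC_11_0_2. */
	printf("11_0_2: %d, 11_0_3: %d\n",
	       ASICREV_IS_GC_11_0_2(rev), ASICREV_IS_GC_11_0_3(rev));
	return 0;
}
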
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index f093b49c5e6e..3bf08a60c45c 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -119,13 +119,15 @@ enum dc_log_type {
LOG_HDMI_RETIMER_REDRIVER,
LOG_DSC,
LOG_SMU_MSG,
+ LOG_DC2RESERVED4,
+ LOG_DC2RESERVED5,
LOG_DWB,
LOG_GAMMA_DEBUG,
LOG_MAX_HW_POINTS,
LOG_ALL_TF_CHANNELS,
LOG_SAMPLE_1DLUT,
LOG_DP2,
- LOG_SECTION_TOTAL_COUNT
+ LOG_DC2RESERVED12,
};
#define DC_MIN_LOG_MASK ((1 << LOG_ERROR) | \
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index da09ba7589f7..0f39ab9dc5b4 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -613,10 +613,6 @@ static void build_vrr_infopacket_data_v1(const struct mod_vrr_params *vrr,
* Note: We should never go above the field rate of the mode timing set.
*/
infopacket->sb[8] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
-
- /* FreeSync HDR */
- infopacket->sb[9] = 0;
- infopacket->sb[10] = 0;
}
static void build_vrr_infopacket_data_v3(const struct mod_vrr_params *vrr,
@@ -684,10 +680,6 @@ static void build_vrr_infopacket_data_v3(const struct mod_vrr_params *vrr,
/* PB16 : Reserved bits 7:1, FixedRate bit 0 */
infopacket->sb[16] = (vrr->state == VRR_STATE_ACTIVE_FIXED) ? 1 : 0;
-
- //FreeSync HDR
- infopacket->sb[9] = 0;
- infopacket->sb[10] = 0;
}
static void build_vrr_infopacket_fs2_data(enum color_transfer_func app_tf,
@@ -772,8 +764,7 @@ static void build_vrr_infopacket_header_v2(enum signal_type signal,
/* HB2 = [Bits 7:5 = 0] [Bits 4:0 = Length = 0x09] */
infopacket->hb2 = 0x09;
- *payload_size = 0x0A;
-
+ *payload_size = 0x09;
} else if (dc_is_dp_signal(signal)) {
/* HEADER */
@@ -822,9 +813,9 @@ static void build_vrr_infopacket_header_v3(enum signal_type signal,
infopacket->hb1 = version;
/* HB2 = [Bits 7:5 = 0] [Bits 4:0 = Length] */
- *payload_size = 0x10;
- infopacket->hb2 = *payload_size - 1; //-1 for checksum
+ infopacket->hb2 = 0x10;
+ *payload_size = 0x10;
} else if (dc_is_dp_signal(signal)) {
/* HEADER */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_offset.h
index 2ed95790a600..cf8d60c4df1b 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_offset.h
@@ -15243,6 +15243,8 @@
#define regBIF0_PCIE_TX_TRACKING_ADDR_HI_BASE_IDX 5
#define regBIF0_PCIE_TX_TRACKING_CTRL_STATUS 0x420186
#define regBIF0_PCIE_TX_TRACKING_CTRL_STATUS_BASE_IDX 5
+#define regBIF0_PCIE_TX_POWER_CTRL_1 0x420187
+#define regBIF0_PCIE_TX_POWER_CTRL_1_BASE_IDX 5
#define regBIF0_PCIE_TX_CTRL_4 0x42018b
#define regBIF0_PCIE_TX_CTRL_4_BASE_IDX 5
#define regBIF0_PCIE_TX_STATUS 0x420194
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_sh_mask.h
index eb62a18fcc48..3d60c9e92548 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_sh_mask.h
@@ -85627,6 +85627,19 @@
#define BIF0_PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_PORT_MASK 0x0000000EL
#define BIF0_PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_UNIT_ID_MASK 0x00007F00L
#define BIF0_PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_STATUS_VALID_MASK 0x00008000L
+//BIF0_PCIE_TX_POWER_CTRL_1
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN__SHIFT 0x0
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_DS_EN__SHIFT 0x1
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_SD_EN__SHIFT 0x2
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN__SHIFT 0x3
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_DS_EN__SHIFT 0x4
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_SD_EN__SHIFT 0x5
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK 0x00000001L
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_DS_EN_MASK 0x00000002L
+#define BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_SD_EN_MASK 0x00000004L
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK 0x00000008L
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_DS_EN_MASK 0x00000010L
+#define BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_SD_EN_MASK 0x00000020L
//BIF0_PCIE_TX_CTRL_4
#define BIF0_PCIE_TX_CTRL_4__TX_PORT_ACCESS_TIMER_SKEW__SHIFT 0x0
#define BIF0_PCIE_TX_CTRL_4__TX_PORT_ACCESS_TIMER_SKEW_MASK 0x0000000FL
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
index 78620b0bd279..f745cd8f1ab7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
@@ -24,12 +24,8 @@
#ifndef SMU13_DRIVER_IF_V13_0_0_H
#define SMU13_DRIVER_IF_V13_0_0_H
-// *** IMPORTANT ***
-// PMFW TEAM: Always increment the interface version on any change to this file
-#define SMU13_DRIVER_IF_VERSION 0x23
-
//Increment this version if SkuTable_t or BoardTable_t change
-#define PPTABLE_VERSION 0x1D
+#define PPTABLE_VERSION 0x22
#define NUM_GFXCLK_DPM_LEVELS 16
#define NUM_SOCCLK_DPM_LEVELS 8
@@ -1193,8 +1189,17 @@ typedef struct {
// SECTION: Advanced Options
uint32_t DebugOverrides;
+ // Section: Total Board Power idle vs active coefficients
+ uint8_t TotalBoardPowerSupport;
+ uint8_t TotalBoardPowerPadding[3];
+
+ int16_t TotalIdleBoardPowerM;
+ int16_t TotalIdleBoardPowerB;
+ int16_t TotalBoardPowerM;
+ int16_t TotalBoardPowerB;
+
// SECTION: Sku Reserved
- uint32_t Spare[64];
+ uint32_t Spare[61];
// Padding for MMHUB - do not modify this
uint32_t MmHubPadding[8];
@@ -1259,7 +1264,8 @@ typedef struct {
// SECTION: Clock Spread Spectrum
// UCLK Spread Spectrum
- uint16_t UclkSpreadPadding;
+ uint8_t UclkTrainingModeSpreadPercent;
+ uint8_t UclkSpreadPadding;
uint16_t UclkSpreadFreq; // kHz
// UCLK Spread Spectrum
@@ -1272,11 +1278,7 @@ typedef struct {
// Section: Memory Config
uint8_t DramWidth; // Width of interface to the channel for each DRAM module. See DRAM_BIT_WIDTH_TYPE_e
- uint8_t PaddingMem1[3];
-
- // Section: Total Board Power
- uint16_t TotalBoardPower; //Only needed for TCP Estimated case, where TCP = TGP+Total Board Power
- uint16_t BoardPowerPadding;
+ uint8_t PaddingMem1[7];
// SECTION: UMC feature flags
uint8_t HsrEnabled;
@@ -1375,8 +1377,11 @@ typedef struct {
uint16_t Vcn1ActivityPercentage ;
uint32_t EnergyAccumulator;
- uint16_t AverageSocketPower ;
+ uint16_t AverageSocketPower;
+ uint16_t AverageTotalBoardPower;
+
uint16_t AvgTemperature[TEMP_COUNT];
+ uint16_t TempPadding;
uint8_t PcieRate ;
uint8_t PcieWidth ;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
index 76f695a1d065..ae2d337158f3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
@@ -27,7 +27,7 @@
// *** IMPORTANT ***
// SMU TEAM: Always increment the interface version if
// any structure is changed in this file
-#define PMFW_DRIVER_IF_VERSION 4
+#define PMFW_DRIVER_IF_VERSION 5
typedef struct {
int32_t value;
@@ -197,6 +197,8 @@ typedef struct {
uint16_t SkinTemp;
uint16_t DeviceState;
+ uint16_t CurTemp; //[centi-Celsius]
+ uint16_t spare2;
} SmuMetrics_t;
typedef struct {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index c02e5e576728..ac308e72241a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -28,9 +28,9 @@
#define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
#define SMU13_DRIVER_IF_VERSION_ALDE 0x08
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x04
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x05
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x2C
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x2E
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x2C
#define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index fa520d79ef67..6db67f082d91 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -4283,6 +4283,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.dump_pptable = sienna_cichlid_dump_pptable,
.init_microcode = smu_v11_0_init_microcode,
.load_microcode = smu_v11_0_load_microcode,
+ .fini_microcode = smu_v11_0_fini_microcode,
.init_smc_tables = sienna_cichlid_init_smc_tables,
.fini_smc_tables = smu_v11_0_fini_smc_tables,
.init_power = smu_v11_0_init_power,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index e8fe84f806d1..18ee3b5e64c5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -212,6 +212,9 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
if (!adev->scpm_enabled)
return 0;
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7))
+ return 0;
+
/* override pptable_id from driver parameter */
if (amdgpu_smu_pptable_id >= 0) {
pptable_id = amdgpu_smu_pptable_id;
@@ -219,16 +222,10 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
} else {
pptable_id = smu->smu_table.boot_values.pp_table_id;
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7) &&
- pptable_id == 3667)
- pptable_id = 36671;
-
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7) &&
- pptable_id == 3688)
- pptable_id = 36881;
/*
* Temporary solution for SMU V13.0.0 with SCPM enabled:
* - use 36831 signed pptable when pp_table_id is 3683
+ * - use 37151 signed pptable when pp_table_id is 3715
* - use 36641 signed pptable when pp_table_id is 3664 or 0
* TODO: drop these when the pptable carried in vbios is ready.
*/
@@ -241,6 +238,9 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
case 3683:
pptable_id = 36831;
break;
+ case 3715:
+ pptable_id = 37151;
+ break;
default:
dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
return -EINVAL;
@@ -478,7 +478,7 @@ int smu_v13_0_setup_pptable(struct smu_context *smu)
/*
* Temporary solution for SMU V13.0.0 with SCPM disabled:
- * - use 3664 or 3683 on request
+ * - use 3664, 3683 or 3715 on request
* - use 3664 when pptable_id is 0
* TODO: drop these when the pptable carried in vbios is ready.
*/
@@ -489,6 +489,7 @@ int smu_v13_0_setup_pptable(struct smu_context *smu)
break;
case 3664:
case 3683:
+ case 3715:
break;
default:
dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
@@ -2344,8 +2345,8 @@ int smu_v13_0_set_gfx_power_up_by_imu(struct smu_context *smu)
index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
SMU_MSG_EnableGfxImu);
-
- return smu_cmn_send_msg_without_waiting(smu, index, 0);
+ /* Param 1 to tell PMFW to enable GFXOFF feature */
+ return smu_cmn_send_msg_without_waiting(smu, index, 1);
}
int smu_v13_0_od_edit_dpm_table(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 1bbeceeb9e3c..df4a47acd724 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -1792,7 +1792,9 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.dump_pptable = smu_v13_0_0_dump_pptable,
.init_microcode = smu_v13_0_init_microcode,
.load_microcode = smu_v13_0_load_microcode,
+ .fini_microcode = smu_v13_0_fini_microcode,
.init_smc_tables = smu_v13_0_0_init_smc_tables,
+ .fini_smc_tables = smu_v13_0_fini_smc_tables,
.init_power = smu_v13_0_init_power,
.fini_power = smu_v13_0_fini_power,
.check_fw_status = smu_v13_0_check_fw_status,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
index 82d3718d8324..97e1d55dcaad 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
@@ -71,7 +71,6 @@ static struct cmn2asic_msg_mapping smu_v13_0_4_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetPmfwVersion, 1),
MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
- MSG_MAP(EnableGfxOff, PPSMC_MSG_EnableGfxOff, 1),
MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 1),
MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 1),
MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 1),
@@ -199,6 +198,9 @@ static int smu_v13_0_4_fini_smc_tables(struct smu_context *smu)
kfree(smu_table->watermarks_table);
smu_table->watermarks_table = NULL;
+ kfree(smu_table->gpu_metrics_table);
+ smu_table->gpu_metrics_table = NULL;
+
return 0;
}
@@ -226,18 +228,6 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en)
return ret;
}
-static int smu_v13_0_4_post_smu_init(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
- int ret = 0;
-
- /* allow message will be sent after enable message */
- ret = smu_cmn_send_smc_msg(smu, SMU_MSG_EnableGfxOff, NULL);
- if (ret)
- dev_err(adev->dev, "Failed to Enable GfxOff!\n");
- return ret;
-}
-
static ssize_t smu_v13_0_4_get_gpu_metrics(struct smu_context *smu,
void **table)
{
@@ -1026,7 +1016,6 @@ static const struct pptable_funcs smu_v13_0_4_ppt_funcs = {
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_driver_table_location = smu_v13_0_set_driver_table_location,
.gfx_off_control = smu_v13_0_gfx_off_control,
- .post_init = smu_v13_0_4_post_smu_init,
.mode2_reset = smu_v13_0_4_mode2_reset,
.get_dpm_ultimate_freq = smu_v13_0_4_get_dpm_ultimate_freq,
.od_edit_dpm_table = smu_v13_0_od_edit_dpm_table,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
index 47360ef5c175..66445964efbd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
@@ -176,6 +176,9 @@ static int smu_v13_0_5_fini_smc_tables(struct smu_context *smu)
kfree(smu_table->watermarks_table);
smu_table->watermarks_table = NULL;
+ kfree(smu_table->gpu_metrics_table);
+ smu_table->gpu_metrics_table = NULL;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 9dd56e73218b..1016d1c216d8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -1567,6 +1567,16 @@ static int smu_v13_0_7_set_mp1_state(struct smu_context *smu,
return ret;
}
+static bool smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ /* SRIOV does not support SMU mode1 reset */
+ if (amdgpu_sriov_vf(adev))
+ return false;
+
+ return true;
+}
static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_7_set_default_dpm_table,
@@ -1574,7 +1584,9 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.dump_pptable = smu_v13_0_7_dump_pptable,
.init_microcode = smu_v13_0_init_microcode,
.load_microcode = smu_v13_0_load_microcode,
+ .fini_microcode = smu_v13_0_fini_microcode,
.init_smc_tables = smu_v13_0_7_init_smc_tables,
+ .fini_smc_tables = smu_v13_0_fini_smc_tables,
.init_power = smu_v13_0_init_power,
.fini_power = smu_v13_0_fini_power,
.check_fw_status = smu_v13_0_7_check_fw_status,
@@ -1624,6 +1636,8 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.baco_set_state = smu_v13_0_baco_set_state,
.baco_enter = smu_v13_0_baco_enter,
.baco_exit = smu_v13_0_baco_exit,
+ .mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported,
+ .mode1_reset = smu_v13_0_mode1_reset,
.set_mp1_state = smu_v13_0_7_set_mp1_state,
};
diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c
index 702ea803a743..39e7004de720 100644
--- a/drivers/gpu/drm/bridge/lvds-codec.c
+++ b/drivers/gpu/drm/bridge/lvds-codec.c
@@ -180,7 +180,7 @@ static int lvds_codec_probe(struct platform_device *pdev)
of_node_put(bus_node);
if (ret == -ENODEV) {
dev_warn(dev, "missing 'data-mapping' DT property\n");
- } else if (ret) {
+ } else if (ret < 0) {
dev_err(dev, "invalid 'data-mapping' DT property\n");
return ret;
} else {
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 281f8a9ba4fd..7ab38d734ad6 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -550,8 +550,9 @@ static int sii902x_audio_hw_params(struct device *dev, void *data,
unsigned long mclk_rate;
int i, ret;
- if (daifmt->bit_clk_master || daifmt->frame_clk_master) {
- dev_dbg(dev, "%s: I2S master mode not supported\n", __func__);
+ if (daifmt->bit_clk_provider || daifmt->frame_clk_provider) {
+ dev_dbg(dev, "%s: I2S clock provider mode not supported\n",
+ __func__);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
index f50b47ac11a8..a2f0860b20bb 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
@@ -45,7 +45,7 @@ static int dw_hdmi_i2s_hw_params(struct device *dev, void *data,
u8 inputclkfs = 0;
/* it cares I2S only */
- if (fmt->bit_clk_master | fmt->frame_clk_master) {
+ if (fmt->bit_clk_provider | fmt->frame_clk_provider) {
dev_err(dev, "unsupported clock settings\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index eb0c2d041f13..ad068865ba20 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -168,21 +168,6 @@ void drm_gem_private_object_init(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_gem_private_object_init);
-static void
-drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
-{
- /*
- * Note: obj->dma_buf can't disappear as long as we still hold a
- * handle reference in obj->handle_count.
- */
- mutex_lock(&filp->prime.lock);
- if (obj->dma_buf) {
- drm_prime_remove_buf_handle_locked(&filp->prime,
- obj->dma_buf);
- }
- mutex_unlock(&filp->prime.lock);
-}
-
/**
* drm_gem_object_handle_free - release resources bound to userspace handles
* @obj: GEM object to clean up.
@@ -253,7 +238,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
if (obj->funcs->close)
obj->funcs->close(obj, file_priv);
- drm_gem_remove_prime_handles(obj, file_priv);
+ drm_prime_remove_buf_handle(&file_priv->prime, id);
drm_vma_node_revoke(&obj->vma_node, file_priv);
drm_gem_object_handle_put_unlocked(obj);
@@ -1226,7 +1211,7 @@ retry:
ret = dma_resv_lock_slow_interruptible(obj->resv,
acquire_ctx);
if (ret) {
- ww_acquire_done(acquire_ctx);
+ ww_acquire_fini(acquire_ctx);
return ret;
}
}
@@ -1251,7 +1236,7 @@ retry:
goto retry;
}
- ww_acquire_done(acquire_ctx);
+ ww_acquire_fini(acquire_ctx);
return ret;
}
}
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 8ad0e02991ca..904fc893c905 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -302,6 +302,7 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
if (!ret) {
if (WARN_ON(map->is_iomem)) {
+ dma_buf_vunmap(obj->import_attach->dmabuf, map);
ret = -EIO;
goto err_put_pages;
}
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 1fbbc19f1ac0..7bb98e6a446d 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -74,8 +74,8 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
-void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
- struct dma_buf *dma_buf);
+void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
+ uint32_t handle);
/* drm_drv.c */
struct drm_minor *drm_minor_acquire(unsigned int minor_id);
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index a3f180653b8b..eb09e86044c6 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -190,29 +190,33 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
return -ENOENT;
}
-void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
- struct dma_buf *dma_buf)
+void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
+ uint32_t handle)
{
struct rb_node *rb;
- rb = prime_fpriv->dmabufs.rb_node;
+ mutex_lock(&prime_fpriv->lock);
+
+ rb = prime_fpriv->handles.rb_node;
while (rb) {
struct drm_prime_member *member;
- member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
- if (member->dma_buf == dma_buf) {
+ member = rb_entry(rb, struct drm_prime_member, handle_rb);
+ if (member->handle == handle) {
rb_erase(&member->handle_rb, &prime_fpriv->handles);
rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);
- dma_buf_put(dma_buf);
+ dma_buf_put(member->dma_buf);
kfree(member);
- return;
- } else if (member->dma_buf < dma_buf) {
+ break;
+ } else if (member->handle < handle) {
rb = rb->rb_right;
} else {
rb = rb->rb_left;
}
}
+
+ mutex_unlock(&prime_fpriv->lock);
}
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 7655142a4651..10b0036f8a2e 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1594,12 +1594,12 @@ static int hdmi_audio_hw_params(struct device *dev, void *data,
struct hdmi_context *hdata = dev_get_drvdata(dev);
if (daifmt->fmt != HDMI_I2S || daifmt->bit_clk_inv ||
- daifmt->frame_clk_inv || daifmt->bit_clk_master ||
- daifmt->frame_clk_master) {
+ daifmt->frame_clk_inv || daifmt->bit_clk_provider ||
+ daifmt->frame_clk_provider) {
dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__,
daifmt->bit_clk_inv, daifmt->frame_clk_inv,
- daifmt->bit_clk_master,
- daifmt->frame_clk_master);
+ daifmt->bit_clk_provider,
+ daifmt->frame_clk_provider);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 7c4455541dbb..f8eb6f69be05 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -1096,11 +1096,11 @@ static int tda998x_audio_hw_params(struct device *dev, void *data,
if (!spdif &&
(daifmt->bit_clk_inv || daifmt->frame_clk_inv ||
- daifmt->bit_clk_master || daifmt->frame_clk_master)) {
+ daifmt->bit_clk_provider || daifmt->frame_clk_provider)) {
dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__,
daifmt->bit_clk_inv, daifmt->frame_clk_inv,
- daifmt->bit_clk_master,
- daifmt->frame_clk_master);
+ daifmt->bit_clk_provider,
+ daifmt->frame_clk_provider);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index a0f84cbe974f..fc5d94862ef3 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -27,7 +27,6 @@
#include <acpi/video.h>
#include <linux/i2c.h>
#include <linux/input.h>
-#include <linux/intel-iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-resv.h>
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index b7b2c14fd9e1..cd75b0ca2555 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -6,7 +6,6 @@
#include <linux/dma-resv.h>
#include <linux/highmem.h>
-#include <linux/intel-iommu.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index ccec4055fde3..389e9f157ca5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -268,7 +268,7 @@ static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
*/
void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj)
{
- assert_object_held(obj);
+ assert_object_held_shared(obj);
if (!list_empty(&obj->vma.list)) {
struct i915_vma *vma;
@@ -331,15 +331,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
continue;
}
- if (!i915_gem_object_trylock(obj, NULL)) {
- /* busy, toss it back to the pile */
- if (llist_add(&obj->freed, &i915->mm.free_list))
- queue_delayed_work(i915->wq, &i915->mm.free_work, msecs_to_jiffies(10));
- continue;
- }
-
__i915_gem_object_pages_fini(obj);
- i915_gem_object_unlock(obj);
__i915_gem_free_object(obj);
/* But keep the pointer alive for RCU-protected lookups */
@@ -359,7 +351,7 @@ void i915_gem_flush_free_objects(struct drm_i915_private *i915)
static void __i915_gem_free_work(struct work_struct *work)
{
struct drm_i915_private *i915 =
- container_of(work, struct drm_i915_private, mm.free_work.work);
+ container_of(work, struct drm_i915_private, mm.free_work);
i915_gem_flush_free_objects(i915);
}
@@ -391,7 +383,7 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
*/
if (llist_add(&obj->freed, &i915->mm.free_list))
- queue_delayed_work(i915->wq, &i915->mm.free_work, 0);
+ queue_work(i915->wq, &i915->mm.free_work);
}
void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
@@ -745,7 +737,7 @@ bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj)
void i915_gem_init__objects(struct drm_i915_private *i915)
{
- INIT_DELAYED_WORK(&i915->mm.free_work, __i915_gem_free_work);
+ INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
}
void i915_objects_module_exit(void)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 5cf36a130061..9f6b14ec189a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -335,7 +335,6 @@ struct drm_i915_gem_object {
#define I915_BO_READONLY BIT(7)
#define I915_TILING_QUIRK_BIT 8 /* unknown swizzling; do not release! */
#define I915_BO_PROTECTED BIT(9)
-#define I915_BO_WAS_BOUND_BIT 10
/**
* @mem_flags - Mutable placement-related flags
*
@@ -616,6 +615,8 @@ struct drm_i915_gem_object {
* pages were last acquired.
*/
bool dirty:1;
+
+ u32 tlb;
} mm;
struct {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 97c820eee115..8357dbdcab5c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -6,14 +6,15 @@
#include <drm/drm_cache.h>
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_pm.h"
+
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"
-#include "gt/intel_gt.h"
-
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages,
unsigned int sg_page_sizes)
@@ -190,6 +191,18 @@ static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
vunmap(ptr);
}
+static void flush_tlb_invalidate(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct intel_gt *gt = to_gt(i915);
+
+ if (!obj->mm.tlb)
+ return;
+
+ intel_gt_invalidate_tlb(gt, obj->mm.tlb);
+ obj->mm.tlb = 0;
+}
+
struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
@@ -215,13 +228,7 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
__i915_gem_object_reset_page_iter(obj);
obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
- if (test_and_clear_bit(I915_BO_WAS_BOUND_BIT, &obj->flags)) {
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- intel_wakeref_t wakeref;
-
- with_intel_runtime_pm_if_active(&i915->runtime_pm, wakeref)
- intel_gt_invalidate_tlbs(to_gt(i915));
- }
+ flush_tlb_invalidate(obj);
return pages;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 1030053571a2..8dc5c8874d8a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -426,7 +426,8 @@ void i915_gem_driver_register__shrinker(struct drm_i915_private *i915)
i915->mm.shrinker.count_objects = i915_gem_shrinker_count;
i915->mm.shrinker.seeks = DEFAULT_SEEKS;
i915->mm.shrinker.batch = 4096;
- drm_WARN_ON(&i915->drm, register_shrinker(&i915->mm.shrinker));
+ drm_WARN_ON(&i915->drm, register_shrinker(&i915->mm.shrinker,
+ "drm-i915_gem"));
i915->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
drm_WARN_ON(&i915->drm, register_oom_notifier(&i915->mm.oom_notifier));
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 68c2b0d8f187..f435e06125aa 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -11,7 +11,9 @@
#include "pxp/intel_pxp.h"
#include "i915_drv.h"
+#include "i915_perf_oa_regs.h"
#include "intel_context.h"
+#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
#include "intel_ggtt_gmch.h"
#include "intel_gt.h"
@@ -36,8 +38,6 @@ static void __intel_gt_init_early(struct intel_gt *gt)
{
spin_lock_init(&gt->irq_lock);
- mutex_init(&gt->tlb_invalidate_lock);
-
INIT_LIST_HEAD(&gt->closed_vma);
spin_lock_init(&gt->closed_lock);
@@ -48,6 +48,8 @@ static void __intel_gt_init_early(struct intel_gt *gt)
intel_gt_init_reset(gt);
intel_gt_init_requests(gt);
intel_gt_init_timelines(gt);
+ mutex_init(&gt->tlb.invalidate_lock);
+ seqcount_mutex_init(&gt->tlb.seqno, &gt->tlb.invalidate_lock);
intel_gt_pm_init_early(gt);
intel_uc_init_early(&gt->uc);
@@ -768,6 +770,7 @@ void intel_gt_driver_late_release_all(struct drm_i915_private *i915)
intel_gt_fini_requests(gt);
intel_gt_fini_reset(gt);
intel_gt_fini_timelines(gt);
+ mutex_destroy(&gt->tlb.invalidate_lock);
intel_engines_free(gt);
}
}
@@ -906,7 +909,7 @@ get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8,
return rb;
}
-void intel_gt_invalidate_tlbs(struct intel_gt *gt)
+static void mmio_invalidate_full(struct intel_gt *gt)
{
static const i915_reg_t gen8_regs[] = {
[RENDER_CLASS] = GEN8_RTCR,
@@ -924,13 +927,11 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
struct intel_engine_cs *engine;
+ intel_engine_mask_t awake, tmp;
enum intel_engine_id id;
const i915_reg_t *regs;
unsigned int num = 0;
- if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
- return;
-
if (GRAPHICS_VER(i915) == 12) {
regs = gen12_regs;
num = ARRAY_SIZE(gen12_regs);
@@ -945,28 +946,41 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
"Platform does not implement TLB invalidation!"))
return;
- GEM_TRACE("\n");
-
- assert_rpm_wakelock_held(&i915->runtime_pm);
-
- mutex_lock(&gt->tlb_invalidate_lock);
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
spin_lock_irq(&uncore->lock); /* serialise invalidate with GT reset */
+ awake = 0;
for_each_engine(engine, gt, id) {
struct reg_and_bit rb;
+ if (!intel_engine_pm_is_awake(engine))
+ continue;
+
rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
if (!i915_mmio_reg_offset(rb.reg))
continue;
intel_uncore_write_fw(uncore, rb.reg, rb.bit);
+ awake |= engine->mask;
}
+ GT_TRACE(gt, "invalidated engines %08x\n", awake);
+
+ /* Wa_2207587034:tgl,dg1,rkl,adl-s,adl-p */
+ if (awake &&
+ (IS_TIGERLAKE(i915) ||
+ IS_DG1(i915) ||
+ IS_ROCKETLAKE(i915) ||
+ IS_ALDERLAKE_S(i915) ||
+ IS_ALDERLAKE_P(i915)))
+ intel_uncore_write_fw(uncore, GEN12_OA_TLB_INV_CR, 1);
+
spin_unlock_irq(&uncore->lock);
- for_each_engine(engine, gt, id) {
+ for_each_engine_masked(engine, gt, awake, tmp) {
+ struct reg_and_bit rb;
+
/*
* HW architecture suggest typical invalidation time at 40us,
* with pessimistic cases up to 100us and a recommendation to
@@ -974,12 +988,8 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
*/
const unsigned int timeout_us = 100;
const unsigned int timeout_ms = 4;
- struct reg_and_bit rb;
rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
- if (!i915_mmio_reg_offset(rb.reg))
- continue;
-
if (__intel_wait_for_register_fw(uncore,
rb.reg, rb.bit, 0,
timeout_us, timeout_ms,
@@ -996,5 +1006,38 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
* transitions.
*/
intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL);
- mutex_unlock(&gt->tlb_invalidate_lock);
+}
+
+static bool tlb_seqno_passed(const struct intel_gt *gt, u32 seqno)
+{
+ u32 cur = intel_gt_tlb_seqno(gt);
+
+ /* Only skip if a *full* TLB invalidate barrier has passed */
+ return (s32)(cur - ALIGN(seqno, 2)) > 0;
+}
+
+void intel_gt_invalidate_tlb(struct intel_gt *gt, u32 seqno)
+{
+ intel_wakeref_t wakeref;
+
+ if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
+ return;
+
+ if (intel_gt_is_wedged(gt))
+ return;
+
+ if (tlb_seqno_passed(gt, seqno))
+ return;
+
+ with_intel_gt_pm_if_awake(gt, wakeref) {
+ mutex_lock(&gt->tlb.invalidate_lock);
+ if (tlb_seqno_passed(gt, seqno))
+ goto unlock;
+
+ mmio_invalidate_full(gt);
+
+ write_seqcount_invalidate(&gt->tlb.seqno);
+unlock:
+ mutex_unlock(&gt->tlb.invalidate_lock);
+ }
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 82d6f248d876..40b06adf509a 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -101,6 +101,16 @@ void intel_gt_info_print(const struct intel_gt_info *info,
void intel_gt_watchdog_work(struct work_struct *work);
-void intel_gt_invalidate_tlbs(struct intel_gt *gt);
+static inline u32 intel_gt_tlb_seqno(const struct intel_gt *gt)
+{
+ return seqprop_sequence(&gt->tlb.seqno);
+}
+
+static inline u32 intel_gt_next_invalidate_tlb_full(const struct intel_gt *gt)
+{
+ return intel_gt_tlb_seqno(gt) | 1;
+}
+
+void intel_gt_invalidate_tlb(struct intel_gt *gt, u32 seqno);
#endif /* __INTEL_GT_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index bc898df7a48c..a334787a4939 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -55,6 +55,9 @@ static inline void intel_gt_pm_might_put(struct intel_gt *gt)
for (tmp = 1, intel_gt_pm_get(gt); tmp; \
intel_gt_pm_put(gt), tmp = 0)
+#define with_intel_gt_pm_if_awake(gt, wf) \
+ for (wf = intel_gt_pm_get_if_awake(gt); wf; intel_gt_pm_put_async(gt), wf = 0)
+
static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt)
{
return intel_wakeref_wait_for_idle(&gt->wakeref);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index df708802889d..3804a583382b 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -11,6 +11,7 @@
#include <linux/llist.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
+#include <linux/seqlock.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>
@@ -83,7 +84,22 @@ struct intel_gt {
struct intel_uc uc;
struct intel_gsc gsc;
- struct mutex tlb_invalidate_lock;
+ struct {
+ /* Serialize global tlb invalidations */
+ struct mutex invalidate_lock;
+
+ /*
+ * Batch TLB invalidations
+ *
+ * After unbinding the PTE, we need to ensure the TLB
+ * are invalidated prior to releasing the physical pages.
+ * But we only need one such invalidation for all unbinds,
+ * so we track how many TLB invalidations have been
+ * performed since unbind the PTE and only emit an extra
+ * invalidate if no full barrier has been passed.
+ */
+ seqcount_mutex_t seqno;
+ } tlb;
struct i915_wa_list wa_list;
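The intel_gt_types.h hunk above replaces the single tlb_invalidate_lock with a tlb struct whose comment explains the batching scheme: once a PTE is unbound, the TLBs must be flushed before the pages are released, but one full invalidation can cover many unbinds. A seqcount, advanced by write_seqcount_invalidate() in intel_gt.c, records how many full invalidations have happened, and intel_gt_invalidate_tlb() only issues the MMIO flush when tlb_seqno_passed() reports that none has occurred since the caller's recorded seqno. Below is a minimal standalone model of that comparison, assuming the global counter stays even and advances by 2 per full invalidation; the arithmetic is taken from tlb_seqno_passed(), the scenario around it is illustrative.

#include <stdint.h>
#include <stdio.h>

#define ALIGN2(x) (((x) + 1u) & ~1u)	/* stand-in for ALIGN(x, 2) */

/* Same test as tlb_seqno_passed(): the signed difference keeps the comparison
 * correct across u32 wraparound, and rounding the recorded (odd) value up to
 * the next even value means only a strictly later full invalidation counts. */
static int seqno_passed(uint32_t cur, uint32_t recorded)
{
	return (int32_t)(cur - ALIGN2(recorded)) > 0;
}

int main(void)
{
	uint32_t gt = 4;	/* global counter: +2 per full invalidation */
	uint32_t rec = gt | 1;	/* what intel_gt_next_invalidate_tlb_full() would return: 5 */

	printf("%d\n", seqno_passed(gt, rec));		/* 0: no invalidation since recording */
	gt += 2;
	printf("%d\n", seqno_passed(gt, rec));		/* 0: 6 - 6 is not > 0 */
	gt += 2;
	printf("%d\n", seqno_passed(gt, rec));		/* 1: a later full invalidation has passed */
	printf("%d\n", seqno_passed(2u, 0xfffffffbu));	/* 1: still correct across u32 wraparound */
	return 0;
}
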
diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c
index 2c35324b5f68..2b10b96b17b5 100644
--- a/drivers/gpu/drm/i915/gt/intel_migrate.c
+++ b/drivers/gpu/drm/i915/gt/intel_migrate.c
@@ -708,7 +708,7 @@ intel_context_migrate_copy(struct intel_context *ce,
u8 src_access, dst_access;
struct i915_request *rq;
int src_sz, dst_sz;
- bool ccs_is_src;
+ bool ccs_is_src, overwrite_ccs;
int err;
GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
@@ -749,6 +749,8 @@ intel_context_migrate_copy(struct intel_context *ce,
get_ccs_sg_sgt(&it_ccs, bytes_to_cpy);
}
+ overwrite_ccs = HAS_FLAT_CCS(i915) && !ccs_bytes_to_cpy && dst_is_lmem;
+
src_offset = 0;
dst_offset = CHUNK_SZ;
if (HAS_64K_PAGES(ce->engine->i915)) {
@@ -852,6 +854,25 @@ intel_context_migrate_copy(struct intel_context *ce,
if (err)
goto out_rq;
ccs_bytes_to_cpy -= ccs_sz;
+ } else if (overwrite_ccs) {
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ goto out_rq;
+
+ /*
+ * While we can't always restore/manage the CCS state,
+ * we still need to ensure we don't leak the CCS state
+ * from the previous user, so make sure we overwrite it
+ * with something.
+ */
+ err = emit_copy_ccs(rq, dst_offset, INDIRECT_ACCESS,
+ dst_offset, DIRECT_ACCESS, len);
+ if (err)
+ goto out_rq;
+
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ goto out_rq;
}
/* Arbitration is re-enabled between requests. */
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
index d8b94d638559..6ee8d1127016 100644
--- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -206,8 +206,12 @@ void ppgtt_bind_vma(struct i915_address_space *vm,
void ppgtt_unbind_vma(struct i915_address_space *vm,
struct i915_vma_resource *vma_res)
{
- if (vma_res->allocated)
- vm->clear_range(vm, vma_res->start, vma_res->vma_size);
+ if (!vma_res->allocated)
+ return;
+
+ vm->clear_range(vm, vma_res->start, vma_res->vma_size);
+ if (vma_res->tlb)
+ vma_invalidate_tlb(vm, vma_res->tlb);
}
static unsigned long pd_count(u64 size, int shift)
diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
index 6e90032e12e9..aa6aed837194 100644
--- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -15,6 +15,7 @@
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_regs.h"
+#ifdef CONFIG_64BIT
static void _release_bars(struct pci_dev *pdev)
{
int resno;
@@ -111,6 +112,9 @@ static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t
pci_assign_unassigned_bus_resources(pdev->bus);
pci_write_config_dword(pdev, PCI_COMMAND, pci_cmd);
}
+#else
+static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t lmem_size) {}
+#endif
static int
region_lmem_release(struct intel_memory_region *mem)
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index aee1a45da74b..705689e64011 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -226,7 +226,6 @@ struct intel_vgpu {
unsigned long nr_cache_entries;
struct mutex cache_lock;
- struct notifier_block iommu_notifier;
atomic_t released;
struct kvm_page_track_notifier_node track_node;
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index e2f6c56ab342..e3cd58946477 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -231,57 +231,38 @@ static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long size)
{
- struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
- int total_pages;
- int npage;
- int ret;
-
- total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
-
- for (npage = 0; npage < total_pages; npage++) {
- unsigned long cur_gfn = gfn + npage;
-
- ret = vfio_unpin_pages(&vgpu->vfio_device, &cur_gfn, 1);
- drm_WARN_ON(&i915->drm, ret != 1);
- }
+ vfio_unpin_pages(&vgpu->vfio_device, gfn << PAGE_SHIFT,
+ DIV_ROUND_UP(size, PAGE_SIZE));
}
/* Pin a normal or compound guest page for dma. */
static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long size, struct page **page)
{
- unsigned long base_pfn = 0;
- int total_pages;
+ int total_pages = DIV_ROUND_UP(size, PAGE_SIZE);
+ struct page *base_page = NULL;
int npage;
int ret;
- total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
/*
	 * We pin the pages one-by-one to avoid allocating a big array
* on stack to hold pfns.
*/
for (npage = 0; npage < total_pages; npage++) {
- unsigned long cur_gfn = gfn + npage;
- unsigned long pfn;
+ dma_addr_t cur_iova = (gfn + npage) << PAGE_SHIFT;
+ struct page *cur_page;
- ret = vfio_pin_pages(&vgpu->vfio_device, &cur_gfn, 1,
- IOMMU_READ | IOMMU_WRITE, &pfn);
+ ret = vfio_pin_pages(&vgpu->vfio_device, cur_iova, 1,
+ IOMMU_READ | IOMMU_WRITE, &cur_page);
if (ret != 1) {
- gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
- cur_gfn, ret);
- goto err;
- }
-
- if (!pfn_valid(pfn)) {
- gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn);
- npage++;
- ret = -EFAULT;
+ gvt_vgpu_err("vfio_pin_pages failed for iova %pad, ret %d\n",
+ &cur_iova, ret);
goto err;
}
if (npage == 0)
- base_pfn = pfn;
- else if (base_pfn + npage != pfn) {
+ base_page = cur_page;
+ else if (base_page + npage != cur_page) {
gvt_vgpu_err("The pages are not continuous\n");
ret = -EINVAL;
npage++;
@@ -289,7 +270,7 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
}
}
- *page = pfn_to_page(base_pfn);
+ *page = base_page;
return 0;
err:
gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
@@ -729,34 +710,25 @@ int intel_gvt_set_edid(struct intel_vgpu *vgpu, int port_num)
return ret;
}
-static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
+static void intel_vgpu_dma_unmap(struct vfio_device *vfio_dev, u64 iova,
+ u64 length)
{
- struct intel_vgpu *vgpu =
- container_of(nb, struct intel_vgpu, iommu_notifier);
-
- if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
- struct vfio_iommu_type1_dma_unmap *unmap = data;
- struct gvt_dma *entry;
- unsigned long iov_pfn, end_iov_pfn;
+ struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
+ struct gvt_dma *entry;
+ u64 iov_pfn = iova >> PAGE_SHIFT;
+ u64 end_iov_pfn = iov_pfn + length / PAGE_SIZE;
- iov_pfn = unmap->iova >> PAGE_SHIFT;
- end_iov_pfn = iov_pfn + unmap->size / PAGE_SIZE;
+ mutex_lock(&vgpu->cache_lock);
+ for (; iov_pfn < end_iov_pfn; iov_pfn++) {
+ entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
+ if (!entry)
+ continue;
- mutex_lock(&vgpu->cache_lock);
- for (; iov_pfn < end_iov_pfn; iov_pfn++) {
- entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
- if (!entry)
- continue;
-
- gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr,
- entry->size);
- __gvt_cache_remove_entry(vgpu, entry);
- }
- mutex_unlock(&vgpu->cache_lock);
+ gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr,
+ entry->size);
+ __gvt_cache_remove_entry(vgpu, entry);
}
-
- return NOTIFY_OK;
+ mutex_unlock(&vgpu->cache_lock);
}
static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu)
@@ -783,36 +755,20 @@ out:
static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
{
struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
- unsigned long events;
- int ret;
-
- vgpu->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
-
- events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
- ret = vfio_register_notifier(vfio_dev, VFIO_IOMMU_NOTIFY, &events,
- &vgpu->iommu_notifier);
- if (ret != 0) {
- gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
- ret);
- goto out;
- }
- ret = -EEXIST;
if (vgpu->attached)
- goto undo_iommu;
+ return -EEXIST;
- ret = -ESRCH;
if (!vgpu->vfio_device.kvm ||
vgpu->vfio_device.kvm->mm != current->mm) {
gvt_vgpu_err("KVM is required to use Intel vGPU\n");
- goto undo_iommu;
+ return -ESRCH;
}
kvm_get_kvm(vgpu->vfio_device.kvm);
- ret = -EEXIST;
if (__kvmgt_vgpu_exist(vgpu))
- goto undo_iommu;
+ return -EEXIST;
vgpu->attached = true;
@@ -831,12 +787,6 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
atomic_set(&vgpu->released, 0);
return 0;
-
-undo_iommu:
- vfio_unregister_notifier(vfio_dev, VFIO_IOMMU_NOTIFY,
- &vgpu->iommu_notifier);
-out:
- return ret;
}
static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
@@ -853,8 +803,6 @@ static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
{
struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
- struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
- int ret;
if (!vgpu->attached)
return;
@@ -864,11 +812,6 @@ static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
intel_gvt_release_vgpu(vgpu);
- ret = vfio_unregister_notifier(&vgpu->vfio_device, VFIO_IOMMU_NOTIFY,
- &vgpu->iommu_notifier);
- drm_WARN(&i915->drm, ret,
- "vfio_unregister_notifier for iommu failed: %d\n", ret);
-
debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME, vgpu->debugfs));
kvm_page_track_unregister_notifier(vgpu->vfio_device.kvm,
@@ -1610,6 +1553,7 @@ static const struct vfio_device_ops intel_vgpu_dev_ops = {
.write = intel_vgpu_write,
.mmap = intel_vgpu_mmap,
.ioctl = intel_vgpu_ioctl,
+ .dma_unmap = intel_vgpu_dma_unmap,
};
static int intel_vgpu_probe(struct mdev_device *mdev)
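The pinning path now works on IOVAs and struct page pointers instead of raw PFNs. A minimal sketch of the updated vfio_pin_pages()/vfio_unpin_pages() pairing for a single page (the wrapper names are hypothetical):

#include <linux/vfio.h>

static int pin_one_guest_page(struct vfio_device *vdev, unsigned long gfn,
			      struct page **page)
{
	dma_addr_t iova = (dma_addr_t)gfn << PAGE_SHIFT;
	int ret;

	/* returns the number of pages pinned, or a negative error */
	ret = vfio_pin_pages(vdev, iova, 1, IOMMU_READ | IOMMU_WRITE, page);
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;
	return 0;
}

static void unpin_one_guest_page(struct vfio_device *vdev, unsigned long gfn)
{
	vfio_unpin_pages(vdev, (dma_addr_t)gfn << PAGE_SHIFT, 1);
}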
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d25647be25d1..086bbe8945d6 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -247,7 +247,7 @@ struct i915_gem_mm {
* List of objects which are pending destruction.
*/
struct llist_head free_list;
- struct delayed_work free_work;
+ struct work_struct free_work;
/**
* Count of objects pending destructions. Used to skip needlessly
* waiting on an RCU barrier if no objects are waiting to be freed.
@@ -1378,7 +1378,7 @@ static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
* armed the work again.
*/
while (atomic_read(&i915->mm.free_count)) {
- flush_delayed_work(&i915->mm.free_work);
+ flush_work(&i915->mm.free_work);
flush_delayed_work(&i915->bdev.wq);
rcu_barrier();
}
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index ef3b04c7e153..260371716490 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -538,8 +538,6 @@ int i915_vma_bind(struct i915_vma *vma,
bind_flags);
}
- set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags);
-
atomic_or(bind_flags, &vma->flags);
return 0;
}
@@ -1310,6 +1308,19 @@ err_unpin:
return err;
}
+void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb)
+{
+ /*
+ * Before we release the pages that were bound by this vma, we
+ * must invalidate all the TLBs that may still have a reference
+ * back to our physical address. It only needs to be done once,
+ * so after updating the PTE to point away from the pages, record
+ * the most recent TLB invalidation seqno, and if we have not yet
+ * flushed the TLBs upon release, perform a full invalidation.
+ */
+ WRITE_ONCE(*tlb, intel_gt_next_invalidate_tlb_full(vm->gt));
+}
+
static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{
/* We allocate under vma_get_pages, so beware the shrinker */
@@ -1941,7 +1952,12 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
vma->vm->skip_pte_rewrite;
trace_i915_vma_unbind(vma);
- unbind_fence = i915_vma_resource_unbind(vma_res);
+ if (async)
+ unbind_fence = i915_vma_resource_unbind(vma_res,
+ &vma->obj->mm.tlb);
+ else
+ unbind_fence = i915_vma_resource_unbind(vma_res, NULL);
+
vma->resource = NULL;
atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
@@ -1949,10 +1965,13 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
i915_vma_detach(vma);
- if (!async && unbind_fence) {
- dma_fence_wait(unbind_fence, false);
- dma_fence_put(unbind_fence);
- unbind_fence = NULL;
+ if (!async) {
+ if (unbind_fence) {
+ dma_fence_wait(unbind_fence, false);
+ dma_fence_put(unbind_fence);
+ unbind_fence = NULL;
+ }
+ vma_invalidate_tlb(vma->vm, &vma->obj->mm.tlb);
}
/*
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 88ca0bd9c900..33a58f605d75 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -213,6 +213,7 @@ bool i915_vma_misplaced(const struct i915_vma *vma,
u64 size, u64 alignment, u64 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
void i915_vma_revoke_mmap(struct i915_vma *vma);
+void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb);
struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async);
int __i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
diff --git a/drivers/gpu/drm/i915/i915_vma_resource.c b/drivers/gpu/drm/i915/i915_vma_resource.c
index 27c55027387a..5a67995ea5fe 100644
--- a/drivers/gpu/drm/i915/i915_vma_resource.c
+++ b/drivers/gpu/drm/i915/i915_vma_resource.c
@@ -223,10 +223,13 @@ i915_vma_resource_fence_notify(struct i915_sw_fence *fence,
* Return: A refcounted pointer to a dma-fence that signals when unbinding is
* complete.
*/
-struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res)
+struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res,
+ u32 *tlb)
{
struct i915_address_space *vm = vma_res->vm;
+ vma_res->tlb = tlb;
+
/* Reference for the sw fence */
i915_vma_resource_get(vma_res);
diff --git a/drivers/gpu/drm/i915/i915_vma_resource.h b/drivers/gpu/drm/i915/i915_vma_resource.h
index 5d8427caa2ba..06923d1816e7 100644
--- a/drivers/gpu/drm/i915/i915_vma_resource.h
+++ b/drivers/gpu/drm/i915/i915_vma_resource.h
@@ -67,6 +67,7 @@ struct i915_page_sizes {
* taken when the unbind is scheduled.
* @skip_pte_rewrite: During ggtt suspend and vm takedown pte rewriting
* needs to be skipped for unbind.
+ * @tlb: pointer to obj->mm.tlb for an async unbind, otherwise NULL
*
* The lifetime of a struct i915_vma_resource is from a binding request to
* the actual possible asynchronous unbind has completed.
@@ -119,6 +120,8 @@ struct i915_vma_resource {
bool immediate_unbind:1;
bool needs_wakeref:1;
bool skip_pte_rewrite:1;
+
+ u32 *tlb;
};
bool i915_vma_resource_hold(struct i915_vma_resource *vma_res,
@@ -131,7 +134,8 @@ struct i915_vma_resource *i915_vma_resource_alloc(void);
void i915_vma_resource_free(struct i915_vma_resource *vma_res);
-struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res);
+struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res,
+ u32 *tlb);
void __i915_vma_resource_init(struct i915_vma_resource *vma_res);
diff --git a/drivers/gpu/drm/imx/dcss/dcss-kms.c b/drivers/gpu/drm/imx/dcss/dcss-kms.c
index 9b84df34a6a1..8cf3352d8858 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-kms.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-kms.c
@@ -142,8 +142,6 @@ struct dcss_kms_dev *dcss_kms_attach(struct dcss_dev *dcss)
drm_kms_helper_poll_init(drm);
- drm_bridge_connector_enable_hpd(kms->connector);
-
ret = drm_dev_register(drm, 0);
if (ret)
goto cleanup_crtc;
diff --git a/drivers/gpu/drm/lima/lima_devfreq.c b/drivers/gpu/drm/lima/lima_devfreq.c
index 8989e215dfc9..011be7ff51e1 100644
--- a/drivers/gpu/drm/lima/lima_devfreq.c
+++ b/drivers/gpu/drm/lima/lima_devfreq.c
@@ -111,6 +111,12 @@ int lima_devfreq_init(struct lima_device *ldev)
struct dev_pm_opp *opp;
unsigned long cur_freq;
int ret;
+ const char *regulator_names[] = { "mali", NULL };
+ const char *clk_names[] = { "core", NULL };
+ struct dev_pm_opp_config config = {
+ .regulator_names = regulator_names,
+ .clk_names = clk_names,
+ };
if (!device_property_present(dev, "operating-points-v2"))
/* Optional, continue without devfreq */
@@ -118,11 +124,7 @@ int lima_devfreq_init(struct lima_device *ldev)
spin_lock_init(&ldevfreq->lock);
- ret = devm_pm_opp_set_clkname(dev, "core");
- if (ret)
- return ret;
-
- ret = devm_pm_opp_set_regulators(dev, (const char *[]){ "mali" }, 1);
+ ret = devm_pm_opp_set_config(dev, &config);
if (ret) {
/* Continue if the optional regulator is missing */
if (ret != -ENODEV)
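devm_pm_opp_set_config() replaces the separate clk-name and regulator calls with one NULL-terminated configuration, as the hunk above shows. A stand-alone sketch for a hypothetical driver (the supply and clock names are illustrative):

#include <linux/pm_opp.h>

static int example_opp_init(struct device *dev)
{
	const char *regulators[] = { "vdd", NULL };	/* must be NULL-terminated */
	const char *clks[] = { "core", NULL };
	struct dev_pm_opp_config config = {
		.regulator_names = regulators,
		.clk_names = clks,
	};

	return devm_pm_opp_set_config(dev, &config);
}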
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 1b70938cfd2c..bd4ca11d3ff5 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -115,8 +115,11 @@ static bool meson_vpu_has_available_connectors(struct device *dev)
for_each_endpoint_of_node(dev->of_node, ep) {
/* If the endpoint node exists, consider it enabled */
remote = of_graph_get_remote_port(ep);
- if (remote)
+ if (remote) {
+ of_node_put(remote);
+ of_node_put(ep);
return true;
+ }
}
return false;
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index 259f3e6bec90..bb7e109534de 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -469,17 +469,17 @@ void meson_viu_init(struct meson_drm *priv)
priv->io_base + _REG(VD2_IF0_LUMA_FIFO_SIZE));
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
- writel_relaxed(VIU_OSD_BLEND_REORDER(0, 1) |
- VIU_OSD_BLEND_REORDER(1, 0) |
- VIU_OSD_BLEND_REORDER(2, 0) |
- VIU_OSD_BLEND_REORDER(3, 0) |
- VIU_OSD_BLEND_DIN_EN(1) |
- VIU_OSD_BLEND1_DIN3_BYPASS_TO_DOUT1 |
- VIU_OSD_BLEND1_DOUT_BYPASS_TO_BLEND2 |
- VIU_OSD_BLEND_DIN0_BYPASS_TO_DOUT0 |
- VIU_OSD_BLEND_BLEN2_PREMULT_EN(1) |
- VIU_OSD_BLEND_HOLD_LINES(4),
- priv->io_base + _REG(VIU_OSD_BLEND_CTRL));
+ u32 val = (u32)VIU_OSD_BLEND_REORDER(0, 1) |
+ (u32)VIU_OSD_BLEND_REORDER(1, 0) |
+ (u32)VIU_OSD_BLEND_REORDER(2, 0) |
+ (u32)VIU_OSD_BLEND_REORDER(3, 0) |
+ (u32)VIU_OSD_BLEND_DIN_EN(1) |
+ (u32)VIU_OSD_BLEND1_DIN3_BYPASS_TO_DOUT1 |
+ (u32)VIU_OSD_BLEND1_DOUT_BYPASS_TO_BLEND2 |
+ (u32)VIU_OSD_BLEND_DIN0_BYPASS_TO_DOUT0 |
+ (u32)VIU_OSD_BLEND_BLEN2_PREMULT_EN(1) |
+ (u32)VIU_OSD_BLEND_HOLD_LINES(4);
+ writel_relaxed(val, priv->io_base + _REG(VIU_OSD_BLEND_CTRL));
writel_relaxed(OSD_BLEND_PATH_SEL_ENABLE,
priv->io_base + _REG(OSD1_BLEND_SRC_CTRL));
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index 6e39d959b9f0..0317055e3253 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -221,7 +221,7 @@ void msm_gem_shrinker_init(struct drm_device *dev)
priv->shrinker.count_objects = msm_gem_shrinker_count;
priv->shrinker.scan_objects = msm_gem_shrinker_scan;
priv->shrinker.seeks = DEFAULT_SEEKS;
- WARN_ON(register_shrinker(&priv->shrinker));
+ WARN_ON(register_shrinker(&priv->shrinker, "drm-msm_gem"));
priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
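register_shrinker() now takes a printf-style name that identifies the shrinker in the new shrinker debugfs interface. A self-contained sketch with stub callbacks (everything here is illustrative):

#include <linux/shrinker.h>

static unsigned long example_count(struct shrinker *s, struct shrink_control *sc)
{
	return 0;		/* stub: nothing reclaimable */
}

static unsigned long example_scan(struct shrinker *s, struct shrink_control *sc)
{
	return SHRINK_STOP;	/* stub: never frees anything */
}

static struct shrinker example_shrinker = {
	.count_objects = example_count,
	.scan_objects = example_scan,
	.seeks = DEFAULT_SEEKS,
};

static int example_register(void)
{
	return register_shrinker(&example_shrinker, "drm-example");
}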
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 05076e530e7d..e29175e4b44c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -820,6 +820,15 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
if (ret == 0) {
ret = nouveau_fence_new(chan, false, &fence);
if (ret == 0) {
+ /* TODO: figure out a better solution here
+ *
+ * wait on the fence here explicitly as going through
+ * ttm_bo_move_accel_cleanup somehow doesn't seem to do it.
+ *
+			 * Without this the operation can time out and we'll fall back to a
+ * software copy, which might take several minutes to finish.
+ */
+ nouveau_fence_wait(fence, false, false);
ret = ttm_bo_move_accel_cleanup(bo,
&fence->base,
evict, false,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 568182e68dd7..d8cf71fb0512 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2605,6 +2605,27 @@ nv172_chipset = {
};
static const struct nvkm_device_chip
+nv173_chipset = {
+ .name = "GA103",
+ .bar = { 0x00000001, tu102_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .devinit = { 0x00000001, ga100_devinit_new },
+ .fb = { 0x00000001, ga102_fb_new },
+ .gpio = { 0x00000001, ga102_gpio_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, ga100_mc_new },
+ .mmu = { 0x00000001, tu102_mmu_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, ga100_top_new },
+ .disp = { 0x00000001, ga102_disp_new },
+ .dma = { 0x00000001, gv100_dma_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
nv174_chipset = {
.name = "GA104",
.bar = { 0x00000001, tu102_bar_new },
@@ -3067,6 +3088,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x167: device->chip = &nv167_chipset; break;
case 0x168: device->chip = &nv168_chipset; break;
case 0x172: device->chip = &nv172_chipset; break;
+ case 0x173: device->chip = &nv173_chipset; break;
case 0x174: device->chip = &nv174_chipset; break;
case 0x176: device->chip = &nv176_chipset; break;
case 0x177: device->chip = &nv177_chipset; break;
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index 194af7f607a6..5110cd9b2425 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -101,8 +101,7 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
return 0;
}
- ret = devm_pm_opp_set_regulators(dev, pfdev->comp->supply_names,
- pfdev->comp->num_supplies);
+ ret = devm_pm_opp_set_regulators(dev, pfdev->comp->supply_names);
if (ret) {
/* Continue if the optional regulator is missing */
if (ret != -ENODEV) {
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 2d870cf73b07..2fa5afe21288 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -626,24 +626,29 @@ static int panfrost_remove(struct platform_device *pdev)
return 0;
}
-static const char * const default_supplies[] = { "mali" };
+/*
+ * The OPP core wants the supply names to be NULL terminated, but we need the
+ * correct num_supplies value for regulator core. Hence, we NULL terminate here
+ * and then initialize num_supplies with ARRAY_SIZE - 1.
+ */
+static const char * const default_supplies[] = { "mali", NULL };
static const struct panfrost_compatible default_data = {
- .num_supplies = ARRAY_SIZE(default_supplies),
+ .num_supplies = ARRAY_SIZE(default_supplies) - 1,
.supply_names = default_supplies,
.num_pm_domains = 1, /* optional */
.pm_domain_names = NULL,
};
static const struct panfrost_compatible amlogic_data = {
- .num_supplies = ARRAY_SIZE(default_supplies),
+ .num_supplies = ARRAY_SIZE(default_supplies) - 1,
.supply_names = default_supplies,
.vendor_quirk = panfrost_gpu_amlogic_quirk,
};
-static const char * const mediatek_mt8183_supplies[] = { "mali", "sram" };
+static const char * const mediatek_mt8183_supplies[] = { "mali", "sram", NULL };
static const char * const mediatek_mt8183_pm_domains[] = { "core0", "core1", "core2" };
static const struct panfrost_compatible mediatek_mt8183_data = {
- .num_supplies = ARRAY_SIZE(mediatek_mt8183_supplies),
+ .num_supplies = ARRAY_SIZE(mediatek_mt8183_supplies) - 1,
.supply_names = mediatek_mt8183_supplies,
.num_pm_domains = ARRAY_SIZE(mediatek_mt8183_pm_domains),
.pm_domain_names = mediatek_mt8183_pm_domains,
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
index 77e7cb6d1ae3..bf0170782f25 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
@@ -103,7 +103,7 @@ void panfrost_gem_shrinker_init(struct drm_device *dev)
pfdev->shrinker.count_objects = panfrost_gem_shrinker_count;
pfdev->shrinker.scan_objects = panfrost_gem_shrinker_scan;
pfdev->shrinker.seeks = DEFAULT_SEEKS;
- WARN_ON(register_shrinker(&pfdev->shrinker));
+ WARN_ON(register_shrinker(&pfdev->shrinker, "drm-panfrost"));
}
/**
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 2b12389f841a..ee0165687239 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1605,6 +1605,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
if (r) {
/* delay GPU reset to resume */
radeon_fence_driver_force_completion(rdev, i);
+ } else {
+ /* finish executing delayed work */
+ flush_delayed_work(&rdev->fence_drv[i].lockup_work);
}
}
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 61a034a01764..cb82622877d2 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -1176,12 +1176,12 @@ static int hdmi_audio_hw_params(struct device *dev,
DRM_DEBUG_DRIVER("\n");
if ((daifmt->fmt != HDMI_I2S) || daifmt->bit_clk_inv ||
- daifmt->frame_clk_inv || daifmt->bit_clk_master ||
- daifmt->frame_clk_master) {
+ daifmt->frame_clk_inv || daifmt->bit_clk_provider ||
+ daifmt->frame_clk_provider) {
dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__,
daifmt->bit_clk_inv, daifmt->frame_clk_inv,
- daifmt->bit_clk_master,
- daifmt->frame_clk_master);
+ daifmt->bit_clk_provider,
+ daifmt->frame_clk_provider);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index b4dfa166eccd..34234a144e87 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -531,7 +531,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
struct drm_display_mode *mode)
{
struct mipi_dsi_device *device = dsi->device;
- unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
+ int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
u16 hbp = 0, hfp = 0, hsa = 0, hblk = 0, vblk = 0;
u32 basic_ctl = 0;
size_t bytes;
@@ -555,7 +555,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
* (4 bytes). Its minimal size is therefore 10 bytes
*/
#define HSA_PACKET_OVERHEAD 10
- hsa = max((unsigned int)HSA_PACKET_OVERHEAD,
+ hsa = max(HSA_PACKET_OVERHEAD,
(mode->hsync_end - mode->hsync_start) * Bpp - HSA_PACKET_OVERHEAD);
/*
@@ -564,7 +564,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
* therefore 6 bytes
*/
#define HBP_PACKET_OVERHEAD 6
- hbp = max((unsigned int)HBP_PACKET_OVERHEAD,
+ hbp = max(HBP_PACKET_OVERHEAD,
(mode->htotal - mode->hsync_end) * Bpp - HBP_PACKET_OVERHEAD);
/*
@@ -574,7 +574,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
* 16 bytes
*/
#define HFP_PACKET_OVERHEAD 16
- hfp = max((unsigned int)HFP_PACKET_OVERHEAD,
+ hfp = max(HFP_PACKET_OVERHEAD,
(mode->hsync_start - mode->hdisplay) * Bpp - HFP_PACKET_OVERHEAD);
/*
@@ -583,7 +583,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
* bytes). Its minimal size is therefore 10 bytes.
*/
#define HBLK_PACKET_OVERHEAD 10
- hblk = max((unsigned int)HBLK_PACKET_OVERHEAD,
+ hblk = max(HBLK_PACKET_OVERHEAD,
(mode->htotal - (mode->hsync_end - mode->hsync_start)) * Bpp -
HBLK_PACKET_OVERHEAD);
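Dropping the unsigned casts matters because the subtracted overhead can exceed the computed payload size: with unsigned arithmetic the difference wraps around and max() picks the bogus huge value instead of clamping to the overhead. A worked example with made-up numbers (define and helper names are illustrative):

#include <linux/minmax.h>

#define EXAMPLE_OVERHEAD 10	/* mirrors HSA_PACKET_OVERHEAD above */

static int example_hsa_bytes(int sync_px, int bpp)
{
	/*
	 * e.g. sync_px = 2, bpp = 3: 6 - 10 = -4, so max() clamps to 10.
	 * With unsigned types, 6u - 10u wraps to 4294967292 and max()
	 * would return the wrapped value instead.
	 */
	return max(EXAMPLE_OVERHEAD, sync_px * bpp - EXAMPLE_OVERHEAD);
}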
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 0e210df65c30..97184c333526 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -912,7 +912,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
/*
* We might need to add a TTM.
*/
- if (bo->resource->mem_type == TTM_PL_SYSTEM) {
+ if (!bo->resource || bo->resource->mem_type == TTM_PL_SYSTEM) {
ret = ttm_tt_create(bo, true);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 1bba0a0ed3f9..21b61631f73a 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -722,7 +722,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
mm_shrinker.count_objects = ttm_pool_shrinker_count;
mm_shrinker.scan_objects = ttm_pool_shrinker_scan;
mm_shrinker.seeks = 1;
- return register_shrinker(&mm_shrinker);
+ return register_shrinker(&mm_shrinker, "drm-ttm_pool");
}
/**
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig
index 061be9a6619d..b0f3117102ca 100644
--- a/drivers/gpu/drm/vc4/Kconfig
+++ b/drivers/gpu/drm/vc4/Kconfig
@@ -8,6 +8,7 @@ config DRM_VC4
depends on DRM
depends on SND && SND_SOC
depends on COMMON_CLK
+ depends on PM
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 6ab83296b0e4..1e5f68704d7d 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -2003,6 +2003,7 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data,
static const struct snd_soc_component_driver vc4_hdmi_audio_cpu_dai_comp = {
.name = "vc4-hdmi-cpu-dai-component",
+ .legacy_dai_naming = 1,
};
static int vc4_hdmi_audio_cpu_dai_probe(struct snd_soc_dai *dai)
@@ -2854,7 +2855,7 @@ static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
return 0;
}
-static int __maybe_unused vc4_hdmi_runtime_suspend(struct device *dev)
+static int vc4_hdmi_runtime_suspend(struct device *dev)
{
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
@@ -2971,17 +2972,15 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
vc4_hdmi->disable_4kp60 = true;
}
+ pm_runtime_enable(dev);
+
/*
- * We need to have the device powered up at this point to call
- * our reset hook and for the CEC init.
+ * We need to have the device powered up at this point to call
+ * our reset hook and for the CEC init.
*/
- ret = vc4_hdmi_runtime_resume(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret)
- goto err_put_ddc;
-
- pm_runtime_get_noresume(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
+ goto err_disable_runtime_pm;
if ((of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi0") ||
of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi1")) &&
@@ -3027,6 +3026,7 @@ err_destroy_conn:
err_destroy_encoder:
drm_encoder_cleanup(encoder);
pm_runtime_put_sync(dev);
+err_disable_runtime_pm:
pm_runtime_disable(dev);
err_put_ddc:
put_device(&vc4_hdmi->ddc->dev);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index aff37d6f587c..9c1d31f63f85 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2089,6 +2089,7 @@ const struct hid_device_id *hid_match_id(const struct hid_device *hdev,
return NULL;
}
+EXPORT_SYMBOL_GPL(hid_match_id);
static const struct hid_device_id hid_hiddev_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS) },
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 68f9e9d207f4..71a9c258a20b 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -41,6 +41,9 @@ module_param(disable_tap_to_click, bool, 0644);
MODULE_PARM_DESC(disable_tap_to_click,
"Disable Tap-To-Click mode reporting for touchpads (only on the K400 currently).");
+/* Define a non-zero software ID to identify our own requests */
+#define LINUX_KERNEL_SW_ID 0x01
+
#define REPORT_ID_HIDPP_SHORT 0x10
#define REPORT_ID_HIDPP_LONG 0x11
#define REPORT_ID_HIDPP_VERY_LONG 0x12
@@ -71,21 +74,18 @@ MODULE_PARM_DESC(disable_tap_to_click,
#define HIDPP_QUIRK_NO_HIDINPUT BIT(23)
#define HIDPP_QUIRK_FORCE_OUTPUT_REPORTS BIT(24)
#define HIDPP_QUIRK_UNIFYING BIT(25)
-#define HIDPP_QUIRK_HI_RES_SCROLL_1P0 BIT(26)
-#define HIDPP_QUIRK_HI_RES_SCROLL_X2120 BIT(27)
-#define HIDPP_QUIRK_HI_RES_SCROLL_X2121 BIT(28)
-#define HIDPP_QUIRK_HIDPP_WHEELS BIT(29)
-#define HIDPP_QUIRK_HIDPP_EXTRA_MOUSE_BTNS BIT(30)
-#define HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS BIT(31)
+#define HIDPP_QUIRK_HIDPP_WHEELS BIT(26)
+#define HIDPP_QUIRK_HIDPP_EXTRA_MOUSE_BTNS BIT(27)
+#define HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS BIT(28)
/* These are just aliases for now */
#define HIDPP_QUIRK_KBD_SCROLL_WHEEL HIDPP_QUIRK_HIDPP_WHEELS
#define HIDPP_QUIRK_KBD_ZOOM_WHEEL HIDPP_QUIRK_HIDPP_WHEELS
/* Convenience constant to check for any high-res support. */
-#define HIDPP_QUIRK_HI_RES_SCROLL (HIDPP_QUIRK_HI_RES_SCROLL_1P0 | \
- HIDPP_QUIRK_HI_RES_SCROLL_X2120 | \
- HIDPP_QUIRK_HI_RES_SCROLL_X2121)
+#define HIDPP_CAPABILITY_HI_RES_SCROLL (HIDPP_CAPABILITY_HIDPP10_FAST_SCROLL | \
+ HIDPP_CAPABILITY_HIDPP20_HI_RES_SCROLL | \
+ HIDPP_CAPABILITY_HIDPP20_HI_RES_WHEEL)
#define HIDPP_QUIRK_DELAYED_INIT HIDPP_QUIRK_NO_HIDINPUT
@@ -96,6 +96,9 @@ MODULE_PARM_DESC(disable_tap_to_click,
#define HIDPP_CAPABILITY_BATTERY_VOLTAGE BIT(4)
#define HIDPP_CAPABILITY_BATTERY_PERCENTAGE BIT(5)
#define HIDPP_CAPABILITY_UNIFIED_BATTERY BIT(6)
+#define HIDPP_CAPABILITY_HIDPP20_HI_RES_WHEEL BIT(7)
+#define HIDPP_CAPABILITY_HIDPP20_HI_RES_SCROLL BIT(8)
+#define HIDPP_CAPABILITY_HIDPP10_FAST_SCROLL BIT(9)
#define lg_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c))
@@ -343,7 +346,7 @@ static int hidpp_send_fap_command_sync(struct hidpp_device *hidpp,
else
message->report_id = REPORT_ID_HIDPP_LONG;
message->fap.feature_index = feat_index;
- message->fap.funcindex_clientid = funcindex_clientid;
+ message->fap.funcindex_clientid = funcindex_clientid | LINUX_KERNEL_SW_ID;
memcpy(&message->fap.params, params, param_count);
ret = hidpp_send_message_sync(hidpp, message, response);
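Because the software ID is now OR-ed in at send time, the per-command defines below drop their trailing 1: the function index occupies the high nibble and the low nibble comes from LINUX_KERNEL_SW_ID, so the byte on the wire is unchanged. A one-line worked example of the encoding (the helper name is illustrative):

static u8 hidpp20_wire_byte(u8 funcindex)
{
	/* e.g. CMD_ROOT_GET_PROTOCOL_VERSION: 0x10 | 0x01 == 0x11, as before */
	return funcindex | LINUX_KERNEL_SW_ID;
}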
@@ -856,8 +859,8 @@ static int hidpp_unifying_init(struct hidpp_device *hidpp)
#define HIDPP_PAGE_ROOT 0x0000
#define HIDPP_PAGE_ROOT_IDX 0x00
-#define CMD_ROOT_GET_FEATURE 0x01
-#define CMD_ROOT_GET_PROTOCOL_VERSION 0x11
+#define CMD_ROOT_GET_FEATURE 0x00
+#define CMD_ROOT_GET_PROTOCOL_VERSION 0x10
static int hidpp_root_get_feature(struct hidpp_device *hidpp, u16 feature,
u8 *feature_index, u8 *feature_type)
@@ -934,9 +937,9 @@ print_version:
#define HIDPP_PAGE_GET_DEVICE_NAME_TYPE 0x0005
-#define CMD_GET_DEVICE_NAME_TYPE_GET_COUNT 0x01
-#define CMD_GET_DEVICE_NAME_TYPE_GET_DEVICE_NAME 0x11
-#define CMD_GET_DEVICE_NAME_TYPE_GET_TYPE 0x21
+#define CMD_GET_DEVICE_NAME_TYPE_GET_COUNT 0x00
+#define CMD_GET_DEVICE_NAME_TYPE_GET_DEVICE_NAME 0x10
+#define CMD_GET_DEVICE_NAME_TYPE_GET_TYPE 0x20
static int hidpp_devicenametype_get_count(struct hidpp_device *hidpp,
u8 feature_index, u8 *nameLength)
@@ -1966,8 +1969,8 @@ static int hidpp_touchpad_fw_items_set(struct hidpp_device *hidpp,
#define HIDPP_PAGE_TOUCHPAD_RAW_XY 0x6100
-#define CMD_TOUCHPAD_GET_RAW_INFO 0x01
-#define CMD_TOUCHPAD_SET_RAW_REPORT_STATE 0x21
+#define CMD_TOUCHPAD_GET_RAW_INFO 0x00
+#define CMD_TOUCHPAD_SET_RAW_REPORT_STATE 0x20
#define EVENT_TOUCHPAD_RAW_XY 0x00
@@ -3415,14 +3418,14 @@ static int hi_res_scroll_enable(struct hidpp_device *hidpp)
int ret;
u8 multiplier = 1;
- if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_X2121) {
+ if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP20_HI_RES_WHEEL) {
ret = hidpp_hrw_set_wheel_mode(hidpp, false, true, false);
if (ret == 0)
ret = hidpp_hrw_get_wheel_capability(hidpp, &multiplier);
- } else if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_X2120) {
+ } else if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP20_HI_RES_SCROLL) {
ret = hidpp_hrs_set_highres_scrolling_mode(hidpp, true,
&multiplier);
- } else /* if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_1P0) */ {
+ } else /* if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP10_FAST_SCROLL) */ {
ret = hidpp10_enable_scrolling_acceleration(hidpp);
multiplier = 8;
}
@@ -3437,6 +3440,49 @@ static int hi_res_scroll_enable(struct hidpp_device *hidpp)
return 0;
}
+static int hidpp_initialize_hires_scroll(struct hidpp_device *hidpp)
+{
+ int ret;
+ unsigned long capabilities;
+
+ capabilities = hidpp->capabilities;
+
+ if (hidpp->protocol_major >= 2) {
+ u8 feature_index;
+ u8 feature_type;
+
+ ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_HIRES_WHEEL,
+ &feature_index, &feature_type);
+ if (!ret) {
+ hidpp->capabilities |= HIDPP_CAPABILITY_HIDPP20_HI_RES_WHEEL;
+ hid_dbg(hidpp->hid_dev, "Detected HID++ 2.0 hi-res scroll wheel\n");
+ return 0;
+ }
+ ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_HI_RESOLUTION_SCROLLING,
+ &feature_index, &feature_type);
+ if (!ret) {
+ hidpp->capabilities |= HIDPP_CAPABILITY_HIDPP20_HI_RES_SCROLL;
+ hid_dbg(hidpp->hid_dev, "Detected HID++ 2.0 hi-res scrolling\n");
+ }
+ } else {
+ struct hidpp_report response;
+
+ ret = hidpp_send_rap_command_sync(hidpp,
+ REPORT_ID_HIDPP_SHORT,
+ HIDPP_GET_REGISTER,
+ HIDPP_ENABLE_FAST_SCROLL,
+ NULL, 0, &response);
+ if (!ret) {
+ hidpp->capabilities |= HIDPP_CAPABILITY_HIDPP10_FAST_SCROLL;
+ hid_dbg(hidpp->hid_dev, "Detected HID++ 1.0 fast scroll\n");
+ }
+ }
+
+ if (hidpp->capabilities == capabilities)
+ hid_dbg(hidpp->hid_dev, "Did not detect HID++ hi-res scrolling hardware support\n");
+ return 0;
+}
+
/* -------------------------------------------------------------------------- */
/* Generic HID++ devices */
/* -------------------------------------------------------------------------- */
@@ -3691,8 +3737,9 @@ static int hidpp_event(struct hid_device *hdev, struct hid_field *field,
* cases we must return early (falling back to default behaviour) to
* avoid a crash in hidpp_scroll_counter_handle_scroll.
*/
- if (!(hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL) || value == 0
- || hidpp->input == NULL || counter->wheel_multiplier == 0)
+ if (!(hidpp->capabilities & HIDPP_CAPABILITY_HI_RES_SCROLL)
+ || value == 0 || hidpp->input == NULL
+ || counter->wheel_multiplier == 0)
return 0;
hidpp_scroll_counter_handle_scroll(hidpp->input, counter, value);
@@ -3924,6 +3971,7 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
}
hidpp_initialize_battery(hidpp);
+ hidpp_initialize_hires_scroll(hidpp);
/* forward current battery state */
if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP10_BATTERY) {
@@ -3943,7 +3991,7 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
if (hidpp->battery.ps)
power_supply_changed(hidpp->battery.ps);
- if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL)
+ if (hidpp->capabilities & HIDPP_CAPABILITY_HI_RES_SCROLL)
hi_res_scroll_enable(hidpp);
if (!(hidpp->quirks & HIDPP_QUIRK_NO_HIDINPUT) || hidpp->delayed_input)
@@ -3959,8 +4007,10 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
hidpp_populate_input(hidpp, input);
ret = input_register_device(input);
- if (ret)
+ if (ret) {
input_free_device(input);
+ return;
+ }
hidpp->delayed_input = input;
}
@@ -4219,6 +4269,21 @@ static void hidpp_remove(struct hid_device *hdev)
mutex_destroy(&hidpp->send_mutex);
}
+static const struct hid_device_id unhandled_hidpp_devices[] = {
+ /* Logitech Harmony Adapter for PS3, handled in hid-sony */
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) },
+ /* Handled in hid-generic */
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD) },
+ {}
+};
+
+static bool hidpp_match(struct hid_device *hdev,
+ bool ignore_special_driver)
+{
+ /* Refuse to handle devices handled by other HID drivers */
+ return !hid_match_id(hdev, unhandled_hidpp_devices);
+}
+
#define LDJ_DEVICE(product) \
HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE, \
USB_VENDOR_ID_LOGITECH, (product))
@@ -4239,42 +4304,9 @@ static const struct hid_device_id hidpp_devices[] = {
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_T651),
.driver_data = HIDPP_QUIRK_CLASS_WTP },
- { /* Mouse Logitech Anywhere MX */
- LDJ_DEVICE(0x1017), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
- { /* Mouse Logitech Cube */
- LDJ_DEVICE(0x4010), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2120 },
- { /* Mouse Logitech M335 */
- LDJ_DEVICE(0x4050), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { /* Mouse Logitech M515 */
- LDJ_DEVICE(0x4007), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2120 },
{ /* Mouse logitech M560 */
LDJ_DEVICE(0x402d),
- .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_CLASS_M560
- | HIDPP_QUIRK_HI_RES_SCROLL_X2120 },
- { /* Mouse Logitech M705 (firmware RQM17) */
- LDJ_DEVICE(0x101b), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
- { /* Mouse Logitech M705 (firmware RQM67) */
- LDJ_DEVICE(0x406d), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { /* Mouse Logitech M720 */
- LDJ_DEVICE(0x405e), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { /* Mouse Logitech MX Anywhere 2 */
- LDJ_DEVICE(0x404a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { LDJ_DEVICE(0x4072), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { LDJ_DEVICE(0xb013), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { LDJ_DEVICE(0xb018), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { LDJ_DEVICE(0xb01f), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { /* Mouse Logitech MX Anywhere 2S */
- LDJ_DEVICE(0x406a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { /* Mouse Logitech MX Master */
- LDJ_DEVICE(0x4041), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { LDJ_DEVICE(0x4060), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { LDJ_DEVICE(0x4071), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { /* Mouse Logitech MX Master 2S */
- LDJ_DEVICE(0x4069), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { /* Mouse Logitech MX Master 3 */
- LDJ_DEVICE(0x4082), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { /* Mouse Logitech Performance MX */
- LDJ_DEVICE(0x101a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
+ .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_CLASS_M560 },
{ /* Keyboard logitech K400 */
LDJ_DEVICE(0x4024),
.driver_data = HIDPP_QUIRK_CLASS_K400 },
@@ -4335,18 +4367,9 @@ static const struct hid_device_id hidpp_devices[] = {
{ /* MX5500 keyboard over Bluetooth */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb30b),
.driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
- { /* M-RCQ142 V470 Cordless Laser Mouse over Bluetooth */
- HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb008) },
- { /* MX Master mouse over Bluetooth */
- HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb012),
- .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { /* MX Ergo trackball over Bluetooth */
- HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01d) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01e),
- .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { /* MX Master 3 mouse over Bluetooth */
- HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb023),
- .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+
+ { /* And try to enable HID++ for all the Logitech Bluetooth devices */
+ HID_DEVICE(BUS_BLUETOOTH, HID_GROUP_ANY, USB_VENDOR_ID_LOGITECH, HID_ANY_ID) },
{}
};
@@ -4360,6 +4383,7 @@ static const struct hid_usage_id hidpp_usages[] = {
static struct hid_driver hidpp_driver = {
.name = "logitech-hidpp-device",
.id_table = hidpp_devices,
+ .match = hidpp_match,
.report_fixup = hidpp_report_fixup,
.probe = hidpp_probe,
.remove = hidpp_remove,
diff --git a/drivers/hid/surface-hid/surface_hid_core.c b/drivers/hid/surface-hid/surface_hid_core.c
index e46330b2e561..87637f813de2 100644
--- a/drivers/hid/surface-hid/surface_hid_core.c
+++ b/drivers/hid/surface-hid/surface_hid_core.c
@@ -19,12 +19,30 @@
#include "surface_hid_core.h"
+/* -- Utility functions. ---------------------------------------------------- */
+
+static bool surface_hid_is_hot_removed(struct surface_hid_device *shid)
+{
+ /*
+ * Non-ssam client devices, i.e. platform client devices, cannot be
+ * hot-removed.
+ */
+ if (!is_ssam_device(shid->dev))
+ return false;
+
+ return ssam_device_is_hot_removed(to_ssam_device(shid->dev));
+}
+
+
/* -- Device descriptor access. --------------------------------------------- */
static int surface_hid_load_hid_descriptor(struct surface_hid_device *shid)
{
int status;
+ if (surface_hid_is_hot_removed(shid))
+ return -ENODEV;
+
status = shid->ops.get_descriptor(shid, SURFACE_HID_DESC_HID,
(u8 *)&shid->hid_desc, sizeof(shid->hid_desc));
if (status)
@@ -61,6 +79,9 @@ static int surface_hid_load_device_attributes(struct surface_hid_device *shid)
{
int status;
+ if (surface_hid_is_hot_removed(shid))
+ return -ENODEV;
+
status = shid->ops.get_descriptor(shid, SURFACE_HID_DESC_ATTRS,
(u8 *)&shid->attrs, sizeof(shid->attrs));
if (status)
@@ -88,9 +109,18 @@ static int surface_hid_start(struct hid_device *hid)
static void surface_hid_stop(struct hid_device *hid)
{
struct surface_hid_device *shid = hid->driver_data;
+ bool hot_removed;
+
+ /*
+ * Communication may fail for devices that have been hot-removed. This
+ * also includes unregistration of HID events, so we need to check this
+	 * here. Only if the device has not been marked as hot-removed can we
+	 * safely disable events.
+ */
+ hot_removed = surface_hid_is_hot_removed(shid);
/* Note: This call will log errors for us, so ignore them here. */
- ssam_notifier_unregister(shid->ctrl, &shid->notif);
+ __ssam_notifier_unregister(shid->ctrl, &shid->notif, !hot_removed);
}
static int surface_hid_open(struct hid_device *hid)
@@ -109,6 +139,9 @@ static int surface_hid_parse(struct hid_device *hid)
u8 *buf;
int status;
+ if (surface_hid_is_hot_removed(shid))
+ return -ENODEV;
+
buf = kzalloc(len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -126,6 +159,9 @@ static int surface_hid_raw_request(struct hid_device *hid, unsigned char reportn
{
struct surface_hid_device *shid = hid->driver_data;
+ if (surface_hid_is_hot_removed(shid))
+ return -ENODEV;
+
if (rtype == HID_OUTPUT_REPORT && reqtype == HID_REQ_SET_REPORT)
return shid->ops.output_report(shid, reportnum, buf, len);
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 6218bbf6863a..eca7afd366d6 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -171,6 +171,14 @@ int vmbus_connect(void)
goto cleanup;
}
+ vmbus_connection.rescind_work_queue =
+ create_workqueue("hv_vmbus_rescind");
+ if (!vmbus_connection.rescind_work_queue) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ vmbus_connection.ignore_any_offer_msg = false;
+
vmbus_connection.handle_primary_chan_wq =
create_workqueue("hv_pri_chan");
if (!vmbus_connection.handle_primary_chan_wq) {
@@ -357,6 +365,9 @@ void vmbus_disconnect(void)
if (vmbus_connection.handle_primary_chan_wq)
destroy_workqueue(vmbus_connection.handle_primary_chan_wq);
+ if (vmbus_connection.rescind_work_queue)
+ destroy_workqueue(vmbus_connection.rescind_work_queue);
+
if (vmbus_connection.work_queue)
destroy_workqueue(vmbus_connection.work_queue);
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 91e8a72eee14..fdf6decacf06 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mman.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -248,7 +249,7 @@ struct dm_capabilities_resp_msg {
* num_committed: Committed memory in pages.
* page_file_size: The accumulated size of all page files
* in the system in pages.
- * zero_free: The nunber of zero and free pages.
+ * zero_free: The number of zero and free pages.
* page_file_writes: The writes to the page file in pages.
* io_diff: An indicator of file cache efficiency or page file activity,
* calculated as File Cache Page Fault Count - Page Read Count.
@@ -567,6 +568,11 @@ struct hv_dynmem_device {
__u32 version;
struct page_reporting_dev_info pr_dev_info;
+
+ /*
+	 * Maximum number of pages that can be hot-added
+ */
+ __u64 max_dynamic_page_count;
};
static struct hv_dynmem_device dm_device;
@@ -1078,6 +1084,7 @@ static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
pr_info("Max. dynamic memory size: %llu MB\n",
(*max_page_count) >> (20 - HV_HYP_PAGE_SHIFT));
+ dm->max_dynamic_page_count = *max_page_count;
}
break;
@@ -1117,6 +1124,19 @@ static unsigned long compute_balloon_floor(void)
}
/*
+ * Compute total committed memory pages
+ */
+
+static unsigned long get_pages_committed(struct hv_dynmem_device *dm)
+{
+ return vm_memory_committed() +
+ dm->num_pages_ballooned +
+ (dm->num_pages_added > dm->num_pages_onlined ?
+ dm->num_pages_added - dm->num_pages_onlined : 0) +
+ compute_balloon_floor();
+}
+
+/*
* Post our status as it relates memory pressure to the
* host. Host expects the guests to post this status
* periodically at 1 second intervals.
@@ -1157,11 +1177,7 @@ static void post_status(struct hv_dynmem_device *dm)
* asking us to balloon them out.
*/
num_pages_avail = si_mem_available();
- num_pages_committed = vm_memory_committed() +
- dm->num_pages_ballooned +
- (dm->num_pages_added > dm->num_pages_onlined ?
- dm->num_pages_added - dm->num_pages_onlined : 0) +
- compute_balloon_floor();
+ num_pages_committed = get_pages_committed(dm);
trace_balloon_status(num_pages_avail, num_pages_committed,
vm_memory_committed(), dm->num_pages_ballooned,
@@ -1807,6 +1823,109 @@ out:
return ret;
}
+/*
+ * DEBUGFS Interface
+ */
+#ifdef CONFIG_DEBUG_FS
+
+/**
+ * hv_balloon_debug_show - shows statistics of balloon operations.
+ * @f: pointer to the &struct seq_file.
+ * @offset: ignored.
+ *
+ * Provides the statistics that can be accessed via the hv-balloon debugfs file.
+ *
+ * Return: zero on success or an error code.
+ */
+static int hv_balloon_debug_show(struct seq_file *f, void *offset)
+{
+ struct hv_dynmem_device *dm = f->private;
+ char *sname;
+
+ seq_printf(f, "%-22s: %u.%u\n", "host_version",
+ DYNMEM_MAJOR_VERSION(dm->version),
+ DYNMEM_MINOR_VERSION(dm->version));
+
+ seq_printf(f, "%-22s:", "capabilities");
+ if (ballooning_enabled())
+ seq_puts(f, " enabled");
+
+ if (hot_add_enabled())
+ seq_puts(f, " hot_add");
+
+ seq_puts(f, "\n");
+
+ seq_printf(f, "%-22s: %u", "state", dm->state);
+ switch (dm->state) {
+ case DM_INITIALIZING:
+ sname = "Initializing";
+ break;
+ case DM_INITIALIZED:
+ sname = "Initialized";
+ break;
+ case DM_BALLOON_UP:
+ sname = "Balloon Up";
+ break;
+ case DM_BALLOON_DOWN:
+ sname = "Balloon Down";
+ break;
+ case DM_HOT_ADD:
+ sname = "Hot Add";
+ break;
+ case DM_INIT_ERROR:
+ sname = "Error";
+ break;
+ default:
+ sname = "Unknown";
+ }
+ seq_printf(f, " (%s)\n", sname);
+
+ /* HV Page Size */
+ seq_printf(f, "%-22s: %ld\n", "page_size", HV_HYP_PAGE_SIZE);
+
+ /* Pages added with hot_add */
+ seq_printf(f, "%-22s: %u\n", "pages_added", dm->num_pages_added);
+
+ /* pages that are "onlined"/used from pages_added */
+ seq_printf(f, "%-22s: %u\n", "pages_onlined", dm->num_pages_onlined);
+
+ /* pages we have given back to host */
+ seq_printf(f, "%-22s: %u\n", "pages_ballooned", dm->num_pages_ballooned);
+
+ seq_printf(f, "%-22s: %lu\n", "total_pages_committed",
+ get_pages_committed(dm));
+
+ seq_printf(f, "%-22s: %llu\n", "max_dynamic_page_count",
+ dm->max_dynamic_page_count);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(hv_balloon_debug);
+
+static void hv_balloon_debugfs_init(struct hv_dynmem_device *b)
+{
+ debugfs_create_file("hv-balloon", 0444, NULL, b,
+ &hv_balloon_debug_fops);
+}
+
+static void hv_balloon_debugfs_exit(struct hv_dynmem_device *b)
+{
+ debugfs_remove(debugfs_lookup("hv-balloon", NULL));
+}
+
+#else
+
+static inline void hv_balloon_debugfs_init(struct hv_dynmem_device *b)
+{
+}
+
+static inline void hv_balloon_debugfs_exit(struct hv_dynmem_device *b)
+{
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
static int balloon_probe(struct hv_device *dev,
const struct hv_vmbus_device_id *dev_id)
{
@@ -1854,6 +1973,8 @@ static int balloon_probe(struct hv_device *dev,
goto probe_error;
}
+ hv_balloon_debugfs_init(&dm_device);
+
return 0;
probe_error:
@@ -1879,6 +2000,8 @@ static int balloon_remove(struct hv_device *dev)
if (dm->num_pages_ballooned != 0)
pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);
+ hv_balloon_debugfs_exit(dm);
+
cancel_work_sync(&dm->balloon_wrk.wrk);
cancel_work_sync(&dm->ha_wrk.wrk);
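The debugfs file added above leans on DEFINE_SHOW_ATTRIBUTE(), which wraps a *_show() routine with single_open() and emits the matching *_fops. A minimal sketch with a hypothetical attribute name:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int example_stats_show(struct seq_file *f, void *unused)
{
	seq_puts(f, "example: 42\n");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(example_stats);	/* provides example_stats_fops */

static void example_stats_debugfs_init(void)
{
	debugfs_create_file("example-stats", 0444, NULL, NULL,
			    &example_stats_fops);
}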
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 4f5b824b16cf..dc673edf053c 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -261,6 +261,13 @@ struct vmbus_connection {
struct workqueue_struct *work_queue;
struct workqueue_struct *handle_primary_chan_wq;
struct workqueue_struct *handle_sub_chan_wq;
+ struct workqueue_struct *rescind_work_queue;
+
+ /*
+ * On suspension of the vmbus, the accumulated offer messages
+ * must be dropped.
+ */
+ bool ignore_any_offer_msg;
/*
* The number of sub-channels and hv_sock channels that should be
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 547ae334e5cd..23c680d1a0f5 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1160,7 +1160,9 @@ void vmbus_on_msg_dpc(unsigned long data)
* work queue: the RESCIND handler can not start to
* run before the OFFER handler finishes.
*/
- schedule_work(&ctx->work);
+ if (vmbus_connection.ignore_any_offer_msg)
+ break;
+ queue_work(vmbus_connection.rescind_work_queue, &ctx->work);
break;
case CHANNELMSG_OFFERCHANNEL:
@@ -1186,6 +1188,8 @@ void vmbus_on_msg_dpc(unsigned long data)
* to the CPUs which will execute the offer & rescind
* works by the time these works will start execution.
*/
+ if (vmbus_connection.ignore_any_offer_msg)
+ break;
atomic_inc(&vmbus_connection.offer_in_progress);
fallthrough;
@@ -2446,15 +2450,20 @@ acpi_walk_err:
#ifdef CONFIG_PM_SLEEP
static int vmbus_bus_suspend(struct device *dev)
{
+ struct hv_per_cpu_context *hv_cpu = per_cpu_ptr(
+ hv_context.cpu_context, VMBUS_CONNECT_CPU);
struct vmbus_channel *channel, *sc;
- while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
- /*
- * We wait here until the completion of any channel
- * offers that are currently in progress.
- */
- usleep_range(1000, 2000);
- }
+ tasklet_disable(&hv_cpu->msg_dpc);
+ vmbus_connection.ignore_any_offer_msg = true;
+ /* The tasklet_enable() takes care of providing a memory barrier */
+ tasklet_enable(&hv_cpu->msg_dpc);
+
+ /* Drain all the workqueues as we are in suspend */
+ drain_workqueue(vmbus_connection.rescind_work_queue);
+ drain_workqueue(vmbus_connection.work_queue);
+ drain_workqueue(vmbus_connection.handle_primary_chan_wq);
+ drain_workqueue(vmbus_connection.handle_sub_chan_wq);
mutex_lock(&vmbus_connection.channel_mutex);
list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
@@ -2531,6 +2540,8 @@ static int vmbus_bus_resume(struct device *dev)
size_t msgsize;
int ret;
+ vmbus_connection.ignore_any_offer_msg = false;
+
/*
* We only use the 'vmbus_proto_version', which was in use before
* hibernation, to re-negotiate with the host.
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 03d07da8c2dc..221de01a327a 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -2321,7 +2321,7 @@ static const char *lm90_detect_nuvoton(struct i2c_client *client, int chip_id,
const char *name = NULL;
if (config2 < 0)
- return ERR_PTR(-ENODEV);
+ return NULL;
if (address == 0x4c && !(config1 & 0x2a) && !(config2 & 0xf8)) {
if (chip_id == 0x01 && convrate <= 0x09) {
diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c
index 446964cbae4c..da9ec6983e13 100644
--- a/drivers/hwmon/nct6775-core.c
+++ b/drivers/hwmon/nct6775-core.c
@@ -1480,7 +1480,7 @@ static int nct6775_update_pwm_limits(struct device *dev)
return 0;
}
-static struct nct6775_data *nct6775_update_device(struct device *dev)
+struct nct6775_data *nct6775_update_device(struct device *dev)
{
struct nct6775_data *data = dev_get_drvdata(dev);
int i, j, err = 0;
@@ -1615,6 +1615,7 @@ out:
mutex_unlock(&data->update_lock);
return err ? ERR_PTR(err) : data;
}
+EXPORT_SYMBOL_GPL(nct6775_update_device);
/*
* Sysfs callback functions
diff --git a/drivers/hwmon/nct6775-platform.c b/drivers/hwmon/nct6775-platform.c
index ab30437221ce..41c97cfacfb8 100644
--- a/drivers/hwmon/nct6775-platform.c
+++ b/drivers/hwmon/nct6775-platform.c
@@ -359,7 +359,7 @@ static int __maybe_unused nct6775_suspend(struct device *dev)
{
int err;
u16 tmp;
- struct nct6775_data *data = dev_get_drvdata(dev);
+ struct nct6775_data *data = nct6775_update_device(dev);
if (IS_ERR(data))
return PTR_ERR(data);
diff --git a/drivers/hwmon/nct6775.h b/drivers/hwmon/nct6775.h
index 93f708148e65..be41848c3cd2 100644
--- a/drivers/hwmon/nct6775.h
+++ b/drivers/hwmon/nct6775.h
@@ -196,6 +196,8 @@ static inline int nct6775_write_value(struct nct6775_data *data, u16 reg, u16 va
return regmap_write(data->regmap, reg, value);
}
+struct nct6775_data *nct6775_update_device(struct device *dev);
+
bool nct6775_reg_is_word_sized(struct nct6775_data *data, u16 reg);
int nct6775_probe(struct device *dev, struct nct6775_data *data,
const struct regmap_config *regmapcfg);
diff --git a/drivers/hwspinlock/omap_hwspinlock.c b/drivers/hwspinlock/omap_hwspinlock.c
index 33eeff94fc2a..1fb3a2550e29 100644
--- a/drivers/hwspinlock/omap_hwspinlock.c
+++ b/drivers/hwspinlock/omap_hwspinlock.c
@@ -94,11 +94,9 @@ static int omap_hwspinlock_probe(struct platform_device *pdev)
* the module SYSSTATUS register
*/
pm_runtime_enable(&pdev->dev);
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0)
goto runtime_err;
- }
/* Determine number of locks */
i = readl(io_base + SYSSTATUS_OFFSET);
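pm_runtime_resume_and_get() already drops the usage count when the resume fails, so the explicit pm_runtime_put_noidle() in the error path goes away. A minimal sketch of the resulting pattern (the function name is illustrative):

#include <linux/pm_runtime.h>

static int example_touch_hw(struct device *dev)
{
	int ret = pm_runtime_resume_and_get(dev);

	if (ret < 0)
		return ret;	/* usage count already balanced on failure */

	/* ... access the hardware ... */

	pm_runtime_put(dev);
	return 0;
}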
diff --git a/drivers/hwspinlock/qcom_hwspinlock.c b/drivers/hwspinlock/qcom_hwspinlock.c
index 364710966665..80ea45b3a815 100644
--- a/drivers/hwspinlock/qcom_hwspinlock.c
+++ b/drivers/hwspinlock/qcom_hwspinlock.c
@@ -19,6 +19,11 @@
#define QCOM_MUTEX_APPS_PROC_ID 1
#define QCOM_MUTEX_NUM_LOCKS 32
+struct qcom_hwspinlock_of_data {
+ u32 offset;
+ u32 stride;
+};
+
static int qcom_hwspinlock_trylock(struct hwspinlock *lock)
{
struct regmap_field *field = lock->priv;
@@ -63,9 +68,20 @@ static const struct hwspinlock_ops qcom_hwspinlock_ops = {
.unlock = qcom_hwspinlock_unlock,
};
+static const struct qcom_hwspinlock_of_data of_sfpb_mutex = {
+ .offset = 0x4,
+ .stride = 0x4,
+};
+
+/* All modern platforms have an offset of 0 and a stride of 4k */
+static const struct qcom_hwspinlock_of_data of_tcsr_mutex = {
+ .offset = 0,
+ .stride = 0x1000,
+};
+
static const struct of_device_id qcom_hwspinlock_of_match[] = {
- { .compatible = "qcom,sfpb-mutex" },
- { .compatible = "qcom,tcsr-mutex" },
+ { .compatible = "qcom,sfpb-mutex", .data = &of_sfpb_mutex },
+ { .compatible = "qcom,tcsr-mutex", .data = &of_tcsr_mutex },
{ }
};
MODULE_DEVICE_TABLE(of, qcom_hwspinlock_of_match);
@@ -112,12 +128,14 @@ static const struct regmap_config tcsr_mutex_config = {
static struct regmap *qcom_hwspinlock_probe_mmio(struct platform_device *pdev,
u32 *offset, u32 *stride)
{
+ const struct qcom_hwspinlock_of_data *data;
struct device *dev = &pdev->dev;
void __iomem *base;
- /* All modern platform has offset 0 and stride of 4k */
- *offset = 0;
- *stride = 0x1000;
+ data = of_device_get_match_data(dev);
+
+ *offset = data->offset;
+ *stride = data->stride;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index cf249ecad5a5..d39660a3e50c 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -98,7 +98,7 @@ u64 etm4x_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
}
if (!_relaxed)
- __iormb(res); /* Imitate the !relaxed I/O helpers */
+ __io_ar(res); /* Imitate the !relaxed I/O helpers */
return res;
}
@@ -106,7 +106,7 @@ u64 etm4x_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
void etm4x_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit)
{
if (!_relaxed)
- __iowmb(); /* Imitate the !relaxed I/O helpers */
+ __io_bw(); /* Imitate the !relaxed I/O helpers */
if (!_64bit)
val &= GENMASK(31, 0);
@@ -130,7 +130,7 @@ static u64 ete_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
}
if (!_relaxed)
- __iormb(res); /* Imitate the !relaxed I/O helpers */
+ __io_ar(res); /* Imitate the !relaxed I/O helpers */
return res;
}
@@ -138,7 +138,7 @@ static u64 ete_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
static void ete_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit)
{
if (!_relaxed)
- __iowmb(); /* Imitate the !relaxed I/O helpers */
+ __io_bw(); /* Imitate the !relaxed I/O helpers */
if (!_64bit)
val &= GENMASK(31, 0);
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index a7bfea31f7d8..4b21bb79f168 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -547,14 +547,14 @@
#define etm4x_read32(csa, offset) \
({ \
u32 __val = etm4x_relaxed_read32((csa), (offset)); \
- __iormb(__val); \
+ __io_ar(__val); \
__val; \
})
#define etm4x_read64(csa, offset) \
({ \
u64 __val = etm4x_relaxed_read64((csa), (offset)); \
- __iormb(__val); \
+ __io_ar(__val); \
__val; \
})
@@ -578,13 +578,13 @@
#define etm4x_write32(csa, val, offset) \
do { \
- __iowmb(); \
+ __io_bw(); \
etm4x_relaxed_write32((csa), (val), (offset)); \
} while (0)
#define etm4x_write64(csa, val, offset) \
do { \
- __iowmb(); \
+ __io_bw(); \
etm4x_relaxed_write64((csa), (val), (offset)); \
} while (0)
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index e95d6bc34070..7284206b278b 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -108,6 +108,7 @@ config I2C_HIX5HD2
config I2C_I801
tristate "Intel 82801 (ICH/PCH)"
depends on PCI
+ select P2SB if X86
select CHECK_SIGNATURE if X86 && DMI
select I2C_SMBUS
help
diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c
index 354cf7e45c4a..50e7f3f670b6 100644
--- a/drivers/i2c/busses/i2c-altera.c
+++ b/drivers/i2c/busses/i2c-altera.c
@@ -447,7 +447,7 @@ static int altr_i2c_probe(struct platform_device *pdev)
mutex_unlock(&idev->isr_mutex);
i2c_set_adapdata(&idev->adapter, idev);
- strlcpy(idev->adapter.name, pdev->name, sizeof(idev->adapter.name));
+ strscpy(idev->adapter.name, pdev->name, sizeof(idev->adapter.name));
idev->adapter.owner = THIS_MODULE;
idev->adapter.algo = &altr_i2c_algo;
idev->adapter.dev.parent = &pdev->dev;
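This is the first of many strlcpy() to strscpy() conversions in this series. Both guarantee NUL termination, but strlcpy() returns strlen(src) and therefore always walks the full source string, while strscpy() stops at the destination size and signals truncation with -E2BIG. A small illustrative helper (hypothetical name) could check for that:

#include <linux/printk.h>
#include <linux/string.h>

/* Hypothetical helper: report truncation instead of silently accepting it. */
static int demo_copy_name(char *dst, size_t dst_size, const char *src)
{
        ssize_t ret = strscpy(dst, src, dst_size);

        if (ret == -E2BIG)
                pr_warn("adapter name \"%s\" truncated\n", dst);

        return ret < 0 ? ret : 0;
}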
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index 771e53d3d197..185dedfebbac 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -1022,7 +1022,7 @@ static int aspeed_i2c_probe_bus(struct platform_device *pdev)
bus->adap.algo = &aspeed_i2c_algo;
bus->adap.dev.parent = &pdev->dev;
bus->adap.dev.of_node = pdev->dev.of_node;
- strlcpy(bus->adap.name, pdev->name, sizeof(bus->adap.name));
+ strscpy(bus->adap.name, pdev->name, sizeof(bus->adap.name));
i2c_set_adapdata(&bus->adap, bus);
bus->dev = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c
index 22aed922552b..99bd24d0e6a5 100644
--- a/drivers/i2c/busses/i2c-au1550.c
+++ b/drivers/i2c/busses/i2c-au1550.c
@@ -321,7 +321,7 @@ i2c_au1550_probe(struct platform_device *pdev)
priv->adap.algo = &au1550_algo;
priv->adap.algo_data = priv;
priv->adap.dev.parent = &pdev->dev;
- strlcpy(priv->adap.name, "Au1xxx PSC I2C", sizeof(priv->adap.name));
+ strscpy(priv->adap.name, "Au1xxx PSC I2C", sizeof(priv->adap.name));
/* Now, set up the PSC for SMBus PIO mode. */
i2c_au1550_setup(priv);
diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c
index 5294b73beca8..bdf3b50de8ad 100644
--- a/drivers/i2c/busses/i2c-axxia.c
+++ b/drivers/i2c/busses/i2c-axxia.c
@@ -783,7 +783,7 @@ static int axxia_i2c_probe(struct platform_device *pdev)
}
i2c_set_adapdata(&idev->adapter, idev);
- strlcpy(idev->adapter.name, pdev->name, sizeof(idev->adapter.name));
+ strscpy(idev->adapter.name, pdev->name, sizeof(idev->adapter.name));
idev->adapter.owner = THIS_MODULE;
idev->adapter.algo = &axxia_i2c_algo;
idev->adapter.bus_recovery_info = &axxia_i2c_recovery_info;
diff --git a/drivers/i2c/busses/i2c-bcm-kona.c b/drivers/i2c/busses/i2c-bcm-kona.c
index 16bf41f1f086..f3e369f0fd40 100644
--- a/drivers/i2c/busses/i2c-bcm-kona.c
+++ b/drivers/i2c/busses/i2c-bcm-kona.c
@@ -839,7 +839,7 @@ static int bcm_kona_i2c_probe(struct platform_device *pdev)
adap = &dev->adapter;
i2c_set_adapdata(adap, dev);
adap->owner = THIS_MODULE;
- strlcpy(adap->name, "Broadcom I2C adapter", sizeof(adap->name));
+ strscpy(adap->name, "Broadcom I2C adapter", sizeof(adap->name));
adap->algo = &bcm_algo;
adap->dev.parent = &pdev->dev;
adap->dev.of_node = pdev->dev.of_node;
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index 2ae187e2b642..69383be47905 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -674,7 +674,7 @@ static int brcmstb_i2c_probe(struct platform_device *pdev)
adap = &dev->adapter;
i2c_set_adapdata(adap, dev);
adap->owner = THIS_MODULE;
- strlcpy(adap->name, dev_name(&pdev->dev), sizeof(adap->name));
+ strscpy(adap->name, dev_name(&pdev->dev), sizeof(adap->name));
adap->algo = &brcmstb_i2c_algo;
adap->dev.parent = &pdev->dev;
adap->dev.of_node = pdev->dev.of_node;
diff --git a/drivers/i2c/busses/i2c-cbus-gpio.c b/drivers/i2c/busses/i2c-cbus-gpio.c
index f8639a4457d2..d97c61eec95c 100644
--- a/drivers/i2c/busses/i2c-cbus-gpio.c
+++ b/drivers/i2c/busses/i2c-cbus-gpio.c
@@ -245,7 +245,7 @@ static int cbus_i2c_probe(struct platform_device *pdev)
adapter->nr = pdev->id;
adapter->timeout = HZ;
adapter->algo = &cbus_i2c_algo;
- strlcpy(adapter->name, "CBUS I2C adapter", sizeof(adapter->name));
+ strscpy(adapter->name, "CBUS I2C adapter", sizeof(adapter->name));
spin_lock_init(&chost->lock);
chost->dev = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c
index de15f09c9b47..190abdc46dd3 100644
--- a/drivers/i2c/busses/i2c-cht-wc.c
+++ b/drivers/i2c/busses/i2c-cht-wc.c
@@ -404,7 +404,7 @@ static int cht_wc_i2c_adap_i2c_probe(struct platform_device *pdev)
adap->adapter.class = I2C_CLASS_HWMON;
adap->adapter.algo = &cht_wc_i2c_adap_algo;
adap->adapter.lock_ops = &cht_wc_i2c_adap_lock_ops;
- strlcpy(adap->adapter.name, "PMIC I2C Adapter",
+ strscpy(adap->adapter.name, "PMIC I2C Adapter",
sizeof(adap->adapter.name));
adap->adapter.dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
index 892213d51f43..4e787dc709f9 100644
--- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c
+++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
@@ -267,7 +267,7 @@ static int ec_i2c_probe(struct platform_device *pdev)
bus->dev = dev;
bus->adap.owner = THIS_MODULE;
- strlcpy(bus->adap.name, "cros-ec-i2c-tunnel", sizeof(bus->adap.name));
+ strscpy(bus->adap.name, "cros-ec-i2c-tunnel", sizeof(bus->adap.name));
bus->adap.algo = &ec_i2c_algorithm;
bus->adap.algo_data = bus;
bus->adap.dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 9e09db31a937..471c47db546b 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -845,7 +845,7 @@ static int davinci_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(adap, dev);
adap->owner = THIS_MODULE;
adap->class = I2C_CLASS_DEPRECATED;
- strlcpy(adap->name, "DaVinci I2C adapter", sizeof(adap->name));
+ strscpy(adap->name, "DaVinci I2C adapter", sizeof(adap->name));
adap->algo = &i2c_davinci_algo;
adap->dev.parent = &pdev->dev;
adap->timeout = DAVINCI_I2C_TIMEOUT;
diff --git a/drivers/i2c/busses/i2c-digicolor.c b/drivers/i2c/busses/i2c-digicolor.c
index 60c838c7c454..50925d97fa42 100644
--- a/drivers/i2c/busses/i2c-digicolor.c
+++ b/drivers/i2c/busses/i2c-digicolor.c
@@ -322,7 +322,7 @@ static int dc_i2c_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- strlcpy(i2c->adap.name, "Conexant Digicolor I2C adapter",
+ strscpy(i2c->adap.name, "Conexant Digicolor I2C adapter",
sizeof(i2c->adap.name));
i2c->adap.owner = THIS_MODULE;
i2c->adap.algo = &dc_i2c_algorithm;
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 321b2770feab..4914bfbee2a9 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -773,7 +773,7 @@ static int pch_i2c_probe(struct pci_dev *pdev,
pch_adap->owner = THIS_MODULE;
pch_adap->class = I2C_CLASS_HWMON;
- strlcpy(pch_adap->name, KBUILD_MODNAME, sizeof(pch_adap->name));
+ strscpy(pch_adap->name, KBUILD_MODNAME, sizeof(pch_adap->name));
pch_adap->algo = &pch_algorithm;
pch_adap->algo_data = &adap_info->pch_data[i];
diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c
index bdff0e6345d9..f2e537b137b2 100644
--- a/drivers/i2c/busses/i2c-emev2.c
+++ b/drivers/i2c/busses/i2c-emev2.c
@@ -371,7 +371,7 @@ static int em_i2c_probe(struct platform_device *pdev)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
- strlcpy(priv->adap.name, "EMEV2 I2C", sizeof(priv->adap.name));
+ strscpy(priv->adap.name, "EMEV2 I2C", sizeof(priv->adap.name));
priv->sclk = devm_clk_get(&pdev->dev, "sclk");
if (IS_ERR(priv->sclk))
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index b812d1090c0f..4a6260d04db2 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -802,7 +802,7 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
if (of_property_read_u32(np, "clock-frequency", &i2c->op_clock))
i2c->op_clock = I2C_MAX_STANDARD_MODE_FREQ;
- strlcpy(i2c->adap.name, "exynos5-i2c", sizeof(i2c->adap.name));
+ strscpy(i2c->adap.name, "exynos5-i2c", sizeof(i2c->adap.name));
i2c->adap.owner = THIS_MODULE;
i2c->adap.algo = &exynos5_i2c_algorithm;
i2c->adap.retries = 3;
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index 7a048abbf92b..b1985c1667e1 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -436,7 +436,7 @@ static int i2c_gpio_probe(struct platform_device *pdev)
adap->owner = THIS_MODULE;
if (np)
- strlcpy(adap->name, dev_name(dev), sizeof(adap->name));
+ strscpy(adap->name, dev_name(dev), sizeof(adap->name));
else
snprintf(adap->name, sizeof(adap->name), "i2c-gpio%d", pdev->id);
diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c
index a2add128d084..4374a8677271 100644
--- a/drivers/i2c/busses/i2c-highlander.c
+++ b/drivers/i2c/busses/i2c-highlander.c
@@ -402,7 +402,7 @@ static int highlander_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(adap, dev);
adap->owner = THIS_MODULE;
adap->class = I2C_CLASS_HWMON;
- strlcpy(adap->name, "HL FPGA I2C adapter", sizeof(adap->name));
+ strscpy(adap->name, "HL FPGA I2C adapter", sizeof(adap->name));
adap->algo = &highlander_i2c_algo;
adap->dev.parent = &pdev->dev;
adap->nr = pdev->id;
diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c
index 61ae58f57047..0e34cbaca22d 100644
--- a/drivers/i2c/busses/i2c-hix5hd2.c
+++ b/drivers/i2c/busses/i2c-hix5hd2.c
@@ -423,7 +423,7 @@ static int hix5hd2_i2c_probe(struct platform_device *pdev)
}
clk_prepare_enable(priv->clk);
- strlcpy(priv->adap.name, "hix5hd2-i2c", sizeof(priv->adap.name));
+ strscpy(priv->adap.name, "hix5hd2-i2c", sizeof(priv->adap.name));
priv->dev = &pdev->dev;
priv->adap.owner = THIS_MODULE;
priv->adap.algo = &hix5hd2_i2c_algorithm;
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 9e5b87e107ba..a176296f4fff 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -112,6 +112,7 @@
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/platform_data/itco_wdt.h>
+#include <linux/platform_data/x86/p2sb.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
@@ -141,7 +142,6 @@
#define TCOBASE 0x050
#define TCOCTL 0x054
-#define SBREG_BAR 0x10
#define SBREG_SMBCTRL 0xc6000c
#define SBREG_SMBCTRL_DNV 0xcf000c
@@ -1116,7 +1116,7 @@ static void dmi_check_onboard_device(u8 type, const char *name,
memset(&info, 0, sizeof(struct i2c_board_info));
info.addr = dmi_devices[i].i2c_addr;
- strlcpy(info.type, dmi_devices[i].i2c_type, I2C_NAME_SIZE);
+ strscpy(info.type, dmi_devices[i].i2c_type, I2C_NAME_SIZE);
i2c_new_client_device(adap, &info);
break;
}
@@ -1267,7 +1267,7 @@ static void register_dell_lis3lv02d_i2c_device(struct i801_priv *priv)
memset(&info, 0, sizeof(struct i2c_board_info));
info.addr = dell_lis3lv02d_devices[i].i2c_addr;
- strlcpy(info.type, "lis3lv02d", I2C_NAME_SIZE);
+ strscpy(info.type, "lis3lv02d", I2C_NAME_SIZE);
i2c_new_client_device(&priv->adapter, &info);
}
@@ -1485,45 +1485,24 @@ i801_add_tco_spt(struct i801_priv *priv, struct pci_dev *pci_dev,
.version = 4,
};
struct resource *res;
- unsigned int devfn;
- u64 base64_addr;
- u32 base_addr;
- u8 hidden;
+ int ret;
/*
* We must access the NO_REBOOT bit over the Primary to Sideband
- * bridge (P2SB). The BIOS prevents the P2SB device from being
- * enumerated by the PCI subsystem, so we need to unhide/hide it
- * to lookup the P2SB BAR.
+ * (P2SB) bridge.
*/
- pci_lock_rescan_remove();
-
- devfn = PCI_DEVFN(PCI_SLOT(pci_dev->devfn), 1);
-
- /* Unhide the P2SB device, if it is hidden */
- pci_bus_read_config_byte(pci_dev->bus, devfn, 0xe1, &hidden);
- if (hidden)
- pci_bus_write_config_byte(pci_dev->bus, devfn, 0xe1, 0x0);
-
- pci_bus_read_config_dword(pci_dev->bus, devfn, SBREG_BAR, &base_addr);
- base64_addr = base_addr & 0xfffffff0;
-
- pci_bus_read_config_dword(pci_dev->bus, devfn, SBREG_BAR + 0x4, &base_addr);
- base64_addr |= (u64)base_addr << 32;
-
- /* Hide the P2SB device, if it was hidden before */
- if (hidden)
- pci_bus_write_config_byte(pci_dev->bus, devfn, 0xe1, hidden);
- pci_unlock_rescan_remove();
res = &tco_res[1];
+ ret = p2sb_bar(pci_dev->bus, 0, res);
+ if (ret)
+ return ERR_PTR(ret);
+
if (pci_dev->device == PCI_DEVICE_ID_INTEL_DNV_SMBUS)
- res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL_DNV;
+ res->start += SBREG_SMBCTRL_DNV;
else
- res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
+ res->start += SBREG_SMBCTRL;
res->end = res->start + 3;
- res->flags = IORESOURCE_MEM;
return platform_device_register_resndata(&pci_dev->dev, "iTCO_wdt", -1,
tco_res, 2, &pldata, sizeof(pldata));
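The i2c-i801 hunk above replaces the hand-rolled unhide/read/hide dance on the P2SB device with the p2sb_bar() helper pulled in by the new "select P2SB" Kconfig line. A hedged sketch of how a caller might derive a register window from it (the offset value is borrowed from the hunk, other names are hypothetical):

#include <linux/pci.h>
#include <linux/platform_data/x86/p2sb.h>

/* Hypothetical sketch: derive a 4-byte register window from the P2SB BAR. */
static int demo_get_smbctrl(struct pci_dev *pdev, struct resource *res)
{
        int ret;

        /* devfn 0 asks the helper to use the default P2SB slot. */
        ret = p2sb_bar(pdev->bus, 0, res);
        if (ret)
                return ret;

        res->start += 0xc6000c;         /* SBREG_SMBCTRL from the hunk above */
        res->end = res->start + 3;
        return 0;
}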
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index 9f71daf6db64..eeb80e34f9ad 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -738,7 +738,7 @@ static int iic_probe(struct platform_device *ofdev)
adap = &dev->adap;
adap->dev.parent = &ofdev->dev;
adap->dev.of_node = of_node_get(np);
- strlcpy(adap->name, "IBM IIC", sizeof(adap->name));
+ strscpy(adap->name, "IBM IIC", sizeof(adap->name));
i2c_set_adapdata(adap, dev);
adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
adap->algo = &iic_algo;
diff --git a/drivers/i2c/busses/i2c-icy.c b/drivers/i2c/busses/i2c-icy.c
index 5dae7cab7260..febcb6f01d4d 100644
--- a/drivers/i2c/busses/i2c-icy.c
+++ b/drivers/i2c/busses/i2c-icy.c
@@ -141,7 +141,7 @@ static int icy_probe(struct zorro_dev *z,
i2c->adapter.owner = THIS_MODULE;
/* i2c->adapter.algo assigned by i2c_pcf_add_bus() */
i2c->adapter.algo_data = algo_data;
- strlcpy(i2c->adapter.name, "ICY I2C Zorro adapter",
+ strscpy(i2c->adapter.name, "ICY I2C Zorro adapter",
sizeof(i2c->adapter.name));
if (!devm_request_mem_region(&z->dev,
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index 8b9ba055c418..b51ab3cad2b1 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -558,7 +558,7 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
lpi2c_imx->adapter.algo = &lpi2c_imx_algo;
lpi2c_imx->adapter.dev.parent = &pdev->dev;
lpi2c_imx->adapter.dev.of_node = pdev->dev.of_node;
- strlcpy(lpi2c_imx->adapter.name, pdev->name,
+ strscpy(lpi2c_imx->adapter.name, pdev->name,
sizeof(lpi2c_imx->adapter.name));
lpi2c_imx->clk = devm_clk_get(&pdev->dev, NULL);
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 78fb1a4274a6..e47fa3465671 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -1572,9 +1572,7 @@ static int i2c_imx_remove(struct platform_device *pdev)
struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev);
int irq, ret;
- ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret < 0)
- return ret;
+ ret = pm_runtime_get_sync(&pdev->dev);
hrtimer_cancel(&i2c_imx->slave_timer);
@@ -1585,17 +1583,21 @@ static int i2c_imx_remove(struct platform_device *pdev)
if (i2c_imx->dma)
i2c_imx_dma_free(i2c_imx);
- /* setup chip registers to defaults */
- imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR);
- imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR);
- imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2CR);
- imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR);
+ if (ret == 0) {
+ /* setup chip registers to defaults */
+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR);
+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR);
+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2CR);
+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR);
+ clk_disable(i2c_imx->clk);
+ }
clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb);
irq = platform_get_irq(pdev, 0);
if (irq >= 0)
free_irq(irq, i2c_imx);
- clk_disable_unprepare(i2c_imx->clk);
+
+ clk_unprepare(i2c_imx->clk);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/i2c/busses/i2c-kempld.c b/drivers/i2c/busses/i2c-kempld.c
index 5bbb7f0d7852..cf857cf22507 100644
--- a/drivers/i2c/busses/i2c-kempld.c
+++ b/drivers/i2c/busses/i2c-kempld.c
@@ -303,6 +303,7 @@ static int kempld_i2c_probe(struct platform_device *pdev)
i2c->dev = &pdev->dev;
i2c->adap = kempld_i2c_adapter;
i2c->adap.dev.parent = i2c->dev;
+ ACPI_COMPANION_SET(&i2c->adap.dev, ACPI_COMPANION(&pdev->dev));
i2c_set_adapdata(&i2c->adap, i2c);
platform_set_drvdata(pdev, i2c);
diff --git a/drivers/i2c/busses/i2c-lpc2k.c b/drivers/i2c/busses/i2c-lpc2k.c
index 4e30c5267142..8fff6fbb7065 100644
--- a/drivers/i2c/busses/i2c-lpc2k.c
+++ b/drivers/i2c/busses/i2c-lpc2k.c
@@ -417,7 +417,7 @@ static int i2c_lpc2k_probe(struct platform_device *pdev)
i2c_set_adapdata(&i2c->adap, i2c);
i2c->adap.owner = THIS_MODULE;
- strlcpy(i2c->adap.name, "LPC2K I2C adapter", sizeof(i2c->adap.name));
+ strscpy(i2c->adap.name, "LPC2K I2C adapter", sizeof(i2c->adap.name));
i2c->adap.algo = &i2c_lpc2k_algorithm;
i2c->adap.dev.parent = &pdev->dev;
i2c->adap.dev.of_node = pdev->dev.of_node;
diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c
index 61cc5b2462c6..889eff06b78f 100644
--- a/drivers/i2c/busses/i2c-meson.c
+++ b/drivers/i2c/busses/i2c-meson.c
@@ -502,7 +502,7 @@ static int meson_i2c_probe(struct platform_device *pdev)
return ret;
}
- strlcpy(i2c->adap.name, "Meson I2C adapter",
+ strscpy(i2c->adap.name, "Meson I2C adapter",
sizeof(i2c->adap.name));
i2c->adap.owner = THIS_MODULE;
i2c->adap.algo = &meson_i2c_algorithm;
diff --git a/drivers/i2c/busses/i2c-microchip-corei2c.c b/drivers/i2c/busses/i2c-microchip-corei2c.c
index 6df0f1c33278..4d7e9b25f018 100644
--- a/drivers/i2c/busses/i2c-microchip-corei2c.c
+++ b/drivers/i2c/busses/i2c-microchip-corei2c.c
@@ -206,7 +206,7 @@ static void mchp_corei2c_empty_rx(struct mchp_corei2c_dev *idev)
idev->msg_len--;
}
- if (idev->msg_len == 0) {
+ if (idev->msg_len <= 1) {
ctrl = readb(idev->base + CORE_I2C_CTRL);
ctrl &= ~CTRL_AA;
writeb(ctrl, idev->base + CORE_I2C_CTRL);
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index 6c698c10d3cd..81ac92bb4f6f 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -239,6 +239,7 @@ static const struct mpc_i2c_divider mpc_i2c_dividers_52xx[] = {
static int mpc_i2c_get_fdr_52xx(struct device_node *node, u32 clock,
u32 *real_clk)
{
+ struct fwnode_handle *fwnode = of_fwnode_handle(node);
const struct mpc_i2c_divider *div = NULL;
unsigned int pvr = mfspr(SPRN_PVR);
u32 divider;
@@ -246,12 +247,12 @@ static int mpc_i2c_get_fdr_52xx(struct device_node *node, u32 clock,
if (clock == MPC_I2C_CLOCK_LEGACY) {
/* see below - default fdr = 0x3f -> div = 2048 */
- *real_clk = mpc5xxx_get_bus_frequency(node) / 2048;
+ *real_clk = mpc5xxx_fwnode_get_bus_frequency(fwnode) / 2048;
return -EINVAL;
}
/* Determine divider value */
- divider = mpc5xxx_get_bus_frequency(node) / clock;
+ divider = mpc5xxx_fwnode_get_bus_frequency(fwnode) / clock;
/*
* We want to choose an FDR/DFSR that generates an I2C bus speed that
@@ -266,7 +267,7 @@ static int mpc_i2c_get_fdr_52xx(struct device_node *node, u32 clock,
break;
}
- *real_clk = mpc5xxx_get_bus_frequency(node) / div->divider;
+ *real_clk = mpc5xxx_fwnode_get_bus_frequency(fwnode) / div->divider;
return (int)div->fdr;
}
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 8e6985354fd5..fc7bfd98156b 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -229,6 +229,35 @@ static const u16 mt_i2c_regs_v2[] = {
[OFFSET_DCM_EN] = 0xf88,
};
+static const u16 mt_i2c_regs_v3[] = {
+ [OFFSET_DATA_PORT] = 0x0,
+ [OFFSET_INTR_MASK] = 0x8,
+ [OFFSET_INTR_STAT] = 0xc,
+ [OFFSET_CONTROL] = 0x10,
+ [OFFSET_TRANSFER_LEN] = 0x14,
+ [OFFSET_TRANSAC_LEN] = 0x18,
+ [OFFSET_DELAY_LEN] = 0x1c,
+ [OFFSET_TIMING] = 0x20,
+ [OFFSET_START] = 0x24,
+ [OFFSET_EXT_CONF] = 0x28,
+ [OFFSET_LTIMING] = 0x2c,
+ [OFFSET_HS] = 0x30,
+ [OFFSET_IO_CONFIG] = 0x34,
+ [OFFSET_FIFO_ADDR_CLR] = 0x38,
+ [OFFSET_SDA_TIMING] = 0x3c,
+ [OFFSET_TRANSFER_LEN_AUX] = 0x44,
+ [OFFSET_CLOCK_DIV] = 0x48,
+ [OFFSET_SOFTRESET] = 0x50,
+ [OFFSET_MULTI_DMA] = 0x8c,
+ [OFFSET_SCL_MIS_COMP_POINT] = 0x90,
+ [OFFSET_SLAVE_ADDR] = 0x94,
+ [OFFSET_DEBUGSTAT] = 0xe4,
+ [OFFSET_DEBUGCTRL] = 0xe8,
+ [OFFSET_FIFO_STAT] = 0xf4,
+ [OFFSET_FIFO_THRESH] = 0xf8,
+ [OFFSET_DCM_EN] = 0xf88,
+};
+
struct mtk_i2c_compatible {
const struct i2c_adapter_quirks *quirks;
const u16 *regs;
@@ -442,6 +471,19 @@ static const struct mtk_i2c_compatible mt8186_compat = {
.max_dma_support = 36,
};
+static const struct mtk_i2c_compatible mt8188_compat = {
+ .regs = mt_i2c_regs_v3,
+ .pmic_i2c = 0,
+ .dcm = 0,
+ .auto_restart = 1,
+ .aux_len_reg = 1,
+ .timing_adjust = 1,
+ .dma_sync = 0,
+ .ltiming_adjust = 1,
+ .apdma_sync = 1,
+ .max_dma_support = 36,
+};
+
static const struct mtk_i2c_compatible mt8192_compat = {
.quirks = &mt8183_i2c_quirks,
.regs = mt_i2c_regs_v2,
@@ -465,6 +507,7 @@ static const struct of_device_id mtk_i2c_of_match[] = {
{ .compatible = "mediatek,mt8173-i2c", .data = &mt8173_compat },
{ .compatible = "mediatek,mt8183-i2c", .data = &mt8183_compat },
{ .compatible = "mediatek,mt8186-i2c", .data = &mt8186_compat },
+ { .compatible = "mediatek,mt8188-i2c", .data = &mt8188_compat },
{ .compatible = "mediatek,mt8192-i2c", .data = &mt8192_compat },
{}
};
@@ -1389,7 +1432,7 @@ static int mtk_i2c_probe(struct platform_device *pdev)
speed_clk = I2C_MT65XX_CLK_MAIN;
}
- strlcpy(i2c->adap.name, I2C_DRV_NAME, sizeof(i2c->adap.name));
+ strscpy(i2c->adap.name, I2C_DRV_NAME, sizeof(i2c->adap.name));
ret = mtk_i2c_set_speed(i2c, clk_get_rate(i2c->clocks[speed_clk].clk));
if (ret) {
diff --git a/drivers/i2c/busses/i2c-mt7621.c b/drivers/i2c/busses/i2c-mt7621.c
index cfe6de8175dd..20eda5738ac4 100644
--- a/drivers/i2c/busses/i2c-mt7621.c
+++ b/drivers/i2c/busses/i2c-mt7621.c
@@ -312,7 +312,7 @@ static int mtk_i2c_probe(struct platform_device *pdev)
adap->dev.parent = &pdev->dev;
i2c_set_adapdata(adap, i2c);
adap->dev.of_node = pdev->dev.of_node;
- strlcpy(adap->name, dev_name(&pdev->dev), sizeof(adap->name));
+ strscpy(adap->name, dev_name(&pdev->dev), sizeof(adap->name));
platform_set_drvdata(pdev, i2c);
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 103a05ecc3d6..047dfef7a657 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -989,7 +989,7 @@ mv64xxx_i2c_probe(struct platform_device *pd)
if (IS_ERR(drv_data->reg_base))
return PTR_ERR(drv_data->reg_base);
- strlcpy(drv_data->adapter.name, MV64XXX_I2C_CTLR_NAME " adapter",
+ strscpy(drv_data->adapter.name, MV64XXX_I2C_CTLR_NAME " adapter",
sizeof(drv_data->adapter.name));
init_waitqueue_head(&drv_data->waitq);
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 68f67d084c63..5af5cffc444e 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -838,7 +838,7 @@ static int mxs_i2c_probe(struct platform_device *pdev)
return err;
adap = &i2c->adapter;
- strlcpy(adap->name, "MXS I2C adapter", sizeof(adap->name));
+ strscpy(adap->name, "MXS I2C adapter", sizeof(adap->name));
adap->owner = THIS_MODULE;
adap->algo = &mxs_i2c_algo;
adap->quirks = &mxs_i2c_quirks;
diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c
index 6920c1b9a126..12e330cd7635 100644
--- a/drivers/i2c/busses/i2c-nvidia-gpu.c
+++ b/drivers/i2c/busses/i2c-nvidia-gpu.c
@@ -299,7 +299,7 @@ static int gpu_i2c_probe(struct pci_dev *pdev, const struct pci_device_id *id)
i2c_set_adapdata(&i2cd->adapter, i2cd);
i2cd->adapter.owner = THIS_MODULE;
- strlcpy(i2cd->adapter.name, "NVIDIA GPU I2C adapter",
+ strscpy(i2cd->adapter.name, "NVIDIA GPU I2C adapter",
sizeof(i2cd->adapter.name));
i2cd->adapter.algo = &gpu_i2c_algorithm;
i2cd->adapter.quirks = &gpu_i2c_quirks;
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index d4f6c6d60683..f9ae520aed22 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1488,7 +1488,7 @@ omap_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(adap, omap);
adap->owner = THIS_MODULE;
adap->class = I2C_CLASS_DEPRECATED;
- strlcpy(adap->name, "OMAP I2C adapter", sizeof(adap->name));
+ strscpy(adap->name, "OMAP I2C adapter", sizeof(adap->name));
adap->algo = &omap_i2c_algo;
adap->quirks = &omap_i2c_quirks;
adap->dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-opal.c b/drivers/i2c/busses/i2c-opal.c
index 6eb0f50c5d28..9f773b4f5ed8 100644
--- a/drivers/i2c/busses/i2c-opal.c
+++ b/drivers/i2c/busses/i2c-opal.c
@@ -220,9 +220,9 @@ static int i2c_opal_probe(struct platform_device *pdev)
adapter->dev.of_node = of_node_get(pdev->dev.of_node);
pname = of_get_property(pdev->dev.of_node, "ibm,port-name", NULL);
if (pname)
- strlcpy(adapter->name, pname, sizeof(adapter->name));
+ strscpy(adapter->name, pname, sizeof(adapter->name));
else
- strlcpy(adapter->name, "opal", sizeof(adapter->name));
+ strscpy(adapter->name, "opal", sizeof(adapter->name));
platform_set_drvdata(pdev, adapter);
rc = i2c_add_adapter(adapter);
diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c
index 231145c48728..0af86a542568 100644
--- a/drivers/i2c/busses/i2c-parport.c
+++ b/drivers/i2c/busses/i2c-parport.c
@@ -308,7 +308,7 @@ static void i2c_parport_attach(struct parport *port)
/* Fill the rest of the structure */
adapter->adapter.owner = THIS_MODULE;
adapter->adapter.class = I2C_CLASS_HWMON;
- strlcpy(adapter->adapter.name, "Parallel port adapter",
+ strscpy(adapter->adapter.name, "Parallel port adapter",
sizeof(adapter->adapter.name));
adapter->algo_data = parport_algo_data;
/* Slow down if we can't sense SCL */
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 690188a9ffff..b605b6e43cb9 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -1403,7 +1403,7 @@ static int i2c_pxa_probe(struct platform_device *dev)
spin_lock_init(&i2c->lock);
init_waitqueue_head(&i2c->wait);
- strlcpy(i2c->adap.name, "pxa_i2c-i2c", sizeof(i2c->adap.name));
+ strscpy(i2c->adap.name, "pxa_i2c-i2c", sizeof(i2c->adap.name));
i2c->clk = devm_clk_get(&dev->dev, NULL);
if (IS_ERR(i2c->clk)) {
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 6ac179a373ff..84a77512614d 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -494,12 +494,12 @@ static void geni_i2c_gpi_unmap(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
{
if (tx_buf) {
dma_unmap_single(gi2c->se.dev->parent, tx_addr, msg->len, DMA_TO_DEVICE);
- i2c_put_dma_safe_msg_buf(tx_buf, msg, false);
+ i2c_put_dma_safe_msg_buf(tx_buf, msg, !gi2c->err);
}
if (rx_buf) {
dma_unmap_single(gi2c->se.dev->parent, rx_addr, msg->len, DMA_FROM_DEVICE);
- i2c_put_dma_safe_msg_buf(rx_buf, msg, false);
+ i2c_put_dma_safe_msg_buf(rx_buf, msg, !gi2c->err);
}
}
@@ -563,6 +563,7 @@ static int geni_i2c_gpi(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
desc->callback_param = gi2c;
dmaengine_submit(desc);
+ *buf = dma_buf;
*dma_addr_p = addr;
return 0;
@@ -816,7 +817,7 @@ static int geni_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(&gi2c->adap, gi2c);
gi2c->adap.dev.parent = dev;
gi2c->adap.dev.of_node = dev->of_node;
- strlcpy(gi2c->adap.name, "Geni-I2C", sizeof(gi2c->adap.name));
+ strscpy(gi2c->adap.name, "Geni-I2C", sizeof(gi2c->adap.name));
ret = geni_icc_get(&gi2c->se, "qup-memory");
if (ret)
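In the geni change above, i2c_put_dma_safe_msg_buf() is now called with !gi2c->err, so the bounce buffer is only copied back into the message when the transfer actually succeeded. A rough, hypothetical sketch of that pattern (demo_do_dma() stands in for the real hardware transfer):

#include <linux/i2c.h>

static int demo_do_dma(struct i2c_adapter *adap, struct i2c_msg *msg, u8 *buf);

/* Hypothetical DMA path around the i2c bounce-buffer helpers. */
static int demo_xfer_dma(struct i2c_adapter *adap, struct i2c_msg *msg)
{
        u8 *buf;
        int err;

        /* Bounce-buffer the message if msg->buf is not DMA safe. */
        buf = i2c_get_dma_safe_msg_buf(msg, 8);
        if (!buf)
                return -ENOMEM;

        err = demo_do_dma(adap, msg, buf);

        /* Copy back into msg->buf only when the transfer really happened. */
        i2c_put_dma_safe_msg_buf(buf, msg, !err);

        return err;
}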
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index 69e9f3ecf87d..2e153f2f71b6 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -1878,7 +1878,7 @@ nodma:
qup->adap.dev.of_node = pdev->dev.of_node;
qup->is_last = true;
- strlcpy(qup->adap.name, "QUP I2C adapter", sizeof(qup->adap.name));
+ strscpy(qup->adap.name, "QUP I2C adapter", sizeof(qup->adap.name));
pm_runtime_set_autosuspend_delay(qup->dev, MSEC_PER_SEC);
pm_runtime_use_autosuspend(qup->dev);
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 6e7be9d9f504..cef82b205c26 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -1076,7 +1076,7 @@ static int rcar_i2c_probe(struct platform_device *pdev)
adap->bus_recovery_info = &rcar_i2c_bri;
adap->quirks = &rcar_i2c_quirks;
i2c_set_adapdata(adap, priv);
- strlcpy(adap->name, pdev->name, sizeof(adap->name));
+ strscpy(adap->name, pdev->name, sizeof(adap->name));
/* Init DMA */
sg_init_table(&priv->sg, 1);
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index cded77e06670..ecba1dfc1278 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -448,7 +448,7 @@ static int riic_i2c_probe(struct platform_device *pdev)
adap = &riic->adapter;
i2c_set_adapdata(adap, riic);
- strlcpy(adap->name, "Renesas RIIC adapter", sizeof(adap->name));
+ strscpy(adap->name, "Renesas RIIC adapter", sizeof(adap->name));
adap->owner = THIS_MODULE;
adap->algo = &riic_algo;
adap->dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 989040a73626..2e98e7793bba 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -1240,7 +1240,7 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
/* use common interface to get I2C timing properties */
i2c_parse_fw_timings(&pdev->dev, &i2c->t, true);
- strlcpy(i2c->adap.name, "rk3x-i2c", sizeof(i2c->adap.name));
+ strscpy(i2c->adap.name, "rk3x-i2c", sizeof(i2c->adap.name));
i2c->adap.owner = THIS_MODULE;
i2c->adap.algo = &rk3x_i2c_algorithm;
i2c->adap.retries = 3;
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index b49a1b170bb2..36dab9cd208c 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -1076,7 +1076,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
else
s3c24xx_i2c_parse_dt(pdev->dev.of_node, i2c);
- strlcpy(i2c->adap.name, "s3c2410-i2c", sizeof(i2c->adap.name));
+ strscpy(i2c->adap.name, "s3c2410-i2c", sizeof(i2c->adap.name));
i2c->adap.owner = THIS_MODULE;
i2c->adap.algo = &s3c24xx_i2c_algorithm;
i2c->adap.retries = 2;
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
index 79798fc7462a..6746aa46d96c 100644
--- a/drivers/i2c/busses/i2c-scmi.c
+++ b/drivers/i2c/busses/i2c-scmi.c
@@ -30,7 +30,7 @@ struct acpi_smbus_cmi {
u8 cap_info:1;
u8 cap_read:1;
u8 cap_write:1;
- const struct smbus_methods_t *methods;
+ struct smbus_methods_t *methods;
};
static const struct smbus_methods_t smbus_methods = {
@@ -361,6 +361,7 @@ static acpi_status acpi_smbus_cmi_query_methods(acpi_handle handle, u32 level,
static int acpi_smbus_cmi_add(struct acpi_device *device)
{
struct acpi_smbus_cmi *smbus_cmi;
+ const struct acpi_device_id *id;
int ret;
smbus_cmi = kzalloc(sizeof(struct acpi_smbus_cmi), GFP_KERNEL);
@@ -368,7 +369,6 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
return -ENOMEM;
smbus_cmi->handle = device->handle;
- smbus_cmi->methods = device_get_match_data(&device->dev);
strcpy(acpi_device_name(device), ACPI_SMBUS_HC_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_SMBUS_HC_CLASS);
device->driver_data = smbus_cmi;
@@ -376,6 +376,11 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
smbus_cmi->cap_read = 0;
smbus_cmi->cap_write = 0;
+ for (id = acpi_smbus_cmi_ids; id->id[0]; id++)
+ if (!strcmp(id->id, acpi_device_hid(device)))
+ smbus_cmi->methods =
+ (struct smbus_methods_t *) id->driver_data;
+
acpi_walk_namespace(ACPI_TYPE_METHOD, smbus_cmi->handle, 1,
acpi_smbus_cmi_query_methods, NULL, smbus_cmi, NULL);
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 72f024a0c363..29330ee64c9c 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -940,7 +940,7 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
adap->nr = dev->id;
adap->dev.of_node = dev->dev.of_node;
- strlcpy(adap->name, dev->name, sizeof(adap->name));
+ strscpy(adap->name, dev->name, sizeof(adap->name));
spin_lock_init(&pd->lock);
init_waitqueue_head(&pd->wait);
diff --git a/drivers/i2c/busses/i2c-simtec.c b/drivers/i2c/busses/i2c-simtec.c
index 458c7bcf1d24..87701744752f 100644
--- a/drivers/i2c/busses/i2c-simtec.c
+++ b/drivers/i2c/busses/i2c-simtec.c
@@ -99,7 +99,7 @@ static int simtec_i2c_probe(struct platform_device *dev)
pd->adap.algo_data = &pd->bit;
pd->adap.dev.parent = &dev->dev;
- strlcpy(pd->adap.name, "Simtec I2C", sizeof(pd->adap.name));
+ strscpy(pd->adap.name, "Simtec I2C", sizeof(pd->adap.name));
pd->bit.data = pd;
pd->bit.setsda = simtec_i2c_setsda;
diff --git a/drivers/i2c/busses/i2c-taos-evm.c b/drivers/i2c/busses/i2c-taos-evm.c
index b4050f5b6746..b0f0120793e1 100644
--- a/drivers/i2c/busses/i2c-taos-evm.c
+++ b/drivers/i2c/busses/i2c-taos-evm.c
@@ -239,7 +239,7 @@ static int taos_connect(struct serio *serio, struct serio_driver *drv)
dev_err(&serio->dev, "TAOS EVM identification failed\n");
goto exit_close;
}
- strlcpy(adapter->name, name, sizeof(adapter->name));
+ strscpy(adapter->name, name, sizeof(adapter->name));
/* Turn echo off for better performance */
taos->state = TAOS_STATE_EOFF;
diff --git a/drivers/i2c/busses/i2c-tegra-bpmp.c b/drivers/i2c/busses/i2c-tegra-bpmp.c
index ec0c7cad4240..95139985b2d5 100644
--- a/drivers/i2c/busses/i2c-tegra-bpmp.c
+++ b/drivers/i2c/busses/i2c-tegra-bpmp.c
@@ -305,7 +305,7 @@ static int tegra_bpmp_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(&i2c->adapter, i2c);
i2c->adapter.owner = THIS_MODULE;
- strlcpy(i2c->adapter.name, "Tegra BPMP I2C adapter",
+ strscpy(i2c->adapter.name, "Tegra BPMP I2C adapter",
sizeof(i2c->adapter.name));
i2c->adapter.algo = &tegra_bpmp_i2c_algo;
i2c->adapter.dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 2941e42aa6a0..031c78ac42e6 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -1825,7 +1825,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
if (i2c_dev->hw->supports_bus_clear)
i2c_dev->adapter.bus_recovery_info = &tegra_i2c_recovery_info;
- strlcpy(i2c_dev->adapter.name, dev_name(i2c_dev->dev),
+ strscpy(i2c_dev->adapter.name, dev_name(i2c_dev->dev),
sizeof(i2c_dev->adapter.name));
err = i2c_add_numbered_adapter(&i2c_dev->adapter);
diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
index cb4666c54a23..d7b622891e52 100644
--- a/drivers/i2c/busses/i2c-uniphier-f.c
+++ b/drivers/i2c/busses/i2c-uniphier-f.c
@@ -564,7 +564,7 @@ static int uniphier_fi2c_probe(struct platform_device *pdev)
priv->adap.algo = &uniphier_fi2c_algo;
priv->adap.dev.parent = dev;
priv->adap.dev.of_node = dev->of_node;
- strlcpy(priv->adap.name, "UniPhier FI2C", sizeof(priv->adap.name));
+ strscpy(priv->adap.name, "UniPhier FI2C", sizeof(priv->adap.name));
priv->adap.bus_recovery_info = &uniphier_fi2c_bus_recovery_info;
i2c_set_adapdata(&priv->adap, priv);
platform_set_drvdata(pdev, priv);
diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c
index ee00a44bf4c7..e3ebae381f08 100644
--- a/drivers/i2c/busses/i2c-uniphier.c
+++ b/drivers/i2c/busses/i2c-uniphier.c
@@ -358,7 +358,7 @@ static int uniphier_i2c_probe(struct platform_device *pdev)
priv->adap.algo = &uniphier_i2c_algo;
priv->adap.dev.parent = dev;
priv->adap.dev.of_node = dev->of_node;
- strlcpy(priv->adap.name, "UniPhier I2C", sizeof(priv->adap.name));
+ strscpy(priv->adap.name, "UniPhier I2C", sizeof(priv->adap.name));
priv->adap.bus_recovery_info = &uniphier_i2c_bus_recovery_info;
i2c_set_adapdata(&priv->adap, priv);
platform_set_drvdata(pdev, priv);
diff --git a/drivers/i2c/busses/i2c-versatile.c b/drivers/i2c/busses/i2c-versatile.c
index 8d980b1374a8..1ab419f8fa52 100644
--- a/drivers/i2c/busses/i2c-versatile.c
+++ b/drivers/i2c/busses/i2c-versatile.c
@@ -79,7 +79,7 @@ static int i2c_versatile_probe(struct platform_device *dev)
writel(SCL | SDA, i2c->base + I2C_CONTROLS);
i2c->adap.owner = THIS_MODULE;
- strlcpy(i2c->adap.name, "Versatile I2C adapter", sizeof(i2c->adap.name));
+ strscpy(i2c->adap.name, "Versatile I2C adapter", sizeof(i2c->adap.name));
i2c->adap.algo_data = &i2c->algo;
i2c->adap.dev.parent = &dev->dev;
i2c->adap.dev.of_node = dev->dev.of_node;
diff --git a/drivers/i2c/busses/i2c-wmt.c b/drivers/i2c/busses/i2c-wmt.c
index 88f5aafdce5b..7d4bc8736079 100644
--- a/drivers/i2c/busses/i2c-wmt.c
+++ b/drivers/i2c/busses/i2c-wmt.c
@@ -413,7 +413,7 @@ static int wmt_i2c_probe(struct platform_device *pdev)
adap = &i2c_dev->adapter;
i2c_set_adapdata(adap, i2c_dev);
- strlcpy(adap->name, "WMT I2C adapter", sizeof(adap->name));
+ strscpy(adap->name, "WMT I2C adapter", sizeof(adap->name));
adap->owner = THIS_MODULE;
adap->algo = &wmt_i2c_algo;
adap->dev.parent = &pdev->dev;
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 10f35f942066..91007558bcb2 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -933,7 +933,7 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
client->init_irq = i2c_dev_irq_from_resources(info->resources,
info->num_resources);
- strlcpy(client->name, info->type, sizeof(client->name));
+ strscpy(client->name, info->type, sizeof(client->name));
status = i2c_check_addr_validity(client->addr, client->flags);
if (status) {
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index 775332945ad0..8ba9b59a3c40 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -391,7 +391,7 @@ void i2c_register_spd(struct i2c_adapter *adap)
unsigned short addr_list[2];
memset(&info, 0, sizeof(struct i2c_board_info));
- strlcpy(info.type, name, I2C_NAME_SIZE);
+ strscpy(info.type, name, I2C_NAME_SIZE);
addr_list[0] = 0x50 + n;
addr_list[1] = I2C_CLIENT_END;
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 33d3ce9c888e..aa36ac618e72 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -78,20 +78,21 @@ config INFINIBAND_VIRT_DMA
def_bool !HIGHMEM
if INFINIBAND_USER_ACCESS || !INFINIBAND_USER_ACCESS
-source "drivers/infiniband/hw/mthca/Kconfig"
-source "drivers/infiniband/hw/qib/Kconfig"
+source "drivers/infiniband/hw/bnxt_re/Kconfig"
source "drivers/infiniband/hw/cxgb4/Kconfig"
source "drivers/infiniband/hw/efa/Kconfig"
+source "drivers/infiniband/hw/erdma/Kconfig"
+source "drivers/infiniband/hw/hfi1/Kconfig"
+source "drivers/infiniband/hw/hns/Kconfig"
source "drivers/infiniband/hw/irdma/Kconfig"
source "drivers/infiniband/hw/mlx4/Kconfig"
source "drivers/infiniband/hw/mlx5/Kconfig"
+source "drivers/infiniband/hw/mthca/Kconfig"
source "drivers/infiniband/hw/ocrdma/Kconfig"
-source "drivers/infiniband/hw/vmw_pvrdma/Kconfig"
-source "drivers/infiniband/hw/usnic/Kconfig"
-source "drivers/infiniband/hw/hns/Kconfig"
-source "drivers/infiniband/hw/bnxt_re/Kconfig"
-source "drivers/infiniband/hw/hfi1/Kconfig"
source "drivers/infiniband/hw/qedr/Kconfig"
+source "drivers/infiniband/hw/qib/Kconfig"
+source "drivers/infiniband/hw/usnic/Kconfig"
+source "drivers/infiniband/hw/vmw_pvrdma/Kconfig"
source "drivers/infiniband/sw/rdmavt/Kconfig"
source "drivers/infiniband/sw/rxe/Kconfig"
source "drivers/infiniband/sw/siw/Kconfig"
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index fabca5e51e3d..46d06678dfbe 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -11,6 +11,7 @@
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
+#include <linux/rbtree.h>
#include <linux/igmp.h>
#include <linux/xarray.h>
#include <linux/inetdevice.h>
@@ -20,6 +21,7 @@
#include <net/net_namespace.h>
#include <net/netns/generic.h>
+#include <net/netevent.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip_fib.h>
@@ -168,6 +170,9 @@ static struct ib_sa_client sa_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
+static struct rb_root id_table = RB_ROOT;
+/* Serialize operations of id_table tree */
+static DEFINE_SPINLOCK(id_table_lock);
static struct workqueue_struct *cma_wq;
static unsigned int cma_pernet_id;
@@ -202,6 +207,11 @@ struct xarray *cma_pernet_xa(struct net *net, enum rdma_ucm_port_space ps)
}
}
+struct id_table_entry {
+ struct list_head id_list;
+ struct rb_node rb_node;
+};
+
struct cma_device {
struct list_head list;
struct ib_device *device;
@@ -420,11 +430,21 @@ static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr)
return hdr->ip_version >> 4;
}
-static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
+static void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}
+static struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv)
+{
+ return (struct sockaddr *)&id_priv->id.route.addr.src_addr;
+}
+
+static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv)
+{
+ return (struct sockaddr *)&id_priv->id.route.addr.dst_addr;
+}
+
static int cma_igmp_send(struct net_device *ndev, union ib_gid *mgid, bool join)
{
struct in_device *in_dev = NULL;
@@ -445,6 +465,117 @@ static int cma_igmp_send(struct net_device *ndev, union ib_gid *mgid, bool join)
return (in_dev) ? 0 : -ENODEV;
}
+static int compare_netdev_and_ip(int ifindex_a, struct sockaddr *sa,
+ struct id_table_entry *entry_b)
+{
+ struct rdma_id_private *id_priv = list_first_entry(
+ &entry_b->id_list, struct rdma_id_private, id_list_entry);
+ int ifindex_b = id_priv->id.route.addr.dev_addr.bound_dev_if;
+ struct sockaddr *sb = cma_dst_addr(id_priv);
+
+ if (ifindex_a != ifindex_b)
+ return (ifindex_a > ifindex_b) ? 1 : -1;
+
+ if (sa->sa_family != sb->sa_family)
+ return sa->sa_family - sb->sa_family;
+
+ if (sa->sa_family == AF_INET)
+ return memcmp((char *)&((struct sockaddr_in *)sa)->sin_addr,
+ (char *)&((struct sockaddr_in *)sb)->sin_addr,
+ sizeof(((struct sockaddr_in *)sa)->sin_addr));
+
+ return ipv6_addr_cmp(&((struct sockaddr_in6 *)sa)->sin6_addr,
+ &((struct sockaddr_in6 *)sb)->sin6_addr);
+}
+
+static int cma_add_id_to_tree(struct rdma_id_private *node_id_priv)
+{
+ struct rb_node **new, *parent = NULL;
+ struct id_table_entry *this, *node;
+ unsigned long flags;
+ int result;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&id_table_lock, flags);
+ new = &id_table.rb_node;
+ while (*new) {
+ this = container_of(*new, struct id_table_entry, rb_node);
+ result = compare_netdev_and_ip(
+ node_id_priv->id.route.addr.dev_addr.bound_dev_if,
+ cma_dst_addr(node_id_priv), this);
+
+ parent = *new;
+ if (result < 0)
+ new = &((*new)->rb_left);
+ else if (result > 0)
+ new = &((*new)->rb_right);
+ else {
+ list_add_tail(&node_id_priv->id_list_entry,
+ &this->id_list);
+ kfree(node);
+ goto unlock;
+ }
+ }
+
+ INIT_LIST_HEAD(&node->id_list);
+ list_add_tail(&node_id_priv->id_list_entry, &node->id_list);
+
+ rb_link_node(&node->rb_node, parent, new);
+ rb_insert_color(&node->rb_node, &id_table);
+
+unlock:
+ spin_unlock_irqrestore(&id_table_lock, flags);
+ return 0;
+}
+
+static struct id_table_entry *
+node_from_ndev_ip(struct rb_root *root, int ifindex, struct sockaddr *sa)
+{
+ struct rb_node *node = root->rb_node;
+ struct id_table_entry *data;
+ int result;
+
+ while (node) {
+ data = container_of(node, struct id_table_entry, rb_node);
+ result = compare_netdev_and_ip(ifindex, sa, data);
+ if (result < 0)
+ node = node->rb_left;
+ else if (result > 0)
+ node = node->rb_right;
+ else
+ return data;
+ }
+
+ return NULL;
+}
+
+static void cma_remove_id_from_tree(struct rdma_id_private *id_priv)
+{
+ struct id_table_entry *data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&id_table_lock, flags);
+ if (list_empty(&id_priv->id_list_entry))
+ goto out;
+
+ data = node_from_ndev_ip(&id_table,
+ id_priv->id.route.addr.dev_addr.bound_dev_if,
+ cma_dst_addr(id_priv));
+ if (!data)
+ goto out;
+
+ list_del_init(&id_priv->id_list_entry);
+ if (list_empty(&data->id_list)) {
+ rb_erase(&data->rb_node, &id_table);
+ kfree(data);
+ }
+out:
+ spin_unlock_irqrestore(&id_table_lock, flags);
+}
+
static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
struct cma_device *cma_dev)
{
@@ -481,16 +612,6 @@ static void cma_release_dev(struct rdma_id_private *id_priv)
mutex_unlock(&lock);
}
-static inline struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv)
-{
- return (struct sockaddr *) &id_priv->id.route.addr.src_addr;
-}
-
-static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv)
-{
- return (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
-}
-
static inline unsigned short cma_family(struct rdma_id_private *id_priv)
{
return id_priv->id.route.addr.src_addr.ss_family;
@@ -861,6 +982,7 @@ __rdma_create_id(struct net *net, rdma_cm_event_handler event_handler,
refcount_set(&id_priv->refcount, 1);
mutex_init(&id_priv->handler_mutex);
INIT_LIST_HEAD(&id_priv->device_item);
+ INIT_LIST_HEAD(&id_priv->id_list_entry);
INIT_LIST_HEAD(&id_priv->listen_list);
INIT_LIST_HEAD(&id_priv->mc_list);
get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
@@ -1883,6 +2005,7 @@ static void _destroy_id(struct rdma_id_private *id_priv,
cma_cancel_operation(id_priv, state);
rdma_restrack_del(&id_priv->res);
+ cma_remove_id_from_tree(id_priv);
if (id_priv->cma_dev) {
if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
if (id_priv->cm_id.ib)
@@ -3172,8 +3295,11 @@ int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms)
cma_id_get(id_priv);
if (rdma_cap_ib_sa(id->device, id->port_num))
ret = cma_resolve_ib_route(id_priv, timeout_ms);
- else if (rdma_protocol_roce(id->device, id->port_num))
+ else if (rdma_protocol_roce(id->device, id->port_num)) {
ret = cma_resolve_iboe_route(id_priv);
+ if (!ret)
+ cma_add_id_to_tree(id_priv);
+ }
else if (rdma_protocol_iwarp(id->device, id->port_num))
ret = cma_resolve_iw_route(id_priv);
else
@@ -4922,10 +5048,87 @@ out:
return ret;
}
+static void cma_netevent_work_handler(struct work_struct *_work)
+{
+ struct rdma_id_private *id_priv =
+ container_of(_work, struct rdma_id_private, id.net_work);
+ struct rdma_cm_event event = {};
+
+ mutex_lock(&id_priv->handler_mutex);
+
+ if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
+ READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
+ goto out_unlock;
+
+ event.event = RDMA_CM_EVENT_UNREACHABLE;
+ event.status = -ETIMEDOUT;
+
+ if (cma_cm_event_handler(id_priv, &event)) {
+ __acquire(&id_priv->handler_mutex);
+ id_priv->cm_id.ib = NULL;
+ cma_id_put(id_priv);
+ destroy_id_handler_unlock(id_priv);
+ return;
+ }
+
+out_unlock:
+ mutex_unlock(&id_priv->handler_mutex);
+ cma_id_put(id_priv);
+}
+
+static int cma_netevent_callback(struct notifier_block *self,
+ unsigned long event, void *ctx)
+{
+ struct id_table_entry *ips_node = NULL;
+ struct rdma_id_private *current_id;
+ struct neighbour *neigh = ctx;
+ unsigned long flags;
+
+ if (event != NETEVENT_NEIGH_UPDATE)
+ return NOTIFY_DONE;
+
+ spin_lock_irqsave(&id_table_lock, flags);
+ if (neigh->tbl->family == AF_INET6) {
+ struct sockaddr_in6 neigh_sock_6;
+
+ neigh_sock_6.sin6_family = AF_INET6;
+ neigh_sock_6.sin6_addr = *(struct in6_addr *)neigh->primary_key;
+ ips_node = node_from_ndev_ip(&id_table, neigh->dev->ifindex,
+ (struct sockaddr *)&neigh_sock_6);
+ } else if (neigh->tbl->family == AF_INET) {
+ struct sockaddr_in neigh_sock_4;
+
+ neigh_sock_4.sin_family = AF_INET;
+ neigh_sock_4.sin_addr.s_addr = *(__be32 *)(neigh->primary_key);
+ ips_node = node_from_ndev_ip(&id_table, neigh->dev->ifindex,
+ (struct sockaddr *)&neigh_sock_4);
+ } else
+ goto out;
+
+ if (!ips_node)
+ goto out;
+
+ list_for_each_entry(current_id, &ips_node->id_list, id_list_entry) {
+ if (!memcmp(current_id->id.route.addr.dev_addr.dst_dev_addr,
+ neigh->ha, ETH_ALEN))
+ continue;
+ INIT_WORK(&current_id->id.net_work, cma_netevent_work_handler);
+ cma_id_get(current_id);
+ queue_work(cma_wq, &current_id->id.net_work);
+ }
+out:
+ spin_unlock_irqrestore(&id_table_lock, flags);
+ return NOTIFY_DONE;
+}
+
static struct notifier_block cma_nb = {
.notifier_call = cma_netdev_callback
};
+static struct notifier_block cma_netevent_cb = {
+ .notifier_call = cma_netevent_callback
+};
+
static void cma_send_device_removal_put(struct rdma_id_private *id_priv)
{
struct rdma_cm_event event = { .event = RDMA_CM_EVENT_DEVICE_REMOVAL };
@@ -5148,6 +5351,7 @@ static int __init cma_init(void)
ib_sa_register_client(&sa_client);
register_netdevice_notifier(&cma_nb);
+ register_netevent_notifier(&cma_netevent_cb);
ret = ib_register_client(&cma_client);
if (ret)
@@ -5162,6 +5366,7 @@ static int __init cma_init(void)
err_ib:
ib_unregister_client(&cma_client);
err:
+ unregister_netevent_notifier(&cma_netevent_cb);
unregister_netdevice_notifier(&cma_nb);
ib_sa_unregister_client(&sa_client);
unregister_pernet_subsys(&cma_pernet_operations);
@@ -5174,6 +5379,7 @@ static void __exit cma_cleanup(void)
{
cma_configfs_exit();
ib_unregister_client(&cma_client);
+ unregister_netevent_notifier(&cma_netevent_cb);
unregister_netdevice_notifier(&cma_nb);
ib_sa_unregister_client(&sa_client);
unregister_pernet_subsys(&cma_pernet_operations);
diff --git a/drivers/infiniband/core/cma_priv.h b/drivers/infiniband/core/cma_priv.h
index 757a0ef79872..b7354c94cf1b 100644
--- a/drivers/infiniband/core/cma_priv.h
+++ b/drivers/infiniband/core/cma_priv.h
@@ -64,6 +64,7 @@ struct rdma_id_private {
struct list_head listen_item;
struct list_head listen_list;
};
+ struct list_head id_list_entry;
struct cma_device *cma_dev;
struct list_head mc_list;
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index 94d83b665a2f..29b1ab1d5f93 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -68,7 +68,7 @@ static int uverbs_try_lock_object(struct ib_uobject *uobj,
* In exclusive access mode, we check that the counter is zero (nobody
* claimed this object) and we set it to -1. Releasing a shared access
* lock is done simply by decreasing the counter. As for exclusive
- * access locks, since only a single one of them is is allowed
+ * access locks, since only a single one of them is allowed
* concurrently, setting the counter to zero is enough for releasing
* this lock.
*/
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index 68197e576433..e958c43dd28f 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -250,7 +250,7 @@ static bool upper_device_filter(struct ib_device *ib_dev, u32 port,
/**
* is_upper_ndev_bond_master_filter - Check if a given netdevice
- * is bond master device of netdevice of the the RDMA device of port.
+ * is bond master device of netdevice of the RDMA device of port.
* @ib_dev: IB device to check
* @port: Port to consider for adding default GID
* @rdma_ndev: Pointer to rdma netdevice
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index 4d98f931a13d..8367974b7998 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -274,33 +274,6 @@ static int rdma_rw_init_single_wr(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
return 1;
}
-static void rdma_rw_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
- u32 sg_cnt, enum dma_data_direction dir)
-{
- if (is_pci_p2pdma_page(sg_page(sg)))
- pci_p2pdma_unmap_sg(dev->dma_device, sg, sg_cnt, dir);
- else
- ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
-}
-
-static int rdma_rw_map_sgtable(struct ib_device *dev, struct sg_table *sgt,
- enum dma_data_direction dir)
-{
- int nents;
-
- if (is_pci_p2pdma_page(sg_page(sgt->sgl))) {
- if (WARN_ON_ONCE(ib_uses_virt_dma(dev)))
- return 0;
- nents = pci_p2pdma_map_sg(dev->dma_device, sgt->sgl,
- sgt->orig_nents, dir);
- if (!nents)
- return -EIO;
- sgt->nents = nents;
- return 0;
- }
- return ib_dma_map_sgtable_attrs(dev, sgt, dir, 0);
-}
-
/**
* rdma_rw_ctx_init - initialize a RDMA READ/WRITE context
* @ctx: context to initialize
@@ -327,7 +300,7 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
};
int ret;
- ret = rdma_rw_map_sgtable(dev, &sgt, dir);
+ ret = ib_dma_map_sgtable_attrs(dev, &sgt, dir, 0);
if (ret)
return ret;
sg_cnt = sgt.nents;
@@ -366,7 +339,7 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
return ret;
out_unmap_sg:
- rdma_rw_unmap_sg(dev, sgt.sgl, sgt.orig_nents, dir);
+ ib_dma_unmap_sgtable_attrs(dev, &sgt, dir, 0);
return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_init);
@@ -414,12 +387,12 @@ int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
return -EINVAL;
}
- ret = rdma_rw_map_sgtable(dev, &sgt, dir);
+ ret = ib_dma_map_sgtable_attrs(dev, &sgt, dir, 0);
if (ret)
return ret;
if (prot_sg_cnt) {
- ret = rdma_rw_map_sgtable(dev, &prot_sgt, dir);
+ ret = ib_dma_map_sgtable_attrs(dev, &prot_sgt, dir, 0);
if (ret)
goto out_unmap_sg;
}
@@ -486,9 +459,9 @@ out_free_ctx:
kfree(ctx->reg);
out_unmap_prot_sg:
if (prot_sgt.nents)
- rdma_rw_unmap_sg(dev, prot_sgt.sgl, prot_sgt.orig_nents, dir);
+ ib_dma_unmap_sgtable_attrs(dev, &prot_sgt, dir, 0);
out_unmap_sg:
- rdma_rw_unmap_sg(dev, sgt.sgl, sgt.orig_nents, dir);
+ ib_dma_unmap_sgtable_attrs(dev, &sgt, dir, 0);
return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_signature_init);
@@ -621,7 +594,7 @@ void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
break;
}
- rdma_rw_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
+ ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy);
@@ -649,8 +622,8 @@ void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
kfree(ctx->reg);
if (prot_sg_cnt)
- rdma_rw_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
- rdma_rw_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
+ ib_dma_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
+ ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature);
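The rw.c hunks above drop the local P2PDMA special-casing and call ib_dma_map_sgtable_attrs()/ib_dma_unmap_sgtable_attrs() directly, on the assumption that the DMA mapping core now handles P2PDMA pages itself. A minimal illustrative pairing (hypothetical function name):

#include <rdma/ib_verbs.h>

/* Hypothetical sketch: map and unmap an sg_table symmetrically. */
static int demo_map_and_use(struct ib_device *dev, struct sg_table *sgt,
                            enum dma_data_direction dir)
{
        int ret;

        ret = ib_dma_map_sgtable_attrs(dev, sgt, dir, 0);
        if (ret)
                return ret;

        /* ... build work requests from sgt->sgl / sgt->nents here ... */

        ib_dma_unmap_sgtable_attrs(dev, sgt, dir, 0);
        return 0;
}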
diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c
index fce80a4a5147..04c04e6d24c3 100644
--- a/drivers/infiniband/core/umem_dmabuf.c
+++ b/drivers/infiniband/core/umem_dmabuf.c
@@ -18,6 +18,7 @@ int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
struct scatterlist *sg;
unsigned long start, end, cur = 0;
unsigned int nmap = 0;
+ long ret;
int i;
dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);
@@ -67,9 +68,14 @@ wait_fence:
* may be not up-to-date. Wait for the exporter to finish
* the migration.
*/
- return dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv,
+ ret = dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv,
DMA_RESV_USAGE_KERNEL,
false, MAX_SCHEDULE_TIMEOUT);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ return -ETIMEDOUT;
+ return 0;
}
EXPORT_SYMBOL(ib_umem_dmabuf_map_pages);
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index fba0b3be903e..6b3a88046125 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/
obj-$(CONFIG_INFINIBAND_HNS) += hns/
obj-$(CONFIG_INFINIBAND_QEDR) += qedr/
obj-$(CONFIG_INFINIBAND_BNXT_RE) += bnxt_re/
+obj-$(CONFIG_INFINIBAND_ERDMA) += erdma/
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 79401e6c6aa9..785c37cae3c0 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -173,7 +173,7 @@ struct bnxt_re_dev {
/* Max of 2 lossless traffic class supported per port */
u16 cosq[2];
- /* QP for for handling QP1 packets */
+ /* QP for handling QP1 packets */
struct bnxt_re_gsi_context gsi_ctx;
struct bnxt_re_stats stats;
atomic_t nq_alloc_cnt;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index c16017f6e8db..14392c942f49 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -2468,31 +2468,24 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
opt2 |= CCTRL_ECN_V(1);
}
- skb_get(skb);
- rpl = cplhdr(skb);
if (!is_t4(adapter_type)) {
- BUILD_BUG_ON(sizeof(*rpl5) != roundup(sizeof(*rpl5), 16));
- skb_trim(skb, sizeof(*rpl5));
- rpl5 = (void *)rpl;
- INIT_TP_WR(rpl5, ep->hwtid);
- } else {
- skb_trim(skb, sizeof(*rpl));
- INIT_TP_WR(rpl, ep->hwtid);
- }
- OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
- ep->hwtid));
-
- if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
u32 isn = (prandom_u32() & ~7UL) - 1;
+
+ skb = get_skb(skb, roundup(sizeof(*rpl5), 16), GFP_KERNEL);
+ rpl5 = __skb_put_zero(skb, roundup(sizeof(*rpl5), 16));
+ rpl = (void *)rpl5;
+ INIT_TP_WR_CPL(rpl5, CPL_PASS_ACCEPT_RPL, ep->hwtid);
opt2 |= T5_OPT_2_VALID_F;
opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
opt2 |= T5_ISS_F;
- rpl5 = (void *)rpl;
- memset_after(rpl5, 0, iss);
if (peer2peer)
isn += 4;
rpl5->iss = cpu_to_be32(isn);
pr_debug("iss %u\n", be32_to_cpu(rpl5->iss));
+ } else {
+ skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
+ rpl = __skb_put_zero(skb, sizeof(*rpl));
+ INIT_TP_WR_CPL(rpl, CPL_PASS_ACCEPT_RPL, ep->hwtid);
}
rpl->opt0 = cpu_to_be64(opt0);
diff --git a/drivers/infiniband/hw/erdma/Kconfig b/drivers/infiniband/hw/erdma/Kconfig
new file mode 100644
index 000000000000..169038e3ceb1
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config INFINIBAND_ERDMA
+ tristate "Alibaba Elastic RDMA Adapter (ERDMA) support"
+ depends on PCI_MSI && 64BIT
+ depends on INFINIBAND_ADDR_TRANS
+ depends on INFINIBAND_USER_ACCESS
+ help
+ This is an RDMA/iWARP driver for the Alibaba Elastic RDMA Adapter
+ (ERDMA), which provides RDMA features in the Alibaba cloud environment.
+
+ To compile this driver as a module, choose M here. The module will be
+ called erdma.
diff --git a/drivers/infiniband/hw/erdma/Makefile b/drivers/infiniband/hw/erdma/Makefile
new file mode 100644
index 000000000000..51d2ef91905a
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_INFINIBAND_ERDMA) := erdma.o
+
+erdma-y := erdma_cm.o erdma_main.o erdma_cmdq.o erdma_cq.o erdma_verbs.o erdma_qp.o erdma_eq.o
diff --git a/drivers/infiniband/hw/erdma/erdma.h b/drivers/infiniband/hw/erdma/erdma.h
new file mode 100644
index 000000000000..2aae635c1c8d
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma.h
@@ -0,0 +1,287 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+#ifndef __ERDMA_H__
+#define __ERDMA_H__
+
+#include <linux/bitfield.h>
+#include <linux/netdevice.h>
+#include <linux/xarray.h>
+#include <rdma/ib_verbs.h>
+
+#include "erdma_hw.h"
+
+#define DRV_MODULE_NAME "erdma"
+#define ERDMA_NODE_DESC "Elastic RDMA(iWARP) stack"
+
+struct erdma_eq {
+ void *qbuf;
+ dma_addr_t qbuf_dma_addr;
+
+ spinlock_t lock;
+
+ u32 depth;
+
+ u16 ci;
+ u16 rsvd;
+
+ atomic64_t event_num;
+ atomic64_t notify_num;
+
+ u64 __iomem *db_addr;
+ u64 *db_record;
+};
+
+struct erdma_cmdq_sq {
+ void *qbuf;
+ dma_addr_t qbuf_dma_addr;
+
+ spinlock_t lock;
+
+ u32 depth;
+ u16 ci;
+ u16 pi;
+
+ u16 wqebb_cnt;
+
+ u64 *db_record;
+};
+
+struct erdma_cmdq_cq {
+ void *qbuf;
+ dma_addr_t qbuf_dma_addr;
+
+ spinlock_t lock;
+
+ u32 depth;
+ u32 ci;
+ u32 cmdsn;
+
+ u64 *db_record;
+
+ atomic64_t armed_num;
+};
+
+enum {
+ ERDMA_CMD_STATUS_INIT,
+ ERDMA_CMD_STATUS_ISSUED,
+ ERDMA_CMD_STATUS_FINISHED,
+ ERDMA_CMD_STATUS_TIMEOUT
+};
+
+struct erdma_comp_wait {
+ struct completion wait_event;
+ u32 cmd_status;
+ u32 ctx_id;
+ u16 sq_pi;
+ u8 comp_status;
+ u8 rsvd;
+ u32 comp_data[4];
+};
+
+enum {
+ ERDMA_CMDQ_STATE_OK_BIT = 0,
+ ERDMA_CMDQ_STATE_TIMEOUT_BIT = 1,
+ ERDMA_CMDQ_STATE_CTX_ERR_BIT = 2,
+};
+
+#define ERDMA_CMDQ_TIMEOUT_MS 15000
+#define ERDMA_REG_ACCESS_WAIT_MS 20
+#define ERDMA_WAIT_DEV_DONE_CNT 500
+
+struct erdma_cmdq {
+ unsigned long *comp_wait_bitmap;
+ struct erdma_comp_wait *wait_pool;
+ spinlock_t lock;
+
+ bool use_event;
+
+ struct erdma_cmdq_sq sq;
+ struct erdma_cmdq_cq cq;
+ struct erdma_eq eq;
+
+ unsigned long state;
+
+ struct semaphore credits;
+ u16 max_outstandings;
+};
+
+#define COMPROMISE_CC ERDMA_CC_CUBIC
+enum erdma_cc_alg {
+ ERDMA_CC_NEWRENO = 0,
+ ERDMA_CC_CUBIC,
+ ERDMA_CC_HPCC_RTT,
+ ERDMA_CC_HPCC_ECN,
+ ERDMA_CC_HPCC_INT,
+ ERDMA_CC_METHODS_NUM
+};
+
+struct erdma_devattr {
+ u32 fw_version;
+
+ unsigned char peer_addr[ETH_ALEN];
+
+ int numa_node;
+ enum erdma_cc_alg cc;
+ u32 grp_num;
+ u32 irq_num;
+
+ bool disable_dwqe;
+ u16 dwqe_pages;
+ u16 dwqe_entries;
+
+ u32 max_qp;
+ u32 max_send_wr;
+ u32 max_recv_wr;
+ u32 max_ord;
+ u32 max_ird;
+
+ u32 max_send_sge;
+ u32 max_recv_sge;
+ u32 max_sge_rd;
+ u32 max_cq;
+ u32 max_cqe;
+ u64 max_mr_size;
+ u32 max_mr;
+ u32 max_pd;
+ u32 max_mw;
+ u32 local_dma_key;
+};
+
+#define ERDMA_IRQNAME_SIZE 50
+
+struct erdma_irq {
+ char name[ERDMA_IRQNAME_SIZE];
+ u32 msix_vector;
+ cpumask_t affinity_hint_mask;
+};
+
+struct erdma_eq_cb {
+ bool ready;
+ void *dev; /* All EQs use this field to get the erdma_dev struct */
+ struct erdma_irq irq;
+ struct erdma_eq eq;
+ struct tasklet_struct tasklet;
+};
+
+struct erdma_resource_cb {
+ unsigned long *bitmap;
+ spinlock_t lock;
+ u32 next_alloc_idx;
+ u32 max_cap;
+};
+
+enum {
+ ERDMA_RES_TYPE_PD = 0,
+ ERDMA_RES_TYPE_STAG_IDX = 1,
+ ERDMA_RES_CNT = 2,
+};
+
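+/*
+ * Each queue buffer is allocated with ERDMA_EXTRA_BUFFER_SIZE extra bytes at
+ * its end to hold the queue's doorbell record.
+ */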
+#define ERDMA_EXTRA_BUFFER_SIZE ERDMA_DB_SIZE
+#define WARPPED_BUFSIZE(size) ((size) + ERDMA_EXTRA_BUFFER_SIZE)
+
+struct erdma_dev {
+ struct ib_device ibdev;
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct notifier_block netdev_nb;
+
+ resource_size_t func_bar_addr;
+ resource_size_t func_bar_len;
+ u8 __iomem *func_bar;
+
+ struct erdma_devattr attrs;
+ /* physical port state (only one port per device) */
+ enum ib_port_state state;
+
+ /* The cmdq and the aeq share the same MSI-X vector. */
+ struct erdma_irq comm_irq;
+ struct erdma_cmdq cmdq;
+ struct erdma_eq aeq;
+ struct erdma_eq_cb ceqs[ERDMA_NUM_MSIX_VEC - 1];
+
+ spinlock_t lock;
+ struct erdma_resource_cb res_cb[ERDMA_RES_CNT];
+ struct xarray qp_xa;
+ struct xarray cq_xa;
+
+ u32 next_alloc_qpn;
+ u32 next_alloc_cqn;
+
+ spinlock_t db_bitmap_lock;
+ /* We provide a maximum of 64 uContexts, each with its own SQ doorbell page. */
+ DECLARE_BITMAP(sdb_page, ERDMA_DWQE_TYPE0_CNT);
+ /*
+ * We provide a maximum of 496 uContexts, each with one normal SQ
+ * doorbell and one directWQE doorbell.
+ */
+ DECLARE_BITMAP(sdb_entry, ERDMA_DWQE_TYPE1_CNT);
+
+ atomic_t num_ctx;
+ struct list_head cep_list;
+};
+
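+/*
+ * Return the queue entry at logical index @idx. @depth must be a power of
+ * two so that the index wraps by simple masking.
+ */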
+static inline void *get_queue_entry(void *qbuf, u32 idx, u32 depth, u32 shift)
+{
+ idx &= (depth - 1);
+
+ return qbuf + (idx << shift);
+}
+
+static inline struct erdma_dev *to_edev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct erdma_dev, ibdev);
+}
+
+static inline u32 erdma_reg_read32(struct erdma_dev *dev, u32 reg)
+{
+ return readl(dev->func_bar + reg);
+}
+
+static inline u64 erdma_reg_read64(struct erdma_dev *dev, u32 reg)
+{
+ return readq(dev->func_bar + reg);
+}
+
+static inline void erdma_reg_write32(struct erdma_dev *dev, u32 reg, u32 value)
+{
+ writel(value, dev->func_bar + reg);
+}
+
+static inline void erdma_reg_write64(struct erdma_dev *dev, u32 reg, u64 value)
+{
+ writeq(value, dev->func_bar + reg);
+}
+
+static inline u32 erdma_reg_read32_filed(struct erdma_dev *dev, u32 reg,
+ u32 filed_mask)
+{
+ u32 val = erdma_reg_read32(dev, reg);
+
+ return FIELD_GET(filed_mask, val);
+}
+
+int erdma_cmdq_init(struct erdma_dev *dev);
+void erdma_finish_cmdq_init(struct erdma_dev *dev);
+void erdma_cmdq_destroy(struct erdma_dev *dev);
+
+void erdma_cmdq_build_reqhdr(u64 *hdr, u32 mod, u32 op);
+int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, u64 *req, u32 req_size,
+ u64 *resp0, u64 *resp1);
+void erdma_cmdq_completion_handler(struct erdma_cmdq *cmdq);
+
+int erdma_ceqs_init(struct erdma_dev *dev);
+void erdma_ceqs_uninit(struct erdma_dev *dev);
+void notify_eq(struct erdma_eq *eq);
+void *get_next_valid_eqe(struct erdma_eq *eq);
+
+int erdma_aeq_init(struct erdma_dev *dev);
+void erdma_aeq_destroy(struct erdma_dev *dev);
+
+void erdma_aeq_event_handler(struct erdma_dev *dev);
+void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb);
+
+#endif
diff --git a/drivers/infiniband/hw/erdma/erdma_cm.c b/drivers/infiniband/hw/erdma/erdma_cm.c
new file mode 100644
index 000000000000..f13f16479eca
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_cm.c
@@ -0,0 +1,1430 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
+/* Fredy Neeser */
+/* Greg Joyce <greg@opengridcomputing.com> */
+/* Copyright (c) 2008-2019, IBM Corporation */
+/* Copyright (c) 2017, Open Grid Computing, Inc. */
+
+#include <linux/errno.h>
+#include <linux/inetdevice.h>
+#include <linux/net.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <net/addrconf.h>
+
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_verbs.h>
+
+#include "erdma.h"
+#include "erdma_cm.h"
+#include "erdma_verbs.h"
+
+static struct workqueue_struct *erdma_cm_wq;
+
+static void erdma_cm_llp_state_change(struct sock *sk);
+static void erdma_cm_llp_data_ready(struct sock *sk);
+static void erdma_cm_llp_error_report(struct sock *sk);
+
+static void erdma_sk_assign_cm_upcalls(struct sock *sk)
+{
+ write_lock_bh(&sk->sk_callback_lock);
+ sk->sk_state_change = erdma_cm_llp_state_change;
+ sk->sk_data_ready = erdma_cm_llp_data_ready;
+ sk->sk_error_report = erdma_cm_llp_error_report;
+ write_unlock_bh(&sk->sk_callback_lock);
+}
+
+static void erdma_sk_save_upcalls(struct sock *sk)
+{
+ struct erdma_cep *cep = sk_to_cep(sk);
+
+ write_lock_bh(&sk->sk_callback_lock);
+ cep->sk_state_change = sk->sk_state_change;
+ cep->sk_data_ready = sk->sk_data_ready;
+ cep->sk_error_report = sk->sk_error_report;
+ write_unlock_bh(&sk->sk_callback_lock);
+}
+
+static void erdma_sk_restore_upcalls(struct sock *sk, struct erdma_cep *cep)
+{
+ sk->sk_state_change = cep->sk_state_change;
+ sk->sk_data_ready = cep->sk_data_ready;
+ sk->sk_error_report = cep->sk_error_report;
+ sk->sk_user_data = NULL;
+}
+
+static void erdma_socket_disassoc(struct socket *s)
+{
+ struct sock *sk = s->sk;
+ struct erdma_cep *cep;
+
+ if (sk) {
+ write_lock_bh(&sk->sk_callback_lock);
+ cep = sk_to_cep(sk);
+ if (cep) {
+ erdma_sk_restore_upcalls(sk, cep);
+ erdma_cep_put(cep);
+ } else {
+ WARN_ON_ONCE(1);
+ }
+ write_unlock_bh(&sk->sk_callback_lock);
+ } else {
+ WARN_ON_ONCE(1);
+ }
+}
+
+static void erdma_cep_socket_assoc(struct erdma_cep *cep, struct socket *s)
+{
+ cep->sock = s;
+ erdma_cep_get(cep);
+ s->sk->sk_user_data = cep;
+
+ erdma_sk_save_upcalls(s->sk);
+ erdma_sk_assign_cm_upcalls(s->sk);
+}
+
+static void erdma_disassoc_listen_cep(struct erdma_cep *cep)
+{
+ if (cep->listen_cep) {
+ erdma_cep_put(cep->listen_cep);
+ cep->listen_cep = NULL;
+ }
+}
+
+static struct erdma_cep *erdma_cep_alloc(struct erdma_dev *dev)
+{
+ struct erdma_cep *cep = kzalloc(sizeof(*cep), GFP_KERNEL);
+ unsigned long flags;
+
+ if (!cep)
+ return NULL;
+
+ INIT_LIST_HEAD(&cep->listenq);
+ INIT_LIST_HEAD(&cep->devq);
+ INIT_LIST_HEAD(&cep->work_freelist);
+
+ kref_init(&cep->ref);
+ cep->state = ERDMA_EPSTATE_IDLE;
+ init_waitqueue_head(&cep->waitq);
+ spin_lock_init(&cep->lock);
+ cep->dev = dev;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ list_add_tail(&cep->devq, &dev->cep_list);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return cep;
+}
+
+static void erdma_cm_free_work(struct erdma_cep *cep)
+{
+ struct list_head *w, *tmp;
+ struct erdma_cm_work *work;
+
+ list_for_each_safe(w, tmp, &cep->work_freelist) {
+ work = list_entry(w, struct erdma_cm_work, list);
+ list_del(&work->list);
+ kfree(work);
+ }
+}
+
+static void erdma_cancel_mpatimer(struct erdma_cep *cep)
+{
+ spin_lock_bh(&cep->lock);
+ if (cep->mpa_timer) {
+ if (cancel_delayed_work(&cep->mpa_timer->work)) {
+ erdma_cep_put(cep);
+ kfree(cep->mpa_timer);
+ }
+ cep->mpa_timer = NULL;
+ }
+ spin_unlock_bh(&cep->lock);
+}
+
+static void erdma_put_work(struct erdma_cm_work *work)
+{
+ INIT_LIST_HEAD(&work->list);
+ spin_lock_bh(&work->cep->lock);
+ list_add(&work->list, &work->cep->work_freelist);
+ spin_unlock_bh(&work->cep->lock);
+}
+
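+/*
+ * Claim exclusive use of a CEP: wait until no other context holds it, then
+ * mark it busy so that connection state changes are serialized.
+ */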
+static void erdma_cep_set_inuse(struct erdma_cep *cep)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cep->lock, flags);
+ while (cep->in_use) {
+ spin_unlock_irqrestore(&cep->lock, flags);
+ wait_event_interruptible(cep->waitq, !cep->in_use);
+ if (signal_pending(current))
+ flush_signals(current);
+
+ spin_lock_irqsave(&cep->lock, flags);
+ }
+
+ cep->in_use = 1;
+ spin_unlock_irqrestore(&cep->lock, flags);
+}
+
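+/*
+ * Drop the exclusive claim taken by erdma_cep_set_inuse() and wake up any
+ * waiters.
+ */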
+static void erdma_cep_set_free(struct erdma_cep *cep)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cep->lock, flags);
+ cep->in_use = 0;
+ spin_unlock_irqrestore(&cep->lock, flags);
+
+ wake_up(&cep->waitq);
+}
+
+static void __erdma_cep_dealloc(struct kref *ref)
+{
+ struct erdma_cep *cep = container_of(ref, struct erdma_cep, ref);
+ struct erdma_dev *dev = cep->dev;
+ unsigned long flags;
+
+ WARN_ON(cep->listen_cep);
+
+ kfree(cep->private_data);
+ kfree(cep->mpa.pdata);
+ spin_lock_bh(&cep->lock);
+ if (!list_empty(&cep->work_freelist))
+ erdma_cm_free_work(cep);
+ spin_unlock_bh(&cep->lock);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ list_del(&cep->devq);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ kfree(cep);
+}
+
+static struct erdma_cm_work *erdma_get_work(struct erdma_cep *cep)
+{
+ struct erdma_cm_work *work = NULL;
+
+ spin_lock_bh(&cep->lock);
+ if (!list_empty(&cep->work_freelist)) {
+ work = list_entry(cep->work_freelist.next, struct erdma_cm_work,
+ list);
+ list_del_init(&work->list);
+ }
+
+ spin_unlock_bh(&cep->lock);
+ return work;
+}
+
+static int erdma_cm_alloc_work(struct erdma_cep *cep, int num)
+{
+ struct erdma_cm_work *work;
+
+ while (num--) {
+ work = kmalloc(sizeof(*work), GFP_KERNEL);
+ if (!work) {
+ if (!(list_empty(&cep->work_freelist)))
+ erdma_cm_free_work(cep);
+ return -ENOMEM;
+ }
+ work->cep = cep;
+ INIT_LIST_HEAD(&work->list);
+ list_add(&work->list, &cep->work_freelist);
+ }
+
+ return 0;
+}
+
+static int erdma_cm_upcall(struct erdma_cep *cep, enum iw_cm_event_type reason,
+ int status)
+{
+ struct iw_cm_event event;
+ struct iw_cm_id *cm_id;
+
+ memset(&event, 0, sizeof(event));
+ event.status = status;
+ event.event = reason;
+
+ if (reason == IW_CM_EVENT_CONNECT_REQUEST) {
+ event.provider_data = cep;
+ cm_id = cep->listen_cep->cm_id;
+
+ event.ird = cep->dev->attrs.max_ird;
+ event.ord = cep->dev->attrs.max_ord;
+ } else {
+ cm_id = cep->cm_id;
+ }
+
+ if (reason == IW_CM_EVENT_CONNECT_REQUEST ||
+ reason == IW_CM_EVENT_CONNECT_REPLY) {
+ u16 pd_len = be16_to_cpu(cep->mpa.hdr.params.pd_len);
+
+ if (pd_len && cep->mpa.pdata) {
+ event.private_data_len = pd_len;
+ event.private_data = cep->mpa.pdata;
+ }
+
+ getname_local(cep->sock, &event.local_addr);
+ getname_peer(cep->sock, &event.remote_addr);
+ }
+
+ return cm_id->event_handler(cm_id, &event);
+}
+
+void erdma_qp_cm_drop(struct erdma_qp *qp)
+{
+ struct erdma_cep *cep = qp->cep;
+
+ if (!qp->cep)
+ return;
+
+ erdma_cep_set_inuse(cep);
+
+ /* already closed. */
+ if (cep->state == ERDMA_EPSTATE_CLOSED)
+ goto out;
+
+ if (cep->cm_id) {
+ switch (cep->state) {
+ case ERDMA_EPSTATE_AWAIT_MPAREP:
+ erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
+ -EINVAL);
+ break;
+ case ERDMA_EPSTATE_RDMA_MODE:
+ erdma_cm_upcall(cep, IW_CM_EVENT_CLOSE, 0);
+ break;
+ case ERDMA_EPSTATE_IDLE:
+ case ERDMA_EPSTATE_LISTENING:
+ case ERDMA_EPSTATE_CONNECTING:
+ case ERDMA_EPSTATE_AWAIT_MPAREQ:
+ case ERDMA_EPSTATE_RECVD_MPAREQ:
+ case ERDMA_EPSTATE_CLOSED:
+ default:
+ break;
+ }
+ cep->cm_id->rem_ref(cep->cm_id);
+ cep->cm_id = NULL;
+ erdma_cep_put(cep);
+ }
+ cep->state = ERDMA_EPSTATE_CLOSED;
+
+ if (cep->sock) {
+ erdma_socket_disassoc(cep->sock);
+ sock_release(cep->sock);
+ cep->sock = NULL;
+ }
+
+ if (cep->qp) {
+ cep->qp = NULL;
+ erdma_qp_put(qp);
+ }
+out:
+ erdma_cep_set_free(cep);
+}
+
+void erdma_cep_put(struct erdma_cep *cep)
+{
+ WARN_ON(kref_read(&cep->ref) < 1);
+ kref_put(&cep->ref, __erdma_cep_dealloc);
+}
+
+void erdma_cep_get(struct erdma_cep *cep)
+{
+ kref_get(&cep->ref);
+}
+
+static int erdma_send_mpareqrep(struct erdma_cep *cep, const void *pdata,
+ u8 pd_len)
+{
+ struct socket *s = cep->sock;
+ struct mpa_rr *rr = &cep->mpa.hdr;
+ struct kvec iov[3];
+ struct msghdr msg;
+ int iovec_num = 0;
+ int ret;
+ int mpa_len;
+
+ memset(&msg, 0, sizeof(msg));
+
+ rr->params.pd_len = cpu_to_be16(pd_len);
+
+ iov[iovec_num].iov_base = rr;
+ iov[iovec_num].iov_len = sizeof(*rr);
+ iovec_num++;
+ mpa_len = sizeof(*rr);
+
+ iov[iovec_num].iov_base = &cep->mpa.ext_data;
+ iov[iovec_num].iov_len = sizeof(cep->mpa.ext_data);
+ iovec_num++;
+ mpa_len += sizeof(cep->mpa.ext_data);
+
+ if (pd_len) {
+ iov[iovec_num].iov_base = (char *)pdata;
+ iov[iovec_num].iov_len = pd_len;
+ mpa_len += pd_len;
+ iovec_num++;
+ }
+
+ ret = kernel_sendmsg(s, &msg, iov, iovec_num, mpa_len);
+
+ return ret < 0 ? ret : 0;
+}
+
+static inline int ksock_recv(struct socket *sock, char *buf, size_t size,
+ int flags)
+{
+ struct kvec iov = { buf, size };
+ struct msghdr msg = { .msg_name = NULL, .msg_flags = flags };
+
+ return kernel_recvmsg(sock, &msg, &iov, 1, size, flags);
+}
+
+static int __recv_mpa_hdr(struct erdma_cep *cep, int hdr_rcvd, char *hdr,
+ int hdr_size, int *rcvd_out)
+{
+ struct socket *s = cep->sock;
+ int rcvd;
+
+ *rcvd_out = 0;
+ if (hdr_rcvd < hdr_size) {
+ rcvd = ksock_recv(s, hdr + hdr_rcvd, hdr_size - hdr_rcvd,
+ MSG_DONTWAIT);
+ if (rcvd == -EAGAIN)
+ return -EAGAIN;
+
+ if (rcvd <= 0)
+ return -ECONNABORTED;
+
+ hdr_rcvd += rcvd;
+ *rcvd_out = rcvd;
+
+ if (hdr_rcvd < hdr_size)
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static void __mpa_rr_set_revision(__be16 *bits, u8 rev)
+{
+ *bits = (*bits & ~MPA_RR_MASK_REVISION) |
+ (cpu_to_be16(rev) & MPA_RR_MASK_REVISION);
+}
+
+static u8 __mpa_rr_revision(__be16 mpa_rr_bits)
+{
+ __be16 rev = mpa_rr_bits & MPA_RR_MASK_REVISION;
+
+ return (u8)be16_to_cpu(rev);
+}
+
+static void __mpa_ext_set_cc(__be32 *bits, u32 cc)
+{
+ *bits = (*bits & ~MPA_EXT_FLAG_CC) |
+ (cpu_to_be32(cc) & MPA_EXT_FLAG_CC);
+}
+
+static u8 __mpa_ext_cc(__be32 mpa_ext_bits)
+{
+ __be32 cc = mpa_ext_bits & MPA_EXT_FLAG_CC;
+
+ return (u8)be32_to_cpu(cc);
+}
+
+/*
+ * Receive MPA Request/Reply header.
+ *
+ * Returns 0 if the complete MPA Request/Reply header, including any
+ * private data, was received. Returns -EAGAIN if the header was only
+ * partially received, or a negative error code otherwise.
+ *
+ * Context: May be called in process context only
+ */
+static int erdma_recv_mpa_rr(struct erdma_cep *cep)
+{
+ struct mpa_rr *hdr = &cep->mpa.hdr;
+ struct socket *s = cep->sock;
+ u16 pd_len;
+ int rcvd, to_rcv, ret, pd_rcvd;
+
+ if (cep->mpa.bytes_rcvd < sizeof(struct mpa_rr)) {
+ ret = __recv_mpa_hdr(cep, cep->mpa.bytes_rcvd,
+ (char *)&cep->mpa.hdr,
+ sizeof(struct mpa_rr), &rcvd);
+ cep->mpa.bytes_rcvd += rcvd;
+ if (ret)
+ return ret;
+ }
+
+ if (be16_to_cpu(hdr->params.pd_len) > MPA_MAX_PRIVDATA ||
+ __mpa_rr_revision(hdr->params.bits) != MPA_REVISION_EXT_1)
+ return -EPROTO;
+
+ if (cep->mpa.bytes_rcvd - sizeof(struct mpa_rr) <
+ sizeof(struct erdma_mpa_ext)) {
+ ret = __recv_mpa_hdr(
+ cep, cep->mpa.bytes_rcvd - sizeof(struct mpa_rr),
+ (char *)&cep->mpa.ext_data,
+ sizeof(struct erdma_mpa_ext), &rcvd);
+ cep->mpa.bytes_rcvd += rcvd;
+ if (ret)
+ return ret;
+ }
+
+ pd_len = be16_to_cpu(hdr->params.pd_len);
+ pd_rcvd = cep->mpa.bytes_rcvd - sizeof(struct mpa_rr) -
+ sizeof(struct erdma_mpa_ext);
+ to_rcv = pd_len - pd_rcvd;
+
+ if (!to_rcv) {
+ /*
+ * We have received the whole MPA Request/Reply message.
+ * Check for a peer protocol violation.
+ */
+ u32 word;
+
+ ret = __recv_mpa_hdr(cep, 0, (char *)&word, sizeof(word),
+ &rcvd);
+ if (ret == -EAGAIN && rcvd == 0)
+ return 0;
+
+ if (ret)
+ return ret;
+
+ return -EPROTO;
+ }
+
+ /*
+ * At this point, MPA header has been fully received, and pd_len != 0.
+ * So, begin to receive private data.
+ */
+ if (!cep->mpa.pdata) {
+ cep->mpa.pdata = kmalloc(pd_len + 4, GFP_KERNEL);
+ if (!cep->mpa.pdata)
+ return -ENOMEM;
+ }
+
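+ /*
+ * Read up to 4 bytes beyond the announced private data length so that
+ * a peer sending more data than advertised is caught as a protocol
+ * violation below.
+ */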
+ rcvd = ksock_recv(s, cep->mpa.pdata + pd_rcvd, to_rcv + 4,
+ MSG_DONTWAIT);
+ if (rcvd < 0)
+ return rcvd;
+
+ if (rcvd > to_rcv)
+ return -EPROTO;
+
+ cep->mpa.bytes_rcvd += rcvd;
+
+ if (to_rcv == rcvd)
+ return 0;
+
+ return -EAGAIN;
+}
+
+/*
+ * erdma_proc_mpareq()
+ *
+ * Read MPA Request from socket and signal new connection to IWCM
+ * if successful. The caller must hold the lock on the corresponding
+ * listening CEP.
+ */
+static int erdma_proc_mpareq(struct erdma_cep *cep)
+{
+ struct mpa_rr *req;
+ int ret;
+
+ ret = erdma_recv_mpa_rr(cep);
+ if (ret)
+ return ret;
+
+ req = &cep->mpa.hdr;
+
+ if (memcmp(req->key, MPA_KEY_REQ, MPA_KEY_SIZE))
+ return -EPROTO;
+
+ memcpy(req->key, MPA_KEY_REP, MPA_KEY_SIZE);
+
+ /* Markers and CRC are currently not supported. */
+ if (req->params.bits & MPA_RR_FLAG_MARKERS ||
+ req->params.bits & MPA_RR_FLAG_CRC)
+ goto reject_conn;
+
+ cep->state = ERDMA_EPSTATE_RECVD_MPAREQ;
+
+ /* Keep reference until IWCM accepts/rejects */
+ erdma_cep_get(cep);
+ ret = erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REQUEST, 0);
+ if (ret)
+ erdma_cep_put(cep);
+
+ return ret;
+
+reject_conn:
+ req->params.bits &= ~MPA_RR_FLAG_MARKERS;
+ req->params.bits |= MPA_RR_FLAG_REJECT;
+ req->params.bits &= ~MPA_RR_FLAG_CRC;
+
+ kfree(cep->mpa.pdata);
+ cep->mpa.pdata = NULL;
+ erdma_send_mpareqrep(cep, NULL, 0);
+
+ return -EOPNOTSUPP;
+}
+
+static int erdma_proc_mpareply(struct erdma_cep *cep)
+{
+ struct erdma_qp_attrs qp_attrs;
+ struct erdma_qp *qp = cep->qp;
+ struct mpa_rr *rep;
+ int ret;
+
+ ret = erdma_recv_mpa_rr(cep);
+ if (ret)
+ goto out_err;
+
+ erdma_cancel_mpatimer(cep);
+
+ rep = &cep->mpa.hdr;
+
+ if (memcmp(rep->key, MPA_KEY_REP, MPA_KEY_SIZE)) {
+ ret = -EPROTO;
+ goto out_err;
+ }
+
+ if (rep->params.bits & MPA_RR_FLAG_REJECT) {
+ erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -ECONNRESET);
+ return -ECONNRESET;
+ }
+
+ /* Markers and CRC are currently not supported. */
+ if ((rep->params.bits & MPA_RR_FLAG_MARKERS) ||
+ (rep->params.bits & MPA_RR_FLAG_CRC)) {
+ erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -ECONNREFUSED);
+ return -EINVAL;
+ }
+
+ memset(&qp_attrs, 0, sizeof(qp_attrs));
+ qp_attrs.irq_size = cep->ird;
+ qp_attrs.orq_size = cep->ord;
+ qp_attrs.state = ERDMA_QP_STATE_RTS;
+
+ down_write(&qp->state_lock);
+ if (qp->attrs.state > ERDMA_QP_STATE_RTR) {
+ ret = -EINVAL;
+ up_write(&qp->state_lock);
+ goto out_err;
+ }
+
+ qp->attrs.qp_type = ERDMA_QP_ACTIVE;
+ if (__mpa_ext_cc(cep->mpa.ext_data.bits) != qp->attrs.cc)
+ qp->attrs.cc = COMPROMISE_CC;
+
+ ret = erdma_modify_qp_internal(qp, &qp_attrs,
+ ERDMA_QP_ATTR_STATE |
+ ERDMA_QP_ATTR_LLP_HANDLE |
+ ERDMA_QP_ATTR_MPA);
+
+ up_write(&qp->state_lock);
+
+ if (!ret) {
+ ret = erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, 0);
+ if (!ret)
+ cep->state = ERDMA_EPSTATE_RDMA_MODE;
+
+ return 0;
+ }
+
+out_err:
+ if (ret != -EAGAIN)
+ erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -EINVAL);
+
+ return ret;
+}
+
+static void erdma_accept_newconn(struct erdma_cep *cep)
+{
+ struct socket *s = cep->sock;
+ struct socket *new_s = NULL;
+ struct erdma_cep *new_cep = NULL;
+ int ret = 0;
+
+ if (cep->state != ERDMA_EPSTATE_LISTENING)
+ goto error;
+
+ new_cep = erdma_cep_alloc(cep->dev);
+ if (!new_cep)
+ goto error;
+
+ /*
+ * 4: Allocate a sufficient number of work elements
+ * to allow concurrent handling of local + peer close
+ * events, MPA header processing + MPA timeout.
+ */
+ if (erdma_cm_alloc_work(new_cep, 4) != 0)
+ goto error;
+
+ /*
+ * Copy saved socket callbacks from listening CEP
+ * and associate the new socket with the new CEP.
+ */
+ new_cep->sk_state_change = cep->sk_state_change;
+ new_cep->sk_data_ready = cep->sk_data_ready;
+ new_cep->sk_error_report = cep->sk_error_report;
+
+ ret = kernel_accept(s, &new_s, O_NONBLOCK);
+ if (ret != 0)
+ goto error;
+
+ new_cep->sock = new_s;
+ erdma_cep_get(new_cep);
+ new_s->sk->sk_user_data = new_cep;
+
+ tcp_sock_set_nodelay(new_s->sk);
+ new_cep->state = ERDMA_EPSTATE_AWAIT_MPAREQ;
+
+ ret = erdma_cm_queue_work(new_cep, ERDMA_CM_WORK_MPATIMEOUT);
+ if (ret)
+ goto error;
+
+ new_cep->listen_cep = cep;
+ erdma_cep_get(cep);
+
+ if (atomic_read(&new_s->sk->sk_rmem_alloc)) {
+ /* MPA REQ already queued */
+ erdma_cep_set_inuse(new_cep);
+ ret = erdma_proc_mpareq(new_cep);
+ if (ret != -EAGAIN) {
+ erdma_cep_put(cep);
+ new_cep->listen_cep = NULL;
+ if (ret) {
+ erdma_cep_set_free(new_cep);
+ goto error;
+ }
+ }
+ erdma_cep_set_free(new_cep);
+ }
+ return;
+
+error:
+ if (new_cep) {
+ new_cep->state = ERDMA_EPSTATE_CLOSED;
+ erdma_cancel_mpatimer(new_cep);
+
+ erdma_cep_put(new_cep);
+ new_cep->sock = NULL;
+ }
+
+ if (new_s) {
+ erdma_socket_disassoc(new_s);
+ sock_release(new_s);
+ }
+}
+
+static int erdma_newconn_connected(struct erdma_cep *cep)
+{
+ int ret = 0;
+
+ cep->mpa.hdr.params.bits = 0;
+ __mpa_rr_set_revision(&cep->mpa.hdr.params.bits, MPA_REVISION_EXT_1);
+
+ memcpy(cep->mpa.hdr.key, MPA_KEY_REQ, MPA_KEY_SIZE);
+ cep->mpa.ext_data.cookie = cpu_to_be32(cep->qp->attrs.cookie);
+ __mpa_ext_set_cc(&cep->mpa.ext_data.bits, cep->qp->attrs.cc);
+
+ ret = erdma_send_mpareqrep(cep, cep->private_data, cep->pd_len);
+ cep->state = ERDMA_EPSTATE_AWAIT_MPAREP;
+ cep->mpa.hdr.params.pd_len = 0;
+
+ if (ret >= 0)
+ ret = erdma_cm_queue_work(cep, ERDMA_CM_WORK_MPATIMEOUT);
+
+ return ret;
+}
+
+static void erdma_cm_work_handler(struct work_struct *w)
+{
+ struct erdma_cm_work *work;
+ struct erdma_cep *cep;
+ int release_cep = 0, ret = 0;
+
+ work = container_of(w, struct erdma_cm_work, work.work);
+ cep = work->cep;
+
+ erdma_cep_set_inuse(cep);
+
+ switch (work->type) {
+ case ERDMA_CM_WORK_CONNECTED:
+ erdma_cancel_mpatimer(cep);
+ if (cep->state == ERDMA_EPSTATE_CONNECTING) {
+ ret = erdma_newconn_connected(cep);
+ if (ret) {
+ erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
+ -EIO);
+ release_cep = 1;
+ }
+ }
+ break;
+ case ERDMA_CM_WORK_CONNECTTIMEOUT:
+ if (cep->state == ERDMA_EPSTATE_CONNECTING) {
+ cep->mpa_timer = NULL;
+ erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
+ -ETIMEDOUT);
+ release_cep = 1;
+ }
+ break;
+ case ERDMA_CM_WORK_ACCEPT:
+ erdma_accept_newconn(cep);
+ break;
+ case ERDMA_CM_WORK_READ_MPAHDR:
+ if (cep->state == ERDMA_EPSTATE_AWAIT_MPAREQ) {
+ if (cep->listen_cep) {
+ erdma_cep_set_inuse(cep->listen_cep);
+
+ if (cep->listen_cep->state ==
+ ERDMA_EPSTATE_LISTENING)
+ ret = erdma_proc_mpareq(cep);
+ else
+ ret = -EFAULT;
+
+ erdma_cep_set_free(cep->listen_cep);
+
+ if (ret != -EAGAIN) {
+ erdma_cep_put(cep->listen_cep);
+ cep->listen_cep = NULL;
+ if (ret)
+ erdma_cep_put(cep);
+ }
+ }
+ } else if (cep->state == ERDMA_EPSTATE_AWAIT_MPAREP) {
+ ret = erdma_proc_mpareply(cep);
+ }
+
+ if (ret && ret != -EAGAIN)
+ release_cep = 1;
+ break;
+ case ERDMA_CM_WORK_CLOSE_LLP:
+ if (cep->cm_id)
+ erdma_cm_upcall(cep, IW_CM_EVENT_CLOSE, 0);
+ release_cep = 1;
+ break;
+ case ERDMA_CM_WORK_PEER_CLOSE:
+ if (cep->cm_id) {
+ if (cep->state == ERDMA_EPSTATE_CONNECTING ||
+ cep->state == ERDMA_EPSTATE_AWAIT_MPAREP) {
+ /*
+ * MPA reply not received, but connection dropped.
+ */
+ erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
+ -ECONNRESET);
+ } else if (cep->state == ERDMA_EPSTATE_RDMA_MODE) {
+ /*
+ * NOTE: IW_CM_EVENT_DISCONNECT is given just
+ * to transition IWCM into CLOSING.
+ */
+ erdma_cm_upcall(cep, IW_CM_EVENT_DISCONNECT, 0);
+ erdma_cm_upcall(cep, IW_CM_EVENT_CLOSE, 0);
+ }
+ } else if (cep->state == ERDMA_EPSTATE_AWAIT_MPAREQ) {
+ /* Socket close before MPA request received. */
+ erdma_disassoc_listen_cep(cep);
+ erdma_cep_put(cep);
+ }
+ release_cep = 1;
+ break;
+ case ERDMA_CM_WORK_MPATIMEOUT:
+ cep->mpa_timer = NULL;
+ if (cep->state == ERDMA_EPSTATE_AWAIT_MPAREP) {
+ /*
+ * MPA request timed out:
+ * Hide any partially received private data and signal
+ * timeout
+ */
+ cep->mpa.hdr.params.pd_len = 0;
+
+ if (cep->cm_id)
+ erdma_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
+ -ETIMEDOUT);
+ release_cep = 1;
+ } else if (cep->state == ERDMA_EPSTATE_AWAIT_MPAREQ) {
+ /* No MPA req received after peer TCP stream setup. */
+ erdma_disassoc_listen_cep(cep);
+
+ erdma_cep_put(cep);
+ release_cep = 1;
+ }
+ break;
+ default:
+ WARN(1, "Undefined CM work type: %d\n", work->type);
+ }
+
+ if (release_cep) {
+ erdma_cancel_mpatimer(cep);
+ cep->state = ERDMA_EPSTATE_CLOSED;
+ if (cep->qp) {
+ struct erdma_qp *qp = cep->qp;
+ /*
+ * Serialize a potential race with application
+ * closing the QP and calling erdma_qp_cm_drop()
+ */
+ erdma_qp_get(qp);
+ erdma_cep_set_free(cep);
+
+ erdma_qp_llp_close(qp);
+ erdma_qp_put(qp);
+
+ erdma_cep_set_inuse(cep);
+ cep->qp = NULL;
+ erdma_qp_put(qp);
+ }
+
+ if (cep->sock) {
+ erdma_socket_disassoc(cep->sock);
+ sock_release(cep->sock);
+ cep->sock = NULL;
+ }
+
+ if (cep->cm_id) {
+ cep->cm_id->rem_ref(cep->cm_id);
+ cep->cm_id = NULL;
+ if (cep->state != ERDMA_EPSTATE_LISTENING)
+ erdma_cep_put(cep);
+ }
+ }
+ erdma_cep_set_free(cep);
+ erdma_put_work(work);
+ erdma_cep_put(cep);
+}
+
+int erdma_cm_queue_work(struct erdma_cep *cep, enum erdma_work_type type)
+{
+ struct erdma_cm_work *work = erdma_get_work(cep);
+ unsigned long delay = 0;
+
+ if (!work)
+ return -ENOMEM;
+
+ work->type = type;
+ work->cep = cep;
+
+ erdma_cep_get(cep);
+
+ INIT_DELAYED_WORK(&work->work, erdma_cm_work_handler);
+
+ if (type == ERDMA_CM_WORK_MPATIMEOUT) {
+ cep->mpa_timer = work;
+
+ if (cep->state == ERDMA_EPSTATE_AWAIT_MPAREP)
+ delay = MPAREP_TIMEOUT;
+ else
+ delay = MPAREQ_TIMEOUT;
+ } else if (type == ERDMA_CM_WORK_CONNECTTIMEOUT) {
+ cep->mpa_timer = work;
+
+ delay = CONNECT_TIMEOUT;
+ }
+
+ queue_delayed_work(erdma_cm_wq, &work->work, delay);
+
+ return 0;
+}
+
+static void erdma_cm_llp_data_ready(struct sock *sk)
+{
+ struct erdma_cep *cep;
+
+ read_lock(&sk->sk_callback_lock);
+
+ cep = sk_to_cep(sk);
+ if (!cep)
+ goto out;
+
+ if (cep->state == ERDMA_EPSTATE_AWAIT_MPAREQ ||
+ cep->state == ERDMA_EPSTATE_AWAIT_MPAREP)
+ erdma_cm_queue_work(cep, ERDMA_CM_WORK_READ_MPAHDR);
+
+out:
+ read_unlock(&sk->sk_callback_lock);
+}
+
+static void erdma_cm_llp_error_report(struct sock *sk)
+{
+ struct erdma_cep *cep = sk_to_cep(sk);
+
+ if (cep)
+ cep->sk_error_report(sk);
+}
+
+static void erdma_cm_llp_state_change(struct sock *sk)
+{
+ struct erdma_cep *cep;
+ void (*orig_state_change)(struct sock *sk);
+
+ read_lock(&sk->sk_callback_lock);
+
+ cep = sk_to_cep(sk);
+ if (!cep) {
+ read_unlock(&sk->sk_callback_lock);
+ return;
+ }
+ orig_state_change = cep->sk_state_change;
+
+ switch (sk->sk_state) {
+ case TCP_ESTABLISHED:
+ if (cep->state == ERDMA_EPSTATE_CONNECTING)
+ erdma_cm_queue_work(cep, ERDMA_CM_WORK_CONNECTED);
+ else
+ erdma_cm_queue_work(cep, ERDMA_CM_WORK_ACCEPT);
+ break;
+ case TCP_CLOSE:
+ case TCP_CLOSE_WAIT:
+ if (cep->state != ERDMA_EPSTATE_LISTENING)
+ erdma_cm_queue_work(cep, ERDMA_CM_WORK_PEER_CLOSE);
+ break;
+ default:
+ break;
+ }
+ read_unlock(&sk->sk_callback_lock);
+ orig_state_change(sk);
+}
+
+static int kernel_bindconnect(struct socket *s, struct sockaddr *laddr,
+ int laddrlen, struct sockaddr *raddr,
+ int raddrlen, int flags)
+{
+ int ret;
+
+ sock_set_reuseaddr(s->sk);
+ ret = s->ops->bind(s, laddr, laddrlen);
+ if (ret)
+ return ret;
+ ret = s->ops->connect(s, raddr, raddrlen, flags);
+ return ret < 0 ? ret : 0;
+}
+
+int erdma_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params)
+{
+ struct erdma_dev *dev = to_edev(id->device);
+ struct erdma_qp *qp;
+ struct erdma_cep *cep = NULL;
+ struct socket *s = NULL;
+ struct sockaddr *laddr = (struct sockaddr *)&id->m_local_addr;
+ struct sockaddr *raddr = (struct sockaddr *)&id->m_remote_addr;
+ u16 pd_len = params->private_data_len;
+ int ret;
+
+ if (pd_len > MPA_MAX_PRIVDATA)
+ return -EINVAL;
+
+ if (params->ird > dev->attrs.max_ird ||
+ params->ord > dev->attrs.max_ord)
+ return -EINVAL;
+
+ if (laddr->sa_family != AF_INET || raddr->sa_family != AF_INET)
+ return -EAFNOSUPPORT;
+
+ qp = find_qp_by_qpn(dev, params->qpn);
+ if (!qp)
+ return -ENOENT;
+ erdma_qp_get(qp);
+
+ ret = sock_create(AF_INET, SOCK_STREAM, IPPROTO_TCP, &s);
+ if (ret < 0)
+ goto error_put_qp;
+
+ cep = erdma_cep_alloc(dev);
+ if (!cep) {
+ ret = -ENOMEM;
+ goto error_release_sock;
+ }
+
+ erdma_cep_set_inuse(cep);
+
+ /* Associate QP with CEP */
+ erdma_cep_get(cep);
+ qp->cep = cep;
+ cep->qp = qp;
+
+ /* Associate cm_id with CEP */
+ id->add_ref(id);
+ cep->cm_id = id;
+
+ /*
+ * 6: Allocate a sufficient number of work elements
+ * to allow concurrent handling of local + peer close
+ * events, MPA header processing + MPA timeout, connected event
+ * and connect timeout.
+ */
+ ret = erdma_cm_alloc_work(cep, 6);
+ if (ret != 0) {
+ ret = -ENOMEM;
+ goto error_release_cep;
+ }
+
+ cep->ird = params->ird;
+ cep->ord = params->ord;
+ cep->state = ERDMA_EPSTATE_CONNECTING;
+
+ erdma_cep_socket_assoc(cep, s);
+
+ if (pd_len) {
+ cep->pd_len = pd_len;
+ cep->private_data = kmalloc(pd_len, GFP_KERNEL);
+ if (!cep->private_data) {
+ ret = -ENOMEM;
+ goto error_disassoc;
+ }
+
+ memcpy(cep->private_data, params->private_data,
+ params->private_data_len);
+ }
+
+ ret = kernel_bindconnect(s, laddr, sizeof(*laddr), raddr,
+ sizeof(*raddr), O_NONBLOCK);
+ if (ret != -EINPROGRESS && ret != 0) {
+ goto error_disassoc;
+ } else if (ret == 0) {
+ ret = erdma_cm_queue_work(cep, ERDMA_CM_WORK_CONNECTED);
+ if (ret)
+ goto error_disassoc;
+ } else {
+ ret = erdma_cm_queue_work(cep, ERDMA_CM_WORK_CONNECTTIMEOUT);
+ if (ret)
+ goto error_disassoc;
+ }
+
+ erdma_cep_set_free(cep);
+ return 0;
+
+error_disassoc:
+ kfree(cep->private_data);
+ cep->private_data = NULL;
+ cep->pd_len = 0;
+
+ erdma_socket_disassoc(s);
+
+error_release_cep:
+ /* disassoc with cm_id */
+ cep->cm_id = NULL;
+ id->rem_ref(id);
+
+ /* disassoc with qp */
+ qp->cep = NULL;
+ erdma_cep_put(cep);
+ cep->qp = NULL;
+
+ cep->state = ERDMA_EPSTATE_CLOSED;
+
+ erdma_cep_set_free(cep);
+
+ /* release the cep. */
+ erdma_cep_put(cep);
+
+error_release_sock:
+ if (s)
+ sock_release(s);
+error_put_qp:
+ erdma_qp_put(qp);
+
+ return ret;
+}
+
+int erdma_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
+{
+ struct erdma_dev *dev = to_edev(id->device);
+ struct erdma_cep *cep = (struct erdma_cep *)id->provider_data;
+ struct erdma_qp *qp;
+ struct erdma_qp_attrs qp_attrs;
+ int ret;
+
+ erdma_cep_set_inuse(cep);
+ erdma_cep_put(cep);
+
+ /* Free lingering inbound private data */
+ if (cep->mpa.hdr.params.pd_len) {
+ cep->mpa.hdr.params.pd_len = 0;
+ kfree(cep->mpa.pdata);
+ cep->mpa.pdata = NULL;
+ }
+ erdma_cancel_mpatimer(cep);
+
+ if (cep->state != ERDMA_EPSTATE_RECVD_MPAREQ) {
+ erdma_cep_set_free(cep);
+ erdma_cep_put(cep);
+
+ return -ECONNRESET;
+ }
+
+ qp = find_qp_by_qpn(dev, params->qpn);
+ if (!qp)
+ return -ENOENT;
+ erdma_qp_get(qp);
+
+ down_write(&qp->state_lock);
+ if (qp->attrs.state > ERDMA_QP_STATE_RTR) {
+ ret = -EINVAL;
+ up_write(&qp->state_lock);
+ goto error;
+ }
+
+ if (params->ord > dev->attrs.max_ord ||
+ params->ird > dev->attrs.max_ord) {
+ ret = -EINVAL;
+ up_write(&qp->state_lock);
+ goto error;
+ }
+
+ if (params->private_data_len > MPA_MAX_PRIVDATA) {
+ ret = -EINVAL;
+ up_write(&qp->state_lock);
+ goto error;
+ }
+
+ cep->ird = params->ird;
+ cep->ord = params->ord;
+
+ cep->cm_id = id;
+ id->add_ref(id);
+
+ memset(&qp_attrs, 0, sizeof(qp_attrs));
+ qp_attrs.orq_size = params->ord;
+ qp_attrs.irq_size = params->ird;
+
+ qp_attrs.state = ERDMA_QP_STATE_RTS;
+
+ /* Associate QP with CEP */
+ erdma_cep_get(cep);
+ qp->cep = cep;
+ cep->qp = qp;
+
+ cep->state = ERDMA_EPSTATE_RDMA_MODE;
+
+ qp->attrs.qp_type = ERDMA_QP_PASSIVE;
+ qp->attrs.pd_len = params->private_data_len;
+
+ if (qp->attrs.cc != __mpa_ext_cc(cep->mpa.ext_data.bits))
+ qp->attrs.cc = COMPROMISE_CC;
+
+ /* move to rts */
+ ret = erdma_modify_qp_internal(qp, &qp_attrs,
+ ERDMA_QP_ATTR_STATE |
+ ERDMA_QP_ATTR_ORD |
+ ERDMA_QP_ATTR_LLP_HANDLE |
+ ERDMA_QP_ATTR_IRD |
+ ERDMA_QP_ATTR_MPA);
+ up_write(&qp->state_lock);
+
+ if (ret)
+ goto error;
+
+ cep->mpa.ext_data.bits = 0;
+ __mpa_ext_set_cc(&cep->mpa.ext_data.bits, qp->attrs.cc);
+ cep->mpa.ext_data.cookie = cpu_to_be32(cep->qp->attrs.cookie);
+
+ ret = erdma_send_mpareqrep(cep, params->private_data,
+ params->private_data_len);
+ if (!ret) {
+ ret = erdma_cm_upcall(cep, IW_CM_EVENT_ESTABLISHED, 0);
+ if (ret)
+ goto error;
+
+ erdma_cep_set_free(cep);
+
+ return 0;
+ }
+
+error:
+ erdma_socket_disassoc(cep->sock);
+ sock_release(cep->sock);
+ cep->sock = NULL;
+
+ cep->state = ERDMA_EPSTATE_CLOSED;
+
+ if (cep->cm_id) {
+ cep->cm_id->rem_ref(id);
+ cep->cm_id = NULL;
+ }
+
+ if (qp->cep) {
+ erdma_cep_put(cep);
+ qp->cep = NULL;
+ }
+
+ cep->qp = NULL;
+ erdma_qp_put(qp);
+
+ erdma_cep_set_free(cep);
+ erdma_cep_put(cep);
+
+ return ret;
+}
+
+int erdma_reject(struct iw_cm_id *id, const void *pdata, u8 plen)
+{
+ struct erdma_cep *cep = (struct erdma_cep *)id->provider_data;
+
+ erdma_cep_set_inuse(cep);
+ erdma_cep_put(cep);
+
+ erdma_cancel_mpatimer(cep);
+
+ if (cep->state != ERDMA_EPSTATE_RECVD_MPAREQ) {
+ erdma_cep_set_free(cep);
+ erdma_cep_put(cep);
+
+ return -ECONNRESET;
+ }
+
+ if (__mpa_rr_revision(cep->mpa.hdr.params.bits) == MPA_REVISION_EXT_1) {
+ cep->mpa.hdr.params.bits |= MPA_RR_FLAG_REJECT; /* reject */
+ erdma_send_mpareqrep(cep, pdata, plen);
+ }
+
+ erdma_socket_disassoc(cep->sock);
+ sock_release(cep->sock);
+ cep->sock = NULL;
+
+ cep->state = ERDMA_EPSTATE_CLOSED;
+
+ erdma_cep_set_free(cep);
+ erdma_cep_put(cep);
+
+ return 0;
+}
+
+int erdma_create_listen(struct iw_cm_id *id, int backlog)
+{
+ struct socket *s;
+ struct erdma_cep *cep = NULL;
+ int ret = 0;
+ struct erdma_dev *dev = to_edev(id->device);
+ int addr_family = id->local_addr.ss_family;
+ struct sockaddr_in *laddr = &to_sockaddr_in(id->local_addr);
+
+ if (addr_family != AF_INET)
+ return -EAFNOSUPPORT;
+
+ ret = sock_create(addr_family, SOCK_STREAM, IPPROTO_TCP, &s);
+ if (ret < 0)
+ return ret;
+
+ sock_set_reuseaddr(s->sk);
+
+ /* For wildcard addr, limit binding to current device only */
+ if (ipv4_is_zeronet(laddr->sin_addr.s_addr))
+ s->sk->sk_bound_dev_if = dev->netdev->ifindex;
+
+ ret = s->ops->bind(s, (struct sockaddr *)laddr,
+ sizeof(struct sockaddr_in));
+ if (ret)
+ goto error;
+
+ cep = erdma_cep_alloc(dev);
+ if (!cep) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ erdma_cep_socket_assoc(cep, s);
+
+ ret = erdma_cm_alloc_work(cep, backlog);
+ if (ret)
+ goto error;
+
+ ret = s->ops->listen(s, backlog);
+ if (ret)
+ goto error;
+
+ cep->cm_id = id;
+ id->add_ref(id);
+
+ if (!id->provider_data) {
+ id->provider_data =
+ kmalloc(sizeof(struct list_head), GFP_KERNEL);
+ if (!id->provider_data) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ INIT_LIST_HEAD((struct list_head *)id->provider_data);
+ }
+
+ list_add_tail(&cep->listenq, (struct list_head *)id->provider_data);
+ cep->state = ERDMA_EPSTATE_LISTENING;
+
+ return 0;
+
+error:
+ if (cep) {
+ erdma_cep_set_inuse(cep);
+
+ if (cep->cm_id) {
+ cep->cm_id->rem_ref(cep->cm_id);
+ cep->cm_id = NULL;
+ }
+ cep->sock = NULL;
+ erdma_socket_disassoc(s);
+ cep->state = ERDMA_EPSTATE_CLOSED;
+
+ erdma_cep_set_free(cep);
+ erdma_cep_put(cep);
+ }
+ sock_release(s);
+
+ return ret;
+}
+
+static void erdma_drop_listeners(struct iw_cm_id *id)
+{
+ struct list_head *p, *tmp;
+ /*
+ * In case of a wildcard rdma_listen on a multi-homed device,
+ * a listener's IWCM id is associated with more than one listening CEP.
+ */
+ list_for_each_safe(p, tmp, (struct list_head *)id->provider_data) {
+ struct erdma_cep *cep =
+ list_entry(p, struct erdma_cep, listenq);
+
+ list_del(p);
+
+ erdma_cep_set_inuse(cep);
+
+ if (cep->cm_id) {
+ cep->cm_id->rem_ref(cep->cm_id);
+ cep->cm_id = NULL;
+ }
+ if (cep->sock) {
+ erdma_socket_disassoc(cep->sock);
+ sock_release(cep->sock);
+ cep->sock = NULL;
+ }
+ cep->state = ERDMA_EPSTATE_CLOSED;
+ erdma_cep_set_free(cep);
+ erdma_cep_put(cep);
+ }
+}
+
+int erdma_destroy_listen(struct iw_cm_id *id)
+{
+ if (!id->provider_data)
+ return 0;
+
+ erdma_drop_listeners(id);
+ kfree(id->provider_data);
+ id->provider_data = NULL;
+
+ return 0;
+}
+
+int erdma_cm_init(void)
+{
+ erdma_cm_wq = create_singlethread_workqueue("erdma_cm_wq");
+ if (!erdma_cm_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void erdma_cm_exit(void)
+{
+ if (erdma_cm_wq)
+ destroy_workqueue(erdma_cm_wq);
+}
diff --git a/drivers/infiniband/hw/erdma/erdma_cm.h b/drivers/infiniband/hw/erdma/erdma_cm.h
new file mode 100644
index 000000000000..8a3f998fec9b
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_cm.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
+/* Greg Joyce <greg@opengridcomputing.com> */
+/* Copyright (c) 2008-2019, IBM Corporation */
+/* Copyright (c) 2017, Open Grid Computing, Inc. */
+
+#ifndef __ERDMA_CM_H__
+#define __ERDMA_CM_H__
+
+#include <linux/tcp.h>
+#include <net/sock.h>
+#include <rdma/iw_cm.h>
+
+/* iWARP MPA protocol defs */
+#define MPA_REVISION_EXT_1 129
+#define MPA_MAX_PRIVDATA RDMA_MAX_PRIVATE_DATA
+#define MPA_KEY_REQ "MPA ID Req Frame"
+#define MPA_KEY_REP "MPA ID Rep Frame"
+#define MPA_KEY_SIZE 16
+#define MPA_DEFAULT_HDR_LEN 28
+
+struct mpa_rr_params {
+ __be16 bits;
+ __be16 pd_len;
+};
+
+/*
+ * MPA request/response Hdr bits & fields
+ */
+enum {
+ MPA_RR_FLAG_MARKERS = __cpu_to_be16(0x8000),
+ MPA_RR_FLAG_CRC = __cpu_to_be16(0x4000),
+ MPA_RR_FLAG_REJECT = __cpu_to_be16(0x2000),
+ MPA_RR_RESERVED = __cpu_to_be16(0x1f00),
+ MPA_RR_MASK_REVISION = __cpu_to_be16(0x00ff)
+};
+
+/*
+ * MPA request/reply header
+ */
+struct mpa_rr {
+ u8 key[16];
+ struct mpa_rr_params params;
+};
+
+struct erdma_mpa_ext {
+ __be32 cookie;
+ __be32 bits;
+};
+
+enum {
+ MPA_EXT_FLAG_CC = cpu_to_be32(0x0000000f),
+};
+
+struct erdma_mpa_info {
+ struct mpa_rr hdr; /* peer MPA hdr in network byte order */
+ struct erdma_mpa_ext ext_data;
+ char *pdata;
+ int bytes_rcvd;
+};
+
+struct erdma_sk_upcalls {
+ void (*sk_state_change)(struct sock *sk);
+ void (*sk_data_ready)(struct sock *sk, int bytes);
+ void (*sk_error_report)(struct sock *sk);
+};
+
+struct erdma_dev;
+
+enum erdma_cep_state {
+ ERDMA_EPSTATE_IDLE = 1,
+ ERDMA_EPSTATE_LISTENING,
+ ERDMA_EPSTATE_CONNECTING,
+ ERDMA_EPSTATE_AWAIT_MPAREQ,
+ ERDMA_EPSTATE_RECVD_MPAREQ,
+ ERDMA_EPSTATE_AWAIT_MPAREP,
+ ERDMA_EPSTATE_RDMA_MODE,
+ ERDMA_EPSTATE_CLOSED
+};
+
+struct erdma_cep {
+ struct iw_cm_id *cm_id;
+ struct erdma_dev *dev;
+ struct list_head devq;
+ spinlock_t lock;
+ struct kref ref;
+ int in_use;
+ wait_queue_head_t waitq;
+ enum erdma_cep_state state;
+
+ struct list_head listenq;
+ struct erdma_cep *listen_cep;
+
+ struct erdma_qp *qp;
+ struct socket *sock;
+
+ struct erdma_cm_work *mpa_timer;
+ struct list_head work_freelist;
+
+ struct erdma_mpa_info mpa;
+ int ord;
+ int ird;
+
+ int pd_len;
+ /* Holds the user's private data. */
+ void *private_data;
+
+ /* Saved upcalls of socket llp.sock */
+ void (*sk_state_change)(struct sock *sk);
+ void (*sk_data_ready)(struct sock *sk);
+ void (*sk_error_report)(struct sock *sk);
+};
+
+#define MPAREQ_TIMEOUT (HZ * 20)
+#define MPAREP_TIMEOUT (HZ * 10)
+#define CONNECT_TIMEOUT (HZ * 10)
+
+enum erdma_work_type {
+ ERDMA_CM_WORK_ACCEPT = 1,
+ ERDMA_CM_WORK_READ_MPAHDR,
+ ERDMA_CM_WORK_CLOSE_LLP, /* close socket */
+ ERDMA_CM_WORK_PEER_CLOSE, /* socket indicated peer close */
+ ERDMA_CM_WORK_MPATIMEOUT,
+ ERDMA_CM_WORK_CONNECTED,
+ ERDMA_CM_WORK_CONNECTTIMEOUT
+};
+
+struct erdma_cm_work {
+ struct delayed_work work;
+ struct list_head list;
+ enum erdma_work_type type;
+ struct erdma_cep *cep;
+};
+
+#define to_sockaddr_in(a) (*(struct sockaddr_in *)(&(a)))
+
+static inline int getname_peer(struct socket *s, struct sockaddr_storage *a)
+{
+ return s->ops->getname(s, (struct sockaddr *)a, 1);
+}
+
+static inline int getname_local(struct socket *s, struct sockaddr_storage *a)
+{
+ return s->ops->getname(s, (struct sockaddr *)a, 0);
+}
+
+int erdma_connect(struct iw_cm_id *id, struct iw_cm_conn_param *param);
+int erdma_accept(struct iw_cm_id *id, struct iw_cm_conn_param *param);
+int erdma_reject(struct iw_cm_id *id, const void *pdata, u8 plen);
+int erdma_create_listen(struct iw_cm_id *id, int backlog);
+int erdma_destroy_listen(struct iw_cm_id *id);
+
+void erdma_cep_get(struct erdma_cep *cep);
+void erdma_cep_put(struct erdma_cep *cep);
+int erdma_cm_queue_work(struct erdma_cep *cep, enum erdma_work_type type);
+
+int erdma_cm_init(void);
+void erdma_cm_exit(void);
+
+#define sk_to_cep(sk) ((struct erdma_cep *)((sk)->sk_user_data))
+
+#endif
diff --git a/drivers/infiniband/hw/erdma/erdma_cmdq.c b/drivers/infiniband/hw/erdma/erdma_cmdq.c
new file mode 100644
index 000000000000..57da0c670472
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_cmdq.c
@@ -0,0 +1,493 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#include "erdma.h"
+#include "erdma_hw.h"
+#include "erdma_verbs.h"
+
+static void arm_cmdq_cq(struct erdma_cmdq *cmdq)
+{
+ struct erdma_dev *dev = container_of(cmdq, struct erdma_dev, cmdq);
+ u64 db_data = FIELD_PREP(ERDMA_CQDB_CI_MASK, cmdq->cq.ci) |
+ FIELD_PREP(ERDMA_CQDB_ARM_MASK, 1) |
+ FIELD_PREP(ERDMA_CQDB_CMDSN_MASK, cmdq->cq.cmdsn) |
+ FIELD_PREP(ERDMA_CQDB_IDX_MASK, cmdq->cq.cmdsn);
+
+ *cmdq->cq.db_record = db_data;
+ writeq(db_data, dev->func_bar + ERDMA_CMDQ_CQDB_REG);
+
+ atomic64_inc(&cmdq->cq.armed_num);
+}
+
+static void kick_cmdq_db(struct erdma_cmdq *cmdq)
+{
+ struct erdma_dev *dev = container_of(cmdq, struct erdma_dev, cmdq);
+ u64 db_data = FIELD_PREP(ERDMA_CMD_HDR_WQEBB_INDEX_MASK, cmdq->sq.pi);
+
+ *cmdq->sq.db_record = db_data;
+ writeq(db_data, dev->func_bar + ERDMA_CMDQ_SQDB_REG);
+}
+
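+/*
+ * Reserve a free completion-wait slot from the pool. Returns
+ * ERR_PTR(-ENOMEM) when all max_outstandings slots are in use.
+ */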
+static struct erdma_comp_wait *get_comp_wait(struct erdma_cmdq *cmdq)
+{
+ int comp_idx;
+
+ spin_lock(&cmdq->lock);
+ comp_idx = find_first_zero_bit(cmdq->comp_wait_bitmap,
+ cmdq->max_outstandings);
+ if (comp_idx == cmdq->max_outstandings) {
+ spin_unlock(&cmdq->lock);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ __set_bit(comp_idx, cmdq->comp_wait_bitmap);
+ spin_unlock(&cmdq->lock);
+
+ return &cmdq->wait_pool[comp_idx];
+}
+
+static void put_comp_wait(struct erdma_cmdq *cmdq,
+ struct erdma_comp_wait *comp_wait)
+{
+ int used;
+
+ cmdq->wait_pool[comp_wait->ctx_id].cmd_status = ERDMA_CMD_STATUS_INIT;
+ spin_lock(&cmdq->lock);
+ used = __test_and_clear_bit(comp_wait->ctx_id, cmdq->comp_wait_bitmap);
+ spin_unlock(&cmdq->lock);
+
+ WARN_ON(!used);
+}
+
+static int erdma_cmdq_wait_res_init(struct erdma_dev *dev,
+ struct erdma_cmdq *cmdq)
+{
+ int i;
+
+ cmdq->wait_pool =
+ devm_kcalloc(&dev->pdev->dev, cmdq->max_outstandings,
+ sizeof(struct erdma_comp_wait), GFP_KERNEL);
+ if (!cmdq->wait_pool)
+ return -ENOMEM;
+
+ spin_lock_init(&cmdq->lock);
+ cmdq->comp_wait_bitmap = devm_bitmap_zalloc(
+ &dev->pdev->dev, cmdq->max_outstandings, GFP_KERNEL);
+ if (!cmdq->comp_wait_bitmap)
+ return -ENOMEM;
+
+ for (i = 0; i < cmdq->max_outstandings; i++) {
+ init_completion(&cmdq->wait_pool[i].wait_event);
+ cmdq->wait_pool[i].ctx_id = i;
+ }
+
+ return 0;
+}
+
+static int erdma_cmdq_sq_init(struct erdma_dev *dev)
+{
+ struct erdma_cmdq *cmdq = &dev->cmdq;
+ struct erdma_cmdq_sq *sq = &cmdq->sq;
+ u32 buf_size;
+
+ sq->wqebb_cnt = SQEBB_COUNT(ERDMA_CMDQ_SQE_SIZE);
+ sq->depth = cmdq->max_outstandings * sq->wqebb_cnt;
+
+ buf_size = sq->depth << SQEBB_SHIFT;
+
+ sq->qbuf =
+ dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
+ &sq->qbuf_dma_addr, GFP_KERNEL);
+ if (!sq->qbuf)
+ return -ENOMEM;
+
+ sq->db_record = (u64 *)(sq->qbuf + buf_size);
+
+ spin_lock_init(&sq->lock);
+
+ erdma_reg_write32(dev, ERDMA_REGS_CMDQ_SQ_ADDR_H_REG,
+ upper_32_bits(sq->qbuf_dma_addr));
+ erdma_reg_write32(dev, ERDMA_REGS_CMDQ_SQ_ADDR_L_REG,
+ lower_32_bits(sq->qbuf_dma_addr));
+ erdma_reg_write32(dev, ERDMA_REGS_CMDQ_DEPTH_REG, sq->depth);
+ erdma_reg_write64(dev, ERDMA_CMDQ_SQ_DB_HOST_ADDR_REG,
+ sq->qbuf_dma_addr + buf_size);
+
+ return 0;
+}
+
+static int erdma_cmdq_cq_init(struct erdma_dev *dev)
+{
+ struct erdma_cmdq *cmdq = &dev->cmdq;
+ struct erdma_cmdq_cq *cq = &cmdq->cq;
+ u32 buf_size;
+
+ cq->depth = cmdq->sq.depth;
+ buf_size = cq->depth << CQE_SHIFT;
+
+ cq->qbuf =
+ dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
+ &cq->qbuf_dma_addr, GFP_KERNEL | __GFP_ZERO);
+ if (!cq->qbuf)
+ return -ENOMEM;
+
+ spin_lock_init(&cq->lock);
+
+ cq->db_record = (u64 *)(cq->qbuf + buf_size);
+
+ atomic64_set(&cq->armed_num, 0);
+
+ erdma_reg_write32(dev, ERDMA_REGS_CMDQ_CQ_ADDR_H_REG,
+ upper_32_bits(cq->qbuf_dma_addr));
+ erdma_reg_write32(dev, ERDMA_REGS_CMDQ_CQ_ADDR_L_REG,
+ lower_32_bits(cq->qbuf_dma_addr));
+ erdma_reg_write64(dev, ERDMA_CMDQ_CQ_DB_HOST_ADDR_REG,
+ cq->qbuf_dma_addr + buf_size);
+
+ return 0;
+}
+
+static int erdma_cmdq_eq_init(struct erdma_dev *dev)
+{
+ struct erdma_cmdq *cmdq = &dev->cmdq;
+ struct erdma_eq *eq = &cmdq->eq;
+ u32 buf_size;
+
+ eq->depth = cmdq->max_outstandings;
+ buf_size = eq->depth << EQE_SHIFT;
+
+ eq->qbuf =
+ dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
+ &eq->qbuf_dma_addr, GFP_KERNEL | __GFP_ZERO);
+ if (!eq->qbuf)
+ return -ENOMEM;
+
+ spin_lock_init(&eq->lock);
+ atomic64_set(&eq->event_num, 0);
+
+ eq->db_addr =
+ (u64 __iomem *)(dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG);
+ eq->db_record = (u64 *)(eq->qbuf + buf_size);
+
+ erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_ADDR_H_REG,
+ upper_32_bits(eq->qbuf_dma_addr));
+ erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_ADDR_L_REG,
+ lower_32_bits(eq->qbuf_dma_addr));
+ erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_DEPTH_REG, eq->depth);
+ erdma_reg_write64(dev, ERDMA_CMDQ_EQ_DB_HOST_ADDR_REG,
+ eq->qbuf_dma_addr + buf_size);
+
+ return 0;
+}
+
+int erdma_cmdq_init(struct erdma_dev *dev)
+{
+ int err, i;
+ struct erdma_cmdq *cmdq = &dev->cmdq;
+ u32 sts, ctrl;
+
+ cmdq->max_outstandings = ERDMA_CMDQ_MAX_OUTSTANDING;
+ cmdq->use_event = false;
+
+ sema_init(&cmdq->credits, cmdq->max_outstandings);
+
+ err = erdma_cmdq_wait_res_init(dev, cmdq);
+ if (err)
+ return err;
+
+ err = erdma_cmdq_sq_init(dev);
+ if (err)
+ return err;
+
+ err = erdma_cmdq_cq_init(dev);
+ if (err)
+ goto err_destroy_sq;
+
+ err = erdma_cmdq_eq_init(dev);
+ if (err)
+ goto err_destroy_cq;
+
+ ctrl = FIELD_PREP(ERDMA_REG_DEV_CTRL_INIT_MASK, 1);
+ erdma_reg_write32(dev, ERDMA_REGS_DEV_CTRL_REG, ctrl);
+
+ for (i = 0; i < ERDMA_WAIT_DEV_DONE_CNT; i++) {
+ sts = erdma_reg_read32_filed(dev, ERDMA_REGS_DEV_ST_REG,
+ ERDMA_REG_DEV_ST_INIT_DONE_MASK);
+ if (sts)
+ break;
+
+ msleep(ERDMA_REG_ACCESS_WAIT_MS);
+ }
+
+ if (i == ERDMA_WAIT_DEV_DONE_CNT) {
+ dev_err(&dev->pdev->dev, "wait init done failed.\n");
+ err = -ETIMEDOUT;
+ goto err_destroy_eq;
+ }
+
+ set_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);
+
+ return 0;
+
+err_destroy_eq:
+ dma_free_coherent(&dev->pdev->dev,
+ (cmdq->eq.depth << EQE_SHIFT) +
+ ERDMA_EXTRA_BUFFER_SIZE,
+ cmdq->eq.qbuf, cmdq->eq.qbuf_dma_addr);
+
+err_destroy_cq:
+ dma_free_coherent(&dev->pdev->dev,
+ (cmdq->cq.depth << CQE_SHIFT) +
+ ERDMA_EXTRA_BUFFER_SIZE,
+ cmdq->cq.qbuf, cmdq->cq.qbuf_dma_addr);
+
+err_destroy_sq:
+ dma_free_coherent(&dev->pdev->dev,
+ (cmdq->sq.depth << SQEBB_SHIFT) +
+ ERDMA_EXTRA_BUFFER_SIZE,
+ cmdq->sq.qbuf, cmdq->sq.qbuf_dma_addr);
+
+ return err;
+}
+
+void erdma_finish_cmdq_init(struct erdma_dev *dev)
+{
+ /* After the device has been initialized successfully, switch the cmdq to event mode. */
+ dev->cmdq.use_event = true;
+ arm_cmdq_cq(&dev->cmdq);
+}
+
+void erdma_cmdq_destroy(struct erdma_dev *dev)
+{
+ struct erdma_cmdq *cmdq = &dev->cmdq;
+
+ clear_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);
+
+ dma_free_coherent(&dev->pdev->dev,
+ (cmdq->eq.depth << EQE_SHIFT) +
+ ERDMA_EXTRA_BUFFER_SIZE,
+ cmdq->eq.qbuf, cmdq->eq.qbuf_dma_addr);
+ dma_free_coherent(&dev->pdev->dev,
+ (cmdq->sq.depth << SQEBB_SHIFT) +
+ ERDMA_EXTRA_BUFFER_SIZE,
+ cmdq->sq.qbuf, cmdq->sq.qbuf_dma_addr);
+ dma_free_coherent(&dev->pdev->dev,
+ (cmdq->cq.depth << CQE_SHIFT) +
+ ERDMA_EXTRA_BUFFER_SIZE,
+ cmdq->cq.qbuf, cmdq->cq.qbuf_dma_addr);
+}
+
+static void *get_next_valid_cmdq_cqe(struct erdma_cmdq *cmdq)
+{
+ __be32 *cqe = get_queue_entry(cmdq->cq.qbuf, cmdq->cq.ci,
+ cmdq->cq.depth, CQE_SHIFT);
+ u32 owner = FIELD_GET(ERDMA_CQE_HDR_OWNER_MASK,
+ __be32_to_cpu(READ_ONCE(*cqe)));
+
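+ /*
+ * The CQE is new only when its owner bit differs from the wrap-phase
+ * bit of the consumer index.
+ */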
+ return owner ^ !!(cmdq->cq.ci & cmdq->cq.depth) ? cqe : NULL;
+}
+
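+/*
+ * Copy the request into the SQ, then rewrite its header with the new
+ * producer index, the wait-context cookie and the WQEBB count before
+ * ringing the doorbell.
+ */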
+static void push_cmdq_sqe(struct erdma_cmdq *cmdq, u64 *req, size_t req_len,
+ struct erdma_comp_wait *comp_wait)
+{
+ __le64 *wqe;
+ u64 hdr = *req;
+
+ comp_wait->cmd_status = ERDMA_CMD_STATUS_ISSUED;
+ reinit_completion(&comp_wait->wait_event);
+ comp_wait->sq_pi = cmdq->sq.pi;
+
+ wqe = get_queue_entry(cmdq->sq.qbuf, cmdq->sq.pi, cmdq->sq.depth,
+ SQEBB_SHIFT);
+ memcpy(wqe, req, req_len);
+
+ cmdq->sq.pi += cmdq->sq.wqebb_cnt;
+ hdr |= FIELD_PREP(ERDMA_CMD_HDR_WQEBB_INDEX_MASK, cmdq->sq.pi) |
+ FIELD_PREP(ERDMA_CMD_HDR_CONTEXT_COOKIE_MASK,
+ comp_wait->ctx_id) |
+ FIELD_PREP(ERDMA_CMD_HDR_WQEBB_CNT_MASK, cmdq->sq.wqebb_cnt - 1);
+ *wqe = cpu_to_le64(hdr);
+
+ kick_cmdq_db(cmdq);
+}
+
+static int erdma_poll_single_cmd_completion(struct erdma_cmdq *cmdq)
+{
+ struct erdma_comp_wait *comp_wait;
+ u32 hdr0, sqe_idx;
+ __be32 *cqe;
+ u16 ctx_id;
+ u64 *sqe;
+ int i;
+
+ cqe = get_next_valid_cmdq_cqe(cmdq);
+ if (!cqe)
+ return -EAGAIN;
+
+ cmdq->cq.ci++;
+
+ dma_rmb();
+ hdr0 = __be32_to_cpu(*cqe);
+ sqe_idx = __be32_to_cpu(*(cqe + 1));
+
+ sqe = get_queue_entry(cmdq->sq.qbuf, sqe_idx, cmdq->sq.depth,
+ SQEBB_SHIFT);
+ ctx_id = FIELD_GET(ERDMA_CMD_HDR_CONTEXT_COOKIE_MASK, *sqe);
+ comp_wait = &cmdq->wait_pool[ctx_id];
+ if (comp_wait->cmd_status != ERDMA_CMD_STATUS_ISSUED)
+ return -EIO;
+
+ comp_wait->cmd_status = ERDMA_CMD_STATUS_FINISHED;
+ comp_wait->comp_status = FIELD_GET(ERDMA_CQE_HDR_SYNDROME_MASK, hdr0);
+ cmdq->sq.ci += cmdq->sq.wqebb_cnt;
+
+ for (i = 0; i < 4; i++)
+ comp_wait->comp_data[i] = __be32_to_cpu(*(cqe + 2 + i));
+
+ if (cmdq->use_event)
+ complete(&comp_wait->wait_event);
+
+ return 0;
+}
+
+static void erdma_polling_cmd_completions(struct erdma_cmdq *cmdq)
+{
+ unsigned long flags;
+ u16 comp_num;
+
+ spin_lock_irqsave(&cmdq->cq.lock, flags);
+
+ /*
+ * At most max_outstandings completions can be pending at any
+ * one time.
+ */
+ for (comp_num = 0; comp_num < cmdq->max_outstandings; comp_num++)
+ if (erdma_poll_single_cmd_completion(cmdq))
+ break;
+
+ if (comp_num && cmdq->use_event)
+ arm_cmdq_cq(cmdq);
+
+ spin_unlock_irqrestore(&cmdq->cq.lock, flags);
+}
+
+void erdma_cmdq_completion_handler(struct erdma_cmdq *cmdq)
+{
+ int got_event = 0;
+
+ if (!test_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state) ||
+ !cmdq->use_event)
+ return;
+
+ while (get_next_valid_eqe(&cmdq->eq)) {
+ cmdq->eq.ci++;
+ got_event++;
+ }
+
+ if (got_event) {
+ cmdq->cq.cmdsn++;
+ erdma_polling_cmd_completions(cmdq);
+ }
+
+ notify_eq(&cmdq->eq);
+}
+
+static int erdma_poll_cmd_completion(struct erdma_comp_wait *comp_ctx,
+ struct erdma_cmdq *cmdq, u32 timeout)
+{
+ unsigned long comp_timeout = jiffies + msecs_to_jiffies(timeout);
+
+ while (1) {
+ erdma_polling_cmd_completions(cmdq);
+ if (comp_ctx->cmd_status != ERDMA_CMD_STATUS_ISSUED)
+ break;
+
+ if (time_is_before_jiffies(comp_timeout))
+ return -ETIME;
+
+ msleep(20);
+ }
+
+ return 0;
+}
+
+static int erdma_wait_cmd_completion(struct erdma_comp_wait *comp_ctx,
+ struct erdma_cmdq *cmdq, u32 timeout)
+{
+ unsigned long flags = 0;
+
+ wait_for_completion_timeout(&comp_ctx->wait_event,
+ msecs_to_jiffies(timeout));
+
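+ /* Mark a missing completion as timed out under the CQ lock so a late CQE is not matched. */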
+ if (unlikely(comp_ctx->cmd_status != ERDMA_CMD_STATUS_FINISHED)) {
+ spin_lock_irqsave(&cmdq->cq.lock, flags);
+ comp_ctx->cmd_status = ERDMA_CMD_STATUS_TIMEOUT;
+ spin_unlock_irqrestore(&cmdq->cq.lock, flags);
+ return -ETIME;
+ }
+
+ return 0;
+}
+
+void erdma_cmdq_build_reqhdr(u64 *hdr, u32 mod, u32 op)
+{
+ *hdr = FIELD_PREP(ERDMA_CMD_HDR_SUB_MOD_MASK, mod) |
+ FIELD_PREP(ERDMA_CMD_HDR_OPCODE_MASK, op);
+}
+
+int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, u64 *req, u32 req_size,
+ u64 *resp0, u64 *resp1)
+{
+ struct erdma_comp_wait *comp_wait;
+ int ret;
+
+ if (!test_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state))
+ return -ENODEV;
+
+ down(&cmdq->credits);
+
+ comp_wait = get_comp_wait(cmdq);
+ if (IS_ERR(comp_wait)) {
+ clear_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);
+ set_bit(ERDMA_CMDQ_STATE_CTX_ERR_BIT, &cmdq->state);
+ up(&cmdq->credits);
+ return PTR_ERR(comp_wait);
+ }
+
+ spin_lock(&cmdq->sq.lock);
+ push_cmdq_sqe(cmdq, req, req_size, comp_wait);
+ spin_unlock(&cmdq->sq.lock);
+
+ if (cmdq->use_event)
+ ret = erdma_wait_cmd_completion(comp_wait, cmdq,
+ ERDMA_CMDQ_TIMEOUT_MS);
+ else
+ ret = erdma_poll_cmd_completion(comp_wait, cmdq,
+ ERDMA_CMDQ_TIMEOUT_MS);
+
+ if (ret) {
+ set_bit(ERDMA_CMDQ_STATE_TIMEOUT_BIT, &cmdq->state);
+ clear_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);
+ goto out;
+ }
+
+ if (comp_wait->comp_status)
+ ret = -EIO;
+
+ if (resp0 && resp1) {
+ *resp0 = *((u64 *)&comp_wait->comp_data[0]);
+ *resp1 = *((u64 *)&comp_wait->comp_data[2]);
+ }
+ put_comp_wait(cmdq, comp_wait);
+
+out:
+ up(&cmdq->credits);
+
+ return ret;
+}
diff --git a/drivers/infiniband/hw/erdma/erdma_cq.c b/drivers/infiniband/hw/erdma/erdma_cq.c
new file mode 100644
index 000000000000..751c7f9f0de7
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_cq.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+#include <rdma/ib_verbs.h>
+
+#include "erdma_hw.h"
+#include "erdma_verbs.h"
+
+static void *get_next_valid_cqe(struct erdma_cq *cq)
+{
+ __be32 *cqe = get_queue_entry(cq->kern_cq.qbuf, cq->kern_cq.ci,
+ cq->depth, CQE_SHIFT);
+ u32 owner = FIELD_GET(ERDMA_CQE_HDR_OWNER_MASK,
+ __be32_to_cpu(READ_ONCE(*cqe)));
+
+ return owner ^ !!(cq->kern_cq.ci & cq->depth) ? cqe : NULL;
+}
+
+static void notify_cq(struct erdma_cq *cq, u8 solicited)
+{
+ u64 db_data =
+ FIELD_PREP(ERDMA_CQDB_IDX_MASK, (cq->kern_cq.notify_cnt)) |
+ FIELD_PREP(ERDMA_CQDB_CQN_MASK, cq->cqn) |
+ FIELD_PREP(ERDMA_CQDB_ARM_MASK, 1) |
+ FIELD_PREP(ERDMA_CQDB_SOL_MASK, solicited) |
+ FIELD_PREP(ERDMA_CQDB_CMDSN_MASK, cq->kern_cq.cmdsn) |
+ FIELD_PREP(ERDMA_CQDB_CI_MASK, cq->kern_cq.ci);
+
+ *cq->kern_cq.db_record = db_data;
+ writeq(db_data, cq->kern_cq.db);
+}
+
+int erdma_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
+{
+ struct erdma_cq *cq = to_ecq(ibcq);
+ unsigned long irq_flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&cq->kern_cq.lock, irq_flags);
+
+ notify_cq(cq, (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
+
+ if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && get_next_valid_cqe(cq))
+ ret = 1;
+
+ cq->kern_cq.notify_cnt++;
+
+ spin_unlock_irqrestore(&cq->kern_cq.lock, irq_flags);
+
+ return ret;
+}
+
+static const enum ib_wc_opcode wc_mapping_table[ERDMA_NUM_OPCODES] = {
+ [ERDMA_OP_WRITE] = IB_WC_RDMA_WRITE,
+ [ERDMA_OP_READ] = IB_WC_RDMA_READ,
+ [ERDMA_OP_SEND] = IB_WC_SEND,
+ [ERDMA_OP_SEND_WITH_IMM] = IB_WC_SEND,
+ [ERDMA_OP_RECEIVE] = IB_WC_RECV,
+ [ERDMA_OP_RECV_IMM] = IB_WC_RECV_RDMA_WITH_IMM,
+ [ERDMA_OP_RECV_INV] = IB_WC_RECV,
+ [ERDMA_OP_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
+ [ERDMA_OP_INVALIDATE] = IB_WC_LOCAL_INV,
+ [ERDMA_OP_RSP_SEND_IMM] = IB_WC_RECV,
+ [ERDMA_OP_SEND_WITH_INV] = IB_WC_SEND,
+ [ERDMA_OP_REG_MR] = IB_WC_REG_MR,
+ [ERDMA_OP_LOCAL_INV] = IB_WC_LOCAL_INV,
+ [ERDMA_OP_READ_WITH_INV] = IB_WC_RDMA_READ,
+};
+
+static const struct {
+ enum erdma_wc_status erdma;
+ enum ib_wc_status base;
+ enum erdma_vendor_err vendor;
+} map_cqe_status[ERDMA_NUM_WC_STATUS] = {
+ { ERDMA_WC_SUCCESS, IB_WC_SUCCESS, ERDMA_WC_VENDOR_NO_ERR },
+ { ERDMA_WC_GENERAL_ERR, IB_WC_GENERAL_ERR, ERDMA_WC_VENDOR_NO_ERR },
+ { ERDMA_WC_RECV_WQE_FORMAT_ERR, IB_WC_GENERAL_ERR,
+ ERDMA_WC_VENDOR_INVALID_RQE },
+ { ERDMA_WC_RECV_STAG_INVALID_ERR, IB_WC_REM_ACCESS_ERR,
+ ERDMA_WC_VENDOR_RQE_INVALID_STAG },
+ { ERDMA_WC_RECV_ADDR_VIOLATION_ERR, IB_WC_REM_ACCESS_ERR,
+ ERDMA_WC_VENDOR_RQE_ADDR_VIOLATION },
+ { ERDMA_WC_RECV_RIGHT_VIOLATION_ERR, IB_WC_REM_ACCESS_ERR,
+ ERDMA_WC_VENDOR_RQE_ACCESS_RIGHT_ERR },
+ { ERDMA_WC_RECV_PDID_ERR, IB_WC_REM_ACCESS_ERR,
+ ERDMA_WC_VENDOR_RQE_INVALID_PD },
+ { ERDMA_WC_RECV_WARRPING_ERR, IB_WC_REM_ACCESS_ERR,
+ ERDMA_WC_VENDOR_RQE_WRAP_ERR },
+ { ERDMA_WC_SEND_WQE_FORMAT_ERR, IB_WC_LOC_QP_OP_ERR,
+ ERDMA_WC_VENDOR_INVALID_SQE },
+ { ERDMA_WC_SEND_WQE_ORD_EXCEED, IB_WC_GENERAL_ERR,
+ ERDMA_WC_VENDOR_ZERO_ORD },
+ { ERDMA_WC_SEND_STAG_INVALID_ERR, IB_WC_LOC_ACCESS_ERR,
+ ERDMA_WC_VENDOR_SQE_INVALID_STAG },
+ { ERDMA_WC_SEND_ADDR_VIOLATION_ERR, IB_WC_LOC_ACCESS_ERR,
+ ERDMA_WC_VENDOR_SQE_ADDR_VIOLATION },
+ { ERDMA_WC_SEND_RIGHT_VIOLATION_ERR, IB_WC_LOC_ACCESS_ERR,
+ ERDMA_WC_VENDOR_SQE_ACCESS_ERR },
+ { ERDMA_WC_SEND_PDID_ERR, IB_WC_LOC_ACCESS_ERR,
+ ERDMA_WC_VENDOR_SQE_INVALID_PD },
+ { ERDMA_WC_SEND_WARRPING_ERR, IB_WC_LOC_ACCESS_ERR,
+ ERDMA_WC_VENDOR_SQE_WARP_ERR },
+ { ERDMA_WC_FLUSH_ERR, IB_WC_WR_FLUSH_ERR, ERDMA_WC_VENDOR_NO_ERR },
+ { ERDMA_WC_RETRY_EXC_ERR, IB_WC_RETRY_EXC_ERR, ERDMA_WC_VENDOR_NO_ERR },
+};
+
+#define ERDMA_POLLCQ_NO_QP 1
+
+static int erdma_poll_one_cqe(struct erdma_cq *cq, struct ib_wc *wc)
+{
+ struct erdma_dev *dev = to_edev(cq->ibcq.device);
+ u8 opcode, syndrome, qtype;
+ struct erdma_kqp *kern_qp;
+ struct erdma_cqe *cqe;
+ struct erdma_qp *qp;
+ u16 wqe_idx, depth;
+ u32 qpn, cqe_hdr;
+ u64 *id_table;
+ u64 *wqe_hdr;
+
+ cqe = get_next_valid_cqe(cq);
+ if (!cqe)
+ return -EAGAIN;
+
+ cq->kern_cq.ci++;
+
+ /* The CQE payload must not be read before the owner bit check. */
+ dma_rmb();
+
+ qpn = be32_to_cpu(cqe->qpn);
+ wqe_idx = be32_to_cpu(cqe->qe_idx);
+ cqe_hdr = be32_to_cpu(cqe->hdr);
+
+ qp = find_qp_by_qpn(dev, qpn);
+ if (!qp)
+ return ERDMA_POLLCQ_NO_QP;
+
+ kern_qp = &qp->kern_qp;
+
+ qtype = FIELD_GET(ERDMA_CQE_HDR_QTYPE_MASK, cqe_hdr);
+ syndrome = FIELD_GET(ERDMA_CQE_HDR_SYNDROME_MASK, cqe_hdr);
+ opcode = FIELD_GET(ERDMA_CQE_HDR_OPCODE_MASK, cqe_hdr);
+
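+ /* SQ completions advance the SQ consumer index by the WQEBB count stored in the original SQE. */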
+ if (qtype == ERDMA_CQE_QTYPE_SQ) {
+ id_table = kern_qp->swr_tbl;
+ depth = qp->attrs.sq_size;
+ wqe_hdr = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx,
+ qp->attrs.sq_size, SQEBB_SHIFT);
+ kern_qp->sq_ci =
+ FIELD_GET(ERDMA_SQE_HDR_WQEBB_CNT_MASK, *wqe_hdr) +
+ wqe_idx + 1;
+ } else {
+ id_table = kern_qp->rwr_tbl;
+ depth = qp->attrs.rq_size;
+ }
+ wc->wr_id = id_table[wqe_idx & (depth - 1)];
+ wc->byte_len = be32_to_cpu(cqe->size);
+
+ wc->wc_flags = 0;
+
+ wc->opcode = wc_mapping_table[opcode];
+ if (opcode == ERDMA_OP_RECV_IMM || opcode == ERDMA_OP_RSP_SEND_IMM) {
+ wc->ex.imm_data = cpu_to_be32(le32_to_cpu(cqe->imm_data));
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ } else if (opcode == ERDMA_OP_RECV_INV) {
+ wc->ex.invalidate_rkey = be32_to_cpu(cqe->inv_rkey);
+ wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+ }
+
+ if (syndrome >= ERDMA_NUM_WC_STATUS)
+ syndrome = ERDMA_WC_GENERAL_ERR;
+
+ wc->status = map_cqe_status[syndrome].base;
+ wc->vendor_err = map_cqe_status[syndrome].vendor;
+ wc->qp = &qp->ibqp;
+
+ return 0;
+}
+
+int erdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+ struct erdma_cq *cq = to_ecq(ibcq);
+ unsigned long flags;
+ int npolled, ret;
+
+ spin_lock_irqsave(&cq->kern_cq.lock, flags);
+
+ for (npolled = 0; npolled < num_entries;) {
+ ret = erdma_poll_one_cqe(cq, wc + npolled);
+
+ if (ret == -EAGAIN) /* no new CQEs to poll. */
+ break;
+ else if (ret) /* ignore invalid CQEs. */
+ continue;
+
+ npolled++;
+ }
+
+ spin_unlock_irqrestore(&cq->kern_cq.lock, flags);
+
+ return npolled;
+}
diff --git a/drivers/infiniband/hw/erdma/erdma_eq.c b/drivers/infiniband/hw/erdma/erdma_eq.c
new file mode 100644
index 000000000000..8f2d094e0227
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_eq.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#include "erdma.h"
+#include "erdma_hw.h"
+#include "erdma_verbs.h"
+
+#define MAX_POLL_CHUNK_SIZE 16
+
+void notify_eq(struct erdma_eq *eq)
+{
+ u64 db_data = FIELD_PREP(ERDMA_EQDB_CI_MASK, eq->ci) |
+ FIELD_PREP(ERDMA_EQDB_ARM_MASK, 1);
+
+ *eq->db_record = db_data;
+ writeq(db_data, eq->db_addr);
+
+ atomic64_inc(&eq->notify_num);
+}
+
+void *get_next_valid_eqe(struct erdma_eq *eq)
+{
+ u64 *eqe = get_queue_entry(eq->qbuf, eq->ci, eq->depth, EQE_SHIFT);
+ u32 owner = FIELD_GET(ERDMA_CEQE_HDR_O_MASK, READ_ONCE(*eqe));
+
+ return owner ^ !!(eq->ci & eq->depth) ? eqe : NULL;
+}
+
+void erdma_aeq_event_handler(struct erdma_dev *dev)
+{
+ struct erdma_aeqe *aeqe;
+ u32 cqn, qpn;
+ struct erdma_qp *qp;
+ struct erdma_cq *cq;
+ struct ib_event event;
+ u32 poll_cnt = 0;
+
+ memset(&event, 0, sizeof(event));
+
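+ /* Handle at most MAX_POLL_CHUNK_SIZE AEQ entries per run, dispatching CQ or QP async events. */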
+ while (poll_cnt < MAX_POLL_CHUNK_SIZE) {
+ aeqe = get_next_valid_eqe(&dev->aeq);
+ if (!aeqe)
+ break;
+
+ dma_rmb();
+
+ dev->aeq.ci++;
+ atomic64_inc(&dev->aeq.event_num);
+ poll_cnt++;
+
+ if (FIELD_GET(ERDMA_AEQE_HDR_TYPE_MASK,
+ le32_to_cpu(aeqe->hdr)) == ERDMA_AE_TYPE_CQ_ERR) {
+ cqn = le32_to_cpu(aeqe->event_data0);
+ cq = find_cq_by_cqn(dev, cqn);
+ if (!cq)
+ continue;
+
+ event.device = cq->ibcq.device;
+ event.element.cq = &cq->ibcq;
+ event.event = IB_EVENT_CQ_ERR;
+ if (cq->ibcq.event_handler)
+ cq->ibcq.event_handler(&event,
+ cq->ibcq.cq_context);
+ } else {
+ qpn = le32_to_cpu(aeqe->event_data0);
+ qp = find_qp_by_qpn(dev, qpn);
+ if (!qp)
+ continue;
+
+ event.device = qp->ibqp.device;
+ event.element.qp = &qp->ibqp;
+ event.event = IB_EVENT_QP_FATAL;
+ if (qp->ibqp.event_handler)
+ qp->ibqp.event_handler(&event,
+ qp->ibqp.qp_context);
+ }
+ }
+
+ notify_eq(&dev->aeq);
+}
+
+int erdma_aeq_init(struct erdma_dev *dev)
+{
+ struct erdma_eq *eq = &dev->aeq;
+ u32 buf_size;
+
+ eq->depth = ERDMA_DEFAULT_EQ_DEPTH;
+ buf_size = eq->depth << EQE_SHIFT;
+
+ eq->qbuf =
+ dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
+ &eq->qbuf_dma_addr, GFP_KERNEL | __GFP_ZERO);
+ if (!eq->qbuf)
+ return -ENOMEM;
+
+ spin_lock_init(&eq->lock);
+ atomic64_set(&eq->event_num, 0);
+ atomic64_set(&eq->notify_num, 0);
+
+ eq->db_addr = (u64 __iomem *)(dev->func_bar + ERDMA_REGS_AEQ_DB_REG);
+ eq->db_record = (u64 *)(eq->qbuf + buf_size);
+
+ erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_H_REG,
+ upper_32_bits(eq->qbuf_dma_addr));
+ erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_L_REG,
+ lower_32_bits(eq->qbuf_dma_addr));
+ erdma_reg_write32(dev, ERDMA_REGS_AEQ_DEPTH_REG, eq->depth);
+ erdma_reg_write64(dev, ERDMA_AEQ_DB_HOST_ADDR_REG,
+ eq->qbuf_dma_addr + buf_size);
+
+ return 0;
+}
+
+void erdma_aeq_destroy(struct erdma_dev *dev)
+{
+ struct erdma_eq *eq = &dev->aeq;
+
+ dma_free_coherent(&dev->pdev->dev,
+ WARPPED_BUFSIZE(eq->depth << EQE_SHIFT), eq->qbuf,
+ eq->qbuf_dma_addr);
+}
+
+void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb)
+{
+ struct erdma_dev *dev = ceq_cb->dev;
+ struct erdma_cq *cq;
+ u32 poll_cnt = 0;
+ u64 *ceqe;
+ int cqn;
+
+ if (!ceq_cb->ready)
+ return;
+
+ while (poll_cnt < MAX_POLL_CHUNK_SIZE) {
+ ceqe = get_next_valid_eqe(&ceq_cb->eq);
+ if (!ceqe)
+ break;
+
+ dma_rmb();
+ ceq_cb->eq.ci++;
+ poll_cnt++;
+ cqn = FIELD_GET(ERDMA_CEQE_HDR_CQN_MASK, READ_ONCE(*ceqe));
+
+ cq = find_cq_by_cqn(dev, cqn);
+ if (!cq)
+ continue;
+
+ if (rdma_is_kernel_res(&cq->ibcq.res))
+ cq->kern_cq.cmdsn++;
+
+ if (cq->ibcq.comp_handler)
+ cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+ }
+
+ notify_eq(&ceq_cb->eq);
+}
+
+static irqreturn_t erdma_intr_ceq_handler(int irq, void *data)
+{
+ struct erdma_eq_cb *ceq_cb = data;
+
+ tasklet_schedule(&ceq_cb->tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static void erdma_intr_ceq_task(unsigned long data)
+{
+ erdma_ceq_completion_handler((struct erdma_eq_cb *)data);
+}
+
+static int erdma_set_ceq_irq(struct erdma_dev *dev, u16 ceqn)
+{
+ struct erdma_eq_cb *eqc = &dev->ceqs[ceqn];
+ int err;
+
+ snprintf(eqc->irq.name, ERDMA_IRQNAME_SIZE, "erdma-ceq%u@pci:%s", ceqn,
+ pci_name(dev->pdev));
+ eqc->irq.msix_vector = pci_irq_vector(dev->pdev, ceqn + 1);
+
+ tasklet_init(&dev->ceqs[ceqn].tasklet, erdma_intr_ceq_task,
+ (unsigned long)&dev->ceqs[ceqn]);
+
+ cpumask_set_cpu(cpumask_local_spread(ceqn + 1, dev->attrs.numa_node),
+ &eqc->irq.affinity_hint_mask);
+
+ err = request_irq(eqc->irq.msix_vector, erdma_intr_ceq_handler, 0,
+ eqc->irq.name, eqc);
+ if (err) {
+ dev_err(&dev->pdev->dev, "failed to request_irq(%d)\n", err);
+ return err;
+ }
+
+ irq_set_affinity_hint(eqc->irq.msix_vector,
+ &eqc->irq.affinity_hint_mask);
+
+ return 0;
+}
+
+static void erdma_free_ceq_irq(struct erdma_dev *dev, u16 ceqn)
+{
+ struct erdma_eq_cb *eqc = &dev->ceqs[ceqn];
+
+ irq_set_affinity_hint(eqc->irq.msix_vector, NULL);
+ free_irq(eqc->irq.msix_vector, eqc);
+}
+
+static int create_eq_cmd(struct erdma_dev *dev, u32 eqn, struct erdma_eq *eq)
+{
+ struct erdma_cmdq_create_eq_req req;
+ dma_addr_t db_info_dma_addr;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
+ CMDQ_OPCODE_CREATE_EQ);
+ req.eqn = eqn;
+ req.depth = ilog2(eq->depth);
+ req.qbuf_addr = eq->qbuf_dma_addr;
+ req.qtype = ERDMA_EQ_TYPE_CEQ;
+ /* Vector index is the same as EQN. */
+ req.vector_idx = eqn;
+ db_info_dma_addr = eq->qbuf_dma_addr + (eq->depth << EQE_SHIFT);
+ req.db_dma_addr_l = lower_32_bits(db_info_dma_addr);
+ req.db_dma_addr_h = upper_32_bits(db_info_dma_addr);
+
+ return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req,
+ sizeof(struct erdma_cmdq_create_eq_req),
+ NULL, NULL);
+}
+
+static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn)
+{
+ struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
+ u32 buf_size = ERDMA_DEFAULT_EQ_DEPTH << EQE_SHIFT;
+ int ret;
+
+ eq->qbuf =
+ dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
+ &eq->qbuf_dma_addr, GFP_KERNEL | __GFP_ZERO);
+ if (!eq->qbuf)
+ return -ENOMEM;
+
+ spin_lock_init(&eq->lock);
+ atomic64_set(&eq->event_num, 0);
+ atomic64_set(&eq->notify_num, 0);
+
+ eq->depth = ERDMA_DEFAULT_EQ_DEPTH;
+ eq->db_addr =
+ (u64 __iomem *)(dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG +
+ (ceqn + 1) * ERDMA_DB_SIZE);
+ eq->db_record = (u64 *)(eq->qbuf + buf_size);
+ eq->ci = 0;
+ dev->ceqs[ceqn].dev = dev;
+
+ /* CEQs are indexed from 1; index 0 is reserved for the CMDQ EQ. */
+ ret = create_eq_cmd(dev, ceqn + 1, eq);
+ dev->ceqs[ceqn].ready = ret ? false : true;
+
+ return ret;
+}
+
+static void erdma_ceq_uninit_one(struct erdma_dev *dev, u16 ceqn)
+{
+ struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
+ u32 buf_size = ERDMA_DEFAULT_EQ_DEPTH << EQE_SHIFT;
+ struct erdma_cmdq_destroy_eq_req req;
+ int err;
+
+ dev->ceqs[ceqn].ready = 0;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
+ CMDQ_OPCODE_DESTROY_EQ);
+ /* CEQs are indexed from 1; index 0 is reserved for the CMDQ EQ. */
+ req.eqn = ceqn + 1;
+ req.qtype = ERDMA_EQ_TYPE_CEQ;
+ req.vector_idx = ceqn + 1;
+
+ err = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
+ NULL);
+ if (err)
+ return;
+
+ dma_free_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size), eq->qbuf,
+ eq->qbuf_dma_addr);
+}
+
+int erdma_ceqs_init(struct erdma_dev *dev)
+{
+ u32 i, j;
+ int err;
+
+ for (i = 0; i < dev->attrs.irq_num - 1; i++) {
+ err = erdma_ceq_init_one(dev, i);
+ if (err)
+ goto out_err;
+
+ err = erdma_set_ceq_irq(dev, i);
+ if (err) {
+ erdma_ceq_uninit_one(dev, i);
+ goto out_err;
+ }
+ }
+
+ return 0;
+
+out_err:
+ for (j = 0; j < i; j++) {
+ erdma_free_ceq_irq(dev, j);
+ erdma_ceq_uninit_one(dev, j);
+ }
+
+ return err;
+}
+
+void erdma_ceqs_uninit(struct erdma_dev *dev)
+{
+ u32 i;
+
+ for (i = 0; i < dev->attrs.irq_num - 1; i++) {
+ erdma_free_ceq_irq(dev, i);
+ erdma_ceq_uninit_one(dev, i);
+ }
+}
diff --git a/drivers/infiniband/hw/erdma/erdma_hw.h b/drivers/infiniband/hw/erdma/erdma_hw.h
new file mode 100644
index 000000000000..b210c49c669f
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_hw.h
@@ -0,0 +1,508 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+#ifndef __ERDMA_HW_H__
+#define __ERDMA_HW_H__
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+/* PCIe device related definition. */
+#define PCI_VENDOR_ID_ALIBABA 0x1ded
+
+#define ERDMA_PCI_WIDTH 64
+#define ERDMA_FUNC_BAR 0
+#define ERDMA_MISX_BAR 2
+
+#define ERDMA_BAR_MASK (BIT(ERDMA_FUNC_BAR) | BIT(ERDMA_MISX_BAR))
+
+/* MSI-X related. */
+#define ERDMA_NUM_MSIX_VEC 32U
+#define ERDMA_MSIX_VECTOR_CMDQ 0
+
+/* PCIe Bar0 Registers. */
+#define ERDMA_REGS_VERSION_REG 0x0
+#define ERDMA_REGS_DEV_CTRL_REG 0x10
+#define ERDMA_REGS_DEV_ST_REG 0x14
+#define ERDMA_REGS_NETDEV_MAC_L_REG 0x18
+#define ERDMA_REGS_NETDEV_MAC_H_REG 0x1C
+#define ERDMA_REGS_CMDQ_SQ_ADDR_L_REG 0x20
+#define ERDMA_REGS_CMDQ_SQ_ADDR_H_REG 0x24
+#define ERDMA_REGS_CMDQ_CQ_ADDR_L_REG 0x28
+#define ERDMA_REGS_CMDQ_CQ_ADDR_H_REG 0x2C
+#define ERDMA_REGS_CMDQ_DEPTH_REG 0x30
+#define ERDMA_REGS_CMDQ_EQ_DEPTH_REG 0x34
+#define ERDMA_REGS_CMDQ_EQ_ADDR_L_REG 0x38
+#define ERDMA_REGS_CMDQ_EQ_ADDR_H_REG 0x3C
+#define ERDMA_REGS_AEQ_ADDR_L_REG 0x40
+#define ERDMA_REGS_AEQ_ADDR_H_REG 0x44
+#define ERDMA_REGS_AEQ_DEPTH_REG 0x48
+#define ERDMA_REGS_GRP_NUM_REG 0x4c
+#define ERDMA_REGS_AEQ_DB_REG 0x50
+#define ERDMA_CMDQ_SQ_DB_HOST_ADDR_REG 0x60
+#define ERDMA_CMDQ_CQ_DB_HOST_ADDR_REG 0x68
+#define ERDMA_CMDQ_EQ_DB_HOST_ADDR_REG 0x70
+#define ERDMA_AEQ_DB_HOST_ADDR_REG 0x78
+#define ERDMA_REGS_STATS_TSO_IN_PKTS_REG 0x80
+#define ERDMA_REGS_STATS_TSO_OUT_PKTS_REG 0x88
+#define ERDMA_REGS_STATS_TSO_OUT_BYTES_REG 0x90
+#define ERDMA_REGS_STATS_TX_DROP_PKTS_REG 0x98
+#define ERDMA_REGS_STATS_TX_BPS_METER_DROP_PKTS_REG 0xa0
+#define ERDMA_REGS_STATS_TX_PPS_METER_DROP_PKTS_REG 0xa8
+#define ERDMA_REGS_STATS_RX_PKTS_REG 0xc0
+#define ERDMA_REGS_STATS_RX_BYTES_REG 0xc8
+#define ERDMA_REGS_STATS_RX_DROP_PKTS_REG 0xd0
+#define ERDMA_REGS_STATS_RX_BPS_METER_DROP_PKTS_REG 0xd8
+#define ERDMA_REGS_STATS_RX_PPS_METER_DROP_PKTS_REG 0xe0
+#define ERDMA_REGS_CEQ_DB_BASE_REG 0x100
+#define ERDMA_CMDQ_SQDB_REG 0x200
+#define ERDMA_CMDQ_CQDB_REG 0x300
+
+/* DEV_CTRL_REG details. */
+#define ERDMA_REG_DEV_CTRL_RESET_MASK 0x00000001
+#define ERDMA_REG_DEV_CTRL_INIT_MASK 0x00000002
+
+/* DEV_ST_REG details. */
+#define ERDMA_REG_DEV_ST_RESET_DONE_MASK 0x00000001U
+#define ERDMA_REG_DEV_ST_INIT_DONE_MASK 0x00000002U
+
+/* eRDMA PCIe DBs definition. */
+#define ERDMA_BAR_DB_SPACE_BASE 4096
+
+#define ERDMA_BAR_SQDB_SPACE_OFFSET ERDMA_BAR_DB_SPACE_BASE
+#define ERDMA_BAR_SQDB_SPACE_SIZE (384 * 1024)
+
+#define ERDMA_BAR_RQDB_SPACE_OFFSET \
+ (ERDMA_BAR_SQDB_SPACE_OFFSET + ERDMA_BAR_SQDB_SPACE_SIZE)
+#define ERDMA_BAR_RQDB_SPACE_SIZE (96 * 1024)
+
+#define ERDMA_BAR_CQDB_SPACE_OFFSET \
+ (ERDMA_BAR_RQDB_SPACE_OFFSET + ERDMA_BAR_RQDB_SPACE_SIZE)
+
+/* Doorbell page resources related. */
+/*
+ * A device can issue at most 3072 direct SQEs in parallel; the hardware
+ * organizes these into 24 groups of 128 credits each.
+ */
+#define ERDMA_DWQE_MAX_GRP_CNT 24
+#define ERDMA_DWQE_NUM_PER_GRP 128
+
+#define ERDMA_DWQE_TYPE0_CNT 64
+#define ERDMA_DWQE_TYPE1_CNT 496
+/* A type1 doorbell entry contains 2 doorbells and takes 256 bytes. */
+#define ERDMA_DWQE_TYPE1_CNT_PER_PAGE 16
+
+#define ERDMA_SDB_SHARED_PAGE_INDEX 95
+
+/* Doorbell related. */
+#define ERDMA_DB_SIZE 8
+
+#define ERDMA_CQDB_IDX_MASK GENMASK_ULL(63, 56)
+#define ERDMA_CQDB_CQN_MASK GENMASK_ULL(55, 32)
+#define ERDMA_CQDB_ARM_MASK BIT_ULL(31)
+#define ERDMA_CQDB_SOL_MASK BIT_ULL(30)
+#define ERDMA_CQDB_CMDSN_MASK GENMASK_ULL(29, 28)
+#define ERDMA_CQDB_CI_MASK GENMASK_ULL(23, 0)
+
+#define ERDMA_EQDB_ARM_MASK BIT(31)
+#define ERDMA_EQDB_CI_MASK GENMASK_ULL(23, 0)
+
+#define ERDMA_PAGE_SIZE_SUPPORT 0x7FFFF000
+
+/* WQE related. */
+#define EQE_SIZE 16
+#define EQE_SHIFT 4
+#define RQE_SIZE 32
+#define RQE_SHIFT 5
+#define CQE_SIZE 32
+#define CQE_SHIFT 5
+#define SQEBB_SIZE 32
+#define SQEBB_SHIFT 5
+#define SQEBB_MASK (~(SQEBB_SIZE - 1))
+#define SQEBB_ALIGN(size) ((size + SQEBB_SIZE - 1) & SQEBB_MASK)
+#define SQEBB_COUNT(size) (SQEBB_ALIGN(size) >> SQEBB_SHIFT)
+
+#define ERDMA_MAX_SQE_SIZE 128
+#define ERDMA_MAX_WQEBB_PER_SQE 4
+
+/* CMDQ related. */
+#define ERDMA_CMDQ_MAX_OUTSTANDING 128
+#define ERDMA_CMDQ_SQE_SIZE 64
+
+/* cmdq sub module definition. */
+enum CMDQ_WQE_SUB_MOD {
+ CMDQ_SUBMOD_RDMA = 0,
+ CMDQ_SUBMOD_COMMON = 1
+};
+
+enum CMDQ_RDMA_OPCODE {
+ CMDQ_OPCODE_QUERY_DEVICE = 0,
+ CMDQ_OPCODE_CREATE_QP = 1,
+ CMDQ_OPCODE_DESTROY_QP = 2,
+ CMDQ_OPCODE_MODIFY_QP = 3,
+ CMDQ_OPCODE_CREATE_CQ = 4,
+ CMDQ_OPCODE_DESTROY_CQ = 5,
+ CMDQ_OPCODE_REG_MR = 8,
+ CMDQ_OPCODE_DEREG_MR = 9
+};
+
+enum CMDQ_COMMON_OPCODE {
+ CMDQ_OPCODE_CREATE_EQ = 0,
+ CMDQ_OPCODE_DESTROY_EQ = 1,
+ CMDQ_OPCODE_QUERY_FW_INFO = 2,
+};
+
+/* cmdq-SQE HDR */
+#define ERDMA_CMD_HDR_WQEBB_CNT_MASK GENMASK_ULL(54, 52)
+#define ERDMA_CMD_HDR_CONTEXT_COOKIE_MASK GENMASK_ULL(47, 32)
+#define ERDMA_CMD_HDR_SUB_MOD_MASK GENMASK_ULL(25, 24)
+#define ERDMA_CMD_HDR_OPCODE_MASK GENMASK_ULL(23, 16)
+#define ERDMA_CMD_HDR_WQEBB_INDEX_MASK GENMASK_ULL(15, 0)
+
+struct erdma_cmdq_destroy_cq_req {
+ u64 hdr;
+ u32 cqn;
+};
+
+#define ERDMA_EQ_TYPE_AEQ 0
+#define ERDMA_EQ_TYPE_CEQ 1
+
+struct erdma_cmdq_create_eq_req {
+ u64 hdr;
+ u64 qbuf_addr;
+ u8 vector_idx;
+ u8 eqn;
+ u8 depth;
+ u8 qtype;
+ u32 db_dma_addr_l;
+ u32 db_dma_addr_h;
+};
+
+struct erdma_cmdq_destroy_eq_req {
+ u64 hdr;
+ u64 rsvd0;
+ u8 vector_idx;
+ u8 eqn;
+ u8 rsvd1;
+ u8 qtype;
+};
+
+/* create_cq cfg0 */
+#define ERDMA_CMD_CREATE_CQ_DEPTH_MASK GENMASK(31, 24)
+#define ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK GENMASK(23, 20)
+#define ERDMA_CMD_CREATE_CQ_CQN_MASK GENMASK(19, 0)
+
+/* create_cq cfg1 */
+#define ERDMA_CMD_CREATE_CQ_MTT_CNT_MASK GENMASK(31, 16)
+#define ERDMA_CMD_CREATE_CQ_MTT_TYPE_MASK BIT(15)
+#define ERDMA_CMD_CREATE_CQ_EQN_MASK GENMASK(9, 0)
+
+struct erdma_cmdq_create_cq_req {
+ u64 hdr;
+ u32 cfg0;
+ u32 qbuf_addr_l;
+ u32 qbuf_addr_h;
+ u32 cfg1;
+ u64 cq_db_info_addr;
+ u32 first_page_offset;
+};
+
+/* regmr/deregmr cfg0 */
+#define ERDMA_CMD_MR_VALID_MASK BIT(31)
+#define ERDMA_CMD_MR_KEY_MASK GENMASK(27, 20)
+#define ERDMA_CMD_MR_MPT_IDX_MASK GENMASK(19, 0)
+
+/* regmr cfg1 */
+#define ERDMA_CMD_REGMR_PD_MASK GENMASK(31, 12)
+#define ERDMA_CMD_REGMR_TYPE_MASK GENMASK(7, 6)
+#define ERDMA_CMD_REGMR_RIGHT_MASK GENMASK(5, 2)
+#define ERDMA_CMD_REGMR_ACC_MODE_MASK GENMASK(1, 0)
+
+/* regmr cfg2 */
+#define ERDMA_CMD_REGMR_PAGESIZE_MASK GENMASK(31, 27)
+#define ERDMA_CMD_REGMR_MTT_TYPE_MASK GENMASK(21, 20)
+#define ERDMA_CMD_REGMR_MTT_CNT_MASK GENMASK(19, 0)
+
+struct erdma_cmdq_reg_mr_req {
+ u64 hdr;
+ u32 cfg0;
+ u32 cfg1;
+ u64 start_va;
+ u32 size;
+ u32 cfg2;
+ u64 phy_addr[4];
+};
+
+struct erdma_cmdq_dereg_mr_req {
+ u64 hdr;
+ u32 cfg;
+};
+
+/* modify qp cfg */
+#define ERDMA_CMD_MODIFY_QP_STATE_MASK GENMASK(31, 24)
+#define ERDMA_CMD_MODIFY_QP_CC_MASK GENMASK(23, 20)
+#define ERDMA_CMD_MODIFY_QP_QPN_MASK GENMASK(19, 0)
+
+struct erdma_cmdq_modify_qp_req {
+ u64 hdr;
+ u32 cfg;
+ u32 cookie;
+ __be32 dip;
+ __be32 sip;
+ __be16 sport;
+ __be16 dport;
+ u32 send_nxt;
+ u32 recv_nxt;
+};
+
+/* create qp cfg0 */
+#define ERDMA_CMD_CREATE_QP_SQ_DEPTH_MASK GENMASK(31, 20)
+#define ERDMA_CMD_CREATE_QP_QPN_MASK GENMASK(19, 0)
+
+/* create qp cfg1 */
+#define ERDMA_CMD_CREATE_QP_RQ_DEPTH_MASK GENMASK(31, 20)
+#define ERDMA_CMD_CREATE_QP_PD_MASK GENMASK(19, 0)
+
+/* create qp cqn_mtt_cfg */
+#define ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK GENMASK(31, 28)
+#define ERDMA_CMD_CREATE_QP_CQN_MASK GENMASK(23, 0)
+
+/* create qp mtt_cfg */
+#define ERDMA_CMD_CREATE_QP_PAGE_OFFSET_MASK GENMASK(31, 12)
+#define ERDMA_CMD_CREATE_QP_MTT_CNT_MASK GENMASK(11, 1)
+#define ERDMA_CMD_CREATE_QP_MTT_TYPE_MASK BIT(0)
+
+#define ERDMA_CMDQ_CREATE_QP_RESP_COOKIE_MASK GENMASK_ULL(31, 0)
+
+struct erdma_cmdq_create_qp_req {
+ u64 hdr;
+ u32 cfg0;
+ u32 cfg1;
+ u32 sq_cqn_mtt_cfg;
+ u32 rq_cqn_mtt_cfg;
+ u64 sq_buf_addr;
+ u64 rq_buf_addr;
+ u32 sq_mtt_cfg;
+ u32 rq_mtt_cfg;
+ u64 sq_db_info_dma_addr;
+ u64 rq_db_info_dma_addr;
+};
+
+struct erdma_cmdq_destroy_qp_req {
+ u64 hdr;
+ u32 qpn;
+};
+
+/* cap qword 0 definition */
+#define ERDMA_CMD_DEV_CAP_MAX_CQE_MASK GENMASK_ULL(47, 40)
+#define ERDMA_CMD_DEV_CAP_MAX_RECV_WR_MASK GENMASK_ULL(23, 16)
+#define ERDMA_CMD_DEV_CAP_MAX_MR_SIZE_MASK GENMASK_ULL(7, 0)
+
+/* cap qword 1 definition */
+#define ERDMA_CMD_DEV_CAP_DMA_LOCAL_KEY_MASK GENMASK_ULL(63, 32)
+#define ERDMA_CMD_DEV_CAP_DEFAULT_CC_MASK GENMASK_ULL(31, 28)
+#define ERDMA_CMD_DEV_CAP_QBLOCK_MASK GENMASK_ULL(27, 16)
+#define ERDMA_CMD_DEV_CAP_MAX_MW_MASK GENMASK_ULL(7, 0)
+
+#define ERDMA_NQP_PER_QBLOCK 1024
+
+#define ERDMA_CMD_INFO0_FW_VER_MASK GENMASK_ULL(31, 0)
+
+/* CQE hdr */
+#define ERDMA_CQE_HDR_OWNER_MASK BIT(31)
+#define ERDMA_CQE_HDR_OPCODE_MASK GENMASK(23, 16)
+#define ERDMA_CQE_HDR_QTYPE_MASK GENMASK(15, 8)
+#define ERDMA_CQE_HDR_SYNDROME_MASK GENMASK(7, 0)
+
+#define ERDMA_CQE_QTYPE_SQ 0
+#define ERDMA_CQE_QTYPE_RQ 1
+#define ERDMA_CQE_QTYPE_CMDQ 2
+
+struct erdma_cqe {
+ __be32 hdr;
+ __be32 qe_idx;
+ __be32 qpn;
+ union {
+ __le32 imm_data;
+ __be32 inv_rkey;
+ };
+ __be32 size;
+ __be32 rsvd[3];
+};
+
+struct erdma_sge {
+ __aligned_le64 laddr;
+ __le32 length;
+ __le32 lkey;
+};
+
+/* Receive Queue Element */
+struct erdma_rqe {
+ __le16 qe_idx;
+ __le16 rsvd0;
+ __le32 qpn;
+ __le32 rsvd1;
+ __le32 rsvd2;
+ __le64 to;
+ __le32 length;
+ __le32 stag;
+};
+
+/* SQE */
+#define ERDMA_SQE_HDR_SGL_LEN_MASK GENMASK_ULL(63, 56)
+#define ERDMA_SQE_HDR_WQEBB_CNT_MASK GENMASK_ULL(54, 52)
+#define ERDMA_SQE_HDR_QPN_MASK GENMASK_ULL(51, 32)
+#define ERDMA_SQE_HDR_OPCODE_MASK GENMASK_ULL(31, 27)
+#define ERDMA_SQE_HDR_DWQE_MASK BIT_ULL(26)
+#define ERDMA_SQE_HDR_INLINE_MASK BIT_ULL(25)
+#define ERDMA_SQE_HDR_FENCE_MASK BIT_ULL(24)
+#define ERDMA_SQE_HDR_SE_MASK BIT_ULL(23)
+#define ERDMA_SQE_HDR_CE_MASK BIT_ULL(22)
+#define ERDMA_SQE_HDR_WQEBB_INDEX_MASK GENMASK_ULL(15, 0)
+
+/* REG MR attrs */
+#define ERDMA_SQE_MR_MODE_MASK GENMASK(1, 0)
+#define ERDMA_SQE_MR_ACCESS_MASK GENMASK(5, 2)
+#define ERDMA_SQE_MR_MTT_TYPE_MASK GENMASK(7, 6)
+#define ERDMA_SQE_MR_MTT_CNT_MASK GENMASK(31, 12)
+
+struct erdma_write_sqe {
+ __le64 hdr;
+ __be32 imm_data;
+ __le32 length;
+
+ __le32 sink_stag;
+ __le32 sink_to_l;
+ __le32 sink_to_h;
+
+ __le32 rsvd;
+
+ struct erdma_sge sgl[0];
+};
+
+struct erdma_send_sqe {
+ __le64 hdr;
+ union {
+ __be32 imm_data;
+ __le32 invalid_stag;
+ };
+
+ __le32 length;
+ struct erdma_sge sgl[0];
+};
+
+struct erdma_readreq_sqe {
+ __le64 hdr;
+ __le32 invalid_stag;
+ __le32 length;
+ __le32 sink_stag;
+ __le32 sink_to_l;
+ __le32 sink_to_h;
+ __le32 rsvd;
+};
+
+struct erdma_reg_mr_sqe {
+ __le64 hdr;
+ __le64 addr;
+ __le32 length;
+ __le32 stag;
+ __le32 attrs;
+ __le32 rsvd;
+};
+
+/* EQ related. */
+#define ERDMA_DEFAULT_EQ_DEPTH 256
+
+/* ceqe */
+#define ERDMA_CEQE_HDR_DB_MASK BIT_ULL(63)
+#define ERDMA_CEQE_HDR_PI_MASK GENMASK_ULL(55, 32)
+#define ERDMA_CEQE_HDR_O_MASK BIT_ULL(31)
+#define ERDMA_CEQE_HDR_CQN_MASK GENMASK_ULL(19, 0)
+
+/* aeqe */
+#define ERDMA_AEQE_HDR_O_MASK BIT(31)
+#define ERDMA_AEQE_HDR_TYPE_MASK GENMASK(23, 16)
+#define ERDMA_AEQE_HDR_SUBTYPE_MASK GENMASK(7, 0)
+
+#define ERDMA_AE_TYPE_QP_FATAL_EVENT 0
+#define ERDMA_AE_TYPE_QP_ERQ_ERR_EVENT 1
+#define ERDMA_AE_TYPE_ACC_ERR_EVENT 2
+#define ERDMA_AE_TYPE_CQ_ERR 3
+#define ERDMA_AE_TYPE_OTHER_ERROR 4
+
+struct erdma_aeqe {
+ __le32 hdr;
+ __le32 event_data0;
+ __le32 event_data1;
+ __le32 rsvd;
+};
+
+enum erdma_opcode {
+ ERDMA_OP_WRITE = 0,
+ ERDMA_OP_READ = 1,
+ ERDMA_OP_SEND = 2,
+ ERDMA_OP_SEND_WITH_IMM = 3,
+
+ ERDMA_OP_RECEIVE = 4,
+ ERDMA_OP_RECV_IMM = 5,
+ ERDMA_OP_RECV_INV = 6,
+
+ ERDMA_OP_REQ_ERR = 7,
+ ERDMA_OP_READ_RESPONSE = 8,
+ ERDMA_OP_WRITE_WITH_IMM = 9,
+
+ ERDMA_OP_RECV_ERR = 10,
+
+ ERDMA_OP_INVALIDATE = 11,
+ ERDMA_OP_RSP_SEND_IMM = 12,
+ ERDMA_OP_SEND_WITH_INV = 13,
+
+ ERDMA_OP_REG_MR = 14,
+ ERDMA_OP_LOCAL_INV = 15,
+ ERDMA_OP_READ_WITH_INV = 16,
+ ERDMA_NUM_OPCODES = 17,
+ ERDMA_OP_INVALID = ERDMA_NUM_OPCODES + 1
+};
+
+enum erdma_wc_status {
+ ERDMA_WC_SUCCESS = 0,
+ ERDMA_WC_GENERAL_ERR = 1,
+ ERDMA_WC_RECV_WQE_FORMAT_ERR = 2,
+ ERDMA_WC_RECV_STAG_INVALID_ERR = 3,
+ ERDMA_WC_RECV_ADDR_VIOLATION_ERR = 4,
+ ERDMA_WC_RECV_RIGHT_VIOLATION_ERR = 5,
+ ERDMA_WC_RECV_PDID_ERR = 6,
+ ERDMA_WC_RECV_WARRPING_ERR = 7,
+ ERDMA_WC_SEND_WQE_FORMAT_ERR = 8,
+ ERDMA_WC_SEND_WQE_ORD_EXCEED = 9,
+ ERDMA_WC_SEND_STAG_INVALID_ERR = 10,
+ ERDMA_WC_SEND_ADDR_VIOLATION_ERR = 11,
+ ERDMA_WC_SEND_RIGHT_VIOLATION_ERR = 12,
+ ERDMA_WC_SEND_PDID_ERR = 13,
+ ERDMA_WC_SEND_WARRPING_ERR = 14,
+ ERDMA_WC_FLUSH_ERR = 15,
+ ERDMA_WC_RETRY_EXC_ERR = 16,
+ ERDMA_NUM_WC_STATUS
+};
+
+enum erdma_vendor_err {
+ ERDMA_WC_VENDOR_NO_ERR = 0,
+ ERDMA_WC_VENDOR_INVALID_RQE = 1,
+ ERDMA_WC_VENDOR_RQE_INVALID_STAG = 2,
+ ERDMA_WC_VENDOR_RQE_ADDR_VIOLATION = 3,
+ ERDMA_WC_VENDOR_RQE_ACCESS_RIGHT_ERR = 4,
+ ERDMA_WC_VENDOR_RQE_INVALID_PD = 5,
+ ERDMA_WC_VENDOR_RQE_WRAP_ERR = 6,
+ ERDMA_WC_VENDOR_INVALID_SQE = 0x20,
+ ERDMA_WC_VENDOR_ZERO_ORD = 0x21,
+ ERDMA_WC_VENDOR_SQE_INVALID_STAG = 0x30,
+ ERDMA_WC_VENDOR_SQE_ADDR_VIOLATION = 0x31,
+ ERDMA_WC_VENDOR_SQE_ACCESS_ERR = 0x32,
+ ERDMA_WC_VENDOR_SQE_INVALID_PD = 0x33,
+ ERDMA_WC_VENDOR_SQE_WARP_ERR = 0x34
+};
+
+#endif
diff --git a/drivers/infiniband/hw/erdma/erdma_main.c b/drivers/infiniband/hw/erdma/erdma_main.c
new file mode 100644
index 000000000000..07e743d24847
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_main.c
@@ -0,0 +1,608 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <net/addrconf.h>
+#include <rdma/erdma-abi.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+
+#include "erdma.h"
+#include "erdma_cm.h"
+#include "erdma_hw.h"
+#include "erdma_verbs.h"
+
+MODULE_AUTHOR("Cheng Xu <chengyou@linux.alibaba.com>");
+MODULE_DESCRIPTION("Alibaba elasticRDMA adapter driver");
+MODULE_LICENSE("Dual BSD/GPL");
+
+static int erdma_netdev_event(struct notifier_block *nb, unsigned long event,
+ void *arg)
+{
+ struct net_device *netdev = netdev_notifier_info_to_dev(arg);
+ struct erdma_dev *dev = container_of(nb, struct erdma_dev, netdev_nb);
+
+ if (dev->netdev == NULL || dev->netdev != netdev)
+ goto done;
+
+ switch (event) {
+ case NETDEV_UP:
+ dev->state = IB_PORT_ACTIVE;
+ erdma_port_event(dev, IB_EVENT_PORT_ACTIVE);
+ break;
+ case NETDEV_DOWN:
+ dev->state = IB_PORT_DOWN;
+ erdma_port_event(dev, IB_EVENT_PORT_ERR);
+ break;
+ case NETDEV_REGISTER:
+ case NETDEV_UNREGISTER:
+ case NETDEV_CHANGEADDR:
+ case NETDEV_CHANGEMTU:
+ case NETDEV_GOING_DOWN:
+ case NETDEV_CHANGE:
+ default:
+ break;
+ }
+
+done:
+ return NOTIFY_OK;
+}
+
+static int erdma_enum_and_get_netdev(struct erdma_dev *dev)
+{
+ struct net_device *netdev;
+ int ret = -ENODEV;
+
+ /* Already bound to a net_device, nothing to do. */
+ if (dev->netdev)
+ return 0;
+
+ rtnl_lock();
+ for_each_netdev(&init_net, netdev) {
+ /*
+ * In erdma, the paired netdev and ibdev share the same MAC address,
+ * which erdma reads from its PCIe BAR registers. Since erdma cannot
+ * obtain a reference to the paired netdev directly, traverse the
+ * netdev list to find it.
+ */
+ if (ether_addr_equal_unaligned(netdev->perm_addr,
+ dev->attrs.peer_addr)) {
+ ret = ib_device_set_netdev(&dev->ibdev, netdev, 1);
+ if (ret) {
+ rtnl_unlock();
+ ibdev_warn(&dev->ibdev,
+ "failed (%d) to link netdev", ret);
+ return ret;
+ }
+
+ dev->netdev = netdev;
+ break;
+ }
+ }
+
+ rtnl_unlock();
+
+ return ret;
+}
+
+static int erdma_device_register(struct erdma_dev *dev)
+{
+ struct ib_device *ibdev = &dev->ibdev;
+ int ret;
+
+ ret = erdma_enum_and_get_netdev(dev);
+ if (ret)
+ return ret;
+
+ addrconf_addr_eui48((u8 *)&ibdev->node_guid, dev->netdev->dev_addr);
+
+ ret = ib_register_device(ibdev, "erdma_%d", &dev->pdev->dev);
+ if (ret) {
+ dev_err(&dev->pdev->dev,
+ "ib_register_device failed: ret = %d\n", ret);
+ return ret;
+ }
+
+ dev->netdev_nb.notifier_call = erdma_netdev_event;
+ ret = register_netdevice_notifier(&dev->netdev_nb);
+ if (ret) {
+ ibdev_err(&dev->ibdev, "failed to register notifier.\n");
+ ib_unregister_device(ibdev);
+ }
+
+ return ret;
+}
+
+static irqreturn_t erdma_comm_irq_handler(int irq, void *data)
+{
+ struct erdma_dev *dev = data;
+
+ erdma_cmdq_completion_handler(&dev->cmdq);
+ erdma_aeq_event_handler(dev);
+
+ return IRQ_HANDLED;
+}
+
+static void erdma_dwqe_resource_init(struct erdma_dev *dev)
+{
+ int total_pages, type0, type1;
+
+ dev->attrs.grp_num = erdma_reg_read32(dev, ERDMA_REGS_GRP_NUM_REG);
+
+ if (dev->attrs.grp_num < 4)
+ dev->attrs.disable_dwqe = true;
+ else
+ dev->attrs.disable_dwqe = false;
+
+ /* One page contains 4 groups. */
+ total_pages = dev->attrs.grp_num * 4;
+
+ if (dev->attrs.grp_num >= ERDMA_DWQE_MAX_GRP_CNT) {
+ dev->attrs.grp_num = ERDMA_DWQE_MAX_GRP_CNT;
+ type0 = ERDMA_DWQE_TYPE0_CNT;
+ type1 = ERDMA_DWQE_TYPE1_CNT / ERDMA_DWQE_TYPE1_CNT_PER_PAGE;
+ } else {
+ type1 = total_pages / 3;
+ type0 = total_pages - type1 - 1;
+ }
+
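+ /* type0 is counted in pages; each type1 page holds ERDMA_DWQE_TYPE1_CNT_PER_PAGE entries. */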
+ dev->attrs.dwqe_pages = type0;
+ dev->attrs.dwqe_entries = type1 * ERDMA_DWQE_TYPE1_CNT_PER_PAGE;
+}
+
+static int erdma_request_vectors(struct erdma_dev *dev)
+{
+ int expect_irq_num = min(num_possible_cpus() + 1, ERDMA_NUM_MSIX_VEC);
+ int ret;
+
+ ret = pci_alloc_irq_vectors(dev->pdev, 1, expect_irq_num, PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dev_err(&dev->pdev->dev, "request irq vectors failed(%d)\n",
+ ret);
+ return ret;
+ }
+ dev->attrs.irq_num = ret;
+
+ return 0;
+}
+
+static int erdma_comm_irq_init(struct erdma_dev *dev)
+{
+ snprintf(dev->comm_irq.name, ERDMA_IRQNAME_SIZE, "erdma-common@pci:%s",
+ pci_name(dev->pdev));
+ dev->comm_irq.msix_vector =
+ pci_irq_vector(dev->pdev, ERDMA_MSIX_VECTOR_CMDQ);
+
+ cpumask_set_cpu(cpumask_first(cpumask_of_pcibus(dev->pdev->bus)),
+ &dev->comm_irq.affinity_hint_mask);
+ irq_set_affinity_hint(dev->comm_irq.msix_vector,
+ &dev->comm_irq.affinity_hint_mask);
+
+ return request_irq(dev->comm_irq.msix_vector, erdma_comm_irq_handler, 0,
+ dev->comm_irq.name, dev);
+}
+
+static void erdma_comm_irq_uninit(struct erdma_dev *dev)
+{
+ irq_set_affinity_hint(dev->comm_irq.msix_vector, NULL);
+ free_irq(dev->comm_irq.msix_vector, dev);
+}
+
+static int erdma_device_init(struct erdma_dev *dev, struct pci_dev *pdev)
+{
+ int ret;
+
+ erdma_dwqe_resource_init(dev);
+
+ ret = dma_set_mask_and_coherent(&pdev->dev,
+ DMA_BIT_MASK(ERDMA_PCI_WIDTH));
+ if (ret)
+ return ret;
+
+ dma_set_max_seg_size(&pdev->dev, UINT_MAX);
+
+ return 0;
+}
+
+static void erdma_device_uninit(struct erdma_dev *dev)
+{
+ u32 ctrl = FIELD_PREP(ERDMA_REG_DEV_CTRL_RESET_MASK, 1);
+
+ erdma_reg_write32(dev, ERDMA_REGS_DEV_CTRL_REG, ctrl);
+}
+
+static const struct pci_device_id erdma_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_ALIBABA, 0x107f) },
+ {}
+};
+
+static int erdma_probe_dev(struct pci_dev *pdev)
+{
+ struct erdma_dev *dev;
+ int bars, err;
+ u32 version;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "pci_enable_device failed(%d)\n", err);
+ return err;
+ }
+
+ pci_set_master(pdev);
+
+ dev = ib_alloc_device(erdma_dev, ibdev);
+ if (!dev) {
+ dev_err(&pdev->dev, "ib_alloc_device failed\n");
+ err = -ENOMEM;
+ goto err_disable_device;
+ }
+
+ pci_set_drvdata(pdev, dev);
+ dev->pdev = pdev;
+ dev->attrs.numa_node = dev_to_node(&pdev->dev);
+
+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ err = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
+ if (bars != ERDMA_BAR_MASK || err) {
+ err = err ? err : -EINVAL;
+ goto err_ib_device_release;
+ }
+
+ dev->func_bar_addr = pci_resource_start(pdev, ERDMA_FUNC_BAR);
+ dev->func_bar_len = pci_resource_len(pdev, ERDMA_FUNC_BAR);
+
+ dev->func_bar =
+ devm_ioremap(&pdev->dev, dev->func_bar_addr, dev->func_bar_len);
+ if (!dev->func_bar) {
+ dev_err(&pdev->dev, "devm_ioremap failed.\n");
+ err = -EFAULT;
+ goto err_release_bars;
+ }
+
+ version = erdma_reg_read32(dev, ERDMA_REGS_VERSION_REG);
+ if (version == 0) {
+ /* A version of 0 indicates a non-functional device function. */
+ err = -ENODEV;
+ goto err_iounmap_func_bar;
+ }
+
+ err = erdma_device_init(dev, pdev);
+ if (err)
+ goto err_iounmap_func_bar;
+
+ err = erdma_request_vectors(dev);
+ if (err)
+ goto err_iounmap_func_bar;
+
+ err = erdma_comm_irq_init(dev);
+ if (err)
+ goto err_free_vectors;
+
+ err = erdma_aeq_init(dev);
+ if (err)
+ goto err_uninit_comm_irq;
+
+ err = erdma_cmdq_init(dev);
+ if (err)
+ goto err_uninit_aeq;
+
+ err = erdma_ceqs_init(dev);
+ if (err)
+ goto err_uninit_cmdq;
+
+ erdma_finish_cmdq_init(dev);
+
+ return 0;
+
+err_uninit_cmdq:
+ erdma_device_uninit(dev);
+ erdma_cmdq_destroy(dev);
+
+err_uninit_aeq:
+ erdma_aeq_destroy(dev);
+
+err_uninit_comm_irq:
+ erdma_comm_irq_uninit(dev);
+
+err_free_vectors:
+ pci_free_irq_vectors(dev->pdev);
+
+err_iounmap_func_bar:
+ devm_iounmap(&pdev->dev, dev->func_bar);
+
+err_release_bars:
+ pci_release_selected_regions(pdev, bars);
+
+err_ib_device_release:
+ ib_dealloc_device(&dev->ibdev);
+
+err_disable_device:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+static void erdma_remove_dev(struct pci_dev *pdev)
+{
+ struct erdma_dev *dev = pci_get_drvdata(pdev);
+
+ erdma_ceqs_uninit(dev);
+
+ erdma_device_uninit(dev);
+
+ erdma_cmdq_destroy(dev);
+ erdma_aeq_destroy(dev);
+ erdma_comm_irq_uninit(dev);
+ pci_free_irq_vectors(dev->pdev);
+
+ devm_iounmap(&pdev->dev, dev->func_bar);
+ pci_release_selected_regions(pdev, ERDMA_BAR_MASK);
+
+ ib_dealloc_device(&dev->ibdev);
+
+ pci_disable_device(pdev);
+}
+
+#define ERDMA_GET_CAP(name, cap) FIELD_GET(ERDMA_CMD_DEV_CAP_##name##_MASK, cap)
+
+static int erdma_dev_attrs_init(struct erdma_dev *dev)
+{
+ int err;
+ u64 req_hdr, cap0, cap1;
+
+ erdma_cmdq_build_reqhdr(&req_hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_QUERY_DEVICE);
+
+ err = erdma_post_cmd_wait(&dev->cmdq, &req_hdr, sizeof(req_hdr), &cap0,
+ &cap1);
+ if (err)
+ return err;
+
+ dev->attrs.max_cqe = 1 << ERDMA_GET_CAP(MAX_CQE, cap0);
+ dev->attrs.max_mr_size = 1ULL << ERDMA_GET_CAP(MAX_MR_SIZE, cap0);
+ dev->attrs.max_mw = 1 << ERDMA_GET_CAP(MAX_MW, cap1);
+ dev->attrs.max_recv_wr = 1 << ERDMA_GET_CAP(MAX_RECV_WR, cap0);
+ dev->attrs.local_dma_key = ERDMA_GET_CAP(DMA_LOCAL_KEY, cap1);
+ dev->attrs.cc = ERDMA_GET_CAP(DEFAULT_CC, cap1);
+ dev->attrs.max_qp = ERDMA_NQP_PER_QBLOCK * ERDMA_GET_CAP(QBLOCK, cap1);
+ dev->attrs.max_mr = dev->attrs.max_qp << 1;
+ dev->attrs.max_cq = dev->attrs.max_qp << 1;
+
+ dev->attrs.max_send_wr = ERDMA_MAX_SEND_WR;
+ dev->attrs.max_ord = ERDMA_MAX_ORD;
+ dev->attrs.max_ird = ERDMA_MAX_IRD;
+ dev->attrs.max_send_sge = ERDMA_MAX_SEND_SGE;
+ dev->attrs.max_recv_sge = ERDMA_MAX_RECV_SGE;
+ dev->attrs.max_sge_rd = ERDMA_MAX_SGE_RD;
+ dev->attrs.max_pd = ERDMA_MAX_PD;
+
+ dev->res_cb[ERDMA_RES_TYPE_PD].max_cap = ERDMA_MAX_PD;
+ dev->res_cb[ERDMA_RES_TYPE_STAG_IDX].max_cap = dev->attrs.max_mr;
+
+ erdma_cmdq_build_reqhdr(&req_hdr, CMDQ_SUBMOD_COMMON,
+ CMDQ_OPCODE_QUERY_FW_INFO);
+
+ err = erdma_post_cmd_wait(&dev->cmdq, &req_hdr, sizeof(req_hdr), &cap0,
+ &cap1);
+ if (!err)
+ dev->attrs.fw_version =
+ FIELD_GET(ERDMA_CMD_INFO0_FW_VER_MASK, cap0);
+
+ return err;
+}
+
+static int erdma_res_cb_init(struct erdma_dev *dev)
+{
+ int i, j;
+
+ for (i = 0; i < ERDMA_RES_CNT; i++) {
+ dev->res_cb[i].next_alloc_idx = 1;
+ spin_lock_init(&dev->res_cb[i].lock);
+ dev->res_cb[i].bitmap =
+ bitmap_zalloc(dev->res_cb[i].max_cap, GFP_KERNEL);
+ if (!dev->res_cb[i].bitmap)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ for (j = 0; j < i; j++)
+ bitmap_free(dev->res_cb[j].bitmap);
+
+ return -ENOMEM;
+}
+
+static void erdma_res_cb_free(struct erdma_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < ERDMA_RES_CNT; i++)
+ bitmap_free(dev->res_cb[i].bitmap);
+}
+
+static const struct ib_device_ops erdma_device_ops = {
+ .owner = THIS_MODULE,
+ .driver_id = RDMA_DRIVER_ERDMA,
+ .uverbs_abi_ver = ERDMA_ABI_VERSION,
+
+ .alloc_mr = erdma_ib_alloc_mr,
+ .alloc_pd = erdma_alloc_pd,
+ .alloc_ucontext = erdma_alloc_ucontext,
+ .create_cq = erdma_create_cq,
+ .create_qp = erdma_create_qp,
+ .dealloc_pd = erdma_dealloc_pd,
+ .dealloc_ucontext = erdma_dealloc_ucontext,
+ .dereg_mr = erdma_dereg_mr,
+ .destroy_cq = erdma_destroy_cq,
+ .destroy_qp = erdma_destroy_qp,
+ .get_dma_mr = erdma_get_dma_mr,
+ .get_port_immutable = erdma_get_port_immutable,
+ .iw_accept = erdma_accept,
+ .iw_add_ref = erdma_qp_get_ref,
+ .iw_connect = erdma_connect,
+ .iw_create_listen = erdma_create_listen,
+ .iw_destroy_listen = erdma_destroy_listen,
+ .iw_get_qp = erdma_get_ibqp,
+ .iw_reject = erdma_reject,
+ .iw_rem_ref = erdma_qp_put_ref,
+ .map_mr_sg = erdma_map_mr_sg,
+ .mmap = erdma_mmap,
+ .mmap_free = erdma_mmap_free,
+ .modify_qp = erdma_modify_qp,
+ .post_recv = erdma_post_recv,
+ .post_send = erdma_post_send,
+ .poll_cq = erdma_poll_cq,
+ .query_device = erdma_query_device,
+ .query_gid = erdma_query_gid,
+ .query_port = erdma_query_port,
+ .query_qp = erdma_query_qp,
+ .req_notify_cq = erdma_req_notify_cq,
+ .reg_user_mr = erdma_reg_user_mr,
+
+ INIT_RDMA_OBJ_SIZE(ib_cq, erdma_cq, ibcq),
+ INIT_RDMA_OBJ_SIZE(ib_pd, erdma_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, erdma_ucontext, ibucontext),
+ INIT_RDMA_OBJ_SIZE(ib_qp, erdma_qp, ibqp),
+};
+
+static int erdma_ib_device_add(struct pci_dev *pdev)
+{
+ struct erdma_dev *dev = pci_get_drvdata(pdev);
+ struct ib_device *ibdev = &dev->ibdev;
+ u64 mac;
+ int ret;
+
+ ret = erdma_dev_attrs_init(dev);
+ if (ret)
+ return ret;
+
+ ibdev->node_type = RDMA_NODE_RNIC;
+ memcpy(ibdev->node_desc, ERDMA_NODE_DESC, sizeof(ERDMA_NODE_DESC));
+
+ /*
+ * Current model (one-to-one device association):
+ * One ERDMA device per net_device or, equivalently,
+ * per physical port.
+ */
+ ibdev->phys_port_cnt = 1;
+ ibdev->num_comp_vectors = dev->attrs.irq_num - 1;
+
+ ib_set_device_ops(ibdev, &erdma_device_ops);
+
+ INIT_LIST_HEAD(&dev->cep_list);
+
+ spin_lock_init(&dev->lock);
+ xa_init_flags(&dev->qp_xa, XA_FLAGS_ALLOC1);
+ xa_init_flags(&dev->cq_xa, XA_FLAGS_ALLOC1);
+ dev->next_alloc_cqn = 1;
+ dev->next_alloc_qpn = 1;
+
+ ret = erdma_res_cb_init(dev);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&dev->db_bitmap_lock);
+ bitmap_zero(dev->sdb_page, ERDMA_DWQE_TYPE0_CNT);
+ bitmap_zero(dev->sdb_entry, ERDMA_DWQE_TYPE1_CNT);
+
+ atomic_set(&dev->num_ctx, 0);
+
+ mac = erdma_reg_read32(dev, ERDMA_REGS_NETDEV_MAC_L_REG);
+ mac |= (u64)erdma_reg_read32(dev, ERDMA_REGS_NETDEV_MAC_H_REG) << 32;
+
+ u64_to_ether_addr(mac, dev->attrs.peer_addr);
+
+ ret = erdma_device_register(dev);
+ if (ret)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ xa_destroy(&dev->qp_xa);
+ xa_destroy(&dev->cq_xa);
+
+ erdma_res_cb_free(dev);
+
+ return ret;
+}
+
+static void erdma_ib_device_remove(struct pci_dev *pdev)
+{
+ struct erdma_dev *dev = pci_get_drvdata(pdev);
+
+ unregister_netdevice_notifier(&dev->netdev_nb);
+ ib_unregister_device(&dev->ibdev);
+
+ erdma_res_cb_free(dev);
+ xa_destroy(&dev->qp_xa);
+ xa_destroy(&dev->cq_xa);
+}
+
+static int erdma_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int ret;
+
+ ret = erdma_probe_dev(pdev);
+ if (ret)
+ return ret;
+
+ ret = erdma_ib_device_add(pdev);
+ if (ret) {
+ erdma_remove_dev(pdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void erdma_remove(struct pci_dev *pdev)
+{
+ erdma_ib_device_remove(pdev);
+ erdma_remove_dev(pdev);
+}
+
+static struct pci_driver erdma_pci_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = erdma_pci_tbl,
+ .probe = erdma_probe,
+ .remove = erdma_remove
+};
+
+MODULE_DEVICE_TABLE(pci, erdma_pci_tbl);
+
+static __init int erdma_init_module(void)
+{
+ int ret;
+
+ ret = erdma_cm_init();
+ if (ret)
+ return ret;
+
+ ret = pci_register_driver(&erdma_pci_driver);
+ if (ret)
+ erdma_cm_exit();
+
+ return ret;
+}
+
+static void __exit erdma_exit_module(void)
+{
+ pci_unregister_driver(&erdma_pci_driver);
+
+ erdma_cm_exit();
+}
+
+module_init(erdma_init_module);
+module_exit(erdma_exit_module);
diff --git a/drivers/infiniband/hw/erdma/erdma_qp.c b/drivers/infiniband/hw/erdma/erdma_qp.c
new file mode 100644
index 000000000000..bc3ec22a62c5
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_qp.c
@@ -0,0 +1,566 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2021, Alibaba Group */
+/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
+/* Copyright (c) 2008-2019, IBM Corporation */
+
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/scatterlist.h>
+#include <linux/types.h>
+
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_verbs.h>
+
+#include "erdma.h"
+#include "erdma_cm.h"
+#include "erdma_verbs.h"
+
+void erdma_qp_llp_close(struct erdma_qp *qp)
+{
+ struct erdma_qp_attrs qp_attrs;
+
+ down_write(&qp->state_lock);
+
+ switch (qp->attrs.state) {
+ case ERDMA_QP_STATE_RTS:
+ case ERDMA_QP_STATE_RTR:
+ case ERDMA_QP_STATE_IDLE:
+ case ERDMA_QP_STATE_TERMINATE:
+ qp_attrs.state = ERDMA_QP_STATE_CLOSING;
+ erdma_modify_qp_internal(qp, &qp_attrs, ERDMA_QP_ATTR_STATE);
+ break;
+ case ERDMA_QP_STATE_CLOSING:
+ qp->attrs.state = ERDMA_QP_STATE_IDLE;
+ break;
+ default:
+ break;
+ }
+
+ if (qp->cep) {
+ erdma_cep_put(qp->cep);
+ qp->cep = NULL;
+ }
+
+ up_write(&qp->state_lock);
+}
+
+struct ib_qp *erdma_get_ibqp(struct ib_device *ibdev, int id)
+{
+ struct erdma_qp *qp = find_qp_by_qpn(to_edev(ibdev), id);
+
+ if (qp)
+ return &qp->ibqp;
+
+ return NULL;
+}
+
+static int erdma_modify_qp_state_to_rts(struct erdma_qp *qp,
+ struct erdma_qp_attrs *attrs,
+ enum erdma_qp_attr_mask mask)
+{
+ int ret;
+ struct erdma_dev *dev = qp->dev;
+ struct erdma_cmdq_modify_qp_req req;
+ struct tcp_sock *tp;
+ struct erdma_cep *cep = qp->cep;
+ struct sockaddr_storage local_addr, remote_addr;
+
+ if (!(mask & ERDMA_QP_ATTR_LLP_HANDLE))
+ return -EINVAL;
+
+ if (!(mask & ERDMA_QP_ATTR_MPA))
+ return -EINVAL;
+
+ ret = getname_local(cep->sock, &local_addr);
+ if (ret < 0)
+ return ret;
+
+ ret = getname_peer(cep->sock, &remote_addr);
+ if (ret < 0)
+ return ret;
+
+ qp->attrs.state = ERDMA_QP_STATE_RTS;
+
+ tp = tcp_sk(qp->cep->sock->sk);
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_MODIFY_QP);
+
+ req.cfg = FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK, qp->attrs.state) |
+ FIELD_PREP(ERDMA_CMD_MODIFY_QP_CC_MASK, qp->attrs.cc) |
+ FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp));
+
+ req.cookie = be32_to_cpu(qp->cep->mpa.ext_data.cookie);
+ req.dip = to_sockaddr_in(remote_addr).sin_addr.s_addr;
+ req.sip = to_sockaddr_in(local_addr).sin_addr.s_addr;
+ req.dport = to_sockaddr_in(remote_addr).sin_port;
+ req.sport = to_sockaddr_in(local_addr).sin_port;
+
+ req.send_nxt = tp->snd_nxt;
+ /* Reserve TCP sequence space for the MPA response on the passive side. */
+ if (qp->attrs.qp_type == ERDMA_QP_PASSIVE)
+ req.send_nxt += MPA_DEFAULT_HDR_LEN + qp->attrs.pd_len;
+ req.recv_nxt = tp->rcv_nxt;
+
+ return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
+ NULL);
+}
+
+static int erdma_modify_qp_state_to_stop(struct erdma_qp *qp,
+ struct erdma_qp_attrs *attrs,
+ enum erdma_qp_attr_mask mask)
+{
+ struct erdma_dev *dev = qp->dev;
+ struct erdma_cmdq_modify_qp_req req;
+
+ qp->attrs.state = attrs->state;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_MODIFY_QP);
+
+ req.cfg = FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK, attrs->state) |
+ FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp));
+
+ return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
+ NULL);
+}
+
+int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
+ enum erdma_qp_attr_mask mask)
+{
+ int drop_conn, ret = 0;
+
+ if (!mask)
+ return 0;
+
+ if (!(mask & ERDMA_QP_ATTR_STATE))
+ return 0;
+
+ switch (qp->attrs.state) {
+ case ERDMA_QP_STATE_IDLE:
+ case ERDMA_QP_STATE_RTR:
+ if (attrs->state == ERDMA_QP_STATE_RTS) {
+ ret = erdma_modify_qp_state_to_rts(qp, attrs, mask);
+ } else if (attrs->state == ERDMA_QP_STATE_ERROR) {
+ qp->attrs.state = ERDMA_QP_STATE_ERROR;
+ if (qp->cep) {
+ erdma_cep_put(qp->cep);
+ qp->cep = NULL;
+ }
+ ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
+ }
+ break;
+ case ERDMA_QP_STATE_RTS:
+ drop_conn = 0;
+
+ if (attrs->state == ERDMA_QP_STATE_CLOSING) {
+ ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
+ drop_conn = 1;
+ } else if (attrs->state == ERDMA_QP_STATE_TERMINATE) {
+ qp->attrs.state = ERDMA_QP_STATE_TERMINATE;
+ ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
+ drop_conn = 1;
+ } else if (attrs->state == ERDMA_QP_STATE_ERROR) {
+ ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
+ qp->attrs.state = ERDMA_QP_STATE_ERROR;
+ drop_conn = 1;
+ }
+
+ if (drop_conn)
+ erdma_qp_cm_drop(qp);
+
+ break;
+ case ERDMA_QP_STATE_TERMINATE:
+ if (attrs->state == ERDMA_QP_STATE_ERROR)
+ qp->attrs.state = ERDMA_QP_STATE_ERROR;
+ break;
+ case ERDMA_QP_STATE_CLOSING:
+ if (attrs->state == ERDMA_QP_STATE_IDLE) {
+ qp->attrs.state = ERDMA_QP_STATE_IDLE;
+ } else if (attrs->state == ERDMA_QP_STATE_ERROR) {
+ ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
+ qp->attrs.state = ERDMA_QP_STATE_ERROR;
+ } else if (attrs->state != ERDMA_QP_STATE_CLOSING) {
+ return -ECONNABORTED;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static void erdma_qp_safe_free(struct kref *ref)
+{
+ struct erdma_qp *qp = container_of(ref, struct erdma_qp, ref);
+
+ complete(&qp->safe_free);
+}
+
+void erdma_qp_put(struct erdma_qp *qp)
+{
+ WARN_ON(kref_read(&qp->ref) < 1);
+ kref_put(&qp->ref, erdma_qp_safe_free);
+}
+
+void erdma_qp_get(struct erdma_qp *qp)
+{
+ kref_get(&qp->ref);
+}
+
+static int fill_inline_data(struct erdma_qp *qp,
+ const struct ib_send_wr *send_wr, u16 wqe_idx,
+ u32 sgl_offset, __le32 *length_field)
+{
+ u32 remain_size, copy_size, data_off, bytes = 0;
+ char *data;
+ int i = 0;
+
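+ /* Copy each SGE payload into consecutive SQEBBs, advancing the WQE index as each 32-byte block fills. */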
+ wqe_idx += (sgl_offset >> SQEBB_SHIFT);
+ sgl_offset &= (SQEBB_SIZE - 1);
+ data = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx, qp->attrs.sq_size,
+ SQEBB_SHIFT);
+
+ while (i < send_wr->num_sge) {
+ bytes += send_wr->sg_list[i].length;
+ if (bytes > (int)ERDMA_MAX_INLINE)
+ return -EINVAL;
+
+ remain_size = send_wr->sg_list[i].length;
+ data_off = 0;
+
+ while (1) {
+ copy_size = min(remain_size, SQEBB_SIZE - sgl_offset);
+
+ memcpy(data + sgl_offset,
+ (void *)(uintptr_t)send_wr->sg_list[i].addr +
+ data_off,
+ copy_size);
+ remain_size -= copy_size;
+ data_off += copy_size;
+ sgl_offset += copy_size;
+ wqe_idx += (sgl_offset >> SQEBB_SHIFT);
+ sgl_offset &= (SQEBB_SIZE - 1);
+
+ data = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx,
+ qp->attrs.sq_size, SQEBB_SHIFT);
+ if (!remain_size)
+ break;
+ }
+
+ i++;
+ }
+ *length_field = cpu_to_le32(bytes);
+
+ return bytes;
+}
+
+static int fill_sgl(struct erdma_qp *qp, const struct ib_send_wr *send_wr,
+ u16 wqe_idx, u32 sgl_offset, __le32 *length_field)
+{
+ int i = 0;
+ u32 bytes = 0;
+ char *sgl;
+
+ if (send_wr->num_sge > qp->dev->attrs.max_send_sge)
+ return -EINVAL;
+
+ if (sgl_offset & 0xF)
+ return -EINVAL;
+
+ while (i < send_wr->num_sge) {
+ wqe_idx += (sgl_offset >> SQEBB_SHIFT);
+ sgl_offset &= (SQEBB_SIZE - 1);
+ sgl = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx,
+ qp->attrs.sq_size, SQEBB_SHIFT);
+
+ bytes += send_wr->sg_list[i].length;
+ memcpy(sgl + sgl_offset, &send_wr->sg_list[i],
+ sizeof(struct ib_sge));
+
+ sgl_offset += sizeof(struct ib_sge);
+ i++;
+ }
+
+ *length_field = cpu_to_le32(bytes);
+ return 0;
+}
+
+static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
+ const struct ib_send_wr *send_wr)
+{
+ u32 wqe_size, wqebb_cnt, hw_op, flags, sgl_offset;
+ u32 idx = *pi & (qp->attrs.sq_size - 1);
+ enum ib_wr_opcode op = send_wr->opcode;
+ struct erdma_readreq_sqe *read_sqe;
+ struct erdma_reg_mr_sqe *regmr_sge;
+ struct erdma_write_sqe *write_sqe;
+ struct erdma_send_sqe *send_sqe;
+ struct ib_rdma_wr *rdma_wr;
+ struct erdma_mr *mr;
+ __le32 *length_field;
+ u64 wqe_hdr, *entry;
+ struct ib_sge *sge;
+ u32 attrs;
+ int ret;
+
+ entry = get_queue_entry(qp->kern_qp.sq_buf, idx, qp->attrs.sq_size,
+ SQEBB_SHIFT);
+
+ /* Clear the SQE header section. */
+ *entry = 0;
+
+ qp->kern_qp.swr_tbl[idx] = send_wr->wr_id;
+ flags = send_wr->send_flags;
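+ /* Build the common SQE header: completion, solicited, fence and inline flags plus the QP number. */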
+ wqe_hdr = FIELD_PREP(
+ ERDMA_SQE_HDR_CE_MASK,
+ ((flags & IB_SEND_SIGNALED) || qp->kern_qp.sig_all) ? 1 : 0);
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_SE_MASK,
+ flags & IB_SEND_SOLICITED ? 1 : 0);
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_FENCE_MASK,
+ flags & IB_SEND_FENCE ? 1 : 0);
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_INLINE_MASK,
+ flags & IB_SEND_INLINE ? 1 : 0);
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_QPN_MASK, QP_ID(qp));
+
+ switch (op) {
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ hw_op = ERDMA_OP_WRITE;
+ if (op == IB_WR_RDMA_WRITE_WITH_IMM)
+ hw_op = ERDMA_OP_WRITE_WITH_IMM;
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, hw_op);
+ rdma_wr = container_of(send_wr, struct ib_rdma_wr, wr);
+ write_sqe = (struct erdma_write_sqe *)entry;
+
+ write_sqe->imm_data = send_wr->ex.imm_data;
+ write_sqe->sink_stag = cpu_to_le32(rdma_wr->rkey);
+ write_sqe->sink_to_h =
+ cpu_to_le32(upper_32_bits(rdma_wr->remote_addr));
+ write_sqe->sink_to_l =
+ cpu_to_le32(lower_32_bits(rdma_wr->remote_addr));
+
+ length_field = &write_sqe->length;
+ wqe_size = sizeof(struct erdma_write_sqe);
+ sgl_offset = wqe_size;
+ break;
+ case IB_WR_RDMA_READ:
+ case IB_WR_RDMA_READ_WITH_INV:
+ read_sqe = (struct erdma_readreq_sqe *)entry;
+ if (unlikely(send_wr->num_sge != 1))
+ return -EINVAL;
+ hw_op = ERDMA_OP_READ;
+ if (op == IB_WR_RDMA_READ_WITH_INV) {
+ hw_op = ERDMA_OP_READ_WITH_INV;
+ read_sqe->invalid_stag =
+ cpu_to_le32(send_wr->ex.invalidate_rkey);
+ }
+
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, hw_op);
+ rdma_wr = container_of(send_wr, struct ib_rdma_wr, wr);
+ read_sqe->length = cpu_to_le32(send_wr->sg_list[0].length);
+ read_sqe->sink_stag = cpu_to_le32(send_wr->sg_list[0].lkey);
+ read_sqe->sink_to_l =
+ cpu_to_le32(lower_32_bits(send_wr->sg_list[0].addr));
+ read_sqe->sink_to_h =
+ cpu_to_le32(upper_32_bits(send_wr->sg_list[0].addr));
+
+ sge = get_queue_entry(qp->kern_qp.sq_buf, idx + 1,
+ qp->attrs.sq_size, SQEBB_SHIFT);
+ sge->addr = rdma_wr->remote_addr;
+ sge->lkey = rdma_wr->rkey;
+ sge->length = send_wr->sg_list[0].length;
+ wqe_size = sizeof(struct erdma_readreq_sqe) +
+ send_wr->num_sge * sizeof(struct ib_sge);
+
+ goto out;
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ case IB_WR_SEND_WITH_INV:
+ send_sqe = (struct erdma_send_sqe *)entry;
+ hw_op = ERDMA_OP_SEND;
+ if (op == IB_WR_SEND_WITH_IMM) {
+ hw_op = ERDMA_OP_SEND_WITH_IMM;
+ send_sqe->imm_data = send_wr->ex.imm_data;
+ } else if (op == IB_WR_SEND_WITH_INV) {
+ hw_op = ERDMA_OP_SEND_WITH_INV;
+ send_sqe->invalid_stag =
+ cpu_to_le32(send_wr->ex.invalidate_rkey);
+ }
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, hw_op);
+ length_field = &send_sqe->length;
+ wqe_size = sizeof(struct erdma_send_sqe);
+ sgl_offset = wqe_size;
+
+ break;
+ case IB_WR_REG_MR:
+ wqe_hdr |=
+ FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, ERDMA_OP_REG_MR);
+ regmr_sge = (struct erdma_reg_mr_sqe *)entry;
+ mr = to_emr(reg_wr(send_wr)->mr);
+
+ mr->access = ERDMA_MR_ACC_LR |
+ to_erdma_access_flags(reg_wr(send_wr)->access);
+ regmr_sge->addr = cpu_to_le64(mr->ibmr.iova);
+ regmr_sge->length = cpu_to_le32(mr->ibmr.length);
+ regmr_sge->stag = cpu_to_le32(reg_wr(send_wr)->key);
+ attrs = FIELD_PREP(ERDMA_SQE_MR_MODE_MASK, 0) |
+ FIELD_PREP(ERDMA_SQE_MR_ACCESS_MASK, mr->access) |
+ FIELD_PREP(ERDMA_SQE_MR_MTT_CNT_MASK,
+ mr->mem.mtt_nents);
+
+ if (mr->mem.mtt_nents < ERDMA_MAX_INLINE_MTT_ENTRIES) {
+ attrs |= FIELD_PREP(ERDMA_SQE_MR_MTT_TYPE_MASK, 0);
+			/* Copy the MTT entries into the SQE content to speed up posting */
+ memcpy(get_queue_entry(qp->kern_qp.sq_buf, idx + 1,
+ qp->attrs.sq_size, SQEBB_SHIFT),
+ mr->mem.mtt_buf, MTT_SIZE(mr->mem.mtt_nents));
+ wqe_size = sizeof(struct erdma_reg_mr_sqe) +
+ MTT_SIZE(mr->mem.mtt_nents);
+ } else {
+ attrs |= FIELD_PREP(ERDMA_SQE_MR_MTT_TYPE_MASK, 1);
+ wqe_size = sizeof(struct erdma_reg_mr_sqe);
+ }
+
+ regmr_sge->attrs = cpu_to_le32(attrs);
+ goto out;
+ case IB_WR_LOCAL_INV:
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK,
+ ERDMA_OP_LOCAL_INV);
+ regmr_sge = (struct erdma_reg_mr_sqe *)entry;
+ regmr_sge->stag = cpu_to_le32(send_wr->ex.invalidate_rkey);
+ wqe_size = sizeof(struct erdma_reg_mr_sqe);
+ goto out;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (flags & IB_SEND_INLINE) {
+ ret = fill_inline_data(qp, send_wr, idx, sgl_offset,
+ length_field);
+ if (ret < 0)
+ return -EINVAL;
+ wqe_size += ret;
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_SGL_LEN_MASK, ret);
+ } else {
+ ret = fill_sgl(qp, send_wr, idx, sgl_offset, length_field);
+ if (ret)
+ return -EINVAL;
+ wqe_size += send_wr->num_sge * sizeof(struct ib_sge);
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_SGL_LEN_MASK,
+ send_wr->num_sge);
+ }
+
+out:
+ wqebb_cnt = SQEBB_COUNT(wqe_size);
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_WQEBB_CNT_MASK, wqebb_cnt - 1);
+ *pi += wqebb_cnt;
+ wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_WQEBB_INDEX_MASK, *pi);
+
+ *entry = wqe_hdr;
+
+ return 0;
+}
+
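+/*
+ * Ring the SQ doorbell: record the doorbell value in the in-memory
+ * db_info area, then write it to the hardware SQ doorbell register.
+ */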
+static void kick_sq_db(struct erdma_qp *qp, u16 pi)
+{
+ u64 db_data = FIELD_PREP(ERDMA_SQE_HDR_QPN_MASK, QP_ID(qp)) |
+ FIELD_PREP(ERDMA_SQE_HDR_WQEBB_INDEX_MASK, pi);
+
+ *(u64 *)qp->kern_qp.sq_db_info = db_data;
+ writeq(db_data, qp->kern_qp.hw_sq_db);
+}
+
+int erdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *send_wr,
+ const struct ib_send_wr **bad_send_wr)
+{
+ struct erdma_qp *qp = to_eqp(ibqp);
+ int ret = 0;
+ const struct ib_send_wr *wr = send_wr;
+ unsigned long flags;
+ u16 sq_pi;
+
+ if (!send_wr)
+ return -EINVAL;
+
+ spin_lock_irqsave(&qp->lock, flags);
+ sq_pi = qp->kern_qp.sq_pi;
+
+ while (wr) {
+ if ((u16)(sq_pi - qp->kern_qp.sq_ci) >= qp->attrs.sq_size) {
+ ret = -ENOMEM;
+ *bad_send_wr = send_wr;
+ break;
+ }
+
+ ret = erdma_push_one_sqe(qp, &sq_pi, wr);
+ if (ret) {
+ *bad_send_wr = wr;
+ break;
+ }
+ qp->kern_qp.sq_pi = sq_pi;
+ kick_sq_db(qp, sq_pi);
+
+ wr = wr->next;
+ }
+ spin_unlock_irqrestore(&qp->lock, flags);
+
+ return ret;
+}
+
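+/*
+ * Fill one RQE at the current RQ producer index, ring the RQ doorbell
+ * and record the wr_id so it can be reported on completion.
+ */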
+static int erdma_post_recv_one(struct erdma_qp *qp,
+ const struct ib_recv_wr *recv_wr)
+{
+ struct erdma_rqe *rqe =
+ get_queue_entry(qp->kern_qp.rq_buf, qp->kern_qp.rq_pi,
+ qp->attrs.rq_size, RQE_SHIFT);
+
+ rqe->qe_idx = cpu_to_le16(qp->kern_qp.rq_pi + 1);
+ rqe->qpn = cpu_to_le32(QP_ID(qp));
+
+ if (recv_wr->num_sge == 0) {
+ rqe->length = 0;
+ } else if (recv_wr->num_sge == 1) {
+ rqe->stag = cpu_to_le32(recv_wr->sg_list[0].lkey);
+ rqe->to = cpu_to_le64(recv_wr->sg_list[0].addr);
+ rqe->length = cpu_to_le32(recv_wr->sg_list[0].length);
+ } else {
+ return -EINVAL;
+ }
+
+ *(u64 *)qp->kern_qp.rq_db_info = *(u64 *)rqe;
+ writeq(*(u64 *)rqe, qp->kern_qp.hw_rq_db);
+
+ qp->kern_qp.rwr_tbl[qp->kern_qp.rq_pi & (qp->attrs.rq_size - 1)] =
+ recv_wr->wr_id;
+ qp->kern_qp.rq_pi++;
+
+ return 0;
+}
+
+int erdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *recv_wr,
+ const struct ib_recv_wr **bad_recv_wr)
+{
+ const struct ib_recv_wr *wr = recv_wr;
+ struct erdma_qp *qp = to_eqp(ibqp);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&qp->lock, flags);
+
+ while (wr) {
+ ret = erdma_post_recv_one(qp, wr);
+ if (ret) {
+ *bad_recv_wr = wr;
+ break;
+ }
+ wr = wr->next;
+ }
+
+ spin_unlock_irqrestore(&qp->lock, flags);
+ return ret;
+}
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
new file mode 100644
index 000000000000..699bd3f59cd3
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
@@ -0,0 +1,1460 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
+/* Copyright (c) 2008-2019, IBM Corporation */
+
+/* Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. */
+
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <net/addrconf.h>
+#include <rdma/erdma-abi.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/uverbs_ioctl.h>
+
+#include "erdma.h"
+#include "erdma_cm.h"
+#include "erdma_hw.h"
+#include "erdma_verbs.h"
+
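+/*
+ * Issue a CREATE_QP command. Kernel QPs describe their queues with a
+ * single inline MTT entry; user QPs pass through the page tables and
+ * doorbell records prepared in init_user_qp().
+ */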
+static int create_qp_cmd(struct erdma_dev *dev, struct erdma_qp *qp)
+{
+ struct erdma_cmdq_create_qp_req req;
+ struct erdma_pd *pd = to_epd(qp->ibqp.pd);
+ struct erdma_uqp *user_qp;
+ u64 resp0, resp1;
+ int err;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_CREATE_QP);
+
+ req.cfg0 = FIELD_PREP(ERDMA_CMD_CREATE_QP_SQ_DEPTH_MASK,
+ ilog2(qp->attrs.sq_size)) |
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_QPN_MASK, QP_ID(qp));
+ req.cfg1 = FIELD_PREP(ERDMA_CMD_CREATE_QP_RQ_DEPTH_MASK,
+ ilog2(qp->attrs.rq_size)) |
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_PD_MASK, pd->pdn);
+
+ if (rdma_is_kernel_res(&qp->ibqp.res)) {
+ u32 pgsz_range = ilog2(SZ_1M) - PAGE_SHIFT;
+
+ req.sq_cqn_mtt_cfg =
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
+ pgsz_range) |
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->scq->cqn);
+ req.rq_cqn_mtt_cfg =
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
+ pgsz_range) |
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->rcq->cqn);
+
+ req.sq_mtt_cfg =
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_PAGE_OFFSET_MASK, 0) |
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_CNT_MASK, 1) |
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_TYPE_MASK,
+ ERDMA_MR_INLINE_MTT);
+ req.rq_mtt_cfg = req.sq_mtt_cfg;
+
+ req.rq_buf_addr = qp->kern_qp.rq_buf_dma_addr;
+ req.sq_buf_addr = qp->kern_qp.sq_buf_dma_addr;
+ req.sq_db_info_dma_addr = qp->kern_qp.sq_buf_dma_addr +
+ (qp->attrs.sq_size << SQEBB_SHIFT);
+ req.rq_db_info_dma_addr = qp->kern_qp.rq_buf_dma_addr +
+ (qp->attrs.rq_size << RQE_SHIFT);
+ } else {
+ user_qp = &qp->user_qp;
+ req.sq_cqn_mtt_cfg = FIELD_PREP(
+ ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
+ ilog2(user_qp->sq_mtt.page_size) - PAGE_SHIFT);
+ req.sq_cqn_mtt_cfg |=
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->scq->cqn);
+
+ req.rq_cqn_mtt_cfg = FIELD_PREP(
+ ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
+ ilog2(user_qp->rq_mtt.page_size) - PAGE_SHIFT);
+ req.rq_cqn_mtt_cfg |=
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->rcq->cqn);
+
+ req.sq_mtt_cfg = user_qp->sq_mtt.page_offset;
+ req.sq_mtt_cfg |= FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_CNT_MASK,
+ user_qp->sq_mtt.mtt_nents) |
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_TYPE_MASK,
+ user_qp->sq_mtt.mtt_type);
+
+ req.rq_mtt_cfg = user_qp->rq_mtt.page_offset;
+ req.rq_mtt_cfg |= FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_CNT_MASK,
+ user_qp->rq_mtt.mtt_nents) |
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_TYPE_MASK,
+ user_qp->rq_mtt.mtt_type);
+
+ req.sq_buf_addr = user_qp->sq_mtt.mtt_entry[0];
+ req.rq_buf_addr = user_qp->rq_mtt.mtt_entry[0];
+
+ req.sq_db_info_dma_addr = user_qp->sq_db_info_dma_addr;
+ req.rq_db_info_dma_addr = user_qp->rq_db_info_dma_addr;
+ }
+
+ err = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), &resp0,
+ &resp1);
+ if (!err)
+ qp->attrs.cookie =
+ FIELD_GET(ERDMA_CMDQ_CREATE_QP_RESP_COOKIE_MASK, resp0);
+
+ return err;
+}
+
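+/*
+ * Issue a REG_MR command. DMA MRs carry no address range, normal MRs
+ * report their VA and length. Indirect MTTs pass only the DMA address
+ * of the MTT buffer, while inline MTTs copy every entry into the
+ * request.
+ */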
+static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr)
+{
+ struct erdma_cmdq_reg_mr_req req;
+ struct erdma_pd *pd = to_epd(mr->ibmr.pd);
+ u64 *phy_addr;
+ int i;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA, CMDQ_OPCODE_REG_MR);
+
+ req.cfg0 = FIELD_PREP(ERDMA_CMD_MR_VALID_MASK, mr->valid) |
+ FIELD_PREP(ERDMA_CMD_MR_KEY_MASK, mr->ibmr.lkey & 0xFF) |
+ FIELD_PREP(ERDMA_CMD_MR_MPT_IDX_MASK, mr->ibmr.lkey >> 8);
+ req.cfg1 = FIELD_PREP(ERDMA_CMD_REGMR_PD_MASK, pd->pdn) |
+ FIELD_PREP(ERDMA_CMD_REGMR_TYPE_MASK, mr->type) |
+ FIELD_PREP(ERDMA_CMD_REGMR_RIGHT_MASK, mr->access) |
+ FIELD_PREP(ERDMA_CMD_REGMR_ACC_MODE_MASK, 0);
+ req.cfg2 = FIELD_PREP(ERDMA_CMD_REGMR_PAGESIZE_MASK,
+ ilog2(mr->mem.page_size)) |
+ FIELD_PREP(ERDMA_CMD_REGMR_MTT_TYPE_MASK, mr->mem.mtt_type) |
+ FIELD_PREP(ERDMA_CMD_REGMR_MTT_CNT_MASK, mr->mem.page_cnt);
+
+ if (mr->type == ERDMA_MR_TYPE_DMA)
+ goto post_cmd;
+
+ if (mr->type == ERDMA_MR_TYPE_NORMAL) {
+ req.start_va = mr->mem.va;
+ req.size = mr->mem.len;
+ }
+
+ if (mr->type == ERDMA_MR_TYPE_FRMR ||
+ mr->mem.mtt_type == ERDMA_MR_INDIRECT_MTT) {
+ phy_addr = req.phy_addr;
+ *phy_addr = mr->mem.mtt_entry[0];
+ } else {
+ phy_addr = req.phy_addr;
+ for (i = 0; i < mr->mem.mtt_nents; i++)
+ *phy_addr++ = mr->mem.mtt_entry[i];
+ }
+
+post_cmd:
+ return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
+ NULL);
+}
+
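+/*
+ * Issue a CREATE_CQ command describing either the kernel CQ buffer
+ * (single inline MTT entry) or the user-space queue buffer and
+ * doorbell record.
+ */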
+static int create_cq_cmd(struct erdma_dev *dev, struct erdma_cq *cq)
+{
+ struct erdma_cmdq_create_cq_req req;
+ u32 page_size;
+ struct erdma_mem *mtt;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_CREATE_CQ);
+
+ req.cfg0 = FIELD_PREP(ERDMA_CMD_CREATE_CQ_CQN_MASK, cq->cqn) |
+ FIELD_PREP(ERDMA_CMD_CREATE_CQ_DEPTH_MASK, ilog2(cq->depth));
+ req.cfg1 = FIELD_PREP(ERDMA_CMD_CREATE_CQ_EQN_MASK, cq->assoc_eqn);
+
+ if (rdma_is_kernel_res(&cq->ibcq.res)) {
+ page_size = SZ_32M;
+ req.cfg0 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK,
+ ilog2(page_size) - PAGE_SHIFT);
+ req.qbuf_addr_l = lower_32_bits(cq->kern_cq.qbuf_dma_addr);
+ req.qbuf_addr_h = upper_32_bits(cq->kern_cq.qbuf_dma_addr);
+
+ req.cfg1 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_CNT_MASK, 1) |
+ FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_TYPE_MASK,
+ ERDMA_MR_INLINE_MTT);
+
+ req.first_page_offset = 0;
+ req.cq_db_info_addr =
+ cq->kern_cq.qbuf_dma_addr + (cq->depth << CQE_SHIFT);
+ } else {
+ mtt = &cq->user_cq.qbuf_mtt;
+ req.cfg0 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK,
+ ilog2(mtt->page_size) - PAGE_SHIFT);
+ if (mtt->mtt_nents == 1) {
+ req.qbuf_addr_l = lower_32_bits(*(u64 *)mtt->mtt_buf);
+ req.qbuf_addr_h = upper_32_bits(*(u64 *)mtt->mtt_buf);
+ } else {
+ req.qbuf_addr_l = lower_32_bits(mtt->mtt_entry[0]);
+ req.qbuf_addr_h = upper_32_bits(mtt->mtt_entry[0]);
+ }
+ req.cfg1 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_CNT_MASK,
+ mtt->mtt_nents);
+ req.cfg1 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_TYPE_MASK,
+ mtt->mtt_type);
+
+ req.first_page_offset = mtt->page_offset;
+ req.cq_db_info_addr = cq->user_cq.db_info_dma_addr;
+ }
+
+ return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
+ NULL);
+}
+
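+/*
+ * Allocate a free index from the resource bitmap, starting at the last
+ * allocation point and wrapping around once before giving up.
+ */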
+static int erdma_alloc_idx(struct erdma_resource_cb *res_cb)
+{
+ int idx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&res_cb->lock, flags);
+ idx = find_next_zero_bit(res_cb->bitmap, res_cb->max_cap,
+ res_cb->next_alloc_idx);
+ if (idx == res_cb->max_cap) {
+ idx = find_first_zero_bit(res_cb->bitmap, res_cb->max_cap);
+ if (idx == res_cb->max_cap) {
+ res_cb->next_alloc_idx = 1;
+ spin_unlock_irqrestore(&res_cb->lock, flags);
+ return -ENOSPC;
+ }
+ }
+
+ set_bit(idx, res_cb->bitmap);
+ res_cb->next_alloc_idx = idx + 1;
+ spin_unlock_irqrestore(&res_cb->lock, flags);
+
+ return idx;
+}
+
+static inline void erdma_free_idx(struct erdma_resource_cb *res_cb, u32 idx)
+{
+ unsigned long flags;
+ u32 used;
+
+ spin_lock_irqsave(&res_cb->lock, flags);
+ used = __test_and_clear_bit(idx, res_cb->bitmap);
+ spin_unlock_irqrestore(&res_cb->lock, flags);
+ WARN_ON(!used);
+}
+
+static struct rdma_user_mmap_entry *
+erdma_user_mmap_entry_insert(struct erdma_ucontext *uctx, void *address,
+ u32 size, u8 mmap_flag, u64 *mmap_offset)
+{
+ struct erdma_user_mmap_entry *entry =
+ kzalloc(sizeof(*entry), GFP_KERNEL);
+ int ret;
+
+ if (!entry)
+ return NULL;
+
+ entry->address = (u64)address;
+ entry->mmap_flag = mmap_flag;
+
+ size = PAGE_ALIGN(size);
+
+ ret = rdma_user_mmap_entry_insert(&uctx->ibucontext, &entry->rdma_entry,
+ size);
+ if (ret) {
+ kfree(entry);
+ return NULL;
+ }
+
+ *mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
+
+ return &entry->rdma_entry;
+}
+
+int erdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
+ struct ib_udata *unused)
+{
+ struct erdma_dev *dev = to_edev(ibdev);
+
+ memset(attr, 0, sizeof(*attr));
+
+ attr->max_mr_size = dev->attrs.max_mr_size;
+ attr->vendor_id = PCI_VENDOR_ID_ALIBABA;
+ attr->vendor_part_id = dev->pdev->device;
+ attr->hw_ver = dev->pdev->revision;
+ attr->max_qp = dev->attrs.max_qp - 1;
+ attr->max_qp_wr = min(dev->attrs.max_send_wr, dev->attrs.max_recv_wr);
+ attr->max_qp_rd_atom = dev->attrs.max_ord;
+ attr->max_qp_init_rd_atom = dev->attrs.max_ird;
+ attr->max_res_rd_atom = dev->attrs.max_qp * dev->attrs.max_ird;
+ attr->device_cap_flags = IB_DEVICE_MEM_MGT_EXTENSIONS;
+ attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
+ ibdev->local_dma_lkey = dev->attrs.local_dma_key;
+ attr->max_send_sge = dev->attrs.max_send_sge;
+ attr->max_recv_sge = dev->attrs.max_recv_sge;
+ attr->max_sge_rd = dev->attrs.max_sge_rd;
+ attr->max_cq = dev->attrs.max_cq - 1;
+ attr->max_cqe = dev->attrs.max_cqe;
+ attr->max_mr = dev->attrs.max_mr;
+ attr->max_pd = dev->attrs.max_pd;
+ attr->max_mw = dev->attrs.max_mw;
+ attr->max_fast_reg_page_list_len = ERDMA_MAX_FRMR_PA;
+ attr->page_size_cap = ERDMA_PAGE_SIZE_SUPPORT;
+ attr->fw_ver = dev->attrs.fw_version;
+
+ if (dev->netdev)
+ addrconf_addr_eui48((u8 *)&attr->sys_image_guid,
+ dev->netdev->dev_addr);
+
+ return 0;
+}
+
+int erdma_query_gid(struct ib_device *ibdev, u32 port, int idx,
+ union ib_gid *gid)
+{
+ struct erdma_dev *dev = to_edev(ibdev);
+
+ memset(gid, 0, sizeof(*gid));
+ ether_addr_copy(gid->raw, dev->attrs.peer_addr);
+
+ return 0;
+}
+
+int erdma_query_port(struct ib_device *ibdev, u32 port,
+ struct ib_port_attr *attr)
+{
+ struct erdma_dev *dev = to_edev(ibdev);
+ struct net_device *ndev = dev->netdev;
+
+ memset(attr, 0, sizeof(*attr));
+
+ attr->gid_tbl_len = 1;
+ attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP;
+ attr->max_msg_sz = -1;
+
+ if (!ndev)
+ goto out;
+
+ ib_get_eth_speed(ibdev, port, &attr->active_speed, &attr->active_width);
+ attr->max_mtu = ib_mtu_int_to_enum(ndev->mtu);
+ attr->active_mtu = ib_mtu_int_to_enum(ndev->mtu);
+ if (netif_running(ndev) && netif_carrier_ok(ndev))
+ dev->state = IB_PORT_ACTIVE;
+ else
+ dev->state = IB_PORT_DOWN;
+ attr->state = dev->state;
+
+out:
+ if (dev->state == IB_PORT_ACTIVE)
+ attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
+ else
+ attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
+
+ return 0;
+}
+
+int erdma_get_port_immutable(struct ib_device *ibdev, u32 port,
+ struct ib_port_immutable *port_immutable)
+{
+ port_immutable->gid_tbl_len = 1;
+ port_immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
+
+ return 0;
+}
+
+int erdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+{
+ struct erdma_pd *pd = to_epd(ibpd);
+ struct erdma_dev *dev = to_edev(ibpd->device);
+ int pdn;
+
+ pdn = erdma_alloc_idx(&dev->res_cb[ERDMA_RES_TYPE_PD]);
+ if (pdn < 0)
+ return pdn;
+
+ pd->pdn = pdn;
+
+ return 0;
+}
+
+int erdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+{
+ struct erdma_pd *pd = to_epd(ibpd);
+ struct erdma_dev *dev = to_edev(ibpd->device);
+
+ erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_PD], pd->pdn);
+
+ return 0;
+}
+
+static int erdma_qp_validate_cap(struct erdma_dev *dev,
+ struct ib_qp_init_attr *attrs)
+{
+ if ((attrs->cap.max_send_wr > dev->attrs.max_send_wr) ||
+ (attrs->cap.max_recv_wr > dev->attrs.max_recv_wr) ||
+ (attrs->cap.max_send_sge > dev->attrs.max_send_sge) ||
+ (attrs->cap.max_recv_sge > dev->attrs.max_recv_sge) ||
+ (attrs->cap.max_inline_data > ERDMA_MAX_INLINE) ||
+ !attrs->cap.max_send_wr || !attrs->cap.max_recv_wr) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int erdma_qp_validate_attr(struct erdma_dev *dev,
+ struct ib_qp_init_attr *attrs)
+{
+ if (attrs->qp_type != IB_QPT_RC)
+ return -EOPNOTSUPP;
+
+ if (attrs->srq)
+ return -EOPNOTSUPP;
+
+ if (!attrs->send_cq || !attrs->recv_cq)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static void free_kernel_qp(struct erdma_qp *qp)
+{
+ struct erdma_dev *dev = qp->dev;
+
+ vfree(qp->kern_qp.swr_tbl);
+ vfree(qp->kern_qp.rwr_tbl);
+
+ if (qp->kern_qp.sq_buf)
+ dma_free_coherent(
+ &dev->pdev->dev,
+ WARPPED_BUFSIZE(qp->attrs.sq_size << SQEBB_SHIFT),
+ qp->kern_qp.sq_buf, qp->kern_qp.sq_buf_dma_addr);
+
+ if (qp->kern_qp.rq_buf)
+ dma_free_coherent(
+ &dev->pdev->dev,
+ WARPPED_BUFSIZE(qp->attrs.rq_size << RQE_SHIFT),
+ qp->kern_qp.rq_buf, qp->kern_qp.rq_buf_dma_addr);
+}
+
+static int init_kernel_qp(struct erdma_dev *dev, struct erdma_qp *qp,
+ struct ib_qp_init_attr *attrs)
+{
+ struct erdma_kqp *kqp = &qp->kern_qp;
+ int size;
+
+ if (attrs->sq_sig_type == IB_SIGNAL_ALL_WR)
+ kqp->sig_all = 1;
+
+ kqp->sq_pi = 0;
+ kqp->sq_ci = 0;
+ kqp->rq_pi = 0;
+ kqp->rq_ci = 0;
+ kqp->hw_sq_db =
+ dev->func_bar + (ERDMA_SDB_SHARED_PAGE_INDEX << PAGE_SHIFT);
+ kqp->hw_rq_db = dev->func_bar + ERDMA_BAR_RQDB_SPACE_OFFSET;
+
+ kqp->swr_tbl = vmalloc(qp->attrs.sq_size * sizeof(u64));
+ kqp->rwr_tbl = vmalloc(qp->attrs.rq_size * sizeof(u64));
+ if (!kqp->swr_tbl || !kqp->rwr_tbl)
+ goto err_out;
+
+ size = (qp->attrs.sq_size << SQEBB_SHIFT) + ERDMA_EXTRA_BUFFER_SIZE;
+ kqp->sq_buf = dma_alloc_coherent(&dev->pdev->dev, size,
+ &kqp->sq_buf_dma_addr, GFP_KERNEL);
+ if (!kqp->sq_buf)
+ goto err_out;
+
+ size = (qp->attrs.rq_size << RQE_SHIFT) + ERDMA_EXTRA_BUFFER_SIZE;
+ kqp->rq_buf = dma_alloc_coherent(&dev->pdev->dev, size,
+ &kqp->rq_buf_dma_addr, GFP_KERNEL);
+ if (!kqp->rq_buf)
+ goto err_out;
+
+ kqp->sq_db_info = kqp->sq_buf + (qp->attrs.sq_size << SQEBB_SHIFT);
+ kqp->rq_db_info = kqp->rq_buf + (qp->attrs.rq_size << RQE_SHIFT);
+
+ return 0;
+
+err_out:
+ free_kernel_qp(qp);
+ return -ENOMEM;
+}
+
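+/*
+ * Pin the user buffer and gather its DMA block addresses, either into
+ * the inline MTT array or into an indirect MTT buffer that is itself
+ * DMA-mapped for the device.
+ */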
+static int get_mtt_entries(struct erdma_dev *dev, struct erdma_mem *mem,
+ u64 start, u64 len, int access, u64 virt,
+ unsigned long req_page_size, u8 force_indirect_mtt)
+{
+ struct ib_block_iter biter;
+ uint64_t *phy_addr = NULL;
+ int ret = 0;
+
+ mem->umem = ib_umem_get(&dev->ibdev, start, len, access);
+ if (IS_ERR(mem->umem)) {
+ ret = PTR_ERR(mem->umem);
+ mem->umem = NULL;
+ return ret;
+ }
+
+ mem->va = virt;
+ mem->len = len;
+ mem->page_size = ib_umem_find_best_pgsz(mem->umem, req_page_size, virt);
+ mem->page_offset = start & (mem->page_size - 1);
+ mem->mtt_nents = ib_umem_num_dma_blocks(mem->umem, mem->page_size);
+ mem->page_cnt = mem->mtt_nents;
+
+ if (mem->page_cnt > ERDMA_MAX_INLINE_MTT_ENTRIES ||
+ force_indirect_mtt) {
+ mem->mtt_type = ERDMA_MR_INDIRECT_MTT;
+ mem->mtt_buf =
+ alloc_pages_exact(MTT_SIZE(mem->page_cnt), GFP_KERNEL);
+ if (!mem->mtt_buf) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ phy_addr = mem->mtt_buf;
+ } else {
+ mem->mtt_type = ERDMA_MR_INLINE_MTT;
+ phy_addr = mem->mtt_entry;
+ }
+
+ rdma_umem_for_each_dma_block(mem->umem, &biter, mem->page_size) {
+ *phy_addr = rdma_block_iter_dma_address(&biter);
+ phy_addr++;
+ }
+
+ if (mem->mtt_type == ERDMA_MR_INDIRECT_MTT) {
+ mem->mtt_entry[0] =
+ dma_map_single(&dev->pdev->dev, mem->mtt_buf,
+ MTT_SIZE(mem->page_cnt), DMA_TO_DEVICE);
+ if (dma_mapping_error(&dev->pdev->dev, mem->mtt_entry[0])) {
+ free_pages_exact(mem->mtt_buf, MTT_SIZE(mem->page_cnt));
+ mem->mtt_buf = NULL;
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ }
+
+ return 0;
+
+error_ret:
+ if (mem->umem) {
+ ib_umem_release(mem->umem);
+ mem->umem = NULL;
+ }
+
+ return ret;
+}
+
+static void put_mtt_entries(struct erdma_dev *dev, struct erdma_mem *mem)
+{
+ if (mem->mtt_buf) {
+ dma_unmap_single(&dev->pdev->dev, mem->mtt_entry[0],
+ MTT_SIZE(mem->page_cnt), DMA_TO_DEVICE);
+ free_pages_exact(mem->mtt_buf, MTT_SIZE(mem->page_cnt));
+ }
+
+ if (mem->umem) {
+ ib_umem_release(mem->umem);
+ mem->umem = NULL;
+ }
+}
+
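+/*
+ * Doorbell records share pages: reuse an already-pinned page from the
+ * per-context list when possible, otherwise pin it and add it with a
+ * reference count.
+ */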
+static int erdma_map_user_dbrecords(struct erdma_ucontext *ctx,
+ u64 dbrecords_va,
+ struct erdma_user_dbrecords_page **dbr_page,
+ dma_addr_t *dma_addr)
+{
+ struct erdma_user_dbrecords_page *page = NULL;
+ int rv = 0;
+
+ mutex_lock(&ctx->dbrecords_page_mutex);
+
+ list_for_each_entry(page, &ctx->dbrecords_page_list, list)
+ if (page->va == (dbrecords_va & PAGE_MASK))
+ goto found;
+
+ page = kmalloc(sizeof(*page), GFP_KERNEL);
+ if (!page) {
+ rv = -ENOMEM;
+ goto out;
+ }
+
+ page->va = (dbrecords_va & PAGE_MASK);
+ page->refcnt = 0;
+
+ page->umem = ib_umem_get(ctx->ibucontext.device,
+ dbrecords_va & PAGE_MASK, PAGE_SIZE, 0);
+ if (IS_ERR(page->umem)) {
+ rv = PTR_ERR(page->umem);
+ kfree(page);
+ goto out;
+ }
+
+ list_add(&page->list, &ctx->dbrecords_page_list);
+
+found:
+ *dma_addr = sg_dma_address(page->umem->sgt_append.sgt.sgl) +
+ (dbrecords_va & ~PAGE_MASK);
+ *dbr_page = page;
+ page->refcnt++;
+
+out:
+ mutex_unlock(&ctx->dbrecords_page_mutex);
+ return rv;
+}
+
+static void
+erdma_unmap_user_dbrecords(struct erdma_ucontext *ctx,
+ struct erdma_user_dbrecords_page **dbr_page)
+{
+ if (!ctx || !(*dbr_page))
+ return;
+
+ mutex_lock(&ctx->dbrecords_page_mutex);
+ if (--(*dbr_page)->refcnt == 0) {
+ list_del(&(*dbr_page)->list);
+ ib_umem_release((*dbr_page)->umem);
+ kfree(*dbr_page);
+ }
+
+ *dbr_page = NULL;
+ mutex_unlock(&ctx->dbrecords_page_mutex);
+}
+
+static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx,
+ u64 va, u32 len, u64 db_info_va)
+{
+ dma_addr_t db_info_dma_addr;
+ u32 rq_offset;
+ int ret;
+
+ if (len < (PAGE_ALIGN(qp->attrs.sq_size * SQEBB_SIZE) +
+ qp->attrs.rq_size * RQE_SIZE))
+ return -EINVAL;
+
+ ret = get_mtt_entries(qp->dev, &qp->user_qp.sq_mtt, va,
+ qp->attrs.sq_size << SQEBB_SHIFT, 0, va,
+ (SZ_1M - SZ_4K), 1);
+ if (ret)
+ return ret;
+
+ rq_offset = PAGE_ALIGN(qp->attrs.sq_size << SQEBB_SHIFT);
+ qp->user_qp.rq_offset = rq_offset;
+
+ ret = get_mtt_entries(qp->dev, &qp->user_qp.rq_mtt, va + rq_offset,
+ qp->attrs.rq_size << RQE_SHIFT, 0, va + rq_offset,
+ (SZ_1M - SZ_4K), 1);
+ if (ret)
+ goto put_sq_mtt;
+
+ ret = erdma_map_user_dbrecords(uctx, db_info_va,
+ &qp->user_qp.user_dbr_page,
+ &db_info_dma_addr);
+ if (ret)
+ goto put_rq_mtt;
+
+ qp->user_qp.sq_db_info_dma_addr = db_info_dma_addr;
+ qp->user_qp.rq_db_info_dma_addr = db_info_dma_addr + ERDMA_DB_SIZE;
+
+ return 0;
+
+put_rq_mtt:
+ put_mtt_entries(qp->dev, &qp->user_qp.rq_mtt);
+
+put_sq_mtt:
+ put_mtt_entries(qp->dev, &qp->user_qp.sq_mtt);
+
+ return ret;
+}
+
+static void free_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx)
+{
+ put_mtt_entries(qp->dev, &qp->user_qp.sq_mtt);
+ put_mtt_entries(qp->dev, &qp->user_qp.rq_mtt);
+ erdma_unmap_user_dbrecords(uctx, &qp->user_qp.user_dbr_page);
+}
+
+int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata)
+{
+ struct erdma_qp *qp = to_eqp(ibqp);
+ struct erdma_dev *dev = to_edev(ibqp->device);
+ struct erdma_ucontext *uctx = rdma_udata_to_drv_context(
+ udata, struct erdma_ucontext, ibucontext);
+ struct erdma_ureq_create_qp ureq;
+ struct erdma_uresp_create_qp uresp;
+ int ret;
+
+ ret = erdma_qp_validate_cap(dev, attrs);
+ if (ret)
+ goto err_out;
+
+ ret = erdma_qp_validate_attr(dev, attrs);
+ if (ret)
+ goto err_out;
+
+ qp->scq = to_ecq(attrs->send_cq);
+ qp->rcq = to_ecq(attrs->recv_cq);
+ qp->dev = dev;
+ qp->attrs.cc = dev->attrs.cc;
+
+ init_rwsem(&qp->state_lock);
+ kref_init(&qp->ref);
+ init_completion(&qp->safe_free);
+
+ ret = xa_alloc_cyclic(&dev->qp_xa, &qp->ibqp.qp_num, qp,
+ XA_LIMIT(1, dev->attrs.max_qp - 1),
+ &dev->next_alloc_qpn, GFP_KERNEL);
+ if (ret < 0) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ qp->attrs.sq_size = roundup_pow_of_two(attrs->cap.max_send_wr *
+ ERDMA_MAX_WQEBB_PER_SQE);
+ qp->attrs.rq_size = roundup_pow_of_two(attrs->cap.max_recv_wr);
+
+ if (uctx) {
+ ret = ib_copy_from_udata(&ureq, udata,
+ min(sizeof(ureq), udata->inlen));
+ if (ret)
+ goto err_out_xa;
+
+ ret = init_user_qp(qp, uctx, ureq.qbuf_va, ureq.qbuf_len,
+ ureq.db_record_va);
+ if (ret)
+ goto err_out_xa;
+
+ memset(&uresp, 0, sizeof(uresp));
+
+ uresp.num_sqe = qp->attrs.sq_size;
+ uresp.num_rqe = qp->attrs.rq_size;
+ uresp.qp_id = QP_ID(qp);
+ uresp.rq_offset = qp->user_qp.rq_offset;
+
+ ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (ret)
+ goto err_out_cmd;
+ } else {
+ init_kernel_qp(dev, qp, attrs);
+ }
+
+ qp->attrs.max_send_sge = attrs->cap.max_send_sge;
+ qp->attrs.max_recv_sge = attrs->cap.max_recv_sge;
+ qp->attrs.state = ERDMA_QP_STATE_IDLE;
+
+ ret = create_qp_cmd(dev, qp);
+ if (ret)
+ goto err_out_cmd;
+
+ spin_lock_init(&qp->lock);
+
+ return 0;
+
+err_out_cmd:
+ if (uctx)
+ free_user_qp(qp, uctx);
+ else
+ free_kernel_qp(qp);
+err_out_xa:
+ xa_erase(&dev->qp_xa, QP_ID(qp));
+err_out:
+ return ret;
+}
+
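+/*
+ * Allocate an STag: the index occupies the upper 24 bits, the low
+ * 8-bit key is kept zero for now.
+ */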
+static int erdma_create_stag(struct erdma_dev *dev, u32 *stag)
+{
+ int stag_idx;
+
+ stag_idx = erdma_alloc_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX]);
+ if (stag_idx < 0)
+ return stag_idx;
+
+	/* For now, the key field is always kept zero. */
+ *stag = (stag_idx << 8);
+
+ return 0;
+}
+
+struct ib_mr *erdma_get_dma_mr(struct ib_pd *ibpd, int acc)
+{
+ struct erdma_dev *dev = to_edev(ibpd->device);
+ struct erdma_mr *mr;
+ u32 stag;
+ int ret;
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ ret = erdma_create_stag(dev, &stag);
+ if (ret)
+ goto out_free;
+
+ mr->type = ERDMA_MR_TYPE_DMA;
+
+ mr->ibmr.lkey = stag;
+ mr->ibmr.rkey = stag;
+ mr->ibmr.pd = ibpd;
+ mr->access = ERDMA_MR_ACC_LR | to_erdma_access_flags(acc);
+ ret = regmr_cmd(dev, mr);
+ if (ret)
+ goto out_remove_stag;
+
+ return &mr->ibmr;
+
+out_remove_stag:
+ erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX],
+ mr->ibmr.lkey >> 8);
+
+out_free:
+ kfree(mr);
+
+ return ERR_PTR(ret);
+}
+
+struct ib_mr *erdma_ib_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
+ u32 max_num_sg)
+{
+ struct erdma_mr *mr;
+ struct erdma_dev *dev = to_edev(ibpd->device);
+ int ret;
+ u32 stag;
+
+ if (mr_type != IB_MR_TYPE_MEM_REG)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (max_num_sg > ERDMA_MR_MAX_MTT_CNT)
+ return ERR_PTR(-EINVAL);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ ret = erdma_create_stag(dev, &stag);
+ if (ret)
+ goto out_free;
+
+ mr->type = ERDMA_MR_TYPE_FRMR;
+
+ mr->ibmr.lkey = stag;
+ mr->ibmr.rkey = stag;
+ mr->ibmr.pd = ibpd;
+	/* Access rights are updated when the FRMR is posted via IB_WR_REG_MR. */
+ mr->access = ERDMA_MR_ACC_LR | ERDMA_MR_ACC_LW | ERDMA_MR_ACC_RR |
+ ERDMA_MR_ACC_RW;
+
+ mr->mem.page_size = PAGE_SIZE; /* update it later. */
+ mr->mem.page_cnt = max_num_sg;
+ mr->mem.mtt_type = ERDMA_MR_INDIRECT_MTT;
+ mr->mem.mtt_buf =
+ alloc_pages_exact(MTT_SIZE(mr->mem.page_cnt), GFP_KERNEL);
+ if (!mr->mem.mtt_buf) {
+ ret = -ENOMEM;
+ goto out_remove_stag;
+ }
+
+ mr->mem.mtt_entry[0] =
+ dma_map_single(&dev->pdev->dev, mr->mem.mtt_buf,
+ MTT_SIZE(mr->mem.page_cnt), DMA_TO_DEVICE);
+ if (dma_mapping_error(&dev->pdev->dev, mr->mem.mtt_entry[0])) {
+ ret = -ENOMEM;
+ goto out_free_mtt;
+ }
+
+ ret = regmr_cmd(dev, mr);
+ if (ret)
+ goto out_dma_unmap;
+
+ return &mr->ibmr;
+
+out_dma_unmap:
+ dma_unmap_single(&dev->pdev->dev, mr->mem.mtt_entry[0],
+ MTT_SIZE(mr->mem.page_cnt), DMA_TO_DEVICE);
+out_free_mtt:
+ free_pages_exact(mr->mem.mtt_buf, MTT_SIZE(mr->mem.page_cnt));
+
+out_remove_stag:
+ erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX],
+ mr->ibmr.lkey >> 8);
+
+out_free:
+ kfree(mr);
+
+ return ERR_PTR(ret);
+}
+
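+/*
+ * ib_map_mr_sg() callback: append one page address to the FRMR's
+ * indirect MTT buffer.
+ */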
+static int erdma_set_page(struct ib_mr *ibmr, u64 addr)
+{
+ struct erdma_mr *mr = to_emr(ibmr);
+
+ if (mr->mem.mtt_nents >= mr->mem.page_cnt)
+ return -1;
+
+ *((u64 *)mr->mem.mtt_buf + mr->mem.mtt_nents) = addr;
+ mr->mem.mtt_nents++;
+
+ return 0;
+}
+
+int erdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset)
+{
+ struct erdma_mr *mr = to_emr(ibmr);
+ int num;
+
+ mr->mem.mtt_nents = 0;
+
+ num = ib_sg_to_pages(&mr->ibmr, sg, sg_nents, sg_offset,
+ erdma_set_page);
+
+ return num;
+}
+
+struct ib_mr *erdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
+ u64 virt, int access, struct ib_udata *udata)
+{
+ struct erdma_mr *mr = NULL;
+ struct erdma_dev *dev = to_edev(ibpd->device);
+ u32 stag;
+ int ret;
+
+ if (!len || len > dev->attrs.max_mr_size)
+ return ERR_PTR(-EINVAL);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ ret = get_mtt_entries(dev, &mr->mem, start, len, access, virt,
+ SZ_2G - SZ_4K, 0);
+ if (ret)
+ goto err_out_free;
+
+ ret = erdma_create_stag(dev, &stag);
+ if (ret)
+ goto err_out_put_mtt;
+
+ mr->ibmr.lkey = mr->ibmr.rkey = stag;
+ mr->ibmr.pd = ibpd;
+ mr->mem.va = virt;
+ mr->mem.len = len;
+ mr->access = ERDMA_MR_ACC_LR | to_erdma_access_flags(access);
+ mr->valid = 1;
+ mr->type = ERDMA_MR_TYPE_NORMAL;
+
+ ret = regmr_cmd(dev, mr);
+ if (ret)
+ goto err_out_mr;
+
+ return &mr->ibmr;
+
+err_out_mr:
+ erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX],
+ mr->ibmr.lkey >> 8);
+
+err_out_put_mtt:
+ put_mtt_entries(dev, &mr->mem);
+
+err_out_free:
+ kfree(mr);
+
+ return ERR_PTR(ret);
+}
+
+int erdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
+{
+ struct erdma_mr *mr;
+ struct erdma_dev *dev = to_edev(ibmr->device);
+ struct erdma_cmdq_dereg_mr_req req;
+ int ret;
+
+ mr = to_emr(ibmr);
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_DEREG_MR);
+
+ req.cfg = FIELD_PREP(ERDMA_CMD_MR_MPT_IDX_MASK, ibmr->lkey >> 8) |
+ FIELD_PREP(ERDMA_CMD_MR_KEY_MASK, ibmr->lkey & 0xFF);
+
+ ret = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
+ NULL);
+ if (ret)
+ return ret;
+
+ erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX], ibmr->lkey >> 8);
+
+ put_mtt_entries(dev, &mr->mem);
+
+ kfree(mr);
+ return 0;
+}
+
+int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+{
+ struct erdma_cq *cq = to_ecq(ibcq);
+ struct erdma_dev *dev = to_edev(ibcq->device);
+ struct erdma_ucontext *ctx = rdma_udata_to_drv_context(
+ udata, struct erdma_ucontext, ibucontext);
+ int err;
+ struct erdma_cmdq_destroy_cq_req req;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_DESTROY_CQ);
+ req.cqn = cq->cqn;
+
+ err = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
+ NULL);
+ if (err)
+ return err;
+
+ if (rdma_is_kernel_res(&cq->ibcq.res)) {
+ dma_free_coherent(&dev->pdev->dev,
+ WARPPED_BUFSIZE(cq->depth << CQE_SHIFT),
+ cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr);
+ } else {
+ erdma_unmap_user_dbrecords(ctx, &cq->user_cq.user_dbr_page);
+ put_mtt_entries(dev, &cq->user_cq.qbuf_mtt);
+ }
+
+ xa_erase(&dev->cq_xa, cq->cqn);
+
+ return 0;
+}
+
+int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+{
+ struct erdma_qp *qp = to_eqp(ibqp);
+ struct erdma_dev *dev = to_edev(ibqp->device);
+ struct erdma_ucontext *ctx = rdma_udata_to_drv_context(
+ udata, struct erdma_ucontext, ibucontext);
+ struct erdma_qp_attrs qp_attrs;
+ int err;
+ struct erdma_cmdq_destroy_qp_req req;
+
+ down_write(&qp->state_lock);
+ qp_attrs.state = ERDMA_QP_STATE_ERROR;
+ erdma_modify_qp_internal(qp, &qp_attrs, ERDMA_QP_ATTR_STATE);
+ up_write(&qp->state_lock);
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
+ CMDQ_OPCODE_DESTROY_QP);
+ req.qpn = QP_ID(qp);
+
+ err = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
+ NULL);
+ if (err)
+ return err;
+
+ erdma_qp_put(qp);
+ wait_for_completion(&qp->safe_free);
+
+ if (rdma_is_kernel_res(&qp->ibqp.res)) {
+ vfree(qp->kern_qp.swr_tbl);
+ vfree(qp->kern_qp.rwr_tbl);
+ dma_free_coherent(
+ &dev->pdev->dev,
+ WARPPED_BUFSIZE(qp->attrs.rq_size << RQE_SHIFT),
+ qp->kern_qp.rq_buf, qp->kern_qp.rq_buf_dma_addr);
+ dma_free_coherent(
+ &dev->pdev->dev,
+ WARPPED_BUFSIZE(qp->attrs.sq_size << SQEBB_SHIFT),
+ qp->kern_qp.sq_buf, qp->kern_qp.sq_buf_dma_addr);
+ } else {
+ put_mtt_entries(dev, &qp->user_qp.sq_mtt);
+ put_mtt_entries(dev, &qp->user_qp.rq_mtt);
+ erdma_unmap_user_dbrecords(ctx, &qp->user_qp.user_dbr_page);
+ }
+
+ if (qp->cep)
+ erdma_cep_put(qp->cep);
+ xa_erase(&dev->qp_xa, QP_ID(qp));
+
+ return 0;
+}
+
+void erdma_qp_get_ref(struct ib_qp *ibqp)
+{
+ erdma_qp_get(to_eqp(ibqp));
+}
+
+void erdma_qp_put_ref(struct ib_qp *ibqp)
+{
+ erdma_qp_put(to_eqp(ibqp));
+}
+
+int erdma_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma)
+{
+ struct rdma_user_mmap_entry *rdma_entry;
+ struct erdma_user_mmap_entry *entry;
+ pgprot_t prot;
+ int err;
+
+ rdma_entry = rdma_user_mmap_entry_get(ctx, vma);
+ if (!rdma_entry)
+ return -EINVAL;
+
+ entry = to_emmap(rdma_entry);
+
+ switch (entry->mmap_flag) {
+ case ERDMA_MMAP_IO_NC:
+ /* map doorbell. */
+ prot = pgprot_device(vma->vm_page_prot);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = rdma_user_mmap_io(ctx, vma, PFN_DOWN(entry->address), PAGE_SIZE,
+ prot, rdma_entry);
+
+ rdma_user_mmap_entry_put(rdma_entry);
+ return err;
+}
+
+void erdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
+{
+ struct erdma_user_mmap_entry *entry = to_emmap(rdma_entry);
+
+ kfree(entry);
+}
+
+#define ERDMA_SDB_PAGE 0
+#define ERDMA_SDB_ENTRY 1
+#define ERDMA_SDB_SHARED 2
+
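+/*
+ * Pick the send doorbell resource for a new user context: prefer a
+ * dedicated SDB page, then a dedicated SDB entry, and fall back to the
+ * shared doorbell page.
+ */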
+static void alloc_db_resources(struct erdma_dev *dev,
+ struct erdma_ucontext *ctx)
+{
+ u32 bitmap_idx;
+ struct erdma_devattr *attrs = &dev->attrs;
+
+ if (attrs->disable_dwqe)
+ goto alloc_normal_db;
+
+ /* Try to alloc independent SDB page. */
+ spin_lock(&dev->db_bitmap_lock);
+ bitmap_idx = find_first_zero_bit(dev->sdb_page, attrs->dwqe_pages);
+ if (bitmap_idx != attrs->dwqe_pages) {
+ set_bit(bitmap_idx, dev->sdb_page);
+ spin_unlock(&dev->db_bitmap_lock);
+
+ ctx->sdb_type = ERDMA_SDB_PAGE;
+ ctx->sdb_idx = bitmap_idx;
+ ctx->sdb_page_idx = bitmap_idx;
+ ctx->sdb = dev->func_bar_addr + ERDMA_BAR_SQDB_SPACE_OFFSET +
+ (bitmap_idx << PAGE_SHIFT);
+ ctx->sdb_page_off = 0;
+
+ return;
+ }
+
+ bitmap_idx = find_first_zero_bit(dev->sdb_entry, attrs->dwqe_entries);
+ if (bitmap_idx != attrs->dwqe_entries) {
+ set_bit(bitmap_idx, dev->sdb_entry);
+ spin_unlock(&dev->db_bitmap_lock);
+
+ ctx->sdb_type = ERDMA_SDB_ENTRY;
+ ctx->sdb_idx = bitmap_idx;
+ ctx->sdb_page_idx = attrs->dwqe_pages +
+ bitmap_idx / ERDMA_DWQE_TYPE1_CNT_PER_PAGE;
+ ctx->sdb_page_off = bitmap_idx % ERDMA_DWQE_TYPE1_CNT_PER_PAGE;
+
+ ctx->sdb = dev->func_bar_addr + ERDMA_BAR_SQDB_SPACE_OFFSET +
+ (ctx->sdb_page_idx << PAGE_SHIFT);
+
+ return;
+ }
+
+ spin_unlock(&dev->db_bitmap_lock);
+
+alloc_normal_db:
+ ctx->sdb_type = ERDMA_SDB_SHARED;
+ ctx->sdb_idx = 0;
+ ctx->sdb_page_idx = ERDMA_SDB_SHARED_PAGE_INDEX;
+ ctx->sdb_page_off = 0;
+
+ ctx->sdb = dev->func_bar_addr + (ctx->sdb_page_idx << PAGE_SHIFT);
+}
+
+static void erdma_uctx_user_mmap_entries_remove(struct erdma_ucontext *uctx)
+{
+ rdma_user_mmap_entry_remove(uctx->sq_db_mmap_entry);
+ rdma_user_mmap_entry_remove(uctx->rq_db_mmap_entry);
+ rdma_user_mmap_entry_remove(uctx->cq_db_mmap_entry);
+}
+
+int erdma_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *udata)
+{
+ struct erdma_ucontext *ctx = to_ectx(ibctx);
+ struct erdma_dev *dev = to_edev(ibctx->device);
+ int ret;
+ struct erdma_uresp_alloc_ctx uresp = {};
+
+ if (atomic_inc_return(&dev->num_ctx) > ERDMA_MAX_CONTEXT) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ INIT_LIST_HEAD(&ctx->dbrecords_page_list);
+ mutex_init(&ctx->dbrecords_page_mutex);
+
+ alloc_db_resources(dev, ctx);
+
+ ctx->rdb = dev->func_bar_addr + ERDMA_BAR_RQDB_SPACE_OFFSET;
+ ctx->cdb = dev->func_bar_addr + ERDMA_BAR_CQDB_SPACE_OFFSET;
+
+ if (udata->outlen < sizeof(uresp)) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ ctx->sq_db_mmap_entry = erdma_user_mmap_entry_insert(
+ ctx, (void *)ctx->sdb, PAGE_SIZE, ERDMA_MMAP_IO_NC, &uresp.sdb);
+ if (!ctx->sq_db_mmap_entry) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ ctx->rq_db_mmap_entry = erdma_user_mmap_entry_insert(
+ ctx, (void *)ctx->rdb, PAGE_SIZE, ERDMA_MMAP_IO_NC, &uresp.rdb);
+ if (!ctx->rq_db_mmap_entry) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ ctx->cq_db_mmap_entry = erdma_user_mmap_entry_insert(
+ ctx, (void *)ctx->cdb, PAGE_SIZE, ERDMA_MMAP_IO_NC, &uresp.cdb);
+ if (!ctx->cq_db_mmap_entry) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ uresp.dev_id = dev->pdev->device;
+ uresp.sdb_type = ctx->sdb_type;
+ uresp.sdb_offset = ctx->sdb_page_off;
+
+ ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (ret)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ erdma_uctx_user_mmap_entries_remove(ctx);
+ atomic_dec(&dev->num_ctx);
+ return ret;
+}
+
+void erdma_dealloc_ucontext(struct ib_ucontext *ibctx)
+{
+ struct erdma_ucontext *ctx = to_ectx(ibctx);
+ struct erdma_dev *dev = to_edev(ibctx->device);
+
+ spin_lock(&dev->db_bitmap_lock);
+ if (ctx->sdb_type == ERDMA_SDB_PAGE)
+ clear_bit(ctx->sdb_idx, dev->sdb_page);
+ else if (ctx->sdb_type == ERDMA_SDB_ENTRY)
+ clear_bit(ctx->sdb_idx, dev->sdb_entry);
+
+ erdma_uctx_user_mmap_entries_remove(ctx);
+
+ spin_unlock(&dev->db_bitmap_lock);
+
+ atomic_dec(&dev->num_ctx);
+}
+
+static int ib_qp_state_to_erdma_qp_state[IB_QPS_ERR + 1] = {
+ [IB_QPS_RESET] = ERDMA_QP_STATE_IDLE,
+ [IB_QPS_INIT] = ERDMA_QP_STATE_IDLE,
+ [IB_QPS_RTR] = ERDMA_QP_STATE_RTR,
+ [IB_QPS_RTS] = ERDMA_QP_STATE_RTS,
+ [IB_QPS_SQD] = ERDMA_QP_STATE_CLOSING,
+ [IB_QPS_SQE] = ERDMA_QP_STATE_TERMINATE,
+ [IB_QPS_ERR] = ERDMA_QP_STATE_ERROR
+};
+
+int erdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
+ struct ib_udata *udata)
+{
+ struct erdma_qp_attrs new_attrs;
+ enum erdma_qp_attr_mask erdma_attr_mask = 0;
+ struct erdma_qp *qp = to_eqp(ibqp);
+ int ret = 0;
+
+ if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
+
+ memset(&new_attrs, 0, sizeof(new_attrs));
+
+ if (attr_mask & IB_QP_STATE) {
+ new_attrs.state = ib_qp_state_to_erdma_qp_state[attr->qp_state];
+
+ erdma_attr_mask |= ERDMA_QP_ATTR_STATE;
+ }
+
+ down_write(&qp->state_lock);
+
+ ret = erdma_modify_qp_internal(qp, &new_attrs, erdma_attr_mask);
+
+ up_write(&qp->state_lock);
+
+ return ret;
+}
+
+int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
+{
+ struct erdma_qp *qp;
+ struct erdma_dev *dev;
+
+ if (ibqp && qp_attr && qp_init_attr) {
+ qp = to_eqp(ibqp);
+ dev = to_edev(ibqp->device);
+ } else {
+ return -EINVAL;
+ }
+
+ qp_attr->cap.max_inline_data = ERDMA_MAX_INLINE;
+ qp_init_attr->cap.max_inline_data = ERDMA_MAX_INLINE;
+
+ qp_attr->cap.max_send_wr = qp->attrs.sq_size;
+ qp_attr->cap.max_recv_wr = qp->attrs.rq_size;
+ qp_attr->cap.max_send_sge = qp->attrs.max_send_sge;
+ qp_attr->cap.max_recv_sge = qp->attrs.max_recv_sge;
+
+ qp_attr->path_mtu = ib_mtu_int_to_enum(dev->netdev->mtu);
+ qp_attr->max_rd_atomic = qp->attrs.irq_size;
+ qp_attr->max_dest_rd_atomic = qp->attrs.orq_size;
+
+ qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_READ;
+
+ qp_init_attr->cap = qp_attr->cap;
+
+ return 0;
+}
+
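+/* Map the user-space CQ buffer and its doorbell record. */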
+static int erdma_init_user_cq(struct erdma_ucontext *ctx, struct erdma_cq *cq,
+ struct erdma_ureq_create_cq *ureq)
+{
+ int ret;
+ struct erdma_dev *dev = to_edev(cq->ibcq.device);
+
+ ret = get_mtt_entries(dev, &cq->user_cq.qbuf_mtt, ureq->qbuf_va,
+ ureq->qbuf_len, 0, ureq->qbuf_va, SZ_64M - SZ_4K,
+ 1);
+ if (ret)
+ return ret;
+
+ ret = erdma_map_user_dbrecords(ctx, ureq->db_record_va,
+ &cq->user_cq.user_dbr_page,
+ &cq->user_cq.db_info_dma_addr);
+ if (ret)
+ put_mtt_entries(dev, &cq->user_cq.qbuf_mtt);
+
+ return ret;
+}
+
+static int erdma_init_kernel_cq(struct erdma_cq *cq)
+{
+ struct erdma_dev *dev = to_edev(cq->ibcq.device);
+
+ cq->kern_cq.qbuf =
+ dma_alloc_coherent(&dev->pdev->dev,
+ WARPPED_BUFSIZE(cq->depth << CQE_SHIFT),
+ &cq->kern_cq.qbuf_dma_addr, GFP_KERNEL);
+ if (!cq->kern_cq.qbuf)
+ return -ENOMEM;
+
+ cq->kern_cq.db_record =
+ (u64 *)(cq->kern_cq.qbuf + (cq->depth << CQE_SHIFT));
+ spin_lock_init(&cq->kern_cq.lock);
+	/* use the default CQ doorbell address */
+ cq->kern_cq.db = dev->func_bar + ERDMA_BAR_CQDB_SPACE_OFFSET;
+
+ return 0;
+}
+
+int erdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct ib_udata *udata)
+{
+ struct erdma_cq *cq = to_ecq(ibcq);
+ struct erdma_dev *dev = to_edev(ibcq->device);
+ unsigned int depth = attr->cqe;
+ int ret;
+ struct erdma_ucontext *ctx = rdma_udata_to_drv_context(
+ udata, struct erdma_ucontext, ibucontext);
+
+ if (depth > dev->attrs.max_cqe)
+ return -EINVAL;
+
+ depth = roundup_pow_of_two(depth);
+ cq->ibcq.cqe = depth;
+ cq->depth = depth;
+ cq->assoc_eqn = attr->comp_vector + 1;
+
+ ret = xa_alloc_cyclic(&dev->cq_xa, &cq->cqn, cq,
+ XA_LIMIT(1, dev->attrs.max_cq - 1),
+ &dev->next_alloc_cqn, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+
+ if (!rdma_is_kernel_res(&ibcq->res)) {
+ struct erdma_ureq_create_cq ureq;
+ struct erdma_uresp_create_cq uresp;
+
+ ret = ib_copy_from_udata(&ureq, udata,
+ min(udata->inlen, sizeof(ureq)));
+ if (ret)
+ goto err_out_xa;
+
+ ret = erdma_init_user_cq(ctx, cq, &ureq);
+ if (ret)
+ goto err_out_xa;
+
+ uresp.cq_id = cq->cqn;
+ uresp.num_cqe = depth;
+
+ ret = ib_copy_to_udata(udata, &uresp,
+ min(sizeof(uresp), udata->outlen));
+ if (ret)
+ goto err_free_res;
+ } else {
+ ret = erdma_init_kernel_cq(cq);
+ if (ret)
+ goto err_out_xa;
+ }
+
+ ret = create_cq_cmd(dev, cq);
+ if (ret)
+ goto err_free_res;
+
+ return 0;
+
+err_free_res:
+ if (!rdma_is_kernel_res(&ibcq->res)) {
+ erdma_unmap_user_dbrecords(ctx, &cq->user_cq.user_dbr_page);
+ put_mtt_entries(dev, &cq->user_cq.qbuf_mtt);
+ } else {
+ dma_free_coherent(&dev->pdev->dev,
+ WARPPED_BUFSIZE(depth << CQE_SHIFT),
+ cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr);
+ }
+
+err_out_xa:
+ xa_erase(&dev->cq_xa, cq->cqn);
+
+ return ret;
+}
+
+void erdma_port_event(struct erdma_dev *dev, enum ib_event_type reason)
+{
+ struct ib_event event;
+
+ event.device = &dev->ibdev;
+ event.element.port_num = 1;
+ event.event = reason;
+
+ ib_dispatch_event(&event);
+}
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.h b/drivers/infiniband/hw/erdma/erdma_verbs.h
new file mode 100644
index 000000000000..c7baddb1f292
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.h
@@ -0,0 +1,342 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/* Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+#ifndef __ERDMA_VERBS_H__
+#define __ERDMA_VERBS_H__
+
+#include <linux/errno.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/iw_cm.h>
+
+#include "erdma.h"
+#include "erdma_cm.h"
+#include "erdma_hw.h"
+
+/* RDMA Capability. */
+#define ERDMA_MAX_PD (128 * 1024)
+#define ERDMA_MAX_SEND_WR 4096
+#define ERDMA_MAX_ORD 128
+#define ERDMA_MAX_IRD 128
+#define ERDMA_MAX_SGE_RD 1
+#define ERDMA_MAX_CONTEXT (128 * 1024)
+#define ERDMA_MAX_SEND_SGE 6
+#define ERDMA_MAX_RECV_SGE 1
+#define ERDMA_MAX_INLINE (sizeof(struct erdma_sge) * (ERDMA_MAX_SEND_SGE))
+#define ERDMA_MAX_FRMR_PA 512
+
+enum {
+ ERDMA_MMAP_IO_NC = 0, /* no cache */
+};
+
+struct erdma_user_mmap_entry {
+ struct rdma_user_mmap_entry rdma_entry;
+ u64 address;
+ u8 mmap_flag;
+};
+
+struct erdma_ucontext {
+ struct ib_ucontext ibucontext;
+
+ u32 sdb_type;
+ u32 sdb_idx;
+ u32 sdb_page_idx;
+ u32 sdb_page_off;
+ u64 sdb;
+ u64 rdb;
+ u64 cdb;
+
+ struct rdma_user_mmap_entry *sq_db_mmap_entry;
+ struct rdma_user_mmap_entry *rq_db_mmap_entry;
+ struct rdma_user_mmap_entry *cq_db_mmap_entry;
+
+ /* doorbell records */
+ struct list_head dbrecords_page_list;
+ struct mutex dbrecords_page_mutex;
+};
+
+struct erdma_pd {
+ struct ib_pd ibpd;
+ u32 pdn;
+};
+
+/*
+ * MemoryRegion definition.
+ */
+#define ERDMA_MAX_INLINE_MTT_ENTRIES 4
+#define MTT_SIZE(mtt_cnt) (mtt_cnt << 3) /* each MTT entry takes 8 bytes. */
+#define ERDMA_MR_MAX_MTT_CNT 524288
+#define ERDMA_MTT_ENTRY_SIZE 8
+
+#define ERDMA_MR_TYPE_NORMAL 0
+#define ERDMA_MR_TYPE_FRMR 1
+#define ERDMA_MR_TYPE_DMA 2
+
+#define ERDMA_MR_INLINE_MTT 0
+#define ERDMA_MR_INDIRECT_MTT 1
+
+#define ERDMA_MR_ACC_LR BIT(0)
+#define ERDMA_MR_ACC_LW BIT(1)
+#define ERDMA_MR_ACC_RR BIT(2)
+#define ERDMA_MR_ACC_RW BIT(3)
+
+static inline u8 to_erdma_access_flags(int access)
+{
+ return (access & IB_ACCESS_REMOTE_READ ? ERDMA_MR_ACC_RR : 0) |
+ (access & IB_ACCESS_LOCAL_WRITE ? ERDMA_MR_ACC_LW : 0) |
+ (access & IB_ACCESS_REMOTE_WRITE ? ERDMA_MR_ACC_RW : 0);
+}
+
+struct erdma_mem {
+ struct ib_umem *umem;
+ void *mtt_buf;
+ u32 mtt_type;
+ u32 page_size;
+ u32 page_offset;
+ u32 page_cnt;
+ u32 mtt_nents;
+
+ u64 va;
+ u64 len;
+
+ u64 mtt_entry[ERDMA_MAX_INLINE_MTT_ENTRIES];
+};
+
+struct erdma_mr {
+ struct ib_mr ibmr;
+ struct erdma_mem mem;
+ u8 type;
+ u8 access;
+ u8 valid;
+};
+
+struct erdma_user_dbrecords_page {
+ struct list_head list;
+ struct ib_umem *umem;
+ u64 va;
+ int refcnt;
+};
+
+struct erdma_uqp {
+ struct erdma_mem sq_mtt;
+ struct erdma_mem rq_mtt;
+
+ dma_addr_t sq_db_info_dma_addr;
+ dma_addr_t rq_db_info_dma_addr;
+
+ struct erdma_user_dbrecords_page *user_dbr_page;
+
+ u32 rq_offset;
+};
+
+struct erdma_kqp {
+ u16 sq_pi;
+ u16 sq_ci;
+
+ u16 rq_pi;
+ u16 rq_ci;
+
+ u64 *swr_tbl;
+ u64 *rwr_tbl;
+
+ void __iomem *hw_sq_db;
+ void __iomem *hw_rq_db;
+
+ void *sq_buf;
+ dma_addr_t sq_buf_dma_addr;
+
+ void *rq_buf;
+ dma_addr_t rq_buf_dma_addr;
+
+ void *sq_db_info;
+ void *rq_db_info;
+
+ u8 sig_all;
+};
+
+enum erdma_qp_state {
+ ERDMA_QP_STATE_IDLE = 0,
+ ERDMA_QP_STATE_RTR = 1,
+ ERDMA_QP_STATE_RTS = 2,
+ ERDMA_QP_STATE_CLOSING = 3,
+ ERDMA_QP_STATE_TERMINATE = 4,
+ ERDMA_QP_STATE_ERROR = 5,
+ ERDMA_QP_STATE_UNDEF = 7,
+ ERDMA_QP_STATE_COUNT = 8
+};
+
+enum erdma_qp_attr_mask {
+ ERDMA_QP_ATTR_STATE = (1 << 0),
+ ERDMA_QP_ATTR_LLP_HANDLE = (1 << 2),
+ ERDMA_QP_ATTR_ORD = (1 << 3),
+ ERDMA_QP_ATTR_IRD = (1 << 4),
+ ERDMA_QP_ATTR_SQ_SIZE = (1 << 5),
+ ERDMA_QP_ATTR_RQ_SIZE = (1 << 6),
+ ERDMA_QP_ATTR_MPA = (1 << 7)
+};
+
+struct erdma_qp_attrs {
+ enum erdma_qp_state state;
+ enum erdma_cc_alg cc; /* Congestion control algorithm */
+ u32 sq_size;
+ u32 rq_size;
+ u32 orq_size;
+ u32 irq_size;
+ u32 max_send_sge;
+ u32 max_recv_sge;
+ u32 cookie;
+#define ERDMA_QP_ACTIVE 0
+#define ERDMA_QP_PASSIVE 1
+ u8 qp_type;
+ u8 pd_len;
+};
+
+struct erdma_qp {
+ struct ib_qp ibqp;
+ struct kref ref;
+ struct completion safe_free;
+ struct erdma_dev *dev;
+ struct erdma_cep *cep;
+ struct rw_semaphore state_lock;
+
+ union {
+ struct erdma_kqp kern_qp;
+ struct erdma_uqp user_qp;
+ };
+
+ struct erdma_cq *scq;
+ struct erdma_cq *rcq;
+
+ struct erdma_qp_attrs attrs;
+ spinlock_t lock;
+};
+
+struct erdma_kcq_info {
+ void *qbuf;
+ dma_addr_t qbuf_dma_addr;
+ u32 ci;
+ u32 cmdsn;
+ u32 notify_cnt;
+
+ spinlock_t lock;
+ u8 __iomem *db;
+ u64 *db_record;
+};
+
+struct erdma_ucq_info {
+ struct erdma_mem qbuf_mtt;
+ struct erdma_user_dbrecords_page *user_dbr_page;
+ dma_addr_t db_info_dma_addr;
+};
+
+struct erdma_cq {
+ struct ib_cq ibcq;
+ u32 cqn;
+
+ u32 depth;
+ u32 assoc_eqn;
+
+ union {
+ struct erdma_kcq_info kern_cq;
+ struct erdma_ucq_info user_cq;
+ };
+};
+
+#define QP_ID(qp) ((qp)->ibqp.qp_num)
+
+static inline struct erdma_qp *find_qp_by_qpn(struct erdma_dev *dev, int id)
+{
+ return (struct erdma_qp *)xa_load(&dev->qp_xa, id);
+}
+
+static inline struct erdma_cq *find_cq_by_cqn(struct erdma_dev *dev, int id)
+{
+ return (struct erdma_cq *)xa_load(&dev->cq_xa, id);
+}
+
+void erdma_qp_get(struct erdma_qp *qp);
+void erdma_qp_put(struct erdma_qp *qp);
+int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
+ enum erdma_qp_attr_mask mask);
+void erdma_qp_llp_close(struct erdma_qp *qp);
+void erdma_qp_cm_drop(struct erdma_qp *qp);
+
+static inline struct erdma_ucontext *to_ectx(struct ib_ucontext *ibctx)
+{
+ return container_of(ibctx, struct erdma_ucontext, ibucontext);
+}
+
+static inline struct erdma_pd *to_epd(struct ib_pd *pd)
+{
+ return container_of(pd, struct erdma_pd, ibpd);
+}
+
+static inline struct erdma_mr *to_emr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct erdma_mr, ibmr);
+}
+
+static inline struct erdma_qp *to_eqp(struct ib_qp *qp)
+{
+ return container_of(qp, struct erdma_qp, ibqp);
+}
+
+static inline struct erdma_cq *to_ecq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct erdma_cq, ibcq);
+}
+
+static inline struct erdma_user_mmap_entry *
+to_emmap(struct rdma_user_mmap_entry *ibmmap)
+{
+ return container_of(ibmmap, struct erdma_user_mmap_entry, rdma_entry);
+}
+
+int erdma_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *data);
+void erdma_dealloc_ucontext(struct ib_ucontext *ibctx);
+int erdma_query_device(struct ib_device *dev, struct ib_device_attr *attr,
+ struct ib_udata *data);
+int erdma_get_port_immutable(struct ib_device *dev, u32 port,
+ struct ib_port_immutable *ib_port_immutable);
+int erdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct ib_udata *data);
+int erdma_query_port(struct ib_device *dev, u32 port,
+ struct ib_port_attr *attr);
+int erdma_query_gid(struct ib_device *dev, u32 port, int idx,
+ union ib_gid *gid);
+int erdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *data);
+int erdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
+int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
+ struct ib_udata *data);
+int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
+ struct ib_qp_init_attr *init_attr);
+int erdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
+ struct ib_udata *data);
+int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
+int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
+int erdma_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
+struct ib_mr *erdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
+ u64 virt, int access, struct ib_udata *udata);
+struct ib_mr *erdma_get_dma_mr(struct ib_pd *ibpd, int rights);
+int erdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *data);
+int erdma_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma);
+void erdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
+void erdma_qp_get_ref(struct ib_qp *ibqp);
+void erdma_qp_put_ref(struct ib_qp *ibqp);
+struct ib_qp *erdma_get_ibqp(struct ib_device *dev, int id);
+int erdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *send_wr,
+ const struct ib_send_wr **bad_send_wr);
+int erdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *recv_wr,
+ const struct ib_recv_wr **bad_recv_wr);
+int erdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+struct ib_mr *erdma_ib_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
+ u32 max_num_sg);
+int erdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset);
+void erdma_port_event(struct erdma_dev *dev, enum ib_event_type reason);
+
+#endif
diff --git a/drivers/infiniband/hw/hfi1/Kconfig b/drivers/infiniband/hw/hfi1/Kconfig
index 6eb739052121..14b92e12bf29 100644
--- a/drivers/infiniband/hw/hfi1/Kconfig
+++ b/drivers/infiniband/hw/hfi1/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config INFINIBAND_HFI1
tristate "Cornelis OPX Gen1 support"
- depends on X86_64 && INFINIBAND_RDMAVT && I2C
+ depends on X86_64 && INFINIBAND_RDMAVT && I2C && !UML
select MMU_NOTIFIER
select CRC32
select I2C_ALGOBIT
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 2e4cf2b11653..629beff053ad 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -1179,8 +1179,10 @@ static int setup_base_ctxt(struct hfi1_filedata *fd,
goto done;
ret = init_user_ctxt(fd, uctxt);
- if (ret)
+ if (ret) {
+ hfi1_free_ctxt_rcv_groups(uctxt);
goto done;
+ }
user_init(uctxt);
diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c
index d6bbdb8fcb50..5d9a7b09ca37 100644
--- a/drivers/infiniband/hw/hfi1/ipoib_tx.c
+++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c
@@ -742,9 +742,7 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
kzalloc_node(sizeof(*tx->sdma_hdr),
GFP_KERNEL, priv->dd->node);
- netif_tx_napi_add(dev, &txq->napi,
- hfi1_ipoib_poll_tx_ring,
- NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(dev, &txq->napi, hfi1_ipoib_poll_tx_ring);
}
return 0;
diff --git a/drivers/infiniband/hw/hfi1/netdev_rx.c b/drivers/infiniband/hw/hfi1/netdev_rx.c
index 03b098a494b5..3dfa5aff2512 100644
--- a/drivers/infiniband/hw/hfi1/netdev_rx.c
+++ b/drivers/infiniband/hw/hfi1/netdev_rx.c
@@ -216,7 +216,7 @@ static int hfi1_netdev_rxq_init(struct hfi1_netdev_rx *rx)
* right now.
*/
set_bit(NAPI_STATE_NO_BUSY_POLL, &rxq->napi.state);
- netif_napi_add(dev, &rxq->napi, hfi1_netdev_rx_napi, 64);
+ netif_napi_add_weight(dev, &rxq->napi, hfi1_netdev_rx_napi, 64);
rc = msix_netdev_request_rcd_irq(rxq->rcd);
if (rc)
goto bail_context_irq_failure;
diff --git a/drivers/infiniband/hw/hfi1/pio_copy.c b/drivers/infiniband/hw/hfi1/pio_copy.c
index 136f9a99e1e0..7690f996d5e3 100644
--- a/drivers/infiniband/hw/hfi1/pio_copy.c
+++ b/drivers/infiniband/hw/hfi1/pio_copy.c
@@ -172,7 +172,7 @@ static inline void jcopy(u8 *dest, const u8 *src, u32 n)
}
/*
- * Read nbytes from "from" and and place them in the low bytes
+ * Read nbytes from "from" and place them in the low bytes
* of pbuf->carry. Other bytes are left as-is. Any previous
* value in pbuf->carry is lost.
*
diff --git a/drivers/infiniband/hw/hfi1/trace_dbg.h b/drivers/infiniband/hw/hfi1/trace_dbg.h
index 707f1053f0b7..582b6f68df3d 100644
--- a/drivers/infiniband/hw/hfi1/trace_dbg.h
+++ b/drivers/infiniband/hw/hfi1/trace_dbg.h
@@ -26,14 +26,10 @@ DECLARE_EVENT_CLASS(hfi1_trace_template,
TP_PROTO(const char *function, struct va_format *vaf),
TP_ARGS(function, vaf),
TP_STRUCT__entry(__string(function, function)
- __dynamic_array(char, msg, MAX_MSG_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(__assign_str(function, function);
- WARN_ON_ONCE(vsnprintf
- (__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >=
- MAX_MSG_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("(%s) %s",
__get_str(function),
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 2855e9ad4b32..f848eedc6a23 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -959,6 +959,7 @@ struct hns_roce_dev {
const struct hns_roce_hw *hw;
void *priv;
struct workqueue_struct *irq_workq;
+ struct work_struct ecc_work;
const struct hns_roce_dfx_hw *dfx;
u32 func_num;
u32 is_vf;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index ba3c742258ef..cbdafaac678a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -55,6 +55,42 @@ enum {
CMD_RST_PRC_EBUSY,
};
+enum ecc_resource_type {
+ ECC_RESOURCE_QPC,
+ ECC_RESOURCE_CQC,
+ ECC_RESOURCE_MPT,
+ ECC_RESOURCE_SRQC,
+ ECC_RESOURCE_GMV,
+ ECC_RESOURCE_QPC_TIMER,
+ ECC_RESOURCE_CQC_TIMER,
+ ECC_RESOURCE_SCCC,
+ ECC_RESOURCE_COUNT,
+};
+
+static const struct {
+ const char *name;
+ u8 read_bt0_op;
+ u8 write_bt0_op;
+} fmea_ram_res[] = {
+ { "ECC_RESOURCE_QPC",
+ HNS_ROCE_CMD_READ_QPC_BT0, HNS_ROCE_CMD_WRITE_QPC_BT0 },
+ { "ECC_RESOURCE_CQC",
+ HNS_ROCE_CMD_READ_CQC_BT0, HNS_ROCE_CMD_WRITE_CQC_BT0 },
+ { "ECC_RESOURCE_MPT",
+ HNS_ROCE_CMD_READ_MPT_BT0, HNS_ROCE_CMD_WRITE_MPT_BT0 },
+ { "ECC_RESOURCE_SRQC",
+ HNS_ROCE_CMD_READ_SRQC_BT0, HNS_ROCE_CMD_WRITE_SRQC_BT0 },
+ /* ECC_RESOURCE_GMV is handled by cmdq, not mailbox */
+ { "ECC_RESOURCE_GMV",
+ 0, 0 },
+ { "ECC_RESOURCE_QPC_TIMER",
+ HNS_ROCE_CMD_READ_QPC_TIMER_BT0, HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0 },
+ { "ECC_RESOURCE_CQC_TIMER",
+ HNS_ROCE_CMD_READ_CQC_TIMER_BT0, HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0 },
+ { "ECC_RESOURCE_SCCC",
+ HNS_ROCE_CMD_READ_SCCC_BT0, HNS_ROCE_CMD_WRITE_SCCC_BT0 },
+};
+
static inline void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
struct ib_sge *sg)
{
@@ -5855,12 +5891,12 @@ static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
!!(eq->cons_index & eq->entries)) ? aeqe : NULL;
}
-static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq)
+static irqreturn_t hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
{
struct device *dev = hr_dev->dev;
struct hns_roce_aeqe *aeqe = next_aeqe_sw_v2(eq);
- int aeqe_found = 0;
+ irqreturn_t aeqe_found = IRQ_NONE;
int event_type;
u32 queue_num;
int sub_type;
@@ -5914,7 +5950,7 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
eq->event_type = event_type;
eq->sub_type = sub_type;
++eq->cons_index;
- aeqe_found = 1;
+ aeqe_found = IRQ_HANDLED;
hns_roce_v2_init_irq_work(hr_dev, eq, queue_num);
@@ -5922,7 +5958,8 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
}
update_eq_db(eq);
- return aeqe_found;
+
+ return IRQ_RETVAL(aeqe_found);
}
static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
@@ -5937,11 +5974,11 @@ static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
}
-static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq)
+static irqreturn_t hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
{
struct hns_roce_ceqe *ceqe = next_ceqe_sw_v2(eq);
- int ceqe_found = 0;
+ irqreturn_t ceqe_found = IRQ_NONE;
u32 cqn;
while (ceqe) {
@@ -5955,21 +5992,21 @@ static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
hns_roce_cq_completion(hr_dev, cqn);
++eq->cons_index;
- ceqe_found = 1;
+ ceqe_found = IRQ_HANDLED;
ceqe = next_ceqe_sw_v2(eq);
}
update_eq_db(eq);
- return ceqe_found;
+ return IRQ_RETVAL(ceqe_found);
}
static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
{
struct hns_roce_eq *eq = eq_ptr;
struct hns_roce_dev *hr_dev = eq->hr_dev;
- int int_work;
+ irqreturn_t int_work;
if (eq->type_flag == HNS_ROCE_CEQ)
/* Completion event interrupt */
@@ -5981,27 +6018,22 @@ static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
return IRQ_RETVAL(int_work);
}
-static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
+static irqreturn_t abnormal_interrupt_basic(struct hns_roce_dev *hr_dev,
+ u32 int_st)
{
- struct hns_roce_dev *hr_dev = dev_id;
- struct device *dev = hr_dev->dev;
- int int_work = 0;
- u32 int_st;
+ struct pci_dev *pdev = hr_dev->pci_dev;
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+ const struct hnae3_ae_ops *ops = ae_dev->ops;
+ irqreturn_t int_work = IRQ_NONE;
u32 int_en;
- /* Abnormal interrupt */
- int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);
if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
- struct pci_dev *pdev = hr_dev->pci_dev;
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
- const struct hnae3_ae_ops *ops = ae_dev->ops;
+ dev_err(hr_dev->dev, "AEQ overflow!\n");
- dev_err(dev, "AEQ overflow!\n");
-
- int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S;
- roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG,
+ 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S);
/* Set reset level for reset_event() */
if (ops->set_default_reset_request)
@@ -6013,19 +6045,165 @@ static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
- int_work = 1;
- } else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_RAS_INT_S)) {
- dev_err(dev, "RAS interrupt!\n");
+ int_work = IRQ_HANDLED;
+ } else {
+ dev_err(hr_dev->dev, "there is no basic abn irq found.\n");
+ }
+
+ return IRQ_RETVAL(int_work);
+}
- int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_RAS_INT_S;
- roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
+static int fmea_ram_ecc_query(struct hns_roce_dev *hr_dev,
+ struct fmea_ram_ecc *ecc_info)
+{
+ struct hns_roce_cmq_desc desc;
+ struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
+ int ret;
- int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
- roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_QUERY_RAM_ECC, true);
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (ret)
+ return ret;
+
+ ecc_info->is_ecc_err = hr_reg_read(req, QUERY_RAM_ECC_1BIT_ERR);
+ ecc_info->res_type = hr_reg_read(req, QUERY_RAM_ECC_RES_TYPE);
+ ecc_info->index = hr_reg_read(req, QUERY_RAM_ECC_TAG);
+
+ return 0;
+}
+
+static int fmea_recover_gmv(struct hns_roce_dev *hr_dev, u32 idx)
+{
+ struct hns_roce_cmq_desc desc;
+ struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
+ u32 addr_upper;
+ u32 addr_low;
+ int ret;
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GMV_BT, true);
+ hr_reg_write(req, CFG_GMV_BT_IDX, idx);
+
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (ret) {
+ dev_err(hr_dev->dev,
+ "failed to execute cmd to read gmv, ret = %d.\n", ret);
+ return ret;
+ }
+
+ addr_low = hr_reg_read(req, CFG_GMV_BT_BA_L);
+ addr_upper = hr_reg_read(req, CFG_GMV_BT_BA_H);
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GMV_BT, false);
+ hr_reg_write(req, CFG_GMV_BT_BA_L, addr_low);
+ hr_reg_write(req, CFG_GMV_BT_BA_H, addr_upper);
+ hr_reg_write(req, CFG_GMV_BT_IDX, idx);
+
+ return hns_roce_cmq_send(hr_dev, &desc, 1);
+}
+
+static u64 fmea_get_ram_res_addr(u32 res_type, __le64 *data)
+{
+ if (res_type == ECC_RESOURCE_QPC_TIMER ||
+ res_type == ECC_RESOURCE_CQC_TIMER ||
+ res_type == ECC_RESOURCE_SCCC)
+ return le64_to_cpu(*data);
+
+ return le64_to_cpu(*data) << PAGE_SHIFT;
+}
- int_work = 1;
+static int fmea_recover_others(struct hns_roce_dev *hr_dev, u32 res_type,
+ u32 index)
+{
+ u8 write_bt0_op = fmea_ram_res[res_type].write_bt0_op;
+ u8 read_bt0_op = fmea_ram_res[res_type].read_bt0_op;
+ struct hns_roce_cmd_mailbox *mailbox;
+ u64 addr;
+ int ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, read_bt0_op, index);
+ if (ret) {
+ dev_err(hr_dev->dev,
+ "failed to execute cmd to read fmea ram, ret = %d.\n",
+ ret);
+ goto out;
+ }
+
+ addr = fmea_get_ram_res_addr(res_type, mailbox->buf);
+
+ ret = hns_roce_cmd_mbox(hr_dev, addr, 0, write_bt0_op, index);
+ if (ret)
+ dev_err(hr_dev->dev,
+ "failed to execute cmd to write fmea ram, ret = %d.\n",
+ ret);
+
+out:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ return ret;
+}
+
+static void fmea_ram_ecc_recover(struct hns_roce_dev *hr_dev,
+ struct fmea_ram_ecc *ecc_info)
+{
+ u32 res_type = ecc_info->res_type;
+ u32 index = ecc_info->index;
+ int ret;
+
+ BUILD_BUG_ON(ARRAY_SIZE(fmea_ram_res) != ECC_RESOURCE_COUNT);
+
+ if (res_type >= ECC_RESOURCE_COUNT) {
+ dev_err(hr_dev->dev, "unsupported fmea ram ecc type %u.\n",
+ res_type);
+ return;
+ }
+
+ if (res_type == ECC_RESOURCE_GMV)
+ ret = fmea_recover_gmv(hr_dev, index);
+ else
+ ret = fmea_recover_others(hr_dev, res_type, index);
+ if (ret)
+ dev_err(hr_dev->dev,
+ "failed to recover %s, index = %u, ret = %d.\n",
+ fmea_ram_res[res_type].name, index, ret);
+}
+
+static void fmea_ram_ecc_work(struct work_struct *ecc_work)
+{
+ struct hns_roce_dev *hr_dev =
+ container_of(ecc_work, struct hns_roce_dev, ecc_work);
+ struct fmea_ram_ecc ecc_info = {};
+
+ if (fmea_ram_ecc_query(hr_dev, &ecc_info)) {
+ dev_err(hr_dev->dev, "failed to query fmea ram ecc.\n");
+ return;
+ }
+
+ if (!ecc_info.is_ecc_err) {
+ dev_err(hr_dev->dev, "there is no fmea ram ecc err found.\n");
+ return;
+ }
+
+ fmea_ram_ecc_recover(hr_dev, &ecc_info);
+}
+
+static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
+{
+ struct hns_roce_dev *hr_dev = dev_id;
+ irqreturn_t int_work = IRQ_NONE;
+ u32 int_st;
+
+ int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
+
+ if (int_st) {
+ int_work = abnormal_interrupt_basic(hr_dev, int_st);
+ } else if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
+ queue_work(hr_dev->irq_workq, &hr_dev->ecc_work);
+ int_work = IRQ_HANDLED;
} else {
- dev_err(dev, "There is no abnormal irq found!\n");
+ dev_err(hr_dev->dev, "there is no abnormal irq found.\n");
}
return IRQ_RETVAL(int_work);
@@ -6342,6 +6520,8 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
}
}
+ INIT_WORK(&hr_dev->ecc_work, fmea_ram_ecc_work);
+
hr_dev->irq_workq = alloc_ordered_workqueue("hns_roce_irq_workq", 0);
if (!hr_dev->irq_workq) {
dev_err(dev, "failed to create irq workqueue.\n");
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 7ffb7824d268..f96debac30fe 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -250,6 +250,7 @@ enum hns_roce_opcode_type {
HNS_ROCE_OPC_CFG_GMV_TBL = 0x850f,
HNS_ROCE_OPC_CFG_GMV_BT = 0x8510,
HNS_ROCE_OPC_EXT_CFG = 0x8512,
+ HNS_ROCE_QUERY_RAM_ECC = 0x8513,
HNS_SWITCH_PARAMETER_CFG = 0x1033,
};
@@ -1107,6 +1108,11 @@ enum {
#define CFG_GMV_BT_BA_H CMQ_REQ_FIELD_LOC(51, 32)
#define CFG_GMV_BT_IDX CMQ_REQ_FIELD_LOC(95, 64)
+/* Fields of HNS_ROCE_QUERY_RAM_ECC */
+#define QUERY_RAM_ECC_1BIT_ERR CMQ_REQ_FIELD_LOC(31, 0)
+#define QUERY_RAM_ECC_RES_TYPE CMQ_REQ_FIELD_LOC(63, 32)
+#define QUERY_RAM_ECC_TAG CMQ_REQ_FIELD_LOC(95, 64)
+
struct hns_roce_cfg_sgid_tb {
__le32 table_idx_rsv;
__le32 vf_sgid_l;
@@ -1343,6 +1349,12 @@ struct hns_roce_dip {
struct list_head node; /* all dips are on a list */
};
+struct fmea_ram_ecc {
+ u32 is_ecc_err;
+ u32 res_type;
+ u32 index;
+};
+
/* only for RNR timeout issue of HIP08 */
#define HNS_ROCE_CLOCK_ADJUST 1000
#define HNS_ROCE_MAX_CQ_PERIOD 65
@@ -1382,7 +1394,6 @@ struct hns_roce_dip {
#define HNS_ROCE_V2_ASYNC_EQE_NUM 0x1000
#define HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S 0
-#define HNS_ROCE_V2_VF_INT_ST_RAS_INT_S 1
#define HNS_ROCE_EQ_DB_CMD_AEQ 0x0
#define HNS_ROCE_EQ_DB_CMD_AEQ_ARMED 0x1
diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c
index 646fa8677490..7b086fe63a24 100644
--- a/drivers/infiniband/hw/irdma/cm.c
+++ b/drivers/infiniband/hw/irdma/cm.c
@@ -1477,12 +1477,13 @@ irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, u16 dst_port,
list_for_each_entry (listen_node, &cm_core->listen_list, list) {
memcpy(listen_addr, listen_node->loc_addr, sizeof(listen_addr));
listen_port = listen_node->loc_port;
+ if (listen_port != dst_port ||
+ !(listener_state & listen_node->listener_state))
+ continue;
/* compare node pair, return node handle if a match */
- if ((!memcmp(listen_addr, dst_addr, sizeof(listen_addr)) ||
- !memcmp(listen_addr, ip_zero, sizeof(listen_addr))) &&
- listen_port == dst_port &&
- vlan_id == listen_node->vlan_id &&
- (listener_state & listen_node->listener_state)) {
+ if (!memcmp(listen_addr, ip_zero, sizeof(listen_addr)) ||
+ (!memcmp(listen_addr, dst_addr, sizeof(listen_addr)) &&
+ vlan_id == listen_node->vlan_id)) {
refcount_inc(&listen_node->refcnt);
spin_unlock_irqrestore(&cm_core->listen_list_lock,
flags);
diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c
index 58c0e181ca2b..a41e0d21143a 100644
--- a/drivers/infiniband/hw/irdma/ctrl.c
+++ b/drivers/infiniband/hw/irdma/ctrl.c
@@ -4872,10 +4872,12 @@ int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
sd_diff = sd_needed - hmc_fpm_misc->max_sds;
if (sd_diff > 128) {
- if (qpwanted > 128 && sd_diff > 144)
+ if (!(loop_count % 2) && qpwanted > 128) {
qpwanted /= 2;
- mrwanted /= 2;
- pblewanted /= 2;
+ } else {
+ mrwanted /= 2;
+ pblewanted /= 2;
+ }
continue;
}
if (dev->cqp->hmc_profile != IRDMA_HMC_PROFILE_FAVOR_VF &&
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index dd3943d22dc6..4f132c6fb653 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -257,10 +257,6 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
iwqp->last_aeq = info->ae_id;
spin_unlock_irqrestore(&iwqp->lock, flags);
ctx_info = &iwqp->ctx_info;
- if (rdma_protocol_roce(&iwqp->iwdev->ibdev, 1))
- ctx_info->roce_info->err_rq_idx_valid = true;
- else
- ctx_info->iwarp_info->err_rq_idx_valid = true;
} else {
if (info->ae_id != IRDMA_AE_CQ_OPERATION_ERROR)
continue;
@@ -370,16 +366,12 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC:
case IRDMA_AE_LCE_CQ_CATASTROPHIC:
case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
- if (rdma_protocol_roce(&iwdev->ibdev, 1))
- ctx_info->roce_info->err_rq_idx_valid = false;
- else
- ctx_info->iwarp_info->err_rq_idx_valid = false;
- fallthrough;
default:
- ibdev_err(&iwdev->ibdev, "abnormal ae_id = 0x%x bool qp=%d qp_id = %d\n",
- info->ae_id, info->qp, info->qp_cq_id);
+ ibdev_err(&iwdev->ibdev, "abnormal ae_id = 0x%x bool qp=%d qp_id = %d, ae_src=%d\n",
+ info->ae_id, info->qp, info->qp_cq_id, info->ae_src);
if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
- if (!info->sq && ctx_info->roce_info->err_rq_idx_valid) {
+ ctx_info->roce_info->err_rq_idx_valid = info->rq;
+ if (info->rq) {
ctx_info->roce_info->err_rq_idx = info->wqe_idx;
irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va,
ctx_info);
@@ -388,7 +380,8 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
irdma_cm_disconn(iwqp);
break;
}
- if (!info->sq && ctx_info->iwarp_info->err_rq_idx_valid) {
+ ctx_info->iwarp_info->err_rq_idx_valid = info->rq;
+ if (info->rq) {
ctx_info->iwarp_info->err_rq_idx = info->wqe_idx;
ctx_info->tcp_info_valid = false;
ctx_info->iwarp_info_valid = true;
@@ -1512,10 +1505,7 @@ static int irdma_hmc_setup(struct irdma_pci_f *rf)
int status;
u32 qpcnt;
- if (rf->rdma_ver == IRDMA_GEN_1)
- qpcnt = rsrc_limits_table[rf->limits_sel].qplimit * 2;
- else
- qpcnt = rsrc_limits_table[rf->limits_sel].qplimit;
+ qpcnt = rsrc_limits_table[rf->limits_sel].qplimit;
rf->sd_type = IRDMA_SD_TYPE_DIRECT;
status = irdma_cfg_fpm_val(&rf->sc_dev, qpcnt);
@@ -1543,7 +1533,7 @@ static void irdma_del_init_mem(struct irdma_pci_f *rf)
rf->obj_mem.pa);
rf->obj_mem.va = NULL;
if (rf->rdma_ver != IRDMA_GEN_1) {
- kfree(rf->allocated_ws_nodes);
+ bitmap_free(rf->allocated_ws_nodes);
rf->allocated_ws_nodes = NULL;
}
kfree(rf->ceqlist);
@@ -1972,9 +1962,8 @@ u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
u32 ret;
if (rf->rdma_ver != IRDMA_GEN_1) {
- rf->allocated_ws_nodes =
- kcalloc(BITS_TO_LONGS(IRDMA_MAX_WS_NODES),
- sizeof(unsigned long), GFP_KERNEL);
+ rf->allocated_ws_nodes = bitmap_zalloc(IRDMA_MAX_WS_NODES,
+ GFP_KERNEL);
if (!rf->allocated_ws_nodes)
return -ENOMEM;
@@ -2023,7 +2012,7 @@ u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
return 0;
mem_rsrc_kzalloc_fail:
- kfree(rf->allocated_ws_nodes);
+ bitmap_free(rf->allocated_ws_nodes);
rf->allocated_ws_nodes = NULL;
return ret;
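The irdma hunks above swap the open-coded kcalloc(BITS_TO_LONGS(IRDMA_MAX_WS_NODES), sizeof(unsigned long)) allocation for bitmap_zalloc()/bitmap_free(), which take the size in bits and make the intent explicit. A small sketch of the pattern, with a hypothetical MY_MAX_NODES size:

#include <linux/bitmap.h>
#include <linux/gfp.h>

#define MY_MAX_NODES 64

static unsigned long *alloc_node_map(void)
{
	/* zero-initialised, sized in bits; equivalent to the old
	 * kcalloc(BITS_TO_LONGS(MY_MAX_NODES), sizeof(unsigned long), GFP_KERNEL)
	 */
	return bitmap_zalloc(MY_MAX_NODES, GFP_KERNEL);
}

static void free_node_map(unsigned long *map)
{
	bitmap_free(map);	/* kfree()-based, so NULL-safe */
}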
diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
index ef862bced20f..65e966ad3453 100644
--- a/drivers/infiniband/hw/irdma/main.h
+++ b/drivers/infiniband/hw/irdma/main.h
@@ -85,7 +85,7 @@ extern struct auxiliary_driver i40iw_auxiliary_drv;
#define IRDMA_NO_QSET 0xffff
#define IW_CFG_FPM_QP_COUNT 32768
-#define IRDMA_MAX_PAGES_PER_FMR 512
+#define IRDMA_MAX_PAGES_PER_FMR 262144
#define IRDMA_MIN_PAGES_PER_FMR 1
#define IRDMA_CQP_COMPL_RQ_WQE_FLUSHED 2
#define IRDMA_CQP_COMPL_SQ_WQE_FLUSHED 3
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index ab3c5208a123..fdf4cc88cb91 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -652,6 +652,7 @@ static const char *const irdma_cqp_cmd_names[IRDMA_MAX_CQP_OPS] = {
};
static const struct irdma_cqp_err_info irdma_noncrit_err_list[] = {
+ {0xffff, 0x8002, "Invalid State"},
{0xffff, 0x8006, "Flush No Wqe Pending"},
{0xffff, 0x8007, "Modify QP Bad Close"},
{0xffff, 0x8009, "LLP Closed"},
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 96135a228f26..9b07b8af2997 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -1776,11 +1776,11 @@ static int irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
spin_unlock_irqrestore(&iwcq->lock, flags);
irdma_cq_wq_destroy(iwdev->rf, cq);
- irdma_cq_free_rsrc(iwdev->rf, iwcq);
spin_lock_irqsave(&iwceq->ce_lock, flags);
irdma_sc_cleanup_ceqes(cq, ceq);
spin_unlock_irqrestore(&iwceq->ce_lock, flags);
+ irdma_cq_free_rsrc(iwdev->rf, iwcq);
return 0;
}
@@ -2605,7 +2605,7 @@ static struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
palloc = &iwpbl->pble_alloc;
iwmr->page_cnt = max_num_sg;
err_code = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt,
- true);
+ false);
if (err_code)
goto err_get_pble;
@@ -2641,8 +2641,16 @@ static int irdma_set_page(struct ib_mr *ibmr, u64 addr)
if (unlikely(iwmr->npages == iwmr->page_cnt))
return -ENOMEM;
- pbl = palloc->level1.addr;
- pbl[iwmr->npages++] = addr;
+ if (palloc->level == PBLE_LEVEL_2) {
+ struct irdma_pble_info *palloc_info =
+ palloc->level2.leaf + (iwmr->npages >> PBLE_512_SHIFT);
+
+ palloc_info->addr[iwmr->npages & (PBLE_PER_PAGE - 1)] = addr;
+ } else {
+ pbl = palloc->level1.addr;
+ pbl[iwmr->npages] = addr;
+ }
+ iwmr->npages++;
return 0;
}
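irdma_set_page() now also handles level-2 PBLE allocations: the running page count selects the leaf page with npages >> PBLE_512_SHIFT and the slot inside it with npages & (PBLE_PER_PAGE - 1). Assuming 512 entries per leaf page, the index arithmetic reduces to the following standalone illustration (not driver code):

#include <stdint.h>

#define LEAF_SHIFT		9			/* 512 entries per leaf */
#define ENTRIES_PER_LEAF	(1u << LEAF_SHIFT)

/* leaves[] is an array of leaf pages, each holding ENTRIES_PER_LEAF addresses */
static void set_page_addr(uint64_t **leaves, uint32_t npages, uint64_t addr)
{
	uint32_t leaf = npages >> LEAF_SHIFT;			/* which leaf page  */
	uint32_t slot = npages & (ENTRIES_PER_LEAF - 1);	/* offset inside it */

	leaves[leaf][slot] = addr;
}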
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 08371a80fdc2..be189e0525de 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -523,6 +523,10 @@ repoll:
"Requestor" : "Responder", cq->mcq.cqn);
mlx5_ib_dbg(dev, "syndrome 0x%x, vendor syndrome 0x%x\n",
err_cqe->syndrome, err_cqe->vendor_err_synd);
+ if (wc->status != IB_WC_WR_FLUSH_ERR &&
+ (*cur_qp)->type == MLX5_IB_QPT_REG_UMR)
+ dev->umrc.state = MLX5_UMR_STATE_RECOVER;
+
if (opcode == MLX5_CQE_REQ_ERR) {
wq = &(*cur_qp)->sq;
wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
index 39ffb363ba0c..490ec308e309 100644
--- a/drivers/infiniband/hw/mlx5/fs.c
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -679,7 +679,15 @@ enum flow_table_type {
#define MLX5_FS_MAX_TYPES 6
#define MLX5_FS_MAX_ENTRIES BIT(16)
-static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_flow_namespace *ns,
+static bool mlx5_ib_shared_ft_allowed(struct ib_device *device)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+
+ return MLX5_CAP_GEN(dev->mdev, shared_object_to_user_object_allowed);
+}
+
+static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_ib_dev *dev,
+ struct mlx5_flow_namespace *ns,
struct mlx5_ib_flow_prio *prio,
int priority,
int num_entries, int num_groups,
@@ -688,6 +696,8 @@ static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_flow_namespace *ns,
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_table *ft;
+ if (mlx5_ib_shared_ft_allowed(&dev->ib_dev))
+ ft_attr.uid = MLX5_SHARED_RESOURCE_UID;
ft_attr.prio = priority;
ft_attr.max_fte = num_entries;
ft_attr.flags = flags;
@@ -784,8 +794,8 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
ft = prio->flow_table;
if (!ft)
- return _get_prio(ns, prio, priority, max_table_size, num_groups,
- flags);
+ return _get_prio(dev, ns, prio, priority, max_table_size,
+ num_groups, flags);
return prio;
}
@@ -927,7 +937,7 @@ int mlx5_ib_fs_add_op_fc(struct mlx5_ib_dev *dev, u32 port_num,
prio = &dev->flow_db->opfcs[type];
if (!prio->flow_table) {
- prio = _get_prio(ns, prio, priority,
+ prio = _get_prio(dev, ns, prio, priority,
dev->num_ports * MAX_OPFC_RULES, 1, 0);
if (IS_ERR(prio)) {
err = PTR_ERR(prio);
@@ -1407,8 +1417,8 @@ free_ucmd:
}
static struct mlx5_ib_flow_prio *
-_get_flow_table(struct mlx5_ib_dev *dev,
- struct mlx5_ib_flow_matcher *fs_matcher,
+_get_flow_table(struct mlx5_ib_dev *dev, u16 user_priority,
+ enum mlx5_flow_namespace_type ns_type,
bool mcast)
{
struct mlx5_flow_namespace *ns = NULL;
@@ -1421,11 +1431,11 @@ _get_flow_table(struct mlx5_ib_dev *dev,
if (mcast)
priority = MLX5_IB_FLOW_MCAST_PRIO;
else
- priority = ib_prio_to_core_prio(fs_matcher->priority, false);
+ priority = ib_prio_to_core_prio(user_priority, false);
esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) !=
DEVLINK_ESWITCH_ENCAP_MODE_NONE;
- switch (fs_matcher->ns_type) {
+ switch (ns_type) {
case MLX5_FLOW_NAMESPACE_BYPASS:
max_table_size = BIT(
MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, log_max_ft_size));
@@ -1452,17 +1462,17 @@ _get_flow_table(struct mlx5_ib_dev *dev,
reformat_l3_tunnel_to_l2) &&
esw_encap)
flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
- priority = fs_matcher->priority;
+ priority = user_priority;
break;
case MLX5_FLOW_NAMESPACE_RDMA_RX:
max_table_size = BIT(
MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev, log_max_ft_size));
- priority = fs_matcher->priority;
+ priority = user_priority;
break;
case MLX5_FLOW_NAMESPACE_RDMA_TX:
max_table_size = BIT(
MLX5_CAP_FLOWTABLE_RDMA_TX(dev->mdev, log_max_ft_size));
- priority = fs_matcher->priority;
+ priority = user_priority;
break;
default:
break;
@@ -1470,11 +1480,11 @@ _get_flow_table(struct mlx5_ib_dev *dev,
max_table_size = min_t(int, max_table_size, MLX5_FS_MAX_ENTRIES);
- ns = mlx5_get_flow_namespace(dev->mdev, fs_matcher->ns_type);
+ ns = mlx5_get_flow_namespace(dev->mdev, ns_type);
if (!ns)
return ERR_PTR(-EOPNOTSUPP);
- switch (fs_matcher->ns_type) {
+ switch (ns_type) {
case MLX5_FLOW_NAMESPACE_BYPASS:
prio = &dev->flow_db->prios[priority];
break;
@@ -1499,7 +1509,7 @@ _get_flow_table(struct mlx5_ib_dev *dev,
if (prio->flow_table)
return prio;
- return _get_prio(ns, prio, priority, max_table_size,
+ return _get_prio(dev, ns, prio, priority, max_table_size,
MLX5_FS_MAX_TYPES, flags);
}
@@ -1618,7 +1628,8 @@ static struct mlx5_ib_flow_handler *raw_fs_rule_add(
mcast = raw_fs_is_multicast(fs_matcher, cmd_in);
mutex_lock(&dev->flow_db->lock);
- ft_prio = _get_flow_table(dev, fs_matcher, mcast);
+ ft_prio = _get_flow_table(dev, fs_matcher->priority,
+ fs_matcher->ns_type, mcast);
if (IS_ERR(ft_prio)) {
err = PTR_ERR(ft_prio);
goto unlock;
@@ -2015,6 +2026,23 @@ static int flow_matcher_cleanup(struct ib_uobject *uobject,
return 0;
}
+static int steering_anchor_cleanup(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_steering_anchor *obj = uobject->object;
+
+ if (atomic_read(&obj->usecnt))
+ return -EBUSY;
+
+ mutex_lock(&obj->dev->flow_db->lock);
+ put_flow_table(obj->dev, obj->ft_prio, true);
+ mutex_unlock(&obj->dev->flow_db->lock);
+
+ kfree(obj);
+ return 0;
+}
+
static int mlx5_ib_matcher_ns(struct uverbs_attr_bundle *attrs,
struct mlx5_ib_flow_matcher *obj)
{
@@ -2050,12 +2078,10 @@ static int mlx5_ib_matcher_ns(struct uverbs_attr_bundle *attrs,
if (err)
return err;
- if (flags) {
- mlx5_ib_ft_type_to_namespace(
+ if (flags)
+ return mlx5_ib_ft_type_to_namespace(
MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX,
&obj->ns_type);
- return 0;
- }
}
obj->ns_type = MLX5_FLOW_NAMESPACE_BYPASS;
@@ -2121,6 +2147,75 @@ end:
return err;
}
+static int UVERBS_HANDLER(MLX5_IB_METHOD_STEERING_ANCHOR_CREATE)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_STEERING_ANCHOR_CREATE_HANDLE);
+ struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
+ enum mlx5_ib_uapi_flow_table_type ib_uapi_ft_type;
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5_ib_steering_anchor *obj;
+ struct mlx5_ib_flow_prio *ft_prio;
+ u16 priority;
+ u32 ft_id;
+ int err;
+
+ if (!capable(CAP_NET_RAW))
+ return -EPERM;
+
+ err = uverbs_get_const(&ib_uapi_ft_type, attrs,
+ MLX5_IB_ATTR_STEERING_ANCHOR_FT_TYPE);
+ if (err)
+ return err;
+
+ err = mlx5_ib_ft_type_to_namespace(ib_uapi_ft_type, &ns_type);
+ if (err)
+ return err;
+
+ err = uverbs_copy_from(&priority, attrs,
+ MLX5_IB_ATTR_STEERING_ANCHOR_PRIORITY);
+ if (err)
+ return err;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return -ENOMEM;
+
+ mutex_lock(&dev->flow_db->lock);
+ ft_prio = _get_flow_table(dev, priority, ns_type, 0);
+ if (IS_ERR(ft_prio)) {
+ mutex_unlock(&dev->flow_db->lock);
+ err = PTR_ERR(ft_prio);
+ goto free_obj;
+ }
+
+ ft_prio->refcount++;
+ ft_id = mlx5_flow_table_id(ft_prio->flow_table);
+ mutex_unlock(&dev->flow_db->lock);
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_STEERING_ANCHOR_FT_ID,
+ &ft_id, sizeof(ft_id));
+ if (err)
+ goto put_flow_table;
+
+ uobj->object = obj;
+ obj->dev = dev;
+ obj->ft_prio = ft_prio;
+ atomic_set(&obj->usecnt, 0);
+
+ return 0;
+
+put_flow_table:
+ mutex_lock(&dev->flow_db->lock);
+ put_flow_table(dev, ft_prio, true);
+ mutex_unlock(&dev->flow_db->lock);
+free_obj:
+ kfree(obj);
+
+ return err;
+}
+
static struct ib_flow_action *
mlx5_ib_create_modify_header(struct mlx5_ib_dev *dev,
enum mlx5_ib_uapi_flow_table_type ft_type,
@@ -2477,6 +2572,35 @@ DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_FLOW_MATCHER,
&UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_CREATE),
&UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_DESTROY));
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_STEERING_ANCHOR_CREATE,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_STEERING_ANCHOR_CREATE_HANDLE,
+ MLX5_IB_OBJECT_STEERING_ANCHOR,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_STEERING_ANCHOR_FT_TYPE,
+ enum mlx5_ib_uapi_flow_table_type,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_STEERING_ANCHOR_PRIORITY,
+ UVERBS_ATTR_TYPE(u16),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_STEERING_ANCHOR_FT_ID,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ MLX5_IB_METHOD_STEERING_ANCHOR_DESTROY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_STEERING_ANCHOR_DESTROY_HANDLE,
+ MLX5_IB_OBJECT_STEERING_ANCHOR,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(
+ MLX5_IB_OBJECT_STEERING_ANCHOR,
+ UVERBS_TYPE_ALLOC_IDR(steering_anchor_cleanup),
+ &UVERBS_METHOD(MLX5_IB_METHOD_STEERING_ANCHOR_CREATE),
+ &UVERBS_METHOD(MLX5_IB_METHOD_STEERING_ANCHOR_DESTROY));
+
const struct uapi_definition mlx5_ib_flow_defs[] = {
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
MLX5_IB_OBJECT_FLOW_MATCHER),
@@ -2485,6 +2609,9 @@ const struct uapi_definition mlx5_ib_flow_defs[] = {
&mlx5_ib_fs),
UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
&mlx5_ib_flow_actions),
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
+ MLX5_IB_OBJECT_STEERING_ANCHOR,
+ UAPI_DEF_IS_OBJ_SUPPORTED(mlx5_ib_shared_ft_allowed)),
{},
};
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index b68fddeac0f1..fc94a1b25485 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2738,26 +2738,24 @@ static int set_has_smi_cap(struct mlx5_ib_dev *dev)
int err;
int port;
- for (port = 1; port <= ARRAY_SIZE(dev->port_caps); port++) {
- dev->port_caps[port - 1].has_smi = false;
- if (MLX5_CAP_GEN(dev->mdev, port_type) ==
- MLX5_CAP_PORT_TYPE_IB) {
- if (MLX5_CAP_GEN(dev->mdev, ib_virt)) {
- err = mlx5_query_hca_vport_context(dev->mdev, 0,
- port, 0,
- &vport_ctx);
- if (err) {
- mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
- port, err);
- return err;
- }
- dev->port_caps[port - 1].has_smi =
- vport_ctx.has_smi;
- } else {
- dev->port_caps[port - 1].has_smi = true;
- }
+ if (MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
+ return 0;
+
+ for (port = 1; port <= dev->num_ports; port++) {
+ if (!MLX5_CAP_GEN(dev->mdev, ib_virt)) {
+ dev->port_caps[port - 1].has_smi = true;
+ continue;
}
+ err = mlx5_query_hca_vport_context(dev->mdev, 0, port, 0,
+ &vport_ctx);
+ if (err) {
+ mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
+ port, err);
+ return err;
+ }
+ dev->port_caps[port - 1].has_smi = vport_ctx.has_smi;
}
+
return 0;
}
@@ -4002,7 +4000,7 @@ static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
{
int err;
- err = mlx5_mr_cache_cleanup(dev);
+ err = mlx5_mkey_cache_cleanup(dev);
if (err)
mlx5_ib_warn(dev, "mr cache cleanup failed\n");
@@ -4022,7 +4020,7 @@ static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
if (ret)
return ret;
- ret = mlx5_mr_cache_init(dev);
+ ret = mlx5_mkey_cache_init(dev);
if (ret) {
mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
mlx5r_umr_resource_cleanup(dev);
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 998b67509a53..2e2ad3918385 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -259,6 +259,12 @@ struct mlx5_ib_flow_matcher {
u8 match_criteria_enable;
};
+struct mlx5_ib_steering_anchor {
+ struct mlx5_ib_flow_prio *ft_prio;
+ struct mlx5_ib_dev *dev;
+ atomic_t usecnt;
+};
+
struct mlx5_ib_pp {
u16 index;
struct mlx5_core_dev *mdev;
@@ -613,6 +619,7 @@ struct mlx5_ib_mkey {
unsigned int ndescs;
struct wait_queue_head wait;
refcount_t usecount;
+ struct mlx5_cache_ent *cache_ent;
};
#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
@@ -635,20 +642,9 @@ struct mlx5_ib_mr {
struct ib_mr ibmr;
struct mlx5_ib_mkey mmkey;
- /* User MR data */
- struct mlx5_cache_ent *cache_ent;
- /* Everything after cache_ent is zero'd when MR allocated */
struct ib_umem *umem;
union {
- /* Used only while the MR is in the cache */
- struct {
- u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
- struct mlx5_async_work cb_work;
- /* Cache list element */
- struct list_head list;
- };
-
/* Used only by kernel MRs (umem == NULL) */
struct {
void *descs;
@@ -688,12 +684,6 @@ struct mlx5_ib_mr {
};
};
-/* Zero the fields in the mr that are variant depending on usage */
-static inline void mlx5_clear_mr(struct mlx5_ib_mr *mr)
-{
- memset_after(mr, 0, cache_ent);
-}
-
static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
{
return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
@@ -717,21 +707,29 @@ struct mlx5_ib_umr_context {
struct completion done;
};
+enum {
+ MLX5_UMR_STATE_ACTIVE,
+ MLX5_UMR_STATE_RECOVER,
+ MLX5_UMR_STATE_ERR,
+};
+
struct umr_common {
struct ib_pd *pd;
struct ib_cq *cq;
struct ib_qp *qp;
- /* control access to UMR QP
+ /* Protects from UMR QP overflow
*/
struct semaphore sem;
+ /* Protects from using UMR while the UMR is not active
+ */
+ struct mutex lock;
+ unsigned int state;
};
struct mlx5_cache_ent {
- struct list_head head;
- /* sync access to the cahce entry
- */
- spinlock_t lock;
-
+ struct xarray mkeys;
+ unsigned long stored;
+ unsigned long reserved;
char name[4];
u32 order;
@@ -743,18 +741,11 @@ struct mlx5_cache_ent {
u8 fill_to_high_water:1;
/*
- * - available_mrs is the length of list head, ie the number of MRs
- * available for immediate allocation.
- * - total_mrs is available_mrs plus all in use MRs that could be
- * returned to the cache.
- * - limit is the low water mark for available_mrs, 2* limit is the
+ * - limit is the low water mark for stored mkeys, 2* limit is the
* upper water mark.
- * - pending is the number of MRs currently being created
*/
- u32 total_mrs;
- u32 available_mrs;
+ u32 in_use;
u32 limit;
- u32 pending;
/* Statistics */
u32 miss;
@@ -763,9 +754,19 @@ struct mlx5_cache_ent {
struct delayed_work dwork;
};
-struct mlx5_mr_cache {
+struct mlx5r_async_create_mkey {
+ union {
+ u32 in[MLX5_ST_SZ_BYTES(create_mkey_in)];
+ u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
+ };
+ struct mlx5_async_work cb_work;
+ struct mlx5_cache_ent *ent;
+ u32 mkey;
+};
+
+struct mlx5_mkey_cache {
struct workqueue_struct *wq;
- struct mlx5_cache_ent ent[MAX_MR_CACHE_ENTRIES];
+ struct mlx5_cache_ent ent[MAX_MKEY_CACHE_ENTRIES];
struct dentry *root;
unsigned long last_add;
};
@@ -1064,7 +1065,7 @@ struct mlx5_ib_dev {
struct mlx5_ib_resources devr;
atomic_t mkey_var;
- struct mlx5_mr_cache cache;
+ struct mlx5_mkey_cache cache;
struct timer_list delay_timer;
/* Prevents soft lock on massive reg MRs */
struct mutex slow_path_mutex;
@@ -1309,8 +1310,8 @@ void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas,
u64 access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
-int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
-int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
+int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev);
+int mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev);
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
struct mlx5_cache_ent *ent,
@@ -1338,7 +1339,7 @@ int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq);
void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
-void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
+void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent);
void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
struct mlx5_ib_mr *mr, int flags);
@@ -1357,7 +1358,7 @@ static inline int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev,
static inline void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
-static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
+static inline void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent) {}
static inline void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
struct mlx5_ib_mr *mr, int flags) {}
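The new umr_common fields above (a mutex plus an ACTIVE/RECOVER/ERR state) gate every UMR post: callers hold the mutex while posting, wait and retry while a recovery is in flight, and fail outright once the QP has been declared broken. A simplified sketch of that gate, with stand-in names rather than the driver's own:

#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/errno.h>

enum { STATE_ACTIVE, STATE_RECOVER, STATE_ERR };

struct umr_gate {
	struct mutex lock;
	unsigned int state;
};

static int post_with_gate(struct umr_gate *g, int (*post)(void *), void *arg)
{
	int err;

	for (;;) {
		mutex_lock(&g->lock);
		if (g->state == STATE_ERR) {
			mutex_unlock(&g->lock);
			return -EFAULT;			/* QP unusable */
		}
		if (g->state == STATE_RECOVER) {
			mutex_unlock(&g->lock);
			usleep_range(3000, 5000);	/* let recovery finish */
			continue;
		}
		/* ACTIVE: post under the lock so recovery cannot reset
		 * the QP while a work request is being built.
		 */
		err = post(arg);
		mutex_unlock(&g->lock);
		return err;
	}
}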
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index aedfd7ff4846..129d531bd01b 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -82,15 +82,14 @@ static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
MLX5_SET64(mkc, mkc, start_addr, start_addr);
}
-static void assign_mkey_variant(struct mlx5_ib_dev *dev,
- struct mlx5_ib_mkey *mkey, u32 *in)
+static void assign_mkey_variant(struct mlx5_ib_dev *dev, u32 *mkey, u32 *in)
{
u8 key = atomic_inc_return(&dev->mkey_var);
void *mkc;
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
MLX5_SET(mkc, mkc, mkey_7_0, key);
- mkey->key = key;
+ *mkey = key;
}
static int mlx5_ib_create_mkey(struct mlx5_ib_dev *dev,
@@ -98,7 +97,7 @@ static int mlx5_ib_create_mkey(struct mlx5_ib_dev *dev,
{
int ret;
- assign_mkey_variant(dev, mkey, in);
+ assign_mkey_variant(dev, &mkey->key, in);
ret = mlx5_core_create_mkey(dev->mdev, &mkey->key, in, inlen);
if (!ret)
init_waitqueue_head(&mkey->wait);
@@ -106,20 +105,21 @@ static int mlx5_ib_create_mkey(struct mlx5_ib_dev *dev,
return ret;
}
-static int
-mlx5_ib_create_mkey_cb(struct mlx5_ib_dev *dev,
- struct mlx5_ib_mkey *mkey,
- struct mlx5_async_ctx *async_ctx,
- u32 *in, int inlen, u32 *out, int outlen,
- struct mlx5_async_work *context)
+static int mlx5_ib_create_mkey_cb(struct mlx5r_async_create_mkey *async_create)
{
- MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
- assign_mkey_variant(dev, mkey, in);
- return mlx5_cmd_exec_cb(async_ctx, in, inlen, out, outlen,
- create_mkey_callback, context);
+ struct mlx5_ib_dev *dev = async_create->ent->dev;
+ size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ size_t outlen = MLX5_ST_SZ_BYTES(create_mkey_out);
+
+ MLX5_SET(create_mkey_in, async_create->in, opcode,
+ MLX5_CMD_OP_CREATE_MKEY);
+ assign_mkey_variant(dev, &async_create->mkey, async_create->in);
+ return mlx5_cmd_exec_cb(&dev->async_ctx, async_create->in, inlen,
+ async_create->out, outlen, create_mkey_callback,
+ &async_create->cb_work);
}
-static int mr_cache_max_order(struct mlx5_ib_dev *dev);
+static int mkey_cache_max_order(struct mlx5_ib_dev *dev);
static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent);
static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
@@ -142,40 +142,132 @@ static void create_mkey_warn(struct mlx5_ib_dev *dev, int status, void *out)
mlx5_cmd_out_err(dev->mdev, MLX5_CMD_OP_CREATE_MKEY, 0, out);
}
+
+static int push_mkey(struct mlx5_cache_ent *ent, bool limit_pendings,
+ void *to_store)
+{
+ XA_STATE(xas, &ent->mkeys, 0);
+ void *curr;
+
+ xa_lock_irq(&ent->mkeys);
+ if (limit_pendings &&
+ (ent->reserved - ent->stored) > MAX_PENDING_REG_MR) {
+ xa_unlock_irq(&ent->mkeys);
+ return -EAGAIN;
+ }
+ while (1) {
+ /*
+ * This is cmpxchg (NULL, XA_ZERO_ENTRY) however this version
+ * doesn't transparently unlock. Instead we set the xas index to
+ * the current value of reserved every iteration.
+ */
+ xas_set(&xas, ent->reserved);
+ curr = xas_load(&xas);
+ if (!curr) {
+ if (to_store && ent->stored == ent->reserved)
+ xas_store(&xas, to_store);
+ else
+ xas_store(&xas, XA_ZERO_ENTRY);
+ if (xas_valid(&xas)) {
+ ent->reserved++;
+ if (to_store) {
+ if (ent->stored != ent->reserved)
+ __xa_store(&ent->mkeys,
+ ent->stored,
+ to_store,
+ GFP_KERNEL);
+ ent->stored++;
+ queue_adjust_cache_locked(ent);
+ WRITE_ONCE(ent->dev->cache.last_add,
+ jiffies);
+ }
+ }
+ }
+ xa_unlock_irq(&ent->mkeys);
+
+ /*
+ * Notice xas_nomem() must always be called as it cleans
+ * up any cached allocation.
+ */
+ if (!xas_nomem(&xas, GFP_KERNEL))
+ break;
+ xa_lock_irq(&ent->mkeys);
+ }
+ if (xas_error(&xas))
+ return xas_error(&xas);
+ if (WARN_ON(curr))
+ return -EINVAL;
+ return 0;
+}
+
+static void undo_push_reserve_mkey(struct mlx5_cache_ent *ent)
+{
+ void *old;
+
+ ent->reserved--;
+ old = __xa_erase(&ent->mkeys, ent->reserved);
+ WARN_ON(old);
+}
+
+static void push_to_reserved(struct mlx5_cache_ent *ent, u32 mkey)
+{
+ void *old;
+
+ old = __xa_store(&ent->mkeys, ent->stored, xa_mk_value(mkey), 0);
+ WARN_ON(old);
+ ent->stored++;
+}
+
+static u32 pop_stored_mkey(struct mlx5_cache_ent *ent)
+{
+ void *old, *xa_mkey;
+
+ ent->stored--;
+ ent->reserved--;
+
+ if (ent->stored == ent->reserved) {
+ xa_mkey = __xa_erase(&ent->mkeys, ent->stored);
+ WARN_ON(!xa_mkey);
+ return (u32)xa_to_value(xa_mkey);
+ }
+
+ xa_mkey = __xa_store(&ent->mkeys, ent->stored, XA_ZERO_ENTRY,
+ GFP_KERNEL);
+ WARN_ON(!xa_mkey || xa_is_err(xa_mkey));
+ old = __xa_erase(&ent->mkeys, ent->reserved);
+ WARN_ON(old);
+ return (u32)xa_to_value(xa_mkey);
+}
+
static void create_mkey_callback(int status, struct mlx5_async_work *context)
{
- struct mlx5_ib_mr *mr =
- container_of(context, struct mlx5_ib_mr, cb_work);
- struct mlx5_cache_ent *ent = mr->cache_ent;
+ struct mlx5r_async_create_mkey *mkey_out =
+ container_of(context, struct mlx5r_async_create_mkey, cb_work);
+ struct mlx5_cache_ent *ent = mkey_out->ent;
struct mlx5_ib_dev *dev = ent->dev;
unsigned long flags;
if (status) {
- create_mkey_warn(dev, status, mr->out);
- kfree(mr);
- spin_lock_irqsave(&ent->lock, flags);
- ent->pending--;
+ create_mkey_warn(dev, status, mkey_out->out);
+ kfree(mkey_out);
+ xa_lock_irqsave(&ent->mkeys, flags);
+ undo_push_reserve_mkey(ent);
WRITE_ONCE(dev->fill_delay, 1);
- spin_unlock_irqrestore(&ent->lock, flags);
+ xa_unlock_irqrestore(&ent->mkeys, flags);
mod_timer(&dev->delay_timer, jiffies + HZ);
return;
}
- mr->mmkey.type = MLX5_MKEY_MR;
- mr->mmkey.key |= mlx5_idx_to_mkey(
- MLX5_GET(create_mkey_out, mr->out, mkey_index));
- init_waitqueue_head(&mr->mmkey.wait);
-
+ mkey_out->mkey |= mlx5_idx_to_mkey(
+ MLX5_GET(create_mkey_out, mkey_out->out, mkey_index));
WRITE_ONCE(dev->cache.last_add, jiffies);
- spin_lock_irqsave(&ent->lock, flags);
- list_add_tail(&mr->list, &ent->head);
- ent->available_mrs++;
- ent->total_mrs++;
+ xa_lock_irqsave(&ent->mkeys, flags);
+ push_to_reserved(ent, mkey_out->mkey);
/* If we are doing fill_to_high_water then keep going. */
queue_adjust_cache_locked(ent);
- ent->pending--;
- spin_unlock_irqrestore(&ent->lock, flags);
+ xa_unlock_irqrestore(&ent->mkeys, flags);
+ kfree(mkey_out);
}
static int get_mkc_octo_size(unsigned int access_mode, unsigned int ndescs)
@@ -197,15 +289,8 @@ static int get_mkc_octo_size(unsigned int access_mode, unsigned int ndescs)
return ret;
}
-static struct mlx5_ib_mr *alloc_cache_mr(struct mlx5_cache_ent *ent, void *mkc)
+static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc)
{
- struct mlx5_ib_mr *mr;
-
- mr = kzalloc(sizeof(*mr), GFP_KERNEL);
- if (!mr)
- return NULL;
- mr->cache_ent = ent;
-
set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd);
MLX5_SET(mkc, mkc, free, 1);
MLX5_SET(mkc, mkc, umr_en, 1);
@@ -215,133 +300,106 @@ static struct mlx5_ib_mr *alloc_cache_mr(struct mlx5_cache_ent *ent, void *mkc)
MLX5_SET(mkc, mkc, translations_octword_size,
get_mkc_octo_size(ent->access_mode, ent->ndescs));
MLX5_SET(mkc, mkc, log_page_size, ent->page);
- return mr;
}
/* Asynchronously schedule new MRs to be populated in the cache. */
static int add_keys(struct mlx5_cache_ent *ent, unsigned int num)
{
- size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
- struct mlx5_ib_mr *mr;
+ struct mlx5r_async_create_mkey *async_create;
void *mkc;
- u32 *in;
int err = 0;
int i;
- in = kzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
- mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
for (i = 0; i < num; i++) {
- mr = alloc_cache_mr(ent, mkc);
- if (!mr) {
- err = -ENOMEM;
- break;
- }
- spin_lock_irq(&ent->lock);
- if (ent->pending >= MAX_PENDING_REG_MR) {
- err = -EAGAIN;
- spin_unlock_irq(&ent->lock);
- kfree(mr);
- break;
- }
- ent->pending++;
- spin_unlock_irq(&ent->lock);
- err = mlx5_ib_create_mkey_cb(ent->dev, &mr->mmkey,
- &ent->dev->async_ctx, in, inlen,
- mr->out, sizeof(mr->out),
- &mr->cb_work);
+ async_create = kzalloc(sizeof(struct mlx5r_async_create_mkey),
+ GFP_KERNEL);
+ if (!async_create)
+ return -ENOMEM;
+ mkc = MLX5_ADDR_OF(create_mkey_in, async_create->in,
+ memory_key_mkey_entry);
+ set_cache_mkc(ent, mkc);
+ async_create->ent = ent;
+
+ err = push_mkey(ent, true, NULL);
+ if (err)
+ goto free_async_create;
+
+ err = mlx5_ib_create_mkey_cb(async_create);
if (err) {
- spin_lock_irq(&ent->lock);
- ent->pending--;
- spin_unlock_irq(&ent->lock);
mlx5_ib_warn(ent->dev, "create mkey failed %d\n", err);
- kfree(mr);
- break;
+ goto err_undo_reserve;
}
}
- kfree(in);
+ return 0;
+
+err_undo_reserve:
+ xa_lock_irq(&ent->mkeys);
+ undo_push_reserve_mkey(ent);
+ xa_unlock_irq(&ent->mkeys);
+free_async_create:
+ kfree(async_create);
return err;
}
/* Synchronously create a MR in the cache */
-static struct mlx5_ib_mr *create_cache_mr(struct mlx5_cache_ent *ent)
+static int create_cache_mkey(struct mlx5_cache_ent *ent, u32 *mkey)
{
size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
- struct mlx5_ib_mr *mr;
void *mkc;
u32 *in;
int err;
in = kzalloc(inlen, GFP_KERNEL);
if (!in)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ set_cache_mkc(ent, mkc);
- mr = alloc_cache_mr(ent, mkc);
- if (!mr) {
- err = -ENOMEM;
- goto free_in;
- }
-
- err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey.key, in, inlen);
+ err = mlx5_core_create_mkey(ent->dev->mdev, mkey, in, inlen);
if (err)
- goto free_mr;
+ goto free_in;
- init_waitqueue_head(&mr->mmkey.wait);
- mr->mmkey.type = MLX5_MKEY_MR;
WRITE_ONCE(ent->dev->cache.last_add, jiffies);
- spin_lock_irq(&ent->lock);
- ent->total_mrs++;
- spin_unlock_irq(&ent->lock);
- kfree(in);
- return mr;
-free_mr:
- kfree(mr);
free_in:
kfree(in);
- return ERR_PTR(err);
+ return err;
}
static void remove_cache_mr_locked(struct mlx5_cache_ent *ent)
{
- struct mlx5_ib_mr *mr;
+ u32 mkey;
- lockdep_assert_held(&ent->lock);
- if (list_empty(&ent->head))
+ lockdep_assert_held(&ent->mkeys.xa_lock);
+ if (!ent->stored)
return;
- mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
- list_del(&mr->list);
- ent->available_mrs--;
- ent->total_mrs--;
- spin_unlock_irq(&ent->lock);
- mlx5_core_destroy_mkey(ent->dev->mdev, mr->mmkey.key);
- kfree(mr);
- spin_lock_irq(&ent->lock);
+ mkey = pop_stored_mkey(ent);
+ xa_unlock_irq(&ent->mkeys);
+ mlx5_core_destroy_mkey(ent->dev->mdev, mkey);
+ xa_lock_irq(&ent->mkeys);
}
static int resize_available_mrs(struct mlx5_cache_ent *ent, unsigned int target,
bool limit_fill)
+ __acquires(&ent->mkeys) __releases(&ent->mkeys)
{
int err;
- lockdep_assert_held(&ent->lock);
+ lockdep_assert_held(&ent->mkeys.xa_lock);
while (true) {
if (limit_fill)
target = ent->limit * 2;
- if (target == ent->available_mrs + ent->pending)
+ if (target == ent->reserved)
return 0;
- if (target > ent->available_mrs + ent->pending) {
- u32 todo = target - (ent->available_mrs + ent->pending);
+ if (target > ent->reserved) {
+ u32 todo = target - ent->reserved;
- spin_unlock_irq(&ent->lock);
+ xa_unlock_irq(&ent->mkeys);
err = add_keys(ent, todo);
if (err == -EAGAIN)
usleep_range(3000, 5000);
- spin_lock_irq(&ent->lock);
+ xa_lock_irq(&ent->mkeys);
if (err) {
if (err != -EAGAIN)
return err;
@@ -366,15 +424,15 @@ static ssize_t size_write(struct file *filp, const char __user *buf,
/*
* Target is the new value of total_mrs the user requests, however we
- * cannot free MRs that are in use. Compute the target value for
- * available_mrs.
+ * cannot free MRs that are in use. Compute the target value for stored
+ * mkeys.
*/
- spin_lock_irq(&ent->lock);
- if (target < ent->total_mrs - ent->available_mrs) {
+ xa_lock_irq(&ent->mkeys);
+ if (target < ent->in_use) {
err = -EINVAL;
goto err_unlock;
}
- target = target - (ent->total_mrs - ent->available_mrs);
+ target = target - ent->in_use;
if (target < ent->limit || target > ent->limit*2) {
err = -EINVAL;
goto err_unlock;
@@ -382,12 +440,12 @@ static ssize_t size_write(struct file *filp, const char __user *buf,
err = resize_available_mrs(ent, target, false);
if (err)
goto err_unlock;
- spin_unlock_irq(&ent->lock);
+ xa_unlock_irq(&ent->mkeys);
return count;
err_unlock:
- spin_unlock_irq(&ent->lock);
+ xa_unlock_irq(&ent->mkeys);
return err;
}
@@ -398,7 +456,7 @@ static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
char lbuf[20];
int err;
- err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->total_mrs);
+ err = snprintf(lbuf, sizeof(lbuf), "%ld\n", ent->stored + ent->in_use);
if (err < 0)
return err;
@@ -427,10 +485,10 @@ static ssize_t limit_write(struct file *filp, const char __user *buf,
* Upon set we immediately fill the cache to high water mark implied by
* the limit.
*/
- spin_lock_irq(&ent->lock);
+ xa_lock_irq(&ent->mkeys);
ent->limit = var;
err = resize_available_mrs(ent, 0, true);
- spin_unlock_irq(&ent->lock);
+ xa_unlock_irq(&ent->mkeys);
if (err)
return err;
return count;
@@ -457,17 +515,17 @@ static const struct file_operations limit_fops = {
.read = limit_read,
};
-static bool someone_adding(struct mlx5_mr_cache *cache)
+static bool someone_adding(struct mlx5_mkey_cache *cache)
{
unsigned int i;
- for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+ for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
struct mlx5_cache_ent *ent = &cache->ent[i];
bool ret;
- spin_lock_irq(&ent->lock);
- ret = ent->available_mrs < ent->limit;
- spin_unlock_irq(&ent->lock);
+ xa_lock_irq(&ent->mkeys);
+ ret = ent->stored < ent->limit;
+ xa_unlock_irq(&ent->mkeys);
if (ret)
return true;
}
@@ -481,26 +539,26 @@ static bool someone_adding(struct mlx5_mr_cache *cache)
*/
static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
{
- lockdep_assert_held(&ent->lock);
+ lockdep_assert_held(&ent->mkeys.xa_lock);
if (ent->disabled || READ_ONCE(ent->dev->fill_delay))
return;
- if (ent->available_mrs < ent->limit) {
+ if (ent->stored < ent->limit) {
ent->fill_to_high_water = true;
mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
} else if (ent->fill_to_high_water &&
- ent->available_mrs + ent->pending < 2 * ent->limit) {
+ ent->reserved < 2 * ent->limit) {
/*
* Once we start populating due to hitting a low water mark
* continue until we pass the high water mark.
*/
mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
- } else if (ent->available_mrs == 2 * ent->limit) {
+ } else if (ent->stored == 2 * ent->limit) {
ent->fill_to_high_water = false;
- } else if (ent->available_mrs > 2 * ent->limit) {
+ } else if (ent->stored > 2 * ent->limit) {
/* Queue deletion of excess entries */
ent->fill_to_high_water = false;
- if (ent->pending)
+ if (ent->stored != ent->reserved)
queue_delayed_work(ent->dev->cache.wq, &ent->dwork,
msecs_to_jiffies(1000));
else
@@ -511,25 +569,24 @@ static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
struct mlx5_ib_dev *dev = ent->dev;
- struct mlx5_mr_cache *cache = &dev->cache;
+ struct mlx5_mkey_cache *cache = &dev->cache;
int err;
- spin_lock_irq(&ent->lock);
+ xa_lock_irq(&ent->mkeys);
if (ent->disabled)
goto out;
- if (ent->fill_to_high_water &&
- ent->available_mrs + ent->pending < 2 * ent->limit &&
+ if (ent->fill_to_high_water && ent->reserved < 2 * ent->limit &&
!READ_ONCE(dev->fill_delay)) {
- spin_unlock_irq(&ent->lock);
+ xa_unlock_irq(&ent->mkeys);
err = add_keys(ent, 1);
- spin_lock_irq(&ent->lock);
+ xa_lock_irq(&ent->mkeys);
if (ent->disabled)
goto out;
if (err) {
/*
- * EAGAIN only happens if pending is positive, so we
- * will be rescheduled from reg_mr_callback(). The only
+ * EAGAIN only happens if there are pending MRs, so we
+ * will be rescheduled when storing them. The only
* failure path here is ENOMEM.
*/
if (err != -EAGAIN) {
@@ -541,7 +598,7 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
msecs_to_jiffies(1000));
}
}
- } else if (ent->available_mrs > 2 * ent->limit) {
+ } else if (ent->stored > 2 * ent->limit) {
bool need_delay;
/*
@@ -556,11 +613,11 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
* the garbage collection work to try to run in next cycle, in
* order to free CPU resources to other tasks.
*/
- spin_unlock_irq(&ent->lock);
+ xa_unlock_irq(&ent->mkeys);
need_delay = need_resched() || someone_adding(cache) ||
!time_after(jiffies,
READ_ONCE(cache->last_add) + 300 * HZ);
- spin_lock_irq(&ent->lock);
+ xa_lock_irq(&ent->mkeys);
if (ent->disabled)
goto out;
if (need_delay) {
@@ -571,7 +628,7 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
queue_adjust_cache_locked(ent);
}
out:
- spin_unlock_irq(&ent->lock);
+ xa_unlock_irq(&ent->mkeys);
}
static void delayed_cache_work_func(struct work_struct *work)
@@ -587,73 +644,59 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
int access_flags)
{
struct mlx5_ib_mr *mr;
+ int err;
- /* Matches access in alloc_cache_mr() */
if (!mlx5r_umr_can_reconfig(dev, 0, access_flags))
return ERR_PTR(-EOPNOTSUPP);
- spin_lock_irq(&ent->lock);
- if (list_empty(&ent->head)) {
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ xa_lock_irq(&ent->mkeys);
+ ent->in_use++;
+
+ if (!ent->stored) {
queue_adjust_cache_locked(ent);
ent->miss++;
- spin_unlock_irq(&ent->lock);
- mr = create_cache_mr(ent);
- if (IS_ERR(mr))
- return mr;
+ xa_unlock_irq(&ent->mkeys);
+ err = create_cache_mkey(ent, &mr->mmkey.key);
+ if (err) {
+ xa_lock_irq(&ent->mkeys);
+ ent->in_use--;
+ xa_unlock_irq(&ent->mkeys);
+ kfree(mr);
+ return ERR_PTR(err);
+ }
} else {
- mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
- list_del(&mr->list);
- ent->available_mrs--;
+ mr->mmkey.key = pop_stored_mkey(ent);
queue_adjust_cache_locked(ent);
- spin_unlock_irq(&ent->lock);
-
- mlx5_clear_mr(mr);
+ xa_unlock_irq(&ent->mkeys);
}
+ mr->mmkey.cache_ent = ent;
+ mr->mmkey.type = MLX5_MKEY_MR;
+ init_waitqueue_head(&mr->mmkey.wait);
return mr;
}
-static void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
-{
- struct mlx5_cache_ent *ent = mr->cache_ent;
-
- WRITE_ONCE(dev->cache.last_add, jiffies);
- spin_lock_irq(&ent->lock);
- list_add_tail(&mr->list, &ent->head);
- ent->available_mrs++;
- queue_adjust_cache_locked(ent);
- spin_unlock_irq(&ent->lock);
-}
-
static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
- struct mlx5_mr_cache *cache = &dev->cache;
+ struct mlx5_mkey_cache *cache = &dev->cache;
struct mlx5_cache_ent *ent = &cache->ent[c];
- struct mlx5_ib_mr *tmp_mr;
- struct mlx5_ib_mr *mr;
- LIST_HEAD(del_list);
+ u32 mkey;
cancel_delayed_work(&ent->dwork);
- while (1) {
- spin_lock_irq(&ent->lock);
- if (list_empty(&ent->head)) {
- spin_unlock_irq(&ent->lock);
- break;
- }
- mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
- list_move(&mr->list, &del_list);
- ent->available_mrs--;
- ent->total_mrs--;
- spin_unlock_irq(&ent->lock);
- mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key);
- }
-
- list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
- list_del(&mr->list);
- kfree(mr);
+ xa_lock_irq(&ent->mkeys);
+ while (ent->stored) {
+ mkey = pop_stored_mkey(ent);
+ xa_unlock_irq(&ent->mkeys);
+ mlx5_core_destroy_mkey(dev->mdev, mkey);
+ xa_lock_irq(&ent->mkeys);
}
+ xa_unlock_irq(&ent->mkeys);
}
-static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5_mkey_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
if (!mlx5_debugfs_root || dev->is_rep)
return;
@@ -662,9 +705,9 @@ static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
dev->cache.root = NULL;
}
-static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
+static void mlx5_mkey_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
- struct mlx5_mr_cache *cache = &dev->cache;
+ struct mlx5_mkey_cache *cache = &dev->cache;
struct mlx5_cache_ent *ent;
struct dentry *dir;
int i;
@@ -674,13 +717,13 @@ static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
cache->root = debugfs_create_dir("mr_cache", mlx5_debugfs_get_dev_root(dev->mdev));
- for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+ for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
ent = &cache->ent[i];
sprintf(ent->name, "%d", ent->order);
dir = debugfs_create_dir(ent->name, cache->root);
debugfs_create_file("size", 0600, dir, ent, &size_fops);
debugfs_create_file("limit", 0600, dir, ent, &limit_fops);
- debugfs_create_u32("cur", 0400, dir, &ent->available_mrs);
+ debugfs_create_ulong("cur", 0400, dir, &ent->stored);
debugfs_create_u32("miss", 0600, dir, &ent->miss);
}
}
@@ -692,9 +735,9 @@ static void delay_time_func(struct timer_list *t)
WRITE_ONCE(dev->fill_delay, 0);
}
-int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
+int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev)
{
- struct mlx5_mr_cache *cache = &dev->cache;
+ struct mlx5_mkey_cache *cache = &dev->cache;
struct mlx5_cache_ent *ent;
int i;
@@ -707,22 +750,21 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx);
timer_setup(&dev->delay_timer, delay_time_func, 0);
- for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+ for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
ent = &cache->ent[i];
- INIT_LIST_HEAD(&ent->head);
- spin_lock_init(&ent->lock);
+ xa_init_flags(&ent->mkeys, XA_FLAGS_LOCK_IRQ);
ent->order = i + 2;
ent->dev = dev;
ent->limit = 0;
INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
- if (i > MR_CACHE_LAST_STD_ENTRY) {
- mlx5_odp_init_mr_cache_entry(ent);
+ if (i > MKEY_CACHE_LAST_STD_ENTRY) {
+ mlx5_odp_init_mkey_cache_entry(ent);
continue;
}
- if (ent->order > mr_cache_max_order(dev))
+ if (ent->order > mkey_cache_max_order(dev))
continue;
ent->page = PAGE_SHIFT;
@@ -734,36 +776,36 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
ent->limit = dev->mdev->profile.mr_cache[i].limit;
else
ent->limit = 0;
- spin_lock_irq(&ent->lock);
+ xa_lock_irq(&ent->mkeys);
queue_adjust_cache_locked(ent);
- spin_unlock_irq(&ent->lock);
+ xa_unlock_irq(&ent->mkeys);
}
- mlx5_mr_cache_debugfs_init(dev);
+ mlx5_mkey_cache_debugfs_init(dev);
return 0;
}
-int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
+int mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
{
unsigned int i;
if (!dev->cache.wq)
return 0;
- for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+ for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
struct mlx5_cache_ent *ent = &dev->cache.ent[i];
- spin_lock_irq(&ent->lock);
+ xa_lock_irq(&ent->mkeys);
ent->disabled = true;
- spin_unlock_irq(&ent->lock);
+ xa_unlock_irq(&ent->mkeys);
cancel_delayed_work_sync(&ent->dwork);
}
- mlx5_mr_cache_debugfs_cleanup(dev);
+ mlx5_mkey_cache_debugfs_cleanup(dev);
mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);
- for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
+ for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++)
clean_keys(dev, i);
destroy_workqueue(dev->cache.wq);
@@ -830,22 +872,22 @@ static int get_octo_len(u64 addr, u64 len, int page_shift)
return (npages + 1) / 2;
}
-static int mr_cache_max_order(struct mlx5_ib_dev *dev)
+static int mkey_cache_max_order(struct mlx5_ib_dev *dev)
{
if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
- return MR_CACHE_LAST_STD_ENTRY + 2;
+ return MKEY_CACHE_LAST_STD_ENTRY + 2;
return MLX5_MAX_UMR_SHIFT;
}
-static struct mlx5_cache_ent *mr_cache_ent_from_order(struct mlx5_ib_dev *dev,
- unsigned int order)
+static struct mlx5_cache_ent *mkey_cache_ent_from_order(struct mlx5_ib_dev *dev,
+ unsigned int order)
{
- struct mlx5_mr_cache *cache = &dev->cache;
+ struct mlx5_mkey_cache *cache = &dev->cache;
if (order < cache->ent[0].order)
return &cache->ent[0];
order = order - cache->ent[0].order;
- if (order > MR_CACHE_LAST_STD_ENTRY)
+ if (order > MKEY_CACHE_LAST_STD_ENTRY)
return NULL;
return &cache->ent[order];
}
@@ -888,7 +930,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
0, iova);
if (WARN_ON(!page_size))
return ERR_PTR(-EINVAL);
- ent = mr_cache_ent_from_order(
+ ent = mkey_cache_ent_from_order(
dev, order_base_2(ib_umem_num_dma_blocks(umem, page_size)));
/*
* Matches access in alloc_cache_mr(). If the MR can't come from the
@@ -1320,7 +1362,7 @@ static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr,
struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
/* We only track the allocated sizes of MRs from the cache */
- if (!mr->cache_ent)
+ if (!mr->mmkey.cache_ent)
return false;
if (!mlx5r_umr_can_load_pas(dev, new_umem->length))
return false;
@@ -1329,7 +1371,7 @@ static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr,
mlx5_umem_find_best_pgsz(new_umem, mkc, log_page_size, 0, iova);
if (WARN_ON(!*page_size))
return false;
- return (1ULL << mr->cache_ent->order) >=
+ return (1ULL << mr->mmkey.cache_ent->order) >=
ib_umem_num_dma_blocks(new_umem, *page_size);
}
@@ -1570,15 +1612,17 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
}
/* Stop DMA */
- if (mr->cache_ent) {
- if (mlx5r_umr_revoke_mr(mr)) {
- spin_lock_irq(&mr->cache_ent->lock);
- mr->cache_ent->total_mrs--;
- spin_unlock_irq(&mr->cache_ent->lock);
- mr->cache_ent = NULL;
- }
+ if (mr->mmkey.cache_ent) {
+ xa_lock_irq(&mr->mmkey.cache_ent->mkeys);
+ mr->mmkey.cache_ent->in_use--;
+ xa_unlock_irq(&mr->mmkey.cache_ent->mkeys);
+
+ if (mlx5r_umr_revoke_mr(mr) ||
+ push_mkey(mr->mmkey.cache_ent, false,
+ xa_mk_value(mr->mmkey.key)))
+ mr->mmkey.cache_ent = NULL;
}
- if (!mr->cache_ent) {
+ if (!mr->mmkey.cache_ent) {
rc = destroy_mkey(to_mdev(mr->ibmr.device), mr);
if (rc)
return rc;
@@ -1595,12 +1639,10 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
mlx5_ib_free_odp_mr(mr);
}
- if (mr->cache_ent) {
- mlx5_mr_cache_free(dev, mr);
- } else {
+ if (!mr->mmkey.cache_ent)
mlx5_free_priv_descs(mr);
- kfree(mr);
- }
+
+ kfree(mr);
return 0;
}
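/*
 * [Editor's note - illustrative sketch, not part of the patch]
 * The hunks above replace the per-entry spinlock bookkeeping with an xarray
 * (ent->mkeys) whose own lock serves as the entry lock, and a revoked mkey
 * is pushed back into the cache as a tagged integer entry via xa_mk_value().
 * Below is a minimal sketch of that xarray idiom only; the function name,
 * index handling and error policy are assumptions for the example, and the
 * driver's real push_mkey() may differ.
 */
#include <linux/xarray.h>

static int example_push_mkey(struct xarray *mkeys, unsigned long index, u32 mkey)
{
	void *old;

	xa_lock_irq(mkeys);
	/* store the mkey as a value entry; GFP_ATOMIC since the xa_lock is held */
	old = __xa_store(mkeys, index, xa_mk_value(mkey), GFP_ATOMIC);
	xa_unlock_irq(mkeys);

	return xa_err(old);
}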
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 84da5674e1ab..e305bf1dc6c2 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -1588,7 +1588,7 @@ mlx5_ib_odp_destroy_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
return err;
}
-void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent)
+void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent)
{
if (!(ent->dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
return;
diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
index 3a48364c0918..e00b94d1b1ea 100644
--- a/drivers/infiniband/hw/mlx5/umr.c
+++ b/drivers/infiniband/hw/mlx5/umr.c
@@ -176,6 +176,7 @@ int mlx5r_umr_resource_init(struct mlx5_ib_dev *dev)
dev->umrc.pd = pd;
sema_init(&dev->umrc.sem, MAX_UMR_WR);
+ mutex_init(&dev->umrc.lock);
return 0;
@@ -195,6 +196,31 @@ void mlx5r_umr_resource_cleanup(struct mlx5_ib_dev *dev)
ib_dealloc_pd(dev->umrc.pd);
}
+static int mlx5r_umr_recover(struct mlx5_ib_dev *dev)
+{
+ struct umr_common *umrc = &dev->umrc;
+ struct ib_qp_attr attr;
+ int err;
+
+ attr.qp_state = IB_QPS_RESET;
+ err = ib_modify_qp(umrc->qp, &attr, IB_QP_STATE);
+ if (err) {
+ mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
+ goto err;
+ }
+
+ err = mlx5r_umr_qp_rst2rts(dev, umrc->qp);
+ if (err)
+ goto err;
+
+ umrc->state = MLX5_UMR_STATE_ACTIVE;
+ return 0;
+
+err:
+ umrc->state = MLX5_UMR_STATE_ERR;
+ return err;
+}
+
static int mlx5r_umr_post_send(struct ib_qp *ibqp, u32 mkey, struct ib_cqe *cqe,
struct mlx5r_umr_wqe *wqe, bool with_data)
{
@@ -231,7 +257,7 @@ static int mlx5r_umr_post_send(struct ib_qp *ibqp, u32 mkey, struct ib_cqe *cqe,
id.ib_cqe = cqe;
mlx5r_finish_wqe(qp, ctrl, seg, size, cur_edge, idx, id.wr_id, 0,
- MLX5_FENCE_MODE_NONE, MLX5_OPCODE_UMR);
+ MLX5_FENCE_MODE_INITIATOR_SMALL, MLX5_OPCODE_UMR);
mlx5r_ring_db(qp, 1, ctrl);
@@ -270,17 +296,49 @@ static int mlx5r_umr_post_send_wait(struct mlx5_ib_dev *dev, u32 mkey,
mlx5r_umr_init_context(&umr_context);
down(&umrc->sem);
- err = mlx5r_umr_post_send(umrc->qp, mkey, &umr_context.cqe, wqe,
- with_data);
- if (err)
- mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err);
- else {
- wait_for_completion(&umr_context.done);
- if (umr_context.status != IB_WC_SUCCESS) {
- mlx5_ib_warn(dev, "reg umr failed (%u)\n",
- umr_context.status);
+ while (true) {
+ mutex_lock(&umrc->lock);
+ if (umrc->state == MLX5_UMR_STATE_ERR) {
+ mutex_unlock(&umrc->lock);
err = -EFAULT;
+ break;
+ }
+
+ if (umrc->state == MLX5_UMR_STATE_RECOVER) {
+ mutex_unlock(&umrc->lock);
+ usleep_range(3000, 5000);
+ continue;
+ }
+
+ err = mlx5r_umr_post_send(umrc->qp, mkey, &umr_context.cqe, wqe,
+ with_data);
+ mutex_unlock(&umrc->lock);
+ if (err) {
+ mlx5_ib_warn(dev, "UMR post send failed, err %d\n",
+ err);
+ break;
}
+
+ wait_for_completion(&umr_context.done);
+
+ if (umr_context.status == IB_WC_SUCCESS)
+ break;
+
+ if (umr_context.status == IB_WC_WR_FLUSH_ERR)
+ continue;
+
+ WARN_ON_ONCE(1);
+ mlx5_ib_warn(dev,
+ "reg umr failed (%u). Trying to recover and resubmit the flushed WQEs\n",
+ umr_context.status);
+ mutex_lock(&umrc->lock);
+ err = mlx5r_umr_recover(dev);
+ mutex_unlock(&umrc->lock);
+ if (err)
+ mlx5_ib_warn(dev, "couldn't recover UMR, err %d\n",
+ err);
+ err = -EFAULT;
+ break;
}
up(&umrc->sem);
return err;
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 03ed7c0fae50..d745ce9dc88a 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -3084,7 +3084,7 @@ static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
else
DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
- goto err0;
+ goto err1;
}
/* Index only, 18 bit long, lkey = itid << 8 | key */
@@ -3108,7 +3108,7 @@ static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
if (rc) {
DP_ERR(dev, "roce register tid returned an error %d\n", rc);
- goto err1;
+ goto err2;
}
mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
@@ -3117,8 +3117,10 @@ static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
return mr;
-err1:
+err2:
dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
+err1:
+ qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
err0:
kfree(mr);
return ERR_PTR(rc);
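/*
 * [Editor's note - illustrative sketch, not part of the patch]
 * The relabelled error path above appears to ensure the PBL table is also
 * released when TID allocation or registration fails, by inserting a new
 * unwind label between freeing the TID and freeing the mr. A generic,
 * self-contained sketch of that layered goto-unwind pattern is below;
 * all names and the malloc/free stand-ins are placeholders.
 */
#include <stdlib.h>

static int example_register(void *tid)
{
	return tid ? 0 : -1;	/* stand-in for rdma_register_tid(), may fail */
}

static void *example_alloc(void)
{
	void *mr, *pbl, *tid;

	mr = malloc(16);		/* stands in for the mr struct */
	if (!mr)
		return NULL;

	pbl = malloc(32);		/* stands in for the PBL table */
	if (!pbl)
		goto err0;		/* only mr to undo */

	tid = malloc(64);		/* stands in for the HW TID */
	if (!tid)
		goto err1;		/* undo pbl, then mr */

	if (example_register(tid))
		goto err2;		/* undo tid, then pbl, then mr */

	return mr;

err2:
	free(tid);			/* release in reverse order of acquisition */
err1:
	free(pbl);
err0:
	free(mr);
	return NULL;
}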
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index b37b1c6d35c6..26c615772be3 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -321,7 +321,7 @@ struct qib_verbs_txreq {
* These 7 values (SDR, DDR, and QDR may be ORed for auto-speed
* negotiation) are used for the 3rd argument to path_f_set_ib_cfg
* with cmd QIB_IB_CFG_SPD_ENB, by direct calls or via sysfs. They
- * are also the the possible values for qib_link_speed_enabled and active
+ * are also the possible values for qib_link_speed_enabled and active
* The values were chosen to match values used within the IB spec.
*/
#define QIB_IB_SDR 1
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index aa290928cf96..3937144b2ae5 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -153,7 +153,7 @@ static int qib_get_base_info(struct file *fp, void __user *ubase,
kinfo->spi_tidcnt += dd->rcvtidcnt % subctxt_cnt;
/*
* for this use, may be cfgctxts summed over all chips that
- * are are configured and present
+ * are configured and present
*/
kinfo->spi_nctxts = dd->cfgctxts;
/* unit (chip/board) our context is on */
@@ -851,7 +851,7 @@ static int mmap_rcvegrbufs(struct vm_area_struct *vma,
ret = -EPERM;
goto bail;
}
- /* don't allow them to later change to writeable with mprotect */
+ /* don't allow them to later change to writable with mprotect */
vma->vm_flags &= ~VM_MAYWRITE;
start = vma->vm_start;
@@ -941,7 +941,7 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
goto bail;
}
/*
- * Don't allow permission to later change to writeable
+ * Don't allow permission to later change to writable
* with mprotect.
*/
vma->vm_flags &= ~VM_MAYWRITE;
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 37b628a162e0..6af57067c32e 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -58,7 +58,7 @@ static void qib_set_ib_7220_lstate(struct qib_pportdata *, u16, u16);
/*
* This file contains almost all the chip-specific register information and
* access functions for the QLogic QLogic_IB 7220 PCI-Express chip, with the
- * exception of SerDes support, which in in qib_sd7220.c.
+ * exception of SerDes support, which is in qib_sd7220.c.
*/
/* Below uses machine-generated qib_chipnum_regs.h file */
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index ceed302cf6a0..6861c6384f18 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -2850,9 +2850,9 @@ static void qib_setup_7322_cleanup(struct qib_devdata *dd)
qib_7322_free_irq(dd);
kfree(dd->cspec->cntrs);
- kfree(dd->cspec->sendchkenable);
- kfree(dd->cspec->sendgrhchk);
- kfree(dd->cspec->sendibchk);
+ bitmap_free(dd->cspec->sendchkenable);
+ bitmap_free(dd->cspec->sendgrhchk);
+ bitmap_free(dd->cspec->sendibchk);
kfree(dd->cspec->msix_entries);
for (i = 0; i < dd->num_pports; i++) {
unsigned long flags;
@@ -6383,18 +6383,11 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
features = qib_7322_boardname(dd);
/* now that piobcnt2k and 4k set, we can allocate these */
- sbufcnt = dd->piobcnt2k + dd->piobcnt4k +
- NUM_VL15_BUFS + BITS_PER_LONG - 1;
- sbufcnt /= BITS_PER_LONG;
- dd->cspec->sendchkenable =
- kmalloc_array(sbufcnt, sizeof(*dd->cspec->sendchkenable),
- GFP_KERNEL);
- dd->cspec->sendgrhchk =
- kmalloc_array(sbufcnt, sizeof(*dd->cspec->sendgrhchk),
- GFP_KERNEL);
- dd->cspec->sendibchk =
- kmalloc_array(sbufcnt, sizeof(*dd->cspec->sendibchk),
- GFP_KERNEL);
+ sbufcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
+
+ dd->cspec->sendchkenable = bitmap_zalloc(sbufcnt, GFP_KERNEL);
+ dd->cspec->sendgrhchk = bitmap_zalloc(sbufcnt, GFP_KERNEL);
+ dd->cspec->sendibchk = bitmap_zalloc(sbufcnt, GFP_KERNEL);
if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
!dd->cspec->sendibchk) {
ret = -ENOMEM;
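/*
 * [Editor's note - illustrative sketch, not part of the patch]
 * The conversion above swaps open-coded kmalloc_array() sizing of long
 * words for the dedicated bitmap allocator, which does the rounding to
 * BITS_TO_LONGS(nbits) internally and returns zeroed storage. A minimal
 * kernel-context example of the same idiom; names are placeholders.
 */
#include <linux/bitmap.h>
#include <linux/slab.h>

static unsigned long *example_alloc_bufmap(unsigned int nbits)
{
	/* one bit per send buffer, already cleared on return */
	unsigned long *map = bitmap_zalloc(nbits, GFP_KERNEL);

	if (map)
		set_bit(0, map);	/* e.g. mark buffer 0 in use */
	return map;			/* release later with bitmap_free(map) */
}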
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index d1a72e89e297..45211008449f 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1106,8 +1106,7 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
if (!qib_cpulist_count) {
u32 count = num_online_cpus();
- qib_cpulist = kcalloc(BITS_TO_LONGS(count), sizeof(long),
- GFP_KERNEL);
+ qib_cpulist = bitmap_zalloc(count, GFP_KERNEL);
if (qib_cpulist)
qib_cpulist_count = count;
}
@@ -1279,7 +1278,7 @@ static void __exit qib_ib_cleanup(void)
#endif
qib_cpulist_count = 0;
- kfree(qib_cpulist);
+ bitmap_free(qib_cpulist);
WARN_ON(!xa_empty(&qib_dev_table));
qib_dev_cleanup();
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
index 81b810d006c0..1dc3ccf0cf1f 100644
--- a/drivers/infiniband/hw/qib/qib_sd7220.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -587,7 +587,7 @@ static int epb_access(struct qib_devdata *dd, int sdnum, int claim)
/* Need to release */
u64 pollval;
/*
- * The only writeable bits are the request and CS.
+ * The only writable bits are the request and CS.
* Both should be clear
*/
u64 newval = 0;
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index e212929369df..67a1b4562dc2 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -482,7 +482,7 @@ int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev)
if (err)
goto out_free_dev;
- if (!iommu_capable(dev->bus, IOMMU_CAP_CACHE_COHERENCY)) {
+ if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY)) {
usnic_err("IOMMU of %s does not support cache coherency\n",
dev_name(dev));
err = -EINVAL;
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index da3a398053b8..fb0c008af78c 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -114,6 +114,8 @@ void retransmit_timer(struct timer_list *t)
{
struct rxe_qp *qp = from_timer(qp, t, retrans_timer);
+ pr_debug("%s: fired for qp#%d\n", __func__, qp->elem.index);
+
if (qp->valid) {
qp->comp.timeout = 1;
rxe_run_task(&qp->comp.task, 1);
@@ -560,17 +562,16 @@ int rxe_completer(void *arg)
struct sk_buff *skb = NULL;
struct rxe_pkt_info *pkt = NULL;
enum comp_state state;
- int ret = 0;
+ int ret;
if (!rxe_get(qp))
return -EAGAIN;
- if (!qp->valid || qp->req.state == QP_STATE_ERROR ||
- qp->req.state == QP_STATE_RESET) {
+ if (!qp->valid || qp->comp.state == QP_STATE_ERROR ||
+ qp->comp.state == QP_STATE_RESET) {
rxe_drain_resp_pkts(qp, qp->valid &&
- qp->req.state == QP_STATE_ERROR);
- ret = -EAGAIN;
- goto done;
+ qp->comp.state == QP_STATE_ERROR);
+ goto exit;
}
if (qp->comp.timeout) {
@@ -580,10 +581,8 @@ int rxe_completer(void *arg)
qp->comp.timeout_retry = 0;
}
- if (qp->req.need_retry) {
- ret = -EAGAIN;
- goto done;
- }
+ if (qp->req.need_retry)
+ goto exit;
state = COMPST_GET_ACK;
@@ -676,8 +675,7 @@ int rxe_completer(void *arg)
qp->qp_timeout_jiffies)
mod_timer(&qp->retrans_timer,
jiffies + qp->qp_timeout_jiffies);
- ret = -EAGAIN;
- goto done;
+ goto exit;
case COMPST_ERROR_RETRY:
/* we come here if the retry timer fired and we did
@@ -689,10 +687,8 @@ int rxe_completer(void *arg)
*/
/* there is nothing to retry in this case */
- if (!wqe || (wqe->state == wqe_state_posted)) {
- ret = -EAGAIN;
- goto done;
- }
+ if (!wqe || (wqe->state == wqe_state_posted))
+ goto exit;
/* if we've started a retry, don't start another
* retry sequence, unless this is a timeout.
@@ -730,18 +726,21 @@ int rxe_completer(void *arg)
break;
case COMPST_RNR_RETRY:
+ /* we come here if we received an RNR NAK */
if (qp->comp.rnr_retry > 0) {
if (qp->comp.rnr_retry != 7)
qp->comp.rnr_retry--;
- qp->req.need_retry = 1;
+ /* don't start a retry flow until the
+ * rnr timer has fired
+ */
+ qp->req.wait_for_rnr_timer = 1;
pr_debug("qp#%d set rnr nak timer\n",
qp_num(qp));
mod_timer(&qp->rnr_nak_timer,
jiffies + rnrnak_jiffies(aeth_syn(pkt)
& ~AETH_TYPE_MASK));
- ret = -EAGAIN;
- goto done;
+ goto exit;
} else {
rxe_counter_inc(rxe,
RXE_CNT_RNR_RETRY_EXCEEDED);
@@ -754,12 +753,20 @@ int rxe_completer(void *arg)
WARN_ON_ONCE(wqe->status == IB_WC_SUCCESS);
do_complete(qp, wqe);
rxe_qp_error(qp);
- ret = -EAGAIN;
- goto done;
+ goto exit;
}
}
+ /* A non-zero return value will cause rxe_do_task to
+ * exit its loop and end the tasklet. A zero return
+ * will continue looping and return to rxe_completer
+ */
done:
+ ret = 0;
+ goto out;
+exit:
+ ret = -EAGAIN;
+out:
if (pkt)
free_pkt(pkt);
rxe_put(qp);
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
index 642b52539ac3..b1a0ab3cd4bd 100644
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -19,16 +19,16 @@ int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
}
if (cqe > rxe->attr.max_cqe) {
- pr_warn("cqe(%d) > max_cqe(%d)\n",
- cqe, rxe->attr.max_cqe);
+ pr_debug("cqe(%d) > max_cqe(%d)\n",
+ cqe, rxe->attr.max_cqe);
goto err1;
}
if (cq) {
count = queue_count(cq->queue, QUEUE_TYPE_TO_CLIENT);
if (cqe < count) {
- pr_warn("cqe(%d) < current # elements in queue (%d)",
- cqe, count);
+ pr_debug("cqe(%d) < current # elements in queue (%d)",
+ cqe, count);
goto err1;
}
}
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 0e022ae1b8a5..22f6cc31d1d6 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -77,9 +77,8 @@ struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
enum rxe_mr_lookup_type type);
int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length);
int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);
-int rxe_invalidate_mr(struct rxe_qp *qp, u32 rkey);
+int rxe_invalidate_mr(struct rxe_qp *qp, u32 key);
int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
-int rxe_mr_set_page(struct ib_mr *ibmr, u64 addr);
int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
void rxe_mr_cleanup(struct rxe_pool_elem *elem);
@@ -145,7 +144,7 @@ static inline int rcv_wqe_size(int max_sge)
max_sge * sizeof(struct ib_sge);
}
-void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res);
+void free_rd_atomic_resource(struct resp_res *res);
static inline void rxe_advance_resp_resource(struct rxe_qp *qp)
{
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index fc3942e04a1f..850b80f5ad8b 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -24,7 +24,7 @@ u8 rxe_get_next_key(u32 last_key)
int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
{
- struct rxe_map_set *set = mr->cur_map_set;
+
switch (mr->type) {
case IB_MR_TYPE_DMA:
@@ -32,8 +32,8 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
case IB_MR_TYPE_USER:
case IB_MR_TYPE_MEM_REG:
- if (iova < set->iova || length > set->length ||
- iova > set->iova + set->length - length)
+ if (iova < mr->iova || length > mr->length ||
+ iova > mr->iova + mr->length - length)
return -EFAULT;
return 0;
@@ -65,89 +65,41 @@ static void rxe_mr_init(int access, struct rxe_mr *mr)
mr->map_shift = ilog2(RXE_BUF_PER_MAP);
}
-static void rxe_mr_free_map_set(int num_map, struct rxe_map_set *set)
-{
- int i;
-
- for (i = 0; i < num_map; i++)
- kfree(set->map[i]);
-
- kfree(set->map);
- kfree(set);
-}
-
-static int rxe_mr_alloc_map_set(int num_map, struct rxe_map_set **setp)
+static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf)
{
int i;
- struct rxe_map_set *set;
+ int num_map;
+ struct rxe_map **map = mr->map;
- set = kmalloc(sizeof(*set), GFP_KERNEL);
- if (!set)
- goto err_out;
+ num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;
- set->map = kmalloc_array(num_map, sizeof(struct rxe_map *), GFP_KERNEL);
- if (!set->map)
- goto err_free_set;
+ mr->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
+ if (!mr->map)
+ goto err1;
for (i = 0; i < num_map; i++) {
- set->map[i] = kmalloc(sizeof(struct rxe_map), GFP_KERNEL);
- if (!set->map[i])
- goto err_free_map;
+ mr->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
+ if (!mr->map[i])
+ goto err2;
}
- *setp = set;
-
- return 0;
-
-err_free_map:
- for (i--; i >= 0; i--)
- kfree(set->map[i]);
-
- kfree(set->map);
-err_free_set:
- kfree(set);
-err_out:
- return -ENOMEM;
-}
-
-/**
- * rxe_mr_alloc() - Allocate memory map array(s) for MR
- * @mr: Memory region
- * @num_buf: Number of buffer descriptors to support
- * @both: If non zero allocate both mr->map and mr->next_map
- * else just allocate mr->map. Used for fast MRs
- *
- * Return: 0 on success else an error
- */
-static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf, int both)
-{
- int ret;
- int num_map;
-
BUILD_BUG_ON(!is_power_of_2(RXE_BUF_PER_MAP));
- num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;
mr->map_shift = ilog2(RXE_BUF_PER_MAP);
mr->map_mask = RXE_BUF_PER_MAP - 1;
+
mr->num_buf = num_buf;
- mr->max_buf = num_map * RXE_BUF_PER_MAP;
mr->num_map = num_map;
-
- ret = rxe_mr_alloc_map_set(num_map, &mr->cur_map_set);
- if (ret)
- return -ENOMEM;
-
- if (both) {
- ret = rxe_mr_alloc_map_set(num_map, &mr->next_map_set);
- if (ret)
- goto err_free;
- }
+ mr->max_buf = num_map * RXE_BUF_PER_MAP;
return 0;
-err_free:
- rxe_mr_free_map_set(mr->num_map, mr->cur_map_set);
- mr->cur_map_set = NULL;
+err2:
+ for (i--; i >= 0; i--)
+ kfree(mr->map[i]);
+
+ kfree(mr->map);
+err1:
return -ENOMEM;
}
@@ -164,7 +116,6 @@ void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
int access, struct rxe_mr *mr)
{
- struct rxe_map_set *set;
struct rxe_map **map;
struct rxe_phys_buf *buf = NULL;
struct ib_umem *umem;
@@ -172,6 +123,7 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
int num_buf;
void *vaddr;
int err;
+ int i;
umem = ib_umem_get(pd->ibpd.device, start, length, access);
if (IS_ERR(umem)) {
@@ -185,20 +137,18 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
rxe_mr_init(access, mr);
- err = rxe_mr_alloc(mr, num_buf, 0);
+ err = rxe_mr_alloc(mr, num_buf);
if (err) {
pr_warn("%s: Unable to allocate memory for map\n",
__func__);
goto err_release_umem;
}
- set = mr->cur_map_set;
- set->page_shift = PAGE_SHIFT;
- set->page_mask = PAGE_SIZE - 1;
-
- num_buf = 0;
- map = set->map;
+ mr->page_shift = PAGE_SHIFT;
+ mr->page_mask = PAGE_SIZE - 1;
+ num_buf = 0;
+ map = mr->map;
if (length > 0) {
buf = map[0]->buf;
@@ -214,29 +164,33 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
pr_warn("%s: Unable to get virtual address\n",
__func__);
err = -ENOMEM;
- goto err_release_umem;
+ goto err_cleanup_map;
}
buf->addr = (uintptr_t)vaddr;
buf->size = PAGE_SIZE;
num_buf++;
buf++;
+
}
}
mr->ibmr.pd = &pd->ibpd;
mr->umem = umem;
mr->access = access;
+ mr->length = length;
+ mr->iova = iova;
+ mr->va = start;
+ mr->offset = ib_umem_offset(umem);
mr->state = RXE_MR_STATE_VALID;
mr->type = IB_MR_TYPE_USER;
- set->length = length;
- set->iova = iova;
- set->va = start;
- set->offset = ib_umem_offset(umem);
-
return 0;
+err_cleanup_map:
+ for (i = 0; i < mr->num_map; i++)
+ kfree(mr->map[i]);
+ kfree(mr->map);
err_release_umem:
ib_umem_release(umem);
err_out:
@@ -250,7 +204,7 @@ int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr)
/* always allow remote access for FMRs */
rxe_mr_init(IB_ACCESS_REMOTE, mr);
- err = rxe_mr_alloc(mr, max_pages, 1);
+ err = rxe_mr_alloc(mr, max_pages);
if (err)
goto err1;
@@ -268,24 +222,21 @@ err1:
static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
size_t *offset_out)
{
- struct rxe_map_set *set = mr->cur_map_set;
- size_t offset = iova - set->iova + set->offset;
+ size_t offset = iova - mr->iova + mr->offset;
int map_index;
int buf_index;
u64 length;
- struct rxe_map *map;
- if (likely(set->page_shift)) {
- *offset_out = offset & set->page_mask;
- offset >>= set->page_shift;
+ if (likely(mr->page_shift)) {
+ *offset_out = offset & mr->page_mask;
+ offset >>= mr->page_shift;
*n_out = offset & mr->map_mask;
*m_out = offset >> mr->map_shift;
} else {
map_index = 0;
buf_index = 0;
- map = set->map[map_index];
- length = map->buf[buf_index].size;
+ length = mr->map[map_index]->buf[buf_index].size;
while (offset >= length) {
offset -= length;
@@ -295,8 +246,7 @@ static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
map_index++;
buf_index = 0;
}
- map = set->map[map_index];
- length = map->buf[buf_index].size;
+ length = mr->map[map_index]->buf[buf_index].size;
}
*m_out = map_index;
@@ -317,7 +267,7 @@ void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
goto out;
}
- if (!mr->cur_map_set) {
+ if (!mr->map) {
addr = (void *)(uintptr_t)iova;
goto out;
}
@@ -330,13 +280,13 @@ void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
lookup_iova(mr, iova, &m, &n, &offset);
- if (offset + length > mr->cur_map_set->map[m]->buf[n].size) {
+ if (offset + length > mr->map[m]->buf[n].size) {
pr_warn("crosses page boundary\n");
addr = NULL;
goto out;
}
- addr = (void *)(uintptr_t)mr->cur_map_set->map[m]->buf[n].addr + offset;
+ addr = (void *)(uintptr_t)mr->map[m]->buf[n].addr + offset;
out:
return addr;
@@ -372,7 +322,7 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
return 0;
}
- WARN_ON_ONCE(!mr->cur_map_set);
+ WARN_ON_ONCE(!mr->map);
err = mr_check_range(mr, iova, length);
if (err) {
@@ -382,7 +332,7 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
lookup_iova(mr, iova, &m, &i, &offset);
- map = mr->cur_map_set->map + m;
+ map = mr->map + m;
buf = map[0]->buf + i;
while (length > 0) {
@@ -576,22 +526,22 @@ struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
return mr;
}
-int rxe_invalidate_mr(struct rxe_qp *qp, u32 rkey)
+int rxe_invalidate_mr(struct rxe_qp *qp, u32 key)
{
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
struct rxe_mr *mr;
int ret;
- mr = rxe_pool_get_index(&rxe->mr_pool, rkey >> 8);
+ mr = rxe_pool_get_index(&rxe->mr_pool, key >> 8);
if (!mr) {
- pr_err("%s: No MR for rkey %#x\n", __func__, rkey);
+ pr_err("%s: No MR for key %#x\n", __func__, key);
ret = -EINVAL;
goto err;
}
- if (rkey != mr->rkey) {
- pr_err("%s: rkey (%#x) doesn't match mr->rkey (%#x)\n",
- __func__, rkey, mr->rkey);
+ if (mr->rkey ? (key != mr->rkey) : (key != mr->lkey)) {
+ pr_err("%s: wr key (%#x) doesn't match mr key (%#x)\n",
+ __func__, key, (mr->rkey ? mr->rkey : mr->lkey));
ret = -EINVAL;
goto err_drop_ref;
}
@@ -628,9 +578,8 @@ err:
int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
struct rxe_mr *mr = to_rmr(wqe->wr.wr.reg.mr);
- u32 key = wqe->wr.wr.reg.key & 0xff;
+ u32 key = wqe->wr.wr.reg.key;
u32 access = wqe->wr.wr.reg.access;
- struct rxe_map_set *set;
/* user can only register MR in free state */
if (unlikely(mr->state != RXE_MR_STATE_FREE)) {
@@ -646,36 +595,19 @@ int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
return -EINVAL;
}
+ /* user is only allowed to change key portion of l/rkey */
+ if (unlikely((mr->lkey & ~0xff) != (key & ~0xff))) {
+ pr_warn("%s: key = 0x%x has wrong index mr->lkey = 0x%x\n",
+ __func__, key, mr->lkey);
+ return -EINVAL;
+ }
+
mr->access = access;
- mr->lkey = (mr->lkey & ~0xff) | key;
- mr->rkey = (access & IB_ACCESS_REMOTE) ? mr->lkey : 0;
+ mr->lkey = key;
+ mr->rkey = (access & IB_ACCESS_REMOTE) ? key : 0;
+ mr->iova = wqe->wr.wr.reg.mr->iova;
mr->state = RXE_MR_STATE_VALID;
- set = mr->cur_map_set;
- mr->cur_map_set = mr->next_map_set;
- mr->cur_map_set->iova = wqe->wr.wr.reg.mr->iova;
- mr->next_map_set = set;
-
- return 0;
-}
-
-int rxe_mr_set_page(struct ib_mr *ibmr, u64 addr)
-{
- struct rxe_mr *mr = to_rmr(ibmr);
- struct rxe_map_set *set = mr->next_map_set;
- struct rxe_map *map;
- struct rxe_phys_buf *buf;
-
- if (unlikely(set->nbuf == mr->num_buf))
- return -ENOMEM;
-
- map = set->map[set->nbuf / RXE_BUF_PER_MAP];
- buf = &map->buf[set->nbuf % RXE_BUF_PER_MAP];
-
- buf->addr = addr;
- buf->size = ibmr->page_size;
- set->nbuf++;
-
return 0;
}
@@ -687,7 +619,7 @@ int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
if (atomic_read(&mr->num_mw) > 0)
return -EINVAL;
- rxe_put(mr);
+ rxe_cleanup(mr);
return 0;
}
@@ -695,14 +627,15 @@ int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
void rxe_mr_cleanup(struct rxe_pool_elem *elem)
{
struct rxe_mr *mr = container_of(elem, typeof(*mr), elem);
+ int i;
rxe_put(mr_pd(mr));
-
ib_umem_release(mr->umem);
- if (mr->cur_map_set)
- rxe_mr_free_map_set(mr->num_map, mr->cur_map_set);
+ if (mr->map) {
+ for (i = 0; i < mr->num_map; i++)
+ kfree(mr->map[i]);
- if (mr->next_map_set)
- rxe_mr_free_map_set(mr->num_map, mr->next_map_set);
+ kfree(mr->map);
+ }
}
diff --git a/drivers/infiniband/sw/rxe/rxe_mw.c b/drivers/infiniband/sw/rxe/rxe_mw.c
index 2e1fa844fabf..104993801a80 100644
--- a/drivers/infiniband/sw/rxe/rxe_mw.c
+++ b/drivers/infiniband/sw/rxe/rxe_mw.c
@@ -33,6 +33,8 @@ int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
RXE_MW_STATE_FREE : RXE_MW_STATE_VALID;
spin_lock_init(&mw->lock);
+ rxe_finalize(mw);
+
return 0;
}
@@ -40,7 +42,7 @@ int rxe_dealloc_mw(struct ib_mw *ibmw)
{
struct rxe_mw *mw = to_rmw(ibmw);
- rxe_put(mw);
+ rxe_cleanup(mw);
return 0;
}
@@ -48,8 +50,6 @@ int rxe_dealloc_mw(struct ib_mw *ibmw)
static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
struct rxe_mw *mw, struct rxe_mr *mr)
{
- u32 key = wqe->wr.wr.mw.rkey & 0xff;
-
if (mw->ibmw.type == IB_MW_TYPE_1) {
if (unlikely(mw->state != RXE_MW_STATE_VALID)) {
pr_err_once(
@@ -87,11 +87,6 @@ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
}
}
- if (unlikely(key == (mw->rkey & 0xff))) {
- pr_err_once("attempt to bind MW with same key\n");
- return -EINVAL;
- }
-
/* remaining checks only apply to a nonzero MR */
if (!mr)
return 0;
@@ -113,21 +108,21 @@ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
(IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_ATOMIC)) &&
!(mr->access & IB_ACCESS_LOCAL_WRITE))) {
pr_err_once(
- "attempt to bind an writeable MW to an MR without local write access\n");
+ "attempt to bind an Writable MW to an MR without local write access\n");
return -EINVAL;
}
/* C10-75 */
if (mw->access & IB_ZERO_BASED) {
- if (unlikely(wqe->wr.wr.mw.length > mr->cur_map_set->length)) {
+ if (unlikely(wqe->wr.wr.mw.length > mr->length)) {
pr_err_once(
"attempt to bind a ZB MW outside of the MR\n");
return -EINVAL;
}
} else {
- if (unlikely((wqe->wr.wr.mw.addr < mr->cur_map_set->iova) ||
+ if (unlikely((wqe->wr.wr.mw.addr < mr->iova) ||
((wqe->wr.wr.mw.addr + wqe->wr.wr.mw.length) >
- (mr->cur_map_set->iova + mr->cur_map_set->length)))) {
+ (mr->iova + mr->length)))) {
pr_err_once(
"attempt to bind a VA MW outside of the MR\n");
return -EINVAL;
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index 568a7cbd13d4..86c7a8bf3cbb 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -105,6 +105,12 @@ enum rxe_device_param {
RXE_INFLIGHT_SKBS_PER_QP_HIGH = 64,
RXE_INFLIGHT_SKBS_PER_QP_LOW = 16,
+ /* Max number of iterations of each tasklet
+ * before yielding the cpu to let other
+ * work make progress
+ */
+ RXE_MAX_ITERATIONS = 1024,
+
/* Delay before calling arbiter timer */
RXE_NSEC_ARB_TIMER_DELAY = 200,
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index 19b14826385b..f50620f5a0a1 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -6,6 +6,7 @@
#include "rxe.h"
+#define RXE_POOL_TIMEOUT (200)
#define RXE_POOL_ALIGN (16)
static const struct rxe_type_info {
@@ -136,10 +137,14 @@ void *rxe_alloc(struct rxe_pool *pool)
elem->pool = pool;
elem->obj = obj;
kref_init(&elem->ref_cnt);
+ init_completion(&elem->complete);
- err = xa_alloc_cyclic(&pool->xa, &elem->index, elem, pool->limit,
+ /* allocate index in array but leave pointer as NULL so it
+ * can't be looked up until rxe_finalize() is called
+ */
+ err = xa_alloc_cyclic(&pool->xa, &elem->index, NULL, pool->limit,
&pool->next, GFP_KERNEL);
- if (err)
+ if (err < 0)
goto err_free;
return obj;
@@ -151,9 +156,11 @@ err_cnt:
return NULL;
}
-int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem)
+int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem,
+ bool sleepable)
{
int err;
+ gfp_t gfp_flags;
if (WARN_ON(pool->type == RXE_TYPE_MR))
return -EINVAL;
@@ -164,10 +171,19 @@ int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem)
elem->pool = pool;
elem->obj = (u8 *)elem - pool->elem_offset;
kref_init(&elem->ref_cnt);
-
- err = xa_alloc_cyclic(&pool->xa, &elem->index, elem, pool->limit,
- &pool->next, GFP_KERNEL);
- if (err)
+ init_completion(&elem->complete);
+
+ /* AH objects are unique in that the create_ah verb
+ * can be called in atomic context. If the create_ah
+ * call is not sleepable use GFP_ATOMIC.
+ */
+ gfp_flags = sleepable ? GFP_KERNEL : GFP_ATOMIC;
+
+ if (sleepable)
+ might_sleep();
+ err = xa_alloc_cyclic(&pool->xa, &elem->index, NULL, pool->limit,
+ &pool->next, gfp_flags);
+ if (err < 0)
goto err_cnt;
return 0;
@@ -181,16 +197,15 @@ void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
struct rxe_pool_elem *elem;
struct xarray *xa = &pool->xa;
- unsigned long flags;
void *obj;
- xa_lock_irqsave(xa, flags);
+ rcu_read_lock();
elem = xa_load(xa, index);
if (elem && kref_get_unless_zero(&elem->ref_cnt))
obj = elem->obj;
else
obj = NULL;
- xa_unlock_irqrestore(xa, flags);
+ rcu_read_unlock();
return obj;
}
@@ -198,17 +213,74 @@ void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
static void rxe_elem_release(struct kref *kref)
{
struct rxe_pool_elem *elem = container_of(kref, typeof(*elem), ref_cnt);
- struct rxe_pool *pool = elem->pool;
- xa_erase(&pool->xa, elem->index);
+ complete(&elem->complete);
+}
+
+int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable)
+{
+ struct rxe_pool *pool = elem->pool;
+ struct xarray *xa = &pool->xa;
+ static int timeout = RXE_POOL_TIMEOUT;
+ int ret, err = 0;
+ void *xa_ret;
+
+ if (sleepable)
+ might_sleep();
+
+ /* erase xarray entry to prevent looking up
+ * the pool elem from its index
+ */
+ xa_ret = xa_erase(xa, elem->index);
+ WARN_ON(xa_err(xa_ret));
+
+ /* if this is the last call to rxe_put complete the
+ * object. It is safe to touch obj->elem after this since
+ * it is freed below
+ */
+ __rxe_put(elem);
+
+ /* wait until all references to the object have been
+ * dropped before final object specific cleanup and
+ * return to rdma-core
+ */
+ if (sleepable) {
+ if (!completion_done(&elem->complete) && timeout) {
+ ret = wait_for_completion_timeout(&elem->complete,
+ timeout);
+
+ /* Shouldn't happen. There are still references to
+ * the object but, rather than deadlock, free the
+ * object or pass back to rdma-core.
+ */
+ if (WARN_ON(!ret))
+ err = -EINVAL;
+ }
+ } else {
+ unsigned long until = jiffies + timeout;
+
+ /* AH objects are unique in that the destroy_ah verb
+ * can be called in atomic context. This delay
+ * replaces the wait_for_completion call above
+ * when the destroy_ah call is not sleepable
+ */
+ while (!completion_done(&elem->complete) &&
+ time_before(jiffies, until))
+ mdelay(1);
+
+ if (WARN_ON(!completion_done(&elem->complete)))
+ err = -EINVAL;
+ }
if (pool->cleanup)
pool->cleanup(elem);
if (pool->type == RXE_TYPE_MR)
- kfree(elem->obj);
+ kfree_rcu(elem->obj);
atomic_dec(&pool->num_elem);
+
+ return err;
}
int __rxe_get(struct rxe_pool_elem *elem)
@@ -220,3 +292,11 @@ int __rxe_put(struct rxe_pool_elem *elem)
{
return kref_put(&elem->ref_cnt, rxe_elem_release);
}
+
+void __rxe_finalize(struct rxe_pool_elem *elem)
+{
+ void *xa_ret;
+
+ xa_ret = xa_store(&elem->pool->xa, elem->index, elem, GFP_KERNEL);
+ WARN_ON(xa_err(xa_ret));
+}
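/*
 * [Editor's note - illustrative sketch, not part of the patch]
 * With the reworked pool, object visibility is split into three phases:
 * the index is reserved with a NULL pointer when the object joins the
 * pool, rxe_finalize() publishes the pointer so rxe_pool_get_index() can
 * find it, and rxe_cleanup() hides the object again and waits on
 * elem->complete for the last reference before the final free. A sketch
 * of the expected caller pattern, mirroring the rxe_verbs.c hunks further
 * down; the function names and trimmed error handling are placeholders.
 */
#include "rxe.h"	/* driver-internal header pulling in rxe_pool.h */

static int example_create(struct rxe_pool *pool, struct rxe_ah *ah, bool sleepable)
{
	int err;

	/* index reserved, object not yet visible to lookups */
	err = rxe_add_to_pool_ah(pool, ah, sleepable);
	if (err)
		return err;

	/* ... driver-specific initialisation of ah ... */

	rxe_finalize(ah);	/* publish: lookups by index now succeed */
	return 0;
}

static void example_destroy(struct rxe_ah *ah, bool sleepable)
{
	/* hide from lookup, drop the creation ref, wait for readers, free */
	rxe_cleanup_ah(ah, sleepable);
}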
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
index 0860660d65ec..9d83cb32092f 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.h
+++ b/drivers/infiniband/sw/rxe/rxe_pool.h
@@ -24,6 +24,7 @@ struct rxe_pool_elem {
void *obj;
struct kref ref_cnt;
struct list_head list;
+ struct completion complete;
u32 index;
};
@@ -57,21 +58,28 @@ void rxe_pool_cleanup(struct rxe_pool *pool);
void *rxe_alloc(struct rxe_pool *pool);
/* connect already allocated object to pool */
-int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem);
-
-#define rxe_add_to_pool(pool, obj) __rxe_add_to_pool(pool, &(obj)->elem)
+int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem,
+ bool sleepable);
+#define rxe_add_to_pool(pool, obj) __rxe_add_to_pool(pool, &(obj)->elem, true)
+#define rxe_add_to_pool_ah(pool, obj, sleepable) __rxe_add_to_pool(pool, \
+ &(obj)->elem, sleepable)
/* lookup an indexed object from index. takes a reference on object */
void *rxe_pool_get_index(struct rxe_pool *pool, u32 index);
int __rxe_get(struct rxe_pool_elem *elem);
-
#define rxe_get(obj) __rxe_get(&(obj)->elem)
int __rxe_put(struct rxe_pool_elem *elem);
-
#define rxe_put(obj) __rxe_put(&(obj)->elem)
+int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable);
+#define rxe_cleanup(obj) __rxe_cleanup(&(obj)->elem, true)
+#define rxe_cleanup_ah(obj, sleepable) __rxe_cleanup(&(obj)->elem, sleepable)
+
#define rxe_read(obj) kref_read(&(obj)->elem.ref_cnt)
+void __rxe_finalize(struct rxe_pool_elem *elem);
+#define rxe_finalize(obj) __rxe_finalize(&(obj)->elem)
+
#endif /* RXE_POOL_H */
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 22e9b85344c3..516bf9b95e48 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -120,17 +120,15 @@ static void free_rd_atomic_resources(struct rxe_qp *qp)
for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
struct resp_res *res = &qp->resp.resources[i];
- free_rd_atomic_resource(qp, res);
+ free_rd_atomic_resource(res);
}
kfree(qp->resp.resources);
qp->resp.resources = NULL;
}
}
-void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
+void free_rd_atomic_resource(struct resp_res *res)
{
- if (res->type == RXE_ATOMIC_MASK)
- kfree_skb(res->atomic.skb);
res->type = 0;
}
@@ -142,7 +140,7 @@ static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
if (qp->resp.resources) {
for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
res = &qp->resp.resources[i];
- free_rd_atomic_resource(qp, res);
+ free_rd_atomic_resource(res);
}
}
}
@@ -174,6 +172,14 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
spin_lock_init(&qp->state_lock);
+ spin_lock_init(&qp->req.task.state_lock);
+ spin_lock_init(&qp->resp.task.state_lock);
+ spin_lock_init(&qp->comp.task.state_lock);
+
+ spin_lock_init(&qp->sq.sq_lock);
+ spin_lock_init(&qp->rq.producer_lock);
+ spin_lock_init(&qp->rq.consumer_lock);
+
atomic_set(&qp->ssn, 0);
atomic_set(&qp->skb_out, 0);
}
@@ -230,10 +236,10 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
QUEUE_TYPE_FROM_CLIENT);
qp->req.state = QP_STATE_RESET;
+ qp->comp.state = QP_STATE_RESET;
qp->req.opcode = -1;
qp->comp.opcode = -1;
- spin_lock_init(&qp->sq.sq_lock);
skb_queue_head_init(&qp->req_pkts);
rxe_init_task(rxe, &qp->req.task, qp,
@@ -284,9 +290,6 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
}
}
- spin_lock_init(&qp->rq.producer_lock);
- spin_lock_init(&qp->rq.consumer_lock);
-
skb_queue_head_init(&qp->resp_pkts);
rxe_init_task(rxe, &qp->resp.task, qp,
@@ -490,6 +493,7 @@ static void rxe_qp_reset(struct rxe_qp *qp)
/* move qp to the reset state */
qp->req.state = QP_STATE_RESET;
+ qp->comp.state = QP_STATE_RESET;
qp->resp.state = QP_STATE_RESET;
/* let state machines reset themselves drain work and packet queues
@@ -507,6 +511,7 @@ static void rxe_qp_reset(struct rxe_qp *qp)
atomic_set(&qp->ssn, 0);
qp->req.opcode = -1;
qp->req.need_retry = 0;
+ qp->req.wait_for_rnr_timer = 0;
qp->req.noack_pkts = 0;
qp->resp.msn = 0;
qp->resp.opcode = -1;
@@ -552,6 +557,7 @@ void rxe_qp_error(struct rxe_qp *qp)
{
qp->req.state = QP_STATE_ERROR;
qp->resp.state = QP_STATE_ERROR;
+ qp->comp.state = QP_STATE_ERROR;
qp->attr.qp_state = IB_QPS_ERR;
/* drain work and packet queues */
@@ -689,6 +695,7 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
pr_debug("qp#%d state -> INIT\n", qp_num(qp));
qp->req.state = QP_STATE_INIT;
qp->resp.state = QP_STATE_INIT;
+ qp->comp.state = QP_STATE_INIT;
break;
case IB_QPS_RTR:
@@ -699,6 +706,7 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
case IB_QPS_RTS:
pr_debug("qp#%d state -> RTS\n", qp_num(qp));
qp->req.state = QP_STATE_READY;
+ qp->comp.state = QP_STATE_READY;
break;
case IB_QPS_SQD:
@@ -804,13 +812,15 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
if (qp->rq.queue)
rxe_queue_cleanup(qp->rq.queue);
- atomic_dec(&qp->scq->num_wq);
- if (qp->scq)
+ if (qp->scq) {
+ atomic_dec(&qp->scq->num_wq);
rxe_put(qp->scq);
+ }
- atomic_dec(&qp->rcq->num_wq);
- if (qp->rcq)
+ if (qp->rcq) {
+ atomic_dec(&qp->rcq->num_wq);
rxe_put(qp->rcq);
+ }
if (qp->pd)
rxe_put(qp->pd);
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.h b/drivers/infiniband/sw/rxe/rxe_queue.h
index 6227112ef7a2..ed44042782fa 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.h
+++ b/drivers/infiniband/sw/rxe/rxe_queue.h
@@ -7,9 +7,6 @@
#ifndef RXE_QUEUE_H
#define RXE_QUEUE_H
-/* for definition of shared struct rxe_queue_buf */
-#include <uapi/rdma/rdma_user_rxe.h>
-
/* Implements a simple circular buffer that is shared between user
* and the driver and can be resized. The requested element size is
* rounded up to a power of 2 and the number of elements in the buffer
@@ -53,6 +50,8 @@ enum queue_type {
QUEUE_TYPE_FROM_DRIVER,
};
+struct rxe_queue_buf;
+
struct rxe_queue {
struct rxe_dev *rxe;
struct rxe_queue_buf *buf;
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 9d98237389cf..f63771207970 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -15,8 +15,7 @@ static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
u32 opcode);
static inline void retry_first_write_send(struct rxe_qp *qp,
- struct rxe_send_wqe *wqe,
- unsigned int mask, int npsn)
+ struct rxe_send_wqe *wqe, int npsn)
{
int i;
@@ -83,7 +82,7 @@ static void req_retry(struct rxe_qp *qp)
if (mask & WR_WRITE_OR_SEND_MASK) {
npsn = (qp->comp.psn - wqe->first_psn) &
BTH_PSN_MASK;
- retry_first_write_send(qp, wqe, mask, npsn);
+ retry_first_write_send(qp, wqe, npsn);
}
if (mask & WR_READ_MASK) {
@@ -101,7 +100,11 @@ void rnr_nak_timer(struct timer_list *t)
{
struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer);
- pr_debug("qp#%d rnr nak timer fired\n", qp_num(qp));
+ pr_debug("%s: fired for qp#%d\n", __func__, qp_num(qp));
+
+ /* request a send queue retry */
+ qp->req.need_retry = 1;
+ qp->req.wait_for_rnr_timer = 0;
rxe_run_task(&qp->req.task, 1);
}
@@ -161,16 +164,36 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
(wqe->state != wqe_state_processing)))
return NULL;
- if (unlikely((wqe->wr.send_flags & IB_SEND_FENCE) &&
- (index != cons))) {
- qp->req.wait_fence = 1;
- return NULL;
- }
-
wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
return wqe;
}
+/**
+ * rxe_wqe_is_fenced - check if next wqe is fenced
+ * @qp: the queue pair
+ * @wqe: the next wqe
+ *
+ * Returns: 1 if wqe needs to wait
+ * 0 if wqe is ready to go
+ */
+static int rxe_wqe_is_fenced(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+{
+ /* Local invalidate fence (LIF) see IBA 10.6.5.1
+ * Requires ALL previous operations on the send queue
+ * are complete. Make mandatory for the rxe driver.
+ */
+ if (wqe->wr.opcode == IB_WR_LOCAL_INV)
+ return qp->req.wqe_index != queue_get_consumer(qp->sq.queue,
+ QUEUE_TYPE_FROM_CLIENT);
+
+ /* Fence see IBA 10.8.3.3
+ * Requires that all previous read and atomic operations
+ * are complete.
+ */
+ return (wqe->wr.send_flags & IB_SEND_FENCE) &&
+ atomic_read(&qp->req.rd_atomic) != qp->attr.max_rd_atomic;
+}
+
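+/*
+ * [Editor's note - illustrative sketch, not part of the patch]
+ * rxe_wqe_is_fenced() enforces the consumer-visible IB_SEND_FENCE
+ * semantics: a fenced work request waits for earlier RDMA READ and
+ * atomic operations to complete (and a local invalidate waits for the
+ * whole send queue). A hedged sketch of how a ULP asks for that
+ * ordering through the verbs API; buffer and SGE setup are placeholders.
+ */
+#include <rdma/ib_verbs.h>
+
+static int example_post_fenced_send(struct ib_qp *qp, struct ib_sge *sge)
+{
+	const struct ib_send_wr *bad_wr;
+	struct ib_send_wr wr = {
+		.opcode     = IB_WR_SEND,
+		.send_flags = IB_SEND_FENCE | IB_SEND_SIGNALED,
+		.sg_list    = sge,
+		.num_sge    = 1,
+	};
+
+	/* the send will not start until prior READ/atomic WRs have completed */
+	return ib_post_send(qp, &wr, &bad_wr);
+}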
static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
{
switch (opcode) {
@@ -581,9 +604,11 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
wqe->status = IB_WC_SUCCESS;
qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
- if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
- qp->sq_sig_type == IB_SIGNAL_ALL_WR)
- rxe_run_task(&qp->comp.task, 1);
+ /* There is no ack coming for local work requests
+ * which can lead to a deadlock. So go ahead and complete
+ * it now.
+ */
+ rxe_run_task(&qp->comp.task, 1);
return 0;
}
@@ -599,6 +624,7 @@ int rxe_requester(void *arg)
u32 payload;
int mtu;
int opcode;
+ int err;
int ret;
struct rxe_send_wqe rollback_wqe;
u32 rollback_psn;
@@ -609,10 +635,20 @@ int rxe_requester(void *arg)
if (!rxe_get(qp))
return -EAGAIN;
-next_wqe:
- if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
+ if (unlikely(!qp->valid))
goto exit;
+ if (unlikely(qp->req.state == QP_STATE_ERROR)) {
+ wqe = req_next_wqe(qp);
+ if (wqe)
+ /*
+ * Generate an error completion for error qp state
+ */
+ goto err;
+ else
+ goto exit;
+ }
+
if (unlikely(qp->req.state == QP_STATE_RESET)) {
qp->req.wqe_index = queue_get_consumer(q,
QUEUE_TYPE_FROM_CLIENT);
@@ -620,10 +656,17 @@ next_wqe:
qp->req.need_rd_atomic = 0;
qp->req.wait_psn = 0;
qp->req.need_retry = 0;
+ qp->req.wait_for_rnr_timer = 0;
goto exit;
}
- if (unlikely(qp->req.need_retry)) {
+ /* we come here if the retransmit timer has fired
+ * or if the rnr timer has fired. If the retransmit
* timer fires while we are processing an RNR NAK, wait
+ * until the rnr timer has fired before starting the
+ * retry flow
+ */
+ if (unlikely(qp->req.need_retry && !qp->req.wait_for_rnr_timer)) {
req_retry(qp);
qp->req.need_retry = 0;
}
@@ -632,12 +675,17 @@ next_wqe:
if (unlikely(!wqe))
goto exit;
+ if (rxe_wqe_is_fenced(qp, wqe)) {
+ qp->req.wait_fence = 1;
+ goto exit;
+ }
+
if (wqe->mask & WR_LOCAL_OP_MASK) {
- ret = rxe_do_local_ops(qp, wqe);
- if (unlikely(ret))
+ err = rxe_do_local_ops(qp, wqe);
+ if (unlikely(err))
goto err;
else
- goto next_wqe;
+ goto done;
}
if (unlikely(qp_type(qp) == IB_QPT_RC &&
@@ -685,9 +733,8 @@ next_wqe:
qp->req.wqe_index);
wqe->state = wqe_state_done;
wqe->status = IB_WC_SUCCESS;
- __rxe_do_task(&qp->comp.task);
- rxe_put(qp);
- return 0;
+ rxe_run_task(&qp->comp.task, 0);
+ goto done;
}
payload = mtu;
}
@@ -703,25 +750,29 @@ next_wqe:
if (unlikely(!av)) {
pr_err("qp#%d Failed no address vector\n", qp_num(qp));
wqe->status = IB_WC_LOC_QP_OP_ERR;
- goto err_drop_ah;
+ goto err;
}
skb = init_req_packet(qp, av, wqe, opcode, payload, &pkt);
if (unlikely(!skb)) {
pr_err("qp#%d Failed allocating skb\n", qp_num(qp));
wqe->status = IB_WC_LOC_QP_OP_ERR;
- goto err_drop_ah;
+ if (ah)
+ rxe_put(ah);
+ goto err;
}
- ret = finish_packet(qp, av, wqe, &pkt, skb, payload);
- if (unlikely(ret)) {
+ err = finish_packet(qp, av, wqe, &pkt, skb, payload);
+ if (unlikely(err)) {
pr_debug("qp#%d Error during finish packet\n", qp_num(qp));
- if (ret == -EFAULT)
+ if (err == -EFAULT)
wqe->status = IB_WC_LOC_PROT_ERR;
else
wqe->status = IB_WC_LOC_QP_OP_ERR;
kfree_skb(skb);
- goto err_drop_ah;
+ if (ah)
+ rxe_put(ah);
+ goto err;
}
if (ah)
@@ -736,13 +787,14 @@ next_wqe:
save_state(wqe, qp, &rollback_wqe, &rollback_psn);
update_wqe_state(qp, wqe, &pkt);
update_wqe_psn(qp, wqe, &pkt, payload);
- ret = rxe_xmit_packet(qp, &pkt, skb);
- if (ret) {
+
+ err = rxe_xmit_packet(qp, &pkt, skb);
+ if (err) {
qp->need_req_skb = 1;
rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
- if (ret == -EAGAIN) {
+ if (err == -EAGAIN) {
rxe_run_task(&qp->req.task, 1);
goto exit;
}
@@ -753,16 +805,23 @@ next_wqe:
update_state(qp, &pkt);
- goto next_wqe;
-
-err_drop_ah:
- if (ah)
- rxe_put(ah);
+ /* A non-zero return value will cause rxe_do_task to
+ * exit its loop and end the tasklet. A zero return
+ * will continue looping and return to rxe_requester
+ */
+done:
+ ret = 0;
+ goto out;
err:
+ /* update wqe_index for each wqe completion */
+ qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
wqe->state = wqe_state_error;
- __rxe_do_task(&qp->comp.task);
-
+ qp->req.state = QP_STATE_ERROR;
+ rxe_run_task(&qp->comp.task, 0);
exit:
+ ret = -EAGAIN;
+out:
rxe_put(qp);
- return -EAGAIN;
+
+ return ret;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index f4f6ee5d81fe..b36ec5c4d5e0 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -21,6 +21,7 @@ enum resp_states {
RESPST_CHK_RKEY,
RESPST_EXECUTE,
RESPST_READ_REPLY,
+ RESPST_ATOMIC_REPLY,
RESPST_COMPLETE,
RESPST_ACKNOWLEDGE,
RESPST_CLEANUP,
@@ -55,6 +56,7 @@ static char *resp_state_name[] = {
[RESPST_CHK_RKEY] = "CHK_RKEY",
[RESPST_EXECUTE] = "EXECUTE",
[RESPST_READ_REPLY] = "READ_REPLY",
+ [RESPST_ATOMIC_REPLY] = "ATOMIC_REPLY",
[RESPST_COMPLETE] = "COMPLETE",
[RESPST_ACKNOWLEDGE] = "ACKNOWLEDGE",
[RESPST_CLEANUP] = "CLEANUP",
@@ -448,7 +450,8 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
if (rkey_is_mw(rkey)) {
mw = rxe_lookup_mw(qp, access, rkey);
if (!mw) {
- pr_err("%s: no MW matches rkey %#x\n", __func__, rkey);
+ pr_debug("%s: no MW matches rkey %#x\n",
+ __func__, rkey);
state = RESPST_ERR_RKEY_VIOLATION;
goto err;
}
@@ -468,7 +471,8 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
} else {
mr = lookup_mr(qp->pd, access, rkey, RXE_LOOKUP_REMOTE);
if (!mr) {
- pr_err("%s: no MR matches rkey %#x\n", __func__, rkey);
+ pr_debug("%s: no MR matches rkey %#x\n",
+ __func__, rkey);
state = RESPST_ERR_RKEY_VIOLATION;
goto err;
}
@@ -549,49 +553,106 @@ out:
return rc;
}
+static struct resp_res *rxe_prepare_res(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt,
+ int type)
+{
+ struct resp_res *res;
+ u32 pkts;
+
+ res = &qp->resp.resources[qp->resp.res_head];
+ rxe_advance_resp_resource(qp);
+ free_rd_atomic_resource(res);
+
+ res->type = type;
+ res->replay = 0;
+
+ switch (type) {
+ case RXE_READ_MASK:
+ res->read.va = qp->resp.va + qp->resp.offset;
+ res->read.va_org = qp->resp.va + qp->resp.offset;
+ res->read.resid = qp->resp.resid;
+ res->read.length = qp->resp.resid;
+ res->read.rkey = qp->resp.rkey;
+
+ pkts = max_t(u32, (reth_len(pkt) + qp->mtu - 1)/qp->mtu, 1);
+ res->first_psn = pkt->psn;
+ res->cur_psn = pkt->psn;
+ res->last_psn = (pkt->psn + pkts - 1) & BTH_PSN_MASK;
+
+ res->state = rdatm_res_state_new;
+ break;
+ case RXE_ATOMIC_MASK:
+ res->first_psn = pkt->psn;
+ res->last_psn = pkt->psn;
+ res->cur_psn = pkt->psn;
+ break;
+ }
+
+ return res;
+}
+
/* Guarantee atomicity of atomic operations at the machine level. */
static DEFINE_SPINLOCK(atomic_ops_lock);
-static enum resp_states process_atomic(struct rxe_qp *qp,
- struct rxe_pkt_info *pkt)
+static enum resp_states atomic_reply(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
{
u64 *vaddr;
enum resp_states ret;
struct rxe_mr *mr = qp->resp.mr;
+ struct resp_res *res = qp->resp.res;
+ u64 value;
- if (mr->state != RXE_MR_STATE_VALID) {
- ret = RESPST_ERR_RKEY_VIOLATION;
- goto out;
+ if (!res) {
+ res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_MASK);
+ qp->resp.res = res;
}
- vaddr = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset, sizeof(u64));
+ if (!res->replay) {
+ if (mr->state != RXE_MR_STATE_VALID) {
+ ret = RESPST_ERR_RKEY_VIOLATION;
+ goto out;
+ }
- /* check vaddr is 8 bytes aligned. */
- if (!vaddr || (uintptr_t)vaddr & 7) {
- ret = RESPST_ERR_MISALIGNED_ATOMIC;
- goto out;
- }
+ vaddr = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset,
+ sizeof(u64));
- spin_lock_bh(&atomic_ops_lock);
+ /* check vaddr is 8 bytes aligned. */
+ if (!vaddr || (uintptr_t)vaddr & 7) {
+ ret = RESPST_ERR_MISALIGNED_ATOMIC;
+ goto out;
+ }
- qp->resp.atomic_orig = *vaddr;
+ spin_lock_bh(&atomic_ops_lock);
+ res->atomic.orig_val = value = *vaddr;
- if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP) {
- if (*vaddr == atmeth_comp(pkt))
- *vaddr = atmeth_swap_add(pkt);
- } else {
- *vaddr += atmeth_swap_add(pkt);
- }
+ if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP) {
+ if (value == atmeth_comp(pkt))
+ value = atmeth_swap_add(pkt);
+ } else {
+ value += atmeth_swap_add(pkt);
+ }
+
+ *vaddr = value;
+ spin_unlock_bh(&atomic_ops_lock);
+
+ qp->resp.msn++;
- spin_unlock_bh(&atomic_ops_lock);
+ /* next expected psn, read handles this separately */
+ qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
+ qp->resp.ack_psn = qp->resp.psn;
- ret = RESPST_NONE;
+ qp->resp.opcode = pkt->opcode;
+ qp->resp.status = IB_WC_SUCCESS;
+ }
+
+ ret = RESPST_ACKNOWLEDGE;
out:
return ret;
}
static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
- struct rxe_pkt_info *pkt,
struct rxe_pkt_info *ack,
int opcode,
int payload,
@@ -629,7 +690,7 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
}
if (ack->mask & RXE_ATMACK_MASK)
- atmack_set_orig(ack, qp->resp.atomic_orig);
+ atmack_set_orig(ack, qp->resp.res->atomic.orig_val);
err = rxe_prepare(&qp->pri_av, ack, skb);
if (err) {
@@ -640,34 +701,6 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
return skb;
}
-static struct resp_res *rxe_prepare_read_res(struct rxe_qp *qp,
- struct rxe_pkt_info *pkt)
-{
- struct resp_res *res;
- u32 pkts;
-
- res = &qp->resp.resources[qp->resp.res_head];
- rxe_advance_resp_resource(qp);
- free_rd_atomic_resource(qp, res);
-
- res->type = RXE_READ_MASK;
- res->replay = 0;
- res->read.va = qp->resp.va + qp->resp.offset;
- res->read.va_org = qp->resp.va + qp->resp.offset;
- res->read.resid = qp->resp.resid;
- res->read.length = qp->resp.resid;
- res->read.rkey = qp->resp.rkey;
-
- pkts = max_t(u32, (reth_len(pkt) + qp->mtu - 1)/qp->mtu, 1);
- res->first_psn = pkt->psn;
- res->cur_psn = pkt->psn;
- res->last_psn = (pkt->psn + pkts - 1) & BTH_PSN_MASK;
-
- res->state = rdatm_res_state_new;
-
- return res;
-}
-
/**
* rxe_recheck_mr - revalidate MR from rkey and get a reference
* @qp: the qp
@@ -738,7 +771,7 @@ static enum resp_states read_reply(struct rxe_qp *qp,
struct rxe_mr *mr;
if (!res) {
- res = rxe_prepare_read_res(qp, req_pkt);
+ res = rxe_prepare_res(qp, req_pkt, RXE_READ_MASK);
qp->resp.res = res;
}
@@ -771,7 +804,7 @@ static enum resp_states read_reply(struct rxe_qp *qp,
payload = min_t(int, res->read.resid, mtu);
- skb = prepare_ack_packet(qp, req_pkt, &ack_pkt, opcode, payload,
+ skb = prepare_ack_packet(qp, &ack_pkt, opcode, payload,
res->cur_psn, AETH_ACK_UNLIMITED);
if (!skb)
return RESPST_ERR_RNR;
@@ -858,9 +891,7 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
qp->resp.msn++;
return RESPST_READ_REPLY;
} else if (pkt->mask & RXE_ATOMIC_MASK) {
- err = process_atomic(qp, pkt);
- if (err)
- return err;
+ return RESPST_ATOMIC_REPLY;
} else {
/* Unreachable */
WARN_ON_ONCE(1);
@@ -997,14 +1028,13 @@ finish:
return RESPST_CLEANUP;
}
-static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
- u8 syndrome, u32 psn)
+static int send_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
{
int err = 0;
struct rxe_pkt_info ack_pkt;
struct sk_buff *skb;
- skb = prepare_ack_packet(qp, pkt, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE,
+ skb = prepare_ack_packet(qp, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE,
0, psn, syndrome);
if (!skb) {
err = -ENOMEM;
@@ -1019,40 +1049,29 @@ err1:
return err;
}
-static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
- u8 syndrome)
+static int send_atomic_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
{
- int rc = 0;
+ int err = 0;
struct rxe_pkt_info ack_pkt;
struct sk_buff *skb;
- struct resp_res *res;
- skb = prepare_ack_packet(qp, pkt, &ack_pkt,
- IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, 0, pkt->psn,
- syndrome);
+ skb = prepare_ack_packet(qp, &ack_pkt, IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE,
+ 0, psn, syndrome);
if (!skb) {
- rc = -ENOMEM;
+ err = -ENOMEM;
goto out;
}
- res = &qp->resp.resources[qp->resp.res_head];
- free_rd_atomic_resource(qp, res);
- rxe_advance_resp_resource(qp);
-
- skb_get(skb);
- res->type = RXE_ATOMIC_MASK;
- res->atomic.skb = skb;
- res->first_psn = ack_pkt.psn;
- res->last_psn = ack_pkt.psn;
- res->cur_psn = ack_pkt.psn;
+ err = rxe_xmit_packet(qp, &ack_pkt, skb);
+ if (err)
+ pr_err_ratelimited("Failed sending atomic ack\n");
- rc = rxe_xmit_packet(qp, &ack_pkt, skb);
- if (rc) {
- pr_err_ratelimited("Failed sending ack\n");
- rxe_put(qp);
- }
+ /* have to clear this since it is used to trigger
+ * long read replies
+ */
+ qp->resp.res = NULL;
out:
- return rc;
+ return err;
}
static enum resp_states acknowledge(struct rxe_qp *qp,
@@ -1062,11 +1081,11 @@ static enum resp_states acknowledge(struct rxe_qp *qp,
return RESPST_CLEANUP;
if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED)
- send_ack(qp, pkt, qp->resp.aeth_syndrome, pkt->psn);
+ send_ack(qp, qp->resp.aeth_syndrome, pkt->psn);
else if (pkt->mask & RXE_ATOMIC_MASK)
- send_atomic_ack(qp, pkt, AETH_ACK_UNLIMITED);
+ send_atomic_ack(qp, AETH_ACK_UNLIMITED, pkt->psn);
else if (bth_ack(pkt))
- send_ack(qp, pkt, AETH_ACK_UNLIMITED, pkt->psn);
+ send_ack(qp, AETH_ACK_UNLIMITED, pkt->psn);
return RESPST_CLEANUP;
}
@@ -1119,7 +1138,7 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
if (pkt->mask & RXE_SEND_MASK ||
pkt->mask & RXE_WRITE_MASK) {
/* SEND. Ack again and cleanup. C9-105. */
- send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn);
+ send_ack(qp, AETH_ACK_UNLIMITED, prev_psn);
return RESPST_CLEANUP;
} else if (pkt->mask & RXE_READ_MASK) {
struct resp_res *res;
@@ -1173,14 +1192,11 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
/* Find the operation in our list of responder resources. */
res = find_resource(qp, pkt->psn);
if (res) {
- skb_get(res->atomic.skb);
- /* Resend the result. */
- rc = rxe_xmit_packet(qp, pkt, res->atomic.skb);
- if (rc) {
- pr_err("Failed resending result. This flow is not handled - skb ignored\n");
- rc = RESPST_CLEANUP;
- goto out;
- }
+ res->replay = 1;
+ res->cur_psn = pkt->psn;
+ qp->resp.res = res;
+ rc = RESPST_ATOMIC_REPLY;
+ goto out;
}
/* Resource not found. Class D error. Drop the request. */
@@ -1260,17 +1276,15 @@ int rxe_responder(void *arg)
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
enum resp_states state;
struct rxe_pkt_info *pkt = NULL;
- int ret = 0;
+ int ret;
if (!rxe_get(qp))
return -EAGAIN;
qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;
- if (!qp->valid) {
- ret = -EINVAL;
- goto done;
- }
+ if (!qp->valid)
+ goto exit;
switch (qp->resp.state) {
case QP_STATE_RESET:
@@ -1316,6 +1330,9 @@ int rxe_responder(void *arg)
case RESPST_READ_REPLY:
state = read_reply(qp, pkt);
break;
+ case RESPST_ATOMIC_REPLY:
+ state = atomic_reply(qp, pkt);
+ break;
case RESPST_ACKNOWLEDGE:
state = acknowledge(qp, pkt);
break;
@@ -1327,7 +1344,7 @@ int rxe_responder(void *arg)
break;
case RESPST_ERR_PSN_OUT_OF_SEQ:
/* RC only - Class B. Drop packet. */
- send_ack(qp, pkt, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn);
+ send_ack(qp, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn);
state = RESPST_CLEANUP;
break;
@@ -1349,7 +1366,7 @@ int rxe_responder(void *arg)
if (qp_type(qp) == IB_QPT_RC) {
rxe_counter_inc(rxe, RXE_CNT_SND_RNR);
/* RC - class B */
- send_ack(qp, pkt, AETH_RNR_NAK |
+ send_ack(qp, AETH_RNR_NAK |
(~AETH_TYPE_MASK &
qp->attr.min_rnr_timer),
pkt->psn);
@@ -1438,7 +1455,7 @@ int rxe_responder(void *arg)
case RESPST_ERROR:
qp->resp.goto_error = 0;
- pr_warn("qp#%d moved to error state\n", qp_num(qp));
+ pr_debug("qp#%d moved to error state\n", qp_num(qp));
rxe_qp_error(qp);
goto exit;
@@ -1447,9 +1464,16 @@ int rxe_responder(void *arg)
}
}
+ /* A non-zero return value will cause rxe_do_task to
+ * exit its loop and end the tasklet. A zero return
+ * will continue looping and return to rxe_responder
+ */
+done:
+ ret = 0;
+ goto out;
exit:
ret = -EAGAIN;
-done:
+out:
rxe_put(qp);
return ret;
}
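The reworked done/exit/out tail above follows the return-value contract spelled out in the new comment: returning 0 tells rxe_do_task() to loop back into rxe_responder(), while a non-zero value (here -EAGAIN) ends the tasklet. A minimal sketch of that contract; the pending counter and helper names below are purely illustrative and not from the driver:

/* Sketch only -- stands in for the rxe_do_task()/rxe_responder() pairing. */
static int example_pending = 3;              /* pretend three packets are queued */

static int example_task_func(void *arg)
{
        if (example_pending > 0) {
                example_pending--;           /* consume one unit of work */
                return 0;                    /* zero: caller keeps looping */
        }
        return -EAGAIN;                      /* non-zero: caller ends the loop */
}

static void example_do_task(void *arg)
{
        while (example_task_func(arg) == 0)
                ;
}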
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
index 0c4db5bb17d7..2248cf33d776 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.c
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -8,7 +8,7 @@
#include <linux/interrupt.h>
#include <linux/hardirq.h>
-#include "rxe_task.h"
+#include "rxe.h"
int __rxe_do_task(struct rxe_task *task)
@@ -33,6 +33,7 @@ void rxe_do_task(struct tasklet_struct *t)
int cont;
int ret;
struct rxe_task *task = from_tasklet(task, t, tasklet);
+ unsigned int iterations = RXE_MAX_ITERATIONS;
spin_lock_bh(&task->state_lock);
switch (task->state) {
@@ -61,13 +62,20 @@ void rxe_do_task(struct tasklet_struct *t)
spin_lock_bh(&task->state_lock);
switch (task->state) {
case TASK_STATE_BUSY:
- if (ret)
+ if (ret) {
task->state = TASK_STATE_START;
- else
+ } else if (iterations--) {
cont = 1;
+ } else {
+ /* reschedule the tasklet and exit
+ * the loop to give up the cpu
+ */
+ tasklet_schedule(&task->tasklet);
+ task->state = TASK_STATE_START;
+ }
break;
- /* soneone tried to run the task since the last time we called
+ /* someone tried to run the task since the last time we called
* func, so we will call one more time regardless of the
* return value
*/
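The iteration budget added above keeps the tasklet from monopolizing a CPU: each pass decrements the budget, and once it runs out the handler reschedules itself and returns. A hedged sketch of the same pattern; EXAMPLE_MAX_ITERATIONS and example_do_one_unit() are made up for illustration and do not come from the driver:

#include <linux/interrupt.h>

#define EXAMPLE_MAX_ITERATIONS  1024         /* assumed budget, in the spirit of RXE_MAX_ITERATIONS */

static bool example_do_one_unit(void);       /* hypothetical: returns true while work remains */

static void example_tasklet_fn(struct tasklet_struct *t)
{
        unsigned int iterations = EXAMPLE_MAX_ITERATIONS;

        while (example_do_one_unit()) {
                if (!iterations--) {
                        /* budget exhausted: reschedule and give up the CPU */
                        tasklet_schedule(t);
                        return;
                }
        }
}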
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 9d995854a174..e264cf69bf55 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -115,7 +115,7 @@ static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
struct rxe_ucontext *uc = to_ruc(ibuc);
- rxe_put(uc);
+ rxe_cleanup(uc);
}
static int rxe_port_immutable(struct ib_device *dev, u32 port_num,
@@ -149,7 +149,7 @@ static int rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct rxe_pd *pd = to_rpd(ibpd);
- rxe_put(pd);
+ rxe_cleanup(pd);
return 0;
}
@@ -176,7 +176,8 @@ static int rxe_create_ah(struct ib_ah *ibah,
if (err)
return err;
- err = rxe_add_to_pool(&rxe->ah_pool, ah);
+ err = rxe_add_to_pool_ah(&rxe->ah_pool, ah,
+ init_attr->flags & RDMA_CREATE_AH_SLEEPABLE);
if (err)
return err;
@@ -188,7 +189,7 @@ static int rxe_create_ah(struct ib_ah *ibah,
err = copy_to_user(&uresp->ah_num, &ah->ah_num,
sizeof(uresp->ah_num));
if (err) {
- rxe_put(ah);
+ rxe_cleanup(ah);
return -EFAULT;
}
} else if (ah->is_user) {
@@ -197,6 +198,8 @@ static int rxe_create_ah(struct ib_ah *ibah,
}
rxe_init_av(init_attr->ah_attr, &ah->av);
+ rxe_finalize(ah);
+
return 0;
}
@@ -228,7 +231,8 @@ static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
{
struct rxe_ah *ah = to_rah(ibah);
- rxe_put(ah);
+ rxe_cleanup_ah(ah, flags & RDMA_DESTROY_AH_SLEEPABLE);
+
return 0;
}
@@ -308,12 +312,13 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
if (err)
- goto err_put;
+ goto err_cleanup;
return 0;
-err_put:
- rxe_put(srq);
+err_cleanup:
+ rxe_cleanup(srq);
+
return err;
}
@@ -362,7 +367,7 @@ static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
struct rxe_srq *srq = to_rsrq(ibsrq);
- rxe_put(srq);
+ rxe_cleanup(srq);
return 0;
}
@@ -429,10 +434,11 @@ static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
if (err)
goto qp_init;
+ rxe_finalize(qp);
return 0;
qp_init:
- rxe_put(qp);
+ rxe_cleanup(qp);
return err;
}
@@ -485,7 +491,7 @@ static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
if (ret)
return ret;
- rxe_put(qp);
+ rxe_cleanup(qp);
return 0;
}
@@ -803,7 +809,7 @@ static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
rxe_cq_disable(cq);
- rxe_put(cq);
+ rxe_cleanup(cq);
return 0;
}
@@ -898,6 +904,7 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
rxe_get(pd);
rxe_mr_init_dma(pd, access, mr);
+ rxe_finalize(mr);
return &mr->ibmr;
}
@@ -926,11 +933,13 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
if (err)
goto err3;
+ rxe_finalize(mr);
+
return &mr->ibmr;
err3:
rxe_put(pd);
- rxe_put(mr);
+ rxe_cleanup(mr);
err2:
return ERR_PTR(err);
}
@@ -958,35 +967,52 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
if (err)
goto err2;
+ rxe_finalize(mr);
+
return &mr->ibmr;
err2:
rxe_put(pd);
- rxe_put(mr);
+ rxe_cleanup(mr);
err1:
return ERR_PTR(err);
}
-/* build next_map_set from scatterlist
- * The IB_WR_REG_MR WR will swap map_sets
- */
+static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
+{
+ struct rxe_mr *mr = to_rmr(ibmr);
+ struct rxe_map *map;
+ struct rxe_phys_buf *buf;
+
+ if (unlikely(mr->nbuf == mr->num_buf))
+ return -ENOMEM;
+
+ map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
+ buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];
+
+ buf->addr = addr;
+ buf->size = ibmr->page_size;
+ mr->nbuf++;
+
+ return 0;
+}
+
static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset)
{
struct rxe_mr *mr = to_rmr(ibmr);
- struct rxe_map_set *set = mr->next_map_set;
int n;
- set->nbuf = 0;
+ mr->nbuf = 0;
- n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_mr_set_page);
+ n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);
- set->va = ibmr->iova;
- set->iova = ibmr->iova;
- set->length = ibmr->length;
- set->page_shift = ilog2(ibmr->page_size);
- set->page_mask = ibmr->page_size - 1;
- set->offset = set->iova & set->page_mask;
+ mr->va = ibmr->iova;
+ mr->iova = ibmr->iova;
+ mr->length = ibmr->length;
+ mr->page_shift = ilog2(ibmr->page_size);
+ mr->page_mask = ibmr->page_size - 1;
+ mr->offset = mr->iova & mr->page_mask;
return n;
}
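The new rxe_set_page() above is the per-page callback that ib_sg_to_pages() invokes while walking the scatterlist: the callback records one DMA address and advances a cursor, and the caller resets the cursor before the walk. A condensed sketch of that contract using a simplified, hypothetical MR layout rather than the driver's map arrays:

#include <rdma/ib_verbs.h>

struct example_mr {
        struct ib_mr    ibmr;
        u64             *pages;        /* simplified flat page list */
        u32             num_buf;       /* capacity */
        u32             nbuf;          /* cursor */
};

#define to_example_mr(m) container_of(m, struct example_mr, ibmr)

static int example_set_page(struct ib_mr *ibmr, u64 addr)
{
        struct example_mr *mr = to_example_mr(ibmr);

        if (mr->nbuf == mr->num_buf)
                return -ENOMEM;        /* more pages than room was allocated for */

        mr->pages[mr->nbuf++] = addr;
        return 0;
}

static int example_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
                             int sg_nents, unsigned int *sg_offset)
{
        struct example_mr *mr = to_example_mr(ibmr);

        mr->nbuf = 0;
        return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, example_set_page);
}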
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index ac464e68c923..96af3e054f4d 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -9,7 +9,6 @@
#include <linux/interrupt.h>
#include <linux/workqueue.h>
-#include <rdma/rdma_user_rxe.h>
#include "rxe_pool.h"
#include "rxe_task.h"
#include "rxe_hw_counters.h"
@@ -124,11 +123,13 @@ struct rxe_req_info {
int need_rd_atomic;
int wait_psn;
int need_retry;
+ int wait_for_rnr_timer;
int noack_pkts;
struct rxe_task task;
};
struct rxe_comp_info {
+ enum rxe_qp_state state;
u32 psn;
int opcode;
int timeout;
@@ -155,7 +156,7 @@ struct resp_res {
union {
struct {
- struct sk_buff *skb;
+ u64 orig_val;
} atomic;
struct {
u64 va_org;
@@ -189,7 +190,6 @@ struct rxe_resp_info {
u32 resid;
u32 rkey;
u32 length;
- u64 atomic_orig;
/* SRQ only */
struct {
@@ -288,17 +288,6 @@ struct rxe_map {
struct rxe_phys_buf buf[RXE_BUF_PER_MAP];
};
-struct rxe_map_set {
- struct rxe_map **map;
- u64 va;
- u64 iova;
- size_t length;
- u32 offset;
- u32 nbuf;
- int page_shift;
- int page_mask;
-};
-
static inline int rkey_is_mw(u32 rkey)
{
u32 index = rkey >> 8;
@@ -316,20 +305,26 @@ struct rxe_mr {
u32 rkey;
enum rxe_mr_state state;
enum ib_mr_type type;
+ u64 va;
+ u64 iova;
+ size_t length;
+ u32 offset;
int access;
+ int page_shift;
+ int page_mask;
int map_shift;
int map_mask;
u32 num_buf;
+ u32 nbuf;
u32 max_buf;
u32 num_map;
atomic_t num_mw;
- struct rxe_map_set *cur_map_set;
- struct rxe_map_set *next_map_set;
+ struct rxe_map **map;
};
enum rxe_mw_state {
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index 17f34d584cd9..f88d2971c2c6 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -725,11 +725,11 @@ static int siw_proc_mpareply(struct siw_cep *cep)
enum mpa_v2_ctrl mpa_p2p_mode = MPA_V2_RDMA_NO_RTR;
rv = siw_recv_mpa_rr(cep);
- if (rv != -EAGAIN)
- siw_cancel_mpatimer(cep);
if (rv)
goto out_err;
+ siw_cancel_mpatimer(cep);
+
rep = &cep->mpa.hdr;
if (__mpa_rr_revision(rep->params.bits) > MPA_REVISION_2) {
@@ -895,7 +895,8 @@ static int siw_proc_mpareply(struct siw_cep *cep)
}
out_err:
- siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -EINVAL);
+ if (rv != -EAGAIN)
+ siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -EINVAL);
return rv;
}
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 09316072b789..8dedae7ae79e 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -1167,7 +1167,7 @@ int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
err_out:
siw_dbg(base_cq->device, "CQ creation failed: %d", rv);
- if (cq && cq->queue) {
+ if (cq->queue) {
struct siw_ucontext *ctx =
rdma_udata_to_drv_context(udata, struct siw_ucontext,
base_ucontext);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index f7995519bbc8..ed25061fac62 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -1109,7 +1109,7 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
* if he sets the device address back to be based on GID index 0,
* he no longer wishs to control it.
*
- * If the user doesn't control the the device address,
+ * If the user doesn't control the device address,
* IPOIB_FLAG_DEV_ADDR_SET is set and ib_find_gid failed it means
* the port GUID has changed and GID at index 0 has changed
* so we need to change priv->local_gid and priv->dev->dev_addr
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 2a8961b685c2..a4904371e2db 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1664,8 +1664,10 @@ static void ipoib_napi_add(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
- netif_napi_add(dev, &priv->recv_napi, ipoib_rx_poll, IPOIB_NUM_WC);
- netif_napi_add(dev, &priv->send_napi, ipoib_tx_poll, MAX_SEND_CQE);
+ netif_napi_add_weight(dev, &priv->recv_napi, ipoib_rx_poll,
+ IPOIB_NUM_WC);
+ netif_napi_add_weight(dev, &priv->send_napi, ipoib_tx_poll,
+ MAX_SEND_CQE);
}
static void ipoib_napi_del(struct net_device *dev)
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 321949a570ed..620ae5b2d80d 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -568,7 +568,7 @@ static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
iscsi_session_teardown(cls_session);
- iscsi_host_remove(shost);
+ iscsi_host_remove(shost, false);
iscsi_host_free(shost);
}
@@ -685,7 +685,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
return cls_session;
remove_host:
- iscsi_host_remove(shost);
+ iscsi_host_remove(shost, false);
free_host:
iscsi_host_free(shost);
return NULL;
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index bd5f3b5e1727..7b83f48f60c5 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -537,6 +537,7 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
struct iscsi_hdr *hdr;
char *data;
int length;
+ bool full_feature_phase;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
iser_err_comp(wc, "login_rsp");
@@ -550,6 +551,9 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
hdr = desc->rsp + sizeof(struct iser_ctrl);
data = desc->rsp + ISER_HEADERS_LEN;
length = wc->byte_len - ISER_HEADERS_LEN;
+ full_feature_phase = ((hdr->flags & ISCSI_FULL_FEATURE_PHASE) ==
+ ISCSI_FULL_FEATURE_PHASE) &&
+ (hdr->flags & ISCSI_FLAG_CMD_FINAL);
iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
hdr->itt, length);
@@ -560,7 +564,8 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
desc->rsp_dma, ISER_RX_LOGIN_SIZE,
DMA_FROM_DEVICE);
- if (iser_conn->iscsi_conn->session->discovery_sess)
+ if (!full_feature_phase ||
+ iser_conn->iscsi_conn->session->discovery_sess)
return;
/* Post the first RX buffer that is skipped in iser_post_rx_bufs() */
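The full_feature_phase test above uses the `(flags & mask) == mask` form, which requires every bit of the mask to be set rather than any one of them, and then additionally checks the final-PDU flag. A small standalone illustration of the same bit logic; the two mask values are invented for the example and are not the iSCSI header definitions:

#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_FFP_MASK   0x0c    /* assumed multi-bit "full feature phase" mask */
#define EXAMPLE_CMD_FINAL  0x80    /* assumed "final PDU" flag */

static bool full_feature_phase(unsigned char flags)
{
        return ((flags & EXAMPLE_FFP_MASK) == EXAMPLE_FFP_MASK) &&
               (flags & EXAMPLE_CMD_FINAL);
}

int main(void)
{
        printf("%d\n", full_feature_phase(0x8c));  /* all mask bits + final -> 1 */
        printf("%d\n", full_feature_phase(0x88));  /* only one mask bit set  -> 0 */
        return 0;
}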
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index c08f2d9133b6..a00ca117303a 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -246,6 +246,7 @@ static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
device = ib_conn->device;
ib_dev = device->ib_device;
+ /* +1 for drain */
if (ib_conn->pi_support)
max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS + 1;
else
@@ -267,7 +268,8 @@ static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
init_attr.qp_context = (void *)ib_conn;
init_attr.send_cq = ib_conn->cq;
init_attr.recv_cq = ib_conn->cq;
- init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS;
+ /* +1 for drain */
+ init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS + 1;
init_attr.cap.max_send_sge = 2;
init_attr.cap.max_recv_sge = 1;
init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
@@ -485,7 +487,7 @@ int iser_conn_terminate(struct iser_conn *iser_conn)
iser_conn, err);
/* block until all flush errors are consumed */
- ib_drain_sq(ib_conn->qp);
+ ib_drain_qp(ib_conn->qp);
}
return 1;
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
index 385a19846c24..1e6ffafa2db3 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
@@ -32,11 +32,7 @@ void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con)
void rtrs_clt_inc_failover_cnt(struct rtrs_clt_stats *stats)
{
- struct rtrs_clt_stats_pcpu *s;
-
- s = get_cpu_ptr(stats->pcpu_stats);
- s->rdma.failover_cnt++;
- put_cpu_ptr(stats->pcpu_stats);
+ this_cpu_inc(stats->pcpu_stats->rdma.failover_cnt);
}
int rtrs_clt_stats_migration_from_cnt_to_str(struct rtrs_clt_stats *stats, char *buf)
@@ -169,12 +165,8 @@ int rtrs_clt_reset_all_stats(struct rtrs_clt_stats *s, bool enable)
static inline void rtrs_clt_update_rdma_stats(struct rtrs_clt_stats *stats,
size_t size, int d)
{
- struct rtrs_clt_stats_pcpu *s;
-
- s = get_cpu_ptr(stats->pcpu_stats);
- s->rdma.dir[d].cnt++;
- s->rdma.dir[d].size_total += size;
- put_cpu_ptr(stats->pcpu_stats);
+ this_cpu_inc(stats->pcpu_stats->rdma.dir[d].cnt);
+ this_cpu_add(stats->pcpu_stats->rdma.dir[d].size_total, size);
}
void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir)
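Both hunks above replace an explicit get_cpu_ptr()/put_cpu_ptr() pair with this_cpu_inc()/this_cpu_add(), letting each counter update handle its own preemption protection. A hedged sketch of the same update path on a made-up per-CPU stats structure:

#include <linux/percpu.h>

struct example_stats {
        u64 cnt;
        u64 bytes;
};

static struct example_stats __percpu *example_stats;

static int example_init(void)
{
        example_stats = alloc_percpu(struct example_stats);
        return example_stats ? 0 : -ENOMEM;
}

static void example_account(size_t size)
{
        /* each update is a single per-CPU operation; no get/put pair needed */
        this_cpu_inc(example_stats->cnt);
        this_cpu_add(example_stats->bytes, size);
}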
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index 9809c3883979..baecde41d126 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -740,25 +740,25 @@ struct path_it {
struct rtrs_clt_path *(*next_path)(struct path_it *it);
};
-/**
- * list_next_or_null_rr_rcu - get next list element in round-robin fashion.
+/*
+ * rtrs_clt_get_next_path_or_null - get clt path from the list or return NULL
* @head: the head for the list.
- * @ptr: the list head to take the next element from.
- * @type: the type of the struct this is embedded in.
- * @memb: the name of the list_head within the struct.
+ * @clt_path: The element to take the next clt_path from.
*
- * Next element returned in round-robin fashion, i.e. head will be skipped,
+ * Next clt path returned in round-robin fashion, i.e. head will be skipped,
* but if list is observed as empty, NULL will be returned.
*
- * This primitive may safely run concurrently with the _rcu list-mutation
+ * This function may safely run concurrently with the _rcu list-mutation
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
*/
-#define list_next_or_null_rr_rcu(head, ptr, type, memb) \
-({ \
- list_next_or_null_rcu(head, ptr, type, memb) ?: \
- list_next_or_null_rcu(head, READ_ONCE((ptr)->next), \
- type, memb); \
-})
+static inline struct rtrs_clt_path *
+rtrs_clt_get_next_path_or_null(struct list_head *head, struct rtrs_clt_path *clt_path)
+{
+ return list_next_or_null_rcu(head, &clt_path->s.entry, typeof(*clt_path), s.entry) ?:
+ list_next_or_null_rcu(head,
+ READ_ONCE((&clt_path->s.entry)->next),
+ typeof(*clt_path), s.entry);
+}
/**
* get_next_path_rr() - Returns path in round-robin fashion.
@@ -789,10 +789,8 @@ static struct rtrs_clt_path *get_next_path_rr(struct path_it *it)
path = list_first_or_null_rcu(&clt->paths_list,
typeof(*path), s.entry);
else
- path = list_next_or_null_rr_rcu(&clt->paths_list,
- &path->s.entry,
- typeof(*path),
- s.entry);
+ path = rtrs_clt_get_next_path_or_null(&clt->paths_list, path);
+
rcu_assign_pointer(*ppcpu_path, path);
return path;
@@ -1403,8 +1401,7 @@ static int alloc_permits(struct rtrs_clt_sess *clt)
unsigned int chunk_bits;
int err, i;
- clt->permits_map = kcalloc(BITS_TO_LONGS(clt->queue_depth),
- sizeof(long), GFP_KERNEL);
+ clt->permits_map = bitmap_zalloc(clt->queue_depth, GFP_KERNEL);
if (!clt->permits_map) {
err = -ENOMEM;
goto out_err;
@@ -1426,7 +1423,7 @@ static int alloc_permits(struct rtrs_clt_sess *clt)
return 0;
err_map:
- kfree(clt->permits_map);
+ bitmap_free(clt->permits_map);
clt->permits_map = NULL;
out_err:
return err;
@@ -1434,13 +1431,11 @@ out_err:
static void free_permits(struct rtrs_clt_sess *clt)
{
- if (clt->permits_map) {
- size_t sz = clt->queue_depth;
-
+ if (clt->permits_map)
wait_event(clt->permits_wait,
- find_first_bit(clt->permits_map, sz) >= sz);
- }
- kfree(clt->permits_map);
+ bitmap_empty(clt->permits_map, clt->queue_depth));
+
+ bitmap_free(clt->permits_map);
clt->permits_map = NULL;
kfree(clt->permits);
clt->permits = NULL;
@@ -2277,8 +2272,7 @@ static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_path *clt_path)
* removed. If @sess is the last element, then @next is NULL.
*/
rcu_read_lock();
- next = list_next_or_null_rr_rcu(&clt->paths_list, &clt_path->s.entry,
- typeof(*next), s.entry);
+ next = rtrs_clt_get_next_path_or_null(&clt->paths_list, clt_path);
rcu_read_unlock();
/*
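Among the changes above, alloc_permits()/free_permits() move the permit map to the bitmap helpers, which take a bit count directly instead of a hand-computed number of longs. A minimal sketch of that allocate/drain/free pattern; the queue depth below is an arbitrary example value:

#include <linux/bitmap.h>
#include <linux/slab.h>

static unsigned long *permits_map;
static unsigned int queue_depth = 512;   /* arbitrary example depth */

static int example_alloc_permits(void)
{
        permits_map = bitmap_zalloc(queue_depth, GFP_KERNEL);
        return permits_map ? 0 : -ENOMEM;
}

static void example_free_permits(void)
{
        /* only tear down once every permit bit has been returned */
        WARN_ON(!bitmap_empty(permits_map, queue_depth));
        bitmap_free(permits_map);
        permits_map = NULL;
}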
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
index 9a1e5c2ae55c..ac0df734eba8 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-pri.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
@@ -23,6 +23,17 @@
#define RTRS_PROTO_VER_STRING __stringify(RTRS_PROTO_VER_MAJOR) "." \
__stringify(RTRS_PROTO_VER_MINOR)
+/*
+ * Max IB immediate data size is 2^28 (MAX_IMM_PAYL_BITS)
+ * and the minimum chunk size is 4096 (2^12).
+ * So the maximum sess_queue_depth is 65536 (2^16) in theory.
+ * But mempool_create, create_qp and ib_post_send fail with
+ * "cannot allocate memory" error if sess_queue_depth is too big.
+ * Therefore the practical max value of sess_queue_depth is
+ * somewhere between 1 and 65534 and it depends on the system.
+ */
+#define MAX_SESS_QUEUE_DEPTH 65535
+
enum rtrs_imm_const {
MAX_IMM_TYPE_BITS = 4,
MAX_IMM_TYPE_MASK = ((1 << MAX_IMM_TYPE_BITS) - 1),
@@ -46,16 +57,6 @@ enum {
MAX_PATHS_NUM = 128,
- /*
- * Max IB immediate data size is 2^28 (MAX_IMM_PAYL_BITS)
- * and the minimum chunk size is 4096 (2^12).
- * So the maximum sess_queue_depth is 65536 (2^16) in theory.
- * But mempool_create, create_qp and ib_post_send fail with
- * "cannot allocate memory" error if sess_queue_depth is too big.
- * Therefore the pratical max value of sess_queue_depth is
- * somewhere between 1 and 65534 and it depends on the system.
- */
- MAX_SESS_QUEUE_DEPTH = 65535,
MIN_CHUNK_SIZE = 8192,
RTRS_HB_INTERVAL_MS = 5000,
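The comment carried into the new MAX_SESS_QUEUE_DEPTH define boils down to one division: 2^28 bytes addressable by IB immediate data over a 2^12-byte minimum chunk gives 2^16 = 65536 possible chunks, and the define stays one below that theoretical bound. Spelled out as a tiny standalone check:

#include <stdio.h>

int main(void)
{
        unsigned long max_imm_payload = 1UL << 28;   /* MAX_IMM_PAYL_BITS */
        unsigned long min_chunk_size  = 1UL << 12;   /* 4096 */

        /* 2^28 / 2^12 = 2^16 = 65536 theoretical max queue depth */
        printf("%lu\n", max_imm_payload / min_chunk_size);
        return 0;
}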
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c
index 44b1c1652131..2aff1213a19d 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c
@@ -14,9 +14,14 @@
int rtrs_srv_reset_rdma_stats(struct rtrs_srv_stats *stats, bool enable)
{
if (enable) {
- struct rtrs_srv_stats_rdma_stats *r = &stats->rdma_stats;
+ int cpu;
+ struct rtrs_srv_stats_rdma_stats *r;
+
+ for_each_possible_cpu(cpu) {
+ r = per_cpu_ptr(stats->rdma_stats, cpu);
+ memset(r, 0, sizeof(*r));
+ }
- memset(r, 0, sizeof(*r));
return 0;
}
@@ -25,11 +30,22 @@ int rtrs_srv_reset_rdma_stats(struct rtrs_srv_stats *stats, bool enable)
ssize_t rtrs_srv_stats_rdma_to_str(struct rtrs_srv_stats *stats, char *page)
{
- struct rtrs_srv_stats_rdma_stats *r = &stats->rdma_stats;
+ int cpu;
+ struct rtrs_srv_stats_rdma_stats sum;
+ struct rtrs_srv_stats_rdma_stats *r;
+
+ memset(&sum, 0, sizeof(sum));
+
+ for_each_possible_cpu(cpu) {
+ r = per_cpu_ptr(stats->rdma_stats, cpu);
+
+ sum.dir[READ].cnt += r->dir[READ].cnt;
+ sum.dir[READ].size_total += r->dir[READ].size_total;
+ sum.dir[WRITE].cnt += r->dir[WRITE].cnt;
+ sum.dir[WRITE].size_total += r->dir[WRITE].size_total;
+ }
- return sysfs_emit(page, "%lld %lld %lld %lldn %u\n",
- (s64)atomic64_read(&r->dir[READ].cnt),
- (s64)atomic64_read(&r->dir[READ].size_total),
- (s64)atomic64_read(&r->dir[WRITE].cnt),
- (s64)atomic64_read(&r->dir[WRITE].size_total), 0);
+ return sysfs_emit(page, "%llu %llu %llu %llu\n",
+ sum.dir[READ].cnt, sum.dir[READ].size_total,
+ sum.dir[WRITE].cnt, sum.dir[WRITE].size_total);
}
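This is the read side of the per-CPU counters: with plain u64 fields instead of atomic64_t, readers walk every possible CPU and sum the slots. A hedged sketch, reusing the same made-up stats structure as the earlier this_cpu_*() example:

#include <linux/percpu.h>
#include <linux/string.h>

struct example_stats {
        u64 cnt;
        u64 bytes;
};

static struct example_stats __percpu *example_stats;

static void example_sum(struct example_stats *sum)
{
        struct example_stats *s;
        int cpu;

        memset(sum, 0, sizeof(*sum));

        for_each_possible_cpu(cpu) {
                s = per_cpu_ptr(example_stats, cpu);
                sum->cnt   += s->cnt;
                sum->bytes += s->bytes;
        }
}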
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
index b94ae12c2795..2a3c9ac64a42 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
@@ -220,6 +220,8 @@ static void rtrs_srv_path_stats_release(struct kobject *kobj)
stats = container_of(kobj, struct rtrs_srv_stats, kobj_stats);
+ free_percpu(stats->rdma_stats);
+
kfree(stats);
}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index 24024bce2566..34c03bde5064 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -11,7 +11,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
#include <linux/module.h>
-#include <linux/mempool.h>
#include "rtrs-srv.h"
#include "rtrs-log.h"
@@ -26,11 +25,7 @@ MODULE_LICENSE("GPL");
#define DEFAULT_SESS_QUEUE_DEPTH 512
#define MAX_HDR_SIZE PAGE_SIZE
-/* We guarantee to serve 10 paths at least */
-#define CHUNK_POOL_SZ 10
-
static struct rtrs_rdma_dev_pd dev_pd;
-static mempool_t *chunk_pool;
struct class *rtrs_dev_class;
static struct rtrs_srv_ib_ctx ib_ctx;
@@ -1358,7 +1353,7 @@ static void free_srv(struct rtrs_srv_sess *srv)
WARN_ON(refcount_read(&srv->refcount));
for (i = 0; i < srv->queue_depth; i++)
- mempool_free(srv->chunks[i], chunk_pool);
+ __free_pages(srv->chunks[i], get_order(max_chunk_size));
kfree(srv->chunks);
mutex_destroy(&srv->paths_mutex);
mutex_destroy(&srv->paths_ev_mutex);
@@ -1411,7 +1406,8 @@ static struct rtrs_srv_sess *get_or_create_srv(struct rtrs_srv_ctx *ctx,
goto err_free_srv;
for (i = 0; i < srv->queue_depth; i++) {
- srv->chunks[i] = mempool_alloc(chunk_pool, GFP_KERNEL);
+ srv->chunks[i] = alloc_pages(GFP_KERNEL,
+ get_order(max_chunk_size));
if (!srv->chunks[i])
goto err_free_chunks;
}
@@ -1424,7 +1420,7 @@ static struct rtrs_srv_sess *get_or_create_srv(struct rtrs_srv_ctx *ctx,
err_free_chunks:
while (i--)
- mempool_free(srv->chunks[i], chunk_pool);
+ __free_pages(srv->chunks[i], get_order(max_chunk_size));
kfree(srv->chunks);
err_free_srv:
@@ -1513,6 +1509,7 @@ static void free_path(struct rtrs_srv_path *srv_path)
kobject_del(&srv_path->kobj);
kobject_put(&srv_path->kobj);
} else {
+ free_percpu(srv_path->stats->rdma_stats);
kfree(srv_path->stats);
kfree(srv_path);
}
@@ -1755,13 +1752,17 @@ static struct rtrs_srv_path *__alloc_path(struct rtrs_srv_sess *srv,
if (!srv_path->stats)
goto err_free_sess;
+ srv_path->stats->rdma_stats = alloc_percpu(struct rtrs_srv_stats_rdma_stats);
+ if (!srv_path->stats->rdma_stats)
+ goto err_free_stats;
+
srv_path->stats->srv_path = srv_path;
srv_path->dma_addr = kcalloc(srv->queue_depth,
sizeof(*srv_path->dma_addr),
GFP_KERNEL);
if (!srv_path->dma_addr)
- goto err_free_stats;
+ goto err_free_percpu;
srv_path->s.con = kcalloc(con_num, sizeof(*srv_path->s.con),
GFP_KERNEL);
@@ -1813,6 +1814,8 @@ err_free_con:
kfree(srv_path->s.con);
err_free_dma_addr:
kfree(srv_path->dma_addr);
+err_free_percpu:
+ free_percpu(srv_path->stats->rdma_stats);
err_free_stats:
kfree(srv_path->stats);
err_free_sess:
@@ -2266,14 +2269,10 @@ static int __init rtrs_server_init(void)
err);
return err;
}
- chunk_pool = mempool_create_page_pool(sess_queue_depth * CHUNK_POOL_SZ,
- get_order(max_chunk_size));
- if (!chunk_pool)
- return -ENOMEM;
rtrs_dev_class = class_create(THIS_MODULE, "rtrs-server");
if (IS_ERR(rtrs_dev_class)) {
err = PTR_ERR(rtrs_dev_class);
- goto out_chunk_pool;
+ goto out_err;
}
rtrs_wq = alloc_workqueue("rtrs_server_wq", 0, 0);
if (!rtrs_wq) {
@@ -2285,9 +2284,7 @@ static int __init rtrs_server_init(void)
out_dev_class:
class_destroy(rtrs_dev_class);
-out_chunk_pool:
- mempool_destroy(chunk_pool);
-
+out_err:
return err;
}
@@ -2295,7 +2292,6 @@ static void __exit rtrs_server_exit(void)
{
destroy_workqueue(rtrs_wq);
class_destroy(rtrs_dev_class);
- mempool_destroy(chunk_pool);
rtrs_rdma_dev_pd_deinit(&dev_pd);
}
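With the shared chunk_pool gone, each chunk above is just a page-order allocation sized by get_order(). A hedged sketch of that allocate/free pairing; the helper names are illustrative:

#include <linux/gfp.h>
#include <linux/mm.h>

static struct page *example_alloc_chunk(size_t chunk_size)
{
        /* get_order() rounds the byte size up to a power-of-two page order */
        return alloc_pages(GFP_KERNEL, get_order(chunk_size));
}

static void example_free_chunk(struct page *chunk, size_t chunk_size)
{
        __free_pages(chunk, get_order(chunk_size));
}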
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.h b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
index 6292e87f6afd..186a63c217df 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
@@ -12,6 +12,7 @@
#include <linux/device.h>
#include <linux/refcount.h>
+#include <linux/percpu.h>
#include "rtrs-pri.h"
/*
@@ -29,15 +30,15 @@ enum rtrs_srv_state {
*/
struct rtrs_srv_stats_rdma_stats {
struct {
- atomic64_t cnt;
- atomic64_t size_total;
+ u64 cnt;
+ u64 size_total;
} dir[2];
};
struct rtrs_srv_stats {
- struct kobject kobj_stats;
- struct rtrs_srv_stats_rdma_stats rdma_stats;
- struct rtrs_srv_path *srv_path;
+ struct kobject kobj_stats;
+ struct rtrs_srv_stats_rdma_stats __percpu *rdma_stats;
+ struct rtrs_srv_path *srv_path;
};
struct rtrs_srv_con {
@@ -130,8 +131,8 @@ void close_path(struct rtrs_srv_path *srv_path);
static inline void rtrs_srv_update_rdma_stats(struct rtrs_srv_stats *s,
size_t size, int d)
{
- atomic64_inc(&s->rdma_stats.dir[d].cnt);
- atomic64_add(size, &s->rdma_stats.dir[d].size_total);
+ this_cpu_inc(s->rdma_stats->dir[d].cnt);
+ this_cpu_add(s->rdma_stats->dir[d].size_total, size);
}
/* functions which are implemented in rtrs-srv-stats.c */
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index f86ee1c4b970..21cbe30d526f 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -565,12 +565,9 @@ static int srpt_refresh_port(struct srpt_port *sport)
if (ret)
return ret;
- sport->port_guid_id.wwn.priv = sport;
- srpt_format_guid(sport->port_guid_id.name,
- sizeof(sport->port_guid_id.name),
+ srpt_format_guid(sport->guid_name, ARRAY_SIZE(sport->guid_name),
&sport->gid.global.interface_id);
- sport->port_gid_id.wwn.priv = sport;
- snprintf(sport->port_gid_id.name, sizeof(sport->port_gid_id.name),
+ snprintf(sport->gid_name, ARRAY_SIZE(sport->gid_name),
"0x%016llx%016llx",
be64_to_cpu(sport->gid.global.subnet_prefix),
be64_to_cpu(sport->gid.global.interface_id));
@@ -2221,13 +2218,13 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
ch->zw_cqe.done = srpt_zerolength_write_done;
INIT_WORK(&ch->release_work, srpt_release_channel_work);
ch->sport = sport;
- if (ib_cm_id) {
- ch->ib_cm.cm_id = ib_cm_id;
- ib_cm_id->context = ch;
- } else {
+ if (rdma_cm_id) {
ch->using_rdma_cm = true;
ch->rdma_cm.cm_id = rdma_cm_id;
rdma_cm_id->context = ch;
+ } else {
+ ch->ib_cm.cm_id = ib_cm_id;
+ ib_cm_id->context = ch;
}
/*
* ch->rq_size should be at least as large as the initiator queue
@@ -2314,31 +2311,35 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
tag_num = ch->rq_size;
tag_size = 1; /* ib_srpt does not use se_sess->sess_cmd_map */
- mutex_lock(&sport->port_guid_id.mutex);
- list_for_each_entry(stpg, &sport->port_guid_id.tpg_list, entry) {
- if (!IS_ERR_OR_NULL(ch->sess))
- break;
- ch->sess = target_setup_session(&stpg->tpg, tag_num,
+ if (sport->guid_id) {
+ mutex_lock(&sport->guid_id->mutex);
+ list_for_each_entry(stpg, &sport->guid_id->tpg_list, entry) {
+ if (!IS_ERR_OR_NULL(ch->sess))
+ break;
+ ch->sess = target_setup_session(&stpg->tpg, tag_num,
tag_size, TARGET_PROT_NORMAL,
ch->sess_name, ch, NULL);
+ }
+ mutex_unlock(&sport->guid_id->mutex);
}
- mutex_unlock(&sport->port_guid_id.mutex);
- mutex_lock(&sport->port_gid_id.mutex);
- list_for_each_entry(stpg, &sport->port_gid_id.tpg_list, entry) {
- if (!IS_ERR_OR_NULL(ch->sess))
- break;
- ch->sess = target_setup_session(&stpg->tpg, tag_num,
+ if (sport->gid_id) {
+ mutex_lock(&sport->gid_id->mutex);
+ list_for_each_entry(stpg, &sport->gid_id->tpg_list, entry) {
+ if (!IS_ERR_OR_NULL(ch->sess))
+ break;
+ ch->sess = target_setup_session(&stpg->tpg, tag_num,
tag_size, TARGET_PROT_NORMAL, i_port_id,
ch, NULL);
- if (!IS_ERR_OR_NULL(ch->sess))
- break;
- /* Retry without leading "0x" */
- ch->sess = target_setup_session(&stpg->tpg, tag_num,
+ if (!IS_ERR_OR_NULL(ch->sess))
+ break;
+ /* Retry without leading "0x" */
+ ch->sess = target_setup_session(&stpg->tpg, tag_num,
tag_size, TARGET_PROT_NORMAL,
i_port_id + 2, ch, NULL);
+ }
+ mutex_unlock(&sport->gid_id->mutex);
}
- mutex_unlock(&sport->port_gid_id.mutex);
if (IS_ERR_OR_NULL(ch->sess)) {
WARN_ON_ONCE(ch->sess == NULL);
@@ -2983,7 +2984,12 @@ static int srpt_release_sport(struct srpt_port *sport)
return 0;
}
-static struct se_wwn *__srpt_lookup_wwn(const char *name)
+struct port_and_port_id {
+ struct srpt_port *sport;
+ struct srpt_port_id **port_id;
+};
+
+static struct port_and_port_id __srpt_lookup_port(const char *name)
{
struct ib_device *dev;
struct srpt_device *sdev;
@@ -2998,25 +3004,38 @@ static struct se_wwn *__srpt_lookup_wwn(const char *name)
for (i = 0; i < dev->phys_port_cnt; i++) {
sport = &sdev->port[i];
- if (strcmp(sport->port_guid_id.name, name) == 0)
- return &sport->port_guid_id.wwn;
- if (strcmp(sport->port_gid_id.name, name) == 0)
- return &sport->port_gid_id.wwn;
+ if (strcmp(sport->guid_name, name) == 0) {
+ kref_get(&sdev->refcnt);
+ return (struct port_and_port_id){
+ sport, &sport->guid_id};
+ }
+ if (strcmp(sport->gid_name, name) == 0) {
+ kref_get(&sdev->refcnt);
+ return (struct port_and_port_id){
+ sport, &sport->gid_id};
+ }
}
}
- return NULL;
+ return (struct port_and_port_id){};
}
-static struct se_wwn *srpt_lookup_wwn(const char *name)
+/**
+ * srpt_lookup_port() - Look up an RDMA port by name
+ * @name: ASCII port name
+ *
+ * Increments the RDMA port reference count if an RDMA port pointer is returned.
+ * The caller must drop that reference count by calling srpt_port_put_ref().
+ */
+static struct port_and_port_id srpt_lookup_port(const char *name)
{
- struct se_wwn *wwn;
+ struct port_and_port_id papi;
spin_lock(&srpt_dev_lock);
- wwn = __srpt_lookup_wwn(name);
+ papi = __srpt_lookup_port(name);
spin_unlock(&srpt_dev_lock);
- return wwn;
+ return papi;
}
static void srpt_free_srq(struct srpt_device *sdev)
@@ -3101,6 +3120,18 @@ static int srpt_use_srq(struct srpt_device *sdev, bool use_srq)
return ret;
}
+static void srpt_free_sdev(struct kref *refcnt)
+{
+ struct srpt_device *sdev = container_of(refcnt, typeof(*sdev), refcnt);
+
+ kfree(sdev);
+}
+
+static void srpt_sdev_put(struct srpt_device *sdev)
+{
+ kref_put(&sdev->refcnt, srpt_free_sdev);
+}
+
/**
* srpt_add_one - InfiniBand device addition callback function
* @device: Describes a HCA.
@@ -3119,6 +3150,7 @@ static int srpt_add_one(struct ib_device *device)
if (!sdev)
return -ENOMEM;
+ kref_init(&sdev->refcnt);
sdev->device = device;
mutex_init(&sdev->sdev_mutex);
@@ -3182,10 +3214,6 @@ static int srpt_add_one(struct ib_device *device)
sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
sport->port_attrib.use_srq = false;
INIT_WORK(&sport->work, srpt_refresh_port_work);
- mutex_init(&sport->port_guid_id.mutex);
- INIT_LIST_HEAD(&sport->port_guid_id.tpg_list);
- mutex_init(&sport->port_gid_id.mutex);
- INIT_LIST_HEAD(&sport->port_gid_id.tpg_list);
ret = srpt_refresh_port(sport);
if (ret) {
@@ -3214,7 +3242,7 @@ err_ring:
srpt_free_srq(sdev);
ib_dealloc_pd(sdev->pd);
free_dev:
- kfree(sdev);
+ srpt_sdev_put(sdev);
pr_info("%s(%s) failed.\n", __func__, dev_name(&device->dev));
return ret;
}
@@ -3258,7 +3286,7 @@ static void srpt_remove_one(struct ib_device *device, void *client_data)
ib_dealloc_pd(sdev->pd);
- kfree(sdev);
+ srpt_sdev_put(sdev);
}
static struct ib_client srpt_client = {
@@ -3286,10 +3314,10 @@ static struct srpt_port_id *srpt_wwn_to_sport_id(struct se_wwn *wwn)
{
struct srpt_port *sport = wwn->priv;
- if (wwn == &sport->port_guid_id.wwn)
- return &sport->port_guid_id;
- if (wwn == &sport->port_gid_id.wwn)
- return &sport->port_gid_id;
+ if (sport->guid_id && &sport->guid_id->wwn == wwn)
+ return sport->guid_id;
+ if (sport->gid_id && &sport->gid_id->wwn == wwn)
+ return sport->gid_id;
WARN_ON_ONCE(true);
return NULL;
}
@@ -3774,7 +3802,31 @@ static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
struct config_group *group,
const char *name)
{
- return srpt_lookup_wwn(name) ? : ERR_PTR(-EINVAL);
+ struct port_and_port_id papi = srpt_lookup_port(name);
+ struct srpt_port *sport = papi.sport;
+ struct srpt_port_id *port_id;
+
+ if (!papi.port_id)
+ return ERR_PTR(-EINVAL);
+ if (*papi.port_id) {
+ /* Attempt to create a directory that already exists. */
+ WARN_ON_ONCE(true);
+ return &(*papi.port_id)->wwn;
+ }
+ port_id = kzalloc(sizeof(*port_id), GFP_KERNEL);
+ if (!port_id) {
+ srpt_sdev_put(sport->sdev);
+ return ERR_PTR(-ENOMEM);
+ }
+ mutex_init(&port_id->mutex);
+ INIT_LIST_HEAD(&port_id->tpg_list);
+ port_id->wwn.priv = sport;
+ memcpy(port_id->name, port_id == sport->guid_id ? sport->guid_name :
+ sport->gid_name, ARRAY_SIZE(port_id->name));
+
+ *papi.port_id = port_id;
+
+ return &port_id->wwn;
}
/**
@@ -3783,6 +3835,18 @@ static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
*/
static void srpt_drop_tport(struct se_wwn *wwn)
{
+ struct srpt_port_id *port_id = container_of(wwn, typeof(*port_id), wwn);
+ struct srpt_port *sport = wwn->priv;
+
+ if (sport->guid_id == port_id)
+ sport->guid_id = NULL;
+ else if (sport->gid_id == port_id)
+ sport->gid_id = NULL;
+ else
+ WARN_ON_ONCE(true);
+
+ srpt_sdev_put(sport->sdev);
+ kfree(port_id);
}
static ssize_t srpt_wwn_version_show(struct config_item *item, char *buf)
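The srpt changes above pin the HCA structure with a kref: kref_init() when the device is added, kref_get() whenever a port lookup hands it out, and kref_put() with a release callback on the drop paths. A generic sketch of that lifetime pattern; the structure and function names are illustrative, not the driver's:

#include <linux/kref.h>
#include <linux/slab.h>

struct example_dev {
        struct kref refcnt;
        /* ... device state ... */
};

static void example_dev_release(struct kref *kref)
{
        struct example_dev *dev = container_of(kref, struct example_dev, refcnt);

        kfree(dev);
}

static struct example_dev *example_dev_alloc(void)
{
        struct example_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

        if (dev)
                kref_init(&dev->refcnt);        /* refcount starts at 1 */
        return dev;
}

static void example_dev_get(struct example_dev *dev)
{
        kref_get(&dev->refcnt);
}

static void example_dev_put(struct example_dev *dev)
{
        kref_put(&dev->refcnt, example_dev_release);
}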
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index 76e66f630c17..4c46b301eea1 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -376,7 +376,7 @@ struct srpt_tpg {
};
/**
- * struct srpt_port_id - information about an RDMA port name
+ * struct srpt_port_id - LIO RDMA port information
* @mutex: Protects @tpg_list changes.
* @tpg_list: TPGs associated with the RDMA port name.
* @wwn: WWN associated with the RDMA port name.
@@ -393,7 +393,7 @@ struct srpt_port_id {
};
/**
- * struct srpt_port - information associated by SRPT with a single IB port
+ * struct srpt_port - SRPT RDMA port information
* @sdev: backpointer to the HCA information.
* @mad_agent: per-port management datagram processing information.
* @enabled: Whether or not this target port is enabled.
@@ -402,8 +402,10 @@ struct srpt_port_id {
* @lid: cached value of the port's lid.
* @gid: cached value of the port's gid.
* @work: work structure for refreshing the aforementioned cached values.
- * @port_guid_id: target port GUID
- * @port_gid_id: target port GID
+ * @guid_name: port name in GUID format.
+ * @guid_id: LIO target port information for the port name in GUID format.
+ * @gid_name: port name in GID format.
+ * @gid_id: LIO target port information for the port name in GID format.
* @port_attrib: Port attributes that can be accessed through configfs.
* @refcount: Number of objects associated with this port.
* @freed_channels: Completion that will be signaled once @refcount becomes 0.
@@ -419,8 +421,10 @@ struct srpt_port {
u32 lid;
union ib_gid gid;
struct work_struct work;
- struct srpt_port_id port_guid_id;
- struct srpt_port_id port_gid_id;
+ char guid_name[64];
+ struct srpt_port_id *guid_id;
+ char gid_name[64];
+ struct srpt_port_id *gid_id;
struct srpt_port_attrib port_attrib;
atomic_t refcount;
struct completion *freed_channels;
@@ -430,6 +434,7 @@ struct srpt_port {
/**
* struct srpt_device - information associated by SRPT with a single HCA
+ * @refcnt: Reference count for this device.
* @device: Backpointer to the struct ib_device managed by the IB core.
* @pd: IB protection domain.
* @lkey: L_Key (local key) with write access to all local memory.
@@ -445,6 +450,7 @@ struct srpt_port {
* @port: Information about the ports owned by this HCA.
*/
struct srpt_device {
+ struct kref refcnt;
struct ib_device *device;
struct ib_pd *pd;
u32 lkey;
diff --git a/drivers/input/input-core-private.h b/drivers/input/input-core-private.h
new file mode 100644
index 000000000000..116834cf8868
--- /dev/null
+++ b/drivers/input/input-core-private.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _INPUT_CORE_PRIVATE_H
+#define _INPUT_CORE_PRIVATE_H
+
+/*
+ * Functions and definitions that are private to input core,
+ * should not be used by input drivers or handlers.
+ */
+
+struct input_dev;
+
+void input_mt_release_slots(struct input_dev *dev);
+void input_handle_event(struct input_dev *dev,
+ unsigned int type, unsigned int code, int value);
+
+#endif /* _INPUT_CORE_PRIVATE_H */
diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c
index 44fe6f2f063c..14b53dac1253 100644
--- a/drivers/input/input-mt.c
+++ b/drivers/input/input-mt.c
@@ -8,6 +8,7 @@
#include <linux/input/mt.h>
#include <linux/export.h>
#include <linux/slab.h>
+#include "input-core-private.h"
#define TRKID_SGN ((TRKID_MAX + 1) >> 1)
@@ -259,10 +260,13 @@ static void __input_mt_drop_unused(struct input_dev *dev, struct input_mt *mt)
{
int i;
+ lockdep_assert_held(&dev->event_lock);
+
for (i = 0; i < mt->num_slots; i++) {
- if (!input_mt_is_used(mt, &mt->slots[i])) {
- input_mt_slot(dev, i);
- input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1);
+ if (input_mt_is_active(&mt->slots[i]) &&
+ !input_mt_is_used(mt, &mt->slots[i])) {
+ input_handle_event(dev, EV_ABS, ABS_MT_SLOT, i);
+ input_handle_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1);
}
}
}
@@ -278,13 +282,44 @@ void input_mt_drop_unused(struct input_dev *dev)
struct input_mt *mt = dev->mt;
if (mt) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
__input_mt_drop_unused(dev, mt);
mt->frame++;
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
}
}
EXPORT_SYMBOL(input_mt_drop_unused);
/**
+ * input_mt_release_slots() - Deactivate all slots
+ * @dev: input device with allocated MT slots
+ *
+ * Lift all active slots.
+ */
+void input_mt_release_slots(struct input_dev *dev)
+{
+ struct input_mt *mt = dev->mt;
+
+ lockdep_assert_held(&dev->event_lock);
+
+ if (mt) {
+ /* This will effectively mark all slots unused. */
+ mt->frame++;
+
+ __input_mt_drop_unused(dev, mt);
+
+ if (test_bit(ABS_PRESSURE, dev->absbit))
+ input_handle_event(dev, EV_ABS, ABS_PRESSURE, 0);
+
+ mt->frame++;
+ }
+}
+
+/**
* input_mt_sync_frame() - synchronize mt frame
* @dev: input device with allocated MT slots
*
@@ -300,8 +335,13 @@ void input_mt_sync_frame(struct input_dev *dev)
if (!mt)
return;
- if (mt->flags & INPUT_MT_DROP_UNUSED)
+ if (mt->flags & INPUT_MT_DROP_UNUSED) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
__input_mt_drop_unused(dev, mt);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
if ((mt->flags & INPUT_MT_POINTER) && !(mt->flags & INPUT_MT_SEMI_MT))
use_count = true;
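The input-mt changes above make the slot-dropping helpers require dev->event_lock: external entry points take the lock with spin_lock_irqsave(), while the internal helper only documents the requirement with lockdep_assert_held(). A condensed sketch of that split; the slot-walking body is elided and the helper names are placeholders:

#include <linux/input.h>
#include <linux/lockdep.h>

static void __example_drop_unused(struct input_dev *dev)
{
        lockdep_assert_held(&dev->event_lock);  /* caller must hold the lock */

        /* ... walk MT slots and release the unused ones ... */
}

static void example_drop_unused(struct input_dev *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->event_lock, flags);
        __example_drop_unused(dev);
        spin_unlock_irqrestore(&dev->event_lock, flags);
}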
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 1365c9dfb5f2..ebb2b7f0f8ff 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -24,6 +24,7 @@
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include "input-compat.h"
+#include "input-core-private.h"
#include "input-poller.h"
MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>");
@@ -142,6 +143,8 @@ static void input_pass_values(struct input_dev *dev,
struct input_handle *handle;
struct input_value *v;
+ lockdep_assert_held(&dev->event_lock);
+
if (!count)
return;
@@ -174,44 +177,6 @@ static void input_pass_values(struct input_dev *dev,
}
}
-static void input_pass_event(struct input_dev *dev,
- unsigned int type, unsigned int code, int value)
-{
- struct input_value vals[] = { { type, code, value } };
-
- input_pass_values(dev, vals, ARRAY_SIZE(vals));
-}
-
-/*
- * Generate software autorepeat event. Note that we take
- * dev->event_lock here to avoid racing with input_event
- * which may cause keys get "stuck".
- */
-static void input_repeat_key(struct timer_list *t)
-{
- struct input_dev *dev = from_timer(dev, t, timer);
- unsigned long flags;
-
- spin_lock_irqsave(&dev->event_lock, flags);
-
- if (test_bit(dev->repeat_key, dev->key) &&
- is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {
- struct input_value vals[] = {
- { EV_KEY, dev->repeat_key, 2 },
- input_value_sync
- };
-
- input_set_timestamp(dev, ktime_get());
- input_pass_values(dev, vals, ARRAY_SIZE(vals));
-
- if (dev->rep[REP_PERIOD])
- mod_timer(&dev->timer, jiffies +
- msecs_to_jiffies(dev->rep[REP_PERIOD]));
- }
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
#define INPUT_IGNORE_EVENT 0
#define INPUT_PASS_TO_HANDLERS 1
#define INPUT_PASS_TO_DEVICE 2
@@ -275,6 +240,10 @@ static int input_get_disposition(struct input_dev *dev,
int disposition = INPUT_IGNORE_EVENT;
int value = *pval;
+ /* filter-out events from inhibited devices */
+ if (dev->inhibited)
+ return INPUT_IGNORE_EVENT;
+
switch (type) {
case EV_SYN:
@@ -375,19 +344,9 @@ static int input_get_disposition(struct input_dev *dev,
return disposition;
}
-static void input_handle_event(struct input_dev *dev,
- unsigned int type, unsigned int code, int value)
+static void input_event_dispose(struct input_dev *dev, int disposition,
+ unsigned int type, unsigned int code, int value)
{
- int disposition;
-
- /* filter-out events from inhibited devices */
- if (dev->inhibited)
- return;
-
- disposition = input_get_disposition(dev, type, code, &value);
- if (disposition != INPUT_IGNORE_EVENT && type != EV_SYN)
- add_input_randomness(type, code, value);
-
if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
dev->event(dev, type, code, value);
@@ -426,7 +385,22 @@ static void input_handle_event(struct input_dev *dev,
input_pass_values(dev, dev->vals, dev->num_vals);
dev->num_vals = 0;
}
+}
+void input_handle_event(struct input_dev *dev,
+ unsigned int type, unsigned int code, int value)
+{
+ int disposition;
+
+ lockdep_assert_held(&dev->event_lock);
+
+ disposition = input_get_disposition(dev, type, code, &value);
+ if (disposition != INPUT_IGNORE_EVENT) {
+ if (type != EV_SYN)
+ add_input_randomness(type, code, value);
+
+ input_event_dispose(dev, disposition, type, code, value);
+ }
}
/**
@@ -613,7 +587,7 @@ static void __input_release_device(struct input_handle *handle)
lockdep_is_held(&dev->mutex));
if (grabber == handle) {
rcu_assign_pointer(dev->grab, NULL);
- /* Make sure input_pass_event() notices that grab is gone */
+ /* Make sure input_pass_values() notices that grab is gone */
synchronize_rcu();
list_for_each_entry(handle, &dev->h_list, d_node)
@@ -736,7 +710,7 @@ void input_close_device(struct input_handle *handle)
if (!--handle->open) {
/*
- * synchronize_rcu() makes sure that input_pass_event()
+ * synchronize_rcu() makes sure that input_pass_values()
* completed and that no more input events are delivered
* through this handle
*/
@@ -751,22 +725,21 @@ EXPORT_SYMBOL(input_close_device);
* Simulate keyup events for all keys that are marked as pressed.
* The function must be called with dev->event_lock held.
*/
-static void input_dev_release_keys(struct input_dev *dev)
+static bool input_dev_release_keys(struct input_dev *dev)
{
bool need_sync = false;
int code;
+ lockdep_assert_held(&dev->event_lock);
+
if (is_event_supported(EV_KEY, dev->evbit, EV_MAX)) {
for_each_set_bit(code, dev->key, KEY_CNT) {
- input_pass_event(dev, EV_KEY, code, 0);
+ input_handle_event(dev, EV_KEY, code, 0);
need_sync = true;
}
-
- if (need_sync)
- input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
-
- memset(dev->key, 0, sizeof(dev->key));
}
+
+ return need_sync;
}
/*
@@ -793,7 +766,8 @@ static void input_disconnect_device(struct input_dev *dev)
* generate events even after we done here but they will not
* reach any handlers.
*/
- input_dev_release_keys(dev);
+ if (input_dev_release_keys(dev))
+ input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
list_for_each_entry(handle, &dev->h_list, d_node)
handle->open = 0;
@@ -1004,12 +978,16 @@ int input_set_keycode(struct input_dev *dev,
} else if (test_bit(EV_KEY, dev->evbit) &&
!is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
__test_and_clear_bit(old_keycode, dev->key)) {
- struct input_value vals[] = {
- { EV_KEY, old_keycode, 0 },
- input_value_sync
- };
-
- input_pass_values(dev, vals, ARRAY_SIZE(vals));
+ /*
+ * We have to use input_event_dispose() here directly instead
+ * of input_handle_event() because the key we want to release
+ * here is considered no longer supported by the device and
+ * input_handle_event() will ignore it.
+ */
+ input_event_dispose(dev, INPUT_PASS_TO_HANDLERS,
+ EV_KEY, old_keycode, 0);
+ input_event_dispose(dev, INPUT_PASS_TO_HANDLERS | INPUT_FLUSH,
+ EV_SYN, SYN_REPORT, 1);
}
out:
@@ -1784,7 +1762,8 @@ void input_reset_device(struct input_dev *dev)
spin_lock_irqsave(&dev->event_lock, flags);
input_dev_toggle(dev, true);
- input_dev_release_keys(dev);
+ if (input_dev_release_keys(dev))
+ input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
spin_unlock_irqrestore(&dev->event_lock, flags);
mutex_unlock(&dev->mutex);
@@ -1806,7 +1785,9 @@ static int input_inhibit_device(struct input_dev *dev)
}
spin_lock_irq(&dev->event_lock);
+ input_mt_release_slots(dev);
input_dev_release_keys(dev);
+ input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
input_dev_toggle(dev, false);
spin_unlock_irq(&dev->event_lock);
@@ -1857,7 +1838,8 @@ static int input_dev_suspend(struct device *dev)
* Keys that are pressed now are unlikely to be
* still pressed when we resume.
*/
- input_dev_release_keys(input_dev);
+ if (input_dev_release_keys(input_dev))
+ input_handle_event(input_dev, EV_SYN, SYN_REPORT, 1);
/* Turn off LEDs and sounds, if any are active. */
input_dev_toggle(input_dev, false);
@@ -1891,7 +1873,8 @@ static int input_dev_freeze(struct device *dev)
* Keys that are pressed now are unlikely to be
* still pressed when we resume.
*/
- input_dev_release_keys(input_dev);
+ if (input_dev_release_keys(input_dev))
+ input_handle_event(input_dev, EV_SYN, SYN_REPORT, 1);
spin_unlock_irq(&input_dev->event_lock);
@@ -2259,6 +2242,34 @@ static void devm_input_device_unregister(struct device *dev, void *res)
__input_unregister_device(input);
}
+/*
+ * Generate software autorepeat event. Note that we take
+ * dev->event_lock here to avoid racing with input_event
+ * which may cause keys get "stuck".
+ */
+static void input_repeat_key(struct timer_list *t)
+{
+ struct input_dev *dev = from_timer(dev, t, timer);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ if (!dev->inhibited &&
+ test_bit(dev->repeat_key, dev->key) &&
+ is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {
+
+ input_set_timestamp(dev, ktime_get());
+ input_handle_event(dev, EV_KEY, dev->repeat_key, 2);
+ input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
+
+ if (dev->rep[REP_PERIOD])
+ mod_timer(&dev->timer, jiffies +
+ msecs_to_jiffies(dev->rep[REP_PERIOD]));
+ }
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
/**
* input_enable_softrepeat - enable software autorepeat
* @dev: input device
diff --git a/drivers/input/joystick/adc-joystick.c b/drivers/input/joystick/adc-joystick.c
index 78ebca7d400a..e0cfdc84763f 100644
--- a/drivers/input/joystick/adc-joystick.c
+++ b/drivers/input/joystick/adc-joystick.c
@@ -222,13 +222,6 @@ static int adc_joystick_probe(struct platform_device *pdev)
if (error)
return error;
- input_set_drvdata(input, joy);
- error = input_register_device(input);
- if (error) {
- dev_err(dev, "Unable to register input device\n");
- return error;
- }
-
joy->buffer = iio_channel_get_all_cb(dev, adc_joystick_handle, joy);
if (IS_ERR(joy->buffer)) {
dev_err(dev, "Unable to allocate callback buffer\n");
@@ -241,6 +234,14 @@ static int adc_joystick_probe(struct platform_device *pdev)
return error;
}
+ input_set_drvdata(input, joy);
+
+ error = input_register_device(input);
+ if (error) {
+ dev_err(dev, "Unable to register input device\n");
+ return error;
+ }
+
return 0;
}
diff --git a/drivers/input/joystick/sensehat-joystick.c b/drivers/input/joystick/sensehat-joystick.c
index 5ad1fe4ff496..a84df39d3b2f 100644
--- a/drivers/input/joystick/sensehat-joystick.c
+++ b/drivers/input/joystick/sensehat-joystick.c
@@ -98,10 +98,8 @@ static int sensehat_joystick_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "Could not retrieve interrupt request");
+ if (irq < 0)
return irq;
- }
error = devm_request_threaded_irq(&pdev->dev, irq,
NULL, sensehat_joystick_report,
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 4ea79db8f134..a20ee693b22b 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -795,7 +795,7 @@ config KEYBOARD_MT6779
config KEYBOARD_MTK_PMIC
tristate "MediaTek PMIC keys support"
- depends on MFD_MT6397
+ depends on MFD_MT6397 || COMPILE_TEST
help
Say Y here if you want to use the pmic keys (powerkey/homekey).
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index 1592da4de336..1a1a05d7cd42 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -8,17 +8,19 @@
* Copyright (C) 2008-2010 Analog Devices Inc.
*/
-#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/gpio/driver.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/workqueue.h>
-#include <linux/errno.h>
-#include <linux/pm.h>
+#include <linux/ktime.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/input.h>
-#include <linux/i2c.h>
-#include <linux/gpio/driver.h>
+#include <linux/pm.h>
#include <linux/slab.h>
+#include <linux/timekeeping.h>
#include <linux/platform_data/adp5588.h>
@@ -36,18 +38,18 @@
* asserted.
*/
#define WA_DELAYED_READOUT_REVID(rev) ((rev) < 4)
+#define WA_DELAYED_READOUT_TIME 25
struct adp5588_kpad {
struct i2c_client *client;
struct input_dev *input;
- struct delayed_work work;
+ ktime_t irq_time;
unsigned long delay;
unsigned short keycode[ADP5588_KEYMAPSIZE];
const struct adp5588_gpi_map *gpimap;
unsigned short gpimapsize;
#ifdef CONFIG_GPIOLIB
unsigned char gpiomap[ADP5588_MAXGPIO];
- bool export_gpio;
struct gpio_chip gc;
struct mutex gpio_lock; /* Protect cached dir, dat_out */
u8 dat_out[3];
@@ -179,6 +181,21 @@ static int adp5588_build_gpiomap(struct adp5588_kpad *kpad,
return n_unused;
}
+static void adp5588_gpio_do_teardown(void *_kpad)
+{
+ struct adp5588_kpad *kpad = _kpad;
+ struct device *dev = &kpad->client->dev;
+ const struct adp5588_kpad_platform_data *pdata = dev_get_platdata(dev);
+ const struct adp5588_gpio_platform_data *gpio_data = pdata->gpio_data;
+ int error;
+
+ error = gpio_data->teardown(kpad->client,
+ kpad->gc.base, kpad->gc.ngpio,
+ gpio_data->context);
+ if (error)
+ dev_warn(&kpad->client->dev, "teardown failed %d\n", error);
+}
+
static int adp5588_gpio_add(struct adp5588_kpad *kpad)
{
struct device *dev = &kpad->client->dev;
@@ -195,8 +212,6 @@ static int adp5588_gpio_add(struct adp5588_kpad *kpad)
return 0;
}
- kpad->export_gpio = true;
-
kpad->gc.direction_input = adp5588_gpio_direction_input;
kpad->gc.direction_output = adp5588_gpio_direction_output;
kpad->gc.get = adp5588_gpio_get_value;
@@ -210,9 +225,9 @@ static int adp5588_gpio_add(struct adp5588_kpad *kpad)
mutex_init(&kpad->gpio_lock);
- error = gpiochip_add_data(&kpad->gc, kpad);
+ error = devm_gpiochip_add_data(dev, &kpad->gc, kpad);
if (error) {
- dev_err(dev, "gpiochip_add failed, err: %d\n", error);
+ dev_err(dev, "gpiochip_add failed: %d\n", error);
return error;
}
@@ -227,41 +242,24 @@ static int adp5588_gpio_add(struct adp5588_kpad *kpad)
kpad->gc.base, kpad->gc.ngpio,
gpio_data->context);
if (error)
- dev_warn(dev, "setup failed, %d\n", error);
+ dev_warn(dev, "setup failed: %d\n", error);
}
- return 0;
-}
-
-static void adp5588_gpio_remove(struct adp5588_kpad *kpad)
-{
- struct device *dev = &kpad->client->dev;
- const struct adp5588_kpad_platform_data *pdata = dev_get_platdata(dev);
- const struct adp5588_gpio_platform_data *gpio_data = pdata->gpio_data;
- int error;
-
- if (!kpad->export_gpio)
- return;
-
if (gpio_data->teardown) {
- error = gpio_data->teardown(kpad->client,
- kpad->gc.base, kpad->gc.ngpio,
- gpio_data->context);
+ error = devm_add_action(dev, adp5588_gpio_do_teardown, kpad);
if (error)
- dev_warn(dev, "teardown failed %d\n", error);
+ dev_warn(dev, "failed to schedule teardown: %d\n",
+ error);
}
- gpiochip_remove(&kpad->gc);
+ return 0;
}
+
#else
static inline int adp5588_gpio_add(struct adp5588_kpad *kpad)
{
return 0;
}
-
-static inline void adp5588_gpio_remove(struct adp5588_kpad *kpad)
-{
-}
#endif
static void adp5588_report_events(struct adp5588_kpad *kpad, int ev_cnt)
@@ -289,13 +287,36 @@ static void adp5588_report_events(struct adp5588_kpad *kpad, int ev_cnt)
}
}
-static void adp5588_work(struct work_struct *work)
+static irqreturn_t adp5588_hard_irq(int irq, void *handle)
{
- struct adp5588_kpad *kpad = container_of(work,
- struct adp5588_kpad, work.work);
+ struct adp5588_kpad *kpad = handle;
+
+ kpad->irq_time = ktime_get();
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t adp5588_thread_irq(int irq, void *handle)
+{
+ struct adp5588_kpad *kpad = handle;
struct i2c_client *client = kpad->client;
+ ktime_t target_time, now;
+ unsigned long delay;
int status, ev_cnt;
+ /*
+ * Readout needs to wait for at least 25ms after the notification
+ * for REVID < 4.
+ */
+ if (kpad->delay) {
+ target_time = ktime_add_ms(kpad->irq_time, kpad->delay);
+ now = ktime_get();
+ if (ktime_before(now, target_time)) {
+ delay = ktime_to_us(ktime_sub(target_time, now));
+ usleep_range(delay, delay + 1000);
+ }
+ }
+
status = adp5588_read(client, INT_STAT);
if (status & ADP5588_OVR_FLOW_INT) /* Unlikely and should never happen */
@@ -308,20 +329,8 @@ static void adp5588_work(struct work_struct *work)
input_sync(kpad->input);
}
}
- adp5588_write(client, INT_STAT, status); /* Status is W1C */
-}
-
-static irqreturn_t adp5588_irq(int irq, void *handle)
-{
- struct adp5588_kpad *kpad = handle;
- /*
- * use keventd context to read the event fifo registers
- * Schedule readout at least 25ms after notification for
- * REVID < 4
- */
-
- schedule_delayed_work(&kpad->work, kpad->delay);
+ adp5588_write(client, INT_STAT, status); /* Status is W1C */
return IRQ_HANDLED;
}
@@ -496,30 +505,27 @@ static int adp5588_probe(struct i2c_client *client,
return -EINVAL;
}
- kpad = kzalloc(sizeof(*kpad), GFP_KERNEL);
- input = input_allocate_device();
- if (!kpad || !input) {
- error = -ENOMEM;
- goto err_free_mem;
- }
+ kpad = devm_kzalloc(&client->dev, sizeof(*kpad), GFP_KERNEL);
+ if (!kpad)
+ return -ENOMEM;
+
+ input = devm_input_allocate_device(&client->dev);
+ if (!input)
+ return -ENOMEM;
kpad->client = client;
kpad->input = input;
- INIT_DELAYED_WORK(&kpad->work, adp5588_work);
ret = adp5588_read(client, DEV_ID);
- if (ret < 0) {
- error = ret;
- goto err_free_mem;
- }
+ if (ret < 0)
+ return ret;
revid = (u8) ret & ADP5588_DEVICE_ID_MASK;
if (WA_DELAYED_READOUT_REVID(revid))
- kpad->delay = msecs_to_jiffies(30);
+ kpad->delay = msecs_to_jiffies(WA_DELAYED_READOUT_TIME);
input->name = client->name;
input->phys = "adp5588-keys/input0";
- input->dev.parent = &client->dev;
input_set_drvdata(input, kpad);
@@ -556,95 +562,63 @@ static int adp5588_probe(struct i2c_client *client,
error = input_register_device(input);
if (error) {
- dev_err(&client->dev, "unable to register input device\n");
- goto err_free_mem;
+ dev_err(&client->dev, "unable to register input device: %d\n",
+ error);
+ return error;
}
- error = request_irq(client->irq, adp5588_irq,
- IRQF_TRIGGER_FALLING,
- client->dev.driver->name, kpad);
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ adp5588_hard_irq, adp5588_thread_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ client->dev.driver->name, kpad);
if (error) {
- dev_err(&client->dev, "irq %d busy?\n", client->irq);
- goto err_unreg_dev;
+ dev_err(&client->dev, "failed to request irq %d: %d\n",
+ client->irq, error);
+ return error;
}
error = adp5588_setup(client);
if (error)
- goto err_free_irq;
+ return error;
if (kpad->gpimapsize)
adp5588_report_switch_state(kpad);
error = adp5588_gpio_add(kpad);
if (error)
- goto err_free_irq;
-
- device_init_wakeup(&client->dev, 1);
- i2c_set_clientdata(client, kpad);
+ return error;
dev_info(&client->dev, "Rev.%d keypad, irq %d\n", revid, client->irq);
return 0;
-
- err_free_irq:
- free_irq(client->irq, kpad);
- cancel_delayed_work_sync(&kpad->work);
- err_unreg_dev:
- input_unregister_device(input);
- input = NULL;
- err_free_mem:
- input_free_device(input);
- kfree(kpad);
-
- return error;
}
static int adp5588_remove(struct i2c_client *client)
{
- struct adp5588_kpad *kpad = i2c_get_clientdata(client);
-
adp5588_write(client, CFG, 0);
- free_irq(client->irq, kpad);
- cancel_delayed_work_sync(&kpad->work);
- input_unregister_device(kpad->input);
- adp5588_gpio_remove(kpad);
- kfree(kpad);
+ /* all resources will be freed by devm */
return 0;
}
-#ifdef CONFIG_PM
-static int adp5588_suspend(struct device *dev)
+static int __maybe_unused adp5588_suspend(struct device *dev)
{
- struct adp5588_kpad *kpad = dev_get_drvdata(dev);
- struct i2c_client *client = kpad->client;
+ struct i2c_client *client = to_i2c_client(dev);
disable_irq(client->irq);
- cancel_delayed_work_sync(&kpad->work);
-
- if (device_may_wakeup(&client->dev))
- enable_irq_wake(client->irq);
return 0;
}
-static int adp5588_resume(struct device *dev)
+static int __maybe_unused adp5588_resume(struct device *dev)
{
- struct adp5588_kpad *kpad = dev_get_drvdata(dev);
- struct i2c_client *client = kpad->client;
-
- if (device_may_wakeup(&client->dev))
- disable_irq_wake(client->irq);
+ struct i2c_client *client = to_i2c_client(dev);
enable_irq(client->irq);
return 0;
}
-static const struct dev_pm_ops adp5588_dev_pm_ops = {
- .suspend = adp5588_suspend,
- .resume = adp5588_resume,
-};
-#endif
+static SIMPLE_DEV_PM_OPS(adp5588_dev_pm_ops, adp5588_suspend, adp5588_resume);
static const struct i2c_device_id adp5588_id[] = {
{ "adp5588-keys", 0 },
@@ -656,9 +630,7 @@ MODULE_DEVICE_TABLE(i2c, adp5588_id);
static struct i2c_driver adp5588_driver = {
.driver = {
.name = KBUILD_MODNAME,
-#ifdef CONFIG_PM
.pm = &adp5588_dev_pm_ops,
-#endif
},
.probe = adp5588_probe,
.remove = adp5588_remove,
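
For reference, the hard/threaded IRQ split that replaces the delayed work in the adp5588 hunks above follows the pattern sketched below. This is a minimal illustration only: the struct layout, names and the "read the FIFO" placeholder are illustrative, not the full driver.

#include <linux/interrupt.h>
#include <linux/ktime.h>
#include <linux/delay.h>
#include <linux/i2c.h>

struct kpad_sketch {
	struct i2c_client *client;
	ktime_t irq_time;
	unsigned long delay;	/* ms to wait after the interrupt, 0 if none */
};

/* Hard handler: only timestamp the interrupt, then wake the thread. */
static irqreturn_t kpad_hard_irq(int irq, void *handle)
{
	struct kpad_sketch *kpad = handle;

	kpad->irq_time = ktime_get();
	return IRQ_WAKE_THREAD;
}

/* Threaded handler: sleep out whatever is left of the delay, then read the FIFO. */
static irqreturn_t kpad_thread_irq(int irq, void *handle)
{
	struct kpad_sketch *kpad = handle;
	ktime_t target = ktime_add_ms(kpad->irq_time, kpad->delay);
	ktime_t now = ktime_get();

	if (kpad->delay && ktime_before(now, target)) {
		unsigned long us = ktime_to_us(ktime_sub(target, now));

		usleep_range(us, us + 1000);
	}

	/* ... read and acknowledge the event FIFO over I2C here ... */
	return IRQ_HANDLED;
}

/* IRQF_ONESHOT keeps the line masked until the threaded handler returns. */
static int kpad_request_irq(struct kpad_sketch *kpad)
{
	return devm_request_threaded_irq(&kpad->client->dev, kpad->client->irq,
					 kpad_hard_irq, kpad_thread_irq,
					 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
					 "kpad-sketch", kpad);
}
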
diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
index cc73a149da28..c14136b733a9 100644
--- a/drivers/input/keyboard/cros_ec_keyb.c
+++ b/drivers/input/keyboard/cros_ec_keyb.c
@@ -12,6 +12,7 @@
// expensive.
#include <linux/module.h>
+#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/i2c.h>
#include <linux/input.h>
@@ -518,6 +519,50 @@ static int cros_ec_keyb_register_bs(struct cros_ec_keyb *ckdev,
return 0;
}
+static void cros_ec_keyb_parse_vivaldi_physmap(struct cros_ec_keyb *ckdev)
+{
+ u32 *physmap = ckdev->vdata.function_row_physmap;
+ unsigned int row, col, scancode;
+ int n_physmap;
+ int error;
+ int i;
+
+ n_physmap = device_property_count_u32(ckdev->dev,
+ "function-row-physmap");
+ if (n_physmap <= 0)
+ return;
+
+ if (n_physmap >= VIVALDI_MAX_FUNCTION_ROW_KEYS) {
+ dev_warn(ckdev->dev,
+ "only up to %d top row keys is supported (%d specified)\n",
+ VIVALDI_MAX_FUNCTION_ROW_KEYS, n_physmap);
+ n_physmap = VIVALDI_MAX_FUNCTION_ROW_KEYS;
+ }
+
+ error = device_property_read_u32_array(ckdev->dev,
+ "function-row-physmap",
+ physmap, n_physmap);
+ if (error) {
+ dev_warn(ckdev->dev,
+ "failed to parse function-row-physmap property: %d\n",
+ error);
+ return;
+ }
+
+ /*
+ * Convert (in place) from row/column encoding to matrix "scancode"
+ * used by the driver.
+ */
+ for (i = 0; i < n_physmap; i++) {
+ row = KEY_ROW(physmap[i]);
+ col = KEY_COL(physmap[i]);
+ scancode = MATRIX_SCAN_CODE(row, col, ckdev->row_shift);
+ physmap[i] = scancode;
+ }
+
+ ckdev->vdata.num_function_row_keys = n_physmap;
+}
+
/**
* cros_ec_keyb_register_matrix - Register matrix keys
*
@@ -534,11 +579,6 @@ static int cros_ec_keyb_register_matrix(struct cros_ec_keyb *ckdev)
struct input_dev *idev;
const char *phys;
int err;
- struct property *prop;
- const __be32 *p;
- u32 *physmap;
- u32 key_pos;
- unsigned int row, col, scancode, n_physmap;
err = matrix_keypad_parse_properties(dev, &ckdev->rows, &ckdev->cols);
if (err)
@@ -573,7 +613,7 @@ static int cros_ec_keyb_register_matrix(struct cros_ec_keyb *ckdev)
idev->id.product = 0;
idev->dev.parent = dev;
- ckdev->ghost_filter = of_property_read_bool(dev->of_node,
+ ckdev->ghost_filter = device_property_read_bool(dev,
"google,needs-ghost-filter");
err = matrix_keypad_build_keymap(NULL, NULL, ckdev->rows, ckdev->cols,
@@ -589,22 +629,7 @@ static int cros_ec_keyb_register_matrix(struct cros_ec_keyb *ckdev)
input_set_drvdata(idev, ckdev);
ckdev->idev = idev;
cros_ec_keyb_compute_valid_keys(ckdev);
-
- physmap = ckdev->vdata.function_row_physmap;
- n_physmap = 0;
- of_property_for_each_u32(dev->of_node, "function-row-physmap",
- prop, p, key_pos) {
- if (n_physmap == VIVALDI_MAX_FUNCTION_ROW_KEYS) {
- dev_warn(dev, "Only support up to %d top row keys\n",
- VIVALDI_MAX_FUNCTION_ROW_KEYS);
- break;
- }
- row = KEY_ROW(key_pos);
- col = KEY_COL(key_pos);
- scancode = MATRIX_SCAN_CODE(row, col, ckdev->row_shift);
- physmap[n_physmap++] = scancode;
- }
- ckdev->vdata.num_function_row_keys = n_physmap;
+ cros_ec_keyb_parse_vivaldi_physmap(ckdev);
err = input_register_device(ckdev->idev);
if (err) {
@@ -653,14 +678,19 @@ static const struct attribute_group cros_ec_keyb_attr_group = {
static int cros_ec_keyb_probe(struct platform_device *pdev)
{
- struct cros_ec_device *ec = dev_get_drvdata(pdev->dev.parent);
+ struct cros_ec_device *ec;
struct device *dev = &pdev->dev;
struct cros_ec_keyb *ckdev;
bool buttons_switches_only = device_get_match_data(dev);
int err;
- if (!dev->of_node)
- return -ENODEV;
+ /*
+ * If the parent ec device has not been probed yet, defer the probe of
+ * this keyboard/button driver until later.
+ */
+ ec = dev_get_drvdata(pdev->dev.parent);
+ if (!ec)
+ return -EPROBE_DEFER;
ckdev = devm_kzalloc(dev, sizeof(*ckdev), GFP_KERNEL);
if (!ckdev)
@@ -713,6 +743,14 @@ static int cros_ec_keyb_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id cros_ec_keyb_acpi_match[] = {
+ { "GOOG0007", true },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, cros_ec_keyb_acpi_match);
+#endif
+
#ifdef CONFIG_OF
static const struct of_device_id cros_ec_keyb_of_match[] = {
{ .compatible = "google,cros-ec-keyb" },
@@ -730,6 +768,7 @@ static struct platform_driver cros_ec_keyb_driver = {
.driver = {
.name = "cros-ec-keyb",
.of_match_table = of_match_ptr(cros_ec_keyb_of_match),
+ .acpi_match_table = ACPI_PTR(cros_ec_keyb_acpi_match),
.pm = &cros_ec_keyb_pm_ops,
},
};
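
The new cros_ec_keyb_parse_vivaldi_physmap() helper above converts each "function-row-physmap" cell from row/column encoding into the matrix scancode used by the driver. A standalone sketch of that arithmetic follows; the macro bodies mirror include/linux/input/matrix_keypad.h, and the sample cells and the 13-column assumption are made up for illustration.

#include <stdio.h>
#include <stdint.h>

/* Same encoding as include/linux/input/matrix_keypad.h */
#define KEY_ROW(k)				(((k) >> 24) & 0xff)
#define KEY_COL(k)				(((k) >> 16) & 0xff)
#define MATRIX_SCAN_CODE(row, col, row_shift)	(((row) << (row_shift)) + (col))

int main(void)
{
	/* Hypothetical physmap cells: row in bits 31:24, column in bits 23:16. */
	uint32_t physmap[] = { 0x00010000, 0x03020000, 0x00070000 };
	unsigned int row_shift = 4;	/* get_count_order(13 columns) == 4 */
	size_t i;

	for (i = 0; i < sizeof(physmap) / sizeof(physmap[0]); i++) {
		unsigned int row = KEY_ROW(physmap[i]);
		unsigned int col = KEY_COL(physmap[i]);
		unsigned int scancode = MATRIX_SCAN_CODE(row, col, row_shift);

		printf("cell %zu: row=%u col=%u -> scancode=%u\n",
		       i, row, col, scancode);
	}
	return 0;
}
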
diff --git a/drivers/input/keyboard/mt6779-keypad.c b/drivers/input/keyboard/mt6779-keypad.c
index 2e7c9187c10f..bf447bf598fb 100644
--- a/drivers/input/keyboard/mt6779-keypad.c
+++ b/drivers/input/keyboard/mt6779-keypad.c
@@ -17,6 +17,11 @@
#define MTK_KPD_DEBOUNCE 0x0018
#define MTK_KPD_DEBOUNCE_MASK GENMASK(13, 0)
#define MTK_KPD_DEBOUNCE_MAX_MS 256
+#define MTK_KPD_SEL 0x0020
+#define MTK_KPD_SEL_COL GENMASK(15, 10)
+#define MTK_KPD_SEL_ROW GENMASK(9, 4)
+#define MTK_KPD_SEL_COLMASK(c) GENMASK((c) + 9, 10)
+#define MTK_KPD_SEL_ROWMASK(r) GENMASK((r) + 3, 4)
#define MTK_KPD_NUM_MEMS 5
#define MTK_KPD_NUM_BITS 136 /* 4*32+8 MEM5 only use 8 BITS */
@@ -42,7 +47,7 @@ static irqreturn_t mt6779_keypad_irq_handler(int irq, void *dev_id)
const unsigned short *keycode = keypad->input_dev->keycode;
DECLARE_BITMAP(new_state, MTK_KPD_NUM_BITS);
DECLARE_BITMAP(change, MTK_KPD_NUM_BITS);
- unsigned int bit_nr;
+ unsigned int bit_nr, key;
unsigned int row, col;
unsigned int scancode;
unsigned int row_shift = get_count_order(keypad->n_cols);
@@ -61,8 +66,10 @@ static irqreturn_t mt6779_keypad_irq_handler(int irq, void *dev_id)
if (bit_nr % 32 >= 16)
continue;
- row = bit_nr / 32;
- col = bit_nr % 32;
+ key = bit_nr / 32 * 16 + bit_nr % 32;
+ row = key / 9;
+ col = key % 9;
+
scancode = MATRIX_SCAN_CODE(row, col, row_shift);
/* 1: not pressed, 0: pressed */
pressed = !test_bit(bit_nr, new_state);
@@ -159,6 +166,11 @@ static int mt6779_keypad_pdrv_probe(struct platform_device *pdev)
regmap_write(keypad->regmap, MTK_KPD_DEBOUNCE,
(debounce * (1 << 5)) & MTK_KPD_DEBOUNCE_MASK);
+ regmap_update_bits(keypad->regmap, MTK_KPD_SEL, MTK_KPD_SEL_ROW,
+ MTK_KPD_SEL_ROWMASK(keypad->n_rows));
+ regmap_update_bits(keypad->regmap, MTK_KPD_SEL, MTK_KPD_SEL_COL,
+ MTK_KPD_SEL_COLMASK(keypad->n_cols));
+
keypad->clk = devm_clk_get(&pdev->dev, "kpd");
if (IS_ERR(keypad->clk))
return PTR_ERR(keypad->clk);
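
The mt6779-keypad hunk above packs the lower 16 bits of each 32-bit status word into a linear key index and assumes a 9-column matrix. A small standalone check of that mapping is shown below; the constants mirror the patch, while the sample bit numbers are arbitrary.

#include <stdio.h>

int main(void)
{
	/* Bits 16..31 of each status word are unused, as in the interrupt handler. */
	unsigned int samples[] = { 0, 8, 33, 40, 70 };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		unsigned int bit_nr = samples[i];
		unsigned int key, row, col;

		if (bit_nr % 32 >= 16) {
			printf("bit %u: skipped (upper half of word)\n", bit_nr);
			continue;
		}
		key = bit_nr / 32 * 16 + bit_nr % 32;
		row = key / 9;
		col = key % 9;
		printf("bit %u -> key %u -> row %u, col %u\n", bit_nr, key, row, col);
	}
	return 0;
}
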
diff --git a/drivers/input/keyboard/mtk-pmic-keys.c b/drivers/input/keyboard/mtk-pmic-keys.c
index c31ab4368388..6404081253ea 100644
--- a/drivers/input/keyboard/mtk-pmic-keys.c
+++ b/drivers/input/keyboard/mtk-pmic-keys.c
@@ -18,17 +18,9 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#define MTK_PMIC_PWRKEY_RST_EN_MASK 0x1
-#define MTK_PMIC_PWRKEY_RST_EN_SHIFT 6
-#define MTK_PMIC_HOMEKEY_RST_EN_MASK 0x1
-#define MTK_PMIC_HOMEKEY_RST_EN_SHIFT 5
-#define MTK_PMIC_RST_DU_MASK 0x3
-#define MTK_PMIC_RST_DU_SHIFT 8
-
-#define MTK_PMIC_PWRKEY_RST \
- (MTK_PMIC_PWRKEY_RST_EN_MASK << MTK_PMIC_PWRKEY_RST_EN_SHIFT)
-#define MTK_PMIC_HOMEKEY_RST \
- (MTK_PMIC_HOMEKEY_RST_EN_MASK << MTK_PMIC_HOMEKEY_RST_EN_SHIFT)
+#define MTK_PMIC_RST_DU_MASK GENMASK(9, 8)
+#define MTK_PMIC_PWRKEY_RST BIT(6)
+#define MTK_PMIC_HOMEKEY_RST BIT(5)
#define MTK_PMIC_PWRKEY_INDEX 0
#define MTK_PMIC_HOMEKEY_INDEX 1
@@ -39,50 +31,58 @@ struct mtk_pmic_keys_regs {
u32 deb_mask;
u32 intsel_reg;
u32 intsel_mask;
+ u32 rst_en_mask;
};
#define MTK_PMIC_KEYS_REGS(_deb_reg, _deb_mask, \
- _intsel_reg, _intsel_mask) \
+ _intsel_reg, _intsel_mask, _rst_mask) \
{ \
.deb_reg = _deb_reg, \
.deb_mask = _deb_mask, \
.intsel_reg = _intsel_reg, \
.intsel_mask = _intsel_mask, \
+ .rst_en_mask = _rst_mask, \
}
struct mtk_pmic_regs {
const struct mtk_pmic_keys_regs keys_regs[MTK_PMIC_MAX_KEY_COUNT];
u32 pmic_rst_reg;
+ u32 rst_lprst_mask; /* Long-press reset timeout bitmask */
};
static const struct mtk_pmic_regs mt6397_regs = {
.keys_regs[MTK_PMIC_PWRKEY_INDEX] =
MTK_PMIC_KEYS_REGS(MT6397_CHRSTATUS,
- 0x8, MT6397_INT_RSV, 0x10),
+ 0x8, MT6397_INT_RSV, 0x10, MTK_PMIC_PWRKEY_RST),
.keys_regs[MTK_PMIC_HOMEKEY_INDEX] =
MTK_PMIC_KEYS_REGS(MT6397_OCSTATUS2,
- 0x10, MT6397_INT_RSV, 0x8),
+ 0x10, MT6397_INT_RSV, 0x8, MTK_PMIC_HOMEKEY_RST),
.pmic_rst_reg = MT6397_TOP_RST_MISC,
+ .rst_lprst_mask = MTK_PMIC_RST_DU_MASK,
};
static const struct mtk_pmic_regs mt6323_regs = {
.keys_regs[MTK_PMIC_PWRKEY_INDEX] =
MTK_PMIC_KEYS_REGS(MT6323_CHRSTATUS,
- 0x2, MT6323_INT_MISC_CON, 0x10),
+ 0x2, MT6323_INT_MISC_CON, 0x10, MTK_PMIC_PWRKEY_RST),
.keys_regs[MTK_PMIC_HOMEKEY_INDEX] =
MTK_PMIC_KEYS_REGS(MT6323_CHRSTATUS,
- 0x4, MT6323_INT_MISC_CON, 0x8),
+ 0x4, MT6323_INT_MISC_CON, 0x8, MTK_PMIC_HOMEKEY_RST),
.pmic_rst_reg = MT6323_TOP_RST_MISC,
+ .rst_lprst_mask = MTK_PMIC_RST_DU_MASK,
};
static const struct mtk_pmic_regs mt6358_regs = {
.keys_regs[MTK_PMIC_PWRKEY_INDEX] =
MTK_PMIC_KEYS_REGS(MT6358_TOPSTATUS,
- 0x2, MT6358_PSC_TOP_INT_CON0, 0x5),
+ 0x2, MT6358_PSC_TOP_INT_CON0, 0x5,
+ MTK_PMIC_PWRKEY_RST),
.keys_regs[MTK_PMIC_HOMEKEY_INDEX] =
MTK_PMIC_KEYS_REGS(MT6358_TOPSTATUS,
- 0x8, MT6358_PSC_TOP_INT_CON0, 0xa),
+ 0x8, MT6358_PSC_TOP_INT_CON0, 0xa,
+ MTK_PMIC_HOMEKEY_RST),
.pmic_rst_reg = MT6358_TOP_RST_MISC,
+ .rst_lprst_mask = MTK_PMIC_RST_DU_MASK,
};
struct mtk_pmic_keys_info {
@@ -108,53 +108,49 @@ enum mtk_pmic_keys_lp_mode {
};
static void mtk_pmic_keys_lp_reset_setup(struct mtk_pmic_keys *keys,
- u32 pmic_rst_reg)
+ const struct mtk_pmic_regs *regs)
{
- int ret;
+ const struct mtk_pmic_keys_regs *kregs_home, *kregs_pwr;
u32 long_press_mode, long_press_debounce;
+ u32 value, mask;
+ int error;
+
+ kregs_home = keys->keys[MTK_PMIC_HOMEKEY_INDEX].regs;
+ kregs_pwr = keys->keys[MTK_PMIC_PWRKEY_INDEX].regs;
- ret = of_property_read_u32(keys->dev->of_node,
- "power-off-time-sec", &long_press_debounce);
- if (ret)
+ error = of_property_read_u32(keys->dev->of_node, "power-off-time-sec",
+ &long_press_debounce);
+ if (error)
long_press_debounce = 0;
- regmap_update_bits(keys->regmap, pmic_rst_reg,
- MTK_PMIC_RST_DU_MASK << MTK_PMIC_RST_DU_SHIFT,
- long_press_debounce << MTK_PMIC_RST_DU_SHIFT);
+ mask = regs->rst_lprst_mask;
+ value = long_press_debounce << (ffs(regs->rst_lprst_mask) - 1);
- ret = of_property_read_u32(keys->dev->of_node,
- "mediatek,long-press-mode", &long_press_mode);
- if (ret)
+ error = of_property_read_u32(keys->dev->of_node,
+ "mediatek,long-press-mode",
+ &long_press_mode);
+ if (error)
long_press_mode = LP_DISABLE;
switch (long_press_mode) {
- case LP_ONEKEY:
- regmap_update_bits(keys->regmap, pmic_rst_reg,
- MTK_PMIC_PWRKEY_RST,
- MTK_PMIC_PWRKEY_RST);
- regmap_update_bits(keys->regmap, pmic_rst_reg,
- MTK_PMIC_HOMEKEY_RST,
- 0);
- break;
case LP_TWOKEY:
- regmap_update_bits(keys->regmap, pmic_rst_reg,
- MTK_PMIC_PWRKEY_RST,
- MTK_PMIC_PWRKEY_RST);
- regmap_update_bits(keys->regmap, pmic_rst_reg,
- MTK_PMIC_HOMEKEY_RST,
- MTK_PMIC_HOMEKEY_RST);
- break;
+ value |= kregs_home->rst_en_mask;
+ fallthrough;
+
+ case LP_ONEKEY:
+ value |= kregs_pwr->rst_en_mask;
+ fallthrough;
+
case LP_DISABLE:
- regmap_update_bits(keys->regmap, pmic_rst_reg,
- MTK_PMIC_PWRKEY_RST,
- 0);
- regmap_update_bits(keys->regmap, pmic_rst_reg,
- MTK_PMIC_HOMEKEY_RST,
- 0);
+ mask |= kregs_home->rst_en_mask;
+ mask |= kregs_pwr->rst_en_mask;
break;
+
default:
break;
}
+
+ regmap_update_bits(keys->regmap, regs->pmic_rst_reg, mask, value);
}
static irqreturn_t mtk_pmic_keys_irq_handler_thread(int irq, void *data)
@@ -358,7 +354,7 @@ static int mtk_pmic_keys_probe(struct platform_device *pdev)
return error;
}
- mtk_pmic_keys_lp_reset_setup(keys, mtk_pmic_regs->pmic_rst_reg);
+ mtk_pmic_keys_lp_reset_setup(keys, mtk_pmic_regs);
platform_set_drvdata(pdev, keys);
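
In the mtk-pmic-keys change above, the long-press timeout is now placed into the field described by rst_lprst_mask with an ffs()-derived shift instead of a hard-coded one. A standalone illustration, with the mask value taken from the MTK_PMIC_RST_DU_MASK definition and an arbitrary debounce value:

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int mask = 0x3u << 8;	/* GENMASK(9, 8) */
	unsigned int long_press_debounce = 2;
	unsigned int value = long_press_debounce << (ffs(mask) - 1);

	/* regmap_update_bits(regmap, rst_reg, mask, value) then touches only bits 9:8. */
	printf("mask=0x%03x shift=%d value=0x%03x\n", mask, ffs(mask) - 1, value);
	return 0;
}
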
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index 8a7ce41b8c56..ee9d04a3f0d5 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -179,11 +179,9 @@ static irqreturn_t omap4_keypad_irq_thread_fn(int irq, void *dev_id)
int error;
u64 keys;
- error = pm_runtime_get_sync(dev);
- if (error < 0) {
- pm_runtime_put_noidle(dev);
+ error = pm_runtime_resume_and_get(dev);
+ if (error)
return IRQ_NONE;
- }
low = kbd_readl(keypad_data, OMAP4_KBD_FULLCODE31_0);
high = kbd_readl(keypad_data, OMAP4_KBD_FULLCODE63_32);
@@ -207,11 +205,9 @@ static int omap4_keypad_open(struct input_dev *input)
struct device *dev = input->dev.parent;
int error;
- error = pm_runtime_get_sync(dev);
- if (error < 0) {
- pm_runtime_put_noidle(dev);
+ error = pm_runtime_resume_and_get(dev);
+ if (error)
return error;
- }
disable_irq(keypad_data->irq);
@@ -254,9 +250,10 @@ static void omap4_keypad_close(struct input_dev *input)
struct device *dev = input->dev.parent;
int error;
- error = pm_runtime_get_sync(dev);
- if (error < 0)
- pm_runtime_put_noidle(dev);
+ error = pm_runtime_resume_and_get(dev);
+ if (error)
+ dev_err(dev, "%s: pm_runtime_resume_and_get() failed: %d\n",
+ __func__, error);
disable_irq(keypad_data->irq);
omap4_keypad_stop(keypad_data);
@@ -392,10 +389,9 @@ static int omap4_keypad_probe(struct platform_device *pdev)
* Enable clocks for the keypad module so that we can read
* revision register.
*/
- error = pm_runtime_get_sync(dev);
- if (error < 0) {
- dev_err(dev, "pm_runtime_get_sync() failed\n");
- pm_runtime_put_noidle(dev);
+ error = pm_runtime_resume_and_get(dev);
+ if (error) {
+ dev_err(dev, "pm_runtime_resume_and_get() failed\n");
return error;
}
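
The omap4-keypad conversions above rely on the different calling conventions of the two runtime-PM helpers: pm_runtime_get_sync() leaves the usage count raised even when resume fails, whereas pm_runtime_resume_and_get() drops it on failure. A minimal side-by-side sketch (not taken from the driver):

#include <linux/pm_runtime.h>

static int resume_old_style(struct device *dev)
{
	int err = pm_runtime_get_sync(dev);

	if (err < 0) {
		/* get_sync() keeps the usage count raised even on failure. */
		pm_runtime_put_noidle(dev);
		return err;
	}
	return 0;
}

static int resume_new_style(struct device *dev)
{
	/* resume_and_get() drops the usage count itself if resume fails. */
	return pm_runtime_resume_and_get(dev);
}
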
diff --git a/drivers/input/misc/iqs7222.c b/drivers/input/misc/iqs7222.c
index 6b4138771a3f..b2e8097a2e6d 100644
--- a/drivers/input/misc/iqs7222.c
+++ b/drivers/input/misc/iqs7222.c
@@ -40,7 +40,6 @@
#define IQS7222_SLDR_SETUP_2_RES_MASK GENMASK(15, 8)
#define IQS7222_SLDR_SETUP_2_RES_SHIFT 8
#define IQS7222_SLDR_SETUP_2_TOP_SPEED_MASK GENMASK(7, 0)
-#define IQS7222_SLDR_SETUP_3_CHAN_SEL_MASK GENMASK(9, 0)
#define IQS7222_GPIO_SETUP_0_GPIO_EN BIT(0)
@@ -54,6 +53,9 @@
#define IQS7222_SYS_SETUP_ACK_RESET BIT(0)
#define IQS7222_EVENT_MASK_ATI BIT(12)
+#define IQS7222_EVENT_MASK_SLDR BIT(10)
+#define IQS7222_EVENT_MASK_TOUCH BIT(1)
+#define IQS7222_EVENT_MASK_PROX BIT(0)
#define IQS7222_COMMS_HOLD BIT(0)
#define IQS7222_COMMS_ERROR 0xEEEE
@@ -92,11 +94,11 @@ enum iqs7222_reg_key_id {
enum iqs7222_reg_grp_id {
IQS7222_REG_GRP_STAT,
+ IQS7222_REG_GRP_FILT,
IQS7222_REG_GRP_CYCLE,
IQS7222_REG_GRP_GLBL,
IQS7222_REG_GRP_BTN,
IQS7222_REG_GRP_CHAN,
- IQS7222_REG_GRP_FILT,
IQS7222_REG_GRP_SLDR,
IQS7222_REG_GRP_GPIO,
IQS7222_REG_GRP_SYS,
@@ -135,12 +137,12 @@ struct iqs7222_event_desc {
static const struct iqs7222_event_desc iqs7222_kp_events[] = {
{
.name = "event-prox",
- .enable = BIT(0),
+ .enable = IQS7222_EVENT_MASK_PROX,
.reg_key = IQS7222_REG_KEY_PROX,
},
{
.name = "event-touch",
- .enable = BIT(1),
+ .enable = IQS7222_EVENT_MASK_TOUCH,
.reg_key = IQS7222_REG_KEY_TOUCH,
},
};
@@ -556,13 +558,6 @@ static const struct iqs7222_prop_desc iqs7222_props[] = {
.label = "current reference trim",
},
{
- .name = "azoteq,rf-filt-enable",
- .reg_grp = IQS7222_REG_GRP_GLBL,
- .reg_offset = 0,
- .reg_shift = 15,
- .reg_width = 1,
- },
- {
.name = "azoteq,max-counts",
.reg_grp = IQS7222_REG_GRP_GLBL,
.reg_offset = 0,
@@ -1272,9 +1267,22 @@ static int iqs7222_ati_trigger(struct iqs7222_private *iqs7222)
struct i2c_client *client = iqs7222->client;
ktime_t ati_timeout;
u16 sys_status = 0;
- u16 sys_setup = iqs7222->sys_setup[0] & ~IQS7222_SYS_SETUP_ACK_RESET;
+ u16 sys_setup;
int error, i;
+ /*
+ * The reserved fields of the system setup register may have changed
+ * as a result of other registers having been written. As such, read
+ * the register's latest value to avoid unexpected behavior when the
+ * register is written in the loop that follows.
+ */
+ error = iqs7222_read_word(iqs7222, IQS7222_SYS_SETUP, &sys_setup);
+ if (error)
+ return error;
+
+ sys_setup &= ~IQS7222_SYS_SETUP_INTF_MODE_MASK;
+ sys_setup &= ~IQS7222_SYS_SETUP_PWR_MODE_MASK;
+
for (i = 0; i < IQS7222_NUM_RETRIES; i++) {
/*
* Trigger ATI from streaming and normal-power modes so that
@@ -1299,12 +1307,15 @@ static int iqs7222_ati_trigger(struct iqs7222_private *iqs7222)
if (error)
return error;
- if (sys_status & IQS7222_SYS_STATUS_ATI_ACTIVE)
- continue;
+ if (sys_status & IQS7222_SYS_STATUS_RESET)
+ return 0;
if (sys_status & IQS7222_SYS_STATUS_ATI_ERROR)
break;
+ if (sys_status & IQS7222_SYS_STATUS_ATI_ACTIVE)
+ continue;
+
/*
* Use stream-in-touch mode if either slider reports
* absolute position.
@@ -1321,7 +1332,7 @@ static int iqs7222_ati_trigger(struct iqs7222_private *iqs7222)
dev_err(&client->dev,
"ATI attempt %d of %d failed with status 0x%02X, %s\n",
i + 1, IQS7222_NUM_RETRIES, (u8)sys_status,
- i < IQS7222_NUM_RETRIES ? "retrying..." : "stopping");
+ i + 1 < IQS7222_NUM_RETRIES ? "retrying" : "stopping");
}
return -ETIMEDOUT;
@@ -1334,6 +1345,34 @@ static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir)
int error, i, j, k;
/*
+ * Acknowledge reset before writing any registers in case the device
+ * suffers a spurious reset during initialization. Because this step
+ * may change the reserved fields of the second filter beta register,
+ * its cache must be updated.
+ *
+ * Writing the second filter beta register, in turn, may clobber the
+ * system status register. As such, the filter beta register pair is
+ * written first to protect against this hazard.
+ */
+ if (dir == WRITE) {
+ u16 reg = dev_desc->reg_grps[IQS7222_REG_GRP_FILT].base + 1;
+ u16 filt_setup;
+
+ error = iqs7222_write_word(iqs7222, IQS7222_SYS_SETUP,
+ iqs7222->sys_setup[0] |
+ IQS7222_SYS_SETUP_ACK_RESET);
+ if (error)
+ return error;
+
+ error = iqs7222_read_word(iqs7222, reg, &filt_setup);
+ if (error)
+ return error;
+
+ iqs7222->filt_setup[1] &= GENMASK(7, 0);
+ iqs7222->filt_setup[1] |= (filt_setup & ~GENMASK(7, 0));
+ }
+
+ /*
* Take advantage of the stop-bit disable function, if available, to
* save the trouble of having to reopen a communication window after
* each burst read or write.
@@ -1957,8 +1996,8 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, int sldr_index)
int num_chan = dev_desc->reg_grps[IQS7222_REG_GRP_CHAN].num_row;
int ext_chan = rounddown(num_chan, 10);
int count, error, reg_offset, i;
+ u16 *event_mask = &iqs7222->sys_setup[dev_desc->event_offset];
u16 *sldr_setup = iqs7222->sldr_setup[sldr_index];
- u16 *sys_setup = iqs7222->sys_setup;
unsigned int chan_sel[4], val;
error = iqs7222_parse_props(iqs7222, &sldr_node, sldr_index,
@@ -2003,7 +2042,7 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, int sldr_index)
reg_offset = dev_desc->sldr_res < U16_MAX ? 0 : 1;
sldr_setup[0] |= count;
- sldr_setup[3 + reg_offset] &= ~IQS7222_SLDR_SETUP_3_CHAN_SEL_MASK;
+ sldr_setup[3 + reg_offset] &= ~GENMASK(ext_chan - 1, 0);
for (i = 0; i < ARRAY_SIZE(chan_sel); i++) {
sldr_setup[5 + reg_offset + i] = 0;
@@ -2081,17 +2120,19 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, int sldr_index)
sldr_setup[0] |= dev_desc->wheel_enable;
}
+ /*
+ * The absence of a register offset makes it safe to assume the device
+ * supports gestures, each of which is first disabled until explicitly
+ * enabled.
+ */
+ if (!reg_offset)
+ for (i = 0; i < ARRAY_SIZE(iqs7222_sl_events); i++)
+ sldr_setup[9] &= ~iqs7222_sl_events[i].enable;
+
for (i = 0; i < ARRAY_SIZE(iqs7222_sl_events); i++) {
const char *event_name = iqs7222_sl_events[i].name;
struct fwnode_handle *event_node;
- /*
- * The absence of a register offset means the remaining fields
- * in the group represent gesture settings.
- */
- if (iqs7222_sl_events[i].enable && !reg_offset)
- sldr_setup[9] &= ~iqs7222_sl_events[i].enable;
-
event_node = fwnode_get_named_child_node(sldr_node, event_name);
if (!event_node)
continue;
@@ -2104,6 +2145,22 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, int sldr_index)
if (error)
return error;
+ /*
+ * The press/release event does not expose a direct GPIO link,
+ * but one can be emulated by tying each of the participating
+ * channels to the same GPIO.
+ */
+ error = iqs7222_gpio_select(iqs7222, event_node,
+ i ? iqs7222_sl_events[i].enable
+ : sldr_setup[3 + reg_offset],
+ i ? 1568 + sldr_index * 30
+ : sldr_setup[4 + reg_offset]);
+ if (error)
+ return error;
+
+ if (!reg_offset)
+ sldr_setup[9] |= iqs7222_sl_events[i].enable;
+
error = fwnode_property_read_u32(event_node, "linux,code",
&val);
if (error) {
@@ -2115,26 +2172,20 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, int sldr_index)
iqs7222->sl_code[sldr_index][i] = val;
input_set_capability(iqs7222->keypad, EV_KEY, val);
- /*
- * The press/release event is determined based on whether the
- * coordinate field reports 0xFFFF and has no explicit enable
- * control.
- */
- if (!iqs7222_sl_events[i].enable || reg_offset)
- continue;
-
- sldr_setup[9] |= iqs7222_sl_events[i].enable;
-
- error = iqs7222_gpio_select(iqs7222, event_node,
- iqs7222_sl_events[i].enable,
- 1568 + sldr_index * 30);
- if (error)
- return error;
-
if (!dev_desc->event_offset)
continue;
- sys_setup[dev_desc->event_offset] |= BIT(10 + sldr_index);
+ /*
+ * The press/release event is determined based on whether the
+ * coordinate field reports 0xFFFF and solely relies on touch
+ * or proximity interrupts to be unmasked.
+ */
+ if (i && !reg_offset)
+ *event_mask |= (IQS7222_EVENT_MASK_SLDR << sldr_index);
+ else if (sldr_setup[4 + reg_offset] == dev_desc->touch_link)
+ *event_mask |= IQS7222_EVENT_MASK_TOUCH;
+ else
+ *event_mask |= IQS7222_EVENT_MASK_PROX;
}
/*
@@ -2227,11 +2278,6 @@ static int iqs7222_parse_all(struct iqs7222_private *iqs7222)
return error;
}
- sys_setup[0] &= ~IQS7222_SYS_SETUP_INTF_MODE_MASK;
- sys_setup[0] &= ~IQS7222_SYS_SETUP_PWR_MODE_MASK;
-
- sys_setup[0] |= IQS7222_SYS_SETUP_ACK_RESET;
-
return iqs7222_parse_props(iqs7222, NULL, 0, IQS7222_REG_GRP_SYS,
IQS7222_REG_KEY_NONE);
}
@@ -2299,29 +2345,37 @@ static int iqs7222_report(struct iqs7222_private *iqs7222)
input_report_abs(iqs7222->keypad, iqs7222->sl_axis[i],
sldr_pos);
- for (j = 0; j < ARRAY_SIZE(iqs7222_sl_events); j++) {
- u16 mask = iqs7222_sl_events[j].mask;
- u16 val = iqs7222_sl_events[j].val;
+ input_report_key(iqs7222->keypad, iqs7222->sl_code[i][0],
+ sldr_pos < dev_desc->sldr_res);
- if (!iqs7222_sl_events[j].enable) {
- input_report_key(iqs7222->keypad,
- iqs7222->sl_code[i][j],
- sldr_pos < dev_desc->sldr_res);
- continue;
- }
+ /*
+ * A maximum resolution indicates the device does not support
+ * gestures, in which case the remaining fields are ignored.
+ */
+ if (dev_desc->sldr_res == U16_MAX)
+ continue;
- /*
- * The remaining offsets represent gesture state, and
- * are discarded in the case of IQS7222C because only
- * absolute position is reported.
- */
- if (num_stat < IQS7222_MAX_COLS_STAT)
- continue;
+ if (!(le16_to_cpu(status[1]) & IQS7222_EVENT_MASK_SLDR << i))
+ continue;
+
+ /*
+ * Skip the press/release event, as it does not have separate
+ * status fields and is handled separately.
+ */
+ for (j = 1; j < ARRAY_SIZE(iqs7222_sl_events); j++) {
+ u16 mask = iqs7222_sl_events[j].mask;
+ u16 val = iqs7222_sl_events[j].val;
input_report_key(iqs7222->keypad,
iqs7222->sl_code[i][j],
(state & mask) == val);
}
+
+ input_sync(iqs7222->keypad);
+
+ for (j = 1; j < ARRAY_SIZE(iqs7222_sl_events); j++)
+ input_report_key(iqs7222->keypad,
+ iqs7222->sl_code[i][j], 0);
}
input_sync(iqs7222->keypad);
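
The iqs7222 ATI trigger above now re-reads the system setup register so that reserved bits written as a side effect of earlier register updates are preserved. The general read-modify-write shape is sketched below; the mask names and field positions are illustrative, not the device's real register map.

#include <stdio.h>
#include <stdint.h>

#define INTF_MODE_MASK	(0x3u << 6)	/* illustrative field positions */
#define PWR_MODE_MASK	(0x3u << 4)

int main(void)
{
	uint16_t sys_setup = 0xA5F3;	/* pretend value read back from the device */

	/* Clear only the fields we intend to control; keep every reserved bit. */
	sys_setup &= ~INTF_MODE_MASK;
	sys_setup &= ~PWR_MODE_MASK;

	printf("value to write back: 0x%04x\n", sys_setup);
	return 0;
}
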
diff --git a/drivers/input/mouse/cyapa_gen6.c b/drivers/input/mouse/cyapa_gen6.c
index 812edfced86e..0caaf3e64215 100644
--- a/drivers/input/mouse/cyapa_gen6.c
+++ b/drivers/input/mouse/cyapa_gen6.c
@@ -57,7 +57,7 @@ struct pip_app_resp_head {
* The value of data_status can be the first byte of data or
* the command status or the unsupported command code depending on the
* requested command code.
- */
+ */
u8 data_status;
} __packed;
diff --git a/drivers/input/mouse/gpio_mouse.c b/drivers/input/mouse/gpio_mouse.c
index 23507fce3a2b..18ccbd45004a 100644
--- a/drivers/input/mouse/gpio_mouse.c
+++ b/drivers/input/mouse/gpio_mouse.c
@@ -41,7 +41,7 @@ struct gpio_mouse {
/*
* Timer function which is run every scan_ms ms when the device is opened.
- * The dev input variable is set to the the input_dev pointer.
+ * The dev input variable is set to the input_dev pointer.
*/
static void gpio_mouse_scan(struct input_dev *input)
{
diff --git a/drivers/input/serio/gscps2.c b/drivers/input/serio/gscps2.c
index a9065c6ab550..da2c67cb8642 100644
--- a/drivers/input/serio/gscps2.c
+++ b/drivers/input/serio/gscps2.c
@@ -350,6 +350,10 @@ static int __init gscps2_probe(struct parisc_device *dev)
ps2port->port = serio;
ps2port->padev = dev;
ps2port->addr = ioremap(hpa, GSC_STATUS + 4);
+ if (!ps2port->addr) {
+ ret = -ENOMEM;
+ goto fail_nomem;
+ }
spin_lock_init(&ps2port->lock);
gscps2_reset(ps2port);
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 148a7c5fd0e2..4fbec7bbecca 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -67,25 +67,84 @@ static inline void i8042_write_command(int val)
#include <linux/dmi.h>
-static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
+#define SERIO_QUIRK_NOKBD BIT(0)
+#define SERIO_QUIRK_NOAUX BIT(1)
+#define SERIO_QUIRK_NOMUX BIT(2)
+#define SERIO_QUIRK_FORCEMUX BIT(3)
+#define SERIO_QUIRK_UNLOCK BIT(4)
+#define SERIO_QUIRK_PROBE_DEFER BIT(5)
+#define SERIO_QUIRK_RESET_ALWAYS BIT(6)
+#define SERIO_QUIRK_RESET_NEVER BIT(7)
+#define SERIO_QUIRK_DIECT BIT(8)
+#define SERIO_QUIRK_DUMBKBD BIT(9)
+#define SERIO_QUIRK_NOLOOP BIT(10)
+#define SERIO_QUIRK_NOTIMEOUT BIT(11)
+#define SERIO_QUIRK_KBDRESET BIT(12)
+#define SERIO_QUIRK_DRITEK BIT(13)
+#define SERIO_QUIRK_NOPNP BIT(14)
+
+/* Quirk table for different mainboards. Options similar or identical to i8042
+ * module parameters.
+ * ORDERING IS IMPORTANT! The first match will be applied and the rest ignored.
+ * This allows entries to overwrite vendor-wide quirks on a per-device basis.
+ * Where this is irrelevant, entries are sorted case-sensitively by DMI_SYS_VENDOR
+ * and/or DMI_BOARD_VENDOR to make it easier to avoid duplicate entries.
+ */
+static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
{
- /*
- * Arima-Rioworks HDAMB -
- * AUX LOOP command does not raise AUX IRQ
- */
.matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "RIOWORKS"),
- DMI_MATCH(DMI_BOARD_NAME, "HDAMB"),
- DMI_MATCH(DMI_BOARD_VERSION, "Rev E"),
+ DMI_MATCH(DMI_SYS_VENDOR, "ALIENWARE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Sentia"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* ASUS G1S */
.matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc."),
- DMI_MATCH(DMI_BOARD_NAME, "G1S"),
- DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X750LN"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
+ },
+ {
+ /* Asus X450LCP */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_NEVER)
+ },
+ {
+ /* ASUS ZenBook UX425UA */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX425UA"),
},
+ .driver_data = (void *)(SERIO_QUIRK_PROBE_DEFER | SERIO_QUIRK_RESET_NEVER)
+ },
+ {
+ /* ASUS ZenBook UM325UA */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_PROBE_DEFER | SERIO_QUIRK_RESET_NEVER)
+ },
+ /*
+ * On some Asus laptops, just running self tests causes problems.
+ */
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
+ },
+ .driver_data = (void *)(SERIO_QUIRK_RESET_NEVER)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */
+ },
+ .driver_data = (void *)(SERIO_QUIRK_RESET_NEVER)
},
{
/* ASUS P65UP5 - AUX LOOP command does not raise AUX IRQ */
@@ -94,585 +153,689 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
DMI_MATCH(DMI_BOARD_NAME, "P/I-P65UP5"),
DMI_MATCH(DMI_BOARD_VERSION, "REV 2.X"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
+ /* ASUS G1S */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X750LN"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_BOARD_NAME, "G1S"),
+ DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
- DMI_MATCH(DMI_PRODUCT_NAME , "ProLiant"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "8500"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /* Acer Aspire 5710 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
- DMI_MATCH(DMI_PRODUCT_NAME , "ProLiant"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "DL760"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Dell Embedded Box PC 3000 */
+ /* Acer Aspire 7738 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Embedded Box PC 3000"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7738"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* OQO Model 01 */
+ /* Acer Aspire 5536 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "OQO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "ZEPTO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "00"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5536"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* ULI EV4873 - AUX LOOP does not work properly */
+ /*
+ * Acer Aspire 5738z
+ * Touchpad stops working in mux mode when dis- + re-enabled
+ * with the touchpad enable/disable toggle hotkey
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ULI"),
- DMI_MATCH(DMI_PRODUCT_NAME, "EV4873"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "5a"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Microsoft Virtual Machine */
+ /* Acer Aspire One 150 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "VS2005R2"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AOA150"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Medion MAM 2070 */
+ /* Acer Aspire One 532h */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
- DMI_MATCH(DMI_PRODUCT_NAME, "MAM 2070"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "5a"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AO532h"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
- /* Medion Akoya E7225 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Medion"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A114-31"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Blue FB5601 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "blue"),
- DMI_MATCH(DMI_PRODUCT_NAME, "FB5601"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "M606"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A314-31"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Gigabyte M912 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "M912"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "01"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A315-31"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Gigabyte M1022M netbook */
.matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co.,Ltd."),
- DMI_MATCH(DMI_BOARD_NAME, "M1022E"),
- DMI_MATCH(DMI_BOARD_VERSION, "1.02"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-132"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Gigabyte Spring Peak - defines wrong chassis type */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Spring Peak"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-332"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Gigabyte T1005 - defines wrong chassis type ("Other") */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "T1005"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-432"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Gigabyte T1005M/P - defines wrong chassis type ("Other") */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "T1005M/P"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate Spin B118-RN"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
+ /*
+ * Some Wistron based laptops need us to explicitly enable the 'Dritek
+ * keyboard extension' to make their extra keys start generating scancodes.
+ * Originally, this was just confined to older laptops, but a few Acer laptops
+ * have turned up in 2007 that also need this again.
+ */
{
+ /* Acer Aspire 5100 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5100"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
+ /* Acer Aspire 5610 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
- DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5610"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
+ /* Acer Aspire 5630 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"),
- DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5630"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
- { }
-};
-
-/*
- * Some Fujitsu notebooks are having trouble with touchpads if
- * active multiplexing mode is activated. Luckily they don't have
- * external PS/2 ports so we can safely disable it.
- * ... apparently some Toshibas don't like MUX mode either and
- * die horrible death on reboot.
- */
-static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
{
- /* Fujitsu Lifebook P7010/P7010D */
+ /* Acer Aspire 5650 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "P7010"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5650"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
- /* Fujitsu Lifebook P7010 */
+ /* Acer Aspire 5680 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
- DMI_MATCH(DMI_PRODUCT_NAME, "0000000000"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5680"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
- /* Fujitsu Lifebook P5020D */
+ /* Acer Aspire 5720 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook P Series"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5720"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
- /* Fujitsu Lifebook S2000 */
+ /* Acer Aspire 9110 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S Series"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 9110"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
- /* Fujitsu Lifebook S6230 */
+ /* Acer TravelMate 660 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S6230"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 660"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
- /* Fujitsu Lifebook T725 laptop */
+ /* Acer TravelMate 2490 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T725"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2490"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
- /* Fujitsu Lifebook U745 */
+ /* Acer TravelMate 4280 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U745"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4280"),
},
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
- /* Fujitsu T70H */
+ /* Amoi M636/A737 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "FMVLT70H"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "M636/A737 platform"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Fujitsu-Siemens Lifebook T3010 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T3010"),
+ DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Fujitsu-Siemens Lifebook E4010 */
+ /* Compal HEL80I */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E4010"),
+ DMI_MATCH(DMI_SYS_VENDOR, "COMPAL"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HEL80I"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Fujitsu-Siemens Amilo Pro 2010 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
- DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pro V2010"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "8500"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Fujitsu-Siemens Amilo Pro 2030 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
- DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "DL760"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /*
- * No data is coming from the touchscreen unless KBC
- * is in legacy mode.
- */
- /* Panasonic CF-29 */
+ /* Advent 4211 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Matsushita"),
- DMI_MATCH(DMI_PRODUCT_NAME, "CF-29"),
+ DMI_MATCH(DMI_SYS_VENDOR, "DIXONSXP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Advent 4211"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /*
- * HP Pavilion DV4017EA -
- * errors on MUX ports are reported without raising AUXDATA
- * causing "spurious NAK" messages.
- */
+ /* Dell Embedded Box PC 3000 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EA032EA#ABF)"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Embedded Box PC 3000"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /*
- * HP Pavilion ZT1000 -
- * like DV4017EA does not raise AUXERR for errors on MUX ports.
- */
+ /* Dell XPS M1530 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Notebook PC"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "HP Pavilion Notebook ZT1000"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "XPS M1530"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /*
- * HP Pavilion DV4270ca -
- * like DV4017EA does not raise AUXERR for errors on MUX ports.
- */
+ /* Dell Vostro 1510 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EH476UA#ABL)"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro1510"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /* Dell Vostro V13 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P10"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_NOTIMEOUT)
},
{
+ /* Dell Vostro 1320 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "EQUIUM A110"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1320"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
+ /* Dell Vostro 1520 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE C850D"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1520"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
+ /* Dell Vostro 1720 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ALIENWARE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Sentia"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1720"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Sharp Actius MM20 */
+ /* Entroware Proteus */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SHARP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "PC-MM20 Series"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Entroware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS)
},
+ /*
+ * Some Fujitsu notebooks are having trouble with touchpads if
+ * active multiplexing mode is activated. Luckily they don't have
+ * external PS/2 ports so we can safely disable it.
+ * ... apparently some Toshibas don't like MUX mode either and
+ * die a horrible death on reboot.
+ */
{
- /* Sony Vaio FS-115b */
+ /* Fujitsu Lifebook P7010/P7010D */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FS115B"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P7010"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /*
- * Sony Vaio FZ-240E -
- * reset and GET ID commands issued via KBD port are
- * sometimes being delivered to AUX3.
- */
+ /* Fujitsu Lifebook P5020D */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FZ240E"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook P Series"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /*
- * Most (all?) VAIOs do not have external PS/2 ports nor
- * they implement active multiplexing properly, and
- * MUX discovery usually messes up keyboard/touchpad.
- */
+ /* Fujitsu Lifebook S2000 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_BOARD_NAME, "VAIO"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S Series"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Amoi M636/A737 */
+ /* Fujitsu Lifebook S6230 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "M636/A737 platform"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S6230"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Lenovo 3000 n100 */
+ /* Fujitsu Lifebook T725 laptop */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "076804U"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T725"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_NOTIMEOUT)
},
{
- /* Lenovo XiaoXin Air 12 */
+ /* Fujitsu Lifebook U745 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "80UN"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U745"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /* Fujitsu T70H */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "FMVLT70H"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Acer Aspire 5710 */
+ /* Fujitsu A544 laptop */
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1111138 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK A544"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOTIMEOUT)
},
{
- /* Acer Aspire 7738 */
+ /* Fujitsu AH544 laptop */
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7738"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK AH544"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOTIMEOUT)
},
{
- /* Gericom Bellagio */
+ /* Fujitsu U574 laptop */
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
- DMI_MATCH(DMI_PRODUCT_NAME, "N34AS6"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOTIMEOUT)
},
{
- /* IBM 2656 */
+ /* Fujitsu UH554 laptop */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
- DMI_MATCH(DMI_PRODUCT_NAME, "2656"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK UH544"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOTIMEOUT)
},
{
- /* Dell XPS M1530 */
+ /* Fujitsu Lifebook P7010 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "XPS M1530"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "0000000000"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Compal HEL80I */
+ /* Fujitsu-Siemens Lifebook T3010 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "COMPAL"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HEL80I"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T3010"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Dell Vostro 1510 */
+ /* Fujitsu-Siemens Lifebook E4010 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro1510"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E4010"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Acer Aspire 5536 */
+ /* Fujitsu-Siemens Amilo Pro 2010 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5536"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pro V2010"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Dell Vostro V13 */
+ /* Fujitsu-Siemens Amilo Pro 2030 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Newer HP Pavilion dv4 models */
+ /* Gigabyte M912 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "M912"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "01"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Asus X450LCP */
+ /* Gigabyte Spring Peak - defines wrong chassis type */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"),
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Spring Peak"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Avatar AVIU-145A6 */
+ /* Gigabyte T1005 - defines wrong chassis type ("Other") */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
- DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"),
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "T1005"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* TUXEDO BU1406 */
+ /* Gigabyte T1005M/P - defines wrong chassis type ("Other") */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
- DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "T1005M/P"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
+ /*
+ * Some laptops need a keyboard reset before probing for the trackpad to get
+ * it detected, initialised and finally working.
+ */
{
- /* Lenovo LaVie Z */
+ /* Gigabyte P35 v2 - Elantech touchpad */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"),
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P35V2"),
},
+ .driver_data = (void *)(SERIO_QUIRK_KBDRESET)
+ },
+ {
+ /* Aorus branded Gigabyte X3 Plus - Elantech touchpad */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X3"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_KBDRESET)
},
{
- /*
- * Acer Aspire 5738z
- * Touchpad stops working in mux mode when dis- + re-enabled
- * with the touchpad enable/disable toggle hotkey
- */
+ /* Gigabyte P34 - Elantech touchpad */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"),
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
},
+ .driver_data = (void *)(SERIO_QUIRK_KBDRESET)
},
{
- /* Entroware Proteus */
+ /* Gigabyte P57 - Elantech touchpad */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Entroware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"),
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P57"),
},
+ .driver_data = (void *)(SERIO_QUIRK_KBDRESET)
+ },
+ {
+ /* Gericom Bellagio */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N34AS6"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
+ },
+ {
+ /* Gigabyte M1022M netbook */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co.,Ltd."),
+ DMI_MATCH(DMI_BOARD_NAME, "M1022E"),
+ DMI_MATCH(DMI_BOARD_VERSION, "1.02"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
- { }
-};
-
-static const struct dmi_system_id i8042_dmi_forcemux_table[] __initconst = {
{
/*
- * Sony Vaio VGN-CS series require MUX or the touch sensor
- * buttons will disturb touchpad operation
+ * HP Pavilion DV4017EA -
+ * errors on MUX ports are reported without raising AUXDATA
+ * causing "spurious NAK" messages.
*/
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-CS"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EA032EA#ABF)"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
- { }
-};
-
-/*
- * On some Asus laptops, just running self tests cause problems.
- */
-static const struct dmi_system_id i8042_dmi_noselftest_table[] = {
{
+ /*
+ * HP Pavilion ZT1000 -
+ * like DV4017EA does not raise AUXERR for errors on MUX ports.
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Notebook PC"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "HP Pavilion Notebook ZT1000"),
},
- }, {
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
+ },
+ {
+ /*
+ * HP Pavilion DV4270ca -
+ * like DV4017EA does not raise AUXERR for errors on MUX ports.
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EH476UA#ABL)"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
- { }
-};
-static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
{
- /* MSI Wind U-100 */
+ /* Newer HP Pavilion dv4 models */
.matches = {
- DMI_MATCH(DMI_BOARD_NAME, "U-100"),
- DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_NOTIMEOUT)
},
{
- /* LG Electronics X110 */
+ /* IBM 2656 */
.matches = {
- DMI_MATCH(DMI_BOARD_NAME, "X110"),
- DMI_MATCH(DMI_BOARD_VENDOR, "LG Electronics Inc."),
+ DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "2656"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Acer Aspire One 150 */
+ /* Avatar AVIU-145A6 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "AOA150"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /* Intel MBO Desktop D845PESV */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A114-31"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "D845PESV"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOPNP)
},
{
+ /*
+ * Intel NUC D54250WYK - does not have i8042 controller but
+ * declares PS/2 devices in DSDT.
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A314-31"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "D54250WYK"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOPNP)
},
{
+ /* Lenovo 3000 n100 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A315-31"),
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "076804U"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /* Lenovo XiaoXin Air 12 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-132"),
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "80UN"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /* Lenovo LaVie Z */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-332"),
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /* Lenovo Ideapad U455 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-432"),
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20046"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
+ /* Lenovo ThinkPad L460 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate Spin B118-RN"),
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L460"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Advent 4211 */
+ /* Lenovo ThinkPad Twist S230u */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "DIXONSXP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Advent 4211"),
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "33474HU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
+ },
+ {
+ /* LG Electronics X110 */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LG Electronics Inc."),
+ DMI_MATCH(DMI_BOARD_NAME, "X110"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
/* Medion Akoya Mini E1210 */
@@ -680,6 +843,7 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
DMI_MATCH(DMI_PRODUCT_NAME, "E1210"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
/* Medion Akoya E1222 */
@@ -687,331 +851,434 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
DMI_MATCH(DMI_PRODUCT_NAME, "E122X"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
{
- /* Mivvy M310 */
+ /* MSI Wind U-100 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "VIOOO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "N10"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
+ DMI_MATCH(DMI_BOARD_NAME, "U-100"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOPNP)
},
{
- /* Dell Vostro 1320 */
+ /*
+ * No data is coming from the touchscreen unless KBC
+ * is in legacy mode.
+ */
+ /* Panasonic CF-29 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1320"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Matsushita"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CF-29"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Dell Vostro 1520 */
+ /* Medion Akoya E7225 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1520"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Medion"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Dell Vostro 1720 */
+ /* Microsoft Virtual Machine */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1720"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "VS2005R2"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Lenovo Ideapad U455 */
+ /* Medion MAM 2070 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "20046"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MAM 2070"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "5a"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Lenovo ThinkPad L460 */
+ /* TUXEDO BU1406 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L460"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Clevo P650RS, 650RP6, Sager NP8152-S, and others */
+ /* OQO Model 01 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
- DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"),
+ DMI_MATCH(DMI_SYS_VENDOR, "OQO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZEPTO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "00"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Lenovo ThinkPad Twist S230u */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "33474HU"),
+ DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* Entroware Proteus */
+ /* Acer Aspire 5 A515 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Entroware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "PK"),
+ DMI_MATCH(DMI_BOARD_NAME, "Grumpy_PK"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOPNP)
},
- { }
-};
-
-#ifdef CONFIG_PNP
-static const struct dmi_system_id __initconst i8042_dmi_nopnp_table[] = {
{
- /* Intel MBO Desktop D845PESV */
+ /* ULI EV4873 - AUX LOOP does not work properly */
.matches = {
- DMI_MATCH(DMI_BOARD_NAME, "D845PESV"),
- DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_SYS_VENDOR, "ULI"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "EV4873"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "5a"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
/*
- * Intel NUC D54250WYK - does not have i8042 controller but
- * declares PS/2 devices in DSDT.
+ * Arima-Rioworks HDAMB -
+ * AUX LOOP command does not raise AUX IRQ
*/
.matches = {
- DMI_MATCH(DMI_BOARD_NAME, "D54250WYK"),
- DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "RIOWORKS"),
+ DMI_MATCH(DMI_BOARD_NAME, "HDAMB"),
+ DMI_MATCH(DMI_BOARD_VERSION, "Rev E"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
{
- /* MSI Wind U-100 */
+ /* Sharp Actius MM20 */
.matches = {
- DMI_MATCH(DMI_BOARD_NAME, "U-100"),
- DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
+ DMI_MATCH(DMI_SYS_VENDOR, "SHARP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PC-MM20 Series"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
- /* Acer Aspire 5 A515 */
+ /*
+ * Sony Vaio FZ-240E -
+ * reset and GET ID commands issued via KBD port are
+ * sometimes being delivered to AUX3.
+ */
.matches = {
- DMI_MATCH(DMI_BOARD_NAME, "Grumpy_PK"),
- DMI_MATCH(DMI_BOARD_VENDOR, "PK"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FZ240E"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
+ },
+ {
+ /*
+ * Most (all?) VAIOs do not have external PS/2 ports, nor do
+ * they implement active multiplexing properly, and
+ * MUX discovery usually messes up the keyboard/touchpad.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "VAIO"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
- { }
-};
-
-static const struct dmi_system_id __initconst i8042_dmi_laptop_table[] = {
{
+ /* Sony Vaio FS-115b */
.matches = {
- DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FS115B"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /*
+ * Sony Vaio VGN-CS series require MUX or the touch sensor
+ * buttons will disturb touchpad operation
+ */
.matches = {
- DMI_MATCH(DMI_CHASSIS_TYPE, "9"), /* Laptop */
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-CS"),
},
+ .driver_data = (void *)(SERIO_QUIRK_FORCEMUX)
},
{
.matches = {
- DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P10"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
.matches = {
- DMI_MATCH(DMI_CHASSIS_TYPE, "14"), /* Sub-Notebook */
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "EQUIUM A110"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
- { }
-};
-#endif
-
-static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
{
- /* Dell Vostro V13 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE C850D"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
+ /*
+ * A lot of modern Clevo barebones have touchpad and/or keyboard issues
+ * after suspend fixable with nomux + reset + noloop + nopnp. Luckily,
+ * none of them have an external PS/2 port so this can safely be set for
+ * all of them. These two are based on a Clevo design, but have the
+ * board_name changed.
+ */
{
- /* Newer HP Pavilion dv4 models */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "TUXEDO"),
+ DMI_MATCH(DMI_BOARD_NAME, "AURA1501"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Fujitsu A544 laptop */
- /* https://bugzilla.redhat.com/show_bug.cgi?id=1111138 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK A544"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "TUXEDO"),
+ DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Fujitsu AH544 laptop */
- /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
+ /* Mivvy M310 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK AH544"),
+ DMI_MATCH(DMI_SYS_VENDOR, "VIOOO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N10"),
},
+ .driver_data = (void *)(SERIO_QUIRK_RESET_ALWAYS)
},
+ /*
+ * Some laptops need keyboard reset before probing for the trackpad to get
+ * it detected, initialised & finally work.
+ */
{
- /* Fujitsu Lifebook T725 laptop */
+ /* Schenker XMG C504 - Elantech touchpad */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T725"),
+ DMI_MATCH(DMI_SYS_VENDOR, "XMG"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C504"),
},
+ .driver_data = (void *)(SERIO_QUIRK_KBDRESET)
},
{
- /* Fujitsu U574 laptop */
- /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
+ /* Blue FB5601 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"),
+ DMI_MATCH(DMI_SYS_VENDOR, "blue"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "FB5601"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "M606"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
+ /*
+ * A lot of modern Clevo barebones have touchpad and/or keyboard issues
+ * after suspend fixable with nomux + reset + noloop + nopnp. Luckily,
+ * none of them have an external PS/2 port so this can safely be set for
+ * all of them.
+ * Clevo barebones come with board_vendor and/or system_vendor set to
+ * either the very generic string "Notebook" or a different value
+ * for each individual reseller. The only somewhat universal way to
+ * identify them is by board_name.
+ */
{
- /* Fujitsu UH554 laptop */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK UH544"),
+ DMI_MATCH(DMI_BOARD_NAME, "LAPQC71A"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
- { }
-};
-
-/*
- * Some Wistron based laptops need us to explicitly enable the 'Dritek
- * keyboard extension' to make their extra keys start generating scancodes.
- * Originally, this was just confined to older laptops, but a few Acer laptops
- * have turned up in 2007 that also need this again.
- */
-static const struct dmi_system_id __initconst i8042_dmi_dritek_table[] = {
{
- /* Acer Aspire 5100 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5100"),
+ DMI_MATCH(DMI_BOARD_NAME, "LAPQC71B"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Acer Aspire 5610 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5610"),
+ DMI_MATCH(DMI_BOARD_NAME, "N140CU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Acer Aspire 5630 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5630"),
+ DMI_MATCH(DMI_BOARD_NAME, "N141CU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Acer Aspire 5650 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5650"),
+ DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Acer Aspire 5680 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5680"),
+ DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
+ /*
+ * At least one modern Clevo barebone has the touchpad connected both
+ * via PS/2 and i2c interface. This causes a race condition between the
+ * psmouse and i2c-hid driver. Since the full capability of the touchpad
+ * is available via the i2c interface and the device has no external
+ * PS/2 port, it is safe to just ignore all PS/2 mice here to avoid
+ * this issue. The known affected device is the
+ * TUXEDO InfinityBook S17 Gen6 / Clevo NS70MU which comes with one of
+ * the two different dmi strings below. NS50MU is not a typo!
+ */
{
- /* Acer Aspire 5720 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5720"),
+ DMI_MATCH(DMI_BOARD_NAME, "NS50MU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOAUX | SERIO_QUIRK_NOMUX |
+ SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP |
+ SERIO_QUIRK_NOPNP)
},
{
- /* Acer Aspire 9110 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 9110"),
+ DMI_MATCH(DMI_BOARD_NAME, "NS50_70MU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOAUX | SERIO_QUIRK_NOMUX |
+ SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP |
+ SERIO_QUIRK_NOPNP)
},
{
- /* Acer TravelMate 660 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 660"),
+ DMI_MATCH(DMI_BOARD_NAME, "NJ50_70CU"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Acer TravelMate 2490 */
+ /*
+ * This is only a partial board_name and might be followed by
+ * another letter or number. DMI_MATCH however does do partial
+ * matching.
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2490"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P65xH"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Acer TravelMate 4280 */
+ /* Clevo P650RS, 650RP6, Sager NP8152-S, and others */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4280"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
- { }
-};
-
-/*
- * Some laptops need keyboard reset before probing for the trackpad to get
- * it detected, initialised & finally work.
- */
-static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
{
- /* Gigabyte P35 v2 - Elantech touchpad */
+ /*
+ * This is only a partial board_name and might be followed by
+ * another letter or number. DMI_MATCH however does do partial
+ * matching.
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "P35V2"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P65_P67H"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
- {
- /* Aorus branded Gigabyte X3 Plus - Elantech touchpad */
+ {
+ /*
+ * This is only a partial board_name and might be followed by
+ * another letter or number. DMI_MATCH however does do partial
+ * matching.
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "X3"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P65_67RP"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Gigabyte P34 - Elantech touchpad */
+ /*
+ * This is only a partial board_name and might be followed by
+ * another letter or number. DMI_MATCH however does do partial
+ * matching.
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P65_67RS"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Gigabyte P57 - Elantech touchpad */
+ /*
+ * This is only a partial board_name and might be followed by
+ * another letter or number. DMI_MATCH however does do partial
+ * matching.
+ */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
- DMI_MATCH(DMI_PRODUCT_NAME, "P57"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P67xRP"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
- /* Schenker XMG C504 - Elantech touchpad */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "XMG"),
- DMI_MATCH(DMI_PRODUCT_NAME, "C504"),
+ DMI_MATCH(DMI_BOARD_NAME, "PB50_70DFx,DDx"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "X170SM"),
},
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "X170KM-G"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{ }
};
-static const struct dmi_system_id i8042_dmi_probe_defer_table[] __initconst = {
+#ifdef CONFIG_PNP
+static const struct dmi_system_id i8042_dmi_laptop_table[] __initconst = {
{
- /* ASUS ZenBook UX425UA */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX425UA"),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */
},
},
{
- /* ASUS ZenBook UM325UA */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "9"), /* Laptop */
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_CHASSIS_TYPE, "14"), /* Sub-Notebook */
},
},
{ }
};
+#endif
#endif /* CONFIG_X86 */
@@ -1167,11 +1434,6 @@ static int __init i8042_pnp_init(void)
bool pnp_data_busted = false;
int err;
-#ifdef CONFIG_X86
- if (dmi_check_system(i8042_dmi_nopnp_table))
- i8042_nopnp = true;
-#endif
-
if (i8042_nopnp) {
pr_info("PNP detection disabled\n");
return 0;
@@ -1275,6 +1537,59 @@ static inline int i8042_pnp_init(void) { return 0; }
static inline void i8042_pnp_exit(void) { }
#endif /* CONFIG_PNP */
+
+#ifdef CONFIG_X86
+static void __init i8042_check_quirks(void)
+{
+ const struct dmi_system_id *device_quirk_info;
+ uintptr_t quirks;
+
+ device_quirk_info = dmi_first_match(i8042_dmi_quirk_table);
+ if (!device_quirk_info)
+ return;
+
+ quirks = (uintptr_t)device_quirk_info->driver_data;
+
+ if (quirks & SERIO_QUIRK_NOKBD)
+ i8042_nokbd = true;
+ if (quirks & SERIO_QUIRK_NOAUX)
+ i8042_noaux = true;
+ if (quirks & SERIO_QUIRK_NOMUX)
+ i8042_nomux = true;
+ if (quirks & SERIO_QUIRK_FORCEMUX)
+ i8042_nomux = false;
+ if (quirks & SERIO_QUIRK_UNLOCK)
+ i8042_unlock = true;
+ if (quirks & SERIO_QUIRK_PROBE_DEFER)
+ i8042_probe_defer = true;
+ /* Honor module parameter when value is not default */
+ if (i8042_reset == I8042_RESET_DEFAULT) {
+ if (quirks & SERIO_QUIRK_RESET_ALWAYS)
+ i8042_reset = I8042_RESET_ALWAYS;
+ if (quirks & SERIO_QUIRK_RESET_NEVER)
+ i8042_reset = I8042_RESET_NEVER;
+ }
+ if (quirks & SERIO_QUIRK_DIRECT)
+ i8042_direct = true;
+ if (quirks & SERIO_QUIRK_DUMBKBD)
+ i8042_dumbkbd = true;
+ if (quirks & SERIO_QUIRK_NOLOOP)
+ i8042_noloop = true;
+ if (quirks & SERIO_QUIRK_NOTIMEOUT)
+ i8042_notimeout = true;
+ if (quirks & SERIO_QUIRK_KBDRESET)
+ i8042_kbdreset = true;
+ if (quirks & SERIO_QUIRK_DRITEK)
+ i8042_dritek = true;
+#ifdef CONFIG_PNP
+ if (quirks & SERIO_QUIRK_NOPNP)
+ i8042_nopnp = true;
+#endif
+}
+#else
+static inline void i8042_check_quirks(void) {}
+#endif
+
static int __init i8042_platform_init(void)
{
int retval;
@@ -1297,45 +1612,42 @@ static int __init i8042_platform_init(void)
i8042_kbd_irq = I8042_MAP_IRQ(1);
i8042_aux_irq = I8042_MAP_IRQ(12);
- retval = i8042_pnp_init();
- if (retval)
- return retval;
-
#if defined(__ia64__)
- i8042_reset = I8042_RESET_ALWAYS;
+ i8042_reset = I8042_RESET_ALWAYS;
#endif
+ i8042_check_quirks();
+
+ pr_debug("Active quirks (empty means none):%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ i8042_nokbd ? " nokbd" : "",
+ i8042_noaux ? " noaux" : "",
+ i8042_nomux ? " nomux" : "",
+ i8042_unlock ? " unlock" : "",
+ i8042_probe_defer ? "probe_defer" : "",
+ i8042_reset == I8042_RESET_DEFAULT ?
+ "" : i8042_reset == I8042_RESET_ALWAYS ?
+ " reset_always" : " reset_never",
+ i8042_direct ? " direct" : "",
+ i8042_dumbkbd ? " dumbkbd" : "",
+ i8042_noloop ? " noloop" : "",
+ i8042_notimeout ? " notimeout" : "",
+ i8042_kbdreset ? " kbdreset" : "",
#ifdef CONFIG_X86
- /* Honor module parameter when value is not default */
- if (i8042_reset == I8042_RESET_DEFAULT) {
- if (dmi_check_system(i8042_dmi_reset_table))
- i8042_reset = I8042_RESET_ALWAYS;
-
- if (dmi_check_system(i8042_dmi_noselftest_table))
- i8042_reset = I8042_RESET_NEVER;
- }
-
- if (dmi_check_system(i8042_dmi_noloop_table))
- i8042_noloop = true;
-
- if (dmi_check_system(i8042_dmi_nomux_table))
- i8042_nomux = true;
-
- if (dmi_check_system(i8042_dmi_forcemux_table))
- i8042_nomux = false;
-
- if (dmi_check_system(i8042_dmi_notimeout_table))
- i8042_notimeout = true;
-
- if (dmi_check_system(i8042_dmi_dritek_table))
- i8042_dritek = true;
-
- if (dmi_check_system(i8042_dmi_kbdreset_table))
- i8042_kbdreset = true;
+ i8042_dritek ? " dritek" : "",
+#else
+ "",
+#endif
+#ifdef CONFIG_PNP
+ i8042_nopnp ? " nopnp" : "");
+#else
+ "");
+#endif
- if (dmi_check_system(i8042_dmi_probe_defer_table))
- i8042_probe_defer = true;
+ retval = i8042_pnp_init();
+ if (retval)
+ return retval;
+#ifdef CONFIG_X86
/*
* A20 was already enabled during early kernel init. But some buggy
* BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to
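The i8042 hunks above collapse roughly a dozen per-quirk DMI tables into a single i8042_dmi_quirk_table whose entries carry an OR-ed bitmask in driver_data, decoded once by i8042_check_quirks() after dmi_first_match(). The following is a minimal, userspace-style sketch of that pattern; the names (QUIRK_*, struct quirk_entry, apply_quirks) are illustrative stand-ins, not kernel APIs.

/*
 * Sketch of the single-table, bit-flag quirk pattern: one table, one
 * flags word per entry, decoded once after the first match.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define QUIRK_NOMUX        (1 << 0)
#define QUIRK_RESET_ALWAYS (1 << 1)
#define QUIRK_NOLOOP       (1 << 2)
#define QUIRK_NOPNP        (1 << 3)

struct quirk_entry {
	const char *board_name;	/* stands in for the DMI match fields */
	uintptr_t driver_data;	/* OR-ed quirk flags, as in the table above */
};

static const struct quirk_entry quirk_table[] = {
	{ "LAPQC71A", QUIRK_NOMUX | QUIRK_RESET_ALWAYS | QUIRK_NOLOOP | QUIRK_NOPNP },
	{ "U-100",    QUIRK_RESET_ALWAYS | QUIRK_NOPNP },
	{ NULL, 0 },
};

static void apply_quirks(const char *board, bool *nomux, bool *reset_always)
{
	const struct quirk_entry *e;

	for (e = quirk_table; e->board_name; e++) {
		if (strcmp(e->board_name, board))
			continue;
		/* decode the packed flags exactly once, like i8042_check_quirks() */
		if (e->driver_data & QUIRK_NOMUX)
			*nomux = true;
		if (e->driver_data & QUIRK_RESET_ALWAYS)
			*reset_always = true;
		return;
	}
}

int main(void)
{
	bool nomux = false, reset_always = false;

	apply_quirks("U-100", &nomux, &reset_always);
	printf("nomux=%d reset_always=%d\n", nomux, reset_always);
	return 0;
}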
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index bb2e1cbffba7..82beddb28761 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -24,6 +24,7 @@
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/ratelimit.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
@@ -47,6 +48,8 @@
#define M09_REGISTER_NUM_X 0x94
#define M09_REGISTER_NUM_Y 0x95
+#define M12_REGISTER_REPORT_RATE 0x88
+
#define EV_REGISTER_THRESHOLD 0x40
#define EV_REGISTER_GAIN 0x41
#define EV_REGISTER_OFFSET_Y 0x45
@@ -127,9 +130,12 @@ struct edt_ft5x06_ts_data {
int max_support_points;
char name[EDT_NAME_LEN];
+ char fw_version[EDT_NAME_LEN];
struct edt_reg_addr reg_addr;
enum edt_ver version;
+ unsigned int crc_errors;
+ unsigned int header_errors;
};
struct edt_i2c_chip_data {
@@ -178,6 +184,7 @@ static bool edt_ft5x06_ts_check_crc(struct edt_ft5x06_ts_data *tsdata,
crc ^= buf[i];
if (crc != buf[buflen-1]) {
+ tsdata->crc_errors++;
dev_err_ratelimited(&tsdata->client->dev,
"crc error: 0x%02x expected, got 0x%02x\n",
crc, buf[buflen-1]);
@@ -235,6 +242,7 @@ static irqreturn_t edt_ft5x06_ts_isr(int irq, void *dev_id)
if (tsdata->version == EDT_M06) {
if (rdbuf[0] != 0xaa || rdbuf[1] != 0xaa ||
rdbuf[2] != datalen) {
+ tsdata->header_errors++;
dev_err_ratelimited(dev,
"Unexpected header: %02x%02x%02x!\n",
rdbuf[0], rdbuf[1], rdbuf[2]);
@@ -523,9 +531,55 @@ static EDT_ATTR(offset_y, S_IWUSR | S_IRUGO, NO_REGISTER, NO_REGISTER,
/* m06: range 20 to 80, m09: range 0 to 30, m12: range 1 to 255... */
static EDT_ATTR(threshold, S_IWUSR | S_IRUGO, WORK_REGISTER_THRESHOLD,
M09_REGISTER_THRESHOLD, EV_REGISTER_THRESHOLD, 0, 255);
-/* m06: range 3 to 14, m12: (0x64: 100Hz) */
+/* m06: range 3 to 14, m12: range 1 to 255 */
static EDT_ATTR(report_rate, S_IWUSR | S_IRUGO, WORK_REGISTER_REPORT_RATE,
- NO_REGISTER, NO_REGISTER, 0, 255);
+ M12_REGISTER_REPORT_RATE, NO_REGISTER, 0, 255);
+
+static ssize_t model_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client);
+
+ return sysfs_emit(buf, "%s\n", tsdata->name);
+}
+
+static DEVICE_ATTR_RO(model);
+
+static ssize_t fw_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client);
+
+ return sysfs_emit(buf, "%s\n", tsdata->fw_version);
+}
+
+static DEVICE_ATTR_RO(fw_version);
+
+/* m06 only */
+static ssize_t header_errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client);
+
+ return sysfs_emit(buf, "%d\n", tsdata->header_errors);
+}
+
+static DEVICE_ATTR_RO(header_errors);
+
+/* m06 only */
+static ssize_t crc_errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client);
+
+ return sysfs_emit(buf, "%d\n", tsdata->crc_errors);
+}
+
+static DEVICE_ATTR_RO(crc_errors);
static struct attribute *edt_ft5x06_attrs[] = {
&edt_ft5x06_attr_gain.dattr.attr,
@@ -534,6 +588,10 @@ static struct attribute *edt_ft5x06_attrs[] = {
&edt_ft5x06_attr_offset_y.dattr.attr,
&edt_ft5x06_attr_threshold.dattr.attr,
&edt_ft5x06_attr_report_rate.dattr.attr,
+ &dev_attr_model.attr,
+ &dev_attr_fw_version.attr,
+ &dev_attr_header_errors.attr,
+ &dev_attr_crc_errors.attr,
NULL
};
@@ -820,13 +878,13 @@ static void edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata)
#endif /* CONFIG_DEBUGFS */
static int edt_ft5x06_ts_identify(struct i2c_client *client,
- struct edt_ft5x06_ts_data *tsdata,
- char *fw_version)
+ struct edt_ft5x06_ts_data *tsdata)
{
u8 rdbuf[EDT_NAME_LEN];
char *p;
int error;
char *model_name = tsdata->name;
+ char *fw_version = tsdata->fw_version;
/* see what we find if we assume it is a M06 *
* if we get less than EDT_NAME_LEN, we don't want
@@ -1030,7 +1088,8 @@ static void edt_ft5x06_ts_set_regs(struct edt_ft5x06_ts_data *tsdata)
case EDT_M09:
case EDT_M12:
reg_addr->reg_threshold = M09_REGISTER_THRESHOLD;
- reg_addr->reg_report_rate = NO_REGISTER;
+ reg_addr->reg_report_rate = tsdata->version == EDT_M12 ?
+ M12_REGISTER_REPORT_RATE : NO_REGISTER;
reg_addr->reg_gain = M09_REGISTER_GAIN;
reg_addr->reg_offset = M09_REGISTER_OFFSET;
reg_addr->reg_offset_x = NO_REGISTER;
@@ -1081,7 +1140,7 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
struct input_dev *input;
unsigned long irq_flags;
int error;
- char fw_version[EDT_NAME_LEN];
+ u32 report_rate;
dev_dbg(&client->dev, "probing for EDT FT5x06 I2C\n");
@@ -1194,7 +1253,7 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
tsdata->input = input;
tsdata->factory_mode = false;
- error = edt_ft5x06_ts_identify(client, tsdata, fw_version);
+ error = edt_ft5x06_ts_identify(client, tsdata);
if (error) {
dev_err(&client->dev, "touchscreen probe failed\n");
return error;
@@ -1210,9 +1269,30 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
edt_ft5x06_ts_get_defaults(&client->dev, tsdata);
edt_ft5x06_ts_get_parameters(tsdata);
+ if (tsdata->reg_addr.reg_report_rate != NO_REGISTER &&
+ !device_property_read_u32(&client->dev,
+ "report-rate-hz", &report_rate)) {
+ if (tsdata->version == EDT_M06)
+ tsdata->report_rate = clamp_val(report_rate, 30, 140);
+ else
+ tsdata->report_rate = clamp_val(report_rate, 1, 255);
+
+ if (report_rate != tsdata->report_rate)
+ dev_warn(&client->dev,
+ "report-rate %dHz is unsupported, use %dHz\n",
+ report_rate, tsdata->report_rate);
+
+ if (tsdata->version == EDT_M06)
+ tsdata->report_rate /= 10;
+
+ edt_ft5x06_register_write(tsdata,
+ tsdata->reg_addr.reg_report_rate,
+ tsdata->report_rate);
+ }
+
dev_dbg(&client->dev,
"Model \"%s\", Rev. \"%s\", %dx%d sensors\n",
- tsdata->name, fw_version, tsdata->num_x, tsdata->num_y);
+ tsdata->name, tsdata->fw_version, tsdata->num_x, tsdata->num_y);
input->name = tsdata->name;
input->id.bustype = BUS_I2C;
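The new report-rate-hz handling above clamps the requested rate to what the controller revision supports (30-140 Hz for M06, 1-255 for M09/M12) and, for M06 only, divides by ten before writing the register, which matches the documented 3-14 register range. Below is a small sketch of that arithmetic, with the kernel's clamp_val() modelled by a plain helper.

/* Model of the M06 report-rate clamping and register scaling above. */
#include <stdio.h>

static unsigned int clamp_uint(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : v > hi ? hi : v;
}

static unsigned int m06_report_rate_reg(unsigned int requested_hz)
{
	unsigned int hz = clamp_uint(requested_hz, 30, 140);

	return hz / 10;		/* M06 register is in units of 10 Hz */
}

int main(void)
{
	printf("%u\n", m06_report_rate_reg(200));	/* clamped to 140 -> 14 */
	printf("%u\n", m06_report_rate_reg(60));	/* 60 -> 6 */
	return 0;
}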
diff --git a/drivers/input/touchscreen/exc3000.c b/drivers/input/touchscreen/exc3000.c
index cbe0dd412912..4b7eee01c6aa 100644
--- a/drivers/input/touchscreen/exc3000.c
+++ b/drivers/input/touchscreen/exc3000.c
@@ -220,6 +220,7 @@ static int exc3000_vendor_data_request(struct exc3000_data *data, u8 *request,
{
u8 buf[EXC3000_LEN_VENDOR_REQUEST] = { 0x67, 0x00, 0x42, 0x00, 0x03 };
int ret;
+ unsigned long time_left;
mutex_lock(&data->query_lock);
@@ -233,9 +234,9 @@ static int exc3000_vendor_data_request(struct exc3000_data *data, u8 *request,
goto out_unlock;
if (response) {
- ret = wait_for_completion_timeout(&data->wait_event,
- timeout * HZ);
- if (ret <= 0) {
+ time_left = wait_for_completion_timeout(&data->wait_event,
+ timeout * HZ);
+ if (time_left == 0) {
ret = -ETIMEDOUT;
goto out_unlock;
}
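The exc3000 change stores the wait_for_completion_timeout() result in an unsigned long: that function returns 0 on timeout and the remaining jiffies otherwise, so keeping the result in a signed int and testing `<= 0` misrepresents a value that can never be negative. A tiny sketch of the corrected pattern follows, with a hypothetical wait_stub() standing in for the completion API.

/* Timeout results belong in an unsigned variable tested against zero. */
#include <stdio.h>

static unsigned long wait_stub(unsigned long timeout)
{
	/* pretend the event arrived with half the budget left */
	return timeout / 2;
}

int main(void)
{
	unsigned long time_left = wait_stub(100);

	if (time_left == 0)
		printf("timed out\n");
	else
		printf("%lu ticks to spare\n", time_left);
	return 0;
}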
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index aa45a9fee6a0..d016505fc081 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -822,22 +822,16 @@ static int goodix_resource(struct acpi_resource *ares, void *data)
struct device *dev = &ts->client->dev;
struct acpi_resource_gpio *gpio;
- switch (ares->type) {
- case ACPI_RESOURCE_TYPE_GPIO:
- gpio = &ares->data.gpio;
- if (gpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT) {
- if (ts->gpio_int_idx == -1) {
- ts->gpio_int_idx = ts->gpio_count;
- } else {
- dev_err(dev, "More then one GpioInt resource, ignoring ACPI GPIO resources\n");
- ts->gpio_int_idx = -2;
- }
+ if (acpi_gpio_get_irq_resource(ares, &gpio)) {
+ if (ts->gpio_int_idx == -1) {
+ ts->gpio_int_idx = ts->gpio_count;
+ } else {
+ dev_err(dev, "More then one GpioInt resource, ignoring ACPI GPIO resources\n");
+ ts->gpio_int_idx = -2;
}
ts->gpio_count++;
- break;
- default:
- break;
- }
+ } else if (acpi_gpio_get_io_resource(ares, &gpio))
+ ts->gpio_count++;
return 0;
}
diff --git a/drivers/input/touchscreen/zinitix.c b/drivers/input/touchscreen/zinitix.c
index 8bd03278ad9a..52f9e9eaab14 100644
--- a/drivers/input/touchscreen/zinitix.c
+++ b/drivers/input/touchscreen/zinitix.c
@@ -15,75 +15,75 @@
/* Register Map */
-#define BT541_SWRESET_CMD 0x0000
-#define BT541_WAKEUP_CMD 0x0001
+#define ZINITIX_SWRESET_CMD 0x0000
+#define ZINITIX_WAKEUP_CMD 0x0001
-#define BT541_IDLE_CMD 0x0004
-#define BT541_SLEEP_CMD 0x0005
+#define ZINITIX_IDLE_CMD 0x0004
+#define ZINITIX_SLEEP_CMD 0x0005
-#define BT541_CLEAR_INT_STATUS_CMD 0x0003
-#define BT541_CALIBRATE_CMD 0x0006
-#define BT541_SAVE_STATUS_CMD 0x0007
-#define BT541_SAVE_CALIBRATION_CMD 0x0008
-#define BT541_RECALL_FACTORY_CMD 0x000f
+#define ZINITIX_CLEAR_INT_STATUS_CMD 0x0003
+#define ZINITIX_CALIBRATE_CMD 0x0006
+#define ZINITIX_SAVE_STATUS_CMD 0x0007
+#define ZINITIX_SAVE_CALIBRATION_CMD 0x0008
+#define ZINITIX_RECALL_FACTORY_CMD 0x000f
-#define BT541_THRESHOLD 0x0020
+#define ZINITIX_THRESHOLD 0x0020
-#define BT541_LARGE_PALM_REJECT_AREA_TH 0x003F
+#define ZINITIX_LARGE_PALM_REJECT_AREA_TH 0x003F
-#define BT541_DEBUG_REG 0x0115 /* 0~7 */
+#define ZINITIX_DEBUG_REG 0x0115 /* 0~7 */
-#define BT541_TOUCH_MODE 0x0010
-#define BT541_CHIP_REVISION 0x0011
-#define BT541_FIRMWARE_VERSION 0x0012
+#define ZINITIX_TOUCH_MODE 0x0010
+#define ZINITIX_CHIP_REVISION 0x0011
+#define ZINITIX_FIRMWARE_VERSION 0x0012
#define ZINITIX_USB_DETECT 0x116
-#define BT541_MINOR_FW_VERSION 0x0121
+#define ZINITIX_MINOR_FW_VERSION 0x0121
-#define BT541_VENDOR_ID 0x001C
-#define BT541_HW_ID 0x0014
+#define ZINITIX_VENDOR_ID 0x001C
+#define ZINITIX_HW_ID 0x0014
-#define BT541_DATA_VERSION_REG 0x0013
-#define BT541_SUPPORTED_FINGER_NUM 0x0015
-#define BT541_EEPROM_INFO 0x0018
-#define BT541_INITIAL_TOUCH_MODE 0x0019
+#define ZINITIX_DATA_VERSION_REG 0x0013
+#define ZINITIX_SUPPORTED_FINGER_NUM 0x0015
+#define ZINITIX_EEPROM_INFO 0x0018
+#define ZINITIX_INITIAL_TOUCH_MODE 0x0019
-#define BT541_TOTAL_NUMBER_OF_X 0x0060
-#define BT541_TOTAL_NUMBER_OF_Y 0x0061
+#define ZINITIX_TOTAL_NUMBER_OF_X 0x0060
+#define ZINITIX_TOTAL_NUMBER_OF_Y 0x0061
-#define BT541_DELAY_RAW_FOR_HOST 0x007f
+#define ZINITIX_DELAY_RAW_FOR_HOST 0x007f
-#define BT541_BUTTON_SUPPORTED_NUM 0x00B0
-#define BT541_BUTTON_SENSITIVITY 0x00B2
-#define BT541_DUMMY_BUTTON_SENSITIVITY 0X00C8
+#define ZINITIX_BUTTON_SUPPORTED_NUM 0x00B0
+#define ZINITIX_BUTTON_SENSITIVITY 0x00B2
+#define ZINITIX_DUMMY_BUTTON_SENSITIVITY 0X00C8
-#define BT541_X_RESOLUTION 0x00C0
-#define BT541_Y_RESOLUTION 0x00C1
+#define ZINITIX_X_RESOLUTION 0x00C0
+#define ZINITIX_Y_RESOLUTION 0x00C1
-#define BT541_POINT_STATUS_REG 0x0080
-#define BT541_ICON_STATUS_REG 0x00AA
+#define ZINITIX_POINT_STATUS_REG 0x0080
+#define ZINITIX_ICON_STATUS_REG 0x00AA
-#define BT541_POINT_COORD_REG (BT541_POINT_STATUS_REG + 2)
+#define ZINITIX_POINT_COORD_REG (ZINITIX_POINT_STATUS_REG + 2)
-#define BT541_AFE_FREQUENCY 0x0100
-#define BT541_DND_N_COUNT 0x0122
-#define BT541_DND_U_COUNT 0x0135
+#define ZINITIX_AFE_FREQUENCY 0x0100
+#define ZINITIX_DND_N_COUNT 0x0122
+#define ZINITIX_DND_U_COUNT 0x0135
-#define BT541_RAWDATA_REG 0x0200
+#define ZINITIX_RAWDATA_REG 0x0200
-#define BT541_EEPROM_INFO_REG 0x0018
+#define ZINITIX_EEPROM_INFO_REG 0x0018
-#define BT541_INT_ENABLE_FLAG 0x00f0
-#define BT541_PERIODICAL_INTERRUPT_INTERVAL 0x00f1
+#define ZINITIX_INT_ENABLE_FLAG 0x00f0
+#define ZINITIX_PERIODICAL_INTERRUPT_INTERVAL 0x00f1
-#define BT541_BTN_WIDTH 0x016d
+#define ZINITIX_BTN_WIDTH 0x016d
-#define BT541_CHECKSUM_RESULT 0x012c
+#define ZINITIX_CHECKSUM_RESULT 0x012c
-#define BT541_INIT_FLASH 0x01d0
-#define BT541_WRITE_FLASH 0x01d1
-#define BT541_READ_FLASH 0x01d2
+#define ZINITIX_INIT_FLASH 0x01d0
+#define ZINITIX_WRITE_FLASH 0x01d1
+#define ZINITIX_READ_FLASH 0x01d2
#define ZINITIX_INTERNAL_FLAG_02 0x011e
#define ZINITIX_INTERNAL_FLAG_03 0x011f
@@ -196,13 +196,13 @@ static int zinitix_init_touch(struct bt541_ts_data *bt541)
int i;
int error;
- error = zinitix_write_cmd(client, BT541_SWRESET_CMD);
+ error = zinitix_write_cmd(client, ZINITIX_SWRESET_CMD);
if (error) {
dev_err(&client->dev, "Failed to write reset command\n");
return error;
}
- error = zinitix_write_u16(client, BT541_INT_ENABLE_FLAG, 0x0);
+ error = zinitix_write_u16(client, ZINITIX_INT_ENABLE_FLAG, 0x0);
if (error) {
dev_err(&client->dev,
"Failed to reset interrupt enable flag\n");
@@ -210,32 +210,32 @@ static int zinitix_init_touch(struct bt541_ts_data *bt541)
}
/* initialize */
- error = zinitix_write_u16(client, BT541_X_RESOLUTION,
+ error = zinitix_write_u16(client, ZINITIX_X_RESOLUTION,
bt541->prop.max_x);
if (error)
return error;
- error = zinitix_write_u16(client, BT541_Y_RESOLUTION,
+ error = zinitix_write_u16(client, ZINITIX_Y_RESOLUTION,
bt541->prop.max_y);
if (error)
return error;
- error = zinitix_write_u16(client, BT541_SUPPORTED_FINGER_NUM,
+ error = zinitix_write_u16(client, ZINITIX_SUPPORTED_FINGER_NUM,
MAX_SUPPORTED_FINGER_NUM);
if (error)
return error;
- error = zinitix_write_u16(client, BT541_INITIAL_TOUCH_MODE,
+ error = zinitix_write_u16(client, ZINITIX_INITIAL_TOUCH_MODE,
bt541->zinitix_mode);
if (error)
return error;
- error = zinitix_write_u16(client, BT541_TOUCH_MODE,
+ error = zinitix_write_u16(client, ZINITIX_TOUCH_MODE,
bt541->zinitix_mode);
if (error)
return error;
- error = zinitix_write_u16(client, BT541_INT_ENABLE_FLAG,
+ error = zinitix_write_u16(client, ZINITIX_INT_ENABLE_FLAG,
BIT_PT_CNT_CHANGE | BIT_DOWN | BIT_MOVE |
BIT_UP);
if (error)
@@ -243,7 +243,7 @@ static int zinitix_init_touch(struct bt541_ts_data *bt541)
/* clear queue */
for (i = 0; i < 10; i++) {
- zinitix_write_cmd(client, BT541_CLEAR_INT_STATUS_CMD);
+ zinitix_write_cmd(client, ZINITIX_CLEAR_INT_STATUS_CMD);
udelay(10);
}
@@ -361,7 +361,7 @@ static irqreturn_t zinitix_ts_irq_handler(int irq, void *bt541_handler)
memset(&touch_event, 0, sizeof(struct touch_event));
- error = zinitix_read_data(bt541->client, BT541_POINT_STATUS_REG,
+ error = zinitix_read_data(bt541->client, ZINITIX_POINT_STATUS_REG,
&touch_event, sizeof(struct touch_event));
if (error) {
dev_err(&client->dev, "Failed to read in touchpoint struct\n");
@@ -381,7 +381,7 @@ static irqreturn_t zinitix_ts_irq_handler(int irq, void *bt541_handler)
input_sync(bt541->input_dev);
out:
- zinitix_write_cmd(bt541->client, BT541_CLEAR_INT_STATUS_CMD);
+ zinitix_write_cmd(bt541->client, ZINITIX_CLEAR_INT_STATUS_CMD);
return IRQ_HANDLED;
}
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index c79a0df090c0..5c5cb5bee8b6 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -363,6 +363,16 @@ config ARM_SMMU_QCOM
When running on a Qualcomm platform that has the custom variant
of the ARM SMMU, this needs to be built into the SMMU driver.
+config ARM_SMMU_QCOM_DEBUG
+ bool "ARM SMMU QCOM implementation defined debug support"
+ depends on ARM_SMMU_QCOM
+ help
+ Support for implementation specific debug features in ARM SMMU
+ hardware found in QTI platforms.
+
+ Say Y here to enable debug for issues such as TLB sync timeouts
+ which requires implementation defined register dumps.
+
config ARM_SMMU_V3
tristate "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
depends on ARM64
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 1ab31074f5b3..84e5bb1bf01b 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -13,12 +13,13 @@
extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
-extern void amd_iommu_apply_erratum_63(u16 devid);
+extern void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid);
extern void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
extern int amd_iommu_init_devices(void);
extern void amd_iommu_uninit_devices(void);
extern void amd_iommu_init_notifier(void);
extern int amd_iommu_init_api(void);
+extern void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid);
#ifdef CONFIG_AMD_IOMMU_DEBUGFS
void amd_iommu_debugfs_setup(struct amd_iommu *iommu);
@@ -114,10 +115,17 @@ void amd_iommu_domain_clr_pt_root(struct protection_domain *domain)
amd_iommu_domain_set_pt_root(domain, 0);
}
+static inline int get_pci_sbdf_id(struct pci_dev *pdev)
+{
+ int seg = pci_domain_nr(pdev->bus);
+ u16 devid = pci_dev_id(pdev);
+
+ return PCI_SEG_DEVID_TO_SBDF(seg, devid);
+}
extern bool translation_pre_enabled(struct amd_iommu *iommu);
extern bool amd_iommu_is_attach_deferred(struct device *dev);
-extern int __init add_special_device(u8 type, u8 id, u16 *devid,
+extern int __init add_special_device(u8 type, u8 id, u32 *devid,
bool cmd_line);
#ifdef CONFIG_DMI
@@ -128,4 +136,10 @@ static inline void amd_iommu_apply_ivrs_quirks(void) { }
extern void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
u64 *root, int mode);
+extern struct dev_table_entry *get_dev_table(struct amd_iommu *iommu);
+
+extern u64 amd_iommu_efr;
+extern u64 amd_iommu_efr2;
+
+extern bool amd_iommu_snp_en;
#endif
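get_pci_sbdf_id() and the PCI_SEG_DEVID_TO_SBDF()/PCI_SBDF_TO_*() helpers introduced here pack the PCI segment into bits 31:16 of a 32-bit ID and the familiar bus/device/function devid into bits 15:0. A worked example of that packing, in plain userspace C for illustration only:

/* Pack and unpack a (segment, devid) pair the way the macros above do. */
#include <stdint.h>
#include <stdio.h>

static uint32_t seg_devid_to_sbdf(uint16_t seg, uint16_t devid)
{
	return ((uint32_t)seg << 16) | devid;
}

int main(void)
{
	uint16_t seg = 0x0001;
	uint16_t devid = (0x3a << 8) | (0x00 << 3) | 0x3;	/* 3a:00.3 */
	uint32_t sbdf = seg_devid_to_sbdf(seg, devid);

	printf("sbdf=0x%08x seg=0x%04x devid=0x%04x\n",
	       sbdf, (sbdf >> 16) & 0xffff, sbdf & 0xffff);
	return 0;
}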
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 72d0f5e2f651..5b1019dab328 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -67,6 +67,7 @@
#define MMIO_INTCAPXT_EVT_OFFSET 0x0170
#define MMIO_INTCAPXT_PPR_OFFSET 0x0178
#define MMIO_INTCAPXT_GALOG_OFFSET 0x0180
+#define MMIO_EXT_FEATURES2 0x01A0
#define MMIO_CMD_HEAD_OFFSET 0x2000
#define MMIO_CMD_TAIL_OFFSET 0x2008
#define MMIO_EVT_HEAD_OFFSET 0x2010
@@ -102,6 +103,12 @@
#define FEATURE_GLXVAL_SHIFT 14
#define FEATURE_GLXVAL_MASK (0x03ULL << FEATURE_GLXVAL_SHIFT)
+/* Extended Feature 2 Bits */
+#define FEATURE_SNPAVICSUP_SHIFT 5
+#define FEATURE_SNPAVICSUP_MASK (0x07ULL << FEATURE_SNPAVICSUP_SHIFT)
+#define FEATURE_SNPAVICSUP_GAM(x) \
+ ((x & FEATURE_SNPAVICSUP_MASK) >> FEATURE_SNPAVICSUP_SHIFT == 0x1)
+
/* Note:
* The current driver only support 16-bit PASID.
* Currently, hardware only implement upto 16-bit PASID
@@ -143,27 +150,28 @@
#define EVENT_FLAG_I 0x008
/* feature control bits */
-#define CONTROL_IOMMU_EN 0x00ULL
-#define CONTROL_HT_TUN_EN 0x01ULL
-#define CONTROL_EVT_LOG_EN 0x02ULL
-#define CONTROL_EVT_INT_EN 0x03ULL
-#define CONTROL_COMWAIT_EN 0x04ULL
-#define CONTROL_INV_TIMEOUT 0x05ULL
-#define CONTROL_PASSPW_EN 0x08ULL
-#define CONTROL_RESPASSPW_EN 0x09ULL
-#define CONTROL_COHERENT_EN 0x0aULL
-#define CONTROL_ISOC_EN 0x0bULL
-#define CONTROL_CMDBUF_EN 0x0cULL
-#define CONTROL_PPRLOG_EN 0x0dULL
-#define CONTROL_PPRINT_EN 0x0eULL
-#define CONTROL_PPR_EN 0x0fULL
-#define CONTROL_GT_EN 0x10ULL
-#define CONTROL_GA_EN 0x11ULL
-#define CONTROL_GAM_EN 0x19ULL
-#define CONTROL_GALOG_EN 0x1CULL
-#define CONTROL_GAINT_EN 0x1DULL
-#define CONTROL_XT_EN 0x32ULL
-#define CONTROL_INTCAPXT_EN 0x33ULL
+#define CONTROL_IOMMU_EN 0
+#define CONTROL_HT_TUN_EN 1
+#define CONTROL_EVT_LOG_EN 2
+#define CONTROL_EVT_INT_EN 3
+#define CONTROL_COMWAIT_EN 4
+#define CONTROL_INV_TIMEOUT 5
+#define CONTROL_PASSPW_EN 8
+#define CONTROL_RESPASSPW_EN 9
+#define CONTROL_COHERENT_EN 10
+#define CONTROL_ISOC_EN 11
+#define CONTROL_CMDBUF_EN 12
+#define CONTROL_PPRLOG_EN 13
+#define CONTROL_PPRINT_EN 14
+#define CONTROL_PPR_EN 15
+#define CONTROL_GT_EN 16
+#define CONTROL_GA_EN 17
+#define CONTROL_GAM_EN 25
+#define CONTROL_GALOG_EN 28
+#define CONTROL_GAINT_EN 29
+#define CONTROL_XT_EN 50
+#define CONTROL_INTCAPXT_EN 51
+#define CONTROL_SNPAVIC_EN 61
#define CTRL_INV_TO_MASK (7 << CONTROL_INV_TIMEOUT)
#define CTRL_INV_TO_NONE 0
@@ -445,8 +453,6 @@ struct irq_remap_table {
u32 *table;
};
-extern struct irq_remap_table **irq_lookup_table;
-
/* Interrupt remapping feature used? */
extern bool amd_iommu_irq_remap;
@@ -456,6 +462,16 @@ extern bool amdr_ivrs_remap_support;
/* kmem_cache to get tables with 128 byte alignement */
extern struct kmem_cache *amd_iommu_irq_cache;
+#define PCI_SBDF_TO_SEGID(sbdf) (((sbdf) >> 16) & 0xffff)
+#define PCI_SBDF_TO_DEVID(sbdf) ((sbdf) & 0xffff)
+#define PCI_SEG_DEVID_TO_SBDF(seg, devid) ((((u32)(seg) & 0xffff) << 16) | \
+ ((devid) & 0xffff))
+
+/* Make iterating over all PCI segments easier */
+#define for_each_pci_segment(pci_seg) \
+ list_for_each_entry((pci_seg), &amd_iommu_pci_seg_list, list)
+#define for_each_pci_segment_safe(pci_seg, next) \
+ list_for_each_entry_safe((pci_seg), (next), &amd_iommu_pci_seg_list, list)
/*
* Make iterating over all IOMMUs easier
*/
@@ -478,13 +494,14 @@ extern struct kmem_cache *amd_iommu_irq_cache;
struct amd_iommu_fault {
u64 address; /* IO virtual address of the fault*/
u32 pasid; /* Address space identifier */
- u16 device_id; /* Originating PCI device id */
+ u32 sbdf; /* Originating PCI device id */
u16 tag; /* PPR tag */
u16 flags; /* Fault flags */
};
+struct amd_iommu;
struct iommu_domain;
struct irq_domain;
struct amd_irte_ops;
@@ -531,6 +548,75 @@ struct protection_domain {
};
/*
+ * This structure contains information about one PCI segment in the system.
+ */
+struct amd_iommu_pci_seg {
+ /* List with all PCI segments in the system */
+ struct list_head list;
+
+ /* List of all available dev_data structures */
+ struct llist_head dev_data_list;
+
+ /* PCI segment number */
+ u16 id;
+
+ /* Largest PCI device id we expect translation requests for */
+ u16 last_bdf;
+
+ /* Size of the device table */
+ u32 dev_table_size;
+
+ /* Size of the alias table */
+ u32 alias_table_size;
+
+ /* Size of the rlookup table */
+ u32 rlookup_table_size;
+
+ /*
+ * device table virtual address
+ *
+ * Pointer to the per PCI segment device table.
+ * It is indexed by the PCI device id or the HT unit id and contains
+ * information about the domain the device belongs to as well as the
+ * page table root pointer.
+ */
+ struct dev_table_entry *dev_table;
+
+ /*
+ * The rlookup iommu table is used to find the IOMMU which is
+ * responsible for a specific device. It is indexed by the PCI
+ * device id.
+ */
+ struct amd_iommu **rlookup_table;
+
+ /*
+ * This table is used to find the irq remapping table for a given
+ * device id quickly.
+ */
+ struct irq_remap_table **irq_lookup_table;
+
+ /*
+ * Pointer to a device table to which the contents of the old device
+ * table will be copied. It is only used in a kdump kernel.
+ */
+ struct dev_table_entry *old_dev_tbl_cpy;
+
+ /*
+ * The alias table is a driver specific data structure which contains the
+ * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
+ * More than one device can share the same requestor id.
+ */
+ u16 *alias_table;
+
+ /*
+ * A list of required unity mappings we find in ACPI. It is not locked
+ * because at runtime it is only read. It is created at ACPI table
+ * parsing time.
+ */
+ struct list_head unity_map;
+};
+
+/*
* Structure where we save information about one hardware AMD IOMMU in the
* system.
*/
@@ -567,6 +653,9 @@ struct amd_iommu {
/* Extended features */
u64 features;
+ /* Extended features 2 */
+ u64 features2;
+
/* IOMMUv2 */
bool is_iommu_v2;
@@ -581,7 +670,7 @@ struct amd_iommu {
u16 cap_ptr;
/* pci domain of this IOMMU */
- u16 pci_seg;
+ struct amd_iommu_pci_seg *pci_seg;
/* start of exclusion range of that IOMMU */
u64 exclusion_start;
@@ -666,8 +755,8 @@ struct acpihid_map_entry {
struct list_head list;
u8 uid[ACPIHID_UID_LEN];
u8 hid[ACPIHID_HID_LEN];
- u16 devid;
- u16 root_devid;
+ u32 devid;
+ u32 root_devid;
bool cmd_line;
struct iommu_group *group;
};
@@ -675,7 +764,7 @@ struct acpihid_map_entry {
struct devid_map {
struct list_head list;
u8 id;
- u16 devid;
+ u32 devid;
bool cmd_line;
};
@@ -689,7 +778,7 @@ struct iommu_dev_data {
struct list_head list; /* For domain->dev_list */
struct llist_node dev_data_list; /* For global dev_data_list */
struct protection_domain *domain; /* Domain the device is bound to */
- struct pci_dev *pdev;
+ struct device *dev;
u16 devid; /* PCI Device ID */
bool iommu_v2; /* Device can make use of IOMMUv2 */
struct {
@@ -710,6 +799,12 @@ extern struct list_head hpet_map;
extern struct list_head acpihid_map;
/*
+ * List with all PCI segments in the system. This list is not locked because
+ * it is only written at driver initialization time
+ */
+extern struct list_head amd_iommu_pci_seg_list;
+
+/*
* List with all IOMMUs in the system. This list is not locked because it is
* only written and read at driver initialization or suspend time
*/
@@ -749,38 +844,12 @@ struct unity_map_entry {
};
/*
- * List of all unity mappings. It is not locked because as runtime it is only
- * read. It is created at ACPI table parsing time.
- */
-extern struct list_head amd_iommu_unity_map;
-
-/*
* Data structures for device handling
*/
-/*
- * Device table used by hardware. Read and write accesses by software are
- * locked with the amd_iommu_pd_table lock.
- */
-extern struct dev_table_entry *amd_iommu_dev_table;
-
-/*
- * Alias table to find requestor ids to device ids. Not locked because only
- * read on runtime.
- */
-extern u16 *amd_iommu_alias_table;
-
-/*
- * Reverse lookup table to find the IOMMU which translates a specific device.
- */
-extern struct amd_iommu **amd_iommu_rlookup_table;
-
/* size of the dma_ops aperture as power of 2 */
extern unsigned amd_iommu_aperture_order;
-/* largest PCI device id we expect translation requests for */
-extern u16 amd_iommu_last_bdf;
-
/* allocation bitmap for domain ids */
extern unsigned long *amd_iommu_pd_alloc_bitmap;
@@ -913,6 +982,7 @@ struct irq_2_irte {
struct amd_ir_data {
u32 cached_ga_tag;
+ struct amd_iommu *iommu;
struct irq_2_irte irq_2_irte;
struct msi_msg msi_entry;
void *entry; /* Pointer to union irte or struct irte_ga */
@@ -930,9 +1000,9 @@ struct amd_ir_data {
struct amd_irte_ops {
void (*prepare)(void *, u32, bool, u8, u32, int);
- void (*activate)(void *, u16, u16);
- void (*deactivate)(void *, u16, u16);
- void (*set_affinity)(void *, u16, u16, u8, u32);
+ void (*activate)(struct amd_iommu *iommu, void *, u16, u16);
+ void (*deactivate)(struct amd_iommu *iommu, void *, u16, u16);
+ void (*set_affinity)(struct amd_iommu *iommu, void *, u16, u16, u8, u32);
void *(*get)(struct irq_remap_table *, int);
void (*set_allocated)(struct irq_remap_table *, int);
bool (*is_allocated)(struct irq_remap_table *, int);
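struct amd_iommu_pci_seg above moves the formerly global device/alias/rlookup/irq-lookup tables into per-segment storage, so lookups are now keyed by (segment, devid) rather than a single flat devid. The toy model below shows only that indirection; all names are invented for illustration.

/* Per-segment lookup instead of one global table. */
#include <stdio.h>

#define MAX_SEGS   2
#define MAX_DEVID  8

struct seg_tables {
	int rlookup[MAX_DEVID];		/* which IOMMU owns each devid */
};

static struct seg_tables segs[MAX_SEGS];

static int lookup_iommu(int seg, int devid)
{
	return segs[seg].rlookup[devid];
}

int main(void)
{
	segs[0].rlookup[3] = 1;		/* seg 0, devid 3 -> iommu 1 */
	segs[1].rlookup[3] = 2;		/* same devid in seg 1 -> iommu 2 */

	printf("iommu for 0000:03 = %d\n", lookup_iommu(0, 3));
	printf("iommu for 0001:03 = %d\n", lookup_iommu(1, 3));
	return 0;
}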
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 1d08f87e734b..fdc642362c14 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -84,6 +84,10 @@
#define ACPI_DEVFLAG_ATSDIS 0x10000000
#define LOOP_TIMEOUT 2000000
+
+#define IVRS_GET_SBDF_ID(seg, bus, dev, fn) (((seg & 0xffff) << 16) | ((bus & 0xff) << 8) \
+ | ((dev & 0x1f) << 3) | (fn & 0x7))
+
/*
* ACPI table definitions
*
@@ -110,7 +114,7 @@ struct ivhd_header {
/* Following only valid on IVHD type 11h and 40h */
u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
- u64 res;
+ u64 efr_reg2;
} __attribute__((packed));
/*
@@ -141,7 +145,8 @@ struct ivmd_header {
u16 length;
u16 devid;
u16 aux;
- u64 resv;
+ u16 pci_seg;
+ u8 resv[6];
u64 range_start;
u64 range_length;
} __attribute__((packed));
@@ -159,11 +164,15 @@ static bool amd_iommu_disabled __initdata;
static bool amd_iommu_force_enable __initdata;
static int amd_iommu_target_ivhd_type;
-u16 amd_iommu_last_bdf; /* largest PCI device id we have
- to handle */
-LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings
- we find in ACPI */
+/* Global EFR and EFR2 registers */
+u64 amd_iommu_efr;
+u64 amd_iommu_efr2;
+/* SNP is enabled on the system? */
+bool amd_iommu_snp_en;
+EXPORT_SYMBOL(amd_iommu_snp_en);
+
+LIST_HEAD(amd_iommu_pci_seg_list); /* list of all PCI segments */
LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
system */
@@ -186,47 +195,11 @@ bool amdr_ivrs_remap_support __read_mostly;
bool amd_iommu_force_isolation __read_mostly;
/*
- * Pointer to the device table which is shared by all AMD IOMMUs
- * it is indexed by the PCI device id or the HT unit id and contains
- * information about the domain the device belongs to as well as the
- * page table root pointer.
- */
-struct dev_table_entry *amd_iommu_dev_table;
-/*
- * Pointer to a device table which the content of old device table
- * will be copied to. It's only be used in kdump kernel.
- */
-static struct dev_table_entry *old_dev_tbl_cpy;
-
-/*
- * The alias table is a driver specific data structure which contains the
- * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
- * More than one device can share the same requestor id.
- */
-u16 *amd_iommu_alias_table;
-
-/*
- * The rlookup table is used to find the IOMMU which is responsible
- * for a specific device. It is also indexed by the PCI device id.
- */
-struct amd_iommu **amd_iommu_rlookup_table;
-
-/*
- * This table is used to find the irq remapping table for a given device id
- * quickly.
- */
-struct irq_remap_table **irq_lookup_table;
-
-/*
* AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
* to know which ones are already in use.
*/
unsigned long *amd_iommu_pd_alloc_bitmap;
-static u32 dev_table_size; /* size of the device table */
-static u32 alias_table_size; /* size of the alias table */
-static u32 rlookup_table_size; /* size if the rlookup table */
-
enum iommu_init_state {
IOMMU_START_STATE,
IOMMU_IVRS_DETECTED,
@@ -256,7 +229,7 @@ static enum iommu_init_state init_state = IOMMU_START_STATE;
static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
-static void init_device_table_dma(void);
+static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg);
static bool amd_iommu_pre_enabled = true;
@@ -281,16 +254,10 @@ static void init_translation_status(struct amd_iommu *iommu)
iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}
-static inline void update_last_devid(u16 devid)
-{
- if (devid > amd_iommu_last_bdf)
- amd_iommu_last_bdf = devid;
-}
-
-static inline unsigned long tbl_size(int entry_size)
+static inline unsigned long tbl_size(int entry_size, int last_bdf)
{
unsigned shift = PAGE_SHIFT +
- get_order(((int)amd_iommu_last_bdf + 1) * entry_size);
+ get_order((last_bdf + 1) * entry_size);
return 1UL << shift;
}
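tbl_size() now takes the per-segment last_bdf instead of the global amd_iommu_last_bdf, but the arithmetic is unchanged: the table covers last_bdf + 1 entries, rounded up to a power-of-two number of pages. A worked example follows, assuming 4 KiB pages and the driver's 32-byte device-table entries, with get_order() modelled by a small helper.

/* tbl_size(32, 0xffff) -> 65536 * 32 = 2 MiB, i.e. order 9 in 4 KiB pages. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static unsigned int get_order_model(unsigned long size)
{
	unsigned int order = 0;

	size = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;	/* round up to pages */
	while ((1UL << order) < size)
		order++;
	return order;
}

static unsigned long tbl_size_model(int entry_size, int last_bdf)
{
	unsigned int shift = PAGE_SHIFT +
		get_order_model((unsigned long)(last_bdf + 1) * entry_size);

	return 1UL << shift;
}

int main(void)
{
	printf("%lu bytes\n", tbl_size_model(32, 0xffff));	/* 2097152 */
	return 0;
}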
@@ -300,21 +267,46 @@ int amd_iommu_get_num_iommus(void)
return amd_iommus_present;
}
-#ifdef CONFIG_IRQ_REMAP
-static bool check_feature_on_all_iommus(u64 mask)
+/*
+ * Iterate through all the IOMMUs to get common EFR
+ * masks among all IOMMUs and warn if found inconsistency.
+ */
+static void get_global_efr(void)
{
- bool ret = false;
struct amd_iommu *iommu;
for_each_iommu(iommu) {
- ret = iommu_feature(iommu, mask);
- if (!ret)
- return false;
+ u64 tmp = iommu->features;
+ u64 tmp2 = iommu->features2;
+
+ if (list_is_first(&iommu->list, &amd_iommu_list)) {
+ amd_iommu_efr = tmp;
+ amd_iommu_efr2 = tmp2;
+ continue;
+ }
+
+ if (amd_iommu_efr == tmp &&
+ amd_iommu_efr2 == tmp2)
+ continue;
+
+ pr_err(FW_BUG
+ "Found inconsistent EFR/EFR2 %#llx,%#llx (global %#llx,%#llx) on iommu%d (%04x:%02x:%02x.%01x).\n",
+ tmp, tmp2, amd_iommu_efr, amd_iommu_efr2,
+ iommu->index, iommu->pci_seg->id,
+ PCI_BUS_NUM(iommu->devid), PCI_SLOT(iommu->devid),
+ PCI_FUNC(iommu->devid));
+
+ amd_iommu_efr &= tmp;
+ amd_iommu_efr2 &= tmp2;
}
- return true;
+ pr_info("Using global IVHD EFR:%#llx, EFR2:%#llx\n", amd_iommu_efr, amd_iommu_efr2);
+}
+
+static bool check_feature_on_all_iommus(u64 mask)
+{
+ return !!(amd_iommu_efr & mask);
}
-#endif
/*
* For IVHD type 0x11/0x40, EFR is also available via IVHD.
@@ -324,8 +316,10 @@ static bool check_feature_on_all_iommus(u64 mask)
static void __init early_iommu_features_init(struct amd_iommu *iommu,
struct ivhd_header *h)
{
- if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP)
+ if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP) {
iommu->features = h->efr_reg;
+ iommu->features2 = h->efr_reg2;
+ }
if (amd_iommu_ivinfo & IOMMU_IVINFO_DMA_REMAP)
amdr_ivrs_remap_support = true;
}
@@ -399,7 +393,7 @@ static void iommu_set_cwwb_range(struct amd_iommu *iommu)
u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem);
u64 entry = start & PM_ADDR_MASK;
- if (!iommu_feature(iommu, FEATURE_SNP))
+ if (!check_feature_on_all_iommus(FEATURE_SNP))
return;
/* Note:
@@ -421,10 +415,12 @@ static void iommu_set_cwwb_range(struct amd_iommu *iommu)
static void iommu_set_device_table(struct amd_iommu *iommu)
{
u64 entry;
+ u32 dev_table_size = iommu->pci_seg->dev_table_size;
+ void *dev_table = (void *)get_dev_table(iommu);
BUG_ON(iommu->mmio_base == NULL);
- entry = iommu_virt_to_phys(amd_iommu_dev_table);
+ entry = iommu_virt_to_phys(dev_table);
entry |= (dev_table_size >> 12) - 1;
memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
&entry, sizeof(entry));
@@ -557,6 +553,7 @@ static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
u8 *p = (void *)h, *end = (void *)h;
struct ivhd_entry *dev;
+ int last_devid = -EINVAL;
u32 ivhd_size = get_ivhd_header_size(h);
@@ -573,14 +570,14 @@ static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
switch (dev->type) {
case IVHD_DEV_ALL:
/* Use maximum BDF value for DEV_ALL */
- update_last_devid(0xffff);
- break;
+ return 0xffff;
case IVHD_DEV_SELECT:
case IVHD_DEV_RANGE_END:
case IVHD_DEV_ALIAS:
case IVHD_DEV_EXT_SELECT:
/* all the above subfield types refer to device ids */
- update_last_devid(dev->devid);
+ if (dev->devid > last_devid)
+ last_devid = dev->devid;
break;
default:
break;
@@ -590,7 +587,7 @@ static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
WARN_ON(p != end);
- return 0;
+ return last_devid;
}
static int __init check_ivrs_checksum(struct acpi_table_header *table)
@@ -614,38 +611,125 @@ static int __init check_ivrs_checksum(struct acpi_table_header *table)
* id which we need to handle. This is the first of three functions which parse
* the ACPI table. So we check the checksum here.
*/
-static int __init find_last_devid_acpi(struct acpi_table_header *table)
+static int __init find_last_devid_acpi(struct acpi_table_header *table, u16 pci_seg)
{
u8 *p = (u8 *)table, *end = (u8 *)table;
struct ivhd_header *h;
+ int last_devid, last_bdf = 0;
p += IVRS_HEADER_LENGTH;
end += table->length;
while (p < end) {
h = (struct ivhd_header *)p;
- if (h->type == amd_iommu_target_ivhd_type) {
- int ret = find_last_devid_from_ivhd(h);
-
- if (ret)
- return ret;
+ if (h->pci_seg == pci_seg &&
+ h->type == amd_iommu_target_ivhd_type) {
+ last_devid = find_last_devid_from_ivhd(h);
+
+ if (last_devid < 0)
+ return -EINVAL;
+ if (last_devid > last_bdf)
+ last_bdf = last_devid;
}
p += h->length;
}
WARN_ON(p != end);
- return 0;
+ return last_bdf;
}
/****************************************************************************
*
* The following functions belong to the code path which parses the ACPI table
* the second time. In this ACPI parsing iteration we allocate IOMMU specific
- * data structures, initialize the device/alias/rlookup table and also
- * basically initialize the hardware.
+ * data structures, initialize the per PCI segment device/alias/rlookup table
+ * and also basically initialize the hardware.
*
****************************************************************************/
+/* Allocate per PCI segment device table */
+static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ pci_seg->dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
+ get_order(pci_seg->dev_table_size));
+ if (!pci_seg->dev_table)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ free_pages((unsigned long)pci_seg->dev_table,
+ get_order(pci_seg->dev_table_size));
+ pci_seg->dev_table = NULL;
+}
+
+/* Allocate per PCI segment IOMMU rlookup table. */
+static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ pci_seg->rlookup_table = (void *)__get_free_pages(
+ GFP_KERNEL | __GFP_ZERO,
+ get_order(pci_seg->rlookup_table_size));
+ if (pci_seg->rlookup_table == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline void free_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ free_pages((unsigned long)pci_seg->rlookup_table,
+ get_order(pci_seg->rlookup_table_size));
+ pci_seg->rlookup_table = NULL;
+}
+
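+/* Allocate per PCI segment IRQ lookup table */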
+static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ pci_seg->irq_lookup_table = (void *)__get_free_pages(
+ GFP_KERNEL | __GFP_ZERO,
+ get_order(pci_seg->rlookup_table_size));
+ kmemleak_alloc(pci_seg->irq_lookup_table,
+ pci_seg->rlookup_table_size, 1, GFP_KERNEL);
+ if (pci_seg->irq_lookup_table == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline void free_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ kmemleak_free(pci_seg->irq_lookup_table);
+ free_pages((unsigned long)pci_seg->irq_lookup_table,
+ get_order(pci_seg->rlookup_table_size));
+ pci_seg->irq_lookup_table = NULL;
+}
+
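+/* Allocate and initialize per PCI segment alias table */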
+static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ int i;
+
+ pci_seg->alias_table = (void *)__get_free_pages(GFP_KERNEL,
+ get_order(pci_seg->alias_table_size));
+ if (!pci_seg->alias_table)
+ return -ENOMEM;
+
+ /*
+ * Let all alias entries point to themselves.
+ */
+ for (i = 0; i <= pci_seg->last_bdf; ++i)
+ pci_seg->alias_table[i] = i;
+
+ return 0;
+}
+
+static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
+{
+ free_pages((unsigned long)pci_seg->alias_table,
+ get_order(pci_seg->alias_table_size));
+ pci_seg->alias_table = NULL;
+}
+
/*
* Allocates the command buffer. This buffer is per AMD IOMMU. We can
* write commands to that buffer later and the IOMMU will execute them
@@ -724,7 +808,7 @@ static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
void *buf = (void *)__get_free_pages(gfp, order);
if (buf &&
- iommu_feature(iommu, FEATURE_SNP) &&
+ check_feature_on_all_iommus(FEATURE_SNP) &&
set_memory_4k((unsigned long)buf, (1 << order))) {
free_pages((unsigned long)buf, order);
buf = NULL;
@@ -815,20 +899,15 @@ static void free_ga_log(struct amd_iommu *iommu)
#endif
}
+#ifdef CONFIG_IRQ_REMAP
static int iommu_ga_log_enable(struct amd_iommu *iommu)
{
-#ifdef CONFIG_IRQ_REMAP
u32 status, i;
u64 entry;
if (!iommu->ga_log)
return -EINVAL;
- /* Check if already running */
- status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
- if (WARN_ON(status & (MMIO_STATUS_GALOG_RUN_MASK)))
- return 0;
-
entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
&entry, sizeof(entry));
@@ -852,13 +931,12 @@ static int iommu_ga_log_enable(struct amd_iommu *iommu)
if (WARN_ON(i >= LOOP_TIMEOUT))
return -EINVAL;
-#endif /* CONFIG_IRQ_REMAP */
+
return 0;
}
static int iommu_init_ga_log(struct amd_iommu *iommu)
{
-#ifdef CONFIG_IRQ_REMAP
if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
return 0;
@@ -876,10 +954,8 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
err_out:
free_ga_log(iommu);
return -EINVAL;
-#else
- return 0;
-#endif /* CONFIG_IRQ_REMAP */
}
+#endif /* CONFIG_IRQ_REMAP */
static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
{
@@ -916,56 +992,59 @@ static void iommu_enable_gt(struct amd_iommu *iommu)
}
/* sets a specific bit in the device table entry. */
-static void set_dev_entry_bit(u16 devid, u8 bit)
+static void __set_dev_entry_bit(struct dev_table_entry *dev_table,
+ u16 devid, u8 bit)
{
int i = (bit >> 6) & 0x03;
int _bit = bit & 0x3f;
- amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
+ dev_table[devid].data[i] |= (1UL << _bit);
+}
+
+static void set_dev_entry_bit(struct amd_iommu *iommu, u16 devid, u8 bit)
+{
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
+
+ return __set_dev_entry_bit(dev_table, devid, bit);
}
-static int get_dev_entry_bit(u16 devid, u8 bit)
+static int __get_dev_entry_bit(struct dev_table_entry *dev_table,
+ u16 devid, u8 bit)
{
int i = (bit >> 6) & 0x03;
int _bit = bit & 0x3f;
- return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
+ return (dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}
+static int get_dev_entry_bit(struct amd_iommu *iommu, u16 devid, u8 bit)
+{
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
-static bool copy_device_table(void)
+ return __get_dev_entry_bit(dev_table, devid, bit);
+}
+
+static bool __copy_device_table(struct amd_iommu *iommu)
{
- u64 int_ctl, int_tab_len, entry = 0, last_entry = 0;
+ u64 int_ctl, int_tab_len, entry = 0;
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
struct dev_table_entry *old_devtb = NULL;
u32 lo, hi, devid, old_devtb_size;
phys_addr_t old_devtb_phys;
- struct amd_iommu *iommu;
u16 dom_id, dte_v, irq_v;
gfp_t gfp_flag;
u64 tmp;
- if (!amd_iommu_pre_enabled)
- return false;
-
- pr_warn("Translation is already enabled - trying to copy translation structures\n");
- for_each_iommu(iommu) {
- /* All IOMMUs should use the same device table with the same size */
- lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
- hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
- entry = (((u64) hi) << 32) + lo;
- if (last_entry && last_entry != entry) {
- pr_err("IOMMU:%d should use the same dev table as others!\n",
- iommu->index);
- return false;
- }
- last_entry = entry;
+ /* Each IOMMU uses a separate device table of the same size */
+ lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
+ hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
+ entry = (((u64) hi) << 32) + lo;
- old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
- if (old_devtb_size != dev_table_size) {
- pr_err("The device table size of IOMMU:%d is not expected!\n",
- iommu->index);
- return false;
- }
+ old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
+ if (old_devtb_size != pci_seg->dev_table_size) {
+ pr_err("The device table size of IOMMU:%d is not expected!\n",
+ iommu->index);
+ return false;
}
/*
@@ -981,38 +1060,38 @@ static bool copy_device_table(void)
}
old_devtb = (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) && is_kdump_kernel())
? (__force void *)ioremap_encrypted(old_devtb_phys,
- dev_table_size)
- : memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);
+ pci_seg->dev_table_size)
+ : memremap(old_devtb_phys, pci_seg->dev_table_size, MEMREMAP_WB);
if (!old_devtb)
return false;
gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
- old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
- get_order(dev_table_size));
- if (old_dev_tbl_cpy == NULL) {
+ pci_seg->old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
+ get_order(pci_seg->dev_table_size));
+ if (pci_seg->old_dev_tbl_cpy == NULL) {
pr_err("Failed to allocate memory for copying old device table!\n");
memunmap(old_devtb);
return false;
}
- for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
- old_dev_tbl_cpy[devid] = old_devtb[devid];
+ for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
+ pci_seg->old_dev_tbl_cpy[devid] = old_devtb[devid];
dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;
if (dte_v && dom_id) {
- old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
- old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
+ pci_seg->old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
+ pci_seg->old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
__set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
/* If gcr3 table existed, mask it out */
if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
- old_dev_tbl_cpy[devid].data[1] &= ~tmp;
+ pci_seg->old_dev_tbl_cpy[devid].data[1] &= ~tmp;
tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
tmp |= DTE_FLAG_GV;
- old_dev_tbl_cpy[devid].data[0] &= ~tmp;
+ pci_seg->old_dev_tbl_cpy[devid].data[0] &= ~tmp;
}
}
@@ -1027,7 +1106,7 @@ static bool copy_device_table(void)
return false;
}
- old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
+ pci_seg->old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
}
}
memunmap(old_devtb);
@@ -1035,21 +1114,42 @@ static bool copy_device_table(void)
return true;
}
-void amd_iommu_apply_erratum_63(u16 devid)
+static bool copy_device_table(void)
{
- int sysmgt;
+ struct amd_iommu *iommu;
+ struct amd_iommu_pci_seg *pci_seg;
- sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
- (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);
+ if (!amd_iommu_pre_enabled)
+ return false;
- if (sysmgt == 0x01)
- set_dev_entry_bit(devid, DEV_ENTRY_IW);
+ pr_warn("Translation is already enabled - trying to copy translation structures\n");
+
+ /*
+ * All IOMMUs within a PCI segment share a common device table.
+ * Hence, copy the device table only once per PCI segment.
+ */
+ for_each_pci_segment(pci_seg) {
+ for_each_iommu(iommu) {
+ if (pci_seg->id != iommu->pci_seg->id)
+ continue;
+ if (!__copy_device_table(iommu))
+ return false;
+ break;
+ }
+ }
+
+ return true;
}
-/* Writes the specific IOMMU for a device into the rlookup table */
-static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
+void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid)
{
- amd_iommu_rlookup_table[devid] = iommu;
+ int sysmgt;
+
+ sysmgt = get_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT1) |
+ (get_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT2) << 1);
+
+ if (sysmgt == 0x01)
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_IW);
}
/*
@@ -1060,26 +1160,26 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
u16 devid, u32 flags, u32 ext_flags)
{
if (flags & ACPI_DEVFLAG_INITPASS)
- set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_INIT_PASS);
if (flags & ACPI_DEVFLAG_EXTINT)
- set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_EINT_PASS);
if (flags & ACPI_DEVFLAG_NMI)
- set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_NMI_PASS);
if (flags & ACPI_DEVFLAG_SYSMGT1)
- set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT1);
if (flags & ACPI_DEVFLAG_SYSMGT2)
- set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT2);
if (flags & ACPI_DEVFLAG_LINT0)
- set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_LINT0_PASS);
if (flags & ACPI_DEVFLAG_LINT1)
- set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);
+ set_dev_entry_bit(iommu, devid, DEV_ENTRY_LINT1_PASS);
- amd_iommu_apply_erratum_63(devid);
+ amd_iommu_apply_erratum_63(iommu, devid);
- set_iommu_for_device(iommu, devid);
+ amd_iommu_set_rlookup_table(iommu, devid);
}
-int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
+int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line)
{
struct devid_map *entry;
struct list_head *list;
@@ -1116,7 +1216,7 @@ int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
return 0;
}
-static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid,
+static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u32 *devid,
bool cmd_line)
{
struct acpihid_map_entry *entry;
@@ -1195,10 +1295,11 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
{
u8 *p = (u8 *)h;
u8 *end = p, flags = 0;
- u16 devid = 0, devid_start = 0, devid_to = 0;
+ u16 devid = 0, devid_start = 0, devid_to = 0, seg_id;
u32 dev_i, ext_flags = 0;
bool alias = false;
struct ivhd_entry *e;
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
u32 ivhd_size;
int ret;
@@ -1230,19 +1331,21 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
while (p < end) {
e = (struct ivhd_entry *)p;
+ seg_id = pci_seg->id;
+
switch (e->type) {
case IVHD_DEV_ALL:
DUMP_printk(" DEV_ALL\t\t\tflags: %02x\n", e->flags);
- for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
+ for (dev_i = 0; dev_i <= pci_seg->last_bdf; ++dev_i)
set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
break;
case IVHD_DEV_SELECT:
- DUMP_printk(" DEV_SELECT\t\t\t devid: %02x:%02x.%x "
+ DUMP_printk(" DEV_SELECT\t\t\t devid: %04x:%02x:%02x.%x "
"flags: %02x\n",
- PCI_BUS_NUM(e->devid),
+ seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
e->flags);
@@ -1253,8 +1356,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
case IVHD_DEV_SELECT_RANGE_START:
DUMP_printk(" DEV_SELECT_RANGE_START\t "
- "devid: %02x:%02x.%x flags: %02x\n",
- PCI_BUS_NUM(e->devid),
+ "devid: %04x:%02x:%02x.%x flags: %02x\n",
+ seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
e->flags);
@@ -1266,9 +1369,9 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
case IVHD_DEV_ALIAS:
- DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
+ DUMP_printk(" DEV_ALIAS\t\t\t devid: %04x:%02x:%02x.%x "
"flags: %02x devid_to: %02x:%02x.%x\n",
- PCI_BUS_NUM(e->devid),
+ seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
e->flags,
@@ -1280,18 +1383,18 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
devid_to = e->ext >> 8;
set_dev_entry_from_acpi(iommu, devid , e->flags, 0);
set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
- amd_iommu_alias_table[devid] = devid_to;
+ pci_seg->alias_table[devid] = devid_to;
break;
case IVHD_DEV_ALIAS_RANGE:
DUMP_printk(" DEV_ALIAS_RANGE\t\t "
- "devid: %02x:%02x.%x flags: %02x "
- "devid_to: %02x:%02x.%x\n",
- PCI_BUS_NUM(e->devid),
+ "devid: %04x:%02x:%02x.%x flags: %02x "
+ "devid_to: %04x:%02x:%02x.%x\n",
+ seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
e->flags,
- PCI_BUS_NUM(e->ext >> 8),
+ seg_id, PCI_BUS_NUM(e->ext >> 8),
PCI_SLOT(e->ext >> 8),
PCI_FUNC(e->ext >> 8));
@@ -1303,9 +1406,9 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
case IVHD_DEV_EXT_SELECT:
- DUMP_printk(" DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
+ DUMP_printk(" DEV_EXT_SELECT\t\t devid: %04x:%02x:%02x.%x "
"flags: %02x ext: %08x\n",
- PCI_BUS_NUM(e->devid),
+ seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
e->flags, e->ext);
@@ -1317,8 +1420,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
case IVHD_DEV_EXT_SELECT_RANGE:
DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: "
- "%02x:%02x.%x flags: %02x ext: %08x\n",
- PCI_BUS_NUM(e->devid),
+ "%04x:%02x:%02x.%x flags: %02x ext: %08x\n",
+ seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
e->flags, e->ext);
@@ -1330,15 +1433,15 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
case IVHD_DEV_RANGE_END:
- DUMP_printk(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
- PCI_BUS_NUM(e->devid),
+ DUMP_printk(" DEV_RANGE_END\t\t devid: %04x:%02x:%02x.%x\n",
+ seg_id, PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid));
devid = e->devid;
for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
if (alias) {
- amd_iommu_alias_table[dev_i] = devid_to;
+ pci_seg->alias_table[dev_i] = devid_to;
set_dev_entry_from_acpi(iommu,
devid_to, flags, ext_flags);
}
@@ -1349,11 +1452,11 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
case IVHD_DEV_SPECIAL: {
u8 handle, type;
const char *var;
- u16 devid;
+ u32 devid;
int ret;
handle = e->ext & 0xff;
- devid = (e->ext >> 8) & 0xffff;
+ devid = PCI_SEG_DEVID_TO_SBDF(seg_id, (e->ext >> 8));
type = (e->ext >> 24) & 0xff;
if (type == IVHD_SPECIAL_IOAPIC)
@@ -1363,9 +1466,9 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
else
var = "UNKNOWN";
- DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
+ DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %04x:%02x:%02x.%x\n",
var, (int)handle,
- PCI_BUS_NUM(devid),
+ seg_id, PCI_BUS_NUM(devid),
PCI_SLOT(devid),
PCI_FUNC(devid));
@@ -1383,7 +1486,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
}
case IVHD_DEV_ACPI_HID: {
- u16 devid;
+ u32 devid;
u8 hid[ACPIHID_HID_LEN];
u8 uid[ACPIHID_UID_LEN];
int ret;
@@ -1426,9 +1529,9 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
}
- devid = e->devid;
- DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
- hid, uid,
+ devid = PCI_SEG_DEVID_TO_SBDF(seg_id, e->devid);
+ DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %04x:%02x:%02x.%x\n",
+ hid, uid, seg_id,
PCI_BUS_NUM(devid),
PCI_SLOT(devid),
PCI_FUNC(devid));
@@ -1458,6 +1561,74 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
return 0;
}
+/* Allocate PCI segment data structure */
+static struct amd_iommu_pci_seg *__init alloc_pci_segment(u16 id,
+ struct acpi_table_header *ivrs_base)
+{
+ struct amd_iommu_pci_seg *pci_seg;
+ int last_bdf;
+
+ /*
+ * First parse ACPI tables to find the largest Bus/Dev/Func we need to
+ * handle in this PCI segment. Based on this information, the shared data
+ * structures for the PCI segments in the system are allocated.
+ */
+ last_bdf = find_last_devid_acpi(ivrs_base, id);
+ if (last_bdf < 0)
+ return NULL;
+
+ pci_seg = kzalloc(sizeof(struct amd_iommu_pci_seg), GFP_KERNEL);
+ if (pci_seg == NULL)
+ return NULL;
+
+ pci_seg->last_bdf = last_bdf;
+ DUMP_printk("PCI segment : 0x%0x, last bdf : 0x%04x\n", id, last_bdf);
+ pci_seg->dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE, last_bdf);
+ pci_seg->alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE, last_bdf);
+ pci_seg->rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE, last_bdf);
+
+ pci_seg->id = id;
+ init_llist_head(&pci_seg->dev_data_list);
+ INIT_LIST_HEAD(&pci_seg->unity_map);
+ list_add_tail(&pci_seg->list, &amd_iommu_pci_seg_list);
+
+ if (alloc_dev_table(pci_seg))
+ return NULL;
+ if (alloc_alias_table(pci_seg))
+ return NULL;
+ if (alloc_rlookup_table(pci_seg))
+ return NULL;
+
+ return pci_seg;
+}
+
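+/* Return the PCI segment structure for the given id, allocating it if it does not exist yet */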
+static struct amd_iommu_pci_seg *__init get_pci_segment(u16 id,
+ struct acpi_table_header *ivrs_base)
+{
+ struct amd_iommu_pci_seg *pci_seg;
+
+ for_each_pci_segment(pci_seg) {
+ if (pci_seg->id == id)
+ return pci_seg;
+ }
+
+ return alloc_pci_segment(id, ivrs_base);
+}
+
+static void __init free_pci_segments(void)
+{
+ struct amd_iommu_pci_seg *pci_seg, *next;
+
+ for_each_pci_segment_safe(pci_seg, next) {
+ list_del(&pci_seg->list);
+ free_irq_lookup_table(pci_seg);
+ free_rlookup_table(pci_seg);
+ free_alias_table(pci_seg);
+ free_dev_table(pci_seg);
+ kfree(pci_seg);
+ }
+}
+
static void __init free_iommu_one(struct amd_iommu *iommu)
{
free_cwwb_sem(iommu);
@@ -1542,9 +1713,15 @@ static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
* together and also allocates the command buffer and programs the
* hardware. It does NOT enable the IOMMU. This is done afterwards.
*/
-static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
+static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h,
+ struct acpi_table_header *ivrs_base)
{
- int ret;
+ struct amd_iommu_pci_seg *pci_seg;
+
+ pci_seg = get_pci_segment(h->pci_seg, ivrs_base);
+ if (pci_seg == NULL)
+ return -ENOMEM;
+ iommu->pci_seg = pci_seg;
raw_spin_lock_init(&iommu->lock);
iommu->cmd_sem_val = 0;
@@ -1566,7 +1743,6 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
*/
iommu->devid = h->devid;
iommu->cap_ptr = h->cap_ptr;
- iommu->pci_seg = h->pci_seg;
iommu->mmio_phys = h->mmio_phys;
switch (h->type) {
@@ -1621,6 +1797,13 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
if (!iommu->mmio_base)
return -ENOMEM;
+ return init_iommu_from_acpi(iommu, h);
+}
+
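+/*
+ * Second stage of per-IOMMU initialization, run after all IVHD blocks
+ * have been parsed and the global EFR/EFR2 values are known.
+ */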
+static int __init init_iommu_one_late(struct amd_iommu *iommu)
+{
+ int ret;
+
if (alloc_cwwb_sem(iommu))
return -ENOMEM;
@@ -1642,10 +1825,6 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
if (amd_iommu_pre_enabled)
amd_iommu_pre_enabled = translation_pre_enabled(iommu);
- ret = init_iommu_from_acpi(iommu, h);
- if (ret)
- return ret;
-
if (amd_iommu_irq_remap) {
ret = amd_iommu_create_irq_domain(iommu);
if (ret)
@@ -1656,7 +1835,7 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
* Make sure IOMMU is not considered to translate itself. The IVRS
* table tells us so, but this is a lie!
*/
- amd_iommu_rlookup_table[iommu->devid] = NULL;
+ iommu->pci_seg->rlookup_table[iommu->devid] = NULL;
return 0;
}
@@ -1701,15 +1880,16 @@ static int __init init_iommu_all(struct acpi_table_header *table)
end += table->length;
p += IVRS_HEADER_LENGTH;
+ /* Phase 1: Process all IVHD blocks */
while (p < end) {
h = (struct ivhd_header *)p;
if (*p == amd_iommu_target_ivhd_type) {
- DUMP_printk("device: %02x:%02x.%01x cap: %04x "
- "seg: %d flags: %01x info %04x\n",
- PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
- PCI_FUNC(h->devid), h->cap_ptr,
- h->pci_seg, h->flags, h->info);
+ DUMP_printk("device: %04x:%02x:%02x.%01x cap: %04x "
+ "flags: %01x info %04x\n",
+ h->pci_seg, PCI_BUS_NUM(h->devid),
+ PCI_SLOT(h->devid), PCI_FUNC(h->devid),
+ h->cap_ptr, h->flags, h->info);
DUMP_printk(" mmio-addr: %016llx\n",
h->mmio_phys);
@@ -1717,7 +1897,7 @@ static int __init init_iommu_all(struct acpi_table_header *table)
if (iommu == NULL)
return -ENOMEM;
- ret = init_iommu_one(iommu, h);
+ ret = init_iommu_one(iommu, h, table);
if (ret)
return ret;
}
@@ -1726,6 +1906,16 @@ static int __init init_iommu_all(struct acpi_table_header *table)
}
WARN_ON(p != end);
+ /* Phase 2: Early feature support check */
+ get_global_efr();
+
+ /* Phase 3: Enabling IOMMU features */
+ for_each_iommu(iommu) {
+ ret = init_iommu_one_late(iommu);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -1762,7 +1952,7 @@ static ssize_t amd_iommu_show_features(struct device *dev,
char *buf)
{
struct amd_iommu *iommu = dev_to_amd_iommu(dev);
- return sprintf(buf, "%llx\n", iommu->features);
+ return sprintf(buf, "%llx:%llx\n", iommu->features2, iommu->features);
}
static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);
@@ -1789,16 +1979,18 @@ static const struct attribute_group *amd_iommu_groups[] = {
*/
static void __init late_iommu_features_init(struct amd_iommu *iommu)
{
- u64 features;
+ u64 features, features2;
if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
return;
/* read extended feature bits */
features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
+ features2 = readq(iommu->mmio_base + MMIO_EXT_FEATURES2);
if (!iommu->features) {
iommu->features = features;
+ iommu->features2 = features2;
return;
}
@@ -1806,9 +1998,13 @@ static void __init late_iommu_features_init(struct amd_iommu *iommu)
* Sanity check and warn if EFR values from
* IVHD and MMIO conflict.
*/
- if (features != iommu->features)
- pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx).\n",
- features, iommu->features);
+ if (features != iommu->features ||
+ features2 != iommu->features2) {
+ pr_warn(FW_WARN
+ "EFR mismatch. Use IVHD EFR (%#llx : %#llx), EFR2 (%#llx : %#llx).\n",
+ features, iommu->features,
+ features2, iommu->features2);
+ }
}
static int __init iommu_init_pci(struct amd_iommu *iommu)
@@ -1816,7 +2012,8 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
int cap_ptr = iommu->cap_ptr;
int ret;
- iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
+ iommu->dev = pci_get_domain_bus_and_slot(iommu->pci_seg->id,
+ PCI_BUS_NUM(iommu->devid),
iommu->devid & 0xff);
if (!iommu->dev)
return -ENODEV;
@@ -1863,10 +2060,6 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
return -ENOMEM;
- ret = iommu_init_ga_log(iommu);
- if (ret)
- return ret;
-
if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) {
pr_info("Using strict mode due to virtualization\n");
iommu_set_dma_strict();
@@ -1879,7 +2072,8 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
int i, j;
iommu->root_pdev =
- pci_get_domain_bus_and_slot(0, iommu->dev->bus->number,
+ pci_get_domain_bus_and_slot(iommu->pci_seg->id,
+ iommu->dev->bus->number,
PCI_DEVFN(0, 0));
/*
@@ -1906,8 +2100,11 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
amd_iommu_erratum_746_workaround(iommu);
amd_iommu_ats_write_check_workaround(iommu);
- iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
+ ret = iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
amd_iommu_groups, "ivhd%d", iommu->index);
+ if (ret)
+ return ret;
+
iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL);
return pci_enable_device(iommu->dev);
@@ -1928,7 +2125,7 @@ static void print_iommu_info(void)
pci_info(pdev, "Found IOMMU cap 0x%x\n", iommu->cap_ptr);
if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
- pr_info("Extended features (%#llx):", iommu->features);
+ pr_info("Extended features (%#llx, %#llx):", iommu->features, iommu->features2);
for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
if (iommu_feature(iommu, (1ULL << i)))
@@ -1938,13 +2135,14 @@ static void print_iommu_info(void)
if (iommu->features & FEATURE_GAM_VAPIC)
pr_cont(" GA_vAPIC");
+ if (iommu->features & FEATURE_SNP)
+ pr_cont(" SNP");
+
pr_cont("\n");
}
}
if (irq_remapping_enabled) {
pr_info("Interrupt remapping enabled\n");
- if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
- pr_info("Virtual APIC enabled\n");
if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
pr_info("X2APIC enabled\n");
}
@@ -1953,6 +2151,7 @@ static void print_iommu_info(void)
static int __init amd_iommu_init_pci(void)
{
struct amd_iommu *iommu;
+ struct amd_iommu_pci_seg *pci_seg;
int ret;
for_each_iommu(iommu) {
@@ -1983,7 +2182,8 @@ static int __init amd_iommu_init_pci(void)
goto out;
}
- init_device_table_dma();
+ for_each_pci_segment(pci_seg)
+ init_device_table_dma(pci_seg);
for_each_iommu(iommu)
iommu_flush_all_caches(iommu);
@@ -2232,9 +2432,6 @@ enable_faults:
if (iommu->ppr_log != NULL)
iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
-
- iommu_ga_log_enable(iommu);
-
return 0;
}
@@ -2249,19 +2446,28 @@ enable_faults:
static void __init free_unity_maps(void)
{
struct unity_map_entry *entry, *next;
+ struct amd_iommu_pci_seg *p, *pci_seg;
- list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
- list_del(&entry->list);
- kfree(entry);
+ for_each_pci_segment_safe(pci_seg, p) {
+ list_for_each_entry_safe(entry, next, &pci_seg->unity_map, list) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
}
}
/* called for unity map ACPI definition */
-static int __init init_unity_map_range(struct ivmd_header *m)
+static int __init init_unity_map_range(struct ivmd_header *m,
+ struct acpi_table_header *ivrs_base)
{
struct unity_map_entry *e = NULL;
+ struct amd_iommu_pci_seg *pci_seg;
char *s;
+ pci_seg = get_pci_segment(m->pci_seg, ivrs_base);
+ if (pci_seg == NULL)
+ return -ENOMEM;
+
e = kzalloc(sizeof(*e), GFP_KERNEL);
if (e == NULL)
return -ENOMEM;
@@ -2277,7 +2483,7 @@ static int __init init_unity_map_range(struct ivmd_header *m)
case ACPI_IVMD_TYPE_ALL:
s = "IVMD_TYPE_ALL\t\t";
e->devid_start = 0;
- e->devid_end = amd_iommu_last_bdf;
+ e->devid_end = pci_seg->last_bdf;
break;
case ACPI_IVMD_TYPE_RANGE:
s = "IVMD_TYPE_RANGE\t\t";
@@ -2299,14 +2505,16 @@ static int __init init_unity_map_range(struct ivmd_header *m)
if (m->flags & IVMD_FLAG_EXCL_RANGE)
e->prot = (IVMD_FLAG_IW | IVMD_FLAG_IR) >> 1;
- DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
- " range_start: %016llx range_end: %016llx flags: %x\n", s,
+ DUMP_printk("%s devid_start: %04x:%02x:%02x.%x devid_end: "
+ "%04x:%02x:%02x.%x range_start: %016llx range_end: %016llx"
+ " flags: %x\n", s, m->pci_seg,
PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
- PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
+ PCI_FUNC(e->devid_start), m->pci_seg,
+ PCI_BUS_NUM(e->devid_end),
PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
e->address_start, e->address_end, m->flags);
- list_add_tail(&e->list, &amd_iommu_unity_map);
+ list_add_tail(&e->list, &pci_seg->unity_map);
return 0;
}
@@ -2323,7 +2531,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
while (p < end) {
m = (struct ivmd_header *)p;
if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
- init_unity_map_range(m);
+ init_unity_map_range(m, table);
p += m->length;
}
@@ -2334,35 +2542,48 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
/*
* Init the device table to not allow DMA access for devices
*/
-static void init_device_table_dma(void)
+static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg)
{
u32 devid;
+ struct dev_table_entry *dev_table = pci_seg->dev_table;
- for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
- set_dev_entry_bit(devid, DEV_ENTRY_VALID);
- set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
+ if (dev_table == NULL)
+ return;
+
+ for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
+ __set_dev_entry_bit(dev_table, devid, DEV_ENTRY_VALID);
+ if (!amd_iommu_snp_en)
+ __set_dev_entry_bit(dev_table, devid, DEV_ENTRY_TRANSLATION);
}
}
-static void __init uninit_device_table_dma(void)
+static void __init uninit_device_table_dma(struct amd_iommu_pci_seg *pci_seg)
{
u32 devid;
+ struct dev_table_entry *dev_table = pci_seg->dev_table;
+
+ if (dev_table == NULL)
+ return;
- for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
- amd_iommu_dev_table[devid].data[0] = 0ULL;
- amd_iommu_dev_table[devid].data[1] = 0ULL;
+ for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
+ dev_table[devid].data[0] = 0ULL;
+ dev_table[devid].data[1] = 0ULL;
}
}
static void init_device_table(void)
{
+ struct amd_iommu_pci_seg *pci_seg;
u32 devid;
if (!amd_iommu_irq_remap)
return;
- for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
- set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
+ for_each_pci_segment(pci_seg) {
+ for (devid = 0; devid <= pci_seg->last_bdf; ++devid)
+ __set_dev_entry_bit(pci_seg->dev_table,
+ devid, DEV_ENTRY_IRQ_TBL_EN);
+ }
}
static void iommu_init_flags(struct amd_iommu *iommu)
@@ -2440,8 +2661,6 @@ static void iommu_enable_ga(struct amd_iommu *iommu)
#ifdef CONFIG_IRQ_REMAP
switch (amd_iommu_guest_ir) {
case AMD_IOMMU_GUEST_IR_VAPIC:
- iommu_feature_enable(iommu, CONTROL_GAM_EN);
- fallthrough;
case AMD_IOMMU_GUEST_IR_LEGACY_GA:
iommu_feature_enable(iommu, CONTROL_GA_EN);
iommu->irte_ops = &irte_128_ops;
@@ -2478,7 +2697,7 @@ static void early_enable_iommu(struct amd_iommu *iommu)
static void early_enable_iommus(void)
{
struct amd_iommu *iommu;
-
+ struct amd_iommu_pci_seg *pci_seg;
if (!copy_device_table()) {
/*
@@ -2488,9 +2707,14 @@ static void early_enable_iommus(void)
*/
if (amd_iommu_pre_enabled)
pr_err("Failed to copy DEV table from previous kernel.\n");
- if (old_dev_tbl_cpy != NULL)
- free_pages((unsigned long)old_dev_tbl_cpy,
- get_order(dev_table_size));
+
+ for_each_pci_segment(pci_seg) {
+ if (pci_seg->old_dev_tbl_cpy != NULL) {
+ free_pages((unsigned long)pci_seg->old_dev_tbl_cpy,
+ get_order(pci_seg->dev_table_size));
+ pci_seg->old_dev_tbl_cpy = NULL;
+ }
+ }
for_each_iommu(iommu) {
clear_translation_pre_enabled(iommu);
@@ -2498,9 +2722,13 @@ static void early_enable_iommus(void)
}
} else {
pr_info("Copied DEV table from previous kernel.\n");
- free_pages((unsigned long)amd_iommu_dev_table,
- get_order(dev_table_size));
- amd_iommu_dev_table = old_dev_tbl_cpy;
+
+ for_each_pci_segment(pci_seg) {
+ free_pages((unsigned long)pci_seg->dev_table,
+ get_order(pci_seg->dev_table_size));
+ pci_seg->dev_table = pci_seg->old_dev_tbl_cpy;
+ }
+
for_each_iommu(iommu) {
iommu_disable_command_buffer(iommu);
iommu_disable_event_buffer(iommu);
@@ -2512,19 +2740,6 @@ static void early_enable_iommus(void)
iommu_flush_all_caches(iommu);
}
}
-
-#ifdef CONFIG_IRQ_REMAP
- /*
- * Note: We have already checked GASup from IVRS table.
- * Now, we need to make sure that GAMSup is set.
- */
- if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
- !check_feature_on_all_iommus(FEATURE_GAM_VAPIC))
- amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
-
- if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
- amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
-#endif
}
static void enable_iommus_v2(void)
@@ -2537,10 +2752,72 @@ static void enable_iommus_v2(void)
}
}
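+/*
+ * Set up guest vAPIC support: disable any GA log left running by a
+ * previous kernel, verify GAM/SNPAVIC feature support, then enable the
+ * GA log and guest vAPIC on all IOMMUs.
+ */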
+static void enable_iommus_vapic(void)
+{
+#ifdef CONFIG_IRQ_REMAP
+ u32 status, i;
+ struct amd_iommu *iommu;
+
+ for_each_iommu(iommu) {
+ /*
+ * Disable GALog if already running. It could have been enabled
+ * in the previous boot before kdump.
+ */
+ status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+ if (!(status & MMIO_STATUS_GALOG_RUN_MASK))
+ continue;
+
+ iommu_feature_disable(iommu, CONTROL_GALOG_EN);
+ iommu_feature_disable(iommu, CONTROL_GAINT_EN);
+
+ /*
+ * Poll and wait for the GALogRun bit to clear before we can safely
+ * set/modify the GA Log registers.
+ */
+ for (i = 0; i < LOOP_TIMEOUT; ++i) {
+ status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+ if (!(status & MMIO_STATUS_GALOG_RUN_MASK))
+ break;
+ udelay(10);
+ }
+
+ if (WARN_ON(i >= LOOP_TIMEOUT))
+ return;
+ }
+
+ if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
+ !check_feature_on_all_iommus(FEATURE_GAM_VAPIC)) {
+ amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
+ return;
+ }
+
+ if (amd_iommu_snp_en &&
+ !FEATURE_SNPAVICSUP_GAM(amd_iommu_efr2)) {
+ pr_warn("Force to disable Virtual APIC due to SNP\n");
+ amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
+ return;
+ }
+
+ /* Enabling GAM and SNPAVIC support */
+ for_each_iommu(iommu) {
+ if (iommu_init_ga_log(iommu) ||
+ iommu_ga_log_enable(iommu))
+ return;
+
+ iommu_feature_enable(iommu, CONTROL_GAM_EN);
+ if (amd_iommu_snp_en)
+ iommu_feature_enable(iommu, CONTROL_SNPAVIC_EN);
+ }
+
+ amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
+ pr_info("Virtual APIC enabled\n");
+#endif
+}
+
static void enable_iommus(void)
{
early_enable_iommus();
-
+ enable_iommus_vapic();
enable_iommus_v2();
}
@@ -2590,27 +2867,11 @@ static struct syscore_ops amd_iommu_syscore_ops = {
static void __init free_iommu_resources(void)
{
- kmemleak_free(irq_lookup_table);
- free_pages((unsigned long)irq_lookup_table,
- get_order(rlookup_table_size));
- irq_lookup_table = NULL;
-
kmem_cache_destroy(amd_iommu_irq_cache);
amd_iommu_irq_cache = NULL;
- free_pages((unsigned long)amd_iommu_rlookup_table,
- get_order(rlookup_table_size));
- amd_iommu_rlookup_table = NULL;
-
- free_pages((unsigned long)amd_iommu_alias_table,
- get_order(alias_table_size));
- amd_iommu_alias_table = NULL;
-
- free_pages((unsigned long)amd_iommu_dev_table,
- get_order(dev_table_size));
- amd_iommu_dev_table = NULL;
-
free_iommu_all();
+ free_pci_segments();
}
/* SB IOAPIC is always on this device in AMD systems */
@@ -2709,7 +2970,7 @@ static void __init ivinfo_init(void *ivrs)
static int __init early_amd_iommu_init(void)
{
struct acpi_table_header *ivrs_base;
- int i, remap_cache_sz, ret;
+ int remap_cache_sz, ret;
acpi_status status;
if (!amd_iommu_detected)
@@ -2737,42 +2998,8 @@ static int __init early_amd_iommu_init(void)
amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
- /*
- * First parse ACPI tables to find the largest Bus/Dev/Func
- * we need to handle. Upon this information the shared data
- * structures for the IOMMUs in the system will be allocated
- */
- ret = find_last_devid_acpi(ivrs_base);
- if (ret)
- goto out;
-
- dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE);
- alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
- rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
-
/* Device table - directly used by all IOMMUs */
ret = -ENOMEM;
- amd_iommu_dev_table = (void *)__get_free_pages(
- GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
- get_order(dev_table_size));
- if (amd_iommu_dev_table == NULL)
- goto out;
-
- /*
- * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
- * IOMMU see for that device
- */
- amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
- get_order(alias_table_size));
- if (amd_iommu_alias_table == NULL)
- goto out;
-
- /* IOMMU rlookup table - find the IOMMU for a specific device */
- amd_iommu_rlookup_table = (void *)__get_free_pages(
- GFP_KERNEL | __GFP_ZERO,
- get_order(rlookup_table_size));
- if (amd_iommu_rlookup_table == NULL)
- goto out;
amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
GFP_KERNEL | __GFP_ZERO,
@@ -2781,12 +3008,6 @@ static int __init early_amd_iommu_init(void)
goto out;
/*
- * let all alias entries point to itself
- */
- for (i = 0; i <= amd_iommu_last_bdf; ++i)
- amd_iommu_alias_table[i] = i;
-
- /*
* never allocate domain 0 because it's used as the non-allocated and
* error value placeholder
*/
@@ -2808,6 +3029,7 @@ static int __init early_amd_iommu_init(void)
amd_iommu_irq_remap = check_ioapic_information();
if (amd_iommu_irq_remap) {
+ struct amd_iommu_pci_seg *pci_seg;
/*
* Interrupt remapping enabled, create kmem_cache for the
* remapping tables.
@@ -2824,13 +3046,10 @@ static int __init early_amd_iommu_init(void)
if (!amd_iommu_irq_cache)
goto out;
- irq_lookup_table = (void *)__get_free_pages(
- GFP_KERNEL | __GFP_ZERO,
- get_order(rlookup_table_size));
- kmemleak_alloc(irq_lookup_table, rlookup_table_size,
- 1, GFP_KERNEL);
- if (!irq_lookup_table)
- goto out;
+ for_each_pci_segment(pci_seg) {
+ if (alloc_irq_lookup_table(pci_seg))
+ goto out;
+ }
}
ret = init_memory_definitions(ivrs_base);
@@ -2937,6 +3156,7 @@ static int __init state_next(void)
register_syscore_ops(&amd_iommu_syscore_ops);
ret = amd_iommu_init_pci();
init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
+ enable_iommus_vapic();
enable_iommus_v2();
break;
case IOMMU_PCI_INIT:
@@ -2967,8 +3187,11 @@ static int __init state_next(void)
free_iommu_resources();
} else {
struct amd_iommu *iommu;
+ struct amd_iommu_pci_seg *pci_seg;
+
+ for_each_pci_segment(pci_seg)
+ uninit_device_table_dma(pci_seg);
- uninit_device_table_dma();
for_each_iommu(iommu)
iommu_flush_all_caches(iommu);
}
@@ -3161,15 +3384,17 @@ static int __init parse_amd_iommu_options(char *str)
static int __init parse_ivrs_ioapic(char *str)
{
- unsigned int bus, dev, fn;
+ u32 seg = 0, bus, dev, fn;
int ret, id, i;
- u16 devid;
+ u32 devid;
ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
-
if (ret != 4) {
- pr_err("Invalid command line: ivrs_ioapic%s\n", str);
- return 1;
+ ret = sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn);
+ if (ret != 5) {
+ pr_err("Invalid command line: ivrs_ioapic%s\n", str);
+ return 1;
+ }
}
if (early_ioapic_map_size == EARLY_MAP_SIZE) {
@@ -3178,7 +3403,7 @@ static int __init parse_ivrs_ioapic(char *str)
return 1;
}
- devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
+ devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
cmdline_maps = true;
i = early_ioapic_map_size++;
@@ -3191,15 +3416,17 @@ static int __init parse_ivrs_ioapic(char *str)
static int __init parse_ivrs_hpet(char *str)
{
- unsigned int bus, dev, fn;
+ u32 seg = 0, bus, dev, fn;
int ret, id, i;
- u16 devid;
+ u32 devid;
ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
-
if (ret != 4) {
- pr_err("Invalid command line: ivrs_hpet%s\n", str);
- return 1;
+ ret = sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn);
+ if (ret != 5) {
+ pr_err("Invalid command line: ivrs_hpet%s\n", str);
+ return 1;
+ }
}
if (early_hpet_map_size == EARLY_MAP_SIZE) {
@@ -3208,7 +3435,7 @@ static int __init parse_ivrs_hpet(char *str)
return 1;
}
- devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
+ devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
cmdline_maps = true;
i = early_hpet_map_size++;
@@ -3221,15 +3448,18 @@ static int __init parse_ivrs_hpet(char *str)
static int __init parse_ivrs_acpihid(char *str)
{
- u32 bus, dev, fn;
+ u32 seg = 0, bus, dev, fn;
char *hid, *uid, *p;
char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
int ret, i;
ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
if (ret != 4) {
- pr_err("Invalid command line: ivrs_acpihid(%s)\n", str);
- return 1;
+ ret = sscanf(str, "[%x:%x:%x.%x]=%s", &seg, &bus, &dev, &fn, acpiid);
+ if (ret != 5) {
+ pr_err("Invalid command line: ivrs_acpihid(%s)\n", str);
+ return 1;
+ }
}
p = acpiid;
@@ -3244,8 +3474,7 @@ static int __init parse_ivrs_acpihid(char *str)
i = early_acpihid_map_size++;
memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
- early_acpihid_map[i].devid =
- ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
+ early_acpihid_map[i].devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
early_acpihid_map[i].cmd_line = true;
return 1;
@@ -3260,7 +3489,12 @@ __setup("ivrs_acpihid", parse_ivrs_acpihid);
bool amd_iommu_v2_supported(void)
{
- return amd_iommu_v2_present;
+ /*
+ * Since DTE[Mode]=0 is prohibited on SNP-enabled systems
+ * (i.e. EFR[SNPSup]=1), the IOMMUv2 page table cannot be used without
+ * setting up the IOMMUv1 page table.
+ */
+ return amd_iommu_v2_present && !amd_iommu_snp_en;
}
EXPORT_SYMBOL(amd_iommu_v2_supported);
@@ -3363,3 +3597,41 @@ int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64
return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
}
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+int amd_iommu_snp_enable(void)
+{
+ /*
+ * SNP support requires that the IOMMU is enabled and not
+ * configured in passthrough mode.
+ */
+ if (no_iommu || iommu_default_passthrough()) {
+ pr_err("SNP: IOMMU is disabled or configured in passthrough mode, SNP cannot be supported");
+ return -EINVAL;
+ }
+
+ /*
+ * Prevent enabling SNP after IOMMU_ENABLED state because this process
+ * affects how the IOMMU driver sets up data structures and configures
+ * the IOMMU hardware.
+ */
+ if (init_state > IOMMU_ENABLED) {
+ pr_err("SNP: Too late to enable SNP for IOMMU.\n");
+ return -EINVAL;
+ }
+
+ amd_iommu_snp_en = check_feature_on_all_iommus(FEATURE_SNP);
+ if (!amd_iommu_snp_en)
+ return -EINVAL;
+
+ pr_info("SNP enabled\n");
+
+ /* Enforce IOMMU v1 pagetable when SNP is enabled. */
+ if (amd_iommu_pgtable != AMD_IOMMU_V1) {
+ pr_warn("Force to using AMD IOMMU v1 page table due to SNP\n");
+ amd_iommu_pgtable = AMD_IOMMU_V1;
+ }
+
+ return 0;
+}
+#endif
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index 6608d1717574..7d4b61e5db47 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -258,7 +258,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
__npte = PM_LEVEL_PDE(level, iommu_virt_to_phys(page));
/* pte could have been changed somewhere. */
- if (cmpxchg64(pte, __pte, __npte) != __pte)
+ if (!try_cmpxchg64(pte, &__pte, __npte))
free_page((unsigned long)page);
else if (IOMMU_PTE_PRESENT(__pte))
*updated = true;
@@ -341,10 +341,8 @@ static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist)
u64 *pt;
int mode;
- while (cmpxchg64(pte, pteval, 0) != pteval) {
+ while (!try_cmpxchg64(pte, &pteval, 0))
pr_warn("AMD-Vi: IOMMU pte changed since we read it\n");
- pteval = *pte;
- }
if (!IOMMU_PTE_PRESENT(pteval))
return;
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 840831d5d2ad..65b8e4fd8217 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -62,9 +62,6 @@
static DEFINE_SPINLOCK(pd_bitmap_lock);
-/* List of all available dev_data structures */
-static LLIST_HEAD(dev_data_list);
-
LIST_HEAD(ioapic_map);
LIST_HEAD(hpet_map);
LIST_HEAD(acpihid_map);
@@ -95,13 +92,6 @@ static void detach_device(struct device *dev);
*
****************************************************************************/
-static inline u16 get_pci_device_id(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
-
- return pci_dev_id(pdev);
-}
-
static inline int get_acpihid_device_id(struct device *dev,
struct acpihid_map_entry **entry)
{
@@ -122,16 +112,74 @@ static inline int get_acpihid_device_id(struct device *dev,
return -EINVAL;
}
-static inline int get_device_id(struct device *dev)
+static inline int get_device_sbdf_id(struct device *dev)
{
- int devid;
+ int sbdf;
if (dev_is_pci(dev))
- devid = get_pci_device_id(dev);
+ sbdf = get_pci_sbdf_id(to_pci_dev(dev));
else
- devid = get_acpihid_device_id(dev, NULL);
+ sbdf = get_acpihid_device_id(dev, NULL);
+
+ return sbdf;
+}
+
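+/* Return the device table of the PCI segment this IOMMU belongs to */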
+struct dev_table_entry *get_dev_table(struct amd_iommu *iommu)
+{
+ struct dev_table_entry *dev_table;
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
+
+ BUG_ON(pci_seg == NULL);
+ dev_table = pci_seg->dev_table;
+ BUG_ON(dev_table == NULL);
+
+ return dev_table;
+}
+
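+/* Return the PCI segment (domain) number a device belongs to */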
+static inline u16 get_device_segment(struct device *dev)
+{
+ u16 seg;
+
+ if (dev_is_pci(dev)) {
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ seg = pci_domain_nr(pdev->bus);
+ } else {
+ u32 devid = get_acpihid_device_id(dev, NULL);
+
+ seg = PCI_SBDF_TO_SEGID(devid);
+ }
+
+ return seg;
+}
+
+/* Writes the specific IOMMU for a device into the PCI segment rlookup table */
+void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid)
+{
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
+
+ pci_seg->rlookup_table[devid] = iommu;
+}
+
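+/* Find the IOMMU responsible for a device id within the given PCI segment */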
+static struct amd_iommu *__rlookup_amd_iommu(u16 seg, u16 devid)
+{
+ struct amd_iommu_pci_seg *pci_seg;
+
+ for_each_pci_segment(pci_seg) {
+ if (pci_seg->id == seg)
+ return pci_seg->rlookup_table[devid];
+ }
+ return NULL;
+}
- return devid;
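+/* Look up the IOMMU for a device from its PCI segment and SBDF id */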
+static struct amd_iommu *rlookup_amd_iommu(struct device *dev)
+{
+ u16 seg = get_device_segment(dev);
+ int devid = get_device_sbdf_id(dev);
+
+ if (devid < 0)
+ return NULL;
+ return __rlookup_amd_iommu(seg, PCI_SBDF_TO_DEVID(devid));
}
static struct protection_domain *to_pdomain(struct iommu_domain *dom)
@@ -139,9 +187,10 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
return container_of(dom, struct protection_domain, domain);
}
-static struct iommu_dev_data *alloc_dev_data(u16 devid)
+static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid)
{
struct iommu_dev_data *dev_data;
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
if (!dev_data)
@@ -151,19 +200,20 @@ static struct iommu_dev_data *alloc_dev_data(u16 devid)
dev_data->devid = devid;
ratelimit_default_init(&dev_data->rs);
- llist_add(&dev_data->dev_data_list, &dev_data_list);
+ llist_add(&dev_data->dev_data_list, &pci_seg->dev_data_list);
return dev_data;
}
-static struct iommu_dev_data *search_dev_data(u16 devid)
+static struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid)
{
struct iommu_dev_data *dev_data;
struct llist_node *node;
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
- if (llist_empty(&dev_data_list))
+ if (llist_empty(&pci_seg->dev_data_list))
return NULL;
- node = dev_data_list.first;
+ node = pci_seg->dev_data_list.first;
llist_for_each_entry(dev_data, node, dev_data_list) {
if (dev_data->devid == devid)
return dev_data;
@@ -174,67 +224,74 @@ static struct iommu_dev_data *search_dev_data(u16 devid)
static int clone_alias(struct pci_dev *pdev, u16 alias, void *data)
{
+ struct amd_iommu *iommu;
+ struct dev_table_entry *dev_table;
u16 devid = pci_dev_id(pdev);
if (devid == alias)
return 0;
- amd_iommu_rlookup_table[alias] =
- amd_iommu_rlookup_table[devid];
- memcpy(amd_iommu_dev_table[alias].data,
- amd_iommu_dev_table[devid].data,
- sizeof(amd_iommu_dev_table[alias].data));
+ iommu = rlookup_amd_iommu(&pdev->dev);
+ if (!iommu)
+ return 0;
+
+ amd_iommu_set_rlookup_table(iommu, alias);
+ dev_table = get_dev_table(iommu);
+ memcpy(dev_table[alias].data,
+ dev_table[devid].data,
+ sizeof(dev_table[alias].data));
return 0;
}
-static void clone_aliases(struct pci_dev *pdev)
+static void clone_aliases(struct amd_iommu *iommu, struct device *dev)
{
- if (!pdev)
+ struct pci_dev *pdev;
+
+ if (!dev_is_pci(dev))
return;
+ pdev = to_pci_dev(dev);
/*
* The IVRS alias stored in the alias table may not be
* part of the PCI DMA aliases if its bus differs
* from the original device.
*/
- clone_alias(pdev, amd_iommu_alias_table[pci_dev_id(pdev)], NULL);
+ clone_alias(pdev, iommu->pci_seg->alias_table[pci_dev_id(pdev)], NULL);
pci_for_each_dma_alias(pdev, clone_alias, NULL);
}
-static struct pci_dev *setup_aliases(struct device *dev)
+static void setup_aliases(struct amd_iommu *iommu, struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
u16 ivrs_alias;
/* For ACPI HID devices, there are no aliases */
if (!dev_is_pci(dev))
- return NULL;
+ return;
/*
* Add the IVRS alias to the pci aliases if it is on the same
* bus. The IVRS table may know about a quirk that we don't.
*/
- ivrs_alias = amd_iommu_alias_table[pci_dev_id(pdev)];
+ ivrs_alias = pci_seg->alias_table[pci_dev_id(pdev)];
if (ivrs_alias != pci_dev_id(pdev) &&
PCI_BUS_NUM(ivrs_alias) == pdev->bus->number)
pci_add_dma_alias(pdev, ivrs_alias & 0xff, 1);
- clone_aliases(pdev);
-
- return pdev;
+ clone_aliases(iommu, dev);
}
-static struct iommu_dev_data *find_dev_data(u16 devid)
+static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid)
{
struct iommu_dev_data *dev_data;
- struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
- dev_data = search_dev_data(devid);
+ dev_data = search_dev_data(iommu, devid);
if (dev_data == NULL) {
- dev_data = alloc_dev_data(devid);
+ dev_data = alloc_dev_data(iommu, devid);
if (!dev_data)
return NULL;
@@ -296,42 +353,49 @@ static bool pci_iommuv2_capable(struct pci_dev *pdev)
*/
static bool check_device(struct device *dev)
{
- int devid;
+ struct amd_iommu_pci_seg *pci_seg;
+ struct amd_iommu *iommu;
+ int devid, sbdf;
if (!dev)
return false;
- devid = get_device_id(dev);
- if (devid < 0)
+ sbdf = get_device_sbdf_id(dev);
+ if (sbdf < 0)
return false;
+ devid = PCI_SBDF_TO_DEVID(sbdf);
- /* Out of our scope? */
- if (devid > amd_iommu_last_bdf)
+ iommu = rlookup_amd_iommu(dev);
+ if (!iommu)
return false;
- if (amd_iommu_rlookup_table[devid] == NULL)
+ /* Out of our scope? */
+ pci_seg = iommu->pci_seg;
+ if (devid > pci_seg->last_bdf)
return false;
return true;
}
-static int iommu_init_device(struct device *dev)
+static int iommu_init_device(struct amd_iommu *iommu, struct device *dev)
{
struct iommu_dev_data *dev_data;
- int devid;
+ int devid, sbdf;
if (dev_iommu_priv_get(dev))
return 0;
- devid = get_device_id(dev);
- if (devid < 0)
- return devid;
+ sbdf = get_device_sbdf_id(dev);
+ if (sbdf < 0)
+ return sbdf;
- dev_data = find_dev_data(devid);
+ devid = PCI_SBDF_TO_DEVID(sbdf);
+ dev_data = find_dev_data(iommu, devid);
if (!dev_data)
return -ENOMEM;
- dev_data->pdev = setup_aliases(dev);
+ dev_data->dev = dev;
+ setup_aliases(iommu, dev);
/*
* By default we use passthrough mode for IOMMUv2 capable device.
@@ -341,9 +405,6 @@ static int iommu_init_device(struct device *dev)
*/
if ((iommu_default_passthrough() || !amd_iommu_force_isolation) &&
dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
- struct amd_iommu *iommu;
-
- iommu = amd_iommu_rlookup_table[dev_data->devid];
dev_data->iommu_v2 = iommu->is_iommu_v2;
}
@@ -352,18 +413,21 @@ static int iommu_init_device(struct device *dev)
return 0;
}
-static void iommu_ignore_device(struct device *dev)
+static void iommu_ignore_device(struct amd_iommu *iommu, struct device *dev)
{
- int devid;
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
+ int devid, sbdf;
- devid = get_device_id(dev);
- if (devid < 0)
+ sbdf = get_device_sbdf_id(dev);
+ if (sbdf < 0)
return;
- amd_iommu_rlookup_table[devid] = NULL;
- memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
+ devid = PCI_SBDF_TO_DEVID(sbdf);
+ pci_seg->rlookup_table[devid] = NULL;
+ memset(&dev_table[devid], 0, sizeof(struct dev_table_entry));
- setup_aliases(dev);
+ setup_aliases(iommu, dev);
}
static void amd_iommu_uninit_device(struct device *dev)
@@ -391,13 +455,13 @@ static void amd_iommu_uninit_device(struct device *dev)
*
****************************************************************************/
-static void dump_dte_entry(u16 devid)
+static void dump_dte_entry(struct amd_iommu *iommu, u16 devid)
{
int i;
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
for (i = 0; i < 4; ++i)
- pr_err("DTE[%d]: %016llx\n", i,
- amd_iommu_dev_table[devid].data[i]);
+ pr_err("DTE[%d]: %016llx\n", i, dev_table[devid].data[i]);
}
static void dump_command(unsigned long phys_addr)
@@ -409,7 +473,7 @@ static void dump_command(unsigned long phys_addr)
pr_err("CMD[%d]: %08x\n", i, cmd->data[i]);
}
-static void amd_iommu_report_rmp_hw_error(volatile u32 *event)
+static void amd_iommu_report_rmp_hw_error(struct amd_iommu *iommu, volatile u32 *event)
{
struct iommu_dev_data *dev_data = NULL;
int devid, vmg_tag, flags;
@@ -421,7 +485,7 @@ static void amd_iommu_report_rmp_hw_error(volatile u32 *event)
flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
spa = ((u64)event[3] << 32) | (event[2] & 0xFFFFFFF8);
- pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
+ pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
devid & 0xff);
if (pdev)
dev_data = dev_iommu_priv_get(&pdev->dev);
@@ -432,8 +496,8 @@ static void amd_iommu_report_rmp_hw_error(volatile u32 *event)
vmg_tag, spa, flags);
}
} else {
- pr_err_ratelimited("Event logged [RMP_HW_ERROR device=%02x:%02x.%x, vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ pr_err_ratelimited("Event logged [RMP_HW_ERROR device=%04x:%02x:%02x.%x, vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
vmg_tag, spa, flags);
}
@@ -441,7 +505,7 @@ static void amd_iommu_report_rmp_hw_error(volatile u32 *event)
pci_dev_put(pdev);
}
-static void amd_iommu_report_rmp_fault(volatile u32 *event)
+static void amd_iommu_report_rmp_fault(struct amd_iommu *iommu, volatile u32 *event)
{
struct iommu_dev_data *dev_data = NULL;
int devid, flags_rmp, vmg_tag, flags;
@@ -454,7 +518,7 @@ static void amd_iommu_report_rmp_fault(volatile u32 *event)
flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
gpa = ((u64)event[3] << 32) | event[2];
- pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
+ pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
devid & 0xff);
if (pdev)
dev_data = dev_iommu_priv_get(&pdev->dev);
@@ -465,8 +529,8 @@ static void amd_iommu_report_rmp_fault(volatile u32 *event)
vmg_tag, gpa, flags_rmp, flags);
}
} else {
- pr_err_ratelimited("Event logged [RMP_PAGE_FAULT device=%02x:%02x.%x, vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ pr_err_ratelimited("Event logged [RMP_PAGE_FAULT device=%04x:%02x:%02x.%x, vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
vmg_tag, gpa, flags_rmp, flags);
}
@@ -480,13 +544,14 @@ static void amd_iommu_report_rmp_fault(volatile u32 *event)
#define IS_WRITE_REQUEST(flags) \
((flags) & EVENT_FLAG_RW)
-static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
+static void amd_iommu_report_page_fault(struct amd_iommu *iommu,
+ u16 devid, u16 domain_id,
u64 address, int flags)
{
struct iommu_dev_data *dev_data = NULL;
struct pci_dev *pdev;
- pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
+ pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
devid & 0xff);
if (pdev)
dev_data = dev_iommu_priv_get(&pdev->dev);
@@ -511,8 +576,8 @@ static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
domain_id, address, flags);
}
} else {
- pr_err_ratelimited("Event logged [IO_PAGE_FAULT device=%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ pr_err_ratelimited("Event logged [IO_PAGE_FAULT device=%04x:%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
domain_id, address, flags);
}
@@ -549,26 +614,26 @@ retry:
}
if (type == EVENT_TYPE_IO_FAULT) {
- amd_iommu_report_page_fault(devid, pasid, address, flags);
+ amd_iommu_report_page_fault(iommu, devid, pasid, address, flags);
return;
}
switch (type) {
case EVENT_TYPE_ILL_DEV:
- dev_err(dev, "Event logged [ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ dev_err(dev, "Event logged [ILLEGAL_DEV_TABLE_ENTRY device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
pasid, address, flags);
- dump_dte_entry(devid);
+ dump_dte_entry(iommu, devid);
break;
case EVENT_TYPE_DEV_TAB_ERR:
- dev_err(dev, "Event logged [DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
+ dev_err(dev, "Event logged [DEV_TAB_HARDWARE_ERROR device=%04x:%02x:%02x.%x "
"address=0x%llx flags=0x%04x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
address, flags);
break;
case EVENT_TYPE_PAGE_TAB_ERR:
- dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x pasid=0x%04x address=0x%llx flags=0x%04x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%04x:%02x:%02x.%x pasid=0x%04x address=0x%llx flags=0x%04x]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
pasid, address, flags);
break;
case EVENT_TYPE_ILL_CMD:
@@ -580,26 +645,26 @@ retry:
address, flags);
break;
case EVENT_TYPE_IOTLB_INV_TO:
- dev_err(dev, "Event logged [IOTLB_INV_TIMEOUT device=%02x:%02x.%x address=0x%llx]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ dev_err(dev, "Event logged [IOTLB_INV_TIMEOUT device=%04x:%02x:%02x.%x address=0x%llx]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
address);
break;
case EVENT_TYPE_INV_DEV_REQ:
- dev_err(dev, "Event logged [INVALID_DEVICE_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ dev_err(dev, "Event logged [INVALID_DEVICE_REQUEST device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
pasid, address, flags);
break;
case EVENT_TYPE_RMP_FAULT:
- amd_iommu_report_rmp_fault(event);
+ amd_iommu_report_rmp_fault(iommu, event);
break;
case EVENT_TYPE_RMP_HW_ERR:
- amd_iommu_report_rmp_hw_error(event);
+ amd_iommu_report_rmp_hw_error(iommu, event);
break;
case EVENT_TYPE_INV_PPR_REQ:
pasid = PPR_PASID(*((u64 *)__evt));
tag = event[1] & 0x03FF;
- dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
pasid, address, flags, tag);
break;
default:
@@ -636,7 +701,7 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
fault.address = raw[1];
fault.pasid = PPR_PASID(raw[0]);
- fault.device_id = PPR_DEVID(raw[0]);
+ fault.sbdf = PCI_SEG_DEVID_TO_SBDF(iommu->pci_seg->id, PPR_DEVID(raw[0]));
fault.tag = PPR_TAG(raw[0]);
fault.flags = PPR_FLAGS(raw[0]);
@@ -1125,8 +1190,9 @@ static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
static void amd_iommu_flush_dte_all(struct amd_iommu *iommu)
{
u32 devid;
+ u16 last_bdf = iommu->pci_seg->last_bdf;
- for (devid = 0; devid <= 0xffff; ++devid)
+ for (devid = 0; devid <= last_bdf; ++devid)
iommu_flush_dte(iommu, devid);
iommu_completion_wait(iommu);
@@ -1139,8 +1205,9 @@ static void amd_iommu_flush_dte_all(struct amd_iommu *iommu)
static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
{
u32 dom_id;
+ u16 last_bdf = iommu->pci_seg->last_bdf;
- for (dom_id = 0; dom_id <= 0xffff; ++dom_id) {
+ for (dom_id = 0; dom_id <= last_bdf; ++dom_id) {
struct iommu_cmd cmd;
build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
dom_id, 1);
@@ -1183,8 +1250,9 @@ static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid)
static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)
{
u32 devid;
+ u16 last_bdf = iommu->pci_seg->last_bdf;
- for (devid = 0; devid <= MAX_DEV_TABLE_ENTRIES; devid++)
+ for (devid = 0; devid <= last_bdf; devid++)
iommu_flush_irt(iommu, devid);
iommu_completion_wait(iommu);
@@ -1212,7 +1280,9 @@ static int device_flush_iotlb(struct iommu_dev_data *dev_data,
int qdep;
qdep = dev_data->ats.qdep;
- iommu = amd_iommu_rlookup_table[dev_data->devid];
+ iommu = rlookup_amd_iommu(dev_data->dev);
+ if (!iommu)
+ return -EINVAL;
build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);
@@ -1232,20 +1302,28 @@ static int device_flush_dte_alias(struct pci_dev *pdev, u16 alias, void *data)
static int device_flush_dte(struct iommu_dev_data *dev_data)
{
struct amd_iommu *iommu;
+ struct pci_dev *pdev = NULL;
+ struct amd_iommu_pci_seg *pci_seg;
u16 alias;
int ret;
- iommu = amd_iommu_rlookup_table[dev_data->devid];
+ iommu = rlookup_amd_iommu(dev_data->dev);
+ if (!iommu)
+ return -EINVAL;
- if (dev_data->pdev)
- ret = pci_for_each_dma_alias(dev_data->pdev,
+ if (dev_is_pci(dev_data->dev))
+ pdev = to_pci_dev(dev_data->dev);
+
+ if (pdev)
+ ret = pci_for_each_dma_alias(pdev,
device_flush_dte_alias, iommu);
else
ret = iommu_flush_dte(iommu, dev_data->devid);
if (ret)
return ret;
- alias = amd_iommu_alias_table[dev_data->devid];
+ pci_seg = iommu->pci_seg;
+ alias = pci_seg->alias_table[dev_data->devid];
if (alias != dev_data->devid) {
ret = iommu_flush_dte(iommu, alias);
if (ret)
@@ -1461,28 +1539,35 @@ static void free_gcr3_table(struct protection_domain *domain)
free_page((unsigned long)domain->gcr3_tbl);
}
-static void set_dte_entry(u16 devid, struct protection_domain *domain,
- bool ats, bool ppr)
+static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
+ struct protection_domain *domain, bool ats, bool ppr)
{
u64 pte_root = 0;
u64 flags = 0;
u32 old_domid;
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
if (domain->iop.mode != PAGE_MODE_NONE)
pte_root = iommu_virt_to_phys(domain->iop.root);
pte_root |= (domain->iop.mode & DEV_ENTRY_MODE_MASK)
<< DEV_ENTRY_MODE_SHIFT;
- pte_root |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V | DTE_FLAG_TV;
- flags = amd_iommu_dev_table[devid].data[1];
+ pte_root |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V;
+
+ /*
+	 * When SNP is enabled, only set the TV bit when IOMMU
+ * page translation is in use.
+ */
+ if (!amd_iommu_snp_en || (domain->id != 0))
+ pte_root |= DTE_FLAG_TV;
+
+ flags = dev_table[devid].data[1];
if (ats)
flags |= DTE_FLAG_IOTLB;
if (ppr) {
- struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
-
if (iommu_feature(iommu, FEATURE_EPHSUP))
pte_root |= 1ULL << DEV_ENTRY_PPR;
}
@@ -1516,9 +1601,9 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
flags &= ~DEV_DOMID_MASK;
flags |= domain->id;
- old_domid = amd_iommu_dev_table[devid].data[1] & DEV_DOMID_MASK;
- amd_iommu_dev_table[devid].data[1] = flags;
- amd_iommu_dev_table[devid].data[0] = pte_root;
+ old_domid = dev_table[devid].data[1] & DEV_DOMID_MASK;
+ dev_table[devid].data[1] = flags;
+ dev_table[devid].data[0] = pte_root;
/*
* A kdump kernel might be replacing a domain ID that was copied from
@@ -1526,19 +1611,23 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
* entries for the old domain ID that is being overwritten
*/
if (old_domid) {
- struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
-
amd_iommu_flush_tlb_domid(iommu, old_domid);
}
}
-static void clear_dte_entry(u16 devid)
+static void clear_dte_entry(struct amd_iommu *iommu, u16 devid)
{
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
+
/* remove entry from the device table seen by the hardware */
- amd_iommu_dev_table[devid].data[0] = DTE_FLAG_V | DTE_FLAG_TV;
- amd_iommu_dev_table[devid].data[1] &= DTE_FLAG_MASK;
+ dev_table[devid].data[0] = DTE_FLAG_V;
+
+ if (!amd_iommu_snp_en)
+ dev_table[devid].data[0] |= DTE_FLAG_TV;
+
+ dev_table[devid].data[1] &= DTE_FLAG_MASK;
- amd_iommu_apply_erratum_63(devid);
+ amd_iommu_apply_erratum_63(iommu, devid);
}
static void do_attach(struct iommu_dev_data *dev_data,
@@ -1547,7 +1636,9 @@ static void do_attach(struct iommu_dev_data *dev_data,
struct amd_iommu *iommu;
bool ats;
- iommu = amd_iommu_rlookup_table[dev_data->devid];
+ iommu = rlookup_amd_iommu(dev_data->dev);
+ if (!iommu)
+ return;
ats = dev_data->ats.enabled;
/* Update data structures */
@@ -1559,9 +1650,9 @@ static void do_attach(struct iommu_dev_data *dev_data,
domain->dev_cnt += 1;
/* Update device table */
- set_dte_entry(dev_data->devid, domain,
+ set_dte_entry(iommu, dev_data->devid, domain,
ats, dev_data->iommu_v2);
- clone_aliases(dev_data->pdev);
+ clone_aliases(iommu, dev_data->dev);
device_flush_dte(dev_data);
}
@@ -1571,13 +1662,15 @@ static void do_detach(struct iommu_dev_data *dev_data)
struct protection_domain *domain = dev_data->domain;
struct amd_iommu *iommu;
- iommu = amd_iommu_rlookup_table[dev_data->devid];
+ iommu = rlookup_amd_iommu(dev_data->dev);
+ if (!iommu)
+ return;
/* Update data structures */
dev_data->domain = NULL;
list_del(&dev_data->list);
- clear_dte_entry(dev_data->devid);
- clone_aliases(dev_data->pdev);
+ clear_dte_entry(iommu, dev_data->devid);
+ clone_aliases(iommu, dev_data->dev);
/* Flush the DTE entry */
device_flush_dte(dev_data);
@@ -1749,23 +1842,24 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev)
{
struct iommu_device *iommu_dev;
struct amd_iommu *iommu;
- int ret, devid;
+ int ret;
if (!check_device(dev))
return ERR_PTR(-ENODEV);
- devid = get_device_id(dev);
- iommu = amd_iommu_rlookup_table[devid];
+ iommu = rlookup_amd_iommu(dev);
+ if (!iommu)
+ return ERR_PTR(-ENODEV);
if (dev_iommu_priv_get(dev))
return &iommu->iommu;
- ret = iommu_init_device(dev);
+ ret = iommu_init_device(iommu, dev);
if (ret) {
if (ret != -ENOTSUPP)
dev_err(dev, "Failed to initialize - trying to proceed anyway\n");
iommu_dev = ERR_PTR(ret);
- iommu_ignore_device(dev);
+ iommu_ignore_device(iommu, dev);
} else {
amd_iommu_set_pci_msi_domain(dev, iommu);
iommu_dev = &iommu->iommu;
@@ -1785,13 +1879,14 @@ static void amd_iommu_probe_finalize(struct device *dev)
static void amd_iommu_release_device(struct device *dev)
{
- int devid = get_device_id(dev);
struct amd_iommu *iommu;
if (!check_device(dev))
return;
- iommu = amd_iommu_rlookup_table[devid];
+ iommu = rlookup_amd_iommu(dev);
+ if (!iommu)
+ return;
amd_iommu_uninit_device(dev);
iommu_completion_wait(iommu);
@@ -1816,9 +1911,13 @@ static void update_device_table(struct protection_domain *domain)
struct iommu_dev_data *dev_data;
list_for_each_entry(dev_data, &domain->dev_list, list) {
- set_dte_entry(dev_data->devid, domain,
+ struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev);
+
+ if (!iommu)
+ continue;
+ set_dte_entry(iommu, dev_data->devid, domain,
dev_data->ats.enabled, dev_data->iommu_v2);
- clone_aliases(dev_data->pdev);
+ clone_aliases(iommu, dev_data->dev);
}
}
@@ -1969,6 +2068,13 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
{
struct protection_domain *domain;
+ /*
+	 * Since DTE[Mode]=0 is prohibited on an SNP-enabled system,
+	 * default to using IOMMU_DOMAIN_DMA[_FQ].
+ */
+ if (amd_iommu_snp_en && (type == IOMMU_DOMAIN_IDENTITY))
+ return NULL;
+
domain = protection_domain_alloc(type);
if (!domain)
return NULL;
@@ -2004,7 +2110,6 @@ static void amd_iommu_detach_device(struct iommu_domain *dom,
struct device *dev)
{
struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
- int devid = get_device_id(dev);
struct amd_iommu *iommu;
if (!check_device(dev))
@@ -2013,7 +2118,7 @@ static void amd_iommu_detach_device(struct iommu_domain *dom,
if (dev_data->domain != NULL)
detach_device(dev);
- iommu = amd_iommu_rlookup_table[devid];
+ iommu = rlookup_amd_iommu(dev);
if (!iommu)
return;
@@ -2040,7 +2145,7 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
dev_data = dev_iommu_priv_get(dev);
dev_data->defer_attach = false;
- iommu = amd_iommu_rlookup_table[dev_data->devid];
+ iommu = rlookup_amd_iommu(dev);
if (!iommu)
return -EINVAL;
@@ -2169,13 +2274,21 @@ static void amd_iommu_get_resv_regions(struct device *dev,
{
struct iommu_resv_region *region;
struct unity_map_entry *entry;
- int devid;
+ struct amd_iommu *iommu;
+ struct amd_iommu_pci_seg *pci_seg;
+ int devid, sbdf;
- devid = get_device_id(dev);
- if (devid < 0)
+ sbdf = get_device_sbdf_id(dev);
+ if (sbdf < 0)
+ return;
+
+ devid = PCI_SBDF_TO_DEVID(sbdf);
+ iommu = rlookup_amd_iommu(dev);
+ if (!iommu)
return;
+ pci_seg = iommu->pci_seg;
- list_for_each_entry(entry, &amd_iommu_unity_map, list) {
+ list_for_each_entry(entry, &pci_seg->unity_map, list) {
int type, prot = 0;
size_t length;
@@ -2280,7 +2393,6 @@ const struct iommu_ops amd_iommu_ops = {
.probe_finalize = amd_iommu_probe_finalize,
.device_group = amd_iommu_device_group,
.get_resv_regions = amd_iommu_get_resv_regions,
- .put_resv_regions = generic_iommu_put_resv_regions,
.is_attach_deferred = amd_iommu_is_attach_deferred,
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
.def_domain_type = amd_iommu_def_domain_type,
@@ -2419,8 +2531,9 @@ static int __flush_pasid(struct protection_domain *domain, u32 pasid,
continue;
qdep = dev_data->ats.qdep;
- iommu = amd_iommu_rlookup_table[dev_data->devid];
-
+ iommu = rlookup_amd_iommu(dev_data->dev);
+ if (!iommu)
+ continue;
build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
qdep, address, size);
@@ -2582,7 +2695,9 @@ int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
struct iommu_cmd cmd;
dev_data = dev_iommu_priv_get(&pdev->dev);
- iommu = amd_iommu_rlookup_table[dev_data->devid];
+ iommu = rlookup_amd_iommu(&pdev->dev);
+ if (!iommu)
+ return -ENODEV;
build_complete_ppr(&cmd, dev_data->devid, pasid, status,
tag, dev_data->pri_tlp);
@@ -2644,30 +2759,35 @@ EXPORT_SYMBOL(amd_iommu_device_info);
static struct irq_chip amd_ir_chip;
static DEFINE_SPINLOCK(iommu_table_lock);
-static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
+static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid,
+ struct irq_remap_table *table)
{
u64 dte;
+ struct dev_table_entry *dev_table = get_dev_table(iommu);
- dte = amd_iommu_dev_table[devid].data[2];
+ dte = dev_table[devid].data[2];
dte &= ~DTE_IRQ_PHYS_ADDR_MASK;
dte |= iommu_virt_to_phys(table->table);
dte |= DTE_IRQ_REMAP_INTCTL;
dte |= DTE_INTTABLEN;
dte |= DTE_IRQ_REMAP_ENABLE;
- amd_iommu_dev_table[devid].data[2] = dte;
+ dev_table[devid].data[2] = dte;
}
-static struct irq_remap_table *get_irq_table(u16 devid)
+static struct irq_remap_table *get_irq_table(struct amd_iommu *iommu, u16 devid)
{
struct irq_remap_table *table;
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
- if (WARN_ONCE(!amd_iommu_rlookup_table[devid],
- "%s: no iommu for devid %x\n", __func__, devid))
+ if (WARN_ONCE(!pci_seg->rlookup_table[devid],
+ "%s: no iommu for devid %x:%x\n",
+ __func__, pci_seg->id, devid))
return NULL;
- table = irq_lookup_table[devid];
- if (WARN_ONCE(!table, "%s: no table for devid %x\n", __func__, devid))
+ table = pci_seg->irq_lookup_table[devid];
+ if (WARN_ONCE(!table, "%s: no table for devid %x:%x\n",
+ __func__, pci_seg->id, devid))
return NULL;
return table;
@@ -2700,8 +2820,10 @@ static struct irq_remap_table *__alloc_irq_table(void)
static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
struct irq_remap_table *table)
{
- irq_lookup_table[devid] = table;
- set_dte_irq_entry(devid, table);
+ struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
+
+ pci_seg->irq_lookup_table[devid] = table;
+ set_dte_irq_entry(iommu, devid, table);
iommu_flush_dte(iommu, devid);
}
@@ -2709,35 +2831,38 @@ static int set_remap_table_entry_alias(struct pci_dev *pdev, u16 alias,
void *data)
{
struct irq_remap_table *table = data;
+ struct amd_iommu_pci_seg *pci_seg;
+ struct amd_iommu *iommu = rlookup_amd_iommu(&pdev->dev);
- irq_lookup_table[alias] = table;
- set_dte_irq_entry(alias, table);
+ if (!iommu)
+ return -EINVAL;
- iommu_flush_dte(amd_iommu_rlookup_table[alias], alias);
+ pci_seg = iommu->pci_seg;
+ pci_seg->irq_lookup_table[alias] = table;
+ set_dte_irq_entry(iommu, alias, table);
+ iommu_flush_dte(pci_seg->rlookup_table[alias], alias);
return 0;
}
-static struct irq_remap_table *alloc_irq_table(u16 devid, struct pci_dev *pdev)
+static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu,
+ u16 devid, struct pci_dev *pdev)
{
struct irq_remap_table *table = NULL;
struct irq_remap_table *new_table = NULL;
- struct amd_iommu *iommu;
+ struct amd_iommu_pci_seg *pci_seg;
unsigned long flags;
u16 alias;
spin_lock_irqsave(&iommu_table_lock, flags);
- iommu = amd_iommu_rlookup_table[devid];
- if (!iommu)
- goto out_unlock;
-
- table = irq_lookup_table[devid];
+ pci_seg = iommu->pci_seg;
+ table = pci_seg->irq_lookup_table[devid];
if (table)
goto out_unlock;
- alias = amd_iommu_alias_table[devid];
- table = irq_lookup_table[alias];
+ alias = pci_seg->alias_table[devid];
+ table = pci_seg->irq_lookup_table[alias];
if (table) {
set_remap_table_entry(iommu, devid, table);
goto out_wait;
@@ -2751,11 +2876,11 @@ static struct irq_remap_table *alloc_irq_table(u16 devid, struct pci_dev *pdev)
spin_lock_irqsave(&iommu_table_lock, flags);
- table = irq_lookup_table[devid];
+ table = pci_seg->irq_lookup_table[devid];
if (table)
goto out_unlock;
- table = irq_lookup_table[alias];
+ table = pci_seg->irq_lookup_table[alias];
if (table) {
set_remap_table_entry(iommu, devid, table);
goto out_wait;
@@ -2786,18 +2911,14 @@ out_unlock:
return table;
}
-static int alloc_irq_index(u16 devid, int count, bool align,
- struct pci_dev *pdev)
+static int alloc_irq_index(struct amd_iommu *iommu, u16 devid, int count,
+ bool align, struct pci_dev *pdev)
{
struct irq_remap_table *table;
int index, c, alignment = 1;
unsigned long flags;
- struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
- if (!iommu)
- return -ENODEV;
-
- table = alloc_irq_table(devid, pdev);
+ table = alloc_irq_table(iommu, devid, pdev);
if (!table)
return -ENODEV;
@@ -2836,20 +2957,15 @@ out:
return index;
}
-static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
- struct amd_ir_data *data)
+static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
+ struct irte_ga *irte, struct amd_ir_data *data)
{
bool ret;
struct irq_remap_table *table;
- struct amd_iommu *iommu;
unsigned long flags;
struct irte_ga *entry;
- iommu = amd_iommu_rlookup_table[devid];
- if (iommu == NULL)
- return -EINVAL;
-
- table = get_irq_table(devid);
+ table = get_irq_table(iommu, devid);
if (!table)
return -ENOMEM;
@@ -2880,17 +2996,13 @@ static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
return 0;
}
-static int modify_irte(u16 devid, int index, union irte *irte)
+static int modify_irte(struct amd_iommu *iommu,
+ u16 devid, int index, union irte *irte)
{
struct irq_remap_table *table;
- struct amd_iommu *iommu;
unsigned long flags;
- iommu = amd_iommu_rlookup_table[devid];
- if (iommu == NULL)
- return -EINVAL;
-
- table = get_irq_table(devid);
+ table = get_irq_table(iommu, devid);
if (!table)
return -ENOMEM;
@@ -2904,17 +3016,12 @@ static int modify_irte(u16 devid, int index, union irte *irte)
return 0;
}
-static void free_irte(u16 devid, int index)
+static void free_irte(struct amd_iommu *iommu, u16 devid, int index)
{
struct irq_remap_table *table;
- struct amd_iommu *iommu;
unsigned long flags;
- iommu = amd_iommu_rlookup_table[devid];
- if (iommu == NULL)
- return;
-
- table = get_irq_table(devid);
+ table = get_irq_table(iommu, devid);
if (!table)
return;
@@ -2956,49 +3063,49 @@ static void irte_ga_prepare(void *entry,
irte->lo.fields_remap.valid = 1;
}
-static void irte_activate(void *entry, u16 devid, u16 index)
+static void irte_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
{
union irte *irte = (union irte *) entry;
irte->fields.valid = 1;
- modify_irte(devid, index, irte);
+ modify_irte(iommu, devid, index, irte);
}
-static void irte_ga_activate(void *entry, u16 devid, u16 index)
+static void irte_ga_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
{
struct irte_ga *irte = (struct irte_ga *) entry;
irte->lo.fields_remap.valid = 1;
- modify_irte_ga(devid, index, irte, NULL);
+ modify_irte_ga(iommu, devid, index, irte, NULL);
}
-static void irte_deactivate(void *entry, u16 devid, u16 index)
+static void irte_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
{
union irte *irte = (union irte *) entry;
irte->fields.valid = 0;
- modify_irte(devid, index, irte);
+ modify_irte(iommu, devid, index, irte);
}
-static void irte_ga_deactivate(void *entry, u16 devid, u16 index)
+static void irte_ga_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
{
struct irte_ga *irte = (struct irte_ga *) entry;
irte->lo.fields_remap.valid = 0;
- modify_irte_ga(devid, index, irte, NULL);
+ modify_irte_ga(iommu, devid, index, irte, NULL);
}
-static void irte_set_affinity(void *entry, u16 devid, u16 index,
+static void irte_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index,
u8 vector, u32 dest_apicid)
{
union irte *irte = (union irte *) entry;
irte->fields.vector = vector;
irte->fields.destination = dest_apicid;
- modify_irte(devid, index, irte);
+ modify_irte(iommu, devid, index, irte);
}
-static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
+static void irte_ga_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index,
u8 vector, u32 dest_apicid)
{
struct irte_ga *irte = (struct irte_ga *) entry;
@@ -3009,7 +3116,7 @@ static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
APICID_TO_IRTE_DEST_LO(dest_apicid);
irte->hi.fields.destination =
APICID_TO_IRTE_DEST_HI(dest_apicid);
- modify_irte_ga(devid, index, irte, NULL);
+ modify_irte_ga(iommu, devid, index, irte, NULL);
}
}
@@ -3068,7 +3175,7 @@ static int get_devid(struct irq_alloc_info *info)
return get_hpet_devid(info->devid);
case X86_IRQ_ALLOC_TYPE_PCI_MSI:
case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
- return get_device_id(msi_desc_to_dev(info->desc));
+ return get_device_sbdf_id(msi_desc_to_dev(info->desc));
default:
WARN_ON_ONCE(1);
return -1;
@@ -3097,7 +3204,7 @@ static void irq_remapping_prepare_irte(struct amd_ir_data *data,
int devid, int index, int sub_handle)
{
struct irq_2_irte *irte_info = &data->irq_2_irte;
- struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+ struct amd_iommu *iommu = data->iommu;
if (!iommu)
return;
@@ -3148,8 +3255,9 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
struct irq_alloc_info *info = arg;
struct irq_data *irq_data;
struct amd_ir_data *data = NULL;
+ struct amd_iommu *iommu;
struct irq_cfg *cfg;
- int i, ret, devid;
+ int i, ret, devid, seg, sbdf;
int index;
if (!info)
@@ -3165,8 +3273,14 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI)
info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
- devid = get_devid(info);
- if (devid < 0)
+ sbdf = get_devid(info);
+ if (sbdf < 0)
+ return -EINVAL;
+
+ seg = PCI_SBDF_TO_SEGID(sbdf);
+ devid = PCI_SBDF_TO_DEVID(sbdf);
+ iommu = __rlookup_amd_iommu(seg, devid);
+ if (!iommu)
return -EINVAL;
ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
@@ -3175,9 +3289,8 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
struct irq_remap_table *table;
- struct amd_iommu *iommu;
- table = alloc_irq_table(devid, NULL);
+ table = alloc_irq_table(iommu, devid, NULL);
if (table) {
if (!table->min_index) {
/*
@@ -3185,7 +3298,6 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
* interrupts.
*/
table->min_index = 32;
- iommu = amd_iommu_rlookup_table[devid];
for (i = 0; i < 32; ++i)
iommu->irte_ops->set_allocated(table, i);
}
@@ -3198,10 +3310,10 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
info->type == X86_IRQ_ALLOC_TYPE_PCI_MSIX) {
bool align = (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI);
- index = alloc_irq_index(devid, nr_irqs, align,
+ index = alloc_irq_index(iommu, devid, nr_irqs, align,
msi_desc_to_pci_dev(info->desc));
} else {
- index = alloc_irq_index(devid, nr_irqs, false, NULL);
+ index = alloc_irq_index(iommu, devid, nr_irqs, false, NULL);
}
if (index < 0) {
@@ -3233,6 +3345,7 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
goto out_free_data;
}
+ data->iommu = iommu;
irq_data->hwirq = (devid << 16) + i;
irq_data->chip_data = data;
irq_data->chip = &amd_ir_chip;
@@ -3249,7 +3362,7 @@ out_free_data:
kfree(irq_data->chip_data);
}
for (i = 0; i < nr_irqs; i++)
- free_irte(devid, index + i);
+ free_irte(iommu, devid, index + i);
out_free_parent:
irq_domain_free_irqs_common(domain, virq, nr_irqs);
return ret;
@@ -3268,7 +3381,7 @@ static void irq_remapping_free(struct irq_domain *domain, unsigned int virq,
if (irq_data && irq_data->chip_data) {
data = irq_data->chip_data;
irte_info = &data->irq_2_irte;
- free_irte(irte_info->devid, irte_info->index);
+ free_irte(data->iommu, irte_info->devid, irte_info->index);
kfree(data->entry);
kfree(data);
}
@@ -3286,13 +3399,13 @@ static int irq_remapping_activate(struct irq_domain *domain,
{
struct amd_ir_data *data = irq_data->chip_data;
struct irq_2_irte *irte_info = &data->irq_2_irte;
- struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
+ struct amd_iommu *iommu = data->iommu;
struct irq_cfg *cfg = irqd_cfg(irq_data);
if (!iommu)
return 0;
- iommu->irte_ops->activate(data->entry, irte_info->devid,
+ iommu->irte_ops->activate(iommu, data->entry, irte_info->devid,
irte_info->index);
amd_ir_update_irte(irq_data, iommu, data, irte_info, cfg);
return 0;
@@ -3303,10 +3416,10 @@ static void irq_remapping_deactivate(struct irq_domain *domain,
{
struct amd_ir_data *data = irq_data->chip_data;
struct irq_2_irte *irte_info = &data->irq_2_irte;
- struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
+ struct amd_iommu *iommu = data->iommu;
if (iommu)
- iommu->irte_ops->deactivate(data->entry, irte_info->devid,
+ iommu->irte_ops->deactivate(iommu, data->entry, irte_info->devid,
irte_info->index);
}
@@ -3326,8 +3439,8 @@ static int irq_remapping_select(struct irq_domain *d, struct irq_fwspec *fwspec,
if (devid < 0)
return 0;
+ iommu = __rlookup_amd_iommu((devid >> 16), (devid & 0xffff));
- iommu = amd_iommu_rlookup_table[devid];
return iommu && iommu->ir_domain == d;
}
@@ -3361,7 +3474,7 @@ int amd_iommu_activate_guest_mode(void *data)
entry->hi.fields.vector = ir_data->ga_vector;
entry->lo.fields_vapic.ga_tag = ir_data->ga_tag;
- return modify_irte_ga(ir_data->irq_2_irte.devid,
+ return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
ir_data->irq_2_irte.index, entry, ir_data);
}
EXPORT_SYMBOL(amd_iommu_activate_guest_mode);
@@ -3391,7 +3504,7 @@ int amd_iommu_deactivate_guest_mode(void *data)
entry->hi.fields.destination =
APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);
- return modify_irte_ga(ir_data->irq_2_irte.devid,
+ return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
ir_data->irq_2_irte.index, entry, ir_data);
}
EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode);
@@ -3399,12 +3512,16 @@ EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode);
static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
{
int ret;
- struct amd_iommu *iommu;
struct amd_iommu_pi_data *pi_data = vcpu_info;
struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data;
struct amd_ir_data *ir_data = data->chip_data;
struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
- struct iommu_dev_data *dev_data = search_dev_data(irte_info->devid);
+ struct iommu_dev_data *dev_data;
+
+ if (ir_data->iommu == NULL)
+ return -EINVAL;
+
+ dev_data = search_dev_data(ir_data->iommu, irte_info->devid);
/* Note:
* This device has never been set up for guest mode.
@@ -3426,10 +3543,6 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
pi_data->is_guest_mode = false;
}
- iommu = amd_iommu_rlookup_table[irte_info->devid];
- if (iommu == NULL)
- return -EINVAL;
-
pi_data->prev_ga_tag = ir_data->cached_ga_tag;
if (pi_data->is_guest_mode) {
ir_data->ga_root_ptr = (pi_data->base >> 12);
@@ -3463,7 +3576,7 @@ static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
* Atomically updates the IRTE with the new destination, vector
* and flushes the interrupt entry cache.
*/
- iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid,
+ iommu->irte_ops->set_affinity(iommu, ir_data->entry, irte_info->devid,
irte_info->index, cfg->vector,
cfg->dest_apicid);
}
@@ -3475,7 +3588,7 @@ static int amd_ir_set_affinity(struct irq_data *data,
struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
struct irq_cfg *cfg = irqd_cfg(data);
struct irq_data *parent = data->parent_data;
- struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
+ struct amd_iommu *iommu = ir_data->iommu;
int ret;
if (!iommu)
@@ -3545,11 +3658,11 @@ int amd_iommu_update_ga(int cpu, bool is_run, void *data)
!ref || !entry || !entry->lo.fields_vapic.guest_mode)
return 0;
- iommu = amd_iommu_rlookup_table[devid];
+ iommu = ir_data->iommu;
if (!iommu)
return -ENODEV;
- table = get_irq_table(devid);
+ table = get_irq_table(iommu, devid);
if (!table)
return -ENODEV;
diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c
index afb3efd565b7..696d5555be57 100644
--- a/drivers/iommu/amd/iommu_v2.c
+++ b/drivers/iommu/amd/iommu_v2.c
@@ -51,7 +51,7 @@ struct pasid_state {
struct device_state {
struct list_head list;
- u16 devid;
+ u32 sbdf;
atomic_t count;
struct pci_dev *pdev;
struct pasid_state **states;
@@ -83,35 +83,25 @@ static struct workqueue_struct *iommu_wq;
static void free_pasid_states(struct device_state *dev_state);
-static u16 device_id(struct pci_dev *pdev)
-{
- u16 devid;
-
- devid = pdev->bus->number;
- devid = (devid << 8) | pdev->devfn;
-
- return devid;
-}
-
-static struct device_state *__get_device_state(u16 devid)
+static struct device_state *__get_device_state(u32 sbdf)
{
struct device_state *dev_state;
list_for_each_entry(dev_state, &state_list, list) {
- if (dev_state->devid == devid)
+ if (dev_state->sbdf == sbdf)
return dev_state;
}
return NULL;
}
-static struct device_state *get_device_state(u16 devid)
+static struct device_state *get_device_state(u32 sbdf)
{
struct device_state *dev_state;
unsigned long flags;
spin_lock_irqsave(&state_lock, flags);
- dev_state = __get_device_state(devid);
+ dev_state = __get_device_state(sbdf);
if (dev_state != NULL)
atomic_inc(&dev_state->count);
spin_unlock_irqrestore(&state_lock, flags);
@@ -528,15 +518,16 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
unsigned long flags;
struct fault *fault;
bool finish;
- u16 tag, devid;
+ u16 tag, devid, seg_id;
int ret;
iommu_fault = data;
tag = iommu_fault->tag & 0x1ff;
finish = (iommu_fault->tag >> 9) & 1;
- devid = iommu_fault->device_id;
- pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
+ seg_id = PCI_SBDF_TO_SEGID(iommu_fault->sbdf);
+ devid = PCI_SBDF_TO_DEVID(iommu_fault->sbdf);
+ pdev = pci_get_domain_bus_and_slot(seg_id, PCI_BUS_NUM(devid),
devid & 0xff);
if (!pdev)
return -ENODEV;
@@ -550,7 +541,7 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
goto out;
}
- dev_state = get_device_state(iommu_fault->device_id);
+ dev_state = get_device_state(iommu_fault->sbdf);
if (dev_state == NULL)
goto out;
@@ -609,7 +600,7 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, u32 pasid,
struct pasid_state *pasid_state;
struct device_state *dev_state;
struct mm_struct *mm;
- u16 devid;
+ u32 sbdf;
int ret;
might_sleep();
@@ -617,8 +608,8 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, u32 pasid,
if (!amd_iommu_v2_supported())
return -ENODEV;
- devid = device_id(pdev);
- dev_state = get_device_state(devid);
+ sbdf = get_pci_sbdf_id(pdev);
+ dev_state = get_device_state(sbdf);
if (dev_state == NULL)
return -EINVAL;
@@ -692,15 +683,15 @@ void amd_iommu_unbind_pasid(struct pci_dev *pdev, u32 pasid)
{
struct pasid_state *pasid_state;
struct device_state *dev_state;
- u16 devid;
+ u32 sbdf;
might_sleep();
if (!amd_iommu_v2_supported())
return;
- devid = device_id(pdev);
- dev_state = get_device_state(devid);
+ sbdf = get_pci_sbdf_id(pdev);
+ dev_state = get_device_state(sbdf);
if (dev_state == NULL)
return;
@@ -742,7 +733,7 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
struct iommu_group *group;
unsigned long flags;
int ret, tmp;
- u16 devid;
+ u32 sbdf;
might_sleep();
@@ -759,7 +750,7 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
if (pasids <= 0 || pasids > (PASID_MASK + 1))
return -EINVAL;
- devid = device_id(pdev);
+ sbdf = get_pci_sbdf_id(pdev);
dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
if (dev_state == NULL)
@@ -768,7 +759,7 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
spin_lock_init(&dev_state->lock);
init_waitqueue_head(&dev_state->wq);
dev_state->pdev = pdev;
- dev_state->devid = devid;
+ dev_state->sbdf = sbdf;
tmp = pasids;
for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
@@ -806,7 +797,7 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
spin_lock_irqsave(&state_lock, flags);
- if (__get_device_state(devid) != NULL) {
+ if (__get_device_state(sbdf) != NULL) {
spin_unlock_irqrestore(&state_lock, flags);
ret = -EBUSY;
goto out_free_domain;
@@ -838,16 +829,16 @@ void amd_iommu_free_device(struct pci_dev *pdev)
{
struct device_state *dev_state;
unsigned long flags;
- u16 devid;
+ u32 sbdf;
if (!amd_iommu_v2_supported())
return;
- devid = device_id(pdev);
+ sbdf = get_pci_sbdf_id(pdev);
spin_lock_irqsave(&state_lock, flags);
- dev_state = __get_device_state(devid);
+ dev_state = __get_device_state(sbdf);
if (dev_state == NULL) {
spin_unlock_irqrestore(&state_lock, flags);
return;
@@ -867,18 +858,18 @@ int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
{
struct device_state *dev_state;
unsigned long flags;
- u16 devid;
+ u32 sbdf;
int ret;
if (!amd_iommu_v2_supported())
return -ENODEV;
- devid = device_id(pdev);
+ sbdf = get_pci_sbdf_id(pdev);
spin_lock_irqsave(&state_lock, flags);
ret = -EINVAL;
- dev_state = __get_device_state(devid);
+ dev_state = __get_device_state(sbdf);
if (dev_state == NULL)
goto out_unlock;
@@ -898,18 +889,18 @@ int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
{
struct device_state *dev_state;
unsigned long flags;
- u16 devid;
+ u32 sbdf;
int ret;
if (!amd_iommu_v2_supported())
return -ENODEV;
- devid = device_id(pdev);
+ sbdf = get_pci_sbdf_id(pdev);
spin_lock_irqsave(&state_lock, flags);
ret = -EINVAL;
- dev_state = __get_device_state(devid);
+ dev_state = __get_device_state(sbdf);
if (dev_state == NULL)
goto out_unlock;
diff --git a/drivers/iommu/amd/quirks.c b/drivers/iommu/amd/quirks.c
index 5120ce4fdce3..79dbb8f33b47 100644
--- a/drivers/iommu/amd/quirks.c
+++ b/drivers/iommu/amd/quirks.c
@@ -15,7 +15,7 @@
struct ivrs_quirk_entry {
u8 id;
- u16 devid;
+ u32 devid;
};
enum {
@@ -49,7 +49,7 @@ static int __init ivrs_ioapic_quirk_cb(const struct dmi_system_id *d)
const struct ivrs_quirk_entry *i;
for (i = d->driver_data; i->id != 0 && i->devid != 0; i++)
- add_special_device(IVHD_SPECIAL_IOAPIC, i->id, (u16 *)&i->devid, 0);
+ add_special_device(IVHD_SPECIAL_IOAPIC, i->id, (u32 *)&i->devid, 0);
return 0;
}
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index 8af0242a90d9..1b1725759262 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -564,9 +564,6 @@ static void apple_dart_release_device(struct device *dev)
{
struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
- if (!cfg)
- return;
-
dev_iommu_priv_set(dev, NULL);
kfree(cfg);
}
@@ -771,7 +768,6 @@ static const struct iommu_ops apple_dart_iommu_ops = {
.of_xlate = apple_dart_of_xlate,
.def_domain_type = apple_dart_def_domain_type,
.get_resv_regions = apple_dart_get_resv_regions,
- .put_resv_regions = generic_iommu_put_resv_regions,
.pgsize_bitmap = -1UL, /* Restricted during dart probe */
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 88817a3376ef..d32b02336411 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1380,12 +1380,21 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
}
-static void arm_smmu_init_bypass_stes(__le64 *strtab, unsigned int nent)
+static void arm_smmu_init_bypass_stes(__le64 *strtab, unsigned int nent, bool force)
{
unsigned int i;
+ u64 val = STRTAB_STE_0_V;
+
+ if (disable_bypass && !force)
+ val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
+ else
+ val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
for (i = 0; i < nent; ++i) {
- arm_smmu_write_strtab_ent(NULL, -1, strtab);
+ strtab[0] = cpu_to_le64(val);
+ strtab[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
+ STRTAB_STE_1_SHCFG_INCOMING));
+ strtab[2] = 0;
strtab += STRTAB_STE_DWORDS;
}
}
@@ -1413,7 +1422,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
return -ENOMEM;
}
- arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
+ arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT, false);
arm_smmu_write_strtab_l1_desc(strtab, desc);
return 0;
}
@@ -2537,6 +2546,19 @@ static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
return sid < limit;
}
+static int arm_smmu_init_sid_strtab(struct arm_smmu_device *smmu, u32 sid)
+{
+ /* Check the SIDs are in range of the SMMU and our stream table */
+ if (!arm_smmu_sid_in_range(smmu, sid))
+ return -ERANGE;
+
+ /* Ensure l2 strtab is initialised */
+ if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
+ return arm_smmu_init_l2_strtab(smmu, sid);
+
+ return 0;
+}
+
static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
struct arm_smmu_master *master)
{
@@ -2560,20 +2582,9 @@ static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
new_stream->id = sid;
new_stream->master = master;
- /*
- * Check the SIDs are in range of the SMMU and our stream table
- */
- if (!arm_smmu_sid_in_range(smmu, sid)) {
- ret = -ERANGE;
+ ret = arm_smmu_init_sid_strtab(smmu, sid);
+ if (ret)
break;
- }
-
- /* Ensure l2 strtab is initialised */
- if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
- ret = arm_smmu_init_l2_strtab(smmu, sid);
- if (ret)
- break;
- }
/* Insert into SID tree */
new_node = &(smmu->streams.rb_node);
@@ -2691,20 +2702,14 @@ err_free_master:
static void arm_smmu_release_device(struct device *dev)
{
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct arm_smmu_master *master;
-
- if (!fwspec || fwspec->ops != &arm_smmu_ops)
- return;
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
- master = dev_iommu_priv_get(dev);
if (WARN_ON(arm_smmu_master_sva_enabled(master)))
iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
arm_smmu_detach_dev(master);
arm_smmu_disable_pasid(master);
arm_smmu_remove_master(master);
kfree(master);
- iommu_fwspec_free(dev);
}
static struct iommu_group *arm_smmu_device_group(struct device *dev)
@@ -2760,58 +2765,27 @@ static void arm_smmu_get_resv_regions(struct device *dev,
iommu_dma_get_resv_regions(dev, head);
}
-static bool arm_smmu_dev_has_feature(struct device *dev,
- enum iommu_dev_features feat)
-{
- struct arm_smmu_master *master = dev_iommu_priv_get(dev);
-
- if (!master)
- return false;
-
- switch (feat) {
- case IOMMU_DEV_FEAT_IOPF:
- return arm_smmu_master_iopf_supported(master);
- case IOMMU_DEV_FEAT_SVA:
- return arm_smmu_master_sva_supported(master);
- default:
- return false;
- }
-}
-
-static bool arm_smmu_dev_feature_enabled(struct device *dev,
- enum iommu_dev_features feat)
-{
- struct arm_smmu_master *master = dev_iommu_priv_get(dev);
-
- if (!master)
- return false;
-
- switch (feat) {
- case IOMMU_DEV_FEAT_IOPF:
- return master->iopf_enabled;
- case IOMMU_DEV_FEAT_SVA:
- return arm_smmu_master_sva_enabled(master);
- default:
- return false;
- }
-}
-
static int arm_smmu_dev_enable_feature(struct device *dev,
enum iommu_dev_features feat)
{
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
- if (!arm_smmu_dev_has_feature(dev, feat))
+ if (!master)
return -ENODEV;
- if (arm_smmu_dev_feature_enabled(dev, feat))
- return -EBUSY;
-
switch (feat) {
case IOMMU_DEV_FEAT_IOPF:
+ if (!arm_smmu_master_iopf_supported(master))
+ return -EINVAL;
+ if (master->iopf_enabled)
+ return -EBUSY;
master->iopf_enabled = true;
return 0;
case IOMMU_DEV_FEAT_SVA:
+ if (!arm_smmu_master_sva_supported(master))
+ return -EINVAL;
+ if (arm_smmu_master_sva_enabled(master))
+ return -EBUSY;
return arm_smmu_master_enable_sva(master);
default:
return -EINVAL;
@@ -2823,16 +2797,20 @@ static int arm_smmu_dev_disable_feature(struct device *dev,
{
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
- if (!arm_smmu_dev_feature_enabled(dev, feat))
+ if (!master)
return -EINVAL;
switch (feat) {
case IOMMU_DEV_FEAT_IOPF:
+ if (!master->iopf_enabled)
+ return -EINVAL;
if (master->sva_enabled)
return -EBUSY;
master->iopf_enabled = false;
return 0;
case IOMMU_DEV_FEAT_SVA:
+ if (!arm_smmu_master_sva_enabled(master))
+ return -EINVAL;
return arm_smmu_master_disable_sva(master);
default:
return -EINVAL;
@@ -2847,9 +2825,6 @@ static struct iommu_ops arm_smmu_ops = {
.device_group = arm_smmu_device_group,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
- .put_resv_regions = generic_iommu_put_resv_regions,
- .dev_has_feat = arm_smmu_dev_has_feature,
- .dev_feat_enabled = arm_smmu_dev_feature_enabled,
.dev_enable_feat = arm_smmu_dev_enable_feature,
.dev_disable_feat = arm_smmu_dev_disable_feature,
.sva_bind = arm_smmu_sva_bind,
@@ -3049,7 +3024,7 @@ static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits);
cfg->strtab_base_cfg = reg;
- arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents);
+ arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents, false);
return 0;
}
@@ -3743,6 +3718,36 @@ static void __iomem *arm_smmu_ioremap(struct device *dev, resource_size_t start,
return devm_ioremap_resource(dev, &res);
}
+static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu)
+{
+ struct list_head rmr_list;
+ struct iommu_resv_region *e;
+
+ INIT_LIST_HEAD(&rmr_list);
+ iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
+
+ list_for_each_entry(e, &rmr_list, list) {
+ __le64 *step;
+ struct iommu_iort_rmr_data *rmr;
+ int ret, i;
+
+ rmr = container_of(e, struct iommu_iort_rmr_data, rr);
+ for (i = 0; i < rmr->num_sids; i++) {
+ ret = arm_smmu_init_sid_strtab(smmu, rmr->sids[i]);
+ if (ret) {
+ dev_err(smmu->dev, "RMR SID(0x%x) bypass failed\n",
+ rmr->sids[i]);
+ continue;
+ }
+
+ step = arm_smmu_get_step_for_sid(smmu, rmr->sids[i]);
+ arm_smmu_init_bypass_stes(step, 1, true);
+ }
+ }
+
+ iort_put_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
+}
+
static int arm_smmu_device_probe(struct platform_device *pdev)
{
int irq, ret;
@@ -3826,6 +3831,9 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
/* Record our private device structure */
platform_set_drvdata(pdev, smmu);
+ /* Check for RMRs and install bypass STEs if any */
+ arm_smmu_rmr_install_bypass_ste(smmu);
+
/* Reset the device */
ret = arm_smmu_device_reset(smmu, bypass);
if (ret)
diff --git a/drivers/iommu/arm/arm-smmu/Makefile b/drivers/iommu/arm/arm-smmu/Makefile
index b0cc01aa20c9..2a5a95e8e3f9 100644
--- a/drivers/iommu/arm/arm-smmu/Makefile
+++ b/drivers/iommu/arm/arm-smmu/Makefile
@@ -3,3 +3,4 @@ obj-$(CONFIG_QCOM_IOMMU) += qcom_iommu.o
obj-$(CONFIG_ARM_SMMU) += arm_smmu.o
arm_smmu-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-nvidia.o
arm_smmu-$(CONFIG_ARM_SMMU_QCOM) += arm-smmu-qcom.o
+arm_smmu-$(CONFIG_ARM_SMMU_QCOM_DEBUG) += arm-smmu-qcom-debug.o
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
new file mode 100644
index 000000000000..6eed8e67a0ca
--- /dev/null
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/of_device.h>
+#include <linux/qcom_scm.h>
+#include <linux/ratelimit.h>
+
+#include "arm-smmu.h"
+#include "arm-smmu-qcom.h"
+
+enum qcom_smmu_impl_reg_offset {
+ QCOM_SMMU_TBU_PWR_STATUS,
+ QCOM_SMMU_STATS_SYNC_INV_TBU_ACK,
+ QCOM_SMMU_MMU2QSS_AND_SAFE_WAIT_CNTR,
+};
+
+struct qcom_smmu_config {
+ const u32 *reg_offset;
+};
+
+void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu)
+{
+ int ret;
+ u32 tbu_pwr_status, sync_inv_ack, sync_inv_progress;
+ struct qcom_smmu *qsmmu = container_of(smmu, struct qcom_smmu, smmu);
+ const struct qcom_smmu_config *cfg;
+ static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+
+ if (__ratelimit(&rs)) {
+ dev_err(smmu->dev, "TLB sync timed out -- SMMU may be deadlocked\n");
+
+ cfg = qsmmu->cfg;
+ if (!cfg)
+ return;
+
+ ret = qcom_scm_io_readl(smmu->ioaddr + cfg->reg_offset[QCOM_SMMU_TBU_PWR_STATUS],
+ &tbu_pwr_status);
+ if (ret)
+ dev_err(smmu->dev,
+ "Failed to read TBU power status: %d\n", ret);
+
+ ret = qcom_scm_io_readl(smmu->ioaddr + cfg->reg_offset[QCOM_SMMU_STATS_SYNC_INV_TBU_ACK],
+ &sync_inv_ack);
+ if (ret)
+ dev_err(smmu->dev,
+ "Failed to read TBU sync/inv ack status: %d\n", ret);
+
+ ret = qcom_scm_io_readl(smmu->ioaddr + cfg->reg_offset[QCOM_SMMU_MMU2QSS_AND_SAFE_WAIT_CNTR],
+ &sync_inv_progress);
+ if (ret)
+ dev_err(smmu->dev,
+				"Failed to read TCU sync/inv progress: %d\n", ret);
+
+ dev_err(smmu->dev,
+ "TBU: power_status %#x sync_inv_ack %#x sync_inv_progress %#x\n",
+ tbu_pwr_status, sync_inv_ack, sync_inv_progress);
+ }
+}
+
+/* Implementation Defined Register Space 0 register offsets */
+static const u32 qcom_smmu_impl0_reg_offset[] = {
+ [QCOM_SMMU_TBU_PWR_STATUS] = 0x2204,
+ [QCOM_SMMU_STATS_SYNC_INV_TBU_ACK] = 0x25dc,
+ [QCOM_SMMU_MMU2QSS_AND_SAFE_WAIT_CNTR] = 0x2670,
+};
+
+static const struct qcom_smmu_config qcm2290_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sc7180_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sc7280_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sc8180x_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sc8280xp_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sm6125_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sm6350_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sm8150_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sm8250_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sm8350_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct qcom_smmu_config sm8450_smmu_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+static const struct of_device_id __maybe_unused qcom_smmu_impl_debug_match[] = {
+ { .compatible = "qcom,msm8998-smmu-v2" },
+ { .compatible = "qcom,qcm2290-smmu-500", .data = &qcm2290_smmu_cfg },
+ { .compatible = "qcom,sc7180-smmu-500", .data = &sc7180_smmu_cfg },
+ { .compatible = "qcom,sc7280-smmu-500", .data = &sc7280_smmu_cfg},
+ { .compatible = "qcom,sc8180x-smmu-500", .data = &sc8180x_smmu_cfg },
+ { .compatible = "qcom,sc8280xp-smmu-500", .data = &sc8280xp_smmu_cfg },
+ { .compatible = "qcom,sdm630-smmu-v2" },
+ { .compatible = "qcom,sdm845-smmu-500" },
+ { .compatible = "qcom,sm6125-smmu-500", .data = &sm6125_smmu_cfg},
+ { .compatible = "qcom,sm6350-smmu-500", .data = &sm6350_smmu_cfg},
+ { .compatible = "qcom,sm8150-smmu-500", .data = &sm8150_smmu_cfg },
+ { .compatible = "qcom,sm8250-smmu-500", .data = &sm8250_smmu_cfg },
+ { .compatible = "qcom,sm8350-smmu-500", .data = &sm8350_smmu_cfg },
+ { .compatible = "qcom,sm8450-smmu-500", .data = &sm8450_smmu_cfg },
+ { }
+};
+
+const void *qcom_smmu_impl_data(struct arm_smmu_device *smmu)
+{
+ const struct of_device_id *match;
+ const struct device_node *np = smmu->dev->of_node;
+
+ match = of_match_node(qcom_smmu_impl_debug_match, np);
+ if (!match)
+ return NULL;
+
+ return match->data;
+}
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index 7820711c4560..b2708de25ea3 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -5,23 +5,40 @@
#include <linux/acpi.h>
#include <linux/adreno-smmu-priv.h>
+#include <linux/delay.h>
#include <linux/of_device.h>
#include <linux/qcom_scm.h>
#include "arm-smmu.h"
+#include "arm-smmu-qcom.h"
-struct qcom_smmu {
- struct arm_smmu_device smmu;
- bool bypass_quirk;
- u8 bypass_cbndx;
- u32 stall_enabled;
-};
+#define QCOM_DUMMY_VAL -1
static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu)
{
return container_of(smmu, struct qcom_smmu, smmu);
}
+static void qcom_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
+ int sync, int status)
+{
+ unsigned int spin_cnt, delay;
+ u32 reg;
+
+ arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
+ for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
+ for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
+ reg = arm_smmu_readl(smmu, page, status);
+ if (!(reg & ARM_SMMU_sTLBGSTATUS_GSACTIVE))
+ return;
+ cpu_relax();
+ }
+ udelay(delay);
+ }
+
+ qcom_smmu_tlb_sync_debug(smmu);
+}
+
static void qcom_adreno_smmu_write_sctlr(struct arm_smmu_device *smmu, int idx,
u32 reg)
{
@@ -233,6 +250,7 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
{ .compatible = "qcom,sc7280-mdss" },
{ .compatible = "qcom,sc7280-mss-pil" },
{ .compatible = "qcom,sc8180x-mdss" },
+ { .compatible = "qcom,sm8250-mdss" },
{ .compatible = "qcom,sdm845-mdss" },
{ .compatible = "qcom,sdm845-mss-pil" },
{ }
@@ -374,6 +392,7 @@ static const struct arm_smmu_impl qcom_smmu_impl = {
.def_domain_type = qcom_smmu_def_domain_type,
.reset = qcom_smmu500_reset,
.write_s2cr = qcom_smmu_write_s2cr,
+ .tlb_sync = qcom_smmu_tlb_sync,
};
static const struct arm_smmu_impl qcom_adreno_smmu_impl = {
@@ -382,6 +401,7 @@ static const struct arm_smmu_impl qcom_adreno_smmu_impl = {
.reset = qcom_smmu500_reset,
.alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
.write_sctlr = qcom_adreno_smmu_write_sctlr,
+ .tlb_sync = qcom_smmu_tlb_sync,
};
static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
@@ -398,6 +418,7 @@ static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
return ERR_PTR(-ENOMEM);
qsmmu->smmu.impl = impl;
+ qsmmu->cfg = qcom_smmu_impl_data(smmu);
return &qsmmu->smmu;
}
@@ -413,6 +434,7 @@ static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
{ .compatible = "qcom,sdm845-smmu-500" },
{ .compatible = "qcom,sm6125-smmu-500" },
{ .compatible = "qcom,sm6350-smmu-500" },
+ { .compatible = "qcom,sm6375-smmu-500" },
{ .compatible = "qcom,sm8150-smmu-500" },
{ .compatible = "qcom,sm8250-smmu-500" },
{ .compatible = "qcom,sm8350-smmu-500" },
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h
new file mode 100644
index 000000000000..99ec8f8629a0
--- /dev/null
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _ARM_SMMU_QCOM_H
+#define _ARM_SMMU_QCOM_H
+
+struct qcom_smmu {
+ struct arm_smmu_device smmu;
+ const struct qcom_smmu_config *cfg;
+ bool bypass_quirk;
+ u8 bypass_cbndx;
+ u32 stall_enabled;
+};
+
+#ifdef CONFIG_ARM_SMMU_QCOM_DEBUG
+void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu);
+const void *qcom_smmu_impl_data(struct arm_smmu_device *smmu);
+#else
+static inline void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu) { }
+static inline const void *qcom_smmu_impl_data(struct arm_smmu_device *smmu)
+{
+ return NULL;
+}
+#endif
+
+#endif /* _ARM_SMMU_QCOM_H */
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index 2ed3594f384e..dfa82df00342 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -1432,27 +1432,19 @@ out_free:
static void arm_smmu_release_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct arm_smmu_master_cfg *cfg;
- struct arm_smmu_device *smmu;
+ struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
int ret;
- if (!fwspec || fwspec->ops != &arm_smmu_ops)
- return;
-
- cfg = dev_iommu_priv_get(dev);
- smmu = cfg->smmu;
-
- ret = arm_smmu_rpm_get(smmu);
+ ret = arm_smmu_rpm_get(cfg->smmu);
if (ret < 0)
return;
arm_smmu_master_free_smes(cfg, fwspec);
- arm_smmu_rpm_put(smmu);
+ arm_smmu_rpm_put(cfg->smmu);
dev_iommu_priv_set(dev, NULL);
kfree(cfg);
- iommu_fwspec_free(dev);
}
static void arm_smmu_probe_finalize(struct device *dev)
@@ -1592,7 +1584,6 @@ static struct iommu_ops arm_smmu_ops = {
.device_group = arm_smmu_device_group,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
- .put_resv_regions = generic_iommu_put_resv_regions,
.def_domain_type = arm_smmu_def_domain_type,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
.owner = THIS_MODULE,
@@ -2071,10 +2062,57 @@ err_reset_platform_ops: __maybe_unused;
return err;
}
+static void arm_smmu_rmr_install_bypass_smr(struct arm_smmu_device *smmu)
+{
+ struct list_head rmr_list;
+ struct iommu_resv_region *e;
+ int idx, cnt = 0;
+ u32 reg;
+
+ INIT_LIST_HEAD(&rmr_list);
+ iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
+
+ /*
+ * Rather than trying to look at existing mappings that
+	 * are set up by the firmware and then invalidate the ones
+	 * that do not have matching RMR entries, just disable the
+ * SMMU until it gets enabled again in the reset routine.
+ */
+ reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);
+ reg |= ARM_SMMU_sCR0_CLIENTPD;
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg);
+
+ list_for_each_entry(e, &rmr_list, list) {
+ struct iommu_iort_rmr_data *rmr;
+ int i;
+
+ rmr = container_of(e, struct iommu_iort_rmr_data, rr);
+ for (i = 0; i < rmr->num_sids; i++) {
+ idx = arm_smmu_find_sme(smmu, rmr->sids[i], ~0);
+ if (idx < 0)
+ continue;
+
+ if (smmu->s2crs[idx].count == 0) {
+ smmu->smrs[idx].id = rmr->sids[i];
+ smmu->smrs[idx].mask = 0;
+ smmu->smrs[idx].valid = true;
+ }
+ smmu->s2crs[idx].count++;
+ smmu->s2crs[idx].type = S2CR_TYPE_BYPASS;
+ smmu->s2crs[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
+
+ cnt++;
+ }
+ }
+
+ dev_notice(smmu->dev, "\tpreserved %d boot mapping%s\n", cnt,
+ cnt == 1 ? "" : "s");
+ iort_put_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
+}
+
static int arm_smmu_device_probe(struct platform_device *pdev)
{
struct resource *res;
- resource_size_t ioaddr;
struct arm_smmu_device *smmu;
struct device *dev = &pdev->dev;
int num_irqs, i, err;
@@ -2098,7 +2136,8 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
smmu->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(smmu->base))
return PTR_ERR(smmu->base);
- ioaddr = res->start;
+ smmu->ioaddr = res->start;
+
/*
* The resource size should effectively match the value of SMMU_TOP;
* stash that temporarily until we know PAGESIZE to validate it with.
@@ -2178,7 +2217,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
}
err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
- "smmu.%pa", &ioaddr);
+ "smmu.%pa", &smmu->ioaddr);
if (err) {
dev_err(dev, "Failed to register iommu in sysfs\n");
return err;
@@ -2191,6 +2230,10 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, smmu);
+
+ /* Check for RMRs and install bypass SMRs if any */
+ arm_smmu_rmr_install_bypass_smr(smmu);
+
arm_smmu_device_reset(smmu);
arm_smmu_test_smr_masks(smmu);
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h
index 2b9b42fb6f30..703fd5817ec1 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.h
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h
@@ -278,6 +278,7 @@ struct arm_smmu_device {
struct device *dev;
void __iomem *base;
+ phys_addr_t ioaddr;
unsigned int numpage;
unsigned int pgshift;
diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
index 4c077c38fbd6..17235116d3bb 100644
--- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c
+++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
@@ -532,16 +532,6 @@ static struct iommu_device *qcom_iommu_probe_device(struct device *dev)
return &qcom_iommu->iommu;
}
-static void qcom_iommu_release_device(struct device *dev)
-{
- struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
-
- if (!qcom_iommu)
- return;
-
- iommu_fwspec_free(dev);
-}
-
static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
struct qcom_iommu_dev *qcom_iommu;
@@ -591,7 +581,6 @@ static const struct iommu_ops qcom_iommu_ops = {
.capable = qcom_iommu_capable,
.domain_alloc = qcom_iommu_domain_alloc,
.probe_device = qcom_iommu_probe_device,
- .release_device = qcom_iommu_release_device,
.device_group = generic_device_group,
.of_xlate = qcom_iommu_of_xlate,
.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
@@ -750,9 +739,12 @@ static bool qcom_iommu_has_secure_context(struct qcom_iommu_dev *qcom_iommu)
{
struct device_node *child;
- for_each_child_of_node(qcom_iommu->dev->of_node, child)
- if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec"))
+ for_each_child_of_node(qcom_iommu->dev->of_node, child) {
+ if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec")) {
+ of_node_put(child);
return true;
+ }
+ }
return false;
}
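The added of_node_put() addresses a common for_each_child_of_node() pitfall: the iterator takes a reference on each child and only drops it when advancing, so breaking out of the loop early leaks the current node's reference. A generic sketch of the fixed pattern follows; the has_compatible_child() helper is illustrative, not driver code.

#include <linux/of.h>

static bool has_compatible_child(struct device_node *parent, const char *compat)
{
        struct device_node *child;

        for_each_child_of_node(parent, child) {
                if (of_device_is_compatible(child, compat)) {
                        /* The iterator holds a reference on 'child'; drop it
                         * explicitly when leaving the loop early. */
                        of_node_put(child);
                        return true;
                }
        }

        return false;
}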
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index f90251572a5d..17dd683b2fce 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -21,6 +21,7 @@
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/list_sort.h>
+#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
@@ -64,6 +65,7 @@ struct iommu_dma_cookie {
/* Domain for flush queue callback; NULL if flush queue not in use */
struct iommu_domain *fq_domain;
+ struct mutex mutex;
};
static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
@@ -310,6 +312,7 @@ int iommu_get_dma_cookie(struct iommu_domain *domain)
if (!domain->iova_cookie)
return -ENOMEM;
+ mutex_init(&domain->iova_cookie->mutex);
return 0;
}
@@ -385,7 +388,7 @@ void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
- iort_iommu_msi_get_resv_regions(dev, list);
+ iort_iommu_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);
@@ -560,26 +563,33 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
}
/* start_pfn is always nonzero for an already-initialised domain */
+ mutex_lock(&cookie->mutex);
if (iovad->start_pfn) {
if (1UL << order != iovad->granule ||
base_pfn != iovad->start_pfn) {
pr_warn("Incompatible range for DMA domain\n");
- return -EFAULT;
+ ret = -EFAULT;
+ goto done_unlock;
}
- return 0;
+ ret = 0;
+ goto done_unlock;
}
init_iova_domain(iovad, 1UL << order, base_pfn);
ret = iova_domain_init_rcaches(iovad);
if (ret)
- return ret;
+ goto done_unlock;
/* If the FQ fails we can simply fall back to strict mode */
if (domain->type == IOMMU_DOMAIN_DMA_FQ && iommu_dma_init_fq(domain))
domain->type = IOMMU_DOMAIN_DMA;
- return iova_reserve_iommu_regions(dev, domain);
+ ret = iova_reserve_iommu_regions(dev, domain);
+
+done_unlock:
+ mutex_unlock(&cookie->mutex);
+ return ret;
}
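The new cookie->mutex turns iommu_dma_init_domain() into a classic check-then-initialise section: concurrent callers serialise on the mutex, the first one performs the one-time setup, and later callers only re-validate the existing state before unlocking. A minimal sketch of that shape, with my_cookie/my_init_once as illustrative names:

#include <linux/mutex.h>

struct my_cookie {
        struct mutex mutex;
        bool initialised;
};

static int my_init_once(struct my_cookie *c)
{
        int ret = 0;

        mutex_lock(&c->mutex);
        if (c->initialised)
                goto out_unlock;        /* another caller already won the race */

        /* ... one-time setup that must not run concurrently ... */
        c->initialised = true;

out_unlock:
        mutex_unlock(&c->mutex);
        return ret;
}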
/**
@@ -1053,15 +1063,30 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
for_each_sg(sg, s, nents, i) {
/* Restore this segment's original unaligned fields first */
+ dma_addr_t s_dma_addr = sg_dma_address(s);
unsigned int s_iova_off = sg_dma_address(s);
unsigned int s_length = sg_dma_len(s);
unsigned int s_iova_len = s->length;
- s->offset += s_iova_off;
- s->length = s_length;
sg_dma_address(s) = DMA_MAPPING_ERROR;
sg_dma_len(s) = 0;
+ if (sg_is_dma_bus_address(s)) {
+ if (i > 0)
+ cur = sg_next(cur);
+
+ sg_dma_unmark_bus_address(s);
+ sg_dma_address(cur) = s_dma_addr;
+ sg_dma_len(cur) = s_length;
+ sg_dma_mark_bus_address(cur);
+ count++;
+ cur_len = 0;
+ continue;
+ }
+
+ s->offset += s_iova_off;
+ s->length = s_length;
+
/*
* Now fill in the real DMA data. If...
* - there is a valid output segment to append to
@@ -1102,10 +1127,14 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
int i;
for_each_sg(sg, s, nents, i) {
- if (sg_dma_address(s) != DMA_MAPPING_ERROR)
- s->offset += sg_dma_address(s);
- if (sg_dma_len(s))
- s->length = sg_dma_len(s);
+ if (sg_is_dma_bus_address(s)) {
+ sg_dma_unmark_bus_address(s);
+ } else {
+ if (sg_dma_address(s) != DMA_MAPPING_ERROR)
+ s->offset += sg_dma_address(s);
+ if (sg_dma_len(s))
+ s->length = sg_dma_len(s);
+ }
sg_dma_address(s) = DMA_MAPPING_ERROR;
sg_dma_len(s) = 0;
}
@@ -1158,6 +1187,8 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
struct iova_domain *iovad = &cookie->iovad;
struct scatterlist *s, *prev = NULL;
int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
+ struct pci_p2pdma_map_state p2pdma_state = {};
+ enum pci_p2pdma_map_type map;
dma_addr_t iova;
size_t iova_len = 0;
unsigned long mask = dma_get_seg_boundary(dev);
@@ -1187,6 +1218,30 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
size_t s_length = s->length;
size_t pad_len = (mask - iova_len + 1) & mask;
+ if (is_pci_p2pdma_page(sg_page(s))) {
+ map = pci_p2pdma_map_segment(&p2pdma_state, dev, s);
+ switch (map) {
+ case PCI_P2PDMA_MAP_BUS_ADDR:
+ /*
+ * iommu_map_sg() will skip this segment as
+ * it is marked as a bus address;
+ * __finalise_sg() will copy the dma address
+ * into the output segment.
+ */
+ continue;
+ case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
+ /*
+ * Mappings through the host bridge are handled
+ * with regular IOVAs, so do nothing here and
+ * continue with the normal mapping below.
+ */
+ break;
+ default:
+ ret = -EREMOTEIO;
+ goto out_restore_sg;
+ }
+ }
+
sg_dma_address(s) = s_iova_off;
sg_dma_len(s) = s_length;
s->offset -= s_iova_off;
@@ -1215,6 +1270,9 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
prev = s;
}
+ if (!iova_len)
+ return __finalise_sg(dev, sg, nents, 0);
+
iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
if (!iova) {
ret = -ENOMEM;
@@ -1236,7 +1294,7 @@ out_free_iova:
out_restore_sg:
__invalidate_sg(sg, nents);
out:
- if (ret != -ENOMEM)
+ if (ret != -ENOMEM && ret != -EREMOTEIO)
return -EINVAL;
return ret;
}
@@ -1244,7 +1302,7 @@ out:
static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir, unsigned long attrs)
{
- dma_addr_t start, end;
+ dma_addr_t end = 0, start;
struct scatterlist *tmp;
int i;
@@ -1258,16 +1316,37 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
/*
* The scatterlist segments are mapped into a single
- * contiguous IOVA allocation, so this is incredibly easy.
+ * contiguous IOVA allocation, so the start and end points
+ * just have to be determined.
*/
- start = sg_dma_address(sg);
- for_each_sg(sg_next(sg), tmp, nents - 1, i) {
+ for_each_sg(sg, tmp, nents, i) {
+ if (sg_is_dma_bus_address(tmp)) {
+ sg_dma_unmark_bus_address(tmp);
+ continue;
+ }
+
+ if (sg_dma_len(tmp) == 0)
+ break;
+
+ start = sg_dma_address(tmp);
+ break;
+ }
+
+ nents -= i;
+ for_each_sg(tmp, tmp, nents, i) {
+ if (sg_is_dma_bus_address(tmp)) {
+ sg_dma_unmark_bus_address(tmp);
+ continue;
+ }
+
if (sg_dma_len(tmp) == 0)
break;
- sg = tmp;
+
+ end = sg_dma_address(tmp) + sg_dma_len(tmp);
}
- end = sg_dma_address(sg) + sg_dma_len(sg);
- __iommu_dma_unmap(dev, start, end - start);
+
+ if (end)
+ __iommu_dma_unmap(dev, start, end - start);
}
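With PCI peer-to-peer support, a mapped scatterlist can now mix IOVA-backed segments with raw bus-address segments, and every walker has to distinguish the two, as the unmap path above does. The sketch below shows that gating using only the scatterlist helpers; my_walk_mapped_sg() is an illustrative function, not part of this series.

#include <linux/printk.h>
#include <linux/scatterlist.h>

static void my_walk_mapped_sg(struct scatterlist *sgl, int nents)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                if (sg_is_dma_bus_address(sg)) {
                        /* Peer-to-peer segment: already a bus address, so
                         * there is no IOVA or IOMMU mapping behind it. */
                        continue;
                }

                /* Regular IOVA-backed segment. */
                {
                        dma_addr_t addr = sg_dma_address(sg);

                        pr_debug("segment %pad + %u\n", &addr, sg_dma_len(sg));
                }
        }
}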
static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
@@ -1459,7 +1538,13 @@ static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
}
+static size_t iommu_dma_opt_mapping_size(void)
+{
+ return iova_rcache_range();
+}
+
static const struct dma_map_ops iommu_dma_ops = {
+ .flags = DMA_F_PCI_P2PDMA_SUPPORTED,
.alloc = iommu_dma_alloc,
.free = iommu_dma_free,
.alloc_pages = dma_common_alloc_pages,
@@ -1479,6 +1564,7 @@ static const struct dma_map_ops iommu_dma_ops = {
.map_resource = iommu_dma_map_resource,
.unmap_resource = iommu_dma_unmap_resource,
.get_merge_boundary = iommu_dma_get_merge_boundary,
+ .opt_mapping_size = iommu_dma_opt_mapping_size,
};
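The new opt_mapping_size hook is surfaced to drivers through dma_opt_mapping_size(), which here reports the IOVA rcache range so callers can size requests to stay on the fast allocation path. A hedged example of how a block driver might consume it; the my_hw_set_limits() helper and its queue_limits usage are illustrative and not taken from this series.

#include <linux/blkdev.h>
#include <linux/dma-mapping.h>
#include <linux/limits.h>

static void my_hw_set_limits(struct device *dev, struct queue_limits *lim)
{
        size_t opt = dma_opt_mapping_size(dev);

        /* Keep transfers within what the IOMMU can satisfy from its per-CPU
         * IOVA caches, avoiding slow-path IOVA allocations. */
        if (opt >> SECTOR_SHIFT < UINT_MAX)
                lim->max_hw_sectors = min(lim->max_hw_sectors,
                                          (unsigned int)(opt >> SECTOR_SHIFT));
}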
/*
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 71f2018e23fe..8e18984a0c4f 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -135,6 +135,11 @@ static u32 lv2ent_offset(sysmmu_iova_t iova)
#define CFG_SYSSEL (1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE (1 << 20) /* System MMU 3.2+ only */
+#define CTRL_VM_ENABLE BIT(0)
+#define CTRL_VM_FAULT_MODE_STALL BIT(3)
+#define CAPA0_CAPA1_EXIST BIT(11)
+#define CAPA1_VCR_ENABLED BIT(14)
+
/* common registers */
#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
@@ -148,29 +153,20 @@ static u32 lv2ent_offset(sysmmu_iova_t iova)
#define MAKE_MMU_VER(maj, min) ((((maj) & 0xF) << 7) | ((min) & 0x7F))
/* v1.x - v3.x registers */
-#define REG_MMU_FLUSH 0x00C
-#define REG_MMU_FLUSH_ENTRY 0x010
-#define REG_PT_BASE_ADDR 0x014
-#define REG_INT_STATUS 0x018
-#define REG_INT_CLEAR 0x01C
-
#define REG_PAGE_FAULT_ADDR 0x024
#define REG_AW_FAULT_ADDR 0x028
#define REG_AR_FAULT_ADDR 0x02C
#define REG_DEFAULT_SLAVE_ADDR 0x030
/* v5.x registers */
-#define REG_V5_PT_BASE_PFN 0x00C
-#define REG_V5_MMU_FLUSH_ALL 0x010
-#define REG_V5_MMU_FLUSH_ENTRY 0x014
-#define REG_V5_MMU_FLUSH_RANGE 0x018
-#define REG_V5_MMU_FLUSH_START 0x020
-#define REG_V5_MMU_FLUSH_END 0x024
-#define REG_V5_INT_STATUS 0x060
-#define REG_V5_INT_CLEAR 0x064
#define REG_V5_FAULT_AR_VA 0x070
#define REG_V5_FAULT_AW_VA 0x080
+/* v7.x registers */
+#define REG_V7_CAPA0 0x870
+#define REG_V7_CAPA1 0x874
+#define REG_V7_CTRL_VM 0x8000
+
#define has_sysmmu(dev) (dev_iommu_priv_get(dev) != NULL)
static struct device *dma_dev;
@@ -251,6 +247,21 @@ struct exynos_iommu_domain {
};
/*
+ * SysMMU version specific data. Holds the offsets of registers whose
+ * location differs between SysMMU variants.
+ */
+struct sysmmu_variant {
+ u32 pt_base; /* page table base address (physical) */
+ u32 flush_all; /* invalidate all TLB entries */
+ u32 flush_entry; /* invalidate specific TLB entry */
+ u32 flush_range; /* invalidate TLB entries in specified range */
+ u32 flush_start; /* start address of range invalidation */
+ u32 flush_end; /* end address of range invalidation */
+ u32 int_status; /* interrupt status information */
+ u32 int_clear; /* clear the interrupt */
+};
+
+/*
 * This structure holds all data of a single SYSMMU controller; this includes
* hw resources like registers and clocks, pointers and list nodes to connect
* it to all other structures, internal state and parameters read from device
@@ -274,6 +285,45 @@ struct sysmmu_drvdata {
unsigned int version; /* our version */
struct iommu_device iommu; /* IOMMU core handle */
+ const struct sysmmu_variant *variant; /* version specific data */
+
+ /* v7 fields */
+ bool has_vcr; /* virtual machine control register */
+};
+
+#define SYSMMU_REG(data, reg) ((data)->sfrbase + (data)->variant->reg)
+
+/* SysMMU v1..v3 */
+static const struct sysmmu_variant sysmmu_v1_variant = {
+ .flush_all = 0x0c,
+ .flush_entry = 0x10,
+ .pt_base = 0x14,
+ .int_status = 0x18,
+ .int_clear = 0x1c,
+};
+
+/* SysMMU v5 and v7 (non-VM capable) */
+static const struct sysmmu_variant sysmmu_v5_variant = {
+ .pt_base = 0x0c,
+ .flush_all = 0x10,
+ .flush_entry = 0x14,
+ .flush_range = 0x18,
+ .flush_start = 0x20,
+ .flush_end = 0x24,
+ .int_status = 0x60,
+ .int_clear = 0x64,
+};
+
+/* SysMMU v7: VM capable register set */
+static const struct sysmmu_variant sysmmu_v7_vm_variant = {
+ .pt_base = 0x800c,
+ .flush_all = 0x8010,
+ .flush_entry = 0x8014,
+ .flush_range = 0x8018,
+ .flush_start = 0x8020,
+ .flush_end = 0x8024,
+ .int_status = 0x60,
+ .int_clear = 0x64,
};
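The per-variant offset tables plus the SYSMMU_REG() macro replace scattered version checks with a single data-driven lookup: each access becomes sfrbase plus the offset stored for the probed variant. A tiny sketch of the resulting call style; my_flush_all() is illustrative, while the macro and structures are the ones added above.

#include <linux/io.h>

static void my_flush_all(struct sysmmu_drvdata *data)
{
        /* Expands to data->sfrbase + data->variant->flush_all, so the same
         * writel() works for the v1, v5 and v7 register layouts. */
        writel(0x1, SYSMMU_REG(data, flush_all));
}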
static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
@@ -304,10 +354,7 @@ static bool sysmmu_block(struct sysmmu_drvdata *data)
static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
{
- if (MMU_MAJ_VER(data->version) < 5)
- writel(0x1, data->sfrbase + REG_MMU_FLUSH);
- else
- writel(0x1, data->sfrbase + REG_V5_MMU_FLUSH_ALL);
+ writel(0x1, SYSMMU_REG(data, flush_all));
}
static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
@@ -315,34 +362,30 @@ static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
{
unsigned int i;
- if (MMU_MAJ_VER(data->version) < 5) {
+ if (MMU_MAJ_VER(data->version) < 5 || num_inv == 1) {
for (i = 0; i < num_inv; i++) {
writel((iova & SPAGE_MASK) | 1,
- data->sfrbase + REG_MMU_FLUSH_ENTRY);
+ SYSMMU_REG(data, flush_entry));
iova += SPAGE_SIZE;
}
} else {
- if (num_inv == 1) {
- writel((iova & SPAGE_MASK) | 1,
- data->sfrbase + REG_V5_MMU_FLUSH_ENTRY);
- } else {
- writel((iova & SPAGE_MASK),
- data->sfrbase + REG_V5_MMU_FLUSH_START);
- writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE,
- data->sfrbase + REG_V5_MMU_FLUSH_END);
- writel(1, data->sfrbase + REG_V5_MMU_FLUSH_RANGE);
- }
+ writel(iova & SPAGE_MASK, SYSMMU_REG(data, flush_start));
+ writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE,
+ SYSMMU_REG(data, flush_end));
+ writel(0x1, SYSMMU_REG(data, flush_range));
}
}
static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
{
+ u32 pt_base;
+
if (MMU_MAJ_VER(data->version) < 5)
- writel(pgd, data->sfrbase + REG_PT_BASE_ADDR);
+ pt_base = pgd;
else
- writel(pgd >> PAGE_SHIFT,
- data->sfrbase + REG_V5_PT_BASE_PFN);
+ pt_base = pgd >> SPAGE_ORDER;
+ writel(pt_base, SYSMMU_REG(data, pt_base));
__sysmmu_tlb_invalidate(data);
}
@@ -362,6 +405,20 @@ static void __sysmmu_disable_clocks(struct sysmmu_drvdata *data)
clk_disable_unprepare(data->clk_master);
}
+static bool __sysmmu_has_capa1(struct sysmmu_drvdata *data)
+{
+ u32 capa0 = readl(data->sfrbase + REG_V7_CAPA0);
+
+ return capa0 & CAPA0_CAPA1_EXIST;
+}
+
+static void __sysmmu_get_vcr(struct sysmmu_drvdata *data)
+{
+ u32 capa1 = readl(data->sfrbase + REG_V7_CAPA1);
+
+ data->has_vcr = capa1 & CAPA1_VCR_ENABLED;
+}
+
static void __sysmmu_get_version(struct sysmmu_drvdata *data)
{
u32 ver;
@@ -379,6 +436,19 @@ static void __sysmmu_get_version(struct sysmmu_drvdata *data)
dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));
+ if (MMU_MAJ_VER(data->version) < 5) {
+ data->variant = &sysmmu_v1_variant;
+ } else if (MMU_MAJ_VER(data->version) < 7) {
+ data->variant = &sysmmu_v5_variant;
+ } else {
+ if (__sysmmu_has_capa1(data))
+ __sysmmu_get_vcr(data);
+ if (data->has_vcr)
+ data->variant = &sysmmu_v7_vm_variant;
+ else
+ data->variant = &sysmmu_v5_variant;
+ }
+
__sysmmu_disable_clocks(data);
}
@@ -406,19 +476,14 @@ static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
const struct sysmmu_fault_info *finfo;
unsigned int i, n, itype;
sysmmu_iova_t fault_addr;
- unsigned short reg_status, reg_clear;
int ret = -ENOSYS;
WARN_ON(!data->active);
if (MMU_MAJ_VER(data->version) < 5) {
- reg_status = REG_INT_STATUS;
- reg_clear = REG_INT_CLEAR;
finfo = sysmmu_faults;
n = ARRAY_SIZE(sysmmu_faults);
} else {
- reg_status = REG_V5_INT_STATUS;
- reg_clear = REG_V5_INT_CLEAR;
finfo = sysmmu_v5_faults;
n = ARRAY_SIZE(sysmmu_v5_faults);
}
@@ -427,7 +492,7 @@ static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
clk_enable(data->clk_master);
- itype = __ffs(readl(data->sfrbase + reg_status));
+ itype = __ffs(readl(SYSMMU_REG(data, int_status)));
for (i = 0; i < n; i++, finfo++)
if (finfo->bit == itype)
break;
@@ -444,7 +509,7 @@ static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
/* fault is not recovered by fault handler */
BUG_ON(ret != 0);
- writel(1 << itype, data->sfrbase + reg_clear);
+ writel(1 << itype, SYSMMU_REG(data, int_clear));
sysmmu_unblock(data);
@@ -486,6 +551,18 @@ static void __sysmmu_init_config(struct sysmmu_drvdata *data)
writel(cfg, data->sfrbase + REG_MMU_CFG);
}
+static void __sysmmu_enable_vid(struct sysmmu_drvdata *data)
+{
+ u32 ctrl;
+
+ if (MMU_MAJ_VER(data->version) < 7 || !data->has_vcr)
+ return;
+
+ ctrl = readl(data->sfrbase + REG_V7_CTRL_VM);
+ ctrl |= CTRL_VM_ENABLE | CTRL_VM_FAULT_MODE_STALL;
+ writel(ctrl, data->sfrbase + REG_V7_CTRL_VM);
+}
+
static void __sysmmu_enable(struct sysmmu_drvdata *data)
{
unsigned long flags;
@@ -496,6 +573,7 @@ static void __sysmmu_enable(struct sysmmu_drvdata *data)
writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
__sysmmu_init_config(data);
__sysmmu_set_ptbase(data, data->pgtable);
+ __sysmmu_enable_vid(data);
writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
data->active = true;
spin_unlock_irqrestore(&data->lock, flags);
@@ -551,7 +629,7 @@ static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
* 64KB page can be one of 16 consecutive sets.
*/
if (MMU_MAJ_VER(data->version) == 2)
- num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);
+ num_inv = min_t(unsigned int, size / SPAGE_SIZE, 64);
if (sysmmu_block(data)) {
__sysmmu_tlb_invalidate_entry(data, iova, num_inv);
@@ -623,6 +701,8 @@ static int exynos_sysmmu_probe(struct platform_device *pdev)
data->sysmmu = dev;
spin_lock_init(&data->lock);
+ __sysmmu_get_version(data);
+
ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
dev_name(data->sysmmu));
if (ret)
@@ -630,11 +710,10 @@ static int exynos_sysmmu_probe(struct platform_device *pdev)
ret = iommu_device_register(&data->iommu, &exynos_iommu_ops, dev);
if (ret)
- return ret;
+ goto err_iommu_register;
platform_set_drvdata(pdev, data);
- __sysmmu_get_version(data);
if (PG_ENT_SHIFT < 0) {
if (MMU_MAJ_VER(data->version) < 5) {
PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
@@ -647,6 +726,14 @@ static int exynos_sysmmu_probe(struct platform_device *pdev)
}
}
+ if (MMU_MAJ_VER(data->version) >= 5) {
+ ret = dma_set_mask(dev, DMA_BIT_MASK(36));
+ if (ret) {
+ dev_err(dev, "Unable to set DMA mask: %d\n", ret);
+ goto err_dma_set_mask;
+ }
+ }
+
/*
* use the first registered sysmmu device for performing
* dma mapping operations on iommu page tables (cpu cache flush)
@@ -657,6 +744,12 @@ static int exynos_sysmmu_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
return 0;
+
+err_dma_set_mask:
+ iommu_device_unregister(&data->iommu);
+err_iommu_register:
+ iommu_device_sysfs_remove(&data->iommu);
+ return ret;
}
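The probe function now unwinds in reverse order when the newly added dma_set_mask() step fails: iommu_device_register() is undone before iommu_device_sysfs_add(). The generic shape of that goto-based unwind is sketched below, with hypothetical foo_* helpers declared only for the sketch.

#include <linux/platform_device.h>

/* Hypothetical registration helpers, declared only for illustration. */
int foo_add_sysfs(struct platform_device *pdev);
int foo_register(struct platform_device *pdev);
int foo_late_setup(struct platform_device *pdev);
void foo_unregister(struct platform_device *pdev);
void foo_remove_sysfs(struct platform_device *pdev);

static int foo_probe(struct platform_device *pdev)
{
        int ret;

        ret = foo_add_sysfs(pdev);              /* step A */
        if (ret)
                return ret;

        ret = foo_register(pdev);               /* step B */
        if (ret)
                goto err_remove_sysfs;

        ret = foo_late_setup(pdev);             /* step C */
        if (ret)
                goto err_unregister;

        return 0;

err_unregister:
        foo_unregister(pdev);                   /* undo step B */
err_remove_sysfs:
        foo_remove_sysfs(pdev);                 /* undo step A */
        return ret;
}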
static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
@@ -1251,9 +1344,6 @@ static void exynos_iommu_release_device(struct device *dev)
struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
struct sysmmu_drvdata *data;
- if (!has_sysmmu(dev))
- return;
-
if (owner->domain) {
struct iommu_group *group = iommu_group_get(dev);
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index 94b4589dc67c..011f9ab7f743 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -447,15 +447,10 @@ static struct iommu_device *fsl_pamu_probe_device(struct device *dev)
return &pamu_iommu;
}
-static void fsl_pamu_release_device(struct device *dev)
-{
-}
-
static const struct iommu_ops fsl_pamu_ops = {
.capable = fsl_pamu_capable,
.domain_alloc = fsl_pamu_domain_alloc,
.probe_device = fsl_pamu_probe_device,
- .release_device = fsl_pamu_release_device,
.device_group = fsl_pamu_device_group,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = fsl_pamu_attach_device,
diff --git a/drivers/iommu/hyperv-iommu.c b/drivers/iommu/hyperv-iommu.c
index 51bd66a45a11..e190bb8c225c 100644
--- a/drivers/iommu/hyperv-iommu.c
+++ b/drivers/iommu/hyperv-iommu.c
@@ -68,7 +68,6 @@ static int hyperv_irq_remapping_alloc(struct irq_domain *domain,
{
struct irq_alloc_info *info = arg;
struct irq_data *irq_data;
- struct irq_desc *desc;
int ret = 0;
if (!info || info->type != X86_IRQ_ALLOC_TYPE_IOAPIC || nr_irqs > 1)
@@ -90,8 +89,7 @@ static int hyperv_irq_remapping_alloc(struct irq_domain *domain,
 * Hyper-V IO APIC irq affinity should be in the scope of
 * ioapic_max_cpumask because there is no irq remapping support.
*/
- desc = irq_data_to_desc(irq_data);
- cpumask_copy(desc->irq_common_data.affinity, &ioapic_max_cpumask);
+ irq_data_update_affinity(irq_data, &ioapic_max_cpumask);
return 0;
}
diff --git a/drivers/iommu/intel/cap_audit.c b/drivers/iommu/intel/cap_audit.c
index 71596fc62822..3ee68393122f 100644
--- a/drivers/iommu/intel/cap_audit.c
+++ b/drivers/iommu/intel/cap_audit.c
@@ -10,7 +10,7 @@
#define pr_fmt(fmt) "DMAR: " fmt
-#include <linux/intel-iommu.h>
+#include "iommu.h"
#include "cap_audit.h"
static u64 intel_iommu_cap_sanity;
diff --git a/drivers/iommu/intel/debugfs.c b/drivers/iommu/intel/debugfs.c
index ed796eea4581..1f925285104e 100644
--- a/drivers/iommu/intel/debugfs.c
+++ b/drivers/iommu/intel/debugfs.c
@@ -10,11 +10,11 @@
#include <linux/debugfs.h>
#include <linux/dmar.h>
-#include <linux/intel-iommu.h>
#include <linux/pci.h>
#include <asm/irq_remapping.h>
+#include "iommu.h"
#include "pasid.h"
#include "perf.h"
@@ -263,10 +263,9 @@ static void ctx_tbl_walk(struct seq_file *m, struct intel_iommu *iommu, u16 bus)
static void root_tbl_walk(struct seq_file *m, struct intel_iommu *iommu)
{
- unsigned long flags;
u16 bus;
- spin_lock_irqsave(&iommu->lock, flags);
+ spin_lock(&iommu->lock);
seq_printf(m, "IOMMU %s: Root Table Address: 0x%llx\n", iommu->name,
(u64)virt_to_phys(iommu->root_entry));
seq_puts(m, "B.D.F\tRoot_entry\t\t\t\tContext_entry\t\t\t\tPASID\tPASID_table_entry\n");
@@ -278,8 +277,7 @@ static void root_tbl_walk(struct seq_file *m, struct intel_iommu *iommu)
*/
for (bus = 0; bus < 256; bus++)
ctx_tbl_walk(m, iommu, bus);
-
- spin_unlock_irqrestore(&iommu->lock, flags);
+ spin_unlock(&iommu->lock);
}
static int dmar_translation_struct_show(struct seq_file *m, void *unused)
@@ -342,13 +340,13 @@ static void pgtable_walk_level(struct seq_file *m, struct dma_pte *pde,
}
}
-static int show_device_domain_translation(struct device *dev, void *data)
+static int __show_device_domain_translation(struct device *dev, void *data)
{
- struct device_domain_info *info = dev_iommu_priv_get(dev);
- struct dmar_domain *domain = info->domain;
+ struct dmar_domain *domain;
struct seq_file *m = data;
u64 path[6] = { 0 };
+ domain = to_dmar_domain(iommu_get_domain_for_dev(dev));
if (!domain)
return 0;
@@ -359,20 +357,39 @@ static int show_device_domain_translation(struct device *dev, void *data)
pgtable_walk_level(m, domain->pgd, domain->agaw + 2, 0, path);
seq_putc(m, '\n');
- return 0;
+ /* Don't iterate */
+ return 1;
}
-static int domain_translation_struct_show(struct seq_file *m, void *unused)
+static int show_device_domain_translation(struct device *dev, void *data)
{
- unsigned long flags;
- int ret;
+ struct iommu_group *group;
- spin_lock_irqsave(&device_domain_lock, flags);
- ret = bus_for_each_dev(&pci_bus_type, NULL, m,
- show_device_domain_translation);
- spin_unlock_irqrestore(&device_domain_lock, flags);
+ group = iommu_group_get(dev);
+ if (group) {
+ /*
+ * The group->mutex is held across the callback, which will
+ * block calls to iommu_attach/detach_group/device. Hence,
+ * the domain of the device will not change during traversal.
+ *
+ * All devices in an iommu group share a single domain, hence
+ * we only dump the domain of the first device. Even so,
+ * this code can still race with the iommu_unmap()
+ * interface. This could be solved by RCU-freeing the page
+ * table pages in the iommu_unmap() path.
+ */
+ iommu_group_for_each_dev(group, data,
+ __show_device_domain_translation);
+ iommu_group_put(group);
+ }
- return ret;
+ return 0;
+}
+
+static int domain_translation_struct_show(struct seq_file *m, void *unused)
+{
+ return bus_for_each_dev(&pci_bus_type, NULL, m,
+ show_device_domain_translation);
}
DEFINE_SHOW_ATTRIBUTE(domain_translation_struct);
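Dropping device_domain_lock here relies on iommu_group_for_each_dev(), which holds group->mutex across the callback so the devices' attached domain cannot change while it is being dumped. A minimal sketch of that idiom follows; my_cb() and my_dump_dev() are illustrative names only.

#include <linux/iommu.h>

static int my_cb(struct device *dev, void *data)
{
        /* Returning non-zero stops the iteration after the first device. */
        return 1;
}

static void my_dump_dev(struct device *dev, void *data)
{
        struct iommu_group *group = iommu_group_get(dev);

        if (!group)
                return;

        iommu_group_for_each_dev(group, data, my_cb);
        iommu_group_put(group);
}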
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 9699ca101c62..5a8f780e7ffd 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -19,7 +19,6 @@
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
-#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
@@ -30,10 +29,11 @@
#include <linux/numa.h>
#include <linux/limits.h>
#include <asm/irq_remapping.h>
-#include <trace/events/intel_iommu.h>
+#include "iommu.h"
#include "../irq_remapping.h"
#include "perf.h"
+#include "trace.h"
typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
struct dmar_res_callback {
@@ -60,7 +60,7 @@ LIST_HEAD(dmar_drhd_units);
struct acpi_table_header * __initdata dmar_tbl;
static int dmar_dev_scope_status = 1;
-static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];
+static DEFINE_IDA(dmar_seq_ids);
static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);
@@ -494,7 +494,7 @@ static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
if (drhd->reg_base_addr == rhsa->base_address) {
int node = pxm_to_node(rhsa->proximity_domain);
- if (!node_online(node))
+ if (node != NUMA_NO_NODE && !node_online(node))
node = NUMA_NO_NODE;
drhd->iommu->node = node;
return 0;
@@ -1023,28 +1023,6 @@ out:
return err;
}
-static int dmar_alloc_seq_id(struct intel_iommu *iommu)
-{
- iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
- DMAR_UNITS_SUPPORTED);
- if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
- iommu->seq_id = -1;
- } else {
- set_bit(iommu->seq_id, dmar_seq_ids);
- sprintf(iommu->name, "dmar%d", iommu->seq_id);
- }
-
- return iommu->seq_id;
-}
-
-static void dmar_free_seq_id(struct intel_iommu *iommu)
-{
- if (iommu->seq_id >= 0) {
- clear_bit(iommu->seq_id, dmar_seq_ids);
- iommu->seq_id = -1;
- }
-}
-
static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
struct intel_iommu *iommu;
@@ -1062,11 +1040,14 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
if (!iommu)
return -ENOMEM;
- if (dmar_alloc_seq_id(iommu) < 0) {
+ iommu->seq_id = ida_alloc_range(&dmar_seq_ids, 0,
+ DMAR_UNITS_SUPPORTED - 1, GFP_KERNEL);
+ if (iommu->seq_id < 0) {
pr_err("Failed to allocate seq_id\n");
- err = -ENOSPC;
+ err = iommu->seq_id;
goto error;
}
+ sprintf(iommu->name, "dmar%d", iommu->seq_id);
err = map_iommu(iommu, drhd->reg_base_addr);
if (err) {
@@ -1150,7 +1131,7 @@ err_sysfs:
err_unmap:
unmap_iommu(iommu);
error_free_seq_id:
- dmar_free_seq_id(iommu);
+ ida_free(&dmar_seq_ids, iommu->seq_id);
error:
kfree(iommu);
return err;
@@ -1183,7 +1164,7 @@ static void free_iommu(struct intel_iommu *iommu)
if (iommu->reg)
unmap_iommu(iommu);
- dmar_free_seq_id(iommu);
+ ida_free(&dmar_seq_ids, iommu->seq_id);
kfree(iommu);
}
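Switching the DMAR sequence numbers to an IDA removes the open-coded bitmap bookkeeping: ida_alloc_range() hands out the lowest free ID in a range and ida_free() returns it. A small sketch of the pattern with illustrative my_* names:

#include <linux/idr.h>

static DEFINE_IDA(my_unit_ids);

static int my_unit_create(void)
{
        int id = ida_alloc_range(&my_unit_ids, 0, 63, GFP_KERNEL);

        if (id < 0)
                return id;      /* -ENOMEM, or -ENOSPC when the range is full */

        /* ... use 'id', e.g. to build a "dmar%d" style name ... */
        return id;
}

static void my_unit_destroy(int id)
{
        ida_free(&my_unit_ids, id);
}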
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 5c0dce78586a..7cca030a508e 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -17,7 +17,6 @@
#include <linux/dma-direct.h>
#include <linux/dma-iommu.h>
#include <linux/dmi.h>
-#include <linux/intel-iommu.h>
#include <linux/intel-svm.h>
#include <linux/memory.h>
#include <linux/pci.h>
@@ -26,6 +25,7 @@
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
+#include "iommu.h"
#include "../irq_remapping.h"
#include "../iommu-sva-lib.h"
#include "pasid.h"
@@ -126,13 +126,8 @@ static inline unsigned long virt_to_dma_pfn(void *p)
return page_to_dma_pfn(virt_to_page(p));
}
-/* global iommu list, set NULL for ignored DMAR units */
-static struct intel_iommu **g_iommus;
-
static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;
-static inline struct device_domain_info *
-dmar_search_domain_by_dev_info(int segment, int bus, int devfn);
/*
* set to 1 to panic kernel if can't successfully enable VT-d
@@ -256,10 +251,6 @@ static inline void context_clear_entry(struct context_entry *context)
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;
-#define for_each_domain_iommu(idx, domain) \
- for (idx = 0; idx < g_num_of_iommus; idx++) \
- if (domain->iommu_refcnt[idx])
-
struct dmar_rmrr_unit {
struct list_head list; /* list of rmrr units */
struct acpi_dmar_header *hdr; /* ACPI header */
@@ -293,12 +284,7 @@ static LIST_HEAD(dmar_satc_units);
#define for_each_rmrr_units(rmrr) \
list_for_each_entry(rmrr, &dmar_rmrr_units, list)
-/* bitmap for indexing intel_iommus */
-static int g_num_of_iommus;
-
-static void domain_remove_dev_info(struct dmar_domain *domain);
static void dmar_remove_one_dev_info(struct device *dev);
-static void __dmar_remove_one_dev_info(struct device_domain_info *info);
int dmar_disabled = !IS_ENABLED(CONFIG_INTEL_IOMMU_DEFAULT_ON);
int intel_iommu_sm = IS_ENABLED(CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON);
@@ -314,12 +300,6 @@ static int iommu_skip_te_disable;
#define IDENTMAP_GFX 2
#define IDENTMAP_AZALIA 4
-int intel_iommu_gfx_mapped;
-EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
-
-DEFINE_SPINLOCK(device_domain_lock);
-static LIST_HEAD(device_domain_list);
-
const struct iommu_ops intel_iommu_ops;
static bool translation_pre_enabled(struct intel_iommu *iommu)
@@ -455,24 +435,6 @@ int iommu_calculate_agaw(struct intel_iommu *iommu)
return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}
-/* This functionin only returns single iommu in a domain */
-struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
-{
- int iommu_id;
-
- /* si_domain and vm domain should not get here. */
- if (WARN_ON(!iommu_is_dma_domain(&domain->domain)))
- return NULL;
-
- for_each_domain_iommu(iommu_id, domain)
- break;
-
- if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
- return NULL;
-
- return g_iommus[iommu_id];
-}
-
static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
{
return sm_supported(iommu) ?
@@ -481,16 +443,16 @@ static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
+ struct iommu_domain_info *info;
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
bool found = false;
- int i;
+ unsigned long i;
domain->iommu_coherency = true;
-
- for_each_domain_iommu(i, domain) {
+ xa_for_each(&domain->iommu_array, i, info) {
found = true;
- if (!iommu_paging_structure_coherency(g_iommus[i])) {
+ if (!iommu_paging_structure_coherency(info->iommu)) {
domain->iommu_coherency = false;
break;
}
@@ -544,15 +506,8 @@ static int domain_update_device_node(struct dmar_domain *domain)
struct device_domain_info *info;
int nid = NUMA_NO_NODE;
- assert_spin_locked(&device_domain_lock);
-
- if (list_empty(&domain->devices))
- return NUMA_NO_NODE;
-
+ spin_lock(&domain->lock);
list_for_each_entry(info, &domain->devices, link) {
- if (!info->dev)
- continue;
-
/*
* There could possibly be multiple device numa nodes as devices
* within the same domain may sit behind different IOMMUs. There
@@ -563,6 +518,7 @@ static int domain_update_device_node(struct dmar_domain *domain)
if (nid != NUMA_NO_NODE)
break;
}
+ spin_unlock(&domain->lock);
return nid;
}
@@ -804,26 +760,23 @@ static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
struct context_entry *context;
int ret = 0;
- unsigned long flags;
- spin_lock_irqsave(&iommu->lock, flags);
+ spin_lock(&iommu->lock);
context = iommu_context_addr(iommu, bus, devfn, 0);
if (context)
ret = context_present(context);
- spin_unlock_irqrestore(&iommu->lock, flags);
+ spin_unlock(&iommu->lock);
return ret;
}
static void free_context_table(struct intel_iommu *iommu)
{
- int i;
- unsigned long flags;
struct context_entry *context;
+ int i;
+
+ if (!iommu->root_entry)
+ return;
- spin_lock_irqsave(&iommu->lock, flags);
- if (!iommu->root_entry) {
- goto out;
- }
for (i = 0; i < ROOT_ENTRY_NR; i++) {
context = iommu_context_addr(iommu, i, 0, 0);
if (context)
@@ -835,12 +788,10 @@ static void free_context_table(struct intel_iommu *iommu)
context = iommu_context_addr(iommu, i, 0x80, 0);
if (context)
free_pgtable_page(context);
-
}
+
free_pgtable_page(iommu->root_entry);
iommu->root_entry = NULL;
-out:
- spin_unlock_irqrestore(&iommu->lock, flags);
}
#ifdef CONFIG_DMAR_DEBUG
@@ -849,9 +800,14 @@ static void pgtable_walk(struct intel_iommu *iommu, unsigned long pfn, u8 bus, u
struct device_domain_info *info;
struct dma_pte *parent, *pte;
struct dmar_domain *domain;
+ struct pci_dev *pdev;
int offset, level;
- info = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
+ pdev = pci_get_domain_bus_and_slot(iommu->segment, bus, devfn);
+ if (!pdev)
+ return;
+
+ info = dev_iommu_priv_get(&pdev->dev);
if (!info || !info->domain) {
pr_info("device [%02x:%02x.%d] not probed\n",
bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
@@ -1234,7 +1190,6 @@ static void domain_unmap(struct dmar_domain *domain, unsigned long start_pfn,
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
struct root_entry *root;
- unsigned long flags;
root = (struct root_entry *)alloc_pgtable_page(iommu->node);
if (!root) {
@@ -1244,10 +1199,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
}
__iommu_flush_cache(iommu, root, ROOT_SIZE);
-
- spin_lock_irqsave(&iommu->lock, flags);
iommu->root_entry = root;
- spin_unlock_irqrestore(&iommu->lock, flags);
return 0;
}
@@ -1389,23 +1341,23 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
}
static struct device_domain_info *
-iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
- u8 bus, u8 devfn)
+iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
+ u8 bus, u8 devfn)
{
struct device_domain_info *info;
- assert_spin_locked(&device_domain_lock);
-
if (!iommu->qi)
return NULL;
- list_for_each_entry(info, &domain->devices, link)
+ spin_lock(&domain->lock);
+ list_for_each_entry(info, &domain->devices, link) {
if (info->iommu == iommu && info->bus == bus &&
info->devfn == devfn) {
- if (info->ats_supported && info->dev)
- return info;
- break;
+ spin_unlock(&domain->lock);
+ return info->ats_supported ? info : NULL;
}
+ }
+ spin_unlock(&domain->lock);
return NULL;
}
@@ -1415,23 +1367,21 @@ static void domain_update_iotlb(struct dmar_domain *domain)
struct device_domain_info *info;
bool has_iotlb_device = false;
- assert_spin_locked(&device_domain_lock);
-
- list_for_each_entry(info, &domain->devices, link)
+ spin_lock(&domain->lock);
+ list_for_each_entry(info, &domain->devices, link) {
if (info->ats_enabled) {
has_iotlb_device = true;
break;
}
-
+ }
domain->has_iotlb_device = has_iotlb_device;
+ spin_unlock(&domain->lock);
}
static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
struct pci_dev *pdev;
- assert_spin_locked(&device_domain_lock);
-
if (!info || !dev_is_pci(info->dev))
return;
@@ -1477,8 +1427,6 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
struct pci_dev *pdev;
- assert_spin_locked(&device_domain_lock);
-
if (!dev_is_pci(info->dev))
return;
@@ -1518,17 +1466,15 @@ static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
u64 addr, unsigned mask)
{
- unsigned long flags;
struct device_domain_info *info;
if (!domain->has_iotlb_device)
return;
- spin_lock_irqsave(&device_domain_lock, flags);
+ spin_lock(&domain->lock);
list_for_each_entry(info, &domain->devices, link)
__iommu_flush_dev_iotlb(info, addr, mask);
-
- spin_unlock_irqrestore(&device_domain_lock, flags);
+ spin_unlock(&domain->lock);
}
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
@@ -1539,7 +1485,7 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
unsigned int aligned_pages = __roundup_pow_of_two(pages);
unsigned int mask = ilog2(aligned_pages);
uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
- u16 did = domain->iommu_did[iommu->seq_id];
+ u16 did = domain_id_iommu(domain, iommu);
BUG_ON(pages == 0);
@@ -1609,11 +1555,12 @@ static inline void __mapping_notify_one(struct intel_iommu *iommu,
static void intel_flush_iotlb_all(struct iommu_domain *domain)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- int idx;
+ struct iommu_domain_info *info;
+ unsigned long idx;
- for_each_domain_iommu(idx, dmar_domain) {
- struct intel_iommu *iommu = g_iommus[idx];
- u16 did = dmar_domain->iommu_did[iommu->seq_id];
+ xa_for_each(&dmar_domain->iommu_array, idx, info) {
+ struct intel_iommu *iommu = info->iommu;
+ u16 did = domain_id_iommu(dmar_domain, iommu);
if (domain_use_first_level(dmar_domain))
qi_flush_piotlb(iommu, did, PASID_RID2PASID, 0, -1, 0);
@@ -1719,23 +1666,16 @@ static int iommu_init_domains(struct intel_iommu *iommu)
static void disable_dmar_iommu(struct intel_iommu *iommu)
{
- struct device_domain_info *info, *tmp;
- unsigned long flags;
-
if (!iommu->domain_ids)
return;
- spin_lock_irqsave(&device_domain_lock, flags);
- list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
- if (info->iommu != iommu)
- continue;
-
- if (!info->dev || !info->domain)
- continue;
-
- __dmar_remove_one_dev_info(info);
- }
- spin_unlock_irqrestore(&device_domain_lock, flags);
+ /*
+ * All iommu domains must have been detached from the devices,
+ * hence there should be no domain IDs in use.
+ */
+ if (WARN_ON(bitmap_weight(iommu->domain_ids, cap_ndoms(iommu->cap))
+ > NUM_RESERVED_DID))
+ return;
if (iommu->gcmd & DMA_GCMD_TE)
iommu_disable_translation(iommu);
@@ -1748,8 +1688,6 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
iommu->domain_ids = NULL;
}
- g_iommus[iommu->seq_id] = NULL;
-
/* free context mapping */
free_context_table(iommu);
@@ -1795,55 +1733,77 @@ static struct dmar_domain *alloc_domain(unsigned int type)
domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL;
domain->has_iotlb_device = false;
INIT_LIST_HEAD(&domain->devices);
+ spin_lock_init(&domain->lock);
+ xa_init(&domain->iommu_array);
return domain;
}
-/* Must be called with iommu->lock */
static int domain_attach_iommu(struct dmar_domain *domain,
struct intel_iommu *iommu)
{
+ struct iommu_domain_info *info, *curr;
unsigned long ndomains;
- int num;
+ int num, ret = -ENOSPC;
- assert_spin_locked(&device_domain_lock);
- assert_spin_locked(&iommu->lock);
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
- domain->iommu_refcnt[iommu->seq_id] += 1;
- if (domain->iommu_refcnt[iommu->seq_id] == 1) {
- ndomains = cap_ndoms(iommu->cap);
- num = find_first_zero_bit(iommu->domain_ids, ndomains);
+ spin_lock(&iommu->lock);
+ curr = xa_load(&domain->iommu_array, iommu->seq_id);
+ if (curr) {
+ curr->refcnt++;
+ spin_unlock(&iommu->lock);
+ kfree(info);
+ return 0;
+ }
- if (num >= ndomains) {
- pr_err("%s: No free domain ids\n", iommu->name);
- domain->iommu_refcnt[iommu->seq_id] -= 1;
- return -ENOSPC;
- }
+ ndomains = cap_ndoms(iommu->cap);
+ num = find_first_zero_bit(iommu->domain_ids, ndomains);
+ if (num >= ndomains) {
+ pr_err("%s: No free domain ids\n", iommu->name);
+ goto err_unlock;
+ }
- set_bit(num, iommu->domain_ids);
- domain->iommu_did[iommu->seq_id] = num;
- domain->nid = iommu->node;
- domain_update_iommu_cap(domain);
+ set_bit(num, iommu->domain_ids);
+ info->refcnt = 1;
+ info->did = num;
+ info->iommu = iommu;
+ curr = xa_cmpxchg(&domain->iommu_array, iommu->seq_id,
+ NULL, info, GFP_ATOMIC);
+ if (curr) {
+ ret = xa_err(curr) ? : -EBUSY;
+ goto err_clear;
}
+ domain_update_iommu_cap(domain);
+ spin_unlock(&iommu->lock);
return 0;
+
+err_clear:
+ clear_bit(info->did, iommu->domain_ids);
+err_unlock:
+ spin_unlock(&iommu->lock);
+ kfree(info);
+ return ret;
}
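domain_attach_iommu() now keeps its per-IOMMU state in an xarray, and the insert uses xa_cmpxchg() so the slot is only written if it is still empty; any other outcome is either an xarray allocation error or a racing insert. A condensed sketch of that idiom is shown below; my_entry/my_attach are illustrative, and note that the real code additionally serialises on iommu->lock, which is why it can bump refcnt without atomics.

#include <linux/slab.h>
#include <linux/xarray.h>

struct my_entry {
        unsigned int refcnt;
};

static int my_attach(struct xarray *xa, unsigned long index)
{
        struct my_entry *info, *curr;

        curr = xa_load(xa, index);
        if (curr) {                     /* already present: take a reference */
                curr->refcnt++;
                return 0;
        }

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;
        info->refcnt = 1;

        /* Store only if the slot is still NULL. A non-NULL return is either
         * an error encoded by the xarray or an entry a racing caller stored. */
        curr = xa_cmpxchg(xa, index, NULL, info, GFP_KERNEL);
        if (curr) {
                kfree(info);
                return xa_err(curr) ? : -EBUSY;
        }

        return 0;
}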
static void domain_detach_iommu(struct dmar_domain *domain,
struct intel_iommu *iommu)
{
- int num;
+ struct iommu_domain_info *info;
- assert_spin_locked(&device_domain_lock);
- assert_spin_locked(&iommu->lock);
-
- domain->iommu_refcnt[iommu->seq_id] -= 1;
- if (domain->iommu_refcnt[iommu->seq_id] == 0) {
- num = domain->iommu_did[iommu->seq_id];
- clear_bit(num, iommu->domain_ids);
+ spin_lock(&iommu->lock);
+ info = xa_load(&domain->iommu_array, iommu->seq_id);
+ if (--info->refcnt == 0) {
+ clear_bit(info->did, iommu->domain_ids);
+ xa_erase(&domain->iommu_array, iommu->seq_id);
+ domain->nid = NUMA_NO_NODE;
domain_update_iommu_cap(domain);
- domain->iommu_did[iommu->seq_id] = 0;
+ kfree(info);
}
+ spin_unlock(&iommu->lock);
}
static inline int guestwidth_to_adjustwidth(int gaw)
@@ -1862,10 +1822,6 @@ static inline int guestwidth_to_adjustwidth(int gaw)
static void domain_exit(struct dmar_domain *domain)
{
-
- /* Remove associated devices and clear attached or cached domains */
- domain_remove_dev_info(domain);
-
if (domain->pgd) {
LIST_HEAD(freelist);
@@ -1873,6 +1829,9 @@ static void domain_exit(struct dmar_domain *domain)
put_pages_list(&freelist);
}
+ if (WARN_ON(!list_empty(&domain->devices)))
+ return;
+
kfree(domain);
}
@@ -1930,11 +1889,11 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
struct pasid_table *table,
u8 bus, u8 devfn)
{
- u16 did = domain->iommu_did[iommu->seq_id];
+ struct device_domain_info *info =
+ iommu_support_dev_iotlb(domain, iommu, bus, devfn);
+ u16 did = domain_id_iommu(domain, iommu);
int translation = CONTEXT_TT_MULTI_LEVEL;
- struct device_domain_info *info = NULL;
struct context_entry *context;
- unsigned long flags;
int ret;
WARN_ON(did == 0);
@@ -1947,9 +1906,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
BUG_ON(!domain->pgd);
- spin_lock_irqsave(&device_domain_lock, flags);
spin_lock(&iommu->lock);
-
ret = -ENOMEM;
context = iommu_context_addr(iommu, bus, devfn, 1);
if (!context)
@@ -2000,7 +1957,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
* Setup the Device-TLB enable bit and Page request
* Enable bit:
*/
- info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
if (info && info->ats_supported)
context_set_sm_dte(context);
if (info && info->pri_supported)
@@ -2023,7 +1979,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
goto out_unlock;
}
- info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
if (info && info->ats_supported)
translation = CONTEXT_TT_DEV_IOTLB;
else
@@ -2069,7 +2024,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
out_unlock:
spin_unlock(&iommu->lock);
- spin_unlock_irqrestore(&device_domain_lock, flags);
return ret;
}
@@ -2186,8 +2140,9 @@ static void switch_to_super_page(struct dmar_domain *domain,
unsigned long end_pfn, int level)
{
unsigned long lvl_pages = lvl_to_nr_pages(level);
+ struct iommu_domain_info *info;
struct dma_pte *pte = NULL;
- int i;
+ unsigned long i;
while (start_pfn <= end_pfn) {
if (!pte)
@@ -2198,8 +2153,8 @@ static void switch_to_super_page(struct dmar_domain *domain,
start_pfn + lvl_pages - 1,
level + 1);
- for_each_domain_iommu(i, domain)
- iommu_flush_iotlb_psi(g_iommus[i], domain,
+ xa_for_each(&domain->iommu_array, i, info)
+ iommu_flush_iotlb_psi(info->iommu, domain,
start_pfn, lvl_pages,
0, 0);
}
@@ -2313,16 +2268,15 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8
{
struct intel_iommu *iommu = info->iommu;
struct context_entry *context;
- unsigned long flags;
u16 did_old;
if (!iommu)
return;
- spin_lock_irqsave(&iommu->lock, flags);
+ spin_lock(&iommu->lock);
context = iommu_context_addr(iommu, bus, devfn, 0);
if (!context) {
- spin_unlock_irqrestore(&iommu->lock, flags);
+ spin_unlock(&iommu->lock);
return;
}
@@ -2330,14 +2284,14 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8
if (hw_pass_through && domain_type_is_si(info->domain))
did_old = FLPT_DEFAULT_DID;
else
- did_old = info->domain->iommu_did[iommu->seq_id];
+ did_old = domain_id_iommu(info->domain, iommu);
} else {
did_old = context_domain_id(context);
}
context_clear_entry(context);
__iommu_flush_cache(iommu, context, sizeof(*context));
- spin_unlock_irqrestore(&iommu->lock, flags);
+ spin_unlock(&iommu->lock);
iommu->flush.flush_context(iommu,
did_old,
(((u16)bus) << 8) | devfn,
@@ -2356,30 +2310,6 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8
__iommu_flush_dev_iotlb(info, 0, MAX_AGAW_PFN_WIDTH);
}
-static void domain_remove_dev_info(struct dmar_domain *domain)
-{
- struct device_domain_info *info, *tmp;
- unsigned long flags;
-
- spin_lock_irqsave(&device_domain_lock, flags);
- list_for_each_entry_safe(info, tmp, &domain->devices, link)
- __dmar_remove_one_dev_info(info);
- spin_unlock_irqrestore(&device_domain_lock, flags);
-}
-
-static inline struct device_domain_info *
-dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
-{
- struct device_domain_info *info;
-
- list_for_each_entry(info, &device_domain_list, global)
- if (info->segment == segment && info->bus == bus &&
- info->devfn == devfn)
- return info;
-
- return NULL;
-}
-
static int domain_setup_first_level(struct intel_iommu *iommu,
struct dmar_domain *domain,
struct device *dev,
@@ -2412,7 +2342,7 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
flags |= PASID_FLAG_PAGE_SNOOP;
return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid,
- domain->iommu_did[iommu->seq_id],
+ domain_id_iommu(domain, iommu),
flags);
}
@@ -2499,7 +2429,6 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu;
- unsigned long flags;
u8 bus, devfn;
int ret;
@@ -2507,17 +2436,13 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
if (!iommu)
return -ENODEV;
- spin_lock_irqsave(&device_domain_lock, flags);
- info->domain = domain;
- spin_lock(&iommu->lock);
ret = domain_attach_iommu(domain, iommu);
- spin_unlock(&iommu->lock);
- if (ret) {
- spin_unlock_irqrestore(&device_domain_lock, flags);
+ if (ret)
return ret;
- }
+ info->domain = domain;
+ spin_lock(&domain->lock);
list_add(&info->link, &domain->devices);
- spin_unlock_irqrestore(&device_domain_lock, flags);
+ spin_unlock(&domain->lock);
/* PASID table is mandatory for a PCI device in scalable mode. */
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
@@ -2529,7 +2454,6 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
}
/* Setup the PASID entry for requests without PASID: */
- spin_lock_irqsave(&iommu->lock, flags);
if (hw_pass_through && domain_type_is_si(domain))
ret = intel_pasid_setup_pass_through(iommu, domain,
dev, PASID_RID2PASID);
@@ -2539,7 +2463,6 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
else
ret = intel_pasid_setup_second_level(iommu, domain,
dev, PASID_RID2PASID);
- spin_unlock_irqrestore(&iommu->lock, flags);
if (ret) {
dev_err(dev, "Setup RID2PASID failed\n");
dmar_remove_one_dev_info(dev);
@@ -2807,7 +2730,6 @@ static int copy_translation_tables(struct intel_iommu *iommu)
struct root_entry *old_rt;
phys_addr_t old_rt_phys;
int ctxt_table_entries;
- unsigned long flags;
u64 rtaddr_reg;
int bus, ret;
bool new_ext, ext;
@@ -2850,7 +2772,7 @@ static int copy_translation_tables(struct intel_iommu *iommu)
}
}
- spin_lock_irqsave(&iommu->lock, flags);
+ spin_lock(&iommu->lock);
/* Context tables are copied, now write them to the root_entry table */
for (bus = 0; bus < 256; bus++) {
@@ -2869,7 +2791,7 @@ static int copy_translation_tables(struct intel_iommu *iommu)
iommu->root_entry[bus].hi = val;
}
- spin_unlock_irqrestore(&iommu->lock, flags);
+ spin_unlock(&iommu->lock);
kfree(ctxt_tbls);
@@ -2968,36 +2890,6 @@ static int __init init_dmars(void)
struct intel_iommu *iommu;
int ret;
- /*
- * for each drhd
- * allocate root
- * initialize and program root entry to not present
- * endfor
- */
- for_each_drhd_unit(drhd) {
- /*
- * lock not needed as this is only incremented in the single
- * threaded kernel __init code path all other access are read
- * only
- */
- if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
- g_num_of_iommus++;
- continue;
- }
- pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
- }
-
- /* Preallocate enough resources for IOMMU hot-addition */
- if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
- g_num_of_iommus = DMAR_UNITS_SUPPORTED;
-
- g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
- GFP_KERNEL);
- if (!g_iommus) {
- ret = -ENOMEM;
- goto error;
- }
-
ret = intel_cap_audit(CAP_AUDIT_STATIC_DMAR, NULL);
if (ret)
goto free_iommu;
@@ -3020,8 +2912,6 @@ static int __init init_dmars(void)
intel_pasid_max_id);
}
- g_iommus[iommu->seq_id] = iommu;
-
intel_iommu_init_qi(iommu);
ret = iommu_init_domains(iommu);
@@ -3147,9 +3037,6 @@ free_iommu:
free_dmar_iommu(iommu);
}
- kfree(g_iommus);
-
-error:
return ret;
}
@@ -3530,9 +3417,6 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
int sp, ret;
struct intel_iommu *iommu = dmaru->iommu;
- if (g_iommus[iommu->seq_id])
- return 0;
-
ret = intel_cap_audit(CAP_AUDIT_HOTPLUG_DMAR, iommu);
if (ret)
goto out;
@@ -3556,7 +3440,6 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
if (iommu->gcmd & DMA_GCMD_TE)
iommu_disable_translation(iommu);
- g_iommus[iommu->seq_id] = iommu;
ret = iommu_init_domains(iommu);
if (ret == 0)
ret = iommu_alloc_root_entry(iommu);
@@ -4022,6 +3905,20 @@ static int __init probe_acpi_namespace_devices(void)
return 0;
}
+static __init int tboot_force_iommu(void)
+{
+ if (!tboot_enabled())
+ return 0;
+
+ if (no_iommu || dmar_disabled)
+ pr_warn("Forcing Intel-IOMMU to enabled\n");
+
+ dmar_disabled = 0;
+ no_iommu = 0;
+
+ return 1;
+}
+
int __init intel_iommu_init(void)
{
int ret = -ENODEV;
@@ -4093,9 +3990,6 @@ int __init intel_iommu_init(void)
if (list_empty(&dmar_satc_units))
pr_info("No SATC found\n");
- if (dmar_map_gfx)
- intel_iommu_gfx_mapped = 1;
-
init_no_remapping_devices();
ret = init_dmars();
@@ -4181,21 +4075,13 @@ static void domain_context_clear(struct device_domain_info *info)
&domain_context_clear_one_cb, info);
}
-static void __dmar_remove_one_dev_info(struct device_domain_info *info)
+static void dmar_remove_one_dev_info(struct device *dev)
{
- struct dmar_domain *domain;
- struct intel_iommu *iommu;
- unsigned long flags;
-
- assert_spin_locked(&device_domain_lock);
-
- if (WARN_ON(!info))
- return;
-
- iommu = info->iommu;
- domain = info->domain;
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct dmar_domain *domain = info->domain;
+ struct intel_iommu *iommu = info->iommu;
- if (info->dev && !dev_is_real_dma_subdevice(info->dev)) {
+ if (!dev_is_real_dma_subdevice(info->dev)) {
if (dev_is_pci(info->dev) && sm_supported(iommu))
intel_pasid_tear_down_entry(iommu, info->dev,
PASID_RID2PASID, false);
@@ -4205,23 +4091,12 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
intel_pasid_free_table(info->dev);
}
+ spin_lock(&domain->lock);
list_del(&info->link);
+ spin_unlock(&domain->lock);
- spin_lock_irqsave(&iommu->lock, flags);
domain_detach_iommu(domain, iommu);
- spin_unlock_irqrestore(&iommu->lock, flags);
-}
-
-static void dmar_remove_one_dev_info(struct device *dev)
-{
- struct device_domain_info *info;
- unsigned long flags;
-
- spin_lock_irqsave(&device_domain_lock, flags);
- info = dev_iommu_priv_get(dev);
- if (info)
- __dmar_remove_one_dev_info(info);
- spin_unlock_irqrestore(&device_domain_lock, flags);
+ info->domain = NULL;
}
static int md_domain_init(struct dmar_domain *domain, int guest_width)
@@ -4466,15 +4341,16 @@ static void intel_iommu_tlb_sync(struct iommu_domain *domain,
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
unsigned long iova_pfn = IOVA_PFN(gather->start);
size_t size = gather->end - gather->start;
+ struct iommu_domain_info *info;
unsigned long start_pfn;
unsigned long nrpages;
- int iommu_id;
+ unsigned long i;
nrpages = aligned_nrpages(gather->start, size);
start_pfn = mm_to_dma_pfn(iova_pfn);
- for_each_domain_iommu(iommu_id, dmar_domain)
- iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
+ xa_for_each(&dmar_domain->iommu_array, i, info)
+ iommu_flush_iotlb_psi(info->iommu, dmar_domain,
start_pfn, nrpages,
list_empty(&gather->freelist), 0);
@@ -4503,7 +4379,7 @@ static bool domain_support_force_snooping(struct dmar_domain *domain)
struct device_domain_info *info;
bool support = true;
- assert_spin_locked(&device_domain_lock);
+ assert_spin_locked(&domain->lock);
list_for_each_entry(info, &domain->devices, link) {
if (!ecap_sc_support(info->iommu->ecap)) {
support = false;
@@ -4518,8 +4394,7 @@ static void domain_set_force_snooping(struct dmar_domain *domain)
{
struct device_domain_info *info;
- assert_spin_locked(&device_domain_lock);
-
+ assert_spin_locked(&domain->lock);
/*
* Second level page table supports per-PTE snoop control. The
* iommu_map() interface will handle this by setting SNP bit.
@@ -4537,20 +4412,19 @@ static void domain_set_force_snooping(struct dmar_domain *domain)
static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- unsigned long flags;
if (dmar_domain->force_snooping)
return true;
- spin_lock_irqsave(&device_domain_lock, flags);
+ spin_lock(&dmar_domain->lock);
if (!domain_support_force_snooping(dmar_domain)) {
- spin_unlock_irqrestore(&device_domain_lock, flags);
+ spin_unlock(&dmar_domain->lock);
return false;
}
domain_set_force_snooping(dmar_domain);
dmar_domain->force_snooping = true;
- spin_unlock_irqrestore(&device_domain_lock, flags);
+ spin_unlock(&dmar_domain->lock);
return true;
}
@@ -4572,7 +4446,6 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
struct pci_dev *pdev = dev_is_pci(dev) ? to_pci_dev(dev) : NULL;
struct device_domain_info *info;
struct intel_iommu *iommu;
- unsigned long flags;
u8 bus, devfn;
iommu = device_to_iommu(dev, &bus, &devfn);
@@ -4615,10 +4488,7 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
}
}
- spin_lock_irqsave(&device_domain_lock, flags);
- list_add(&info->global, &device_domain_list);
dev_iommu_priv_set(dev, info);
- spin_unlock_irqrestore(&device_domain_lock, flags);
return &iommu->iommu;
}
@@ -4626,15 +4496,9 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
static void intel_iommu_release_device(struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
- unsigned long flags;
dmar_remove_one_dev_info(dev);
-
- spin_lock_irqsave(&device_domain_lock, flags);
dev_iommu_priv_set(dev, NULL);
- list_del(&info->global);
- spin_unlock_irqrestore(&device_domain_lock, flags);
-
kfree(info);
set_dma_ops(dev, NULL);
}
@@ -4707,7 +4571,6 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct context_entry *context;
struct dmar_domain *domain;
- unsigned long flags;
u64 ctx_lo;
int ret;
@@ -4715,9 +4578,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
if (!domain)
return -EINVAL;
- spin_lock_irqsave(&device_domain_lock, flags);
spin_lock(&iommu->lock);
-
ret = -EINVAL;
if (!info->pasid_supported)
goto out;
@@ -4733,7 +4594,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
context[0].lo = ctx_lo;
wmb();
iommu->flush.flush_context(iommu,
- domain->iommu_did[iommu->seq_id],
+ domain_id_iommu(domain, iommu),
PCI_DEVID(info->bus, info->devfn),
DMA_CCMD_MASK_NOBIT,
DMA_CCMD_DEVICE_INVL);
@@ -4747,7 +4608,6 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
out:
spin_unlock(&iommu->lock);
- spin_unlock_irqrestore(&device_domain_lock, flags);
return ret;
}
@@ -4871,13 +4731,11 @@ static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
unsigned long pages = aligned_nrpages(iova, size);
unsigned long pfn = iova >> VTD_PAGE_SHIFT;
- struct intel_iommu *iommu;
- int iommu_id;
+ struct iommu_domain_info *info;
+ unsigned long i;
- for_each_domain_iommu(iommu_id, dmar_domain) {
- iommu = g_iommus[iommu_id];
- __mapping_notify_one(iommu, dmar_domain, pfn, pages);
- }
+ xa_for_each(&dmar_domain->iommu_array, i, info)
+ __mapping_notify_one(info->iommu, dmar_domain, pfn, pages);
}
const struct iommu_ops intel_iommu_ops = {
@@ -4887,7 +4745,6 @@ const struct iommu_ops intel_iommu_ops = {
.probe_finalize = intel_iommu_probe_finalize,
.release_device = intel_iommu_release_device,
.get_resv_regions = intel_iommu_get_resv_regions,
- .put_resv_regions = generic_iommu_put_resv_regions,
.device_group = intel_iommu_device_group,
.dev_enable_feat = intel_iommu_dev_enable_feat,
.dev_disable_feat = intel_iommu_dev_disable_feat,
diff --git a/include/linux/intel-iommu.h b/drivers/iommu/intel/iommu.h
index 5fcf89faa31a..fae45bbb0c7f 100644
--- a/include/linux/intel-iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -21,6 +21,7 @@
#include <linux/dmar.h>
#include <linux/ioasid.h>
#include <linux/bitfield.h>
+#include <linux/xarray.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
@@ -480,7 +481,6 @@ enum {
#define VTD_FLAG_SVM_CAPABLE (1 << 2)
extern int intel_iommu_sm;
-extern spinlock_t device_domain_lock;
#define sm_supported(iommu) (intel_iommu_sm && ecap_smts((iommu)->ecap))
#define pasid_supported(iommu) (sm_supported(iommu) && \
@@ -525,25 +525,25 @@ struct context_entry {
*/
#define DOMAIN_FLAG_USE_FIRST_LEVEL BIT(1)
-struct dmar_domain {
- int nid; /* node id */
-
- unsigned int iommu_refcnt[DMAR_UNITS_SUPPORTED];
- /* Refcount of devices per iommu */
-
-
- u16 iommu_did[DMAR_UNITS_SUPPORTED];
- /* Domain ids per IOMMU. Use u16 since
+struct iommu_domain_info {
+ struct intel_iommu *iommu;
+ unsigned int refcnt; /* Refcount of devices per iommu */
+ u16 did; /* Domain ids per IOMMU. Use u16 since
* domain ids are 16 bit wide according
* to VT-d spec, section 9.3 */
+};
+
+struct dmar_domain {
+ int nid; /* node id */
+ struct xarray iommu_array; /* Attached IOMMU array */
u8 has_iotlb_device: 1;
u8 iommu_coherency: 1; /* indicate coherency of iommu access */
u8 force_snooping : 1; /* Create IOPTEs with snoop control */
u8 set_pte_snp:1;
+ spinlock_t lock; /* Protect device tracking lists */
struct list_head devices; /* all devices' list */
- struct iova_domain iovad; /* iova's that belong to this domain */
struct dma_pte *pgd; /* virtual address */
int gaw; /* max guest address width */
@@ -611,7 +611,6 @@ struct intel_iommu {
/* PCI domain-device relationship */
struct device_domain_info {
struct list_head link; /* link to domain siblings */
- struct list_head global; /* link to global list */
u32 segment; /* PCI segment number */
u8 bus; /* PCI bus number */
u8 devfn; /* PCI devfn number */
@@ -642,6 +641,16 @@ static inline struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
return container_of(dom, struct dmar_domain, domain);
}
+/* Retrieve the domain ID which has been allocated to the domain */
+static inline u16
+domain_id_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
+{
+ struct iommu_domain_info *info =
+ xa_load(&domain->iommu_array, iommu->seq_id);
+
+ return info->did;
+}
+
/*
* 0: readable
* 1: writable
@@ -727,7 +736,6 @@ extern int dmar_ir_support(void);
void *alloc_pgtable_page(int node);
void free_pgtable_page(void *vaddr);
-struct intel_iommu *domain_get_iommu(struct dmar_domain *domain);
void iommu_flush_write_buffer(struct intel_iommu *iommu);
int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev);
struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn);
@@ -787,7 +795,6 @@ extern int iommu_calculate_agaw(struct intel_iommu *iommu);
extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
extern int dmar_disabled;
extern int intel_iommu_enabled;
-extern int intel_iommu_gfx_mapped;
#else
static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
{
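The struct changes above replace the fixed, DMAR_UNITS_SUPPORTED-sized iommu_refcnt[]/iommu_did[] arrays with an xarray of struct iommu_domain_info keyed by iommu->seq_id; domain_id_iommu() looks an entry up with xa_load() and the iotlb_sync_map hunk walks all attached IOMMUs with xa_for_each(). The attach-side bookkeeping that populates the array lives in iommu.c and is not part of these hunks, so the sketch below only illustrates what the new layout implies; example_domain_attach() and the hard-coded domain ID are placeholders, not the real helpers.

/* Illustration only: how an iommu_domain_info entry would be installed in
 * dmar_domain::iommu_array, keyed by the IOMMU's seq_id. The real code in
 * iommu.c also allocates a free domain ID per IOMMU and takes the relevant
 * locks; DID 2 below is just a stand-in and locking is omitted. */
static int example_domain_attach(struct dmar_domain *domain,
				 struct intel_iommu *iommu)
{
	struct iommu_domain_info *info;
	int ret;

	info = xa_load(&domain->iommu_array, iommu->seq_id);
	if (info) {
		info->refcnt++;		/* already attached: bump the refcount */
		return 0;
	}

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->iommu = iommu;
	info->refcnt = 1;
	info->did = 2;			/* placeholder for a properly allocated DID */

	ret = xa_err(xa_store(&domain->iommu_array, iommu->seq_id, info,
			      GFP_KERNEL));
	if (ret)
		kfree(info);

	return ret;
}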
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index a67319597884..2e9683e970f8 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -10,7 +10,6 @@
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
-#include <linux/intel-iommu.h>
#include <linux/acpi.h>
#include <linux/irqdomain.h>
#include <linux/crash_dump.h>
@@ -21,6 +20,7 @@
#include <asm/irq_remapping.h>
#include <asm/pci-direct.h>
+#include "iommu.h"
#include "../irq_remapping.h"
#include "cap_audit.h"
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index 17cad7c1f62d..c5e7e8b020a5 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -12,13 +12,13 @@
#include <linux/bitops.h>
#include <linux/cpufeature.h>
#include <linux/dmar.h>
-#include <linux/intel-iommu.h>
#include <linux/iommu.h>
#include <linux/memory.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/spinlock.h>
+#include "iommu.h"
#include "pasid.h"
/*
@@ -450,17 +450,17 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
struct pasid_entry *pte;
u16 did, pgtt;
+ spin_lock(&iommu->lock);
pte = intel_pasid_get_entry(dev, pasid);
- if (WARN_ON(!pte))
- return;
-
- if (!pasid_pte_is_present(pte))
+ if (WARN_ON(!pte) || !pasid_pte_is_present(pte)) {
+ spin_unlock(&iommu->lock);
return;
+ }
did = pasid_get_domain_id(pte);
pgtt = pasid_pte_get_pgtt(pte);
-
intel_pasid_clear_entry(dev, pasid, fault_ignore);
+ spin_unlock(&iommu->lock);
if (!ecap_coherent(iommu->ecap))
clflush_cache_range(pte, sizeof(*pte));
@@ -496,22 +496,6 @@ static void pasid_flush_caches(struct intel_iommu *iommu,
}
}
-static inline int pasid_enable_wpe(struct pasid_entry *pte)
-{
-#ifdef CONFIG_X86
- unsigned long cr0 = read_cr0();
-
- /* CR0.WP is normally set but just to be sure */
- if (unlikely(!(cr0 & X86_CR0_WP))) {
- pr_err_ratelimited("No CPU write protect!\n");
- return -EINVAL;
- }
-#endif
- pasid_set_wpe(pte);
-
- return 0;
-};
-
/*
* Set up the scalable mode pasid table entry for first only
* translation type.
@@ -528,39 +512,52 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
return -EINVAL;
}
- pte = intel_pasid_get_entry(dev, pasid);
- if (WARN_ON(!pte))
+ if (flags & PASID_FLAG_SUPERVISOR_MODE) {
+#ifdef CONFIG_X86
+ unsigned long cr0 = read_cr0();
+
+ /* CR0.WP is normally set but just to be sure */
+ if (unlikely(!(cr0 & X86_CR0_WP))) {
+ pr_err("No CPU write protect!\n");
+ return -EINVAL;
+ }
+#endif
+ if (!ecap_srs(iommu->ecap)) {
+ pr_err("No supervisor request support on %s\n",
+ iommu->name);
+ return -EINVAL;
+ }
+ }
+
+ if ((flags & PASID_FLAG_FL5LP) && !cap_5lp_support(iommu->cap)) {
+ pr_err("No 5-level paging support for first-level on %s\n",
+ iommu->name);
return -EINVAL;
+ }
- /* Caller must ensure PASID entry is not in use. */
- if (pasid_pte_is_present(pte))
+ spin_lock(&iommu->lock);
+ pte = intel_pasid_get_entry(dev, pasid);
+ if (!pte) {
+ spin_unlock(&iommu->lock);
+ return -ENODEV;
+ }
+
+ if (pasid_pte_is_present(pte)) {
+ spin_unlock(&iommu->lock);
return -EBUSY;
+ }
pasid_clear_entry(pte);
/* Setup the first level page table pointer: */
pasid_set_flptr(pte, (u64)__pa(pgd));
if (flags & PASID_FLAG_SUPERVISOR_MODE) {
- if (!ecap_srs(iommu->ecap)) {
- pr_err("No supervisor request support on %s\n",
- iommu->name);
- return -EINVAL;
- }
pasid_set_sre(pte);
- if (pasid_enable_wpe(pte))
- return -EINVAL;
-
+ pasid_set_wpe(pte);
}
- if (flags & PASID_FLAG_FL5LP) {
- if (cap_5lp_support(iommu->cap)) {
- pasid_set_flpm(pte, 1);
- } else {
- pr_err("No 5-level paging support for first-level\n");
- pasid_clear_entry(pte);
- return -EINVAL;
- }
- }
+ if (flags & PASID_FLAG_FL5LP)
+ pasid_set_flpm(pte, 1);
if (flags & PASID_FLAG_PAGE_SNOOP)
pasid_set_pgsnp(pte);
@@ -572,6 +569,8 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
/* Setup Present and PASID Granular Transfer Type: */
pasid_set_translation_type(pte, PASID_ENTRY_PGTT_FL_ONLY);
pasid_set_present(pte);
+ spin_unlock(&iommu->lock);
+
pasid_flush_caches(iommu, pte, pasid, did);
return 0;
@@ -627,17 +626,19 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
}
pgd_val = virt_to_phys(pgd);
- did = domain->iommu_did[iommu->seq_id];
+ did = domain_id_iommu(domain, iommu);
+ spin_lock(&iommu->lock);
pte = intel_pasid_get_entry(dev, pasid);
if (!pte) {
- dev_err(dev, "Failed to get pasid entry of PASID %d\n", pasid);
+ spin_unlock(&iommu->lock);
return -ENODEV;
}
- /* Caller must ensure PASID entry is not in use. */
- if (pasid_pte_is_present(pte))
+ if (pasid_pte_is_present(pte)) {
+ spin_unlock(&iommu->lock);
return -EBUSY;
+ }
pasid_clear_entry(pte);
pasid_set_domain_id(pte, did);
@@ -654,6 +655,8 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
if (pasid != PASID_RID2PASID)
pasid_set_sre(pte);
pasid_set_present(pte);
+ spin_unlock(&iommu->lock);
+
pasid_flush_caches(iommu, pte, pasid, did);
return 0;
@@ -669,15 +672,17 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
u16 did = FLPT_DEFAULT_DID;
struct pasid_entry *pte;
+ spin_lock(&iommu->lock);
pte = intel_pasid_get_entry(dev, pasid);
if (!pte) {
- dev_err(dev, "Failed to get pasid entry of PASID %d\n", pasid);
+ spin_unlock(&iommu->lock);
return -ENODEV;
}
- /* Caller must ensure PASID entry is not in use. */
- if (pasid_pte_is_present(pte))
+ if (pasid_pte_is_present(pte)) {
+ spin_unlock(&iommu->lock);
return -EBUSY;
+ }
pasid_clear_entry(pte);
pasid_set_domain_id(pte, did);
@@ -692,6 +697,8 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
*/
pasid_set_sre(pte);
pasid_set_present(pte);
+ spin_unlock(&iommu->lock);
+
pasid_flush_caches(iommu, pte, pasid, did);
return 0;
diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index bf5b937848b4..20c54e50f533 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -39,6 +39,7 @@
* only and pass-through transfer modes.
*/
#define FLPT_DEFAULT_DID 1
+#define NUM_RESERVED_DID 2
/*
* The SUPERVISOR_MODE flag indicates a first level translation which
diff --git a/drivers/iommu/intel/perf.c b/drivers/iommu/intel/perf.c
index 0e8e03252d92..94ee70ac38e3 100644
--- a/drivers/iommu/intel/perf.c
+++ b/drivers/iommu/intel/perf.c
@@ -9,8 +9,8 @@
*/
#include <linux/spinlock.h>
-#include <linux/intel-iommu.h>
+#include "iommu.h"
#include "perf.h"
static DEFINE_SPINLOCK(latency_lock);
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 7ee37d996e15..8bcfb93dda56 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -5,7 +5,6 @@
* Authors: David Woodhouse <dwmw2@infradead.org>
*/
-#include <linux/intel-iommu.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
@@ -21,11 +20,12 @@
#include <linux/ioasid.h>
#include <asm/page.h>
#include <asm/fpu/api.h>
-#include <trace/events/intel_iommu.h>
+#include "iommu.h"
#include "pasid.h"
#include "perf.h"
#include "../iommu-sva-lib.h"
+#include "trace.h"
static irqreturn_t prq_event_thread(int irq, void *d);
static void intel_svm_drain_prq(struct device *dev, u32 pasid);
@@ -328,9 +328,9 @@ static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu,
unsigned int flags)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
- unsigned long iflags, sflags;
struct intel_svm_dev *sdev;
struct intel_svm *svm;
+ unsigned long sflags;
int ret = 0;
svm = pasid_private_find(mm->pasid);
@@ -394,11 +394,8 @@ static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu,
sflags = (flags & SVM_FLAG_SUPERVISOR_MODE) ?
PASID_FLAG_SUPERVISOR_MODE : 0;
sflags |= cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0;
- spin_lock_irqsave(&iommu->lock, iflags);
ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, mm->pasid,
FLPT_DEFAULT_DID, sflags);
- spin_unlock_irqrestore(&iommu->lock, iflags);
-
if (ret)
goto free_sdev;
@@ -544,7 +541,7 @@ static void intel_svm_drain_prq(struct device *dev, u32 pasid)
domain = info->domain;
pdev = to_pci_dev(dev);
sid = PCI_DEVID(info->bus, info->devfn);
- did = domain->iommu_did[iommu->seq_id];
+ did = domain_id_iommu(domain, iommu);
qdep = pci_ats_queue_depth(pdev);
/*
diff --git a/drivers/iommu/intel/trace.c b/drivers/iommu/intel/trace.c
index bfb6a6e37a88..117e626e3ea9 100644
--- a/drivers/iommu/intel/trace.c
+++ b/drivers/iommu/intel/trace.c
@@ -11,4 +11,4 @@
#include <linux/types.h>
#define CREATE_TRACE_POINTS
-#include <trace/events/intel_iommu.h>
+#include "trace.h"
diff --git a/include/trace/events/intel_iommu.h b/drivers/iommu/intel/trace.h
index e5c1ca6d16ee..93d96f93a89b 100644
--- a/include/trace/events/intel_iommu.h
+++ b/drivers/iommu/intel/trace.h
@@ -13,7 +13,8 @@
#define _TRACE_INTEL_IOMMU_H
#include <linux/tracepoint.h>
-#include <linux/intel-iommu.h>
+
+#include "iommu.h"
#define MSG_MAX 256
@@ -91,4 +92,8 @@ TRACE_EVENT(prq_report,
#endif /* _TRACE_INTEL_IOMMU_H */
/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH ../../drivers/iommu/intel/
+#define TRACE_INCLUDE_FILE trace
#include <trace/define_trace.h>
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index be066c1503d3..ba3115fd0f86 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -182,14 +182,8 @@ static bool arm_v7s_is_mtk_enabled(struct io_pgtable_cfg *cfg)
(cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_EXT);
}
-static arm_v7s_iopte paddr_to_iopte(phys_addr_t paddr, int lvl,
- struct io_pgtable_cfg *cfg)
+static arm_v7s_iopte to_mtk_iopte(phys_addr_t paddr, arm_v7s_iopte pte)
{
- arm_v7s_iopte pte = paddr & ARM_V7S_LVL_MASK(lvl);
-
- if (!arm_v7s_is_mtk_enabled(cfg))
- return pte;
-
if (paddr & BIT_ULL(32))
pte |= ARM_V7S_ATTR_MTK_PA_BIT32;
if (paddr & BIT_ULL(33))
@@ -199,6 +193,17 @@ static arm_v7s_iopte paddr_to_iopte(phys_addr_t paddr, int lvl,
return pte;
}
+static arm_v7s_iopte paddr_to_iopte(phys_addr_t paddr, int lvl,
+ struct io_pgtable_cfg *cfg)
+{
+ arm_v7s_iopte pte = paddr & ARM_V7S_LVL_MASK(lvl);
+
+ if (arm_v7s_is_mtk_enabled(cfg))
+ return to_mtk_iopte(paddr, pte);
+
+ return pte;
+}
+
static phys_addr_t iopte_to_paddr(arm_v7s_iopte pte, int lvl,
struct io_pgtable_cfg *cfg)
{
@@ -240,10 +245,17 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
dma_addr_t dma;
size_t size = ARM_V7S_TABLE_SIZE(lvl, cfg);
void *table = NULL;
+ gfp_t gfp_l1;
+
+ /*
+ * ARM_MTK_TTBR_EXT extends the translation table base to support
+ * larger memory addresses.
+ */
+ gfp_l1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT ?
+ GFP_KERNEL : ARM_V7S_TABLE_GFP_DMA;
if (lvl == 1)
- table = (void *)__get_free_pages(
- __GFP_ZERO | ARM_V7S_TABLE_GFP_DMA, get_order(size));
+ table = (void *)__get_free_pages(gfp_l1 | __GFP_ZERO, get_order(size));
else if (lvl == 2)
table = kmem_cache_zalloc(data->l2_tables, gfp);
@@ -251,7 +263,8 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
return NULL;
phys = virt_to_phys(table);
- if (phys != (arm_v7s_iopte)phys) {
+ if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT ?
+ phys >= (1ULL << cfg->oas) : phys != (arm_v7s_iopte)phys) {
/* Doesn't fit in PTE */
dev_err(dev, "Page table does not fit in PTE: %pa", &phys);
goto out_free;
@@ -457,9 +470,14 @@ static arm_v7s_iopte arm_v7s_install_table(arm_v7s_iopte *table,
arm_v7s_iopte curr,
struct io_pgtable_cfg *cfg)
{
+ phys_addr_t phys = virt_to_phys(table);
arm_v7s_iopte old, new;
- new = virt_to_phys(table) | ARM_V7S_PTE_TYPE_TABLE;
+ new = phys | ARM_V7S_PTE_TYPE_TABLE;
+
+ if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT)
+ new = to_mtk_iopte(phys, new);
+
if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
new |= ARM_V7S_ATTR_NS_TABLE;
@@ -779,6 +797,8 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
void *cookie)
{
struct arm_v7s_io_pgtable *data;
+ slab_flags_t slab_flag;
+ phys_addr_t paddr;
if (cfg->ias > (arm_v7s_is_mtk_enabled(cfg) ? 34 : ARM_V7S_ADDR_BITS))
return NULL;
@@ -788,7 +808,8 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
IO_PGTABLE_QUIRK_NO_PERMS |
- IO_PGTABLE_QUIRK_ARM_MTK_EXT))
+ IO_PGTABLE_QUIRK_ARM_MTK_EXT |
+ IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT))
return NULL;
/* If ARM_MTK_4GB is enabled, the NO_PERMS is also expected. */
@@ -796,15 +817,27 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
!(cfg->quirks & IO_PGTABLE_QUIRK_NO_PERMS))
return NULL;
+ if ((cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT) &&
+ !arm_v7s_is_mtk_enabled(cfg))
+ return NULL;
+
data = kmalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return NULL;
spin_lock_init(&data->split_lock);
+
+ /*
+ * ARM_MTK_TTBR_EXT extends the translation table base to support
+ * larger memory addresses.
+ */
+ slab_flag = cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT ?
+ 0 : ARM_V7S_TABLE_SLAB_FLAGS;
+
data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2",
ARM_V7S_TABLE_SIZE(2, cfg),
ARM_V7S_TABLE_SIZE(2, cfg),
- ARM_V7S_TABLE_SLAB_FLAGS, NULL);
+ slab_flag, NULL);
if (!data->l2_tables)
goto out_free_data;
@@ -850,12 +883,16 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
wmb();
/* TTBR */
- cfg->arm_v7s_cfg.ttbr = virt_to_phys(data->pgd) | ARM_V7S_TTBR_S |
- (cfg->coherent_walk ? (ARM_V7S_TTBR_NOS |
- ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) |
- ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA)) :
- (ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_NC) |
- ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_NC)));
+ paddr = virt_to_phys(data->pgd);
+ if (arm_v7s_is_mtk_enabled(cfg))
+ cfg->arm_v7s_cfg.ttbr = paddr | upper_32_bits(paddr);
+ else
+ cfg->arm_v7s_cfg.ttbr = paddr | ARM_V7S_TTBR_S |
+ (cfg->coherent_walk ? (ARM_V7S_TTBR_NOS |
+ ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) |
+ ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA)) :
+ (ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_NC) |
+ ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_NC)));
return &data->iop;
out_free_data:
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 847ad47a2dfd..780fb7071577 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -259,7 +259,8 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
return 0;
out_release:
- ops->release_device(dev);
+ if (ops->release_device)
+ ops->release_device(dev);
out_module_put:
module_put(ops->owner);
@@ -272,7 +273,7 @@ err_free:
int iommu_probe_device(struct device *dev)
{
- const struct iommu_ops *ops = dev->bus->iommu_ops;
+ const struct iommu_ops *ops;
struct iommu_group *group;
int ret;
@@ -313,6 +314,7 @@ int iommu_probe_device(struct device *dev)
mutex_unlock(&group->mutex);
iommu_group_put(group);
+ ops = dev_iommu_ops(dev);
if (ops->probe_finalize)
ops->probe_finalize(dev);
@@ -336,7 +338,8 @@ void iommu_release_device(struct device *dev)
iommu_device_unlink(dev->iommu->iommu_dev, dev);
ops = dev_iommu_ops(dev);
- ops->release_device(dev);
+ if (ops->release_device)
+ ops->release_device(dev);
iommu_group_remove_device(dev);
module_put(ops->owner);
@@ -600,7 +603,7 @@ static void iommu_group_release(struct kobject *kobj)
if (group->iommu_data_release)
group->iommu_data_release(group->iommu_data);
- ida_simple_remove(&iommu_group_ida, group->id);
+ ida_free(&iommu_group_ida, group->id);
if (group->default_domain)
iommu_domain_free(group->default_domain);
@@ -641,7 +644,7 @@ struct iommu_group *iommu_group_alloc(void)
INIT_LIST_HEAD(&group->devices);
INIT_LIST_HEAD(&group->entry);
- ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
+ ret = ida_alloc(&iommu_group_ida, GFP_KERNEL);
if (ret < 0) {
kfree(group);
return ERR_PTR(ret);
@@ -651,7 +654,7 @@ struct iommu_group *iommu_group_alloc(void)
ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
NULL, "%d", group->id);
if (ret) {
- ida_simple_remove(&iommu_group_ida, group->id);
+ ida_free(&iommu_group_ida, group->id);
kobject_put(&group->kobj);
return ERR_PTR(ret);
}
@@ -2457,6 +2460,9 @@ static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
len = 0;
}
+ if (sg_is_dma_bus_address(sg))
+ goto next;
+
if (len) {
len += sg->length;
} else {
@@ -2464,6 +2470,7 @@ static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
start = s_phys;
}
+next:
if (++i < nents)
sg = sg_next(sg);
}
@@ -2576,32 +2583,25 @@ void iommu_get_resv_regions(struct device *dev, struct list_head *list)
ops->get_resv_regions(dev, list);
}
-void iommu_put_resv_regions(struct device *dev, struct list_head *list)
-{
- const struct iommu_ops *ops = dev_iommu_ops(dev);
-
- if (ops->put_resv_regions)
- ops->put_resv_regions(dev, list);
-}
-
/**
- * generic_iommu_put_resv_regions - Reserved region driver helper
+ * iommu_put_resv_regions - release reserved regions
* @dev: device for which to free reserved regions
* @list: reserved region list for device
*
- * IOMMU drivers can use this to implement their .put_resv_regions() callback
- * for simple reservations. Memory allocated for each reserved region will be
- * freed. If an IOMMU driver allocates additional resources per region, it is
- * going to have to implement a custom callback.
+ * This releases a reserved region list acquired by iommu_get_resv_regions().
*/
-void generic_iommu_put_resv_regions(struct device *dev, struct list_head *list)
+void iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
struct iommu_resv_region *entry, *next;
- list_for_each_entry_safe(entry, next, list, list)
- kfree(entry);
+ list_for_each_entry_safe(entry, next, list, list) {
+ if (entry->free)
+ entry->free(dev, entry);
+ else
+ kfree(entry);
+ }
}
-EXPORT_SYMBOL(generic_iommu_put_resv_regions);
+EXPORT_SYMBOL(iommu_put_resv_regions);
struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
size_t length, int prot,
@@ -2751,19 +2751,6 @@ int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
}
EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
-bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
-{
- if (dev->iommu && dev->iommu->iommu_dev) {
- const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
-
- if (ops->dev_feat_enabled)
- return ops->dev_feat_enabled(dev, feat);
- }
-
- return false;
-}
-EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);
-
/**
* iommu_sva_bind_device() - Bind a process address space to a device
* @dev: the device
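iommu_put_resv_regions() above now releases each entry through an optional per-region hook: entry->free(dev, entry) when the region provides one, plain kfree() otherwise. That lets a driver embed struct iommu_resv_region in a larger allocation and still hand the whole list to the core for teardown. A minimal sketch of the driver side follows; the wrapper type, the extra fw_handle field and the helper names are hypothetical, only the ->free(dev, entry) convention and the ->list member come from the code above, and the usual start/length/prot/type initialisation is omitted for brevity.

/* Hypothetical wrapper: core region plus driver-private state. */
struct my_resv_region {
	struct iommu_resv_region region;
	void *fw_handle;			/* assumed extra driver data */
};

static void my_region_free(struct device *dev, struct iommu_resv_region *region)
{
	struct my_resv_region *mine = container_of(region, struct my_resv_region,
						   region);

	kfree(mine->fw_handle);			/* release the extra state... */
	kfree(mine);				/* ...then the wrapper itself */
}

static void my_get_resv_regions(struct device *dev, struct list_head *head)
{
	struct my_resv_region *mine = kzalloc(sizeof(*mine), GFP_KERNEL);

	if (!mine)
		return;

	/* start/length/prot/type setup omitted */
	mine->region.free = my_region_free;	/* used by iommu_put_resv_regions() */
	list_add_tail(&mine->region.list, head);
}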
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index db77aa675145..47d1983dfa2a 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -26,6 +26,11 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad,
static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);
+unsigned long iova_rcache_range(void)
+{
+ return PAGE_SIZE << (IOVA_RANGE_CACHE_MAX_SIZE - 1);
+}
+
static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
{
struct iova_domain *iovad;
@@ -614,7 +619,12 @@ EXPORT_SYMBOL_GPL(reserve_iova);
* dynamic size tuning described in the paper.
*/
-#define IOVA_MAG_SIZE 128
+/*
+ * As kmalloc's buffer sizes are fixed to powers of 2, 127 is chosen so
+ * that the size of 'iova_magazine' is exactly 1024 bytes and no memory
+ * is wasted.
+ */
+#define IOVA_MAG_SIZE 127
#define MAX_GLOBAL_MAGS 32 /* magazines per bin */
struct iova_magazine {
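The 128 -> 127 change above is purely about allocation size. Assuming the (unshown) struct iova_magazine is one unsigned long counter followed by IOVA_MAG_SIZE pfn slots, a 64-bit build gets (1 + 127) * 8 = 1024 bytes, exactly a power-of-two kmalloc size, whereas 128 slots gave 1032 bytes and fell into the 2048-byte slab, wasting nearly half of each allocation. A self-contained check of that arithmetic, under the assumed layout:

#include <assert.h>
#include <stdio.h>

/* Assumed layout of the magazine: a counter plus 127 pfn slots. */
struct example_magazine {
	unsigned long size;
	unsigned long pfns[127];
};

int main(void)
{
	/* On LP64: 128 longs * 8 bytes = 1024, a power-of-two kmalloc bucket. */
	assert(sizeof(struct example_magazine) == 1024);

	/* The old 128-slot variant was 129 * 8 = 1032 bytes, which kmalloc
	 * rounds up to 2048. */
	printf("magazine size: %zu bytes\n", sizeof(struct example_magazine));
	return 0;
}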
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index f09aedfdd462..6a24aa804ea3 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -394,10 +394,6 @@ static struct iommu_device *msm_iommu_probe_device(struct device *dev)
return &iommu->iommu;
}
-static void msm_iommu_release_device(struct device *dev)
-{
-}
-
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
int ret = 0;
@@ -603,7 +599,7 @@ static int insert_iommu_master(struct device *dev,
for (sid = 0; sid < master->num_mids; sid++)
if (master->mids[sid] == spec->args[0]) {
- dev_warn(dev, "Stream ID 0x%hx repeated; ignoring\n",
+ dev_warn(dev, "Stream ID 0x%x repeated; ignoring\n",
sid);
return 0;
}
@@ -677,7 +673,6 @@ fail:
static struct iommu_ops msm_iommu_ops = {
.domain_alloc = msm_iommu_domain_alloc,
.probe_device = msm_iommu_probe_device,
- .release_device = msm_iommu_release_device,
.device_group = generic_device_group,
.pgsize_bitmap = MSM_IOMMU_PGSIZES,
.of_xlate = qcom_iommu_of_xlate,
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index bb9dd92c9898..7e363b1f24df 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -34,7 +34,6 @@
#include <dt-bindings/memory/mtk-memory-port.h>
#define REG_MMU_PT_BASE_ADDR 0x000
-#define MMU_PT_ADDR_MASK GENMASK(31, 7)
#define REG_MMU_INVALIDATE 0x020
#define F_ALL_INVLD 0x2
@@ -138,6 +137,7 @@
/* PM and clock always on. e.g. infra iommu */
#define PM_CLK_AO BIT(15)
#define IFA_IOMMU_PCIE_SUPPORT BIT(16)
+#define PGTABLE_PA_35_EN BIT(17)
#define MTK_IOMMU_HAS_FLAG_MASK(pdata, _x, mask) \
((((pdata)->flags) & (mask)) == (_x))
@@ -596,6 +596,9 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
.iommu_dev = data->dev,
};
+ if (MTK_IOMMU_HAS_FLAG(data->plat_data, PGTABLE_PA_35_EN))
+ dom->cfg.quirks |= IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT;
+
if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE))
dom->cfg.oas = data->enable_4GB ? 33 : 32;
else
@@ -684,8 +687,7 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
goto err_unlock;
}
bank->m4u_dom = dom;
- writel(dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
- bank->base + REG_MMU_PT_BASE_ADDR);
+ writel(dom->cfg.arm_v7s_cfg.ttbr, bank->base + REG_MMU_PT_BASE_ADDR);
pm_runtime_put(m4udev);
}
@@ -819,17 +821,12 @@ static void mtk_iommu_release_device(struct device *dev)
struct device *larbdev;
unsigned int larbid;
- if (!fwspec || fwspec->ops != &mtk_iommu_ops)
- return;
-
data = dev_iommu_priv_get(dev);
if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
larbid = MTK_M4U_TO_LARB(fwspec->ids[0]);
larbdev = data->larb_imu[larbid].dev;
device_link_remove(dev, larbdev);
}
-
- iommu_fwspec_free(dev);
}
static int mtk_iommu_get_group_id(struct device *dev, const struct mtk_iommu_plat_data *plat_data)
@@ -933,7 +930,6 @@ static const struct iommu_ops mtk_iommu_ops = {
.device_group = mtk_iommu_device_group,
.of_xlate = mtk_iommu_of_xlate,
.get_resv_regions = mtk_iommu_get_resv_regions,
- .put_resv_regions = generic_iommu_put_resv_regions,
.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
@@ -1140,22 +1136,32 @@ static int mtk_iommu_probe(struct platform_device *pdev)
data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);
if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE)) {
- switch (data->plat_data->m4u_plat) {
- case M4U_MT2712:
- p = "mediatek,mt2712-infracfg";
- break;
- case M4U_MT8173:
- p = "mediatek,mt8173-infracfg";
- break;
- default:
- p = NULL;
+ infracfg = syscon_regmap_lookup_by_phandle(dev->of_node, "mediatek,infracfg");
+ if (IS_ERR(infracfg)) {
+ /*
+ * Legacy devicetrees will not specify a phandle to
+ * mediatek,infracfg: in that case, we use the older
+ * way to retrieve a syscon to infra.
+ *
+ * This is for backward compatibility only, hence no
+ * more compatibles shall be added to this list.
+ */
+ switch (data->plat_data->m4u_plat) {
+ case M4U_MT2712:
+ p = "mediatek,mt2712-infracfg";
+ break;
+ case M4U_MT8173:
+ p = "mediatek,mt8173-infracfg";
+ break;
+ default:
+ p = NULL;
+ }
+
+ infracfg = syscon_regmap_lookup_by_compatible(p);
+ if (IS_ERR(infracfg))
+ return PTR_ERR(infracfg);
}
- infracfg = syscon_regmap_lookup_by_compatible(p);
-
- if (IS_ERR(infracfg))
- return PTR_ERR(infracfg);
-
ret = regmap_read(infracfg, REG_INFRA_MISC, &val);
if (ret)
return ret;
@@ -1204,18 +1210,16 @@ static int mtk_iommu_probe(struct platform_device *pdev)
if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
ret = mtk_iommu_mm_dts_parse(dev, &match, data);
if (ret) {
- dev_err(dev, "mm dts parse fail(%d).", ret);
+ dev_err_probe(dev, ret, "mm dts parse fail\n");
goto out_runtime_disable;
}
- } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA) &&
- data->plat_data->pericfg_comp_str) {
- infracfg = syscon_regmap_lookup_by_compatible(data->plat_data->pericfg_comp_str);
- if (IS_ERR(infracfg)) {
- ret = PTR_ERR(infracfg);
+ } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA)) {
+ p = data->plat_data->pericfg_comp_str;
+ data->pericfg = syscon_regmap_lookup_by_compatible(p);
+ if (IS_ERR(data->pericfg)) {
+ ret = PTR_ERR(data->pericfg);
goto out_runtime_disable;
}
-
- data->pericfg = infracfg;
}
platform_set_drvdata(pdev, data);
@@ -1366,8 +1370,7 @@ static int __maybe_unused mtk_iommu_runtime_resume(struct device *dev)
writel_relaxed(reg->int_control[i], base + REG_MMU_INT_CONTROL0);
writel_relaxed(reg->int_main_control[i], base + REG_MMU_INT_MAIN_CONTROL);
writel_relaxed(reg->ivrp_paddr[i], base + REG_MMU_IVRP_PADDR);
- writel(m4u_dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
- base + REG_MMU_PT_BASE_ADDR);
+ writel(m4u_dom->cfg.arm_v7s_cfg.ttbr, base + REG_MMU_PT_BASE_ADDR);
} while (++i < data->plat_data->banks_num);
/*
@@ -1401,7 +1404,7 @@ static const struct mtk_iommu_plat_data mt2712_data = {
static const struct mtk_iommu_plat_data mt6779_data = {
.m4u_plat = M4U_MT6779,
.flags = HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN | WR_THROT_EN |
- MTK_IOMMU_TYPE_MM,
+ MTK_IOMMU_TYPE_MM | PGTABLE_PA_35_EN,
.inv_sel_reg = REG_MMU_INV_SEL_GEN2,
.banks_num = 1,
.banks_enable = {true},
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index e1cb51b9866c..128c7a3f1778 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -532,15 +532,10 @@ static void mtk_iommu_v1_release_device(struct device *dev)
struct device *larbdev;
unsigned int larbid;
- if (!fwspec || fwspec->ops != &mtk_iommu_v1_ops)
- return;
-
data = dev_iommu_priv_get(dev);
larbid = mt2701_m4u_to_larb(fwspec->ids[0]);
larbdev = data->larb_imu[larbid].dev;
device_link_remove(dev, larbdev);
-
- iommu_fwspec_free(dev);
}
static int mtk_iommu_v1_hw_init(const struct mtk_iommu_v1_data *data)
diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c
index bd409bab6286..511959c8a14d 100644
--- a/drivers/iommu/sprd-iommu.c
+++ b/drivers/iommu/sprd-iommu.c
@@ -383,16 +383,6 @@ static struct iommu_device *sprd_iommu_probe_device(struct device *dev)
return &sdev->iommu;
}
-static void sprd_iommu_release_device(struct device *dev)
-{
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-
- if (!fwspec || fwspec->ops != &sprd_iommu_ops)
- return;
-
- iommu_fwspec_free(dev);
-}
-
static struct iommu_group *sprd_iommu_device_group(struct device *dev)
{
struct sprd_iommu_device *sdev = dev_iommu_priv_get(dev);
@@ -417,7 +407,6 @@ static int sprd_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
static const struct iommu_ops sprd_iommu_ops = {
.domain_alloc = sprd_iommu_domain_alloc,
.probe_device = sprd_iommu_probe_device,
- .release_device = sprd_iommu_release_device,
.device_group = sprd_iommu_device_group,
.of_xlate = sprd_iommu_of_xlate,
.pgsize_bitmap = ~0UL << SPRD_IOMMU_PAGE_SHIFT,
diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
index c54ab477b8fd..a84c63518773 100644
--- a/drivers/iommu/sun50i-iommu.c
+++ b/drivers/iommu/sun50i-iommu.c
@@ -738,8 +738,6 @@ static struct iommu_device *sun50i_iommu_probe_device(struct device *dev)
return &iommu->iommu;
}
-static void sun50i_iommu_release_device(struct device *dev) {}
-
static struct iommu_group *sun50i_iommu_device_group(struct device *dev)
{
struct sun50i_iommu *iommu = sun50i_iommu_from_dev(dev);
@@ -764,7 +762,6 @@ static const struct iommu_ops sun50i_iommu_ops = {
.domain_alloc = sun50i_iommu_domain_alloc,
.of_xlate = sun50i_iommu_of_xlate,
.probe_device = sun50i_iommu_probe_device,
- .release_device = sun50i_iommu_release_device,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = sun50i_iommu_attach_device,
.detach_dev = sun50i_iommu_detach_device,
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index a6700a40a6f8..e5ca3cf1a949 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -246,10 +246,6 @@ static struct iommu_device *gart_iommu_probe_device(struct device *dev)
return &gart_handle->iommu;
}
-static void gart_iommu_release_device(struct device *dev)
-{
-}
-
static int gart_iommu_of_xlate(struct device *dev,
struct of_phandle_args *args)
{
@@ -273,7 +269,6 @@ static void gart_iommu_sync(struct iommu_domain *domain,
static const struct iommu_ops gart_iommu_ops = {
.domain_alloc = gart_iommu_domain_alloc,
.probe_device = gart_iommu_probe_device,
- .release_device = gart_iommu_release_device,
.device_group = generic_device_group,
.pgsize_bitmap = GART_IOMMU_PGSIZES,
.of_xlate = gart_iommu_of_xlate,
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 1fea68e551f1..2a8de975fe63 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -864,8 +864,6 @@ static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
return &smmu->iommu;
}
-static void tegra_smmu_release_device(struct device *dev) {}
-
static const struct tegra_smmu_group_soc *
tegra_smmu_find_group(struct tegra_smmu *smmu, unsigned int swgroup)
{
@@ -966,7 +964,6 @@ static int tegra_smmu_of_xlate(struct device *dev,
static const struct iommu_ops tegra_smmu_ops = {
.domain_alloc = tegra_smmu_domain_alloc,
.probe_device = tegra_smmu_probe_device,
- .release_device = tegra_smmu_release_device,
.device_group = tegra_smmu_device_group,
.of_xlate = tegra_smmu_of_xlate,
.pgsize_bitmap = SZ_4K,
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 25be4b822aa0..08eeafc9529f 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -788,11 +788,13 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
return 0;
}
-static int viommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+static int viommu_map_pages(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped)
{
int ret;
u32 flags;
+ size_t size = pgsize * pgcount;
u64 end = iova + size - 1;
struct virtio_iommu_req_map map;
struct viommu_domain *vdomain = to_viommu_domain(domain);
@@ -823,17 +825,21 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
if (ret)
viommu_del_mappings(vdomain, iova, end);
+ else if (mapped)
+ *mapped = size;
return ret;
}
-static size_t viommu_unmap(struct iommu_domain *domain, unsigned long iova,
- size_t size, struct iommu_iotlb_gather *gather)
+static size_t viommu_unmap_pages(struct iommu_domain *domain, unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather)
{
int ret = 0;
size_t unmapped;
struct virtio_iommu_req_unmap unmap;
struct viommu_domain *vdomain = to_viommu_domain(domain);
+ size_t size = pgsize * pgcount;
unmapped = viommu_del_mappings(vdomain, iova, iova + size - 1);
if (unmapped < size)
@@ -964,7 +970,7 @@ static struct iommu_device *viommu_probe_device(struct device *dev)
return &viommu->iommu;
err_free_dev:
- generic_iommu_put_resv_regions(dev, &vdev->resv_regions);
+ iommu_put_resv_regions(dev, &vdev->resv_regions);
kfree(vdev);
return ERR_PTR(ret);
@@ -981,15 +987,9 @@ static void viommu_probe_finalize(struct device *dev)
static void viommu_release_device(struct device *dev)
{
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct viommu_endpoint *vdev;
-
- if (!fwspec || fwspec->ops != &viommu_ops)
- return;
-
- vdev = dev_iommu_priv_get(dev);
+ struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
- generic_iommu_put_resv_regions(dev, &vdev->resv_regions);
+ iommu_put_resv_regions(dev, &vdev->resv_regions);
kfree(vdev);
}
@@ -1013,13 +1013,12 @@ static struct iommu_ops viommu_ops = {
.release_device = viommu_release_device,
.device_group = viommu_device_group,
.get_resv_regions = viommu_get_resv_regions,
- .put_resv_regions = generic_iommu_put_resv_regions,
.of_xlate = viommu_of_xlate,
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = viommu_attach_dev,
- .map = viommu_map,
- .unmap = viommu_unmap,
+ .map_pages = viommu_map_pages,
+ .unmap_pages = viommu_unmap_pages,
.iova_to_phys = viommu_iova_to_phys,
.iotlb_sync = viommu_iotlb_sync,
.free = viommu_domain_free,
diff --git a/drivers/irqchip/irq-loongarch-cpu.c b/drivers/irqchip/irq-loongarch-cpu.c
index 327f3ab62c03..741612ba6a52 100644
--- a/drivers/irqchip/irq-loongarch-cpu.c
+++ b/drivers/irqchip/irq-loongarch-cpu.c
@@ -129,7 +129,7 @@ static int __init cpuintc_acpi_init(union acpi_subtable_headers *header,
clear_csr_ecfg(ECFG0_IM);
clear_csr_estat(ESTATF_IP);
- cpuintc_handle = irq_domain_alloc_fwnode(NULL);
+ cpuintc_handle = irq_domain_alloc_named_fwnode("CPUINTC");
irq_domain = irq_domain_create_linear(cpuintc_handle, EXCCODE_INT_NUM,
&loongarch_cpu_intc_irq_domain_ops, NULL);
diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
index 80d8ca6f2d46..16e9af8d8b1e 100644
--- a/drivers/irqchip/irq-loongson-eiointc.c
+++ b/drivers/irqchip/irq-loongson-eiointc.c
@@ -111,11 +111,15 @@ static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *af
regaddr = EIOINTC_REG_ENABLE + ((vector >> 5) << 2);
/* Mask target vector */
- csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)), 0x0, 0);
+ csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)),
+ 0x0, priv->node * CORES_PER_EIO_NODE);
+
/* Set route for target vector */
eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map);
+
/* Unmask target vector */
- csr_any_send(regaddr, EIOINTC_ALL_ENABLE, 0x0, 0);
+ csr_any_send(regaddr, EIOINTC_ALL_ENABLE,
+ 0x0, priv->node * CORES_PER_EIO_NODE);
irq_data_update_effective_affinity(d, cpumask_of(cpu));
@@ -286,7 +290,7 @@ static void acpi_set_vec_parent(int node, struct irq_domain *parent, struct acpi
}
}
-struct irq_domain *acpi_get_vec_parent(int node, struct acpi_vector_group *vec_group)
+static struct irq_domain *acpi_get_vec_parent(int node, struct acpi_vector_group *vec_group)
{
int i;
@@ -344,7 +348,8 @@ int __init eiointc_acpi_init(struct irq_domain *parent,
if (!priv)
return -ENOMEM;
- priv->domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_eiointc);
+ priv->domain_handle = irq_domain_alloc_named_id_fwnode("EIOPIC",
+ acpi_eiointc->node);
if (!priv->domain_handle) {
pr_err("Unable to allocate domain handle\n");
goto out_free_priv;
diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c
index c4f3c886ad61..0da8716f8f24 100644
--- a/drivers/irqchip/irq-loongson-liointc.c
+++ b/drivers/irqchip/irq-loongson-liointc.c
@@ -207,7 +207,7 @@ static int liointc_init(phys_addr_t addr, unsigned long size, int revision,
"reg-names", core_reg_names[i]);
if (index < 0)
- return -EINVAL;
+ goto out_iounmap;
priv->core_isr[i] = of_iomap(node, index);
}
@@ -360,7 +360,7 @@ int __init liointc_acpi_init(struct irq_domain *parent, struct acpi_madt_lio_pic
parent_irq[0] = irq_create_mapping(parent, acpi_liointc->cascade[0]);
parent_irq[1] = irq_create_mapping(parent, acpi_liointc->cascade[1]);
- domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_liointc);
+ domain_handle = irq_domain_alloc_fwnode(&acpi_liointc->address);
if (!domain_handle) {
pr_err("Unable to allocate domain handle\n");
return -ENOMEM;
diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c
index d0e8551bebfa..a72ede90ffc6 100644
--- a/drivers/irqchip/irq-loongson-pch-msi.c
+++ b/drivers/irqchip/irq-loongson-pch-msi.c
@@ -282,7 +282,7 @@ int __init pch_msi_acpi_init(struct irq_domain *parent,
int ret;
struct fwnode_handle *domain_handle;
- domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_pchmsi);
+ domain_handle = irq_domain_alloc_fwnode(&acpi_pchmsi->msg_address);
ret = pch_msi_init(acpi_pchmsi->msg_address, acpi_pchmsi->start,
acpi_pchmsi->count, parent, domain_handle);
if (ret < 0)
diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c
index b6f1392964b1..c01b9c257005 100644
--- a/drivers/irqchip/irq-loongson-pch-pic.c
+++ b/drivers/irqchip/irq-loongson-pch-pic.c
@@ -48,25 +48,6 @@ static struct pch_pic *pch_pic_priv[MAX_IO_PICS];
struct fwnode_handle *pch_pic_handle[MAX_IO_PICS];
-int find_pch_pic(u32 gsi)
-{
- int i;
-
- /* Find the PCH_PIC that manages this GSI. */
- for (i = 0; i < MAX_IO_PICS; i++) {
- struct pch_pic *priv = pch_pic_priv[i];
-
- if (!priv)
- return -1;
-
- if (gsi >= priv->gsi_base && gsi < (priv->gsi_base + priv->vec_count))
- return i;
- }
-
- pr_err("ERROR: Unable to locate PCH_PIC for GSI %d\n", gsi);
- return -1;
-}
-
static void pch_pic_bitset(struct pch_pic *priv, int offset, int bit)
{
u32 reg;
@@ -325,6 +306,25 @@ IRQCHIP_DECLARE(pch_pic, "loongson,pch-pic-1.0", pch_pic_of_init);
#endif
#ifdef CONFIG_ACPI
+int find_pch_pic(u32 gsi)
+{
+ int i;
+
+ /* Find the PCH_PIC that manages this GSI. */
+ for (i = 0; i < MAX_IO_PICS; i++) {
+ struct pch_pic *priv = pch_pic_priv[i];
+
+ if (!priv)
+ return -1;
+
+ if (gsi >= priv->gsi_base && gsi < (priv->gsi_base + priv->vec_count))
+ return i;
+ }
+
+ pr_err("ERROR: Unable to locate PCH_PIC for GSI %d\n", gsi);
+ return -1;
+}
+
static int __init
pch_lpc_parse_madt(union acpi_subtable_headers *header,
const unsigned long end)
@@ -349,7 +349,7 @@ int __init pch_pic_acpi_init(struct irq_domain *parent,
vec_base = acpi_pchpic->gsi_base - GSI_MIN_PCH_IRQ;
- domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_pchpic);
+ domain_handle = irq_domain_alloc_fwnode(&acpi_pchpic->address);
if (!domain_handle) {
pr_err("Unable to allocate domain handle\n");
return -ENOMEM;
diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c
index b65bd8878d4f..499e5f81b3fe 100644
--- a/drivers/irqchip/irq-riscv-intc.c
+++ b/drivers/irqchip/irq-riscv-intc.c
@@ -95,10 +95,11 @@ static const struct irq_domain_ops riscv_intc_domain_ops = {
static int __init riscv_intc_init(struct device_node *node,
struct device_node *parent)
{
- int rc, hartid;
+ int rc;
+ unsigned long hartid;
- hartid = riscv_of_parent_hartid(node);
- if (hartid < 0) {
+ rc = riscv_of_parent_hartid(node, &hartid);
+ if (rc < 0) {
pr_warn("unable to find hart id for %pOF\n", node);
return 0;
}
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index ba4938188570..2f4784860df5 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -374,7 +374,8 @@ static int __init __plic_init(struct device_node *node,
for (i = 0; i < nr_contexts; i++) {
struct of_phandle_args parent;
irq_hw_number_t hwirq;
- int cpu, hartid;
+ int cpu;
+ unsigned long hartid;
if (of_irq_parse_one(node, i, &parent)) {
pr_err("failed to parse parent for context %d.\n", i);
@@ -398,8 +399,8 @@ static int __init __plic_init(struct device_node *node,
continue;
}
- hartid = riscv_of_parent_hartid(parent.np);
- if (hartid < 0) {
+ error = riscv_of_parent_hartid(parent.np, &hartid);
+ if (error < 0) {
pr_warn("failed to parse hart ID for context %d.\n", i);
continue;
}
diff --git a/drivers/irqchip/irq-tegra.c b/drivers/irqchip/irq-tegra.c
index e1f771c72fc4..ad3e2c1b3c87 100644
--- a/drivers/irqchip/irq-tegra.c
+++ b/drivers/irqchip/irq-tegra.c
@@ -148,10 +148,10 @@ static int tegra_ictlr_suspend(void)
lic->cop_iep[i] = readl_relaxed(ictlr + ICTLR_COP_IEP_CLASS);
/* Disable COP interrupts */
- writel_relaxed(~0ul, ictlr + ICTLR_COP_IER_CLR);
+ writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_COP_IER_CLR);
/* Disable CPU interrupts */
- writel_relaxed(~0ul, ictlr + ICTLR_CPU_IER_CLR);
+ writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_CPU_IER_CLR);
/* Enable the wakeup sources of ictlr */
writel_relaxed(lic->ictlr_wake_mask[i], ictlr + ICTLR_CPU_IER_SET);
@@ -172,12 +172,12 @@ static void tegra_ictlr_resume(void)
writel_relaxed(lic->cpu_iep[i],
ictlr + ICTLR_CPU_IEP_CLASS);
- writel_relaxed(~0ul, ictlr + ICTLR_CPU_IER_CLR);
+ writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_CPU_IER_CLR);
writel_relaxed(lic->cpu_ier[i],
ictlr + ICTLR_CPU_IER_SET);
writel_relaxed(lic->cop_iep[i],
ictlr + ICTLR_COP_IEP_CLASS);
- writel_relaxed(~0ul, ictlr + ICTLR_COP_IER_CLR);
+ writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_COP_IER_CLR);
writel_relaxed(lic->cop_ier[i],
ictlr + ICTLR_COP_IER_SET);
}
@@ -312,7 +312,7 @@ static int __init tegra_ictlr_init(struct device_node *node,
lic->base[i] = base;
/* Disable all interrupts */
- writel_relaxed(~0UL, base + ICTLR_CPU_IER_CLR);
+ writel_relaxed(GENMASK(31, 0), base + ICTLR_CPU_IER_CLR);
/* All interrupts target IRQ */
writel_relaxed(0, base + ICTLR_CPU_IEP_CLASS);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index a49979f41eee..499d0f215a8b 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -447,16 +447,16 @@ config LEDS_LP8860
config LEDS_CLEVO_MAIL
tristate "Mail LED on Clevo notebook"
- depends on LEDS_CLASS
+ depends on LEDS_CLASS && BROKEN
depends on X86 && SERIO_I8042 && DMI
help
This driver makes the mail LED accessible from userspace
- programs through the leds subsystem. This LED have three
- known mode: off, blink at 0.5Hz and blink at 1Hz.
+ programs through the LEDs subsystem. This LED has three
+ known modes: off, blink at 0.5Hz and blink at 1Hz.
The driver supports two kinds of interface: using ledtrig-timer
or through /sys/class/leds/clevo::mail/brightness. As this LED
- cannot change it's brightness it blinks instead. The brightness
+ cannot change its brightness it blinks instead. The brightness
value 0 means off, 1..127 means blink at 0.5Hz and 128..255 means
blink at 1Hz.
@@ -697,7 +697,7 @@ config LEDS_MENF21BMC
config LEDS_IS31FL319X
tristate "LED Support for ISSI IS31FL319x I2C LED controller family"
- depends on LEDS_CLASS && I2C && OF
+ depends on LEDS_CLASS && I2C
select REGMAP_I2C
help
This option enables support for LEDs connected to ISSI IS31FL319x
diff --git a/drivers/leds/blink/Kconfig b/drivers/leds/blink/Kconfig
index 59ba81e40e85..945c84286a4e 100644
--- a/drivers/leds/blink/Kconfig
+++ b/drivers/leds/blink/Kconfig
@@ -1,3 +1,17 @@
+config LEDS_BCM63138
+ tristate "LED Support for Broadcom BCM63138 SoC"
+ depends on LEDS_CLASS
+ depends on ARCH_BCM4908 || ARCH_BCM_5301X || BCM63XX || COMPILE_TEST
+ depends on HAS_IOMEM
+ depends on OF
+ default ARCH_BCM4908
+ help
+ This option enables support for the LED controller that is part of
+ the BCM63138 SoC. The same hardware block is known to also be used
+ in BCM4908, BCM6848, BCM6858, BCM63148, BCM63381 and BCM68360.
+
+ If compiled as a module, it will be called leds-bcm63138.
+
config LEDS_LGM
tristate "LED support for LGM SoC series"
depends on X86 || COMPILE_TEST
diff --git a/drivers/leds/blink/Makefile b/drivers/leds/blink/Makefile
index fa5d04dccf13..447029f4153a 100644
--- a/drivers/leds/blink/Makefile
+++ b/drivers/leds/blink/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_LEDS_BCM63138) += leds-bcm63138.o
obj-$(CONFIG_LEDS_LGM) += leds-lgm-sso.o
diff --git a/drivers/leds/blink/leds-bcm63138.c b/drivers/leds/blink/leds-bcm63138.c
new file mode 100644
index 000000000000..2cf2761e4914
--- /dev/null
+++ b/drivers/leds/blink/leds-bcm63138.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 Rafał Miłecki <rafal@milecki.pl>
+ */
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#define BCM63138_MAX_LEDS 32
+#define BCM63138_MAX_BRIGHTNESS 9
+
+#define BCM63138_LED_BITS 4 /* how many bits control a single LED */
+#define BCM63138_LED_MASK ((1 << BCM63138_LED_BITS) - 1) /* 0xf */
+#define BCM63138_LEDS_PER_REG (32 / BCM63138_LED_BITS) /* 8 */
+
+#define BCM63138_GLB_CTRL 0x00
+#define BCM63138_GLB_CTRL_SERIAL_LED_DATA_PPOL 0x00000002
+#define BCM63138_GLB_CTRL_SERIAL_LED_EN_POL 0x00000008
+#define BCM63138_MASK 0x04
+#define BCM63138_HW_LED_EN 0x08
+#define BCM63138_SERIAL_LED_SHIFT_SEL 0x0c
+#define BCM63138_FLASH_RATE_CTRL1 0x10
+#define BCM63138_FLASH_RATE_CTRL2 0x14
+#define BCM63138_FLASH_RATE_CTRL3 0x18
+#define BCM63138_FLASH_RATE_CTRL4 0x1c
+#define BCM63138_BRIGHT_CTRL1 0x20
+#define BCM63138_BRIGHT_CTRL2 0x24
+#define BCM63138_BRIGHT_CTRL3 0x28
+#define BCM63138_BRIGHT_CTRL4 0x2c
+#define BCM63138_POWER_LED_CFG 0x30
+#define BCM63138_HW_POLARITY 0xb4
+#define BCM63138_SW_DATA 0xb8
+#define BCM63138_SW_POLARITY 0xbc
+#define BCM63138_PARALLEL_LED_POLARITY 0xc0
+#define BCM63138_SERIAL_LED_POLARITY 0xc4
+#define BCM63138_HW_LED_STATUS 0xc8
+#define BCM63138_FLASH_CTRL_STATUS 0xcc
+#define BCM63138_FLASH_BRT_CTRL 0xd0
+#define BCM63138_FLASH_P_LED_OUT_STATUS 0xd4
+#define BCM63138_FLASH_S_LED_OUT_STATUS 0xd8
+
+struct bcm63138_leds {
+ struct device *dev;
+ void __iomem *base;
+ spinlock_t lock;
+};
+
+struct bcm63138_led {
+ struct bcm63138_leds *leds;
+ struct led_classdev cdev;
+ u32 pin;
+ bool active_low;
+};
+
+/*
+ * I/O access
+ */
+
+static void bcm63138_leds_write(struct bcm63138_leds *leds, unsigned int reg,
+ u32 data)
+{
+ writel(data, leds->base + reg);
+}
+
+static unsigned long bcm63138_leds_read(struct bcm63138_leds *leds,
+ unsigned int reg)
+{
+ return readl(leds->base + reg);
+}
+
+static void bcm63138_leds_update_bits(struct bcm63138_leds *leds,
+ unsigned int reg, u32 mask, u32 val)
+{
+ WARN_ON(val & ~mask);
+
+ bcm63138_leds_write(leds, reg, (bcm63138_leds_read(leds, reg) & ~mask) | (val & mask));
+}
+
+/*
+ * Helpers
+ */
+
+static void bcm63138_leds_set_flash_rate(struct bcm63138_leds *leds,
+ struct bcm63138_led *led,
+ u8 value)
+{
+ int reg_offset = (led->pin >> fls((BCM63138_LEDS_PER_REG - 1))) * 4;
+ int shift = (led->pin & (BCM63138_LEDS_PER_REG - 1)) * BCM63138_LED_BITS;
+
+ bcm63138_leds_update_bits(leds, BCM63138_FLASH_RATE_CTRL1 + reg_offset,
+ BCM63138_LED_MASK << shift, value << shift);
+}
+
+static void bcm63138_leds_set_bright(struct bcm63138_leds *leds,
+ struct bcm63138_led *led,
+ u8 value)
+{
+ int reg_offset = (led->pin >> fls((BCM63138_LEDS_PER_REG - 1))) * 4;
+ int shift = (led->pin & (BCM63138_LEDS_PER_REG - 1)) * BCM63138_LED_BITS;
+
+ bcm63138_leds_update_bits(leds, BCM63138_BRIGHT_CTRL1 + reg_offset,
+ BCM63138_LED_MASK << shift, value << shift);
+}
+
+static void bcm63138_leds_enable_led(struct bcm63138_leds *leds,
+ struct bcm63138_led *led,
+ enum led_brightness value)
+{
+ u32 bit = BIT(led->pin);
+
+ bcm63138_leds_update_bits(leds, BCM63138_SW_DATA, bit, value ? bit : 0);
+}
+
+/*
+ * API callbacks
+ */
+
+static void bcm63138_leds_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct bcm63138_led *led = container_of(led_cdev, struct bcm63138_led, cdev);
+ struct bcm63138_leds *leds = led->leds;
+ unsigned long flags;
+
+ spin_lock_irqsave(&leds->lock, flags);
+
+ bcm63138_leds_enable_led(leds, led, value);
+ if (!value)
+ bcm63138_leds_set_flash_rate(leds, led, 0);
+ else
+ bcm63138_leds_set_bright(leds, led, value);
+
+ spin_unlock_irqrestore(&leds->lock, flags);
+}
+
+static int bcm63138_leds_blink_set(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct bcm63138_led *led = container_of(led_cdev, struct bcm63138_led, cdev);
+ struct bcm63138_leds *leds = led->leds;
+ unsigned long flags;
+ u8 value;
+
+ if (!*delay_on && !*delay_off) {
+ *delay_on = 640;
+ *delay_off = 640;
+ }
+
+ if (*delay_on != *delay_off) {
+ dev_dbg(led_cdev->dev, "Blinking at unequal delays is not supported\n");
+ return -EINVAL;
+ }
+
+ switch (*delay_on) {
+ case 1152 ... 1408: /* 1280 ms ± 10% */
+ value = 0x7;
+ break;
+ case 576 ... 704: /* 640 ms ± 10% */
+ value = 0x6;
+ break;
+ case 288 ... 352: /* 320 ms ± 10% */
+ value = 0x5;
+ break;
+ case 126 ... 154: /* 140 ms ± 10% */
+ value = 0x4;
+ break;
+ case 59 ... 72: /* 65 ms ± 10% */
+ value = 0x3;
+ break;
+ default:
+ dev_dbg(led_cdev->dev, "Blinking delay value %lu is unsupported\n",
+ *delay_on);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&leds->lock, flags);
+
+ bcm63138_leds_enable_led(leds, led, BCM63138_MAX_BRIGHTNESS);
+ bcm63138_leds_set_flash_rate(leds, led, value);
+
+ spin_unlock_irqrestore(&leds->lock, flags);
+
+ return 0;
+}
+
+/*
+ * LED driver
+ */
+
+static void bcm63138_leds_create_led(struct bcm63138_leds *leds,
+ struct device_node *np)
+{
+ struct led_init_data init_data = {
+ .fwnode = of_fwnode_handle(np),
+ };
+ struct device *dev = leds->dev;
+ struct bcm63138_led *led;
+ struct pinctrl *pinctrl;
+ u32 bit;
+ int err;
+
+ led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL);
+ if (!led) {
+ dev_err(dev, "Failed to alloc LED\n");
+ return;
+ }
+
+ led->leds = leds;
+
+ if (of_property_read_u32(np, "reg", &led->pin)) {
+ dev_err(dev, "Missing \"reg\" property in %pOF\n", np);
+ goto err_free;
+ }
+
+ if (led->pin >= BCM63138_MAX_LEDS) {
+ dev_err(dev, "Invalid \"reg\" value %d\n", led->pin);
+ goto err_free;
+ }
+
+ led->active_low = of_property_read_bool(np, "active-low");
+
+ led->cdev.max_brightness = BCM63138_MAX_BRIGHTNESS;
+ led->cdev.brightness_set = bcm63138_leds_brightness_set;
+ led->cdev.blink_set = bcm63138_leds_blink_set;
+
+ err = devm_led_classdev_register_ext(dev, &led->cdev, &init_data);
+ if (err) {
+ dev_err(dev, "Failed to register LED %pOF: %d\n", np, err);
+ goto err_free;
+ }
+
+ pinctrl = devm_pinctrl_get_select_default(led->cdev.dev);
+ if (IS_ERR(pinctrl) && PTR_ERR(pinctrl) != -ENODEV) {
+ dev_warn(led->cdev.dev, "Failed to select %pOF pinctrl: %ld\n",
+ np, PTR_ERR(pinctrl));
+ }
+
+ bit = BIT(led->pin);
+ bcm63138_leds_update_bits(leds, BCM63138_PARALLEL_LED_POLARITY, bit,
+ led->active_low ? 0 : bit);
+ bcm63138_leds_update_bits(leds, BCM63138_HW_LED_EN, bit, 0);
+ bcm63138_leds_set_flash_rate(leds, led, 0);
+ bcm63138_leds_enable_led(leds, led, led->cdev.brightness);
+
+ return;
+
+err_free:
+ devm_kfree(dev, led);
+}
+
+static int bcm63138_leds_probe(struct platform_device *pdev)
+{
+ struct device_node *np = dev_of_node(&pdev->dev);
+ struct device *dev = &pdev->dev;
+ struct bcm63138_leds *leds;
+ struct device_node *child;
+
+ leds = devm_kzalloc(dev, sizeof(*leds), GFP_KERNEL);
+ if (!leds)
+ return -ENOMEM;
+
+ leds->dev = dev;
+
+ leds->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(leds->base))
+ return PTR_ERR(leds->base);
+
+ spin_lock_init(&leds->lock);
+
+ bcm63138_leds_write(leds, BCM63138_GLB_CTRL,
+ BCM63138_GLB_CTRL_SERIAL_LED_DATA_PPOL |
+ BCM63138_GLB_CTRL_SERIAL_LED_EN_POL);
+ bcm63138_leds_write(leds, BCM63138_HW_LED_EN, 0);
+ bcm63138_leds_write(leds, BCM63138_SERIAL_LED_POLARITY, 0);
+ bcm63138_leds_write(leds, BCM63138_PARALLEL_LED_POLARITY, 0);
+
+ for_each_available_child_of_node(np, child) {
+ bcm63138_leds_create_led(leds, child);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id bcm63138_leds_of_match_table[] = {
+ { .compatible = "brcm,bcm63138-leds", },
+ { },
+};
+
+static struct platform_driver bcm63138_leds_driver = {
+ .probe = bcm63138_leds_probe,
+ .driver = {
+ .name = "leds-bcm63xxx",
+ .of_match_table = bcm63138_leds_of_match_table,
+ },
+};
+
+module_platform_driver(bcm63138_leds_driver);
+
+MODULE_AUTHOR("Rafał Miłecki");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, bcm63138_leds_of_match_table);
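In the new driver above, bcm63138_leds_set_flash_rate() and bcm63138_leds_set_bright() pack eight 4-bit per-LED fields into each 32-bit control register: with BCM63138_LEDS_PER_REG == 8, fls(7) == 3, so led->pin >> 3 selects one of the four consecutive registers (times 4 for the byte offset) and (pin & 7) * 4 gives the bit position of the nibble. A small standalone sketch of that decoding for an example pin:

#include <stdio.h>

#define LED_BITS	4			/* bits per LED, as in the driver */
#define LEDS_PER_REG	(32 / LED_BITS)		/* 8 LEDs per 32-bit register */

int main(void)
{
	unsigned int pin = 13;			/* example LED index, 0..31 */
	unsigned int reg_offset = (pin / LEDS_PER_REG) * 4;	/* 4: CTRL1 + 4 -> CTRL2 */
	unsigned int shift = (pin % LEDS_PER_REG) * LED_BITS;	/* 20 */
	unsigned int mask = 0xfu << shift;

	printf("pin %u -> register offset +0x%x, bits %u..%u, mask 0x%08x\n",
	       pin, reg_offset, shift, shift + LED_BITS - 1, mask);
	return 0;
}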
diff --git a/drivers/leds/leds-is31fl319x.c b/drivers/leds/leds-is31fl319x.c
index 4161b9dd7e48..52b59b62f437 100644
--- a/drivers/leds/leds-is31fl319x.c
+++ b/drivers/leds/leds-is31fl319x.c
@@ -11,9 +11,9 @@
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/leds.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/delay.h>
@@ -21,39 +21,64 @@
/* register numbers */
#define IS31FL319X_SHUTDOWN 0x00
-#define IS31FL319X_CTRL1 0x01
-#define IS31FL319X_CTRL2 0x02
-#define IS31FL319X_CONFIG1 0x03
-#define IS31FL319X_CONFIG2 0x04
-#define IS31FL319X_RAMP_MODE 0x05
-#define IS31FL319X_BREATH_MASK 0x06
-#define IS31FL319X_PWM(channel) (0x07 + channel)
-#define IS31FL319X_DATA_UPDATE 0x10
-#define IS31FL319X_T0(channel) (0x11 + channel)
-#define IS31FL319X_T123_1 0x1a
-#define IS31FL319X_T123_2 0x1b
-#define IS31FL319X_T123_3 0x1c
-#define IS31FL319X_T4(channel) (0x1d + channel)
-#define IS31FL319X_TIME_UPDATE 0x26
-#define IS31FL319X_RESET 0xff
-
-#define IS31FL319X_REG_CNT (IS31FL319X_RESET + 1)
+
+/* registers for 3190, 3191 and 3193 */
+#define IS31FL3190_BREATHING 0x01
+#define IS31FL3190_LEDMODE 0x02
+#define IS31FL3190_CURRENT 0x03
+#define IS31FL3190_PWM(channel) (0x04 + channel)
+#define IS31FL3190_DATA_UPDATE 0x07
+#define IS31FL3190_T0(channel) (0x0a + channel)
+#define IS31FL3190_T1T2(channel) (0x10 + channel)
+#define IS31FL3190_T3T4(channel) (0x16 + channel)
+#define IS31FL3190_TIME_UPDATE 0x1c
+#define IS31FL3190_LEDCONTROL 0x1d
+#define IS31FL3190_RESET 0x2f
+
+#define IS31FL3190_CURRENT_uA_MIN 5000
+#define IS31FL3190_CURRENT_uA_DEFAULT 42000
+#define IS31FL3190_CURRENT_uA_MAX 42000
+#define IS31FL3190_CURRENT_MASK GENMASK(4, 2)
+#define IS31FL3190_CURRENT_5_mA 0x02
+#define IS31FL3190_CURRENT_10_mA 0x01
+#define IS31FL3190_CURRENT_17dot5_mA 0x04
+#define IS31FL3190_CURRENT_30_mA 0x03
+#define IS31FL3190_CURRENT_42_mA 0x00
+
+/* registers for 3196 and 3199 */
+#define IS31FL3196_CTRL1 0x01
+#define IS31FL3196_CTRL2 0x02
+#define IS31FL3196_CONFIG1 0x03
+#define IS31FL3196_CONFIG2 0x04
+#define IS31FL3196_RAMP_MODE 0x05
+#define IS31FL3196_BREATH_MARK 0x06
+#define IS31FL3196_PWM(channel) (0x07 + channel)
+#define IS31FL3196_DATA_UPDATE 0x10
+#define IS31FL3196_T0(channel) (0x11 + channel)
+#define IS31FL3196_T123_1 0x1a
+#define IS31FL3196_T123_2 0x1b
+#define IS31FL3196_T123_3 0x1c
+#define IS31FL3196_T4(channel) (0x1d + channel)
+#define IS31FL3196_TIME_UPDATE 0x26
+#define IS31FL3196_RESET 0xff
+
+#define IS31FL3196_REG_CNT (IS31FL3196_RESET + 1)
#define IS31FL319X_MAX_LEDS 9
/* CS (Current Setting) in CONFIG2 register */
-#define IS31FL319X_CONFIG2_CS_SHIFT 4
-#define IS31FL319X_CONFIG2_CS_MASK 0x7
-#define IS31FL319X_CONFIG2_CS_STEP_REF 12
+#define IS31FL3196_CONFIG2_CS_SHIFT 4
+#define IS31FL3196_CONFIG2_CS_MASK GENMASK(2, 0)
+#define IS31FL3196_CONFIG2_CS_STEP_REF 12
-#define IS31FL319X_CURRENT_MIN ((u32)5000)
-#define IS31FL319X_CURRENT_MAX ((u32)40000)
-#define IS31FL319X_CURRENT_STEP ((u32)5000)
-#define IS31FL319X_CURRENT_DEFAULT ((u32)20000)
+#define IS31FL3196_CURRENT_uA_MIN 5000
+#define IS31FL3196_CURRENT_uA_MAX 40000
+#define IS31FL3196_CURRENT_uA_STEP 5000
+#define IS31FL3196_CURRENT_uA_DEFAULT 20000
/* Audio gain in CONFIG2 register */
-#define IS31FL319X_AUDIO_GAIN_DB_MAX ((u32)21)
-#define IS31FL319X_AUDIO_GAIN_DB_STEP ((u32)3)
+#define IS31FL3196_AUDIO_GAIN_DB_MAX ((u32)21)
+#define IS31FL3196_AUDIO_GAIN_DB_STEP 3
/*
* regmap is used as a cache of chip's register space,
@@ -78,52 +103,161 @@ struct is31fl319x_chip {
struct is31fl319x_chipdef {
int num_leds;
+ u8 reset_reg;
+ const struct regmap_config *is31fl319x_regmap_config;
+ int (*brightness_set)(struct led_classdev *cdev, enum led_brightness brightness);
+ u32 current_default;
+ u32 current_min;
+ u32 current_max;
+ bool is_3196or3199;
};
-static const struct is31fl319x_chipdef is31fl3190_cdef = {
- .num_leds = 1,
-};
+static bool is31fl319x_readable_reg(struct device *dev, unsigned int reg)
+{
+ /* we have no readable registers */
+ return false;
+}
-static const struct is31fl319x_chipdef is31fl3193_cdef = {
- .num_leds = 3,
+static bool is31fl3190_volatile_reg(struct device *dev, unsigned int reg)
+{
+ /* volatile registers are not cached */
+ switch (reg) {
+ case IS31FL3190_DATA_UPDATE:
+ case IS31FL3190_TIME_UPDATE:
+ case IS31FL3190_RESET:
+ return true; /* always write-through */
+ default:
+ return false;
+ }
+}
+
+static const struct reg_default is31fl3190_reg_defaults[] = {
+ { IS31FL3190_LEDMODE, 0x00 },
+ { IS31FL3190_CURRENT, 0x00 },
+ { IS31FL3190_PWM(0), 0x00 },
+ { IS31FL3190_PWM(1), 0x00 },
+ { IS31FL3190_PWM(2), 0x00 },
};
-static const struct is31fl319x_chipdef is31fl3196_cdef = {
- .num_leds = 6,
+static struct regmap_config is31fl3190_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = IS31FL3190_RESET,
+ .cache_type = REGCACHE_FLAT,
+ .readable_reg = is31fl319x_readable_reg,
+ .volatile_reg = is31fl3190_volatile_reg,
+ .reg_defaults = is31fl3190_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(is31fl3190_reg_defaults),
};
-static const struct is31fl319x_chipdef is31fl3199_cdef = {
- .num_leds = 9,
+static bool is31fl3196_volatile_reg(struct device *dev, unsigned int reg)
+{
+ /* volatile registers are not cached */
+ switch (reg) {
+ case IS31FL3196_DATA_UPDATE:
+ case IS31FL3196_TIME_UPDATE:
+ case IS31FL3196_RESET:
+ return true; /* always write-through */
+ default:
+ return false;
+ }
+}
+
+static const struct reg_default is31fl3196_reg_defaults[] = {
+ { IS31FL3196_CONFIG1, 0x00 },
+ { IS31FL3196_CONFIG2, 0x00 },
+ { IS31FL3196_PWM(0), 0x00 },
+ { IS31FL3196_PWM(1), 0x00 },
+ { IS31FL3196_PWM(2), 0x00 },
+ { IS31FL3196_PWM(3), 0x00 },
+ { IS31FL3196_PWM(4), 0x00 },
+ { IS31FL3196_PWM(5), 0x00 },
+ { IS31FL3196_PWM(6), 0x00 },
+ { IS31FL3196_PWM(7), 0x00 },
+ { IS31FL3196_PWM(8), 0x00 },
};
-static const struct of_device_id of_is31fl319x_match[] = {
- { .compatible = "issi,is31fl3190", .data = &is31fl3190_cdef, },
- { .compatible = "issi,is31fl3191", .data = &is31fl3190_cdef, },
- { .compatible = "issi,is31fl3193", .data = &is31fl3193_cdef, },
- { .compatible = "issi,is31fl3196", .data = &is31fl3196_cdef, },
- { .compatible = "issi,is31fl3199", .data = &is31fl3199_cdef, },
- { .compatible = "si-en,sn3199", .data = &is31fl3199_cdef, },
- { }
+static struct regmap_config is31fl3196_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = IS31FL3196_REG_CNT,
+ .cache_type = REGCACHE_FLAT,
+ .readable_reg = is31fl319x_readable_reg,
+ .volatile_reg = is31fl3196_volatile_reg,
+ .reg_defaults = is31fl3196_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(is31fl3196_reg_defaults),
};
-MODULE_DEVICE_TABLE(of, of_is31fl319x_match);
-static int is31fl319x_brightness_set(struct led_classdev *cdev,
+static int is31fl3190_brightness_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct is31fl319x_led *led = container_of(cdev, struct is31fl319x_led, cdev);
+ struct is31fl319x_chip *is31 = led->chip;
+ int chan = led - is31->leds;
+ int ret;
+ int i;
+ u8 ctrl = 0;
+
+ dev_dbg(&is31->client->dev, "channel %d: %d\n", chan, brightness);
+
+ mutex_lock(&is31->lock);
+
+ /* update PWM register */
+ ret = regmap_write(is31->regmap, IS31FL3190_PWM(chan), brightness);
+ if (ret < 0)
+ goto out;
+
+ /* read current brightness of all PWM channels */
+ for (i = 0; i < is31->cdef->num_leds; i++) {
+ unsigned int pwm_value;
+ bool on;
+
+ /*
+ * since neither cdev nor the chip can provide
+ * the current setting, we read from the regmap cache
+ */
+
+ ret = regmap_read(is31->regmap, IS31FL3190_PWM(i), &pwm_value);
+ on = ret >= 0 && pwm_value > LED_OFF;
+
+ ctrl |= on << i;
+ }
+
+ if (ctrl > 0) {
+ dev_dbg(&is31->client->dev, "power up %02x\n", ctrl);
+ regmap_write(is31->regmap, IS31FL3190_LEDCONTROL, ctrl);
+ /* update PWMs */
+ regmap_write(is31->regmap, IS31FL3190_DATA_UPDATE, 0x00);
+ /* enable chip from shut down and enable all channels */
+ ret = regmap_write(is31->regmap, IS31FL319X_SHUTDOWN, 0x20);
+ } else {
+ dev_dbg(&is31->client->dev, "power down\n");
+ /* shut down (no need to clear LEDCONTROL) */
+ ret = regmap_write(is31->regmap, IS31FL319X_SHUTDOWN, 0x01);
+ }
+
+out:
+ mutex_unlock(&is31->lock);
+
+ return ret;
+}
+
+static int is31fl3196_brightness_set(struct led_classdev *cdev,
enum led_brightness brightness)
{
- struct is31fl319x_led *led = container_of(cdev, struct is31fl319x_led,
- cdev);
+ struct is31fl319x_led *led = container_of(cdev, struct is31fl319x_led, cdev);
struct is31fl319x_chip *is31 = led->chip;
int chan = led - is31->leds;
int ret;
int i;
u8 ctrl1 = 0, ctrl2 = 0;
- dev_dbg(&is31->client->dev, "%s %d: %d\n", __func__, chan, brightness);
+ dev_dbg(&is31->client->dev, "channel %d: %d\n", chan, brightness);
mutex_lock(&is31->lock);
/* update PWM register */
- ret = regmap_write(is31->regmap, IS31FL319X_PWM(chan), brightness);
+ ret = regmap_write(is31->regmap, IS31FL3196_PWM(chan), brightness);
if (ret < 0)
goto out;
@@ -137,9 +271,7 @@ static int is31fl319x_brightness_set(struct led_classdev *cdev,
* the current setting, we read from the regmap cache
*/
- ret = regmap_read(is31->regmap, IS31FL319X_PWM(i), &pwm_value);
- dev_dbg(&is31->client->dev, "%s read %d: ret=%d: %d\n",
- __func__, i, ret, pwm_value);
+ ret = regmap_read(is31->regmap, IS31FL3196_PWM(i), &pwm_value);
on = ret >= 0 && pwm_value > LED_OFF;
if (i < 3)
@@ -153,10 +285,10 @@ static int is31fl319x_brightness_set(struct led_classdev *cdev,
if (ctrl1 > 0 || ctrl2 > 0) {
dev_dbg(&is31->client->dev, "power up %02x %02x\n",
ctrl1, ctrl2);
- regmap_write(is31->regmap, IS31FL319X_CTRL1, ctrl1);
- regmap_write(is31->regmap, IS31FL319X_CTRL2, ctrl2);
+ regmap_write(is31->regmap, IS31FL3196_CTRL1, ctrl1);
+ regmap_write(is31->regmap, IS31FL3196_CTRL2, ctrl2);
/* update PWMs */
- regmap_write(is31->regmap, IS31FL319X_DATA_UPDATE, 0x00);
+ regmap_write(is31->regmap, IS31FL3196_DATA_UPDATE, 0x00);
/* enable chip from shut down */
ret = regmap_write(is31->regmap, IS31FL319X_SHUTDOWN, 0x01);
} else {
@@ -171,92 +303,141 @@ out:
return ret;
}
-static int is31fl319x_parse_child_dt(const struct device *dev,
- const struct device_node *child,
- struct is31fl319x_led *led)
+static const struct is31fl319x_chipdef is31fl3190_cdef = {
+ .num_leds = 1,
+ .reset_reg = IS31FL3190_RESET,
+ .is31fl319x_regmap_config = &is31fl3190_regmap_config,
+ .brightness_set = is31fl3190_brightness_set,
+ .current_default = IS31FL3190_CURRENT_uA_DEFAULT,
+ .current_min = IS31FL3190_CURRENT_uA_MIN,
+ .current_max = IS31FL3190_CURRENT_uA_MAX,
+ .is_3196or3199 = false,
+};
+
+static const struct is31fl319x_chipdef is31fl3193_cdef = {
+ .num_leds = 3,
+ .reset_reg = IS31FL3190_RESET,
+ .is31fl319x_regmap_config = &is31fl3190_regmap_config,
+ .brightness_set = is31fl3190_brightness_set,
+ .current_default = IS31FL3190_CURRENT_uA_DEFAULT,
+ .current_min = IS31FL3190_CURRENT_uA_MIN,
+ .current_max = IS31FL3190_CURRENT_uA_MAX,
+ .is_3196or3199 = false,
+};
+
+static const struct is31fl319x_chipdef is31fl3196_cdef = {
+ .num_leds = 6,
+ .reset_reg = IS31FL3196_RESET,
+ .is31fl319x_regmap_config = &is31fl3196_regmap_config,
+ .brightness_set = is31fl3196_brightness_set,
+ .current_default = IS31FL3196_CURRENT_uA_DEFAULT,
+ .current_min = IS31FL3196_CURRENT_uA_MIN,
+ .current_max = IS31FL3196_CURRENT_uA_MAX,
+ .is_3196or3199 = true,
+};
+
+static const struct is31fl319x_chipdef is31fl3199_cdef = {
+ .num_leds = 9,
+ .reset_reg = IS31FL3196_RESET,
+ .is31fl319x_regmap_config = &is31fl3196_regmap_config,
+ .brightness_set = is31fl3196_brightness_set,
+ .current_default = IS31FL3196_CURRENT_uA_DEFAULT,
+ .current_min = IS31FL3196_CURRENT_uA_MIN,
+ .current_max = IS31FL3196_CURRENT_uA_MAX,
+ .is_3196or3199 = true,
+};
+
+static const struct of_device_id of_is31fl319x_match[] = {
+ { .compatible = "issi,is31fl3190", .data = &is31fl3190_cdef, },
+ { .compatible = "issi,is31fl3191", .data = &is31fl3190_cdef, },
+ { .compatible = "issi,is31fl3193", .data = &is31fl3193_cdef, },
+ { .compatible = "issi,is31fl3196", .data = &is31fl3196_cdef, },
+ { .compatible = "issi,is31fl3199", .data = &is31fl3199_cdef, },
+ { .compatible = "si-en,sn3190", .data = &is31fl3190_cdef, },
+ { .compatible = "si-en,sn3191", .data = &is31fl3190_cdef, },
+ { .compatible = "si-en,sn3193", .data = &is31fl3193_cdef, },
+ { .compatible = "si-en,sn3196", .data = &is31fl3196_cdef, },
+ { .compatible = "si-en,sn3199", .data = &is31fl3199_cdef, },
+ { }
+};
+MODULE_DEVICE_TABLE(of, of_is31fl319x_match);
+
+static int is31fl319x_parse_child_fw(const struct device *dev,
+ const struct fwnode_handle *child,
+ struct is31fl319x_led *led,
+ struct is31fl319x_chip *is31)
{
struct led_classdev *cdev = &led->cdev;
int ret;
- if (of_property_read_string(child, "label", &cdev->name))
- cdev->name = child->name;
+ if (fwnode_property_read_string(child, "label", &cdev->name))
+ cdev->name = fwnode_get_name(child);
- ret = of_property_read_string(child, "linux,default-trigger",
- &cdev->default_trigger);
+ ret = fwnode_property_read_string(child, "linux,default-trigger", &cdev->default_trigger);
if (ret < 0 && ret != -EINVAL) /* is optional */
return ret;
- led->max_microamp = IS31FL319X_CURRENT_DEFAULT;
- ret = of_property_read_u32(child, "led-max-microamp",
- &led->max_microamp);
+ led->max_microamp = is31->cdef->current_default;
+ ret = fwnode_property_read_u32(child, "led-max-microamp", &led->max_microamp);
if (!ret) {
- if (led->max_microamp < IS31FL319X_CURRENT_MIN)
+ if (led->max_microamp < is31->cdef->current_min)
return -EINVAL; /* not supported */
led->max_microamp = min(led->max_microamp,
- IS31FL319X_CURRENT_MAX);
+ is31->cdef->current_max);
}
return 0;
}
-static int is31fl319x_parse_dt(struct device *dev,
- struct is31fl319x_chip *is31)
+static int is31fl319x_parse_fw(struct device *dev, struct is31fl319x_chip *is31)
{
- struct device_node *np = dev_of_node(dev), *child;
+ struct fwnode_handle *fwnode = dev_fwnode(dev), *child;
int count;
int ret;
- if (!np)
- return -ENODEV;
-
- is31->shutdown_gpio = devm_gpiod_get_optional(dev,
- "shutdown",
- GPIOD_OUT_HIGH);
- if (IS_ERR(is31->shutdown_gpio)) {
- ret = PTR_ERR(is31->shutdown_gpio);
- dev_err(dev, "Failed to get shutdown gpio: %d\n", ret);
- return ret;
- }
+ is31->shutdown_gpio = devm_gpiod_get_optional(dev, "shutdown", GPIOD_OUT_HIGH);
+ if (IS_ERR(is31->shutdown_gpio))
+ return dev_err_probe(dev, PTR_ERR(is31->shutdown_gpio),
+ "Failed to get shutdown gpio\n");
is31->cdef = device_get_match_data(dev);
- count = of_get_available_child_count(np);
+ count = 0;
+ fwnode_for_each_available_child_node(fwnode, child)
+ count++;
dev_dbg(dev, "probing with %d leds defined in DT\n", count);
- if (!count || count > is31->cdef->num_leds) {
- dev_err(dev, "Number of leds defined must be between 1 and %u\n",
- is31->cdef->num_leds);
- return -ENODEV;
- }
+ if (!count || count > is31->cdef->num_leds)
+ return dev_err_probe(dev, -ENODEV,
+ "Number of leds defined must be between 1 and %u\n",
+ is31->cdef->num_leds);
- for_each_available_child_of_node(np, child) {
+ fwnode_for_each_available_child_node(fwnode, child) {
struct is31fl319x_led *led;
u32 reg;
- ret = of_property_read_u32(child, "reg", &reg);
+ ret = fwnode_property_read_u32(child, "reg", &reg);
if (ret) {
- dev_err(dev, "Failed to read led 'reg' property\n");
+ ret = dev_err_probe(dev, ret, "Failed to read led 'reg' property\n");
goto put_child_node;
}
if (reg < 1 || reg > is31->cdef->num_leds) {
- dev_err(dev, "invalid led reg %u\n", reg);
- ret = -EINVAL;
+ ret = dev_err_probe(dev, -EINVAL, "invalid led reg %u\n", reg);
goto put_child_node;
}
led = &is31->leds[reg - 1];
if (led->configured) {
- dev_err(dev, "led %u is already configured\n", reg);
- ret = -EINVAL;
+ ret = dev_err_probe(dev, -EINVAL, "led %u is already configured\n", reg);
goto put_child_node;
}
- ret = is31fl319x_parse_child_dt(dev, child, led);
+ ret = is31fl319x_parse_child_fw(dev, child, led, is31);
if (ret) {
- dev_err(dev, "led %u DT parsing failed\n", reg);
+ ret = dev_err_probe(dev, ret, "led %u DT parsing failed\n", reg);
goto put_child_node;
}
@@ -264,82 +445,62 @@ static int is31fl319x_parse_dt(struct device *dev,
}
is31->audio_gain_db = 0;
- ret = of_property_read_u32(np, "audio-gain-db", &is31->audio_gain_db);
- if (!ret)
- is31->audio_gain_db = min(is31->audio_gain_db,
- IS31FL319X_AUDIO_GAIN_DB_MAX);
+ if (is31->cdef->is_3196or3199) {
+ ret = fwnode_property_read_u32(fwnode, "audio-gain-db", &is31->audio_gain_db);
+ if (!ret)
+ is31->audio_gain_db = min(is31->audio_gain_db,
+ IS31FL3196_AUDIO_GAIN_DB_MAX);
+ }
return 0;
put_child_node:
- of_node_put(child);
+ fwnode_handle_put(child);
return ret;
}
-static bool is31fl319x_readable_reg(struct device *dev, unsigned int reg)
-{ /* we have no readable registers */
- return false;
-}
-
-static bool is31fl319x_volatile_reg(struct device *dev, unsigned int reg)
-{ /* volatile registers are not cached */
- switch (reg) {
- case IS31FL319X_DATA_UPDATE:
- case IS31FL319X_TIME_UPDATE:
- case IS31FL319X_RESET:
- return true; /* always write-through */
+static inline int is31fl3190_microamp_to_cs(struct device *dev, u32 microamp)
+{
+ switch (microamp) {
+ case 5000:
+ return IS31FL3190_CURRENT_5_mA;
+ case 10000:
+ return IS31FL3190_CURRENT_10_mA;
+ case 17500:
+ return IS31FL3190_CURRENT_17dot5_mA;
+ case 30000:
+ return IS31FL3190_CURRENT_30_mA;
+ case 42000:
+ return IS31FL3190_CURRENT_42_mA;
default:
- return false;
+ dev_warn(dev, "Unsupported current value: %d, using 5000 µA!\n", microamp);
+ return IS31FL3190_CURRENT_5_mA;
}
}
-static const struct reg_default is31fl319x_reg_defaults[] = {
- { IS31FL319X_CONFIG1, 0x00},
- { IS31FL319X_CONFIG2, 0x00},
- { IS31FL319X_PWM(0), 0x00},
- { IS31FL319X_PWM(1), 0x00},
- { IS31FL319X_PWM(2), 0x00},
- { IS31FL319X_PWM(3), 0x00},
- { IS31FL319X_PWM(4), 0x00},
- { IS31FL319X_PWM(5), 0x00},
- { IS31FL319X_PWM(6), 0x00},
- { IS31FL319X_PWM(7), 0x00},
- { IS31FL319X_PWM(8), 0x00},
-};
-
-static struct regmap_config regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
- .max_register = IS31FL319X_REG_CNT,
- .cache_type = REGCACHE_FLAT,
- .readable_reg = is31fl319x_readable_reg,
- .volatile_reg = is31fl319x_volatile_reg,
- .reg_defaults = is31fl319x_reg_defaults,
- .num_reg_defaults = ARRAY_SIZE(is31fl319x_reg_defaults),
-};
-
-static inline int is31fl319x_microamp_to_cs(struct device *dev, u32 microamp)
-{ /* round down to nearest supported value (range check done by caller) */
- u32 step = microamp / IS31FL319X_CURRENT_STEP;
+static inline int is31fl3196_microamp_to_cs(struct device *dev, u32 microamp)
+{
+ /* round down to nearest supported value (range check done by caller) */
+ u32 step = microamp / IS31FL3196_CURRENT_uA_STEP;
- return ((IS31FL319X_CONFIG2_CS_STEP_REF - step) &
- IS31FL319X_CONFIG2_CS_MASK) <<
- IS31FL319X_CONFIG2_CS_SHIFT; /* CS encoding */
+ return ((IS31FL3196_CONFIG2_CS_STEP_REF - step) &
+ IS31FL3196_CONFIG2_CS_MASK) <<
+ IS31FL3196_CONFIG2_CS_SHIFT; /* CS encoding */
}
-static inline int is31fl319x_db_to_gain(u32 dezibel)
-{ /* round down to nearest supported value (range check done by caller) */
- return dezibel / IS31FL319X_AUDIO_GAIN_DB_STEP;
+static inline int is31fl3196_db_to_gain(u32 dezibel)
+{
+ /* round down to nearest supported value (range check done by caller) */
+ return dezibel / IS31FL3196_AUDIO_GAIN_DB_STEP;
}
-static int is31fl319x_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int is31fl319x_probe(struct i2c_client *client)
{
struct is31fl319x_chip *is31;
struct device *dev = &client->dev;
int err;
int i = 0;
- u32 aggregated_led_microamp = IS31FL319X_CURRENT_MAX;
+ u32 aggregated_led_microamp;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -EIO;
@@ -349,10 +510,13 @@ static int is31fl319x_probe(struct i2c_client *client,
return -ENOMEM;
mutex_init(&is31->lock);
+ err = devm_add_action(dev, (void (*)(void *))mutex_destroy, &is31->lock);
+ if (err)
+ return err;
- err = is31fl319x_parse_dt(&client->dev, is31);
+ err = is31fl319x_parse_fw(&client->dev, is31);
if (err)
- goto free_mutex;
+ return err;
if (is31->shutdown_gpio) {
gpiod_direction_output(is31->shutdown_gpio, 0);
@@ -361,37 +525,35 @@ static int is31fl319x_probe(struct i2c_client *client,
}
is31->client = client;
- is31->regmap = devm_regmap_init_i2c(client, &regmap_config);
- if (IS_ERR(is31->regmap)) {
- dev_err(&client->dev, "failed to allocate register map\n");
- err = PTR_ERR(is31->regmap);
- goto free_mutex;
- }
+ is31->regmap = devm_regmap_init_i2c(client, is31->cdef->is31fl319x_regmap_config);
+ if (IS_ERR(is31->regmap))
+ return dev_err_probe(dev, PTR_ERR(is31->regmap), "failed to allocate register map\n");
i2c_set_clientdata(client, is31);
/* check for write-reply from chip (we can't read any registers) */
- err = regmap_write(is31->regmap, IS31FL319X_RESET, 0x00);
- if (err < 0) {
- dev_err(&client->dev, "no response from chip write: err = %d\n",
- err);
- err = -EIO; /* does not answer */
- goto free_mutex;
- }
+ err = regmap_write(is31->regmap, is31->cdef->reset_reg, 0x00);
+ if (err < 0)
+ return dev_err_probe(dev, err, "no response from chip write\n");
/*
* Kernel conventions require per-LED led-max-microamp property.
* But the chip does not allow to limit individual LEDs.
* So we take minimum from all subnodes for safety of hardware.
*/
+ aggregated_led_microamp = is31->cdef->current_max;
for (i = 0; i < is31->cdef->num_leds; i++)
if (is31->leds[i].configured &&
is31->leds[i].max_microamp < aggregated_led_microamp)
aggregated_led_microamp = is31->leds[i].max_microamp;
- regmap_write(is31->regmap, IS31FL319X_CONFIG2,
- is31fl319x_microamp_to_cs(dev, aggregated_led_microamp) |
- is31fl319x_db_to_gain(is31->audio_gain_db));
+ if (is31->cdef->is_3196or3199)
+ regmap_write(is31->regmap, IS31FL3196_CONFIG2,
+ is31fl3196_microamp_to_cs(dev, aggregated_led_microamp) |
+ is31fl3196_db_to_gain(is31->audio_gain_db));
+ else
+ regmap_update_bits(is31->regmap, IS31FL3190_CURRENT, IS31FL3190_CURRENT_MASK,
+ is31fl3190_microamp_to_cs(dev, aggregated_led_microamp));
for (i = 0; i < is31->cdef->num_leds; i++) {
struct is31fl319x_led *led = &is31->leds[i];
@@ -400,26 +562,14 @@ static int is31fl319x_probe(struct i2c_client *client,
continue;
led->chip = is31;
- led->cdev.brightness_set_blocking = is31fl319x_brightness_set;
+ led->cdev.brightness_set_blocking = is31->cdef->brightness_set;
err = devm_led_classdev_register(&client->dev, &led->cdev);
if (err < 0)
- goto free_mutex;
+ return err;
}
return 0;
-
-free_mutex:
- mutex_destroy(&is31->lock);
- return err;
-}
-
-static int is31fl319x_remove(struct i2c_client *client)
-{
- struct is31fl319x_chip *is31 = i2c_get_clientdata(client);
-
- mutex_destroy(&is31->lock);
- return 0;
}
/*
@@ -432,6 +582,10 @@ static const struct i2c_device_id is31fl319x_id[] = {
{ "is31fl3193" },
{ "is31fl3196" },
{ "is31fl3199" },
+ { "sn3190" },
+ { "sn3191" },
+ { "sn3193" },
+ { "sn3196" },
{ "sn3199" },
{},
};
@@ -440,10 +594,9 @@ MODULE_DEVICE_TABLE(i2c, is31fl319x_id);
static struct i2c_driver is31fl319x_driver = {
.driver = {
.name = "leds-is31fl319x",
- .of_match_table = of_match_ptr(of_is31fl319x_match),
+ .of_match_table = of_is31fl319x_match,
},
- .probe = is31fl319x_probe,
- .remove = is31fl319x_remove,
+ .probe_new = is31fl319x_probe,
.id_table = is31fl319x_id,
};
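
The rework above hangs every chip-specific detail (register layout, regmap config, brightness handler, current limits) off a per-variant chipdef that probe() fetches with device_get_match_data(), so the rest of the driver stays generic. A minimal sketch of that dispatch pattern, using hypothetical mydrv_* names rather than the driver's own symbols:

    struct mydrv_chipdef {
            int num_leds;
            int (*brightness_set)(struct led_classdev *cdev,
                                  enum led_brightness brightness);
    };

    static const struct mydrv_chipdef mydrv_variant_a = {
            .num_leds       = 3,
            .brightness_set = mydrv_variant_a_set,   /* hypothetical handler */
    };

    static const struct of_device_id mydrv_of_match[] = {
            { .compatible = "vendor,variant-a", .data = &mydrv_variant_a },
            { }
    };

    static int mydrv_probe(struct i2c_client *client)
    {
            const struct mydrv_chipdef *cdef = device_get_match_data(&client->dev);

            if (!cdef)
                    return -ENODEV;
            /* per LED: led->cdev.brightness_set_blocking = cdef->brightness_set; */
            return 0;
    }
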
diff --git a/drivers/leds/leds-turris-omnia.c b/drivers/leds/leds-turris-omnia.c
index 1adfed1c0619..eac6f4a573b2 100644
--- a/drivers/leds/leds-turris-omnia.c
+++ b/drivers/leds/leds-turris-omnia.c
@@ -239,9 +239,6 @@ static int omnia_leds_probe(struct i2c_client *client,
led += ret;
}
- if (devm_device_add_groups(dev, omnia_led_controller_groups))
- dev_warn(dev, "Could not add attribute group!\n");
-
return 0;
}
@@ -283,6 +280,7 @@ static struct i2c_driver omnia_leds_driver = {
.driver = {
.name = "leds-turris-omnia",
.of_match_table = of_omnia_leds_match,
+ .dev_groups = omnia_led_controller_groups,
},
};
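
Moving the sysfs attributes into .dev_groups lets the driver core add and remove them around probe()/remove(), instead of the probe path calling devm_device_add_groups() and merely warning on failure. The pattern, sketched with placeholder names (my_attrs / my_leds_driver and a hypothetical attribute):

    static struct attribute *my_attrs[] = {
            &dev_attr_some_setting.attr,     /* hypothetical attribute */
            NULL
    };
    ATTRIBUTE_GROUPS(my);                    /* defines my_groups */

    static struct i2c_driver my_leds_driver = {
            .driver = {
                    .name       = "my-leds",
                    .dev_groups = my_groups, /* core manages the sysfs files */
            },
            /* .probe_new, .id_table, ... */
    };
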
diff --git a/drivers/leds/rgb/leds-pwm-multicolor.c b/drivers/leds/rgb/leds-pwm-multicolor.c
index 45e38708ecb1..da9d2218ae18 100644
--- a/drivers/leds/rgb/leds-pwm-multicolor.c
+++ b/drivers/leds/rgb/leds-pwm-multicolor.c
@@ -19,6 +19,7 @@
struct pwm_led {
struct pwm_device *pwm;
struct pwm_state state;
+ bool active_low;
};
struct pwm_mc_led {
@@ -45,6 +46,9 @@ static int led_pwm_mc_set(struct led_classdev *cdev,
duty *= mc_cdev->subled_info[i].brightness;
do_div(duty, cdev->max_brightness);
+ if (priv->leds[i].active_low)
+ duty = priv->leds[i].state.period - duty;
+
priv->leds[i].state.duty_cycle = duty;
priv->leds[i].state.enabled = duty > 0;
ret = pwm_apply_state(priv->leds[i].pwm,
@@ -72,11 +76,11 @@ static int iterate_subleds(struct device *dev, struct pwm_mc_led *priv,
pwmled = &priv->leds[priv->mc_cdev.num_colors];
pwmled->pwm = devm_fwnode_pwm_get(dev, fwnode, NULL);
if (IS_ERR(pwmled->pwm)) {
- ret = PTR_ERR(pwmled->pwm);
- dev_err(dev, "unable to request PWM: %d\n", ret);
+ ret = dev_err_probe(dev, PTR_ERR(pwmled->pwm), "unable to request PWM\n");
goto release_fwnode;
}
pwm_init_state(pwmled->pwm, &pwmled->state);
+ pwmled->active_low = fwnode_property_read_bool(fwnode, "active-low");
ret = fwnode_property_read_u32(fwnode, "color", &color);
if (ret) {
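
With the new active-low flag, each sub-LED's duty cycle is inverted against its PWM period before pwm_apply_state(), so higher brightness means a longer low phase on the line. A small worked sketch with hypothetical numbers:

    /* sketch: 75% brightness on an active-low channel, period = 1000000 ns */
    u64 period = 1000000;
    u64 duty   = period * 191 / 255;   /* ~749019 ns high time if active-high */

    if (active_low)
            duty = period - duty;      /* ~250981 ns high: line stays low ~75% of the time */
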
diff --git a/drivers/leds/simple/Kconfig b/drivers/leds/simple/Kconfig
index 9f6a68336659..fd2b8225d926 100644
--- a/drivers/leds/simple/Kconfig
+++ b/drivers/leds/simple/Kconfig
@@ -1,11 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
config LEDS_SIEMENS_SIMATIC_IPC
tristate "LED driver for Siemens Simatic IPCs"
- depends on LEDS_CLASS
+ depends on LEDS_GPIO
depends on SIEMENS_SIMATIC_IPC
help
This option enables support for the LEDs of several Industrial PCs
from Siemens.
- To compile this driver as a module, choose M here: the module
- will be called simatic-ipc-leds.
+ To compile this driver as a module, choose M here: the modules
+ will be called simatic-ipc-leds and simatic-ipc-leds-gpio.
diff --git a/drivers/leds/simple/Makefile b/drivers/leds/simple/Makefile
index 8481f1e9e360..1c7ef5e1324b 100644
--- a/drivers/leds/simple/Makefile
+++ b/drivers/leds/simple/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_LEDS_SIEMENS_SIMATIC_IPC) += simatic-ipc-leds.o
+obj-$(CONFIG_LEDS_SIEMENS_SIMATIC_IPC) += simatic-ipc-leds-gpio.o
diff --git a/drivers/leds/simple/simatic-ipc-leds-gpio.c b/drivers/leds/simple/simatic-ipc-leds-gpio.c
new file mode 100644
index 000000000000..4c9e663a90ba
--- /dev/null
+++ b/drivers/leds/simple/simatic-ipc-leds-gpio.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Siemens SIMATIC IPC driver for GPIO based LEDs
+ *
+ * Copyright (c) Siemens AG, 2022
+ *
+ * Authors:
+ * Henning Schild <henning.schild@siemens.com>
+ */
+
+#include <linux/gpio/machine.h>
+#include <linux/gpio/consumer.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+static struct gpiod_lookup_table simatic_ipc_led_gpio_table = {
+ .dev_id = "leds-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 51, NULL, 0, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 52, NULL, 1, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 53, NULL, 2, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 57, NULL, 3, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 58, NULL, 4, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 60, NULL, 5, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 56, NULL, 6, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 59, NULL, 7, GPIO_ACTIVE_HIGH),
+ },
+};
+
+static const struct gpio_led simatic_ipc_gpio_leds[] = {
+ { .name = "green:" LED_FUNCTION_STATUS "-3" },
+ { .name = "red:" LED_FUNCTION_STATUS "-1" },
+ { .name = "green:" LED_FUNCTION_STATUS "-1" },
+ { .name = "red:" LED_FUNCTION_STATUS "-2" },
+ { .name = "green:" LED_FUNCTION_STATUS "-2" },
+ { .name = "red:" LED_FUNCTION_STATUS "-3" },
+};
+
+static const struct gpio_led_platform_data simatic_ipc_gpio_leds_pdata = {
+ .num_leds = ARRAY_SIZE(simatic_ipc_gpio_leds),
+ .leds = simatic_ipc_gpio_leds,
+};
+
+static struct platform_device *simatic_leds_pdev;
+
+static int simatic_ipc_leds_gpio_remove(struct platform_device *pdev)
+{
+ gpiod_remove_lookup_table(&simatic_ipc_led_gpio_table);
+ platform_device_unregister(simatic_leds_pdev);
+
+ return 0;
+}
+
+static int simatic_ipc_leds_gpio_probe(struct platform_device *pdev)
+{
+ struct gpio_desc *gpiod;
+ int err;
+
+ gpiod_add_lookup_table(&simatic_ipc_led_gpio_table);
+ simatic_leds_pdev = platform_device_register_resndata(NULL,
+ "leds-gpio", PLATFORM_DEVID_NONE, NULL, 0,
+ &simatic_ipc_gpio_leds_pdata,
+ sizeof(simatic_ipc_gpio_leds_pdata));
+ if (IS_ERR(simatic_leds_pdev)) {
+ err = PTR_ERR(simatic_leds_pdev);
+ goto out;
+ }
+
+ /* PM_BIOS_BOOT_N */
+ gpiod = gpiod_get_index(&simatic_leds_pdev->dev, NULL, 6, GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod)) {
+ err = PTR_ERR(gpiod);
+ goto out;
+ }
+ gpiod_put(gpiod);
+
+ /* PM_WDT_OUT */
+ gpiod = gpiod_get_index(&simatic_leds_pdev->dev, NULL, 7, GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod)) {
+ err = PTR_ERR(gpiod);
+ goto out;
+ }
+ gpiod_put(gpiod);
+
+ return 0;
+out:
+ simatic_ipc_leds_gpio_remove(pdev);
+
+ return err;
+}
+
+static struct platform_driver simatic_ipc_led_gpio_driver = {
+ .probe = simatic_ipc_leds_gpio_probe,
+ .remove = simatic_ipc_leds_gpio_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ }
+};
+module_platform_driver(simatic_ipc_led_gpio_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" KBUILD_MODNAME);
+MODULE_SOFTDEP("pre: platform:leds-gpio");
+MODULE_AUTHOR("Henning Schild <henning.schild@siemens.com>");
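
Instead of describing the LEDs in firmware, the new module installs a GPIO lookup table keyed to the "leds-gpio" device and then registers a leds-gpio platform device so the generic driver picks the pins up. Reduced to its core, with a hypothetical pin controller name, a single pin, and pdata standing in for a gpio_led_platform_data like the one above:

    static struct gpiod_lookup_table my_led_gpio_table = {
            .dev_id = "leds-gpio",
            .table = {
                    GPIO_LOOKUP_IDX("some-pinctrl.0", 51, NULL, 0, GPIO_ACTIVE_LOW),
                    { }
            },
    };

    /* in probe(): make the pins visible, then let leds-gpio bind to them */
    gpiod_add_lookup_table(&my_led_gpio_table);
    pdev = platform_device_register_resndata(NULL, "leds-gpio", PLATFORM_DEVID_NONE,
                                             NULL, 0, &pdata, sizeof(pdata));
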
diff --git a/drivers/leds/simple/simatic-ipc-leds.c b/drivers/leds/simple/simatic-ipc-leds.c
index 078d43f5ba38..4894c228c165 100644
--- a/drivers/leds/simple/simatic-ipc-leds.c
+++ b/drivers/leds/simple/simatic-ipc-leds.c
@@ -23,7 +23,7 @@
#define SIMATIC_IPC_LED_PORT_BASE 0x404E
struct simatic_ipc_led {
- unsigned int value; /* mask for io and offset for mem */
+ unsigned int value; /* mask for io */
char *name;
struct led_classdev cdev;
};
@@ -38,21 +38,6 @@ static struct simatic_ipc_led simatic_ipc_leds_io[] = {
{ }
};
-/* the actual start will be discovered with PCI, 0 is a placeholder */
-static struct resource simatic_ipc_led_mem_res = DEFINE_RES_MEM_NAMED(0, SZ_4K, KBUILD_MODNAME);
-
-static void __iomem *simatic_ipc_led_memory;
-
-static struct simatic_ipc_led simatic_ipc_leds_mem[] = {
- {0x500 + 0x1A0, "red:" LED_FUNCTION_STATUS "-1"},
- {0x500 + 0x1A8, "green:" LED_FUNCTION_STATUS "-1"},
- {0x500 + 0x1C8, "red:" LED_FUNCTION_STATUS "-2"},
- {0x500 + 0x1D0, "green:" LED_FUNCTION_STATUS "-2"},
- {0x500 + 0x1E0, "red:" LED_FUNCTION_STATUS "-3"},
- {0x500 + 0x198, "green:" LED_FUNCTION_STATUS "-3"},
- { }
-};
-
static struct resource simatic_ipc_led_io_res =
DEFINE_RES_IO_NAMED(SIMATIC_IPC_LED_PORT_BASE, SZ_2, KBUILD_MODNAME);
@@ -88,28 +73,6 @@ static enum led_brightness simatic_ipc_led_get_io(struct led_classdev *led_cd)
return inw(SIMATIC_IPC_LED_PORT_BASE) & led->value ? LED_OFF : led_cd->max_brightness;
}
-static void simatic_ipc_led_set_mem(struct led_classdev *led_cd,
- enum led_brightness brightness)
-{
- struct simatic_ipc_led *led = cdev_to_led(led_cd);
- void __iomem *reg = simatic_ipc_led_memory + led->value;
- u32 val;
-
- val = readl(reg);
- val = (val & ~1) | (brightness == LED_OFF);
- writel(val, reg);
-}
-
-static enum led_brightness simatic_ipc_led_get_mem(struct led_classdev *led_cd)
-{
- struct simatic_ipc_led *led = cdev_to_led(led_cd);
- void __iomem *reg = simatic_ipc_led_memory + led->value;
- u32 val;
-
- val = readl(reg);
- return (val & 1) ? LED_OFF : led_cd->max_brightness;
-}
-
static int simatic_ipc_leds_probe(struct platform_device *pdev)
{
const struct simatic_ipc_platform *plat = pdev->dev.platform_data;
@@ -117,9 +80,7 @@ static int simatic_ipc_leds_probe(struct platform_device *pdev)
struct simatic_ipc_led *ipcled;
struct led_classdev *cdev;
struct resource *res;
- void __iomem *reg;
- int err, type;
- u32 val;
+ int err;
switch (plat->devmode) {
case SIMATIC_IPC_DEVICE_227D:
@@ -134,52 +95,19 @@ static int simatic_ipc_leds_probe(struct platform_device *pdev)
}
ipcled = simatic_ipc_leds_io;
}
- type = IORESOURCE_IO;
if (!devm_request_region(dev, res->start, resource_size(res), KBUILD_MODNAME)) {
dev_err(dev, "Unable to register IO resource at %pR\n", res);
return -EBUSY;
}
break;
- case SIMATIC_IPC_DEVICE_127E:
- res = &simatic_ipc_led_mem_res;
- ipcled = simatic_ipc_leds_mem;
- type = IORESOURCE_MEM;
-
- /* get GPIO base from PCI */
- res->start = simatic_ipc_get_membase0(PCI_DEVFN(13, 0));
- if (res->start == 0)
- return -ENODEV;
-
- /* do the final address calculation */
- res->start = res->start + (0xC5 << 16);
- res->end += res->start;
-
- simatic_ipc_led_memory = devm_ioremap_resource(dev, res);
- if (IS_ERR(simatic_ipc_led_memory))
- return PTR_ERR(simatic_ipc_led_memory);
-
- /* initialize power/watchdog LED */
- reg = simatic_ipc_led_memory + 0x500 + 0x1D8; /* PM_WDT_OUT */
- val = readl(reg);
- writel(val & ~1, reg);
-
- reg = simatic_ipc_led_memory + 0x500 + 0x1C0; /* PM_BIOS_BOOT_N */
- val = readl(reg);
- writel(val | 1, reg);
- break;
default:
return -ENODEV;
}
while (ipcled->value) {
cdev = &ipcled->cdev;
- if (type == IORESOURCE_MEM) {
- cdev->brightness_set = simatic_ipc_led_set_mem;
- cdev->brightness_get = simatic_ipc_led_get_mem;
- } else {
- cdev->brightness_set = simatic_ipc_led_set_io;
- cdev->brightness_get = simatic_ipc_led_get_io;
- }
+ cdev->brightness_set = simatic_ipc_led_set_io;
+ cdev->brightness_get = simatic_ipc_led_get_io;
cdev->max_brightness = LED_ON;
cdev->name = ipcled->name;
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index 439fab4eaa85..1bbb9ca08d40 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -647,7 +647,7 @@ do_adb_query(struct adb_request *req)
switch(req->data[1]) {
case ADB_QUERY_GETDEVINFO:
- if (req->nbytes < 3)
+ if (req->nbytes < 3 || req->data[2] >= 16)
break;
mutex_lock(&adb_handler_mutex);
req->reply[0] = adb_handler[req->data[2]].original_address;
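
The added check bounds the user-supplied device index before it is used to address the 16-entry adb_handler[] table, closing an out-of-bounds read. The shape of the fix in a self-contained sketch with hypothetical names:

    #define NR_HANDLERS 16

    struct handler { int original_address; };
    static struct handler handlers[NR_HANDLERS];

    static int lookup_original_address(const unsigned char *data, int nbytes)
    {
            /* data[2] comes from userspace: reject anything outside the table */
            if (nbytes < 3 || data[2] >= NR_HANDLERS)
                    return -1;
            return handlers[data[2]].original_address;
    }
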
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index b10239d6ef93..02922073c9ef 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -19,13 +19,15 @@
#include <linux/suspend.h>
#include <linux/slab.h>
-#define IMX_MU_CHANS 16
+#define IMX_MU_CHANS 17
/* TX0/RX0/RXDB[0-3] */
#define IMX_MU_SCU_CHANS 6
/* TX0/RX0 */
#define IMX_MU_S4_CHANS 2
#define IMX_MU_CHAN_NAME_SIZE 20
+#define IMX_MU_NUM_RR 4
+
#define IMX_MU_SECO_TX_TOUT (msecs_to_jiffies(3000))
#define IMX_MU_SECO_RX_TOUT (msecs_to_jiffies(3000))
@@ -35,9 +37,11 @@ enum imx_mu_chan_type {
IMX_MU_TYPE_RX = 1, /* Rx */
IMX_MU_TYPE_TXDB = 2, /* Tx doorbell */
IMX_MU_TYPE_RXDB = 3, /* Rx doorbell */
+ IMX_MU_TYPE_RST = 4, /* Reset */
};
enum imx_mu_xcr {
+ IMX_MU_CR,
IMX_MU_GIER,
IMX_MU_GCR,
IMX_MU_TCR,
@@ -50,6 +54,7 @@ enum imx_mu_xsr {
IMX_MU_GSR,
IMX_MU_TSR,
IMX_MU_RSR,
+ IMX_MU_xSR_MAX,
};
struct imx_sc_rpc_msg_max {
@@ -85,7 +90,7 @@ struct imx_mu_priv {
int irq[IMX_MU_CHANS];
bool suspend;
- u32 xcr[4];
+ u32 xcr[IMX_MU_xCR_MAX];
bool side_b;
};
@@ -105,8 +110,8 @@ struct imx_mu_dcfg {
enum imx_mu_type type;
u32 xTR; /* Transmit Register0 */
u32 xRR; /* Receive Register0 */
- u32 xSR[4]; /* Status Registers */
- u32 xCR[4]; /* Control Registers */
+ u32 xSR[IMX_MU_xSR_MAX]; /* Status Registers */
+ u32 xCR[IMX_MU_xCR_MAX]; /* Control Registers */
};
#define IMX_MU_xSR_GIPn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(28 + (3 - (x))))
@@ -121,6 +126,9 @@ struct imx_mu_dcfg {
#define IMX_MU_xCR_TIEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(20 + (3 - (x))))
/* General Purpose Interrupt Request */
#define IMX_MU_xCR_GIRn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(16 + (3 - (x))))
+/* MU reset */
+#define IMX_MU_xCR_RST(type) (type & IMX_MU_V2 ? BIT(0) : BIT(5))
+#define IMX_MU_xSR_RST(type) (type & IMX_MU_V2 ? BIT(0) : BIT(7))
static struct imx_mu_priv *to_imx_mu_priv(struct mbox_controller *mbox)
@@ -497,6 +505,8 @@ static irqreturn_t imx_mu_isr(int irq, void *p)
val &= IMX_MU_xSR_GIPn(priv->dcfg->type, cp->idx) &
(ctrl & IMX_MU_xCR_GIEn(priv->dcfg->type, cp->idx));
break;
+ case IMX_MU_TYPE_RST:
+ return IRQ_NONE;
default:
dev_warn_ratelimited(priv->dev, "Unhandled channel type %d\n",
cp->type);
@@ -581,6 +591,8 @@ static void imx_mu_shutdown(struct mbox_chan *chan)
{
struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
struct imx_mu_con_priv *cp = chan->con_priv;
+ int ret;
+ u32 sr;
if (cp->type == IMX_MU_TYPE_TXDB) {
tasklet_kill(&cp->txdb_tasklet);
@@ -598,6 +610,13 @@ static void imx_mu_shutdown(struct mbox_chan *chan)
case IMX_MU_TYPE_RXDB:
imx_mu_xcr_rmw(priv, IMX_MU_GIER, 0, IMX_MU_xCR_GIEn(priv->dcfg->type, cp->idx));
break;
+ case IMX_MU_TYPE_RST:
+ imx_mu_xcr_rmw(priv, IMX_MU_CR, IMX_MU_xCR_RST(priv->dcfg->type), 0);
+ ret = readl_poll_timeout(priv->base + priv->dcfg->xSR[IMX_MU_SR], sr,
+ !(sr & IMX_MU_xSR_RST(priv->dcfg->type)), 1, 5);
+ if (ret)
+ dev_warn(priv->dev, "RST channel timeout\n");
+ break;
default:
break;
}
@@ -694,6 +713,7 @@ static struct mbox_chan *imx_mu_seco_xlate(struct mbox_controller *mbox,
static void imx_mu_init_generic(struct imx_mu_priv *priv)
{
unsigned int i;
+ unsigned int val;
for (i = 0; i < IMX_MU_CHANS; i++) {
struct imx_mu_con_priv *cp = &priv->con_priv[i];
@@ -715,6 +735,14 @@ static void imx_mu_init_generic(struct imx_mu_priv *priv)
/* Set default MU configuration */
for (i = 0; i < IMX_MU_xCR_MAX; i++)
imx_mu_write(priv, 0, priv->dcfg->xCR[i]);
+
+ /* Clear any pending GIP */
+ val = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_GSR]);
+ imx_mu_write(priv, val, priv->dcfg->xSR[IMX_MU_GSR]);
+
+ /* Clear any pending RSR */
+ for (i = 0; i < IMX_MU_NUM_RR; i++)
+ imx_mu_read(priv, priv->dcfg->xRR + (i % 4) * 4);
}
static void imx_mu_init_specific(struct imx_mu_priv *priv)
@@ -865,7 +893,7 @@ static const struct imx_mu_dcfg imx_mu_cfg_imx6sx = {
.xTR = 0x0,
.xRR = 0x10,
.xSR = {0x20, 0x20, 0x20, 0x20},
- .xCR = {0x24, 0x24, 0x24, 0x24},
+ .xCR = {0x24, 0x24, 0x24, 0x24, 0x24},
};
static const struct imx_mu_dcfg imx_mu_cfg_imx7ulp = {
@@ -888,7 +916,7 @@ static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp = {
.xTR = 0x200,
.xRR = 0x280,
.xSR = {0xC, 0x118, 0x124, 0x12C},
- .xCR = {0x110, 0x114, 0x120, 0x128},
+ .xCR = {0x8, 0x110, 0x114, 0x120, 0x128},
};
static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp_s4 = {
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index 2578e5aaa935..9465f9081515 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -192,15 +192,10 @@ static bool cmdq_thread_is_in_wfe(struct cmdq_thread *thread)
static void cmdq_task_exec_done(struct cmdq_task *task, int sta)
{
- struct cmdq_task_cb *cb = &task->pkt->async_cb;
struct cmdq_cb_data data;
data.sta = sta;
- data.data = cb->data;
data.pkt = task->pkt;
- if (cb->cb)
- cb->cb(data);
-
mbox_chan_received_data(task->thread->chan, &data);
list_del(&task->list_entry);
@@ -448,7 +443,6 @@ done:
static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
{
struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
- struct cmdq_task_cb *cb;
struct cmdq_cb_data data;
struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
struct cmdq_task *task, *tmp;
@@ -465,13 +459,8 @@ static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
list_entry) {
- cb = &task->pkt->async_cb;
data.sta = -ECONNABORTED;
- data.data = cb->data;
data.pkt = task->pkt;
- if (cb->cb)
- cb->cb(data);
-
mbox_chan_received_data(task->thread->chan, &data);
list_del(&task->list_entry);
kfree(task);
diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
index cf3e8096942a..529c9d04e9a4 100644
--- a/drivers/md/bcache/Kconfig
+++ b/drivers/md/bcache/Kconfig
@@ -29,7 +29,7 @@ config BCACHE_CLOSURES_DEBUG
operations that get stuck.
config BCACHE_ASYNC_REGISTRATION
- bool "Asynchronous device registration (EXPERIMENTAL)"
+ bool "Asynchronous device registration"
depends on BCACHE
help
Add a sysfs file /sys/fs/bcache/register_async. Writing registering
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index e136d6edc1ed..147c493a989a 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -812,7 +812,7 @@ int bch_btree_cache_alloc(struct cache_set *c)
c->shrink.seeks = 4;
c->shrink.batch = c->btree_pages * 2;
- if (register_shrinker(&c->shrink))
+ if (register_shrinker(&c->shrink, "md-bcache:%pU", c->set_uuid))
pr_warn("bcache: %s: could not register shrinker\n",
__func__);
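
register_shrinker() here takes a printf-style name (new in this series), so the shrinker can be told apart under the shrinker debugfs interface. A hedged sketch of a call in that style, with hypothetical my_shrinker and instance_name:

    /* sketch: register a shrinker with an identifying name */
    ret = register_shrinker(&my_shrinker, "my-subsys:%s", instance_name);
    if (ret)
            pr_warn("%s: could not register shrinker\n", __func__);
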
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index dc01ce33265b..09c7ed2650ca 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/stacktrace.h>
+#include <linux/jump_label.h>
#define DM_MSG_PREFIX "bufio"
@@ -81,6 +82,8 @@
*/
struct dm_bufio_client {
struct mutex lock;
+ spinlock_t spinlock;
+ bool no_sleep;
struct list_head lru[LIST_SIZE];
unsigned long n_buffers[LIST_SIZE];
@@ -90,7 +93,6 @@ struct dm_bufio_client {
s8 sectors_per_block_bits;
void (*alloc_callback)(struct dm_buffer *);
void (*write_callback)(struct dm_buffer *);
-
struct kmem_cache *slab_buffer;
struct kmem_cache *slab_cache;
struct dm_io_client *dm_io;
@@ -161,23 +163,34 @@ struct dm_buffer {
#endif
};
+static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);
+
/*----------------------------------------------------------------*/
#define dm_bufio_in_request() (!!current->bio_list)
static void dm_bufio_lock(struct dm_bufio_client *c)
{
- mutex_lock_nested(&c->lock, dm_bufio_in_request());
+ if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
+ spin_lock_bh(&c->spinlock);
+ else
+ mutex_lock_nested(&c->lock, dm_bufio_in_request());
}
static int dm_bufio_trylock(struct dm_bufio_client *c)
{
- return mutex_trylock(&c->lock);
+ if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
+ return spin_trylock_bh(&c->spinlock);
+ else
+ return mutex_trylock(&c->lock);
}
static void dm_bufio_unlock(struct dm_bufio_client *c)
{
- mutex_unlock(&c->lock);
+ if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
+ spin_unlock_bh(&c->spinlock);
+ else
+ mutex_unlock(&c->lock);
}
/*----------------------------------------------------------------*/
@@ -802,6 +815,10 @@ static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
BUG_ON(test_bit(B_WRITING, &b->state));
BUG_ON(test_bit(B_DIRTY, &b->state));
+ if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep &&
+ unlikely(test_bit(B_READING, &b->state)))
+ continue;
+
if (!b->hold_count) {
__make_buffer_clean(b);
__unlink_buffer(b);
@@ -810,6 +827,9 @@ static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
cond_resched();
}
+ if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
+ return NULL;
+
list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
BUG_ON(test_bit(B_READING, &b->state));
@@ -1617,7 +1637,8 @@ static void drop_buffers(struct dm_bufio_client *c)
*/
static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
{
- if (!(gfp & __GFP_FS)) {
+ if (!(gfp & __GFP_FS) ||
+ (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) {
if (test_bit(B_READING, &b->state) ||
test_bit(B_WRITING, &b->state) ||
test_bit(B_DIRTY, &b->state))
@@ -1715,7 +1736,8 @@ static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrin
struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
unsigned reserved_buffers, unsigned aux_size,
void (*alloc_callback)(struct dm_buffer *),
- void (*write_callback)(struct dm_buffer *))
+ void (*write_callback)(struct dm_buffer *),
+ unsigned int flags)
{
int r;
struct dm_bufio_client *c;
@@ -1745,12 +1767,18 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
c->alloc_callback = alloc_callback;
c->write_callback = write_callback;
+ if (flags & DM_BUFIO_CLIENT_NO_SLEEP) {
+ c->no_sleep = true;
+ static_branch_inc(&no_sleep_enabled);
+ }
+
for (i = 0; i < LIST_SIZE; i++) {
INIT_LIST_HEAD(&c->lru[i]);
c->n_buffers[i] = 0;
}
mutex_init(&c->lock);
+ spin_lock_init(&c->spinlock);
INIT_LIST_HEAD(&c->reserved_buffers);
c->need_reserved_buffers = reserved_buffers;
@@ -1804,7 +1832,8 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
c->shrinker.scan_objects = dm_bufio_shrink_scan;
c->shrinker.seeks = 1;
c->shrinker.batch = 0;
- r = register_shrinker(&c->shrinker);
+ r = register_shrinker(&c->shrinker, "md-%s:(%u:%u)", slab_name,
+ MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
if (r)
goto bad;
@@ -1876,6 +1905,8 @@ void dm_bufio_client_destroy(struct dm_bufio_client *c)
kmem_cache_destroy(c->slab_buffer);
dm_io_client_destroy(c->dm_io);
mutex_destroy(&c->lock);
+ if (c->no_sleep)
+ static_branch_dec(&no_sleep_enabled);
kfree(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
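
DM_BUFIO_CLIENT_NO_SLEEP clients take a BH-safe spinlock instead of the mutex, and the choice is gated by a static key so sleeping clients pay nothing for the new mode. The locking side of that, boiled down (names mirror the hunk above but the struct is trimmed):

    static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);

    struct client {
            struct mutex lock;
            spinlock_t   spinlock;
            bool         no_sleep;
    };

    static void client_lock(struct client *c)
    {
            /* compiles down to a plain mutex_lock() until a no_sleep client exists */
            if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
                    spin_lock_bh(&c->spinlock);
            else
                    mutex_lock(&c->lock);
    }

    /* creating a no_sleep client: c->no_sleep = true; static_branch_inc(&no_sleep_enabled); */
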
diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c
index 223e8e1a7a13..512cc6cea095 100644
--- a/drivers/md/dm-ebs-target.c
+++ b/drivers/md/dm-ebs-target.c
@@ -313,7 +313,8 @@ static int ebs_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- ec->bufio = dm_bufio_client_create(ec->dev->bdev, to_bytes(ec->u_bs), 1, 0, NULL, NULL);
+ ec->bufio = dm_bufio_client_create(ec->dev->bdev, to_bytes(ec->u_bs), 1,
+ 0, NULL, NULL, 0);
if (IS_ERR(ec->bufio)) {
ti->error = "Cannot create dm bufio client";
r = PTR_ERR(ec->bufio);
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index c60f9b2ece2d..aaf2472df6e5 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -4439,7 +4439,7 @@ try_smaller_buffer:
}
ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
- 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL);
+ 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL, 0);
if (IS_ERR(ic->bufio)) {
r = PTR_ERR(ic->bufio);
ti->error = "Cannot initialize dm-bufio";
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 1ec17c32867f..c640be453313 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3728,6 +3728,7 @@ static int raid_message(struct dm_target *ti, unsigned int argc, char **argv,
if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) {
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ md_unregister_thread(&mddev->sync_thread);
md_reap_sync_thread(mddev);
}
} else if (decipher_sync_action(mddev, mddev->recovery) != st_idle)
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index f46f930eedf9..680cc05ec654 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -493,7 +493,7 @@ static int read_exceptions(struct pstore *ps,
client = dm_bufio_client_create(dm_snap_cow(ps->store->snap)->bdev,
ps->store->chunk_size << SECTOR_SHIFT,
- 1, 0, NULL, NULL);
+ 1, 0, NULL, NULL, 0);
if (IS_ERR(client))
return PTR_ERR(client);
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index cea2b3789736..23cffce56403 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -749,7 +749,7 @@ int verity_fec_ctr(struct dm_verity *v)
f->bufio = dm_bufio_client_create(f->dev->bdev,
f->io_size,
- 1, 0, NULL, NULL);
+ 1, 0, NULL, NULL, 0);
if (IS_ERR(f->bufio)) {
ti->error = "Cannot initialize FEC bufio client";
return PTR_ERR(f->bufio);
@@ -765,7 +765,7 @@ int verity_fec_ctr(struct dm_verity *v)
f->data_bufio = dm_bufio_client_create(v->data_dev->bdev,
1 << v->data_dev_block_bits,
- 1, 0, NULL, NULL);
+ 1, 0, NULL, NULL, 0);
if (IS_ERR(f->data_bufio)) {
ti->error = "Cannot initialize FEC data bufio client";
return PTR_ERR(f->data_bufio);
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 4fd853a56b1a..94b6cb599db4 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -20,6 +20,7 @@
#include <linux/reboot.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
+#include <linux/jump_label.h>
#define DM_MSG_PREFIX "verity"
@@ -35,14 +36,17 @@
#define DM_VERITY_OPT_PANIC "panic_on_corruption"
#define DM_VERITY_OPT_IGN_ZEROES "ignore_zero_blocks"
#define DM_VERITY_OPT_AT_MOST_ONCE "check_at_most_once"
+#define DM_VERITY_OPT_TASKLET_VERIFY "try_verify_in_tasklet"
-#define DM_VERITY_OPTS_MAX (3 + DM_VERITY_OPTS_FEC + \
+#define DM_VERITY_OPTS_MAX (4 + DM_VERITY_OPTS_FEC + \
DM_VERITY_ROOT_HASH_VERIFICATION_OPTS)
static unsigned dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;
module_param_named(prefetch_cluster, dm_verity_prefetch_cluster, uint, S_IRUGO | S_IWUSR);
+static DEFINE_STATIC_KEY_FALSE(use_tasklet_enabled);
+
struct dm_verity_prefetch_work {
struct work_struct work;
struct dm_verity *v;
@@ -221,7 +225,7 @@ static int verity_handle_err(struct dm_verity *v, enum verity_block_type type,
struct mapped_device *md = dm_table_get_md(v->ti->table);
/* Corruption should be visible in device status in all modes */
- v->hash_failed = 1;
+ v->hash_failed = true;
if (v->corrupted_errs >= DM_VERITY_MAX_CORRUPTED_ERRS)
goto out;
@@ -287,7 +291,19 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
verity_hash_at_level(v, block, level, &hash_block, &offset);
- data = dm_bufio_read(v->bufio, hash_block, &buf);
+ if (static_branch_unlikely(&use_tasklet_enabled) && io->in_tasklet) {
+ data = dm_bufio_get(v->bufio, hash_block, &buf);
+ if (data == NULL) {
+ /*
+ * In tasklet and the hash was not in the bufio cache.
+ * Return early and resume execution from a work-queue
+ * to read the hash from disk.
+ */
+ return -EAGAIN;
+ }
+ } else
+ data = dm_bufio_read(v->bufio, hash_block, &buf);
+
if (IS_ERR(data))
return PTR_ERR(data);
@@ -308,6 +324,15 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
if (likely(memcmp(verity_io_real_digest(v, io), want_digest,
v->digest_size) == 0))
aux->hash_verified = 1;
+ else if (static_branch_unlikely(&use_tasklet_enabled) &&
+ io->in_tasklet) {
+ /*
+ * Error handling code (FEC included) cannot be run in a
+ * tasklet since it may sleep, so fallback to work-queue.
+ */
+ r = -EAGAIN;
+ goto release_ret_r;
+ }
else if (verity_fec_decode(v, io,
DM_VERITY_BLOCK_TYPE_METADATA,
hash_block, data, NULL) == 0)
@@ -474,10 +499,24 @@ static int verity_verify_io(struct dm_verity_io *io)
{
bool is_zero;
struct dm_verity *v = io->v;
+#if defined(CONFIG_DM_VERITY_FEC)
struct bvec_iter start;
- unsigned b;
+#endif
+ struct bvec_iter iter_copy;
+ struct bvec_iter *iter;
struct crypto_wait wait;
struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
+ unsigned int b;
+
+ if (static_branch_unlikely(&use_tasklet_enabled) && io->in_tasklet) {
+ /*
+ * Copy the iterator in case we need to restart
+ * verification in a work-queue.
+ */
+ iter_copy = io->iter;
+ iter = &iter_copy;
+ } else
+ iter = &io->iter;
for (b = 0; b < io->n_blocks; b++) {
int r;
@@ -486,7 +525,7 @@ static int verity_verify_io(struct dm_verity_io *io)
if (v->validated_blocks &&
likely(test_bit(cur_block, v->validated_blocks))) {
- verity_bv_skip_block(v, io, &io->iter);
+ verity_bv_skip_block(v, io, iter);
continue;
}
@@ -501,7 +540,7 @@ static int verity_verify_io(struct dm_verity_io *io)
* If we expect a zero block, don't validate, just
* return zeros.
*/
- r = verity_for_bv_block(v, io, &io->iter,
+ r = verity_for_bv_block(v, io, iter,
verity_bv_zero);
if (unlikely(r < 0))
return r;
@@ -513,8 +552,11 @@ static int verity_verify_io(struct dm_verity_io *io)
if (unlikely(r < 0))
return r;
- start = io->iter;
- r = verity_for_io_block(v, io, &io->iter, &wait);
+#if defined(CONFIG_DM_VERITY_FEC)
+ if (verity_fec_is_enabled(v))
+ start = *iter;
+#endif
+ r = verity_for_io_block(v, io, iter, &wait);
if (unlikely(r < 0))
return r;
@@ -528,9 +570,18 @@ static int verity_verify_io(struct dm_verity_io *io)
if (v->validated_blocks)
set_bit(cur_block, v->validated_blocks);
continue;
+ } else if (static_branch_unlikely(&use_tasklet_enabled) &&
+ io->in_tasklet) {
+ /*
+ * Error handling code (FEC included) cannot be run in a
+ * tasklet since it may sleep, so fallback to work-queue.
+ */
+ return -EAGAIN;
+#if defined(CONFIG_DM_VERITY_FEC)
} else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA,
- cur_block, NULL, &start) == 0) {
+ cur_block, NULL, &start) == 0) {
continue;
+#endif
} else {
if (bio->bi_status) {
/*
@@ -567,7 +618,8 @@ static void verity_finish_io(struct dm_verity_io *io, blk_status_t status)
bio->bi_end_io = io->orig_bi_end_io;
bio->bi_status = status;
- verity_fec_finish_io(io);
+ if (!static_branch_unlikely(&use_tasklet_enabled) || !io->in_tasklet)
+ verity_fec_finish_io(io);
bio_endio(bio);
}
@@ -576,9 +628,29 @@ static void verity_work(struct work_struct *w)
{
struct dm_verity_io *io = container_of(w, struct dm_verity_io, work);
+ io->in_tasklet = false;
+
+ verity_fec_init_io(io);
verity_finish_io(io, errno_to_blk_status(verity_verify_io(io)));
}
+static void verity_tasklet(unsigned long data)
+{
+ struct dm_verity_io *io = (struct dm_verity_io *)data;
+ int err;
+
+ io->in_tasklet = true;
+ err = verity_verify_io(io);
+ if (err == -EAGAIN) {
+ /* fallback to retrying with work-queue */
+ INIT_WORK(&io->work, verity_work);
+ queue_work(io->v->verify_wq, &io->work);
+ return;
+ }
+
+ verity_finish_io(io, errno_to_blk_status(err));
+}
+
static void verity_end_io(struct bio *bio)
{
struct dm_verity_io *io = bio->bi_private;
@@ -589,8 +661,13 @@ static void verity_end_io(struct bio *bio)
return;
}
- INIT_WORK(&io->work, verity_work);
- queue_work(io->v->verify_wq, &io->work);
+ if (static_branch_unlikely(&use_tasklet_enabled) && io->v->use_tasklet) {
+ tasklet_init(&io->tasklet, verity_tasklet, (unsigned long)io);
+ tasklet_schedule(&io->tasklet);
+ } else {
+ INIT_WORK(&io->work, verity_work);
+ queue_work(io->v->verify_wq, &io->work);
+ }
}
/*
@@ -701,8 +778,6 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
bio->bi_private = io;
io->iter = bio->bi_iter;
- verity_fec_init_io(io);
-
verity_submit_prefetch(v, io);
submit_bio_noacct(bio);
@@ -752,6 +827,8 @@ static void verity_status(struct dm_target *ti, status_type_t type,
args++;
if (v->validated_blocks)
args++;
+ if (v->use_tasklet)
+ args++;
if (v->signature_key_desc)
args += DM_VERITY_ROOT_HASH_VERIFICATION_OPTS;
if (!args)
@@ -777,6 +854,8 @@ static void verity_status(struct dm_target *ti, status_type_t type,
DMEMIT(" " DM_VERITY_OPT_IGN_ZEROES);
if (v->validated_blocks)
DMEMIT(" " DM_VERITY_OPT_AT_MOST_ONCE);
+ if (v->use_tasklet)
+ DMEMIT(" " DM_VERITY_OPT_TASKLET_VERIFY);
sz = verity_fec_status_table(v, sz, result, maxlen);
if (v->signature_key_desc)
DMEMIT(" " DM_VERITY_ROOT_HASH_VERIFICATION_OPT_SIG_KEY
@@ -890,6 +969,9 @@ static void verity_dtr(struct dm_target *ti)
kfree(v->signature_key_desc);
+ if (v->use_tasklet)
+ static_branch_dec(&use_tasklet_enabled);
+
kfree(v);
}
@@ -968,9 +1050,10 @@ static int verity_parse_verity_mode(struct dm_verity *v, const char *arg_name)
}
static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
- struct dm_verity_sig_opts *verify_args)
+ struct dm_verity_sig_opts *verify_args,
+ bool only_modifier_opts)
{
- int r;
+ int r = 0;
unsigned argc;
struct dm_target *ti = v->ti;
const char *arg_name;
@@ -991,6 +1074,8 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
argc--;
if (verity_is_verity_mode(arg_name)) {
+ if (only_modifier_opts)
+ continue;
r = verity_parse_verity_mode(v, arg_name);
if (r) {
ti->error = "Conflicting error handling parameters";
@@ -999,6 +1084,8 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
continue;
} else if (!strcasecmp(arg_name, DM_VERITY_OPT_IGN_ZEROES)) {
+ if (only_modifier_opts)
+ continue;
r = verity_alloc_zero_digest(v);
if (r) {
ti->error = "Cannot allocate zero digest";
@@ -1007,17 +1094,29 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
continue;
} else if (!strcasecmp(arg_name, DM_VERITY_OPT_AT_MOST_ONCE)) {
+ if (only_modifier_opts)
+ continue;
r = verity_alloc_most_once(v);
if (r)
return r;
continue;
+ } else if (!strcasecmp(arg_name, DM_VERITY_OPT_TASKLET_VERIFY)) {
+ v->use_tasklet = true;
+ static_branch_inc(&use_tasklet_enabled);
+ continue;
+
} else if (verity_is_fec_opt_arg(arg_name)) {
+ if (only_modifier_opts)
+ continue;
r = verity_fec_parse_opt_args(as, v, &argc, arg_name);
if (r)
return r;
continue;
+
} else if (verity_verify_is_sig_opt_arg(arg_name)) {
+ if (only_modifier_opts)
+ continue;
r = verity_verify_sig_parse_opt_args(as, v,
verify_args,
&argc, arg_name);
@@ -1025,8 +1124,17 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
return r;
continue;
+ } else if (only_modifier_opts) {
+ /*
+ * Ignore unrecognized opt, could easily be an extra
+ * argument to an option whose parsing was skipped.
+ * Normal parsing (@only_modifier_opts=false) will
+ * properly parse all options (and their extra args).
+ */
+ continue;
}
+ DMERR("Unrecognized verity feature request: %s", arg_name);
ti->error = "Unrecognized verity feature request";
return -EINVAL;
} while (argc && !r);
@@ -1054,6 +1162,7 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
struct dm_verity_sig_opts verify_args = {0};
struct dm_arg_set as;
unsigned int num;
+ unsigned int wq_flags;
unsigned long long num_ll;
int r;
int i;
@@ -1085,6 +1194,15 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
+ /* Parse optional parameters that modify primary args */
+ if (argc > 10) {
+ as.argc = argc - 10;
+ as.argv = argv + 10;
+ r = verity_parse_opt_args(&as, v, &verify_args, true);
+ if (r < 0)
+ goto bad;
+ }
+
if (sscanf(argv[0], "%u%c", &num, &dummy) != 1 ||
num > 1) {
ti->error = "Invalid version";
@@ -1156,7 +1274,8 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
- v->tfm = crypto_alloc_ahash(v->alg_name, 0, 0);
+ v->tfm = crypto_alloc_ahash(v->alg_name, 0,
+ v->use_tasklet ? CRYPTO_ALG_ASYNC : 0);
if (IS_ERR(v->tfm)) {
ti->error = "Cannot initialize hash function";
r = PTR_ERR(v->tfm);
@@ -1218,8 +1337,7 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (argc) {
as.argc = argc;
as.argv = argv;
-
- r = verity_parse_opt_args(&as, v, &verify_args);
+ r = verity_parse_opt_args(&as, v, &verify_args, false);
if (r < 0)
goto bad;
}
@@ -1266,7 +1384,8 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
v->bufio = dm_bufio_client_create(v->hash_dev->bdev,
1 << v->hash_dev_block_bits, 1, sizeof(struct buffer_aux),
- dm_bufio_alloc_callback, NULL);
+ dm_bufio_alloc_callback, NULL,
+ v->use_tasklet ? DM_BUFIO_CLIENT_NO_SLEEP : 0);
if (IS_ERR(v->bufio)) {
ti->error = "Cannot initialize dm-bufio";
r = PTR_ERR(v->bufio);
@@ -1281,7 +1400,16 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
}
/* WQ_UNBOUND greatly improves performance when running on ramdisk */
- v->verify_wq = alloc_workqueue("kverityd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus());
+ wq_flags = WQ_MEM_RECLAIM | WQ_UNBOUND;
+ if (v->use_tasklet) {
+ /*
+ * Allow verify_wq to preempt softirq since verification in
+ * tasklet will fall-back to using it for error handling
+ * (or if the bufio cache doesn't have required hashes).
+ */
+ wq_flags |= WQ_HIGHPRI;
+ }
+ v->verify_wq = alloc_workqueue("kverityd", wq_flags, num_online_cpus());
if (!v->verify_wq) {
ti->error = "Cannot allocate workqueue";
r = -ENOMEM;
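The WQ_HIGHPRI rationale above rests on the fall-back path: verification is first attempted in tasklet (softirq) context and only re-queued to the workqueue when it cannot finish there. A minimal sketch of that shape, with hypothetical demo_* names and try_fast_verify() standing in for the real cache-only verification, not the actual dm-verity code:

#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct demo_io {
	struct tasklet_struct tasklet;
	struct work_struct work;
	struct workqueue_struct *wq;	/* created with WQ_HIGHPRI elsewhere */
};

static bool try_fast_verify(struct demo_io *io)
{
	return false;			/* hypothetical cache-only attempt, must not sleep */
}

static void demo_work_fn(struct work_struct *work)
{
	/* full verification path; may sleep and allocate */
}

static void demo_tasklet_fn(struct tasklet_struct *t)
{
	struct demo_io *io = container_of(t, struct demo_io, tasklet);

	/* softirq context: on a cache miss or error, punt to the workqueue */
	if (!try_fast_verify(io))
		queue_work(io->wq, &io->work);
}

static void demo_io_init(struct demo_io *io)
{
	tasklet_setup(&io->tasklet, demo_tasklet_fn);
	INIT_WORK(&io->work, demo_work_fn);
}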
@@ -1343,7 +1471,7 @@ int dm_verity_get_root_digest(struct dm_target *ti, u8 **root_digest, unsigned i
static struct target_type verity_target = {
.name = "verity",
.features = DM_TARGET_IMMUTABLE,
- .version = {1, 8, 1},
+ .version = {1, 9, 0},
.module = THIS_MODULE,
.ctr = verity_ctr,
.dtr = verity_dtr,
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index c832cc3e3d24..45455de1b4bc 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -13,6 +13,7 @@
#include <linux/dm-bufio.h>
#include <linux/device-mapper.h>
+#include <linux/interrupt.h>
#include <crypto/hash.h>
#define DM_VERITY_MAX_LEVELS 63
@@ -51,9 +52,10 @@ struct dm_verity {
unsigned char hash_per_block_bits; /* log2(hashes in hash block) */
unsigned char levels; /* the number of tree levels */
unsigned char version;
+ bool hash_failed:1; /* set if hash of any block failed */
+ bool use_tasklet:1; /* try to verify in tasklet before work-queue */
unsigned digest_size; /* digest size for the current hash algorithm */
unsigned int ahash_reqsize;/* the size of temporary space for crypto */
- int hash_failed; /* set to 1 if hash of any block failed */
enum verity_mode mode; /* mode for handling verification errors */
unsigned corrupted_errs;/* Number of errors for corrupted blocks */
@@ -76,10 +78,12 @@ struct dm_verity_io {
sector_t block;
unsigned n_blocks;
+ bool in_tasklet;
struct bvec_iter iter;
struct work_struct work;
+ struct tasklet_struct tasklet;
/*
* Three variably-size fields follow this struct:
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 1fc161d65673..96a003eb7323 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -1594,7 +1594,8 @@ done:
default:
BUG();
- return -1;
+ wc_unlock(wc);
+ return DM_MAPIO_KILL;
}
}
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 34db364c23a8..0278482fac94 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -2945,7 +2945,9 @@ int dmz_ctr_metadata(struct dmz_dev *dev, int num_dev,
zmd->mblk_shrinker.seeks = DEFAULT_SEEKS;
/* Metadata cache shrinker */
- ret = register_shrinker(&zmd->mblk_shrinker);
+ ret = register_shrinker(&zmd->mblk_shrinker, "md-meta:(%u:%u)",
+ MAJOR(dev->bdev->bd_dev),
+ MINOR(dev->bdev->bd_dev));
if (ret) {
dmz_zmd_err(zmd, "Register metadata cache shrinker failed");
goto err;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 99642f69bfa7..60549b65c799 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -752,7 +752,7 @@ static int open_table_device(struct table_device *td, dev_t dev,
}
td->dm_dev.bdev = bdev;
- td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off);
+ td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off, NULL, NULL);
return 0;
}
@@ -1016,7 +1016,7 @@ static void dm_wq_requeue_work(struct work_struct *work)
while (io) {
struct dm_io *next = io->next;
- dm_io_rewind(io, &md->queue->bio_split);
+ dm_io_rewind(io, &md->disk->bio_split);
io->next = NULL;
__dm_io_complete(io, false);
@@ -1181,7 +1181,7 @@ static sector_t max_io_len(struct dm_target *ti, sector_t sector)
* Does the target need to split IO even further?
* - varied (per target) IO splitting is a tenet of DM; this
* explains why stacked chunk_sectors based splitting via
- * blk_queue_split() isn't possible here.
+ * bio_split_to_limits() isn't possible here.
*/
if (!ti->max_io_len)
return len;
@@ -1751,10 +1751,10 @@ static void dm_split_and_process_bio(struct mapped_device *md,
is_abnormal = is_abnormal_io(bio);
if (unlikely(is_abnormal)) {
/*
- * Use blk_queue_split() for abnormal IO (e.g. discard, etc)
+ * Use bio_split_to_limits() for abnormal IO (e.g. discard, etc)
* otherwise associated queue_limits won't be imposed.
*/
- blk_queue_split(&bio);
+ bio = bio_split_to_limits(bio);
}
init_clone_info(&ci, md, map, bio, is_abnormal);
diff --git a/drivers/md/md-autodetect.c b/drivers/md/md-autodetect.c
index 2cf973722f59..91836e6de326 100644
--- a/drivers/md/md-autodetect.c
+++ b/drivers/md/md-autodetect.c
@@ -125,7 +125,6 @@ static void __init md_setup_drive(struct md_setup_args *args)
char *devname = args->device_names;
dev_t devices[MD_SB_DISKS + 1], mdev;
struct mdu_array_info_s ainfo = { };
- struct block_device *bdev;
struct mddev *mddev;
int err = 0, i;
char name[16];
@@ -169,24 +168,16 @@ static void __init md_setup_drive(struct md_setup_args *args)
pr_info("md: Loading %s: %s\n", name, args->device_names);
- bdev = blkdev_get_by_dev(mdev, FMODE_READ, NULL);
- if (IS_ERR(bdev)) {
- pr_err("md: open failed - cannot start array %s\n", name);
+ mddev = md_alloc(mdev, name);
+ if (IS_ERR(mddev)) {
+ pr_err("md: md_alloc failed - cannot start array %s\n", name);
return;
}
- err = -EIO;
- if (WARN(bdev->bd_disk->fops != &md_fops,
- "Opening block device %x resulted in non-md device\n",
- mdev))
- goto out_blkdev_put;
-
- mddev = bdev->bd_disk->private_data;
-
err = mddev_lock(mddev);
if (err) {
pr_err("md: failed to lock array %s\n", name);
- goto out_blkdev_put;
+ goto out_mddev_put;
}
if (!list_empty(&mddev->disks) || mddev->raid_disks) {
@@ -230,8 +221,8 @@ static void __init md_setup_drive(struct md_setup_args *args)
pr_warn("md: starting %s failed\n", name);
out_unlock:
mddev_unlock(mddev);
-out_blkdev_put:
- blkdev_put(bdev, FMODE_READ);
+out_mddev_put:
+ mddev_put(mddev);
}
static int __init raid_setup(char *str)
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 37cbcce3cc66..742b2349fea3 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -40,7 +40,7 @@ struct resync_info {
/* Lock the send communication. This is done through
* bit manipulation as opposed to a mutex in order to
- * accomodate lock and hold. See next comment.
+ * accommodate lock and hold. See next comment.
*/
#define MD_CLUSTER_SEND_LOCK 4
/* If cluster operations (such as adding a disk) must lock the
@@ -689,7 +689,7 @@ static int lock_comm(struct md_cluster_info *cinfo, bool mddev_locked)
/*
* If resync thread run after raid1d thread, then process_metadata_update
* could not continue if raid1d held reconfig_mutex (and raid1d is blocked
- * since another node already got EX on Token and waitting the EX of Ack),
+ * since another node already got EX on Token and waiting the EX of Ack),
* so let resync wake up thread in case flag is set.
*/
if (mddev_locked && !test_bit(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD,
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4df78e30b76a..729be2c5296c 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -368,28 +368,6 @@ EXPORT_SYMBOL_GPL(md_new_event);
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);
-/*
- * iterates through all used mddevs in the system.
- * We take care to grab the all_mddevs_lock whenever navigating
- * the list, and to always hold a refcount when unlocked.
- * Any code which breaks out of this loop while own
- * a reference to the current mddev and must mddev_put it.
- */
-#define for_each_mddev(_mddev,_tmp) \
- \
- for (({ spin_lock(&all_mddevs_lock); \
- _tmp = all_mddevs.next; \
- _mddev = NULL;}); \
- ({ if (_tmp != &all_mddevs) \
- mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
- spin_unlock(&all_mddevs_lock); \
- if (_mddev) mddev_put(_mddev); \
- _mddev = list_entry(_tmp, struct mddev, all_mddevs); \
- _tmp != &all_mddevs;}); \
- ({ spin_lock(&all_mddevs_lock); \
- _tmp = _tmp->next;}) \
- )
-
/* Rather than calling directly into the personality make_request function,
* IO requests come here first so that we can check if the device is
* being suspended pending a reconfiguration.
@@ -464,7 +442,7 @@ static void md_submit_bio(struct bio *bio)
return;
}
- blk_queue_split(&bio);
+ bio = bio_split_to_limits(bio);
if (mddev->ro == 1 && unlikely(rw == WRITE)) {
if (bio_sectors(bio) != 0)
@@ -647,13 +625,17 @@ EXPORT_SYMBOL(md_flush_request);
static inline struct mddev *mddev_get(struct mddev *mddev)
{
+ lockdep_assert_held(&all_mddevs_lock);
+
+ if (test_bit(MD_DELETED, &mddev->flags))
+ return NULL;
atomic_inc(&mddev->active);
return mddev;
}
static void mddev_delayed_delete(struct work_struct *ws);
-static void mddev_put(struct mddev *mddev)
+void mddev_put(struct mddev *mddev)
{
if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
return;
@@ -661,7 +643,7 @@ static void mddev_put(struct mddev *mddev)
mddev->ctime == 0 && !mddev->hold_active) {
/* Array is not configured at all, and not held active,
* so destroy it */
- list_del_init(&mddev->all_mddevs);
+ set_bit(MD_DELETED, &mddev->flags);
/*
* Call queue_work inside the spinlock so that
@@ -678,7 +660,6 @@ static void md_safemode_timeout(struct timer_list *t);
void mddev_init(struct mddev *mddev)
{
- kobject_init(&mddev->kobj, &md_ktype);
mutex_init(&mddev->open_mutex);
mutex_init(&mddev->reconfig_mutex);
mutex_init(&mddev->bitmap_info.mutex);
@@ -733,22 +714,6 @@ static dev_t mddev_alloc_unit(void)
return dev;
}
-static struct mddev *mddev_find(dev_t unit)
-{
- struct mddev *mddev;
-
- if (MAJOR(unit) != MD_MAJOR)
- unit &= ~((1 << MdpMinorShift) - 1);
-
- spin_lock(&all_mddevs_lock);
- mddev = mddev_find_locked(unit);
- if (mddev)
- mddev_get(mddev);
- spin_unlock(&all_mddevs_lock);
-
- return mddev;
-}
-
static struct mddev *mddev_alloc(dev_t unit)
{
struct mddev *new;
@@ -791,6 +756,15 @@ out_free_new:
return ERR_PTR(error);
}
+static void mddev_free(struct mddev *mddev)
+{
+ spin_lock(&all_mddevs_lock);
+ list_del(&mddev->all_mddevs);
+ spin_unlock(&all_mddevs_lock);
+
+ kfree(mddev);
+}
+
static const struct attribute_group md_redundancy_group;
void mddev_unlock(struct mddev *mddev)
@@ -3335,14 +3309,35 @@ rdev_size_show(struct md_rdev *rdev, char *page)
return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
}
-static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
+static int md_rdevs_overlap(struct md_rdev *a, struct md_rdev *b)
{
/* check if two start/length pairs overlap */
- if (s1+l1 <= s2)
- return 0;
- if (s2+l2 <= s1)
- return 0;
- return 1;
+ if (a->data_offset + a->sectors <= b->data_offset)
+ return false;
+ if (b->data_offset + b->sectors <= a->data_offset)
+ return false;
+ return true;
+}
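md_rdevs_overlap() is the usual half-open interval test: two ranges [start, start + len) on the same device are disjoint exactly when one ends at or before the other begins, so offsets 0..99 and 100..149 do not overlap while 0..99 and 50..149 do. A standalone illustration of the same condition:

#include <assert.h>
#include <stdbool.h>

static bool ranges_overlap(unsigned long long s1, unsigned long long l1,
			   unsigned long long s2, unsigned long long l2)
{
	return !(s1 + l1 <= s2 || s2 + l2 <= s1);
}

int main(void)
{
	assert(!ranges_overlap(0, 100, 100, 50));	/* touching ends: no overlap */
	assert(ranges_overlap(0, 100, 50, 100));	/* sectors 50..99 shared */
	return 0;
}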
+
+static bool md_rdev_overlaps(struct md_rdev *rdev)
+{
+ struct mddev *mddev;
+ struct md_rdev *rdev2;
+
+ spin_lock(&all_mddevs_lock);
+ list_for_each_entry(mddev, &all_mddevs, all_mddevs) {
+ if (test_bit(MD_DELETED, &mddev->flags))
+ continue;
+ rdev_for_each(rdev2, mddev) {
+ if (rdev != rdev2 && rdev->bdev == rdev2->bdev &&
+ md_rdevs_overlap(rdev, rdev2)) {
+ spin_unlock(&all_mddevs_lock);
+ return true;
+ }
+ }
+ }
+ spin_unlock(&all_mddevs_lock);
+ return false;
}
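md_rdev_overlaps() can walk all_mddevs under the spinlock alone because it never drops it; the callers elsewhere in this series that must drop the lock pair mddev_get(), which now fails for MD_DELETED entries, with mddev_put(). A sketch of that idiom, with a hypothetical callback name:

static void for_each_live_mddev(void (*fn)(struct mddev *mddev))
{
	struct mddev *mddev, *n;

	spin_lock(&all_mddevs_lock);
	list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) {
		if (!mddev_get(mddev))		/* MD_DELETED: skip dying arrays */
			continue;
		spin_unlock(&all_mddevs_lock);

		fn(mddev);			/* work without the list lock held */

		mddev_put(mddev);		/* may schedule deferred deletion */
		spin_lock(&all_mddevs_lock);
	}
	spin_unlock(&all_mddevs_lock);
}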
static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
@@ -3394,46 +3389,21 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
return -EINVAL; /* component must fit device */
rdev->sectors = sectors;
- if (sectors > oldsectors && my_mddev->external) {
- /* Need to check that all other rdevs with the same
- * ->bdev do not overlap. 'rcu' is sufficient to walk
- * the rdev lists safely.
- * This check does not provide a hard guarantee, it
- * just helps avoid dangerous mistakes.
- */
- struct mddev *mddev;
- int overlap = 0;
- struct list_head *tmp;
-
- rcu_read_lock();
- for_each_mddev(mddev, tmp) {
- struct md_rdev *rdev2;
- rdev_for_each(rdev2, mddev)
- if (rdev->bdev == rdev2->bdev &&
- rdev != rdev2 &&
- overlaps(rdev->data_offset, rdev->sectors,
- rdev2->data_offset,
- rdev2->sectors)) {
- overlap = 1;
- break;
- }
- if (overlap) {
- mddev_put(mddev);
- break;
- }
- }
- rcu_read_unlock();
- if (overlap) {
- /* Someone else could have slipped in a size
- * change here, but doing so is just silly.
- * We put oldsectors back because we *know* it is
- * safe, and trust userspace not to race with
- * itself
- */
- rdev->sectors = oldsectors;
- return -EBUSY;
- }
+ /*
+ * Check that all other rdevs with the same bdev do not overlap. This
+ * check does not provide a hard guarantee, it just helps avoid
+ * dangerous mistakes.
+ */
+ if (sectors > oldsectors && my_mddev->external &&
+ md_rdev_overlaps(rdev)) {
+ /*
+ * Someone else could have slipped in a size change here, but
+ * doing so is just silly. We put oldsectors back because we
+ * know it is safe, and trust userspace not to race with itself.
+ */
+ rdev->sectors = oldsectors;
+ return -EBUSY;
}
return len;
}
@@ -4830,6 +4800,19 @@ action_store(struct mddev *mddev, const char *page, size_t len)
if (work_pending(&mddev->del_work))
flush_workqueue(md_misc_wq);
if (mddev->sync_thread) {
+ sector_t save_rp = mddev->reshape_position;
+
+ mddev_unlock(mddev);
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ md_unregister_thread(&mddev->sync_thread);
+ mddev_lock_nointr(mddev);
+ /*
+ * set RECOVERY_INTR again and restore reshape
+ * position in case others changed them after
+ * got lock, eg, reshape_position_store and
+ * md_check_recovery.
+ */
+ mddev->reshape_position = save_rp;
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
md_reap_sync_thread(mddev);
}
@@ -5001,7 +4984,7 @@ static ssize_t
sync_speed_show(struct mddev *mddev, char *page)
{
unsigned long resync, dt, db;
- if (mddev->curr_resync == 0)
+ if (mddev->curr_resync == MD_RESYNC_NONE)
return sprintf(page, "none\n");
resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
dt = (jiffies - mddev->resync_mark) / HZ;
@@ -5020,8 +5003,8 @@ sync_completed_show(struct mddev *mddev, char *page)
if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
return sprintf(page, "none\n");
- if (mddev->curr_resync == 1 ||
- mddev->curr_resync == 2)
+ if (mddev->curr_resync == MD_RESYNC_YIELDED ||
+ mddev->curr_resync == MD_RESYNC_DELAYED)
return sprintf(page, "delayed\n");
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
@@ -5532,11 +5515,10 @@ md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
if (!entry->show)
return -EIO;
spin_lock(&all_mddevs_lock);
- if (list_empty(&mddev->all_mddevs)) {
+ if (!mddev_get(mddev)) {
spin_unlock(&all_mddevs_lock);
return -EBUSY;
}
- mddev_get(mddev);
spin_unlock(&all_mddevs_lock);
rv = entry->show(mddev, page);
@@ -5557,18 +5539,17 @@ md_attr_store(struct kobject *kobj, struct attribute *attr,
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
spin_lock(&all_mddevs_lock);
- if (list_empty(&mddev->all_mddevs)) {
+ if (!mddev_get(mddev)) {
spin_unlock(&all_mddevs_lock);
return -EBUSY;
}
- mddev_get(mddev);
spin_unlock(&all_mddevs_lock);
rv = entry->store(mddev, page, length);
mddev_put(mddev);
return rv;
}
-static void md_free(struct kobject *ko)
+static void md_kobj_release(struct kobject *ko)
{
struct mddev *mddev = container_of(ko, struct mddev, kobj);
@@ -5577,15 +5558,8 @@ static void md_free(struct kobject *ko)
if (mddev->sysfs_level)
sysfs_put(mddev->sysfs_level);
- if (mddev->gendisk) {
- del_gendisk(mddev->gendisk);
- put_disk(mddev->gendisk);
- }
- percpu_ref_exit(&mddev->writes_pending);
-
- bioset_exit(&mddev->bio_set);
- bioset_exit(&mddev->sync_set);
- kfree(mddev);
+ del_gendisk(mddev->gendisk);
+ put_disk(mddev->gendisk);
}
static const struct sysfs_ops md_sysfs_ops = {
@@ -5593,7 +5567,7 @@ static const struct sysfs_ops md_sysfs_ops = {
.store = md_attr_store,
};
static struct kobj_type md_ktype = {
- .release = md_free,
+ .release = md_kobj_release,
.sysfs_ops = &md_sysfs_ops,
.default_groups = md_attr_groups,
};
@@ -5604,7 +5578,6 @@ static void mddev_delayed_delete(struct work_struct *ws)
{
struct mddev *mddev = container_of(ws, struct mddev, del_work);
- kobject_del(&mddev->kobj);
kobject_put(&mddev->kobj);
}
@@ -5623,7 +5596,7 @@ int mddev_init_writes_pending(struct mddev *mddev)
}
EXPORT_SYMBOL_GPL(mddev_init_writes_pending);
-static int md_alloc(dev_t dev, char *name)
+struct mddev *md_alloc(dev_t dev, char *name)
{
/*
* If dev is zero, name is the name of a device to allocate with
@@ -5647,12 +5620,13 @@ static int md_alloc(dev_t dev, char *name)
* removed (mddev_delayed_delete).
*/
flush_workqueue(md_misc_wq);
+ flush_workqueue(md_rdev_misc_wq);
mutex_lock(&disks_mutex);
mddev = mddev_alloc(dev);
if (IS_ERR(mddev)) {
- mutex_unlock(&disks_mutex);
- return PTR_ERR(mddev);
+ error = PTR_ERR(mddev);
+ goto out_unlock;
}
partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
@@ -5670,7 +5644,7 @@ static int md_alloc(dev_t dev, char *name)
strcmp(mddev2->gendisk->disk_name, name) == 0) {
spin_unlock(&all_mddevs_lock);
error = -EEXIST;
- goto out_unlock_disks_mutex;
+ goto out_free_mddev;
}
spin_unlock(&all_mddevs_lock);
}
@@ -5683,7 +5657,7 @@ static int md_alloc(dev_t dev, char *name)
error = -ENOMEM;
disk = blk_alloc_disk(NUMA_NO_NODE);
if (!disk)
- goto out_unlock_disks_mutex;
+ goto out_free_mddev;
disk->major = MAJOR(mddev->unit);
disk->first_minor = unit << shift;
@@ -5704,25 +5678,45 @@ static int md_alloc(dev_t dev, char *name)
mddev->gendisk = disk;
error = add_disk(disk);
if (error)
- goto out_cleanup_disk;
+ goto out_put_disk;
+ kobject_init(&mddev->kobj, &md_ktype);
error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
- if (error)
- goto out_del_gendisk;
+ if (error) {
+ /*
+ * The disk is already live at this point. Clear the hold flag
+ * and let mddev_put take care of the deletion, as it isn't any
+ * different from a normal close on last release now.
+ */
+ mddev->hold_active = 0;
+ mutex_unlock(&disks_mutex);
+ mddev_put(mddev);
+ return ERR_PTR(error);
+ }
kobject_uevent(&mddev->kobj, KOBJ_ADD);
mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level");
- goto out_unlock_disks_mutex;
+ mutex_unlock(&disks_mutex);
+ return mddev;
-out_del_gendisk:
- del_gendisk(disk);
-out_cleanup_disk:
+out_put_disk:
put_disk(disk);
-out_unlock_disks_mutex:
+out_free_mddev:
+ mddev_free(mddev);
+out_unlock:
mutex_unlock(&disks_mutex);
+ return ERR_PTR(error);
+}
+
+static int md_alloc_and_put(dev_t dev, char *name)
+{
+ struct mddev *mddev = md_alloc(dev, name);
+
+ if (IS_ERR(mddev))
+ return PTR_ERR(mddev);
mddev_put(mddev);
- return error;
+ return 0;
}
static void md_probe(dev_t dev)
@@ -5730,7 +5724,7 @@ static void md_probe(dev_t dev)
if (MAJOR(dev) == MD_MAJOR && MINOR(dev) >= 512)
return;
if (create_on_open)
- md_alloc(dev, NULL);
+ md_alloc_and_put(dev, NULL);
}
static int add_named_array(const char *val, const struct kernel_param *kp)
@@ -5752,12 +5746,12 @@ static int add_named_array(const char *val, const struct kernel_param *kp)
return -E2BIG;
strscpy(buf, val, len+1);
if (strncmp(buf, "md_", 3) == 0)
- return md_alloc(0, buf);
+ return md_alloc_and_put(0, buf);
if (strncmp(buf, "md", 2) == 0 &&
isdigit(buf[2]) &&
kstrtoul(buf+2, 10, &devnum) == 0 &&
devnum <= MINORMASK)
- return md_alloc(MKDEV(MD_MAJOR, devnum), NULL);
+ return md_alloc_and_put(MKDEV(MD_MAJOR, devnum), NULL);
return -EINVAL;
}
@@ -6197,6 +6191,7 @@ static void __md_stop_writes(struct mddev *mddev)
flush_workqueue(md_misc_wq);
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ md_unregister_thread(&mddev->sync_thread);
md_reap_sync_thread(mddev);
}
@@ -6266,6 +6261,7 @@ void md_stop(struct mddev *mddev)
/* stop the array and free any attached data structures.
* This is called from dm-raid
*/
+ __md_stop_writes(mddev);
__md_stop(mddev);
bioset_exit(&mddev->bio_set);
bioset_exit(&mddev->sync_set);
@@ -6497,9 +6493,8 @@ static void autorun_devices(int part)
break;
}
- md_probe(dev);
- mddev = mddev_find(dev);
- if (!mddev)
+ mddev = md_alloc(dev, NULL);
+ if (IS_ERR(mddev))
break;
if (mddev_lock(mddev))
@@ -7782,45 +7777,33 @@ out_unlock:
static int md_open(struct block_device *bdev, fmode_t mode)
{
- /*
- * Succeed if we can lock the mddev, which confirms that
- * it isn't being stopped right now.
- */
- struct mddev *mddev = mddev_find(bdev->bd_dev);
+ struct mddev *mddev;
int err;
+ spin_lock(&all_mddevs_lock);
+ mddev = mddev_get(bdev->bd_disk->private_data);
+ spin_unlock(&all_mddevs_lock);
if (!mddev)
return -ENODEV;
- if (mddev->gendisk != bdev->bd_disk) {
- /* we are racing with mddev_put which is discarding this
- * bd_disk.
- */
- mddev_put(mddev);
- /* Wait until bdev->bd_disk is definitely gone */
- if (work_pending(&mddev->del_work))
- flush_workqueue(md_misc_wq);
- return -EBUSY;
- }
- BUG_ON(mddev != bdev->bd_disk->private_data);
-
- if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
+ err = mutex_lock_interruptible(&mddev->open_mutex);
+ if (err)
goto out;
- if (test_bit(MD_CLOSING, &mddev->flags)) {
- mutex_unlock(&mddev->open_mutex);
- err = -ENODEV;
- goto out;
- }
+ err = -ENODEV;
+ if (test_bit(MD_CLOSING, &mddev->flags))
+ goto out_unlock;
- err = 0;
atomic_inc(&mddev->openers);
mutex_unlock(&mddev->open_mutex);
bdev_check_media_change(bdev);
- out:
- if (err)
- mddev_put(mddev);
+ return 0;
+
+out_unlock:
+ mutex_unlock(&mddev->open_mutex);
+out:
+ mddev_put(mddev);
return err;
}
@@ -7844,6 +7827,17 @@ static unsigned int md_check_events(struct gendisk *disk, unsigned int clearing)
return ret;
}
+static void md_free_disk(struct gendisk *disk)
+{
+ struct mddev *mddev = disk->private_data;
+
+ percpu_ref_exit(&mddev->writes_pending);
+ bioset_exit(&mddev->bio_set);
+ bioset_exit(&mddev->sync_set);
+
+ mddev_free(mddev);
+}
+
const struct block_device_operations md_fops =
{
.owner = THIS_MODULE,
@@ -7857,6 +7851,7 @@ const struct block_device_operations md_fops =
.getgeo = md_getgeo,
.check_events = md_check_events,
.set_read_only = md_set_read_only,
+ .free_disk = md_free_disk,
};
static int md_thread(void *arg)
@@ -8018,16 +8013,26 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
max_sectors = mddev->dev_sectors;
resync = mddev->curr_resync;
- if (resync <= 3) {
+ if (resync < MD_RESYNC_ACTIVE) {
if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
/* Still cleaning up */
resync = max_sectors;
- } else if (resync > max_sectors)
+ } else if (resync > max_sectors) {
resync = max_sectors;
- else
+ } else {
resync -= atomic_read(&mddev->recovery_active);
+ if (resync < MD_RESYNC_ACTIVE) {
+ /*
+ * Resync has started, but the subtraction has
+ * yielded one of the special values. Force it
+ * to active to ensure the status reports an
+ * active resync.
+ */
+ resync = MD_RESYNC_ACTIVE;
+ }
+ }
- if (resync == 0) {
+ if (resync == MD_RESYNC_NONE) {
if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) {
struct md_rdev *rdev;
@@ -8051,7 +8056,7 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
}
return 0;
}
- if (resync < 3) {
+ if (resync < MD_RESYNC_ACTIVE) {
seq_printf(seq, "\tresync=DELAYED");
return 1;
}
@@ -8152,6 +8157,8 @@ static void *md_seq_start(struct seq_file *seq, loff_t *pos)
if (!l--) {
mddev = list_entry(tmp, struct mddev, all_mddevs);
-			mddev_get(mddev);
+ if (!mddev_get(mddev))
+ continue;
spin_unlock(&all_mddevs_lock);
return mddev;
}
@@ -8165,25 +8172,35 @@ static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct list_head *tmp;
struct mddev *next_mddev, *mddev = v;
+ struct mddev *to_put = NULL;
++*pos;
if (v == (void*)2)
return NULL;
spin_lock(&all_mddevs_lock);
- if (v == (void*)1)
+ if (v == (void*)1) {
tmp = all_mddevs.next;
- else
+ } else {
+ to_put = mddev;
+ tmp = mddev->all_mddevs.next;
+ }
+
+ for (;;) {
+ if (tmp == &all_mddevs) {
+ next_mddev = (void*)2;
+ *pos = 0x10000;
+ break;
+ }
+ next_mddev = list_entry(tmp, struct mddev, all_mddevs);
+ if (mddev_get(next_mddev))
+ break;
+ mddev = next_mddev;
tmp = mddev->all_mddevs.next;
- if (tmp != &all_mddevs)
- next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs));
- else {
- next_mddev = (void*)2;
- *pos = 0x10000;
}
spin_unlock(&all_mddevs_lock);
- if (v != (void*)1)
+ if (to_put)
mddev_put(mddev);
return next_mddev;
@@ -8682,7 +8699,6 @@ void md_do_sync(struct md_thread *thread)
unsigned long update_time;
sector_t mark_cnt[SYNC_MARKS];
int last_mark,m;
- struct list_head *tmp;
sector_t last_check;
int skipped = 0;
struct md_rdev *rdev;
@@ -8729,13 +8745,7 @@ void md_do_sync(struct md_thread *thread)
mddev->last_sync_action = action ?: desc;
- /* we overload curr_resync somewhat here.
- * 0 == not engaged in resync at all
- * 2 == checking that there is no conflict with another sync
- * 1 == like 2, but have yielded to allow conflicting resync to
- * commence
- * other == active in resync - this many blocks
- *
+ /*
* Before starting a resync we must have set curr_resync to
* 2, and then checked that every "conflicting" array has curr_resync
* less than ours. When we find one that is the same or higher
@@ -8747,24 +8757,29 @@ void md_do_sync(struct md_thread *thread)
do {
int mddev2_minor = -1;
- mddev->curr_resync = 2;
+ mddev->curr_resync = MD_RESYNC_DELAYED;
try_again:
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
goto skip;
- for_each_mddev(mddev2, tmp) {
+ spin_lock(&all_mddevs_lock);
+ list_for_each_entry(mddev2, &all_mddevs, all_mddevs) {
+ if (test_bit(MD_DELETED, &mddev2->flags))
+ continue;
if (mddev2 == mddev)
continue;
if (!mddev->parallel_resync
&& mddev2->curr_resync
&& match_mddev_units(mddev, mddev2)) {
DEFINE_WAIT(wq);
- if (mddev < mddev2 && mddev->curr_resync == 2) {
+ if (mddev < mddev2 &&
+ mddev->curr_resync == MD_RESYNC_DELAYED) {
/* arbitrarily yield */
- mddev->curr_resync = 1;
+ mddev->curr_resync = MD_RESYNC_YIELDED;
wake_up(&resync_wait);
}
- if (mddev > mddev2 && mddev->curr_resync == 1)
+ if (mddev > mddev2 &&
+ mddev->curr_resync == MD_RESYNC_YIELDED)
/* no need to wait here, we can wait the next
* time 'round when curr_resync == 2
*/
@@ -8782,7 +8797,8 @@ void md_do_sync(struct md_thread *thread)
desc, mdname(mddev),
mdname(mddev2));
}
- mddev_put(mddev2);
+ spin_unlock(&all_mddevs_lock);
+
if (signal_pending(current))
flush_signals(current);
schedule();
@@ -8792,7 +8808,8 @@ void md_do_sync(struct md_thread *thread)
finish_wait(&resync_wait, &wq);
}
}
- } while (mddev->curr_resync < 2);
+ spin_unlock(&all_mddevs_lock);
+ } while (mddev->curr_resync < MD_RESYNC_DELAYED);
j = 0;
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
@@ -8876,7 +8893,7 @@ void md_do_sync(struct md_thread *thread)
desc, mdname(mddev));
mddev->curr_resync = j;
} else
- mddev->curr_resync = 3; /* no longer delayed */
+ mddev->curr_resync = MD_RESYNC_ACTIVE; /* no longer delayed */
mddev->curr_resync_completed = j;
sysfs_notify_dirent_safe(mddev->sysfs_completed);
md_new_event();
@@ -9011,14 +9028,14 @@ void md_do_sync(struct md_thread *thread)
if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
- mddev->curr_resync > 3) {
+ mddev->curr_resync >= MD_RESYNC_ACTIVE) {
mddev->curr_resync_completed = mddev->curr_resync;
sysfs_notify_dirent_safe(mddev->sysfs_completed);
}
mddev->pers->sync_request(mddev, max_sectors, &skipped);
if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
- mddev->curr_resync > 3) {
+ mddev->curr_resync >= MD_RESYNC_ACTIVE) {
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
if (mddev->curr_resync >= mddev->recovery_cp) {
@@ -9082,7 +9099,7 @@ void md_do_sync(struct md_thread *thread)
} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
mddev->resync_min = mddev->curr_resync_completed;
set_bit(MD_RECOVERY_DONE, &mddev->recovery);
- mddev->curr_resync = 0;
+ mddev->curr_resync = MD_RESYNC_NONE;
spin_unlock(&mddev->lock);
wake_up(&resync_wait);
@@ -9303,6 +9320,7 @@ void md_check_recovery(struct mddev *mddev)
* ->spare_active and clear saved_raid_disk
*/
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ md_unregister_thread(&mddev->sync_thread);
md_reap_sync_thread(mddev);
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -9338,6 +9356,7 @@ void md_check_recovery(struct mddev *mddev)
goto unlock;
}
if (mddev->sync_thread) {
+ md_unregister_thread(&mddev->sync_thread);
md_reap_sync_thread(mddev);
goto unlock;
}
@@ -9417,8 +9436,7 @@ void md_reap_sync_thread(struct mddev *mddev)
sector_t old_dev_sectors = mddev->dev_sectors;
bool is_reshaped = false;
- /* resync has finished, collect result */
- md_unregister_thread(&mddev->sync_thread);
+ /* sync_thread should be unregistered, collect result */
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
mddev->degraded != mddev->raid_disks) {
@@ -9466,6 +9484,7 @@ void md_reap_sync_thread(struct mddev *mddev)
wake_up(&resync_wait);
/* flag recovery needed just to double check */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ sysfs_notify_dirent_safe(mddev->sysfs_completed);
sysfs_notify_dirent_safe(mddev->sysfs_action);
md_new_event();
if (mddev->event_work.func)
@@ -9544,11 +9563,14 @@ EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
static int md_notify_reboot(struct notifier_block *this,
unsigned long code, void *x)
{
- struct list_head *tmp;
- struct mddev *mddev;
+ struct mddev *mddev, *n;
int need_delay = 0;
- for_each_mddev(mddev, tmp) {
+ spin_lock(&all_mddevs_lock);
+ list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) {
+ if (!mddev_get(mddev))
+ continue;
+ spin_unlock(&all_mddevs_lock);
if (mddev_trylock(mddev)) {
if (mddev->pers)
__md_stop_writes(mddev);
@@ -9557,7 +9579,11 @@ static int md_notify_reboot(struct notifier_block *this,
mddev_unlock(mddev);
}
need_delay = 1;
+ mddev_put(mddev);
+ spin_lock(&all_mddevs_lock);
}
+ spin_unlock(&all_mddevs_lock);
+
/*
* certain more exotic SCSI devices are known to be
* volatile wrt too early system reboots. While the
@@ -9876,8 +9902,7 @@ void md_autostart_arrays(int part)
static __exit void md_exit(void)
{
- struct mddev *mddev;
- struct list_head *tmp;
+ struct mddev *mddev, *n;
int delay = 1;
unregister_blkdev(MD_MAJOR,"md");
@@ -9897,17 +9922,24 @@ static __exit void md_exit(void)
}
remove_proc_entry("mdstat", NULL);
- for_each_mddev(mddev, tmp) {
+ spin_lock(&all_mddevs_lock);
+ list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) {
+ if (!mddev_get(mddev))
+ continue;
+ spin_unlock(&all_mddevs_lock);
export_array(mddev);
mddev->ctime = 0;
mddev->hold_active = 0;
/*
- * for_each_mddev() will call mddev_put() at the end of each
- * iteration. As the mddev is now fully clear, this will
- * schedule the mddev for destruction by a workqueue, and the
+ * As the mddev is now fully clear, mddev_put will schedule
+ * the mddev for destruction by a workqueue, and the
* destroy_workqueue() below will wait for that to complete.
*/
+ mddev_put(mddev);
+ spin_lock(&all_mddevs_lock);
}
+ spin_unlock(&all_mddevs_lock);
+
destroy_workqueue(md_rdev_misc_wq);
destroy_workqueue(md_misc_wq);
destroy_workqueue(md_wq);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index b4f84b27bdef..b4e2d8b87b61 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -254,6 +254,7 @@ struct md_cluster_info;
* @MD_NOT_READY: do_md_run() is active, so 'array_state' must not report that
* array is ready yet.
* @MD_BROKEN: This is used to stop writes and mark array as failed.
+ * @MD_DELETED: This device is being deleted
*
* change UNSUPPORTED_MDDEV_FLAGS for each array type if new flag is added
*/
@@ -270,6 +271,7 @@ enum mddev_flags {
MD_UPDATING_SB,
MD_NOT_READY,
MD_BROKEN,
+ MD_DELETED,
};
enum mddev_sb_flags {
@@ -288,6 +290,21 @@ struct serial_info {
sector_t _subtree_last; /* highest sector in subtree of rb node */
};
+/*
+ * mddev->curr_resync stores the current sector of the resync but
+ * also has some overloaded values.
+ */
+enum {
+ /* No resync in progress */
+ MD_RESYNC_NONE = 0,
+ /* Yielded to allow another conflicting resync to commence */
+ MD_RESYNC_YIELDED = 1,
+ /* Delayed to check that there is no conflict with another sync */
+ MD_RESYNC_DELAYED = 2,
+ /* Any value greater than or equal to this is in an active resync */
+ MD_RESYNC_ACTIVE = 3,
+};
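With this enum the low values of curr_resync are states rather than positions; anything at or above MD_RESYNC_ACTIVE is a sector count. A small illustrative helper mirroring how status_resync() and md_do_sync() use it (not part of the patch):

static inline bool mddev_resync_active(const struct mddev *mddev)
{
	/* MD_RESYNC_NONE/YIELDED/DELAYED are states, not sector counts */
	return mddev->curr_resync >= MD_RESYNC_ACTIVE;
}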
+
struct mddev {
void *private;
struct md_personality *pers;
@@ -750,6 +767,8 @@ extern int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
extern void mddev_init(struct mddev *mddev);
+struct mddev *md_alloc(dev_t dev, char *name);
+void mddev_put(struct mddev *mddev);
extern int md_run(struct mddev *mddev);
extern int md_start(struct mddev *mddev);
extern void md_stop(struct mddev *mddev);
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 54c089a50b15..11935864f50f 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -391,7 +391,8 @@ struct dm_block_manager *dm_block_manager_create(struct block_device *bdev,
bm->bufio = dm_bufio_client_create(bdev, block_size, max_held_per_thread,
sizeof(struct buffer_aux),
dm_block_manager_alloc_callback,
- dm_block_manager_write_callback);
+ dm_block_manager_write_callback,
+ 0);
if (IS_ERR(bm->bufio)) {
r = PTR_ERR(bm->bufio);
kfree(bm);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 26545950ca42..64d6e4cd8a3a 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2167,9 +2167,12 @@ static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
int err = 0;
int number = rdev->raid_disk;
struct md_rdev **rdevp;
- struct raid10_info *p = conf->mirrors + number;
+ struct raid10_info *p;
print_conf(conf);
+ if (unlikely(number >= mddev->raid_disks))
+ return 0;
+ p = conf->mirrors + number;
if (rdev == p->rdev)
rdevp = &p->rdev;
else if (rdev == p->replacement)
@@ -2636,18 +2639,18 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
}
static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
- int sectors, struct page *page, int rw)
+ int sectors, struct page *page, enum req_op op)
{
sector_t first_bad;
int bad_sectors;
if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
- && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
+ && (op == REQ_OP_READ || test_bit(WriteErrorSeen, &rdev->flags)))
return -1;
- if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
+ if (sync_page_io(rdev, sector, sectors << 9, page, op, false))
/* success */
return 1;
- if (rw == WRITE) {
+ if (op == REQ_OP_WRITE) {
set_bit(WriteErrorSeen, &rdev->flags);
if (!test_and_set_bit(WantReplacement, &rdev->flags))
set_bit(MD_RECOVERY_NEEDED,
@@ -2777,7 +2780,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
if (r10_sync_page_io(rdev,
r10_bio->devs[sl].addr +
sect,
- s, conf->tmppage, WRITE)
+ s, conf->tmppage, REQ_OP_WRITE)
== 0) {
/* Well, this device is dead */
pr_notice("md/raid10:%s: read correction write failed (%d sectors at %llu on %pg)\n",
@@ -2811,8 +2814,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
switch (r10_sync_page_io(rdev,
r10_bio->devs[sl].addr +
sect,
- s, conf->tmppage,
- READ)) {
+ s, conf->tmppage, REQ_OP_READ)) {
case 0:
/* Well, this device is dead */
pr_notice("md/raid10:%s: unable to read back corrected sectors (%d sectors at %llu on %pg)\n",
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 6f2dd73128b0..f4e1cc1ece43 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1590,18 +1590,13 @@ void r5l_quiesce(struct r5l_log *log, int quiesce)
bool r5l_log_disk_error(struct r5conf *conf)
{
- struct r5l_log *log;
- bool ret;
- /* don't allow write if journal disk is missing */
- rcu_read_lock();
- log = rcu_dereference(conf->log);
+ struct r5l_log *log = conf->log;
+ /* don't allow write if journal disk is missing */
if (!log)
- ret = test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
+ return test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
else
- ret = test_bit(Faulty, &log->rdev->flags);
- rcu_read_unlock();
- return ret;
+ return test_bit(Faulty, &log->rdev->flags);
}
#define R5L_RECOVERY_PAGE_POOL_SIZE 256
@@ -2534,12 +2529,13 @@ static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
struct r5conf *conf;
int ret;
- spin_lock(&mddev->lock);
+ ret = mddev_lock(mddev);
+ if (ret)
+ return ret;
+
conf = mddev->private;
- if (!conf || !conf->log) {
- spin_unlock(&mddev->lock);
- return 0;
- }
+ if (!conf || !conf->log)
+ goto out_unlock;
switch (conf->log->r5c_journal_mode) {
case R5C_JOURNAL_MODE_WRITE_THROUGH:
@@ -2557,7 +2553,9 @@ static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
default:
ret = 0;
}
- spin_unlock(&mddev->lock);
+
+out_unlock:
+ mddev_unlock(mddev);
return ret;
}
@@ -2639,7 +2637,7 @@ int r5c_try_caching_write(struct r5conf *conf,
int i;
struct r5dev *dev;
int to_cache = 0;
- void **pslot;
+ void __rcu **pslot;
sector_t tree_index;
int ret;
uintptr_t refcount;
@@ -2806,7 +2804,7 @@ void r5c_finish_stripe_write_out(struct r5conf *conf,
int i;
int do_wakeup = 0;
sector_t tree_index;
- void **pslot;
+ void __rcu **pslot;
uintptr_t refcount;
if (!log || !test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags))
@@ -3145,7 +3143,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
spin_lock_init(&log->stripe_in_journal_lock);
atomic_set(&log->stripe_in_journal_count, 0);
- rcu_assign_pointer(conf->log, log);
+ conf->log = log;
set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
return 0;
@@ -3167,13 +3165,13 @@ void r5l_exit_log(struct r5conf *conf)
{
struct r5l_log *log = conf->log;
- conf->log = NULL;
- synchronize_rcu();
-
/* Ensure disable_writeback_work wakes up and exits */
wake_up(&conf->mddev->sb_wait);
flush_work(&log->disable_writeback_work);
md_unregister_thread(&log->reclaim_thread);
+
+ conf->log = NULL;
+
mempool_exit(&log->meta_pool);
bioset_exit(&log->bs);
mempool_exit(&log->io_pool);
diff --git a/drivers/md/raid5-log.h b/drivers/md/raid5-log.h
index 43c714a8798c..c8332502669e 100644
--- a/drivers/md/raid5-log.h
+++ b/drivers/md/raid5-log.h
@@ -2,49 +2,46 @@
#ifndef _RAID5_LOG_H
#define _RAID5_LOG_H
-extern int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev);
-extern void r5l_exit_log(struct r5conf *conf);
-extern int r5l_write_stripe(struct r5l_log *log, struct stripe_head *head_sh);
-extern void r5l_write_stripe_run(struct r5l_log *log);
-extern void r5l_flush_stripe_to_raid(struct r5l_log *log);
-extern void r5l_stripe_write_finished(struct stripe_head *sh);
-extern int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio);
-extern void r5l_quiesce(struct r5l_log *log, int quiesce);
-extern bool r5l_log_disk_error(struct r5conf *conf);
-extern bool r5c_is_writeback(struct r5l_log *log);
-extern int
-r5c_try_caching_write(struct r5conf *conf, struct stripe_head *sh,
- struct stripe_head_state *s, int disks);
-extern void
-r5c_finish_stripe_write_out(struct r5conf *conf, struct stripe_head *sh,
- struct stripe_head_state *s);
-extern void r5c_release_extra_page(struct stripe_head *sh);
-extern void r5c_use_extra_page(struct stripe_head *sh);
-extern void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
-extern void r5c_handle_cached_data_endio(struct r5conf *conf,
- struct stripe_head *sh, int disks);
-extern int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh);
-extern void r5c_make_stripe_write_out(struct stripe_head *sh);
-extern void r5c_flush_cache(struct r5conf *conf, int num);
-extern void r5c_check_stripe_cache_usage(struct r5conf *conf);
-extern void r5c_check_cached_full_stripe(struct r5conf *conf);
+int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev);
+void r5l_exit_log(struct r5conf *conf);
+int r5l_write_stripe(struct r5l_log *log, struct stripe_head *head_sh);
+void r5l_write_stripe_run(struct r5l_log *log);
+void r5l_flush_stripe_to_raid(struct r5l_log *log);
+void r5l_stripe_write_finished(struct stripe_head *sh);
+int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio);
+void r5l_quiesce(struct r5l_log *log, int quiesce);
+bool r5l_log_disk_error(struct r5conf *conf);
+bool r5c_is_writeback(struct r5l_log *log);
+int r5c_try_caching_write(struct r5conf *conf, struct stripe_head *sh,
+ struct stripe_head_state *s, int disks);
+void r5c_finish_stripe_write_out(struct r5conf *conf, struct stripe_head *sh,
+ struct stripe_head_state *s);
+void r5c_release_extra_page(struct stripe_head *sh);
+void r5c_use_extra_page(struct stripe_head *sh);
+void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
+void r5c_handle_cached_data_endio(struct r5conf *conf,
+ struct stripe_head *sh, int disks);
+int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh);
+void r5c_make_stripe_write_out(struct stripe_head *sh);
+void r5c_flush_cache(struct r5conf *conf, int num);
+void r5c_check_stripe_cache_usage(struct r5conf *conf);
+void r5c_check_cached_full_stripe(struct r5conf *conf);
extern struct md_sysfs_entry r5c_journal_mode;
-extern void r5c_update_on_rdev_error(struct mddev *mddev,
- struct md_rdev *rdev);
-extern bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect);
-extern int r5l_start(struct r5l_log *log);
+void r5c_update_on_rdev_error(struct mddev *mddev, struct md_rdev *rdev);
+bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect);
+int r5l_start(struct r5l_log *log);
-extern struct dma_async_tx_descriptor *
+struct dma_async_tx_descriptor *
ops_run_partial_parity(struct stripe_head *sh, struct raid5_percpu *percpu,
struct dma_async_tx_descriptor *tx);
-extern int ppl_init_log(struct r5conf *conf);
-extern void ppl_exit_log(struct r5conf *conf);
-extern int ppl_write_stripe(struct r5conf *conf, struct stripe_head *sh);
-extern void ppl_write_stripe_run(struct r5conf *conf);
-extern void ppl_stripe_write_finished(struct stripe_head *sh);
-extern int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add);
-extern void ppl_quiesce(struct r5conf *conf, int quiesce);
-extern int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio);
+int ppl_init_log(struct r5conf *conf);
+void ppl_exit_log(struct r5conf *conf);
+int ppl_write_stripe(struct r5conf *conf, struct stripe_head *sh);
+void ppl_write_stripe_run(struct r5conf *conf);
+void ppl_stripe_write_finished(struct stripe_head *sh);
+int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add);
+void ppl_quiesce(struct r5conf *conf, int quiesce);
+int ppl_handle_flush_request(struct bio *bio);
extern struct md_sysfs_entry ppl_write_hint;
static inline bool raid5_has_log(struct r5conf *conf)
@@ -111,7 +108,7 @@ static inline int log_handle_flush_request(struct r5conf *conf, struct bio *bio)
if (conf->log)
ret = r5l_handle_flush_request(conf->log, bio);
else if (raid5_has_ppl(conf))
- ret = ppl_handle_flush_request(conf->log, bio);
+ ret = ppl_handle_flush_request(bio);
return ret;
}
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 98988cb26295..31b9157bc9ae 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -679,7 +679,7 @@ void ppl_quiesce(struct r5conf *conf, int quiesce)
}
}
-int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio)
+int ppl_handle_flush_request(struct bio *bio)
{
if (bio->bi_iter.bi_size == 0) {
bio_endio(bio);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 5cabdbbac48b..31a0cbf63384 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -61,6 +61,8 @@
#define cpu_to_group(cpu) cpu_to_node(cpu)
#define ANY_GROUP NUMA_NO_NODE
+#define RAID5_MAX_REQ_STRIPES 256
+
static bool devices_handle_discard_safely = false;
module_param(devices_handle_discard_safely, bool, 0644);
MODULE_PARM_DESC(devices_handle_discard_safely,
@@ -624,6 +626,49 @@ static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
return NULL;
}
+static struct stripe_head *find_get_stripe(struct r5conf *conf,
+ sector_t sector, short generation, int hash)
+{
+ int inc_empty_inactive_list_flag;
+ struct stripe_head *sh;
+
+ sh = __find_stripe(conf, sector, generation);
+ if (!sh)
+ return NULL;
+
+ if (atomic_inc_not_zero(&sh->count))
+ return sh;
+
+ /*
+ * Slow path. The reference count is zero which means the stripe must
+ * be on a list (sh->lru). Must remove the stripe from the list that
+ * references it with the device_lock held.
+ */
+
+ spin_lock(&conf->device_lock);
+ if (!atomic_read(&sh->count)) {
+ if (!test_bit(STRIPE_HANDLE, &sh->state))
+ atomic_inc(&conf->active_stripes);
+ BUG_ON(list_empty(&sh->lru) &&
+ !test_bit(STRIPE_EXPANDING, &sh->state));
+ inc_empty_inactive_list_flag = 0;
+ if (!list_empty(conf->inactive_list + hash))
+ inc_empty_inactive_list_flag = 1;
+ list_del_init(&sh->lru);
+ if (list_empty(conf->inactive_list + hash) &&
+ inc_empty_inactive_list_flag)
+ atomic_inc(&conf->empty_inactive_list_nr);
+ if (sh->group) {
+ sh->group->stripes_cnt--;
+ sh->group = NULL;
+ }
+ }
+ atomic_inc(&sh->count);
+ spin_unlock(&conf->device_lock);
+
+ return sh;
+}
+
/*
* Need to check if array has failed when deciding whether to:
* - start an array
@@ -710,80 +755,121 @@ static bool has_failed(struct r5conf *conf)
return degraded > conf->max_degraded;
}
-struct stripe_head *
-raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
- int previous, int noblock, int noquiesce)
+enum stripe_result {
+ STRIPE_SUCCESS = 0,
+ STRIPE_RETRY,
+ STRIPE_SCHEDULE_AND_RETRY,
+ STRIPE_FAIL,
+};
+
+struct stripe_request_ctx {
+ /* a reference to the last stripe_head for batching */
+ struct stripe_head *batch_last;
+
+ /* first sector in the request */
+ sector_t first_sector;
+
+ /* last sector in the request */
+ sector_t last_sector;
+
+ /*
+ * bitmap to track stripe sectors that have been added to stripes
+ * add one to account for unaligned requests
+ */
+ DECLARE_BITMAP(sectors_to_do, RAID5_MAX_REQ_STRIPES + 1);
+
+ /* the request had REQ_PREFLUSH, cleared after the first stripe_head */
+ bool do_flush;
+};
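Each bit in sectors_to_do stands for one stripe-sized chunk of the bio, indexed relative to first_sector; add_all_stripe_bios() clears a bit once the matching stripe_head has accepted the bio. An illustrative helper (not in the patch) showing the same index arithmetic:

static unsigned int stripe_bit_nr(struct r5conf *conf,
				  struct stripe_request_ctx *ctx,
				  sector_t dev_sector)
{
	/* matches the clear_bit() arithmetic in add_all_stripe_bios() */
	return (dev_sector - ctx->first_sector) >> RAID5_STRIPE_SHIFT(conf);
}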
+
+/*
+ * Block until another thread clears R5_INACTIVE_BLOCKED or
+ * there are fewer than 3/4 the maximum number of active stripes
+ * and there is an inactive stripe available.
+ */
+static bool is_inactive_blocked(struct r5conf *conf, int hash)
+{
+ int active = atomic_read(&conf->active_stripes);
+
+ if (list_empty(conf->inactive_list + hash))
+ return false;
+
+ if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
+ return true;
+
+ return active < (conf->max_nr_stripes * 3 / 4);
+}
+
+static struct stripe_head *__raid5_get_active_stripe(struct r5conf *conf,
+ struct stripe_request_ctx *ctx, sector_t sector,
+ bool previous, bool noblock, bool noquiesce)
{
struct stripe_head *sh;
int hash = stripe_hash_locks_hash(conf, sector);
- int inc_empty_inactive_list_flag;
pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
spin_lock_irq(conf->hash_locks + hash);
- do {
- wait_event_lock_irq(conf->wait_for_quiescent,
- conf->quiesce == 0 || noquiesce,
+retry:
+ if (!noquiesce && conf->quiesce) {
+ /*
+		 * Must release the reference to batch_last before waiting
+ * on quiesce, otherwise the batch_last will hold a reference
+ * to a stripe and raid5_quiesce() will deadlock waiting for
+ * active_stripes to go to zero.
+ */
+ if (ctx && ctx->batch_last) {
+ raid5_release_stripe(ctx->batch_last);
+ ctx->batch_last = NULL;
+ }
+
+ wait_event_lock_irq(conf->wait_for_quiescent, !conf->quiesce,
*(conf->hash_locks + hash));
- sh = __find_stripe(conf, sector, conf->generation - previous);
- if (!sh) {
- if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) {
- sh = get_free_stripe(conf, hash);
- if (!sh && !test_bit(R5_DID_ALLOC,
- &conf->cache_state))
- set_bit(R5_ALLOC_MORE,
- &conf->cache_state);
- }
- if (noblock && sh == NULL)
- break;
+ }
- r5c_check_stripe_cache_usage(conf);
- if (!sh) {
- set_bit(R5_INACTIVE_BLOCKED,
- &conf->cache_state);
- r5l_wake_reclaim(conf->log, 0);
- wait_event_lock_irq(
- conf->wait_for_stripe,
- !list_empty(conf->inactive_list + hash) &&
- (atomic_read(&conf->active_stripes)
- < (conf->max_nr_stripes * 3 / 4)
- || !test_bit(R5_INACTIVE_BLOCKED,
- &conf->cache_state)),
- *(conf->hash_locks + hash));
- clear_bit(R5_INACTIVE_BLOCKED,
- &conf->cache_state);
- } else {
- init_stripe(sh, sector, previous);
- atomic_inc(&sh->count);
- }
- } else if (!atomic_inc_not_zero(&sh->count)) {
- spin_lock(&conf->device_lock);
- if (!atomic_read(&sh->count)) {
- if (!test_bit(STRIPE_HANDLE, &sh->state))
- atomic_inc(&conf->active_stripes);
- BUG_ON(list_empty(&sh->lru) &&
- !test_bit(STRIPE_EXPANDING, &sh->state));
- inc_empty_inactive_list_flag = 0;
- if (!list_empty(conf->inactive_list + hash))
- inc_empty_inactive_list_flag = 1;
- list_del_init(&sh->lru);
- if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag)
- atomic_inc(&conf->empty_inactive_list_nr);
- if (sh->group) {
- sh->group->stripes_cnt--;
- sh->group = NULL;
- }
- }
- atomic_inc(&sh->count);
- spin_unlock(&conf->device_lock);
- }
- } while (sh == NULL);
+ sh = find_get_stripe(conf, sector, conf->generation - previous, hash);
+ if (sh)
+ goto out;
+
+ if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
+ goto wait_for_stripe;
+
+ sh = get_free_stripe(conf, hash);
+ if (sh) {
+ r5c_check_stripe_cache_usage(conf);
+ init_stripe(sh, sector, previous);
+ atomic_inc(&sh->count);
+ goto out;
+ }
+
+ if (!test_bit(R5_DID_ALLOC, &conf->cache_state))
+ set_bit(R5_ALLOC_MORE, &conf->cache_state);
+
+wait_for_stripe:
+ if (noblock)
+ goto out;
+ set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
+ r5l_wake_reclaim(conf->log, 0);
+ wait_event_lock_irq(conf->wait_for_stripe,
+ is_inactive_blocked(conf, hash),
+ *(conf->hash_locks + hash));
+ clear_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
+ goto retry;
+
+out:
spin_unlock_irq(conf->hash_locks + hash);
return sh;
}
+struct stripe_head *raid5_get_active_stripe(struct r5conf *conf,
+ sector_t sector, bool previous, bool noblock, bool noquiesce)
+{
+ return __raid5_get_active_stripe(conf, NULL, sector, previous, noblock,
+ noquiesce);
+}
+
static bool is_full_stripe_write(struct stripe_head *sh)
{
BUG_ON(sh->overwrite_disks > (sh->disks - sh->raid_conf->max_degraded));
@@ -824,13 +910,13 @@ static bool stripe_can_batch(struct stripe_head *sh)
}
/* we only do back search */
-static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh)
+static void stripe_add_to_batch_list(struct r5conf *conf,
+ struct stripe_head *sh, struct stripe_head *last_sh)
{
struct stripe_head *head;
sector_t head_sector, tmp_sec;
int hash;
int dd_idx;
- int inc_empty_inactive_list_flag;
/* Don't cross chunks, so stripe pd_idx/qd_idx is the same */
tmp_sec = sh->sector;
@@ -838,36 +924,20 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
return;
head_sector = sh->sector - RAID5_STRIPE_SECTORS(conf);
- hash = stripe_hash_locks_hash(conf, head_sector);
- spin_lock_irq(conf->hash_locks + hash);
- head = __find_stripe(conf, head_sector, conf->generation);
- if (head && !atomic_inc_not_zero(&head->count)) {
- spin_lock(&conf->device_lock);
- if (!atomic_read(&head->count)) {
- if (!test_bit(STRIPE_HANDLE, &head->state))
- atomic_inc(&conf->active_stripes);
- BUG_ON(list_empty(&head->lru) &&
- !test_bit(STRIPE_EXPANDING, &head->state));
- inc_empty_inactive_list_flag = 0;
- if (!list_empty(conf->inactive_list + hash))
- inc_empty_inactive_list_flag = 1;
- list_del_init(&head->lru);
- if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag)
- atomic_inc(&conf->empty_inactive_list_nr);
- if (head->group) {
- head->group->stripes_cnt--;
- head->group = NULL;
- }
- }
+ if (last_sh && head_sector == last_sh->sector) {
+ head = last_sh;
atomic_inc(&head->count);
- spin_unlock(&conf->device_lock);
+ } else {
+ hash = stripe_hash_locks_hash(conf, head_sector);
+ spin_lock_irq(conf->hash_locks + hash);
+ head = find_get_stripe(conf, head_sector, conf->generation,
+ hash);
+ spin_unlock_irq(conf->hash_locks + hash);
+ if (!head)
+ return;
+ if (!stripe_can_batch(head))
+ goto out;
}
- spin_unlock_irq(conf->hash_locks + hash);
-
- if (!head)
- return;
- if (!stripe_can_batch(head))
- goto out;
lock_two_stripes(head, sh);
/* clear_batch_ready clear the flag */
@@ -2882,10 +2952,10 @@ static void raid5_end_write_request(struct bio *bi)
if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
- raid5_release_stripe(sh);
if (sh->batch_head && sh != sh->batch_head)
raid5_release_stripe(sh->batch_head);
+ raid5_release_stripe(sh);
}
static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
@@ -3413,39 +3483,32 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
s->locked, s->ops_request);
}
-/*
- * Each stripe/dev can have one or more bion attached.
- * toread/towrite point to the first in a chain.
- * The bi_next chain must be in order.
- */
-static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
- int forwrite, int previous)
+static bool stripe_bio_overlaps(struct stripe_head *sh, struct bio *bi,
+ int dd_idx, int forwrite)
{
- struct bio **bip;
struct r5conf *conf = sh->raid_conf;
- int firstwrite=0;
+ struct bio **bip;
- pr_debug("adding bi b#%llu to stripe s#%llu\n",
- (unsigned long long)bi->bi_iter.bi_sector,
- (unsigned long long)sh->sector);
+ pr_debug("checking bi b#%llu to stripe s#%llu\n",
+ bi->bi_iter.bi_sector, sh->sector);
- spin_lock_irq(&sh->stripe_lock);
/* Don't allow new IO added to stripes in batch list */
if (sh->batch_head)
- goto overlap;
- if (forwrite) {
+ return true;
+
+ if (forwrite)
bip = &sh->dev[dd_idx].towrite;
- if (*bip == NULL)
- firstwrite = 1;
- } else
+ else
bip = &sh->dev[dd_idx].toread;
+
while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) {
if (bio_end_sector(*bip) > bi->bi_iter.bi_sector)
- goto overlap;
- bip = & (*bip)->bi_next;
+ return true;
+ bip = &(*bip)->bi_next;
}
+
if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi))
- goto overlap;
+ return true;
if (forwrite && raid5_has_ppl(conf)) {
/*
@@ -3474,9 +3537,30 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
}
if (first + conf->chunk_sectors * (count - 1) != last)
- goto overlap;
+ return true;
+ }
+
+ return false;
+}
+
+static void __add_stripe_bio(struct stripe_head *sh, struct bio *bi,
+ int dd_idx, int forwrite, int previous)
+{
+ struct r5conf *conf = sh->raid_conf;
+ struct bio **bip;
+ int firstwrite = 0;
+
+ if (forwrite) {
+ bip = &sh->dev[dd_idx].towrite;
+ if (!*bip)
+ firstwrite = 1;
+ } else {
+ bip = &sh->dev[dd_idx].toread;
}
+ while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector)
+ bip = &(*bip)->bi_next;
+
if (!forwrite || previous)
clear_bit(STRIPE_BATCH_READY, &sh->state);
@@ -3502,9 +3586,9 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
sh->overwrite_disks++;
}
- pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
- (unsigned long long)(*bip)->bi_iter.bi_sector,
- (unsigned long long)sh->sector, dd_idx);
+ pr_debug("added bi b#%llu to stripe s#%llu, disk %d, logical %llu\n",
+ (*bip)->bi_iter.bi_sector, sh->sector, dd_idx,
+ sh->dev[dd_idx].sector);
if (conf->mddev->bitmap && firstwrite) {
/* Cannot hold spinlock over bitmap_startwrite,
@@ -3512,7 +3596,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
* we have added to the bitmap and set bm_seq.
* So set STRIPE_BITMAP_PENDING to prevent
* batching.
- * If multiple add_stripe_bio() calls race here they
+ * If multiple __add_stripe_bio() calls race here they
* must all set STRIPE_BITMAP_PENDING. So only the first one
* to complete "bitmap_startwrite" gets to set
* STRIPE_BIT_DELAY. This is important as once a stripe
@@ -3530,16 +3614,27 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
set_bit(STRIPE_BIT_DELAY, &sh->state);
}
}
- spin_unlock_irq(&sh->stripe_lock);
+}
- if (stripe_can_batch(sh))
- stripe_add_to_batch_list(conf, sh);
- return 1;
+/*
+ * Each stripe/dev can have one or more bios attached.
+ * toread/towrite point to the first in a chain.
+ * The bi_next chain must be in order.
+ */
+static bool add_stripe_bio(struct stripe_head *sh, struct bio *bi,
+ int dd_idx, int forwrite, int previous)
+{
+ spin_lock_irq(&sh->stripe_lock);
+
+ if (stripe_bio_overlaps(sh, bi, dd_idx, forwrite)) {
+ set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
+ spin_unlock_irq(&sh->stripe_lock);
+ return false;
+ }
- overlap:
- set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
+ __add_stripe_bio(sh, bi, dd_idx, forwrite, previous);
spin_unlock_irq(&sh->stripe_lock);
- return 0;
+ return true;
}
static void end_reshape(struct r5conf *conf);
@@ -5785,17 +5880,215 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
bio_endio(bi);
}
-static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
+static bool ahead_of_reshape(struct mddev *mddev, sector_t sector,
+ sector_t reshape_sector)
{
- struct r5conf *conf = mddev->private;
+ return mddev->reshape_backwards ? sector < reshape_sector :
+ sector >= reshape_sector;
+}
+
+static bool range_ahead_of_reshape(struct mddev *mddev, sector_t min,
+ sector_t max, sector_t reshape_sector)
+{
+ return mddev->reshape_backwards ? max < reshape_sector :
+ min >= reshape_sector;
+}
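ahead_of_reshape() reports whether a sector still sits in the not-yet-reshaped part of the array, folding the old reshape_backwards ternaries into one helper. A worked example, assuming the common forward reshape (reshape_backwards == 0) with reshape_progress at sector 2048:

	ahead_of_reshape(mddev, 1024, 2048);	/* false: already migrated, new layout applies */
	ahead_of_reshape(mddev, 4096, 2048);	/* true: not reached yet, old layout still applies */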
+
+static bool stripe_ahead_of_reshape(struct mddev *mddev, struct r5conf *conf,
+ struct stripe_head *sh)
+{
+ sector_t max_sector = 0, min_sector = MaxSector;
+ bool ret = false;
int dd_idx;
- sector_t new_sector;
- sector_t logical_sector, last_sector;
+
+ for (dd_idx = 0; dd_idx < sh->disks; dd_idx++) {
+ if (dd_idx == sh->pd_idx)
+ continue;
+
+ min_sector = min(min_sector, sh->dev[dd_idx].sector);
+		max_sector = max(max_sector, sh->dev[dd_idx].sector);
+ }
+
+ spin_lock_irq(&conf->device_lock);
+
+ if (!range_ahead_of_reshape(mddev, min_sector, max_sector,
+ conf->reshape_progress))
+ /* mismatch, need to try again */
+ ret = true;
+
+ spin_unlock_irq(&conf->device_lock);
+
+ return ret;
+}
+
+static int add_all_stripe_bios(struct r5conf *conf,
+ struct stripe_request_ctx *ctx, struct stripe_head *sh,
+ struct bio *bi, int forwrite, int previous)
+{
+ int dd_idx;
+ int ret = 1;
+
+ spin_lock_irq(&sh->stripe_lock);
+
+ for (dd_idx = 0; dd_idx < sh->disks; dd_idx++) {
+ struct r5dev *dev = &sh->dev[dd_idx];
+
+ if (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
+ continue;
+
+ if (dev->sector < ctx->first_sector ||
+ dev->sector >= ctx->last_sector)
+ continue;
+
+ if (stripe_bio_overlaps(sh, bi, dd_idx, forwrite)) {
+ set_bit(R5_Overlap, &dev->flags);
+ ret = 0;
+ continue;
+ }
+ }
+
+ if (!ret)
+ goto out;
+
+ for (dd_idx = 0; dd_idx < sh->disks; dd_idx++) {
+ struct r5dev *dev = &sh->dev[dd_idx];
+
+ if (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
+ continue;
+
+ if (dev->sector < ctx->first_sector ||
+ dev->sector >= ctx->last_sector)
+ continue;
+
+ __add_stripe_bio(sh, bi, dd_idx, forwrite, previous);
+ clear_bit((dev->sector - ctx->first_sector) >>
+ RAID5_STRIPE_SHIFT(conf), ctx->sectors_to_do);
+ }
+
+out:
+ spin_unlock_irq(&sh->stripe_lock);
+ return ret;
+}
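Each bit in ctx->sectors_to_do stands for one stripe of the request, and the clear_bit() above derives the bit index from the dev's offset into the request in stripe units. A hedged example, assuming the default 4 KiB stripe (RAID5_STRIPE_SECTORS == 8, so RAID5_STRIPE_SHIFT == 3) and a hypothetical request starting at logical sector 1000:

	/* a dev covering sectors 1016..1023 corresponds to bit (1016 - 1000) >> 3 == 2 */
	clear_bit((1016 - 1000) >> 3, ctx->sectors_to_do);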
+
+static enum stripe_result make_stripe_request(struct mddev *mddev,
+ struct r5conf *conf, struct stripe_request_ctx *ctx,
+ sector_t logical_sector, struct bio *bi)
+{
+ const int rw = bio_data_dir(bi);
+ enum stripe_result ret;
struct stripe_head *sh;
+ sector_t new_sector;
+ int previous = 0;
+ int seq, dd_idx;
+
+ seq = read_seqcount_begin(&conf->gen_lock);
+
+ if (unlikely(conf->reshape_progress != MaxSector)) {
+ /*
+ * Spinlock is needed as reshape_progress may be
+ * 64bit on a 32bit platform, and so it might be
+	 * possible to see a half-updated value.
+ * Of course reshape_progress could change after
+ * the lock is dropped, so once we get a reference
+ * to the stripe that we think it is, we will have
+ * to check again.
+ */
+ spin_lock_irq(&conf->device_lock);
+ if (ahead_of_reshape(mddev, logical_sector,
+ conf->reshape_progress)) {
+ previous = 1;
+ } else {
+ if (ahead_of_reshape(mddev, logical_sector,
+ conf->reshape_safe)) {
+ spin_unlock_irq(&conf->device_lock);
+ return STRIPE_SCHEDULE_AND_RETRY;
+ }
+ }
+ spin_unlock_irq(&conf->device_lock);
+ }
+
+ new_sector = raid5_compute_sector(conf, logical_sector, previous,
+ &dd_idx, NULL);
+ pr_debug("raid456: %s, sector %llu logical %llu\n", __func__,
+ new_sector, logical_sector);
+
+ sh = __raid5_get_active_stripe(conf, ctx, new_sector, previous,
+ (bi->bi_opf & REQ_RAHEAD), 0);
+ if (unlikely(!sh)) {
+		/* cannot get a stripe, just give up */
+ bi->bi_status = BLK_STS_IOERR;
+ return STRIPE_FAIL;
+ }
+
+ if (unlikely(previous) &&
+ stripe_ahead_of_reshape(mddev, conf, sh)) {
+ /*
+ * Expansion moved on while waiting for a stripe.
+ * Expansion could still move past after this
+ * test, but as we are holding a reference to
+ * 'sh', we know that if that happens,
+ * STRIPE_EXPANDING will get set and the expansion
+ * won't proceed until we finish with the stripe.
+ */
+ ret = STRIPE_SCHEDULE_AND_RETRY;
+ goto out_release;
+ }
+
+ if (read_seqcount_retry(&conf->gen_lock, seq)) {
+ /* Might have got the wrong stripe_head by accident */
+ ret = STRIPE_RETRY;
+ goto out_release;
+ }
+
+ if (test_bit(STRIPE_EXPANDING, &sh->state) ||
+ !add_all_stripe_bios(conf, ctx, sh, bi, rw, previous)) {
+ /*
+ * Stripe is busy expanding or add failed due to
+ * overlap. Flush everything and wait a while.
+ */
+ md_wakeup_thread(mddev->thread);
+ ret = STRIPE_SCHEDULE_AND_RETRY;
+ goto out_release;
+ }
+
+ if (stripe_can_batch(sh)) {
+ stripe_add_to_batch_list(conf, sh, ctx->batch_last);
+ if (ctx->batch_last)
+ raid5_release_stripe(ctx->batch_last);
+ atomic_inc(&sh->count);
+ ctx->batch_last = sh;
+ }
+
+ if (ctx->do_flush) {
+ set_bit(STRIPE_R5C_PREFLUSH, &sh->state);
+ /* we only need flush for one stripe */
+ ctx->do_flush = false;
+ }
+
+ set_bit(STRIPE_HANDLE, &sh->state);
+ clear_bit(STRIPE_DELAYED, &sh->state);
+ if ((!sh->batch_head || sh == sh->batch_head) &&
+ (bi->bi_opf & REQ_SYNC) &&
+ !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+ atomic_inc(&conf->preread_active_stripes);
+
+ release_stripe_plug(mddev, sh);
+ return STRIPE_SUCCESS;
+
+out_release:
+ raid5_release_stripe(sh);
+ return ret;
+}
+
+static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
+{
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ struct r5conf *conf = mddev->private;
+ sector_t logical_sector;
+ struct stripe_request_ctx ctx = {};
const int rw = bio_data_dir(bi);
- DEFINE_WAIT(w);
- bool do_prepare;
- bool do_flush = false;
+ enum stripe_result res;
+ int s, stripe_cnt;
if (unlikely(bi->bi_opf & REQ_PREFLUSH)) {
int ret = log_handle_flush_request(conf, bi);
@@ -5811,7 +6104,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
* if r5l_handle_flush_request() didn't clear REQ_PREFLUSH,
* we need to flush journal device
*/
- do_flush = bi->bi_opf & REQ_PREFLUSH;
+ ctx.do_flush = bi->bi_opf & REQ_PREFLUSH;
}
if (!md_write_start(mddev, bi))
@@ -5835,134 +6128,68 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
}
logical_sector = bi->bi_iter.bi_sector & ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1);
- last_sector = bio_end_sector(bi);
+ ctx.first_sector = logical_sector;
+ ctx.last_sector = bio_end_sector(bi);
bi->bi_next = NULL;
+ stripe_cnt = DIV_ROUND_UP_SECTOR_T(ctx.last_sector - logical_sector,
+ RAID5_STRIPE_SECTORS(conf));
+ bitmap_set(ctx.sectors_to_do, 0, stripe_cnt);
+
+ pr_debug("raid456: %s, logical %llu to %llu\n", __func__,
+ bi->bi_iter.bi_sector, ctx.last_sector);
+
/* Bail out if conflicts with reshape and REQ_NOWAIT is set */
if ((bi->bi_opf & REQ_NOWAIT) &&
(conf->reshape_progress != MaxSector) &&
- (mddev->reshape_backwards
- ? (logical_sector > conf->reshape_progress && logical_sector <= conf->reshape_safe)
- : (logical_sector >= conf->reshape_safe && logical_sector < conf->reshape_progress))) {
+ !ahead_of_reshape(mddev, logical_sector, conf->reshape_progress) &&
+ ahead_of_reshape(mddev, logical_sector, conf->reshape_safe)) {
bio_wouldblock_error(bi);
if (rw == WRITE)
md_write_end(mddev);
return true;
}
md_account_bio(mddev, &bi);
- prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
- for (; logical_sector < last_sector; logical_sector += RAID5_STRIPE_SECTORS(conf)) {
- int previous;
- int seq;
-
- do_prepare = false;
- retry:
- seq = read_seqcount_begin(&conf->gen_lock);
- previous = 0;
- if (do_prepare)
- prepare_to_wait(&conf->wait_for_overlap, &w,
- TASK_UNINTERRUPTIBLE);
- if (unlikely(conf->reshape_progress != MaxSector)) {
- /* spinlock is needed as reshape_progress may be
- * 64bit on a 32bit platform, and so it might be
- * possible to see a half-updated value
- * Of course reshape_progress could change after
- * the lock is dropped, so once we get a reference
- * to the stripe that we think it is, we will have
- * to check again.
- */
- spin_lock_irq(&conf->device_lock);
- if (mddev->reshape_backwards
- ? logical_sector < conf->reshape_progress
- : logical_sector >= conf->reshape_progress) {
- previous = 1;
- } else {
- if (mddev->reshape_backwards
- ? logical_sector < conf->reshape_safe
- : logical_sector >= conf->reshape_safe) {
- spin_unlock_irq(&conf->device_lock);
- schedule();
- do_prepare = true;
- goto retry;
- }
- }
- spin_unlock_irq(&conf->device_lock);
- }
- new_sector = raid5_compute_sector(conf, logical_sector,
- previous,
- &dd_idx, NULL);
- pr_debug("raid456: raid5_make_request, sector %llu logical %llu\n",
- (unsigned long long)new_sector,
- (unsigned long long)logical_sector);
+ add_wait_queue(&conf->wait_for_overlap, &wait);
+ while (1) {
+ res = make_stripe_request(mddev, conf, &ctx, logical_sector,
+ bi);
+ if (res == STRIPE_FAIL)
+ break;
- sh = raid5_get_active_stripe(conf, new_sector, previous,
- (bi->bi_opf & REQ_RAHEAD), 0);
- if (sh) {
- if (unlikely(previous)) {
- /* expansion might have moved on while waiting for a
- * stripe, so we must do the range check again.
- * Expansion could still move past after this
- * test, but as we are holding a reference to
- * 'sh', we know that if that happens,
- * STRIPE_EXPANDING will get set and the expansion
- * won't proceed until we finish with the stripe.
- */
- int must_retry = 0;
- spin_lock_irq(&conf->device_lock);
- if (mddev->reshape_backwards
- ? logical_sector >= conf->reshape_progress
- : logical_sector < conf->reshape_progress)
- /* mismatch, need to try again */
- must_retry = 1;
- spin_unlock_irq(&conf->device_lock);
- if (must_retry) {
- raid5_release_stripe(sh);
- schedule();
- do_prepare = true;
- goto retry;
- }
- }
- if (read_seqcount_retry(&conf->gen_lock, seq)) {
- /* Might have got the wrong stripe_head
- * by accident
- */
- raid5_release_stripe(sh);
- goto retry;
- }
+ if (res == STRIPE_RETRY)
+ continue;
- if (test_bit(STRIPE_EXPANDING, &sh->state) ||
- !add_stripe_bio(sh, bi, dd_idx, rw, previous)) {
- /* Stripe is busy expanding or
- * add failed due to overlap. Flush everything
- * and wait a while
- */
- md_wakeup_thread(mddev->thread);
- raid5_release_stripe(sh);
- schedule();
- do_prepare = true;
- goto retry;
- }
- if (do_flush) {
- set_bit(STRIPE_R5C_PREFLUSH, &sh->state);
- /* we only need flush for one stripe */
- do_flush = false;
+ if (res == STRIPE_SCHEDULE_AND_RETRY) {
+ /*
+ * Must release the reference to batch_last before
+ * scheduling and waiting for work to be done,
+ * otherwise the batch_last stripe head could prevent
+ * raid5_activate_delayed() from making progress
+			 * and thus cause a deadlock.
+ */
+ if (ctx.batch_last) {
+ raid5_release_stripe(ctx.batch_last);
+ ctx.batch_last = NULL;
}
- set_bit(STRIPE_HANDLE, &sh->state);
- clear_bit(STRIPE_DELAYED, &sh->state);
- if ((!sh->batch_head || sh == sh->batch_head) &&
- (bi->bi_opf & REQ_SYNC) &&
- !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
- atomic_inc(&conf->preread_active_stripes);
- release_stripe_plug(mddev, sh);
- } else {
- /* cannot get stripe for read-ahead, just give-up */
- bi->bi_status = BLK_STS_IOERR;
- break;
+ wait_woken(&wait, TASK_UNINTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
+ continue;
}
+
+ s = find_first_bit(ctx.sectors_to_do, stripe_cnt);
+ if (s == stripe_cnt)
+ break;
+
+ logical_sector = ctx.first_sector +
+ (s << RAID5_STRIPE_SHIFT(conf));
}
- finish_wait(&conf->wait_for_overlap, &w);
+ remove_wait_queue(&conf->wait_for_overlap, &wait);
+
+ if (ctx.batch_last)
+ raid5_release_stripe(ctx.batch_last);
if (rw == WRITE)
md_write_end(mddev);
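The rewritten loop uses the wait-queue "woken" idiom (DEFINE_WAIT_FUNC() plus wait_woken()) instead of prepare_to_wait()/schedule()/finish_wait(): the entry is added to wait_for_overlap once, wait_woken() cannot miss a wake-up that races with the condition check, and the condition is re-evaluated on every pass. Stripped of the raid5 specifics (wq_head and should_stop() stand in for the caller's queue and condition):

	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&wq_head, &wait);
	while (!should_stop())	/* re-check after every wake-up */
		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	remove_wait_queue(&wq_head, &wait);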
@@ -7417,7 +7644,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
conf->shrinker.count_objects = raid5_cache_count;
conf->shrinker.batch = 128;
conf->shrinker.flags = 0;
- ret = register_shrinker(&conf->shrinker);
+ ret = register_shrinker(&conf->shrinker, "md-raid5:%s", mdname(mddev));
if (ret) {
pr_warn("md/raid:%s: couldn't register shrinker.\n",
mdname(mddev));
@@ -7815,7 +8042,15 @@ static int raid5_run(struct mddev *mddev)
mddev->queue->limits.discard_granularity < stripe)
blk_queue_max_discard_sectors(mddev->queue, 0);
- blk_queue_max_hw_sectors(mddev->queue, UINT_MAX);
+ /*
+	 * Each request carries a bitmap with one bit per stripe;
+	 * limit the max sectors based on its size.
+ */
+ blk_queue_max_hw_sectors(mddev->queue,
+ RAID5_MAX_REQ_STRIPES << RAID5_STRIPE_SHIFT(conf));
+
+ /* No restrictions on the number of segments in the request */
+ blk_queue_max_segments(mddev->queue, USHRT_MAX);
}
if (log_init(conf, journal_dev, raid5_has_ppl(conf)))
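The cap ties the largest possible request to the size of the per-request stripe bitmap. As a rough worked example, assuming RAID5_MAX_REQ_STRIPES is 256 and the default 4 KiB stripe size (RAID5_STRIPE_SHIFT == 3):

	256 << 3 == 2048 sectors == 1 MiB per request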
@@ -8066,8 +8301,7 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
* find the disk ... but prefer rdev->saved_raid_disk
* if possible.
*/
- if (rdev->saved_raid_disk >= 0 &&
- rdev->saved_raid_disk >= first &&
+ if (rdev->saved_raid_disk >= first &&
rdev->saved_raid_disk <= last &&
conf->disks[rdev->saved_raid_disk].rdev == NULL)
first = rdev->saved_raid_disk;
@@ -8704,8 +8938,11 @@ static int raid5_change_consistency_policy(struct mddev *mddev, const char *buf)
err = log_init(conf, NULL, true);
if (!err) {
err = resize_stripes(conf, conf->pool_size);
- if (err)
+ if (err) {
+ mddev_suspend(mddev);
log_exit(conf);
+ mddev_resume(mddev);
+ }
}
} else
err = -EINVAL;
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 638d29863503..a5082bed83c8 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -812,7 +812,7 @@ extern sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
struct stripe_head *sh);
extern struct stripe_head *
raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
- int previous, int noblock, int noquiesce);
+ bool previous, bool noblock, bool noquiesce);
extern int raid5_calc_degraded(struct r5conf *conf);
extern int r5c_journal_mode_set(struct mddev *mddev, int journal_mode);
#endif
diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
index 0de7acdf58a7..f66ac14cffad 100644
--- a/drivers/media/i2c/tda1997x.c
+++ b/drivers/media/i2c/tda1997x.c
@@ -2517,7 +2517,6 @@ static struct snd_soc_component_driver tda1997x_codec_driver = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int tda1997x_probe(struct i2c_client *client,
diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c
index cb48c5ff3dee..c93d2906e4c7 100644
--- a/drivers/media/platform/qcom/venus/pm_helpers.c
+++ b/drivers/media/platform/qcom/venus/pm_helpers.c
@@ -875,7 +875,7 @@ static int vcodec_domains_get(struct venus_core *core)
}
skip_pmdomains:
- if (!core->has_opp_table)
+ if (!core->res->opp_pmdomain)
return 0;
/* Attach the power domain for setting performance state */
@@ -1007,6 +1007,10 @@ static int core_get_v4(struct venus_core *core)
if (ret)
return ret;
+ ret = vcodec_domains_get(core);
+ if (ret)
+ return ret;
+
if (core->res->opp_pmdomain) {
ret = devm_pm_opp_of_add_table(dev);
if (!ret) {
@@ -1017,10 +1021,6 @@ static int core_get_v4(struct venus_core *core)
}
}
- ret = vcodec_domains_get(core);
- if (ret)
- return ret;
-
return 0;
}
diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c
index 908f8d5392b2..85bc936c02f9 100644
--- a/drivers/memory/tegra/tegra124-emc.c
+++ b/drivers/memory/tegra/tegra124-emc.c
@@ -1395,15 +1395,14 @@ err_msg:
static int tegra_emc_opp_table_init(struct tegra_emc *emc)
{
u32 hw_version = BIT(tegra_sku_info.soc_speedo_id);
- struct opp_table *hw_opp_table;
- int err;
+ int opp_token, err;
- hw_opp_table = dev_pm_opp_set_supported_hw(emc->dev, &hw_version, 1);
- err = PTR_ERR_OR_ZERO(hw_opp_table);
- if (err) {
+ err = dev_pm_opp_set_supported_hw(emc->dev, &hw_version, 1);
+ if (err < 0) {
dev_err(emc->dev, "failed to set OPP supported HW: %d\n", err);
return err;
}
+ opp_token = err;
err = dev_pm_opp_of_add_table(emc->dev);
if (err) {
@@ -1430,7 +1429,7 @@ static int tegra_emc_opp_table_init(struct tegra_emc *emc)
remove_table:
dev_pm_opp_of_remove_table(emc->dev);
put_hw_table:
- dev_pm_opp_put_supported_hw(hw_opp_table);
+ dev_pm_opp_put_supported_hw(opp_token);
return err;
}
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index ed9a683b3ca8..ba8414519515 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -1341,17 +1341,17 @@ static int msb_ftl_initialize(struct msb_data *msb)
msb->zone_count = msb->block_count / MS_BLOCKS_IN_ZONE;
msb->logical_block_count = msb->zone_count * 496 - 2;
- msb->used_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
- msb->erased_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
+ msb->used_blocks_bitmap = bitmap_zalloc(msb->block_count, GFP_KERNEL);
+ msb->erased_blocks_bitmap = bitmap_zalloc(msb->block_count, GFP_KERNEL);
msb->lba_to_pba_table =
kmalloc_array(msb->logical_block_count, sizeof(u16),
GFP_KERNEL);
if (!msb->used_blocks_bitmap || !msb->lba_to_pba_table ||
!msb->erased_blocks_bitmap) {
- kfree(msb->used_blocks_bitmap);
+ bitmap_free(msb->used_blocks_bitmap);
+ bitmap_free(msb->erased_blocks_bitmap);
kfree(msb->lba_to_pba_table);
- kfree(msb->erased_blocks_bitmap);
return -ENOMEM;
}
@@ -1946,7 +1946,8 @@ static DEFINE_MUTEX(msb_disk_lock); /* protects against races in open/release */
static void msb_data_clear(struct msb_data *msb)
{
kfree(msb->boot_page);
- kfree(msb->used_blocks_bitmap);
+ bitmap_free(msb->used_blocks_bitmap);
+ bitmap_free(msb->erased_blocks_bitmap);
kfree(msb->lba_to_pba_table);
kfree(msb->cache);
msb->card = NULL;
@@ -2243,8 +2244,8 @@ static int msb_resume(struct memstick_dev *card)
goto out;
if (msb->block_count != new_msb->block_count ||
- memcmp(msb->used_blocks_bitmap, new_msb->used_blocks_bitmap,
- msb->block_count / 8))
+ !bitmap_equal(msb->used_blocks_bitmap, new_msb->used_blocks_bitmap,
+ msb->block_count))
goto out;
card_dead = false;
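bitmap_zalloc()/bitmap_free() replace the hand-rolled block_count / 8 allocations, and bitmap_equal() compares by bit count rather than by bytes, which also stays correct when the count is not a multiple of 8. A short sketch with placeholder names (nbits, other):

	unsigned long *map = bitmap_zalloc(nbits, GFP_KERNEL);

	if (!map)
		return -ENOMEM;
	/* ... use map ... */
	if (!bitmap_equal(map, other, nbits))
		pr_debug("bitmaps differ\n");
	bitmap_free(map);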
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 388675cc1765..62089a8caa2f 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -101,7 +101,7 @@ static u8 mptspiInternalCtx = MPT_MAX_PROTOCOL_DRIVERS; /* Used only for interna
* @target: per target private data
* @sdev: SCSI device
*
- * Update the target negotiation parameters based on the the Inquiry
+ * Update the target negotiation parameters based on the Inquiry
* data, adapter capabilities, and NVRAM settings.
**/
static void
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 3b59456f5545..abb58ab1a1a4 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -572,6 +572,7 @@ config LPC_ICH
tristate "Intel ICH LPC"
depends on PCI
select MFD_CORE
+ select P2SB if X86
help
The LPC bridge function of the Intel ICH provides support for
many functional units. This driver provides needed support for
@@ -1357,12 +1358,13 @@ config MFD_STA2X11
select REGMAP_MMIO
config MFD_SUN6I_PRCM
- bool "Allwinner A31 PRCM controller"
+ bool "Allwinner A31/A23/A33 PRCM controller"
depends on ARCH_SUNXI || COMPILE_TEST
select MFD_CORE
help
Support for the PRCM (Power/Reset/Clock Management) unit available
- in A31 SoC.
+ in the A31, A23, and A33 SoCs. Other Allwinner SoCs contain similar
+ hardware, but they do not use this driver.
config MFD_SYSCON
bool "System Controller Register R/W Based on Regmap"
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 56338f9dbd0b..4fb7e35eb5ed 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -596,12 +596,11 @@ static __init int asic3_gpio_probe(struct platform_device *pdev,
return gpiochip_add_data(&asic->gpio, asic);
}
-static int asic3_gpio_remove(struct platform_device *pdev)
+static void asic3_gpio_remove(struct platform_device *pdev)
{
struct asic3 *asic = platform_get_drvdata(pdev);
gpiochip_remove(&asic->gpio);
- return 0;
}
static void asic3_clk_enable(struct asic3 *asic, struct asic3_clk *clk)
@@ -1030,7 +1029,6 @@ static int __init asic3_probe(struct platform_device *pdev)
static int asic3_remove(struct platform_device *pdev)
{
- int ret;
struct asic3 *asic = platform_get_drvdata(pdev);
asic3_set_register(asic, ASIC3_OFFSET(EXTCF, SELECT),
@@ -1038,9 +1036,8 @@ static int asic3_remove(struct platform_device *pdev)
asic3_mfd_remove(pdev);
- ret = asic3_gpio_remove(pdev);
- if (ret < 0)
- return ret;
+ asic3_gpio_remove(pdev);
+
asic3_irq_remove(pdev);
asic3_write_register(asic, ASIC3_OFFSET(CLOCK, SEL), 0);
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index 8161a5dc68e8..88a212a8168c 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -619,6 +619,9 @@ static const struct mfd_cell axp20x_cells[] = {
static const struct mfd_cell axp221_cells[] = {
{
+ .name = "axp20x-gpio",
+ .of_compatible = "x-powers,axp221-gpio",
+ }, {
.name = "axp221-pek",
.num_resources = ARRAY_SIZE(axp22x_pek_resources),
.resources = axp22x_pek_resources,
@@ -645,6 +648,9 @@ static const struct mfd_cell axp221_cells[] = {
static const struct mfd_cell axp223_cells[] = {
{
+ .name = "axp20x-gpio",
+ .of_compatible = "x-powers,axp221-gpio",
+ }, {
.name = "axp221-pek",
.num_resources = ARRAY_SIZE(axp22x_pek_resources),
.resources = axp22x_pek_resources,
@@ -785,6 +791,9 @@ static const struct mfd_cell axp806_cells[] = {
static const struct mfd_cell axp809_cells[] = {
{
+ .name = "axp20x-gpio",
+ .of_compatible = "x-powers,axp221-gpio",
+ }, {
.name = "axp221-pek",
.num_resources = ARRAY_SIZE(axp809_pek_resources),
.resources = axp809_pek_resources,
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index 596731caf407..344ad03bdc42 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -65,6 +65,11 @@ static const struct cros_feature_to_name cros_mcu_devices[] = {
.desc = "System Control Processor",
},
{
+ .id = EC_FEATURE_SCP_C1,
+ .name = CROS_EC_DEV_SCP_C1_NAME,
+ .desc = "System Control Processor 2nd Core",
+ },
+ {
.id = EC_FEATURE_TOUCHPAD,
.name = CROS_EC_DEV_TP_NAME,
.desc = "Touchpad",
@@ -250,8 +255,8 @@ static int ec_device_probe(struct platform_device *pdev)
* The PCHG device cannot be detected by sending EC_FEATURE_GET_CMD, but
* it can be detected by querying the number of peripheral chargers.
*/
- retval = cros_ec_command(ec->ec_dev, 0, EC_CMD_PCHG_COUNT, NULL, 0,
- &pchg_count, sizeof(pchg_count));
+ retval = cros_ec_cmd(ec->ec_dev, 0, EC_CMD_PCHG_COUNT, NULL, 0,
+ &pchg_count, sizeof(pchg_count));
if (retval >= 0 && pchg_count.port_count) {
retval = mfd_add_hotplug_devices(ec->dev,
cros_ec_pchg_cells,
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 56c61c99eb23..27a881da4d6e 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -798,7 +798,7 @@ void db8500_prcmu_get_abb_event_buffer(void __iomem **buf)
* @opp: The new ARM operating point to which transition is to be made
* Returns: 0 on success, non-zero on failure
*
- * This function sets the the operating point of the ARM.
+ * This function sets the operating point of the ARM.
*/
int db8500_prcmu_set_arm_opp(u8 opp)
{
diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c
index 852129ea0766..6cd0b0c752d6 100644
--- a/drivers/mfd/dln2.c
+++ b/drivers/mfd/dln2.c
@@ -91,11 +91,6 @@ struct dln2_mod_rx_slots {
spinlock_t lock;
};
-enum dln2_endpoint {
- DLN2_EP_OUT = 0,
- DLN2_EP_IN = 1,
-};
-
struct dln2_dev {
struct usb_device *usb_dev;
struct usb_interface *interface;
@@ -777,16 +772,12 @@ static int dln2_probe(struct usb_interface *interface,
int ret;
int i, j;
- if (hostif->desc.bInterfaceNumber != 0 ||
- hostif->desc.bNumEndpoints < 2)
+ if (hostif->desc.bInterfaceNumber != 0)
return -ENODEV;
- epout = &hostif->endpoint[DLN2_EP_OUT].desc;
- if (!usb_endpoint_is_bulk_out(epout))
- return -ENODEV;
- epin = &hostif->endpoint[DLN2_EP_IN].desc;
- if (!usb_endpoint_is_bulk_in(epin))
- return -ENODEV;
+ ret = usb_find_common_endpoints(hostif, &epin, &epout, NULL, NULL);
+ if (ret)
+ return ret;
dln2 = kzalloc(sizeof(*dln2), GFP_KERNEL);
if (!dln2)
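usb_find_common_endpoints() scans the altsetting's endpoint descriptors and hands back the first bulk-in/bulk-out pair (the two NULL arguments skip the interrupt endpoints), returning a negative error when either one is missing, which also subsumes the old bNumEndpoints < 2 check. Its prototype, for reference:

	int usb_find_common_endpoints(struct usb_host_interface *alt,
				      struct usb_endpoint_descriptor **bulk_in,
				      struct usb_endpoint_descriptor **bulk_out,
				      struct usb_endpoint_descriptor **int_in,
				      struct usb_endpoint_descriptor **int_out);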
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index f7950d2197df..bb08b7a73fe1 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -385,6 +385,19 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x7afc), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7afd), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7afe), (kernel_ulong_t)&bxt_uart_info },
+ /* MTL-P */
+ { PCI_VDEVICE(INTEL, 0x7e25), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x7e26), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x7e27), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x7e30), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x7e46), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x7e50), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x7e51), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x7e52), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x7e78), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x7e79), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x7e7a), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x7e7b), (kernel_ulong_t)&bxt_i2c_info },
/* LKF */
{ PCI_VDEVICE(INTEL, 0x98a8), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x98a9), (kernel_ulong_t)&bxt_uart_info },
diff --git a/drivers/mfd/intel_soc_pmic_bxtwc.c b/drivers/mfd/intel_soc_pmic_bxtwc.c
index bc069c4daa60..8dac0d41f64f 100644
--- a/drivers/mfd/intel_soc_pmic_bxtwc.c
+++ b/drivers/mfd/intel_soc_pmic_bxtwc.c
@@ -2,10 +2,11 @@
/*
* MFD core driver for Intel Broxton Whiskey Cove PMIC
*
- * Copyright (C) 2015 Intel Corporation. All rights reserved.
+ * Copyright (C) 2015-2017, 2022 Intel Corporation. All rights reserved.
*/
#include <linux/acpi.h>
+#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -18,9 +19,9 @@
#include <asm/intel_scu_ipc.h>
/* PMIC device registers */
-#define REG_ADDR_MASK 0xFF00
+#define REG_ADDR_MASK GENMASK(15, 8)
#define REG_ADDR_SHIFT 8
-#define REG_OFFSET_MASK 0xFF
+#define REG_OFFSET_MASK GENMASK(7, 0)
/* Interrupt Status Registers */
#define BXTWC_IRQLVL1 0x4E02
@@ -112,29 +113,29 @@ static const struct regmap_irq bxtwc_regmap_irqs[] = {
};
static const struct regmap_irq bxtwc_regmap_irqs_pwrbtn[] = {
- REGMAP_IRQ_REG(BXTWC_PWRBTN_IRQ, 0, 0x01),
+ REGMAP_IRQ_REG(BXTWC_PWRBTN_IRQ, 0, BIT(0)),
};
static const struct regmap_irq bxtwc_regmap_irqs_bcu[] = {
- REGMAP_IRQ_REG(BXTWC_BCU_IRQ, 0, 0x1f),
+ REGMAP_IRQ_REG(BXTWC_BCU_IRQ, 0, GENMASK(4, 0)),
};
static const struct regmap_irq bxtwc_regmap_irqs_adc[] = {
- REGMAP_IRQ_REG(BXTWC_ADC_IRQ, 0, 0xff),
+ REGMAP_IRQ_REG(BXTWC_ADC_IRQ, 0, GENMASK(7, 0)),
};
static const struct regmap_irq bxtwc_regmap_irqs_chgr[] = {
- REGMAP_IRQ_REG(BXTWC_USBC_IRQ, 0, 0x20),
- REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 0, 0x1f),
- REGMAP_IRQ_REG(BXTWC_CHGR1_IRQ, 1, 0x1f),
+ REGMAP_IRQ_REG(BXTWC_USBC_IRQ, 0, BIT(5)),
+ REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 0, GENMASK(4, 0)),
+ REGMAP_IRQ_REG(BXTWC_CHGR1_IRQ, 1, GENMASK(4, 0)),
};
static const struct regmap_irq bxtwc_regmap_irqs_tmu[] = {
- REGMAP_IRQ_REG(BXTWC_TMU_IRQ, 0, 0x06),
+ REGMAP_IRQ_REG(BXTWC_TMU_IRQ, 0, GENMASK(2, 1)),
};
static const struct regmap_irq bxtwc_regmap_irqs_crit[] = {
- REGMAP_IRQ_REG(BXTWC_CRIT_IRQ, 0, 0x03),
+ REGMAP_IRQ_REG(BXTWC_CRIT_IRQ, 0, GENMASK(1, 0)),
};
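The raw hex masks become self-documenting BIT()/GENMASK() expressions; the generated values are unchanged:

	BIT(0)        == 0x01
	BIT(5)        == 0x20
	GENMASK(1, 0) == 0x03
	GENMASK(2, 1) == 0x06
	GENMASK(4, 0) == 0x1f
	GENMASK(7, 0) == 0xff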
static struct regmap_irq_chip bxtwc_regmap_irq_chip = {
@@ -333,17 +334,19 @@ static unsigned long bxtwc_reg_addr;
static ssize_t addr_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "0x%lx\n", bxtwc_reg_addr);
+ return sysfs_emit(buf, "0x%lx\n", bxtwc_reg_addr);
}
static ssize_t addr_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- if (kstrtoul(buf, 0, &bxtwc_reg_addr)) {
- dev_err(dev, "Invalid register address\n");
- return -EINVAL;
- }
- return (ssize_t)count;
+ int ret;
+
+ ret = kstrtoul(buf, 0, &bxtwc_reg_addr);
+ if (ret)
+ return ret;
+
+ return count;
}
static ssize_t val_show(struct device *dev,
@@ -354,12 +357,12 @@ static ssize_t val_show(struct device *dev,
struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
ret = regmap_read(pmic->regmap, bxtwc_reg_addr, &val);
- if (ret < 0) {
+ if (ret) {
dev_err(dev, "Failed to read 0x%lx\n", bxtwc_reg_addr);
- return -EIO;
+ return ret;
}
- return sprintf(buf, "0x%02x\n", val);
+ return sysfs_emit(buf, "0x%02x\n", val);
}
static ssize_t val_store(struct device *dev,
@@ -377,7 +380,7 @@ static ssize_t val_store(struct device *dev,
if (ret) {
dev_err(dev, "Failed to write value 0x%02x to address 0x%lx",
val, bxtwc_reg_addr);
- return -EIO;
+ return ret;
}
return count;
}
@@ -394,6 +397,11 @@ static const struct attribute_group bxtwc_group = {
.attrs = bxtwc_attrs,
};
+static const struct attribute_group *bxtwc_groups[] = {
+ &bxtwc_group,
+ NULL
+};
+
static const struct regmap_config bxtwc_regmap_config = {
.reg_bits = 16,
.val_bits = 8,
@@ -410,12 +418,9 @@ static int bxtwc_add_chained_irq_chip(struct intel_soc_pmic *pmic,
int irq;
irq = regmap_irq_get_virq(pdata, pirq);
- if (irq < 0) {
- dev_err(pmic->dev,
- "Failed to get parent vIRQ(%d) for chip %s, ret:%d\n",
- pirq, chip->name, irq);
- return irq;
- }
+ if (irq < 0)
+ return dev_err_probe(pmic->dev, irq, "Failed to get parent vIRQ(%d) for chip %s\n",
+ pirq, chip->name);
return devm_regmap_add_irq_chip(pmic->dev, pmic->regmap, irq, irq_flags,
0, chip, data);
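dev_err_probe() folds the error print and the return value into one call and stays silent for -EPROBE_DEFER (the reason is recorded for the devices_deferred debugfs listing instead), so the conversions through this probe path keep the behaviour while shortening every error leg. The pattern, roughly:

	/* before */
	if (ret) {
		dev_err(dev, "Failed to add IRQ chip\n");
		return ret;
	}

	/* after */
	if (ret)
		return dev_err_probe(dev, ret, "Failed to add IRQ chip\n");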
@@ -423,25 +428,19 @@ static int bxtwc_add_chained_irq_chip(struct intel_soc_pmic *pmic,
static int bxtwc_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
int ret;
- acpi_handle handle;
acpi_status status;
unsigned long long hrv;
struct intel_soc_pmic *pmic;
- handle = ACPI_HANDLE(&pdev->dev);
- status = acpi_evaluate_integer(handle, "_HRV", NULL, &hrv);
- if (ACPI_FAILURE(status)) {
- dev_err(&pdev->dev, "Failed to get PMIC hardware revision\n");
- return -ENODEV;
- }
- if (hrv != BROXTON_PMIC_WC_HRV) {
- dev_err(&pdev->dev, "Invalid PMIC hardware revision: %llu\n",
- hrv);
- return -ENODEV;
- }
+ status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_HRV", NULL, &hrv);
+ if (ACPI_FAILURE(status))
+ return dev_err_probe(dev, -ENODEV, "Failed to get PMIC hardware revision\n");
+ if (hrv != BROXTON_PMIC_WC_HRV)
+ return dev_err_probe(dev, -ENODEV, "Invalid PMIC hardware revision: %llu\n", hrv);
- pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL);
+ pmic = devm_kzalloc(dev, sizeof(*pmic), GFP_KERNEL);
if (!pmic)
return -ENOMEM;
@@ -450,49 +449,39 @@ static int bxtwc_probe(struct platform_device *pdev)
return ret;
pmic->irq = ret;
- dev_set_drvdata(&pdev->dev, pmic);
- pmic->dev = &pdev->dev;
+ platform_set_drvdata(pdev, pmic);
+ pmic->dev = dev;
- pmic->scu = devm_intel_scu_ipc_dev_get(&pdev->dev);
+ pmic->scu = devm_intel_scu_ipc_dev_get(dev);
if (!pmic->scu)
return -EPROBE_DEFER;
- pmic->regmap = devm_regmap_init(&pdev->dev, NULL, pmic,
- &bxtwc_regmap_config);
- if (IS_ERR(pmic->regmap)) {
- ret = PTR_ERR(pmic->regmap);
- dev_err(&pdev->dev, "Failed to initialise regmap: %d\n", ret);
- return ret;
- }
+ pmic->regmap = devm_regmap_init(dev, NULL, pmic, &bxtwc_regmap_config);
+ if (IS_ERR(pmic->regmap))
+ return dev_err_probe(dev, PTR_ERR(pmic->regmap), "Failed to initialise regmap\n");
- ret = devm_regmap_add_irq_chip(&pdev->dev, pmic->regmap, pmic->irq,
+ ret = devm_regmap_add_irq_chip(dev, pmic->regmap, pmic->irq,
IRQF_ONESHOT | IRQF_SHARED,
0, &bxtwc_regmap_irq_chip,
&pmic->irq_chip_data);
- if (ret) {
- dev_err(&pdev->dev, "Failed to add IRQ chip\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add IRQ chip\n");
ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
BXTWC_PWRBTN_LVL1_IRQ,
IRQF_ONESHOT,
&bxtwc_regmap_irq_chip_pwrbtn,
&pmic->irq_chip_data_pwrbtn);
- if (ret) {
- dev_err(&pdev->dev, "Failed to add PWRBTN IRQ chip\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add PWRBTN IRQ chip\n");
ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
BXTWC_TMU_LVL1_IRQ,
IRQF_ONESHOT,
&bxtwc_regmap_irq_chip_tmu,
&pmic->irq_chip_data_tmu);
- if (ret) {
- dev_err(&pdev->dev, "Failed to add TMU IRQ chip\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add TMU IRQ chip\n");
/* Add chained IRQ handler for BCU IRQs */
ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
@@ -500,12 +489,8 @@ static int bxtwc_probe(struct platform_device *pdev)
IRQF_ONESHOT,
&bxtwc_regmap_irq_chip_bcu,
&pmic->irq_chip_data_bcu);
-
-
- if (ret) {
- dev_err(&pdev->dev, "Failed to add BUC IRQ chip\n");
- return ret;
- }
+ if (ret)
+		return dev_err_probe(dev, ret, "Failed to add BCU IRQ chip\n");
/* Add chained IRQ handler for ADC IRQs */
ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
@@ -513,12 +498,8 @@ static int bxtwc_probe(struct platform_device *pdev)
IRQF_ONESHOT,
&bxtwc_regmap_irq_chip_adc,
&pmic->irq_chip_data_adc);
-
-
- if (ret) {
- dev_err(&pdev->dev, "Failed to add ADC IRQ chip\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add ADC IRQ chip\n");
/* Add chained IRQ handler for CHGR IRQs */
ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
@@ -526,12 +507,8 @@ static int bxtwc_probe(struct platform_device *pdev)
IRQF_ONESHOT,
&bxtwc_regmap_irq_chip_chgr,
&pmic->irq_chip_data_chgr);
-
-
- if (ret) {
- dev_err(&pdev->dev, "Failed to add CHGR IRQ chip\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add CHGR IRQ chip\n");
/* Add chained IRQ handler for CRIT IRQs */
ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
@@ -539,54 +516,33 @@ static int bxtwc_probe(struct platform_device *pdev)
IRQF_ONESHOT,
&bxtwc_regmap_irq_chip_crit,
&pmic->irq_chip_data_crit);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add CRIT IRQ chip\n");
-
- if (ret) {
- dev_err(&pdev->dev, "Failed to add CRIT IRQ chip\n");
- return ret;
- }
-
- ret = devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE, bxt_wc_dev,
- ARRAY_SIZE(bxt_wc_dev), NULL, 0, NULL);
- if (ret) {
- dev_err(&pdev->dev, "Failed to add devices\n");
- return ret;
- }
-
- ret = sysfs_create_group(&pdev->dev.kobj, &bxtwc_group);
- if (ret) {
- dev_err(&pdev->dev, "Failed to create sysfs group %d\n", ret);
- return ret;
- }
+ ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, bxt_wc_dev, ARRAY_SIZE(bxt_wc_dev),
+ NULL, 0, NULL);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add devices\n");
/*
- * There is known hw bug. Upon reset BIT 5 of register
+ * There is a known H/W bug. Upon reset, BIT 5 of register
* BXTWC_CHGR_LVL1_IRQ is 0 which is the expected value. However,
* later it's set to 1(masked) automatically by hardware. So we
- * have the software workaround here to unmaksed it in order to let
- * charger interrutp work.
+ * place the software workaround here to unmask it again in order
+ * to re-enable the charger interrupt.
*/
- regmap_update_bits(pmic->regmap, BXTWC_MIRQLVL1,
- BXTWC_MIRQLVL1_MCHGR, 0);
-
- return 0;
-}
-
-static int bxtwc_remove(struct platform_device *pdev)
-{
- sysfs_remove_group(&pdev->dev.kobj, &bxtwc_group);
+ regmap_update_bits(pmic->regmap, BXTWC_MIRQLVL1, BXTWC_MIRQLVL1_MCHGR, 0);
return 0;
}
static void bxtwc_shutdown(struct platform_device *pdev)
{
- struct intel_soc_pmic *pmic = dev_get_drvdata(&pdev->dev);
+ struct intel_soc_pmic *pmic = platform_get_drvdata(pdev);
disable_irq(pmic->irq);
}
-#ifdef CONFIG_PM_SLEEP
static int bxtwc_suspend(struct device *dev)
{
struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
@@ -603,8 +559,8 @@ static int bxtwc_resume(struct device *dev)
enable_irq(pmic->irq);
return 0;
}
-#endif
-static SIMPLE_DEV_PM_OPS(bxtwc_pm_ops, bxtwc_suspend, bxtwc_resume);
+
+static DEFINE_SIMPLE_DEV_PM_OPS(bxtwc_pm_ops, bxtwc_suspend, bxtwc_resume);
static const struct acpi_device_id bxtwc_acpi_ids[] = {
{ "INT34D3", },
@@ -614,16 +570,16 @@ MODULE_DEVICE_TABLE(acpi, bxtwc_acpi_ids);
static struct platform_driver bxtwc_driver = {
.probe = bxtwc_probe,
- .remove = bxtwc_remove,
.shutdown = bxtwc_shutdown,
.driver = {
.name = "BXTWC PMIC",
- .pm = &bxtwc_pm_ops,
- .acpi_match_table = ACPI_PTR(bxtwc_acpi_ids),
+ .pm = pm_sleep_ptr(&bxtwc_pm_ops),
+ .acpi_match_table = bxtwc_acpi_ids,
+ .dev_groups = bxtwc_groups,
},
};
module_platform_driver(bxtwc_driver);
MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Qipeng Zha<qipeng.zha@intel.com>");
+MODULE_AUTHOR("Qipeng Zha <qipeng.zha@intel.com>");
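Dropping the CONFIG_PM_SLEEP #ifdefs relies on two helpers: DEFINE_SIMPLE_DEV_PM_OPS() always defines the dev_pm_ops structure, and pm_sleep_ptr() yields the pointer only when CONFIG_PM_SLEEP is enabled (NULL otherwise), so the then-unreferenced static ops and callbacks can be discarded by the compiler instead of being hidden behind preprocessor guards. In outline (foo_* names are placeholders):

	static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

	static struct platform_driver foo_driver = {
		.driver = {
			.pm = pm_sleep_ptr(&foo_pm_ops),	/* NULL when !CONFIG_PM_SLEEP */
		},
	};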
diff --git a/drivers/mfd/intel_soc_pmic_chtwc.c b/drivers/mfd/intel_soc_pmic_chtwc.c
index 4eab191e053a..9216f0d34206 100644
--- a/drivers/mfd/intel_soc_pmic_chtwc.c
+++ b/drivers/mfd/intel_soc_pmic_chtwc.c
@@ -179,18 +179,13 @@ static int cht_wc_probe(struct i2c_client *client)
int ret;
status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_HRV", NULL, &hrv);
- if (ACPI_FAILURE(status)) {
- dev_err(dev, "Failed to get PMIC hardware revision\n");
- return -ENODEV;
- }
- if (hrv != CHT_WC_HRV) {
- dev_err(dev, "Invalid PMIC hardware revision: %llu\n", hrv);
- return -ENODEV;
- }
- if (client->irq < 0) {
- dev_err(dev, "Invalid IRQ\n");
- return -EINVAL;
- }
+ if (ACPI_FAILURE(status))
+ return dev_err_probe(dev, -ENODEV, "Failed to get PMIC hardware revision\n");
+ if (hrv != CHT_WC_HRV)
+ return dev_err_probe(dev, -ENODEV, "Invalid PMIC hardware revision: %llu\n", hrv);
+
+ if (client->irq < 0)
+ return dev_err_probe(dev, -EINVAL, "Invalid IRQ\n");
pmic = devm_kzalloc(dev, sizeof(*pmic), GFP_KERNEL);
if (!pmic)
@@ -227,7 +222,7 @@ static void cht_wc_shutdown(struct i2c_client *client)
disable_irq(pmic->irq);
}
-static int __maybe_unused cht_wc_suspend(struct device *dev)
+static int cht_wc_suspend(struct device *dev)
{
struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
@@ -236,7 +231,7 @@ static int __maybe_unused cht_wc_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused cht_wc_resume(struct device *dev)
+static int cht_wc_resume(struct device *dev)
{
struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
@@ -244,7 +239,7 @@ static int __maybe_unused cht_wc_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(cht_wc_pm_ops, cht_wc_suspend, cht_wc_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(cht_wc_pm_ops, cht_wc_suspend, cht_wc_resume);
static const struct i2c_device_id cht_wc_i2c_id[] = {
{ }
@@ -258,7 +253,7 @@ static const struct acpi_device_id cht_wc_acpi_ids[] = {
static struct i2c_driver cht_wc_driver = {
.driver = {
.name = "CHT Whiskey Cove PMIC",
- .pm = &cht_wc_pm_ops,
+ .pm = pm_sleep_ptr(&cht_wc_pm_ops),
.acpi_match_table = cht_wc_acpi_ids,
},
.probe_new = cht_wc_probe,
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index 9ffab9aafd81..650951f89f1c 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -8,7 +8,8 @@
* Configuration Registers.
*
* This driver is derived from lpc_sch.
-
+ *
+ * Copyright (c) 2017, 2021-2022 Intel Corporation
* Copyright (c) 2011 Extreme Engineering Solution, Inc.
* Author: Aaron Sierra <asierra@xes-inc.com>
*
@@ -42,9 +43,11 @@
#include <linux/errno.h>
#include <linux/acpi.h>
#include <linux/pci.h>
+#include <linux/pinctrl/pinctrl.h>
#include <linux/mfd/core.h>
#include <linux/mfd/lpc_ich.h>
#include <linux/platform_data/itco_wdt.h>
+#include <linux/platform_data/x86/p2sb.h>
#define ACPIBASE 0x40
#define ACPIBASE_GPE_OFF 0x28
@@ -71,8 +74,6 @@
#define BCR 0xdc
#define BCR_WPD BIT(0)
-#define SPIBASE_APL_SZ 4096
-
#define GPIOBASE_ICH0 0x58
#define GPIOCTRL_ICH0 0x5C
#define GPIOBASE_ICH6 0x48
@@ -143,6 +144,73 @@ static struct mfd_cell lpc_ich_gpio_cell = {
.ignore_resource_conflicts = true,
};
+#define APL_GPIO_NORTH 0
+#define APL_GPIO_NORTHWEST 1
+#define APL_GPIO_WEST 2
+#define APL_GPIO_SOUTHWEST 3
+#define APL_GPIO_NR_DEVICES 4
+
+/* Offset data for Apollo Lake GPIO controllers */
+static resource_size_t apl_gpio_offsets[APL_GPIO_NR_DEVICES] = {
+ [APL_GPIO_NORTH] = 0xc50000,
+ [APL_GPIO_NORTHWEST] = 0xc40000,
+ [APL_GPIO_WEST] = 0xc70000,
+ [APL_GPIO_SOUTHWEST] = 0xc00000,
+};
+
+#define APL_GPIO_RESOURCE_SIZE 0x1000
+
+#define APL_GPIO_IRQ 14
+
+static struct resource apl_gpio_resources[APL_GPIO_NR_DEVICES][2] = {
+ [APL_GPIO_NORTH] = {
+ DEFINE_RES_MEM(0, 0),
+ DEFINE_RES_IRQ(APL_GPIO_IRQ),
+ },
+ [APL_GPIO_NORTHWEST] = {
+ DEFINE_RES_MEM(0, 0),
+ DEFINE_RES_IRQ(APL_GPIO_IRQ),
+ },
+ [APL_GPIO_WEST] = {
+ DEFINE_RES_MEM(0, 0),
+ DEFINE_RES_IRQ(APL_GPIO_IRQ),
+ },
+ [APL_GPIO_SOUTHWEST] = {
+ DEFINE_RES_MEM(0, 0),
+ DEFINE_RES_IRQ(APL_GPIO_IRQ),
+ },
+};
+
+static const struct mfd_cell apl_gpio_devices[APL_GPIO_NR_DEVICES] = {
+ [APL_GPIO_NORTH] = {
+ .name = "apollolake-pinctrl",
+ .id = APL_GPIO_NORTH,
+ .num_resources = ARRAY_SIZE(apl_gpio_resources[APL_GPIO_NORTH]),
+ .resources = apl_gpio_resources[APL_GPIO_NORTH],
+ .ignore_resource_conflicts = true,
+ },
+ [APL_GPIO_NORTHWEST] = {
+ .name = "apollolake-pinctrl",
+ .id = APL_GPIO_NORTHWEST,
+ .num_resources = ARRAY_SIZE(apl_gpio_resources[APL_GPIO_NORTHWEST]),
+ .resources = apl_gpio_resources[APL_GPIO_NORTHWEST],
+ .ignore_resource_conflicts = true,
+ },
+ [APL_GPIO_WEST] = {
+ .name = "apollolake-pinctrl",
+ .id = APL_GPIO_WEST,
+ .num_resources = ARRAY_SIZE(apl_gpio_resources[APL_GPIO_WEST]),
+ .resources = apl_gpio_resources[APL_GPIO_WEST],
+ .ignore_resource_conflicts = true,
+ },
+ [APL_GPIO_SOUTHWEST] = {
+ .name = "apollolake-pinctrl",
+ .id = APL_GPIO_SOUTHWEST,
+ .num_resources = ARRAY_SIZE(apl_gpio_resources[APL_GPIO_SOUTHWEST]),
+ .resources = apl_gpio_resources[APL_GPIO_SOUTHWEST],
+ .ignore_resource_conflicts = true,
+ },
+};
static struct mfd_cell lpc_ich_spi_cell = {
.name = "intel-spi",
@@ -1086,6 +1154,34 @@ wdt_done:
return ret;
}
+static int lpc_ich_init_pinctrl(struct pci_dev *dev)
+{
+ struct resource base;
+ unsigned int i;
+ int ret;
+
+	/* Check if GPIO has been exported as an ACPI device */
+ if (acpi_dev_present("INT3452", NULL, -1))
+ return -EEXIST;
+
+ ret = p2sb_bar(dev->bus, 0, &base);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(apl_gpio_devices); i++) {
+ struct resource *mem = &apl_gpio_resources[i][0];
+ resource_size_t offset = apl_gpio_offsets[i];
+
+ /* Fill MEM resource */
+ mem->start = base.start + offset;
+ mem->end = base.start + offset + APL_GPIO_RESOURCE_SIZE - 1;
+ mem->flags = base.flags;
+ }
+
+ return mfd_add_devices(&dev->dev, 0, apl_gpio_devices,
+ ARRAY_SIZE(apl_gpio_devices), NULL, 0, NULL);
+}
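p2sb_bar() comes from the new P2SB helper library (hence the 'select P2SB if X86' in Kconfig): it temporarily unhides the Primary-to-Sideband bridge, reads the BAR of the requested device/function behind it into a struct resource, and hides the bridge again, replacing the open-coded 0xe1 config-space toggling removed further down. A hedged sketch of a call, mirroring the SPI case below:

	struct resource bar;
	int ret;

	/* SPI controller at device 13, function 2 behind the hidden P2SB */
	ret = p2sb_bar(dev->bus, PCI_DEVFN(13, 2), &bar);
	if (ret)
		return ret;
	/* bar.start..bar.end now describe the controller's MMIO window */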
+
static bool lpc_ich_byt_set_writeable(void __iomem *base, void *data)
{
u32 val;
@@ -1100,35 +1196,32 @@ static bool lpc_ich_byt_set_writeable(void __iomem *base, void *data)
return val & BYT_BCR_WPD;
}
-static bool lpc_ich_lpt_set_writeable(void __iomem *base, void *data)
+static bool lpc_ich_set_writeable(struct pci_bus *bus, unsigned int devfn)
{
- struct pci_dev *pdev = data;
u32 bcr;
- pci_read_config_dword(pdev, BCR, &bcr);
+ pci_bus_read_config_dword(bus, devfn, BCR, &bcr);
if (!(bcr & BCR_WPD)) {
bcr |= BCR_WPD;
- pci_write_config_dword(pdev, BCR, bcr);
- pci_read_config_dword(pdev, BCR, &bcr);
+ pci_bus_write_config_dword(bus, devfn, BCR, bcr);
+ pci_bus_read_config_dword(bus, devfn, BCR, &bcr);
}
return bcr & BCR_WPD;
}
-static bool lpc_ich_bxt_set_writeable(void __iomem *base, void *data)
+static bool lpc_ich_lpt_set_writeable(void __iomem *base, void *data)
{
- unsigned int spi = PCI_DEVFN(13, 2);
- struct pci_bus *bus = data;
- u32 bcr;
+ struct pci_dev *pdev = data;
- pci_bus_read_config_dword(bus, spi, BCR, &bcr);
- if (!(bcr & BCR_WPD)) {
- bcr |= BCR_WPD;
- pci_bus_write_config_dword(bus, spi, BCR, bcr);
- pci_bus_read_config_dword(bus, spi, BCR, &bcr);
- }
+ return lpc_ich_set_writeable(pdev->bus, pdev->devfn);
+}
- return bcr & BCR_WPD;
+static bool lpc_ich_bxt_set_writeable(void __iomem *base, void *data)
+{
+ struct pci_dev *pdev = data;
+
+ return lpc_ich_set_writeable(pdev->bus, PCI_DEVFN(13, 2));
}
static int lpc_ich_init_spi(struct pci_dev *dev)
@@ -1137,6 +1230,7 @@ static int lpc_ich_init_spi(struct pci_dev *dev)
struct resource *res = &intel_spi_res[0];
struct intel_spi_boardinfo *info;
u32 spi_base, rcba;
+ int ret;
info = devm_kzalloc(&dev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
@@ -1167,30 +1261,19 @@ static int lpc_ich_init_spi(struct pci_dev *dev)
}
break;
- case INTEL_SPI_BXT: {
- unsigned int p2sb = PCI_DEVFN(13, 0);
- unsigned int spi = PCI_DEVFN(13, 2);
- struct pci_bus *bus = dev->bus;
-
+ case INTEL_SPI_BXT:
/*
* The P2SB is hidden by BIOS and we need to unhide it in
* order to read BAR of the SPI flash device. Once that is
* done we hide it again.
*/
- pci_bus_write_config_byte(bus, p2sb, 0xe1, 0x0);
- pci_bus_read_config_dword(bus, spi, PCI_BASE_ADDRESS_0,
- &spi_base);
- if (spi_base != ~0) {
- res->start = spi_base & 0xfffffff0;
- res->end = res->start + SPIBASE_APL_SZ - 1;
-
- info->set_writeable = lpc_ich_bxt_set_writeable;
- info->data = bus;
- }
+ ret = p2sb_bar(dev->bus, PCI_DEVFN(13, 2), res);
+ if (ret)
+ return ret;
- pci_bus_write_config_byte(bus, p2sb, 0xe1, 0x1);
+ info->set_writeable = lpc_ich_bxt_set_writeable;
+ info->data = dev;
break;
- }
default:
return -EINVAL;
@@ -1249,6 +1332,12 @@ static int lpc_ich_probe(struct pci_dev *dev,
cell_added = true;
}
+ if (priv->chipset == LPC_APL) {
+ ret = lpc_ich_init_pinctrl(dev);
+ if (!ret)
+ cell_added = true;
+ }
+
if (lpc_chipset_info[priv->chipset].spi_type) {
ret = lpc_ich_init_spi(dev);
if (!ret)
diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c
index fec2096474ad..a6661e07035b 100644
--- a/drivers/mfd/max77620.c
+++ b/drivers/mfd/max77620.c
@@ -419,9 +419,11 @@ static int max77620_initialise_fps(struct max77620_chip *chip)
ret = max77620_config_fps(chip, fps_child);
if (ret < 0) {
of_node_put(fps_child);
+ of_node_put(fps_np);
return ret;
}
}
+ of_node_put(fps_np);
config = chip->enable_global_lpm ? MAX77620_ONOFFCNFG2_SLP_LPM_MSK : 0;
ret = regmap_update_bits(chip->rmap, MAX77620_REG_ONOFFCNFG2,
diff --git a/drivers/mfd/max77714.c b/drivers/mfd/max77714.c
index d1e4247800d2..143a432ea343 100644
--- a/drivers/mfd/max77714.c
+++ b/drivers/mfd/max77714.c
@@ -3,7 +3,7 @@
* Maxim MAX77714 Core Driver
*
* Copyright (C) 2022 Luca Ceresoli
- * Author: Luca Ceresoli <luca@lucaceresoli.net>
+ * Author: Luca Ceresoli <luca.ceresoli@bootlin.com>
*/
#include <linux/i2c.h>
@@ -148,5 +148,5 @@ static struct i2c_driver max77714_driver = {
module_i2c_driver(max77714_driver);
MODULE_DESCRIPTION("Maxim MAX77714 MFD core driver");
-MODULE_AUTHOR("Luca Ceresoli <luca@lucaceresoli.net>");
+MODULE_AUTHOR("Luca Ceresoli <luca.ceresoli@bootlin.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/mt6358-irq.c b/drivers/mfd/mt6358-irq.c
index ea5e452510eb..389756436af6 100644
--- a/drivers/mfd/mt6358-irq.c
+++ b/drivers/mfd/mt6358-irq.c
@@ -3,6 +3,8 @@
// Copyright (c) 2020 MediaTek Inc.
#include <linux/interrupt.h>
+#include <linux/mfd/mt6357/core.h>
+#include <linux/mfd/mt6357/registers.h>
#include <linux/mfd/mt6358/core.h>
#include <linux/mfd/mt6358/registers.h>
#include <linux/mfd/mt6359/core.h>
@@ -17,6 +19,17 @@
#define MTK_PMIC_REG_WIDTH 16
+static const struct irq_top_t mt6357_ints[] = {
+ MT6357_TOP_GEN(BUCK),
+ MT6357_TOP_GEN(LDO),
+ MT6357_TOP_GEN(PSC),
+ MT6357_TOP_GEN(SCK),
+ MT6357_TOP_GEN(BM),
+ MT6357_TOP_GEN(HK),
+ MT6357_TOP_GEN(AUD),
+ MT6357_TOP_GEN(MISC),
+};
+
static const struct irq_top_t mt6358_ints[] = {
MT6358_TOP_GEN(BUCK),
MT6358_TOP_GEN(LDO),
@@ -39,6 +52,13 @@ static const struct irq_top_t mt6359_ints[] = {
MT6359_TOP_GEN(MISC),
};
+static struct pmic_irq_data mt6357_irqd = {
+ .num_top = ARRAY_SIZE(mt6357_ints),
+ .num_pmic_irqs = MT6357_IRQ_NR,
+ .top_int_status_reg = MT6357_TOP_INT_STATUS0,
+ .pmic_ints = mt6357_ints,
+};
+
static struct pmic_irq_data mt6358_irqd = {
.num_top = ARRAY_SIZE(mt6358_ints),
.num_pmic_irqs = MT6358_IRQ_NR,
@@ -211,6 +231,10 @@ int mt6358_irq_init(struct mt6397_chip *chip)
struct pmic_irq_data *irqd;
switch (chip->chip_id) {
+ case MT6357_CHIP_ID:
+ chip->irq_data = &mt6357_irqd;
+ break;
+
case MT6358_CHIP_ID:
case MT6366_CHIP_ID:
chip->irq_data = &mt6358_irqd;
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
index 1a368ad08f58..f6c1f80f94a4 100644
--- a/drivers/mfd/mt6397-core.c
+++ b/drivers/mfd/mt6397-core.c
@@ -12,10 +12,14 @@
#include <linux/regmap.h>
#include <linux/mfd/core.h>
#include <linux/mfd/mt6323/core.h>
+#include <linux/mfd/mt6331/core.h>
+#include <linux/mfd/mt6357/core.h>
#include <linux/mfd/mt6358/core.h>
#include <linux/mfd/mt6359/core.h>
#include <linux/mfd/mt6397/core.h>
#include <linux/mfd/mt6323/registers.h>
+#include <linux/mfd/mt6331/registers.h>
+#include <linux/mfd/mt6357/registers.h>
#include <linux/mfd/mt6358/registers.h>
#include <linux/mfd/mt6359/registers.h>
#include <linux/mfd/mt6397/registers.h>
@@ -23,6 +27,12 @@
#define MT6323_RTC_BASE 0x8000
#define MT6323_RTC_SIZE 0x40
+#define MT6357_RTC_BASE 0x0588
+#define MT6357_RTC_SIZE 0x3c
+
+#define MT6331_RTC_BASE 0x4000
+#define MT6331_RTC_SIZE 0x40
+
#define MT6358_RTC_BASE 0x0588
#define MT6358_RTC_SIZE 0x3c
@@ -37,6 +47,16 @@ static const struct resource mt6323_rtc_resources[] = {
DEFINE_RES_IRQ(MT6323_IRQ_STATUS_RTC),
};
+static const struct resource mt6357_rtc_resources[] = {
+ DEFINE_RES_MEM(MT6357_RTC_BASE, MT6357_RTC_SIZE),
+ DEFINE_RES_IRQ(MT6357_IRQ_RTC),
+};
+
+static const struct resource mt6331_rtc_resources[] = {
+ DEFINE_RES_MEM(MT6331_RTC_BASE, MT6331_RTC_SIZE),
+ DEFINE_RES_IRQ(MT6331_IRQ_STATUS_RTC),
+};
+
static const struct resource mt6358_rtc_resources[] = {
DEFINE_RES_MEM(MT6358_RTC_BASE, MT6358_RTC_SIZE),
DEFINE_RES_IRQ(MT6358_IRQ_RTC),
@@ -66,6 +86,18 @@ static const struct resource mt6323_keys_resources[] = {
DEFINE_RES_IRQ_NAMED(MT6323_IRQ_STATUS_FCHRKEY, "homekey"),
};
+static const struct resource mt6357_keys_resources[] = {
+ DEFINE_RES_IRQ_NAMED(MT6357_IRQ_PWRKEY, "powerkey"),
+ DEFINE_RES_IRQ_NAMED(MT6357_IRQ_HOMEKEY, "homekey"),
+ DEFINE_RES_IRQ_NAMED(MT6357_IRQ_PWRKEY_R, "powerkey_r"),
+ DEFINE_RES_IRQ_NAMED(MT6357_IRQ_HOMEKEY_R, "homekey_r"),
+};
+
+static const struct resource mt6331_keys_resources[] = {
+ DEFINE_RES_IRQ_NAMED(MT6331_IRQ_STATUS_PWRKEY, "powerkey"),
+ DEFINE_RES_IRQ_NAMED(MT6331_IRQ_STATUS_HOMEKEY, "homekey"),
+};
+
static const struct resource mt6397_keys_resources[] = {
DEFINE_RES_IRQ_NAMED(MT6397_IRQ_PWRKEY, "powerkey"),
DEFINE_RES_IRQ_NAMED(MT6397_IRQ_HOMEKEY, "homekey"),
@@ -100,6 +132,43 @@ static const struct mfd_cell mt6323_devs[] = {
},
};
+static const struct mfd_cell mt6357_devs[] = {
+ {
+ .name = "mt6357-regulator",
+ }, {
+ .name = "mt6357-rtc",
+ .num_resources = ARRAY_SIZE(mt6357_rtc_resources),
+ .resources = mt6357_rtc_resources,
+ .of_compatible = "mediatek,mt6357-rtc",
+ }, {
+ .name = "mtk-pmic-keys",
+ .num_resources = ARRAY_SIZE(mt6357_keys_resources),
+ .resources = mt6357_keys_resources,
+ .of_compatible = "mediatek,mt6357-keys"
+ },
+};
+
+/* MT6331 is always used in combination with MT6332 */
+static const struct mfd_cell mt6331_mt6332_devs[] = {
+ {
+ .name = "mt6331-rtc",
+ .num_resources = ARRAY_SIZE(mt6331_rtc_resources),
+ .resources = mt6331_rtc_resources,
+ .of_compatible = "mediatek,mt6331-rtc",
+ }, {
+ .name = "mt6331-regulator",
+ .of_compatible = "mediatek,mt6331-regulator"
+ }, {
+ .name = "mt6332-regulator",
+ .of_compatible = "mediatek,mt6332-regulator"
+ }, {
+ .name = "mtk-pmic-keys",
+ .num_resources = ARRAY_SIZE(mt6331_keys_resources),
+ .resources = mt6331_keys_resources,
+ .of_compatible = "mediatek,mt6331-keys"
+ },
+};
+
static const struct mfd_cell mt6358_devs[] = {
{
.name = "mt6358-regulator",
@@ -179,6 +248,22 @@ static const struct chip_data mt6323_core = {
.irq_init = mt6397_irq_init,
};
+static const struct chip_data mt6357_core = {
+ .cid_addr = MT6357_SWCID,
+ .cid_shift = 8,
+ .cells = mt6357_devs,
+ .cell_size = ARRAY_SIZE(mt6357_devs),
+ .irq_init = mt6358_irq_init,
+};
+
+static const struct chip_data mt6331_mt6332_core = {
+ .cid_addr = MT6331_HWCID,
+ .cid_shift = 0,
+ .cells = mt6331_mt6332_devs,
+ .cell_size = ARRAY_SIZE(mt6331_mt6332_devs),
+ .irq_init = mt6397_irq_init,
+};
+
static const struct chip_data mt6358_core = {
.cid_addr = MT6358_SWCID,
.cid_shift = 8,
@@ -262,6 +347,12 @@ static const struct of_device_id mt6397_of_match[] = {
.compatible = "mediatek,mt6323",
.data = &mt6323_core,
}, {
+ .compatible = "mediatek,mt6331",
+ .data = &mt6331_mt6332_core,
+ }, {
+ .compatible = "mediatek,mt6357",
+ .data = &mt6357_core,
+ }, {
.compatible = "mediatek,mt6358",
.data = &mt6358_core,
}, {
diff --git a/drivers/mfd/mt6397-irq.c b/drivers/mfd/mt6397-irq.c
index 2924919da991..eff53fed8fe7 100644
--- a/drivers/mfd/mt6397-irq.c
+++ b/drivers/mfd/mt6397-irq.c
@@ -12,6 +12,8 @@
#include <linux/suspend.h>
#include <linux/mfd/mt6323/core.h>
#include <linux/mfd/mt6323/registers.h>
+#include <linux/mfd/mt6331/core.h>
+#include <linux/mfd/mt6331/registers.h>
#include <linux/mfd/mt6397/core.h>
#include <linux/mfd/mt6397/registers.h>
@@ -172,7 +174,12 @@ int mt6397_irq_init(struct mt6397_chip *chip)
chip->int_status[0] = MT6323_INT_STATUS0;
chip->int_status[1] = MT6323_INT_STATUS1;
break;
-
+ case MT6331_CHIP_ID:
+ chip->int_con[0] = MT6331_INT_CON0;
+ chip->int_con[1] = MT6331_INT_CON1;
+ chip->int_status[0] = MT6331_INT_STATUS_CON0;
+ chip->int_status[1] = MT6331_INT_STATUS_CON1;
+ break;
case MT6391_CHIP_ID:
case MT6397_CHIP_ID:
chip->int_con[0] = MT6397_INT_CON0;
diff --git a/drivers/mfd/qcom-pm8008.c b/drivers/mfd/qcom-pm8008.c
index c472d7f8103c..4b8ff947762f 100644
--- a/drivers/mfd/qcom-pm8008.c
+++ b/drivers/mfd/qcom-pm8008.c
@@ -54,13 +54,6 @@ enum {
#define PM8008_PERIPH_OFFSET(paddr) (paddr - PM8008_PERIPH_0_BASE)
-struct pm8008_data {
- struct device *dev;
- struct regmap *regmap;
- int irq;
- struct regmap_irq_chip_data *irq_data;
-};
-
static unsigned int p0_offs[] = {PM8008_PERIPH_OFFSET(PM8008_PERIPH_0_BASE)};
static unsigned int p1_offs[] = {PM8008_PERIPH_OFFSET(PM8008_PERIPH_1_BASE)};
static unsigned int p2_offs[] = {PM8008_PERIPH_OFFSET(PM8008_PERIPH_2_BASE)};
@@ -150,7 +143,7 @@ static struct regmap_config qcom_mfd_regmap_cfg = {
.max_register = 0xFFFF,
};
-static int pm8008_init(struct pm8008_data *chip)
+static int pm8008_init(struct regmap *regmap)
{
int rc;
@@ -160,34 +153,31 @@ static int pm8008_init(struct pm8008_data *chip)
* This is required to enable the writing of TYPE registers in
* regmap_irq_sync_unlock().
*/
- rc = regmap_write(chip->regmap,
- (PM8008_TEMP_ALARM_ADDR | INT_SET_TYPE_OFFSET),
- BIT(0));
+ rc = regmap_write(regmap, (PM8008_TEMP_ALARM_ADDR | INT_SET_TYPE_OFFSET), BIT(0));
if (rc)
return rc;
/* Do the same for GPIO1 and GPIO2 peripherals */
- rc = regmap_write(chip->regmap,
- (PM8008_GPIO1_ADDR | INT_SET_TYPE_OFFSET), BIT(0));
+ rc = regmap_write(regmap, (PM8008_GPIO1_ADDR | INT_SET_TYPE_OFFSET), BIT(0));
if (rc)
return rc;
- rc = regmap_write(chip->regmap,
- (PM8008_GPIO2_ADDR | INT_SET_TYPE_OFFSET), BIT(0));
+ rc = regmap_write(regmap, (PM8008_GPIO2_ADDR | INT_SET_TYPE_OFFSET), BIT(0));
return rc;
}
-static int pm8008_probe_irq_peripherals(struct pm8008_data *chip,
+static int pm8008_probe_irq_peripherals(struct device *dev,
+ struct regmap *regmap,
int client_irq)
{
int rc, i;
struct regmap_irq_type *type;
struct regmap_irq_chip_data *irq_data;
- rc = pm8008_init(chip);
+ rc = pm8008_init(regmap);
if (rc) {
- dev_err(chip->dev, "Init failed: %d\n", rc);
+ dev_err(dev, "Init failed: %d\n", rc);
return rc;
}
@@ -207,10 +197,10 @@ static int pm8008_probe_irq_peripherals(struct pm8008_data *chip,
IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW);
}
- rc = devm_regmap_add_irq_chip(chip->dev, chip->regmap, client_irq,
+ rc = devm_regmap_add_irq_chip(dev, regmap, client_irq,
IRQF_SHARED, 0, &pm8008_irq_chip, &irq_data);
if (rc) {
- dev_err(chip->dev, "Failed to add IRQ chip: %d\n", rc);
+ dev_err(dev, "Failed to add IRQ chip: %d\n", rc);
return rc;
}
@@ -220,26 +210,23 @@ static int pm8008_probe_irq_peripherals(struct pm8008_data *chip,
static int pm8008_probe(struct i2c_client *client)
{
int rc;
- struct pm8008_data *chip;
-
- chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
- if (!chip)
- return -ENOMEM;
+ struct device *dev;
+ struct regmap *regmap;
- chip->dev = &client->dev;
- chip->regmap = devm_regmap_init_i2c(client, &qcom_mfd_regmap_cfg);
- if (!chip->regmap)
+ dev = &client->dev;
+ regmap = devm_regmap_init_i2c(client, &qcom_mfd_regmap_cfg);
+ if (!regmap)
return -ENODEV;
- i2c_set_clientdata(client, chip);
+ i2c_set_clientdata(client, regmap);
- if (of_property_read_bool(chip->dev->of_node, "interrupt-controller")) {
- rc = pm8008_probe_irq_peripherals(chip, client->irq);
+ if (of_property_read_bool(dev->of_node, "interrupt-controller")) {
+ rc = pm8008_probe_irq_peripherals(dev, regmap, client->irq);
if (rc)
- dev_err(chip->dev, "Failed to probe irq periphs: %d\n", rc);
+ dev_err(dev, "Failed to probe irq periphs: %d\n", rc);
}
- return devm_of_platform_populate(chip->dev);
+ return devm_of_platform_populate(dev);
}
static const struct of_device_id pm8008_match[] = {
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index 191fdb87c424..bdb2ce7ff03b 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -101,8 +101,7 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_clk)
}
}
- syscon_config.name = kasprintf(GFP_KERNEL, "%pOFn@%llx", np,
- (u64)res.start);
+ syscon_config.name = kasprintf(GFP_KERNEL, "%pOFn@%pa", np, &res.start);
syscon_config.reg_stride = reg_io_width;
syscon_config.val_bits = reg_io_width * 8;
syscon_config.max_register = resource_size(&res) - reg_io_width;
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index 5369c67e3280..663ffd4b8570 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -397,11 +397,8 @@ err_noirq:
static int t7l66xb_remove(struct platform_device *dev)
{
- struct t7l66xb_platform_data *pdata = dev_get_platdata(&dev->dev);
struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
- int ret;
- ret = pdata->disable(dev);
clk_disable_unprepare(t7l66xb->clk48m);
clk_put(t7l66xb->clk48m);
clk_disable_unprepare(t7l66xb->clk32k);
@@ -412,8 +409,7 @@ static int t7l66xb_remove(struct platform_device *dev)
mfd_remove_devices(&dev->dev);
kfree(t7l66xb);
- return ret;
-
+ return 0;
}
static struct platform_driver t7l66xb_platform_driver = {
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index 0be5731685b4..aa903a31dd43 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -798,20 +798,19 @@ static int tc6393xb_remove(struct platform_device *dev)
{
struct tc6393xb_platform_data *tcpd = dev_get_platdata(&dev->dev);
struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
- int ret;
mfd_remove_devices(&dev->dev);
tc6393xb_detach_irq(dev);
- ret = tcpd->disable(dev);
+ tcpd->disable(dev);
clk_disable_unprepare(tc6393xb->clk);
iounmap(tc6393xb->scr);
release_resource(&tc6393xb->rscr);
clk_put(tc6393xb->clk);
kfree(tc6393xb);
- return ret;
+ return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index bd6659cf3bc0..2cb9326f3e61 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -656,309 +656,6 @@ static inline struct device *add_child(unsigned mod_no, const char *name,
can_wakeup, irq0, irq1);
}
-static struct device *
-add_regulator_linked(int num, struct regulator_init_data *pdata,
- struct regulator_consumer_supply *consumers,
- unsigned num_consumers, unsigned long features)
-{
- struct twl_regulator_driver_data drv_data;
-
- /* regulator framework demands init_data ... */
- if (!pdata)
- return NULL;
-
- if (consumers) {
- pdata->consumer_supplies = consumers;
- pdata->num_consumer_supplies = num_consumers;
- }
-
- if (pdata->driver_data) {
- /* If we have existing drv_data, just add the flags */
- struct twl_regulator_driver_data *tmp;
- tmp = pdata->driver_data;
- tmp->features |= features;
- } else {
- /* add new driver data struct, used only during init */
- drv_data.features = features;
- drv_data.set_voltage = NULL;
- drv_data.get_voltage = NULL;
- drv_data.data = NULL;
- pdata->driver_data = &drv_data;
- }
-
- /* NOTE: we currently ignore regulator IRQs, e.g. for short circuits */
- return add_numbered_child(TWL_MODULE_PM_MASTER, "twl_reg", num,
- pdata, sizeof(*pdata), false, 0, 0);
-}
-
-static struct device *
-add_regulator(int num, struct regulator_init_data *pdata,
- unsigned long features)
-{
- return add_regulator_linked(num, pdata, NULL, 0, features);
-}
-
-/*
- * NOTE: We know the first 8 IRQs after pdata->base_irq are
- * for the PIH, and the next are for the PWR_INT SIH, since
- * that's how twl_init_irq() sets things up.
- */
-
-static int
-add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
- unsigned long features)
-{
- struct device *child;
-
- if (IS_ENABLED(CONFIG_GPIO_TWL4030) && pdata->gpio) {
- child = add_child(TWL4030_MODULE_GPIO, "twl4030_gpio",
- pdata->gpio, sizeof(*pdata->gpio),
- false, irq_base + GPIO_INTR_OFFSET, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_KEYBOARD_TWL4030) && pdata->keypad) {
- child = add_child(TWL4030_MODULE_KEYPAD, "twl4030_keypad",
- pdata->keypad, sizeof(*pdata->keypad),
- true, irq_base + KEYPAD_INTR_OFFSET, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_TWL4030_MADC) && pdata->madc &&
- twl_class_is_4030()) {
- child = add_child(TWL4030_MODULE_MADC, "twl4030_madc",
- pdata->madc, sizeof(*pdata->madc),
- true, irq_base + MADC_INTR_OFFSET, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_RTC_DRV_TWL4030)) {
- /*
- * REVISIT platform_data here currently might expose the
- * "msecure" line ... but for now we just expect board
- * setup to tell the chip "it's always ok to SET_TIME".
- * Eventually, Linux might become more aware of such
- * HW security concerns, and "least privilege".
- */
- child = add_child(TWL_MODULE_RTC, "twl_rtc", NULL, 0,
- true, irq_base + RTC_INTR_OFFSET, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_PWM_TWL)) {
- child = add_child(TWL_MODULE_PWM, "twl-pwm", NULL, 0,
- false, 0, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_PWM_TWL_LED)) {
- child = add_child(TWL_MODULE_LED, "twl-pwmled", NULL, 0,
- false, 0, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_TWL4030_USB) && pdata->usb &&
- twl_class_is_4030()) {
-
- static struct regulator_consumer_supply usb1v5 = {
- .supply = "usb1v5",
- };
- static struct regulator_consumer_supply usb1v8 = {
- .supply = "usb1v8",
- };
- static struct regulator_consumer_supply usb3v1 = {
- .supply = "usb3v1",
- };
-
- /* First add the regulators so that they can be used by transceiver */
- if (IS_ENABLED(CONFIG_REGULATOR_TWL4030)) {
- /* this is a template that gets copied */
- struct regulator_init_data usb_fixed = {
- .constraints.valid_modes_mask =
- REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .constraints.valid_ops_mask =
- REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- };
-
- child = add_regulator_linked(TWL4030_REG_VUSB1V5,
- &usb_fixed, &usb1v5, 1,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator_linked(TWL4030_REG_VUSB1V8,
- &usb_fixed, &usb1v8, 1,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator_linked(TWL4030_REG_VUSB3V1,
- &usb_fixed, &usb3v1, 1,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- }
-
- child = add_child(TWL_MODULE_USB, "twl4030_usb",
- pdata->usb, sizeof(*pdata->usb), true,
- /* irq0 = USB_PRES, irq1 = USB */
- irq_base + USB_PRES_INTR_OFFSET,
- irq_base + USB_INTR_OFFSET);
-
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- /* we need to connect regulators to this transceiver */
- if (IS_ENABLED(CONFIG_REGULATOR_TWL4030) && child) {
- usb1v5.dev_name = dev_name(child);
- usb1v8.dev_name = dev_name(child);
- usb3v1.dev_name = dev_name(child);
- }
- }
-
- if (IS_ENABLED(CONFIG_TWL4030_WATCHDOG) && twl_class_is_4030()) {
- child = add_child(TWL_MODULE_PM_RECEIVER, "twl4030_wdt", NULL,
- 0, false, 0, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_INPUT_TWL4030_PWRBUTTON) && twl_class_is_4030()) {
- child = add_child(TWL_MODULE_PM_MASTER, "twl4030_pwrbutton",
- NULL, 0, true, irq_base + 8 + 0, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_MFD_TWL4030_AUDIO) && pdata->audio &&
- twl_class_is_4030()) {
- child = add_child(TWL4030_MODULE_AUDIO_VOICE, "twl4030-audio",
- pdata->audio, sizeof(*pdata->audio),
- false, 0, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- /* twl4030 regulators */
- if (IS_ENABLED(CONFIG_REGULATOR_TWL4030) && twl_class_is_4030()) {
- child = add_regulator(TWL4030_REG_VPLL1, pdata->vpll1,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VIO, pdata->vio,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VDD1, pdata->vdd1,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VDD2, pdata->vdd2,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VMMC1, pdata->vmmc1,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VDAC, pdata->vdac,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator((features & TWL4030_VAUX2)
- ? TWL4030_REG_VAUX2_4030
- : TWL4030_REG_VAUX2,
- pdata->vaux2, features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VINTANA1, pdata->vintana1,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VINTANA2, pdata->vintana2,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VINTDIG, pdata->vintdig,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- /* maybe add LDOs that are omitted on cost-reduced parts */
- if (IS_ENABLED(CONFIG_REGULATOR_TWL4030) && !(features & TPS_SUBSET)
- && twl_class_is_4030()) {
- child = add_regulator(TWL4030_REG_VPLL2, pdata->vpll2,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VMMC2, pdata->vmmc2,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VSIM, pdata->vsim,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VAUX1, pdata->vaux1,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VAUX3, pdata->vaux3,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- child = add_regulator(TWL4030_REG_VAUX4, pdata->vaux4,
- features);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_CHARGER_TWL4030) && pdata->bci &&
- !(features & (TPS_SUBSET | TWL5031))) {
- child = add_child(TWL_MODULE_MAIN_CHARGE, "twl4030_bci",
- pdata->bci, sizeof(*pdata->bci), false,
- /* irq0 = CHG_PRES, irq1 = BCI */
- irq_base + BCI_PRES_INTR_OFFSET,
- irq_base + BCI_INTR_OFFSET);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- if (IS_ENABLED(CONFIG_TWL4030_POWER) && pdata->power) {
- child = add_child(TWL_MODULE_PM_MASTER, "twl4030_power",
- pdata->power, sizeof(*pdata->power), false,
- 0, 0);
- if (IS_ERR(child))
- return PTR_ERR(child);
- }
-
- return 0;
-}
-
/*----------------------------------------------------------------------*/
/*
@@ -987,8 +684,7 @@ static inline int unprotect_pm_master(void)
return e;
}
-static void clocks_init(struct device *dev,
- struct twl4030_clock_init_data *clock)
+static void clocks_init(struct device *dev)
{
int e = 0;
struct clk *osc;
@@ -1018,8 +714,6 @@ static void clocks_init(struct device *dev,
}
ctrl |= HIGH_PERF_SQ;
- if (clock && clock->ck32k_lowpwr_enable)
- ctrl |= CK32K_LOWPWR_EN;
e |= unprotect_pm_master();
/* effect->MADC+USB ck en */
@@ -1063,7 +757,6 @@ static struct of_dev_auxdata twl_auxdata_lookup[] = {
static int
twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
- struct twl4030_platform_data *pdata = dev_get_platdata(&client->dev);
struct device_node *node = client->dev.of_node;
struct platform_device *pdev;
const struct regmap_config *twl_regmap_config;
@@ -1071,7 +764,7 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
int status;
unsigned i, num_slaves;
- if (!node && !pdata) {
+ if (!node) {
dev_err(&client->dev, "no platform data\n");
return -EINVAL;
}
@@ -1161,7 +854,7 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
twl_priv->ready = true;
/* setup clock framework */
- clocks_init(&client->dev, pdata ? pdata->clock : NULL);
+ clocks_init(&client->dev);
/* read TWL IDCODE Register */
if (twl_class_is_4030()) {
@@ -1209,14 +902,8 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
TWL4030_DCDC_GLOBAL_CFG);
}
- if (node) {
- if (pdata)
- twl_auxdata_lookup[0].platform_data = pdata->gpio;
- status = of_platform_populate(node, NULL, twl_auxdata_lookup,
- &client->dev);
- } else {
- status = add_children(pdata, irq_base, id->driver_data);
- }
+ status = of_platform_populate(node, NULL, twl_auxdata_lookup,
+ &client->dev);
fail:
if (status < 0)
diff --git a/drivers/mfd/ucb1400_core.c b/drivers/mfd/ucb1400_core.c
index 8c3832a58ef6..ac1d18039568 100644
--- a/drivers/mfd/ucb1400_core.c
+++ b/drivers/mfd/ucb1400_core.c
@@ -72,11 +72,9 @@ static int ucb1400_core_probe(struct device *dev)
/* GPIO */
ucb_gpio.ac97 = ac97;
- if (pdata) {
- ucb_gpio.gpio_setup = pdata->gpio_setup;
- ucb_gpio.gpio_teardown = pdata->gpio_teardown;
+ if (pdata)
ucb_gpio.gpio_offset = pdata->gpio_offset;
- }
+
ucb->ucb1400_gpio = platform_device_alloc("ucb1400_gpio", -1);
if (!ucb->ucb1400_gpio) {
err = -ENOMEM;
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 85dd6aa33df6..61a2be712bf7 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -1585,7 +1585,7 @@ static int vmballoon_register_shrinker(struct vmballoon *b)
b->shrinker.count_objects = vmballoon_shrinker_count;
b->shrinker.seeks = DEFAULT_SEEKS;
- r = register_shrinker(&b->shrinker);
+ r = register_shrinker(&b->shrinker, "vmw-balloon");
if (r == 0)
b->shrinker_registered = true;
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index e08e22f0a7c5..ce89611a136e 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -176,7 +176,7 @@ static inline int mmc_blk_part_switch(struct mmc_card *card,
unsigned int part_type);
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
struct mmc_card *card,
- int disable_multi,
+ int recovery_mode,
struct mmc_queue *mq);
static void mmc_blk_hsq_req_done(struct mmc_request *mrq);
@@ -1302,7 +1302,7 @@ static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
}
static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
- int disable_multi, bool *do_rel_wr_p,
+ int recovery_mode, bool *do_rel_wr_p,
bool *do_data_tag_p)
{
struct mmc_blk_data *md = mq->blkdata;
@@ -1368,12 +1368,12 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
brq->data.blocks--;
/*
- * After a read error, we redo the request one sector
+ * After a read error, we redo the request one (native) sector
* at a time in order to accurately determine which
* sectors can be read successfully.
*/
- if (disable_multi)
- brq->data.blocks = 1;
+ if (recovery_mode)
+ brq->data.blocks = queue_physical_block_size(mq->queue) >> 9;
/*
* Some controllers have HW issues while operating
@@ -1590,7 +1590,7 @@ static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req)
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
struct mmc_card *card,
- int disable_multi,
+ int recovery_mode,
struct mmc_queue *mq)
{
u32 readcmd, writecmd;
@@ -1599,7 +1599,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
struct mmc_blk_data *md = mq->blkdata;
bool do_rel_wr, do_data_tag;
- mmc_blk_data_prep(mq, mqrq, disable_multi, &do_rel_wr, &do_data_tag);
+ mmc_blk_data_prep(mq, mqrq, recovery_mode, &do_rel_wr, &do_data_tag);
brq->mrq.cmd = &brq->cmd;
@@ -1690,7 +1690,7 @@ static int mmc_blk_fix_state(struct mmc_card *card, struct request *req)
#define MMC_READ_SINGLE_RETRIES 2
-/* Single sector read during recovery */
+/* Single (native) sector read during recovery */
static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
{
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
@@ -1698,6 +1698,7 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
struct mmc_card *card = mq->card;
struct mmc_host *host = card->host;
blk_status_t error = BLK_STS_OK;
+ size_t bytes_per_read = queue_physical_block_size(mq->queue);
do {
u32 status;
@@ -1732,13 +1733,13 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
else
error = BLK_STS_OK;
- } while (blk_update_request(req, error, 512));
+ } while (blk_update_request(req, error, bytes_per_read));
return;
error_exit:
mrq->data->bytes_xfered = 0;
- blk_update_request(req, BLK_STS_IOERR, 512);
+ blk_update_request(req, BLK_STS_IOERR, bytes_per_read);
/* Let it try the remaining request again */
if (mqrq->retries > MMC_MAX_RETRIES - 1)
mqrq->retries = MMC_MAX_RETRIES - 1;
@@ -1879,10 +1880,9 @@ static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
return;
}
- /* FIXME: Missing single sector read for large sector size */
- if (!mmc_large_sector(card) && rq_data_dir(req) == READ &&
- brq->data.blocks > 1) {
- /* Read one sector at a time */
+ if (rq_data_dir(req) == READ && brq->data.blocks >
+ queue_physical_block_size(mq->queue) >> 9) {
+ /* Read one (native) sector at a time */
mmc_blk_read_single(mq, req);
return;
}
@@ -2988,7 +2988,7 @@ static int mmc_blk_probe(struct mmc_card *card)
* Don't enable runtime PM for SD-combo cards here. Leave that
* decision to be taken during the SDIO init sequence instead.
*/
- if (card->type != MMC_TYPE_SD_COMBO) {
+ if (!mmc_card_sd_combo(card)) {
pm_runtime_set_active(&card->dev);
pm_runtime_enable(&card->dev);
}
@@ -3015,7 +3015,7 @@ static void mmc_blk_remove(struct mmc_card *card)
mmc_blk_part_switch(card, md->part_type);
mmc_release_host(card->host);
}
- if (card->type != MMC_TYPE_SD_COMBO)
+ if (!mmc_card_sd_combo(card))
pm_runtime_disable(&card->dev);
pm_runtime_put_noidle(&card->dev);
mmc_blk_remove_req(md);
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 58a60afa650b..d8762fa3d5cd 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -85,7 +85,7 @@ mmc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
return retval;
}
- if (card->type == MMC_TYPE_SDIO || card->type == MMC_TYPE_SD_COMBO) {
+ if (mmc_card_sdio(card) || mmc_card_sd_combo(card)) {
retval = add_uevent_var(env, "SDIO_ID=%04X:%04X",
card->cis.vendor, card->cis.device);
if (retval)
@@ -107,7 +107,7 @@ mmc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
* SDIO (non-combo) cards are not handled by the mmc_block driver and do not
* have an accessible CID register, which is used by the mmc_card_name() function.
*/
- if (card->type == MMC_TYPE_SDIO)
+ if (mmc_card_sdio(card))
return 0;
retval = add_uevent_var(env, "MMC_NAME=%s", mmc_card_name(card));
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 4b70cbfc6d5d..ef53a2578824 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -943,9 +943,11 @@ int mmc_execute_tuning(struct mmc_card *card)
}
/* Only print error when we don't check for card removal */
- if (!host->detect_change)
+ if (!host->detect_change) {
pr_err("%s: tuning execution failed: %d\n",
mmc_hostname(host), err);
+ mmc_debugfs_err_stats_inc(host, MMC_ERR_TUNING);
+ }
return err;
}
@@ -2244,6 +2246,12 @@ void mmc_rescan(struct work_struct *work)
if (freqs[i] <= host->f_min)
break;
}
+
+ /*
+ * Ignore the command timeout errors observed during
+ * the card init as those are expected.
+ */
+ host->err_stats[MMC_ERR_CMD_TIMEOUT] = 0;
mmc_release_host(host);
out:
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 3fdbc801e64a..fe6808771bc7 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -223,6 +223,81 @@ static int mmc_clock_opt_set(void *data, u64 val)
DEFINE_DEBUGFS_ATTRIBUTE(mmc_clock_fops, mmc_clock_opt_get, mmc_clock_opt_set,
"%llu\n");
+static int mmc_err_state_get(void *data, u64 *val)
+{
+ struct mmc_host *host = data;
+ int i;
+
+ if (!host)
+ return -EINVAL;
+
+ *val = 0;
+ for (i = 0; i < MMC_ERR_MAX; i++) {
+ if (host->err_stats[i]) {
+ *val = 1;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(mmc_err_state, mmc_err_state_get, NULL, "%llu\n");
+
+static int mmc_err_stats_show(struct seq_file *file, void *data)
+{
+ struct mmc_host *host = (struct mmc_host *)file->private;
+ const char *desc[MMC_ERR_MAX] = {
+ [MMC_ERR_CMD_TIMEOUT] = "Command Timeout Occurred",
+ [MMC_ERR_CMD_CRC] = "Command CRC Errors Occurred",
+ [MMC_ERR_DAT_TIMEOUT] = "Data Timeout Occurred",
+ [MMC_ERR_DAT_CRC] = "Data CRC Errors Occurred",
+ [MMC_ERR_AUTO_CMD] = "Auto-Cmd Error Occurred",
+ [MMC_ERR_ADMA] = "ADMA Error Occurred",
+ [MMC_ERR_TUNING] = "Tuning Error Occurred",
+ [MMC_ERR_CMDQ_RED] = "CMDQ RED Errors",
+ [MMC_ERR_CMDQ_GCE] = "CMDQ GCE Errors",
+ [MMC_ERR_CMDQ_ICCE] = "CMDQ ICCE Errors",
+ [MMC_ERR_REQ_TIMEOUT] = "Request Timedout",
+ [MMC_ERR_CMDQ_REQ_TIMEOUT] = "CMDQ Request Timedout",
+ [MMC_ERR_ICE_CFG] = "ICE Config Errors",
+ [MMC_ERR_CTRL_TIMEOUT] = "Controller Timedout errors",
+ [MMC_ERR_UNEXPECTED_IRQ] = "Unexpected IRQ errors",
+ };
+ int i;
+
+ for (i = 0; i < MMC_ERR_MAX; i++) {
+ if (desc[i])
+ seq_printf(file, "# %s:\t %d\n",
+ desc[i], host->err_stats[i]);
+ }
+
+ return 0;
+}
+
+static int mmc_err_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mmc_err_stats_show, inode->i_private);
+}
+
+static ssize_t mmc_err_stats_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct mmc_host *host = filp->f_mapping->host->i_private;
+
+ pr_debug("%s: Resetting MMC error statistics\n", __func__);
+ memset(host->err_stats, 0, sizeof(host->err_stats));
+
+ return cnt;
+}
+
+static const struct file_operations mmc_err_stats_fops = {
+ .open = mmc_err_stats_open,
+ .read = seq_read,
+ .write = mmc_err_stats_write,
+ .release = single_release,
+};
+
void mmc_add_host_debugfs(struct mmc_host *host)
{
struct dentry *root;
@@ -236,6 +311,11 @@ void mmc_add_host_debugfs(struct mmc_host *host)
debugfs_create_file_unsafe("clock", S_IRUSR | S_IWUSR, root, host,
&mmc_clock_fops);
+ debugfs_create_file_unsafe("err_state", 0600, root, host,
+ &mmc_err_state);
+ debugfs_create_file("err_stats", 0600, root, host,
+ &mmc_err_stats_fops);
+
#ifdef CONFIG_FAIL_MMC_REQUEST
if (fail_request)
setup_fault_attr(&fail_default_attr, fail_request);
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 2ed2b4d5e5a5..0fd91f749b3a 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -599,7 +599,7 @@ static int mmc_validate_host_caps(struct mmc_host *host)
}
if (caps2 & (MMC_CAP2_HS400_ES | MMC_CAP2_HS400) &&
- !(caps & MMC_CAP_8_BIT_DATA)) {
+ !(caps & MMC_CAP_8_BIT_DATA) && !(caps2 & MMC_CAP2_NO_MMC)) {
dev_warn(dev, "drop HS400 support since no 8-bit bus\n");
host->caps2 = caps2 & ~MMC_CAP2_HS400_ES & ~MMC_CAP2_HS400;
}
diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
index f879dc63d936..be4393988086 100644
--- a/drivers/mmc/core/quirks.h
+++ b/drivers/mmc/core/quirks.h
@@ -163,8 +163,10 @@ static inline bool mmc_fixup_of_compatible_match(struct mmc_card *card,
struct device_node *np;
for_each_child_of_node(mmc_dev(card->host)->of_node, np) {
- if (of_device_is_compatible(np, compatible))
+ if (of_device_is_compatible(np, compatible)) {
+ of_node_put(np);
return true;
+ }
}
return false;
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index c5f1df6ce4c0..cee4c0b59f43 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -793,7 +793,7 @@ static umode_t sd_std_is_visible(struct kobject *kobj, struct attribute *attr,
attr == &dev_attr_info2.attr ||
attr == &dev_attr_info3.attr ||
attr == &dev_attr_info4.attr
- ) && card->type != MMC_TYPE_SD_COMBO)
+ ) && !mmc_card_sd_combo(card))
return 0;
return attr->mode;
@@ -870,7 +870,7 @@ try_again:
* the CCS bit is set as well. We deliberately deviate from the spec in
* regards to this, which allows UHS-I to be supported for SDSC cards.
*/
- if (!mmc_host_is_spi(host) && rocr && (*rocr & 0x01000000)) {
+ if (!mmc_host_is_spi(host) && rocr && (*rocr & SD_ROCR_S18A)) {
err = mmc_set_uhs_voltage(host, pocr);
if (err == -EAGAIN) {
retries--;
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 25799accf8a0..0b682a31cd3e 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -226,6 +226,20 @@ static int sdio_read_cccr(struct mmc_card *card, u32 ocr)
card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_C;
if (data & SDIO_DRIVE_SDTD)
card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_D;
+
+ ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_INTERRUPT_EXT, 0, &data);
+ if (ret)
+ goto out;
+
+ if (data & SDIO_INTERRUPT_EXT_SAI) {
+ data |= SDIO_INTERRUPT_EXT_EAI;
+ ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_INTERRUPT_EXT,
+ data, NULL);
+ if (ret)
+ goto out;
+
+ card->cccr.enable_async_irq = 1;
+ }
}
/* if no uhs mode ensure we check for high speed */
@@ -335,7 +349,7 @@ static int sdio_disable_4bit_bus(struct mmc_card *card)
{
int err;
- if (card->type == MMC_TYPE_SDIO)
+ if (mmc_card_sdio(card))
goto out;
if (!(card->host->caps & MMC_CAP_4_BIT_DATA))
@@ -360,7 +374,7 @@ static int sdio_enable_4bit_bus(struct mmc_card *card)
err = sdio_enable_wide(card);
if (err <= 0)
return err;
- if (card->type == MMC_TYPE_SDIO)
+ if (mmc_card_sdio(card))
goto out;
if (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4) {
@@ -415,7 +429,7 @@ static int sdio_enable_hs(struct mmc_card *card)
int ret;
ret = mmc_sdio_switch_hs(card, true);
- if (ret <= 0 || card->type == MMC_TYPE_SDIO)
+ if (ret <= 0 || mmc_card_sdio(card))
return ret;
ret = mmc_sd_switch_hs(card);
@@ -441,7 +455,7 @@ static unsigned mmc_sdio_get_max_clock(struct mmc_card *card)
max_dtr = card->cis.max_dtr;
}
- if (card->type == MMC_TYPE_SD_COMBO)
+ if (mmc_card_sd_combo(card))
max_dtr = min(max_dtr, mmc_sd_get_max_clock(card));
return max_dtr;
@@ -689,7 +703,7 @@ try_again:
mmc_sd_get_cid(host, ocr & rocr, card->raw_cid, NULL) == 0) {
card->type = MMC_TYPE_SD_COMBO;
- if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO ||
+ if (oldcard && (!mmc_card_sd_combo(oldcard) ||
memcmp(card->raw_cid, oldcard->raw_cid, sizeof(card->raw_cid)) != 0)) {
err = -ENOENT;
goto mismatch;
@@ -697,7 +711,7 @@ try_again:
} else {
card->type = MMC_TYPE_SDIO;
- if (oldcard && oldcard->type != MMC_TYPE_SDIO) {
+ if (oldcard && !mmc_card_sdio(oldcard)) {
err = -ENOENT;
goto mismatch;
}
@@ -754,7 +768,7 @@ try_again:
/*
* Read CSD, before selecting the card
*/
- if (!oldcard && card->type == MMC_TYPE_SD_COMBO) {
+ if (!oldcard && mmc_card_sd_combo(card)) {
err = mmc_sd_get_csd(card);
if (err)
goto remove;
@@ -827,7 +841,7 @@ try_again:
mmc_fixup_device(card, sdio_fixup_methods);
- if (card->type == MMC_TYPE_SD_COMBO) {
+ if (mmc_card_sd_combo(card)) {
err = mmc_sd_setup_card(host, card, oldcard != NULL);
/* handle as SDIO-only card if memory init failed */
if (err) {
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 10c563999d3d..e63608834411 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -171,6 +171,7 @@ config MMC_SDHCI_OF_ASPEED
config MMC_SDHCI_OF_ASPEED_TEST
bool "Tests for the ASPEED SDHCI driver" if !KUNIT_ALL_TESTS
depends on MMC_SDHCI_OF_ASPEED && KUNIT
+ depends on (MMC_SDHCI_OF_ASPEED=m || KUNIT=y)
default KUNIT_ALL_TESTS
help
Enable KUnit tests for the ASPEED SDHCI driver. Select this
diff --git a/drivers/mmc/host/cavium-octeon.c b/drivers/mmc/host/cavium-octeon.c
index 2c4b2df52adb..12dca91a8ef6 100644
--- a/drivers/mmc/host/cavium-octeon.c
+++ b/drivers/mmc/host/cavium-octeon.c
@@ -277,6 +277,7 @@ static int octeon_mmc_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "Error populating slots\n");
octeon_mmc_set_shared_power(host, 0);
+ of_node_put(cn);
goto error;
}
i++;
diff --git a/drivers/mmc/host/cavium-thunderx.c b/drivers/mmc/host/cavium-thunderx.c
index 76013bbbcff3..202b1d6da678 100644
--- a/drivers/mmc/host/cavium-thunderx.c
+++ b/drivers/mmc/host/cavium-thunderx.c
@@ -142,8 +142,10 @@ static int thunder_mmc_probe(struct pci_dev *pdev,
continue;
ret = cvm_mmc_of_slot_probe(&host->slot_pdev[i]->dev, host);
- if (ret)
+ if (ret) {
+ of_node_put(child_node);
goto error;
+ }
}
i++;
}
diff --git a/drivers/mmc/host/cqhci-core.c b/drivers/mmc/host/cqhci-core.c
index b0d30c35c390..b3d7d6d8d654 100644
--- a/drivers/mmc/host/cqhci-core.c
+++ b/drivers/mmc/host/cqhci-core.c
@@ -822,8 +822,15 @@ irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status);
if ((status & (CQHCI_IS_RED | CQHCI_IS_GCE | CQHCI_IS_ICCE)) ||
- cmd_error || data_error)
+ cmd_error || data_error) {
+ if (status & CQHCI_IS_RED)
+ mmc_debugfs_err_stats_inc(mmc, MMC_ERR_CMDQ_RED);
+ if (status & CQHCI_IS_GCE)
+ mmc_debugfs_err_stats_inc(mmc, MMC_ERR_CMDQ_GCE);
+ if (status & CQHCI_IS_ICCE)
+ mmc_debugfs_err_stats_inc(mmc, MMC_ERR_CMDQ_ICCE);
cqhci_error_irq(mmc, status, cmd_error, data_error);
+ }
if (status & CQHCI_IS_TCC) {
/* read TCN and complete the request */
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index ca5be4445ae0..9f20ac524c8b 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -670,7 +670,9 @@ static int dw_mci_exynos_remove(struct platform_device *pdev)
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
- return dw_mci_pltfm_remove(pdev);
+ dw_mci_pltfm_remove(pdev);
+
+ return 0;
}
static const struct dev_pm_ops dw_mci_exynos_pmops = {
diff --git a/drivers/mmc/host/dw_mmc-hi3798cv200.c b/drivers/mmc/host/dw_mmc-hi3798cv200.c
index e9437ef8ef19..6f22fe054087 100644
--- a/drivers/mmc/host/dw_mmc-hi3798cv200.c
+++ b/drivers/mmc/host/dw_mmc-hi3798cv200.c
@@ -179,7 +179,9 @@ static int dw_mci_hi3798cv200_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->drive_clk);
clk_disable_unprepare(priv->sample_clk);
- return dw_mci_pltfm_remove(pdev);
+ dw_mci_pltfm_remove(pdev);
+
+ return 0;
}
static const struct of_device_id dw_mci_hi3798cv200_match[] = {
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
index f825487aa739..2a99f15f527f 100644
--- a/drivers/mmc/host/dw_mmc-rockchip.c
+++ b/drivers/mmc/host/dw_mmc-rockchip.c
@@ -377,7 +377,9 @@ static int dw_mci_rockchip_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
- return dw_mci_pltfm_remove(pdev);
+ dw_mci_pltfm_remove(pdev);
+
+ return 0;
}
static const struct dev_pm_ops dw_mci_rockchip_dev_pm_ops = {
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index 2f08d442e557..fc462995cf94 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -1172,8 +1172,10 @@ static int meson_mmc_probe(struct platform_device *pdev)
}
ret = device_reset_optional(&pdev->dev);
- if (ret)
- return dev_err_probe(&pdev->dev, ret, "device reset failed\n");
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "device reset failed\n");
+ goto free_host;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
host->regs = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 01159eaf8694..012aa85489d8 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -762,7 +762,7 @@ int mmci_dmae_setup(struct mmci_host *host)
/*
* If only an RX channel is specified, the driver will
- * attempt to use it bidirectionally, however if it is
+ * attempt to use it bidirectionally, however if it
* is specified but cannot be located, DMA will be disabled.
*/
if (dmae->rx_channel && !dmae->tx_channel)
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 9da4489dc345..69d78604d1fc 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2014-2015 MediaTek Inc.
+ * Copyright (c) 2014-2015, 2022 MediaTek Inc.
* Author: Chaotian.Jing <chaotian.jing@mediatek.com>
*/
@@ -20,6 +20,7 @@
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -440,8 +441,10 @@ struct msdc_host {
struct pinctrl *pinctrl;
struct pinctrl_state *pins_default;
struct pinctrl_state *pins_uhs;
+ struct pinctrl_state *pins_eint;
struct delayed_work req_timeout;
int irq; /* host interrupt */
+ int eint_irq; /* interrupt from sdio device for waking up system */
struct reset_control *reset;
struct clk *src_clk; /* msdc source clock */
@@ -1521,17 +1524,46 @@ static void __msdc_enable_sdio_irq(struct msdc_host *host, int enb)
static void msdc_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
- unsigned long flags;
struct msdc_host *host = mmc_priv(mmc);
+ unsigned long flags;
+ int ret;
spin_lock_irqsave(&host->lock, flags);
__msdc_enable_sdio_irq(host, enb);
spin_unlock_irqrestore(&host->lock, flags);
- if (enb)
- pm_runtime_get_noresume(host->dev);
- else
- pm_runtime_put_noidle(host->dev);
+ if (mmc_card_enable_async_irq(mmc->card) && host->pins_eint) {
+ if (enb) {
+ /*
+ * In dev_pm_set_dedicated_wake_irq_reverse(), eint pin will be set to
+ * GPIO mode. We need to restore it to SDIO DAT1 mode after that.
+ * Since the current pinstate is pins_uhs, to ensure the pinctrl select takes
+ * effect successfully, we change the pinstate to pins_eint first.
+ */
+ pinctrl_select_state(host->pinctrl, host->pins_eint);
+ ret = dev_pm_set_dedicated_wake_irq_reverse(host->dev, host->eint_irq);
+
+ if (ret) {
+ dev_err(host->dev, "Failed to register SDIO wakeup irq!\n");
+ host->pins_eint = NULL;
+ pm_runtime_get_noresume(host->dev);
+ } else {
+ dev_dbg(host->dev, "SDIO eint irq: %d!\n", host->eint_irq);
+ }
+
+ pinctrl_select_state(host->pinctrl, host->pins_uhs);
+ } else {
+ dev_pm_clear_wake_irq(host->dev);
+ }
+ } else {
+ if (enb) {
+ /* Ensure host->pins_eint is NULL */
+ host->pins_eint = NULL;
+ pm_runtime_get_noresume(host->dev);
+ } else {
+ pm_runtime_put_noidle(host->dev);
+ }
+ }
}
static irqreturn_t msdc_cmdq_irq(struct msdc_host *host, u32 intsts)
@@ -2319,7 +2351,7 @@ static int msdc_execute_hs400_tuning(struct mmc_host *mmc, struct mmc_card *card
else
val = readl(host->base + PAD_DS_TUNE);
- dev_info(host->dev, "Fianl PAD_DS_TUNE: 0x%x\n", val);
+ dev_info(host->dev, "Final PAD_DS_TUNE: 0x%x\n", val);
return 0;
@@ -2414,6 +2446,9 @@ static void msdc_cqe_disable(struct mmc_host *mmc, bool recovery)
/* disable busy check */
sdr_clr_bits(host->base + MSDC_PATCH_BIT1, MSDC_PB1_BUSY_CHECK_SEL);
+ val = readl(host->base + MSDC_INT);
+ writel(val, host->base + MSDC_INT);
+
if (recovery) {
sdr_set_field(host->base + MSDC_DMA_CTRL,
MSDC_DMA_CTRL_STOP, 1);
@@ -2635,6 +2670,20 @@ static int msdc_drv_probe(struct platform_device *pdev)
goto host_free;
}
+ /* Support for SDIO eint irq? */
+ if ((mmc->pm_caps & MMC_PM_WAKE_SDIO_IRQ) && (mmc->pm_caps & MMC_PM_KEEP_POWER)) {
+ host->eint_irq = platform_get_irq_byname(pdev, "sdio_wakeup");
+ if (host->eint_irq > 0) {
+ host->pins_eint = pinctrl_lookup_state(host->pinctrl, "state_eint");
+ if (IS_ERR(host->pins_eint)) {
+ dev_err(&pdev->dev, "Cannot find pinctrl eint!\n");
+ host->pins_eint = NULL;
+ } else {
+ device_init_wakeup(&pdev->dev, true);
+ }
+ }
+ }
+
msdc_of_property_parse(pdev, host);
host->dev = &pdev->dev;
@@ -2849,6 +2898,15 @@ static int __maybe_unused msdc_runtime_suspend(struct device *dev)
struct msdc_host *host = mmc_priv(mmc);
msdc_save_reg(host);
+
+ if (sdio_irq_claimed(mmc)) {
+ if (host->pins_eint) {
+ disable_irq(host->irq);
+ pinctrl_select_state(host->pinctrl, host->pins_eint);
+ }
+
+ __msdc_enable_sdio_irq(host, 0);
+ }
msdc_gate_clock(host);
return 0;
}
@@ -2864,25 +2922,47 @@ static int __maybe_unused msdc_runtime_resume(struct device *dev)
return ret;
msdc_restore_reg(host);
+
+ if (sdio_irq_claimed(mmc) && host->pins_eint) {
+ pinctrl_select_state(host->pinctrl, host->pins_uhs);
+ enable_irq(host->irq);
+ }
return 0;
}
static int __maybe_unused msdc_suspend(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
+ struct msdc_host *host = mmc_priv(mmc);
int ret;
+ u32 val;
if (mmc->caps2 & MMC_CAP2_CQE) {
ret = cqhci_suspend(mmc);
if (ret)
return ret;
+ val = readl(host->base + MSDC_INT);
+ writel(val, host->base + MSDC_INT);
}
+ /*
+ * Bump up the runtime PM usage counter, otherwise dev->power.needs_force_resume
+ * will not be set and pm_runtime_force_resume() will return early.
+ */
+ if (sdio_irq_claimed(mmc) && host->pins_eint)
+ pm_runtime_get_noresume(dev);
+
return pm_runtime_force_suspend(dev);
}
static int __maybe_unused msdc_resume(struct device *dev)
{
+ struct mmc_host *mmc = dev_get_drvdata(dev);
+ struct msdc_host *host = mmc_priv(mmc);
+
+ if (sdio_irq_claimed(mmc) && host->pins_eint)
+ pm_runtime_put_noidle(dev);
+
return pm_runtime_force_resume(dev);
}
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index de04b5afef2e..2cf0413407ea 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -923,7 +923,7 @@ static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card)
* One way to prevent this is to only allow 1-bit transfers.
*/
- if (is_imx31_mmc(mxcmci) && card->type == MMC_TYPE_SDIO)
+ if (is_imx31_mmc(mxcmci) && mmc_card_sdio(card))
host->caps &= ~MMC_CAP_4_BIT_DATA;
else
host->caps |= MMC_CAP_4_BIT_DATA;
@@ -1025,7 +1025,7 @@ static int mxcmci_probe(struct platform_device *pdev)
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
mmc->max_seg_size = mmc->max_req_size;
- host->devtype = (enum mxcmci_type)of_device_get_match_data(&pdev->dev);
+ host->devtype = (uintptr_t)of_device_get_match_data(&pdev->dev);
/* adjust max_segs after devtype detection */
if (!is_mpc512x_mmc(host))
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 0db9490dc659..e4003f6058eb 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -648,7 +648,7 @@ static int pxamci_probe(struct platform_device *pdev)
ret = pxamci_of_init(pdev, mmc);
if (ret)
- return ret;
+ goto out;
host = mmc_priv(mmc);
host->mmc = mmc;
@@ -672,7 +672,7 @@ static int pxamci_probe(struct platform_device *pdev)
ret = pxamci_init_ocr(host);
if (ret < 0)
- return ret;
+ goto out;
mmc->caps = 0;
host->cmdat = 0;
diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h
index 1a1e3e020a8c..c4abfee1ebae 100644
--- a/drivers/mmc/host/renesas_sdhi.h
+++ b/drivers/mmc/host/renesas_sdhi.h
@@ -43,6 +43,7 @@ struct renesas_sdhi_quirks {
bool hs400_4taps;
bool fixed_addr_mode;
bool dma_one_rx_only;
+ bool manual_tap_correction;
u32 hs400_bad_taps;
const u8 (*hs400_calib_table)[SDHI_CALIB_TABLE_MAX];
};
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 4404ca1f98d8..6edbf5c161ab 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -49,9 +49,6 @@
#define HOST_MODE_GEN3_32BIT (HOST_MODE_GEN3_WMODE | HOST_MODE_GEN3_BUSWIDTH)
#define HOST_MODE_GEN3_64BIT 0
-#define CTL_SDIF_MODE 0xe6
-#define SDIF_MODE_HS400 BIT(0)
-
#define SDHI_VER_GEN2_SDR50 0x490c
#define SDHI_VER_RZ_A1 0x820b
/* very old datasheets said 0x490c for SDR104, too. They are wrong! */
@@ -383,8 +380,7 @@ static void renesas_sdhi_hs400_complete(struct mmc_host *mmc)
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DT2FF,
priv->scc_tappos_hs400);
- /* Gen3 can't do automatic tap correction with HS400, so disable it */
- if (sd_ctrl_read16(host, CTL_VERSION) == SDHI_VER_GEN3_SDMMC)
+ if (priv->quirks && priv->quirks->manual_tap_correction)
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL,
~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN &
sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
@@ -562,23 +558,25 @@ static void renesas_sdhi_scc_reset(struct tmio_mmc_host *host, struct renesas_sd
}
/* only populated for TMIO_MMC_MIN_RCAR2 */
-static void renesas_sdhi_reset(struct tmio_mmc_host *host)
+static void renesas_sdhi_reset(struct tmio_mmc_host *host, bool preserve)
{
struct renesas_sdhi *priv = host_to_priv(host);
int ret;
u16 val;
- if (priv->rstc) {
- reset_control_reset(priv->rstc);
- /* Unknown why but without polling reset status, it will hang */
- read_poll_timeout(reset_control_status, ret, ret == 0, 1, 100,
- false, priv->rstc);
- /* At least SDHI_VER_GEN2_SDR50 needs manual release of reset */
- sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
- priv->needs_adjust_hs400 = false;
- renesas_sdhi_set_clock(host, host->clk_cache);
- } else if (priv->scc_ctl) {
- renesas_sdhi_scc_reset(host, priv);
+ if (!preserve) {
+ if (priv->rstc) {
+ reset_control_reset(priv->rstc);
+ /* Unknown why but without polling reset status, it will hang */
+ read_poll_timeout(reset_control_status, ret, ret == 0, 1, 100,
+ false, priv->rstc);
+ /* At least SDHI_VER_GEN2_SDR50 needs manual release of reset */
+ sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
+ priv->needs_adjust_hs400 = false;
+ renesas_sdhi_set_clock(host, host->clk_cache);
+ } else if (priv->scc_ctl) {
+ renesas_sdhi_scc_reset(host, priv);
+ }
}
if (sd_ctrl_read16(host, CTL_VERSION) >= SDHI_VER_GEN3_SD) {
@@ -719,7 +717,7 @@ static bool renesas_sdhi_manual_correction(struct tmio_mmc_host *host, bool use_
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ, 0);
/* Change TAP position according to correction status */
- if (sd_ctrl_read16(host, CTL_VERSION) == SDHI_VER_GEN3_SDMMC &&
+ if (priv->quirks && priv->quirks->manual_tap_correction &&
host->mmc->ios.timing == MMC_TIMING_MMC_HS400) {
u32 bad_taps = priv->quirks ? priv->quirks->hs400_bad_taps : 0;
/*
@@ -938,6 +936,10 @@ int renesas_sdhi_probe(struct platform_device *pdev,
if (IS_ERR(priv->clk_cd))
return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk_cd), "cannot get cd clock");
+ priv->rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(priv->rstc))
+ return PTR_ERR(priv->rstc);
+
priv->pinctrl = devm_pinctrl_get(&pdev->dev);
if (!IS_ERR(priv->pinctrl)) {
priv->pins_default = pinctrl_lookup_state(priv->pinctrl,
@@ -1030,10 +1032,6 @@ int renesas_sdhi_probe(struct platform_device *pdev,
if (ret)
goto efree;
- priv->rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
- if (IS_ERR(priv->rstc))
- return PTR_ERR(priv->rstc);
-
ver = sd_ctrl_read16(host, CTL_VERSION);
/* GEN2_SDR104 is first known SDHI to use 32bit block count */
if (ver < SDHI_VER_GEN2_SDR104 && mmc_data->max_blk_count > U16_MAX)
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index 3084b15ae2cb..42937596c4c4 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -170,6 +170,7 @@ static const struct renesas_sdhi_quirks sdhi_quirks_4tap_nohs400_one_rx = {
static const struct renesas_sdhi_quirks sdhi_quirks_4tap = {
.hs400_4taps = true,
.hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
+ .manual_tap_correction = true,
};
static const struct renesas_sdhi_quirks sdhi_quirks_nohs400 = {
@@ -182,25 +183,30 @@ static const struct renesas_sdhi_quirks sdhi_quirks_fixed_addr = {
static const struct renesas_sdhi_quirks sdhi_quirks_bad_taps1357 = {
.hs400_bad_taps = BIT(1) | BIT(3) | BIT(5) | BIT(7),
+ .manual_tap_correction = true,
};
static const struct renesas_sdhi_quirks sdhi_quirks_bad_taps2367 = {
.hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
+ .manual_tap_correction = true,
};
static const struct renesas_sdhi_quirks sdhi_quirks_r8a7796_es13 = {
.hs400_4taps = true,
.hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
.hs400_calib_table = r8a7796_es13_calib_table,
+ .manual_tap_correction = true,
};
static const struct renesas_sdhi_quirks sdhi_quirks_r8a77965 = {
.hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
.hs400_calib_table = r8a77965_calib_table,
+ .manual_tap_correction = true,
};
static const struct renesas_sdhi_quirks sdhi_quirks_r8a77990 = {
.hs400_calib_table = r8a77990_calib_table,
+ .manual_tap_correction = true,
};
/*
@@ -268,6 +274,7 @@ static const struct of_device_id renesas_sdhi_internal_dmac_of_match[] = {
{ .compatible = "renesas,sdhi-r8a77990", .data = &of_r8a77990_compatible, },
{ .compatible = "renesas,sdhi-r8a77995", .data = &of_rcar_gen3_nohs400_compatible, },
{ .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, },
+ { .compatible = "renesas,rcar-gen4-sdhi", .data = &of_rcar_gen3_compatible, },
{},
};
MODULE_DEVICE_TABLE(of, renesas_sdhi_internal_dmac_of_match);
@@ -321,7 +328,7 @@ renesas_sdhi_internal_dmac_dataend_dma(struct tmio_mmc_host *host)
}
/*
- * renesas_sdhi_internal_dmac_map() will be called with two difference
+ * renesas_sdhi_internal_dmac_map() will be called with two different
* sg pointers in two mmc_data by .pre_req(), but tmio host can have a single
* sg_ptr only. So, renesas_sdhi_internal_dmac_{un}map() should use a sg
* pointer in a mmc_data instead of host->sg_ptr.
@@ -355,7 +362,7 @@ renesas_sdhi_internal_dmac_map(struct tmio_mmc_host *host,
data->host_cookie = cookie;
- /* This DMAC cannot handle if buffer is not 128-bytes alignment */
+ /* This DMAC needs buffers to be 128-byte aligned */
if (!IS_ALIGNED(sg_dma_address(data->sg), 128)) {
renesas_sdhi_internal_dmac_unmap(host, data, cookie);
return false;
diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
index 8eb57de48e0c..aff36a933ebe 100644
--- a/drivers/mmc/host/sdhci-brcmstb.c
+++ b/drivers/mmc/host/sdhci-brcmstb.c
@@ -31,6 +31,8 @@
struct sdhci_brcmstb_priv {
void __iomem *cfg_regs;
unsigned int flags;
+ struct clk *base_clk;
+ u32 base_freq_hz;
};
struct brcmstb_match_priv {
@@ -250,9 +252,11 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host;
const struct of_device_id *match;
struct sdhci_brcmstb_priv *priv;
+ u32 actual_clock_mhz;
struct sdhci_host *host;
struct resource *iomem;
struct clk *clk;
+ struct clk *base_clk = NULL;
int res;
match = of_match_node(sdhci_brcm_of_match, pdev->dev.of_node);
@@ -330,6 +334,35 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
if (match_priv->flags & BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT)
host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+ /* Change the base clock frequency if the DT property exists */
+ if (device_property_read_u32(&pdev->dev, "clock-frequency",
+ &priv->base_freq_hz) != 0)
+ goto add_host;
+
+ base_clk = devm_clk_get_optional(&pdev->dev, "sdio_freq");
+ if (IS_ERR(base_clk)) {
+ dev_warn(&pdev->dev, "Clock for \"sdio_freq\" not found\n");
+ goto add_host;
+ }
+
+ res = clk_prepare_enable(base_clk);
+ if (res)
+ goto err;
+
+ /* set improved clock rate */
+ clk_set_rate(base_clk, priv->base_freq_hz);
+ actual_clock_mhz = clk_get_rate(base_clk) / 1000000;
+
+ host->caps &= ~SDHCI_CLOCK_V3_BASE_MASK;
+ host->caps |= (actual_clock_mhz << SDHCI_CLOCK_BASE_SHIFT);
+ /* Disable presets because they are now incorrect */
+ host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
+
+ dev_dbg(&pdev->dev, "Base Clock Frequency changed to %dMHz\n",
+ actual_clock_mhz);
+ priv->base_clk = base_clk;
+
+add_host:
res = sdhci_brcmstb_add_host(host, priv);
if (res)
goto err;
@@ -340,6 +373,7 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
err:
sdhci_pltfm_free(pdev);
err_clk:
+ clk_disable_unprepare(base_clk);
clk_disable_unprepare(clk);
return res;
}
@@ -351,11 +385,51 @@ static void sdhci_brcmstb_shutdown(struct platform_device *pdev)
MODULE_DEVICE_TABLE(of, sdhci_brcm_of_match);
+#ifdef CONFIG_PM_SLEEP
+static int sdhci_brcmstb_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_brcmstb_priv *priv = sdhci_pltfm_priv(pltfm_host);
+
+ clk_disable_unprepare(priv->base_clk);
+ return sdhci_pltfm_suspend(dev);
+}
+
+static int sdhci_brcmstb_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_brcmstb_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ int ret;
+
+ ret = sdhci_pltfm_resume(dev);
+ if (!ret && priv->base_freq_hz) {
+ ret = clk_prepare_enable(priv->base_clk);
+ /*
+ * Note: using clk_get_rate() below as clk_get_rate()
+ * honors CLK_GET_RATE_NOCACHE attribute, but clk_set_rate()
+ * may do implicit get_rate() calls that do not honor
+ * CLK_GET_RATE_NOCACHE.
+ */
+ if (!ret &&
+ (clk_get_rate(priv->base_clk) != priv->base_freq_hz))
+ ret = clk_set_rate(priv->base_clk, priv->base_freq_hz);
+ }
+
+ return ret;
+}
+#endif
+
+static const struct dev_pm_ops sdhci_brcmstb_pmops = {
+ SET_SYSTEM_SLEEP_PM_OPS(sdhci_brcmstb_suspend, sdhci_brcmstb_resume)
+};
+
static struct platform_driver sdhci_brcmstb_driver = {
.driver = {
.name = "sdhci-brcmstb",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .pm = &sdhci_pltfm_pmops,
+ .pm = &sdhci_brcmstb_pmops,
.of_match_table = of_match_ptr(sdhci_brcm_of_match),
},
.probe = sdhci_brcmstb_probe,
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index e395411fb6fd..dc2991422a87 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -2435,33 +2435,12 @@ static const struct sdhci_msm_variant_info sdm845_sdhci_var = {
};
static const struct of_device_id sdhci_msm_dt_match[] = {
- /* Following two entries are deprecated (kept only for backward compatibility) */
- {.compatible = "qcom,sdhci-msm-v4", .data = &sdhci_msm_mci_var},
- {.compatible = "qcom,sdhci-msm-v5", .data = &sdhci_msm_v5_var},
- /* Add entries for sdcc versions less than 5.0 here */
- {.compatible = "qcom,apq8084-sdhci", .data = &sdhci_msm_mci_var},
- {.compatible = "qcom,msm8226-sdhci", .data = &sdhci_msm_mci_var},
- {.compatible = "qcom,msm8916-sdhci", .data = &sdhci_msm_mci_var},
- {.compatible = "qcom,msm8953-sdhci", .data = &sdhci_msm_mci_var},
- {.compatible = "qcom,msm8974-sdhci", .data = &sdhci_msm_mci_var},
- {.compatible = "qcom,msm8992-sdhci", .data = &sdhci_msm_mci_var},
- {.compatible = "qcom,msm8994-sdhci", .data = &sdhci_msm_mci_var},
- {.compatible = "qcom,msm8996-sdhci", .data = &sdhci_msm_mci_var},
/*
- * Add entries for sdcc version 5.0 here. For SDCC version 5.0.0,
- * MCI registers are removed from SDCC interface and some registers
- * are moved to HC.
+ * Do not add new variants to the driver which are compatible with
+ * generic ones, unless they need customization.
*/
- {.compatible = "qcom,qcs404-sdhci", .data = &sdhci_msm_v5_var},
- {.compatible = "qcom,sdx55-sdhci", .data = &sdhci_msm_v5_var},
- {.compatible = "qcom,sdx65-sdhci", .data = &sdhci_msm_v5_var},
- {.compatible = "qcom,sdm630-sdhci", .data = &sdhci_msm_v5_var},
- {.compatible = "qcom,sm6125-sdhci", .data = &sdhci_msm_v5_var},
- {.compatible = "qcom,sm6350-sdhci", .data = &sdhci_msm_v5_var},
- {.compatible = "qcom,sm8150-sdhci", .data = &sdhci_msm_v5_var},
- {.compatible = "qcom,sm8250-sdhci", .data = &sdhci_msm_v5_var},
- {.compatible = "qcom,sc7280-sdhci", .data = &sdhci_msm_v5_var},
- /* Add entries where soc specific handling is required, here */
+ {.compatible = "qcom,sdhci-msm-v4", .data = &sdhci_msm_mci_var},
+ {.compatible = "qcom,sdhci-msm-v5", .data = &sdhci_msm_v5_var},
{.compatible = "qcom,sdm845-sdhci", .data = &sdm845_sdhci_var},
{.compatible = "qcom,sc7180-sdhci", .data = &sdm845_sdhci_var},
{},
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 757801dfc308..3997cad1f793 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -1733,7 +1733,6 @@ err_pltfm_free:
static int sdhci_arasan_remove(struct platform_device *pdev)
{
- int ret;
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
@@ -1747,11 +1746,11 @@ static int sdhci_arasan_remove(struct platform_device *pdev)
sdhci_arasan_unregister_sdclk(&pdev->dev);
- ret = sdhci_pltfm_unregister(pdev);
+ sdhci_pltfm_unregister(pdev);
clk_disable_unprepare(clk_ahb);
- return ret;
+ return 0;
}
static struct platform_driver sdhci_arasan_driver = {
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 10fb4cb2c731..cd0134580a90 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -100,8 +100,13 @@ static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
static void sdhci_at91_set_uhs_signaling(struct sdhci_host *host,
unsigned int timing)
{
- if (timing == MMC_TIMING_MMC_DDR52)
- sdhci_writeb(host, SDMMC_MC1R_DDR, SDMMC_MC1R);
+ u8 mc1r;
+
+ if (timing == MMC_TIMING_MMC_DDR52) {
+ mc1r = sdhci_readb(host, SDMMC_MC1R);
+ mc1r |= SDMMC_MC1R_DDR;
+ sdhci_writeb(host, mc1r, SDMMC_MC1R);
+ }
sdhci_set_uhs_signaling(host, timing);
}
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
index bac874ab0b33..a7343d4bc50e 100644
--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/reset.h>
#include <linux/sizes.h>
#include "sdhci-pltfm.h"
@@ -30,6 +31,7 @@
/* Offset inside the vendor area 1 */
#define DWCMSHC_HOST_CTRL3 0x8
#define DWCMSHC_EMMC_CONTROL 0x2c
+#define DWCMSHC_CARD_IS_EMMC BIT(0)
#define DWCMSHC_ENHANCED_STROBE BIT(8)
#define DWCMSHC_EMMC_ATCTRL 0x40
@@ -38,7 +40,7 @@
#define DWCMSHC_EMMC_DLL_RXCLK 0x804
#define DWCMSHC_EMMC_DLL_TXCLK 0x808
#define DWCMSHC_EMMC_DLL_STRBIN 0x80c
-#define DLL_STRBIN_TAPNUM_FROM_SW BIT(24)
+#define DECMSHC_EMMC_DLL_CMDOUT 0x810
#define DWCMSHC_EMMC_DLL_STATUS0 0x840
#define DWCMSHC_EMMC_DLL_START BIT(0)
#define DWCMSHC_EMMC_DLL_LOCKED BIT(8)
@@ -47,22 +49,39 @@
#define DWCMSHC_EMMC_DLL_START_POINT 16
#define DWCMSHC_EMMC_DLL_INC 8
#define DWCMSHC_EMMC_DLL_DLYENA BIT(27)
-#define DLL_TXCLK_TAPNUM_DEFAULT 0x8
-#define DLL_STRBIN_TAPNUM_DEFAULT 0x8
+#define DLL_TXCLK_TAPNUM_DEFAULT 0x10
+#define DLL_TXCLK_TAPNUM_90_DEGREES 0xA
#define DLL_TXCLK_TAPNUM_FROM_SW BIT(24)
+#define DLL_STRBIN_TAPNUM_DEFAULT 0x8
+#define DLL_STRBIN_TAPNUM_FROM_SW BIT(24)
+#define DLL_STRBIN_DELAY_NUM_SEL BIT(26)
+#define DLL_STRBIN_DELAY_NUM_OFFSET 16
+#define DLL_STRBIN_DELAY_NUM_DEFAULT 0x16
#define DLL_RXCLK_NO_INVERTER 1
#define DLL_RXCLK_INVERTER 0
+#define DLL_CMDOUT_TAPNUM_90_DEGREES 0x8
+#define DLL_CMDOUT_TAPNUM_FROM_SW BIT(24)
+#define DLL_CMDOUT_SRC_CLK_NEG BIT(28)
+#define DLL_CMDOUT_EN_SRC_CLK_NEG BIT(29)
+
#define DLL_LOCK_WO_TMOUT(x) \
((((x) & DWCMSHC_EMMC_DLL_LOCKED) == DWCMSHC_EMMC_DLL_LOCKED) && \
(((x) & DWCMSHC_EMMC_DLL_TIMEOUT) == 0))
-#define RK3568_MAX_CLKS 3
+#define RK35xx_MAX_CLKS 3
#define BOUNDARY_OK(addr, len) \
((addr | (SZ_128M - 1)) == ((addr + len - 1) | (SZ_128M - 1)))
-struct rk3568_priv {
+enum dwcmshc_rk_type {
+ DWCMSHC_RK3568,
+ DWCMSHC_RK3588,
+};
+
+struct rk35xx_priv {
/* Rockchip specified optional clocks */
- struct clk_bulk_data rockchip_clks[RK3568_MAX_CLKS];
+ struct clk_bulk_data rockchip_clks[RK35xx_MAX_CLKS];
+ struct reset_control *reset;
+ enum dwcmshc_rk_type devtype;
u8 txclk_tapnum;
};
@@ -131,7 +150,9 @@ static void dwcmshc_request(struct mmc_host *mmc, struct mmc_request *mrq)
static void dwcmshc_set_uhs_signaling(struct sdhci_host *host,
unsigned int timing)
{
- u16 ctrl_2;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ u16 ctrl, ctrl_2;
ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
/* Select Bus Speed Mode for host */
@@ -149,8 +170,15 @@ static void dwcmshc_set_uhs_signaling(struct sdhci_host *host,
else if ((timing == MMC_TIMING_UHS_DDR50) ||
(timing == MMC_TIMING_MMC_DDR52))
ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
- else if (timing == MMC_TIMING_MMC_HS400)
+ else if (timing == MMC_TIMING_MMC_HS400) {
+ /* set CARD_IS_EMMC bit to enable Data Strobe for HS400 */
+ ctrl = sdhci_readw(host, priv->vendor_specific_area1 + DWCMSHC_EMMC_CONTROL);
+ ctrl |= DWCMSHC_CARD_IS_EMMC;
+ sdhci_writew(host, ctrl, priv->vendor_specific_area1 + DWCMSHC_EMMC_CONTROL);
+
ctrl_2 |= DWCMSHC_CTRL_HS400;
+ }
+
sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
@@ -176,24 +204,18 @@ static void dwcmshc_rk3568_set_clock(struct sdhci_host *host, unsigned int clock
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct dwcmshc_priv *dwc_priv = sdhci_pltfm_priv(pltfm_host);
- struct rk3568_priv *priv = dwc_priv->priv;
+ struct rk35xx_priv *priv = dwc_priv->priv;
u8 txclk_tapnum = DLL_TXCLK_TAPNUM_DEFAULT;
u32 extra, reg;
int err;
host->mmc->actual_clock = 0;
- /*
- * DO NOT TOUCH THIS SETTING. RX clk inverter unit is enabled
- * by default, but it shouldn't be enabled. We should anyway
- * disable it before issuing any cmds.
- */
- extra = DWCMSHC_EMMC_DLL_DLYENA |
- DLL_RXCLK_NO_INVERTER << DWCMSHC_EMMC_DLL_RXCLK_SRCSEL;
- sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_RXCLK);
-
- if (clock == 0)
+ if (clock == 0) {
+ /* Disable interface clock at initial state. */
+ sdhci_set_clock(host, clock);
return;
+ }
/* Rockchip platform only support 375KHz for identify mode */
if (clock <= 400000)
@@ -211,9 +233,21 @@ static void dwcmshc_rk3568_set_clock(struct sdhci_host *host, unsigned int clock
extra &= ~BIT(0);
sdhci_writel(host, extra, reg);
- if (clock <= 400000) {
- /* Disable DLL to reset sample clock */
+ if (clock <= 52000000) {
+ /* Disable DLL and reset both of sample and drive clock */
sdhci_writel(host, 0, DWCMSHC_EMMC_DLL_CTRL);
+ sdhci_writel(host, 0, DWCMSHC_EMMC_DLL_RXCLK);
+ sdhci_writel(host, 0, DWCMSHC_EMMC_DLL_TXCLK);
+ sdhci_writel(host, 0, DECMSHC_EMMC_DLL_CMDOUT);
+ /*
+	 * Before switching to hs400es mode, the driver will enable
+	 * enhanced strobe first, so the PHY's enhanced strobe
+	 * parameters have to be configured at this point.
+ */
+ extra = DWCMSHC_EMMC_DLL_DLYENA |
+ DLL_STRBIN_DELAY_NUM_SEL |
+ DLL_STRBIN_DELAY_NUM_DEFAULT << DLL_STRBIN_DELAY_NUM_OFFSET;
+ sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_STRBIN);
return;
}
@@ -222,6 +256,15 @@ static void dwcmshc_rk3568_set_clock(struct sdhci_host *host, unsigned int clock
udelay(1);
sdhci_writel(host, 0x0, DWCMSHC_EMMC_DLL_CTRL);
+ /*
+ * We shouldn't set DLL_RXCLK_NO_INVERTER for identify mode but
+ * we must set it in higher speed mode.
+ */
+ extra = DWCMSHC_EMMC_DLL_DLYENA;
+ if (priv->devtype == DWCMSHC_RK3568)
+ extra |= DLL_RXCLK_NO_INVERTER << DWCMSHC_EMMC_DLL_RXCLK_SRCSEL;
+ sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_RXCLK);
+
/* Init DLL settings */
extra = 0x5 << DWCMSHC_EMMC_DLL_START_POINT |
0x2 << DWCMSHC_EMMC_DLL_INC |
@@ -244,8 +287,20 @@ static void dwcmshc_rk3568_set_clock(struct sdhci_host *host, unsigned int clock
host->mmc->ios.timing == MMC_TIMING_MMC_HS400)
txclk_tapnum = priv->txclk_tapnum;
+ if ((priv->devtype == DWCMSHC_RK3588) && host->mmc->ios.timing == MMC_TIMING_MMC_HS400) {
+ txclk_tapnum = DLL_TXCLK_TAPNUM_90_DEGREES;
+
+ extra = DLL_CMDOUT_SRC_CLK_NEG |
+ DLL_CMDOUT_EN_SRC_CLK_NEG |
+ DWCMSHC_EMMC_DLL_DLYENA |
+ DLL_CMDOUT_TAPNUM_90_DEGREES |
+ DLL_CMDOUT_TAPNUM_FROM_SW;
+ sdhci_writel(host, extra, DECMSHC_EMMC_DLL_CMDOUT);
+ }
+
extra = DWCMSHC_EMMC_DLL_DLYENA |
DLL_TXCLK_TAPNUM_FROM_SW |
+ DLL_RXCLK_NO_INVERTER << DWCMSHC_EMMC_DLL_RXCLK_SRCSEL |
txclk_tapnum;
sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_TXCLK);
@@ -255,6 +310,21 @@ static void dwcmshc_rk3568_set_clock(struct sdhci_host *host, unsigned int clock
sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_STRBIN);
}
+static void rk35xx_sdhci_reset(struct sdhci_host *host, u8 mask)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct dwcmshc_priv *dwc_priv = sdhci_pltfm_priv(pltfm_host);
+ struct rk35xx_priv *priv = dwc_priv->priv;
+
+ if (mask & SDHCI_RESET_ALL && priv->reset) {
+ reset_control_assert(priv->reset);
+ udelay(1);
+ reset_control_deassert(priv->reset);
+ }
+
+ sdhci_reset(host, mask);
+}
+
static const struct sdhci_ops sdhci_dwcmshc_ops = {
.set_clock = sdhci_set_clock,
.set_bus_width = sdhci_set_bus_width,
@@ -264,12 +334,12 @@ static const struct sdhci_ops sdhci_dwcmshc_ops = {
.adma_write_desc = dwcmshc_adma_write_desc,
};
-static const struct sdhci_ops sdhci_dwcmshc_rk3568_ops = {
+static const struct sdhci_ops sdhci_dwcmshc_rk35xx_ops = {
.set_clock = dwcmshc_rk3568_set_clock,
.set_bus_width = sdhci_set_bus_width,
.set_uhs_signaling = dwcmshc_set_uhs_signaling,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
- .reset = sdhci_reset,
+ .reset = rk35xx_sdhci_reset,
.adma_write_desc = dwcmshc_adma_write_desc,
};
@@ -279,30 +349,46 @@ static const struct sdhci_pltfm_data sdhci_dwcmshc_pdata = {
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
};
-static const struct sdhci_pltfm_data sdhci_dwcmshc_rk3568_pdata = {
- .ops = &sdhci_dwcmshc_rk3568_ops,
+#ifdef CONFIG_ACPI
+static const struct sdhci_pltfm_data sdhci_dwcmshc_bf3_pdata = {
+ .ops = &sdhci_dwcmshc_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_ACMD23_BROKEN,
+};
+#endif
+
+static const struct sdhci_pltfm_data sdhci_dwcmshc_rk35xx_pdata = {
+ .ops = &sdhci_dwcmshc_rk35xx_ops,
.quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
};
-static int dwcmshc_rk3568_init(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv)
+static int dwcmshc_rk35xx_init(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv)
{
int err;
- struct rk3568_priv *priv = dwc_priv->priv;
+ struct rk35xx_priv *priv = dwc_priv->priv;
+
+ priv->reset = devm_reset_control_array_get_optional_exclusive(mmc_dev(host->mmc));
+ if (IS_ERR(priv->reset)) {
+ err = PTR_ERR(priv->reset);
+ dev_err(mmc_dev(host->mmc), "failed to get reset control %d\n", err);
+ return err;
+ }
priv->rockchip_clks[0].id = "axi";
priv->rockchip_clks[1].id = "block";
priv->rockchip_clks[2].id = "timer";
- err = devm_clk_bulk_get_optional(mmc_dev(host->mmc), RK3568_MAX_CLKS,
+ err = devm_clk_bulk_get_optional(mmc_dev(host->mmc), RK35xx_MAX_CLKS,
priv->rockchip_clks);
if (err) {
dev_err(mmc_dev(host->mmc), "failed to get clocks %d\n", err);
return err;
}
- err = clk_bulk_prepare_enable(RK3568_MAX_CLKS, priv->rockchip_clks);
+ err = clk_bulk_prepare_enable(RK35xx_MAX_CLKS, priv->rockchip_clks);
if (err) {
dev_err(mmc_dev(host->mmc), "failed to enable clocks %d\n", err);
return err;
@@ -321,10 +407,28 @@ static int dwcmshc_rk3568_init(struct sdhci_host *host, struct dwcmshc_priv *dwc
return 0;
}
+static void dwcmshc_rk35xx_postinit(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv)
+{
+ /*
+	 * High-speed bus modes are not supported at low clock speeds
+	 * because the DLL cannot be used under this condition.
+ */
+ if (host->mmc->f_max <= 52000000) {
+ dev_info(mmc_dev(host->mmc), "Disabling HS200/HS400, frequency too low (%d)\n",
+ host->mmc->f_max);
+ host->mmc->caps2 &= ~(MMC_CAP2_HS200 | MMC_CAP2_HS400);
+ host->mmc->caps &= ~(MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR);
+ }
+}
+
static const struct of_device_id sdhci_dwcmshc_dt_ids[] = {
{
+ .compatible = "rockchip,rk3588-dwcmshc",
+ .data = &sdhci_dwcmshc_rk35xx_pdata,
+ },
+ {
.compatible = "rockchip,rk3568-dwcmshc",
- .data = &sdhci_dwcmshc_rk3568_pdata,
+ .data = &sdhci_dwcmshc_rk35xx_pdata,
},
{
.compatible = "snps,dwcmshc-sdhci",
@@ -336,7 +440,10 @@ MODULE_DEVICE_TABLE(of, sdhci_dwcmshc_dt_ids);
#ifdef CONFIG_ACPI
static const struct acpi_device_id sdhci_dwcmshc_acpi_ids[] = {
- { .id = "MLNXBF30" },
+ {
+ .id = "MLNXBF30",
+ .driver_data = (kernel_ulong_t)&sdhci_dwcmshc_bf3_pdata,
+ },
{}
};
#endif
@@ -347,12 +454,12 @@ static int dwcmshc_probe(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_host *host;
struct dwcmshc_priv *priv;
- struct rk3568_priv *rk_priv = NULL;
+ struct rk35xx_priv *rk_priv = NULL;
const struct sdhci_pltfm_data *pltfm_data;
int err;
u32 extra;
- pltfm_data = of_device_get_match_data(&pdev->dev);
+ pltfm_data = device_get_match_data(&pdev->dev);
if (!pltfm_data) {
dev_err(&pdev->dev, "Error: No device match data found\n");
return -ENODEV;
@@ -402,33 +509,47 @@ static int dwcmshc_probe(struct platform_device *pdev)
host->mmc_host_ops.request = dwcmshc_request;
host->mmc_host_ops.hs400_enhanced_strobe = dwcmshc_hs400_enhanced_strobe;
- if (pltfm_data == &sdhci_dwcmshc_rk3568_pdata) {
- rk_priv = devm_kzalloc(&pdev->dev, sizeof(struct rk3568_priv), GFP_KERNEL);
+ if (pltfm_data == &sdhci_dwcmshc_rk35xx_pdata) {
+ rk_priv = devm_kzalloc(&pdev->dev, sizeof(struct rk35xx_priv), GFP_KERNEL);
if (!rk_priv) {
err = -ENOMEM;
goto err_clk;
}
+ if (of_device_is_compatible(pdev->dev.of_node, "rockchip,rk3588-dwcmshc"))
+ rk_priv->devtype = DWCMSHC_RK3588;
+ else
+ rk_priv->devtype = DWCMSHC_RK3568;
+
priv->priv = rk_priv;
- err = dwcmshc_rk3568_init(host, priv);
+ err = dwcmshc_rk35xx_init(host, priv);
if (err)
goto err_clk;
}
host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
- err = sdhci_add_host(host);
+ err = sdhci_setup_host(host);
if (err)
goto err_clk;
+ if (rk_priv)
+ dwcmshc_rk35xx_postinit(host, priv);
+
+ err = __sdhci_add_host(host);
+ if (err)
+ goto err_setup_host;
+
return 0;
+err_setup_host:
+ sdhci_cleanup_host(host);
err_clk:
clk_disable_unprepare(pltfm_host->clk);
clk_disable_unprepare(priv->bus_clk);
if (rk_priv)
- clk_bulk_disable_unprepare(RK3568_MAX_CLKS,
+ clk_bulk_disable_unprepare(RK35xx_MAX_CLKS,
rk_priv->rockchip_clks);
free_pltfm:
sdhci_pltfm_free(pdev);
@@ -440,14 +561,14 @@ static int dwcmshc_remove(struct platform_device *pdev)
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
- struct rk3568_priv *rk_priv = priv->priv;
+ struct rk35xx_priv *rk_priv = priv->priv;
sdhci_remove_host(host, 0);
clk_disable_unprepare(pltfm_host->clk);
clk_disable_unprepare(priv->bus_clk);
if (rk_priv)
- clk_bulk_disable_unprepare(RK3568_MAX_CLKS,
+ clk_bulk_disable_unprepare(RK35xx_MAX_CLKS,
rk_priv->rockchip_clks);
sdhci_pltfm_free(pdev);
@@ -460,7 +581,7 @@ static int dwcmshc_suspend(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
- struct rk3568_priv *rk_priv = priv->priv;
+ struct rk35xx_priv *rk_priv = priv->priv;
int ret;
ret = sdhci_suspend_host(host);
@@ -472,7 +593,7 @@ static int dwcmshc_suspend(struct device *dev)
clk_disable_unprepare(priv->bus_clk);
if (rk_priv)
- clk_bulk_disable_unprepare(RK3568_MAX_CLKS,
+ clk_bulk_disable_unprepare(RK35xx_MAX_CLKS,
rk_priv->rockchip_clks);
return ret;
@@ -483,7 +604,7 @@ static int dwcmshc_resume(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
- struct rk3568_priv *rk_priv = priv->priv;
+ struct rk35xx_priv *rk_priv = priv->priv;
int ret;
ret = clk_prepare_enable(pltfm_host->clk);
@@ -497,7 +618,7 @@ static int dwcmshc_resume(struct device *dev)
}
if (rk_priv) {
- ret = clk_bulk_prepare_enable(RK3568_MAX_CLKS,
+ ret = clk_bulk_prepare_enable(RK35xx_MAX_CLKS,
rk_priv->rockchip_clks);
if (ret)
return ret;
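
The probe change above replaces of_device_get_match_data() with device_get_match_data(), which also returns the .driver_data of an ACPI match; that is what lets the MLNXBF30 entry carry sdhci_dwcmshc_bf3_pdata. An illustrative sketch of the lookup, using a hypothetical example_probe() and only the standard driver-core API:

#include <linux/platform_device.h>
#include <linux/property.h>

static int example_probe(struct platform_device *pdev)
{
	const void *pdata;

	/* Resolves either the OF match table or acpi_device_id.driver_data */
	pdata = device_get_match_data(&pdev->dev);
	if (!pdata)
		return -ENODEV;

	return 0;
}
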
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index d9dc41143bb3..e0266638381d 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -904,6 +904,7 @@ static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
scfg_node = of_find_matching_node(NULL, scfg_device_ids);
if (scfg_node)
scfg_base = of_iomap(scfg_node, 0);
+ of_node_put(scfg_node);
if (scfg_base) {
sdhciovselcr = SDHCIOVSELCR_TGLEN |
SDHCIOVSELCR_VSELVAL;
@@ -1418,7 +1419,7 @@ static int esdhc_hs400_prepare_ddr(struct mmc_host *mmc)
static int sdhci_esdhc_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
- struct device_node *np;
+ struct device_node *np, *tp;
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_esdhc *esdhc;
int ret;
@@ -1463,7 +1464,9 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
if (esdhc->vendor_ver > VENDOR_V_22)
host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
- if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) {
+ tp = of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc");
+ if (tp) {
+ of_node_put(tp);
host->quirks |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
}
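
Both esdhc hunks above fix device-node refcount leaks: of_find_matching_node() and of_find_compatible_node() return a node with its reference count raised, so the caller must drop it with of_node_put() once the result has been consumed. A minimal sketch of the pattern with a hypothetical helper:

#include <linux/of.h>

static bool example_soc_is_p2020(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc");
	of_node_put(np);	/* of_node_put(NULL) is a harmless no-op */

	return np != NULL;	/* pointer test only, the node is not dereferenced */
}
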
diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
index f13c08db3da5..4d509f656188 100644
--- a/drivers/mmc/host/sdhci-pci-gli.c
+++ b/drivers/mmc/host/sdhci-pci-gli.c
@@ -95,6 +95,9 @@
#define PCIE_GLI_9763E_SCR 0x8E0
#define GLI_9763E_SCR_AXI_REQ BIT(9)
+#define PCIE_GLI_9763E_CFG 0x8A0
+#define GLI_9763E_CFG_LPSN_DIS BIT(12)
+
#define PCIE_GLI_9763E_CFG2 0x8A4
#define GLI_9763E_CFG2_L1DLY GENMASK(28, 19)
#define GLI_9763E_CFG2_L1DLY_MID 0x54
@@ -963,12 +966,40 @@ static void gli_set_gl9763e(struct sdhci_pci_slot *slot)
}
#ifdef CONFIG_PM
+static void gl9763e_set_low_power_negotiation(struct sdhci_pci_slot *slot, bool enable)
+{
+ struct pci_dev *pdev = slot->chip->pdev;
+ u32 value;
+
+ pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
+ value &= ~GLI_9763E_VHS_REV;
+ value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_W);
+ pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9763E_CFG, &value);
+
+ if (enable)
+ value &= ~GLI_9763E_CFG_LPSN_DIS;
+ else
+ value |= GLI_9763E_CFG_LPSN_DIS;
+
+ pci_write_config_dword(pdev, PCIE_GLI_9763E_CFG, value);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
+ value &= ~GLI_9763E_VHS_REV;
+ value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R);
+ pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
+}
+
static int gl9763e_runtime_suspend(struct sdhci_pci_chip *chip)
{
struct sdhci_pci_slot *slot = chip->slots[0];
struct sdhci_host *host = slot->host;
u16 clock;
+ /* Enable LPM negotiation to allow entering L1 state */
+ gl9763e_set_low_power_negotiation(slot, true);
+
clock = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
clock &= ~(SDHCI_CLOCK_PLL_EN | SDHCI_CLOCK_CARD_EN);
sdhci_writew(host, clock, SDHCI_CLOCK_CONTROL);
@@ -1002,6 +1033,9 @@ static int gl9763e_runtime_resume(struct sdhci_pci_chip *chip)
clock |= SDHCI_CLOCK_CARD_EN;
sdhci_writew(host, clock, SDHCI_CLOCK_CONTROL);
+ /* Disable LPM negotiation to avoid entering L1 state. */
+ gl9763e_set_low_power_negotiation(slot, false);
+
return 0;
}
#endif
diff --git a/drivers/mmc/host/sdhci-st.c b/drivers/mmc/host/sdhci-st.c
index d41582c21aa3..6415916fbd91 100644
--- a/drivers/mmc/host/sdhci-st.c
+++ b/drivers/mmc/host/sdhci-st.c
@@ -440,15 +440,14 @@ static int sdhci_st_remove(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct st_mmc_platform_data *pdata = sdhci_pltfm_priv(pltfm_host);
struct reset_control *rstc = pdata->rstc;
- int ret;
- ret = sdhci_pltfm_unregister(pdev);
+ sdhci_pltfm_unregister(pdev);
clk_disable_unprepare(pdata->icnclk);
reset_control_assert(rstc);
- return ret;
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 22152029e14c..7689ffec5ad1 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -224,6 +224,7 @@ void sdhci_reset(struct sdhci_host *host, u8 mask)
if (timedout) {
pr_err("%s: Reset 0x%x never completed.\n",
mmc_hostname(host->mmc), (int)mask);
+ sdhci_err_stats_inc(host, CTRL_TIMEOUT);
sdhci_dumpregs(host);
return;
}
@@ -1716,6 +1717,7 @@ static bool sdhci_send_command_retry(struct sdhci_host *host,
if (!timeout--) {
pr_err("%s: Controller never released inhibit bit(s).\n",
mmc_hostname(host->mmc));
+ sdhci_err_stats_inc(host, CTRL_TIMEOUT);
sdhci_dumpregs(host);
cmd->error = -EIO;
return false;
@@ -1965,6 +1967,7 @@ void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
if (timedout) {
pr_err("%s: Internal clock never stabilised.\n",
mmc_hostname(host->mmc));
+ sdhci_err_stats_inc(host, CTRL_TIMEOUT);
sdhci_dumpregs(host);
return;
}
@@ -1987,6 +1990,7 @@ void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
if (timedout) {
pr_err("%s: PLL clock never stabilised.\n",
mmc_hostname(host->mmc));
+ sdhci_err_stats_inc(host, CTRL_TIMEOUT);
sdhci_dumpregs(host);
return;
}
@@ -3161,6 +3165,7 @@ static void sdhci_timeout_timer(struct timer_list *t)
if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
mmc_hostname(host->mmc));
+ sdhci_err_stats_inc(host, REQ_TIMEOUT);
sdhci_dumpregs(host);
host->cmd->error = -ETIMEDOUT;
@@ -3183,6 +3188,7 @@ static void sdhci_timeout_data_timer(struct timer_list *t)
(host->cmd && sdhci_data_line_cmd(host->cmd))) {
pr_err("%s: Timeout waiting for hardware interrupt.\n",
mmc_hostname(host->mmc));
+ sdhci_err_stats_inc(host, REQ_TIMEOUT);
sdhci_dumpregs(host);
if (host->data) {
@@ -3234,17 +3240,21 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
return;
pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
mmc_hostname(host->mmc), (unsigned)intmask);
+ sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
sdhci_dumpregs(host);
return;
}
if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
- if (intmask & SDHCI_INT_TIMEOUT)
+ if (intmask & SDHCI_INT_TIMEOUT) {
host->cmd->error = -ETIMEDOUT;
- else
+ sdhci_err_stats_inc(host, CMD_TIMEOUT);
+ } else {
host->cmd->error = -EILSEQ;
-
+ if (!mmc_op_tuning(host->cmd->opcode))
+ sdhci_err_stats_inc(host, CMD_CRC);
+ }
/* Treat data command CRC error the same as data CRC error */
if (host->cmd->data &&
(intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
@@ -3266,6 +3276,8 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
-ETIMEDOUT :
-EILSEQ;
+ sdhci_err_stats_inc(host, AUTO_CMD);
+
if (sdhci_auto_cmd23(host, mrq)) {
mrq->sbc->error = err;
__sdhci_finish_mrq(host, mrq);
@@ -3342,6 +3354,7 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
if (intmask & SDHCI_INT_DATA_TIMEOUT) {
host->data_cmd = NULL;
data_cmd->error = -ETIMEDOUT;
+ sdhci_err_stats_inc(host, CMD_TIMEOUT);
__sdhci_finish_mrq(host, data_cmd->mrq);
return;
}
@@ -3370,23 +3383,30 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
mmc_hostname(host->mmc), (unsigned)intmask);
+ sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
sdhci_dumpregs(host);
return;
}
- if (intmask & SDHCI_INT_DATA_TIMEOUT)
+ if (intmask & SDHCI_INT_DATA_TIMEOUT) {
host->data->error = -ETIMEDOUT;
- else if (intmask & SDHCI_INT_DATA_END_BIT)
+ sdhci_err_stats_inc(host, DAT_TIMEOUT);
+ } else if (intmask & SDHCI_INT_DATA_END_BIT) {
host->data->error = -EILSEQ;
- else if ((intmask & SDHCI_INT_DATA_CRC) &&
+ if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
+ sdhci_err_stats_inc(host, DAT_CRC);
+ } else if ((intmask & SDHCI_INT_DATA_CRC) &&
SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
- != MMC_BUS_TEST_R)
+ != MMC_BUS_TEST_R) {
host->data->error = -EILSEQ;
- else if (intmask & SDHCI_INT_ADMA_ERROR) {
+ if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
+ sdhci_err_stats_inc(host, DAT_CRC);
+ } else if (intmask & SDHCI_INT_ADMA_ERROR) {
pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
intmask);
sdhci_adma_show_error(host);
+ sdhci_err_stats_inc(host, ADMA);
host->data->error = -EIO;
if (host->ops->adma_workaround)
host->ops->adma_workaround(host, intmask);
@@ -3584,6 +3604,7 @@ out:
if (unexpected) {
pr_err("%s: Unexpected interrupt 0x%08x.\n",
mmc_hostname(host->mmc), unexpected);
+ sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
sdhci_dumpregs(host);
}
@@ -3905,20 +3926,27 @@ bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
if (!host->cqe_on)
return false;
- if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC))
+ if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC)) {
*cmd_error = -EILSEQ;
- else if (intmask & SDHCI_INT_TIMEOUT)
+ if (!mmc_op_tuning(host->cmd->opcode))
+ sdhci_err_stats_inc(host, CMD_CRC);
+ } else if (intmask & SDHCI_INT_TIMEOUT) {
*cmd_error = -ETIMEDOUT;
- else
+ sdhci_err_stats_inc(host, CMD_TIMEOUT);
+ } else
*cmd_error = 0;
- if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
+ if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC)) {
*data_error = -EILSEQ;
- else if (intmask & SDHCI_INT_DATA_TIMEOUT)
+ if (!mmc_op_tuning(host->cmd->opcode))
+ sdhci_err_stats_inc(host, DAT_CRC);
+ } else if (intmask & SDHCI_INT_DATA_TIMEOUT) {
*data_error = -ETIMEDOUT;
- else if (intmask & SDHCI_INT_ADMA_ERROR)
+ sdhci_err_stats_inc(host, DAT_TIMEOUT);
+ } else if (intmask & SDHCI_INT_ADMA_ERROR) {
*data_error = -EIO;
- else
+ sdhci_err_stats_inc(host, ADMA);
+ } else
*data_error = 0;
/* Clear selected interrupts. */
@@ -3934,6 +3962,7 @@ bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
sdhci_writel(host, intmask, SDHCI_INT_STATUS);
pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
mmc_hostname(host->mmc), intmask);
+ sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
sdhci_dumpregs(host);
}
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index d7929d725730..95a08f09df30 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -356,6 +356,9 @@ struct sdhci_adma2_64_desc {
*/
#define MMC_CMD_TRANSFER_TIME (10 * NSEC_PER_MSEC) /* max 10 ms */
+#define sdhci_err_stats_inc(host, err_name) \
+ mmc_debugfs_err_stats_inc((host)->mmc, MMC_ERR_##err_name)
+
enum sdhci_cookie {
COOKIE_UNMAPPED,
COOKIE_PRE_MAPPED, /* mapped by sdhci_pre_req() */
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index b55a29c53d9c..53a2ad9a24b8 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -75,7 +75,7 @@ static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
tmio_mmc_clk_start(host);
}
-static void tmio_mmc_reset(struct tmio_mmc_host *host)
+static void tmio_mmc_reset(struct tmio_mmc_host *host, bool preserve)
{
sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
usleep_range(10000, 11000);
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index e754bb3f5c32..501613c74406 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -42,6 +42,7 @@
#define CTL_DMA_ENABLE 0xd8
#define CTL_RESET_SD 0xe0
#define CTL_VERSION 0xe2
+#define CTL_SDIF_MODE 0xe6 /* only known on R-Car 2+ */
/* Definitions for values the CTL_STOP_INTERNAL_ACTION register can take */
#define TMIO_STOP_STP BIT(0)
@@ -98,6 +99,9 @@
/* Definitions for values the CTL_DMA_ENABLE register can take */
#define DMA_ENABLE_DMASDRW BIT(1)
+/* Definitions for values the CTL_SDIF_MODE register can take */
+#define SDIF_MODE_HS400 BIT(0) /* only known on R-Car 2+ */
+
/* Define some IRQ masks */
/* This is the mask used at reset by the chip */
#define TMIO_MASK_ALL 0x837f031d
@@ -181,7 +185,7 @@ struct tmio_mmc_host {
int (*multi_io_quirk)(struct mmc_card *card,
unsigned int direction, int blk_size);
int (*write16_hook)(struct tmio_mmc_host *host, int addr);
- void (*reset)(struct tmio_mmc_host *host);
+ void (*reset)(struct tmio_mmc_host *host, bool preserve);
bool (*check_retune)(struct tmio_mmc_host *host, struct mmc_request *mrq);
void (*fixup_request)(struct tmio_mmc_host *host, struct mmc_request *mrq);
unsigned int (*get_timeout_cycles)(struct tmio_mmc_host *host);
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index a5850d83908b..437048bb8027 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -179,8 +179,17 @@ static void tmio_mmc_set_bus_width(struct tmio_mmc_host *host,
sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, reg);
}
-static void tmio_mmc_reset(struct tmio_mmc_host *host)
+static void tmio_mmc_reset(struct tmio_mmc_host *host, bool preserve)
{
+ u16 card_opt, clk_ctrl, sdif_mode;
+
+ if (preserve) {
+ card_opt = sd_ctrl_read16(host, CTL_SD_MEM_CARD_OPT);
+ clk_ctrl = sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL);
+ if (host->pdata->flags & TMIO_MMC_MIN_RCAR2)
+ sdif_mode = sd_ctrl_read16(host, CTL_SDIF_MODE);
+ }
+
/* FIXME - should we set stop clock reg here */
sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
usleep_range(10000, 11000);
@@ -190,7 +199,7 @@ static void tmio_mmc_reset(struct tmio_mmc_host *host)
tmio_mmc_abort_dma(host);
if (host->reset)
- host->reset(host);
+ host->reset(host, preserve);
sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask_all);
host->sdcard_irq_mask = host->sdcard_irq_mask_all;
@@ -206,6 +215,13 @@ static void tmio_mmc_reset(struct tmio_mmc_host *host)
sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
}
+ if (preserve) {
+ sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, card_opt);
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk_ctrl);
+ if (host->pdata->flags & TMIO_MMC_MIN_RCAR2)
+ sd_ctrl_write16(host, CTL_SDIF_MODE, sdif_mode);
+ }
+
if (host->mmc->card)
mmc_retune_needed(host->mmc);
}
@@ -248,7 +264,7 @@ static void tmio_mmc_reset_work(struct work_struct *work)
spin_unlock_irqrestore(&host->lock, flags);
- tmio_mmc_reset(host);
+ tmio_mmc_reset(host, true);
/* Ready for new calls */
host->mrq = NULL;
@@ -961,7 +977,7 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
tmio_mmc_power_off(host);
/* For R-Car Gen2+, we need to reset SDHI specific SCC */
if (host->pdata->flags & TMIO_MMC_MIN_RCAR2)
- tmio_mmc_reset(host);
+ tmio_mmc_reset(host, false);
host->set_clock(host, 0);
break;
@@ -1189,7 +1205,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
_host->sdcard_irq_mask_all = TMIO_MASK_ALL;
_host->set_clock(_host, 0);
- tmio_mmc_reset(_host);
+ tmio_mmc_reset(_host, false);
spin_lock_init(&_host->lock);
mutex_init(&_host->ios_lock);
@@ -1285,7 +1301,7 @@ int tmio_mmc_host_runtime_resume(struct device *dev)
struct tmio_mmc_host *host = dev_get_drvdata(dev);
tmio_mmc_clk_enable(host);
- tmio_mmc_reset(host);
+ tmio_mmc_reset(host, false);
if (host->clk_cache)
host->set_clock(host, host->clk_cache);
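
The new bool argument lets the core distinguish a recovery reset from a full reset. Summarising the hunks above as a comment:

/*
 * tmio_mmc_reset(host, true)  - reset_work error recovery: CTL_SD_MEM_CARD_OPT,
 *                               CTL_SD_CARD_CLK_CTL and, on R-Car Gen2+,
 *                               CTL_SDIF_MODE are read before the reset and
 *                               written back afterwards.
 * tmio_mmc_reset(host, false) - power-off in set_ios, host probe and runtime
 *                               resume: plain reset, nothing is preserved.
 */
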
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 134e27328597..25bad4318305 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -112,6 +112,13 @@ static const struct of_device_id dataflash_dt_ids[] = {
MODULE_DEVICE_TABLE(of, dataflash_dt_ids);
#endif
+static const struct spi_device_id dataflash_spi_ids[] = {
+ { .name = "at45", },
+ { .name = "dataflash", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, dataflash_spi_ids);
+
/* ......................................................................... */
/*
@@ -936,6 +943,7 @@ static struct spi_driver dataflash_driver = {
.probe = dataflash_probe,
.remove = dataflash_remove,
+ .id_table = dataflash_spi_ids,
/* FIXME: investigate suspend and resume... */
};
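
The new dataflash_spi_ids table gives the SPI core a modalias match for devices that are not instantiated from an OF node, and MODULE_DEVICE_TABLE() creates the spi:at45 / spi:dataflash aliases used for module autoloading. A hypothetical board-file registration that would now bind to this driver (values invented for illustration):

#include <linux/spi/spi.h>

static const struct spi_board_info example_board_info = {
	.modalias	= "dataflash",	/* matched against dataflash_spi_ids */
	.max_speed_hz	= 15000000,
	.bus_num	= 0,
	.chip_select	= 0,
};
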
diff --git a/drivers/mtd/devices/powernv_flash.c b/drivers/mtd/devices/powernv_flash.c
index 6950a8764815..36e060386e59 100644
--- a/drivers/mtd/devices/powernv_flash.c
+++ b/drivers/mtd/devices/powernv_flash.c
@@ -270,7 +270,9 @@ static int powernv_flash_release(struct platform_device *pdev)
struct powernv_flash *data = dev_get_drvdata(&(pdev->dev));
/* All resources should be freed automatically */
- return mtd_device_unregister(&(data->mtd));
+ WARN_ON(mtd_device_unregister(&data->mtd));
+
+ return 0;
}
static const struct of_device_id powernv_flash_match[] = {
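
This is the first of several remove() conversions across the MTD drivers in this diff: the platform core only warns about and then ignores a non-zero return from .remove(), so a failing mtd_device_unregister() is now reported with WARN_ON() and the callback returns success. The shared shape, sketched with a hypothetical driver:

#include <linux/mtd/mtd.h>
#include <linux/platform_device.h>

static int example_remove(struct platform_device *pdev)
{
	struct mtd_info *mtd = dev_get_drvdata(&pdev->dev);

	/* -EBUSY here would indicate a driver bug, so warn instead of failing */
	WARN_ON(mtd_device_unregister(mtd));

	return 0;
}
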
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
index 24073518587f..f58742486d3d 100644
--- a/drivers/mtd/devices/spear_smi.c
+++ b/drivers/mtd/devices/spear_smi.c
@@ -1045,13 +1045,9 @@ static int spear_smi_remove(struct platform_device *pdev)
{
struct spear_smi *dev;
struct spear_snor_flash *flash;
- int ret, i;
+ int i;
dev = platform_get_drvdata(pdev);
- if (!dev) {
- dev_err(&pdev->dev, "dev is null\n");
- return -ENODEV;
- }
/* clean up for all nor flash */
for (i = 0; i < dev->num_flashes; i++) {
@@ -1060,9 +1056,7 @@ static int spear_smi_remove(struct platform_device *pdev)
continue;
/* clean up mtd stuff */
- ret = mtd_device_unregister(&flash->mtd);
- if (ret)
- dev_err(&pdev->dev, "error removing mtd\n");
+ WARN_ON(mtd_device_unregister(&flash->mtd));
}
clk_disable_unprepare(dev->clk);
diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c
index d3377b10fc0f..54861d889c30 100644
--- a/drivers/mtd/devices/st_spi_fsm.c
+++ b/drivers/mtd/devices/st_spi_fsm.c
@@ -2084,15 +2084,12 @@ static int stfsm_probe(struct platform_device *pdev)
* Configure READ/WRITE/ERASE sequences according to platform and
* device flags.
*/
- if (info->config) {
+ if (info->config)
ret = info->config(fsm);
- if (ret)
- goto err_clk_unprepare;
- } else {
+ else
ret = stfsm_prepare_rwe_seqs_default(fsm);
- if (ret)
- goto err_clk_unprepare;
- }
+ if (ret)
+ goto err_clk_unprepare;
fsm->mtd.name = info->name;
fsm->mtd.dev.parent = &pdev->dev;
@@ -2115,10 +2112,12 @@ static int stfsm_probe(struct platform_device *pdev)
(long long)fsm->mtd.size, (long long)(fsm->mtd.size >> 20),
fsm->mtd.erasesize, (fsm->mtd.erasesize >> 10));
- return mtd_device_register(&fsm->mtd, NULL, 0);
-
+ ret = mtd_device_register(&fsm->mtd, NULL, 0);
+ if (ret) {
err_clk_unprepare:
- clk_disable_unprepare(fsm->clk);
+ clk_disable_unprepare(fsm->clk);
+ }
+
return ret;
}
@@ -2126,9 +2125,11 @@ static int stfsm_remove(struct platform_device *pdev)
{
struct stfsm *fsm = platform_get_drvdata(pdev);
+ WARN_ON(mtd_device_unregister(&fsm->mtd));
+
clk_disable_unprepare(fsm->clk);
- return mtd_device_unregister(&fsm->mtd);
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/mtd/hyperbus/hbmc-am654.c b/drivers/mtd/hyperbus/hbmc-am654.c
index a3439b791eeb..a6161ce340d4 100644
--- a/drivers/mtd/hyperbus/hbmc-am654.c
+++ b/drivers/mtd/hyperbus/hbmc-am654.c
@@ -233,16 +233,16 @@ static int am654_hbmc_remove(struct platform_device *pdev)
{
struct am654_hbmc_priv *priv = platform_get_drvdata(pdev);
struct am654_hbmc_device_priv *dev_priv = priv->hbdev.priv;
- int ret;
- ret = hyperbus_unregister_device(&priv->hbdev);
+ hyperbus_unregister_device(&priv->hbdev);
+
if (priv->mux_ctrl)
mux_control_deselect(priv->mux_ctrl);
if (dev_priv->rx_chan)
dma_release_channel(dev_priv->rx_chan);
- return ret;
+ return 0;
}
static const struct of_device_id am654_hbmc_dt_ids[] = {
diff --git a/drivers/mtd/hyperbus/hyperbus-core.c b/drivers/mtd/hyperbus/hyperbus-core.c
index 2f9fc4e17d53..4d8047d43e48 100644
--- a/drivers/mtd/hyperbus/hyperbus-core.c
+++ b/drivers/mtd/hyperbus/hyperbus-core.c
@@ -126,16 +126,12 @@ int hyperbus_register_device(struct hyperbus_device *hbdev)
}
EXPORT_SYMBOL_GPL(hyperbus_register_device);
-int hyperbus_unregister_device(struct hyperbus_device *hbdev)
+void hyperbus_unregister_device(struct hyperbus_device *hbdev)
{
- int ret = 0;
-
if (hbdev && hbdev->mtd) {
- ret = mtd_device_unregister(hbdev->mtd);
+ WARN_ON(mtd_device_unregister(hbdev->mtd));
map_destroy(hbdev->mtd);
}
-
- return ret;
}
EXPORT_SYMBOL_GPL(hyperbus_unregister_device);
diff --git a/drivers/mtd/hyperbus/rpc-if.c b/drivers/mtd/hyperbus/rpc-if.c
index 6e08ec1d4f09..d00d30243403 100644
--- a/drivers/mtd/hyperbus/rpc-if.c
+++ b/drivers/mtd/hyperbus/rpc-if.c
@@ -134,7 +134,7 @@ static int rpcif_hb_probe(struct platform_device *pdev)
error = rpcif_hw_init(&hyperbus->rpc, true);
if (error)
- return error;
+ goto out_disable_rpm;
hyperbus->hbdev.map.size = hyperbus->rpc.size;
hyperbus->hbdev.map.virt = hyperbus->rpc.dirmap;
@@ -145,19 +145,24 @@ static int rpcif_hb_probe(struct platform_device *pdev)
hyperbus->hbdev.np = of_get_next_child(pdev->dev.parent->of_node, NULL);
error = hyperbus_register_device(&hyperbus->hbdev);
if (error)
- rpcif_disable_rpm(&hyperbus->rpc);
+ goto out_disable_rpm;
+
+ return 0;
+out_disable_rpm:
+ rpcif_disable_rpm(&hyperbus->rpc);
return error;
}
static int rpcif_hb_remove(struct platform_device *pdev)
{
struct rpcif_hyperbus *hyperbus = platform_get_drvdata(pdev);
- int error = hyperbus_unregister_device(&hyperbus->hbdev);
+
+ hyperbus_unregister_device(&hyperbus->hbdev);
rpcif_disable_rpm(&hyperbus->rpc);
- return error;
+ return 0;
}
static struct platform_driver rpcif_platform_driver = {
diff --git a/drivers/mtd/lpddr/lpddr2_nvm.c b/drivers/mtd/lpddr/lpddr2_nvm.c
index 72f5c7b30079..367e2d906de0 100644
--- a/drivers/mtd/lpddr/lpddr2_nvm.c
+++ b/drivers/mtd/lpddr/lpddr2_nvm.c
@@ -478,7 +478,9 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
*/
static int lpddr2_nvm_remove(struct platform_device *pdev)
{
- return mtd_device_unregister(dev_get_drvdata(&pdev->dev));
+ WARN_ON(mtd_device_unregister(dev_get_drvdata(&pdev->dev)));
+
+ return 0;
}
/* Initialize platform_driver data structure for lpddr2_nvm */
diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
index 4f63b8430c71..85eca6a192e6 100644
--- a/drivers/mtd/maps/physmap-core.c
+++ b/drivers/mtd/maps/physmap-core.c
@@ -66,18 +66,12 @@ static int physmap_flash_remove(struct platform_device *dev)
{
struct physmap_flash_info *info;
struct physmap_flash_data *physmap_data;
- int i, err = 0;
+ int i;
info = platform_get_drvdata(dev);
- if (!info) {
- err = -EINVAL;
- goto out;
- }
if (info->cmtd) {
- err = mtd_device_unregister(info->cmtd);
- if (err)
- goto out;
+ WARN_ON(mtd_device_unregister(info->cmtd));
if (info->cmtd != info->mtds[0])
mtd_concat_destroy(info->cmtd);
@@ -92,10 +86,9 @@ static int physmap_flash_remove(struct platform_device *dev)
if (physmap_data && physmap_data->exit)
physmap_data->exit(dev);
-out:
pm_runtime_put(&dev->dev);
pm_runtime_disable(&dev->dev);
- return err;
+ return 0;
}
static void physmap_set_vpp(struct map_info *map, int state)
diff --git a/drivers/mtd/maps/physmap-versatile.c b/drivers/mtd/maps/physmap-versatile.c
index ad7cd9cfaee0..a1b8b7b25f88 100644
--- a/drivers/mtd/maps/physmap-versatile.c
+++ b/drivers/mtd/maps/physmap-versatile.c
@@ -93,6 +93,7 @@ static int ap_flash_init(struct platform_device *pdev)
return -ENODEV;
}
ebi_base = of_iomap(ebi, 0);
+ of_node_put(ebi);
if (!ebi_base)
return -ENODEV;
@@ -207,6 +208,7 @@ int of_flash_probe_versatile(struct platform_device *pdev,
versatile_flashprot = (enum versatile_flashprot)devid->data;
rmap = syscon_node_to_regmap(sysnp);
+ of_node_put(sysnp);
if (IS_ERR(rmap))
return PTR_ERR(rmap);
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index d0f9c4b0285c..05860288a7af 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -615,21 +615,24 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd,
if (!usr_oob)
req.ooblen = 0;
+ req.len &= 0xffffffff;
+ req.ooblen &= 0xffffffff;
+
if (req.start + req.len > mtd->size)
return -EINVAL;
datbuf_len = min_t(size_t, req.len, mtd->erasesize);
if (datbuf_len > 0) {
- datbuf = kmalloc(datbuf_len, GFP_KERNEL);
+ datbuf = kvmalloc(datbuf_len, GFP_KERNEL);
if (!datbuf)
return -ENOMEM;
}
oobbuf_len = min_t(size_t, req.ooblen, mtd->erasesize);
if (oobbuf_len > 0) {
- oobbuf = kmalloc(oobbuf_len, GFP_KERNEL);
+ oobbuf = kvmalloc(oobbuf_len, GFP_KERNEL);
if (!oobbuf) {
- kfree(datbuf);
+ kvfree(datbuf);
return -ENOMEM;
}
}
@@ -679,8 +682,8 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd,
usr_oob += ops.oobretlen;
}
- kfree(datbuf);
- kfree(oobbuf);
+ kvfree(datbuf);
+ kvfree(oobbuf);
return ret;
}
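
The buffers in mtdchar_write_ioctl() are sized up to mtd->erasesize, which on large-block NAND can be several megabytes; kvmalloc() keeps the kmalloc() fast path but falls back to vmalloc() when contiguous pages are not available, and kvfree() releases either kind. A minimal sketch:

#include <linux/mm.h>	/* kvmalloc()/kvfree(); header location assumed */

static void *example_alloc_io_buf(size_t len)
{
	/* Tries kmalloc() first, falls back to vmalloc() for large buffers */
	return kvmalloc(len, GFP_KERNEL);
}

static void example_free_io_buf(void *buf)
{
	kvfree(buf);	/* handles both kmalloc()- and vmalloc()-backed memory */
}
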
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 9eb0680db312..a9b8be9f40dc 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -546,6 +546,68 @@ static int mtd_nvmem_add(struct mtd_info *mtd)
return 0;
}
+static void mtd_check_of_node(struct mtd_info *mtd)
+{
+ struct device_node *partitions, *parent_dn, *mtd_dn = NULL;
+ const char *pname, *prefix = "partition-";
+ int plen, mtd_name_len, offset, prefix_len;
+ struct mtd_info *parent;
+ bool found = false;
+
+ /* Check if MTD already has a device node */
+ if (dev_of_node(&mtd->dev))
+ return;
+
+ /* Check if a partitions node exist */
+ if (!mtd_is_partition(mtd))
+ return;
+ parent = mtd->parent;
+ parent_dn = dev_of_node(&parent->dev);
+ if (!parent_dn)
+ return;
+
+ partitions = of_get_child_by_name(parent_dn, "partitions");
+ if (!partitions)
+ goto exit_parent;
+
+ prefix_len = strlen(prefix);
+ mtd_name_len = strlen(mtd->name);
+
+ /* Search if a partition is defined with the same name */
+ for_each_child_of_node(partitions, mtd_dn) {
+ offset = 0;
+
+ /* Skip partition with no/wrong prefix */
+ if (!of_node_name_prefix(mtd_dn, "partition-"))
+ continue;
+
+		/* Label has priority, so check that first */
+ if (of_property_read_string(mtd_dn, "label", &pname)) {
+ of_property_read_string(mtd_dn, "name", &pname);
+ offset = prefix_len;
+ }
+
+ plen = strlen(pname) - offset;
+ if (plen == mtd_name_len &&
+ !strncmp(mtd->name, pname + offset, plen)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ goto exit_partitions;
+
+ /* Set of_node only for nvmem */
+ if (of_device_is_compatible(mtd_dn, "nvmem-cells"))
+ mtd_set_of_node(mtd, mtd_dn);
+
+exit_partitions:
+ of_node_put(partitions);
+exit_parent:
+ of_node_put(parent_dn);
+}
+
/**
* add_mtd_device - register an MTD device
* @mtd: pointer to new MTD device info structure
@@ -658,6 +720,7 @@ int add_mtd_device(struct mtd_info *mtd)
mtd->dev.devt = MTD_DEVT(i);
dev_set_name(&mtd->dev, "mtd%d", i);
dev_set_drvdata(&mtd->dev, mtd);
+ mtd_check_of_node(mtd);
of_node_get(mtd_get_of_node(mtd));
error = device_register(&mtd->dev);
if (error)
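
mtd_check_of_node() back-fills an of_node for partitions whose parser did not set one: it searches the parent's "partitions" node for a child whose "label" property (checked first) or node name minus the "partition-" prefix equals the MTD name, and attaches the node only when it is compatible with "nvmem-cells". A hypothetical device-tree fragment that the lookup above would match (names invented for illustration):

/*
 *	partitions {
 *		partition-caldata {
 *			label = "caldata";
 *			compatible = "nvmem-cells";
 *		};
 *	};
 *
 * An MTD partition named "caldata", however it was created, would be linked
 * to this node, making the nvmem cells declared inside it available to
 * consumers.
 */
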
diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c
index 53bd10738418..296fb16c8dc3 100644
--- a/drivers/mtd/nand/raw/arasan-nand-controller.c
+++ b/drivers/mtd/nand/raw/arasan-nand-controller.c
@@ -347,17 +347,17 @@ static int anfc_select_target(struct nand_chip *chip, int target)
/* Update clock frequency */
if (nfc->cur_clk != anand->clk) {
- clk_disable_unprepare(nfc->controller_clk);
- ret = clk_set_rate(nfc->controller_clk, anand->clk);
+ clk_disable_unprepare(nfc->bus_clk);
+ ret = clk_set_rate(nfc->bus_clk, anand->clk);
if (ret) {
dev_err(nfc->dev, "Failed to change clock rate\n");
return ret;
}
- ret = clk_prepare_enable(nfc->controller_clk);
+ ret = clk_prepare_enable(nfc->bus_clk);
if (ret) {
dev_err(nfc->dev,
- "Failed to re-enable the controller clock\n");
+ "Failed to re-enable the bus clock\n");
return ret;
}
@@ -1043,7 +1043,13 @@ static int anfc_setup_interface(struct nand_chip *chip, int target,
DQS_BUFF_SEL_OUT(dqs_mode);
}
- anand->clk = ANFC_XLNX_SDR_DFLT_CORE_CLK;
+ if (nand_interface_is_sdr(conf)) {
+ anand->clk = ANFC_XLNX_SDR_DFLT_CORE_CLK;
+ } else {
+ /* ONFI timings are defined in picoseconds */
+ anand->clk = div_u64((u64)NSEC_PER_SEC * 1000,
+ conf->timings.nvddr.tCK_min);
+ }
/*
* Due to a hardware bug in the ZynqMP SoC, SDR timing modes 0-1 work
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
index 6ef14442c71a..c9ac3baf68c0 100644
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -2629,7 +2629,9 @@ static int atmel_nand_controller_remove(struct platform_device *pdev)
{
struct atmel_nand_controller *nc = platform_get_drvdata(pdev);
- return nc->caps->ops->remove(nc);
+ WARN_ON(nc->caps->ops->remove(nc));
+
+ return 0;
}
static __maybe_unused int atmel_nand_controller_resume(struct device *dev)
diff --git a/drivers/mtd/nand/raw/cafe_nand.c b/drivers/mtd/nand/raw/cafe_nand.c
index 9dbf031716a6..af119e376352 100644
--- a/drivers/mtd/nand/raw/cafe_nand.c
+++ b/drivers/mtd/nand/raw/cafe_nand.c
@@ -679,8 +679,10 @@ static int cafe_nand_probe(struct pci_dev *pdev,
pci_set_master(pdev);
cafe = kzalloc(sizeof(*cafe), GFP_KERNEL);
- if (!cafe)
- return -ENOMEM;
+ if (!cafe) {
+ err = -ENOMEM;
+ goto out_disable_device;
+ }
mtd = nand_to_mtd(&cafe->nand);
mtd->dev.parent = &pdev->dev;
@@ -801,6 +803,8 @@ static int cafe_nand_probe(struct pci_dev *pdev,
pci_iounmap(pdev, cafe->mmio);
out_free_mtd:
kfree(cafe);
+ out_disable_device:
+ pci_disable_device(pdev);
out:
return err;
}
@@ -822,6 +826,7 @@ static void cafe_nand_remove(struct pci_dev *pdev)
pci_iounmap(pdev, cafe->mmio);
dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
kfree(cafe);
+ pci_disable_device(pdev);
}
static const struct pci_device_id cafe_nand_tbl[] = {
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
index ac3be92872d0..829b76b303aa 100644
--- a/drivers/mtd/nand/raw/meson_nand.c
+++ b/drivers/mtd/nand/raw/meson_nand.c
@@ -1293,26 +1293,20 @@ meson_nfc_nand_chip_init(struct device *dev,
return 0;
}
-static int meson_nfc_nand_chip_cleanup(struct meson_nfc *nfc)
+static void meson_nfc_nand_chip_cleanup(struct meson_nfc *nfc)
{
struct meson_nfc_nand_chip *meson_chip;
struct mtd_info *mtd;
- int ret;
while (!list_empty(&nfc->chips)) {
meson_chip = list_first_entry(&nfc->chips,
struct meson_nfc_nand_chip, node);
mtd = nand_to_mtd(&meson_chip->nand);
- ret = mtd_device_unregister(mtd);
- if (ret)
- return ret;
+ WARN_ON(mtd_device_unregister(mtd));
- meson_nfc_free_buffer(&meson_chip->nand);
nand_cleanup(&meson_chip->nand);
list_del(&meson_chip->node);
}
-
- return 0;
}
static int meson_nfc_nand_chips_init(struct device *dev,
@@ -1445,16 +1439,11 @@ err_clk:
static int meson_nfc_remove(struct platform_device *pdev)
{
struct meson_nfc *nfc = platform_get_drvdata(pdev);
- int ret;
- ret = meson_nfc_nand_chip_cleanup(nfc);
- if (ret)
- return ret;
+ meson_nfc_nand_chip_cleanup(nfc);
meson_nfc_disable_clk(nfc);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
index 58c32a11792e..4a9f2b6c772d 100644
--- a/drivers/mtd/nand/raw/omap2.c
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -2278,16 +2278,14 @@ static int omap_nand_remove(struct platform_device *pdev)
struct mtd_info *mtd = platform_get_drvdata(pdev);
struct nand_chip *nand_chip = mtd_to_nand(mtd);
struct omap_nand_info *info = mtd_to_omap(mtd);
- int ret;
rawnand_sw_bch_cleanup(nand_chip);
if (info->dma)
dma_release_channel(info->dma);
- ret = mtd_device_unregister(mtd);
- WARN_ON(ret);
+ WARN_ON(mtd_device_unregister(mtd));
nand_cleanup(nand_chip);
- return ret;
+ return 0;
}
/* omap_nand_ids defined in linux/platform_data/mtd-nand-omap2.h */
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 048b255faa76..8f80019a9f01 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -80,8 +80,10 @@
#define DISABLE_STATUS_AFTER_WRITE 4
#define CW_PER_PAGE 6
#define UD_SIZE_BYTES 9
+#define UD_SIZE_BYTES_MASK GENMASK(18, 9)
#define ECC_PARITY_SIZE_BYTES_RS 19
#define SPARE_SIZE_BYTES 23
+#define SPARE_SIZE_BYTES_MASK GENMASK(26, 23)
#define NUM_ADDR_CYCLES 27
#define STATUS_BFR_READ 30
#define SET_RD_MODE_AFTER_STATUS 31
@@ -102,6 +104,7 @@
#define ECC_MODE 4
#define ECC_PARITY_SIZE_BYTES_BCH 8
#define ECC_NUM_DATA_BYTES 16
+#define ECC_NUM_DATA_BYTES_MASK GENMASK(25, 16)
#define ECC_FORCE_CLK_OPEN 30
/* NAND_DEV_CMD1 bits */
@@ -238,6 +241,9 @@ nandc_set_reg(chip, reg, \
* @bam_ce - the array of BAM command elements
* @cmd_sgl - sgl for NAND BAM command pipe
* @data_sgl - sgl for NAND BAM consumer/producer pipe
+ * @last_data_desc - last DMA desc in data channel (tx/rx).
+ * @last_cmd_desc - last DMA desc in command channel.
+ * @txn_done - completion for NAND transfer.
* @bam_ce_pos - the index in bam_ce which is available for next sgl
* @bam_ce_start - the index in bam_ce which marks the start position ce
* for current sgl. It will be used for size calculation
@@ -250,14 +256,14 @@ nandc_set_reg(chip, reg, \
* @rx_sgl_start - start index in data sgl for rx.
* @wait_second_completion - wait for second DMA desc completion before making
* the NAND transfer completion.
- * @txn_done - completion for NAND transfer.
- * @last_data_desc - last DMA desc in data channel (tx/rx).
- * @last_cmd_desc - last DMA desc in command channel.
*/
struct bam_transaction {
struct bam_cmd_element *bam_ce;
struct scatterlist *cmd_sgl;
struct scatterlist *data_sgl;
+ struct dma_async_tx_descriptor *last_data_desc;
+ struct dma_async_tx_descriptor *last_cmd_desc;
+ struct completion txn_done;
u32 bam_ce_pos;
u32 bam_ce_start;
u32 cmd_sgl_pos;
@@ -267,25 +273,23 @@ struct bam_transaction {
u32 rx_sgl_pos;
u32 rx_sgl_start;
bool wait_second_completion;
- struct completion txn_done;
- struct dma_async_tx_descriptor *last_data_desc;
- struct dma_async_tx_descriptor *last_cmd_desc;
};
/*
* This data type corresponds to the nand dma descriptor
+ * @dma_desc - low level DMA engine descriptor
* @list - list for desc_info
- * @dir - DMA transfer direction
+ *
* @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
* ADM
* @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
* @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
- * @dma_desc - low level DMA engine descriptor
+ * @dir - DMA transfer direction
*/
struct desc_info {
+ struct dma_async_tx_descriptor *dma_desc;
struct list_head node;
- enum dma_data_direction dir;
union {
struct scatterlist adm_sgl;
struct {
@@ -293,7 +297,7 @@ struct desc_info {
int sgl_cnt;
};
};
- struct dma_async_tx_descriptor *dma_desc;
+ enum dma_data_direction dir;
};
/*
@@ -337,52 +341,64 @@ struct nandc_regs {
/*
* NAND controller data struct
*
- * @controller: base controller structure
- * @host_list: list containing all the chips attached to the
- * controller
* @dev: parent device
+ *
* @base: MMIO base
- * @base_phys: physical base address of controller registers
- * @base_dma: dma base address of controller registers
+ *
* @core_clk: controller clock
* @aon_clk: another controller clock
*
+ * @regs: a contiguous chunk of memory for DMA register
+ * writes. contains the register values to be
+ * written to controller
+ *
+ * @props: properties of current NAND controller,
+ * initialized via DT match data
+ *
+ * @controller: base controller structure
+ * @host_list: list containing all the chips attached to the
+ * controller
+ *
* @chan: dma channel
* @cmd_crci: ADM DMA CRCI for command flow control
* @data_crci: ADM DMA CRCI for data flow control
+ *
* @desc_list: DMA descriptor list (list of desc_infos)
*
* @data_buffer: our local DMA buffer for page read/writes,
* used when we can't use the buffer provided
* by upper layers directly
- * @buf_size/count/start: markers for chip->legacy.read_buf/write_buf
- * functions
* @reg_read_buf: local buffer for reading back registers via DMA
+ *
+ * @base_phys: physical base address of controller registers
+ * @base_dma: dma base address of controller registers
* @reg_read_dma: contains dma address for register read buffer
- * @reg_read_pos: marker for data read in reg_read_buf
*
- * @regs: a contiguous chunk of memory for DMA register
- * writes. contains the register values to be
- * written to controller
- * @cmd1/vld: some fixed controller register values
- * @props: properties of current NAND controller,
- * initialized via DT match data
+ * @buf_size/count/start: markers for chip->legacy.read_buf/write_buf
+ * functions
* @max_cwperpage: maximum QPIC codewords required. calculated
* from all connected NAND devices pagesize
+ *
+ * @reg_read_pos: marker for data read in reg_read_buf
+ *
+ * @cmd1/vld: some fixed controller register values
*/
struct qcom_nand_controller {
- struct nand_controller controller;
- struct list_head host_list;
-
struct device *dev;
void __iomem *base;
- phys_addr_t base_phys;
- dma_addr_t base_dma;
struct clk *core_clk;
struct clk *aon_clk;
+ struct nandc_regs *regs;
+ struct bam_transaction *bam_txn;
+
+ const struct qcom_nandc_props *props;
+
+ struct nand_controller controller;
+ struct list_head host_list;
+
union {
/* will be used only by QPIC for BAM DMA */
struct {
@@ -400,64 +416,89 @@ struct qcom_nand_controller {
};
struct list_head desc_list;
- struct bam_transaction *bam_txn;
u8 *data_buffer;
+ __le32 *reg_read_buf;
+
+ phys_addr_t base_phys;
+ dma_addr_t base_dma;
+ dma_addr_t reg_read_dma;
+
int buf_size;
int buf_count;
int buf_start;
unsigned int max_cwperpage;
- __le32 *reg_read_buf;
- dma_addr_t reg_read_dma;
int reg_read_pos;
- struct nandc_regs *regs;
-
u32 cmd1, vld;
- const struct qcom_nandc_props *props;
+};
+
+/*
+ * NAND special boot partitions
+ *
+ * @page_offset: offset of the partition where spare data is not protected
+ * by ECC (value in pages)
+ * @page_size: size of the partition where spare data is not protected
+ * by ECC (value in pages)
+ */
+struct qcom_nand_boot_partition {
+ u32 page_offset;
+ u32 page_size;
};
/*
* NAND chip structure
*
+ * @boot_partitions: array of boot partitions where offset and size of the
+ * boot partitions are stored
+ *
* @chip: base NAND chip structure
* @node: list node to add itself to host_list in
* qcom_nand_controller
*
+ * @nr_boot_partitions: count of the boot partitions where spare data is not
+ * protected by ECC
+ *
* @cs: chip select value for this chip
* @cw_size: the number of bytes in a single step/codeword
* of a page, consisting of all data, ecc, spare
* and reserved bytes
* @cw_data: the number of bytes within a codeword protected
* by ECC
- * @use_ecc: request the controller to use ECC for the
- * upcoming read/write
- * @bch_enabled: flag to tell whether BCH ECC mode is used
* @ecc_bytes_hw: ECC bytes used by controller hardware for this
* chip
- * @status: value to be returned if NAND_CMD_STATUS command
- * is executed
+ *
* @last_command: keeps track of last command on this chip. used
* for reading correct status
*
* @cfg0, cfg1, cfg0_raw..: NANDc register configurations needed for
* ecc/non-ecc mode for the current nand flash
* device
+ *
+ * @status: value to be returned if NAND_CMD_STATUS command
+ * is executed
+ * @codeword_fixup: keep track of the current layout used by
+ * the driver for read/write operation.
+ * @use_ecc: request the controller to use ECC for the
+ * upcoming read/write
+ * @bch_enabled: flag to tell whether BCH ECC mode is used
*/
struct qcom_nand_host {
+ struct qcom_nand_boot_partition *boot_partitions;
+
struct nand_chip chip;
struct list_head node;
+ int nr_boot_partitions;
+
int cs;
int cw_size;
int cw_data;
- bool use_ecc;
- bool bch_enabled;
int ecc_bytes_hw;
int spare_bytes;
int bbm_size;
- u8 status;
+
int last_command;
u32 cfg0, cfg1;
@@ -466,23 +507,30 @@ struct qcom_nand_host {
u32 ecc_bch_cfg;
u32 clrflashstatus;
u32 clrreadstatus;
+
+ u8 status;
+ bool codeword_fixup;
+ bool use_ecc;
+ bool bch_enabled;
};
/*
* This data type corresponds to the NAND controller properties which varies
* among different NAND controllers.
* @ecc_modes - ecc mode for NAND
+ * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
* @is_bam - whether NAND controller is using BAM
* @is_qpic - whether NAND CTRL is part of qpic IP
* @qpic_v2 - flag to indicate QPIC IP version 2
- * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
+ * @use_codeword_fixup - whether NAND has different layout for boot partitions
*/
struct qcom_nandc_props {
u32 ecc_modes;
+ u32 dev_cmd_reg_start;
bool is_bam;
bool is_qpic;
bool qpic_v2;
- u32 dev_cmd_reg_start;
+ bool use_codeword_fixup;
};
/* Frees the BAM transaction memory */
@@ -1701,7 +1749,7 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
oob_size1 = host->bbm_size;
- if (qcom_nandc_is_last_cw(ecc, cw)) {
+ if (qcom_nandc_is_last_cw(ecc, cw) && !host->codeword_fixup) {
data_size2 = ecc->size - data_size1 -
((ecc->steps - 1) * 4);
oob_size2 = (ecc->steps * 4) + host->ecc_bytes_hw +
@@ -1782,7 +1830,7 @@ check_for_erased_page(struct qcom_nand_host *host, u8 *data_buf,
}
for_each_set_bit(cw, &uncorrectable_cws, ecc->steps) {
- if (qcom_nandc_is_last_cw(ecc, cw)) {
+ if (qcom_nandc_is_last_cw(ecc, cw) && !host->codeword_fixup) {
data_size = ecc->size - ((ecc->steps - 1) * 4);
oob_size = (ecc->steps * 4) + host->ecc_bytes_hw;
} else {
@@ -1940,7 +1988,7 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
for (i = 0; i < ecc->steps; i++) {
int data_size, oob_size;
- if (qcom_nandc_is_last_cw(ecc, i)) {
+ if (qcom_nandc_is_last_cw(ecc, i) && !host->codeword_fixup) {
data_size = ecc->size - ((ecc->steps - 1) << 2);
oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
host->spare_bytes;
@@ -2037,6 +2085,69 @@ static int copy_last_cw(struct qcom_nand_host *host, int page)
return ret;
}
+static bool qcom_nandc_is_boot_partition(struct qcom_nand_host *host, int page)
+{
+ struct qcom_nand_boot_partition *boot_partition;
+ u32 start, end;
+ int i;
+
+ /*
+	 * Since most accesses will be to non-boot partitions such as rootfs,
+	 * optimize the page check by:
+	 *
+	 * 1. Checking whether the page lies after the last boot partition.
+	 * 2. Otherwise, checking the boot partitions starting from the last one.
+ */
+
+ /* First check the last boot partition */
+ boot_partition = &host->boot_partitions[host->nr_boot_partitions - 1];
+ start = boot_partition->page_offset;
+ end = start + boot_partition->page_size;
+
+ /* Page is after the last boot partition end. This is NOT a boot partition */
+ if (page > end)
+ return false;
+
+ /* Actually check if it's a boot partition */
+ if (page < end && page >= start)
+ return true;
+
+ /* Check the other boot partitions starting from the second-last partition */
+ for (i = host->nr_boot_partitions - 2; i >= 0; i--) {
+ boot_partition = &host->boot_partitions[i];
+ start = boot_partition->page_offset;
+ end = start + boot_partition->page_size;
+
+ if (page < end && page >= start)
+ return true;
+ }
+
+ return false;
+}
+
+static void qcom_nandc_codeword_fixup(struct qcom_nand_host *host, int page)
+{
+ bool codeword_fixup = qcom_nandc_is_boot_partition(host, page);
+
+ /* Skip conf write if we are already in the correct mode */
+ if (codeword_fixup == host->codeword_fixup)
+ return;
+
+ host->codeword_fixup = codeword_fixup;
+
+ host->cw_data = codeword_fixup ? 512 : 516;
+ host->spare_bytes = host->cw_size - host->ecc_bytes_hw -
+ host->bbm_size - host->cw_data;
+
+ host->cfg0 &= ~(SPARE_SIZE_BYTES_MASK | UD_SIZE_BYTES_MASK);
+ host->cfg0 |= host->spare_bytes << SPARE_SIZE_BYTES |
+ host->cw_data << UD_SIZE_BYTES;
+
+ host->ecc_bch_cfg &= ~ECC_NUM_DATA_BYTES_MASK;
+ host->ecc_bch_cfg |= host->cw_data << ECC_NUM_DATA_BYTES;
+ host->ecc_buf_cfg = (host->cw_data - 1) << NUM_STEPS;
+}
+
/* implements ecc->read_page() */
static int qcom_nandc_read_page(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
@@ -2045,6 +2156,9 @@ static int qcom_nandc_read_page(struct nand_chip *chip, uint8_t *buf,
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
u8 *data_buf, *oob_buf = NULL;
+ if (host->nr_boot_partitions)
+ qcom_nandc_codeword_fixup(host, page);
+
nand_read_page_op(chip, page, 0, NULL, 0);
data_buf = buf;
oob_buf = oob_required ? chip->oob_poi : NULL;
@@ -2064,6 +2178,9 @@ static int qcom_nandc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
int cw, ret;
u8 *data_buf = buf, *oob_buf = chip->oob_poi;
+ if (host->nr_boot_partitions)
+ qcom_nandc_codeword_fixup(host, page);
+
for (cw = 0; cw < ecc->steps; cw++) {
ret = qcom_nandc_read_cw_raw(mtd, chip, data_buf, oob_buf,
page, cw);
@@ -2084,6 +2201,9 @@ static int qcom_nandc_read_oob(struct nand_chip *chip, int page)
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
+ if (host->nr_boot_partitions)
+ qcom_nandc_codeword_fixup(host, page);
+
clear_read_regs(nandc);
clear_bam_transaction(nandc);
@@ -2104,6 +2224,9 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const uint8_t *buf,
u8 *data_buf, *oob_buf;
int i, ret;
+ if (host->nr_boot_partitions)
+ qcom_nandc_codeword_fixup(host, page);
+
nand_prog_page_begin_op(chip, page, 0, NULL, 0);
clear_read_regs(nandc);
@@ -2119,7 +2242,7 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const uint8_t *buf,
for (i = 0; i < ecc->steps; i++) {
int data_size, oob_size;
- if (qcom_nandc_is_last_cw(ecc, i)) {
+ if (qcom_nandc_is_last_cw(ecc, i) && !host->codeword_fixup) {
data_size = ecc->size - ((ecc->steps - 1) << 2);
oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
host->spare_bytes;
@@ -2176,6 +2299,9 @@ static int qcom_nandc_write_page_raw(struct nand_chip *chip,
u8 *data_buf, *oob_buf;
int i, ret;
+ if (host->nr_boot_partitions)
+ qcom_nandc_codeword_fixup(host, page);
+
nand_prog_page_begin_op(chip, page, 0, NULL, 0);
clear_read_regs(nandc);
clear_bam_transaction(nandc);
@@ -2194,7 +2320,7 @@ static int qcom_nandc_write_page_raw(struct nand_chip *chip,
data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
oob_size1 = host->bbm_size;
- if (qcom_nandc_is_last_cw(ecc, i)) {
+ if (qcom_nandc_is_last_cw(ecc, i) && !host->codeword_fixup) {
data_size2 = ecc->size - data_size1 -
((ecc->steps - 1) << 2);
oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
@@ -2254,6 +2380,9 @@ static int qcom_nandc_write_oob(struct nand_chip *chip, int page)
int data_size, oob_size;
int ret;
+ if (host->nr_boot_partitions)
+ qcom_nandc_codeword_fixup(host, page);
+
host->use_ecc = true;
clear_bam_transaction(nandc);
@@ -2915,6 +3044,74 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
static const char * const probes[] = { "cmdlinepart", "ofpart", "qcomsmem", NULL };
+static int qcom_nand_host_parse_boot_partitions(struct qcom_nand_controller *nandc,
+ struct qcom_nand_host *host,
+ struct device_node *dn)
+{
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct qcom_nand_boot_partition *boot_partition;
+ struct device *dev = nandc->dev;
+ int partitions_count, i, j, ret;
+
+ if (!of_find_property(dn, "qcom,boot-partitions", NULL))
+ return 0;
+
+ partitions_count = of_property_count_u32_elems(dn, "qcom,boot-partitions");
+ if (partitions_count <= 0) {
+ dev_err(dev, "Error parsing boot partition\n");
+ return partitions_count ? partitions_count : -EINVAL;
+ }
+
+ host->nr_boot_partitions = partitions_count / 2;
+ host->boot_partitions = devm_kcalloc(dev, host->nr_boot_partitions,
+ sizeof(*host->boot_partitions), GFP_KERNEL);
+ if (!host->boot_partitions) {
+ host->nr_boot_partitions = 0;
+ return -ENOMEM;
+ }
+
+ for (i = 0, j = 0; i < host->nr_boot_partitions; i++, j += 2) {
+ boot_partition = &host->boot_partitions[i];
+
+ ret = of_property_read_u32_index(dn, "qcom,boot-partitions", j,
+ &boot_partition->page_offset);
+ if (ret) {
+ dev_err(dev, "Error parsing boot partition offset at index %d\n", i);
+ host->nr_boot_partitions = 0;
+ return ret;
+ }
+
+ if (boot_partition->page_offset % mtd->writesize) {
+ dev_err(dev, "Boot partition offset not multiple of writesize at index %i\n",
+ i);
+ host->nr_boot_partitions = 0;
+ return -EINVAL;
+ }
+ /* Convert offset to nand pages */
+ boot_partition->page_offset /= mtd->writesize;
+
+ ret = of_property_read_u32_index(dn, "qcom,boot-partitions", j + 1,
+ &boot_partition->page_size);
+ if (ret) {
+ dev_err(dev, "Error parsing boot partition size at index %d\n", i);
+ host->nr_boot_partitions = 0;
+ return ret;
+ }
+
+ if (boot_partition->page_size % mtd->writesize) {
+ dev_err(dev, "Boot partition size not multiple of writesize at index %i\n",
+ i);
+ host->nr_boot_partitions = 0;
+ return -EINVAL;
+ }
+ /* Convert size to nand pages */
+ boot_partition->page_size /= mtd->writesize;
+ }
+
+ return 0;
+}
+
static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
struct qcom_nand_host *host,
struct device_node *dn)
@@ -2972,6 +3169,14 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
if (ret)
nand_cleanup(chip);
+ if (nandc->props->use_codeword_fixup) {
+ ret = qcom_nand_host_parse_boot_partitions(nandc, host, dn);
+ if (ret) {
+ nand_cleanup(chip);
+ return ret;
+ }
+ }
+
return ret;
}
@@ -3137,6 +3342,7 @@ static int qcom_nandc_remove(struct platform_device *pdev)
static const struct qcom_nandc_props ipq806x_nandc_props = {
.ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
.is_bam = false,
+ .use_codeword_fixup = true,
.dev_cmd_reg_start = 0x0,
};
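
For illustration only: a minimal sketch (not code from the driver) of how the boot-partition data parsed by qcom_nand_host_parse_boot_partitions() above can be consulted. The page_offset and page_size fields are already converted to NAND pages by the parser; the helper name below is hypothetical, and the driver's actual decision point is the qcom_nandc_codeword_fixup() call guarded by host->nr_boot_partitions in the read/write paths shown earlier.

static bool example_page_in_boot_partition(struct qcom_nand_host *host,
					   int page)
{
	struct qcom_nand_boot_partition *bp;
	int i;

	for (i = 0; i < host->nr_boot_partitions; i++) {
		bp = &host->boot_partitions[i];
		/* page_offset and page_size are in NAND pages, not bytes */
		if (page >= bp->page_offset &&
		    page < bp->page_offset + bp->page_size)
			return true;
	}

	return false;
}
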
diff --git a/drivers/mtd/nand/raw/sm_common.c b/drivers/mtd/nand/raw/sm_common.c
index ba24cb36d0b9..b2b42dd1a2de 100644
--- a/drivers/mtd/nand/raw/sm_common.c
+++ b/drivers/mtd/nand/raw/sm_common.c
@@ -52,7 +52,7 @@ static const struct mtd_ooblayout_ops oob_sm_ops = {
.free = oob_sm_ooblayout_free,
};
-/* NOTE: This layout is is not compatabable with SmartMedia, */
+/* NOTE: This layout is not compatible with SmartMedia, */
/* because the 256 byte devices have page dependent oob layout */
/* However it does preserve the bad block markers */
/* If you use smftl, it will bypass this and work correctly */
diff --git a/drivers/mtd/nand/raw/tegra_nand.c b/drivers/mtd/nand/raw/tegra_nand.c
index b36e5260ae27..e12f9f580a15 100644
--- a/drivers/mtd/nand/raw/tegra_nand.c
+++ b/drivers/mtd/nand/raw/tegra_nand.c
@@ -1223,11 +1223,8 @@ static int tegra_nand_remove(struct platform_device *pdev)
struct tegra_nand_controller *ctrl = platform_get_drvdata(pdev);
struct nand_chip *chip = ctrl->chip;
struct mtd_info *mtd = nand_to_mtd(chip);
- int ret;
- ret = mtd_device_unregister(mtd);
- if (ret)
- return ret;
+ WARN_ON(mtd_device_unregister(mtd));
nand_cleanup(chip);
diff --git a/drivers/mtd/nand/spi/Makefile b/drivers/mtd/nand/spi/Makefile
index 80dabe6ff0f3..b520fe634041 100644
--- a/drivers/mtd/nand/spi/Makefile
+++ b/drivers/mtd/nand/spi/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
-spinand-objs := core.o gigadevice.o macronix.o micron.o paragon.o toshiba.o winbond.o xtx.o
+spinand-objs := core.o ato.o gigadevice.o macronix.o micron.o paragon.o toshiba.o winbond.o xtx.o
obj-$(CONFIG_MTD_SPI_NAND) += spinand.o
diff --git a/drivers/mtd/nand/spi/ato.c b/drivers/mtd/nand/spi/ato.c
new file mode 100644
index 000000000000..82b377c06812
--- /dev/null
+++ b/drivers/mtd/nand/spi/ato.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Aidan MacDonald
+ *
+ * Author: Aidan MacDonald <aidanmacdonald.0x0@gmail.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/spinand.h>
+
+
+#define SPINAND_MFR_ATO 0x9b
+
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+
+static int ato25d1ga_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ region->offset = (16 * section) + 8;
+ region->length = 8;
+ return 0;
+}
+
+static int ato25d1ga_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ if (section) {
+ region->offset = (16 * section);
+ region->length = 8;
+ } else {
+ /* first byte of section 0 is reserved for the BBM */
+ region->offset = 1;
+ region->length = 7;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops ato25d1ga_ooblayout = {
+ .ecc = ato25d1ga_ooblayout_ecc,
+ .free = ato25d1ga_ooblayout_free,
+};
+
+
+static const struct spinand_info ato_spinand_table[] = {
+ SPINAND_INFO("ATO25D1GA",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x12),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&ato25d1ga_ooblayout, NULL)),
+};
+
+static const struct spinand_manufacturer_ops ato_spinand_manuf_ops = {
+};
+
+const struct spinand_manufacturer ato_spinand_manufacturer = {
+ .id = SPINAND_MFR_ATO,
+ .name = "ATO",
+ .chips = ato_spinand_table,
+ .nchips = ARRAY_SIZE(ato_spinand_table),
+ .ops = &ato_spinand_manuf_ops,
+};
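
Worked out, the two ooblayout callbacks above carve the ATO25D1GA's 64-byte OOB area into four 16-byte sections (free data at offset 16 * section, ECC at 16 * section + 8):

	section 0: free bytes 1-7  (byte 0 reserved for the BBM), ECC bytes 8-15
	section 1: free bytes 16-23, ECC bytes 24-31
	section 2: free bytes 32-39, ECC bytes 40-47
	section 3: free bytes 48-55, ECC bytes 56-63
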
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index d5b685d1605e..9d73910a7ae8 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -927,6 +927,7 @@ static const struct nand_ops spinand_ops = {
};
static const struct spinand_manufacturer *spinand_manufacturers[] = {
+ &ato_spinand_manufacturer,
&gigadevice_spinand_manufacturer,
&macronix_spinand_manufacturer,
&micron_spinand_manufacturer,
diff --git a/drivers/mtd/parsers/Kconfig b/drivers/mtd/parsers/Kconfig
index 23763d16e4f9..b43df73927a0 100644
--- a/drivers/mtd/parsers/Kconfig
+++ b/drivers/mtd/parsers/Kconfig
@@ -186,3 +186,12 @@ config MTD_QCOMSMEM_PARTS
help
This provides support for parsing partitions from Shared Memory (SMEM)
for NAND and SPI flash on Qualcomm platforms.
+
+config MTD_SERCOMM_PARTS
+ tristate "Sercomm partition table parser"
+ depends on MTD && RALINK
+ help
+	  This provides a partition table parser for devices with a Sercomm
+	  partition map. The partition table contains the real partition
+	  offsets, which may differ from device to device depending on the
+	  number and location of bad blocks on NAND.
diff --git a/drivers/mtd/parsers/Makefile b/drivers/mtd/parsers/Makefile
index 2e98aa048278..2fcf0ab9e7da 100644
--- a/drivers/mtd/parsers/Makefile
+++ b/drivers/mtd/parsers/Makefile
@@ -10,6 +10,7 @@ ofpart-$(CONFIG_MTD_OF_PARTS_LINKSYS_NS)+= ofpart_linksys_ns.o
obj-$(CONFIG_MTD_PARSER_IMAGETAG) += parser_imagetag.o
obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
obj-$(CONFIG_MTD_PARSER_TRX) += parser_trx.o
+obj-$(CONFIG_MTD_SERCOMM_PARTS) += scpart.o
obj-$(CONFIG_MTD_SHARPSL_PARTS) += sharpslpart.o
obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
obj-$(CONFIG_MTD_QCOMSMEM_PARTS) += qcomsmempart.o
diff --git a/drivers/mtd/parsers/ofpart_bcm4908.c b/drivers/mtd/parsers/ofpart_bcm4908.c
index 0eddef4c198e..bb072a0940e4 100644
--- a/drivers/mtd/parsers/ofpart_bcm4908.c
+++ b/drivers/mtd/parsers/ofpart_bcm4908.c
@@ -35,12 +35,15 @@ static long long bcm4908_partitions_fw_offset(void)
err = kstrtoul(s + len + 1, 0, &offset);
if (err) {
pr_err("failed to parse %s\n", s + len + 1);
+ of_node_put(root);
return err;
}
+ of_node_put(root);
return offset << 10;
}
+ of_node_put(root);
return -ENOENT;
}
diff --git a/drivers/mtd/parsers/redboot.c b/drivers/mtd/parsers/redboot.c
index feb44a573d44..a16b42a88581 100644
--- a/drivers/mtd/parsers/redboot.c
+++ b/drivers/mtd/parsers/redboot.c
@@ -58,6 +58,7 @@ static void parse_redboot_of(struct mtd_info *master)
return;
ret = of_property_read_u32(npart, "fis-index-block", &dirblock);
+ of_node_put(npart);
if (ret)
return;
diff --git a/drivers/mtd/parsers/scpart.c b/drivers/mtd/parsers/scpart.c
new file mode 100644
index 000000000000..02601bb33de4
--- /dev/null
+++ b/drivers/mtd/parsers/scpart.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * drivers/mtd/scpart.c: Sercomm Partition Parser
+ *
+ * Copyright (C) 2018 NOGUCHI Hiroshi
+ * Copyright (C) 2022 Mikhail Zhilkin
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/module.h>
+
+#define MOD_NAME "scpart"
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) MOD_NAME ": " fmt
+
+#define ID_ALREADY_FOUND 0xffffffffUL
+
+#define MAP_OFFS_IN_BLK 0x800
+#define MAP_MIRROR_NUM 2
+
+static const char sc_part_magic[] = {
+ 'S', 'C', 'F', 'L', 'M', 'A', 'P', 'O', 'K', '\0',
+};
+#define PART_MAGIC_LEN sizeof(sc_part_magic)
+
+/* assumes that all fields are in CPU-native endianness */
+struct sc_part_desc {
+ uint32_t part_id;
+ uint32_t part_offs;
+ uint32_t part_bytes;
+};
+
+static uint32_t scpart_desc_is_valid(struct sc_part_desc *pdesc)
+{
+ return ((pdesc->part_id != 0xffffffffUL) &&
+ (pdesc->part_offs != 0xffffffffUL) &&
+ (pdesc->part_bytes != 0xffffffffUL));
+}
+
+static int scpart_scan_partmap(struct mtd_info *master, loff_t partmap_offs,
+ struct sc_part_desc **ppdesc)
+{
+ int cnt = 0;
+ int res = 0;
+ int res2;
+ loff_t offs;
+ size_t retlen;
+ struct sc_part_desc *pdesc = NULL;
+ struct sc_part_desc *tmpdesc;
+ uint8_t *buf;
+
+ buf = kzalloc(master->erasesize, GFP_KERNEL);
+ if (!buf) {
+ res = -ENOMEM;
+ goto out;
+ }
+
+ res2 = mtd_read(master, partmap_offs, master->erasesize, &retlen, buf);
+ if (res2 || retlen != master->erasesize) {
+ res = -EIO;
+ goto free;
+ }
+
+ for (offs = MAP_OFFS_IN_BLK;
+ offs < master->erasesize - sizeof(*tmpdesc);
+ offs += sizeof(*tmpdesc)) {
+ tmpdesc = (struct sc_part_desc *)&buf[offs];
+ if (!scpart_desc_is_valid(tmpdesc))
+ break;
+ cnt++;
+ }
+
+ if (cnt > 0) {
+ int bytes = cnt * sizeof(*pdesc);
+
+ pdesc = kcalloc(cnt, sizeof(*pdesc), GFP_KERNEL);
+ if (!pdesc) {
+ res = -ENOMEM;
+ goto free;
+ }
+ memcpy(pdesc, &(buf[MAP_OFFS_IN_BLK]), bytes);
+
+ *ppdesc = pdesc;
+ res = cnt;
+ }
+
+free:
+ kfree(buf);
+
+out:
+ return res;
+}
+
+static int scpart_find_partmap(struct mtd_info *master,
+ struct sc_part_desc **ppdesc)
+{
+ int magic_found = 0;
+ int res = 0;
+ int res2;
+ loff_t offs = 0;
+ size_t retlen;
+ uint8_t rdbuf[PART_MAGIC_LEN];
+
+ while ((magic_found < MAP_MIRROR_NUM) &&
+ (offs < master->size) &&
+ !mtd_block_isbad(master, offs)) {
+ res2 = mtd_read(master, offs, PART_MAGIC_LEN, &retlen, rdbuf);
+ if (res2 || retlen != PART_MAGIC_LEN) {
+ res = -EIO;
+ goto out;
+ }
+ if (!memcmp(rdbuf, sc_part_magic, PART_MAGIC_LEN)) {
+ pr_debug("Signature found at 0x%llx\n", offs);
+ magic_found++;
+ res = scpart_scan_partmap(master, offs, ppdesc);
+ if (res > 0)
+ goto out;
+ }
+ offs += master->erasesize;
+ }
+
+out:
+ if (res > 0)
+ pr_info("Valid 'SC PART MAP' (%d partitions) found at 0x%llx\n", res, offs);
+ else
+ pr_info("No valid 'SC PART MAP' was found\n");
+
+ return res;
+}
+
+static int scpart_parse(struct mtd_info *master,
+ const struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ const char *partname;
+ int n;
+ int nr_scparts;
+ int nr_parts = 0;
+ int res = 0;
+ struct sc_part_desc *scpart_map = NULL;
+ struct mtd_partition *parts = NULL;
+ struct device_node *mtd_node;
+ struct device_node *ofpart_node;
+ struct device_node *pp;
+
+ mtd_node = mtd_get_of_node(master);
+ if (!mtd_node) {
+ res = -ENOENT;
+ goto out;
+ }
+
+ ofpart_node = of_get_child_by_name(mtd_node, "partitions");
+ if (!ofpart_node) {
+ pr_info("%s: 'partitions' subnode not found on %pOF.\n",
+ master->name, mtd_node);
+ res = -ENOENT;
+ goto out;
+ }
+
+ nr_scparts = scpart_find_partmap(master, &scpart_map);
+ if (nr_scparts <= 0) {
+		pr_info("No partitions were found in 'SC PART MAP'.\n");
+ res = -ENOENT;
+ goto free;
+ }
+
+ parts = kcalloc(of_get_child_count(ofpart_node), sizeof(*parts),
+ GFP_KERNEL);
+ if (!parts) {
+ res = -ENOMEM;
+ goto free;
+ }
+
+ for_each_child_of_node(ofpart_node, pp) {
+ u32 scpart_id;
+
+ if (of_property_read_u32(pp, "sercomm,scpart-id", &scpart_id))
+ continue;
+
+ for (n = 0 ; n < nr_scparts ; n++)
+ if ((scpart_map[n].part_id != ID_ALREADY_FOUND) &&
+ (scpart_id == scpart_map[n].part_id))
+ break;
+ if (n >= nr_scparts)
+ /* not match */
+ continue;
+
+ /* add the partition found in OF into MTD partition array */
+ parts[nr_parts].offset = scpart_map[n].part_offs;
+ parts[nr_parts].size = scpart_map[n].part_bytes;
+ parts[nr_parts].of_node = pp;
+
+ if (!of_property_read_string(pp, "label", &partname))
+ parts[nr_parts].name = partname;
+ if (of_property_read_bool(pp, "read-only"))
+ parts[nr_parts].mask_flags |= MTD_WRITEABLE;
+ if (of_property_read_bool(pp, "lock"))
+ parts[nr_parts].mask_flags |= MTD_POWERUP_LOCK;
+
+ /* mark as 'done' */
+ scpart_map[n].part_id = ID_ALREADY_FOUND;
+
+ nr_parts++;
+ }
+
+ if (nr_parts > 0) {
+ *pparts = parts;
+ res = nr_parts;
+ } else
+		pr_info("No partition in OF matches any partition ID in 'SC PART MAP'.\n");
+
+ of_node_put(pp);
+
+free:
+ of_node_put(ofpart_node);
+ kfree(scpart_map);
+ if (res <= 0)
+ kfree(parts);
+
+out:
+ return res;
+}
+
+static const struct of_device_id scpart_parser_of_match_table[] = {
+ { .compatible = "sercomm,sc-partitions" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, scpart_parser_of_match_table);
+
+static struct mtd_part_parser scpart_parser = {
+ .parse_fn = scpart_parse,
+ .name = "scpart",
+ .of_match_table = scpart_parser_of_match_table,
+};
+module_mtd_part_parser(scpart_parser);
+
+/* mtd parsers will request the module by parser name */
+MODULE_ALIAS("scpart");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("NOGUCHI Hiroshi <drvlabo@gmail.com>");
+MODULE_AUTHOR("Mikhail Zhilkin <csharper2005@gmail.com>");
+MODULE_DESCRIPTION("Sercomm partition parser");
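
Pieced together from the constants and structures above, the on-flash map that scpart_scan_partmap() expects looks roughly like this (a sketch inferred from the parser, not a formal Sercomm specification): an erase block starting with the 10-byte "SCFLMAPOK\0" magic, a descriptor table at offset 0x800 (MAP_OFFS_IN_BLK) within that block, each entry being three CPU-native u32 values giving a partition ID plus its byte offset and size on the master device, the table ending at the first entry with any field equal to 0xffffffff, with up to MAP_MIRROR_NUM (2) mirror copies scanned in later good blocks.

	offset 0x000: "SCFLMAPOK\0"                       /* 10-byte magic */
	offset 0x800: { part_id, part_offs, part_bytes }  /* descriptor 0, 3 x u32 */
	offset 0x80c: { part_id, part_offs, part_bytes }  /* descriptor 1 */
	...
	first entry containing 0xffffffff terminates the table
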
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 0cff2cda1b5a..7f955fade838 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -1111,9 +1111,9 @@ static void sm_release(struct mtd_blktrans_dev *dev)
{
struct sm_ftl *ftl = dev->priv;
- mutex_lock(&ftl->mutex);
del_timer_sync(&ftl->timer);
cancel_work_sync(&ftl->flush_work);
+ mutex_lock(&ftl->mutex);
sm_cache_flush(ftl);
mutex_unlock(&ftl->mutex);
}
diff --git a/drivers/mtd/spi-nor/controllers/hisi-sfc.c b/drivers/mtd/spi-nor/controllers/hisi-sfc.c
index 94a969185ceb..5070d72835ec 100644
--- a/drivers/mtd/spi-nor/controllers/hisi-sfc.c
+++ b/drivers/mtd/spi-nor/controllers/hisi-sfc.c
@@ -237,7 +237,7 @@ static int hisi_spi_nor_dma_transfer(struct spi_nor *nor, loff_t start_off,
reg = readl(host->regbase + FMC_CFG);
reg &= ~(FMC_CFG_OP_MODE_MASK | SPI_NOR_ADDR_MODE_MASK);
reg |= FMC_CFG_OP_MODE_NORMAL;
- reg |= (nor->addr_width == 4) ? SPI_NOR_ADDR_MODE_4BYTES
+ reg |= (nor->addr_nbytes == 4) ? SPI_NOR_ADDR_MODE_4BYTES
: SPI_NOR_ADDR_MODE_3BYTES;
writel(reg, host->regbase + FMC_CFG);
diff --git a/drivers/mtd/spi-nor/controllers/nxp-spifi.c b/drivers/mtd/spi-nor/controllers/nxp-spifi.c
index 9032b9ab2eaf..ab3990e6ac25 100644
--- a/drivers/mtd/spi-nor/controllers/nxp-spifi.c
+++ b/drivers/mtd/spi-nor/controllers/nxp-spifi.c
@@ -203,7 +203,7 @@ static ssize_t nxp_spifi_write(struct spi_nor *nor, loff_t to, size_t len,
SPIFI_CMD_DATALEN(len) |
SPIFI_CMD_FIELDFORM_ALL_SERIAL |
SPIFI_CMD_OPCODE(nor->program_opcode) |
- SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1);
+ SPIFI_CMD_FRAMEFORM(spifi->nor.addr_nbytes + 1);
writel(cmd, spifi->io_base + SPIFI_CMD);
for (i = 0; i < len; i++)
@@ -230,7 +230,7 @@ static int nxp_spifi_erase(struct spi_nor *nor, loff_t offs)
cmd = SPIFI_CMD_FIELDFORM_ALL_SERIAL |
SPIFI_CMD_OPCODE(nor->erase_opcode) |
- SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1);
+ SPIFI_CMD_FRAMEFORM(spifi->nor.addr_nbytes + 1);
writel(cmd, spifi->io_base + SPIFI_CMD);
return nxp_spifi_wait_for_cmd(spifi);
@@ -252,12 +252,12 @@ static int nxp_spifi_setup_memory_cmd(struct nxp_spifi *spifi)
}
/* Memory mode supports address length between 1 and 4 */
- if (spifi->nor.addr_width < 1 || spifi->nor.addr_width > 4)
+ if (spifi->nor.addr_nbytes < 1 || spifi->nor.addr_nbytes > 4)
return -EINVAL;
spifi->mcmd |= SPIFI_CMD_OPCODE(spifi->nor.read_opcode) |
SPIFI_CMD_INTLEN(spifi->nor.read_dummy / 8) |
- SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1);
+ SPIFI_CMD_FRAMEFORM(spifi->nor.addr_nbytes + 1);
return 0;
}
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index 502967c76c5f..f2c64006f8d7 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -38,7 +38,7 @@
*/
#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ)
-#define SPI_NOR_MAX_ADDR_WIDTH 4
+#define SPI_NOR_MAX_ADDR_NBYTES 4
#define SPI_NOR_SRST_SLEEP_MIN 200
#define SPI_NOR_SRST_SLEEP_MAX 400
@@ -177,7 +177,7 @@ int spi_nor_controller_ops_write_reg(struct spi_nor *nor, u8 opcode,
static int spi_nor_controller_ops_erase(struct spi_nor *nor, loff_t offs)
{
- if (spi_nor_protocol_is_dtr(nor->write_proto))
+ if (spi_nor_protocol_is_dtr(nor->reg_proto))
return -EOPNOTSUPP;
return nor->controller_ops->erase(nor, offs);
@@ -198,7 +198,7 @@ static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from,
{
struct spi_mem_op op =
SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
- SPI_MEM_OP_ADDR(nor->addr_width, from, 0),
+ SPI_MEM_OP_ADDR(nor->addr_nbytes, from, 0),
SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
SPI_MEM_OP_DATA_IN(len, buf, 0));
bool usebouncebuf;
@@ -262,7 +262,7 @@ static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
{
struct spi_mem_op op =
SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
- SPI_MEM_OP_ADDR(nor->addr_width, to, 0),
+ SPI_MEM_OP_ADDR(nor->addr_nbytes, to, 0),
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_DATA_OUT(len, buf, 0));
ssize_t nbytes;
@@ -972,7 +972,7 @@ static int spi_nor_erase_chip(struct spi_nor *nor)
if (nor->spimem) {
struct spi_mem_op op = SPI_NOR_CHIP_ERASE_OP;
- spi_nor_spimem_setup_op(nor, &op, nor->write_proto);
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
@@ -1113,9 +1113,9 @@ int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
if (nor->spimem) {
struct spi_mem_op op =
SPI_NOR_SECTOR_ERASE_OP(nor->erase_opcode,
- nor->addr_width, addr);
+ nor->addr_nbytes, addr);
- spi_nor_spimem_setup_op(nor, &op, nor->write_proto);
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
return spi_mem_exec_op(nor->spimem, &op);
} else if (nor->controller_ops->erase) {
@@ -1126,13 +1126,13 @@ int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
* Default implementation, if driver doesn't have a specialized HW
* control
*/
- for (i = nor->addr_width - 1; i >= 0; i--) {
+ for (i = nor->addr_nbytes - 1; i >= 0; i--) {
nor->bouncebuf[i] = addr & 0xff;
addr >>= 8;
}
return spi_nor_controller_ops_write_reg(nor, nor->erase_opcode,
- nor->bouncebuf, nor->addr_width);
+ nor->bouncebuf, nor->addr_nbytes);
}
/**
@@ -2249,43 +2249,43 @@ static int spi_nor_default_setup(struct spi_nor *nor,
return 0;
}
-static int spi_nor_set_addr_width(struct spi_nor *nor)
+static int spi_nor_set_addr_nbytes(struct spi_nor *nor)
{
- if (nor->addr_width) {
- /* already configured from SFDP */
+ if (nor->params->addr_nbytes) {
+ nor->addr_nbytes = nor->params->addr_nbytes;
} else if (nor->read_proto == SNOR_PROTO_8_8_8_DTR) {
/*
* In 8D-8D-8D mode, one byte takes half a cycle to transfer. So
- * in this protocol an odd address width cannot be used because
+ * in this protocol an odd addr_nbytes cannot be used because
* then the address phase would only span a cycle and a half.
* Half a cycle would be left over. We would then have to start
* the dummy phase in the middle of a cycle and so too the data
* phase, and we will end the transaction with half a cycle left
* over.
*
- * Force all 8D-8D-8D flashes to use an address width of 4 to
+ * Force all 8D-8D-8D flashes to use an addr_nbytes of 4 to
* avoid this situation.
*/
- nor->addr_width = 4;
- } else if (nor->info->addr_width) {
- nor->addr_width = nor->info->addr_width;
+ nor->addr_nbytes = 4;
+ } else if (nor->info->addr_nbytes) {
+ nor->addr_nbytes = nor->info->addr_nbytes;
} else {
- nor->addr_width = 3;
+ nor->addr_nbytes = 3;
}
- if (nor->addr_width == 3 && nor->params->size > 0x1000000) {
+ if (nor->addr_nbytes == 3 && nor->params->size > 0x1000000) {
/* enable 4-byte addressing if the device exceeds 16MiB */
- nor->addr_width = 4;
+ nor->addr_nbytes = 4;
}
- if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
- dev_dbg(nor->dev, "address width is too large: %u\n",
- nor->addr_width);
+ if (nor->addr_nbytes > SPI_NOR_MAX_ADDR_NBYTES) {
+ dev_dbg(nor->dev, "The number of address bytes is too large: %u\n",
+ nor->addr_nbytes);
return -EINVAL;
}
/* Set 4byte opcodes when possible. */
- if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
+ if (nor->addr_nbytes == 4 && nor->flags & SNOR_F_4B_OPCODES &&
!(nor->flags & SNOR_F_HAS_4BAIT))
spi_nor_set_4byte_opcodes(nor);
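
As a worked example of the comment above: in 8D-8D-8D mode each byte occupies half a cycle, so a 3-byte address would take 3 * 0.5 = 1.5 cycles and the dummy and data phases would have to start mid-cycle, whereas 4 bytes take exactly 4 * 0.5 = 2 cycles, which is why addr_nbytes is forced to 4 for these flashes.
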
@@ -2304,7 +2304,7 @@ static int spi_nor_setup(struct spi_nor *nor,
if (ret)
return ret;
- return spi_nor_set_addr_width(nor);
+ return spi_nor_set_addr_nbytes(nor);
}
/**
@@ -2382,12 +2382,7 @@ static void spi_nor_no_sfdp_init_params(struct spi_nor *nor)
*/
erase_mask = 0;
i = 0;
- if (no_sfdp_flags & SECT_4K_PMC) {
- erase_mask |= BIT(i);
- spi_nor_set_erase_type(&map->erase_type[i], 4096u,
- SPINOR_OP_BE_4K_PMC);
- i++;
- } else if (no_sfdp_flags & SECT_4K) {
+ if (no_sfdp_flags & SECT_4K) {
erase_mask |= BIT(i);
spi_nor_set_erase_type(&map->erase_type[i], 4096u,
SPINOR_OP_BE_4K);
@@ -2497,7 +2492,6 @@ static void spi_nor_sfdp_init_params_deprecated(struct spi_nor *nor)
if (spi_nor_parse_sfdp(nor)) {
memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
- nor->addr_width = 0;
nor->flags &= ~SNOR_F_4B_OPCODES;
}
}
@@ -2718,7 +2712,7 @@ static int spi_nor_init(struct spi_nor *nor)
nor->flags & SNOR_F_SWP_IS_VOLATILE))
spi_nor_try_unlock_all(nor);
- if (nor->addr_width == 4 &&
+ if (nor->addr_nbytes == 4 &&
nor->read_proto != SNOR_PROTO_8_8_8_DTR &&
!(nor->flags & SNOR_F_4B_OPCODES)) {
/*
@@ -2730,7 +2724,7 @@ static int spi_nor_init(struct spi_nor *nor)
*/
WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
"enabling reset hack; may not recover from unexpected reboots\n");
- nor->params->set_4byte_addr_mode(nor, true);
+ return nor->params->set_4byte_addr_mode(nor, true);
}
return 0;
@@ -2845,7 +2839,7 @@ static void spi_nor_put_device(struct mtd_info *mtd)
void spi_nor_restore(struct spi_nor *nor)
{
/* restore the addressing mode */
- if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
+ if (nor->addr_nbytes == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
nor->flags & SNOR_F_BROKEN_RESET)
nor->params->set_4byte_addr_mode(nor, false);
@@ -2989,7 +2983,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
* - select op codes for (Fast) Read, Page Program and Sector Erase.
* - set the number of dummy cycles (mode cycles + wait states).
* - set the SPI protocols for register and memory accesses.
- * - set the address width.
+ * - set the number of address bytes.
*/
ret = spi_nor_setup(nor, hwcaps);
if (ret)
@@ -3030,7 +3024,7 @@ static int spi_nor_create_read_dirmap(struct spi_nor *nor)
{
struct spi_mem_dirmap_info info = {
.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
- SPI_MEM_OP_ADDR(nor->addr_width, 0, 0),
+ SPI_MEM_OP_ADDR(nor->addr_nbytes, 0, 0),
SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
SPI_MEM_OP_DATA_IN(0, NULL, 0)),
.offset = 0,
@@ -3061,7 +3055,7 @@ static int spi_nor_create_write_dirmap(struct spi_nor *nor)
{
struct spi_mem_dirmap_info info = {
.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
- SPI_MEM_OP_ADDR(nor->addr_width, 0, 0),
+ SPI_MEM_OP_ADDR(nor->addr_nbytes, 0, 0),
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_DATA_OUT(0, NULL, 0)),
.offset = 0,
diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
index 3f841ec36e56..85b0cf254e97 100644
--- a/drivers/mtd/spi-nor/core.h
+++ b/drivers/mtd/spi-nor/core.h
@@ -84,9 +84,9 @@
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
-#define SPI_NOR_SECTOR_ERASE_OP(opcode, addr_width, addr) \
+#define SPI_NOR_SECTOR_ERASE_OP(opcode, addr_nbytes, addr) \
SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 0), \
- SPI_MEM_OP_ADDR(addr_width, addr, 0), \
+ SPI_MEM_OP_ADDR(addr_nbytes, addr, 0), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
@@ -340,6 +340,11 @@ struct spi_nor_otp {
 * @writesize:	Minimal writable flash unit size. Defaults to 1. Set to
* ECC unit size for ECC-ed flashes.
* @page_size: the page size of the SPI NOR flash memory.
+ * @addr_nbytes: number of address bytes to send.
+ * @addr_mode_nbytes: number of address bytes of current address mode. Useful
+ * when the flash operates with 4B opcodes but needs the
+ * internal address mode for opcodes that don't have a 4B
+ * opcode correspondent.
* @rdsr_dummy: dummy cycles needed for Read Status Register command
* in octal DTR mode.
* @rdsr_addr_nbytes: dummy address bytes needed for Read Status Register
@@ -372,6 +377,8 @@ struct spi_nor_flash_parameter {
u64 size;
u32 writesize;
u32 page_size;
+ u8 addr_nbytes;
+ u8 addr_mode_nbytes;
u8 rdsr_dummy;
u8 rdsr_addr_nbytes;
@@ -429,7 +436,7 @@ struct spi_nor_fixups {
* isn't necessarily called a "sector" by the vendor.
* @n_sectors: the number of sectors.
* @page_size: the flash's page size.
- * @addr_width: the flash's address width.
+ * @addr_nbytes: number of address bytes to send.
*
* @parse_sfdp: true when flash supports SFDP tables. The false value has no
* meaning. If one wants to skip the SFDP tables, one should
@@ -457,7 +464,6 @@ struct spi_nor_fixups {
* flags are used together with the SPI_NOR_SKIP_SFDP flag.
* SPI_NOR_SKIP_SFDP: skip parsing of SFDP tables.
* SECT_4K: SPINOR_OP_BE_4K works uniformly.
- * SECT_4K_PMC: SPINOR_OP_BE_4K_PMC works uniformly.
* SPI_NOR_DUAL_READ: flash supports Dual Read.
* SPI_NOR_QUAD_READ: flash supports Quad Read.
* SPI_NOR_OCTAL_READ: flash supports Octal Read.
@@ -488,7 +494,7 @@ struct flash_info {
unsigned sector_size;
u16 n_sectors;
u16 page_size;
- u16 addr_width;
+ u8 addr_nbytes;
bool parse_sfdp;
u16 flags;
@@ -505,7 +511,6 @@ struct flash_info {
u8 no_sfdp_flags;
#define SPI_NOR_SKIP_SFDP BIT(0)
#define SECT_4K BIT(1)
-#define SECT_4K_PMC BIT(2)
#define SPI_NOR_DUAL_READ BIT(3)
#define SPI_NOR_QUAD_READ BIT(4)
#define SPI_NOR_OCTAL_READ BIT(5)
@@ -550,11 +555,11 @@ struct flash_info {
.n_sectors = (_n_sectors), \
.page_size = 256, \
-#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width) \
+#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_nbytes) \
.sector_size = (_sector_size), \
.n_sectors = (_n_sectors), \
.page_size = (_page_size), \
- .addr_width = (_addr_width), \
+ .addr_nbytes = (_addr_nbytes), \
.flags = SPI_NOR_NO_ERASE | SPI_NOR_NO_FR, \
#define OTP_INFO(_len, _n_regions, _base, _offset) \
diff --git a/drivers/mtd/spi-nor/debugfs.c b/drivers/mtd/spi-nor/debugfs.c
index eaf84f7a0676..df76cb5de3f9 100644
--- a/drivers/mtd/spi-nor/debugfs.c
+++ b/drivers/mtd/spi-nor/debugfs.c
@@ -86,7 +86,7 @@ static int spi_nor_params_show(struct seq_file *s, void *data)
seq_printf(s, "size\t\t%s\n", buf);
seq_printf(s, "write size\t%u\n", params->writesize);
seq_printf(s, "page size\t%u\n", params->page_size);
- seq_printf(s, "address width\t%u\n", nor->addr_width);
+ seq_printf(s, "address nbytes\t%u\n", nor->addr_nbytes);
seq_puts(s, "flags\t\t");
spi_nor_print_flags(s, nor->flags, snor_f_names, sizeof(snor_f_names));
diff --git a/drivers/mtd/spi-nor/esmt.c b/drivers/mtd/spi-nor/esmt.c
index 79e2408f4998..fcc3b0e7cda9 100644
--- a/drivers/mtd/spi-nor/esmt.c
+++ b/drivers/mtd/spi-nor/esmt.c
@@ -13,7 +13,7 @@ static const struct flash_info esmt_nor_parts[] = {
{ "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K) },
- { "f25l32qa", INFO(0x8c4116, 0, 64 * 1024, 64)
+ { "f25l32qa-2s", INFO(0x8c4116, 0, 64 * 1024, 64)
FLAGS(SPI_NOR_HAS_LOCK)
NO_SFDP_FLAGS(SECT_4K) },
{ "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128)
diff --git a/drivers/mtd/spi-nor/issi.c b/drivers/mtd/spi-nor/issi.c
index c012bc2486e1..89a66a19d754 100644
--- a/drivers/mtd/spi-nor/issi.c
+++ b/drivers/mtd/spi-nor/issi.c
@@ -14,13 +14,13 @@ is25lp256_post_bfpt_fixups(struct spi_nor *nor,
const struct sfdp_bfpt *bfpt)
{
/*
- * IS25LP256 supports 4B opcodes, but the BFPT advertises a
- * BFPT_DWORD1_ADDRESS_BYTES_3_ONLY address width.
- * Overwrite the address width advertised by the BFPT.
+ * IS25LP256 supports 4B opcodes, but the BFPT advertises
+ * BFPT_DWORD1_ADDRESS_BYTES_3_ONLY.
+ * Overwrite the number of address bytes advertised by the BFPT.
*/
if ((bfpt->dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) ==
BFPT_DWORD1_ADDRESS_BYTES_3_ONLY)
- nor->addr_width = 4;
+ nor->params->addr_nbytes = 4;
return 0;
}
@@ -29,6 +29,21 @@ static const struct spi_nor_fixups is25lp256_fixups = {
.post_bfpt = is25lp256_post_bfpt_fixups,
};
+static void pm25lv_nor_late_init(struct spi_nor *nor)
+{
+ struct spi_nor_erase_map *map = &nor->params->erase_map;
+ int i;
+
+ /* The PM25LV series has a different 4k sector erase opcode */
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
+ if (map->erase_type[i].size == 4096)
+ map->erase_type[i].opcode = SPINOR_OP_BE_4K_PMC;
+}
+
+static const struct spi_nor_fixups pm25lv_nor_fixups = {
+ .late_init = pm25lv_nor_late_init,
+};
+
static const struct flash_info issi_nor_parts[] = {
/* ISSI */
{ "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2)
@@ -62,9 +77,13 @@ static const struct flash_info issi_nor_parts[] = {
/* PMC */
{ "pm25lv512", INFO(0, 0, 32 * 1024, 2)
- NO_SFDP_FLAGS(SECT_4K_PMC) },
+ NO_SFDP_FLAGS(SECT_4K)
+ .fixups = &pm25lv_nor_fixups
+ },
{ "pm25lv010", INFO(0, 0, 32 * 1024, 4)
- NO_SFDP_FLAGS(SECT_4K_PMC) },
+ NO_SFDP_FLAGS(SECT_4K)
+ .fixups = &pm25lv_nor_fixups
+ },
{ "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64)
NO_SFDP_FLAGS(SECT_4K) },
};
diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c
index a96f74e0f568..3c9681a3f7a3 100644
--- a/drivers/mtd/spi-nor/micron-st.c
+++ b/drivers/mtd/spi-nor/micron-st.c
@@ -399,8 +399,16 @@ static int micron_st_nor_ready(struct spi_nor *nor)
return sr_ready;
ret = micron_st_nor_read_fsr(nor, nor->bouncebuf);
- if (ret)
- return ret;
+ if (ret) {
+ /*
+ * Some controllers, such as Intel SPI, do not support low
+ * level operations such as reading the flag status
+		 * register. They only expose a small number of high level
+ * operations to the software. If this is the case we use
+ * only the status register value.
+ */
+ return ret == -EOPNOTSUPP ? sr_ready : ret;
+ }
if (nor->bouncebuf[0] & (FSR_E_ERR | FSR_P_ERR)) {
if (nor->bouncebuf[0] & FSR_E_ERR)
diff --git a/drivers/mtd/spi-nor/otp.c b/drivers/mtd/spi-nor/otp.c
index fa63d8571218..00ab0d2d6d2f 100644
--- a/drivers/mtd/spi-nor/otp.c
+++ b/drivers/mtd/spi-nor/otp.c
@@ -35,13 +35,13 @@
*/
int spi_nor_otp_read_secr(struct spi_nor *nor, loff_t addr, size_t len, u8 *buf)
{
- u8 addr_width, read_opcode, read_dummy;
+ u8 addr_nbytes, read_opcode, read_dummy;
struct spi_mem_dirmap_desc *rdesc;
enum spi_nor_protocol read_proto;
int ret;
read_opcode = nor->read_opcode;
- addr_width = nor->addr_width;
+ addr_nbytes = nor->addr_nbytes;
read_dummy = nor->read_dummy;
read_proto = nor->read_proto;
rdesc = nor->dirmap.rdesc;
@@ -54,7 +54,7 @@ int spi_nor_otp_read_secr(struct spi_nor *nor, loff_t addr, size_t len, u8 *buf)
ret = spi_nor_read_data(nor, addr, len, buf);
nor->read_opcode = read_opcode;
- nor->addr_width = addr_width;
+ nor->addr_nbytes = addr_nbytes;
nor->read_dummy = read_dummy;
nor->read_proto = read_proto;
nor->dirmap.rdesc = rdesc;
@@ -85,11 +85,11 @@ int spi_nor_otp_write_secr(struct spi_nor *nor, loff_t addr, size_t len,
{
enum spi_nor_protocol write_proto;
struct spi_mem_dirmap_desc *wdesc;
- u8 addr_width, program_opcode;
+ u8 addr_nbytes, program_opcode;
int ret, written;
program_opcode = nor->program_opcode;
- addr_width = nor->addr_width;
+ addr_nbytes = nor->addr_nbytes;
write_proto = nor->write_proto;
wdesc = nor->dirmap.wdesc;
@@ -113,7 +113,7 @@ int spi_nor_otp_write_secr(struct spi_nor *nor, loff_t addr, size_t len,
out:
nor->program_opcode = program_opcode;
- nor->addr_width = addr_width;
+ nor->addr_nbytes = addr_nbytes;
nor->write_proto = write_proto;
nor->dirmap.wdesc = wdesc;
diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c
index a5211543d30d..2257f1b4c2e2 100644
--- a/drivers/mtd/spi-nor/sfdp.c
+++ b/drivers/mtd/spi-nor/sfdp.c
@@ -134,7 +134,7 @@ struct sfdp_4bait {
/**
* spi_nor_read_raw() - raw read of serial flash memory. read_opcode,
- * addr_width and read_dummy members of the struct spi_nor
+ * addr_nbytes and read_dummy members of the struct spi_nor
* should be previously
* set.
* @nor: pointer to a 'struct spi_nor'
@@ -178,21 +178,21 @@ static int spi_nor_read_raw(struct spi_nor *nor, u32 addr, size_t len, u8 *buf)
static int spi_nor_read_sfdp(struct spi_nor *nor, u32 addr,
size_t len, void *buf)
{
- u8 addr_width, read_opcode, read_dummy;
+ u8 addr_nbytes, read_opcode, read_dummy;
int ret;
read_opcode = nor->read_opcode;
- addr_width = nor->addr_width;
+ addr_nbytes = nor->addr_nbytes;
read_dummy = nor->read_dummy;
nor->read_opcode = SPINOR_OP_RDSFDP;
- nor->addr_width = 3;
+ nor->addr_nbytes = 3;
nor->read_dummy = 8;
ret = spi_nor_read_raw(nor, addr, len, buf);
nor->read_opcode = read_opcode;
- nor->addr_width = addr_width;
+ nor->addr_nbytes = addr_nbytes;
nor->read_dummy = read_dummy;
return ret;
@@ -462,11 +462,13 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
switch (bfpt.dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) {
case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY:
case BFPT_DWORD1_ADDRESS_BYTES_3_OR_4:
- nor->addr_width = 3;
+ params->addr_nbytes = 3;
+ params->addr_mode_nbytes = 3;
break;
case BFPT_DWORD1_ADDRESS_BYTES_4_ONLY:
- nor->addr_width = 4;
+ params->addr_nbytes = 4;
+ params->addr_mode_nbytes = 4;
break;
default:
@@ -637,12 +639,12 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
}
/**
- * spi_nor_smpt_addr_width() - return the address width used in the
+ * spi_nor_smpt_addr_nbytes() - return the number of address bytes used in the
* configuration detection command.
* @nor: pointer to a 'struct spi_nor'
* @settings: configuration detection command descriptor, dword1
*/
-static u8 spi_nor_smpt_addr_width(const struct spi_nor *nor, const u32 settings)
+static u8 spi_nor_smpt_addr_nbytes(const struct spi_nor *nor, const u32 settings)
{
switch (settings & SMPT_CMD_ADDRESS_LEN_MASK) {
case SMPT_CMD_ADDRESS_LEN_0:
@@ -653,7 +655,7 @@ static u8 spi_nor_smpt_addr_width(const struct spi_nor *nor, const u32 settings)
return 4;
case SMPT_CMD_ADDRESS_LEN_USE_CURRENT:
default:
- return nor->addr_width;
+ return nor->params->addr_mode_nbytes;
}
}
@@ -690,7 +692,7 @@ static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
u32 addr;
int err;
u8 i;
- u8 addr_width, read_opcode, read_dummy;
+ u8 addr_nbytes, read_opcode, read_dummy;
u8 read_data_mask, map_id;
/* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
@@ -698,7 +700,7 @@ static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
if (!buf)
return ERR_PTR(-ENOMEM);
- addr_width = nor->addr_width;
+ addr_nbytes = nor->addr_nbytes;
read_dummy = nor->read_dummy;
read_opcode = nor->read_opcode;
@@ -709,7 +711,7 @@ static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
break;
read_data_mask = SMPT_CMD_READ_DATA(smpt[i]);
- nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]);
+ nor->addr_nbytes = spi_nor_smpt_addr_nbytes(nor, smpt[i]);
nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]);
nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]);
addr = smpt[i + 1];
@@ -756,7 +758,7 @@ static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
/* fall through */
out:
kfree(buf);
- nor->addr_width = addr_width;
+ nor->addr_nbytes = addr_nbytes;
nor->read_dummy = read_dummy;
nor->read_opcode = read_opcode;
return ret;
@@ -1044,7 +1046,7 @@ static int spi_nor_parse_4bait(struct spi_nor *nor,
/*
* We need at least one 4-byte op code per read, program and erase
* operation; the .read(), .write() and .erase() hooks share the
- * nor->addr_width value.
+ * nor->addr_nbytes value.
*/
if (!read_hwcaps || !pp_hwcaps || !erase_mask)
goto out;
@@ -1098,7 +1100,7 @@ static int spi_nor_parse_4bait(struct spi_nor *nor,
* Spansion memory. However this quirk is no longer needed with new
* SFDP compliant memories.
*/
- nor->addr_width = 4;
+ params->addr_nbytes = 4;
nor->flags |= SNOR_F_4B_OPCODES | SNOR_F_HAS_4BAIT;
/* fall through */
diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c
index 43cd6cd92537..0150049007be 100644
--- a/drivers/mtd/spi-nor/spansion.c
+++ b/drivers/mtd/spi-nor/spansion.c
@@ -14,6 +14,8 @@
#define SPINOR_OP_CLSR 0x30 /* Clear status register 1 */
#define SPINOR_OP_RD_ANY_REG 0x65 /* Read any register */
#define SPINOR_OP_WR_ANY_REG 0x71 /* Write any register */
+#define SPINOR_REG_CYPRESS_CFR1V 0x00800002
+#define SPINOR_REG_CYPRESS_CFR1V_QUAD_EN BIT(1) /* Quad Enable */
#define SPINOR_REG_CYPRESS_CFR2V 0x00800003
#define SPINOR_REG_CYPRESS_CFR2V_MEMLAT_11_24 0xb
#define SPINOR_REG_CYPRESS_CFR3V 0x00800004
@@ -114,6 +116,150 @@ static int cypress_nor_octal_dtr_dis(struct spi_nor *nor)
}
/**
+ * cypress_nor_quad_enable_volatile() - enable Quad I/O mode in volatile
+ * register.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * It is recommended to update volatile registers in field applications, due
+ * to the risk of corrupting the non-volatile registers if power is lost
+ * mid-update. This function sets the Quad Enable bit in the volatile CFR1
+ * register. If users set the Quad Enable bit in the non-volatile CFR1 in
+ * advance (typically with a Flash programmer before mounting the Flash on the
+ * PCB), the Quad Enable bit in the volatile CFR1 is also set during Flash
+ * power-up.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int cypress_nor_quad_enable_volatile(struct spi_nor *nor)
+{
+ struct spi_mem_op op;
+ u8 addr_mode_nbytes = nor->params->addr_mode_nbytes;
+ u8 cfr1v_written;
+ int ret;
+
+ op = (struct spi_mem_op)
+ CYPRESS_NOR_RD_ANY_REG_OP(addr_mode_nbytes,
+ SPINOR_REG_CYPRESS_CFR1V,
+ nor->bouncebuf);
+
+ ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] & SPINOR_REG_CYPRESS_CFR1V_QUAD_EN)
+ return 0;
+
+ /* Update the Quad Enable bit. */
+ nor->bouncebuf[0] |= SPINOR_REG_CYPRESS_CFR1V_QUAD_EN;
+ op = (struct spi_mem_op)
+ CYPRESS_NOR_WR_ANY_REG_OP(addr_mode_nbytes,
+ SPINOR_REG_CYPRESS_CFR1V, 1,
+ nor->bouncebuf);
+ ret = spi_nor_write_any_volatile_reg(nor, &op, nor->reg_proto);
+ if (ret)
+ return ret;
+
+ cfr1v_written = nor->bouncebuf[0];
+
+ /* Read back and check it. */
+ op = (struct spi_mem_op)
+ CYPRESS_NOR_RD_ANY_REG_OP(addr_mode_nbytes,
+ SPINOR_REG_CYPRESS_CFR1V,
+ nor->bouncebuf);
+ ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] != cfr1v_written) {
+ dev_err(nor->dev, "CFR1: Read back test failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * cypress_nor_set_page_size() - Set page size which corresponds to the flash
+ * configuration.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * The BFPT table advertises a 512B or 256B page size depending on part but the
+ * page size is actually configurable (with the default being 256B). Read from
+ * CFR3V[4] and set the correct size.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int cypress_nor_set_page_size(struct spi_nor *nor)
+{
+ struct spi_mem_op op =
+ CYPRESS_NOR_RD_ANY_REG_OP(3, SPINOR_REG_CYPRESS_CFR3V,
+ nor->bouncebuf);
+ int ret;
+
+ ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] & SPINOR_REG_CYPRESS_CFR3V_PGSZ)
+ nor->params->page_size = 512;
+ else
+ nor->params->page_size = 256;
+
+ return 0;
+}
+
+static int
+s25hx_t_post_bfpt_fixup(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt)
+{
+ /* Replace Quad Enable with volatile version */
+ nor->params->quad_enable = cypress_nor_quad_enable_volatile;
+
+ return cypress_nor_set_page_size(nor);
+}
+
+static void s25hx_t_post_sfdp_fixup(struct spi_nor *nor)
+{
+ struct spi_nor_erase_type *erase_type =
+ nor->params->erase_map.erase_type;
+ unsigned int i;
+
+ /*
+	 * In some parts, 3-byte erase opcodes are advertised by 4BAIT.
+	 * Convert them to 4-byte erase opcodes.
+ */
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+ switch (erase_type[i].opcode) {
+ case SPINOR_OP_SE:
+ erase_type[i].opcode = SPINOR_OP_SE_4B;
+ break;
+ case SPINOR_OP_BE_4K:
+ erase_type[i].opcode = SPINOR_OP_BE_4K_4B;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void s25hx_t_late_init(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+
+ /* Fast Read 4B requires mode cycles */
+ params->reads[SNOR_CMD_READ_FAST].num_mode_clocks = 8;
+
+ /* The writesize should be ECC data unit size */
+ params->writesize = 16;
+}
+
+static struct spi_nor_fixups s25hx_t_fixups = {
+ .post_bfpt = s25hx_t_post_bfpt_fixup,
+ .post_sfdp = s25hx_t_post_sfdp_fixup,
+ .late_init = s25hx_t_late_init,
+};
+
+/**
* cypress_nor_octal_dtr_enable() - Enable octal DTR on Cypress flashes.
* @nor: pointer to a 'struct spi_nor'
* @enable: whether to enable or disable Octal DTR
@@ -167,28 +313,7 @@ static int s28hs512t_post_bfpt_fixup(struct spi_nor *nor,
const struct sfdp_parameter_header *bfpt_header,
const struct sfdp_bfpt *bfpt)
{
- /*
- * The BFPT table advertises a 512B page size but the page size is
- * actually configurable (with the default being 256B). Read from
- * CFR3V[4] and set the correct size.
- */
- struct spi_mem_op op =
- CYPRESS_NOR_RD_ANY_REG_OP(3, SPINOR_REG_CYPRESS_CFR3V,
- nor->bouncebuf);
- int ret;
-
- spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- if (ret)
- return ret;
-
- if (nor->bouncebuf[0] & SPINOR_REG_CYPRESS_CFR3V_PGSZ)
- nor->params->page_size = 512;
- else
- nor->params->page_size = 256;
-
- return 0;
+ return cypress_nor_set_page_size(nor);
}
static const struct spi_nor_fixups s28hs512t_fixups = {
@@ -310,6 +435,22 @@ static const struct flash_info spansion_nor_parts[] = {
{ "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
+ { "s25hl512t", INFO6(0x342a1a, 0x0f0390, 256 * 1024, 256)
+ PARSE_SFDP
+ MFR_FLAGS(USE_CLSR)
+ .fixups = &s25hx_t_fixups },
+ { "s25hl01gt", INFO6(0x342a1b, 0x0f0390, 256 * 1024, 512)
+ PARSE_SFDP
+ MFR_FLAGS(USE_CLSR)
+ .fixups = &s25hx_t_fixups },
+ { "s25hs512t", INFO6(0x342b1a, 0x0f0390, 256 * 1024, 256)
+ PARSE_SFDP
+ MFR_FLAGS(USE_CLSR)
+ .fixups = &s25hx_t_fixups },
+ { "s25hs01gt", INFO6(0x342b1b, 0x0f0390, 256 * 1024, 512)
+ PARSE_SFDP
+ MFR_FLAGS(USE_CLSR)
+ .fixups = &s25hx_t_fixups },
{ "cy15x104q", INFO6(0x042cc2, 0x7f7f7f, 512 * 1024, 1)
FLAGS(SPI_NOR_NO_ERASE) },
{ "s28hs512t", INFO(0x345b1a, 0, 256 * 1024, 256)
diff --git a/drivers/mtd/spi-nor/xilinx.c b/drivers/mtd/spi-nor/xilinx.c
index 1d2f5db047bd..5723157739fc 100644
--- a/drivers/mtd/spi-nor/xilinx.c
+++ b/drivers/mtd/spi-nor/xilinx.c
@@ -31,7 +31,7 @@
.sector_size = (8 * (_page_size)), \
.n_sectors = (_n_sectors), \
.page_size = (_page_size), \
- .addr_width = 3, \
+ .addr_nbytes = 3, \
.flags = SPI_NOR_NO_FR
/* Xilinx S3AN share MFR with Atmel SPI NOR */
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index d7fb33c078e8..184608bd8999 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -84,7 +84,8 @@ enum ad_link_speed_type {
static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = {
0, 0, 0, 0, 0, 0
};
-static u16 ad_ticks_per_sec;
+
+static const u16 ad_ticks_per_sec = 1000 / AD_TIMER_INTERVAL;
static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned =
@@ -2001,36 +2002,24 @@ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
/**
* bond_3ad_initialize - initialize a bond's 802.3ad parameters and structures
* @bond: bonding struct to work on
- * @tick_resolution: tick duration (millisecond resolution)
*
* Can be called only after the mac address of the bond is set.
*/
-void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
+void bond_3ad_initialize(struct bonding *bond)
{
- /* check that the bond is not initialized yet */
- if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr),
- bond->dev->dev_addr)) {
-
- BOND_AD_INFO(bond).aggregator_identifier = 0;
-
- BOND_AD_INFO(bond).system.sys_priority =
- bond->params.ad_actor_sys_prio;
- if (is_zero_ether_addr(bond->params.ad_actor_system))
- BOND_AD_INFO(bond).system.sys_mac_addr =
- *((struct mac_addr *)bond->dev->dev_addr);
- else
- BOND_AD_INFO(bond).system.sys_mac_addr =
- *((struct mac_addr *)bond->params.ad_actor_system);
-
- /* initialize how many times this module is called in one
- * second (should be about every 100ms)
- */
- ad_ticks_per_sec = tick_resolution;
+ BOND_AD_INFO(bond).aggregator_identifier = 0;
+ BOND_AD_INFO(bond).system.sys_priority =
+ bond->params.ad_actor_sys_prio;
+ if (is_zero_ether_addr(bond->params.ad_actor_system))
+ BOND_AD_INFO(bond).system.sys_mac_addr =
+ *((struct mac_addr *)bond->dev->dev_addr);
+ else
+ BOND_AD_INFO(bond).system.sys_mac_addr =
+ *((struct mac_addr *)bond->params.ad_actor_system);
- bond_3ad_initiate_agg_selection(bond,
- AD_AGGREGATOR_SELECTION_TIMER *
- ad_ticks_per_sec);
- }
+ bond_3ad_initiate_agg_selection(bond,
+ AD_AGGREGATOR_SELECTION_TIMER *
+ ad_ticks_per_sec);
}
/**
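
Note that the new constant initializer simply bakes in the value the caller used to pass: bond_main.c (further down in this diff) previously called bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL). Assuming AD_TIMER_INTERVAL is 100 ms, as the removed "should be about every 100ms" comment suggests, ad_ticks_per_sec = 1000 / 100 = 10, so the aggregator selection timeout used just above is AD_AGGREGATOR_SELECTION_TIMER * 10 ticks.
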
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 007d43e46dcb..b9dbad3a8af8 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -653,6 +653,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb,
static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
{
struct slave *tx_slave = NULL;
+ struct net_device *dev;
struct arp_pkt *arp;
if (!pskb_network_may_pull(skb, sizeof(*arp)))
@@ -665,6 +666,15 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
if (!bond_slave_has_mac_rx(bond, arp->mac_src))
return NULL;
+ dev = ip_dev_find(dev_net(bond->dev), arp->ip_src);
+ if (dev) {
+ if (netif_is_bridge_master(dev)) {
+ dev_put(dev);
+ return NULL;
+ }
+ dev_put(dev);
+ }
+
if (arp->op_code == htons(ARPOP_REPLY)) {
/* the arp must be sent on the selected rx channel */
tx_slave = rlb_choose_channel(skb, bond, arp);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index e75acb14d066..2f4da2c13c0a 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2001,6 +2001,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
new_slave->target_last_arp_rx[i] = new_slave->last_rx;
+ new_slave->last_tx = new_slave->last_rx;
+
if (bond->params.miimon && !bond->params.use_carrier) {
link_reporting = bond_check_dev_link(bond, slave_dev, 1);
@@ -2079,7 +2081,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
/* Initialize AD with the number of times that the AD timer is called in 1 second
* can be called only after the mac address of the bond is set
*/
- bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
+ bond_3ad_initialize(bond);
} else {
SLAVE_AD_INFO(new_slave)->id =
SLAVE_AD_INFO(prev_slave)->id + 1;
@@ -2884,8 +2886,11 @@ static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
return;
}
- if (bond_handle_vlan(slave, tags, skb))
+ if (bond_handle_vlan(slave, tags, skb)) {
+ slave_update_last_tx(slave);
arp_xmit(skb);
+ }
+
return;
}
@@ -3074,8 +3079,7 @@ static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
curr_active_slave->last_link_up))
bond_validate_arp(bond, slave, tip, sip);
else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
- bond_time_in_interval(bond,
- dev_trans_start(curr_arp_slave->dev), 1))
+ bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
bond_validate_arp(bond, slave, sip, tip);
out_unlock:
@@ -3103,8 +3107,10 @@ static void bond_ns_send(struct slave *slave, const struct in6_addr *daddr,
}
addrconf_addr_solict_mult(daddr, &mcaddr);
- if (bond_handle_vlan(slave, tags, skb))
+ if (bond_handle_vlan(slave, tags, skb)) {
+ slave_update_last_tx(slave);
ndisc_send_skb(skb, &mcaddr, saddr);
+ }
}
static void bond_ns_send_all(struct bonding *bond, struct slave *slave)
@@ -3246,8 +3252,7 @@ static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
curr_active_slave->last_link_up))
bond_validate_ns(bond, slave, saddr, daddr);
else if (curr_arp_slave &&
- bond_time_in_interval(bond,
- dev_trans_start(curr_arp_slave->dev), 1))
+ bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
bond_validate_ns(bond, slave, saddr, daddr);
out:
@@ -3335,12 +3340,12 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
* so it can wait
*/
bond_for_each_slave_rcu(bond, slave, iter) {
- unsigned long trans_start = dev_trans_start(slave->dev);
+ unsigned long last_tx = slave_last_tx(slave);
bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
if (slave->link != BOND_LINK_UP) {
- if (bond_time_in_interval(bond, trans_start, 1) &&
+ if (bond_time_in_interval(bond, last_tx, 1) &&
bond_time_in_interval(bond, slave->last_rx, 1)) {
bond_propose_link_state(slave, BOND_LINK_UP);
@@ -3365,7 +3370,7 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
* when the source ip is 0, so don't take the link down
* if we don't know our ip yet
*/
- if (!bond_time_in_interval(bond, trans_start, bond->params.missed_max) ||
+ if (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) ||
!bond_time_in_interval(bond, slave->last_rx, bond->params.missed_max)) {
bond_propose_link_state(slave, BOND_LINK_DOWN);
@@ -3431,7 +3436,7 @@ re_arm:
*/
static int bond_ab_arp_inspect(struct bonding *bond)
{
- unsigned long trans_start, last_rx;
+ unsigned long last_tx, last_rx;
struct list_head *iter;
struct slave *slave;
int commit = 0;
@@ -3482,9 +3487,9 @@ static int bond_ab_arp_inspect(struct bonding *bond)
* - (more than missed_max*delta since receive AND
* the bond has an IP address)
*/
- trans_start = dev_trans_start(slave->dev);
+ last_tx = slave_last_tx(slave);
if (bond_is_active_slave(slave) &&
- (!bond_time_in_interval(bond, trans_start, bond->params.missed_max) ||
+ (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) ||
!bond_time_in_interval(bond, last_rx, bond->params.missed_max))) {
bond_propose_link_state(slave, BOND_LINK_DOWN);
commit++;
@@ -3501,8 +3506,8 @@ static int bond_ab_arp_inspect(struct bonding *bond)
*/
static void bond_ab_arp_commit(struct bonding *bond)
{
- unsigned long trans_start;
struct list_head *iter;
+ unsigned long last_tx;
struct slave *slave;
bond_for_each_slave(bond, slave, iter) {
@@ -3511,10 +3516,10 @@ static void bond_ab_arp_commit(struct bonding *bond)
continue;
case BOND_LINK_UP:
- trans_start = dev_trans_start(slave->dev);
+ last_tx = slave_last_tx(slave);
if (rtnl_dereference(bond->curr_active_slave) != slave ||
(!rtnl_dereference(bond->curr_active_slave) &&
- bond_time_in_interval(bond, trans_start, 1))) {
+ bond_time_in_interval(bond, last_tx, 1))) {
struct slave *current_arp_slave;
current_arp_slave = rtnl_dereference(bond->current_arp_slave);
@@ -5333,8 +5338,14 @@ static struct net_device *bond_sk_get_lower_dev(struct net_device *dev,
static netdev_tx_t bond_tls_device_xmit(struct bonding *bond, struct sk_buff *skb,
struct net_device *dev)
{
- if (likely(bond_get_slave_by_dev(bond, tls_get_ctx(skb->sk)->netdev)))
- return bond_dev_queue_xmit(bond, skb, tls_get_ctx(skb->sk)->netdev);
+ struct net_device *tls_netdev = rcu_dereference(tls_get_ctx(skb->sk)->netdev);
+
+ /* tls_netdev might become NULL, even if tls_is_sk_tx_device_offloaded
+ * was true, if tls_device_down is running in parallel, but it's OK,
+ * because bond_get_slave_by_dev has a NULL check.
+ */
+ if (likely(bond_get_slave_by_dev(bond, tls_netdev)))
+ return bond_dev_queue_xmit(bond, skb, tls_netdev);
return bond_tx_drop(dev, skb);
}
#endif
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 65ba6697bd7d..c469b2f3e57d 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -63,7 +63,7 @@ static u32 mpc52xx_can_get_clock(struct platform_device *ofdev,
else
*mscan_clksrc = MSCAN_CLKSRC_XTAL;
- freq = mpc5xxx_get_bus_frequency(ofdev->dev.of_node);
+ freq = mpc5xxx_get_bus_frequency(&ofdev->dev);
if (!freq)
return 0;
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index e750d13c8841..c320de474f40 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -1070,9 +1070,6 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
mcp251x_read_2regs(spi, CANINTF, &intf, &eflag);
- /* mask out flags we don't care about */
- intf &= CANINTF_RX | CANINTF_TX | CANINTF_ERR;
-
/* receive buffer 0 */
if (intf & CANINTF_RX0IF) {
mcp251x_hw_rx(spi, 0);
@@ -1082,6 +1079,18 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
if (mcp251x_is_2510(spi))
mcp251x_write_bits(spi, CANINTF,
CANINTF_RX0IF, 0x00);
+
+ /* check if buffer 1 is already known to be full, no need to re-read */
+ if (!(intf & CANINTF_RX1IF)) {
+ u8 intf1, eflag1;
+
+ /* intf needs to be read again to avoid a race condition */
+ mcp251x_read_2regs(spi, CANINTF, &intf1, &eflag1);
+
+ /* combine flags from both operations for error handling */
+ intf |= intf1;
+ eflag |= eflag1;
+ }
}
/* receive buffer 1 */
@@ -1092,6 +1101,9 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
clear_intf |= CANINTF_RX1IF;
}
+ /* mask out flags we don't care about */
+ intf &= CANINTF_RX | CANINTF_TX | CANINTF_ERR;
+
/* any error or tx interrupt we need to clear? */
if (intf & (CANINTF_ERR | CANINTF_TX))
clear_intf |= intf & (CANINTF_ERR | CANINTF_TX);
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index d1e1a459c045..d31191686a54 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -195,7 +195,7 @@ struct __packed ems_cpc_msg {
__le32 ts_sec; /* timestamp in seconds */
__le32 ts_nsec; /* timestamp in nano seconds */
- union {
+ union __packed {
u8 generic[64];
struct cpc_can_msg can_msg;
struct cpc_can_params can_params;
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 4b14d80d27ed..e4f446db0ca1 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -613,6 +613,9 @@ int ksz9477_fdb_dump(struct ksz_device *dev, int port,
goto exit;
}
+ if (!(ksz_data & ALU_VALID))
+ continue;
+
/* read ALU table */
ksz9477_read_table(dev, alu_table);
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index ed7d137cba99..6bd69a7e6809 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -803,9 +803,15 @@ static void ksz_phylink_get_caps(struct dsa_switch *ds, int port,
if (dev->info->supports_rgmii[port])
phy_interface_set_rgmii(config->supported_interfaces);
- if (dev->info->internal_phy[port])
+ if (dev->info->internal_phy[port]) {
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
+ /* Compatibility for phylib's default interface type when the
+ * phy-mode property is absent
+ */
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ }
if (dev->dev_ops->get_caps)
dev->dev_ops->get_caps(dev, port, config);
@@ -962,6 +968,7 @@ static void ksz_update_port_member(struct ksz_device *dev, int port)
static int ksz_setup(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
+ struct ksz_port *p;
const u16 *regs;
int ret;
@@ -1001,6 +1008,14 @@ static int ksz_setup(struct dsa_switch *ds)
return ret;
}
+ /* Start with learning disabled on standalone user ports, and enabled
+ * on the CPU port. In the absence of finer-grained mechanisms, learning on the
+ * CPU port will avoid flooding bridge local addresses on the network
+ * in some cases.
+ */
+ p = &dev->ports[dev->cpu_port];
+ p->learning = true;
+
/* start switch */
regmap_update_bits(dev->regmap[0], regs[S_START_CTRL],
SW_START, SW_START);
@@ -1277,6 +1292,8 @@ void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
ksz_pread8(dev, port, regs[P_STP_CTRL], &data);
data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
+ p = &dev->ports[port];
+
switch (state) {
case BR_STATE_DISABLED:
data |= PORT_LEARN_DISABLE;
@@ -1286,9 +1303,13 @@ void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
break;
case BR_STATE_LEARNING:
data |= PORT_RX_ENABLE;
+ if (!p->learning)
+ data |= PORT_LEARN_DISABLE;
break;
case BR_STATE_FORWARDING:
data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
+ if (!p->learning)
+ data |= PORT_LEARN_DISABLE;
break;
case BR_STATE_BLOCKING:
data |= PORT_LEARN_DISABLE;
@@ -1300,12 +1321,38 @@ void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
ksz_pwrite8(dev, port, regs[P_STP_CTRL], data);
- p = &dev->ports[port];
p->stp_state = state;
ksz_update_port_member(dev, port);
}
+static int ksz_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ if (flags.mask & ~BR_LEARNING)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ksz_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port *p = &dev->ports[port];
+
+ if (flags.mask & BR_LEARNING) {
+ p->learning = !!(flags.val & BR_LEARNING);
+
+ /* Make the change take effect immediately */
+ ksz_port_stp_state_set(ds, port, p->stp_state);
+ }
+
+ return 0;
+}
+
static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
int port,
enum dsa_tag_protocol mp)
@@ -1719,6 +1766,8 @@ static const struct dsa_switch_ops ksz_switch_ops = {
.port_bridge_join = ksz_port_bridge_join,
.port_bridge_leave = ksz_port_bridge_leave,
.port_stp_state_set = ksz_port_stp_state_set,
+ .port_pre_bridge_flags = ksz_port_pre_bridge_flags,
+ .port_bridge_flags = ksz_port_bridge_flags,
.port_fast_age = ksz_port_fast_age,
.port_vlan_filtering = ksz_port_vlan_filtering,
.port_vlan_add = ksz_port_vlan_add,
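
Annotation (not part of the patch): the ksz_common.c changes above offload BR_LEARNING through the switchdev bridge-flags callback pair. The pre_ hook only validates that nothing beyond learning is requested, the second hook records the value and replays the current STP state so the learn-disable bit is reprogrammed immediately; from userspace the knob is the usual iproute2 command, e.g. bridge link set dev <port> learning off. A small validate-then-apply sketch with hypothetical types (not the driver's structures) follows.

/* Illustrative sketch only -- hypothetical types, not the ksz driver. */
#include <errno.h>
#include <stdbool.h>

#define FLAG_LEARNING	(1UL << 0)

struct port_state {
	bool learning;
};

/* step 1: validation only, nothing is touched yet */
static int pre_bridge_flags(unsigned long mask, unsigned long supported)
{
	return (mask & ~supported) ? -EINVAL : 0;
}

/* step 2: apply the accepted bits and reprogram the hardware */
static void bridge_flags(struct port_state *p, unsigned long mask,
			 unsigned long val)
{
	if (mask & FLAG_LEARNING)
		p->learning = !!(val & FLAG_LEARNING);
	/* the real driver then re-runs its STP-state setter here so the
	 * learn-disable bit takes effect right away */
}
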
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 764ada3a0f42..0d9520dc6d2d 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -65,6 +65,7 @@ struct ksz_chip_data {
struct ksz_port {
bool remove_tag; /* Remove Tag flag set, for ksz8795 only */
+ bool learning;
int stp_state;
struct phy_device phydev;
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index a4c6eb9a52d0..83dca9179aa0 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -118,6 +118,9 @@ static int mv88e6060_setup_port(struct mv88e6060_priv *priv, int p)
int addr = REG_PORT(p);
int ret;
+ if (dsa_is_unused_port(priv->ds, p))
+ return 0;
+
/* Do not force flow control, disable Ingress and Egress
* Header tagging, disable VLAN tunneling, and set the port
* state to Forwarding. Additionally, if this is the CPU
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 859196898a7d..aadb0bd7c24f 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -610,6 +610,9 @@ static int felix_change_tag_protocol(struct dsa_switch *ds,
old_proto_ops = felix->tag_proto_ops;
+ if (proto_ops == old_proto_ops)
+ return 0;
+
err = proto_ops->setup(ds);
if (err)
goto setup_failed;
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 61ed317602e7..1cdce8a98d1d 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -274,27 +274,98 @@ static const u32 vsc9959_rew_regmap[] = {
static const u32 vsc9959_sys_regmap[] = {
REG(SYS_COUNT_RX_OCTETS, 0x000000),
+ REG(SYS_COUNT_RX_UNICAST, 0x000004),
REG(SYS_COUNT_RX_MULTICAST, 0x000008),
+ REG(SYS_COUNT_RX_BROADCAST, 0x00000c),
REG(SYS_COUNT_RX_SHORTS, 0x000010),
REG(SYS_COUNT_RX_FRAGMENTS, 0x000014),
REG(SYS_COUNT_RX_JABBERS, 0x000018),
+ REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c),
+ REG(SYS_COUNT_RX_SYM_ERRS, 0x000020),
REG(SYS_COUNT_RX_64, 0x000024),
REG(SYS_COUNT_RX_65_127, 0x000028),
REG(SYS_COUNT_RX_128_255, 0x00002c),
- REG(SYS_COUNT_RX_256_1023, 0x000030),
- REG(SYS_COUNT_RX_1024_1526, 0x000034),
- REG(SYS_COUNT_RX_1527_MAX, 0x000038),
- REG(SYS_COUNT_RX_LONGS, 0x000044),
+ REG(SYS_COUNT_RX_256_511, 0x000030),
+ REG(SYS_COUNT_RX_512_1023, 0x000034),
+ REG(SYS_COUNT_RX_1024_1526, 0x000038),
+ REG(SYS_COUNT_RX_1527_MAX, 0x00003c),
+ REG(SYS_COUNT_RX_PAUSE, 0x000040),
+ REG(SYS_COUNT_RX_CONTROL, 0x000044),
+ REG(SYS_COUNT_RX_LONGS, 0x000048),
+ REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x00004c),
+ REG(SYS_COUNT_RX_RED_PRIO_0, 0x000050),
+ REG(SYS_COUNT_RX_RED_PRIO_1, 0x000054),
+ REG(SYS_COUNT_RX_RED_PRIO_2, 0x000058),
+ REG(SYS_COUNT_RX_RED_PRIO_3, 0x00005c),
+ REG(SYS_COUNT_RX_RED_PRIO_4, 0x000060),
+ REG(SYS_COUNT_RX_RED_PRIO_5, 0x000064),
+ REG(SYS_COUNT_RX_RED_PRIO_6, 0x000068),
+ REG(SYS_COUNT_RX_RED_PRIO_7, 0x00006c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_0, 0x000070),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_1, 0x000074),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_2, 0x000078),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_3, 0x00007c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_4, 0x000080),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_5, 0x000084),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_6, 0x000088),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_7, 0x00008c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_0, 0x000090),
+ REG(SYS_COUNT_RX_GREEN_PRIO_1, 0x000094),
+ REG(SYS_COUNT_RX_GREEN_PRIO_2, 0x000098),
+ REG(SYS_COUNT_RX_GREEN_PRIO_3, 0x00009c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_4, 0x0000a0),
+ REG(SYS_COUNT_RX_GREEN_PRIO_5, 0x0000a4),
+ REG(SYS_COUNT_RX_GREEN_PRIO_6, 0x0000a8),
+ REG(SYS_COUNT_RX_GREEN_PRIO_7, 0x0000ac),
REG(SYS_COUNT_TX_OCTETS, 0x000200),
+ REG(SYS_COUNT_TX_UNICAST, 0x000204),
+ REG(SYS_COUNT_TX_MULTICAST, 0x000208),
+ REG(SYS_COUNT_TX_BROADCAST, 0x00020c),
REG(SYS_COUNT_TX_COLLISION, 0x000210),
REG(SYS_COUNT_TX_DROPS, 0x000214),
+ REG(SYS_COUNT_TX_PAUSE, 0x000218),
REG(SYS_COUNT_TX_64, 0x00021c),
REG(SYS_COUNT_TX_65_127, 0x000220),
- REG(SYS_COUNT_TX_128_511, 0x000224),
- REG(SYS_COUNT_TX_512_1023, 0x000228),
- REG(SYS_COUNT_TX_1024_1526, 0x00022c),
- REG(SYS_COUNT_TX_1527_MAX, 0x000230),
+ REG(SYS_COUNT_TX_128_255, 0x000224),
+ REG(SYS_COUNT_TX_256_511, 0x000228),
+ REG(SYS_COUNT_TX_512_1023, 0x00022c),
+ REG(SYS_COUNT_TX_1024_1526, 0x000230),
+ REG(SYS_COUNT_TX_1527_MAX, 0x000234),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_0, 0x000238),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_1, 0x00023c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_2, 0x000240),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_3, 0x000244),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_4, 0x000248),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_5, 0x00024c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_6, 0x000250),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_7, 0x000254),
+ REG(SYS_COUNT_TX_GREEN_PRIO_0, 0x000258),
+ REG(SYS_COUNT_TX_GREEN_PRIO_1, 0x00025c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_2, 0x000260),
+ REG(SYS_COUNT_TX_GREEN_PRIO_3, 0x000264),
+ REG(SYS_COUNT_TX_GREEN_PRIO_4, 0x000268),
+ REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00026c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000270),
+ REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000274),
REG(SYS_COUNT_TX_AGING, 0x000278),
+ REG(SYS_COUNT_DROP_LOCAL, 0x000400),
+ REG(SYS_COUNT_DROP_TAIL, 0x000404),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000408),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_1, 0x00040c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_2, 0x000410),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_3, 0x000414),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000418),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00041c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000420),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000424),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000428),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00042c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000430),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000434),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000438),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00043c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000440),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000444),
REG(SYS_RESET_CFG, 0x000e00),
REG(SYS_SR_ETYPE_CFG, 0x000e04),
REG(SYS_VLAN_ETYPE_CFG, 0x000e08),
@@ -547,100 +618,379 @@ static const struct reg_field vsc9959_regfields[REGFIELD_MAX] = {
[SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 7, 4),
};
-static const struct ocelot_stat_layout vsc9959_stats_layout[] = {
- { .offset = 0x00, .name = "rx_octets", },
- { .offset = 0x01, .name = "rx_unicast", },
- { .offset = 0x02, .name = "rx_multicast", },
- { .offset = 0x03, .name = "rx_broadcast", },
- { .offset = 0x04, .name = "rx_shorts", },
- { .offset = 0x05, .name = "rx_fragments", },
- { .offset = 0x06, .name = "rx_jabbers", },
- { .offset = 0x07, .name = "rx_crc_align_errs", },
- { .offset = 0x08, .name = "rx_sym_errs", },
- { .offset = 0x09, .name = "rx_frames_below_65_octets", },
- { .offset = 0x0A, .name = "rx_frames_65_to_127_octets", },
- { .offset = 0x0B, .name = "rx_frames_128_to_255_octets", },
- { .offset = 0x0C, .name = "rx_frames_256_to_511_octets", },
- { .offset = 0x0D, .name = "rx_frames_512_to_1023_octets", },
- { .offset = 0x0E, .name = "rx_frames_1024_to_1526_octets", },
- { .offset = 0x0F, .name = "rx_frames_over_1526_octets", },
- { .offset = 0x10, .name = "rx_pause", },
- { .offset = 0x11, .name = "rx_control", },
- { .offset = 0x12, .name = "rx_longs", },
- { .offset = 0x13, .name = "rx_classified_drops", },
- { .offset = 0x14, .name = "rx_red_prio_0", },
- { .offset = 0x15, .name = "rx_red_prio_1", },
- { .offset = 0x16, .name = "rx_red_prio_2", },
- { .offset = 0x17, .name = "rx_red_prio_3", },
- { .offset = 0x18, .name = "rx_red_prio_4", },
- { .offset = 0x19, .name = "rx_red_prio_5", },
- { .offset = 0x1A, .name = "rx_red_prio_6", },
- { .offset = 0x1B, .name = "rx_red_prio_7", },
- { .offset = 0x1C, .name = "rx_yellow_prio_0", },
- { .offset = 0x1D, .name = "rx_yellow_prio_1", },
- { .offset = 0x1E, .name = "rx_yellow_prio_2", },
- { .offset = 0x1F, .name = "rx_yellow_prio_3", },
- { .offset = 0x20, .name = "rx_yellow_prio_4", },
- { .offset = 0x21, .name = "rx_yellow_prio_5", },
- { .offset = 0x22, .name = "rx_yellow_prio_6", },
- { .offset = 0x23, .name = "rx_yellow_prio_7", },
- { .offset = 0x24, .name = "rx_green_prio_0", },
- { .offset = 0x25, .name = "rx_green_prio_1", },
- { .offset = 0x26, .name = "rx_green_prio_2", },
- { .offset = 0x27, .name = "rx_green_prio_3", },
- { .offset = 0x28, .name = "rx_green_prio_4", },
- { .offset = 0x29, .name = "rx_green_prio_5", },
- { .offset = 0x2A, .name = "rx_green_prio_6", },
- { .offset = 0x2B, .name = "rx_green_prio_7", },
- { .offset = 0x80, .name = "tx_octets", },
- { .offset = 0x81, .name = "tx_unicast", },
- { .offset = 0x82, .name = "tx_multicast", },
- { .offset = 0x83, .name = "tx_broadcast", },
- { .offset = 0x84, .name = "tx_collision", },
- { .offset = 0x85, .name = "tx_drops", },
- { .offset = 0x86, .name = "tx_pause", },
- { .offset = 0x87, .name = "tx_frames_below_65_octets", },
- { .offset = 0x88, .name = "tx_frames_65_to_127_octets", },
- { .offset = 0x89, .name = "tx_frames_128_255_octets", },
- { .offset = 0x8B, .name = "tx_frames_256_511_octets", },
- { .offset = 0x8C, .name = "tx_frames_1024_1526_octets", },
- { .offset = 0x8D, .name = "tx_frames_over_1526_octets", },
- { .offset = 0x8E, .name = "tx_yellow_prio_0", },
- { .offset = 0x8F, .name = "tx_yellow_prio_1", },
- { .offset = 0x90, .name = "tx_yellow_prio_2", },
- { .offset = 0x91, .name = "tx_yellow_prio_3", },
- { .offset = 0x92, .name = "tx_yellow_prio_4", },
- { .offset = 0x93, .name = "tx_yellow_prio_5", },
- { .offset = 0x94, .name = "tx_yellow_prio_6", },
- { .offset = 0x95, .name = "tx_yellow_prio_7", },
- { .offset = 0x96, .name = "tx_green_prio_0", },
- { .offset = 0x97, .name = "tx_green_prio_1", },
- { .offset = 0x98, .name = "tx_green_prio_2", },
- { .offset = 0x99, .name = "tx_green_prio_3", },
- { .offset = 0x9A, .name = "tx_green_prio_4", },
- { .offset = 0x9B, .name = "tx_green_prio_5", },
- { .offset = 0x9C, .name = "tx_green_prio_6", },
- { .offset = 0x9D, .name = "tx_green_prio_7", },
- { .offset = 0x9E, .name = "tx_aged", },
- { .offset = 0x100, .name = "drop_local", },
- { .offset = 0x101, .name = "drop_tail", },
- { .offset = 0x102, .name = "drop_yellow_prio_0", },
- { .offset = 0x103, .name = "drop_yellow_prio_1", },
- { .offset = 0x104, .name = "drop_yellow_prio_2", },
- { .offset = 0x105, .name = "drop_yellow_prio_3", },
- { .offset = 0x106, .name = "drop_yellow_prio_4", },
- { .offset = 0x107, .name = "drop_yellow_prio_5", },
- { .offset = 0x108, .name = "drop_yellow_prio_6", },
- { .offset = 0x109, .name = "drop_yellow_prio_7", },
- { .offset = 0x10A, .name = "drop_green_prio_0", },
- { .offset = 0x10B, .name = "drop_green_prio_1", },
- { .offset = 0x10C, .name = "drop_green_prio_2", },
- { .offset = 0x10D, .name = "drop_green_prio_3", },
- { .offset = 0x10E, .name = "drop_green_prio_4", },
- { .offset = 0x10F, .name = "drop_green_prio_5", },
- { .offset = 0x110, .name = "drop_green_prio_6", },
- { .offset = 0x111, .name = "drop_green_prio_7", },
- OCELOT_STAT_END
+static const struct ocelot_stat_layout vsc9959_stats_layout[OCELOT_NUM_STATS] = {
+ [OCELOT_STAT_RX_OCTETS] = {
+ .name = "rx_octets",
+ .reg = SYS_COUNT_RX_OCTETS,
+ },
+ [OCELOT_STAT_RX_UNICAST] = {
+ .name = "rx_unicast",
+ .reg = SYS_COUNT_RX_UNICAST,
+ },
+ [OCELOT_STAT_RX_MULTICAST] = {
+ .name = "rx_multicast",
+ .reg = SYS_COUNT_RX_MULTICAST,
+ },
+ [OCELOT_STAT_RX_BROADCAST] = {
+ .name = "rx_broadcast",
+ .reg = SYS_COUNT_RX_BROADCAST,
+ },
+ [OCELOT_STAT_RX_SHORTS] = {
+ .name = "rx_shorts",
+ .reg = SYS_COUNT_RX_SHORTS,
+ },
+ [OCELOT_STAT_RX_FRAGMENTS] = {
+ .name = "rx_fragments",
+ .reg = SYS_COUNT_RX_FRAGMENTS,
+ },
+ [OCELOT_STAT_RX_JABBERS] = {
+ .name = "rx_jabbers",
+ .reg = SYS_COUNT_RX_JABBERS,
+ },
+ [OCELOT_STAT_RX_CRC_ALIGN_ERRS] = {
+ .name = "rx_crc_align_errs",
+ .reg = SYS_COUNT_RX_CRC_ALIGN_ERRS,
+ },
+ [OCELOT_STAT_RX_SYM_ERRS] = {
+ .name = "rx_sym_errs",
+ .reg = SYS_COUNT_RX_SYM_ERRS,
+ },
+ [OCELOT_STAT_RX_64] = {
+ .name = "rx_frames_below_65_octets",
+ .reg = SYS_COUNT_RX_64,
+ },
+ [OCELOT_STAT_RX_65_127] = {
+ .name = "rx_frames_65_to_127_octets",
+ .reg = SYS_COUNT_RX_65_127,
+ },
+ [OCELOT_STAT_RX_128_255] = {
+ .name = "rx_frames_128_to_255_octets",
+ .reg = SYS_COUNT_RX_128_255,
+ },
+ [OCELOT_STAT_RX_256_511] = {
+ .name = "rx_frames_256_to_511_octets",
+ .reg = SYS_COUNT_RX_256_511,
+ },
+ [OCELOT_STAT_RX_512_1023] = {
+ .name = "rx_frames_512_to_1023_octets",
+ .reg = SYS_COUNT_RX_512_1023,
+ },
+ [OCELOT_STAT_RX_1024_1526] = {
+ .name = "rx_frames_1024_to_1526_octets",
+ .reg = SYS_COUNT_RX_1024_1526,
+ },
+ [OCELOT_STAT_RX_1527_MAX] = {
+ .name = "rx_frames_over_1526_octets",
+ .reg = SYS_COUNT_RX_1527_MAX,
+ },
+ [OCELOT_STAT_RX_PAUSE] = {
+ .name = "rx_pause",
+ .reg = SYS_COUNT_RX_PAUSE,
+ },
+ [OCELOT_STAT_RX_CONTROL] = {
+ .name = "rx_control",
+ .reg = SYS_COUNT_RX_CONTROL,
+ },
+ [OCELOT_STAT_RX_LONGS] = {
+ .name = "rx_longs",
+ .reg = SYS_COUNT_RX_LONGS,
+ },
+ [OCELOT_STAT_RX_CLASSIFIED_DROPS] = {
+ .name = "rx_classified_drops",
+ .reg = SYS_COUNT_RX_CLASSIFIED_DROPS,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_0] = {
+ .name = "rx_red_prio_0",
+ .reg = SYS_COUNT_RX_RED_PRIO_0,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_1] = {
+ .name = "rx_red_prio_1",
+ .reg = SYS_COUNT_RX_RED_PRIO_1,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_2] = {
+ .name = "rx_red_prio_2",
+ .reg = SYS_COUNT_RX_RED_PRIO_2,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_3] = {
+ .name = "rx_red_prio_3",
+ .reg = SYS_COUNT_RX_RED_PRIO_3,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_4] = {
+ .name = "rx_red_prio_4",
+ .reg = SYS_COUNT_RX_RED_PRIO_4,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_5] = {
+ .name = "rx_red_prio_5",
+ .reg = SYS_COUNT_RX_RED_PRIO_5,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_6] = {
+ .name = "rx_red_prio_6",
+ .reg = SYS_COUNT_RX_RED_PRIO_6,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_7] = {
+ .name = "rx_red_prio_7",
+ .reg = SYS_COUNT_RX_RED_PRIO_7,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_0] = {
+ .name = "rx_yellow_prio_0",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_1] = {
+ .name = "rx_yellow_prio_1",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_2] = {
+ .name = "rx_yellow_prio_2",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_3] = {
+ .name = "rx_yellow_prio_3",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_4] = {
+ .name = "rx_yellow_prio_4",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_5] = {
+ .name = "rx_yellow_prio_5",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_6] = {
+ .name = "rx_yellow_prio_6",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_7] = {
+ .name = "rx_yellow_prio_7",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_0] = {
+ .name = "rx_green_prio_0",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_1] = {
+ .name = "rx_green_prio_1",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_2] = {
+ .name = "rx_green_prio_2",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_3] = {
+ .name = "rx_green_prio_3",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_4] = {
+ .name = "rx_green_prio_4",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_5] = {
+ .name = "rx_green_prio_5",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_6] = {
+ .name = "rx_green_prio_6",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_7] = {
+ .name = "rx_green_prio_7",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_7,
+ },
+ [OCELOT_STAT_TX_OCTETS] = {
+ .name = "tx_octets",
+ .reg = SYS_COUNT_TX_OCTETS,
+ },
+ [OCELOT_STAT_TX_UNICAST] = {
+ .name = "tx_unicast",
+ .reg = SYS_COUNT_TX_UNICAST,
+ },
+ [OCELOT_STAT_TX_MULTICAST] = {
+ .name = "tx_multicast",
+ .reg = SYS_COUNT_TX_MULTICAST,
+ },
+ [OCELOT_STAT_TX_BROADCAST] = {
+ .name = "tx_broadcast",
+ .reg = SYS_COUNT_TX_BROADCAST,
+ },
+ [OCELOT_STAT_TX_COLLISION] = {
+ .name = "tx_collision",
+ .reg = SYS_COUNT_TX_COLLISION,
+ },
+ [OCELOT_STAT_TX_DROPS] = {
+ .name = "tx_drops",
+ .reg = SYS_COUNT_TX_DROPS,
+ },
+ [OCELOT_STAT_TX_PAUSE] = {
+ .name = "tx_pause",
+ .reg = SYS_COUNT_TX_PAUSE,
+ },
+ [OCELOT_STAT_TX_64] = {
+ .name = "tx_frames_below_65_octets",
+ .reg = SYS_COUNT_TX_64,
+ },
+ [OCELOT_STAT_TX_65_127] = {
+ .name = "tx_frames_65_to_127_octets",
+ .reg = SYS_COUNT_TX_65_127,
+ },
+ [OCELOT_STAT_TX_128_255] = {
+ .name = "tx_frames_128_255_octets",
+ .reg = SYS_COUNT_TX_128_255,
+ },
+ [OCELOT_STAT_TX_256_511] = {
+ .name = "tx_frames_256_511_octets",
+ .reg = SYS_COUNT_TX_256_511,
+ },
+ [OCELOT_STAT_TX_512_1023] = {
+ .name = "tx_frames_512_1023_octets",
+ .reg = SYS_COUNT_TX_512_1023,
+ },
+ [OCELOT_STAT_TX_1024_1526] = {
+ .name = "tx_frames_1024_1526_octets",
+ .reg = SYS_COUNT_TX_1024_1526,
+ },
+ [OCELOT_STAT_TX_1527_MAX] = {
+ .name = "tx_frames_over_1526_octets",
+ .reg = SYS_COUNT_TX_1527_MAX,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_0] = {
+ .name = "tx_yellow_prio_0",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_1] = {
+ .name = "tx_yellow_prio_1",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_2] = {
+ .name = "tx_yellow_prio_2",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_3] = {
+ .name = "tx_yellow_prio_3",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_4] = {
+ .name = "tx_yellow_prio_4",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_5] = {
+ .name = "tx_yellow_prio_5",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_6] = {
+ .name = "tx_yellow_prio_6",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_7] = {
+ .name = "tx_yellow_prio_7",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_0] = {
+ .name = "tx_green_prio_0",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_1] = {
+ .name = "tx_green_prio_1",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_2] = {
+ .name = "tx_green_prio_2",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_3] = {
+ .name = "tx_green_prio_3",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_4] = {
+ .name = "tx_green_prio_4",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_5] = {
+ .name = "tx_green_prio_5",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_6] = {
+ .name = "tx_green_prio_6",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_7] = {
+ .name = "tx_green_prio_7",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_7,
+ },
+ [OCELOT_STAT_TX_AGED] = {
+ .name = "tx_aged",
+ .reg = SYS_COUNT_TX_AGING,
+ },
+ [OCELOT_STAT_DROP_LOCAL] = {
+ .name = "drop_local",
+ .reg = SYS_COUNT_DROP_LOCAL,
+ },
+ [OCELOT_STAT_DROP_TAIL] = {
+ .name = "drop_tail",
+ .reg = SYS_COUNT_DROP_TAIL,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_0] = {
+ .name = "drop_yellow_prio_0",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_1] = {
+ .name = "drop_yellow_prio_1",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_2] = {
+ .name = "drop_yellow_prio_2",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_3] = {
+ .name = "drop_yellow_prio_3",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_4] = {
+ .name = "drop_yellow_prio_4",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_5] = {
+ .name = "drop_yellow_prio_5",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_6] = {
+ .name = "drop_yellow_prio_6",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_7] = {
+ .name = "drop_yellow_prio_7",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_0] = {
+ .name = "drop_green_prio_0",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_1] = {
+ .name = "drop_green_prio_1",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_2] = {
+ .name = "drop_green_prio_2",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_3] = {
+ .name = "drop_green_prio_3",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_4] = {
+ .name = "drop_green_prio_4",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_5] = {
+ .name = "drop_green_prio_5",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_6] = {
+ .name = "drop_green_prio_6",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_7] = {
+ .name = "drop_green_prio_7",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_7,
+ },
};
static const struct vcap_field vsc9959_vcap_es0_keys[] = {
@@ -1137,6 +1487,7 @@ static void vsc9959_tas_min_gate_lengths(struct tc_taprio_qopt_offload *taprio,
{
struct tc_taprio_sched_entry *entry;
u64 gate_len[OCELOT_NUM_TC];
+ u8 gates_ever_opened = 0;
int tc, i, n;
/* Initialize arrays */
@@ -1164,16 +1515,28 @@ static void vsc9959_tas_min_gate_lengths(struct tc_taprio_qopt_offload *taprio,
for (tc = 0; tc < OCELOT_NUM_TC; tc++) {
if (entry->gate_mask & BIT(tc)) {
gate_len[tc] += entry->interval;
+ gates_ever_opened |= BIT(tc);
} else {
/* Gate closes now, record a potential new
* minimum and reinitialize length
*/
- if (min_gate_len[tc] > gate_len[tc])
+ if (min_gate_len[tc] > gate_len[tc] &&
+ gate_len[tc])
min_gate_len[tc] = gate_len[tc];
gate_len[tc] = 0;
}
}
}
+
+ /* min_gate_len[tc] actually tracks minimum *open* gate time, so for
+ * permanently closed gates, min_gate_len[tc] will still be U64_MAX.
+ * Therefore they are currently indistinguishable from permanently
+ * open gates. Overwrite the gate len with 0 when we know they're
+ * actually permanently closed, i.e. after the loop above.
+ */
+ for (tc = 0; tc < OCELOT_NUM_TC; tc++)
+ if (!(gates_ever_opened & BIT(tc)))
+ min_gate_len[tc] = 0;
}
/* Update QSYS_PORT_MAX_SDU to make sure the static guard bands added by the
@@ -2153,7 +2516,7 @@ static void vsc9959_psfp_sgi_table_del(struct ocelot *ocelot,
static void vsc9959_psfp_counters_get(struct ocelot *ocelot, u32 index,
struct felix_stream_filter_counters *counters)
{
- mutex_lock(&ocelot->stats_lock);
+ spin_lock(&ocelot->stats_lock);
ocelot_rmw(ocelot, SYS_STAT_CFG_STAT_VIEW(index),
SYS_STAT_CFG_STAT_VIEW_M,
@@ -2170,7 +2533,7 @@ static void vsc9959_psfp_counters_get(struct ocelot *ocelot, u32 index,
SYS_STAT_CFG_STAT_CLEAR_SHOT(0x10),
SYS_STAT_CFG);
- mutex_unlock(&ocelot->stats_lock);
+ spin_unlock(&ocelot->stats_lock);
}
static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port,
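
Annotation (not part of the patch): besides converting the statistics layout to an array indexed by OCELOT_STAT_* and switching the PSFP counter read from mutex_lock() to spin_lock() on ocelot->stats_lock, this file fixes vsc9959_tas_min_gate_lengths() so a traffic class whose gate never opens ends up with a minimum gate length of 0 rather than staying at U64_MAX and looking permanently open. A simplified plain-C sketch of that computation follows; the schedule layout is hypothetical and the wrap-around handling the driver also performs is left out.

/* Illustrative sketch only -- not the driver code. */
#include <stdint.h>

#define NUM_TC 8

struct sched_entry {
	uint8_t gate_mask;	/* one bit per traffic class */
	uint64_t interval;	/* time the entry lasts */
};

static void min_gate_lengths(const struct sched_entry *e, int n,
			     uint64_t min_len[NUM_TC])
{
	uint64_t gate_len[NUM_TC] = { 0 };
	uint8_t ever_opened = 0;
	int i, tc;

	for (tc = 0; tc < NUM_TC; tc++)
		min_len[tc] = UINT64_MAX;	/* "always open" until proven otherwise */

	for (i = 0; i < n; i++) {
		for (tc = 0; tc < NUM_TC; tc++) {
			if (e[i].gate_mask & (1u << tc)) {
				gate_len[tc] += e[i].interval;
				ever_opened |= 1u << tc;
			} else if (gate_len[tc]) {
				/* gate just closed: record a candidate minimum */
				if (gate_len[tc] < min_len[tc])
					min_len[tc] = gate_len[tc];
				gate_len[tc] = 0;
			}
		}
	}

	/* a gate that never opened is closed, not "always open" */
	for (tc = 0; tc < NUM_TC; tc++)
		if (!(ever_opened & (1u << tc)))
			min_len[tc] = 0;
}
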
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index ea0649211356..b34f4cdfe814 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -270,27 +270,98 @@ static const u32 vsc9953_rew_regmap[] = {
static const u32 vsc9953_sys_regmap[] = {
REG(SYS_COUNT_RX_OCTETS, 0x000000),
+ REG(SYS_COUNT_RX_UNICAST, 0x000004),
REG(SYS_COUNT_RX_MULTICAST, 0x000008),
+ REG(SYS_COUNT_RX_BROADCAST, 0x00000c),
REG(SYS_COUNT_RX_SHORTS, 0x000010),
REG(SYS_COUNT_RX_FRAGMENTS, 0x000014),
REG(SYS_COUNT_RX_JABBERS, 0x000018),
+ REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c),
+ REG(SYS_COUNT_RX_SYM_ERRS, 0x000020),
REG(SYS_COUNT_RX_64, 0x000024),
REG(SYS_COUNT_RX_65_127, 0x000028),
REG(SYS_COUNT_RX_128_255, 0x00002c),
- REG(SYS_COUNT_RX_256_1023, 0x000030),
- REG(SYS_COUNT_RX_1024_1526, 0x000034),
- REG(SYS_COUNT_RX_1527_MAX, 0x000038),
+ REG(SYS_COUNT_RX_256_511, 0x000030),
+ REG(SYS_COUNT_RX_512_1023, 0x000034),
+ REG(SYS_COUNT_RX_1024_1526, 0x000038),
+ REG(SYS_COUNT_RX_1527_MAX, 0x00003c),
+ REG(SYS_COUNT_RX_PAUSE, 0x000040),
+ REG(SYS_COUNT_RX_CONTROL, 0x000044),
REG(SYS_COUNT_RX_LONGS, 0x000048),
+ REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x00004c),
+ REG(SYS_COUNT_RX_RED_PRIO_0, 0x000050),
+ REG(SYS_COUNT_RX_RED_PRIO_1, 0x000054),
+ REG(SYS_COUNT_RX_RED_PRIO_2, 0x000058),
+ REG(SYS_COUNT_RX_RED_PRIO_3, 0x00005c),
+ REG(SYS_COUNT_RX_RED_PRIO_4, 0x000060),
+ REG(SYS_COUNT_RX_RED_PRIO_5, 0x000064),
+ REG(SYS_COUNT_RX_RED_PRIO_6, 0x000068),
+ REG(SYS_COUNT_RX_RED_PRIO_7, 0x00006c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_0, 0x000070),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_1, 0x000074),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_2, 0x000078),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_3, 0x00007c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_4, 0x000080),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_5, 0x000084),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_6, 0x000088),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_7, 0x00008c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_0, 0x000090),
+ REG(SYS_COUNT_RX_GREEN_PRIO_1, 0x000094),
+ REG(SYS_COUNT_RX_GREEN_PRIO_2, 0x000098),
+ REG(SYS_COUNT_RX_GREEN_PRIO_3, 0x00009c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_4, 0x0000a0),
+ REG(SYS_COUNT_RX_GREEN_PRIO_5, 0x0000a4),
+ REG(SYS_COUNT_RX_GREEN_PRIO_6, 0x0000a8),
+ REG(SYS_COUNT_RX_GREEN_PRIO_7, 0x0000ac),
REG(SYS_COUNT_TX_OCTETS, 0x000100),
+ REG(SYS_COUNT_TX_UNICAST, 0x000104),
+ REG(SYS_COUNT_TX_MULTICAST, 0x000108),
+ REG(SYS_COUNT_TX_BROADCAST, 0x00010c),
REG(SYS_COUNT_TX_COLLISION, 0x000110),
REG(SYS_COUNT_TX_DROPS, 0x000114),
+ REG(SYS_COUNT_TX_PAUSE, 0x000118),
REG(SYS_COUNT_TX_64, 0x00011c),
REG(SYS_COUNT_TX_65_127, 0x000120),
- REG(SYS_COUNT_TX_128_511, 0x000124),
- REG(SYS_COUNT_TX_512_1023, 0x000128),
- REG(SYS_COUNT_TX_1024_1526, 0x00012c),
- REG(SYS_COUNT_TX_1527_MAX, 0x000130),
+ REG(SYS_COUNT_TX_128_255, 0x000124),
+ REG(SYS_COUNT_TX_256_511, 0x000128),
+ REG(SYS_COUNT_TX_512_1023, 0x00012c),
+ REG(SYS_COUNT_TX_1024_1526, 0x000130),
+ REG(SYS_COUNT_TX_1527_MAX, 0x000134),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_0, 0x000138),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_1, 0x00013c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_2, 0x000140),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_3, 0x000144),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_4, 0x000148),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_5, 0x00014c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_6, 0x000150),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_7, 0x000154),
+ REG(SYS_COUNT_TX_GREEN_PRIO_0, 0x000158),
+ REG(SYS_COUNT_TX_GREEN_PRIO_1, 0x00015c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_2, 0x000160),
+ REG(SYS_COUNT_TX_GREEN_PRIO_3, 0x000164),
+ REG(SYS_COUNT_TX_GREEN_PRIO_4, 0x000168),
+ REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00016c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000170),
+ REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000174),
REG(SYS_COUNT_TX_AGING, 0x000178),
+ REG(SYS_COUNT_DROP_LOCAL, 0x000200),
+ REG(SYS_COUNT_DROP_TAIL, 0x000204),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000208),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_1, 0x00020c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_2, 0x000210),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_3, 0x000214),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000218),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00021c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000220),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000224),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000228),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00022c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000230),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000234),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000238),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00023c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000240),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000244),
REG(SYS_RESET_CFG, 0x000318),
REG_RESERVED(SYS_SR_ETYPE_CFG),
REG(SYS_VLAN_ETYPE_CFG, 0x000320),
@@ -543,101 +614,379 @@ static const struct reg_field vsc9953_regfields[REGFIELD_MAX] = {
[SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 11, 4),
};
-static const struct ocelot_stat_layout vsc9953_stats_layout[] = {
- { .offset = 0x00, .name = "rx_octets", },
- { .offset = 0x01, .name = "rx_unicast", },
- { .offset = 0x02, .name = "rx_multicast", },
- { .offset = 0x03, .name = "rx_broadcast", },
- { .offset = 0x04, .name = "rx_shorts", },
- { .offset = 0x05, .name = "rx_fragments", },
- { .offset = 0x06, .name = "rx_jabbers", },
- { .offset = 0x07, .name = "rx_crc_align_errs", },
- { .offset = 0x08, .name = "rx_sym_errs", },
- { .offset = 0x09, .name = "rx_frames_below_65_octets", },
- { .offset = 0x0A, .name = "rx_frames_65_to_127_octets", },
- { .offset = 0x0B, .name = "rx_frames_128_to_255_octets", },
- { .offset = 0x0C, .name = "rx_frames_256_to_511_octets", },
- { .offset = 0x0D, .name = "rx_frames_512_to_1023_octets", },
- { .offset = 0x0E, .name = "rx_frames_1024_to_1526_octets", },
- { .offset = 0x0F, .name = "rx_frames_over_1526_octets", },
- { .offset = 0x10, .name = "rx_pause", },
- { .offset = 0x11, .name = "rx_control", },
- { .offset = 0x12, .name = "rx_longs", },
- { .offset = 0x13, .name = "rx_classified_drops", },
- { .offset = 0x14, .name = "rx_red_prio_0", },
- { .offset = 0x15, .name = "rx_red_prio_1", },
- { .offset = 0x16, .name = "rx_red_prio_2", },
- { .offset = 0x17, .name = "rx_red_prio_3", },
- { .offset = 0x18, .name = "rx_red_prio_4", },
- { .offset = 0x19, .name = "rx_red_prio_5", },
- { .offset = 0x1A, .name = "rx_red_prio_6", },
- { .offset = 0x1B, .name = "rx_red_prio_7", },
- { .offset = 0x1C, .name = "rx_yellow_prio_0", },
- { .offset = 0x1D, .name = "rx_yellow_prio_1", },
- { .offset = 0x1E, .name = "rx_yellow_prio_2", },
- { .offset = 0x1F, .name = "rx_yellow_prio_3", },
- { .offset = 0x20, .name = "rx_yellow_prio_4", },
- { .offset = 0x21, .name = "rx_yellow_prio_5", },
- { .offset = 0x22, .name = "rx_yellow_prio_6", },
- { .offset = 0x23, .name = "rx_yellow_prio_7", },
- { .offset = 0x24, .name = "rx_green_prio_0", },
- { .offset = 0x25, .name = "rx_green_prio_1", },
- { .offset = 0x26, .name = "rx_green_prio_2", },
- { .offset = 0x27, .name = "rx_green_prio_3", },
- { .offset = 0x28, .name = "rx_green_prio_4", },
- { .offset = 0x29, .name = "rx_green_prio_5", },
- { .offset = 0x2A, .name = "rx_green_prio_6", },
- { .offset = 0x2B, .name = "rx_green_prio_7", },
- { .offset = 0x40, .name = "tx_octets", },
- { .offset = 0x41, .name = "tx_unicast", },
- { .offset = 0x42, .name = "tx_multicast", },
- { .offset = 0x43, .name = "tx_broadcast", },
- { .offset = 0x44, .name = "tx_collision", },
- { .offset = 0x45, .name = "tx_drops", },
- { .offset = 0x46, .name = "tx_pause", },
- { .offset = 0x47, .name = "tx_frames_below_65_octets", },
- { .offset = 0x48, .name = "tx_frames_65_to_127_octets", },
- { .offset = 0x49, .name = "tx_frames_128_255_octets", },
- { .offset = 0x4A, .name = "tx_frames_256_511_octets", },
- { .offset = 0x4B, .name = "tx_frames_512_1023_octets", },
- { .offset = 0x4C, .name = "tx_frames_1024_1526_octets", },
- { .offset = 0x4D, .name = "tx_frames_over_1526_octets", },
- { .offset = 0x4E, .name = "tx_yellow_prio_0", },
- { .offset = 0x4F, .name = "tx_yellow_prio_1", },
- { .offset = 0x50, .name = "tx_yellow_prio_2", },
- { .offset = 0x51, .name = "tx_yellow_prio_3", },
- { .offset = 0x52, .name = "tx_yellow_prio_4", },
- { .offset = 0x53, .name = "tx_yellow_prio_5", },
- { .offset = 0x54, .name = "tx_yellow_prio_6", },
- { .offset = 0x55, .name = "tx_yellow_prio_7", },
- { .offset = 0x56, .name = "tx_green_prio_0", },
- { .offset = 0x57, .name = "tx_green_prio_1", },
- { .offset = 0x58, .name = "tx_green_prio_2", },
- { .offset = 0x59, .name = "tx_green_prio_3", },
- { .offset = 0x5A, .name = "tx_green_prio_4", },
- { .offset = 0x5B, .name = "tx_green_prio_5", },
- { .offset = 0x5C, .name = "tx_green_prio_6", },
- { .offset = 0x5D, .name = "tx_green_prio_7", },
- { .offset = 0x5E, .name = "tx_aged", },
- { .offset = 0x80, .name = "drop_local", },
- { .offset = 0x81, .name = "drop_tail", },
- { .offset = 0x82, .name = "drop_yellow_prio_0", },
- { .offset = 0x83, .name = "drop_yellow_prio_1", },
- { .offset = 0x84, .name = "drop_yellow_prio_2", },
- { .offset = 0x85, .name = "drop_yellow_prio_3", },
- { .offset = 0x86, .name = "drop_yellow_prio_4", },
- { .offset = 0x87, .name = "drop_yellow_prio_5", },
- { .offset = 0x88, .name = "drop_yellow_prio_6", },
- { .offset = 0x89, .name = "drop_yellow_prio_7", },
- { .offset = 0x8A, .name = "drop_green_prio_0", },
- { .offset = 0x8B, .name = "drop_green_prio_1", },
- { .offset = 0x8C, .name = "drop_green_prio_2", },
- { .offset = 0x8D, .name = "drop_green_prio_3", },
- { .offset = 0x8E, .name = "drop_green_prio_4", },
- { .offset = 0x8F, .name = "drop_green_prio_5", },
- { .offset = 0x90, .name = "drop_green_prio_6", },
- { .offset = 0x91, .name = "drop_green_prio_7", },
- OCELOT_STAT_END
+static const struct ocelot_stat_layout vsc9953_stats_layout[OCELOT_NUM_STATS] = {
+ [OCELOT_STAT_RX_OCTETS] = {
+ .name = "rx_octets",
+ .reg = SYS_COUNT_RX_OCTETS,
+ },
+ [OCELOT_STAT_RX_UNICAST] = {
+ .name = "rx_unicast",
+ .reg = SYS_COUNT_RX_UNICAST,
+ },
+ [OCELOT_STAT_RX_MULTICAST] = {
+ .name = "rx_multicast",
+ .reg = SYS_COUNT_RX_MULTICAST,
+ },
+ [OCELOT_STAT_RX_BROADCAST] = {
+ .name = "rx_broadcast",
+ .reg = SYS_COUNT_RX_BROADCAST,
+ },
+ [OCELOT_STAT_RX_SHORTS] = {
+ .name = "rx_shorts",
+ .reg = SYS_COUNT_RX_SHORTS,
+ },
+ [OCELOT_STAT_RX_FRAGMENTS] = {
+ .name = "rx_fragments",
+ .reg = SYS_COUNT_RX_FRAGMENTS,
+ },
+ [OCELOT_STAT_RX_JABBERS] = {
+ .name = "rx_jabbers",
+ .reg = SYS_COUNT_RX_JABBERS,
+ },
+ [OCELOT_STAT_RX_CRC_ALIGN_ERRS] = {
+ .name = "rx_crc_align_errs",
+ .reg = SYS_COUNT_RX_CRC_ALIGN_ERRS,
+ },
+ [OCELOT_STAT_RX_SYM_ERRS] = {
+ .name = "rx_sym_errs",
+ .reg = SYS_COUNT_RX_SYM_ERRS,
+ },
+ [OCELOT_STAT_RX_64] = {
+ .name = "rx_frames_below_65_octets",
+ .reg = SYS_COUNT_RX_64,
+ },
+ [OCELOT_STAT_RX_65_127] = {
+ .name = "rx_frames_65_to_127_octets",
+ .reg = SYS_COUNT_RX_65_127,
+ },
+ [OCELOT_STAT_RX_128_255] = {
+ .name = "rx_frames_128_to_255_octets",
+ .reg = SYS_COUNT_RX_128_255,
+ },
+ [OCELOT_STAT_RX_256_511] = {
+ .name = "rx_frames_256_to_511_octets",
+ .reg = SYS_COUNT_RX_256_511,
+ },
+ [OCELOT_STAT_RX_512_1023] = {
+ .name = "rx_frames_512_to_1023_octets",
+ .reg = SYS_COUNT_RX_512_1023,
+ },
+ [OCELOT_STAT_RX_1024_1526] = {
+ .name = "rx_frames_1024_to_1526_octets",
+ .reg = SYS_COUNT_RX_1024_1526,
+ },
+ [OCELOT_STAT_RX_1527_MAX] = {
+ .name = "rx_frames_over_1526_octets",
+ .reg = SYS_COUNT_RX_1527_MAX,
+ },
+ [OCELOT_STAT_RX_PAUSE] = {
+ .name = "rx_pause",
+ .reg = SYS_COUNT_RX_PAUSE,
+ },
+ [OCELOT_STAT_RX_CONTROL] = {
+ .name = "rx_control",
+ .reg = SYS_COUNT_RX_CONTROL,
+ },
+ [OCELOT_STAT_RX_LONGS] = {
+ .name = "rx_longs",
+ .reg = SYS_COUNT_RX_LONGS,
+ },
+ [OCELOT_STAT_RX_CLASSIFIED_DROPS] = {
+ .name = "rx_classified_drops",
+ .reg = SYS_COUNT_RX_CLASSIFIED_DROPS,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_0] = {
+ .name = "rx_red_prio_0",
+ .reg = SYS_COUNT_RX_RED_PRIO_0,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_1] = {
+ .name = "rx_red_prio_1",
+ .reg = SYS_COUNT_RX_RED_PRIO_1,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_2] = {
+ .name = "rx_red_prio_2",
+ .reg = SYS_COUNT_RX_RED_PRIO_2,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_3] = {
+ .name = "rx_red_prio_3",
+ .reg = SYS_COUNT_RX_RED_PRIO_3,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_4] = {
+ .name = "rx_red_prio_4",
+ .reg = SYS_COUNT_RX_RED_PRIO_4,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_5] = {
+ .name = "rx_red_prio_5",
+ .reg = SYS_COUNT_RX_RED_PRIO_5,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_6] = {
+ .name = "rx_red_prio_6",
+ .reg = SYS_COUNT_RX_RED_PRIO_6,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_7] = {
+ .name = "rx_red_prio_7",
+ .reg = SYS_COUNT_RX_RED_PRIO_7,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_0] = {
+ .name = "rx_yellow_prio_0",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_1] = {
+ .name = "rx_yellow_prio_1",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_2] = {
+ .name = "rx_yellow_prio_2",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_3] = {
+ .name = "rx_yellow_prio_3",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_4] = {
+ .name = "rx_yellow_prio_4",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_5] = {
+ .name = "rx_yellow_prio_5",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_6] = {
+ .name = "rx_yellow_prio_6",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_7] = {
+ .name = "rx_yellow_prio_7",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_0] = {
+ .name = "rx_green_prio_0",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_1] = {
+ .name = "rx_green_prio_1",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_2] = {
+ .name = "rx_green_prio_2",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_3] = {
+ .name = "rx_green_prio_3",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_4] = {
+ .name = "rx_green_prio_4",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_5] = {
+ .name = "rx_green_prio_5",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_6] = {
+ .name = "rx_green_prio_6",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_7] = {
+ .name = "rx_green_prio_7",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_7,
+ },
+ [OCELOT_STAT_TX_OCTETS] = {
+ .name = "tx_octets",
+ .reg = SYS_COUNT_TX_OCTETS,
+ },
+ [OCELOT_STAT_TX_UNICAST] = {
+ .name = "tx_unicast",
+ .reg = SYS_COUNT_TX_UNICAST,
+ },
+ [OCELOT_STAT_TX_MULTICAST] = {
+ .name = "tx_multicast",
+ .reg = SYS_COUNT_TX_MULTICAST,
+ },
+ [OCELOT_STAT_TX_BROADCAST] = {
+ .name = "tx_broadcast",
+ .reg = SYS_COUNT_TX_BROADCAST,
+ },
+ [OCELOT_STAT_TX_COLLISION] = {
+ .name = "tx_collision",
+ .reg = SYS_COUNT_TX_COLLISION,
+ },
+ [OCELOT_STAT_TX_DROPS] = {
+ .name = "tx_drops",
+ .reg = SYS_COUNT_TX_DROPS,
+ },
+ [OCELOT_STAT_TX_PAUSE] = {
+ .name = "tx_pause",
+ .reg = SYS_COUNT_TX_PAUSE,
+ },
+ [OCELOT_STAT_TX_64] = {
+ .name = "tx_frames_below_65_octets",
+ .reg = SYS_COUNT_TX_64,
+ },
+ [OCELOT_STAT_TX_65_127] = {
+ .name = "tx_frames_65_to_127_octets",
+ .reg = SYS_COUNT_TX_65_127,
+ },
+ [OCELOT_STAT_TX_128_255] = {
+ .name = "tx_frames_128_255_octets",
+ .reg = SYS_COUNT_TX_128_255,
+ },
+ [OCELOT_STAT_TX_256_511] = {
+ .name = "tx_frames_256_511_octets",
+ .reg = SYS_COUNT_TX_256_511,
+ },
+ [OCELOT_STAT_TX_512_1023] = {
+ .name = "tx_frames_512_1023_octets",
+ .reg = SYS_COUNT_TX_512_1023,
+ },
+ [OCELOT_STAT_TX_1024_1526] = {
+ .name = "tx_frames_1024_1526_octets",
+ .reg = SYS_COUNT_TX_1024_1526,
+ },
+ [OCELOT_STAT_TX_1527_MAX] = {
+ .name = "tx_frames_over_1526_octets",
+ .reg = SYS_COUNT_TX_1527_MAX,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_0] = {
+ .name = "tx_yellow_prio_0",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_1] = {
+ .name = "tx_yellow_prio_1",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_2] = {
+ .name = "tx_yellow_prio_2",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_3] = {
+ .name = "tx_yellow_prio_3",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_4] = {
+ .name = "tx_yellow_prio_4",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_5] = {
+ .name = "tx_yellow_prio_5",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_6] = {
+ .name = "tx_yellow_prio_6",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_7] = {
+ .name = "tx_yellow_prio_7",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_0] = {
+ .name = "tx_green_prio_0",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_1] = {
+ .name = "tx_green_prio_1",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_2] = {
+ .name = "tx_green_prio_2",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_3] = {
+ .name = "tx_green_prio_3",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_4] = {
+ .name = "tx_green_prio_4",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_5] = {
+ .name = "tx_green_prio_5",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_6] = {
+ .name = "tx_green_prio_6",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_7] = {
+ .name = "tx_green_prio_7",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_7,
+ },
+ [OCELOT_STAT_TX_AGED] = {
+ .name = "tx_aged",
+ .reg = SYS_COUNT_TX_AGING,
+ },
+ [OCELOT_STAT_DROP_LOCAL] = {
+ .name = "drop_local",
+ .reg = SYS_COUNT_DROP_LOCAL,
+ },
+ [OCELOT_STAT_DROP_TAIL] = {
+ .name = "drop_tail",
+ .reg = SYS_COUNT_DROP_TAIL,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_0] = {
+ .name = "drop_yellow_prio_0",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_1] = {
+ .name = "drop_yellow_prio_1",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_2] = {
+ .name = "drop_yellow_prio_2",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_3] = {
+ .name = "drop_yellow_prio_3",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_4] = {
+ .name = "drop_yellow_prio_4",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_5] = {
+ .name = "drop_yellow_prio_5",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_6] = {
+ .name = "drop_yellow_prio_6",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_7] = {
+ .name = "drop_yellow_prio_7",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_0] = {
+ .name = "drop_green_prio_0",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_1] = {
+ .name = "drop_green_prio_1",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_2] = {
+ .name = "drop_green_prio_2",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_3] = {
+ .name = "drop_green_prio_3",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_4] = {
+ .name = "drop_green_prio_4",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_5] = {
+ .name = "drop_green_prio_5",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_6] = {
+ .name = "drop_green_prio_6",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_7] = {
+ .name = "drop_green_prio_7",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_7,
+ },
};
static const struct vcap_field vsc9953_vcap_es0_keys[] = {
diff --git a/drivers/net/dsa/sja1105/sja1105_devlink.c b/drivers/net/dsa/sja1105/sja1105_devlink.c
index 0569ff066634..10c6fea1227f 100644
--- a/drivers/net/dsa/sja1105/sja1105_devlink.c
+++ b/drivers/net/dsa/sja1105/sja1105_devlink.c
@@ -93,7 +93,7 @@ static int sja1105_setup_devlink_regions(struct dsa_switch *ds)
region = dsa_devlink_region_create(ds, ops, 1, size);
if (IS_ERR(region)) {
- while (i-- >= 0)
+ while (--i >= 0)
dsa_devlink_region_destroy(priv->regions[i]);
return PTR_ERR(region);
}
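
Annotation (not part of the patch): with the post-decrement form, the unwind loop above still ran once after i reached 0, evaluating 0 >= 0, dropping i to -1 and then destroying priv->regions[-1], an out-of-bounds access; the pre-decrement form stops at index 0. A minimal stand-alone illustration of the two forms follows, with a hypothetical destroy() helper in place of dsa_devlink_region_destroy().

/* Illustrative sketch only -- hypothetical resources, not the sja1105 code.
 * Creation failed at index i, so exactly entries 0..i-1 exist and must be
 * torn down.
 */
#include <stdio.h>

static void destroy(int idx)
{
	printf("destroy %d\n", idx);
}

static void unwind(int i)
{
	/* buggy:  while (i-- >= 0) destroy(i);
	 *   the last pass tests 0 >= 0, decrements i to -1 and then uses
	 *   index -1, one slot before the array.
	 * fixed:  pre-decrement never lets the index go below 0.
	 */
	while (--i >= 0)
		destroy(i);
}

int main(void)
{
	unwind(3);	/* tears down 2, 1, 0 and nothing else */
	return 0;
}
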
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index e11cc29d3264..06508eebb585 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -265,12 +265,10 @@ static void aq_nic_service_timer_cb(struct timer_list *t)
static void aq_nic_polling_timer_cb(struct timer_list *t)
{
struct aq_nic_s *self = from_timer(self, t, polling_timer);
- struct aq_vec_s *aq_vec = NULL;
unsigned int i = 0U;
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
- aq_vec_isr(i, (void *)aq_vec);
+ for (i = 0U; self->aq_vecs > i; ++i)
+ aq_vec_isr(i, (void *)self->aq_vec[i]);
mod_timer(&self->polling_timer, jiffies +
AQ_CFG_POLLING_TIMER_INTERVAL);
@@ -1014,7 +1012,6 @@ int aq_nic_get_regs_count(struct aq_nic_s *self)
u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
- struct aq_vec_s *aq_vec = NULL;
struct aq_stats_s *stats;
unsigned int count = 0U;
unsigned int i = 0U;
@@ -1064,11 +1061,11 @@ u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
data += i;
for (tc = 0U; tc < self->aq_nic_cfg.tcs; tc++) {
- for (i = 0U, aq_vec = self->aq_vec[0];
- aq_vec && self->aq_vecs > i;
- ++i, aq_vec = self->aq_vec[i]) {
+ for (i = 0U; self->aq_vecs > i; ++i) {
+ if (!self->aq_vec[i])
+ break;
data += count;
- count = aq_vec_get_sw_stats(aq_vec, tc, data);
+ count = aq_vec_get_sw_stats(self->aq_vec[i], tc, data);
}
}
@@ -1382,7 +1379,6 @@ int aq_nic_set_loopback(struct aq_nic_s *self)
int aq_nic_stop(struct aq_nic_s *self)
{
- struct aq_vec_s *aq_vec = NULL;
unsigned int i = 0U;
netif_tx_disable(self->ndev);
@@ -1400,9 +1396,8 @@ int aq_nic_stop(struct aq_nic_s *self)
aq_ptp_irq_free(self);
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
- aq_vec_stop(aq_vec);
+ for (i = 0U; self->aq_vecs > i; ++i)
+ aq_vec_stop(self->aq_vec[i]);
aq_ptp_ring_stop(self);
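
Annotation (not part of the patch): the old aq_nic loops loaded aq_vec = self->aq_vec[i] in the for-statement's increment expression, i.e. before the self->aq_vecs > i test had rejected the new index, so the terminating step still dereferenced a slot past the last populated vector. Indexing inside the body, after the bound (and NULL) check, removes that read. A tiny sketch of the safer loop shape follows, using a hypothetical array rather than the driver's vector table.

/* Illustrative sketch only -- hypothetical array, not the aq_nic code. */
#include <stdio.h>

static void use(int v)
{
	printf("%d\n", v);
}

static void visit_all(const int *arr, int n)
{
	int i;

	/* buggy shape:
	 *   for (i = 0, v = arr[0]; i < n; ++i, v = arr[i])
	 * loads arr[i] for the new index before "i < n" rejects it, so the
	 * final step still touches arr[n].
	 */
	for (i = 0; i < n; i++)
		use(arr[i]);	/* bound check first, then index */
}

int main(void)
{
	int a[] = { 1, 2, 3 };

	visit_all(a, 3);
	return 0;
}
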
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 2dfc1e32bbb3..93580484a3f4 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -189,8 +189,8 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
}
slot->skb = skb;
- ring->end += nr_frags + 1;
netdev_sent_queue(net_dev, skb->len);
+ ring->end += nr_frags + 1;
wmb();
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 7071604f9984..02808513ffe4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -13844,7 +13844,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
/* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery
* Since some switches tend to reinit the AN process and clear the
- * the advertised BP/NP after ~2 seconds causing the KR2 to be disabled
+ * advertised BP/NP after ~2 seconds causing the KR2 to be disabled
* and recovered many times
*/
if (vars->check_kr2_recovery_cnt > 0) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index ba0f1ffac507..f46eefb5a029 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -11178,10 +11178,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
features &= ~NETIF_F_NTUPLE;
- if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
- features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
-
- if (!(bp->flags & BNXT_FLAG_TPA))
+ if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
if (!(features & NETIF_F_GRO))
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 075c6206325c..b1b17f911300 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -2130,6 +2130,7 @@ struct bnxt {
#define BNXT_DUMP_CRASH 1
struct bpf_prog *xdp_prog;
+ u8 xdp_has_frags;
struct bnxt_ptp_cfg *ptp_cfg;
u8 ptp_all_rx_tstamp;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 14df8cfc2946..a36803e79e92 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -21,7 +21,6 @@
#include "bnxt_ptp.h"
#include "bnxt_coredump.h"
#include "bnxt_nvm_defs.h"
-#include "bnxt_ethtool.h"
static void __bnxt_fw_recover(struct bnxt *bp)
{
@@ -1307,6 +1306,7 @@ int bnxt_dl_register(struct bnxt *bp)
if (rc)
goto err_dl_port_unreg;
+ devlink_set_features(dl, DEVLINK_F_RELOAD);
out:
devlink_register(dl);
return 0;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 730febd19330..a4cba7cb2783 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -623,7 +623,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
hw_resc->max_stat_ctxs -= le16_to_cpu(req->min_stat_ctx) * n;
hw_resc->max_vnics -= le16_to_cpu(req->min_vnics) * n;
if (bp->flags & BNXT_FLAG_CHIP_P5)
- hw_resc->max_irqs -= vf_msix * n;
+ hw_resc->max_nqs -= vf_msix;
rc = pf->active_vfs;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index f53387ed0167..c3065ec0a479 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -181,6 +181,7 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
struct xdp_buff *xdp)
{
struct bnxt_sw_rx_bd *rx_buf;
+ u32 buflen = PAGE_SIZE;
struct pci_dev *pdev;
dma_addr_t mapping;
u32 offset;
@@ -192,7 +193,10 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
mapping = rx_buf->mapping - bp->rx_dma_offset;
dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
- xdp_init_buff(xdp, BNXT_PAGE_MODE_BUF_SIZE + offset, &rxr->xdp_rxq);
+ if (bp->xdp_has_frags)
+ buflen = BNXT_PAGE_MODE_BUF_SIZE + offset;
+
+ xdp_init_buff(xdp, buflen, &rxr->xdp_rxq);
xdp_prepare_buff(xdp, *data_ptr - offset, offset, *len, false);
}
@@ -397,8 +401,10 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
return -EOPNOTSUPP;
}
- if (prog)
+ if (prog) {
tx_xdp = bp->rx_nr_rings;
+ bp->xdp_has_frags = prog->aux->xdp_has_frags;
+ }
tc = netdev_get_num_tc(dev);
if (!tc)
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index c888ddee1fc4..7ded559842e8 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -393,6 +393,9 @@ int bcmgenet_mii_probe(struct net_device *dev)
if (priv->internal_phy && !GENET_IS_V5(priv))
dev->phydev->irq = PHY_MAC_INTERRUPT;
+ /* Indicate that the MAC is responsible for PHY PM */
+ dev->phydev->mac_managed_pm = true;
+
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 84604aff53ce..89256b866840 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -243,7 +243,7 @@ static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
/*
* on rx, the iscsi pdu has to be < rx page size and the
- * the max rx data length programmed in TP
+ * max rx data length programmed in TP
*/
val = min(adapter->params.tp.rx_pg_size,
((t3_read_reg(adapter, A_TP_PARA_REG2)) >>
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 26433a62d7f0..fed5f93bf620 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -497,7 +497,7 @@ struct cpl_t5_pass_accept_rpl {
__be32 opt2;
__be64 opt0;
__be32 iss;
- __be32 rsvd[3];
+ __be32 rsvd;
};
struct cpl_act_open_req {
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index bfee0e4e54b1..da9973b711f4 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -1932,6 +1932,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
int data_len, qidx, ret = 0, mss;
struct tls_record_info *record;
struct chcr_ktls_info *tx_info;
+ struct net_device *tls_netdev;
struct tls_context *tls_ctx;
struct sge_eth_txq *q;
struct adapter *adap;
@@ -1945,7 +1946,12 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
mss = skb_is_gso(skb) ? skb_shinfo(skb)->gso_size : data_len;
tls_ctx = tls_get_ctx(skb->sk);
- if (unlikely(tls_ctx->netdev != dev))
+ tls_netdev = rcu_dereference_bh(tls_ctx->netdev);
+ /* Don't quit on NULL: if tls_device_down is running in parallel,
+ * netdev might become NULL, even if tls_is_sk_tx_device_offloaded was
+ * true. Rather continue processing this packet.
+ */
+ if (unlikely(tls_netdev && tls_netdev != dev))
goto out;
tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
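
Annotation (not part of the patch): the hunk above dereferences tls_ctx->netdev once under RCU and compares that snapshot, so a concurrent tls_device_down() turning the pointer NULL no longer makes the driver drop the packet; only a live pointer naming a different device aborts the transmit. The stand-alone C11 sketch below mimics that read-once-and-tolerate-NULL shape with an atomic pointer as a stand-in for the kernel's RCU-protected field; it is not the kernel API.

/* Illustrative stand-in only (plain C11) -- not the kernel's RCU code. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct net_device;			/* opaque for the sketch */

struct tls_ctx_like {
	_Atomic(struct net_device *) netdev;
};

static bool may_transmit(struct tls_ctx_like *ctx, struct net_device *dev)
{
	struct net_device *snap =
		atomic_load_explicit(&ctx->netdev, memory_order_acquire);

	/* NULL means an unbind is racing with us: keep transmitting.
	 * Only a live pointer that names another device is a hard stop. */
	return !snap || snap == dev;
}
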
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index cb069a0af7b9..a5f7152a1716 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -340,14 +340,14 @@ static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
return 0;
}
-static void tsnep_tx_unmap(struct tsnep_tx *tx, int count)
+static void tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
{
struct device *dmadev = tx->adapter->dmadev;
struct tsnep_tx_entry *entry;
int i;
for (i = 0; i < count; i++) {
- entry = &tx->entry[(tx->read + i) % TSNEP_RING_SIZE];
+ entry = &tx->entry[(index + i) % TSNEP_RING_SIZE];
if (entry->len) {
if (i == 0)
@@ -395,7 +395,7 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
retval = tsnep_tx_map(skb, tx, count);
if (retval != 0) {
- tsnep_tx_unmap(tx, count);
+ tsnep_tx_unmap(tx, tx->write, count);
dev_kfree_skb_any(entry->skb);
entry->skb = NULL;
@@ -464,7 +464,7 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
if (skb_shinfo(entry->skb)->nr_frags > 0)
count += skb_shinfo(entry->skb)->nr_frags;
- tsnep_tx_unmap(tx, count);
+ tsnep_tx_unmap(tx, tx->read, count);
if ((skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
(__le32_to_cpu(entry->desc_wb->properties) &
@@ -1282,7 +1282,7 @@ MODULE_DEVICE_TABLE(of, tsnep_of_match);
static struct platform_driver tsnep_driver = {
.driver = {
.name = TSNEP,
- .of_match_table = of_match_ptr(tsnep_of_match),
+ .of_match_table = tsnep_of_match,
},
.probe = tsnep_probe,
.remove = tsnep_remove,
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 45634579adb6..a770bab4d1ed 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2886,6 +2886,7 @@ static void dpaa_adjust_link(struct net_device *net_dev)
/* The Aquantia PHYs are capable of performing rate adaptation */
#define PHY_VEND_AQUANTIA 0x03a1b400
+#define PHY_VEND_AQUANTIA2 0x31c31c00
static int dpaa_phy_init(struct net_device *net_dev)
{
@@ -2893,6 +2894,7 @@ static int dpaa_phy_init(struct net_device *net_dev)
struct mac_device *mac_dev;
struct phy_device *phy_dev;
struct dpaa_priv *priv;
+ u32 phy_vendor;
priv = netdev_priv(net_dev);
mac_dev = priv->mac_dev;
@@ -2905,9 +2907,11 @@ static int dpaa_phy_init(struct net_device *net_dev)
return -ENODEV;
}
+ phy_vendor = phy_dev->drv->phy_id & GENMASK(31, 10);
/* Unless the PHY is capable of rate adaptation */
if (mac_dev->phy_if != PHY_INTERFACE_MODE_XGMII ||
- ((phy_dev->drv->phy_id & GENMASK(31, 10)) != PHY_VEND_AQUANTIA)) {
+ (phy_vendor != PHY_VEND_AQUANTIA &&
+ phy_vendor != PHY_VEND_AQUANTIA2)) {
/* remove any features not supported by the controller */
ethtool_convert_legacy_u32_to_link_mode(mask,
mac_dev->if_support);
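
The dpaa change compares the OUI part of the PHY ID (bits 31:10) against two Aquantia vendor prefixes instead of one. A small sketch of the same mask-and-compare, with GENMASK expanded so it builds outside the kernel; the vendor prefixes are copied from the hunk, the sample IDs are illustrative:

#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l)  (((~0u) << (l)) & (~0u >> (31 - (h))))

#define PHY_VEND_AQUANTIA   0x03a1b400u
#define PHY_VEND_AQUANTIA2  0x31c31c00u

static int phy_is_aquantia(uint32_t phy_id)
{
	uint32_t vendor = phy_id & GENMASK32(31, 10);

	return vendor == PHY_VEND_AQUANTIA || vendor == PHY_VEND_AQUANTIA2;
}

int main(void)
{
	printf("%d %d %d\n",
	       phy_is_aquantia(0x03a1b4e2),   /* first vendor prefix  -> 1 */
	       phy_is_aquantia(0x31c31c12),   /* second vendor prefix -> 1 */
	       phy_is_aquantia(0x004dd072));  /* something else       -> 0 */
	return 0;
}
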
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index cd9ec80522e7..75d51572693d 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -1660,8 +1660,8 @@ static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
buf_array[i] = addr;
/* tracing point */
- trace_dpaa2_eth_buf_seed(priv->net_dev,
- page, DPAA2_ETH_RX_BUF_RAW_SIZE,
+ trace_dpaa2_eth_buf_seed(priv->net_dev, page_address(page),
+ DPAA2_ETH_RX_BUF_RAW_SIZE,
addr, priv->rx_buf_size,
bpid);
}
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index ed7301b69169..0cebe4b63adb 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -634,6 +634,13 @@ struct fec_enet_private {
int pps_enable;
unsigned int next_counter;
+ struct {
+ struct timespec64 ts_phc;
+ u64 ns_sys;
+ u32 at_corr;
+ u8 at_inc_corr;
+ } ptp_saved_state;
+
u64 ethtool_stats[];
};
@@ -644,5 +651,8 @@ void fec_ptp_disable_hwts(struct net_device *ndev);
int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
+void fec_ptp_save_state(struct fec_enet_private *fep);
+int fec_ptp_restore_state(struct fec_enet_private *fep);
+
/****************************************************************************/
#endif /* FEC_H */
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index e8e2aa1e7f01..b0d60f898249 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -285,8 +285,11 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define FEC_MMFR_TA (2 << 16)
#define FEC_MMFR_DATA(v) (v & 0xffff)
/* FEC ECR bits definition */
-#define FEC_ECR_MAGICEN (1 << 2)
-#define FEC_ECR_SLEEP (1 << 3)
+#define FEC_ECR_RESET BIT(0)
+#define FEC_ECR_ETHEREN BIT(1)
+#define FEC_ECR_MAGICEN BIT(2)
+#define FEC_ECR_SLEEP BIT(3)
+#define FEC_ECR_EN1588 BIT(4)
#define FEC_MII_TIMEOUT 30000 /* us */
@@ -982,6 +985,9 @@ fec_restart(struct net_device *ndev)
u32 temp_mac[2];
u32 rcntl = OPT_FRAME_SIZE | 0x04;
u32 ecntl = 0x2; /* ETHEREN */
+ struct ptp_clock_request ptp_rq = { .type = PTP_CLK_REQ_PPS };
+
+ fec_ptp_save_state(fep);
/* Whack a reset. We should wait for this.
* For i.MX6SX SOC, enet use AXI bus, we use disable MAC
@@ -1135,7 +1141,7 @@ fec_restart(struct net_device *ndev)
}
if (fep->bufdesc_ex)
- ecntl |= (1 << 4);
+ ecntl |= FEC_ECR_EN1588;
if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
fep->rgmii_txc_dly)
@@ -1156,6 +1162,14 @@ fec_restart(struct net_device *ndev)
if (fep->bufdesc_ex)
fec_ptp_start_cyclecounter(ndev);
+ /* Restart PPS if needed */
+ if (fep->pps_enable) {
+ /* Clear flag so fec_ptp_enable_pps() doesn't return immediately */
+ fep->pps_enable = 0;
+ fec_ptp_restore_state(fep);
+ fep->ptp_caps.enable(&fep->ptp_caps, &ptp_rq, 1);
+ }
+
/* Enable interrupts we wish to service */
if (fep->link)
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
@@ -1206,6 +1220,8 @@ fec_stop(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
u32 val;
+ struct ptp_clock_request ptp_rq = { .type = PTP_CLK_REQ_PPS };
+ u32 ecntl = 0;
/* We cannot expect a graceful transmit stop without link !!! */
if (fep->link) {
@@ -1215,6 +1231,8 @@ fec_stop(struct net_device *ndev)
netdev_err(ndev, "Graceful transmit stop did not complete!\n");
}
+ fec_ptp_save_state(fep);
+
/* Whack a reset. We should wait for this.
* For i.MX6SX SOC, enet use AXI bus, we use disable MAC
* instead of reset MAC itself.
@@ -1234,12 +1252,28 @@ fec_stop(struct net_device *ndev)
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+ if (fep->bufdesc_ex)
+ ecntl |= FEC_ECR_EN1588;
+
/* We have to keep ENET enabled to have MII interrupt stay working */
if (fep->quirks & FEC_QUIRK_ENET_MAC &&
!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
- writel(2, fep->hwp + FEC_ECNTRL);
+ ecntl |= FEC_ECR_ETHEREN;
writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
}
+
+ writel(ecntl, fep->hwp + FEC_ECNTRL);
+
+ if (fep->bufdesc_ex)
+ fec_ptp_start_cyclecounter(ndev);
+
+ /* Restart PPS if needed */
+ if (fep->pps_enable) {
+ /* Clear flag so fec_ptp_enable_pps() doesn't return immediately */
+ fep->pps_enable = 0;
+ fec_ptp_restore_state(fep);
+ fep->ptp_caps.enable(&fep->ptp_caps, &ptp_rq, 1);
+ }
}
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 5ddb769bdfb4..a7f4c3c29f3e 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -924,7 +924,7 @@ static int mpc52xx_fec_probe(struct platform_device *op)
/* Start with safe defaults for link connection */
priv->speed = 100;
priv->duplex = DUPLEX_HALF;
- priv->mdio_speed = ((mpc5xxx_get_bus_frequency(np) >> 20) / 5) << 1;
+ priv->mdio_speed = ((mpc5xxx_get_bus_frequency(&op->dev) >> 20) / 5) << 1;
/* The current speed preconfigures the speed of the MII link */
prop = of_get_property(np, "current-speed", &prop_size);
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
index f85b5e81dfc1..95f778cce98c 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
@@ -100,8 +100,7 @@ static int mpc52xx_fec_mdio_probe(struct platform_device *of)
dev_set_drvdata(dev, bus);
/* set MII speed */
- out_be32(&priv->regs->mii_speed,
- ((mpc5xxx_get_bus_frequency(of->dev.of_node) >> 20) / 5) << 1);
+ out_be32(&priv->regs->mii_speed, ((mpc5xxx_get_bus_frequency(dev) >> 20) / 5) << 1);
err = of_mdiobus_register(bus, np);
if (err)
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 7d49c28215f3..c74d04f4b2fd 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -135,11 +135,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
* NSEC_PER_SEC - ts.tv_nsec. Add the remaining nanoseconds
* to current timer would be next second.
*/
- tempval = readl(fep->hwp + FEC_ATIME_CTRL);
- tempval |= FEC_T_CTRL_CAPTURE;
- writel(tempval, fep->hwp + FEC_ATIME_CTRL);
-
- tempval = readl(fep->hwp + FEC_ATIME);
+ tempval = fep->cc.read(&fep->cc);
/* Convert the ptp local counter to 1588 timestamp */
ns = timecounter_cyc2time(&fep->tc, tempval);
ts = ns_to_timespec64(ns);
@@ -637,7 +633,36 @@ void fec_ptp_stop(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
+ if (fep->pps_enable)
+ fec_ptp_enable_pps(fep, 0);
+
cancel_delayed_work_sync(&fep->time_keep);
if (fep->ptp_clock)
ptp_clock_unregister(fep->ptp_clock);
}
+
+void fec_ptp_save_state(struct fec_enet_private *fep)
+{
+ u32 atime_inc_corr;
+
+ fec_ptp_gettime(&fep->ptp_caps, &fep->ptp_saved_state.ts_phc);
+ fep->ptp_saved_state.ns_sys = ktime_get_ns();
+
+ fep->ptp_saved_state.at_corr = readl(fep->hwp + FEC_ATIME_CORR);
+ atime_inc_corr = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_CORR_MASK;
+ fep->ptp_saved_state.at_inc_corr = (u8)(atime_inc_corr >> FEC_T_INC_CORR_OFFSET);
+}
+
+int fec_ptp_restore_state(struct fec_enet_private *fep)
+{
+ u32 atime_inc = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
+ u64 ns_sys;
+
+ writel(fep->ptp_saved_state.at_corr, fep->hwp + FEC_ATIME_CORR);
+ atime_inc |= ((u32)fep->ptp_saved_state.at_inc_corr) << FEC_T_INC_CORR_OFFSET;
+ writel(atime_inc, fep->hwp + FEC_ATIME_INC);
+
+ ns_sys = ktime_get_ns() - fep->ptp_saved_state.ns_sys;
+ timespec64_add_ns(&fep->ptp_saved_state.ts_phc, ns_sys);
+ return fec_ptp_settime(&fep->ptp_caps, &fep->ptp_saved_state.ts_phc);
+}
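
fec_ptp_save_state() pairs the PHC time with a monotonic system timestamp, and fec_ptp_restore_state() adds the elapsed monotonic nanoseconds back onto the saved PHC time before writing it, so the clock does not lose the time spent across the MAC reset. A stand-alone sketch of that compensation using CLOCK_MONOTONIC; the structure and function names here are invented, and the real driver goes through its register accessors and fec_ptp_settime():

#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct saved_state {
	struct timespec ts_phc;	/* PHC time at save */
	uint64_t ns_sys;	/* monotonic ns at save */
};

static uint64_t mono_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static void save_state(struct saved_state *s, struct timespec phc_now)
{
	s->ts_phc = phc_now;
	s->ns_sys = mono_ns();
}

/* Returns the PHC time to program after the reset. */
static struct timespec restore_state(const struct saved_state *s)
{
	uint64_t elapsed = mono_ns() - s->ns_sys;
	struct timespec ts = s->ts_phc;

	ts.tv_nsec += elapsed % 1000000000ull;
	ts.tv_sec += elapsed / 1000000000ull + ts.tv_nsec / 1000000000;
	ts.tv_nsec %= 1000000000;
	return ts;
}

int main(void)
{
	struct saved_state s;
	struct timespec phc = { .tv_sec = 1000, .tv_nsec = 900000000 };

	save_state(&s, phc);
	/* ... MAC reset happens here ... */
	phc = restore_state(&s);
	printf("restored: %lld.%09ld\n", (long long)phc.tv_sec, phc.tv_nsec);
	return 0;
}
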
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index 152f4d83765a..d37d7a19a759 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -102,7 +102,7 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev)
struct resource res;
struct mii_bus *new_bus;
struct fec_info *fec;
- int (*get_bus_freq)(struct device_node *);
+ int (*get_bus_freq)(struct device *);
int ret = -ENOMEM, clock, speed;
match = of_match_device(fs_enet_mdio_fec_match, &ofdev->dev);
@@ -136,7 +136,7 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev)
}
if (get_bus_freq) {
- clock = get_bus_freq(ofdev->dev.of_node);
+ clock = get_bus_freq(&ofdev->dev);
if (!clock) {
/* Use maximum divider if clock is unknown */
dev_warn(&ofdev->dev, "could not determine IPS clock\n");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 156e92c43780..e9cd0fa6a0d2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -4485,7 +4485,7 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
(struct in6_addr *)&ipv6_full_mask))
new_mask |= I40E_L3_V6_DST_MASK;
else if (ipv6_addr_any((struct in6_addr *)
- &usr_ip6_spec->ip6src))
+ &usr_ip6_spec->ip6dst))
new_mask &= ~I40E_L3_V6_DST_MASK;
else
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index b36bf9c3e1e4..9f1d5de7bf16 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -384,7 +384,9 @@ static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
break;
default:
- netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
+ netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in non-recoverable state.\n");
+ set_bit(__I40E_DOWN_REQUESTED, pf->state);
+ set_bit(__I40E_VSI_DOWN_REQUESTED, vsi->state);
break;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index f6ba97a0166e..d4226161a3ef 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3203,11 +3203,13 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
protocol = vlan_get_protocol(skb);
- if (eth_p_mpls(protocol))
+ if (eth_p_mpls(protocol)) {
ip.hdr = skb_inner_network_header(skb);
- else
+ l4.hdr = skb_checksum_start(skb);
+ } else {
ip.hdr = skb_network_header(skb);
- l4.hdr = skb_checksum_start(skb);
+ l4.hdr = skb_transport_header(skb);
+ }
/* set the tx_flags to indicate the IP protocol type. this is
* required so that checksum header computation below is accurate.
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.c b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
index cd4e6a22d0f9..9ffbd24d83cb 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adminq.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
@@ -324,6 +324,7 @@ static enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
static enum iavf_status iavf_init_asq(struct iavf_hw *hw)
{
enum iavf_status ret_code = 0;
+ int i;
if (hw->aq.asq.count > 0) {
/* queue already initialized */
@@ -354,12 +355,17 @@ static enum iavf_status iavf_init_asq(struct iavf_hw *hw)
/* initialize base registers */
ret_code = iavf_config_asq_regs(hw);
if (ret_code)
- goto init_adminq_free_rings;
+ goto init_free_asq_bufs;
/* success! */
hw->aq.asq.count = hw->aq.num_asq_entries;
goto init_adminq_exit;
+init_free_asq_bufs:
+ for (i = 0; i < hw->aq.num_asq_entries; i++)
+ iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+ iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
+
init_adminq_free_rings:
iavf_free_adminq_asq(hw);
@@ -383,6 +389,7 @@ init_adminq_exit:
static enum iavf_status iavf_init_arq(struct iavf_hw *hw)
{
enum iavf_status ret_code = 0;
+ int i;
if (hw->aq.arq.count > 0) {
/* queue already initialized */
@@ -413,12 +420,16 @@ static enum iavf_status iavf_init_arq(struct iavf_hw *hw)
/* initialize base registers */
ret_code = iavf_config_arq_regs(hw);
if (ret_code)
- goto init_adminq_free_rings;
+ goto init_free_arq_bufs;
/* success! */
hw->aq.arq.count = hw->aq.num_arq_entries;
goto init_adminq_exit;
+init_free_arq_bufs:
+ for (i = 0; i < hw->aq.num_arq_entries; i++)
+ iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+ iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
init_adminq_free_rings:
iavf_free_adminq_arq(hw);
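
The iavf hunks add a dedicated error label that frees the per-entry DMA buffers when register configuration fails after the buffers were allocated, instead of only freeing the rings. A minimal sketch of that unwind-in-reverse pattern, with plain malloc() standing in for the DMA helpers and a simulated register failure:

#include <stdlib.h>

#define NUM_ENTRIES 4

static int config_regs_fail(void) { return -1; }	/* simulate failure */

static int init_queue(void **bufs)
{
	int ret, i;

	for (i = 0; i < NUM_ENTRIES; i++) {
		bufs[i] = malloc(64);
		if (!bufs[i]) {
			ret = -1;
			goto free_bufs;
		}
	}

	ret = config_regs_fail();
	if (ret)
		goto free_bufs;		/* new label: buffers exist, free them */

	return 0;

free_bufs:
	while (i--)
		free(bufs[i]);
	return ret;
}

int main(void)
{
	void *bufs[NUM_ENTRIES];

	return init_queue(bufs) ? 1 : 0;
}
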
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 45d097a164ad..f39440ad5c50 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -2367,7 +2367,7 @@ static void iavf_init_get_resources(struct iavf_adapter *adapter)
err = iavf_get_vf_config(adapter);
if (err == -EALREADY) {
err = iavf_send_vf_config_msg(adapter);
- goto err_alloc;
+ goto err;
} else if (err == -EINVAL) {
/* We only get -EINVAL if the device is in a very bad
* state or if we've been disabled for previous bad
@@ -3086,12 +3086,15 @@ continue_reset:
return;
reset_err:
+ if (running) {
+ set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
+ iavf_free_traffic_irqs(adapter);
+ }
+ iavf_disable_vf(adapter);
+
mutex_unlock(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
- if (running)
- iavf_change_state(adapter, __IAVF_RUNNING);
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
- iavf_close(netdev);
}
/**
@@ -4085,8 +4088,17 @@ static int iavf_open(struct net_device *netdev)
return -EIO;
}
- while (!mutex_trylock(&adapter->crit_lock))
+ while (!mutex_trylock(&adapter->crit_lock)) {
+ /* If we are in the __IAVF_INIT_CONFIG_ADAPTER state, the crit_lock
+ * is already taken and iavf_open is called from an upper
+ * device's notifier reacting to the NETDEV_REGISTER event.
+ * We have to leave here to avoid a deadlock.
+ */
+ if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
+ return -EBUSY;
+
usleep_range(500, 1000);
+ }
if (adapter->state != __IAVF_DOWN) {
err = -EBUSY;
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index cc5b85afd437..841fa149c407 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -684,8 +684,8 @@ static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
* ice_xsk_pool - get XSK buffer pool bound to a ring
* @ring: Rx ring to use
*
- * Returns a pointer to xdp_umem structure if there is a buffer pool present,
- * NULL otherwise.
+ * Returns a pointer to xsk_buff_pool structure if there is a buffer pool
+ * present, NULL otherwise.
*/
static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
{
@@ -699,23 +699,33 @@ static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
}
/**
- * ice_tx_xsk_pool - get XSK buffer pool bound to a ring
- * @ring: Tx ring to use
+ * ice_tx_xsk_pool - assign XSK buff pool to XDP ring
+ * @vsi: pointer to VSI
+ * @qid: index of a queue to look at XSK buff pool presence
*
- * Returns a pointer to xdp_umem structure if there is a buffer pool present,
- * NULL otherwise. Tx equivalent of ice_xsk_pool.
+ * Sets XSK buff pool pointer on XDP ring.
+ *
+ * The XDP ring is picked from the Rx ring, and the Rx ring is picked based on
+ * the provided queue id. The reason is that a queue vector might have more
+ * than one XDP ring assigned, e.g. when the user reduced the queue count on
+ * the netdev; the Rx ring carries a pointer to one of these XDP rings for its
+ * own purposes, such as handling the XDP_TX action, so we can piggyback on
+ * the rx_ring->xdp_ring assignment done during XDP ring initialization.
*/
-static inline struct xsk_buff_pool *ice_tx_xsk_pool(struct ice_tx_ring *ring)
+static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
{
- struct ice_vsi *vsi = ring->vsi;
- u16 qid;
+ struct ice_tx_ring *ring;
- qid = ring->q_index - vsi->alloc_txq;
+ ring = vsi->rx_rings[qid]->xdp_ring;
+ if (!ring)
+ return;
- if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
- return NULL;
+ if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) {
+ ring->xsk_pool = NULL;
+ return;
+ }
- return xsk_get_pool_from_qid(vsi->netdev, qid);
+ ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.c b/drivers/net/ethernet/intel/ice/ice_fltr.c
index 85a94483c2ed..40e678cfb507 100644
--- a/drivers/net/ethernet/intel/ice/ice_fltr.c
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.c
@@ -62,7 +62,7 @@ ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
int result;
result = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, false);
- if (result)
+ if (result && result != -EEXIST)
dev_err(ice_pf_to_dev(pf),
"Error setting promisc mode on VSI %i (rc=%d)\n",
vsi->vsi_num, result);
@@ -86,7 +86,7 @@ ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
int result;
result = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, true);
- if (result)
+ if (result && result != -EEXIST)
dev_err(ice_pf_to_dev(pf),
"Error clearing promisc mode on VSI %i (rc=%d)\n",
vsi->vsi_num, result);
@@ -109,7 +109,7 @@ ice_fltr_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
int result;
result = ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
- if (result)
+ if (result && result != -EEXIST)
dev_err(ice_pf_to_dev(pf),
"Error clearing promisc mode on VSI %i for VID %u (rc=%d)\n",
ice_get_hw_vsi_num(hw, vsi_handle), vid, result);
@@ -132,7 +132,7 @@ ice_fltr_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
int result;
result = ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
- if (result)
+ if (result && result != -EEXIST)
dev_err(ice_pf_to_dev(pf),
"Error setting promisc mode on VSI %i for VID %u (rc=%d)\n",
ice_get_hw_vsi_num(hw, vsi_handle), vid, result);
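
The ice_fltr helpers now treat -EEXIST from the promiscuous-mode calls as success, since a rule that is already present is not worth an error message. A tiny sketch of that tolerance; the helper names and the already_set flag are made up for illustration:

#include <errno.h>
#include <stdio.h>

static int set_promisc(int already_set)
{
	return already_set ? -EEXIST : 0;
}

static int set_promisc_tolerant(int already_set)
{
	int result = set_promisc(already_set);

	if (result && result != -EEXIST) {
		fprintf(stderr, "Error setting promisc mode (rc=%d)\n", result);
		return result;
	}
	return 0;	/* both "done" and "already done" count as success */
}

int main(void)
{
	printf("%d %d\n", set_promisc_tolerant(0), set_promisc_tolerant(1));
	return 0;
}
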
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index a830f7f9aed0..0c4ec9264071 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1986,8 +1986,8 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
if (ret)
return ret;
- ice_for_each_xdp_txq(vsi, i)
- vsi->xdp_rings[i]->xsk_pool = ice_tx_xsk_pool(vsi->xdp_rings[i]);
+ ice_for_each_rxq(vsi, i)
+ ice_tx_xsk_pool(vsi, i);
return ret;
}
@@ -3181,7 +3181,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
pf = vsi->back;
vtype = vsi->type;
- if (WARN_ON(vtype == ICE_VSI_VF) && !vsi->vf)
+ if (WARN_ON(vtype == ICE_VSI_VF && !vsi->vf))
return -EINVAL;
ice_vsi_init_vlan_ops(vsi);
@@ -4062,7 +4062,11 @@ int ice_vsi_del_vlan_zero(struct ice_vsi *vsi)
if (err && err != -EEXIST)
return err;
- return 0;
+ /* when deleting the last VLAN filter, make sure to disable the VLAN
+ * promisc mode so the filter isn't left by accident
+ */
+ return ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+ ICE_MCAST_VLAN_PROMISC_BITS, 0);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index eb40526ee179..173fe6c31341 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -267,8 +267,10 @@ static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
promisc_m, 0);
}
+ if (status && status != -EEXIST)
+ return status;
- return status;
+ return 0;
}
/**
@@ -2579,7 +2581,6 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
if (ice_setup_tx_ring(xdp_ring))
goto free_xdp_rings;
ice_set_ring_xdp(xdp_ring);
- xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
spin_lock_init(&xdp_ring->tx_lock);
for (j = 0; j < xdp_ring->count; j++) {
tx_desc = ICE_TX_DESC(xdp_ring, j);
@@ -2587,13 +2588,6 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
}
}
- ice_for_each_rxq(vsi, i) {
- if (static_key_enabled(&ice_xdp_locking_key))
- vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
- else
- vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i];
- }
-
return 0;
free_xdp_rings:
@@ -2683,6 +2677,23 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
xdp_rings_rem -= xdp_rings_per_v;
}
+ ice_for_each_rxq(vsi, i) {
+ if (static_key_enabled(&ice_xdp_locking_key)) {
+ vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
+ } else {
+ struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
+ struct ice_tx_ring *ring;
+
+ ice_for_each_tx_ring(ring, q_vector->tx) {
+ if (ice_ring_is_xdp(ring)) {
+ vsi->rx_rings[i]->xdp_ring = ring;
+ break;
+ }
+ }
+ }
+ ice_tx_xsk_pool(vsi, i);
+ }
+
/* omit the scheduler update if in reset path; XDP queues will be
* taken into account at the end of ice_vsi_rebuild, where
* ice_cfg_vsi_lan is being called
@@ -3573,6 +3584,14 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
usleep_range(1000, 2000);
+ ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+ ICE_MCAST_VLAN_PROMISC_BITS, vid);
+ if (ret) {
+ netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n",
+ vsi->vsi_num);
+ vsi->current_netdev_flags |= IFF_ALLMULTI;
+ }
+
vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
/* Make sure VLAN delete is successful before updating VLAN
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index fce204693dbb..3808034f7e7e 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -4445,6 +4445,13 @@ ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
goto free_fltr_list;
list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
+ /* Avoid enabling or disabling VLAN zero twice when in double
+ * VLAN mode
+ */
+ if (ice_is_dvm_ena(hw) &&
+ list_itr->fltr_info.l_data.vlan.tpid == 0)
+ continue;
+
vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
if (rm_vlan_promisc)
status = ice_clear_vsi_promisc(hw, vsi_handle,
@@ -4452,7 +4459,7 @@ ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
else
status = ice_set_vsi_promisc(hw, vsi_handle,
promisc_mask, vlan_id);
- if (status)
+ if (status && status != -EEXIST)
break;
}
@@ -4971,7 +4978,7 @@ ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
bitmap_zero(recipes, ICE_MAX_NUM_RECIPES);
bitmap_zero(used_idx, ICE_MAX_FV_WORDS);
- bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
+ bitmap_fill(possible_idx, ICE_MAX_FV_WORDS);
/* For each profile we are going to associate the recipe with, add the
* recipes that are associated with that profile. This will give us
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 8fd7c3e37f5e..0abeed092de1 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -571,8 +571,10 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
if (ice_is_vf_disabled(vf)) {
vsi = ice_get_vf_vsi(vf);
- if (WARN_ON(!vsi))
+ if (!vsi) {
+ dev_dbg(dev, "VF is already removed\n");
return -EINVAL;
+ }
ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
ice_vsi_stop_all_rx_rings(vsi);
dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
@@ -762,13 +764,16 @@ static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable)
static int ice_vsi_ena_spoofchk(struct ice_vsi *vsi)
{
struct ice_vsi_vlan_ops *vlan_ops;
- int err;
+ int err = 0;
vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
- err = vlan_ops->ena_tx_filtering(vsi);
- if (err)
- return err;
+ /* Allow VF with VLAN 0 only to send all tagged traffic */
+ if (vsi->type != ICE_VSI_VF || ice_vsi_has_non_zero_vlans(vsi)) {
+ err = vlan_ops->ena_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
return ice_cfg_mac_antispoof(vsi, true);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 094e3c97a1ea..2b4c791b6cba 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -2288,6 +2288,15 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
/* Enable VLAN filtering on first non-zero VLAN */
if (!vlan_promisc && vid && !ice_is_dvm_ena(&pf->hw)) {
+ if (vf->spoofchk) {
+ status = vsi->inner_vlan_ops.ena_tx_filtering(vsi);
+ if (status) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_err(dev, "Enable VLAN anti-spoofing on VLAN ID: %d failed error-%d\n",
+ vid, status);
+ goto error_param;
+ }
+ }
if (vsi->inner_vlan_ops.ena_rx_filtering(vsi)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
@@ -2333,8 +2342,10 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
}
/* Disable VLAN filtering when only VLAN 0 is left */
- if (!ice_vsi_has_non_zero_vlans(vsi))
+ if (!ice_vsi_has_non_zero_vlans(vsi)) {
+ vsi->inner_vlan_ops.dis_tx_filtering(vsi);
vsi->inner_vlan_ops.dis_rx_filtering(vsi);
+ }
if (vlan_promisc)
ice_vf_dis_vlan_promisc(vsi, &vlan);
@@ -2838,6 +2849,13 @@ ice_vc_del_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
if (vlan_promisc)
ice_vf_dis_vlan_promisc(vsi, &vlan);
+
+ /* Disable VLAN filtering when only VLAN 0 is left */
+ if (!ice_vsi_has_non_zero_vlans(vsi) && ice_is_dvm_ena(&vsi->back->hw)) {
+ err = vsi->outer_vlan_ops.dis_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
}
vc_vlan = &vlan_fltr->inner;
@@ -2853,8 +2871,17 @@ ice_vc_del_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
/* no support for VLAN promiscuous on inner VLAN unless
* we are in Single VLAN Mode (SVM)
*/
- if (!ice_is_dvm_ena(&vsi->back->hw) && vlan_promisc)
- ice_vf_dis_vlan_promisc(vsi, &vlan);
+ if (!ice_is_dvm_ena(&vsi->back->hw)) {
+ if (vlan_promisc)
+ ice_vf_dis_vlan_promisc(vsi, &vlan);
+
+ /* Disable VLAN filtering when only VLAN 0 is left */
+ if (!ice_vsi_has_non_zero_vlans(vsi)) {
+ err = vsi->inner_vlan_ops.dis_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
+ }
}
}
@@ -2931,6 +2958,13 @@ ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
if (err)
return err;
}
+
+ /* Enable VLAN filtering on first non-zero VLAN */
+ if (vf->spoofchk && vlan.vid && ice_is_dvm_ena(&vsi->back->hw)) {
+ err = vsi->outer_vlan_ops.ena_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
}
vc_vlan = &vlan_fltr->inner;
@@ -2946,10 +2980,19 @@ ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
/* no support for VLAN promiscuous on inner VLAN unless
* we are in Single VLAN Mode (SVM)
*/
- if (!ice_is_dvm_ena(&vsi->back->hw) && vlan_promisc) {
- err = ice_vf_ena_vlan_promisc(vsi, &vlan);
- if (err)
- return err;
+ if (!ice_is_dvm_ena(&vsi->back->hw)) {
+ if (vlan_promisc) {
+ err = ice_vf_ena_vlan_promisc(vsi, &vlan);
+ if (err)
+ return err;
+ }
+
+ /* Enable VLAN filtering on first non-zero VLAN */
+ if (vf->spoofchk && vlan.vid) {
+ err = vsi->inner_vlan_ops.ena_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
}
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 49ba8bfdbf04..e48e29258450 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -243,7 +243,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
if (err)
goto free_buf;
ice_set_ring_xdp(xdp_ring);
- xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
+ ice_tx_xsk_pool(vsi, q_idx);
}
err = ice_vsi_cfg_rxq(rx_ring);
@@ -329,6 +329,12 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
bool if_running, pool_present = !!pool;
int ret = 0, pool_failure = 0;
+ if (qid >= vsi->num_rxq || qid >= vsi->num_txq) {
+ netdev_err(vsi->netdev, "Please use queue id in scope of combined queues count\n");
+ pool_failure = -EINVAL;
+ goto failure;
+ }
+
if (!is_power_of_2(vsi->rx_rings[qid]->count) ||
!is_power_of_2(vsi->tx_rings[qid]->count)) {
netdev_err(vsi->netdev, "Please align ring sizes to power of 2\n");
@@ -353,7 +359,7 @@ xsk_pool_if_up:
if (if_running) {
ret = ice_qp_ena(vsi, qid);
if (!ret && pool_present)
- napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi);
+ napi_schedule(&vsi->rx_rings[qid]->xdp_ring->q_vector->napi);
else if (ret)
netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
}
@@ -944,13 +950,13 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
if (!ice_is_xdp_ena_vsi(vsi))
return -EINVAL;
- if (queue_id >= vsi->num_txq)
+ if (queue_id >= vsi->num_txq || queue_id >= vsi->num_rxq)
return -EINVAL;
- if (!vsi->xdp_rings[queue_id]->xsk_pool)
- return -EINVAL;
+ ring = vsi->rx_rings[queue_id]->xdp_ring;
- ring = vsi->xdp_rings[queue_id];
+ if (!ring->xsk_pool)
+ return -EINVAL;
/* The idea here is that if NAPI is running, mark a miss, so
* it will run again. If not, trigger an interrupt and
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 2d3daf022651..015b78144114 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -664,6 +664,8 @@ struct igb_adapter {
struct igb_mac_addr *mac_table;
struct vf_mac_filter vf_macs;
struct vf_mac_filter *vf_mac_list;
+ /* lock for VF resources */
+ spinlock_t vfs_lock;
};
/* flags controlling PTP/1588 function */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index d8b836a85cc3..2796e81d2726 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3637,6 +3637,7 @@ static int igb_disable_sriov(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ unsigned long flags;
/* reclaim resources allocated to VFs */
if (adapter->vf_data) {
@@ -3649,12 +3650,13 @@ static int igb_disable_sriov(struct pci_dev *pdev)
pci_disable_sriov(pdev);
msleep(500);
}
-
+ spin_lock_irqsave(&adapter->vfs_lock, flags);
kfree(adapter->vf_mac_list);
adapter->vf_mac_list = NULL;
kfree(adapter->vf_data);
adapter->vf_data = NULL;
adapter->vfs_allocated_count = 0;
+ spin_unlock_irqrestore(&adapter->vfs_lock, flags);
wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
wrfl();
msleep(100);
@@ -3814,7 +3816,9 @@ static void igb_remove(struct pci_dev *pdev)
igb_release_hw_control(adapter);
#ifdef CONFIG_PCI_IOV
+ rtnl_lock();
igb_disable_sriov(pdev);
+ rtnl_unlock();
#endif
unregister_netdev(netdev);
@@ -3974,6 +3978,9 @@ static int igb_sw_init(struct igb_adapter *adapter)
spin_lock_init(&adapter->nfc_lock);
spin_lock_init(&adapter->stats64_lock);
+
+ /* init spinlock to serialize access to VF resources */
+ spin_lock_init(&adapter->vfs_lock);
#ifdef CONFIG_PCI_IOV
switch (hw->mac.type) {
case e1000_82576:
@@ -7958,8 +7965,10 @@ unlock:
static void igb_msg_task(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
+ unsigned long flags;
u32 vf;
+ spin_lock_irqsave(&adapter->vfs_lock, flags);
for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
/* process any reset requests */
if (!igb_check_for_rst(hw, vf))
@@ -7973,6 +7982,7 @@ static void igb_msg_task(struct igb_adapter *adapter)
if (!igb_check_for_ack(hw, vf))
igb_rcv_ack_from_vf(adapter, vf);
}
+ spin_unlock_irqrestore(&adapter->vfs_lock, flags);
}
/**
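
igb takes the new adapter->vfs_lock both where the VF arrays are freed and around the loop in igb_msg_task(), so the mailbox handler can no longer dereference vf_data while igb_disable_sriov() frees it. A user-space analogue of the same locking discipline with a pthread mutex; vf_data and the counter are stand-ins, not the driver's structures:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t vfs_lock = PTHREAD_MUTEX_INITIALIZER;
static int *vf_data;
static int vfs_allocated_count;

static void disable_sriov(void)
{
	pthread_mutex_lock(&vfs_lock);
	free(vf_data);
	vf_data = NULL;
	vfs_allocated_count = 0;	/* readers see count and pointer change together */
	pthread_mutex_unlock(&vfs_lock);
}

static void msg_task(void)
{
	pthread_mutex_lock(&vfs_lock);
	for (int vf = 0; vf < vfs_allocated_count; vf++)
		vf_data[vf]++;		/* safe: teardown cannot run concurrently */
	pthread_mutex_unlock(&vfs_lock);
}

int main(void)
{
	vfs_allocated_count = 4;
	vf_data = calloc(vfs_allocated_count, sizeof(*vf_data));
	if (!vf_data)
		return 1;

	msg_task();
	disable_sriov();
	msg_task();			/* count is 0, loop never touches freed memory */
	printf("done\n");
	return 0;
}
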
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 9f06896a049b..f8605f57bd06 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -1214,7 +1214,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
struct cyclecounter cc;
unsigned long flags;
u32 incval = 0;
- u32 tsauxc = 0;
u32 fuse0 = 0;
/* For some of the boards below this mask is technically incorrect.
@@ -1249,18 +1248,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
case ixgbe_mac_x550em_a:
case ixgbe_mac_X550:
cc.read = ixgbe_ptp_read_X550;
-
- /* enable SYSTIME counter */
- IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0);
- IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
- IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
- tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
- IXGBE_WRITE_REG(hw, IXGBE_TSAUXC,
- tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME);
- IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS);
- IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC);
-
- IXGBE_WRITE_FLUSH(hw);
break;
case ixgbe_mac_X540:
cc.read = ixgbe_ptp_read_82599;
@@ -1293,6 +1280,50 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_ptp_init_systime - Initialize SYSTIME registers
+ * @adapter: the ixgbe private board structure
+ *
+ * Initialize and start the SYSTIME registers.
+ */
+static void ixgbe_ptp_init_systime(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 tsauxc;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ case ixgbe_mac_X550:
+ tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
+
+ /* Reset SYSTIME registers to 0 */
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
+
+ /* Reset interrupt settings */
+ IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS);
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC);
+
+ /* Activate the SYSTIME counter */
+ IXGBE_WRITE_REG(hw, IXGBE_TSAUXC,
+ tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME);
+ break;
+ case ixgbe_mac_X540:
+ case ixgbe_mac_82599EB:
+ /* Reset SYSTIME registers to 0 */
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
+ break;
+ default:
+ /* Other devices aren't supported */
+ return;
+ };
+
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
* ixgbe_ptp_reset
* @adapter: the ixgbe private board structure
*
@@ -1318,6 +1349,8 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
ixgbe_ptp_start_cyclecounter(adapter);
+ ixgbe_ptp_init_systime(adapter);
+
spin_lock_irqsave(&adapter->tmreg_lock, flags);
timecounter_init(&adapter->hw_tc, &adapter->hw_cc,
ktime_to_ns(ktime_get_real()));
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 5edb68a8aab1..57f27cc7724e 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -193,6 +193,7 @@ static int xrx200_alloc_buf(struct xrx200_chan *ch, void *(*alloc)(unsigned int
ch->rx_buff[ch->dma.desc] = alloc(priv->rx_skb_size);
if (!ch->rx_buff[ch->dma.desc]) {
+ ch->rx_buff[ch->dma.desc] = buf;
ret = -ENOMEM;
goto skip;
}
@@ -239,6 +240,12 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
}
skb = build_skb(buf, priv->rx_skb_size);
+ if (!skb) {
+ skb_free_frag(buf);
+ net_dev->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+
skb_reserve(skb, NET_SKB_PAD);
skb_put(skb, len);
@@ -288,7 +295,7 @@ static int xrx200_poll_rx(struct napi_struct *napi, int budget)
if (ret == XRX200_DMA_PACKET_IN_PROGRESS)
continue;
if (ret != XRX200_DMA_PACKET_COMPLETE)
- return ret;
+ break;
rx++;
} else {
break;
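
The xrx200 fix puts the previous buffer back into the ring slot when allocating a replacement fails, and drops the packet when build_skb() fails, so a ring slot is never left holding NULL. A compact sketch of that refill rule, with allocation and slot layout simplified and an explicit 'fail' flag to force the error path:

#include <stdio.h>
#include <stdlib.h>

/* Swap in a fresh buffer; on failure keep the old one and report an error. */
static int refill_slot(void **slot, void **old_buf, size_t size, int fail)
{
	void *prev = *slot;
	void *fresh = fail ? NULL : malloc(size);

	if (!fresh) {
		*slot = prev;	/* ring slot must never be left NULL */
		return -1;
	}
	*old_buf = prev;	/* caller hands the old buffer to the stack */
	*slot = fresh;
	return 0;
}

int main(void)
{
	void *slot = malloc(2048), *old = NULL;

	printf("ok: %d\n", refill_slot(&slot, &old, 2048, 0));
	printf("fail: %d (slot still valid: %d)\n",
	       refill_slot(&slot, &old, 2048, 1), slot != NULL);
	free(slot);
	free(old);
	return 0;
}
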
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 6809b8b4c556..7282a826d81e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -2580,6 +2580,12 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
rvu_reset_lmt_map_tbl(rvu, pcifunc);
rvu_detach_rsrcs(rvu, NULL, pcifunc);
+ /* In scenarios where PF/VF drivers detach NIXLF without freeing MCAM
+ * entries, check and free the MCAM entries explicitly to avoid a leak.
+ * Since the LF is detached, pass -1 as the LF number.
+ */
+ rvu_npc_free_mcam_entries(rvu, pcifunc, -1);
+
mutex_unlock(&rvu->flr_lock);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 583ead4dd246..1e348fd0d930 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -1097,6 +1097,9 @@ static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
+ if (nixlf < 0)
+ return;
+
npc_enadis_default_entries(rvu, pcifunc, nixlf, false);
/* Delete multicast and promisc MCAM entries */
@@ -1136,6 +1139,9 @@ bool rvu_npc_enable_mcam_by_entry_index(struct rvu *rvu, int entry, int intf, bo
void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
+ if (nixlf < 0)
+ return;
+
/* Enables only broadcast match entry. Promisc/Allmulti are enabled
* in set_rx_mode mbox handler.
*/
@@ -1675,7 +1681,7 @@ static void npc_load_kpu_profile(struct rvu *rvu)
* Firmware database method.
* Default KPU profile.
*/
- if (!request_firmware(&fw, kpu_profile, rvu->dev)) {
+ if (!request_firmware_direct(&fw, kpu_profile, rvu->dev)) {
dev_info(rvu->dev, "Loading KPU profile from firmware: %s\n",
kpu_profile);
rvu->kpu_fwdata = kzalloc(fw->size, GFP_KERNEL);
@@ -1939,6 +1945,7 @@ static void rvu_npc_hw_init(struct rvu *rvu, int blkaddr)
static void rvu_npc_setup_interfaces(struct rvu *rvu, int blkaddr)
{
+ struct npc_mcam_kex *mkex = rvu->kpu.mkex;
struct npc_mcam *mcam = &rvu->hw->mcam;
struct rvu_hwinfo *hw = rvu->hw;
u64 nibble_ena, rx_kex, tx_kex;
@@ -1951,15 +1958,15 @@ static void rvu_npc_setup_interfaces(struct rvu *rvu, int blkaddr)
mcam->counters.max--;
mcam->rx_miss_act_cntr = mcam->counters.max;
- rx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_RX];
- tx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_TX];
+ rx_kex = mkex->keyx_cfg[NIX_INTF_RX];
+ tx_kex = mkex->keyx_cfg[NIX_INTF_TX];
nibble_ena = FIELD_GET(NPC_PARSE_NIBBLE, rx_kex);
nibble_ena = rvu_npc_get_tx_nibble_cfg(rvu, nibble_ena);
if (nibble_ena) {
tx_kex &= ~NPC_PARSE_NIBBLE;
tx_kex |= FIELD_PREP(NPC_PARSE_NIBBLE, nibble_ena);
- npc_mkex_default.keyx_cfg[NIX_INTF_TX] = tx_kex;
+ mkex->keyx_cfg[NIX_INTF_TX] = tx_kex;
}
/* Configure RX interfaces */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index a400aa22da79..7c4e1acd0f77 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -467,7 +467,8 @@ do { \
NPC_SCAN_HDR(NPC_VLAN_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 2, 2);
NPC_SCAN_HDR(NPC_VLAN_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 2, 2);
NPC_SCAN_HDR(NPC_DMAC, NPC_LID_LA, la_ltype, la_start, 6);
- NPC_SCAN_HDR(NPC_SMAC, NPC_LID_LA, la_ltype, la_start, 6);
+ /* SMAC follows the DMAC (which is 6 bytes) */
+ NPC_SCAN_HDR(NPC_SMAC, NPC_LID_LA, la_ltype, la_start + 6, 6);
/* PF_FUNC is 2 bytes at 0th byte of NPC_LT_LA_IH_NIX_ETHER */
NPC_SCAN_HDR(NPC_PF_FUNC, NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, 0, 2);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index fb8db5888d2f..d686c7b6252f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -632,6 +632,12 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
req->num_regs++;
req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq);
req->regval[1] = dwrr_val;
+ if (lvl == hw->txschq_link_cfg_lvl) {
+ req->num_regs++;
+ req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
+ /* Enable this queue and backpressure */
+ req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
+ }
} else if (lvl == NIX_TXSCH_LVL_TL2) {
parent = hw->txschq_list[NIX_TXSCH_LVL_TL1][0];
req->reg[0] = NIX_AF_TL2X_PARENT(schq);
@@ -641,11 +647,12 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq);
req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | dwrr_val;
- req->num_regs++;
- req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
- /* Enable this queue and backpressure */
- req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
-
+ if (lvl == hw->txschq_link_cfg_lvl) {
+ req->num_regs++;
+ req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
+ /* Enable this queue and backpressure */
+ req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
+ }
} else if (lvl == NIX_TXSCH_LVL_TL1) {
/* Default config for TL1.
* For VF this is always ignored.
@@ -1591,6 +1598,8 @@ void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
for (schq = 0; schq < rsp->schq[lvl]; schq++)
pf->hw.txschq_list[lvl][schq] =
rsp->schq_list[lvl][schq];
+
+ pf->hw.txschq_link_cfg_lvl = rsp->link_cfg_lvl;
}
EXPORT_SYMBOL(mbox_handler_nix_txsch_alloc);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index e795f9ee76dd..b28029cc4316 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -195,6 +195,7 @@ struct otx2_hw {
u16 sqb_size;
/* NIX */
+ u8 txschq_link_cfg_lvl;
u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
u16 matchall_ipolicer;
u32 dwrr_mtu;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index d9426b01f462..5ace4609de47 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1732,7 +1732,7 @@ static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
case XDP_TX: {
struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
- if (mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
+ if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
act = XDP_DROP;
break;
@@ -1891,10 +1891,19 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
skb->dev = netdev;
bytes += skb->len;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
+ if (hash != MTK_RXD5_FOE_ENTRY)
+ skb_set_hash(skb, jhash_1word(hash, 0),
+ PKT_HASH_TYPE_L4);
rxdcsum = &trxd.rxd3;
- else
+ } else {
+ hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
+ if (hash != MTK_RXD4_FOE_ENTRY)
+ skb_set_hash(skb, jhash_1word(hash, 0),
+ PKT_HASH_TYPE_L4);
rxdcsum = &trxd.rxd4;
+ }
if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1902,16 +1911,9 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, netdev);
- hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
- if (hash != MTK_RXD4_FOE_ENTRY) {
- hash = jhash_1word(hash, 0);
- skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
- }
-
reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
- mtk_ppe_check_skb(eth->ppe, skb,
- trxd.rxd4 & MTK_RXD4_FOE_ENTRY);
+ mtk_ppe_check_skb(eth->ppe, skb, hash);
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 7405c97cda66..ecf85e9ed824 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -314,6 +314,11 @@
#define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */
#define RX_DMA_SPECIAL_TAG BIT(22)
+/* PDMA descriptor rxd5 */
+#define MTK_RXD5_FOE_ENTRY GENMASK(14, 0)
+#define MTK_RXD5_PPE_CPU_REASON GENMASK(22, 18)
+#define MTK_RXD5_SRC_PORT GENMASK(29, 26)
+
#define RX_DMA_GET_SPORT(x) (((x) >> 19) & 0xf)
#define RX_DMA_GET_SPORT_V2(x) (((x) >> 26) & 0x7)
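
On NETSYS v2 the flow-offload entry index now comes from bits 14:0 of rxd5 rather than rxd4, and the all-ones value still means "no entry". A stand-alone sketch of the field extraction and sentinel check, with FIELD_GET() replaced by an explicit mask so it compiles outside the kernel:

#include <stdint.h>
#include <stdio.h>

#define MTK_RXD5_FOE_ENTRY_MASK  0x7fffu	/* GENMASK(14, 0) */

/* Returns the FOE entry index, or -1 when the descriptor carries none. */
static int rxd5_foe_entry(uint32_t rxd5)
{
	uint32_t hash = rxd5 & MTK_RXD5_FOE_ENTRY_MASK;

	if (hash == MTK_RXD5_FOE_ENTRY_MASK)	/* all ones: no entry */
		return -1;
	return (int)hash;
}

int main(void)
{
	printf("%d %d\n", rxd5_foe_entry(0x00300123), rxd5_foe_entry(0xffffffff));
	return 0;
}
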
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 42c96c9d7fb1..dcb9eb1899ce 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -463,7 +463,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
field = min(
bitmap_weight(actv_ports.ports, dev->caps.num_ports),
- dev->caps.num_ports);
+ (unsigned int) dev->caps.num_ports);
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
size = dev->caps.function_caps; /* set PF behaviours */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
index 37522352e4b2..c8e5ca65bb6e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
@@ -79,6 +79,10 @@ tc_act_police_offload(struct mlx5e_priv *priv,
struct mlx5e_flow_meter_handle *meter;
int err = 0;
+ err = mlx5e_policer_validate(&fl_act->action, act, fl_act->extack);
+ if (err)
+ return err;
+
err = fill_meter_params_from_act(act, &params);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index d87bbb0be7c8..e6f64d890fb3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -506,7 +506,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
int err;
attr.ttl = tun_key->ttl;
- attr.fl.fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
+ attr.fl.fl6.flowlabel = ip6_make_flowinfo(tun_key->tos, tun_key->label);
attr.fl.fl6.daddr = tun_key->u.ipv6.dst;
attr.fl.fl6.saddr = tun_key->u.ipv6.src;
@@ -620,7 +620,7 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
attr.ttl = tun_key->ttl;
- attr.fl.fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
+ attr.fl.fl6.flowlabel = ip6_make_flowinfo(tun_key->tos, tun_key->label);
attr.fl.fl6.daddr = tun_key->u.ipv6.dst;
attr.fl.fl6.saddr = tun_key->u.ipv6.src;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 6b6c7044b64a..3a1f76eac542 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -246,7 +246,7 @@ static void mlx5e_tls_priv_tx_cleanup(struct mlx5e_ktls_offload_context_tx *priv
static void mlx5e_tls_priv_tx_list_cleanup(struct mlx5_core_dev *mdev,
struct list_head *list, int size)
{
- struct mlx5e_ktls_offload_context_tx *obj;
+ struct mlx5e_ktls_offload_context_tx *obj, *n;
struct mlx5e_async_ctx *bulk_async;
int i;
@@ -255,7 +255,7 @@ static void mlx5e_tls_priv_tx_list_cleanup(struct mlx5_core_dev *mdev,
return;
i = 0;
- list_for_each_entry(obj, list, list_node) {
+ list_for_each_entry_safe(obj, n, list, list_node) {
mlx5e_tls_priv_tx_cleanup(obj, &bulk_async[i]);
i++;
}
@@ -808,6 +808,7 @@ bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
{
struct mlx5e_ktls_offload_context_tx *priv_tx;
struct mlx5e_sq_stats *stats = sq->stats;
+ struct net_device *tls_netdev;
struct tls_context *tls_ctx;
int datalen;
u32 seq;
@@ -819,7 +820,12 @@ bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
mlx5e_tx_mpwqe_ensure_complete(sq);
tls_ctx = tls_get_ctx(skb->sk);
- if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
+ tls_netdev = rcu_dereference_bh(tls_ctx->netdev);
+ /* Don't WARN on NULL: if tls_device_down is running in parallel,
+ * netdev might become NULL, even if tls_is_sk_tx_device_offloaded was
+ * true. Rather continue processing this packet.
+ */
+ if (WARN_ON_ONCE(tls_netdev && tls_netdev != netdev))
goto err_out;
priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index e2a9b9be5c1f..e0ce5a233d0b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -1395,10 +1395,11 @@ struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
}
return fs;
-err_free_fs:
- kvfree(fs);
+
err_free_vlan:
mlx5e_fs_vlan_free(fs);
+err_free_fs:
+ kvfree(fs);
err:
return NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index d858667736a3..02eb2f0fa2ae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3682,7 +3682,9 @@ static int set_feature_hw_tc(struct net_device *netdev, bool enable)
int err = 0;
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
- if (!enable && mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD))) {
+ int tc_flag = mlx5e_is_uplink_rep(priv) ? MLX5_TC_FLAG(ESW_OFFLOAD) :
+ MLX5_TC_FLAG(NIC_OFFLOAD);
+ if (!enable && mlx5e_tc_num_filters(priv, tc_flag)) {
netdev_err(netdev,
"Active offloaded tc filters, can't turn hw_tc_offload off\n");
return -EINVAL;
@@ -4769,14 +4771,6 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
/* RQ */
mlx5e_build_rq_params(mdev, params);
- /* HW LRO */
- if (MLX5_CAP_ETH(mdev, lro_cap) &&
- params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
- /* No XSK params: checking the availability of striding RQ in general. */
- if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
- params->packet_merge.type = slow_pci_heuristic(mdev) ?
- MLX5E_PACKET_MERGE_NONE : MLX5E_PACKET_MERGE_LRO;
- }
params->packet_merge.timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
/* CQ moderation params */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 4c1599de652c..759f7d3c2cfd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -662,6 +662,8 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
params->mqprio.num_tc = 1;
params->tunneled_offload_en = false;
+ if (rep->vport != MLX5_VPORT_UPLINK)
+ params->vlan_strip_disable = true;
mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
}
@@ -696,6 +698,13 @@ static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ priv->fs = mlx5e_fs_init(priv->profile, mdev,
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+ if (!priv->fs) {
+ netdev_err(priv->netdev, "FS allocation failed\n");
+ return -ENOMEM;
+ }
+
mlx5e_build_rep_params(netdev);
mlx5e_timestamp_init(priv);
@@ -708,12 +717,21 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
+ priv->fs = mlx5e_fs_init(priv->profile, mdev,
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+ if (!priv->fs) {
+ netdev_err(priv->netdev, "FS allocation failed\n");
+ return -ENOMEM;
+ }
+
err = mlx5e_ipsec_init(priv);
if (err)
mlx5_core_err(mdev, "Uplink rep IPsec initialization failed, %d\n", err);
mlx5e_vxlan_set_netdev_info(priv);
- return mlx5e_init_rep(mdev, netdev);
+ mlx5e_build_rep_params(netdev);
+ mlx5e_timestamp_init(priv);
+ return 0;
}
static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
@@ -836,13 +854,6 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
int err;
- priv->fs = mlx5e_fs_init(priv->profile, mdev,
- !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
- if (!priv->fs) {
- netdev_err(priv->netdev, "FS allocation failed\n");
- return -ENOMEM;
- }
-
priv->rx_res = mlx5e_rx_res_alloc();
if (!priv->rx_res) {
err = -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index ed73132129aa..a9f4c652f859 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -427,7 +427,8 @@ esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *f
dest[dest_idx].vport.vhca_id =
MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
- if (mlx5_lag_mpesw_is_activated(esw->dev))
+ if (dest[dest_idx].vport.num == MLX5_VPORT_UPLINK &&
+ mlx5_lag_mpesw_is_activated(esw->dev))
dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
}
if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) {
@@ -3115,8 +3116,10 @@ esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
MLX5_VPORT_UC_ADDR_CHANGE);
- if (err)
+ if (err) {
+ devl_unlock(devlink);
return;
+ }
}
esw->esw_funcs.num_vfs = new_num_vfs;
devl_unlock(devlink);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 735dc805dad7..e735e19461ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -50,10 +50,12 @@ static int mlx5_cmd_stub_update_root_ft(struct mlx5_flow_root_namespace *ns,
static int mlx5_cmd_stub_create_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int size,
+ struct mlx5_flow_table_attr *ft_attr,
struct mlx5_flow_table *next_ft)
{
- ft->max_fte = size ? roundup_pow_of_two(size) : 1;
+ int max_fte = ft_attr->max_fte;
+
+ ft->max_fte = max_fte ? roundup_pow_of_two(max_fte) : 1;
return 0;
}
@@ -258,7 +260,7 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int size,
+ struct mlx5_flow_table_attr *ft_attr,
struct mlx5_flow_table *next_ft)
{
int en_encap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
@@ -267,17 +269,19 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {};
u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
+ unsigned int size;
int err;
- if (size != POOL_NEXT_SIZE)
- size = roundup_pow_of_two(size);
- size = mlx5_ft_pool_get_avail_sz(dev, ft->type, size);
+ if (ft_attr->max_fte != POOL_NEXT_SIZE)
+ size = roundup_pow_of_two(ft_attr->max_fte);
+ size = mlx5_ft_pool_get_avail_sz(dev, ft->type, ft_attr->max_fte);
if (!size)
return -ENOSPC;
MLX5_SET(create_flow_table_in, in, opcode,
MLX5_CMD_OP_CREATE_FLOW_TABLE);
+ MLX5_SET(create_flow_table_in, in, uid, ft_attr->uid);
MLX5_SET(create_flow_table_in, in, table_type, ft->type);
MLX5_SET(create_flow_table_in, in, flow_table_context.level, ft->level);
MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, size ? ilog2(size) : 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index 274004e80f03..8ef4254b9ea1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -38,7 +38,7 @@
struct mlx5_flow_cmds {
int (*create_flow_table)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int size,
+ struct mlx5_flow_table_attr *ft_attr,
struct mlx5_flow_table *next_ft);
int (*destroy_flow_table)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index f1b908d40fa6..e3960cdf5131 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1155,7 +1155,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
find_next_chained_ft(fs_prio);
ft->def_miss_action = ns->def_miss_action;
ft->ns = ns;
- err = root->cmds->create_flow_table(root, ft, ft_attr->max_fte, next_ft);
+ err = root->cmds->create_flow_table(root, ft, ft_attr, next_ft);
if (err)
goto free_ft;
@@ -1195,6 +1195,12 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
}
EXPORT_SYMBOL(mlx5_create_flow_table);
+u32 mlx5_flow_table_id(struct mlx5_flow_table *ft)
+{
+ return ft->id;
+}
+EXPORT_SYMBOL(mlx5_flow_table_id);
+
struct mlx5_flow_table *
mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
struct mlx5_flow_table_attr *ft_attr, u16 vport)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index 0f34e3c80d1f..065102278cb8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -1067,30 +1067,32 @@ static void mlx5_ldev_add_netdev(struct mlx5_lag *ldev,
struct net_device *netdev)
{
unsigned int fn = mlx5_get_dev_index(dev);
+ unsigned long flags;
if (fn >= ldev->ports)
return;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev->pf[fn].netdev = netdev;
ldev->tracker.netdev_state[fn].link_up = 0;
ldev->tracker.netdev_state[fn].tx_enabled = 0;
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
}
static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
struct net_device *netdev)
{
+ unsigned long flags;
int i;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
for (i = 0; i < ldev->ports; i++) {
if (ldev->pf[i].netdev == netdev) {
ldev->pf[i].netdev = NULL;
break;
}
}
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
}
static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
@@ -1234,7 +1236,7 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
mlx5_ldev_add_netdev(ldev, dev, netdev);
for (i = 0; i < ldev->ports; i++)
- if (!ldev->pf[i].dev)
+ if (!ldev->pf[i].netdev)
break;
if (i >= ldev->ports)
@@ -1246,12 +1248,13 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
+ unsigned long flags;
bool res;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
res = ldev && __mlx5_lag_is_roce(ldev);
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return res;
}
@@ -1260,12 +1263,13 @@ EXPORT_SYMBOL(mlx5_lag_is_roce);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
+ unsigned long flags;
bool res;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
res = ldev && __mlx5_lag_is_active(ldev);
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return res;
}
@@ -1274,13 +1278,14 @@ EXPORT_SYMBOL(mlx5_lag_is_active);
bool mlx5_lag_is_master(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
+ unsigned long flags;
bool res;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
res = ldev && __mlx5_lag_is_active(ldev) &&
dev == ldev->pf[MLX5_LAG_P1].dev;
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return res;
}
@@ -1289,12 +1294,13 @@ EXPORT_SYMBOL(mlx5_lag_is_master);
bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
+ unsigned long flags;
bool res;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
res = ldev && __mlx5_lag_is_sriov(ldev);
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return res;
}
@@ -1303,13 +1309,14 @@ EXPORT_SYMBOL(mlx5_lag_is_sriov);
bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
+ unsigned long flags;
bool res;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
res = ldev && __mlx5_lag_is_sriov(ldev) &&
test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return res;
}
@@ -1352,9 +1359,10 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
{
struct net_device *ndev = NULL;
struct mlx5_lag *ldev;
+ unsigned long flags;
int i;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
if (!(ldev && __mlx5_lag_is_roce(ldev)))
@@ -1373,7 +1381,7 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
dev_hold(ndev);
unlock:
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return ndev;
}
@@ -1383,10 +1391,11 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
struct net_device *slave)
{
struct mlx5_lag *ldev;
+ unsigned long flags;
u8 port = 0;
int i;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
if (!(ldev && __mlx5_lag_is_roce(ldev)))
goto unlock;
@@ -1401,7 +1410,7 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
port = ldev->v2p_map[port * ldev->buckets];
unlock:
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return port;
}
EXPORT_SYMBOL(mlx5_lag_get_slave_port);
@@ -1422,8 +1431,9 @@ struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev)
{
struct mlx5_core_dev *peer_dev = NULL;
struct mlx5_lag *ldev;
+ unsigned long flags;
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
if (!ldev)
goto unlock;
@@ -1433,7 +1443,7 @@ struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev)
ldev->pf[MLX5_LAG_P1].dev;
unlock:
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
return peer_dev;
}
EXPORT_SYMBOL(mlx5_lag_get_peer_mdev);
@@ -1446,6 +1456,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
struct mlx5_core_dev **mdev;
struct mlx5_lag *ldev;
+ unsigned long flags;
int num_ports;
int ret, i, j;
void *out;
@@ -1462,7 +1473,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
memset(values, 0, sizeof(*values) * num_counters);
- spin_lock(&lag_lock);
+ spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
if (ldev && __mlx5_lag_is_active(ldev)) {
num_ports = ldev->ports;
@@ -1472,7 +1483,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
num_ports = 1;
mdev[MLX5_LAG_P1] = dev;
}
- spin_unlock(&lag_lock);
+ spin_unlock_irqrestore(&lag_lock, flags);
for (i = 0; i < num_ports; ++i) {
u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = {};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index bec8d6d0b5f6..c085b031abfc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1530,7 +1530,9 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
memcpy(&dev->profile, &profile[profile_idx], sizeof(dev->profile));
INIT_LIST_HEAD(&priv->ctx_list);
spin_lock_init(&priv->ctx_lock);
+ lockdep_register_key(&dev->lock_key);
mutex_init(&dev->intf_state_mutex);
+ lockdep_set_class(&dev->intf_state_mutex, &dev->lock_key);
mutex_init(&priv->bfregs.reg_head.lock);
mutex_init(&priv->bfregs.wc_head.lock);
@@ -1597,6 +1599,7 @@ err_timeout_init:
mutex_destroy(&priv->bfregs.wc_head.lock);
mutex_destroy(&priv->bfregs.reg_head.lock);
mutex_destroy(&dev->intf_state_mutex);
+ lockdep_unregister_key(&dev->lock_key);
return err;
}
@@ -1618,6 +1621,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
mutex_destroy(&priv->bfregs.wc_head.lock);
mutex_destroy(&priv->bfregs.reg_head.lock);
mutex_destroy(&dev->intf_state_mutex);
+ lockdep_unregister_key(&dev->lock_key);
}
static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index ec76a8b1acc1..60596357bfc7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -376,8 +376,8 @@ retry:
goto out_dropped;
}
}
+ err = mlx5_cmd_check(dev, err, in, out);
if (err) {
- err = mlx5_cmd_check(dev, err, in, out);
mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
func_id, npages, err);
goto out_dropped;
@@ -524,10 +524,13 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
dev->priv.reclaim_pages_discard += npages;
}
/* if triggered by FW event and failed by FW then ignore */
- if (event && err == -EREMOTEIO)
+ if (event && err == -EREMOTEIO) {
err = 0;
+ goto out_free;
+ }
+
+ err = mlx5_cmd_check(dev, err, in, out);
if (err) {
- err = mlx5_cmd_check(dev, err, in, out);
mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
goto out_free;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index ee2e1b7c1310..c0e6c487c63c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -159,11 +159,11 @@ static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
devl_lock(devlink);
err = mlx5_device_enable_sriov(dev, num_vfs);
+ devl_unlock(devlink);
if (err) {
mlx5_core_warn(dev, "mlx5_device_enable_sriov failed : %d\n", err);
return err;
}
- devl_unlock(devlink);
err = pci_enable_sriov(pdev, num_vfs);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index 223c8741b7ae..16d65fe4f654 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -439,6 +439,7 @@ int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
MLX5_SET(create_flow_table_in, in, table_type, attr->table_type);
+ MLX5_SET(create_flow_table_in, in, uid, attr->uid);
ft_mdev = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
MLX5_SET(flow_table_context, ft_mdev, termination_table, attr->term_tbl);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
index e5f6412baea9..31d443dd8386 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
@@ -214,7 +214,7 @@ static int dr_table_destroy_sw_owned_tbl(struct mlx5dr_table *tbl)
tbl->table_type);
}
-static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl)
+static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl, u16 uid)
{
bool en_encap = !!(tbl->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
bool en_decap = !!(tbl->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
@@ -236,6 +236,7 @@ static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl)
ft_attr.sw_owner = true;
ft_attr.decap_en = en_decap;
ft_attr.reformat_en = en_encap;
+ ft_attr.uid = uid;
ret = mlx5dr_cmd_create_flow_table(tbl->dmn->mdev, &ft_attr,
NULL, &tbl->table_id);
@@ -243,7 +244,8 @@ static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl)
return ret;
}
-struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level, u32 flags)
+struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level,
+ u32 flags, u16 uid)
{
struct mlx5dr_table *tbl;
int ret;
@@ -263,7 +265,7 @@ struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level, u
if (ret)
goto free_tbl;
- ret = dr_table_create_sw_owned_tbl(tbl);
+ ret = dr_table_create_sw_owned_tbl(tbl, uid);
if (ret)
goto uninit_tbl;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index feed227944b7..062c7c74a1f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -1217,6 +1217,7 @@ struct mlx5dr_cmd_query_flow_table_details {
struct mlx5dr_cmd_create_flow_table_attr {
u32 table_type;
+ u16 uid;
u64 icm_addr_rx;
u64 icm_addr_tx;
u8 level;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 75672663359d..13b6d4721e17 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -62,7 +62,7 @@ static int set_miss_action(struct mlx5_flow_root_namespace *ns,
static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int size,
+ struct mlx5_flow_table_attr *ft_attr,
struct mlx5_flow_table *next_ft)
{
struct mlx5dr_table *tbl;
@@ -71,7 +71,7 @@ static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
if (mlx5_dr_is_fw_table(ft->flags))
return mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft,
- size,
+ ft_attr,
next_ft);
flags = ft->flags;
/* turn off encap/decap if not supported for sw-str by fw */
@@ -79,7 +79,8 @@ static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
flags = ft->flags & ~(MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
- tbl = mlx5dr_table_create(ns->fs_dr_domain.dr_domain, ft->level, flags);
+ tbl = mlx5dr_table_create(ns->fs_dr_domain.dr_domain, ft->level, flags,
+ ft_attr->uid);
if (!tbl) {
mlx5_core_err(ns->dev, "Failed creating dr flow_table\n");
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index ecffc2cbe6fe..226a0d7bb06d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -51,7 +51,8 @@ void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
struct mlx5dr_domain *peer_dmn);
struct mlx5dr_table *
-mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level, u32 flags);
+mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level, u32 flags,
+ u16 uid);
struct mlx5dr_table *
mlx5dr_table_get_from_fs_ft(struct mlx5_flow_table *ft);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index d9bf584234a6..bb1cd4bae82e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -328,7 +328,6 @@ static void mlxsw_m_port_module_unmap(struct mlxsw_m *mlxsw_m, u8 module)
static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m)
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_m->core);
- struct devlink *devlink = priv_to_devlink(mlxsw_m->core);
u8 last_module = max_ports;
int i;
int err;
@@ -357,7 +356,6 @@ static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m)
}
/* Create port objects for each valid entry */
- devl_lock(devlink);
for (i = 0; i < mlxsw_m->max_ports; i++) {
if (mlxsw_m->module_to_port[i] > 0) {
err = mlxsw_m_port_create(mlxsw_m,
@@ -367,7 +365,6 @@ static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m)
goto err_module_to_port_create;
}
}
- devl_unlock(devlink);
return 0;
@@ -377,7 +374,6 @@ err_module_to_port_create:
mlxsw_m_port_remove(mlxsw_m,
mlxsw_m->module_to_port[i]);
}
- devl_unlock(devlink);
i = max_ports;
err_module_to_port_map:
for (i--; i > 0; i--)
@@ -390,10 +386,8 @@ err_module_to_port_alloc:
static void mlxsw_m_ports_remove(struct mlxsw_m *mlxsw_m)
{
- struct devlink *devlink = priv_to_devlink(mlxsw_m->core);
int i;
- devl_lock(devlink);
for (i = 0; i < mlxsw_m->max_ports; i++) {
if (mlxsw_m->module_to_port[i] > 0) {
mlxsw_m_port_remove(mlxsw_m,
@@ -401,7 +395,6 @@ static void mlxsw_m_ports_remove(struct mlxsw_m *mlxsw_m)
mlxsw_m_port_module_unmap(mlxsw_m, i);
}
}
- devl_unlock(devlink);
kfree(mlxsw_m->module_to_port);
kfree(mlxsw_m->ports);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 1e240cdd9cbd..30c7b0e15721 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1897,9 +1897,9 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
- mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
+ mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index 2e0b704b8a31..7b01b9c20722 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -46,6 +46,7 @@ struct mlxsw_sp2_ptp_state {
* enabled.
*/
struct hwtstamp_config config;
+ struct mutex lock; /* Protects 'config' and HW configuration. */
};
struct mlxsw_sp1_ptp_key {
@@ -1374,6 +1375,7 @@ struct mlxsw_sp_ptp_state *mlxsw_sp2_ptp_init(struct mlxsw_sp *mlxsw_sp)
goto err_ptp_traps_set;
refcount_set(&ptp_state->ptp_port_enabled_ref, 0);
+ mutex_init(&ptp_state->lock);
return &ptp_state->common;
err_ptp_traps_set:
@@ -1388,6 +1390,7 @@ void mlxsw_sp2_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state_common)
ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
+ mutex_destroy(&ptp_state->lock);
mlxsw_sp_ptp_traps_unset(mlxsw_sp);
kfree(ptp_state);
}
@@ -1461,7 +1464,10 @@ int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
+ mutex_lock(&ptp_state->lock);
*config = ptp_state->config;
+ mutex_unlock(&ptp_state->lock);
+
return 0;
}
@@ -1523,6 +1529,9 @@ mlxsw_sp2_ptp_get_message_types(const struct hwtstamp_config *config,
return -EINVAL;
}
+ if ((ing_types && !egr_types) || (!ing_types && egr_types))
+ return -EINVAL;
+
*p_ing_types = ing_types;
*p_egr_types = egr_types;
return 0;
@@ -1574,8 +1583,6 @@ static int mlxsw_sp2_ptp_configure_port(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp2_ptp_state *ptp_state;
int err;
- ASSERT_RTNL();
-
ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
if (refcount_inc_not_zero(&ptp_state->ptp_port_enabled_ref))
@@ -1597,8 +1604,6 @@ static int mlxsw_sp2_ptp_deconfigure_port(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp2_ptp_state *ptp_state;
int err;
- ASSERT_RTNL();
-
ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
if (!refcount_dec_and_test(&ptp_state->ptp_port_enabled_ref))
@@ -1618,16 +1623,20 @@ err_ptp_disable:
int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct hwtstamp_config *config)
{
+ struct mlxsw_sp2_ptp_state *ptp_state;
enum hwtstamp_rx_filters rx_filter;
struct hwtstamp_config new_config;
u16 new_ing_types, new_egr_types;
bool ptp_enabled;
int err;
+ ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
+ mutex_lock(&ptp_state->lock);
+
err = mlxsw_sp2_ptp_get_message_types(config, &new_ing_types,
&new_egr_types, &rx_filter);
if (err)
- return err;
+ goto err_get_message_types;
new_config.flags = config->flags;
new_config.tx_type = config->tx_type;
@@ -1640,11 +1649,11 @@ int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
err = mlxsw_sp2_ptp_configure_port(mlxsw_sp_port, new_ing_types,
new_egr_types, new_config);
if (err)
- return err;
+ goto err_configure_port;
} else if (!new_ing_types && !new_egr_types && ptp_enabled) {
err = mlxsw_sp2_ptp_deconfigure_port(mlxsw_sp_port, new_config);
if (err)
- return err;
+ goto err_deconfigure_port;
}
mlxsw_sp_port->ptp.ing_types = new_ing_types;
@@ -1652,8 +1661,15 @@ int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
/* Notify the ioctl caller what we are actually timestamping. */
config->rx_filter = rx_filter;
+ mutex_unlock(&ptp_state->lock);
return 0;
+
+err_deconfigure_port:
+err_configure_port:
+err_get_message_types:
+ mutex_unlock(&ptp_state->lock);
+ return err;
}
int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
index 2d1628fdefc1..a8b88230959a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
@@ -171,10 +171,11 @@ static inline void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
{
}
-int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+static inline int
+mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
{
return -EOPNOTSUPP;
}
@@ -231,10 +232,11 @@ static inline int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
return mlxsw_sp_ptp_get_ts_info_noptp(info);
}
-int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+static inline int
+mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 1d6e3b641b2e..d928b75f3780 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -710,7 +710,7 @@ static void lan966x_cleanup_ports(struct lan966x *lan966x)
disable_irq(lan966x->xtr_irq);
lan966x->xtr_irq = -ENXIO;
- if (lan966x->ana_irq) {
+ if (lan966x->ana_irq > 0) {
disable_irq(lan966x->ana_irq);
lan966x->ana_irq = -ENXIO;
}
@@ -718,10 +718,10 @@ static void lan966x_cleanup_ports(struct lan966x *lan966x)
if (lan966x->fdma)
devm_free_irq(lan966x->dev, lan966x->fdma_irq, lan966x);
- if (lan966x->ptp_irq)
+ if (lan966x->ptp_irq > 0)
devm_free_irq(lan966x->dev, lan966x->ptp_irq, lan966x);
- if (lan966x->ptp_ext_irq)
+ if (lan966x->ptp_ext_irq > 0)
devm_free_irq(lan966x->dev, lan966x->ptp_ext_irq, lan966x);
}
@@ -1049,7 +1049,7 @@ static int lan966x_probe(struct platform_device *pdev)
}
lan966x->ana_irq = platform_get_irq_byname(pdev, "ana");
- if (lan966x->ana_irq) {
+ if (lan966x->ana_irq > 0) {
err = devm_request_threaded_irq(&pdev->dev, lan966x->ana_irq, NULL,
lan966x_ana_irq_handler, IRQF_ONESHOT,
"ana irq", lan966x);
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index a3214a762e4b..9e57d23e57bf 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -62,9 +62,6 @@ static int moxart_set_mac_address(struct net_device *ndev, void *addr)
{
struct sockaddr *address = addr;
- if (!is_valid_ether_addr(address->sa_data))
- return -EADDRNOTAVAIL;
-
eth_hw_addr_set(ndev, address->sa_data);
moxart_update_mac_address(ndev);
@@ -74,11 +71,6 @@ static int moxart_set_mac_address(struct net_device *ndev, void *addr)
static void moxart_mac_free_memory(struct net_device *ndev)
{
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
- int i;
-
- for (i = 0; i < RX_DESC_NUM; i++)
- dma_unmap_single(&ndev->dev, priv->rx_mapping[i],
- priv->rx_buf_size, DMA_FROM_DEVICE);
if (priv->tx_desc_base)
dma_free_coherent(&priv->pdev->dev,
@@ -147,11 +139,11 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev)
desc + RX_REG_OFFSET_DESC1);
priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i;
- priv->rx_mapping[i] = dma_map_single(&ndev->dev,
+ priv->rx_mapping[i] = dma_map_single(&priv->pdev->dev,
priv->rx_buf[i],
priv->rx_buf_size,
DMA_FROM_DEVICE);
- if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i]))
+ if (dma_mapping_error(&priv->pdev->dev, priv->rx_mapping[i]))
netdev_err(ndev, "DMA mapping error\n");
moxart_desc_write(priv->rx_mapping[i],
@@ -172,9 +164,6 @@ static int moxart_mac_open(struct net_device *ndev)
{
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
- if (!is_valid_ether_addr(ndev->dev_addr))
- return -EADDRNOTAVAIL;
-
napi_enable(&priv->napi);
moxart_mac_reset(ndev);
@@ -193,6 +182,7 @@ static int moxart_mac_open(struct net_device *ndev)
static int moxart_mac_stop(struct net_device *ndev)
{
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+ int i;
napi_disable(&priv->napi);
@@ -204,6 +194,11 @@ static int moxart_mac_stop(struct net_device *ndev)
/* disable all functions */
writel(0, priv->base + REG_MAC_CTRL);
+ /* unmap areas mapped in moxart_mac_setup_desc_ring() */
+ for (i = 0; i < RX_DESC_NUM; i++)
+ dma_unmap_single(&priv->pdev->dev, priv->rx_mapping[i],
+ priv->rx_buf_size, DMA_FROM_DEVICE);
+
return 0;
}
@@ -240,7 +235,7 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
if (len > RX_BUF_SIZE)
len = RX_BUF_SIZE;
- dma_sync_single_for_cpu(&ndev->dev,
+ dma_sync_single_for_cpu(&priv->pdev->dev,
priv->rx_mapping[rx_head],
priv->rx_buf_size, DMA_FROM_DEVICE);
skb = netdev_alloc_skb_ip_align(ndev, len);
@@ -294,7 +289,7 @@ static void moxart_tx_finished(struct net_device *ndev)
unsigned int tx_tail = priv->tx_tail;
while (tx_tail != tx_head) {
- dma_unmap_single(&ndev->dev, priv->tx_mapping[tx_tail],
+ dma_unmap_single(&priv->pdev->dev, priv->tx_mapping[tx_tail],
priv->tx_len[tx_tail], DMA_TO_DEVICE);
ndev->stats.tx_packets++;
@@ -358,9 +353,9 @@ static netdev_tx_t moxart_mac_start_xmit(struct sk_buff *skb,
len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len;
- priv->tx_mapping[tx_head] = dma_map_single(&ndev->dev, skb->data,
+ priv->tx_mapping[tx_head] = dma_map_single(&priv->pdev->dev, skb->data,
len, DMA_TO_DEVICE);
- if (dma_mapping_error(&ndev->dev, priv->tx_mapping[tx_head])) {
+ if (dma_mapping_error(&priv->pdev->dev, priv->tx_mapping[tx_head])) {
netdev_err(ndev, "DMA mapping error\n");
goto out_unlock;
}
@@ -379,7 +374,7 @@ static netdev_tx_t moxart_mac_start_xmit(struct sk_buff *skb,
len = ETH_ZLEN;
}
- dma_sync_single_for_device(&ndev->dev, priv->tx_mapping[tx_head],
+ dma_sync_single_for_device(&priv->pdev->dev, priv->tx_mapping[tx_head],
priv->tx_buf_size, DMA_TO_DEVICE);
txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK);
@@ -488,12 +483,19 @@ static int moxart_mac_probe(struct platform_device *pdev)
}
ndev->base_addr = res->start;
+ ret = platform_get_ethdev_address(p_dev, ndev);
+ if (ret == -EPROBE_DEFER)
+ goto init_fail;
+ if (ret)
+ eth_hw_addr_random(ndev);
+ moxart_update_mac_address(ndev);
+
spin_lock_init(&priv->txlock);
priv->tx_buf_size = TX_BUF_SIZE;
priv->rx_buf_size = RX_BUF_SIZE;
- priv->tx_desc_base = dma_alloc_coherent(&pdev->dev, TX_REG_DESC_SIZE *
+ priv->tx_desc_base = dma_alloc_coherent(p_dev, TX_REG_DESC_SIZE *
TX_DESC_NUM, &priv->tx_base,
GFP_DMA | GFP_KERNEL);
if (!priv->tx_desc_base) {
@@ -501,7 +503,7 @@ static int moxart_mac_probe(struct platform_device *pdev)
goto init_fail;
}
- priv->rx_desc_base = dma_alloc_coherent(&pdev->dev, RX_REG_DESC_SIZE *
+ priv->rx_desc_base = dma_alloc_coherent(p_dev, RX_REG_DESC_SIZE *
RX_DESC_NUM, &priv->rx_base,
GFP_DMA | GFP_KERNEL);
if (!priv->rx_desc_base) {
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index d4649e4ee0e7..306026e6aa11 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -1860,16 +1860,20 @@ void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data)
if (sset != ETH_SS_STATS)
return;
- for (i = 0; i < ocelot->num_stats; i++)
+ for (i = 0; i < OCELOT_NUM_STATS; i++) {
+ if (ocelot->stats_layout[i].name[0] == '\0')
+ continue;
+
memcpy(data + i * ETH_GSTRING_LEN, ocelot->stats_layout[i].name,
ETH_GSTRING_LEN);
+ }
}
EXPORT_SYMBOL(ocelot_get_strings);
/* Caller must hold &ocelot->stats_lock */
static int ocelot_port_update_stats(struct ocelot *ocelot, int port)
{
- unsigned int idx = port * ocelot->num_stats;
+ unsigned int idx = port * OCELOT_NUM_STATS;
struct ocelot_stats_region *region;
int err, j;
@@ -1877,9 +1881,8 @@ static int ocelot_port_update_stats(struct ocelot *ocelot, int port)
ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port), SYS_STAT_CFG);
list_for_each_entry(region, &ocelot->stats_regions, node) {
- err = ocelot_bulk_read_rix(ocelot, SYS_COUNT_RX_OCTETS,
- region->offset, region->buf,
- region->count);
+ err = ocelot_bulk_read(ocelot, region->base, region->buf,
+ region->count);
if (err)
return err;
@@ -1906,13 +1909,13 @@ static void ocelot_check_stats_work(struct work_struct *work)
stats_work);
int i, err;
- mutex_lock(&ocelot->stats_lock);
+ spin_lock(&ocelot->stats_lock);
for (i = 0; i < ocelot->num_phys_ports; i++) {
err = ocelot_port_update_stats(ocelot, i);
if (err)
break;
}
- mutex_unlock(&ocelot->stats_lock);
+ spin_unlock(&ocelot->stats_lock);
if (err)
dev_err(ocelot->dev, "Error %d updating ethtool stats\n", err);
@@ -1925,16 +1928,22 @@ void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data)
{
int i, err;
- mutex_lock(&ocelot->stats_lock);
+ spin_lock(&ocelot->stats_lock);
/* check and update now */
err = ocelot_port_update_stats(ocelot, port);
- /* Copy all counters */
- for (i = 0; i < ocelot->num_stats; i++)
- *data++ = ocelot->stats[port * ocelot->num_stats + i];
+ /* Copy all supported counters */
+ for (i = 0; i < OCELOT_NUM_STATS; i++) {
+ int index = port * OCELOT_NUM_STATS + i;
+
+ if (ocelot->stats_layout[i].name[0] == '\0')
+ continue;
+
+ *data++ = ocelot->stats[index];
+ }
- mutex_unlock(&ocelot->stats_lock);
+ spin_unlock(&ocelot->stats_lock);
if (err)
dev_err(ocelot->dev, "Error %d updating ethtool stats\n", err);
@@ -1943,10 +1952,16 @@ EXPORT_SYMBOL(ocelot_get_ethtool_stats);
int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset)
{
+ int i, num_stats = 0;
+
if (sset != ETH_SS_STATS)
return -EOPNOTSUPP;
- return ocelot->num_stats;
+ for (i = 0; i < OCELOT_NUM_STATS; i++)
+ if (ocelot->stats_layout[i].name[0] != '\0')
+ num_stats++;
+
+ return num_stats;
}
EXPORT_SYMBOL(ocelot_get_sset_count);
@@ -1958,8 +1973,11 @@ static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
INIT_LIST_HEAD(&ocelot->stats_regions);
- for (i = 0; i < ocelot->num_stats; i++) {
- if (region && ocelot->stats_layout[i].offset == last + 1) {
+ for (i = 0; i < OCELOT_NUM_STATS; i++) {
+ if (ocelot->stats_layout[i].name[0] == '\0')
+ continue;
+
+ if (region && ocelot->stats_layout[i].reg == last + 4) {
region->count++;
} else {
region = devm_kzalloc(ocelot->dev, sizeof(*region),
@@ -1967,12 +1985,12 @@ static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
if (!region)
return -ENOMEM;
- region->offset = ocelot->stats_layout[i].offset;
+ region->base = ocelot->stats_layout[i].reg;
region->count = 1;
list_add_tail(&region->node, &ocelot->stats_regions);
}
- last = ocelot->stats_layout[i].offset;
+ last = ocelot->stats_layout[i].reg;
}
list_for_each_entry(region, &ocelot->stats_regions, node) {
@@ -3340,7 +3358,6 @@ static void ocelot_detect_features(struct ocelot *ocelot)
int ocelot_init(struct ocelot *ocelot)
{
- const struct ocelot_stat_layout *stat;
char queue_name[32];
int i, ret;
u32 port;
@@ -3353,17 +3370,13 @@ int ocelot_init(struct ocelot *ocelot)
}
}
- ocelot->num_stats = 0;
- for_each_stat(ocelot, stat)
- ocelot->num_stats++;
-
ocelot->stats = devm_kcalloc(ocelot->dev,
- ocelot->num_phys_ports * ocelot->num_stats,
+ ocelot->num_phys_ports * OCELOT_NUM_STATS,
sizeof(u64), GFP_KERNEL);
if (!ocelot->stats)
return -ENOMEM;
- mutex_init(&ocelot->stats_lock);
+ spin_lock_init(&ocelot->stats_lock);
mutex_init(&ocelot->ptp_lock);
mutex_init(&ocelot->mact_lock);
mutex_init(&ocelot->fwd_domain_lock);
@@ -3511,7 +3524,6 @@ void ocelot_deinit(struct ocelot *ocelot)
cancel_delayed_work(&ocelot->stats_work);
destroy_workqueue(ocelot->stats_queue);
destroy_workqueue(ocelot->owq);
- mutex_destroy(&ocelot->stats_lock);
}
EXPORT_SYMBOL(ocelot_deinit);
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 5e6136e80282..330d30841cdc 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -725,37 +725,42 @@ static void ocelot_get_stats64(struct net_device *dev,
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
int port = priv->port.index;
+ u64 *s;
- /* Configure the port to read the stats from */
- ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port),
- SYS_STAT_CFG);
+ spin_lock(&ocelot->stats_lock);
+
+ s = &ocelot->stats[port * OCELOT_NUM_STATS];
/* Get Rx stats */
- stats->rx_bytes = ocelot_read(ocelot, SYS_COUNT_RX_OCTETS);
- stats->rx_packets = ocelot_read(ocelot, SYS_COUNT_RX_SHORTS) +
- ocelot_read(ocelot, SYS_COUNT_RX_FRAGMENTS) +
- ocelot_read(ocelot, SYS_COUNT_RX_JABBERS) +
- ocelot_read(ocelot, SYS_COUNT_RX_LONGS) +
- ocelot_read(ocelot, SYS_COUNT_RX_64) +
- ocelot_read(ocelot, SYS_COUNT_RX_65_127) +
- ocelot_read(ocelot, SYS_COUNT_RX_128_255) +
- ocelot_read(ocelot, SYS_COUNT_RX_256_1023) +
- ocelot_read(ocelot, SYS_COUNT_RX_1024_1526) +
- ocelot_read(ocelot, SYS_COUNT_RX_1527_MAX);
- stats->multicast = ocelot_read(ocelot, SYS_COUNT_RX_MULTICAST);
+ stats->rx_bytes = s[OCELOT_STAT_RX_OCTETS];
+ stats->rx_packets = s[OCELOT_STAT_RX_SHORTS] +
+ s[OCELOT_STAT_RX_FRAGMENTS] +
+ s[OCELOT_STAT_RX_JABBERS] +
+ s[OCELOT_STAT_RX_LONGS] +
+ s[OCELOT_STAT_RX_64] +
+ s[OCELOT_STAT_RX_65_127] +
+ s[OCELOT_STAT_RX_128_255] +
+ s[OCELOT_STAT_RX_256_511] +
+ s[OCELOT_STAT_RX_512_1023] +
+ s[OCELOT_STAT_RX_1024_1526] +
+ s[OCELOT_STAT_RX_1527_MAX];
+ stats->multicast = s[OCELOT_STAT_RX_MULTICAST];
stats->rx_dropped = dev->stats.rx_dropped;
/* Get Tx stats */
- stats->tx_bytes = ocelot_read(ocelot, SYS_COUNT_TX_OCTETS);
- stats->tx_packets = ocelot_read(ocelot, SYS_COUNT_TX_64) +
- ocelot_read(ocelot, SYS_COUNT_TX_65_127) +
- ocelot_read(ocelot, SYS_COUNT_TX_128_511) +
- ocelot_read(ocelot, SYS_COUNT_TX_512_1023) +
- ocelot_read(ocelot, SYS_COUNT_TX_1024_1526) +
- ocelot_read(ocelot, SYS_COUNT_TX_1527_MAX);
- stats->tx_dropped = ocelot_read(ocelot, SYS_COUNT_TX_DROPS) +
- ocelot_read(ocelot, SYS_COUNT_TX_AGING);
- stats->collisions = ocelot_read(ocelot, SYS_COUNT_TX_COLLISION);
+ stats->tx_bytes = s[OCELOT_STAT_TX_OCTETS];
+ stats->tx_packets = s[OCELOT_STAT_TX_64] +
+ s[OCELOT_STAT_TX_65_127] +
+ s[OCELOT_STAT_TX_128_255] +
+ s[OCELOT_STAT_TX_256_511] +
+ s[OCELOT_STAT_TX_512_1023] +
+ s[OCELOT_STAT_TX_1024_1526] +
+ s[OCELOT_STAT_TX_1527_MAX];
+ stats->tx_dropped = s[OCELOT_STAT_TX_DROPS] +
+ s[OCELOT_STAT_TX_AGED];
+ stats->collisions = s[OCELOT_STAT_TX_COLLISION];
+
+ spin_unlock(&ocelot->stats_lock);
}
static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 961f803aca19..9c488953f541 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -96,101 +96,379 @@ static const struct reg_field ocelot_regfields[REGFIELD_MAX] = {
[SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 12, 4),
};
-static const struct ocelot_stat_layout ocelot_stats_layout[] = {
- { .name = "rx_octets", .offset = 0x00, },
- { .name = "rx_unicast", .offset = 0x01, },
- { .name = "rx_multicast", .offset = 0x02, },
- { .name = "rx_broadcast", .offset = 0x03, },
- { .name = "rx_shorts", .offset = 0x04, },
- { .name = "rx_fragments", .offset = 0x05, },
- { .name = "rx_jabbers", .offset = 0x06, },
- { .name = "rx_crc_align_errs", .offset = 0x07, },
- { .name = "rx_sym_errs", .offset = 0x08, },
- { .name = "rx_frames_below_65_octets", .offset = 0x09, },
- { .name = "rx_frames_65_to_127_octets", .offset = 0x0A, },
- { .name = "rx_frames_128_to_255_octets", .offset = 0x0B, },
- { .name = "rx_frames_256_to_511_octets", .offset = 0x0C, },
- { .name = "rx_frames_512_to_1023_octets", .offset = 0x0D, },
- { .name = "rx_frames_1024_to_1526_octets", .offset = 0x0E, },
- { .name = "rx_frames_over_1526_octets", .offset = 0x0F, },
- { .name = "rx_pause", .offset = 0x10, },
- { .name = "rx_control", .offset = 0x11, },
- { .name = "rx_longs", .offset = 0x12, },
- { .name = "rx_classified_drops", .offset = 0x13, },
- { .name = "rx_red_prio_0", .offset = 0x14, },
- { .name = "rx_red_prio_1", .offset = 0x15, },
- { .name = "rx_red_prio_2", .offset = 0x16, },
- { .name = "rx_red_prio_3", .offset = 0x17, },
- { .name = "rx_red_prio_4", .offset = 0x18, },
- { .name = "rx_red_prio_5", .offset = 0x19, },
- { .name = "rx_red_prio_6", .offset = 0x1A, },
- { .name = "rx_red_prio_7", .offset = 0x1B, },
- { .name = "rx_yellow_prio_0", .offset = 0x1C, },
- { .name = "rx_yellow_prio_1", .offset = 0x1D, },
- { .name = "rx_yellow_prio_2", .offset = 0x1E, },
- { .name = "rx_yellow_prio_3", .offset = 0x1F, },
- { .name = "rx_yellow_prio_4", .offset = 0x20, },
- { .name = "rx_yellow_prio_5", .offset = 0x21, },
- { .name = "rx_yellow_prio_6", .offset = 0x22, },
- { .name = "rx_yellow_prio_7", .offset = 0x23, },
- { .name = "rx_green_prio_0", .offset = 0x24, },
- { .name = "rx_green_prio_1", .offset = 0x25, },
- { .name = "rx_green_prio_2", .offset = 0x26, },
- { .name = "rx_green_prio_3", .offset = 0x27, },
- { .name = "rx_green_prio_4", .offset = 0x28, },
- { .name = "rx_green_prio_5", .offset = 0x29, },
- { .name = "rx_green_prio_6", .offset = 0x2A, },
- { .name = "rx_green_prio_7", .offset = 0x2B, },
- { .name = "tx_octets", .offset = 0x40, },
- { .name = "tx_unicast", .offset = 0x41, },
- { .name = "tx_multicast", .offset = 0x42, },
- { .name = "tx_broadcast", .offset = 0x43, },
- { .name = "tx_collision", .offset = 0x44, },
- { .name = "tx_drops", .offset = 0x45, },
- { .name = "tx_pause", .offset = 0x46, },
- { .name = "tx_frames_below_65_octets", .offset = 0x47, },
- { .name = "tx_frames_65_to_127_octets", .offset = 0x48, },
- { .name = "tx_frames_128_255_octets", .offset = 0x49, },
- { .name = "tx_frames_256_511_octets", .offset = 0x4A, },
- { .name = "tx_frames_512_1023_octets", .offset = 0x4B, },
- { .name = "tx_frames_1024_1526_octets", .offset = 0x4C, },
- { .name = "tx_frames_over_1526_octets", .offset = 0x4D, },
- { .name = "tx_yellow_prio_0", .offset = 0x4E, },
- { .name = "tx_yellow_prio_1", .offset = 0x4F, },
- { .name = "tx_yellow_prio_2", .offset = 0x50, },
- { .name = "tx_yellow_prio_3", .offset = 0x51, },
- { .name = "tx_yellow_prio_4", .offset = 0x52, },
- { .name = "tx_yellow_prio_5", .offset = 0x53, },
- { .name = "tx_yellow_prio_6", .offset = 0x54, },
- { .name = "tx_yellow_prio_7", .offset = 0x55, },
- { .name = "tx_green_prio_0", .offset = 0x56, },
- { .name = "tx_green_prio_1", .offset = 0x57, },
- { .name = "tx_green_prio_2", .offset = 0x58, },
- { .name = "tx_green_prio_3", .offset = 0x59, },
- { .name = "tx_green_prio_4", .offset = 0x5A, },
- { .name = "tx_green_prio_5", .offset = 0x5B, },
- { .name = "tx_green_prio_6", .offset = 0x5C, },
- { .name = "tx_green_prio_7", .offset = 0x5D, },
- { .name = "tx_aged", .offset = 0x5E, },
- { .name = "drop_local", .offset = 0x80, },
- { .name = "drop_tail", .offset = 0x81, },
- { .name = "drop_yellow_prio_0", .offset = 0x82, },
- { .name = "drop_yellow_prio_1", .offset = 0x83, },
- { .name = "drop_yellow_prio_2", .offset = 0x84, },
- { .name = "drop_yellow_prio_3", .offset = 0x85, },
- { .name = "drop_yellow_prio_4", .offset = 0x86, },
- { .name = "drop_yellow_prio_5", .offset = 0x87, },
- { .name = "drop_yellow_prio_6", .offset = 0x88, },
- { .name = "drop_yellow_prio_7", .offset = 0x89, },
- { .name = "drop_green_prio_0", .offset = 0x8A, },
- { .name = "drop_green_prio_1", .offset = 0x8B, },
- { .name = "drop_green_prio_2", .offset = 0x8C, },
- { .name = "drop_green_prio_3", .offset = 0x8D, },
- { .name = "drop_green_prio_4", .offset = 0x8E, },
- { .name = "drop_green_prio_5", .offset = 0x8F, },
- { .name = "drop_green_prio_6", .offset = 0x90, },
- { .name = "drop_green_prio_7", .offset = 0x91, },
- OCELOT_STAT_END
+static const struct ocelot_stat_layout ocelot_stats_layout[OCELOT_NUM_STATS] = {
+ [OCELOT_STAT_RX_OCTETS] = {
+ .name = "rx_octets",
+ .reg = SYS_COUNT_RX_OCTETS,
+ },
+ [OCELOT_STAT_RX_UNICAST] = {
+ .name = "rx_unicast",
+ .reg = SYS_COUNT_RX_UNICAST,
+ },
+ [OCELOT_STAT_RX_MULTICAST] = {
+ .name = "rx_multicast",
+ .reg = SYS_COUNT_RX_MULTICAST,
+ },
+ [OCELOT_STAT_RX_BROADCAST] = {
+ .name = "rx_broadcast",
+ .reg = SYS_COUNT_RX_BROADCAST,
+ },
+ [OCELOT_STAT_RX_SHORTS] = {
+ .name = "rx_shorts",
+ .reg = SYS_COUNT_RX_SHORTS,
+ },
+ [OCELOT_STAT_RX_FRAGMENTS] = {
+ .name = "rx_fragments",
+ .reg = SYS_COUNT_RX_FRAGMENTS,
+ },
+ [OCELOT_STAT_RX_JABBERS] = {
+ .name = "rx_jabbers",
+ .reg = SYS_COUNT_RX_JABBERS,
+ },
+ [OCELOT_STAT_RX_CRC_ALIGN_ERRS] = {
+ .name = "rx_crc_align_errs",
+ .reg = SYS_COUNT_RX_CRC_ALIGN_ERRS,
+ },
+ [OCELOT_STAT_RX_SYM_ERRS] = {
+ .name = "rx_sym_errs",
+ .reg = SYS_COUNT_RX_SYM_ERRS,
+ },
+ [OCELOT_STAT_RX_64] = {
+ .name = "rx_frames_below_65_octets",
+ .reg = SYS_COUNT_RX_64,
+ },
+ [OCELOT_STAT_RX_65_127] = {
+ .name = "rx_frames_65_to_127_octets",
+ .reg = SYS_COUNT_RX_65_127,
+ },
+ [OCELOT_STAT_RX_128_255] = {
+ .name = "rx_frames_128_to_255_octets",
+ .reg = SYS_COUNT_RX_128_255,
+ },
+ [OCELOT_STAT_RX_256_511] = {
+ .name = "rx_frames_256_to_511_octets",
+ .reg = SYS_COUNT_RX_256_511,
+ },
+ [OCELOT_STAT_RX_512_1023] = {
+ .name = "rx_frames_512_to_1023_octets",
+ .reg = SYS_COUNT_RX_512_1023,
+ },
+ [OCELOT_STAT_RX_1024_1526] = {
+ .name = "rx_frames_1024_to_1526_octets",
+ .reg = SYS_COUNT_RX_1024_1526,
+ },
+ [OCELOT_STAT_RX_1527_MAX] = {
+ .name = "rx_frames_over_1526_octets",
+ .reg = SYS_COUNT_RX_1527_MAX,
+ },
+ [OCELOT_STAT_RX_PAUSE] = {
+ .name = "rx_pause",
+ .reg = SYS_COUNT_RX_PAUSE,
+ },
+ [OCELOT_STAT_RX_CONTROL] = {
+ .name = "rx_control",
+ .reg = SYS_COUNT_RX_CONTROL,
+ },
+ [OCELOT_STAT_RX_LONGS] = {
+ .name = "rx_longs",
+ .reg = SYS_COUNT_RX_LONGS,
+ },
+ [OCELOT_STAT_RX_CLASSIFIED_DROPS] = {
+ .name = "rx_classified_drops",
+ .reg = SYS_COUNT_RX_CLASSIFIED_DROPS,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_0] = {
+ .name = "rx_red_prio_0",
+ .reg = SYS_COUNT_RX_RED_PRIO_0,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_1] = {
+ .name = "rx_red_prio_1",
+ .reg = SYS_COUNT_RX_RED_PRIO_1,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_2] = {
+ .name = "rx_red_prio_2",
+ .reg = SYS_COUNT_RX_RED_PRIO_2,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_3] = {
+ .name = "rx_red_prio_3",
+ .reg = SYS_COUNT_RX_RED_PRIO_3,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_4] = {
+ .name = "rx_red_prio_4",
+ .reg = SYS_COUNT_RX_RED_PRIO_4,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_5] = {
+ .name = "rx_red_prio_5",
+ .reg = SYS_COUNT_RX_RED_PRIO_5,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_6] = {
+ .name = "rx_red_prio_6",
+ .reg = SYS_COUNT_RX_RED_PRIO_6,
+ },
+ [OCELOT_STAT_RX_RED_PRIO_7] = {
+ .name = "rx_red_prio_7",
+ .reg = SYS_COUNT_RX_RED_PRIO_7,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_0] = {
+ .name = "rx_yellow_prio_0",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_1] = {
+ .name = "rx_yellow_prio_1",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_2] = {
+ .name = "rx_yellow_prio_2",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_3] = {
+ .name = "rx_yellow_prio_3",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_4] = {
+ .name = "rx_yellow_prio_4",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_5] = {
+ .name = "rx_yellow_prio_5",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_6] = {
+ .name = "rx_yellow_prio_6",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_RX_YELLOW_PRIO_7] = {
+ .name = "rx_yellow_prio_7",
+ .reg = SYS_COUNT_RX_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_0] = {
+ .name = "rx_green_prio_0",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_1] = {
+ .name = "rx_green_prio_1",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_2] = {
+ .name = "rx_green_prio_2",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_3] = {
+ .name = "rx_green_prio_3",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_4] = {
+ .name = "rx_green_prio_4",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_5] = {
+ .name = "rx_green_prio_5",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_6] = {
+ .name = "rx_green_prio_6",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_RX_GREEN_PRIO_7] = {
+ .name = "rx_green_prio_7",
+ .reg = SYS_COUNT_RX_GREEN_PRIO_7,
+ },
+ [OCELOT_STAT_TX_OCTETS] = {
+ .name = "tx_octets",
+ .reg = SYS_COUNT_TX_OCTETS,
+ },
+ [OCELOT_STAT_TX_UNICAST] = {
+ .name = "tx_unicast",
+ .reg = SYS_COUNT_TX_UNICAST,
+ },
+ [OCELOT_STAT_TX_MULTICAST] = {
+ .name = "tx_multicast",
+ .reg = SYS_COUNT_TX_MULTICAST,
+ },
+ [OCELOT_STAT_TX_BROADCAST] = {
+ .name = "tx_broadcast",
+ .reg = SYS_COUNT_TX_BROADCAST,
+ },
+ [OCELOT_STAT_TX_COLLISION] = {
+ .name = "tx_collision",
+ .reg = SYS_COUNT_TX_COLLISION,
+ },
+ [OCELOT_STAT_TX_DROPS] = {
+ .name = "tx_drops",
+ .reg = SYS_COUNT_TX_DROPS,
+ },
+ [OCELOT_STAT_TX_PAUSE] = {
+ .name = "tx_pause",
+ .reg = SYS_COUNT_TX_PAUSE,
+ },
+ [OCELOT_STAT_TX_64] = {
+ .name = "tx_frames_below_65_octets",
+ .reg = SYS_COUNT_TX_64,
+ },
+ [OCELOT_STAT_TX_65_127] = {
+ .name = "tx_frames_65_to_127_octets",
+ .reg = SYS_COUNT_TX_65_127,
+ },
+ [OCELOT_STAT_TX_128_255] = {
+ .name = "tx_frames_128_255_octets",
+ .reg = SYS_COUNT_TX_128_255,
+ },
+ [OCELOT_STAT_TX_256_511] = {
+ .name = "tx_frames_256_511_octets",
+ .reg = SYS_COUNT_TX_256_511,
+ },
+ [OCELOT_STAT_TX_512_1023] = {
+ .name = "tx_frames_512_1023_octets",
+ .reg = SYS_COUNT_TX_512_1023,
+ },
+ [OCELOT_STAT_TX_1024_1526] = {
+ .name = "tx_frames_1024_1526_octets",
+ .reg = SYS_COUNT_TX_1024_1526,
+ },
+ [OCELOT_STAT_TX_1527_MAX] = {
+ .name = "tx_frames_over_1526_octets",
+ .reg = SYS_COUNT_TX_1527_MAX,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_0] = {
+ .name = "tx_yellow_prio_0",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_1] = {
+ .name = "tx_yellow_prio_1",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_2] = {
+ .name = "tx_yellow_prio_2",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_3] = {
+ .name = "tx_yellow_prio_3",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_4] = {
+ .name = "tx_yellow_prio_4",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_5] = {
+ .name = "tx_yellow_prio_5",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_6] = {
+ .name = "tx_yellow_prio_6",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_TX_YELLOW_PRIO_7] = {
+ .name = "tx_yellow_prio_7",
+ .reg = SYS_COUNT_TX_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_0] = {
+ .name = "tx_green_prio_0",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_1] = {
+ .name = "tx_green_prio_1",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_2] = {
+ .name = "tx_green_prio_2",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_3] = {
+ .name = "tx_green_prio_3",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_4] = {
+ .name = "tx_green_prio_4",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_5] = {
+ .name = "tx_green_prio_5",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_6] = {
+ .name = "tx_green_prio_6",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_TX_GREEN_PRIO_7] = {
+ .name = "tx_green_prio_7",
+ .reg = SYS_COUNT_TX_GREEN_PRIO_7,
+ },
+ [OCELOT_STAT_TX_AGED] = {
+ .name = "tx_aged",
+ .reg = SYS_COUNT_TX_AGING,
+ },
+ [OCELOT_STAT_DROP_LOCAL] = {
+ .name = "drop_local",
+ .reg = SYS_COUNT_DROP_LOCAL,
+ },
+ [OCELOT_STAT_DROP_TAIL] = {
+ .name = "drop_tail",
+ .reg = SYS_COUNT_DROP_TAIL,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_0] = {
+ .name = "drop_yellow_prio_0",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_0,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_1] = {
+ .name = "drop_yellow_prio_1",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_1,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_2] = {
+ .name = "drop_yellow_prio_2",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_2,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_3] = {
+ .name = "drop_yellow_prio_3",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_3,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_4] = {
+ .name = "drop_yellow_prio_4",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_4,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_5] = {
+ .name = "drop_yellow_prio_5",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_5,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_6] = {
+ .name = "drop_yellow_prio_6",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_6,
+ },
+ [OCELOT_STAT_DROP_YELLOW_PRIO_7] = {
+ .name = "drop_yellow_prio_7",
+ .reg = SYS_COUNT_DROP_YELLOW_PRIO_7,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_0] = {
+ .name = "drop_green_prio_0",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_0,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_1] = {
+ .name = "drop_green_prio_1",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_1,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_2] = {
+ .name = "drop_green_prio_2",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_2,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_3] = {
+ .name = "drop_green_prio_3",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_3,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_4] = {
+ .name = "drop_green_prio_4",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_4,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_5] = {
+ .name = "drop_green_prio_5",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_5,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_6] = {
+ .name = "drop_green_prio_6",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_6,
+ },
+ [OCELOT_STAT_DROP_GREEN_PRIO_7] = {
+ .name = "drop_green_prio_7",
+ .reg = SYS_COUNT_DROP_GREEN_PRIO_7,
+ },
};
static void ocelot_pll5_init(struct ocelot *ocelot)
diff --git a/drivers/net/ethernet/mscc/vsc7514_regs.c b/drivers/net/ethernet/mscc/vsc7514_regs.c
index c2af4eb8ca5d..9cf82ecf191c 100644
--- a/drivers/net/ethernet/mscc/vsc7514_regs.c
+++ b/drivers/net/ethernet/mscc/vsc7514_regs.c
@@ -180,13 +180,38 @@ const u32 vsc7514_sys_regmap[] = {
REG(SYS_COUNT_RX_64, 0x000024),
REG(SYS_COUNT_RX_65_127, 0x000028),
REG(SYS_COUNT_RX_128_255, 0x00002c),
- REG(SYS_COUNT_RX_256_1023, 0x000030),
- REG(SYS_COUNT_RX_1024_1526, 0x000034),
- REG(SYS_COUNT_RX_1527_MAX, 0x000038),
- REG(SYS_COUNT_RX_PAUSE, 0x00003c),
- REG(SYS_COUNT_RX_CONTROL, 0x000040),
- REG(SYS_COUNT_RX_LONGS, 0x000044),
- REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x000048),
+ REG(SYS_COUNT_RX_256_511, 0x000030),
+ REG(SYS_COUNT_RX_512_1023, 0x000034),
+ REG(SYS_COUNT_RX_1024_1526, 0x000038),
+ REG(SYS_COUNT_RX_1527_MAX, 0x00003c),
+ REG(SYS_COUNT_RX_PAUSE, 0x000040),
+ REG(SYS_COUNT_RX_CONTROL, 0x000044),
+ REG(SYS_COUNT_RX_LONGS, 0x000048),
+ REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x00004c),
+ REG(SYS_COUNT_RX_RED_PRIO_0, 0x000050),
+ REG(SYS_COUNT_RX_RED_PRIO_1, 0x000054),
+ REG(SYS_COUNT_RX_RED_PRIO_2, 0x000058),
+ REG(SYS_COUNT_RX_RED_PRIO_3, 0x00005c),
+ REG(SYS_COUNT_RX_RED_PRIO_4, 0x000060),
+ REG(SYS_COUNT_RX_RED_PRIO_5, 0x000064),
+ REG(SYS_COUNT_RX_RED_PRIO_6, 0x000068),
+ REG(SYS_COUNT_RX_RED_PRIO_7, 0x00006c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_0, 0x000070),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_1, 0x000074),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_2, 0x000078),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_3, 0x00007c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_4, 0x000080),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_5, 0x000084),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_6, 0x000088),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_7, 0x00008c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_0, 0x000090),
+ REG(SYS_COUNT_RX_GREEN_PRIO_1, 0x000094),
+ REG(SYS_COUNT_RX_GREEN_PRIO_2, 0x000098),
+ REG(SYS_COUNT_RX_GREEN_PRIO_3, 0x00009c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_4, 0x0000a0),
+ REG(SYS_COUNT_RX_GREEN_PRIO_5, 0x0000a4),
+ REG(SYS_COUNT_RX_GREEN_PRIO_6, 0x0000a8),
+ REG(SYS_COUNT_RX_GREEN_PRIO_7, 0x0000ac),
REG(SYS_COUNT_TX_OCTETS, 0x000100),
REG(SYS_COUNT_TX_UNICAST, 0x000104),
REG(SYS_COUNT_TX_MULTICAST, 0x000108),
@@ -196,11 +221,46 @@ const u32 vsc7514_sys_regmap[] = {
REG(SYS_COUNT_TX_PAUSE, 0x000118),
REG(SYS_COUNT_TX_64, 0x00011c),
REG(SYS_COUNT_TX_65_127, 0x000120),
- REG(SYS_COUNT_TX_128_511, 0x000124),
- REG(SYS_COUNT_TX_512_1023, 0x000128),
- REG(SYS_COUNT_TX_1024_1526, 0x00012c),
- REG(SYS_COUNT_TX_1527_MAX, 0x000130),
- REG(SYS_COUNT_TX_AGING, 0x000170),
+ REG(SYS_COUNT_TX_128_255, 0x000124),
+ REG(SYS_COUNT_TX_256_511, 0x000128),
+ REG(SYS_COUNT_TX_512_1023, 0x00012c),
+ REG(SYS_COUNT_TX_1024_1526, 0x000130),
+ REG(SYS_COUNT_TX_1527_MAX, 0x000134),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_0, 0x000138),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_1, 0x00013c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_2, 0x000140),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_3, 0x000144),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_4, 0x000148),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_5, 0x00014c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_6, 0x000150),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_7, 0x000154),
+ REG(SYS_COUNT_TX_GREEN_PRIO_0, 0x000158),
+ REG(SYS_COUNT_TX_GREEN_PRIO_1, 0x00015c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_2, 0x000160),
+ REG(SYS_COUNT_TX_GREEN_PRIO_3, 0x000164),
+ REG(SYS_COUNT_TX_GREEN_PRIO_4, 0x000168),
+ REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00016c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000170),
+ REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000174),
+ REG(SYS_COUNT_TX_AGING, 0x000178),
+ REG(SYS_COUNT_DROP_LOCAL, 0x000200),
+ REG(SYS_COUNT_DROP_TAIL, 0x000204),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000208),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_1, 0x00020c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_2, 0x000210),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_3, 0x000214),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000218),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00021c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000220),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000224),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000228),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00022c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000230),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000234),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000238),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00023c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000240),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000244),
REG(SYS_RESET_CFG, 0x000508),
REG(SYS_CMID, 0x00050c),
REG(SYS_VLAN_ETYPE_CFG, 0x000510),
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index c922dfab8080..eeb1455a4e5d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -1395,6 +1395,8 @@ nfp_port_get_module_info(struct net_device *netdev,
u8 data;
port = nfp_port_from_netdev(netdev);
+ /* update port state to get latest interface */
+ set_bit(NFP_PORT_CHANGED, &port->flags);
eth_port = nfp_port_get_eth_port(port);
if (!eth_port)
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
index 34c0d2ddf9ef..a8286d0032d1 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
@@ -874,7 +874,6 @@ area_cache_get(struct nfp_cpp *cpp, u32 id,
}
/* Adjust the start address to be cache size aligned */
- cache->id = id;
cache->addr = addr & ~(u64)(cache->size - 1);
/* Re-init to the new ID and address */
@@ -894,6 +893,8 @@ area_cache_get(struct nfp_cpp *cpp, u32 id,
return NULL;
}
+ cache->id = id;
+
exit:
/* Adjust offset */
*offset = addr - cache->addr;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 1443f788ee37..0be79c516781 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -1564,8 +1564,67 @@ static int ionic_set_features(struct net_device *netdev,
return err;
}
+static int ionic_set_attr_mac(struct ionic_lif *lif, u8 *mac)
+{
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.lif_setattr = {
+ .opcode = IONIC_CMD_LIF_SETATTR,
+ .index = cpu_to_le16(lif->index),
+ .attr = IONIC_LIF_ATTR_MAC,
+ },
+ };
+
+ ether_addr_copy(ctx.cmd.lif_setattr.mac, mac);
+ return ionic_adminq_post_wait(lif, &ctx);
+}
+
+static int ionic_get_attr_mac(struct ionic_lif *lif, u8 *mac_addr)
+{
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.lif_getattr = {
+ .opcode = IONIC_CMD_LIF_GETATTR,
+ .index = cpu_to_le16(lif->index),
+ .attr = IONIC_LIF_ATTR_MAC,
+ },
+ };
+ int err;
+
+ err = ionic_adminq_post_wait(lif, &ctx);
+ if (err)
+ return err;
+
+ ether_addr_copy(mac_addr, ctx.comp.lif_getattr.mac);
+ return 0;
+}
+
+static int ionic_program_mac(struct ionic_lif *lif, u8 *mac)
+{
+ u8 get_mac[ETH_ALEN];
+ int err;
+
+ err = ionic_set_attr_mac(lif, mac);
+ if (err)
+ return err;
+
+ err = ionic_get_attr_mac(lif, get_mac);
+ if (err)
+ return err;
+
+ /* To deal with older firmware that silently ignores the set attr mac:
+ * doesn't actually change the mac and doesn't return an error, so we
+ * do the get attr to verify whether or not the set actually happened
+ */
+ if (!ether_addr_equal(get_mac, mac))
+ return 1;
+
+ return 0;
+}
+
static int ionic_set_mac_address(struct net_device *netdev, void *sa)
{
+ struct ionic_lif *lif = netdev_priv(netdev);
struct sockaddr *addr = sa;
u8 *mac;
int err;
@@ -1574,6 +1633,14 @@ static int ionic_set_mac_address(struct net_device *netdev, void *sa)
if (ether_addr_equal(netdev->dev_addr, mac))
return 0;
+ err = ionic_program_mac(lif, mac);
+ if (err < 0)
+ return err;
+
+ if (err > 0)
+ netdev_dbg(netdev, "%s: SET and GET ATTR Mac are not equal-due to old FW running\n",
+ __func__);
+
err = eth_prepare_mac_addr_change(netdev, addr);
if (err)
return err;
@@ -2963,6 +3030,9 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
mutex_lock(&lif->queue_lock);
+ if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state))
+ dev_info(ionic->dev, "FW Up: clearing broken state\n");
+
err = ionic_qcqs_alloc(lif);
if (err)
goto err_unlock;
@@ -3169,6 +3239,7 @@ static int ionic_station_set(struct ionic_lif *lif)
.attr = IONIC_LIF_ATTR_MAC,
},
};
+ u8 mac_address[ETH_ALEN];
struct sockaddr addr;
int err;
@@ -3177,8 +3248,23 @@ static int ionic_station_set(struct ionic_lif *lif)
return err;
netdev_dbg(lif->netdev, "found initial MAC addr %pM\n",
ctx.comp.lif_getattr.mac);
- if (is_zero_ether_addr(ctx.comp.lif_getattr.mac))
- return 0;
+ ether_addr_copy(mac_address, ctx.comp.lif_getattr.mac);
+
+ if (is_zero_ether_addr(mac_address)) {
+ eth_hw_addr_random(netdev);
+ netdev_dbg(netdev, "Random Mac generated: %pM\n", netdev->dev_addr);
+ ether_addr_copy(mac_address, netdev->dev_addr);
+
+ err = ionic_program_mac(lif, mac_address);
+ if (err < 0)
+ return err;
+
+ if (err > 0) {
+ netdev_dbg(netdev, "%s:SET/GET ATTR Mac are not same-due to old FW running\n",
+ __func__);
+ return 0;
+ }
+ }
if (!is_zero_ether_addr(netdev->dev_addr)) {
/* If the netdev mac is non-zero and doesn't match the default
@@ -3186,12 +3272,11 @@ static int ionic_station_set(struct ionic_lif *lif)
* likely here again after a fw-upgrade reset. We need to be
* sure the netdev mac is in our filter list.
*/
- if (!ether_addr_equal(ctx.comp.lif_getattr.mac,
- netdev->dev_addr))
+ if (!ether_addr_equal(mac_address, netdev->dev_addr))
ionic_lif_addr_add(lif, netdev->dev_addr);
} else {
/* Update the netdev mac with the device's mac */
- memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
+ ether_addr_copy(addr.sa_data, mac_address);
addr.sa_family = AF_INET;
err = eth_prepare_mac_addr_change(netdev, &addr);
if (err) {
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 4029b4e021f8..56f93b030551 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -474,8 +474,8 @@ try_again:
ionic_opcode_to_str(opcode), opcode,
ionic_error_to_str(err), err);
- msleep(1000);
iowrite32(0, &idev->dev_cmd_regs->done);
+ msleep(1000);
iowrite32(1, &idev->dev_cmd_regs->doorbell);
goto try_again;
}
@@ -488,6 +488,8 @@ try_again:
return ionic_error_to_errno(err);
}
+ ionic_dev_cmd_clean(ionic);
+
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 52f9ed8db9c9..4f2b82a884b9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -1134,6 +1134,7 @@ static void intel_eth_pci_remove(struct pci_dev *pdev)
stmmac_dvr_remove(&pdev->dev);
+ clk_disable_unprepare(priv->plat->stmmac_clk);
clk_unregister_fixed_rate(priv->plat->stmmac_clk);
pcim_iounmap_regions(pdev, BIT(0));
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index caa4bfc4c1d6..9b6138b11776 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -258,14 +258,18 @@ EXPORT_SYMBOL_GPL(stmmac_set_mac_addr);
/* Enable disable MAC RX/TX */
void stmmac_set_mac(void __iomem *ioaddr, bool enable)
{
- u32 value = readl(ioaddr + MAC_CTRL_REG);
+ u32 old_val, value;
+
+ old_val = readl(ioaddr + MAC_CTRL_REG);
+ value = old_val;
if (enable)
value |= MAC_ENABLE_RX | MAC_ENABLE_TX;
else
value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX);
- writel(value, ioaddr + MAC_CTRL_REG);
+ if (value != old_val)
+ writel(value, ioaddr + MAC_CTRL_REG);
}
void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 070b5ef165eb..592d29abcb1c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -986,10 +986,10 @@ static void stmmac_mac_link_up(struct phylink_config *config,
bool tx_pause, bool rx_pause)
{
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
- u32 ctrl;
+ u32 old_ctrl, ctrl;
- ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
- ctrl &= ~priv->hw->link.speed_mask;
+ old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
+ ctrl = old_ctrl & ~priv->hw->link.speed_mask;
if (interface == PHY_INTERFACE_MODE_USXGMII) {
switch (speed) {
@@ -1064,7 +1064,8 @@ static void stmmac_mac_link_up(struct phylink_config *config,
if (tx_pause && rx_pause)
stmmac_mac_flow_ctrl(priv, duplex);
- writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
+ if (ctrl != old_ctrl)
+ writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
stmmac_mac_set(priv, priv->ioaddr, true);
if (phy && priv->dma_cap.eee) {
diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig
index baa1f0a5cc37..b4a4fa0a58f8 100644
--- a/drivers/net/ethernet/wangxun/Kconfig
+++ b/drivers/net/ethernet/wangxun/Kconfig
@@ -7,12 +7,12 @@ config NET_VENDOR_WANGXUN
bool "Wangxun devices"
default y
help
- If you have a network (Ethernet) card belonging to this class, say Y.
+ If you have a network (Ethernet) card from Wangxun(R), say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
- the questions about Intel cards. If you say Y, you will be asked for
- your specific card in the following questions.
+ the questions about Wangxun(R) cards. If you say Y, you will
+ be asked for your specific card in the following questions.
if NET_VENDOR_WANGXUN
diff --git a/drivers/net/fddi/skfp/h/hwmtm.h b/drivers/net/fddi/skfp/h/hwmtm.h
index 76c4a709d73d..e97db826cdd4 100644
--- a/drivers/net/fddi/skfp/h/hwmtm.h
+++ b/drivers/net/fddi/skfp/h/hwmtm.h
@@ -348,7 +348,7 @@ do { \
* This macro is invoked by the OS-specific before it left the
* function mac_drv_rx_complete. This macro calls mac_drv_fill_rxd
* if the number of used RxDs is equal or lower than the
- * the given low water mark.
+ * given low water mark.
*
* para low_water low water mark of used RxD's
*
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 018d365f9deb..7962c37b3f14 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -797,7 +797,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
struct geneve_sock *gs4,
struct flowi4 *fl4,
const struct ip_tunnel_info *info,
- __be16 dport, __be16 sport)
+ __be16 dport, __be16 sport,
+ __u8 *full_tos)
{
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct geneve_dev *geneve = netdev_priv(dev);
@@ -823,6 +824,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
use_cache = false;
}
fl4->flowi4_tos = RT_TOS(tos);
+ if (full_tos)
+ *full_tos = tos;
dst_cache = (struct dst_cache *)&info->dst_cache;
if (use_cache) {
@@ -876,8 +879,7 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
use_cache = false;
}
- fl6->flowlabel = ip6_make_flowinfo(RT_TOS(prio),
- info->key.label);
+ fl6->flowlabel = ip6_make_flowinfo(prio, info->key.label);
dst_cache = (struct dst_cache *)&info->dst_cache;
if (use_cache) {
dst = dst_cache_get_ip6(dst_cache, &fl6->saddr);
@@ -911,6 +913,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
const struct ip_tunnel_key *key = &info->key;
struct rtable *rt;
struct flowi4 fl4;
+ __u8 full_tos;
__u8 tos, ttl;
__be16 df = 0;
__be16 sport;
@@ -921,7 +924,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
- geneve->cfg.info.key.tp_dst, sport);
+ geneve->cfg.info.key.tp_dst, sport, &full_tos);
if (IS_ERR(rt))
return PTR_ERR(rt);
@@ -965,7 +968,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
} else {
- tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, ip_hdr(skb), skb);
+ tos = ip_tunnel_ecn_encap(full_tos, ip_hdr(skb), skb);
if (geneve->cfg.ttl_inherit)
ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb);
else
@@ -1149,7 +1152,7 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
1, USHRT_MAX, true);
rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
- geneve->cfg.info.key.tp_dst, sport);
+ geneve->cfg.info.key.tp_dst, sport, NULL);
if (IS_ERR(rt))
return PTR_ERR(rt);
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index 1e9eae208e44..53a1dbeaffa6 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -568,7 +568,7 @@ static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
}
/* Align the address down and the size up to a page boundary */
- addr = qcom_smem_virt_to_phys(virt) & PAGE_MASK;
+ addr = qcom_smem_virt_to_phys(virt);
phys = addr & PAGE_MASK;
size = PAGE_ALIGN(size + addr - phys);
iova = phys; /* We just want a direct mapping */
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
index a5b355384d4a..6f35438cda89 100644
--- a/drivers/net/ipa/ipa_reg.h
+++ b/drivers/net/ipa/ipa_reg.h
@@ -48,7 +48,7 @@ struct ipa;
*
* The offset of registers related to resource types is computed by a macro
* that is supplied a parameter "rt". The "rt" represents a resource type,
- * which is is a member of the ipa_resource_type_src enumerated type for
+ * which is a member of the ipa_resource_type_src enumerated type for
* source endpoint resources or the ipa_resource_type_dst enumerated type
* for destination endpoint resources.
*
diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c
index ef02f2cf5ce1..cbabca167a07 100644
--- a/drivers/net/ipvlan/ipvtap.c
+++ b/drivers/net/ipvlan/ipvtap.c
@@ -194,7 +194,7 @@ static struct notifier_block ipvtap_notifier_block __read_mostly = {
.notifier_call = ipvtap_device_event,
};
-static int ipvtap_init(void)
+static int __init ipvtap_init(void)
{
int err;
@@ -228,7 +228,7 @@ out1:
}
module_init(ipvtap_init);
-static void ipvtap_exit(void)
+static void __exit ipvtap_exit(void)
{
rtnl_link_unregister(&ipvtap_link_ops);
unregister_netdevice_notifier(&ipvtap_notifier_block);
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index f1683ce6b561..c6d271e5687e 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -162,6 +162,19 @@ static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
return sa;
}
+static struct macsec_rx_sa *macsec_active_rxsa_get(struct macsec_rx_sc *rx_sc)
+{
+ struct macsec_rx_sa *sa = NULL;
+ int an;
+
+ for (an = 0; an < MACSEC_NUM_AN; an++) {
+ sa = macsec_rxsa_get(rx_sc->sa[an]);
+ if (sa)
+ break;
+ }
+ return sa;
+}
+
static void free_rx_sc_rcu(struct rcu_head *head)
{
struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);
@@ -449,11 +462,6 @@ static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
return (struct macsec_eth_header *)skb_mac_header(skb);
}
-static sci_t dev_to_sci(struct net_device *dev, __be16 port)
-{
- return make_sci(dev->dev_addr, port);
-}
-
static void __macsec_pn_wrapped(struct macsec_secy *secy,
struct macsec_tx_sa *tx_sa)
{
@@ -500,18 +508,28 @@ static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
skb->protocol = eth_hdr(skb)->h_proto;
}
+static unsigned int macsec_msdu_len(struct sk_buff *skb)
+{
+ struct macsec_dev *macsec = macsec_priv(skb->dev);
+ struct macsec_secy *secy = &macsec->secy;
+ bool sci_present = macsec_skb_cb(skb)->has_sci;
+
+ return skb->len - macsec_hdr_len(sci_present) - secy->icv_len;
+}
+
static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
struct macsec_tx_sa *tx_sa)
{
+ unsigned int msdu_len = macsec_msdu_len(skb);
struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);
u64_stats_update_begin(&txsc_stats->syncp);
if (tx_sc->encrypt) {
- txsc_stats->stats.OutOctetsEncrypted += skb->len;
+ txsc_stats->stats.OutOctetsEncrypted += msdu_len;
txsc_stats->stats.OutPktsEncrypted++;
this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
} else {
- txsc_stats->stats.OutOctetsProtected += skb->len;
+ txsc_stats->stats.OutOctetsProtected += msdu_len;
txsc_stats->stats.OutPktsProtected++;
this_cpu_inc(tx_sa->stats->OutPktsProtected);
}
@@ -541,9 +559,10 @@ static void macsec_encrypt_done(struct crypto_async_request *base, int err)
aead_request_free(macsec_skb_cb(skb)->req);
rcu_read_lock_bh();
- macsec_encrypt_finish(skb, dev);
macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
- len = skb->len;
+ /* packet is encrypted/protected so tx_bytes must be calculated */
+ len = macsec_msdu_len(skb) + 2 * ETH_ALEN;
+ macsec_encrypt_finish(skb, dev);
ret = dev_queue_xmit(skb);
count_tx(dev, ret, len);
rcu_read_unlock_bh();
@@ -702,6 +721,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
macsec_skb_cb(skb)->req = req;
macsec_skb_cb(skb)->tx_sa = tx_sa;
+ macsec_skb_cb(skb)->has_sci = sci_present;
aead_request_set_callback(req, 0, macsec_encrypt_done, skb);
dev_hold(skb->dev);
@@ -743,15 +763,17 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsLate++;
u64_stats_update_end(&rxsc_stats->syncp);
+ secy->netdev->stats.rx_dropped++;
return false;
}
if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
+ unsigned int msdu_len = macsec_msdu_len(skb);
u64_stats_update_begin(&rxsc_stats->syncp);
if (hdr->tci_an & MACSEC_TCI_E)
- rxsc_stats->stats.InOctetsDecrypted += skb->len;
+ rxsc_stats->stats.InOctetsDecrypted += msdu_len;
else
- rxsc_stats->stats.InOctetsValidated += skb->len;
+ rxsc_stats->stats.InOctetsValidated += msdu_len;
u64_stats_update_end(&rxsc_stats->syncp);
}
@@ -764,6 +786,8 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsNotValid++;
u64_stats_update_end(&rxsc_stats->syncp);
+ this_cpu_inc(rx_sa->stats->InPktsNotValid);
+ secy->netdev->stats.rx_errors++;
return false;
}
@@ -856,9 +880,9 @@ static void macsec_decrypt_done(struct crypto_async_request *base, int err)
macsec_finalize_skb(skb, macsec->secy.icv_len,
macsec_extra_len(macsec_skb_cb(skb)->has_sci));
+ len = skb->len;
macsec_reset_skb(skb, macsec->secy.netdev);
- len = skb->len;
if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
count_rx(dev, len);
@@ -1049,6 +1073,7 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsNoTag++;
u64_stats_update_end(&secy_stats->syncp);
+ macsec->secy.netdev->stats.rx_dropped++;
continue;
}
@@ -1158,6 +1183,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsBadTag++;
u64_stats_update_end(&secy_stats->syncp);
+ secy->netdev->stats.rx_errors++;
goto drop_nosa;
}
@@ -1168,11 +1194,15 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
/* If validateFrames is Strict or the C bit in the
* SecTAG is set, discard
*/
+ struct macsec_rx_sa *active_rx_sa = macsec_active_rxsa_get(rx_sc);
if (hdr->tci_an & MACSEC_TCI_C ||
secy->validate_frames == MACSEC_VALIDATE_STRICT) {
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsNotUsingSA++;
u64_stats_update_end(&rxsc_stats->syncp);
+ secy->netdev->stats.rx_errors++;
+ if (active_rx_sa)
+ this_cpu_inc(active_rx_sa->stats->InPktsNotUsingSA);
goto drop_nosa;
}
@@ -1182,6 +1212,8 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsUnusedSA++;
u64_stats_update_end(&rxsc_stats->syncp);
+ if (active_rx_sa)
+ this_cpu_inc(active_rx_sa->stats->InPktsUnusedSA);
goto deliver;
}
@@ -1202,6 +1234,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsLate++;
u64_stats_update_end(&rxsc_stats->syncp);
+ macsec->secy.netdev->stats.rx_dropped++;
goto drop;
}
}
@@ -1230,6 +1263,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
deliver:
macsec_finalize_skb(skb, secy->icv_len,
macsec_extra_len(macsec_skb_cb(skb)->has_sci));
+ len = skb->len;
macsec_reset_skb(skb, secy->netdev);
if (rx_sa)
@@ -1237,7 +1271,6 @@ deliver:
macsec_rxsc_put(rx_sc);
skb_orphan(skb);
- len = skb->len;
ret = gro_cells_receive(&macsec->gro_cells, skb);
if (ret == NET_RX_SUCCESS)
count_rx(dev, len);
@@ -1279,6 +1312,7 @@ nosci:
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsNoSCI++;
u64_stats_update_end(&secy_stats->syncp);
+ macsec->secy.netdev->stats.rx_errors++;
continue;
}
@@ -3404,6 +3438,7 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+ len = skb->len;
skb = macsec_encrypt(skb, dev);
if (IS_ERR(skb)) {
if (PTR_ERR(skb) != -EINPROGRESS)
@@ -3414,7 +3449,6 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
macsec_encrypt_finish(skb, dev);
- len = skb->len;
ret = dev_queue_xmit(skb);
count_tx(dev, ret, len);
return ret;
@@ -3622,7 +3656,6 @@ static int macsec_set_mac_address(struct net_device *dev, void *p)
out:
eth_hw_addr_set(dev, addr->sa_data);
- macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES);
/* If h/w offloading is available, propagate to the device */
if (macsec_is_offloaded(macsec)) {
@@ -3662,6 +3695,7 @@ static void macsec_get_stats64(struct net_device *dev,
s->rx_dropped = dev->stats.rx_dropped;
s->tx_dropped = dev->stats.tx_dropped;
+ s->rx_errors = dev->stats.rx_errors;
}
static int macsec_get_iflink(const struct net_device *dev)
@@ -3960,6 +3994,11 @@ static bool sci_exists(struct net_device *dev, sci_t sci)
return false;
}
+static sci_t dev_to_sci(struct net_device *dev, __be16 port)
+{
+ return make_sci(dev->dev_addr, port);
+}
+
static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
{
struct macsec_dev *macsec = macsec_priv(dev);
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 1e38039c5c56..6939563d3b7c 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -535,7 +535,7 @@ static int dp83867_of_init_io_impedance(struct phy_device *phydev)
cell = of_nvmem_cell_get(of_node, "io_impedance_ctrl");
if (IS_ERR(cell)) {
ret = PTR_ERR(cell);
- if (ret != -ENOENT)
+ if (ret != -ENOENT && ret != -EOPNOTSUPP)
return phydev_err_probe(phydev, ret,
"failed to get nvmem cell io_impedance_ctrl\n");
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index 29b1df03f3e8..a87a4b3ffce4 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -190,44 +190,42 @@ EXPORT_SYMBOL_GPL(genphy_c45_pma_setup_forced);
*/
static int genphy_c45_baset1_an_config_aneg(struct phy_device *phydev)
{
+ u16 adv_l_mask, adv_l = 0;
+ u16 adv_m_mask, adv_m = 0;
int changed = 0;
- u16 adv_l = 0;
- u16 adv_m = 0;
int ret;
+ adv_l_mask = MDIO_AN_T1_ADV_L_FORCE_MS | MDIO_AN_T1_ADV_L_PAUSE_CAP |
+ MDIO_AN_T1_ADV_L_PAUSE_ASYM;
+ adv_m_mask = MDIO_AN_T1_ADV_M_MST | MDIO_AN_T1_ADV_M_B10L;
+
switch (phydev->master_slave_set) {
case MASTER_SLAVE_CFG_MASTER_FORCE:
+ adv_m |= MDIO_AN_T1_ADV_M_MST;
+ fallthrough;
case MASTER_SLAVE_CFG_SLAVE_FORCE:
adv_l |= MDIO_AN_T1_ADV_L_FORCE_MS;
break;
case MASTER_SLAVE_CFG_MASTER_PREFERRED:
+ adv_m |= MDIO_AN_T1_ADV_M_MST;
+ fallthrough;
case MASTER_SLAVE_CFG_SLAVE_PREFERRED:
break;
case MASTER_SLAVE_CFG_UNKNOWN:
case MASTER_SLAVE_CFG_UNSUPPORTED:
- return 0;
+ /* if master/slave role is not specified, do not overwrite it */
+ adv_l_mask &= ~MDIO_AN_T1_ADV_L_FORCE_MS;
+ adv_m_mask &= ~MDIO_AN_T1_ADV_M_MST;
+ break;
default:
phydev_warn(phydev, "Unsupported Master/Slave mode\n");
return -EOPNOTSUPP;
}
- switch (phydev->master_slave_set) {
- case MASTER_SLAVE_CFG_MASTER_FORCE:
- case MASTER_SLAVE_CFG_MASTER_PREFERRED:
- adv_m |= MDIO_AN_T1_ADV_M_MST;
- break;
- case MASTER_SLAVE_CFG_SLAVE_FORCE:
- case MASTER_SLAVE_CFG_SLAVE_PREFERRED:
- break;
- default:
- break;
- }
-
adv_l |= linkmode_adv_to_mii_t1_adv_l_t(phydev->advertising);
ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_L,
- (MDIO_AN_T1_ADV_L_FORCE_MS | MDIO_AN_T1_ADV_L_PAUSE_CAP
- | MDIO_AN_T1_ADV_L_PAUSE_ASYM), adv_l);
+ adv_l_mask, adv_l);
if (ret < 0)
return ret;
if (ret > 0)
@@ -236,7 +234,7 @@ static int genphy_c45_baset1_an_config_aneg(struct phy_device *phydev)
adv_m |= linkmode_adv_to_mii_t1_adv_m_t(phydev->advertising);
ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_M,
- MDIO_AN_T1_ADV_M_MST | MDIO_AN_T1_ADV_M_B10L, adv_m);
+ adv_m_mask, adv_m);
if (ret < 0)
return ret;
if (ret > 0)
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index a74b320f5b27..12ff276b80ae 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -316,6 +316,12 @@ static __maybe_unused int mdio_bus_phy_resume(struct device *dev)
phydev->suspended_by_mdio_bus = 0;
+ /* If we managed to get here with the PHY state machine in a state neither
+ * PHY_HALTED nor PHY_READY, this is an indication that something went wrong
+ * and we should most likely be using MAC managed PM, but we are not.
+ */
+ WARN_ON(phydev->state != PHY_HALTED && phydev->state != PHY_READY);
+
ret = phy_init_hw(phydev);
if (ret < 0)
return ret;
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index dafd3e9ebbf8..c8791e9b451d 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -1111,7 +1111,7 @@ plip_open(struct net_device *dev)
/* Any address will do - we take the first. We already
have the first two bytes filled with 0xfc, from
plip_init_dev(). */
- const struct in_ifaddr *ifa = rcu_dereference(in_dev->ifa_list);
+ const struct in_ifaddr *ifa = rtnl_dereference(in_dev->ifa_list);
if (ifa != NULL) {
dev_addr_mod(dev, 2, &ifa->ifa_local, 4);
}
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index c3d42062559d..9e75ed3f08ce 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -716,10 +716,20 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
skb_reset_mac_header(skb);
skb->protocol = eth_hdr(skb)->h_proto;
+ rcu_read_lock();
+ tap = rcu_dereference(q->tap);
+ if (!tap) {
+ kfree_skb(skb);
+ rcu_read_unlock();
+ return total_len;
+ }
+ skb->dev = tap->dev;
+
if (vnet_hdr_len) {
err = virtio_net_hdr_to_skb(skb, &vnet_hdr,
tap_is_little_endian(q));
if (err) {
+ rcu_read_unlock();
drop_reason = SKB_DROP_REASON_DEV_HDR;
goto err_kfree;
}
@@ -732,8 +742,6 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
__vlan_get_protocol(skb, skb->protocol, &depth) != 0)
skb_set_network_header(skb, depth);
- rcu_read_lock();
- tap = rcu_dereference(q->tap);
/* copy skb_ubuf_info for callback when skb has no error */
if (zerocopy) {
skb_zcopy_init(skb, msg_control);
@@ -742,14 +750,8 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
uarg->callback(NULL, uarg, false);
}
- if (tap) {
- skb->dev = tap->dev;
- dev_queue_xmit(skb);
- } else {
- kfree_skb(skb);
- }
+ dev_queue_xmit(skb);
rcu_read_unlock();
-
return total_len;
err_kfree:
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 0ad468a00064..aff39bf3161d 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1680,7 +1680,7 @@ static const struct driver_info ax88179_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1693,7 +1693,7 @@ static const struct driver_info ax88178a_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1706,7 +1706,7 @@ static const struct driver_info cypress_GX3_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1719,7 +1719,7 @@ static const struct driver_info dlink_dub1312_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1732,7 +1732,7 @@ static const struct driver_info sitecom_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1745,7 +1745,7 @@ static const struct driver_info samsung_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1758,7 +1758,7 @@ static const struct driver_info lenovo_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1771,7 +1771,7 @@ static const struct driver_info belkin_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1784,7 +1784,7 @@ static const struct driver_info toshiba_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1797,7 +1797,7 @@ static const struct driver_info mct_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1810,7 +1810,7 @@ static const struct driver_info at_umc2000_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1823,7 +1823,7 @@ static const struct driver_info at_umc200_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1836,7 +1836,7 @@ static const struct driver_info at_umc2000sp_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 571a399c195d..709e3c59e340 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1390,6 +1390,8 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x1e2d, 0x00b0, 4)}, /* Cinterion CLS8 */
{QMI_FIXED_INTF(0x1e2d, 0x00b7, 0)}, /* Cinterion MV31 RmNet */
{QMI_FIXED_INTF(0x1e2d, 0x00b9, 0)}, /* Cinterion MV31 RmNet based on new baseline */
+ {QMI_FIXED_INTF(0x1e2d, 0x00f3, 0)}, /* Cinterion MV32-W-A RmNet */
+ {QMI_FIXED_INTF(0x1e2d, 0x00f4, 0)}, /* Cinterion MV32-W-B RmNet */
{QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 0f6efaabaa32..d142ac8fcf6e 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -5906,6 +5906,11 @@ static void r8153_enter_oob(struct r8152 *tp)
ocp_data &= ~NOW_IS_OOB;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+ /* RX FIFO settings for OOB */
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_OOB);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_OOB);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_OOB);
+
rtl_disable(tp);
rtl_reset_bmu(tp);
@@ -6431,21 +6436,8 @@ static void r8156_fc_parameter(struct r8152 *tp)
u32 pause_on = tp->fc_pause_on ? tp->fc_pause_on : fc_pause_on_auto(tp);
u32 pause_off = tp->fc_pause_off ? tp->fc_pause_off : fc_pause_off_auto(tp);
- switch (tp->version) {
- case RTL_VER_10:
- case RTL_VER_11:
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 8);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 8);
- break;
- case RTL_VER_12:
- case RTL_VER_13:
- case RTL_VER_15:
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 16);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 16);
- break;
- default:
- break;
- }
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 16);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 16);
}
static void rtl8156_change_mtu(struct r8152 *tp)
@@ -6557,6 +6549,11 @@ static void rtl8156_down(struct r8152 *tp)
ocp_data &= ~NOW_IS_OOB;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+ /* RX FIFO settings for OOB */
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_FULL, 64 / 16);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, 1024 / 16);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, 4096 / 16);
+
rtl_disable(tp);
rtl_reset_bmu(tp);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 2cb833b3006a..466da01ba2e3 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -312,7 +312,6 @@ static bool veth_skb_is_eligible_for_gro(const struct net_device *dev,
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
- struct netdev_queue *queue = NULL;
struct veth_rq *rq = NULL;
struct net_device *rcv;
int length = skb->len;
@@ -330,7 +329,6 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
rxq = skb_get_queue_mapping(skb);
if (rxq < rcv->real_num_rx_queues) {
rq = &rcv_priv->rq[rxq];
- queue = netdev_get_tx_queue(dev, rxq);
/* The napi pointer is available when an XDP program is
* attached or when GRO is enabled
@@ -342,8 +340,6 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) {
- if (queue)
- txq_trans_cond_update(queue);
if (!use_napi)
dev_lstats_add(dev, length);
} else {
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index ec8e1b3108c3..9cce7dec7366 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -135,6 +135,9 @@ struct send_queue {
struct virtnet_sq_stats stats;
struct napi_struct napi;
+
+ /* Record whether sq is in reset state. */
+ bool reset;
};
/* Internal representation of a receive virtqueue */
@@ -267,6 +270,12 @@ struct virtnet_info {
u8 duplex;
u32 speed;
+ /* Interrupt coalescing settings */
+ u32 tx_usecs;
+ u32 rx_usecs;
+ u32 tx_max_packets;
+ u32 rx_max_packets;
+
unsigned long guest_offloads;
unsigned long guest_offloads_capable;
@@ -284,6 +293,9 @@ struct padded_vnet_hdr {
char padding[12];
};
+static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
+static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
+
static bool is_xdp_frame(void *ptr)
{
return (unsigned long)ptr & VIRTIO_XDP_FLAG;
@@ -1057,8 +1069,11 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
case XDP_TX:
stats->xdp_tx++;
xdpf = xdp_convert_buff_to_frame(&xdp);
- if (unlikely(!xdpf))
+ if (unlikely(!xdpf)) {
+ if (unlikely(xdp_page != page))
+ put_page(xdp_page);
goto err_xdp;
+ }
err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
if (unlikely(!err)) {
xdp_return_frame_rx_napi(xdpf);
@@ -1196,7 +1211,7 @@ static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
if (!hdr_hash || !skb)
return;
- switch ((int)hdr_hash->hash_report) {
+ switch (__le16_to_cpu(hdr_hash->hash_report)) {
case VIRTIO_NET_HASH_REPORT_TCPv4:
case VIRTIO_NET_HASH_REPORT_UDPv4:
case VIRTIO_NET_HASH_REPORT_TCPv6:
@@ -1214,7 +1229,7 @@ static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
default:
rss_hash_type = PKT_HASH_TYPE_NONE;
}
- skb_set_hash(skb, (unsigned int)hdr_hash->hash_value, rss_hash_type);
+ skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type);
}
static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
@@ -1625,6 +1640,11 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
return;
if (__netif_tx_trylock(txq)) {
+ if (sq->reset) {
+ __netif_tx_unlock(txq);
+ return;
+ }
+
do {
virtqueue_disable_cb(sq->vq);
free_old_xmit_skbs(sq, true);
@@ -1872,6 +1892,70 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+static int virtnet_rx_resize(struct virtnet_info *vi,
+ struct receive_queue *rq, u32 ring_num)
+{
+ bool running = netif_running(vi->dev);
+ int err, qindex;
+
+ qindex = rq - vi->rq;
+
+ if (running)
+ napi_disable(&rq->napi);
+
+ err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_unused_buf);
+ if (err)
+ netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
+
+ if (!try_fill_recv(vi, rq, GFP_KERNEL))
+ schedule_delayed_work(&vi->refill, 0);
+
+ if (running)
+ virtnet_napi_enable(rq->vq, &rq->napi);
+ return err;
+}
+
+static int virtnet_tx_resize(struct virtnet_info *vi,
+ struct send_queue *sq, u32 ring_num)
+{
+ bool running = netif_running(vi->dev);
+ struct netdev_queue *txq;
+ int err, qindex;
+
+ qindex = sq - vi->sq;
+
+ if (running)
+ virtnet_napi_tx_disable(&sq->napi);
+
+ txq = netdev_get_tx_queue(vi->dev, qindex);
+
+ /* 1. wait for all xmit to complete
+ * 2. fix the race between netif_stop_subqueue() and netif_start_subqueue()
+ */
+ __netif_tx_lock_bh(txq);
+
+ /* Prevent rx poll from accessing sq. */
+ sq->reset = true;
+
+ /* Prevent the upper layer from trying to send packets. */
+ netif_stop_subqueue(vi->dev, qindex);
+
+ __netif_tx_unlock_bh(txq);
+
+ err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
+ if (err)
+ netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
+
+ __netif_tx_lock_bh(txq);
+ sq->reset = false;
+ netif_tx_wake_queue(txq);
+ __netif_tx_unlock_bh(txq);
+
+ if (running)
+ virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
+ return err;
+}
+
/*
* Send command via the control virtqueue and check status. Commands
* supported by the hypervisor, as indicated by feature bits, should
@@ -2282,10 +2366,57 @@ static void virtnet_get_ringparam(struct net_device *dev,
{
struct virtnet_info *vi = netdev_priv(dev);
- ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
- ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
- ring->rx_pending = ring->rx_max_pending;
- ring->tx_pending = ring->tx_max_pending;
+ ring->rx_max_pending = vi->rq[0].vq->num_max;
+ ring->tx_max_pending = vi->sq[0].vq->num_max;
+ ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
+ ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
+}
+
+static int virtnet_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ u32 rx_pending, tx_pending;
+ struct receive_queue *rq;
+ struct send_queue *sq;
+ int i, err;
+
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+ return -EINVAL;
+
+ rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
+ tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
+
+ if (ring->rx_pending == rx_pending &&
+ ring->tx_pending == tx_pending)
+ return 0;
+
+ if (ring->rx_pending > vi->rq[0].vq->num_max)
+ return -EINVAL;
+
+ if (ring->tx_pending > vi->sq[0].vq->num_max)
+ return -EINVAL;
+
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ rq = vi->rq + i;
+ sq = vi->sq + i;
+
+ if (ring->tx_pending != tx_pending) {
+ err = virtnet_tx_resize(vi, sq, ring->tx_pending);
+ if (err)
+ return err;
+ }
+
+ if (ring->rx_pending != rx_pending) {
+ err = virtnet_rx_resize(vi, rq, ring->rx_pending);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
}
static bool virtnet_commit_rss_command(struct virtnet_info *vi)
@@ -2615,27 +2746,89 @@ static int virtnet_get_link_ksettings(struct net_device *dev,
return 0;
}
+static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
+ struct ethtool_coalesce *ec)
+{
+ struct scatterlist sgs_tx, sgs_rx;
+ struct virtio_net_ctrl_coal_tx coal_tx;
+ struct virtio_net_ctrl_coal_rx coal_rx;
+
+ coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
+ coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
+ sg_init_one(&sgs_tx, &coal_tx, sizeof(coal_tx));
+
+ if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
+ VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
+ &sgs_tx))
+ return -EINVAL;
+
+ /* Save parameters */
+ vi->tx_usecs = ec->tx_coalesce_usecs;
+ vi->tx_max_packets = ec->tx_max_coalesced_frames;
+
+ coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
+ coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
+ sg_init_one(&sgs_rx, &coal_rx, sizeof(coal_rx));
+
+ if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
+ VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
+ &sgs_rx))
+ return -EINVAL;
+
+ /* Save parameters */
+ vi->rx_usecs = ec->rx_coalesce_usecs;
+ vi->rx_max_packets = ec->rx_max_coalesced_frames;
+
+ return 0;
+}
+
+static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
+{
+ /* usecs coalescing is supported only if VIRTIO_NET_F_NOTF_COAL
+ * feature is negotiated.
+ */
+ if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs)
+ return -EOPNOTSUPP;
+
+ if (ec->tx_max_coalesced_frames > 1 ||
+ ec->rx_max_coalesced_frames != 1)
+ return -EINVAL;
+
+ return 0;
+}
+
static int virtnet_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *ec,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
struct virtnet_info *vi = netdev_priv(dev);
- int i, napi_weight;
-
- if (ec->tx_max_coalesced_frames > 1 ||
- ec->rx_max_coalesced_frames != 1)
- return -EINVAL;
+ int ret, i, napi_weight;
+ bool update_napi = false;
+ /* Can't change NAPI weight if the link is up */
napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
if (napi_weight ^ vi->sq[0].napi.weight) {
if (dev->flags & IFF_UP)
return -EBUSY;
+ else
+ update_napi = true;
+ }
+
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL))
+ ret = virtnet_send_notf_coal_cmds(vi, ec);
+ else
+ ret = virtnet_coal_params_supported(ec);
+
+ if (ret)
+ return ret;
+
+ if (update_napi) {
for (i = 0; i < vi->max_queue_pairs; i++)
vi->sq[i].napi.weight = napi_weight;
}
- return 0;
+ return ret;
}
static int virtnet_get_coalesce(struct net_device *dev,
@@ -2643,16 +2836,19 @@ static int virtnet_get_coalesce(struct net_device *dev,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
- struct ethtool_coalesce ec_default = {
- .cmd = ETHTOOL_GCOALESCE,
- .rx_max_coalesced_frames = 1,
- };
struct virtnet_info *vi = netdev_priv(dev);
- memcpy(ec, &ec_default, sizeof(ec_default));
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
+ ec->rx_coalesce_usecs = vi->rx_usecs;
+ ec->tx_coalesce_usecs = vi->tx_usecs;
+ ec->tx_max_coalesced_frames = vi->tx_max_packets;
+ ec->rx_max_coalesced_frames = vi->rx_max_packets;
+ } else {
+ ec->rx_max_coalesced_frames = 1;
- if (vi->sq[0].napi.weight)
- ec->tx_max_coalesced_frames = 1;
+ if (vi->sq[0].napi.weight)
+ ec->tx_max_coalesced_frames = 1;
+ }
return 0;
}
@@ -2771,10 +2967,12 @@ static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
}
static const struct ethtool_ops virtnet_ethtool_ops = {
- .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
+ .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USECS,
.get_drvinfo = virtnet_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = virtnet_get_ringparam,
+ .set_ringparam = virtnet_set_ringparam,
.get_strings = virtnet_get_strings,
.get_sset_count = virtnet_get_sset_count,
.get_ethtool_stats = virtnet_get_ethtool_stats,
@@ -3168,6 +3366,27 @@ static void free_receive_page_frags(struct virtnet_info *vi)
put_page(vi->rq[i].alloc_frag.page);
}
+static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
+{
+ if (!is_xdp_frame(buf))
+ dev_kfree_skb(buf);
+ else
+ xdp_return_frame(ptr_to_xdp(buf));
+}
+
+static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf)
+{
+ struct virtnet_info *vi = vq->vdev->priv;
+ int i = vq2rxq(vq);
+
+ if (vi->mergeable_rx_bufs)
+ put_page(virt_to_head_page(buf));
+ else if (vi->big_packets)
+ give_pages(&vi->rq[i], buf);
+ else
+ put_page(virt_to_head_page(buf));
+}
+
static void free_unused_bufs(struct virtnet_info *vi)
{
void *buf;
@@ -3175,26 +3394,14 @@ static void free_unused_bufs(struct virtnet_info *vi)
for (i = 0; i < vi->max_queue_pairs; i++) {
struct virtqueue *vq = vi->sq[i].vq;
- while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
- if (!is_xdp_frame(buf))
- dev_kfree_skb(buf);
- else
- xdp_return_frame(ptr_to_xdp(buf));
- }
+ while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
+ virtnet_sq_free_unused_buf(vq, buf);
}
for (i = 0; i < vi->max_queue_pairs; i++) {
struct virtqueue *vq = vi->rq[i].vq;
-
- while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
- if (vi->mergeable_rx_bufs) {
- put_page(virt_to_head_page(buf));
- } else if (vi->big_packets) {
- give_pages(&vi->rq[i], buf);
- } else {
- put_page(virt_to_head_page(buf));
- }
- }
+ while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
+ virtnet_rq_free_unused_buf(vq, buf);
}
}
@@ -3441,6 +3648,8 @@ static bool virtnet_validate_features(struct virtio_device *vdev)
VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_RSS,
"VIRTIO_NET_F_CTRL_VQ") ||
VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_HASH_REPORT,
+ "VIRTIO_NET_F_CTRL_VQ") ||
+ VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_NOTF_COAL,
"VIRTIO_NET_F_CTRL_VQ"))) {
return false;
}
@@ -3577,6 +3786,13 @@ static int virtnet_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
vi->mergeable_rx_bufs = true;
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
+ vi->rx_usecs = 0;
+ vi->tx_usecs = 0;
+ vi->tx_max_packets = 0;
+ vi->rx_max_packets = 0;
+ }
+
if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
vi->has_rss_hash_report = true;
@@ -3811,7 +4027,7 @@ static struct virtio_device_id id_table[] = {
VIRTIO_NET_F_CTRL_MAC_ADDR, \
VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
- VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT
+ VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL
static unsigned int features[] = {
VIRTNET_FEATURES,
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 90811ab851fd..c3285242f74f 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -2321,7 +2321,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
fl6.flowi6_oif = oif;
fl6.daddr = *daddr;
fl6.saddr = *saddr;
- fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label);
+ fl6.flowlabel = ip6_make_flowinfo(tos, label);
fl6.flowi6_mark = skb->mark;
fl6.flowi6_proto = IPPROTO_UDP;
fl6.fl6_dport = dport;
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
index 4714c86bb501..64e7a767d963 100644
--- a/drivers/net/wireless/ath/ath10k/trace.h
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -52,15 +52,12 @@ DECLARE_EVENT_CLASS(ath10k_log_event,
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
- __dynamic_array(char, msg, ATH10K_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- ATH10K_MSG_MAX,
- vaf->fmt,
- *vaf->va) >= ATH10K_MSG_MAX);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk(
"%s %s %s",
@@ -92,16 +89,13 @@ TRACE_EVENT(ath10k_log_dbg,
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__field(unsigned int, level)
- __dynamic_array(char, msg, ATH10K_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
__entry->level = level;
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- ATH10K_MSG_MAX,
- vaf->fmt,
- *vaf->va) >= ATH10K_MSG_MAX);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk(
"%s %s %s",
diff --git a/drivers/net/wireless/ath/ath11k/trace.h b/drivers/net/wireless/ath/ath11k/trace.h
index a02e54735e88..76560587bea0 100644
--- a/drivers/net/wireless/ath/ath11k/trace.h
+++ b/drivers/net/wireless/ath/ath11k/trace.h
@@ -126,15 +126,12 @@ DECLARE_EVENT_CLASS(ath11k_log_event,
TP_STRUCT__entry(
__string(device, dev_name(ab->dev))
__string(driver, dev_driver_string(ab->dev))
- __dynamic_array(char, msg, ATH11K_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(device, dev_name(ab->dev));
__assign_str(driver, dev_driver_string(ab->dev));
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- ATH11K_MSG_MAX,
- vaf->fmt,
- *vaf->va) >= ATH11K_MSG_MAX);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk(
"%s %s %s",
diff --git a/drivers/net/wireless/ath/ath6kl/trace.h b/drivers/net/wireless/ath/ath6kl/trace.h
index a3d3740419eb..231a94769ddb 100644
--- a/drivers/net/wireless/ath/ath6kl/trace.h
+++ b/drivers/net/wireless/ath/ath6kl/trace.h
@@ -253,13 +253,10 @@ DECLARE_EVENT_CLASS(ath6kl_log_event,
TP_PROTO(struct va_format *vaf),
TP_ARGS(vaf),
TP_STRUCT__entry(
- __dynamic_array(char, msg, ATH6KL_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- ATH6KL_MSG_MAX,
- vaf->fmt,
- *vaf->va) >= ATH6KL_MSG_MAX);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s", __get_str(msg))
);
@@ -284,14 +281,11 @@ TRACE_EVENT(ath6kl_log_dbg,
TP_ARGS(level, vaf),
TP_STRUCT__entry(
__field(unsigned int, level)
- __dynamic_array(char, msg, ATH6KL_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__entry->level = level;
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- ATH6KL_MSG_MAX,
- vaf->fmt,
- *vaf->va) >= ATH6KL_MSG_MAX);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s", __get_str(msg))
);
diff --git a/drivers/net/wireless/ath/trace.h b/drivers/net/wireless/ath/trace.h
index ba711644d27e..9935cf475b6d 100644
--- a/drivers/net/wireless/ath/trace.h
+++ b/drivers/net/wireless/ath/trace.h
@@ -40,16 +40,13 @@ TRACE_EVENT(ath_log,
TP_STRUCT__entry(
__string(device, wiphy_name(wiphy))
__string(driver, KBUILD_MODNAME)
- __dynamic_array(char, msg, ATH_DBG_MAX_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(device, wiphy_name(wiphy));
__assign_str(driver, KBUILD_MODNAME);
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- ATH_DBG_MAX_LEN,
- vaf->fmt,
- *vaf->va) >= ATH_DBG_MAX_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk(
diff --git a/drivers/net/wireless/ath/wil6210/trace.h b/drivers/net/wireless/ath/wil6210/trace.h
index 11c989e95880..201f44612c31 100644
--- a/drivers/net/wireless/ath/wil6210/trace.h
+++ b/drivers/net/wireless/ath/wil6210/trace.h
@@ -70,13 +70,10 @@ DECLARE_EVENT_CLASS(wil6210_log_event,
TP_PROTO(struct va_format *vaf),
TP_ARGS(vaf),
TP_STRUCT__entry(
- __dynamic_array(char, msg, WIL6210_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- WIL6210_MSG_MAX,
- vaf->fmt,
- *vaf->va) >= WIL6210_MSG_MAX);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s", __get_str(msg))
);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h
index 338c66d0c5f8..5a139d7ed47a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h
@@ -33,13 +33,11 @@ TRACE_EVENT(brcmf_err,
TP_ARGS(func, vaf),
TP_STRUCT__entry(
__string(func, func)
- __dynamic_array(char, msg, MAX_MSG_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(func, func);
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >= MAX_MSG_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s: %s", __get_str(func), __get_str(msg))
);
@@ -50,14 +48,12 @@ TRACE_EVENT(brcmf_dbg,
TP_STRUCT__entry(
__field(u32, level)
__string(func, func)
- __dynamic_array(char, msg, MAX_MSG_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__entry->level = level;
__assign_str(func, func);
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >= MAX_MSG_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s: %s", __get_str(func), __get_str(msg))
);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h
index 0e8a69ab909f..488456420353 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h
@@ -28,12 +28,10 @@ DECLARE_EVENT_CLASS(brcms_msg_event,
TP_PROTO(struct va_format *vaf),
TP_ARGS(vaf),
TP_STRUCT__entry(
- __dynamic_array(char, msg, MAX_MSG_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >= MAX_MSG_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s", __get_str(msg))
);
@@ -64,14 +62,12 @@ TRACE_EVENT(brcms_dbg,
TP_STRUCT__entry(
__field(u32, level)
__string(func, func)
- __dynamic_array(char, msg, MAX_MSG_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__entry->level = level;
__assign_str(func, func);
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >= MAX_MSG_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s: %s", __get_str(func), __get_str(msg))
);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h
index 7dd70011fd1e..1d6c292cf545 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h
@@ -18,12 +18,10 @@ DECLARE_EVENT_CLASS(iwlwifi_msg_event,
TP_PROTO(struct va_format *vaf),
TP_ARGS(vaf),
TP_STRUCT__entry(
- __dynamic_array(char, msg, MAX_MSG_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >= MAX_MSG_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s", __get_str(msg))
);
@@ -55,14 +53,12 @@ TRACE_EVENT(iwlwifi_dbg,
TP_STRUCT__entry(
__field(u32, level)
__string(function, function)
- __dynamic_array(char, msg, MAX_MSG_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__entry->level = level;
__assign_str(function, function);
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >= MAX_MSG_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s", __get_str(msg))
);
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c
index b89519ab9205..eb1d1ba3a443 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.c
+++ b/drivers/net/wireless/microchip/wilc1000/hif.c
@@ -635,7 +635,7 @@ static inline void host_int_parse_assoc_resp_info(struct wilc_vif *vif,
conn_info->req_ies_len = 0;
}
-inline void wilc_handle_disconnect(struct wilc_vif *vif)
+void wilc_handle_disconnect(struct wilc_vif *vif)
{
struct host_if_drv *hif_drv = vif->hif_drv;
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.h b/drivers/net/wireless/microchip/wilc1000/hif.h
index 69ba1d469e9f..baa2881f4465 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.h
+++ b/drivers/net/wireless/microchip/wilc1000/hif.h
@@ -215,5 +215,6 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length);
void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
struct cfg80211_crypto_settings *crypto);
int wilc_set_default_mgmt_key_index(struct wilc_vif *vif, u8 index);
-inline void wilc_handle_disconnect(struct wilc_vif *vif);
+void wilc_handle_disconnect(struct wilc_vif *vif);
+
#endif
diff --git a/drivers/nfc/pn533/uart.c b/drivers/nfc/pn533/uart.c
index 2caf997f9bc9..07596bf5f7d6 100644
--- a/drivers/nfc/pn533/uart.c
+++ b/drivers/nfc/pn533/uart.c
@@ -310,6 +310,7 @@ static void pn532_uart_remove(struct serdev_device *serdev)
pn53x_unregister_nfc(pn532->priv);
serdev_device_close(serdev);
pn53x_common_clean(pn532->priv);
+ del_timer_sync(&pn532->cmd_timeout);
kfree_skb(pn532->recv_skb);
kfree(pn532);
}
diff --git a/drivers/ntb/hw/epf/ntb_hw_epf.c b/drivers/ntb/hw/epf/ntb_hw_epf.c
index b019755e4e21..3ece49cb18ff 100644
--- a/drivers/ntb/hw/epf/ntb_hw_epf.c
+++ b/drivers/ntb/hw/epf/ntb_hw_epf.c
@@ -45,7 +45,6 @@
#define NTB_EPF_MIN_DB_COUNT 3
#define NTB_EPF_MAX_DB_COUNT 31
-#define NTB_EPF_MW_OFFSET 2
#define NTB_EPF_COMMAND_TIMEOUT 1000 /* 1 Sec */
@@ -67,6 +66,7 @@ struct ntb_epf_dev {
enum pci_barno ctrl_reg_bar;
enum pci_barno peer_spad_reg_bar;
enum pci_barno db_reg_bar;
+ enum pci_barno mw_bar;
unsigned int mw_count;
unsigned int spad_count;
@@ -92,6 +92,8 @@ struct ntb_epf_data {
enum pci_barno peer_spad_reg_bar;
/* BAR that contains Doorbell region and Memory window '1' */
enum pci_barno db_reg_bar;
+ /* BAR that contains memory windows */
+ enum pci_barno mw_bar;
};
static int ntb_epf_send_command(struct ntb_epf_dev *ndev, u32 command,
@@ -411,7 +413,7 @@ static int ntb_epf_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
return -EINVAL;
}
- bar = idx + NTB_EPF_MW_OFFSET;
+ bar = idx + ndev->mw_bar;
mw_size = pci_resource_len(ntb->pdev, bar);
@@ -453,7 +455,7 @@ static int ntb_epf_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
if (idx == 0)
offset = readl(ndev->ctrl_reg + NTB_EPF_MW1_OFFSET);
- bar = idx + NTB_EPF_MW_OFFSET;
+ bar = idx + ndev->mw_bar;
if (base)
*base = pci_resource_start(ndev->ntb.pdev, bar) + offset;
@@ -565,6 +567,7 @@ static int ntb_epf_init_pci(struct ntb_epf_dev *ndev,
struct pci_dev *pdev)
{
struct device *dev = ndev->dev;
+ size_t spad_sz, spad_off;
int ret;
pci_set_drvdata(pdev, ndev);
@@ -599,10 +602,16 @@ static int ntb_epf_init_pci(struct ntb_epf_dev *ndev,
goto err_dma_mask;
}
- ndev->peer_spad_reg = pci_iomap(pdev, ndev->peer_spad_reg_bar, 0);
- if (!ndev->peer_spad_reg) {
- ret = -EIO;
- goto err_dma_mask;
+ if (ndev->peer_spad_reg_bar) {
+ ndev->peer_spad_reg = pci_iomap(pdev, ndev->peer_spad_reg_bar, 0);
+ if (!ndev->peer_spad_reg) {
+ ret = -EIO;
+ goto err_dma_mask;
+ }
+ } else {
+ spad_sz = 4 * readl(ndev->ctrl_reg + NTB_EPF_SPAD_COUNT);
+ spad_off = readl(ndev->ctrl_reg + NTB_EPF_SPAD_OFFSET);
+ ndev->peer_spad_reg = ndev->ctrl_reg + spad_off + spad_sz;
}
ndev->db_reg = pci_iomap(pdev, ndev->db_reg_bar, 0);
@@ -657,6 +666,7 @@ static int ntb_epf_pci_probe(struct pci_dev *pdev,
enum pci_barno peer_spad_reg_bar = BAR_1;
enum pci_barno ctrl_reg_bar = BAR_0;
enum pci_barno db_reg_bar = BAR_2;
+ enum pci_barno mw_bar = BAR_2;
struct device *dev = &pdev->dev;
struct ntb_epf_data *data;
struct ntb_epf_dev *ndev;
@@ -671,17 +681,16 @@ static int ntb_epf_pci_probe(struct pci_dev *pdev,
data = (struct ntb_epf_data *)id->driver_data;
if (data) {
- if (data->peer_spad_reg_bar)
- peer_spad_reg_bar = data->peer_spad_reg_bar;
- if (data->ctrl_reg_bar)
- ctrl_reg_bar = data->ctrl_reg_bar;
- if (data->db_reg_bar)
- db_reg_bar = data->db_reg_bar;
+ peer_spad_reg_bar = data->peer_spad_reg_bar;
+ ctrl_reg_bar = data->ctrl_reg_bar;
+ db_reg_bar = data->db_reg_bar;
+ mw_bar = data->mw_bar;
}
ndev->peer_spad_reg_bar = peer_spad_reg_bar;
ndev->ctrl_reg_bar = ctrl_reg_bar;
ndev->db_reg_bar = db_reg_bar;
+ ndev->mw_bar = mw_bar;
ndev->dev = dev;
ntb_epf_init_struct(ndev, pdev);
@@ -729,6 +738,14 @@ static const struct ntb_epf_data j721e_data = {
.ctrl_reg_bar = BAR_0,
.peer_spad_reg_bar = BAR_1,
.db_reg_bar = BAR_2,
+ .mw_bar = BAR_2,
+};
+
+static const struct ntb_epf_data mx8_data = {
+ .ctrl_reg_bar = BAR_0,
+ .peer_spad_reg_bar = BAR_0,
+ .db_reg_bar = BAR_2,
+ .mw_bar = BAR_4,
};
static const struct pci_device_id ntb_epf_pci_tbl[] = {
@@ -737,6 +754,11 @@ static const struct pci_device_id ntb_epf_pci_tbl[] = {
.class = PCI_CLASS_MEMORY_RAM << 8, .class_mask = 0xffff00,
.driver_data = (kernel_ulong_t)&j721e_data,
},
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x0809),
+ .class = PCI_CLASS_MEMORY_RAM << 8, .class_mask = 0xffff00,
+ .driver_data = (kernel_ulong_t)&mx8_data,
+ },
{ },
};
diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c
index 733557231ed0..0ed6f809ff2e 100644
--- a/drivers/ntb/hw/idt/ntb_hw_idt.c
+++ b/drivers/ntb/hw/idt/ntb_hw_idt.c
@@ -2406,7 +2406,7 @@ static ssize_t idt_dbgfs_info_read(struct file *filp, char __user *ubuf,
"\t%hhu.\t", idx);
else
off += scnprintf(strbuf + off, size - off,
- "\t%hhu-%hhu.\t", idx, idx + cnt - 1);
+ "\t%hhu-%d.\t", idx, idx + cnt - 1);
off += scnprintf(strbuf + off, size - off, "%s BAR%hhu, ",
idt_get_mw_name(data), ndev->mws[idx].bar);
@@ -2435,7 +2435,7 @@ static ssize_t idt_dbgfs_info_read(struct file *filp, char __user *ubuf,
"\t%hhu.\t", idx);
else
off += scnprintf(strbuf + off, size - off,
- "\t%hhu-%hhu.\t", idx, idx + cnt - 1);
+ "\t%hhu-%d.\t", idx, idx + cnt - 1);
off += scnprintf(strbuf + off, size - off,
"%s BAR%hhu, ", idt_get_mw_name(data),
@@ -2480,7 +2480,7 @@ static ssize_t idt_dbgfs_info_read(struct file *filp, char __user *ubuf,
int src;
data = idt_ntb_msg_read(&ndev->ntb, &src, idx);
off += scnprintf(strbuf + off, size - off,
- "\t%hhu. 0x%08x from peer %hhu (Port %hhu)\n",
+ "\t%hhu. 0x%08x from peer %d (Port %hhu)\n",
idx, data, src, ndev->peers[src].port);
}
off += scnprintf(strbuf + off, size - off, "\n");
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.c b/drivers/ntb/hw/intel/ntb_hw_gen1.c
index e5f14e20a9ff..84772013812b 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen1.c
+++ b/drivers/ntb/hw/intel/ntb_hw_gen1.c
@@ -763,7 +763,7 @@ static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
return ndev_ntb_debugfs_read(filp, ubuf, count, offp);
else if (pdev_is_gen3(ndev->ntb.pdev))
return ndev_ntb3_debugfs_read(filp, ubuf, count, offp);
- else if (pdev_is_gen4(ndev->ntb.pdev))
+ else if (pdev_is_gen4(ndev->ntb.pdev) || pdev_is_gen5(ndev->ntb.pdev))
return ndev_ntb4_debugfs_read(filp, ubuf, count, offp);
return -ENXIO;
@@ -1874,7 +1874,7 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev,
rc = gen3_init_dev(ndev);
if (rc)
goto err_init_dev;
- } else if (pdev_is_gen4(pdev)) {
+ } else if (pdev_is_gen4(pdev) || pdev_is_gen5(pdev)) {
ndev->ntb.ops = &intel_ntb4_ops;
rc = intel_ntb_init_pci(ndev, pdev);
if (rc)
@@ -1904,7 +1904,8 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev,
err_register:
ndev_deinit_debugfs(ndev);
- if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev) || pdev_is_gen4(pdev))
+ if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev) ||
+ pdev_is_gen4(pdev) || pdev_is_gen5(pdev))
xeon_deinit_dev(ndev);
err_init_dev:
intel_ntb_deinit_pci(ndev);
@@ -1920,7 +1921,8 @@ static void intel_ntb_pci_remove(struct pci_dev *pdev)
ntb_unregister_device(&ndev->ntb);
ndev_deinit_debugfs(ndev);
- if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev) || pdev_is_gen4(pdev))
+ if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev) ||
+ pdev_is_gen4(pdev) || pdev_is_gen5(pdev))
xeon_deinit_dev(ndev);
intel_ntb_deinit_pci(ndev);
kfree(ndev);
@@ -2047,6 +2049,8 @@ static const struct pci_device_id intel_ntb_pci_tbl[] = {
/* GEN4 */
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_ICX)},
+ /* GEN5 PCIe */
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_GNR)},
{0}
};
MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl);
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen4.c b/drivers/ntb/hw/intel/ntb_hw_gen4.c
index 4081fc538ff4..22cac7975b3c 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen4.c
+++ b/drivers/ntb/hw/intel/ntb_hw_gen4.c
@@ -197,7 +197,7 @@ int gen4_init_dev(struct intel_ntb_dev *ndev)
ppd1 = ioread32(ndev->self_mmio + GEN4_PPD1_OFFSET);
if (pdev_is_ICX(pdev))
ndev->ntb.topo = gen4_ppd_topo(ndev, ppd1);
- else if (pdev_is_SPR(pdev))
+ else if (pdev_is_SPR(pdev) || pdev_is_gen5(pdev))
ndev->ntb.topo = spr_ppd_topo(ndev, ppd1);
dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd1,
ntb_topo_string(ndev->ntb.topo));
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.h b/drivers/ntb/hw/intel/ntb_hw_intel.h
index b233d1c6ba2d..da4d5fe55bab 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.h
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.h
@@ -70,6 +70,7 @@
#define PCI_DEVICE_ID_INTEL_NTB_SS_BDX 0x6F0F
#define PCI_DEVICE_ID_INTEL_NTB_B2B_SKX 0x201C
#define PCI_DEVICE_ID_INTEL_NTB_B2B_ICX 0x347e
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_GNR 0x0db4
/* Ntb control and link status */
#define NTB_CTL_CFG_LOCK BIT(0)
@@ -228,4 +229,10 @@ static inline int pdev_is_gen4(struct pci_dev *pdev)
return 0;
}
+
+static inline int pdev_is_gen5(struct pci_dev *pdev)
+{
+ return pdev->device == PCI_DEVICE_ID_INTEL_NTB_B2B_GNR;
+}
+
#endif
diff --git a/drivers/ntb/test/ntb_tool.c b/drivers/ntb/test/ntb_tool.c
index b7bf3f863d79..5ee0afa621a9 100644
--- a/drivers/ntb/test/ntb_tool.c
+++ b/drivers/ntb/test/ntb_tool.c
@@ -367,14 +367,16 @@ static ssize_t tool_fn_write(struct tool_ctx *tc,
u64 bits;
int n;
+ if (*offp)
+ return 0;
+
buf = kmalloc(size + 1, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- ret = simple_write_to_buffer(buf, size, offp, ubuf, size);
- if (ret < 0) {
+ if (copy_from_user(buf, ubuf, size)) {
kfree(buf);
- return ret;
+ return -EFAULT;
}
buf[size] = 0;
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index f36efcc11f67..7e88cd242380 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -453,6 +453,21 @@ static void pmem_release_disk(void *__pmem)
put_disk(pmem->disk);
}
+static int pmem_pagemap_memory_failure(struct dev_pagemap *pgmap,
+ unsigned long pfn, unsigned long nr_pages, int mf_flags)
+{
+ struct pmem_device *pmem =
+ container_of(pgmap, struct pmem_device, pgmap);
+ u64 offset = PFN_PHYS(pfn) - pmem->phys_addr - pmem->data_offset;
+ u64 len = nr_pages << PAGE_SHIFT;
+
+ return dax_holder_notify_failure(pmem->dax_dev, offset, len, mf_flags);
+}
+
+static const struct dev_pagemap_ops fsdax_pagemap_ops = {
+ .memory_failure = pmem_pagemap_memory_failure,
+};
+
static int pmem_attach_disk(struct device *dev,
struct nd_namespace_common *ndns)
{
@@ -514,6 +529,7 @@ static int pmem_attach_disk(struct device *dev,
pmem->pfn_flags = PFN_DEV;
if (is_nd_pfn(dev)) {
pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
+ pmem->pgmap.ops = &fsdax_pagemap_ops;
addr = devm_memremap_pages(dev, &pmem->pgmap);
pfn_sb = nd_pfn->pfn_sb;
pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
@@ -527,6 +543,7 @@ static int pmem_attach_disk(struct device *dev,
pmem->pgmap.range.end = res->end;
pmem->pgmap.nr_range = 1;
pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
+ pmem->pgmap.ops = &fsdax_pagemap_ops;
addr = devm_memremap_pages(dev, &pmem->pgmap);
pmem->pfn_flags |= PFN_MAP;
bb_range = pmem->pgmap.range;
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index d976260eca7a..473a71bbd9c9 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -133,7 +133,8 @@ static void nd_region_release(struct device *dev)
put_device(&nvdimm->dev);
}
free_percpu(nd_region->lane);
- memregion_free(nd_region->id);
+ if (!test_bit(ND_REGION_CXL, &nd_region->flags))
+ memregion_free(nd_region->id);
kfree(nd_region);
}
@@ -982,9 +983,14 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
if (!nd_region)
return NULL;
- nd_region->id = memregion_alloc(GFP_KERNEL);
- if (nd_region->id < 0)
- goto err_id;
+ /* CXL pre-assigns memregion ids before creating nvdimm regions */
+ if (test_bit(ND_REGION_CXL, &ndr_desc->flags)) {
+ nd_region->id = ndr_desc->memregion;
+ } else {
+ nd_region->id = memregion_alloc(GFP_KERNEL);
+ if (nd_region->id < 0)
+ goto err_id;
+ }
nd_region->lane = alloc_percpu(struct nd_percpu_lane);
if (!nd_region->lane)
@@ -1043,9 +1049,10 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
return nd_region;
- err_percpu:
- memregion_free(nd_region->id);
- err_id:
+err_percpu:
+ if (!test_bit(ND_REGION_CXL, &ndr_desc->flags))
+ memregion_free(nd_region->id);
+err_id:
kfree(nd_region);
return NULL;
}
@@ -1068,6 +1075,13 @@ struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);
+void nvdimm_region_delete(struct nd_region *nd_region)
+{
+ if (nd_region)
+ nd_device_unregister(&nd_region->dev, ND_SYNC);
+}
+EXPORT_SYMBOL_GPL(nvdimm_region_delete);
+
int nvdimm_flush(struct nd_region *nd_region, struct bio *bio)
{
int rc = 0;
diff --git a/drivers/nvdimm/virtio_pmem.c b/drivers/nvdimm/virtio_pmem.c
index 995b6cdc67ed..20da455d2ef6 100644
--- a/drivers/nvdimm/virtio_pmem.c
+++ b/drivers/nvdimm/virtio_pmem.c
@@ -81,17 +81,24 @@ static int virtio_pmem_probe(struct virtio_device *vdev)
ndr_desc.res = &res;
ndr_desc.numa_node = nid;
ndr_desc.flush = async_pmem_flush;
+ ndr_desc.provider_data = vdev;
set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
set_bit(ND_REGION_ASYNC, &ndr_desc.flags);
+ /*
+ * The NVDIMM region could be available before the
+ * virtio_device_ready() that is called by
+ * virtio_dev_probe(), so we set device ready here.
+ */
+ virtio_device_ready(vdev);
nd_region = nvdimm_pmem_region_create(vpmem->nvdimm_bus, &ndr_desc);
if (!nd_region) {
dev_err(&vdev->dev, "failed to create nvdimm region\n");
err = -ENXIO;
goto out_nd;
}
- nd_region->provider_data = dev_to_virtio(nd_region->dev.parent->parent);
return 0;
out_nd:
+ virtio_reset_device(vdev);
nvdimm_bus_unregister(vpmem->nvdimm_bus);
out_vq:
vdev->config->del_vqs(vdev);
diff --git a/drivers/nvme/Kconfig b/drivers/nvme/Kconfig
index 87ae409a32b9..656e46d938da 100644
--- a/drivers/nvme/Kconfig
+++ b/drivers/nvme/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
menu "NVME Support"
+source "drivers/nvme/common/Kconfig"
source "drivers/nvme/host/Kconfig"
source "drivers/nvme/target/Kconfig"
diff --git a/drivers/nvme/Makefile b/drivers/nvme/Makefile
index fb42c44609a8..eedca8c72098 100644
--- a/drivers/nvme/Makefile
+++ b/drivers/nvme/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_NVME_COMMON) += common/
obj-y += host/
obj-y += target/
diff --git a/drivers/nvme/common/Kconfig b/drivers/nvme/common/Kconfig
new file mode 100644
index 000000000000..4514f44362dd
--- /dev/null
+++ b/drivers/nvme/common/Kconfig
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config NVME_COMMON
+ tristate
diff --git a/drivers/nvme/common/Makefile b/drivers/nvme/common/Makefile
new file mode 100644
index 000000000000..720c625b8a52
--- /dev/null
+++ b/drivers/nvme/common/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -I$(src)
+
+obj-$(CONFIG_NVME_COMMON) += nvme-common.o
+
+nvme-common-y += auth.o
diff --git a/drivers/nvme/common/auth.c b/drivers/nvme/common/auth.c
new file mode 100644
index 000000000000..04bd28f17dcc
--- /dev/null
+++ b/drivers/nvme/common/auth.c
@@ -0,0 +1,483 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Hannes Reinecke, SUSE Linux
+ */
+
+#include <linux/module.h>
+#include <linux/crc32.h>
+#include <linux/base64.h>
+#include <linux/prandom.h>
+#include <linux/scatterlist.h>
+#include <asm/unaligned.h>
+#include <crypto/hash.h>
+#include <crypto/dh.h>
+#include <linux/nvme.h>
+#include <linux/nvme-auth.h>
+
+static u32 nvme_dhchap_seqnum;
+static DEFINE_MUTEX(nvme_dhchap_mutex);
+
+u32 nvme_auth_get_seqnum(void)
+{
+ u32 seqnum;
+
+ mutex_lock(&nvme_dhchap_mutex);
+ if (!nvme_dhchap_seqnum)
+ nvme_dhchap_seqnum = prandom_u32();
+ else {
+ nvme_dhchap_seqnum++;
+ if (!nvme_dhchap_seqnum)
+ nvme_dhchap_seqnum++;
+ }
+ seqnum = nvme_dhchap_seqnum;
+ mutex_unlock(&nvme_dhchap_mutex);
+ return seqnum;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_get_seqnum);
+
+static struct nvme_auth_dhgroup_map {
+ const char name[16];
+ const char kpp[16];
+} dhgroup_map[] = {
+ [NVME_AUTH_DHGROUP_NULL] = {
+ .name = "null", .kpp = "null" },
+ [NVME_AUTH_DHGROUP_2048] = {
+ .name = "ffdhe2048", .kpp = "ffdhe2048(dh)" },
+ [NVME_AUTH_DHGROUP_3072] = {
+ .name = "ffdhe3072", .kpp = "ffdhe3072(dh)" },
+ [NVME_AUTH_DHGROUP_4096] = {
+ .name = "ffdhe4096", .kpp = "ffdhe4096(dh)" },
+ [NVME_AUTH_DHGROUP_6144] = {
+ .name = "ffdhe6144", .kpp = "ffdhe6144(dh)" },
+ [NVME_AUTH_DHGROUP_8192] = {
+ .name = "ffdhe8192", .kpp = "ffdhe8192(dh)" },
+};
+
+const char *nvme_auth_dhgroup_name(u8 dhgroup_id)
+{
+ if (dhgroup_id >= ARRAY_SIZE(dhgroup_map))
+ return NULL;
+ return dhgroup_map[dhgroup_id].name;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_name);
+
+const char *nvme_auth_dhgroup_kpp(u8 dhgroup_id)
+{
+ if (dhgroup_id >= ARRAY_SIZE(dhgroup_map))
+ return NULL;
+ return dhgroup_map[dhgroup_id].kpp;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_kpp);
+
+u8 nvme_auth_dhgroup_id(const char *dhgroup_name)
+{
+ int i;
+
+ if (!dhgroup_name || !strlen(dhgroup_name))
+ return NVME_AUTH_DHGROUP_INVALID;
+ for (i = 0; i < ARRAY_SIZE(dhgroup_map); i++) {
+ if (!strlen(dhgroup_map[i].name))
+ continue;
+ if (!strncmp(dhgroup_map[i].name, dhgroup_name,
+ strlen(dhgroup_map[i].name)))
+ return i;
+ }
+ return NVME_AUTH_DHGROUP_INVALID;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_id);
+
+static struct nvme_dhchap_hash_map {
+ int len;
+ const char hmac[15];
+ const char digest[8];
+} hash_map[] = {
+ [NVME_AUTH_HASH_SHA256] = {
+ .len = 32,
+ .hmac = "hmac(sha256)",
+ .digest = "sha256",
+ },
+ [NVME_AUTH_HASH_SHA384] = {
+ .len = 48,
+ .hmac = "hmac(sha384)",
+ .digest = "sha384",
+ },
+ [NVME_AUTH_HASH_SHA512] = {
+ .len = 64,
+ .hmac = "hmac(sha512)",
+ .digest = "sha512",
+ },
+};
+
+const char *nvme_auth_hmac_name(u8 hmac_id)
+{
+ if (hmac_id >= ARRAY_SIZE(hash_map))
+ return NULL;
+ return hash_map[hmac_id].hmac;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_hmac_name);
+
+const char *nvme_auth_digest_name(u8 hmac_id)
+{
+ if (hmac_id >= ARRAY_SIZE(hash_map))
+ return NULL;
+ return hash_map[hmac_id].digest;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_digest_name);
+
+u8 nvme_auth_hmac_id(const char *hmac_name)
+{
+ int i;
+
+ if (!hmac_name || !strlen(hmac_name))
+ return NVME_AUTH_HASH_INVALID;
+
+ for (i = 0; i < ARRAY_SIZE(hash_map); i++) {
+ if (!strlen(hash_map[i].hmac))
+ continue;
+ if (!strncmp(hash_map[i].hmac, hmac_name,
+ strlen(hash_map[i].hmac)))
+ return i;
+ }
+ return NVME_AUTH_HASH_INVALID;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_hmac_id);
+
+size_t nvme_auth_hmac_hash_len(u8 hmac_id)
+{
+ if (hmac_id >= ARRAY_SIZE(hash_map))
+ return 0;
+ return hash_map[hmac_id].len;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_hmac_hash_len);
+
+struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret,
+ u8 key_hash)
+{
+ struct nvme_dhchap_key *key;
+ unsigned char *p;
+ u32 crc;
+ int ret, key_len;
+ size_t allocated_len = strlen(secret);
+
+ /* Secret might be affixed with a ':' */
+ p = strrchr(secret, ':');
+ if (p)
+ allocated_len = p - secret;
+ key = kzalloc(sizeof(*key), GFP_KERNEL);
+ if (!key)
+ return ERR_PTR(-ENOMEM);
+ key->key = kzalloc(allocated_len, GFP_KERNEL);
+ if (!key->key) {
+ ret = -ENOMEM;
+ goto out_free_key;
+ }
+
+ key_len = base64_decode(secret, allocated_len, key->key);
+ if (key_len < 0) {
+ pr_debug("base64 key decoding error %d\n",
+ key_len);
+ ret = key_len;
+ goto out_free_secret;
+ }
+
+ if (key_len != 36 && key_len != 52 &&
+ key_len != 68) {
+ pr_err("Invalid key len %d\n", key_len);
+ ret = -EINVAL;
+ goto out_free_secret;
+ }
+
+ if (key_hash > 0 &&
+ (key_len - 4) != nvme_auth_hmac_hash_len(key_hash)) {
+ pr_err("Mismatched key len %d for %s\n", key_len,
+ nvme_auth_hmac_name(key_hash));
+ ret = -EINVAL;
+ goto out_free_secret;
+ }
+
+ /* The last four bytes are the CRC in little-endian format */
+ key_len -= 4;
+ /*
+ * The Linux crc32() implementation doesn't apply the pre- and
+ * post-inversion of the bits, so we have to do it manually.
+ */
+ crc = ~crc32(~0, key->key, key_len);
+
+ if (get_unaligned_le32(key->key + key_len) != crc) {
+ pr_err("key crc mismatch (key %08x, crc %08x)\n",
+ get_unaligned_le32(key->key + key_len), crc);
+ ret = -EKEYREJECTED;
+ goto out_free_secret;
+ }
+ key->len = key_len;
+ key->hash = key_hash;
+ return key;
+out_free_secret:
+ kfree_sensitive(key->key);
+out_free_key:
+ kfree(key);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(nvme_auth_extract_key);
+
+void nvme_auth_free_key(struct nvme_dhchap_key *key)
+{
+ if (!key)
+ return;
+ kfree_sensitive(key->key);
+ kfree(key);
+}
+EXPORT_SYMBOL_GPL(nvme_auth_free_key);
+
+u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn)
+{
+ const char *hmac_name;
+ struct crypto_shash *key_tfm;
+ struct shash_desc *shash;
+ u8 *transformed_key;
+ int ret;
+
+ if (!key || !key->key) {
+ pr_warn("No key specified\n");
+ return ERR_PTR(-ENOKEY);
+ }
+ if (key->hash == 0) {
+ transformed_key = kmemdup(key->key, key->len, GFP_KERNEL);
+ return transformed_key ? transformed_key : ERR_PTR(-ENOMEM);
+ }
+ hmac_name = nvme_auth_hmac_name(key->hash);
+ if (!hmac_name) {
+ pr_warn("Invalid key hash id %d\n", key->hash);
+ return ERR_PTR(-EINVAL);
+ }
+
+ key_tfm = crypto_alloc_shash(hmac_name, 0, 0);
+ if (IS_ERR(key_tfm))
+ return (u8 *)key_tfm;
+
+ shash = kmalloc(sizeof(struct shash_desc) +
+ crypto_shash_descsize(key_tfm),
+ GFP_KERNEL);
+ if (!shash) {
+ ret = -ENOMEM;
+ goto out_free_key;
+ }
+
+ transformed_key = kzalloc(crypto_shash_digestsize(key_tfm), GFP_KERNEL);
+ if (!transformed_key) {
+ ret = -ENOMEM;
+ goto out_free_shash;
+ }
+
+ shash->tfm = key_tfm;
+ ret = crypto_shash_setkey(key_tfm, key->key, key->len);
+ if (ret < 0)
+ goto out_free_transformed_key;
+ ret = crypto_shash_init(shash);
+ if (ret < 0)
+ goto out_free_transformed_key;
+ ret = crypto_shash_update(shash, nqn, strlen(nqn));
+ if (ret < 0)
+ goto out_free_transformed_key;
+ ret = crypto_shash_update(shash, "NVMe-over-Fabrics", 17);
+ if (ret < 0)
+ goto out_free_transformed_key;
+ ret = crypto_shash_final(shash, transformed_key);
+ if (ret < 0)
+ goto out_free_transformed_key;
+
+ kfree(shash);
+ crypto_free_shash(key_tfm);
+
+ return transformed_key;
+
+out_free_transformed_key:
+ kfree_sensitive(transformed_key);
+out_free_shash:
+ kfree(shash);
+out_free_key:
+ crypto_free_shash(key_tfm);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(nvme_auth_transform_key);
+
+static int nvme_auth_hash_skey(int hmac_id, u8 *skey, size_t skey_len, u8 *hkey)
+{
+ const char *digest_name;
+ struct crypto_shash *tfm;
+ int ret;
+
+ digest_name = nvme_auth_digest_name(hmac_id);
+ if (!digest_name) {
+ pr_debug("%s: failed to get digest for %d\n", __func__,
+ hmac_id);
+ return -EINVAL;
+ }
+ tfm = crypto_alloc_shash(digest_name, 0, 0);
+ if (IS_ERR(tfm))
+ return -ENOMEM;
+
+ ret = crypto_shash_tfm_digest(tfm, skey, skey_len, hkey);
+ if (ret < 0)
+ pr_debug("%s: Failed to hash digest len %zu\n", __func__,
+ skey_len);
+
+ crypto_free_shash(tfm);
+ return ret;
+}
+
+int nvme_auth_augmented_challenge(u8 hmac_id, u8 *skey, size_t skey_len,
+ u8 *challenge, u8 *aug, size_t hlen)
+{
+ struct crypto_shash *tfm;
+ struct shash_desc *desc;
+ u8 *hashed_key;
+ const char *hmac_name;
+ int ret;
+
+ hashed_key = kmalloc(hlen, GFP_KERNEL);
+ if (!hashed_key)
+ return -ENOMEM;
+
+ ret = nvme_auth_hash_skey(hmac_id, skey,
+ skey_len, hashed_key);
+ if (ret < 0)
+ goto out_free_key;
+
+ hmac_name = nvme_auth_hmac_name(hmac_id);
+ if (!hmac_name) {
+ pr_warn("%s: invalid hash algorithm %d\n",
+ __func__, hmac_id);
+ ret = -EINVAL;
+ goto out_free_key;
+ }
+
+ tfm = crypto_alloc_shash(hmac_name, 0, 0);
+ if (IS_ERR(tfm)) {
+ ret = PTR_ERR(tfm);
+ goto out_free_key;
+ }
+
+ desc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm),
+ GFP_KERNEL);
+ if (!desc) {
+ ret = -ENOMEM;
+ goto out_free_hash;
+ }
+ desc->tfm = tfm;
+
+ ret = crypto_shash_setkey(tfm, hashed_key, hlen);
+ if (ret)
+ goto out_free_desc;
+
+ ret = crypto_shash_init(desc);
+ if (ret)
+ goto out_free_desc;
+
+ ret = crypto_shash_update(desc, challenge, hlen);
+ if (ret)
+ goto out_free_desc;
+
+ ret = crypto_shash_final(desc, aug);
+out_free_desc:
+ kfree_sensitive(desc);
+out_free_hash:
+ crypto_free_shash(tfm);
+out_free_key:
+ kfree_sensitive(hashed_key);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_augmented_challenge);
+
+int nvme_auth_gen_privkey(struct crypto_kpp *dh_tfm, u8 dh_gid)
+{
+ int ret;
+
+ ret = crypto_kpp_set_secret(dh_tfm, NULL, 0);
+ if (ret)
+ pr_debug("failed to set private key, error %d\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_gen_privkey);
+
+int nvme_auth_gen_pubkey(struct crypto_kpp *dh_tfm,
+ u8 *host_key, size_t host_key_len)
+{
+ struct kpp_request *req;
+ struct crypto_wait wait;
+ struct scatterlist dst;
+ int ret;
+
+ req = kpp_request_alloc(dh_tfm, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ crypto_init_wait(&wait);
+ kpp_request_set_input(req, NULL, 0);
+ sg_init_one(&dst, host_key, host_key_len);
+ kpp_request_set_output(req, &dst, host_key_len);
+ kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &wait);
+
+ ret = crypto_wait_req(crypto_kpp_generate_public_key(req), &wait);
+ kpp_request_free(req);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_gen_pubkey);
+
+int nvme_auth_gen_shared_secret(struct crypto_kpp *dh_tfm,
+ u8 *ctrl_key, size_t ctrl_key_len,
+ u8 *sess_key, size_t sess_key_len)
+{
+ struct kpp_request *req;
+ struct crypto_wait wait;
+ struct scatterlist src, dst;
+ int ret;
+
+ req = kpp_request_alloc(dh_tfm, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ crypto_init_wait(&wait);
+ sg_init_one(&src, ctrl_key, ctrl_key_len);
+ kpp_request_set_input(req, &src, ctrl_key_len);
+ sg_init_one(&dst, sess_key, sess_key_len);
+ kpp_request_set_output(req, &dst, sess_key_len);
+ kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &wait);
+
+ ret = crypto_wait_req(crypto_kpp_compute_shared_secret(req), &wait);
+
+ kpp_request_free(req);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_gen_shared_secret);
+
+int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key)
+{
+ struct nvme_dhchap_key *key;
+ u8 key_hash;
+
+ if (!secret) {
+ *ret_key = NULL;
+ return 0;
+ }
+
+ if (sscanf(secret, "DHHC-1:%hhd:%*s:", &key_hash) != 1)
+ return -EINVAL;
+
+ /* Pass in the secret without the 'DHHC-1:XX:' prefix */
+ key = nvme_auth_extract_key(secret + 10, key_hash);
+ if (IS_ERR(key)) {
+ *ret_key = NULL;
+ return PTR_ERR(key);
+ }
+
+ *ret_key = key;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_generate_key);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index 877d2ec4ea9f..2f6a7f8c94e8 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -92,6 +92,21 @@ config NVME_TCP
If unsure, say N.
+config NVME_AUTH
+ bool "NVM Express over Fabrics In-Band Authentication"
+ depends on NVME_CORE
+ select NVME_COMMON
+ select CRYPTO
+ select CRYPTO_HMAC
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
+ select CRYPTO_DH
+ select CRYPTO_DH_RFC7919_GROUPS
+ help
+ This provides support for NVMe over Fabrics In-Band Authentication.
+
+ If unsure, say N.
+
config NVME_APPLE
tristate "Apple ANS2 NVM Express host driver"
depends on OF && BLOCK
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index a36ae1612059..e27202d22c7d 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -10,12 +10,14 @@ obj-$(CONFIG_NVME_FC) += nvme-fc.o
obj-$(CONFIG_NVME_TCP) += nvme-tcp.o
obj-$(CONFIG_NVME_APPLE) += nvme-apple.o
-nvme-core-y := core.o ioctl.o constants.o
+nvme-core-y += core.o ioctl.o
+nvme-core-$(CONFIG_NVME_VERBOSE_ERRORS) += constants.o
nvme-core-$(CONFIG_TRACING) += trace.o
nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o
nvme-core-$(CONFIG_BLK_DEV_ZONED) += zns.o
nvme-core-$(CONFIG_FAULT_INJECTION_DEBUG_FS) += fault_inject.o
nvme-core-$(CONFIG_NVME_HWMON) += hwmon.o
+nvme-core-$(CONFIG_NVME_AUTH) += auth.o
nvme-y += pci.o
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index 5c352d5d8ee6..5fc5ea196b40 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -845,11 +845,8 @@ static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
apple_nvme_handle_cq(&anv->adminq, true);
spin_unlock_irqrestore(&anv->lock, flags);
- blk_mq_tagset_busy_iter(&anv->tagset, nvme_cancel_request, &anv->ctrl);
- blk_mq_tagset_busy_iter(&anv->admin_tagset, nvme_cancel_request,
- &anv->ctrl);
- blk_mq_tagset_wait_completed_request(&anv->tagset);
- blk_mq_tagset_wait_completed_request(&anv->admin_tagset);
+ nvme_cancel_tagset(&anv->ctrl);
+ nvme_cancel_admin_tagset(&anv->ctrl);
/*
* The driver will not be starting up queues again if shutting down so
@@ -1222,6 +1219,11 @@ static void apple_nvme_async_probe(void *data, async_cookie_t cookie)
nvme_put_ctrl(&anv->ctrl);
}
+static void devm_apple_nvme_put_tag_set(void *data)
+{
+ blk_mq_free_tag_set(data);
+}
+
static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
{
int ret;
@@ -1238,8 +1240,7 @@ static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
ret = blk_mq_alloc_tag_set(&anv->admin_tagset);
if (ret)
return ret;
- ret = devm_add_action_or_reset(anv->dev,
- (void (*)(void *))blk_mq_free_tag_set,
+ ret = devm_add_action_or_reset(anv->dev, devm_apple_nvme_put_tag_set,
&anv->admin_tagset);
if (ret)
return ret;
@@ -1263,8 +1264,8 @@ static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
ret = blk_mq_alloc_tag_set(&anv->tagset);
if (ret)
return ret;
- ret = devm_add_action_or_reset(
- anv->dev, (void (*)(void *))blk_mq_free_tag_set, &anv->tagset);
+ ret = devm_add_action_or_reset(anv->dev, devm_apple_nvme_put_tag_set,
+ &anv->tagset);
if (ret)
return ret;
@@ -1365,6 +1366,11 @@ static int apple_nvme_attach_genpd(struct apple_nvme *anv)
return 0;
}
+static void devm_apple_nvme_mempool_destroy(void *data)
+{
+ mempool_destroy(data);
+}
+
static int apple_nvme_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1462,8 +1468,8 @@ static int apple_nvme_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto put_dev;
}
- ret = devm_add_action_or_reset(
- anv->dev, (void (*)(void *))mempool_destroy, anv->iod_mempool);
+ ret = devm_add_action_or_reset(anv->dev,
+ devm_apple_nvme_mempool_destroy, anv->iod_mempool);
if (ret)
goto put_dev;
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
new file mode 100644
index 000000000000..c8a6db7c4498
--- /dev/null
+++ b/drivers/nvme/host/auth.c
@@ -0,0 +1,1017 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Hannes Reinecke, SUSE Linux
+ */
+
+#include <linux/crc32.h>
+#include <linux/base64.h>
+#include <linux/prandom.h>
+#include <asm/unaligned.h>
+#include <crypto/hash.h>
+#include <crypto/dh.h>
+#include "nvme.h"
+#include "fabrics.h"
+#include <linux/nvme-auth.h>
+
+struct nvme_dhchap_queue_context {
+ struct list_head entry;
+ struct work_struct auth_work;
+ struct nvme_ctrl *ctrl;
+ struct crypto_shash *shash_tfm;
+ struct crypto_kpp *dh_tfm;
+ void *buf;
+ size_t buf_size;
+ int qid;
+ int error;
+ u32 s1;
+ u32 s2;
+ u16 transaction;
+ u8 status;
+ u8 hash_id;
+ size_t hash_len;
+ u8 dhgroup_id;
+ u8 c1[64];
+ u8 c2[64];
+ u8 response[64];
+ u8 *host_response;
+ u8 *ctrl_key;
+ int ctrl_key_len;
+ u8 *host_key;
+ int host_key_len;
+ u8 *sess_key;
+ int sess_key_len;
+};
+
+#define nvme_auth_flags_from_qid(qid) \
+ (qid == 0) ? 0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED
+#define nvme_auth_queue_from_qid(ctrl, qid) \
+ (qid == 0) ? (ctrl)->fabrics_q : (ctrl)->connect_q
+
+static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
+ void *data, size_t data_len, bool auth_send)
+{
+ struct nvme_command cmd = {};
+ blk_mq_req_flags_t flags = nvme_auth_flags_from_qid(qid);
+ struct request_queue *q = nvme_auth_queue_from_qid(ctrl, qid);
+ int ret;
+
+ cmd.auth_common.opcode = nvme_fabrics_command;
+ cmd.auth_common.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER;
+ cmd.auth_common.spsp0 = 0x01;
+ cmd.auth_common.spsp1 = 0x01;
+ if (auth_send) {
+ cmd.auth_send.fctype = nvme_fabrics_type_auth_send;
+ cmd.auth_send.tl = cpu_to_le32(data_len);
+ } else {
+ cmd.auth_receive.fctype = nvme_fabrics_type_auth_receive;
+ cmd.auth_receive.al = cpu_to_le32(data_len);
+ }
+
+ ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, data_len,
+ qid == 0 ? NVME_QID_ANY : qid,
+ 0, flags);
+ if (ret > 0)
+ dev_warn(ctrl->device,
+ "qid %d auth_send failed with status %d\n", qid, ret);
+ else if (ret < 0)
+ dev_err(ctrl->device,
+ "qid %d auth_send failed with error %d\n", qid, ret);
+ return ret;
+}
+
+static int nvme_auth_receive_validate(struct nvme_ctrl *ctrl, int qid,
+ struct nvmf_auth_dhchap_failure_data *data,
+ u16 transaction, u8 expected_msg)
+{
+ dev_dbg(ctrl->device, "%s: qid %d auth_type %d auth_id %x\n",
+ __func__, qid, data->auth_type, data->auth_id);
+
+ if (data->auth_type == NVME_AUTH_COMMON_MESSAGES &&
+ data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
+ return data->rescode_exp;
+ }
+ if (data->auth_type != NVME_AUTH_DHCHAP_MESSAGES ||
+ data->auth_id != expected_msg) {
+ dev_warn(ctrl->device,
+ "qid %d invalid message %02x/%02x\n",
+ qid, data->auth_type, data->auth_id);
+ return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
+ }
+ if (le16_to_cpu(data->t_id) != transaction) {
+ dev_warn(ctrl->device,
+ "qid %d invalid transaction ID %d\n",
+ qid, le16_to_cpu(data->t_id));
+ return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
+ }
+ return 0;
+}
+
+static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ struct nvmf_auth_dhchap_negotiate_data *data = chap->buf;
+ size_t size = sizeof(*data) + sizeof(union nvmf_auth_protocol);
+
+ if (chap->buf_size < size) {
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return -EINVAL;
+ }
+ memset((u8 *)chap->buf, 0, size);
+ data->auth_type = NVME_AUTH_COMMON_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
+ data->t_id = cpu_to_le16(chap->transaction);
+ data->sc_c = 0; /* No secure channel concatenation */
+ data->napd = 1;
+ data->auth_protocol[0].dhchap.authid = NVME_AUTH_DHCHAP_AUTH_ID;
+ data->auth_protocol[0].dhchap.halen = 3;
+ data->auth_protocol[0].dhchap.dhlen = 6;
+ data->auth_protocol[0].dhchap.idlist[0] = NVME_AUTH_HASH_SHA256;
+ data->auth_protocol[0].dhchap.idlist[1] = NVME_AUTH_HASH_SHA384;
+ data->auth_protocol[0].dhchap.idlist[2] = NVME_AUTH_HASH_SHA512;
+ data->auth_protocol[0].dhchap.idlist[30] = NVME_AUTH_DHGROUP_NULL;
+ data->auth_protocol[0].dhchap.idlist[31] = NVME_AUTH_DHGROUP_2048;
+ data->auth_protocol[0].dhchap.idlist[32] = NVME_AUTH_DHGROUP_3072;
+ data->auth_protocol[0].dhchap.idlist[33] = NVME_AUTH_DHGROUP_4096;
+ data->auth_protocol[0].dhchap.idlist[34] = NVME_AUTH_DHGROUP_6144;
+ data->auth_protocol[0].dhchap.idlist[35] = NVME_AUTH_DHGROUP_8192;
+
+ return size;
+}
+
+static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ struct nvmf_auth_dhchap_challenge_data *data = chap->buf;
+ u16 dhvlen = le16_to_cpu(data->dhvlen);
+ size_t size = sizeof(*data) + data->hl + dhvlen;
+ const char *gid_name = nvme_auth_dhgroup_name(data->dhgid);
+ const char *hmac_name, *kpp_name;
+
+ if (chap->buf_size < size) {
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return NVME_SC_INVALID_FIELD;
+ }
+
+ hmac_name = nvme_auth_hmac_name(data->hashid);
+ if (!hmac_name) {
+ dev_warn(ctrl->device,
+ "qid %d: invalid HASH ID %d\n",
+ chap->qid, data->hashid);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+ return NVME_SC_INVALID_FIELD;
+ }
+
+ if (chap->hash_id == data->hashid && chap->shash_tfm &&
+ !strcmp(crypto_shash_alg_name(chap->shash_tfm), hmac_name) &&
+ crypto_shash_digestsize(chap->shash_tfm) == data->hl) {
+ dev_dbg(ctrl->device,
+ "qid %d: reuse existing hash %s\n",
+ chap->qid, hmac_name);
+ goto select_kpp;
+ }
+
+ /* Reset if hash cannot be reused */
+ if (chap->shash_tfm) {
+ crypto_free_shash(chap->shash_tfm);
+ chap->hash_id = 0;
+ chap->hash_len = 0;
+ }
+ chap->shash_tfm = crypto_alloc_shash(hmac_name, 0,
+ CRYPTO_ALG_ALLOCATES_MEMORY);
+ if (IS_ERR(chap->shash_tfm)) {
+ dev_warn(ctrl->device,
+ "qid %d: failed to allocate hash %s, error %ld\n",
+ chap->qid, hmac_name, PTR_ERR(chap->shash_tfm));
+ chap->shash_tfm = NULL;
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ return NVME_SC_AUTH_REQUIRED;
+ }
+
+ if (crypto_shash_digestsize(chap->shash_tfm) != data->hl) {
+ dev_warn(ctrl->device,
+ "qid %d: invalid hash length %d\n",
+ chap->qid, data->hl);
+ crypto_free_shash(chap->shash_tfm);
+ chap->shash_tfm = NULL;
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+ return NVME_SC_AUTH_REQUIRED;
+ }
+
+ /* Reset host response if the hash has been changed */
+ if (chap->hash_id != data->hashid) {
+ kfree(chap->host_response);
+ chap->host_response = NULL;
+ }
+
+ chap->hash_id = data->hashid;
+ chap->hash_len = data->hl;
+ dev_dbg(ctrl->device, "qid %d: selected hash %s\n",
+ chap->qid, hmac_name);
+
+select_kpp:
+ kpp_name = nvme_auth_dhgroup_kpp(data->dhgid);
+ if (!kpp_name) {
+ dev_warn(ctrl->device,
+ "qid %d: invalid DH group id %d\n",
+ chap->qid, data->dhgid);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
+ /* Leave previous dh_tfm intact */
+ return NVME_SC_AUTH_REQUIRED;
+ }
+
+ /* Clear host and controller key to avoid accidental reuse */
+ kfree_sensitive(chap->host_key);
+ chap->host_key = NULL;
+ chap->host_key_len = 0;
+ kfree_sensitive(chap->ctrl_key);
+ chap->ctrl_key = NULL;
+ chap->ctrl_key_len = 0;
+
+ if (chap->dhgroup_id == data->dhgid &&
+ (data->dhgid == NVME_AUTH_DHGROUP_NULL || chap->dh_tfm)) {
+ dev_dbg(ctrl->device,
+ "qid %d: reuse existing DH group %s\n",
+ chap->qid, gid_name);
+ goto skip_kpp;
+ }
+
+ /* Reset dh_tfm if it can't be reused */
+ if (chap->dh_tfm) {
+ crypto_free_kpp(chap->dh_tfm);
+ chap->dh_tfm = NULL;
+ }
+
+ if (data->dhgid != NVME_AUTH_DHGROUP_NULL) {
+ if (dhvlen == 0) {
+ dev_warn(ctrl->device,
+ "qid %d: empty DH value\n",
+ chap->qid);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
+ return NVME_SC_INVALID_FIELD;
+ }
+
+ chap->dh_tfm = crypto_alloc_kpp(kpp_name, 0, 0);
+ if (IS_ERR(chap->dh_tfm)) {
+ int ret = PTR_ERR(chap->dh_tfm);
+
+ dev_warn(ctrl->device,
+ "qid %d: error %d initializing DH group %s\n",
+ chap->qid, ret, gid_name);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
+ chap->dh_tfm = NULL;
+ return NVME_SC_AUTH_REQUIRED;
+ }
+ dev_dbg(ctrl->device, "qid %d: selected DH group %s\n",
+ chap->qid, gid_name);
+ } else if (dhvlen != 0) {
+ dev_warn(ctrl->device,
+ "qid %d: invalid DH value for NULL DH\n",
+ chap->qid);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return NVME_SC_INVALID_FIELD;
+ }
+ chap->dhgroup_id = data->dhgid;
+
+skip_kpp:
+ chap->s1 = le32_to_cpu(data->seqnum);
+ memcpy(chap->c1, data->cval, chap->hash_len);
+ if (dhvlen) {
+ chap->ctrl_key = kmalloc(dhvlen, GFP_KERNEL);
+ if (!chap->ctrl_key) {
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ return NVME_SC_AUTH_REQUIRED;
+ }
+ chap->ctrl_key_len = dhvlen;
+ memcpy(chap->ctrl_key, data->cval + chap->hash_len,
+ dhvlen);
+ dev_dbg(ctrl->device, "ctrl public key %*ph\n",
+ (int)chap->ctrl_key_len, chap->ctrl_key);
+ }
+
+ return 0;
+}
+
+static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ struct nvmf_auth_dhchap_reply_data *data = chap->buf;
+ size_t size = sizeof(*data);
+
+ size += 2 * chap->hash_len;
+
+ if (chap->host_key_len)
+ size += chap->host_key_len;
+
+ if (chap->buf_size < size) {
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return -EINVAL;
+ }
+
+ memset(chap->buf, 0, size);
+ data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
+ data->t_id = cpu_to_le16(chap->transaction);
+ data->hl = chap->hash_len;
+ data->dhvlen = cpu_to_le16(chap->host_key_len);
+ memcpy(data->rval, chap->response, chap->hash_len);
+ if (ctrl->ctrl_key) {
+ get_random_bytes(chap->c2, chap->hash_len);
+ data->cvalid = 1;
+ chap->s2 = nvme_auth_get_seqnum();
+ memcpy(data->rval + chap->hash_len, chap->c2,
+ chap->hash_len);
+ dev_dbg(ctrl->device, "%s: qid %d ctrl challenge %*ph\n",
+ __func__, chap->qid, (int)chap->hash_len, chap->c2);
+ } else {
+ memset(chap->c2, 0, chap->hash_len);
+ chap->s2 = 0;
+ }
+ data->seqnum = cpu_to_le32(chap->s2);
+ if (chap->host_key_len) {
+ dev_dbg(ctrl->device, "%s: qid %d host public key %*ph\n",
+ __func__, chap->qid,
+ chap->host_key_len, chap->host_key);
+ memcpy(data->rval + 2 * chap->hash_len, chap->host_key,
+ chap->host_key_len);
+ }
+
+ return size;
+}
+
+static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ struct nvmf_auth_dhchap_success1_data *data = chap->buf;
+ size_t size = sizeof(*data);
+
+ if (ctrl->ctrl_key)
+ size += chap->hash_len;
+
+ if (chap->buf_size < size) {
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return NVME_SC_INVALID_FIELD;
+ }
+
+ if (data->hl != chap->hash_len) {
+ dev_warn(ctrl->device,
+ "qid %d: invalid hash length %u\n",
+ chap->qid, data->hl);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+ return NVME_SC_INVALID_FIELD;
+ }
+
+ /* Just print out information for the admin queue */
+ if (chap->qid == 0)
+ dev_info(ctrl->device,
+ "qid 0: authenticated with hash %s dhgroup %s\n",
+ nvme_auth_hmac_name(chap->hash_id),
+ nvme_auth_dhgroup_name(chap->dhgroup_id));
+
+ if (!data->rvalid)
+ return 0;
+
+ /* Validate controller response */
+ if (memcmp(chap->response, data->rval, data->hl)) {
+ dev_dbg(ctrl->device, "%s: qid %d ctrl response %*ph\n",
+ __func__, chap->qid, (int)chap->hash_len, data->rval);
+ dev_dbg(ctrl->device, "%s: qid %d host response %*ph\n",
+ __func__, chap->qid, (int)chap->hash_len,
+ chap->response);
+ dev_warn(ctrl->device,
+ "qid %d: controller authentication failed\n",
+ chap->qid);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ return NVME_SC_AUTH_REQUIRED;
+ }
+
+ /* Just print out information for the admin queue */
+ if (chap->qid == 0)
+ dev_info(ctrl->device,
+ "qid 0: controller authenticated\n");
+ return 0;
+}
+
+static int nvme_auth_set_dhchap_success2_data(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ struct nvmf_auth_dhchap_success2_data *data = chap->buf;
+ size_t size = sizeof(*data);
+
+ memset(chap->buf, 0, size);
+ data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2;
+ data->t_id = cpu_to_le16(chap->transaction);
+
+ return size;
+}
+
+static int nvme_auth_set_dhchap_failure2_data(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ struct nvmf_auth_dhchap_failure_data *data = chap->buf;
+ size_t size = sizeof(*data);
+
+ memset(chap->buf, 0, size);
+ data->auth_type = NVME_AUTH_COMMON_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
+ data->t_id = cpu_to_le16(chap->transaction);
+ data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED;
+ data->rescode_exp = chap->status;
+
+ return size;
+}
+
+static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
+ u8 buf[4], *challenge = chap->c1;
+ int ret;
+
+ dev_dbg(ctrl->device, "%s: qid %d host response seq %u transaction %d\n",
+ __func__, chap->qid, chap->s1, chap->transaction);
+
+ if (!chap->host_response) {
+ chap->host_response = nvme_auth_transform_key(ctrl->host_key,
+ ctrl->opts->host->nqn);
+ if (IS_ERR(chap->host_response)) {
+ ret = PTR_ERR(chap->host_response);
+ chap->host_response = NULL;
+ return ret;
+ }
+ } else {
+ dev_dbg(ctrl->device, "%s: qid %d re-using host response\n",
+ __func__, chap->qid);
+ }
+
+ ret = crypto_shash_setkey(chap->shash_tfm,
+ chap->host_response, ctrl->host_key->len);
+ if (ret) {
+ dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
+ chap->qid, ret);
+ goto out;
+ }
+
+ if (chap->dh_tfm) {
+ challenge = kmalloc(chap->hash_len, GFP_KERNEL);
+ if (!challenge) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = nvme_auth_augmented_challenge(chap->hash_id,
+ chap->sess_key,
+ chap->sess_key_len,
+ chap->c1, challenge,
+ chap->hash_len);
+ if (ret)
+ goto out;
+ }
+
+ shash->tfm = chap->shash_tfm;
+ ret = crypto_shash_init(shash);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, challenge, chap->hash_len);
+ if (ret)
+ goto out;
+ put_unaligned_le32(chap->s1, buf);
+ ret = crypto_shash_update(shash, buf, 4);
+ if (ret)
+ goto out;
+ put_unaligned_le16(chap->transaction, buf);
+ ret = crypto_shash_update(shash, buf, 2);
+ if (ret)
+ goto out;
+ memset(buf, 0, sizeof(buf));
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, "HostHost", 8);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
+ strlen(ctrl->opts->host->nqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
+ strlen(ctrl->opts->subsysnqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_final(shash, chap->response);
+out:
+ if (challenge != chap->c1)
+ kfree(challenge);
+ return ret;
+}
+
+static int nvme_auth_dhchap_setup_ctrl_response(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
+ u8 *ctrl_response;
+ u8 buf[4], *challenge = chap->c2;
+ int ret;
+
+ ctrl_response = nvme_auth_transform_key(ctrl->ctrl_key,
+ ctrl->opts->subsysnqn);
+ if (IS_ERR(ctrl_response)) {
+ ret = PTR_ERR(ctrl_response);
+ return ret;
+ }
+ ret = crypto_shash_setkey(chap->shash_tfm,
+ ctrl_response, ctrl->ctrl_key->len);
+ if (ret) {
+ dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
+ chap->qid, ret);
+ goto out;
+ }
+
+ if (chap->dh_tfm) {
+ challenge = kmalloc(chap->hash_len, GFP_KERNEL);
+ if (!challenge) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = nvme_auth_augmented_challenge(chap->hash_id,
+ chap->sess_key,
+ chap->sess_key_len,
+ chap->c2, challenge,
+ chap->hash_len);
+ if (ret)
+ goto out;
+ }
+ dev_dbg(ctrl->device, "%s: qid %d ctrl response seq %u transaction %d\n",
+ __func__, chap->qid, chap->s2, chap->transaction);
+ dev_dbg(ctrl->device, "%s: qid %d challenge %*ph\n",
+ __func__, chap->qid, (int)chap->hash_len, challenge);
+ dev_dbg(ctrl->device, "%s: qid %d subsysnqn %s\n",
+ __func__, chap->qid, ctrl->opts->subsysnqn);
+ dev_dbg(ctrl->device, "%s: qid %d hostnqn %s\n",
+ __func__, chap->qid, ctrl->opts->host->nqn);
+ shash->tfm = chap->shash_tfm;
+ ret = crypto_shash_init(shash);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, challenge, chap->hash_len);
+ if (ret)
+ goto out;
+ put_unaligned_le32(chap->s2, buf);
+ ret = crypto_shash_update(shash, buf, 4);
+ if (ret)
+ goto out;
+ put_unaligned_le16(chap->transaction, buf);
+ ret = crypto_shash_update(shash, buf, 2);
+ if (ret)
+ goto out;
+ memset(buf, 0, 4);
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, "Controller", 10);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
+ strlen(ctrl->opts->subsysnqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
+ strlen(ctrl->opts->host->nqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_final(shash, chap->response);
+out:
+ if (challenge != chap->c2)
+ kfree(challenge);
+ kfree(ctrl_response);
+ return ret;
+}
+
+static int nvme_auth_dhchap_exponential(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ int ret;
+
+ if (chap->host_key && chap->host_key_len) {
+ dev_dbg(ctrl->device,
+ "qid %d: reusing host key\n", chap->qid);
+ goto gen_sesskey;
+ }
+ ret = nvme_auth_gen_privkey(chap->dh_tfm, chap->dhgroup_id);
+ if (ret < 0) {
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return ret;
+ }
+
+ chap->host_key_len = crypto_kpp_maxsize(chap->dh_tfm);
+
+ chap->host_key = kzalloc(chap->host_key_len, GFP_KERNEL);
+ if (!chap->host_key) {
+ chap->host_key_len = 0;
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ return -ENOMEM;
+ }
+ ret = nvme_auth_gen_pubkey(chap->dh_tfm,
+ chap->host_key, chap->host_key_len);
+ if (ret) {
+ dev_dbg(ctrl->device,
+ "failed to generate public key, error %d\n", ret);
+ kfree(chap->host_key);
+ chap->host_key = NULL;
+ chap->host_key_len = 0;
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return ret;
+ }
+
+gen_sesskey:
+ chap->sess_key_len = chap->host_key_len;
+ chap->sess_key = kmalloc(chap->sess_key_len, GFP_KERNEL);
+ if (!chap->sess_key) {
+ chap->sess_key_len = 0;
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ return -ENOMEM;
+ }
+
+ ret = nvme_auth_gen_shared_secret(chap->dh_tfm,
+ chap->ctrl_key, chap->ctrl_key_len,
+ chap->sess_key, chap->sess_key_len);
+ if (ret) {
+ dev_dbg(ctrl->device,
+ "failed to generate shared secret, error %d\n", ret);
+ kfree_sensitive(chap->sess_key);
+ chap->sess_key = NULL;
+ chap->sess_key_len = 0;
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return ret;
+ }
+ dev_dbg(ctrl->device, "shared secret %*ph\n",
+ (int)chap->sess_key_len, chap->sess_key);
+ return 0;
+}
+
+static void __nvme_auth_reset(struct nvme_dhchap_queue_context *chap)
+{
+ kfree_sensitive(chap->host_response);
+ chap->host_response = NULL;
+ kfree_sensitive(chap->host_key);
+ chap->host_key = NULL;
+ chap->host_key_len = 0;
+ kfree_sensitive(chap->ctrl_key);
+ chap->ctrl_key = NULL;
+ chap->ctrl_key_len = 0;
+ kfree_sensitive(chap->sess_key);
+ chap->sess_key = NULL;
+ chap->sess_key_len = 0;
+ chap->status = 0;
+ chap->error = 0;
+ chap->s1 = 0;
+ chap->s2 = 0;
+ chap->transaction = 0;
+ memset(chap->c1, 0, sizeof(chap->c1));
+ memset(chap->c2, 0, sizeof(chap->c2));
+}
+
+static void __nvme_auth_free(struct nvme_dhchap_queue_context *chap)
+{
+ __nvme_auth_reset(chap);
+ if (chap->shash_tfm)
+ crypto_free_shash(chap->shash_tfm);
+ if (chap->dh_tfm)
+ crypto_free_kpp(chap->dh_tfm);
+ kfree_sensitive(chap->ctrl_key);
+ kfree_sensitive(chap->host_key);
+ kfree_sensitive(chap->sess_key);
+ kfree_sensitive(chap->host_response);
+ kfree(chap->buf);
+ kfree(chap);
+}
+
+static void __nvme_auth_work(struct work_struct *work)
+{
+ struct nvme_dhchap_queue_context *chap =
+ container_of(work, struct nvme_dhchap_queue_context, auth_work);
+ struct nvme_ctrl *ctrl = chap->ctrl;
+ size_t tl;
+ int ret = 0;
+
+ chap->transaction = ctrl->transaction++;
+
+ /* DH-HMAC-CHAP Step 1: send negotiate */
+ dev_dbg(ctrl->device, "%s: qid %d send negotiate\n",
+ __func__, chap->qid);
+ ret = nvme_auth_set_dhchap_negotiate_data(ctrl, chap);
+ if (ret < 0) {
+ chap->error = ret;
+ return;
+ }
+ tl = ret;
+ ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
+ if (ret) {
+ chap->error = ret;
+ return;
+ }
+
+ /* DH-HMAC-CHAP Step 2: receive challenge */
+ dev_dbg(ctrl->device, "%s: qid %d receive challenge\n",
+ __func__, chap->qid);
+
+ memset(chap->buf, 0, chap->buf_size);
+ ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, chap->buf_size, false);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid %d failed to receive challenge, %s %d\n",
+ chap->qid, ret < 0 ? "error" : "nvme status", ret);
+ chap->error = ret;
+ return;
+ }
+ ret = nvme_auth_receive_validate(ctrl, chap->qid, chap->buf, chap->transaction,
+ NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE);
+ if (ret) {
+ chap->status = ret;
+ chap->error = NVME_SC_AUTH_REQUIRED;
+ return;
+ }
+
+ ret = nvme_auth_process_dhchap_challenge(ctrl, chap);
+ if (ret) {
+ /* Invalid challenge parameters */
+ chap->error = ret;
+ goto fail2;
+ }
+
+ if (chap->ctrl_key_len) {
+ dev_dbg(ctrl->device,
+ "%s: qid %d DH exponential\n",
+ __func__, chap->qid);
+ ret = nvme_auth_dhchap_exponential(ctrl, chap);
+ if (ret) {
+ chap->error = ret;
+ goto fail2;
+ }
+ }
+
+ dev_dbg(ctrl->device, "%s: qid %d host response\n",
+ __func__, chap->qid);
+ ret = nvme_auth_dhchap_setup_host_response(ctrl, chap);
+ if (ret) {
+ chap->error = ret;
+ goto fail2;
+ }
+
+ /* DH-HMAC-CHAP Step 3: send reply */
+ dev_dbg(ctrl->device, "%s: qid %d send reply\n",
+ __func__, chap->qid);
+ ret = nvme_auth_set_dhchap_reply_data(ctrl, chap);
+ if (ret < 0) {
+ chap->error = ret;
+ goto fail2;
+ }
+
+ tl = ret;
+ ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
+ if (ret) {
+ chap->error = ret;
+ goto fail2;
+ }
+
+ /* DH-HMAC-CHAP Step 4: receive success1 */
+ dev_dbg(ctrl->device, "%s: qid %d receive success1\n",
+ __func__, chap->qid);
+
+ memset(chap->buf, 0, chap->buf_size);
+ ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, chap->buf_size, false);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid %d failed to receive success1, %s %d\n",
+ chap->qid, ret < 0 ? "error" : "nvme status", ret);
+ chap->error = ret;
+ return;
+ }
+ ret = nvme_auth_receive_validate(ctrl, chap->qid,
+ chap->buf, chap->transaction,
+ NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1);
+ if (ret) {
+ chap->status = ret;
+ chap->error = NVME_SC_AUTH_REQUIRED;
+ return;
+ }
+
+ if (ctrl->ctrl_key) {
+ dev_dbg(ctrl->device,
+ "%s: qid %d controller response\n",
+ __func__, chap->qid);
+ ret = nvme_auth_dhchap_setup_ctrl_response(ctrl, chap);
+ if (ret) {
+ chap->error = ret;
+ goto fail2;
+ }
+ }
+
+ ret = nvme_auth_process_dhchap_success1(ctrl, chap);
+ if (ret) {
+ /* Controller authentication failed */
+ chap->error = NVME_SC_AUTH_REQUIRED;
+ goto fail2;
+ }
+
+ if (ctrl->ctrl_key) {
+ /* DH-HMAC-CHAP Step 5: send success2 */
+ dev_dbg(ctrl->device, "%s: qid %d send success2\n",
+ __func__, chap->qid);
+ tl = nvme_auth_set_dhchap_success2_data(ctrl, chap);
+ ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
+ if (ret)
+ chap->error = ret;
+ }
+ if (!ret) {
+ chap->error = 0;
+ return;
+ }
+
+fail2:
+ dev_dbg(ctrl->device, "%s: qid %d send failure2, status %x\n",
+ __func__, chap->qid, chap->status);
+ tl = nvme_auth_set_dhchap_failure2_data(ctrl, chap);
+ ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
+ /*
+ * only update error if send failure2 failed and no other
+ * error had been set during authentication.
+ */
+ if (ret && !chap->error)
+ chap->error = ret;
+}
+
+int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
+{
+ struct nvme_dhchap_queue_context *chap;
+
+ if (!ctrl->host_key) {
+ dev_warn(ctrl->device, "qid %d: no key\n", qid);
+ return -ENOKEY;
+ }
+
+ if (ctrl->opts->dhchap_ctrl_secret && !ctrl->ctrl_key) {
+ dev_warn(ctrl->device, "qid %d: invalid ctrl key\n", qid);
+ return -ENOKEY;
+ }
+
+ mutex_lock(&ctrl->dhchap_auth_mutex);
+ /* Check if the context is already queued */
+ list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) {
+ WARN_ON(!chap->buf);
+ if (chap->qid == qid) {
+ dev_dbg(ctrl->device, "qid %d: re-using context\n", qid);
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ flush_work(&chap->auth_work);
+ __nvme_auth_reset(chap);
+ queue_work(nvme_wq, &chap->auth_work);
+ return 0;
+ }
+ }
+ chap = kzalloc(sizeof(*chap), GFP_KERNEL);
+ if (!chap) {
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ return -ENOMEM;
+ }
+ chap->qid = (qid == NVME_QID_ANY) ? 0 : qid;
+ chap->ctrl = ctrl;
+
+ /*
+ * Allocate a large enough buffer for the entire negotiation:
+ * 4k should be enough for ffdhe8192.
+ */
+ chap->buf_size = 4096;
+ chap->buf = kzalloc(chap->buf_size, GFP_KERNEL);
+ if (!chap->buf) {
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ kfree(chap);
+ return -ENOMEM;
+ }
+
+ INIT_WORK(&chap->auth_work, __nvme_auth_work);
+ list_add(&chap->entry, &ctrl->dhchap_auth_list);
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ queue_work(nvme_wq, &chap->auth_work);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_negotiate);
+
+int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
+{
+ struct nvme_dhchap_queue_context *chap;
+ int ret;
+
+ mutex_lock(&ctrl->dhchap_auth_mutex);
+ list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) {
+ if (chap->qid != qid)
+ continue;
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ flush_work(&chap->auth_work);
+ ret = chap->error;
+ return ret;
+ }
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ return -ENXIO;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_wait);
+
+void nvme_auth_reset(struct nvme_ctrl *ctrl)
+{
+ struct nvme_dhchap_queue_context *chap;
+
+ mutex_lock(&ctrl->dhchap_auth_mutex);
+ list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) {
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ flush_work(&chap->auth_work);
+ __nvme_auth_reset(chap);
+ }
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+}
+EXPORT_SYMBOL_GPL(nvme_auth_reset);
+
+static void nvme_dhchap_auth_work(struct work_struct *work)
+{
+ struct nvme_ctrl *ctrl =
+ container_of(work, struct nvme_ctrl, dhchap_auth_work);
+ int ret, q;
+
+ /* Authenticate admin queue first */
+ ret = nvme_auth_negotiate(ctrl, 0);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid 0: error %d setting up authentication\n", ret);
+ return;
+ }
+ ret = nvme_auth_wait(ctrl, 0);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid 0: authentication failed\n");
+ return;
+ }
+
+ for (q = 1; q < ctrl->queue_count; q++) {
+ ret = nvme_auth_negotiate(ctrl, q);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid %d: error %d setting up authentication\n",
+ q, ret);
+ break;
+ }
+ }
+
+ /*
+ * Failure is a soft-state; credentials remain valid until
+ * the controller terminates the connection.
+ */
+}
+
+void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
+{
+ INIT_LIST_HEAD(&ctrl->dhchap_auth_list);
+ INIT_WORK(&ctrl->dhchap_auth_work, nvme_dhchap_auth_work);
+ mutex_init(&ctrl->dhchap_auth_mutex);
+ if (!ctrl->opts)
+ return;
+ nvme_auth_generate_key(ctrl->opts->dhchap_secret, &ctrl->host_key);
+ nvme_auth_generate_key(ctrl->opts->dhchap_ctrl_secret, &ctrl->ctrl_key);
+}
+EXPORT_SYMBOL_GPL(nvme_auth_init_ctrl);
+
+void nvme_auth_stop(struct nvme_ctrl *ctrl)
+{
+ struct nvme_dhchap_queue_context *chap = NULL, *tmp;
+
+ cancel_work_sync(&ctrl->dhchap_auth_work);
+ mutex_lock(&ctrl->dhchap_auth_mutex);
+ list_for_each_entry_safe(chap, tmp, &ctrl->dhchap_auth_list, entry)
+ cancel_work_sync(&chap->auth_work);
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+}
+EXPORT_SYMBOL_GPL(nvme_auth_stop);
+
+void nvme_auth_free(struct nvme_ctrl *ctrl)
+{
+ struct nvme_dhchap_queue_context *chap = NULL, *tmp;
+
+ mutex_lock(&ctrl->dhchap_auth_mutex);
+ list_for_each_entry_safe(chap, tmp, &ctrl->dhchap_auth_list, entry) {
+ list_del_init(&chap->entry);
+ flush_work(&chap->auth_work);
+ __nvme_auth_free(chap);
+ }
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ if (ctrl->host_key) {
+ nvme_auth_free_key(ctrl->host_key);
+ ctrl->host_key = NULL;
+ }
+ if (ctrl->ctrl_key) {
+ nvme_auth_free_key(ctrl->ctrl_key);
+ ctrl->ctrl_key = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(nvme_auth_free);
diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c
index 4910543f00ff..e958d5015585 100644
--- a/drivers/nvme/host/constants.c
+++ b/drivers/nvme/host/constants.c
@@ -6,7 +6,6 @@
#include "nvme.h"
-#ifdef CONFIG_NVME_VERBOSE_ERRORS
static const char * const nvme_ops[] = {
[nvme_cmd_flush] = "Flush",
[nvme_cmd_write] = "Write",
@@ -178,6 +177,7 @@ const unsigned char *nvme_get_opcode_str(u8 opcode)
return nvme_ops[opcode];
return "Unknown";
}
+EXPORT_SYMBOL_GPL(nvme_get_opcode_str);
const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
{
@@ -185,4 +185,3 @@ const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
return nvme_admin_ops[opcode];
return "Unknown";
}
-#endif /* CONFIG_NVME_VERBOSE_ERRORS */
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 2533b88e66d5..af367b22871b 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -24,12 +24,22 @@
#include "nvme.h"
#include "fabrics.h"
+#include <linux/nvme-auth.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
#define NVME_MINORS (1U << MINORBITS)
+struct nvme_ns_info {
+ struct nvme_ns_ids ids;
+ u32 nsid;
+ __le32 anagrpid;
+ bool is_shared;
+ bool is_readonly;
+ bool is_ready;
+};
+
unsigned int admin_timeout = 60;
module_param(admin_timeout, uint, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
@@ -330,6 +340,7 @@ enum nvme_disposition {
COMPLETE,
RETRY,
FAILOVER,
+ AUTHENTICATE,
};
static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
@@ -337,6 +348,9 @@ static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
if (likely(nvme_req(req)->status == 0))
return COMPLETE;
+ if ((nvme_req(req)->status & 0x7ff) == NVME_SC_AUTH_REQUIRED)
+ return AUTHENTICATE;
+
if (blk_noretry_request(req) ||
(nvme_req(req)->status & NVME_SC_DNR) ||
nvme_req(req)->retries >= nvme_max_retries)
@@ -375,11 +389,13 @@ static inline void nvme_end_req(struct request *req)
void nvme_complete_rq(struct request *req)
{
+ struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
+
trace_nvme_complete_rq(req);
nvme_cleanup_cmd(req);
- if (nvme_req(req)->ctrl->kas)
- nvme_req(req)->ctrl->comp_seen = true;
+ if (ctrl->kas)
+ ctrl->comp_seen = true;
switch (nvme_decide_disposition(req)) {
case COMPLETE:
@@ -391,6 +407,14 @@ void nvme_complete_rq(struct request *req)
case FAILOVER:
nvme_failover_req(req);
return;
+ case AUTHENTICATE:
+#ifdef CONFIG_NVME_AUTH
+ queue_work(nvme_wq, &ctrl->dhchap_auth_work);
+ nvme_retry_req(req);
+#else
+ nvme_end_req(req);
+#endif
+ return;
}
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);
@@ -702,7 +726,9 @@ bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
switch (ctrl->state) {
case NVME_CTRL_CONNECTING:
if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
- req->cmd->fabrics.fctype == nvme_fabrics_type_connect)
+ (req->cmd->fabrics.fctype == nvme_fabrics_type_connect ||
+ req->cmd->fabrics.fctype == nvme_fabrics_type_auth_send ||
+ req->cmd->fabrics.fctype == nvme_fabrics_type_auth_receive))
return true;
break;
default:
@@ -990,8 +1016,7 @@ static int nvme_execute_rq(struct request *rq, bool at_head)
*/
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
- unsigned timeout, int qid, int at_head,
- blk_mq_req_flags_t flags)
+ int qid, int at_head, blk_mq_req_flags_t flags)
{
struct request *req;
int ret;
@@ -1000,15 +1025,12 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
else
req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
- qid ? qid - 1 : 0);
+ qid - 1);
if (IS_ERR(req))
return PTR_ERR(req);
nvme_init_request(req, cmd);
- if (timeout)
- req->timeout = timeout;
-
if (buffer && bufflen) {
ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
if (ret)
@@ -1028,7 +1050,7 @@ EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buffer, unsigned bufflen)
{
- return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
+ return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen,
NVME_QID_ANY, 0, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
@@ -1329,8 +1351,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
}
}
-static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
- struct nvme_ns_ids *ids)
+static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl,
+ struct nvme_ns_info *info)
{
struct nvme_command c = { };
bool csi_seen = false;
@@ -1343,7 +1365,7 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
return 0;
c.identify.opcode = nvme_admin_identify;
- c.identify.nsid = cpu_to_le32(nsid);
+ c.identify.nsid = cpu_to_le32(info->nsid);
c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;
data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
@@ -1355,7 +1377,7 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
if (status) {
dev_warn(ctrl->device,
"Identify Descriptors failed (nsid=%u, status=0x%x)\n",
- nsid, status);
+ info->nsid, status);
goto free_data;
}
@@ -1365,7 +1387,7 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
if (cur->nidl == 0)
break;
- len = nvme_process_ns_desc(ctrl, ids, cur, &csi_seen);
+ len = nvme_process_ns_desc(ctrl, &info->ids, cur, &csi_seen);
if (len < 0)
break;
@@ -1374,7 +1396,7 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
if (nvme_multi_css(ctrl) && !csi_seen) {
dev_warn(ctrl->device, "Command set not reported for nsid:%d\n",
- nsid);
+ info->nsid);
status = -EINVAL;
}
@@ -1384,7 +1406,7 @@ free_data:
}
static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
- struct nvme_ns_ids *ids, struct nvme_id_ns **id)
+ struct nvme_id_ns **id)
{
struct nvme_command c = { };
int error;
@@ -1407,51 +1429,66 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
error = NVME_SC_INVALID_NS | NVME_SC_DNR;
if ((*id)->ncap == 0) /* namespace not allocated or attached */
goto out_free_id;
+ return 0;
+out_free_id:
+ kfree(*id);
+ return error;
+}
+static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl,
+ struct nvme_ns_info *info)
+{
+ struct nvme_ns_ids *ids = &info->ids;
+ struct nvme_id_ns *id;
+ int ret;
+
+ ret = nvme_identify_ns(ctrl, info->nsid, &id);
+ if (ret)
+ return ret;
+ info->anagrpid = id->anagrpid;
+ info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
+ info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
+ info->is_ready = true;
if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) {
dev_info(ctrl->device,
"Ignoring bogus Namespace Identifiers\n");
} else {
if (ctrl->vs >= NVME_VS(1, 1, 0) &&
!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
- memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
+ memcpy(ids->eui64, id->eui64, sizeof(ids->eui64));
if (ctrl->vs >= NVME_VS(1, 2, 0) &&
!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
- memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
+ memcpy(ids->nguid, id->nguid, sizeof(ids->nguid));
}
-
+ kfree(id);
return 0;
-
-out_free_id:
- kfree(*id);
- return error;
}
-static int nvme_identify_ns_cs_indep(struct nvme_ctrl *ctrl, unsigned nsid,
- struct nvme_id_ns_cs_indep **id)
+static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl *ctrl,
+ struct nvme_ns_info *info)
{
+ struct nvme_id_ns_cs_indep *id;
struct nvme_command c = {
.identify.opcode = nvme_admin_identify,
- .identify.nsid = cpu_to_le32(nsid),
+ .identify.nsid = cpu_to_le32(info->nsid),
.identify.cns = NVME_ID_CNS_NS_CS_INDEP,
};
int ret;
- *id = kmalloc(sizeof(**id), GFP_KERNEL);
- if (!*id)
+ id = kmalloc(sizeof(*id), GFP_KERNEL);
+ if (!id)
return -ENOMEM;
- ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
- if (ret) {
- dev_warn(ctrl->device,
- "Identify namespace (CS independent) failed (%d)\n",
- ret);
- kfree(*id);
- return ret;
+ ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
+ if (!ret) {
+ info->anagrpid = id->anagrpid;
+ info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
+ info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
+ info->is_ready = id->nstat & NVME_NSTAT_NRDY;
}
-
- return 0;
+ kfree(id);
+ return ret;
}
static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
@@ -1466,7 +1503,7 @@ static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
c.features.dword11 = cpu_to_le32(dword11);
ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
- buffer, buflen, 0, NVME_QID_ANY, 0, 0);
+ buffer, buflen, NVME_QID_ANY, 0, 0);
if (ret >= 0 && result)
*result = le32_to_cpu(res.u32);
return ret;
@@ -1875,6 +1912,11 @@ static void nvme_update_disk_info(struct gendisk *disk,
ns->ctrl->max_zeroes_sectors);
}
+static bool nvme_ns_is_readonly(struct nvme_ns *ns, struct nvme_ns_info *info)
+{
+ return info->is_readonly || test_bit(NVME_NS_FORCE_RO, &ns->flags);
+}
+
static inline bool nvme_first_scan(struct gendisk *disk)
{
/* nvme_alloc_ns() scans the disk prior to adding it */
@@ -1912,12 +1954,44 @@ static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
blk_queue_chunk_sectors(ns->queue, iob);
}
-static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
+static int nvme_update_ns_info_generic(struct nvme_ns *ns,
+ struct nvme_ns_info *info)
{
- unsigned lbaf = nvme_lbaf_index(id->flbas);
+ blk_mq_freeze_queue(ns->disk->queue);
+ nvme_set_queue_limits(ns->ctrl, ns->queue);
+ set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
+ blk_mq_unfreeze_queue(ns->disk->queue);
+
+ if (nvme_ns_head_multipath(ns->head)) {
+ blk_mq_freeze_queue(ns->head->disk->queue);
+ set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
+ nvme_mpath_revalidate_paths(ns);
+ blk_stack_limits(&ns->head->disk->queue->limits,
+ &ns->queue->limits, 0);
+ ns->head->disk->flags |= GENHD_FL_HIDDEN;
+ blk_mq_unfreeze_queue(ns->head->disk->queue);
+ }
+
+ /* Hide the block-interface for these devices */
+ ns->disk->flags |= GENHD_FL_HIDDEN;
+ set_bit(NVME_NS_READY, &ns->flags);
+
+ return 0;
+}
+
+static int nvme_update_ns_info_block(struct nvme_ns *ns,
+ struct nvme_ns_info *info)
+{
+ struct nvme_id_ns *id;
+ unsigned lbaf;
int ret;
+ ret = nvme_identify_ns(ns->ctrl, info->nsid, &id);
+ if (ret)
+ return ret;
+
blk_mq_freeze_queue(ns->disk->queue);
+ lbaf = nvme_lbaf_index(id->flbas);
ns->lba_shift = id->lbaf[lbaf].ds;
nvme_set_queue_limits(ns->ctrl, ns->queue);
@@ -1927,36 +2001,35 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
if (ns->head->ids.csi == NVME_CSI_ZNS) {
ret = nvme_update_zone_info(ns, lbaf);
- if (ret)
- goto out_unfreeze;
+ if (ret) {
+ blk_mq_unfreeze_queue(ns->disk->queue);
+ goto out;
+ }
}
- set_disk_ro(ns->disk, (id->nsattr & NVME_NS_ATTR_RO) ||
- test_bit(NVME_NS_FORCE_RO, &ns->flags));
+ set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
set_bit(NVME_NS_READY, &ns->flags);
blk_mq_unfreeze_queue(ns->disk->queue);
if (blk_queue_is_zoned(ns->queue)) {
ret = nvme_revalidate_zones(ns);
if (ret && !nvme_first_scan(ns->disk))
- return ret;
+ goto out;
}
if (nvme_ns_head_multipath(ns->head)) {
blk_mq_freeze_queue(ns->head->disk->queue);
nvme_update_disk_info(ns->head->disk, ns, id);
- set_disk_ro(ns->head->disk,
- (id->nsattr & NVME_NS_ATTR_RO) ||
- test_bit(NVME_NS_FORCE_RO, &ns->flags));
+ set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
nvme_mpath_revalidate_paths(ns);
blk_stack_limits(&ns->head->disk->queue->limits,
&ns->queue->limits, 0);
disk_update_readahead(ns->head->disk);
blk_mq_unfreeze_queue(ns->head->disk->queue);
}
- return 0;
-out_unfreeze:
+ ret = 0;
+out:
/*
* If probing fails due an unsupported feature, hide the block device,
* but still allow other access.
@@ -1966,10 +2039,31 @@ out_unfreeze:
set_bit(NVME_NS_READY, &ns->flags);
ret = 0;
}
- blk_mq_unfreeze_queue(ns->disk->queue);
+ kfree(id);
return ret;
}
+static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
+{
+ switch (info->ids.csi) {
+ case NVME_CSI_ZNS:
+ if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
+ dev_info(ns->ctrl->device,
+ "block device for nsid %u not supported without CONFIG_BLK_DEV_ZONED\n",
+ info->nsid);
+ return nvme_update_ns_info_generic(ns, info);
+ }
+ return nvme_update_ns_info_block(ns, info);
+ case NVME_CSI_NVM:
+ return nvme_update_ns_info_block(ns, info);
+ default:
+ dev_info(ns->ctrl->device,
+ "block device for nsid %u not supported (csi %u)\n",
+ info->nsid, info->ids.csi);
+ return nvme_update_ns_info_generic(ns, info);
+ }
+}
+
static char nvme_pr_type(enum pr_type type)
{
switch (type) {
@@ -2103,7 +2197,7 @@ int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
cmd.common.cdw11 = cpu_to_le32(len);
- return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, 0,
+ return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
NVME_QID_ANY, 1, 0);
}
EXPORT_SYMBOL_GPL(nvme_sec_submit);
@@ -2123,6 +2217,7 @@ static int nvme_report_zones(struct gendisk *disk, sector_t sector,
static const struct block_device_operations nvme_bdev_ops = {
.owner = THIS_MODULE,
.ioctl = nvme_ioctl,
+ .compat_ioctl = blkdev_compat_ptr_ioctl,
.open = nvme_open,
.release = nvme_release,
.getgeo = nvme_getgeo,
@@ -3613,6 +3708,108 @@ static ssize_t dctype_show(struct device *dev,
}
static DEVICE_ATTR_RO(dctype);
+#ifdef CONFIG_NVME_AUTH
+static ssize_t nvme_ctrl_dhchap_secret_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+
+ if (!opts->dhchap_secret)
+ return sysfs_emit(buf, "none\n");
+ return sysfs_emit(buf, "%s\n", opts->dhchap_secret);
+}
+
+static ssize_t nvme_ctrl_dhchap_secret_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+ char *dhchap_secret;
+
+ if (!ctrl->opts->dhchap_secret)
+ return -EINVAL;
+ if (count < 7)
+ return -EINVAL;
+ if (memcmp(buf, "DHHC-1:", 7))
+ return -EINVAL;
+
+ dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
+ if (!dhchap_secret)
+ return -ENOMEM;
+ memcpy(dhchap_secret, buf, count);
+ nvme_auth_stop(ctrl);
+ if (strcmp(dhchap_secret, opts->dhchap_secret)) {
+ int ret;
+
+ ret = nvme_auth_generate_key(dhchap_secret, &ctrl->host_key);
+ if (ret)
+ return ret;
+ kfree(opts->dhchap_secret);
+ opts->dhchap_secret = dhchap_secret;
+ /* Key has changed; re-authenticate with the new key */
+ nvme_auth_reset(ctrl);
+ }
+ /* Start re-authentication */
+ dev_info(ctrl->device, "re-authenticating controller\n");
+ queue_work(nvme_wq, &ctrl->dhchap_auth_work);
+
+ return count;
+}
+static DEVICE_ATTR(dhchap_secret, S_IRUGO | S_IWUSR,
+ nvme_ctrl_dhchap_secret_show, nvme_ctrl_dhchap_secret_store);
+
+static ssize_t nvme_ctrl_dhchap_ctrl_secret_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+
+ if (!opts->dhchap_ctrl_secret)
+ return sysfs_emit(buf, "none\n");
+ return sysfs_emit(buf, "%s\n", opts->dhchap_ctrl_secret);
+}
+
+static ssize_t nvme_ctrl_dhchap_ctrl_secret_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+ char *dhchap_secret;
+
+ if (!ctrl->opts->dhchap_ctrl_secret)
+ return -EINVAL;
+ if (count < 7)
+ return -EINVAL;
+ if (memcmp(buf, "DHHC-1:", 7))
+ return -EINVAL;
+
+ dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
+ if (!dhchap_secret)
+ return -ENOMEM;
+ memcpy(dhchap_secret, buf, count);
+ nvme_auth_stop(ctrl);
+ if (strcmp(dhchap_secret, opts->dhchap_ctrl_secret)) {
+ int ret;
+
+ ret = nvme_auth_generate_key(dhchap_secret, &ctrl->ctrl_key);
+ if (ret)
+ return ret;
+ kfree(opts->dhchap_ctrl_secret);
+ opts->dhchap_ctrl_secret = dhchap_secret;
+ /* Key has changed; re-authenticate with the new key */
+ nvme_auth_reset(ctrl);
+ }
+ /* Start re-authentication */
+ dev_info(ctrl->device, "re-authenticating controller\n");
+ queue_work(nvme_wq, &ctrl->dhchap_auth_work);
+
+ return count;
+}
+static DEVICE_ATTR(dhchap_ctrl_secret, S_IRUGO | S_IWUSR,
+ nvme_ctrl_dhchap_ctrl_secret_show, nvme_ctrl_dhchap_ctrl_secret_store);
+#endif
+
static struct attribute *nvme_dev_attrs[] = {
&dev_attr_reset_controller.attr,
&dev_attr_rescan_controller.attr,
@@ -3636,6 +3833,10 @@ static struct attribute *nvme_dev_attrs[] = {
&dev_attr_kato.attr,
&dev_attr_cntrltype.attr,
&dev_attr_dctype.attr,
+#ifdef CONFIG_NVME_AUTH
+ &dev_attr_dhchap_secret.attr,
+ &dev_attr_dhchap_ctrl_secret.attr,
+#endif
NULL
};
@@ -3659,6 +3860,12 @@ static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
return 0;
if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts)
return 0;
+#ifdef CONFIG_NVME_AUTH
+ if (a == &dev_attr_dhchap_secret.attr && !ctrl->opts)
+ return 0;
+ if (a == &dev_attr_dhchap_ctrl_secret.attr && !ctrl->opts)
+ return 0;
+#endif
return a->mode;
}
@@ -3786,7 +3993,7 @@ static int nvme_add_ns_cdev(struct nvme_ns *ns)
}
static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
- unsigned nsid, struct nvme_ns_ids *ids, bool is_shared)
+ struct nvme_ns_info *info)
{
struct nvme_ns_head *head;
size_t size = sizeof(*head);
@@ -3808,9 +4015,9 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
if (ret)
goto out_ida_remove;
head->subsys = ctrl->subsys;
- head->ns_id = nsid;
- head->ids = *ids;
- head->shared = is_shared;
+ head->ns_id = info->nsid;
+ head->ids = info->ids;
+ head->shared = info->is_shared;
kref_init(&head->ref);
if (head->ids.csi) {
@@ -3867,54 +4074,54 @@ static int nvme_global_check_duplicate_ids(struct nvme_subsystem *this,
return ret;
}
-static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
- struct nvme_ns_ids *ids, bool is_shared)
+static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info)
{
struct nvme_ctrl *ctrl = ns->ctrl;
struct nvme_ns_head *head = NULL;
int ret;
- ret = nvme_global_check_duplicate_ids(ctrl->subsys, ids);
+ ret = nvme_global_check_duplicate_ids(ctrl->subsys, &info->ids);
if (ret) {
dev_err(ctrl->device,
- "globally duplicate IDs for nsid %d\n", nsid);
+ "globally duplicate IDs for nsid %d\n", info->nsid);
nvme_print_device_info(ctrl);
return ret;
}
mutex_lock(&ctrl->subsys->lock);
- head = nvme_find_ns_head(ctrl, nsid);
+ head = nvme_find_ns_head(ctrl, info->nsid);
if (!head) {
- ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, ids);
+ ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, &info->ids);
if (ret) {
dev_err(ctrl->device,
"duplicate IDs in subsystem for nsid %d\n",
- nsid);
+ info->nsid);
goto out_unlock;
}
- head = nvme_alloc_ns_head(ctrl, nsid, ids, is_shared);
+ head = nvme_alloc_ns_head(ctrl, info);
if (IS_ERR(head)) {
ret = PTR_ERR(head);
goto out_unlock;
}
} else {
ret = -EINVAL;
- if (!is_shared || !head->shared) {
+ if (!info->is_shared || !head->shared) {
dev_err(ctrl->device,
- "Duplicate unshared namespace %d\n", nsid);
+ "Duplicate unshared namespace %d\n",
+ info->nsid);
goto out_put_ns_head;
}
- if (!nvme_ns_ids_equal(&head->ids, ids)) {
+ if (!nvme_ns_ids_equal(&head->ids, &info->ids)) {
dev_err(ctrl->device,
"IDs don't match for shared namespace %d\n",
- nsid);
+ info->nsid);
goto out_put_ns_head;
}
if (!multipath && !list_empty(&head->list)) {
dev_warn(ctrl->device,
"Found shared namespace %d, but multipathing not supported.\n",
- nsid);
+ info->nsid);
dev_warn_once(ctrl->device,
"Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0\n.");
}
@@ -3968,20 +4175,15 @@ static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
list_add(&ns->list, &ns->ctrl->namespaces);
}
-static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
- struct nvme_ns_ids *ids)
+static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
{
struct nvme_ns *ns;
struct gendisk *disk;
- struct nvme_id_ns *id;
int node = ctrl->numa_node;
- if (nvme_identify_ns(ctrl, nsid, ids, &id))
- return;
-
ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
if (!ns)
- goto out_free_id;
+ return;
disk = blk_mq_alloc_disk(ctrl->tagset, ns);
if (IS_ERR(disk))
@@ -3996,13 +4198,14 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);
blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
- if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
+ if (ctrl->ops->supports_pci_p2pdma &&
+ ctrl->ops->supports_pci_p2pdma(ctrl))
blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
ns->ctrl = ctrl;
kref_init(&ns->kref);
- if (nvme_init_ns_head(ns, nsid, ids, id->nmic & NVME_NS_NMIC_SHARED))
+ if (nvme_init_ns_head(ns, info))
goto out_cleanup_disk;
/*
@@ -4028,7 +4231,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
ns->head->instance);
}
- if (nvme_update_ns_info(ns, id))
+ if (nvme_update_ns_info(ns, info))
goto out_unlink_ns;
down_write(&ctrl->namespaces_rwsem);
@@ -4042,9 +4245,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
if (!nvme_ns_head_multipath(ns->head))
nvme_add_ns_cdev(ns);
- nvme_mpath_add_disk(ns, id);
+ nvme_mpath_add_disk(ns, info->anagrpid);
nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
- kfree(id);
return;
@@ -4064,8 +4266,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
put_disk(disk);
out_free_ns:
kfree(ns);
- out_free_id:
- kfree(id);
}
static void nvme_ns_remove(struct nvme_ns *ns)
@@ -4123,29 +4323,21 @@ static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid)
}
}
-static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_ids *ids)
+static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_info *info)
{
- struct nvme_id_ns *id;
int ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
if (test_bit(NVME_NS_DEAD, &ns->flags))
goto out;
- ret = nvme_identify_ns(ns->ctrl, ns->head->ns_id, ids, &id);
- if (ret)
- goto out;
-
ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
- if (!nvme_ns_ids_equal(&ns->head->ids, ids)) {
+ if (!nvme_ns_ids_equal(&ns->head->ids, &info->ids)) {
dev_err(ns->ctrl->device,
"identifiers changed for nsid %d\n", ns->head->ns_id);
- goto out_free_id;
+ goto out;
}
- ret = nvme_update_ns_info(ns, id);
-
-out_free_id:
- kfree(id);
+ ret = nvme_update_ns_info(ns, info);
out:
/*
* Only remove the namespace if we got a fatal error back from the
@@ -4157,59 +4349,47 @@ out:
nvme_ns_remove(ns);
}
-static void nvme_validate_or_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
- struct nvme_ns_ids ids = { };
- struct nvme_id_ns_cs_indep *id;
+ struct nvme_ns_info info = { .nsid = nsid };
struct nvme_ns *ns;
- bool ready = true;
- if (nvme_identify_ns_descs(ctrl, nsid, &ids))
+ if (nvme_identify_ns_descs(ctrl, &info))
return;
+ if (info.ids.csi != NVME_CSI_NVM && !nvme_multi_css(ctrl)) {
+ dev_warn(ctrl->device,
+ "command set not reported for nsid: %d\n", nsid);
+ return;
+ }
+
/*
- * Check if the namespace is ready. If not ignore it, we will get an
- * AEN once it becomes ready and restart the scan.
+ * If available, try to use the Command Set Independent Identify Namespace
+ * data structure to find all the generic information that is needed to
+ * set up a namespace. If not, fall back to the legacy version.
*/
- if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) &&
- !nvme_identify_ns_cs_indep(ctrl, nsid, &id)) {
- ready = id->nstat & NVME_NSTAT_NRDY;
- kfree(id);
+ if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) ||
+ (info.ids.csi != NVME_CSI_NVM && info.ids.csi != NVME_CSI_ZNS)) {
+ if (nvme_ns_info_from_id_cs_indep(ctrl, &info))
+ return;
+ } else {
+ if (nvme_ns_info_from_identify(ctrl, &info))
+ return;
}
- if (!ready)
+ /*
+ * Ignore the namespace if it is not ready. We will get an AEN once it
+ * becomes ready and restart the scan.
+ */
+ if (!info.is_ready)
return;
ns = nvme_find_get_ns(ctrl, nsid);
if (ns) {
- nvme_validate_ns(ns, &ids);
+ nvme_validate_ns(ns, &info);
nvme_put_ns(ns);
- return;
- }
-
- switch (ids.csi) {
- case NVME_CSI_NVM:
- nvme_alloc_ns(ctrl, nsid, &ids);
- break;
- case NVME_CSI_ZNS:
- if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
- dev_warn(ctrl->device,
- "nsid %u not supported without CONFIG_BLK_DEV_ZONED\n",
- nsid);
- break;
- }
- if (!nvme_multi_css(ctrl)) {
- dev_warn(ctrl->device,
- "command set not reported for nsid: %d\n",
- nsid);
- break;
- }
- nvme_alloc_ns(ctrl, nsid, &ids);
- break;
- default:
- dev_warn(ctrl->device, "unknown csi %u for nsid %u\n",
- ids.csi, nsid);
- break;
+ } else {
+ nvme_alloc_ns(ctrl, &info);
}
}
@@ -4265,7 +4445,7 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
if (!nsid) /* end of the list? */
goto out;
- nvme_validate_or_alloc_ns(ctrl, nsid);
+ nvme_scan_ns(ctrl, nsid);
while (++prev < nsid)
nvme_ns_remove_by_nsid(ctrl, prev);
}
@@ -4288,7 +4468,7 @@ static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl)
kfree(id);
for (i = 1; i <= nn; i++)
- nvme_validate_or_alloc_ns(ctrl, i);
+ nvme_scan_ns(ctrl, i);
nvme_remove_invalid_namespaces(ctrl, nn);
}
@@ -4525,9 +4705,19 @@ static void nvme_fw_act_work(struct work_struct *work)
nvme_get_fw_slot_info(ctrl);
}
+static u32 nvme_aer_type(u32 result)
+{
+ return result & 0x7;
+}
+
+static u32 nvme_aer_subtype(u32 result)
+{
+ return (result & 0xff00) >> 8;
+}
+
static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
{
- u32 aer_notice_type = (result & 0xff00) >> 8;
+ u32 aer_notice_type = nvme_aer_subtype(result);
trace_nvme_async_event(ctrl, aer_notice_type);
@@ -4542,8 +4732,10 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
* recovery actions from interfering with the controller's
* firmware activation.
*/
- if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
+ if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) {
+ nvme_auth_stop(ctrl);
queue_work(nvme_wq, &ctrl->fw_act_work);
+ }
break;
#ifdef CONFIG_NVME_MULTIPATH
case NVME_AER_NOTICE_ANA:
@@ -4560,11 +4752,19 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
}
}
+static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl)
+{
+ trace_nvme_async_event(ctrl, NVME_AER_ERROR);
+ dev_warn(ctrl->device, "resetting controller due to AER\n");
+ nvme_reset_ctrl(ctrl);
+}
+
void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
volatile union nvme_result *res)
{
u32 result = le32_to_cpu(res->u32);
- u32 aer_type = result & 0x07;
+ u32 aer_type = nvme_aer_type(result);
+ u32 aer_subtype = nvme_aer_subtype(result);
if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
return;
@@ -4574,6 +4774,15 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
nvme_handle_aen_notice(ctrl, result);
break;
case NVME_AER_ERROR:
+ /*
+ * For a persistent internal error, don't run async_event_work
+ * to submit a new AER. The controller reset will do it.
+ */
+ if (aer_subtype == NVME_AER_ERROR_PERSIST_INT_ERR) {
+ nvme_handle_aer_persistent_error(ctrl);
+ return;
+ }
+ fallthrough;
case NVME_AER_SMART:
case NVME_AER_CSS:
case NVME_AER_VS:
@@ -4590,6 +4799,7 @@ EXPORT_SYMBOL_GPL(nvme_complete_async_event);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
{
nvme_mpath_stop(ctrl);
+ nvme_auth_stop(ctrl);
nvme_stop_keep_alive(ctrl);
nvme_stop_failfast_work(ctrl);
flush_work(&ctrl->async_event_work);
@@ -4649,6 +4859,8 @@ static void nvme_free_ctrl(struct device *dev)
nvme_free_cels(ctrl);
nvme_mpath_uninit(ctrl);
+ nvme_auth_stop(ctrl);
+ nvme_auth_free(ctrl);
__free_page(ctrl->discard_page);
if (subsys) {
@@ -4739,6 +4951,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
nvme_mpath_init_ctrl(ctrl);
+ nvme_auth_init_ctrl(ctrl);
return 0;
out_free_name:
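One change threaded through the rest of this series: __nvme_submit_sync_cmd() loses its per-command timeout parameter and no longer touches req->timeout, so callers simply drop the argument that used to be 0. Illustrative before/after, with buf/len standing in for whatever a caller passes:

	/* before: the sixth argument was a timeout override, almost always 0 */
	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res, buf, len, 0,
				     NVME_QID_ANY, 0, 0);

	/* after: no timeout argument; the request keeps its queue default */
	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res, buf, len,
				     NVME_QID_ANY, 0, 0);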
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index ee79a6d639b4..10cc4a814602 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -152,7 +152,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
cmd.prop_get.fctype = nvme_fabrics_type_property_get;
cmd.prop_get.offset = cpu_to_le32(off);
- ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
+ ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
NVME_QID_ANY, 0, 0);
if (ret >= 0)
@@ -198,7 +198,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
cmd.prop_get.attrib = 1;
cmd.prop_get.offset = cpu_to_le32(off);
- ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
+ ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
NVME_QID_ANY, 0, 0);
if (ret >= 0)
@@ -243,7 +243,7 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
cmd.prop_set.offset = cpu_to_le32(off);
cmd.prop_set.value = cpu_to_le64(val);
- ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0, 0,
+ ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0,
NVME_QID_ANY, 0, 0);
if (unlikely(ret))
dev_err(ctrl->device,
@@ -270,6 +270,12 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
{
int err_sctype = errval & ~NVME_SC_DNR;
+ if (errval < 0) {
+ dev_err(ctrl->device,
+ "Connect command failed, errno: %d\n", errval);
+ return;
+ }
+
switch (err_sctype) {
case NVME_SC_CONNECT_INVALID_PARAM:
if (offset >> 16) {
@@ -331,6 +337,10 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
dev_err(ctrl->device,
"Connect command failed: host path error\n");
break;
+ case NVME_SC_AUTH_REQUIRED:
+ dev_err(ctrl->device,
+ "Connect command failed: authentication required\n");
+ break;
default:
dev_err(ctrl->device,
"Connect command failed, error wo/DNR bit: %d\n",
@@ -365,6 +375,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
union nvme_result res;
struct nvmf_connect_data *data;
int ret;
+ u32 result;
cmd.connect.opcode = nvme_fabrics_command;
cmd.connect.fctype = nvme_fabrics_type_connect;
@@ -389,7 +400,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
- data, sizeof(*data), 0, NVME_QID_ANY, 1,
+ data, sizeof(*data), NVME_QID_ANY, 1,
BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
@@ -397,8 +408,25 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
goto out_free_data;
}
- ctrl->cntlid = le16_to_cpu(res.u16);
-
+ result = le32_to_cpu(res.u32);
+ ctrl->cntlid = result & 0xFFFF;
+ if ((result >> 16) & 0x3) {
+ /* Authentication required */
+ ret = nvme_auth_negotiate(ctrl, 0);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid 0: authentication setup failed\n");
+ ret = NVME_SC_AUTH_REQUIRED;
+ goto out_free_data;
+ }
+ ret = nvme_auth_wait(ctrl, 0);
+ if (ret)
+ dev_warn(ctrl->device,
+ "qid 0: authentication failed\n");
+ else
+ dev_info(ctrl->device,
+ "qid 0: authenticated\n");
+ }
out_free_data:
kfree(data);
return ret;
@@ -431,6 +459,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
struct nvmf_connect_data *data;
union nvme_result res;
int ret;
+ u32 result;
cmd.connect.opcode = nvme_fabrics_command;
cmd.connect.fctype = nvme_fabrics_type_connect;
@@ -450,12 +479,27 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
- data, sizeof(*data), 0, qid, 1,
+ data, sizeof(*data), qid, 1,
BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
&cmd, data);
}
+ result = le32_to_cpu(res.u32);
+ if ((result >> 16) & 2) {
+ /* Authentication required */
+ ret = nvme_auth_negotiate(ctrl, qid);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid %d: authentication setup failed\n", qid);
+ ret = NVME_SC_AUTH_REQUIRED;
+ } else {
+ ret = nvme_auth_wait(ctrl, qid);
+ if (ret)
+ dev_warn(ctrl->device,
+ "qid %u: authentication failed\n", qid);
+ }
+ }
kfree(data);
return ret;
}
@@ -548,6 +592,8 @@ static const match_table_t opt_tokens = {
{ NVMF_OPT_TOS, "tos=%d" },
{ NVMF_OPT_FAIL_FAST_TMO, "fast_io_fail_tmo=%d" },
{ NVMF_OPT_DISCOVERY, "discovery" },
+ { NVMF_OPT_DHCHAP_SECRET, "dhchap_secret=%s" },
+ { NVMF_OPT_DHCHAP_CTRL_SECRET, "dhchap_ctrl_secret=%s" },
{ NVMF_OPT_ERR, NULL }
};
@@ -829,6 +875,34 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
case NVMF_OPT_DISCOVERY:
opts->discovery_nqn = true;
break;
+ case NVMF_OPT_DHCHAP_SECRET:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (strlen(p) < 11 || strncmp(p, "DHHC-1:", 7)) {
+ pr_err("Invalid DH-CHAP secret %s\n", p);
+ ret = -EINVAL;
+ goto out;
+ }
+ kfree(opts->dhchap_secret);
+ opts->dhchap_secret = p;
+ break;
+ case NVMF_OPT_DHCHAP_CTRL_SECRET:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (strlen(p) < 11 || strncmp(p, "DHHC-1:", 7)) {
+ pr_err("Invalid DH-CHAP secret %s\n", p);
+ ret = -EINVAL;
+ goto out;
+ }
+ kfree(opts->dhchap_ctrl_secret);
+ opts->dhchap_ctrl_secret = p;
+ break;
default:
pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
p);
@@ -947,6 +1021,8 @@ void nvmf_free_options(struct nvmf_ctrl_options *opts)
kfree(opts->subsysnqn);
kfree(opts->host_traddr);
kfree(opts->host_iface);
+ kfree(opts->dhchap_secret);
+ kfree(opts->dhchap_ctrl_secret);
kfree(opts);
}
EXPORT_SYMBOL_GPL(nvmf_free_options);
@@ -956,7 +1032,8 @@ EXPORT_SYMBOL_GPL(nvmf_free_options);
NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \
NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT |\
NVMF_OPT_DISABLE_SQFLOW | NVMF_OPT_DISCOVERY |\
- NVMF_OPT_FAIL_FAST_TMO)
+ NVMF_OPT_FAIL_FAST_TMO | NVMF_OPT_DHCHAP_SECRET |\
+ NVMF_OPT_DHCHAP_CTRL_SECRET)
static struct nvme_ctrl *
nvmf_create_ctrl(struct device *dev, const char *buf)
@@ -1159,7 +1236,7 @@ static int __init nvmf_init(void)
nvmf_device =
device_create(nvmf_class, NULL, MKDEV(0, 0), NULL, "ctl");
if (IS_ERR(nvmf_device)) {
- pr_err("couldn't create nvme-fabris device!\n");
+ pr_err("couldn't create nvme-fabrics device!\n");
ret = PTR_ERR(nvmf_device);
goto out_destroy_class;
}
@@ -1192,7 +1269,14 @@ static void __exit nvmf_exit(void)
BUILD_BUG_ON(sizeof(struct nvmf_connect_command) != 64);
BUILD_BUG_ON(sizeof(struct nvmf_property_get_command) != 64);
BUILD_BUG_ON(sizeof(struct nvmf_property_set_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_send_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_receive_command) != 64);
BUILD_BUG_ON(sizeof(struct nvmf_connect_data) != 1024);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_negotiate_data) != 8);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_challenge_data) != 16);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_reply_data) != 16);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_success1_data) != 16);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_success2_data) != 16);
}
MODULE_LICENSE("GPL v2");
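Both of the new dhchap option parsers above and the two sysfs store handlers in core.c repeat the same sanity check on a secret string. A hypothetical helper capturing the option-parser variant (the 11-byte minimum and the "DHHC-1:" prefix come straight from the hunks above; the sysfs handlers only insist on the prefix, and the helper itself is not part of the patch):

static bool example_dhchap_secret_is_plausible(const char *secret)
{
	return strlen(secret) >= 11 && !strncmp(secret, "DHHC-1:", 7);
}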
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 46d6e194ac2b..a6e22116e139 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -68,6 +68,8 @@ enum {
NVMF_OPT_FAIL_FAST_TMO = 1 << 20,
NVMF_OPT_HOST_IFACE = 1 << 21,
NVMF_OPT_DISCOVERY = 1 << 22,
+ NVMF_OPT_DHCHAP_SECRET = 1 << 23,
+ NVMF_OPT_DHCHAP_CTRL_SECRET = 1 << 24,
};
/**
@@ -97,6 +99,9 @@ enum {
* @max_reconnects: maximum number of allowed reconnect attempts before removing
* the controller, (-1) means reconnect forever, zero means remove
* immediately;
+ * @dhchap_secret: DH-HMAC-CHAP secret
+ * @dhchap_ctrl_secret: DH-HMAC-CHAP controller secret for bi-directional
+ * authentication
* @disable_sqflow: disable controller sq flow control
* @hdr_digest: generate/verify header digest (TCP)
* @data_digest: generate/verify data digest (TCP)
@@ -121,6 +126,8 @@ struct nvmf_ctrl_options {
unsigned int kato;
struct nvmf_host *host;
int max_reconnects;
+ char *dhchap_secret;
+ char *dhchap_ctrl_secret;
bool disable_sqflow;
bool hdr_digest;
bool data_digest;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 9987797620b6..127abaf9ba5d 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2533,6 +2533,8 @@ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
nvme_fc_terminate_exchange, &ctrl->ctrl);
blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
+ if (start_queues)
+ nvme_start_admin_queue(&ctrl->ctrl);
}
static void
@@ -3878,6 +3880,7 @@ static int fc_parse_cgrpid(const char *buf, u64 *id)
static ssize_t fc_appid_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
+ size_t orig_count = count;
u64 cgrp_id;
int appid_len = 0;
int cgrpid_len = 0;
@@ -3902,7 +3905,7 @@ static ssize_t fc_appid_store(struct device *dev,
ret = blkcg_set_fc_appid(app_id, cgrp_id, sizeof(app_id));
if (ret < 0)
return ret;
- return count;
+ return orig_count;
}
static DEVICE_ATTR(appid_store, 0200, NULL, fc_appid_store);
#endif /* CONFIG_BLK_CGROUP_FC_APPID */
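The fc_appid_store() change above matters because a sysfs ->store() callback reports how many bytes it consumed: the function trims a trailing newline before parsing, and returning the trimmed length could make userspace treat the write as short and retry the leftover byte. A generic sketch of the pattern, not taken from this patch:

static ssize_t example_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	size_t len = count;

	/* parse without a trailing newline ... */
	if (len && buf[len - 1] == '\n')
		len--;

	/* ... but report the whole write as consumed so the caller
	 * does not re-issue the remaining byte. */
	return count;
}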
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index f26640ccb955..6ef497c75a16 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -346,7 +346,7 @@ static void nvme_ns_head_submit_bio(struct bio *bio)
* different queue via blk_steal_bios(), so we need to use the bio_split
* pool from the original queue to allocate the bvecs from.
*/
- blk_queue_split(&bio);
+ bio = bio_split_to_limits(bio);
srcu_idx = srcu_read_lock(&head->srcu);
ns = nvme_find_path(head);
@@ -408,6 +408,7 @@ const struct block_device_operations nvme_ns_head_ops = {
.open = nvme_ns_head_open,
.release = nvme_ns_head_release,
.ioctl = nvme_ns_head_ioctl,
+ .compat_ioctl = blkdev_compat_ptr_ioctl,
.getgeo = nvme_getgeo,
.report_zones = nvme_ns_head_report_zones,
.pr_ops = &nvme_pr_ops,
@@ -800,16 +801,16 @@ static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
return -ENXIO; /* just break out of the loop */
}
-void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
+void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
{
if (nvme_ctrl_use_ana(ns->ctrl)) {
struct nvme_ana_group_desc desc = {
- .grpid = id->anagrpid,
+ .grpid = anagrpid,
.state = 0,
};
mutex_lock(&ns->ctrl->ana_lock);
- ns->ana_grpid = le32_to_cpu(id->anagrpid);
+ ns->ana_grpid = le32_to_cpu(anagrpid);
nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc);
mutex_unlock(&ns->ctrl->ana_lock);
if (desc.state) {
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 7e0a925bf3be..1bdf714dcd9e 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -140,7 +140,7 @@ enum nvme_quirks {
NVME_QUIRK_DMA_ADDRESS_BITS_48 = (1 << 16),
/*
- * The controller requires the command_id value be be limited, so skip
+ * The controller requires the command_id value be limited, so skip
* encoding the generation sequence number.
*/
NVME_QUIRK_SKIP_CID_GEN = (1 << 17),
@@ -328,6 +328,15 @@ struct nvme_ctrl {
struct work_struct ana_work;
#endif
+#ifdef CONFIG_NVME_AUTH
+ struct work_struct dhchap_auth_work;
+ struct list_head dhchap_auth_list;
+ struct mutex dhchap_auth_mutex;
+ struct nvme_dhchap_key *host_key;
+ struct nvme_dhchap_key *ctrl_key;
+ u16 transaction;
+#endif
+
/* Power saving configuration */
u64 ps_max_latency_us;
bool apst_enabled;
@@ -495,7 +504,6 @@ struct nvme_ctrl_ops {
unsigned int flags;
#define NVME_F_FABRICS (1 << 0)
#define NVME_F_METADATA_SUPPORTED (1 << 1)
-#define NVME_F_PCI_P2PDMA (1 << 2)
int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
@@ -505,6 +513,7 @@ struct nvme_ctrl_ops {
void (*stop_ctrl)(struct nvme_ctrl *ctrl);
int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
void (*print_device_info)(struct nvme_ctrl *ctrl);
+ bool (*supports_pci_p2pdma)(struct nvme_ctrl *ctrl);
};
/*
@@ -781,7 +790,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
- unsigned timeout, int qid, int at_head,
+ int qid, int at_head,
blk_mq_req_flags_t flags);
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
unsigned int dword11, void *buffer, size_t buflen,
@@ -837,7 +846,7 @@ void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys);
void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
-void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
+void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
@@ -879,8 +888,7 @@ static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
{
return 0;
}
-static inline void nvme_mpath_add_disk(struct nvme_ns *ns,
- struct nvme_id_ns *id)
+static inline void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
{
}
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
@@ -992,6 +1000,27 @@ static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
return ctrl->sgls & ((1 << 0) | (1 << 1));
}
+#ifdef CONFIG_NVME_AUTH
+void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl);
+void nvme_auth_stop(struct nvme_ctrl *ctrl);
+int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid);
+int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid);
+void nvme_auth_reset(struct nvme_ctrl *ctrl);
+void nvme_auth_free(struct nvme_ctrl *ctrl);
+#else
+static inline void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl) {};
+static inline void nvme_auth_stop(struct nvme_ctrl *ctrl) {};
+static inline int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
+{
+ return -EPROTONOSUPPORT;
+}
+static inline int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
+{
+ return NVME_SC_AUTH_REQUIRED;
+}
+static inline void nvme_auth_free(struct nvme_ctrl *ctrl) {};
+#endif
+
u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
u8 opcode);
int nvme_execute_passthru_rq(struct request *rq);
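The #else stubs above keep transports free of CONFIG_NVME_AUTH ifdefs: the same call sequence either performs DH-HMAC-CHAP or fails the connect cleanly. A sketch of the caller-side shape; the AUTHREQ bit test matches the fabrics.c hunk earlier, while the wrapper function is illustrative:

static int example_post_connect_auth(struct nvme_ctrl *ctrl, int qid, u32 result)
{
	int ret;

	if (!((result >> 16) & 0x3))	/* controller did not request authentication */
		return 0;

	ret = nvme_auth_negotiate(ctrl, qid);	/* -EPROTONOSUPPORT when auth is compiled out */
	if (ret)
		return ret;
	return nvme_auth_wait(ctrl, qid);	/* NVME_SC_AUTH_REQUIRED from the stub */
}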
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 7e7d4802ac6b..3a1c37f32f30 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -230,11 +230,10 @@ struct nvme_iod {
bool use_sgl;
int aborted;
int npages; /* In the PRP list. 0 means small pool in use */
- int nents; /* Used in scatterlist */
dma_addr_t first_dma;
unsigned int dma_len; /* length of single DMA segment mapping */
dma_addr_t meta_dma;
- struct scatterlist *sg;
+ struct sg_table sgt;
};
static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev)
@@ -524,7 +523,7 @@ static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
static void **nvme_pci_iod_list(struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
+ return (void **)(iod->sgt.sgl + blk_rq_nr_phys_segments(req));
}
static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
@@ -576,17 +575,6 @@ static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
}
}
-static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req)
-{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-
- if (is_pci_p2pdma_page(sg_page(iod->sg)))
- pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
- rq_dma_dir(req));
- else
- dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
-}
-
static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
@@ -597,9 +585,10 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
return;
}
- WARN_ON_ONCE(!iod->nents);
+ WARN_ON_ONCE(!iod->sgt.nents);
+
+ dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
- nvme_unmap_sg(dev, req);
if (iod->npages == 0)
dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
iod->first_dma);
@@ -607,7 +596,7 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
nvme_free_sgls(dev, req);
else
nvme_free_prps(dev, req);
- mempool_free(iod->sg, dev->iod_mempool);
+ mempool_free(iod->sgt.sgl, dev->iod_mempool);
}
static void nvme_print_sgl(struct scatterlist *sgl, int nents)
@@ -630,7 +619,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct dma_pool *pool;
int length = blk_rq_payload_bytes(req);
- struct scatterlist *sg = iod->sg;
+ struct scatterlist *sg = iod->sgt.sgl;
int dma_len = sg_dma_len(sg);
u64 dma_addr = sg_dma_address(sg);
int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
@@ -670,7 +659,6 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
if (!prp_list) {
- iod->first_dma = dma_addr;
iod->npages = -1;
return BLK_STS_RESOURCE;
}
@@ -703,16 +691,16 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
dma_len = sg_dma_len(sg);
}
done:
- cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+ cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sgt.sgl));
cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
return BLK_STS_OK;
free_prps:
nvme_free_prps(dev, req);
return BLK_STS_RESOURCE;
bad_sgl:
- WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
+ WARN(DO_ONCE(nvme_print_sgl, iod->sgt.sgl, iod->sgt.nents),
"Invalid SGL for payload:%d nents:%d\n",
- blk_rq_payload_bytes(req), iod->nents);
+ blk_rq_payload_bytes(req), iod->sgt.nents);
return BLK_STS_IOERR;
}
@@ -738,12 +726,13 @@ static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
}
static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
- struct request *req, struct nvme_rw_command *cmd, int entries)
+ struct request *req, struct nvme_rw_command *cmd)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct dma_pool *pool;
struct nvme_sgl_desc *sg_list;
- struct scatterlist *sg = iod->sg;
+ struct scatterlist *sg = iod->sgt.sgl;
+ unsigned int entries = iod->sgt.nents;
dma_addr_t sgl_dma;
int i = 0;
@@ -841,7 +830,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
blk_status_t ret = BLK_STS_RESOURCE;
- int nr_mapped;
+ int rc;
if (blk_rq_nr_phys_segments(req) == 1) {
struct bio_vec bv = req_bvec(req);
@@ -859,26 +848,25 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
}
iod->dma_len = 0;
- iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
- if (!iod->sg)
+ iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
+ if (!iod->sgt.sgl)
return BLK_STS_RESOURCE;
- sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
- iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
- if (!iod->nents)
+ sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req));
+ iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl);
+ if (!iod->sgt.orig_nents)
goto out_free_sg;
- if (is_pci_p2pdma_page(sg_page(iod->sg)))
- nr_mapped = pci_p2pdma_map_sg_attrs(dev->dev, iod->sg,
- iod->nents, rq_dma_dir(req), DMA_ATTR_NO_WARN);
- else
- nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
- rq_dma_dir(req), DMA_ATTR_NO_WARN);
- if (!nr_mapped)
+ rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req),
+ DMA_ATTR_NO_WARN);
+ if (rc) {
+ if (rc == -EREMOTEIO)
+ ret = BLK_STS_TARGET;
goto out_free_sg;
+ }
iod->use_sgl = nvme_pci_use_sgls(dev, req);
if (iod->use_sgl)
- ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
+ ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
else
ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
if (ret != BLK_STS_OK)
@@ -886,9 +874,9 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
return BLK_STS_OK;
out_unmap_sg:
- nvme_unmap_sg(dev, req);
+ dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
out_free_sg:
- mempool_free(iod->sg, dev->iod_mempool);
+ mempool_free(iod->sgt.sgl, dev->iod_mempool);
return ret;
}
@@ -912,7 +900,7 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
iod->aborted = 0;
iod->npages = -1;
- iod->nents = 0;
+ iod->sgt.nents = 0;
ret = nvme_setup_cmd(req->q->queuedata, req);
if (ret)
@@ -1435,8 +1423,10 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
dev_warn(nvmeq->dev->ctrl.device,
- "I/O %d QID %d timeout, aborting\n",
- req->tag, nvmeq->qid);
+ "I/O %d (%s) QID %d timeout, aborting\n",
+ req->tag,
+ nvme_get_opcode_str(nvme_req(req)->cmd->common.opcode),
+ nvmeq->qid);
abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, nvme_req_op(&cmd),
BLK_MQ_REQ_NOWAIT);
@@ -1765,37 +1755,35 @@ static void nvme_dev_remove_admin(struct nvme_dev *dev)
}
}
-static int nvme_alloc_admin_tags(struct nvme_dev *dev)
+static int nvme_pci_alloc_admin_tag_set(struct nvme_dev *dev)
{
- if (!dev->ctrl.admin_q) {
- dev->admin_tagset.ops = &nvme_mq_admin_ops;
- dev->admin_tagset.nr_hw_queues = 1;
+ struct blk_mq_tag_set *set = &dev->admin_tagset;
- dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
- dev->admin_tagset.timeout = NVME_ADMIN_TIMEOUT;
- dev->admin_tagset.numa_node = dev->ctrl.numa_node;
- dev->admin_tagset.cmd_size = sizeof(struct nvme_iod);
- dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
- dev->admin_tagset.driver_data = dev;
+ set->ops = &nvme_mq_admin_ops;
+ set->nr_hw_queues = 1;
- if (blk_mq_alloc_tag_set(&dev->admin_tagset))
- return -ENOMEM;
- dev->ctrl.admin_tagset = &dev->admin_tagset;
+ set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
+ set->timeout = NVME_ADMIN_TIMEOUT;
+ set->numa_node = dev->ctrl.numa_node;
+ set->cmd_size = sizeof(struct nvme_iod);
+ set->flags = BLK_MQ_F_NO_SCHED;
+ set->driver_data = dev;
- dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset);
- if (IS_ERR(dev->ctrl.admin_q)) {
- blk_mq_free_tag_set(&dev->admin_tagset);
- dev->ctrl.admin_q = NULL;
- return -ENOMEM;
- }
- if (!blk_get_queue(dev->ctrl.admin_q)) {
- nvme_dev_remove_admin(dev);
- dev->ctrl.admin_q = NULL;
- return -ENODEV;
- }
- } else
- nvme_start_admin_queue(&dev->ctrl);
+ if (blk_mq_alloc_tag_set(set))
+ return -ENOMEM;
+ dev->ctrl.admin_tagset = set;
+ dev->ctrl.admin_q = blk_mq_init_queue(set);
+ if (IS_ERR(dev->ctrl.admin_q)) {
+ blk_mq_free_tag_set(set);
+ dev->ctrl.admin_q = NULL;
+ return -ENOMEM;
+ }
+ if (!blk_get_queue(dev->ctrl.admin_q)) {
+ nvme_dev_remove_admin(dev);
+ dev->ctrl.admin_q = NULL;
+ return -ENODEV;
+ }
return 0;
}
@@ -2534,47 +2522,45 @@ static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode)
return true;
}
-static void nvme_dev_add(struct nvme_dev *dev)
+static void nvme_pci_alloc_tag_set(struct nvme_dev *dev)
{
+ struct blk_mq_tag_set * set = &dev->tagset;
int ret;
- if (!dev->ctrl.tagset) {
- dev->tagset.ops = &nvme_mq_ops;
- dev->tagset.nr_hw_queues = dev->online_queues - 1;
- dev->tagset.nr_maps = 2; /* default + read */
- if (dev->io_queues[HCTX_TYPE_POLL])
- dev->tagset.nr_maps++;
- dev->tagset.timeout = NVME_IO_TIMEOUT;
- dev->tagset.numa_node = dev->ctrl.numa_node;
- dev->tagset.queue_depth = min_t(unsigned int, dev->q_depth,
- BLK_MQ_MAX_DEPTH) - 1;
- dev->tagset.cmd_size = sizeof(struct nvme_iod);
- dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
- dev->tagset.driver_data = dev;
-
- /*
- * Some Apple controllers requires tags to be unique
- * across admin and IO queue, so reserve the first 32
- * tags of the IO queue.
- */
- if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
- dev->tagset.reserved_tags = NVME_AQ_DEPTH;
+ set->ops = &nvme_mq_ops;
+ set->nr_hw_queues = dev->online_queues - 1;
+ set->nr_maps = 2; /* default + read */
+ if (dev->io_queues[HCTX_TYPE_POLL])
+ set->nr_maps++;
+ set->timeout = NVME_IO_TIMEOUT;
+ set->numa_node = dev->ctrl.numa_node;
+ set->queue_depth = min_t(unsigned, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
+ set->cmd_size = sizeof(struct nvme_iod);
+ set->flags = BLK_MQ_F_SHOULD_MERGE;
+ set->driver_data = dev;
- ret = blk_mq_alloc_tag_set(&dev->tagset);
- if (ret) {
- dev_warn(dev->ctrl.device,
- "IO queues tagset allocation failed %d\n", ret);
- return;
- }
- dev->ctrl.tagset = &dev->tagset;
- } else {
- blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
+ /*
+ * Some Apple controllers requires tags to be unique
+ * across admin and IO queue, so reserve the first 32
+ * tags of the IO queue.
+ */
+ if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
+ set->reserved_tags = NVME_AQ_DEPTH;
- /* Free previously allocated queues that are no longer usable */
- nvme_free_queues(dev, dev->online_queues);
+ ret = blk_mq_alloc_tag_set(set);
+ if (ret) {
+ dev_warn(dev->ctrl.device,
+ "IO queues tagset allocation failed %d\n", ret);
+ return;
}
+ dev->ctrl.tagset = set;
+}
- nvme_dbbuf_set(dev);
+static void nvme_pci_update_nr_queues(struct nvme_dev *dev)
+{
+ blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
+ /* free previously allocated queues that are no longer usable */
+ nvme_free_queues(dev, dev->online_queues);
}
static int nvme_pci_enable(struct nvme_dev *dev)
@@ -2725,10 +2711,8 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
nvme_pci_disable(dev);
nvme_reap_pending_cqes(dev);
- blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
- blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl);
- blk_mq_tagset_wait_completed_request(&dev->tagset);
- blk_mq_tagset_wait_completed_request(&dev->admin_tagset);
+ nvme_cancel_tagset(&dev->ctrl);
+ nvme_cancel_admin_tagset(&dev->ctrl);
/*
* The driver will not be starting up queues again if shutting down so
@@ -2842,9 +2826,13 @@ static void nvme_reset_work(struct work_struct *work)
if (result)
goto out_unlock;
- result = nvme_alloc_admin_tags(dev);
- if (result)
- goto out_unlock;
+ if (!dev->ctrl.admin_q) {
+ result = nvme_pci_alloc_admin_tag_set(dev);
+ if (result)
+ goto out_unlock;
+ } else {
+ nvme_start_admin_queue(&dev->ctrl);
+ }
/*
* Limit the max command size to prevent iod->sg allocations going
@@ -2923,7 +2911,11 @@ static void nvme_reset_work(struct work_struct *work)
} else {
nvme_start_queues(&dev->ctrl);
nvme_wait_freeze(&dev->ctrl);
- nvme_dev_add(dev);
+ if (!dev->ctrl.tagset)
+ nvme_pci_alloc_tag_set(dev);
+ else
+ nvme_pci_update_nr_queues(dev);
+ nvme_dbbuf_set(dev);
nvme_unfreeze(&dev->ctrl);
}
@@ -2989,7 +2981,6 @@ static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
return snprintf(buf, size, "%s\n", dev_name(&pdev->dev));
}
-
static void nvme_pci_print_device_info(struct nvme_ctrl *ctrl)
{
struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev);
@@ -3004,11 +2995,17 @@ static void nvme_pci_print_device_info(struct nvme_ctrl *ctrl)
subsys->firmware_rev);
}
+static bool nvme_pci_supports_pci_p2pdma(struct nvme_ctrl *ctrl)
+{
+ struct nvme_dev *dev = to_nvme_dev(ctrl);
+
+ return dma_pci_p2pdma_supported(dev->dev);
+}
+
static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
.name = "pcie",
.module = THIS_MODULE,
- .flags = NVME_F_METADATA_SUPPORTED |
- NVME_F_PCI_P2PDMA,
+ .flags = NVME_F_METADATA_SUPPORTED,
.reg_read32 = nvme_pci_reg_read32,
.reg_write32 = nvme_pci_reg_write32,
.reg_read64 = nvme_pci_reg_read64,
@@ -3016,6 +3013,7 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
.submit_async_event = nvme_pci_submit_async_event,
.get_address = nvme_pci_get_address,
.print_device_info = nvme_pci_print_device_info,
+ .supports_pci_p2pdma = nvme_pci_supports_pci_p2pdma,
};
static int nvme_dev_map(struct nvme_dev *dev)
@@ -3513,6 +3511,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1cc1, 0x5350), /* ADATA XPG GAMMIX S50 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1dbe, 0x5236), /* ADATA XPG GAMMIX S70 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1e49, 0x0041), /* ZHITAI TiPro7000 NVMe SSD */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(0xc0a9, 0x540a), /* Crucial P2 */
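The pci.c hunks above replace the open-coded scatterlist/P2PDMA branches with a struct sg_table and the dma_map_sgtable()/dma_unmap_sgtable() helpers, which report unsupported peer-to-peer pages as -EREMOTEIO. A minimal sketch of that pattern outside the driver; in the real code the unmap happens at I/O completion, it is inlined here only to keep the example self-contained:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_map_payload(struct device *dma_dev, struct sg_table *sgt,
			       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int rc, i;

	rc = dma_map_sgtable(dma_dev, sgt, dir, DMA_ATTR_NO_WARN);
	if (rc)
		return rc;	/* -EREMOTEIO becomes BLK_STS_TARGET in nvme_map_data() */

	for_each_sgtable_dma_sg(sgt, sg, i) {
		/* build PRP or SGL descriptors from sg_dma_address()/sg_dma_len() */
	}

	dma_unmap_sgtable(dma_dev, sgt, dir, 0);
	return 0;
}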
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 4665aebd944d..3100643be299 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -29,7 +29,7 @@
#include "fabrics.h"
-#define NVME_RDMA_CONNECT_TIMEOUT_MS 3000 /* 3 second */
+#define NVME_RDMA_CM_TIMEOUT_MS 3000 /* 3 second */
#define NVME_RDMA_MAX_SEGMENTS 256
@@ -248,12 +248,9 @@ static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue)
{
int ret;
- ret = wait_for_completion_interruptible_timeout(&queue->cm_done,
- msecs_to_jiffies(NVME_RDMA_CONNECT_TIMEOUT_MS) + 1);
- if (ret < 0)
+ ret = wait_for_completion_interruptible(&queue->cm_done);
+ if (ret)
return ret;
- if (ret == 0)
- return -ETIMEDOUT;
WARN_ON_ONCE(queue->cm_error > 0);
return queue->cm_error;
}
@@ -612,7 +609,7 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
queue->cm_error = -ETIMEDOUT;
ret = rdma_resolve_addr(queue->cm_id, src_addr,
(struct sockaddr *)&ctrl->addr,
- NVME_RDMA_CONNECT_TIMEOUT_MS);
+ NVME_RDMA_CM_TIMEOUT_MS);
if (ret) {
dev_info(ctrl->ctrl.device,
"rdma_resolve_addr failed (%d).\n", ret);
@@ -790,50 +787,54 @@ out_free_queues:
return ret;
}
-static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
- bool admin)
+static int nvme_rdma_alloc_admin_tag_set(struct nvme_ctrl *nctrl)
{
struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
- struct blk_mq_tag_set *set;
+ struct blk_mq_tag_set *set = &ctrl->admin_tag_set;
int ret;
- if (admin) {
- set = &ctrl->admin_tag_set;
- memset(set, 0, sizeof(*set));
- set->ops = &nvme_rdma_admin_mq_ops;
- set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
- set->reserved_tags = NVMF_RESERVED_TAGS;
- set->numa_node = nctrl->numa_node;
- set->cmd_size = sizeof(struct nvme_rdma_request) +
- NVME_RDMA_DATA_SGL_SIZE;
- set->driver_data = ctrl;
- set->nr_hw_queues = 1;
- set->timeout = NVME_ADMIN_TIMEOUT;
- set->flags = BLK_MQ_F_NO_SCHED;
- } else {
- set = &ctrl->tag_set;
- memset(set, 0, sizeof(*set));
- set->ops = &nvme_rdma_mq_ops;
- set->queue_depth = nctrl->sqsize + 1;
- set->reserved_tags = NVMF_RESERVED_TAGS;
- set->numa_node = nctrl->numa_node;
- set->flags = BLK_MQ_F_SHOULD_MERGE;
- set->cmd_size = sizeof(struct nvme_rdma_request) +
- NVME_RDMA_DATA_SGL_SIZE;
- if (nctrl->max_integrity_segments)
- set->cmd_size += sizeof(struct nvme_rdma_sgl) +
- NVME_RDMA_METADATA_SGL_SIZE;
- set->driver_data = ctrl;
- set->nr_hw_queues = nctrl->queue_count - 1;
- set->timeout = NVME_IO_TIMEOUT;
- set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
- }
-
+ memset(set, 0, sizeof(*set));
+ set->ops = &nvme_rdma_admin_mq_ops;
+ set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
+ set->reserved_tags = NVMF_RESERVED_TAGS;
+ set->numa_node = nctrl->numa_node;
+ set->cmd_size = sizeof(struct nvme_rdma_request) +
+ NVME_RDMA_DATA_SGL_SIZE;
+ set->driver_data = ctrl;
+ set->nr_hw_queues = 1;
+ set->timeout = NVME_ADMIN_TIMEOUT;
+ set->flags = BLK_MQ_F_NO_SCHED;
ret = blk_mq_alloc_tag_set(set);
- if (ret)
- return ERR_PTR(ret);
+ if (!ret)
+ ctrl->ctrl.admin_tagset = set;
+ return ret;
+}
+
+static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *nctrl)
+{
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
+ struct blk_mq_tag_set *set = &ctrl->tag_set;
+ int ret;
- return set;
+ memset(set, 0, sizeof(*set));
+ set->ops = &nvme_rdma_mq_ops;
+ set->queue_depth = nctrl->sqsize + 1;
+ set->reserved_tags = NVMF_RESERVED_TAGS;
+ set->numa_node = nctrl->numa_node;
+ set->flags = BLK_MQ_F_SHOULD_MERGE;
+ set->cmd_size = sizeof(struct nvme_rdma_request) +
+ NVME_RDMA_DATA_SGL_SIZE;
+ if (nctrl->max_integrity_segments)
+ set->cmd_size += sizeof(struct nvme_rdma_sgl) +
+ NVME_RDMA_METADATA_SGL_SIZE;
+ set->driver_data = ctrl;
+ set->nr_hw_queues = nctrl->queue_count - 1;
+ set->timeout = NVME_IO_TIMEOUT;
+ set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
+ ret = blk_mq_alloc_tag_set(set);
+ if (!ret)
+ ctrl->ctrl.tagset = set;
+ return ret;
}
static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
@@ -885,11 +886,9 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
goto out_free_queue;
if (new) {
- ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
- if (IS_ERR(ctrl->ctrl.admin_tagset)) {
- error = PTR_ERR(ctrl->ctrl.admin_tagset);
+ error = nvme_rdma_alloc_admin_tag_set(&ctrl->ctrl);
+ if (error)
goto out_free_async_qe;
- }
ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
if (IS_ERR(ctrl->ctrl.fabrics_q)) {
@@ -972,11 +971,9 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
return ret;
if (new) {
- ctrl->ctrl.tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, false);
- if (IS_ERR(ctrl->ctrl.tagset)) {
- ret = PTR_ERR(ctrl->ctrl.tagset);
+ ret = nvme_rdma_alloc_tag_set(&ctrl->ctrl);
+ if (ret)
goto out_free_io_queues;
- }
ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
if (ret)
@@ -1205,6 +1202,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
struct nvme_rdma_ctrl *ctrl = container_of(work,
struct nvme_rdma_ctrl, err_work);
+ nvme_auth_stop(&ctrl->ctrl);
nvme_stop_keep_alive(&ctrl->ctrl);
flush_work(&ctrl->ctrl.async_event_work);
nvme_rdma_teardown_io_queues(ctrl, false);
@@ -1894,7 +1892,7 @@ static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue)
if (ctrl->opts->tos >= 0)
rdma_set_service_type(queue->cm_id, ctrl->opts->tos);
- ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CONNECT_TIMEOUT_MS);
+ ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CM_TIMEOUT_MS);
if (ret) {
dev_err(ctrl->device, "rdma_resolve_route failed (%d).\n",
queue->cm_error);
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index b95ee85053e3..044da18c06f5 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -209,9 +209,11 @@ static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}
-static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
+static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_request *req)
{
- return queue->cmnd_capsule_len - sizeof(struct nvme_command);
+ if (nvme_is_fabrics(req->req.cmd))
+ return NVME_TCP_ADMIN_CCSZ;
+ return req->queue->cmnd_capsule_len - sizeof(struct nvme_command);
}
static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
@@ -229,7 +231,7 @@ static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
rq = blk_mq_rq_from_pdu(req);
return rq_data_dir(rq) == WRITE && req->data_len &&
- req->data_len <= nvme_tcp_inline_data_size(req->queue);
+ req->data_len <= nvme_tcp_inline_data_size(req);
}
static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
@@ -1658,6 +1660,9 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
+ if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
+ return;
+
mutex_lock(&queue->queue_lock);
if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
__nvme_tcp_stop_queue(queue);
@@ -1685,45 +1690,49 @@ static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
return ret;
}
-static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
- bool admin)
+static int nvme_tcp_alloc_admin_tag_set(struct nvme_ctrl *nctrl)
{
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
- struct blk_mq_tag_set *set;
+ struct blk_mq_tag_set *set = &ctrl->admin_tag_set;
int ret;
- if (admin) {
- set = &ctrl->admin_tag_set;
- memset(set, 0, sizeof(*set));
- set->ops = &nvme_tcp_admin_mq_ops;
- set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
- set->reserved_tags = NVMF_RESERVED_TAGS;
- set->numa_node = nctrl->numa_node;
- set->flags = BLK_MQ_F_BLOCKING;
- set->cmd_size = sizeof(struct nvme_tcp_request);
- set->driver_data = ctrl;
- set->nr_hw_queues = 1;
- set->timeout = NVME_ADMIN_TIMEOUT;
- } else {
- set = &ctrl->tag_set;
- memset(set, 0, sizeof(*set));
- set->ops = &nvme_tcp_mq_ops;
- set->queue_depth = nctrl->sqsize + 1;
- set->reserved_tags = NVMF_RESERVED_TAGS;
- set->numa_node = nctrl->numa_node;
- set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
- set->cmd_size = sizeof(struct nvme_tcp_request);
- set->driver_data = ctrl;
- set->nr_hw_queues = nctrl->queue_count - 1;
- set->timeout = NVME_IO_TIMEOUT;
- set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
- }
-
+ memset(set, 0, sizeof(*set));
+ set->ops = &nvme_tcp_admin_mq_ops;
+ set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
+ set->reserved_tags = NVMF_RESERVED_TAGS;
+ set->numa_node = nctrl->numa_node;
+ set->flags = BLK_MQ_F_BLOCKING;
+ set->cmd_size = sizeof(struct nvme_tcp_request);
+ set->driver_data = ctrl;
+ set->nr_hw_queues = 1;
+ set->timeout = NVME_ADMIN_TIMEOUT;
ret = blk_mq_alloc_tag_set(set);
- if (ret)
- return ERR_PTR(ret);
+ if (!ret)
+ nctrl->admin_tagset = set;
+ return ret;
+}
- return set;
+static int nvme_tcp_alloc_tag_set(struct nvme_ctrl *nctrl)
+{
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+ struct blk_mq_tag_set *set = &ctrl->tag_set;
+ int ret;
+
+ memset(set, 0, sizeof(*set));
+ set->ops = &nvme_tcp_mq_ops;
+ set->queue_depth = nctrl->sqsize + 1;
+ set->reserved_tags = NVMF_RESERVED_TAGS;
+ set->numa_node = nctrl->numa_node;
+ set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
+ set->cmd_size = sizeof(struct nvme_tcp_request);
+ set->driver_data = ctrl;
+ set->nr_hw_queues = nctrl->queue_count - 1;
+ set->timeout = NVME_IO_TIMEOUT;
+ set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
+ ret = blk_mq_alloc_tag_set(set);
+ if (!ret)
+ nctrl->tagset = set;
+ return ret;
}
static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
@@ -1899,11 +1908,9 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
return ret;
if (new) {
- ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
- if (IS_ERR(ctrl->tagset)) {
- ret = PTR_ERR(ctrl->tagset);
+ ret = nvme_tcp_alloc_tag_set(ctrl);
+ if (ret)
goto out_free_io_queues;
- }
ret = nvme_ctrl_init_connect_q(ctrl);
if (ret)
@@ -1968,11 +1975,9 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
return error;
if (new) {
- ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true);
- if (IS_ERR(ctrl->admin_tagset)) {
- error = PTR_ERR(ctrl->admin_tagset);
+ error = nvme_tcp_alloc_admin_tag_set(ctrl);
+ if (error)
goto out_free_queue;
- }
ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset);
if (IS_ERR(ctrl->fabrics_q)) {
@@ -2173,6 +2178,7 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
struct nvme_tcp_ctrl, err_work);
struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
+ nvme_auth_stop(ctrl);
nvme_stop_keep_alive(ctrl);
flush_work(&ctrl->async_event_work);
nvme_tcp_teardown_io_queues(ctrl, false);
@@ -2371,7 +2377,7 @@ static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
if (!blk_rq_nr_phys_segments(rq))
nvme_tcp_set_sg_null(c);
else if (rq_data_dir(rq) == WRITE &&
- req->data_len <= nvme_tcp_inline_data_size(queue))
+ req->data_len <= nvme_tcp_inline_data_size(req))
nvme_tcp_set_sg_inline(queue, c, req->data_len);
else
nvme_tcp_set_sg_host_data(c, req->data_len);
@@ -2406,7 +2412,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
nvme_tcp_init_iter(req, rq_data_dir(rq));
if (rq_data_dir(rq) == WRITE &&
- req->data_len <= nvme_tcp_inline_data_size(queue))
+ req->data_len <= nvme_tcp_inline_data_size(req))
req->pdu_len = req->data_len;
pdu->hdr.type = nvme_tcp_cmd;
diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c
index 2a89c5aa0790..1c36fcedea20 100644
--- a/drivers/nvme/host/trace.c
+++ b/drivers/nvme/host/trace.c
@@ -287,6 +287,34 @@ static const char *nvme_trace_fabrics_property_get(struct trace_seq *p, u8 *spc)
return ret;
}
+static const char *nvme_trace_fabrics_auth_send(struct trace_seq *p, u8 *spc)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 spsp0 = spc[1];
+ u8 spsp1 = spc[2];
+ u8 secp = spc[3];
+ u32 tl = get_unaligned_le32(spc + 4);
+
+ trace_seq_printf(p, "spsp0=%02x, spsp1=%02x, secp=%02x, tl=%u",
+ spsp0, spsp1, secp, tl);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+static const char *nvme_trace_fabrics_auth_receive(struct trace_seq *p, u8 *spc)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 spsp0 = spc[1];
+ u8 spsp1 = spc[2];
+ u8 secp = spc[3];
+ u32 al = get_unaligned_le32(spc + 4);
+
+ trace_seq_printf(p, "spsp0=%02x, spsp1=%02x, secp=%02x, al=%u",
+ spsp0, spsp1, secp, al);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
static const char *nvme_trace_fabrics_common(struct trace_seq *p, u8 *spc)
{
const char *ret = trace_seq_buffer_ptr(p);
@@ -306,6 +334,10 @@ const char *nvme_trace_parse_fabrics_cmd(struct trace_seq *p,
return nvme_trace_fabrics_connect(p, spc);
case nvme_fabrics_type_property_get:
return nvme_trace_fabrics_property_get(p, spc);
+ case nvme_fabrics_type_auth_send:
+ return nvme_trace_fabrics_auth_send(p, spc);
+ case nvme_fabrics_type_auth_receive:
+ return nvme_trace_fabrics_auth_receive(p, spc);
default:
return nvme_trace_fabrics_common(p, spc);
}
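The two decoders added above read the DH-HMAC-CHAP fields straight out of the command-specific bytes: SPSP0/SPSP1/SECP at offsets 1-3 and the transfer (or allocation) length as a 32-bit little-endian value at offset 4. A minimal userspace sketch of the same layout, outside the patch (sample bytes are made up; 0xe9 stands in for NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER used later in this series):

#include <stdint.h>
#include <stdio.h>

static uint32_t get_le32(const uint8_t *p)
{
	return p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
	/* hypothetical command-specific bytes of an auth_send capsule */
	uint8_t spc[8] = { 0x00, 0x01, 0x01, 0xe9, 0x00, 0x10, 0x00, 0x00 };

	uint8_t spsp0 = spc[1];
	uint8_t spsp1 = spc[2];
	uint8_t secp  = spc[3];
	uint32_t tl   = get_le32(spc + 4);  /* "al" for auth_receive */

	printf("spsp0=%02x, spsp1=%02x, secp=%02x, tl=%u\n",
	       spsp0, spsp1, secp, tl);
	return 0;
}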
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
index 37c7f4c89f92..6f0eaf6a1528 100644
--- a/drivers/nvme/host/trace.h
+++ b/drivers/nvme/host/trace.h
@@ -98,7 +98,7 @@ TRACE_EVENT(nvme_complete_rq,
TP_fast_assign(
__entry->ctrl_id = nvme_req(req)->ctrl->instance;
__entry->qid = nvme_req_qid(req);
- __entry->cid = req->tag;
+ __entry->cid = nvme_req(req)->cmd->common.command_id;
__entry->result = le64_to_cpu(nvme_req(req)->result.u64);
__entry->retries = nvme_req(req)->retries;
__entry->flags = nvme_req(req)->flags;
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index 973561c93888..79fc64035ee3 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -83,3 +83,18 @@ config NVME_TARGET_TCP
devices over TCP.
If unsure, say N.
+
+config NVME_TARGET_AUTH
+ bool "NVMe over Fabrics In-band Authentication support"
+ depends on NVME_TARGET
+ select NVME_COMMON
+ select CRYPTO
+ select CRYPTO_HMAC
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
+ select CRYPTO_DH
+ select CRYPTO_DH_RFC7919_GROUPS
+ help
+ This enables support for NVMe over Fabrics In-band Authentication
+
+ If unsure, say N.
diff --git a/drivers/nvme/target/Makefile b/drivers/nvme/target/Makefile
index 9837e580fa7e..c66820102493 100644
--- a/drivers/nvme/target/Makefile
+++ b/drivers/nvme/target/Makefile
@@ -13,6 +13,7 @@ nvmet-y += core.o configfs.o admin-cmd.o fabrics-cmd.o \
discovery.o io-cmd-file.o io-cmd-bdev.o
nvmet-$(CONFIG_NVME_TARGET_PASSTHRU) += passthru.o
nvmet-$(CONFIG_BLK_DEV_ZONED) += zns.o
+nvmet-$(CONFIG_NVME_TARGET_AUTH) += fabrics-cmd-auth.o auth.o
nvme-loop-y += loop.o
nvmet-rdma-y += rdma.o
nvmet-fc-y += fc.o
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 397daaf51f1b..fc8a957fad0a 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -1017,7 +1017,9 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
u16 ret;
if (nvme_is_fabrics(cmd))
- return nvmet_parse_fabrics_cmd(req);
+ return nvmet_parse_fabrics_admin_cmd(req);
+ if (unlikely(!nvmet_check_auth_status(req)))
+ return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
return nvmet_parse_discovery_cmd(req);
diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c
new file mode 100644
index 000000000000..cf690df34775
--- /dev/null
+++ b/drivers/nvme/target/auth.c
@@ -0,0 +1,525 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe over Fabrics DH-HMAC-CHAP authentication.
+ * Copyright (c) 2020 Hannes Reinecke, SUSE Software Solutions.
+ * All rights reserved.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <crypto/hash.h>
+#include <linux/crc32.h>
+#include <linux/base64.h>
+#include <linux/ctype.h>
+#include <linux/random.h>
+#include <linux/nvme-auth.h>
+#include <asm/unaligned.h>
+
+#include "nvmet.h"
+
+int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
+ bool set_ctrl)
+{
+ unsigned char key_hash;
+ char *dhchap_secret;
+
+ if (sscanf(secret, "DHHC-1:%hhd:%*s", &key_hash) != 1)
+ return -EINVAL;
+ if (key_hash > 3) {
+ pr_warn("Invalid DH-HMAC-CHAP hash id %d\n",
+ key_hash);
+ return -EINVAL;
+ }
+ if (key_hash > 0) {
+ /* Validate selected hash algorithm */
+ const char *hmac = nvme_auth_hmac_name(key_hash);
+
+ if (!crypto_has_shash(hmac, 0, 0)) {
+ pr_err("DH-HMAC-CHAP hash %s unsupported\n", hmac);
+ return -ENOTSUPP;
+ }
+ }
+ dhchap_secret = kstrdup(secret, GFP_KERNEL);
+ if (!dhchap_secret)
+ return -ENOMEM;
+ if (set_ctrl) {
+ host->dhchap_ctrl_secret = strim(dhchap_secret);
+ host->dhchap_ctrl_key_hash = key_hash;
+ } else {
+ host->dhchap_secret = strim(dhchap_secret);
+ host->dhchap_key_hash = key_hash;
+ }
+ return 0;
+}
+
+int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id)
+{
+ const char *dhgroup_kpp;
+ int ret = 0;
+
+ pr_debug("%s: ctrl %d selecting dhgroup %d\n",
+ __func__, ctrl->cntlid, dhgroup_id);
+
+ if (ctrl->dh_tfm) {
+ if (ctrl->dh_gid == dhgroup_id) {
+ pr_debug("%s: ctrl %d reuse existing DH group %d\n",
+ __func__, ctrl->cntlid, dhgroup_id);
+ return 0;
+ }
+ crypto_free_kpp(ctrl->dh_tfm);
+ ctrl->dh_tfm = NULL;
+ ctrl->dh_gid = 0;
+ }
+
+ if (dhgroup_id == NVME_AUTH_DHGROUP_NULL)
+ return 0;
+
+ dhgroup_kpp = nvme_auth_dhgroup_kpp(dhgroup_id);
+ if (!dhgroup_kpp) {
+ pr_debug("%s: ctrl %d invalid DH group %d\n",
+ __func__, ctrl->cntlid, dhgroup_id);
+ return -EINVAL;
+ }
+ ctrl->dh_tfm = crypto_alloc_kpp(dhgroup_kpp, 0, 0);
+ if (IS_ERR(ctrl->dh_tfm)) {
+ pr_debug("%s: ctrl %d failed to setup DH group %d, err %ld\n",
+ __func__, ctrl->cntlid, dhgroup_id,
+ PTR_ERR(ctrl->dh_tfm));
+ ret = PTR_ERR(ctrl->dh_tfm);
+ ctrl->dh_tfm = NULL;
+ ctrl->dh_gid = 0;
+ } else {
+ ctrl->dh_gid = dhgroup_id;
+ pr_debug("%s: ctrl %d setup DH group %d\n",
+ __func__, ctrl->cntlid, ctrl->dh_gid);
+ ret = nvme_auth_gen_privkey(ctrl->dh_tfm, ctrl->dh_gid);
+ if (ret < 0) {
+ pr_debug("%s: ctrl %d failed to generate private key, err %d\n",
+ __func__, ctrl->cntlid, ret);
+ kfree_sensitive(ctrl->dh_key);
+ return ret;
+ }
+ ctrl->dh_keysize = crypto_kpp_maxsize(ctrl->dh_tfm);
+ kfree_sensitive(ctrl->dh_key);
+ ctrl->dh_key = kzalloc(ctrl->dh_keysize, GFP_KERNEL);
+ if (!ctrl->dh_key) {
+ pr_warn("ctrl %d failed to allocate public key\n",
+ ctrl->cntlid);
+ return -ENOMEM;
+ }
+ ret = nvme_auth_gen_pubkey(ctrl->dh_tfm, ctrl->dh_key,
+ ctrl->dh_keysize);
+ if (ret < 0) {
+ pr_warn("ctrl %d failed to generate public key\n",
+ ctrl->cntlid);
+ kfree(ctrl->dh_key);
+ ctrl->dh_key = NULL;
+ }
+ }
+
+ return ret;
+}
+
+int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
+{
+ int ret = 0;
+ struct nvmet_host_link *p;
+ struct nvmet_host *host = NULL;
+ const char *hash_name;
+
+ down_read(&nvmet_config_sem);
+ if (nvmet_is_disc_subsys(ctrl->subsys))
+ goto out_unlock;
+
+ if (ctrl->subsys->allow_any_host)
+ goto out_unlock;
+
+ list_for_each_entry(p, &ctrl->subsys->hosts, entry) {
+ pr_debug("check %s\n", nvmet_host_name(p->host));
+ if (strcmp(nvmet_host_name(p->host), ctrl->hostnqn))
+ continue;
+ host = p->host;
+ break;
+ }
+ if (!host) {
+ pr_debug("host %s not found\n", ctrl->hostnqn);
+ ret = -EPERM;
+ goto out_unlock;
+ }
+
+ ret = nvmet_setup_dhgroup(ctrl, host->dhchap_dhgroup_id);
+ if (ret < 0)
+ pr_warn("Failed to setup DH group");
+
+ if (!host->dhchap_secret) {
+ pr_debug("No authentication provided\n");
+ goto out_unlock;
+ }
+
+ if (host->dhchap_hash_id == ctrl->shash_id) {
+ pr_debug("Re-use existing hash ID %d\n",
+ ctrl->shash_id);
+ } else {
+ hash_name = nvme_auth_hmac_name(host->dhchap_hash_id);
+ if (!hash_name) {
+ pr_warn("Hash ID %d invalid\n", host->dhchap_hash_id);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ ctrl->shash_id = host->dhchap_hash_id;
+ }
+
+ /* Skip the 'DHHC-1:XX:' prefix */
+ nvme_auth_free_key(ctrl->host_key);
+ ctrl->host_key = nvme_auth_extract_key(host->dhchap_secret + 10,
+ host->dhchap_key_hash);
+ if (IS_ERR(ctrl->host_key)) {
+ ret = PTR_ERR(ctrl->host_key);
+ ctrl->host_key = NULL;
+ goto out_free_hash;
+ }
+ pr_debug("%s: using hash %s key %*ph\n", __func__,
+ ctrl->host_key->hash > 0 ?
+ nvme_auth_hmac_name(ctrl->host_key->hash) : "none",
+ (int)ctrl->host_key->len, ctrl->host_key->key);
+
+ nvme_auth_free_key(ctrl->ctrl_key);
+ if (!host->dhchap_ctrl_secret) {
+ ctrl->ctrl_key = NULL;
+ goto out_unlock;
+ }
+
+ ctrl->ctrl_key = nvme_auth_extract_key(host->dhchap_ctrl_secret + 10,
+ host->dhchap_ctrl_key_hash);
+ if (IS_ERR(ctrl->ctrl_key)) {
+ ret = PTR_ERR(ctrl->ctrl_key);
+ ctrl->ctrl_key = NULL;
+ }
+ pr_debug("%s: using ctrl hash %s key %*ph\n", __func__,
+ ctrl->ctrl_key->hash > 0 ?
+ nvme_auth_hmac_name(ctrl->ctrl_key->hash) : "none",
+ (int)ctrl->ctrl_key->len, ctrl->ctrl_key->key);
+
+out_free_hash:
+ if (ret) {
+ if (ctrl->host_key) {
+ nvme_auth_free_key(ctrl->host_key);
+ ctrl->host_key = NULL;
+ }
+ ctrl->shash_id = 0;
+ }
+out_unlock:
+ up_read(&nvmet_config_sem);
+
+ return ret;
+}
+
+void nvmet_auth_sq_free(struct nvmet_sq *sq)
+{
+ cancel_delayed_work(&sq->auth_expired_work);
+ kfree(sq->dhchap_c1);
+ sq->dhchap_c1 = NULL;
+ kfree(sq->dhchap_c2);
+ sq->dhchap_c2 = NULL;
+ kfree(sq->dhchap_skey);
+ sq->dhchap_skey = NULL;
+}
+
+void nvmet_destroy_auth(struct nvmet_ctrl *ctrl)
+{
+ ctrl->shash_id = 0;
+
+ if (ctrl->dh_tfm) {
+ crypto_free_kpp(ctrl->dh_tfm);
+ ctrl->dh_tfm = NULL;
+ ctrl->dh_gid = 0;
+ }
+ kfree_sensitive(ctrl->dh_key);
+ ctrl->dh_key = NULL;
+
+ if (ctrl->host_key) {
+ nvme_auth_free_key(ctrl->host_key);
+ ctrl->host_key = NULL;
+ }
+ if (ctrl->ctrl_key) {
+ nvme_auth_free_key(ctrl->ctrl_key);
+ ctrl->ctrl_key = NULL;
+ }
+}
+
+bool nvmet_check_auth_status(struct nvmet_req *req)
+{
+ if (req->sq->ctrl->host_key &&
+ !req->sq->authenticated)
+ return false;
+ return true;
+}
+
+int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
+ unsigned int shash_len)
+{
+ struct crypto_shash *shash_tfm;
+ struct shash_desc *shash;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ const char *hash_name;
+ u8 *challenge = req->sq->dhchap_c1, *host_response;
+ u8 buf[4];
+ int ret;
+
+ hash_name = nvme_auth_hmac_name(ctrl->shash_id);
+ if (!hash_name) {
+ pr_warn("Hash ID %d invalid\n", ctrl->shash_id);
+ return -EINVAL;
+ }
+
+ shash_tfm = crypto_alloc_shash(hash_name, 0, 0);
+ if (IS_ERR(shash_tfm)) {
+ pr_err("failed to allocate shash %s\n", hash_name);
+ return PTR_ERR(shash_tfm);
+ }
+
+ if (shash_len != crypto_shash_digestsize(shash_tfm)) {
+ pr_debug("%s: hash len mismatch (len %d digest %d)\n",
+ __func__, shash_len,
+ crypto_shash_digestsize(shash_tfm));
+ ret = -EINVAL;
+ goto out_free_tfm;
+ }
+
+ host_response = nvme_auth_transform_key(ctrl->host_key, ctrl->hostnqn);
+ if (IS_ERR(host_response)) {
+ ret = PTR_ERR(host_response);
+ goto out_free_tfm;
+ }
+
+ ret = crypto_shash_setkey(shash_tfm, host_response,
+ ctrl->host_key->len);
+ if (ret)
+ goto out_free_response;
+
+ if (ctrl->dh_gid != NVME_AUTH_DHGROUP_NULL) {
+ challenge = kmalloc(shash_len, GFP_KERNEL);
+ if (!challenge) {
+ ret = -ENOMEM;
+ goto out_free_response;
+ }
+ ret = nvme_auth_augmented_challenge(ctrl->shash_id,
+ req->sq->dhchap_skey,
+ req->sq->dhchap_skey_len,
+ req->sq->dhchap_c1,
+ challenge, shash_len);
+ if (ret)
+ goto out_free_response;
+ }
+
+ pr_debug("ctrl %d qid %d host response seq %u transaction %d\n",
+ ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1,
+ req->sq->dhchap_tid);
+
+ shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(shash_tfm),
+ GFP_KERNEL);
+ if (!shash) {
+ ret = -ENOMEM;
+ goto out_free_response;
+ }
+ shash->tfm = shash_tfm;
+ ret = crypto_shash_init(shash);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, challenge, shash_len);
+ if (ret)
+ goto out;
+ put_unaligned_le32(req->sq->dhchap_s1, buf);
+ ret = crypto_shash_update(shash, buf, 4);
+ if (ret)
+ goto out;
+ put_unaligned_le16(req->sq->dhchap_tid, buf);
+ ret = crypto_shash_update(shash, buf, 2);
+ if (ret)
+ goto out;
+ memset(buf, 0, 4);
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, "HostHost", 8);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->hostnqn, strlen(ctrl->hostnqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->subsysnqn,
+ strlen(ctrl->subsysnqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_final(shash, response);
+out:
+ if (challenge != req->sq->dhchap_c1)
+ kfree(challenge);
+ kfree(shash);
+out_free_response:
+ kfree_sensitive(host_response);
+out_free_tfm:
+ crypto_free_shash(shash_tfm);
+ return 0;
+}
+
+int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
+ unsigned int shash_len)
+{
+ struct crypto_shash *shash_tfm;
+ struct shash_desc *shash;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ const char *hash_name;
+ u8 *challenge = req->sq->dhchap_c2, *ctrl_response;
+ u8 buf[4];
+ int ret;
+
+ hash_name = nvme_auth_hmac_name(ctrl->shash_id);
+ if (!hash_name) {
+ pr_warn("Hash ID %d invalid\n", ctrl->shash_id);
+ return -EINVAL;
+ }
+
+ shash_tfm = crypto_alloc_shash(hash_name, 0, 0);
+ if (IS_ERR(shash_tfm)) {
+ pr_err("failed to allocate shash %s\n", hash_name);
+ return PTR_ERR(shash_tfm);
+ }
+
+ if (shash_len != crypto_shash_digestsize(shash_tfm)) {
+ pr_debug("%s: hash len mismatch (len %d digest %d)\n",
+ __func__, shash_len,
+ crypto_shash_digestsize(shash_tfm));
+ ret = -EINVAL;
+ goto out_free_tfm;
+ }
+
+ ctrl_response = nvme_auth_transform_key(ctrl->ctrl_key,
+ ctrl->subsysnqn);
+ if (IS_ERR(ctrl_response)) {
+ ret = PTR_ERR(ctrl_response);
+ goto out_free_tfm;
+ }
+
+ ret = crypto_shash_setkey(shash_tfm, ctrl_response,
+ ctrl->ctrl_key->len);
+ if (ret)
+ goto out_free_response;
+
+ if (ctrl->dh_gid != NVME_AUTH_DHGROUP_NULL) {
+ challenge = kmalloc(shash_len, GFP_KERNEL);
+ if (!challenge) {
+ ret = -ENOMEM;
+ goto out_free_response;
+ }
+ ret = nvme_auth_augmented_challenge(ctrl->shash_id,
+ req->sq->dhchap_skey,
+ req->sq->dhchap_skey_len,
+ req->sq->dhchap_c2,
+ challenge, shash_len);
+ if (ret)
+ goto out_free_response;
+ }
+
+ shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(shash_tfm),
+ GFP_KERNEL);
+ if (!shash) {
+ ret = -ENOMEM;
+ goto out_free_response;
+ }
+ shash->tfm = shash_tfm;
+
+ ret = crypto_shash_init(shash);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, challenge, shash_len);
+ if (ret)
+ goto out;
+ put_unaligned_le32(req->sq->dhchap_s2, buf);
+ ret = crypto_shash_update(shash, buf, 4);
+ if (ret)
+ goto out;
+ put_unaligned_le16(req->sq->dhchap_tid, buf);
+ ret = crypto_shash_update(shash, buf, 2);
+ if (ret)
+ goto out;
+ memset(buf, 0, 4);
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, "Controller", 10);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->subsysnqn,
+ strlen(ctrl->subsysnqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->hostnqn, strlen(ctrl->hostnqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_final(shash, response);
+out:
+ if (challenge != req->sq->dhchap_c2)
+ kfree(challenge);
+ kfree(shash);
+out_free_response:
+ kfree_sensitive(ctrl_response);
+out_free_tfm:
+ crypto_free_shash(shash_tfm);
+ return 0;
+}
+
+int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
+ u8 *buf, int buf_size)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ int ret = 0;
+
+ if (!ctrl->dh_key) {
+ pr_warn("ctrl %d no DH public key!\n", ctrl->cntlid);
+ return -ENOKEY;
+ }
+ if (buf_size != ctrl->dh_keysize) {
+ pr_warn("ctrl %d DH public key size mismatch, need %zu is %d\n",
+ ctrl->cntlid, ctrl->dh_keysize, buf_size);
+ ret = -EINVAL;
+ } else {
+ memcpy(buf, ctrl->dh_key, buf_size);
+ pr_debug("%s: ctrl %d public key %*ph\n", __func__,
+ ctrl->cntlid, (int)buf_size, buf);
+ }
+
+ return ret;
+}
+
+int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
+ u8 *pkey, int pkey_size)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ int ret;
+
+ req->sq->dhchap_skey_len = ctrl->dh_keysize;
+ req->sq->dhchap_skey = kzalloc(req->sq->dhchap_skey_len, GFP_KERNEL);
+ if (!req->sq->dhchap_skey)
+ return -ENOMEM;
+ ret = nvme_auth_gen_shared_secret(ctrl->dh_tfm,
+ pkey, pkey_size,
+ req->sq->dhchap_skey,
+ req->sq->dhchap_skey_len);
+ if (ret)
+ pr_debug("failed to compute shared secret, err %d\n", ret);
+ else
+ pr_debug("%s: shared secret %*ph\n", __func__,
+ (int)req->sq->dhchap_skey_len,
+ req->sq->dhchap_skey);
+
+ return ret;
+}
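nvmet_auth_host_hash() above HMACs a fixed transcript: the (possibly augmented) challenge, the 32-bit sequence number, the 16-bit transaction id, a zero byte, the literal "HostHost", the host NQN, another zero byte, and the subsystem NQN, keyed with the secret after nvme_auth_transform_key(). A rough userspace sketch of that transcript, outside the patch (illustrative only: key transformation and challenge augmentation are skipped, and every input value below is made up):

/* Build with: cc sketch.c -lcrypto */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>

static size_t put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
	return 4;
}

static size_t put_le16(uint8_t *p, uint16_t v)
{
	p[0] = v; p[1] = v >> 8;
	return 2;
}

int main(void)
{
	uint8_t key[32] = { 0x01 };        /* stand-in for the transformed key */
	uint8_t challenge[32] = { 0xaa };  /* C1 as sent by the controller */
	uint32_t seqnum = 1;               /* S1 */
	uint16_t tid = 7;                  /* transaction identifier */
	const char *hostnqn = "nqn.2014-08.org.nvmexpress:uuid:example-host";
	const char *subsysnqn = "nqn.2014-08.org.nvmexpress:uuid:example-subsys";

	uint8_t msg[1024];
	size_t len = 0;

	/* transcript order matches the crypto_shash_update() calls above */
	memcpy(msg + len, challenge, sizeof(challenge)); len += sizeof(challenge);
	len += put_le32(msg + len, seqnum);
	len += put_le16(msg + len, tid);
	msg[len++] = 0x00;
	memcpy(msg + len, "HostHost", 8); len += 8;
	memcpy(msg + len, hostnqn, strlen(hostnqn)); len += strlen(hostnqn);
	msg[len++] = 0x00;
	memcpy(msg + len, subsysnqn, strlen(subsysnqn)); len += strlen(subsysnqn);

	uint8_t resp[EVP_MAX_MD_SIZE];
	unsigned int resp_len = 0;

	HMAC(EVP_sha256(), key, sizeof(key), msg, len, resp, &resp_len);

	for (unsigned int i = 0; i < resp_len; i++)
		printf("%02x", resp[i]);
	printf("\n");
	return 0;
}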
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index ff77c3d2354f..2bcd60758919 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -11,6 +11,11 @@
#include <linux/ctype.h>
#include <linux/pci.h>
#include <linux/pci-p2pdma.h>
+#ifdef CONFIG_NVME_TARGET_AUTH
+#include <linux/nvme-auth.h>
+#endif
+#include <crypto/hash.h>
+#include <crypto/kpp.h>
#include "nvmet.h"
@@ -1680,10 +1685,133 @@ static const struct config_item_type nvmet_ports_type = {
static struct config_group nvmet_subsystems_group;
static struct config_group nvmet_ports_group;
+#ifdef CONFIG_NVME_TARGET_AUTH
+static ssize_t nvmet_host_dhchap_key_show(struct config_item *item,
+ char *page)
+{
+ u8 *dhchap_secret = to_host(item)->dhchap_secret;
+
+ if (!dhchap_secret)
+ return sprintf(page, "\n");
+ return sprintf(page, "%s\n", dhchap_secret);
+}
+
+static ssize_t nvmet_host_dhchap_key_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_host *host = to_host(item);
+ int ret;
+
+ ret = nvmet_auth_set_key(host, page, false);
+ /*
+ * Re-authentication is a soft state, so keep the
+ * current authentication valid until the host
+ * requests re-authentication.
+ */
+ return ret < 0 ? ret : count;
+}
+
+CONFIGFS_ATTR(nvmet_host_, dhchap_key);
+
+static ssize_t nvmet_host_dhchap_ctrl_key_show(struct config_item *item,
+ char *page)
+{
+ u8 *dhchap_secret = to_host(item)->dhchap_ctrl_secret;
+
+ if (!dhchap_secret)
+ return sprintf(page, "\n");
+ return sprintf(page, "%s\n", dhchap_secret);
+}
+
+static ssize_t nvmet_host_dhchap_ctrl_key_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_host *host = to_host(item);
+ int ret;
+
+ ret = nvmet_auth_set_key(host, page, true);
+ /*
+ * Re-authentication is a soft state, so keep the
+ * current authentication valid until the host
+ * requests re-authentication.
+ */
+ return ret < 0 ? ret : count;
+}
+
+CONFIGFS_ATTR(nvmet_host_, dhchap_ctrl_key);
+
+static ssize_t nvmet_host_dhchap_hash_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_host *host = to_host(item);
+ const char *hash_name = nvme_auth_hmac_name(host->dhchap_hash_id);
+
+ return sprintf(page, "%s\n", hash_name ? hash_name : "none");
+}
+
+static ssize_t nvmet_host_dhchap_hash_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_host *host = to_host(item);
+ u8 hmac_id;
+
+ hmac_id = nvme_auth_hmac_id(page);
+ if (hmac_id == NVME_AUTH_HASH_INVALID)
+ return -EINVAL;
+ if (!crypto_has_shash(nvme_auth_hmac_name(hmac_id), 0, 0))
+ return -ENOTSUPP;
+ host->dhchap_hash_id = hmac_id;
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_host_, dhchap_hash);
+
+static ssize_t nvmet_host_dhchap_dhgroup_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_host *host = to_host(item);
+ const char *dhgroup = nvme_auth_dhgroup_name(host->dhchap_dhgroup_id);
+
+ return sprintf(page, "%s\n", dhgroup ? dhgroup : "none");
+}
+
+static ssize_t nvmet_host_dhchap_dhgroup_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_host *host = to_host(item);
+ int dhgroup_id;
+
+ dhgroup_id = nvme_auth_dhgroup_id(page);
+ if (dhgroup_id == NVME_AUTH_DHGROUP_INVALID)
+ return -EINVAL;
+ if (dhgroup_id != NVME_AUTH_DHGROUP_NULL) {
+ const char *kpp = nvme_auth_dhgroup_kpp(dhgroup_id);
+
+ if (!crypto_has_kpp(kpp, 0, 0))
+ return -EINVAL;
+ }
+ host->dhchap_dhgroup_id = dhgroup_id;
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_host_, dhchap_dhgroup);
+
+static struct configfs_attribute *nvmet_host_attrs[] = {
+ &nvmet_host_attr_dhchap_key,
+ &nvmet_host_attr_dhchap_ctrl_key,
+ &nvmet_host_attr_dhchap_hash,
+ &nvmet_host_attr_dhchap_dhgroup,
+ NULL,
+};
+#endif /* CONFIG_NVME_TARGET_AUTH */
+
static void nvmet_host_release(struct config_item *item)
{
struct nvmet_host *host = to_host(item);
+#ifdef CONFIG_NVME_TARGET_AUTH
+ kfree(host->dhchap_secret);
+#endif
kfree(host);
}
@@ -1693,6 +1821,9 @@ static struct configfs_item_operations nvmet_host_item_ops = {
static const struct config_item_type nvmet_host_type = {
.ct_item_ops = &nvmet_host_item_ops,
+#ifdef CONFIG_NVME_TARGET_AUTH
+ .ct_attrs = nvmet_host_attrs,
+#endif
.ct_owner = THIS_MODULE,
};
@@ -1705,6 +1836,11 @@ static struct config_group *nvmet_hosts_make_group(struct config_group *group,
if (!host)
return ERR_PTR(-ENOMEM);
+#ifdef CONFIG_NVME_TARGET_AUTH
+ /* Default to SHA256 */
+ host->dhchap_hash_id = NVME_AUTH_HASH_SHA256;
+#endif
+
config_group_init_type_name(&host->group, name, &nvmet_host_type);
return &host->group;
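The four attributes registered above expose per-host authentication settings in the host's configfs directory. A hypothetical userspace snippet, not part of the patch (the configfs path, NQN and key string are placeholders, and the hash/DH group names assumed here come from the common nvme-auth helpers rather than this hunk), might configure a host like this:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_attr(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0) {
		perror(path);
		return -1;
	}
	n = write(fd, val, strlen(val));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	const char *host =
		"/sys/kernel/config/nvmet/hosts/nqn.2014-08.org.nvmexpress:uuid:example";
	char path[256];

	/* placeholder secret in the "DHHC-1:<hash id>:<base64>:" format */
	snprintf(path, sizeof(path), "%s/dhchap_key", host);
	write_attr(path, "DHHC-1:00:<base64-secret>:");

	snprintf(path, sizeof(path), "%s/dhchap_hash", host);
	write_attr(path, "hmac(sha256)");

	snprintf(path, sizeof(path), "%s/dhchap_dhgroup", host);
	write_attr(path, "ffdhe2048");
	return 0;
}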
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index c27660a660d9..a1345790005f 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -795,6 +795,7 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
wait_for_completion(&sq->confirm_done);
wait_for_completion(&sq->free_done);
percpu_ref_exit(&sq->ref);
+ nvmet_auth_sq_free(sq);
if (ctrl) {
/*
@@ -865,8 +866,15 @@ static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
{
+ struct nvme_command *cmd = req->cmd;
u16 ret;
+ if (nvme_is_fabrics(cmd))
+ return nvmet_parse_fabrics_io_cmd(req);
+
+ if (unlikely(!nvmet_check_auth_status(req)))
+ return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
+
ret = nvmet_check_ctrl_status(req);
if (unlikely(ret))
return ret;
@@ -1271,6 +1279,11 @@ u16 nvmet_check_ctrl_status(struct nvmet_req *req)
req->cmd->common.opcode, req->sq->qid);
return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
}
+
+ if (unlikely(!nvmet_check_auth_status(req))) {
+ pr_warn("qid %d not authenticated\n", req->sq->qid);
+ return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
+ }
return 0;
}
@@ -1467,6 +1480,8 @@ static void nvmet_ctrl_free(struct kref *ref)
flush_work(&ctrl->async_event_work);
cancel_work_sync(&ctrl->fatal_err_work);
+ nvmet_destroy_auth(ctrl);
+
ida_free(&cntlid_ida, ctrl->cntlid);
nvmet_async_events_free(ctrl);
diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c
new file mode 100644
index 000000000000..ebdf9aa81041
--- /dev/null
+++ b/drivers/nvme/target/fabrics-cmd-auth.c
@@ -0,0 +1,544 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe over Fabrics DH-HMAC-CHAP authentication command handling.
+ * Copyright (c) 2020 Hannes Reinecke, SUSE Software Solutions.
+ * All rights reserved.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/blkdev.h>
+#include <linux/random.h>
+#include <linux/nvme-auth.h>
+#include <crypto/hash.h>
+#include <crypto/kpp.h>
+#include "nvmet.h"
+
+static void nvmet_auth_expired_work(struct work_struct *work)
+{
+ struct nvmet_sq *sq = container_of(to_delayed_work(work),
+ struct nvmet_sq, auth_expired_work);
+
+ pr_debug("%s: ctrl %d qid %d transaction %u expired, resetting\n",
+ __func__, sq->ctrl->cntlid, sq->qid, sq->dhchap_tid);
+ sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
+ sq->dhchap_tid = -1;
+}
+
+void nvmet_init_auth(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
+{
+ u32 result = le32_to_cpu(req->cqe->result.u32);
+
+ /* Initialize in-band authentication */
+ INIT_DELAYED_WORK(&req->sq->auth_expired_work,
+ nvmet_auth_expired_work);
+ req->sq->authenticated = false;
+ req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
+ result |= (u32)NVME_CONNECT_AUTHREQ_ATR << 16;
+ req->cqe->result.u32 = cpu_to_le32(result);
+}
+
+static u16 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmf_auth_dhchap_negotiate_data *data = d;
+ int i, hash_id = 0, fallback_hash_id = 0, dhgid, fallback_dhgid;
+
+ pr_debug("%s: ctrl %d qid %d: data sc_d %d napd %d authid %d halen %d dhlen %d\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ data->sc_c, data->napd, data->auth_protocol[0].dhchap.authid,
+ data->auth_protocol[0].dhchap.halen,
+ data->auth_protocol[0].dhchap.dhlen);
+ req->sq->dhchap_tid = le16_to_cpu(data->t_id);
+ if (data->sc_c)
+ return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
+
+ if (data->napd != 1)
+ return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+
+ if (data->auth_protocol[0].dhchap.authid !=
+ NVME_AUTH_DHCHAP_AUTH_ID)
+ return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+
+ for (i = 0; i < data->auth_protocol[0].dhchap.halen; i++) {
+ u8 host_hmac_id = data->auth_protocol[0].dhchap.idlist[i];
+
+ if (!fallback_hash_id &&
+ crypto_has_shash(nvme_auth_hmac_name(host_hmac_id), 0, 0))
+ fallback_hash_id = host_hmac_id;
+ if (ctrl->shash_id != host_hmac_id)
+ continue;
+ hash_id = ctrl->shash_id;
+ break;
+ }
+ if (hash_id == 0) {
+ if (fallback_hash_id == 0) {
+ pr_debug("%s: ctrl %d qid %d: no usable hash found\n",
+ __func__, ctrl->cntlid, req->sq->qid);
+ return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+ }
+ pr_debug("%s: ctrl %d qid %d: no usable hash found, falling back to %s\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ nvme_auth_hmac_name(fallback_hash_id));
+ ctrl->shash_id = fallback_hash_id;
+ }
+
+ dhgid = -1;
+ fallback_dhgid = -1;
+ for (i = 0; i < data->auth_protocol[0].dhchap.dhlen; i++) {
+ int tmp_dhgid = data->auth_protocol[0].dhchap.idlist[i + 30];
+
+ if (tmp_dhgid != ctrl->dh_gid) {
+ dhgid = tmp_dhgid;
+ break;
+ }
+ if (fallback_dhgid < 0) {
+ const char *kpp = nvme_auth_dhgroup_kpp(tmp_dhgid);
+
+ if (crypto_has_kpp(kpp, 0, 0))
+ fallback_dhgid = tmp_dhgid;
+ }
+ }
+ if (dhgid < 0) {
+ if (fallback_dhgid < 0) {
+ pr_debug("%s: ctrl %d qid %d: no usable DH group found\n",
+ __func__, ctrl->cntlid, req->sq->qid);
+ return NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
+ }
+ pr_debug("%s: ctrl %d qid %d: configured DH group %s not found\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ nvme_auth_dhgroup_name(fallback_dhgid));
+ ctrl->dh_gid = fallback_dhgid;
+ }
+ pr_debug("%s: ctrl %d qid %d: selected DH group %s (%d)\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ nvme_auth_dhgroup_name(ctrl->dh_gid), ctrl->dh_gid);
+ return 0;
+}
+
+static u16 nvmet_auth_reply(struct nvmet_req *req, void *d)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmf_auth_dhchap_reply_data *data = d;
+ u16 dhvlen = le16_to_cpu(data->dhvlen);
+ u8 *response;
+
+ pr_debug("%s: ctrl %d qid %d: data hl %d cvalid %d dhvlen %u\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ data->hl, data->cvalid, dhvlen);
+
+ if (dhvlen) {
+ if (!ctrl->dh_tfm)
+ return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ if (nvmet_auth_ctrl_sesskey(req, data->rval + 2 * data->hl,
+ dhvlen) < 0)
+ return NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
+ }
+
+ response = kmalloc(data->hl, GFP_KERNEL);
+ if (!response)
+ return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+
+ if (!ctrl->host_key) {
+ pr_warn("ctrl %d qid %d no host key\n",
+ ctrl->cntlid, req->sq->qid);
+ kfree(response);
+ return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ }
+ if (nvmet_auth_host_hash(req, response, data->hl) < 0) {
+ pr_debug("ctrl %d qid %d host hash failed\n",
+ ctrl->cntlid, req->sq->qid);
+ kfree(response);
+ return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ }
+
+ if (memcmp(data->rval, response, data->hl)) {
+ pr_info("ctrl %d qid %d host response mismatch\n",
+ ctrl->cntlid, req->sq->qid);
+ kfree(response);
+ return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ }
+ kfree(response);
+ pr_debug("%s: ctrl %d qid %d host authenticated\n",
+ __func__, ctrl->cntlid, req->sq->qid);
+ if (data->cvalid) {
+ req->sq->dhchap_c2 = kmemdup(data->rval + data->hl, data->hl,
+ GFP_KERNEL);
+ if (!req->sq->dhchap_c2)
+ return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+
+ pr_debug("%s: ctrl %d qid %d challenge %*ph\n",
+ __func__, ctrl->cntlid, req->sq->qid, data->hl,
+ req->sq->dhchap_c2);
+ req->sq->dhchap_s2 = le32_to_cpu(data->seqnum);
+ } else {
+ req->sq->authenticated = true;
+ req->sq->dhchap_c2 = NULL;
+ }
+
+ return 0;
+}
+
+static u16 nvmet_auth_failure2(struct nvmet_req *req, void *d)
+{
+ struct nvmf_auth_dhchap_failure_data *data = d;
+
+ return data->rescode_exp;
+}
+
+void nvmet_execute_auth_send(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmf_auth_dhchap_success2_data *data;
+ void *d;
+ u32 tl;
+ u16 status = 0;
+
+ if (req->cmd->auth_send.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_send_command, secp);
+ goto done;
+ }
+ if (req->cmd->auth_send.spsp0 != 0x01) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_send_command, spsp0);
+ goto done;
+ }
+ if (req->cmd->auth_send.spsp1 != 0x01) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_send_command, spsp1);
+ goto done;
+ }
+ tl = le32_to_cpu(req->cmd->auth_send.tl);
+ if (!tl) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_send_command, tl);
+ goto done;
+ }
+ if (!nvmet_check_transfer_len(req, tl)) {
+ pr_debug("%s: transfer length mismatch (%u)\n", __func__, tl);
+ return;
+ }
+
+ d = kmalloc(tl, GFP_KERNEL);
+ if (!d) {
+ status = NVME_SC_INTERNAL;
+ goto done;
+ }
+
+ status = nvmet_copy_from_sgl(req, 0, d, tl);
+ if (status) {
+ kfree(d);
+ goto done;
+ }
+
+ data = d;
+ pr_debug("%s: ctrl %d qid %d type %d id %d step %x\n", __func__,
+ ctrl->cntlid, req->sq->qid, data->auth_type, data->auth_id,
+ req->sq->dhchap_step);
+ if (data->auth_type != NVME_AUTH_COMMON_MESSAGES &&
+ data->auth_type != NVME_AUTH_DHCHAP_MESSAGES)
+ goto done_failure1;
+ if (data->auth_type == NVME_AUTH_COMMON_MESSAGES) {
+ if (data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE) {
+ /* Restart negotiation */
+ pr_debug("%s: ctrl %d qid %d reset negotiation\n", __func__,
+ ctrl->cntlid, req->sq->qid);
+ if (!req->sq->qid) {
+ if (nvmet_setup_auth(ctrl) < 0) {
+ status = NVME_SC_INTERNAL;
+ pr_err("ctrl %d qid 0 failed to setup"
+ "re-authentication",
+ ctrl->cntlid);
+ goto done_failure1;
+ }
+ }
+ req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
+ } else if (data->auth_id != req->sq->dhchap_step)
+ goto done_failure1;
+ /* Validate negotiation parameters */
+ status = nvmet_auth_negotiate(req, d);
+ if (status == 0)
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE;
+ else {
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+ req->sq->dhchap_status = status;
+ status = 0;
+ }
+ goto done_kfree;
+ }
+ if (data->auth_id != req->sq->dhchap_step) {
+ pr_debug("%s: ctrl %d qid %d step mismatch (%d != %d)\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ data->auth_id, req->sq->dhchap_step);
+ goto done_failure1;
+ }
+ if (le16_to_cpu(data->t_id) != req->sq->dhchap_tid) {
+ pr_debug("%s: ctrl %d qid %d invalid transaction %d (expected %d)\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ le16_to_cpu(data->t_id),
+ req->sq->dhchap_tid);
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+ req->sq->dhchap_status =
+ NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ goto done_kfree;
+ }
+
+ switch (data->auth_id) {
+ case NVME_AUTH_DHCHAP_MESSAGE_REPLY:
+ status = nvmet_auth_reply(req, d);
+ if (status == 0)
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1;
+ else {
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+ req->sq->dhchap_status = status;
+ status = 0;
+ }
+ goto done_kfree;
+ break;
+ case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2:
+ req->sq->authenticated = true;
+ pr_debug("%s: ctrl %d qid %d ctrl authenticated\n",
+ __func__, ctrl->cntlid, req->sq->qid);
+ goto done_kfree;
+ break;
+ case NVME_AUTH_DHCHAP_MESSAGE_FAILURE2:
+ status = nvmet_auth_failure2(req, d);
+ if (status) {
+ pr_warn("ctrl %d qid %d: authentication failed (%d)\n",
+ ctrl->cntlid, req->sq->qid, status);
+ req->sq->dhchap_status = status;
+ req->sq->authenticated = false;
+ status = 0;
+ }
+ goto done_kfree;
+ break;
+ default:
+ req->sq->dhchap_status =
+ NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
+ req->sq->authenticated = false;
+ goto done_kfree;
+ break;
+ }
+done_failure1:
+ req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
+ req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
+
+done_kfree:
+ kfree(d);
+done:
+ pr_debug("%s: ctrl %d qid %d dhchap status %x step %x\n", __func__,
+ ctrl->cntlid, req->sq->qid,
+ req->sq->dhchap_status, req->sq->dhchap_step);
+ if (status)
+ pr_debug("%s: ctrl %d qid %d nvme status %x error loc %d\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ status, req->error_loc);
+ req->cqe->result.u64 = 0;
+ nvmet_req_complete(req, status);
+ if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 &&
+ req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) {
+ unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120;
+
+ mod_delayed_work(system_wq, &req->sq->auth_expired_work,
+ auth_expire_secs * HZ);
+ return;
+ }
+ /* Final states, clear up variables */
+ nvmet_auth_sq_free(req->sq);
+ if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE2)
+ nvmet_ctrl_fatal_error(ctrl);
+}
+
+static int nvmet_auth_challenge(struct nvmet_req *req, void *d, int al)
+{
+ struct nvmf_auth_dhchap_challenge_data *data = d;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ int ret = 0;
+ int hash_len = nvme_auth_hmac_hash_len(ctrl->shash_id);
+ int data_size = sizeof(*d) + hash_len;
+
+ if (ctrl->dh_tfm)
+ data_size += ctrl->dh_keysize;
+ if (al < data_size) {
+ pr_debug("%s: buffer too small (al %d need %d)\n", __func__,
+ al, data_size);
+ return -EINVAL;
+ }
+ memset(data, 0, data_size);
+ req->sq->dhchap_s1 = nvme_auth_get_seqnum();
+ data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE;
+ data->t_id = cpu_to_le16(req->sq->dhchap_tid);
+ data->hashid = ctrl->shash_id;
+ data->hl = hash_len;
+ data->seqnum = cpu_to_le32(req->sq->dhchap_s1);
+ req->sq->dhchap_c1 = kmalloc(data->hl, GFP_KERNEL);
+ if (!req->sq->dhchap_c1)
+ return -ENOMEM;
+ get_random_bytes(req->sq->dhchap_c1, data->hl);
+ memcpy(data->cval, req->sq->dhchap_c1, data->hl);
+ if (ctrl->dh_tfm) {
+ data->dhgid = ctrl->dh_gid;
+ data->dhvlen = cpu_to_le16(ctrl->dh_keysize);
+ ret = nvmet_auth_ctrl_exponential(req, data->cval + data->hl,
+ ctrl->dh_keysize);
+ }
+ pr_debug("%s: ctrl %d qid %d seq %d transaction %d hl %d dhvlen %zu\n",
+ __func__, ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1,
+ req->sq->dhchap_tid, data->hl, ctrl->dh_keysize);
+ return ret;
+}
+
+static int nvmet_auth_success1(struct nvmet_req *req, void *d, int al)
+{
+ struct nvmf_auth_dhchap_success1_data *data = d;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ int hash_len = nvme_auth_hmac_hash_len(ctrl->shash_id);
+
+ WARN_ON(al < sizeof(*data));
+ memset(data, 0, sizeof(*data));
+ data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1;
+ data->t_id = cpu_to_le16(req->sq->dhchap_tid);
+ data->hl = hash_len;
+ if (req->sq->dhchap_c2) {
+ if (!ctrl->ctrl_key) {
+ pr_warn("ctrl %d qid %d no ctrl key\n",
+ ctrl->cntlid, req->sq->qid);
+ return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ }
+ if (nvmet_auth_ctrl_hash(req, data->rval, data->hl))
+ return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+ data->rvalid = 1;
+ pr_debug("ctrl %d qid %d response %*ph\n",
+ ctrl->cntlid, req->sq->qid, data->hl, data->rval);
+ }
+ return 0;
+}
+
+static void nvmet_auth_failure1(struct nvmet_req *req, void *d, int al)
+{
+ struct nvmf_auth_dhchap_failure_data *data = d;
+
+ WARN_ON(al < sizeof(*data));
+ data->auth_type = NVME_AUTH_COMMON_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+ data->t_id = cpu_to_le16(req->sq->dhchap_tid);
+ data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED;
+ data->rescode_exp = req->sq->dhchap_status;
+}
+
+void nvmet_execute_auth_receive(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ void *d;
+ u32 al;
+ u16 status = 0;
+
+ if (req->cmd->auth_receive.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_receive_command, secp);
+ goto done;
+ }
+ if (req->cmd->auth_receive.spsp0 != 0x01) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_receive_command, spsp0);
+ goto done;
+ }
+ if (req->cmd->auth_receive.spsp1 != 0x01) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_receive_command, spsp1);
+ goto done;
+ }
+ al = le32_to_cpu(req->cmd->auth_receive.al);
+ if (!al) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_receive_command, al);
+ goto done;
+ }
+ if (!nvmet_check_transfer_len(req, al)) {
+ pr_debug("%s: transfer length mismatch (%u)\n", __func__, al);
+ return;
+ }
+
+ d = kmalloc(al, GFP_KERNEL);
+ if (!d) {
+ status = NVME_SC_INTERNAL;
+ goto done;
+ }
+ pr_debug("%s: ctrl %d qid %d step %x\n", __func__,
+ ctrl->cntlid, req->sq->qid, req->sq->dhchap_step);
+ switch (req->sq->dhchap_step) {
+ case NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE:
+ if (nvmet_auth_challenge(req, d, al) < 0) {
+ pr_warn("ctrl %d qid %d: challenge error (%d)\n",
+ ctrl->cntlid, req->sq->qid, status);
+ status = NVME_SC_INTERNAL;
+ break;
+ }
+ if (status) {
+ req->sq->dhchap_status = status;
+ nvmet_auth_failure1(req, d, al);
+ pr_warn("ctrl %d qid %d: challenge status (%x)\n",
+ ctrl->cntlid, req->sq->qid,
+ req->sq->dhchap_status);
+ status = 0;
+ break;
+ }
+ req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
+ break;
+ case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1:
+ status = nvmet_auth_success1(req, d, al);
+ if (status) {
+ req->sq->dhchap_status = status;
+ req->sq->authenticated = false;
+ nvmet_auth_failure1(req, d, al);
+ pr_warn("ctrl %d qid %d: success1 status (%x)\n",
+ ctrl->cntlid, req->sq->qid,
+ req->sq->dhchap_status);
+ break;
+ }
+ req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2;
+ break;
+ case NVME_AUTH_DHCHAP_MESSAGE_FAILURE1:
+ req->sq->authenticated = false;
+ nvmet_auth_failure1(req, d, al);
+ pr_warn("ctrl %d qid %d failure1 (%x)\n",
+ ctrl->cntlid, req->sq->qid, req->sq->dhchap_status);
+ break;
+ default:
+ pr_warn("ctrl %d qid %d unhandled step (%d)\n",
+ ctrl->cntlid, req->sq->qid, req->sq->dhchap_step);
+ req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+ req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ nvmet_auth_failure1(req, d, al);
+ status = 0;
+ break;
+ }
+
+ status = nvmet_copy_to_sgl(req, 0, d, al);
+ kfree(d);
+done:
+ req->cqe->result.u64 = 0;
+ nvmet_req_complete(req, status);
+ if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2)
+ nvmet_auth_sq_free(req->sq);
+ else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
+ nvmet_auth_sq_free(req->sq);
+ nvmet_ctrl_fatal_error(ctrl);
+ }
+}
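The send/receive handlers above advance the per-queue sq->dhchap_step through negotiate, challenge, reply, success1 and success2, dropping into the failure messages otherwise. A condensed, userspace-only sketch of the happy-path transitions, outside the patch (state and event names are simplified; failure, expiry and transaction-id checks are omitted):

#include <stdio.h>

enum step  { NEGOTIATE, CHALLENGE, REPLY, SUCCESS1, SUCCESS2, DONE };
enum event { HOST_NEGOTIATE, CTRL_CHALLENGE_SENT, HOST_REPLY_OK,
	     CTRL_SUCCESS1_SENT, HOST_SUCCESS2 };

static enum step next(enum step s, enum event e)
{
	switch (s) {
	case NEGOTIATE: return e == HOST_NEGOTIATE ? CHALLENGE : s;
	case CHALLENGE: return e == CTRL_CHALLENGE_SENT ? REPLY : s;
	case REPLY:     return e == HOST_REPLY_OK ? SUCCESS1 : s;
	case SUCCESS1:  return e == CTRL_SUCCESS1_SENT ? SUCCESS2 : s;
	case SUCCESS2:  return e == HOST_SUCCESS2 ? DONE : s;
	default:        return s;
	}
}

int main(void)
{
	enum step s = NEGOTIATE;
	enum event flow[] = { HOST_NEGOTIATE, CTRL_CHALLENGE_SENT,
			      HOST_REPLY_OK, CTRL_SUCCESS1_SENT,
			      HOST_SUCCESS2 };

	for (unsigned int i = 0; i < sizeof(flow) / sizeof(flow[0]); i++) {
		s = next(s, flow[i]);
		printf("step -> %d\n", s);
	}
	return 0;
}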
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 70fb587e9413..f91a56180d3d 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -82,7 +82,7 @@ static void nvmet_execute_prop_get(struct nvmet_req *req)
nvmet_req_complete(req, status);
}
-u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req)
+u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req)
{
struct nvme_command *cmd = req->cmd;
@@ -93,6 +93,37 @@ u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req)
case nvme_fabrics_type_property_get:
req->execute = nvmet_execute_prop_get;
break;
+#ifdef CONFIG_NVME_TARGET_AUTH
+ case nvme_fabrics_type_auth_send:
+ req->execute = nvmet_execute_auth_send;
+ break;
+ case nvme_fabrics_type_auth_receive:
+ req->execute = nvmet_execute_auth_receive;
+ break;
+#endif
+ default:
+ pr_debug("received unknown capsule type 0x%x\n",
+ cmd->fabrics.fctype);
+ req->error_loc = offsetof(struct nvmf_common_command, fctype);
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+
+ return 0;
+}
+
+u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ switch (cmd->fabrics.fctype) {
+#ifdef CONFIG_NVME_TARGET_AUTH
+ case nvme_fabrics_type_auth_send:
+ req->execute = nvmet_execute_auth_send;
+ break;
+ case nvme_fabrics_type_auth_receive:
+ req->execute = nvmet_execute_auth_receive;
+ break;
+#endif
default:
pr_debug("received unknown capsule type 0x%x\n",
cmd->fabrics.fctype);
@@ -173,6 +204,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
struct nvmf_connect_data *d;
struct nvmet_ctrl *ctrl = NULL;
u16 status = 0;
+ int ret;
if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
return;
@@ -215,18 +247,32 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
uuid_copy(&ctrl->hostid, &d->hostid);
+ ret = nvmet_setup_auth(ctrl);
+ if (ret < 0) {
+ pr_err("Failed to setup authentication, error %d\n", ret);
+ nvmet_ctrl_put(ctrl);
+ if (ret == -EPERM)
+ status = (NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR);
+ else
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
status = nvmet_install_queue(ctrl, req);
if (status) {
nvmet_ctrl_put(ctrl);
goto out;
}
- pr_info("creating %s controller %d for subsystem %s for NQN %s%s.\n",
+ pr_info("creating %s controller %d for subsystem %s for NQN %s%s%s.\n",
nvmet_is_disc_subsys(ctrl->subsys) ? "discovery" : "nvm",
ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn,
- ctrl->pi_support ? " T10-PI is enabled" : "");
+ ctrl->pi_support ? " T10-PI is enabled" : "",
+ nvmet_has_auth(ctrl) ? " with DH-HMAC-CHAP" : "");
req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
+ if (nvmet_has_auth(ctrl))
+ nvmet_init_auth(ctrl, req);
out:
kfree(d);
complete:
@@ -286,6 +332,9 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
+ req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
+ if (nvmet_has_auth(ctrl))
+ nvmet_init_auth(ctrl, req);
out:
kfree(d);
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 0f5c77e22a0a..9750a7fca268 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -424,9 +424,7 @@ static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
if (ctrl->ctrl.queue_count > 1) {
nvme_stop_queues(&ctrl->ctrl);
- blk_mq_tagset_busy_iter(&ctrl->tag_set,
- nvme_cancel_request, &ctrl->ctrl);
- blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
+ nvme_cancel_tagset(&ctrl->ctrl);
nvme_loop_destroy_io_queues(ctrl);
}
@@ -434,9 +432,7 @@ static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
if (ctrl->ctrl.state == NVME_CTRL_LIVE)
nvme_shutdown_ctrl(&ctrl->ctrl);
- blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
- nvme_cancel_request, &ctrl->ctrl);
- blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
+ nvme_cancel_admin_tagset(&ctrl->ctrl);
nvme_loop_destroy_admin_queue(ctrl);
}
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 2b3e5719f24e..6ffeeb0a1c49 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -108,6 +108,19 @@ struct nvmet_sq {
u16 size;
u32 sqhd;
bool sqhd_disabled;
+#ifdef CONFIG_NVME_TARGET_AUTH
+ struct delayed_work auth_expired_work;
+ bool authenticated;
+ u16 dhchap_tid;
+ u16 dhchap_status;
+ int dhchap_step;
+ u8 *dhchap_c1;
+ u8 *dhchap_c2;
+ u32 dhchap_s1;
+ u32 dhchap_s2;
+ u8 *dhchap_skey;
+ int dhchap_skey_len;
+#endif
struct completion free_done;
struct completion confirm_done;
};
@@ -209,6 +222,15 @@ struct nvmet_ctrl {
u64 err_counter;
struct nvme_error_slot slots[NVMET_ERROR_LOG_SLOTS];
bool pi_support;
+#ifdef CONFIG_NVME_TARGET_AUTH
+ struct nvme_dhchap_key *host_key;
+ struct nvme_dhchap_key *ctrl_key;
+ u8 shash_id;
+ struct crypto_kpp *dh_tfm;
+ u8 dh_gid;
+ u8 *dh_key;
+ size_t dh_keysize;
+#endif
};
struct nvmet_subsys {
@@ -271,6 +293,12 @@ static inline struct nvmet_subsys *namespaces_to_subsys(
struct nvmet_host {
struct config_group group;
+ u8 *dhchap_secret;
+ u8 *dhchap_ctrl_secret;
+ u8 dhchap_key_hash;
+ u8 dhchap_ctrl_key_hash;
+ u8 dhchap_hash_id;
+ u8 dhchap_dhgroup_id;
};
static inline struct nvmet_host *to_host(struct config_item *item)
@@ -420,7 +448,8 @@ u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
-u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);
+u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req);
+u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req);
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
@@ -668,4 +697,48 @@ static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio)
bio_put(bio);
}
+#ifdef CONFIG_NVME_TARGET_AUTH
+void nvmet_execute_auth_send(struct nvmet_req *req);
+void nvmet_execute_auth_receive(struct nvmet_req *req);
+int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
+ bool set_ctrl);
+int nvmet_auth_set_host_hash(struct nvmet_host *host, const char *hash);
+int nvmet_setup_auth(struct nvmet_ctrl *ctrl);
+void nvmet_init_auth(struct nvmet_ctrl *ctrl, struct nvmet_req *req);
+void nvmet_destroy_auth(struct nvmet_ctrl *ctrl);
+void nvmet_auth_sq_free(struct nvmet_sq *sq);
+int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id);
+bool nvmet_check_auth_status(struct nvmet_req *req);
+int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
+ unsigned int hash_len);
+int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
+ unsigned int hash_len);
+static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
+{
+ return ctrl->host_key != NULL;
+}
+int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
+ u8 *buf, int buf_size);
+int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
+ u8 *buf, int buf_size);
+#else
+static inline int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
+{
+ return 0;
+}
+static inline void nvmet_init_auth(struct nvmet_ctrl *ctrl,
+ struct nvmet_req *req) {};
+static inline void nvmet_destroy_auth(struct nvmet_ctrl *ctrl) {};
+static inline void nvmet_auth_sq_free(struct nvmet_sq *sq) {};
+static inline bool nvmet_check_auth_status(struct nvmet_req *req)
+{
+ return true;
+}
+static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
+{
+ return false;
+}
+static inline const char *nvmet_dhchap_dhgroup_name(u8 dhgid) { return NULL; }
+#endif
+
#endif /* _NVMET_H */
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 09fdcac87d17..4597bca43a6d 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -415,7 +415,7 @@ static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
goto out_free_rsp;
- if (!ib_uses_virt_dma(ndev->device))
+ if (ib_dma_pci_p2p_dma_supported(ndev->device))
r->req.p2p_client = &ndev->device->dev;
r->send_sge.length = sizeof(*r->req.cqe);
r->send_sge.lkey = ndev->pd->local_dma_lkey;
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 0a9542599ad1..dc3b4dc8fe08 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1839,7 +1839,8 @@ static int __init nvmet_tcp_init(void)
{
int ret;
- nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq", WQ_HIGHPRI, 0);
+ nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
if (!nvmet_tcp_wq)
return -ENOMEM;
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 94f017d808c4..96f0a12e507c 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -1045,26 +1045,29 @@ phys_addr_t __init of_dma_get_max_cpu_address(struct device_node *np)
*
* It returns true if "dma-coherent" property was found
* for this device in the DT, or if DMA is coherent by
- * default for OF devices on the current platform.
+ * default for OF devices on the current platform and no
+ * "dma-noncoherent" property was found for this device.
*/
bool of_dma_is_coherent(struct device_node *np)
{
struct device_node *node;
-
- if (IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT))
- return true;
+ bool is_coherent = IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT);
node = of_node_get(np);
while (node) {
if (of_property_read_bool(node, "dma-coherent")) {
- of_node_put(node);
- return true;
+ is_coherent = true;
+ break;
+ }
+ if (of_property_read_bool(node, "dma-noncoherent")) {
+ is_coherent = false;
+ break;
}
node = of_get_next_dma_parent(node);
}
of_node_put(node);
- return false;
+ return is_coherent;
}
EXPORT_SYMBOL_GPL(of_dma_is_coherent);
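A minimal caller sketch for the reworked helper (illustrative only, not part of this patch; the function and flag names are placeholders):

#include <linux/of_address.h>
#include <linux/device.h>

/*
 * Decide whether explicit cache maintenance is needed, per the semantics
 * documented above: coherent if "dma-coherent" is present, or if the
 * platform default is coherent and no "dma-noncoherent" overrides it.
 */
static bool example_needs_dma_sync(struct device *dev)
{
	return !of_dma_is_coherent(dev->of_node);
}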
diff --git a/drivers/of/base.c b/drivers/of/base.c
index a19cd0c73644..7fa960bd3df1 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -2079,7 +2079,7 @@ struct device_node *of_find_next_cache_node(const struct device_node *np)
*
* @cpu: cpu number(logical index) for which the last cache level is needed
*
- * Return: The the level at which the last cache is present. It is exactly
+ * Return: The level at which the last cache is present. It is exactly
* same as the total number of cache levels for the given logical cpu.
*/
int of_find_last_cache_level(unsigned int cpu)
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 874f031442dc..75b6cbffa755 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -81,8 +81,11 @@ of_dma_set_restricted_buffer(struct device *dev, struct device_node *np)
* restricted-dma-pool region is allowed.
*/
if (of_device_is_compatible(node, "restricted-dma-pool") &&
- of_device_is_available(node))
+ of_device_is_available(node)) {
+ of_node_put(node);
break;
+ }
+ of_node_put(node);
}
/*
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index a8f5b6532165..7bc92923104c 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -246,7 +246,7 @@ static int populate_node(const void *blob,
}
*pnp = np;
- return true;
+ return 0;
}
static void reverse_nodes(struct device_node *parent)
@@ -477,8 +477,8 @@ void *initial_boot_params __ro_after_init;
static u32 of_fdt_crc32;
-static int __init early_init_dt_reserve_memory_arch(phys_addr_t base,
- phys_addr_t size, bool nomap)
+static int __init early_init_dt_reserve_memory(phys_addr_t base,
+ phys_addr_t size, bool nomap)
{
if (nomap) {
/*
@@ -525,15 +525,15 @@ static int __init __reserved_mem_reserve_reg(unsigned long node,
size = dt_mem_next_cell(dt_root_size_cells, &prop);
if (size &&
- early_init_dt_reserve_memory_arch(base, size, nomap) == 0) {
+ early_init_dt_reserve_memory(base, size, nomap) == 0) {
pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
uname, &base, (unsigned long)(size / SZ_1M));
if (!nomap)
- kmemleak_alloc_phys(base, size, 0, 0);
+ kmemleak_alloc_phys(base, size, 0);
}
else
- pr_info("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
- uname, &base, (unsigned long)(size / SZ_1M));
+ pr_err("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
+ uname, &base, (unsigned long)(size / SZ_1M));
len -= t_len;
if (first) {
@@ -644,7 +644,7 @@ void __init early_init_fdt_scan_reserved_mem(void)
fdt_get_mem_rsv(initial_boot_params, n, &base, &size);
if (!size)
break;
- early_init_dt_reserve_memory_arch(base, size, false);
+ memblock_reserve(base, size);
}
fdt_scan_reserved_mem();
@@ -661,9 +661,8 @@ void __init early_init_fdt_reserve_self(void)
return;
/* Reserve the dtb region */
- early_init_dt_reserve_memory_arch(__pa(initial_boot_params),
- fdt_totalsize(initial_boot_params),
- false);
+ memblock_reserve(__pa(initial_boot_params),
+ fdt_totalsize(initial_boot_params));
}
/**
@@ -1025,6 +1024,7 @@ int __init early_init_dt_scan_chosen_stdout(void)
int l;
const struct earlycon_id *match;
const void *fdt = initial_boot_params;
+ int ret;
offset = fdt_path_offset(fdt, "/chosen");
if (offset < 0)
@@ -1057,7 +1057,8 @@ int __init early_init_dt_scan_chosen_stdout(void)
if (fdt_node_check_compatible(fdt, offset, match->compatible))
continue;
- if (of_setup_earlycon(match, offset, options) == 0)
+ ret = of_setup_earlycon(match, offset, options);
+ if (!ret || ret == -EALREADY)
return 0;
}
return -ENODEV;
diff --git a/drivers/of/kexec.c b/drivers/of/kexec.c
index f2e58ddfaed2..e6c01db393f9 100644
--- a/drivers/of/kexec.c
+++ b/drivers/of/kexec.c
@@ -128,6 +128,7 @@ int __init ima_get_kexec_buffer(void **addr, size_t *size)
{
int ret, len;
unsigned long tmp_addr;
+ unsigned long start_pfn, end_pfn;
size_t tmp_size;
const void *prop;
@@ -139,6 +140,22 @@ int __init ima_get_kexec_buffer(void **addr, size_t *size)
if (ret)
return ret;
+ /* Do some sanity checking on the returned size for the ima-kexec buffer */
+ if (!tmp_size)
+ return -ENOENT;
+
+ /*
+ * Calculate the PFNs for the buffer and ensure
+ * they are within addressable memory.
+ */
+ start_pfn = PHYS_PFN(tmp_addr);
+ end_pfn = PHYS_PFN(tmp_addr + tmp_size - 1);
+ if (!page_is_ram(start_pfn) || !page_is_ram(end_pfn)) {
+ pr_warn("IMA buffer at 0x%lx, size = 0x%zx beyond memory\n",
+ tmp_addr, tmp_size);
+ return -EINVAL;
+ }
+
*addr = __va(tmp_addr);
*size = tmp_size;
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 75caa6f5d36f..65f3b02a0e4e 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -156,7 +156,8 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
}
if (base == 0) {
- pr_info("failed to allocate memory for node '%s'\n", uname);
+ pr_err("failed to allocate memory for node '%s': size %lu MiB\n",
+ uname, (unsigned long)(size / SZ_1M));
return -ENOMEM;
}
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 4044ddcb02c6..bd8ff4df723d 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -903,12 +903,6 @@ static int of_overlay_apply(struct overlay_changeset *ovcs)
{
int ret = 0, ret_revert, ret_tmp;
- if (devicetree_corrupt()) {
- pr_err("devicetree state suspect, refuse to apply overlay\n");
- ret = -EBUSY;
- goto out;
- }
-
ret = of_resolve_phandles(ovcs->overlay_root);
if (ret)
goto out;
@@ -983,6 +977,11 @@ int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size,
*ret_ovcs_id = 0;
+ if (devicetree_corrupt()) {
+ pr_err("devicetree state suspect, refuse to apply overlay\n");
+ return -EBUSY;
+ }
+
if (overlay_fdt_size < sizeof(struct fdt_header) ||
fdt_check_header(overlay_fdt)) {
pr_err("Invalid overlay_fdt header\n");
@@ -1044,20 +1043,15 @@ int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size,
* goto err_free_ovcs. Instead, the caller of of_overlay_fdt_apply()
* can call of_overlay_remove();
*/
-
- mutex_unlock(&of_mutex);
- of_overlay_mutex_unlock();
-
*ret_ovcs_id = ovcs->id;
-
- return ret;
+ goto out_unlock;
err_free_ovcs:
free_overlay_changeset(ovcs);
+out_unlock:
mutex_unlock(&of_mutex);
of_overlay_mutex_unlock();
-
return ret;
}
EXPORT_SYMBOL_GPL(of_overlay_fdt_apply);
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 7f6bba18c515..eafa8ffefbd0 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -1602,7 +1602,7 @@ static int unittest_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, devptr);
- devptr->chip.of_node = pdev->dev.of_node;
+ devptr->chip.fwnode = dev_fwnode(&pdev->dev);
devptr->chip.label = "of-unittest-gpio";
devptr->chip.base = -1; /* dynamic allocation */
devptr->chip.ngpio = 5;
@@ -1611,7 +1611,7 @@ static int unittest_gpio_probe(struct platform_device *pdev)
ret = gpiochip_add_data(&devptr->chip, NULL);
unittest(!ret,
- "gpiochip_add_data() for node @%pOF failed, ret = %d\n", devptr->chip.of_node, ret);
+ "gpiochip_add_data() for node @%pfw failed, ret = %d\n", devptr->chip.fwnode, ret);
if (!ret)
unittest_gpio_probe_pass_count++;
@@ -1620,20 +1620,19 @@ static int unittest_gpio_probe(struct platform_device *pdev)
static int unittest_gpio_remove(struct platform_device *pdev)
{
- struct unittest_gpio_dev *gdev = platform_get_drvdata(pdev);
+ struct unittest_gpio_dev *devptr = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
- struct device_node *np = pdev->dev.of_node;
- dev_dbg(dev, "%s for node @%pOF\n", __func__, np);
+ dev_dbg(dev, "%s for node @%pfw\n", __func__, devptr->chip.fwnode);
- if (!gdev)
+ if (!devptr)
return -EINVAL;
- if (gdev->chip.base != -1)
- gpiochip_remove(&gdev->chip);
+ if (devptr->chip.base != -1)
+ gpiochip_remove(&devptr->chip);
platform_set_drvdata(pdev, NULL);
- kfree(gdev);
+ kfree(devptr);
return 0;
}
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 84063eaebb91..77d1ba3a4154 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -13,11 +13,12 @@
#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/err.h>
-#include <linux/slab.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/pm_domain.h>
#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/xarray.h>
#include "opp.h"
@@ -36,6 +37,9 @@ DEFINE_MUTEX(opp_table_lock);
/* Flag indicating that opp_tables list is being updated at the moment */
static bool opp_tables_busy;
+/* OPP ID allocator */
+static DEFINE_XARRAY_ALLOC1(opp_configs);
+
static bool _find_opp_dev(const struct device *dev, struct opp_table *opp_table)
{
struct opp_device *opp_dev;
@@ -93,6 +97,18 @@ struct opp_table *_find_opp_table(struct device *dev)
return opp_table;
}
+/*
+ * Returns true if the OPP table does not use multiple clocks, else returns false with a WARN.
+ *
+ * We don't force clk_count == 1 here as there are users who don't have a clock
+ * representation in the OPP table and manage the clock configuration themselves
+ * in a platform-specific way.
+ */
+static bool assert_single_clk(struct opp_table *opp_table)
+{
+ return !WARN_ON(opp_table->clk_count > 1);
+}
+
/**
* dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
* @opp: opp for which voltage has to be returned for
@@ -114,6 +130,31 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
/**
+ * dev_pm_opp_get_supplies() - Gets the supply information corresponding to an opp
+ * @opp: opp for which the supply information has to be returned
+ * @supplies: Placeholder for copying the supply information.
+ *
+ * Return: negative error number on failure, 0 otherwise on success after
+ * setting @supplies.
+ *
+ * This can be used for devices with any number of power supplies. The caller
+ * must ensure the @supplies array has space for each regulator.
+ */
+int dev_pm_opp_get_supplies(struct dev_pm_opp *opp,
+ struct dev_pm_opp_supply *supplies)
+{
+ if (IS_ERR_OR_NULL(opp) || !supplies) {
+ pr_err("%s: Invalid parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ memcpy(supplies, opp->supplies,
+ sizeof(*supplies) * opp->opp_table->regulator_count);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_supplies);
+
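A minimal usage sketch for the new export (not part of the patch; the two-supply sizing and the exact-frequency lookup are assumptions for illustration):

#include <linux/err.h>
#include <linux/pm_opp.h>

/* Copy the supply info of the OPP matching @freq; assumes two regulators. */
static int example_read_supplies(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp_supply supplies[2];
	struct dev_pm_opp *opp;
	int ret;

	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	ret = dev_pm_opp_get_supplies(opp, supplies);
	dev_pm_opp_put(opp);	/* drop the reference taken by the find helper */
	return ret;
}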
+/**
* dev_pm_opp_get_power() - Gets the power corresponding to an opp
* @opp: opp for which power has to be returned for
*
@@ -152,7 +193,10 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
return 0;
}
- return opp->rate;
+ if (!assert_single_clk(opp->opp_table))
+ return 0;
+
+ return opp->rates[0];
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
@@ -398,6 +442,154 @@ int dev_pm_opp_get_opp_count(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
+/* Helpers to read keys */
+static unsigned long _read_freq(struct dev_pm_opp *opp, int index)
+{
+ return opp->rates[0];
+}
+
+static unsigned long _read_level(struct dev_pm_opp *opp, int index)
+{
+ return opp->level;
+}
+
+static unsigned long _read_bw(struct dev_pm_opp *opp, int index)
+{
+ return opp->bandwidth[index].peak;
+}
+
+/* Generic comparison helpers */
+static bool _compare_exact(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
+ unsigned long opp_key, unsigned long key)
+{
+ if (opp_key == key) {
+ *opp = temp_opp;
+ return true;
+ }
+
+ return false;
+}
+
+static bool _compare_ceil(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
+ unsigned long opp_key, unsigned long key)
+{
+ if (opp_key >= key) {
+ *opp = temp_opp;
+ return true;
+ }
+
+ return false;
+}
+
+static bool _compare_floor(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
+ unsigned long opp_key, unsigned long key)
+{
+ if (opp_key > key)
+ return true;
+
+ *opp = temp_opp;
+ return false;
+}
+
+/* Generic key finding helpers */
+static struct dev_pm_opp *_opp_table_find_key(struct opp_table *opp_table,
+ unsigned long *key, int index, bool available,
+ unsigned long (*read)(struct dev_pm_opp *opp, int index),
+ bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
+ unsigned long opp_key, unsigned long key),
+ bool (*assert)(struct opp_table *opp_table))
+{
+ struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+
+ /* Assert that the requirement is met */
+ if (assert && !assert(opp_table))
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&opp_table->lock);
+
+ list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
+ if (temp_opp->available == available) {
+ if (compare(&opp, temp_opp, read(temp_opp, index), *key))
+ break;
+ }
+ }
+
+ /* Increment the reference count of OPP */
+ if (!IS_ERR(opp)) {
+ *key = read(opp, index);
+ dev_pm_opp_get(opp);
+ }
+
+ mutex_unlock(&opp_table->lock);
+
+ return opp;
+}
+
+static struct dev_pm_opp *
+_find_key(struct device *dev, unsigned long *key, int index, bool available,
+ unsigned long (*read)(struct dev_pm_opp *opp, int index),
+ bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
+ unsigned long opp_key, unsigned long key),
+ bool (*assert)(struct opp_table *opp_table))
+{
+ struct opp_table *opp_table;
+ struct dev_pm_opp *opp;
+
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ dev_err(dev, "%s: OPP table not found (%ld)\n", __func__,
+ PTR_ERR(opp_table));
+ return ERR_CAST(opp_table);
+ }
+
+ opp = _opp_table_find_key(opp_table, key, index, available, read,
+ compare, assert);
+
+ dev_pm_opp_put_opp_table(opp_table);
+
+ return opp;
+}
+
+static struct dev_pm_opp *_find_key_exact(struct device *dev,
+ unsigned long key, int index, bool available,
+ unsigned long (*read)(struct dev_pm_opp *opp, int index),
+ bool (*assert)(struct opp_table *opp_table))
+{
+ /*
+ * The value of key will be updated here, but will be ignored as the
+ * caller doesn't need it.
+ */
+ return _find_key(dev, &key, index, available, read, _compare_exact,
+ assert);
+}
+
+static struct dev_pm_opp *_opp_table_find_key_ceil(struct opp_table *opp_table,
+ unsigned long *key, int index, bool available,
+ unsigned long (*read)(struct dev_pm_opp *opp, int index),
+ bool (*assert)(struct opp_table *opp_table))
+{
+ return _opp_table_find_key(opp_table, key, index, available, read,
+ _compare_ceil, assert);
+}
+
+static struct dev_pm_opp *_find_key_ceil(struct device *dev, unsigned long *key,
+ int index, bool available,
+ unsigned long (*read)(struct dev_pm_opp *opp, int index),
+ bool (*assert)(struct opp_table *opp_table))
+{
+ return _find_key(dev, key, index, available, read, _compare_ceil,
+ assert);
+}
+
+static struct dev_pm_opp *_find_key_floor(struct device *dev,
+ unsigned long *key, int index, bool available,
+ unsigned long (*read)(struct dev_pm_opp *opp, int index),
+ bool (*assert)(struct opp_table *opp_table))
+{
+ return _find_key(dev, key, index, available, read, _compare_floor,
+ assert);
+}
+
/**
* dev_pm_opp_find_freq_exact() - search for an exact frequency
* @dev: device for which we do this operation
@@ -422,61 +614,18 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
* use.
*/
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
- unsigned long freq,
- bool available)
+ unsigned long freq, bool available)
{
- struct opp_table *opp_table;
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table)) {
- int r = PTR_ERR(opp_table);
-
- dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
- return ERR_PTR(r);
- }
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->available == available &&
- temp_opp->rate == freq) {
- opp = temp_opp;
-
- /* Increment the reference count of OPP */
- dev_pm_opp_get(opp);
- break;
- }
- }
-
- mutex_unlock(&opp_table->lock);
- dev_pm_opp_put_opp_table(opp_table);
-
- return opp;
+ return _find_key_exact(dev, freq, 0, available, _read_freq,
+ assert_single_clk);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
unsigned long *freq)
{
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->available && temp_opp->rate >= *freq) {
- opp = temp_opp;
- *freq = opp->rate;
-
- /* Increment the reference count of OPP */
- dev_pm_opp_get(opp);
- break;
- }
- }
-
- mutex_unlock(&opp_table->lock);
-
- return opp;
+ return _opp_table_find_key_ceil(opp_table, freq, 0, true, _read_freq,
+ assert_single_clk);
}
/**
@@ -500,23 +649,7 @@ static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
unsigned long *freq)
{
- struct opp_table *opp_table;
- struct dev_pm_opp *opp;
-
- if (!dev || !freq) {
- dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
- return ERR_PTR(-EINVAL);
- }
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table))
- return ERR_CAST(opp_table);
-
- opp = _find_freq_ceil(opp_table, freq);
-
- dev_pm_opp_put_opp_table(opp_table);
-
- return opp;
+ return _find_key_ceil(dev, freq, 0, true, _read_freq, assert_single_clk);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
@@ -541,98 +674,11 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
unsigned long *freq)
{
- struct opp_table *opp_table;
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
- if (!dev || !freq) {
- dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
- return ERR_PTR(-EINVAL);
- }
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table))
- return ERR_CAST(opp_table);
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->available) {
- /* go to the next node, before choosing prev */
- if (temp_opp->rate > *freq)
- break;
- else
- opp = temp_opp;
- }
- }
-
- /* Increment the reference count of OPP */
- if (!IS_ERR(opp))
- dev_pm_opp_get(opp);
- mutex_unlock(&opp_table->lock);
- dev_pm_opp_put_opp_table(opp_table);
-
- if (!IS_ERR(opp))
- *freq = opp->rate;
-
- return opp;
+ return _find_key_floor(dev, freq, 0, true, _read_freq, assert_single_clk);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
/**
- * dev_pm_opp_find_freq_ceil_by_volt() - Find OPP with highest frequency for
- * target voltage.
- * @dev: Device for which we do this operation.
- * @u_volt: Target voltage.
- *
- * Search for OPP with highest (ceil) frequency and has voltage <= u_volt.
- *
- * Return: matching *opp, else returns ERR_PTR in case of error which should be
- * handled using IS_ERR.
- *
- * Error return values can be:
- * EINVAL: bad parameters
- *
- * The callers are required to call dev_pm_opp_put() for the returned OPP after
- * use.
- */
-struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev,
- unsigned long u_volt)
-{
- struct opp_table *opp_table;
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
- if (!dev || !u_volt) {
- dev_err(dev, "%s: Invalid argument volt=%lu\n", __func__,
- u_volt);
- return ERR_PTR(-EINVAL);
- }
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table))
- return ERR_CAST(opp_table);
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->available) {
- if (temp_opp->supplies[0].u_volt > u_volt)
- break;
- opp = temp_opp;
- }
- }
-
- /* Increment the reference count of OPP */
- if (!IS_ERR(opp))
- dev_pm_opp_get(opp);
-
- mutex_unlock(&opp_table->lock);
- dev_pm_opp_put_opp_table(opp_table);
-
- return opp;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_by_volt);
-
-/**
* dev_pm_opp_find_level_exact() - search for an exact level
* @dev: device for which we do this operation
* @level: level to search for
@@ -650,33 +696,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_by_volt);
struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
unsigned int level)
{
- struct opp_table *opp_table;
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table)) {
- int r = PTR_ERR(opp_table);
-
- dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
- return ERR_PTR(r);
- }
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->level == level) {
- opp = temp_opp;
-
- /* Increment the reference count of OPP */
- dev_pm_opp_get(opp);
- break;
- }
- }
-
- mutex_unlock(&opp_table->lock);
- dev_pm_opp_put_opp_table(opp_table);
-
- return opp;
+ return _find_key_exact(dev, level, 0, true, _read_level, NULL);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact);
@@ -698,33 +718,11 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact);
struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
unsigned int *level)
{
- struct opp_table *opp_table;
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table)) {
- int r = PTR_ERR(opp_table);
-
- dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
- return ERR_PTR(r);
- }
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->available && temp_opp->level >= *level) {
- opp = temp_opp;
- *level = opp->level;
-
- /* Increment the reference count of OPP */
- dev_pm_opp_get(opp);
- break;
- }
- }
-
- mutex_unlock(&opp_table->lock);
- dev_pm_opp_put_opp_table(opp_table);
+ unsigned long temp = *level;
+ struct dev_pm_opp *opp;
+ opp = _find_key_ceil(dev, &temp, 0, true, _read_level, NULL);
+ *level = temp;
return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil);
@@ -732,7 +730,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil);
/**
* dev_pm_opp_find_bw_ceil() - Search for a rounded ceil bandwidth
* @dev: device for which we do this operation
- * @freq: start bandwidth
+ * @bw: start bandwidth
* @index: which bandwidth to compare, in case of OPPs with several values
*
* Search for the matching floor *available* OPP from a starting bandwidth
@@ -748,42 +746,14 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil);
* The callers are required to call dev_pm_opp_put() for the returned OPP after
* use.
*/
-struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev,
- unsigned int *bw, int index)
+struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev, unsigned int *bw,
+ int index)
{
- struct opp_table *opp_table;
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
- if (!dev || !bw) {
- dev_err(dev, "%s: Invalid argument bw=%p\n", __func__, bw);
- return ERR_PTR(-EINVAL);
- }
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table))
- return ERR_CAST(opp_table);
-
- if (index >= opp_table->path_count)
- return ERR_PTR(-EINVAL);
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->available && temp_opp->bandwidth) {
- if (temp_opp->bandwidth[index].peak >= *bw) {
- opp = temp_opp;
- *bw = opp->bandwidth[index].peak;
-
- /* Increment the reference count of OPP */
- dev_pm_opp_get(opp);
- break;
- }
- }
- }
-
- mutex_unlock(&opp_table->lock);
- dev_pm_opp_put_opp_table(opp_table);
+ unsigned long temp = *bw;
+ struct dev_pm_opp *opp;
+ opp = _find_key_ceil(dev, &temp, index, true, _read_bw, NULL);
+ *bw = temp;
return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_ceil);
@@ -791,7 +761,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_ceil);
/**
* dev_pm_opp_find_bw_floor() - Search for a rounded floor bandwidth
* @dev: device for which we do this operation
- * @freq: start bandwidth
+ * @bw: start bandwidth
* @index: which bandwidth to compare, in case of OPPs with several values
*
* Search for the matching floor *available* OPP from a starting bandwidth
@@ -810,41 +780,11 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_ceil);
struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
unsigned int *bw, int index)
{
- struct opp_table *opp_table;
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
-
- if (!dev || !bw) {
- dev_err(dev, "%s: Invalid argument bw=%p\n", __func__, bw);
- return ERR_PTR(-EINVAL);
- }
-
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table))
- return ERR_CAST(opp_table);
-
- if (index >= opp_table->path_count)
- return ERR_PTR(-EINVAL);
-
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->available && temp_opp->bandwidth) {
- /* go to the next node, before choosing prev */
- if (temp_opp->bandwidth[index].peak > *bw)
- break;
- opp = temp_opp;
- }
- }
-
- /* Increment the reference count of OPP */
- if (!IS_ERR(opp))
- dev_pm_opp_get(opp);
- mutex_unlock(&opp_table->lock);
- dev_pm_opp_put_opp_table(opp_table);
-
- if (!IS_ERR(opp))
- *bw = opp->bandwidth[index].peak;
+ unsigned long temp = *bw;
+ struct dev_pm_opp *opp;
+ opp = _find_key_floor(dev, &temp, index, true, _read_bw, NULL);
+ *bw = temp;
return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_floor);
@@ -874,80 +814,97 @@ static int _set_opp_voltage(struct device *dev, struct regulator *reg,
return ret;
}
-static inline int _generic_set_opp_clk_only(struct device *dev, struct clk *clk,
- unsigned long freq)
+static int
+_opp_config_clk_single(struct device *dev, struct opp_table *opp_table,
+ struct dev_pm_opp *opp, void *data, bool scaling_down)
{
+ unsigned long *target = data;
+ unsigned long freq;
int ret;
- /* We may reach here for devices which don't change frequency */
- if (IS_ERR(clk))
- return 0;
+ /* Either target or opp must be provided */
+ if (target) {
+ freq = *target;
+ } else if (opp) {
+ freq = opp->rates[0];
+ } else {
+ WARN_ON(1);
+ return -EINVAL;
+ }
- ret = clk_set_rate(clk, freq);
+ ret = clk_set_rate(opp_table->clk, freq);
if (ret) {
dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
ret);
+ } else {
+ opp_table->rate_clk_single = freq;
}
return ret;
}
-static int _generic_set_opp_regulator(struct opp_table *opp_table,
- struct device *dev,
- struct dev_pm_opp *opp,
- unsigned long freq,
- int scaling_down)
+/*
+ * Simple implementation for configuring multiple clocks. Configure clocks in
+ * the order in which they are present in the array while scaling up.
+ */
+int dev_pm_opp_config_clks_simple(struct device *dev,
+ struct opp_table *opp_table, struct dev_pm_opp *opp, void *data,
+ bool scaling_down)
{
- struct regulator *reg = opp_table->regulators[0];
- struct dev_pm_opp *old_opp = opp_table->current_opp;
+ int ret, i;
+
+ if (scaling_down) {
+ for (i = opp_table->clk_count - 1; i >= 0; i--) {
+ ret = clk_set_rate(opp_table->clks[i], opp->rates[i]);
+ if (ret) {
+ dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
+ ret);
+ return ret;
+ }
+ }
+ } else {
+ for (i = 0; i < opp_table->clk_count; i++) {
+ ret = clk_set_rate(opp_table->clks[i], opp->rates[i]);
+ if (ret) {
+ dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
+ ret);
+ return ret;
+ }
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_config_clks_simple);
+
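A sketch of how a platform with two clocks per OPP might opt in to the simple handler above. The struct dev_pm_opp_config / devm_pm_opp_set_config() wiring comes from the same series but is not shown in this hunk, so treat it and the clock names as assumptions:

#include <linux/pm_opp.h>

static const char * const example_clk_names[] = { "core", "mem", NULL };

static int example_opp_init(struct device *dev)
{
	struct dev_pm_opp_config config = {
		.clk_names	= example_clk_names,
		.config_clks	= dev_pm_opp_config_clks_simple,
	};

	/* assumed devm variant; returns 0 or a negative errno */
	return devm_pm_opp_set_config(dev, &config);
}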
+static int _opp_config_regulator_single(struct device *dev,
+ struct dev_pm_opp *old_opp, struct dev_pm_opp *new_opp,
+ struct regulator **regulators, unsigned int count)
+{
+ struct regulator *reg = regulators[0];
int ret;
/* This function only supports single regulator per device */
- if (WARN_ON(opp_table->regulator_count > 1)) {
+ if (WARN_ON(count > 1)) {
dev_err(dev, "multiple regulators are not supported\n");
return -EINVAL;
}
- /* Scaling up? Scale voltage before frequency */
- if (!scaling_down) {
- ret = _set_opp_voltage(dev, reg, opp->supplies);
- if (ret)
- goto restore_voltage;
- }
-
- /* Change frequency */
- ret = _generic_set_opp_clk_only(dev, opp_table->clk, freq);
+ ret = _set_opp_voltage(dev, reg, new_opp->supplies);
if (ret)
- goto restore_voltage;
-
- /* Scaling down? Scale voltage after frequency */
- if (scaling_down) {
- ret = _set_opp_voltage(dev, reg, opp->supplies);
- if (ret)
- goto restore_freq;
- }
+ return ret;
/*
* Enable the regulator after setting its voltages, otherwise it breaks
* some boot-enabled regulators.
*/
- if (unlikely(!opp_table->enabled)) {
+ if (unlikely(!new_opp->opp_table->enabled)) {
ret = regulator_enable(reg);
if (ret < 0)
dev_warn(dev, "Failed to enable regulator: %d", ret);
}
return 0;
-
-restore_freq:
- if (_generic_set_opp_clk_only(dev, opp_table->clk, old_opp->rate))
- dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
- __func__, old_opp->rate);
-restore_voltage:
- /* This shouldn't harm even if the voltages weren't updated earlier */
- _set_opp_voltage(dev, reg, old_opp->supplies);
-
- return ret;
}
static int _set_opp_bw(const struct opp_table *opp_table,
@@ -978,36 +935,6 @@ static int _set_opp_bw(const struct opp_table *opp_table,
return 0;
}
-static int _set_opp_custom(const struct opp_table *opp_table,
- struct device *dev, struct dev_pm_opp *opp,
- unsigned long freq)
-{
- struct dev_pm_set_opp_data *data = opp_table->set_opp_data;
- struct dev_pm_opp *old_opp = opp_table->current_opp;
- int size;
-
- /*
- * We support this only if dev_pm_opp_set_regulators() was called
- * earlier.
- */
- if (opp_table->sod_supplies) {
- size = sizeof(*old_opp->supplies) * opp_table->regulator_count;
- memcpy(data->old_opp.supplies, old_opp->supplies, size);
- memcpy(data->new_opp.supplies, opp->supplies, size);
- data->regulator_count = opp_table->regulator_count;
- } else {
- data->regulator_count = 0;
- }
-
- data->regulators = opp_table->regulators;
- data->clk = opp_table->clk;
- data->dev = dev;
- data->old_opp.rate = old_opp->rate;
- data->new_opp.rate = freq;
-
- return opp_table->set_opp(data);
-}
-
static int _set_required_opp(struct device *dev, struct device *pd_dev,
struct dev_pm_opp *opp, int i)
{
@@ -1019,7 +946,7 @@ static int _set_required_opp(struct device *dev, struct device *pd_dev,
ret = dev_pm_genpd_set_performance_state(pd_dev, pstate);
if (ret) {
- dev_err(dev, "Failed to set performance rate of %s: %d (%d)\n",
+ dev_err(dev, "Failed to set performance state of %s: %d (%d)\n",
dev_name(pd_dev), pstate, ret);
}
@@ -1138,7 +1065,7 @@ static int _disable_opp_table(struct device *dev, struct opp_table *opp_table)
}
static int _set_opp(struct device *dev, struct opp_table *opp_table,
- struct dev_pm_opp *opp, unsigned long freq)
+ struct dev_pm_opp *opp, void *clk_data, bool forced)
{
struct dev_pm_opp *old_opp;
int scaling_down, ret;
@@ -1153,18 +1080,17 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table,
old_opp = opp_table->current_opp;
/* Return early if nothing to do */
- if (old_opp == opp && opp_table->current_rate == freq &&
- opp_table->enabled) {
+ if (!forced && old_opp == opp && opp_table->enabled) {
dev_dbg(dev, "%s: OPPs are same, nothing to do\n", __func__);
return 0;
}
dev_dbg(dev, "%s: switching OPP: Freq %lu -> %lu Hz, Level %u -> %u, Bw %u -> %u\n",
- __func__, opp_table->current_rate, freq, old_opp->level,
+ __func__, old_opp->rates[0], opp->rates[0], old_opp->level,
opp->level, old_opp->bandwidth ? old_opp->bandwidth[0].peak : 0,
opp->bandwidth ? opp->bandwidth[0].peak : 0);
- scaling_down = _opp_compare_key(old_opp, opp);
+ scaling_down = _opp_compare_key(opp_table, old_opp, opp);
if (scaling_down == -1)
scaling_down = 0;
@@ -1181,23 +1107,38 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table,
dev_err(dev, "Failed to set bw: %d\n", ret);
return ret;
}
- }
- if (opp_table->set_opp) {
- ret = _set_opp_custom(opp_table, dev, opp, freq);
- } else if (opp_table->regulators) {
- ret = _generic_set_opp_regulator(opp_table, dev, opp, freq,
- scaling_down);
- } else {
- /* Only frequency scaling */
- ret = _generic_set_opp_clk_only(dev, opp_table->clk, freq);
+ if (opp_table->config_regulators) {
+ ret = opp_table->config_regulators(dev, old_opp, opp,
+ opp_table->regulators,
+ opp_table->regulator_count);
+ if (ret) {
+ dev_err(dev, "Failed to set regulator voltages: %d\n",
+ ret);
+ return ret;
+ }
+ }
}
- if (ret)
- return ret;
+ if (opp_table->config_clks) {
+ ret = opp_table->config_clks(dev, opp_table, opp, clk_data, scaling_down);
+ if (ret)
+ return ret;
+ }
/* Scaling down? Configure required OPPs after frequency */
if (scaling_down) {
+ if (opp_table->config_regulators) {
+ ret = opp_table->config_regulators(dev, old_opp, opp,
+ opp_table->regulators,
+ opp_table->regulator_count);
+ if (ret) {
+ dev_err(dev, "Failed to set regulator voltages: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
ret = _set_opp_bw(opp_table, opp, dev);
if (ret) {
dev_err(dev, "Failed to set bw: %d\n", ret);
@@ -1217,7 +1158,6 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table,
/* Make sure current_opp doesn't get freed */
dev_pm_opp_get(opp);
opp_table->current_opp = opp;
- opp_table->current_rate = freq;
return ret;
}
@@ -1238,6 +1178,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
struct opp_table *opp_table;
unsigned long freq = 0, temp_freq;
struct dev_pm_opp *opp = NULL;
+ bool forced = false;
int ret;
opp_table = _find_opp_table(dev);
@@ -1255,7 +1196,8 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
* equivalent to a clk_set_rate()
*/
if (!_get_opp_count(opp_table)) {
- ret = _generic_set_opp_clk_only(dev, opp_table->clk, target_freq);
+ ret = opp_table->config_clks(dev, opp_table, NULL,
+ &target_freq, false);
goto put_opp_table;
}
@@ -1276,12 +1218,22 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
__func__, freq, ret);
goto put_opp_table;
}
+
+ /*
+ * An OPP entry specifies the highest frequency at which other
+ * properties of the OPP entry apply. Even if the new OPP is the
+ * same as the old one, we may still reach here for a different
+ * value of the frequency. In such a case, do not abort but
+ * configure the hardware to the desired frequency forcefully.
+ */
+ forced = opp_table->rate_clk_single != target_freq;
}
- ret = _set_opp(dev, opp_table, opp, freq);
+ ret = _set_opp(dev, opp_table, opp, &target_freq, forced);
if (target_freq)
dev_pm_opp_put(opp);
+
put_opp_table:
dev_pm_opp_put_opp_table(opp_table);
return ret;
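As a concrete illustration of the "forced" logic above (hypothetical numbers): with OPP entries at 800 MHz and 1 GHz, requests for 750 MHz and then 800 MHz both resolve to the 800 MHz OPP, yet the second call still programs the clock, because rate_clk_single is 750 MHz at that point and so "forced" evaluates to true.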
@@ -1309,7 +1261,7 @@ int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp)
return PTR_ERR(opp_table);
}
- ret = _set_opp(dev, opp_table, opp, opp ? opp->rate : 0);
+ ret = _set_opp(dev, opp_table, opp, NULL, false);
dev_pm_opp_put_opp_table(opp_table);
return ret;
@@ -1366,6 +1318,8 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
INIT_LIST_HEAD(&opp_table->dev_list);
INIT_LIST_HEAD(&opp_table->lazy);
+ opp_table->clk = ERR_PTR(-ENODEV);
+
/* Mark regulator count uninitialized */
opp_table->regulator_count = -1;
@@ -1412,20 +1366,38 @@ static struct opp_table *_update_opp_table_clk(struct device *dev,
int ret;
/*
- * Return early if we don't need to get clk or we have already tried it
+ * Return early if we don't need to get clk or we have already done it
* earlier.
*/
- if (!getclk || IS_ERR(opp_table) || opp_table->clk)
+ if (!getclk || IS_ERR(opp_table) || !IS_ERR(opp_table->clk) ||
+ opp_table->clks)
return opp_table;
/* Find clk for the device */
opp_table->clk = clk_get(dev, NULL);
ret = PTR_ERR_OR_ZERO(opp_table->clk);
- if (!ret)
+ if (!ret) {
+ opp_table->config_clks = _opp_config_clk_single;
+ opp_table->clk_count = 1;
return opp_table;
+ }
if (ret == -ENOENT) {
+ /*
+ * There are a few platforms which don't want the OPP core to
+ * manage device's clock settings. In such cases neither the
+ * platform provides the clks explicitly to us, nor the DT
+ * contains a valid clk entry. The OPP nodes in DT may still
+ * contain "opp-hz" property though, which we need to parse and
+ * allow the platform to find an OPP based on freq later on.
+ *
+ * This is a simple solution to take care of such corner cases,
+ * i.e. make the clk_count 1, which lets us allocate space for
+ * frequency in opp->rates and also parse the entries in DT.
+ */
+ opp_table->clk_count = 1;
+
dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, ret);
return opp_table;
}
@@ -1528,7 +1500,7 @@ static void _opp_table_kref_release(struct kref *kref)
_of_clear_opp_table(opp_table);
- /* Release clk */
+ /* Release automatically acquired single clk */
if (!IS_ERR(opp_table->clk))
clk_put(opp_table->clk);
@@ -1581,7 +1553,7 @@ static void _opp_kref_release(struct kref *kref)
* frequency/voltage list.
*/
blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp);
- _of_opp_free_required_opps(opp_table, opp);
+ _of_clear_opp(opp_table, opp);
opp_debug_remove_one(opp);
kfree(opp);
}
@@ -1613,10 +1585,13 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
if (IS_ERR(opp_table))
return;
+ if (!assert_single_clk(opp_table))
+ goto put_table;
+
mutex_lock(&opp_table->lock);
list_for_each_entry(iter, &opp_table->opp_list, node) {
- if (iter->rate == freq) {
+ if (iter->rates[0] == freq) {
opp = iter;
break;
}
@@ -1634,6 +1609,7 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
__func__, freq);
}
+put_table:
/* Drop the reference taken by _find_opp_table() */
dev_pm_opp_put_opp_table(opp_table);
}
@@ -1720,26 +1696,31 @@ void dev_pm_opp_remove_all_dynamic(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic);
-struct dev_pm_opp *_opp_allocate(struct opp_table *table)
+struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table)
{
struct dev_pm_opp *opp;
- int supply_count, supply_size, icc_size;
+ int supply_count, supply_size, icc_size, clk_size;
/* Allocate space for at least one supply */
- supply_count = table->regulator_count > 0 ? table->regulator_count : 1;
+ supply_count = opp_table->regulator_count > 0 ?
+ opp_table->regulator_count : 1;
supply_size = sizeof(*opp->supplies) * supply_count;
- icc_size = sizeof(*opp->bandwidth) * table->path_count;
+ clk_size = sizeof(*opp->rates) * opp_table->clk_count;
+ icc_size = sizeof(*opp->bandwidth) * opp_table->path_count;
/* allocate new OPP node and supplies structures */
- opp = kzalloc(sizeof(*opp) + supply_size + icc_size, GFP_KERNEL);
-
+ opp = kzalloc(sizeof(*opp) + supply_size + clk_size + icc_size, GFP_KERNEL);
if (!opp)
return NULL;
- /* Put the supplies at the end of the OPP structure as an empty array */
+ /* Put the supplies, bw and clock at the end of the OPP structure */
opp->supplies = (struct dev_pm_opp_supply *)(opp + 1);
+
+ opp->rates = (unsigned long *)(opp->supplies + supply_count);
+
if (icc_size)
- opp->bandwidth = (struct dev_pm_opp_icc_bw *)(opp->supplies + supply_count);
+ opp->bandwidth = (struct dev_pm_opp_icc_bw *)(opp->rates + opp_table->clk_count);
+
INIT_LIST_HEAD(&opp->node);
return opp;
@@ -1770,15 +1751,57 @@ static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
return true;
}
-int _opp_compare_key(struct dev_pm_opp *opp1, struct dev_pm_opp *opp2)
+static int _opp_compare_rate(struct opp_table *opp_table,
+ struct dev_pm_opp *opp1, struct dev_pm_opp *opp2)
+{
+ int i;
+
+ for (i = 0; i < opp_table->clk_count; i++) {
+ if (opp1->rates[i] != opp2->rates[i])
+ return opp1->rates[i] < opp2->rates[i] ? -1 : 1;
+ }
+
+ /* Same rates for both OPPs */
+ return 0;
+}
+
+static int _opp_compare_bw(struct opp_table *opp_table, struct dev_pm_opp *opp1,
+ struct dev_pm_opp *opp2)
{
- if (opp1->rate != opp2->rate)
- return opp1->rate < opp2->rate ? -1 : 1;
- if (opp1->bandwidth && opp2->bandwidth &&
- opp1->bandwidth[0].peak != opp2->bandwidth[0].peak)
- return opp1->bandwidth[0].peak < opp2->bandwidth[0].peak ? -1 : 1;
+ int i;
+
+ for (i = 0; i < opp_table->path_count; i++) {
+ if (opp1->bandwidth[i].peak != opp2->bandwidth[i].peak)
+ return opp1->bandwidth[i].peak < opp2->bandwidth[i].peak ? -1 : 1;
+ }
+
+ /* Same bw for both OPPs */
+ return 0;
+}
+
+/*
+ * Returns
+ * 0: opp1 == opp2
+ * 1: opp1 > opp2
+ * -1: opp1 < opp2
+ */
+int _opp_compare_key(struct opp_table *opp_table, struct dev_pm_opp *opp1,
+ struct dev_pm_opp *opp2)
+{
+ int ret;
+
+ ret = _opp_compare_rate(opp_table, opp1, opp2);
+ if (ret)
+ return ret;
+
+ ret = _opp_compare_bw(opp_table, opp1, opp2);
+ if (ret)
+ return ret;
+
if (opp1->level != opp2->level)
return opp1->level < opp2->level ? -1 : 1;
+
+ /* Duplicate OPPs */
return 0;
}
@@ -1798,7 +1821,7 @@ static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp,
* loop.
*/
list_for_each_entry(opp, &opp_table->opp_list, node) {
- opp_cmp = _opp_compare_key(new_opp, opp);
+ opp_cmp = _opp_compare_key(opp_table, new_opp, opp);
if (opp_cmp > 0) {
*head = &opp->node;
continue;
@@ -1809,8 +1832,8 @@ static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp,
/* Duplicate OPPs */
dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
- __func__, opp->rate, opp->supplies[0].u_volt,
- opp->available, new_opp->rate,
+ __func__, opp->rates[0], opp->supplies[0].u_volt,
+ opp->available, new_opp->rates[0],
new_opp->supplies[0].u_volt, new_opp->available);
/* Should we compare voltages for all regulators here ? */
@@ -1831,7 +1854,7 @@ void _required_opps_available(struct dev_pm_opp *opp, int count)
opp->available = false;
pr_warn("%s: OPP not supported by required OPP %pOF (%lu)\n",
- __func__, opp->required_opps[i]->np, opp->rate);
+ __func__, opp->required_opps[i]->np, opp->rates[0]);
return;
}
}
@@ -1847,7 +1870,7 @@ void _required_opps_available(struct dev_pm_opp *opp, int count)
* should be considered an error by the callers of _opp_add().
*/
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
- struct opp_table *opp_table, bool rate_not_available)
+ struct opp_table *opp_table)
{
struct list_head *head;
int ret;
@@ -1872,7 +1895,7 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
if (!_opp_supported_by_regulators(new_opp, opp_table)) {
new_opp->available = false;
dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
- __func__, new_opp->rate);
+ __func__, new_opp->rates[0]);
}
/* required-opps not fully initialized yet */
@@ -1913,12 +1936,15 @@ int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
unsigned long tol;
int ret;
+ if (!assert_single_clk(opp_table))
+ return -EINVAL;
+
new_opp = _opp_allocate(opp_table);
if (!new_opp)
return -ENOMEM;
/* populate the opp table */
- new_opp->rate = freq;
+ new_opp->rates[0] = freq;
tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
new_opp->supplies[0].u_volt = u_volt;
new_opp->supplies[0].u_volt_min = u_volt - tol;
@@ -1926,7 +1952,7 @@ int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
new_opp->available = true;
new_opp->dynamic = dynamic;
- ret = _opp_add(dev, new_opp, opp_table, false);
+ ret = _opp_add(dev, new_opp, opp_table);
if (ret) {
/* Don't return error for duplicate OPPs */
if (ret == -EBUSY)
@@ -1948,7 +1974,7 @@ free_opp:
}
/**
- * dev_pm_opp_set_supported_hw() - Set supported platforms
+ * _opp_set_supported_hw() - Set supported platforms
* @dev: Device for which supported-hw has to be set.
* @versions: Array of hierarchy of versions to match.
* @count: Number of elements in the array.
@@ -1958,87 +1984,42 @@ free_opp:
* OPPs, which are available for those versions, based on its 'opp-supported-hw'
* property.
*/
-struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev,
- const u32 *versions, unsigned int count)
+static int _opp_set_supported_hw(struct opp_table *opp_table,
+ const u32 *versions, unsigned int count)
{
- struct opp_table *opp_table;
-
- opp_table = _add_opp_table(dev, false);
- if (IS_ERR(opp_table))
- return opp_table;
-
- /* Make sure there are no concurrent readers while updating opp_table */
- WARN_ON(!list_empty(&opp_table->opp_list));
-
/* Another CPU that shares the OPP table has set the property ? */
if (opp_table->supported_hw)
- return opp_table;
+ return 0;
opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
GFP_KERNEL);
- if (!opp_table->supported_hw) {
- dev_pm_opp_put_opp_table(opp_table);
- return ERR_PTR(-ENOMEM);
- }
+ if (!opp_table->supported_hw)
+ return -ENOMEM;
opp_table->supported_hw_count = count;
- return opp_table;
+ return 0;
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
/**
- * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
- * @opp_table: OPP table returned by dev_pm_opp_set_supported_hw().
+ * _opp_put_supported_hw() - Releases resources blocked for supported hw
+ * @opp_table: OPP table returned by _opp_set_supported_hw().
*
* This is required only for the V2 bindings, and is called for a matching
- * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure
+ * _opp_set_supported_hw(). Until this is called, the opp_table structure
* will not be freed.
*/
-void dev_pm_opp_put_supported_hw(struct opp_table *opp_table)
-{
- if (unlikely(!opp_table))
- return;
-
- kfree(opp_table->supported_hw);
- opp_table->supported_hw = NULL;
- opp_table->supported_hw_count = 0;
-
- dev_pm_opp_put_opp_table(opp_table);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
-
-static void devm_pm_opp_supported_hw_release(void *data)
+static void _opp_put_supported_hw(struct opp_table *opp_table)
{
- dev_pm_opp_put_supported_hw(data);
-}
-
-/**
- * devm_pm_opp_set_supported_hw() - Set supported platforms
- * @dev: Device for which supported-hw has to be set.
- * @versions: Array of hierarchy of versions to match.
- * @count: Number of elements in the array.
- *
- * This is a resource-managed variant of dev_pm_opp_set_supported_hw().
- *
- * Return: 0 on success and errorno otherwise.
- */
-int devm_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
- unsigned int count)
-{
- struct opp_table *opp_table;
-
- opp_table = dev_pm_opp_set_supported_hw(dev, versions, count);
- if (IS_ERR(opp_table))
- return PTR_ERR(opp_table);
-
- return devm_add_action_or_reset(dev, devm_pm_opp_supported_hw_release,
- opp_table);
+ if (opp_table->supported_hw) {
+ kfree(opp_table->supported_hw);
+ opp_table->supported_hw = NULL;
+ opp_table->supported_hw_count = 0;
+ }
}
-EXPORT_SYMBOL_GPL(devm_pm_opp_set_supported_hw);
/**
- * dev_pm_opp_set_prop_name() - Set prop-extn name
+ * _opp_set_prop_name() - Set prop-extn name
* @dev: Device for which the prop-name has to be set.
* @name: name to postfix to properties.
*
@@ -2047,53 +2028,36 @@ EXPORT_SYMBOL_GPL(devm_pm_opp_set_supported_hw);
* which the extension will apply are opp-microvolt and opp-microamp. OPP core
* should postfix the property name with -<name> while looking for them.
*/
-struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
+static int _opp_set_prop_name(struct opp_table *opp_table, const char *name)
{
- struct opp_table *opp_table;
-
- opp_table = _add_opp_table(dev, false);
- if (IS_ERR(opp_table))
- return opp_table;
-
- /* Make sure there are no concurrent readers while updating opp_table */
- WARN_ON(!list_empty(&opp_table->opp_list));
-
/* Another CPU that shares the OPP table has set the property ? */
- if (opp_table->prop_name)
- return opp_table;
-
- opp_table->prop_name = kstrdup(name, GFP_KERNEL);
if (!opp_table->prop_name) {
- dev_pm_opp_put_opp_table(opp_table);
- return ERR_PTR(-ENOMEM);
+ opp_table->prop_name = kstrdup(name, GFP_KERNEL);
+ if (!opp_table->prop_name)
+ return -ENOMEM;
}
- return opp_table;
+ return 0;
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
/**
- * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
- * @opp_table: OPP table returned by dev_pm_opp_set_prop_name().
+ * _opp_put_prop_name() - Releases resources blocked for prop-name
+ * @opp_table: OPP table returned by _opp_set_prop_name().
*
* This is required only for the V2 bindings, and is called for a matching
- * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure
+ * _opp_set_prop_name(). Until this is called, the opp_table structure
* will not be freed.
*/
-void dev_pm_opp_put_prop_name(struct opp_table *opp_table)
+static void _opp_put_prop_name(struct opp_table *opp_table)
{
- if (unlikely(!opp_table))
- return;
-
- kfree(opp_table->prop_name);
- opp_table->prop_name = NULL;
-
- dev_pm_opp_put_opp_table(opp_table);
+ if (opp_table->prop_name) {
+ kfree(opp_table->prop_name);
+ opp_table->prop_name = NULL;
+ }
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
/**
- * dev_pm_opp_set_regulators() - Set regulator names for the device
+ * _opp_set_regulators() - Set regulator names for the device
* @dev: Device for which regulator name is being set.
* @names: Array of pointers to the names of the regulator.
* @count: Number of regulators.
@@ -2104,36 +2068,29 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
*
* This must be called before any OPPs are initialized for the device.
*/
-struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
- const char * const names[],
- unsigned int count)
+static int _opp_set_regulators(struct opp_table *opp_table, struct device *dev,
+ const char * const names[])
{
- struct dev_pm_opp_supply *supplies;
- struct opp_table *opp_table;
+ const char * const *temp = names;
struct regulator *reg;
- int ret, i;
+ int count = 0, ret, i;
- opp_table = _add_opp_table(dev, false);
- if (IS_ERR(opp_table))
- return opp_table;
+ /* Count number of regulators */
+ while (*temp++)
+ count++;
- /* This should be called before OPPs are initialized */
- if (WARN_ON(!list_empty(&opp_table->opp_list))) {
- ret = -EBUSY;
- goto err;
- }
+ if (!count)
+ return -EINVAL;
/* Another CPU that shares the OPP table has set the regulators ? */
if (opp_table->regulators)
- return opp_table;
+ return 0;
opp_table->regulators = kmalloc_array(count,
sizeof(*opp_table->regulators),
GFP_KERNEL);
- if (!opp_table->regulators) {
- ret = -ENOMEM;
- goto err;
- }
+ if (!opp_table->regulators)
+ return -ENOMEM;
for (i = 0; i < count; i++) {
reg = regulator_get_optional(dev, names[i]);
@@ -2149,21 +2106,11 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
opp_table->regulator_count = count;
- supplies = kmalloc_array(count * 2, sizeof(*supplies), GFP_KERNEL);
- if (!supplies) {
- ret = -ENOMEM;
- goto free_regulators;
- }
-
- mutex_lock(&opp_table->lock);
- opp_table->sod_supplies = supplies;
- if (opp_table->set_opp_data) {
- opp_table->set_opp_data->old_opp.supplies = supplies;
- opp_table->set_opp_data->new_opp.supplies = supplies + count;
- }
- mutex_unlock(&opp_table->lock);
+ /* Set generic config_regulators() for single regulators here */
+ if (count == 1)
+ opp_table->config_regulators = _opp_config_regulator_single;
- return opp_table;
+ return 0;
free_regulators:
while (i != 0)
@@ -2172,26 +2119,20 @@ free_regulators:
kfree(opp_table->regulators);
opp_table->regulators = NULL;
opp_table->regulator_count = -1;
-err:
- dev_pm_opp_put_opp_table(opp_table);
- return ERR_PTR(ret);
+ return ret;
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulators);
/**
- * dev_pm_opp_put_regulators() - Releases resources blocked for regulator
- * @opp_table: OPP table returned from dev_pm_opp_set_regulators().
+ * _opp_put_regulators() - Releases resources blocked for regulator
+ * @opp_table: OPP table returned from _opp_set_regulators().
*/
-void dev_pm_opp_put_regulators(struct opp_table *opp_table)
+static void _opp_put_regulators(struct opp_table *opp_table)
{
int i;
- if (unlikely(!opp_table))
- return;
-
if (!opp_table->regulators)
- goto put_opp_table;
+ return;
if (opp_table->enabled) {
for (i = opp_table->regulator_count - 1; i >= 0; i--)
@@ -2201,252 +2142,158 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table)
for (i = opp_table->regulator_count - 1; i >= 0; i--)
regulator_put(opp_table->regulators[i]);
- mutex_lock(&opp_table->lock);
- if (opp_table->set_opp_data) {
- opp_table->set_opp_data->old_opp.supplies = NULL;
- opp_table->set_opp_data->new_opp.supplies = NULL;
- }
-
- kfree(opp_table->sod_supplies);
- opp_table->sod_supplies = NULL;
- mutex_unlock(&opp_table->lock);
-
kfree(opp_table->regulators);
opp_table->regulators = NULL;
opp_table->regulator_count = -1;
-
-put_opp_table:
- dev_pm_opp_put_opp_table(opp_table);
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulators);
-static void devm_pm_opp_regulators_release(void *data)
+static void _put_clks(struct opp_table *opp_table, int count)
{
- dev_pm_opp_put_regulators(data);
-}
-
-/**
- * devm_pm_opp_set_regulators() - Set regulator names for the device
- * @dev: Device for which regulator name is being set.
- * @names: Array of pointers to the names of the regulator.
- * @count: Number of regulators.
- *
- * This is a resource-managed variant of dev_pm_opp_set_regulators().
- *
- * Return: 0 on success and errorno otherwise.
- */
-int devm_pm_opp_set_regulators(struct device *dev,
- const char * const names[],
- unsigned int count)
-{
- struct opp_table *opp_table;
+ int i;
- opp_table = dev_pm_opp_set_regulators(dev, names, count);
- if (IS_ERR(opp_table))
- return PTR_ERR(opp_table);
+ for (i = count - 1; i >= 0; i--)
+ clk_put(opp_table->clks[i]);
- return devm_add_action_or_reset(dev, devm_pm_opp_regulators_release,
- opp_table);
+ kfree(opp_table->clks);
+ opp_table->clks = NULL;
}
-EXPORT_SYMBOL_GPL(devm_pm_opp_set_regulators);
/**
- * dev_pm_opp_set_clkname() - Set clk name for the device
- * @dev: Device for which clk name is being set.
- * @name: Clk name.
- *
- * In order to support OPP switching, OPP layer needs to get pointer to the
- * clock for the device. Simple cases work fine without using this routine (i.e.
- * by passing connection-id as NULL), but for a device with multiple clocks
- * available, the OPP core needs to know the exact name of the clk to use.
+ * _opp_set_clknames() - Set clk names for the device
+ * @dev: Device for which clk names are being set.
+ * @names: Clk names.
+ *
+ * In order to support OPP switching, OPP layer needs to get pointers to the
+ * clocks for the device. Simple cases work fine without using this routine
+ * (i.e. by passing connection-id as NULL), but for a device with multiple
+ * clocks available, the OPP core needs to know the exact names of the clks to
+ * use.
*
* This must be called before any OPPs are initialized for the device.
*/
-struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name)
+static int _opp_set_clknames(struct opp_table *opp_table, struct device *dev,
+ const char * const names[],
+ config_clks_t config_clks)
{
- struct opp_table *opp_table;
- int ret;
+ const char * const *temp = names;
+ int count = 0, ret, i;
+ struct clk *clk;
- opp_table = _add_opp_table(dev, false);
- if (IS_ERR(opp_table))
- return opp_table;
+ /* Count number of clks */
+ while (*temp++)
+ count++;
- /* This should be called before OPPs are initialized */
- if (WARN_ON(!list_empty(&opp_table->opp_list))) {
- ret = -EBUSY;
- goto err;
- }
+ /*
+ * This is a special case where we have a single clock, whose connection
+ * id name is NULL, i.e. first two entries are NULL in the array.
+ */
+ if (!count && !names[1])
+ count = 1;
- /* clk shouldn't be initialized at this point */
- if (WARN_ON(opp_table->clk)) {
- ret = -EBUSY;
- goto err;
- }
+ /* Fail early for invalid configurations */
+ if (!count || (!config_clks && count > 1))
+ return -EINVAL;
- /* Find clk for the device */
- opp_table->clk = clk_get(dev, name);
- if (IS_ERR(opp_table->clk)) {
- ret = dev_err_probe(dev, PTR_ERR(opp_table->clk),
- "%s: Couldn't find clock\n", __func__);
- goto err;
- }
+ /* Another CPU that shares the OPP table has set the clkname ? */
+ if (opp_table->clks)
+ return 0;
- return opp_table;
+ opp_table->clks = kmalloc_array(count, sizeof(*opp_table->clks),
+ GFP_KERNEL);
+ if (!opp_table->clks)
+ return -ENOMEM;
-err:
- dev_pm_opp_put_opp_table(opp_table);
+ /* Find clks for the device */
+ for (i = 0; i < count; i++) {
+ clk = clk_get(dev, names[i]);
+ if (IS_ERR(clk)) {
+ ret = dev_err_probe(dev, PTR_ERR(clk),
+ "%s: Couldn't find clock with name: %s\n",
+ __func__, names[i]);
+ goto free_clks;
+ }
- return ERR_PTR(ret);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_set_clkname);
+ opp_table->clks[i] = clk;
+ }
-/**
- * dev_pm_opp_put_clkname() - Releases resources blocked for clk.
- * @opp_table: OPP table returned from dev_pm_opp_set_clkname().
- */
-void dev_pm_opp_put_clkname(struct opp_table *opp_table)
-{
- if (unlikely(!opp_table))
- return;
+ opp_table->clk_count = count;
+ opp_table->config_clks = config_clks;
- clk_put(opp_table->clk);
- opp_table->clk = ERR_PTR(-EINVAL);
+ /* Set the generic single-clk helper here */
+ if (count == 1) {
+ if (!opp_table->config_clks)
+ opp_table->config_clks = _opp_config_clk_single;
- dev_pm_opp_put_opp_table(opp_table);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_put_clkname);
+ /*
+ * We could have just dropped the "clk" field and used "clks"
+ * everywhere. Instead we kept the "clk" field around for
+ * following reasons:
+ *
+ * - avoiding clks[0] everywhere else.
+ * - not running the single-clk helpers for a multi-clk use case by
+ * mistake.
+ *
+ * Since this is the single-clk case, just update the clk pointer
+ * too.
+ */
+ opp_table->clk = opp_table->clks[0];
+ }
-static void devm_pm_opp_clkname_release(void *data)
-{
- dev_pm_opp_put_clkname(data);
+ return 0;
+
+free_clks:
+ _put_clks(opp_table, i);
+ return ret;
}
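A minimal sketch of the clk_names convention expected by _opp_set_clknames() (the array names below are hypothetical and not part of this patch): callers pass a NULL-terminated array of connection ids through dev_pm_opp_set_config(), and a device with a single, unnamed clock can pass an array whose first two entries are NULL, which the code above treats as a single-clk configuration.

	/* Hypothetical examples only; connection ids depend on the device. */
	static const char * const foo_clk_names[] = { "core", "mem", NULL };

	/* Single clock with a NULL connection id: first two entries NULL. */
	static const char * const foo_single_clk[] = { NULL, NULL };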
/**
- * devm_pm_opp_set_clkname() - Set clk name for the device
- * @dev: Device for which clk name is being set.
- * @name: Clk name.
- *
- * This is a resource-managed variant of dev_pm_opp_set_clkname().
- *
- * Return: 0 on success and errorno otherwise.
+ * _opp_put_clknames() - Releases resources blocked for clks.
+ * @opp_table: OPP table returned from _opp_set_clknames().
*/
-int devm_pm_opp_set_clkname(struct device *dev, const char *name)
+static void _opp_put_clknames(struct opp_table *opp_table)
{
- struct opp_table *opp_table;
+ if (!opp_table->clks)
+ return;
- opp_table = dev_pm_opp_set_clkname(dev, name);
- if (IS_ERR(opp_table))
- return PTR_ERR(opp_table);
+ opp_table->config_clks = NULL;
+ opp_table->clk = ERR_PTR(-ENODEV);
- return devm_add_action_or_reset(dev, devm_pm_opp_clkname_release,
- opp_table);
+ _put_clks(opp_table, opp_table->clk_count);
}
-EXPORT_SYMBOL_GPL(devm_pm_opp_set_clkname);
/**
- * dev_pm_opp_register_set_opp_helper() - Register custom set OPP helper
+ * _opp_set_config_regulators_helper() - Register custom set regulator helper.
* @dev: Device for which the helper is getting registered.
- * @set_opp: Custom set OPP helper.
+ * @config_regulators: Custom set regulator helper.
*
- * This is useful to support complex platforms (like platforms with multiple
- * regulators per device), instead of the generic OPP set rate helper.
+ * This is useful to support platforms with multiple regulators per device.
*
* This must be called before any OPPs are initialized for the device.
*/
-struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
- int (*set_opp)(struct dev_pm_set_opp_data *data))
+static int _opp_set_config_regulators_helper(struct opp_table *opp_table,
+ struct device *dev, config_regulators_t config_regulators)
{
- struct dev_pm_set_opp_data *data;
- struct opp_table *opp_table;
-
- if (!set_opp)
- return ERR_PTR(-EINVAL);
-
- opp_table = _add_opp_table(dev, false);
- if (IS_ERR(opp_table))
- return opp_table;
-
- /* This should be called before OPPs are initialized */
- if (WARN_ON(!list_empty(&opp_table->opp_list))) {
- dev_pm_opp_put_opp_table(opp_table);
- return ERR_PTR(-EBUSY);
- }
-
/* Another CPU that shares the OPP table has set the helper ? */
- if (opp_table->set_opp)
- return opp_table;
+ if (!opp_table->config_regulators)
+ opp_table->config_regulators = config_regulators;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return ERR_PTR(-ENOMEM);
-
- mutex_lock(&opp_table->lock);
- opp_table->set_opp_data = data;
- if (opp_table->sod_supplies) {
- data->old_opp.supplies = opp_table->sod_supplies;
- data->new_opp.supplies = opp_table->sod_supplies +
- opp_table->regulator_count;
- }
- mutex_unlock(&opp_table->lock);
-
- opp_table->set_opp = set_opp;
-
- return opp_table;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper);
-
-/**
- * dev_pm_opp_unregister_set_opp_helper() - Releases resources blocked for
- * set_opp helper
- * @opp_table: OPP table returned from dev_pm_opp_register_set_opp_helper().
- *
- * Release resources blocked for platform specific set_opp helper.
- */
-void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table)
-{
- if (unlikely(!opp_table))
- return;
-
- opp_table->set_opp = NULL;
-
- mutex_lock(&opp_table->lock);
- kfree(opp_table->set_opp_data);
- opp_table->set_opp_data = NULL;
- mutex_unlock(&opp_table->lock);
-
- dev_pm_opp_put_opp_table(opp_table);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_unregister_set_opp_helper);
-
-static void devm_pm_opp_unregister_set_opp_helper(void *data)
-{
- dev_pm_opp_unregister_set_opp_helper(data);
+ return 0;
}
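A hedged sketch of a platform config_regulators callback (all names hypothetical): the signature follows config_regulators_t, as used by ti_opp_config_regulators() later in this patch, and the callback is handed to the core through dev_pm_opp_set_config().

	static int foo_config_regulators(struct device *dev,
					 struct dev_pm_opp *old_opp,
					 struct dev_pm_opp *new_opp,
					 struct regulator **regulators,
					 unsigned int count)
	{
		/*
		 * Sequence the regulators for the transition from old_opp to
		 * new_opp here, e.g. with regulator_set_voltage() on each of
		 * the 'count' supplies. Return 0 on success or a negative
		 * error number.
		 */
		return 0;
	}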
/**
- * devm_pm_opp_register_set_opp_helper() - Register custom set OPP helper
- * @dev: Device for which the helper is getting registered.
- * @set_opp: Custom set OPP helper.
- *
- * This is a resource-managed version of dev_pm_opp_register_set_opp_helper().
+ * _opp_put_config_regulators_helper() - Releases resources blocked for
+ * config_regulators helper.
+ * @opp_table: OPP table returned from _opp_set_config_regulators_helper().
*
- * Return: 0 on success and errorno otherwise.
+ * Release resources blocked for platform specific config_regulators helper.
*/
-int devm_pm_opp_register_set_opp_helper(struct device *dev,
- int (*set_opp)(struct dev_pm_set_opp_data *data))
+static void _opp_put_config_regulators_helper(struct opp_table *opp_table)
{
- struct opp_table *opp_table;
-
- opp_table = dev_pm_opp_register_set_opp_helper(dev, set_opp);
- if (IS_ERR(opp_table))
- return PTR_ERR(opp_table);
-
- return devm_add_action_or_reset(dev, devm_pm_opp_unregister_set_opp_helper,
- opp_table);
+ if (opp_table->config_regulators)
+ opp_table->config_regulators = NULL;
}
-EXPORT_SYMBOL_GPL(devm_pm_opp_register_set_opp_helper);
-static void _opp_detach_genpd(struct opp_table *opp_table)
+static void _detach_genpd(struct opp_table *opp_table)
{
int index;
@@ -2466,7 +2313,7 @@ static void _opp_detach_genpd(struct opp_table *opp_table)
}
/**
- * dev_pm_opp_attach_genpd - Attach genpd(s) for the device and save virtual device pointer
+ * _opp_attach_genpd - Attach genpd(s) for the device and save virtual device pointer
* @dev: Consumer device for which the genpd is getting attached.
* @names: Null terminated array of pointers containing names of genpd to attach.
* @virt_devs: Pointer to return the array of virtual devices.
@@ -2487,30 +2334,23 @@ static void _opp_detach_genpd(struct opp_table *opp_table)
* The order of entries in the names array must match the order in which
* "required-opps" are added in DT.
*/
-struct opp_table *dev_pm_opp_attach_genpd(struct device *dev,
- const char * const *names, struct device ***virt_devs)
+static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev,
+ const char * const *names, struct device ***virt_devs)
{
- struct opp_table *opp_table;
struct device *virt_dev;
int index = 0, ret = -EINVAL;
const char * const *name = names;
- opp_table = _add_opp_table(dev, false);
- if (IS_ERR(opp_table))
- return opp_table;
-
if (opp_table->genpd_virt_devs)
- return opp_table;
+ return 0;
/*
* If the genpd's OPP table isn't already initialized, parsing of the
 * required-opps fails for dev. We should retry this after genpd's OPP
* table is added.
*/
- if (!opp_table->required_opp_count) {
- ret = -EPROBE_DEFER;
- goto put_table;
- }
+ if (!opp_table->required_opp_count)
+ return -EPROBE_DEFER;
mutex_lock(&opp_table->genpd_virt_dev_lock);
@@ -2528,8 +2368,8 @@ struct opp_table *dev_pm_opp_attach_genpd(struct device *dev,
}
virt_dev = dev_pm_domain_attach_by_name(dev, *name);
- if (IS_ERR(virt_dev)) {
- ret = PTR_ERR(virt_dev);
+ if (IS_ERR_OR_NULL(virt_dev)) {
+ ret = PTR_ERR(virt_dev) ? : -ENODEV;
dev_err(dev, "Couldn't attach to pm_domain: %d\n", ret);
goto err;
}
@@ -2543,73 +2383,230 @@ struct opp_table *dev_pm_opp_attach_genpd(struct device *dev,
*virt_devs = opp_table->genpd_virt_devs;
mutex_unlock(&opp_table->genpd_virt_dev_lock);
- return opp_table;
+ return 0;
err:
- _opp_detach_genpd(opp_table);
+ _detach_genpd(opp_table);
unlock:
mutex_unlock(&opp_table->genpd_virt_dev_lock);
+ return ret;
-put_table:
- dev_pm_opp_put_opp_table(opp_table);
-
- return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_attach_genpd);
/**
- * dev_pm_opp_detach_genpd() - Detach genpd(s) from the device.
- * @opp_table: OPP table returned by dev_pm_opp_attach_genpd().
+ * _opp_detach_genpd() - Detach genpd(s) from the device.
+ * @opp_table: OPP table returned by _opp_attach_genpd().
*
 * This detaches the genpd(s) and resets the virtual device pointers. Dropping
 * the OPP table reference is left to the caller.
*/
-void dev_pm_opp_detach_genpd(struct opp_table *opp_table)
+static void _opp_detach_genpd(struct opp_table *opp_table)
{
- if (unlikely(!opp_table))
- return;
-
/*
* Acquire genpd_virt_dev_lock to make sure virt_dev isn't getting
* used in parallel.
*/
mutex_lock(&opp_table->genpd_virt_dev_lock);
- _opp_detach_genpd(opp_table);
+ _detach_genpd(opp_table);
mutex_unlock(&opp_table->genpd_virt_dev_lock);
-
- dev_pm_opp_put_opp_table(opp_table);
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_detach_genpd);
-static void devm_pm_opp_detach_genpd(void *data)
+static void _opp_clear_config(struct opp_config_data *data)
{
- dev_pm_opp_detach_genpd(data);
+ if (data->flags & OPP_CONFIG_GENPD)
+ _opp_detach_genpd(data->opp_table);
+ if (data->flags & OPP_CONFIG_REGULATOR)
+ _opp_put_regulators(data->opp_table);
+ if (data->flags & OPP_CONFIG_SUPPORTED_HW)
+ _opp_put_supported_hw(data->opp_table);
+ if (data->flags & OPP_CONFIG_REGULATOR_HELPER)
+ _opp_put_config_regulators_helper(data->opp_table);
+ if (data->flags & OPP_CONFIG_PROP_NAME)
+ _opp_put_prop_name(data->opp_table);
+ if (data->flags & OPP_CONFIG_CLK)
+ _opp_put_clknames(data->opp_table);
+
+ dev_pm_opp_put_opp_table(data->opp_table);
+ kfree(data);
}
/**
- * devm_pm_opp_attach_genpd - Attach genpd(s) for the device and save virtual
- * device pointer
- * @dev: Consumer device for which the genpd is getting attached.
- * @names: Null terminated array of pointers containing names of genpd to attach.
- * @virt_devs: Pointer to return the array of virtual devices.
+ * dev_pm_opp_set_config() - Set OPP configuration for the device.
+ * @dev: Device for which configuration is being set.
+ * @config: OPP configuration.
*
- * This is a resource-managed version of dev_pm_opp_attach_genpd().
+ * This allows all device OPP configurations to be performed at once.
*
- * Return: 0 on success and errorno otherwise.
+ * This must be called before any OPPs are initialized for the device. This may
+ * be called multiple times for the same OPP table, for example once for each
+ * CPU that shares the same table. This must be balanced by the same number of
+ * calls to dev_pm_opp_clear_config() in order to free the OPP table properly.
+ *
+ * This returns a token to the caller, which must be passed to
+ * dev_pm_opp_clear_config() to free the resources later. The value of the
+ * returned token will be >= 1 for success and negative for errors. The minimum
+ * value of 1 is chosen here to make it easy for callers to manage the resource.
*/
-int devm_pm_opp_attach_genpd(struct device *dev, const char * const *names,
- struct device ***virt_devs)
+int dev_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config)
{
struct opp_table *opp_table;
+ struct opp_config_data *data;
+ unsigned int id;
+ int ret;
- opp_table = dev_pm_opp_attach_genpd(dev, names, virt_devs);
- if (IS_ERR(opp_table))
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ opp_table = _add_opp_table(dev, false);
+ if (IS_ERR(opp_table)) {
+ kfree(data);
return PTR_ERR(opp_table);
+ }
+
+ data->opp_table = opp_table;
+ data->flags = 0;
+
+ /* This should be called before OPPs are initialized */
+ if (WARN_ON(!list_empty(&opp_table->opp_list))) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ /* Configure clocks */
+ if (config->clk_names) {
+ ret = _opp_set_clknames(opp_table, dev, config->clk_names,
+ config->config_clks);
+ if (ret)
+ goto err;
+
+ data->flags |= OPP_CONFIG_CLK;
+ } else if (config->config_clks) {
+ /* Don't allow config callback without clocks */
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* Configure property names */
+ if (config->prop_name) {
+ ret = _opp_set_prop_name(opp_table, config->prop_name);
+ if (ret)
+ goto err;
+
+ data->flags |= OPP_CONFIG_PROP_NAME;
+ }
+
+ /* Configure config_regulators helper */
+ if (config->config_regulators) {
+ ret = _opp_set_config_regulators_helper(opp_table, dev,
+ config->config_regulators);
+ if (ret)
+ goto err;
+
+ data->flags |= OPP_CONFIG_REGULATOR_HELPER;
+ }
- return devm_add_action_or_reset(dev, devm_pm_opp_detach_genpd,
- opp_table);
+ /* Configure supported hardware */
+ if (config->supported_hw) {
+ ret = _opp_set_supported_hw(opp_table, config->supported_hw,
+ config->supported_hw_count);
+ if (ret)
+ goto err;
+
+ data->flags |= OPP_CONFIG_SUPPORTED_HW;
+ }
+
+ /* Configure supplies */
+ if (config->regulator_names) {
+ ret = _opp_set_regulators(opp_table, dev,
+ config->regulator_names);
+ if (ret)
+ goto err;
+
+ data->flags |= OPP_CONFIG_REGULATOR;
+ }
+
+ /* Attach genpds */
+ if (config->genpd_names) {
+ ret = _opp_attach_genpd(opp_table, dev, config->genpd_names,
+ config->virt_devs);
+ if (ret)
+ goto err;
+
+ data->flags |= OPP_CONFIG_GENPD;
+ }
+
+ ret = xa_alloc(&opp_configs, &id, data, XA_LIMIT(1, INT_MAX),
+ GFP_KERNEL);
+ if (ret)
+ goto err;
+
+ return id;
+
+err:
+ _opp_clear_config(data);
+ return ret;
}
-EXPORT_SYMBOL_GPL(devm_pm_opp_attach_genpd);
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_config);
+
+/**
+ * dev_pm_opp_clear_config() - Releases resources blocked for OPP configuration.
+ * @token: The token returned by dev_pm_opp_set_config().
+ *
+ * This allows all device OPP configurations to be cleared at once. This must be
+ * called once for each call made to dev_pm_opp_set_config(), in order to free
+ * the OPPs properly.
+ *
+ * Currently the first call itself ends up freeing all the OPP configurations,
+ * while the later ones only drop the OPP table reference. This works well for
+ * now as we never want to use a half-initialized OPP table and so remove all
+ * the configurations together.
+ */
+void dev_pm_opp_clear_config(int token)
+{
+ struct opp_config_data *data;
+
+ /*
+ * This lets the callers call this unconditionally and keep their code
+ * simple.
+ */
+ if (unlikely(token <= 0))
+ return;
+
+ data = xa_erase(&opp_configs, token);
+ if (WARN_ON(!data))
+ return;
+
+ _opp_clear_config(data);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_clear_config);
+
+static void devm_pm_opp_config_release(void *token)
+{
+ dev_pm_opp_clear_config((unsigned long)token);
+}
+
+/**
+ * devm_pm_opp_set_config() - Set OPP configuration for the device.
+ * @dev: Device for which configuration is being set.
+ * @config: OPP configuration.
+ *
+ * This allows all device OPP configurations to be performed at once.
+ * This is a resource-managed variant of dev_pm_opp_set_config().
+ *
+ * Return: 0 on success and a negative error number otherwise.
+ */
+int devm_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config)
+{
+ int token = dev_pm_opp_set_config(dev, config);
+
+ if (token < 0)
+ return token;
+
+ return devm_add_action_or_reset(dev, devm_pm_opp_config_release,
+ (void *) ((unsigned long) token));
+}
+EXPORT_SYMBOL_GPL(devm_pm_opp_set_config);
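A minimal consumer-side sketch of the new interface (hypothetical driver, not part of this patch): fill a struct dev_pm_opp_config, keep the returned token and balance it with dev_pm_opp_clear_config(); devm_pm_opp_set_config() does the same without the explicit token handling.

	static int foo_opp_setup(struct device *dev)
	{
		struct dev_pm_opp_config config = {
			.clk_names = (const char * const []){ "core", NULL },
			.regulator_names = (const char * const []){ "vdd", NULL },
		};
		int token, ret;

		token = dev_pm_opp_set_config(dev, &config);
		if (token < 0)
			return token;

		ret = dev_pm_opp_of_add_table(dev);
		if (ret) {
			dev_pm_opp_clear_config(token);
			return ret;
		}

		/* ... scale with dev_pm_opp_set_rate() as usual ... */
		return 0;
	}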
/**
* dev_pm_opp_xlate_required_opp() - Find required OPP for @src_table OPP.
@@ -2795,11 +2792,16 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
return r;
}
+ if (!assert_single_clk(opp_table)) {
+ r = -EINVAL;
+ goto put_table;
+ }
+
mutex_lock(&opp_table->lock);
/* Do we have the frequency? */
list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
- if (tmp_opp->rate == freq) {
+ if (tmp_opp->rates[0] == freq) {
opp = tmp_opp;
break;
}
@@ -2866,11 +2868,16 @@ int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
return r;
}
+ if (!assert_single_clk(opp_table)) {
+ r = -EINVAL;
+ goto put_table;
+ }
+
mutex_lock(&opp_table->lock);
/* Do we have the frequency? */
list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
- if (tmp_opp->rate == freq) {
+ if (tmp_opp->rates[0] == freq) {
opp = tmp_opp;
break;
}
@@ -2897,11 +2904,11 @@ int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
opp);
dev_pm_opp_put(opp);
- goto adjust_put_table;
+ goto put_table;
adjust_unlock:
mutex_unlock(&opp_table->lock);
-adjust_put_table:
+put_table:
dev_pm_opp_put_opp_table(opp_table);
return r;
}
diff --git a/drivers/opp/cpu.c b/drivers/opp/cpu.c
index 5004335cf0de..3c3506021501 100644
--- a/drivers/opp/cpu.c
+++ b/drivers/opp/cpu.c
@@ -41,7 +41,7 @@
* the table if any of the mentioned functions have been invoked in the interim.
*/
int dev_pm_opp_init_cpufreq_table(struct device *dev,
- struct cpufreq_frequency_table **table)
+ struct cpufreq_frequency_table **opp_table)
{
struct dev_pm_opp *opp;
struct cpufreq_frequency_table *freq_table = NULL;
@@ -76,7 +76,7 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev,
freq_table[i].driver_data = i;
freq_table[i].frequency = CPUFREQ_TABLE_END;
- *table = &freq_table[0];
+ *opp_table = &freq_table[0];
out:
if (ret)
@@ -94,13 +94,13 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
* Free up the table allocated by dev_pm_opp_init_cpufreq_table
*/
void dev_pm_opp_free_cpufreq_table(struct device *dev,
- struct cpufreq_frequency_table **table)
+ struct cpufreq_frequency_table **opp_table)
{
- if (!table)
+ if (!opp_table)
return;
- kfree(*table);
- *table = NULL;
+ kfree(*opp_table);
+ *opp_table = NULL;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
#endif /* CONFIG_CPU_FREQ */
diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c
index 1b6e5c55c3ed..96a30a032c5f 100644
--- a/drivers/opp/debugfs.c
+++ b/drivers/opp/debugfs.c
@@ -74,6 +74,24 @@ static void opp_debug_create_bw(struct dev_pm_opp *opp,
}
}
+static void opp_debug_create_clks(struct dev_pm_opp *opp,
+ struct opp_table *opp_table,
+ struct dentry *pdentry)
+{
+ char name[12];
+ int i;
+
+ if (opp_table->clk_count == 1) {
+ debugfs_create_ulong("rate_hz", S_IRUGO, pdentry, &opp->rates[0]);
+ return;
+ }
+
+ for (i = 0; i < opp_table->clk_count; i++) {
+ snprintf(name, sizeof(name), "rate_hz_%d", i);
+ debugfs_create_ulong(name, S_IRUGO, pdentry, &opp->rates[i]);
+ }
+}
+
static void opp_debug_create_supplies(struct dev_pm_opp *opp,
struct opp_table *opp_table,
struct dentry *pdentry)
@@ -117,10 +135,11 @@ void opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
* Get directory name for OPP.
*
* - Normally rate is unique to each OPP, use it to get unique opp-name.
- * - For some devices rate isn't available, use index instead.
+ * - For some devices the rate isn't available or there are multiple
+ * rates; use the index instead for them.
*/
- if (likely(opp->rate))
- id = opp->rate;
+ if (likely(opp_table->clk_count == 1 && opp->rates[0]))
+ id = opp->rates[0];
else
id = _get_opp_count(opp_table);
@@ -134,7 +153,6 @@ void opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
debugfs_create_bool("turbo", S_IRUGO, d, &opp->turbo);
debugfs_create_bool("suspend", S_IRUGO, d, &opp->suspend);
debugfs_create_u32("performance_state", S_IRUGO, d, &opp->pstate);
- debugfs_create_ulong("rate_hz", S_IRUGO, d, &opp->rate);
debugfs_create_u32("level", S_IRUGO, d, &opp->level);
debugfs_create_ulong("clock_latency_ns", S_IRUGO, d,
&opp->clock_latency_ns);
@@ -142,6 +160,7 @@ void opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
opp->of_name = of_node_full_name(opp->np);
debugfs_create_str("of_name", S_IRUGO, d, (char **)&opp->of_name);
+ opp_debug_create_clks(opp, opp_table, d);
opp_debug_create_supplies(opp, opp_table, d);
opp_debug_create_bw(opp, opp_table, d);
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index eb89c9a75985..605d68673f92 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -242,20 +242,20 @@ void _of_init_opp_table(struct opp_table *opp_table, struct device *dev,
opp_table->np = opp_np;
_opp_table_alloc_required_tables(opp_table, dev, opp_np);
- of_node_put(opp_np);
}
void _of_clear_opp_table(struct opp_table *opp_table)
{
_opp_table_free_required_tables(opp_table);
+ of_node_put(opp_table->np);
}
/*
* Release all resources previously acquired with a call to
* _of_opp_alloc_required_opps().
*/
-void _of_opp_free_required_opps(struct opp_table *opp_table,
- struct dev_pm_opp *opp)
+static void _of_opp_free_required_opps(struct opp_table *opp_table,
+ struct dev_pm_opp *opp)
{
struct dev_pm_opp **required_opps = opp->required_opps;
int i;
@@ -275,6 +275,12 @@ void _of_opp_free_required_opps(struct opp_table *opp_table,
kfree(required_opps);
}
+void _of_clear_opp(struct opp_table *opp_table, struct dev_pm_opp *opp)
+{
+ _of_opp_free_required_opps(opp_table, opp);
+ of_node_put(opp->np);
+}
+
/* Populate all required OPPs which are part of "required-opps" list */
static int _of_opp_alloc_required_opps(struct opp_table *opp_table,
struct dev_pm_opp *opp)
@@ -767,7 +773,51 @@ void dev_pm_opp_of_remove_table(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
-static int _read_bw(struct dev_pm_opp *new_opp, struct opp_table *table,
+static int _read_rate(struct dev_pm_opp *new_opp, struct opp_table *opp_table,
+ struct device_node *np)
+{
+ struct property *prop;
+ int i, count, ret;
+ u64 *rates;
+
+ prop = of_find_property(np, "opp-hz", NULL);
+ if (!prop)
+ return -ENODEV;
+
+ count = prop->length / sizeof(u64);
+ if (opp_table->clk_count != count) {
+ pr_err("%s: Count mismatch between opp-hz and clk_count (%d %d)\n",
+ __func__, count, opp_table->clk_count);
+ return -EINVAL;
+ }
+
+ rates = kmalloc_array(count, sizeof(*rates), GFP_KERNEL);
+ if (!rates)
+ return -ENOMEM;
+
+ ret = of_property_read_u64_array(np, "opp-hz", rates, count);
+ if (ret) {
+ pr_err("%s: Error parsing opp-hz: %d\n", __func__, ret);
+ } else {
+ /*
+ * Rate is defined as an unsigned long in the clk API, so cast
+ * explicitly to its type. This must be fixed once the clk API
+ * guarantees a 64-bit rate.
+ */
+ for (i = 0; i < count; i++) {
+ new_opp->rates[i] = (unsigned long)rates[i];
+
+ /* This will happen for frequencies > 4.29 GHz */
+ WARN_ON(new_opp->rates[i] != rates[i]);
+ }
+ }
+
+ kfree(rates);
+
+ return ret;
+}
+
+static int _read_bw(struct dev_pm_opp *new_opp, struct opp_table *opp_table,
struct device_node *np, bool peak)
{
const char *name = peak ? "opp-peak-kBps" : "opp-avg-kBps";
@@ -780,9 +830,9 @@ static int _read_bw(struct dev_pm_opp *new_opp, struct opp_table *table,
return -ENODEV;
count = prop->length / sizeof(u32);
- if (table->path_count != count) {
+ if (opp_table->path_count != count) {
pr_err("%s: Mismatch between %s and paths (%d %d)\n",
- __func__, name, count, table->path_count);
+ __func__, name, count, opp_table->path_count);
return -EINVAL;
}
@@ -808,34 +858,27 @@ out:
return ret;
}
-static int _read_opp_key(struct dev_pm_opp *new_opp, struct opp_table *table,
- struct device_node *np, bool *rate_not_available)
+static int _read_opp_key(struct dev_pm_opp *new_opp,
+ struct opp_table *opp_table, struct device_node *np)
{
bool found = false;
- u64 rate;
int ret;
- ret = of_property_read_u64(np, "opp-hz", &rate);
- if (!ret) {
- /*
- * Rate is defined as an unsigned long in clk API, and so
- * casting explicitly to its type. Must be fixed once rate is 64
- * bit guaranteed in clk API.
- */
- new_opp->rate = (unsigned long)rate;
+ ret = _read_rate(new_opp, opp_table, np);
+ if (!ret)
found = true;
- }
- *rate_not_available = !!ret;
+ else if (ret != -ENODEV)
+ return ret;
/*
* Bandwidth consists of peak and average (optional) values:
* opp-peak-kBps = <path1_value path2_value>;
* opp-avg-kBps = <path1_value path2_value>;
*/
- ret = _read_bw(new_opp, table, np, true);
+ ret = _read_bw(new_opp, opp_table, np, true);
if (!ret) {
found = true;
- ret = _read_bw(new_opp, table, np, false);
+ ret = _read_bw(new_opp, opp_table, np, false);
}
/* The properties were found but we failed to parse them */
@@ -881,13 +924,12 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
struct dev_pm_opp *new_opp;
u32 val;
int ret;
- bool rate_not_available = false;
new_opp = _opp_allocate(opp_table);
if (!new_opp)
return ERR_PTR(-ENOMEM);
- ret = _read_opp_key(new_opp, opp_table, np, &rate_not_available);
+ ret = _read_opp_key(new_opp, opp_table, np);
if (ret < 0) {
dev_err(dev, "%s: opp key field not found\n", __func__);
goto free_opp;
@@ -895,14 +937,14 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
/* Check if the OPP supports hardware's hierarchy of versions or not */
if (!_opp_is_supported(dev, opp_table, np)) {
- dev_dbg(dev, "OPP not supported by hardware: %lu\n",
- new_opp->rate);
+ dev_dbg(dev, "OPP not supported by hardware: %s\n",
+ of_node_full_name(np));
goto free_opp;
}
new_opp->turbo = of_property_read_bool(np, "turbo-mode");
- new_opp->np = np;
+ new_opp->np = of_node_get(np);
new_opp->dynamic = false;
new_opp->available = true;
@@ -920,7 +962,7 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
if (opp_table->is_genpd)
new_opp->pstate = pm_genpd_opp_to_performance_state(dev, new_opp);
- ret = _opp_add(dev, new_opp, opp_table, rate_not_available);
+ ret = _opp_add(dev, new_opp, opp_table);
if (ret) {
/* Don't return error for duplicate OPPs */
if (ret == -EBUSY)
@@ -931,8 +973,8 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
/* OPP to select on device suspend */
if (of_property_read_bool(np, "opp-suspend")) {
if (opp_table->suspend_opp) {
- /* Pick the OPP with higher rate as suspend OPP */
- if (new_opp->rate > opp_table->suspend_opp->rate) {
+ /* Pick the OPP with higher rate/bw/level as suspend OPP */
+ if (_opp_compare_key(opp_table, new_opp, opp_table->suspend_opp) == 1) {
opp_table->suspend_opp->suspend = false;
new_opp->suspend = true;
opp_table->suspend_opp = new_opp;
@@ -947,7 +989,7 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;
pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu level:%u\n",
- __func__, new_opp->turbo, new_opp->rate,
+ __func__, new_opp->turbo, new_opp->rates[0],
new_opp->supplies[0].u_volt, new_opp->supplies[0].u_volt_min,
new_opp->supplies[0].u_volt_max, new_opp->clock_latency_ns,
new_opp->level);
@@ -1084,7 +1126,7 @@ remove_static_opp:
return ret;
}
-static int _of_add_table_indexed(struct device *dev, int index, bool getclk)
+static int _of_add_table_indexed(struct device *dev, int index)
{
struct opp_table *opp_table;
int ret, count;
@@ -1100,7 +1142,7 @@ static int _of_add_table_indexed(struct device *dev, int index, bool getclk)
index = 0;
}
- opp_table = _add_opp_table_indexed(dev, index, getclk);
+ opp_table = _add_opp_table_indexed(dev, index, true);
if (IS_ERR(opp_table))
return PTR_ERR(opp_table);
@@ -1124,11 +1166,11 @@ static void devm_pm_opp_of_table_release(void *data)
dev_pm_opp_of_remove_table(data);
}
-static int _devm_of_add_table_indexed(struct device *dev, int index, bool getclk)
+static int _devm_of_add_table_indexed(struct device *dev, int index)
{
int ret;
- ret = _of_add_table_indexed(dev, index, getclk);
+ ret = _of_add_table_indexed(dev, index);
if (ret)
return ret;
@@ -1156,7 +1198,7 @@ static int _devm_of_add_table_indexed(struct device *dev, int index, bool getclk
*/
int devm_pm_opp_of_add_table(struct device *dev)
{
- return _devm_of_add_table_indexed(dev, 0, true);
+ return _devm_of_add_table_indexed(dev, 0);
}
EXPORT_SYMBOL_GPL(devm_pm_opp_of_add_table);
@@ -1179,7 +1221,7 @@ EXPORT_SYMBOL_GPL(devm_pm_opp_of_add_table);
*/
int dev_pm_opp_of_add_table(struct device *dev)
{
- return _of_add_table_indexed(dev, 0, true);
+ return _of_add_table_indexed(dev, 0);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
@@ -1195,7 +1237,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
*/
int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
{
- return _of_add_table_indexed(dev, index, true);
+ return _of_add_table_indexed(dev, index);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table_indexed);
@@ -1208,42 +1250,10 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table_indexed);
*/
int devm_pm_opp_of_add_table_indexed(struct device *dev, int index)
{
- return _devm_of_add_table_indexed(dev, index, true);
+ return _devm_of_add_table_indexed(dev, index);
}
EXPORT_SYMBOL_GPL(devm_pm_opp_of_add_table_indexed);
-/**
- * dev_pm_opp_of_add_table_noclk() - Initialize indexed opp table from device
- * tree without getting clk for device.
- * @dev: device pointer used to lookup OPP table.
- * @index: Index number.
- *
- * Register the initial OPP table with the OPP library for given device only
- * using the "operating-points-v2" property. Do not try to get the clk for the
- * device.
- *
- * Return: Refer to dev_pm_opp_of_add_table() for return values.
- */
-int dev_pm_opp_of_add_table_noclk(struct device *dev, int index)
-{
- return _of_add_table_indexed(dev, index, false);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table_noclk);
-
-/**
- * devm_pm_opp_of_add_table_noclk() - Initialize indexed opp table from device
- * tree without getting clk for device.
- * @dev: device pointer used to lookup OPP table.
- * @index: Index number.
- *
- * This is a resource-managed variant of dev_pm_opp_of_add_table_noclk().
- */
-int devm_pm_opp_of_add_table_noclk(struct device *dev, int index)
-{
- return _devm_of_add_table_indexed(dev, index, false);
-}
-EXPORT_SYMBOL_GPL(devm_pm_opp_of_add_table_noclk);
-
/* CPU device specific helpers */
/**
diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h
index 45e3a55239a1..3a6e077df386 100644
--- a/drivers/opp/opp.h
+++ b/drivers/opp/opp.h
@@ -28,6 +28,27 @@ extern struct mutex opp_table_lock;
extern struct list_head opp_tables, lazy_opp_tables;
+/* OPP Config flags */
+#define OPP_CONFIG_CLK BIT(0)
+#define OPP_CONFIG_REGULATOR BIT(1)
+#define OPP_CONFIG_REGULATOR_HELPER BIT(2)
+#define OPP_CONFIG_PROP_NAME BIT(3)
+#define OPP_CONFIG_SUPPORTED_HW BIT(4)
+#define OPP_CONFIG_GENPD BIT(5)
+
+/**
+ * struct opp_config_data - data for set config operations
+ * @opp_table: OPP table
+ * @flags: OPP config flags
+ *
+ * This structure stores the OPP configuration applied by a caller to an OPP
+ * table, so that it can be cleared again later.
+ */
+struct opp_config_data {
+ struct opp_table *opp_table;
+ unsigned int flags;
+};
+
/*
* Internal data structure organization with the OPP layer library is as
* follows:
@@ -58,7 +79,7 @@ extern struct list_head opp_tables, lazy_opp_tables;
* @suspend: true if suspend OPP
* @removed: flag indicating that OPP's reference is dropped by OPP core.
* @pstate: Device's power domain's performance state.
- * @rate: Frequency in hertz
+ * @rates: Frequencies in hertz
* @level: Performance level
* @supplies: Power supplies voltage/current values
* @bandwidth: Interconnect bandwidth values
@@ -81,7 +102,7 @@ struct dev_pm_opp {
bool suspend;
bool removed;
unsigned int pstate;
- unsigned long rate;
+ unsigned long *rates;
unsigned int level;
struct dev_pm_opp_supply *supplies;
@@ -138,7 +159,7 @@ enum opp_table_access {
* @clock_latency_ns_max: Max clock latency in nanoseconds.
* @parsed_static_opps: Count of devices for which OPPs are initialized from DT.
* @shared_opp: OPP is shared between multiple devices.
- * @current_rate: Currently configured frequency.
+ * @rate_clk_single: Currently configured frequency for single clk.
* @current_opp: Currently configured OPP for the table.
* @suspend_opp: Pointer to OPP to be used during device suspend.
* @genpd_virt_dev_lock: Mutex protecting the genpd virtual device pointers.
@@ -149,7 +170,11 @@ enum opp_table_access {
* @supported_hw: Array of version number to support.
* @supported_hw_count: Number of elements in supported_hw array.
* @prop_name: A name to postfix to many DT properties, while parsing them.
- * @clk: Device's clock handle
+ * @config_clks: Platform specific config_clks() callback.
+ * @clks: Device's clock handles, for multiple clocks.
+ * @clk: Device's clock handle, for single clock.
+ * @clk_count: Number of clocks.
+ * @config_regulators: Platform specific config_regulators() callback.
* @regulators: Supply regulators
* @regulator_count: Number of power supply regulators. Its value can be -1
* (uninitialized), 0 (no opp-microvolt property) or > 0 (has opp-microvolt
@@ -159,9 +184,6 @@ enum opp_table_access {
* @enabled: Set to true if the device's resources are enabled/configured.
* @genpd_performance_state: Device's power domain support performance state.
* @is_genpd: Marks if the OPP table belongs to a genpd.
- * @set_opp: Platform specific set_opp callback
- * @sod_supplies: Set opp data supplies
- * @set_opp_data: Data to be passed to set_opp callback
* @dentry: debugfs dentry pointer of the real device directory (not links).
* @dentry_name: Name of the real dentry.
*
@@ -188,7 +210,7 @@ struct opp_table {
unsigned int parsed_static_opps;
enum opp_table_access shared_opp;
- unsigned long current_rate;
+ unsigned long rate_clk_single;
struct dev_pm_opp *current_opp;
struct dev_pm_opp *suspend_opp;
@@ -200,7 +222,11 @@ struct opp_table {
unsigned int *supported_hw;
unsigned int supported_hw_count;
const char *prop_name;
+ config_clks_t config_clks;
+ struct clk **clks;
struct clk *clk;
+ int clk_count;
+ config_regulators_t config_regulators;
struct regulator **regulators;
int regulator_count;
struct icc_path **paths;
@@ -209,10 +235,6 @@ struct opp_table {
bool genpd_performance_state;
bool is_genpd;
- int (*set_opp)(struct dev_pm_set_opp_data *data);
- struct dev_pm_opp_supply *sod_supplies;
- struct dev_pm_set_opp_data *set_opp_data;
-
#ifdef CONFIG_DEBUG_FS
struct dentry *dentry;
char dentry_name[NAME_MAX];
@@ -228,8 +250,8 @@ struct opp_table *_find_opp_table(struct device *dev);
struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table);
struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table);
void _opp_free(struct dev_pm_opp *opp);
-int _opp_compare_key(struct dev_pm_opp *opp1, struct dev_pm_opp *opp2);
-int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table, bool rate_not_available);
+int _opp_compare_key(struct opp_table *opp_table, struct dev_pm_opp *opp1, struct dev_pm_opp *opp2);
+int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table);
int _opp_add_v1(struct opp_table *opp_table, struct device *dev, unsigned long freq, long u_volt, bool dynamic);
void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, int last_cpu);
struct opp_table *_add_opp_table_indexed(struct device *dev, int index, bool getclk);
@@ -245,14 +267,12 @@ static inline bool lazy_linking_pending(struct opp_table *opp_table)
void _of_init_opp_table(struct opp_table *opp_table, struct device *dev, int index);
void _of_clear_opp_table(struct opp_table *opp_table);
struct opp_table *_managed_opp(struct device *dev, int index);
-void _of_opp_free_required_opps(struct opp_table *opp_table,
- struct dev_pm_opp *opp);
+void _of_clear_opp(struct opp_table *opp_table, struct dev_pm_opp *opp);
#else
static inline void _of_init_opp_table(struct opp_table *opp_table, struct device *dev, int index) {}
static inline void _of_clear_opp_table(struct opp_table *opp_table) {}
static inline struct opp_table *_managed_opp(struct device *dev, int index) { return NULL; }
-static inline void _of_opp_free_required_opps(struct opp_table *opp_table,
- struct dev_pm_opp *opp) {}
+static inline void _of_clear_opp(struct opp_table *opp_table, struct dev_pm_opp *opp) {}
#endif
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c
index bd4771f388ab..8f3f13fbbb25 100644
--- a/drivers/opp/ti-opp-supply.c
+++ b/drivers/opp/ti-opp-supply.c
@@ -36,11 +36,15 @@ struct ti_opp_supply_optimum_voltage_table {
* @vdd_table: Optimized voltage mapping table
* @num_vdd_table: number of entries in vdd_table
* @vdd_absolute_max_voltage_uv: absolute maximum voltage in UV for the supply
+ * @old_supplies: Placeholder for supplies information for old OPP.
+ * @new_supplies: Placeholder for supplies information for new OPP.
*/
struct ti_opp_supply_data {
struct ti_opp_supply_optimum_voltage_table *vdd_table;
u32 num_vdd_table;
u32 vdd_absolute_max_voltage_uv;
+ struct dev_pm_opp_supply old_supplies[2];
+ struct dev_pm_opp_supply new_supplies[2];
};
static struct ti_opp_supply_data opp_data;
@@ -266,27 +270,32 @@ static int _opp_set_voltage(struct device *dev,
return 0;
}
-/**
- * ti_opp_supply_set_opp() - do the opp supply transition
- * @data: information on regulators and new and old opps provided by
- * opp core to use in transition
- *
- * Return: If successful, 0, else appropriate error value.
- */
-static int ti_opp_supply_set_opp(struct dev_pm_set_opp_data *data)
+/* Do the opp supply transition */
+static int ti_opp_config_regulators(struct device *dev,
+ struct dev_pm_opp *old_opp, struct dev_pm_opp *new_opp,
+ struct regulator **regulators, unsigned int count)
{
- struct dev_pm_opp_supply *old_supply_vdd = &data->old_opp.supplies[0];
- struct dev_pm_opp_supply *old_supply_vbb = &data->old_opp.supplies[1];
- struct dev_pm_opp_supply *new_supply_vdd = &data->new_opp.supplies[0];
- struct dev_pm_opp_supply *new_supply_vbb = &data->new_opp.supplies[1];
- struct device *dev = data->dev;
- unsigned long old_freq = data->old_opp.rate, freq = data->new_opp.rate;
- struct clk *clk = data->clk;
- struct regulator *vdd_reg = data->regulators[0];
- struct regulator *vbb_reg = data->regulators[1];
+ struct dev_pm_opp_supply *old_supply_vdd = &opp_data.old_supplies[0];
+ struct dev_pm_opp_supply *old_supply_vbb = &opp_data.old_supplies[1];
+ struct dev_pm_opp_supply *new_supply_vdd = &opp_data.new_supplies[0];
+ struct dev_pm_opp_supply *new_supply_vbb = &opp_data.new_supplies[1];
+ struct regulator *vdd_reg = regulators[0];
+ struct regulator *vbb_reg = regulators[1];
+ unsigned long old_freq, freq;
int vdd_uv;
int ret;
+ /* We must have two regulators here */
+ WARN_ON(count != 2);
+
+ /* Fetch supplies and freq information from OPP core */
+ ret = dev_pm_opp_get_supplies(new_opp, opp_data.new_supplies);
+ WARN_ON(ret);
+
+ old_freq = dev_pm_opp_get_freq(old_opp);
+ freq = dev_pm_opp_get_freq(new_opp);
+ WARN_ON(!old_freq || !freq);
+
vdd_uv = _get_optimal_vdd_voltage(dev, &opp_data,
new_supply_vdd->u_volt);
@@ -303,39 +312,24 @@ static int ti_opp_supply_set_opp(struct dev_pm_set_opp_data *data)
ret = _opp_set_voltage(dev, new_supply_vbb, 0, vbb_reg, "vbb");
if (ret)
goto restore_voltage;
- }
-
- /* Change frequency */
- dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n",
- __func__, old_freq, freq);
-
- ret = clk_set_rate(clk, freq);
- if (ret) {
- dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
- ret);
- goto restore_voltage;
- }
-
- /* Scaling down? Scale voltage after frequency */
- if (freq < old_freq) {
+ } else {
ret = _opp_set_voltage(dev, new_supply_vbb, 0, vbb_reg, "vbb");
if (ret)
- goto restore_freq;
+ goto restore_voltage;
ret = _opp_set_voltage(dev, new_supply_vdd, vdd_uv, vdd_reg,
"vdd");
if (ret)
- goto restore_freq;
+ goto restore_voltage;
}
return 0;
-restore_freq:
- ret = clk_set_rate(clk, old_freq);
- if (ret)
- dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
- __func__, old_freq);
restore_voltage:
+ /* Fetch old supplies information only if required */
+ ret = dev_pm_opp_get_supplies(old_opp, opp_data.old_supplies);
+ WARN_ON(ret);
+
/* This shouldn't harm even if the voltages weren't updated earlier */
if (old_supply_vdd->u_volt) {
ret = _opp_set_voltage(dev, old_supply_vbb, 0, vbb_reg, "vbb");
@@ -405,9 +399,8 @@ static int ti_opp_supply_probe(struct platform_device *pdev)
return ret;
}
- ret = PTR_ERR_OR_ZERO(dev_pm_opp_register_set_opp_helper(cpu_dev,
- ti_opp_supply_set_opp));
- if (ret)
+ ret = dev_pm_opp_set_config_regulators(cpu_dev, ti_opp_config_regulators);
+ if (ret < 0)
_free_optimized_voltages(dev, &opp_data);
return ret;
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 9be007c9420f..f223afe47d10 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -268,7 +268,7 @@ static int ioc_count;
* Each bit can represent a number of pages.
* LSbs represent lower addresses (IOVA's).
*
-* This was was copied from sba_iommu.c. Don't try to unify
+* This was copied from sba_iommu.c. Don't try to unify
* the two resource managers unless a way to have different
* allocation policies is also adjusted. We'd like to avoid
* I/O TLB thrashing by having resource allocation policy
@@ -1380,15 +1380,17 @@ ccio_init_resource(struct resource *res, char *name, void __iomem *ioaddr)
}
}
-static void __init ccio_init_resources(struct ioc *ioc)
+static int __init ccio_init_resources(struct ioc *ioc)
{
struct resource *res = ioc->mmio_region;
char *name = kmalloc(14, GFP_KERNEL);
-
+ if (unlikely(!name))
+ return -ENOMEM;
snprintf(name, 14, "GSC Bus [%d/]", ioc->hw_path);
ccio_init_resource(res, name, &ioc->ioc_regs->io_io_low);
ccio_init_resource(res + 1, name, &ioc->ioc_regs->io_io_low_hv);
+ return 0;
}
static int new_ioc_area(struct resource *res, unsigned long size,
@@ -1543,7 +1545,10 @@ static int __init ccio_probe(struct parisc_device *dev)
return -ENOMEM;
}
ccio_ioc_init(ioc);
- ccio_init_resources(ioc);
+ if (ccio_init_resources(ioc)) {
+ kfree(ioc);
+ return -ENOMEM;
+ }
hppa_dma_ops = &ccio_ops;
hba = kzalloc(sizeof(*hba), GFP_KERNEL);
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 732b516c7bf8..afc6e66ddc31 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -1476,9 +1476,13 @@ lba_driver_probe(struct parisc_device *dev)
u32 func_class;
void *tmp_obj;
char *version;
- void __iomem *addr = ioremap(dev->hpa.start, 4096);
+ void __iomem *addr;
int max;
+ addr = ioremap(dev->hpa.start, 4096);
+ if (addr == NULL)
+ return -ENOMEM;
+
/* Read HW Rev First */
func_class = READ_REG32(addr + LBA_FCLASS);
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index 1e4a5663d011..d4be9d2ee74d 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -646,7 +646,7 @@ int lcd_print( const char *str )
cancel_delayed_work_sync(&led_task);
/* copy display string to buffer for procfs */
- strlcpy(lcd_text, str, sizeof(lcd_text));
+ strscpy(lcd_text, str, sizeof(lcd_text));
/* Set LCD Cursor to 1st character */
gsc_writeb(lcd_info.reset_cmd1, LCD_CMD_REG);
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 133c73207782..55c028af4bd9 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -121,6 +121,9 @@ config XEN_PCIDEV_FRONTEND
config PCI_ATS
bool
+config PCI_DOE
+ bool
+
config PCI_ECAM
bool
@@ -164,6 +167,11 @@ config PCI_PASID
config PCI_P2PDMA
bool "PCI peer-to-peer transfer support"
depends on ZONE_DEVICE
+ #
+ # The need for the scatterlist DMA bus address flag means PCI P2PDMA
+ # requires 64bit
+ #
+ depends on 64BIT
select GENERIC_ALLOCATOR
help
	  Enables drivers to do PCI peer-to-peer transactions to and from
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 0da6b1ebc694..2680e4c92f0a 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_PCI_ECAM) += ecam.o
obj-$(CONFIG_PCI_P2PDMA) += p2pdma.o
obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
obj-$(CONFIG_VGA_ARB) += vgaarb.o
+obj-$(CONFIG_PCI_DOE) += doe.o
# Endpoint library must be initialized before its users
obj-$(CONFIG_PCI_ENDPOINT) += endpoint/
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index b8d96d38064d..d1c5fcf00a8a 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -237,7 +237,7 @@ config PCIE_ROCKCHIP_EP
config PCIE_MEDIATEK
tristate "MediaTek PCIe controller"
- depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on ARCH_AIROHA || ARCH_MEDIATEK || COMPILE_TEST
depends on OF
depends on PCI_MSI_IRQ_DOMAIN
help
@@ -293,7 +293,7 @@ config PCI_HYPERV_INTERFACE
config PCI_LOONGSON
bool "LOONGSON PCI Controller"
depends on MACH_LOONGSON64 || COMPILE_TEST
- depends on OF
+ depends on OF || ACPI
depends on PCI_QUIRKS
default MACH_LOONGSON64
help
diff --git a/drivers/pci/controller/cadence/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c
index 52767f26048f..13c4032ca379 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.c
+++ b/drivers/pci/controller/cadence/pcie-cadence.c
@@ -243,7 +243,6 @@ err_phy:
return ret;
}
-#ifdef CONFIG_PM_SLEEP
static int cdns_pcie_suspend_noirq(struct device *dev)
{
struct cdns_pcie *pcie = dev_get_drvdata(dev);
@@ -266,9 +265,8 @@ static int cdns_pcie_resume_noirq(struct device *dev)
return 0;
}
-#endif
const struct dev_pm_ops cdns_pcie_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq,
- cdns_pcie_resume_noirq)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq,
+ cdns_pcie_resume_noirq)
};
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index dfcdeb432dc8..38462ed11d07 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -178,7 +178,7 @@ static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
dra7xx_pcie_enable_msi_interrupts(dra7xx);
}
-static int dra7xx_pcie_host_init(struct pcie_port *pp)
+static int dra7xx_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
@@ -202,7 +202,7 @@ static const struct irq_domain_ops intx_domain_ops = {
.xlate = pci_irqd_intx_xlate,
};
-static int dra7xx_pcie_handle_msi(struct pcie_port *pp, int index)
+static int dra7xx_pcie_handle_msi(struct dw_pcie_rp *pp, int index)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
unsigned long val;
@@ -224,7 +224,7 @@ static int dra7xx_pcie_handle_msi(struct pcie_port *pp, int index)
return 1;
}
-static void dra7xx_pcie_handle_msi_irq(struct pcie_port *pp)
+static void dra7xx_pcie_handle_msi_irq(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
int ret, i, count, num_ctrls;
@@ -255,8 +255,8 @@ static void dra7xx_pcie_msi_irq_handler(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct dra7xx_pcie *dra7xx;
+ struct dw_pcie_rp *pp;
struct dw_pcie *pci;
- struct pcie_port *pp;
unsigned long reg;
u32 bit;
@@ -344,7 +344,7 @@ static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
+static int dra7xx_pcie_init_irq_domain(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
@@ -475,7 +475,7 @@ static int dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
{
int ret;
struct dw_pcie *pci = dra7xx->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = pci->dev;
pp->irq = platform_get_irq(pdev, 1);
@@ -483,7 +483,7 @@ static int dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
return pp->irq;
/* MSI IRQ is muxed */
- pp->msi_irq = -ENODEV;
+ pp->msi_irq[0] = -ENODEV;
ret = dra7xx_pcie_init_irq_domain(pp);
if (ret < 0)
@@ -862,7 +862,6 @@ err_link:
return ret;
}
-#ifdef CONFIG_PM_SLEEP
static int dra7xx_pcie_suspend(struct device *dev)
{
struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
@@ -919,7 +918,6 @@ static int dra7xx_pcie_resume_noirq(struct device *dev)
return 0;
}
-#endif
static void dra7xx_pcie_shutdown(struct platform_device *pdev)
{
@@ -940,9 +938,9 @@ static void dra7xx_pcie_shutdown(struct platform_device *pdev)
}
static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
- dra7xx_pcie_resume_noirq)
+ SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
+ dra7xx_pcie_resume_noirq)
};
static struct platform_driver dra7xx_pcie_driver = {
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
index 467c8d1cd7e4..ec5611005566 100644
--- a/drivers/pci/controller/dwc/pci-exynos.c
+++ b/drivers/pci/controller/dwc/pci-exynos.c
@@ -249,7 +249,7 @@ static int exynos_pcie_link_up(struct dw_pcie *pci)
return (val & PCIE_ELBI_XMLH_LINKUP);
}
-static int exynos_pcie_host_init(struct pcie_port *pp)
+static int exynos_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct exynos_pcie *ep = to_exynos_pcie(pci);
@@ -258,9 +258,8 @@ static int exynos_pcie_host_init(struct pcie_port *pp)
exynos_pcie_assert_core_reset(ep);
- phy_reset(ep->phy);
- phy_power_on(ep->phy);
phy_init(ep->phy);
+ phy_power_on(ep->phy);
exynos_pcie_deassert_core_reset(ep);
exynos_pcie_enable_irq_pulse(ep);
@@ -276,7 +275,7 @@ static int exynos_add_pcie_port(struct exynos_pcie *ep,
struct platform_device *pdev)
{
struct dw_pcie *pci = &ep->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = &pdev->dev;
int ret;
@@ -292,7 +291,7 @@ static int exynos_add_pcie_port(struct exynos_pcie *ep,
}
pp->ops = &exynos_pcie_host_ops;
- pp->msi_irq = -ENODEV;
+ pp->msi_irq[0] = -ENODEV;
ret = dw_pcie_host_init(pp);
if (ret) {
@@ -390,7 +389,7 @@ static int __exit exynos_pcie_remove(struct platform_device *pdev)
return 0;
}
-static int __maybe_unused exynos_pcie_suspend_noirq(struct device *dev)
+static int exynos_pcie_suspend_noirq(struct device *dev)
{
struct exynos_pcie *ep = dev_get_drvdata(dev);
@@ -402,11 +401,11 @@ static int __maybe_unused exynos_pcie_suspend_noirq(struct device *dev)
return 0;
}
-static int __maybe_unused exynos_pcie_resume_noirq(struct device *dev)
+static int exynos_pcie_resume_noirq(struct device *dev)
{
struct exynos_pcie *ep = dev_get_drvdata(dev);
struct dw_pcie *pci = &ep->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(ep->supplies), ep->supplies);
@@ -421,8 +420,8 @@ static int __maybe_unused exynos_pcie_resume_noirq(struct device *dev)
}
static const struct dev_pm_ops exynos_pcie_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(exynos_pcie_suspend_noirq,
- exynos_pcie_resume_noirq)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(exynos_pcie_suspend_noirq,
+ exynos_pcie_resume_noirq)
};
static const struct of_device_id exynos_pcie_of_match[] = {
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 7a285fb0f619..6e5debdbc55b 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -67,6 +67,7 @@ struct imx6_pcie {
struct dw_pcie *pci;
int reset_gpio;
bool gpio_active_high;
+ bool link_is_up;
struct clk *pcie_bus;
struct clk *pcie_phy;
struct clk *pcie_inbound_axi;
@@ -146,6 +147,31 @@ struct imx6_pcie {
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN BIT(5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN BIT(3)
+static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
+{
+ WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ &&
+ imx6_pcie->drvdata->variant != IMX8MM);
+ return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
+}
+
+static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
+{
+ unsigned int mask, val;
+
+ if (imx6_pcie->drvdata->variant == IMX8MQ &&
+ imx6_pcie->controller_id == 1) {
+ mask = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE;
+ val = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
+ PCI_EXP_TYPE_ROOT_PORT);
+ } else {
+ mask = IMX6Q_GPR12_DEVICE_TYPE;
+ val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE,
+ PCI_EXP_TYPE_ROOT_PORT);
+ }
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, mask, val);
+}
+
static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val)
{
struct dw_pcie *pci = imx6_pcie->pci;
@@ -271,6 +297,134 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
return 0;
}
+static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+{
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX8MM:
+ /*
+ * The PHY initialization is already handled by the standalone
+ * PHY driver; nothing to do here.
+ */
+ break;
+ case IMX8MQ:
+ /*
+ * TODO: Currently this code assumes an external
+ * oscillator is being used
+ */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ imx6_pcie_grp_offset(imx6_pcie),
+ IMX8MQ_GPR_PCIE_REF_USE_PAD,
+ IMX8MQ_GPR_PCIE_REF_USE_PAD);
+ /*
+ * According to the datasheet, PCIE_VPH is expected
+ * to be 1.8V. If PCIE_VPH is supplied with 3.3V,
+ * VREG_BYPASS should be cleared to zero.
+ */
+ if (imx6_pcie->vph &&
+ regulator_get_voltage(imx6_pcie->vph) > 3000000)
+ regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ imx6_pcie_grp_offset(imx6_pcie),
+ IMX8MQ_GPR_PCIE_VREG_BYPASS,
+ 0);
+ break;
+ case IMX7D:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
+ break;
+ case IMX6SX:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_RX_EQ_MASK,
+ IMX6SX_GPR12_PCIE_RX_EQ_2);
+ fallthrough;
+ default:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
+
+ /* configure constant input signal to the pcie ctrl and phy */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN1,
+ imx6_pcie->tx_deemph_gen1 << 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
+ imx6_pcie->tx_deemph_gen2_3p5db << 6);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
+ imx6_pcie->tx_deemph_gen2_6db << 12);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_SWING_FULL,
+ imx6_pcie->tx_swing_full << 18);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_SWING_LOW,
+ imx6_pcie->tx_swing_low << 25);
+ break;
+ }
+
+ imx6_pcie_configure_type(imx6_pcie);
+}
+
+static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
+{
+ u32 val;
+ struct device *dev = imx6_pcie->pci->dev;
+
+ if (regmap_read_poll_timeout(imx6_pcie->iomuxc_gpr,
+ IOMUXC_GPR22, val,
+ val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED,
+ PHY_PLL_LOCK_WAIT_USLEEP_MAX,
+ PHY_PLL_LOCK_WAIT_TIMEOUT))
+ dev_err(dev, "PCIe PLL lock timeout\n");
+}
+
+static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
+{
+ unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy);
+ int mult, div;
+ u16 val;
+
+ if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
+ return 0;
+
+ switch (phy_rate) {
+ case 125000000:
+ /*
+ * The default settings of the MPLL are for a 125MHz input
+ * clock, so no need to reconfigure anything in that case.
+ */
+ return 0;
+ case 100000000:
+ mult = 25;
+ div = 0;
+ break;
+ case 200000000:
+ mult = 25;
+ div = 1;
+ break;
+ default:
+ dev_err(imx6_pcie->pci->dev,
+ "Unsupported PHY reference clock rate %lu\n", phy_rate);
+ return -EINVAL;
+ }
+
+ pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
+ val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
+ PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
+ val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
+ val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
+ pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);
+
+ pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val);
+ val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
+ PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
+ val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
+ val |= PCIE_PHY_ATEOVRD_EN;
+ pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val);
+
+ return 0;
+}
+
static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
{
u16 tmp;
@@ -367,61 +521,6 @@ static int imx6_pcie_attach_pd(struct device *dev)
return 0;
}
-static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
-{
- struct device *dev = imx6_pcie->pci->dev;
-
- switch (imx6_pcie->drvdata->variant) {
- case IMX7D:
- case IMX8MQ:
- reset_control_assert(imx6_pcie->pciephy_reset);
- fallthrough;
- case IMX8MM:
- reset_control_assert(imx6_pcie->apps_reset);
- break;
- case IMX6SX:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
- IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
- /* Force PCIe PHY reset */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
- IMX6SX_GPR5_PCIE_BTNRST_RESET,
- IMX6SX_GPR5_PCIE_BTNRST_RESET);
- break;
- case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_SW_RST,
- IMX6Q_GPR1_PCIE_SW_RST);
- break;
- case IMX6Q:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
- break;
- }
-
- if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
- int ret = regulator_disable(imx6_pcie->vpcie);
-
- if (ret)
- dev_err(dev, "failed to disable vpcie regulator: %d\n",
- ret);
- }
-
- /* Some boards don't have PCIe reset GPIO. */
- if (gpio_is_valid(imx6_pcie->reset_gpio))
- gpio_set_value_cansleep(imx6_pcie->reset_gpio,
- imx6_pcie->gpio_active_high);
-}
-
-static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
-{
- WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ &&
- imx6_pcie->drvdata->variant != IMX8MM);
- return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
-}
-
static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
{
struct dw_pcie *pci = imx6_pcie->pci;
@@ -482,38 +581,44 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
return ret;
}
-static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
+static void imx6_pcie_disable_ref_clk(struct imx6_pcie *imx6_pcie)
{
- u32 val;
- struct device *dev = imx6_pcie->pci->dev;
-
- if (regmap_read_poll_timeout(imx6_pcie->iomuxc_gpr,
- IOMUXC_GPR22, val,
- val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED,
- PHY_PLL_LOCK_WAIT_USLEEP_MAX,
- PHY_PLL_LOCK_WAIT_TIMEOUT))
- dev_err(dev, "PCIe PLL lock timeout\n");
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX6SX:
+ clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
+ break;
+ case IMX6QP:
+ case IMX6Q:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_REF_CLK_EN, 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_TEST_PD,
+ IMX6Q_GPR1_PCIE_TEST_PD);
+ break;
+ case IMX7D:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
+ IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
+ break;
+ case IMX8MM:
+ case IMX8MQ:
+ clk_disable_unprepare(imx6_pcie->pcie_aux);
+ break;
+ default:
+ break;
+ }
}
-static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
+static int imx6_pcie_clk_enable(struct imx6_pcie *imx6_pcie)
{
struct dw_pcie *pci = imx6_pcie->pci;
struct device *dev = pci->dev;
int ret;
- if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) {
- ret = regulator_enable(imx6_pcie->vpcie);
- if (ret) {
- dev_err(dev, "failed to enable vpcie regulator: %d\n",
- ret);
- return;
- }
- }
-
ret = clk_prepare_enable(imx6_pcie->pcie_phy);
if (ret) {
dev_err(dev, "unable to enable pcie_phy clock\n");
- goto err_pcie_phy;
+ return ret;
}
ret = clk_prepare_enable(imx6_pcie->pcie_bus);
@@ -534,25 +639,75 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
goto err_ref_clk;
}
+ /* allow the clocks to stabilize */
+ usleep_range(200, 500);
+ return 0;
+
+err_ref_clk:
+ clk_disable_unprepare(imx6_pcie->pcie);
+err_pcie:
+ clk_disable_unprepare(imx6_pcie->pcie_bus);
+err_pcie_bus:
+ clk_disable_unprepare(imx6_pcie->pcie_phy);
+
+ return ret;
+}
+
+static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
+{
+ imx6_pcie_disable_ref_clk(imx6_pcie);
+ clk_disable_unprepare(imx6_pcie->pcie);
+ clk_disable_unprepare(imx6_pcie->pcie_bus);
+ clk_disable_unprepare(imx6_pcie->pcie_phy);
+}
+
+static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
+{
switch (imx6_pcie->drvdata->variant) {
+ case IMX7D:
+ case IMX8MQ:
+ reset_control_assert(imx6_pcie->pciephy_reset);
+ fallthrough;
case IMX8MM:
- if (phy_power_on(imx6_pcie->phy))
- dev_err(dev, "unable to power on PHY\n");
+ reset_control_assert(imx6_pcie->apps_reset);
break;
- default:
+ case IMX6SX:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
+ /* Force PCIe PHY reset */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
+ IMX6SX_GPR5_PCIE_BTNRST_RESET,
+ IMX6SX_GPR5_PCIE_BTNRST_RESET);
+ break;
+ case IMX6QP:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_SW_RST,
+ IMX6Q_GPR1_PCIE_SW_RST);
+ break;
+ case IMX6Q:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
break;
}
- /* allow the clocks to stabilize */
- usleep_range(200, 500);
+
+ /* Some boards don't have PCIe reset GPIO. */
+ if (gpio_is_valid(imx6_pcie->reset_gpio))
+ gpio_set_value_cansleep(imx6_pcie->reset_gpio,
+ imx6_pcie->gpio_active_high);
+}
+
+static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
+{
+ struct dw_pcie *pci = imx6_pcie->pci;
+ struct device *dev = pci->dev;
switch (imx6_pcie->drvdata->variant) {
case IMX8MQ:
reset_control_deassert(imx6_pcie->pciephy_reset);
break;
- case IMX8MM:
- if (phy_init(imx6_pcie->phy))
- dev_err(dev, "waiting for phy ready timeout!\n");
- break;
case IMX7D:
reset_control_deassert(imx6_pcie->pciephy_reset);
@@ -588,6 +743,7 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
usleep_range(200, 500);
break;
case IMX6Q: /* Nothing to do */
+ case IMX8MM:
break;
}
@@ -600,153 +756,6 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
msleep(100);
}
- return;
-
-err_ref_clk:
- clk_disable_unprepare(imx6_pcie->pcie);
-err_pcie:
- clk_disable_unprepare(imx6_pcie->pcie_bus);
-err_pcie_bus:
- clk_disable_unprepare(imx6_pcie->pcie_phy);
-err_pcie_phy:
- if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
- ret = regulator_disable(imx6_pcie->vpcie);
- if (ret)
- dev_err(dev, "failed to disable vpcie regulator: %d\n",
- ret);
- }
-}
-
-static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
-{
- unsigned int mask, val;
-
- if (imx6_pcie->drvdata->variant == IMX8MQ &&
- imx6_pcie->controller_id == 1) {
- mask = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE;
- val = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
- PCI_EXP_TYPE_ROOT_PORT);
- } else {
- mask = IMX6Q_GPR12_DEVICE_TYPE;
- val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE,
- PCI_EXP_TYPE_ROOT_PORT);
- }
-
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, mask, val);
-}
-
-static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
-{
- switch (imx6_pcie->drvdata->variant) {
- case IMX8MM:
- /*
- * The PHY initialization had been done in the PHY
- * driver, break here directly.
- */
- break;
- case IMX8MQ:
- /*
- * TODO: Currently this code assumes external
- * oscillator is being used
- */
- regmap_update_bits(imx6_pcie->iomuxc_gpr,
- imx6_pcie_grp_offset(imx6_pcie),
- IMX8MQ_GPR_PCIE_REF_USE_PAD,
- IMX8MQ_GPR_PCIE_REF_USE_PAD);
- /*
- * Regarding the datasheet, the PCIE_VPH is suggested
- * to be 1.8V. If the PCIE_VPH is supplied by 3.3V, the
- * VREG_BYPASS should be cleared to zero.
- */
- if (imx6_pcie->vph &&
- regulator_get_voltage(imx6_pcie->vph) > 3000000)
- regmap_update_bits(imx6_pcie->iomuxc_gpr,
- imx6_pcie_grp_offset(imx6_pcie),
- IMX8MQ_GPR_PCIE_VREG_BYPASS,
- 0);
- break;
- case IMX7D:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
- break;
- case IMX6SX:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_RX_EQ_MASK,
- IMX6SX_GPR12_PCIE_RX_EQ_2);
- fallthrough;
- default:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
-
- /* configure constant input signal to the pcie ctrl and phy */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
-
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_DEEMPH_GEN1,
- imx6_pcie->tx_deemph_gen1 << 0);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
- imx6_pcie->tx_deemph_gen2_3p5db << 6);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
- imx6_pcie->tx_deemph_gen2_6db << 12);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_SWING_FULL,
- imx6_pcie->tx_swing_full << 18);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_SWING_LOW,
- imx6_pcie->tx_swing_low << 25);
- break;
- }
-
- imx6_pcie_configure_type(imx6_pcie);
-}
-
-static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
-{
- unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy);
- int mult, div;
- u16 val;
-
- if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
- return 0;
-
- switch (phy_rate) {
- case 125000000:
- /*
- * The default settings of the MPLL are for a 125MHz input
- * clock, so no need to reconfigure anything in that case.
- */
- return 0;
- case 100000000:
- mult = 25;
- div = 0;
- break;
- case 200000000:
- mult = 25;
- div = 1;
- break;
- default:
- dev_err(imx6_pcie->pci->dev,
- "Unsupported PHY reference clock rate %lu\n", phy_rate);
- return -EINVAL;
- }
-
- pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
- val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
- PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
- val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
- val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
- pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);
-
- pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val);
- val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
- PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
- val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
- val |= PCIE_PHY_ATEOVRD_EN;
- pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val);
-
return 0;
}
@@ -789,6 +798,25 @@ static void imx6_pcie_ltssm_enable(struct device *dev)
}
}
+static void imx6_pcie_ltssm_disable(struct device *dev)
+{
+ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX6Q:
+ case IMX6SX:
+ case IMX6QP:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2, 0);
+ break;
+ case IMX7D:
+ case IMX8MQ:
+ case IMX8MM:
+ reset_control_assert(imx6_pcie->apps_reset);
+ break;
+ }
+}
+
static int imx6_pcie_start_link(struct dw_pcie *pci)
{
struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
@@ -802,21 +830,26 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
* started in Gen2 mode, there is a possibility the devices on the
* bus will not be detected at all. This happens with PCIe switches.
*/
+ dw_pcie_dbi_ro_wr_en(pci);
tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
tmp &= ~PCI_EXP_LNKCAP_SLS;
tmp |= PCI_EXP_LNKCAP_SLS_2_5GB;
dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
+ dw_pcie_dbi_ro_wr_dis(pci);
/* Start LTSSM. */
imx6_pcie_ltssm_enable(dev);
- dw_pcie_wait_for_link(pci);
+ ret = dw_pcie_wait_for_link(pci);
+ if (ret)
+ goto err_reset_phy;
- if (pci->link_gen == 2) {
- /* Allow Gen2 mode after the link is up. */
+ if (pci->link_gen > 1) {
+ /* Allow faster modes after the link is up */
+ dw_pcie_dbi_ro_wr_en(pci);
tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
tmp &= ~PCI_EXP_LNKCAP_SLS;
- tmp |= PCI_EXP_LNKCAP_SLS_5_0GB;
+ tmp |= pci->link_gen;
dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
/*
@@ -826,6 +859,7 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
tmp |= PORT_LOGIC_SPEED_CHANGE;
dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
+ dw_pcie_dbi_ro_wr_dis(pci);
if (imx6_pcie->drvdata->flags &
IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE) {
@@ -846,34 +880,110 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
}
/* Make sure link training is finished as well! */
- dw_pcie_wait_for_link(pci);
+ ret = dw_pcie_wait_for_link(pci);
+ if (ret)
+ goto err_reset_phy;
} else {
- dev_info(dev, "Link: Gen2 disabled\n");
+ dev_info(dev, "Link: Only Gen1 is enabled\n");
}
+ imx6_pcie->link_is_up = true;
tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS);
return 0;
err_reset_phy:
+ imx6_pcie->link_is_up = false;
dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0),
dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1));
imx6_pcie_reset_phy(imx6_pcie);
- return ret;
+ return 0;
+}
+
+static void imx6_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct device *dev = pci->dev;
+
+ /* Turn off PCIe LTSSM */
+ imx6_pcie_ltssm_disable(dev);
}
-static int imx6_pcie_host_init(struct pcie_port *pp)
+static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+ int ret;
+
+ if (imx6_pcie->vpcie) {
+ ret = regulator_enable(imx6_pcie->vpcie);
+ if (ret) {
+ dev_err(dev, "failed to enable vpcie regulator: %d\n",
+ ret);
+ return ret;
+ }
+ }
imx6_pcie_assert_core_reset(imx6_pcie);
imx6_pcie_init_phy(imx6_pcie);
- imx6_pcie_deassert_core_reset(imx6_pcie);
+
+ ret = imx6_pcie_clk_enable(imx6_pcie);
+ if (ret) {
+ dev_err(dev, "unable to enable pcie clocks: %d\n", ret);
+ goto err_reg_disable;
+ }
+
+ if (imx6_pcie->phy) {
+ ret = phy_power_on(imx6_pcie->phy);
+ if (ret) {
+ dev_err(dev, "pcie PHY power up failed\n");
+ goto err_clk_disable;
+ }
+ }
+
+ ret = imx6_pcie_deassert_core_reset(imx6_pcie);
+ if (ret < 0) {
+ dev_err(dev, "pcie deassert core reset failed: %d\n", ret);
+ goto err_phy_off;
+ }
+
+ if (imx6_pcie->phy) {
+ ret = phy_init(imx6_pcie->phy);
+ if (ret) {
+ dev_err(dev, "waiting for PHY ready timeout!\n");
+ goto err_phy_off;
+ }
+ }
imx6_setup_phy_mpll(imx6_pcie);
return 0;
+
+err_phy_off:
+ if (imx6_pcie->phy)
+ phy_power_off(imx6_pcie->phy);
+err_clk_disable:
+ imx6_pcie_clk_disable(imx6_pcie);
+err_reg_disable:
+ if (imx6_pcie->vpcie)
+ regulator_disable(imx6_pcie->vpcie);
+ return ret;
+}
+
+static void imx6_pcie_host_exit(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+
+ if (imx6_pcie->phy) {
+ if (phy_power_off(imx6_pcie->phy))
+ dev_err(pci->dev, "unable to power off PHY\n");
+ phy_exit(imx6_pcie->phy);
+ }
+ imx6_pcie_clk_disable(imx6_pcie);
+
+ if (imx6_pcie->vpcie)
+ regulator_disable(imx6_pcie->vpcie);
}
static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
@@ -884,26 +994,6 @@ static const struct dw_pcie_ops dw_pcie_ops = {
.start_link = imx6_pcie_start_link,
};
-#ifdef CONFIG_PM_SLEEP
-static void imx6_pcie_ltssm_disable(struct device *dev)
-{
- struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
-
- switch (imx6_pcie->drvdata->variant) {
- case IMX6SX:
- case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_PCIE_CTL_2, 0);
- break;
- case IMX7D:
- case IMX8MM:
- reset_control_assert(imx6_pcie->apps_reset);
- break;
- default:
- dev_err(dev, "ltssm_disable not supported\n");
- }
-}
-
static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
{
struct device *dev = imx6_pcie->pci->dev;
@@ -941,49 +1031,17 @@ pm_turnoff_sleep:
usleep_range(1000, 10000);
}
-static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
-{
- clk_disable_unprepare(imx6_pcie->pcie);
- clk_disable_unprepare(imx6_pcie->pcie_phy);
- clk_disable_unprepare(imx6_pcie->pcie_bus);
-
- switch (imx6_pcie->drvdata->variant) {
- case IMX6SX:
- clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
- break;
- case IMX7D:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
- IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
- break;
- case IMX8MQ:
- case IMX8MM:
- clk_disable_unprepare(imx6_pcie->pcie_aux);
- break;
- default:
- break;
- }
-}
-
static int imx6_pcie_suspend_noirq(struct device *dev)
{
struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+ struct dw_pcie_rp *pp = &imx6_pcie->pci->pp;
if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
return 0;
imx6_pcie_pm_turnoff(imx6_pcie);
- imx6_pcie_ltssm_disable(dev);
- imx6_pcie_clk_disable(imx6_pcie);
- switch (imx6_pcie->drvdata->variant) {
- case IMX8MM:
- if (phy_power_off(imx6_pcie->phy))
- dev_err(dev, "unable to power off PHY\n");
- phy_exit(imx6_pcie->phy);
- break;
- default:
- break;
- }
+ imx6_pcie_stop_link(imx6_pcie->pci);
+ imx6_pcie_host_exit(pp);
return 0;
}
@@ -992,27 +1050,25 @@ static int imx6_pcie_resume_noirq(struct device *dev)
{
int ret;
struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
- struct pcie_port *pp = &imx6_pcie->pci->pp;
+ struct dw_pcie_rp *pp = &imx6_pcie->pci->pp;
if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
return 0;
- imx6_pcie_assert_core_reset(imx6_pcie);
- imx6_pcie_init_phy(imx6_pcie);
- imx6_pcie_deassert_core_reset(imx6_pcie);
+ ret = imx6_pcie_host_init(pp);
+ if (ret)
+ return ret;
dw_pcie_setup_rc(pp);
- ret = imx6_pcie_start_link(imx6_pcie->pci);
- if (ret < 0)
- dev_info(dev, "pcie link is down after resume.\n");
+ if (imx6_pcie->link_is_up)
+ imx6_pcie_start_link(imx6_pcie->pci);
return 0;
}
-#endif
static const struct dev_pm_ops imx6_pcie_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq,
- imx6_pcie_resume_noirq)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq,
+ imx6_pcie_resume_noirq)
};
static int imx6_pcie_probe(struct platform_device *pdev)
@@ -1291,7 +1347,7 @@ static struct platform_driver imx6_pcie_driver = {
static void imx6_pcie_quirk(struct pci_dev *dev)
{
struct pci_bus *bus = dev->bus;
- struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie_rp *pp = bus->sysdata;
/* Bus parent is the PCI bridge, its parent is this platform driver */
if (!bus->dev.parent || !bus->dev.parent->parent)
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index d10e5fd0f83c..78818853af9e 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -109,7 +109,7 @@ struct ks_pcie_of_data {
enum dw_pcie_device_mode mode;
const struct dw_pcie_host_ops *host_ops;
const struct dw_pcie_ep_ops *ep_ops;
- unsigned int version;
+ u32 version;
};
struct keystone_pcie {
@@ -147,7 +147,7 @@ static void ks_pcie_app_writel(struct keystone_pcie *ks_pcie, u32 offset,
static void ks_pcie_msi_irq_ack(struct irq_data *data)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
struct keystone_pcie *ks_pcie;
u32 irq = data->hwirq;
struct dw_pcie *pci;
@@ -167,7 +167,7 @@ static void ks_pcie_msi_irq_ack(struct irq_data *data)
static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
struct keystone_pcie *ks_pcie;
struct dw_pcie *pci;
u64 msi_target;
@@ -192,7 +192,7 @@ static int ks_pcie_msi_set_affinity(struct irq_data *irq_data,
static void ks_pcie_msi_mask(struct irq_data *data)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
struct keystone_pcie *ks_pcie;
u32 irq = data->hwirq;
struct dw_pcie *pci;
@@ -216,7 +216,7 @@ static void ks_pcie_msi_mask(struct irq_data *data)
static void ks_pcie_msi_unmask(struct irq_data *data)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
struct keystone_pcie *ks_pcie;
u32 irq = data->hwirq;
struct dw_pcie *pci;
@@ -247,7 +247,7 @@ static struct irq_chip ks_pcie_msi_irq_chip = {
.irq_unmask = ks_pcie_msi_unmask,
};
-static int ks_pcie_msi_host_init(struct pcie_port *pp)
+static int ks_pcie_msi_host_init(struct dw_pcie_rp *pp)
{
pp->msi_irq_chip = &ks_pcie_msi_irq_chip;
return dw_pcie_allocate_domains(pp);
@@ -390,7 +390,7 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
u32 val;
u32 num_viewport = ks_pcie->num_viewport;
struct dw_pcie *pci = ks_pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
u64 start, end;
struct resource *mem;
int i;
@@ -428,7 +428,7 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus,
unsigned int devfn, int where)
{
- struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
u32 reg;
@@ -456,7 +456,7 @@ static struct pci_ops ks_child_pcie_ops = {
*/
static int ks_pcie_v3_65_add_bus(struct pci_bus *bus)
{
- struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
@@ -574,7 +574,7 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
u32 offset = irq - ks_pcie->msi_host_irq;
struct dw_pcie *pci = ks_pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = pci->dev;
struct irq_chip *chip = irq_desc_get_chip(desc);
u32 vector, reg, pos;
@@ -799,7 +799,7 @@ static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
return 0;
}
-static int __init ks_pcie_host_init(struct pcie_port *pp)
+static int __init ks_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
@@ -1069,19 +1069,19 @@ static int ks_pcie_am654_set_mode(struct device *dev,
static const struct ks_pcie_of_data ks_pcie_rc_of_data = {
.host_ops = &ks_pcie_host_ops,
- .version = 0x365A,
+ .version = DW_PCIE_VER_365A,
};
static const struct ks_pcie_of_data ks_pcie_am654_rc_of_data = {
.host_ops = &ks_pcie_am654_host_ops,
.mode = DW_PCIE_RC_TYPE,
- .version = 0x490A,
+ .version = DW_PCIE_VER_490A,
};
static const struct ks_pcie_of_data ks_pcie_am654_ep_of_data = {
.ep_ops = &ks_pcie_am654_ep_ops,
.mode = DW_PCIE_EP_TYPE,
- .version = 0x490A,
+ .version = DW_PCIE_VER_490A,
};
static const struct of_device_id ks_pcie_of_match[] = {
@@ -1114,12 +1114,12 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
struct device_link **link;
struct gpio_desc *gpiod;
struct resource *res;
- unsigned int version;
void __iomem *base;
u32 num_viewport;
struct phy **phy;
u32 num_lanes;
char name[10];
+ u32 version;
int ret;
int irq;
int i;
@@ -1233,7 +1233,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
goto err_get_sync;
}
- if (pci->version >= 0x480A)
+ if (dw_pcie_ver_is_ge(pci, 480A))
ret = ks_pcie_am654_set_mode(dev, mode);
else
ret = ks_pcie_set_mode(dev);
@@ -1324,7 +1324,7 @@ static struct platform_driver ks_pcie_driver __refdata = {
.remove = __exit_p(ks_pcie_remove),
.driver = {
.name = "keystone-pcie",
- .of_match_table = of_match_ptr(ks_pcie_of_match),
+ .of_match_table = ks_pcie_of_match,
},
};
builtin_platform_driver(ks_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
index 39f4664bd84c..ad99707b3b99 100644
--- a/drivers/pci/controller/dwc/pci-layerscape-ep.c
+++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
@@ -32,15 +32,6 @@ struct ls_pcie_ep {
const struct ls_pcie_ep_drvdata *drvdata;
};
-static int ls_pcie_establish_link(struct dw_pcie *pci)
-{
- return 0;
-}
-
-static const struct dw_pcie_ops dw_ls_pcie_ep_ops = {
- .start_link = ls_pcie_establish_link,
-};
-
static const struct pci_epc_features*
ls_pcie_ep_get_features(struct dw_pcie_ep *ep)
{
@@ -106,19 +97,16 @@ static const struct dw_pcie_ep_ops ls_pcie_ep_ops = {
static const struct ls_pcie_ep_drvdata ls1_ep_drvdata = {
.ops = &ls_pcie_ep_ops,
- .dw_pcie_ops = &dw_ls_pcie_ep_ops,
};
static const struct ls_pcie_ep_drvdata ls2_ep_drvdata = {
.func_offset = 0x20000,
.ops = &ls_pcie_ep_ops,
- .dw_pcie_ops = &dw_ls_pcie_ep_ops,
};
static const struct ls_pcie_ep_drvdata lx2_ep_drvdata = {
.func_offset = 0x8000,
.ops = &ls_pcie_ep_ops,
- .dw_pcie_ops = &dw_ls_pcie_ep_ops,
};
static const struct of_device_id ls_pcie_ep_of_match[] = {
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c
index 6a4f0619bb1c..879b8692f96a 100644
--- a/drivers/pci/controller/dwc/pci-layerscape.c
+++ b/drivers/pci/controller/dwc/pci-layerscape.c
@@ -74,7 +74,7 @@ static void ls_pcie_fix_error_response(struct ls_pcie *pcie)
iowrite32(PCIE_ABSERR_SETTING, pci->dbi_base + PCIE_ABSERR);
}
-static int ls_pcie_host_init(struct pcie_port *pp)
+static int ls_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct ls_pcie *pcie = to_ls_pcie(pci);
diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
index f44bf347904a..c1527693bed9 100644
--- a/drivers/pci/controller/dwc/pci-meson.c
+++ b/drivers/pci/controller/dwc/pci-meson.c
@@ -370,7 +370,7 @@ static int meson_pcie_link_up(struct dw_pcie *pci)
return 0;
}
-static int meson_pcie_host_init(struct pcie_port *pp)
+static int meson_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct meson_pcie *mp = to_meson_pcie(pci);
diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c
index e8afa50129a8..b8cb77c9c4bd 100644
--- a/drivers/pci/controller/dwc/pcie-al.c
+++ b/drivers/pci/controller/dwc/pcie-al.c
@@ -217,7 +217,7 @@ static inline void al_pcie_target_bus_set(struct al_pcie *pcie,
static void __iomem *al_pcie_conf_addr_map_bus(struct pci_bus *bus,
unsigned int devfn, int where)
{
- struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie_rp *pp = bus->sysdata;
struct al_pcie *pcie = to_al_pcie(to_dw_pcie_from_pp(pp));
unsigned int busnr = bus->number;
struct al_pcie_target_bus_cfg *target_bus_cfg = &pcie->target_bus_cfg;
@@ -245,7 +245,7 @@ static struct pci_ops al_child_pci_ops = {
static void al_pcie_config_prepare(struct al_pcie *pcie)
{
struct al_pcie_target_bus_cfg *target_bus_cfg;
- struct pcie_port *pp = &pcie->pci->pp;
+ struct dw_pcie_rp *pp = &pcie->pci->pp;
unsigned int ecam_bus_mask;
u32 cfg_control_offset;
u8 subordinate_bus;
@@ -289,7 +289,7 @@ static void al_pcie_config_prepare(struct al_pcie *pcie)
al_pcie_controller_writel(pcie, cfg_control_offset, reg);
}
-static int al_pcie_host_init(struct pcie_port *pp)
+static int al_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct al_pcie *pcie = to_al_pcie(pci);
diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c
index 4e2552dcf982..dc469ef8e99b 100644
--- a/drivers/pci/controller/dwc/pcie-armada8k.c
+++ b/drivers/pci/controller/dwc/pcie-armada8k.c
@@ -166,7 +166,7 @@ static int armada8k_pcie_start_link(struct dw_pcie *pci)
return 0;
}
-static int armada8k_pcie_host_init(struct pcie_port *pp)
+static int armada8k_pcie_host_init(struct dw_pcie_rp *pp)
{
u32 reg;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -233,7 +233,7 @@ static int armada8k_add_pcie_port(struct armada8k_pcie *pcie,
struct platform_device *pdev)
{
struct dw_pcie *pci = pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = &pdev->dev;
int ret;
@@ -343,7 +343,7 @@ static struct platform_driver armada8k_pcie_driver = {
.probe = armada8k_pcie_probe,
.driver = {
.name = "armada8k-pcie",
- .of_match_table = of_match_ptr(armada8k_pcie_of_match),
+ .of_match_table = armada8k_pcie_of_match,
.suppress_bind_attrs = true,
},
};
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
index 2f15441770e1..98102079e26d 100644
--- a/drivers/pci/controller/dwc/pcie-artpec6.c
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -97,7 +97,7 @@ static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u
static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
{
struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
struct dw_pcie_ep *ep = &pci->ep;
switch (artpec6_pcie->mode) {
@@ -315,7 +315,7 @@ static void artpec6_pcie_deassert_core_reset(struct artpec6_pcie *artpec6_pcie)
usleep_range(100, 200);
}
-static int artpec6_pcie_host_init(struct pcie_port *pp)
+static int artpec6_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 0eda8236c125..83ddb190292e 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -154,22 +154,25 @@ static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
return 0;
}
-static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_barno bar, dma_addr_t cpu_addr,
- enum dw_pcie_as_type as_type)
+static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
+ dma_addr_t cpu_addr, enum pci_barno bar)
{
int ret;
u32 free_win;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- free_win = find_first_zero_bit(ep->ib_window_map, pci->num_ib_windows);
+ if (!ep->bar_to_atu[bar])
+ free_win = find_first_zero_bit(ep->ib_window_map, pci->num_ib_windows);
+ else
+ free_win = ep->bar_to_atu[bar];
+
if (free_win >= pci->num_ib_windows) {
dev_err(pci->dev, "No free inbound window\n");
return -EINVAL;
}
- ret = dw_pcie_prog_inbound_atu(pci, func_no, free_win, bar, cpu_addr,
- as_type);
+ ret = dw_pcie_prog_inbound_atu(pci, func_no, free_win, type,
+ cpu_addr, bar);
if (ret < 0) {
dev_err(pci->dev, "Failed to program IB window\n");
return ret;
@@ -185,8 +188,9 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,
phys_addr_t phys_addr,
u64 pci_addr, size_t size)
{
- u32 free_win;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ u32 free_win;
+ int ret;
free_win = find_first_zero_bit(ep->ob_window_map, pci->num_ob_windows);
if (free_win >= pci->num_ob_windows) {
@@ -194,8 +198,10 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,
return -EINVAL;
}
- dw_pcie_prog_ep_outbound_atu(pci, func_no, free_win, PCIE_ATU_TYPE_MEM,
- phys_addr, pci_addr, size);
+ ret = dw_pcie_prog_ep_outbound_atu(pci, func_no, free_win, PCIE_ATU_TYPE_MEM,
+ phys_addr, pci_addr, size);
+ if (ret)
+ return ret;
set_bit(free_win, ep->ob_window_map);
ep->outbound_addr[free_win] = phys_addr;
@@ -213,38 +219,40 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
__dw_pcie_ep_reset_bar(pci, func_no, bar, epf_bar->flags);
- dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND);
+ dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, atu_index);
clear_bit(atu_index, ep->ib_window_map);
ep->epf_bar[bar] = NULL;
+ ep->bar_to_atu[bar] = 0;
}
static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
- int ret;
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
enum pci_barno bar = epf_bar->barno;
size_t size = epf_bar->size;
int flags = epf_bar->flags;
- enum dw_pcie_as_type as_type;
- u32 reg;
unsigned int func_offset = 0;
+ int ret, type;
+ u32 reg;
func_offset = dw_pcie_ep_func_select(ep, func_no);
reg = PCI_BASE_ADDRESS_0 + (4 * bar) + func_offset;
if (!(flags & PCI_BASE_ADDRESS_SPACE))
- as_type = DW_PCIE_AS_MEM;
+ type = PCIE_ATU_TYPE_MEM;
else
- as_type = DW_PCIE_AS_IO;
+ type = PCIE_ATU_TYPE_IO;
- ret = dw_pcie_ep_inbound_atu(ep, func_no, bar,
- epf_bar->phys_addr, as_type);
+ ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar);
if (ret)
return ret;
+ if (ep->epf_bar[bar])
+ return 0;
+
dw_pcie_dbi_ro_wr_en(pci);
dw_pcie_writel_dbi2(pci, reg, lower_32_bits(size - 1));
@@ -289,7 +297,7 @@ static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
if (ret < 0)
return;
- dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_OUTBOUND);
+ dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, atu_index);
clear_bit(atu_index, ep->ob_window_map);
}
@@ -435,8 +443,7 @@ static void dw_pcie_ep_stop(struct pci_epc *epc)
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- if (pci->ops && pci->ops->stop_link)
- pci->ops->stop_link(pci);
+ dw_pcie_stop_link(pci);
}
static int dw_pcie_ep_start(struct pci_epc *epc)
@@ -444,10 +451,7 @@ static int dw_pcie_ep_start(struct pci_epc *epc)
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- if (!pci->ops || !pci->ops->start_link)
- return -EINVAL;
-
- return pci->ops->start_link(pci);
+ return dw_pcie_start_link(pci);
}
static const struct pci_epc_features*
@@ -699,17 +703,15 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
if (!pci->dbi_base2) {
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
- if (!res)
+ if (!res) {
pci->dbi_base2 = pci->dbi_base + SZ_4K;
- else {
+ } else {
pci->dbi_base2 = devm_pci_remap_cfg_resource(dev, res);
if (IS_ERR(pci->dbi_base2))
return PTR_ERR(pci->dbi_base2);
}
}
- dw_pcie_iatu_detect(pci);
-
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
if (!res)
return -EINVAL;
@@ -717,17 +719,17 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
ep->phys_base = res->start;
ep->addr_size = resource_size(res);
- ep->ib_window_map = devm_kcalloc(dev,
- BITS_TO_LONGS(pci->num_ib_windows),
- sizeof(long),
- GFP_KERNEL);
+ dw_pcie_version_detect(pci);
+
+ dw_pcie_iatu_detect(pci);
+
+ ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows,
+ GFP_KERNEL);
if (!ep->ib_window_map)
return -ENOMEM;
- ep->ob_window_map = devm_kcalloc(dev,
- BITS_TO_LONGS(pci->num_ob_windows),
- sizeof(long),
- GFP_KERNEL);
+ ep->ob_window_map = devm_bitmap_zalloc(dev, pci->num_ob_windows,
+ GFP_KERNEL);
if (!ep->ob_window_map)
return -ENOMEM;
@@ -780,8 +782,9 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
epc->mem->window.page_size);
if (!ep->msi_mem) {
+ ret = -ENOMEM;
dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
- return -ENOMEM;
+ goto err_exit_epc_mem;
}
if (ep->ops->get_features) {
@@ -790,6 +793,19 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
return 0;
}
- return dw_pcie_ep_init_complete(ep);
+ ret = dw_pcie_ep_init_complete(ep);
+ if (ret)
+ goto err_free_epc_mem;
+
+ return 0;
+
+err_free_epc_mem:
+ pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
+ epc->mem->window.page_size);
+
+err_exit_epc_mem:
+ pci_epc_mem_exit(epc);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_init);
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 9979302532b7..7746f94a715f 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -53,7 +53,7 @@ static struct msi_domain_info dw_pcie_msi_domain_info = {
};
/* MSI int handler */
-irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
+irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
{
int i, pos;
unsigned long val;
@@ -88,7 +88,7 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
static void dw_chained_msi_isr(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
- struct pcie_port *pp;
+ struct dw_pcie_rp *pp;
chained_irq_enter(chip, desc);
@@ -100,7 +100,7 @@ static void dw_chained_msi_isr(struct irq_desc *desc)
static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
u64 msi_target;
@@ -123,7 +123,7 @@ static int dw_pci_msi_set_affinity(struct irq_data *d,
static void dw_pci_bottom_mask(struct irq_data *d)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
unsigned int res, bit, ctrl;
unsigned long flags;
@@ -142,7 +142,7 @@ static void dw_pci_bottom_mask(struct irq_data *d)
static void dw_pci_bottom_unmask(struct irq_data *d)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
unsigned int res, bit, ctrl;
unsigned long flags;
@@ -161,7 +161,7 @@ static void dw_pci_bottom_unmask(struct irq_data *d)
static void dw_pci_bottom_ack(struct irq_data *d)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
unsigned int res, bit, ctrl;
@@ -185,7 +185,7 @@ static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs,
void *args)
{
- struct pcie_port *pp = domain->host_data;
+ struct dw_pcie_rp *pp = domain->host_data;
unsigned long flags;
u32 i;
int bit;
@@ -213,7 +213,7 @@ static void dw_pcie_irq_domain_free(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs)
{
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
- struct pcie_port *pp = domain->host_data;
+ struct dw_pcie_rp *pp = domain->host_data;
unsigned long flags;
raw_spin_lock_irqsave(&pp->lock, flags);
@@ -229,7 +229,7 @@ static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
.free = dw_pcie_irq_domain_free,
};
-int dw_pcie_allocate_domains(struct pcie_port *pp)
+int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);
@@ -255,10 +255,15 @@ int dw_pcie_allocate_domains(struct pcie_port *pp)
return 0;
}
-static void dw_pcie_free_msi(struct pcie_port *pp)
+static void dw_pcie_free_msi(struct dw_pcie_rp *pp)
{
- if (pp->msi_irq)
- irq_set_chained_handler_and_data(pp->msi_irq, NULL, NULL);
+ u32 ctrl;
+
+ for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
+ if (pp->msi_irq[ctrl] > 0)
+ irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
+ NULL, NULL);
+ }
irq_domain_remove(pp->msi_domain);
irq_domain_remove(pp->irq_domain);
@@ -267,12 +272,13 @@ static void dw_pcie_free_msi(struct pcie_port *pp)
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
- dma_unmap_single_attrs(dev, pp->msi_data, sizeof(pp->msi_msg),
- DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ dma_unmap_page(dev, pp->msi_data, PAGE_SIZE, DMA_FROM_DEVICE);
+ if (pp->msi_page)
+ __free_page(pp->msi_page);
}
}
-static void dw_pcie_msi_init(struct pcie_port *pp)
+static void dw_pcie_msi_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
u64 msi_target = (u64)pp->msi_data;
@@ -285,7 +291,112 @@ static void dw_pcie_msi_init(struct pcie_port *pp)
dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
}
-int dw_pcie_host_init(struct pcie_port *pp)
+static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ u32 ctrl, max_vectors;
+ int irq;
+
+ /* Parse any "msiX" IRQs described in the devicetree */
+ for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
+ char msi_name[] = "msiX";
+
+ msi_name[3] = '0' + ctrl;
+ irq = platform_get_irq_byname_optional(pdev, msi_name);
+ if (irq == -ENXIO)
+ break;
+ if (irq < 0)
+ return dev_err_probe(dev, irq,
+ "Failed to parse MSI IRQ '%s'\n",
+ msi_name);
+
+ pp->msi_irq[ctrl] = irq;
+ }
+
+ /* If no "msiX" IRQs, caller should fallback to "msi" IRQ */
+ if (ctrl == 0)
+ return -ENXIO;
+
+ max_vectors = ctrl * MAX_MSI_IRQS_PER_CTRL;
+ if (pp->num_vectors > max_vectors) {
+ dev_warn(dev, "Exceeding number of MSI vectors, limiting to %u\n",
+ max_vectors);
+ pp->num_vectors = max_vectors;
+ }
+ if (!pp->num_vectors)
+ pp->num_vectors = max_vectors;
+
+ return 0;
+}
+
+static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int ret;
+ u32 ctrl, num_ctrls;
+
+ for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++)
+ pp->irq_mask[ctrl] = ~0;
+
+ if (!pp->msi_irq[0]) {
+ ret = dw_pcie_parse_split_msi_irq(pp);
+ if (ret < 0 && ret != -ENXIO)
+ return ret;
+ }
+
+ if (!pp->num_vectors)
+ pp->num_vectors = MSI_DEF_NUM_VECTORS;
+ num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+
+ if (!pp->msi_irq[0]) {
+ pp->msi_irq[0] = platform_get_irq_byname_optional(pdev, "msi");
+ if (pp->msi_irq[0] < 0) {
+ pp->msi_irq[0] = platform_get_irq(pdev, 0);
+ if (pp->msi_irq[0] < 0)
+ return pp->msi_irq[0];
+ }
+ }
+
+ dev_dbg(dev, "Using %d MSI vectors\n", pp->num_vectors);
+
+ pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;
+
+ ret = dw_pcie_allocate_domains(pp);
+ if (ret)
+ return ret;
+
+ for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
+ if (pp->msi_irq[ctrl] > 0)
+ irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
+ dw_chained_msi_isr, pp);
+ }
+
+ ret = dma_set_mask(dev, DMA_BIT_MASK(32));
+ if (ret)
+ dev_warn(dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");
+
+ pp->msi_page = alloc_page(GFP_DMA32);
+ pp->msi_data = dma_map_page(dev, pp->msi_page, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ ret = dma_mapping_error(dev, pp->msi_data);
+ if (ret) {
+ dev_err(pci->dev, "Failed to map MSI data\n");
+ __free_page(pp->msi_page);
+ pp->msi_page = NULL;
+ pp->msi_data = 0;
+ dw_pcie_free_msi(pp);
+
+ return ret;
+ }
+
+ return 0;
+}
+
+int dw_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
@@ -293,17 +404,17 @@ int dw_pcie_host_init(struct pcie_port *pp)
struct platform_device *pdev = to_platform_device(dev);
struct resource_entry *win;
struct pci_host_bridge *bridge;
- struct resource *cfg_res;
+ struct resource *res;
int ret;
- raw_spin_lock_init(&pci->pp.lock);
+ raw_spin_lock_init(&pp->lock);
- cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
- if (cfg_res) {
- pp->cfg0_size = resource_size(cfg_res);
- pp->cfg0_base = cfg_res->start;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
+ if (res) {
+ pp->cfg0_size = resource_size(res);
+ pp->cfg0_base = res->start;
- pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, cfg_res);
+ pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
if (IS_ERR(pp->va_cfg0_base))
return PTR_ERR(pp->va_cfg0_base);
} else {
@@ -312,8 +423,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
}
if (!pci->dbi_base) {
- struct resource *dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_res);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
if (IS_ERR(pci->dbi_base))
return PTR_ERR(pci->dbi_base);
}
@@ -350,67 +461,39 @@ int dw_pcie_host_init(struct pcie_port *pp)
of_property_read_bool(np, "msi-parent") ||
of_property_read_bool(np, "msi-map"));
- if (!pp->num_vectors) {
+ /*
+ * For the has_msi_ctrl case the default assignment is handled
+ * in the dw_pcie_msi_host_init().
+ */
+ if (!pp->has_msi_ctrl && !pp->num_vectors) {
pp->num_vectors = MSI_DEF_NUM_VECTORS;
} else if (pp->num_vectors > MAX_MSI_IRQS) {
dev_err(dev, "Invalid number of vectors\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_deinit_host;
}
if (pp->ops->msi_host_init) {
ret = pp->ops->msi_host_init(pp);
if (ret < 0)
- return ret;
+ goto err_deinit_host;
} else if (pp->has_msi_ctrl) {
- u32 ctrl, num_ctrls;
-
- num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
- for (ctrl = 0; ctrl < num_ctrls; ctrl++)
- pp->irq_mask[ctrl] = ~0;
-
- if (!pp->msi_irq) {
- pp->msi_irq = platform_get_irq_byname_optional(pdev, "msi");
- if (pp->msi_irq < 0) {
- pp->msi_irq = platform_get_irq(pdev, 0);
- if (pp->msi_irq < 0)
- return pp->msi_irq;
- }
- }
-
- pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;
-
- ret = dw_pcie_allocate_domains(pp);
- if (ret)
- return ret;
-
- if (pp->msi_irq > 0)
- irq_set_chained_handler_and_data(pp->msi_irq,
- dw_chained_msi_isr,
- pp);
-
- ret = dma_set_mask(pci->dev, DMA_BIT_MASK(32));
- if (ret)
- dev_warn(pci->dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");
-
- pp->msi_data = dma_map_single_attrs(pci->dev, &pp->msi_msg,
- sizeof(pp->msi_msg),
- DMA_FROM_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
- ret = dma_mapping_error(pci->dev, pp->msi_data);
- if (ret) {
- dev_err(pci->dev, "Failed to map MSI data\n");
- pp->msi_data = 0;
- goto err_free_msi;
- }
+ ret = dw_pcie_msi_host_init(pp);
+ if (ret < 0)
+ goto err_deinit_host;
}
}
+ dw_pcie_version_detect(pci);
+
dw_pcie_iatu_detect(pci);
- dw_pcie_setup_rc(pp);
+ ret = dw_pcie_setup_rc(pp);
+ if (ret)
+ goto err_free_msi;
- if (!dw_pcie_link_up(pci) && pci->ops && pci->ops->start_link) {
- ret = pci->ops->start_link(pci);
+ if (!dw_pcie_link_up(pci)) {
+ ret = dw_pcie_start_link(pci);
if (ret)
goto err_free_msi;
}
@@ -421,32 +504,50 @@ int dw_pcie_host_init(struct pcie_port *pp)
bridge->sysdata = pp;
ret = pci_host_probe(bridge);
- if (!ret)
- return 0;
+ if (ret)
+ goto err_stop_link;
+
+ return 0;
+
+err_stop_link:
+ dw_pcie_stop_link(pci);
err_free_msi:
if (pp->has_msi_ctrl)
dw_pcie_free_msi(pp);
+
+err_deinit_host:
+ if (pp->ops->host_deinit)
+ pp->ops->host_deinit(pp);
+
return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_host_init);
-void dw_pcie_host_deinit(struct pcie_port *pp)
+void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
pci_stop_root_bus(pp->bridge->bus);
pci_remove_root_bus(pp->bridge->bus);
+
+ dw_pcie_stop_link(pci);
+
if (pp->has_msi_ctrl)
dw_pcie_free_msi(pp);
+
+ if (pp->ops->host_deinit)
+ pp->ops->host_deinit(pp);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);
static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
unsigned int devfn, int where)
{
- int type;
- u32 busdev;
- struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ int type, ret;
+ u32 busdev;
/*
* Checking whether the link is up here is a last line of defense
@@ -467,8 +568,10 @@ static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
else
type = PCIE_ATU_TYPE_CFG1;
-
- dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev, pp->cfg0_size);
+ ret = dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev,
+ pp->cfg0_size);
+ if (ret)
+ return NULL;
return pp->va_cfg0_base + where;
}
@@ -476,33 +579,45 @@ static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
- int ret;
- struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ int ret;
ret = pci_generic_config_read(bus, devfn, where, size, val);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
- if (!ret && pci->io_cfg_atu_shared)
- dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, pp->io_base,
- pp->io_bus_addr, pp->io_size);
+ if (pp->cfg0_io_shared) {
+ ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
+ pp->io_base, pp->io_bus_addr,
+ pp->io_size);
+ if (ret)
+ return PCIBIOS_SET_FAILED;
+ }
- return ret;
+ return PCIBIOS_SUCCESSFUL;
}
static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 val)
{
- int ret;
- struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ int ret;
ret = pci_generic_config_write(bus, devfn, where, size, val);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
- if (!ret && pci->io_cfg_atu_shared)
- dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, pp->io_base,
- pp->io_bus_addr, pp->io_size);
+ if (pp->cfg0_io_shared) {
+ ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
+ pp->io_base, pp->io_bus_addr,
+ pp->io_size);
+ if (ret)
+ return PCIBIOS_SET_FAILED;
+ }
- return ret;
+ return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops dw_child_pcie_ops = {
@@ -513,7 +628,7 @@ static struct pci_ops dw_child_pcie_ops = {
void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
{
- struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
if (PCI_SLOT(devfn) > 0)
@@ -529,11 +644,72 @@ static struct pci_ops dw_pcie_ops = {
.write = pci_generic_config_write,
};
-void dw_pcie_setup_rc(struct pcie_port *pp)
+static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
{
- int i;
- u32 val, ctrl, num_ctrls;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct resource_entry *entry;
+ int i, ret;
+
+ /* Note the very first outbound ATU is used for CFG IOs */
+ if (!pci->num_ob_windows) {
+ dev_err(pci->dev, "No outbound iATU found\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Ensure all outbound windows are disabled before proceeding with
+ * the MEM/IO ranges setups.
+ */
+ for (i = 0; i < pci->num_ob_windows; i++)
+ dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, i);
+
+ i = 0;
+ resource_list_for_each_entry(entry, &pp->bridge->windows) {
+ if (resource_type(entry->res) != IORESOURCE_MEM)
+ continue;
+
+ if (pci->num_ob_windows <= ++i)
+ break;
+
+ ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_MEM,
+ entry->res->start,
+ entry->res->start - entry->offset,
+ resource_size(entry->res));
+ if (ret) {
+ dev_err(pci->dev, "Failed to set MEM range %pr\n",
+ entry->res);
+ return ret;
+ }
+ }
+
+ if (pp->io_size) {
+ if (pci->num_ob_windows > ++i) {
+ ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_IO,
+ pp->io_base,
+ pp->io_bus_addr,
+ pp->io_size);
+ if (ret) {
+ dev_err(pci->dev, "Failed to set IO range %pr\n",
+ entry->res);
+ return ret;
+ }
+ } else {
+ pp->cfg0_io_shared = true;
+ }
+ }
+
+ if (pci->num_ob_windows <= i)
+ dev_warn(pci->dev, "Resources exceed number of ATU entries (%d)\n",
+ pci->num_ob_windows);
+
+ return 0;
+}
+
+int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ u32 val, ctrl, num_ctrls;
+ int ret;
/*
* Enable DBI read-only registers for writing/updating configuration.
@@ -582,45 +758,15 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
- /* Ensure all outbound windows are disabled so there are multiple matches */
- for (i = 0; i < pci->num_ob_windows; i++)
- dw_pcie_disable_atu(pci, i, DW_PCIE_REGION_OUTBOUND);
-
/*
* If the platform provides its own child bus config accesses, it means
* the platform uses its own address translation component rather than
* ATU, so we should not program the ATU here.
*/
if (pp->bridge->child_ops == &dw_child_pcie_ops) {
- int atu_idx = 0;
- struct resource_entry *entry;
-
- /* Get last memory resource entry */
- resource_list_for_each_entry(entry, &pp->bridge->windows) {
- if (resource_type(entry->res) != IORESOURCE_MEM)
- continue;
-
- if (pci->num_ob_windows <= ++atu_idx)
- break;
-
- dw_pcie_prog_outbound_atu(pci, atu_idx,
- PCIE_ATU_TYPE_MEM, entry->res->start,
- entry->res->start - entry->offset,
- resource_size(entry->res));
- }
-
- if (pp->io_size) {
- if (pci->num_ob_windows > ++atu_idx)
- dw_pcie_prog_outbound_atu(pci, atu_idx,
- PCIE_ATU_TYPE_IO, pp->io_base,
- pp->io_bus_addr, pp->io_size);
- else
- pci->io_cfg_atu_shared = true;
- }
-
- if (pci->num_ob_windows <= atu_idx)
- dev_warn(pci->dev, "Resources exceed number of ATU entries (%d)",
- pci->num_ob_windows);
+ ret = dw_pcie_iatu_setup(pp);
+ if (ret)
+ return ret;
}
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
@@ -633,5 +779,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c
index 0c5de87d3cc6..1fcfb840f238 100644
--- a/drivers/pci/controller/dwc/pcie-designware-plat.c
+++ b/drivers/pci/controller/dwc/pcie-designware-plat.c
@@ -17,13 +17,11 @@
#include <linux/platform_device.h>
#include <linux/resource.h>
#include <linux/types.h>
-#include <linux/regmap.h>
#include "pcie-designware.h"
struct dw_plat_pcie {
struct dw_pcie *pci;
- struct regmap *regmap;
enum dw_pcie_device_mode mode;
};
@@ -31,20 +29,9 @@ struct dw_plat_pcie_of_data {
enum dw_pcie_device_mode mode;
};
-static const struct of_device_id dw_plat_pcie_of_match[];
-
static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = {
};
-static int dw_plat_pcie_establish_link(struct dw_pcie *pci)
-{
- return 0;
-}
-
-static const struct dw_pcie_ops dw_pcie_ops = {
- .start_link = dw_plat_pcie_establish_link,
-};
-
static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -96,7 +83,7 @@ static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie,
struct platform_device *pdev)
{
struct dw_pcie *pci = dw_plat_pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = &pdev->dev;
int ret;
@@ -140,7 +127,6 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
return -ENOMEM;
pci->dev = dev;
- pci->ops = &dw_pcie_ops;
dw_plat_pcie->pci = pci;
dw_plat_pcie->mode = mode;
@@ -153,20 +139,21 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
return -ENODEV;
ret = dw_plat_add_pcie_port(dw_plat_pcie, pdev);
- if (ret < 0)
- return ret;
break;
case DW_PCIE_EP_TYPE:
if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_EP))
return -ENODEV;
pci->ep.ops = &pcie_ep_ops;
- return dw_pcie_ep_init(&pci->ep);
+ ret = dw_pcie_ep_init(&pci->ep);
+ break;
default:
dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode);
+ ret = -EINVAL;
+ break;
}
- return 0;
+ return ret;
}
static const struct dw_plat_pcie_of_data dw_plat_pcie_rc_of_data = {
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index d92c8a25094f..c6725c519a47 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -8,14 +8,41 @@
* Author: Jingoo Han <jg1.han@samsung.com>
*/
+#include <linux/align.h>
+#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/sizes.h>
#include <linux/types.h>
#include "../../pci.h"
#include "pcie-designware.h"
+void dw_pcie_version_detect(struct dw_pcie *pci)
+{
+ u32 ver;
+
+ /* The content of the CSR is zero on DWC PCIe older than v4.70a */
+ ver = dw_pcie_readl_dbi(pci, PCIE_VERSION_NUMBER);
+ if (!ver)
+ return;
+
+ if (pci->version && pci->version != ver)
+ dev_warn(pci->dev, "Versions don't match (%08x != %08x)\n",
+ pci->version, ver);
+ else
+ pci->version = ver;
+
+ ver = dw_pcie_readl_dbi(pci, PCIE_VERSION_TYPE);
+
+ if (pci->type && pci->type != ver)
+ dev_warn(pci->dev, "Types don't match (%08x != %08x)\n",
+ pci->type, ver);
+ else
+ pci->type = ver;
+}
+
/*
* These interfaces resemble the pci_find_*capability() interfaces, but these
* are for configuring host controllers, which are bridges *to* PCI devices but
@@ -181,48 +208,61 @@ void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
dev_err(pci->dev, "write DBI address failed\n");
}
-static u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 reg)
+static inline void __iomem *dw_pcie_select_atu(struct dw_pcie *pci, u32 dir,
+ u32 index)
{
+ if (pci->iatu_unroll_enabled)
+ return pci->atu_base + PCIE_ATU_UNROLL_BASE(dir, index);
+
+ dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, dir | index);
+ return pci->atu_base;
+}
+
+static u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 dir, u32 index, u32 reg)
+{
+ void __iomem *base;
int ret;
u32 val;
+ base = dw_pcie_select_atu(pci, dir, index);
+
if (pci->ops && pci->ops->read_dbi)
- return pci->ops->read_dbi(pci, pci->atu_base, reg, 4);
+ return pci->ops->read_dbi(pci, base, reg, 4);
- ret = dw_pcie_read(pci->atu_base + reg, 4, &val);
+ ret = dw_pcie_read(base + reg, 4, &val);
if (ret)
dev_err(pci->dev, "Read ATU address failed\n");
return val;
}
-static void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val)
+static void dw_pcie_writel_atu(struct dw_pcie *pci, u32 dir, u32 index,
+ u32 reg, u32 val)
{
+ void __iomem *base;
int ret;
+ base = dw_pcie_select_atu(pci, dir, index);
+
if (pci->ops && pci->ops->write_dbi) {
- pci->ops->write_dbi(pci, pci->atu_base, reg, 4, val);
+ pci->ops->write_dbi(pci, base, reg, 4, val);
return;
}
- ret = dw_pcie_write(pci->atu_base + reg, 4, val);
+ ret = dw_pcie_write(base + reg, 4, val);
if (ret)
dev_err(pci->dev, "Write ATU address failed\n");
}
-static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
+static inline u32 dw_pcie_readl_atu_ob(struct dw_pcie *pci, u32 index, u32 reg)
{
- u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
-
- return dw_pcie_readl_atu(pci, offset + reg);
+ return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_OB, index, reg);
}
-static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
- u32 val)
+static inline void dw_pcie_writel_atu_ob(struct dw_pcie *pci, u32 index, u32 reg,
+ u32 val)
{
- u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
-
- dw_pcie_writel_atu(pci, offset + reg, val);
+ dw_pcie_writel_atu(pci, PCIE_ATU_REGION_DIR_OB, index, reg, val);
}
static inline u32 dw_pcie_enable_ecrc(u32 val)
@@ -266,264 +306,160 @@ static inline u32 dw_pcie_enable_ecrc(u32 val)
return val | PCIE_ATU_TD;
}
-static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
- int index, int type,
- u64 cpu_addr, u64 pci_addr,
- u64 size)
+static int __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
+ int index, int type, u64 cpu_addr,
+ u64 pci_addr, u64 size)
{
u32 retries, val;
- u64 limit_addr = cpu_addr + size - 1;
-
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
- lower_32_bits(cpu_addr));
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
- upper_32_bits(cpu_addr));
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_LIMIT,
- lower_32_bits(limit_addr));
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_LIMIT,
- upper_32_bits(limit_addr));
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
- lower_32_bits(pci_addr));
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
- upper_32_bits(pci_addr));
- val = type | PCIE_ATU_FUNC_NUM(func_no);
- val = upper_32_bits(size - 1) ?
- val | PCIE_ATU_INCREASE_REGION_SIZE : val;
- if (pci->version == 0x490A)
- val = dw_pcie_enable_ecrc(val);
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, val);
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
- PCIE_ATU_ENABLE);
+ u64 limit_addr;
- /*
- * Make sure ATU enable takes effect before any subsequent config
- * and I/O accesses.
- */
- for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
- val = dw_pcie_readl_ob_unroll(pci, index,
- PCIE_ATU_UNR_REGION_CTRL2);
- if (val & PCIE_ATU_ENABLE)
- return;
+ if (pci->ops && pci->ops->cpu_addr_fixup)
+ cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);
- mdelay(LINK_WAIT_IATU);
+ limit_addr = cpu_addr + size - 1;
+
+ if ((limit_addr & ~pci->region_limit) != (cpu_addr & ~pci->region_limit) ||
+ !IS_ALIGNED(cpu_addr, pci->region_align) ||
+ !IS_ALIGNED(pci_addr, pci->region_align) || !size) {
+ return -EINVAL;
}
- dev_err(pci->dev, "Outbound iATU is not being enabled\n");
-}
-static void __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
- int index, int type, u64 cpu_addr,
- u64 pci_addr, u64 size)
-{
- u32 retries, val;
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_BASE,
+ lower_32_bits(cpu_addr));
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_BASE,
+ upper_32_bits(cpu_addr));
- if (pci->ops && pci->ops->cpu_addr_fixup)
- cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LIMIT,
+ lower_32_bits(limit_addr));
+ if (dw_pcie_ver_is_ge(pci, 460A))
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_LIMIT,
+ upper_32_bits(limit_addr));
- if (pci->iatu_unroll_enabled) {
- dw_pcie_prog_outbound_atu_unroll(pci, func_no, index, type,
- cpu_addr, pci_addr, size);
- return;
- }
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_TARGET,
+ lower_32_bits(pci_addr));
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_TARGET,
+ upper_32_bits(pci_addr));
- dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
- PCIE_ATU_REGION_OUTBOUND | index);
- dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
- lower_32_bits(cpu_addr));
- dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
- upper_32_bits(cpu_addr));
- dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
- lower_32_bits(cpu_addr + size - 1));
- if (pci->version >= 0x460A)
- dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_LIMIT,
- upper_32_bits(cpu_addr + size - 1));
- dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
- lower_32_bits(pci_addr));
- dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
- upper_32_bits(pci_addr));
val = type | PCIE_ATU_FUNC_NUM(func_no);
- val = ((upper_32_bits(size - 1)) && (pci->version >= 0x460A)) ?
- val | PCIE_ATU_INCREASE_REGION_SIZE : val;
- if (pci->version == 0x490A)
+ if (upper_32_bits(limit_addr) > upper_32_bits(cpu_addr) &&
+ dw_pcie_ver_is_ge(pci, 460A))
+ val |= PCIE_ATU_INCREASE_REGION_SIZE;
+ if (dw_pcie_ver_is(pci, 490A))
val = dw_pcie_enable_ecrc(val);
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, val);
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL1, val);
+
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2, PCIE_ATU_ENABLE);
/*
* Make sure ATU enable takes effect before any subsequent config
* and I/O accesses.
*/
for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
- val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
+ val = dw_pcie_readl_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2);
if (val & PCIE_ATU_ENABLE)
- return;
+ return 0;
mdelay(LINK_WAIT_IATU);
}
+
dev_err(pci->dev, "Outbound iATU is not being enabled\n");
-}
-void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
- u64 cpu_addr, u64 pci_addr, u64 size)
-{
- __dw_pcie_prog_outbound_atu(pci, 0, index, type,
- cpu_addr, pci_addr, size);
+ return -ETIMEDOUT;
}
-void dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u64 pci_addr,
- u64 size)
+int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
+ u64 cpu_addr, u64 pci_addr, u64 size)
{
- __dw_pcie_prog_outbound_atu(pci, func_no, index, type,
- cpu_addr, pci_addr, size);
+ return __dw_pcie_prog_outbound_atu(pci, 0, index, type,
+ cpu_addr, pci_addr, size);
}
-static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
+int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+ int type, u64 cpu_addr, u64 pci_addr,
+ u64 size)
{
- u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
-
- return dw_pcie_readl_atu(pci, offset + reg);
+ return __dw_pcie_prog_outbound_atu(pci, func_no, index, type,
+ cpu_addr, pci_addr, size);
}
-static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
- u32 val)
+static inline u32 dw_pcie_readl_atu_ib(struct dw_pcie *pci, u32 index, u32 reg)
{
- u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
-
- dw_pcie_writel_atu(pci, offset + reg, val);
+ return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg);
}
-static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
- int index, int bar, u64 cpu_addr,
- enum dw_pcie_as_type as_type)
+static inline void dw_pcie_writel_atu_ib(struct dw_pcie *pci, u32 index, u32 reg,
+ u32 val)
{
- int type;
- u32 retries, val;
-
- dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
- lower_32_bits(cpu_addr));
- dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
- upper_32_bits(cpu_addr));
-
- switch (as_type) {
- case DW_PCIE_AS_MEM:
- type = PCIE_ATU_TYPE_MEM;
- break;
- case DW_PCIE_AS_IO:
- type = PCIE_ATU_TYPE_IO;
- break;
- default:
- return -EINVAL;
- }
-
- dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type |
- PCIE_ATU_FUNC_NUM(func_no));
- dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
- PCIE_ATU_FUNC_NUM_MATCH_EN |
- PCIE_ATU_ENABLE |
- PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
-
- /*
- * Make sure ATU enable takes effect before any subsequent config
- * and I/O accesses.
- */
- for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
- val = dw_pcie_readl_ib_unroll(pci, index,
- PCIE_ATU_UNR_REGION_CTRL2);
- if (val & PCIE_ATU_ENABLE)
- return 0;
-
- mdelay(LINK_WAIT_IATU);
- }
- dev_err(pci->dev, "Inbound iATU is not being enabled\n");
-
- return -EBUSY;
+ dw_pcie_writel_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg, val);
}
int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int bar, u64 cpu_addr,
- enum dw_pcie_as_type as_type)
+ int type, u64 cpu_addr, u8 bar)
{
- int type;
u32 retries, val;
- if (pci->iatu_unroll_enabled)
- return dw_pcie_prog_inbound_atu_unroll(pci, func_no, index, bar,
- cpu_addr, as_type);
-
- dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
- index);
- dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr));
- dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr));
-
- switch (as_type) {
- case DW_PCIE_AS_MEM:
- type = PCIE_ATU_TYPE_MEM;
- break;
- case DW_PCIE_AS_IO:
- type = PCIE_ATU_TYPE_IO;
- break;
- default:
+ if (!IS_ALIGNED(cpu_addr, pci->region_align))
return -EINVAL;
- }
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type |
- PCIE_ATU_FUNC_NUM(func_no));
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE |
- PCIE_ATU_FUNC_NUM_MATCH_EN |
- PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET,
+ lower_32_bits(cpu_addr));
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET,
+ upper_32_bits(cpu_addr));
+
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL1, type |
+ PCIE_ATU_FUNC_NUM(func_no));
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2,
+ PCIE_ATU_ENABLE | PCIE_ATU_FUNC_NUM_MATCH_EN |
+ PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
/*
* Make sure ATU enable takes effect before any subsequent config
* and I/O accesses.
*/
for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
- val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
+ val = dw_pcie_readl_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2);
if (val & PCIE_ATU_ENABLE)
return 0;
mdelay(LINK_WAIT_IATU);
}
+
dev_err(pci->dev, "Inbound iATU is not being enabled\n");
- return -EBUSY;
+ return -ETIMEDOUT;
}
-void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
- enum dw_pcie_region_type type)
+void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index)
{
- int region;
-
- switch (type) {
- case DW_PCIE_REGION_INBOUND:
- region = PCIE_ATU_REGION_INBOUND;
- break;
- case DW_PCIE_REGION_OUTBOUND:
- region = PCIE_ATU_REGION_OUTBOUND;
- break;
- default:
- return;
- }
-
- dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index);
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~(u32)PCIE_ATU_ENABLE);
+ dw_pcie_writel_atu(pci, dir, index, PCIE_ATU_REGION_CTRL2, 0);
}
int dw_pcie_wait_for_link(struct dw_pcie *pci)
{
+ u32 offset, val;
int retries;
/* Check if the link is up or not */
for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
- if (dw_pcie_link_up(pci)) {
- dev_info(pci->dev, "Link up\n");
- return 0;
- }
+ if (dw_pcie_link_up(pci))
+ break;
+
usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
}
- dev_info(pci->dev, "Phy link never came up\n");
+ if (retries >= LINK_WAIT_MAX_RETRIES) {
+ dev_err(pci->dev, "Phy link never came up\n");
+ return -ETIMEDOUT;
+ }
- return -ETIMEDOUT;
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ val = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
+
+ dev_info(pci->dev, "PCIe Gen.%u x%u link up\n",
+ FIELD_GET(PCI_EXP_LNKSTA_CLS, val),
+ FIELD_GET(PCI_EXP_LNKSTA_NLW, val));
+
+ return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_wait_for_link);
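Side note on the reworked dw_pcie_wait_for_link() above: it now reports the negotiated speed and width by decoding the Link Status register of the PCI Express capability. A minimal standalone sketch of that decoding, using the standard PCI_EXP_LNKSTA field layout and a made-up read-back value (illustrative only, not part of the patch):

#include <stdio.h>
#include <stdint.h>

/* Standard PCIe Link Status fields (see include/uapi/linux/pci_regs.h) */
#define PCI_EXP_LNKSTA_CLS	0x000f	/* Current Link Speed */
#define PCI_EXP_LNKSTA_NLW	0x03f0	/* Negotiated Link Width */

int main(void)
{
	uint16_t lnksta = 0x0043;	/* hypothetical PCI_EXP_LNKSTA read-back */
	unsigned int speed = lnksta & PCI_EXP_LNKSTA_CLS;		/* like FIELD_GET(CLS, val) */
	unsigned int width = (lnksta & PCI_EXP_LNKSTA_NLW) >> 4;	/* like FIELD_GET(NLW, val) */

	/* Mirrors the "PCIe Gen.%u x%u link up" message in dw_pcie_wait_for_link() */
	printf("PCIe Gen.%u x%u link up\n", speed, width);	/* -> PCIe Gen.3 x4 link up */
	return 0;
}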
@@ -534,7 +470,7 @@ int dw_pcie_link_up(struct dw_pcie *pci)
if (pci->ops && pci->ops->link_up)
return pci->ops->link_up(pci);
- val = readl(pci->dbi_base + PCIE_PORT_DEBUG1);
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1);
return ((val & PCIE_PORT_DEBUG1_LINK_UP) &&
(!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING)));
}
@@ -586,95 +522,81 @@ static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
}
-static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
+static bool dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
{
u32 val;
val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
if (val == 0xffffffff)
- return 1;
+ return true;
- return 0;
+ return false;
}
-static void dw_pcie_iatu_detect_regions_unroll(struct dw_pcie *pci)
+static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci)
{
- int max_region, i, ob = 0, ib = 0;
- u32 val;
+ int max_region, ob, ib;
+ u32 val, min, dir;
+ u64 max;
- max_region = min((int)pci->atu_size / 512, 256);
-
- for (i = 0; i < max_region; i++) {
- dw_pcie_writel_ob_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET,
- 0x11110000);
+ if (pci->iatu_unroll_enabled) {
+ max_region = min((int)pci->atu_size / 512, 256);
+ } else {
+ dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, 0xFF);
+ max_region = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT) + 1;
+ }
- val = dw_pcie_readl_ob_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET);
- if (val == 0x11110000)
- ob++;
- else
+ for (ob = 0; ob < max_region; ob++) {
+ dw_pcie_writel_atu_ob(pci, ob, PCIE_ATU_LOWER_TARGET, 0x11110000);
+ val = dw_pcie_readl_atu_ob(pci, ob, PCIE_ATU_LOWER_TARGET);
+ if (val != 0x11110000)
break;
}
- for (i = 0; i < max_region; i++) {
- dw_pcie_writel_ib_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET,
- 0x11110000);
-
- val = dw_pcie_readl_ib_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET);
- if (val == 0x11110000)
- ib++;
- else
+ for (ib = 0; ib < max_region; ib++) {
+ dw_pcie_writel_atu_ib(pci, ib, PCIE_ATU_LOWER_TARGET, 0x11110000);
+ val = dw_pcie_readl_atu_ib(pci, ib, PCIE_ATU_LOWER_TARGET);
+ if (val != 0x11110000)
break;
}
- pci->num_ib_windows = ib;
- pci->num_ob_windows = ob;
-}
-
-static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci)
-{
- int max_region, i, ob = 0, ib = 0;
- u32 val;
-
- dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, 0xFF);
- max_region = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT) + 1;
- for (i = 0; i < max_region; i++) {
- dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_OUTBOUND | i);
- dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, 0x11110000);
- val = dw_pcie_readl_dbi(pci, PCIE_ATU_LOWER_TARGET);
- if (val == 0x11110000)
- ob++;
- else
- break;
+ if (ob) {
+ dir = PCIE_ATU_REGION_DIR_OB;
+ } else if (ib) {
+ dir = PCIE_ATU_REGION_DIR_IB;
+ } else {
+ dev_err(pci->dev, "No iATU regions found\n");
+ return;
}
- for (i = 0; i < max_region; i++) {
- dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND | i);
- dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, 0x11110000);
- val = dw_pcie_readl_dbi(pci, PCIE_ATU_LOWER_TARGET);
- if (val == 0x11110000)
- ib++;
- else
- break;
+ dw_pcie_writel_atu(pci, dir, 0, PCIE_ATU_LIMIT, 0x0);
+ min = dw_pcie_readl_atu(pci, dir, 0, PCIE_ATU_LIMIT);
+
+ if (dw_pcie_ver_is_ge(pci, 460A)) {
+ dw_pcie_writel_atu(pci, dir, 0, PCIE_ATU_UPPER_LIMIT, 0xFFFFFFFF);
+ max = dw_pcie_readl_atu(pci, dir, 0, PCIE_ATU_UPPER_LIMIT);
+ } else {
+ max = 0;
}
- pci->num_ib_windows = ib;
pci->num_ob_windows = ob;
+ pci->num_ib_windows = ib;
+ pci->region_align = 1 << fls(min);
+ pci->region_limit = (max << 32) | (SZ_4G - 1);
}
void dw_pcie_iatu_detect(struct dw_pcie *pci)
{
- struct device *dev = pci->dev;
- struct platform_device *pdev = to_platform_device(dev);
+ struct platform_device *pdev = to_platform_device(pci->dev);
- if (pci->version >= 0x480A || (!pci->version &&
- dw_pcie_iatu_unroll_enabled(pci))) {
- pci->iatu_unroll_enabled = true;
+ pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
+ if (pci->iatu_unroll_enabled) {
if (!pci->atu_base) {
struct resource *res =
platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu");
if (res) {
pci->atu_size = resource_size(res);
- pci->atu_base = devm_ioremap_resource(dev, res);
+ pci->atu_base = devm_ioremap_resource(pci->dev, res);
}
if (!pci->atu_base || IS_ERR(pci->atu_base))
pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
@@ -683,23 +605,25 @@ void dw_pcie_iatu_detect(struct dw_pcie *pci)
if (!pci->atu_size)
/* Pick a minimal default, enough for 8 in and 8 out windows */
pci->atu_size = SZ_4K;
+ } else {
+ pci->atu_base = pci->dbi_base + PCIE_ATU_VIEWPORT_BASE;
+ pci->atu_size = PCIE_ATU_VIEWPORT_SIZE;
+ }
- dw_pcie_iatu_detect_regions_unroll(pci);
- } else
- dw_pcie_iatu_detect_regions(pci);
+ dw_pcie_iatu_detect_regions(pci);
dev_info(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ?
"enabled" : "disabled");
- dev_info(pci->dev, "Detected iATU regions: %u outbound, %u inbound",
- pci->num_ob_windows, pci->num_ib_windows);
+ dev_info(pci->dev, "iATU regions: %u ob, %u ib, align %uK, limit %lluG\n",
+ pci->num_ob_windows, pci->num_ib_windows,
+ pci->region_align / SZ_1K, (pci->region_limit + 1) / SZ_1G);
}
void dw_pcie_setup(struct dw_pcie *pci)
{
+ struct device_node *np = pci->dev->of_node;
u32 val;
- struct device *dev = pci->dev;
- struct device_node *np = dev->of_node;
if (pci->link_gen > 0)
dw_pcie_link_set_max_speed(pci, pci->link_gen);
@@ -726,6 +650,13 @@ void dw_pcie_setup(struct dw_pcie *pci)
val |= PORT_LINK_DLL_LINK_EN;
dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
+ if (of_property_read_bool(np, "snps,enable-cdm-check")) {
+ val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
+ val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
+ PCIE_PL_CHK_REG_CHK_REG_START;
+ dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
+ }
+
of_property_read_u32(np, "num-lanes", &pci->num_lanes);
if (!pci->num_lanes) {
dev_dbg(pci->dev, "Using h/w default number of lanes\n");
@@ -772,11 +703,4 @@ void dw_pcie_setup(struct dw_pcie *pci)
break;
}
dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
-
- if (of_property_read_bool(np, "snps,enable-cdm-check")) {
- val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
- val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
- PCIE_PL_CHK_REG_CHK_REG_START;
- dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
- }
}
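The rewritten dw_pcie_iatu_detect_regions() above derives the minimum programmable window alignment from the LIMIT bits that remain set after writing zero, and the maximum region limit from how much of UPPER_LIMIT is writable (v4.60a and later). A small user-space sketch of that arithmetic, with hypothetical read-back values chosen only for illustration:

#include <stdio.h>
#include <stdint.h>

/* Same semantics as the kernel's fls(): 1-based index of the most significant set bit */
static int fls32(uint32_t x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	uint32_t min = 0xFFF;	/* hypothetical read-back after writing 0 to PCIE_ATU_LIMIT */
	uint64_t max = 0xFF;	/* hypothetical read-back after writing ~0 to PCIE_ATU_UPPER_LIMIT */

	uint32_t region_align = 1U << fls32(min);			/* 1 << 12 -> 4 KiB granularity */
	uint64_t region_limit = (max << 32) | (0x100000000ULL - 1);	/* (max << 32) | (SZ_4G - 1) */

	/* Matches the "align %uK, limit %lluG" part of the new dev_info() in dw_pcie_iatu_detect() */
	printf("align %uK, limit %lluG\n", region_align / 1024,
	       (unsigned long long)((region_limit + 1) >> 30));	/* -> align 4K, limit 1024G */
	return 0;
}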
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 7d6e9b7576be..09b887093a84 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -20,6 +20,29 @@
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
+/* DWC PCIe IP-core versions (native support since v4.70a) */
+#define DW_PCIE_VER_365A 0x3336352a
+#define DW_PCIE_VER_460A 0x3436302a
+#define DW_PCIE_VER_470A 0x3437302a
+#define DW_PCIE_VER_480A 0x3438302a
+#define DW_PCIE_VER_490A 0x3439302a
+#define DW_PCIE_VER_520A 0x3532302a
+
+#define __dw_pcie_ver_cmp(_pci, _ver, _op) \
+ ((_pci)->version _op DW_PCIE_VER_ ## _ver)
+
+#define dw_pcie_ver_is(_pci, _ver) __dw_pcie_ver_cmp(_pci, _ver, ==)
+
+#define dw_pcie_ver_is_ge(_pci, _ver) __dw_pcie_ver_cmp(_pci, _ver, >=)
+
+#define dw_pcie_ver_type_is(_pci, _ver, _type) \
+ (__dw_pcie_ver_cmp(_pci, _ver, ==) && \
+ __dw_pcie_ver_cmp(_pci, TYPE_ ## _type, ==))
+
+#define dw_pcie_ver_type_is_ge(_pci, _ver, _type) \
+ (__dw_pcie_ver_cmp(_pci, _ver, ==) && \
+ __dw_pcie_ver_cmp(_pci, TYPE_ ## _type, >=))
+
/* Parameters for the waiting for link up routine */
#define LINK_WAIT_MAX_RETRIES 10
#define LINK_WAIT_USLEEP_MIN 90000
@@ -74,13 +97,34 @@
#define PCIE_MSI_INTR0_MASK 0x82C
#define PCIE_MSI_INTR0_STATUS 0x830
+#define GEN3_RELATED_OFF 0x890
+#define GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL BIT(0)
+#define GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS BIT(13)
+#define GEN3_RELATED_OFF_GEN3_EQ_DISABLE BIT(16)
+#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT 24
+#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK GENMASK(25, 24)
+
#define PCIE_PORT_MULTI_LANE_CTRL 0x8C0
#define PORT_MLTI_UPCFG_SUPPORT BIT(7)
+#define PCIE_VERSION_NUMBER 0x8F8
+#define PCIE_VERSION_TYPE 0x8FC
+
+/*
+ * iATU inbound and outbound windows CSRs. Before the IP-core v4.80a each
+ * iATU region CSRs had been indirectly accessible by means of the dedicated
+ * viewport selector. The iATU/eDMA CSRs space was re-designed in DWC PCIe
+ * v4.80a in a way so the viewport was unrolled into the directly accessible
+ * iATU/eDMA CSRs space.
+ */
#define PCIE_ATU_VIEWPORT 0x900
-#define PCIE_ATU_REGION_INBOUND BIT(31)
-#define PCIE_ATU_REGION_OUTBOUND 0
-#define PCIE_ATU_CR1 0x904
+#define PCIE_ATU_REGION_DIR_IB BIT(31)
+#define PCIE_ATU_REGION_DIR_OB 0
+#define PCIE_ATU_VIEWPORT_BASE 0x904
+#define PCIE_ATU_UNROLL_BASE(dir, index) \
+ (((index) << 9) | ((dir == PCIE_ATU_REGION_DIR_IB) ? BIT(8) : 0))
+#define PCIE_ATU_VIEWPORT_SIZE 0x2C
+#define PCIE_ATU_REGION_CTRL1 0x000
#define PCIE_ATU_INCREASE_REGION_SIZE BIT(13)
#define PCIE_ATU_TYPE_MEM 0x0
#define PCIE_ATU_TYPE_IO 0x2
@@ -88,19 +132,19 @@
#define PCIE_ATU_TYPE_CFG1 0x5
#define PCIE_ATU_TD BIT(8)
#define PCIE_ATU_FUNC_NUM(pf) ((pf) << 20)
-#define PCIE_ATU_CR2 0x908
+#define PCIE_ATU_REGION_CTRL2 0x004
#define PCIE_ATU_ENABLE BIT(31)
#define PCIE_ATU_BAR_MODE_ENABLE BIT(30)
#define PCIE_ATU_FUNC_NUM_MATCH_EN BIT(19)
-#define PCIE_ATU_LOWER_BASE 0x90C
-#define PCIE_ATU_UPPER_BASE 0x910
-#define PCIE_ATU_LIMIT 0x914
-#define PCIE_ATU_LOWER_TARGET 0x918
+#define PCIE_ATU_LOWER_BASE 0x008
+#define PCIE_ATU_UPPER_BASE 0x00C
+#define PCIE_ATU_LIMIT 0x010
+#define PCIE_ATU_LOWER_TARGET 0x014
#define PCIE_ATU_BUS(x) FIELD_PREP(GENMASK(31, 24), x)
#define PCIE_ATU_DEV(x) FIELD_PREP(GENMASK(23, 19), x)
#define PCIE_ATU_FUNC(x) FIELD_PREP(GENMASK(18, 16), x)
-#define PCIE_ATU_UPPER_TARGET 0x91C
-#define PCIE_ATU_UPPER_LIMIT 0x924
+#define PCIE_ATU_UPPER_TARGET 0x018
+#define PCIE_ATU_UPPER_LIMIT 0x020
#define PCIE_MISC_CONTROL_1_OFF 0x8BC
#define PCIE_DBI_RO_WR_EN BIT(0)
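With the accessors unified, PCIE_ATU_UNROLL_BASE() is now the single place that encodes the unrolled iATU layout: each region index owns a 512-byte slot, outbound registers sit in the lower half and inbound registers are offset by BIT(8); on viewport-based cores the same register block instead sits at PCIE_ATU_VIEWPORT_BASE after selecting dir|index in the viewport. A small standalone sketch of the offsets the macro produces (illustrative only):

#include <stdio.h>

#define BIT(n)			(1U << (n))
#define PCIE_ATU_REGION_DIR_IB	BIT(31)
#define PCIE_ATU_REGION_DIR_OB	0
#define PCIE_ATU_UNROLL_BASE(dir, index) \
	(((index) << 9) | ((dir == PCIE_ATU_REGION_DIR_IB) ? BIT(8) : 0))

int main(void)
{
	unsigned int index;

	for (index = 0; index < 3; index++)
		printf("region %u: ob 0x%03x, ib 0x%03x\n", index,
		       PCIE_ATU_UNROLL_BASE(PCIE_ATU_REGION_DIR_OB, index),
		       PCIE_ATU_UNROLL_BASE(PCIE_ATU_REGION_DIR_IB, index));
	/*
	 * -> region 0: ob 0x000, ib 0x100
	 *    region 1: ob 0x200, ib 0x300
	 *    region 2: ob 0x400, ib 0x500
	 */
	return 0;
}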
@@ -131,6 +175,25 @@
#define PCIE_ATU_UNR_UPPER_LIMIT 0x20
/*
+ * RAS-DES register definitions
+ */
+#define PCIE_RAS_DES_EVENT_COUNTER_CONTROL 0x8
+#define EVENT_COUNTER_ALL_CLEAR 0x3
+#define EVENT_COUNTER_ENABLE_ALL 0x7
+#define EVENT_COUNTER_ENABLE_SHIFT 2
+#define EVENT_COUNTER_EVENT_SEL_MASK GENMASK(7, 0)
+#define EVENT_COUNTER_EVENT_SEL_SHIFT 16
+#define EVENT_COUNTER_EVENT_Tx_L0S 0x2
+#define EVENT_COUNTER_EVENT_Rx_L0S 0x3
+#define EVENT_COUNTER_EVENT_L1 0x5
+#define EVENT_COUNTER_EVENT_L1_1 0x7
+#define EVENT_COUNTER_EVENT_L1_2 0x8
+#define EVENT_COUNTER_GROUP_SEL_SHIFT 24
+#define EVENT_COUNTER_GROUP_5 0x5
+
+#define PCIE_RAS_DES_EVENT_COUNTER_DATA 0xc
+
+/*
* The default address offset between dbi_base and atu_base. Root controller
* drivers are not required to initialize atu_base if the offset matches this
* default; the driver core automatically derives atu_base from dbi_base using
@@ -138,13 +201,6 @@
*/
#define DEFAULT_DBI_ATU_OFFSET (0x3 << 20)
-/* Register address builder */
-#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) \
- ((region) << 9)
-
-#define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \
- (((region) << 9) | BIT(8))
-
#define MAX_MSI_IRQS 256
#define MAX_MSI_IRQS_PER_CTRL 32
#define MAX_MSI_CTRLS (MAX_MSI_IRQS / MAX_MSI_IRQS_PER_CTRL)
@@ -155,16 +211,10 @@
#define MAX_IATU_IN 256
#define MAX_IATU_OUT 256
-struct pcie_port;
struct dw_pcie;
+struct dw_pcie_rp;
struct dw_pcie_ep;
-enum dw_pcie_region_type {
- DW_PCIE_REGION_UNKNOWN,
- DW_PCIE_REGION_INBOUND,
- DW_PCIE_REGION_OUTBOUND,
-};
-
enum dw_pcie_device_mode {
DW_PCIE_UNKNOWN_TYPE,
DW_PCIE_EP_TYPE,
@@ -173,12 +223,14 @@ enum dw_pcie_device_mode {
};
struct dw_pcie_host_ops {
- int (*host_init)(struct pcie_port *pp);
- int (*msi_host_init)(struct pcie_port *pp);
+ int (*host_init)(struct dw_pcie_rp *pp);
+ void (*host_deinit)(struct dw_pcie_rp *pp);
+ int (*msi_host_init)(struct dw_pcie_rp *pp);
};
-struct pcie_port {
+struct dw_pcie_rp {
bool has_msi_ctrl:1;
+ bool cfg0_io_shared:1;
u64 cfg0_base;
void __iomem *va_cfg0_base;
u32 cfg0_size;
@@ -187,11 +239,11 @@ struct pcie_port {
u32 io_size;
int irq;
const struct dw_pcie_host_ops *ops;
- int msi_irq;
+ int msi_irq[MAX_MSI_CTRLS];
struct irq_domain *irq_domain;
struct irq_domain *msi_domain;
- u16 msi_msg;
dma_addr_t msi_data;
+ struct page *msi_page;
struct irq_chip *msi_irq_chip;
u32 num_vectors;
u32 irq_mask[MAX_MSI_CTRLS];
@@ -200,12 +252,6 @@ struct pcie_port {
DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
};
-enum dw_pcie_as_type {
- DW_PCIE_AS_UNKNOWN,
- DW_PCIE_AS_MEM,
- DW_PCIE_AS_IO,
-};
-
struct dw_pcie_ep_ops {
void (*ep_init)(struct dw_pcie_ep *ep);
int (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no,
@@ -261,20 +307,21 @@ struct dw_pcie {
struct device *dev;
void __iomem *dbi_base;
void __iomem *dbi_base2;
- /* Used when iatu_unroll_enabled is true */
void __iomem *atu_base;
size_t atu_size;
u32 num_ib_windows;
u32 num_ob_windows;
- struct pcie_port pp;
+ u32 region_align;
+ u64 region_limit;
+ struct dw_pcie_rp pp;
struct dw_pcie_ep ep;
const struct dw_pcie_ops *ops;
- unsigned int version;
+ u32 version;
+ u32 type;
int num_lanes;
int link_gen;
u8 n_fts[2];
bool iatu_unroll_enabled: 1;
- bool io_cfg_atu_shared: 1;
};
#define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp)
@@ -282,6 +329,8 @@ struct dw_pcie {
#define to_dw_pcie_from_ep(endpoint) \
container_of((endpoint), struct dw_pcie, ep)
+void dw_pcie_version_detect(struct dw_pcie *pci);
+
u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap);
u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap);
@@ -294,17 +343,13 @@ void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
int dw_pcie_link_up(struct dw_pcie *pci);
void dw_pcie_upconfig_setup(struct dw_pcie *pci);
int dw_pcie_wait_for_link(struct dw_pcie *pci);
-void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index,
- int type, u64 cpu_addr, u64 pci_addr,
- u64 size);
-void dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u64 pci_addr,
- u64 size);
+int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
+ u64 cpu_addr, u64 pci_addr, u64 size);
+int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+ int type, u64 cpu_addr, u64 pci_addr, u64 size);
int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int bar, u64 cpu_addr,
- enum dw_pcie_as_type as_type);
-void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
- enum dw_pcie_region_type type);
+ int type, u64 cpu_addr, u8 bar);
+void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index);
void dw_pcie_setup(struct dw_pcie *pci);
void dw_pcie_iatu_detect(struct dw_pcie *pci);
@@ -365,34 +410,49 @@ static inline void dw_pcie_dbi_ro_wr_dis(struct dw_pcie *pci)
dw_pcie_writel_dbi(pci, reg, val);
}
+static inline int dw_pcie_start_link(struct dw_pcie *pci)
+{
+ if (pci->ops && pci->ops->start_link)
+ return pci->ops->start_link(pci);
+
+ return 0;
+}
+
+static inline void dw_pcie_stop_link(struct dw_pcie *pci)
+{
+ if (pci->ops && pci->ops->stop_link)
+ pci->ops->stop_link(pci);
+}
+
#ifdef CONFIG_PCIE_DW_HOST
-irqreturn_t dw_handle_msi_irq(struct pcie_port *pp);
-void dw_pcie_setup_rc(struct pcie_port *pp);
-int dw_pcie_host_init(struct pcie_port *pp);
-void dw_pcie_host_deinit(struct pcie_port *pp);
-int dw_pcie_allocate_domains(struct pcie_port *pp);
+irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp);
+int dw_pcie_setup_rc(struct dw_pcie_rp *pp);
+int dw_pcie_host_init(struct dw_pcie_rp *pp);
+void dw_pcie_host_deinit(struct dw_pcie_rp *pp);
+int dw_pcie_allocate_domains(struct dw_pcie_rp *pp);
void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn,
int where);
#else
-static inline irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
+static inline irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
{
return IRQ_NONE;
}
-static inline void dw_pcie_setup_rc(struct pcie_port *pp)
+static inline int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
{
+ return 0;
}
-static inline int dw_pcie_host_init(struct pcie_port *pp)
+static inline int dw_pcie_host_init(struct dw_pcie_rp *pp)
{
return 0;
}
-static inline void dw_pcie_host_deinit(struct pcie_port *pp)
+static inline void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
{
}
-static inline int dw_pcie_allocate_domains(struct pcie_port *pp)
+static inline int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
{
return 0;
}
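The DW_PCIE_VER_* constants added to pcie-designware.h appear to pack the ASCII digits of the Synopsys release number followed by 0x2a ('*') into one 32-bit word, which is why dw_pcie_ver_is()/dw_pcie_ver_is_ge() can be plain integer comparisons against the value dw_pcie_version_detect() reads back from the CSRs, and presumably why intel-gw no longer needs its hard-coded pcie_ver. A quick standalone check of that property (not part of the patch):

#include <stdio.h>

#define DW_PCIE_VER_460A	0x3436302a
#define DW_PCIE_VER_490A	0x3439302a
#define DW_PCIE_VER_520A	0x3532302a

struct dw_pcie { unsigned int version; };

#define __dw_pcie_ver_cmp(_pci, _ver, _op) \
	((_pci)->version _op DW_PCIE_VER_ ## _ver)
#define dw_pcie_ver_is(_pci, _ver)	__dw_pcie_ver_cmp(_pci, _ver, ==)
#define dw_pcie_ver_is_ge(_pci, _ver)	__dw_pcie_ver_cmp(_pci, _ver, >=)

int main(void)
{
	struct dw_pcie pci = { .version = DW_PCIE_VER_520A };
	unsigned int v = pci.version;

	/* Each byte is an ASCII character of the release tag */
	printf("version tag: %c.%c%c%c\n",
	       v >> 24, (v >> 16) & 0xff, (v >> 8) & 0xff, v & 0xff);	/* -> 5.20* */
	printf("ver_is_ge(460A): %d, ver_is(490A): %d\n",
	       dw_pcie_ver_is_ge(&pci, 460A), dw_pcie_ver_is(&pci, 490A));	/* -> 1, 0 */
	return 0;
}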
diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
index 8c5bb9d7cc36..c1e7653e508e 100644
--- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
@@ -186,7 +186,7 @@ static int rockchip_pcie_start_link(struct dw_pcie *pci)
return 0;
}
-static int rockchip_pcie_host_init(struct pcie_port *pp)
+static int rockchip_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
@@ -288,7 +288,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rockchip_pcie *rockchip;
- struct pcie_port *pp;
+ struct dw_pcie_rp *pp;
int ret;
rockchip = devm_kzalloc(dev, sizeof(*rockchip), GFP_KERNEL);
diff --git a/drivers/pci/controller/dwc/pcie-fu740.c b/drivers/pci/controller/dwc/pcie-fu740.c
index 02cc70d8cc06..0c90583c078b 100644
--- a/drivers/pci/controller/dwc/pcie-fu740.c
+++ b/drivers/pci/controller/dwc/pcie-fu740.c
@@ -16,11 +16,9 @@
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
-#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
-#include <linux/regulator/consumer.h>
#include <linux/resource.h>
#include <linux/types.h>
#include <linux/interrupt.h>
@@ -236,7 +234,7 @@ err:
return ret;
}
-static int fu740_pcie_host_init(struct pcie_port *pp)
+static int fu740_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct fu740_pcie *afp = to_fu740_pcie(pci);
diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c
index 410555dccb6d..e2b80f10030d 100644
--- a/drivers/pci/controller/dwc/pcie-histb.c
+++ b/drivers/pci/controller/dwc/pcie-histb.c
@@ -74,7 +74,7 @@ static void histb_pcie_writel(struct histb_pcie *histb_pcie, u32 reg, u32 val)
writel(val, histb_pcie->ctrl + reg);
}
-static void histb_pcie_dbi_w_mode(struct pcie_port *pp, bool enable)
+static void histb_pcie_dbi_w_mode(struct dw_pcie_rp *pp, bool enable)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct histb_pcie *hipcie = to_histb_pcie(pci);
@@ -88,7 +88,7 @@ static void histb_pcie_dbi_w_mode(struct pcie_port *pp, bool enable)
histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, val);
}
-static void histb_pcie_dbi_r_mode(struct pcie_port *pp, bool enable)
+static void histb_pcie_dbi_r_mode(struct dw_pcie_rp *pp, bool enable)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct histb_pcie *hipcie = to_histb_pcie(pci);
@@ -180,7 +180,7 @@ static int histb_pcie_start_link(struct dw_pcie *pci)
return 0;
}
-static int histb_pcie_host_init(struct pcie_port *pp)
+static int histb_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct histb_pcie *hipcie = to_histb_pcie(pci);
@@ -219,7 +219,7 @@ static void histb_pcie_host_disable(struct histb_pcie *hipcie)
regulator_disable(hipcie->vpcie);
}
-static int histb_pcie_host_enable(struct pcie_port *pp)
+static int histb_pcie_host_enable(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct histb_pcie *hipcie = to_histb_pcie(pci);
@@ -297,7 +297,7 @@ static int histb_pcie_probe(struct platform_device *pdev)
{
struct histb_pcie *hipcie;
struct dw_pcie *pci;
- struct pcie_port *pp;
+ struct dw_pcie_rp *pp;
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
enum of_gpio_flags of_flags;
diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c
index 5ba144924ff8..333c33d98a70 100644
--- a/drivers/pci/controller/dwc/pcie-intel-gw.c
+++ b/drivers/pci/controller/dwc/pcie-intel-gw.c
@@ -58,10 +58,6 @@
#define BUS_IATU_OFFSET SZ_256M
#define RESET_INTERVAL_MS 100
-struct intel_pcie_soc {
- unsigned int pcie_ver;
-};
-
struct intel_pcie {
struct dw_pcie pci;
void __iomem *app_base;
@@ -306,7 +302,11 @@ static int intel_pcie_host_setup(struct intel_pcie *pcie)
intel_pcie_ltssm_disable(pcie);
intel_pcie_link_setup(pcie);
intel_pcie_init_n_fts(pci);
- dw_pcie_setup_rc(&pci->pp);
+
+ ret = dw_pcie_setup_rc(&pci->pp);
+ if (ret)
+ goto app_init_err;
+
dw_pcie_upconfig_setup(pci);
intel_pcie_device_rst_deassert(pcie);
@@ -343,7 +343,7 @@ static void __intel_pcie_remove(struct intel_pcie *pcie)
static int intel_pcie_remove(struct platform_device *pdev)
{
struct intel_pcie *pcie = platform_get_drvdata(pdev);
- struct pcie_port *pp = &pcie->pci.pp;
+ struct dw_pcie_rp *pp = &pcie->pci.pp;
dw_pcie_host_deinit(pp);
__intel_pcie_remove(pcie);
@@ -351,7 +351,7 @@ static int intel_pcie_remove(struct platform_device *pdev)
return 0;
}
-static int __maybe_unused intel_pcie_suspend_noirq(struct device *dev)
+static int intel_pcie_suspend_noirq(struct device *dev)
{
struct intel_pcie *pcie = dev_get_drvdata(dev);
int ret;
@@ -366,14 +366,14 @@ static int __maybe_unused intel_pcie_suspend_noirq(struct device *dev)
return ret;
}
-static int __maybe_unused intel_pcie_resume_noirq(struct device *dev)
+static int intel_pcie_resume_noirq(struct device *dev)
{
struct intel_pcie *pcie = dev_get_drvdata(dev);
return intel_pcie_host_setup(pcie);
}
-static int intel_pcie_rc_init(struct pcie_port *pp)
+static int intel_pcie_rc_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct intel_pcie *pcie = dev_get_drvdata(pci->dev);
@@ -394,16 +394,11 @@ static const struct dw_pcie_host_ops intel_pcie_dw_ops = {
.host_init = intel_pcie_rc_init,
};
-static const struct intel_pcie_soc pcie_data = {
- .pcie_ver = 0x520A,
-};
-
static int intel_pcie_probe(struct platform_device *pdev)
{
- const struct intel_pcie_soc *data;
struct device *dev = &pdev->dev;
struct intel_pcie *pcie;
- struct pcie_port *pp;
+ struct dw_pcie_rp *pp;
struct dw_pcie *pci;
int ret;
@@ -424,12 +419,7 @@ static int intel_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
- data = device_get_match_data(dev);
- if (!data)
- return -ENODEV;
-
pci->ops = &intel_pcie_ops;
- pci->version = data->pcie_ver;
pp->ops = &intel_pcie_dw_ops;
ret = dw_pcie_host_init(pp);
@@ -442,12 +432,12 @@ static int intel_pcie_probe(struct platform_device *pdev)
}
static const struct dev_pm_ops intel_pcie_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(intel_pcie_suspend_noirq,
- intel_pcie_resume_noirq)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(intel_pcie_suspend_noirq,
+ intel_pcie_resume_noirq)
};
static const struct of_device_id of_intel_pcie_match[] = {
- { .compatible = "intel,lgm-pcie", .data = &pcie_data },
+ { .compatible = "intel,lgm-pcie" },
{}
};
diff --git a/drivers/pci/controller/dwc/pcie-keembay.c b/drivers/pci/controller/dwc/pcie-keembay.c
index 1ac29a6eef22..f90f36bac018 100644
--- a/drivers/pci/controller/dwc/pcie-keembay.c
+++ b/drivers/pci/controller/dwc/pcie-keembay.c
@@ -231,7 +231,7 @@ static void keembay_pcie_msi_irq_handler(struct irq_desc *desc)
struct keembay_pcie *pcie = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
u32 val, mask, status;
- struct pcie_port *pp;
+ struct dw_pcie_rp *pp;
/*
* Keem Bay PCIe Controller provides an additional IP logic on top of
@@ -332,13 +332,13 @@ static int keembay_pcie_add_pcie_port(struct keembay_pcie *pcie,
struct platform_device *pdev)
{
struct dw_pcie *pci = &pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = &pdev->dev;
u32 val;
int ret;
pp->ops = &keembay_pcie_host_ops;
- pp->msi_irq = -ENODEV;
+ pp->msi_irq[0] = -ENODEV;
ret = keembay_pcie_setup_msi_irq(pcie);
if (ret)
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index a52cad269f85..7f67aad71df4 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -620,7 +620,7 @@ static int kirin_pcie_start_link(struct dw_pcie *pci)
return 0;
}
-static int kirin_pcie_host_init(struct pcie_port *pp)
+static int kirin_pcie_host_init(struct dw_pcie_rp *pp)
{
pp->bridge->ops = &kirin_pci_ops;
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 2ea13750b492..66886dc6e777 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -41,6 +41,9 @@
#define L23_CLK_RMV_DIS BIT(2)
#define L1_CLK_RMV_DIS BIT(1)
+#define PCIE20_PARF_PM_CTRL 0x20
+#define REQ_NOT_ENTR_L1 BIT(5)
+
#define PCIE20_PARF_PHY_CTRL 0x40
#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK GENMASK(20, 16)
#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x) ((x) << 16)
@@ -52,6 +55,10 @@
#define PCIE20_PARF_DBI_BASE_ADDR 0x168
#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C
#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174
+#define AHB_CLK_EN BIT(0)
+#define MSTR_AXI_CLK_EN BIT(1)
+#define BYPASS BIT(4)
+
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x178
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1A8
#define PCIE20_PARF_LTSSM 0x1B0
@@ -69,7 +76,20 @@
#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c
#define CFG_BRIDGE_SB_INIT BIT(0)
-#define PCIE_CAP_LINK1_VAL 0x2FD7F
+#define PCIE_CAP_SLOT_POWER_LIMIT_VAL FIELD_PREP(PCI_EXP_SLTCAP_SPLV, \
+ 250)
+#define PCIE_CAP_SLOT_POWER_LIMIT_SCALE FIELD_PREP(PCI_EXP_SLTCAP_SPLS, \
+ 1)
+#define PCIE_CAP_SLOT_VAL (PCI_EXP_SLTCAP_ABP | \
+ PCI_EXP_SLTCAP_PCP | \
+ PCI_EXP_SLTCAP_MRLSP | \
+ PCI_EXP_SLTCAP_AIP | \
+ PCI_EXP_SLTCAP_PIP | \
+ PCI_EXP_SLTCAP_HPS | \
+ PCI_EXP_SLTCAP_HPC | \
+ PCI_EXP_SLTCAP_EIP | \
+ PCIE_CAP_SLOT_POWER_LIMIT_VAL | \
+ PCIE_CAP_SLOT_POWER_LIMIT_SCALE)
#define PCIE20_PARF_Q2A_FLUSH 0x1AC
@@ -128,7 +148,6 @@ struct qcom_pcie_resources_2_3_2 {
struct clk *master_clk;
struct clk *slave_clk;
struct clk *cfg_clk;
- struct clk *pipe_clk;
struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};
@@ -165,10 +184,11 @@ struct qcom_pcie_resources_2_7_0 {
int num_clks;
struct regulator_bulk_data supplies[2];
struct reset_control *pci_reset;
- struct clk *pipe_clk;
- struct clk *pipe_clk_src;
- struct clk *phy_pipe_clk;
- struct clk *ref_clk_src;
+};
+
+struct qcom_pcie_resources_2_9_0 {
+ struct clk_bulk_data clks[5];
+ struct reset_control *rst;
};
union qcom_pcie_resources {
@@ -178,6 +198,7 @@ union qcom_pcie_resources {
struct qcom_pcie_resources_2_3_3 v2_3_3;
struct qcom_pcie_resources_2_4_0 v2_4_0;
struct qcom_pcie_resources_2_7_0 v2_7_0;
+ struct qcom_pcie_resources_2_9_0 v2_9_0;
};
struct qcom_pcie;
@@ -194,7 +215,6 @@ struct qcom_pcie_ops {
struct qcom_pcie_cfg {
const struct qcom_pcie_ops *ops;
- unsigned int pipe_clk_need_muxing:1;
unsigned int has_tbu_clk:1;
unsigned int has_ddrss_sf_tbu_clk:1;
unsigned int has_aggre0_clk:1;
@@ -325,8 +345,6 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
- struct device_node *node = dev->of_node;
- u32 val;
int ret;
/* reset the PCIe interface as uboot can leave it undefined state */
@@ -337,8 +355,6 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
reset_control_assert(res->ext_reset);
reset_control_assert(res->phy_reset);
- writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);
-
ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
if (ret < 0) {
dev_err(dev, "cannot enable regulators\n");
@@ -381,15 +397,42 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
goto err_deassert_axi;
}
- ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
- if (ret)
- goto err_clks;
+ return 0;
+
+err_deassert_axi:
+ reset_control_assert(res->por_reset);
+err_deassert_por:
+ reset_control_assert(res->pci_reset);
+err_deassert_pci:
+ reset_control_assert(res->phy_reset);
+err_deassert_phy:
+ reset_control_assert(res->ext_reset);
+err_deassert_ext:
+ reset_control_assert(res->ahb_reset);
+err_deassert_ahb:
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+
+ return ret;
+}
+
+static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ struct device_node *node = dev->of_node;
+ u32 val;
+ int ret;
/* enable PCIe clocks and resets */
val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
val &= ~BIT(0);
writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+ if (ret)
+ return ret;
+
if (of_device_is_compatible(node, "qcom,pcie-ipq8064") ||
of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) {
writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
@@ -428,23 +471,6 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);
return 0;
-
-err_clks:
- reset_control_assert(res->axi_reset);
-err_deassert_axi:
- reset_control_assert(res->por_reset);
-err_deassert_por:
- reset_control_assert(res->pci_reset);
-err_deassert_pci:
- reset_control_assert(res->phy_reset);
-err_deassert_phy:
- reset_control_assert(res->ext_reset);
-err_deassert_ext:
- reset_control_assert(res->ahb_reset);
-err_deassert_ahb:
- regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
-
- return ret;
}
static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
@@ -532,16 +558,6 @@ static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
goto err_slave;
}
- /* change DBI base address */
- writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
-
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
-
- val |= BIT(31);
- writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
- }
-
return 0;
err_slave:
clk_disable_unprepare(res->slave_bus);
@@ -557,6 +573,21 @@ err_res:
return ret;
}
+static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie)
+{
+ /* change DBI base address */
+ writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+
+ val |= BIT(31);
+ writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+ }
+
+ return 0;
+}
+
static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
{
u32 val;
@@ -597,8 +628,7 @@ static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
if (IS_ERR(res->slave_clk))
return PTR_ERR(res->slave_clk);
- res->pipe_clk = devm_clk_get(dev, "pipe");
- return PTR_ERR_OR_ZERO(res->pipe_clk);
+ return 0;
}
static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
@@ -613,19 +643,11 @@ static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
-static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie)
-{
- struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
-
- clk_disable_unprepare(res->pipe_clk);
-}
-
static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
- u32 val;
int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
@@ -658,6 +680,25 @@ static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
goto err_slave_clk;
}
+ return 0;
+
+err_slave_clk:
+ clk_disable_unprepare(res->master_clk);
+err_master_clk:
+ clk_disable_unprepare(res->cfg_clk);
+err_cfg_clk:
+ clk_disable_unprepare(res->aux_clk);
+
+err_aux_clk:
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+
+ return ret;
+}
+
+static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
+{
+ u32 val;
+
/* enable PCIe clocks and resets */
val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
val &= ~BIT(0);
@@ -680,34 +721,6 @@ static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
return 0;
-
-err_slave_clk:
- clk_disable_unprepare(res->master_clk);
-err_master_clk:
- clk_disable_unprepare(res->cfg_clk);
-err_cfg_clk:
- clk_disable_unprepare(res->aux_clk);
-
-err_aux_clk:
- regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
-
- return ret;
-}
-
-static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
-{
- struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
- struct dw_pcie *pci = pcie->pci;
- struct device *dev = pci->dev;
- int ret;
-
- ret = clk_prepare_enable(res->pipe_clk);
- if (ret) {
- dev_err(dev, "cannot prepare/enable pipe clock\n");
- return ret;
- }
-
- return 0;
}
static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
@@ -814,7 +827,6 @@ static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
- u32 val;
int ret;
ret = reset_control_assert(res->axi_m_reset);
@@ -939,6 +951,33 @@ static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
if (ret)
goto err_clks;
+ return 0;
+
+err_clks:
+ reset_control_assert(res->ahb_reset);
+err_rst_ahb:
+ reset_control_assert(res->pwr_reset);
+err_rst_pwr:
+ reset_control_assert(res->axi_s_reset);
+err_rst_axi_s:
+ reset_control_assert(res->axi_m_sticky_reset);
+err_rst_axi_m_sticky:
+ reset_control_assert(res->axi_m_reset);
+err_rst_axi_m:
+ reset_control_assert(res->pipe_sticky_reset);
+err_rst_pipe_sticky:
+ reset_control_assert(res->pipe_reset);
+err_rst_pipe:
+ reset_control_assert(res->phy_reset);
+err_rst_phy:
+ reset_control_assert(res->phy_ahb_reset);
+ return ret;
+}
+
+static int qcom_pcie_post_init_2_4_0(struct qcom_pcie *pcie)
+{
+ u32 val;
+
/* enable PCIe clocks and resets */
val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
val &= ~BIT(0);
@@ -961,26 +1000,6 @@ static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
return 0;
-
-err_clks:
- reset_control_assert(res->ahb_reset);
-err_rst_ahb:
- reset_control_assert(res->pwr_reset);
-err_rst_pwr:
- reset_control_assert(res->axi_s_reset);
-err_rst_axi_s:
- reset_control_assert(res->axi_m_sticky_reset);
-err_rst_axi_m_sticky:
- reset_control_assert(res->axi_m_reset);
-err_rst_axi_m:
- reset_control_assert(res->pipe_sticky_reset);
-err_rst_pipe_sticky:
- reset_control_assert(res->pipe_reset);
-err_rst_pipe:
- reset_control_assert(res->phy_reset);
-err_rst_phy:
- reset_control_assert(res->phy_ahb_reset);
- return ret;
}
static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
@@ -1038,9 +1057,7 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
- u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
int i, ret;
- u32 val;
for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
ret = reset_control_assert(res->rst[i]);
@@ -1097,6 +1114,33 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
goto err_clk_aux;
}
+ return 0;
+
+err_clk_aux:
+ clk_disable_unprepare(res->ahb_clk);
+err_clk_ahb:
+ clk_disable_unprepare(res->axi_s_clk);
+err_clk_axi_s:
+ clk_disable_unprepare(res->axi_m_clk);
+err_clk_axi_m:
+ clk_disable_unprepare(res->iface);
+err_clk_iface:
+ /*
+ * Not checking for failure, will anyway return
+ * the original failure in 'ret'.
+ */
+ for (i = 0; i < ARRAY_SIZE(res->rst); i++)
+ reset_control_assert(res->rst[i]);
+
+ return ret;
+}
+
+static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+ u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u32 val;
+
writel(SLV_ADDR_SPACE_SZ,
pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);
@@ -1114,7 +1158,7 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);
writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
- writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);
+ writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);
val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
val &= ~PCI_EXP_LNKCAP_ASPMS;
@@ -1124,24 +1168,6 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
PCI_EXP_DEVCTL2);
return 0;
-
-err_clk_aux:
- clk_disable_unprepare(res->ahb_clk);
-err_clk_ahb:
- clk_disable_unprepare(res->axi_s_clk);
-err_clk_axi_s:
- clk_disable_unprepare(res->axi_m_clk);
-err_clk_axi_m:
- clk_disable_unprepare(res->iface);
-err_clk_iface:
- /*
- * Not checking for failure, will anyway return
- * the original failure in 'ret'.
- */
- for (i = 0; i < ARRAY_SIZE(res->rst); i++)
- reset_control_assert(res->rst[i]);
-
- return ret;
}
static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
@@ -1184,22 +1210,7 @@ static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
if (ret < 0)
return ret;
- if (pcie->cfg->pipe_clk_need_muxing) {
- res->pipe_clk_src = devm_clk_get(dev, "pipe_mux");
- if (IS_ERR(res->pipe_clk_src))
- return PTR_ERR(res->pipe_clk_src);
-
- res->phy_pipe_clk = devm_clk_get(dev, "phy_pipe");
- if (IS_ERR(res->phy_pipe_clk))
- return PTR_ERR(res->phy_pipe_clk);
-
- res->ref_clk_src = devm_clk_get(dev, "ref");
- if (IS_ERR(res->ref_clk_src))
- return PTR_ERR(res->ref_clk_src);
- }
-
- res->pipe_clk = devm_clk_get(dev, "pipe");
- return PTR_ERR_OR_ZERO(res->pipe_clk);
+ return 0;
}
static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
@@ -1216,10 +1227,6 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
return ret;
}
- /* Set TCXO as clock source for pcie_pipe_clk_src */
- if (pcie->cfg->pipe_clk_need_muxing)
- clk_set_parent(res->pipe_clk_src, res->ref_clk_src);
-
ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
if (ret < 0)
goto err_disable_regulators;
@@ -1261,6 +1268,11 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
val |= BIT(4);
writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+ /* Enable L1 and L1SS */
+ val = readl(pcie->parf + PCIE20_PARF_PM_CTRL);
+ val &= ~REQ_NOT_ENTR_L1;
+ writel(val, pcie->parf + PCIE20_PARF_PM_CTRL);
+
if (IS_ENABLED(CONFIG_PCI_MSI)) {
val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
val |= BIT(31);
@@ -1281,25 +1293,114 @@ static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
clk_bulk_disable_unprepare(res->num_clks, res->clks);
+
regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
-static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
+static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie)
{
- struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+ struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
- /* Set pipe clock as clock source for pcie_pipe_clk_src */
- if (pcie->cfg->pipe_clk_need_muxing)
- clk_set_parent(res->pipe_clk_src, res->phy_pipe_clk);
+ res->clks[0].id = "iface";
+ res->clks[1].id = "axi_m";
+ res->clks[2].id = "axi_s";
+ res->clks[3].id = "axi_bridge";
+ res->clks[4].id = "rchng";
+
+ ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
+ if (ret < 0)
+ return ret;
+
+ res->rst = devm_reset_control_array_get_exclusive(dev);
+ if (IS_ERR(res->rst))
+ return PTR_ERR(res->rst);
- return clk_prepare_enable(res->pipe_clk);
+ return 0;
}
-static void qcom_pcie_post_deinit_2_7_0(struct qcom_pcie *pcie)
+static void qcom_pcie_deinit_2_9_0(struct qcom_pcie *pcie)
{
- struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+ struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
+
+ clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+}
+
+static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
+ struct device *dev = pcie->pci->dev;
+ int ret;
+
+ ret = reset_control_assert(res->rst);
+ if (ret) {
+ dev_err(dev, "reset assert failed (%d)\n", ret);
+ return ret;
+ }
+
+ /*
+ * Delay periods before and after reset deassert are working values
+ * from downstream Codeaurora kernel
+ */
+ usleep_range(2000, 2500);
+
+ ret = reset_control_deassert(res->rst);
+ if (ret) {
+ dev_err(dev, "reset deassert failed (%d)\n", ret);
+ return ret;
+ }
+
+ usleep_range(2000, 2500);
- clk_disable_unprepare(res->pipe_clk);
+ return clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+}
+
+static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+ u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u32 val;
+ int i;
+
+ writel(SLV_ADDR_SPACE_SZ,
+ pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);
+
+ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val &= ~BIT(0);
+ writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+ writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+ writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE);
+ writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN,
+ pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+ writel(GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS |
+ GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL,
+ pci->dbi_base + GEN3_RELATED_OFF);
+
+ writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS |
+ SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
+ AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
+ pcie->parf + PCIE20_PARF_SYS_CTRL);
+
+ writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+ writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);
+
+ val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
+ val &= ~PCI_EXP_LNKCAP_ASPMS;
+ writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);
+
+ writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
+ PCI_EXP_DEVCTL2);
+
+ for (i = 0; i < 256; i++)
+ writel(0, pcie->parf + PCIE20_PARF_BDF_TO_SID_TABLE_N + (4 * i));
+
+ return 0;
}
static int qcom_pcie_link_up(struct dw_pcie *pci)
@@ -1381,7 +1482,7 @@ static int qcom_pcie_config_sid_sm8250(struct qcom_pcie *pcie)
return 0;
}
-static int qcom_pcie_host_init(struct pcie_port *pp)
+static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct qcom_pcie *pcie = to_qcom_pcie(pci);
@@ -1433,6 +1534,7 @@ static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
static const struct qcom_pcie_ops ops_2_1_0 = {
.get_resources = qcom_pcie_get_resources_2_1_0,
.init = qcom_pcie_init_2_1_0,
+ .post_init = qcom_pcie_post_init_2_1_0,
.deinit = qcom_pcie_deinit_2_1_0,
.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};
@@ -1441,6 +1543,7 @@ static const struct qcom_pcie_ops ops_2_1_0 = {
static const struct qcom_pcie_ops ops_1_0_0 = {
.get_resources = qcom_pcie_get_resources_1_0_0,
.init = qcom_pcie_init_1_0_0,
+ .post_init = qcom_pcie_post_init_1_0_0,
.deinit = qcom_pcie_deinit_1_0_0,
.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};
@@ -1451,7 +1554,6 @@ static const struct qcom_pcie_ops ops_2_3_2 = {
.init = qcom_pcie_init_2_3_2,
.post_init = qcom_pcie_post_init_2_3_2,
.deinit = qcom_pcie_deinit_2_3_2,
- .post_deinit = qcom_pcie_post_deinit_2_3_2,
.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};
@@ -1459,6 +1561,7 @@ static const struct qcom_pcie_ops ops_2_3_2 = {
static const struct qcom_pcie_ops ops_2_4_0 = {
.get_resources = qcom_pcie_get_resources_2_4_0,
.init = qcom_pcie_init_2_4_0,
+ .post_init = qcom_pcie_post_init_2_4_0,
.deinit = qcom_pcie_deinit_2_4_0,
.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};
@@ -1467,6 +1570,7 @@ static const struct qcom_pcie_ops ops_2_4_0 = {
static const struct qcom_pcie_ops ops_2_3_3 = {
.get_resources = qcom_pcie_get_resources_2_3_3,
.init = qcom_pcie_init_2_3_3,
+ .post_init = qcom_pcie_post_init_2_3_3,
.deinit = qcom_pcie_deinit_2_3_3,
.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};
@@ -1477,8 +1581,6 @@ static const struct qcom_pcie_ops ops_2_7_0 = {
.init = qcom_pcie_init_2_7_0,
.deinit = qcom_pcie_deinit_2_7_0,
.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
- .post_init = qcom_pcie_post_init_2_7_0,
- .post_deinit = qcom_pcie_post_deinit_2_7_0,
};
/* Qcom IP rev.: 1.9.0 */
@@ -1487,11 +1589,18 @@ static const struct qcom_pcie_ops ops_1_9_0 = {
.init = qcom_pcie_init_2_7_0,
.deinit = qcom_pcie_deinit_2_7_0,
.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
- .post_init = qcom_pcie_post_init_2_7_0,
- .post_deinit = qcom_pcie_post_deinit_2_7_0,
.config_sid = qcom_pcie_config_sid_sm8250,
};
+/* Qcom IP rev.: 2.9.0 Synopsys IP rev.: 5.00a */
+static const struct qcom_pcie_ops ops_2_9_0 = {
+ .get_resources = qcom_pcie_get_resources_2_9_0,
+ .init = qcom_pcie_init_2_9_0,
+ .post_init = qcom_pcie_post_init_2_9_0,
+ .deinit = qcom_pcie_deinit_2_9_0,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
static const struct qcom_pcie_cfg apq8084_cfg = {
.ops = &ops_1_0_0,
};
@@ -1533,7 +1642,6 @@ static const struct qcom_pcie_cfg sm8250_cfg = {
static const struct qcom_pcie_cfg sm8450_pcie0_cfg = {
.ops = &ops_1_9_0,
.has_ddrss_sf_tbu_clk = true,
- .pipe_clk_need_muxing = true,
.has_aggre0_clk = true,
.has_aggre1_clk = true,
};
@@ -1541,14 +1649,12 @@ static const struct qcom_pcie_cfg sm8450_pcie0_cfg = {
static const struct qcom_pcie_cfg sm8450_pcie1_cfg = {
.ops = &ops_1_9_0,
.has_ddrss_sf_tbu_clk = true,
- .pipe_clk_need_muxing = true,
.has_aggre1_clk = true,
};
static const struct qcom_pcie_cfg sc7280_cfg = {
.ops = &ops_1_9_0,
.has_tbu_clk = true,
- .pipe_clk_need_muxing = true,
};
static const struct qcom_pcie_cfg sc8180x_cfg = {
@@ -1556,6 +1662,10 @@ static const struct qcom_pcie_cfg sc8180x_cfg = {
.has_tbu_clk = true,
};
+static const struct qcom_pcie_cfg ipq6018_cfg = {
+ .ops = &ops_2_9_0,
+};
+
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = qcom_pcie_link_up,
.start_link = qcom_pcie_start_link,
@@ -1564,7 +1674,7 @@ static const struct dw_pcie_ops dw_pcie_ops = {
static int qcom_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct pcie_port *pp;
+ struct dw_pcie_rp *pp;
struct dw_pcie *pci;
struct qcom_pcie *pcie;
const struct qcom_pcie_cfg *pcie_cfg;
@@ -1666,6 +1776,7 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-sm8450-pcie0", .data = &sm8450_pcie0_cfg },
{ .compatible = "qcom,pcie-sm8450-pcie1", .data = &sm8450_pcie1_cfg },
{ .compatible = "qcom,pcie-sc7280", .data = &sc7280_cfg },
+ { .compatible = "qcom,pcie-ipq6018", .data = &ipq6018_cfg },
{ }
};
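The ops tables above are the mechanism behind these changes: each SoC's qcom_pcie_cfg names one per-IP-revision qcom_pcie_ops table, and the host-init path only invokes optional hooks such as .post_init when the revision provides them, which is what lets the 2.7.0 pipe-clock handling move out of post_init/post_deinit and the new 2.9.0 (IPQ6018) revision plug in cleanly. A minimal sketch of that dispatch pattern follows; the field names mirror the structs shown in these hunks, but the function is illustrative and not the driver's literal call site.

	/* Hedged sketch of per-revision hook dispatch; not verbatim driver code. */
	static int qcom_pcie_host_init_sketch(struct qcom_pcie *pcie)
	{
		const struct qcom_pcie_ops *ops = pcie->cfg->ops;
		int ret;

		ret = ops->init(pcie);
		if (ret)
			return ret;

		if (ops->post_init) {		/* optional per-revision hook */
			ret = ops->post_init(pcie);
			if (ret)
				ops->deinit(pcie);
		}

		return ret;
	}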
diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c
index 1569e82b5568..99d47ae80331 100644
--- a/drivers/pci/controller/dwc/pcie-spear13xx.c
+++ b/drivers/pci/controller/dwc/pcie-spear13xx.c
@@ -85,7 +85,7 @@ static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
struct spear13xx_pcie *spear13xx_pcie = arg;
struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base;
struct dw_pcie *pci = spear13xx_pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
unsigned int status;
status = readl(&app_reg->int_sts);
@@ -121,7 +121,7 @@ static int spear13xx_pcie_link_up(struct dw_pcie *pci)
return 0;
}
-static int spear13xx_pcie_host_init(struct pcie_port *pp)
+static int spear13xx_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);
@@ -155,7 +155,7 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
struct platform_device *pdev)
{
struct dw_pcie *pci = spear13xx_pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = &pdev->dev;
int ret;
@@ -172,7 +172,7 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
}
pp->ops = &spear13xx_pcie_host_ops;
- pp->msi_irq = -ENODEV;
+ pp->msi_irq[0] = -ENODEV;
ret = dw_pcie_host_init(pp);
if (ret) {
@@ -258,7 +258,7 @@ static struct platform_driver spear13xx_pcie_driver = {
.probe = spear13xx_pcie_probe,
.driver = {
.name = "spear-pcie",
- .of_match_table = of_match_ptr(spear13xx_pcie_of_match),
+ .of_match_table = spear13xx_pcie_of_match,
.suppress_bind_attrs = true,
},
};
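Two small but load-bearing details in the SPEAr13xx hunks: pp->msi_irq is now an array in the DWC core (one slot per group of MSI vectors), so the "no MSI" sentinel has to go into msi_irq[0]; and dropping of_match_ptr() matters because that wrapper evaluates to NULL when CONFIG_OF is disabled, which both hides the match table from the driver core and leaves spear13xx_pcie_of_match defined but unused. Roughly how include/linux/of.h defines the macro (paraphrased for illustration):

	#ifdef CONFIG_OF
	#define of_match_ptr(_ptr)	(_ptr)
	#else
	#define of_match_ptr(_ptr)	NULL	/* match table would never be referenced */
	#endif

Referencing the table directly keeps it used in all configurations, at the cost of a few bytes on !OF builds.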
diff --git a/drivers/pci/controller/dwc/pcie-tegra194-acpi.c b/drivers/pci/controller/dwc/pcie-tegra194-acpi.c
index c2de6ed4d86f..55f61914a986 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194-acpi.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194-acpi.c
@@ -39,7 +39,8 @@ static int tegra194_acpi_init(struct pci_config_window *cfg)
static void atu_reg_write(struct tegra194_pcie_ecam *pcie_ecam, int index,
u32 val, u32 reg)
{
- u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
+ u32 offset = PCIE_ATU_UNROLL_BASE(PCIE_ATU_REGION_DIR_OB, index) +
+ PCIE_ATU_VIEWPORT_BASE;
writel(val, pcie_ecam->iatu_base + offset + reg);
}
@@ -58,8 +59,8 @@ static void program_outbound_atu(struct tegra194_pcie_ecam *pcie_ecam,
PCIE_ATU_LIMIT);
atu_reg_write(pcie_ecam, index, upper_32_bits(pci_addr),
PCIE_ATU_UPPER_TARGET);
- atu_reg_write(pcie_ecam, index, type, PCIE_ATU_CR1);
- atu_reg_write(pcie_ecam, index, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+ atu_reg_write(pcie_ecam, index, type, PCIE_ATU_REGION_CTRL1);
+ atu_reg_write(pcie_ecam, index, PCIE_ATU_ENABLE, PCIE_ATU_REGION_CTRL2);
}
static void __iomem *tegra194_map_bus(struct pci_bus *bus,
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index cc2678490162..1b6b437823d2 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -1,8 +1,10 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * PCIe host controller driver for Tegra194 SoC
+ * PCIe host controller driver for the following SoCs
+ * Tegra194
+ * Tegra234
*
- * Copyright (C) 2019 NVIDIA Corporation.
+ * Copyright (C) 2019-2022 NVIDIA Corporation.
*
* Author: Vidya Sagar <vidyas@nvidia.com>
*/
@@ -35,6 +37,9 @@
#include <soc/tegra/bpmp-abi.h>
#include "../../pci.h"
+#define TEGRA194_DWC_IP_VER 0x490A
+#define TEGRA234_DWC_IP_VER 0x562A
+
#define APPL_PINMUX 0x0
#define APPL_PINMUX_PEX_RST BIT(0)
#define APPL_PINMUX_CLKREQ_OVERRIDE_EN BIT(2)
@@ -49,6 +54,7 @@
#define APPL_CTRL_HW_HOT_RST_MODE_MASK GENMASK(1, 0)
#define APPL_CTRL_HW_HOT_RST_MODE_SHIFT 22
#define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST 0x1
+#define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST_LTSSM_EN 0x2
#define APPL_INTR_EN_L0_0 0x8
#define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN BIT(0)
@@ -170,19 +176,6 @@
#define CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF 0x718
#define CFG_TIMER_CTRL_ACK_NAK_SHIFT (19)
-#define EVENT_COUNTER_ALL_CLEAR 0x3
-#define EVENT_COUNTER_ENABLE_ALL 0x7
-#define EVENT_COUNTER_ENABLE_SHIFT 2
-#define EVENT_COUNTER_EVENT_SEL_MASK GENMASK(7, 0)
-#define EVENT_COUNTER_EVENT_SEL_SHIFT 16
-#define EVENT_COUNTER_EVENT_Tx_L0S 0x2
-#define EVENT_COUNTER_EVENT_Rx_L0S 0x3
-#define EVENT_COUNTER_EVENT_L1 0x5
-#define EVENT_COUNTER_EVENT_L1_1 0x7
-#define EVENT_COUNTER_EVENT_L1_2 0x8
-#define EVENT_COUNTER_GROUP_SEL_SHIFT 24
-#define EVENT_COUNTER_GROUP_5 0x5
-
#define N_FTS_VAL 52
#define FTS_VAL 52
@@ -191,12 +184,6 @@
#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK GENMASK(23, 8)
#define GEN3_EQ_CONTROL_OFF_FB_MODE_MASK GENMASK(3, 0)
-#define GEN3_RELATED_OFF 0x890
-#define GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL BIT(0)
-#define GEN3_RELATED_OFF_GEN3_EQ_DISABLE BIT(16)
-#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT 24
-#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK GENMASK(25, 24)
-
#define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT 0x8D0
#define AMBA_ERROR_RESPONSE_CRS_SHIFT 3
#define AMBA_ERROR_RESPONSE_CRS_MASK GENMASK(1, 0)
@@ -243,7 +230,19 @@ static const unsigned int pcie_gen_freq[] = {
GEN4_CORE_CLK_FREQ
};
-struct tegra194_pcie {
+struct tegra_pcie_dw_of_data {
+ u32 version;
+ enum dw_pcie_device_mode mode;
+ bool has_msix_doorbell_access_fix;
+ bool has_sbr_reset_fix;
+ bool has_l1ss_exit_fix;
+ bool has_ltr_req_fix;
+ u32 cdm_chk_int_en_bit;
+ u32 gen4_preset_vec;
+ u8 n_fts[2];
+};
+
+struct tegra_pcie_dw {
struct device *dev;
struct resource *appl_res;
struct resource *dbi_res;
@@ -255,17 +254,20 @@ struct tegra194_pcie {
struct dw_pcie pci;
struct tegra_bpmp *bpmp;
- enum dw_pcie_device_mode mode;
+ struct tegra_pcie_dw_of_data *of_data;
bool supports_clkreq;
bool enable_cdm_check;
+ bool enable_srns;
bool link_state;
bool update_fc_fixup;
+ bool enable_ext_refclk;
u8 init_link_width;
u32 msi_ctrl_int;
u32 num_lanes;
u32 cid;
u32 cfg_link_cap_l1sub;
+ u32 ras_des_cap;
u32 pcie_cap_base;
u32 aspm_cmrt;
u32 aspm_pwr_on_t;
@@ -287,22 +289,18 @@ struct tegra194_pcie {
int ep_state;
};
-struct tegra194_pcie_of_data {
- enum dw_pcie_device_mode mode;
-};
-
-static inline struct tegra194_pcie *to_tegra_pcie(struct dw_pcie *pci)
+static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
{
- return container_of(pci, struct tegra194_pcie, pci);
+ return container_of(pci, struct tegra_pcie_dw, pci);
}
-static inline void appl_writel(struct tegra194_pcie *pcie, const u32 value,
+static inline void appl_writel(struct tegra_pcie_dw *pcie, const u32 value,
const u32 reg)
{
writel_relaxed(value, pcie->appl_base + reg);
}
-static inline u32 appl_readl(struct tegra194_pcie *pcie, const u32 reg)
+static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg)
{
return readl_relaxed(pcie->appl_base + reg);
}
@@ -311,10 +309,10 @@ struct tegra_pcie_soc {
enum dw_pcie_device_mode mode;
};
-static void apply_bad_link_workaround(struct pcie_port *pp)
+static void apply_bad_link_workaround(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct tegra194_pcie *pcie = to_tegra_pcie(pci);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
u32 current_link_width;
u16 val;
@@ -347,18 +345,18 @@ static void apply_bad_link_workaround(struct pcie_port *pp)
static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
{
- struct tegra194_pcie *pcie = arg;
+ struct tegra_pcie_dw *pcie = arg;
struct dw_pcie *pci = &pcie->pci;
- struct pcie_port *pp = &pci->pp;
- u32 val, tmp;
+ struct dw_pcie_rp *pp = &pci->pp;
+ u32 val, status_l0, status_l1;
u16 val_w;
- val = appl_readl(pcie, APPL_INTR_STATUS_L0);
- if (val & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
- val = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
- if (val & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) {
- appl_writel(pcie, val, APPL_INTR_STATUS_L1_0_0);
-
+ status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
+ if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
+ appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);
+ if (!pcie->of_data->has_sbr_reset_fix &&
+ status_l1 & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) {
/* SBR & Surprise Link Down WAR */
val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
val &= ~APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
@@ -374,15 +372,21 @@ static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
}
}
- if (val & APPL_INTR_STATUS_L0_INT_INT) {
- val = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0);
- if (val & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) {
+ if (status_l0 & APPL_INTR_STATUS_L0_INT_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0);
+ if (status_l1 & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) {
appl_writel(pcie,
APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS,
APPL_INTR_STATUS_L1_8_0);
apply_bad_link_workaround(pp);
}
- if (val & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) {
+ if (status_l1 & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) {
+ val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKSTA);
+ val_w |= PCI_EXP_LNKSTA_LBMS;
+ dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKSTA, val_w);
+
appl_writel(pcie,
APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS,
APPL_INTR_STATUS_L1_8_0);
@@ -394,31 +398,30 @@ static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
}
}
- val = appl_readl(pcie, APPL_INTR_STATUS_L0);
- if (val & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) {
- val = appl_readl(pcie, APPL_INTR_STATUS_L1_18);
- tmp = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
- if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) {
+ if (status_l0 & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_18);
+ val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
+ if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) {
dev_info(pci->dev, "CDM check complete\n");
- tmp |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE;
+ val |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE;
}
- if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) {
+ if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) {
dev_err(pci->dev, "CDM comparison mismatch\n");
- tmp |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR;
+ val |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR;
}
- if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) {
+ if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) {
dev_err(pci->dev, "CDM Logic error\n");
- tmp |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR;
+ val |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR;
}
- dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, tmp);
- tmp = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR);
- dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", tmp);
+ dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
+ val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR);
+ dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", val);
}
return IRQ_HANDLED;
}
-static void pex_ep_event_hot_rst_done(struct tegra194_pcie *pcie)
+static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie)
{
u32 val;
@@ -446,7 +449,7 @@ static void pex_ep_event_hot_rst_done(struct tegra194_pcie *pcie)
static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
{
- struct tegra194_pcie *pcie = arg;
+ struct tegra_pcie_dw *pcie = arg;
struct dw_pcie *pci = &pcie->pci;
u32 val, speed;
@@ -454,6 +457,9 @@ static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
PCI_EXP_LNKSTA_CLS;
clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);
+ if (pcie->of_data->has_ltr_req_fix)
+ return IRQ_HANDLED;
+
/* If EP doesn't advertise L1SS, just return */
val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
if (!(val & (PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2)))
@@ -492,7 +498,7 @@ static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
{
- struct tegra194_pcie *pcie = arg;
+ struct tegra_pcie_dw *pcie = arg;
struct dw_pcie_ep *ep = &pcie->pci.ep;
int spurious = 1;
u32 status_l0, status_l1, link_status;
@@ -535,16 +541,21 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
return IRQ_HANDLED;
}
-static int tegra194_pcie_rd_own_conf(struct pci_bus *bus, u32 devfn, int where,
+static int tegra_pcie_dw_rd_own_conf(struct pci_bus *bus, u32 devfn, int where,
int size, u32 *val)
{
+ struct dw_pcie_rp *pp = bus->sysdata;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+
/*
* This is an endpoint mode specific register happen to appear even
* when controller is operating in root port mode and system hangs
* when it is accessed with link being in ASPM-L1 state.
* So skip accessing it altogether
*/
- if (!PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL) {
+ if (!pcie->of_data->has_msix_doorbell_access_fix &&
+ !PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL) {
*val = 0x00000000;
return PCIBIOS_SUCCESSFUL;
}
@@ -552,16 +563,21 @@ static int tegra194_pcie_rd_own_conf(struct pci_bus *bus, u32 devfn, int where,
return pci_generic_config_read(bus, devfn, where, size, val);
}
-static int tegra194_pcie_wr_own_conf(struct pci_bus *bus, u32 devfn, int where,
+static int tegra_pcie_dw_wr_own_conf(struct pci_bus *bus, u32 devfn, int where,
int size, u32 val)
{
+ struct dw_pcie_rp *pp = bus->sysdata;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+
/*
* This is an endpoint mode specific register happen to appear even
* when controller is operating in root port mode and system hangs
* when it is accessed with link being in ASPM-L1 state.
* So skip accessing it altogether
*/
- if (!PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL)
+ if (!pcie->of_data->has_msix_doorbell_access_fix &&
+ !PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL)
return PCIBIOS_SUCCESSFUL;
return pci_generic_config_write(bus, devfn, where, size, val);
@@ -569,30 +585,12 @@ static int tegra194_pcie_wr_own_conf(struct pci_bus *bus, u32 devfn, int where,
static struct pci_ops tegra_pci_ops = {
.map_bus = dw_pcie_own_conf_map_bus,
- .read = tegra194_pcie_rd_own_conf,
- .write = tegra194_pcie_wr_own_conf,
+ .read = tegra_pcie_dw_rd_own_conf,
+ .write = tegra_pcie_dw_wr_own_conf,
};
#if defined(CONFIG_PCIEASPM)
-static const u32 event_cntr_ctrl_offset[] = {
- 0x1d8,
- 0x1a8,
- 0x1a8,
- 0x1a8,
- 0x1c4,
- 0x1d8
-};
-
-static const u32 event_cntr_data_offset[] = {
- 0x1dc,
- 0x1ac,
- 0x1ac,
- 0x1ac,
- 0x1c8,
- 0x1dc
-};
-
-static void disable_aspm_l11(struct tegra194_pcie *pcie)
+static void disable_aspm_l11(struct tegra_pcie_dw *pcie)
{
u32 val;
@@ -601,7 +599,7 @@ static void disable_aspm_l11(struct tegra194_pcie *pcie)
dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
}
-static void disable_aspm_l12(struct tegra194_pcie *pcie)
+static void disable_aspm_l12(struct tegra_pcie_dw *pcie)
{
u32 val;
@@ -610,24 +608,27 @@ static void disable_aspm_l12(struct tegra194_pcie *pcie)
dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
}
-static inline u32 event_counter_prog(struct tegra194_pcie *pcie, u32 event)
+static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event)
{
u32 val;
- val = dw_pcie_readl_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid]);
+ val = dw_pcie_readl_dbi(&pcie->pci, pcie->ras_des_cap +
+ PCIE_RAS_DES_EVENT_COUNTER_CONTROL);
val &= ~(EVENT_COUNTER_EVENT_SEL_MASK << EVENT_COUNTER_EVENT_SEL_SHIFT);
val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
val |= event << EVENT_COUNTER_EVENT_SEL_SHIFT;
val |= EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
- dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], val);
- val = dw_pcie_readl_dbi(&pcie->pci, event_cntr_data_offset[pcie->cid]);
+ dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
+ PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);
+ val = dw_pcie_readl_dbi(&pcie->pci, pcie->ras_des_cap +
+ PCIE_RAS_DES_EVENT_COUNTER_DATA);
return val;
}
static int aspm_state_cnt(struct seq_file *s, void *data)
{
- struct tegra194_pcie *pcie = (struct tegra194_pcie *)
+ struct tegra_pcie_dw *pcie = (struct tegra_pcie_dw *)
dev_get_drvdata(s->private);
u32 val;
@@ -647,18 +648,20 @@ static int aspm_state_cnt(struct seq_file *s, void *data)
event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_2));
/* Clear all counters */
- dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid],
+ dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
+ PCIE_RAS_DES_EVENT_COUNTER_CONTROL,
EVENT_COUNTER_ALL_CLEAR);
/* Re-enable counting */
val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
- dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], val);
+ dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
+ PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);
return 0;
}
-static void init_host_aspm(struct tegra194_pcie *pcie)
+static void init_host_aspm(struct tegra_pcie_dw *pcie)
{
struct dw_pcie *pci = &pcie->pci;
u32 val;
@@ -666,10 +669,14 @@ static void init_host_aspm(struct tegra194_pcie *pcie)
val = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);
pcie->cfg_link_cap_l1sub = val + PCI_L1SS_CAP;
+ pcie->ras_des_cap = dw_pcie_find_ext_capability(&pcie->pci,
+ PCI_EXT_CAP_ID_VNDR);
+
/* Enable ASPM counters */
val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
- dw_pcie_writel_dbi(pci, event_cntr_ctrl_offset[pcie->cid], val);
+ dw_pcie_writel_dbi(pci, pcie->ras_des_cap +
+ PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);
/* Program T_cmrt and T_pwr_on values */
val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
@@ -686,22 +693,22 @@ static void init_host_aspm(struct tegra194_pcie *pcie)
dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
}
-static void init_debugfs(struct tegra194_pcie *pcie)
+static void init_debugfs(struct tegra_pcie_dw *pcie)
{
debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt", pcie->debugfs,
aspm_state_cnt);
}
#else
-static inline void disable_aspm_l12(struct tegra194_pcie *pcie) { return; }
-static inline void disable_aspm_l11(struct tegra194_pcie *pcie) { return; }
-static inline void init_host_aspm(struct tegra194_pcie *pcie) { return; }
-static inline void init_debugfs(struct tegra194_pcie *pcie) { return; }
+static inline void disable_aspm_l12(struct tegra_pcie_dw *pcie) { return; }
+static inline void disable_aspm_l11(struct tegra_pcie_dw *pcie) { return; }
+static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { return; }
+static inline void init_debugfs(struct tegra_pcie_dw *pcie) { return; }
#endif
-static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp)
+static void tegra_pcie_enable_system_interrupts(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct tegra194_pcie *pcie = to_tegra_pcie(pci);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
u32 val;
u16 val_w;
@@ -709,13 +716,15 @@ static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp)
val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
appl_writel(pcie, val, APPL_INTR_EN_L0_0);
- val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
- val |= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN;
- appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);
+ if (!pcie->of_data->has_sbr_reset_fix) {
+ val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
+ val |= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN;
+ appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);
+ }
if (pcie->enable_cdm_check) {
val = appl_readl(pcie, APPL_INTR_EN_L0_0);
- val |= APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN;
+ val |= pcie->of_data->cdm_chk_int_en_bit;
appl_writel(pcie, val, APPL_INTR_EN_L0_0);
val = appl_readl(pcie, APPL_INTR_EN_L1_18);
@@ -736,10 +745,10 @@ static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp)
val_w);
}
-static void tegra_pcie_enable_legacy_interrupts(struct pcie_port *pp)
+static void tegra_pcie_enable_legacy_interrupts(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct tegra194_pcie *pcie = to_tegra_pcie(pci);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
u32 val;
/* Enable legacy interrupt generation */
@@ -757,10 +766,10 @@ static void tegra_pcie_enable_legacy_interrupts(struct pcie_port *pp)
appl_writel(pcie, val, APPL_INTR_EN_L1_8_0);
}
-static void tegra_pcie_enable_msi_interrupts(struct pcie_port *pp)
+static void tegra_pcie_enable_msi_interrupts(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct tegra194_pcie *pcie = to_tegra_pcie(pci);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
u32 val;
/* Enable MSI interrupt generation */
@@ -770,10 +779,10 @@ static void tegra_pcie_enable_msi_interrupts(struct pcie_port *pp)
appl_writel(pcie, val, APPL_INTR_EN_L0_0);
}
-static void tegra_pcie_enable_interrupts(struct pcie_port *pp)
+static void tegra_pcie_enable_interrupts(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct tegra194_pcie *pcie = to_tegra_pcie(pci);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
/* Clear interrupt statuses before enabling interrupts */
appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
@@ -798,7 +807,7 @@ static void tegra_pcie_enable_interrupts(struct pcie_port *pp)
tegra_pcie_enable_msi_interrupts(pp);
}
-static void config_gen3_gen4_eq_presets(struct tegra194_pcie *pcie)
+static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
{
struct dw_pcie *pci = &pcie->pci;
u32 val, offset, i;
@@ -842,7 +851,8 @@ static void config_gen3_gen4_eq_presets(struct tegra194_pcie *pcie)
val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
- val |= (0x360 << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
+ val |= (pcie->of_data->gen4_preset_vec <<
+ GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);
@@ -851,11 +861,12 @@ static void config_gen3_gen4_eq_presets(struct tegra194_pcie *pcie)
dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
}
-static int tegra194_pcie_host_init(struct pcie_port *pp)
+static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct tegra194_pcie *pcie = to_tegra_pcie(pci);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
u32 val;
+ u16 val_16;
pp->bridge->ops = &tegra_pci_ops;
@@ -863,6 +874,11 @@ static int tegra194_pcie_host_init(struct pcie_port *pp)
pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
PCI_CAP_ID_EXP);
+ val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL);
+ val_16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
+ val_16 |= PCI_EXP_DEVCTL_PAYLOAD_256B;
+ dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL, val_16);
+
val = dw_pcie_readl_dbi(pci, PCI_IO_BASE);
val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8);
dw_pcie_writel_dbi(pci, PCI_IO_BASE, val);
@@ -887,6 +903,15 @@ static int tegra194_pcie_host_init(struct pcie_port *pp)
val |= (pcie->num_lanes << PCI_EXP_LNKSTA_NLW_SHIFT);
dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, val);
+ /* Clear Slot Clock Configuration bit if SRNS configuration */
+ if (pcie->enable_srns) {
+ val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKSTA);
+ val_16 &= ~PCI_EXP_LNKSTA_SLC;
+ dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA,
+ val_16);
+ }
+
config_gen3_gen4_eq_presets(pcie);
init_host_aspm(pcie);
@@ -897,9 +922,11 @@ static int tegra194_pcie_host_init(struct pcie_port *pp)
disable_aspm_l12(pcie);
}
- val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
- val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
- dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+ if (!pcie->of_data->has_l1ss_exit_fix) {
+ val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+ }
if (pcie->update_fc_fixup) {
val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
@@ -912,14 +939,14 @@ static int tegra194_pcie_host_init(struct pcie_port *pp)
return 0;
}
-static int tegra194_pcie_start_link(struct dw_pcie *pci)
+static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
{
u32 val, offset, speed, tmp;
- struct tegra194_pcie *pcie = to_tegra_pcie(pci);
- struct pcie_port *pp = &pci->pp;
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ struct dw_pcie_rp *pp = &pci->pp;
bool retry = true;
- if (pcie->mode == DW_PCIE_EP_TYPE) {
+ if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
enable_irq(pcie->pex_rst_irq);
return 0;
}
@@ -978,9 +1005,9 @@ retry_link:
offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_DLF);
val = dw_pcie_readl_dbi(pci, offset + PCI_DLF_CAP);
val &= ~PCI_DLF_EXCHANGE_ENABLE;
- dw_pcie_writel_dbi(pci, offset, val);
+ dw_pcie_writel_dbi(pci, offset + PCI_DLF_CAP, val);
- tegra194_pcie_host_init(pp);
+ tegra_pcie_dw_host_init(pp);
dw_pcie_setup_rc(pp);
retry = false;
@@ -996,32 +1023,32 @@ retry_link:
return 0;
}
-static int tegra194_pcie_link_up(struct dw_pcie *pci)
+static int tegra_pcie_dw_link_up(struct dw_pcie *pci)
{
- struct tegra194_pcie *pcie = to_tegra_pcie(pci);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
return !!(val & PCI_EXP_LNKSTA_DLLLA);
}
-static void tegra194_pcie_stop_link(struct dw_pcie *pci)
+static void tegra_pcie_dw_stop_link(struct dw_pcie *pci)
{
- struct tegra194_pcie *pcie = to_tegra_pcie(pci);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
disable_irq(pcie->pex_rst_irq);
}
static const struct dw_pcie_ops tegra_dw_pcie_ops = {
- .link_up = tegra194_pcie_link_up,
- .start_link = tegra194_pcie_start_link,
- .stop_link = tegra194_pcie_stop_link,
+ .link_up = tegra_pcie_dw_link_up,
+ .start_link = tegra_pcie_dw_start_link,
+ .stop_link = tegra_pcie_dw_stop_link,
};
-static const struct dw_pcie_host_ops tegra194_pcie_host_ops = {
- .host_init = tegra194_pcie_host_init,
+static const struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
+ .host_init = tegra_pcie_dw_host_init,
};
-static void tegra_pcie_disable_phy(struct tegra194_pcie *pcie)
+static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
{
unsigned int phy_count = pcie->phy_count;
@@ -1031,7 +1058,7 @@ static void tegra_pcie_disable_phy(struct tegra194_pcie *pcie)
}
}
-static int tegra_pcie_enable_phy(struct tegra194_pcie *pcie)
+static int tegra_pcie_enable_phy(struct tegra_pcie_dw *pcie)
{
unsigned int i;
int ret;
@@ -1058,7 +1085,7 @@ phy_exit:
return ret;
}
-static int tegra194_pcie_parse_dt(struct tegra194_pcie *pcie)
+static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
{
struct platform_device *pdev = to_platform_device(pcie->dev);
struct device_node *np = pcie->dev->of_node;
@@ -1111,13 +1138,27 @@ static int tegra194_pcie_parse_dt(struct tegra194_pcie *pcie)
if (of_property_read_bool(np, "nvidia,update-fc-fixup"))
pcie->update_fc_fixup = true;
+ /* RP using an external REFCLK is supported only in Tegra234 */
+ if (pcie->of_data->version == TEGRA194_DWC_IP_VER) {
+ if (pcie->of_data->mode == DW_PCIE_EP_TYPE)
+ pcie->enable_ext_refclk = true;
+ } else {
+ pcie->enable_ext_refclk =
+ of_property_read_bool(pcie->dev->of_node,
+ "nvidia,enable-ext-refclk");
+ }
+
pcie->supports_clkreq =
of_property_read_bool(pcie->dev->of_node, "supports-clkreq");
pcie->enable_cdm_check =
of_property_read_bool(np, "snps,enable-cdm-check");
- if (pcie->mode == DW_PCIE_RC_TYPE)
+ if (pcie->of_data->version == TEGRA234_DWC_IP_VER)
+ pcie->enable_srns =
+ of_property_read_bool(np, "nvidia,enable-srns");
+
+ if (pcie->of_data->mode == DW_PCIE_RC_TYPE)
return 0;
/* Endpoint mode specific DT entries */
@@ -1154,15 +1195,18 @@ static int tegra194_pcie_parse_dt(struct tegra194_pcie *pcie)
return 0;
}
-static int tegra_pcie_bpmp_set_ctrl_state(struct tegra194_pcie *pcie,
+static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
bool enable)
{
struct mrq_uphy_response resp;
struct tegra_bpmp_message msg;
struct mrq_uphy_request req;
- /* Controller-5 doesn't need to have its state set by BPMP-FW */
- if (pcie->cid == 5)
+ /*
+ * Controller-5 doesn't need to have its state set by BPMP-FW in
+ * Tegra194
+ */
+ if (pcie->of_data->version == TEGRA194_DWC_IP_VER && pcie->cid == 5)
return 0;
memset(&req, 0, sizeof(req));
@@ -1182,7 +1226,7 @@ static int tegra_pcie_bpmp_set_ctrl_state(struct tegra194_pcie *pcie,
return tegra_bpmp_transfer(pcie->bpmp, &msg);
}
-static int tegra_pcie_bpmp_set_pll_state(struct tegra194_pcie *pcie,
+static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
bool enable)
{
struct mrq_uphy_response resp;
@@ -1210,9 +1254,9 @@ static int tegra_pcie_bpmp_set_pll_state(struct tegra194_pcie *pcie,
return tegra_bpmp_transfer(pcie->bpmp, &msg);
}
-static void tegra_pcie_downstream_dev_to_D0(struct tegra194_pcie *pcie)
+static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
{
- struct pcie_port *pp = &pcie->pci.pp;
+ struct dw_pcie_rp *pp = &pcie->pci.pp;
struct pci_bus *child, *root_bus = NULL;
struct pci_dev *pdev;
@@ -1248,7 +1292,7 @@ static void tegra_pcie_downstream_dev_to_D0(struct tegra194_pcie *pcie)
}
}
-static int tegra_pcie_get_slot_regulators(struct tegra194_pcie *pcie)
+static int tegra_pcie_get_slot_regulators(struct tegra_pcie_dw *pcie)
{
pcie->slot_ctl_3v3 = devm_regulator_get_optional(pcie->dev, "vpcie3v3");
if (IS_ERR(pcie->slot_ctl_3v3)) {
@@ -1269,7 +1313,7 @@ static int tegra_pcie_get_slot_regulators(struct tegra194_pcie *pcie)
return 0;
}
-static int tegra_pcie_enable_slot_regulators(struct tegra194_pcie *pcie)
+static int tegra_pcie_enable_slot_regulators(struct tegra_pcie_dw *pcie)
{
int ret;
@@ -1307,7 +1351,7 @@ fail_12v_enable:
return ret;
}
-static void tegra_pcie_disable_slot_regulators(struct tegra194_pcie *pcie)
+static void tegra_pcie_disable_slot_regulators(struct tegra_pcie_dw *pcie)
{
if (pcie->slot_ctl_12v)
regulator_disable(pcie->slot_ctl_12v);
@@ -1315,7 +1359,7 @@ static void tegra_pcie_disable_slot_regulators(struct tegra194_pcie *pcie)
regulator_disable(pcie->slot_ctl_3v3);
}
-static int tegra_pcie_config_controller(struct tegra194_pcie *pcie,
+static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie,
bool en_hw_hot_rst)
{
int ret;
@@ -1328,6 +1372,14 @@ static int tegra_pcie_config_controller(struct tegra194_pcie *pcie,
return ret;
}
+ if (pcie->enable_ext_refclk) {
+ ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
+ if (ret) {
+ dev_err(pcie->dev, "Failed to init UPHY: %d\n", ret);
+ goto fail_pll_init;
+ }
+ }
+
ret = tegra_pcie_enable_slot_regulators(pcie);
if (ret < 0)
goto fail_slot_reg_en;
@@ -1351,11 +1403,13 @@ static int tegra_pcie_config_controller(struct tegra194_pcie *pcie,
goto fail_core_apb_rst;
}
- if (en_hw_hot_rst) {
+ if (en_hw_hot_rst || pcie->of_data->has_sbr_reset_fix) {
/* Enable HW_HOT_RST mode */
val = appl_readl(pcie, APPL_CTRL);
val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
+ val |= (APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST_LTSSM_EN <<
+ APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
val |= APPL_CTRL_HW_HOT_RST_EN;
appl_writel(pcie, val, APPL_CTRL);
}
@@ -1382,6 +1436,19 @@ static int tegra_pcie_config_controller(struct tegra194_pcie *pcie,
val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
appl_writel(pcie, val, APPL_CFG_MISC);
+ if (pcie->enable_srns || pcie->enable_ext_refclk) {
+ /*
+ * When Tegra PCIe RP is using external clock, it cannot supply
+ * same clock to its downstream hierarchy. Hence, gate PCIe RP
+ * REFCLK out pads when RP & EP are using separate clocks or RP
+ * is using an external REFCLK.
+ */
+ val = appl_readl(pcie, APPL_PINMUX);
+ val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
+ val &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
+ appl_writel(pcie, val, APPL_PINMUX);
+ }
+
if (!pcie->supports_clkreq) {
val = appl_readl(pcie, APPL_PINMUX);
val |= APPL_PINMUX_CLKREQ_OVERRIDE_EN;
@@ -1407,12 +1474,15 @@ fail_core_clk:
fail_reg_en:
tegra_pcie_disable_slot_regulators(pcie);
fail_slot_reg_en:
+ if (pcie->enable_ext_refclk)
+ tegra_pcie_bpmp_set_pll_state(pcie, false);
+fail_pll_init:
tegra_pcie_bpmp_set_ctrl_state(pcie, false);
return ret;
}
-static void tegra_pcie_unconfig_controller(struct tegra194_pcie *pcie)
+static void tegra_pcie_unconfig_controller(struct tegra_pcie_dw *pcie)
{
int ret;
@@ -1434,23 +1504,29 @@ static void tegra_pcie_unconfig_controller(struct tegra194_pcie *pcie)
tegra_pcie_disable_slot_regulators(pcie);
+ if (pcie->enable_ext_refclk) {
+ ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
+ if (ret)
+ dev_err(pcie->dev, "Failed to deinit UPHY: %d\n", ret);
+ }
+
ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
if (ret)
dev_err(pcie->dev, "Failed to disable controller %d: %d\n",
pcie->cid, ret);
}
-static int tegra_pcie_init_controller(struct tegra194_pcie *pcie)
+static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie)
{
struct dw_pcie *pci = &pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
int ret;
ret = tegra_pcie_config_controller(pcie, false);
if (ret < 0)
return ret;
- pp->ops = &tegra194_pcie_host_ops;
+ pp->ops = &tegra_pcie_dw_host_ops;
ret = dw_pcie_host_init(pp);
if (ret < 0) {
@@ -1465,11 +1541,11 @@ fail_host_init:
return ret;
}
-static int tegra_pcie_try_link_l2(struct tegra194_pcie *pcie)
+static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie)
{
u32 val;
- if (!tegra194_pcie_link_up(&pcie->pci))
+ if (!tegra_pcie_dw_link_up(&pcie->pci))
return 0;
val = appl_readl(pcie, APPL_RADM_STATUS);
@@ -1481,12 +1557,12 @@ static int tegra_pcie_try_link_l2(struct tegra194_pcie *pcie)
1, PME_ACK_TIMEOUT);
}
-static void tegra194_pcie_pme_turnoff(struct tegra194_pcie *pcie)
+static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
{
u32 data;
int err;
- if (!tegra194_pcie_link_up(&pcie->pci)) {
+ if (!tegra_pcie_dw_link_up(&pcie->pci)) {
dev_dbg(pcie->dev, "PCIe link is not up...!\n");
return;
}
@@ -1543,15 +1619,15 @@ static void tegra194_pcie_pme_turnoff(struct tegra194_pcie *pcie)
appl_writel(pcie, data, APPL_PINMUX);
}
-static void tegra_pcie_deinit_controller(struct tegra194_pcie *pcie)
+static void tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
{
tegra_pcie_downstream_dev_to_D0(pcie);
dw_pcie_host_deinit(&pcie->pci.pp);
- tegra194_pcie_pme_turnoff(pcie);
+ tegra_pcie_dw_pme_turnoff(pcie);
tegra_pcie_unconfig_controller(pcie);
}
-static int tegra_pcie_config_rp(struct tegra194_pcie *pcie)
+static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
{
struct device *dev = pcie->dev;
char *name;
@@ -1578,7 +1654,7 @@ static int tegra_pcie_config_rp(struct tegra194_pcie *pcie)
goto fail_pm_get_sync;
}
- pcie->link_state = tegra194_pcie_link_up(&pcie->pci);
+ pcie->link_state = tegra_pcie_dw_link_up(&pcie->pci);
if (!pcie->link_state) {
ret = -ENOMEDIUM;
goto fail_host_init;
@@ -1603,7 +1679,7 @@ fail_pm_get_sync:
return ret;
}
-static void pex_ep_event_pex_rst_assert(struct tegra194_pcie *pcie)
+static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)
{
u32 val;
int ret;
@@ -1634,6 +1710,13 @@ static void pex_ep_event_pex_rst_assert(struct tegra194_pcie *pcie)
pm_runtime_put_sync(pcie->dev);
+ if (pcie->enable_ext_refclk) {
+ ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
+ if (ret)
+ dev_err(pcie->dev, "Failed to turn off UPHY: %d\n",
+ ret);
+ }
+
ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
if (ret)
dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", ret);
@@ -1642,13 +1725,14 @@ static void pex_ep_event_pex_rst_assert(struct tegra194_pcie *pcie)
dev_dbg(pcie->dev, "Uninitialization of endpoint is completed\n");
}
-static void pex_ep_event_pex_rst_deassert(struct tegra194_pcie *pcie)
+static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
{
struct dw_pcie *pci = &pcie->pci;
struct dw_pcie_ep *ep = &pci->ep;
struct device *dev = pcie->dev;
u32 val;
int ret;
+ u16 val_16;
if (pcie->ep_state == EP_STATE_ENABLED)
return;
@@ -1660,10 +1744,20 @@ static void pex_ep_event_pex_rst_deassert(struct tegra194_pcie *pcie)
return;
}
- ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
+ ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true);
if (ret) {
- dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n", ret);
- goto fail_pll_init;
+ dev_err(pcie->dev, "Failed to enable controller %u: %d\n",
+ pcie->cid, ret);
+ goto fail_set_ctrl_state;
+ }
+
+ if (pcie->enable_ext_refclk) {
+ ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
+ if (ret) {
+ dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n",
+ ret);
+ goto fail_pll_init;
+ }
}
ret = clk_prepare_enable(pcie->core_clk);
@@ -1760,12 +1854,29 @@ static void pex_ep_event_pex_rst_deassert(struct tegra194_pcie *pcie)
disable_aspm_l12(pcie);
}
- val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
- val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
- dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+ if (!pcie->of_data->has_l1ss_exit_fix) {
+ val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+ }
pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
PCI_CAP_ID_EXP);
+
+ val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL);
+ val_16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
+ val_16 |= PCI_EXP_DEVCTL_PAYLOAD_256B;
+ dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL, val_16);
+
+ /* Clear Slot Clock Configuration bit if SRNS configuration */
+ if (pcie->enable_srns) {
+ val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKSTA);
+ val_16 &= ~PCI_EXP_LNKSTA_SLC;
+ dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA,
+ val_16);
+ }
+
clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);
val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK);
@@ -1782,6 +1893,13 @@ static void pex_ep_event_pex_rst_deassert(struct tegra194_pcie *pcie)
dw_pcie_ep_init_notify(ep);
+ /* Program the private control to allow sending LTR upstream */
+ if (pcie->of_data->has_ltr_req_fix) {
+ val = appl_readl(pcie, APPL_LTR_MSG_2);
+ val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE;
+ appl_writel(pcie, val, APPL_LTR_MSG_2);
+ }
+
/* Enable LTSSM */
val = appl_readl(pcie, APPL_CTRL);
val |= APPL_CTRL_LTSSM_EN;
@@ -1802,12 +1920,14 @@ fail_core_apb_rst:
fail_core_clk_enable:
tegra_pcie_bpmp_set_pll_state(pcie, false);
fail_pll_init:
+ tegra_pcie_bpmp_set_ctrl_state(pcie, false);
+fail_set_ctrl_state:
pm_runtime_put_sync(dev);
}
static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
{
- struct tegra194_pcie *pcie = arg;
+ struct tegra_pcie_dw *pcie = arg;
if (gpiod_get_value(pcie->pex_rst_gpiod))
pex_ep_event_pex_rst_assert(pcie);
@@ -1817,7 +1937,7 @@ static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
return IRQ_HANDLED;
}
-static int tegra_pcie_ep_raise_legacy_irq(struct tegra194_pcie *pcie, u16 irq)
+static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
/* Tegra194 supports only INTA */
if (irq > 1)
@@ -1829,7 +1949,7 @@ static int tegra_pcie_ep_raise_legacy_irq(struct tegra194_pcie *pcie, u16 irq)
return 0;
}
-static int tegra_pcie_ep_raise_msi_irq(struct tegra194_pcie *pcie, u16 irq)
+static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
if (unlikely(irq > 31))
return -EINVAL;
@@ -1839,7 +1959,7 @@ static int tegra_pcie_ep_raise_msi_irq(struct tegra194_pcie *pcie, u16 irq)
return 0;
}
-static int tegra_pcie_ep_raise_msix_irq(struct tegra194_pcie *pcie, u16 irq)
+static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
struct dw_pcie_ep *ep = &pcie->pci.ep;
@@ -1853,7 +1973,7 @@ static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- struct tegra194_pcie *pcie = to_tegra_pcie(pci);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
switch (type) {
case PCI_EPC_IRQ_LEGACY:
@@ -1894,7 +2014,7 @@ static const struct dw_pcie_ep_ops pcie_ep_ops = {
.get_features = tegra_pcie_ep_get_features,
};
-static int tegra_pcie_config_ep(struct tegra194_pcie *pcie,
+static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie,
struct platform_device *pdev)
{
struct dw_pcie *pci = &pcie->pci;
@@ -1949,19 +2069,20 @@ static int tegra_pcie_config_ep(struct tegra194_pcie *pcie,
if (ret) {
dev_err(dev, "Failed to initialize DWC Endpoint subsystem: %d\n",
ret);
+ pm_runtime_disable(dev);
return ret;
}
return 0;
}
-static int tegra194_pcie_probe(struct platform_device *pdev)
+static int tegra_pcie_dw_probe(struct platform_device *pdev)
{
- const struct tegra194_pcie_of_data *data;
+ const struct tegra_pcie_dw_of_data *data;
struct device *dev = &pdev->dev;
struct resource *atu_dma_res;
- struct tegra194_pcie *pcie;
- struct pcie_port *pp;
+ struct tegra_pcie_dw *pcie;
+ struct dw_pcie_rp *pp;
struct dw_pcie *pci;
struct phy **phys;
char *name;
@@ -1977,16 +2098,14 @@ static int tegra194_pcie_probe(struct platform_device *pdev)
pci = &pcie->pci;
pci->dev = &pdev->dev;
pci->ops = &tegra_dw_pcie_ops;
- pci->n_fts[0] = N_FTS_VAL;
- pci->n_fts[1] = FTS_VAL;
- pci->version = 0x490A;
-
+ pcie->dev = &pdev->dev;
+ pcie->of_data = (struct tegra_pcie_dw_of_data *)data;
+ pci->n_fts[0] = pcie->of_data->n_fts[0];
+ pci->n_fts[1] = pcie->of_data->n_fts[1];
pp = &pci->pp;
pp->num_vectors = MAX_MSI_IRQS;
- pcie->dev = &pdev->dev;
- pcie->mode = (enum dw_pcie_device_mode)data->mode;
- ret = tegra194_pcie_parse_dt(pcie);
+ ret = tegra_pcie_dw_parse_dt(pcie);
if (ret < 0) {
const char *level = KERN_ERR;
@@ -2101,7 +2220,7 @@ static int tegra194_pcie_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pcie);
- switch (pcie->mode) {
+ switch (pcie->of_data->mode) {
case DW_PCIE_RC_TYPE:
ret = devm_request_irq(dev, pp->irq, tegra_pcie_rp_irq_handler,
IRQF_SHARED, "tegra-pcie-intr", pcie);
@@ -2136,7 +2255,8 @@ static int tegra194_pcie_probe(struct platform_device *pdev)
break;
default:
- dev_err(dev, "Invalid PCIe device type %d\n", pcie->mode);
+ dev_err(dev, "Invalid PCIe device type %d\n",
+ pcie->of_data->mode);
}
fail:
@@ -2144,16 +2264,22 @@ fail:
return ret;
}
-static int tegra194_pcie_remove(struct platform_device *pdev)
+static int tegra_pcie_dw_remove(struct platform_device *pdev)
{
- struct tegra194_pcie *pcie = platform_get_drvdata(pdev);
+ struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);
- if (!pcie->link_state)
- return 0;
+ if (pcie->of_data->mode == DW_PCIE_RC_TYPE) {
+ if (!pcie->link_state)
+ return 0;
+
+ debugfs_remove_recursive(pcie->debugfs);
+ tegra_pcie_deinit_controller(pcie);
+ pm_runtime_put_sync(pcie->dev);
+ } else {
+ disable_irq(pcie->pex_rst_irq);
+ pex_ep_event_pex_rst_assert(pcie);
+ }
- debugfs_remove_recursive(pcie->debugfs);
- tegra_pcie_deinit_controller(pcie);
- pm_runtime_put_sync(pcie->dev);
pm_runtime_disable(pcie->dev);
tegra_bpmp_put(pcie->bpmp);
if (pcie->pex_refclk_sel_gpiod)
@@ -2162,41 +2288,48 @@ static int tegra194_pcie_remove(struct platform_device *pdev)
return 0;
}
-static int tegra194_pcie_suspend_late(struct device *dev)
+static int tegra_pcie_dw_suspend_late(struct device *dev)
{
- struct tegra194_pcie *pcie = dev_get_drvdata(dev);
+ struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
u32 val;
+ if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
+ dev_err(dev, "Failed to Suspend as Tegra PCIe is in EP mode\n");
+ return -EPERM;
+ }
+
if (!pcie->link_state)
return 0;
/* Enable HW_HOT_RST mode */
- val = appl_readl(pcie, APPL_CTRL);
- val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
- APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
- val |= APPL_CTRL_HW_HOT_RST_EN;
- appl_writel(pcie, val, APPL_CTRL);
+ if (!pcie->of_data->has_sbr_reset_fix) {
+ val = appl_readl(pcie, APPL_CTRL);
+ val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
+ APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
+ val |= APPL_CTRL_HW_HOT_RST_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+ }
return 0;
}
-static int tegra194_pcie_suspend_noirq(struct device *dev)
+static int tegra_pcie_dw_suspend_noirq(struct device *dev)
{
- struct tegra194_pcie *pcie = dev_get_drvdata(dev);
+ struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
if (!pcie->link_state)
return 0;
tegra_pcie_downstream_dev_to_D0(pcie);
- tegra194_pcie_pme_turnoff(pcie);
+ tegra_pcie_dw_pme_turnoff(pcie);
tegra_pcie_unconfig_controller(pcie);
return 0;
}
-static int tegra194_pcie_resume_noirq(struct device *dev)
+static int tegra_pcie_dw_resume_noirq(struct device *dev)
{
- struct tegra194_pcie *pcie = dev_get_drvdata(dev);
+ struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
int ret;
if (!pcie->link_state)
@@ -2206,7 +2339,7 @@ static int tegra194_pcie_resume_noirq(struct device *dev)
if (ret < 0)
return ret;
- ret = tegra194_pcie_host_init(&pcie->pci.pp);
+ ret = tegra_pcie_dw_host_init(&pcie->pci.pp);
if (ret < 0) {
dev_err(dev, "Failed to init host: %d\n", ret);
goto fail_host_init;
@@ -2214,7 +2347,7 @@ static int tegra194_pcie_resume_noirq(struct device *dev)
dw_pcie_setup_rc(&pcie->pci.pp);
- ret = tegra194_pcie_start_link(&pcie->pci);
+ ret = tegra_pcie_dw_start_link(&pcie->pci);
if (ret < 0)
goto fail_host_init;
@@ -2225,12 +2358,12 @@ fail_host_init:
return ret;
}
-static int tegra194_pcie_resume_early(struct device *dev)
+static int tegra_pcie_dw_resume_early(struct device *dev)
{
- struct tegra194_pcie *pcie = dev_get_drvdata(dev);
+ struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
u32 val;
- if (pcie->mode == DW_PCIE_EP_TYPE) {
+ if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
dev_err(dev, "Suspend is not supported in EP mode");
return -ENOTSUPP;
}
@@ -2239,75 +2372,124 @@ static int tegra194_pcie_resume_early(struct device *dev)
return 0;
/* Disable HW_HOT_RST mode */
- val = appl_readl(pcie, APPL_CTRL);
- val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
- APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
- val |= APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST <<
- APPL_CTRL_HW_HOT_RST_MODE_SHIFT;
- val &= ~APPL_CTRL_HW_HOT_RST_EN;
- appl_writel(pcie, val, APPL_CTRL);
+ if (!pcie->of_data->has_sbr_reset_fix) {
+ val = appl_readl(pcie, APPL_CTRL);
+ val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
+ APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
+ val |= APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST <<
+ APPL_CTRL_HW_HOT_RST_MODE_SHIFT;
+ val &= ~APPL_CTRL_HW_HOT_RST_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+ }
return 0;
}
-static void tegra194_pcie_shutdown(struct platform_device *pdev)
+static void tegra_pcie_dw_shutdown(struct platform_device *pdev)
{
- struct tegra194_pcie *pcie = platform_get_drvdata(pdev);
+ struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);
- if (!pcie->link_state)
- return;
+ if (pcie->of_data->mode == DW_PCIE_RC_TYPE) {
+ if (!pcie->link_state)
+ return;
- debugfs_remove_recursive(pcie->debugfs);
- tegra_pcie_downstream_dev_to_D0(pcie);
+ debugfs_remove_recursive(pcie->debugfs);
+ tegra_pcie_downstream_dev_to_D0(pcie);
- disable_irq(pcie->pci.pp.irq);
- if (IS_ENABLED(CONFIG_PCI_MSI))
- disable_irq(pcie->pci.pp.msi_irq);
+ disable_irq(pcie->pci.pp.irq);
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ disable_irq(pcie->pci.pp.msi_irq[0]);
- tegra194_pcie_pme_turnoff(pcie);
- tegra_pcie_unconfig_controller(pcie);
+ tegra_pcie_dw_pme_turnoff(pcie);
+ tegra_pcie_unconfig_controller(pcie);
+ pm_runtime_put_sync(pcie->dev);
+ } else {
+ disable_irq(pcie->pex_rst_irq);
+ pex_ep_event_pex_rst_assert(pcie);
+ }
}
-static const struct tegra194_pcie_of_data tegra194_pcie_rc_of_data = {
+static const struct tegra_pcie_dw_of_data tegra194_pcie_dw_rc_of_data = {
+ .version = TEGRA194_DWC_IP_VER,
+ .mode = DW_PCIE_RC_TYPE,
+ .cdm_chk_int_en_bit = BIT(19),
+ /* Gen4 - 5, 6, 8 and 9 presets enabled */
+ .gen4_preset_vec = 0x360,
+ .n_fts = { 52, 52 },
+};
+
+static const struct tegra_pcie_dw_of_data tegra194_pcie_dw_ep_of_data = {
+ .version = TEGRA194_DWC_IP_VER,
+ .mode = DW_PCIE_EP_TYPE,
+ .cdm_chk_int_en_bit = BIT(19),
+ /* Gen4 - 5, 6, 8 and 9 presets enabled */
+ .gen4_preset_vec = 0x360,
+ .n_fts = { 52, 52 },
+};
+
+static const struct tegra_pcie_dw_of_data tegra234_pcie_dw_rc_of_data = {
+ .version = TEGRA234_DWC_IP_VER,
.mode = DW_PCIE_RC_TYPE,
+ .has_msix_doorbell_access_fix = true,
+ .has_sbr_reset_fix = true,
+ .has_l1ss_exit_fix = true,
+ .cdm_chk_int_en_bit = BIT(18),
+ /* Gen4 - 6, 8 and 9 presets enabled */
+ .gen4_preset_vec = 0x340,
+ .n_fts = { 52, 80 },
};
-static const struct tegra194_pcie_of_data tegra194_pcie_ep_of_data = {
+static const struct tegra_pcie_dw_of_data tegra234_pcie_dw_ep_of_data = {
+ .version = TEGRA234_DWC_IP_VER,
.mode = DW_PCIE_EP_TYPE,
+ .has_l1ss_exit_fix = true,
+ .has_ltr_req_fix = true,
+ .cdm_chk_int_en_bit = BIT(18),
+ /* Gen4 - 6, 8 and 9 presets enabled */
+ .gen4_preset_vec = 0x340,
+ .n_fts = { 52, 80 },
};
-static const struct of_device_id tegra194_pcie_of_match[] = {
+static const struct of_device_id tegra_pcie_dw_of_match[] = {
{
.compatible = "nvidia,tegra194-pcie",
- .data = &tegra194_pcie_rc_of_data,
+ .data = &tegra194_pcie_dw_rc_of_data,
},
{
.compatible = "nvidia,tegra194-pcie-ep",
- .data = &tegra194_pcie_ep_of_data,
+ .data = &tegra194_pcie_dw_ep_of_data,
+ },
+ {
+ .compatible = "nvidia,tegra234-pcie",
+ .data = &tegra234_pcie_dw_rc_of_data,
+ },
+ {
+ .compatible = "nvidia,tegra234-pcie-ep",
+ .data = &tegra234_pcie_dw_ep_of_data,
},
- {},
+ {}
};
-static const struct dev_pm_ops tegra194_pcie_pm_ops = {
- .suspend_late = tegra194_pcie_suspend_late,
- .suspend_noirq = tegra194_pcie_suspend_noirq,
- .resume_noirq = tegra194_pcie_resume_noirq,
- .resume_early = tegra194_pcie_resume_early,
+static const struct dev_pm_ops tegra_pcie_dw_pm_ops = {
+ .suspend_late = tegra_pcie_dw_suspend_late,
+ .suspend_noirq = tegra_pcie_dw_suspend_noirq,
+ .resume_noirq = tegra_pcie_dw_resume_noirq,
+ .resume_early = tegra_pcie_dw_resume_early,
};
-static struct platform_driver tegra194_pcie_driver = {
- .probe = tegra194_pcie_probe,
- .remove = tegra194_pcie_remove,
- .shutdown = tegra194_pcie_shutdown,
+static struct platform_driver tegra_pcie_dw_driver = {
+ .probe = tegra_pcie_dw_probe,
+ .remove = tegra_pcie_dw_remove,
+ .shutdown = tegra_pcie_dw_shutdown,
.driver = {
.name = "tegra194-pcie",
- .pm = &tegra194_pcie_pm_ops,
- .of_match_table = tegra194_pcie_of_match,
+ .pm = &tegra_pcie_dw_pm_ops,
+ .of_match_table = tegra_pcie_dw_of_match,
},
};
-module_platform_driver(tegra194_pcie_driver);
+module_platform_driver(tegra_pcie_dw_driver);
-MODULE_DEVICE_TABLE(of, tegra194_pcie_of_match);
+MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match);
MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA PCIe host controller driver");
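The Tegra rework above is largely one pattern applied throughout: everything that differs between Tegra194 and Tegra234 (erratum fixes, the CDM-check enable bit, the Gen4 preset vector, FTS values, DWC IP version) is described in tegra_pcie_dw_of_data and attached to the compatible string, so runtime code tests data flags such as has_sbr_reset_fix instead of comparing SoC names. A condensed sketch of that match-data flow, assuming the usual of_device_get_match_data() retrieval (the probe hunk shown here starts after that call, so the retrieval line and the apply_sbr_workaround() helper in the comment are illustrative assumptions):

	/* Sketch: per-SoC quirk data travels from the OF match entry into the
	 * driver state, then gates workarounds at the point of use. */
	static int tegra_pcie_dw_probe_sketch(struct platform_device *pdev)
	{
		const struct tegra_pcie_dw_of_data *data;
		struct tegra_pcie_dw *pcie;

		data = of_device_get_match_data(&pdev->dev);
		if (!data)
			return -ENODEV;

		pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
		if (!pcie)
			return -ENOMEM;

		pcie->of_data = (struct tegra_pcie_dw_of_data *)data;

		/* ...later, e.g. in the IRQ handler: */
		/* if (!pcie->of_data->has_sbr_reset_fix) apply_sbr_workaround(pcie); */
		return 0;
	}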
diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c
index b45ac3754242..48c3eba817b4 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier.c
@@ -171,7 +171,7 @@ static void uniphier_pcie_irq_enable(struct uniphier_pcie *pcie)
static void uniphier_pcie_irq_mask(struct irq_data *d)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
unsigned long flags;
@@ -188,7 +188,7 @@ static void uniphier_pcie_irq_mask(struct irq_data *d)
static void uniphier_pcie_irq_unmask(struct irq_data *d)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
unsigned long flags;
@@ -225,7 +225,7 @@ static const struct irq_domain_ops uniphier_intx_domain_ops = {
static void uniphier_pcie_irq_handler(struct irq_desc *desc)
{
- struct pcie_port *pp = irq_desc_get_handler_data(desc);
+ struct dw_pcie_rp *pp = irq_desc_get_handler_data(desc);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -258,7 +258,7 @@ static void uniphier_pcie_irq_handler(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static int uniphier_pcie_config_legacy_irq(struct pcie_port *pp)
+static int uniphier_pcie_config_legacy_irq(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
@@ -295,7 +295,7 @@ out_put_node:
return ret;
}
-static int uniphier_pcie_host_init(struct pcie_port *pp)
+static int uniphier_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
diff --git a/drivers/pci/controller/dwc/pcie-visconti.c b/drivers/pci/controller/dwc/pcie-visconti.c
index 50f80f07e4db..71026fefa366 100644
--- a/drivers/pci/controller/dwc/pcie-visconti.c
+++ b/drivers/pci/controller/dwc/pcie-visconti.c
@@ -178,7 +178,7 @@ static void visconti_pcie_stop_link(struct dw_pcie *pci)
*/
static u64 visconti_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 cpu_addr)
{
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
return cpu_addr & ~pp->io_base;
}
@@ -190,7 +190,7 @@ static const struct dw_pcie_ops dw_pcie_ops = {
.stop_link = visconti_pcie_stop_link,
};
-static int visconti_pcie_host_init(struct pcie_port *pp)
+static int visconti_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct visconti_pcie *pcie = dev_get_drvdata(pci->dev);
@@ -278,7 +278,7 @@ static int visconti_add_pcie_port(struct visconti_pcie *pcie,
struct platform_device *pdev)
{
struct dw_pcie *pci = &pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
pp->irq = platform_get_irq_byname(pdev, "intr");
if (pp->irq < 0)
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index ffec82c8a523..966c8b48bd96 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -8,6 +8,7 @@
* Author: Hezi Shahmoon <hezi.shahmoon@marvell.com>
*/
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
@@ -33,6 +34,7 @@
#define PCIE_CORE_CMD_STATUS_REG 0x4
#define PCIE_CORE_DEV_REV_REG 0x8
#define PCIE_CORE_PCIEXP_CAP 0xc0
+#define PCIE_CORE_PCIERR_CAP 0x100
#define PCIE_CORE_ERR_CAPCTL_REG 0x118
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX BIT(5)
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN BIT(6)
@@ -857,14 +859,11 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
switch (reg) {
- case PCI_EXP_SLTCTL:
- *value = PCI_EXP_SLTSTA_PDS << 16;
- return PCI_BRIDGE_EMUL_HANDLED;
-
/*
- * PCI_EXP_RTCTL and PCI_EXP_RTSTA are also supported, but do not need
- * to be handled here, because their values are stored in emulated
- * config space buffer, and we read them from there when needed.
+ * PCI_EXP_SLTCAP, PCI_EXP_SLTCTL, PCI_EXP_RTCTL and PCI_EXP_RTSTA are
+ * also supported, but do not need to be handled here, because their
+ * values are stored in emulated config space buffer, and we read them
+ * from there when needed.
*/
case PCI_EXP_LNKCAP: {
@@ -944,11 +943,89 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
}
}
+static pci_bridge_emul_read_status_t
+advk_pci_bridge_emul_ext_conf_read(struct pci_bridge_emul *bridge,
+ int reg, u32 *value)
+{
+ struct advk_pcie *pcie = bridge->data;
+
+ switch (reg) {
+ case 0:
+ *value = advk_readl(pcie, PCIE_CORE_PCIERR_CAP + reg);
+
+ /*
+ * PCI_EXT_CAP_NEXT bits are set to offset 0x150, but Armada
+ * 3700 Functional Specification does not document registers
+ * at those addresses.
+ *
+ * Thus we clear PCI_EXT_CAP_NEXT bits to make Advanced Error
+ * Reporting Capability header the last Extended Capability.
+ * If we obtain documentation for those registers in the
+ * future, this can be changed.
+ */
+ *value &= 0x000fffff;
+ return PCI_BRIDGE_EMUL_HANDLED;
+
+ case PCI_ERR_UNCOR_STATUS:
+ case PCI_ERR_UNCOR_MASK:
+ case PCI_ERR_UNCOR_SEVER:
+ case PCI_ERR_COR_STATUS:
+ case PCI_ERR_COR_MASK:
+ case PCI_ERR_CAP:
+ case PCI_ERR_HEADER_LOG + 0:
+ case PCI_ERR_HEADER_LOG + 4:
+ case PCI_ERR_HEADER_LOG + 8:
+ case PCI_ERR_HEADER_LOG + 12:
+ case PCI_ERR_ROOT_COMMAND:
+ case PCI_ERR_ROOT_STATUS:
+ case PCI_ERR_ROOT_ERR_SRC:
+ *value = advk_readl(pcie, PCIE_CORE_PCIERR_CAP + reg);
+ return PCI_BRIDGE_EMUL_HANDLED;
+
+ default:
+ return PCI_BRIDGE_EMUL_NOT_HANDLED;
+ }
+}
+
+static void
+advk_pci_bridge_emul_ext_conf_write(struct pci_bridge_emul *bridge,
+ int reg, u32 old, u32 new, u32 mask)
+{
+ struct advk_pcie *pcie = bridge->data;
+
+ switch (reg) {
+ /* These are W1C registers, so clear other bits */
+ case PCI_ERR_UNCOR_STATUS:
+ case PCI_ERR_COR_STATUS:
+ case PCI_ERR_ROOT_STATUS:
+ new &= mask;
+ fallthrough;
+
+ case PCI_ERR_UNCOR_MASK:
+ case PCI_ERR_UNCOR_SEVER:
+ case PCI_ERR_COR_MASK:
+ case PCI_ERR_CAP:
+ case PCI_ERR_HEADER_LOG + 0:
+ case PCI_ERR_HEADER_LOG + 4:
+ case PCI_ERR_HEADER_LOG + 8:
+ case PCI_ERR_HEADER_LOG + 12:
+ case PCI_ERR_ROOT_COMMAND:
+ case PCI_ERR_ROOT_ERR_SRC:
+ advk_writel(pcie, new, PCIE_CORE_PCIERR_CAP + reg);
+ break;
+
+ default:
+ break;
+ }
+}
+
static const struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
.read_base = advk_pci_bridge_emul_base_conf_read,
.write_base = advk_pci_bridge_emul_base_conf_write,
.read_pcie = advk_pci_bridge_emul_pcie_conf_read,
.write_pcie = advk_pci_bridge_emul_pcie_conf_write,
+ .read_ext = advk_pci_bridge_emul_ext_conf_read,
+ .write_ext = advk_pci_bridge_emul_ext_conf_write,
};
/*
@@ -977,8 +1054,25 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
/* Support interrupt A for MSI feature */
bridge->conf.intpin = PCI_INTERRUPT_INTA;
- /* Aardvark HW provides PCIe Capability structure in version 2 */
- bridge->pcie_conf.cap = cpu_to_le16(2);
+ /*
+ * Aardvark HW provides PCIe Capability structure in version 2 and
+ * indicates slot support, which is emulated.
+ */
+ bridge->pcie_conf.cap = cpu_to_le16(2 | PCI_EXP_FLAGS_SLOT);
+
+ /*
+ * Set Presence Detect State bit permanently since there is no support
+ * for unplugging the card nor detecting whether it is plugged. (If a
+ * platform exists in the future that supports it, via a GPIO for
+ * example, it should be implemented via this bit.)
+ *
+ * Set physical slot number to 1 since there is only one port and zero
+ * value is reserved for ports within the same silicon as Root Port
+ * which is not our case.
+ */
+ bridge->pcie_conf.slotcap = cpu_to_le32(FIELD_PREP(PCI_EXP_SLTCAP_PSN,
+ 1));
+ bridge->pcie_conf.slotsta = cpu_to_le16(PCI_EXP_SLTSTA_PDS);
/* Indicates supports for Completion Retry Status */
bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
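The aardvark change above forwards writes to the emulated AER registers straight to hardware, but first masks the three status registers because they are write-1-to-clear (W1C): only the bits the caller actually wrote may be cleared. Below is a minimal userspace C sketch of that semantics; w1c_update() is a hypothetical helper, not driver code.

#include <stdint.h>
#include <stdio.h>

/* W1C semantics: a register write clears exactly the status bits that were
 * written as 1; bits outside the written mask must stay latched. */
static uint32_t w1c_update(uint32_t hw_status, uint32_t new_val, uint32_t mask)
{
        /* Forward only the bits the caller addressed (new_val & mask). */
        return hw_status & ~(new_val & mask);
}

int main(void)
{
        uint32_t status = 0x00000011;   /* two error bits latched */

        /* Caller clears only bit 0; bit 4 must stay set. */
        status = w1c_update(status, 0x00000001, 0x000000ff);
        printf("status after W1C write: %#x\n", status);       /* 0x10 */
        return 0;
}

This is why the write handler masks with "new &= mask" before the fallthrough: without it, a read-modify-write could clear latched error bits the caller never targeted.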
diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c
index 50a8e1d6f70a..05c50408f13b 100644
--- a/drivers/pci/controller/pci-loongson.c
+++ b/drivers/pci/controller/pci-loongson.c
@@ -9,6 +9,8 @@
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
#include "../pci.h"
@@ -18,18 +20,31 @@
#define DEV_PCIE_PORT_2 0x7a29
#define DEV_LS2K_APB 0x7a02
-#define DEV_LS7A_CONF 0x7a10
+#define DEV_LS7A_GMAC 0x7a03
+#define DEV_LS7A_DC1 0x7a06
#define DEV_LS7A_LPC 0x7a0c
+#define DEV_LS7A_AHCI 0x7a08
+#define DEV_LS7A_CONF 0x7a10
+#define DEV_LS7A_GNET 0x7a13
+#define DEV_LS7A_EHCI 0x7a14
+#define DEV_LS7A_DC2 0x7a36
+#define DEV_LS7A_HDMI 0x7a37
#define FLAG_CFG0 BIT(0)
#define FLAG_CFG1 BIT(1)
#define FLAG_DEV_FIX BIT(2)
+#define FLAG_DEV_HIDDEN BIT(3)
+
+struct loongson_pci_data {
+ u32 flags;
+ struct pci_ops *ops;
+};
struct loongson_pci {
void __iomem *cfg0_base;
void __iomem *cfg1_base;
struct platform_device *pdev;
- u32 flags;
+ const struct loongson_pci_data *data;
};
/* Fixup wrong class code in PCIe bridges */
@@ -92,55 +107,106 @@ static void loongson_mrrs_quirk(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, loongson_mrrs_quirk);
-static void __iomem *cfg1_map(struct loongson_pci *priv, int bus,
- unsigned int devfn, int where)
+static void loongson_pci_pin_quirk(struct pci_dev *pdev)
{
- unsigned long addroff = 0x0;
+ pdev->pin = 1 + (PCI_FUNC(pdev->devfn) & 3);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_DC1, loongson_pci_pin_quirk);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_DC2, loongson_pci_pin_quirk);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_GMAC, loongson_pci_pin_quirk);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_AHCI, loongson_pci_pin_quirk);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_EHCI, loongson_pci_pin_quirk);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_GNET, loongson_pci_pin_quirk);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_HDMI, loongson_pci_pin_quirk);
+
+static struct loongson_pci *pci_bus_to_loongson_pci(struct pci_bus *bus)
+{
+ struct pci_config_window *cfg;
- if (bus != 0)
- addroff |= BIT(28); /* Type 1 Access */
- addroff |= (where & 0xff) | ((where & 0xf00) << 16);
- addroff |= (bus << 16) | (devfn << 8);
- return priv->cfg1_base + addroff;
+ if (acpi_disabled)
+ return (struct loongson_pci *)(bus->sysdata);
+
+ cfg = bus->sysdata;
+ return (struct loongson_pci *)(cfg->priv);
}
-static void __iomem *cfg0_map(struct loongson_pci *priv, int bus,
- unsigned int devfn, int where)
+static void __iomem *cfg0_map(struct loongson_pci *priv, struct pci_bus *bus,
+ unsigned int devfn, int where)
{
unsigned long addroff = 0x0;
+ unsigned char busnum = bus->number;
- if (bus != 0)
+ if (!pci_is_root_bus(bus)) {
addroff |= BIT(24); /* Type 1 Access */
- addroff |= (bus << 16) | (devfn << 8) | where;
+ addroff |= (busnum << 16);
+ }
+ addroff |= (devfn << 8) | where;
return priv->cfg0_base + addroff;
}
-static void __iomem *pci_loongson_map_bus(struct pci_bus *bus, unsigned int devfn,
- int where)
+static void __iomem *cfg1_map(struct loongson_pci *priv, struct pci_bus *bus,
+ unsigned int devfn, int where)
{
+ unsigned long addroff = 0x0;
unsigned char busnum = bus->number;
- struct pci_host_bridge *bridge = pci_find_host_bridge(bus);
- struct loongson_pci *priv = pci_host_bridge_priv(bridge);
+
+ if (!pci_is_root_bus(bus)) {
+ addroff |= BIT(28); /* Type 1 Access */
+ addroff |= (busnum << 16);
+ }
+ addroff |= (devfn << 8) | (where & 0xff) | ((where & 0xf00) << 16);
+ return priv->cfg1_base + addroff;
+}
+
+static bool pdev_may_exist(struct pci_bus *bus, unsigned int device,
+ unsigned int function)
+{
+ return !(pci_is_root_bus(bus) &&
+ (device >= 9 && device <= 20) && (function > 0));
+}
+
+static void __iomem *pci_loongson_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ unsigned int device = PCI_SLOT(devfn);
+ unsigned int function = PCI_FUNC(devfn);
+ struct loongson_pci *priv = pci_bus_to_loongson_pci(bus);
/*
* Do not read more than one device on the bus other than
- * the host bus. For our hardware the root bus is always bus 0.
+ * the host bus.
*/
- if (priv->flags & FLAG_DEV_FIX && busnum != 0 &&
- PCI_SLOT(devfn) > 0)
- return NULL;
+ if ((priv->data->flags & FLAG_DEV_FIX) && bus->self) {
+ if (!pci_is_root_bus(bus) && (device > 0))
+ return NULL;
+ }
+
+ /* Don't access non-existent devices */
+ if (priv->data->flags & FLAG_DEV_HIDDEN) {
+ if (!pdev_may_exist(bus, device, function))
+ return NULL;
+ }
/* CFG0 can only access standard space */
if (where < PCI_CFG_SPACE_SIZE && priv->cfg0_base)
- return cfg0_map(priv, busnum, devfn, where);
+ return cfg0_map(priv, bus, devfn, where);
/* CFG1 can access extended space */
if (where < PCI_CFG_SPACE_EXP_SIZE && priv->cfg1_base)
- return cfg1_map(priv, busnum, devfn, where);
+ return cfg1_map(priv, bus, devfn, where);
return NULL;
}
+#ifdef CONFIG_OF
+
static int loongson_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
int irq;
@@ -159,20 +225,42 @@ static int loongson_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return val;
}
-/* H/w only accept 32-bit PCI operations */
+/* LS2K/LS7A accept 8/16/32-bit PCI config operations */
static struct pci_ops loongson_pci_ops = {
.map_bus = pci_loongson_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+/* RS780/SR5690 only accept 32-bit PCI config operations */
+static struct pci_ops loongson_pci_ops32 = {
+ .map_bus = pci_loongson_map_bus,
.read = pci_generic_config_read32,
.write = pci_generic_config_write32,
};
+static const struct loongson_pci_data ls2k_pci_data = {
+ .flags = FLAG_CFG1 | FLAG_DEV_FIX | FLAG_DEV_HIDDEN,
+ .ops = &loongson_pci_ops,
+};
+
+static const struct loongson_pci_data ls7a_pci_data = {
+ .flags = FLAG_CFG1 | FLAG_DEV_FIX | FLAG_DEV_HIDDEN,
+ .ops = &loongson_pci_ops,
+};
+
+static const struct loongson_pci_data rs780e_pci_data = {
+ .flags = FLAG_CFG0,
+ .ops = &loongson_pci_ops32,
+};
+
static const struct of_device_id loongson_pci_of_match[] = {
{ .compatible = "loongson,ls2k-pci",
- .data = (void *)(FLAG_CFG0 | FLAG_CFG1 | FLAG_DEV_FIX), },
+ .data = &ls2k_pci_data, },
{ .compatible = "loongson,ls7a-pci",
- .data = (void *)(FLAG_CFG0 | FLAG_CFG1 | FLAG_DEV_FIX), },
+ .data = &ls7a_pci_data, },
{ .compatible = "loongson,rs780e-pci",
- .data = (void *)(FLAG_CFG0), },
+ .data = &rs780e_pci_data, },
{}
};
@@ -193,20 +281,20 @@ static int loongson_pci_probe(struct platform_device *pdev)
priv = pci_host_bridge_priv(bridge);
priv->pdev = pdev;
- priv->flags = (unsigned long)of_device_get_match_data(dev);
+ priv->data = of_device_get_match_data(dev);
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!regs) {
- dev_err(dev, "missing mem resources for cfg0\n");
- return -EINVAL;
+ if (priv->data->flags & FLAG_CFG0) {
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs)
+ dev_err(dev, "missing mem resources for cfg0\n");
+ else {
+ priv->cfg0_base = devm_pci_remap_cfg_resource(dev, regs);
+ if (IS_ERR(priv->cfg0_base))
+ return PTR_ERR(priv->cfg0_base);
+ }
}
- priv->cfg0_base = devm_pci_remap_cfg_resource(dev, regs);
- if (IS_ERR(priv->cfg0_base))
- return PTR_ERR(priv->cfg0_base);
-
- /* CFG1 is optional */
- if (priv->flags & FLAG_CFG1) {
+ if (priv->data->flags & FLAG_CFG1) {
regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!regs)
dev_info(dev, "missing mem resource for cfg1\n");
@@ -218,7 +306,7 @@ static int loongson_pci_probe(struct platform_device *pdev)
}
bridge->sysdata = priv;
- bridge->ops = &loongson_pci_ops;
+ bridge->ops = priv->data->ops;
bridge->map_irq = loongson_map_irq;
return pci_host_probe(bridge);
@@ -232,3 +320,41 @@ static struct platform_driver loongson_pci_driver = {
.probe = loongson_pci_probe,
};
builtin_platform_driver(loongson_pci_driver);
+
+#endif
+
+#ifdef CONFIG_ACPI
+
+static int loongson_pci_ecam_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct loongson_pci *priv;
+ struct loongson_pci_data *data;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ cfg->priv = priv;
+ data->flags = FLAG_CFG1 | FLAG_DEV_HIDDEN;
+ priv->data = data;
+ priv->cfg1_base = cfg->win - (cfg->busr.start << 16);
+
+ return 0;
+}
+
+const struct pci_ecam_ops loongson_pci_ecam_ops = {
+ .bus_shift = 16,
+ .init = loongson_pci_ecam_init,
+ .pci_ops = {
+ .map_bus = pci_loongson_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
+
+#endif
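For reference, the reworked cfg1_map() above packs the extended config offset into a split field: bits [7:0] of the register offset stay in place, bits [11:8] are relocated to bits [27:24], and bit 28 marks a Type 1 access on non-root buses. A standalone C sketch of the address composition follows; cfg1_addr() is an illustrative name, not driver code.

#include <stdint.h>
#include <stdio.h>

/* Compose a CFG1 window offset: devfn in bits [15:8], bus in bits [23:16]
 * for Type 1 accesses, low offset byte in [7:0], high nibble in [27:24]. */
static uint32_t cfg1_addr(unsigned int bus, unsigned int devfn,
                          unsigned int where, int type1)
{
        uint32_t addr = 0;

        if (type1) {
                addr |= 1u << 28;       /* Type 1 Access */
                addr |= bus << 16;
        }
        addr |= (devfn << 8) | (where & 0xff) | ((where & 0xf00) << 16);
        return addr;
}

int main(void)
{
        /* bus 1, devfn 0x08, extended offset 0x100 -> 0x11010800 */
        printf("%#x\n", cfg1_addr(1, 0x08, 0x100, 1));
        return 0;
}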
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index c1ffdb06c971..af915c951f06 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -1216,7 +1216,6 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
return -ENOENT;
}
-#ifdef CONFIG_PM_SLEEP
static int mvebu_pcie_suspend(struct device *dev)
{
struct mvebu_pcie *pcie;
@@ -1249,7 +1248,6 @@ static int mvebu_pcie_resume(struct device *dev)
return 0;
}
-#endif
static void mvebu_pcie_port_clk_put(void *data)
{
@@ -1737,7 +1735,7 @@ static const struct of_device_id mvebu_pcie_of_match_table[] = {
};
static const struct dev_pm_ops mvebu_pcie_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume)
};
static struct platform_driver mvebu_pcie_driver = {
diff --git a/drivers/pci/controller/pci-rcar-gen2.c b/drivers/pci/controller/pci-rcar-gen2.c
index 35804ea394fd..839695791757 100644
--- a/drivers/pci/controller/pci-rcar-gen2.c
+++ b/drivers/pci/controller/pci-rcar-gen2.c
@@ -328,6 +328,7 @@ static const struct of_device_id rcar_pci_of_match[] = {
{ .compatible = "renesas,pci-r8a7791", },
{ .compatible = "renesas,pci-r8a7794", },
{ .compatible = "renesas,pci-rcar-gen2", },
+ { .compatible = "renesas,pci-rzn1", },
{ },
};
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index 0457ec02ab70..8e323e93be91 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -2707,7 +2707,7 @@ static int tegra_pcie_remove(struct platform_device *pdev)
return 0;
}
-static int __maybe_unused tegra_pcie_pm_suspend(struct device *dev)
+static int tegra_pcie_pm_suspend(struct device *dev)
{
struct tegra_pcie *pcie = dev_get_drvdata(dev);
struct tegra_pcie_port *port;
@@ -2742,7 +2742,7 @@ static int __maybe_unused tegra_pcie_pm_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused tegra_pcie_pm_resume(struct device *dev)
+static int tegra_pcie_pm_resume(struct device *dev)
{
struct tegra_pcie *pcie = dev_get_drvdata(dev);
int err;
@@ -2798,9 +2798,8 @@ poweroff:
}
static const struct dev_pm_ops tegra_pcie_pm_ops = {
- SET_RUNTIME_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume, NULL)
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_pcie_pm_suspend,
- tegra_pcie_pm_resume)
+ RUNTIME_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume, NULL)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume)
};
static struct platform_driver tegra_pcie_driver = {
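The mvebu and tegra hunks above (and the mediatek, rockchip and rcar ones further down) drop __maybe_unused and the CONFIG_PM_SLEEP guards by moving from the SET_* macros to NOIRQ_SYSTEM_SLEEP_PM_OPS()/RUNTIME_PM_OPS(), which keep referencing the callbacks even when sleep support is compiled out. A hedged sketch of the resulting pattern, with made-up names:

#include <linux/platform_device.h>
#include <linux/pm.h>

/* Callbacks need no __maybe_unused: the non-SET_ macros below always
 * reference them, and the build drops them when sleep support is off. */
static int demo_suspend_noirq(struct device *dev)
{
        return 0;
}

static int demo_resume_noirq(struct device *dev)
{
        return 0;
}

static const struct dev_pm_ops demo_pm_ops = {
        NOIRQ_SYSTEM_SLEEP_PM_OPS(demo_suspend_noirq, demo_resume_noirq)
};

static struct platform_driver demo_driver = {
        .driver = {
                .name = "demo-pcie",
                .pm = &demo_pm_ops,
        },
};
builtin_platform_driver(demo_driver);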
diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c
index eb6240958bb0..549d3bd6d1c2 100644
--- a/drivers/pci/controller/pci-xgene.c
+++ b/drivers/pci/controller/pci-xgene.c
@@ -641,7 +641,7 @@ static const struct of_device_id xgene_pcie_match_table[] = {
static struct platform_driver xgene_pcie_driver = {
.driver = {
.name = "xgene-pcie",
- .of_match_table = of_match_ptr(xgene_pcie_match_table),
+ .of_match_table = xgene_pcie_match_table,
.suppress_bind_attrs = true,
},
.probe = xgene_pcie_probe,
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index e61058e13818..521acd632f1a 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -24,6 +24,7 @@
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/printk.h>
+#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
@@ -190,11 +191,6 @@
/* Forward declarations */
struct brcm_pcie;
-static inline void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val);
-static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val);
-static inline void brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val);
-static inline void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val);
-static inline void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val);
enum {
RGR1_SW_INIT_1,
@@ -223,64 +219,9 @@ struct pcie_cfg_data {
void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
};
-static const int pcie_offsets[] = {
- [RGR1_SW_INIT_1] = 0x9210,
- [EXT_CFG_INDEX] = 0x9000,
- [EXT_CFG_DATA] = 0x9004,
-};
-
-static const int pcie_offsets_bmips_7425[] = {
- [RGR1_SW_INIT_1] = 0x8010,
- [EXT_CFG_INDEX] = 0x8300,
- [EXT_CFG_DATA] = 0x8304,
-};
-
-static const struct pcie_cfg_data generic_cfg = {
- .offsets = pcie_offsets,
- .type = GENERIC,
- .perst_set = brcm_pcie_perst_set_generic,
- .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
-};
-
-static const struct pcie_cfg_data bcm7425_cfg = {
- .offsets = pcie_offsets_bmips_7425,
- .type = BCM7425,
- .perst_set = brcm_pcie_perst_set_generic,
- .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
-};
-
-static const struct pcie_cfg_data bcm7435_cfg = {
- .offsets = pcie_offsets,
- .type = BCM7435,
- .perst_set = brcm_pcie_perst_set_generic,
- .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
-};
-
-static const struct pcie_cfg_data bcm4908_cfg = {
- .offsets = pcie_offsets,
- .type = BCM4908,
- .perst_set = brcm_pcie_perst_set_4908,
- .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
-};
-
-static const int pcie_offset_bcm7278[] = {
- [RGR1_SW_INIT_1] = 0xc010,
- [EXT_CFG_INDEX] = 0x9000,
- [EXT_CFG_DATA] = 0x9004,
-};
-
-static const struct pcie_cfg_data bcm7278_cfg = {
- .offsets = pcie_offset_bcm7278,
- .type = BCM7278,
- .perst_set = brcm_pcie_perst_set_7278,
- .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278,
-};
-
-static const struct pcie_cfg_data bcm2711_cfg = {
- .offsets = pcie_offsets,
- .type = BCM2711,
- .perst_set = brcm_pcie_perst_set_generic,
- .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+struct subdev_regulators {
+ unsigned int num_supplies;
+ struct regulator_bulk_data supplies[];
};
struct brcm_msi {
@@ -320,6 +261,8 @@ struct brcm_pcie {
u32 hw_rev;
void (*perst_set)(struct brcm_pcie *pcie, u32 val);
void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
+ struct subdev_regulators *sr;
+ bool ep_wakeup_capable;
};
static inline bool is_bmips(const struct brcm_pcie *pcie)
@@ -741,52 +684,48 @@ static bool brcm_pcie_link_up(struct brcm_pcie *pcie)
return dla && plu;
}
-static void __iomem *brcm_pcie_map_conf(struct pci_bus *bus, unsigned int devfn,
- int where)
+static void __iomem *brcm_pcie_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
{
struct brcm_pcie *pcie = bus->sysdata;
void __iomem *base = pcie->base;
int idx;
- /* Accesses to the RC go right to the RC registers if slot==0 */
+ /* Accesses to the RC go right to the RC registers if !devfn */
if (pci_is_root_bus(bus))
- return PCI_SLOT(devfn) ? NULL : base + where;
+ return devfn ? NULL : base + PCIE_ECAM_REG(where);
+
+ /* An access to our HW w/o link-up will cause a CPU Abort */
+ if (!brcm_pcie_link_up(pcie))
+ return NULL;
/* For devices, write to the config space index register */
idx = PCIE_ECAM_OFFSET(bus->number, devfn, 0);
writel(idx, pcie->base + PCIE_EXT_CFG_INDEX);
- return base + PCIE_EXT_CFG_DATA + where;
+ return base + PCIE_EXT_CFG_DATA + PCIE_ECAM_REG(where);
}
-static void __iomem *brcm_pcie_map_conf32(struct pci_bus *bus, unsigned int devfn,
- int where)
+static void __iomem *brcm7425_pcie_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
{
struct brcm_pcie *pcie = bus->sysdata;
void __iomem *base = pcie->base;
int idx;
- /* Accesses to the RC go right to the RC registers if slot==0 */
+ /* Accesses to the RC go right to the RC registers if !devfn */
if (pci_is_root_bus(bus))
- return PCI_SLOT(devfn) ? NULL : base + (where & ~0x3);
+ return devfn ? NULL : base + PCIE_ECAM_REG(where);
+
+ /* An access to our HW w/o link-up will cause a CPU Abort */
+ if (!brcm_pcie_link_up(pcie))
+ return NULL;
/* For devices, write to the config space index register */
- idx = PCIE_ECAM_OFFSET(bus->number, devfn, (where & ~3));
+ idx = PCIE_ECAM_OFFSET(bus->number, devfn, where);
writel(idx, base + IDX_ADDR(pcie));
return base + DATA_ADDR(pcie);
}
-static struct pci_ops brcm_pcie_ops = {
- .map_bus = brcm_pcie_map_conf,
- .read = pci_generic_config_read,
- .write = pci_generic_config_write,
-};
-
-static struct pci_ops brcm_pcie_ops32 = {
- .map_bus = brcm_pcie_map_conf32,
- .read = pci_generic_config_read32,
- .write = pci_generic_config_write32,
-};
-
static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val)
{
u32 tmp, mask = RGR1_SW_INIT_1_INIT_GENERIC_MASK;
@@ -926,17 +865,13 @@ static inline int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
static int brcm_pcie_setup(struct brcm_pcie *pcie)
{
- struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
u64 rc_bar2_offset, rc_bar2_size;
void __iomem *base = pcie->base;
- struct device *dev = pcie->dev;
+ struct pci_host_bridge *bridge;
struct resource_entry *entry;
- bool ssc_good = false;
- struct resource *res;
- int num_out_wins = 0;
- u16 nlw, cls, lnksta;
- int i, ret, memc;
u32 tmp, burst, aspm_support;
+ int num_out_wins = 0;
+ int ret, memc;
/* Reset the bridge */
pcie->bridge_sw_init_set(pcie, 1);
@@ -1012,6 +947,11 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
else
pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_GT_4GB;
+ if (!brcm_pcie_rc_mode(pcie)) {
+ dev_err(pcie->dev, "PCIe RC controller misconfigured as Endpoint\n");
+ return -EINVAL;
+ }
+
/* disable the PCIe->GISB memory window (RC_BAR1) */
tmp = readl(base + PCIE_MISC_RC_BAR1_CONFIG_LO);
tmp &= ~PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK;
@@ -1022,31 +962,27 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
tmp &= ~PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK;
writel(tmp, base + PCIE_MISC_RC_BAR3_CONFIG_LO);
- if (pcie->gen)
- brcm_pcie_set_gen(pcie, pcie->gen);
-
- /* Unassert the fundamental reset */
- pcie->perst_set(pcie, 0);
+ /* Don't advertise L0s capability if 'aspm-no-l0s' */
+ aspm_support = PCIE_LINK_STATE_L1;
+ if (!of_property_read_bool(pcie->np, "aspm-no-l0s"))
+ aspm_support |= PCIE_LINK_STATE_L0S;
+ tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+ u32p_replace_bits(&tmp, aspm_support,
+ PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK);
+ writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
/*
- * Give the RC/EP time to wake up, before trying to configure RC.
- * Intermittently check status for link-up, up to a total of 100ms.
+ * For config space accesses on the RC, show the right class for
+ * a PCIe-PCIe bridge (the default setting is to be EP mode).
*/
- for (i = 0; i < 100 && !brcm_pcie_link_up(pcie); i += 5)
- msleep(5);
-
- if (!brcm_pcie_link_up(pcie)) {
- dev_err(dev, "link down\n");
- return -ENODEV;
- }
-
- if (!brcm_pcie_rc_mode(pcie)) {
- dev_err(dev, "PCIe misconfigured; is in EP mode\n");
- return -EINVAL;
- }
+ tmp = readl(base + PCIE_RC_CFG_PRIV1_ID_VAL3);
+ u32p_replace_bits(&tmp, 0x060400,
+ PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK);
+ writel(tmp, base + PCIE_RC_CFG_PRIV1_ID_VAL3);
+ bridge = pci_host_bridge_from_priv(pcie);
resource_list_for_each_entry(entry, &bridge->windows) {
- res = entry->res;
+ struct resource *res = entry->res;
if (resource_type(res) != IORESOURCE_MEM)
continue;
@@ -1075,23 +1011,41 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
num_out_wins++;
}
- /* Don't advertise L0s capability if 'aspm-no-l0s' */
- aspm_support = PCIE_LINK_STATE_L1;
- if (!of_property_read_bool(pcie->np, "aspm-no-l0s"))
- aspm_support |= PCIE_LINK_STATE_L0S;
- tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
- u32p_replace_bits(&tmp, aspm_support,
- PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK);
- writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+ /* PCIe->SCB endian mode for BAR */
+ tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
+ u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN,
+ PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK);
+ writel(tmp, base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
+
+ return 0;
+}
+
+static int brcm_pcie_start_link(struct brcm_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ void __iomem *base = pcie->base;
+ u16 nlw, cls, lnksta;
+ bool ssc_good = false;
+ u32 tmp;
+ int ret, i;
+
+ /* Unassert the fundamental reset */
+ pcie->perst_set(pcie, 0);
/*
- * For config space accesses on the RC, show the right class for
- * a PCIe-PCIe bridge (the default setting is to be EP mode).
+ * Give the RC/EP time to wake up, before trying to configure RC.
+ * Intermittently check status for link-up, up to a total of 100ms.
*/
- tmp = readl(base + PCIE_RC_CFG_PRIV1_ID_VAL3);
- u32p_replace_bits(&tmp, 0x060400,
- PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK);
- writel(tmp, base + PCIE_RC_CFG_PRIV1_ID_VAL3);
+ for (i = 0; i < 100 && !brcm_pcie_link_up(pcie); i += 5)
+ msleep(5);
+
+ if (!brcm_pcie_link_up(pcie)) {
+ dev_err(dev, "link down\n");
+ return -ENODEV;
+ }
+
+ if (pcie->gen)
+ brcm_pcie_set_gen(pcie, pcie->gen);
if (pcie->ssc) {
ret = brcm_pcie_set_ssc(pcie);
@@ -1108,12 +1062,6 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
pci_speed_string(pcie_link_speed[cls]), nlw,
ssc_good ? "(SSC)" : "(!SSC)");
- /* PCIe->SCB endian mode for BAR */
- tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
- u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN,
- PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK);
- writel(tmp, base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
-
/*
* Refclk from RC should be gated with CLKREQ# input when ASPM L0s,L1
* is enabled => setting the CLKREQ_DEBUG_ENABLE field to 1.
@@ -1125,6 +1073,82 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
return 0;
}
+static const char * const supplies[] = {
+ "vpcie3v3",
+ "vpcie3v3aux",
+ "vpcie12v",
+};
+
+static void *alloc_subdev_regulators(struct device *dev)
+{
+ const size_t size = sizeof(struct subdev_regulators) +
+ sizeof(struct regulator_bulk_data) * ARRAY_SIZE(supplies);
+ struct subdev_regulators *sr;
+ int i;
+
+ sr = devm_kzalloc(dev, size, GFP_KERNEL);
+ if (sr) {
+ sr->num_supplies = ARRAY_SIZE(supplies);
+ for (i = 0; i < ARRAY_SIZE(supplies); i++)
+ sr->supplies[i].supply = supplies[i];
+ }
+
+ return sr;
+}
+
+static int brcm_pcie_add_bus(struct pci_bus *bus)
+{
+ struct brcm_pcie *pcie = bus->sysdata;
+ struct device *dev = &bus->dev;
+ struct subdev_regulators *sr;
+ int ret;
+
+ if (!bus->parent || !pci_is_root_bus(bus->parent))
+ return 0;
+
+ if (dev->of_node) {
+ sr = alloc_subdev_regulators(dev);
+ if (!sr) {
+ dev_info(dev, "Can't allocate regulators for downstream device\n");
+ goto no_regulators;
+ }
+
+ pcie->sr = sr;
+
+ ret = regulator_bulk_get(dev, sr->num_supplies, sr->supplies);
+ if (ret) {
+ dev_info(dev, "No regulators for downstream device\n");
+ goto no_regulators;
+ }
+
+ ret = regulator_bulk_enable(sr->num_supplies, sr->supplies);
+ if (ret) {
+ dev_err(dev, "Can't enable regulators for downstream device\n");
+ regulator_bulk_free(sr->num_supplies, sr->supplies);
+ pcie->sr = NULL;
+ }
+ }
+
+no_regulators:
+ brcm_pcie_start_link(pcie);
+ return 0;
+}
+
+static void brcm_pcie_remove_bus(struct pci_bus *bus)
+{
+ struct brcm_pcie *pcie = bus->sysdata;
+ struct subdev_regulators *sr = pcie->sr;
+ struct device *dev = &bus->dev;
+
+ if (!sr)
+ return;
+
+ if (regulator_bulk_disable(sr->num_supplies, sr->supplies))
+ dev_err(dev, "Failed to disable regulators for downstream device\n");
+ regulator_bulk_free(sr->num_supplies, sr->supplies);
+ pcie->sr = NULL;
+}
+
/* L23 is a low-power PCIe link state */
static void brcm_pcie_enter_l23(struct brcm_pcie *pcie)
{
@@ -1221,9 +1245,21 @@ static void brcm_pcie_turn_off(struct brcm_pcie *pcie)
pcie->bridge_sw_init_set(pcie, 1);
}
-static int brcm_pcie_suspend(struct device *dev)
+static int pci_dev_may_wakeup(struct pci_dev *dev, void *data)
+{
+ bool *ret = data;
+
+ if (device_may_wakeup(&dev->dev)) {
+ *ret = true;
+ dev_info(&dev->dev, "Possible wake-up device; regulators will not be disabled\n");
+ }
+ return (int) *ret;
+}
+
+static int brcm_pcie_suspend_noirq(struct device *dev)
{
struct brcm_pcie *pcie = dev_get_drvdata(dev);
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
int ret;
brcm_pcie_turn_off(pcie);
@@ -1241,12 +1277,31 @@ static int brcm_pcie_suspend(struct device *dev)
return ret;
}
+ if (pcie->sr) {
+ /*
+ * Now turn off the regulators, but if at least one
+ * downstream device is enabled as a wake-up source, do not
+ * turn off regulators.
+ */
+ pcie->ep_wakeup_capable = false;
+ pci_walk_bus(bridge->bus, pci_dev_may_wakeup,
+ &pcie->ep_wakeup_capable);
+ if (!pcie->ep_wakeup_capable) {
+ ret = regulator_bulk_disable(pcie->sr->num_supplies,
+ pcie->sr->supplies);
+ if (ret) {
+ dev_err(dev, "Could not turn off regulators\n");
+ reset_control_reset(pcie->rescal);
+ return ret;
+ }
+ }
+ }
clk_disable_unprepare(pcie->clk);
return 0;
}
-static int brcm_pcie_resume(struct device *dev)
+static int brcm_pcie_resume_noirq(struct device *dev)
{
struct brcm_pcie *pcie = dev_get_drvdata(dev);
void __iomem *base;
@@ -1281,11 +1336,37 @@ static int brcm_pcie_resume(struct device *dev)
if (ret)
goto err_reset;
+ if (pcie->sr) {
+ if (pcie->ep_wakeup_capable) {
+ /*
+ * We are resuming from a suspend. In the suspend we
+ * did not disable the power supplies, so there is
+ * no need to enable them (and falsely increase their
+ * usage count).
+ */
+ pcie->ep_wakeup_capable = false;
+ } else {
+ ret = regulator_bulk_enable(pcie->sr->num_supplies,
+ pcie->sr->supplies);
+ if (ret) {
+ dev_err(dev, "Could not turn on regulators\n");
+ goto err_reset;
+ }
+ }
+ }
+
+ ret = brcm_pcie_start_link(pcie);
+ if (ret)
+ goto err_regulator;
+
if (pcie->msi)
brcm_msi_set_regs(pcie->msi);
return 0;
+err_regulator:
+ if (pcie->sr)
+ regulator_bulk_disable(pcie->sr->num_supplies, pcie->sr->supplies);
err_reset:
reset_control_rearm(pcie->rescal);
err_disable_clk:
@@ -1316,6 +1397,66 @@ static int brcm_pcie_remove(struct platform_device *pdev)
return 0;
}
+static const int pcie_offsets[] = {
+ [RGR1_SW_INIT_1] = 0x9210,
+ [EXT_CFG_INDEX] = 0x9000,
+ [EXT_CFG_DATA] = 0x9004,
+};
+
+static const int pcie_offsets_bmips_7425[] = {
+ [RGR1_SW_INIT_1] = 0x8010,
+ [EXT_CFG_INDEX] = 0x8300,
+ [EXT_CFG_DATA] = 0x8304,
+};
+
+static const struct pcie_cfg_data generic_cfg = {
+ .offsets = pcie_offsets,
+ .type = GENERIC,
+ .perst_set = brcm_pcie_perst_set_generic,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+};
+
+static const struct pcie_cfg_data bcm7425_cfg = {
+ .offsets = pcie_offsets_bmips_7425,
+ .type = BCM7425,
+ .perst_set = brcm_pcie_perst_set_generic,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+};
+
+static const struct pcie_cfg_data bcm7435_cfg = {
+ .offsets = pcie_offsets,
+ .type = BCM7435,
+ .perst_set = brcm_pcie_perst_set_generic,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+};
+
+static const struct pcie_cfg_data bcm4908_cfg = {
+ .offsets = pcie_offsets,
+ .type = BCM4908,
+ .perst_set = brcm_pcie_perst_set_4908,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+};
+
+static const int pcie_offset_bcm7278[] = {
+ [RGR1_SW_INIT_1] = 0xc010,
+ [EXT_CFG_INDEX] = 0x9000,
+ [EXT_CFG_DATA] = 0x9004,
+};
+
+static const struct pcie_cfg_data bcm7278_cfg = {
+ .offsets = pcie_offset_bcm7278,
+ .type = BCM7278,
+ .perst_set = brcm_pcie_perst_set_7278,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278,
+};
+
+static const struct pcie_cfg_data bcm2711_cfg = {
+ .offsets = pcie_offsets,
+ .type = BCM2711,
+ .perst_set = brcm_pcie_perst_set_generic,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+};
+
static const struct of_device_id brcm_pcie_match[] = {
{ .compatible = "brcm,bcm2711-pcie", .data = &bcm2711_cfg },
{ .compatible = "brcm,bcm4908-pcie", .data = &bcm4908_cfg },
@@ -1328,6 +1469,22 @@ static const struct of_device_id brcm_pcie_match[] = {
{},
};
+static struct pci_ops brcm_pcie_ops = {
+ .map_bus = brcm_pcie_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ .add_bus = brcm_pcie_add_bus,
+ .remove_bus = brcm_pcie_remove_bus,
+};
+
+static struct pci_ops brcm7425_pcie_ops = {
+ .map_bus = brcm7425_pcie_map_bus,
+ .read = pci_generic_config_read32,
+ .write = pci_generic_config_write32,
+ .add_bus = brcm_pcie_add_bus,
+ .remove_bus = brcm_pcie_remove_bus,
+};
+
static int brcm_pcie_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node, *msi_np;
@@ -1414,12 +1571,22 @@ static int brcm_pcie_probe(struct platform_device *pdev)
}
}
- bridge->ops = pcie->type == BCM7425 ? &brcm_pcie_ops32 : &brcm_pcie_ops;
+ bridge->ops = pcie->type == BCM7425 ? &brcm7425_pcie_ops : &brcm_pcie_ops;
bridge->sysdata = pcie;
platform_set_drvdata(pdev, pcie);
- return pci_host_probe(bridge);
+ ret = pci_host_probe(bridge);
+ if (!ret && !brcm_pcie_link_up(pcie))
+ ret = -ENODEV;
+
+ if (ret) {
+ brcm_pcie_remove(pdev);
+ return ret;
+ }
+
+ return 0;
+
fail:
__brcm_pcie_remove(pcie);
return ret;
@@ -1428,8 +1595,8 @@ fail:
MODULE_DEVICE_TABLE(of, brcm_pcie_match);
static const struct dev_pm_ops brcm_pcie_pm_ops = {
- .suspend = brcm_pcie_suspend,
- .resume = brcm_pcie_resume,
+ .suspend_noirq = brcm_pcie_suspend_noirq,
+ .resume_noirq = brcm_pcie_resume_noirq,
};
static struct platform_driver brcm_pcie_driver = {
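pci_dev_may_wakeup() above leans on a property of pci_walk_bus(): the walk stops as soon as the callback returns nonzero, so the scan ends at the first wakeup-capable device. Here is a small kernel-style sketch of the same pattern; any_wakeup_cb()/any_wakeup() are illustrative names, not from the driver.

#include <linux/pci.h>
#include <linux/pm_wakeup.h>

/* Record whether any device below 'bus' is enabled as a wakeup source. */
static int any_wakeup_cb(struct pci_dev *dev, void *data)
{
        bool *found = data;

        if (device_may_wakeup(&dev->dev))
                *found = true;

        return *found;  /* nonzero stops pci_walk_bus() early */
}

static bool any_wakeup(struct pci_bus *bus)
{
        bool found = false;

        pci_walk_bus(bus, any_wakeup_cb, &found);
        return found;
}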
diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
index 757b7fbcdc59..fee036b07cd4 100644
--- a/drivers/pci/controller/pcie-iproc-msi.c
+++ b/drivers/pci/controller/pcie-iproc-msi.c
@@ -589,8 +589,8 @@ int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
msi->has_inten_reg = true;
msi->nr_msi_vecs = msi->nr_irqs * EQ_LEN;
- msi->bitmap = devm_kcalloc(pcie->dev, BITS_TO_LONGS(msi->nr_msi_vecs),
- sizeof(*msi->bitmap), GFP_KERNEL);
+ msi->bitmap = devm_bitmap_zalloc(pcie->dev, msi->nr_msi_vecs,
+ GFP_KERNEL);
if (!msi->bitmap)
return -ENOMEM;
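The iproc-msi hunk swaps the open-coded devm_kcalloc(BITS_TO_LONGS(n), sizeof(long)) allocation for devm_bitmap_zalloc(), which states the intent directly and keeps device-managed lifetime. A hedged sketch of that allocation plus the usual vector hand-out; the struct and function names are illustrative.

#include <linux/bitmap.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>

struct demo_msi {
        struct device *dev;
        unsigned long *bitmap;
        unsigned int nr_vecs;
        spinlock_t lock;
};

static int demo_msi_init(struct demo_msi *msi)
{
        /* One bit per MSI vector, freed automatically with the device. */
        msi->bitmap = devm_bitmap_zalloc(msi->dev, msi->nr_vecs, GFP_KERNEL);
        if (!msi->bitmap)
                return -ENOMEM;

        spin_lock_init(&msi->lock);
        return 0;
}

static int demo_msi_alloc_vec(struct demo_msi *msi)
{
        unsigned int vec;
        int ret;

        spin_lock(&msi->lock);
        vec = find_first_zero_bit(msi->bitmap, msi->nr_vecs);
        if (vec < msi->nr_vecs) {
                set_bit(vec, msi->bitmap);
                ret = vec;
        } else {
                ret = -ENOSPC;
        }
        spin_unlock(&msi->lock);

        return ret;
}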
diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
index 5d9fd36b02d1..11cdb9b6f109 100644
--- a/drivers/pci/controller/pcie-mediatek-gen3.c
+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -153,6 +153,37 @@ struct mtk_gen3_pcie {
DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM);
};
+/* LTSSM state in PCIE_LTSSM_STATUS_REG bit[28:24] */
+static const char *const ltssm_str[] = {
+ "detect.quiet", /* 0x00 */
+ "detect.active", /* 0x01 */
+ "polling.active", /* 0x02 */
+ "polling.compliance", /* 0x03 */
+ "polling.configuration", /* 0x04 */
+ "config.linkwidthstart", /* 0x05 */
+ "config.linkwidthaccept", /* 0x06 */
+ "config.lanenumwait", /* 0x07 */
+ "config.lanenumaccept", /* 0x08 */
+ "config.complete", /* 0x09 */
+ "config.idle", /* 0x0A */
+ "recovery.receiverlock", /* 0x0B */
+ "recovery.equalization", /* 0x0C */
+ "recovery.speed", /* 0x0D */
+ "recovery.receiverconfig", /* 0x0E */
+ "recovery.idle", /* 0x0F */
+ "L0", /* 0x10 */
+ "L0s", /* 0x11 */
+ "L1.entry", /* 0x12 */
+ "L1.idle", /* 0x13 */
+ "L2.idle", /* 0x14 */
+ "L2.transmitwake", /* 0x15 */
+ "disable", /* 0x16 */
+ "loopback.entry", /* 0x17 */
+ "loopback.active", /* 0x18 */
+ "loopback.exit", /* 0x19 */
+ "hotreset", /* 0x1A */
+};
+
/**
* mtk_pcie_config_tlp_header() - Configure a configuration TLP header
* @bus: PCI bus to query
@@ -327,8 +358,16 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
!!(val & PCIE_PORT_LINKUP), 20,
PCI_PM_D3COLD_WAIT * USEC_PER_MSEC);
if (err) {
+ const char *ltssm_state;
+ int ltssm_index;
+
val = readl_relaxed(pcie->base + PCIE_LTSSM_STATUS_REG);
- dev_err(pcie->dev, "PCIe link down, ltssm reg val: %#x\n", val);
+ ltssm_index = PCIE_LTSSM_STATE(val);
+ ltssm_state = ltssm_index >= ARRAY_SIZE(ltssm_str) ?
+ "Unknown state" : ltssm_str[ltssm_index];
+ dev_err(pcie->dev,
+ "PCIe link down, current LTSSM state: %s (%#x)\n",
+ ltssm_state, val);
return err;
}
@@ -600,7 +639,8 @@ static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie)
&intx_domain_ops, pcie);
if (!pcie->intx_domain) {
dev_err(dev, "failed to create INTx IRQ domain\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_put_node;
}
/* Setup MSI */
@@ -623,13 +663,15 @@ static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie)
goto err_msi_domain;
}
+ of_node_put(intc_node);
return 0;
err_msi_domain:
irq_domain_remove(pcie->msi_bottom_domain);
err_msi_bottom_domain:
irq_domain_remove(pcie->intx_domain);
-
+out_put_node:
+ of_node_put(intc_node);
return ret;
}
@@ -917,7 +959,7 @@ static int mtk_pcie_remove(struct platform_device *pdev)
return 0;
}
-static void __maybe_unused mtk_pcie_irq_save(struct mtk_gen3_pcie *pcie)
+static void mtk_pcie_irq_save(struct mtk_gen3_pcie *pcie)
{
int i;
@@ -935,7 +977,7 @@ static void __maybe_unused mtk_pcie_irq_save(struct mtk_gen3_pcie *pcie)
raw_spin_unlock(&pcie->irq_lock);
}
-static void __maybe_unused mtk_pcie_irq_restore(struct mtk_gen3_pcie *pcie)
+static void mtk_pcie_irq_restore(struct mtk_gen3_pcie *pcie)
{
int i;
@@ -953,7 +995,7 @@ static void __maybe_unused mtk_pcie_irq_restore(struct mtk_gen3_pcie *pcie)
raw_spin_unlock(&pcie->irq_lock);
}
-static int __maybe_unused mtk_pcie_turn_off_link(struct mtk_gen3_pcie *pcie)
+static int mtk_pcie_turn_off_link(struct mtk_gen3_pcie *pcie)
{
u32 val;
@@ -968,7 +1010,7 @@ static int __maybe_unused mtk_pcie_turn_off_link(struct mtk_gen3_pcie *pcie)
50 * USEC_PER_MSEC);
}
-static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)
+static int mtk_pcie_suspend_noirq(struct device *dev)
{
struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
int err;
@@ -994,7 +1036,7 @@ static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)
return 0;
}
-static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)
+static int mtk_pcie_resume_noirq(struct device *dev)
{
struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
int err;
@@ -1015,8 +1057,8 @@ static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)
}
static const struct dev_pm_ops mtk_pcie_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
- mtk_pcie_resume_noirq)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
+ mtk_pcie_resume_noirq)
};
static const struct of_device_id mtk_pcie_of_match[] = {
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index be8bd919cb88..ae5ad05ddc1d 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -1150,7 +1150,7 @@ static int mtk_pcie_remove(struct platform_device *pdev)
return 0;
}
-static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)
+static int mtk_pcie_suspend_noirq(struct device *dev)
{
struct mtk_pcie *pcie = dev_get_drvdata(dev);
struct mtk_pcie_port *port;
@@ -1174,7 +1174,7 @@ static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)
return 0;
}
-static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)
+static int mtk_pcie_resume_noirq(struct device *dev)
{
struct mtk_pcie *pcie = dev_get_drvdata(dev);
struct mtk_pcie_port *port, *tmp;
@@ -1195,8 +1195,8 @@ static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)
}
static const struct dev_pm_ops mtk_pcie_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
- mtk_pcie_resume_noirq)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
+ mtk_pcie_resume_noirq)
};
static const struct mtk_pcie_soc mtk_pcie_soc_v1 = {
diff --git a/drivers/pci/controller/pcie-microchip-host.c b/drivers/pci/controller/pcie-microchip-host.c
index dd5dba419047..7263d175b5ad 100644
--- a/drivers/pci/controller/pcie-microchip-host.c
+++ b/drivers/pci/controller/pcie-microchip-host.c
@@ -904,6 +904,7 @@ static int mc_pcie_init_irq_domains(struct mc_pcie *port)
&event_domain_ops, port);
if (!port->event_domain) {
dev_err(dev, "failed to get event domain\n");
+ of_node_put(pcie_intc_node);
return -ENOMEM;
}
@@ -913,6 +914,7 @@ static int mc_pcie_init_irq_domains(struct mc_pcie *port)
&intx_domain_ops, port);
if (!port->intx_domain) {
dev_err(dev, "failed to get an INTx IRQ domain\n");
+ of_node_put(pcie_intc_node);
return -ENOMEM;
}
diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
index 997c4df6a1e7..e4faf90feaf5 100644
--- a/drivers/pci/controller/pcie-rcar-host.c
+++ b/drivers/pci/controller/pcie-rcar-host.c
@@ -1072,7 +1072,7 @@ err_pm_put:
return err;
}
-static int __maybe_unused rcar_pcie_resume(struct device *dev)
+static int rcar_pcie_resume(struct device *dev)
{
struct rcar_pcie_host *host = dev_get_drvdata(dev);
struct rcar_pcie *pcie = &host->pcie;
@@ -1127,7 +1127,7 @@ static int rcar_pcie_resume_noirq(struct device *dev)
}
static const struct dev_pm_ops rcar_pcie_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(NULL, rcar_pcie_resume)
+ SYSTEM_SLEEP_PM_OPS(NULL, rcar_pcie_resume)
.resume_noirq = rcar_pcie_resume_noirq,
};
diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
index 7f56f99b4116..7352b5ff8d35 100644
--- a/drivers/pci/controller/pcie-rockchip-host.c
+++ b/drivers/pci/controller/pcie-rockchip-host.c
@@ -864,7 +864,7 @@ static int rockchip_pcie_wait_l2(struct rockchip_pcie *rockchip)
return 0;
}
-static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev)
+static int rockchip_pcie_suspend_noirq(struct device *dev)
{
struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
int ret;
@@ -889,7 +889,7 @@ static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev)
return ret;
}
-static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev)
+static int rockchip_pcie_resume_noirq(struct device *dev)
{
struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
int err;
@@ -1035,8 +1035,8 @@ static int rockchip_pcie_remove(struct platform_device *pdev)
}
static const struct dev_pm_ops rockchip_pcie_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_pcie_suspend_noirq,
- rockchip_pcie_resume_noirq)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_pcie_suspend_noirq,
+ rockchip_pcie_resume_noirq)
};
static const struct of_device_id rockchip_pcie_of_match[] = {
diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c
index c7cd44ed4dfc..e4ab48041eb6 100644
--- a/drivers/pci/controller/pcie-xilinx-cpm.c
+++ b/drivers/pci/controller/pcie-xilinx-cpm.c
@@ -35,6 +35,10 @@
#define XILINX_CPM_PCIE_MISC_IR_ENABLE 0x00000348
#define XILINX_CPM_PCIE_MISC_IR_LOCAL BIT(1)
+#define XILINX_CPM_PCIE_IR_STATUS 0x000002A0
+#define XILINX_CPM_PCIE_IR_ENABLE 0x000002A8
+#define XILINX_CPM_PCIE_IR_LOCAL BIT(0)
+
/* Interrupt registers definitions */
#define XILINX_CPM_PCIE_INTR_LINK_DOWN 0
#define XILINX_CPM_PCIE_INTR_HOT_RESET 3
@@ -98,6 +102,19 @@
/* Phy Status/Control Register definitions */
#define XILINX_CPM_PCIE_REG_PSCR_LNKUP BIT(11)
+enum xilinx_cpm_version {
+ CPM,
+ CPM5,
+};
+
+/**
+ * struct xilinx_cpm_variant - CPM variant information
+ * @version: CPM version
+ */
+struct xilinx_cpm_variant {
+ enum xilinx_cpm_version version;
+};
+
/**
* struct xilinx_cpm_pcie - PCIe port information
* @dev: Device pointer
@@ -109,6 +126,7 @@
* @intx_irq: legacy interrupt number
* @irq: Error interrupt number
* @lock: lock protecting shared register access
+ * @variant: CPM version check pointer
*/
struct xilinx_cpm_pcie {
struct device *dev;
@@ -120,6 +138,7 @@ struct xilinx_cpm_pcie {
int intx_irq;
int irq;
raw_spinlock_t lock;
+ const struct xilinx_cpm_variant *variant;
};
static u32 pcie_read(struct xilinx_cpm_pcie *port, u32 reg)
@@ -285,6 +304,13 @@ static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
generic_handle_domain_irq(port->cpm_domain, i);
pcie_write(port, val, XILINX_CPM_PCIE_REG_IDR);
+ if (port->variant->version == CPM5) {
+ val = readl_relaxed(port->cpm_base + XILINX_CPM_PCIE_IR_STATUS);
+ if (val)
+ writel_relaxed(val, port->cpm_base +
+ XILINX_CPM_PCIE_IR_STATUS);
+ }
+
/*
* XILINX_CPM_PCIE_MISC_IR_STATUS register is mapped to
* CPM SLCR block.
@@ -484,6 +510,12 @@ static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie *port)
*/
writel(XILINX_CPM_PCIE_MISC_IR_LOCAL,
port->cpm_base + XILINX_CPM_PCIE_MISC_IR_ENABLE);
+
+ if (port->variant->version == CPM5) {
+ writel(XILINX_CPM_PCIE_IR_LOCAL,
+ port->cpm_base + XILINX_CPM_PCIE_IR_ENABLE);
+ }
+
/* Enable the Bridge enable bit */
pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_RPSC) |
XILINX_CPM_PCIE_REG_RPSC_BEN,
@@ -518,7 +550,14 @@ static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie *port,
if (IS_ERR(port->cfg))
return PTR_ERR(port->cfg);
- port->reg_base = port->cfg->win;
+ if (port->variant->version == CPM5) {
+ port->reg_base = devm_platform_ioremap_resource_byname(pdev,
+ "cpm_csr");
+ if (IS_ERR(port->reg_base))
+ return PTR_ERR(port->reg_base);
+ } else {
+ port->reg_base = port->cfg->win;
+ }
return 0;
}
@@ -559,6 +598,8 @@ static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
if (!bus)
return -ENODEV;
+ port->variant = of_device_get_match_data(dev);
+
err = xilinx_cpm_pcie_parse_dt(port, bus->res);
if (err) {
dev_err(dev, "Parsing DT failed\n");
@@ -591,8 +632,23 @@ err_parse_dt:
return err;
}
+static const struct xilinx_cpm_variant cpm_host = {
+ .version = CPM,
+};
+
+static const struct xilinx_cpm_variant cpm5_host = {
+ .version = CPM5,
+};
+
static const struct of_device_id xilinx_cpm_pcie_of_match[] = {
- { .compatible = "xlnx,versal-cpm-host-1.00", },
+ {
+ .compatible = "xlnx,versal-cpm-host-1.00",
+ .data = &cpm_host,
+ },
+ {
+ .compatible = "xlnx,versal-cpm5-host",
+ .data = &cpm5_host,
+ },
{}
};
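The CPM5 support above uses the common variant-matching idiom: each compatible string carries a pointer to a const variant descriptor, which probe() retrieves with of_device_get_match_data() and stashes for later feature checks. A minimal sketch with made-up names:

#include <linux/errno.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct demo_variant {
        unsigned int version;
};

static const struct demo_variant demo_v1 = { .version = 1 };
static const struct demo_variant demo_v2 = { .version = 2 };

static const struct of_device_id demo_of_match[] = {
        { .compatible = "vendor,demo-v1", .data = &demo_v1 },
        { .compatible = "vendor,demo-v2", .data = &demo_v2 },
        { }
};

static int demo_probe(struct platform_device *pdev)
{
        const struct demo_variant *variant;

        /* Returns the .data of the matched of_device_id entry. */
        variant = of_device_get_match_data(&pdev->dev);
        if (!variant)
                return -ENODEV;

        dev_info(&pdev->dev, "variant version %u\n", variant->version);
        return 0;
}

static struct platform_driver demo_driver = {
        .probe = demo_probe,
        .driver = {
                .name = "demo",
                .of_match_table = demo_of_match,
        },
};
builtin_platform_driver(demo_driver);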
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 94a14a3d7e55..e06e9f4fc50f 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -898,7 +898,8 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (vmd->instance < 0)
return vmd->instance;
- vmd->name = kasprintf(GFP_KERNEL, "vmd%d", vmd->instance);
+ vmd->name = devm_kasprintf(&dev->dev, GFP_KERNEL, "vmd%d",
+ vmd->instance);
if (!vmd->name) {
err = -ENOMEM;
goto out_release_instance;
@@ -936,7 +937,6 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
out_release_instance:
ida_simple_remove(&vmd_instance_ida, vmd->instance);
- kfree(vmd->name);
return err;
}
@@ -959,7 +959,6 @@ static void vmd_remove(struct pci_dev *dev)
vmd_detach_resources(vmd);
vmd_remove_irq_domain(vmd);
ida_simple_remove(&vmd_instance_ida, vmd->instance);
- kfree(vmd->name);
}
#ifdef CONFIG_PM_SLEEP
@@ -1013,6 +1012,14 @@ static const struct pci_device_id vmd_ids[] = {
.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
VMD_FEAT_HAS_BUS_RESTRICTIONS |
VMD_FEAT_OFFSET_FIRST_VECTOR,},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7d0b),
+ .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
+ VMD_FEAT_HAS_BUS_RESTRICTIONS |
+ VMD_FEAT_OFFSET_FIRST_VECTOR,},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xad0b),
+ .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
+ VMD_FEAT_HAS_BUS_RESTRICTIONS |
+ VMD_FEAT_OFFSET_FIRST_VECTOR,},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),
.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
VMD_FEAT_HAS_BUS_RESTRICTIONS |
diff --git a/drivers/pci/doe.c b/drivers/pci/doe.c
new file mode 100644
index 000000000000..e402f05068a5
--- /dev/null
+++ b/drivers/pci/doe.c
@@ -0,0 +1,536 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Data Object Exchange
+ * PCIe r6.0, sec 6.30 DOE
+ *
+ * Copyright (C) 2021 Huawei
+ * Jonathan Cameron <Jonathan.Cameron@huawei.com>
+ *
+ * Copyright (C) 2022 Intel Corporation
+ * Ira Weiny <ira.weiny@intel.com>
+ */
+
+#define dev_fmt(fmt) "DOE: " fmt
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/pci-doe.h>
+#include <linux/workqueue.h>
+
+#define PCI_DOE_PROTOCOL_DISCOVERY 0
+
+/* Timeout of 1 second from 6.30.2 Operation, PCI Spec r6.0 */
+#define PCI_DOE_TIMEOUT HZ
+#define PCI_DOE_POLL_INTERVAL (PCI_DOE_TIMEOUT / 128)
+
+#define PCI_DOE_FLAG_CANCEL 0
+#define PCI_DOE_FLAG_DEAD 1
+
+/**
+ * struct pci_doe_mb - State for a single DOE mailbox
+ *
+ * This state is used to manage a single DOE mailbox capability. All fields
+ * should be considered opaque to the consumers and the structure passed into
+ * the helpers below after being created by pcim_doe_create_mb().
+ *
+ * @pdev: PCI device this mailbox belongs to
+ * @cap_offset: Capability offset
+ * @prots: Array of protocols supported (encoded as long values)
+ * @wq: Wait queue for work item
+ * @work_queue: Queue of pci_doe_work items
+ * @flags: Bit array of PCI_DOE_FLAG_* flags
+ */
+struct pci_doe_mb {
+ struct pci_dev *pdev;
+ u16 cap_offset;
+ struct xarray prots;
+
+ wait_queue_head_t wq;
+ struct workqueue_struct *work_queue;
+ unsigned long flags;
+};
+
+static int pci_doe_wait(struct pci_doe_mb *doe_mb, unsigned long timeout)
+{
+ if (wait_event_timeout(doe_mb->wq,
+ test_bit(PCI_DOE_FLAG_CANCEL, &doe_mb->flags),
+ timeout))
+ return -EIO;
+ return 0;
+}
+
+static void pci_doe_write_ctrl(struct pci_doe_mb *doe_mb, u32 val)
+{
+ struct pci_dev *pdev = doe_mb->pdev;
+ int offset = doe_mb->cap_offset;
+
+ pci_write_config_dword(pdev, offset + PCI_DOE_CTRL, val);
+}
+
+static int pci_doe_abort(struct pci_doe_mb *doe_mb)
+{
+ struct pci_dev *pdev = doe_mb->pdev;
+ int offset = doe_mb->cap_offset;
+ unsigned long timeout_jiffies;
+
+ pci_dbg(pdev, "[%x] Issuing Abort\n", offset);
+
+ timeout_jiffies = jiffies + PCI_DOE_TIMEOUT;
+ pci_doe_write_ctrl(doe_mb, PCI_DOE_CTRL_ABORT);
+
+ do {
+ int rc;
+ u32 val;
+
+ rc = pci_doe_wait(doe_mb, PCI_DOE_POLL_INTERVAL);
+ if (rc)
+ return rc;
+ pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
+
+ /* Abort success! */
+ if (!FIELD_GET(PCI_DOE_STATUS_ERROR, val) &&
+ !FIELD_GET(PCI_DOE_STATUS_BUSY, val))
+ return 0;
+
+ } while (!time_after(jiffies, timeout_jiffies));
+
+ /* Abort has timed out and the MB is dead */
+ pci_err(pdev, "[%x] ABORT timed out\n", offset);
+ return -EIO;
+}
+
+static int pci_doe_send_req(struct pci_doe_mb *doe_mb,
+ struct pci_doe_task *task)
+{
+ struct pci_dev *pdev = doe_mb->pdev;
+ int offset = doe_mb->cap_offset;
+ u32 val;
+ int i;
+
+ /*
+ * Check that the DOE busy bit is not set. If it is set, this could
+ * indicate that someone other than Linux (e.g. firmware) is using the
+ * mailbox. Note it is expected that firmware and OS will negotiate
+ * access rights via a method that is yet to be defined.
+ */
+ pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
+ if (FIELD_GET(PCI_DOE_STATUS_BUSY, val))
+ return -EBUSY;
+
+ if (FIELD_GET(PCI_DOE_STATUS_ERROR, val))
+ return -EIO;
+
+ /* Write DOE Header */
+ val = FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_VID, task->prot.vid) |
+ FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, task->prot.type);
+ pci_write_config_dword(pdev, offset + PCI_DOE_WRITE, val);
+ /* Length is 2 DW of header + length of payload in DW */
+ pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
+ FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH,
+ 2 + task->request_pl_sz /
+ sizeof(u32)));
+ for (i = 0; i < task->request_pl_sz / sizeof(u32); i++)
+ pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
+ task->request_pl[i]);
+
+ pci_doe_write_ctrl(doe_mb, PCI_DOE_CTRL_GO);
+
+ return 0;
+}
+
+static bool pci_doe_data_obj_ready(struct pci_doe_mb *doe_mb)
+{
+ struct pci_dev *pdev = doe_mb->pdev;
+ int offset = doe_mb->cap_offset;
+ u32 val;
+
+ pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
+ if (FIELD_GET(PCI_DOE_STATUS_DATA_OBJECT_READY, val))
+ return true;
+ return false;
+}
+
+static int pci_doe_recv_resp(struct pci_doe_mb *doe_mb, struct pci_doe_task *task)
+{
+ struct pci_dev *pdev = doe_mb->pdev;
+ int offset = doe_mb->cap_offset;
+ size_t length, payload_length;
+ u32 val;
+ int i;
+
+ /* Read the first dword to get the protocol */
+ pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
+ if ((FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_VID, val) != task->prot.vid) ||
+ (FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, val) != task->prot.type)) {
+ dev_err_ratelimited(&pdev->dev, "[%x] expected [VID, Protocol] = [%04x, %02x], got [%04x, %02x]\n",
+ doe_mb->cap_offset, task->prot.vid, task->prot.type,
+ FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_VID, val),
+ FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, val));
+ return -EIO;
+ }
+
+ pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
+ /* Read the second dword to get the length */
+ pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
+ pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
+
+ length = FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH, val);
+ if (length > SZ_1M || length < 2)
+ return -EIO;
+
+ /* First 2 dwords have already been read */
+ length -= 2;
+ payload_length = min(length, task->response_pl_sz / sizeof(u32));
+ /* Read the rest of the response payload */
+ for (i = 0; i < payload_length; i++) {
+ pci_read_config_dword(pdev, offset + PCI_DOE_READ,
+ &task->response_pl[i]);
+ /* Prior to the last ack, ensure Data Object Ready */
+ if (i == (payload_length - 1) && !pci_doe_data_obj_ready(doe_mb))
+ return -EIO;
+ pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
+ }
+
+ /* Flush excess length */
+ for (; i < length; i++) {
+ pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
+ pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
+ }
+
+ /* Final error check to pick up on any since Data Object Ready */
+ pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
+ if (FIELD_GET(PCI_DOE_STATUS_ERROR, val))
+ return -EIO;
+
+ return min(length, task->response_pl_sz / sizeof(u32)) * sizeof(u32);
+}
+
+static void signal_task_complete(struct pci_doe_task *task, int rv)
+{
+ task->rv = rv;
+ task->complete(task);
+}
+
+static void signal_task_abort(struct pci_doe_task *task, int rv)
+{
+ struct pci_doe_mb *doe_mb = task->doe_mb;
+ struct pci_dev *pdev = doe_mb->pdev;
+
+ if (pci_doe_abort(doe_mb)) {
+ /*
+ * If the device can't process an abort, mark the mailbox dead:
+ * no more submissions
+ */
+ pci_err(pdev, "[%x] Abort failed marking mailbox dead\n",
+ doe_mb->cap_offset);
+ set_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags);
+ }
+ signal_task_complete(task, rv);
+}
+
+static void doe_statemachine_work(struct work_struct *work)
+{
+ struct pci_doe_task *task = container_of(work, struct pci_doe_task,
+ work);
+ struct pci_doe_mb *doe_mb = task->doe_mb;
+ struct pci_dev *pdev = doe_mb->pdev;
+ int offset = doe_mb->cap_offset;
+ unsigned long timeout_jiffies;
+ u32 val;
+ int rc;
+
+ if (test_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags)) {
+ signal_task_complete(task, -EIO);
+ return;
+ }
+
+ /* Send request */
+ rc = pci_doe_send_req(doe_mb, task);
+ if (rc) {
+ /*
+ * The specification does not provide any guidance on how to
+ * resolve conflicting requests from other entities.
+ * Furthermore, it is likely that busy will not be detected
+ * most of the time. Flag any detection of status busy with an
+ * error.
+ */
+ if (rc == -EBUSY)
+ dev_err_ratelimited(&pdev->dev, "[%x] busy detected; another entity is sending conflicting requests\n",
+ offset);
+ signal_task_abort(task, rc);
+ return;
+ }
+
+ timeout_jiffies = jiffies + PCI_DOE_TIMEOUT;
+ /* Poll for response */
+retry_resp:
+ pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
+ if (FIELD_GET(PCI_DOE_STATUS_ERROR, val)) {
+ signal_task_abort(task, -EIO);
+ return;
+ }
+
+ if (!FIELD_GET(PCI_DOE_STATUS_DATA_OBJECT_READY, val)) {
+ if (time_after(jiffies, timeout_jiffies)) {
+ signal_task_abort(task, -EIO);
+ return;
+ }
+ rc = pci_doe_wait(doe_mb, PCI_DOE_POLL_INTERVAL);
+ if (rc) {
+ signal_task_abort(task, rc);
+ return;
+ }
+ goto retry_resp;
+ }
+
+ rc = pci_doe_recv_resp(doe_mb, task);
+ if (rc < 0) {
+ signal_task_abort(task, rc);
+ return;
+ }
+
+ signal_task_complete(task, rc);
+}
+
+static void pci_doe_task_complete(struct pci_doe_task *task)
+{
+ complete(task->private);
+}
+
+static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 *index, u16 *vid,
+ u8 *protocol)
+{
+ u32 request_pl = FIELD_PREP(PCI_DOE_DATA_OBJECT_DISC_REQ_3_INDEX,
+ *index);
+ u32 response_pl;
+ DECLARE_COMPLETION_ONSTACK(c);
+ struct pci_doe_task task = {
+ .prot.vid = PCI_VENDOR_ID_PCI_SIG,
+ .prot.type = PCI_DOE_PROTOCOL_DISCOVERY,
+ .request_pl = &request_pl,
+ .request_pl_sz = sizeof(request_pl),
+ .response_pl = &response_pl,
+ .response_pl_sz = sizeof(response_pl),
+ .complete = pci_doe_task_complete,
+ .private = &c,
+ };
+ int rc;
+
+ rc = pci_doe_submit_task(doe_mb, &task);
+ if (rc < 0)
+ return rc;
+
+ wait_for_completion(&c);
+
+ if (task.rv != sizeof(response_pl))
+ return -EIO;
+
+ *vid = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_VID, response_pl);
+ *protocol = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_PROTOCOL,
+ response_pl);
+ *index = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_NEXT_INDEX,
+ response_pl);
+
+ return 0;
+}
+
+static void *pci_doe_xa_prot_entry(u16 vid, u8 prot)
+{
+ return xa_mk_value((vid << 8) | prot);
+}
+
+static int pci_doe_cache_protocols(struct pci_doe_mb *doe_mb)
+{
+ u8 index = 0;
+ u8 xa_idx = 0;
+
+ do {
+ int rc;
+ u16 vid;
+ u8 prot;
+
+ rc = pci_doe_discovery(doe_mb, &index, &vid, &prot);
+ if (rc)
+ return rc;
+
+ pci_dbg(doe_mb->pdev,
+ "[%x] Found protocol %d vid: %x prot: %x\n",
+ doe_mb->cap_offset, xa_idx, vid, prot);
+
+ rc = xa_insert(&doe_mb->prots, xa_idx++,
+ pci_doe_xa_prot_entry(vid, prot), GFP_KERNEL);
+ if (rc)
+ return rc;
+ } while (index);
+
+ return 0;
+}
+
+static void pci_doe_xa_destroy(void *mb)
+{
+ struct pci_doe_mb *doe_mb = mb;
+
+ xa_destroy(&doe_mb->prots);
+}
+
+static void pci_doe_destroy_workqueue(void *mb)
+{
+ struct pci_doe_mb *doe_mb = mb;
+
+ destroy_workqueue(doe_mb->work_queue);
+}
+
+static void pci_doe_flush_mb(void *mb)
+{
+ struct pci_doe_mb *doe_mb = mb;
+
+ /* Stop all pending work items from starting */
+ set_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags);
+
+ /* Cancel an in progress work item, if necessary */
+ set_bit(PCI_DOE_FLAG_CANCEL, &doe_mb->flags);
+ wake_up(&doe_mb->wq);
+
+ /* Flush all work items */
+ flush_workqueue(doe_mb->work_queue);
+}
+
+/**
+ * pcim_doe_create_mb() - Create a DOE mailbox object
+ *
+ * @pdev: PCI device to create the DOE mailbox for
+ * @cap_offset: Offset of the DOE mailbox
+ *
+ * Create a single mailbox object to manage the mailbox protocol at the
+ * cap_offset specified.
+ *
+ * RETURNS: created mailbox object on success
+ * ERR_PTR(-errno) on failure
+ */
+struct pci_doe_mb *pcim_doe_create_mb(struct pci_dev *pdev, u16 cap_offset)
+{
+ struct pci_doe_mb *doe_mb;
+ struct device *dev = &pdev->dev;
+ int rc;
+
+ doe_mb = devm_kzalloc(dev, sizeof(*doe_mb), GFP_KERNEL);
+ if (!doe_mb)
+ return ERR_PTR(-ENOMEM);
+
+ doe_mb->pdev = pdev;
+ doe_mb->cap_offset = cap_offset;
+ init_waitqueue_head(&doe_mb->wq);
+
+ xa_init(&doe_mb->prots);
+ rc = devm_add_action(dev, pci_doe_xa_destroy, doe_mb);
+ if (rc)
+ return ERR_PTR(rc);
+
+ doe_mb->work_queue = alloc_ordered_workqueue("%s %s DOE [%x]", 0,
+ dev_driver_string(&pdev->dev),
+ pci_name(pdev),
+ doe_mb->cap_offset);
+ if (!doe_mb->work_queue) {
+ pci_err(pdev, "[%x] failed to allocate work queue\n",
+ doe_mb->cap_offset);
+ return ERR_PTR(-ENOMEM);
+ }
+ rc = devm_add_action_or_reset(dev, pci_doe_destroy_workqueue, doe_mb);
+ if (rc)
+ return ERR_PTR(rc);
+
+ /* Reset the mailbox by issuing an abort */
+ rc = pci_doe_abort(doe_mb);
+ if (rc) {
+ pci_err(pdev, "[%x] failed to reset mailbox with abort command : %d\n",
+ doe_mb->cap_offset, rc);
+ return ERR_PTR(rc);
+ }
+
+ /*
+ * The state machine and the mailbox should be in sync now;
+ * Set up mailbox flush prior to using the mailbox to query protocols.
+ */
+ rc = devm_add_action_or_reset(dev, pci_doe_flush_mb, doe_mb);
+ if (rc)
+ return ERR_PTR(rc);
+
+ rc = pci_doe_cache_protocols(doe_mb);
+ if (rc) {
+ pci_err(pdev, "[%x] failed to cache protocols : %d\n",
+ doe_mb->cap_offset, rc);
+ return ERR_PTR(rc);
+ }
+
+ return doe_mb;
+}
+EXPORT_SYMBOL_GPL(pcim_doe_create_mb);
+
+/**
+ * pci_doe_supports_prot() - Return if the DOE instance supports the given
+ * protocol
+ * @doe_mb: DOE mailbox capability to query
+ * @vid: Protocol Vendor ID
+ * @type: Protocol type
+ *
+ * RETURNS: True if the DOE mailbox supports the protocol specified
+ */
+bool pci_doe_supports_prot(struct pci_doe_mb *doe_mb, u16 vid, u8 type)
+{
+ unsigned long index;
+ void *entry;
+
+ /* The discovery protocol must always be supported */
+ if (vid == PCI_VENDOR_ID_PCI_SIG && type == PCI_DOE_PROTOCOL_DISCOVERY)
+ return true;
+
+ xa_for_each(&doe_mb->prots, index, entry)
+ if (entry == pci_doe_xa_prot_entry(vid, type))
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(pci_doe_supports_prot);
+
+/**
+ * pci_doe_submit_task() - Submit a task to be processed by the state machine
+ *
+ * @doe_mb: DOE mailbox capability to submit to
+ * @task: task to be queued
+ *
+ * Submit a DOE task (request/response) to the DOE mailbox to be processed.
+ * Returns upon queueing the task object. If the queue is full this function
+ * will sleep until there is room in the queue.
+ *
+ * task->complete will be called when the state machine is done processing this
+ * task.
+ *
+ * Excess data will be discarded.
+ *
+ * RETURNS: 0 when task has been successfully queued, -ERRNO on error
+ */
+int pci_doe_submit_task(struct pci_doe_mb *doe_mb, struct pci_doe_task *task)
+{
+ if (!pci_doe_supports_prot(doe_mb, task->prot.vid, task->prot.type))
+ return -EINVAL;
+
+ /*
+ * DOE requests must be a whole number of DW and the response needs to
+ * be big enough for at least 1 DW
+ */
+ if (task->request_pl_sz % sizeof(u32) ||
+ task->response_pl_sz < sizeof(u32))
+ return -EINVAL;
+
+ if (test_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags))
+ return -EIO;
+
+ task->doe_mb = doe_mb;
+ INIT_WORK(&task->work, doe_statemachine_work);
+ queue_work(doe_mb->work_queue, &task->work);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_doe_submit_task);
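For orientation, here is a minimal sketch of how a caller might drive the DOE interface added above: create a managed mailbox, check that a protocol is supported, then submit a synchronous task. It is illustrative only and not part of the patch; the vendor ID, protocol type, helper names and the <linux/pci-doe.h> header path are assumptions made for this example.

/* Illustrative sketch only -- not part of this patch. */
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/pci-doe.h>

static void example_doe_done(struct pci_doe_task *task)
{
	complete(task->private);
}

/* @offset: DOE capability offset located by the caller beforehand. */
static int example_doe_exchange(struct pci_dev *pdev, u16 offset)
{
	DECLARE_COMPLETION_ONSTACK(done);
	u32 request = 0;		/* protocol-specific request payload */
	u32 response;
	struct pci_doe_mb *mb;
	struct pci_doe_task task = {
		.prot.vid = 0x1234,	/* hypothetical vendor ID */
		.prot.type = 0x02,	/* hypothetical protocol type */
		.request_pl = &request,
		.request_pl_sz = sizeof(request),
		.response_pl = &response,
		.response_pl_sz = sizeof(response),
		.complete = example_doe_done,
		.private = &done,
	};
	int rc;

	mb = pcim_doe_create_mb(pdev, offset);
	if (IS_ERR(mb))
		return PTR_ERR(mb);

	if (!pci_doe_supports_prot(mb, task.prot.vid, task.prot.type))
		return -EOPNOTSUPP;

	rc = pci_doe_submit_task(mb, &task);
	if (rc)
		return rc;

	wait_for_completion(&done);
	return task.rv < 0 ? task.rv : 0;	/* rv holds bytes received on success */
}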
diff --git a/drivers/pci/endpoint/functions/Kconfig b/drivers/pci/endpoint/functions/Kconfig
index 5f1242ca2f4e..295a033ee9a2 100644
--- a/drivers/pci/endpoint/functions/Kconfig
+++ b/drivers/pci/endpoint/functions/Kconfig
@@ -25,3 +25,15 @@ config PCI_EPF_NTB
device tree.
If in doubt, say "N" to disable Endpoint NTB driver.
+
+config PCI_EPF_VNTB
+ tristate "PCI Endpoint NTB driver"
+ depends on PCI_ENDPOINT
+ depends on NTB
+ select CONFIGFS_FS
+ help
+ Select this configuration option to enable the Non-Transparent
+	  Bridge (NTB) driver for PCIe Endpoint. The NTB driver implements an
+	  NTB between a PCI Root Port and a PCIe Endpoint.
+
+ If in doubt, say "N" to disable Endpoint NTB driver.
diff --git a/drivers/pci/endpoint/functions/Makefile b/drivers/pci/endpoint/functions/Makefile
index 96ab932a537a..5c13001deaba 100644
--- a/drivers/pci/endpoint/functions/Makefile
+++ b/drivers/pci/endpoint/functions/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_PCI_EPF_TEST) += pci-epf-test.o
obj-$(CONFIG_PCI_EPF_NTB) += pci-epf-ntb.o
+obj-$(CONFIG_PCI_EPF_VNTB) += pci-epf-vntb.o
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 5b833f00e980..36b1801a061b 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -52,9 +52,11 @@ struct pci_epf_test {
enum pci_barno test_reg_bar;
size_t msix_table_offset;
struct delayed_work cmd_handler;
- struct dma_chan *dma_chan;
+ struct dma_chan *dma_chan_tx;
+ struct dma_chan *dma_chan_rx;
struct completion transfer_complete;
bool dma_supported;
+ bool dma_private;
const struct pci_epc_features *epc_features;
};
@@ -96,6 +98,8 @@ static void pci_epf_test_dma_callback(void *param)
* @dma_src: The source address of the data transfer. It can be a physical
* address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
* @len: The size of the data transfer
+ * @dma_remote: remote RC physical address
+ * @dir: DMA transfer direction
*
* Function that uses dmaengine API to transfer data between PCIe EP and remote
* PCIe RC. The source and destination address can be a physical address given
@@ -105,12 +109,16 @@ static void pci_epf_test_dma_callback(void *param)
*/
static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
dma_addr_t dma_dst, dma_addr_t dma_src,
- size_t len)
+ size_t len, dma_addr_t dma_remote,
+ enum dma_transfer_direction dir)
{
+ struct dma_chan *chan = (dir == DMA_DEV_TO_MEM) ?
+ epf_test->dma_chan_tx : epf_test->dma_chan_rx;
+ dma_addr_t dma_local = (dir == DMA_MEM_TO_DEV) ? dma_src : dma_dst;
enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
- struct dma_chan *chan = epf_test->dma_chan;
struct pci_epf *epf = epf_test->epf;
struct dma_async_tx_descriptor *tx;
+ struct dma_slave_config sconf = {};
struct device *dev = &epf->dev;
dma_cookie_t cookie;
int ret;
@@ -120,7 +128,24 @@ static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
return -EINVAL;
}
- tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len, flags);
+ if (epf_test->dma_private) {
+ sconf.direction = dir;
+ if (dir == DMA_MEM_TO_DEV)
+ sconf.dst_addr = dma_remote;
+ else
+ sconf.src_addr = dma_remote;
+
+ if (dmaengine_slave_config(chan, &sconf)) {
+ dev_err(dev, "DMA slave config fail\n");
+ return -EIO;
+ }
+ tx = dmaengine_prep_slave_single(chan, dma_local, len, dir,
+ flags);
+ } else {
+ tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len,
+ flags);
+ }
+
if (!tx) {
dev_err(dev, "Failed to prepare DMA memcpy\n");
return -EIO;
@@ -148,6 +173,23 @@ static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
return 0;
}
+struct epf_dma_filter {
+ struct device *dev;
+ u32 dma_mask;
+};
+
+static bool epf_dma_filter_fn(struct dma_chan *chan, void *node)
+{
+ struct epf_dma_filter *filter = node;
+ struct dma_slave_caps caps;
+
+ memset(&caps, 0, sizeof(caps));
+ dma_get_slave_caps(chan, &caps);
+
+ return chan->device->dev == filter->dev
+ && (filter->dma_mask & caps.directions);
+}
+
/**
* pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel
* @epf_test: the EPF test device that performs data transfer operation
@@ -158,10 +200,44 @@ static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
{
struct pci_epf *epf = epf_test->epf;
struct device *dev = &epf->dev;
+ struct epf_dma_filter filter;
struct dma_chan *dma_chan;
dma_cap_mask_t mask;
int ret;
+ filter.dev = epf->epc->dev.parent;
+ filter.dma_mask = BIT(DMA_DEV_TO_MEM);
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);
+ if (!dma_chan) {
+ dev_info(dev, "Failed to get private DMA rx channel. Falling back to generic one\n");
+ goto fail_back_tx;
+ }
+
+ epf_test->dma_chan_rx = dma_chan;
+
+ filter.dma_mask = BIT(DMA_MEM_TO_DEV);
+ dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);
+
+ if (!dma_chan) {
+ dev_info(dev, "Failed to get private DMA tx channel. Falling back to generic one\n");
+ goto fail_back_rx;
+ }
+
+ epf_test->dma_chan_tx = dma_chan;
+ epf_test->dma_private = true;
+
+ init_completion(&epf_test->transfer_complete);
+
+ return 0;
+
+fail_back_rx:
+ dma_release_channel(epf_test->dma_chan_rx);
+ epf_test->dma_chan_tx = NULL;
+
+fail_back_tx:
dma_cap_zero(mask);
dma_cap_set(DMA_MEMCPY, mask);
@@ -174,7 +250,7 @@ static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
}
init_completion(&epf_test->transfer_complete);
- epf_test->dma_chan = dma_chan;
+ epf_test->dma_chan_tx = epf_test->dma_chan_rx = dma_chan;
return 0;
}
@@ -190,8 +266,17 @@ static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
if (!epf_test->dma_supported)
return;
- dma_release_channel(epf_test->dma_chan);
- epf_test->dma_chan = NULL;
+ dma_release_channel(epf_test->dma_chan_tx);
+ if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
+ epf_test->dma_chan_tx = NULL;
+ epf_test->dma_chan_rx = NULL;
+ return;
+ }
+
+ dma_release_channel(epf_test->dma_chan_rx);
+ epf_test->dma_chan_rx = NULL;
+
+ return;
}
static void pci_epf_test_print_rate(const char *ops, u64 size,
@@ -280,8 +365,15 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
goto err_map_addr;
}
+ if (epf_test->dma_private) {
+ dev_err(dev, "Cannot transfer data using DMA\n");
+ ret = -EINVAL;
+ goto err_map_addr;
+ }
+
ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
- src_phys_addr, reg->size);
+ src_phys_addr, reg->size, 0,
+ DMA_MEM_TO_MEM);
if (ret)
dev_err(dev, "Data transfer failed\n");
} else {
@@ -373,7 +465,8 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)
ktime_get_ts64(&start);
ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
- phys_addr, reg->size);
+ phys_addr, reg->size,
+ reg->src_addr, DMA_DEV_TO_MEM);
if (ret)
dev_err(dev, "Data transfer failed\n");
ktime_get_ts64(&end);
@@ -463,8 +556,11 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
}
ktime_get_ts64(&start);
+
ret = pci_epf_test_data_transfer(epf_test, phys_addr,
- src_phys_addr, reg->size);
+ src_phys_addr, reg->size,
+ reg->dst_addr,
+ DMA_MEM_TO_DEV);
if (ret)
dev_err(dev, "Data transfer failed\n");
ktime_get_ts64(&end);
@@ -627,7 +723,6 @@ static void pci_epf_test_unbind(struct pci_epf *epf)
cancel_delayed_work(&epf_test->cmd_handler);
pci_epf_test_clean_dma_chan(epf_test);
- pci_epc_stop(epc);
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
epf_bar = &epf->bar[bar];
diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c
new file mode 100644
index 000000000000..0ea85e1d292e
--- /dev/null
+++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c
@@ -0,0 +1,1442 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Endpoint Function Driver to implement Non-Transparent Bridge functionality
+ * Between PCI RC and EP
+ *
+ * Copyright (C) 2020 Texas Instruments
+ * Copyright (C) 2022 NXP
+ *
+ * Based on pci-epf-ntb.c
+ * Author: Frank Li <Frank.Li@nxp.com>
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
+/**
+ * +------------+ +---------------------------------------+
+ * | | | |
+ * +------------+ | +--------------+
+ * | NTB | | | NTB |
+ * | NetDev | | | NetDev |
+ * +------------+ | +--------------+
+ * | NTB | | | NTB |
+ * | Transfer | | | Transfer |
+ * +------------+ | +--------------+
+ * | | | | |
+ * | PCI NTB | | | |
+ * | EPF | | | |
+ * | Driver | | | PCI Virtual |
+ * | | +---------------+ | NTB Driver |
+ * | | | PCI EP NTB |<------>| |
+ * | | | FN Driver | | |
+ * +------------+ +---------------+ +--------------+
+ * | | | | | |
+ * | PCI Bus | <-----> | PCI EP Bus | | Virtual PCI |
+ * | | PCI | | | Bus |
+ * +------------+ +---------------+--------+--------------+
+ * PCIe Root Port PCI EP
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+#include <linux/ntb.h>
+
+static struct workqueue_struct *kpcintb_workqueue;
+
+#define COMMAND_CONFIGURE_DOORBELL 1
+#define COMMAND_TEARDOWN_DOORBELL 2
+#define COMMAND_CONFIGURE_MW 3
+#define COMMAND_TEARDOWN_MW 4
+#define COMMAND_LINK_UP 5
+#define COMMAND_LINK_DOWN 6
+
+#define COMMAND_STATUS_OK 1
+#define COMMAND_STATUS_ERROR 2
+
+#define LINK_STATUS_UP BIT(0)
+
+#define SPAD_COUNT 64
+#define DB_COUNT 4
+#define NTB_MW_OFFSET 2
+#define DB_COUNT_MASK GENMASK(15, 0)
+#define MSIX_ENABLE BIT(16)
+#define MAX_DB_COUNT 32
+#define MAX_MW 4
+
+enum epf_ntb_bar {
+ BAR_CONFIG,
+ BAR_DB,
+ BAR_MW0,
+ BAR_MW1,
+ BAR_MW2,
+};
+
+/*
+ * +--------------------------------------------------+ Base
+ * | |
+ * | |
+ * | |
+ * | Common Control Register |
+ * | |
+ * | |
+ * | |
+ * +-----------------------+--------------------------+ Base+span_offset
+ * | | |
+ * | Peer Span Space | Span Space |
+ * | | |
+ * | | |
+ * +-----------------------+--------------------------+ Base+span_offset
+ * | | | +span_count * 4
+ * | | |
+ * | Span Space | Peer Span Space |
+ * | | |
+ * +-----------------------+--------------------------+
+ * Virtual PCI PCIe Endpoint
+ * NTB Driver NTB Driver
+ */
+struct epf_ntb_ctrl {
+ u32 command;
+ u32 argument;
+ u16 command_status;
+ u16 link_status;
+ u32 topology;
+ u64 addr;
+ u64 size;
+ u32 num_mws;
+ u32 reserved;
+ u32 spad_offset;
+ u32 spad_count;
+ u32 db_entry_size;
+ u32 db_data[MAX_DB_COUNT];
+ u32 db_offset[MAX_DB_COUNT];
+} __packed;
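To make the command flow implied by this layout concrete, below is a hedged sketch of how the host-side virtual NTB driver might issue a command through this region. It is illustrative only: 'ctrl' is assumed to be an ioremap() of the config BAR, the helper name and timeout are invented, and <linux/io.h>/<linux/delay.h> are assumed to be included.

/* Illustrative sketch only -- not part of this patch. */
static int example_host_send_command(struct epf_ntb_ctrl __iomem *ctrl,
				     u32 command, u32 argument)
{
	int timeout = 100;
	u16 status;

	writel(argument, &ctrl->argument);
	writel(command, &ctrl->command);

	/*
	 * epf_ntb_cmd_handler() on the endpoint polls 'command' every 5 ms,
	 * clears it and reports the result in 'command_status'.
	 */
	do {
		usleep_range(1000, 2000);
		status = readw(&ctrl->command_status);
		if (status == COMMAND_STATUS_ERROR)
			return -EIO;
	} while (status != COMMAND_STATUS_OK && --timeout > 0);

	return status == COMMAND_STATUS_OK ? 0 : -ETIMEDOUT;
}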
+
+struct epf_ntb {
+ struct ntb_dev ntb;
+ struct pci_epf *epf;
+ struct config_group group;
+
+ u32 num_mws;
+ u32 db_count;
+ u32 spad_count;
+ u64 mws_size[MAX_MW];
+ u64 db;
+ u32 vbus_number;
+ u16 vntb_pid;
+ u16 vntb_vid;
+
+ bool linkup;
+ u32 spad_size;
+
+ enum pci_barno epf_ntb_bar[6];
+
+ struct epf_ntb_ctrl *reg;
+
+ phys_addr_t epf_db_phy;
+ void __iomem *epf_db;
+
+ phys_addr_t vpci_mw_phy[MAX_MW];
+ void __iomem *vpci_mw_addr[MAX_MW];
+
+ struct delayed_work cmd_handler;
+};
+
+#define to_epf_ntb(epf_group) container_of((epf_group), struct epf_ntb, group)
+#define ntb_ndev(__ntb) container_of(__ntb, struct epf_ntb, ntb)
+
+static struct pci_epf_header epf_ntb_header = {
+ .vendorid = PCI_ANY_ID,
+ .deviceid = PCI_ANY_ID,
+ .baseclass_code = PCI_BASE_CLASS_MEMORY,
+ .interrupt_pin = PCI_INTERRUPT_INTA,
+};
+
+/**
+ * epf_ntb_link_up() - Raise link_up interrupt to Virtual Host
+ * @ntb: NTB device that facilitates communication between HOST and VHOST
+ * @link_up: true or false indicating Link is UP or Down
+ *
+ * Once the NTB function on the HOST invokes ntb_link_enable(),
+ * this NTB function driver will trigger a link event to the vhost.
+ */
+static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up)
+{
+ if (link_up)
+ ntb->reg->link_status |= LINK_STATUS_UP;
+ else
+ ntb->reg->link_status &= ~LINK_STATUS_UP;
+
+ ntb_link_event(&ntb->ntb);
+ return 0;
+}
+
+/**
+ * epf_ntb_configure_mw() - Configure the Outbound Address Space for vhost
+ * to access the memory window of host
+ * @ntb: NTB device that facilitates communication between host and vhost
+ * @mw: Index of the memory window (either 0, 1, 2 or 3)
+ *
+ * EP Outbound Window
+ * +--------+ +-----------+
+ * | | | |
+ * | | | |
+ * | | | |
+ * | | | |
+ * | | +-----------+
+ * | Virtual| | Memory Win|
+ * | NTB | -----------> | |
+ * | Driver | | |
+ * | | +-----------+
+ * | | | |
+ * | | | |
+ * +--------+ +-----------+
+ * VHost PCI EP
+ */
+static int epf_ntb_configure_mw(struct epf_ntb *ntb, u32 mw)
+{
+ phys_addr_t phys_addr;
+ u8 func_no, vfunc_no;
+ u64 addr, size;
+ int ret = 0;
+
+ phys_addr = ntb->vpci_mw_phy[mw];
+ addr = ntb->reg->addr;
+ size = ntb->reg->size;
+
+ func_no = ntb->epf->func_no;
+ vfunc_no = ntb->epf->vfunc_no;
+
+ ret = pci_epc_map_addr(ntb->epf->epc, func_no, vfunc_no, phys_addr, addr, size);
+ if (ret)
+ dev_err(&ntb->epf->epc->dev,
+ "Failed to map memory window %d address\n", mw);
+ return ret;
+}
+
+/**
+ * epf_ntb_teardown_mw() - Teardown the configured OB ATU
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ * @mw: Index of the memory window (either 0, 1, 2 or 3)
+ *
+ * Tear down the OB ATU configured in epf_ntb_configure_mw() using
+ * pci_epc_unmap_addr().
+ */
+static void epf_ntb_teardown_mw(struct epf_ntb *ntb, u32 mw)
+{
+ pci_epc_unmap_addr(ntb->epf->epc,
+ ntb->epf->func_no,
+ ntb->epf->vfunc_no,
+ ntb->vpci_mw_phy[mw]);
+}
+
+/**
+ * epf_ntb_cmd_handler() - Handle commands provided by the NTB Host
+ * @work: work_struct for the epf_ntb_epc
+ *
+ * Workqueue function that gets invoked periodically (once every 5ms)
+ * to check whether it has received any commands from the NTB host. The host
+ * can send commands to configure the doorbell, configure a memory window or
+ * update the link status.
+ */
+static void epf_ntb_cmd_handler(struct work_struct *work)
+{
+ struct epf_ntb_ctrl *ctrl;
+ u32 command, argument;
+ struct epf_ntb *ntb;
+ struct device *dev;
+ int ret;
+ int i;
+
+ ntb = container_of(work, struct epf_ntb, cmd_handler.work);
+
+ for (i = 1; i < ntb->db_count; i++) {
+ if (readl(ntb->epf_db + i * 4)) {
+ if (readl(ntb->epf_db + i * 4))
+ ntb->db |= 1 << (i - 1);
+
+ ntb_db_event(&ntb->ntb, i);
+ writel(0, ntb->epf_db + i * 4);
+ }
+ }
+
+ ctrl = ntb->reg;
+ command = ctrl->command;
+ if (!command)
+ goto reset_handler;
+ argument = ctrl->argument;
+
+ ctrl->command = 0;
+ ctrl->argument = 0;
+
+ ctrl = ntb->reg;
+ dev = &ntb->epf->dev;
+
+ switch (command) {
+ case COMMAND_CONFIGURE_DOORBELL:
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_TEARDOWN_DOORBELL:
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_CONFIGURE_MW:
+ ret = epf_ntb_configure_mw(ntb, argument);
+ if (ret < 0)
+ ctrl->command_status = COMMAND_STATUS_ERROR;
+ else
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_TEARDOWN_MW:
+ epf_ntb_teardown_mw(ntb, argument);
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_LINK_UP:
+ ntb->linkup = true;
+ ret = epf_ntb_link_up(ntb, true);
+ if (ret < 0)
+ ctrl->command_status = COMMAND_STATUS_ERROR;
+ else
+ ctrl->command_status = COMMAND_STATUS_OK;
+ goto reset_handler;
+ case COMMAND_LINK_DOWN:
+ ntb->linkup = false;
+ ret = epf_ntb_link_up(ntb, false);
+ if (ret < 0)
+ ctrl->command_status = COMMAND_STATUS_ERROR;
+ else
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ default:
+ dev_err(dev, "UNKNOWN command: %d\n", command);
+ break;
+ }
+
+reset_handler:
+ queue_delayed_work(kpcintb_workqueue, &ntb->cmd_handler,
+ msecs_to_jiffies(5));
+}
+
+/**
+ * epf_ntb_config_sspad_bar_clear() - Clear Config + Self scratchpad BAR
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ *
+ * Clear BAR0 of EP CONTROLLER 1 which contains the HOST1's config and
+ * self scratchpad region (removes inbound ATU configuration). While BAR0 is
+ * the default self scratchpad BAR, an NTB could have other BARs for self
+ * scratchpad (because of reserved BARs). This function can get the exact BAR
+ * used for self scratchpad from epf_ntb_bar[BAR_CONFIG].
+ *
+ * Please note the self scratchpad region and config region are combined into
+ * a single region and mapped using the same BAR. Also note HOST2's peer
+ * scratchpad is HOST1's self scratchpad.
+ */
+static void epf_ntb_config_sspad_bar_clear(struct epf_ntb *ntb)
+{
+ struct pci_epf_bar *epf_bar;
+ enum pci_barno barno;
+
+ barno = ntb->epf_ntb_bar[BAR_CONFIG];
+ epf_bar = &ntb->epf->bar[barno];
+
+ pci_epc_clear_bar(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no, epf_bar);
+}
+
+/**
+ * epf_ntb_config_sspad_bar_set() - Set Config + Self scratchpad BAR
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ *
+ * Map BAR0 of EP CONTROLLER 1 which contains the HOST1's config and
+ * self scratchpad region.
+ *
+ * Please note the self scratchpad region and config region are combined into
+ * a single region and mapped using the same BAR.
+ */
+static int epf_ntb_config_sspad_bar_set(struct epf_ntb *ntb)
+{
+ struct pci_epf_bar *epf_bar;
+ enum pci_barno barno;
+ u8 func_no, vfunc_no;
+ struct device *dev;
+ int ret;
+
+ dev = &ntb->epf->dev;
+ func_no = ntb->epf->func_no;
+ vfunc_no = ntb->epf->vfunc_no;
+ barno = ntb->epf_ntb_bar[BAR_CONFIG];
+ epf_bar = &ntb->epf->bar[barno];
+
+ ret = pci_epc_set_bar(ntb->epf->epc, func_no, vfunc_no, epf_bar);
+ if (ret) {
+ dev_err(dev, "inft: Config/Status/SPAD BAR set failed\n");
+ return ret;
+ }
+ return 0;
+}
+
+/**
+ * epf_ntb_config_spad_bar_free() - Free the physical memory associated with
+ * config + scratchpad region
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ */
+static void epf_ntb_config_spad_bar_free(struct epf_ntb *ntb)
+{
+ enum pci_barno barno;
+
+ barno = ntb->epf_ntb_bar[BAR_CONFIG];
+ pci_epf_free_space(ntb->epf, ntb->reg, barno, 0);
+}
+
+/**
+ * epf_ntb_config_spad_bar_alloc() - Allocate memory for config + scratchpad
+ * region
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ *
+ * Allocate the Local Memory mentioned in the above diagram. The size of
+ * CONFIG REGION is sizeof(struct epf_ntb_ctrl) and size of SCRATCHPAD REGION
+ * is obtained from "spad-count" configfs entry.
+ */
+static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb)
+{
+ size_t align;
+ enum pci_barno barno;
+ struct epf_ntb_ctrl *ctrl;
+ u32 spad_size, ctrl_size;
+ u64 size;
+ struct pci_epf *epf = ntb->epf;
+ struct device *dev = &epf->dev;
+ u32 spad_count;
+ void *base;
+ int i;
+ const struct pci_epc_features *epc_features = pci_epc_get_features(epf->epc,
+ epf->func_no,
+ epf->vfunc_no);
+ barno = ntb->epf_ntb_bar[BAR_CONFIG];
+ size = epc_features->bar_fixed_size[barno];
+ align = epc_features->align;
+
+ if ((!IS_ALIGNED(size, align)))
+ return -EINVAL;
+
+ spad_count = ntb->spad_count;
+
+ ctrl_size = sizeof(struct epf_ntb_ctrl);
+ spad_size = 2 * spad_count * 4;
+
+ if (!align) {
+ ctrl_size = roundup_pow_of_two(ctrl_size);
+ spad_size = roundup_pow_of_two(spad_size);
+ } else {
+ ctrl_size = ALIGN(ctrl_size, align);
+ spad_size = ALIGN(spad_size, align);
+ }
+
+ if (!size)
+ size = ctrl_size + spad_size;
+ else if (size < ctrl_size + spad_size)
+ return -EINVAL;
+
+ base = pci_epf_alloc_space(epf, size, barno, align, 0);
+ if (!base) {
+ dev_err(dev, "Config/Status/SPAD alloc region fail\n");
+ return -ENOMEM;
+ }
+
+ ntb->reg = base;
+
+ ctrl = ntb->reg;
+ ctrl->spad_offset = ctrl_size;
+
+ ctrl->spad_count = spad_count;
+ ctrl->num_mws = ntb->num_mws;
+ ntb->spad_size = spad_size;
+
+ ctrl->db_entry_size = 4;
+
+ for (i = 0; i < ntb->db_count; i++) {
+ ntb->reg->db_data[i] = 1 + i;
+ ntb->reg->db_offset[i] = 0;
+ }
+
+ return 0;
+}
+
+/**
+ * epf_ntb_configure_interrupt() - Configure MSI/MSI-X capability
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ *
+ * Configure the MSI/MSI-X capability for each interface with the number of
+ * interrupts equal to the "db_count" configfs entry.
+ */
+static int epf_ntb_configure_interrupt(struct epf_ntb *ntb)
+{
+ const struct pci_epc_features *epc_features;
+ struct device *dev;
+ u32 db_count;
+ int ret;
+
+ dev = &ntb->epf->dev;
+
+ epc_features = pci_epc_get_features(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no);
+
+ if (!(epc_features->msix_capable || epc_features->msi_capable)) {
+ dev_err(dev, "MSI or MSI-X is required for doorbell\n");
+ return -EINVAL;
+ }
+
+ db_count = ntb->db_count;
+ if (db_count > MAX_DB_COUNT) {
+ dev_err(dev, "DB count cannot be more than %d\n", MAX_DB_COUNT);
+ return -EINVAL;
+ }
+
+ ntb->db_count = db_count;
+
+ if (epc_features->msi_capable) {
+ ret = pci_epc_set_msi(ntb->epf->epc,
+ ntb->epf->func_no,
+ ntb->epf->vfunc_no,
+ 16);
+ if (ret) {
+ dev_err(dev, "MSI configuration failed\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * epf_ntb_db_bar_init() - Configure Doorbell window BARs
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ */
+static int epf_ntb_db_bar_init(struct epf_ntb *ntb)
+{
+ const struct pci_epc_features *epc_features;
+ u32 align;
+ struct device *dev = &ntb->epf->dev;
+ int ret;
+ struct pci_epf_bar *epf_bar;
+ void __iomem *mw_addr;
+ enum pci_barno barno;
+ size_t size = 4 * ntb->db_count;
+
+ epc_features = pci_epc_get_features(ntb->epf->epc,
+ ntb->epf->func_no,
+ ntb->epf->vfunc_no);
+ align = epc_features->align;
+
+ if (size < 128)
+ size = 128;
+
+ if (align)
+ size = ALIGN(size, align);
+ else
+ size = roundup_pow_of_two(size);
+
+ barno = ntb->epf_ntb_bar[BAR_DB];
+
+ mw_addr = pci_epf_alloc_space(ntb->epf, size, barno, align, 0);
+ if (!mw_addr) {
+ dev_err(dev, "Failed to allocate OB address\n");
+ return -ENOMEM;
+ }
+
+ ntb->epf_db = mw_addr;
+
+ epf_bar = &ntb->epf->bar[barno];
+
+ ret = pci_epc_set_bar(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no, epf_bar);
+ if (ret) {
+ dev_err(dev, "Doorbell BAR set failed\n");
+ goto err_alloc_peer_mem;
+ }
+ return ret;
+
+err_alloc_peer_mem:
+ pci_epc_mem_free_addr(ntb->epf->epc, epf_bar->phys_addr, mw_addr, epf_bar->size);
+ return -1;
+}
+
+static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws);
+
+/**
+ * epf_ntb_db_bar_clear() - Clear doorbell BAR and free the memory
+ * allocated for it
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ */
+static void epf_ntb_db_bar_clear(struct epf_ntb *ntb)
+{
+ enum pci_barno barno;
+
+ barno = ntb->epf_ntb_bar[BAR_DB];
+ pci_epf_free_space(ntb->epf, ntb->epf_db, barno, 0);
+ pci_epc_clear_bar(ntb->epf->epc,
+ ntb->epf->func_no,
+ ntb->epf->vfunc_no,
+ &ntb->epf->bar[barno]);
+}
+
+/**
+ * epf_ntb_mw_bar_init() - Configure Memory window BARs
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ *
+ */
+static int epf_ntb_mw_bar_init(struct epf_ntb *ntb)
+{
+ int ret = 0;
+ int i;
+ u64 size;
+ enum pci_barno barno;
+ struct device *dev = &ntb->epf->dev;
+
+ for (i = 0; i < ntb->num_mws; i++) {
+ size = ntb->mws_size[i];
+ barno = ntb->epf_ntb_bar[BAR_MW0 + i];
+
+ ntb->epf->bar[barno].barno = barno;
+ ntb->epf->bar[barno].size = size;
+ ntb->epf->bar[barno].addr = NULL;
+ ntb->epf->bar[barno].phys_addr = 0;
+ ntb->epf->bar[barno].flags |= upper_32_bits(size) ?
+ PCI_BASE_ADDRESS_MEM_TYPE_64 :
+ PCI_BASE_ADDRESS_MEM_TYPE_32;
+
+ ret = pci_epc_set_bar(ntb->epf->epc,
+ ntb->epf->func_no,
+ ntb->epf->vfunc_no,
+ &ntb->epf->bar[barno]);
+ if (ret) {
+ dev_err(dev, "MW set failed\n");
+ goto err_alloc_mem;
+ }
+
+ /* Allocate EPC outbound memory windows to vpci vntb device */
+ ntb->vpci_mw_addr[i] = pci_epc_mem_alloc_addr(ntb->epf->epc,
+ &ntb->vpci_mw_phy[i],
+ size);
+ if (!ntb->vpci_mw_addr[i]) {
+ ret = -ENOMEM;
+ dev_err(dev, "Failed to allocate source address\n");
+ goto err_set_bar;
+ }
+ }
+
+ return ret;
+
+err_set_bar:
+ pci_epc_clear_bar(ntb->epf->epc,
+ ntb->epf->func_no,
+ ntb->epf->vfunc_no,
+ &ntb->epf->bar[barno]);
+err_alloc_mem:
+ epf_ntb_mw_bar_clear(ntb, i);
+ return ret;
+}
+
+/**
+ * epf_ntb_mw_bar_clear() - Clear Memory window BARs
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ * @num_mws: Number of memory windows to clear
+ */
+static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws)
+{
+ enum pci_barno barno;
+ int i;
+
+ for (i = 0; i < num_mws; i++) {
+ barno = ntb->epf_ntb_bar[BAR_MW0 + i];
+ pci_epc_clear_bar(ntb->epf->epc,
+ ntb->epf->func_no,
+ ntb->epf->vfunc_no,
+ &ntb->epf->bar[barno]);
+
+ pci_epc_mem_free_addr(ntb->epf->epc,
+ ntb->vpci_mw_phy[i],
+ ntb->vpci_mw_addr[i],
+ ntb->mws_size[i]);
+ }
+}
+
+/**
+ * epf_ntb_epc_destroy() - Cleanup NTB EPC interface
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ *
+ * Unregister the endpoint function from the EPC and release the EPC.
+ */
+static void epf_ntb_epc_destroy(struct epf_ntb *ntb)
+{
+ pci_epc_remove_epf(ntb->epf->epc, ntb->epf, 0);
+ pci_epc_put(ntb->epf->epc);
+}
+
+/**
+ * epf_ntb_init_epc_bar() - Identify BARs to be used for each of the NTB
+ * constructs (scratchpad region, doorbell, memory window)
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ */
+static int epf_ntb_init_epc_bar(struct epf_ntb *ntb)
+{
+ const struct pci_epc_features *epc_features;
+ enum pci_barno barno;
+ enum epf_ntb_bar bar;
+ struct device *dev;
+ u32 num_mws;
+ int i;
+
+ barno = BAR_0;
+ num_mws = ntb->num_mws;
+ dev = &ntb->epf->dev;
+ epc_features = pci_epc_get_features(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no);
+
+ /* These are required BARs which are mandatory for NTB functionality */
+ for (bar = BAR_CONFIG; bar <= BAR_MW0; bar++, barno++) {
+ barno = pci_epc_get_next_free_bar(epc_features, barno);
+ if (barno < 0) {
+ dev_err(dev, "Fail to get NTB function BAR\n");
+ return barno;
+ }
+ ntb->epf_ntb_bar[bar] = barno;
+ }
+
+ /* These are optional BARs which don't impact NTB functionality */
+ for (bar = BAR_MW1, i = 1; i < num_mws; bar++, barno++, i++) {
+ barno = pci_epc_get_next_free_bar(epc_features, barno);
+ if (barno < 0) {
+ ntb->num_mws = i;
+ dev_dbg(dev, "BAR not available for > MW%d\n", i + 1);
+ }
+ ntb->epf_ntb_bar[bar] = barno;
+ }
+
+ return 0;
+}
+
+/**
+ * epf_ntb_epc_init() - Initialize NTB interface
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ *
+ * Wrapper to initialize a particular EPC interface and start the workqueue
+ * to check for commands from the host. This function will write to the
+ * EP controller HW to configure it.
+ */
+static int epf_ntb_epc_init(struct epf_ntb *ntb)
+{
+ u8 func_no, vfunc_no;
+ struct pci_epc *epc;
+ struct pci_epf *epf;
+ struct device *dev;
+ int ret;
+
+ epf = ntb->epf;
+ dev = &epf->dev;
+ epc = epf->epc;
+ func_no = ntb->epf->func_no;
+ vfunc_no = ntb->epf->vfunc_no;
+
+ ret = epf_ntb_config_sspad_bar_set(ntb);
+ if (ret) {
+ dev_err(dev, "Config/self SPAD BAR init failed");
+ return ret;
+ }
+
+ ret = epf_ntb_configure_interrupt(ntb);
+ if (ret) {
+ dev_err(dev, "Interrupt configuration failed\n");
+ goto err_config_interrupt;
+ }
+
+ ret = epf_ntb_db_bar_init(ntb);
+ if (ret) {
+ dev_err(dev, "DB BAR init failed\n");
+ goto err_db_bar_init;
+ }
+
+ ret = epf_ntb_mw_bar_init(ntb);
+ if (ret) {
+ dev_err(dev, "MW BAR init failed\n");
+ goto err_mw_bar_init;
+ }
+
+ if (vfunc_no <= 1) {
+ ret = pci_epc_write_header(epc, func_no, vfunc_no, epf->header);
+ if (ret) {
+ dev_err(dev, "Configuration header write failed\n");
+ goto err_write_header;
+ }
+ }
+
+ INIT_DELAYED_WORK(&ntb->cmd_handler, epf_ntb_cmd_handler);
+ queue_work(kpcintb_workqueue, &ntb->cmd_handler.work);
+
+ return 0;
+
+err_write_header:
+ epf_ntb_mw_bar_clear(ntb, ntb->num_mws);
+err_mw_bar_init:
+ epf_ntb_db_bar_clear(ntb);
+err_db_bar_init:
+err_config_interrupt:
+ epf_ntb_config_sspad_bar_clear(ntb);
+
+ return ret;
+}
+
+
+/**
+ * epf_ntb_epc_cleanup() - Cleanup all NTB interfaces
+ * @ntb: NTB device that facilitates communication between HOST and vHOST
+ *
+ * Wrapper to cleanup all NTB interfaces.
+ */
+static void epf_ntb_epc_cleanup(struct epf_ntb *ntb)
+{
+ epf_ntb_db_bar_clear(ntb);
+ epf_ntb_mw_bar_clear(ntb, ntb->num_mws);
+}
+
+#define EPF_NTB_R(_name) \
+static ssize_t epf_ntb_##_name##_show(struct config_item *item, \
+ char *page) \
+{ \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ \
+ return sprintf(page, "%d\n", ntb->_name); \
+}
+
+#define EPF_NTB_W(_name) \
+static ssize_t epf_ntb_##_name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+{ \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ u32 val; \
+ int ret; \
+ \
+ ret = kstrtou32(page, 0, &val); \
+ if (ret) \
+ return ret; \
+ \
+ ntb->_name = val; \
+ \
+ return len; \
+}
+
+#define EPF_NTB_MW_R(_name) \
+static ssize_t epf_ntb_##_name##_show(struct config_item *item, \
+ char *page) \
+{ \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ struct device *dev = &ntb->epf->dev; \
+ int win_no; \
+ \
+ if (sscanf(#_name, "mw%d", &win_no) != 1) \
+ return -EINVAL; \
+ \
+ if (win_no <= 0 || win_no > ntb->num_mws) { \
+ dev_err(dev, "Invalid num_nws: %d value\n", ntb->num_mws); \
+ return -EINVAL; \
+ } \
+ \
+ return sprintf(page, "%lld\n", ntb->mws_size[win_no - 1]); \
+}
+
+#define EPF_NTB_MW_W(_name) \
+static ssize_t epf_ntb_##_name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+{ \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ struct device *dev = &ntb->epf->dev; \
+ int win_no; \
+ u64 val; \
+ int ret; \
+ \
+ ret = kstrtou64(page, 0, &val); \
+ if (ret) \
+ return ret; \
+ \
+ if (sscanf(#_name, "mw%d", &win_no) != 1) \
+ return -EINVAL; \
+ \
+ if (win_no <= 0 || win_no > ntb->num_mws) { \
+ dev_err(dev, "Invalid num_nws: %d value\n", ntb->num_mws); \
+ return -EINVAL; \
+ } \
+ \
+ ntb->mws_size[win_no - 1] = val; \
+ \
+ return len; \
+}
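For readability, this is roughly what one instantiation of the read accessor macro above, EPF_NTB_R(spad_count), expands to. It is shown only as an illustration of the configfs plumbing the macros generate and is not part of the patch.

/* Illustrative expansion of EPF_NTB_R(spad_count) -- not part of this patch. */
static ssize_t epf_ntb_spad_count_show(struct config_item *item, char *page)
{
	struct config_group *group = to_config_group(item);
	struct epf_ntb *ntb = to_epf_ntb(group);

	return sprintf(page, "%d\n", ntb->spad_count);
}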
+
+static ssize_t epf_ntb_num_mws_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct config_group *group = to_config_group(item);
+ struct epf_ntb *ntb = to_epf_ntb(group);
+ u32 val;
+ int ret;
+
+ ret = kstrtou32(page, 0, &val);
+ if (ret)
+ return ret;
+
+ if (val > MAX_MW)
+ return -EINVAL;
+
+ ntb->num_mws = val;
+
+ return len;
+}
+
+EPF_NTB_R(spad_count)
+EPF_NTB_W(spad_count)
+EPF_NTB_R(db_count)
+EPF_NTB_W(db_count)
+EPF_NTB_R(num_mws)
+EPF_NTB_R(vbus_number)
+EPF_NTB_W(vbus_number)
+EPF_NTB_R(vntb_pid)
+EPF_NTB_W(vntb_pid)
+EPF_NTB_R(vntb_vid)
+EPF_NTB_W(vntb_vid)
+EPF_NTB_MW_R(mw1)
+EPF_NTB_MW_W(mw1)
+EPF_NTB_MW_R(mw2)
+EPF_NTB_MW_W(mw2)
+EPF_NTB_MW_R(mw3)
+EPF_NTB_MW_W(mw3)
+EPF_NTB_MW_R(mw4)
+EPF_NTB_MW_W(mw4)
+
+CONFIGFS_ATTR(epf_ntb_, spad_count);
+CONFIGFS_ATTR(epf_ntb_, db_count);
+CONFIGFS_ATTR(epf_ntb_, num_mws);
+CONFIGFS_ATTR(epf_ntb_, mw1);
+CONFIGFS_ATTR(epf_ntb_, mw2);
+CONFIGFS_ATTR(epf_ntb_, mw3);
+CONFIGFS_ATTR(epf_ntb_, mw4);
+CONFIGFS_ATTR(epf_ntb_, vbus_number);
+CONFIGFS_ATTR(epf_ntb_, vntb_pid);
+CONFIGFS_ATTR(epf_ntb_, vntb_vid);
+
+static struct configfs_attribute *epf_ntb_attrs[] = {
+ &epf_ntb_attr_spad_count,
+ &epf_ntb_attr_db_count,
+ &epf_ntb_attr_num_mws,
+ &epf_ntb_attr_mw1,
+ &epf_ntb_attr_mw2,
+ &epf_ntb_attr_mw3,
+ &epf_ntb_attr_mw4,
+ &epf_ntb_attr_vbus_number,
+ &epf_ntb_attr_vntb_pid,
+ &epf_ntb_attr_vntb_vid,
+ NULL,
+};
+
+static const struct config_item_type ntb_group_type = {
+ .ct_attrs = epf_ntb_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/**
+ * epf_ntb_add_cfs() - Add configfs directory specific to NTB
+ * @epf: NTB endpoint function device
+ * @group: A pointer to the config_group structure referencing a group of
+ * config_items of a specific type that belong to a specific sub-system.
+ *
+ * Add configfs directory specific to NTB. This directory will hold
+ * NTB specific properties like db_count, spad_count, num_mws, etc.
+ */
+static struct config_group *epf_ntb_add_cfs(struct pci_epf *epf,
+ struct config_group *group)
+{
+ struct epf_ntb *ntb = epf_get_drvdata(epf);
+ struct config_group *ntb_group = &ntb->group;
+ struct device *dev = &epf->dev;
+
+ config_group_init_type_name(ntb_group, dev_name(dev), &ntb_group_type);
+
+ return ntb_group;
+}
+
+/*==== virtual PCI bus driver, which only loads the virtual NTB PCI driver ====*/
+
+static u32 pci_space[] = {
+ 0xffffffff, /*DeviceID, Vendor ID*/
+ 0, /*Status, Command*/
+ 0xffffffff, /*Class code, subclass, prog if, revision id*/
+ 0x40, /*bist, header type, latency Timer, cache line size*/
+ 0, /*BAR 0*/
+ 0, /*BAR 1*/
+ 0, /*BAR 2*/
+ 0, /*BAR 3*/
+ 0, /*BAR 4*/
+ 0, /*BAR 5*/
+	0,		/*Cardbus CIS pointer*/
+	0,		/*Subsystem ID, Subsystem vendor ID*/
+	0,		/*ROM Base Address*/
+	0,		/*Reserved, Capabilities pointer*/
+ 0, /*Reserved,*/
+ 0, /*Max Lat, Min Gnt, interrupt pin, interrupt line*/
+};
+
+static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val)
+{
+ if (devfn == 0) {
+ memcpy(val, ((u8 *)pci_space) + where, size);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ return PCIBIOS_DEVICE_NOT_FOUND;
+}
+
+static int pci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val)
+{
+ return 0;
+}
+
+static struct pci_ops vpci_ops = {
+ .read = pci_read,
+ .write = pci_write,
+};
+
+static int vpci_scan_bus(void *sysdata)
+{
+ struct pci_bus *vpci_bus;
+ struct epf_ntb *ndev = sysdata;
+
+ vpci_bus = pci_scan_bus(ndev->vbus_number, &vpci_ops, sysdata);
+	if (!vpci_bus)
+		pr_err("failed to create pci bus\n");
+
+ pci_bus_add_devices(vpci_bus);
+
+ return 0;
+}
+
+/*==================== Virtual PCIe NTB driver ==========================*/
+
+static int vntb_epf_mw_count(struct ntb_dev *ntb, int pidx)
+{
+ struct epf_ntb *ndev = ntb_ndev(ntb);
+
+ return ndev->num_mws;
+}
+
+static int vntb_epf_spad_count(struct ntb_dev *ntb)
+{
+ return ntb_ndev(ntb)->spad_count;
+}
+
+static int vntb_epf_peer_mw_count(struct ntb_dev *ntb)
+{
+ return ntb_ndev(ntb)->num_mws;
+}
+
+static u64 vntb_epf_db_valid_mask(struct ntb_dev *ntb)
+{
+ return BIT_ULL(ntb_ndev(ntb)->db_count) - 1;
+}
+
+static int vntb_epf_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+ return 0;
+}
+
+static int vntb_epf_mw_set_trans(struct ntb_dev *ndev, int pidx, int idx,
+ dma_addr_t addr, resource_size_t size)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+ struct pci_epf_bar *epf_bar;
+ enum pci_barno barno;
+ int ret;
+ struct device *dev;
+
+ dev = &ntb->ntb.dev;
+ barno = ntb->epf_ntb_bar[BAR_MW0 + idx];
+ epf_bar = &ntb->epf->bar[barno];
+ epf_bar->phys_addr = addr;
+ epf_bar->barno = barno;
+ epf_bar->size = size;
+
+ ret = pci_epc_set_bar(ntb->epf->epc, 0, 0, epf_bar);
+ if (ret) {
+ dev_err(dev, "failure set mw trans\n");
+ return ret;
+ }
+ return 0;
+}
+
+static int vntb_epf_mw_clear_trans(struct ntb_dev *ntb, int pidx, int idx)
+{
+ return 0;
+}
+
+static int vntb_epf_peer_mw_get_addr(struct ntb_dev *ndev, int idx,
+ phys_addr_t *base, resource_size_t *size)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+
+ if (base)
+ *base = ntb->vpci_mw_phy[idx];
+
+ if (size)
+ *size = ntb->mws_size[idx];
+
+ return 0;
+}
+
+static int vntb_epf_link_enable(struct ntb_dev *ntb,
+ enum ntb_speed max_speed,
+ enum ntb_width max_width)
+{
+ return 0;
+}
+
+static u32 vntb_epf_spad_read(struct ntb_dev *ndev, int idx)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+ int off = ntb->reg->spad_offset, ct = ntb->reg->spad_count * 4;
+ u32 val;
+ void __iomem *base = ntb->reg;
+
+ val = readl(base + off + ct + idx * 4);
+ return val;
+}
+
+static int vntb_epf_spad_write(struct ntb_dev *ndev, int idx, u32 val)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+ struct epf_ntb_ctrl *ctrl = ntb->reg;
+ int off = ctrl->spad_offset, ct = ctrl->spad_count * 4;
+ void __iomem *base = ntb->reg;
+
+ writel(val, base + off + ct + idx * 4);
+ return 0;
+}
+
+static u32 vntb_epf_peer_spad_read(struct ntb_dev *ndev, int pidx, int idx)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+ struct epf_ntb_ctrl *ctrl = ntb->reg;
+ int off = ctrl->spad_offset;
+ void __iomem *base = ntb->reg;
+ u32 val;
+
+ val = readl(base + off + idx * 4);
+ return val;
+}
+
+static int vntb_epf_peer_spad_write(struct ntb_dev *ndev, int pidx, int idx, u32 val)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+ struct epf_ntb_ctrl *ctrl = ntb->reg;
+ int off = ctrl->spad_offset;
+ void __iomem *base = ntb->reg;
+
+ writel(val, base + off + idx * 4);
+ return 0;
+}
+
+static int vntb_epf_peer_db_set(struct ntb_dev *ndev, u64 db_bits)
+{
+ u32 interrupt_num = ffs(db_bits) + 1;
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+ u8 func_no, vfunc_no;
+ int ret;
+
+ func_no = ntb->epf->func_no;
+ vfunc_no = ntb->epf->vfunc_no;
+
+ ret = pci_epc_raise_irq(ntb->epf->epc,
+ func_no,
+ vfunc_no,
+ PCI_EPC_IRQ_MSI,
+ interrupt_num + 1);
+ if (ret)
+ dev_err(&ntb->ntb.dev, "Failed to raise IRQ\n");
+
+ return ret;
+}
+
+static u64 vntb_epf_db_read(struct ntb_dev *ndev)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+
+ return ntb->db;
+}
+
+static int vntb_epf_mw_get_align(struct ntb_dev *ndev, int pidx, int idx,
+ resource_size_t *addr_align,
+ resource_size_t *size_align,
+ resource_size_t *size_max)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+
+ if (addr_align)
+ *addr_align = SZ_4K;
+
+ if (size_align)
+ *size_align = 1;
+
+ if (size_max)
+ *size_max = ntb->mws_size[idx];
+
+ return 0;
+}
+
+static u64 vntb_epf_link_is_up(struct ntb_dev *ndev,
+ enum ntb_speed *speed,
+ enum ntb_width *width)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+
+ return ntb->reg->link_status;
+}
+
+static int vntb_epf_db_clear_mask(struct ntb_dev *ndev, u64 db_bits)
+{
+ return 0;
+}
+
+static int vntb_epf_db_clear(struct ntb_dev *ndev, u64 db_bits)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+
+ ntb->db &= ~db_bits;
+ return 0;
+}
+
+static int vntb_epf_link_disable(struct ntb_dev *ntb)
+{
+ return 0;
+}
+
+static const struct ntb_dev_ops vntb_epf_ops = {
+ .mw_count = vntb_epf_mw_count,
+ .spad_count = vntb_epf_spad_count,
+ .peer_mw_count = vntb_epf_peer_mw_count,
+ .db_valid_mask = vntb_epf_db_valid_mask,
+ .db_set_mask = vntb_epf_db_set_mask,
+ .mw_set_trans = vntb_epf_mw_set_trans,
+ .mw_clear_trans = vntb_epf_mw_clear_trans,
+ .peer_mw_get_addr = vntb_epf_peer_mw_get_addr,
+ .link_enable = vntb_epf_link_enable,
+ .spad_read = vntb_epf_spad_read,
+ .spad_write = vntb_epf_spad_write,
+ .peer_spad_read = vntb_epf_peer_spad_read,
+ .peer_spad_write = vntb_epf_peer_spad_write,
+ .peer_db_set = vntb_epf_peer_db_set,
+ .db_read = vntb_epf_db_read,
+ .mw_get_align = vntb_epf_mw_get_align,
+ .link_is_up = vntb_epf_link_is_up,
+ .db_clear_mask = vntb_epf_db_clear_mask,
+ .db_clear = vntb_epf_db_clear,
+ .link_disable = vntb_epf_link_disable,
+};
+
+static int pci_vntb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ int ret;
+ struct epf_ntb *ndev = (struct epf_ntb *)pdev->sysdata;
+ struct device *dev = &pdev->dev;
+
+ ndev->ntb.pdev = pdev;
+ ndev->ntb.topo = NTB_TOPO_NONE;
+ ndev->ntb.ops = &vntb_epf_ops;
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "Cannot set DMA mask\n");
+ return -EINVAL;
+ }
+
+ ret = ntb_register_device(&ndev->ntb);
+ if (ret) {
+ dev_err(dev, "Failed to register NTB device\n");
+ goto err_register_dev;
+ }
+
+ dev_dbg(dev, "PCI Virtual NTB driver loaded\n");
+ return 0;
+
+err_register_dev:
+ return -EINVAL;
+}
+
+static struct pci_device_id pci_vntb_table[] = {
+ {
+ PCI_DEVICE(0xffff, 0xffff),
+ },
+ {},
+};
+
+static struct pci_driver vntb_pci_driver = {
+ .name = "pci-vntb",
+ .id_table = pci_vntb_table,
+ .probe = pci_vntb_probe,
+};
+
+/* ============ PCIe EPF Driver Bind ====================*/
+
+/**
+ * epf_ntb_bind() - Initialize endpoint controller to provide NTB functionality
+ * @epf: NTB endpoint function device
+ *
+ * Initialize the endpoint controller associated with the NTB function device.
+ * Invoked when the endpoint function is bound to an EPC device.
+ */
+static int epf_ntb_bind(struct pci_epf *epf)
+{
+ struct epf_ntb *ntb = epf_get_drvdata(epf);
+ struct device *dev = &epf->dev;
+ int ret;
+
+ if (!epf->epc) {
+ dev_dbg(dev, "PRIMARY EPC interface not yet bound\n");
+ return 0;
+ }
+
+ ret = epf_ntb_init_epc_bar(ntb);
+ if (ret) {
+ dev_err(dev, "Failed to create NTB EPC\n");
+ goto err_bar_init;
+ }
+
+ ret = epf_ntb_config_spad_bar_alloc(ntb);
+ if (ret) {
+ dev_err(dev, "Failed to allocate BAR memory\n");
+ goto err_bar_alloc;
+ }
+
+ ret = epf_ntb_epc_init(ntb);
+ if (ret) {
+ dev_err(dev, "Failed to initialize EPC\n");
+ goto err_bar_alloc;
+ }
+
+ epf_set_drvdata(epf, ntb);
+
+ pci_space[0] = (ntb->vntb_pid << 16) | ntb->vntb_vid;
+ pci_vntb_table[0].vendor = ntb->vntb_vid;
+ pci_vntb_table[0].device = ntb->vntb_pid;
+
+ ret = pci_register_driver(&vntb_pci_driver);
+ if (ret) {
+ dev_err(dev, "failure register vntb pci driver\n");
+ goto err_bar_alloc;
+ }
+
+ vpci_scan_bus(ntb);
+
+ return 0;
+
+err_bar_alloc:
+ epf_ntb_config_spad_bar_free(ntb);
+
+err_bar_init:
+ epf_ntb_epc_destroy(ntb);
+
+ return ret;
+}
+
+/**
+ * epf_ntb_unbind() - Cleanup the initialization from epf_ntb_bind()
+ * @epf: NTB endpoint function device
+ *
+ * Cleanup the initialization from epf_ntb_bind()
+ */
+static void epf_ntb_unbind(struct pci_epf *epf)
+{
+ struct epf_ntb *ntb = epf_get_drvdata(epf);
+
+ epf_ntb_epc_cleanup(ntb);
+ epf_ntb_config_spad_bar_free(ntb);
+ epf_ntb_epc_destroy(ntb);
+
+ pci_unregister_driver(&vntb_pci_driver);
+}
+
+/* EPF driver ops */
+static struct pci_epf_ops epf_ntb_ops = {
+ .bind = epf_ntb_bind,
+ .unbind = epf_ntb_unbind,
+ .add_cfs = epf_ntb_add_cfs,
+};
+
+/**
+ * epf_ntb_probe() - Probe NTB function driver
+ * @epf: NTB endpoint function device
+ *
+ * Probe the NTB function driver when the endpoint function bus detects an
+ * NTB endpoint function.
+ */
+static int epf_ntb_probe(struct pci_epf *epf)
+{
+ struct epf_ntb *ntb;
+ struct device *dev;
+
+ dev = &epf->dev;
+
+ ntb = devm_kzalloc(dev, sizeof(*ntb), GFP_KERNEL);
+ if (!ntb)
+ return -ENOMEM;
+
+ epf->header = &epf_ntb_header;
+ ntb->epf = epf;
+ ntb->vbus_number = 0xff;
+ epf_set_drvdata(epf, ntb);
+
+ dev_info(dev, "pci-ep epf driver loaded\n");
+ return 0;
+}
+
+static const struct pci_epf_device_id epf_ntb_ids[] = {
+ {
+ .name = "pci_epf_vntb",
+ },
+ {},
+};
+
+static struct pci_epf_driver epf_ntb_driver = {
+ .driver.name = "pci_epf_vntb",
+ .probe = epf_ntb_probe,
+ .id_table = epf_ntb_ids,
+ .ops = &epf_ntb_ops,
+ .owner = THIS_MODULE,
+};
+
+static int __init epf_ntb_init(void)
+{
+ int ret;
+
+ kpcintb_workqueue = alloc_workqueue("kpcintb", WQ_MEM_RECLAIM |
+ WQ_HIGHPRI, 0);
+ ret = pci_epf_register_driver(&epf_ntb_driver);
+ if (ret) {
+ destroy_workqueue(kpcintb_workqueue);
+ pr_err("Failed to register pci epf ntb driver --> %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(epf_ntb_init);
+
+static void __exit epf_ntb_exit(void)
+{
+ pci_epf_unregister_driver(&epf_ntb_driver);
+ destroy_workqueue(kpcintb_workqueue);
+}
+module_exit(epf_ntb_exit);
+
+MODULE_DESCRIPTION("PCI EPF NTB DRIVER");
+MODULE_AUTHOR("Frank Li <Frank.li@nxp.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/mmap.c b/drivers/pci/mmap.c
index b8c9011987f4..4504039056d1 100644
--- a/drivers/pci/mmap.c
+++ b/drivers/pci/mmap.c
@@ -13,27 +13,6 @@
#ifdef ARCH_GENERIC_PCI_MMAP_RESOURCE
-/*
- * Modern setup: generic pci_mmap_resource_range(), and implement the legacy
- * pci_mmap_page_range() (if needed) as a wrapper round it.
- */
-
-#ifdef HAVE_PCI_MMAP
-int pci_mmap_page_range(struct pci_dev *pdev, int bar,
- struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine)
-{
- resource_size_t start, end;
-
- pci_resource_to_user(pdev, bar, &pdev->resource[bar], &start, &end);
-
- /* Adjust vm_pgoff to be the offset within the resource */
- vma->vm_pgoff -= start >> PAGE_SHIFT;
- return pci_mmap_resource_range(pdev, bar, vma, mmap_state,
- write_combine);
-}
-#endif
-
static const struct vm_operations_struct pci_phys_vm_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
.access = generic_access_phys,
@@ -70,27 +49,4 @@ int pci_mmap_resource_range(struct pci_dev *pdev, int bar,
vma->vm_page_prot);
}
-#elif defined(HAVE_PCI_MMAP) /* && !ARCH_GENERIC_PCI_MMAP_RESOURCE */
-
-/*
- * Legacy setup: Implement pci_mmap_resource_range() as a wrapper around
- * the architecture's pci_mmap_page_range(), converting to "user visible"
- * addresses as necessary.
- */
-
-int pci_mmap_resource_range(struct pci_dev *pdev, int bar,
- struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine)
-{
- resource_size_t start, end;
-
- /*
- * pci_mmap_page_range() expects the same kind of entry as coming
- * from /proc/bus/pci/ which is a "user visible" value. If this is
- * different from the resource itself, arch will do necessary fixup.
- */
- pci_resource_to_user(pdev, bar, &pdev->resource[bar], &start, &end);
- vma->vm_pgoff += start >> PAGE_SHIFT;
- return pci_mmap_page_range(pdev, bar, vma, mmap_state, write_combine);
-}
#endif
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 462b429ad243..4496a7c5c478 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -10,6 +10,7 @@
#define pr_fmt(fmt) "pci-p2pdma: " fmt
#include <linux/ctype.h>
+#include <linux/dma-map-ops.h>
#include <linux/pci-p2pdma.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -20,13 +21,6 @@
#include <linux/seq_buf.h>
#include <linux/xarray.h>
-enum pci_p2pdma_map_type {
- PCI_P2PDMA_MAP_UNKNOWN = 0,
- PCI_P2PDMA_MAP_NOT_SUPPORTED,
- PCI_P2PDMA_MAP_BUS_ADDR,
- PCI_P2PDMA_MAP_THRU_HOST_BRIDGE,
-};
-
struct pci_p2pdma {
struct gen_pool *pool;
bool p2pmem_published;
@@ -854,6 +848,7 @@ static enum pci_p2pdma_map_type pci_p2pdma_map_type(struct dev_pagemap *pgmap,
struct pci_dev *provider = to_p2p_pgmap(pgmap)->provider;
struct pci_dev *client;
struct pci_p2pdma *p2pdma;
+ int dist;
if (!provider->p2pdma)
return PCI_P2PDMA_MAP_NOT_SUPPORTED;
@@ -870,74 +865,48 @@ static enum pci_p2pdma_map_type pci_p2pdma_map_type(struct dev_pagemap *pgmap,
type = xa_to_value(xa_load(&p2pdma->map_types,
map_types_idx(client)));
rcu_read_unlock();
- return type;
-}
-static int __pci_p2pdma_map_sg(struct pci_p2pdma_pagemap *p2p_pgmap,
- struct device *dev, struct scatterlist *sg, int nents)
-{
- struct scatterlist *s;
- int i;
-
- for_each_sg(sg, s, nents, i) {
- s->dma_address = sg_phys(s) + p2p_pgmap->bus_offset;
- sg_dma_len(s) = s->length;
- }
+ if (type == PCI_P2PDMA_MAP_UNKNOWN)
+ return calc_map_type_and_dist(provider, client, &dist, true);
- return nents;
+ return type;
}
/**
- * pci_p2pdma_map_sg_attrs - map a PCI peer-to-peer scatterlist for DMA
- * @dev: device doing the DMA request
- * @sg: scatter list to map
- * @nents: elements in the scatterlist
- * @dir: DMA direction
- * @attrs: DMA attributes passed to dma_map_sg() (if called)
+ * pci_p2pdma_map_segment - map an sg segment determining the mapping type
+ * @state: State structure that should be declared outside of the for_each_sg()
+ * loop and initialized to zero.
+ * @dev: DMA device that's doing the mapping operation
+ * @sg: scatterlist segment to map
*
- * Scatterlists mapped with this function should be unmapped using
- * pci_p2pdma_unmap_sg_attrs().
+ * This is a helper to be used by non-IOMMU dma_map_sg() implementations where
+ * the sg segment is the same for the page_link and the dma_address.
*
- * Returns the number of SG entries mapped or 0 on error.
+ * Attempt to map a single segment in an SGL with the PCI bus address.
+ * The segment must point to a PCI P2PDMA page and thus must be
+ * wrapped in a is_pci_p2pdma_page(sg_page(sg)) check.
+ *
+ * Returns the type of mapping used and maps the page if the type is
+ * PCI_P2PDMA_MAP_BUS_ADDR.
*/
-int pci_p2pdma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, unsigned long attrs)
+enum pci_p2pdma_map_type
+pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
+ struct scatterlist *sg)
{
- struct pci_p2pdma_pagemap *p2p_pgmap =
- to_p2p_pgmap(sg_page(sg)->pgmap);
-
- switch (pci_p2pdma_map_type(sg_page(sg)->pgmap, dev)) {
- case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
- return dma_map_sg_attrs(dev, sg, nents, dir, attrs);
- case PCI_P2PDMA_MAP_BUS_ADDR:
- return __pci_p2pdma_map_sg(p2p_pgmap, dev, sg, nents);
- default:
- WARN_ON_ONCE(1);
- return 0;
+ if (state->pgmap != sg_page(sg)->pgmap) {
+ state->pgmap = sg_page(sg)->pgmap;
+ state->map = pci_p2pdma_map_type(state->pgmap, dev);
+ state->bus_off = to_p2p_pgmap(state->pgmap)->bus_offset;
}
-}
-EXPORT_SYMBOL_GPL(pci_p2pdma_map_sg_attrs);
-/**
- * pci_p2pdma_unmap_sg_attrs - unmap a PCI peer-to-peer scatterlist that was
- * mapped with pci_p2pdma_map_sg()
- * @dev: device doing the DMA request
- * @sg: scatter list to map
- * @nents: number of elements returned by pci_p2pdma_map_sg()
- * @dir: DMA direction
- * @attrs: DMA attributes passed to dma_unmap_sg() (if called)
- */
-void pci_p2pdma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, unsigned long attrs)
-{
- enum pci_p2pdma_map_type map_type;
-
- map_type = pci_p2pdma_map_type(sg_page(sg)->pgmap, dev);
+ if (state->map == PCI_P2PDMA_MAP_BUS_ADDR) {
+ sg->dma_address = sg_phys(sg) + state->bus_off;
+ sg_dma_len(sg) = sg->length;
+ sg_dma_mark_bus_address(sg);
+ }
- if (map_type == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE)
- dma_unmap_sg_attrs(dev, sg, nents, dir, attrs);
+ return state->map;
}
-EXPORT_SYMBOL_GPL(pci_p2pdma_unmap_sg_attrs);
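As a hedged illustration of the calling pattern described in the kernel-doc above, a non-IOMMU dma_map_sg() implementation might use the helper as sketched below. The function name is invented, error handling and the normal mapping path are elided, and <linux/pci-p2pdma.h> plus <linux/scatterlist.h> are assumed to be included.

/* Illustrative sketch only -- not part of this patch. */
static int example_direct_map_sg(struct device *dev, struct scatterlist *sgl,
				 int nents, enum dma_data_direction dir)
{
	struct pci_p2pdma_map_state p2pdma_state = {};
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (is_pci_p2pdma_page(sg_page(sg))) {
			switch (pci_p2pdma_map_segment(&p2pdma_state, dev, sg)) {
			case PCI_P2PDMA_MAP_BUS_ADDR:
				continue;	/* already mapped to the bus address */
			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
				break;		/* map like ordinary memory below */
			default:
				return -EREMOTEIO;
			}
		}
		/* ... map this segment through the normal DMA path here ... */
	}

	return nents;
}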
/**
* pci_p2pdma_enable_store - parse a configfs/sysfs attribute store
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 3760d85c10d2..a46fec776ad7 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -21,8 +21,9 @@
#include "pci.h"
/*
- * The GUID is defined in the PCI Firmware Specification available here:
- * https://www.pcisig.com/members/downloads/pcifw_r3_1_13Dec10.pdf
+ * The GUID is defined in the PCI Firmware Specification available
+ * here to PCI-SIG members:
+ * https://members.pcisig.com/wg/PCI-SIG/document/15350
*/
const guid_t pci_acpi_dsm_guid =
GUID_INIT(0xe5c937d0, 0x3553, 0x4d7a,
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index cfaf40a540a8..95bc329e74c0 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -41,8 +41,10 @@ const char *pci_power_names[] = {
};
EXPORT_SYMBOL_GPL(pci_power_names);
+#ifdef CONFIG_X86_32
int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);
+#endif
int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);
@@ -1293,9 +1295,6 @@ static int pci_set_full_power_state(struct pci_dev *dev)
pci_restore_bars(dev);
}
- if (dev->bus->self)
- pcie_aspm_pm_state_change(dev->bus->self);
-
return 0;
}
@@ -1390,9 +1389,6 @@ static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state)
pci_power_name(dev->current_state),
pci_power_name(state));
- if (dev->bus->self)
- pcie_aspm_pm_state_change(dev->bus->self);
-
return 0;
}
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index e10cdec6c56e..785f31086313 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -560,12 +560,10 @@ bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
#ifdef CONFIG_PCIEASPM
void pcie_aspm_init_link_state(struct pci_dev *pdev);
void pcie_aspm_exit_link_state(struct pci_dev *pdev);
-void pcie_aspm_pm_state_change(struct pci_dev *pdev);
void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
#else
static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { }
-static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev) { }
static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }
#endif
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index 7952e5efd6cf..e2d8a74f83c3 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -392,6 +392,11 @@ void pci_aer_init(struct pci_dev *dev)
pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_ERR, sizeof(u32) * n);
pci_aer_clear_status(dev);
+
+ if (pci_aer_available())
+ pci_enable_pcie_error_reporting(dev);
+
+ pcie_set_ecrc_checking(dev);
}
void pci_aer_exit(struct pci_dev *dev)
@@ -538,7 +543,7 @@ static const char *aer_agent_string[] = {
u64 *stats = pdev->aer_stats->stats_array; \
size_t len = 0; \
\
- for (i = 0; i < ARRAY_SIZE(strings_array); i++) { \
+ for (i = 0; i < ARRAY_SIZE(pdev->aer_stats->stats_array); i++) {\
if (strings_array[i]) \
len += sysfs_emit_at(buf, len, "%s %llu\n", \
strings_array[i], \
@@ -1228,9 +1233,6 @@ static int set_device_error_reporting(struct pci_dev *dev, void *data)
pci_disable_pcie_error_reporting(dev);
}
- if (enable)
- pcie_set_ecrc_checking(dev);
-
return 0;
}
@@ -1347,6 +1349,11 @@ static int aer_probe(struct pcie_device *dev)
struct device *device = &dev->device;
struct pci_dev *port = dev->port;
+ BUILD_BUG_ON(ARRAY_SIZE(aer_correctable_error_string) <
+ AER_MAX_TYPEOF_COR_ERRS);
+ BUILD_BUG_ON(ARRAY_SIZE(aer_uncorrectable_error_string) <
+ AER_MAX_TYPEOF_UNCOR_ERRS);
+
/* Limit to Root Ports or Root Complex Event Collectors */
if ((pci_pcie_type(port) != PCI_EXP_TYPE_RC_EC) &&
(pci_pcie_type(port) != PCI_EXP_TYPE_ROOT_PORT))
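
Two related fixes appear in the aer.c hunks: the sysfs dump loop is now bounded by the size of the counter array rather than the string table, and BUILD_BUG_ON() asserts at compile time that the string tables cover every counted error type. Below is a minimal user-space sketch of the same idea, using _Static_assert in place of BUILD_BUG_ON; the array names and sizes are made up for the example.

#include <stdio.h>

#define MAX_COR_ERRS 16

/* Sparse string table: some counter indexes have no name. */
static const char *cor_error_strings[MAX_COR_ERRS] = {
        [0] = "Receiver Error",
        [6] = "Bad TLP",
        [7] = "Bad DLLP",
};

struct aer_stats_demo {
        unsigned long long cor_errs[MAX_COR_ERRS];
};

/* Compile-time check standing in for BUILD_BUG_ON(): the string table must
 * cover every counter, so the loop below can be bounded by the counter
 * array without reading past the strings. */
_Static_assert(sizeof(cor_error_strings) / sizeof(cor_error_strings[0]) >=
               MAX_COR_ERRS, "string table smaller than counter array");

static void dump_stats(const struct aer_stats_demo *st)
{
        size_t n = sizeof(st->cor_errs) / sizeof(st->cor_errs[0]);
        size_t i;

        /* Bound the loop by the counters being printed, as the patch does. */
        for (i = 0; i < n; i++)
                if (cor_error_strings[i])
                        printf("%s %llu\n", cor_error_strings[i], st->cor_errs[i]);
}

int main(void)
{
        struct aer_stats_demo st = { .cor_errs = { [6] = 3 } };

        dump_stats(&st);
        return 0;
}
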
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index a96b7424c9bc..a8aec190986c 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -1012,25 +1012,6 @@ out:
up_read(&pci_bus_sem);
}
-/* @pdev: the root port or switch downstream port */
-void pcie_aspm_pm_state_change(struct pci_dev *pdev)
-{
- struct pcie_link_state *link = pdev->link_state;
-
- if (aspm_disabled || !link)
- return;
- /*
- * Devices changed PM state, we should recheck if latency
- * meets all functions' requirement
- */
- down_read(&pci_bus_sem);
- mutex_lock(&aspm_lock);
- pcie_update_aspm_capable(link->root);
- pcie_config_aspm_path(link);
- mutex_unlock(&aspm_lock);
- up_read(&pci_bus_sem);
-}
-
void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
{
struct pcie_link_state *link = pdev->link_state;
@@ -1366,4 +1347,3 @@ bool pcie_aspm_support_enabled(void)
{
return aspm_support_enabled;
}
-EXPORT_SYMBOL(pcie_aspm_support_enabled);
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
index 0c5a143025af..59c90d04a609 100644
--- a/drivers/pci/pcie/err.c
+++ b/drivers/pci/pcie/err.c
@@ -55,10 +55,14 @@ static int report_error_detected(struct pci_dev *dev,
device_lock(&dev->dev);
pdrv = dev->driver;
- if (!pci_dev_set_io_state(dev, state) ||
- !pdrv ||
- !pdrv->err_handler ||
- !pdrv->err_handler->error_detected) {
+ if (pci_dev_is_disconnected(dev)) {
+ vote = PCI_ERS_RESULT_DISCONNECT;
+ } else if (!pci_dev_set_io_state(dev, state)) {
+ pci_info(dev, "can't recover (state transition %u -> %u invalid)\n",
+ dev->error_state, state);
+ vote = PCI_ERS_RESULT_NONE;
+ } else if (!pdrv || !pdrv->err_handler ||
+ !pdrv->err_handler->error_detected) {
/*
* If any device in the subtree does not have an error_detected
* callback, PCI_ERS_RESULT_NO_AER_DRIVER prevents subsequent
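
The err.c change above splits what used to be a single catch-all condition into three distinct outcomes: an already-disconnected device votes PCI_ERS_RESULT_DISCONNECT, an invalid I/O state transition is logged and votes PCI_ERS_RESULT_NONE, and only the genuine no-callback case keeps the PCI_ERS_RESULT_NO_AER_DRIVER behaviour. A simplified stand-alone sketch of that decision cascade follows; the types and helpers are simplified stand-ins, not the kernel structures.

#include <stdbool.h>
#include <stdio.h>

enum ers_result { ERS_NONE, ERS_NO_AER_DRIVER, ERS_DISCONNECT, ERS_CAN_RECOVER };

struct demo_dev {
        bool disconnected;
        bool state_transition_ok;
        bool has_error_detected_cb;
};

static enum ers_result report_error_detected(const struct demo_dev *dev)
{
        if (dev->disconnected)
                return ERS_DISCONNECT;          /* device is already gone */
        if (!dev->state_transition_ok)
                return ERS_NONE;                /* invalid io_state transition, log and bail */
        if (!dev->has_error_detected_cb)
                return ERS_NO_AER_DRIVER;       /* blocks the remaining recovery steps */
        return ERS_CAN_RECOVER;                 /* the driver callback would run here */
}

int main(void)
{
        struct demo_dev gone = { .disconnected = true };

        printf("vote = %d\n", report_error_detected(&gone));
        return 0;
}
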
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 604feeb84ee4..1ac7fec47d6f 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -222,15 +222,8 @@ static int get_port_device_capability(struct pci_dev *dev)
#ifdef CONFIG_PCIEAER
if (dev->aer_cap && pci_aer_available() &&
- (pcie_ports_native || host->native_aer)) {
+ (pcie_ports_native || host->native_aer))
services |= PCIE_PORT_SERVICE_AER;
-
- /*
- * Disable AER on this port in case it's been enabled by the
- * BIOS (the AER service driver will enable it when necessary).
- */
- pci_disable_pcie_error_reporting(dev);
- }
#endif
/* Root Ports and Root Complex Event Collectors may generate PMEs */
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 17a969942d37..c5286b027f00 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1890,6 +1890,9 @@ int pci_setup_device(struct pci_dev *dev)
dev->broken_intx_masking = pci_intx_mask_broken(dev);
+ /* Clear errors left from system firmware */
+ pci_write_config_word(dev, PCI_STATUS, 0xffff);
+
switch (dev->hdr_type) { /* header type */
case PCI_HEADER_TYPE_NORMAL: /* standard header */
if (class == PCI_CLASS_BRIDGE_PCI)
@@ -2312,7 +2315,7 @@ EXPORT_SYMBOL(pci_alloc_dev);
static bool pci_bus_crs_vendor_id(u32 l)
{
- return (l & 0xffff) == 0x0001;
+ return (l & 0xffff) == PCI_VENDOR_ID_PCI_SIG;
}
static bool pci_bus_wait_crs(struct pci_bus *bus, int devfn, u32 *l,
@@ -2579,33 +2582,39 @@ struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
}
EXPORT_SYMBOL(pci_scan_single_device);
-static unsigned int next_fn(struct pci_bus *bus, struct pci_dev *dev,
- unsigned int fn)
+static int next_ari_fn(struct pci_bus *bus, struct pci_dev *dev, int fn)
{
int pos;
u16 cap = 0;
unsigned int next_fn;
- if (pci_ari_enabled(bus)) {
- if (!dev)
- return 0;
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
- if (!pos)
- return 0;
+ if (!dev)
+ return -ENODEV;
- pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
- next_fn = PCI_ARI_CAP_NFN(cap);
- if (next_fn <= fn)
- return 0; /* protect against malformed list */
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
+ if (!pos)
+ return -ENODEV;
- return next_fn;
- }
+ pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
+ next_fn = PCI_ARI_CAP_NFN(cap);
+ if (next_fn <= fn)
+ return -ENODEV; /* protect against malformed list */
- /* dev may be NULL for non-contiguous multifunction devices */
- if (!dev || dev->multifunction)
- return (fn + 1) % 8;
+ return next_fn;
+}
- return 0;
+static int next_fn(struct pci_bus *bus, struct pci_dev *dev, int fn)
+{
+ if (pci_ari_enabled(bus))
+ return next_ari_fn(bus, dev, fn);
+
+ if (fn >= 7)
+ return -ENODEV;
+ /* only multifunction devices may have more functions */
+ if (dev && !dev->multifunction)
+ return -ENODEV;
+
+ return fn + 1;
}
static int only_one_child(struct pci_bus *bus)
@@ -2643,26 +2652,30 @@ static int only_one_child(struct pci_bus *bus)
*/
int pci_scan_slot(struct pci_bus *bus, int devfn)
{
- unsigned int fn, nr = 0;
struct pci_dev *dev;
+ int fn = 0, nr = 0;
if (only_one_child(bus) && (devfn > 0))
return 0; /* Already scanned the entire slot */
- dev = pci_scan_single_device(bus, devfn);
- if (!dev)
- return 0;
- if (!pci_dev_is_added(dev))
- nr++;
-
- for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
+ do {
dev = pci_scan_single_device(bus, devfn + fn);
if (dev) {
if (!pci_dev_is_added(dev))
nr++;
- dev->multifunction = 1;
+ if (fn > 0)
+ dev->multifunction = 1;
+ } else if (fn == 0) {
+ /*
+ * Function 0 is required unless we are running on
+ * a hypervisor that passes through individual PCI
+ * functions.
+ */
+ if (!hypervisor_isolated_pci_functions())
+ break;
}
- }
+ fn = next_fn(bus, dev, fn);
+ } while (fn >= 0);
/* Only one slot has PCIe device */
if (bus->self && nr)
@@ -2858,29 +2871,14 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
{
unsigned int used_buses, normal_bridges = 0, hotplug_bridges = 0;
unsigned int start = bus->busn_res.start;
- unsigned int devfn, fn, cmax, max = start;
+ unsigned int devfn, cmax, max = start;
struct pci_dev *dev;
- int nr_devs;
dev_dbg(&bus->dev, "scanning bus\n");
/* Go find them, Rover! */
- for (devfn = 0; devfn < 256; devfn += 8) {
- nr_devs = pci_scan_slot(bus, devfn);
-
- /*
- * The Jailhouse hypervisor may pass individual functions of a
- * multi-function device to a guest without passing function 0.
- * Look for them as well.
- */
- if (jailhouse_paravirt() && nr_devs == 0) {
- for (fn = 1; fn < 8; fn++) {
- dev = pci_scan_single_device(bus, devfn + fn);
- if (dev)
- dev->multifunction = 1;
- }
- }
- }
+ for (devfn = 0; devfn < 256; devfn += 8)
+ pci_scan_slot(bus, devfn);
/* Reserve buses for SR-IOV capability */
used_buses = pci_iov_bus_range(bus);
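
The probe.c rework above turns next_fn() into a plain iterator that returns -ENODEV when there is nothing left to scan, so pci_scan_slot() becomes a single do/while loop that also copes with hypervisors passing through individual functions without function 0. A self-contained user-space sketch of that loop shape follows; the fake topology, demo_dev type and hypervisor stub are illustrative only.

#include <stdbool.h>
#include <stdio.h>

struct demo_dev { bool multifunction; };

static bool hypervisor_isolated_functions(void) { return false; }

/* Returns the next function number to probe, or -1 (standing in for
 * -ENODEV) when the walk is finished. */
static int next_fn(const struct demo_dev *dev, int fn)
{
        if (fn >= 7)
                return -1;
        /* Only multifunction devices may have more functions; dev may be
         * NULL for non-contiguous multifunction layouts. */
        if (dev && !dev->multifunction)
                return -1;
        return fn + 1;
}

/* Fake topology: functions 0 and 2 exist, function 1 does not. */
static const struct demo_dev *scan_single(int fn)
{
        static const struct demo_dev mf = { .multifunction = true };

        return (fn == 0 || fn == 2) ? &mf : NULL;
}

int main(void)
{
        const struct demo_dev *dev;
        int fn = 0, nr = 0;

        do {
                dev = scan_single(fn);
                if (dev) {
                        nr++;
                        printf("found function %d\n", fn);
                } else if (fn == 0 && !hypervisor_isolated_functions()) {
                        break;  /* function 0 is normally mandatory */
                }
                fn = next_fn(dev, fn);
        } while (fn >= 0);

        printf("%d functions on this slot\n", nr);
        return 0;
}
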
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 31b26d8ea6cc..f967709082d6 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -244,6 +244,7 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
{
struct pci_dev *dev = pde_data(file_inode(file));
struct pci_filp_private *fpriv = file->private_data;
+ resource_size_t start, end;
int i, ret, write_combine = 0, res_bit = IORESOURCE_MEM;
if (!capable(CAP_SYS_RAWIO) ||
@@ -278,7 +279,11 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
iomem_is_exclusive(dev->resource[i].start))
return -EINVAL;
- ret = pci_mmap_page_range(dev, i, vma,
+ pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
+
+ /* Adjust vm_pgoff to be the offset within the resource */
+ vma->vm_pgoff -= start >> PAGE_SHIFT;
+ ret = pci_mmap_resource_range(dev, i, vma,
fpriv->mmap_state, write_combine);
if (ret < 0)
return ret;
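
The proc.c hunk switches /proc/bus/pci mmap to pci_mmap_resource_range(), which expects an offset relative to the start of the BAR, so the user-supplied vm_pgoff first has the resource start (in pages) subtracted. A tiny sketch of that offset arithmetic; the addresses below are hypothetical.

#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12

int main(void)
{
        uint64_t resource_start = 0xfebf0000;                   /* hypothetical BAR start */
        uint64_t user_pgoff = 0xfebf2000 >> DEMO_PAGE_SHIFT;    /* offset passed to mmap() */

        /* Make the offset relative to the resource, as the patch does with
         * vma->vm_pgoff -= start >> PAGE_SHIFT. */
        uint64_t pgoff_in_bar = user_pgoff - (resource_start >> DEMO_PAGE_SHIFT);

        printf("offset within BAR: %llu pages\n",
               (unsigned long long)pgoff_in_bar);
        return 0;
}
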
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 41aeaa235132..4944798e75b5 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/pci.h>
+#include <linux/isa-dma.h> /* isa_dma_bridge_buggy */
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/acpi.h>
@@ -30,7 +31,6 @@
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/switchtec.h>
-#include <asm/dma.h> /* isa_dma_bridge_buggy */
#include "pci.h"
static ktime_t fixup_debug_start(struct pci_dev *dev,
@@ -239,6 +239,7 @@ static void quirk_passive_release(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release);
+#ifdef CONFIG_X86_32
/*
* The VIA VP2/VP3/MVP3 seem to have some 'features'. There may be a
* workaround but VIA don't answer queries. If you happen to have good
@@ -265,6 +266,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, quirk_isa_dma
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_1, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs);
+#endif
/*
* Intel NM10 "TigerPoint" LPC PM1a_STS.BM_STS must be clear
@@ -2709,10 +2711,10 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
nvenet_msi_disable);
/*
- * PCIe spec r4.0 sec 7.7.1.2 and sec 7.7.2.2 say that if MSI/MSI-X is enabled,
- * then the device can't use INTx interrupts. Tegra's PCIe root ports don't
- * generate MSI interrupts for PME and AER events instead only INTx interrupts
- * are generated. Though Tegra's PCIe root ports can generate MSI interrupts
+ * PCIe spec r6.0 sec 6.1.4.3 says that if MSI/MSI-X is enabled, the device
+ * can't use INTx interrupts. Tegra's PCIe Root Ports don't generate MSI
+ * interrupts for PME and AER events; instead only INTx interrupts are
+ * generated. Though Tegra's PCIe Root Ports can generate MSI interrupts
* for other events, since PCIe specification doesn't support using a mix of
* INTx and MSI/MSI-X, it is required to disable MSI interrupts to avoid port
* service drivers registering their respective ISRs for MSIs.
@@ -2760,6 +2762,15 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x10e5,
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x10e6,
PCI_CLASS_BRIDGE_PCI, 8,
pci_quirk_nvidia_tegra_disable_rp_msi);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x229a,
+ PCI_CLASS_BRIDGE_PCI, 8,
+ pci_quirk_nvidia_tegra_disable_rp_msi);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x229c,
+ PCI_CLASS_BRIDGE_PCI, 8,
+ pci_quirk_nvidia_tegra_disable_rp_msi);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x229e,
+ PCI_CLASS_BRIDGE_PCI, 8,
+ pci_quirk_nvidia_tegra_disable_rp_msi);
/*
* Some versions of the MCP55 bridge from Nvidia have a legacy IRQ routing
@@ -4924,6 +4935,9 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs },
/* Broadcom multi-function device */
{ PCI_VENDOR_ID_BROADCOM, 0x16D7, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_BROADCOM, 0x1750, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_BROADCOM, 0x1751, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_BROADCOM, 0x1752, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
/* Amazon Annapurna Labs */
{ PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs },
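
The quirks.c hunk extends the static pci_dev_acs_enabled[] table with more Broadcom multifunction endpoints. As a rough illustration of how such a table is typically consulted, here is a self-contained sketch with made-up entries and a toy handler; it is not the kernel's lookup code.

#include <stdio.h>

struct acs_quirk {
        unsigned short vendor, device;
        int (*handler)(unsigned short vendor, unsigned short device);
};

static int mf_endpoint_acs(unsigned short v, unsigned short d)
{
        printf("applying multifunction-endpoint ACS quirk to %04x:%04x\n", v, d);
        return 1;
}

/* Hypothetical table entries; the real list lives in pci_dev_acs_enabled[]. */
static const struct acs_quirk acs_quirks[] = {
        { 0x14e4, 0x1750, mf_endpoint_acs },
        { 0x14e4, 0x1751, mf_endpoint_acs },
        { 0, 0, NULL }
};

static int apply_acs_quirk(unsigned short vendor, unsigned short device)
{
        const struct acs_quirk *q;

        for (q = acs_quirks; q->handler; q++)
                if (q->vendor == vendor && q->device == device)
                        return q->handler(vendor, device);
        return 0;       /* no quirk for this device */
}

int main(void)
{
        apply_acs_quirk(0x14e4, 0x1750);
        return 0;
}
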
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index c36c1238c604..75be4fe22509 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -1376,8 +1376,7 @@ static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
dev->groups = switchtec_device_groups;
dev->release = stdev_release;
- minor = ida_simple_get(&switchtec_minor_ida, 0, 0,
- GFP_KERNEL);
+ minor = ida_alloc(&switchtec_minor_ida, GFP_KERNEL);
if (minor < 0) {
rc = minor;
goto err_put;
@@ -1692,7 +1691,7 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
err_devadd:
stdev_kill(stdev);
err_put:
- ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
+ ida_free(&switchtec_minor_ida, MINOR(stdev->dev.devt));
put_device(&stdev->dev);
return rc;
}
@@ -1704,7 +1703,7 @@ static void switchtec_pci_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
cdev_device_del(&stdev->cdev, &stdev->dev);
- ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
+ ida_free(&switchtec_minor_ida, MINOR(stdev->dev.devt));
dev_info(&stdev->dev, "unregistered.\n");
stdev_kill(stdev);
put_device(&stdev->dev);
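
switchtec.c above is converted from the deprecated ida_simple_get()/ida_simple_remove() helpers to ida_alloc()/ida_free(); for the unbounded case used here the two are equivalent (lowest free ID, same GFP flags). The toy allocator below only mimics that call pattern so the example is self-contained; it is not the kernel's IDA implementation.

#include <stdio.h>

#define DEMO_MAX_IDS 64

struct demo_ida { unsigned char used[DEMO_MAX_IDS]; };

/* Mimics ida_alloc(): hand out the lowest free ID. */
static int demo_ida_alloc(struct demo_ida *ida)
{
        int i;

        for (i = 0; i < DEMO_MAX_IDS; i++)
                if (!ida->used[i]) {
                        ida->used[i] = 1;
                        return i;
                }
        return -1;      /* stand-in for -ENOSPC */
}

/* Mimics ida_free(): return the ID to the pool. */
static void demo_ida_free(struct demo_ida *ida, int id)
{
        if (id >= 0 && id < DEMO_MAX_IDS)
                ida->used[id] = 0;
}

int main(void)
{
        struct demo_ida minor_ida = { { 0 } };
        int minor = demo_ida_alloc(&minor_ida);

        printf("got minor %d\n", minor);
        demo_ida_free(&minor_ida, minor);
        return 0;
}
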
diff --git a/drivers/perf/riscv_pmu.c b/drivers/perf/riscv_pmu.c
index 2c961839903d..ebca5eab9c9b 100644
--- a/drivers/perf/riscv_pmu.c
+++ b/drivers/perf/riscv_pmu.c
@@ -170,7 +170,6 @@ int riscv_pmu_event_set_period(struct perf_event *event)
left = (max_period >> 1);
local64_set(&hwc->prev_count, (u64)-left);
- perf_event_update_userpage(event);
return overflow;
}
diff --git a/drivers/perf/riscv_pmu_legacy.c b/drivers/perf/riscv_pmu_legacy.c
index 342778782359..2c20b0de8cb0 100644
--- a/drivers/perf/riscv_pmu_legacy.c
+++ b/drivers/perf/riscv_pmu_legacy.c
@@ -72,7 +72,7 @@ static void pmu_legacy_ctr_start(struct perf_event *event, u64 ival)
local64_set(&hwc->prev_count, initial_val);
}
-/**
+/*
* This is just a simple implementation to allow legacy implementations
* compatible with new RISC-V PMU driver framework.
* This driver only allows reading two counters i.e CYCLE & INSTRET.
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 79a3de515ece..6f6681bbfd36 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -41,20 +41,6 @@ static const struct attribute_group *riscv_pmu_attr_groups[] = {
NULL,
};
-union sbi_pmu_ctr_info {
- unsigned long value;
- struct {
- unsigned long csr:12;
- unsigned long width:6;
-#if __riscv_xlen == 32
- unsigned long reserved:13;
-#else
- unsigned long reserved:45;
-#endif
- unsigned long type:1;
- };
-};
-
/*
* RISC-V doesn't have hetergenous harts yet. This need to be part of
* per_cpu in case of harts with different pmu counters
@@ -294,8 +280,13 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event)
cflags |= SBI_PMU_CFG_FLAG_SET_UINH;
/* retrieve the available counter index */
+#if defined(CONFIG_32BIT)
+ ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase, cmask,
+ cflags, hwc->event_base, hwc->config, hwc->config >> 32);
+#else
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase, cmask,
cflags, hwc->event_base, hwc->config, 0);
+#endif
if (ret.error) {
pr_debug("Not able to find a counter for event %lx config %llx\n",
hwc->event_base, hwc->config);
@@ -437,8 +428,13 @@ static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival)
struct hw_perf_event *hwc = &event->hw;
unsigned long flag = SBI_PMU_START_FLAG_SET_INIT_VALUE;
+#if defined(CONFIG_32BIT)
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx,
1, flag, ival, ival >> 32, 0);
+#else
+ ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx,
+ 1, flag, ival, 0, 0);
+#endif
if (ret.error && (ret.error != SBI_ERR_ALREADY_STARTED))
pr_err("Starting counter idx %d failed with error %d\n",
hwc->idx, sbi_err_map_linux_errno(ret.error));
@@ -545,8 +541,14 @@ static inline void pmu_sbi_start_overflow_mask(struct riscv_pmu *pmu,
hwc = &event->hw;
max_period = riscv_pmu_ctr_get_width_mask(event);
init_val = local64_read(&hwc->prev_count) & max_period;
+#if defined(CONFIG_32BIT)
+ sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx, 1,
+ flag, init_val, init_val >> 32, 0);
+#else
sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx, 1,
flag, init_val, 0, 0);
+#endif
+ perf_event_update_userpage(event);
}
ctr_ovf_mask = ctr_ovf_mask >> 1;
idx++;
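
The riscv_pmu_sbi.c hunks add CONFIG_32BIT branches because SBI calls pass arguments in machine-word registers: on rv32 a 64-bit counter value has to be split into low and high words across two arguments, while on rv64 it fits in one and the extra slot stays zero. A small illustrative sketch of that split; demo_ecall is a stand-in, not the SBI interface.

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for an SBI ecall taking machine-word-sized arguments. */
static void demo_ecall(unsigned long a0, unsigned long a1)
{
        printf("arg0=%#lx arg1=%#lx\n", a0, a1);
}

static void start_counter(uint64_t ival)
{
#if ULONG_MAX == 0xffffffffUL
        /* 32-bit: the 64-bit initial value is split across two arguments. */
        demo_ecall((unsigned long)ival, (unsigned long)(ival >> 32));
#else
        /* 64-bit: the value fits in one register, the extra slot stays 0. */
        demo_ecall((unsigned long)ival, 0);
#endif
}

int main(void)
{
        start_counter(0x123456789abcdef0ULL);
        return 0;
}
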
diff --git a/drivers/phy/samsung/phy-exynos-pcie.c b/drivers/phy/samsung/phy-exynos-pcie.c
index 578cfe07d07a..53c9230c2907 100644
--- a/drivers/phy/samsung/phy-exynos-pcie.c
+++ b/drivers/phy/samsung/phy-exynos-pcie.c
@@ -51,6 +51,13 @@ static int exynos5433_pcie_phy_init(struct phy *phy)
{
struct exynos_pcie_phy *ep = phy_get_drvdata(phy);
+ regmap_update_bits(ep->pmureg, EXYNOS5433_PMU_PCIE_PHY_OFFSET,
+ BIT(0), 1);
+ regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_GLOBAL_RESET,
+ PCIE_APP_REQ_EXIT_L1_MODE, 0);
+ regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_L1SUB_CM_CON,
+ PCIE_REFCLK_GATING_EN, 0);
+
regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_COMMON_RESET,
PCIE_PHY_RESET, 1);
regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_MAC_RESET,
@@ -109,20 +116,7 @@ static int exynos5433_pcie_phy_init(struct phy *phy)
return 0;
}
-static int exynos5433_pcie_phy_power_on(struct phy *phy)
-{
- struct exynos_pcie_phy *ep = phy_get_drvdata(phy);
-
- regmap_update_bits(ep->pmureg, EXYNOS5433_PMU_PCIE_PHY_OFFSET,
- BIT(0), 1);
- regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_GLOBAL_RESET,
- PCIE_APP_REQ_EXIT_L1_MODE, 0);
- regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_L1SUB_CM_CON,
- PCIE_REFCLK_GATING_EN, 0);
- return 0;
-}
-
-static int exynos5433_pcie_phy_power_off(struct phy *phy)
+static int exynos5433_pcie_phy_exit(struct phy *phy)
{
struct exynos_pcie_phy *ep = phy_get_drvdata(phy);
@@ -135,8 +129,7 @@ static int exynos5433_pcie_phy_power_off(struct phy *phy)
static const struct phy_ops exynos5433_phy_ops = {
.init = exynos5433_pcie_phy_init,
- .power_on = exynos5433_pcie_phy_power_on,
- .power_off = exynos5433_pcie_phy_power_off,
+ .exit = exynos5433_pcie_phy_exit,
.owner = THIS_MODULE,
};
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index bff144c97e66..1cf74b0c42e5 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -311,7 +311,7 @@ config PINCTRL_MICROCHIP_SGPIO
LED controller.
config PINCTRL_OCELOT
- bool "Pinctrl driver for the Microsemi Ocelot and Jaguar2 SoCs"
+ tristate "Pinctrl driver for the Microsemi Ocelot and Jaguar2 SoCs"
depends on OF
depends on HAS_IOMEM
select GPIOLIB
diff --git a/drivers/pinctrl/aspeed/pinmux-aspeed.h b/drivers/pinctrl/aspeed/pinmux-aspeed.h
index 4d7548686f39..aaa78a613196 100644
--- a/drivers/pinctrl/aspeed/pinmux-aspeed.h
+++ b/drivers/pinctrl/aspeed/pinmux-aspeed.h
@@ -632,7 +632,7 @@ struct aspeed_pin_desc {
SIG_EXPR_LIST_ALIAS(pin, sig, group)
/**
- * Similar to the above, but for pins with a dual expressions (DE) and
+ * Similar to the above, but for pins with a dual expressions (DE)
* and a single group (SG) of pins.
*
* @pin: The pin the signal will be routed to
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index dad453054776..7857e612a100 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -507,7 +507,7 @@ static void bcm2835_gpio_irq_config(struct bcm2835_pinctrl *pc,
}
}
-static void bcm2835_gpio_irq_enable(struct irq_data *data)
+static void bcm2835_gpio_irq_unmask(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct bcm2835_pinctrl *pc = gpiochip_get_data(chip);
@@ -516,13 +516,15 @@ static void bcm2835_gpio_irq_enable(struct irq_data *data)
unsigned bank = GPIO_REG_OFFSET(gpio);
unsigned long flags;
+ gpiochip_enable_irq(chip, gpio);
+
raw_spin_lock_irqsave(&pc->irq_lock[bank], flags);
set_bit(offset, &pc->enabled_irq_map[bank]);
bcm2835_gpio_irq_config(pc, gpio, true);
raw_spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
}
-static void bcm2835_gpio_irq_disable(struct irq_data *data)
+static void bcm2835_gpio_irq_mask(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct bcm2835_pinctrl *pc = gpiochip_get_data(chip);
@@ -537,6 +539,8 @@ static void bcm2835_gpio_irq_disable(struct irq_data *data)
bcm2835_gpio_set_bit(pc, GPEDS0, gpio);
clear_bit(offset, &pc->enabled_irq_map[bank]);
raw_spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
+
+ gpiochip_disable_irq(chip, gpio);
}
static int __bcm2835_gpio_irq_set_type_disabled(struct bcm2835_pinctrl *pc,
@@ -693,16 +697,15 @@ static int bcm2835_gpio_irq_set_wake(struct irq_data *data, unsigned int on)
return ret;
}
-static struct irq_chip bcm2835_gpio_irq_chip = {
+static const struct irq_chip bcm2835_gpio_irq_chip = {
.name = MODULE_NAME,
- .irq_enable = bcm2835_gpio_irq_enable,
- .irq_disable = bcm2835_gpio_irq_disable,
.irq_set_type = bcm2835_gpio_irq_set_type,
.irq_ack = bcm2835_gpio_irq_ack,
- .irq_mask = bcm2835_gpio_irq_disable,
- .irq_unmask = bcm2835_gpio_irq_enable,
+ .irq_mask = bcm2835_gpio_irq_mask,
+ .irq_unmask = bcm2835_gpio_irq_unmask,
.irq_set_wake = bcm2835_gpio_irq_set_wake,
- .flags = IRQCHIP_MASK_ON_SUSPEND,
+ .flags = (IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE),
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int bcm2835_pctl_get_groups_count(struct pinctrl_dev *pctldev)
@@ -1280,7 +1283,7 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
pinctrl_add_gpio_range(pc->pctl_dev, &pc->gpio_range);
girq = &pc->gpio_chip.irq;
- girq->chip = &bcm2835_gpio_irq_chip;
+ gpio_irq_chip_set_chip(girq, &bcm2835_gpio_irq_chip);
girq->parent_handler = bcm2835_gpio_irq_handler;
girq->num_parents = BCM2835_NUM_IRQS;
girq->parents = devm_kcalloc(dev, BCM2835_NUM_IRQS,
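
The bcm2835 change is the usual immutable-irqchip conversion: the chip becomes const, the enable/disable callbacks are dropped, and mask/unmask call gpiochip_disable_irq()/gpiochip_enable_irq() themselves so the GPIO core no longer has to patch the chip structure. A hedged, self-contained sketch of that shape (plain C, not <linux/irq.h>; all demo_* names are invented):

#include <stdio.h>

#define DEMO_IRQCHIP_IMMUTABLE 0x1

struct demo_irq_chip {
        const char *name;
        void (*irq_mask)(unsigned int gpio);
        void (*irq_unmask)(unsigned int gpio);
        unsigned int flags;
};

/* Stand-ins for gpiochip_enable_irq()/gpiochip_disable_irq(). */
static void demo_gpiochip_enable_irq(unsigned int gpio)  { printf("gpio %u taken for irq\n", gpio); }
static void demo_gpiochip_disable_irq(unsigned int gpio) { printf("gpio %u released\n", gpio); }

static void demo_irq_unmask(unsigned int gpio)
{
        demo_gpiochip_enable_irq(gpio);         /* bookkeeping the core used to do */
        printf("hw unmask %u\n", gpio);
}

static void demo_irq_mask(unsigned int gpio)
{
        printf("hw mask %u\n", gpio);
        demo_gpiochip_disable_irq(gpio);
}

/* const: with IRQCHIP_IMMUTABLE the core never writes to the chip again. */
static const struct demo_irq_chip demo_chip = {
        .name       = "demo",
        .irq_mask   = demo_irq_mask,
        .irq_unmask = demo_irq_unmask,
        .flags      = DEMO_IRQCHIP_IMMUTABLE,
};

int main(void)
{
        demo_chip.irq_unmask(42);
        demo_chip.irq_mask(42);
        return 0;
}
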
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index ffe39336fcac..9e57f4c62e60 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -126,7 +126,7 @@ struct pinctrl_dev *get_pinctrl_dev_from_of_node(struct device_node *np)
mutex_lock(&pinctrldev_list_mutex);
list_for_each_entry(pctldev, &pinctrldev_list, node)
- if (pctldev->dev->of_node == np) {
+ if (device_match_of_node(pctldev->dev, np)) {
mutex_unlock(&pinctrldev_list_mutex);
return pctldev;
}
diff --git a/drivers/pinctrl/freescale/pinctrl-imx93.c b/drivers/pinctrl/freescale/pinctrl-imx93.c
index 417e41b37a6f..91b3ee1e6fa9 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx93.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx93.c
@@ -247,6 +247,7 @@ static const struct of_device_id imx93_pinctrl_of_match[] = {
{ .compatible = "fsl,imx93-iomuxc", },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, imx93_pinctrl_of_match);
static int imx93_pinctrl_probe(struct platform_device *pdev)
{
diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig
index e5ec8b8956da..078eec8af4a4 100644
--- a/drivers/pinctrl/intel/Kconfig
+++ b/drivers/pinctrl/intel/Kconfig
@@ -151,6 +151,14 @@ config PINCTRL_LEWISBURG
This pinctrl driver provides an interface that allows configuring
of Intel Lewisburg pins and using them as GPIOs.
+config PINCTRL_METEORLAKE
+ tristate "Intel Meteor Lake pinctrl and GPIO driver"
+ depends on ACPI
+ select PINCTRL_INTEL
+ help
+ This pinctrl driver provides an interface that allows configuring
+ of Intel Meteor Lake pins and using them as GPIOs.
+
config PINCTRL_SUNRISEPOINT
tristate "Intel Sunrisepoint pinctrl and GPIO driver"
depends on ACPI
diff --git a/drivers/pinctrl/intel/Makefile b/drivers/pinctrl/intel/Makefile
index 181ffcf34d62..bb87e7bc7b20 100644
--- a/drivers/pinctrl/intel/Makefile
+++ b/drivers/pinctrl/intel/Makefile
@@ -18,5 +18,6 @@ obj-$(CONFIG_PINCTRL_ICELAKE) += pinctrl-icelake.o
obj-$(CONFIG_PINCTRL_JASPERLAKE) += pinctrl-jasperlake.o
obj-$(CONFIG_PINCTRL_LAKEFIELD) += pinctrl-lakefield.o
obj-$(CONFIG_PINCTRL_LEWISBURG) += pinctrl-lewisburg.o
+obj-$(CONFIG_PINCTRL_METEORLAKE) += pinctrl-meteorlake.o
obj-$(CONFIG_PINCTRL_SUNRISEPOINT) += pinctrl-sunrisepoint.o
obj-$(CONFIG_PINCTRL_TIGERLAKE) += pinctrl-tigerlake.o
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 31f8f271628c..67db79f38051 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -603,7 +603,7 @@ static const char *byt_get_group_name(struct pinctrl_dev *pctldev,
{
struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctldev);
- return vg->soc->groups[selector].name;
+ return vg->soc->groups[selector].grp.name;
}
static int byt_get_group_pins(struct pinctrl_dev *pctldev,
@@ -613,8 +613,8 @@ static int byt_get_group_pins(struct pinctrl_dev *pctldev,
{
struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctldev);
- *pins = vg->soc->groups[selector].pins;
- *num_pins = vg->soc->groups[selector].npins;
+ *pins = vg->soc->groups[selector].grp.pins;
+ *num_pins = vg->soc->groups[selector].grp.npins;
return 0;
}
@@ -662,15 +662,15 @@ static void byt_set_group_simple_mux(struct intel_pinctrl *vg,
raw_spin_lock_irqsave(&byt_lock, flags);
- for (i = 0; i < group.npins; i++) {
+ for (i = 0; i < group.grp.npins; i++) {
void __iomem *padcfg0;
u32 value;
- padcfg0 = byt_gpio_reg(vg, group.pins[i], BYT_CONF0_REG);
+ padcfg0 = byt_gpio_reg(vg, group.grp.pins[i], BYT_CONF0_REG);
if (!padcfg0) {
dev_warn(vg->dev,
"Group %s, pin %i not muxed (no padcfg0)\n",
- group.name, i);
+ group.grp.name, i);
continue;
}
@@ -692,15 +692,15 @@ static void byt_set_group_mixed_mux(struct intel_pinctrl *vg,
raw_spin_lock_irqsave(&byt_lock, flags);
- for (i = 0; i < group.npins; i++) {
+ for (i = 0; i < group.grp.npins; i++) {
void __iomem *padcfg0;
u32 value;
- padcfg0 = byt_gpio_reg(vg, group.pins[i], BYT_CONF0_REG);
+ padcfg0 = byt_gpio_reg(vg, group.grp.pins[i], BYT_CONF0_REG);
if (!padcfg0) {
dev_warn(vg->dev,
"Group %s, pin %i not muxed (no padcfg0)\n",
- group.name, i);
+ group.grp.name, i);
continue;
}
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 26b2a425d201..5c4fd16e5b01 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -627,7 +627,7 @@ static const char *chv_get_group_name(struct pinctrl_dev *pctldev,
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- return pctrl->soc->groups[group].name;
+ return pctrl->soc->groups[group].grp.name;
}
static int chv_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group,
@@ -635,8 +635,8 @@ static int chv_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group,
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- *pins = pctrl->soc->groups[group].pins;
- *npins = pctrl->soc->groups[group].npins;
+ *pins = pctrl->soc->groups[group].grp.pins;
+ *npins = pctrl->soc->groups[group].grp.npins;
return 0;
}
@@ -721,16 +721,16 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev,
raw_spin_lock_irqsave(&chv_lock, flags);
/* Check first that the pad is not locked */
- for (i = 0; i < grp->npins; i++) {
- if (chv_pad_locked(pctrl, grp->pins[i])) {
+ for (i = 0; i < grp->grp.npins; i++) {
+ if (chv_pad_locked(pctrl, grp->grp.pins[i])) {
raw_spin_unlock_irqrestore(&chv_lock, flags);
- dev_warn(dev, "unable to set mode for locked pin %u\n", grp->pins[i]);
+ dev_warn(dev, "unable to set mode for locked pin %u\n", grp->grp.pins[i]);
return -EBUSY;
}
}
- for (i = 0; i < grp->npins; i++) {
- int pin = grp->pins[i];
+ for (i = 0; i < grp->grp.npins; i++) {
+ int pin = grp->grp.pins[i];
unsigned int mode;
bool invert_oe;
u32 value;
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index ffc045f7bf00..52ecd66ce357 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -279,7 +279,7 @@ static const char *intel_get_group_name(struct pinctrl_dev *pctldev,
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- return pctrl->soc->groups[group].name;
+ return pctrl->soc->groups[group].grp.name;
}
static int intel_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group,
@@ -287,8 +287,8 @@ static int intel_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group,
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- *pins = pctrl->soc->groups[group].pins;
- *npins = pctrl->soc->groups[group].npins;
+ *pins = pctrl->soc->groups[group].grp.pins;
+ *npins = pctrl->soc->groups[group].grp.npins;
return 0;
}
@@ -391,19 +391,19 @@ static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev,
* All pins in the groups needs to be accessible and writable
* before we can enable the mux for this group.
*/
- for (i = 0; i < grp->npins; i++) {
- if (!intel_pad_usable(pctrl, grp->pins[i])) {
+ for (i = 0; i < grp->grp.npins; i++) {
+ if (!intel_pad_usable(pctrl, grp->grp.pins[i])) {
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return -EBUSY;
}
}
/* Now enable the mux setting for each pin in the group */
- for (i = 0; i < grp->npins; i++) {
+ for (i = 0; i < grp->grp.npins; i++) {
void __iomem *padcfg0;
u32 value;
- padcfg0 = intel_get_padcfg(pctrl, grp->pins[i], PADCFG0);
+ padcfg0 = intel_get_padcfg(pctrl, grp->grp.pins[i], PADCFG0);
value = readl(padcfg0);
value &= ~PADCFG0_PMODE_MASK;
@@ -1641,16 +1641,14 @@ EXPORT_SYMBOL_GPL(intel_pinctrl_probe_by_uid);
const struct intel_pinctrl_soc_data *intel_pinctrl_get_soc_data(struct platform_device *pdev)
{
+ const struct intel_pinctrl_soc_data * const *table;
const struct intel_pinctrl_soc_data *data = NULL;
- const struct intel_pinctrl_soc_data **table;
- struct acpi_device *adev;
- unsigned int i;
- adev = ACPI_COMPANION(&pdev->dev);
- if (adev) {
- const void *match = device_get_match_data(&pdev->dev);
+ table = device_get_match_data(&pdev->dev);
+ if (table) {
+ struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
+ unsigned int i;
- table = (const struct intel_pinctrl_soc_data **)match;
for (i = 0; table[i]; i++) {
if (!strcmp(adev->pnp.unique_id, table[i]->uid)) {
data = table[i];
@@ -1664,7 +1662,7 @@ const struct intel_pinctrl_soc_data *intel_pinctrl_get_soc_data(struct platform_
if (!id)
return ERR_PTR(-ENODEV);
- table = (const struct intel_pinctrl_soc_data **)id->driver_data;
+ table = (const struct intel_pinctrl_soc_data * const *)id->driver_data;
data = table[pdev->id];
}
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
index 710341bb67cc..65628423bf63 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.h
+++ b/drivers/pinctrl/intel/pinctrl-intel.h
@@ -24,17 +24,12 @@ struct device;
/**
* struct intel_pingroup - Description about group of pins
- * @name: Name of the groups
- * @pins: All pins in this group
- * @npins: Number of pins in this groups
- * @mode: Native mode in which the group is muxed out @pins. Used if @modes
- * is %NULL.
+ * @grp: Generic data of the pin group (name and pins)
+ * @mode: Native mode in which the group is muxed out @pins. Used if @modes is %NULL.
* @modes: If not %NULL this will hold mode for each pin in @pins
*/
struct intel_pingroup {
- const char *name;
- const unsigned int *pins;
- size_t npins;
+ struct pingroup grp;
unsigned short mode;
const unsigned int *modes;
};
@@ -156,15 +151,11 @@ struct intel_community {
* a single integer or an array of integers in which case mode is per
* pin.
*/
-#define PIN_GROUP(n, p, m) \
- { \
- .name = (n), \
- .pins = (p), \
- .npins = ARRAY_SIZE((p)), \
- .mode = __builtin_choose_expr( \
- __builtin_constant_p((m)), (m), 0), \
- .modes = __builtin_choose_expr( \
- __builtin_constant_p((m)), NULL, (m)), \
+#define PIN_GROUP(n, p, m) \
+ { \
+ .grp = PINCTRL_PINGROUP((n), (p), ARRAY_SIZE((p))), \
+ .mode = __builtin_choose_expr(__builtin_constant_p((m)), (m), 0), \
+ .modes = __builtin_choose_expr(__builtin_constant_p((m)), NULL, (m)), \
}
#define FUNCTION(n, g) \
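
All of the Intel pinctrl hunks above follow from this header change: struct intel_pingroup now embeds the generic struct pingroup, so every driver reads groups[i].grp.name / .grp.pins / .grp.npins instead of keeping its own copies. A generic C sketch of that embedding; the demo_* names are invented for the example and only mirror the shape of the kernel structures.

#include <stdio.h>

/* Generic part shared by drivers, mirroring struct pingroup. */
struct demo_pingroup {
        const char *name;
        const unsigned int *pins;
        unsigned int npins;
};

#define DEMO_PINGROUP(n, p) \
        { .name = (n), .pins = (p), .npins = sizeof(p) / sizeof((p)[0]) }

/* Driver-specific wrapper, mirroring struct intel_pingroup. */
struct demo_intel_pingroup {
        struct demo_pingroup grp;
        unsigned short mode;
};

static const unsigned int uart_pins[] = { 10, 11, 12, 13 };

static const struct demo_intel_pingroup uart_grp = {
        .grp  = DEMO_PINGROUP("uart0_grp", uart_pins),
        .mode = 1,
};

int main(void)
{
        /* Callers now go through .grp, as in groups[i].grp.name above. */
        printf("%s: %u pins, mode %u\n",
               uart_grp.grp.name, uart_grp.grp.npins, uart_grp.mode);
        return 0;
}
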
diff --git a/drivers/pinctrl/intel/pinctrl-lynxpoint.c b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
index 4fb39eb30902..5d1abee30f8f 100644
--- a/drivers/pinctrl/intel/pinctrl-lynxpoint.c
+++ b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
@@ -282,7 +282,7 @@ static const char *lp_get_group_name(struct pinctrl_dev *pctldev,
{
struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
- return lg->soc->groups[selector].name;
+ return lg->soc->groups[selector].grp.name;
}
static int lp_get_group_pins(struct pinctrl_dev *pctldev,
@@ -292,8 +292,8 @@ static int lp_get_group_pins(struct pinctrl_dev *pctldev,
{
struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
- *pins = lg->soc->groups[selector].pins;
- *num_pins = lg->soc->groups[selector].npins;
+ *pins = lg->soc->groups[selector].grp.pins;
+ *num_pins = lg->soc->groups[selector].grp.npins;
return 0;
}
@@ -366,8 +366,8 @@ static int lp_pinmux_set_mux(struct pinctrl_dev *pctldev,
raw_spin_lock_irqsave(&lg->lock, flags);
/* Now enable the mux setting for each pin in the group */
- for (i = 0; i < grp->npins; i++) {
- void __iomem *reg = lp_gpio_reg(&lg->chip, grp->pins[i], LP_CONFIG1);
+ for (i = 0; i < grp->grp.npins; i++) {
+ void __iomem *reg = lp_gpio_reg(&lg->chip, grp->grp.pins[i], LP_CONFIG1);
u32 value;
value = ioread32(reg);
diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c
index 3ae141e0b421..5e752818adb4 100644
--- a/drivers/pinctrl/intel/pinctrl-merrifield.c
+++ b/drivers/pinctrl/intel/pinctrl-merrifield.c
@@ -520,7 +520,7 @@ static const char *mrfld_get_group_name(struct pinctrl_dev *pctldev,
{
struct mrfld_pinctrl *mp = pinctrl_dev_get_drvdata(pctldev);
- return mp->groups[group].name;
+ return mp->groups[group].grp.name;
}
static int mrfld_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group,
@@ -528,8 +528,8 @@ static int mrfld_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group,
{
struct mrfld_pinctrl *mp = pinctrl_dev_get_drvdata(pctldev);
- *pins = mp->groups[group].pins;
- *npins = mp->groups[group].npins;
+ *pins = mp->groups[group].grp.pins;
+ *npins = mp->groups[group].grp.npins;
return 0;
}
@@ -604,15 +604,15 @@ static int mrfld_pinmux_set_mux(struct pinctrl_dev *pctldev,
* All pins in the groups needs to be accessible and writable
* before we can enable the mux for this group.
*/
- for (i = 0; i < grp->npins; i++) {
- if (!mrfld_buf_available(mp, grp->pins[i]))
+ for (i = 0; i < grp->grp.npins; i++) {
+ if (!mrfld_buf_available(mp, grp->grp.pins[i]))
return -EBUSY;
}
/* Now enable the mux setting for each pin in the group */
raw_spin_lock_irqsave(&mp->lock, flags);
- for (i = 0; i < grp->npins; i++)
- mrfld_update_bufcfg(mp, grp->pins[i], bits, mask);
+ for (i = 0; i < grp->grp.npins; i++)
+ mrfld_update_bufcfg(mp, grp->grp.pins[i], bits, mask);
raw_spin_unlock_irqrestore(&mp->lock, flags);
return 0;
diff --git a/drivers/pinctrl/intel/pinctrl-meteorlake.c b/drivers/pinctrl/intel/pinctrl-meteorlake.c
new file mode 100644
index 000000000000..9576dcd1cb29
--- /dev/null
+++ b/drivers/pinctrl/intel/pinctrl-meteorlake.c
@@ -0,0 +1,417 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Meteor Lake PCH pinctrl/GPIO driver
+ *
+ * Copyright (C) 2022, Intel Corporation
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ */
+
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-intel.h"
+
+#define MTL_PAD_OWN 0x0b0
+#define MTL_PADCFGLOCK 0x110
+#define MTL_HOSTSW_OWN 0x140
+#define MTL_GPI_IS 0x200
+#define MTL_GPI_IE 0x210
+
+#define MTL_GPP(r, s, e, g) \
+ { \
+ .reg_num = (r), \
+ .base = (s), \
+ .size = ((e) - (s) + 1), \
+ .gpio_base = (g), \
+ }
+
+#define MTL_COMMUNITY(b, s, e, g) \
+ { \
+ .barno = (b), \
+ .padown_offset = MTL_PAD_OWN, \
+ .padcfglock_offset = MTL_PADCFGLOCK, \
+ .hostown_offset = MTL_HOSTSW_OWN, \
+ .is_offset = MTL_GPI_IS, \
+ .ie_offset = MTL_GPI_IE, \
+ .pin_base = (s), \
+ .npins = ((e) - (s) + 1), \
+ .gpps = (g), \
+ .ngpps = ARRAY_SIZE(g), \
+ }
+
+/* Meteor Lake-P */
+static const struct pinctrl_pin_desc mtlp_pins[] = {
+ /* CPU */
+ PINCTRL_PIN(0, "PECI"),
+ PINCTRL_PIN(1, "UFS_RESET_B"),
+ PINCTRL_PIN(2, "VIDSOUT"),
+ PINCTRL_PIN(3, "VIDSCK"),
+ PINCTRL_PIN(4, "VIDALERT_B"),
+ /* GPP_V */
+ PINCTRL_PIN(5, "BATLOW_B"),
+ PINCTRL_PIN(6, "AC_PRESENT"),
+ PINCTRL_PIN(7, "SOC_WAKE_B"),
+ PINCTRL_PIN(8, "PWRBTN_B"),
+ PINCTRL_PIN(9, "SLP_S3_B"),
+ PINCTRL_PIN(10, "SLP_S4_B"),
+ PINCTRL_PIN(11, "SLP_A_B"),
+ PINCTRL_PIN(12, "GPP_V_7"),
+ PINCTRL_PIN(13, "SUSCLK"),
+ PINCTRL_PIN(14, "SLP_WLAN_B"),
+ PINCTRL_PIN(15, "SLP_S5_B"),
+ PINCTRL_PIN(16, "LANPHYPC"),
+ PINCTRL_PIN(17, "SLP_LAN_B"),
+ PINCTRL_PIN(18, "GPP_V_13"),
+ PINCTRL_PIN(19, "WAKE_B"),
+ PINCTRL_PIN(20, "GPP_V_15"),
+ PINCTRL_PIN(21, "GPP_V_16"),
+ PINCTRL_PIN(22, "GPP_V_17"),
+ PINCTRL_PIN(23, "GPP_V_18"),
+ PINCTRL_PIN(24, "CATERR_B"),
+ PINCTRL_PIN(25, "PROCHOT_B"),
+ PINCTRL_PIN(26, "THERMTRIP_B"),
+ PINCTRL_PIN(27, "DSI_DE_TE_2_GENLOCK_REF"),
+ PINCTRL_PIN(28, "DSI_DE_TE_1_DISP_UTILS"),
+ /* GPP_C */
+ PINCTRL_PIN(29, "SMBCLK"),
+ PINCTRL_PIN(30, "SMBDATA"),
+ PINCTRL_PIN(31, "SMBALERT_B"),
+ PINCTRL_PIN(32, "SML0CLK"),
+ PINCTRL_PIN(33, "SML0DATA"),
+ PINCTRL_PIN(34, "GPP_C_5"),
+ PINCTRL_PIN(35, "GPP_C_6"),
+ PINCTRL_PIN(36, "GPP_C_7"),
+ PINCTRL_PIN(37, "GPP_C_8"),
+ PINCTRL_PIN(38, "GPP_C_9"),
+ PINCTRL_PIN(39, "GPP_C_10"),
+ PINCTRL_PIN(40, "GPP_C_11"),
+ PINCTRL_PIN(41, "GPP_C_12"),
+ PINCTRL_PIN(42, "GPP_C_13"),
+ PINCTRL_PIN(43, "GPP_C_14"),
+ PINCTRL_PIN(44, "GPP_C_15"),
+ PINCTRL_PIN(45, "GPP_C_16"),
+ PINCTRL_PIN(46, "GPP_C_17"),
+ PINCTRL_PIN(47, "GPP_C_18"),
+ PINCTRL_PIN(48, "GPP_C_19"),
+ PINCTRL_PIN(49, "GPP_C_20"),
+ PINCTRL_PIN(50, "GPP_C_21"),
+ PINCTRL_PIN(51, "GPP_C_22"),
+ PINCTRL_PIN(52, "GPP_C_23"),
+ /* GPP_A */
+ PINCTRL_PIN(53, "ESPI_IO_0"),
+ PINCTRL_PIN(54, "ESPI_IO_1"),
+ PINCTRL_PIN(55, "ESPI_IO_2"),
+ PINCTRL_PIN(56, "ESPI_IO_3"),
+ PINCTRL_PIN(57, "ESPI_CS0_B"),
+ PINCTRL_PIN(58, "ESPI_CLK"),
+ PINCTRL_PIN(59, "ESPI_RESET_B"),
+ PINCTRL_PIN(60, "GPP_A_7"),
+ PINCTRL_PIN(61, "GPP_A_8"),
+ PINCTRL_PIN(62, "GPP_A_9"),
+ PINCTRL_PIN(63, "GPP_A_10"),
+ PINCTRL_PIN(64, "GPP_A_11"),
+ PINCTRL_PIN(65, "GPP_A_12"),
+ PINCTRL_PIN(66, "ESPI_CS1_B"),
+ PINCTRL_PIN(67, "ESPI_CS2_B"),
+ PINCTRL_PIN(68, "ESPI_CS3_B"),
+ PINCTRL_PIN(69, "ESPI_ALERT0_B"),
+ PINCTRL_PIN(70, "ESPI_ALERT1_B"),
+ PINCTRL_PIN(71, "ESPI_ALERT2_B"),
+ PINCTRL_PIN(72, "ESPI_ALERT3_B"),
+ PINCTRL_PIN(73, "GPP_A_20"),
+ PINCTRL_PIN(74, "GPP_A_21"),
+ PINCTRL_PIN(75, "GPP_A_22"),
+ PINCTRL_PIN(76, "GPP_A_23"),
+ PINCTRL_PIN(77, "ESPI_CLK_LOOPBK"),
+ /* GPP_E */
+ PINCTRL_PIN(78, "GPP_E_0"),
+ PINCTRL_PIN(79, "GPP_E_1"),
+ PINCTRL_PIN(80, "GPP_E_2"),
+ PINCTRL_PIN(81, "GPP_E_3"),
+ PINCTRL_PIN(82, "GPP_E_4"),
+ PINCTRL_PIN(83, "GPP_E_5"),
+ PINCTRL_PIN(84, "GPP_E_6"),
+ PINCTRL_PIN(85, "GPP_E_7"),
+ PINCTRL_PIN(86, "GPP_E_8"),
+ PINCTRL_PIN(87, "GPP_E_9"),
+ PINCTRL_PIN(88, "GPP_E_10"),
+ PINCTRL_PIN(89, "GPP_E_11"),
+ PINCTRL_PIN(90, "GPP_E_12"),
+ PINCTRL_PIN(91, "GPP_E_13"),
+ PINCTRL_PIN(92, "GPP_E_14"),
+ PINCTRL_PIN(93, "SLP_DRAM_B"),
+ PINCTRL_PIN(94, "GPP_E_16"),
+ PINCTRL_PIN(95, "GPP_E_17"),
+ PINCTRL_PIN(96, "GPP_E_18"),
+ PINCTRL_PIN(97, "GPP_E_19"),
+ PINCTRL_PIN(98, "GPP_E_20"),
+ PINCTRL_PIN(99, "GPP_E_21"),
+ PINCTRL_PIN(100, "DNX_FORCE_RELOAD"),
+ PINCTRL_PIN(101, "GPP_E_23"),
+ PINCTRL_PIN(102, "THC0_GSPI0_CLK_LOOPBK"),
+ /* GPP_H */
+ PINCTRL_PIN(103, "GPP_H_0"),
+ PINCTRL_PIN(104, "GPP_H_1"),
+ PINCTRL_PIN(105, "GPP_H_2"),
+ PINCTRL_PIN(106, "GPP_H_3"),
+ PINCTRL_PIN(107, "GPP_H_4"),
+ PINCTRL_PIN(108, "GPP_H_5"),
+ PINCTRL_PIN(109, "GPP_H_6"),
+ PINCTRL_PIN(110, "GPP_H_7"),
+ PINCTRL_PIN(111, "GPP_H_8"),
+ PINCTRL_PIN(112, "GPP_H_9"),
+ PINCTRL_PIN(113, "GPP_H_10"),
+ PINCTRL_PIN(114, "GPP_H_11"),
+ PINCTRL_PIN(115, "GPP_H_12"),
+ PINCTRL_PIN(116, "CPU_C10_GATE_B"),
+ PINCTRL_PIN(117, "GPP_H_14"),
+ PINCTRL_PIN(118, "GPP_H_15"),
+ PINCTRL_PIN(119, "GPP_H_16"),
+ PINCTRL_PIN(120, "GPP_H_17"),
+ PINCTRL_PIN(121, "GPP_H_18"),
+ PINCTRL_PIN(122, "GPP_H_19"),
+ PINCTRL_PIN(123, "GPP_H_20"),
+ PINCTRL_PIN(124, "GPP_H_21"),
+ PINCTRL_PIN(125, "GPP_H_22"),
+ PINCTRL_PIN(126, "GPP_H_23"),
+ PINCTRL_PIN(127, "LPI3C1_CLK_LOOPBK"),
+ PINCTRL_PIN(128, "I3C0_CLK_LOOPBK"),
+ /* GPP_F */
+ PINCTRL_PIN(129, "CNV_BRI_DT"),
+ PINCTRL_PIN(130, "CNV_BRI_RSP"),
+ PINCTRL_PIN(131, "CNV_RGI_DT"),
+ PINCTRL_PIN(132, "CNV_RGI_RSP"),
+ PINCTRL_PIN(133, "CNV_RF_RESET_B"),
+ PINCTRL_PIN(134, "CRF_CLKREQ"),
+ PINCTRL_PIN(135, "GPP_F_6"),
+ PINCTRL_PIN(136, "FUSA_DIAGTEST_EN"),
+ PINCTRL_PIN(137, "FUSA_DIAGTEST_MODE"),
+ PINCTRL_PIN(138, "BOOTMPC"),
+ PINCTRL_PIN(139, "GPP_F_10"),
+ PINCTRL_PIN(140, "GPP_F_11"),
+ PINCTRL_PIN(141, "GSXDOUT"),
+ PINCTRL_PIN(142, "GSXSLOAD"),
+ PINCTRL_PIN(143, "GSXDIN"),
+ PINCTRL_PIN(144, "GSXSRESETB"),
+ PINCTRL_PIN(145, "GSXCLK"),
+ PINCTRL_PIN(146, "GMII_MDC_0"),
+ PINCTRL_PIN(147, "GMII_MDIO_0"),
+ PINCTRL_PIN(148, "GPP_F_19"),
+ PINCTRL_PIN(149, "GPP_F_20"),
+ PINCTRL_PIN(150, "GPP_F_21"),
+ PINCTRL_PIN(151, "GPP_F_22"),
+ PINCTRL_PIN(152, "GPP_F_23"),
+ PINCTRL_PIN(153, "THC1_GSPI1_CLK_LOOPBK"),
+ PINCTRL_PIN(154, "GSPI0A_CLK_LOOPBK"),
+ /* SPI0 */
+ PINCTRL_PIN(155, "SPI0_IO_2"),
+ PINCTRL_PIN(156, "SPI0_IO_3"),
+ PINCTRL_PIN(157, "SPI0_MOSI_IO_0"),
+ PINCTRL_PIN(158, "SPI0_MISO_IO_1"),
+ PINCTRL_PIN(159, "SPI0_TPM_CS_B"),
+ PINCTRL_PIN(160, "SPI0_FLASH_0_CS_B"),
+ PINCTRL_PIN(161, "SPI0_FLASH_1_CS_B"),
+ PINCTRL_PIN(162, "SPI0_CLK"),
+ PINCTRL_PIN(163, "L_BKLTEN"),
+ PINCTRL_PIN(164, "L_BKLTCTL"),
+ PINCTRL_PIN(165, "L_VDDEN"),
+ PINCTRL_PIN(166, "SYS_PWROK"),
+ PINCTRL_PIN(167, "SYS_RESET_B"),
+ PINCTRL_PIN(168, "MLK_RST_B"),
+ PINCTRL_PIN(169, "SPI0_CLK_LOOPBK"),
+ /* vGPIO_3 */
+ PINCTRL_PIN(170, "ESPI_USB_OCB_0"),
+ PINCTRL_PIN(171, "ESPI_USB_OCB_1"),
+ PINCTRL_PIN(172, "ESPI_USB_OCB_2"),
+ PINCTRL_PIN(173, "ESPI_USB_OCB_3"),
+ PINCTRL_PIN(174, "USB_CPU_OCB_0"),
+ PINCTRL_PIN(175, "USB_CPU_OCB_1"),
+ PINCTRL_PIN(176, "USB_CPU_OCB_2"),
+ PINCTRL_PIN(177, "USB_CPU_OCB_3"),
+ PINCTRL_PIN(178, "TS0_IN_INT"),
+ PINCTRL_PIN(179, "TS1_IN_INT"),
+ PINCTRL_PIN(180, "THC0_WOT_INT"),
+ PINCTRL_PIN(181, "THC1_WOT_INT"),
+ PINCTRL_PIN(182, "THC0_WHC_INT"),
+ PINCTRL_PIN(183, "THC1_WHC_INT"),
+ /* GPP_S */
+ PINCTRL_PIN(184, "GPP_S_0"),
+ PINCTRL_PIN(185, "GPP_S_1"),
+ PINCTRL_PIN(186, "GPP_S_2"),
+ PINCTRL_PIN(187, "GPP_S_3"),
+ PINCTRL_PIN(188, "GPP_S_4"),
+ PINCTRL_PIN(189, "GPP_S_5"),
+ PINCTRL_PIN(190, "GPP_S_6"),
+ PINCTRL_PIN(191, "GPP_S_7"),
+ /* JTAG */
+ PINCTRL_PIN(192, "JTAG_MBPB0"),
+ PINCTRL_PIN(193, "JTAG_MBPB1"),
+ PINCTRL_PIN(194, "JTAG_MBPB2"),
+ PINCTRL_PIN(195, "JTAG_MBPB3"),
+ PINCTRL_PIN(196, "JTAG_TDO"),
+ PINCTRL_PIN(197, "PRDY_B"),
+ PINCTRL_PIN(198, "PREQ_B"),
+ PINCTRL_PIN(199, "JTAG_TDI"),
+ PINCTRL_PIN(200, "JTAG_TMS"),
+ PINCTRL_PIN(201, "JTAG_TCK"),
+ PINCTRL_PIN(202, "DBG_PMODE"),
+ PINCTRL_PIN(203, "JTAG_TRST_B"),
+ /* GPP_B */
+ PINCTRL_PIN(204, "ADM_VID_0"),
+ PINCTRL_PIN(205, "ADM_VID_1"),
+ PINCTRL_PIN(206, "GPP_B_2"),
+ PINCTRL_PIN(207, "GPP_B_3"),
+ PINCTRL_PIN(208, "GPP_B_4"),
+ PINCTRL_PIN(209, "GPP_B_5"),
+ PINCTRL_PIN(210, "GPP_B_6"),
+ PINCTRL_PIN(211, "GPP_B_7"),
+ PINCTRL_PIN(212, "GPP_B_8"),
+ PINCTRL_PIN(213, "GPP_B_9"),
+ PINCTRL_PIN(214, "GPP_B_10"),
+ PINCTRL_PIN(215, "GPP_B_11"),
+ PINCTRL_PIN(216, "SLP_S0_B"),
+ PINCTRL_PIN(217, "PLTRST_B"),
+ PINCTRL_PIN(218, "GPP_B_14"),
+ PINCTRL_PIN(219, "GPP_B_15"),
+ PINCTRL_PIN(220, "GPP_B_16"),
+ PINCTRL_PIN(221, "GPP_B_17"),
+ PINCTRL_PIN(222, "GPP_B_18"),
+ PINCTRL_PIN(223, "GPP_B_19"),
+ PINCTRL_PIN(224, "GPP_B_20"),
+ PINCTRL_PIN(225, "GPP_B_21"),
+ PINCTRL_PIN(226, "GPP_B_22"),
+ PINCTRL_PIN(227, "GPP_B_23"),
+ PINCTRL_PIN(228, "ISH_I3C0_CLK_LOOPBK"),
+ /* GPP_D */
+ PINCTRL_PIN(229, "GPP_D_0"),
+ PINCTRL_PIN(230, "GPP_D_1"),
+ PINCTRL_PIN(231, "GPP_D_2"),
+ PINCTRL_PIN(232, "GPP_D_3"),
+ PINCTRL_PIN(233, "GPP_D_4"),
+ PINCTRL_PIN(234, "GPP_D_5"),
+ PINCTRL_PIN(235, "GPP_D_6"),
+ PINCTRL_PIN(236, "GPP_D_7"),
+ PINCTRL_PIN(237, "GPP_D_8"),
+ PINCTRL_PIN(238, "GPP_D_9"),
+ PINCTRL_PIN(239, "HDA_BCLK"),
+ PINCTRL_PIN(240, "HDA_SYNC"),
+ PINCTRL_PIN(241, "HDA_SDO"),
+ PINCTRL_PIN(242, "HDA_SDI_0"),
+ PINCTRL_PIN(243, "GPP_D_14"),
+ PINCTRL_PIN(244, "GPP_D_15"),
+ PINCTRL_PIN(245, "GPP_D_16"),
+ PINCTRL_PIN(246, "HDA_RST_B"),
+ PINCTRL_PIN(247, "GPP_D_18"),
+ PINCTRL_PIN(248, "GPP_D_19"),
+ PINCTRL_PIN(249, "GPP_D_20"),
+ PINCTRL_PIN(250, "UFS_REFCLK"),
+ PINCTRL_PIN(251, "BPKI3C_SDA"),
+ PINCTRL_PIN(252, "BPKI3C_SCL"),
+ PINCTRL_PIN(253, "BOOTHALT_B"),
+ /* vGPIO */
+ PINCTRL_PIN(254, "CNV_BTEN"),
+ PINCTRL_PIN(255, "CNV_BT_HOST_WAKEB"),
+ PINCTRL_PIN(256, "CNV_BT_IF_SELECT"),
+ PINCTRL_PIN(257, "vCNV_BT_UART_TXD"),
+ PINCTRL_PIN(258, "vCNV_BT_UART_RXD"),
+ PINCTRL_PIN(259, "vCNV_BT_UART_CTS_B"),
+ PINCTRL_PIN(260, "vCNV_BT_UART_RTS_B"),
+ PINCTRL_PIN(261, "vCNV_MFUART1_TXD"),
+ PINCTRL_PIN(262, "vCNV_MFUART1_RXD"),
+ PINCTRL_PIN(263, "vCNV_MFUART1_CTS_B"),
+ PINCTRL_PIN(264, "vCNV_MFUART1_RTS_B"),
+ PINCTRL_PIN(265, "vUART0_TXD"),
+ PINCTRL_PIN(266, "vUART0_RXD"),
+ PINCTRL_PIN(267, "vUART0_CTS_B"),
+ PINCTRL_PIN(268, "vUART0_RTS_B"),
+ PINCTRL_PIN(269, "vISH_UART0_TXD"),
+ PINCTRL_PIN(270, "vISH_UART0_RXD"),
+ PINCTRL_PIN(271, "vISH_UART0_CTS_B"),
+ PINCTRL_PIN(272, "vISH_UART0_RTS_B"),
+ PINCTRL_PIN(273, "vCNV_BT_I2S_BCLK"),
+ PINCTRL_PIN(274, "vCNV_BT_I2S_WS_SYNC"),
+ PINCTRL_PIN(275, "vCNV_BT_I2S_SDO"),
+ PINCTRL_PIN(276, "vCNV_BT_I2S_SDI"),
+ PINCTRL_PIN(277, "vI2S2_SCLK"),
+ PINCTRL_PIN(278, "vI2S2_SFRM"),
+ PINCTRL_PIN(279, "vI2S2_TXD"),
+ PINCTRL_PIN(280, "vI2S2_RXD"),
+ PINCTRL_PIN(281, "vCNV_BT_I2S_BCLK_2"),
+ PINCTRL_PIN(282, "vCNV_BT_I2S_WS_SYNC_2"),
+ PINCTRL_PIN(283, "vCNV_BT_I2S_SDO_2"),
+ PINCTRL_PIN(284, "vCNV_BT_I2S_SDI_2"),
+ PINCTRL_PIN(285, "vI2S2_SCLK_2"),
+ PINCTRL_PIN(286, "vI2S2_SFRM_2"),
+ PINCTRL_PIN(287, "vI2S2_TXD_2"),
+ PINCTRL_PIN(288, "vI2S2_RXD_2"),
+};
+
+static const struct intel_padgroup mtlp_community0_gpps[] = {
+ MTL_GPP(0, 0, 4, 0), /* CPU */
+ MTL_GPP(1, 5, 28, 32), /* GPP_V */
+ MTL_GPP(2, 29, 52, 64), /* GPP_C */
+};
+
+static const struct intel_padgroup mtlp_community1_gpps[] = {
+ MTL_GPP(0, 53, 77, 96), /* GPP_A */
+ MTL_GPP(1, 78, 102, 128), /* GPP_E */
+};
+
+static const struct intel_padgroup mtlp_community3_gpps[] = {
+ MTL_GPP(0, 103, 128, 160), /* GPP_H */
+ MTL_GPP(1, 129, 154, 192), /* GPP_F */
+ MTL_GPP(2, 155, 169, 224), /* SPI0 */
+ MTL_GPP(3, 170, 183, 256), /* vGPIO_3 */
+};
+
+static const struct intel_padgroup mtlp_community4_gpps[] = {
+ MTL_GPP(0, 184, 191, 288), /* GPP_S */
+ MTL_GPP(1, 192, 203, 320), /* JTAG */
+};
+
+static const struct intel_padgroup mtlp_community5_gpps[] = {
+ MTL_GPP(0, 204, 228, 352), /* GPP_B */
+ MTL_GPP(1, 229, 253, 384), /* GPP_D */
+ MTL_GPP(2, 254, 285, 416), /* vGPIO_0 */
+ MTL_GPP(3, 286, 288, 448), /* vGPIO_1 */
+};
+
+static const struct intel_community mtlp_communities[] = {
+ MTL_COMMUNITY(0, 0, 52, mtlp_community0_gpps),
+ MTL_COMMUNITY(1, 53, 102, mtlp_community1_gpps),
+ MTL_COMMUNITY(2, 103, 183, mtlp_community3_gpps),
+ MTL_COMMUNITY(3, 184, 203, mtlp_community4_gpps),
+ MTL_COMMUNITY(4, 204, 288, mtlp_community5_gpps),
+};
+
+static const struct intel_pinctrl_soc_data mtlp_soc_data = {
+ .pins = mtlp_pins,
+ .npins = ARRAY_SIZE(mtlp_pins),
+ .communities = mtlp_communities,
+ .ncommunities = ARRAY_SIZE(mtlp_communities),
+};
+
+static const struct acpi_device_id mtl_pinctrl_acpi_match[] = {
+ { "INTC1083", (kernel_ulong_t)&mtlp_soc_data },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, mtl_pinctrl_acpi_match);
+
+static INTEL_PINCTRL_PM_OPS(mtl_pinctrl_pm_ops);
+
+static struct platform_driver mtl_pinctrl_driver = {
+ .probe = intel_pinctrl_probe_by_hid,
+ .driver = {
+ .name = "meteorlake-pinctrl",
+ .acpi_match_table = mtl_pinctrl_acpi_match,
+ .pm = &mtl_pinctrl_pm_ops,
+ },
+};
+module_platform_driver(mtl_pinctrl_driver);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Meteor Lake PCH pinctrl/GPIO driver");
+MODULE_LICENSE("GPL v2");
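
In the new Meteor Lake driver, the MTL_GPP() and MTL_COMMUNITY() macros encode inclusive pin ranges, so a group's size is e - s + 1 and each group's Linux GPIO numbers start at a fixed 32-wide slot. A quick illustrative sketch of that arithmetic, mirroring the GPP_C entry above; the demo type names are invented.

#include <stdio.h>

struct demo_padgroup {
        unsigned int base;      /* first pin in the group */
        unsigned int size;      /* number of pins */
        unsigned int gpio_base; /* first Linux GPIO number for the group */
};

/* Inclusive pin range [s, e], like MTL_GPP(): size is e - s + 1. */
#define DEMO_GPP(s, e, g) { .base = (s), .size = (e) - (s) + 1, .gpio_base = (g) }

int main(void)
{
        /* Mirrors MTL_GPP(2, 29, 52, 64) for GPP_C in the table above. */
        struct demo_padgroup gpp_c = DEMO_GPP(29, 52, 64);

        printf("GPP_C: %u pins, GPIOs %u..%u\n", gpp_c.size,
               gpp_c.gpio_base, gpp_c.gpio_base + gpp_c.size - 1);
        return 0;
}
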
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8192.c b/drivers/pinctrl/mediatek/pinctrl-mt8192.c
index acccde9262ba..78c02b7c81f0 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8192.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8192.c
@@ -1107,24 +1107,10 @@ static const struct mtk_pin_field_calc mt8192_pin_pupd_range[] = {
PIN_FIELD_BASE(54, 54, 1, 0x0060, 0x10, 2, 1),
PIN_FIELD_BASE(55, 55, 1, 0x0060, 0x10, 4, 1),
PIN_FIELD_BASE(56, 56, 1, 0x0060, 0x10, 3, 1),
- PIN_FIELD_BASE(118, 118, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(119, 119, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(120, 120, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(121, 121, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(122, 122, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(123, 123, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(124, 124, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(125, 125, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(139, 139, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(140, 140, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(141, 141, 4, 0x00e0, 0x10, 31, 1),
- PIN_FIELD_BASE(142, 142, 4, 0x00e0, 0x10, 31, 1),
PIN_FIELD_BASE(152, 152, 7, 0x0090, 0x10, 3, 1),
PIN_FIELD_BASE(153, 153, 7, 0x0090, 0x10, 2, 1),
PIN_FIELD_BASE(154, 154, 7, 0x0090, 0x10, 0, 1),
PIN_FIELD_BASE(155, 155, 7, 0x0090, 0x10, 1, 1),
- PIN_FIELD_BASE(160, 160, 7, 0x00f0, 0x10, 31, 1),
- PIN_FIELD_BASE(161, 161, 7, 0x00f0, 0x10, 31, 1),
PIN_FIELD_BASE(183, 183, 9, 0x0030, 0x10, 1, 1),
PIN_FIELD_BASE(184, 184, 9, 0x0030, 0x10, 2, 1),
PIN_FIELD_BASE(185, 185, 9, 0x0030, 0x10, 4, 1),
@@ -1137,12 +1123,6 @@ static const struct mtk_pin_field_calc mt8192_pin_pupd_range[] = {
PIN_FIELD_BASE(192, 192, 9, 0x0030, 0x10, 0, 1),
PIN_FIELD_BASE(193, 193, 9, 0x0030, 0x10, 5, 1),
PIN_FIELD_BASE(194, 194, 9, 0x0030, 0x10, 11, 1),
- PIN_FIELD_BASE(200, 200, 8, 0x0070, 0x10, 31, 1),
- PIN_FIELD_BASE(201, 201, 8, 0x0070, 0x10, 31, 1),
- PIN_FIELD_BASE(202, 202, 5, 0x0070, 0x10, 31, 1),
- PIN_FIELD_BASE(203, 203, 5, 0x0070, 0x10, 31, 1),
- PIN_FIELD_BASE(204, 204, 8, 0x0070, 0x10, 31, 1),
- PIN_FIELD_BASE(205, 205, 8, 0x0070, 0x10, 31, 1),
};
static const struct mtk_pin_field_calc mt8192_pin_r0_range[] = {
@@ -1164,24 +1144,10 @@ static const struct mtk_pin_field_calc mt8192_pin_r0_range[] = {
PIN_FIELD_BASE(54, 54, 1, 0x0080, 0x10, 2, 1),
PIN_FIELD_BASE(55, 55, 1, 0x0080, 0x10, 4, 1),
PIN_FIELD_BASE(56, 56, 1, 0x0080, 0x10, 3, 1),
- PIN_FIELD_BASE(118, 118, 4, 0x00e0, 0x10, 0, 1),
- PIN_FIELD_BASE(119, 119, 4, 0x00e0, 0x10, 12, 1),
- PIN_FIELD_BASE(120, 120, 4, 0x00e0, 0x10, 10, 1),
- PIN_FIELD_BASE(121, 121, 4, 0x00e0, 0x10, 22, 1),
- PIN_FIELD_BASE(122, 122, 4, 0x00e0, 0x10, 8, 1),
- PIN_FIELD_BASE(123, 123, 4, 0x00e0, 0x10, 20, 1),
- PIN_FIELD_BASE(124, 124, 4, 0x00e0, 0x10, 6, 1),
- PIN_FIELD_BASE(125, 125, 4, 0x00e0, 0x10, 18, 1),
- PIN_FIELD_BASE(139, 139, 4, 0x00e0, 0x10, 4, 1),
- PIN_FIELD_BASE(140, 140, 4, 0x00e0, 0x10, 16, 1),
- PIN_FIELD_BASE(141, 141, 4, 0x00e0, 0x10, 2, 1),
- PIN_FIELD_BASE(142, 142, 4, 0x00e0, 0x10, 14, 1),
PIN_FIELD_BASE(152, 152, 7, 0x00c0, 0x10, 3, 1),
PIN_FIELD_BASE(153, 153, 7, 0x00c0, 0x10, 2, 1),
PIN_FIELD_BASE(154, 154, 7, 0x00c0, 0x10, 0, 1),
PIN_FIELD_BASE(155, 155, 7, 0x00c0, 0x10, 1, 1),
- PIN_FIELD_BASE(160, 160, 7, 0x00f0, 0x10, 0, 1),
- PIN_FIELD_BASE(161, 161, 7, 0x00f0, 0x10, 2, 1),
PIN_FIELD_BASE(183, 183, 9, 0x0040, 0x10, 1, 1),
PIN_FIELD_BASE(184, 184, 9, 0x0040, 0x10, 2, 1),
PIN_FIELD_BASE(185, 185, 9, 0x0040, 0x10, 4, 1),
@@ -1194,12 +1160,6 @@ static const struct mtk_pin_field_calc mt8192_pin_r0_range[] = {
PIN_FIELD_BASE(192, 192, 9, 0x0040, 0x10, 0, 1),
PIN_FIELD_BASE(193, 193, 9, 0x0040, 0x10, 5, 1),
PIN_FIELD_BASE(194, 194, 9, 0x0040, 0x10, 11, 1),
- PIN_FIELD_BASE(200, 200, 8, 0x0070, 0x10, 2, 1),
- PIN_FIELD_BASE(201, 201, 8, 0x0070, 0x10, 6, 1),
- PIN_FIELD_BASE(202, 202, 5, 0x0070, 0x10, 0, 1),
- PIN_FIELD_BASE(203, 203, 5, 0x0070, 0x10, 2, 1),
- PIN_FIELD_BASE(204, 204, 8, 0x0070, 0x10, 0, 1),
- PIN_FIELD_BASE(205, 205, 8, 0x0070, 0x10, 4, 1),
};
static const struct mtk_pin_field_calc mt8192_pin_r1_range[] = {
@@ -1221,24 +1181,10 @@ static const struct mtk_pin_field_calc mt8192_pin_r1_range[] = {
PIN_FIELD_BASE(54, 54, 1, 0x0090, 0x10, 2, 1),
PIN_FIELD_BASE(55, 55, 1, 0x0090, 0x10, 4, 1),
PIN_FIELD_BASE(56, 56, 1, 0x0090, 0x10, 3, 1),
- PIN_FIELD_BASE(118, 118, 4, 0x00e0, 0x10, 1, 1),
- PIN_FIELD_BASE(119, 119, 4, 0x00e0, 0x10, 13, 1),
- PIN_FIELD_BASE(120, 120, 4, 0x00e0, 0x10, 11, 1),
- PIN_FIELD_BASE(121, 121, 4, 0x00e0, 0x10, 23, 1),
- PIN_FIELD_BASE(122, 122, 4, 0x00e0, 0x10, 9, 1),
- PIN_FIELD_BASE(123, 123, 4, 0x00e0, 0x10, 21, 1),
- PIN_FIELD_BASE(124, 124, 4, 0x00e0, 0x10, 7, 1),
- PIN_FIELD_BASE(125, 125, 4, 0x00e0, 0x10, 19, 1),
- PIN_FIELD_BASE(139, 139, 4, 0x00e0, 0x10, 5, 1),
- PIN_FIELD_BASE(140, 140, 4, 0x00e0, 0x10, 17, 1),
- PIN_FIELD_BASE(141, 141, 4, 0x00e0, 0x10, 3, 1),
- PIN_FIELD_BASE(142, 142, 4, 0x00e0, 0x10, 15, 1),
PIN_FIELD_BASE(152, 152, 7, 0x00d0, 0x10, 3, 1),
PIN_FIELD_BASE(153, 153, 7, 0x00d0, 0x10, 2, 1),
PIN_FIELD_BASE(154, 154, 7, 0x00d0, 0x10, 0, 1),
PIN_FIELD_BASE(155, 155, 7, 0x00d0, 0x10, 1, 1),
- PIN_FIELD_BASE(160, 160, 7, 0x00f0, 0x10, 1, 1),
- PIN_FIELD_BASE(161, 161, 7, 0x00f0, 0x10, 3, 1),
PIN_FIELD_BASE(183, 183, 9, 0x0050, 0x10, 1, 1),
PIN_FIELD_BASE(184, 184, 9, 0x0050, 0x10, 2, 1),
PIN_FIELD_BASE(185, 185, 9, 0x0050, 0x10, 4, 1),
@@ -1251,83 +1197,169 @@ static const struct mtk_pin_field_calc mt8192_pin_r1_range[] = {
PIN_FIELD_BASE(192, 192, 9, 0x0050, 0x10, 0, 1),
PIN_FIELD_BASE(193, 193, 9, 0x0050, 0x10, 5, 1),
PIN_FIELD_BASE(194, 194, 9, 0x0050, 0x10, 11, 1),
- PIN_FIELD_BASE(200, 200, 8, 0x0070, 0x10, 3, 1),
- PIN_FIELD_BASE(201, 201, 8, 0x0070, 0x10, 7, 1),
- PIN_FIELD_BASE(202, 202, 5, 0x0070, 0x10, 1, 1),
- PIN_FIELD_BASE(203, 203, 5, 0x0070, 0x10, 3, 1),
- PIN_FIELD_BASE(204, 204, 8, 0x0070, 0x10, 1, 1),
- PIN_FIELD_BASE(205, 205, 8, 0x0070, 0x10, 5, 1),
};
-static const struct mtk_pin_field_calc mt8192_pin_e1e0en_range[] = {
- PIN_FIELD_BASE(118, 118, 4, 0x0040, 0x10, 0, 1),
- PIN_FIELD_BASE(119, 119, 4, 0x0040, 0x10, 18, 1),
- PIN_FIELD_BASE(120, 120, 4, 0x0040, 0x10, 15, 1),
- PIN_FIELD_BASE(121, 121, 4, 0x0050, 0x10, 3, 1),
- PIN_FIELD_BASE(122, 122, 4, 0x0040, 0x10, 12, 1),
- PIN_FIELD_BASE(123, 123, 4, 0x0050, 0x10, 0, 1),
- PIN_FIELD_BASE(124, 124, 4, 0x0040, 0x10, 9, 1),
- PIN_FIELD_BASE(125, 125, 4, 0x0040, 0x10, 27, 1),
- PIN_FIELD_BASE(139, 139, 4, 0x0040, 0x10, 6, 1),
- PIN_FIELD_BASE(140, 140, 4, 0x0040, 0x10, 24, 1),
- PIN_FIELD_BASE(141, 141, 4, 0x0040, 0x10, 3, 1),
- PIN_FIELD_BASE(142, 142, 4, 0x0040, 0x10, 21, 1),
- PIN_FIELD_BASE(160, 160, 7, 0x0030, 0x10, 0, 1),
- PIN_FIELD_BASE(161, 161, 7, 0x0030, 0x10, 3, 1),
- PIN_FIELD_BASE(200, 200, 8, 0x0010, 0x10, 3, 1),
- PIN_FIELD_BASE(201, 201, 8, 0x0010, 0x10, 9, 1),
- PIN_FIELD_BASE(202, 202, 5, 0x0020, 0x10, 0, 1),
- PIN_FIELD_BASE(203, 203, 5, 0x0020, 0x10, 3, 1),
- PIN_FIELD_BASE(204, 204, 8, 0x0010, 0x10, 0, 1),
- PIN_FIELD_BASE(205, 205, 8, 0x0010, 0x10, 6, 1),
-};
+static const struct mtk_pin_field_calc mt8192_pin_drv_adv_range[] = {
+ PIN_FIELD_BASE(89, 89, 2, 0x0040, 0x10, 0, 5),
+ PIN_FIELD_BASE(90, 90, 2, 0x0040, 0x10, 5, 5),
-static const struct mtk_pin_field_calc mt8192_pin_e0_range[] = {
- PIN_FIELD_BASE(118, 118, 4, 0x0040, 0x10, 1, 1),
- PIN_FIELD_BASE(119, 119, 4, 0x0040, 0x10, 19, 1),
- PIN_FIELD_BASE(120, 120, 4, 0x0040, 0x10, 16, 1),
- PIN_FIELD_BASE(121, 121, 4, 0x0050, 0x10, 4, 1),
- PIN_FIELD_BASE(122, 122, 4, 0x0040, 0x10, 13, 1),
- PIN_FIELD_BASE(123, 123, 4, 0x0050, 0x10, 1, 1),
- PIN_FIELD_BASE(124, 124, 4, 0x0040, 0x10, 10, 1),
- PIN_FIELD_BASE(125, 125, 4, 0x0040, 0x10, 28, 1),
- PIN_FIELD_BASE(139, 139, 4, 0x0040, 0x10, 7, 1),
- PIN_FIELD_BASE(140, 140, 4, 0x0040, 0x10, 25, 1),
- PIN_FIELD_BASE(141, 141, 4, 0x0040, 0x10, 4, 1),
- PIN_FIELD_BASE(142, 142, 4, 0x0040, 0x10, 22, 1),
- PIN_FIELD_BASE(160, 160, 7, 0x0030, 0x10, 1, 1),
- PIN_FIELD_BASE(161, 161, 7, 0x0030, 0x10, 4, 1),
- PIN_FIELD_BASE(200, 200, 8, 0x0010, 0x10, 4, 1),
- PIN_FIELD_BASE(201, 201, 8, 0x0010, 0x10, 10, 1),
- PIN_FIELD_BASE(202, 202, 5, 0x0020, 0x10, 1, 1),
- PIN_FIELD_BASE(203, 203, 5, 0x0020, 0x10, 4, 1),
- PIN_FIELD_BASE(204, 204, 8, 0x0010, 0x10, 1, 1),
- PIN_FIELD_BASE(205, 205, 8, 0x0010, 0x10, 7, 1),
+ PIN_FIELD_BASE(118, 118, 4, 0x0040, 0x10, 0, 3),
+ PIN_FIELD_BASE(119, 119, 4, 0x0040, 0x10, 18, 3),
+ PIN_FIELD_BASE(120, 120, 4, 0x0040, 0x10, 15, 3),
+ PIN_FIELD_BASE(121, 121, 4, 0x0050, 0x10, 3, 3),
+ PIN_FIELD_BASE(122, 122, 4, 0x0040, 0x10, 12, 3),
+ PIN_FIELD_BASE(123, 123, 4, 0x0050, 0x10, 0, 3),
+ PIN_FIELD_BASE(124, 124, 4, 0x0040, 0x10, 9, 3),
+ PIN_FIELD_BASE(125, 125, 4, 0x0040, 0x10, 27, 3),
+ PIN_FIELD_BASE(139, 139, 4, 0x0040, 0x10, 6, 3),
+ PIN_FIELD_BASE(140, 140, 4, 0x0040, 0x10, 24, 3),
+ PIN_FIELD_BASE(141, 141, 4, 0x0040, 0x10, 3, 3),
+ PIN_FIELD_BASE(142, 142, 4, 0x0040, 0x10, 21, 3),
+ PIN_FIELD_BASE(160, 160, 7, 0x0030, 0x10, 0, 3),
+ PIN_FIELD_BASE(161, 161, 7, 0x0030, 0x10, 3, 3),
+ PIN_FIELD_BASE(200, 200, 8, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(201, 201, 8, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(202, 202, 5, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(203, 203, 5, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(204, 204, 8, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(205, 205, 8, 0x0010, 0x10, 6, 3),
};
-static const struct mtk_pin_field_calc mt8192_pin_e1_range[] = {
- PIN_FIELD_BASE(118, 118, 4, 0x0040, 0x10, 2, 1),
- PIN_FIELD_BASE(119, 119, 4, 0x0040, 0x10, 20, 1),
- PIN_FIELD_BASE(120, 120, 4, 0x0040, 0x10, 17, 1),
- PIN_FIELD_BASE(121, 121, 4, 0x0050, 0x10, 5, 1),
- PIN_FIELD_BASE(122, 122, 4, 0x0040, 0x10, 14, 1),
- PIN_FIELD_BASE(123, 123, 4, 0x0050, 0x10, 2, 1),
- PIN_FIELD_BASE(124, 124, 4, 0x0040, 0x10, 11, 1),
- PIN_FIELD_BASE(125, 125, 4, 0x0040, 0x10, 29, 1),
- PIN_FIELD_BASE(139, 139, 4, 0x0040, 0x10, 8, 1),
- PIN_FIELD_BASE(140, 140, 4, 0x0040, 0x10, 26, 1),
- PIN_FIELD_BASE(141, 141, 4, 0x0040, 0x10, 5, 1),
- PIN_FIELD_BASE(142, 142, 4, 0x0040, 0x10, 23, 1),
- PIN_FIELD_BASE(160, 160, 7, 0x0030, 0x10, 2, 1),
- PIN_FIELD_BASE(161, 161, 7, 0x0030, 0x10, 5, 1),
- PIN_FIELD_BASE(200, 200, 8, 0x0010, 0x10, 5, 1),
- PIN_FIELD_BASE(201, 201, 8, 0x0010, 0x10, 11, 1),
- PIN_FIELD_BASE(202, 202, 5, 0x0020, 0x10, 2, 1),
- PIN_FIELD_BASE(203, 203, 5, 0x0020, 0x10, 5, 1),
- PIN_FIELD_BASE(204, 204, 8, 0x0010, 0x10, 2, 1),
- PIN_FIELD_BASE(205, 205, 8, 0x0010, 0x10, 8, 1),
+static const struct mtk_pin_field_calc mt8192_pin_rsel_range[] = {
+ PIN_FIELD_BASE(118, 118, 4, 0x00e0, 0x10, 0, 2),
+ PIN_FIELD_BASE(119, 119, 4, 0x00e0, 0x10, 12, 2),
+ PIN_FIELD_BASE(120, 120, 4, 0x00e0, 0x10, 10, 2),
+ PIN_FIELD_BASE(121, 121, 4, 0x00e0, 0x10, 22, 2),
+ PIN_FIELD_BASE(122, 122, 4, 0x00e0, 0x10, 8, 2),
+ PIN_FIELD_BASE(123, 123, 4, 0x00e0, 0x10, 20, 2),
+ PIN_FIELD_BASE(124, 124, 4, 0x00e0, 0x10, 6, 2),
+ PIN_FIELD_BASE(125, 125, 4, 0x00e0, 0x10, 18, 2),
+ PIN_FIELD_BASE(139, 139, 4, 0x00e0, 0x10, 4, 2),
+ PIN_FIELD_BASE(140, 140, 4, 0x00e0, 0x10, 16, 2),
+ PIN_FIELD_BASE(141, 141, 4, 0x00e0, 0x10, 2, 2),
+ PIN_FIELD_BASE(142, 142, 4, 0x00e0, 0x10, 14, 2),
+ PIN_FIELD_BASE(160, 160, 7, 0x00f0, 0x10, 0, 2),
+ PIN_FIELD_BASE(161, 161, 7, 0x00f0, 0x10, 2, 2),
+ PIN_FIELD_BASE(200, 200, 8, 0x0070, 0x10, 2, 2),
+ PIN_FIELD_BASE(201, 201, 8, 0x0070, 0x10, 6, 2),
+ PIN_FIELD_BASE(202, 202, 5, 0x0070, 0x10, 0, 2),
+ PIN_FIELD_BASE(203, 203, 5, 0x0070, 0x10, 2, 2),
+ PIN_FIELD_BASE(204, 204, 8, 0x0070, 0x10, 0, 2),
+ PIN_FIELD_BASE(205, 205, 8, 0x0070, 0x10, 4, 2),
};
+static const unsigned int mt8192_pull_type[] = {
+ MTK_PULL_PU_PD_TYPE,/*0*/ MTK_PULL_PU_PD_TYPE,/*1*/
+ MTK_PULL_PU_PD_TYPE,/*2*/ MTK_PULL_PU_PD_TYPE,/*3*/
+ MTK_PULL_PU_PD_TYPE,/*4*/ MTK_PULL_PU_PD_TYPE,/*5*/
+ MTK_PULL_PU_PD_TYPE,/*6*/ MTK_PULL_PU_PD_TYPE,/*7*/
+ MTK_PULL_PU_PD_TYPE,/*8*/ MTK_PULL_PU_PD_TYPE,/*9*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*10*/ MTK_PULL_PUPD_R1R0_TYPE,/*11*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*12*/ MTK_PULL_PUPD_R1R0_TYPE,/*13*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*14*/ MTK_PULL_PUPD_R1R0_TYPE,/*15*/
+ MTK_PULL_PU_PD_TYPE,/*16*/ MTK_PULL_PU_PD_TYPE,/*17*/
+ MTK_PULL_PU_PD_TYPE,/*18*/ MTK_PULL_PU_PD_TYPE,/*19*/
+ MTK_PULL_PU_PD_TYPE,/*20*/ MTK_PULL_PU_PD_TYPE,/*21*/
+ MTK_PULL_PU_PD_TYPE,/*22*/ MTK_PULL_PU_PD_TYPE,/*23*/
+ MTK_PULL_PU_PD_TYPE,/*24*/ MTK_PULL_PU_PD_TYPE,/*25*/
+ MTK_PULL_PU_PD_TYPE,/*26*/ MTK_PULL_PU_PD_TYPE,/*27*/
+ MTK_PULL_PU_PD_TYPE,/*28*/ MTK_PULL_PU_PD_TYPE,/*29*/
+ MTK_PULL_PU_PD_TYPE,/*30*/ MTK_PULL_PU_PD_TYPE,/*31*/
+ MTK_PULL_PU_PD_TYPE,/*32*/ MTK_PULL_PU_PD_TYPE,/*33*/
+ MTK_PULL_PU_PD_TYPE,/*34*/ MTK_PULL_PU_PD_TYPE,/*35*/
+ MTK_PULL_PU_PD_TYPE,/*36*/ MTK_PULL_PU_PD_TYPE,/*37*/
+ MTK_PULL_PU_PD_TYPE,/*38*/ MTK_PULL_PU_PD_TYPE,/*39*/
+ MTK_PULL_PU_PD_TYPE,/*40*/ MTK_PULL_PU_PD_TYPE,/*41*/
+ MTK_PULL_PU_PD_TYPE,/*42*/ MTK_PULL_PU_PD_TYPE,/*43*/
+ MTK_PULL_PU_PD_TYPE,/*44*/ MTK_PULL_PUPD_R1R0_TYPE,/*45*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*46*/ MTK_PULL_PUPD_R1R0_TYPE,/*47*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*48*/ MTK_PULL_PUPD_R1R0_TYPE,/*49*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*50*/ MTK_PULL_PUPD_R1R0_TYPE,/*51*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*52*/ MTK_PULL_PUPD_R1R0_TYPE,/*53*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*54*/ MTK_PULL_PUPD_R1R0_TYPE,/*55*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*56*/ MTK_PULL_PU_PD_TYPE,/*57*/
+ MTK_PULL_PU_PD_TYPE,/*58*/ MTK_PULL_PU_PD_TYPE,/*59*/
+ MTK_PULL_PU_PD_TYPE,/*60*/ MTK_PULL_PU_PD_TYPE,/*61*/
+ MTK_PULL_PU_PD_TYPE,/*62*/ MTK_PULL_PU_PD_TYPE,/*63*/
+ MTK_PULL_PU_PD_TYPE,/*64*/ MTK_PULL_PU_PD_TYPE,/*65*/
+ MTK_PULL_PU_PD_TYPE,/*66*/ MTK_PULL_PU_PD_TYPE,/*67*/
+ MTK_PULL_PU_PD_TYPE,/*68*/ MTK_PULL_PU_PD_TYPE,/*69*/
+ MTK_PULL_PU_PD_TYPE,/*70*/ MTK_PULL_PU_PD_TYPE,/*71*/
+ MTK_PULL_PU_PD_TYPE,/*72*/ MTK_PULL_PU_PD_TYPE,/*73*/
+ MTK_PULL_PU_PD_TYPE,/*74*/ MTK_PULL_PU_PD_TYPE,/*75*/
+ MTK_PULL_PU_PD_TYPE,/*76*/ MTK_PULL_PU_PD_TYPE,/*77*/
+ MTK_PULL_PU_PD_TYPE,/*78*/ MTK_PULL_PU_PD_TYPE,/*79*/
+ MTK_PULL_PU_PD_TYPE,/*80*/ MTK_PULL_PU_PD_TYPE,/*81*/
+ MTK_PULL_PU_PD_TYPE,/*82*/ MTK_PULL_PU_PD_TYPE,/*83*/
+ MTK_PULL_PU_PD_TYPE,/*84*/ MTK_PULL_PU_PD_TYPE,/*85*/
+ MTK_PULL_PU_PD_TYPE,/*86*/ MTK_PULL_PU_PD_TYPE,/*87*/
+ MTK_PULL_PU_PD_TYPE,/*88*/ MTK_PULL_PU_PD_TYPE,/*89*/
+ MTK_PULL_PU_PD_TYPE,/*90*/ MTK_PULL_PU_PD_TYPE,/*91*/
+ MTK_PULL_PU_PD_TYPE,/*92*/ MTK_PULL_PU_PD_TYPE,/*93*/
+ MTK_PULL_PU_PD_TYPE,/*94*/ MTK_PULL_PU_PD_TYPE,/*95*/
+ MTK_PULL_PU_PD_TYPE,/*96*/ MTK_PULL_PU_PD_TYPE,/*97*/
+ MTK_PULL_PU_PD_TYPE,/*98*/ MTK_PULL_PU_PD_TYPE,/*99*/
+ MTK_PULL_PU_PD_TYPE,/*100*/ MTK_PULL_PU_PD_TYPE,/*101*/
+ MTK_PULL_PU_PD_TYPE,/*102*/ MTK_PULL_PU_PD_TYPE,/*103*/
+ MTK_PULL_PU_PD_TYPE,/*104*/ MTK_PULL_PU_PD_TYPE,/*105*/
+ MTK_PULL_PU_PD_TYPE,/*106*/ MTK_PULL_PU_PD_TYPE,/*107*/
+ MTK_PULL_PU_PD_TYPE,/*108*/ MTK_PULL_PU_PD_TYPE,/*109*/
+ MTK_PULL_PU_PD_TYPE,/*110*/ MTK_PULL_PU_PD_TYPE,/*111*/
+ MTK_PULL_PU_PD_TYPE,/*112*/ MTK_PULL_PU_PD_TYPE,/*113*/
+ MTK_PULL_PU_PD_TYPE,/*114*/ MTK_PULL_PU_PD_TYPE,/*115*/
+ MTK_PULL_PU_PD_TYPE,/*116*/ MTK_PULL_PU_PD_TYPE,/*117*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*118*/ MTK_PULL_PU_PD_RSEL_TYPE,/*119*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*120*/ MTK_PULL_PU_PD_RSEL_TYPE,/*121*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*122*/ MTK_PULL_PU_PD_RSEL_TYPE,/*123*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*124*/ MTK_PULL_PU_PD_RSEL_TYPE,/*125*/
+ MTK_PULL_PU_PD_TYPE,/*126*/ MTK_PULL_PU_PD_TYPE,/*127*/
+ MTK_PULL_PU_PD_TYPE,/*128*/ MTK_PULL_PU_PD_TYPE,/*129*/
+ MTK_PULL_PU_PD_TYPE,/*130*/ MTK_PULL_PU_PD_TYPE,/*131*/
+ MTK_PULL_PU_PD_TYPE,/*132*/ MTK_PULL_PU_PD_TYPE,/*133*/
+ MTK_PULL_PU_PD_TYPE,/*134*/ MTK_PULL_PU_PD_TYPE,/*135*/
+ MTK_PULL_PU_PD_TYPE,/*136*/ MTK_PULL_PU_PD_TYPE,/*137*/
+ MTK_PULL_PU_PD_TYPE,/*138*/ MTK_PULL_PU_PD_RSEL_TYPE,/*139*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*140*/ MTK_PULL_PU_PD_RSEL_TYPE,/*141*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*142*/ MTK_PULL_PU_PD_TYPE,/*143*/
+ MTK_PULL_PU_PD_TYPE,/*144*/ MTK_PULL_PU_PD_TYPE,/*145*/
+ MTK_PULL_PU_PD_TYPE,/*146*/ MTK_PULL_PU_PD_TYPE,/*147*/
+ MTK_PULL_PU_PD_TYPE,/*148*/ MTK_PULL_PU_PD_TYPE,/*149*/
+ MTK_PULL_PU_PD_TYPE,/*150*/ MTK_PULL_PU_PD_TYPE,/*151*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*152*/ MTK_PULL_PUPD_R1R0_TYPE,/*153*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*154*/ MTK_PULL_PUPD_R1R0_TYPE,/*155*/
+ MTK_PULL_PU_PD_TYPE,/*156*/ MTK_PULL_PU_PD_TYPE,/*157*/
+ MTK_PULL_PU_PD_TYPE,/*158*/ MTK_PULL_PU_PD_TYPE,/*159*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*160*/ MTK_PULL_PU_PD_RSEL_TYPE,/*161*/
+ MTK_PULL_PU_PD_TYPE,/*162*/ MTK_PULL_PU_PD_TYPE,/*163*/
+ MTK_PULL_PU_PD_TYPE,/*164*/ MTK_PULL_PU_PD_TYPE,/*165*/
+ MTK_PULL_PU_PD_TYPE,/*166*/ MTK_PULL_PU_PD_TYPE,/*167*/
+ MTK_PULL_PU_PD_TYPE,/*168*/ MTK_PULL_PU_PD_TYPE,/*169*/
+ MTK_PULL_PU_PD_TYPE,/*170*/ MTK_PULL_PU_PD_TYPE,/*171*/
+ MTK_PULL_PU_PD_TYPE,/*172*/ MTK_PULL_PU_PD_TYPE,/*173*/
+ MTK_PULL_PU_PD_TYPE,/*174*/ MTK_PULL_PU_PD_TYPE,/*175*/
+ MTK_PULL_PU_PD_TYPE,/*176*/ MTK_PULL_PU_PD_TYPE,/*177*/
+ MTK_PULL_PU_PD_TYPE,/*178*/ MTK_PULL_PU_PD_TYPE,/*179*/
+ MTK_PULL_PU_PD_TYPE,/*180*/ MTK_PULL_PU_PD_TYPE,/*181*/
+ MTK_PULL_PU_PD_TYPE,/*182*/ MTK_PULL_PUPD_R1R0_TYPE,/*183*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*184*/ MTK_PULL_PUPD_R1R0_TYPE,/*185*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*186*/ MTK_PULL_PUPD_R1R0_TYPE,/*187*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*188*/ MTK_PULL_PUPD_R1R0_TYPE,/*189*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*190*/ MTK_PULL_PUPD_R1R0_TYPE,/*191*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*192*/ MTK_PULL_PUPD_R1R0_TYPE,/*193*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*194*/ MTK_PULL_PU_PD_TYPE,/*195*/
+ MTK_PULL_PU_PD_TYPE,/*196*/ MTK_PULL_PU_PD_TYPE,/*197*/
+ MTK_PULL_PU_PD_TYPE,/*198*/ MTK_PULL_PU_PD_TYPE,/*199*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*200*/ MTK_PULL_PU_PD_RSEL_TYPE,/*201*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*202*/ MTK_PULL_PU_PD_RSEL_TYPE,/*203*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*204*/ MTK_PULL_PU_PD_RSEL_TYPE,/*205*/
+ MTK_PULL_PU_PD_TYPE,/*206*/ MTK_PULL_PU_PD_TYPE,/*207*/
+ MTK_PULL_PU_PD_TYPE,/*208*/ MTK_PULL_PU_PD_TYPE,/*209*/
+ MTK_PULL_PU_PD_TYPE,/*210*/ MTK_PULL_PU_PD_TYPE,/*211*/
+ MTK_PULL_PU_PD_TYPE,/*212*/ MTK_PULL_PU_PD_TYPE,/*213*/
+ MTK_PULL_PU_PD_TYPE,/*214*/ MTK_PULL_PU_PD_TYPE,/*215*/
+ MTK_PULL_PU_PD_TYPE,/*216*/ MTK_PULL_PU_PD_TYPE,/*217*/
+ MTK_PULL_PU_PD_TYPE,/*218*/ MTK_PULL_PU_PD_TYPE,/*219*/
+};
static const char * const mt8192_pinctrl_register_base_names[] = {
"iocfg0", "iocfg_rm", "iocfg_bm", "iocfg_bl", "iocfg_br",
@@ -1355,9 +1387,8 @@ static const struct mtk_pin_reg_calc mt8192_reg_cals[PINCTRL_PIN_REG_MAX] = {
[PINCTRL_PIN_REG_PUPD] = MTK_RANGE(mt8192_pin_pupd_range),
[PINCTRL_PIN_REG_R0] = MTK_RANGE(mt8192_pin_r0_range),
[PINCTRL_PIN_REG_R1] = MTK_RANGE(mt8192_pin_r1_range),
- [PINCTRL_PIN_REG_DRV_EN] = MTK_RANGE(mt8192_pin_e1e0en_range),
- [PINCTRL_PIN_REG_DRV_E0] = MTK_RANGE(mt8192_pin_e0_range),
- [PINCTRL_PIN_REG_DRV_E1] = MTK_RANGE(mt8192_pin_e1_range),
+ [PINCTRL_PIN_REG_DRV_ADV] = MTK_RANGE(mt8192_pin_drv_adv_range),
+ [PINCTRL_PIN_REG_RSEL] = MTK_RANGE(mt8192_pin_rsel_range),
};
static const struct mtk_pin_soc mt8192_data = {
@@ -1367,17 +1398,16 @@ static const struct mtk_pin_soc mt8192_data = {
.ngrps = ARRAY_SIZE(mtk_pins_mt8192),
.base_names = mt8192_pinctrl_register_base_names,
.nbase_names = ARRAY_SIZE(mt8192_pinctrl_register_base_names),
+ .pull_type = mt8192_pull_type,
.eint_hw = &mt8192_eint_hw,
.nfuncs = 8,
.gpio_m = 0,
.bias_set_combo = mtk_pinconf_bias_set_combo,
.bias_get_combo = mtk_pinconf_bias_get_combo,
- .drive_set = mtk_pinconf_drive_set_raw,
- .drive_get = mtk_pinconf_drive_get_raw,
- .adv_pull_get = mtk_pinconf_adv_pull_get,
- .adv_pull_set = mtk_pinconf_adv_pull_set,
- .adv_drive_get = mtk_pinconf_adv_drive_get,
- .adv_drive_set = mtk_pinconf_adv_drive_set,
+ .drive_set = mtk_pinconf_drive_set_rev1,
+ .drive_get = mtk_pinconf_drive_get_rev1,
+ .adv_drive_get = mtk_pinconf_adv_drive_get_raw,
+ .adv_drive_set = mtk_pinconf_adv_drive_set_raw,
};
static const struct of_device_id mt8192_pinctrl_of_match[] = {
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.c b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
index a1f93859e7ca..8ef0a97d2bf5 100644
--- a/drivers/pinctrl/mvebu/pinctrl-mvebu.c
+++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
@@ -96,10 +96,12 @@ static struct mvebu_pinctrl_group *mvebu_pinctrl_find_group_by_name(
struct mvebu_pinctrl *pctl, const char *name)
{
unsigned n;
+
for (n = 0; n < pctl->num_groups; n++) {
if (strcmp(name, pctl->groups[n].name) == 0)
return &pctl->groups[n];
}
+
return NULL;
}
@@ -108,6 +110,7 @@ static struct mvebu_mpp_ctrl_setting *mvebu_pinctrl_find_setting_by_val(
unsigned long config)
{
unsigned n;
+
for (n = 0; n < grp->num_settings; n++) {
if (config == grp->settings[n].val) {
if (!pctl->variant || (pctl->variant &
@@ -115,6 +118,7 @@ static struct mvebu_mpp_ctrl_setting *mvebu_pinctrl_find_setting_by_val(
return &grp->settings[n];
}
}
+
return NULL;
}
@@ -123,6 +127,7 @@ static struct mvebu_mpp_ctrl_setting *mvebu_pinctrl_find_setting_by_name(
const char *name)
{
unsigned n;
+
for (n = 0; n < grp->num_settings; n++) {
if (strcmp(name, grp->settings[n].name) == 0) {
if (!pctl->variant || (pctl->variant &
@@ -130,6 +135,7 @@ static struct mvebu_mpp_ctrl_setting *mvebu_pinctrl_find_setting_by_name(
return &grp->settings[n];
}
}
+
return NULL;
}
@@ -137,6 +143,7 @@ static struct mvebu_mpp_ctrl_setting *mvebu_pinctrl_find_gpio_setting(
struct mvebu_pinctrl *pctl, struct mvebu_pinctrl_group *grp)
{
unsigned n;
+
for (n = 0; n < grp->num_settings; n++) {
if (grp->settings[n].flags &
(MVEBU_SETTING_GPO | MVEBU_SETTING_GPI)) {
@@ -145,6 +152,7 @@ static struct mvebu_mpp_ctrl_setting *mvebu_pinctrl_find_gpio_setting(
return &grp->settings[n];
}
}
+
return NULL;
}
@@ -152,10 +160,12 @@ static struct mvebu_pinctrl_function *mvebu_pinctrl_find_function_by_name(
struct mvebu_pinctrl *pctl, const char *name)
{
unsigned n;
+
for (n = 0; n < pctl->num_functions; n++) {
if (strcmp(name, pctl->functions[n].name) == 0)
return &pctl->functions[n];
}
+
return NULL;
}
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 640e50d94f27..f5014d09d81a 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -1421,8 +1421,10 @@ static int nmk_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
has_config = nmk_pinctrl_dt_get_config(np, &configs);
np_config = of_parse_phandle(np, "ste,config", 0);
- if (np_config)
+ if (np_config) {
has_config |= nmk_pinctrl_dt_get_config(np_config, &configs);
+ of_node_put(np_config);
+ }
if (has_config) {
const char *gpio_name;
const char *pin;
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 0645c2c24f50..4691a33bc374 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -6,8 +6,6 @@
* Authors: Ken Xue <Ken.Xue@amd.com>
* Wu, Jeff <Jeff.Wu@amd.com>
*
- * Contact Information: Nehal Shah <Nehal-bakulchandra.Shah@amd.com>
- * Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
*/
#include <linux/err.h>
@@ -31,6 +29,7 @@
#include <linux/bitops.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinmux.h>
#include "core.h"
#include "pinctrl-utils.h"
@@ -203,8 +202,6 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
bool tmr_out_unit;
- unsigned int time;
- unsigned int unit;
bool tmr_large;
char *level_trig;
@@ -218,13 +215,13 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
char *pull_up_sel;
char *pull_up_enable;
char *pull_down_enable;
- char *output_value;
- char *output_enable;
+ char *orientation;
char debounce_value[40];
char *debounce_enable;
for (bank = 0; bank < gpio_dev->hwbank_num; bank++) {
- seq_printf(s, "GPIO bank%d\t", bank);
+ unsigned int time = 0;
+ unsigned int unit = 0;
switch (bank) {
case 0:
@@ -247,8 +244,9 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
/* Illegal bank number, ignore */
continue;
}
+ seq_printf(s, "GPIO bank%d\n", bank);
for (; i < pin_num; i++) {
- seq_printf(s, "pin%d\t", i);
+ seq_printf(s, "📌%d\t", i);
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + i * 4);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
@@ -256,84 +254,91 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
if (pin_reg & BIT(INTERRUPT_ENABLE_OFF)) {
u8 level = (pin_reg >> ACTIVE_LEVEL_OFF) &
ACTIVE_LEVEL_MASK;
- interrupt_enable = "interrupt is enabled|";
+ interrupt_enable = "+";
if (level == ACTIVE_LEVEL_HIGH)
- active_level = "Active high|";
+ active_level = "↑";
else if (level == ACTIVE_LEVEL_LOW)
- active_level = "Active low|";
+ active_level = "↓";
else if (!(pin_reg & BIT(LEVEL_TRIG_OFF)) &&
level == ACTIVE_LEVEL_BOTH)
- active_level = "Active on both|";
+ active_level = "b";
else
- active_level = "Unknown Active level|";
+ active_level = "?";
if (pin_reg & BIT(LEVEL_TRIG_OFF))
- level_trig = "Level trigger|";
+ level_trig = "level";
else
- level_trig = "Edge trigger|";
+ level_trig = " edge";
} else {
- interrupt_enable =
- "interrupt is disabled|";
- active_level = " ";
- level_trig = " ";
+ interrupt_enable = "∅";
+ active_level = "∅";
+ level_trig = " ∅";
}
if (pin_reg & BIT(INTERRUPT_MASK_OFF))
- interrupt_mask =
- "interrupt is unmasked|";
+ interrupt_mask = "-";
else
- interrupt_mask =
- "interrupt is masked|";
+ interrupt_mask = "+";
+ seq_printf(s, "int %s (🎭 %s)| active-%s| %s-🔫| ",
+ interrupt_enable,
+ interrupt_mask,
+ active_level,
+ level_trig);
if (pin_reg & BIT(WAKE_CNTRL_OFF_S0I3))
- wake_cntrl0 = "enable wakeup in S0i3 state|";
+ wake_cntrl0 = "+";
else
- wake_cntrl0 = "disable wakeup in S0i3 state|";
+ wake_cntrl0 = "∅";
+ seq_printf(s, "S0i3 🌅 %s| ", wake_cntrl0);
if (pin_reg & BIT(WAKE_CNTRL_OFF_S3))
- wake_cntrl1 = "enable wakeup in S3 state|";
+ wake_cntrl1 = "+";
else
- wake_cntrl1 = "disable wakeup in S3 state|";
+ wake_cntrl1 = "∅";
+ seq_printf(s, "S3 🌅 %s| ", wake_cntrl1);
if (pin_reg & BIT(WAKE_CNTRL_OFF_S4))
- wake_cntrl2 = "enable wakeup in S4/S5 state|";
+ wake_cntrl2 = "+";
else
- wake_cntrl2 = "disable wakeup in S4/S5 state|";
+ wake_cntrl2 = "∅";
+ seq_printf(s, "S4/S5 🌅 %s| ", wake_cntrl2);
if (pin_reg & BIT(PULL_UP_ENABLE_OFF)) {
- pull_up_enable = "pull-up is enabled|";
+ pull_up_enable = "+";
if (pin_reg & BIT(PULL_UP_SEL_OFF))
- pull_up_sel = "8k pull-up|";
+ pull_up_sel = "8k";
else
- pull_up_sel = "4k pull-up|";
+ pull_up_sel = "4k";
} else {
- pull_up_enable = "pull-up is disabled|";
- pull_up_sel = " ";
+ pull_up_enable = "∅";
+ pull_up_sel = " ";
}
+ seq_printf(s, "pull-↑ %s (%s)| ",
+ pull_up_enable,
+ pull_up_sel);
if (pin_reg & BIT(PULL_DOWN_ENABLE_OFF))
- pull_down_enable = "pull-down is enabled|";
+ pull_down_enable = "+";
else
- pull_down_enable = "Pull-down is disabled|";
+ pull_down_enable = "∅";
+ seq_printf(s, "pull-↓ %s| ", pull_down_enable);
if (pin_reg & BIT(OUTPUT_ENABLE_OFF)) {
- pin_sts = " ";
- output_enable = "output is enabled|";
+ pin_sts = "output";
if (pin_reg & BIT(OUTPUT_VALUE_OFF))
- output_value = "output is high|";
+ orientation = "↑";
else
- output_value = "output is low|";
+ orientation = "↓";
} else {
- output_enable = "output is disabled|";
- output_value = " ";
-
+ pin_sts = "input ";
if (pin_reg & BIT(PIN_STS_OFF))
- pin_sts = "input is high|";
+ orientation = "↑";
else
- pin_sts = "input is low|";
+ orientation = "↓";
}
+ seq_printf(s, "%s %s| ", pin_sts, orientation);
db_cntrl = (DB_CNTRl_MASK << DB_CNTRL_OFF) & pin_reg;
if (db_cntrl) {
@@ -352,27 +357,18 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
unit = 61;
}
if ((DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF) == db_cntrl)
- debounce_enable = "debouncing filter (high and low) enabled|";
+ debounce_enable = "b +";
else if ((DB_TYPE_PRESERVE_LOW_GLITCH << DB_CNTRL_OFF) == db_cntrl)
- debounce_enable = "debouncing filter (low) enabled|";
+ debounce_enable = "↓ +";
else
- debounce_enable = "debouncing filter (high) enabled|";
+ debounce_enable = "↑ +";
- snprintf(debounce_value, sizeof(debounce_value),
- "debouncing timeout is %u (us)|", time * unit);
} else {
- debounce_enable = "debouncing filter disabled|";
- snprintf(debounce_value, sizeof(debounce_value), " ");
+ debounce_enable = " ∅";
}
-
- seq_printf(s, "%s %s %s %s %s %s\n"
- " %s %s %s %s %s %s %s %s %s 0x%x\n",
- level_trig, active_level, interrupt_enable,
- interrupt_mask, wake_cntrl0, wake_cntrl1,
- wake_cntrl2, pin_sts, pull_up_sel,
- pull_up_enable, pull_down_enable,
- output_value, output_enable,
- debounce_enable, debounce_value, pin_reg);
+ snprintf(debounce_value, sizeof(debounce_value), "%u", time * unit);
+ seq_printf(s, "debounce %s (â° %sus)| ", debounce_enable, debounce_value);
+ seq_printf(s, " 0x%x\n", pin_reg);
}
}
}
@@ -917,6 +913,7 @@ static int amd_gpio_suspend(struct device *dev)
{
struct amd_gpio *gpio_dev = dev_get_drvdata(dev);
struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
+ unsigned long flags;
int i;
for (i = 0; i < desc->npins; i++) {
@@ -925,7 +922,9 @@ static int amd_gpio_suspend(struct device *dev)
if (!amd_gpio_should_save(gpio_dev, pin))
continue;
- gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin*4);
+ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+ gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin * 4) & ~PIN_IRQ_PENDING;
+ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
}
return 0;
@@ -935,6 +934,7 @@ static int amd_gpio_resume(struct device *dev)
{
struct amd_gpio *gpio_dev = dev_get_drvdata(dev);
struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
+ unsigned long flags;
int i;
for (i = 0; i < desc->npins; i++) {
@@ -943,7 +943,10 @@ static int amd_gpio_resume(struct device *dev)
if (!amd_gpio_should_save(gpio_dev, pin))
continue;
- writel(gpio_dev->saved_regs[i], gpio_dev->base + pin*4);
+ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+ gpio_dev->saved_regs[i] |= readl(gpio_dev->base + pin * 4) & PIN_IRQ_PENDING;
+ writel(gpio_dev->saved_regs[i], gpio_dev->base + pin * 4);
+ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
}
return 0;
@@ -955,14 +958,115 @@ static const struct dev_pm_ops amd_gpio_pm_ops = {
};
#endif
+static int amd_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(pmx_functions);
+}
+
+static const char *amd_get_fname(struct pinctrl_dev *pctrldev, unsigned int selector)
+{
+ return pmx_functions[selector].name;
+}
+
+static int amd_get_groups(struct pinctrl_dev *pctrldev, unsigned int selector,
+ const char * const **groups,
+ unsigned int * const num_groups)
+{
+ struct amd_gpio *gpio_dev = pinctrl_dev_get_drvdata(pctrldev);
+
+ if (!gpio_dev->iomux_base) {
+ dev_err(&gpio_dev->pdev->dev, "iomux function %d group not supported\n", selector);
+ return -EINVAL;
+ }
+
+ *groups = pmx_functions[selector].groups;
+ *num_groups = pmx_functions[selector].ngroups;
+ return 0;
+}
+
+static int amd_set_mux(struct pinctrl_dev *pctrldev, unsigned int function, unsigned int group)
+{
+ struct amd_gpio *gpio_dev = pinctrl_dev_get_drvdata(pctrldev);
+ struct device *dev = &gpio_dev->pdev->dev;
+ struct pin_desc *pd;
+ int ind, index;
+
+ if (!gpio_dev->iomux_base)
+ return -EINVAL;
+
+ for (index = 0; index < NSELECTS; index++) {
+ if (strcmp(gpio_dev->groups[group].name, pmx_functions[function].groups[index]))
+ continue;
+
+ if (readb(gpio_dev->iomux_base + pmx_functions[function].index) ==
+ FUNCTION_INVALID) {
+ dev_err(dev, "IOMUX_GPIO 0x%x not present or supported\n",
+ pmx_functions[function].index);
+ return -EINVAL;
+ }
+
+ writeb(index, gpio_dev->iomux_base + pmx_functions[function].index);
+
+ if (index != (readb(gpio_dev->iomux_base + pmx_functions[function].index) &
+ FUNCTION_MASK)) {
+ dev_err(dev, "IOMUX_GPIO 0x%x not present or supported\n",
+ pmx_functions[function].index);
+ return -EINVAL;
+ }
+
+ for (ind = 0; ind < gpio_dev->groups[group].npins; ind++) {
+ if (strncmp(gpio_dev->groups[group].name, "IMX_F", strlen("IMX_F")))
+ continue;
+
+ pd = pin_desc_get(gpio_dev->pctrl, gpio_dev->groups[group].pins[ind]);
+ pd->mux_owner = gpio_dev->groups[group].name;
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static const struct pinmux_ops amd_pmxops = {
+ .get_functions_count = amd_get_functions_count,
+ .get_function_name = amd_get_fname,
+ .get_function_groups = amd_get_groups,
+ .set_mux = amd_set_mux,
+};
+
static struct pinctrl_desc amd_pinctrl_desc = {
.pins = kerncz_pins,
.npins = ARRAY_SIZE(kerncz_pins),
.pctlops = &amd_pinctrl_ops,
+ .pmxops = &amd_pmxops,
.confops = &amd_pinconf_ops,
.owner = THIS_MODULE,
};
+static void amd_get_iomux_res(struct amd_gpio *gpio_dev)
+{
+ struct pinctrl_desc *desc = &amd_pinctrl_desc;
+ struct device *dev = &gpio_dev->pdev->dev;
+ int index;
+
+ index = device_property_match_string(dev, "pinctrl-resource-names", "iomux");
+ if (index < 0) {
+ dev_warn(dev, "failed to get iomux index\n");
+ goto out_no_pinmux;
+ }
+
+ gpio_dev->iomux_base = devm_platform_ioremap_resource(gpio_dev->pdev, index);
+ if (IS_ERR(gpio_dev->iomux_base)) {
+ dev_warn(dev, "Failed to get iomux %d io resource\n", index);
+ goto out_no_pinmux;
+ }
+
+ return;
+
+out_no_pinmux:
+ desc->pmxops = NULL;
+}
+
static int amd_gpio_probe(struct platform_device *pdev)
{
int ret = 0;
@@ -977,17 +1081,12 @@ static int amd_gpio_probe(struct platform_device *pdev)
raw_spin_lock_init(&gpio_dev->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
+ gpio_dev->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(gpio_dev->base)) {
dev_err(&pdev->dev, "Failed to get gpio io resource.\n");
- return -EINVAL;
+ return PTR_ERR(gpio_dev->base);
}
- gpio_dev->base = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!gpio_dev->base)
- return -ENOMEM;
-
gpio_dev->irq = platform_get_irq(pdev, 0);
if (gpio_dev->irq < 0)
return gpio_dev->irq;
@@ -1020,6 +1119,7 @@ static int amd_gpio_probe(struct platform_device *pdev)
gpio_dev->ngroups = ARRAY_SIZE(kerncz_groups);
amd_pinctrl_desc.name = dev_name(&pdev->dev);
+ amd_get_iomux_res(gpio_dev);
gpio_dev->pctrl = devm_pinctrl_register(&pdev->dev, &amd_pinctrl_desc,
gpio_dev);
if (IS_ERR(gpio_dev->pctrl)) {
diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h
index 1d4317073654..c8635998465d 100644
--- a/drivers/pinctrl/pinctrl-amd.h
+++ b/drivers/pinctrl/pinctrl-amd.h
@@ -74,23 +74,24 @@
#define CLR_INTR_STAT 0x1UL
-struct amd_pingroup {
- const char *name;
- const unsigned *pins;
- unsigned npins;
-};
+#define NSELECTS 0x4
+
+#define FUNCTION_MASK GENMASK(1, 0)
+#define FUNCTION_INVALID GENMASK(7, 0)
struct amd_function {
const char *name;
- const char * const *groups;
+ const char * const groups[NSELECTS];
unsigned ngroups;
+ int index;
};
struct amd_gpio {
raw_spinlock_t lock;
void __iomem *base;
+ void __iomem *iomux_base;
- const struct amd_pingroup *groups;
+ const struct pingroup *groups;
u32 ngroups;
struct pinctrl_dev *pctrl;
struct gpio_chip gc;
@@ -288,45 +289,1332 @@ static const struct pinctrl_pin_desc kerncz_pins[] = {
PINCTRL_PIN(183, "GPIO_183"),
};
-static const unsigned i2c0_pins[] = {145, 146};
-static const unsigned i2c1_pins[] = {147, 148};
-static const unsigned i2c2_pins[] = {113, 114};
-static const unsigned i2c3_pins[] = {19, 20};
+#define AMD_PINS(...) (const unsigned int []){__VA_ARGS__}
+
+enum amd_functions {
+ IMX_F0_GPIO0,
+ IMX_F1_GPIO0,
+ IMX_F2_GPIO0,
+ IMX_F3_GPIO0,
+ IMX_F0_GPIO1,
+ IMX_F1_GPIO1,
+ IMX_F2_GPIO1,
+ IMX_F3_GPIO1,
+ IMX_F0_GPIO2,
+ IMX_F1_GPIO2,
+ IMX_F2_GPIO2,
+ IMX_F3_GPIO2,
+ IMX_F0_GPIO3,
+ IMX_F1_GPIO3,
+ IMX_F2_GPIO3,
+ IMX_F3_GPIO3,
+ IMX_F0_GPIO4,
+ IMX_F1_GPIO4,
+ IMX_F2_GPIO4,
+ IMX_F3_GPIO4,
+ IMX_F0_GPIO5,
+ IMX_F1_GPIO5,
+ IMX_F2_GPIO5,
+ IMX_F3_GPIO5,
+ IMX_F0_GPIO6,
+ IMX_F1_GPIO6,
+ IMX_F2_GPIO6,
+ IMX_F3_GPIO6,
+ IMX_F0_GPIO7,
+ IMX_F1_GPIO7,
+ IMX_F2_GPIO7,
+ IMX_F3_GPIO7,
+ IMX_F0_GPIO8,
+ IMX_F1_GPIO8,
+ IMX_F2_GPIO8,
+ IMX_F3_GPIO8,
+ IMX_F0_GPIO9,
+ IMX_F1_GPIO9,
+ IMX_F2_GPIO9,
+ IMX_F3_GPIO9,
+ IMX_F0_GPIO10,
+ IMX_F1_GPIO10,
+ IMX_F2_GPIO10,
+ IMX_F3_GPIO10,
+ IMX_F0_GPIO11,
+ IMX_F1_GPIO11,
+ IMX_F2_GPIO11,
+ IMX_F3_GPIO11,
+ IMX_F0_GPIO12,
+ IMX_F1_GPIO12,
+ IMX_F2_GPIO12,
+ IMX_F3_GPIO12,
+ IMX_F0_GPIO13,
+ IMX_F1_GPIO13,
+ IMX_F2_GPIO13,
+ IMX_F3_GPIO13,
+ IMX_F0_GPIO14,
+ IMX_F1_GPIO14,
+ IMX_F2_GPIO14,
+ IMX_F3_GPIO14,
+ IMX_F0_GPIO15,
+ IMX_F1_GPIO15,
+ IMX_F2_GPIO15,
+ IMX_F3_GPIO15,
+ IMX_F0_GPIO16,
+ IMX_F1_GPIO16,
+ IMX_F2_GPIO16,
+ IMX_F3_GPIO16,
+ IMX_F0_GPIO17,
+ IMX_F1_GPIO17,
+ IMX_F2_GPIO17,
+ IMX_F3_GPIO17,
+ IMX_F0_GPIO18,
+ IMX_F1_GPIO18,
+ IMX_F2_GPIO18,
+ IMX_F3_GPIO18,
+ IMX_F0_GPIO19,
+ IMX_F1_GPIO19,
+ IMX_F2_GPIO19,
+ IMX_F3_GPIO19,
+ IMX_F0_GPIO20,
+ IMX_F1_GPIO20,
+ IMX_F2_GPIO20,
+ IMX_F3_GPIO20,
+ IMX_F0_GPIO21,
+ IMX_F1_GPIO21,
+ IMX_F2_GPIO21,
+ IMX_F3_GPIO21,
+ IMX_F0_GPIO22,
+ IMX_F1_GPIO22,
+ IMX_F2_GPIO22,
+ IMX_F3_GPIO22,
+ IMX_F0_GPIO23,
+ IMX_F1_GPIO23,
+ IMX_F2_GPIO23,
+ IMX_F3_GPIO23,
+ IMX_F0_GPIO24,
+ IMX_F1_GPIO24,
+ IMX_F2_GPIO24,
+ IMX_F3_GPIO24,
+ IMX_F0_GPIO25,
+ IMX_F1_GPIO25,
+ IMX_F2_GPIO25,
+ IMX_F3_GPIO25,
+ IMX_F0_GPIO26,
+ IMX_F1_GPIO26,
+ IMX_F2_GPIO26,
+ IMX_F3_GPIO26,
+ IMX_F0_GPIO27,
+ IMX_F1_GPIO27,
+ IMX_F2_GPIO27,
+ IMX_F3_GPIO27,
+ IMX_F0_GPIO28,
+ IMX_F1_GPIO28,
+ IMX_F2_GPIO28,
+ IMX_F3_GPIO28,
+ IMX_F0_GPIO29,
+ IMX_F1_GPIO29,
+ IMX_F2_GPIO29,
+ IMX_F3_GPIO29,
+ IMX_F0_GPIO30,
+ IMX_F1_GPIO30,
+ IMX_F2_GPIO30,
+ IMX_F3_GPIO30,
+ IMX_F0_GPIO31,
+ IMX_F1_GPIO31,
+ IMX_F2_GPIO31,
+ IMX_F3_GPIO31,
+ IMX_F0_GPIO32,
+ IMX_F1_GPIO32,
+ IMX_F2_GPIO32,
+ IMX_F3_GPIO32,
+ IMX_F0_GPIO33,
+ IMX_F1_GPIO33,
+ IMX_F2_GPIO33,
+ IMX_F3_GPIO33,
+ IMX_F0_GPIO34,
+ IMX_F1_GPIO34,
+ IMX_F2_GPIO34,
+ IMX_F3_GPIO34,
+ IMX_F0_GPIO35,
+ IMX_F1_GPIO35,
+ IMX_F2_GPIO35,
+ IMX_F3_GPIO35,
+ IMX_F0_GPIO36,
+ IMX_F1_GPIO36,
+ IMX_F2_GPIO36,
+ IMX_F3_GPIO36,
+ IMX_F0_GPIO37,
+ IMX_F1_GPIO37,
+ IMX_F2_GPIO37,
+ IMX_F3_GPIO37,
+ IMX_F0_GPIO38,
+ IMX_F1_GPIO38,
+ IMX_F2_GPIO38,
+ IMX_F3_GPIO38,
+ IMX_F0_GPIO39,
+ IMX_F1_GPIO39,
+ IMX_F2_GPIO39,
+ IMX_F3_GPIO39,
+ IMX_F0_GPIO40,
+ IMX_F1_GPIO40,
+ IMX_F2_GPIO40,
+ IMX_F3_GPIO40,
+ IMX_F0_GPIO41,
+ IMX_F1_GPIO41,
+ IMX_F2_GPIO41,
+ IMX_F3_GPIO41,
+ IMX_F0_GPIO42,
+ IMX_F1_GPIO42,
+ IMX_F2_GPIO42,
+ IMX_F3_GPIO42,
+ IMX_F0_GPIO43,
+ IMX_F1_GPIO43,
+ IMX_F2_GPIO43,
+ IMX_F3_GPIO43,
+ IMX_F0_GPIO44,
+ IMX_F1_GPIO44,
+ IMX_F2_GPIO44,
+ IMX_F3_GPIO44,
+ IMX_F0_GPIO45,
+ IMX_F1_GPIO45,
+ IMX_F2_GPIO45,
+ IMX_F3_GPIO45,
+ IMX_F0_GPIO46,
+ IMX_F1_GPIO46,
+ IMX_F2_GPIO46,
+ IMX_F3_GPIO46,
+ IMX_F0_GPIO47,
+ IMX_F1_GPIO47,
+ IMX_F2_GPIO47,
+ IMX_F3_GPIO47,
+ IMX_F0_GPIO48,
+ IMX_F1_GPIO48,
+ IMX_F2_GPIO48,
+ IMX_F3_GPIO48,
+ IMX_F0_GPIO49,
+ IMX_F1_GPIO49,
+ IMX_F2_GPIO49,
+ IMX_F3_GPIO49,
+ IMX_F0_GPIO50,
+ IMX_F1_GPIO50,
+ IMX_F2_GPIO50,
+ IMX_F3_GPIO50,
+ IMX_F0_GPIO51,
+ IMX_F1_GPIO51,
+ IMX_F2_GPIO51,
+ IMX_F3_GPIO51,
+ IMX_F0_GPIO52,
+ IMX_F1_GPIO52,
+ IMX_F2_GPIO52,
+ IMX_F3_GPIO52,
+ IMX_F0_GPIO53,
+ IMX_F1_GPIO53,
+ IMX_F2_GPIO53,
+ IMX_F3_GPIO53,
+ IMX_F0_GPIO54,
+ IMX_F1_GPIO54,
+ IMX_F2_GPIO54,
+ IMX_F3_GPIO54,
+ IMX_F0_GPIO55,
+ IMX_F1_GPIO55,
+ IMX_F2_GPIO55,
+ IMX_F3_GPIO55,
+ IMX_F0_GPIO56,
+ IMX_F1_GPIO56,
+ IMX_F2_GPIO56,
+ IMX_F3_GPIO56,
+ IMX_F0_GPIO57,
+ IMX_F1_GPIO57,
+ IMX_F2_GPIO57,
+ IMX_F3_GPIO57,
+ IMX_F0_GPIO58,
+ IMX_F1_GPIO58,
+ IMX_F2_GPIO58,
+ IMX_F3_GPIO58,
+ IMX_F0_GPIO59,
+ IMX_F1_GPIO59,
+ IMX_F2_GPIO59,
+ IMX_F3_GPIO59,
+ IMX_F0_GPIO60,
+ IMX_F1_GPIO60,
+ IMX_F2_GPIO60,
+ IMX_F3_GPIO60,
+ IMX_F0_GPIO61,
+ IMX_F1_GPIO61,
+ IMX_F2_GPIO61,
+ IMX_F3_GPIO61,
+ IMX_F0_GPIO62,
+ IMX_F1_GPIO62,
+ IMX_F2_GPIO62,
+ IMX_F3_GPIO62,
+ IMX_F0_GPIO64,
+ IMX_F1_GPIO64,
+ IMX_F2_GPIO64,
+ IMX_F3_GPIO64,
+ IMX_F0_GPIO65,
+ IMX_F1_GPIO65,
+ IMX_F2_GPIO65,
+ IMX_F3_GPIO65,
+ IMX_F0_GPIO66,
+ IMX_F1_GPIO66,
+ IMX_F2_GPIO66,
+ IMX_F3_GPIO66,
+ IMX_F0_GPIO67,
+ IMX_F1_GPIO67,
+ IMX_F2_GPIO67,
+ IMX_F3_GPIO67,
+ IMX_F0_GPIO68,
+ IMX_F1_GPIO68,
+ IMX_F2_GPIO68,
+ IMX_F3_GPIO68,
+ IMX_F0_GPIO69,
+ IMX_F1_GPIO69,
+ IMX_F2_GPIO69,
+ IMX_F3_GPIO69,
+ IMX_F0_GPIO70,
+ IMX_F1_GPIO70,
+ IMX_F2_GPIO70,
+ IMX_F3_GPIO70,
+ IMX_F0_GPIO71,
+ IMX_F1_GPIO71,
+ IMX_F2_GPIO71,
+ IMX_F3_GPIO71,
+ IMX_F0_GPIO72,
+ IMX_F1_GPIO72,
+ IMX_F2_GPIO72,
+ IMX_F3_GPIO72,
+ IMX_F0_GPIO73,
+ IMX_F1_GPIO73,
+ IMX_F2_GPIO73,
+ IMX_F3_GPIO73,
+ IMX_F0_GPIO74,
+ IMX_F1_GPIO74,
+ IMX_F2_GPIO74,
+ IMX_F3_GPIO74,
+ IMX_F0_GPIO75,
+ IMX_F1_GPIO75,
+ IMX_F2_GPIO75,
+ IMX_F3_GPIO75,
+ IMX_F0_GPIO76,
+ IMX_F1_GPIO76,
+ IMX_F2_GPIO76,
+ IMX_F3_GPIO76,
+ IMX_F0_GPIO77,
+ IMX_F1_GPIO77,
+ IMX_F2_GPIO77,
+ IMX_F3_GPIO77,
+ IMX_F0_GPIO78,
+ IMX_F1_GPIO78,
+ IMX_F2_GPIO78,
+ IMX_F3_GPIO78,
+ IMX_F0_GPIO79,
+ IMX_F1_GPIO79,
+ IMX_F2_GPIO79,
+ IMX_F3_GPIO79,
+ IMX_F0_GPIO80,
+ IMX_F1_GPIO80,
+ IMX_F2_GPIO80,
+ IMX_F3_GPIO80,
+ IMX_F0_GPIO81,
+ IMX_F1_GPIO81,
+ IMX_F2_GPIO81,
+ IMX_F3_GPIO81,
+ IMX_F0_GPIO82,
+ IMX_F1_GPIO82,
+ IMX_F2_GPIO82,
+ IMX_F3_GPIO82,
+ IMX_F0_GPIO83,
+ IMX_F1_GPIO83,
+ IMX_F2_GPIO83,
+ IMX_F3_GPIO83,
+ IMX_F0_GPIO84,
+ IMX_F1_GPIO84,
+ IMX_F2_GPIO84,
+ IMX_F3_GPIO84,
+ IMX_F0_GPIO85,
+ IMX_F1_GPIO85,
+ IMX_F2_GPIO85,
+ IMX_F3_GPIO85,
+ IMX_F0_GPIO86,
+ IMX_F1_GPIO86,
+ IMX_F2_GPIO86,
+ IMX_F3_GPIO86,
+ IMX_F0_GPIO87,
+ IMX_F1_GPIO87,
+ IMX_F2_GPIO87,
+ IMX_F3_GPIO87,
+ IMX_F0_GPIO88,
+ IMX_F1_GPIO88,
+ IMX_F2_GPIO88,
+ IMX_F3_GPIO88,
+ IMX_F0_GPIO89,
+ IMX_F1_GPIO89,
+ IMX_F2_GPIO89,
+ IMX_F3_GPIO89,
+ IMX_F0_GPIO90,
+ IMX_F1_GPIO90,
+ IMX_F2_GPIO90,
+ IMX_F3_GPIO90,
+ IMX_F0_GPIO91,
+ IMX_F1_GPIO91,
+ IMX_F2_GPIO91,
+ IMX_F3_GPIO91,
+ IMX_F0_GPIO92,
+ IMX_F1_GPIO92,
+ IMX_F2_GPIO92,
+ IMX_F3_GPIO92,
+ IMX_F0_GPIO93,
+ IMX_F1_GPIO93,
+ IMX_F2_GPIO93,
+ IMX_F3_GPIO93,
+ IMX_F0_GPIO94,
+ IMX_F1_GPIO94,
+ IMX_F2_GPIO94,
+ IMX_F3_GPIO94,
+ IMX_F0_GPIO95,
+ IMX_F1_GPIO95,
+ IMX_F2_GPIO95,
+ IMX_F3_GPIO95,
+ IMX_F0_GPIO96,
+ IMX_F1_GPIO96,
+ IMX_F2_GPIO96,
+ IMX_F3_GPIO96,
+ IMX_F0_GPIO97,
+ IMX_F1_GPIO97,
+ IMX_F2_GPIO97,
+ IMX_F3_GPIO97,
+ IMX_F0_GPIO98,
+ IMX_F1_GPIO98,
+ IMX_F2_GPIO98,
+ IMX_F3_GPIO98,
+ IMX_F0_GPIO99,
+ IMX_F1_GPIO99,
+ IMX_F2_GPIO99,
+ IMX_F3_GPIO99,
+ IMX_F0_GPIO100,
+ IMX_F1_GPIO100,
+ IMX_F2_GPIO100,
+ IMX_F3_GPIO100,
+ IMX_F0_GPIO101,
+ IMX_F1_GPIO101,
+ IMX_F2_GPIO101,
+ IMX_F3_GPIO101,
+ IMX_F0_GPIO102,
+ IMX_F1_GPIO102,
+ IMX_F2_GPIO102,
+ IMX_F3_GPIO102,
+ IMX_F0_GPIO103,
+ IMX_F1_GPIO103,
+ IMX_F2_GPIO103,
+ IMX_F3_GPIO103,
+ IMX_F0_GPIO104,
+ IMX_F1_GPIO104,
+ IMX_F2_GPIO104,
+ IMX_F3_GPIO104,
+ IMX_F0_GPIO105,
+ IMX_F1_GPIO105,
+ IMX_F2_GPIO105,
+ IMX_F3_GPIO105,
+ IMX_F0_GPIO106,
+ IMX_F1_GPIO106,
+ IMX_F2_GPIO106,
+ IMX_F3_GPIO106,
+ IMX_F0_GPIO107,
+ IMX_F1_GPIO107,
+ IMX_F2_GPIO107,
+ IMX_F3_GPIO107,
+ IMX_F0_GPIO108,
+ IMX_F1_GPIO108,
+ IMX_F2_GPIO108,
+ IMX_F3_GPIO108,
+ IMX_F0_GPIO109,
+ IMX_F1_GPIO109,
+ IMX_F2_GPIO109,
+ IMX_F3_GPIO109,
+ IMX_F0_GPIO110,
+ IMX_F1_GPIO110,
+ IMX_F2_GPIO110,
+ IMX_F3_GPIO110,
+ IMX_F0_GPIO111,
+ IMX_F1_GPIO111,
+ IMX_F2_GPIO111,
+ IMX_F3_GPIO111,
+ IMX_F0_GPIO112,
+ IMX_F1_GPIO112,
+ IMX_F2_GPIO112,
+ IMX_F3_GPIO112,
+ IMX_F0_GPIO113,
+ IMX_F1_GPIO113,
+ IMX_F2_GPIO113,
+ IMX_F3_GPIO113,
+ IMX_F0_GPIO114,
+ IMX_F1_GPIO114,
+ IMX_F2_GPIO114,
+ IMX_F3_GPIO114,
+ IMX_F0_GPIO115,
+ IMX_F1_GPIO115,
+ IMX_F2_GPIO115,
+ IMX_F3_GPIO115,
+ IMX_F0_GPIO116,
+ IMX_F1_GPIO116,
+ IMX_F2_GPIO116,
+ IMX_F3_GPIO116,
+ IMX_F0_GPIO117,
+ IMX_F1_GPIO117,
+ IMX_F2_GPIO117,
+ IMX_F3_GPIO117,
+ IMX_F0_GPIO118,
+ IMX_F1_GPIO118,
+ IMX_F2_GPIO118,
+ IMX_F3_GPIO118,
+ IMX_F0_GPIO119,
+ IMX_F1_GPIO119,
+ IMX_F2_GPIO119,
+ IMX_F3_GPIO119,
+ IMX_F0_GPIO120,
+ IMX_F1_GPIO120,
+ IMX_F2_GPIO120,
+ IMX_F3_GPIO120,
+ IMX_F0_GPIO121,
+ IMX_F1_GPIO121,
+ IMX_F2_GPIO121,
+ IMX_F3_GPIO121,
+ IMX_F0_GPIO122,
+ IMX_F1_GPIO122,
+ IMX_F2_GPIO122,
+ IMX_F3_GPIO122,
+ IMX_F0_GPIO123,
+ IMX_F1_GPIO123,
+ IMX_F2_GPIO123,
+ IMX_F3_GPIO123,
+ IMX_F0_GPIO124,
+ IMX_F1_GPIO124,
+ IMX_F2_GPIO124,
+ IMX_F3_GPIO124,
+ IMX_F0_GPIO125,
+ IMX_F1_GPIO125,
+ IMX_F2_GPIO125,
+ IMX_F3_GPIO125,
+ IMX_F0_GPIO126,
+ IMX_F1_GPIO126,
+ IMX_F2_GPIO126,
+ IMX_F3_GPIO126,
+ IMX_F0_GPIO127,
+ IMX_F1_GPIO127,
+ IMX_F2_GPIO127,
+ IMX_F3_GPIO127,
+ IMX_F0_GPIO128,
+ IMX_F1_GPIO128,
+ IMX_F2_GPIO128,
+ IMX_F3_GPIO128,
+ IMX_F0_GPIO129,
+ IMX_F1_GPIO129,
+ IMX_F2_GPIO129,
+ IMX_F3_GPIO129,
+ IMX_F0_GPIO130,
+ IMX_F1_GPIO130,
+ IMX_F2_GPIO130,
+ IMX_F3_GPIO130,
+ IMX_F0_GPIO131,
+ IMX_F1_GPIO131,
+ IMX_F2_GPIO131,
+ IMX_F3_GPIO131,
+ IMX_F0_GPIO132,
+ IMX_F1_GPIO132,
+ IMX_F2_GPIO132,
+ IMX_F3_GPIO132,
+ IMX_F0_GPIO133,
+ IMX_F1_GPIO133,
+ IMX_F2_GPIO133,
+ IMX_F3_GPIO133,
+ IMX_F0_GPIO134,
+ IMX_F1_GPIO134,
+ IMX_F2_GPIO134,
+ IMX_F3_GPIO134,
+ IMX_F0_GPIO135,
+ IMX_F1_GPIO135,
+ IMX_F2_GPIO135,
+ IMX_F3_GPIO135,
+ IMX_F0_GPIO136,
+ IMX_F1_GPIO136,
+ IMX_F2_GPIO136,
+ IMX_F3_GPIO136,
+ IMX_F0_GPIO137,
+ IMX_F1_GPIO137,
+ IMX_F2_GPIO137,
+ IMX_F3_GPIO137,
+ IMX_F0_GPIO138,
+ IMX_F1_GPIO138,
+ IMX_F2_GPIO138,
+ IMX_F3_GPIO138,
+ IMX_F0_GPIO139,
+ IMX_F1_GPIO139,
+ IMX_F2_GPIO139,
+ IMX_F3_GPIO139,
+ IMX_F0_GPIO140,
+ IMX_F1_GPIO140,
+ IMX_F2_GPIO140,
+ IMX_F3_GPIO140,
+ IMX_F0_GPIO141,
+ IMX_F1_GPIO141,
+ IMX_F2_GPIO141,
+ IMX_F3_GPIO141,
+ IMX_F0_GPIO142,
+ IMX_F1_GPIO142,
+ IMX_F2_GPIO142,
+ IMX_F3_GPIO142,
+ IMX_F0_GPIO143,
+ IMX_F1_GPIO143,
+ IMX_F2_GPIO143,
+ IMX_F3_GPIO143,
+ IMX_F0_GPIO144,
+ IMX_F1_GPIO144,
+ IMX_F2_GPIO144,
+ IMX_F3_GPIO144,
+};
+
+#define AMD_PINCTRL_FUNC_GRP(_number, _func) \
+ [IMX_F##_func##_GPIO##_number] = \
+ PINCTRL_PINGROUP("IMX_F"#_func "_GPIO"#_number, AMD_PINS(_number), 1)
+
+static const struct pingroup kerncz_groups[] = {
+ AMD_PINCTRL_FUNC_GRP(0, 0),
+ AMD_PINCTRL_FUNC_GRP(0, 1),
+ AMD_PINCTRL_FUNC_GRP(0, 2),
+ AMD_PINCTRL_FUNC_GRP(0, 3),
+ AMD_PINCTRL_FUNC_GRP(1, 0),
+ AMD_PINCTRL_FUNC_GRP(1, 1),
+ AMD_PINCTRL_FUNC_GRP(1, 2),
+ AMD_PINCTRL_FUNC_GRP(1, 3),
+ AMD_PINCTRL_FUNC_GRP(2, 0),
+ AMD_PINCTRL_FUNC_GRP(2, 1),
+ AMD_PINCTRL_FUNC_GRP(2, 2),
+ AMD_PINCTRL_FUNC_GRP(2, 3),
+ AMD_PINCTRL_FUNC_GRP(3, 0),
+ AMD_PINCTRL_FUNC_GRP(3, 1),
+ AMD_PINCTRL_FUNC_GRP(3, 2),
+ AMD_PINCTRL_FUNC_GRP(3, 3),
+ AMD_PINCTRL_FUNC_GRP(4, 0),
+ AMD_PINCTRL_FUNC_GRP(4, 1),
+ AMD_PINCTRL_FUNC_GRP(4, 2),
+ AMD_PINCTRL_FUNC_GRP(4, 3),
+ AMD_PINCTRL_FUNC_GRP(5, 0),
+ AMD_PINCTRL_FUNC_GRP(5, 1),
+ AMD_PINCTRL_FUNC_GRP(5, 2),
+ AMD_PINCTRL_FUNC_GRP(5, 3),
+ AMD_PINCTRL_FUNC_GRP(6, 0),
+ AMD_PINCTRL_FUNC_GRP(6, 1),
+ AMD_PINCTRL_FUNC_GRP(6, 2),
+ AMD_PINCTRL_FUNC_GRP(6, 3),
+ AMD_PINCTRL_FUNC_GRP(7, 0),
+ AMD_PINCTRL_FUNC_GRP(7, 1),
+ AMD_PINCTRL_FUNC_GRP(7, 2),
+ AMD_PINCTRL_FUNC_GRP(7, 3),
+ AMD_PINCTRL_FUNC_GRP(8, 0),
+ AMD_PINCTRL_FUNC_GRP(8, 1),
+ AMD_PINCTRL_FUNC_GRP(8, 2),
+ AMD_PINCTRL_FUNC_GRP(8, 3),
+ AMD_PINCTRL_FUNC_GRP(9, 0),
+ AMD_PINCTRL_FUNC_GRP(9, 1),
+ AMD_PINCTRL_FUNC_GRP(9, 2),
+ AMD_PINCTRL_FUNC_GRP(9, 3),
+ AMD_PINCTRL_FUNC_GRP(10, 0),
+ AMD_PINCTRL_FUNC_GRP(10, 1),
+ AMD_PINCTRL_FUNC_GRP(10, 2),
+ AMD_PINCTRL_FUNC_GRP(10, 3),
+ AMD_PINCTRL_FUNC_GRP(11, 0),
+ AMD_PINCTRL_FUNC_GRP(11, 1),
+ AMD_PINCTRL_FUNC_GRP(11, 2),
+ AMD_PINCTRL_FUNC_GRP(11, 3),
+ AMD_PINCTRL_FUNC_GRP(12, 0),
+ AMD_PINCTRL_FUNC_GRP(12, 1),
+ AMD_PINCTRL_FUNC_GRP(12, 2),
+ AMD_PINCTRL_FUNC_GRP(12, 3),
+ AMD_PINCTRL_FUNC_GRP(13, 0),
+ AMD_PINCTRL_FUNC_GRP(13, 1),
+ AMD_PINCTRL_FUNC_GRP(13, 2),
+ AMD_PINCTRL_FUNC_GRP(13, 3),
+ AMD_PINCTRL_FUNC_GRP(14, 0),
+ AMD_PINCTRL_FUNC_GRP(14, 1),
+ AMD_PINCTRL_FUNC_GRP(14, 2),
+ AMD_PINCTRL_FUNC_GRP(14, 3),
+ AMD_PINCTRL_FUNC_GRP(15, 0),
+ AMD_PINCTRL_FUNC_GRP(15, 1),
+ AMD_PINCTRL_FUNC_GRP(15, 2),
+ AMD_PINCTRL_FUNC_GRP(15, 3),
+ AMD_PINCTRL_FUNC_GRP(16, 0),
+ AMD_PINCTRL_FUNC_GRP(16, 1),
+ AMD_PINCTRL_FUNC_GRP(16, 2),
+ AMD_PINCTRL_FUNC_GRP(16, 3),
+ AMD_PINCTRL_FUNC_GRP(17, 0),
+ AMD_PINCTRL_FUNC_GRP(17, 1),
+ AMD_PINCTRL_FUNC_GRP(17, 2),
+ AMD_PINCTRL_FUNC_GRP(17, 3),
+ AMD_PINCTRL_FUNC_GRP(18, 0),
+ AMD_PINCTRL_FUNC_GRP(18, 1),
+ AMD_PINCTRL_FUNC_GRP(18, 2),
+ AMD_PINCTRL_FUNC_GRP(18, 3),
+ AMD_PINCTRL_FUNC_GRP(19, 0),
+ AMD_PINCTRL_FUNC_GRP(19, 1),
+ AMD_PINCTRL_FUNC_GRP(19, 2),
+ AMD_PINCTRL_FUNC_GRP(19, 3),
+ AMD_PINCTRL_FUNC_GRP(20, 0),
+ AMD_PINCTRL_FUNC_GRP(20, 1),
+ AMD_PINCTRL_FUNC_GRP(20, 2),
+ AMD_PINCTRL_FUNC_GRP(20, 3),
+ AMD_PINCTRL_FUNC_GRP(21, 0),
+ AMD_PINCTRL_FUNC_GRP(21, 1),
+ AMD_PINCTRL_FUNC_GRP(21, 2),
+ AMD_PINCTRL_FUNC_GRP(21, 3),
+ AMD_PINCTRL_FUNC_GRP(22, 0),
+ AMD_PINCTRL_FUNC_GRP(22, 1),
+ AMD_PINCTRL_FUNC_GRP(22, 2),
+ AMD_PINCTRL_FUNC_GRP(22, 3),
+ AMD_PINCTRL_FUNC_GRP(23, 0),
+ AMD_PINCTRL_FUNC_GRP(23, 1),
+ AMD_PINCTRL_FUNC_GRP(23, 2),
+ AMD_PINCTRL_FUNC_GRP(23, 3),
+ AMD_PINCTRL_FUNC_GRP(24, 0),
+ AMD_PINCTRL_FUNC_GRP(24, 1),
+ AMD_PINCTRL_FUNC_GRP(24, 2),
+ AMD_PINCTRL_FUNC_GRP(24, 3),
+ AMD_PINCTRL_FUNC_GRP(25, 0),
+ AMD_PINCTRL_FUNC_GRP(25, 1),
+ AMD_PINCTRL_FUNC_GRP(25, 2),
+ AMD_PINCTRL_FUNC_GRP(25, 3),
+ AMD_PINCTRL_FUNC_GRP(26, 0),
+ AMD_PINCTRL_FUNC_GRP(26, 1),
+ AMD_PINCTRL_FUNC_GRP(26, 2),
+ AMD_PINCTRL_FUNC_GRP(26, 3),
+ AMD_PINCTRL_FUNC_GRP(27, 0),
+ AMD_PINCTRL_FUNC_GRP(27, 1),
+ AMD_PINCTRL_FUNC_GRP(27, 2),
+ AMD_PINCTRL_FUNC_GRP(27, 3),
+ AMD_PINCTRL_FUNC_GRP(28, 0),
+ AMD_PINCTRL_FUNC_GRP(28, 1),
+ AMD_PINCTRL_FUNC_GRP(28, 2),
+ AMD_PINCTRL_FUNC_GRP(28, 3),
+ AMD_PINCTRL_FUNC_GRP(29, 0),
+ AMD_PINCTRL_FUNC_GRP(29, 1),
+ AMD_PINCTRL_FUNC_GRP(29, 2),
+ AMD_PINCTRL_FUNC_GRP(29, 3),
+ AMD_PINCTRL_FUNC_GRP(30, 0),
+ AMD_PINCTRL_FUNC_GRP(30, 1),
+ AMD_PINCTRL_FUNC_GRP(30, 2),
+ AMD_PINCTRL_FUNC_GRP(30, 3),
+ AMD_PINCTRL_FUNC_GRP(31, 0),
+ AMD_PINCTRL_FUNC_GRP(31, 1),
+ AMD_PINCTRL_FUNC_GRP(31, 2),
+ AMD_PINCTRL_FUNC_GRP(31, 3),
+ AMD_PINCTRL_FUNC_GRP(32, 0),
+ AMD_PINCTRL_FUNC_GRP(32, 1),
+ AMD_PINCTRL_FUNC_GRP(32, 2),
+ AMD_PINCTRL_FUNC_GRP(32, 3),
+ AMD_PINCTRL_FUNC_GRP(33, 0),
+ AMD_PINCTRL_FUNC_GRP(33, 1),
+ AMD_PINCTRL_FUNC_GRP(33, 2),
+ AMD_PINCTRL_FUNC_GRP(33, 3),
+ AMD_PINCTRL_FUNC_GRP(34, 0),
+ AMD_PINCTRL_FUNC_GRP(34, 1),
+ AMD_PINCTRL_FUNC_GRP(34, 2),
+ AMD_PINCTRL_FUNC_GRP(34, 3),
+ AMD_PINCTRL_FUNC_GRP(35, 0),
+ AMD_PINCTRL_FUNC_GRP(35, 1),
+ AMD_PINCTRL_FUNC_GRP(35, 2),
+ AMD_PINCTRL_FUNC_GRP(35, 3),
+ AMD_PINCTRL_FUNC_GRP(36, 0),
+ AMD_PINCTRL_FUNC_GRP(36, 1),
+ AMD_PINCTRL_FUNC_GRP(36, 2),
+ AMD_PINCTRL_FUNC_GRP(36, 3),
+ AMD_PINCTRL_FUNC_GRP(37, 0),
+ AMD_PINCTRL_FUNC_GRP(37, 1),
+ AMD_PINCTRL_FUNC_GRP(37, 2),
+ AMD_PINCTRL_FUNC_GRP(37, 3),
+ AMD_PINCTRL_FUNC_GRP(38, 0),
+ AMD_PINCTRL_FUNC_GRP(38, 1),
+ AMD_PINCTRL_FUNC_GRP(38, 2),
+ AMD_PINCTRL_FUNC_GRP(38, 3),
+ AMD_PINCTRL_FUNC_GRP(39, 0),
+ AMD_PINCTRL_FUNC_GRP(39, 1),
+ AMD_PINCTRL_FUNC_GRP(39, 2),
+ AMD_PINCTRL_FUNC_GRP(39, 3),
+ AMD_PINCTRL_FUNC_GRP(40, 0),
+ AMD_PINCTRL_FUNC_GRP(40, 1),
+ AMD_PINCTRL_FUNC_GRP(40, 2),
+ AMD_PINCTRL_FUNC_GRP(40, 3),
+ AMD_PINCTRL_FUNC_GRP(41, 0),
+ AMD_PINCTRL_FUNC_GRP(41, 1),
+ AMD_PINCTRL_FUNC_GRP(41, 2),
+ AMD_PINCTRL_FUNC_GRP(41, 3),
+ AMD_PINCTRL_FUNC_GRP(42, 0),
+ AMD_PINCTRL_FUNC_GRP(42, 1),
+ AMD_PINCTRL_FUNC_GRP(42, 2),
+ AMD_PINCTRL_FUNC_GRP(42, 3),
+ AMD_PINCTRL_FUNC_GRP(43, 0),
+ AMD_PINCTRL_FUNC_GRP(43, 1),
+ AMD_PINCTRL_FUNC_GRP(43, 2),
+ AMD_PINCTRL_FUNC_GRP(43, 3),
+ AMD_PINCTRL_FUNC_GRP(44, 0),
+ AMD_PINCTRL_FUNC_GRP(44, 1),
+ AMD_PINCTRL_FUNC_GRP(44, 2),
+ AMD_PINCTRL_FUNC_GRP(44, 3),
+ AMD_PINCTRL_FUNC_GRP(45, 0),
+ AMD_PINCTRL_FUNC_GRP(45, 1),
+ AMD_PINCTRL_FUNC_GRP(45, 2),
+ AMD_PINCTRL_FUNC_GRP(45, 3),
+ AMD_PINCTRL_FUNC_GRP(46, 0),
+ AMD_PINCTRL_FUNC_GRP(46, 1),
+ AMD_PINCTRL_FUNC_GRP(46, 2),
+ AMD_PINCTRL_FUNC_GRP(46, 3),
+ AMD_PINCTRL_FUNC_GRP(47, 0),
+ AMD_PINCTRL_FUNC_GRP(47, 1),
+ AMD_PINCTRL_FUNC_GRP(47, 2),
+ AMD_PINCTRL_FUNC_GRP(47, 3),
+ AMD_PINCTRL_FUNC_GRP(48, 0),
+ AMD_PINCTRL_FUNC_GRP(48, 1),
+ AMD_PINCTRL_FUNC_GRP(48, 2),
+ AMD_PINCTRL_FUNC_GRP(48, 3),
+ AMD_PINCTRL_FUNC_GRP(49, 0),
+ AMD_PINCTRL_FUNC_GRP(49, 1),
+ AMD_PINCTRL_FUNC_GRP(49, 2),
+ AMD_PINCTRL_FUNC_GRP(49, 3),
+ AMD_PINCTRL_FUNC_GRP(50, 0),
+ AMD_PINCTRL_FUNC_GRP(50, 1),
+ AMD_PINCTRL_FUNC_GRP(50, 2),
+ AMD_PINCTRL_FUNC_GRP(50, 3),
+ AMD_PINCTRL_FUNC_GRP(51, 0),
+ AMD_PINCTRL_FUNC_GRP(51, 1),
+ AMD_PINCTRL_FUNC_GRP(51, 2),
+ AMD_PINCTRL_FUNC_GRP(51, 3),
+ AMD_PINCTRL_FUNC_GRP(52, 0),
+ AMD_PINCTRL_FUNC_GRP(52, 1),
+ AMD_PINCTRL_FUNC_GRP(52, 2),
+ AMD_PINCTRL_FUNC_GRP(52, 3),
+ AMD_PINCTRL_FUNC_GRP(53, 0),
+ AMD_PINCTRL_FUNC_GRP(53, 1),
+ AMD_PINCTRL_FUNC_GRP(53, 2),
+ AMD_PINCTRL_FUNC_GRP(53, 3),
+ AMD_PINCTRL_FUNC_GRP(54, 0),
+ AMD_PINCTRL_FUNC_GRP(54, 1),
+ AMD_PINCTRL_FUNC_GRP(54, 2),
+ AMD_PINCTRL_FUNC_GRP(54, 3),
+ AMD_PINCTRL_FUNC_GRP(55, 0),
+ AMD_PINCTRL_FUNC_GRP(55, 1),
+ AMD_PINCTRL_FUNC_GRP(55, 2),
+ AMD_PINCTRL_FUNC_GRP(55, 3),
+ AMD_PINCTRL_FUNC_GRP(56, 0),
+ AMD_PINCTRL_FUNC_GRP(56, 1),
+ AMD_PINCTRL_FUNC_GRP(56, 2),
+ AMD_PINCTRL_FUNC_GRP(56, 3),
+ AMD_PINCTRL_FUNC_GRP(57, 0),
+ AMD_PINCTRL_FUNC_GRP(57, 1),
+ AMD_PINCTRL_FUNC_GRP(57, 2),
+ AMD_PINCTRL_FUNC_GRP(57, 3),
+ AMD_PINCTRL_FUNC_GRP(58, 0),
+ AMD_PINCTRL_FUNC_GRP(58, 1),
+ AMD_PINCTRL_FUNC_GRP(58, 2),
+ AMD_PINCTRL_FUNC_GRP(58, 3),
+ AMD_PINCTRL_FUNC_GRP(59, 0),
+ AMD_PINCTRL_FUNC_GRP(59, 1),
+ AMD_PINCTRL_FUNC_GRP(59, 2),
+ AMD_PINCTRL_FUNC_GRP(59, 3),
+ AMD_PINCTRL_FUNC_GRP(60, 0),
+ AMD_PINCTRL_FUNC_GRP(60, 1),
+ AMD_PINCTRL_FUNC_GRP(60, 2),
+ AMD_PINCTRL_FUNC_GRP(60, 3),
+ AMD_PINCTRL_FUNC_GRP(61, 0),
+ AMD_PINCTRL_FUNC_GRP(61, 1),
+ AMD_PINCTRL_FUNC_GRP(61, 2),
+ AMD_PINCTRL_FUNC_GRP(61, 3),
+ AMD_PINCTRL_FUNC_GRP(62, 0),
+ AMD_PINCTRL_FUNC_GRP(62, 1),
+ AMD_PINCTRL_FUNC_GRP(62, 2),
+ AMD_PINCTRL_FUNC_GRP(62, 3),
+ AMD_PINCTRL_FUNC_GRP(64, 0),
+ AMD_PINCTRL_FUNC_GRP(64, 1),
+ AMD_PINCTRL_FUNC_GRP(64, 2),
+ AMD_PINCTRL_FUNC_GRP(64, 3),
+ AMD_PINCTRL_FUNC_GRP(65, 0),
+ AMD_PINCTRL_FUNC_GRP(65, 1),
+ AMD_PINCTRL_FUNC_GRP(65, 2),
+ AMD_PINCTRL_FUNC_GRP(65, 3),
+ AMD_PINCTRL_FUNC_GRP(66, 0),
+ AMD_PINCTRL_FUNC_GRP(66, 1),
+ AMD_PINCTRL_FUNC_GRP(66, 2),
+ AMD_PINCTRL_FUNC_GRP(66, 3),
+ AMD_PINCTRL_FUNC_GRP(67, 0),
+ AMD_PINCTRL_FUNC_GRP(67, 1),
+ AMD_PINCTRL_FUNC_GRP(67, 2),
+ AMD_PINCTRL_FUNC_GRP(67, 3),
+ AMD_PINCTRL_FUNC_GRP(68, 0),
+ AMD_PINCTRL_FUNC_GRP(68, 1),
+ AMD_PINCTRL_FUNC_GRP(68, 2),
+ AMD_PINCTRL_FUNC_GRP(68, 3),
+ AMD_PINCTRL_FUNC_GRP(69, 0),
+ AMD_PINCTRL_FUNC_GRP(69, 1),
+ AMD_PINCTRL_FUNC_GRP(69, 2),
+ AMD_PINCTRL_FUNC_GRP(69, 3),
+ AMD_PINCTRL_FUNC_GRP(70, 0),
+ AMD_PINCTRL_FUNC_GRP(70, 1),
+ AMD_PINCTRL_FUNC_GRP(70, 2),
+ AMD_PINCTRL_FUNC_GRP(70, 3),
+ AMD_PINCTRL_FUNC_GRP(71, 0),
+ AMD_PINCTRL_FUNC_GRP(71, 1),
+ AMD_PINCTRL_FUNC_GRP(71, 2),
+ AMD_PINCTRL_FUNC_GRP(71, 3),
+ AMD_PINCTRL_FUNC_GRP(72, 0),
+ AMD_PINCTRL_FUNC_GRP(72, 1),
+ AMD_PINCTRL_FUNC_GRP(72, 2),
+ AMD_PINCTRL_FUNC_GRP(72, 3),
+ AMD_PINCTRL_FUNC_GRP(73, 0),
+ AMD_PINCTRL_FUNC_GRP(73, 1),
+ AMD_PINCTRL_FUNC_GRP(73, 2),
+ AMD_PINCTRL_FUNC_GRP(73, 3),
+ AMD_PINCTRL_FUNC_GRP(74, 0),
+ AMD_PINCTRL_FUNC_GRP(74, 1),
+ AMD_PINCTRL_FUNC_GRP(74, 2),
+ AMD_PINCTRL_FUNC_GRP(74, 3),
+ AMD_PINCTRL_FUNC_GRP(75, 0),
+ AMD_PINCTRL_FUNC_GRP(75, 1),
+ AMD_PINCTRL_FUNC_GRP(75, 2),
+ AMD_PINCTRL_FUNC_GRP(75, 3),
+ AMD_PINCTRL_FUNC_GRP(76, 0),
+ AMD_PINCTRL_FUNC_GRP(76, 1),
+ AMD_PINCTRL_FUNC_GRP(76, 2),
+ AMD_PINCTRL_FUNC_GRP(76, 3),
+ AMD_PINCTRL_FUNC_GRP(77, 0),
+ AMD_PINCTRL_FUNC_GRP(77, 1),
+ AMD_PINCTRL_FUNC_GRP(77, 2),
+ AMD_PINCTRL_FUNC_GRP(77, 3),
+ AMD_PINCTRL_FUNC_GRP(78, 0),
+ AMD_PINCTRL_FUNC_GRP(78, 1),
+ AMD_PINCTRL_FUNC_GRP(78, 2),
+ AMD_PINCTRL_FUNC_GRP(78, 3),
+ AMD_PINCTRL_FUNC_GRP(79, 0),
+ AMD_PINCTRL_FUNC_GRP(79, 1),
+ AMD_PINCTRL_FUNC_GRP(79, 2),
+ AMD_PINCTRL_FUNC_GRP(79, 3),
+ AMD_PINCTRL_FUNC_GRP(80, 0),
+ AMD_PINCTRL_FUNC_GRP(80, 1),
+ AMD_PINCTRL_FUNC_GRP(80, 2),
+ AMD_PINCTRL_FUNC_GRP(80, 3),
+ AMD_PINCTRL_FUNC_GRP(81, 0),
+ AMD_PINCTRL_FUNC_GRP(81, 1),
+ AMD_PINCTRL_FUNC_GRP(81, 2),
+ AMD_PINCTRL_FUNC_GRP(81, 3),
+ AMD_PINCTRL_FUNC_GRP(82, 0),
+ AMD_PINCTRL_FUNC_GRP(82, 1),
+ AMD_PINCTRL_FUNC_GRP(82, 2),
+ AMD_PINCTRL_FUNC_GRP(82, 3),
+ AMD_PINCTRL_FUNC_GRP(83, 0),
+ AMD_PINCTRL_FUNC_GRP(83, 1),
+ AMD_PINCTRL_FUNC_GRP(83, 2),
+ AMD_PINCTRL_FUNC_GRP(83, 3),
+ AMD_PINCTRL_FUNC_GRP(84, 0),
+ AMD_PINCTRL_FUNC_GRP(84, 1),
+ AMD_PINCTRL_FUNC_GRP(84, 2),
+ AMD_PINCTRL_FUNC_GRP(84, 3),
+ AMD_PINCTRL_FUNC_GRP(85, 0),
+ AMD_PINCTRL_FUNC_GRP(85, 1),
+ AMD_PINCTRL_FUNC_GRP(85, 2),
+ AMD_PINCTRL_FUNC_GRP(85, 3),
+ AMD_PINCTRL_FUNC_GRP(86, 0),
+ AMD_PINCTRL_FUNC_GRP(86, 1),
+ AMD_PINCTRL_FUNC_GRP(86, 2),
+ AMD_PINCTRL_FUNC_GRP(86, 3),
+ AMD_PINCTRL_FUNC_GRP(87, 0),
+ AMD_PINCTRL_FUNC_GRP(87, 1),
+ AMD_PINCTRL_FUNC_GRP(87, 2),
+ AMD_PINCTRL_FUNC_GRP(87, 3),
+ AMD_PINCTRL_FUNC_GRP(88, 0),
+ AMD_PINCTRL_FUNC_GRP(88, 1),
+ AMD_PINCTRL_FUNC_GRP(88, 2),
+ AMD_PINCTRL_FUNC_GRP(88, 3),
+ AMD_PINCTRL_FUNC_GRP(89, 0),
+ AMD_PINCTRL_FUNC_GRP(89, 1),
+ AMD_PINCTRL_FUNC_GRP(89, 2),
+ AMD_PINCTRL_FUNC_GRP(89, 3),
+ AMD_PINCTRL_FUNC_GRP(90, 0),
+ AMD_PINCTRL_FUNC_GRP(90, 1),
+ AMD_PINCTRL_FUNC_GRP(90, 2),
+ AMD_PINCTRL_FUNC_GRP(90, 3),
+ AMD_PINCTRL_FUNC_GRP(91, 0),
+ AMD_PINCTRL_FUNC_GRP(91, 1),
+ AMD_PINCTRL_FUNC_GRP(91, 2),
+ AMD_PINCTRL_FUNC_GRP(91, 3),
+ AMD_PINCTRL_FUNC_GRP(92, 0),
+ AMD_PINCTRL_FUNC_GRP(92, 1),
+ AMD_PINCTRL_FUNC_GRP(92, 2),
+ AMD_PINCTRL_FUNC_GRP(92, 3),
+ AMD_PINCTRL_FUNC_GRP(93, 0),
+ AMD_PINCTRL_FUNC_GRP(93, 1),
+ AMD_PINCTRL_FUNC_GRP(93, 2),
+ AMD_PINCTRL_FUNC_GRP(93, 3),
+ AMD_PINCTRL_FUNC_GRP(94, 0),
+ AMD_PINCTRL_FUNC_GRP(94, 1),
+ AMD_PINCTRL_FUNC_GRP(94, 2),
+ AMD_PINCTRL_FUNC_GRP(94, 3),
+ AMD_PINCTRL_FUNC_GRP(95, 0),
+ AMD_PINCTRL_FUNC_GRP(95, 1),
+ AMD_PINCTRL_FUNC_GRP(95, 2),
+ AMD_PINCTRL_FUNC_GRP(95, 3),
+ AMD_PINCTRL_FUNC_GRP(96, 0),
+ AMD_PINCTRL_FUNC_GRP(96, 1),
+ AMD_PINCTRL_FUNC_GRP(96, 2),
+ AMD_PINCTRL_FUNC_GRP(96, 3),
+ AMD_PINCTRL_FUNC_GRP(97, 0),
+ AMD_PINCTRL_FUNC_GRP(97, 1),
+ AMD_PINCTRL_FUNC_GRP(97, 2),
+ AMD_PINCTRL_FUNC_GRP(97, 3),
+ AMD_PINCTRL_FUNC_GRP(98, 0),
+ AMD_PINCTRL_FUNC_GRP(98, 1),
+ AMD_PINCTRL_FUNC_GRP(98, 2),
+ AMD_PINCTRL_FUNC_GRP(98, 3),
+ AMD_PINCTRL_FUNC_GRP(99, 0),
+ AMD_PINCTRL_FUNC_GRP(99, 1),
+ AMD_PINCTRL_FUNC_GRP(99, 2),
+ AMD_PINCTRL_FUNC_GRP(99, 3),
+ AMD_PINCTRL_FUNC_GRP(100, 0),
+ AMD_PINCTRL_FUNC_GRP(100, 1),
+ AMD_PINCTRL_FUNC_GRP(100, 2),
+ AMD_PINCTRL_FUNC_GRP(100, 3),
+ AMD_PINCTRL_FUNC_GRP(101, 0),
+ AMD_PINCTRL_FUNC_GRP(101, 1),
+ AMD_PINCTRL_FUNC_GRP(101, 2),
+ AMD_PINCTRL_FUNC_GRP(101, 3),
+ AMD_PINCTRL_FUNC_GRP(102, 0),
+ AMD_PINCTRL_FUNC_GRP(102, 1),
+ AMD_PINCTRL_FUNC_GRP(102, 2),
+ AMD_PINCTRL_FUNC_GRP(102, 3),
+ AMD_PINCTRL_FUNC_GRP(103, 0),
+ AMD_PINCTRL_FUNC_GRP(103, 1),
+ AMD_PINCTRL_FUNC_GRP(103, 2),
+ AMD_PINCTRL_FUNC_GRP(103, 3),
+ AMD_PINCTRL_FUNC_GRP(104, 0),
+ AMD_PINCTRL_FUNC_GRP(104, 1),
+ AMD_PINCTRL_FUNC_GRP(104, 2),
+ AMD_PINCTRL_FUNC_GRP(104, 3),
+ AMD_PINCTRL_FUNC_GRP(105, 0),
+ AMD_PINCTRL_FUNC_GRP(105, 1),
+ AMD_PINCTRL_FUNC_GRP(105, 2),
+ AMD_PINCTRL_FUNC_GRP(105, 3),
+ AMD_PINCTRL_FUNC_GRP(106, 0),
+ AMD_PINCTRL_FUNC_GRP(106, 1),
+ AMD_PINCTRL_FUNC_GRP(106, 2),
+ AMD_PINCTRL_FUNC_GRP(106, 3),
+ AMD_PINCTRL_FUNC_GRP(107, 0),
+ AMD_PINCTRL_FUNC_GRP(107, 1),
+ AMD_PINCTRL_FUNC_GRP(107, 2),
+ AMD_PINCTRL_FUNC_GRP(107, 3),
+ AMD_PINCTRL_FUNC_GRP(108, 0),
+ AMD_PINCTRL_FUNC_GRP(108, 1),
+ AMD_PINCTRL_FUNC_GRP(108, 2),
+ AMD_PINCTRL_FUNC_GRP(108, 3),
+ AMD_PINCTRL_FUNC_GRP(109, 0),
+ AMD_PINCTRL_FUNC_GRP(109, 1),
+ AMD_PINCTRL_FUNC_GRP(109, 2),
+ AMD_PINCTRL_FUNC_GRP(109, 3),
+ AMD_PINCTRL_FUNC_GRP(110, 0),
+ AMD_PINCTRL_FUNC_GRP(110, 1),
+ AMD_PINCTRL_FUNC_GRP(110, 2),
+ AMD_PINCTRL_FUNC_GRP(110, 3),
+ AMD_PINCTRL_FUNC_GRP(111, 0),
+ AMD_PINCTRL_FUNC_GRP(111, 1),
+ AMD_PINCTRL_FUNC_GRP(111, 2),
+ AMD_PINCTRL_FUNC_GRP(111, 3),
+ AMD_PINCTRL_FUNC_GRP(112, 0),
+ AMD_PINCTRL_FUNC_GRP(112, 1),
+ AMD_PINCTRL_FUNC_GRP(112, 2),
+ AMD_PINCTRL_FUNC_GRP(112, 3),
+ AMD_PINCTRL_FUNC_GRP(113, 0),
+ AMD_PINCTRL_FUNC_GRP(113, 1),
+ AMD_PINCTRL_FUNC_GRP(113, 2),
+ AMD_PINCTRL_FUNC_GRP(113, 3),
+ AMD_PINCTRL_FUNC_GRP(114, 0),
+ AMD_PINCTRL_FUNC_GRP(114, 1),
+ AMD_PINCTRL_FUNC_GRP(114, 2),
+ AMD_PINCTRL_FUNC_GRP(114, 3),
+ AMD_PINCTRL_FUNC_GRP(115, 0),
+ AMD_PINCTRL_FUNC_GRP(115, 1),
+ AMD_PINCTRL_FUNC_GRP(115, 2),
+ AMD_PINCTRL_FUNC_GRP(115, 3),
+ AMD_PINCTRL_FUNC_GRP(116, 0),
+ AMD_PINCTRL_FUNC_GRP(116, 1),
+ AMD_PINCTRL_FUNC_GRP(116, 2),
+ AMD_PINCTRL_FUNC_GRP(116, 3),
+ AMD_PINCTRL_FUNC_GRP(117, 0),
+ AMD_PINCTRL_FUNC_GRP(117, 1),
+ AMD_PINCTRL_FUNC_GRP(117, 2),
+ AMD_PINCTRL_FUNC_GRP(117, 3),
+ AMD_PINCTRL_FUNC_GRP(118, 0),
+ AMD_PINCTRL_FUNC_GRP(118, 1),
+ AMD_PINCTRL_FUNC_GRP(118, 2),
+ AMD_PINCTRL_FUNC_GRP(118, 3),
+ AMD_PINCTRL_FUNC_GRP(119, 0),
+ AMD_PINCTRL_FUNC_GRP(119, 1),
+ AMD_PINCTRL_FUNC_GRP(119, 2),
+ AMD_PINCTRL_FUNC_GRP(119, 3),
+ AMD_PINCTRL_FUNC_GRP(120, 0),
+ AMD_PINCTRL_FUNC_GRP(120, 1),
+ AMD_PINCTRL_FUNC_GRP(120, 2),
+ AMD_PINCTRL_FUNC_GRP(120, 3),
+ AMD_PINCTRL_FUNC_GRP(121, 0),
+ AMD_PINCTRL_FUNC_GRP(121, 1),
+ AMD_PINCTRL_FUNC_GRP(121, 2),
+ AMD_PINCTRL_FUNC_GRP(121, 3),
+ AMD_PINCTRL_FUNC_GRP(122, 0),
+ AMD_PINCTRL_FUNC_GRP(122, 1),
+ AMD_PINCTRL_FUNC_GRP(122, 2),
+ AMD_PINCTRL_FUNC_GRP(122, 3),
+ AMD_PINCTRL_FUNC_GRP(123, 0),
+ AMD_PINCTRL_FUNC_GRP(123, 1),
+ AMD_PINCTRL_FUNC_GRP(123, 2),
+ AMD_PINCTRL_FUNC_GRP(123, 3),
+ AMD_PINCTRL_FUNC_GRP(124, 0),
+ AMD_PINCTRL_FUNC_GRP(124, 1),
+ AMD_PINCTRL_FUNC_GRP(124, 2),
+ AMD_PINCTRL_FUNC_GRP(124, 3),
+ AMD_PINCTRL_FUNC_GRP(125, 0),
+ AMD_PINCTRL_FUNC_GRP(125, 1),
+ AMD_PINCTRL_FUNC_GRP(125, 2),
+ AMD_PINCTRL_FUNC_GRP(125, 3),
+ AMD_PINCTRL_FUNC_GRP(126, 0),
+ AMD_PINCTRL_FUNC_GRP(126, 1),
+ AMD_PINCTRL_FUNC_GRP(126, 2),
+ AMD_PINCTRL_FUNC_GRP(126, 3),
+ AMD_PINCTRL_FUNC_GRP(127, 0),
+ AMD_PINCTRL_FUNC_GRP(127, 1),
+ AMD_PINCTRL_FUNC_GRP(127, 2),
+ AMD_PINCTRL_FUNC_GRP(127, 3),
+ AMD_PINCTRL_FUNC_GRP(128, 0),
+ AMD_PINCTRL_FUNC_GRP(128, 1),
+ AMD_PINCTRL_FUNC_GRP(128, 2),
+ AMD_PINCTRL_FUNC_GRP(128, 3),
+ AMD_PINCTRL_FUNC_GRP(129, 0),
+ AMD_PINCTRL_FUNC_GRP(129, 1),
+ AMD_PINCTRL_FUNC_GRP(129, 2),
+ AMD_PINCTRL_FUNC_GRP(129, 3),
+ AMD_PINCTRL_FUNC_GRP(130, 0),
+ AMD_PINCTRL_FUNC_GRP(130, 1),
+ AMD_PINCTRL_FUNC_GRP(130, 2),
+ AMD_PINCTRL_FUNC_GRP(130, 3),
+ AMD_PINCTRL_FUNC_GRP(131, 0),
+ AMD_PINCTRL_FUNC_GRP(131, 1),
+ AMD_PINCTRL_FUNC_GRP(131, 2),
+ AMD_PINCTRL_FUNC_GRP(131, 3),
+ AMD_PINCTRL_FUNC_GRP(132, 0),
+ AMD_PINCTRL_FUNC_GRP(132, 1),
+ AMD_PINCTRL_FUNC_GRP(132, 2),
+ AMD_PINCTRL_FUNC_GRP(132, 3),
+ AMD_PINCTRL_FUNC_GRP(133, 0),
+ AMD_PINCTRL_FUNC_GRP(133, 1),
+ AMD_PINCTRL_FUNC_GRP(133, 2),
+ AMD_PINCTRL_FUNC_GRP(133, 3),
+ AMD_PINCTRL_FUNC_GRP(134, 0),
+ AMD_PINCTRL_FUNC_GRP(134, 1),
+ AMD_PINCTRL_FUNC_GRP(134, 2),
+ AMD_PINCTRL_FUNC_GRP(134, 3),
+ AMD_PINCTRL_FUNC_GRP(135, 0),
+ AMD_PINCTRL_FUNC_GRP(135, 1),
+ AMD_PINCTRL_FUNC_GRP(135, 2),
+ AMD_PINCTRL_FUNC_GRP(135, 3),
+ AMD_PINCTRL_FUNC_GRP(136, 0),
+ AMD_PINCTRL_FUNC_GRP(136, 1),
+ AMD_PINCTRL_FUNC_GRP(136, 2),
+ AMD_PINCTRL_FUNC_GRP(136, 3),
+ AMD_PINCTRL_FUNC_GRP(137, 0),
+ AMD_PINCTRL_FUNC_GRP(137, 1),
+ AMD_PINCTRL_FUNC_GRP(137, 2),
+ AMD_PINCTRL_FUNC_GRP(137, 3),
+ AMD_PINCTRL_FUNC_GRP(138, 0),
+ AMD_PINCTRL_FUNC_GRP(138, 1),
+ AMD_PINCTRL_FUNC_GRP(138, 2),
+ AMD_PINCTRL_FUNC_GRP(138, 3),
+ AMD_PINCTRL_FUNC_GRP(139, 0),
+ AMD_PINCTRL_FUNC_GRP(139, 1),
+ AMD_PINCTRL_FUNC_GRP(139, 2),
+ AMD_PINCTRL_FUNC_GRP(139, 3),
+ AMD_PINCTRL_FUNC_GRP(140, 0),
+ AMD_PINCTRL_FUNC_GRP(140, 1),
+ AMD_PINCTRL_FUNC_GRP(140, 2),
+ AMD_PINCTRL_FUNC_GRP(140, 3),
+ AMD_PINCTRL_FUNC_GRP(141, 0),
+ AMD_PINCTRL_FUNC_GRP(141, 1),
+ AMD_PINCTRL_FUNC_GRP(141, 2),
+ AMD_PINCTRL_FUNC_GRP(141, 3),
+ AMD_PINCTRL_FUNC_GRP(142, 0),
+ AMD_PINCTRL_FUNC_GRP(142, 1),
+ AMD_PINCTRL_FUNC_GRP(142, 2),
+ AMD_PINCTRL_FUNC_GRP(142, 3),
+ AMD_PINCTRL_FUNC_GRP(143, 0),
+ AMD_PINCTRL_FUNC_GRP(143, 1),
+ AMD_PINCTRL_FUNC_GRP(143, 2),
+ AMD_PINCTRL_FUNC_GRP(143, 3),
+ AMD_PINCTRL_FUNC_GRP(144, 0),
+ AMD_PINCTRL_FUNC_GRP(144, 1),
+ AMD_PINCTRL_FUNC_GRP(144, 2),
+ AMD_PINCTRL_FUNC_GRP(144, 3),
+
+ PINCTRL_PINGROUP("i2c0", AMD_PINS(145, 146), 2),
+ PINCTRL_PINGROUP("i2c1", AMD_PINS(147, 148), 2),
+ PINCTRL_PINGROUP("i2c2", AMD_PINS(113, 114), 2),
+ PINCTRL_PINGROUP("i2c3", AMD_PINS(19, 20), 2),
+ PINCTRL_PINGROUP("uart0", AMD_PINS(135, 136, 137, 138, 139), 5),
+ PINCTRL_PINGROUP("uart1", AMD_PINS(140, 141, 142, 143, 144), 5),
+};
-static const unsigned uart0_pins[] = {135, 136, 137, 138, 139};
-static const unsigned uart1_pins[] = {140, 141, 142, 143, 144};
+#define AMD_PMUX_FUNC(_number) { \
+ .name = "iomux_gpio_"#_number, \
+ .groups = { \
+ "IMX_F0_GPIO"#_number, "IMX_F1_GPIO"#_number, \
+ "IMX_F2_GPIO"#_number, "IMX_F3_GPIO"#_number, \
+ }, \
+ .index = _number, \
+ .ngroups = NSELECTS, \
+}
-static const struct amd_pingroup kerncz_groups[] = {
- {
- .name = "i2c0",
- .pins = i2c0_pins,
- .npins = 2,
- },
- {
- .name = "i2c1",
- .pins = i2c1_pins,
- .npins = 2,
- },
- {
- .name = "i2c2",
- .pins = i2c2_pins,
- .npins = 2,
- },
- {
- .name = "i2c3",
- .pins = i2c3_pins,
- .npins = 2,
- },
- {
- .name = "uart0",
- .pins = uart0_pins,
- .npins = 5,
- },
- {
- .name = "uart1",
- .pins = uart1_pins,
- .npins = 5,
- },
+static const struct amd_function pmx_functions[] = {
+ AMD_PMUX_FUNC(0),
+ AMD_PMUX_FUNC(1),
+ AMD_PMUX_FUNC(2),
+ AMD_PMUX_FUNC(3),
+ AMD_PMUX_FUNC(4),
+ AMD_PMUX_FUNC(5),
+ AMD_PMUX_FUNC(6),
+ AMD_PMUX_FUNC(7),
+ AMD_PMUX_FUNC(8),
+ AMD_PMUX_FUNC(9),
+ AMD_PMUX_FUNC(10),
+ AMD_PMUX_FUNC(11),
+ AMD_PMUX_FUNC(12),
+ AMD_PMUX_FUNC(13),
+ AMD_PMUX_FUNC(14),
+ AMD_PMUX_FUNC(15),
+ AMD_PMUX_FUNC(16),
+ AMD_PMUX_FUNC(17),
+ AMD_PMUX_FUNC(18),
+ AMD_PMUX_FUNC(19),
+ AMD_PMUX_FUNC(20),
+ AMD_PMUX_FUNC(21),
+ AMD_PMUX_FUNC(22),
+ AMD_PMUX_FUNC(23),
+ AMD_PMUX_FUNC(24),
+ AMD_PMUX_FUNC(25),
+ AMD_PMUX_FUNC(26),
+ AMD_PMUX_FUNC(27),
+ AMD_PMUX_FUNC(28),
+ AMD_PMUX_FUNC(29),
+ AMD_PMUX_FUNC(30),
+ AMD_PMUX_FUNC(31),
+ AMD_PMUX_FUNC(32),
+ AMD_PMUX_FUNC(33),
+ AMD_PMUX_FUNC(34),
+ AMD_PMUX_FUNC(35),
+ AMD_PMUX_FUNC(36),
+ AMD_PMUX_FUNC(37),
+ AMD_PMUX_FUNC(38),
+ AMD_PMUX_FUNC(39),
+ AMD_PMUX_FUNC(40),
+ AMD_PMUX_FUNC(41),
+ AMD_PMUX_FUNC(42),
+ AMD_PMUX_FUNC(43),
+ AMD_PMUX_FUNC(44),
+ AMD_PMUX_FUNC(45),
+ AMD_PMUX_FUNC(46),
+ AMD_PMUX_FUNC(47),
+ AMD_PMUX_FUNC(48),
+ AMD_PMUX_FUNC(49),
+ AMD_PMUX_FUNC(50),
+ AMD_PMUX_FUNC(51),
+ AMD_PMUX_FUNC(52),
+ AMD_PMUX_FUNC(53),
+ AMD_PMUX_FUNC(54),
+ AMD_PMUX_FUNC(55),
+ AMD_PMUX_FUNC(56),
+ AMD_PMUX_FUNC(57),
+ AMD_PMUX_FUNC(58),
+ AMD_PMUX_FUNC(59),
+ AMD_PMUX_FUNC(60),
+ AMD_PMUX_FUNC(61),
+ AMD_PMUX_FUNC(62),
+ AMD_PMUX_FUNC(64),
+ AMD_PMUX_FUNC(65),
+ AMD_PMUX_FUNC(66),
+ AMD_PMUX_FUNC(67),
+ AMD_PMUX_FUNC(68),
+ AMD_PMUX_FUNC(69),
+ AMD_PMUX_FUNC(70),
+ AMD_PMUX_FUNC(71),
+ AMD_PMUX_FUNC(72),
+ AMD_PMUX_FUNC(73),
+ AMD_PMUX_FUNC(74),
+ AMD_PMUX_FUNC(75),
+ AMD_PMUX_FUNC(76),
+ AMD_PMUX_FUNC(77),
+ AMD_PMUX_FUNC(78),
+ AMD_PMUX_FUNC(79),
+ AMD_PMUX_FUNC(80),
+ AMD_PMUX_FUNC(81),
+ AMD_PMUX_FUNC(82),
+ AMD_PMUX_FUNC(83),
+ AMD_PMUX_FUNC(84),
+ AMD_PMUX_FUNC(85),
+ AMD_PMUX_FUNC(86),
+ AMD_PMUX_FUNC(87),
+ AMD_PMUX_FUNC(88),
+ AMD_PMUX_FUNC(89),
+ AMD_PMUX_FUNC(90),
+ AMD_PMUX_FUNC(91),
+ AMD_PMUX_FUNC(92),
+ AMD_PMUX_FUNC(93),
+ AMD_PMUX_FUNC(94),
+ AMD_PMUX_FUNC(95),
+ AMD_PMUX_FUNC(96),
+ AMD_PMUX_FUNC(97),
+ AMD_PMUX_FUNC(98),
+ AMD_PMUX_FUNC(99),
+ AMD_PMUX_FUNC(100),
+ AMD_PMUX_FUNC(101),
+ AMD_PMUX_FUNC(102),
+ AMD_PMUX_FUNC(103),
+ AMD_PMUX_FUNC(104),
+ AMD_PMUX_FUNC(105),
+ AMD_PMUX_FUNC(106),
+ AMD_PMUX_FUNC(107),
+ AMD_PMUX_FUNC(108),
+ AMD_PMUX_FUNC(109),
+ AMD_PMUX_FUNC(110),
+ AMD_PMUX_FUNC(111),
+ AMD_PMUX_FUNC(112),
+ AMD_PMUX_FUNC(113),
+ AMD_PMUX_FUNC(114),
+ AMD_PMUX_FUNC(115),
+ AMD_PMUX_FUNC(116),
+ AMD_PMUX_FUNC(117),
+ AMD_PMUX_FUNC(118),
+ AMD_PMUX_FUNC(119),
+ AMD_PMUX_FUNC(120),
+ AMD_PMUX_FUNC(121),
+ AMD_PMUX_FUNC(122),
+ AMD_PMUX_FUNC(123),
+ AMD_PMUX_FUNC(124),
+ AMD_PMUX_FUNC(125),
+ AMD_PMUX_FUNC(126),
+ AMD_PMUX_FUNC(127),
+ AMD_PMUX_FUNC(128),
+ AMD_PMUX_FUNC(129),
+ AMD_PMUX_FUNC(130),
+ AMD_PMUX_FUNC(131),
+ AMD_PMUX_FUNC(132),
+ AMD_PMUX_FUNC(133),
+ AMD_PMUX_FUNC(134),
+ AMD_PMUX_FUNC(135),
+ AMD_PMUX_FUNC(136),
+ AMD_PMUX_FUNC(137),
+ AMD_PMUX_FUNC(138),
+ AMD_PMUX_FUNC(139),
+ AMD_PMUX_FUNC(140),
+ AMD_PMUX_FUNC(141),
+ AMD_PMUX_FUNC(142),
+ AMD_PMUX_FUNC(143),
+ AMD_PMUX_FUNC(144),
};
#endif
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index 517f2a6330ad..82b921fd630d 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -237,8 +237,6 @@ static void atmel_gpio_irq_unmask(struct irq_data *d)
BIT(pin->line));
}
-#ifdef CONFIG_PM_SLEEP
-
static int atmel_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
{
struct atmel_pioctrl *atmel_pioctrl = irq_data_get_irq_chip_data(d);
@@ -255,9 +253,6 @@ static int atmel_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
return 0;
}
-#else
-#define atmel_gpio_irq_set_wake NULL
-#endif /* CONFIG_PM_SLEEP */
static struct irq_chip atmel_gpio_irq_chip = {
.name = "GPIO",
@@ -265,7 +260,7 @@ static struct irq_chip atmel_gpio_irq_chip = {
.irq_mask = atmel_gpio_irq_mask,
.irq_unmask = atmel_gpio_irq_unmask,
.irq_set_type = atmel_gpio_irq_set_type,
- .irq_set_wake = atmel_gpio_irq_set_wake,
+ .irq_set_wake = pm_sleep_ptr(atmel_gpio_irq_set_wake),
};
static int atmel_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index d91a010e65f5..5634fa063ebf 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1615,8 +1615,6 @@ static void gpio_irq_ack(struct irq_data *d)
/* the interrupt is already cleared before by reading ISR */
}
-#ifdef CONFIG_PM
-
static u32 wakeups[MAX_GPIO_BANKS];
static u32 backups[MAX_GPIO_BANKS];
@@ -1683,10 +1681,6 @@ void at91_pinctrl_gpio_resume(void)
}
}
-#else
-#define gpio_irq_set_wake NULL
-#endif /* CONFIG_PM */
-
static void gpio_irq_handler(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -1741,14 +1735,14 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev,
gpio_irqchip->irq_disable = gpio_irq_mask;
gpio_irqchip->irq_mask = gpio_irq_mask;
gpio_irqchip->irq_unmask = gpio_irq_unmask;
- gpio_irqchip->irq_set_wake = gpio_irq_set_wake;
+ gpio_irqchip->irq_set_wake = pm_ptr(gpio_irq_set_wake);
gpio_irqchip->irq_set_type = at91_gpio->ops->irq_type;
/* Disable irqs of this PIO controller */
writel_relaxed(~0, at91_gpio->regbase + PIO_IDR);
/*
- * Let the generic code handle this edge IRQ, the the chained
+ * Let the generic code handle this edge IRQ, the chained
* handler will perform the actual work of handling the parent
* interrupt.
*/
diff --git a/drivers/pinctrl/pinctrl-axp209.c b/drivers/pinctrl/pinctrl-axp209.c
index 207cbae3a7bf..7ab20ac15391 100644
--- a/drivers/pinctrl/pinctrl-axp209.c
+++ b/drivers/pinctrl/pinctrl-axp209.c
@@ -73,7 +73,7 @@ static const struct pinctrl_pin_desc axp209_pins[] = {
PINCTRL_PIN(2, "GPIO2"),
};
-static const struct pinctrl_pin_desc axp813_pins[] = {
+static const struct pinctrl_pin_desc axp22x_pins[] = {
PINCTRL_PIN(0, "GPIO0"),
PINCTRL_PIN(1, "GPIO1"),
};
@@ -87,9 +87,16 @@ static const struct axp20x_pctrl_desc axp20x_data = {
.adc_mux = AXP20X_MUX_ADC,
};
+static const struct axp20x_pctrl_desc axp22x_data = {
+ .pins = axp22x_pins,
+ .npins = ARRAY_SIZE(axp22x_pins),
+ .ldo_mask = BIT(0) | BIT(1),
+ .gpio_status_offset = 0,
+};
+
static const struct axp20x_pctrl_desc axp813_data = {
- .pins = axp813_pins,
- .npins = ARRAY_SIZE(axp813_pins),
+ .pins = axp22x_pins,
+ .npins = ARRAY_SIZE(axp22x_pins),
.ldo_mask = BIT(0) | BIT(1),
.adc_mask = BIT(0),
.gpio_status_offset = 0,
@@ -388,6 +395,7 @@ static int axp20x_build_funcs_groups(struct platform_device *pdev)
static const struct of_device_id axp20x_pctl_match[] = {
{ .compatible = "x-powers,axp209-gpio", .data = &axp20x_data, },
+ { .compatible = "x-powers,axp221-gpio", .data = &axp22x_data, },
{ .compatible = "x-powers,axp813-gpio", .data = &axp813_data, },
{ }
};
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index 1ca11616db74..3a9ee9c8af11 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -21,6 +21,7 @@
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
#include "core.h"
@@ -135,7 +136,6 @@ struct ingenic_pinctrl {
struct ingenic_gpio_chip {
struct ingenic_pinctrl *jzpc;
struct gpio_chip gc;
- struct irq_chip irq_chip;
unsigned int irq, reg_base;
};
@@ -3393,7 +3393,7 @@ static void ingenic_gpio_irq_mask(struct irq_data *irqd)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
- int irq = irqd->hwirq;
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
if (is_soc_or_above(jzgc->jzpc, ID_JZ4740))
ingenic_gpio_set_bit(jzgc, GPIO_MSK, irq, true);
@@ -3405,7 +3405,7 @@ static void ingenic_gpio_irq_unmask(struct irq_data *irqd)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
- int irq = irqd->hwirq;
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
if (is_soc_or_above(jzgc->jzpc, ID_JZ4740))
ingenic_gpio_set_bit(jzgc, GPIO_MSK, irq, false);
@@ -3417,7 +3417,9 @@ static void ingenic_gpio_irq_enable(struct irq_data *irqd)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
- int irq = irqd->hwirq;
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
+
+ gpiochip_enable_irq(gc, irq);
if (is_soc_or_above(jzgc->jzpc, ID_JZ4770))
ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_INT, irq, true);
@@ -3433,7 +3435,7 @@ static void ingenic_gpio_irq_disable(struct irq_data *irqd)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
- int irq = irqd->hwirq;
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
ingenic_gpio_irq_mask(irqd);
@@ -3443,13 +3445,15 @@ static void ingenic_gpio_irq_disable(struct irq_data *irqd)
ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, false);
else
ingenic_gpio_set_bit(jzgc, JZ4730_GPIO_GPIER, irq, false);
+
+ gpiochip_disable_irq(gc, irq);
}
static void ingenic_gpio_irq_ack(struct irq_data *irqd)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
- int irq = irqd->hwirq;
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
bool high;
if ((irqd_get_trigger_type(irqd) == IRQ_TYPE_EDGE_BOTH) &&
@@ -3477,6 +3481,7 @@ static int ingenic_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
switch (type) {
case IRQ_TYPE_EDGE_BOTH:
@@ -3498,12 +3503,12 @@ static int ingenic_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
* best we can do is to set up a single-edge interrupt and then
* switch to the opposing edge when ACKing the interrupt.
*/
- bool high = ingenic_gpio_get_value(jzgc, irqd->hwirq);
+ bool high = ingenic_gpio_get_value(jzgc, irq);
type = high ? IRQ_TYPE_LEVEL_LOW : IRQ_TYPE_LEVEL_HIGH;
}
- irq_set_type(jzgc, irqd->hwirq, type);
+ irq_set_type(jzgc, irq, type);
return 0;
}
@@ -3668,22 +3673,45 @@ static const struct pinctrl_ops ingenic_pctlops = {
static int ingenic_gpio_irq_request(struct irq_data *data)
{
struct gpio_chip *gpio_chip = irq_data_get_irq_chip_data(data);
+ irq_hw_number_t irq = irqd_to_hwirq(data);
int ret;
- ret = ingenic_gpio_direction_input(gpio_chip, data->hwirq);
+ ret = ingenic_gpio_direction_input(gpio_chip, irq);
if (ret)
return ret;
- return gpiochip_reqres_irq(gpio_chip, data->hwirq);
+ return gpiochip_reqres_irq(gpio_chip, irq);
}
static void ingenic_gpio_irq_release(struct irq_data *data)
{
struct gpio_chip *gpio_chip = irq_data_get_irq_chip_data(data);
+ irq_hw_number_t irq = irqd_to_hwirq(data);
+
+ return gpiochip_relres_irq(gpio_chip, irq);
+}
+
+static void ingenic_gpio_irq_print_chip(struct irq_data *data, struct seq_file *p)
+{
+ struct gpio_chip *gpio_chip = irq_data_get_irq_chip_data(data);
- return gpiochip_relres_irq(gpio_chip, data->hwirq);
+ seq_printf(p, "%s", gpio_chip->label);
}
+static const struct irq_chip ingenic_gpio_irqchip = {
+ .irq_enable = ingenic_gpio_irq_enable,
+ .irq_disable = ingenic_gpio_irq_disable,
+ .irq_unmask = ingenic_gpio_irq_unmask,
+ .irq_mask = ingenic_gpio_irq_mask,
+ .irq_ack = ingenic_gpio_irq_ack,
+ .irq_set_type = ingenic_gpio_irq_set_type,
+ .irq_set_wake = ingenic_gpio_irq_set_wake,
+ .irq_request_resources = ingenic_gpio_irq_request,
+ .irq_release_resources = ingenic_gpio_irq_release,
+ .irq_print_chip = ingenic_gpio_irq_print_chip,
+ .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE,
+};
+
static int ingenic_pinmux_set_pin_fn(struct ingenic_pinctrl *jzpc,
int pin, int func)
{
@@ -4172,20 +4200,8 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
if (!jzgc->irq)
return -EINVAL;
- jzgc->irq_chip.name = jzgc->gc.label;
- jzgc->irq_chip.irq_enable = ingenic_gpio_irq_enable;
- jzgc->irq_chip.irq_disable = ingenic_gpio_irq_disable;
- jzgc->irq_chip.irq_unmask = ingenic_gpio_irq_unmask;
- jzgc->irq_chip.irq_mask = ingenic_gpio_irq_mask;
- jzgc->irq_chip.irq_ack = ingenic_gpio_irq_ack;
- jzgc->irq_chip.irq_set_type = ingenic_gpio_irq_set_type;
- jzgc->irq_chip.irq_set_wake = ingenic_gpio_irq_set_wake;
- jzgc->irq_chip.irq_request_resources = ingenic_gpio_irq_request;
- jzgc->irq_chip.irq_release_resources = ingenic_gpio_irq_release;
- jzgc->irq_chip.flags = IRQCHIP_MASK_ON_SUSPEND;
-
girq = &jzgc->gc.irq;
- girq->chip = &jzgc->irq_chip;
+ gpio_irq_chip_set_chip(girq, &ingenic_gpio_irqchip);
girq->parent_handler = ingenic_gpio_irq_handler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index 771dd1f4fbe0..c5fd154990c8 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -1944,6 +1944,7 @@ static const struct of_device_id ocelot_pinctrl_of_match[] = {
{ .compatible = "microchip,lan966x-pinctrl", .data = &lan966x_desc },
{},
};
+MODULE_DEVICE_TABLE(of, ocelot_pinctrl_of_match);
static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev,
const struct ocelot_pinctrl *info)
@@ -2050,4 +2051,5 @@ static struct platform_driver ocelot_pinctrl_driver = {
},
.probe = ocelot_pinctrl_probe,
};
-builtin_platform_driver(ocelot_pinctrl_driver);
+module_platform_driver(ocelot_pinctrl_driver);
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/pinctrl/pinctrl-starfive.c b/drivers/pinctrl/pinctrl-starfive.c
index 2a86c1035cc8..3eb40e230d98 100644
--- a/drivers/pinctrl/pinctrl-starfive.c
+++ b/drivers/pinctrl/pinctrl-starfive.c
@@ -207,6 +207,7 @@ struct starfive_pinctrl {
void __iomem *base;
void __iomem *padctl;
struct pinctrl_dev *pctl;
+ struct mutex mutex; /* serialize adding groups and functions */
};
static inline unsigned int starfive_pin_to_gpio(const struct starfive_pinctrl *sfp,
@@ -522,6 +523,7 @@ static int starfive_dt_node_to_map(struct pinctrl_dev *pctldev,
nmaps = 0;
ngroups = 0;
+ mutex_lock(&sfp->mutex);
for_each_child_of_node(np, child) {
int npins;
int i;
@@ -615,12 +617,14 @@ static int starfive_dt_node_to_map(struct pinctrl_dev *pctldev,
*maps = map;
*num_maps = nmaps;
+ mutex_unlock(&sfp->mutex);
return 0;
put_child:
of_node_put(child);
free_map:
pinctrl_utils_free_map(pctldev, map, nmaps);
+ mutex_unlock(&sfp->mutex);
return ret;
}
@@ -1267,6 +1271,7 @@ static int starfive_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sfp);
sfp->gc.parent = dev;
raw_spin_lock_init(&sfp->lock);
+ mutex_init(&sfp->mutex);
ret = devm_pinctrl_register_and_init(dev, &starfive_desc, sfp, &sfp->pctl);
if (ret)
diff --git a/drivers/pinctrl/pinctrl-zynqmp.c b/drivers/pinctrl/pinctrl-zynqmp.c
index e14012209992..7d2fbf8a02cd 100644
--- a/drivers/pinctrl/pinctrl-zynqmp.c
+++ b/drivers/pinctrl/pinctrl-zynqmp.c
@@ -163,6 +163,8 @@ static const char *zynqmp_pmux_get_function_name(struct pinctrl_dev *pctldev,
* @num_groups: Number of function groups.
*
* Get function's group count and group names.
+ *
+ * Return: 0
*/
static int zynqmp_pmux_get_function_groups(struct pinctrl_dev *pctldev,
unsigned int selector,
@@ -410,6 +412,10 @@ static int zynqmp_pinconf_cfg_set(struct pinctrl_dev *pctldev,
break;
case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ param = PM_PINCTRL_CONFIG_TRI_STATE;
+ arg = PM_PINCTRL_TRI_STATE_ENABLE;
+ ret = zynqmp_pm_pinctrl_set_config(pin, param, arg);
+ break;
case PIN_CONFIG_MODE_LOW_POWER:
/*
* These cases are mentioned in dts but configurable
@@ -418,6 +424,11 @@ static int zynqmp_pinconf_cfg_set(struct pinctrl_dev *pctldev,
*/
ret = 0;
break;
+ case PIN_CONFIG_OUTPUT_ENABLE:
+ param = PM_PINCTRL_CONFIG_TRI_STATE;
+ arg = PM_PINCTRL_TRI_STATE_DISABLE;
+ ret = zynqmp_pm_pinctrl_set_config(pin, param, arg);
+ break;
default:
dev_warn(pctldev->dev,
"unsupported configuration parameter '%u'\n",
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 3daeb9772391..f415c13caae0 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -113,6 +113,14 @@ config PINCTRL_MSM8X74
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm TLMM block found in the Qualcomm 8974 platform.
+config PINCTRL_MSM8909
+ tristate "Qualcomm 8909 pin controller driver"
+ depends on OF
+ depends on PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm TLMM block found on the Qualcomm MSM8909 platform.
+
config PINCTRL_MSM8916
tristate "Qualcomm 8916 pin controller driver"
depends on OF
@@ -320,6 +328,15 @@ config PINCTRL_SM6350
Qualcomm Technologies Inc TLMM block found on the Qualcomm
Technologies Inc SM6350 platform.
+config PINCTRL_SM6375
+ tristate "Qualcomm Technologies Inc SM6375 pin controller driver"
+ depends on GPIOLIB && OF
+ depends on PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc TLMM block found on the Qualcomm
+ Technologies Inc SM6375 platform.
+
config PINCTRL_SDX65
tristate "Qualcomm Technologies Inc SDX65 pin controller driver"
depends on GPIOLIB && OF
@@ -367,7 +384,7 @@ config PINCTRL_SM8350
config PINCTRL_SM8450
tristate "Qualcomm Technologies Inc SM8450 pin controller driver"
depends on GPIOLIB && OF
- select PINCTRL_MSM
+ depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm Technologies Inc TLMM block found on the Qualcomm
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index 4f0ee7597f81..fbd64853a24d 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_PINCTRL_MSM8226) += pinctrl-msm8226.o
obj-$(CONFIG_PINCTRL_MSM8660) += pinctrl-msm8660.o
obj-$(CONFIG_PINCTRL_MSM8960) += pinctrl-msm8960.o
obj-$(CONFIG_PINCTRL_MSM8X74) += pinctrl-msm8x74.o
+obj-$(CONFIG_PINCTRL_MSM8909) += pinctrl-msm8909.o
obj-$(CONFIG_PINCTRL_MSM8916) += pinctrl-msm8916.o
obj-$(CONFIG_PINCTRL_MSM8953) += pinctrl-msm8953.o
obj-$(CONFIG_PINCTRL_MSM8976) += pinctrl-msm8976.o
@@ -37,6 +38,7 @@ obj-$(CONFIG_PINCTRL_SDX55) += pinctrl-sdx55.o
obj-$(CONFIG_PINCTRL_SM6115) += pinctrl-sm6115.o
obj-$(CONFIG_PINCTRL_SM6125) += pinctrl-sm6125.o
obj-$(CONFIG_PINCTRL_SM6350) += pinctrl-sm6350.o
+obj-$(CONFIG_PINCTRL_SM6375) += pinctrl-sm6375.o
obj-$(CONFIG_PINCTRL_SDX65) += pinctrl-sdx65.o
obj-$(CONFIG_PINCTRL_SM8150) += pinctrl-sm8150.o
obj-$(CONFIG_PINCTRL_SM8250) += pinctrl-sm8250.o
diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
index 74810ec4df44..e97ce45b6d53 100644
--- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
@@ -401,7 +401,7 @@ int lpi_pinctrl_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(pctrl->slew_base),
"Slew resource not provided\n");
- if (data->is_clk_optional)
+ if (of_property_read_bool(dev->of_node, "qcom,adsp-bypass-mode"))
ret = devm_clk_bulk_get_optional(dev, MAX_LPI_NUM_CLKS, pctrl->clks);
else
ret = devm_clk_bulk_get(dev, MAX_LPI_NUM_CLKS, pctrl->clks);
diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h
index 759d5d8da562..afbac2a6c82c 100644
--- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h
+++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h
@@ -77,7 +77,6 @@ struct lpi_pinctrl_variant_data {
int ngroups;
const struct lpi_function *functions;
int nfunctions;
- bool is_clk_optional;
};
int lpi_pinctrl_probe(struct platform_device *pdev);
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8909.c b/drivers/pinctrl/qcom/pinctrl-msm8909.c
new file mode 100644
index 000000000000..6dd15b910632
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-msm8909.c
@@ -0,0 +1,956 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2022, Kernkonzept GmbH.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+#define FUNCTION(fname) \
+ [msm_mux_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+#define REG_SIZE 0x1000
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9, \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = REG_SIZE * id, \
+ .io_reg = 0x4 + REG_SIZE * id, \
+ .intr_cfg_reg = 0x8 + REG_SIZE * id, \
+ .intr_status_reg = 0xc + REG_SIZE * id, \
+ .intr_target_reg = 0x8 + REG_SIZE * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 4, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+static const struct pinctrl_pin_desc msm8909_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "SDC1_CLK"),
+ PINCTRL_PIN(114, "SDC1_CMD"),
+ PINCTRL_PIN(115, "SDC1_DATA"),
+ PINCTRL_PIN(116, "SDC2_CLK"),
+ PINCTRL_PIN(117, "SDC2_CMD"),
+ PINCTRL_PIN(118, "SDC2_DATA"),
+ PINCTRL_PIN(119, "QDSD_CLK"),
+ PINCTRL_PIN(120, "QDSD_CMD"),
+ PINCTRL_PIN(121, "QDSD_DATA0"),
+ PINCTRL_PIN(122, "QDSD_DATA1"),
+ PINCTRL_PIN(123, "QDSD_DATA2"),
+ PINCTRL_PIN(124, "QDSD_DATA3"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+
+static const unsigned int sdc1_clk_pins[] = { 113 };
+static const unsigned int sdc1_cmd_pins[] = { 114 };
+static const unsigned int sdc1_data_pins[] = { 115 };
+static const unsigned int sdc2_clk_pins[] = { 116 };
+static const unsigned int sdc2_cmd_pins[] = { 117 };
+static const unsigned int sdc2_data_pins[] = { 118 };
+static const unsigned int qdsd_clk_pins[] = { 119 };
+static const unsigned int qdsd_cmd_pins[] = { 120 };
+static const unsigned int qdsd_data0_pins[] = { 121 };
+static const unsigned int qdsd_data1_pins[] = { 122 };
+static const unsigned int qdsd_data2_pins[] = { 123 };
+static const unsigned int qdsd_data3_pins[] = { 124 };
+
+enum msm8909_functions {
+ msm_mux_gpio,
+ msm_mux_adsp_ext,
+ msm_mux_atest_bbrx0,
+ msm_mux_atest_bbrx1,
+ msm_mux_atest_char,
+ msm_mux_atest_char0,
+ msm_mux_atest_char1,
+ msm_mux_atest_char2,
+ msm_mux_atest_char3,
+ msm_mux_atest_combodac,
+ msm_mux_atest_gpsadc0,
+ msm_mux_atest_gpsadc1,
+ msm_mux_atest_wlan0,
+ msm_mux_atest_wlan1,
+ msm_mux_bimc_dte0,
+ msm_mux_bimc_dte1,
+ msm_mux_blsp_i2c1,
+ msm_mux_blsp_i2c2,
+ msm_mux_blsp_i2c3,
+ msm_mux_blsp_i2c4,
+ msm_mux_blsp_i2c5,
+ msm_mux_blsp_i2c6,
+ msm_mux_blsp_spi1,
+ msm_mux_blsp_spi1_cs1,
+ msm_mux_blsp_spi1_cs2,
+ msm_mux_blsp_spi1_cs3,
+ msm_mux_blsp_spi2,
+ msm_mux_blsp_spi2_cs1,
+ msm_mux_blsp_spi2_cs2,
+ msm_mux_blsp_spi2_cs3,
+ msm_mux_blsp_spi3,
+ msm_mux_blsp_spi3_cs1,
+ msm_mux_blsp_spi3_cs2,
+ msm_mux_blsp_spi3_cs3,
+ msm_mux_blsp_spi4,
+ msm_mux_blsp_spi5,
+ msm_mux_blsp_spi6,
+ msm_mux_blsp_uart1,
+ msm_mux_blsp_uart2,
+ msm_mux_blsp_uim1,
+ msm_mux_blsp_uim2,
+ msm_mux_cam_mclk,
+ msm_mux_cci_async,
+ msm_mux_cci_timer0,
+ msm_mux_cci_timer1,
+ msm_mux_cci_timer2,
+ msm_mux_cdc_pdm0,
+ msm_mux_dbg_out,
+ msm_mux_dmic0_clk,
+ msm_mux_dmic0_data,
+ msm_mux_ebi0_wrcdc,
+ msm_mux_ebi2_a,
+ msm_mux_ebi2_lcd,
+ msm_mux_ext_lpass,
+ msm_mux_gcc_gp1_clk_a,
+ msm_mux_gcc_gp1_clk_b,
+ msm_mux_gcc_gp2_clk_a,
+ msm_mux_gcc_gp2_clk_b,
+ msm_mux_gcc_gp3_clk_a,
+ msm_mux_gcc_gp3_clk_b,
+ msm_mux_gcc_plltest,
+ msm_mux_gsm0_tx,
+ msm_mux_ldo_en,
+ msm_mux_ldo_update,
+ msm_mux_m_voc,
+ msm_mux_mdp_vsync,
+ msm_mux_modem_tsync,
+ msm_mux_nav_pps,
+ msm_mux_nav_tsync,
+ msm_mux_pa_indicator,
+ msm_mux_pbs0,
+ msm_mux_pbs1,
+ msm_mux_pbs2,
+ msm_mux_pri_mi2s_data0_a,
+ msm_mux_pri_mi2s_data0_b,
+ msm_mux_pri_mi2s_data1_a,
+ msm_mux_pri_mi2s_data1_b,
+ msm_mux_pri_mi2s_mclk_a,
+ msm_mux_pri_mi2s_mclk_b,
+ msm_mux_pri_mi2s_sck_a,
+ msm_mux_pri_mi2s_sck_b,
+ msm_mux_pri_mi2s_ws_a,
+ msm_mux_pri_mi2s_ws_b,
+ msm_mux_prng_rosc,
+ msm_mux_pwr_crypto_enabled_a,
+ msm_mux_pwr_crypto_enabled_b,
+ msm_mux_pwr_modem_enabled_a,
+ msm_mux_pwr_modem_enabled_b,
+ msm_mux_pwr_nav_enabled_a,
+ msm_mux_pwr_nav_enabled_b,
+ msm_mux_qdss_cti_trig_in_a0,
+ msm_mux_qdss_cti_trig_in_a1,
+ msm_mux_qdss_cti_trig_in_b0,
+ msm_mux_qdss_cti_trig_in_b1,
+ msm_mux_qdss_cti_trig_out_a0,
+ msm_mux_qdss_cti_trig_out_a1,
+ msm_mux_qdss_cti_trig_out_b0,
+ msm_mux_qdss_cti_trig_out_b1,
+ msm_mux_qdss_traceclk_a,
+ msm_mux_qdss_tracectl_a,
+ msm_mux_qdss_tracedata_a,
+ msm_mux_qdss_tracedata_b,
+ msm_mux_sd_write,
+ msm_mux_sec_mi2s,
+ msm_mux_smb_int,
+ msm_mux_ssbi0,
+ msm_mux_ssbi1,
+ msm_mux_uim1_clk,
+ msm_mux_uim1_data,
+ msm_mux_uim1_present,
+ msm_mux_uim1_reset,
+ msm_mux_uim2_clk,
+ msm_mux_uim2_data,
+ msm_mux_uim2_present,
+ msm_mux_uim2_reset,
+ msm_mux_uim3_clk,
+ msm_mux_uim3_data,
+ msm_mux_uim3_present,
+ msm_mux_uim3_reset,
+ msm_mux_uim_batt,
+ msm_mux_wcss_bt,
+ msm_mux_wcss_fm,
+ msm_mux_wcss_wlan,
+ msm_mux__,
+};
+
+static const char * const adsp_ext_groups[] = { "gpio38" };
+static const char * const atest_bbrx0_groups[] = { "gpio37" };
+static const char * const atest_bbrx1_groups[] = { "gpio36" };
+static const char * const atest_char0_groups[] = { "gpio62" };
+static const char * const atest_char1_groups[] = { "gpio61" };
+static const char * const atest_char2_groups[] = { "gpio60" };
+static const char * const atest_char3_groups[] = { "gpio59" };
+static const char * const atest_char_groups[] = { "gpio63" };
+static const char * const atest_combodac_groups[] = {
+ "gpio32", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", "gpio43",
+ "gpio44", "gpio45", "gpio47", "gpio48", "gpio66", "gpio81", "gpio83",
+ "gpio84", "gpio85", "gpio86", "gpio94", "gpio95", "gpio110"
+};
+static const char * const atest_gpsadc0_groups[] = { "gpio65" };
+static const char * const atest_gpsadc1_groups[] = { "gpio79" };
+static const char * const atest_wlan0_groups[] = { "gpio96" };
+static const char * const atest_wlan1_groups[] = { "gpio97" };
+static const char * const bimc_dte0_groups[] = { "gpio6", "gpio59" };
+static const char * const bimc_dte1_groups[] = { "gpio7", "gpio60" };
+static const char * const blsp_i2c1_groups[] = { "gpio6", "gpio7" };
+static const char * const blsp_i2c2_groups[] = { "gpio111", "gpio112" };
+static const char * const blsp_i2c3_groups[] = { "gpio29", "gpio30" };
+static const char * const blsp_i2c4_groups[] = { "gpio14", "gpio15" };
+static const char * const blsp_i2c5_groups[] = { "gpio18", "gpio19" };
+static const char * const blsp_i2c6_groups[] = { "gpio10", "gpio11" };
+static const char * const blsp_spi1_cs1_groups[] = { "gpio97" };
+static const char * const blsp_spi1_cs2_groups[] = { "gpio37" };
+static const char * const blsp_spi1_cs3_groups[] = { "gpio65" };
+static const char * const blsp_spi1_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7"
+};
+static const char * const blsp_spi2_cs1_groups[] = { "gpio98" };
+static const char * const blsp_spi2_cs2_groups[] = { "gpio17" };
+static const char * const blsp_spi2_cs3_groups[] = { "gpio5" };
+static const char * const blsp_spi2_groups[] = {
+ "gpio20", "gpio21", "gpio111", "gpio112"
+};
+static const char * const blsp_spi3_cs1_groups[] = { "gpio95" };
+static const char * const blsp_spi3_cs2_groups[] = { "gpio65" };
+static const char * const blsp_spi3_cs3_groups[] = { "gpio4" };
+static const char * const blsp_spi3_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3"
+};
+static const char * const blsp_spi4_groups[] = {
+ "gpio12", "gpio13", "gpio14", "gpio15"
+};
+static const char * const blsp_spi5_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19"
+};
+static const char * const blsp_spi6_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11"
+};
+static const char * const blsp_uart1_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7"
+};
+static const char * const blsp_uart2_groups[] = {
+ "gpio20", "gpio21", "gpio111", "gpio112"
+};
+static const char * const blsp_uim1_groups[] = { "gpio4", "gpio5" };
+static const char * const blsp_uim2_groups[] = { "gpio20", "gpio21" };
+static const char * const cam_mclk_groups[] = { "gpio26", "gpio27" };
+static const char * const cci_async_groups[] = { "gpio33" };
+static const char * const cci_timer0_groups[] = { "gpio31" };
+static const char * const cci_timer1_groups[] = { "gpio32" };
+static const char * const cci_timer2_groups[] = { "gpio38" };
+static const char * const cdc_pdm0_groups[] = {
+ "gpio59", "gpio60", "gpio61", "gpio62", "gpio63", "gpio64"
+};
+static const char * const dbg_out_groups[] = { "gpio10" };
+static const char * const dmic0_clk_groups[] = { "gpio4" };
+static const char * const dmic0_data_groups[] = { "gpio5" };
+static const char * const ebi0_wrcdc_groups[] = { "gpio64" };
+static const char * const ebi2_a_groups[] = { "gpio99" };
+static const char * const ebi2_lcd_groups[] = {
+ "gpio24", "gpio24", "gpio25", "gpio95"
+};
+static const char * const ext_lpass_groups[] = { "gpio45" };
+static const char * const gcc_gp1_clk_a_groups[] = { "gpio49" };
+static const char * const gcc_gp1_clk_b_groups[] = { "gpio14" };
+static const char * const gcc_gp2_clk_a_groups[] = { "gpio50" };
+static const char * const gcc_gp2_clk_b_groups[] = { "gpio12" };
+static const char * const gcc_gp3_clk_a_groups[] = { "gpio51" };
+static const char * const gcc_gp3_clk_b_groups[] = { "gpio13" };
+static const char * const gcc_plltest_groups[] = { "gpio66", "gpio67" };
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+ "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+ "gpio111", "gpio112"
+};
+static const char * const gsm0_tx_groups[] = { "gpio85" };
+static const char * const ldo_en_groups[] = { "gpio99" };
+static const char * const ldo_update_groups[] = { "gpio98" };
+static const char * const m_voc_groups[] = { "gpio8", "gpio95" };
+static const char * const mdp_vsync_groups[] = { "gpio24", "gpio25" };
+static const char * const modem_tsync_groups[] = { "gpio83" };
+static const char * const nav_pps_groups[] = { "gpio83" };
+static const char * const nav_tsync_groups[] = { "gpio83" };
+static const char * const pa_indicator_groups[] = { "gpio82" };
+static const char * const pbs0_groups[] = { "gpio90" };
+static const char * const pbs1_groups[] = { "gpio91" };
+static const char * const pbs2_groups[] = { "gpio92" };
+static const char * const pri_mi2s_data0_a_groups[] = { "gpio62" };
+static const char * const pri_mi2s_data0_b_groups[] = { "gpio95" };
+static const char * const pri_mi2s_data1_a_groups[] = { "gpio63" };
+static const char * const pri_mi2s_data1_b_groups[] = { "gpio96" };
+static const char * const pri_mi2s_mclk_a_groups[] = { "gpio59" };
+static const char * const pri_mi2s_mclk_b_groups[] = { "gpio98" };
+static const char * const pri_mi2s_sck_a_groups[] = { "gpio60" };
+static const char * const pri_mi2s_sck_b_groups[] = { "gpio94" };
+static const char * const pri_mi2s_ws_a_groups[] = { "gpio61" };
+static const char * const pri_mi2s_ws_b_groups[] = { "gpio110" };
+static const char * const prng_rosc_groups[] = { "gpio43" };
+static const char * const pwr_crypto_enabled_a_groups[] = { "gpio35" };
+static const char * const pwr_crypto_enabled_b_groups[] = { "gpio96" };
+static const char * const pwr_modem_enabled_a_groups[] = { "gpio28" };
+static const char * const pwr_modem_enabled_b_groups[] = { "gpio94" };
+static const char * const pwr_nav_enabled_a_groups[] = { "gpio34" };
+static const char * const pwr_nav_enabled_b_groups[] = { "gpio95" };
+static const char * const qdss_cti_trig_in_a0_groups[] = { "gpio20" };
+static const char * const qdss_cti_trig_in_a1_groups[] = { "gpio49" };
+static const char * const qdss_cti_trig_in_b0_groups[] = { "gpio21" };
+static const char * const qdss_cti_trig_in_b1_groups[] = { "gpio50" };
+static const char * const qdss_cti_trig_out_a0_groups[] = { "gpio23" };
+static const char * const qdss_cti_trig_out_a1_groups[] = { "gpio52" };
+static const char * const qdss_cti_trig_out_b0_groups[] = { "gpio22" };
+static const char * const qdss_cti_trig_out_b1_groups[] = { "gpio51" };
+static const char * const qdss_traceclk_a_groups[] = { "gpio46" };
+static const char * const qdss_tracectl_a_groups[] = { "gpio45" };
+static const char * const qdss_tracedata_a_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio47", "gpio48", "gpio58", "gpio65", "gpio94", "gpio96",
+ "gpio97"
+};
+static const char * const qdss_tracedata_b_groups[] = {
+ "gpio14", "gpio16", "gpio17", "gpio29", "gpio30", "gpio31", "gpio32",
+ "gpio33", "gpio34", "gpio35", "gpio36", "gpio37", "gpio93"
+};
+static const char * const sd_write_groups[] = { "gpio99" };
+static const char * const sec_mi2s_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio98"
+};
+static const char * const smb_int_groups[] = { "gpio58" };
+static const char * const ssbi0_groups[] = { "gpio88" };
+static const char * const ssbi1_groups[] = { "gpio89" };
+static const char * const uim1_clk_groups[] = { "gpio54" };
+static const char * const uim1_data_groups[] = { "gpio53" };
+static const char * const uim1_present_groups[] = { "gpio56" };
+static const char * const uim1_reset_groups[] = { "gpio55" };
+static const char * const uim2_clk_groups[] = { "gpio50" };
+static const char * const uim2_data_groups[] = { "gpio49" };
+static const char * const uim2_present_groups[] = { "gpio52" };
+static const char * const uim2_reset_groups[] = { "gpio51" };
+static const char * const uim3_clk_groups[] = { "gpio23" };
+static const char * const uim3_data_groups[] = { "gpio20" };
+static const char * const uim3_present_groups[] = { "gpio21" };
+static const char * const uim3_reset_groups[] = { "gpio22" };
+static const char * const uim_batt_groups[] = { "gpio57" };
+static const char * const wcss_bt_groups[] = { "gpio39", "gpio47", "gpio48" };
+static const char * const wcss_fm_groups[] = { "gpio45", "gpio46" };
+static const char * const wcss_wlan_groups[] = {
+ "gpio40", "gpio41", "gpio42", "gpio43", "gpio44"
+};
+
+static const struct msm_function msm8909_functions[] = {
+ FUNCTION(adsp_ext),
+ FUNCTION(atest_bbrx0),
+ FUNCTION(atest_bbrx1),
+ FUNCTION(atest_char),
+ FUNCTION(atest_char0),
+ FUNCTION(atest_char1),
+ FUNCTION(atest_char2),
+ FUNCTION(atest_char3),
+ FUNCTION(atest_combodac),
+ FUNCTION(atest_gpsadc0),
+ FUNCTION(atest_gpsadc1),
+ FUNCTION(atest_wlan0),
+ FUNCTION(atest_wlan1),
+ FUNCTION(bimc_dte0),
+ FUNCTION(bimc_dte1),
+ FUNCTION(blsp_i2c1),
+ FUNCTION(blsp_i2c2),
+ FUNCTION(blsp_i2c3),
+ FUNCTION(blsp_i2c4),
+ FUNCTION(blsp_i2c5),
+ FUNCTION(blsp_i2c6),
+ FUNCTION(blsp_spi1),
+ FUNCTION(blsp_spi1_cs1),
+ FUNCTION(blsp_spi1_cs2),
+ FUNCTION(blsp_spi1_cs3),
+ FUNCTION(blsp_spi2),
+ FUNCTION(blsp_spi2_cs1),
+ FUNCTION(blsp_spi2_cs2),
+ FUNCTION(blsp_spi2_cs3),
+ FUNCTION(blsp_spi3),
+ FUNCTION(blsp_spi3_cs1),
+ FUNCTION(blsp_spi3_cs2),
+ FUNCTION(blsp_spi3_cs3),
+ FUNCTION(blsp_spi4),
+ FUNCTION(blsp_spi5),
+ FUNCTION(blsp_spi6),
+ FUNCTION(blsp_uart1),
+ FUNCTION(blsp_uart2),
+ FUNCTION(blsp_uim1),
+ FUNCTION(blsp_uim2),
+ FUNCTION(cam_mclk),
+ FUNCTION(cci_async),
+ FUNCTION(cci_timer0),
+ FUNCTION(cci_timer1),
+ FUNCTION(cci_timer2),
+ FUNCTION(cdc_pdm0),
+ FUNCTION(dbg_out),
+ FUNCTION(dmic0_clk),
+ FUNCTION(dmic0_data),
+ FUNCTION(ebi0_wrcdc),
+ FUNCTION(ebi2_a),
+ FUNCTION(ebi2_lcd),
+ FUNCTION(ext_lpass),
+ FUNCTION(gcc_gp1_clk_a),
+ FUNCTION(gcc_gp1_clk_b),
+ FUNCTION(gcc_gp2_clk_a),
+ FUNCTION(gcc_gp2_clk_b),
+ FUNCTION(gcc_gp3_clk_a),
+ FUNCTION(gcc_gp3_clk_b),
+ FUNCTION(gcc_plltest),
+ FUNCTION(gpio),
+ FUNCTION(gsm0_tx),
+ FUNCTION(ldo_en),
+ FUNCTION(ldo_update),
+ FUNCTION(m_voc),
+ FUNCTION(mdp_vsync),
+ FUNCTION(modem_tsync),
+ FUNCTION(nav_pps),
+ FUNCTION(nav_tsync),
+ FUNCTION(pa_indicator),
+ FUNCTION(pbs0),
+ FUNCTION(pbs1),
+ FUNCTION(pbs2),
+ FUNCTION(pri_mi2s_data0_a),
+ FUNCTION(pri_mi2s_data0_b),
+ FUNCTION(pri_mi2s_data1_a),
+ FUNCTION(pri_mi2s_data1_b),
+ FUNCTION(pri_mi2s_mclk_a),
+ FUNCTION(pri_mi2s_mclk_b),
+ FUNCTION(pri_mi2s_sck_a),
+ FUNCTION(pri_mi2s_sck_b),
+ FUNCTION(pri_mi2s_ws_a),
+ FUNCTION(pri_mi2s_ws_b),
+ FUNCTION(prng_rosc),
+ FUNCTION(pwr_crypto_enabled_a),
+ FUNCTION(pwr_crypto_enabled_b),
+ FUNCTION(pwr_modem_enabled_a),
+ FUNCTION(pwr_modem_enabled_b),
+ FUNCTION(pwr_nav_enabled_a),
+ FUNCTION(pwr_nav_enabled_b),
+ FUNCTION(qdss_cti_trig_in_a0),
+ FUNCTION(qdss_cti_trig_in_a1),
+ FUNCTION(qdss_cti_trig_in_b0),
+ FUNCTION(qdss_cti_trig_in_b1),
+ FUNCTION(qdss_cti_trig_out_a0),
+ FUNCTION(qdss_cti_trig_out_a1),
+ FUNCTION(qdss_cti_trig_out_b0),
+ FUNCTION(qdss_cti_trig_out_b1),
+ FUNCTION(qdss_traceclk_a),
+ FUNCTION(qdss_tracectl_a),
+ FUNCTION(qdss_tracedata_a),
+ FUNCTION(qdss_tracedata_b),
+ FUNCTION(sd_write),
+ FUNCTION(sec_mi2s),
+ FUNCTION(smb_int),
+ FUNCTION(ssbi0),
+ FUNCTION(ssbi1),
+ FUNCTION(uim1_clk),
+ FUNCTION(uim1_data),
+ FUNCTION(uim1_present),
+ FUNCTION(uim1_reset),
+ FUNCTION(uim2_clk),
+ FUNCTION(uim2_data),
+ FUNCTION(uim2_present),
+ FUNCTION(uim2_reset),
+ FUNCTION(uim3_clk),
+ FUNCTION(uim3_data),
+ FUNCTION(uim3_present),
+ FUNCTION(uim3_reset),
+ FUNCTION(uim_batt),
+ FUNCTION(wcss_bt),
+ FUNCTION(wcss_fm),
+ FUNCTION(wcss_wlan),
+};
+
+static const struct msm_pingroup msm8909_groups[] = {
+ PINGROUP(0, blsp_spi3, sec_mi2s, _, _, _, _, _, _, _),
+ PINGROUP(1, blsp_spi3, sec_mi2s, _, _, _, _, _, _, _),
+ PINGROUP(2, blsp_spi3, sec_mi2s, _, _, _, _, _, _, _),
+ PINGROUP(3, blsp_spi3, sec_mi2s, _, _, _, _, _, _, _),
+ PINGROUP(4, blsp_spi1, blsp_uart1, blsp_uim1, blsp_spi3_cs3, dmic0_clk, _, _, _, _),
+ PINGROUP(5, blsp_spi1, blsp_uart1, blsp_uim1, blsp_spi2_cs3, dmic0_data, _, _, _, _),
+ PINGROUP(6, blsp_spi1, blsp_uart1, blsp_i2c1, _, _, _, _, _, bimc_dte0),
+ PINGROUP(7, blsp_spi1, blsp_uart1, blsp_i2c1, _, _, _, _, _, bimc_dte1),
+ PINGROUP(8, blsp_spi6, m_voc, _, _, _, _, _, qdss_tracedata_a, _),
+ PINGROUP(9, blsp_spi6, _, _, _, _, _, qdss_tracedata_a, _, _),
+ PINGROUP(10, blsp_spi6, blsp_i2c6, dbg_out, qdss_tracedata_a, _, _, _, _, _),
+ PINGROUP(11, blsp_spi6, blsp_i2c6, _, _, _, _, _, _, _),
+ PINGROUP(12, blsp_spi4, gcc_gp2_clk_b, _, _, _, _, _, _, _),
+ PINGROUP(13, blsp_spi4, gcc_gp3_clk_b, _, _, _, _, _, _, _),
+ PINGROUP(14, blsp_spi4, blsp_i2c4, gcc_gp1_clk_b, _, _, _, _, _, qdss_tracedata_b),
+ PINGROUP(15, blsp_spi4, blsp_i2c4, _, _, _, _, _, _, _),
+ PINGROUP(16, blsp_spi5, _, _, _, _, _, qdss_tracedata_b, _, _),
+ PINGROUP(17, blsp_spi5, blsp_spi2_cs2, _, _, _, _, _, qdss_tracedata_b, _),
+ PINGROUP(18, blsp_spi5, blsp_i2c5, _, _, _, _, _, _, _),
+ PINGROUP(19, blsp_spi5, blsp_i2c5, _, _, _, _, _, _, _),
+ PINGROUP(20, uim3_data, blsp_spi2, blsp_uart2, blsp_uim2, _, qdss_cti_trig_in_a0, _, _, _),
+ PINGROUP(21, uim3_present, blsp_spi2, blsp_uart2, blsp_uim2, _, qdss_cti_trig_in_b0, _, _, _),
+ PINGROUP(22, uim3_reset, _, qdss_cti_trig_out_b0, _, _, _, _, _, _),
+ PINGROUP(23, uim3_clk, qdss_cti_trig_out_a0, _, _, _, _, _, _, _),
+ PINGROUP(24, mdp_vsync, ebi2_lcd, ebi2_lcd, _, _, _, _, _, _),
+ PINGROUP(25, mdp_vsync, ebi2_lcd, _, _, _, _, _, _, _),
+ PINGROUP(26, cam_mclk, _, _, _, _, _, _, _, _),
+ PINGROUP(27, cam_mclk, _, _, _, _, _, _, _, _),
+ PINGROUP(28, _, pwr_modem_enabled_a, _, _, _, _, _, _, _),
+ PINGROUP(29, blsp_i2c3, _, _, _, _, _, qdss_tracedata_b, _, _),
+ PINGROUP(30, blsp_i2c3, _, _, _, _, _, qdss_tracedata_b, _, _),
+ PINGROUP(31, cci_timer0, _, _, _, _, _, _, qdss_tracedata_b, _),
+ PINGROUP(32, cci_timer1, _, qdss_tracedata_b, _, atest_combodac, _, _, _, _),
+ PINGROUP(33, cci_async, qdss_tracedata_b, _, _, _, _, _, _, _),
+ PINGROUP(34, pwr_nav_enabled_a, qdss_tracedata_b, _, _, _, _, _, _, _),
+ PINGROUP(35, pwr_crypto_enabled_a, qdss_tracedata_b, _, _, _, _, _, _, _),
+ PINGROUP(36, qdss_tracedata_b, _, atest_bbrx1, _, _, _, _, _, _),
+ PINGROUP(37, blsp_spi1_cs2, qdss_tracedata_b, _, atest_bbrx0, _, _, _, _, _),
+ PINGROUP(38, cci_timer2, adsp_ext, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(39, wcss_bt, qdss_tracedata_a, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(40, wcss_wlan, qdss_tracedata_a, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(41, wcss_wlan, qdss_tracedata_a, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(42, wcss_wlan, qdss_tracedata_a, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(43, wcss_wlan, prng_rosc, qdss_tracedata_a, _, atest_combodac, _, _, _, _),
+ PINGROUP(44, wcss_wlan, _, atest_combodac, _, _, _, _, _, _),
+ PINGROUP(45, wcss_fm, ext_lpass, qdss_tracectl_a, _, atest_combodac, _, _, _, _),
+ PINGROUP(46, wcss_fm, qdss_traceclk_a, _, _, _, _, _, _, _),
+ PINGROUP(47, wcss_bt, qdss_tracedata_a, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(48, wcss_bt, qdss_tracedata_a, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(49, uim2_data, gcc_gp1_clk_a, qdss_cti_trig_in_a1, _, _, _, _, _, _),
+ PINGROUP(50, uim2_clk, gcc_gp2_clk_a, qdss_cti_trig_in_b1, _, _, _, _, _, _),
+ PINGROUP(51, uim2_reset, gcc_gp3_clk_a, qdss_cti_trig_out_b1, _, _, _, _, _, _),
+ PINGROUP(52, uim2_present, qdss_cti_trig_out_a1, _, _, _, _, _, _, _),
+ PINGROUP(53, uim1_data, _, _, _, _, _, _, _, _),
+ PINGROUP(54, uim1_clk, _, _, _, _, _, _, _, _),
+ PINGROUP(55, uim1_reset, _, _, _, _, _, _, _, _),
+ PINGROUP(56, uim1_present, _, _, _, _, _, _, _, _),
+ PINGROUP(57, uim_batt, _, _, _, _, _, _, _, _),
+ PINGROUP(58, qdss_tracedata_a, smb_int, _, _, _, _, _, _, _),
+ PINGROUP(59, cdc_pdm0, pri_mi2s_mclk_a, atest_char3, _, _, _, _, _, bimc_dte0),
+ PINGROUP(60, cdc_pdm0, pri_mi2s_sck_a, atest_char2, _, _, _, _, _, bimc_dte1),
+ PINGROUP(61, cdc_pdm0, pri_mi2s_ws_a, atest_char1, _, _, _, _, _, _),
+ PINGROUP(62, cdc_pdm0, pri_mi2s_data0_a, atest_char0, _, _, _, _, _, _),
+ PINGROUP(63, cdc_pdm0, pri_mi2s_data1_a, atest_char, _, _, _, _, _, _),
+ PINGROUP(64, cdc_pdm0, _, _, _, _, _, ebi0_wrcdc, _, _),
+ PINGROUP(65, blsp_spi3_cs2, blsp_spi1_cs3, qdss_tracedata_a, _, atest_gpsadc0, _, _, _, _),
+ PINGROUP(66, _, gcc_plltest, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(67, _, gcc_plltest, _, _, _, _, _, _, _),
+ PINGROUP(68, _, _, _, _, _, _, _, _, _),
+ PINGROUP(69, _, _, _, _, _, _, _, _, _),
+ PINGROUP(70, _, _, _, _, _, _, _, _, _),
+ PINGROUP(71, _, _, _, _, _, _, _, _, _),
+ PINGROUP(72, _, _, _, _, _, _, _, _, _),
+ PINGROUP(73, _, _, _, _, _, _, _, _, _),
+ PINGROUP(74, _, _, _, _, _, _, _, _, _),
+ PINGROUP(75, _, _, _, _, _, _, _, _, _),
+ PINGROUP(76, _, _, _, _, _, _, _, _, _),
+ PINGROUP(77, _, _, _, _, _, _, _, _, _),
+ PINGROUP(78, _, _, _, _, _, _, _, _, _),
+ PINGROUP(79, _, _, atest_gpsadc1, _, _, _, _, _, _),
+ PINGROUP(80, _, _, _, _, _, _, _, _, _),
+ PINGROUP(81, _, _, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(82, _, pa_indicator, _, _, _, _, _, _, _),
+ PINGROUP(83, _, modem_tsync, nav_tsync, nav_pps, _, atest_combodac, _, _, _),
+ PINGROUP(84, _, _, atest_combodac, _, _, _, _, _, _),
+ PINGROUP(85, gsm0_tx, _, _, atest_combodac, _, _, _, _, _),
+ PINGROUP(86, _, _, atest_combodac, _, _, _, _, _, _),
+ PINGROUP(87, _, _, _, _, _, _, _, _, _),
+ PINGROUP(88, _, ssbi0, _, _, _, _, _, _, _),
+ PINGROUP(89, _, ssbi1, _, _, _, _, _, _, _),
+ PINGROUP(90, pbs0, _, _, _, _, _, _, _, _),
+ PINGROUP(91, pbs1, _, _, _, _, _, _, _, _),
+ PINGROUP(92, pbs2, _, _, _, _, _, _, _, _),
+ PINGROUP(93, qdss_tracedata_b, _, _, _, _, _, _, _, _),
+ PINGROUP(94, pri_mi2s_sck_b, pwr_modem_enabled_b, qdss_tracedata_a, _, atest_combodac, _, _, _, _),
+ PINGROUP(95, blsp_spi3_cs1, pri_mi2s_data0_b, ebi2_lcd, m_voc, pwr_nav_enabled_b, _, atest_combodac, _, _),
+ PINGROUP(96, pri_mi2s_data1_b, _, pwr_crypto_enabled_b, qdss_tracedata_a, _, atest_wlan0, _, _, _),
+ PINGROUP(97, blsp_spi1_cs1, qdss_tracedata_a, _, atest_wlan1, _, _, _, _, _),
+ PINGROUP(98, sec_mi2s, pri_mi2s_mclk_b, blsp_spi2_cs1, ldo_update, _, _, _, _, _),
+ PINGROUP(99, ebi2_a, sd_write, ldo_en, _, _, _, _, _, _),
+ PINGROUP(100, _, _, _, _, _, _, _, _, _),
+ PINGROUP(101, _, _, _, _, _, _, _, _, _),
+ PINGROUP(102, _, _, _, _, _, _, _, _, _),
+ PINGROUP(103, _, _, _, _, _, _, _, _, _),
+ PINGROUP(104, _, _, _, _, _, _, _, _, _),
+ PINGROUP(105, _, _, _, _, _, _, _, _, _),
+ PINGROUP(106, _, _, _, _, _, _, _, _, _),
+ PINGROUP(107, _, _, _, _, _, _, _, _, _),
+ PINGROUP(108, _, _, _, _, _, _, _, _, _),
+ PINGROUP(109, _, _, _, _, _, _, _, _, _),
+ PINGROUP(110, pri_mi2s_ws_b, _, atest_combodac, _, _, _, _, _, _),
+ PINGROUP(111, blsp_spi2, blsp_uart2, blsp_i2c2, _, _, _, _, _, _),
+ PINGROUP(112, blsp_spi2, blsp_uart2, blsp_i2c2, _, _, _, _, _, _),
+ SDC_QDSD_PINGROUP(sdc1_clk, 0x10a000, 13, 6),
+ SDC_QDSD_PINGROUP(sdc1_cmd, 0x10a000, 11, 3),
+ SDC_QDSD_PINGROUP(sdc1_data, 0x10a000, 9, 0),
+ SDC_QDSD_PINGROUP(sdc2_clk, 0x109000, 14, 6),
+ SDC_QDSD_PINGROUP(sdc2_cmd, 0x109000, 11, 3),
+ SDC_QDSD_PINGROUP(sdc2_data, 0x109000, 9, 0),
+ SDC_QDSD_PINGROUP(qdsd_clk, 0x19c000, 3, 0),
+ SDC_QDSD_PINGROUP(qdsd_cmd, 0x19c000, 8, 5),
+ SDC_QDSD_PINGROUP(qdsd_data0, 0x19c000, 13, 10),
+ SDC_QDSD_PINGROUP(qdsd_data1, 0x19c000, 18, 15),
+ SDC_QDSD_PINGROUP(qdsd_data2, 0x19c000, 23, 20),
+ SDC_QDSD_PINGROUP(qdsd_data3, 0x19c000, 28, 25),
+};
+
+static const struct msm_gpio_wakeirq_map msm8909_mpm_map[] = {
+ { 65, 3 }, { 5, 4 }, { 11, 5 }, { 12, 6 }, { 64, 7 }, { 58, 8 },
+ { 50, 9 }, { 13, 10 }, { 49, 11 }, { 20, 12 }, { 21, 13 }, { 25, 14 },
+ { 46, 15 }, { 45, 16 }, { 28, 17 }, { 44, 18 }, { 31, 19 }, { 43, 20 },
+ { 42, 21 }, { 34, 22 }, { 35, 23 }, { 36, 24 }, { 37, 25 }, { 38, 26 },
+ { 39, 27 }, { 40, 28 }, { 41, 29 }, { 90, 30 }, { 91, 32 }, { 92, 33 },
+ { 94, 34 }, { 95, 35 }, { 96, 36 }, { 97, 37 }, { 98, 38 },
+ { 110, 39 }, { 111, 40 }, { 112, 41 }, { 105, 42 }, { 107, 43 },
+ { 47, 50 }, { 48, 51 },
+};
+
+static const struct msm_pinctrl_soc_data msm8909_pinctrl = {
+ .pins = msm8909_pins,
+ .npins = ARRAY_SIZE(msm8909_pins),
+ .functions = msm8909_functions,
+ .nfunctions = ARRAY_SIZE(msm8909_functions),
+ .groups = msm8909_groups,
+ .ngroups = ARRAY_SIZE(msm8909_groups),
+ .ngpios = 113,
+ .wakeirq_map = msm8909_mpm_map,
+ .nwakeirq_map = ARRAY_SIZE(msm8909_mpm_map),
+};
+
+static int msm8909_pinctrl_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &msm8909_pinctrl);
+}
+
+static const struct of_device_id msm8909_pinctrl_of_match[] = {
+ { .compatible = "qcom,msm8909-tlmm", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, msm8909_pinctrl_of_match);
+
+static struct platform_driver msm8909_pinctrl_driver = {
+ .driver = {
+ .name = "msm8909-pinctrl",
+ .of_match_table = msm8909_pinctrl_of_match,
+ },
+ .probe = msm8909_pinctrl_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init msm8909_pinctrl_init(void)
+{
+ return platform_driver_register(&msm8909_pinctrl_driver);
+}
+arch_initcall(msm8909_pinctrl_init);
+
+static void __exit msm8909_pinctrl_exit(void)
+{
+ platform_driver_unregister(&msm8909_pinctrl_driver);
+}
+module_exit(msm8909_pinctrl_exit);
+
+MODULE_DESCRIPTION("Qualcomm MSM8909 TLMM pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8916.c b/drivers/pinctrl/qcom/pinctrl-msm8916.c
index 396db12ae904..bf68913ba821 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8916.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8916.c
@@ -844,8 +844,8 @@ static const struct msm_pingroup msm8916_groups[] = {
PINGROUP(28, pwr_modem_enabled_a, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, atest_combodac),
PINGROUP(29, cci_i2c, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, atest_combodac),
PINGROUP(30, cci_i2c, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
- PINGROUP(31, cci_timer0, NA, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(32, cci_timer1, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(31, cci_timer0, flash_strobe, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(32, cci_timer1, flash_strobe, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(33, cci_async, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
PINGROUP(34, pwr_nav_enabled_a, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
PINGROUP(35, pwr_crypto_enabled_a, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
diff --git a/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c
index 2add9a4520c2..d615b6c55b89 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c
@@ -141,7 +141,6 @@ static const struct lpi_pinctrl_variant_data sc7280_lpi_data = {
.ngroups = ARRAY_SIZE(sc7280_groups),
.functions = sc7280_functions,
.nfunctions = ARRAY_SIZE(sc7280_functions),
- .is_clk_optional = true,
};
static const struct of_device_id lpi_pinctrl_of_match[] = {
diff --git a/drivers/pinctrl/qcom/pinctrl-sm6375.c b/drivers/pinctrl/qcom/pinctrl-sm6375.c
new file mode 100644
index 000000000000..1138e683e6f4
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-sm6375.c
@@ -0,0 +1,1544 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+#define FUNCTION(fname) \
+ [msm_mux_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+#define REG_BASE 0x100000
+#define REG_SIZE 0x1000
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, /* gpio mode */ \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9 \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = REG_SIZE * id, \
+ .io_reg = REG_SIZE * id + 0x4, \
+ .intr_cfg_reg = REG_SIZE * id + 0x8, \
+ .intr_status_reg = REG_SIZE * id + 0xc, \
+ .intr_target_reg = REG_SIZE * id + 0x8, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .egpio_enable = 12, \
+ .egpio_present = 11, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 3, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
+#define SDC_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+#define UFS_RESET(pg_name, offset) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = offset, \
+ .io_reg = offset + 0x4, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = 3, \
+ .drv_bit = 0, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = 0, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+static const struct pinctrl_pin_desc sm6375_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "GPIO_114"),
+ PINCTRL_PIN(115, "GPIO_115"),
+ PINCTRL_PIN(116, "GPIO_116"),
+ PINCTRL_PIN(117, "GPIO_117"),
+ PINCTRL_PIN(118, "GPIO_118"),
+ PINCTRL_PIN(119, "GPIO_119"),
+ PINCTRL_PIN(120, "GPIO_120"),
+ PINCTRL_PIN(121, "GPIO_121"),
+ PINCTRL_PIN(122, "GPIO_122"),
+ PINCTRL_PIN(123, "GPIO_123"),
+ PINCTRL_PIN(124, "GPIO_124"),
+ PINCTRL_PIN(125, "GPIO_125"),
+ PINCTRL_PIN(126, "GPIO_126"),
+ PINCTRL_PIN(127, "GPIO_127"),
+ PINCTRL_PIN(128, "GPIO_128"),
+ PINCTRL_PIN(129, "GPIO_129"),
+ PINCTRL_PIN(130, "GPIO_130"),
+ PINCTRL_PIN(131, "GPIO_131"),
+ PINCTRL_PIN(132, "GPIO_132"),
+ PINCTRL_PIN(133, "GPIO_133"),
+ PINCTRL_PIN(134, "GPIO_134"),
+ PINCTRL_PIN(135, "GPIO_135"),
+ PINCTRL_PIN(136, "GPIO_136"),
+ PINCTRL_PIN(137, "GPIO_137"),
+ PINCTRL_PIN(138, "GPIO_138"),
+ PINCTRL_PIN(139, "GPIO_139"),
+ PINCTRL_PIN(140, "GPIO_140"),
+ PINCTRL_PIN(141, "GPIO_141"),
+ PINCTRL_PIN(142, "GPIO_142"),
+ PINCTRL_PIN(143, "GPIO_143"),
+ PINCTRL_PIN(144, "GPIO_144"),
+ PINCTRL_PIN(145, "GPIO_145"),
+ PINCTRL_PIN(146, "GPIO_146"),
+ PINCTRL_PIN(147, "GPIO_147"),
+ PINCTRL_PIN(148, "GPIO_148"),
+ PINCTRL_PIN(149, "GPIO_149"),
+ PINCTRL_PIN(150, "GPIO_150"),
+ PINCTRL_PIN(151, "GPIO_151"),
+ PINCTRL_PIN(152, "GPIO_152"),
+ PINCTRL_PIN(153, "GPIO_153"),
+ PINCTRL_PIN(154, "GPIO_154"),
+ PINCTRL_PIN(155, "GPIO_155"),
+ PINCTRL_PIN(156, "UFS_RESET"),
+ PINCTRL_PIN(157, "SDC1_RCLK"),
+ PINCTRL_PIN(158, "SDC1_CLK"),
+ PINCTRL_PIN(159, "SDC1_CMD"),
+ PINCTRL_PIN(160, "SDC1_DATA"),
+ PINCTRL_PIN(161, "SDC2_CLK"),
+ PINCTRL_PIN(162, "SDC2_CMD"),
+ PINCTRL_PIN(163, "SDC2_DATA"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+DECLARE_MSM_GPIO_PINS(134);
+DECLARE_MSM_GPIO_PINS(135);
+DECLARE_MSM_GPIO_PINS(136);
+DECLARE_MSM_GPIO_PINS(137);
+DECLARE_MSM_GPIO_PINS(138);
+DECLARE_MSM_GPIO_PINS(139);
+DECLARE_MSM_GPIO_PINS(140);
+DECLARE_MSM_GPIO_PINS(141);
+DECLARE_MSM_GPIO_PINS(142);
+DECLARE_MSM_GPIO_PINS(143);
+DECLARE_MSM_GPIO_PINS(144);
+DECLARE_MSM_GPIO_PINS(145);
+DECLARE_MSM_GPIO_PINS(146);
+DECLARE_MSM_GPIO_PINS(147);
+DECLARE_MSM_GPIO_PINS(148);
+DECLARE_MSM_GPIO_PINS(149);
+DECLARE_MSM_GPIO_PINS(150);
+DECLARE_MSM_GPIO_PINS(151);
+DECLARE_MSM_GPIO_PINS(152);
+DECLARE_MSM_GPIO_PINS(153);
+DECLARE_MSM_GPIO_PINS(154);
+DECLARE_MSM_GPIO_PINS(155);
+
+static const unsigned int sdc1_rclk_pins[] = { 157 };
+static const unsigned int sdc1_clk_pins[] = { 158 };
+static const unsigned int sdc1_cmd_pins[] = { 159 };
+static const unsigned int sdc1_data_pins[] = { 160 };
+static const unsigned int sdc2_clk_pins[] = { 161 };
+static const unsigned int sdc2_cmd_pins[] = { 162 };
+static const unsigned int sdc2_data_pins[] = { 163 };
+static const unsigned int ufs_reset_pins[] = { 156 };
+
+enum sm6375_functions {
+ msm_mux_adsp_ext,
+ msm_mux_agera_pll,
+ msm_mux_atest_char,
+ msm_mux_atest_char0,
+ msm_mux_atest_char1,
+ msm_mux_atest_char2,
+ msm_mux_atest_char3,
+ msm_mux_atest_tsens,
+ msm_mux_atest_tsens2,
+ msm_mux_atest_usb1,
+ msm_mux_atest_usb10,
+ msm_mux_atest_usb11,
+ msm_mux_atest_usb12,
+ msm_mux_atest_usb13,
+ msm_mux_atest_usb2,
+ msm_mux_atest_usb20,
+ msm_mux_atest_usb21,
+ msm_mux_atest_usb22,
+ msm_mux_atest_usb23,
+ msm_mux_audio_ref,
+ msm_mux_btfm_slimbus,
+ msm_mux_cam_mclk,
+ msm_mux_cci_async,
+ msm_mux_cci_i2c,
+ msm_mux_cci_timer0,
+ msm_mux_cci_timer1,
+ msm_mux_cci_timer2,
+ msm_mux_cci_timer3,
+ msm_mux_cci_timer4,
+ msm_mux_cri_trng,
+ msm_mux_dbg_out,
+ msm_mux_ddr_bist,
+ msm_mux_ddr_pxi0,
+ msm_mux_ddr_pxi1,
+ msm_mux_ddr_pxi2,
+ msm_mux_ddr_pxi3,
+ msm_mux_dp_hot,
+ msm_mux_edp_lcd,
+ msm_mux_gcc_gp1,
+ msm_mux_gcc_gp2,
+ msm_mux_gcc_gp3,
+ msm_mux_gp_pdm0,
+ msm_mux_gp_pdm1,
+ msm_mux_gp_pdm2,
+ msm_mux_gpio,
+ msm_mux_gps_tx,
+ msm_mux_ibi_i3c,
+ msm_mux_jitter_bist,
+ msm_mux_ldo_en,
+ msm_mux_ldo_update,
+ msm_mux_lpass_ext,
+ msm_mux_m_voc,
+ msm_mux_mclk,
+ msm_mux_mdp_vsync,
+ msm_mux_mdp_vsync0,
+ msm_mux_mdp_vsync1,
+ msm_mux_mdp_vsync2,
+ msm_mux_mdp_vsync3,
+ msm_mux_mi2s_0,
+ msm_mux_mi2s_1,
+ msm_mux_mi2s_2,
+ msm_mux_mss_lte,
+ msm_mux_nav_gpio,
+ msm_mux_nav_pps,
+ msm_mux_pa_indicator,
+ msm_mux_phase_flag0,
+ msm_mux_phase_flag1,
+ msm_mux_phase_flag10,
+ msm_mux_phase_flag11,
+ msm_mux_phase_flag12,
+ msm_mux_phase_flag13,
+ msm_mux_phase_flag14,
+ msm_mux_phase_flag15,
+ msm_mux_phase_flag16,
+ msm_mux_phase_flag17,
+ msm_mux_phase_flag18,
+ msm_mux_phase_flag19,
+ msm_mux_phase_flag2,
+ msm_mux_phase_flag20,
+ msm_mux_phase_flag21,
+ msm_mux_phase_flag22,
+ msm_mux_phase_flag23,
+ msm_mux_phase_flag24,
+ msm_mux_phase_flag25,
+ msm_mux_phase_flag26,
+ msm_mux_phase_flag27,
+ msm_mux_phase_flag28,
+ msm_mux_phase_flag29,
+ msm_mux_phase_flag3,
+ msm_mux_phase_flag30,
+ msm_mux_phase_flag31,
+ msm_mux_phase_flag4,
+ msm_mux_phase_flag5,
+ msm_mux_phase_flag6,
+ msm_mux_phase_flag7,
+ msm_mux_phase_flag8,
+ msm_mux_phase_flag9,
+ msm_mux_pll_bist,
+ msm_mux_pll_bypassnl,
+ msm_mux_pll_clk,
+ msm_mux_pll_reset,
+ msm_mux_prng_rosc0,
+ msm_mux_prng_rosc1,
+ msm_mux_prng_rosc2,
+ msm_mux_prng_rosc3,
+ msm_mux_qdss_cti,
+ msm_mux_qdss_gpio,
+ msm_mux_qdss_gpio0,
+ msm_mux_qdss_gpio1,
+ msm_mux_qdss_gpio10,
+ msm_mux_qdss_gpio11,
+ msm_mux_qdss_gpio12,
+ msm_mux_qdss_gpio13,
+ msm_mux_qdss_gpio14,
+ msm_mux_qdss_gpio15,
+ msm_mux_qdss_gpio2,
+ msm_mux_qdss_gpio3,
+ msm_mux_qdss_gpio4,
+ msm_mux_qdss_gpio5,
+ msm_mux_qdss_gpio6,
+ msm_mux_qdss_gpio7,
+ msm_mux_qdss_gpio8,
+ msm_mux_qdss_gpio9,
+ msm_mux_qlink0_enable,
+ msm_mux_qlink0_request,
+ msm_mux_qlink0_wmss,
+ msm_mux_qlink1_enable,
+ msm_mux_qlink1_request,
+ msm_mux_qlink1_wmss,
+ msm_mux_qup00,
+ msm_mux_qup01,
+ msm_mux_qup02,
+ msm_mux_qup10,
+ msm_mux_qup11_f1,
+ msm_mux_qup11_f2,
+ msm_mux_qup12,
+ msm_mux_qup13_f1,
+ msm_mux_qup13_f2,
+ msm_mux_qup14,
+ msm_mux_sd_write,
+ msm_mux_sdc1_tb,
+ msm_mux_sdc2_tb,
+ msm_mux_sp_cmu,
+ msm_mux_tgu_ch0,
+ msm_mux_tgu_ch1,
+ msm_mux_tgu_ch2,
+ msm_mux_tgu_ch3,
+ msm_mux_tsense_pwm1,
+ msm_mux_tsense_pwm2,
+ msm_mux_uim1_clk,
+ msm_mux_uim1_data,
+ msm_mux_uim1_present,
+ msm_mux_uim1_reset,
+ msm_mux_uim2_clk,
+ msm_mux_uim2_data,
+ msm_mux_uim2_present,
+ msm_mux_uim2_reset,
+ msm_mux_usb2phy_ac,
+ msm_mux_usb_phy,
+ msm_mux_vfr_1,
+ msm_mux_vsense_trigger,
+ msm_mux_wlan1_adc0,
+ msm_mux_wlan1_adc1,
+ msm_mux_wlan2_adc0,
+ msm_mux_wlan2_adc1,
+ msm_mux__,
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio11", "gpio12", "gpio13", "gpio14", "gpio15",
+ "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", "gpio22",
+ "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28", "gpio29",
+ "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35", "gpio36",
+ "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", "gpio43",
+ "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49", "gpio50",
+ "gpio51", "gpio52", "gpio53", "gpio56", "gpio57", "gpio58", "gpio59",
+ "gpio60", "gpio61", "gpio62", "gpio63", "gpio64", "gpio65", "gpio66",
+ "gpio67", "gpio68", "gpio69", "gpio75", "gpio76", "gpio77", "gpio78",
+ "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84", "gpio85",
+ "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91", "gpio92",
+ "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98", "gpio99",
+ "gpio100", "gpio101", "gpio102", "gpio103", "gpio104", "gpio105",
+ "gpio106", "gpio107", "gpio108", "gpio109", "gpio110", "gpio111",
+ "gpio112", "gpio113", "gpio114", "gpio115", "gpio116", "gpio117",
+ "gpio118", "gpio119", "gpio120", "gpio124", "gpio125", "gpio126",
+ "gpio127", "gpio128", "gpio129", "gpio130", "gpio131", "gpio132",
+ "gpio133", "gpio134", "gpio135", "gpio136", "gpio141", "gpio142",
+ "gpio143", "gpio150", "gpio151", "gpio152", "gpio153", "gpio154",
+ "gpio155",
+};
+static const char * const agera_pll_groups[] = {
+ "gpio89",
+};
+static const char * const cci_async_groups[] = {
+ "gpio35", "gpio36", "gpio48", "gpio52", "gpio53",
+};
+static const char * const cci_i2c_groups[] = {
+ "gpio2", "gpio3", "gpio39", "gpio40", "gpio41", "gpio42", "gpio43",
+ "gpio44",
+};
+static const char * const gps_tx_groups[] = {
+ "gpio101", "gpio102", "gpio107", "gpio108",
+};
+static const char * const gp_pdm0_groups[] = {
+ "gpio37", "gpio68",
+};
+static const char * const gp_pdm1_groups[] = {
+ "gpio8", "gpio52",
+};
+static const char * const gp_pdm2_groups[] = {
+ "gpio57",
+};
+static const char * const jitter_bist_groups[] = {
+ "gpio90",
+};
+static const char * const mclk_groups[] = {
+ "gpio93",
+};
+static const char * const mdp_vsync_groups[] = {
+ "gpio6", "gpio23", "gpio24", "gpio27", "gpio28",
+};
+static const char * const mss_lte_groups[] = {
+ "gpio65", "gpio66",
+};
+static const char * const nav_pps_groups[] = {
+ "gpio101", "gpio101", "gpio102", "gpio102",
+};
+static const char * const pll_bist_groups[] = {
+ "gpio27",
+};
+static const char * const qlink0_wmss_groups[] = {
+ "gpio103",
+};
+static const char * const qlink1_wmss_groups[] = {
+ "gpio106",
+};
+static const char * const usb_phy_groups[] = {
+ "gpio124",
+};
+static const char * const adsp_ext_groups[] = {
+ "gpio87",
+};
+static const char * const atest_char_groups[] = {
+ "gpio95",
+};
+static const char * const atest_char0_groups[] = {
+ "gpio96",
+};
+static const char * const atest_char1_groups[] = {
+ "gpio97",
+};
+static const char * const atest_char2_groups[] = {
+ "gpio98",
+};
+static const char * const atest_char3_groups[] = {
+ "gpio99",
+};
+static const char * const atest_tsens_groups[] = {
+ "gpio92",
+};
+static const char * const atest_tsens2_groups[] = {
+ "gpio93",
+};
+static const char * const atest_usb1_groups[] = {
+ "gpio83",
+};
+static const char * const atest_usb10_groups[] = {
+ "gpio84",
+};
+static const char * const atest_usb11_groups[] = {
+ "gpio85",
+};
+static const char * const atest_usb12_groups[] = {
+ "gpio86",
+};
+static const char * const atest_usb13_groups[] = {
+ "gpio87",
+};
+static const char * const atest_usb2_groups[] = {
+ "gpio88",
+};
+static const char * const atest_usb20_groups[] = {
+ "gpio89",
+};
+static const char * const atest_usb21_groups[] = {
+ "gpio90",
+};
+static const char * const atest_usb22_groups[] = {
+ "gpio91",
+};
+static const char * const atest_usb23_groups[] = {
+ "gpio92",
+};
+static const char * const audio_ref_groups[] = {
+ "gpio60",
+};
+static const char * const btfm_slimbus_groups[] = {
+ "gpio67", "gpio68", "gpio86", "gpio87",
+};
+static const char * const cam_mclk_groups[] = {
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33",
+};
+static const char * const cci_timer0_groups[] = {
+ "gpio34",
+};
+static const char * const cci_timer1_groups[] = {
+ "gpio35",
+};
+static const char * const cci_timer2_groups[] = {
+ "gpio36",
+};
+static const char * const cci_timer3_groups[] = {
+ "gpio37",
+};
+static const char * const cci_timer4_groups[] = {
+ "gpio38",
+};
+static const char * const cri_trng_groups[] = {
+ "gpio0", "gpio1", "gpio2",
+};
+static const char * const dbg_out_groups[] = {
+ "gpio3",
+};
+static const char * const ddr_bist_groups[] = {
+ "gpio19", "gpio20", "gpio21", "gpio22",
+};
+static const char * const ddr_pxi0_groups[] = {
+ "gpio86", "gpio90",
+};
+static const char * const ddr_pxi1_groups[] = {
+ "gpio87", "gpio91",
+};
+static const char * const ddr_pxi2_groups[] = {
+ "gpio88", "gpio92",
+};
+static const char * const ddr_pxi3_groups[] = {
+ "gpio89", "gpio93",
+};
+static const char * const dp_hot_groups[] = {
+ "gpio12", "gpio118",
+};
+static const char * const edp_lcd_groups[] = {
+ "gpio23",
+};
+static const char * const gcc_gp1_groups[] = {
+ "gpio48", "gpio58",
+};
+static const char * const gcc_gp2_groups[] = {
+ "gpio21",
+};
+static const char * const gcc_gp3_groups[] = {
+ "gpio22",
+};
+static const char * const ibi_i3c_groups[] = {
+ "gpio0", "gpio1",
+};
+static const char * const ldo_en_groups[] = {
+ "gpio95",
+};
+static const char * const ldo_update_groups[] = {
+ "gpio96",
+};
+static const char * const lpass_ext_groups[] = {
+ "gpio60", "gpio93",
+};
+static const char * const m_voc_groups[] = {
+ "gpio12",
+};
+static const char * const mdp_vsync0_groups[] = {
+ "gpio47",
+};
+static const char * const mdp_vsync1_groups[] = {
+ "gpio48",
+};
+static const char * const mdp_vsync2_groups[] = {
+ "gpio56",
+};
+static const char * const mdp_vsync3_groups[] = {
+ "gpio57",
+};
+static const char * const mi2s_0_groups[] = {
+ "gpio88", "gpio89", "gpio90", "gpio91",
+};
+static const char * const mi2s_1_groups[] = {
+ "gpio67", "gpio68", "gpio86", "gpio87",
+};
+static const char * const mi2s_2_groups[] = {
+ "gpio60",
+};
+static const char * const nav_gpio_groups[] = {
+ "gpio101", "gpio102",
+};
+static const char * const pa_indicator_groups[] = {
+ "gpio118",
+};
+static const char * const phase_flag0_groups[] = {
+ "gpio12",
+};
+static const char * const phase_flag1_groups[] = {
+ "gpio17",
+};
+static const char * const phase_flag10_groups[] = {
+ "gpio41",
+};
+static const char * const phase_flag11_groups[] = {
+ "gpio42",
+};
+static const char * const phase_flag12_groups[] = {
+ "gpio43",
+};
+static const char * const phase_flag13_groups[] = {
+ "gpio44",
+};
+static const char * const phase_flag14_groups[] = {
+ "gpio45",
+};
+static const char * const phase_flag15_groups[] = {
+ "gpio46",
+};
+static const char * const phase_flag16_groups[] = {
+ "gpio47",
+};
+static const char * const phase_flag17_groups[] = {
+ "gpio48",
+};
+static const char * const phase_flag18_groups[] = {
+ "gpio49",
+};
+static const char * const phase_flag19_groups[] = {
+ "gpio50",
+};
+static const char * const phase_flag2_groups[] = {
+ "gpio18",
+};
+static const char * const phase_flag20_groups[] = {
+ "gpio51",
+};
+static const char * const phase_flag21_groups[] = {
+ "gpio52",
+};
+static const char * const phase_flag22_groups[] = {
+ "gpio53",
+};
+static const char * const phase_flag23_groups[] = {
+ "gpio56",
+};
+static const char * const phase_flag24_groups[] = {
+ "gpio57",
+};
+static const char * const phase_flag25_groups[] = {
+ "gpio60",
+};
+static const char * const phase_flag26_groups[] = {
+ "gpio61",
+};
+static const char * const phase_flag27_groups[] = {
+ "gpio62",
+};
+static const char * const phase_flag28_groups[] = {
+ "gpio63",
+};
+static const char * const phase_flag29_groups[] = {
+ "gpio64",
+};
+static const char * const phase_flag3_groups[] = {
+ "gpio34",
+};
+static const char * const phase_flag30_groups[] = {
+ "gpio67",
+};
+static const char * const phase_flag31_groups[] = {
+ "gpio68",
+};
+static const char * const phase_flag4_groups[] = {
+ "gpio35",
+};
+static const char * const phase_flag5_groups[] = {
+ "gpio36",
+};
+static const char * const phase_flag6_groups[] = {
+ "gpio37",
+};
+static const char * const phase_flag7_groups[] = {
+ "gpio38",
+};
+static const char * const phase_flag8_groups[] = {
+ "gpio39",
+};
+static const char * const phase_flag9_groups[] = {
+ "gpio40",
+};
+static const char * const pll_bypassnl_groups[] = {
+ "gpio13",
+};
+static const char * const pll_clk_groups[] = {
+ "gpio98",
+};
+static const char * const pll_reset_groups[] = {
+ "gpio14",
+};
+static const char * const prng_rosc0_groups[] = {
+ "gpio97",
+};
+static const char * const prng_rosc1_groups[] = {
+ "gpio98",
+};
+static const char * const prng_rosc2_groups[] = {
+ "gpio99",
+};
+static const char * const prng_rosc3_groups[] = {
+ "gpio100",
+};
+static const char * const qdss_cti_groups[] = {
+ "gpio2", "gpio3", "gpio6", "gpio7", "gpio61", "gpio62", "gpio86",
+ "gpio87",
+};
+static const char * const qdss_gpio_groups[] = {
+ "gpio8", "gpio9", "gpio63", "gpio64",
+};
+static const char * const qdss_gpio0_groups[] = {
+ "gpio39", "gpio65",
+};
+static const char * const qdss_gpio1_groups[] = {
+ "gpio40", "gpio66",
+};
+static const char * const qdss_gpio10_groups[] = {
+ "gpio50", "gpio56",
+};
+static const char * const qdss_gpio11_groups[] = {
+ "gpio51", "gpio57",
+};
+static const char * const qdss_gpio12_groups[] = {
+ "gpio34", "gpio52",
+};
+static const char * const qdss_gpio13_groups[] = {
+ "gpio35", "gpio53",
+};
+static const char * const qdss_gpio14_groups[] = {
+ "gpio27", "gpio36",
+};
+static const char * const qdss_gpio15_groups[] = {
+ "gpio28", "gpio37",
+};
+static const char * const qdss_gpio2_groups[] = {
+ "gpio38", "gpio41",
+};
+static const char * const qdss_gpio3_groups[] = {
+ "gpio42", "gpio47",
+};
+static const char * const qdss_gpio4_groups[] = {
+ "gpio43", "gpio88",
+};
+static const char * const qdss_gpio5_groups[] = {
+ "gpio44", "gpio89",
+};
+static const char * const qdss_gpio6_groups[] = {
+ "gpio45", "gpio90",
+};
+static const char * const qdss_gpio7_groups[] = {
+ "gpio46", "gpio91",
+};
+static const char * const qdss_gpio8_groups[] = {
+ "gpio48", "gpio92",
+};
+static const char * const qdss_gpio9_groups[] = {
+ "gpio49", "gpio93",
+};
+static const char * const qlink0_enable_groups[] = {
+ "gpio105",
+};
+static const char * const qlink0_request_groups[] = {
+ "gpio104",
+};
+static const char * const qlink1_enable_groups[] = {
+ "gpio108",
+};
+static const char * const qlink1_request_groups[] = {
+ "gpio107",
+};
+static const char * const qup00_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+static const char * const qup01_groups[] = {
+ "gpio61", "gpio62", "gpio63", "gpio64",
+};
+static const char * const qup02_groups[] = {
+ "gpio45", "gpio46", "gpio48", "gpio56", "gpio57",
+};
+static const char * const qup10_groups[] = {
+ "gpio13", "gpio14", "gpio15", "gpio16", "gpio17",
+};
+static const char * const qup11_f1_groups[] = {
+ "gpio27", "gpio28",
+};
+static const char * const qup11_f2_groups[] = {
+ "gpio27", "gpio28",
+};
+static const char * const qup12_groups[] = {
+ "gpio19", "gpio19", "gpio20", "gpio20",
+};
+static const char * const qup13_f1_groups[] = {
+ "gpio25", "gpio26",
+};
+static const char * const qup13_f2_groups[] = {
+ "gpio25", "gpio26",
+};
+static const char * const qup14_groups[] = {
+ "gpio4", "gpio4", "gpio5", "gpio5",
+};
+static const char * const sd_write_groups[] = {
+ "gpio85",
+};
+static const char * const sdc1_tb_groups[] = {
+ "gpio4",
+};
+static const char * const sdc2_tb_groups[] = {
+ "gpio5",
+};
+static const char * const sp_cmu_groups[] = {
+ "gpio3",
+};
+static const char * const tgu_ch0_groups[] = {
+ "gpio61",
+};
+static const char * const tgu_ch1_groups[] = {
+ "gpio62",
+};
+static const char * const tgu_ch2_groups[] = {
+ "gpio63",
+};
+static const char * const tgu_ch3_groups[] = {
+ "gpio64",
+};
+static const char * const tsense_pwm1_groups[] = {
+ "gpio88",
+};
+static const char * const tsense_pwm2_groups[] = {
+ "gpio88",
+};
+static const char * const uim1_clk_groups[] = {
+ "gpio80",
+};
+static const char * const uim1_data_groups[] = {
+ "gpio79",
+};
+static const char * const uim1_present_groups[] = {
+ "gpio82",
+};
+static const char * const uim1_reset_groups[] = {
+ "gpio81",
+};
+static const char * const uim2_clk_groups[] = {
+ "gpio76",
+};
+static const char * const uim2_data_groups[] = {
+ "gpio75",
+};
+static const char * const uim2_present_groups[] = {
+ "gpio78",
+};
+static const char * const uim2_reset_groups[] = {
+ "gpio77",
+};
+static const char * const usb2phy_ac_groups[] = {
+ "gpio47",
+};
+static const char * const vfr_1_groups[] = {
+ "gpio49",
+};
+static const char * const vsense_trigger_groups[] = {
+ "gpio89",
+};
+static const char * const wlan1_adc0_groups[] = {
+ "gpio90",
+};
+static const char * const wlan1_adc1_groups[] = {
+ "gpio92",
+};
+static const char * const wlan2_adc0_groups[] = {
+ "gpio91",
+};
+static const char * const wlan2_adc1_groups[] = {
+ "gpio93",
+};
+
+static const struct msm_function sm6375_functions[] = {
+ FUNCTION(adsp_ext),
+ FUNCTION(agera_pll),
+ FUNCTION(atest_char),
+ FUNCTION(atest_char0),
+ FUNCTION(atest_char1),
+ FUNCTION(atest_char2),
+ FUNCTION(atest_char3),
+ FUNCTION(atest_tsens),
+ FUNCTION(atest_tsens2),
+ FUNCTION(atest_usb1),
+ FUNCTION(atest_usb10),
+ FUNCTION(atest_usb11),
+ FUNCTION(atest_usb12),
+ FUNCTION(atest_usb13),
+ FUNCTION(atest_usb2),
+ FUNCTION(atest_usb20),
+ FUNCTION(atest_usb21),
+ FUNCTION(atest_usb22),
+ FUNCTION(atest_usb23),
+ FUNCTION(audio_ref),
+ FUNCTION(btfm_slimbus),
+ FUNCTION(cam_mclk),
+ FUNCTION(cci_async),
+ FUNCTION(cci_i2c),
+ FUNCTION(cci_timer0),
+ FUNCTION(cci_timer1),
+ FUNCTION(cci_timer2),
+ FUNCTION(cci_timer3),
+ FUNCTION(cci_timer4),
+ FUNCTION(cri_trng),
+ FUNCTION(dbg_out),
+ FUNCTION(ddr_bist),
+ FUNCTION(ddr_pxi0),
+ FUNCTION(ddr_pxi1),
+ FUNCTION(ddr_pxi2),
+ FUNCTION(ddr_pxi3),
+ FUNCTION(dp_hot),
+ FUNCTION(edp_lcd),
+ FUNCTION(gcc_gp1),
+ FUNCTION(gcc_gp2),
+ FUNCTION(gcc_gp3),
+ FUNCTION(gp_pdm0),
+ FUNCTION(gp_pdm1),
+ FUNCTION(gp_pdm2),
+ FUNCTION(gpio),
+ FUNCTION(gps_tx),
+ FUNCTION(ibi_i3c),
+ FUNCTION(jitter_bist),
+ FUNCTION(ldo_en),
+ FUNCTION(ldo_update),
+ FUNCTION(lpass_ext),
+ FUNCTION(m_voc),
+ FUNCTION(mclk),
+ FUNCTION(mdp_vsync),
+ FUNCTION(mdp_vsync0),
+ FUNCTION(mdp_vsync1),
+ FUNCTION(mdp_vsync2),
+ FUNCTION(mdp_vsync3),
+ FUNCTION(mi2s_0),
+ FUNCTION(mi2s_1),
+ FUNCTION(mi2s_2),
+ FUNCTION(mss_lte),
+ FUNCTION(nav_gpio),
+ FUNCTION(nav_pps),
+ FUNCTION(pa_indicator),
+ FUNCTION(phase_flag0),
+ FUNCTION(phase_flag1),
+ FUNCTION(phase_flag10),
+ FUNCTION(phase_flag11),
+ FUNCTION(phase_flag12),
+ FUNCTION(phase_flag13),
+ FUNCTION(phase_flag14),
+ FUNCTION(phase_flag15),
+ FUNCTION(phase_flag16),
+ FUNCTION(phase_flag17),
+ FUNCTION(phase_flag18),
+ FUNCTION(phase_flag19),
+ FUNCTION(phase_flag2),
+ FUNCTION(phase_flag20),
+ FUNCTION(phase_flag21),
+ FUNCTION(phase_flag22),
+ FUNCTION(phase_flag23),
+ FUNCTION(phase_flag24),
+ FUNCTION(phase_flag25),
+ FUNCTION(phase_flag26),
+ FUNCTION(phase_flag27),
+ FUNCTION(phase_flag28),
+ FUNCTION(phase_flag29),
+ FUNCTION(phase_flag3),
+ FUNCTION(phase_flag30),
+ FUNCTION(phase_flag31),
+ FUNCTION(phase_flag4),
+ FUNCTION(phase_flag5),
+ FUNCTION(phase_flag6),
+ FUNCTION(phase_flag7),
+ FUNCTION(phase_flag8),
+ FUNCTION(phase_flag9),
+ FUNCTION(pll_bist),
+ FUNCTION(pll_bypassnl),
+ FUNCTION(pll_clk),
+ FUNCTION(pll_reset),
+ FUNCTION(prng_rosc0),
+ FUNCTION(prng_rosc1),
+ FUNCTION(prng_rosc2),
+ FUNCTION(prng_rosc3),
+ FUNCTION(qdss_cti),
+ FUNCTION(qdss_gpio),
+ FUNCTION(qdss_gpio0),
+ FUNCTION(qdss_gpio1),
+ FUNCTION(qdss_gpio10),
+ FUNCTION(qdss_gpio11),
+ FUNCTION(qdss_gpio12),
+ FUNCTION(qdss_gpio13),
+ FUNCTION(qdss_gpio14),
+ FUNCTION(qdss_gpio15),
+ FUNCTION(qdss_gpio2),
+ FUNCTION(qdss_gpio3),
+ FUNCTION(qdss_gpio4),
+ FUNCTION(qdss_gpio5),
+ FUNCTION(qdss_gpio6),
+ FUNCTION(qdss_gpio7),
+ FUNCTION(qdss_gpio8),
+ FUNCTION(qdss_gpio9),
+ FUNCTION(qlink0_enable),
+ FUNCTION(qlink0_request),
+ FUNCTION(qlink0_wmss),
+ FUNCTION(qlink1_enable),
+ FUNCTION(qlink1_request),
+ FUNCTION(qlink1_wmss),
+ FUNCTION(qup00),
+ FUNCTION(qup01),
+ FUNCTION(qup02),
+ FUNCTION(qup10),
+ FUNCTION(qup11_f1),
+ FUNCTION(qup11_f2),
+ FUNCTION(qup12),
+ FUNCTION(qup13_f1),
+ FUNCTION(qup13_f2),
+ FUNCTION(qup14),
+ FUNCTION(sd_write),
+ FUNCTION(sdc1_tb),
+ FUNCTION(sdc2_tb),
+ FUNCTION(sp_cmu),
+ FUNCTION(tgu_ch0),
+ FUNCTION(tgu_ch1),
+ FUNCTION(tgu_ch2),
+ FUNCTION(tgu_ch3),
+ FUNCTION(tsense_pwm1),
+ FUNCTION(tsense_pwm2),
+ FUNCTION(uim1_clk),
+ FUNCTION(uim1_data),
+ FUNCTION(uim1_present),
+ FUNCTION(uim1_reset),
+ FUNCTION(uim2_clk),
+ FUNCTION(uim2_data),
+ FUNCTION(uim2_present),
+ FUNCTION(uim2_reset),
+ FUNCTION(usb2phy_ac),
+ FUNCTION(usb_phy),
+ FUNCTION(vfr_1),
+ FUNCTION(vsense_trigger),
+ FUNCTION(wlan1_adc0),
+ FUNCTION(wlan1_adc1),
+ FUNCTION(wlan2_adc0),
+ FUNCTION(wlan2_adc1),
+};
+
+/*
+ * Every pin is maintained as a single group, and missing or non-existing pins
+ * are kept as dummy groups so that the pin group index stays in sync with the
+ * pin descriptors registered with the pinctrl core.
+ * Clients cannot request these dummy pin groups.
+ */
+static const struct msm_pingroup sm6375_groups[] = {
+ [0] = PINGROUP(0, ibi_i3c, qup00, cri_trng, _, _, _, _, _, _),
+ [1] = PINGROUP(1, ibi_i3c, qup00, cri_trng, _, _, _, _, _, _),
+ [2] = PINGROUP(2, qup00, cci_i2c, cri_trng, qdss_cti, _, _, _, _, _),
+ [3] = PINGROUP(3, qup00, cci_i2c, sp_cmu, dbg_out, qdss_cti, _, _, _, _),
+ [4] = PINGROUP(4, qup14, qup14, sdc1_tb, _, _, _, _, _, _),
+ [5] = PINGROUP(5, qup14, qup14, sdc2_tb, _, _, _, _, _, _),
+ [6] = PINGROUP(6, mdp_vsync, qdss_cti, _, _, _, _, _, _, _),
+ [7] = PINGROUP(7, qdss_cti, _, _, _, _, _, _, _, _),
+ [8] = PINGROUP(8, gp_pdm1, qdss_gpio, _, _, _, _, _, _, _),
+ [9] = PINGROUP(9, qdss_gpio, _, _, _, _, _, _, _, _),
+ [10] = PINGROUP(10, _, _, _, _, _, _, _, _, _),
+ [11] = PINGROUP(11, _, _, _, _, _, _, _, _, _),
+ [12] = PINGROUP(12, m_voc, dp_hot, _, phase_flag0, _, _, _, _, _),
+ [13] = PINGROUP(13, qup10, pll_bypassnl, _, _, _, _, _, _, _),
+ [14] = PINGROUP(14, qup10, pll_reset, _, _, _, _, _, _, _),
+ [15] = PINGROUP(15, qup10, _, _, _, _, _, _, _, _),
+ [16] = PINGROUP(16, qup10, _, _, _, _, _, _, _, _),
+ [17] = PINGROUP(17, _, phase_flag1, qup10, _, _, _, _, _, _),
+ [18] = PINGROUP(18, _, phase_flag2, _, _, _, _, _, _, _),
+ [19] = PINGROUP(19, qup12, qup12, ddr_bist, _, _, _, _, _, _),
+ [20] = PINGROUP(20, qup12, qup12, ddr_bist, _, _, _, _, _, _),
+ [21] = PINGROUP(21, gcc_gp2, ddr_bist, _, _, _, _, _, _, _),
+ [22] = PINGROUP(22, gcc_gp3, ddr_bist, _, _, _, _, _, _, _),
+ [23] = PINGROUP(23, mdp_vsync, edp_lcd, _, _, _, _, _, _, _),
+ [24] = PINGROUP(24, mdp_vsync, _, _, _, _, _, _, _, _),
+ [25] = PINGROUP(25, qup13_f1, qup13_f2, _, _, _, _, _, _, _),
+ [26] = PINGROUP(26, qup13_f1, qup13_f2, _, _, _, _, _, _, _),
+ [27] = PINGROUP(27, qup11_f1, qup11_f2, mdp_vsync, pll_bist, _, qdss_gpio14, _, _, _),
+ [28] = PINGROUP(28, qup11_f1, qup11_f2, mdp_vsync, _, qdss_gpio15, _, _, _, _),
+ [29] = PINGROUP(29, cam_mclk, _, _, _, _, _, _, _, _),
+ [30] = PINGROUP(30, cam_mclk, _, _, _, _, _, _, _, _),
+ [31] = PINGROUP(31, cam_mclk, _, _, _, _, _, _, _, _),
+ [32] = PINGROUP(32, cam_mclk, _, _, _, _, _, _, _, _),
+ [33] = PINGROUP(33, cam_mclk, _, _, _, _, _, _, _, _),
+ [34] = PINGROUP(34, cci_timer0, _, phase_flag3, qdss_gpio12, _, _, _, _, _),
+ [35] = PINGROUP(35, cci_timer1, cci_async, _, phase_flag4, qdss_gpio13, _, _, _, _),
+ [36] = PINGROUP(36, cci_timer2, cci_async, _, phase_flag5, qdss_gpio14, _, _, _, _),
+ [37] = PINGROUP(37, cci_timer3, gp_pdm0, _, phase_flag6, qdss_gpio15, _, _, _, _),
+ [38] = PINGROUP(38, cci_timer4, _, phase_flag7, qdss_gpio2, _, _, _, _, _),
+ [39] = PINGROUP(39, cci_i2c, _, phase_flag8, qdss_gpio0, _, _, _, _, _),
+ [40] = PINGROUP(40, cci_i2c, _, phase_flag9, qdss_gpio1, _, _, _, _, _),
+ [41] = PINGROUP(41, cci_i2c, _, phase_flag10, qdss_gpio2, _, _, _, _, _),
+ [42] = PINGROUP(42, cci_i2c, _, phase_flag11, qdss_gpio3, _, _, _, _, _),
+ [43] = PINGROUP(43, cci_i2c, _, phase_flag12, qdss_gpio4, _, _, _, _, _),
+ [44] = PINGROUP(44, cci_i2c, _, phase_flag13, qdss_gpio5, _, _, _, _, _),
+ [45] = PINGROUP(45, qup02, _, phase_flag14, qdss_gpio6, _, _, _, _, _),
+ [46] = PINGROUP(46, qup02, _, phase_flag15, qdss_gpio7, _, _, _, _, _),
+ [47] = PINGROUP(47, mdp_vsync0, _, phase_flag16, qdss_gpio3, _, _, usb2phy_ac, _, _),
+ [48] = PINGROUP(48, cci_async, mdp_vsync1, gcc_gp1, _, phase_flag17, qdss_gpio8, qup02,
+ _, _),
+ [49] = PINGROUP(49, vfr_1, _, phase_flag18, qdss_gpio9, _, _, _, _, _),
+ [50] = PINGROUP(50, _, phase_flag19, qdss_gpio10, _, _, _, _, _, _),
+ [51] = PINGROUP(51, _, phase_flag20, qdss_gpio11, _, _, _, _, _, _),
+ [52] = PINGROUP(52, cci_async, gp_pdm1, _, phase_flag21, qdss_gpio12, _, _, _, _),
+ [53] = PINGROUP(53, cci_async, _, phase_flag22, qdss_gpio13, _, _, _, _, _),
+ [54] = PINGROUP(54, _, _, _, _, _, _, _, _, _),
+ [55] = PINGROUP(55, _, _, _, _, _, _, _, _, _),
+ [56] = PINGROUP(56, qup02, mdp_vsync2, _, phase_flag23, qdss_gpio10, _, _, _, _),
+ [57] = PINGROUP(57, qup02, mdp_vsync3, gp_pdm2, _, phase_flag24, qdss_gpio11, _, _, _),
+ [58] = PINGROUP(58, gcc_gp1, _, _, _, _, _, _, _, _),
+ [59] = PINGROUP(59, _, _, _, _, _, _, _, _, _),
+ [60] = PINGROUP(60, audio_ref, lpass_ext, mi2s_2, _, phase_flag25, _, _, _, _),
+ [61] = PINGROUP(61, qup01, tgu_ch0, _, phase_flag26, qdss_cti, _, _, _, _),
+ [62] = PINGROUP(62, qup01, tgu_ch1, _, phase_flag27, qdss_cti, _, _, _, _),
+ [63] = PINGROUP(63, qup01, tgu_ch2, _, phase_flag28, qdss_gpio, _, _, _, _),
+ [64] = PINGROUP(64, qup01, tgu_ch3, _, phase_flag29, qdss_gpio, _, _, _, _),
+ [65] = PINGROUP(65, mss_lte, _, qdss_gpio0, _, _, _, _, _, _),
+ [66] = PINGROUP(66, mss_lte, _, qdss_gpio1, _, _, _, _, _, _),
+ [67] = PINGROUP(67, btfm_slimbus, mi2s_1, _, phase_flag30, _, _, _, _, _),
+ [68] = PINGROUP(68, btfm_slimbus, mi2s_1, gp_pdm0, _, phase_flag31, _, _, _, _),
+ [69] = PINGROUP(69, _, _, _, _, _, _, _, _, _),
+ [70] = PINGROUP(70, _, _, _, _, _, _, _, _, _),
+ [71] = PINGROUP(71, _, _, _, _, _, _, _, _, _),
+ [72] = PINGROUP(72, _, _, _, _, _, _, _, _, _),
+ [73] = PINGROUP(73, _, _, _, _, _, _, _, _, _),
+ [74] = PINGROUP(74, _, _, _, _, _, _, _, _, _),
+ [75] = PINGROUP(75, uim2_data, _, _, _, _, _, _, _, _),
+ [76] = PINGROUP(76, uim2_clk, _, _, _, _, _, _, _, _),
+ [77] = PINGROUP(77, uim2_reset, _, _, _, _, _, _, _, _),
+ [78] = PINGROUP(78, uim2_present, _, _, _, _, _, _, _, _),
+ [79] = PINGROUP(79, uim1_data, _, _, _, _, _, _, _, _),
+ [80] = PINGROUP(80, uim1_clk, _, _, _, _, _, _, _, _),
+ [81] = PINGROUP(81, uim1_reset, _, _, _, _, _, _, _, _),
+ [82] = PINGROUP(82, uim1_present, _, _, _, _, _, _, _, _),
+ [83] = PINGROUP(83, atest_usb1, _, _, _, _, _, _, _, _),
+ [84] = PINGROUP(84, _, atest_usb10, _, _, _, _, _, _, _),
+ [85] = PINGROUP(85, sd_write, _, atest_usb11, _, _, _, _, _, _),
+ [86] = PINGROUP(86, btfm_slimbus, mi2s_1, _, qdss_cti, atest_usb12, ddr_pxi0, _, _, _),
+ [87] = PINGROUP(87, btfm_slimbus, mi2s_1, adsp_ext, _, qdss_cti, atest_usb13, ddr_pxi1, _,
+ _),
+ [88] = PINGROUP(88, mi2s_0, _, qdss_gpio4, _, atest_usb2, ddr_pxi2, tsense_pwm1,
+ tsense_pwm2, _),
+ [89] = PINGROUP(89, mi2s_0, agera_pll, _, qdss_gpio5, _, vsense_trigger, atest_usb20,
+ ddr_pxi3, _),
+ [90] = PINGROUP(90, mi2s_0, jitter_bist, _, qdss_gpio6, _, wlan1_adc0, atest_usb21,
+ ddr_pxi0, _),
+ [91] = PINGROUP(91, mi2s_0, _, qdss_gpio7, _, wlan2_adc0, atest_usb22, ddr_pxi1, _, _),
+ [92] = PINGROUP(92, _, qdss_gpio8, atest_tsens, wlan1_adc1, atest_usb23, ddr_pxi2, _, _,
+ _),
+ [93] = PINGROUP(93, mclk, lpass_ext, _, qdss_gpio9, atest_tsens2, wlan2_adc1, ddr_pxi3,
+ _, _),
+ [94] = PINGROUP(94, _, _, _, _, _, _, _, _, _),
+ [95] = PINGROUP(95, ldo_en, _, atest_char, _, _, _, _, _, _),
+ [96] = PINGROUP(96, ldo_update, _, atest_char0, _, _, _, _, _, _),
+ [97] = PINGROUP(97, prng_rosc0, _, atest_char1, _, _, _, _, _, _),
+ [98] = PINGROUP(98, _, atest_char2, _, _, prng_rosc1, pll_clk, _, _, _),
+ [99] = PINGROUP(99, _, atest_char3, _, _, prng_rosc2, _, _, _, _),
+ [100] = PINGROUP(100, _, _, prng_rosc3, _, _, _, _, _, _),
+ [101] = PINGROUP(101, nav_gpio, nav_pps, nav_pps, gps_tx, _, _, _, _, _),
+ [102] = PINGROUP(102, nav_gpio, nav_pps, nav_pps, gps_tx, _, _, _, _, _),
+ [103] = PINGROUP(103, qlink0_wmss, _, _, _, _, _, _, _, _),
+ [104] = PINGROUP(104, qlink0_request, _, _, _, _, _, _, _, _),
+ [105] = PINGROUP(105, qlink0_enable, _, _, _, _, _, _, _, _),
+ [106] = PINGROUP(106, qlink1_wmss, _, _, _, _, _, _, _, _),
+ [107] = PINGROUP(107, qlink1_request, gps_tx, _, _, _, _, _, _, _),
+ [108] = PINGROUP(108, qlink1_enable, gps_tx, _, _, _, _, _, _, _),
+ [109] = PINGROUP(109, _, _, _, _, _, _, _, _, _),
+ [110] = PINGROUP(110, _, _, _, _, _, _, _, _, _),
+ [111] = PINGROUP(111, _, _, _, _, _, _, _, _, _),
+ [112] = PINGROUP(112, _, _, _, _, _, _, _, _, _),
+ [113] = PINGROUP(113, _, _, _, _, _, _, _, _, _),
+ [114] = PINGROUP(114, _, _, _, _, _, _, _, _, _),
+ [115] = PINGROUP(115, _, _, _, _, _, _, _, _, _),
+ [116] = PINGROUP(116, _, _, _, _, _, _, _, _, _),
+ [117] = PINGROUP(117, _, _, _, _, _, _, _, _, _),
+ [118] = PINGROUP(118, _, _, pa_indicator, dp_hot, _, _, _, _, _),
+ [119] = PINGROUP(119, _, _, _, _, _, _, _, _, _),
+ [120] = PINGROUP(120, _, _, _, _, _, _, _, _, _),
+ [121] = PINGROUP(121, _, _, _, _, _, _, _, _, _),
+ [122] = PINGROUP(122, _, _, _, _, _, _, _, _, _),
+ [123] = PINGROUP(123, _, _, _, _, _, _, _, _, _),
+ [124] = PINGROUP(124, usb_phy, _, _, _, _, _, _, _, _),
+ [125] = PINGROUP(125, _, _, _, _, _, _, _, _, _),
+ [126] = PINGROUP(126, _, _, _, _, _, _, _, _, _),
+ [127] = PINGROUP(127, _, _, _, _, _, _, _, _, _),
+ [128] = PINGROUP(128, _, _, _, _, _, _, _, _, _),
+ [129] = PINGROUP(129, _, _, _, _, _, _, _, _, _),
+ [130] = PINGROUP(130, _, _, _, _, _, _, _, _, _),
+ [131] = PINGROUP(131, _, _, _, _, _, _, _, _, _),
+ [132] = PINGROUP(132, _, _, _, _, _, _, _, _, _),
+ [133] = PINGROUP(133, _, _, _, _, _, _, _, _, _),
+ [134] = PINGROUP(134, _, _, _, _, _, _, _, _, _),
+ [135] = PINGROUP(135, _, _, _, _, _, _, _, _, _),
+ [136] = PINGROUP(136, _, _, _, _, _, _, _, _, _),
+ [137] = PINGROUP(137, _, _, _, _, _, _, _, _, _),
+ [138] = PINGROUP(138, _, _, _, _, _, _, _, _, _),
+ [139] = PINGROUP(139, _, _, _, _, _, _, _, _, _),
+ [140] = PINGROUP(140, _, _, _, _, _, _, _, _, _),
+ [141] = PINGROUP(141, _, _, _, _, _, _, _, _, _),
+ [142] = PINGROUP(142, _, _, _, _, _, _, _, _, _),
+ [143] = PINGROUP(143, _, _, _, _, _, _, _, _, _),
+ [144] = PINGROUP(144, _, _, _, _, _, _, _, _, _),
+ [145] = PINGROUP(145, _, _, _, _, _, _, _, _, _),
+ [146] = PINGROUP(146, _, _, _, _, _, _, _, _, _),
+ [147] = PINGROUP(147, _, _, _, _, _, _, _, _, _),
+ [148] = PINGROUP(148, _, _, _, _, _, _, _, _, _),
+ [149] = PINGROUP(149, _, _, _, _, _, _, _, _, _),
+ [150] = PINGROUP(150, _, _, _, _, _, _, _, _, _),
+ [151] = PINGROUP(151, _, _, _, _, _, _, _, _, _),
+ [152] = PINGROUP(152, _, _, _, _, _, _, _, _, _),
+ [153] = PINGROUP(153, _, _, _, _, _, _, _, _, _),
+ [154] = PINGROUP(154, _, _, _, _, _, _, _, _, _),
+ [155] = PINGROUP(155, _, _, _, _, _, _, _, _, _),
+ [156] = UFS_RESET(ufs_reset, 0x1ae000),
+ [157] = SDC_PINGROUP(sdc1_rclk, 0x1a1000, 0, 0),
+ [158] = SDC_PINGROUP(sdc1_clk, 0x1a0000, 13, 6),
+ [159] = SDC_PINGROUP(sdc1_cmd, 0x1a0000, 11, 3),
+ [160] = SDC_PINGROUP(sdc1_data, 0x1a0000, 9, 0),
+ [161] = SDC_PINGROUP(sdc2_clk, 0x1a2000, 14, 6),
+ [162] = SDC_PINGROUP(sdc2_cmd, 0x1a2000, 11, 3),
+ [163] = SDC_PINGROUP(sdc2_data, 0x1a2000, 9, 0),
+};
+
+static const struct msm_gpio_wakeirq_map sm6375_mpm_map[] = {
+ { 0, 84 }, { 3, 6 }, { 4, 7 }, { 7, 8 }, { 8, 9 }, { 9, 10 }, { 11, 11 }, { 12, 13 },
+ { 13, 14 }, { 16, 16 }, { 17, 17 }, { 18, 18 }, { 19, 19 }, { 21, 20 }, { 22, 21 },
+ { 23, 23 }, { 24, 24 }, { 25, 25 }, { 27, 26 }, { 28, 27 }, { 37, 28 }, { 38, 29 },
+ { 48, 30 }, { 50, 31 }, { 51, 32 }, { 52, 33 }, { 57, 34 }, { 59, 35 }, { 60, 37 },
+ { 61, 38 }, { 62, 39 }, { 64, 40 }, { 66, 41 }, { 67, 42 }, { 68, 43 }, { 69, 44 },
+ { 78, 45 }, { 82, 36 }, { 83, 47 }, { 84, 48 }, { 85, 49 }, { 87, 50 }, { 88, 51 },
+ { 91, 52 }, { 94, 53 }, { 95, 54 }, { 96, 55 }, { 97, 56 }, { 98, 57 }, { 99, 58 },
+ { 100, 59 }, { 104, 60 }, { 107, 61 }, { 118, 62 }, { 124, 63 }, { 125, 64 }, { 126, 65 },
+ { 128, 66 }, { 129, 67 }, { 131, 69 }, { 133, 70 }, { 134, 71 }, { 136, 73 }, { 142, 74 },
+ { 150, 75 }, { 153, 76 }, { 155, 77 },
+};
+
+static const struct msm_pinctrl_soc_data sm6375_tlmm = {
+ .pins = sm6375_pins,
+ .npins = ARRAY_SIZE(sm6375_pins),
+ .functions = sm6375_functions,
+ .nfunctions = ARRAY_SIZE(sm6375_functions),
+ .groups = sm6375_groups,
+ .ngroups = ARRAY_SIZE(sm6375_groups),
+ .ngpios = 157,
+ .wakeirq_map = sm6375_mpm_map,
+ .nwakeirq_map = ARRAY_SIZE(sm6375_mpm_map),
+};
+
+static int sm6375_tlmm_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &sm6375_tlmm);
+}
+
+static const struct of_device_id sm6375_tlmm_of_match[] = {
+ { .compatible = "qcom,sm6375-tlmm", },
+ { },
+};
+
+static struct platform_driver sm6375_tlmm_driver = {
+ .driver = {
+ .name = "sm6375-tlmm",
+ .of_match_table = sm6375_tlmm_of_match,
+ },
+ .probe = sm6375_tlmm_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init sm6375_tlmm_init(void)
+{
+ return platform_driver_register(&sm6375_tlmm_driver);
+}
+arch_initcall(sm6375_tlmm_init);
+
+static void __exit sm6375_tlmm_exit(void)
+{
+ platform_driver_unregister(&sm6375_tlmm_driver);
+}
+module_exit(sm6375_tlmm_exit);
+
+MODULE_DESCRIPTION("QTI SM6375 TLMM driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, sm6375_tlmm_of_match);
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8250.c b/drivers/pinctrl/qcom/pinctrl-sm8250.c
index af144e724bd9..3bd7f9fedcc3 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8250.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8250.c
@@ -1316,7 +1316,7 @@ static const struct msm_pingroup sm8250_groups[] = {
static const struct msm_gpio_wakeirq_map sm8250_pdc_map[] = {
{ 0, 79 }, { 1, 84 }, { 2, 80 }, { 3, 82 }, { 4, 107 }, { 7, 43 },
{ 11, 42 }, { 14, 44 }, { 15, 52 }, { 19, 67 }, { 23, 68 }, { 24, 105 },
- { 27, 92 }, { 28, 106 }, { 31, 69 }, { 35, 70 }, { 39, 37 },
+ { 27, 92 }, { 28, 106 }, { 31, 69 }, { 35, 70 }, { 39, 73 },
{ 40, 108 }, { 43, 71 }, { 45, 72 }, { 47, 83 }, { 51, 74 }, { 55, 77 },
{ 59, 78 }, { 63, 75 }, { 64, 81 }, { 65, 87 }, { 66, 88 }, { 67, 89 },
{ 68, 54 }, { 70, 85 }, { 77, 46 }, { 80, 90 }, { 81, 91 }, { 83, 97 },
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 3be2a08ae3a6..ccaf40a9c0e6 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -1159,6 +1159,7 @@ static const struct of_device_id pmic_gpio_of_match[] = {
/* pm8150l has 12 GPIOs with holes on 7 */
{ .compatible = "qcom,pm8150l-gpio", .data = (void *) 12 },
{ .compatible = "qcom,pmc8180c-gpio", .data = (void *) 12 },
+ { .compatible = "qcom,pm8226-gpio", .data = (void *) 8 },
{ .compatible = "qcom,pm8350-gpio", .data = (void *) 10 },
{ .compatible = "qcom,pm8350b-gpio", .data = (void *) 8 },
{ .compatible = "qcom,pm8350c-gpio", .data = (void *) 9 },
@@ -1175,6 +1176,8 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pmi8998-gpio", .data = (void *) 14 },
{ .compatible = "qcom,pmk8350-gpio", .data = (void *) 4 },
{ .compatible = "qcom,pmm8155au-gpio", .data = (void *) 10 },
+ /* pmp8074 has 12 GPIOs with holes on 1 and 12 */
+ { .compatible = "qcom,pmp8074-gpio", .data = (void *) 12 },
{ .compatible = "qcom,pmr735a-gpio", .data = (void *) 4 },
{ .compatible = "qcom,pmr735b-gpio", .data = (void *) 4 },
/* pms405 has 12 GPIOs with holes on 1, 9, and 10 */
diff --git a/drivers/pinctrl/renesas/Kconfig b/drivers/pinctrl/renesas/Kconfig
index 961007ce7b3a..0903a0a41831 100644
--- a/drivers/pinctrl/renesas/Kconfig
+++ b/drivers/pinctrl/renesas/Kconfig
@@ -38,7 +38,9 @@ config PINCTRL_RENESAS
select PINCTRL_PFC_R8A77995 if ARCH_R8A77995
select PINCTRL_PFC_R8A779A0 if ARCH_R8A779A0
select PINCTRL_PFC_R8A779F0 if ARCH_R8A779F0
+ select PINCTRL_PFC_R8A779G0 if ARCH_R8A779G0
select PINCTRL_RZG2L if ARCH_RZG2L
+ select PINCTRL_RZV2M if ARCH_R9A09G011
select PINCTRL_PFC_SH7203 if CPU_SUBTYPE_SH7203
select PINCTRL_PFC_SH7264 if CPU_SUBTYPE_SH7264
select PINCTRL_PFC_SH7269 if CPU_SUBTYPE_SH7269
@@ -153,6 +155,10 @@ config PINCTRL_PFC_R8A779A0
bool "pin control support for R-Car V3U" if COMPILE_TEST
select PINCTRL_SH_PFC
+config PINCTRL_PFC_R8A779G0
+ bool "pin control support for R-Car V4H" if COMPILE_TEST
+ select PINCTRL_SH_PFC
+
config PINCTRL_PFC_R8A7740
bool "pin control support for R-Mobile A1" if COMPILE_TEST
select PINCTRL_SH_PFC_GPIO
@@ -237,6 +243,18 @@ config PINCTRL_RZN1
help
This selects pinctrl driver for Renesas RZ/N1 devices.
+config PINCTRL_RZV2M
+ bool "pin control support for RZ/V2M"
+ depends on OF
+ depends on ARCH_R9A09G011 || COMPILE_TEST
+ select GPIOLIB
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+ select GENERIC_PINCONF
+ help
+ This selects GPIO and pinctrl driver for Renesas RZ/V2M
+ platforms.
+
config PINCTRL_PFC_SH7203
bool "pin control support for SH7203" if COMPILE_TEST
select PINCTRL_SH_FUNC_GPIO
diff --git a/drivers/pinctrl/renesas/Makefile b/drivers/pinctrl/renesas/Makefile
index 5d936c154a6f..558b30ce0dec 100644
--- a/drivers/pinctrl/renesas/Makefile
+++ b/drivers/pinctrl/renesas/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_PINCTRL_PFC_R8A77990) += pfc-r8a77990.o
obj-$(CONFIG_PINCTRL_PFC_R8A77995) += pfc-r8a77995.o
obj-$(CONFIG_PINCTRL_PFC_R8A779A0) += pfc-r8a779a0.o
obj-$(CONFIG_PINCTRL_PFC_R8A779F0) += pfc-r8a779f0.o
+obj-$(CONFIG_PINCTRL_PFC_R8A779G0) += pfc-r8a779g0.o
obj-$(CONFIG_PINCTRL_PFC_SH7203) += pfc-sh7203.o
obj-$(CONFIG_PINCTRL_PFC_SH7264) += pfc-sh7264.o
obj-$(CONFIG_PINCTRL_PFC_SH7269) += pfc-sh7269.o
@@ -49,6 +50,7 @@ obj-$(CONFIG_PINCTRL_RZA1) += pinctrl-rza1.o
obj-$(CONFIG_PINCTRL_RZA2) += pinctrl-rza2.o
obj-$(CONFIG_PINCTRL_RZG2L) += pinctrl-rzg2l.o
obj-$(CONFIG_PINCTRL_RZN1) += pinctrl-rzn1.o
+obj-$(CONFIG_PINCTRL_RZV2M) += pinctrl-rzv2m.o
ifeq ($(CONFIG_COMPILE_TEST),y)
CFLAGS_pfc-sh7203.o += -I$(srctree)/arch/sh/include/cpu-sh2a
diff --git a/drivers/pinctrl/renesas/core.c b/drivers/pinctrl/renesas/core.c
index 8c14b2021bf0..c91102d3f1d1 100644
--- a/drivers/pinctrl/renesas/core.c
+++ b/drivers/pinctrl/renesas/core.c
@@ -644,6 +644,12 @@ static const struct of_device_id sh_pfc_of_table[] = {
.data = &r8a779f0_pinmux_info,
},
#endif
+#ifdef CONFIG_PINCTRL_PFC_R8A779G0
+ {
+ .compatible = "renesas,pfc-r8a779g0",
+ .data = &r8a779g0_pinmux_info,
+ },
+#endif
#ifdef CONFIG_PINCTRL_PFC_SH73A0
{
.compatible = "renesas,pfc-sh73a0",
diff --git a/drivers/pinctrl/renesas/pfc-r8a779f0.c b/drivers/pinctrl/renesas/pfc-r8a779f0.c
index aaca4ee2af55..417c357f16b1 100644
--- a/drivers/pinctrl/renesas/pfc-r8a779f0.c
+++ b/drivers/pinctrl/renesas/pfc-r8a779f0.c
@@ -1902,7 +1902,6 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
enum ioctrl_regs {
POC0,
POC1,
- POC2,
POC3,
TD0SEL1,
};
@@ -1910,7 +1909,6 @@ enum ioctrl_regs {
static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
[POC0] = { 0xe60500a0, },
[POC1] = { 0xe60508a0, },
- [POC2] = { 0xe60510a0, },
[POC3] = { 0xe60518a0, },
[TD0SEL1] = { 0xe6050920, },
{ /* sentinel */ },
diff --git a/drivers/pinctrl/renesas/pfc-r8a779g0.c b/drivers/pinctrl/renesas/pfc-r8a779g0.c
new file mode 100644
index 000000000000..5dd1c2c7708a
--- /dev/null
+++ b/drivers/pinctrl/renesas/pfc-r8a779g0.c
@@ -0,0 +1,4262 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * R8A779G0 processor support - PFC hardware block.
+ *
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ *
+ * This file is based on the drivers/pinctrl/renesas/pfc-r8a779a0.c
+ */
+
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+
+#include "sh_pfc.h"
+
+#define CFG_FLAGS (SH_PFC_PIN_CFG_DRIVE_STRENGTH | SH_PFC_PIN_CFG_PULL_UP_DOWN)
+
+#define CPU_ALL_GP(fn, sfx) \
+ PORT_GP_CFG_19(0, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_23(1, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(1, 23, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(1, 24, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(1, 25, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(1, 26, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(1, 27, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(1, 28, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_20(2, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_13(3, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33), \
+ PORT_GP_CFG_1(3, 13, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 14, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 15, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 16, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 17, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 18, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 19, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 20, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 21, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 22, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 23, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 24, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 25, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 26, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 27, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 28, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_1(3, 29, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_25(4, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_21(5, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_21(6, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_21(7, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_14(8, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE_18_33)
+
+/* GPSR0 */
+#define GPSR0_18 F_(MSIOF2_RXD, IP2SR0_11_8)
+#define GPSR0_17 F_(MSIOF2_SCK, IP2SR0_7_4)
+#define GPSR0_16 F_(MSIOF2_TXD, IP2SR0_3_0)
+#define GPSR0_15 F_(MSIOF2_SYNC, IP1SR0_31_28)
+#define GPSR0_14 F_(MSIOF2_SS1, IP1SR0_27_24)
+#define GPSR0_13 F_(MSIOF2_SS2, IP1SR0_23_20)
+#define GPSR0_12 F_(MSIOF5_RXD, IP1SR0_19_16)
+#define GPSR0_11 F_(MSIOF5_SCK, IP1SR0_15_12)
+#define GPSR0_10 F_(MSIOF5_TXD, IP1SR0_11_8)
+#define GPSR0_9 F_(MSIOF5_SYNC, IP1SR0_7_4)
+#define GPSR0_8 F_(MSIOF5_SS1, IP1SR0_3_0)
+#define GPSR0_7 F_(MSIOF5_SS2, IP0SR0_31_28)
+#define GPSR0_6 F_(IRQ0, IP0SR0_27_24)
+#define GPSR0_5 F_(IRQ1, IP0SR0_23_20)
+#define GPSR0_4 F_(IRQ2, IP0SR0_19_16)
+#define GPSR0_3 F_(IRQ3, IP0SR0_15_12)
+#define GPSR0_2 F_(GP0_02, IP0SR0_11_8)
+#define GPSR0_1 F_(GP0_01, IP0SR0_7_4)
+#define GPSR0_0 F_(GP0_00, IP0SR0_3_0)
+
+/* GPSR1 */
+#define GPSR1_28 F_(HTX3, IP3SR1_19_16)
+#define GPSR1_27 F_(HCTS3_N, IP3SR1_15_12)
+#define GPSR1_26 F_(HRTS3_N, IP3SR1_11_8)
+#define GPSR1_25 F_(HSCK3, IP3SR1_7_4)
+#define GPSR1_24 F_(HRX3, IP3SR1_3_0)
+#define GPSR1_23 F_(GP1_23, IP2SR1_31_28)
+#define GPSR1_22 F_(AUDIO_CLKIN, IP2SR1_27_24)
+#define GPSR1_21 F_(AUDIO_CLKOUT, IP2SR1_23_20)
+#define GPSR1_20 F_(SSI_SD, IP2SR1_19_16)
+#define GPSR1_19 F_(SSI_WS, IP2SR1_15_12)
+#define GPSR1_18 F_(SSI_SCK, IP2SR1_11_8)
+#define GPSR1_17 F_(SCIF_CLK, IP2SR1_7_4)
+#define GPSR1_16 F_(HRX0, IP2SR1_3_0)
+#define GPSR1_15 F_(HSCK0, IP1SR1_31_28)
+#define GPSR1_14 F_(HRTS0_N, IP1SR1_27_24)
+#define GPSR1_13 F_(HCTS0_N, IP1SR1_23_20)
+#define GPSR1_12 F_(HTX0, IP1SR1_19_16)
+#define GPSR1_11 F_(MSIOF0_RXD, IP1SR1_15_12)
+#define GPSR1_10 F_(MSIOF0_SCK, IP1SR1_11_8)
+#define GPSR1_9 F_(MSIOF0_TXD, IP1SR1_7_4)
+#define GPSR1_8 F_(MSIOF0_SYNC, IP1SR1_3_0)
+#define GPSR1_7 F_(MSIOF0_SS1, IP0SR1_31_28)
+#define GPSR1_6 F_(MSIOF0_SS2, IP0SR1_27_24)
+#define GPSR1_5 F_(MSIOF1_RXD, IP0SR1_23_20)
+#define GPSR1_4 F_(MSIOF1_TXD, IP0SR1_19_16)
+#define GPSR1_3 F_(MSIOF1_SCK, IP0SR1_15_12)
+#define GPSR1_2 F_(MSIOF1_SYNC, IP0SR1_11_8)
+#define GPSR1_1 F_(MSIOF1_SS1, IP0SR1_7_4)
+#define GPSR1_0 F_(MSIOF1_SS2, IP0SR1_3_0)
+
+/* GPSR2 */
+#define GPSR2_19 F_(CANFD7_RX, IP2SR2_15_12)
+#define GPSR2_18 F_(CANFD7_TX, IP2SR2_11_8)
+#define GPSR2_17 F_(CANFD4_RX, IP2SR2_7_4)
+#define GPSR2_16 F_(CANFD4_TX, IP2SR2_3_0)
+#define GPSR2_15 F_(CANFD3_RX, IP1SR2_31_28)
+#define GPSR2_14 F_(CANFD3_TX, IP1SR2_27_24)
+#define GPSR2_13 F_(CANFD2_RX, IP1SR2_23_20)
+#define GPSR2_12 F_(CANFD2_TX, IP1SR2_19_16)
+#define GPSR2_11 F_(CANFD0_RX, IP1SR2_15_12)
+#define GPSR2_10 F_(CANFD0_TX, IP1SR2_11_8)
+#define GPSR2_9 F_(CAN_CLK, IP1SR2_7_4)
+#define GPSR2_8 F_(TPU0TO0, IP1SR2_3_0)
+#define GPSR2_7 F_(TPU0TO1, IP0SR2_31_28)
+#define GPSR2_6 F_(FXR_TXDB, IP0SR2_27_24)
+#define GPSR2_5 F_(FXR_TXENB_N, IP0SR2_23_20)
+#define GPSR2_4 F_(RXDB_EXTFXR, IP0SR2_19_16)
+#define GPSR2_3 F_(CLK_EXTFXR, IP0SR2_15_12)
+#define GPSR2_2 F_(RXDA_EXTFXR, IP0SR2_11_8)
+#define GPSR2_1 F_(FXR_TXENA_N, IP0SR2_7_4)
+#define GPSR2_0 F_(FXR_TXDA, IP0SR2_3_0)
+
+/* GPSR3 */
+#define GPSR3_29 F_(RPC_INT_N, IP3SR3_23_20)
+#define GPSR3_28 F_(RPC_WP_N, IP3SR3_19_16)
+#define GPSR3_27 F_(RPC_RESET_N, IP3SR3_15_12)
+#define GPSR3_26 F_(QSPI1_IO3, IP3SR3_11_8)
+#define GPSR3_25 F_(QSPI1_SSL, IP3SR3_7_4)
+#define GPSR3_24 F_(QSPI1_IO2, IP3SR3_3_0)
+#define GPSR3_23 F_(QSPI1_MISO_IO1, IP2SR3_31_28)
+#define GPSR3_22 F_(QSPI1_SPCLK, IP2SR3_27_24)
+#define GPSR3_21 F_(QSPI1_MOSI_IO0, IP2SR3_23_20)
+#define GPSR3_20 F_(QSPI0_SPCLK, IP2SR3_19_16)
+#define GPSR3_19 F_(QSPI0_MOSI_IO0, IP2SR3_15_12)
+#define GPSR3_18 F_(QSPI0_MISO_IO1, IP2SR3_11_8)
+#define GPSR3_17 F_(QSPI0_IO2, IP2SR3_7_4)
+#define GPSR3_16 F_(QSPI0_IO3, IP2SR3_3_0)
+#define GPSR3_15 F_(QSPI0_SSL, IP1SR3_31_28)
+#define GPSR3_14 F_(IPC_CLKOUT, IP1SR3_27_24)
+#define GPSR3_13 F_(IPC_CLKIN, IP1SR3_23_20)
+#define GPSR3_12 F_(SD_WP, IP1SR3_19_16)
+#define GPSR3_11 F_(SD_CD, IP1SR3_15_12)
+#define GPSR3_10 F_(MMC_SD_CMD, IP1SR3_11_8)
+#define GPSR3_9 F_(MMC_D6, IP1SR3_7_4)
+#define GPSR3_8 F_(MMC_D7, IP1SR3_3_0)
+#define GPSR3_7 F_(MMC_D4, IP0SR3_31_28)
+#define GPSR3_6 F_(MMC_D5, IP0SR3_27_24)
+#define GPSR3_5 F_(MMC_SD_D3, IP0SR3_23_20)
+#define GPSR3_4 F_(MMC_DS, IP0SR3_19_16)
+#define GPSR3_3 F_(MMC_SD_CLK, IP0SR3_15_12)
+#define GPSR3_2 F_(MMC_SD_D2, IP0SR3_11_8)
+#define GPSR3_1 F_(MMC_SD_D0, IP0SR3_7_4)
+#define GPSR3_0 F_(MMC_SD_D1, IP0SR3_3_0)
+
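+/* GPSR4 and GPSR5 pins have fixed peripheral functions (no IPnSRx field), hence FM(). */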
+/* GPSR4 */
+#define GPSR4_24 FM(AVS1)
+#define GPSR4_23 FM(AVS0)
+#define GPSR4_22 FM(PCIE1_CLKREQ_N)
+#define GPSR4_21 FM(PCIE0_CLKREQ_N)
+#define GPSR4_20 FM(TSN0_TXCREFCLK)
+#define GPSR4_19 FM(TSN0_TD2)
+#define GPSR4_18 FM(TSN0_TD3)
+#define GPSR4_17 FM(TSN0_RD2)
+#define GPSR4_16 FM(TSN0_RD3)
+#define GPSR4_15 FM(TSN0_TD0)
+#define GPSR4_14 FM(TSN0_TD1)
+#define GPSR4_13 FM(TSN0_RD1)
+#define GPSR4_12 FM(TSN0_TXC)
+#define GPSR4_11 FM(TSN0_RXC)
+#define GPSR4_10 FM(TSN0_RD0)
+#define GPSR4_9 FM(TSN0_TX_CTL)
+#define GPSR4_8 FM(TSN0_AVTP_PPS0)
+#define GPSR4_7 FM(TSN0_RX_CTL)
+#define GPSR4_6 FM(TSN0_AVTP_CAPTURE)
+#define GPSR4_5 FM(TSN0_AVTP_MATCH)
+#define GPSR4_4 FM(TSN0_LINK)
+#define GPSR4_3 FM(TSN0_PHY_INT)
+#define GPSR4_2 FM(TSN0_AVTP_PPS1)
+#define GPSR4_1 FM(TSN0_MDC)
+#define GPSR4_0 FM(TSN0_MDIO)
+
+/* GPSR5 */
+#define GPSR5_20 FM(AVB2_RX_CTL)
+#define GPSR5_19 FM(AVB2_TX_CTL)
+#define GPSR5_18 FM(AVB2_RXC)
+#define GPSR5_17 FM(AVB2_RD0)
+#define GPSR5_16 FM(AVB2_TXC)
+#define GPSR5_15 FM(AVB2_TD0)
+#define GPSR5_14 FM(AVB2_RD1)
+#define GPSR5_13 FM(AVB2_RD2)
+#define GPSR5_12 FM(AVB2_TD1)
+#define GPSR5_11 FM(AVB2_TD2)
+#define GPSR5_10 FM(AVB2_MDIO)
+#define GPSR5_9 FM(AVB2_RD3)
+#define GPSR5_8 FM(AVB2_TD3)
+#define GPSR5_7 FM(AVB2_TXCREFCLK)
+#define GPSR5_6 FM(AVB2_MDC)
+#define GPSR5_5 FM(AVB2_MAGIC)
+#define GPSR5_4 FM(AVB2_PHY_INT)
+#define GPSR5_3 FM(AVB2_LINK)
+#define GPSR5_2 FM(AVB2_AVTP_MATCH)
+#define GPSR5_1 FM(AVB2_AVTP_CAPTURE)
+#define GPSR5_0 FM(AVB2_AVTP_PPS)
+
+/* GPSR6 */
+#define GPSR6_20 F_(AVB1_TXCREFCLK, IP2SR6_19_16)
+#define GPSR6_19 F_(AVB1_RD3, IP2SR6_15_12)
+#define GPSR6_18 F_(AVB1_TD3, IP2SR6_11_8)
+#define GPSR6_17 F_(AVB1_RD2, IP2SR6_7_4)
+#define GPSR6_16 F_(AVB1_TD2, IP2SR6_3_0)
+#define GPSR6_15 F_(AVB1_RD0, IP1SR6_31_28)
+#define GPSR6_14 F_(AVB1_RD1, IP1SR6_27_24)
+#define GPSR6_13 F_(AVB1_TD0, IP1SR6_23_20)
+#define GPSR6_12 F_(AVB1_TD1, IP1SR6_19_16)
+#define GPSR6_11 F_(AVB1_AVTP_CAPTURE, IP1SR6_15_12)
+#define GPSR6_10 F_(AVB1_AVTP_PPS, IP1SR6_11_8)
+#define GPSR6_9 F_(AVB1_RX_CTL, IP1SR6_7_4)
+#define GPSR6_8 F_(AVB1_RXC, IP1SR6_3_0)
+#define GPSR6_7 F_(AVB1_TX_CTL, IP0SR6_31_28)
+#define GPSR6_6 F_(AVB1_TXC, IP0SR6_27_24)
+#define GPSR6_5 F_(AVB1_AVTP_MATCH, IP0SR6_23_20)
+#define GPSR6_4 F_(AVB1_LINK, IP0SR6_19_16)
+#define GPSR6_3 F_(AVB1_PHY_INT, IP0SR6_15_12)
+#define GPSR6_2 F_(AVB1_MDC, IP0SR6_11_8)
+#define GPSR6_1 F_(AVB1_MAGIC, IP0SR6_7_4)
+#define GPSR6_0 F_(AVB1_MDIO, IP0SR6_3_0)
+
+/* GPSR7 */
+#define GPSR7_20 F_(AVB0_RX_CTL, IP2SR7_19_16)
+#define GPSR7_19 F_(AVB0_RXC, IP2SR7_15_12)
+#define GPSR7_18 F_(AVB0_RD0, IP2SR7_11_8)
+#define GPSR7_17 F_(AVB0_RD1, IP2SR7_7_4)
+#define GPSR7_16 F_(AVB0_TX_CTL, IP2SR7_3_0)
+#define GPSR7_15 F_(AVB0_TXC, IP1SR7_31_28)
+#define GPSR7_14 F_(AVB0_MDIO, IP1SR7_27_24)
+#define GPSR7_13 F_(AVB0_MDC, IP1SR7_23_20)
+#define GPSR7_12 F_(AVB0_RD2, IP1SR7_19_16)
+#define GPSR7_11 F_(AVB0_TD0, IP1SR7_15_12)
+#define GPSR7_10 F_(AVB0_MAGIC, IP1SR7_11_8)
+#define GPSR7_9 F_(AVB0_TXCREFCLK, IP1SR7_7_4)
+#define GPSR7_8 F_(AVB0_RD3, IP1SR7_3_0)
+#define GPSR7_7 F_(AVB0_TD1, IP0SR7_31_28)
+#define GPSR7_6 F_(AVB0_TD2, IP0SR7_27_24)
+#define GPSR7_5 F_(AVB0_PHY_INT, IP0SR7_23_20)
+#define GPSR7_4 F_(AVB0_LINK, IP0SR7_19_16)
+#define GPSR7_3 F_(AVB0_TD3, IP0SR7_15_12)
+#define GPSR7_2 F_(AVB0_AVTP_MATCH, IP0SR7_11_8)
+#define GPSR7_1 F_(AVB0_AVTP_CAPTURE, IP0SR7_7_4)
+#define GPSR7_0 F_(AVB0_AVTP_PPS, IP0SR7_3_0)
+
+/* GPSR8 */
+#define GPSR8_13 F_(GP8_13, IP1SR8_23_20)
+#define GPSR8_12 F_(GP8_12, IP1SR8_19_16)
+#define GPSR8_11 F_(SDA5, IP1SR8_15_12)
+#define GPSR8_10 F_(SCL5, IP1SR8_11_8)
+#define GPSR8_9 F_(SDA4, IP1SR8_7_4)
+#define GPSR8_8 F_(SCL4, IP1SR8_3_0)
+#define GPSR8_7 F_(SDA3, IP0SR8_31_28)
+#define GPSR8_6 F_(SCL3, IP0SR8_27_24)
+#define GPSR8_5 F_(SDA2, IP0SR8_23_20)
+#define GPSR8_4 F_(SCL2, IP0SR8_19_16)
+#define GPSR8_3 F_(SDA1, IP0SR8_15_12)
+#define GPSR8_2 F_(SCL1, IP0SR8_11_8)
+#define GPSR8_1 F_(SDA0, IP0SR8_7_4)
+#define GPSR8_0 F_(SCL0, IP0SR8_3_0)
+
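+/*
+ * The IPnSRx tables list, for each 4-bit IPSR field, the function selected
+ * by field values 0x0-0xF (the column comments); F_(0, 0) marks an unused
+ * encoding.
+ */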
+/* SR0 */
+/* IP0SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR0_3_0 F_(0, 0) FM(ERROROUTC_B) FM(TCLK2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_7_4 F_(0, 0) FM(MSIOF3_SS1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_11_8 F_(0, 0) FM(MSIOF3_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_15_12 FM(IRQ3) FM(MSIOF3_SCK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_19_16 FM(IRQ2) FM(MSIOF3_TXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_23_20 FM(IRQ1) FM(MSIOF3_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_27_24 FM(IRQ0) FM(MSIOF3_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_31_28 FM(MSIOF5_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR0_3_0 FM(MSIOF5_SS1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_7_4 FM(MSIOF5_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_11_8 FM(MSIOF5_TXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_15_12 FM(MSIOF5_SCK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_19_16 FM(MSIOF5_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_23_20 FM(MSIOF2_SS2) FM(TCLK1) FM(IRQ2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_27_24 FM(MSIOF2_SS1) FM(HTX1) FM(TX1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_31_28 FM(MSIOF2_SYNC) FM(HRX1) FM(RX1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR0_3_0 FM(MSIOF2_TXD) FM(HCTS1_N) FM(CTS1_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR0_7_4 FM(MSIOF2_SCK) FM(HRTS1_N) FM(RTS1_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR0_11_8 FM(MSIOF2_RXD) FM(HSCK1) FM(SCK1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR1 */
+/* IP0SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR1_3_0 FM(MSIOF1_SS2) FM(HTX3_A) FM(TX3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_7_4 FM(MSIOF1_SS1) FM(HCTS3_N_A) FM(RX3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_11_8 FM(MSIOF1_SYNC) FM(HRTS3_N_A) FM(RTS3_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_15_12 FM(MSIOF1_SCK) FM(HSCK3_A) FM(CTS3_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_19_16 FM(MSIOF1_TXD) FM(HRX3_A) FM(SCK3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_23_20 FM(MSIOF1_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_27_24 FM(MSIOF0_SS2) FM(HTX1_X) FM(TX1_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_31_28 FM(MSIOF0_SS1) FM(HRX1_X) FM(RX1_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR1_3_0 FM(MSIOF0_SYNC) FM(HCTS1_N_X) FM(CTS1_N_X) FM(CANFD5_TX_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_7_4 FM(MSIOF0_TXD) FM(HRTS1_N_X) FM(RTS1_N_X) FM(CANFD5_RX_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_11_8 FM(MSIOF0_SCK) FM(HSCK1_X) FM(SCK1_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_15_12 FM(MSIOF0_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_19_16 FM(HTX0) FM(TX0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_23_20 FM(HCTS0_N) FM(CTS0_N) FM(PWM8_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_27_24 FM(HRTS0_N) FM(RTS0_N) FM(PWM9_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_31_28 FM(HSCK0) FM(SCK0) FM(PWM0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR1_3_0 FM(HRX0) FM(RX0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_7_4 FM(SCIF_CLK) FM(IRQ4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_11_8 FM(SSI_SCK) FM(TCLK3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_15_12 FM(SSI_WS) FM(TCLK4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_19_16 FM(SSI_SD) FM(IRQ0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_23_20 FM(AUDIO_CLKOUT) FM(IRQ1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_27_24 FM(AUDIO_CLKIN) FM(PWM3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_31_28 F_(0, 0) FM(TCLK2) FM(MSIOF4_SS1) FM(IRQ3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP3SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP3SR1_3_0 FM(HRX3) FM(SCK3_A) FM(MSIOF4_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_7_4 FM(HSCK3) FM(CTS3_N_A) FM(MSIOF4_SCK) FM(TPU0TO0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_11_8 FM(HRTS3_N) FM(RTS3_N_A) FM(MSIOF4_TXD) FM(TPU0TO1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_15_12 FM(HCTS3_N) FM(RX3_A) FM(MSIOF4_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_19_16 FM(HTX3) FM(TX3_A) FM(MSIOF4_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR2 */
+/* IP0SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR2_3_0 FM(FXR_TXDA) FM(CANFD1_TX) FM(TPU0TO2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_7_4 FM(FXR_TXENA_N) FM(CANFD1_RX) FM(TPU0TO3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_11_8 FM(RXDA_EXTFXR) FM(CANFD5_TX) FM(IRQ5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_15_12 FM(CLK_EXTFXR) FM(CANFD5_RX) FM(IRQ4_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_19_16 FM(RXDB_EXTFXR) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_23_20 FM(FXR_TXENB_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_27_24 FM(FXR_TXDB) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_31_28 FM(TPU0TO1) FM(CANFD6_TX) F_(0, 0) FM(TCLK2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR2_3_0 FM(TPU0TO0) FM(CANFD6_RX) F_(0, 0) FM(TCLK1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_7_4 FM(CAN_CLK) FM(FXR_TXENA_N_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_11_8 FM(CANFD0_TX) FM(FXR_TXENB_N_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_15_12 FM(CANFD0_RX) FM(STPWT_EXTFXR) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_19_16 FM(CANFD2_TX) FM(TPU0TO2) F_(0, 0) FM(TCLK3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_23_20 FM(CANFD2_RX) FM(TPU0TO3) FM(PWM1_B) FM(TCLK4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_27_24 FM(CANFD3_TX) F_(0, 0) FM(PWM2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_31_28 FM(CANFD3_RX) F_(0, 0) FM(PWM3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR2_3_0 FM(CANFD4_TX) F_(0, 0) FM(PWM4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR2_7_4 FM(CANFD4_RX) F_(0, 0) FM(PWM5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR2_11_8 FM(CANFD7_TX) F_(0, 0) FM(PWM6) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR2_15_12 FM(CANFD7_RX) F_(0, 0) FM(PWM7) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR3 */
+/* IP0SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR3_3_0 FM(MMC_SD_D1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_7_4 FM(MMC_SD_D0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_11_8 FM(MMC_SD_D2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_15_12 FM(MMC_SD_CLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_19_16 FM(MMC_DS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_23_20 FM(MMC_SD_D3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_27_24 FM(MMC_D5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR3_31_28 FM(MMC_D4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR3_3_0 FM(MMC_D7) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_7_4 FM(MMC_D6) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_11_8 FM(MMC_SD_CMD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_15_12 FM(SD_CD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_19_16 FM(SD_WP) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_23_20 FM(IPC_CLKIN) FM(IPC_CLKEN_IN) FM(PWM1_A) FM(TCLK3_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_27_24 FM(IPC_CLKOUT) FM(IPC_CLKEN_OUT) FM(ERROROUTC_A) FM(TCLK4_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_31_28 FM(QSPI0_SSL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR3_3_0 FM(QSPI0_IO3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_7_4 FM(QSPI0_IO2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_11_8 FM(QSPI0_MISO_IO1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_15_12 FM(QSPI0_MOSI_IO0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_19_16 FM(QSPI0_SPCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_23_20 FM(QSPI1_MOSI_IO0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_27_24 FM(QSPI1_SPCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR3_31_28 FM(QSPI1_MISO_IO1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP3SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP3SR3_3_0 FM(QSPI1_IO2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_7_4 FM(QSPI1_SSL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_11_8 FM(QSPI1_IO3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_15_12 FM(RPC_RESET_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_19_16 FM(RPC_WP_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR3_23_20 FM(RPC_INT_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR6 */
+/* IP0SR6 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR6_3_0 FM(AVB1_MDIO) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_7_4 FM(AVB1_MAGIC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_11_8 FM(AVB1_MDC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_15_12 FM(AVB1_PHY_INT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_19_16 FM(AVB1_LINK) FM(AVB1_MII_TX_ER) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_23_20 FM(AVB1_AVTP_MATCH) FM(AVB1_MII_RX_ER) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_27_24 FM(AVB1_TXC) FM(AVB1_MII_TXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR6_31_28 FM(AVB1_TX_CTL) FM(AVB1_MII_TX_EN) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR6 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR6_3_0 FM(AVB1_RXC) FM(AVB1_MII_RXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_7_4 FM(AVB1_RX_CTL) FM(AVB1_MII_RX_DV) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_11_8 FM(AVB1_AVTP_PPS) FM(AVB1_MII_COL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_15_12 FM(AVB1_AVTP_CAPTURE) FM(AVB1_MII_CRS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_19_16 FM(AVB1_TD1) FM(AVB1_MII_TD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_23_20 FM(AVB1_TD0) FM(AVB1_MII_TD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_27_24 FM(AVB1_RD1) FM(AVB1_MII_RD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR6_31_28 FM(AVB1_RD0) FM(AVB1_MII_RD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR6 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR6_3_0 FM(AVB1_TD2) FM(AVB1_MII_TD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR6_7_4 FM(AVB1_RD2) FM(AVB1_MII_RD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR6_11_8 FM(AVB1_TD3) FM(AVB1_MII_TD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR6_15_12 FM(AVB1_RD3) FM(AVB1_MII_RD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR6_19_16 FM(AVB1_TXCREFCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR7 */
+/* IP0SR7 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR7_3_0 FM(AVB0_AVTP_PPS) FM(AVB0_MII_COL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_7_4 FM(AVB0_AVTP_CAPTURE) FM(AVB0_MII_CRS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_11_8 FM(AVB0_AVTP_MATCH) FM(AVB0_MII_RX_ER) FM(CC5_OSCOUT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_15_12 FM(AVB0_TD3) FM(AVB0_MII_TD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_19_16 FM(AVB0_LINK) FM(AVB0_MII_TX_ER) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_23_20 FM(AVB0_PHY_INT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_27_24 FM(AVB0_TD2) FM(AVB0_MII_TD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR7_31_28 FM(AVB0_TD1) FM(AVB0_MII_TD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR7 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR7_3_0 FM(AVB0_RD3) FM(AVB0_MII_RD3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_7_4 FM(AVB0_TXCREFCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_11_8 FM(AVB0_MAGIC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_15_12 FM(AVB0_TD0) FM(AVB0_MII_TD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_19_16 FM(AVB0_RD2) FM(AVB0_MII_RD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_23_20 FM(AVB0_MDC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_27_24 FM(AVB0_MDIO) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR7_31_28 FM(AVB0_TXC) FM(AVB0_MII_TXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP2SR7 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP2SR7_3_0 FM(AVB0_TX_CTL) FM(AVB0_MII_TX_EN) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR7_7_4 FM(AVB0_RD1) FM(AVB0_MII_RD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR7_11_8 FM(AVB0_RD0) FM(AVB0_MII_RD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR7_15_12 FM(AVB0_RXC) FM(AVB0_MII_RXC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR7_19_16 FM(AVB0_RX_CTL) FM(AVB0_MII_RX_DV) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* SR8 */
+/* IP0SR8 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP0SR8_3_0 FM(SCL0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR8_7_4 FM(SDA0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR8_11_8 FM(SCL1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR8_15_12 FM(SDA1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR8_19_16 FM(SCL2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR8_23_20 FM(SDA2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR8_27_24 FM(SCL3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR8_31_28 FM(SDA3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IP1SR8 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+#define IP1SR8_3_0 FM(SCL4) FM(HRX2) FM(SCK4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR8_7_4 FM(SDA4) FM(HTX2) FM(CTS4_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR8_11_8 FM(SCL5) FM(HRTS2_N) FM(RTS4_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR8_15_12 FM(SDA5) FM(SCIF_CLK2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR8_19_16 F_(0, 0) FM(HCTS2_N) FM(TX4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR8_23_20 F_(0, 0) FM(HSCK2) FM(RX4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
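+/*
+ * PINMUX_GPSR and PINMUX_IPSR concatenate all GPSRn_m and IPnSRx entries
+ * defined above so that the whole set can be expanded at once.
+ */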
+#define PINMUX_GPSR \
+ GPSR3_29 \
+ GPSR1_28 GPSR3_28 \
+ GPSR1_27 GPSR3_27 \
+ GPSR1_26 GPSR3_26 \
+ GPSR1_25 GPSR3_25 \
+ GPSR1_24 GPSR3_24 GPSR4_24 \
+ GPSR1_23 GPSR3_23 GPSR4_23 \
+ GPSR1_22 GPSR3_22 GPSR4_22 \
+ GPSR1_21 GPSR3_21 GPSR4_21 \
+ GPSR1_20 GPSR3_20 GPSR4_20 GPSR5_20 GPSR6_20 GPSR7_20 \
+ GPSR1_19 GPSR2_19 GPSR3_19 GPSR4_19 GPSR5_19 GPSR6_19 GPSR7_19 \
+GPSR0_18 GPSR1_18 GPSR2_18 GPSR3_18 GPSR4_18 GPSR5_18 GPSR6_18 GPSR7_18 \
+GPSR0_17 GPSR1_17 GPSR2_17 GPSR3_17 GPSR4_17 GPSR5_17 GPSR6_17 GPSR7_17 \
+GPSR0_16 GPSR1_16 GPSR2_16 GPSR3_16 GPSR4_16 GPSR5_16 GPSR6_16 GPSR7_16 \
+GPSR0_15 GPSR1_15 GPSR2_15 GPSR3_15 GPSR4_15 GPSR5_15 GPSR6_15 GPSR7_15 \
+GPSR0_14 GPSR1_14 GPSR2_14 GPSR3_14 GPSR4_14 GPSR5_14 GPSR6_14 GPSR7_14 \
+GPSR0_13 GPSR1_13 GPSR2_13 GPSR3_13 GPSR4_13 GPSR5_13 GPSR6_13 GPSR7_13 GPSR8_13 \
+GPSR0_12 GPSR1_12 GPSR2_12 GPSR3_12 GPSR4_12 GPSR5_12 GPSR6_12 GPSR7_12 GPSR8_12 \
+GPSR0_11 GPSR1_11 GPSR2_11 GPSR3_11 GPSR4_11 GPSR5_11 GPSR6_11 GPSR7_11 GPSR8_11 \
+GPSR0_10 GPSR1_10 GPSR2_10 GPSR3_10 GPSR4_10 GPSR5_10 GPSR6_10 GPSR7_10 GPSR8_10 \
+GPSR0_9 GPSR1_9 GPSR2_9 GPSR3_9 GPSR4_9 GPSR5_9 GPSR6_9 GPSR7_9 GPSR8_9 \
+GPSR0_8 GPSR1_8 GPSR2_8 GPSR3_8 GPSR4_8 GPSR5_8 GPSR6_8 GPSR7_8 GPSR8_8 \
+GPSR0_7 GPSR1_7 GPSR2_7 GPSR3_7 GPSR4_7 GPSR5_7 GPSR6_7 GPSR7_7 GPSR8_7 \
+GPSR0_6 GPSR1_6 GPSR2_6 GPSR3_6 GPSR4_6 GPSR5_6 GPSR6_6 GPSR7_6 GPSR8_6 \
+GPSR0_5 GPSR1_5 GPSR2_5 GPSR3_5 GPSR4_5 GPSR5_5 GPSR6_5 GPSR7_5 GPSR8_5 \
+GPSR0_4 GPSR1_4 GPSR2_4 GPSR3_4 GPSR4_4 GPSR5_4 GPSR6_4 GPSR7_4 GPSR8_4 \
+GPSR0_3 GPSR1_3 GPSR2_3 GPSR3_3 GPSR4_3 GPSR5_3 GPSR6_3 GPSR7_3 GPSR8_3 \
+GPSR0_2 GPSR1_2 GPSR2_2 GPSR3_2 GPSR4_2 GPSR5_2 GPSR6_2 GPSR7_2 GPSR8_2 \
+GPSR0_1 GPSR1_1 GPSR2_1 GPSR3_1 GPSR4_1 GPSR5_1 GPSR6_1 GPSR7_1 GPSR8_1 \
+GPSR0_0 GPSR1_0 GPSR2_0 GPSR3_0 GPSR4_0 GPSR5_0 GPSR6_0 GPSR7_0 GPSR8_0
+
+#define PINMUX_IPSR \
+\
+FM(IP0SR0_3_0) IP0SR0_3_0 FM(IP1SR0_3_0) IP1SR0_3_0 FM(IP2SR0_3_0) IP2SR0_3_0 \
+FM(IP0SR0_7_4) IP0SR0_7_4 FM(IP1SR0_7_4) IP1SR0_7_4 FM(IP2SR0_7_4) IP2SR0_7_4 \
+FM(IP0SR0_11_8) IP0SR0_11_8 FM(IP1SR0_11_8) IP1SR0_11_8 FM(IP2SR0_11_8) IP2SR0_11_8 \
+FM(IP0SR0_15_12) IP0SR0_15_12 FM(IP1SR0_15_12) IP1SR0_15_12 \
+FM(IP0SR0_19_16) IP0SR0_19_16 FM(IP1SR0_19_16) IP1SR0_19_16 \
+FM(IP0SR0_23_20) IP0SR0_23_20 FM(IP1SR0_23_20) IP1SR0_23_20 \
+FM(IP0SR0_27_24) IP0SR0_27_24 FM(IP1SR0_27_24) IP1SR0_27_24 \
+FM(IP0SR0_31_28) IP0SR0_31_28 FM(IP1SR0_31_28) IP1SR0_31_28 \
+\
+FM(IP0SR1_3_0) IP0SR1_3_0 FM(IP1SR1_3_0) IP1SR1_3_0 FM(IP2SR1_3_0) IP2SR1_3_0 FM(IP3SR1_3_0) IP3SR1_3_0 \
+FM(IP0SR1_7_4) IP0SR1_7_4 FM(IP1SR1_7_4) IP1SR1_7_4 FM(IP2SR1_7_4) IP2SR1_7_4 FM(IP3SR1_7_4) IP3SR1_7_4 \
+FM(IP0SR1_11_8) IP0SR1_11_8 FM(IP1SR1_11_8) IP1SR1_11_8 FM(IP2SR1_11_8) IP2SR1_11_8 FM(IP3SR1_11_8) IP3SR1_11_8 \
+FM(IP0SR1_15_12) IP0SR1_15_12 FM(IP1SR1_15_12) IP1SR1_15_12 FM(IP2SR1_15_12) IP2SR1_15_12 FM(IP3SR1_15_12) IP3SR1_15_12 \
+FM(IP0SR1_19_16) IP0SR1_19_16 FM(IP1SR1_19_16) IP1SR1_19_16 FM(IP2SR1_19_16) IP2SR1_19_16 FM(IP3SR1_19_16) IP3SR1_19_16 \
+FM(IP0SR1_23_20) IP0SR1_23_20 FM(IP1SR1_23_20) IP1SR1_23_20 FM(IP2SR1_23_20) IP2SR1_23_20 \
+FM(IP0SR1_27_24) IP0SR1_27_24 FM(IP1SR1_27_24) IP1SR1_27_24 FM(IP2SR1_27_24) IP2SR1_27_24 \
+FM(IP0SR1_31_28) IP0SR1_31_28 FM(IP1SR1_31_28) IP1SR1_31_28 FM(IP2SR1_31_28) IP2SR1_31_28 \
+\
+FM(IP0SR2_3_0) IP0SR2_3_0 FM(IP1SR2_3_0) IP1SR2_3_0 FM(IP2SR2_3_0) IP2SR2_3_0 \
+FM(IP0SR2_7_4) IP0SR2_7_4 FM(IP1SR2_7_4) IP1SR2_7_4 FM(IP2SR2_7_4) IP2SR2_7_4 \
+FM(IP0SR2_11_8) IP0SR2_11_8 FM(IP1SR2_11_8) IP1SR2_11_8 FM(IP2SR2_11_8) IP2SR2_11_8 \
+FM(IP0SR2_15_12) IP0SR2_15_12 FM(IP1SR2_15_12) IP1SR2_15_12 FM(IP2SR2_15_12) IP2SR2_15_12 \
+FM(IP0SR2_19_16) IP0SR2_19_16 FM(IP1SR2_19_16) IP1SR2_19_16 \
+FM(IP0SR2_23_20) IP0SR2_23_20 FM(IP1SR2_23_20) IP1SR2_23_20 \
+FM(IP0SR2_27_24) IP0SR2_27_24 FM(IP1SR2_27_24) IP1SR2_27_24 \
+FM(IP0SR2_31_28) IP0SR2_31_28 FM(IP1SR2_31_28) IP1SR2_31_28 \
+\
+FM(IP0SR3_3_0) IP0SR3_3_0 FM(IP1SR3_3_0) IP1SR3_3_0 FM(IP2SR3_3_0) IP2SR3_3_0 FM(IP3SR3_3_0) IP3SR3_3_0 \
+FM(IP0SR3_7_4) IP0SR3_7_4 FM(IP1SR3_7_4) IP1SR3_7_4 FM(IP2SR3_7_4) IP2SR3_7_4 FM(IP3SR3_7_4) IP3SR3_7_4 \
+FM(IP0SR3_11_8) IP0SR3_11_8 FM(IP1SR3_11_8) IP1SR3_11_8 FM(IP2SR3_11_8) IP2SR3_11_8 FM(IP3SR3_11_8) IP3SR3_11_8 \
+FM(IP0SR3_15_12) IP0SR3_15_12 FM(IP1SR3_15_12) IP1SR3_15_12 FM(IP2SR3_15_12) IP2SR3_15_12 FM(IP3SR3_15_12) IP3SR3_15_12 \
+FM(IP0SR3_19_16) IP0SR3_19_16 FM(IP1SR3_19_16) IP1SR3_19_16 FM(IP2SR3_19_16) IP2SR3_19_16 FM(IP3SR3_19_16) IP3SR3_19_16 \
+FM(IP0SR3_23_20) IP0SR3_23_20 FM(IP1SR3_23_20) IP1SR3_23_20 FM(IP2SR3_23_20) IP2SR3_23_20 FM(IP3SR3_23_20) IP3SR3_23_20 \
+FM(IP0SR3_27_24) IP0SR3_27_24 FM(IP1SR3_27_24) IP1SR3_27_24 FM(IP2SR3_27_24) IP2SR3_27_24 \
+FM(IP0SR3_31_28) IP0SR3_31_28 FM(IP1SR3_31_28) IP1SR3_31_28 FM(IP2SR3_31_28) IP2SR3_31_28 \
+\
+FM(IP0SR6_3_0) IP0SR6_3_0 FM(IP1SR6_3_0) IP1SR6_3_0 FM(IP2SR6_3_0) IP2SR6_3_0 \
+FM(IP0SR6_7_4) IP0SR6_7_4 FM(IP1SR6_7_4) IP1SR6_7_4 FM(IP2SR6_7_4) IP2SR6_7_4 \
+FM(IP0SR6_11_8) IP0SR6_11_8 FM(IP1SR6_11_8) IP1SR6_11_8 FM(IP2SR6_11_8) IP2SR6_11_8 \
+FM(IP0SR6_15_12) IP0SR6_15_12 FM(IP1SR6_15_12) IP1SR6_15_12 FM(IP2SR6_15_12) IP2SR6_15_12 \
+FM(IP0SR6_19_16) IP0SR6_19_16 FM(IP1SR6_19_16) IP1SR6_19_16 FM(IP2SR6_19_16) IP2SR6_19_16 \
+FM(IP0SR6_23_20) IP0SR6_23_20 FM(IP1SR6_23_20) IP1SR6_23_20 \
+FM(IP0SR6_27_24) IP0SR6_27_24 FM(IP1SR6_27_24) IP1SR6_27_24 \
+FM(IP0SR6_31_28) IP0SR6_31_28 FM(IP1SR6_31_28) IP1SR6_31_28 \
+\
+FM(IP0SR7_3_0) IP0SR7_3_0 FM(IP1SR7_3_0) IP1SR7_3_0 FM(IP2SR7_3_0) IP2SR7_3_0 \
+FM(IP0SR7_7_4) IP0SR7_7_4 FM(IP1SR7_7_4) IP1SR7_7_4 FM(IP2SR7_7_4) IP2SR7_7_4 \
+FM(IP0SR7_11_8) IP0SR7_11_8 FM(IP1SR7_11_8) IP1SR7_11_8 FM(IP2SR7_11_8) IP2SR7_11_8 \
+FM(IP0SR7_15_12) IP0SR7_15_12 FM(IP1SR7_15_12) IP1SR7_15_12 FM(IP2SR7_15_12) IP2SR7_15_12 \
+FM(IP0SR7_19_16) IP0SR7_19_16 FM(IP1SR7_19_16) IP1SR7_19_16 FM(IP2SR7_19_16) IP2SR7_19_16 \
+FM(IP0SR7_23_20) IP0SR7_23_20 FM(IP1SR7_23_20) IP1SR7_23_20 \
+FM(IP0SR7_27_24) IP0SR7_27_24 FM(IP1SR7_27_24) IP1SR7_27_24 \
+FM(IP0SR7_31_28) IP0SR7_31_28 FM(IP1SR7_31_28) IP1SR7_31_28 \
+\
+FM(IP0SR8_3_0) IP0SR8_3_0 FM(IP1SR8_3_0) IP1SR8_3_0 \
+FM(IP0SR8_7_4) IP0SR8_7_4 FM(IP1SR8_7_4) IP1SR8_7_4 \
+FM(IP0SR8_11_8) IP0SR8_11_8 FM(IP1SR8_11_8) IP1SR8_11_8 \
+FM(IP0SR8_15_12) IP0SR8_15_12 FM(IP1SR8_15_12) IP1SR8_15_12 \
+FM(IP0SR8_19_16) IP0SR8_19_16 FM(IP1SR8_19_16) IP1SR8_19_16 \
+FM(IP0SR8_23_20) IP0SR8_23_20 FM(IP1SR8_23_20) IP1SR8_23_20 \
+FM(IP0SR8_27_24) IP0SR8_27_24 \
+FM(IP0SR8_31_28) IP0SR8_31_28
+
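+/*
+ * MOD_SELn bits select between functions that share a pin; each MOD_SELn_m
+ * macro lists the names of the two possible settings of bit m (columns 0/1).
+ */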
+/* MOD_SEL4 */ /* 0 */ /* 1 */
+#define MOD_SEL4_19 FM(SEL_TSN0_TD2_0) FM(SEL_TSN0_TD2_1)
+#define MOD_SEL4_18 FM(SEL_TSN0_TD3_0) FM(SEL_TSN0_TD3_1)
+#define MOD_SEL4_15 FM(SEL_TSN0_TD0_0) FM(SEL_TSN0_TD0_1)
+#define MOD_SEL4_14 FM(SEL_TSN0_TD1_0) FM(SEL_TSN0_TD1_1)
+#define MOD_SEL4_12 FM(SEL_TSN0_TXC_0) FM(SEL_TSN0_TXC_1)
+#define MOD_SEL4_9 FM(SEL_TSN0_TX_CTL_0) FM(SEL_TSN0_TX_CTL_1)
+#define MOD_SEL4_8 FM(SEL_TSN0_AVTP_PPS0_0) FM(SEL_TSN0_AVTP_PPS0_1)
+#define MOD_SEL4_5 FM(SEL_TSN0_AVTP_MATCH_0) FM(SEL_TSN0_AVTP_MATCH_1)
+#define MOD_SEL4_2 FM(SEL_TSN0_AVTP_PPS1_0) FM(SEL_TSN0_AVTP_PPS1_1)
+#define MOD_SEL4_1 FM(SEL_TSN0_MDC_0) FM(SEL_TSN0_MDC_1)
+
+/* MOD_SEL5 */ /* 0 */ /* 1 */
+#define MOD_SEL5_19 FM(SEL_AVB2_TX_CTL_0) FM(SEL_AVB2_TX_CTL_1)
+#define MOD_SEL5_16 FM(SEL_AVB2_TXC_0) FM(SEL_AVB2_TXC_1)
+#define MOD_SEL5_15 FM(SEL_AVB2_TD0_0) FM(SEL_AVB2_TD0_1)
+#define MOD_SEL5_12 FM(SEL_AVB2_TD1_0) FM(SEL_AVB2_TD1_1)
+#define MOD_SEL5_11 FM(SEL_AVB2_TD2_0) FM(SEL_AVB2_TD2_1)
+#define MOD_SEL5_8 FM(SEL_AVB2_TD3_0) FM(SEL_AVB2_TD3_1)
+#define MOD_SEL5_6 FM(SEL_AVB2_MDC_0) FM(SEL_AVB2_MDC_1)
+#define MOD_SEL5_5 FM(SEL_AVB2_MAGIC_0) FM(SEL_AVB2_MAGIC_1)
+#define MOD_SEL5_2 FM(SEL_AVB2_AVTP_MATCH_0) FM(SEL_AVB2_AVTP_MATCH_1)
+#define MOD_SEL5_0 FM(SEL_AVB2_AVTP_PPS_0) FM(SEL_AVB2_AVTP_PPS_1)
+
+/* MOD_SEL6 */ /* 0 */ /* 1 */
+#define MOD_SEL6_18 FM(SEL_AVB1_TD3_0) FM(SEL_AVB1_TD3_1)
+#define MOD_SEL6_16 FM(SEL_AVB1_TD2_0) FM(SEL_AVB1_TD2_1)
+#define MOD_SEL6_13 FM(SEL_AVB1_TD0_0) FM(SEL_AVB1_TD0_1)
+#define MOD_SEL6_12 FM(SEL_AVB1_TD1_0) FM(SEL_AVB1_TD1_1)
+#define MOD_SEL6_10 FM(SEL_AVB1_AVTP_PPS_0) FM(SEL_AVB1_AVTP_PPS_1)
+#define MOD_SEL6_7 FM(SEL_AVB1_TX_CTL_0) FM(SEL_AVB1_TX_CTL_1)
+#define MOD_SEL6_6 FM(SEL_AVB1_TXC_0) FM(SEL_AVB1_TXC_1)
+#define MOD_SEL6_5 FM(SEL_AVB1_AVTP_MATCH_0) FM(SEL_AVB1_AVTP_MATCH_1)
+#define MOD_SEL6_2 FM(SEL_AVB1_MDC_0) FM(SEL_AVB1_MDC_1)
+#define MOD_SEL6_1 FM(SEL_AVB1_MAGIC_0) FM(SEL_AVB1_MAGIC_1)
+
+/* MOD_SEL7 */ /* 0 */ /* 1 */
+#define MOD_SEL7_16 FM(SEL_AVB0_TX_CTL_0) FM(SEL_AVB0_TX_CTL_1)
+#define MOD_SEL7_15 FM(SEL_AVB0_TXC_0) FM(SEL_AVB0_TXC_1)
+#define MOD_SEL7_13 FM(SEL_AVB0_MDC_0) FM(SEL_AVB0_MDC_1)
+#define MOD_SEL7_11 FM(SEL_AVB0_TD0_0) FM(SEL_AVB0_TD0_1)
+#define MOD_SEL7_10 FM(SEL_AVB0_MAGIC_0) FM(SEL_AVB0_MAGIC_1)
+#define MOD_SEL7_7 FM(SEL_AVB0_TD1_0) FM(SEL_AVB0_TD1_1)
+#define MOD_SEL7_6 FM(SEL_AVB0_TD2_0) FM(SEL_AVB0_TD2_1)
+#define MOD_SEL7_3 FM(SEL_AVB0_TD3_0) FM(SEL_AVB0_TD3_1)
+#define MOD_SEL7_2 FM(SEL_AVB0_AVTP_MATCH_0) FM(SEL_AVB0_AVTP_MATCH_1)
+#define MOD_SEL7_0 FM(SEL_AVB0_AVTP_PPS_0) FM(SEL_AVB0_AVTP_PPS_1)
+
+/* MOD_SEL8 */ /* 0 */ /* 1 */
+#define MOD_SEL8_11 FM(SEL_SDA5_0) FM(SEL_SDA5_1)
+#define MOD_SEL8_10 FM(SEL_SCL5_0) FM(SEL_SCL5_1)
+#define MOD_SEL8_9 FM(SEL_SDA4_0) FM(SEL_SDA4_1)
+#define MOD_SEL8_8 FM(SEL_SCL4_0) FM(SEL_SCL4_1)
+#define MOD_SEL8_7 FM(SEL_SDA3_0) FM(SEL_SDA3_1)
+#define MOD_SEL8_6 FM(SEL_SCL3_0) FM(SEL_SCL3_1)
+#define MOD_SEL8_5 FM(SEL_SDA2_0) FM(SEL_SDA2_1)
+#define MOD_SEL8_4 FM(SEL_SCL2_0) FM(SEL_SCL2_1)
+#define MOD_SEL8_3 FM(SEL_SDA1_0) FM(SEL_SDA1_1)
+#define MOD_SEL8_2 FM(SEL_SCL1_0) FM(SEL_SCL1_1)
+#define MOD_SEL8_1 FM(SEL_SDA0_0) FM(SEL_SDA0_1)
+#define MOD_SEL8_0 FM(SEL_SCL0_0) FM(SEL_SCL0_1)
+
+#define PINMUX_MOD_SELS \
+\
+MOD_SEL4_19 MOD_SEL5_19 \
+MOD_SEL4_18 MOD_SEL6_18 \
+ \
+ MOD_SEL5_16 MOD_SEL6_16 MOD_SEL7_16 \
+MOD_SEL4_15 MOD_SEL5_15 MOD_SEL7_15 \
+MOD_SEL4_14 \
+ MOD_SEL6_13 MOD_SEL7_13 \
+MOD_SEL4_12 MOD_SEL5_12 MOD_SEL6_12 \
+ MOD_SEL5_11 MOD_SEL7_11 MOD_SEL8_11 \
+ MOD_SEL6_10 MOD_SEL7_10 MOD_SEL8_10 \
+MOD_SEL4_9 MOD_SEL8_9 \
+MOD_SEL4_8 MOD_SEL5_8 MOD_SEL8_8 \
+ MOD_SEL6_7 MOD_SEL7_7 MOD_SEL8_7 \
+ MOD_SEL5_6 MOD_SEL6_6 MOD_SEL7_6 MOD_SEL8_6 \
+MOD_SEL4_5 MOD_SEL5_5 MOD_SEL6_5 MOD_SEL8_5 \
+ MOD_SEL8_4 \
+ MOD_SEL7_3 MOD_SEL8_3 \
+MOD_SEL4_2 MOD_SEL5_2 MOD_SEL6_2 MOD_SEL7_2 MOD_SEL8_2 \
+MOD_SEL4_1 MOD_SEL6_1 MOD_SEL8_1 \
+ MOD_SEL5_0 MOD_SEL7_0 MOD_SEL8_0
+
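+/*
+ * The lists above are expanded twice below: once with FM(x) defined as
+ * FN_##x to enumerate the function selectors, and once with FM(x) defined
+ * as x##_MARK to enumerate the corresponding pinmux marks; F_(x, y) expands
+ * to nothing in both passes.
+ */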
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ GP_ALL(DATA),
+ PINMUX_DATA_END,
+
+#define F_(x, y)
+#define FM(x) FN_##x,
+ PINMUX_FUNCTION_BEGIN,
+ GP_ALL(FN),
+ PINMUX_GPSR
+ PINMUX_IPSR
+ PINMUX_MOD_SELS
+ PINMUX_FUNCTION_END,
+#undef F_
+#undef FM
+
+#define F_(x, y)
+#define FM(x) x##_MARK,
+ PINMUX_MARK_BEGIN,
+ PINMUX_GPSR
+ PINMUX_IPSR
+ PINMUX_MOD_SELS
+ PINMUX_MARK_END,
+#undef F_
+#undef FM
+};
+
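+/*
+ * pinmux_data[] maps each mark to the register settings that select it:
+ * PINMUX_SINGLE() is used for fixed single-function pins, PINMUX_IPSR_GPSR()
+ * for functions selected by a GPSR bit plus an IPnSRx field,
+ * PINMUX_IPSR_MSEL() where a MOD_SEL setting is additionally required, and
+ * PINMUX_IPSR_NOGM() here for the TSN0/AVB2 functions gated only by
+ * MODSEL4/MODSEL5.
+ */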
+static const u16 pinmux_data[] = {
+ PINMUX_DATA_GP_ALL(),
+
+ PINMUX_SINGLE(AVS1),
+ PINMUX_SINGLE(AVS0),
+ PINMUX_SINGLE(PCIE1_CLKREQ_N),
+ PINMUX_SINGLE(PCIE0_CLKREQ_N),
+
+ /* TSN0 without MODSEL4 */
+ PINMUX_SINGLE(TSN0_TXCREFCLK),
+ PINMUX_SINGLE(TSN0_RD2),
+ PINMUX_SINGLE(TSN0_RD3),
+ PINMUX_SINGLE(TSN0_RD1),
+ PINMUX_SINGLE(TSN0_RXC),
+ PINMUX_SINGLE(TSN0_RD0),
+ PINMUX_SINGLE(TSN0_RX_CTL),
+ PINMUX_SINGLE(TSN0_AVTP_CAPTURE),
+ PINMUX_SINGLE(TSN0_LINK),
+ PINMUX_SINGLE(TSN0_PHY_INT),
+ PINMUX_SINGLE(TSN0_MDIO),
+ /* TSN0 with MODSEL4 */
+ PINMUX_IPSR_NOGM(0, TSN0_TD2, SEL_TSN0_TD2_1),
+ PINMUX_IPSR_NOGM(0, TSN0_TD3, SEL_TSN0_TD3_1),
+ PINMUX_IPSR_NOGM(0, TSN0_TD0, SEL_TSN0_TD0_1),
+ PINMUX_IPSR_NOGM(0, TSN0_TD1, SEL_TSN0_TD1_1),
+ PINMUX_IPSR_NOGM(0, TSN0_TXC, SEL_TSN0_TXC_1),
+ PINMUX_IPSR_NOGM(0, TSN0_TX_CTL, SEL_TSN0_TX_CTL_1),
+ PINMUX_IPSR_NOGM(0, TSN0_AVTP_PPS0, SEL_TSN0_AVTP_PPS0_1),
+ PINMUX_IPSR_NOGM(0, TSN0_AVTP_MATCH, SEL_TSN0_AVTP_MATCH_1),
+ PINMUX_IPSR_NOGM(0, TSN0_AVTP_PPS1, SEL_TSN0_AVTP_PPS1_1),
+ PINMUX_IPSR_NOGM(0, TSN0_MDC, SEL_TSN0_MDC_1),
+
+	/* AVB2 without MODSEL5 */
+ PINMUX_SINGLE(AVB2_RX_CTL),
+ PINMUX_SINGLE(AVB2_RXC),
+ PINMUX_SINGLE(AVB2_RD0),
+ PINMUX_SINGLE(AVB2_RD1),
+ PINMUX_SINGLE(AVB2_RD2),
+ PINMUX_SINGLE(AVB2_MDIO),
+ PINMUX_SINGLE(AVB2_RD3),
+ PINMUX_SINGLE(AVB2_TXCREFCLK),
+ PINMUX_SINGLE(AVB2_PHY_INT),
+ PINMUX_SINGLE(AVB2_LINK),
+ PINMUX_SINGLE(AVB2_AVTP_CAPTURE),
+	/* AVB2 with MODSEL5 */
+ PINMUX_IPSR_NOGM(0, AVB2_TX_CTL, SEL_AVB2_TX_CTL_1),
+ PINMUX_IPSR_NOGM(0, AVB2_TXC, SEL_AVB2_TXC_1),
+ PINMUX_IPSR_NOGM(0, AVB2_TD0, SEL_AVB2_TD0_1),
+ PINMUX_IPSR_NOGM(0, AVB2_TD1, SEL_AVB2_TD1_1),
+ PINMUX_IPSR_NOGM(0, AVB2_TD2, SEL_AVB2_TD2_1),
+ PINMUX_IPSR_NOGM(0, AVB2_TD3, SEL_AVB2_TD3_1),
+ PINMUX_IPSR_NOGM(0, AVB2_MDC, SEL_AVB2_MDC_1),
+ PINMUX_IPSR_NOGM(0, AVB2_MAGIC, SEL_AVB2_MAGIC_1),
+ PINMUX_IPSR_NOGM(0, AVB2_AVTP_MATCH, SEL_AVB2_AVTP_MATCH_1),
+ PINMUX_IPSR_NOGM(0, AVB2_AVTP_PPS, SEL_AVB2_AVTP_PPS_1),
+
+ /* IP0SR0 */
+ PINMUX_IPSR_GPSR(IP0SR0_3_0, ERROROUTC_B),
+ PINMUX_IPSR_GPSR(IP0SR0_3_0, TCLK2_A),
+
+ PINMUX_IPSR_GPSR(IP0SR0_7_4, MSIOF3_SS1),
+
+ PINMUX_IPSR_GPSR(IP0SR0_11_8, MSIOF3_SS2),
+
+ PINMUX_IPSR_GPSR(IP0SR0_15_12, IRQ3),
+ PINMUX_IPSR_GPSR(IP0SR0_15_12, MSIOF3_SCK),
+
+ PINMUX_IPSR_GPSR(IP0SR0_19_16, IRQ2),
+ PINMUX_IPSR_GPSR(IP0SR0_19_16, MSIOF3_TXD),
+
+ PINMUX_IPSR_GPSR(IP0SR0_23_20, IRQ1),
+ PINMUX_IPSR_GPSR(IP0SR0_23_20, MSIOF3_RXD),
+
+ PINMUX_IPSR_GPSR(IP0SR0_27_24, IRQ0),
+ PINMUX_IPSR_GPSR(IP0SR0_27_24, MSIOF3_SYNC),
+
+ PINMUX_IPSR_GPSR(IP0SR0_31_28, MSIOF5_SS2),
+
+ /* IP1SR0 */
+ PINMUX_IPSR_GPSR(IP1SR0_3_0, MSIOF5_SS1),
+
+ PINMUX_IPSR_GPSR(IP1SR0_7_4, MSIOF5_SYNC),
+
+ PINMUX_IPSR_GPSR(IP1SR0_11_8, MSIOF5_TXD),
+
+ PINMUX_IPSR_GPSR(IP1SR0_15_12, MSIOF5_SCK),
+
+ PINMUX_IPSR_GPSR(IP1SR0_19_16, MSIOF5_RXD),
+
+ PINMUX_IPSR_GPSR(IP1SR0_23_20, MSIOF2_SS2),
+ PINMUX_IPSR_GPSR(IP1SR0_23_20, TCLK1),
+ PINMUX_IPSR_GPSR(IP1SR0_23_20, IRQ2_A),
+
+ PINMUX_IPSR_GPSR(IP1SR0_27_24, MSIOF2_SS1),
+ PINMUX_IPSR_GPSR(IP1SR0_27_24, HTX1),
+ PINMUX_IPSR_GPSR(IP1SR0_27_24, TX1),
+
+ PINMUX_IPSR_GPSR(IP1SR0_31_28, MSIOF2_SYNC),
+ PINMUX_IPSR_GPSR(IP1SR0_31_28, HRX1),
+ PINMUX_IPSR_GPSR(IP1SR0_31_28, RX1),
+
+ /* IP2SR0 */
+ PINMUX_IPSR_GPSR(IP2SR0_3_0, MSIOF2_TXD),
+ PINMUX_IPSR_GPSR(IP2SR0_3_0, HCTS1_N),
+ PINMUX_IPSR_GPSR(IP2SR0_3_0, CTS1_N),
+
+ PINMUX_IPSR_GPSR(IP2SR0_7_4, MSIOF2_SCK),
+ PINMUX_IPSR_GPSR(IP2SR0_7_4, HRTS1_N),
+ PINMUX_IPSR_GPSR(IP2SR0_7_4, RTS1_N),
+
+ PINMUX_IPSR_GPSR(IP2SR0_11_8, MSIOF2_RXD),
+ PINMUX_IPSR_GPSR(IP2SR0_11_8, HSCK1),
+ PINMUX_IPSR_GPSR(IP2SR0_11_8, SCK1),
+
+ /* IP0SR1 */
+ PINMUX_IPSR_GPSR(IP0SR1_3_0, MSIOF1_SS2),
+ PINMUX_IPSR_GPSR(IP0SR1_3_0, HTX3_A),
+ PINMUX_IPSR_GPSR(IP0SR1_3_0, TX3),
+
+ PINMUX_IPSR_GPSR(IP0SR1_7_4, MSIOF1_SS1),
+ PINMUX_IPSR_GPSR(IP0SR1_7_4, HCTS3_N_A),
+ PINMUX_IPSR_GPSR(IP0SR1_7_4, RX3),
+
+ PINMUX_IPSR_GPSR(IP0SR1_11_8, MSIOF1_SYNC),
+ PINMUX_IPSR_GPSR(IP0SR1_11_8, HRTS3_N_A),
+ PINMUX_IPSR_GPSR(IP0SR1_11_8, RTS3_N),
+
+ PINMUX_IPSR_GPSR(IP0SR1_15_12, MSIOF1_SCK),
+ PINMUX_IPSR_GPSR(IP0SR1_15_12, HSCK3_A),
+ PINMUX_IPSR_GPSR(IP0SR1_15_12, CTS3_N),
+
+ PINMUX_IPSR_GPSR(IP0SR1_19_16, MSIOF1_TXD),
+ PINMUX_IPSR_GPSR(IP0SR1_19_16, HRX3_A),
+ PINMUX_IPSR_GPSR(IP0SR1_19_16, SCK3),
+
+ PINMUX_IPSR_GPSR(IP0SR1_23_20, MSIOF1_RXD),
+
+ PINMUX_IPSR_GPSR(IP0SR1_27_24, MSIOF0_SS2),
+ PINMUX_IPSR_GPSR(IP0SR1_27_24, HTX1_X),
+ PINMUX_IPSR_GPSR(IP0SR1_27_24, TX1_X),
+
+ PINMUX_IPSR_GPSR(IP0SR1_31_28, MSIOF0_SS1),
+ PINMUX_IPSR_GPSR(IP0SR1_31_28, HRX1_X),
+ PINMUX_IPSR_GPSR(IP0SR1_31_28, RX1_X),
+
+ /* IP1SR1 */
+ PINMUX_IPSR_GPSR(IP1SR1_3_0, MSIOF0_SYNC),
+ PINMUX_IPSR_GPSR(IP1SR1_3_0, HCTS1_N_X),
+ PINMUX_IPSR_GPSR(IP1SR1_3_0, CTS1_N_X),
+ PINMUX_IPSR_GPSR(IP1SR1_3_0, CANFD5_TX_B),
+
+ PINMUX_IPSR_GPSR(IP1SR1_7_4, MSIOF0_TXD),
+ PINMUX_IPSR_GPSR(IP1SR1_7_4, HRTS1_N_X),
+ PINMUX_IPSR_GPSR(IP1SR1_7_4, RTS1_N_X),
+ PINMUX_IPSR_GPSR(IP1SR1_7_4, CANFD5_RX_B),
+
+ PINMUX_IPSR_GPSR(IP1SR1_11_8, MSIOF0_SCK),
+ PINMUX_IPSR_GPSR(IP1SR1_11_8, HSCK1_X),
+ PINMUX_IPSR_GPSR(IP1SR1_11_8, SCK1_X),
+
+ PINMUX_IPSR_GPSR(IP1SR1_15_12, MSIOF0_RXD),
+
+ PINMUX_IPSR_GPSR(IP1SR1_19_16, HTX0),
+ PINMUX_IPSR_GPSR(IP1SR1_19_16, TX0),
+
+ PINMUX_IPSR_GPSR(IP1SR1_23_20, HCTS0_N),
+ PINMUX_IPSR_GPSR(IP1SR1_23_20, CTS0_N),
+ PINMUX_IPSR_GPSR(IP1SR1_23_20, PWM8_A),
+
+ PINMUX_IPSR_GPSR(IP1SR1_27_24, HRTS0_N),
+ PINMUX_IPSR_GPSR(IP1SR1_27_24, RTS0_N),
+ PINMUX_IPSR_GPSR(IP1SR1_27_24, PWM9_A),
+
+ PINMUX_IPSR_GPSR(IP1SR1_31_28, HSCK0),
+ PINMUX_IPSR_GPSR(IP1SR1_31_28, SCK0),
+ PINMUX_IPSR_GPSR(IP1SR1_31_28, PWM0_A),
+
+ /* IP2SR1 */
+ PINMUX_IPSR_GPSR(IP2SR1_3_0, HRX0),
+ PINMUX_IPSR_GPSR(IP2SR1_3_0, RX0),
+
+ PINMUX_IPSR_GPSR(IP2SR1_7_4, SCIF_CLK),
+ PINMUX_IPSR_GPSR(IP2SR1_7_4, IRQ4_A),
+
+ PINMUX_IPSR_GPSR(IP2SR1_11_8, SSI_SCK),
+ PINMUX_IPSR_GPSR(IP2SR1_11_8, TCLK3),
+
+ PINMUX_IPSR_GPSR(IP2SR1_15_12, SSI_WS),
+ PINMUX_IPSR_GPSR(IP2SR1_15_12, TCLK4),
+
+ PINMUX_IPSR_GPSR(IP2SR1_19_16, SSI_SD),
+ PINMUX_IPSR_GPSR(IP2SR1_19_16, IRQ0_A),
+
+ PINMUX_IPSR_GPSR(IP2SR1_23_20, AUDIO_CLKOUT),
+ PINMUX_IPSR_GPSR(IP2SR1_23_20, IRQ1_A),
+
+ PINMUX_IPSR_GPSR(IP2SR1_27_24, AUDIO_CLKIN),
+ PINMUX_IPSR_GPSR(IP2SR1_27_24, PWM3_A),
+
+ PINMUX_IPSR_GPSR(IP2SR1_31_28, TCLK2),
+ PINMUX_IPSR_GPSR(IP2SR1_31_28, MSIOF4_SS1),
+ PINMUX_IPSR_GPSR(IP2SR1_31_28, IRQ3_B),
+
+ /* IP3SR1 */
+ PINMUX_IPSR_GPSR(IP3SR1_3_0, HRX3),
+ PINMUX_IPSR_GPSR(IP3SR1_3_0, SCK3_A),
+ PINMUX_IPSR_GPSR(IP3SR1_3_0, MSIOF4_SS2),
+
+ PINMUX_IPSR_GPSR(IP3SR1_7_4, HSCK3),
+ PINMUX_IPSR_GPSR(IP3SR1_7_4, CTS3_N_A),
+ PINMUX_IPSR_GPSR(IP3SR1_7_4, MSIOF4_SCK),
+ PINMUX_IPSR_GPSR(IP3SR1_7_4, TPU0TO0_A),
+
+ PINMUX_IPSR_GPSR(IP3SR1_11_8, HRTS3_N),
+ PINMUX_IPSR_GPSR(IP3SR1_11_8, RTS3_N_A),
+ PINMUX_IPSR_GPSR(IP3SR1_11_8, MSIOF4_TXD),
+ PINMUX_IPSR_GPSR(IP3SR1_11_8, TPU0TO1_A),
+
+ PINMUX_IPSR_GPSR(IP3SR1_15_12, HCTS3_N),
+ PINMUX_IPSR_GPSR(IP3SR1_15_12, RX3_A),
+ PINMUX_IPSR_GPSR(IP3SR1_15_12, MSIOF4_RXD),
+
+ PINMUX_IPSR_GPSR(IP3SR1_19_16, HTX3),
+ PINMUX_IPSR_GPSR(IP3SR1_19_16, TX3_A),
+ PINMUX_IPSR_GPSR(IP3SR1_19_16, MSIOF4_SYNC),
+
+ /* IP0SR2 */
+ PINMUX_IPSR_GPSR(IP0SR2_3_0, FXR_TXDA),
+ PINMUX_IPSR_GPSR(IP0SR2_3_0, CANFD1_TX),
+ PINMUX_IPSR_GPSR(IP0SR2_3_0, TPU0TO2_A),
+
+ PINMUX_IPSR_GPSR(IP0SR2_7_4, FXR_TXENA_N),
+ PINMUX_IPSR_GPSR(IP0SR2_7_4, CANFD1_RX),
+ PINMUX_IPSR_GPSR(IP0SR2_7_4, TPU0TO3_A),
+
+ PINMUX_IPSR_GPSR(IP0SR2_11_8, RXDA_EXTFXR),
+ PINMUX_IPSR_GPSR(IP0SR2_11_8, CANFD5_TX),
+ PINMUX_IPSR_GPSR(IP0SR2_11_8, IRQ5),
+
+ PINMUX_IPSR_GPSR(IP0SR2_15_12, CLK_EXTFXR),
+ PINMUX_IPSR_GPSR(IP0SR2_15_12, CANFD5_RX),
+ PINMUX_IPSR_GPSR(IP0SR2_15_12, IRQ4_B),
+
+ PINMUX_IPSR_GPSR(IP0SR2_19_16, RXDB_EXTFXR),
+
+ PINMUX_IPSR_GPSR(IP0SR2_23_20, FXR_TXENB_N),
+
+ PINMUX_IPSR_GPSR(IP0SR2_27_24, FXR_TXDB),
+
+ PINMUX_IPSR_GPSR(IP0SR2_31_28, TPU0TO1),
+ PINMUX_IPSR_GPSR(IP0SR2_31_28, CANFD6_TX),
+ PINMUX_IPSR_GPSR(IP0SR2_31_28, TCLK2_B),
+
+ /* IP1SR2 */
+ PINMUX_IPSR_GPSR(IP1SR2_3_0, TPU0TO0),
+ PINMUX_IPSR_GPSR(IP1SR2_3_0, CANFD6_RX),
+ PINMUX_IPSR_GPSR(IP1SR2_3_0, TCLK1_A),
+
+ PINMUX_IPSR_GPSR(IP1SR2_7_4, CAN_CLK),
+ PINMUX_IPSR_GPSR(IP1SR2_7_4, FXR_TXENA_N_X),
+
+ PINMUX_IPSR_GPSR(IP1SR2_11_8, CANFD0_TX),
+ PINMUX_IPSR_GPSR(IP1SR2_11_8, FXR_TXENB_N_X),
+
+ PINMUX_IPSR_GPSR(IP1SR2_15_12, CANFD0_RX),
+ PINMUX_IPSR_GPSR(IP1SR2_15_12, STPWT_EXTFXR),
+
+ PINMUX_IPSR_GPSR(IP1SR2_19_16, CANFD2_TX),
+ PINMUX_IPSR_GPSR(IP1SR2_19_16, TPU0TO2),
+ PINMUX_IPSR_GPSR(IP1SR2_19_16, TCLK3_A),
+
+ PINMUX_IPSR_GPSR(IP1SR2_23_20, CANFD2_RX),
+ PINMUX_IPSR_GPSR(IP1SR2_23_20, TPU0TO3),
+ PINMUX_IPSR_GPSR(IP1SR2_23_20, PWM1_B),
+ PINMUX_IPSR_GPSR(IP1SR2_23_20, TCLK4_A),
+
+ PINMUX_IPSR_GPSR(IP1SR2_27_24, CANFD3_TX),
+ PINMUX_IPSR_GPSR(IP1SR2_27_24, PWM2_B),
+
+ PINMUX_IPSR_GPSR(IP1SR2_31_28, CANFD3_RX),
+ PINMUX_IPSR_GPSR(IP1SR2_31_28, PWM3_B),
+
+ /* IP2SR2 */
+ PINMUX_IPSR_GPSR(IP2SR2_3_0, CANFD4_TX),
+ PINMUX_IPSR_GPSR(IP2SR2_3_0, PWM4),
+
+ PINMUX_IPSR_GPSR(IP2SR2_7_4, CANFD4_RX),
+ PINMUX_IPSR_GPSR(IP2SR2_7_4, PWM5),
+
+ PINMUX_IPSR_GPSR(IP2SR2_11_8, CANFD7_TX),
+ PINMUX_IPSR_GPSR(IP2SR2_11_8, PWM6),
+
+ PINMUX_IPSR_GPSR(IP2SR2_15_12, CANFD7_RX),
+ PINMUX_IPSR_GPSR(IP2SR2_15_12, PWM7),
+
+ /* IP0SR3 */
+ PINMUX_IPSR_GPSR(IP0SR3_3_0, MMC_SD_D1),
+ PINMUX_IPSR_GPSR(IP0SR3_7_4, MMC_SD_D0),
+ PINMUX_IPSR_GPSR(IP0SR3_11_8, MMC_SD_D2),
+ PINMUX_IPSR_GPSR(IP0SR3_15_12, MMC_SD_CLK),
+ PINMUX_IPSR_GPSR(IP0SR3_19_16, MMC_DS),
+ PINMUX_IPSR_GPSR(IP0SR3_23_20, MMC_SD_D3),
+ PINMUX_IPSR_GPSR(IP0SR3_27_24, MMC_D5),
+ PINMUX_IPSR_GPSR(IP0SR3_31_28, MMC_D4),
+
+ /* IP1SR3 */
+ PINMUX_IPSR_GPSR(IP1SR3_3_0, MMC_D7),
+
+ PINMUX_IPSR_GPSR(IP1SR3_7_4, MMC_D6),
+
+ PINMUX_IPSR_GPSR(IP1SR3_11_8, MMC_SD_CMD),
+
+ PINMUX_IPSR_GPSR(IP1SR3_15_12, SD_CD),
+
+ PINMUX_IPSR_GPSR(IP1SR3_19_16, SD_WP),
+
+ PINMUX_IPSR_GPSR(IP1SR3_23_20, IPC_CLKIN),
+ PINMUX_IPSR_GPSR(IP1SR3_23_20, IPC_CLKEN_IN),
+ PINMUX_IPSR_GPSR(IP1SR3_23_20, PWM1_A),
+ PINMUX_IPSR_GPSR(IP1SR3_23_20, TCLK3_X),
+
+ PINMUX_IPSR_GPSR(IP1SR3_27_24, IPC_CLKOUT),
+ PINMUX_IPSR_GPSR(IP1SR3_27_24, IPC_CLKEN_OUT),
+ PINMUX_IPSR_GPSR(IP1SR3_27_24, ERROROUTC_A),
+ PINMUX_IPSR_GPSR(IP1SR3_27_24, TCLK4_X),
+
+ PINMUX_IPSR_GPSR(IP1SR3_31_28, QSPI0_SSL),
+
+ /* IP2SR3 */
+ PINMUX_IPSR_GPSR(IP2SR3_3_0, QSPI0_IO3),
+ PINMUX_IPSR_GPSR(IP2SR3_7_4, QSPI0_IO2),
+ PINMUX_IPSR_GPSR(IP2SR3_11_8, QSPI0_MISO_IO1),
+ PINMUX_IPSR_GPSR(IP2SR3_15_12, QSPI0_MOSI_IO0),
+ PINMUX_IPSR_GPSR(IP2SR3_19_16, QSPI0_SPCLK),
+ PINMUX_IPSR_GPSR(IP2SR3_23_20, QSPI1_MOSI_IO0),
+ PINMUX_IPSR_GPSR(IP2SR3_27_24, QSPI1_SPCLK),
+ PINMUX_IPSR_GPSR(IP2SR3_31_28, QSPI1_MISO_IO1),
+
+ /* IP3SR3 */
+ PINMUX_IPSR_GPSR(IP3SR3_3_0, QSPI1_IO2),
+ PINMUX_IPSR_GPSR(IP3SR3_7_4, QSPI1_SSL),
+ PINMUX_IPSR_GPSR(IP3SR3_11_8, QSPI1_IO3),
+ PINMUX_IPSR_GPSR(IP3SR3_15_12, RPC_RESET_N),
+ PINMUX_IPSR_GPSR(IP3SR3_19_16, RPC_WP_N),
+ PINMUX_IPSR_GPSR(IP3SR3_23_20, RPC_INT_N),
+
+ /* IP0SR6 */
+ PINMUX_IPSR_GPSR(IP0SR6_3_0, AVB1_MDIO),
+
+ PINMUX_IPSR_MSEL(IP0SR6_7_4, AVB1_MAGIC, SEL_AVB1_MAGIC_1),
+
+ PINMUX_IPSR_MSEL(IP0SR6_11_8, AVB1_MDC, SEL_AVB1_MDC_1),
+
+ PINMUX_IPSR_GPSR(IP0SR6_15_12, AVB1_PHY_INT),
+
+ PINMUX_IPSR_GPSR(IP0SR6_19_16, AVB1_LINK),
+ PINMUX_IPSR_GPSR(IP0SR6_19_16, AVB1_MII_TX_ER),
+
+ PINMUX_IPSR_MSEL(IP0SR6_23_20, AVB1_AVTP_MATCH, SEL_AVB1_AVTP_MATCH_1),
+ PINMUX_IPSR_MSEL(IP0SR6_23_20, AVB1_MII_RX_ER, SEL_AVB1_AVTP_MATCH_0),
+
+ PINMUX_IPSR_MSEL(IP0SR6_27_24, AVB1_TXC, SEL_AVB1_TXC_1),
+ PINMUX_IPSR_MSEL(IP0SR6_27_24, AVB1_MII_TXC, SEL_AVB1_TXC_0),
+
+ PINMUX_IPSR_MSEL(IP0SR6_31_28, AVB1_TX_CTL, SEL_AVB1_TX_CTL_1),
+ PINMUX_IPSR_MSEL(IP0SR6_31_28, AVB1_MII_TX_EN, SEL_AVB1_TX_CTL_0),
+
+ /* IP1SR6 */
+ PINMUX_IPSR_GPSR(IP1SR6_3_0, AVB1_RXC),
+ PINMUX_IPSR_GPSR(IP1SR6_3_0, AVB1_MII_RXC),
+
+ PINMUX_IPSR_GPSR(IP1SR6_7_4, AVB1_RX_CTL),
+ PINMUX_IPSR_GPSR(IP1SR6_7_4, AVB1_MII_RX_DV),
+
+ PINMUX_IPSR_MSEL(IP1SR6_11_8, AVB1_AVTP_PPS, SEL_AVB1_AVTP_PPS_1),
+ PINMUX_IPSR_MSEL(IP1SR6_11_8, AVB1_MII_COL, SEL_AVB1_AVTP_PPS_0),
+
+ PINMUX_IPSR_GPSR(IP1SR6_15_12, AVB1_AVTP_CAPTURE),
+ PINMUX_IPSR_GPSR(IP1SR6_15_12, AVB1_MII_CRS),
+
+ PINMUX_IPSR_MSEL(IP1SR6_19_16, AVB1_TD1, SEL_AVB1_TD1_1),
+ PINMUX_IPSR_MSEL(IP1SR6_19_16, AVB1_MII_TD1, SEL_AVB1_TD1_0),
+
+ PINMUX_IPSR_MSEL(IP1SR6_23_20, AVB1_TD0, SEL_AVB1_TD0_1),
+ PINMUX_IPSR_MSEL(IP1SR6_23_20, AVB1_MII_TD0, SEL_AVB1_TD0_0),
+
+ PINMUX_IPSR_GPSR(IP1SR6_27_24, AVB1_RD1),
+ PINMUX_IPSR_GPSR(IP1SR6_27_24, AVB1_MII_RD1),
+
+ PINMUX_IPSR_GPSR(IP1SR6_31_28, AVB1_RD0),
+ PINMUX_IPSR_GPSR(IP1SR6_31_28, AVB1_MII_RD0),
+
+ /* IP2SR6 */
+ PINMUX_IPSR_MSEL(IP2SR6_3_0, AVB1_TD2, SEL_AVB1_TD2_1),
+ PINMUX_IPSR_MSEL(IP2SR6_3_0, AVB1_MII_TD2, SEL_AVB1_TD2_0),
+
+ PINMUX_IPSR_GPSR(IP2SR6_7_4, AVB1_RD2),
+ PINMUX_IPSR_GPSR(IP2SR6_7_4, AVB1_MII_RD2),
+
+ PINMUX_IPSR_MSEL(IP2SR6_11_8, AVB1_TD3, SEL_AVB1_TD3_1),
+ PINMUX_IPSR_MSEL(IP2SR6_11_8, AVB1_MII_TD3, SEL_AVB1_TD3_0),
+
+ PINMUX_IPSR_GPSR(IP2SR6_15_12, AVB1_RD3),
+ PINMUX_IPSR_GPSR(IP2SR6_15_12, AVB1_MII_RD3),
+
+ PINMUX_IPSR_GPSR(IP2SR6_19_16, AVB1_TXCREFCLK),
+
+ /* IP0SR7 */
+ PINMUX_IPSR_MSEL(IP0SR7_3_0, AVB0_AVTP_PPS, SEL_AVB0_AVTP_PPS_1),
+ PINMUX_IPSR_MSEL(IP0SR7_3_0, AVB0_MII_COL, SEL_AVB0_AVTP_PPS_0),
+
+ PINMUX_IPSR_GPSR(IP0SR7_7_4, AVB0_AVTP_CAPTURE),
+ PINMUX_IPSR_GPSR(IP0SR7_7_4, AVB0_MII_CRS),
+
+ PINMUX_IPSR_MSEL(IP0SR7_11_8, AVB0_AVTP_MATCH, SEL_AVB0_AVTP_MATCH_1),
+ PINMUX_IPSR_MSEL(IP0SR7_11_8, AVB0_MII_RX_ER, SEL_AVB0_AVTP_MATCH_0),
+ PINMUX_IPSR_MSEL(IP0SR7_11_8, CC5_OSCOUT, SEL_AVB0_AVTP_MATCH_0),
+
+ PINMUX_IPSR_MSEL(IP0SR7_15_12, AVB0_TD3, SEL_AVB0_TD3_1),
+ PINMUX_IPSR_MSEL(IP0SR7_15_12, AVB0_MII_TD3, SEL_AVB0_TD3_0),
+
+ PINMUX_IPSR_GPSR(IP0SR7_19_16, AVB0_LINK),
+ PINMUX_IPSR_GPSR(IP0SR7_19_16, AVB0_MII_TX_ER),
+
+ PINMUX_IPSR_GPSR(IP0SR7_23_20, AVB0_PHY_INT),
+
+ PINMUX_IPSR_MSEL(IP0SR7_27_24, AVB0_TD2, SEL_AVB0_TD2_1),
+ PINMUX_IPSR_MSEL(IP0SR7_27_24, AVB0_MII_TD2, SEL_AVB0_TD2_0),
+
+ PINMUX_IPSR_MSEL(IP0SR7_31_28, AVB0_TD1, SEL_AVB0_TD1_1),
+ PINMUX_IPSR_MSEL(IP0SR7_31_28, AVB0_MII_TD1, SEL_AVB0_TD1_0),
+
+ /* IP1SR7 */
+ PINMUX_IPSR_GPSR(IP1SR7_3_0, AVB0_RD3),
+ PINMUX_IPSR_GPSR(IP1SR7_3_0, AVB0_MII_RD3),
+
+ PINMUX_IPSR_GPSR(IP1SR7_7_4, AVB0_TXCREFCLK),
+
+ PINMUX_IPSR_MSEL(IP1SR7_11_8, AVB0_MAGIC, SEL_AVB0_MAGIC_1),
+
+ PINMUX_IPSR_MSEL(IP1SR7_15_12, AVB0_TD0, SEL_AVB0_TD0_1),
+ PINMUX_IPSR_MSEL(IP1SR7_15_12, AVB0_MII_TD0, SEL_AVB0_TD0_0),
+
+ PINMUX_IPSR_GPSR(IP1SR7_19_16, AVB0_RD2),
+ PINMUX_IPSR_GPSR(IP1SR7_19_16, AVB0_MII_RD2),
+
+ PINMUX_IPSR_MSEL(IP1SR7_23_20, AVB0_MDC, SEL_AVB0_MDC_1),
+
+ PINMUX_IPSR_GPSR(IP1SR7_27_24, AVB0_MDIO),
+
+ PINMUX_IPSR_MSEL(IP1SR7_31_28, AVB0_TXC, SEL_AVB0_TXC_1),
+ PINMUX_IPSR_MSEL(IP1SR7_31_28, AVB0_MII_TXC, SEL_AVB0_TXC_0),
+
+ /* IP2SR7 */
+ PINMUX_IPSR_MSEL(IP2SR7_3_0, AVB0_TX_CTL, SEL_AVB0_TX_CTL_1),
+ PINMUX_IPSR_MSEL(IP2SR7_3_0, AVB0_MII_TX_EN, SEL_AVB0_TX_CTL_0),
+
+ PINMUX_IPSR_GPSR(IP2SR7_7_4, AVB0_RD1),
+ PINMUX_IPSR_GPSR(IP2SR7_7_4, AVB0_MII_RD1),
+
+ PINMUX_IPSR_GPSR(IP2SR7_11_8, AVB0_RD0),
+ PINMUX_IPSR_GPSR(IP2SR7_11_8, AVB0_MII_RD0),
+
+ PINMUX_IPSR_GPSR(IP2SR7_15_12, AVB0_RXC),
+ PINMUX_IPSR_GPSR(IP2SR7_15_12, AVB0_MII_RXC),
+
+ PINMUX_IPSR_GPSR(IP2SR7_19_16, AVB0_RX_CTL),
+ PINMUX_IPSR_GPSR(IP2SR7_19_16, AVB0_MII_RX_DV),
+
+ /* IP0SR8 */
+ PINMUX_IPSR_MSEL(IP0SR8_3_0, SCL0, SEL_SCL0_0),
+ PINMUX_IPSR_MSEL(IP0SR8_7_4, SDA0, SEL_SDA0_0),
+ PINMUX_IPSR_MSEL(IP0SR8_11_8, SCL1, SEL_SCL1_0),
+ PINMUX_IPSR_MSEL(IP0SR8_15_12, SDA1, SEL_SDA1_0),
+ PINMUX_IPSR_MSEL(IP0SR8_19_16, SCL2, SEL_SCL2_0),
+ PINMUX_IPSR_MSEL(IP0SR8_23_20, SDA2, SEL_SDA2_0),
+ PINMUX_IPSR_MSEL(IP0SR8_27_24, SCL3, SEL_SCL3_0),
+ PINMUX_IPSR_MSEL(IP0SR8_31_28, SDA3, SEL_SDA3_0),
+
+ /* IP1SR8 */
+ PINMUX_IPSR_MSEL(IP1SR8_3_0, SCL4, SEL_SCL4_0),
+ PINMUX_IPSR_MSEL(IP1SR8_3_0, HRX2, SEL_SCL4_0),
+ PINMUX_IPSR_MSEL(IP1SR8_3_0, SCK4, SEL_SCL4_0),
+
+ PINMUX_IPSR_MSEL(IP1SR8_7_4, SDA4, SEL_SDA4_0),
+ PINMUX_IPSR_MSEL(IP1SR8_7_4, HTX2, SEL_SDA4_0),
+ PINMUX_IPSR_MSEL(IP1SR8_7_4, CTS4_N, SEL_SDA4_0),
+
+ PINMUX_IPSR_MSEL(IP1SR8_11_8, SCL5, SEL_SCL5_0),
+ PINMUX_IPSR_MSEL(IP1SR8_11_8, HRTS2_N, SEL_SCL5_0),
+ PINMUX_IPSR_MSEL(IP1SR8_11_8, RTS4_N, SEL_SCL5_0),
+
+ PINMUX_IPSR_MSEL(IP1SR8_15_12, SDA5, SEL_SDA5_0),
+ PINMUX_IPSR_MSEL(IP1SR8_15_12, SCIF_CLK2, SEL_SDA5_0),
+
+ PINMUX_IPSR_GPSR(IP1SR8_19_16, HCTS2_N),
+ PINMUX_IPSR_GPSR(IP1SR8_19_16, TX4),
+
+ PINMUX_IPSR_GPSR(IP1SR8_23_20, HSCK2),
+ PINMUX_IPSR_GPSR(IP1SR8_23_20, RX4),
+};
+
+/*
+ * Pins not associated with a GPIO port.
+ */
+enum {
+ GP_ASSIGN_LAST(),
+};
+
+static const struct sh_pfc_pin pinmux_pins[] = {
+ PINMUX_GPIO_GP_ALL(),
+};
+
+/* - AVB0 ------------------------------------------------ */
+static const unsigned int avb0_link_pins[] = {
+ /* AVB0_LINK */
+ RCAR_GP_PIN(7, 4),
+};
+static const unsigned int avb0_link_mux[] = {
+ AVB0_LINK_MARK,
+};
+static const unsigned int avb0_magic_pins[] = {
+ /* AVB0_MAGIC */
+ RCAR_GP_PIN(7, 10),
+};
+static const unsigned int avb0_magic_mux[] = {
+ AVB0_MAGIC_MARK,
+};
+static const unsigned int avb0_phy_int_pins[] = {
+ /* AVB0_PHY_INT */
+ RCAR_GP_PIN(7, 5),
+};
+static const unsigned int avb0_phy_int_mux[] = {
+ AVB0_PHY_INT_MARK,
+};
+static const unsigned int avb0_mdio_pins[] = {
+ /* AVB0_MDC, AVB0_MDIO */
+ RCAR_GP_PIN(7, 13), RCAR_GP_PIN(7, 14),
+};
+static const unsigned int avb0_mdio_mux[] = {
+ AVB0_MDC_MARK, AVB0_MDIO_MARK,
+};
+static const unsigned int avb0_rgmii_pins[] = {
+ /*
+ * AVB0_TX_CTL, AVB0_TXC, AVB0_TD0, AVB0_TD1, AVB0_TD2, AVB0_TD3,
+ * AVB0_RX_CTL, AVB0_RXC, AVB0_RD0, AVB0_RD1, AVB0_RD2, AVB0_RD3,
+ */
+ RCAR_GP_PIN(7, 16), RCAR_GP_PIN(7, 15),
+ RCAR_GP_PIN(7, 11), RCAR_GP_PIN(7, 7),
+ RCAR_GP_PIN(7, 6), RCAR_GP_PIN(7, 3),
+ RCAR_GP_PIN(7, 20), RCAR_GP_PIN(7, 19),
+ RCAR_GP_PIN(7, 18), RCAR_GP_PIN(7, 17),
+ RCAR_GP_PIN(7, 12), RCAR_GP_PIN(7, 8),
+};
+static const unsigned int avb0_rgmii_mux[] = {
+ AVB0_TX_CTL_MARK, AVB0_TXC_MARK,
+ AVB0_TD0_MARK, AVB0_TD1_MARK,
+ AVB0_TD2_MARK, AVB0_TD3_MARK,
+ AVB0_RX_CTL_MARK, AVB0_RXC_MARK,
+ AVB0_RD0_MARK, AVB0_RD1_MARK,
+ AVB0_RD2_MARK, AVB0_RD3_MARK,
+};
+static const unsigned int avb0_txcrefclk_pins[] = {
+ /* AVB0_TXCREFCLK */
+ RCAR_GP_PIN(7, 9),
+};
+static const unsigned int avb0_txcrefclk_mux[] = {
+ AVB0_TXCREFCLK_MARK,
+};
+static const unsigned int avb0_avtp_pps_pins[] = {
+ /* AVB0_AVTP_PPS */
+ RCAR_GP_PIN(7, 0),
+};
+static const unsigned int avb0_avtp_pps_mux[] = {
+ AVB0_AVTP_PPS_MARK,
+};
+static const unsigned int avb0_avtp_capture_pins[] = {
+ /* AVB0_AVTP_CAPTURE */
+ RCAR_GP_PIN(7, 1),
+};
+static const unsigned int avb0_avtp_capture_mux[] = {
+ AVB0_AVTP_CAPTURE_MARK,
+};
+static const unsigned int avb0_avtp_match_pins[] = {
+ /* AVB0_AVTP_MATCH */
+ RCAR_GP_PIN(7, 2),
+};
+static const unsigned int avb0_avtp_match_mux[] = {
+ AVB0_AVTP_MATCH_MARK,
+};
+
+/* - AVB1 ------------------------------------------------ */
+static const unsigned int avb1_link_pins[] = {
+ /* AVB1_LINK */
+ RCAR_GP_PIN(6, 4),
+};
+static const unsigned int avb1_link_mux[] = {
+ AVB1_LINK_MARK,
+};
+static const unsigned int avb1_magic_pins[] = {
+ /* AVB1_MAGIC */
+ RCAR_GP_PIN(6, 1),
+};
+static const unsigned int avb1_magic_mux[] = {
+ AVB1_MAGIC_MARK,
+};
+static const unsigned int avb1_phy_int_pins[] = {
+ /* AVB1_PHY_INT */
+ RCAR_GP_PIN(6, 3),
+};
+static const unsigned int avb1_phy_int_mux[] = {
+ AVB1_PHY_INT_MARK,
+};
+static const unsigned int avb1_mdio_pins[] = {
+ /* AVB1_MDC, AVB1_MDIO */
+ RCAR_GP_PIN(6, 2), RCAR_GP_PIN(6, 0),
+};
+static const unsigned int avb1_mdio_mux[] = {
+ AVB1_MDC_MARK, AVB1_MDIO_MARK,
+};
+static const unsigned int avb1_rgmii_pins[] = {
+ /*
+ * AVB1_TX_CTL, AVB1_TXC, AVB1_TD0, AVB1_TD1, AVB1_TD2, AVB1_TD3,
+ * AVB1_RX_CTL, AVB1_RXC, AVB1_RD0, AVB1_RD1, AVB1_RD2, AVB1_RD3,
+ */
+ RCAR_GP_PIN(6, 7), RCAR_GP_PIN(6, 6),
+ RCAR_GP_PIN(6, 13), RCAR_GP_PIN(6, 12),
+ RCAR_GP_PIN(6, 16), RCAR_GP_PIN(6, 18),
+ RCAR_GP_PIN(6, 9), RCAR_GP_PIN(6, 8),
+ RCAR_GP_PIN(6, 15), RCAR_GP_PIN(6, 14),
+ RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 19),
+};
+static const unsigned int avb1_rgmii_mux[] = {
+ AVB1_TX_CTL_MARK, AVB1_TXC_MARK,
+ AVB1_TD0_MARK, AVB1_TD1_MARK,
+ AVB1_TD2_MARK, AVB1_TD3_MARK,
+ AVB1_RX_CTL_MARK, AVB1_RXC_MARK,
+ AVB1_RD0_MARK, AVB1_RD1_MARK,
+ AVB1_RD2_MARK, AVB1_RD3_MARK,
+};
+static const unsigned int avb1_txcrefclk_pins[] = {
+ /* AVB1_TXCREFCLK */
+ RCAR_GP_PIN(6, 20),
+};
+static const unsigned int avb1_txcrefclk_mux[] = {
+ AVB1_TXCREFCLK_MARK,
+};
+static const unsigned int avb1_avtp_pps_pins[] = {
+ /* AVB1_AVTP_PPS */
+ RCAR_GP_PIN(6, 10),
+};
+static const unsigned int avb1_avtp_pps_mux[] = {
+ AVB1_AVTP_PPS_MARK,
+};
+static const unsigned int avb1_avtp_capture_pins[] = {
+ /* AVB1_AVTP_CAPTURE */
+ RCAR_GP_PIN(6, 11),
+};
+static const unsigned int avb1_avtp_capture_mux[] = {
+ AVB1_AVTP_CAPTURE_MARK,
+};
+static const unsigned int avb1_avtp_match_pins[] = {
+ /* AVB1_AVTP_MATCH */
+ RCAR_GP_PIN(6, 5),
+};
+static const unsigned int avb1_avtp_match_mux[] = {
+ AVB1_AVTP_MATCH_MARK,
+};
+
+/* - AVB2 ------------------------------------------------ */
+static const unsigned int avb2_link_pins[] = {
+ /* AVB2_LINK */
+ RCAR_GP_PIN(5, 3),
+};
+static const unsigned int avb2_link_mux[] = {
+ AVB2_LINK_MARK,
+};
+static const unsigned int avb2_magic_pins[] = {
+ /* AVB2_MAGIC */
+ RCAR_GP_PIN(5, 5),
+};
+static const unsigned int avb2_magic_mux[] = {
+ AVB2_MAGIC_MARK,
+};
+static const unsigned int avb2_phy_int_pins[] = {
+ /* AVB2_PHY_INT */
+ RCAR_GP_PIN(5, 4),
+};
+static const unsigned int avb2_phy_int_mux[] = {
+ AVB2_PHY_INT_MARK,
+};
+static const unsigned int avb2_mdio_pins[] = {
+ /* AVB2_MDC, AVB2_MDIO */
+ RCAR_GP_PIN(5, 6), RCAR_GP_PIN(5, 10),
+};
+static const unsigned int avb2_mdio_mux[] = {
+ AVB2_MDC_MARK, AVB2_MDIO_MARK,
+};
+static const unsigned int avb2_rgmii_pins[] = {
+ /*
+ * AVB2_TX_CTL, AVB2_TXC, AVB2_TD0, AVB2_TD1, AVB2_TD2, AVB2_TD3,
+ * AVB2_RX_CTL, AVB2_RXC, AVB2_RD0, AVB2_RD1, AVB2_RD2, AVB2_RD3,
+ */
+ RCAR_GP_PIN(5, 19), RCAR_GP_PIN(5, 16),
+ RCAR_GP_PIN(5, 15), RCAR_GP_PIN(5, 12),
+ RCAR_GP_PIN(5, 11), RCAR_GP_PIN(5, 8),
+ RCAR_GP_PIN(5, 20), RCAR_GP_PIN(5, 18),
+ RCAR_GP_PIN(5, 17), RCAR_GP_PIN(5, 14),
+ RCAR_GP_PIN(5, 13), RCAR_GP_PIN(5, 9),
+};
+static const unsigned int avb2_rgmii_mux[] = {
+ AVB2_TX_CTL_MARK, AVB2_TXC_MARK,
+ AVB2_TD0_MARK, AVB2_TD1_MARK,
+ AVB2_TD2_MARK, AVB2_TD3_MARK,
+ AVB2_RX_CTL_MARK, AVB2_RXC_MARK,
+ AVB2_RD0_MARK, AVB2_RD1_MARK,
+ AVB2_RD2_MARK, AVB2_RD3_MARK,
+};
+static const unsigned int avb2_txcrefclk_pins[] = {
+ /* AVB2_TXCREFCLK */
+ RCAR_GP_PIN(5, 7),
+};
+static const unsigned int avb2_txcrefclk_mux[] = {
+ AVB2_TXCREFCLK_MARK,
+};
+static const unsigned int avb2_avtp_pps_pins[] = {
+ /* AVB2_AVTP_PPS */
+ RCAR_GP_PIN(5, 0),
+};
+static const unsigned int avb2_avtp_pps_mux[] = {
+ AVB2_AVTP_PPS_MARK,
+};
+static const unsigned int avb2_avtp_capture_pins[] = {
+ /* AVB2_AVTP_CAPTURE */
+ RCAR_GP_PIN(5, 1),
+};
+static const unsigned int avb2_avtp_capture_mux[] = {
+ AVB2_AVTP_CAPTURE_MARK,
+};
+static const unsigned int avb2_avtp_match_pins[] = {
+ /* AVB2_AVTP_MATCH */
+ RCAR_GP_PIN(5, 2),
+};
+static const unsigned int avb2_avtp_match_mux[] = {
+ AVB2_AVTP_MATCH_MARK,
+};
+
+/* - CANFD0 ----------------------------------------------------------------- */
+static const unsigned int canfd0_data_pins[] = {
+ /* CANFD0_TX, CANFD0_RX */
+ RCAR_GP_PIN(2, 10), RCAR_GP_PIN(2, 11),
+};
+static const unsigned int canfd0_data_mux[] = {
+ CANFD0_TX_MARK, CANFD0_RX_MARK,
+};
+
+/* - CANFD1 ----------------------------------------------------------------- */
+static const unsigned int canfd1_data_pins[] = {
+ /* CANFD1_TX, CANFD1_RX */
+ RCAR_GP_PIN(2, 0), RCAR_GP_PIN(2, 1),
+};
+static const unsigned int canfd1_data_mux[] = {
+ CANFD1_TX_MARK, CANFD1_RX_MARK,
+};
+
+/* - CANFD2 ----------------------------------------------------------------- */
+static const unsigned int canfd2_data_pins[] = {
+ /* CANFD2_TX, CANFD2_RX */
+ RCAR_GP_PIN(2, 12), RCAR_GP_PIN(2, 13),
+};
+static const unsigned int canfd2_data_mux[] = {
+ CANFD2_TX_MARK, CANFD2_RX_MARK,
+};
+
+/* - CANFD3 ----------------------------------------------------------------- */
+static const unsigned int canfd3_data_pins[] = {
+ /* CANFD3_TX, CANFD3_RX */
+ RCAR_GP_PIN(2, 14), RCAR_GP_PIN(2, 15),
+};
+static const unsigned int canfd3_data_mux[] = {
+ CANFD3_TX_MARK, CANFD3_RX_MARK,
+};
+
+/* - CANFD4 ----------------------------------------------------------------- */
+static const unsigned int canfd4_data_pins[] = {
+ /* CANFD4_TX, CANFD4_RX */
+ RCAR_GP_PIN(2, 16), RCAR_GP_PIN(2, 17),
+};
+static const unsigned int canfd4_data_mux[] = {
+ CANFD4_TX_MARK, CANFD4_RX_MARK,
+};
+
+/* - CANFD5 ----------------------------------------------------------------- */
+static const unsigned int canfd5_data_pins[] = {
+ /* CANFD5_TX, CANFD5_RX */
+ RCAR_GP_PIN(2, 2), RCAR_GP_PIN(2, 3),
+};
+static const unsigned int canfd5_data_mux[] = {
+ CANFD5_TX_MARK, CANFD5_RX_MARK,
+};
+
+/* - CANFD5_B ----------------------------------------------------------------- */
+static const unsigned int canfd5_data_b_pins[] = {
+ /* CANFD5_TX_B, CANFD5_RX_B */
+ RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 9),
+};
+static const unsigned int canfd5_data_b_mux[] = {
+ CANFD5_TX_B_MARK, CANFD5_RX_B_MARK,
+};
+
+/* - CANFD6 ----------------------------------------------------------------- */
+static const unsigned int canfd6_data_pins[] = {
+ /* CANFD6_TX, CANFD6_RX */
+ RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 8),
+};
+static const unsigned int canfd6_data_mux[] = {
+ CANFD6_TX_MARK, CANFD6_RX_MARK,
+};
+
+/* - CANFD7 ----------------------------------------------------------------- */
+static const unsigned int canfd7_data_pins[] = {
+ /* CANFD7_TX, CANFD7_RX */
+ RCAR_GP_PIN(2, 18), RCAR_GP_PIN(2, 19),
+};
+static const unsigned int canfd7_data_mux[] = {
+ CANFD7_TX_MARK, CANFD7_RX_MARK,
+};
+
+/* - CANFD Clock ------------------------------------------------------------ */
+static const unsigned int can_clk_pins[] = {
+ /* CAN_CLK */
+ RCAR_GP_PIN(2, 9),
+};
+static const unsigned int can_clk_mux[] = {
+ CAN_CLK_MARK,
+};
+
+/* - HSCIF0 ----------------------------------------------------------------- */
+static const unsigned int hscif0_data_pins[] = {
+ /* HRX0, HTX0 */
+ RCAR_GP_PIN(1, 16), RCAR_GP_PIN(1, 12),
+};
+static const unsigned int hscif0_data_mux[] = {
+ HRX0_MARK, HTX0_MARK,
+};
+static const unsigned int hscif0_clk_pins[] = {
+ /* HSCK0 */
+ RCAR_GP_PIN(1, 15),
+};
+static const unsigned int hscif0_clk_mux[] = {
+ HSCK0_MARK,
+};
+static const unsigned int hscif0_ctrl_pins[] = {
+ /* HRTS0_N, HCTS0_N */
+ RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13),
+};
+static const unsigned int hscif0_ctrl_mux[] = {
+ HRTS0_N_MARK, HCTS0_N_MARK,
+};
+
+/* - HSCIF1 ----------------------------------------------------------------- */
+static const unsigned int hscif1_data_pins[] = {
+ /* HRX1, HTX1 */
+ RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14),
+};
+static const unsigned int hscif1_data_mux[] = {
+ HRX1_MARK, HTX1_MARK,
+};
+static const unsigned int hscif1_clk_pins[] = {
+ /* HSCK1 */
+ RCAR_GP_PIN(0, 18),
+};
+static const unsigned int hscif1_clk_mux[] = {
+ HSCK1_MARK,
+};
+static const unsigned int hscif1_ctrl_pins[] = {
+ /* HRTS1_N, HCTS1_N */
+ RCAR_GP_PIN(0, 17), RCAR_GP_PIN(0, 16),
+};
+static const unsigned int hscif1_ctrl_mux[] = {
+ HRTS1_N_MARK, HCTS1_N_MARK,
+};
+
+/* - HSCIF1_X --------------------------------------------------------------- */
+static const unsigned int hscif1_data_x_pins[] = {
+ /* HRX1_X, HTX1_X */
+ RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6),
+};
+static const unsigned int hscif1_data_x_mux[] = {
+ HRX1_X_MARK, HTX1_X_MARK,
+};
+static const unsigned int hscif1_clk_x_pins[] = {
+ /* HSCK1_X */
+ RCAR_GP_PIN(1, 10),
+};
+static const unsigned int hscif1_clk_x_mux[] = {
+ HSCK1_X_MARK,
+};
+static const unsigned int hscif1_ctrl_x_pins[] = {
+ /* HRTS1_N_X, HCTS1_N_X */
+ RCAR_GP_PIN(1, 9), RCAR_GP_PIN(1, 8),
+};
+static const unsigned int hscif1_ctrl_x_mux[] = {
+ HRTS1_N_X_MARK, HCTS1_N_X_MARK,
+};
+
+/* - HSCIF2 ----------------------------------------------------------------- */
+static const unsigned int hscif2_data_pins[] = {
+ /* HRX2, HTX2 */
+ RCAR_GP_PIN(8, 8), RCAR_GP_PIN(8, 9),
+};
+static const unsigned int hscif2_data_mux[] = {
+ HRX2_MARK, HTX2_MARK,
+};
+static const unsigned int hscif2_clk_pins[] = {
+ /* HSCK2 */
+ RCAR_GP_PIN(8, 13),
+};
+static const unsigned int hscif2_clk_mux[] = {
+ HSCK2_MARK,
+};
+static const unsigned int hscif2_ctrl_pins[] = {
+ /* HRTS2_N, HCTS2_N */
+ RCAR_GP_PIN(8, 10), RCAR_GP_PIN(8, 12),
+};
+static const unsigned int hscif2_ctrl_mux[] = {
+ HRTS2_N_MARK, HCTS2_N_MARK,
+};
+
+/* - HSCIF3 ----------------------------------------------------------------- */
+static const unsigned int hscif3_data_pins[] = {
+ /* HRX3, HTX3 */
+ RCAR_GP_PIN(1, 24), RCAR_GP_PIN(1, 28),
+};
+static const unsigned int hscif3_data_mux[] = {
+ HRX3_MARK, HTX3_MARK,
+};
+static const unsigned int hscif3_clk_pins[] = {
+ /* HSCK3 */
+ RCAR_GP_PIN(1, 25),
+};
+static const unsigned int hscif3_clk_mux[] = {
+ HSCK3_MARK,
+};
+static const unsigned int hscif3_ctrl_pins[] = {
+ /* HRTS3_N, HCTS3_N */
+ RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 27),
+};
+static const unsigned int hscif3_ctrl_mux[] = {
+ HRTS3_N_MARK, HCTS3_N_MARK,
+};
+
+/* - HSCIF3_A ----------------------------------------------------------------- */
+static const unsigned int hscif3_data_a_pins[] = {
+ /* HRX3_A, HTX3_A */
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 0),
+};
+static const unsigned int hscif3_data_a_mux[] = {
+ HRX3_A_MARK, HTX3_A_MARK,
+};
+static const unsigned int hscif3_clk_a_pins[] = {
+ /* HSCK3_A */
+ RCAR_GP_PIN(1, 3),
+};
+static const unsigned int hscif3_clk_a_mux[] = {
+ HSCK3_A_MARK,
+};
+static const unsigned int hscif3_ctrl_a_pins[] = {
+ /* HRTS3_N_A, HCTS3_N_A */
+ RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 1),
+};
+static const unsigned int hscif3_ctrl_a_mux[] = {
+ HRTS3_N_A_MARK, HCTS3_N_A_MARK,
+};
+
+/* - I2C0 ------------------------------------------------------------------- */
+static const unsigned int i2c0_pins[] = {
+ /* SDA0, SCL0 */
+ RCAR_GP_PIN(8, 1), RCAR_GP_PIN(8, 0),
+};
+static const unsigned int i2c0_mux[] = {
+ SDA0_MARK, SCL0_MARK,
+};
+
+/* - I2C1 ------------------------------------------------------------------- */
+static const unsigned int i2c1_pins[] = {
+ /* SDA1, SCL1 */
+ RCAR_GP_PIN(8, 3), RCAR_GP_PIN(8, 2),
+};
+static const unsigned int i2c1_mux[] = {
+ SDA1_MARK, SCL1_MARK,
+};
+
+/* - I2C2 ------------------------------------------------------------------- */
+static const unsigned int i2c2_pins[] = {
+ /* SDA2, SCL2 */
+ RCAR_GP_PIN(8, 5), RCAR_GP_PIN(8, 4),
+};
+static const unsigned int i2c2_mux[] = {
+ SDA2_MARK, SCL2_MARK,
+};
+
+/* - I2C3 ------------------------------------------------------------------- */
+static const unsigned int i2c3_pins[] = {
+ /* SDA3, SCL3 */
+ RCAR_GP_PIN(8, 7), RCAR_GP_PIN(8, 6),
+};
+static const unsigned int i2c3_mux[] = {
+ SDA3_MARK, SCL3_MARK,
+};
+
+/* - I2C4 ------------------------------------------------------------------- */
+static const unsigned int i2c4_pins[] = {
+ /* SDA4, SCL4 */
+ RCAR_GP_PIN(8, 9), RCAR_GP_PIN(8, 8),
+};
+static const unsigned int i2c4_mux[] = {
+ SDA4_MARK, SCL4_MARK,
+};
+
+/* - I2C5 ------------------------------------------------------------------- */
+static const unsigned int i2c5_pins[] = {
+ /* SDA5, SCL5 */
+ RCAR_GP_PIN(8, 11), RCAR_GP_PIN(8, 10),
+};
+static const unsigned int i2c5_mux[] = {
+ SDA5_MARK, SCL5_MARK,
+};
+
+/* - MMC -------------------------------------------------------------------- */
+static const unsigned int mmc_data_pins[] = {
+ /* MMC_SD_D[0:3], MMC_D[4:7] */
+ RCAR_GP_PIN(3, 1), RCAR_GP_PIN(3, 0),
+ RCAR_GP_PIN(3, 2), RCAR_GP_PIN(3, 5),
+ RCAR_GP_PIN(3, 7), RCAR_GP_PIN(3, 6),
+ RCAR_GP_PIN(3, 9), RCAR_GP_PIN(3, 8),
+};
+static const unsigned int mmc_data_mux[] = {
+ MMC_SD_D0_MARK, MMC_SD_D1_MARK,
+ MMC_SD_D2_MARK, MMC_SD_D3_MARK,
+ MMC_D4_MARK, MMC_D5_MARK,
+ MMC_D6_MARK, MMC_D7_MARK,
+};
+static const unsigned int mmc_ctrl_pins[] = {
+ /* MMC_SD_CLK, MMC_SD_CMD */
+ RCAR_GP_PIN(3, 3), RCAR_GP_PIN(3, 10),
+};
+static const unsigned int mmc_ctrl_mux[] = {
+ MMC_SD_CLK_MARK, MMC_SD_CMD_MARK,
+};
+static const unsigned int mmc_cd_pins[] = {
+ /* SD_CD */
+ RCAR_GP_PIN(3, 11),
+};
+static const unsigned int mmc_cd_mux[] = {
+ SD_CD_MARK,
+};
+static const unsigned int mmc_wp_pins[] = {
+ /* SD_WP */
+ RCAR_GP_PIN(3, 12),
+};
+static const unsigned int mmc_wp_mux[] = {
+ SD_WP_MARK,
+};
+static const unsigned int mmc_ds_pins[] = {
+ /* MMC_DS */
+ RCAR_GP_PIN(3, 4),
+};
+static const unsigned int mmc_ds_mux[] = {
+ MMC_DS_MARK,
+};
+
+/* - MSIOF0 ----------------------------------------------------------------- */
+static const unsigned int msiof0_clk_pins[] = {
+ /* MSIOF0_SCK */
+ RCAR_GP_PIN(1, 10),
+};
+static const unsigned int msiof0_clk_mux[] = {
+ MSIOF0_SCK_MARK,
+};
+static const unsigned int msiof0_sync_pins[] = {
+ /* MSIOF0_SYNC */
+ RCAR_GP_PIN(1, 8),
+};
+static const unsigned int msiof0_sync_mux[] = {
+ MSIOF0_SYNC_MARK,
+};
+static const unsigned int msiof0_ss1_pins[] = {
+ /* MSIOF0_SS1 */
+ RCAR_GP_PIN(1, 7),
+};
+static const unsigned int msiof0_ss1_mux[] = {
+ MSIOF0_SS1_MARK,
+};
+static const unsigned int msiof0_ss2_pins[] = {
+ /* MSIOF0_SS2 */
+ RCAR_GP_PIN(1, 6),
+};
+static const unsigned int msiof0_ss2_mux[] = {
+ MSIOF0_SS2_MARK,
+};
+static const unsigned int msiof0_txd_pins[] = {
+ /* MSIOF0_TXD */
+ RCAR_GP_PIN(1, 9),
+};
+static const unsigned int msiof0_txd_mux[] = {
+ MSIOF0_TXD_MARK,
+};
+static const unsigned int msiof0_rxd_pins[] = {
+ /* MSIOF0_RXD */
+ RCAR_GP_PIN(1, 11),
+};
+static const unsigned int msiof0_rxd_mux[] = {
+ MSIOF0_RXD_MARK,
+};
+
+/* - MSIOF1 ----------------------------------------------------------------- */
+static const unsigned int msiof1_clk_pins[] = {
+ /* MSIOF1_SCK */
+ RCAR_GP_PIN(1, 3),
+};
+static const unsigned int msiof1_clk_mux[] = {
+ MSIOF1_SCK_MARK,
+};
+static const unsigned int msiof1_sync_pins[] = {
+ /* MSIOF1_SYNC */
+ RCAR_GP_PIN(1, 2),
+};
+static const unsigned int msiof1_sync_mux[] = {
+ MSIOF1_SYNC_MARK,
+};
+static const unsigned int msiof1_ss1_pins[] = {
+ /* MSIOF1_SS1 */
+ RCAR_GP_PIN(1, 1),
+};
+static const unsigned int msiof1_ss1_mux[] = {
+ MSIOF1_SS1_MARK,
+};
+static const unsigned int msiof1_ss2_pins[] = {
+ /* MSIOF1_SS2 */
+ RCAR_GP_PIN(1, 0),
+};
+static const unsigned int msiof1_ss2_mux[] = {
+ MSIOF1_SS2_MARK,
+};
+static const unsigned int msiof1_txd_pins[] = {
+ /* MSIOF1_TXD */
+ RCAR_GP_PIN(1, 4),
+};
+static const unsigned int msiof1_txd_mux[] = {
+ MSIOF1_TXD_MARK,
+};
+static const unsigned int msiof1_rxd_pins[] = {
+ /* MSIOF1_RXD */
+ RCAR_GP_PIN(1, 5),
+};
+static const unsigned int msiof1_rxd_mux[] = {
+ MSIOF1_RXD_MARK,
+};
+
+/* - MSIOF2 ----------------------------------------------------------------- */
+static const unsigned int msiof2_clk_pins[] = {
+ /* MSIOF2_SCK */
+ RCAR_GP_PIN(0, 17),
+};
+static const unsigned int msiof2_clk_mux[] = {
+ MSIOF2_SCK_MARK,
+};
+static const unsigned int msiof2_sync_pins[] = {
+ /* MSIOF2_SYNC */
+ RCAR_GP_PIN(0, 15),
+};
+static const unsigned int msiof2_sync_mux[] = {
+ MSIOF2_SYNC_MARK,
+};
+static const unsigned int msiof2_ss1_pins[] = {
+ /* MSIOF2_SS1 */
+ RCAR_GP_PIN(0, 14),
+};
+static const unsigned int msiof2_ss1_mux[] = {
+ MSIOF2_SS1_MARK,
+};
+static const unsigned int msiof2_ss2_pins[] = {
+ /* MSIOF2_SS2 */
+ RCAR_GP_PIN(0, 13),
+};
+static const unsigned int msiof2_ss2_mux[] = {
+ MSIOF2_SS2_MARK,
+};
+static const unsigned int msiof2_txd_pins[] = {
+ /* MSIOF2_TXD */
+ RCAR_GP_PIN(0, 16),
+};
+static const unsigned int msiof2_txd_mux[] = {
+ MSIOF2_TXD_MARK,
+};
+static const unsigned int msiof2_rxd_pins[] = {
+ /* MSIOF2_RXD */
+ RCAR_GP_PIN(0, 18),
+};
+static const unsigned int msiof2_rxd_mux[] = {
+ MSIOF2_RXD_MARK,
+};
+
+/* - MSIOF3 ----------------------------------------------------------------- */
+static const unsigned int msiof3_clk_pins[] = {
+ /* MSIOF3_SCK */
+ RCAR_GP_PIN(0, 3),
+};
+static const unsigned int msiof3_clk_mux[] = {
+ MSIOF3_SCK_MARK,
+};
+static const unsigned int msiof3_sync_pins[] = {
+ /* MSIOF3_SYNC */
+ RCAR_GP_PIN(0, 6),
+};
+static const unsigned int msiof3_sync_mux[] = {
+ MSIOF3_SYNC_MARK,
+};
+static const unsigned int msiof3_ss1_pins[] = {
+ /* MSIOF3_SS1 */
+ RCAR_GP_PIN(0, 1),
+};
+static const unsigned int msiof3_ss1_mux[] = {
+ MSIOF3_SS1_MARK,
+};
+static const unsigned int msiof3_ss2_pins[] = {
+ /* MSIOF3_SS2 */
+ RCAR_GP_PIN(0, 2),
+};
+static const unsigned int msiof3_ss2_mux[] = {
+ MSIOF3_SS2_MARK,
+};
+static const unsigned int msiof3_txd_pins[] = {
+ /* MSIOF3_TXD */
+ RCAR_GP_PIN(0, 4),
+};
+static const unsigned int msiof3_txd_mux[] = {
+ MSIOF3_TXD_MARK,
+};
+static const unsigned int msiof3_rxd_pins[] = {
+ /* MSIOF3_RXD */
+ RCAR_GP_PIN(0, 5),
+};
+static const unsigned int msiof3_rxd_mux[] = {
+ MSIOF3_RXD_MARK,
+};
+
+/* - MSIOF4 ----------------------------------------------------------------- */
+static const unsigned int msiof4_clk_pins[] = {
+ /* MSIOF4_SCK */
+ RCAR_GP_PIN(1, 25),
+};
+static const unsigned int msiof4_clk_mux[] = {
+ MSIOF4_SCK_MARK,
+};
+static const unsigned int msiof4_sync_pins[] = {
+ /* MSIOF4_SYNC */
+ RCAR_GP_PIN(1, 28),
+};
+static const unsigned int msiof4_sync_mux[] = {
+ MSIOF4_SYNC_MARK,
+};
+static const unsigned int msiof4_ss1_pins[] = {
+ /* MSIOF4_SS1 */
+ RCAR_GP_PIN(1, 23),
+};
+static const unsigned int msiof4_ss1_mux[] = {
+ MSIOF4_SS1_MARK,
+};
+static const unsigned int msiof4_ss2_pins[] = {
+ /* MSIOF4_SS2 */
+ RCAR_GP_PIN(1, 24),
+};
+static const unsigned int msiof4_ss2_mux[] = {
+ MSIOF4_SS2_MARK,
+};
+static const unsigned int msiof4_txd_pins[] = {
+ /* MSIOF4_TXD */
+ RCAR_GP_PIN(1, 26),
+};
+static const unsigned int msiof4_txd_mux[] = {
+ MSIOF4_TXD_MARK,
+};
+static const unsigned int msiof4_rxd_pins[] = {
+ /* MSIOF4_RXD */
+ RCAR_GP_PIN(1, 27),
+};
+static const unsigned int msiof4_rxd_mux[] = {
+ MSIOF4_RXD_MARK,
+};
+
+/* - MSIOF5 ----------------------------------------------------------------- */
+static const unsigned int msiof5_clk_pins[] = {
+ /* MSIOF5_SCK */
+ RCAR_GP_PIN(0, 11),
+};
+static const unsigned int msiof5_clk_mux[] = {
+ MSIOF5_SCK_MARK,
+};
+static const unsigned int msiof5_sync_pins[] = {
+ /* MSIOF5_SYNC */
+ RCAR_GP_PIN(0, 9),
+};
+static const unsigned int msiof5_sync_mux[] = {
+ MSIOF5_SYNC_MARK,
+};
+static const unsigned int msiof5_ss1_pins[] = {
+ /* MSIOF5_SS1 */
+ RCAR_GP_PIN(0, 8),
+};
+static const unsigned int msiof5_ss1_mux[] = {
+ MSIOF5_SS1_MARK,
+};
+static const unsigned int msiof5_ss2_pins[] = {
+ /* MSIOF5_SS2 */
+ RCAR_GP_PIN(0, 7),
+};
+static const unsigned int msiof5_ss2_mux[] = {
+ MSIOF5_SS2_MARK,
+};
+static const unsigned int msiof5_txd_pins[] = {
+ /* MSIOF5_TXD */
+ RCAR_GP_PIN(0, 10),
+};
+static const unsigned int msiof5_txd_mux[] = {
+ MSIOF5_TXD_MARK,
+};
+static const unsigned int msiof5_rxd_pins[] = {
+ /* MSIOF5_RXD */
+ RCAR_GP_PIN(0, 12),
+};
+static const unsigned int msiof5_rxd_mux[] = {
+ MSIOF5_RXD_MARK,
+};
+
+/* - PCIE ------------------------------------------------------------------- */
+static const unsigned int pcie0_clkreq_n_pins[] = {
+ /* PCIE0_CLKREQ_N */
+ RCAR_GP_PIN(4, 21),
+};
+
+static const unsigned int pcie0_clkreq_n_mux[] = {
+ PCIE0_CLKREQ_N_MARK,
+};
+
+static const unsigned int pcie1_clkreq_n_pins[] = {
+ /* PCIE1_CLKREQ_N */
+ RCAR_GP_PIN(4, 22),
+};
+
+static const unsigned int pcie1_clkreq_n_mux[] = {
+ PCIE1_CLKREQ_N_MARK,
+};
+
+/* - PWM0_A ------------------------------------------------------------------- */
+static const unsigned int pwm0_a_pins[] = {
+ /* PWM0_A */
+ RCAR_GP_PIN(1, 15),
+};
+static const unsigned int pwm0_a_mux[] = {
+ PWM0_A_MARK,
+};
+
+/* - PWM1_A ------------------------------------------------------------------- */
+static const unsigned int pwm1_a_pins[] = {
+ /* PWM1_A */
+ RCAR_GP_PIN(3, 13),
+};
+static const unsigned int pwm1_a_mux[] = {
+ PWM1_A_MARK,
+};
+
+/* - PWM1_B ------------------------------------------------------------------- */
+static const unsigned int pwm1_b_pins[] = {
+ /* PWM1_B */
+ RCAR_GP_PIN(2, 13),
+};
+static const unsigned int pwm1_b_mux[] = {
+ PWM1_B_MARK,
+};
+
+/* - PWM2_B ------------------------------------------------------------------- */
+static const unsigned int pwm2_b_pins[] = {
+ /* PWM2_B */
+ RCAR_GP_PIN(2, 14),
+};
+static const unsigned int pwm2_b_mux[] = {
+ PWM2_B_MARK,
+};
+
+/* - PWM3_A ------------------------------------------------------------------- */
+static const unsigned int pwm3_a_pins[] = {
+ /* PWM3_A */
+ RCAR_GP_PIN(1, 22),
+};
+static const unsigned int pwm3_a_mux[] = {
+ PWM3_A_MARK,
+};
+
+/* - PWM3_B ------------------------------------------------------------------- */
+static const unsigned int pwm3_b_pins[] = {
+ /* PWM3_B */
+ RCAR_GP_PIN(2, 15),
+};
+static const unsigned int pwm3_b_mux[] = {
+ PWM3_B_MARK,
+};
+
+/* - PWM4 ------------------------------------------------------------------- */
+static const unsigned int pwm4_pins[] = {
+ /* PWM4 */
+ RCAR_GP_PIN(2, 16),
+};
+static const unsigned int pwm4_mux[] = {
+ PWM4_MARK,
+};
+
+/* - PWM5 ------------------------------------------------------------------- */
+static const unsigned int pwm5_pins[] = {
+ /* PWM5 */
+ RCAR_GP_PIN(2, 17),
+};
+static const unsigned int pwm5_mux[] = {
+ PWM5_MARK,
+};
+
+/* - PWM6 ------------------------------------------------------------------- */
+static const unsigned int pwm6_pins[] = {
+ /* PWM6 */
+ RCAR_GP_PIN(2, 18),
+};
+static const unsigned int pwm6_mux[] = {
+ PWM6_MARK,
+};
+
+/* - PWM7 ------------------------------------------------------------------- */
+static const unsigned int pwm7_pins[] = {
+ /* PWM7 */
+ RCAR_GP_PIN(2, 19),
+};
+static const unsigned int pwm7_mux[] = {
+ PWM7_MARK,
+};
+
+/* - PWM8_A ------------------------------------------------------------------- */
+static const unsigned int pwm8_a_pins[] = {
+ /* PWM8_A */
+ RCAR_GP_PIN(1, 13),
+};
+static const unsigned int pwm8_a_mux[] = {
+ PWM8_A_MARK,
+};
+
+/* - PWM9_A ------------------------------------------------------------------- */
+static const unsigned int pwm9_a_pins[] = {
+ /* PWM9_A */
+ RCAR_GP_PIN(1, 14),
+};
+static const unsigned int pwm9_a_mux[] = {
+ PWM9_A_MARK,
+};
+
+/* - QSPI0 ------------------------------------------------------------------ */
+static const unsigned int qspi0_ctrl_pins[] = {
+ /* SPCLK, SSL */
+ RCAR_GP_PIN(3, 20), RCAR_GP_PIN(3, 15),
+};
+static const unsigned int qspi0_ctrl_mux[] = {
+ QSPI0_SPCLK_MARK, QSPI0_SSL_MARK,
+};
+static const unsigned int qspi0_data_pins[] = {
+ /* MOSI_IO0, MISO_IO1, IO2, IO3 */
+ RCAR_GP_PIN(3, 19), RCAR_GP_PIN(3, 18),
+ RCAR_GP_PIN(3, 17), RCAR_GP_PIN(3, 16),
+};
+static const unsigned int qspi0_data_mux[] = {
+ QSPI0_MOSI_IO0_MARK, QSPI0_MISO_IO1_MARK,
+ QSPI0_IO2_MARK, QSPI0_IO3_MARK
+};
+
+/* - QSPI1 ------------------------------------------------------------------ */
+static const unsigned int qspi1_ctrl_pins[] = {
+ /* SPCLK, SSL */
+ RCAR_GP_PIN(3, 22), RCAR_GP_PIN(3, 25),
+};
+static const unsigned int qspi1_ctrl_mux[] = {
+ QSPI1_SPCLK_MARK, QSPI1_SSL_MARK,
+};
+static const unsigned int qspi1_data_pins[] = {
+ /* MOSI_IO0, MISO_IO1, IO2, IO3 */
+ RCAR_GP_PIN(3, 21), RCAR_GP_PIN(3, 23),
+ RCAR_GP_PIN(3, 24), RCAR_GP_PIN(3, 26),
+};
+static const unsigned int qspi1_data_mux[] = {
+ QSPI1_MOSI_IO0_MARK, QSPI1_MISO_IO1_MARK,
+ QSPI1_IO2_MARK, QSPI1_IO3_MARK
+};
+
+/* - SCIF0 ------------------------------------------------------------------ */
+static const unsigned int scif0_data_pins[] = {
+ /* RX0, TX0 */
+ RCAR_GP_PIN(1, 16), RCAR_GP_PIN(1, 12),
+};
+static const unsigned int scif0_data_mux[] = {
+ RX0_MARK, TX0_MARK,
+};
+static const unsigned int scif0_clk_pins[] = {
+ /* SCK0 */
+ RCAR_GP_PIN(1, 15),
+};
+static const unsigned int scif0_clk_mux[] = {
+ SCK0_MARK,
+};
+static const unsigned int scif0_ctrl_pins[] = {
+ /* RTS0_N, CTS0_N */
+ RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13),
+};
+static const unsigned int scif0_ctrl_mux[] = {
+ RTS0_N_MARK, CTS0_N_MARK,
+};
+
+/* - SCIF1 ------------------------------------------------------------------ */
+static const unsigned int scif1_data_pins[] = {
+ /* RX1, TX1 */
+ RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14),
+};
+static const unsigned int scif1_data_mux[] = {
+ RX1_MARK, TX1_MARK,
+};
+static const unsigned int scif1_clk_pins[] = {
+ /* SCK1 */
+ RCAR_GP_PIN(0, 18),
+};
+static const unsigned int scif1_clk_mux[] = {
+ SCK1_MARK,
+};
+static const unsigned int scif1_ctrl_pins[] = {
+ /* RTS1_N, CTS1_N */
+ RCAR_GP_PIN(0, 17), RCAR_GP_PIN(0, 16),
+};
+static const unsigned int scif1_ctrl_mux[] = {
+ RTS1_N_MARK, CTS1_N_MARK,
+};
+
+/* - SCIF1_X ------------------------------------------------------------------ */
+static const unsigned int scif1_data_x_pins[] = {
+ /* RX1_X, TX1_X */
+ RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6),
+};
+static const unsigned int scif1_data_x_mux[] = {
+ RX1_X_MARK, TX1_X_MARK,
+};
+static const unsigned int scif1_clk_x_pins[] = {
+ /* SCK1_X */
+ RCAR_GP_PIN(1, 10),
+};
+static const unsigned int scif1_clk_x_mux[] = {
+ SCK1_X_MARK,
+};
+static const unsigned int scif1_ctrl_x_pins[] = {
+ /* RTS1_N_X, CTS1_N_X */
+ RCAR_GP_PIN(1, 9), RCAR_GP_PIN(1, 8),
+};
+static const unsigned int scif1_ctrl_x_mux[] = {
+ RTS1_N_X_MARK, CTS1_N_X_MARK,
+};
+
+/* - SCIF3 ------------------------------------------------------------------ */
+static const unsigned int scif3_data_pins[] = {
+ /* RX3, TX3 */
+ RCAR_GP_PIN(1, 1), RCAR_GP_PIN(1, 0),
+};
+static const unsigned int scif3_data_mux[] = {
+ RX3_MARK, TX3_MARK,
+};
+static const unsigned int scif3_clk_pins[] = {
+ /* SCK3 */
+ RCAR_GP_PIN(1, 4),
+};
+static const unsigned int scif3_clk_mux[] = {
+ SCK3_MARK,
+};
+static const unsigned int scif3_ctrl_pins[] = {
+ /* RTS3_N, CTS3_N */
+ RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 3),
+};
+static const unsigned int scif3_ctrl_mux[] = {
+ RTS3_N_MARK, CTS3_N_MARK,
+};
+
+/* - SCIF3_A ------------------------------------------------------------------ */
+static const unsigned int scif3_data_a_pins[] = {
+ /* RX3_A, TX3_A */
+ RCAR_GP_PIN(1, 27), RCAR_GP_PIN(1, 28),
+};
+static const unsigned int scif3_data_a_mux[] = {
+ RX3_A_MARK, TX3_A_MARK,
+};
+static const unsigned int scif3_clk_a_pins[] = {
+ /* SCK3_A */
+ RCAR_GP_PIN(1, 24),
+};
+static const unsigned int scif3_clk_a_mux[] = {
+ SCK3_A_MARK,
+};
+static const unsigned int scif3_ctrl_a_pins[] = {
+ /* RTS3_N_A, CTS3_N_A */
+ RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25),
+};
+static const unsigned int scif3_ctrl_a_mux[] = {
+ RTS3_N_A_MARK, CTS3_N_A_MARK,
+};
+
+/* - SCIF4 ------------------------------------------------------------------ */
+static const unsigned int scif4_data_pins[] = {
+ /* RX4, TX4 */
+ RCAR_GP_PIN(8, 13), RCAR_GP_PIN(8, 12),
+};
+static const unsigned int scif4_data_mux[] = {
+ RX4_MARK, TX4_MARK,
+};
+static const unsigned int scif4_clk_pins[] = {
+ /* SCK4 */
+ RCAR_GP_PIN(8, 8),
+};
+static const unsigned int scif4_clk_mux[] = {
+ SCK4_MARK,
+};
+static const unsigned int scif4_ctrl_pins[] = {
+ /* RTS4_N, CTS4_N */
+ RCAR_GP_PIN(8, 10), RCAR_GP_PIN(8, 9),
+};
+static const unsigned int scif4_ctrl_mux[] = {
+ RTS4_N_MARK, CTS4_N_MARK,
+};
+
+/* - SCIF Clock ------------------------------------------------------------- */
+static const unsigned int scif_clk_pins[] = {
+ /* SCIF_CLK */
+ RCAR_GP_PIN(1, 17),
+};
+static const unsigned int scif_clk_mux[] = {
+ SCIF_CLK_MARK,
+};
+
+/* - TPU ------------------------------------------------------------------- */
+static const unsigned int tpu_to0_pins[] = {
+ /* TPU0TO0 */
+ RCAR_GP_PIN(2, 8),
+};
+static const unsigned int tpu_to0_mux[] = {
+ TPU0TO0_MARK,
+};
+static const unsigned int tpu_to1_pins[] = {
+ /* TPU0TO1 */
+ RCAR_GP_PIN(2, 7),
+};
+static const unsigned int tpu_to1_mux[] = {
+ TPU0TO1_MARK,
+};
+static const unsigned int tpu_to2_pins[] = {
+ /* TPU0TO2 */
+ RCAR_GP_PIN(2, 12),
+};
+static const unsigned int tpu_to2_mux[] = {
+ TPU0TO2_MARK,
+};
+static const unsigned int tpu_to3_pins[] = {
+ /* TPU0TO3 */
+ RCAR_GP_PIN(2, 13),
+};
+static const unsigned int tpu_to3_mux[] = {
+ TPU0TO3_MARK,
+};
+
+/* - TPU_A ------------------------------------------------------------------- */
+static const unsigned int tpu_to0_a_pins[] = {
+ /* TPU0TO0_A */
+ RCAR_GP_PIN(1, 25),
+};
+static const unsigned int tpu_to0_a_mux[] = {
+ TPU0TO0_A_MARK,
+};
+static const unsigned int tpu_to1_a_pins[] = {
+ /* TPU0TO1_A */
+ RCAR_GP_PIN(1, 26),
+};
+static const unsigned int tpu_to1_a_mux[] = {
+ TPU0TO1_A_MARK,
+};
+static const unsigned int tpu_to2_a_pins[] = {
+ /* TPU0TO2_A */
+ RCAR_GP_PIN(2, 0),
+};
+static const unsigned int tpu_to2_a_mux[] = {
+ TPU0TO2_A_MARK,
+};
+static const unsigned int tpu_to3_a_pins[] = {
+ /* TPU0TO3_A */
+ RCAR_GP_PIN(2, 1),
+};
+static const unsigned int tpu_to3_a_mux[] = {
+ TPU0TO3_A_MARK,
+};
+
+/* - TSN0 ------------------------------------------------ */
+static const unsigned int tsn0_link_pins[] = {
+ /* TSN0_LINK */
+ RCAR_GP_PIN(4, 4),
+};
+static const unsigned int tsn0_link_mux[] = {
+ TSN0_LINK_MARK,
+};
+static const unsigned int tsn0_phy_int_pins[] = {
+ /* TSN0_PHY_INT */
+ RCAR_GP_PIN(4, 3),
+};
+static const unsigned int tsn0_phy_int_mux[] = {
+ TSN0_PHY_INT_MARK,
+};
+static const unsigned int tsn0_mdio_pins[] = {
+ /* TSN0_MDC, TSN0_MDIO */
+ RCAR_GP_PIN(4, 1), RCAR_GP_PIN(4, 0),
+};
+static const unsigned int tsn0_mdio_mux[] = {
+ TSN0_MDC_MARK, TSN0_MDIO_MARK,
+};
+static const unsigned int tsn0_rgmii_pins[] = {
+ /*
+ * TSN0_TX_CTL, TSN0_TXC, TSN0_TD0, TSN0_TD1, TSN0_TD2, TSN0_TD3,
+ * TSN0_RX_CTL, TSN0_RXC, TSN0_RD0, TSN0_RD1, TSN0_RD2, TSN0_RD3,
+ */
+ RCAR_GP_PIN(4, 9), RCAR_GP_PIN(4, 12),
+ RCAR_GP_PIN(4, 15), RCAR_GP_PIN(4, 14),
+ RCAR_GP_PIN(4, 19), RCAR_GP_PIN(4, 18),
+ RCAR_GP_PIN(4, 7), RCAR_GP_PIN(4, 11),
+ RCAR_GP_PIN(4, 10), RCAR_GP_PIN(4, 13),
+ RCAR_GP_PIN(4, 17), RCAR_GP_PIN(4, 16),
+};
+static const unsigned int tsn0_rgmii_mux[] = {
+ TSN0_TX_CTL_MARK, TSN0_TXC_MARK,
+ TSN0_TD0_MARK, TSN0_TD1_MARK,
+ TSN0_TD2_MARK, TSN0_TD3_MARK,
+ TSN0_RX_CTL_MARK, TSN0_RXC_MARK,
+ TSN0_RD0_MARK, TSN0_RD1_MARK,
+ TSN0_RD2_MARK, TSN0_RD3_MARK,
+};
+static const unsigned int tsn0_txcrefclk_pins[] = {
+ /* TSN0_TXCREFCLK */
+ RCAR_GP_PIN(4, 20),
+};
+static const unsigned int tsn0_txcrefclk_mux[] = {
+ TSN0_TXCREFCLK_MARK,
+};
+static const unsigned int tsn0_avtp_pps_pins[] = {
+ /* TSN0_AVTP_PPS0, TSN0_AVTP_PPS1 */
+ RCAR_GP_PIN(4, 8), RCAR_GP_PIN(4, 2),
+};
+static const unsigned int tsn0_avtp_pps_mux[] = {
+ TSN0_AVTP_PPS0_MARK, TSN0_AVTP_PPS1_MARK,
+};
+static const unsigned int tsn0_avtp_capture_pins[] = {
+ /* TSN0_AVTP_CAPTURE */
+ RCAR_GP_PIN(4, 6),
+};
+static const unsigned int tsn0_avtp_capture_mux[] = {
+ TSN0_AVTP_CAPTURE_MARK,
+};
+static const unsigned int tsn0_avtp_match_pins[] = {
+ /* TSN0_AVTP_MATCH */
+ RCAR_GP_PIN(4, 5),
+};
+static const unsigned int tsn0_avtp_match_mux[] = {
+ TSN0_AVTP_MATCH_MARK,
+};
+
+static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(avb0_link),
+ SH_PFC_PIN_GROUP(avb0_magic),
+ SH_PFC_PIN_GROUP(avb0_phy_int),
+ SH_PFC_PIN_GROUP(avb0_mdio),
+ SH_PFC_PIN_GROUP(avb0_rgmii),
+ SH_PFC_PIN_GROUP(avb0_txcrefclk),
+ SH_PFC_PIN_GROUP(avb0_avtp_pps),
+ SH_PFC_PIN_GROUP(avb0_avtp_capture),
+ SH_PFC_PIN_GROUP(avb0_avtp_match),
+
+ SH_PFC_PIN_GROUP(avb1_link),
+ SH_PFC_PIN_GROUP(avb1_magic),
+ SH_PFC_PIN_GROUP(avb1_phy_int),
+ SH_PFC_PIN_GROUP(avb1_mdio),
+ SH_PFC_PIN_GROUP(avb1_rgmii),
+ SH_PFC_PIN_GROUP(avb1_txcrefclk),
+ SH_PFC_PIN_GROUP(avb1_avtp_pps),
+ SH_PFC_PIN_GROUP(avb1_avtp_capture),
+ SH_PFC_PIN_GROUP(avb1_avtp_match),
+
+ SH_PFC_PIN_GROUP(avb2_link),
+ SH_PFC_PIN_GROUP(avb2_magic),
+ SH_PFC_PIN_GROUP(avb2_phy_int),
+ SH_PFC_PIN_GROUP(avb2_mdio),
+ SH_PFC_PIN_GROUP(avb2_rgmii),
+ SH_PFC_PIN_GROUP(avb2_txcrefclk),
+ SH_PFC_PIN_GROUP(avb2_avtp_pps),
+ SH_PFC_PIN_GROUP(avb2_avtp_capture),
+ SH_PFC_PIN_GROUP(avb2_avtp_match),
+
+ SH_PFC_PIN_GROUP(canfd0_data),
+ SH_PFC_PIN_GROUP(canfd1_data),
+ SH_PFC_PIN_GROUP(canfd2_data),
+ SH_PFC_PIN_GROUP(canfd3_data),
+ SH_PFC_PIN_GROUP(canfd4_data),
+ SH_PFC_PIN_GROUP(canfd5_data), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(canfd5_data_b), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(canfd6_data),
+ SH_PFC_PIN_GROUP(canfd7_data),
+ SH_PFC_PIN_GROUP(can_clk),
+
+ SH_PFC_PIN_GROUP(hscif0_data),
+ SH_PFC_PIN_GROUP(hscif0_clk),
+ SH_PFC_PIN_GROUP(hscif0_ctrl),
+ SH_PFC_PIN_GROUP(hscif1_data), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif1_clk), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif1_ctrl), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif1_data_x), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif1_clk_x), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif1_ctrl_x), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif2_data),
+ SH_PFC_PIN_GROUP(hscif2_clk),
+ SH_PFC_PIN_GROUP(hscif2_ctrl),
+ SH_PFC_PIN_GROUP(hscif3_data), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif3_clk), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif3_ctrl), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif3_data_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif3_clk_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif3_ctrl_a), /* suffix might be updated */
+
+ SH_PFC_PIN_GROUP(i2c0),
+ SH_PFC_PIN_GROUP(i2c1),
+ SH_PFC_PIN_GROUP(i2c2),
+ SH_PFC_PIN_GROUP(i2c3),
+ SH_PFC_PIN_GROUP(i2c4),
+ SH_PFC_PIN_GROUP(i2c5),
+
+ BUS_DATA_PIN_GROUP(mmc_data, 1),
+ BUS_DATA_PIN_GROUP(mmc_data, 4),
+ BUS_DATA_PIN_GROUP(mmc_data, 8),
+ SH_PFC_PIN_GROUP(mmc_ctrl),
+ SH_PFC_PIN_GROUP(mmc_cd),
+ SH_PFC_PIN_GROUP(mmc_wp),
+ SH_PFC_PIN_GROUP(mmc_ds),
+
+ SH_PFC_PIN_GROUP(msiof0_clk),
+ SH_PFC_PIN_GROUP(msiof0_sync),
+ SH_PFC_PIN_GROUP(msiof0_ss1),
+ SH_PFC_PIN_GROUP(msiof0_ss2),
+ SH_PFC_PIN_GROUP(msiof0_txd),
+ SH_PFC_PIN_GROUP(msiof0_rxd),
+
+ SH_PFC_PIN_GROUP(msiof1_clk),
+ SH_PFC_PIN_GROUP(msiof1_sync),
+ SH_PFC_PIN_GROUP(msiof1_ss1),
+ SH_PFC_PIN_GROUP(msiof1_ss2),
+ SH_PFC_PIN_GROUP(msiof1_txd),
+ SH_PFC_PIN_GROUP(msiof1_rxd),
+
+ SH_PFC_PIN_GROUP(msiof2_clk),
+ SH_PFC_PIN_GROUP(msiof2_sync),
+ SH_PFC_PIN_GROUP(msiof2_ss1),
+ SH_PFC_PIN_GROUP(msiof2_ss2),
+ SH_PFC_PIN_GROUP(msiof2_txd),
+ SH_PFC_PIN_GROUP(msiof2_rxd),
+
+ SH_PFC_PIN_GROUP(msiof3_clk),
+ SH_PFC_PIN_GROUP(msiof3_sync),
+ SH_PFC_PIN_GROUP(msiof3_ss1),
+ SH_PFC_PIN_GROUP(msiof3_ss2),
+ SH_PFC_PIN_GROUP(msiof3_txd),
+ SH_PFC_PIN_GROUP(msiof3_rxd),
+
+ SH_PFC_PIN_GROUP(msiof4_clk),
+ SH_PFC_PIN_GROUP(msiof4_sync),
+ SH_PFC_PIN_GROUP(msiof4_ss1),
+ SH_PFC_PIN_GROUP(msiof4_ss2),
+ SH_PFC_PIN_GROUP(msiof4_txd),
+ SH_PFC_PIN_GROUP(msiof4_rxd),
+
+ SH_PFC_PIN_GROUP(msiof5_clk),
+ SH_PFC_PIN_GROUP(msiof5_sync),
+ SH_PFC_PIN_GROUP(msiof5_ss1),
+ SH_PFC_PIN_GROUP(msiof5_ss2),
+ SH_PFC_PIN_GROUP(msiof5_txd),
+ SH_PFC_PIN_GROUP(msiof5_rxd),
+
+ SH_PFC_PIN_GROUP(pcie0_clkreq_n),
+ SH_PFC_PIN_GROUP(pcie1_clkreq_n),
+
+ SH_PFC_PIN_GROUP(pwm0_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(pwm1_a),
+ SH_PFC_PIN_GROUP(pwm1_b),
+ SH_PFC_PIN_GROUP(pwm2_b), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(pwm3_a),
+ SH_PFC_PIN_GROUP(pwm3_b),
+ SH_PFC_PIN_GROUP(pwm4),
+ SH_PFC_PIN_GROUP(pwm5),
+ SH_PFC_PIN_GROUP(pwm6),
+ SH_PFC_PIN_GROUP(pwm7),
+ SH_PFC_PIN_GROUP(pwm8_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(pwm9_a), /* suffix might be updated */
+
+ SH_PFC_PIN_GROUP(qspi0_ctrl),
+ BUS_DATA_PIN_GROUP(qspi0_data, 2),
+ BUS_DATA_PIN_GROUP(qspi0_data, 4),
+ SH_PFC_PIN_GROUP(qspi1_ctrl),
+ BUS_DATA_PIN_GROUP(qspi1_data, 2),
+ BUS_DATA_PIN_GROUP(qspi1_data, 4),
+
+ SH_PFC_PIN_GROUP(scif0_data),
+ SH_PFC_PIN_GROUP(scif0_clk),
+ SH_PFC_PIN_GROUP(scif0_ctrl),
+ SH_PFC_PIN_GROUP(scif1_data), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif1_clk), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif1_ctrl), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif1_data_x), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif1_clk_x), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif1_ctrl_x), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif3_data), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif3_clk), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif3_ctrl), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif3_data_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif3_clk_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif3_ctrl_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif4_data),
+ SH_PFC_PIN_GROUP(scif4_clk),
+ SH_PFC_PIN_GROUP(scif4_ctrl),
+ SH_PFC_PIN_GROUP(scif_clk),
+
+ SH_PFC_PIN_GROUP(tpu_to0), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(tpu_to0_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(tpu_to1), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(tpu_to1_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(tpu_to2), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(tpu_to2_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(tpu_to3), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(tpu_to3_a), /* suffix might be updated */
+
+ SH_PFC_PIN_GROUP(tsn0_link),
+ SH_PFC_PIN_GROUP(tsn0_phy_int),
+ SH_PFC_PIN_GROUP(tsn0_mdio),
+ SH_PFC_PIN_GROUP(tsn0_rgmii),
+ SH_PFC_PIN_GROUP(tsn0_txcrefclk),
+ SH_PFC_PIN_GROUP(tsn0_avtp_pps),
+ SH_PFC_PIN_GROUP(tsn0_avtp_capture),
+ SH_PFC_PIN_GROUP(tsn0_avtp_match),
+};
+
+static const char * const avb0_groups[] = {
+ "avb0_link",
+ "avb0_magic",
+ "avb0_phy_int",
+ "avb0_mdio",
+ "avb0_rgmii",
+ "avb0_txcrefclk",
+ "avb0_avtp_pps",
+ "avb0_avtp_capture",
+ "avb0_avtp_match",
+};
+
+static const char * const avb1_groups[] = {
+ "avb1_link",
+ "avb1_magic",
+ "avb1_phy_int",
+ "avb1_mdio",
+ "avb1_rgmii",
+ "avb1_txcrefclk",
+ "avb1_avtp_pps",
+ "avb1_avtp_capture",
+ "avb1_avtp_match",
+};
+
+static const char * const avb2_groups[] = {
+ "avb2_link",
+ "avb2_magic",
+ "avb2_phy_int",
+ "avb2_mdio",
+ "avb2_rgmii",
+ "avb2_txcrefclk",
+ "avb2_avtp_pps",
+ "avb2_avtp_capture",
+ "avb2_avtp_match",
+};
+
+static const char * const canfd0_groups[] = {
+ "canfd0_data",
+};
+
+static const char * const canfd1_groups[] = {
+ "canfd1_data",
+};
+
+static const char * const canfd2_groups[] = {
+ "canfd2_data",
+};
+
+static const char * const canfd3_groups[] = {
+ "canfd3_data",
+};
+
+static const char * const canfd4_groups[] = {
+ "canfd4_data",
+};
+
+static const char * const canfd5_groups[] = {
+ /* suffix might be updated */
+ "canfd5_data",
+ "canfd5_data_b",
+};
+
+static const char * const canfd6_groups[] = {
+ "canfd6_data",
+};
+
+static const char * const canfd7_groups[] = {
+ "canfd7_data",
+};
+
+static const char * const can_clk_groups[] = {
+ "can_clk",
+};
+
+static const char * const hscif0_groups[] = {
+ "hscif0_data",
+ "hscif0_clk",
+ "hscif0_ctrl",
+};
+
+static const char * const hscif1_groups[] = {
+ /* suffix might be updated */
+ "hscif1_data",
+ "hscif1_clk",
+ "hscif1_ctrl",
+ "hscif1_data_x",
+ "hscif1_clk_x",
+ "hscif1_ctrl_x",
+};
+
+static const char * const hscif2_groups[] = {
+ "hscif2_data",
+ "hscif2_clk",
+ "hscif2_ctrl",
+};
+
+static const char * const hscif3_groups[] = {
+ /* suffix might be updated */
+ "hscif3_data",
+ "hscif3_clk",
+ "hscif3_ctrl",
+ "hscif3_data_a",
+ "hscif3_clk_a",
+ "hscif3_ctrl_a",
+};
+
+static const char * const i2c0_groups[] = {
+ "i2c0",
+};
+
+static const char * const i2c1_groups[] = {
+ "i2c1",
+};
+
+static const char * const i2c2_groups[] = {
+ "i2c2",
+};
+
+static const char * const i2c3_groups[] = {
+ "i2c3",
+};
+
+static const char * const i2c4_groups[] = {
+ "i2c4",
+};
+
+static const char * const i2c5_groups[] = {
+ "i2c5",
+};
+
+static const char * const mmc_groups[] = {
+ "mmc_data1",
+ "mmc_data4",
+ "mmc_data8",
+ "mmc_ctrl",
+ "mmc_cd",
+ "mmc_wp",
+ "mmc_ds",
+};
+
+static const char * const msiof0_groups[] = {
+ "msiof0_clk",
+ "msiof0_sync",
+ "msiof0_ss1",
+ "msiof0_ss2",
+ "msiof0_txd",
+ "msiof0_rxd",
+};
+
+static const char * const msiof1_groups[] = {
+ "msiof1_clk",
+ "msiof1_sync",
+ "msiof1_ss1",
+ "msiof1_ss2",
+ "msiof1_txd",
+ "msiof1_rxd",
+};
+
+static const char * const msiof2_groups[] = {
+ "msiof2_clk",
+ "msiof2_sync",
+ "msiof2_ss1",
+ "msiof2_ss2",
+ "msiof2_txd",
+ "msiof2_rxd",
+};
+
+static const char * const msiof3_groups[] = {
+ "msiof3_clk",
+ "msiof3_sync",
+ "msiof3_ss1",
+ "msiof3_ss2",
+ "msiof3_txd",
+ "msiof3_rxd",
+};
+
+static const char * const msiof4_groups[] = {
+ "msiof4_clk",
+ "msiof4_sync",
+ "msiof4_ss1",
+ "msiof4_ss2",
+ "msiof4_txd",
+ "msiof4_rxd",
+};
+
+static const char * const msiof5_groups[] = {
+ "msiof5_clk",
+ "msiof5_sync",
+ "msiof5_ss1",
+ "msiof5_ss2",
+ "msiof5_txd",
+ "msiof5_rxd",
+};
+
+static const char * const pcie_groups[] = {
+ "pcie0_clkreq_n",
+ "pcie1_clkreq_n",
+};
+
+static const char * const pwm0_groups[] = {
+ /* suffix might be updated */
+ "pwm0_a",
+};
+
+static const char * const pwm1_groups[] = {
+ "pwm1_a",
+ "pwm1_b",
+};
+
+static const char * const pwm2_groups[] = {
+ /* suffix might be updated */
+ "pwm2_b",
+};
+
+static const char * const pwm3_groups[] = {
+ "pwm3_a",
+ "pwm3_b",
+};
+
+static const char * const pwm4_groups[] = {
+ "pwm4",
+};
+
+static const char * const pwm5_groups[] = {
+ "pwm5",
+};
+
+static const char * const pwm6_groups[] = {
+ "pwm6",
+};
+
+static const char * const pwm7_groups[] = {
+ "pwm7",
+};
+
+static const char * const pwm8_groups[] = {
+ /* suffix might be updated */
+ "pwm8_a",
+};
+
+static const char * const pwm9_groups[] = {
+ /* suffix might be updated */
+ "pwm9_a",
+};
+
+static const char * const qspi0_groups[] = {
+ "qspi0_ctrl",
+ "qspi0_data2",
+ "qspi0_data4",
+};
+
+static const char * const qspi1_groups[] = {
+ "qspi1_ctrl",
+ "qspi1_data2",
+ "qspi1_data4",
+};
+
+static const char * const scif0_groups[] = {
+ "scif0_data",
+ "scif0_clk",
+ "scif0_ctrl",
+};
+
+static const char * const scif1_groups[] = {
+ /* suffix might be updated */
+ "scif1_data",
+ "scif1_clk",
+ "scif1_ctrl",
+ "scif1_data_x",
+ "scif1_clk_x",
+ "scif1_ctrl_x",
+};
+
+static const char * const scif3_groups[] = {
+ /* suffix might be updated */
+ "scif3_data",
+ "scif3_clk",
+ "scif3_ctrl",
+ "scif3_data_a",
+ "scif3_clk_a",
+ "scif3_ctrl_a",
+};
+
+static const char * const scif4_groups[] = {
+ "scif4_data",
+ "scif4_clk",
+ "scif4_ctrl",
+};
+
+static const char * const scif_clk_groups[] = {
+ "scif_clk",
+};
+
+static const char * const tpu_groups[] = {
+ /* suffix might be updated */
+ "tpu_to0",
+ "tpu_to0_a",
+ "tpu_to1",
+ "tpu_to1_a",
+ "tpu_to2",
+ "tpu_to2_a",
+ "tpu_to3",
+ "tpu_to3_a",
+};
+
+static const char * const tsn0_groups[] = {
+ "tsn0_link",
+ "tsn0_phy_int",
+ "tsn0_mdio",
+ "tsn0_rgmii",
+ "tsn0_txcrefclk",
+ "tsn0_avtp_pps",
+ "tsn0_avtp_capture",
+ "tsn0_avtp_match",
+};
+
+static const struct sh_pfc_function pinmux_functions[] = {
+ SH_PFC_FUNCTION(avb0),
+ SH_PFC_FUNCTION(avb1),
+ SH_PFC_FUNCTION(avb2),
+
+ SH_PFC_FUNCTION(canfd0),
+ SH_PFC_FUNCTION(canfd1),
+ SH_PFC_FUNCTION(canfd2),
+ SH_PFC_FUNCTION(canfd3),
+ SH_PFC_FUNCTION(canfd4),
+ SH_PFC_FUNCTION(canfd5),
+ SH_PFC_FUNCTION(canfd6),
+ SH_PFC_FUNCTION(canfd7),
+ SH_PFC_FUNCTION(can_clk),
+
+ SH_PFC_FUNCTION(hscif0),
+ SH_PFC_FUNCTION(hscif1),
+ SH_PFC_FUNCTION(hscif2),
+ SH_PFC_FUNCTION(hscif3),
+
+ SH_PFC_FUNCTION(i2c0),
+ SH_PFC_FUNCTION(i2c1),
+ SH_PFC_FUNCTION(i2c2),
+ SH_PFC_FUNCTION(i2c3),
+ SH_PFC_FUNCTION(i2c4),
+ SH_PFC_FUNCTION(i2c5),
+
+ SH_PFC_FUNCTION(mmc),
+
+ SH_PFC_FUNCTION(msiof0),
+ SH_PFC_FUNCTION(msiof1),
+ SH_PFC_FUNCTION(msiof2),
+ SH_PFC_FUNCTION(msiof3),
+ SH_PFC_FUNCTION(msiof4),
+ SH_PFC_FUNCTION(msiof5),
+
+ SH_PFC_FUNCTION(pcie),
+
+ SH_PFC_FUNCTION(pwm0),
+ SH_PFC_FUNCTION(pwm1),
+ SH_PFC_FUNCTION(pwm2),
+ SH_PFC_FUNCTION(pwm3),
+ SH_PFC_FUNCTION(pwm4),
+ SH_PFC_FUNCTION(pwm5),
+ SH_PFC_FUNCTION(pwm6),
+ SH_PFC_FUNCTION(pwm7),
+ SH_PFC_FUNCTION(pwm8),
+ SH_PFC_FUNCTION(pwm9),
+
+ SH_PFC_FUNCTION(qspi0),
+ SH_PFC_FUNCTION(qspi1),
+
+ SH_PFC_FUNCTION(scif0),
+ SH_PFC_FUNCTION(scif1),
+ SH_PFC_FUNCTION(scif3),
+ SH_PFC_FUNCTION(scif4),
+ SH_PFC_FUNCTION(scif_clk),
+
+ SH_PFC_FUNCTION(tpu),
+
+ SH_PFC_FUNCTION(tsn0),
+};
+
+static const struct pinmux_cfg_reg pinmux_config_regs[] = {
+#define F_(x, y) FN_##y
+#define FM(x) FN_##x
+ { PINMUX_CFG_REG_VAR("GPSR0", 0xE6050040, 32,
+ GROUP(-13, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP0_31_19 RESERVED */
+ GP_0_18_FN, GPSR0_18,
+ GP_0_17_FN, GPSR0_17,
+ GP_0_16_FN, GPSR0_16,
+ GP_0_15_FN, GPSR0_15,
+ GP_0_14_FN, GPSR0_14,
+ GP_0_13_FN, GPSR0_13,
+ GP_0_12_FN, GPSR0_12,
+ GP_0_11_FN, GPSR0_11,
+ GP_0_10_FN, GPSR0_10,
+ GP_0_9_FN, GPSR0_9,
+ GP_0_8_FN, GPSR0_8,
+ GP_0_7_FN, GPSR0_7,
+ GP_0_6_FN, GPSR0_6,
+ GP_0_5_FN, GPSR0_5,
+ GP_0_4_FN, GPSR0_4,
+ GP_0_3_FN, GPSR0_3,
+ GP_0_2_FN, GPSR0_2,
+ GP_0_1_FN, GPSR0_1,
+ GP_0_0_FN, GPSR0_0, ))
+ },
+ { PINMUX_CFG_REG("GPSR1", 0xE6050840, 32, 1, GROUP(
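+ /* GP1_31_29 RESERVED */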
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_1_28_FN, GPSR1_28,
+ GP_1_27_FN, GPSR1_27,
+ GP_1_26_FN, GPSR1_26,
+ GP_1_25_FN, GPSR1_25,
+ GP_1_24_FN, GPSR1_24,
+ GP_1_23_FN, GPSR1_23,
+ GP_1_22_FN, GPSR1_22,
+ GP_1_21_FN, GPSR1_21,
+ GP_1_20_FN, GPSR1_20,
+ GP_1_19_FN, GPSR1_19,
+ GP_1_18_FN, GPSR1_18,
+ GP_1_17_FN, GPSR1_17,
+ GP_1_16_FN, GPSR1_16,
+ GP_1_15_FN, GPSR1_15,
+ GP_1_14_FN, GPSR1_14,
+ GP_1_13_FN, GPSR1_13,
+ GP_1_12_FN, GPSR1_12,
+ GP_1_11_FN, GPSR1_11,
+ GP_1_10_FN, GPSR1_10,
+ GP_1_9_FN, GPSR1_9,
+ GP_1_8_FN, GPSR1_8,
+ GP_1_7_FN, GPSR1_7,
+ GP_1_6_FN, GPSR1_6,
+ GP_1_5_FN, GPSR1_5,
+ GP_1_4_FN, GPSR1_4,
+ GP_1_3_FN, GPSR1_3,
+ GP_1_2_FN, GPSR1_2,
+ GP_1_1_FN, GPSR1_1,
+ GP_1_0_FN, GPSR1_0, ))
+ },
+ { PINMUX_CFG_REG_VAR("GPSR2", 0xE6058040, 32,
+ GROUP(-12, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP2_31_20 RESERVED */
+ GP_2_19_FN, GPSR2_19,
+ GP_2_18_FN, GPSR2_18,
+ GP_2_17_FN, GPSR2_17,
+ GP_2_16_FN, GPSR2_16,
+ GP_2_15_FN, GPSR2_15,
+ GP_2_14_FN, GPSR2_14,
+ GP_2_13_FN, GPSR2_13,
+ GP_2_12_FN, GPSR2_12,
+ GP_2_11_FN, GPSR2_11,
+ GP_2_10_FN, GPSR2_10,
+ GP_2_9_FN, GPSR2_9,
+ GP_2_8_FN, GPSR2_8,
+ GP_2_7_FN, GPSR2_7,
+ GP_2_6_FN, GPSR2_6,
+ GP_2_5_FN, GPSR2_5,
+ GP_2_4_FN, GPSR2_4,
+ GP_2_3_FN, GPSR2_3,
+ GP_2_2_FN, GPSR2_2,
+ GP_2_1_FN, GPSR2_1,
+ GP_2_0_FN, GPSR2_0, ))
+ },
+ { PINMUX_CFG_REG("GPSR3", 0xE6058840, 32, 1, GROUP(
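+ /* GP3_31_30 RESERVED */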
+ 0, 0,
+ 0, 0,
+ GP_3_29_FN, GPSR3_29,
+ GP_3_28_FN, GPSR3_28,
+ GP_3_27_FN, GPSR3_27,
+ GP_3_26_FN, GPSR3_26,
+ GP_3_25_FN, GPSR3_25,
+ GP_3_24_FN, GPSR3_24,
+ GP_3_23_FN, GPSR3_23,
+ GP_3_22_FN, GPSR3_22,
+ GP_3_21_FN, GPSR3_21,
+ GP_3_20_FN, GPSR3_20,
+ GP_3_19_FN, GPSR3_19,
+ GP_3_18_FN, GPSR3_18,
+ GP_3_17_FN, GPSR3_17,
+ GP_3_16_FN, GPSR3_16,
+ GP_3_15_FN, GPSR3_15,
+ GP_3_14_FN, GPSR3_14,
+ GP_3_13_FN, GPSR3_13,
+ GP_3_12_FN, GPSR3_12,
+ GP_3_11_FN, GPSR3_11,
+ GP_3_10_FN, GPSR3_10,
+ GP_3_9_FN, GPSR3_9,
+ GP_3_8_FN, GPSR3_8,
+ GP_3_7_FN, GPSR3_7,
+ GP_3_6_FN, GPSR3_6,
+ GP_3_5_FN, GPSR3_5,
+ GP_3_4_FN, GPSR3_4,
+ GP_3_3_FN, GPSR3_3,
+ GP_3_2_FN, GPSR3_2,
+ GP_3_1_FN, GPSR3_1,
+ GP_3_0_FN, GPSR3_0, ))
+ },
+ { PINMUX_CFG_REG("GPSR4", 0xE6060040, 32, 1, GROUP(
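+ /* GP4_31_25 RESERVED */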
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_4_24_FN, GPSR4_24,
+ GP_4_23_FN, GPSR4_23,
+ GP_4_22_FN, GPSR4_22,
+ GP_4_21_FN, GPSR4_21,
+ GP_4_20_FN, GPSR4_20,
+ GP_4_19_FN, GPSR4_19,
+ GP_4_18_FN, GPSR4_18,
+ GP_4_17_FN, GPSR4_17,
+ GP_4_16_FN, GPSR4_16,
+ GP_4_15_FN, GPSR4_15,
+ GP_4_14_FN, GPSR4_14,
+ GP_4_13_FN, GPSR4_13,
+ GP_4_12_FN, GPSR4_12,
+ GP_4_11_FN, GPSR4_11,
+ GP_4_10_FN, GPSR4_10,
+ GP_4_9_FN, GPSR4_9,
+ GP_4_8_FN, GPSR4_8,
+ GP_4_7_FN, GPSR4_7,
+ GP_4_6_FN, GPSR4_6,
+ GP_4_5_FN, GPSR4_5,
+ GP_4_4_FN, GPSR4_4,
+ GP_4_3_FN, GPSR4_3,
+ GP_4_2_FN, GPSR4_2,
+ GP_4_1_FN, GPSR4_1,
+ GP_4_0_FN, GPSR4_0, ))
+ },
+ { PINMUX_CFG_REG_VAR("GPSR5", 0xE6060840, 32,
+ GROUP(-11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP5_31_21 RESERVED */
+ GP_5_20_FN, GPSR5_20,
+ GP_5_19_FN, GPSR5_19,
+ GP_5_18_FN, GPSR5_18,
+ GP_5_17_FN, GPSR5_17,
+ GP_5_16_FN, GPSR5_16,
+ GP_5_15_FN, GPSR5_15,
+ GP_5_14_FN, GPSR5_14,
+ GP_5_13_FN, GPSR5_13,
+ GP_5_12_FN, GPSR5_12,
+ GP_5_11_FN, GPSR5_11,
+ GP_5_10_FN, GPSR5_10,
+ GP_5_9_FN, GPSR5_9,
+ GP_5_8_FN, GPSR5_8,
+ GP_5_7_FN, GPSR5_7,
+ GP_5_6_FN, GPSR5_6,
+ GP_5_5_FN, GPSR5_5,
+ GP_5_4_FN, GPSR5_4,
+ GP_5_3_FN, GPSR5_3,
+ GP_5_2_FN, GPSR5_2,
+ GP_5_1_FN, GPSR5_1,
+ GP_5_0_FN, GPSR5_0, ))
+ },
+ { PINMUX_CFG_REG_VAR("GPSR6", 0xE6061040, 32,
+ GROUP(-11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP6_31_21 RESERVED */
+ GP_6_20_FN, GPSR6_20,
+ GP_6_19_FN, GPSR6_19,
+ GP_6_18_FN, GPSR6_18,
+ GP_6_17_FN, GPSR6_17,
+ GP_6_16_FN, GPSR6_16,
+ GP_6_15_FN, GPSR6_15,
+ GP_6_14_FN, GPSR6_14,
+ GP_6_13_FN, GPSR6_13,
+ GP_6_12_FN, GPSR6_12,
+ GP_6_11_FN, GPSR6_11,
+ GP_6_10_FN, GPSR6_10,
+ GP_6_9_FN, GPSR6_9,
+ GP_6_8_FN, GPSR6_8,
+ GP_6_7_FN, GPSR6_7,
+ GP_6_6_FN, GPSR6_6,
+ GP_6_5_FN, GPSR6_5,
+ GP_6_4_FN, GPSR6_4,
+ GP_6_3_FN, GPSR6_3,
+ GP_6_2_FN, GPSR6_2,
+ GP_6_1_FN, GPSR6_1,
+ GP_6_0_FN, GPSR6_0, ))
+ },
+ { PINMUX_CFG_REG_VAR("GPSR7", 0xE6061840, 32,
+ GROUP(-11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP7_31_21 RESERVED */
+ GP_7_20_FN, GPSR7_20,
+ GP_7_19_FN, GPSR7_19,
+ GP_7_18_FN, GPSR7_18,
+ GP_7_17_FN, GPSR7_17,
+ GP_7_16_FN, GPSR7_16,
+ GP_7_15_FN, GPSR7_15,
+ GP_7_14_FN, GPSR7_14,
+ GP_7_13_FN, GPSR7_13,
+ GP_7_12_FN, GPSR7_12,
+ GP_7_11_FN, GPSR7_11,
+ GP_7_10_FN, GPSR7_10,
+ GP_7_9_FN, GPSR7_9,
+ GP_7_8_FN, GPSR7_8,
+ GP_7_7_FN, GPSR7_7,
+ GP_7_6_FN, GPSR7_6,
+ GP_7_5_FN, GPSR7_5,
+ GP_7_4_FN, GPSR7_4,
+ GP_7_3_FN, GPSR7_3,
+ GP_7_2_FN, GPSR7_2,
+ GP_7_1_FN, GPSR7_1,
+ GP_7_0_FN, GPSR7_0, ))
+ },
+ { PINMUX_CFG_REG_VAR("GPSR8", 0xE6068040, 32,
+ GROUP(-18, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* GP8_31_14 RESERVED */
+ GP_8_13_FN, GPSR8_13,
+ GP_8_12_FN, GPSR8_12,
+ GP_8_11_FN, GPSR8_11,
+ GP_8_10_FN, GPSR8_10,
+ GP_8_9_FN, GPSR8_9,
+ GP_8_8_FN, GPSR8_8,
+ GP_8_7_FN, GPSR8_7,
+ GP_8_6_FN, GPSR8_6,
+ GP_8_5_FN, GPSR8_5,
+ GP_8_4_FN, GPSR8_4,
+ GP_8_3_FN, GPSR8_3,
+ GP_8_2_FN, GPSR8_2,
+ GP_8_1_FN, GPSR8_1,
+ GP_8_0_FN, GPSR8_0, ))
+ },
+#undef F_
+#undef FM
+
+#define F_(x, y) x,
+#define FM(x) FN_##x,
+ { PINMUX_CFG_REG("IP0SR0", 0xE6050060, 32, 4, GROUP(
+ IP0SR0_31_28
+ IP0SR0_27_24
+ IP0SR0_23_20
+ IP0SR0_19_16
+ IP0SR0_15_12
+ IP0SR0_11_8
+ IP0SR0_7_4
+ IP0SR0_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR0", 0xE6050064, 32, 4, GROUP(
+ IP1SR0_31_28
+ IP1SR0_27_24
+ IP1SR0_23_20
+ IP1SR0_19_16
+ IP1SR0_15_12
+ IP1SR0_11_8
+ IP1SR0_7_4
+ IP1SR0_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP2SR0", 0xE6050068, 32,
+ GROUP(-20, 4, 4, 4),
+ GROUP(
+ /* IP2SR0_31_12 RESERVED */
+ IP2SR0_11_8
+ IP2SR0_7_4
+ IP2SR0_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR1", 0xE6050860, 32, 4, GROUP(
+ IP0SR1_31_28
+ IP0SR1_27_24
+ IP0SR1_23_20
+ IP0SR1_19_16
+ IP0SR1_15_12
+ IP0SR1_11_8
+ IP0SR1_7_4
+ IP0SR1_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR1", 0xE6050864, 32, 4, GROUP(
+ IP1SR1_31_28
+ IP1SR1_27_24
+ IP1SR1_23_20
+ IP1SR1_19_16
+ IP1SR1_15_12
+ IP1SR1_11_8
+ IP1SR1_7_4
+ IP1SR1_3_0))
+ },
+ { PINMUX_CFG_REG("IP2SR1", 0xE6050868, 32, 4, GROUP(
+ IP2SR1_31_28
+ IP2SR1_27_24
+ IP2SR1_23_20
+ IP2SR1_19_16
+ IP2SR1_15_12
+ IP2SR1_11_8
+ IP2SR1_7_4
+ IP2SR1_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP3SR1", 0xE605086C, 32,
+ GROUP(-12, 4, 4, 4, 4, 4),
+ GROUP(
+ /* IP3SR1_31_20 RESERVED */
+ IP3SR1_19_16
+ IP3SR1_15_12
+ IP3SR1_11_8
+ IP3SR1_7_4
+ IP3SR1_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR2", 0xE6058060, 32, 4, GROUP(
+ IP0SR2_31_28
+ IP0SR2_27_24
+ IP0SR2_23_20
+ IP0SR2_19_16
+ IP0SR2_15_12
+ IP0SR2_11_8
+ IP0SR2_7_4
+ IP0SR2_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR2", 0xE6058064, 32, 4, GROUP(
+ IP1SR2_31_28
+ IP1SR2_27_24
+ IP1SR2_23_20
+ IP1SR2_19_16
+ IP1SR2_15_12
+ IP1SR2_11_8
+ IP1SR2_7_4
+ IP1SR2_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP2SR2", 0xE6058068, 32,
+ GROUP(-16, 4, 4, 4, 4),
+ GROUP(
+ /* IP2SR2_31_16 RESERVED */
+ IP2SR2_15_12
+ IP2SR2_11_8
+ IP2SR2_7_4
+ IP2SR2_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR3", 0xE6058860, 32, 4, GROUP(
+ IP0SR3_31_28
+ IP0SR3_27_24
+ IP0SR3_23_20
+ IP0SR3_19_16
+ IP0SR3_15_12
+ IP0SR3_11_8
+ IP0SR3_7_4
+ IP0SR3_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR3", 0xE6058864, 32, 4, GROUP(
+ IP1SR3_31_28
+ IP1SR3_27_24
+ IP1SR3_23_20
+ IP1SR3_19_16
+ IP1SR3_15_12
+ IP1SR3_11_8
+ IP1SR3_7_4
+ IP1SR3_3_0))
+ },
+ { PINMUX_CFG_REG("IP2SR3", 0xE6058868, 32, 4, GROUP(
+ IP2SR3_31_28
+ IP2SR3_27_24
+ IP2SR3_23_20
+ IP2SR3_19_16
+ IP2SR3_15_12
+ IP2SR3_11_8
+ IP2SR3_7_4
+ IP2SR3_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP3SR3", 0xE605886C, 32,
+ GROUP(-8, 4, 4, 4, 4, 4, 4),
+ GROUP(
+ /* IP3SR3_31_24 RESERVED */
+ IP3SR3_23_20
+ IP3SR3_19_16
+ IP3SR3_15_12
+ IP3SR3_11_8
+ IP3SR3_7_4
+ IP3SR3_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR6", 0xE6061060, 32, 4, GROUP(
+ IP0SR6_31_28
+ IP0SR6_27_24
+ IP0SR6_23_20
+ IP0SR6_19_16
+ IP0SR6_15_12
+ IP0SR6_11_8
+ IP0SR6_7_4
+ IP0SR6_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR6", 0xE6061064, 32, 4, GROUP(
+ IP1SR6_31_28
+ IP1SR6_27_24
+ IP1SR6_23_20
+ IP1SR6_19_16
+ IP1SR6_15_12
+ IP1SR6_11_8
+ IP1SR6_7_4
+ IP1SR6_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP2SR6", 0xE6061068, 32,
+ GROUP(-12, 4, 4, 4, 4, 4),
+ GROUP(
+ /* IP2SR6_31_20 RESERVED */
+ IP2SR6_19_16
+ IP2SR6_15_12
+ IP2SR6_11_8
+ IP2SR6_7_4
+ IP2SR6_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR7", 0xE6061860, 32, 4, GROUP(
+ IP0SR7_31_28
+ IP0SR7_27_24
+ IP0SR7_23_20
+ IP0SR7_19_16
+ IP0SR7_15_12
+ IP0SR7_11_8
+ IP0SR7_7_4
+ IP0SR7_3_0))
+ },
+ { PINMUX_CFG_REG("IP1SR7", 0xE6061864, 32, 4, GROUP(
+ IP1SR7_31_28
+ IP1SR7_27_24
+ IP1SR7_23_20
+ IP1SR7_19_16
+ IP1SR7_15_12
+ IP1SR7_11_8
+ IP1SR7_7_4
+ IP1SR7_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP2SR7", 0xE6061868, 32,
+ GROUP(-12, 4, 4, 4, 4, 4),
+ GROUP(
+ /* IP2SR7_31_20 RESERVED */
+ IP2SR7_19_16
+ IP2SR7_15_12
+ IP2SR7_11_8
+ IP2SR7_7_4
+ IP2SR7_3_0))
+ },
+ { PINMUX_CFG_REG("IP0SR8", 0xE6068060, 32, 4, GROUP(
+ IP0SR8_31_28
+ IP0SR8_27_24
+ IP0SR8_23_20
+ IP0SR8_19_16
+ IP0SR8_15_12
+ IP0SR8_11_8
+ IP0SR8_7_4
+ IP0SR8_3_0))
+ },
+ { PINMUX_CFG_REG_VAR("IP1SR8", 0xE6068064, 32,
+ GROUP(-8, 4, 4, 4, 4, 4, 4),
+ GROUP(
+ /* IP1SR8_31_24 RESERVED */
+ IP1SR8_23_20
+ IP1SR8_19_16
+ IP1SR8_15_12
+ IP1SR8_11_8
+ IP1SR8_7_4
+ IP1SR8_3_0))
+ },
+#undef F_
+#undef FM
+
+#define F_(x, y) x,
+#define FM(x) FN_##x,
+ { PINMUX_CFG_REG_VAR("MOD_SEL4", 0xE6060100, 32,
+ GROUP(-12, 1, 1, -2, 1, 1, -1, 1, -2, 1, 1, -2, 1,
+ -2, 1, 1, -1),
+ GROUP(
+ /* RESERVED 31-20 */
+ MOD_SEL4_19
+ MOD_SEL4_18
+ /* RESERVED 17-16 */
+ MOD_SEL4_15
+ MOD_SEL4_14
+ /* RESERVED 13 */
+ MOD_SEL4_12
+ /* RESERVED 11-10 */
+ MOD_SEL4_9
+ MOD_SEL4_8
+ /* RESERVED 7-6 */
+ MOD_SEL4_5
+ /* RESERVED 4-3 */
+ MOD_SEL4_2
+ MOD_SEL4_1
+ /* RESERVED 0 */
+ ))
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL5", 0xE6060900, 32,
+ GROUP(-12, 1, -2, 1, 1, -2, 1, 1, -2, 1, -1,
+ 1, 1, -2, 1, -1, 1),
+ GROUP(
+ /* RESERVED 31-20 */
+ MOD_SEL5_19
+ /* RESERVED 18-17 */
+ MOD_SEL5_16
+ MOD_SEL5_15
+ /* RESERVED 14-13 */
+ MOD_SEL5_12
+ MOD_SEL5_11
+ /* RESERVED 10-9 */
+ MOD_SEL5_8
+ /* RESERVED 7 */
+ MOD_SEL5_6
+ MOD_SEL5_5
+ /* RESERVED 4-3 */
+ MOD_SEL5_2
+ /* RESERVED 1 */
+ MOD_SEL5_0))
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL6", 0xE6061100, 32,
+ GROUP(-13, 1, -1, 1, -2, 1, 1,
+ -1, 1, -2, 1, 1, 1, -2, 1, 1, -1),
+ GROUP(
+ /* RESERVED 31-19 */
+ MOD_SEL6_18
+ /* RESERVED 17 */
+ MOD_SEL6_16
+ /* RESERVED 15-14 */
+ MOD_SEL6_13
+ MOD_SEL6_12
+ /* RESERVED 11 */
+ MOD_SEL6_10
+ /* RESERVED 9-8 */
+ MOD_SEL6_7
+ MOD_SEL6_6
+ MOD_SEL6_5
+ /* RESERVED 4-3 */
+ MOD_SEL6_2
+ MOD_SEL6_1
+ /* RESERVED 0 */
+ ))
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL7", 0xE6061900, 32,
+ GROUP(-15, 1, 1, -1, 1, -1, 1, 1, -2, 1, 1,
+ -2, 1, 1, -1, 1),
+ GROUP(
+ /* RESERVED 31-17 */
+ MOD_SEL7_16
+ MOD_SEL7_15
+ /* RESERVED 14 */
+ MOD_SEL7_13
+ /* RESERVED 12 */
+ MOD_SEL7_11
+ MOD_SEL7_10
+ /* RESERVED 9-8 */
+ MOD_SEL7_7
+ MOD_SEL7_6
+ /* RESERVED 5-4 */
+ MOD_SEL7_3
+ MOD_SEL7_2
+ /* RESERVED 1 */
+ MOD_SEL7_0))
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL8", 0xE6068100, 32,
+ GROUP(-20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ GROUP(
+ /* RESERVED 31-12 */
+ MOD_SEL8_11
+ MOD_SEL8_10
+ MOD_SEL8_9
+ MOD_SEL8_8
+ MOD_SEL8_7
+ MOD_SEL8_6
+ MOD_SEL8_5
+ MOD_SEL8_4
+ MOD_SEL8_3
+ MOD_SEL8_2
+ MOD_SEL8_1
+ MOD_SEL8_0))
+ },
+ { },
+};
+
+static const struct pinmux_drive_reg pinmux_drive_regs[] = {
+ { PINMUX_DRIVE_REG("DRV0CTRL0", 0xE6050080) {
+ { RCAR_GP_PIN(0, 7), 28, 3 }, /* MSIOF5_SS2 */
+ { RCAR_GP_PIN(0, 6), 24, 3 }, /* IRQ0 */
+ { RCAR_GP_PIN(0, 5), 20, 3 }, /* IRQ1 */
+ { RCAR_GP_PIN(0, 4), 16, 3 }, /* IRQ2 */
+ { RCAR_GP_PIN(0, 3), 12, 3 }, /* IRQ3 */
+ { RCAR_GP_PIN(0, 2), 8, 3 }, /* GP0_02 */
+ { RCAR_GP_PIN(0, 1), 4, 3 }, /* GP0_01 */
+ { RCAR_GP_PIN(0, 0), 0, 3 }, /* GP0_00 */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL0", 0xE6050084) {
+ { RCAR_GP_PIN(0, 15), 28, 3 }, /* MSIOF2_SYNC */
+ { RCAR_GP_PIN(0, 14), 24, 3 }, /* MSIOF2_SS1 */
+ { RCAR_GP_PIN(0, 13), 20, 3 }, /* MSIOF2_SS2 */
+ { RCAR_GP_PIN(0, 12), 16, 3 }, /* MSIOF5_RXD */
+ { RCAR_GP_PIN(0, 11), 12, 3 }, /* MSIOF5_SCK */
+ { RCAR_GP_PIN(0, 10), 8, 3 }, /* MSIOF5_TXD */
+ { RCAR_GP_PIN(0, 9), 4, 3 }, /* MSIOF5_SYNC */
+ { RCAR_GP_PIN(0, 8), 0, 3 }, /* MSIOF5_SS1 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL0", 0xE6050088) {
+ { RCAR_GP_PIN(0, 18), 8, 3 }, /* MSIOF2_RXD */
+ { RCAR_GP_PIN(0, 17), 4, 3 }, /* MSIOF2_SCK */
+ { RCAR_GP_PIN(0, 16), 0, 3 }, /* MSIOF2_TXD */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL1", 0xE6050880) {
+ { RCAR_GP_PIN(1, 7), 28, 3 }, /* MSIOF0_SS1 */
+ { RCAR_GP_PIN(1, 6), 24, 3 }, /* MSIOF0_SS2 */
+ { RCAR_GP_PIN(1, 5), 20, 3 }, /* MSIOF1_RXD */
+ { RCAR_GP_PIN(1, 4), 16, 3 }, /* MSIOF1_TXD */
+ { RCAR_GP_PIN(1, 3), 12, 3 }, /* MSIOF1_SCK */
+ { RCAR_GP_PIN(1, 2), 8, 3 }, /* MSIOF1_SYNC */
+ { RCAR_GP_PIN(1, 1), 4, 3 }, /* MSIOF1_SS1 */
+ { RCAR_GP_PIN(1, 0), 0, 3 }, /* MSIOF1_SS2 */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL1", 0xE6050884) {
+ { RCAR_GP_PIN(1, 15), 28, 3 }, /* HSCK0 */
+ { RCAR_GP_PIN(1, 14), 24, 3 }, /* HRTS0_N */
+ { RCAR_GP_PIN(1, 13), 20, 3 }, /* HCTS0_N */
+ { RCAR_GP_PIN(1, 12), 16, 3 }, /* HTX0 */
+ { RCAR_GP_PIN(1, 11), 12, 3 }, /* MSIOF0_RXD */
+ { RCAR_GP_PIN(1, 10), 8, 3 }, /* MSIOF0_SCK */
+ { RCAR_GP_PIN(1, 9), 4, 3 }, /* MSIOF0_TXD */
+ { RCAR_GP_PIN(1, 8), 0, 3 }, /* MSIOF0_SYNC */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL1", 0xE6050888) {
+ { RCAR_GP_PIN(1, 23), 28, 3 }, /* GP1_23 */
+ { RCAR_GP_PIN(1, 22), 24, 3 }, /* AUDIO_CLKIN */
+ { RCAR_GP_PIN(1, 21), 20, 3 }, /* AUDIO_CLKOUT */
+ { RCAR_GP_PIN(1, 20), 16, 3 }, /* SSI_SD */
+ { RCAR_GP_PIN(1, 19), 12, 3 }, /* SSI_WS */
+ { RCAR_GP_PIN(1, 18), 8, 3 }, /* SSI_SCK */
+ { RCAR_GP_PIN(1, 17), 4, 3 }, /* SCIF_CLK */
+ { RCAR_GP_PIN(1, 16), 0, 3 }, /* HRX0 */
+ } },
+ { PINMUX_DRIVE_REG("DRV3CTRL1", 0xE605088C) {
+ { RCAR_GP_PIN(1, 28), 16, 3 }, /* HTX3 */
+ { RCAR_GP_PIN(1, 27), 12, 3 }, /* HCTS3_N */
+ { RCAR_GP_PIN(1, 26), 8, 3 }, /* HRTS3_N */
+ { RCAR_GP_PIN(1, 25), 4, 3 }, /* HSCK3 */
+ { RCAR_GP_PIN(1, 24), 0, 3 }, /* HRX3 */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL2", 0xE6058080) {
+ { RCAR_GP_PIN(2, 7), 28, 3 }, /* TPU0TO1 */
+ { RCAR_GP_PIN(2, 6), 24, 3 }, /* FXR_TXDB */
+ { RCAR_GP_PIN(2, 5), 20, 3 }, /* FXR_TXENB_N */
+ { RCAR_GP_PIN(2, 4), 16, 3 }, /* RXDB_EXTFXR */
+ { RCAR_GP_PIN(2, 3), 12, 3 }, /* CLK_EXTFXR */
+ { RCAR_GP_PIN(2, 2), 8, 3 }, /* RXDA_EXTFXR */
+ { RCAR_GP_PIN(2, 1), 4, 3 }, /* FXR_TXENA_N */
+ { RCAR_GP_PIN(2, 0), 0, 3 }, /* FXR_TXDA */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL2", 0xE6058084) {
+ { RCAR_GP_PIN(2, 15), 28, 3 }, /* CANFD3_RX */
+ { RCAR_GP_PIN(2, 14), 24, 3 }, /* CANFD3_TX */
+ { RCAR_GP_PIN(2, 13), 20, 3 }, /* CANFD2_RX */
+ { RCAR_GP_PIN(2, 12), 16, 3 }, /* CANFD2_TX */
+ { RCAR_GP_PIN(2, 11), 12, 3 }, /* CANFD0_RX */
+ { RCAR_GP_PIN(2, 10), 8, 3 }, /* CANFD0_TX */
+ { RCAR_GP_PIN(2, 9), 4, 3 }, /* CAN_CLK */
+ { RCAR_GP_PIN(2, 8), 0, 3 }, /* TPU0TO0 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL2", 0xE6058088) {
+ { RCAR_GP_PIN(2, 19), 12, 3 }, /* CANFD7_RX */
+ { RCAR_GP_PIN(2, 18), 8, 3 }, /* CANFD7_TX */
+ { RCAR_GP_PIN(2, 17), 4, 3 }, /* CANFD4_RX */
+ { RCAR_GP_PIN(2, 16), 0, 3 }, /* CANFD4_TX */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL3", 0xE6058880) {
+ { RCAR_GP_PIN(3, 7), 28, 3 }, /* MMC_D4 */
+ { RCAR_GP_PIN(3, 6), 24, 3 }, /* MMC_D5 */
+ { RCAR_GP_PIN(3, 5), 20, 3 }, /* MMC_SD_D3 */
+ { RCAR_GP_PIN(3, 4), 16, 3 }, /* MMC_DS */
+ { RCAR_GP_PIN(3, 3), 12, 3 }, /* MMC_SD_CLK */
+ { RCAR_GP_PIN(3, 2), 8, 3 }, /* MMC_SD_D2 */
+ { RCAR_GP_PIN(3, 1), 4, 3 }, /* MMC_SD_D0 */
+ { RCAR_GP_PIN(3, 0), 0, 3 }, /* MMC_SD_D1 */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL3", 0xE6058884) {
+ { RCAR_GP_PIN(3, 15), 28, 2 }, /* QSPI0_SSL */
+ { RCAR_GP_PIN(3, 14), 24, 2 }, /* IPC_CLKOUT */
+ { RCAR_GP_PIN(3, 13), 20, 2 }, /* IPC_CLKIN */
+ { RCAR_GP_PIN(3, 12), 16, 3 }, /* SD_WP */
+ { RCAR_GP_PIN(3, 11), 12, 3 }, /* SD_CD */
+ { RCAR_GP_PIN(3, 10), 8, 3 }, /* MMC_SD_CMD */
+ { RCAR_GP_PIN(3, 9), 4, 3 }, /* MMC_D6 */
+ { RCAR_GP_PIN(3, 8), 0, 3 }, /* MMC_D7 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL3", 0xE6058888) {
+ { RCAR_GP_PIN(3, 23), 28, 2 }, /* QSPI1_MISO_IO1 */
+ { RCAR_GP_PIN(3, 22), 24, 2 }, /* QSPI1_SPCLK */
+ { RCAR_GP_PIN(3, 21), 20, 2 }, /* QSPI1_MOSI_IO0 */
+ { RCAR_GP_PIN(3, 20), 16, 2 }, /* QSPI0_SPCLK */
+ { RCAR_GP_PIN(3, 19), 12, 2 }, /* QSPI0_MOSI_IO0 */
+ { RCAR_GP_PIN(3, 18), 8, 2 }, /* QSPI0_MISO_IO1 */
+ { RCAR_GP_PIN(3, 17), 4, 2 }, /* QSPI0_IO2 */
+ { RCAR_GP_PIN(3, 16), 0, 2 }, /* QSPI0_IO3 */
+ } },
+ { PINMUX_DRIVE_REG("DRV3CTRL3", 0xE605888C) {
+ { RCAR_GP_PIN(3, 29), 20, 2 }, /* RPC_INT_N */
+ { RCAR_GP_PIN(3, 28), 16, 2 }, /* RPC_WP_N */
+ { RCAR_GP_PIN(3, 27), 12, 2 }, /* RPC_RESET_N */
+ { RCAR_GP_PIN(3, 26), 8, 2 }, /* QSPI1_IO3 */
+ { RCAR_GP_PIN(3, 25), 4, 2 }, /* QSPI1_SSL */
+ { RCAR_GP_PIN(3, 24), 0, 2 }, /* QSPI1_IO2 */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL4", 0xE6060080) {
+ { RCAR_GP_PIN(4, 7), 28, 3 }, /* TSN0_RX_CTL */
+ { RCAR_GP_PIN(4, 6), 24, 3 }, /* TSN0_AVTP_CAPTURE */
+ { RCAR_GP_PIN(4, 5), 20, 3 }, /* TSN0_AVTP_MATCH */
+ { RCAR_GP_PIN(4, 4), 16, 3 }, /* TSN0_LINK */
+ { RCAR_GP_PIN(4, 3), 12, 3 }, /* TSN0_PHY_INT */
+ { RCAR_GP_PIN(4, 2), 8, 3 }, /* TSN0_AVTP_PPS1 */
+ { RCAR_GP_PIN(4, 1), 4, 3 }, /* TSN0_MDC */
+ { RCAR_GP_PIN(4, 0), 0, 3 }, /* TSN0_MDIO */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL4", 0xE6060084) {
+ { RCAR_GP_PIN(4, 15), 28, 3 }, /* TSN0_TD0 */
+ { RCAR_GP_PIN(4, 14), 24, 3 }, /* TSN0_TD1 */
+ { RCAR_GP_PIN(4, 13), 20, 3 }, /* TSN0_RD1 */
+ { RCAR_GP_PIN(4, 12), 16, 3 }, /* TSN0_TXC */
+ { RCAR_GP_PIN(4, 11), 12, 3 }, /* TSN0_RXC */
+ { RCAR_GP_PIN(4, 10), 8, 3 }, /* TSN0_RD0 */
+ { RCAR_GP_PIN(4, 9), 4, 3 }, /* TSN0_TX_CTL */
+ { RCAR_GP_PIN(4, 8), 0, 3 }, /* TSN0_AVTP_PPS0 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL4", 0xE6060088) {
+ { RCAR_GP_PIN(4, 23), 28, 3 }, /* AVS0 */
+ { RCAR_GP_PIN(4, 22), 24, 3 }, /* PCIE1_CLKREQ_N */
+ { RCAR_GP_PIN(4, 21), 20, 3 }, /* PCIE0_CLKREQ_N */
+ { RCAR_GP_PIN(4, 20), 16, 3 }, /* TSN0_TXCREFCLK */
+ { RCAR_GP_PIN(4, 19), 12, 3 }, /* TSN0_TD2 */
+ { RCAR_GP_PIN(4, 18), 8, 3 }, /* TSN0_TD3 */
+ { RCAR_GP_PIN(4, 17), 4, 3 }, /* TSN0_RD2 */
+ { RCAR_GP_PIN(4, 16), 0, 3 }, /* TSN0_RD3 */
+ } },
+ { PINMUX_DRIVE_REG("DRV3CTRL4", 0xE606008C) {
+ { RCAR_GP_PIN(4, 24), 0, 3 }, /* AVS1 */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL5", 0xE6060880) {
+ { RCAR_GP_PIN(5, 7), 28, 3 }, /* AVB2_TXCREFCLK */
+ { RCAR_GP_PIN(5, 6), 24, 3 }, /* AVB2_MDC */
+ { RCAR_GP_PIN(5, 5), 20, 3 }, /* AVB2_MAGIC */
+ { RCAR_GP_PIN(5, 4), 16, 3 }, /* AVB2_PHY_INT */
+ { RCAR_GP_PIN(5, 3), 12, 3 }, /* AVB2_LINK */
+ { RCAR_GP_PIN(5, 2), 8, 3 }, /* AVB2_AVTP_MATCH */
+ { RCAR_GP_PIN(5, 1), 4, 3 }, /* AVB2_AVTP_CAPTURE */
+ { RCAR_GP_PIN(5, 0), 0, 3 }, /* AVB2_AVTP_PPS */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL5", 0xE6060884) {
+ { RCAR_GP_PIN(5, 15), 28, 3 }, /* AVB2_TD0 */
+ { RCAR_GP_PIN(5, 14), 24, 3 }, /* AVB2_RD1 */
+ { RCAR_GP_PIN(5, 13), 20, 3 }, /* AVB2_RD2 */
+ { RCAR_GP_PIN(5, 12), 16, 3 }, /* AVB2_TD1 */
+ { RCAR_GP_PIN(5, 11), 12, 3 }, /* AVB2_TD2 */
+ { RCAR_GP_PIN(5, 10), 8, 3 }, /* AVB2_MDIO */
+ { RCAR_GP_PIN(5, 9), 4, 3 }, /* AVB2_RD3 */
+ { RCAR_GP_PIN(5, 8), 0, 3 }, /* AVB2_TD3 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL5", 0xE6060888) {
+ { RCAR_GP_PIN(5, 20), 16, 3 }, /* AVB2_RX_CTL */
+ { RCAR_GP_PIN(5, 19), 12, 3 }, /* AVB2_TX_CTL */
+ { RCAR_GP_PIN(5, 18), 8, 3 }, /* AVB2_RXC */
+ { RCAR_GP_PIN(5, 17), 4, 3 }, /* AVB2_RD0 */
+ { RCAR_GP_PIN(5, 16), 0, 3 }, /* AVB2_TXC */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL6", 0xE6061080) {
+ { RCAR_GP_PIN(6, 7), 28, 3 }, /* AVB1_TX_CTL */
+ { RCAR_GP_PIN(6, 6), 24, 3 }, /* AVB1_TXC */
+ { RCAR_GP_PIN(6, 5), 20, 3 }, /* AVB1_AVTP_MATCH */
+ { RCAR_GP_PIN(6, 4), 16, 3 }, /* AVB1_LINK */
+ { RCAR_GP_PIN(6, 3), 12, 3 }, /* AVB1_PHY_INT */
+ { RCAR_GP_PIN(6, 2), 8, 3 }, /* AVB1_MDC */
+ { RCAR_GP_PIN(6, 1), 4, 3 }, /* AVB1_MAGIC */
+ { RCAR_GP_PIN(6, 0), 0, 3 }, /* AVB1_MDIO */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL6", 0xE6061084) {
+ { RCAR_GP_PIN(6, 15), 28, 3 }, /* AVB1_RD0 */
+ { RCAR_GP_PIN(6, 14), 24, 3 }, /* AVB1_RD1 */
+ { RCAR_GP_PIN(6, 13), 20, 3 }, /* AVB1_TD0 */
+ { RCAR_GP_PIN(6, 12), 16, 3 }, /* AVB1_TD1 */
+ { RCAR_GP_PIN(6, 11), 12, 3 }, /* AVB1_AVTP_CAPTURE */
+ { RCAR_GP_PIN(6, 10), 8, 3 }, /* AVB1_AVTP_PPS */
+ { RCAR_GP_PIN(6, 9), 4, 3 }, /* AVB1_RX_CTL */
+ { RCAR_GP_PIN(6, 8), 0, 3 }, /* AVB1_RXC */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL6", 0xE6061088) {
+ { RCAR_GP_PIN(6, 20), 16, 3 }, /* AVB1_TXCREFCLK */
+ { RCAR_GP_PIN(6, 19), 12, 3 }, /* AVB1_RD3 */
+ { RCAR_GP_PIN(6, 18), 8, 3 }, /* AVB1_TD3 */
+ { RCAR_GP_PIN(6, 17), 4, 3 }, /* AVB1_RD2 */
+ { RCAR_GP_PIN(6, 16), 0, 3 }, /* AVB1_TD2 */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL7", 0xE6061880) {
+ { RCAR_GP_PIN(7, 7), 28, 3 }, /* AVB0_TD1 */
+ { RCAR_GP_PIN(7, 6), 24, 3 }, /* AVB0_TD2 */
+ { RCAR_GP_PIN(7, 5), 20, 3 }, /* AVB0_PHY_INT */
+ { RCAR_GP_PIN(7, 4), 16, 3 }, /* AVB0_LINK */
+ { RCAR_GP_PIN(7, 3), 12, 3 }, /* AVB0_TD3 */
+ { RCAR_GP_PIN(7, 2), 8, 3 }, /* AVB0_AVTP_MATCH */
+ { RCAR_GP_PIN(7, 1), 4, 3 }, /* AVB0_AVTP_CAPTURE */
+ { RCAR_GP_PIN(7, 0), 0, 3 }, /* AVB0_AVTP_PPS */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL7", 0xE6061884) {
+ { RCAR_GP_PIN(7, 15), 28, 3 }, /* AVB0_TXC */
+ { RCAR_GP_PIN(7, 14), 24, 3 }, /* AVB0_MDIO */
+ { RCAR_GP_PIN(7, 13), 20, 3 }, /* AVB0_MDC */
+ { RCAR_GP_PIN(7, 12), 16, 3 }, /* AVB0_RD2 */
+ { RCAR_GP_PIN(7, 11), 12, 3 }, /* AVB0_TD0 */
+ { RCAR_GP_PIN(7, 10), 8, 3 }, /* AVB0_MAGIC */
+ { RCAR_GP_PIN(7, 9), 4, 3 }, /* AVB0_TXCREFCLK */
+ { RCAR_GP_PIN(7, 8), 0, 3 }, /* AVB0_RD3 */
+ } },
+ { PINMUX_DRIVE_REG("DRV2CTRL7", 0xE6061888) {
+ { RCAR_GP_PIN(7, 20), 16, 3 }, /* AVB0_RX_CTL */
+ { RCAR_GP_PIN(7, 19), 12, 3 }, /* AVB0_RXC */
+ { RCAR_GP_PIN(7, 18), 8, 3 }, /* AVB0_RD0 */
+ { RCAR_GP_PIN(7, 17), 4, 3 }, /* AVB0_RD1 */
+ { RCAR_GP_PIN(7, 16), 0, 3 }, /* AVB0_TX_CTL */
+ } },
+ { PINMUX_DRIVE_REG("DRV0CTRL8", 0xE6068080) {
+ { RCAR_GP_PIN(8, 7), 28, 3 }, /* SDA3 */
+ { RCAR_GP_PIN(8, 6), 24, 3 }, /* SCL3 */
+ { RCAR_GP_PIN(8, 5), 20, 3 }, /* SDA2 */
+ { RCAR_GP_PIN(8, 4), 16, 3 }, /* SCL2 */
+ { RCAR_GP_PIN(8, 3), 12, 3 }, /* SDA1 */
+ { RCAR_GP_PIN(8, 2), 8, 3 }, /* SCL1 */
+ { RCAR_GP_PIN(8, 1), 4, 3 }, /* SDA0 */
+ { RCAR_GP_PIN(8, 0), 0, 3 }, /* SCL0 */
+ } },
+ { PINMUX_DRIVE_REG("DRV1CTRL8", 0xE6068084) {
+ { RCAR_GP_PIN(8, 13), 20, 3 }, /* GP8_13 */
+ { RCAR_GP_PIN(8, 12), 16, 3 }, /* GP8_12 */
+ { RCAR_GP_PIN(8, 11), 12, 3 }, /* SDA5 */
+ { RCAR_GP_PIN(8, 10), 8, 3 }, /* SCL5 */
+ { RCAR_GP_PIN(8, 9), 4, 3 }, /* SDA4 */
+ { RCAR_GP_PIN(8, 8), 0, 3 }, /* SCL4 */
+ } },
+ { },
+};
+
+enum ioctrl_regs {
+ POC0,
+ POC1,
+ POC3,
+ POC4,
+ POC5,
+ POC6,
+ POC7,
+ POC8,
+};
+
+static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
+ [POC0] = { 0xE60500A0, },
+ [POC1] = { 0xE60508A0, },
+ [POC3] = { 0xE60588A0, },
+ [POC4] = { 0xE60600A0, },
+ [POC5] = { 0xE60608A0, },
+ [POC6] = { 0xE60610A0, },
+ [POC7] = { 0xE60618A0, },
+ [POC8] = { 0xE60680A0, },
+ { /* sentinel */ },
+};
+
+static int r8a779g0_pin_to_pocctrl(unsigned int pin, u32 *pocctrl)
+{
+ int bit = pin & 0x1f;
+
+ *pocctrl = pinmux_ioctrl_regs[POC0].reg;
+ if (pin >= RCAR_GP_PIN(0, 0) && pin <= RCAR_GP_PIN(0, 18))
+ return bit;
+
+ *pocctrl = pinmux_ioctrl_regs[POC1].reg;
+ if (pin >= RCAR_GP_PIN(1, 0) && pin <= RCAR_GP_PIN(1, 22))
+ return bit;
+
+ *pocctrl = pinmux_ioctrl_regs[POC3].reg;
+ if (pin >= RCAR_GP_PIN(3, 0) && pin <= RCAR_GP_PIN(3, 12))
+ return bit;
+
+ *pocctrl = pinmux_ioctrl_regs[POC8].reg;
+ if (pin >= RCAR_GP_PIN(8, 0) && pin <= RCAR_GP_PIN(8, 13))
+ return bit;
+
+ return -EINVAL;
+}
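+/*
+ * Illustrative sketch only, assuming RCAR_GP_PIN(bank, pin) numbers pins as
+ * bank * 32 + pin as elsewhere in this driver family: for RCAR_GP_PIN(1, 5)
+ * the helper above returns bit 5 and sets *pocctrl to the POC1 register at
+ * 0xE60508A0; pins outside the listed GP0/GP1/GP3/GP8 ranges get -EINVAL.
+ */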
+
+static const struct pinmux_bias_reg pinmux_bias_regs[] = {
+ { PINMUX_BIAS_REG("PUEN0", 0xE60500C0, "PUD0", 0xE60500E0) {
+ [ 0] = RCAR_GP_PIN(0, 0), /* GP0_00 */
+ [ 1] = RCAR_GP_PIN(0, 1), /* GP0_01 */
+ [ 2] = RCAR_GP_PIN(0, 2), /* GP0_02 */
+ [ 3] = RCAR_GP_PIN(0, 3), /* IRQ3 */
+ [ 4] = RCAR_GP_PIN(0, 4), /* IRQ2 */
+ [ 5] = RCAR_GP_PIN(0, 5), /* IRQ1 */
+ [ 6] = RCAR_GP_PIN(0, 6), /* IRQ0 */
+ [ 7] = RCAR_GP_PIN(0, 7), /* MSIOF5_SS2 */
+ [ 8] = RCAR_GP_PIN(0, 8), /* MSIOF5_SS1 */
+ [ 9] = RCAR_GP_PIN(0, 9), /* MSIOF5_SYNC */
+ [10] = RCAR_GP_PIN(0, 10), /* MSIOF5_TXD */
+ [11] = RCAR_GP_PIN(0, 11), /* MSIOF5_SCK */
+ [12] = RCAR_GP_PIN(0, 12), /* MSIOF5_RXD */
+ [13] = RCAR_GP_PIN(0, 13), /* MSIOF2_SS2 */
+ [14] = RCAR_GP_PIN(0, 14), /* MSIOF2_SS1 */
+ [15] = RCAR_GP_PIN(0, 15), /* MSIOF2_SYNC */
+ [16] = RCAR_GP_PIN(0, 16), /* MSIOF2_TXD */
+ [17] = RCAR_GP_PIN(0, 17), /* MSIOF2_SCK */
+ [18] = RCAR_GP_PIN(0, 18), /* MSIOF2_RXD */
+ [19] = SH_PFC_PIN_NONE,
+ [20] = SH_PFC_PIN_NONE,
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN1", 0xE60508C0, "PUD1", 0xE60508E0) {
+ [ 0] = RCAR_GP_PIN(1, 0), /* MSIOF1_SS2 */
+ [ 1] = RCAR_GP_PIN(1, 1), /* MSIOF1_SS1 */
+ [ 2] = RCAR_GP_PIN(1, 2), /* MSIOF1_SYNC */
+ [ 3] = RCAR_GP_PIN(1, 3), /* MSIOF1_SCK */
+ [ 4] = RCAR_GP_PIN(1, 4), /* MSIOF1_TXD */
+ [ 5] = RCAR_GP_PIN(1, 5), /* MSIOF1_RXD */
+ [ 6] = RCAR_GP_PIN(1, 6), /* MSIOF0_SS2 */
+ [ 7] = RCAR_GP_PIN(1, 7), /* MSIOF0_SS1 */
+ [ 8] = RCAR_GP_PIN(1, 8), /* MSIOF0_SYNC */
+ [ 9] = RCAR_GP_PIN(1, 9), /* MSIOF0_TXD */
+ [10] = RCAR_GP_PIN(1, 10), /* MSIOF0_SCK */
+ [11] = RCAR_GP_PIN(1, 11), /* MSIOF0_RXD */
+ [12] = RCAR_GP_PIN(1, 12), /* HTX0 */
+ [13] = RCAR_GP_PIN(1, 13), /* HCTS0_N */
+ [14] = RCAR_GP_PIN(1, 14), /* HRTS0_N */
+ [15] = RCAR_GP_PIN(1, 15), /* HSCK0 */
+ [16] = RCAR_GP_PIN(1, 16), /* HRX0 */
+ [17] = RCAR_GP_PIN(1, 17), /* SCIF_CLK */
+ [18] = RCAR_GP_PIN(1, 18), /* SSI_SCK */
+ [19] = RCAR_GP_PIN(1, 19), /* SSI_WS */
+ [20] = RCAR_GP_PIN(1, 20), /* SSI_SD */
+ [21] = RCAR_GP_PIN(1, 21), /* AUDIO_CLKOUT */
+ [22] = RCAR_GP_PIN(1, 22), /* AUDIO_CLKIN */
+ [23] = RCAR_GP_PIN(1, 23), /* GP1_23 */
+ [24] = RCAR_GP_PIN(1, 24), /* HRX3 */
+ [25] = RCAR_GP_PIN(1, 25), /* HSCK3 */
+ [26] = RCAR_GP_PIN(1, 26), /* HRTS3_N */
+ [27] = RCAR_GP_PIN(1, 27), /* HCTS3_N */
+ [28] = RCAR_GP_PIN(1, 28), /* HTX3 */
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN2", 0xE60580C0, "PUD2", 0xE60580E0) {
+ [ 0] = RCAR_GP_PIN(2, 0), /* FXR_TXDA */
+ [ 1] = RCAR_GP_PIN(2, 1), /* FXR_TXENA_N */
+ [ 2] = RCAR_GP_PIN(2, 2), /* RXDA_EXTFXR */
+ [ 3] = RCAR_GP_PIN(2, 3), /* CLK_EXTFXR */
+ [ 4] = RCAR_GP_PIN(2, 4), /* RXDB_EXTFXR */
+ [ 5] = RCAR_GP_PIN(2, 5), /* FXR_TXENB_N */
+ [ 6] = RCAR_GP_PIN(2, 6), /* FXR_TXDB */
+ [ 7] = RCAR_GP_PIN(2, 7), /* TPU0TO1 */
+ [ 8] = RCAR_GP_PIN(2, 8), /* TPU0TO0 */
+ [ 9] = RCAR_GP_PIN(2, 9), /* CAN_CLK */
+ [10] = RCAR_GP_PIN(2, 10), /* CANFD0_TX */
+ [11] = RCAR_GP_PIN(2, 11), /* CANFD0_RX */
+ [12] = RCAR_GP_PIN(2, 12), /* CANFD2_TX */
+ [13] = RCAR_GP_PIN(2, 13), /* CANFD2_RX */
+ [14] = RCAR_GP_PIN(2, 14), /* CANFD3_TX */
+ [15] = RCAR_GP_PIN(2, 15), /* CANFD3_RX */
+ [16] = RCAR_GP_PIN(2, 16), /* CANFD4_TX */
+ [17] = RCAR_GP_PIN(2, 17), /* CANFD4_RX */
+ [18] = RCAR_GP_PIN(2, 18), /* CANFD7_TX */
+ [19] = RCAR_GP_PIN(2, 19), /* CANFD7_RX */
+ [20] = SH_PFC_PIN_NONE,
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN3", 0xE60588C0, "PUD3", 0xE60588E0) {
+ [ 0] = RCAR_GP_PIN(3, 0), /* MMC_SD_D1 */
+ [ 1] = RCAR_GP_PIN(3, 1), /* MMC_SD_D0 */
+ [ 2] = RCAR_GP_PIN(3, 2), /* MMC_SD_D2 */
+ [ 3] = RCAR_GP_PIN(3, 3), /* MMC_SD_CLK */
+ [ 4] = RCAR_GP_PIN(3, 4), /* MMC_DS */
+ [ 5] = RCAR_GP_PIN(3, 5), /* MMC_SD_D3 */
+ [ 6] = RCAR_GP_PIN(3, 6), /* MMC_D5 */
+ [ 7] = RCAR_GP_PIN(3, 7), /* MMC_D4 */
+ [ 8] = RCAR_GP_PIN(3, 8), /* MMC_D7 */
+ [ 9] = RCAR_GP_PIN(3, 9), /* MMC_D6 */
+ [10] = RCAR_GP_PIN(3, 10), /* MMC_SD_CMD */
+ [11] = RCAR_GP_PIN(3, 11), /* SD_CD */
+ [12] = RCAR_GP_PIN(3, 12), /* SD_WP */
+ [13] = RCAR_GP_PIN(3, 13), /* IPC_CLKIN */
+ [14] = RCAR_GP_PIN(3, 14), /* IPC_CLKOUT */
+ [15] = RCAR_GP_PIN(3, 15), /* QSPI0_SSL */
+ [16] = RCAR_GP_PIN(3, 16), /* QSPI0_IO3 */
+ [17] = RCAR_GP_PIN(3, 17), /* QSPI0_IO2 */
+ [18] = RCAR_GP_PIN(3, 18), /* QSPI0_MISO_IO1 */
+ [19] = RCAR_GP_PIN(3, 19), /* QSPI0_MOSI_IO0 */
+ [20] = RCAR_GP_PIN(3, 20), /* QSPI0_SPCLK */
+ [21] = RCAR_GP_PIN(3, 21), /* QSPI1_MOSI_IO0 */
+ [22] = RCAR_GP_PIN(3, 22), /* QSPI1_SPCLK */
+ [23] = RCAR_GP_PIN(3, 23), /* QSPI1_MISO_IO1 */
+ [24] = RCAR_GP_PIN(3, 24), /* QSPI1_IO2 */
+ [25] = RCAR_GP_PIN(3, 25), /* QSPI1_SSL */
+ [26] = RCAR_GP_PIN(3, 26), /* QSPI1_IO3 */
+ [27] = RCAR_GP_PIN(3, 27), /* RPC_RESET_N */
+ [28] = RCAR_GP_PIN(3, 28), /* RPC_WP_N */
+ [29] = RCAR_GP_PIN(3, 29), /* RPC_INT_N */
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN4", 0xE60600C0, "PUD4", 0xE60600E0) {
+ [ 0] = RCAR_GP_PIN(4, 0), /* TSN0_MDIO */
+ [ 1] = RCAR_GP_PIN(4, 1), /* TSN0_MDC */
+ [ 2] = RCAR_GP_PIN(4, 2), /* TSN0_AVTP_PPS1 */
+ [ 3] = RCAR_GP_PIN(4, 3), /* TSN0_PHY_INT */
+ [ 4] = RCAR_GP_PIN(4, 4), /* TSN0_LINK */
+ [ 5] = RCAR_GP_PIN(4, 5), /* TSN0_AVTP_MATCH */
+ [ 6] = RCAR_GP_PIN(4, 6), /* TSN0_AVTP_CAPTURE */
+ [ 7] = RCAR_GP_PIN(4, 7), /* TSN0_RX_CTL */
+ [ 8] = RCAR_GP_PIN(4, 8), /* TSN0_AVTP_PPS0 */
+ [ 9] = RCAR_GP_PIN(4, 9), /* TSN0_TX_CTL */
+ [10] = RCAR_GP_PIN(4, 10), /* TSN0_RD0 */
+ [11] = RCAR_GP_PIN(4, 11), /* TSN0_RXC */
+ [12] = RCAR_GP_PIN(4, 12), /* TSN0_TXC */
+ [13] = RCAR_GP_PIN(4, 13), /* TSN0_RD1 */
+ [14] = RCAR_GP_PIN(4, 14), /* TSN0_TD1 */
+ [15] = RCAR_GP_PIN(4, 15), /* TSN0_TD0 */
+ [16] = RCAR_GP_PIN(4, 16), /* TSN0_RD3 */
+ [17] = RCAR_GP_PIN(4, 17), /* TSN0_RD2 */
+ [18] = RCAR_GP_PIN(4, 18), /* TSN0_TD3 */
+ [19] = RCAR_GP_PIN(4, 19), /* TSN0_TD2 */
+ [20] = RCAR_GP_PIN(4, 20), /* TSN0_TXCREFCLK */
+ [21] = RCAR_GP_PIN(4, 21), /* PCIE0_CLKREQ_N */
+ [22] = RCAR_GP_PIN(4, 22), /* PCIE1_CLKREQ_N */
+ [23] = RCAR_GP_PIN(4, 23), /* AVS0 */
+ [24] = RCAR_GP_PIN(4, 24), /* AVS1 */
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN5", 0xE60608C0, "PUD5", 0xE60608E0) {
+ [ 0] = RCAR_GP_PIN(5, 0), /* AVB2_AVTP_PPS */
+ [ 1] = RCAR_GP_PIN(5, 1), /* AVB2_AVTP_CAPTURE */
+ [ 2] = RCAR_GP_PIN(5, 2), /* AVB2_AVTP_MATCH */
+ [ 3] = RCAR_GP_PIN(5, 3), /* AVB2_LINK */
+ [ 4] = RCAR_GP_PIN(5, 4), /* AVB2_PHY_INT */
+ [ 5] = RCAR_GP_PIN(5, 5), /* AVB2_MAGIC */
+ [ 6] = RCAR_GP_PIN(5, 6), /* AVB2_MDC */
+ [ 7] = RCAR_GP_PIN(5, 7), /* AVB2_TXCREFCLK */
+ [ 8] = RCAR_GP_PIN(5, 8), /* AVB2_TD3 */
+ [ 9] = RCAR_GP_PIN(5, 9), /* AVB2_RD3 */
+ [10] = RCAR_GP_PIN(5, 10), /* AVB2_MDIO */
+ [11] = RCAR_GP_PIN(5, 11), /* AVB2_TD2 */
+ [12] = RCAR_GP_PIN(5, 12), /* AVB2_TD1 */
+ [13] = RCAR_GP_PIN(5, 13), /* AVB2_RD2 */
+ [14] = RCAR_GP_PIN(5, 14), /* AVB2_RD1 */
+ [15] = RCAR_GP_PIN(5, 15), /* AVB2_TD0 */
+ [16] = RCAR_GP_PIN(5, 16), /* AVB2_TXC */
+ [17] = RCAR_GP_PIN(5, 17), /* AVB2_RD0 */
+ [18] = RCAR_GP_PIN(5, 18), /* AVB2_RXC */
+ [19] = RCAR_GP_PIN(5, 19), /* AVB2_TX_CTL */
+ [20] = RCAR_GP_PIN(5, 20), /* AVB2_RX_CTL */
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN6", 0xE60610C0, "PUD6", 0xE60610E0) {
+ [ 0] = RCAR_GP_PIN(6, 0), /* AVB1_MDIO */
+ [ 1] = RCAR_GP_PIN(6, 1), /* AVB1_MAGIC */
+ [ 2] = RCAR_GP_PIN(6, 2), /* AVB1_MDC */
+ [ 3] = RCAR_GP_PIN(6, 3), /* AVB1_PHY_INT */
+ [ 4] = RCAR_GP_PIN(6, 4), /* AVB1_LINK */
+ [ 5] = RCAR_GP_PIN(6, 5), /* AVB1_AVTP_MATCH */
+ [ 6] = RCAR_GP_PIN(6, 6), /* AVB1_TXC */
+ [ 7] = RCAR_GP_PIN(6, 7), /* AVB1_TX_CTL */
+ [ 8] = RCAR_GP_PIN(6, 8), /* AVB1_RXC */
+ [ 9] = RCAR_GP_PIN(6, 9), /* AVB1_RX_CTL */
+ [10] = RCAR_GP_PIN(6, 10), /* AVB1_AVTP_PPS */
+ [11] = RCAR_GP_PIN(6, 11), /* AVB1_AVTP_CAPTURE */
+ [12] = RCAR_GP_PIN(6, 12), /* AVB1_TD1 */
+ [13] = RCAR_GP_PIN(6, 13), /* AVB1_TD0 */
+ [14] = RCAR_GP_PIN(6, 14), /* AVB1_RD1 */
+ [15] = RCAR_GP_PIN(6, 15), /* AVB1_RD0 */
+ [16] = RCAR_GP_PIN(6, 16), /* AVB1_TD2 */
+ [17] = RCAR_GP_PIN(6, 17), /* AVB1_RD2 */
+ [18] = RCAR_GP_PIN(6, 18), /* AVB1_TD3 */
+ [19] = RCAR_GP_PIN(6, 19), /* AVB1_RD3 */
+ [20] = RCAR_GP_PIN(6, 20), /* AVB1_TXCREFCLK */
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN7", 0xE60618C0, "PUD7", 0xE60618E0) {
+ [ 0] = RCAR_GP_PIN(7, 0), /* AVB0_AVTP_PPS */
+ [ 1] = RCAR_GP_PIN(7, 1), /* AVB0_AVTP_CAPTURE */
+ [ 2] = RCAR_GP_PIN(7, 2), /* AVB0_AVTP_MATCH */
+ [ 3] = RCAR_GP_PIN(7, 3), /* AVB0_TD3 */
+ [ 4] = RCAR_GP_PIN(7, 4), /* AVB0_LINK */
+ [ 5] = RCAR_GP_PIN(7, 5), /* AVB0_PHY_INT */
+ [ 6] = RCAR_GP_PIN(7, 6), /* AVB0_TD2 */
+ [ 7] = RCAR_GP_PIN(7, 7), /* AVB0_TD1 */
+ [ 8] = RCAR_GP_PIN(7, 8), /* AVB0_RD3 */
+ [ 9] = RCAR_GP_PIN(7, 9), /* AVB0_TXCREFCLK */
+ [10] = RCAR_GP_PIN(7, 10), /* AVB0_MAGIC */
+ [11] = RCAR_GP_PIN(7, 11), /* AVB0_TD0 */
+ [12] = RCAR_GP_PIN(7, 12), /* AVB0_RD2 */
+ [13] = RCAR_GP_PIN(7, 13), /* AVB0_MDC */
+ [14] = RCAR_GP_PIN(7, 14), /* AVB0_MDIO */
+ [15] = RCAR_GP_PIN(7, 15), /* AVB0_TXC */
+ [16] = RCAR_GP_PIN(7, 16), /* AVB0_TX_CTL */
+ [17] = RCAR_GP_PIN(7, 17), /* AVB0_RD1 */
+ [18] = RCAR_GP_PIN(7, 18), /* AVB0_RD0 */
+ [19] = RCAR_GP_PIN(7, 19), /* AVB0_RXC */
+ [20] = RCAR_GP_PIN(7, 20), /* AVB0_RX_CTL */
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUEN8", 0xE60680C0, "PUD8", 0xE60680E0) {
+ [ 0] = RCAR_GP_PIN(8, 0), /* SCL0 */
+ [ 1] = RCAR_GP_PIN(8, 1), /* SDA0 */
+ [ 2] = RCAR_GP_PIN(8, 2), /* SCL1 */
+ [ 3] = RCAR_GP_PIN(8, 3), /* SDA1 */
+ [ 4] = RCAR_GP_PIN(8, 4), /* SCL2 */
+ [ 5] = RCAR_GP_PIN(8, 5), /* SDA2 */
+ [ 6] = RCAR_GP_PIN(8, 6), /* SCL3 */
+ [ 7] = RCAR_GP_PIN(8, 7), /* SDA3 */
+ [ 8] = RCAR_GP_PIN(8, 8), /* SCL4 */
+ [ 9] = RCAR_GP_PIN(8, 9), /* SDA4 */
+ [10] = RCAR_GP_PIN(8, 10), /* SCL5 */
+ [11] = RCAR_GP_PIN(8, 11), /* SDA5 */
+ [12] = RCAR_GP_PIN(8, 12), /* GP8_12 */
+ [13] = RCAR_GP_PIN(8, 13), /* GP8_13 */
+ [14] = SH_PFC_PIN_NONE,
+ [15] = SH_PFC_PIN_NONE,
+ [16] = SH_PFC_PIN_NONE,
+ [17] = SH_PFC_PIN_NONE,
+ [18] = SH_PFC_PIN_NONE,
+ [19] = SH_PFC_PIN_NONE,
+ [20] = SH_PFC_PIN_NONE,
+ [21] = SH_PFC_PIN_NONE,
+ [22] = SH_PFC_PIN_NONE,
+ [23] = SH_PFC_PIN_NONE,
+ [24] = SH_PFC_PIN_NONE,
+ [25] = SH_PFC_PIN_NONE,
+ [26] = SH_PFC_PIN_NONE,
+ [27] = SH_PFC_PIN_NONE,
+ [28] = SH_PFC_PIN_NONE,
+ [29] = SH_PFC_PIN_NONE,
+ [30] = SH_PFC_PIN_NONE,
+ [31] = SH_PFC_PIN_NONE,
+ } },
+ { /* sentinel */ },
+};
+
+static const struct sh_pfc_soc_operations r8a779g0_pin_ops = {
+ .pin_to_pocctrl = r8a779g0_pin_to_pocctrl,
+ .get_bias = rcar_pinmux_get_bias,
+ .set_bias = rcar_pinmux_set_bias,
+};
+
+const struct sh_pfc_soc_info r8a779g0_pinmux_info = {
+ .name = "r8a779g0_pfc",
+ .ops = &r8a779g0_pin_ops,
+ .unlock_reg = 0x1ff, /* PMMRn mask */
+
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .groups = pinmux_groups,
+ .nr_groups = ARRAY_SIZE(pinmux_groups),
+ .functions = pinmux_functions,
+ .nr_functions = ARRAY_SIZE(pinmux_functions),
+
+ .cfg_regs = pinmux_config_regs,
+ .drive_regs = pinmux_drive_regs,
+ .bias_regs = pinmux_bias_regs,
+ .ioctrl_regs = pinmux_ioctrl_regs,
+
+ .pinmux_data = pinmux_data,
+ .pinmux_data_size = ARRAY_SIZE(pinmux_data),
+};
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
index c47eed9d948f..a43824fd9505 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
@@ -527,6 +527,8 @@ static int rzg2l_pinctrl_pinconf_get(struct pinctrl_dev *pctldev,
if (!(cfg & PIN_CFG_IEN))
return -EINVAL;
arg = rzg2l_read_pin_config(pctrl, IEN(port_offset), bit, IEN_MASK);
+ if (!arg)
+ return -EINVAL;
break;
case PIN_CONFIG_POWER_SOURCE: {
diff --git a/drivers/pinctrl/renesas/pinctrl-rzv2m.c b/drivers/pinctrl/renesas/pinctrl-rzv2m.c
new file mode 100644
index 000000000000..e8c18198bebd
--- /dev/null
+++ b/drivers/pinctrl/renesas/pinctrl-rzv2m.c
@@ -0,0 +1,1119 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/V2M Pin Control and GPIO driver core
+ *
+ * Based on:
+ * Renesas RZ/G2L Pin Control and GPIO driver core
+ *
+ * Copyright (C) 2022 Renesas Electronics Corporation.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/gpio/driver.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/spinlock.h>
+
+#include <dt-bindings/pinctrl/rzv2m-pinctrl.h>
+
+#include "../core.h"
+#include "../pinconf.h"
+#include "../pinmux.h"
+
+#define DRV_NAME "pinctrl-rzv2m"
+
+/*
+ * Use 16 lower bits [15:0] for pin identifier
+ * Use 16 higher bits [31:16] for pin mux function
+ */
+#define MUX_PIN_ID_MASK GENMASK(15, 0)
+#define MUX_FUNC_MASK GENMASK(31, 16)
+#define MUX_FUNC(pinconf) FIELD_GET(MUX_FUNC_MASK, (pinconf))
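+/*
+ * A hedged example of how a DT "pinmux" cell decodes with the masks above
+ * (assuming RZV2M_PINS_PER_PORT is 16, as the P*_0..P*_15 pin names below
+ * suggest): a cell value of 0x00060008 yields MUX_FUNC() == 6 and a pin
+ * identifier of 8, i.e. function 6 on port 0, pin 8.
+ */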
+
+/* PIN capabilities */
+#define PIN_CFG_GRP_1_8V_2 1
+#define PIN_CFG_GRP_1_8V_3 2
+#define PIN_CFG_GRP_SWIO_1 3
+#define PIN_CFG_GRP_SWIO_2 4
+#define PIN_CFG_GRP_3_3V 5
+#define PIN_CFG_GRP_MASK GENMASK(2, 0)
+#define PIN_CFG_BIAS BIT(3)
+#define PIN_CFG_DRV BIT(4)
+#define PIN_CFG_SLEW BIT(5)
+
+#define RZV2M_MPXED_PIN_FUNCS (PIN_CFG_BIAS | \
+ PIN_CFG_DRV | \
+ PIN_CFG_SLEW)
+
+/*
+ * n indicates the number of pins in the port, a is the register index,
+ * and f is the pin configuration capabilities supported.
+ */
+#define RZV2M_GPIO_PORT_PACK(n, a, f) (((n) << 24) | ((a) << 16) | (f))
+#define RZV2M_GPIO_PORT_GET_PINCNT(x) FIELD_GET(GENMASK(31, 24), (x))
+#define RZV2M_GPIO_PORT_GET_INDEX(x) FIELD_GET(GENMASK(23, 16), (x))
+#define RZV2M_GPIO_PORT_GET_CFGS(x) FIELD_GET(GENMASK(15, 0), (x))
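+/*
+ * Worked example, using a value from rzv2m_gpio_configs[] below:
+ * RZV2M_GPIO_PORT_PACK(16, 3, PIN_CFG_GRP_SWIO_1 | RZV2M_MPXED_PIN_FUNCS)
+ * packs to (16 << 24) | (3 << 16) | cfgs, so GET_PINCNT() returns 16,
+ * GET_INDEX() returns 3 and GET_CFGS() returns the capability flags.
+ */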
+
+#define RZV2M_DEDICATED_PORT_IDX 22
+
+/*
+ * BIT(31) indicates dedicated pin, b is the register bits (b * 16)
+ * and f is the pin configuration capabilities supported.
+ */
+#define RZV2M_SINGLE_PIN BIT(31)
+#define RZV2M_SINGLE_PIN_PACK(b, f) (RZV2M_SINGLE_PIN | \
+ ((RZV2M_DEDICATED_PORT_IDX) << 24) | \
+ ((b) << 16) | (f))
+#define RZV2M_SINGLE_PIN_GET_PORT(x) FIELD_GET(GENMASK(30, 24), (x))
+#define RZV2M_SINGLE_PIN_GET_BIT(x) FIELD_GET(GENMASK(23, 16), (x))
+#define RZV2M_SINGLE_PIN_GET_CFGS(x) FIELD_GET(GENMASK(15, 0), (x))
+
+#define RZV2M_PIN_ID_TO_PORT(id) ((id) / RZV2M_PINS_PER_PORT)
+#define RZV2M_PIN_ID_TO_PIN(id) ((id) % RZV2M_PINS_PER_PORT)
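+/*
+ * Sketch of the pin numbering, assuming RZV2M_PINS_PER_PORT is 16 as the
+ * P<port>_0..P<port>_15 naming below suggests: global pin id 35 maps to
+ * port 2, pin 3 (35 / 16 == 2, 35 % 16 == 3).
+ */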
+
+#define DO(n) (0x00 + (n) * 0x40)
+#define OE(n) (0x04 + (n) * 0x40)
+#define IE(n) (0x08 + (n) * 0x40)
+#define PFSEL(n) (0x10 + (n) * 0x40)
+#define DI(n) (0x20 + (n) * 0x40)
+#define PUPD(n) (0x24 + (n) * 0x40)
+#define DRV(n) ((n) < RZV2M_DEDICATED_PORT_IDX ? (0x28 + (n) * 0x40) \
+ : 0x590)
+#define SR(n) ((n) < RZV2M_DEDICATED_PORT_IDX ? (0x2c + (n) * 0x40) \
+ : 0x594)
+#define DI_MSK(n) (0x30 + (n) * 0x40)
+#define EN_MSK(n) (0x34 + (n) * 0x40)
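+/*
+ * Each GPIO port owns a 0x40-byte register window; e.g. DO(2) is 0x80 and
+ * OE(2) is 0x84.  The dedicated-pin port (index 22) instead has its DRV and
+ * SR registers at the fixed offsets 0x590 and 0x594.
+ */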
+
+#define PFC_MASK 0x07
+#define PUPD_MASK 0x03
+#define DRV_MASK 0x03
+
+struct rzv2m_dedicated_configs {
+ const char *name;
+ u32 config;
+};
+
+struct rzv2m_pinctrl_data {
+ const char * const *port_pins;
+ const u32 *port_pin_configs;
+ const struct rzv2m_dedicated_configs *dedicated_pins;
+ unsigned int n_port_pins;
+ unsigned int n_dedicated_pins;
+};
+
+struct rzv2m_pinctrl {
+ struct pinctrl_dev *pctl;
+ struct pinctrl_desc desc;
+ struct pinctrl_pin_desc *pins;
+
+ const struct rzv2m_pinctrl_data *data;
+ void __iomem *base;
+ struct device *dev;
+ struct clk *clk;
+
+ struct gpio_chip gpio_chip;
+ struct pinctrl_gpio_range gpio_range;
+
+ spinlock_t lock;
+};
+
+static const unsigned int drv_1_8V_group2_uA[] = { 1800, 3800, 7800, 11000 };
+static const unsigned int drv_1_8V_group3_uA[] = { 1600, 3200, 6400, 9600 };
+static const unsigned int drv_SWIO_group2_3_3V_uA[] = { 9000, 11000, 13000, 18000 };
+static const unsigned int drv_3_3V_group_uA[] = { 2000, 4000, 8000, 12000 };
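+/*
+ * The 2-bit DRV field selects one of the four currents in the table that
+ * matches the pin's voltage group; e.g. a field value of 2 on a
+ * PIN_CFG_GRP_3_3V pin reads back as 8000 uA via drv_3_3V_group_uA[2] in
+ * the pinconf get/set callbacks below.
+ */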
+
+/* Helper for registers that have a write enable bit in the upper word */
+static void rzv2m_writel_we(void __iomem *addr, u8 shift, u8 value)
+{
+ writel((BIT(16) | value) << shift, addr);
+}
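+/*
+ * For instance, rzv2m_writel_we(addr, 3, 1) writes 0x00080008: bit 19 in the
+ * upper word enables the update and bit 3 in the lower word carries the new
+ * value, so the remaining bits of the register are left unchanged.
+ */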
+
+static void rzv2m_pinctrl_set_pfc_mode(struct rzv2m_pinctrl *pctrl,
+ u8 port, u8 pin, u8 func)
+{
+ void __iomem *addr;
+
+ /* Mask input/output */
+ rzv2m_writel_we(pctrl->base + DI_MSK(port), pin, 1);
+ rzv2m_writel_we(pctrl->base + EN_MSK(port), pin, 1);
+
+ /* Select the function and set the write enable bits */
+ addr = pctrl->base + PFSEL(port) + (pin / 4) * 4;
+ writel(((PFC_MASK << 16) | func) << ((pin % 4) * 4), addr);
+
+ /* Unmask input/output */
+ rzv2m_writel_we(pctrl->base + EN_MSK(port), pin, 0);
+ rzv2m_writel_we(pctrl->base + DI_MSK(port), pin, 0);
+};
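+/*
+ * A short worked example of the PFSEL addressing above: port 1, pin 6,
+ * func 2 selects the word at PFSEL(1) + 4 (pins 4-7), a field shift of
+ * (6 % 4) * 4 = 8, and writes ((PFC_MASK << 16) | 2) << 8 = 0x07000200,
+ * i.e. the 3-bit write-enable mask plus the new function value.
+ */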
+
+static int rzv2m_pinctrl_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int func_selector,
+ unsigned int group_selector)
+{
+ struct rzv2m_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct function_desc *func;
+ unsigned int i, *psel_val;
+ struct group_desc *group;
+ int *pins;
+
+ func = pinmux_generic_get_function(pctldev, func_selector);
+ if (!func)
+ return -EINVAL;
+ group = pinctrl_generic_get_group(pctldev, group_selector);
+ if (!group)
+ return -EINVAL;
+
+ psel_val = func->data;
+ pins = group->pins;
+
+ for (i = 0; i < group->num_pins; i++) {
+ dev_dbg(pctrl->dev, "port:%u pin: %u PSEL:%u\n",
+ RZV2M_PIN_ID_TO_PORT(pins[i]), RZV2M_PIN_ID_TO_PIN(pins[i]),
+ psel_val[i]);
+ rzv2m_pinctrl_set_pfc_mode(pctrl, RZV2M_PIN_ID_TO_PORT(pins[i]),
+ RZV2M_PIN_ID_TO_PIN(pins[i]), psel_val[i]);
+ }
+
+ return 0;
+};
+
+static int rzv2m_map_add_config(struct pinctrl_map *map,
+ const char *group_or_pin,
+ enum pinctrl_map_type type,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ unsigned long *cfgs;
+
+ cfgs = kmemdup(configs, num_configs * sizeof(*cfgs),
+ GFP_KERNEL);
+ if (!cfgs)
+ return -ENOMEM;
+
+ map->type = type;
+ map->data.configs.group_or_pin = group_or_pin;
+ map->data.configs.configs = cfgs;
+ map->data.configs.num_configs = num_configs;
+
+ return 0;
+}
+
+static int rzv2m_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ struct pinctrl_map **map,
+ unsigned int *num_maps,
+ unsigned int *index)
+{
+ struct rzv2m_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct pinctrl_map *maps = *map;
+ unsigned int nmaps = *num_maps;
+ unsigned long *configs = NULL;
+ unsigned int *pins, *psel_val;
+ unsigned int num_pinmux = 0;
+ unsigned int idx = *index;
+ unsigned int num_pins, i;
+ unsigned int num_configs;
+ struct property *pinmux;
+ struct property *prop;
+ int ret, gsel, fsel;
+ const char **pin_fn;
+ const char *pin;
+
+ pinmux = of_find_property(np, "pinmux", NULL);
+ if (pinmux)
+ num_pinmux = pinmux->length / sizeof(u32);
+
+ ret = of_property_count_strings(np, "pins");
+ if (ret == -EINVAL) {
+ num_pins = 0;
+ } else if (ret < 0) {
+ dev_err(pctrl->dev, "Invalid pins list in DT\n");
+ return ret;
+ } else {
+ num_pins = ret;
+ }
+
+ if (!num_pinmux && !num_pins)
+ return 0;
+
+ if (num_pinmux && num_pins) {
+ dev_err(pctrl->dev,
+ "DT node must contain either a pinmux or pins and not both\n");
+ return -EINVAL;
+ }
+
+ ret = pinconf_generic_parse_dt_config(np, NULL, &configs, &num_configs);
+ if (ret < 0)
+ return ret;
+
+ if (num_pins && !num_configs) {
+ dev_err(pctrl->dev, "DT node must contain a config\n");
+ ret = -ENODEV;
+ goto done;
+ }
+
+ if (num_pinmux)
+ nmaps += 1;
+
+ if (num_pins)
+ nmaps += num_pins;
+
+ maps = krealloc_array(maps, nmaps, sizeof(*maps), GFP_KERNEL);
+ if (!maps) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ *map = maps;
+ *num_maps = nmaps;
+ if (num_pins) {
+ of_property_for_each_string(np, "pins", prop, pin) {
+ ret = rzv2m_map_add_config(&maps[idx], pin,
+ PIN_MAP_TYPE_CONFIGS_PIN,
+ configs, num_configs);
+ if (ret < 0)
+ goto done;
+
+ idx++;
+ }
+ ret = 0;
+ goto done;
+ }
+
+ pins = devm_kcalloc(pctrl->dev, num_pinmux, sizeof(*pins), GFP_KERNEL);
+ psel_val = devm_kcalloc(pctrl->dev, num_pinmux, sizeof(*psel_val),
+ GFP_KERNEL);
+ pin_fn = devm_kzalloc(pctrl->dev, sizeof(*pin_fn), GFP_KERNEL);
+ if (!pins || !psel_val || !pin_fn) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ /* Collect pin locations and mux settings from DT properties */
+ for (i = 0; i < num_pinmux; ++i) {
+ u32 value;
+
+ ret = of_property_read_u32_index(np, "pinmux", i, &value);
+ if (ret)
+ goto done;
+ pins[i] = value & MUX_PIN_ID_MASK;
+ psel_val[i] = MUX_FUNC(value);
+ }
+
+ /* Register a single pin group listing all the pins we read from DT */
+ gsel = pinctrl_generic_add_group(pctldev, np->name, pins, num_pinmux, NULL);
+ if (gsel < 0) {
+ ret = gsel;
+ goto done;
+ }
+
+ /*
+ * Register a single group function where the 'data' is an array of PSEL
+ * register values read from DT.
+ */
+ pin_fn[0] = np->name;
+ fsel = pinmux_generic_add_function(pctldev, np->name, pin_fn, 1,
+ psel_val);
+ if (fsel < 0) {
+ ret = fsel;
+ goto remove_group;
+ }
+
+ maps[idx].type = PIN_MAP_TYPE_MUX_GROUP;
+ maps[idx].data.mux.group = np->name;
+ maps[idx].data.mux.function = np->name;
+ idx++;
+
+ dev_dbg(pctrl->dev, "Parsed %pOF with %d pins\n", np, num_pinmux);
+ ret = 0;
+ goto done;
+
+remove_group:
+ pinctrl_generic_remove_group(pctldev, gsel);
+done:
+ *index = idx;
+ kfree(configs);
+ return ret;
+}
+
+static void rzv2m_dt_free_map(struct pinctrl_dev *pctldev,
+ struct pinctrl_map *map,
+ unsigned int num_maps)
+{
+ unsigned int i;
+
+ if (!map)
+ return;
+
+ for (i = 0; i < num_maps; ++i) {
+ if (map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP ||
+ map[i].type == PIN_MAP_TYPE_CONFIGS_PIN)
+ kfree(map[i].data.configs.configs);
+ }
+ kfree(map);
+}
+
+static int rzv2m_dt_node_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ struct pinctrl_map **map,
+ unsigned int *num_maps)
+{
+ struct rzv2m_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct device_node *child;
+ unsigned int index;
+ int ret;
+
+ *map = NULL;
+ *num_maps = 0;
+ index = 0;
+
+ for_each_child_of_node(np, child) {
+ ret = rzv2m_dt_subnode_to_map(pctldev, child, map,
+ num_maps, &index);
+ if (ret < 0) {
+ of_node_put(child);
+ goto done;
+ }
+ }
+
+ if (*num_maps == 0) {
+ ret = rzv2m_dt_subnode_to_map(pctldev, np, map,
+ num_maps, &index);
+ if (ret < 0)
+ goto done;
+ }
+
+ if (*num_maps)
+ return 0;
+
+ dev_err(pctrl->dev, "no mapping found in node %pOF\n", np);
+ ret = -EINVAL;
+
+done:
+ if (ret < 0)
+ rzv2m_dt_free_map(pctldev, *map, *num_maps);
+
+ return ret;
+}
+
+static int rzv2m_validate_gpio_pin(struct rzv2m_pinctrl *pctrl,
+ u32 cfg, u32 port, u8 bit)
+{
+ u8 pincount = RZV2M_GPIO_PORT_GET_PINCNT(cfg);
+ u32 port_index = RZV2M_GPIO_PORT_GET_INDEX(cfg);
+ u32 data;
+
+ if (bit >= pincount || port >= pctrl->data->n_port_pins)
+ return -EINVAL;
+
+ data = pctrl->data->port_pin_configs[port];
+ if (port_index != RZV2M_GPIO_PORT_GET_INDEX(data))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void rzv2m_rmw_pin_config(struct rzv2m_pinctrl *pctrl, u32 offset,
+ u8 shift, u32 mask, u32 val)
+{
+ void __iomem *addr = pctrl->base + offset;
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+ reg = readl(addr) & ~(mask << shift);
+ writel(reg | (val << shift), addr);
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
+static int rzv2m_pinctrl_pinconf_get(struct pinctrl_dev *pctldev,
+ unsigned int _pin,
+ unsigned long *config)
+{
+ struct rzv2m_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ const struct pinctrl_pin_desc *pin = &pctrl->desc.pins[_pin];
+ unsigned int *pin_data = pin->drv_data;
+ unsigned int arg = 0;
+ u32 port;
+ u32 cfg;
+ u8 bit;
+ u32 val;
+
+ if (!pin_data)
+ return -EINVAL;
+
+ if (*pin_data & RZV2M_SINGLE_PIN) {
+ port = RZV2M_SINGLE_PIN_GET_PORT(*pin_data);
+ cfg = RZV2M_SINGLE_PIN_GET_CFGS(*pin_data);
+ bit = RZV2M_SINGLE_PIN_GET_BIT(*pin_data);
+ } else {
+ cfg = RZV2M_GPIO_PORT_GET_CFGS(*pin_data);
+ port = RZV2M_PIN_ID_TO_PORT(_pin);
+ bit = RZV2M_PIN_ID_TO_PIN(_pin);
+
+ if (rzv2m_validate_gpio_pin(pctrl, *pin_data, RZV2M_PIN_ID_TO_PORT(_pin), bit))
+ return -EINVAL;
+ }
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ case PIN_CONFIG_BIAS_PULL_DOWN: {
+ enum pin_config_param bias;
+
+ if (!(cfg & PIN_CFG_BIAS))
+ return -EINVAL;
+
+ /* PUPD uses 2-bits per pin */
+ bit *= 2;
+
+ switch ((readl(pctrl->base + PUPD(port)) >> bit) & PUPD_MASK) {
+ case 0:
+ bias = PIN_CONFIG_BIAS_PULL_DOWN;
+ break;
+ case 2:
+ bias = PIN_CONFIG_BIAS_PULL_UP;
+ break;
+ default:
+ bias = PIN_CONFIG_BIAS_DISABLE;
+ }
+
+ if (bias != param)
+ return -EINVAL;
+ break;
+ }
+
+ case PIN_CONFIG_DRIVE_STRENGTH_UA:
+ if (!(cfg & PIN_CFG_DRV))
+ return -EINVAL;
+
+ /* DRV uses 2-bits per pin */
+ bit *= 2;
+
+ val = (readl(pctrl->base + DRV(port)) >> bit) & DRV_MASK;
+
+ switch (cfg & PIN_CFG_GRP_MASK) {
+ case PIN_CFG_GRP_1_8V_2:
+ arg = drv_1_8V_group2_uA[val];
+ break;
+ case PIN_CFG_GRP_1_8V_3:
+ arg = drv_1_8V_group3_uA[val];
+ break;
+ case PIN_CFG_GRP_SWIO_2:
+ arg = drv_SWIO_group2_3_3V_uA[val];
+ break;
+ case PIN_CFG_GRP_SWIO_1:
+ case PIN_CFG_GRP_3_3V:
+ arg = drv_3_3V_group_uA[val];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ break;
+
+ case PIN_CONFIG_SLEW_RATE:
+ if (!(cfg & PIN_CFG_SLEW))
+ return -EINVAL;
+
+ arg = readl(pctrl->base + SR(port)) & BIT(bit);
+ break;
+
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+
+ return 0;
+};
+
+static int rzv2m_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
+ unsigned int _pin,
+ unsigned long *_configs,
+ unsigned int num_configs)
+{
+ struct rzv2m_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct pinctrl_pin_desc *pin = &pctrl->desc.pins[_pin];
+ unsigned int *pin_data = pin->drv_data;
+ enum pin_config_param param;
+ u32 port;
+ unsigned int i;
+ u32 cfg;
+ u8 bit;
+ u32 val;
+
+ if (!pin_data)
+ return -EINVAL;
+
+ if (*pin_data & RZV2M_SINGLE_PIN) {
+ port = RZV2M_SINGLE_PIN_GET_PORT(*pin_data);
+ cfg = RZV2M_SINGLE_PIN_GET_CFGS(*pin_data);
+ bit = RZV2M_SINGLE_PIN_GET_BIT(*pin_data);
+ } else {
+ cfg = RZV2M_GPIO_PORT_GET_CFGS(*pin_data);
+ port = RZV2M_PIN_ID_TO_PORT(_pin);
+ bit = RZV2M_PIN_ID_TO_PIN(_pin);
+
+ if (rzv2m_validate_gpio_pin(pctrl, *pin_data, RZV2M_PIN_ID_TO_PORT(_pin), bit))
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(_configs[i]);
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (!(cfg & PIN_CFG_BIAS))
+ return -EINVAL;
+
+ /* PUPD uses 2-bits per pin */
+ bit *= 2;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ val = 0;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ val = 2;
+ break;
+ default:
+ val = 1;
+ }
+
+ rzv2m_rmw_pin_config(pctrl, PUPD(port), bit, PUPD_MASK, val);
+ break;
+
+ case PIN_CONFIG_DRIVE_STRENGTH_UA: {
+ unsigned int arg = pinconf_to_config_argument(_configs[i]);
+ const unsigned int *drv_strengths;
+ unsigned int index;
+
+ if (!(cfg & PIN_CFG_DRV))
+ return -EINVAL;
+
+ switch (cfg & PIN_CFG_GRP_MASK) {
+ case PIN_CFG_GRP_1_8V_2:
+ drv_strengths = drv_1_8V_group2_uA;
+ break;
+ case PIN_CFG_GRP_1_8V_3:
+ drv_strengths = drv_1_8V_group3_uA;
+ break;
+ case PIN_CFG_GRP_SWIO_2:
+ drv_strengths = drv_SWIO_group2_3_3V_uA;
+ break;
+ case PIN_CFG_GRP_SWIO_1:
+ case PIN_CFG_GRP_3_3V:
+ drv_strengths = drv_3_3V_group_uA;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (index = 0; index < 4; index++) {
+ if (arg == drv_strengths[index])
+ break;
+ }
+ if (index >= 4)
+ return -EINVAL;
+
+ /* DRV uses 2-bits per pin */
+ bit *= 2;
+
+ rzv2m_rmw_pin_config(pctrl, DRV(port), bit, DRV_MASK, index);
+ break;
+ }
+
+ case PIN_CONFIG_SLEW_RATE: {
+ unsigned int arg = pinconf_to_config_argument(_configs[i]);
+
+ if (!(cfg & PIN_CFG_SLEW))
+ return -EINVAL;
+
+ rzv2m_writel_we(pctrl->base + SR(port), bit, !arg);
+ break;
+ }
+
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int rzv2m_pinctrl_pinconf_group_set(struct pinctrl_dev *pctldev,
+ unsigned int group,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ const unsigned int *pins;
+ unsigned int i, npins;
+ int ret;
+
+ ret = pinctrl_generic_get_group_pins(pctldev, group, &pins, &npins);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < npins; i++) {
+ ret = rzv2m_pinctrl_pinconf_set(pctldev, pins[i], configs,
+ num_configs);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+};
+
+static int rzv2m_pinctrl_pinconf_group_get(struct pinctrl_dev *pctldev,
+ unsigned int group,
+ unsigned long *config)
+{
+ const unsigned int *pins;
+ unsigned int i, npins, prev_config = 0;
+ int ret;
+
+ ret = pinctrl_generic_get_group_pins(pctldev, group, &pins, &npins);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < npins; i++) {
+ ret = rzv2m_pinctrl_pinconf_get(pctldev, pins[i], config);
+ if (ret)
+ return ret;
+
+ /* Check config matches previous pins */
+ if (i && prev_config != *config)
+ return -EOPNOTSUPP;
+
+ prev_config = *config;
+ }
+
+ return 0;
+};
+
+static const struct pinctrl_ops rzv2m_pinctrl_pctlops = {
+ .get_groups_count = pinctrl_generic_get_group_count,
+ .get_group_name = pinctrl_generic_get_group_name,
+ .get_group_pins = pinctrl_generic_get_group_pins,
+ .dt_node_to_map = rzv2m_dt_node_to_map,
+ .dt_free_map = rzv2m_dt_free_map,
+};
+
+static const struct pinmux_ops rzv2m_pinctrl_pmxops = {
+ .get_functions_count = pinmux_generic_get_function_count,
+ .get_function_name = pinmux_generic_get_function_name,
+ .get_function_groups = pinmux_generic_get_function_groups,
+ .set_mux = rzv2m_pinctrl_set_mux,
+ .strict = true,
+};
+
+static const struct pinconf_ops rzv2m_pinctrl_confops = {
+ .is_generic = true,
+ .pin_config_get = rzv2m_pinctrl_pinconf_get,
+ .pin_config_set = rzv2m_pinctrl_pinconf_set,
+ .pin_config_group_set = rzv2m_pinctrl_pinconf_group_set,
+ .pin_config_group_get = rzv2m_pinctrl_pinconf_group_get,
+ .pin_config_config_dbg_show = pinconf_generic_dump_config,
+};
+
+static int rzv2m_gpio_request(struct gpio_chip *chip, unsigned int offset)
+{
+ struct rzv2m_pinctrl *pctrl = gpiochip_get_data(chip);
+ u32 port = RZV2M_PIN_ID_TO_PORT(offset);
+ u8 bit = RZV2M_PIN_ID_TO_PIN(offset);
+ int ret;
+
+ ret = pinctrl_gpio_request(chip->base + offset);
+ if (ret)
+ return ret;
+
+ rzv2m_pinctrl_set_pfc_mode(pctrl, port, bit, 0);
+
+ return 0;
+}
+
+static void rzv2m_gpio_set_direction(struct rzv2m_pinctrl *pctrl, u32 port,
+ u8 bit, bool output)
+{
+ rzv2m_writel_we(pctrl->base + OE(port), bit, output);
+ rzv2m_writel_we(pctrl->base + IE(port), bit, !output);
+}
+
+static int rzv2m_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ struct rzv2m_pinctrl *pctrl = gpiochip_get_data(chip);
+ u32 port = RZV2M_PIN_ID_TO_PORT(offset);
+ u8 bit = RZV2M_PIN_ID_TO_PIN(offset);
+
+ if (!(readl(pctrl->base + IE(port)) & BIT(bit)))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
+}
+
+static int rzv2m_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct rzv2m_pinctrl *pctrl = gpiochip_get_data(chip);
+ u32 port = RZV2M_PIN_ID_TO_PORT(offset);
+ u8 bit = RZV2M_PIN_ID_TO_PIN(offset);
+
+ rzv2m_gpio_set_direction(pctrl, port, bit, false);
+
+ return 0;
+}
+
+static void rzv2m_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct rzv2m_pinctrl *pctrl = gpiochip_get_data(chip);
+ u32 port = RZV2M_PIN_ID_TO_PORT(offset);
+ u8 bit = RZV2M_PIN_ID_TO_PIN(offset);
+
+ rzv2m_writel_we(pctrl->base + DO(port), bit, !!value);
+}
+
+static int rzv2m_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct rzv2m_pinctrl *pctrl = gpiochip_get_data(chip);
+ u32 port = RZV2M_PIN_ID_TO_PORT(offset);
+ u8 bit = RZV2M_PIN_ID_TO_PIN(offset);
+
+ rzv2m_gpio_set(chip, offset, value);
+ rzv2m_gpio_set_direction(pctrl, port, bit, true);
+
+ return 0;
+}
+
+static int rzv2m_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct rzv2m_pinctrl *pctrl = gpiochip_get_data(chip);
+ u32 port = RZV2M_PIN_ID_TO_PORT(offset);
+ u8 bit = RZV2M_PIN_ID_TO_PIN(offset);
+ int direction = rzv2m_gpio_get_direction(chip, offset);
+
+ if (direction == GPIO_LINE_DIRECTION_IN)
+ return !!(readl(pctrl->base + DI(port)) & BIT(bit));
+ else
+ return !!(readl(pctrl->base + DO(port)) & BIT(bit));
+}
+
+static void rzv2m_gpio_free(struct gpio_chip *chip, unsigned int offset)
+{
+ pinctrl_gpio_free(chip->base + offset);
+
+ /*
+ * Set the GPIO as an input to ensure that the next GPIO request won't
+ * drive the GPIO pin as an output.
+ */
+ rzv2m_gpio_direction_input(chip, offset);
+}
+
+static const char * const rzv2m_gpio_names[] = {
+ "P0_0", "P0_1", "P0_2", "P0_3", "P0_4", "P0_5", "P0_6", "P0_7",
+ "P0_8", "P0_9", "P0_10", "P0_11", "P0_12", "P0_13", "P0_14", "P0_15",
+ "P1_0", "P1_1", "P1_2", "P1_3", "P1_4", "P1_5", "P1_6", "P1_7",
+ "P1_8", "P1_9", "P1_10", "P1_11", "P1_12", "P1_13", "P1_14", "P1_15",
+ "P2_0", "P2_1", "P2_2", "P2_3", "P2_4", "P2_5", "P2_6", "P2_7",
+ "P2_8", "P2_9", "P2_10", "P2_11", "P2_12", "P2_13", "P2_14", "P2_15",
+ "P3_0", "P3_1", "P3_2", "P3_3", "P3_4", "P3_5", "P3_6", "P3_7",
+ "P3_8", "P3_9", "P3_10", "P3_11", "P3_12", "P3_13", "P3_14", "P3_15",
+ "P4_0", "P4_1", "P4_2", "P4_3", "P4_4", "P4_5", "P4_6", "P4_7",
+ "P4_8", "P4_9", "P4_10", "P4_11", "P4_12", "P4_13", "P4_14", "P4_15",
+ "P5_0", "P5_1", "P5_2", "P5_3", "P5_4", "P5_5", "P5_6", "P5_7",
+ "P5_8", "P5_9", "P5_10", "P5_11", "P5_12", "P5_13", "P5_14", "P5_15",
+ "P6_0", "P6_1", "P6_2", "P6_3", "P6_4", "P6_5", "P6_6", "P6_7",
+ "P6_8", "P6_9", "P6_10", "P6_11", "P6_12", "P6_13", "P6_14", "P6_15",
+ "P7_0", "P7_1", "P7_2", "P7_3", "P7_4", "P7_5", "P7_6", "P7_7",
+ "P7_8", "P7_9", "P7_10", "P7_11", "P7_12", "P7_13", "P7_14", "P7_15",
+ "P8_0", "P8_1", "P8_2", "P8_3", "P8_4", "P8_5", "P8_6", "P8_7",
+ "P8_8", "P8_9", "P8_10", "P8_11", "P8_12", "P8_13", "P8_14", "P8_15",
+ "P9_0", "P9_1", "P9_2", "P9_3", "P9_4", "P9_5", "P9_6", "P9_7",
+ "P9_8", "P9_9", "P9_10", "P9_11", "P9_12", "P9_13", "P9_14", "P9_15",
+ "P10_0", "P10_1", "P10_2", "P10_3", "P10_4", "P10_5", "P10_6", "P10_7",
+ "P10_8", "P10_9", "P10_10", "P10_11", "P10_12", "P10_13", "P10_14", "P10_15",
+ "P11_0", "P11_1", "P11_2", "P11_3", "P11_4", "P11_5", "P11_6", "P11_7",
+ "P11_8", "P11_9", "P11_10", "P11_11", "P11_12", "P11_13", "P11_14", "P11_15",
+ "P12_0", "P12_1", "P12_2", "P12_3", "P12_4", "P12_5", "P12_6", "P12_7",
+ "P12_8", "P12_9", "P12_10", "P12_11", "P12_12", "P12_13", "P12_14", "P12_15",
+ "P13_0", "P13_1", "P13_2", "P13_3", "P13_4", "P13_5", "P13_6", "P13_7",
+ "P13_8", "P13_9", "P13_10", "P13_11", "P13_12", "P13_13", "P13_14", "P13_15",
+ "P14_0", "P14_1", "P14_2", "P14_3", "P14_4", "P14_5", "P14_6", "P14_7",
+ "P14_8", "P14_9", "P14_10", "P14_11", "P14_12", "P14_13", "P14_14", "P14_15",
+ "P15_0", "P15_1", "P15_2", "P15_3", "P15_4", "P15_5", "P15_6", "P15_7",
+ "P15_8", "P15_9", "P15_10", "P15_11", "P15_12", "P15_13", "P15_14", "P15_15",
+ "P16_0", "P16_1", "P16_2", "P16_3", "P16_4", "P16_5", "P16_6", "P16_7",
+ "P16_8", "P16_9", "P16_10", "P16_11", "P16_12", "P16_13", "P16_14", "P16_15",
+ "P17_0", "P17_1", "P17_2", "P17_3", "P17_4", "P17_5", "P17_6", "P17_7",
+ "P17_8", "P17_9", "P17_10", "P17_11", "P17_12", "P17_13", "P17_14", "P17_15",
+ "P18_0", "P18_1", "P18_2", "P18_3", "P18_4", "P18_5", "P18_6", "P18_7",
+ "P18_8", "P18_9", "P18_10", "P18_11", "P18_12", "P18_13", "P18_14", "P18_15",
+ "P19_0", "P19_1", "P19_2", "P19_3", "P19_4", "P19_5", "P19_6", "P19_7",
+ "P19_8", "P19_9", "P19_10", "P19_11", "P19_12", "P19_13", "P19_14", "P19_15",
+ "P20_0", "P20_1", "P20_2", "P20_3", "P20_4", "P20_5", "P20_6", "P20_7",
+ "P20_8", "P20_9", "P20_10", "P20_11", "P20_12", "P20_13", "P20_14", "P20_15",
+ "P21_0", "P21_1", "P21_2", "P21_3", "P21_4", "P21_5", "P21_6", "P21_7",
+ "P21_8", "P21_9", "P21_10", "P21_11", "P21_12", "P21_13", "P21_14", "P21_15",
+};
+
+static const u32 rzv2m_gpio_configs[] = {
+ RZV2M_GPIO_PORT_PACK(14, 0, PIN_CFG_GRP_SWIO_2 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(16, 1, PIN_CFG_GRP_SWIO_1 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(8, 2, PIN_CFG_GRP_1_8V_3 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(16, 3, PIN_CFG_GRP_SWIO_1 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(8, 4, PIN_CFG_GRP_SWIO_1 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(4, 5, PIN_CFG_GRP_1_8V_3 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(12, 6, PIN_CFG_GRP_SWIO_1 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(6, 7, PIN_CFG_GRP_SWIO_1 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(8, 8, PIN_CFG_GRP_SWIO_2 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(8, 9, PIN_CFG_GRP_SWIO_2 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(9, 10, PIN_CFG_GRP_SWIO_1 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(9, 11, PIN_CFG_GRP_SWIO_1 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(4, 12, PIN_CFG_GRP_3_3V | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(12, 13, PIN_CFG_GRP_3_3V | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(8, 14, PIN_CFG_GRP_3_3V | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(16, 15, PIN_CFG_GRP_SWIO_2 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(14, 16, PIN_CFG_GRP_SWIO_2 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(1, 17, PIN_CFG_GRP_SWIO_2 | RZV2M_MPXED_PIN_FUNCS),
+ RZV2M_GPIO_PORT_PACK(0, 18, 0),
+ RZV2M_GPIO_PORT_PACK(0, 19, 0),
+ RZV2M_GPIO_PORT_PACK(3, 20, PIN_CFG_GRP_1_8V_2 | PIN_CFG_DRV),
+ RZV2M_GPIO_PORT_PACK(1, 21, PIN_CFG_GRP_SWIO_1 | PIN_CFG_DRV | PIN_CFG_SLEW),
+};
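Note: the rzv2m_gpio_configs[] table above relies on RZV2M_GPIO_PORT_PACK() to fold each port's pin count, port index and capability flags into a single u32. That macro is defined earlier in the driver and is not visible in this hunk, so the stand-alone sketch below only illustrates the general idea; the bit layout used here (count in bits 31:27, index in bits 26:20, flags below) is a hypothetical choice for the example, not necessarily the driver's actual encoding.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical packing, for illustration only. */
#define EX_PORT_PACK(cnt, idx, cfg) \
	(((uint32_t)(cnt) << 27) | ((uint32_t)(idx) << 20) | (uint32_t)(cfg))
#define EX_PORT_PINCNT(x)	(((x) >> 27) & 0x1f)
#define EX_PORT_INDEX(x)	(((x) >> 20) & 0x7f)
#define EX_PORT_CFG(x)		((x) & 0xfffff)

int main(void)
{
	/* Roughly like port 0 above: 14 pins, port index 0, some flags. */
	uint32_t packed = EX_PORT_PACK(14, 0, 0x3);

	printf("pins=%u port=%u cfg=%#x\n", EX_PORT_PINCNT(packed),
	       EX_PORT_INDEX(packed), EX_PORT_CFG(packed));
	return 0;
}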
+
+static const struct rzv2m_dedicated_configs rzv2m_dedicated_pins[] = {
+ { "NAWPN", RZV2M_SINGLE_PIN_PACK(0,
+ (PIN_CFG_GRP_SWIO_2 | PIN_CFG_DRV | PIN_CFG_SLEW)) },
+ { "IM0CLK", RZV2M_SINGLE_PIN_PACK(1,
+ (PIN_CFG_GRP_SWIO_1 | PIN_CFG_DRV | PIN_CFG_SLEW)) },
+ { "IM1CLK", RZV2M_SINGLE_PIN_PACK(2,
+ (PIN_CFG_GRP_SWIO_1 | PIN_CFG_DRV | PIN_CFG_SLEW)) },
+ { "DETDO", RZV2M_SINGLE_PIN_PACK(5,
+ (PIN_CFG_GRP_1_8V_3 | PIN_CFG_DRV | PIN_CFG_SLEW)) },
+ { "DETMS", RZV2M_SINGLE_PIN_PACK(6,
+ (PIN_CFG_GRP_1_8V_3 | PIN_CFG_DRV | PIN_CFG_SLEW)) },
+ { "PCRSTOUTB", RZV2M_SINGLE_PIN_PACK(12,
+ (PIN_CFG_GRP_3_3V | PIN_CFG_DRV | PIN_CFG_SLEW)) },
+ { "USPWEN", RZV2M_SINGLE_PIN_PACK(14,
+ (PIN_CFG_GRP_3_3V | PIN_CFG_DRV | PIN_CFG_SLEW)) },
+};
+
+static int rzv2m_gpio_register(struct rzv2m_pinctrl *pctrl)
+{
+ struct device_node *np = pctrl->dev->of_node;
+ struct gpio_chip *chip = &pctrl->gpio_chip;
+ const char *name = dev_name(pctrl->dev);
+ struct of_phandle_args of_args;
+ int ret;
+
+ ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &of_args);
+ if (ret) {
+ dev_err(pctrl->dev, "Unable to parse gpio-ranges\n");
+ return ret;
+ }
+
+ if (of_args.args[0] != 0 || of_args.args[1] != 0 ||
+ of_args.args[2] != pctrl->data->n_port_pins) {
+ dev_err(pctrl->dev, "gpio-ranges does not match selected SOC\n");
+ return -EINVAL;
+ }
+
+ chip->names = pctrl->data->port_pins;
+ chip->request = rzv2m_gpio_request;
+ chip->free = rzv2m_gpio_free;
+ chip->get_direction = rzv2m_gpio_get_direction;
+ chip->direction_input = rzv2m_gpio_direction_input;
+ chip->direction_output = rzv2m_gpio_direction_output;
+ chip->get = rzv2m_gpio_get;
+ chip->set = rzv2m_gpio_set;
+ chip->label = name;
+ chip->parent = pctrl->dev;
+ chip->owner = THIS_MODULE;
+ chip->base = -1;
+ chip->ngpio = of_args.args[2];
+
+ pctrl->gpio_range.id = 0;
+ pctrl->gpio_range.pin_base = 0;
+ pctrl->gpio_range.base = 0;
+ pctrl->gpio_range.npins = chip->ngpio;
+ pctrl->gpio_range.name = chip->label;
+ pctrl->gpio_range.gc = chip;
+ ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl);
+ if (ret) {
+ dev_err(pctrl->dev, "failed to add GPIO controller\n");
+ return ret;
+ }
+
+ dev_dbg(pctrl->dev, "Registered gpio controller\n");
+
+ return 0;
+}
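For reference, the gpio-ranges check in rzv2m_gpio_register() above only accepts a single range that starts at GPIO 0 and pin 0 and covers all port pins, so a board device tree would carry something like gpio-ranges = <&pinctrl 0 0 352>;, where 352 is ARRAY_SIZE(rzv2m_gpio_configs) * RZV2M_PINS_PER_PORT (22 ports, assuming RZV2M_PINS_PER_PORT is 16 to match the P*_0..P*_15 names above). The exact count must follow whatever n_port_pins evaluates to for the selected SoC.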
+
+static int rzv2m_pinctrl_register(struct rzv2m_pinctrl *pctrl)
+{
+ struct pinctrl_pin_desc *pins;
+ unsigned int i, j;
+ u32 *pin_data;
+ int ret;
+
+ pctrl->desc.name = DRV_NAME;
+ pctrl->desc.npins = pctrl->data->n_port_pins + pctrl->data->n_dedicated_pins;
+ pctrl->desc.pctlops = &rzv2m_pinctrl_pctlops;
+ pctrl->desc.pmxops = &rzv2m_pinctrl_pmxops;
+ pctrl->desc.confops = &rzv2m_pinctrl_confops;
+ pctrl->desc.owner = THIS_MODULE;
+
+ pins = devm_kcalloc(pctrl->dev, pctrl->desc.npins, sizeof(*pins), GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+
+ pin_data = devm_kcalloc(pctrl->dev, pctrl->desc.npins,
+ sizeof(*pin_data), GFP_KERNEL);
+ if (!pin_data)
+ return -ENOMEM;
+
+ pctrl->pins = pins;
+ pctrl->desc.pins = pins;
+
+ for (i = 0, j = 0; i < pctrl->data->n_port_pins; i++) {
+ pins[i].number = i;
+ pins[i].name = pctrl->data->port_pins[i];
+ if (i && !(i % RZV2M_PINS_PER_PORT))
+ j++;
+ pin_data[i] = pctrl->data->port_pin_configs[j];
+ pins[i].drv_data = &pin_data[i];
+ }
+
+ for (i = 0; i < pctrl->data->n_dedicated_pins; i++) {
+ unsigned int index = pctrl->data->n_port_pins + i;
+
+ pins[index].number = index;
+ pins[index].name = pctrl->data->dedicated_pins[i].name;
+ pin_data[index] = pctrl->data->dedicated_pins[i].config;
+ pins[index].drv_data = &pin_data[index];
+ }
+
+ ret = devm_pinctrl_register_and_init(pctrl->dev, &pctrl->desc, pctrl,
+ &pctrl->pctl);
+ if (ret) {
+ dev_err(pctrl->dev, "pinctrl registration failed\n");
+ return ret;
+ }
+
+ ret = pinctrl_enable(pctrl->pctl);
+ if (ret) {
+ dev_err(pctrl->dev, "pinctrl enable failed\n");
+ return ret;
+ }
+
+ ret = rzv2m_gpio_register(pctrl);
+ if (ret) {
+ dev_err(pctrl->dev, "failed to add GPIO chip: %i\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
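The first loop in rzv2m_pinctrl_register() walks the flat pin index and advances j once every RZV2M_PINS_PER_PORT entries, i.e. each pin picks up the config of port i / RZV2M_PINS_PER_PORT. A minimal sketch of that mapping, assuming 16 pins per port to match the P*_0..P*_15 naming above:

#include <stdio.h>

#define EX_PINS_PER_PORT 16	/* assumed value of RZV2M_PINS_PER_PORT */

int main(void)
{
	unsigned int pin = 37;				/* flat index into port_pins[] */
	unsigned int port = pin / EX_PINS_PER_PORT;	/* config table index j */
	unsigned int bit = pin % EX_PINS_PER_PORT;

	printf("pin %u -> P%u_%u\n", pin, port, bit);	/* pin 37 -> P2_5 */
	return 0;
}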
+
+static void rzv2m_pinctrl_clk_disable(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
+static int rzv2m_pinctrl_probe(struct platform_device *pdev)
+{
+ struct rzv2m_pinctrl *pctrl;
+ int ret;
+
+ pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
+ if (!pctrl)
+ return -ENOMEM;
+
+ pctrl->dev = &pdev->dev;
+
+ pctrl->data = of_device_get_match_data(&pdev->dev);
+ if (!pctrl->data)
+ return -EINVAL;
+
+ pctrl->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pctrl->base))
+ return PTR_ERR(pctrl->base);
+
+ pctrl->clk = devm_clk_get(pctrl->dev, NULL);
+ if (IS_ERR(pctrl->clk)) {
+ ret = PTR_ERR(pctrl->clk);
+ dev_err(pctrl->dev, "failed to get GPIO clk : %i\n", ret);
+ return ret;
+ }
+
+ spin_lock_init(&pctrl->lock);
+
+ platform_set_drvdata(pdev, pctrl);
+
+ ret = clk_prepare_enable(pctrl->clk);
+ if (ret) {
+ dev_err(pctrl->dev, "failed to enable GPIO clk: %i\n", ret);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(&pdev->dev, rzv2m_pinctrl_clk_disable,
+ pctrl->clk);
+ if (ret) {
+ dev_err(pctrl->dev,
+ "failed to register GPIO clk disable action, %i\n",
+ ret);
+ return ret;
+ }
+
+ ret = rzv2m_pinctrl_register(pctrl);
+ if (ret)
+ return ret;
+
+ dev_info(pctrl->dev, "%s support registered\n", DRV_NAME);
+ return 0;
+}
+
+static struct rzv2m_pinctrl_data r9a09g011_data = {
+ .port_pins = rzv2m_gpio_names,
+ .port_pin_configs = rzv2m_gpio_configs,
+ .dedicated_pins = rzv2m_dedicated_pins,
+ .n_port_pins = ARRAY_SIZE(rzv2m_gpio_configs) * RZV2M_PINS_PER_PORT,
+ .n_dedicated_pins = ARRAY_SIZE(rzv2m_dedicated_pins),
+};
+
+static const struct of_device_id rzv2m_pinctrl_of_table[] = {
+ {
+ .compatible = "renesas,r9a09g011-pinctrl",
+ .data = &r9a09g011_data,
+ },
+ { /* sentinel */ }
+};
+
+static struct platform_driver rzv2m_pinctrl_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(rzv2m_pinctrl_of_table),
+ },
+ .probe = rzv2m_pinctrl_probe,
+};
+
+static int __init rzv2m_pinctrl_init(void)
+{
+ return platform_driver_register(&rzv2m_pinctrl_driver);
+}
+core_initcall(rzv2m_pinctrl_init);
+
+MODULE_AUTHOR("Phil Edworthy <phil.edworthy@renesas.com>");
+MODULE_DESCRIPTION("Pin and gpio controller driver for RZ/V2M");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/renesas/sh_pfc.h b/drivers/pinctrl/renesas/sh_pfc.h
index 12bc279f5733..0fcb29ab0c84 100644
--- a/drivers/pinctrl/renesas/sh_pfc.h
+++ b/drivers/pinctrl/renesas/sh_pfc.h
@@ -325,6 +325,7 @@ extern const struct sh_pfc_soc_info r8a77990_pinmux_info;
extern const struct sh_pfc_soc_info r8a77995_pinmux_info;
extern const struct sh_pfc_soc_info r8a779a0_pinmux_info;
extern const struct sh_pfc_soc_info r8a779f0_pinmux_info;
+extern const struct sh_pfc_soc_info r8a779g0_pinmux_info;
extern const struct sh_pfc_soc_info sh7203_pinmux_info;
extern const struct sh_pfc_soc_info sh7264_pinmux_info;
extern const struct sh_pfc_soc_info sh7269_pinmux_info;
@@ -492,9 +493,13 @@ extern const struct sh_pfc_soc_info shx3_pinmux_info;
PORT_GP_CFG_1(bank, 11, fn, sfx, cfg)
#define PORT_GP_12(bank, fn, sfx) PORT_GP_CFG_12(bank, fn, sfx, 0)
-#define PORT_GP_CFG_14(bank, fn, sfx, cfg) \
+#define PORT_GP_CFG_13(bank, fn, sfx, cfg) \
PORT_GP_CFG_12(bank, fn, sfx, cfg), \
- PORT_GP_CFG_1(bank, 12, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 12, fn, sfx, cfg)
+#define PORT_GP_13(bank, fn, sfx) PORT_GP_CFG_13(bank, fn, sfx, 0)
+
+#define PORT_GP_CFG_14(bank, fn, sfx, cfg) \
+ PORT_GP_CFG_13(bank, fn, sfx, cfg), \
PORT_GP_CFG_1(bank, 13, fn, sfx, cfg)
#define PORT_GP_14(bank, fn, sfx) PORT_GP_CFG_14(bank, fn, sfx, 0)
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index 6d7ca1758292..a8212fc126bf 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -27,8 +27,6 @@
#include <linux/soc/samsung/exynos-pmu.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
-#include <dt-bindings/pinctrl/samsung.h>
-
#include "pinctrl-samsung.h"
#include "pinctrl-exynos.h"
@@ -173,7 +171,7 @@ static int exynos_irq_request_resources(struct irq_data *irqd)
con = readl(bank->pctl_base + reg_con);
con &= ~(mask << shift);
- con |= EXYNOS_PIN_FUNC_EINT << shift;
+ con |= EXYNOS_PIN_CON_FUNC_EINT << shift;
writel(con, bank->pctl_base + reg_con);
raw_spin_unlock_irqrestore(&bank->slock, flags);
@@ -196,7 +194,7 @@ static void exynos_irq_release_resources(struct irq_data *irqd)
con = readl(bank->pctl_base + reg_con);
con &= ~(mask << shift);
- con |= EXYNOS_PIN_FUNC_INPUT << shift;
+ con |= PIN_CON_FUNC_INPUT << shift;
writel(con, bank->pctl_base + reg_con);
raw_spin_unlock_irqrestore(&bank->slock, flags);
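The hunks above replace the dt-binding constants with local defines (EXYNOS_PIN_CON_FUNC_EINT, PIN_CON_FUNC_INPUT) but keep the same read-modify-write pattern on the per-bank CON register: clear the pin's function field, then OR in the new function value (0x0 input, 0x1 output, 0xf EINT). A stand-alone model of that update, assuming a 4-bit function field per pin purely for illustration (the real shift and mask come from the bank description):

#include <stdint.h>
#include <stdio.h>

#define CON_FUNC_INPUT	0x0	/* PIN_CON_FUNC_INPUT */
#define CON_FUNC_OUTPUT	0x1	/* PIN_CON_FUNC_OUTPUT */
#define CON_FUNC_EINT	0xf	/* EXYNOS_PIN_CON_FUNC_EINT */

static uint32_t con_set(uint32_t con, unsigned int pin, uint32_t func)
{
	unsigned int shift = pin * 4;	/* assumption: 4 bits per pin */
	uint32_t mask = 0xf;

	con &= ~(mask << shift);
	con |= func << shift;
	return con;
}

int main(void)
{
	uint32_t con = 0;

	con = con_set(con, 3, CON_FUNC_EINT);
	printf("con=%#x\n", con);	/* 0xf000 */
	con = con_set(con, 3, CON_FUNC_INPUT);
	printf("con=%#x\n", con);	/* 0 */
	return 0;
}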
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.h b/drivers/pinctrl/samsung/pinctrl-exynos.h
index bfad1ced8017..7bd6d82c9f36 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.h
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.h
@@ -16,6 +16,9 @@
#ifndef __PINCTRL_SAMSUNG_EXYNOS_H
#define __PINCTRL_SAMSUNG_EXYNOS_H
+/* Values for the pin CON register */
+#define EXYNOS_PIN_CON_FUNC_EINT 0xf
+
/* External GPIO and wakeup interrupt related definitions */
#define EXYNOS_GPIO_ECON_OFFSET 0x700
#define EXYNOS_GPIO_EFLTCON_OFFSET 0x800
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 26d309d2516d..4837bceb767b 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -26,8 +26,6 @@
#include <linux/of_device.h>
#include <linux/spinlock.h>
-#include <dt-bindings/pinctrl/samsung.h>
-
#include "../core.h"
#include "pinctrl-samsung.h"
@@ -614,7 +612,7 @@ static int samsung_gpio_set_direction(struct gpio_chip *gc,
data = readl(reg);
data &= ~(mask << shift);
if (!input)
- data |= EXYNOS_PIN_FUNC_OUTPUT << shift;
+ data |= PIN_CON_FUNC_OUTPUT << shift;
writel(data, reg);
return 0;
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h
index fc6f5199c548..9af93e3d8d9f 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.h
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.h
@@ -53,6 +53,14 @@ enum pincfg_type {
#define PINCFG_UNPACK_TYPE(cfg) ((cfg) & PINCFG_TYPE_MASK)
#define PINCFG_UNPACK_VALUE(cfg) (((cfg) & PINCFG_VALUE_MASK) >> \
PINCFG_VALUE_SHIFT)
+/*
+ * Values for the pin CON register, choosing the pin function.
+ * The basic set (input and output) is the same across S3C24xx, S3C64xx,
+ * S5PV210, Exynos ARMv7, Exynos ARMv8 and Tesla FSD.
+ */
+#define PIN_CON_FUNC_INPUT 0x0
+#define PIN_CON_FUNC_OUTPUT 0x1
+
/**
* enum eint_type - possible external interrupt types.
* @EINT_TYPE_NONE: bank does not support external interrupts
diff --git a/drivers/pinctrl/sunxi/Kconfig b/drivers/pinctrl/sunxi/Kconfig
index 33751a6a0757..a78fdbbdfc0c 100644
--- a/drivers/pinctrl/sunxi/Kconfig
+++ b/drivers/pinctrl/sunxi/Kconfig
@@ -29,7 +29,6 @@ config PINCTRL_SUN6I_A31
config PINCTRL_SUN6I_A31_R
bool "Support for the Allwinner A31 R-PIO"
default MACH_SUN6I
- depends on RESET_CONTROLLER
select PINCTRL_SUNXI
config PINCTRL_SUN8I_A23
@@ -55,7 +54,6 @@ config PINCTRL_SUN8I_A83T_R
config PINCTRL_SUN8I_A23_R
bool "Support for the Allwinner A23 and A33 R-PIO"
default MACH_SUN8I
- depends on RESET_CONTROLLER
select PINCTRL_SUNXI
config PINCTRL_SUN8I_H3
@@ -81,7 +79,11 @@ config PINCTRL_SUN9I_A80
config PINCTRL_SUN9I_A80_R
bool "Support for the Allwinner A80 R-PIO"
default MACH_SUN9I
- depends on RESET_CONTROLLER
+ select PINCTRL_SUNXI
+
+config PINCTRL_SUN20I_D1
+ bool "Support for the Allwinner D1 PIO"
+ default MACH_SUN8I || (RISCV && ARCH_SUNXI)
select PINCTRL_SUNXI
config PINCTRL_SUN50I_A64
diff --git a/drivers/pinctrl/sunxi/Makefile b/drivers/pinctrl/sunxi/Makefile
index d3440c42b9d6..2ff5a55927ad 100644
--- a/drivers/pinctrl/sunxi/Makefile
+++ b/drivers/pinctrl/sunxi/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_PINCTRL_SUN8I_A83T_R) += pinctrl-sun8i-a83t-r.o
obj-$(CONFIG_PINCTRL_SUN8I_H3) += pinctrl-sun8i-h3.o
obj-$(CONFIG_PINCTRL_SUN8I_H3_R) += pinctrl-sun8i-h3-r.o
obj-$(CONFIG_PINCTRL_SUN8I_V3S) += pinctrl-sun8i-v3s.o
+obj-$(CONFIG_PINCTRL_SUN20I_D1) += pinctrl-sun20i-d1.o
obj-$(CONFIG_PINCTRL_SUN50I_H5) += pinctrl-sun50i-h5.o
obj-$(CONFIG_PINCTRL_SUN50I_H6) += pinctrl-sun50i-h6.o
obj-$(CONFIG_PINCTRL_SUN50I_H6_R) += pinctrl-sun50i-h6-r.o
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun20i-d1.c b/drivers/pinctrl/sunxi/pinctrl-sun20i-d1.c
new file mode 100644
index 000000000000..40858b881298
--- /dev/null
+++ b/drivers/pinctrl/sunxi/pinctrl-sun20i-d1.c
@@ -0,0 +1,840 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Allwinner D1 SoC pinctrl driver.
+ *
+ * Copyright (c) 2020 wuyan@allwinnertech.com
+ * Copyright (c) 2021-2022 Samuel Holland <samuel@sholland.org>
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-sunxi.h"
+
+static const struct sunxi_desc_pin d1_pins[] = {
+ /* PB */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "pwm3"),
+ SUNXI_FUNCTION(0x3, "ir"), /* TX */
+ SUNXI_FUNCTION(0x4, "i2c2"), /* SCK */
+ SUNXI_FUNCTION(0x5, "spi1"), /* WP */
+ SUNXI_FUNCTION(0x6, "uart0"), /* TX */
+ SUNXI_FUNCTION(0x7, "uart2"), /* TX */
+ SUNXI_FUNCTION(0x8, "spdif"), /* OUT */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 0)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "pwm4"),
+ SUNXI_FUNCTION(0x3, "i2s2_dout"), /* DOUT3 */
+ SUNXI_FUNCTION(0x4, "i2c2"), /* SDA */
+ SUNXI_FUNCTION(0x5, "i2s2_din"), /* DIN3 */
+ SUNXI_FUNCTION(0x6, "uart0"), /* RX */
+ SUNXI_FUNCTION(0x7, "uart2"), /* RX */
+ SUNXI_FUNCTION(0x8, "ir"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 1)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D0 */
+ SUNXI_FUNCTION(0x3, "i2s2_dout"), /* DOUT2 */
+ SUNXI_FUNCTION(0x4, "i2c0"), /* SDA */
+ SUNXI_FUNCTION(0x5, "i2s2_din"), /* DIN2 */
+ SUNXI_FUNCTION(0x6, "lcd0"), /* D18 */
+ SUNXI_FUNCTION(0x7, "uart4"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 2)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D1 */
+ SUNXI_FUNCTION(0x3, "i2s2_dout"), /* DOUT1 */
+ SUNXI_FUNCTION(0x4, "i2c0"), /* SCK */
+ SUNXI_FUNCTION(0x5, "i2s2_din"), /* DIN0 */
+ SUNXI_FUNCTION(0x6, "lcd0"), /* D19 */
+ SUNXI_FUNCTION(0x7, "uart4"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 3)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D8 */
+ SUNXI_FUNCTION(0x3, "i2s2_dout"), /* DOUT0 */
+ SUNXI_FUNCTION(0x4, "i2c1"), /* SCK */
+ SUNXI_FUNCTION(0x5, "i2s2_din"), /* DIN1 */
+ SUNXI_FUNCTION(0x6, "lcd0"), /* D20 */
+ SUNXI_FUNCTION(0x7, "uart5"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 4)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D9 */
+ SUNXI_FUNCTION(0x3, "i2s2"), /* BCLK */
+ SUNXI_FUNCTION(0x4, "i2c1"), /* SDA */
+ SUNXI_FUNCTION(0x5, "pwm0"),
+ SUNXI_FUNCTION(0x6, "lcd0"), /* D21 */
+ SUNXI_FUNCTION(0x7, "uart5"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 5)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D16 */
+ SUNXI_FUNCTION(0x3, "i2s2"), /* LRCK */
+ SUNXI_FUNCTION(0x4, "i2c3"), /* SCK */
+ SUNXI_FUNCTION(0x5, "pwm1"),
+ SUNXI_FUNCTION(0x6, "lcd0"), /* D22 */
+ SUNXI_FUNCTION(0x7, "uart3"), /* TX */
+ SUNXI_FUNCTION(0x8, "bist0"), /* BIST_RESULT0 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 6)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D17 */
+ SUNXI_FUNCTION(0x3, "i2s2"), /* MCLK */
+ SUNXI_FUNCTION(0x4, "i2c3"), /* SDA */
+ SUNXI_FUNCTION(0x5, "ir"), /* RX */
+ SUNXI_FUNCTION(0x6, "lcd0"), /* D23 */
+ SUNXI_FUNCTION(0x7, "uart3"), /* RX */
+ SUNXI_FUNCTION(0x8, "bist1"), /* BIST_RESULT1 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 7)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "dmic"), /* DATA3 */
+ SUNXI_FUNCTION(0x3, "pwm5"),
+ SUNXI_FUNCTION(0x4, "i2c2"), /* SCK */
+ SUNXI_FUNCTION(0x5, "spi1"), /* HOLD */
+ SUNXI_FUNCTION(0x6, "uart0"), /* TX */
+ SUNXI_FUNCTION(0x7, "uart1"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 8)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "dmic"), /* DATA2 */
+ SUNXI_FUNCTION(0x3, "pwm6"),
+ SUNXI_FUNCTION(0x4, "i2c2"), /* SDA */
+ SUNXI_FUNCTION(0x5, "spi1"), /* MISO */
+ SUNXI_FUNCTION(0x6, "uart0"), /* RX */
+ SUNXI_FUNCTION(0x7, "uart1"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 9)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "dmic"), /* DATA1 */
+ SUNXI_FUNCTION(0x3, "pwm7"),
+ SUNXI_FUNCTION(0x4, "i2c0"), /* SCK */
+ SUNXI_FUNCTION(0x5, "spi1"), /* MOSI */
+ SUNXI_FUNCTION(0x6, "clk"), /* FANOUT0 */
+ SUNXI_FUNCTION(0x7, "uart1"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 10)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "dmic"), /* DATA0 */
+ SUNXI_FUNCTION(0x3, "pwm2"),
+ SUNXI_FUNCTION(0x4, "i2c0"), /* SDA */
+ SUNXI_FUNCTION(0x5, "spi1"), /* CLK */
+ SUNXI_FUNCTION(0x6, "clk"), /* FANOUT1 */
+ SUNXI_FUNCTION(0x7, "uart1"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 11)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "dmic"), /* CLK */
+ SUNXI_FUNCTION(0x3, "pwm0"),
+ SUNXI_FUNCTION(0x4, "spdif"), /* IN */
+ SUNXI_FUNCTION(0x5, "spi1"), /* CS0 */
+ SUNXI_FUNCTION(0x6, "clk"), /* FANOUT2 */
+ SUNXI_FUNCTION(0x7, "ir"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 0, 12)),
+ /* PC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* TX */
+ SUNXI_FUNCTION(0x3, "i2c2"), /* SCK */
+ SUNXI_FUNCTION(0x4, "ledc"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 0)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* RX */
+ SUNXI_FUNCTION(0x3, "i2c2"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 1)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi0"), /* CLK */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 2)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi0"), /* CS0 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* CMD */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 3)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi0"), /* MOSI */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D2 */
+ SUNXI_FUNCTION(0x4, "boot"), /* SEL0 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 4)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi0"), /* MISO */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D1 */
+ SUNXI_FUNCTION(0x4, "boot"), /* SEL1 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 5)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi0"), /* WP */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D0 */
+ SUNXI_FUNCTION(0x4, "uart3"), /* TX */
+ SUNXI_FUNCTION(0x5, "i2c3"), /* SCK */
+ SUNXI_FUNCTION(0x6, "pll"), /* DBG-CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 6)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi0"), /* HOLD */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D3 */
+ SUNXI_FUNCTION(0x4, "uart3"), /* RX */
+ SUNXI_FUNCTION(0x5, "i2c3"), /* SDA */
+ SUNXI_FUNCTION(0x6, "tcon"), /* TRIG0 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 1, 7)),
+ /* PD */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D2 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* V0P */
+ SUNXI_FUNCTION(0x4, "dsi"), /* D0P */
+ SUNXI_FUNCTION(0x5, "i2c0"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 0)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D3 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* V0N */
+ SUNXI_FUNCTION(0x4, "dsi"), /* D0N */
+ SUNXI_FUNCTION(0x5, "uart2"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 1)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D4 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* V1P */
+ SUNXI_FUNCTION(0x4, "dsi"), /* D1P */
+ SUNXI_FUNCTION(0x5, "uart2"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 2)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D5 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* V1N */
+ SUNXI_FUNCTION(0x4, "dsi"), /* D1N */
+ SUNXI_FUNCTION(0x5, "uart2"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 3)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D6 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* V2P */
+ SUNXI_FUNCTION(0x4, "dsi"), /* CKP */
+ SUNXI_FUNCTION(0x5, "uart2"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 4)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D7 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* V2N */
+ SUNXI_FUNCTION(0x4, "dsi"), /* CKN */
+ SUNXI_FUNCTION(0x5, "uart5"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 5)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D10 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* CKP */
+ SUNXI_FUNCTION(0x4, "dsi"), /* D2P */
+ SUNXI_FUNCTION(0x5, "uart5"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 6)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D11 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* CKN */
+ SUNXI_FUNCTION(0x4, "dsi"), /* D2N */
+ SUNXI_FUNCTION(0x5, "uart4"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 7)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D12 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* V3P */
+ SUNXI_FUNCTION(0x4, "dsi"), /* D3P */
+ SUNXI_FUNCTION(0x5, "uart4"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 8)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D13 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* V3N */
+ SUNXI_FUNCTION(0x4, "dsi"), /* D3N */
+ SUNXI_FUNCTION(0x5, "pwm6"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 9)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D14 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* V0P */
+ SUNXI_FUNCTION(0x4, "spi1"), /* CS0 */
+ SUNXI_FUNCTION(0x5, "uart3"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 10)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D15 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* V0N */
+ SUNXI_FUNCTION(0x4, "spi1"), /* CLK */
+ SUNXI_FUNCTION(0x5, "uart3"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 11)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D18 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* V1P */
+ SUNXI_FUNCTION(0x4, "spi1"), /* MOSI */
+ SUNXI_FUNCTION(0x5, "i2c0"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 12)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D19 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* V1N */
+ SUNXI_FUNCTION(0x4, "spi1"), /* MISO */
+ SUNXI_FUNCTION(0x5, "uart3"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 13)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D20 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* V2P */
+ SUNXI_FUNCTION(0x4, "spi1"), /* HOLD */
+ SUNXI_FUNCTION(0x5, "uart3"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 14)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D21 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* V2N */
+ SUNXI_FUNCTION(0x4, "spi1"), /* WP */
+ SUNXI_FUNCTION(0x5, "ir"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 15)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D22 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* CKP */
+ SUNXI_FUNCTION(0x4, "dmic"), /* DATA3 */
+ SUNXI_FUNCTION(0x5, "pwm0"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 16)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 17),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D23 */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* CKN */
+ SUNXI_FUNCTION(0x4, "dmic"), /* DATA2 */
+ SUNXI_FUNCTION(0x5, "pwm1"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 17)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 18),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* CLK */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* V3P */
+ SUNXI_FUNCTION(0x4, "dmic"), /* DATA1 */
+ SUNXI_FUNCTION(0x5, "pwm2"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 18)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 19),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* DE */
+ SUNXI_FUNCTION(0x3, "lvds1"), /* V3N */
+ SUNXI_FUNCTION(0x4, "dmic"), /* DATA0 */
+ SUNXI_FUNCTION(0x5, "pwm3"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 19)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 20),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* HSYNC */
+ SUNXI_FUNCTION(0x3, "i2c2"), /* SCK */
+ SUNXI_FUNCTION(0x4, "dmic"), /* CLK */
+ SUNXI_FUNCTION(0x5, "pwm4"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 20)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 21),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* VSYNC */
+ SUNXI_FUNCTION(0x3, "i2c2"), /* SDA */
+ SUNXI_FUNCTION(0x4, "uart1"), /* TX */
+ SUNXI_FUNCTION(0x5, "pwm5"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 21)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 22),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spdif"), /* OUT */
+ SUNXI_FUNCTION(0x3, "ir"), /* RX */
+ SUNXI_FUNCTION(0x4, "uart1"), /* RX */
+ SUNXI_FUNCTION(0x5, "pwm7"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 2, 22)),
+ /* PE */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* HSYNC */
+ SUNXI_FUNCTION(0x3, "uart2"), /* RTS */
+ SUNXI_FUNCTION(0x4, "i2c1"), /* SCK */
+ SUNXI_FUNCTION(0x5, "lcd0"), /* HSYNC */
+ SUNXI_FUNCTION(0x8, "emac"), /* RXCTL/CRS_DV */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 0)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* VSYNC */
+ SUNXI_FUNCTION(0x3, "uart2"), /* CTS */
+ SUNXI_FUNCTION(0x4, "i2c1"), /* SDA */
+ SUNXI_FUNCTION(0x5, "lcd0"), /* VSYNC */
+ SUNXI_FUNCTION(0x8, "emac"), /* RXD0 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 1)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* PCLK */
+ SUNXI_FUNCTION(0x3, "uart2"), /* TX */
+ SUNXI_FUNCTION(0x4, "i2c0"), /* SCK */
+ SUNXI_FUNCTION(0x5, "clk"), /* FANOUT0 */
+ SUNXI_FUNCTION(0x6, "uart0"), /* TX */
+ SUNXI_FUNCTION(0x8, "emac"), /* RXD1 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 2)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* MCLK */
+ SUNXI_FUNCTION(0x3, "uart2"), /* RX */
+ SUNXI_FUNCTION(0x4, "i2c0"), /* SDA */
+ SUNXI_FUNCTION(0x5, "clk"), /* FANOUT1 */
+ SUNXI_FUNCTION(0x6, "uart0"), /* RX */
+ SUNXI_FUNCTION(0x8, "emac"), /* TXCK */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 3)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* D0 */
+ SUNXI_FUNCTION(0x3, "uart4"), /* TX */
+ SUNXI_FUNCTION(0x4, "i2c2"), /* SCK */
+ SUNXI_FUNCTION(0x5, "clk"), /* FANOUT2 */
+ SUNXI_FUNCTION(0x6, "d_jtag"), /* MS */
+ SUNXI_FUNCTION(0x7, "r_jtag"), /* MS */
+ SUNXI_FUNCTION(0x8, "emac"), /* TXD0 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 4)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* D1 */
+ SUNXI_FUNCTION(0x3, "uart4"), /* RX */
+ SUNXI_FUNCTION(0x4, "i2c2"), /* SDA */
+ SUNXI_FUNCTION(0x5, "ledc"),
+ SUNXI_FUNCTION(0x6, "d_jtag"), /* DI */
+ SUNXI_FUNCTION(0x7, "r_jtag"), /* DI */
+ SUNXI_FUNCTION(0x8, "emac"), /* TXD1 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 5)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* D2 */
+ SUNXI_FUNCTION(0x3, "uart5"), /* TX */
+ SUNXI_FUNCTION(0x4, "i2c3"), /* SCK */
+ SUNXI_FUNCTION(0x5, "spdif"), /* IN */
+ SUNXI_FUNCTION(0x6, "d_jtag"), /* DO */
+ SUNXI_FUNCTION(0x7, "r_jtag"), /* DO */
+ SUNXI_FUNCTION(0x8, "emac"), /* TXCTL/TXEN */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 6)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* D3 */
+ SUNXI_FUNCTION(0x3, "uart5"), /* RX */
+ SUNXI_FUNCTION(0x4, "i2c3"), /* SDA */
+ SUNXI_FUNCTION(0x5, "spdif"), /* OUT */
+ SUNXI_FUNCTION(0x6, "d_jtag"), /* CK */
+ SUNXI_FUNCTION(0x7, "r_jtag"), /* CK */
+ SUNXI_FUNCTION(0x8, "emac"), /* CK */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 7)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* D4 */
+ SUNXI_FUNCTION(0x3, "uart1"), /* RTS */
+ SUNXI_FUNCTION(0x4, "pwm2"),
+ SUNXI_FUNCTION(0x5, "uart3"), /* TX */
+ SUNXI_FUNCTION(0x6, "jtag"), /* MS */
+ SUNXI_FUNCTION(0x8, "emac"), /* MDC */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 8)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* D5 */
+ SUNXI_FUNCTION(0x3, "uart1"), /* CTS */
+ SUNXI_FUNCTION(0x4, "pwm3"),
+ SUNXI_FUNCTION(0x5, "uart3"), /* RX */
+ SUNXI_FUNCTION(0x6, "jtag"), /* DI */
+ SUNXI_FUNCTION(0x8, "emac"), /* MDIO */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 9)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* D6 */
+ SUNXI_FUNCTION(0x3, "uart1"), /* TX */
+ SUNXI_FUNCTION(0x4, "pwm4"),
+ SUNXI_FUNCTION(0x5, "ir"), /* RX */
+ SUNXI_FUNCTION(0x6, "jtag"), /* DO */
+ SUNXI_FUNCTION(0x8, "emac"), /* EPHY-25M */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 10)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ncsi0"), /* D7 */
+ SUNXI_FUNCTION(0x3, "uart1"), /* RX */
+ SUNXI_FUNCTION(0x4, "i2s0_dout"), /* DOUT3 */
+ SUNXI_FUNCTION(0x5, "i2s0_din"), /* DIN3 */
+ SUNXI_FUNCTION(0x6, "jtag"), /* CK */
+ SUNXI_FUNCTION(0x8, "emac"), /* TXD2 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 11)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c2"), /* SCK */
+ SUNXI_FUNCTION(0x3, "ncsi0"), /* FIELD */
+ SUNXI_FUNCTION(0x4, "i2s0_dout"), /* DOUT2 */
+ SUNXI_FUNCTION(0x5, "i2s0_din"), /* DIN2 */
+ SUNXI_FUNCTION(0x8, "emac"), /* TXD3 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 12)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c2"), /* SDA */
+ SUNXI_FUNCTION(0x3, "pwm5"),
+ SUNXI_FUNCTION(0x4, "i2s0_dout"), /* DOUT0 */
+ SUNXI_FUNCTION(0x5, "i2s0_din"), /* DIN1 */
+ SUNXI_FUNCTION(0x6, "dmic"), /* DATA3 */
+ SUNXI_FUNCTION(0x8, "emac"), /* RXD2 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 13)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c1"), /* SCK */
+ SUNXI_FUNCTION(0x3, "d_jtag"), /* MS */
+ SUNXI_FUNCTION(0x4, "i2s0_dout"), /* DOUT1 */
+ SUNXI_FUNCTION(0x5, "i2s0_din"), /* DIN0 */
+ SUNXI_FUNCTION(0x6, "dmic"), /* DATA2 */
+ SUNXI_FUNCTION(0x8, "emac"), /* RXD3 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 14)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c1"), /* SDA */
+ SUNXI_FUNCTION(0x3, "d_jtag"), /* DI */
+ SUNXI_FUNCTION(0x4, "pwm6"),
+ SUNXI_FUNCTION(0x5, "i2s0"), /* LRCK */
+ SUNXI_FUNCTION(0x6, "dmic"), /* DATA1 */
+ SUNXI_FUNCTION(0x8, "emac"), /* RXCK */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 15)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c3"), /* SCK */
+ SUNXI_FUNCTION(0x3, "d_jtag"), /* DO */
+ SUNXI_FUNCTION(0x4, "pwm7"),
+ SUNXI_FUNCTION(0x5, "i2s0"), /* BCLK */
+ SUNXI_FUNCTION(0x6, "dmic"), /* DATA0 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 16)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 17),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c3"), /* SDA */
+ SUNXI_FUNCTION(0x3, "d_jtag"), /* CK */
+ SUNXI_FUNCTION(0x4, "ir"), /* TX */
+ SUNXI_FUNCTION(0x5, "i2s0"), /* MCLK */
+ SUNXI_FUNCTION(0x6, "dmic"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 3, 17)),
+ /* PF */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D1 */
+ SUNXI_FUNCTION(0x3, "jtag"), /* MS */
+ SUNXI_FUNCTION(0x4, "r_jtag"), /* MS */
+ SUNXI_FUNCTION(0x5, "i2s2_dout"), /* DOUT1 */
+ SUNXI_FUNCTION(0x6, "i2s2_din"), /* DIN0 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 4, 0)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D0 */
+ SUNXI_FUNCTION(0x3, "jtag"), /* DI */
+ SUNXI_FUNCTION(0x4, "r_jtag"), /* DI */
+ SUNXI_FUNCTION(0x5, "i2s2_dout"), /* DOUT0 */
+ SUNXI_FUNCTION(0x6, "i2s2_din"), /* DIN1 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 4, 1)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* CLK */
+ SUNXI_FUNCTION(0x3, "uart0"), /* TX */
+ SUNXI_FUNCTION(0x4, "i2c0"), /* SCK */
+ SUNXI_FUNCTION(0x5, "ledc"),
+ SUNXI_FUNCTION(0x6, "spdif"), /* IN */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 4, 2)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* CMD */
+ SUNXI_FUNCTION(0x3, "jtag"), /* DO */
+ SUNXI_FUNCTION(0x4, "r_jtag"), /* DO */
+ SUNXI_FUNCTION(0x5, "i2s2"), /* BCLK */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 4, 3)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D3 */
+ SUNXI_FUNCTION(0x3, "uart0"), /* RX */
+ SUNXI_FUNCTION(0x4, "i2c0"), /* SDA */
+ SUNXI_FUNCTION(0x5, "pwm6"),
+ SUNXI_FUNCTION(0x6, "ir"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 4, 4)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D2 */
+ SUNXI_FUNCTION(0x3, "jtag"), /* CK */
+ SUNXI_FUNCTION(0x4, "r_jtag"), /* CK */
+ SUNXI_FUNCTION(0x5, "i2s2"), /* LRCK */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 4, 5)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "spdif"), /* OUT */
+ SUNXI_FUNCTION(0x4, "ir"), /* RX */
+ SUNXI_FUNCTION(0x5, "i2s2"), /* MCLK */
+ SUNXI_FUNCTION(0x6, "pwm5"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 4, 6)),
+ /* PG */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* CLK */
+ SUNXI_FUNCTION(0x3, "uart3"), /* TX */
+ SUNXI_FUNCTION(0x4, "emac"), /* RXCTRL/CRS_DV */
+ SUNXI_FUNCTION(0x5, "pwm7"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 0)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* CMD */
+ SUNXI_FUNCTION(0x3, "uart3"), /* RX */
+ SUNXI_FUNCTION(0x4, "emac"), /* RXD0 */
+ SUNXI_FUNCTION(0x5, "pwm6"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 1)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D0 */
+ SUNXI_FUNCTION(0x3, "uart3"), /* RTS */
+ SUNXI_FUNCTION(0x4, "emac"), /* RXD1 */
+ SUNXI_FUNCTION(0x5, "uart4"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 2)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D1 */
+ SUNXI_FUNCTION(0x3, "uart3"), /* CTS */
+ SUNXI_FUNCTION(0x4, "emac"), /* TXCK */
+ SUNXI_FUNCTION(0x5, "uart4"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 3)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D2 */
+ SUNXI_FUNCTION(0x3, "uart5"), /* TX */
+ SUNXI_FUNCTION(0x4, "emac"), /* TXD0 */
+ SUNXI_FUNCTION(0x5, "pwm5"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 4)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D3 */
+ SUNXI_FUNCTION(0x3, "uart5"), /* RX */
+ SUNXI_FUNCTION(0x4, "emac"), /* TXD1 */
+ SUNXI_FUNCTION(0x5, "pwm4"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 5)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart1"), /* TX */
+ SUNXI_FUNCTION(0x3, "i2c2"), /* SCK */
+ SUNXI_FUNCTION(0x4, "emac"), /* TXD2 */
+ SUNXI_FUNCTION(0x5, "pwm1"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 6)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart1"), /* RX */
+ SUNXI_FUNCTION(0x3, "i2c2"), /* SDA */
+ SUNXI_FUNCTION(0x4, "emac"), /* TXD3 */
+ SUNXI_FUNCTION(0x5, "spdif"), /* IN */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 7)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart1"), /* RTS */
+ SUNXI_FUNCTION(0x3, "i2c1"), /* SCK */
+ SUNXI_FUNCTION(0x4, "emac"), /* RXD2 */
+ SUNXI_FUNCTION(0x5, "uart3"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 8)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart1"), /* CTS */
+ SUNXI_FUNCTION(0x3, "i2c1"), /* SDA */
+ SUNXI_FUNCTION(0x4, "emac"), /* RXD3 */
+ SUNXI_FUNCTION(0x5, "uart3"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 9)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "pwm3"),
+ SUNXI_FUNCTION(0x3, "i2c3"), /* SCK */
+ SUNXI_FUNCTION(0x4, "emac"), /* RXCK */
+ SUNXI_FUNCTION(0x5, "clk"), /* FANOUT0 */
+ SUNXI_FUNCTION(0x6, "ir"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 10)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s1"), /* MCLK */
+ SUNXI_FUNCTION(0x3, "i2c3"), /* SDA */
+ SUNXI_FUNCTION(0x4, "emac"), /* EPHY-25M */
+ SUNXI_FUNCTION(0x5, "clk"), /* FANOUT1 */
+ SUNXI_FUNCTION(0x6, "tcon"), /* TRIG0 */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 11)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s1"), /* LRCK */
+ SUNXI_FUNCTION(0x3, "i2c0"), /* SCK */
+ SUNXI_FUNCTION(0x4, "emac"), /* TXCTL/TXEN */
+ SUNXI_FUNCTION(0x5, "clk"), /* FANOUT2 */
+ SUNXI_FUNCTION(0x6, "pwm0"),
+ SUNXI_FUNCTION(0x7, "uart1"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 12)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s1"), /* BCLK */
+ SUNXI_FUNCTION(0x3, "i2c0"), /* SDA */
+ SUNXI_FUNCTION(0x4, "emac"), /* CLKIN/RXER */
+ SUNXI_FUNCTION(0x5, "pwm2"),
+ SUNXI_FUNCTION(0x6, "ledc"),
+ SUNXI_FUNCTION(0x7, "uart1"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 13)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s1_din"), /* DIN0 */
+ SUNXI_FUNCTION(0x3, "i2c2"), /* SCK */
+ SUNXI_FUNCTION(0x4, "emac"), /* MDC */
+ SUNXI_FUNCTION(0x5, "i2s1_dout"), /* DOUT1 */
+ SUNXI_FUNCTION(0x6, "spi0"), /* WP */
+ SUNXI_FUNCTION(0x7, "uart1"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 14)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s1_dout"), /* DOUT0 */
+ SUNXI_FUNCTION(0x3, "i2c2"), /* SDA */
+ SUNXI_FUNCTION(0x4, "emac"), /* MDIO */
+ SUNXI_FUNCTION(0x5, "i2s1_din"), /* DIN1 */
+ SUNXI_FUNCTION(0x6, "spi0"), /* HOLD */
+ SUNXI_FUNCTION(0x7, "uart1"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 15)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ir"), /* RX */
+ SUNXI_FUNCTION(0x3, "tcon"), /* TRIG0 */
+ SUNXI_FUNCTION(0x4, "pwm5"),
+ SUNXI_FUNCTION(0x5, "clk"), /* FANOUT2 */
+ SUNXI_FUNCTION(0x6, "spdif"), /* IN */
+ SUNXI_FUNCTION(0x7, "ledc"),
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 16)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 17),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* TX */
+ SUNXI_FUNCTION(0x3, "i2c3"), /* SCK */
+ SUNXI_FUNCTION(0x4, "pwm7"),
+ SUNXI_FUNCTION(0x5, "clk"), /* FANOUT0 */
+ SUNXI_FUNCTION(0x6, "ir"), /* TX */
+ SUNXI_FUNCTION(0x7, "uart0"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 17)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 18),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* RX */
+ SUNXI_FUNCTION(0x3, "i2c3"), /* SDA */
+ SUNXI_FUNCTION(0x4, "pwm6"),
+ SUNXI_FUNCTION(0x5, "clk"), /* FANOUT1 */
+ SUNXI_FUNCTION(0x6, "spdif"), /* OUT */
+ SUNXI_FUNCTION(0x7, "uart0"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0xe, 5, 18)),
+};
+
+static const unsigned int d1_irq_bank_map[] = { 1, 2, 3, 4, 5, 6 };
+
+static const struct sunxi_pinctrl_desc d1_pinctrl_data = {
+ .pins = d1_pins,
+ .npins = ARRAY_SIZE(d1_pins),
+ .irq_banks = ARRAY_SIZE(d1_irq_bank_map),
+ .irq_bank_map = d1_irq_bank_map,
+ .io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_CTL,
+};
+
+static int d1_pinctrl_probe(struct platform_device *pdev)
+{
+ unsigned long variant = (unsigned long)of_device_get_match_data(&pdev->dev);
+
+ return sunxi_pinctrl_init_with_variant(pdev, &d1_pinctrl_data, variant);
+}
+
+static const struct of_device_id d1_pinctrl_match[] = {
+ {
+ .compatible = "allwinner,sun20i-d1-pinctrl",
+ .data = (void *)PINCTRL_SUN20I_D1
+ },
+ {}
+};
+
+static struct platform_driver d1_pinctrl_driver = {
+ .probe = d1_pinctrl_probe,
+ .driver = {
+ .name = "sun20i-d1-pinctrl",
+ .of_match_table = d1_pinctrl_match,
+ },
+};
+builtin_platform_driver(d1_pinctrl_driver);
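As the pin table above shows, the D1 exposes banks PB through PG only, and every SUNXI_FUNCTION_IRQ_BANK() entry numbers its IRQ bank 0..5 in that same order; d1_irq_bank_map[] then translates those logical IRQ banks back to hardware GPIO bank indices 1..6 (PA would be 0). A small sketch of that translation, mirroring the table values:

#include <stdio.h>

/* Same values as d1_irq_bank_map[] above. */
static const int irq_bank_map[] = { 1, 2, 3, 4, 5, 6 };

int main(void)
{
	for (int i = 0; i < 6; i++)
		printf("IRQ bank %d -> P%c\n", i, 'A' + irq_bank_map[i]);
	return 0;
}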
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c
index 21054fcacd34..afc1f5df7545 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c
@@ -82,6 +82,7 @@ static const struct sunxi_pinctrl_desc a100_r_pinctrl_data = {
.npins = ARRAY_SIZE(a100_r_pins),
.pin_base = PL_BASE,
.irq_banks = 1,
+ .io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_CTL,
};
static int a100_r_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c
index e69f6da40dc0..f682e0e4244d 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c
@@ -684,7 +684,7 @@ static const struct sunxi_pinctrl_desc a100_pinctrl_data = {
.npins = ARRAY_SIZE(a100_pins),
.irq_banks = 7,
.irq_bank_map = a100_irq_bank_map,
- .io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_SEL,
+ .io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_CTL,
};
static int a100_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-a64-r.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-a64-r.c
index e69c8dae121a..ef261eccda56 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-a64-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-a64-r.c
@@ -24,7 +24,6 @@
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
-#include <linux/reset.h>
#include "pinctrl-sunxi.h"
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c
index c7d90c44e87a..3aba0aec3d78 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c
@@ -16,7 +16,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
-#include <linux/reset.h>
#include "pinctrl-sunxi.h"
@@ -107,6 +106,7 @@ static const struct sunxi_pinctrl_desc sun50i_h6_r_pinctrl_data = {
.npins = ARRAY_SIZE(sun50i_h6_r_pins),
.pin_base = PL_BASE,
.irq_banks = 2,
+ .io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_SEL,
};
static int sun50i_h6_r_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h616-r.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h616-r.c
index 8e4f10ab96ce..c39ea46046c2 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h616-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h616-r.c
@@ -12,7 +12,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
-#include <linux/reset.h>
#include "pinctrl-sunxi.h"
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c
index 152b71226a80..d6ca720ee8d8 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c
@@ -525,7 +525,7 @@ static const struct sunxi_pinctrl_desc h616_pinctrl_data = {
.irq_banks = ARRAY_SIZE(h616_irq_bank_map),
.irq_bank_map = h616_irq_bank_map,
.irq_read_needs_mux = true,
- .io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_SEL,
+ .io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_CTL,
};
static int h616_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c
index a00246d3dd49..2486cdf345e1 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c
@@ -17,7 +17,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
-#include <linux/reset.h>
#include "pinctrl-sunxi.h"
@@ -111,26 +110,7 @@ static const struct sunxi_pinctrl_desc sun6i_a31_r_pinctrl_data = {
static int sun6i_a31_r_pinctrl_probe(struct platform_device *pdev)
{
- struct reset_control *rstc;
- int ret;
-
- rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
- if (IS_ERR(rstc)) {
- dev_err(&pdev->dev, "Reset controller missing\n");
- return PTR_ERR(rstc);
- }
-
- ret = reset_control_deassert(rstc);
- if (ret)
- return ret;
-
- ret = sunxi_pinctrl_init(pdev,
- &sun6i_a31_r_pinctrl_data);
-
- if (ret)
- reset_control_assert(rstc);
-
- return ret;
+ return sunxi_pinctrl_init(pdev, &sun6i_a31_r_pinctrl_data);
}
static const struct of_device_id sun6i_a31_r_pinctrl_match[] = {
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c
index 9e5b61449999..4fae12c905b7 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c
@@ -20,7 +20,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
-#include <linux/reset.h>
#include "pinctrl-sunxi.h"
@@ -98,29 +97,7 @@ static const struct sunxi_pinctrl_desc sun8i_a23_r_pinctrl_data = {
static int sun8i_a23_r_pinctrl_probe(struct platform_device *pdev)
{
- struct reset_control *rstc;
- int ret;
-
- rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
- if (IS_ERR(rstc)) {
- ret = PTR_ERR(rstc);
- if (ret == -EPROBE_DEFER)
- return ret;
- dev_err(&pdev->dev, "Reset controller missing err=%d\n", ret);
- return ret;
- }
-
- ret = reset_control_deassert(rstc);
- if (ret)
- return ret;
-
- ret = sunxi_pinctrl_init(pdev,
- &sun8i_a23_r_pinctrl_data);
-
- if (ret)
- reset_control_assert(rstc);
-
- return ret;
+ return sunxi_pinctrl_init(pdev, &sun8i_a23_r_pinctrl_data);
}
static const struct of_device_id sun8i_a23_r_pinctrl_match[] = {
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t-r.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t-r.c
index 6531cf67958e..0cb6c1a970c9 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t-r.c
@@ -27,7 +27,6 @@
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
-#include <linux/reset.h>
#include "pinctrl-sunxi.h"
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c
index a191a65217ac..f11cb5bba0f7 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c
@@ -14,7 +14,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
-#include <linux/reset.h>
#include "pinctrl-sunxi.h"
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index dd928402af99..6c04027d0dd9 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -46,6 +46,67 @@ static struct lock_class_key sunxi_pinctrl_irq_request_class;
static struct irq_chip sunxi_pinctrl_edge_irq_chip;
static struct irq_chip sunxi_pinctrl_level_irq_chip;
+/*
+ * The sunXi PIO registers are organized as a series of banks, with registers
+ * for each bank in the following order:
+ * - Mux config
+ * - Data value
+ * - Drive level
+ * - Pull direction
+ *
+ * Multiple consecutive registers are used for fields wider than one bit.
+ *
+ * The following functions calculate the register and the bit offset to access.
+ * They take a pin number which is relative to the start of the current device.
+ */
+static void sunxi_mux_reg(const struct sunxi_pinctrl *pctl,
+ u32 pin, u32 *reg, u32 *shift, u32 *mask)
+{
+ u32 bank = pin / PINS_PER_BANK;
+ u32 offset = pin % PINS_PER_BANK * MUX_FIELD_WIDTH;
+
+ *reg = bank * pctl->bank_mem_size + MUX_REGS_OFFSET +
+ offset / BITS_PER_TYPE(u32) * sizeof(u32);
+ *shift = offset % BITS_PER_TYPE(u32);
+ *mask = (BIT(MUX_FIELD_WIDTH) - 1) << *shift;
+}
+
+static void sunxi_data_reg(const struct sunxi_pinctrl *pctl,
+ u32 pin, u32 *reg, u32 *shift, u32 *mask)
+{
+ u32 bank = pin / PINS_PER_BANK;
+ u32 offset = pin % PINS_PER_BANK * DATA_FIELD_WIDTH;
+
+ *reg = bank * pctl->bank_mem_size + DATA_REGS_OFFSET +
+ offset / BITS_PER_TYPE(u32) * sizeof(u32);
+ *shift = offset % BITS_PER_TYPE(u32);
+ *mask = (BIT(DATA_FIELD_WIDTH) - 1) << *shift;
+}
+
+static void sunxi_dlevel_reg(const struct sunxi_pinctrl *pctl,
+ u32 pin, u32 *reg, u32 *shift, u32 *mask)
+{
+ u32 bank = pin / PINS_PER_BANK;
+ u32 offset = pin % PINS_PER_BANK * pctl->dlevel_field_width;
+
+ *reg = bank * pctl->bank_mem_size + DLEVEL_REGS_OFFSET +
+ offset / BITS_PER_TYPE(u32) * sizeof(u32);
+ *shift = offset % BITS_PER_TYPE(u32);
+ *mask = (BIT(pctl->dlevel_field_width) - 1) << *shift;
+}
+
+static void sunxi_pull_reg(const struct sunxi_pinctrl *pctl,
+ u32 pin, u32 *reg, u32 *shift, u32 *mask)
+{
+ u32 bank = pin / PINS_PER_BANK;
+ u32 offset = pin % PINS_PER_BANK * PULL_FIELD_WIDTH;
+
+ *reg = bank * pctl->bank_mem_size + pctl->pull_regs_offset +
+ offset / BITS_PER_TYPE(u32) * sizeof(u32);
+ *shift = offset % BITS_PER_TYPE(u32);
+ *mask = (BIT(PULL_FIELD_WIDTH) - 1) << *shift;
+}
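All four helpers above share the same arithmetic: pick the bank, multiply the in-bank pin index by the field width, then split the resulting bit offset into a 32-bit register offset and a shift inside it. The stand-alone sketch below redoes that calculation for a mux field; the constants (32 pins per bank, 4-bit mux field, 0x30 bytes of registers per bank) are illustrative assumptions, the real values come from pinctrl-sunxi.h and the per-SoC description.

#include <stdint.h>
#include <stdio.h>

#define EX_PINS_PER_BANK	32
#define EX_MUX_FIELD_WIDTH	4	/* bits of mux selection per pin */
#define EX_BANK_MEM_SIZE	0x30	/* bytes of registers per bank */
#define EX_MUX_REGS_OFFSET	0

int main(void)
{
	uint32_t pin = 37;	/* 6th pin of the second bank */
	uint32_t bank = pin / EX_PINS_PER_BANK;
	uint32_t bit = pin % EX_PINS_PER_BANK * EX_MUX_FIELD_WIDTH;
	uint32_t reg = bank * EX_BANK_MEM_SIZE + EX_MUX_REGS_OFFSET +
		       bit / 32 * 4;
	uint32_t shift = bit % 32;
	uint32_t mask = ((1u << EX_MUX_FIELD_WIDTH) - 1) << shift;

	printf("reg=%#x shift=%u mask=%#x\n", reg, shift, mask);	/* 0x30, 20, 0xf00000 */
	return 0;
}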
+
static struct sunxi_pinctrl_group *
sunxi_pinctrl_find_group_by_name(struct sunxi_pinctrl *pctl, const char *group)
{
@@ -451,22 +512,19 @@ static const struct pinctrl_ops sunxi_pctrl_ops = {
.get_group_pins = sunxi_pctrl_get_group_pins,
};
-static int sunxi_pconf_reg(unsigned pin, enum pin_config_param param,
- u32 *offset, u32 *shift, u32 *mask)
+static int sunxi_pconf_reg(const struct sunxi_pinctrl *pctl,
+ u32 pin, enum pin_config_param param,
+ u32 *reg, u32 *shift, u32 *mask)
{
switch (param) {
case PIN_CONFIG_DRIVE_STRENGTH:
- *offset = sunxi_dlevel_reg(pin);
- *shift = sunxi_dlevel_offset(pin);
- *mask = DLEVEL_PINS_MASK;
+ sunxi_dlevel_reg(pctl, pin, reg, shift, mask);
break;
case PIN_CONFIG_BIAS_PULL_UP:
case PIN_CONFIG_BIAS_PULL_DOWN:
case PIN_CONFIG_BIAS_DISABLE:
- *offset = sunxi_pull_reg(pin);
- *shift = sunxi_pull_offset(pin);
- *mask = PULL_PINS_MASK;
+ sunxi_pull_reg(pctl, pin, reg, shift, mask);
break;
default:
@@ -481,17 +539,17 @@ static int sunxi_pconf_get(struct pinctrl_dev *pctldev, unsigned pin,
{
struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
enum pin_config_param param = pinconf_to_config_param(*config);
- u32 offset, shift, mask, val;
+ u32 reg, shift, mask, val;
u16 arg;
int ret;
pin -= pctl->desc->pin_base;
- ret = sunxi_pconf_reg(pin, param, &offset, &shift, &mask);
+ ret = sunxi_pconf_reg(pctl, pin, param, &reg, &shift, &mask);
if (ret < 0)
return ret;
- val = (readl(pctl->membase + offset) >> shift) & mask;
+ val = (readl(pctl->membase + reg) & mask) >> shift;
switch (pinconf_to_config_param(*config)) {
case PIN_CONFIG_DRIVE_STRENGTH:
@@ -547,16 +605,15 @@ static int sunxi_pconf_set(struct pinctrl_dev *pctldev, unsigned pin,
pin -= pctl->desc->pin_base;
for (i = 0; i < num_configs; i++) {
+ u32 arg, reg, shift, mask, val;
enum pin_config_param param;
unsigned long flags;
- u32 offset, shift, mask, reg;
- u32 arg, val;
int ret;
param = pinconf_to_config_param(configs[i]);
arg = pinconf_to_config_argument(configs[i]);
- ret = sunxi_pconf_reg(pin, param, &offset, &shift, &mask);
+ ret = sunxi_pconf_reg(pctl, pin, param, &reg, &shift, &mask);
if (ret < 0)
return ret;
@@ -593,9 +650,8 @@ static int sunxi_pconf_set(struct pinctrl_dev *pctldev, unsigned pin,
}
raw_spin_lock_irqsave(&pctl->lock, flags);
- reg = readl(pctl->membase + offset);
- reg &= ~(mask << shift);
- writel(reg | val << shift, pctl->membase + offset);
+ writel((readl(pctl->membase + reg) & ~mask) | val << shift,
+ pctl->membase + reg);
raw_spin_unlock_irqrestore(&pctl->lock, flags);
} /* for each config */
@@ -624,7 +680,7 @@ static int sunxi_pinctrl_set_io_bias_cfg(struct sunxi_pinctrl *pctl,
unsigned pin,
struct regulator *supply)
{
- unsigned short bank = pin / PINS_PER_BANK;
+ unsigned short bank;
unsigned long flags;
u32 val, reg;
int uV;
@@ -640,6 +696,9 @@ static int sunxi_pinctrl_set_io_bias_cfg(struct sunxi_pinctrl *pctl,
if (uV == 0)
return 0;
+ pin -= pctl->desc->pin_base;
+ bank = pin / PINS_PER_BANK;
+
switch (pctl->desc->io_bias_cfg_variant) {
case BIAS_VOLTAGE_GRP_CONFIG:
/*
@@ -657,12 +716,20 @@ static int sunxi_pinctrl_set_io_bias_cfg(struct sunxi_pinctrl *pctl,
else
val = 0xD; /* 3.3V */
- pin -= pctl->desc->pin_base;
-
reg = readl(pctl->membase + sunxi_grp_config_reg(pin));
reg &= ~IO_BIAS_MASK;
writel(reg | val, pctl->membase + sunxi_grp_config_reg(pin));
return 0;
+ case BIAS_VOLTAGE_PIO_POW_MODE_CTL:
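+ /*
+ * Set this bank's bit in PIO_POW_MOD_CTL_REG for supplies above 1.8V
+ * and up to 2.5V, then fall through to the 1.8V/3.3V mode selection
+ * below.
+ */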
+ val = uV > 1800000 && uV <= 2500000 ? BIT(bank) : 0;
+
+ raw_spin_lock_irqsave(&pctl->lock, flags);
+ reg = readl(pctl->membase + PIO_POW_MOD_CTL_REG);
+ reg &= ~BIT(bank);
+ writel(reg | val, pctl->membase + PIO_POW_MOD_CTL_REG);
+ raw_spin_unlock_irqrestore(&pctl->lock, flags);
+
+ fallthrough;
case BIAS_VOLTAGE_PIO_POW_MODE_SEL:
val = uV <= 1800000 ? 1 : 0;
@@ -710,16 +777,16 @@ static void sunxi_pmx_set(struct pinctrl_dev *pctldev,
u8 config)
{
struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ u32 reg, shift, mask;
unsigned long flags;
- u32 val, mask;
+
+ pin -= pctl->desc->pin_base;
+ sunxi_mux_reg(pctl, pin, &reg, &shift, &mask);
raw_spin_lock_irqsave(&pctl->lock, flags);
- pin -= pctl->desc->pin_base;
- val = readl(pctl->membase + sunxi_mux_reg(pin));
- mask = MUX_PINS_MASK << sunxi_mux_offset(pin);
- writel((val & ~mask) | config << sunxi_mux_offset(pin),
- pctl->membase + sunxi_mux_reg(pin));
+ writel((readl(pctl->membase + reg) & ~mask) | config << shift,
+ pctl->membase + reg);
raw_spin_unlock_irqrestore(&pctl->lock, flags);
}
@@ -852,43 +919,43 @@ static int sunxi_pinctrl_gpio_direction_input(struct gpio_chip *chip,
static int sunxi_pinctrl_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct sunxi_pinctrl *pctl = gpiochip_get_data(chip);
- u32 reg = sunxi_data_reg(offset);
- u8 index = sunxi_data_offset(offset);
bool set_mux = pctl->desc->irq_read_needs_mux &&
gpiochip_line_is_irq(chip, offset);
u32 pin = offset + chip->base;
- u32 val;
+ u32 reg, shift, mask, val;
+
+ sunxi_data_reg(pctl, offset, &reg, &shift, &mask);
if (set_mux)
sunxi_pmx_set(pctl->pctl_dev, pin, SUN4I_FUNC_INPUT);
- val = (readl(pctl->membase + reg) >> index) & DATA_PINS_MASK;
+ val = (readl(pctl->membase + reg) & mask) >> shift;
if (set_mux)
sunxi_pmx_set(pctl->pctl_dev, pin, SUN4I_FUNC_IRQ);
- return !!val;
+ return val;
}
static void sunxi_pinctrl_gpio_set(struct gpio_chip *chip,
unsigned offset, int value)
{
struct sunxi_pinctrl *pctl = gpiochip_get_data(chip);
- u32 reg = sunxi_data_reg(offset);
- u8 index = sunxi_data_offset(offset);
+ u32 reg, shift, mask, val;
unsigned long flags;
- u32 regval;
+
+ sunxi_data_reg(pctl, offset, &reg, &shift, &mask);
raw_spin_lock_irqsave(&pctl->lock, flags);
- regval = readl(pctl->membase + reg);
+ val = readl(pctl->membase + reg);
if (value)
- regval |= BIT(index);
+ val |= mask;
else
- regval &= ~(BIT(index));
+ val &= ~mask;
- writel(regval, pctl->membase + reg);
+ writel(val, pctl->membase + reg);
raw_spin_unlock_irqrestore(&pctl->lock, flags);
}
@@ -1232,11 +1299,11 @@ static int sunxi_pinctrl_build_state(struct platform_device *pdev)
/*
* Find an upper bound for the maximum number of functions: in
- * the worst case we have gpio_in, gpio_out, irq and up to four
+ * the worst case we have gpio_in, gpio_out, irq and up to seven
* special functions per pin, plus one entry for the sentinel.
* We'll reallocate that later anyway.
*/
- pctl->functions = kcalloc(4 * pctl->ngroups + 4,
+ pctl->functions = kcalloc(7 * pctl->ngroups + 4,
sizeof(*pctl->functions),
GFP_KERNEL);
if (!pctl->functions)
@@ -1429,6 +1496,15 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
pctl->dev = &pdev->dev;
pctl->desc = desc;
pctl->variant = variant;
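+ /*
+ * Variants from the D1 onwards use the updated register layout: a larger
+ * per-bank stride, a different pull register offset and wider drive-level
+ * fields.
+ */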
+ if (pctl->variant >= PINCTRL_SUN20I_D1) {
+ pctl->bank_mem_size = D1_BANK_MEM_SIZE;
+ pctl->pull_regs_offset = D1_PULL_REGS_OFFSET;
+ pctl->dlevel_field_width = D1_DLEVEL_FIELD_WIDTH;
+ } else {
+ pctl->bank_mem_size = BANK_MEM_SIZE;
+ pctl->pull_regs_offset = PULL_REGS_OFFSET;
+ pctl->dlevel_field_width = DLEVEL_FIELD_WIDTH;
+ }
pctl->irq_array = devm_kcalloc(&pdev->dev,
IRQ_PER_BANK * pctl->desc->irq_banks,
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
index a32bb5bcb754..a87a2f944d60 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
@@ -36,23 +36,19 @@
#define BANK_MEM_SIZE 0x24
#define MUX_REGS_OFFSET 0x0
+#define MUX_FIELD_WIDTH 4
#define DATA_REGS_OFFSET 0x10
+#define DATA_FIELD_WIDTH 1
#define DLEVEL_REGS_OFFSET 0x14
+#define DLEVEL_FIELD_WIDTH 2
#define PULL_REGS_OFFSET 0x1c
+#define PULL_FIELD_WIDTH 2
+
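+/*
+ * Register layout parameters for D1 and newer variants: 0x30 bytes per bank,
+ * 4-bit drive-level fields and pull registers at offset 0x24. For example,
+ * pin 35 (bank 1, in-bank bit offset 3 * 4 = 12) has its drive level at
+ * 1 * 0x30 + 0x14 = 0x44, bits 15:12.
+ */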
+#define D1_BANK_MEM_SIZE 0x30
+#define D1_DLEVEL_FIELD_WIDTH 4
+#define D1_PULL_REGS_OFFSET 0x24
#define PINS_PER_BANK 32
-#define MUX_PINS_PER_REG 8
-#define MUX_PINS_BITS 4
-#define MUX_PINS_MASK 0x0f
-#define DATA_PINS_PER_REG 32
-#define DATA_PINS_BITS 1
-#define DATA_PINS_MASK 0x01
-#define DLEVEL_PINS_PER_REG 16
-#define DLEVEL_PINS_BITS 2
-#define DLEVEL_PINS_MASK 0x03
-#define PULL_PINS_PER_REG 16
-#define PULL_PINS_BITS 2
-#define PULL_PINS_MASK 0x03
#define IRQ_PER_BANK 32
@@ -96,8 +92,11 @@
#define PINCTRL_SUN8I_R40 BIT(8)
#define PINCTRL_SUN8I_V3 BIT(9)
#define PINCTRL_SUN8I_V3S BIT(10)
+/* Variants below here have an updated register layout. */
+#define PINCTRL_SUN20I_D1 BIT(11)
#define PIO_POW_MOD_SEL_REG 0x340
+#define PIO_POW_MOD_CTL_REG 0x344
enum sunxi_desc_bias_voltage {
BIAS_VOLTAGE_NONE,
@@ -111,6 +110,12 @@ enum sunxi_desc_bias_voltage {
* register, as seen on H6 SoC, for example.
*/
BIAS_VOLTAGE_PIO_POW_MODE_SEL,
+ /*
+ * Bias voltage is set through PIO_POW_MOD_SEL_REG
+ * and PIO_POW_MOD_CTL_REG registers, as seen on
+ * the A100 and D1 SoCs, for example.
+ */
+ BIAS_VOLTAGE_PIO_POW_MODE_CTL,
};
struct sunxi_desc_function {
@@ -170,6 +175,9 @@ struct sunxi_pinctrl {
raw_spinlock_t lock;
struct pinctrl_dev *pctl_dev;
unsigned long variant;
+ u32 bank_mem_size;
+ u32 pull_regs_offset;
+ u32 dlevel_field_width;
};
#define SUNXI_PIN(_pin, ...) \
@@ -215,83 +223,6 @@ struct sunxi_pinctrl {
.irqnum = _irq, \
}
-/*
- * The sunXi PIO registers are organized as is:
- * 0x00 - 0x0c Muxing values.
- * 8 pins per register, each pin having a 4bits value
- * 0x10 Pin values
- * 32 bits per register, each pin corresponding to one bit
- * 0x14 - 0x18 Drive level
- * 16 pins per register, each pin having a 2bits value
- * 0x1c - 0x20 Pull-Up values
- * 16 pins per register, each pin having a 2bits value
- *
- * This is for the first bank. Each bank will have the same layout,
- * with an offset being a multiple of 0x24.
- *
- * The following functions calculate from the pin number the register
- * and the bit offset that we should access.
- */
-static inline u32 sunxi_mux_reg(u16 pin)
-{
- u8 bank = pin / PINS_PER_BANK;
- u32 offset = bank * BANK_MEM_SIZE;
- offset += MUX_REGS_OFFSET;
- offset += pin % PINS_PER_BANK / MUX_PINS_PER_REG * 0x04;
- return round_down(offset, 4);
-}
-
-static inline u32 sunxi_mux_offset(u16 pin)
-{
- u32 pin_num = pin % MUX_PINS_PER_REG;
- return pin_num * MUX_PINS_BITS;
-}
-
-static inline u32 sunxi_data_reg(u16 pin)
-{
- u8 bank = pin / PINS_PER_BANK;
- u32 offset = bank * BANK_MEM_SIZE;
- offset += DATA_REGS_OFFSET;
- offset += pin % PINS_PER_BANK / DATA_PINS_PER_REG * 0x04;
- return round_down(offset, 4);
-}
-
-static inline u32 sunxi_data_offset(u16 pin)
-{
- u32 pin_num = pin % DATA_PINS_PER_REG;
- return pin_num * DATA_PINS_BITS;
-}
-
-static inline u32 sunxi_dlevel_reg(u16 pin)
-{
- u8 bank = pin / PINS_PER_BANK;
- u32 offset = bank * BANK_MEM_SIZE;
- offset += DLEVEL_REGS_OFFSET;
- offset += pin % PINS_PER_BANK / DLEVEL_PINS_PER_REG * 0x04;
- return round_down(offset, 4);
-}
-
-static inline u32 sunxi_dlevel_offset(u16 pin)
-{
- u32 pin_num = pin % DLEVEL_PINS_PER_REG;
- return pin_num * DLEVEL_PINS_BITS;
-}
-
-static inline u32 sunxi_pull_reg(u16 pin)
-{
- u8 bank = pin / PINS_PER_BANK;
- u32 offset = bank * BANK_MEM_SIZE;
- offset += PULL_REGS_OFFSET;
- offset += pin % PINS_PER_BANK / PULL_PINS_PER_REG * 0x04;
- return round_down(offset, 4);
-}
-
-static inline u32 sunxi_pull_offset(u16 pin)
-{
- u32 pin_num = pin % PULL_PINS_PER_REG;
- return pin_num * PULL_PINS_BITS;
-}
-
static inline u32 sunxi_irq_hw_bank_num(const struct sunxi_pinctrl_desc *desc, u8 bank)
{
if (!desc->irq_bank_map)
diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig
index 18fc6a08569e..b437847b6237 100644
--- a/drivers/platform/Kconfig
+++ b/drivers/platform/Kconfig
@@ -1,7 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-if X86
-source "drivers/platform/x86/Kconfig"
-endif
if MIPS
source "drivers/platform/mips/Kconfig"
endif
@@ -15,3 +12,5 @@ source "drivers/platform/mellanox/Kconfig"
source "drivers/platform/olpc/Kconfig"
source "drivers/platform/surface/Kconfig"
+
+source "drivers/platform/x86/Kconfig"
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index 717299cbccac..c45fb376d653 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -139,7 +139,7 @@ config CROS_EC_PROTO
config CROS_KBD_LED_BACKLIGHT
tristate "Backlight LED support for Chrome OS keyboards"
- depends on LEDS_CLASS && ACPI
+ depends on LEDS_CLASS && (ACPI || CROS_EC)
help
This option enables support for the keyboard backlight LEDs on
select Chrome OS systems.
@@ -267,4 +267,13 @@ config CHROMEOS_PRIVACY_SCREEN
source "drivers/platform/chrome/wilco_ec/Kconfig"
+# Kunit test cases
+config CROS_KUNIT
+ tristate "Kunit tests for ChromeOS" if !KUNIT_ALL_TESTS
+ depends on KUNIT && CROS_EC
+ default KUNIT_ALL_TESTS
+ select CROS_EC_PROTO
+ help
+ ChromeOS Kunit tests.
+
endif # CHROMEOS_PLATFORMS
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index 52f5a2dde8b8..f7e74a845afc 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -30,3 +30,8 @@ obj-$(CONFIG_CROS_USBPD_LOGGER) += cros_usbpd_logger.o
obj-$(CONFIG_CROS_USBPD_NOTIFY) += cros_usbpd_notify.o
obj-$(CONFIG_WILCO_EC) += wilco_ec/
+
+# Kunit test cases
+obj-$(CONFIG_CROS_KUNIT) += cros_kunit.o
+cros_kunit-objs := cros_kunit_util.o
+cros_kunit-objs += cros_ec_proto_test.o
diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c
index b3e94cdf7d1a..8aace50d446d 100644
--- a/drivers/platform/chrome/cros_ec.c
+++ b/drivers/platform/chrome/cros_ec.c
@@ -19,9 +19,6 @@
#include "cros_ec.h"
-#define CROS_EC_DEV_EC_INDEX 0
-#define CROS_EC_DEV_PD_INDEX 1
-
static struct cros_ec_platform ec_p = {
.ec_name = CROS_EC_DEV_NAME,
.cmd_offset = EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_EC_INDEX),
@@ -135,16 +132,16 @@ static int cros_ec_sleep_event(struct cros_ec_device *ec_dev, u8 sleep_event)
buf.msg.command = EC_CMD_HOST_SLEEP_EVENT;
ret = cros_ec_cmd_xfer_status(ec_dev, &buf.msg);
-
- /* For now, report failure to transition to S0ix with a warning. */
+ /* Report failure to transition to system-wide suspend with a warning. */
if (ret >= 0 && ec_dev->host_sleep_v1 &&
- (sleep_event == HOST_SLEEP_EVENT_S0IX_RESUME)) {
+ (sleep_event == HOST_SLEEP_EVENT_S0IX_RESUME ||
+ sleep_event == HOST_SLEEP_EVENT_S3_RESUME)) {
ec_dev->last_resume_result =
buf.u.resp1.resume_response.sleep_transitions;
WARN_ONCE(buf.u.resp1.resume_response.sleep_transitions &
EC_HOST_RESUME_SLEEP_TIMEOUT,
- "EC detected sleep transition timeout. Total slp_s0 transitions: %d",
+ "EC detected sleep transition timeout. Total sleep transitions: %d",
buf.u.resp1.resume_response.sleep_transitions &
EC_HOST_RESUME_SLEEP_TRANSITIONS_MASK);
}
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index ff767dccdf0f..05d2e8765a66 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -52,8 +52,8 @@ static int cros_ec_map_error(uint32_t result)
return ret;
}
-static int prepare_packet(struct cros_ec_device *ec_dev,
- struct cros_ec_command *msg)
+static int prepare_tx(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg)
{
struct ec_host_request *request;
u8 *out;
@@ -85,8 +85,29 @@ static int prepare_packet(struct cros_ec_device *ec_dev,
return sizeof(*request) + msg->outsize;
}
-static int send_command(struct cros_ec_device *ec_dev,
- struct cros_ec_command *msg)
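+/*
+ * Prepare an outgoing message in the legacy (protocol v2) format: a 3-byte
+ * header, the payload itself, and a trailing byte holding the 8-bit sum of
+ * all preceding bytes as a checksum.
+ */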
+static int prepare_tx_legacy(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg)
+{
+ u8 *out;
+ u8 csum;
+ int i;
+
+ if (msg->outsize > EC_PROTO2_MAX_PARAM_SIZE)
+ return -EINVAL;
+
+ out = ec_dev->dout;
+ out[0] = EC_CMD_VERSION0 + msg->version;
+ out[1] = msg->command;
+ out[2] = msg->outsize;
+ csum = out[0] + out[1] + out[2];
+ for (i = 0; i < msg->outsize; i++)
+ csum += out[EC_MSG_TX_HEADER_BYTES + i] = msg->data[i];
+ out[EC_MSG_TX_HEADER_BYTES + msg->outsize] = csum;
+
+ return EC_MSG_TX_PROTO_BYTES + msg->outsize;
+}
+
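+/* Hand one host command to the bus-specific transfer hook, tracing the request. */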
+static int cros_ec_xfer_command(struct cros_ec_device *ec_dev, struct cros_ec_command *msg)
{
int ret;
int (*xfer_fxn)(struct cros_ec_device *ec, struct cros_ec_command *msg);
@@ -102,57 +123,68 @@ static int send_command(struct cros_ec_device *ec_dev,
* the EC is trying to use protocol v2, on an underlying
* communication mechanism that does not support v2.
*/
- dev_err_once(ec_dev->dev,
- "missing EC transfer API, cannot send command\n");
+ dev_err_once(ec_dev->dev, "missing EC transfer API, cannot send command\n");
return -EIO;
}
trace_cros_ec_request_start(msg);
ret = (*xfer_fxn)(ec_dev, msg);
trace_cros_ec_request_done(msg, ret);
- if (msg->result == EC_RES_IN_PROGRESS) {
- int i;
- struct cros_ec_command *status_msg;
- struct ec_response_get_comms_status *status;
- status_msg = kmalloc(sizeof(*status_msg) + sizeof(*status),
- GFP_KERNEL);
- if (!status_msg)
- return -ENOMEM;
+ return ret;
+}
- status_msg->version = 0;
- status_msg->command = EC_CMD_GET_COMMS_STATUS;
- status_msg->insize = sizeof(*status);
- status_msg->outsize = 0;
+static int cros_ec_wait_until_complete(struct cros_ec_device *ec_dev, uint32_t *result)
+{
+ struct {
+ struct cros_ec_command msg;
+ struct ec_response_get_comms_status status;
+ } __packed buf;
+ struct cros_ec_command *msg = &buf.msg;
+ struct ec_response_get_comms_status *status = &buf.status;
+ int ret = 0, i;
- /*
- * Query the EC's status until it's no longer busy or
- * we encounter an error.
- */
- for (i = 0; i < EC_COMMAND_RETRIES; i++) {
- usleep_range(10000, 11000);
-
- trace_cros_ec_request_start(status_msg);
- ret = (*xfer_fxn)(ec_dev, status_msg);
- trace_cros_ec_request_done(status_msg, ret);
- if (ret == -EAGAIN)
- continue;
- if (ret < 0)
- break;
-
- msg->result = status_msg->result;
- if (status_msg->result != EC_RES_SUCCESS)
- break;
-
- status = (struct ec_response_get_comms_status *)
- status_msg->data;
- if (!(status->flags & EC_COMMS_STATUS_PROCESSING))
- break;
+ msg->version = 0;
+ msg->command = EC_CMD_GET_COMMS_STATUS;
+ msg->insize = sizeof(*status);
+ msg->outsize = 0;
+
+ /* Query the EC's status until it's no longer busy or we encounter an error. */
+ for (i = 0; i < EC_COMMAND_RETRIES; ++i) {
+ usleep_range(10000, 11000);
+
+ ret = cros_ec_xfer_command(ec_dev, msg);
+ if (ret == -EAGAIN)
+ continue;
+ if (ret < 0)
+ return ret;
+
+ *result = msg->result;
+ if (msg->result != EC_RES_SUCCESS)
+ return ret;
+
+ if (ret == 0) {
+ ret = -EPROTO;
+ break;
}
- kfree(status_msg);
+ if (!(status->flags & EC_COMMS_STATUS_PROCESSING))
+ return ret;
}
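+ /* The EC was still busy after all retries; let the caller see -EAGAIN. */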
+ if (i >= EC_COMMAND_RETRIES)
+ ret = -EAGAIN;
+
+ return ret;
+}
+
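+/*
+ * Send a host command and, if the EC reports EC_RES_IN_PROGRESS, poll its
+ * comms status until the command completes or the retries run out.
+ */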
+static int cros_ec_send_command(struct cros_ec_device *ec_dev, struct cros_ec_command *msg)
+{
+ int ret = cros_ec_xfer_command(ec_dev, msg);
+
+ if (msg->result == EC_RES_IN_PROGRESS)
+ ret = cros_ec_wait_until_complete(ec_dev, &msg->result);
+
return ret;
}
@@ -161,35 +193,18 @@ static int send_command(struct cros_ec_device *ec_dev,
* @ec_dev: Device to register.
* @msg: Message to write.
*
- * This is intended to be used by all ChromeOS EC drivers, but at present
- * only SPI uses it. Once LPC uses the same protocol it can start using it.
- * I2C could use it now, with a refactor of the existing code.
+ * This is used by all ChromeOS EC drivers to prepare the outgoing message
+ * according to the protocol version in use.
*
* Return: number of prepared bytes on success or negative error code.
*/
int cros_ec_prepare_tx(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg)
{
- u8 *out;
- u8 csum;
- int i;
-
if (ec_dev->proto_version > 2)
- return prepare_packet(ec_dev, msg);
-
- if (msg->outsize > EC_PROTO2_MAX_PARAM_SIZE)
- return -EINVAL;
+ return prepare_tx(ec_dev, msg);
- out = ec_dev->dout;
- out[0] = EC_CMD_VERSION0 + msg->version;
- out[1] = msg->command;
- out[2] = msg->outsize;
- csum = out[0] + out[1] + out[2];
- for (i = 0; i < msg->outsize; i++)
- csum += out[EC_MSG_TX_HEADER_BYTES + i] = msg->data[i];
- out[EC_MSG_TX_HEADER_BYTES + msg->outsize] = csum;
-
- return EC_MSG_TX_PROTO_BYTES + msg->outsize;
+ return prepare_tx_legacy(ec_dev, msg);
}
EXPORT_SYMBOL(cros_ec_prepare_tx);
@@ -199,9 +214,12 @@ EXPORT_SYMBOL(cros_ec_prepare_tx);
* @msg: Message to check.
*
* This is used by ChromeOS EC drivers to check the ec_msg->result for
- * errors and to warn about them.
+ * EC_RES_IN_PROGRESS and to warn about it.
*
- * Return: 0 on success or negative error code.
+ * The function should not check for any other error codes. Otherwise,
+ * it would break the ABI.
+ *
+ * Return: -EAGAIN if ec_msg->result == EC_RES_IN_PROGRESS. Otherwise, 0.
*/
int cros_ec_check_result(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg)
@@ -228,59 +246,66 @@ EXPORT_SYMBOL(cros_ec_check_result);
*
* @ec_dev: EC device to call
* @msg: message structure to use
- * @mask: result when function returns >=0.
+ * @mask: result when function returns 0.
*
* LOCKING:
* the caller has ec_dev->lock mutex, or the caller knows there is
* no other command in progress.
*/
-static int cros_ec_get_host_event_wake_mask(struct cros_ec_device *ec_dev,
- struct cros_ec_command *msg,
- uint32_t *mask)
+static int cros_ec_get_host_event_wake_mask(struct cros_ec_device *ec_dev, uint32_t *mask)
{
+ struct cros_ec_command *msg;
struct ec_response_host_event_mask *r;
- int ret;
+ int ret, mapped;
+
+ msg = kzalloc(sizeof(*msg) + sizeof(*r), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
msg->command = EC_CMD_HOST_EVENT_GET_WAKE_MASK;
- msg->version = 0;
- msg->outsize = 0;
msg->insize = sizeof(*r);
- ret = send_command(ec_dev, msg);
- if (ret >= 0) {
- if (msg->result == EC_RES_INVALID_COMMAND)
- return -EOPNOTSUPP;
- if (msg->result != EC_RES_SUCCESS)
- return -EPROTO;
+ ret = cros_ec_send_command(ec_dev, msg);
+ if (ret < 0)
+ goto exit;
+
+ mapped = cros_ec_map_error(msg->result);
+ if (mapped) {
+ ret = mapped;
+ goto exit;
}
- if (ret > 0) {
- r = (struct ec_response_host_event_mask *)msg->data;
- *mask = r->mask;
+
+ if (ret == 0) {
+ ret = -EPROTO;
+ goto exit;
}
+ r = (struct ec_response_host_event_mask *)msg->data;
+ *mask = r->mask;
+ ret = 0;
+exit:
+ kfree(msg);
return ret;
}
-static int cros_ec_host_command_proto_query(struct cros_ec_device *ec_dev,
- int devidx,
- struct cros_ec_command *msg)
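+/*
+ * Query protocol info from the EC itself (devidx 0) or from a device behind
+ * the passthru port, and record the supported request/response sizes and
+ * protocol version.
+ */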
+static int cros_ec_get_proto_info(struct cros_ec_device *ec_dev, int devidx)
{
- /*
- * Try using v3+ to query for supported protocols. If this
- * command fails, fall back to v2. Returns the highest protocol
- * supported by the EC.
- * Also sets the max request/response/passthru size.
- */
- int ret;
+ struct cros_ec_command *msg;
+ struct ec_response_get_protocol_info *info;
+ int ret, mapped;
- if (!ec_dev->pkt_xfer)
- return -EPROTONOSUPPORT;
+ ec_dev->proto_version = 3;
+ if (devidx > 0)
+ ec_dev->max_passthru = 0;
+
+ msg = kzalloc(sizeof(*msg) + sizeof(*info), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
- memset(msg, 0, sizeof(*msg));
msg->command = EC_CMD_PASSTHRU_OFFSET(devidx) | EC_CMD_GET_PROTOCOL_INFO;
- msg->insize = sizeof(struct ec_response_get_protocol_info);
+ msg->insize = sizeof(*info);
- ret = send_command(ec_dev, msg);
+ ret = cros_ec_send_command(ec_dev, msg);
/*
* Send command once again when timeout occurred.
* Fingerprint MCU (FPMCU) is restarted during system boot which
@@ -289,68 +314,115 @@ static int cros_ec_host_command_proto_query(struct cros_ec_device *ec_dev,
* attempt because we waited at least EC_MSG_DEADLINE_MS.
*/
if (ret == -ETIMEDOUT)
- ret = send_command(ec_dev, msg);
+ ret = cros_ec_send_command(ec_dev, msg);
if (ret < 0) {
dev_dbg(ec_dev->dev,
"failed to check for EC[%d] protocol version: %d\n",
devidx, ret);
- return ret;
+ goto exit;
}
- if (devidx > 0 && msg->result == EC_RES_INVALID_COMMAND)
- return -ENODEV;
- else if (msg->result != EC_RES_SUCCESS)
- return msg->result;
+ mapped = cros_ec_map_error(msg->result);
+ if (mapped) {
+ ret = mapped;
+ goto exit;
+ }
- return 0;
+ if (ret == 0) {
+ ret = -EPROTO;
+ goto exit;
+ }
+
+ info = (struct ec_response_get_protocol_info *)msg->data;
+
+ switch (devidx) {
+ case CROS_EC_DEV_EC_INDEX:
+ ec_dev->max_request = info->max_request_packet_size -
+ sizeof(struct ec_host_request);
+ ec_dev->max_response = info->max_response_packet_size -
+ sizeof(struct ec_host_response);
+ ec_dev->proto_version = min(EC_HOST_REQUEST_VERSION,
+ fls(info->protocol_versions) - 1);
+ ec_dev->din_size = info->max_response_packet_size + EC_MAX_RESPONSE_OVERHEAD;
+ ec_dev->dout_size = info->max_request_packet_size + EC_MAX_REQUEST_OVERHEAD;
+
+ dev_dbg(ec_dev->dev, "using proto v%u\n", ec_dev->proto_version);
+ break;
+ case CROS_EC_DEV_PD_INDEX:
+ ec_dev->max_passthru = info->max_request_packet_size -
+ sizeof(struct ec_host_request);
+
+ dev_dbg(ec_dev->dev, "found PD chip\n");
+ break;
+ default:
+ dev_dbg(ec_dev->dev, "unknown passthru index: %d\n", devidx);
+ break;
+ }
+
+ ret = 0;
+exit:
+ kfree(msg);
+ return ret;
}
-static int cros_ec_host_command_proto_query_v2(struct cros_ec_device *ec_dev)
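+/*
+ * Fall back to protocol v2: send a HELLO command with a known pattern and
+ * verify the expected response value before fixing up the v2 message sizes.
+ */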
+static int cros_ec_get_proto_info_legacy(struct cros_ec_device *ec_dev)
{
struct cros_ec_command *msg;
- struct ec_params_hello *hello_params;
- struct ec_response_hello *hello_response;
- int ret;
- int len = max(sizeof(*hello_params), sizeof(*hello_response));
+ struct ec_params_hello *params;
+ struct ec_response_hello *response;
+ int ret, mapped;
+
+ ec_dev->proto_version = 2;
- msg = kmalloc(sizeof(*msg) + len, GFP_KERNEL);
+ msg = kzalloc(sizeof(*msg) + max(sizeof(*params), sizeof(*response)), GFP_KERNEL);
if (!msg)
return -ENOMEM;
- msg->version = 0;
msg->command = EC_CMD_HELLO;
- hello_params = (struct ec_params_hello *)msg->data;
- msg->outsize = sizeof(*hello_params);
- hello_response = (struct ec_response_hello *)msg->data;
- msg->insize = sizeof(*hello_response);
-
- hello_params->in_data = 0xa0b0c0d0;
+ msg->insize = sizeof(*response);
+ msg->outsize = sizeof(*params);
- ret = send_command(ec_dev, msg);
+ params = (struct ec_params_hello *)msg->data;
+ params->in_data = 0xa0b0c0d0;
+ ret = cros_ec_send_command(ec_dev, msg);
if (ret < 0) {
- dev_dbg(ec_dev->dev,
- "EC failed to respond to v2 hello: %d\n",
- ret);
+ dev_dbg(ec_dev->dev, "EC failed to respond to v2 hello: %d\n", ret);
goto exit;
- } else if (msg->result != EC_RES_SUCCESS) {
- dev_err(ec_dev->dev,
- "EC responded to v2 hello with error: %d\n",
- msg->result);
- ret = msg->result;
+ }
+
+ mapped = cros_ec_map_error(msg->result);
+ if (mapped) {
+ ret = mapped;
+ dev_err(ec_dev->dev, "EC responded to v2 hello with error: %d\n", msg->result);
+ goto exit;
+ }
+
+ if (ret == 0) {
+ ret = -EPROTO;
goto exit;
- } else if (hello_response->out_data != 0xa1b2c3d4) {
+ }
+
+ response = (struct ec_response_hello *)msg->data;
+ if (response->out_data != 0xa1b2c3d4) {
dev_err(ec_dev->dev,
"EC responded to v2 hello with bad result: %u\n",
- hello_response->out_data);
+ response->out_data);
ret = -EBADMSG;
goto exit;
}
- ret = 0;
+ ec_dev->max_request = EC_PROTO2_MAX_PARAM_SIZE;
+ ec_dev->max_response = EC_PROTO2_MAX_PARAM_SIZE;
+ ec_dev->max_passthru = 0;
+ ec_dev->pkt_xfer = NULL;
+ ec_dev->din_size = EC_PROTO2_MSG_BYTES;
+ ec_dev->dout_size = EC_PROTO2_MSG_BYTES;
- exit:
+ dev_dbg(ec_dev->dev, "falling back to proto v2\n");
+ ret = 0;
+exit:
kfree(msg);
return ret;
}
@@ -371,13 +443,12 @@ static int cros_ec_host_command_proto_query_v2(struct cros_ec_device *ec_dev)
* the caller has ec_dev->lock mutex or the caller knows there is
* no other command in progress.
*/
-static int cros_ec_get_host_command_version_mask(struct cros_ec_device *ec_dev,
- u16 cmd, u32 *mask)
+static int cros_ec_get_host_command_version_mask(struct cros_ec_device *ec_dev, u16 cmd, u32 *mask)
{
struct ec_params_get_cmd_versions *pver;
struct ec_response_get_cmd_versions *rver;
struct cros_ec_command *msg;
- int ret;
+ int ret, mapped;
msg = kmalloc(sizeof(*msg) + max(sizeof(*rver), sizeof(*pver)),
GFP_KERNEL);
@@ -392,14 +463,26 @@ static int cros_ec_get_host_command_version_mask(struct cros_ec_device *ec_dev,
pver = (struct ec_params_get_cmd_versions *)msg->data;
pver->cmd = cmd;
- ret = send_command(ec_dev, msg);
- if (ret > 0) {
- rver = (struct ec_response_get_cmd_versions *)msg->data;
- *mask = rver->version_mask;
+ ret = cros_ec_send_command(ec_dev, msg);
+ if (ret < 0)
+ goto exit;
+
+ mapped = cros_ec_map_error(msg->result);
+ if (mapped) {
+ ret = mapped;
+ goto exit;
}
- kfree(msg);
+ if (ret == 0) {
+ ret = -EPROTO;
+ goto exit;
+ }
+ rver = (struct ec_response_get_cmd_versions *)msg->data;
+ *mask = rver->version_mask;
+ ret = 0;
+exit:
+ kfree(msg);
return ret;
}
@@ -413,71 +496,17 @@ static int cros_ec_get_host_command_version_mask(struct cros_ec_device *ec_dev,
int cros_ec_query_all(struct cros_ec_device *ec_dev)
{
struct device *dev = ec_dev->dev;
- struct cros_ec_command *proto_msg;
- struct ec_response_get_protocol_info *proto_info;
- u32 ver_mask = 0;
+ u32 ver_mask;
int ret;
- proto_msg = kzalloc(sizeof(*proto_msg) + sizeof(*proto_info),
- GFP_KERNEL);
- if (!proto_msg)
- return -ENOMEM;
-
/* First try sending with proto v3. */
- ec_dev->proto_version = 3;
- ret = cros_ec_host_command_proto_query(ec_dev, 0, proto_msg);
-
- if (ret == 0) {
- proto_info = (struct ec_response_get_protocol_info *)
- proto_msg->data;
- ec_dev->max_request = proto_info->max_request_packet_size -
- sizeof(struct ec_host_request);
- ec_dev->max_response = proto_info->max_response_packet_size -
- sizeof(struct ec_host_response);
- ec_dev->proto_version =
- min(EC_HOST_REQUEST_VERSION,
- fls(proto_info->protocol_versions) - 1);
- dev_dbg(ec_dev->dev,
- "using proto v%u\n",
- ec_dev->proto_version);
-
- ec_dev->din_size = ec_dev->max_response +
- sizeof(struct ec_host_response) +
- EC_MAX_RESPONSE_OVERHEAD;
- ec_dev->dout_size = ec_dev->max_request +
- sizeof(struct ec_host_request) +
- EC_MAX_REQUEST_OVERHEAD;
-
- /*
- * Check for PD
- */
- ret = cros_ec_host_command_proto_query(ec_dev, 1, proto_msg);
-
- if (ret) {
- dev_dbg(ec_dev->dev, "no PD chip found: %d\n", ret);
- ec_dev->max_passthru = 0;
- } else {
- dev_dbg(ec_dev->dev, "found PD chip\n");
- ec_dev->max_passthru =
- proto_info->max_request_packet_size -
- sizeof(struct ec_host_request);
- }
+ if (!cros_ec_get_proto_info(ec_dev, CROS_EC_DEV_EC_INDEX)) {
+ /* Check for PD. */
+ cros_ec_get_proto_info(ec_dev, CROS_EC_DEV_PD_INDEX);
} else {
/* Try querying with a v2 hello message. */
- ec_dev->proto_version = 2;
- ret = cros_ec_host_command_proto_query_v2(ec_dev);
-
- if (ret == 0) {
- /* V2 hello succeeded. */
- dev_dbg(ec_dev->dev, "falling back to proto v2\n");
-
- ec_dev->max_request = EC_PROTO2_MAX_PARAM_SIZE;
- ec_dev->max_response = EC_PROTO2_MAX_PARAM_SIZE;
- ec_dev->max_passthru = 0;
- ec_dev->pkt_xfer = NULL;
- ec_dev->din_size = EC_PROTO2_MSG_BYTES;
- ec_dev->dout_size = EC_PROTO2_MSG_BYTES;
- } else {
+ ret = cros_ec_get_proto_info_legacy(ec_dev);
+ if (ret) {
/*
* It's possible for a test to occur too early when
* the EC isn't listening. If this happens, we'll
@@ -485,7 +514,7 @@ int cros_ec_query_all(struct cros_ec_device *ec_dev)
*/
ec_dev->proto_version = EC_PROTO_VERSION_UNKNOWN;
dev_dbg(ec_dev->dev, "EC query failed: %d\n", ret);
- goto exit;
+ return ret;
}
}
@@ -506,26 +535,21 @@ int cros_ec_query_all(struct cros_ec_device *ec_dev)
}
/* Probe if MKBP event is supported */
- ret = cros_ec_get_host_command_version_mask(ec_dev,
- EC_CMD_GET_NEXT_EVENT,
- &ver_mask);
- if (ret < 0 || ver_mask == 0)
+ ret = cros_ec_get_host_command_version_mask(ec_dev, EC_CMD_GET_NEXT_EVENT, &ver_mask);
+ if (ret < 0 || ver_mask == 0) {
ec_dev->mkbp_event_supported = 0;
- else
+ } else {
ec_dev->mkbp_event_supported = fls(ver_mask);
- dev_dbg(ec_dev->dev, "MKBP support version %u\n",
- ec_dev->mkbp_event_supported - 1);
+ dev_dbg(ec_dev->dev, "MKBP support version %u\n", ec_dev->mkbp_event_supported - 1);
+ }
/* Probe if host sleep v1 is supported for S0ix failure detection. */
- ret = cros_ec_get_host_command_version_mask(ec_dev,
- EC_CMD_HOST_SLEEP_EVENT,
- &ver_mask);
- ec_dev->host_sleep_v1 = (ret >= 0 && (ver_mask & EC_VER_MASK(1)));
+ ret = cros_ec_get_host_command_version_mask(ec_dev, EC_CMD_HOST_SLEEP_EVENT, &ver_mask);
+ ec_dev->host_sleep_v1 = (ret == 0 && (ver_mask & EC_VER_MASK(1)));
/* Get host event wake mask. */
- ret = cros_ec_get_host_event_wake_mask(ec_dev, proto_msg,
- &ec_dev->host_event_wake_mask);
+ ret = cros_ec_get_host_event_wake_mask(ec_dev, &ec_dev->host_event_wake_mask);
if (ret < 0) {
/*
* If the EC doesn't support EC_CMD_HOST_EVENT_GET_WAKE_MASK,
@@ -556,7 +580,6 @@ int cros_ec_query_all(struct cros_ec_device *ec_dev)
ret = 0;
exit:
- kfree(proto_msg);
return ret;
}
EXPORT_SYMBOL(cros_ec_query_all);
@@ -601,7 +624,7 @@ int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, struct cros_ec_command *msg)
msg->insize = ec_dev->max_response;
}
- if (msg->command < EC_CMD_PASSTHRU_OFFSET(1)) {
+ if (msg->command < EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX)) {
if (msg->outsize > ec_dev->max_request) {
dev_err(ec_dev->dev,
"request of size %u is too big (max: %u)\n",
@@ -621,7 +644,7 @@ int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, struct cros_ec_command *msg)
}
}
- ret = send_command(ec_dev, msg);
+ ret = cros_ec_send_command(ec_dev, msg);
mutex_unlock(&ec_dev->lock);
return ret;
@@ -852,8 +875,8 @@ bool cros_ec_check_features(struct cros_ec_dev *ec, int feature)
if (features->flags[0] == -1U && features->flags[1] == -1U) {
/* features bitmap not read yet */
- ret = cros_ec_command(ec->ec_dev, 0, EC_CMD_GET_FEATURES + ec->cmd_offset,
- NULL, 0, features, sizeof(*features));
+ ret = cros_ec_cmd(ec->ec_dev, 0, EC_CMD_GET_FEATURES + ec->cmd_offset,
+ NULL, 0, features, sizeof(*features));
if (ret < 0) {
dev_warn(ec->dev, "cannot get EC features: %d\n", ret);
memset(features, 0, sizeof(*features));
@@ -934,7 +957,7 @@ int cros_ec_get_sensor_count(struct cros_ec_dev *ec)
EXPORT_SYMBOL_GPL(cros_ec_get_sensor_count);
/**
- * cros_ec_command - Send a command to the EC.
+ * cros_ec_cmd - Send a command to the EC.
*
* @ec_dev: EC device
* @version: EC command version
@@ -946,13 +969,13 @@ EXPORT_SYMBOL_GPL(cros_ec_get_sensor_count);
*
* Return: >= 0 on success, negative error number on failure.
*/
-int cros_ec_command(struct cros_ec_device *ec_dev,
- unsigned int version,
- int command,
- void *outdata,
- int outsize,
- void *indata,
- int insize)
+int cros_ec_cmd(struct cros_ec_device *ec_dev,
+ unsigned int version,
+ int command,
+ void *outdata,
+ size_t outsize,
+ void *indata,
+ size_t insize)
{
struct cros_ec_command *msg;
int ret;
@@ -979,4 +1002,4 @@ error:
kfree(msg);
return ret;
}
-EXPORT_SYMBOL_GPL(cros_ec_command);
+EXPORT_SYMBOL_GPL(cros_ec_cmd);
diff --git a/drivers/platform/chrome/cros_ec_proto_test.c b/drivers/platform/chrome/cros_ec_proto_test.c
new file mode 100644
index 000000000000..c6a83df91ae1
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_proto_test.c
@@ -0,0 +1,2753 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Kunit tests for ChromeOS Embedded Controller protocol.
+ */
+
+#include <kunit/test.h>
+
+#include <asm-generic/unaligned.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+
+#include "cros_ec.h"
+#include "cros_kunit_util.h"
+
+#define BUFSIZE 512
+
+struct cros_ec_proto_test_priv {
+ struct cros_ec_device ec_dev;
+ u8 dout[BUFSIZE];
+ u8 din[BUFSIZE];
+ struct cros_ec_command *msg;
+ u8 _msg[BUFSIZE];
+};
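+
+/*
+ * The tests below queue mocked EC transfers with cros_kunit_ec_xfer_mock_add()
+ * (or _addx() to also set the transfer return value and EC result), call the
+ * code under test, and then use cros_kunit_ec_xfer_mock_next() to check both
+ * the commands that were sent and the state derived from the mocked replies.
+ */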
+
+static void cros_ec_proto_test_prepare_tx_legacy_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct cros_ec_command *msg = priv->msg;
+ int ret, i;
+ u8 csum;
+
+ ec_dev->proto_version = 2;
+
+ msg->command = EC_CMD_HELLO;
+ msg->outsize = EC_PROTO2_MAX_PARAM_SIZE;
+ msg->data[0] = 0xde;
+ msg->data[1] = 0xad;
+ msg->data[2] = 0xbe;
+ msg->data[3] = 0xef;
+
+ ret = cros_ec_prepare_tx(ec_dev, msg);
+
+ KUNIT_EXPECT_EQ(test, ret, EC_MSG_TX_PROTO_BYTES + EC_PROTO2_MAX_PARAM_SIZE);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[0], EC_CMD_VERSION0);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[1], EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[2], EC_PROTO2_MAX_PARAM_SIZE);
+ KUNIT_EXPECT_EQ(test, EC_MSG_TX_HEADER_BYTES, 3);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[EC_MSG_TX_HEADER_BYTES + 0], 0xde);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[EC_MSG_TX_HEADER_BYTES + 1], 0xad);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[EC_MSG_TX_HEADER_BYTES + 2], 0xbe);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[EC_MSG_TX_HEADER_BYTES + 3], 0xef);
+ for (i = 4; i < EC_PROTO2_MAX_PARAM_SIZE; ++i)
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[EC_MSG_TX_HEADER_BYTES + i], 0);
+
+ csum = EC_CMD_VERSION0;
+ csum += EC_CMD_HELLO;
+ csum += EC_PROTO2_MAX_PARAM_SIZE;
+ csum += 0xde;
+ csum += 0xad;
+ csum += 0xbe;
+ csum += 0xef;
+ KUNIT_EXPECT_EQ(test,
+ ec_dev->dout[EC_MSG_TX_HEADER_BYTES + EC_PROTO2_MAX_PARAM_SIZE],
+ csum);
+}
+
+static void cros_ec_proto_test_prepare_tx_legacy_bad_msg_outsize(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct cros_ec_command *msg = priv->msg;
+ int ret;
+
+ ec_dev->proto_version = 2;
+
+ msg->outsize = EC_PROTO2_MAX_PARAM_SIZE + 1;
+
+ ret = cros_ec_prepare_tx(ec_dev, msg);
+ KUNIT_EXPECT_EQ(test, ret, -EINVAL);
+}
+
+static void cros_ec_proto_test_prepare_tx_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct cros_ec_command *msg = priv->msg;
+ struct ec_host_request *request = (struct ec_host_request *)ec_dev->dout;
+ int ret, i;
+ u8 csum;
+
+ msg->command = EC_CMD_HELLO;
+ msg->outsize = 0x88;
+ msg->data[0] = 0xde;
+ msg->data[1] = 0xad;
+ msg->data[2] = 0xbe;
+ msg->data[3] = 0xef;
+
+ ret = cros_ec_prepare_tx(ec_dev, msg);
+
+ KUNIT_EXPECT_EQ(test, ret, sizeof(*request) + 0x88);
+
+ KUNIT_EXPECT_EQ(test, request->struct_version, EC_HOST_REQUEST_VERSION);
+ KUNIT_EXPECT_EQ(test, request->command, EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, request->command_version, 0);
+ KUNIT_EXPECT_EQ(test, request->data_len, 0x88);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[sizeof(*request) + 0], 0xde);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[sizeof(*request) + 1], 0xad);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[sizeof(*request) + 2], 0xbe);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[sizeof(*request) + 3], 0xef);
+ for (i = 4; i < 0x88; ++i)
+ KUNIT_EXPECT_EQ(test, ec_dev->dout[sizeof(*request) + i], 0);
+
+ csum = EC_HOST_REQUEST_VERSION;
+ csum += EC_CMD_HELLO;
+ csum += 0x88;
+ csum += 0xde;
+ csum += 0xad;
+ csum += 0xbe;
+ csum += 0xef;
+ KUNIT_EXPECT_EQ(test, request->checksum, (u8)-csum);
+}
+
+static void cros_ec_proto_test_prepare_tx_bad_msg_outsize(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct cros_ec_command *msg = priv->msg;
+ int ret;
+
+ msg->outsize = ec_dev->dout_size - sizeof(struct ec_host_request) + 1;
+
+ ret = cros_ec_prepare_tx(ec_dev, msg);
+ KUNIT_EXPECT_EQ(test, ret, -EINVAL);
+}
+
+static void cros_ec_proto_test_check_result(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct cros_ec_command *msg = priv->msg;
+ int ret, i;
+ static enum ec_status status[] = {
+ EC_RES_SUCCESS,
+ EC_RES_INVALID_COMMAND,
+ EC_RES_ERROR,
+ EC_RES_INVALID_PARAM,
+ EC_RES_ACCESS_DENIED,
+ EC_RES_INVALID_RESPONSE,
+ EC_RES_INVALID_VERSION,
+ EC_RES_INVALID_CHECKSUM,
+ EC_RES_UNAVAILABLE,
+ EC_RES_TIMEOUT,
+ EC_RES_OVERFLOW,
+ EC_RES_INVALID_HEADER,
+ EC_RES_REQUEST_TRUNCATED,
+ EC_RES_RESPONSE_TOO_BIG,
+ EC_RES_BUS_ERROR,
+ EC_RES_BUSY,
+ EC_RES_INVALID_HEADER_VERSION,
+ EC_RES_INVALID_HEADER_CRC,
+ EC_RES_INVALID_DATA_CRC,
+ EC_RES_DUP_UNAVAILABLE,
+ };
+
+ for (i = 0; i < ARRAY_SIZE(status); ++i) {
+ msg->result = status[i];
+ ret = cros_ec_check_result(ec_dev, msg);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ }
+
+ msg->result = EC_RES_IN_PROGRESS;
+ ret = cros_ec_check_result(ec_dev, msg);
+ KUNIT_EXPECT_EQ(test, ret, -EAGAIN);
+}
+
+static void cros_ec_proto_test_query_all_pretest(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+
+ /*
+ * cros_ec_query_all() frees din and dout with devm_kfree() and allocates them
+ * again with devm_kzalloc() to fit the reported packet sizes. Set them to NULL
+ * here because they aren't managed by ec_dev->dev but are allocated statically
+ * in struct cros_ec_proto_test_priv (see cros_ec_proto_test_init()).
+ */
+ ec_dev->din = NULL;
+ ec_dev->dout = NULL;
+}
+
+static void cros_ec_proto_test_query_all_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->protocol_versions = BIT(3) | BIT(2);
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbf;
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ struct ec_response_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_cmd_versions *)mock->o_data;
+ data->version_mask = BIT(6) | BIT(5);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ struct ec_response_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_cmd_versions *)mock->o_data;
+ data->version_mask = BIT(1);
+ }
+
+ /* For cros_ec_get_host_event_wake_mask(). */
+ {
+ struct ec_response_host_event_mask *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_host_event_mask *)mock->o_data;
+ data->mask = 0xbeef;
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->max_request, 0xbe - sizeof(struct ec_host_request));
+ KUNIT_EXPECT_EQ(test, ec_dev->max_response, 0xef - sizeof(struct ec_host_response));
+ KUNIT_EXPECT_EQ(test, ec_dev->proto_version, 3);
+ KUNIT_EXPECT_EQ(test, ec_dev->din_size, 0xef + EC_MAX_RESPONSE_OVERHEAD);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout_size, 0xbe + EC_MAX_REQUEST_OVERHEAD);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->max_passthru, 0xbf - sizeof(struct ec_host_request));
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ struct ec_params_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_get_cmd_versions *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->cmd, EC_CMD_GET_NEXT_EVENT);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->mkbp_event_supported, 7);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ struct ec_params_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_get_cmd_versions *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->cmd, EC_CMD_HOST_SLEEP_EVENT);
+
+ KUNIT_EXPECT_TRUE(test, ec_dev->host_sleep_v1);
+ }
+
+ /* For cros_ec_get_host_event_wake_mask(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HOST_EVENT_GET_WAKE_MASK);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_host_event_mask));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->host_event_wake_mask, 0xbeef);
+ }
+}
+
+static void cros_ec_proto_test_query_all_no_pd_return_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* Set some garbage bytes. */
+ ec_dev->max_passthru = 0xbf;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ /*
+ * Although the test doesn't check these values, provide valid sizes so that
+ * cros_ec_query_all() allocates din and dout correctly.
+ */
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->max_passthru, 0);
+ }
+}
+
+static void cros_ec_proto_test_query_all_no_pd_return0(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* Set some garbage bytes. */
+ ec_dev->max_passthru = 0xbf;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ /*
+ * Although the test doesn't check these values, provide valid sizes so that
+ * cros_ec_query_all() allocates din and dout correctly.
+ */
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->max_passthru, 0);
+ }
+}
+
+static void cros_ec_proto_test_query_all_legacy_normal_v3_return_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ struct ec_response_hello *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_hello *)mock->o_data;
+ data->out_data = 0xa1b2c3d4;
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ struct ec_params_hello *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_hello));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_hello *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->in_data, 0xa0b0c0d0);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->proto_version, 2);
+ KUNIT_EXPECT_EQ(test, ec_dev->max_request, EC_PROTO2_MAX_PARAM_SIZE);
+ KUNIT_EXPECT_EQ(test, ec_dev->max_response, EC_PROTO2_MAX_PARAM_SIZE);
+ KUNIT_EXPECT_EQ(test, ec_dev->max_passthru, 0);
+ KUNIT_EXPECT_PTR_EQ(test, ec_dev->pkt_xfer, NULL);
+ KUNIT_EXPECT_EQ(test, ec_dev->din_size, EC_PROTO2_MSG_BYTES);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout_size, EC_PROTO2_MSG_BYTES);
+ }
+}
+
+static void cros_ec_proto_test_query_all_legacy_normal_v3_return0(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ struct ec_response_hello *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_hello *)mock->o_data;
+ data->out_data = 0xa1b2c3d4;
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ struct ec_params_hello *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_hello));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_hello *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->in_data, 0xa0b0c0d0);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->proto_version, 2);
+ KUNIT_EXPECT_EQ(test, ec_dev->max_request, EC_PROTO2_MAX_PARAM_SIZE);
+ KUNIT_EXPECT_EQ(test, ec_dev->max_response, EC_PROTO2_MAX_PARAM_SIZE);
+ KUNIT_EXPECT_EQ(test, ec_dev->max_passthru, 0);
+ KUNIT_EXPECT_PTR_EQ(test, ec_dev->pkt_xfer, NULL);
+ KUNIT_EXPECT_EQ(test, ec_dev->din_size, EC_PROTO2_MSG_BYTES);
+ KUNIT_EXPECT_EQ(test, ec_dev->dout_size, EC_PROTO2_MSG_BYTES);
+ }
+}
+
+static void cros_ec_proto_test_query_all_legacy_xfer_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, -EIO, EC_RES_SUCCESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, -EIO);
+ KUNIT_EXPECT_EQ(test, ec_dev->proto_version, EC_PROTO_VERSION_UNKNOWN);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_hello));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_hello));
+ }
+}
+
+static void cros_ec_proto_test_query_all_legacy_return_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, -EOPNOTSUPP);
+ KUNIT_EXPECT_EQ(test, ec_dev->proto_version, EC_PROTO_VERSION_UNKNOWN);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_hello));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_hello));
+ }
+}
+
+static void cros_ec_proto_test_query_all_legacy_data_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ struct ec_response_hello *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_hello *)mock->o_data;
+ data->out_data = 0xbeefbfbf;
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, -EBADMSG);
+ KUNIT_EXPECT_EQ(test, ec_dev->proto_version, EC_PROTO_VERSION_UNKNOWN);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_hello));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_hello));
+ }
+}
+
+static void cros_ec_proto_test_query_all_legacy_return0(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, -EPROTO);
+ KUNIT_EXPECT_EQ(test, ec_dev->proto_version, EC_PROTO_VERSION_UNKNOWN);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info_legacy(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_hello));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_hello));
+ }
+}
+
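+/*
+ * When EC_CMD_GET_CMD_VERSIONS reports no versions for
+ * EC_CMD_GET_NEXT_EVENT, cros_ec_query_all() must clear the garbage in
+ * mkbp_event_supported and still succeed.
+ */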
+static void cros_ec_proto_test_query_all_no_mkbp(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* Set some garbage bytes. */
+ ec_dev->mkbp_event_supported = 0xbf;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+		/*
+		 * The test doesn't check these values, but provide valid sizes so
+		 * that cros_ec_query_all() allocates din and dout correctly.
+		 */
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ struct ec_response_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_cmd_versions *)mock->o_data;
+ data->version_mask = 0;
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ struct ec_params_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_get_cmd_versions *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->cmd, EC_CMD_GET_NEXT_EVENT);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->mkbp_event_supported, 0);
+ }
+}
+
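+/*
+ * An EC error on the MKBP EC_CMD_GET_CMD_VERSIONS query is not fatal:
+ * mkbp_event_supported is cleared and cros_ec_query_all() returns 0.
+ */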
+static void cros_ec_proto_test_query_all_no_mkbp_return_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* Set some garbage bytes. */
+ ec_dev->mkbp_event_supported = 0xbf;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+		/*
+		 * The test doesn't check these values, but provide valid sizes so
+		 * that cros_ec_query_all() allocates din and dout correctly.
+		 */
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ struct ec_params_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_get_cmd_versions *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->cmd, EC_CMD_GET_NEXT_EVENT);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->mkbp_event_supported, 0);
+ }
+}
+
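+/*
+ * A zero-length reply to the MKBP EC_CMD_GET_CMD_VERSIONS query gets the
+ * same treatment: mkbp_event_supported is cleared and cros_ec_query_all()
+ * still returns 0.
+ */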
+static void cros_ec_proto_test_query_all_no_mkbp_return0(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* Set some garbage bytes. */
+ ec_dev->mkbp_event_supported = 0xbf;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+		/*
+		 * The test doesn't check these values, but provide valid sizes so
+		 * that cros_ec_query_all() allocates din and dout correctly.
+		 */
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ struct ec_params_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_get_cmd_versions *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->cmd, EC_CMD_GET_NEXT_EVENT);
+
+ KUNIT_EXPECT_EQ(test, ec_dev->mkbp_event_supported, 0);
+ }
+}
+
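+/*
+ * If the host sleep EC_CMD_GET_CMD_VERSIONS query reports an empty
+ * version mask, cros_ec_query_all() must clear host_sleep_v1.
+ */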
+static void cros_ec_proto_test_query_all_no_host_sleep(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* Set some garbage bytes. */
+ ec_dev->host_sleep_v1 = true;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+		/*
+		 * The test doesn't check these values, but provide valid sizes so
+		 * that cros_ec_query_all() allocates din and dout correctly.
+		 */
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ struct ec_response_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_cmd_versions *)mock->o_data;
+ data->version_mask = 0;
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
+
+ KUNIT_EXPECT_FALSE(test, ec_dev->host_sleep_v1);
+ }
+}
+
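+/*
+ * A zero-length reply to the host sleep EC_CMD_GET_CMD_VERSIONS query
+ * must also clear host_sleep_v1, even though the preceding MKBP query
+ * deliberately left a nonzero version mask behind.
+ */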
+static void cros_ec_proto_test_query_all_no_host_sleep_return0(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* Set some garbage bytes. */
+ ec_dev->host_sleep_v1 = true;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+		/*
+		 * The test doesn't check these values, but provide valid sizes so
+		 * that cros_ec_query_all() allocates din and dout correctly.
+		 */
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ struct ec_response_get_cmd_versions *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+		/* Deliberately pollute the next cros_ec_get_host_command_version_mask() call. */
+ data = (struct ec_response_get_cmd_versions *)mock->o_data;
+ data->version_mask = 0xbeef;
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
+
+ KUNIT_EXPECT_FALSE(test, ec_dev->host_sleep_v1);
+ }
+}
+
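+/*
+ * When EC_CMD_HOST_EVENT_GET_WAKE_MASK fails, cros_ec_query_all() must
+ * fall back to a default wake mask that excludes lid close, AC
+ * disconnect, battery and PD MCU events.
+ */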
+static void cros_ec_proto_test_query_all_default_wake_mask_return_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* Set some garbage bytes. */
+ ec_dev->host_event_wake_mask = U32_MAX;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+		/*
+		 * The test doesn't check these values, but provide valid sizes so
+		 * that cros_ec_query_all() allocates din and dout correctly.
+		 */
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_event_wake_mask(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
+ }
+
+ /* For cros_ec_get_host_event_wake_mask(). */
+ {
+ u32 mask;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HOST_EVENT_GET_WAKE_MASK);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_host_event_mask));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+
+ mask = ec_dev->host_event_wake_mask;
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_LID_CLOSED), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_AC_DISCONNECTED), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_LOW), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_CRITICAL), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_PD_MCU), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_STATUS), 0);
+ }
+}
+
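+/*
+ * A zero-length EC_CMD_HOST_EVENT_GET_WAKE_MASK reply gets the same
+ * default wake mask treatment as an error.
+ */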
+static void cros_ec_proto_test_query_all_default_wake_mask_return0(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+
+ /* Set some garbage bytes. */
+ ec_dev->host_event_wake_mask = U32_MAX;
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ struct ec_response_get_protocol_info *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+		/*
+		 * The test doesn't check these values, but provide valid sizes so
+		 * that cros_ec_query_all() allocates din and dout correctly.
+		 */
+ data = (struct ec_response_get_protocol_info *)mock->o_data;
+ data->max_request_packet_size = 0xbe;
+ data->max_response_packet_size = 0xef;
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+	/* For cros_ec_get_host_event_wake_mask(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ cros_ec_proto_test_query_all_pretest(test);
+ ret = cros_ec_query_all(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* For cros_ec_get_proto_info() without passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_proto_info() with passthru. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command,
+ EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
+ EC_CMD_GET_PROTOCOL_INFO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_protocol_info));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for MKBP. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
+ }
+
+ /* For cros_ec_get_host_command_version_mask() for host sleep v1. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_cmd_versions));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
+ }
+
+	/* For cros_ec_get_host_event_wake_mask(). */
+ {
+ u32 mask;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HOST_EVENT_GET_WAKE_MASK);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_host_event_mask));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+
+ mask = ec_dev->host_event_wake_mask;
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_LID_CLOSED), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_AC_DISCONNECTED), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_LOW), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_CRITICAL), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_PD_MCU), 0);
+ KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_STATUS), 0);
+ }
+}
+
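+/*
+ * Basic round trip through cros_ec_cmd_xfer(): the outgoing payload
+ * reaches the transport mock unchanged and the mocked response is
+ * copied back into the caller's buffer.
+ */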
+static void cros_ec_proto_test_cmd_xfer_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct {
+ struct cros_ec_command msg;
+ u8 data[0x100];
+ } __packed buf;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->max_passthru = 0xdd;
+
+ buf.msg.version = 0;
+ buf.msg.command = EC_CMD_HELLO;
+ buf.msg.insize = 4;
+ buf.msg.outsize = 2;
+ buf.data[0] = 0x55;
+ buf.data[1] = 0xaa;
+
+ {
+ u8 *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, 4);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (u8 *)mock->o_data;
+ data[0] = 0xaa;
+ data[1] = 0x55;
+ data[2] = 0xcc;
+ data[3] = 0x33;
+ }
+
+ ret = cros_ec_cmd_xfer(ec_dev, &buf.msg);
+ KUNIT_EXPECT_EQ(test, ret, 4);
+
+ {
+ u8 *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, 4);
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 2);
+
+ data = (u8 *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data[0], 0x55);
+ KUNIT_EXPECT_EQ(test, data[1], 0xaa);
+
+ KUNIT_EXPECT_EQ(test, buf.data[0], 0xaa);
+ KUNIT_EXPECT_EQ(test, buf.data[1], 0x55);
+ KUNIT_EXPECT_EQ(test, buf.data[2], 0xcc);
+ KUNIT_EXPECT_EQ(test, buf.data[3], 0x33);
+ }
+}
+
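+/*
+ * An insize larger than ec_dev->max_response is silently clamped to the
+ * maximum before the message is handed to the transport.
+ */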
+static void cros_ec_proto_test_cmd_xfer_excess_msg_insize(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct {
+ struct cros_ec_command msg;
+ u8 data[0x100];
+ } __packed buf;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->max_passthru = 0xdd;
+
+ buf.msg.version = 0;
+ buf.msg.command = EC_CMD_HELLO;
+ buf.msg.insize = 0xee + 1;
+ buf.msg.outsize = 2;
+
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0xcc);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ ret = cros_ec_cmd_xfer(ec_dev, &buf.msg);
+ KUNIT_EXPECT_EQ(test, ret, 0xcc);
+
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, 0xee);
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 2);
+ }
+}
+
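+/* An outsize beyond ec_dev->max_request must fail with -EMSGSIZE. */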
+static void cros_ec_proto_test_cmd_xfer_excess_msg_outsize_without_passthru(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+ struct {
+ struct cros_ec_command msg;
+ u8 data[0x100];
+ } __packed buf;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->max_passthru = 0xdd;
+
+ buf.msg.version = 0;
+ buf.msg.command = EC_CMD_HELLO;
+ buf.msg.insize = 4;
+ buf.msg.outsize = 0xff + 1;
+
+ ret = cros_ec_cmd_xfer(ec_dev, &buf.msg);
+ KUNIT_EXPECT_EQ(test, ret, -EMSGSIZE);
+}
+
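+/* For passthru commands, outsize is checked against ec_dev->max_passthru instead. */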
+static void cros_ec_proto_test_cmd_xfer_excess_msg_outsize_with_passthru(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+ struct {
+ struct cros_ec_command msg;
+ u8 data[0x100];
+ } __packed buf;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->max_passthru = 0xdd;
+
+ buf.msg.version = 0;
+ buf.msg.command = EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) + EC_CMD_HELLO;
+ buf.msg.insize = 4;
+ buf.msg.outsize = 0xdd + 1;
+
+ ret = cros_ec_cmd_xfer(ec_dev, &buf.msg);
+ KUNIT_EXPECT_EQ(test, ret, -EMSGSIZE);
+}
+
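+/* Protocol v3 commands must go through pkt_xfer, not cmd_xfer. */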
+static void cros_ec_proto_test_cmd_xfer_protocol_v3_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ ec_dev->proto_version = 3;
+ ec_dev->cmd_xfer = cros_kunit_ec_cmd_xfer_mock;
+ ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_EQ(test, cros_kunit_ec_cmd_xfer_mock_called, 0);
+ KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 1);
+}
+
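+/* A missing pkt_xfer callback on protocol v3 must fail with -EIO. */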
+static void cros_ec_proto_test_cmd_xfer_protocol_v3_no_op(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ ec_dev->proto_version = 3;
+ ec_dev->cmd_xfer = cros_kunit_ec_cmd_xfer_mock;
+ ec_dev->pkt_xfer = NULL;
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, -EIO);
+}
+
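+/* Protocol v2 commands must go through cmd_xfer, not pkt_xfer. */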
+static void cros_ec_proto_test_cmd_xfer_protocol_v2_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ ec_dev->proto_version = 2;
+ ec_dev->cmd_xfer = cros_kunit_ec_cmd_xfer_mock;
+ ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_EQ(test, cros_kunit_ec_cmd_xfer_mock_called, 1);
+ KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 0);
+}
+
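+/* A missing cmd_xfer callback on protocol v2 must fail with -EIO. */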
+static void cros_ec_proto_test_cmd_xfer_protocol_v2_no_op(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ ec_dev->proto_version = 2;
+ ec_dev->cmd_xfer = NULL;
+ ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, -EIO);
+}
+
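+/*
+ * EC_RES_IN_PROGRESS makes cros_ec_cmd_xfer() poll
+ * EC_CMD_GET_COMMS_STATUS; once the EC stops reporting that it is
+ * processing, the command completes with EC_RES_SUCCESS after two
+ * transfers.
+ */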
+static void cros_ec_proto_test_cmd_xfer_in_progress_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
+
+ /* For the first host command to return EC_RES_IN_PROGRESS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_IN_PROGRESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For EC_CMD_GET_COMMS_STATUS. */
+ {
+ struct ec_response_get_comms_status *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_comms_status *)mock->o_data;
+ data->flags = 0;
+ }
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, sizeof(struct ec_response_get_comms_status));
+
+ KUNIT_EXPECT_EQ(test, msg.result, EC_RES_SUCCESS);
+
+ /* For the first host command to return EC_RES_IN_PROGRESS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For EC_CMD_GET_COMMS_STATUS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_COMMS_STATUS);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_comms_status));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+
+ KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 2);
+}
+
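+/*
+ * If every EC_CMD_GET_COMMS_STATUS poll fails with -EAGAIN, the retry
+ * loop gives up after EC_COMMAND_RETRIES polls and -EAGAIN is returned.
+ */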
+static void cros_ec_proto_test_cmd_xfer_in_progress_retries_eagain(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
+
+ /* For the first host command to return EC_RES_IN_PROGRESS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_IN_PROGRESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For EC_CMD_GET_COMMS_STATUS EC_COMMAND_RETRIES times. */
+ cros_kunit_ec_xfer_mock_default_ret = -EAGAIN;
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, -EAGAIN);
+
+ /* For EC_CMD_GET_COMMS_STATUS EC_COMMAND_RETRIES times. */
+ KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 51);
+}
+
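+/*
+ * The same happens when the EC keeps reporting EC_COMMS_STATUS_PROCESSING
+ * for every retry.
+ */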
+static void cros_ec_proto_test_cmd_xfer_in_progress_retries_status_processing(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
+
+ /* For the first host command to return EC_RES_IN_PROGRESS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_IN_PROGRESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For EC_CMD_GET_COMMS_STATUS EC_COMMAND_RETRIES times. */
+ {
+ struct ec_response_get_comms_status *data;
+ int i;
+
+ for (i = 0; i < 50; ++i) {
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_comms_status *)mock->o_data;
+ data->flags |= EC_COMMS_STATUS_PROCESSING;
+ }
+ }
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, -EAGAIN);
+
+ /* For EC_CMD_GET_COMMS_STATUS EC_COMMAND_RETRIES times. */
+ KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 51);
+}
+
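+/* A transport error (-EIO) while polling EC_CMD_GET_COMMS_STATUS is propagated. */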
+static void cros_ec_proto_test_cmd_xfer_in_progress_xfer_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ /* For the first host command to return EC_RES_IN_PROGRESS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_IN_PROGRESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For EC_CMD_GET_COMMS_STATUS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, -EIO, EC_RES_SUCCESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, -EIO);
+}
+
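+/*
+ * An EC error on the EC_CMD_GET_COMMS_STATUS poll is reported through
+ * msg->result while cros_ec_cmd_xfer() itself returns 0.
+ */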
+static void cros_ec_proto_test_cmd_xfer_in_progress_return_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
+
+ /* For the first host command to return EC_RES_IN_PROGRESS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_IN_PROGRESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For EC_CMD_GET_COMMS_STATUS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_EQ(test, msg.result, EC_RES_INVALID_COMMAND);
+
+ KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 2);
+}
+
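+/* A zero-length EC_CMD_GET_COMMS_STATUS reply makes cros_ec_cmd_xfer() fail with -EPROTO. */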
+static void cros_ec_proto_test_cmd_xfer_in_progress_return0(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
+
+ /* For the first host command to return EC_RES_IN_PROGRESS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_IN_PROGRESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For EC_CMD_GET_COMMS_STATUS. */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, -EPROTO);
+
+ KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 2);
+}
+
+static void cros_ec_proto_test_cmd_xfer_status_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ /* For cros_ec_cmd_xfer(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ ret = cros_ec_cmd_xfer_status(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+}
+
+static void cros_ec_proto_test_cmd_xfer_status_xfer_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_command msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ /* For cros_ec_cmd_xfer(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, -EPROTO, EC_RES_SUCCESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ ret = cros_ec_cmd_xfer_status(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, -EPROTO);
+}
+
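+/*
+ * cros_ec_cmd_xfer_status() must translate each EC result code into the
+ * matching errno from the table below.
+ */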
+static void cros_ec_proto_test_cmd_xfer_status_return_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret, i;
+ struct cros_ec_command msg;
+ static const int map[] = {
+ [EC_RES_SUCCESS] = 0,
+ [EC_RES_INVALID_COMMAND] = -EOPNOTSUPP,
+ [EC_RES_ERROR] = -EIO,
+ [EC_RES_INVALID_PARAM] = -EINVAL,
+ [EC_RES_ACCESS_DENIED] = -EACCES,
+ [EC_RES_INVALID_RESPONSE] = -EPROTO,
+ [EC_RES_INVALID_VERSION] = -ENOPROTOOPT,
+ [EC_RES_INVALID_CHECKSUM] = -EBADMSG,
+		/*
+		 * EC_RES_IN_PROGRESS is special because cros_ec_send_command() has extra
+		 * logic to handle it. The default cros_kunit_ec_xfer_mock_default_ret is 0,
+		 * so cros_ec_xfer_command() in cros_ec_wait_until_complete() returns 0 and
+		 * the call fails with -EPROTO without ever reaching cros_ec_map_error().
+		 */
+ [EC_RES_IN_PROGRESS] = -EPROTO,
+ [EC_RES_UNAVAILABLE] = -ENODATA,
+ [EC_RES_TIMEOUT] = -ETIMEDOUT,
+ [EC_RES_OVERFLOW] = -EOVERFLOW,
+ [EC_RES_INVALID_HEADER] = -EBADR,
+ [EC_RES_REQUEST_TRUNCATED] = -EBADR,
+ [EC_RES_RESPONSE_TOO_BIG] = -EFBIG,
+ [EC_RES_BUS_ERROR] = -EFAULT,
+ [EC_RES_BUSY] = -EBUSY,
+ [EC_RES_INVALID_HEADER_VERSION] = -EBADMSG,
+ [EC_RES_INVALID_HEADER_CRC] = -EBADMSG,
+ [EC_RES_INVALID_DATA_CRC] = -EBADMSG,
+ [EC_RES_DUP_UNAVAILABLE] = -ENODATA,
+ };
+
+ memset(&msg, 0, sizeof(msg));
+
+ for (i = 0; i < ARRAY_SIZE(map); ++i) {
+ mock = cros_kunit_ec_xfer_mock_addx(test, 0, i, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ ret = cros_ec_cmd_xfer_status(ec_dev, &msg);
+ KUNIT_EXPECT_EQ(test, ret, map[i]);
+ }
+}
+
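+/*
+ * Without MKBP event support, cros_ec_get_next_event() falls back to
+ * EC_CMD_MKBP_STATE and reports a wake event with no more events
+ * pending.
+ */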
+static void cros_ec_proto_test_get_next_event_no_mkbp_event(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ bool wake_event, more_events;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->mkbp_event_supported = 0;
+
+ /* Set some garbage bytes. */
+ wake_event = false;
+ more_events = true;
+
+ /* For get_keyboard_state_event(). */
+ {
+ union ec_response_get_next_data_v1 *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (union ec_response_get_next_data_v1 *)mock->o_data;
+ data->host_event = 0xbeef;
+ }
+
+ ret = cros_ec_get_next_event(ec_dev, &wake_event, &more_events);
+ KUNIT_EXPECT_EQ(test, ret, sizeof(union ec_response_get_next_data_v1));
+
+ KUNIT_EXPECT_EQ(test, ec_dev->event_data.event_type, EC_MKBP_EVENT_KEY_MATRIX);
+ KUNIT_EXPECT_EQ(test, ec_dev->event_data.data.host_event, 0xbeef);
+
+ KUNIT_EXPECT_TRUE(test, wake_event);
+ KUNIT_EXPECT_FALSE(test, more_events);
+
+ /* For get_keyboard_state_event(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_MKBP_STATE);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(union ec_response_get_next_data_v1));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+}
+
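+/* While the EC is suspended, cros_ec_get_next_event() must fail with -EHOSTDOWN. */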
+static void cros_ec_proto_test_get_next_event_mkbp_event_ec_suspended(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+
+ ec_dev->mkbp_event_supported = 1;
+ ec_dev->suspended = true;
+
+ ret = cros_ec_get_next_event(ec_dev, NULL, NULL);
+ KUNIT_EXPECT_EQ(test, ret, -EHOSTDOWN);
+}
+
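+/*
+ * MKBP event version 0: EC_CMD_GET_NEXT_EVENT is sent with version 0,
+ * the EC_MKBP_HAS_MORE_EVENTS flag is stripped from the event type and
+ * reported through more_events, and sensor FIFO events are not wake
+ * events.
+ */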
+static void cros_ec_proto_test_get_next_event_mkbp_event_version0(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ bool wake_event, more_events;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->mkbp_event_supported = 1;
+
+ /* Set some garbage bytes. */
+ wake_event = true;
+ more_events = false;
+
+ /* For get_next_event_xfer(). */
+ {
+ struct ec_response_get_next_event *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_next_event *)mock->o_data;
+ data->event_type = EC_MKBP_EVENT_SENSOR_FIFO | EC_MKBP_HAS_MORE_EVENTS;
+ data->data.sysrq = 0xbeef;
+ }
+
+ ret = cros_ec_get_next_event(ec_dev, &wake_event, &more_events);
+ KUNIT_EXPECT_EQ(test, ret, sizeof(struct ec_response_get_next_event));
+
+ KUNIT_EXPECT_EQ(test, ec_dev->event_data.event_type, EC_MKBP_EVENT_SENSOR_FIFO);
+ KUNIT_EXPECT_EQ(test, ec_dev->event_data.data.sysrq, 0xbeef);
+
+ KUNIT_EXPECT_FALSE(test, wake_event);
+ KUNIT_EXPECT_TRUE(test, more_events);
+
+ /* For get_next_event_xfer(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_NEXT_EVENT);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_get_next_event));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+}
+
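+/* mkbp_event_supported == 3 selects version 2 of EC_CMD_GET_NEXT_EVENT. */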
+static void cros_ec_proto_test_get_next_event_mkbp_event_version2(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ bool wake_event, more_events;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->mkbp_event_supported = 3;
+
+ /* Set some garbage bytes. */
+ wake_event = false;
+ more_events = true;
+
+ /* For get_next_event_xfer(). */
+ {
+ struct ec_response_get_next_event_v1 *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_next_event_v1 *)mock->o_data;
+ data->event_type = EC_MKBP_EVENT_FINGERPRINT;
+ data->data.sysrq = 0xbeef;
+ }
+
+ ret = cros_ec_get_next_event(ec_dev, &wake_event, &more_events);
+ KUNIT_EXPECT_EQ(test, ret, sizeof(struct ec_response_get_next_event_v1));
+
+ KUNIT_EXPECT_EQ(test, ec_dev->event_data.event_type, EC_MKBP_EVENT_FINGERPRINT);
+ KUNIT_EXPECT_EQ(test, ec_dev->event_data.data.sysrq, 0xbeef);
+
+ KUNIT_EXPECT_TRUE(test, wake_event);
+ KUNIT_EXPECT_FALSE(test, more_events);
+
+ /* For get_next_event_xfer(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 2);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_NEXT_EVENT);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_next_event_v1));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+}
+
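+/* An RTC host event must not be reported as a wake event. */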
+static void cros_ec_proto_test_get_next_event_mkbp_event_host_event_rtc(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ bool wake_event;
+ struct ec_response_get_next_event_v1 *data;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->mkbp_event_supported = 3;
+ ec_dev->host_event_wake_mask = U32_MAX;
+
+ /* Set some garbage bytes. */
+ wake_event = true;
+
+ /* For get_next_event_xfer(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test,
+ sizeof(data->event_type) +
+ sizeof(data->data.host_event));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_next_event_v1 *)mock->o_data;
+ data->event_type = EC_MKBP_EVENT_HOST_EVENT;
+ put_unaligned_le32(EC_HOST_EVENT_MASK(EC_HOST_EVENT_RTC), &data->data.host_event);
+ }
+
+ ret = cros_ec_get_next_event(ec_dev, &wake_event, NULL);
+ KUNIT_EXPECT_EQ(test, ret, sizeof(data->event_type) + sizeof(data->data.host_event));
+
+ KUNIT_EXPECT_EQ(test, ec_dev->event_data.event_type, EC_MKBP_EVENT_HOST_EVENT);
+
+ KUNIT_EXPECT_FALSE(test, wake_event);
+
+ /* For get_next_event_xfer(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 2);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_NEXT_EVENT);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_next_event_v1));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+}
+
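+/* A host event masked out of host_event_wake_mask must not be a wake event. */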
+static void cros_ec_proto_test_get_next_event_mkbp_event_host_event_masked(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ bool wake_event;
+ struct ec_response_get_next_event_v1 *data;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->mkbp_event_supported = 3;
+ ec_dev->host_event_wake_mask = U32_MAX & ~EC_HOST_EVENT_MASK(EC_HOST_EVENT_AC_DISCONNECTED);
+
+ /* Set some garbage bytes. */
+ wake_event = true;
+
+ /* For get_next_event_xfer(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_add(test,
+ sizeof(data->event_type) +
+ sizeof(data->data.host_event));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_next_event_v1 *)mock->o_data;
+ data->event_type = EC_MKBP_EVENT_HOST_EVENT;
+ put_unaligned_le32(EC_HOST_EVENT_MASK(EC_HOST_EVENT_AC_DISCONNECTED),
+ &data->data.host_event);
+ }
+
+ ret = cros_ec_get_next_event(ec_dev, &wake_event, NULL);
+ KUNIT_EXPECT_EQ(test, ret, sizeof(data->event_type) + sizeof(data->data.host_event));
+
+ KUNIT_EXPECT_EQ(test, ec_dev->event_data.event_type, EC_MKBP_EVENT_HOST_EVENT);
+
+ KUNIT_EXPECT_FALSE(test, wake_event);
+
+ /* For get_next_event_xfer(). */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 2);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_NEXT_EVENT);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_get_next_event_v1));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+}
+
+static void cros_ec_proto_test_get_host_event_no_mkbp_event(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+
+ ec_dev->mkbp_event_supported = 0;
+
+ ret = cros_ec_get_host_event(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+}
+
+static void cros_ec_proto_test_get_host_event_not_host_event(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+
+ ec_dev->mkbp_event_supported = 1;
+ ec_dev->event_data.event_type = EC_MKBP_EVENT_FINGERPRINT;
+
+ ret = cros_ec_get_host_event(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+}
+
+static void cros_ec_proto_test_get_host_event_wrong_event_size(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+
+ ec_dev->mkbp_event_supported = 1;
+ ec_dev->event_data.event_type = EC_MKBP_EVENT_HOST_EVENT;
+ ec_dev->event_size = 0xff;
+
+ ret = cros_ec_get_host_event(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+}
+
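+/*
+ * With a host event of the expected size pending,
+ * cros_ec_get_host_event() returns its event mask.
+ */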
+static void cros_ec_proto_test_get_host_event_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ int ret;
+
+ ec_dev->mkbp_event_supported = 1;
+ ec_dev->event_data.event_type = EC_MKBP_EVENT_HOST_EVENT;
+ ec_dev->event_size = sizeof(ec_dev->event_data.data.host_event);
+ put_unaligned_le32(EC_HOST_EVENT_MASK(EC_HOST_EVENT_RTC),
+ &ec_dev->event_data.data.host_event);
+
+ ret = cros_ec_get_host_event(ec_dev);
+ KUNIT_EXPECT_EQ(test, ret, EC_HOST_EVENT_MASK(EC_HOST_EVENT_RTC));
+}
+
+static void cros_ec_proto_test_check_features_cached(struct kunit *test)
+{
+ int ret, i;
+ struct cros_ec_dev ec;
+
+ ec.features.flags[0] = EC_FEATURE_MASK_0(EC_FEATURE_FINGERPRINT);
+ ec.features.flags[1] = EC_FEATURE_MASK_0(EC_FEATURE_SCP);
+
+ for (i = 0; i < EC_FEATURE_TYPEC_MUX_REQUIRE_AP_ACK; ++i) {
+ ret = cros_ec_check_features(&ec, i);
+ switch (i) {
+ case EC_FEATURE_FINGERPRINT:
+ case EC_FEATURE_SCP:
+ KUNIT_EXPECT_TRUE(test, ret);
+ break;
+ default:
+ KUNIT_EXPECT_FALSE(test, ret);
+ break;
+ }
+ }
+}
+
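+/*
+ * With the cached flags invalidated (set to -1), cros_ec_check_features()
+ * issues a single EC_CMD_GET_FEATURES and answers every query from the
+ * fetched flags.
+ */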
+static void cros_ec_proto_test_check_features_not_cached(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret, i;
+ struct cros_ec_dev ec;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec.ec_dev = ec_dev;
+ ec.dev = ec_dev->dev;
+ ec.cmd_offset = 0;
+ ec.features.flags[0] = -1;
+ ec.features.flags[1] = -1;
+
+ /* For EC_CMD_GET_FEATURES. */
+ {
+ struct ec_response_get_features *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_get_features *)mock->o_data;
+ data->flags[0] = EC_FEATURE_MASK_0(EC_FEATURE_FINGERPRINT);
+ data->flags[1] = EC_FEATURE_MASK_0(EC_FEATURE_SCP);
+ }
+
+ for (i = 0; i < EC_FEATURE_TYPEC_MUX_REQUIRE_AP_ACK; ++i) {
+ ret = cros_ec_check_features(&ec, i);
+ switch (i) {
+ case EC_FEATURE_FINGERPRINT:
+ case EC_FEATURE_SCP:
+ KUNIT_EXPECT_TRUE(test, ret);
+ break;
+ default:
+ KUNIT_EXPECT_FALSE(test, ret);
+ break;
+ }
+ }
+
+ /* For EC_CMD_GET_FEATURES. */
+ {
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_FEATURES);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_get_features));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
+ }
+}
+
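+/* The sensor count is taken from a MOTIONSENSE_CMD_DUMP response. */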
+static void cros_ec_proto_test_get_sensor_count_normal(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_dev ec;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec.ec_dev = ec_dev;
+ ec.dev = ec_dev->dev;
+ ec.cmd_offset = 0;
+
+ /* For EC_CMD_MOTION_SENSE_CMD. */
+ {
+ struct ec_response_motion_sense *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (struct ec_response_motion_sense *)mock->o_data;
+ data->dump.sensor_count = 0xbf;
+ }
+
+ ret = cros_ec_get_sensor_count(&ec);
+ KUNIT_EXPECT_EQ(test, ret, 0xbf);
+
+ /* For EC_CMD_MOTION_SENSE_CMD. */
+ {
+ struct ec_params_motion_sense *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 1);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_MOTION_SENSE_CMD);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_motion_sense));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_motion_sense *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->cmd, MOTIONSENSE_CMD_DUMP);
+ }
+}
+
+static void cros_ec_proto_test_get_sensor_count_xfer_error(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ struct cros_ec_dev ec;
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec.ec_dev = ec_dev;
+ ec.dev = ec_dev->dev;
+ ec.cmd_offset = 0;
+
+ /* For EC_CMD_MOTION_SENSE_CMD. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, -EPROTO, EC_RES_SUCCESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ ret = cros_ec_get_sensor_count(&ec);
+ KUNIT_EXPECT_EQ(test, ret, -EPROTO);
+
+ /* For EC_CMD_MOTION_SENSE_CMD. */
+ {
+ struct ec_params_motion_sense *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 1);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_MOTION_SENSE_CMD);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_motion_sense));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_motion_sense *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->cmd, MOTIONSENSE_CMD_DUMP);
+ }
+}
+
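+/*
+ * When EC_CMD_MOTION_SENSE_CMD fails, the sensor count falls back to
+ * EC_MEMMAP_ACC_STATUS: two sensors if the presence bit is set,
+ * otherwise none.
+ */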
+static void cros_ec_proto_test_get_sensor_count_legacy(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret, i;
+ struct cros_ec_dev ec;
+ struct {
+ u8 readmem_data;
+ int expected_result;
+ } test_data[] = {
+ { 0, 0 },
+ { EC_MEMMAP_ACC_STATUS_PRESENCE_BIT, 2 },
+ };
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+ ec_dev->cmd_readmem = cros_kunit_readmem_mock;
+ ec.ec_dev = ec_dev;
+ ec.dev = ec_dev->dev;
+ ec.cmd_offset = 0;
+
+ for (i = 0; i < ARRAY_SIZE(test_data); ++i) {
+ /* For EC_CMD_MOTION_SENSE_CMD. */
+ {
+ mock = cros_kunit_ec_xfer_mock_addx(test, -EPROTO, EC_RES_SUCCESS, 0);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+ }
+
+ /* For readmem. */
+ {
+ cros_kunit_readmem_mock_data = kunit_kzalloc(test, 1, GFP_KERNEL);
+ KUNIT_ASSERT_PTR_NE(test, cros_kunit_readmem_mock_data, NULL);
+ cros_kunit_readmem_mock_data[0] = test_data[i].readmem_data;
+
+ cros_kunit_ec_xfer_mock_default_ret = 1;
+ }
+
+ ret = cros_ec_get_sensor_count(&ec);
+ KUNIT_EXPECT_EQ(test, ret, test_data[i].expected_result);
+
+ /* For EC_CMD_MOTION_SENSE_CMD. */
+ {
+ struct ec_params_motion_sense *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 1);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_MOTION_SENSE_CMD);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize,
+ sizeof(struct ec_response_motion_sense));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
+
+ data = (struct ec_params_motion_sense *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data->cmd, MOTIONSENSE_CMD_DUMP);
+ }
+
+ /* For readmem. */
+ {
+ KUNIT_EXPECT_EQ(test, cros_kunit_readmem_mock_offset, EC_MEMMAP_ACC_STATUS);
+ }
+ }
+}
+
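+/*
+ * cros_ec_cmd() forwards the version, command, outgoing payload and
+ * buffer sizes to the underlying transfer.
+ */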
+static void cros_ec_proto_test_ec_cmd(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+ struct ec_xfer_mock *mock;
+ int ret;
+ u8 out[3], in[2];
+
+ ec_dev->max_request = 0xff;
+ ec_dev->max_response = 0xee;
+
+ out[0] = 0xdd;
+ out[1] = 0xcc;
+ out[2] = 0xbb;
+
+ {
+ u8 *data;
+
+ mock = cros_kunit_ec_xfer_mock_add(test, 2);
+ KUNIT_ASSERT_PTR_NE(test, mock, NULL);
+
+ data = (u8 *)mock->o_data;
+ data[0] = 0xaa;
+ data[1] = 0x99;
+ }
+
+ ret = cros_ec_cmd(ec_dev, 0x88, 0x77, out, ARRAY_SIZE(out), in, ARRAY_SIZE(in));
+ KUNIT_EXPECT_EQ(test, ret, 2);
+
+ {
+ u8 *data;
+
+ mock = cros_kunit_ec_xfer_mock_next();
+ KUNIT_EXPECT_PTR_NE(test, mock, NULL);
+
+ KUNIT_EXPECT_EQ(test, mock->msg.version, 0x88);
+ KUNIT_EXPECT_EQ(test, mock->msg.command, 0x77);
+ KUNIT_EXPECT_EQ(test, mock->msg.insize, ARRAY_SIZE(in));
+ KUNIT_EXPECT_EQ(test, mock->msg.outsize, ARRAY_SIZE(out));
+
+ data = (u8 *)mock->i_data;
+ KUNIT_EXPECT_EQ(test, data[0], 0xdd);
+ KUNIT_EXPECT_EQ(test, data[1], 0xcc);
+ KUNIT_EXPECT_EQ(test, data[2], 0xbb);
+ }
+}
+
+static void cros_ec_proto_test_release(struct device *dev)
+{
+}
+
+static int cros_ec_proto_test_init(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv;
+ struct cros_ec_device *ec_dev;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ test->priv = priv;
+
+ ec_dev = &priv->ec_dev;
+ ec_dev->dout = (u8 *)priv->dout;
+ ec_dev->dout_size = ARRAY_SIZE(priv->dout);
+ ec_dev->din = (u8 *)priv->din;
+ ec_dev->din_size = ARRAY_SIZE(priv->din);
+ ec_dev->proto_version = EC_HOST_REQUEST_VERSION;
+ ec_dev->dev = kunit_kzalloc(test, sizeof(*ec_dev->dev), GFP_KERNEL);
+ if (!ec_dev->dev)
+ return -ENOMEM;
+ device_initialize(ec_dev->dev);
+ dev_set_name(ec_dev->dev, "cros_ec_proto_test");
+ ec_dev->dev->release = cros_ec_proto_test_release;
+ ec_dev->cmd_xfer = cros_kunit_ec_xfer_mock;
+ ec_dev->pkt_xfer = cros_kunit_ec_xfer_mock;
+
+ priv->msg = (struct cros_ec_command *)priv->_msg;
+
+ cros_kunit_mock_reset();
+
+ return 0;
+}
+
+static void cros_ec_proto_test_exit(struct kunit *test)
+{
+ struct cros_ec_proto_test_priv *priv = test->priv;
+ struct cros_ec_device *ec_dev = &priv->ec_dev;
+
+ put_device(ec_dev->dev);
+}
+
+static struct kunit_case cros_ec_proto_test_cases[] = {
+ KUNIT_CASE(cros_ec_proto_test_prepare_tx_legacy_normal),
+ KUNIT_CASE(cros_ec_proto_test_prepare_tx_legacy_bad_msg_outsize),
+ KUNIT_CASE(cros_ec_proto_test_prepare_tx_normal),
+ KUNIT_CASE(cros_ec_proto_test_prepare_tx_bad_msg_outsize),
+ KUNIT_CASE(cros_ec_proto_test_check_result),
+ KUNIT_CASE(cros_ec_proto_test_query_all_normal),
+ KUNIT_CASE(cros_ec_proto_test_query_all_no_pd_return_error),
+ KUNIT_CASE(cros_ec_proto_test_query_all_no_pd_return0),
+ KUNIT_CASE(cros_ec_proto_test_query_all_legacy_normal_v3_return_error),
+ KUNIT_CASE(cros_ec_proto_test_query_all_legacy_normal_v3_return0),
+ KUNIT_CASE(cros_ec_proto_test_query_all_legacy_xfer_error),
+ KUNIT_CASE(cros_ec_proto_test_query_all_legacy_return_error),
+ KUNIT_CASE(cros_ec_proto_test_query_all_legacy_data_error),
+ KUNIT_CASE(cros_ec_proto_test_query_all_legacy_return0),
+ KUNIT_CASE(cros_ec_proto_test_query_all_no_mkbp),
+ KUNIT_CASE(cros_ec_proto_test_query_all_no_mkbp_return_error),
+ KUNIT_CASE(cros_ec_proto_test_query_all_no_mkbp_return0),
+ KUNIT_CASE(cros_ec_proto_test_query_all_no_host_sleep),
+ KUNIT_CASE(cros_ec_proto_test_query_all_no_host_sleep_return0),
+ KUNIT_CASE(cros_ec_proto_test_query_all_default_wake_mask_return_error),
+ KUNIT_CASE(cros_ec_proto_test_query_all_default_wake_mask_return0),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_normal),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_excess_msg_insize),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_excess_msg_outsize_without_passthru),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_excess_msg_outsize_with_passthru),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_protocol_v3_normal),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_protocol_v3_no_op),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_protocol_v2_normal),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_protocol_v2_no_op),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_in_progress_normal),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_in_progress_retries_eagain),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_in_progress_retries_status_processing),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_in_progress_xfer_error),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_in_progress_return_error),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_in_progress_return0),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_status_normal),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_status_xfer_error),
+ KUNIT_CASE(cros_ec_proto_test_cmd_xfer_status_return_error),
+ KUNIT_CASE(cros_ec_proto_test_get_next_event_no_mkbp_event),
+ KUNIT_CASE(cros_ec_proto_test_get_next_event_mkbp_event_ec_suspended),
+ KUNIT_CASE(cros_ec_proto_test_get_next_event_mkbp_event_version0),
+ KUNIT_CASE(cros_ec_proto_test_get_next_event_mkbp_event_version2),
+ KUNIT_CASE(cros_ec_proto_test_get_next_event_mkbp_event_host_event_rtc),
+ KUNIT_CASE(cros_ec_proto_test_get_next_event_mkbp_event_host_event_masked),
+ KUNIT_CASE(cros_ec_proto_test_get_host_event_no_mkbp_event),
+ KUNIT_CASE(cros_ec_proto_test_get_host_event_not_host_event),
+ KUNIT_CASE(cros_ec_proto_test_get_host_event_wrong_event_size),
+ KUNIT_CASE(cros_ec_proto_test_get_host_event_normal),
+ KUNIT_CASE(cros_ec_proto_test_check_features_cached),
+ KUNIT_CASE(cros_ec_proto_test_check_features_not_cached),
+ KUNIT_CASE(cros_ec_proto_test_get_sensor_count_normal),
+ KUNIT_CASE(cros_ec_proto_test_get_sensor_count_xfer_error),
+ KUNIT_CASE(cros_ec_proto_test_get_sensor_count_legacy),
+ KUNIT_CASE(cros_ec_proto_test_ec_cmd),
+ {}
+};
+
+static struct kunit_suite cros_ec_proto_test_suite = {
+ .name = "cros_ec_proto_test",
+ .init = cros_ec_proto_test_init,
+ .exit = cros_ec_proto_test_exit,
+ .test_cases = cros_ec_proto_test_cases,
+};
+
+kunit_test_suite(cros_ec_proto_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/chrome/cros_ec_trace.h b/drivers/platform/chrome/cros_ec_trace.h
index 9bb5cd2c98b8..d7e407de88df 100644
--- a/drivers/platform/chrome/cros_ec_trace.h
+++ b/drivers/platform/chrome/cros_ec_trace.h
@@ -30,8 +30,8 @@ TRACE_EVENT(cros_ec_request_start,
),
TP_fast_assign(
__entry->version = cmd->version;
- __entry->offset = cmd->command / EC_CMD_PASSTHRU_OFFSET(1);
- __entry->command = cmd->command % EC_CMD_PASSTHRU_OFFSET(1);
+ __entry->offset = cmd->command / EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX);
+ __entry->command = cmd->command % EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX);
__entry->outsize = cmd->outsize;
__entry->insize = cmd->insize;
),
@@ -55,8 +55,8 @@ TRACE_EVENT(cros_ec_request_done,
),
TP_fast_assign(
__entry->version = cmd->version;
- __entry->offset = cmd->command / EC_CMD_PASSTHRU_OFFSET(1);
- __entry->command = cmd->command % EC_CMD_PASSTHRU_OFFSET(1);
+ __entry->offset = cmd->command / EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX);
+ __entry->command = cmd->command % EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX);
__entry->outsize = cmd->outsize;
__entry->insize = cmd->insize;
__entry->result = cmd->result;
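For reference, EC_CMD_PASSTHRU_OFFSET(n) expands to a fixed per-EC stride (assumed here to be 0x4000 * (n), as defined in cros_ec_commands.h), so dividing and taking the remainder by EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) splits a command number into the passthru index and the base command. A stand-alone sketch of that arithmetic (the command value is illustrative):

#include <stdio.h>

/* Assumed to match the definitions in cros_ec_commands.h / cros_ec_proto.h. */
#define EC_CMD_PASSTHRU_OFFSET(n)	(0x4000 * (n))
#define CROS_EC_DEV_PD_INDEX		1

int main(void)
{
	unsigned int command = EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) + 0x26;

	printf("offset=%u command=%#x\n",
	       command / EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX),	/* 1: PD EC */
	       command % EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX));	/* 0x26     */
	return 0;
}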
diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
index 7cb2e35c4ded..de6ee0f926a6 100644
--- a/drivers/platform/chrome/cros_ec_typec.c
+++ b/drivers/platform/chrome/cros_ec_typec.c
@@ -25,6 +25,8 @@
#define DRV_NAME "cros-ec-typec"
+#define DP_PORT_VDO (BIT(DP_PIN_ASSIGN_C) | BIT(DP_PIN_ASSIGN_D) | DP_CAP_DFP_D)
+
/* Supported alt modes. */
enum {
CROS_EC_ALTMODE_DP = 0,
@@ -60,8 +62,7 @@ struct cros_typec_port {
uint8_t mux_flags;
uint8_t role;
- /* Port alt modes. */
- struct typec_altmode p_altmode[CROS_EC_ALTMODE_MAX];
+ struct typec_altmode *port_altmode[CROS_EC_ALTMODE_MAX];
/* Flag indicating that PD partner discovery data parsing is completed. */
bool sop_disc_done;
@@ -254,6 +255,14 @@ static void cros_typec_remove_cable(struct cros_typec_data *typec,
port->sop_prime_disc_done = false;
}
+static void cros_typec_unregister_port_altmodes(struct cros_typec_port *port)
+{
+ int i;
+
+ for (i = 0; i < CROS_EC_ALTMODE_MAX; i++)
+ typec_unregister_altmode(port->port_altmode[i]);
+}
+
static void cros_unregister_ports(struct cros_typec_data *typec)
{
int i;
@@ -268,34 +277,49 @@ static void cros_unregister_ports(struct cros_typec_data *typec)
usb_role_switch_put(typec->ports[i]->role_sw);
typec_switch_put(typec->ports[i]->ori_sw);
typec_mux_put(typec->ports[i]->mux);
+ cros_typec_unregister_port_altmodes(typec->ports[i]);
typec_unregister_port(typec->ports[i]->port);
}
}
/*
- * Fake the alt mode structs until we actually start registering Type C port
- * and partner alt modes.
+ * Register port alt modes with known values till we start retrieving
+ * port capabilities from the EC.
*/
-static void cros_typec_register_port_altmodes(struct cros_typec_data *typec,
+static int cros_typec_register_port_altmodes(struct cros_typec_data *typec,
int port_num)
{
struct cros_typec_port *port = typec->ports[port_num];
+ struct typec_altmode_desc desc;
+ struct typec_altmode *amode;
/* All PD capable CrOS devices are assumed to support DP altmode. */
- port->p_altmode[CROS_EC_ALTMODE_DP].svid = USB_TYPEC_DP_SID;
- port->p_altmode[CROS_EC_ALTMODE_DP].mode = USB_TYPEC_DP_MODE;
+ desc.svid = USB_TYPEC_DP_SID,
+ desc.mode = USB_TYPEC_DP_MODE,
+ desc.vdo = DP_PORT_VDO,
+ amode = typec_port_register_altmode(port->port, &desc);
+ if (IS_ERR(amode))
+ return PTR_ERR(amode);
+ port->port_altmode[CROS_EC_ALTMODE_DP] = amode;
/*
* Register TBT compatibility alt mode. The EC will not enter the mode
* if it doesn't support it, so it's safe to register it unconditionally
* here for now.
*/
- port->p_altmode[CROS_EC_ALTMODE_TBT].svid = USB_TYPEC_TBT_SID;
- port->p_altmode[CROS_EC_ALTMODE_TBT].mode = TYPEC_ANY_MODE;
+ memset(&desc, 0, sizeof(desc));
+ desc.svid = USB_TYPEC_TBT_SID,
+ desc.mode = TYPEC_ANY_MODE,
+ amode = typec_port_register_altmode(port->port, &desc);
+ if (IS_ERR(amode))
+ return PTR_ERR(amode);
+ port->port_altmode[CROS_EC_ALTMODE_TBT] = amode;
port->state.alt = NULL;
port->state.mode = TYPEC_STATE_USB;
port->state.data = NULL;
+
+ return 0;
}
static int cros_typec_init_ports(struct cros_typec_data *typec)
@@ -352,8 +376,8 @@ static int cros_typec_init_ports(struct cros_typec_data *typec)
cros_port->port = typec_register_port(dev, cap);
if (IS_ERR(cros_port->port)) {
- dev_err(dev, "Failed to register port %d\n", port_num);
ret = PTR_ERR(cros_port->port);
+ dev_err_probe(dev, ret, "Failed to register port %d\n", port_num);
goto unregister_ports;
}
@@ -362,7 +386,11 @@ static int cros_typec_init_ports(struct cros_typec_data *typec)
dev_dbg(dev, "No switch control for port %d\n",
port_num);
- cros_typec_register_port_altmodes(typec, port_num);
+ ret = cros_typec_register_port_altmodes(typec, port_num);
+ if (ret) {
+ dev_err(dev, "Failed to register port altmodes\n");
+ goto unregister_ports;
+ }
cros_port->disc_data = devm_kzalloc(dev, EC_PROTO2_MAX_RESPONSE_SIZE, GFP_KERNEL);
if (!cros_port->disc_data) {
@@ -431,7 +459,7 @@ static int cros_typec_enable_tbt(struct cros_typec_data *typec,
data.enter_vdo |= TBT_ENTER_MODE_ACTIVE_CABLE;
if (!port->state.alt) {
- port->state.alt = &port->p_altmode[CROS_EC_ALTMODE_TBT];
+ port->state.alt = port->port_altmode[CROS_EC_ALTMODE_TBT];
ret = cros_typec_usb_safe_state(port);
if (ret)
return ret;
@@ -473,7 +501,7 @@ static int cros_typec_enable_dp(struct cros_typec_data *typec,
/* Configuration VDO. */
dp_data.conf = DP_CONF_SET_PIN_ASSIGN(pd_ctrl->dp_mode);
if (!port->state.alt) {
- port->state.alt = &port->p_altmode[CROS_EC_ALTMODE_DP];
+ port->state.alt = port->port_altmode[CROS_EC_ALTMODE_DP];
ret = cros_typec_usb_safe_state(port);
if (ret)
return ret;
@@ -525,8 +553,8 @@ static int cros_typec_configure_mux(struct cros_typec_data *typec, int port_num,
enum typec_orientation orientation;
int ret;
- ret = cros_ec_command(typec->ec, 0, EC_CMD_USB_PD_MUX_INFO,
- &req, sizeof(req), &resp, sizeof(resp));
+ ret = cros_ec_cmd(typec->ec, 0, EC_CMD_USB_PD_MUX_INFO,
+ &req, sizeof(req), &resp, sizeof(resp));
if (ret < 0) {
dev_warn(typec->dev, "Failed to get mux info for port: %d, err = %d\n",
port_num, ret);
@@ -585,8 +613,8 @@ mux_ack:
/* Sending Acknowledgment to EC */
mux_ack.port = port_num;
- if (cros_ec_command(typec->ec, 0, EC_CMD_USB_PD_MUX_ACK, &mux_ack,
- sizeof(mux_ack), NULL, 0) < 0)
+ if (cros_ec_cmd(typec->ec, 0, EC_CMD_USB_PD_MUX_ACK, &mux_ack,
+ sizeof(mux_ack), NULL, 0) < 0)
dev_warn(typec->dev,
"Failed to send Mux ACK to EC for port: %d\n",
port_num);
@@ -754,8 +782,8 @@ static int cros_typec_handle_sop_prime_disc(struct cros_typec_data *typec, int p
int ret = 0;
memset(disc, 0, EC_PROTO2_MAX_RESPONSE_SIZE);
- ret = cros_ec_command(typec->ec, 0, EC_CMD_TYPEC_DISCOVERY, &req, sizeof(req),
- disc, EC_PROTO2_MAX_RESPONSE_SIZE);
+ ret = cros_ec_cmd(typec->ec, 0, EC_CMD_TYPEC_DISCOVERY, &req, sizeof(req),
+ disc, EC_PROTO2_MAX_RESPONSE_SIZE);
if (ret < 0) {
dev_err(typec->dev, "Failed to get SOP' discovery data for port: %d\n", port_num);
goto sop_prime_disc_exit;
@@ -837,8 +865,8 @@ static int cros_typec_handle_sop_disc(struct cros_typec_data *typec, int port_nu
typec_partner_set_pd_revision(port->partner, pd_revision);
memset(sop_disc, 0, EC_PROTO2_MAX_RESPONSE_SIZE);
- ret = cros_ec_command(typec->ec, 0, EC_CMD_TYPEC_DISCOVERY, &req, sizeof(req),
- sop_disc, EC_PROTO2_MAX_RESPONSE_SIZE);
+ ret = cros_ec_cmd(typec->ec, 0, EC_CMD_TYPEC_DISCOVERY, &req, sizeof(req),
+ sop_disc, EC_PROTO2_MAX_RESPONSE_SIZE);
if (ret < 0) {
dev_err(typec->dev, "Failed to get SOP discovery data for port: %d\n", port_num);
goto disc_exit;
@@ -870,8 +898,8 @@ static int cros_typec_send_clear_event(struct cros_typec_data *typec, int port_n
.clear_events_mask = events_mask,
};
- return cros_ec_command(typec->ec, 0, EC_CMD_TYPEC_CONTROL, &req,
- sizeof(req), NULL, 0);
+ return cros_ec_cmd(typec->ec, 0, EC_CMD_TYPEC_CONTROL, &req,
+ sizeof(req), NULL, 0);
}
static void cros_typec_handle_status(struct cros_typec_data *typec, int port_num)
@@ -882,8 +910,8 @@ static void cros_typec_handle_status(struct cros_typec_data *typec, int port_num
};
int ret;
- ret = cros_ec_command(typec->ec, 0, EC_CMD_TYPEC_STATUS, &req, sizeof(req),
- &resp, sizeof(resp));
+ ret = cros_ec_cmd(typec->ec, 0, EC_CMD_TYPEC_STATUS, &req, sizeof(req),
+ &resp, sizeof(resp));
if (ret < 0) {
dev_warn(typec->dev, "EC_CMD_TYPEC_STATUS failed for port: %d\n", port_num);
return;
@@ -960,9 +988,9 @@ static int cros_typec_port_update(struct cros_typec_data *typec, int port_num)
req.mux = USB_PD_CTRL_MUX_NO_CHANGE;
req.swap = USB_PD_CTRL_SWAP_NONE;
- ret = cros_ec_command(typec->ec, typec->pd_ctrl_ver,
- EC_CMD_USB_PD_CONTROL, &req, sizeof(req),
- &resp, sizeof(resp));
+ ret = cros_ec_cmd(typec->ec, typec->pd_ctrl_ver,
+ EC_CMD_USB_PD_CONTROL, &req, sizeof(req),
+ &resp, sizeof(resp));
if (ret < 0)
return ret;
@@ -997,9 +1025,8 @@ static int cros_typec_get_cmd_version(struct cros_typec_data *typec)
/* We're interested in the PD control command version. */
req_v1.cmd = EC_CMD_USB_PD_CONTROL;
- ret = cros_ec_command(typec->ec, 1, EC_CMD_GET_CMD_VERSIONS,
- &req_v1, sizeof(req_v1), &resp,
- sizeof(resp));
+ ret = cros_ec_cmd(typec->ec, 1, EC_CMD_GET_CMD_VERSIONS,
+ &req_v1, sizeof(req_v1), &resp, sizeof(resp));
if (ret < 0)
return ret;
@@ -1090,8 +1117,8 @@ static int cros_typec_probe(struct platform_device *pdev)
typec->typec_cmd_supported = cros_ec_check_features(ec_dev, EC_FEATURE_TYPEC_CMD);
typec->needs_mux_ack = cros_ec_check_features(ec_dev, EC_FEATURE_TYPEC_MUX_REQUIRE_AP_ACK);
- ret = cros_ec_command(typec->ec, 0, EC_CMD_USB_PD_PORTS, NULL, 0,
- &resp, sizeof(resp));
+ ret = cros_ec_cmd(typec->ec, 0, EC_CMD_USB_PD_PORTS, NULL, 0,
+ &resp, sizeof(resp));
if (ret < 0)
return ret;
diff --git a/drivers/platform/chrome/cros_kbd_led_backlight.c b/drivers/platform/chrome/cros_kbd_led_backlight.c
index aa409f0201fb..793fd3f1015d 100644
--- a/drivers/platform/chrome/cros_kbd_led_backlight.c
+++ b/drivers/platform/chrome/cros_kbd_led_backlight.c
@@ -4,24 +4,60 @@
// Copyright (C) 2012 Google, Inc.
#include <linux/acpi.h>
-#include <linux/leds.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/slab.h>
+struct keyboard_led {
+ struct led_classdev cdev;
+ struct cros_ec_device *ec;
+};
+
+/**
+ * struct keyboard_led_drvdata - keyboard LED driver data.
+ * @init: Init function.
+ * @brightness_get: Get LED brightness level.
+ * @brightness_set: Set LED brightness level. Must not sleep.
+ * @brightness_set_blocking: Set LED brightness level. It can block the
+ * caller for the time required for accessing a
+ * LED device register.
+ * @max_brightness: Maximum brightness.
+ *
+ * See struct led_classdev in include/linux/leds.h for more details.
+ */
+struct keyboard_led_drvdata {
+ int (*init)(struct platform_device *pdev);
+
+ enum led_brightness (*brightness_get)(struct led_classdev *led_cdev);
+
+ void (*brightness_set)(struct led_classdev *led_cdev,
+ enum led_brightness brightness);
+ int (*brightness_set_blocking)(struct led_classdev *led_cdev,
+ enum led_brightness brightness);
+
+ enum led_brightness max_brightness;
+};
+
+#define KEYBOARD_BACKLIGHT_MAX 100
+
+#ifdef CONFIG_ACPI
+
/* Keyboard LED ACPI Device must be defined in firmware */
#define ACPI_KEYBOARD_BACKLIGHT_DEVICE "\\_SB.KBLT"
#define ACPI_KEYBOARD_BACKLIGHT_READ ACPI_KEYBOARD_BACKLIGHT_DEVICE ".KBQC"
#define ACPI_KEYBOARD_BACKLIGHT_WRITE ACPI_KEYBOARD_BACKLIGHT_DEVICE ".KBCM"
-#define ACPI_KEYBOARD_BACKLIGHT_MAX 100
-
-static void keyboard_led_set_brightness(struct led_classdev *cdev,
- enum led_brightness brightness)
+static void keyboard_led_set_brightness_acpi(struct led_classdev *cdev,
+ enum led_brightness brightness)
{
union acpi_object param;
struct acpi_object_list input;
@@ -40,7 +76,7 @@ static void keyboard_led_set_brightness(struct led_classdev *cdev,
}
static enum led_brightness
-keyboard_led_get_brightness(struct led_classdev *cdev)
+keyboard_led_get_brightness_acpi(struct led_classdev *cdev)
{
unsigned long long brightness;
acpi_status status;
@@ -56,12 +92,10 @@ keyboard_led_get_brightness(struct led_classdev *cdev)
return brightness;
}
-static int keyboard_led_probe(struct platform_device *pdev)
+static int keyboard_led_init_acpi(struct platform_device *pdev)
{
- struct led_classdev *cdev;
acpi_handle handle;
acpi_status status;
- int error;
/* Look for the keyboard LED ACPI Device */
status = acpi_get_handle(ACPI_ROOT_OBJECT,
@@ -73,33 +107,151 @@ static int keyboard_led_probe(struct platform_device *pdev)
return -ENXIO;
}
- cdev = devm_kzalloc(&pdev->dev, sizeof(*cdev), GFP_KERNEL);
- if (!cdev)
+ return 0;
+}
+
+static const struct keyboard_led_drvdata keyboard_led_drvdata_acpi = {
+ .init = keyboard_led_init_acpi,
+ .brightness_set = keyboard_led_set_brightness_acpi,
+ .brightness_get = keyboard_led_get_brightness_acpi,
+ .max_brightness = KEYBOARD_BACKLIGHT_MAX,
+};
+
+#endif /* CONFIG_ACPI */
+
+#if IS_ENABLED(CONFIG_CROS_EC)
+
+static int
+keyboard_led_set_brightness_ec_pwm(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct {
+ struct cros_ec_command msg;
+ struct ec_params_pwm_set_keyboard_backlight params;
+ } __packed buf;
+ struct ec_params_pwm_set_keyboard_backlight *params = &buf.params;
+ struct cros_ec_command *msg = &buf.msg;
+ struct keyboard_led *keyboard_led = container_of(cdev, struct keyboard_led, cdev);
+
+ memset(&buf, 0, sizeof(buf));
+
+ msg->command = EC_CMD_PWM_SET_KEYBOARD_BACKLIGHT;
+ msg->outsize = sizeof(*params);
+
+ params->percent = brightness;
+
+ return cros_ec_cmd_xfer_status(keyboard_led->ec, msg);
+}
+
+static enum led_brightness
+keyboard_led_get_brightness_ec_pwm(struct led_classdev *cdev)
+{
+ struct {
+ struct cros_ec_command msg;
+ struct ec_response_pwm_get_keyboard_backlight resp;
+ } __packed buf;
+ struct ec_response_pwm_get_keyboard_backlight *resp = &buf.resp;
+ struct cros_ec_command *msg = &buf.msg;
+ struct keyboard_led *keyboard_led = container_of(cdev, struct keyboard_led, cdev);
+ int ret;
+
+ memset(&buf, 0, sizeof(buf));
+
+ msg->command = EC_CMD_PWM_GET_KEYBOARD_BACKLIGHT;
+ msg->insize = sizeof(*resp);
+
+ ret = cros_ec_cmd_xfer_status(keyboard_led->ec, msg);
+ if (ret < 0)
+ return ret;
+
+ return resp->percent;
+}
+
+static int keyboard_led_init_ec_pwm(struct platform_device *pdev)
+{
+ struct keyboard_led *keyboard_led = platform_get_drvdata(pdev);
+
+ keyboard_led->ec = dev_get_drvdata(pdev->dev.parent);
+ if (!keyboard_led->ec) {
+ dev_err(&pdev->dev, "no parent EC device\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const __maybe_unused struct keyboard_led_drvdata keyboard_led_drvdata_ec_pwm = {
+ .init = keyboard_led_init_ec_pwm,
+ .brightness_set_blocking = keyboard_led_set_brightness_ec_pwm,
+ .brightness_get = keyboard_led_get_brightness_ec_pwm,
+ .max_brightness = KEYBOARD_BACKLIGHT_MAX,
+};
+
+#else /* IS_ENABLED(CONFIG_CROS_EC) */
+
+static const __maybe_unused struct keyboard_led_drvdata keyboard_led_drvdata_ec_pwm = {};
+
+#endif /* IS_ENABLED(CONFIG_CROS_EC) */
+
+static int keyboard_led_probe(struct platform_device *pdev)
+{
+ const struct keyboard_led_drvdata *drvdata;
+ struct keyboard_led *keyboard_led;
+ int error;
+
+ drvdata = device_get_match_data(&pdev->dev);
+ if (!drvdata)
+ return -EINVAL;
+
+ keyboard_led = devm_kzalloc(&pdev->dev, sizeof(*keyboard_led), GFP_KERNEL);
+ if (!keyboard_led)
return -ENOMEM;
+ platform_set_drvdata(pdev, keyboard_led);
+
+ if (drvdata->init) {
+ error = drvdata->init(pdev);
+ if (error)
+ return error;
+ }
- cdev->name = "chromeos::kbd_backlight";
- cdev->max_brightness = ACPI_KEYBOARD_BACKLIGHT_MAX;
- cdev->flags |= LED_CORE_SUSPENDRESUME;
- cdev->brightness_set = keyboard_led_set_brightness;
- cdev->brightness_get = keyboard_led_get_brightness;
+ keyboard_led->cdev.name = "chromeos::kbd_backlight";
+ keyboard_led->cdev.flags |= LED_CORE_SUSPENDRESUME;
+ keyboard_led->cdev.max_brightness = drvdata->max_brightness;
+ keyboard_led->cdev.brightness_set = drvdata->brightness_set;
+ keyboard_led->cdev.brightness_set_blocking = drvdata->brightness_set_blocking;
+ keyboard_led->cdev.brightness_get = drvdata->brightness_get;
- error = devm_led_classdev_register(&pdev->dev, cdev);
+ error = devm_led_classdev_register(&pdev->dev, &keyboard_led->cdev);
if (error)
return error;
return 0;
}
-static const struct acpi_device_id keyboard_led_id[] = {
- { "GOOG0002", 0 },
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id keyboard_led_acpi_match[] = {
+ { "GOOG0002", (kernel_ulong_t)&keyboard_led_drvdata_acpi },
{ }
};
-MODULE_DEVICE_TABLE(acpi, keyboard_led_id);
+MODULE_DEVICE_TABLE(acpi, keyboard_led_acpi_match);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id keyboard_led_of_match[] = {
+ {
+ .compatible = "google,cros-kbd-led-backlight",
+ .data = &keyboard_led_drvdata_ec_pwm,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, keyboard_led_of_match);
+#endif
static struct platform_driver keyboard_led_driver = {
.driver = {
.name = "chromeos-keyboard-leds",
- .acpi_match_table = ACPI_PTR(keyboard_led_id),
+ .acpi_match_table = ACPI_PTR(keyboard_led_acpi_match),
+ .of_match_table = of_match_ptr(keyboard_led_of_match),
},
.probe = keyboard_led_probe,
};
diff --git a/drivers/platform/chrome/cros_kunit_util.c b/drivers/platform/chrome/cros_kunit_util.c
new file mode 100644
index 000000000000..f0fda96b11bd
--- /dev/null
+++ b/drivers/platform/chrome/cros_kunit_util.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CrOS Kunit tests utilities.
+ */
+
+#include <kunit/test.h>
+
+#include <linux/list.h>
+#include <linux/minmax.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+
+#include "cros_ec.h"
+#include "cros_kunit_util.h"
+
+int cros_kunit_ec_xfer_mock_default_result;
+int cros_kunit_ec_xfer_mock_default_ret;
+int cros_kunit_ec_cmd_xfer_mock_called;
+int cros_kunit_ec_pkt_xfer_mock_called;
+
+static struct list_head cros_kunit_ec_xfer_mock_in;
+static struct list_head cros_kunit_ec_xfer_mock_out;
+
+int cros_kunit_ec_xfer_mock(struct cros_ec_device *ec_dev, struct cros_ec_command *msg)
+{
+ struct ec_xfer_mock *mock;
+
+ mock = list_first_entry_or_null(&cros_kunit_ec_xfer_mock_in, struct ec_xfer_mock, list);
+ if (!mock) {
+ msg->result = cros_kunit_ec_xfer_mock_default_result;
+ return cros_kunit_ec_xfer_mock_default_ret;
+ }
+
+ list_del(&mock->list);
+
+ memcpy(&mock->msg, msg, sizeof(*msg));
+ if (msg->outsize) {
+ mock->i_data = kunit_kzalloc(mock->test, msg->outsize, GFP_KERNEL);
+ if (mock->i_data)
+ memcpy(mock->i_data, msg->data, msg->outsize);
+ }
+
+ msg->result = mock->result;
+ if (msg->insize)
+ memcpy(msg->data, mock->o_data, min(msg->insize, mock->o_data_len));
+
+ list_add_tail(&mock->list, &cros_kunit_ec_xfer_mock_out);
+
+ return mock->ret;
+}
+
+int cros_kunit_ec_cmd_xfer_mock(struct cros_ec_device *ec_dev, struct cros_ec_command *msg)
+{
+ ++cros_kunit_ec_cmd_xfer_mock_called;
+ return cros_kunit_ec_xfer_mock(ec_dev, msg);
+}
+
+int cros_kunit_ec_pkt_xfer_mock(struct cros_ec_device *ec_dev, struct cros_ec_command *msg)
+{
+ ++cros_kunit_ec_pkt_xfer_mock_called;
+ return cros_kunit_ec_xfer_mock(ec_dev, msg);
+}
+
+struct ec_xfer_mock *cros_kunit_ec_xfer_mock_add(struct kunit *test, size_t size)
+{
+ return cros_kunit_ec_xfer_mock_addx(test, size, EC_RES_SUCCESS, size);
+}
+
+struct ec_xfer_mock *cros_kunit_ec_xfer_mock_addx(struct kunit *test,
+ int ret, int result, size_t size)
+{
+ struct ec_xfer_mock *mock;
+
+ mock = kunit_kzalloc(test, sizeof(*mock), GFP_KERNEL);
+ if (!mock)
+ return NULL;
+
+ list_add_tail(&mock->list, &cros_kunit_ec_xfer_mock_in);
+ mock->test = test;
+
+ mock->ret = ret;
+ mock->result = result;
+ mock->o_data = kunit_kzalloc(test, size, GFP_KERNEL);
+ if (!mock->o_data)
+ return NULL;
+ mock->o_data_len = size;
+
+ return mock;
+}
+
+struct ec_xfer_mock *cros_kunit_ec_xfer_mock_next(void)
+{
+ struct ec_xfer_mock *mock;
+
+ mock = list_first_entry_or_null(&cros_kunit_ec_xfer_mock_out, struct ec_xfer_mock, list);
+ if (mock)
+ list_del(&mock->list);
+
+ return mock;
+}
+
+int cros_kunit_readmem_mock_offset;
+u8 *cros_kunit_readmem_mock_data;
+int cros_kunit_readmem_mock_ret;
+
+int cros_kunit_readmem_mock(struct cros_ec_device *ec_dev, unsigned int offset,
+ unsigned int bytes, void *dest)
+{
+ cros_kunit_readmem_mock_offset = offset;
+
+ memcpy(dest, cros_kunit_readmem_mock_data, bytes);
+
+ return cros_kunit_readmem_mock_ret;
+}
+
+void cros_kunit_mock_reset(void)
+{
+ cros_kunit_ec_xfer_mock_default_result = 0;
+ cros_kunit_ec_xfer_mock_default_ret = 0;
+ cros_kunit_ec_cmd_xfer_mock_called = 0;
+ cros_kunit_ec_pkt_xfer_mock_called = 0;
+ INIT_LIST_HEAD(&cros_kunit_ec_xfer_mock_in);
+ INIT_LIST_HEAD(&cros_kunit_ec_xfer_mock_out);
+
+ cros_kunit_readmem_mock_offset = 0;
+ cros_kunit_readmem_mock_data = NULL;
+ cros_kunit_readmem_mock_ret = 0;
+}
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/chrome/cros_kunit_util.h b/drivers/platform/chrome/cros_kunit_util.h
new file mode 100644
index 000000000000..414002271c9c
--- /dev/null
+++ b/drivers/platform/chrome/cros_kunit_util.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CrOS Kunit tests utilities.
+ */
+
+#ifndef _CROS_KUNIT_UTIL_H_
+#define _CROS_KUNIT_UTIL_H_
+
+#include <linux/platform_data/cros_ec_proto.h>
+
+struct ec_xfer_mock {
+ struct list_head list;
+ struct kunit *test;
+
+ /* input */
+ struct cros_ec_command msg;
+ void *i_data;
+
+ /* output */
+ int ret;
+ int result;
+ void *o_data;
+ u32 o_data_len;
+};
+
+extern int cros_kunit_ec_xfer_mock_default_result;
+extern int cros_kunit_ec_xfer_mock_default_ret;
+extern int cros_kunit_ec_cmd_xfer_mock_called;
+extern int cros_kunit_ec_pkt_xfer_mock_called;
+
+int cros_kunit_ec_xfer_mock(struct cros_ec_device *ec_dev, struct cros_ec_command *msg);
+int cros_kunit_ec_cmd_xfer_mock(struct cros_ec_device *ec_dev, struct cros_ec_command *msg);
+int cros_kunit_ec_pkt_xfer_mock(struct cros_ec_device *ec_dev, struct cros_ec_command *msg);
+struct ec_xfer_mock *cros_kunit_ec_xfer_mock_add(struct kunit *test, size_t size);
+struct ec_xfer_mock *cros_kunit_ec_xfer_mock_addx(struct kunit *test,
+ int ret, int result, size_t size);
+struct ec_xfer_mock *cros_kunit_ec_xfer_mock_next(void);
+
+extern int cros_kunit_readmem_mock_offset;
+extern u8 *cros_kunit_readmem_mock_data;
+extern int cros_kunit_readmem_mock_ret;
+
+int cros_kunit_readmem_mock(struct cros_ec_device *ec_dev, unsigned int offset,
+ unsigned int bytes, void *dest);
+
+void cros_kunit_mock_reset(void);
+
+#endif
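Taken together, these helpers follow the same flow as the proto tests earlier in this patch: queue an expected transfer, run the code under test, then pop and inspect the recorded host command. A minimal sketch (the test body and the command checked are illustrative):

static void example_mock_usage(struct kunit *test)
{
	struct ec_xfer_mock *mock;

	/* Queue one successful transfer carrying a 4-byte response buffer. */
	mock = cros_kunit_ec_xfer_mock_add(test, 4);
	KUNIT_ASSERT_PTR_NE(test, mock, NULL);

	/* ... exercise code that issues exactly one host command ... */

	/* Pop the recorded transfer and verify what was sent to the EC. */
	mock = cros_kunit_ec_xfer_mock_next();
	KUNIT_EXPECT_PTR_NE(test, mock, NULL);
	KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_VERSION);
}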
diff --git a/drivers/platform/chrome/cros_usbpd_notify.c b/drivers/platform/chrome/cros_usbpd_notify.c
index 91ce6be91aac..4b5a81c9dc6d 100644
--- a/drivers/platform/chrome/cros_usbpd_notify.c
+++ b/drivers/platform/chrome/cros_usbpd_notify.c
@@ -71,8 +71,8 @@ static void cros_usbpd_get_event_and_notify(struct device *dev,
}
/* Check for PD host events on EC. */
- ret = cros_ec_command(ec_dev, 0, EC_CMD_PD_HOST_EVENT_STATUS,
- NULL, 0, &host_event_status, sizeof(host_event_status));
+ ret = cros_ec_cmd(ec_dev, 0, EC_CMD_PD_HOST_EVENT_STATUS,
+ NULL, 0, &host_event_status, sizeof(host_event_status));
if (ret < 0) {
dev_warn(dev, "Can't get host event status (err: %d)\n", ret);
goto send_notify;
diff --git a/drivers/platform/chrome/wilco_ec/event.c b/drivers/platform/chrome/wilco_ec/event.c
index 814518509739..32e400590be5 100644
--- a/drivers/platform/chrome/wilco_ec/event.c
+++ b/drivers/platform/chrome/wilco_ec/event.c
@@ -343,7 +343,7 @@ static __poll_t event_poll(struct file *filp, poll_table *wait)
*
* Removes the first event from the queue, places it in the passed buffer.
*
- * If there are no events in the the queue, then one of two things happens,
+ * If there are no events in the queue, then one of two things happens,
* depending on if the file was opened in nonblocking mode: If in nonblocking
* mode, then return -EAGAIN to say there's no data. If in blocking mode, then
* block until an event is available.
diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
index 38800e86ed8a..1ae3c56b66b0 100644
--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
+++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
@@ -959,6 +959,8 @@ static int mlxbf_tmfifo_virtio_find_vqs(struct virtio_device *vdev,
goto error;
}
+ vq->num_max = vring->num;
+
vqs[i] = vq;
vring->vq = vq;
vq->priv = vring;
diff --git a/drivers/platform/mellanox/mlxreg-io.c b/drivers/platform/mellanox/mlxreg-io.c
index 2c2686d5c2fc..ddc08abf398c 100644
--- a/drivers/platform/mellanox/mlxreg-io.c
+++ b/drivers/platform/mellanox/mlxreg-io.c
@@ -31,6 +31,7 @@
* @group: sysfs attribute group;
* @groups: list of sysfs attribute group for hwmon registration;
* @regsize: size of a register value;
+ * @io_lock: user access locking;
*/
struct mlxreg_io_priv_data {
struct platform_device *pdev;
@@ -41,6 +42,7 @@ struct mlxreg_io_priv_data {
struct attribute_group group;
const struct attribute_group *groups[2];
int regsize;
+ struct mutex io_lock; /* Protects user access. */
};
static int
@@ -116,14 +118,19 @@ mlxreg_io_attr_show(struct device *dev, struct device_attribute *attr,
u32 regval = 0;
int ret;
+ mutex_lock(&priv->io_lock);
+
ret = mlxreg_io_get_reg(priv->pdata->regmap, data, 0, true,
priv->regsize, &regval);
if (ret)
goto access_error;
+ mutex_unlock(&priv->io_lock);
+
return sprintf(buf, "%u\n", regval);
access_error:
+ mutex_unlock(&priv->io_lock);
return ret;
}
@@ -145,6 +152,8 @@ mlxreg_io_attr_store(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
+ mutex_lock(&priv->io_lock);
+
ret = mlxreg_io_get_reg(priv->pdata->regmap, data, input_val, false,
priv->regsize, &regval);
if (ret)
@@ -154,9 +163,12 @@ mlxreg_io_attr_store(struct device *dev, struct device_attribute *attr,
if (ret)
goto access_error;
+ mutex_unlock(&priv->io_lock);
+
return len;
access_error:
+ mutex_unlock(&priv->io_lock);
dev_err(&priv->pdev->dev, "Bus access error\n");
return ret;
}
@@ -246,16 +258,27 @@ static int mlxreg_io_probe(struct platform_device *pdev)
return PTR_ERR(priv->hwmon);
}
+ mutex_init(&priv->io_lock);
dev_set_drvdata(&pdev->dev, priv);
return 0;
}
+static int mlxreg_io_remove(struct platform_device *pdev)
+{
+ struct mlxreg_io_priv_data *priv = dev_get_drvdata(&pdev->dev);
+
+ mutex_destroy(&priv->io_lock);
+
+ return 0;
+}
+
static struct platform_driver mlxreg_io_driver = {
.driver = {
.name = "mlxreg-io",
},
.probe = mlxreg_io_probe,
+ .remove = mlxreg_io_remove,
};
module_platform_driver(mlxreg_io_driver);
diff --git a/drivers/platform/mellanox/mlxreg-lc.c b/drivers/platform/mellanox/mlxreg-lc.c
index c897a2f15840..55834ccb4ac7 100644
--- a/drivers/platform/mellanox/mlxreg-lc.c
+++ b/drivers/platform/mellanox/mlxreg-lc.c
@@ -716,8 +716,12 @@ mlxreg_lc_config_init(struct mlxreg_lc *mlxreg_lc, void *regmap,
switch (regval) {
case MLXREG_LC_SN4800_C16:
err = mlxreg_lc_sn4800_c16_config_init(mlxreg_lc, regmap, data);
- if (err)
+ if (err) {
+ dev_err(dev, "Failed to config client %s at bus %d at addr 0x%02x\n",
+ data->hpdev.brdinfo->type, data->hpdev.nr,
+ data->hpdev.brdinfo->addr);
return err;
+ }
break;
default:
return -ENODEV;
@@ -730,8 +734,11 @@ mlxreg_lc_config_init(struct mlxreg_lc *mlxreg_lc, void *regmap,
mlxreg_lc->mux = platform_device_register_resndata(dev, "i2c-mux-mlxcpld", data->hpdev.nr,
NULL, 0, mlxreg_lc->mux_data,
sizeof(*mlxreg_lc->mux_data));
- if (IS_ERR(mlxreg_lc->mux))
+ if (IS_ERR(mlxreg_lc->mux)) {
+ dev_err(dev, "Failed to create mux infra for client %s at bus %d at addr 0x%02x\n",
+ data->hpdev.brdinfo->type, data->hpdev.nr, data->hpdev.brdinfo->addr);
return PTR_ERR(mlxreg_lc->mux);
+ }
/* Register IO access driver. */
if (mlxreg_lc->io_data) {
@@ -740,6 +747,9 @@ mlxreg_lc_config_init(struct mlxreg_lc *mlxreg_lc, void *regmap,
platform_device_register_resndata(dev, "mlxreg-io", data->hpdev.nr, NULL, 0,
mlxreg_lc->io_data, sizeof(*mlxreg_lc->io_data));
if (IS_ERR(mlxreg_lc->io_regs)) {
+ dev_err(dev, "Failed to create regio for client %s at bus %d at addr 0x%02x\n",
+ data->hpdev.brdinfo->type, data->hpdev.nr,
+ data->hpdev.brdinfo->addr);
err = PTR_ERR(mlxreg_lc->io_regs);
goto fail_register_io;
}
@@ -753,6 +763,9 @@ mlxreg_lc_config_init(struct mlxreg_lc *mlxreg_lc, void *regmap,
mlxreg_lc->led_data,
sizeof(*mlxreg_lc->led_data));
if (IS_ERR(mlxreg_lc->led)) {
+ dev_err(dev, "Failed to create LED objects for client %s at bus %d at addr 0x%02x\n",
+ data->hpdev.brdinfo->type, data->hpdev.nr,
+ data->hpdev.brdinfo->addr);
err = PTR_ERR(mlxreg_lc->led);
goto fail_register_led;
}
@@ -809,7 +822,8 @@ static int mlxreg_lc_probe(struct platform_device *pdev)
if (!data->hpdev.adapter) {
dev_err(&pdev->dev, "Failed to get adapter for bus %d\n",
data->hpdev.nr);
- return -EFAULT;
+ err = -EFAULT;
+ goto i2c_get_adapter_fail;
}
/* Create device at the top of line card I2C tree.*/
@@ -818,32 +832,40 @@ static int mlxreg_lc_probe(struct platform_device *pdev)
if (IS_ERR(data->hpdev.client)) {
dev_err(&pdev->dev, "Failed to create client %s at bus %d at addr 0x%02x\n",
data->hpdev.brdinfo->type, data->hpdev.nr, data->hpdev.brdinfo->addr);
-
- i2c_put_adapter(data->hpdev.adapter);
- data->hpdev.adapter = NULL;
- return PTR_ERR(data->hpdev.client);
+ err = PTR_ERR(data->hpdev.client);
+ goto i2c_new_device_fail;
}
regmap = devm_regmap_init_i2c(data->hpdev.client,
&mlxreg_lc_regmap_conf);
if (IS_ERR(regmap)) {
+ dev_err(&pdev->dev, "Failed to create regmap for client %s at bus %d at addr 0x%02x\n",
+ data->hpdev.brdinfo->type, data->hpdev.nr, data->hpdev.brdinfo->addr);
err = PTR_ERR(regmap);
- goto mlxreg_lc_probe_fail;
+ goto devm_regmap_init_i2c_fail;
}
/* Set default registers. */
for (i = 0; i < mlxreg_lc_regmap_conf.num_reg_defaults; i++) {
err = regmap_write(regmap, mlxreg_lc_regmap_default[i].reg,
mlxreg_lc_regmap_default[i].def);
- if (err)
- goto mlxreg_lc_probe_fail;
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set default regmap %d for client %s at bus %d at addr 0x%02x\n",
+ i, data->hpdev.brdinfo->type, data->hpdev.nr,
+ data->hpdev.brdinfo->addr);
+ goto regmap_write_fail;
+ }
}
/* Sync registers with hardware. */
regcache_mark_dirty(regmap);
err = regcache_sync(regmap);
- if (err)
- goto mlxreg_lc_probe_fail;
+ if (err) {
+ dev_err(&pdev->dev, "Failed to sync regmap for client %s at bus %d at addr 0x%02x\n",
+ data->hpdev.brdinfo->type, data->hpdev.nr, data->hpdev.brdinfo->addr);
+ goto regcache_sync_fail;
+ }
par_pdata = data->hpdev.brdinfo->platform_data;
mlxreg_lc->par_regmap = par_pdata->regmap;
@@ -854,12 +876,27 @@ static int mlxreg_lc_probe(struct platform_device *pdev)
/* Configure line card. */
err = mlxreg_lc_config_init(mlxreg_lc, regmap, data);
if (err)
- goto mlxreg_lc_probe_fail;
+ goto mlxreg_lc_config_init_fail;
return err;
-mlxreg_lc_probe_fail:
+mlxreg_lc_config_init_fail:
+regcache_sync_fail:
+regmap_write_fail:
+devm_regmap_init_i2c_fail:
+ if (data->hpdev.client) {
+ i2c_unregister_device(data->hpdev.client);
+ data->hpdev.client = NULL;
+ }
+i2c_new_device_fail:
i2c_put_adapter(data->hpdev.adapter);
+ data->hpdev.adapter = NULL;
+i2c_get_adapter_fail:
+ /* Clear event notification callback and handle. */
+ if (data->notifier) {
+ data->notifier->user_handler = NULL;
+ data->notifier->handle = NULL;
+ }
return err;
}
@@ -868,11 +905,18 @@ static int mlxreg_lc_remove(struct platform_device *pdev)
struct mlxreg_core_data *data = dev_get_platdata(&pdev->dev);
struct mlxreg_lc *mlxreg_lc = platform_get_drvdata(pdev);
- /* Clear event notification callback. */
- if (data->notifier) {
- data->notifier->user_handler = NULL;
- data->notifier->handle = NULL;
- }
+ /*
+ * Probing and removal are invoked by hotplug events raised upon line card insertion and
+ * removal. If the probing procedure fails, all data is cleared. However, a hotplug event is
+ * still raised when the line card is removed, which triggers the removal procedure. In that
+ * case there is nothing to remove.
+ */
+ if (!data->notifier || !data->notifier->handle)
+ return 0;
+
+ /* Clear event notification callback and handle. */
+ data->notifier->user_handler = NULL;
+ data->notifier->handle = NULL;
/* Destroy static I2C device feeding by main power. */
mlxreg_lc_destroy_static_devices(mlxreg_lc, mlxreg_lc->main_devs,
diff --git a/drivers/platform/olpc/olpc-ec.c b/drivers/platform/olpc/olpc-ec.c
index 4ff5c3a12991..921520475ff6 100644
--- a/drivers/platform/olpc/olpc-ec.c
+++ b/drivers/platform/olpc/olpc-ec.c
@@ -264,7 +264,7 @@ static ssize_t ec_dbgfs_cmd_write(struct file *file, const char __user *buf,
int i, m;
unsigned char ec_cmd[EC_MAX_CMD_ARGS];
unsigned int ec_cmd_int[EC_MAX_CMD_ARGS];
- char cmdbuf[64];
+ char cmdbuf[64] = "";
int ec_cmd_bytes;
mutex_lock(&ec_dbgfs_lock);
diff --git a/drivers/platform/surface/Kconfig b/drivers/platform/surface/Kconfig
index eb79fbed8059..b629e82af97c 100644
--- a/drivers/platform/surface/Kconfig
+++ b/drivers/platform/surface/Kconfig
@@ -72,18 +72,45 @@ config SURFACE_AGGREGATOR_CDEV
The provided interface is intended for debugging and development only,
and should not be used otherwise.
+config SURFACE_AGGREGATOR_HUB
+ tristate "Surface System Aggregator Module Subsystem Device Hubs"
+ depends on SURFACE_AGGREGATOR
+ depends on SURFACE_AGGREGATOR_BUS
+ help
+ Device-hub drivers for Surface System Aggregator Module (SSAM) subsystem
+ devices.
+
+ Provides subsystem hub drivers which manage client devices on various
+ SSAM subsystems. In some subsystems, notably the BAS subsystem managing
+ devices contained in the base of the Surface Book 3 and the KIP subsystem
+ managing type-cover devices in the Surface Pro 8 and Surface Pro X,
+ devices can be (hot-)removed. Hub devices and drivers are required to
+ manage these subdevices.
+
+ Devices managed via these hubs are:
+ - Battery/AC devices (Surface Book 3).
+ - HID input devices (7th-generation and later models with detachable
+ input devices).
+
+ Select M (recommended) or Y here if you want support for the above
+ mentioned devices on the corresponding Surface models. Without this
+ module, the respective devices mentioned above will not be instantiated
+ and thus any functionality provided by them will be missing, even when
+ drivers for these devices are present. This module only provides the
+ respective subsystem hubs. Both drivers and device specification (e.g.
+ via the Surface Aggregator Registry) for these devices still need to be
+ selected via other options.
+
config SURFACE_AGGREGATOR_REGISTRY
tristate "Surface System Aggregator Module Device Registry"
depends on SURFACE_AGGREGATOR
depends on SURFACE_AGGREGATOR_BUS
help
- Device-registry and device-hubs for Surface System Aggregator Module
- (SSAM) devices.
+ Device-registry for Surface System Aggregator Module (SSAM) devices.
Provides a module and driver which act as a device-registry for SSAM
client devices that cannot be detected automatically, e.g. via ACPI.
- Such devices are instead provided via this registry and attached via
- device hubs, also provided in this module.
+ Such devices are instead provided and managed via this registry.
Devices provided via this registry are:
- Platform profile (performance-/cooling-mode) device (5th- and later
@@ -99,6 +126,29 @@ config SURFACE_AGGREGATOR_REGISTRY
the respective client devices. Drivers for these devices still need to
be selected via the other options.
+config SURFACE_AGGREGATOR_TABLET_SWITCH
+ tristate "Surface Aggregator Generic Tablet-Mode Switch Driver"
+ depends on SURFACE_AGGREGATOR
+ depends on SURFACE_AGGREGATOR_BUS
+ depends on INPUT
+ help
+ Provides a tablet-mode switch input device on Microsoft Surface models
+ using the KIP subsystem for detachable keyboards (e.g. keyboard covers)
+ or the POS subsystem for device/screen posture changes.
+
+ The KIP subsystem is used on newer Surface generations to handle
+ detachable input peripherals, specifically the keyboard cover (containing
+ keyboard and touchpad) on the Surface Pro 8 and Surface Pro X. The POS
+ subsystem is used for device posture change notifications on the Surface
+ Laptop Studio. This module provides a driver to let user-space know when
+ the device should be considered in tablet-mode due to the keyboard cover
+ being detached or folded back (essentially signaling when the keyboard is
+ not available for input). It does so by creating a tablet-mode switch
+ input device, sending the standard SW_TABLET_MODE event on mode change.
+
+ Select M or Y here, if you want to provide tablet-mode switch input
+ events on the Surface Pro 8, Surface Pro X, and Surface Laptop Studio.
+
config SURFACE_DTX
tristate "Surface DTX (Detachment System) Driver"
depends on SURFACE_AGGREGATOR
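In terms of mechanism, the tablet-mode switch described in the SURFACE_AGGREGATOR_TABLET_SWITCH help text above boils down to reporting SW_TABLET_MODE through an input device; a minimal sketch (device setup omitted, 'input' assumed to be an already-registered struct input_dev with the SW_TABLET_MODE capability):

static void example_report_tablet_mode(struct input_dev *input, bool tablet_mode)
{
	input_report_switch(input, SW_TABLET_MODE, tablet_mode);
	input_sync(input);
}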
diff --git a/drivers/platform/surface/Makefile b/drivers/platform/surface/Makefile
index 0fc9cd3e4dd9..53344330939b 100644
--- a/drivers/platform/surface/Makefile
+++ b/drivers/platform/surface/Makefile
@@ -9,7 +9,9 @@ obj-$(CONFIG_SURFACE_3_POWER_OPREGION) += surface3_power.o
obj-$(CONFIG_SURFACE_ACPI_NOTIFY) += surface_acpi_notify.o
obj-$(CONFIG_SURFACE_AGGREGATOR) += aggregator/
obj-$(CONFIG_SURFACE_AGGREGATOR_CDEV) += surface_aggregator_cdev.o
+obj-$(CONFIG_SURFACE_AGGREGATOR_HUB) += surface_aggregator_hub.o
obj-$(CONFIG_SURFACE_AGGREGATOR_REGISTRY) += surface_aggregator_registry.o
+obj-$(CONFIG_SURFACE_AGGREGATOR_TABLET_SWITCH) += surface_aggregator_tabletsw.o
obj-$(CONFIG_SURFACE_DTX) += surface_dtx.o
obj-$(CONFIG_SURFACE_GPE) += surface_gpe.o
obj-$(CONFIG_SURFACE_HOTPLUG) += surface_hotplug.o
diff --git a/drivers/platform/surface/aggregator/Kconfig b/drivers/platform/surface/aggregator/Kconfig
index cab020324256..c114f9dd5fe1 100644
--- a/drivers/platform/surface/aggregator/Kconfig
+++ b/drivers/platform/surface/aggregator/Kconfig
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0+
-# Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+# Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
menuconfig SURFACE_AGGREGATOR
tristate "Microsoft Surface System Aggregator Module Subsystem and Drivers"
diff --git a/drivers/platform/surface/aggregator/Makefile b/drivers/platform/surface/aggregator/Makefile
index c0d550eda5cd..fdf664a217f9 100644
--- a/drivers/platform/surface/aggregator/Makefile
+++ b/drivers/platform/surface/aggregator/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0+
-# Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+# Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
# For include/trace/define_trace.h to include trace.h
CFLAGS_core.o = -I$(src)
diff --git a/drivers/platform/surface/aggregator/bus.c b/drivers/platform/surface/aggregator/bus.c
index abbbb5b08b07..de539938896e 100644
--- a/drivers/platform/surface/aggregator/bus.c
+++ b/drivers/platform/surface/aggregator/bus.c
@@ -2,10 +2,11 @@
/*
* Surface System Aggregator Module bus and device integration.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <linux/device.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/surface_aggregator/controller.h>
@@ -14,6 +15,9 @@
#include "bus.h"
#include "controller.h"
+
+/* -- Device and bus functions. --------------------------------------------- */
+
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -46,6 +50,7 @@ static void ssam_device_release(struct device *dev)
struct ssam_device *sdev = to_ssam_device(dev);
ssam_controller_put(sdev->ctrl);
+ fwnode_handle_put(sdev->dev.fwnode);
kfree(sdev);
}
@@ -363,6 +368,134 @@ void ssam_device_driver_unregister(struct ssam_device_driver *sdrv)
}
EXPORT_SYMBOL_GPL(ssam_device_driver_unregister);
+
+/* -- Bus registration. ----------------------------------------------------- */
+
+/**
+ * ssam_bus_register() - Register and set-up the SSAM client device bus.
+ */
+int ssam_bus_register(void)
+{
+ return bus_register(&ssam_bus_type);
+}
+
+/**
+ * ssam_bus_unregister() - Unregister the SSAM client device bus.
+ */
+void ssam_bus_unregister(void)
+{
+ return bus_unregister(&ssam_bus_type);
+}
+
+
+/* -- Helpers for controller and hub devices. ------------------------------- */
+
+static int ssam_device_uid_from_string(const char *str, struct ssam_device_uid *uid)
+{
+ u8 d, tc, tid, iid, fn;
+ int n;
+
+ n = sscanf(str, "%hhx:%hhx:%hhx:%hhx:%hhx", &d, &tc, &tid, &iid, &fn);
+ if (n != 5)
+ return -EINVAL;
+
+ uid->domain = d;
+ uid->category = tc;
+ uid->target = tid;
+ uid->instance = iid;
+ uid->function = fn;
+
+ return 0;
+}
+
+static int ssam_get_uid_for_node(struct fwnode_handle *node, struct ssam_device_uid *uid)
+{
+ const char *str = fwnode_get_name(node);
+
+ /*
+ * To simplify definitions of firmware nodes, we set the device name
+ * based on the UID of the device, prefixed with "ssam:".
+ */
+ if (strncmp(str, "ssam:", strlen("ssam:")) != 0)
+ return -ENODEV;
+
+ str += strlen("ssam:");
+ return ssam_device_uid_from_string(str, uid);
+}
+
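For illustration, a firmware node named "ssam:01:15:02:01:00" (a made-up UID) yields domain 0x01, category 0x15, target 0x02, instance 0x01 and function 0x00. A stand-alone sketch of the same sscanf() pattern:

#include <stdio.h>

int main(void)
{
	unsigned char d, tc, tid, iid, fn;

	/* Same "%hhx:%hhx:%hhx:%hhx:%hhx" format as ssam_device_uid_from_string(). */
	if (sscanf("01:15:02:01:00", "%hhx:%hhx:%hhx:%hhx:%hhx",
		   &d, &tc, &tid, &iid, &fn) != 5)
		return 1;

	printf("domain=%#x category=%#x target=%#x instance=%#x function=%#x\n",
	       d, tc, tid, iid, fn);
	return 0;
}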
+static int ssam_add_client_device(struct device *parent, struct ssam_controller *ctrl,
+ struct fwnode_handle *node)
+{
+ struct ssam_device_uid uid;
+ struct ssam_device *sdev;
+ int status;
+
+ status = ssam_get_uid_for_node(node, &uid);
+ if (status)
+ return status;
+
+ sdev = ssam_device_alloc(ctrl, uid);
+ if (!sdev)
+ return -ENOMEM;
+
+ sdev->dev.parent = parent;
+ sdev->dev.fwnode = fwnode_handle_get(node);
+
+ status = ssam_device_add(sdev);
+ if (status)
+ ssam_device_put(sdev);
+
+ return status;
+}
+
+/**
+ * __ssam_register_clients() - Register client devices defined under the
+ * given firmware node as children of the given device.
+ * @parent: The parent device under which clients should be registered.
+ * @ctrl: The controller with which client should be registered.
+ * @node: The firmware node holding definitions of the devices to be added.
+ *
+ * Register all clients that have been defined as children of the given root
+ * firmware node as children of the given parent device. The respective child
+ * firmware nodes will be associated with the correspondingly created child
+ * devices.
+ *
+ * The given controller will be used to instantiate the new devices. See
+ * ssam_device_add() for details.
+ *
+ * Note that, generally, the use of either ssam_device_register_clients() or
+ * ssam_register_clients() should be preferred as they directly use the
+ * firmware node and/or controller associated with the given device. This
+ * function is only intended for use when different device specifications (e.g.
+ * ACPI and firmware nodes) need to be combined (as is done in the platform hub
+ * of the device registry).
+ *
+ * Return: Returns zero on success, nonzero on failure.
+ */
+int __ssam_register_clients(struct device *parent, struct ssam_controller *ctrl,
+ struct fwnode_handle *node)
+{
+ struct fwnode_handle *child;
+ int status;
+
+ fwnode_for_each_child_node(node, child) {
+ /*
+ * Try to add the device specified in the firmware node. If
+ * this fails with -ENODEV, the node does not specify any SSAM
+ * device, so ignore it and continue with the next one.
+ */
+ status = ssam_add_client_device(parent, ctrl, child);
+ if (status && status != -ENODEV)
+ goto err;
+ }
+
+ return 0;
+err:
+ ssam_remove_clients(parent);
+ return status;
+}
+EXPORT_SYMBOL_GPL(__ssam_register_clients);
+
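As the kernel-doc above notes, the higher-level wrappers are normally preferred; purely as a usage sketch (the probe/remove callbacks and error handling here are hypothetical), a hub driver would register its children from probe and tear them down with ssam_remove_clients():

/* Hypothetical hub driver callbacks; ctrl and fwnode come from the hub device. */
static int example_hub_probe(struct ssam_device *sdev)
{
	return __ssam_register_clients(&sdev->dev, sdev->ctrl,
				       dev_fwnode(&sdev->dev));
}

static void example_hub_remove(struct ssam_device *sdev)
{
	ssam_remove_clients(&sdev->dev);
}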
static int ssam_remove_device(struct device *dev, void *_data)
{
struct ssam_device *sdev = to_ssam_device(dev);
@@ -387,19 +520,3 @@ void ssam_remove_clients(struct device *dev)
device_for_each_child_reverse(dev, NULL, ssam_remove_device);
}
EXPORT_SYMBOL_GPL(ssam_remove_clients);
-
-/**
- * ssam_bus_register() - Register and set-up the SSAM client device bus.
- */
-int ssam_bus_register(void)
-{
- return bus_register(&ssam_bus_type);
-}
-
-/**
- * ssam_bus_unregister() - Unregister the SSAM client device bus.
- */
-void ssam_bus_unregister(void)
-{
- return bus_unregister(&ssam_bus_type);
-}
diff --git a/drivers/platform/surface/aggregator/bus.h b/drivers/platform/surface/aggregator/bus.h
index 6964ee84e79c..5b4dbf21906c 100644
--- a/drivers/platform/surface/aggregator/bus.h
+++ b/drivers/platform/surface/aggregator/bus.h
@@ -2,7 +2,7 @@
/*
* Surface System Aggregator Module bus and device integration.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#ifndef _SURFACE_AGGREGATOR_BUS_H
diff --git a/drivers/platform/surface/aggregator/controller.c b/drivers/platform/surface/aggregator/controller.c
index b8c377b3f932..43e765199137 100644
--- a/drivers/platform/surface/aggregator/controller.c
+++ b/drivers/platform/surface/aggregator/controller.c
@@ -2,7 +2,7 @@
/*
* Main SSAM/SSH controller structure and functionality.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <linux/acpi.h>
@@ -2199,16 +2199,26 @@ static int ssam_nf_refcount_enable(struct ssam_controller *ctrl,
}
/**
- * ssam_nf_refcount_disable_free() - Disable event for reference count entry if it is
- * no longer in use and free the corresponding entry.
+ * ssam_nf_refcount_disable_free() - Disable event for reference count entry if
+ * it is no longer in use and free the corresponding entry.
* @ctrl: The controller to disable the event on.
* @entry: The reference count entry for the event to be disabled.
* @flags: The flags used for enabling the event on the EC.
+ * @ec: Flag specifying if the event should actually be disabled on the EC.
*
- * If the reference count equals zero, i.e. the event is no longer requested by
- * any client, the event will be disabled and the corresponding reference count
- * entry freed. The reference count entry must not be used any more after a
- * call to this function.
+ * If ``ec`` equals ``true`` and the reference count equals zero (i.e. the
+ * event is no longer requested by any client), the specified event will be
+ * disabled on the EC via the corresponding request.
+ *
+ * If ``ec`` equals ``false``, no request will be sent to the EC and the event
+ * can be considered in a detached state (i.e. no longer used but still
+ * enabled). Disabling an event via this method may be required for
+ * hot-removable devices, where event disable requests may time out after the
+ * device has been physically removed.
+ *
+ * In both cases, if the reference count equals zero, the corresponding
+ * reference count entry will be freed. The reference count entry must not be
+ * used any more after a call to this function.
*
* Also checks if the flags used for disabling the event match the flags used
* for enabling the event and warns if they do not (regardless of reference
@@ -2223,7 +2233,7 @@ static int ssam_nf_refcount_enable(struct ssam_controller *ctrl,
* returns the status of the event-enable EC command.
*/
static int ssam_nf_refcount_disable_free(struct ssam_controller *ctrl,
- struct ssam_nf_refcount_entry *entry, u8 flags)
+ struct ssam_nf_refcount_entry *entry, u8 flags, bool ec)
{
const struct ssam_event_registry reg = entry->key.reg;
const struct ssam_event_id id = entry->key.id;
@@ -2232,8 +2242,9 @@ static int ssam_nf_refcount_disable_free(struct ssam_controller *ctrl,
lockdep_assert_held(&nf->lock);
- ssam_dbg(ctrl, "disabling event (reg: %#04x, tc: %#04x, iid: %#04x, rc: %d)\n",
- reg.target_category, id.target_category, id.instance, entry->refcount);
+ ssam_dbg(ctrl, "%s event (reg: %#04x, tc: %#04x, iid: %#04x, rc: %d)\n",
+ ec ? "disabling" : "detaching", reg.target_category, id.target_category,
+ id.instance, entry->refcount);
if (entry->flags != flags) {
ssam_warn(ctrl,
@@ -2242,7 +2253,7 @@ static int ssam_nf_refcount_disable_free(struct ssam_controller *ctrl,
id.instance);
}
- if (entry->refcount == 0) {
+ if (ec && entry->refcount == 0) {
status = ssam_ssh_event_disable(ctrl, reg, id, flags);
kfree(entry);
}
@@ -2322,20 +2333,26 @@ int ssam_notifier_register(struct ssam_controller *ctrl, struct ssam_event_notif
EXPORT_SYMBOL_GPL(ssam_notifier_register);
/**
- * ssam_notifier_unregister() - Unregister an event notifier.
- * @ctrl: The controller the notifier has been registered on.
- * @n: The event notifier to unregister.
+ * __ssam_notifier_unregister() - Unregister an event notifier.
+ * @ctrl: The controller the notifier has been registered on.
+ * @n: The event notifier to unregister.
+ * @disable: Whether to disable the corresponding event on the EC.
*
* Unregister an event notifier. Decrement the usage counter of the associated
* SAM event if the notifier is not marked as an observer. If the usage counter
- * reaches zero, the event will be disabled.
+ * reaches zero and ``disable`` equals ``true``, the event will be disabled.
+ *
+ * Useful for hot-removable devices, where communication may fail once the
+ * device has been physically removed. In that case, specifying ``disable`` as
+ * ``false`` avoids communication with the EC.
*
* Return: Returns zero on success, %-ENOENT if the given notifier block has
* not been registered on the controller. If the given notifier block was the
* last one associated with its specific event, returns the status of the
* event-disable EC-command.
*/
-int ssam_notifier_unregister(struct ssam_controller *ctrl, struct ssam_event_notifier *n)
+int __ssam_notifier_unregister(struct ssam_controller *ctrl, struct ssam_event_notifier *n,
+ bool disable)
{
u16 rqid = ssh_tc_to_rqid(n->event.id.target_category);
struct ssam_nf_refcount_entry *entry;
@@ -2373,7 +2390,7 @@ int ssam_notifier_unregister(struct ssam_controller *ctrl, struct ssam_event_not
goto remove;
}
- status = ssam_nf_refcount_disable_free(ctrl, entry, n->event.flags);
+ status = ssam_nf_refcount_disable_free(ctrl, entry, n->event.flags, disable);
}
remove:
@@ -2383,7 +2400,7 @@ remove:
return status;
}
-EXPORT_SYMBOL_GPL(ssam_notifier_unregister);
+EXPORT_SYMBOL_GPL(__ssam_notifier_unregister);
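A hedged sketch of the intended hot-removal use of the new ``disable`` argument (the surrounding structure and function are made up): once the device is physically gone, pass ``false`` so no event-disable request is sent to the EC.

struct example_hub {
	struct ssam_controller *ctrl;
	struct ssam_event_notifier notif;
};

static void example_client_gone(struct example_hub *hub)
{
	/* Device already removed: drop the notifier without talking to the EC. */
	__ssam_notifier_unregister(hub->ctrl, &hub->notif, false);
}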
/**
* ssam_controller_event_enable() - Enable the specified event.
@@ -2477,7 +2494,7 @@ int ssam_controller_event_disable(struct ssam_controller *ctrl,
return -ENOENT;
}
- status = ssam_nf_refcount_disable_free(ctrl, entry, flags);
+ status = ssam_nf_refcount_disable_free(ctrl, entry, flags, true);
mutex_unlock(&nf->lock);
return status;
diff --git a/drivers/platform/surface/aggregator/controller.h b/drivers/platform/surface/aggregator/controller.h
index a0963c3562ff..f0d987abc51e 100644
--- a/drivers/platform/surface/aggregator/controller.h
+++ b/drivers/platform/surface/aggregator/controller.h
@@ -2,7 +2,7 @@
/*
* Main SSAM/SSH controller structure and functionality.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#ifndef _SURFACE_AGGREGATOR_CONTROLLER_H
diff --git a/drivers/platform/surface/aggregator/core.c b/drivers/platform/surface/aggregator/core.c
index a62c5dfe42d6..1a6373dea109 100644
--- a/drivers/platform/surface/aggregator/core.c
+++ b/drivers/platform/surface/aggregator/core.c
@@ -7,7 +7,7 @@
* Handles communication via requests as well as enabling, disabling, and
* relaying of events.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <linux/acpi.h>
diff --git a/drivers/platform/surface/aggregator/ssh_msgb.h b/drivers/platform/surface/aggregator/ssh_msgb.h
index e562958ffdf0..f3ecad92eefd 100644
--- a/drivers/platform/surface/aggregator/ssh_msgb.h
+++ b/drivers/platform/surface/aggregator/ssh_msgb.h
@@ -2,7 +2,7 @@
/*
* SSH message builder functions.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#ifndef _SURFACE_AGGREGATOR_SSH_MSGB_H
diff --git a/drivers/platform/surface/aggregator/ssh_packet_layer.c b/drivers/platform/surface/aggregator/ssh_packet_layer.c
index 8a4451c1ffe5..6748fe4ac5d5 100644
--- a/drivers/platform/surface/aggregator/ssh_packet_layer.c
+++ b/drivers/platform/surface/aggregator/ssh_packet_layer.c
@@ -2,7 +2,7 @@
/*
* SSH packet transport layer.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <asm/unaligned.h>
diff --git a/drivers/platform/surface/aggregator/ssh_packet_layer.h b/drivers/platform/surface/aggregator/ssh_packet_layer.h
index 2eb329f0b91a..64633522f971 100644
--- a/drivers/platform/surface/aggregator/ssh_packet_layer.h
+++ b/drivers/platform/surface/aggregator/ssh_packet_layer.h
@@ -2,7 +2,7 @@
/*
* SSH packet transport layer.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#ifndef _SURFACE_AGGREGATOR_SSH_PACKET_LAYER_H
diff --git a/drivers/platform/surface/aggregator/ssh_parser.c b/drivers/platform/surface/aggregator/ssh_parser.c
index b77912f8f13b..a6f668694365 100644
--- a/drivers/platform/surface/aggregator/ssh_parser.c
+++ b/drivers/platform/surface/aggregator/ssh_parser.c
@@ -2,7 +2,7 @@
/*
* SSH message parser.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <asm/unaligned.h>
diff --git a/drivers/platform/surface/aggregator/ssh_parser.h b/drivers/platform/surface/aggregator/ssh_parser.h
index 3bd6e180fd16..801d8fa69fb5 100644
--- a/drivers/platform/surface/aggregator/ssh_parser.h
+++ b/drivers/platform/surface/aggregator/ssh_parser.h
@@ -2,7 +2,7 @@
/*
* SSH message parser.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#ifndef _SURFACE_AGGREGATOR_SSH_PARSER_H
diff --git a/drivers/platform/surface/aggregator/ssh_request_layer.c b/drivers/platform/surface/aggregator/ssh_request_layer.c
index 790f7f0eee98..f5565570f16c 100644
--- a/drivers/platform/surface/aggregator/ssh_request_layer.c
+++ b/drivers/platform/surface/aggregator/ssh_request_layer.c
@@ -2,7 +2,7 @@
/*
* SSH request transport layer.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <asm/unaligned.h>
diff --git a/drivers/platform/surface/aggregator/ssh_request_layer.h b/drivers/platform/surface/aggregator/ssh_request_layer.h
index 9c3cbae2d4bd..4e387a031351 100644
--- a/drivers/platform/surface/aggregator/ssh_request_layer.h
+++ b/drivers/platform/surface/aggregator/ssh_request_layer.h
@@ -2,7 +2,7 @@
/*
* SSH request transport layer.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#ifndef _SURFACE_AGGREGATOR_SSH_REQUEST_LAYER_H
diff --git a/drivers/platform/surface/aggregator/trace.h b/drivers/platform/surface/aggregator/trace.h
index de64cf169060..2a2c17771d01 100644
--- a/drivers/platform/surface/aggregator/trace.h
+++ b/drivers/platform/surface/aggregator/trace.h
@@ -2,7 +2,7 @@
/*
* Trace points for SSAM/SSH.
*
- * Copyright (C) 2020-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2020-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#undef TRACE_SYSTEM
@@ -76,7 +76,7 @@ TRACE_DEFINE_ENUM(SSAM_SSH_TC_HID);
TRACE_DEFINE_ENUM(SSAM_SSH_TC_TCH);
TRACE_DEFINE_ENUM(SSAM_SSH_TC_BKL);
TRACE_DEFINE_ENUM(SSAM_SSH_TC_TAM);
-TRACE_DEFINE_ENUM(SSAM_SSH_TC_ACC);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_ACC0);
TRACE_DEFINE_ENUM(SSAM_SSH_TC_UFI);
TRACE_DEFINE_ENUM(SSAM_SSH_TC_USC);
TRACE_DEFINE_ENUM(SSAM_SSH_TC_PEN);
@@ -85,6 +85,11 @@ TRACE_DEFINE_ENUM(SSAM_SSH_TC_AUD);
TRACE_DEFINE_ENUM(SSAM_SSH_TC_SMC);
TRACE_DEFINE_ENUM(SSAM_SSH_TC_KPD);
TRACE_DEFINE_ENUM(SSAM_SSH_TC_REG);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_SPT);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_SYS);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_ACC1);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_SHB);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_POS);
#define SSAM_PTR_UID_LEN 9
#define SSAM_U8_FIELD_NOT_APPLICABLE ((u16)-1)
@@ -229,40 +234,45 @@ static inline u32 ssam_trace_get_request_tc(const struct ssh_packet *p)
#define ssam_show_ssh_tc(rqid) \
__print_symbolic(rqid, \
- { SSAM_SSH_TC_NOT_APPLICABLE, "N/A" }, \
- { SSAM_SSH_TC_SAM, "SAM" }, \
- { SSAM_SSH_TC_BAT, "BAT" }, \
- { SSAM_SSH_TC_TMP, "TMP" }, \
- { SSAM_SSH_TC_PMC, "PMC" }, \
- { SSAM_SSH_TC_FAN, "FAN" }, \
- { SSAM_SSH_TC_PoM, "PoM" }, \
- { SSAM_SSH_TC_DBG, "DBG" }, \
- { SSAM_SSH_TC_KBD, "KBD" }, \
- { SSAM_SSH_TC_FWU, "FWU" }, \
- { SSAM_SSH_TC_UNI, "UNI" }, \
- { SSAM_SSH_TC_LPC, "LPC" }, \
- { SSAM_SSH_TC_TCL, "TCL" }, \
- { SSAM_SSH_TC_SFL, "SFL" }, \
- { SSAM_SSH_TC_KIP, "KIP" }, \
- { SSAM_SSH_TC_EXT, "EXT" }, \
- { SSAM_SSH_TC_BLD, "BLD" }, \
- { SSAM_SSH_TC_BAS, "BAS" }, \
- { SSAM_SSH_TC_SEN, "SEN" }, \
- { SSAM_SSH_TC_SRQ, "SRQ" }, \
- { SSAM_SSH_TC_MCU, "MCU" }, \
- { SSAM_SSH_TC_HID, "HID" }, \
- { SSAM_SSH_TC_TCH, "TCH" }, \
- { SSAM_SSH_TC_BKL, "BKL" }, \
- { SSAM_SSH_TC_TAM, "TAM" }, \
- { SSAM_SSH_TC_ACC, "ACC" }, \
- { SSAM_SSH_TC_UFI, "UFI" }, \
- { SSAM_SSH_TC_USC, "USC" }, \
- { SSAM_SSH_TC_PEN, "PEN" }, \
- { SSAM_SSH_TC_VID, "VID" }, \
- { SSAM_SSH_TC_AUD, "AUD" }, \
- { SSAM_SSH_TC_SMC, "SMC" }, \
- { SSAM_SSH_TC_KPD, "KPD" }, \
- { SSAM_SSH_TC_REG, "REG" } \
+ { SSAM_SSH_TC_NOT_APPLICABLE, "N/A" }, \
+ { SSAM_SSH_TC_SAM, "SAM" }, \
+ { SSAM_SSH_TC_BAT, "BAT" }, \
+ { SSAM_SSH_TC_TMP, "TMP" }, \
+ { SSAM_SSH_TC_PMC, "PMC" }, \
+ { SSAM_SSH_TC_FAN, "FAN" }, \
+ { SSAM_SSH_TC_PoM, "PoM" }, \
+ { SSAM_SSH_TC_DBG, "DBG" }, \
+ { SSAM_SSH_TC_KBD, "KBD" }, \
+ { SSAM_SSH_TC_FWU, "FWU" }, \
+ { SSAM_SSH_TC_UNI, "UNI" }, \
+ { SSAM_SSH_TC_LPC, "LPC" }, \
+ { SSAM_SSH_TC_TCL, "TCL" }, \
+ { SSAM_SSH_TC_SFL, "SFL" }, \
+ { SSAM_SSH_TC_KIP, "KIP" }, \
+ { SSAM_SSH_TC_EXT, "EXT" }, \
+ { SSAM_SSH_TC_BLD, "BLD" }, \
+ { SSAM_SSH_TC_BAS, "BAS" }, \
+ { SSAM_SSH_TC_SEN, "SEN" }, \
+ { SSAM_SSH_TC_SRQ, "SRQ" }, \
+ { SSAM_SSH_TC_MCU, "MCU" }, \
+ { SSAM_SSH_TC_HID, "HID" }, \
+ { SSAM_SSH_TC_TCH, "TCH" }, \
+ { SSAM_SSH_TC_BKL, "BKL" }, \
+ { SSAM_SSH_TC_TAM, "TAM" }, \
+ { SSAM_SSH_TC_ACC0, "ACC0" }, \
+ { SSAM_SSH_TC_UFI, "UFI" }, \
+ { SSAM_SSH_TC_USC, "USC" }, \
+ { SSAM_SSH_TC_PEN, "PEN" }, \
+ { SSAM_SSH_TC_VID, "VID" }, \
+ { SSAM_SSH_TC_AUD, "AUD" }, \
+ { SSAM_SSH_TC_SMC, "SMC" }, \
+ { SSAM_SSH_TC_KPD, "KPD" }, \
+ { SSAM_SSH_TC_REG, "REG" }, \
+ { SSAM_SSH_TC_SPT, "SPT" }, \
+ { SSAM_SSH_TC_SYS, "SYS" }, \
+ { SSAM_SSH_TC_ACC1, "ACC1" }, \
+ { SSAM_SSH_TC_SHB, "SHB" }, \
+ { SSAM_SSH_TC_POS, "POS" } \
)
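For reference, ssam_show_ssh_tc() is a __print_symbolic() helper, so the trace
events below can render the raw target-category byte as its mnemonic. A purely
illustrative use (the field name "tc" is hypothetical here):

	TP_printk("tc=%s", ssam_show_ssh_tc(__entry->tc))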
DECLARE_EVENT_CLASS(ssam_frame_class,
diff --git a/drivers/platform/surface/surface_acpi_notify.c b/drivers/platform/surface/surface_acpi_notify.c
index 7b758f8cc137..44e317970557 100644
--- a/drivers/platform/surface/surface_acpi_notify.c
+++ b/drivers/platform/surface/surface_acpi_notify.c
@@ -8,7 +8,7 @@
* notifications sent from ACPI via the SAN interface by providing them to any
* registered external driver.
*
- * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <asm/unaligned.h>
@@ -37,6 +37,7 @@ struct san_data {
#define to_san_data(ptr, member) \
container_of(ptr, struct san_data, member)
+static struct workqueue_struct *san_wq;
/* -- dGPU notifier interface. ---------------------------------------------- */
@@ -356,7 +357,7 @@ static u32 san_evt_bat_nf(struct ssam_event_notifier *nf,
memcpy(&work->event, event, sizeof(struct ssam_event) + event->length);
- schedule_delayed_work(&work->work, delay);
+ queue_delayed_work(san_wq, &work->work, delay);
return SSAM_NOTIF_HANDLED;
}
@@ -861,7 +862,7 @@ static int san_remove(struct platform_device *pdev)
* We have unregistered our event sources. Now we need to ensure that
* all delayed works they may have spawned are run to completion.
*/
- flush_scheduled_work();
+ flush_workqueue(san_wq);
return 0;
}
@@ -881,7 +882,27 @@ static struct platform_driver surface_acpi_notify = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
-module_platform_driver(surface_acpi_notify);
+
+static int __init san_init(void)
+{
+ int ret;
+
+ san_wq = alloc_workqueue("san_wq", 0, 0);
+ if (!san_wq)
+ return -ENOMEM;
+ ret = platform_driver_register(&surface_acpi_notify);
+ if (ret)
+ destroy_workqueue(san_wq);
+ return ret;
+}
+module_init(san_init);
+
+static void __exit san_exit(void)
+{
+ platform_driver_unregister(&surface_acpi_notify);
+ destroy_workqueue(san_wq);
+}
+module_exit(san_exit);
MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
MODULE_DESCRIPTION("Surface ACPI Notify driver for Surface System Aggregator Module");
diff --git a/drivers/platform/surface/surface_aggregator_cdev.c b/drivers/platform/surface/surface_aggregator_cdev.c
index 30fb50fde450..492c82e69182 100644
--- a/drivers/platform/surface/surface_aggregator_cdev.c
+++ b/drivers/platform/surface/surface_aggregator_cdev.c
@@ -3,7 +3,7 @@
* Provides user-space access to the SSAM EC via the /dev/surface/aggregator
* misc device. Intended for debugging and development.
*
- * Copyright (C) 2020-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2020-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <linux/fs.h>
diff --git a/drivers/platform/surface/surface_aggregator_hub.c b/drivers/platform/surface/surface_aggregator_hub.c
new file mode 100644
index 000000000000..43061514be38
--- /dev/null
+++ b/drivers/platform/surface/surface_aggregator_hub.c
@@ -0,0 +1,371 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for Surface System Aggregator Module (SSAM) subsystem device hubs.
+ *
+ * Provides a driver for SSAM subsystem device hubs. This driver performs
+ * instantiation of the devices managed by said hubs and takes care of
+ * (hot-)removal.
+ *
+ * Copyright (C) 2020-2022 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <linux/surface_aggregator/device.h>
+
+
+/* -- SSAM generic subsystem hub driver framework. -------------------------- */
+
+enum ssam_hub_state {
+ SSAM_HUB_UNINITIALIZED, /* Only set during initialization. */
+ SSAM_HUB_CONNECTED,
+ SSAM_HUB_DISCONNECTED,
+};
+
+enum ssam_hub_flags {
+ SSAM_HUB_HOT_REMOVED,
+};
+
+struct ssam_hub;
+
+struct ssam_hub_ops {
+ int (*get_state)(struct ssam_hub *hub, enum ssam_hub_state *state);
+};
+
+struct ssam_hub {
+ struct ssam_device *sdev;
+
+ enum ssam_hub_state state;
+ unsigned long flags;
+
+ struct delayed_work update_work;
+ unsigned long connect_delay;
+
+ struct ssam_event_notifier notif;
+ struct ssam_hub_ops ops;
+};
+
+struct ssam_hub_desc {
+ struct {
+ struct ssam_event_registry reg;
+ struct ssam_event_id id;
+ enum ssam_event_mask mask;
+ } event;
+
+ struct {
+ u32 (*notify)(struct ssam_event_notifier *nf, const struct ssam_event *event);
+ int (*get_state)(struct ssam_hub *hub, enum ssam_hub_state *state);
+ } ops;
+
+ unsigned long connect_delay_ms;
+};
+
+static void ssam_hub_update_workfn(struct work_struct *work)
+{
+ struct ssam_hub *hub = container_of(work, struct ssam_hub, update_work.work);
+ enum ssam_hub_state state;
+ int status = 0;
+
+ status = hub->ops.get_state(hub, &state);
+ if (status)
+ return;
+
+ /*
+ * There is a small possibility that hub devices were hot-removed and
+ * re-added before we were able to remove them here. In that case, both
+ * the state returned by get_state() and the state of the hub will
+ * equal SSAM_HUB_CONNECTED and we would bail early below, which would
+ * leave child devices without proper (re-)initialization and the
+ * hot-remove flag set.
+ *
+ * Therefore, we check whether devices have been hot-removed via an
+ * additional flag on the hub and, in this case, override the returned
+ * hub state. In case of a missed disconnect (i.e. get_state returned
+ * "connected"), we further need to re-schedule this work (with the
+ * appropriate delay) as the actual connect work submission might have
+ * been merged with this one.
+ *
+ * This then leads to one of two cases: Either we submit an unnecessary
+ * work item (which will get ignored via either the queue or the state
+ * checks) or, in the unlikely case that the work is actually required,
+ * double the normal connect delay.
+ */
+ if (test_and_clear_bit(SSAM_HUB_HOT_REMOVED, &hub->flags)) {
+ if (state == SSAM_HUB_CONNECTED)
+ schedule_delayed_work(&hub->update_work, hub->connect_delay);
+
+ state = SSAM_HUB_DISCONNECTED;
+ }
+
+ if (hub->state == state)
+ return;
+ hub->state = state;
+
+ if (hub->state == SSAM_HUB_CONNECTED)
+ status = ssam_device_register_clients(hub->sdev);
+ else
+ ssam_remove_clients(&hub->sdev->dev);
+
+ if (status)
+ dev_err(&hub->sdev->dev, "failed to update hub child devices: %d\n", status);
+}
+
+static int ssam_hub_mark_hot_removed(struct device *dev, void *_data)
+{
+ struct ssam_device *sdev = to_ssam_device(dev);
+
+ if (is_ssam_device(dev))
+ ssam_device_mark_hot_removed(sdev);
+
+ return 0;
+}
+
+static void ssam_hub_update(struct ssam_hub *hub, bool connected)
+{
+ unsigned long delay;
+
+ /* Mark devices as hot-removed before we remove any. */
+ if (!connected) {
+ set_bit(SSAM_HUB_HOT_REMOVED, &hub->flags);
+ device_for_each_child_reverse(&hub->sdev->dev, NULL, ssam_hub_mark_hot_removed);
+ }
+
+ /*
+ * Delay update when the base/keyboard cover is being connected to give
+ * devices/EC some time to set up.
+ */
+ delay = connected ? hub->connect_delay : 0;
+
+ schedule_delayed_work(&hub->update_work, delay);
+}
+
+static int __maybe_unused ssam_hub_resume(struct device *dev)
+{
+ struct ssam_hub *hub = dev_get_drvdata(dev);
+
+ schedule_delayed_work(&hub->update_work, 0);
+ return 0;
+}
+static SIMPLE_DEV_PM_OPS(ssam_hub_pm_ops, NULL, ssam_hub_resume);
+
+static int ssam_hub_probe(struct ssam_device *sdev)
+{
+ const struct ssam_hub_desc *desc;
+ struct ssam_hub *hub;
+ int status;
+
+ desc = ssam_device_get_match_data(sdev);
+ if (!desc) {
+ WARN(1, "no driver match data specified");
+ return -EINVAL;
+ }
+
+ hub = devm_kzalloc(&sdev->dev, sizeof(*hub), GFP_KERNEL);
+ if (!hub)
+ return -ENOMEM;
+
+ hub->sdev = sdev;
+ hub->state = SSAM_HUB_UNINITIALIZED;
+
+ hub->notif.base.priority = INT_MAX; /* This notifier should run first. */
+ hub->notif.base.fn = desc->ops.notify;
+ hub->notif.event.reg = desc->event.reg;
+ hub->notif.event.id = desc->event.id;
+ hub->notif.event.mask = desc->event.mask;
+ hub->notif.event.flags = SSAM_EVENT_SEQUENCED;
+
+ hub->connect_delay = msecs_to_jiffies(desc->connect_delay_ms);
+ hub->ops.get_state = desc->ops.get_state;
+
+ INIT_DELAYED_WORK(&hub->update_work, ssam_hub_update_workfn);
+
+ ssam_device_set_drvdata(sdev, hub);
+
+ status = ssam_device_notifier_register(sdev, &hub->notif);
+ if (status)
+ return status;
+
+ schedule_delayed_work(&hub->update_work, 0);
+ return 0;
+}
+
+static void ssam_hub_remove(struct ssam_device *sdev)
+{
+ struct ssam_hub *hub = ssam_device_get_drvdata(sdev);
+
+ ssam_device_notifier_unregister(sdev, &hub->notif);
+ cancel_delayed_work_sync(&hub->update_work);
+ ssam_remove_clients(&sdev->dev);
+}
+
+
+/* -- SSAM base-subsystem hub driver. --------------------------------------- */
+
+/*
+ * Some devices (especially battery) may need a bit of time to be fully usable
+ * after being (re-)connected. This delay has been determined via
+ * experimentation.
+ */
+#define SSAM_BASE_UPDATE_CONNECT_DELAY 2500
+
+SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_query_opmode, u8, {
+ .target_category = SSAM_SSH_TC_BAS,
+ .target_id = 0x01,
+ .command_id = 0x0d,
+ .instance_id = 0x00,
+});
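Judging from the call site below, ssam_retry(ssam_bas_query_opmode,
hub->sdev->ctrl, &opmode), SSAM_DEFINE_SYNC_REQUEST_R(name, type, spec)
presumably generates a synchronous request function of roughly this shape
(sketch, not the actual macro expansion):

	static int ssam_bas_query_opmode(struct ssam_controller *ctrl, u8 *ret);
	/* Sends the TC=BAS, CID=0x0d request and stores the one-byte response
	 * (the base operation mode) in *ret; returns a negative errno on failure. */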
+
+#define SSAM_BAS_OPMODE_TABLET 0x00
+#define SSAM_EVENT_BAS_CID_CONNECTION 0x0c
+
+static int ssam_base_hub_query_state(struct ssam_hub *hub, enum ssam_hub_state *state)
+{
+ u8 opmode;
+ int status;
+
+ status = ssam_retry(ssam_bas_query_opmode, hub->sdev->ctrl, &opmode);
+ if (status < 0) {
+ dev_err(&hub->sdev->dev, "failed to query base state: %d\n", status);
+ return status;
+ }
+
+ if (opmode != SSAM_BAS_OPMODE_TABLET)
+ *state = SSAM_HUB_CONNECTED;
+ else
+ *state = SSAM_HUB_DISCONNECTED;
+
+ return 0;
+}
+
+static u32 ssam_base_hub_notif(struct ssam_event_notifier *nf, const struct ssam_event *event)
+{
+ struct ssam_hub *hub = container_of(nf, struct ssam_hub, notif);
+
+ if (event->command_id != SSAM_EVENT_BAS_CID_CONNECTION)
+ return 0;
+
+ if (event->length < 1) {
+ dev_err(&hub->sdev->dev, "unexpected payload size: %u\n", event->length);
+ return 0;
+ }
+
+ ssam_hub_update(hub, event->data[0]);
+
+ /*
+ * Do not return SSAM_NOTIF_HANDLED: The event should be picked up and
+ * consumed by the detachment system driver. We're just a (more or less)
+ * silent observer.
+ */
+ return 0;
+}
+
+static const struct ssam_hub_desc base_hub = {
+ .event = {
+ .reg = SSAM_EVENT_REGISTRY_SAM,
+ .id = {
+ .target_category = SSAM_SSH_TC_BAS,
+ .instance = 0,
+ },
+ .mask = SSAM_EVENT_MASK_NONE,
+ },
+ .ops = {
+ .notify = ssam_base_hub_notif,
+ .get_state = ssam_base_hub_query_state,
+ },
+ .connect_delay_ms = SSAM_BASE_UPDATE_CONNECT_DELAY,
+};
+
+
+/* -- SSAM KIP-subsystem hub driver. ---------------------------------------- */
+
+/*
+ * Some devices may need a bit of time to be fully usable after being
+ * (re-)connected. This delay has been determined via experimentation.
+ */
+#define SSAM_KIP_UPDATE_CONNECT_DELAY 250
+
+#define SSAM_EVENT_KIP_CID_CONNECTION 0x2c
+
+SSAM_DEFINE_SYNC_REQUEST_R(__ssam_kip_query_state, u8, {
+ .target_category = SSAM_SSH_TC_KIP,
+ .target_id = 0x01,
+ .command_id = 0x2c,
+ .instance_id = 0x00,
+});
+
+static int ssam_kip_hub_query_state(struct ssam_hub *hub, enum ssam_hub_state *state)
+{
+ int status;
+ u8 connected;
+
+ status = ssam_retry(__ssam_kip_query_state, hub->sdev->ctrl, &connected);
+ if (status < 0) {
+ dev_err(&hub->sdev->dev, "failed to query KIP connection state: %d\n", status);
+ return status;
+ }
+
+ *state = connected ? SSAM_HUB_CONNECTED : SSAM_HUB_DISCONNECTED;
+ return 0;
+}
+
+static u32 ssam_kip_hub_notif(struct ssam_event_notifier *nf, const struct ssam_event *event)
+{
+ struct ssam_hub *hub = container_of(nf, struct ssam_hub, notif);
+
+ if (event->command_id != SSAM_EVENT_KIP_CID_CONNECTION)
+ return 0; /* Return "unhandled". */
+
+ if (event->length < 1) {
+ dev_err(&hub->sdev->dev, "unexpected payload size: %u\n", event->length);
+ return 0;
+ }
+
+ ssam_hub_update(hub, event->data[0]);
+ return SSAM_NOTIF_HANDLED;
+}
+
+static const struct ssam_hub_desc kip_hub = {
+ .event = {
+ .reg = SSAM_EVENT_REGISTRY_SAM,
+ .id = {
+ .target_category = SSAM_SSH_TC_KIP,
+ .instance = 0,
+ },
+ .mask = SSAM_EVENT_MASK_TARGET,
+ },
+ .ops = {
+ .notify = ssam_kip_hub_notif,
+ .get_state = ssam_kip_hub_query_state,
+ },
+ .connect_delay_ms = SSAM_KIP_UPDATE_CONNECT_DELAY,
+};
+
+
+/* -- Driver registration. -------------------------------------------------- */
+
+static const struct ssam_device_id ssam_hub_match[] = {
+ { SSAM_VDEV(HUB, 0x01, SSAM_SSH_TC_KIP, 0x00), (unsigned long)&kip_hub },
+ { SSAM_VDEV(HUB, 0x02, SSAM_SSH_TC_BAS, 0x00), (unsigned long)&base_hub },
+ { }
+};
+MODULE_DEVICE_TABLE(ssam, ssam_hub_match);
+
+static struct ssam_device_driver ssam_subsystem_hub_driver = {
+ .probe = ssam_hub_probe,
+ .remove = ssam_hub_remove,
+ .match_table = ssam_hub_match,
+ .driver = {
+ .name = "surface_aggregator_subsystem_hub",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .pm = &ssam_hub_pm_ops,
+ },
+};
+module_ssam_device_driver(ssam_subsystem_hub_driver);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("Subsystem device hub driver for Surface System Aggregator Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
index ce2bd88feeaa..d5655f6a4a41 100644
--- a/drivers/platform/surface/surface_aggregator_registry.c
+++ b/drivers/platform/surface/surface_aggregator_registry.c
@@ -6,19 +6,16 @@
* cannot be auto-detected. Provides device-hubs and performs instantiation
* for these devices.
*
- * Copyright (C) 2020-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2020-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <linux/acpi.h>
#include <linux/kernel.h>
-#include <linux/limits.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/types.h>
-#include <linux/workqueue.h>
-#include <linux/surface_aggregator/controller.h>
#include <linux/surface_aggregator/device.h>
@@ -41,9 +38,15 @@ static const struct software_node ssam_node_root = {
.name = "ssam_platform_hub",
};
+/* KIP device hub (connects keyboard cover devices on Surface Pro 8). */
+static const struct software_node ssam_node_hub_kip = {
+ .name = "ssam:00:00:01:0e:00",
+ .parent = &ssam_node_root,
+};
+
/* Base device hub (devices attached to Surface Book 3 base). */
static const struct software_node ssam_node_hub_base = {
- .name = "ssam:00:00:02:00:00",
+ .name = "ssam:00:00:02:11:00",
.parent = &ssam_node_root,
};
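For orientation: these software-node names encode the SSAM device UID in the
same "ssam:<domain>:<category>:<target>:<instance>:<function>" form that the
(now removed) ssam_uid_from_string() helper further down parses. For example,
the KIP tablet-switch node added below decodes as:

	/* "ssam:01:0e:01:00:01" -> domain 0x01, category 0x0e (KIP),
	 *                          target 0x01, instance 0x00, function 0x01 */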
@@ -71,6 +74,12 @@ static const struct software_node ssam_node_tmp_pprof = {
.parent = &ssam_node_root,
};
+/* Tablet-mode switch via KIP subsystem. */
+static const struct software_node ssam_node_kip_tablet_switch = {
+ .name = "ssam:01:0e:01:00:01",
+ .parent = &ssam_node_root,
+};
+
/* DTX / detachment-system device (Surface Book 3). */
static const struct software_node ssam_node_bas_dtx = {
.name = "ssam:01:11:01:00:00",
@@ -155,6 +164,36 @@ static const struct software_node ssam_node_hid_base_iid6 = {
.parent = &ssam_node_hub_base,
};
+/* HID keyboard (KIP hub). */
+static const struct software_node ssam_node_hid_kip_keyboard = {
+ .name = "ssam:01:15:02:01:00",
+ .parent = &ssam_node_hub_kip,
+};
+
+/* HID pen stash (KIP hub; pen taken / stashed away events). */
+static const struct software_node ssam_node_hid_kip_penstash = {
+ .name = "ssam:01:15:02:02:00",
+ .parent = &ssam_node_hub_kip,
+};
+
+/* HID touchpad (KIP hub). */
+static const struct software_node ssam_node_hid_kip_touchpad = {
+ .name = "ssam:01:15:02:03:00",
+ .parent = &ssam_node_hub_kip,
+};
+
+/* HID device instance 5 (KIP hub, unknown HID device). */
+static const struct software_node ssam_node_hid_kip_iid5 = {
+ .name = "ssam:01:15:02:05:00",
+ .parent = &ssam_node_hub_kip,
+};
+
+/* Tablet-mode switch via POS subsystem. */
+static const struct software_node ssam_node_pos_tablet_switch = {
+ .name = "ssam:01:26:01:00:01",
+ .parent = &ssam_node_root,
+};
+
/*
* Devices for 5th- and 6th-generations models:
* - Surface Book 2,
@@ -201,6 +240,7 @@ static const struct software_node *ssam_node_group_sls[] = {
&ssam_node_bat_ac,
&ssam_node_bat_main,
&ssam_node_tmp_pprof,
+ &ssam_node_pos_tablet_switch,
&ssam_node_hid_tid1_keyboard,
&ssam_node_hid_tid1_penstash,
&ssam_node_hid_tid1_touchpad,
@@ -230,289 +270,18 @@ static const struct software_node *ssam_node_group_sp7[] = {
static const struct software_node *ssam_node_group_sp8[] = {
&ssam_node_root,
+ &ssam_node_hub_kip,
&ssam_node_bat_ac,
&ssam_node_bat_main,
&ssam_node_tmp_pprof,
- /* TODO: Add support for keyboard cover. */
- NULL,
-};
-
-
-/* -- Device registry helper functions. ------------------------------------- */
-
-static int ssam_uid_from_string(const char *str, struct ssam_device_uid *uid)
-{
- u8 d, tc, tid, iid, fn;
- int n;
-
- n = sscanf(str, "ssam:%hhx:%hhx:%hhx:%hhx:%hhx", &d, &tc, &tid, &iid, &fn);
- if (n != 5)
- return -EINVAL;
-
- uid->domain = d;
- uid->category = tc;
- uid->target = tid;
- uid->instance = iid;
- uid->function = fn;
-
- return 0;
-}
-
-static int ssam_hub_add_device(struct device *parent, struct ssam_controller *ctrl,
- struct fwnode_handle *node)
-{
- struct ssam_device_uid uid;
- struct ssam_device *sdev;
- int status;
-
- status = ssam_uid_from_string(fwnode_get_name(node), &uid);
- if (status)
- return status;
-
- sdev = ssam_device_alloc(ctrl, uid);
- if (!sdev)
- return -ENOMEM;
-
- sdev->dev.parent = parent;
- sdev->dev.fwnode = node;
-
- status = ssam_device_add(sdev);
- if (status)
- ssam_device_put(sdev);
-
- return status;
-}
-
-static int ssam_hub_register_clients(struct device *parent, struct ssam_controller *ctrl,
- struct fwnode_handle *node)
-{
- struct fwnode_handle *child;
- int status;
-
- fwnode_for_each_child_node(node, child) {
- /*
- * Try to add the device specified in the firmware node. If
- * this fails with -EINVAL, the node does not specify any SSAM
- * device, so ignore it and continue with the next one.
- */
-
- status = ssam_hub_add_device(parent, ctrl, child);
- if (status && status != -EINVAL)
- goto err;
- }
-
- return 0;
-err:
- ssam_remove_clients(parent);
- return status;
-}
-
-
-/* -- SSAM base-hub driver. ------------------------------------------------- */
-
-/*
- * Some devices (especially battery) may need a bit of time to be fully usable
- * after being (re-)connected. This delay has been determined via
- * experimentation.
- */
-#define SSAM_BASE_UPDATE_CONNECT_DELAY msecs_to_jiffies(2500)
-
-enum ssam_base_hub_state {
- SSAM_BASE_HUB_UNINITIALIZED,
- SSAM_BASE_HUB_CONNECTED,
- SSAM_BASE_HUB_DISCONNECTED,
-};
-
-struct ssam_base_hub {
- struct ssam_device *sdev;
-
- enum ssam_base_hub_state state;
- struct delayed_work update_work;
-
- struct ssam_event_notifier notif;
-};
-
-SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_query_opmode, u8, {
- .target_category = SSAM_SSH_TC_BAS,
- .target_id = 0x01,
- .command_id = 0x0d,
- .instance_id = 0x00,
-});
-
-#define SSAM_BAS_OPMODE_TABLET 0x00
-#define SSAM_EVENT_BAS_CID_CONNECTION 0x0c
-
-static int ssam_base_hub_query_state(struct ssam_base_hub *hub, enum ssam_base_hub_state *state)
-{
- u8 opmode;
- int status;
-
- status = ssam_retry(ssam_bas_query_opmode, hub->sdev->ctrl, &opmode);
- if (status < 0) {
- dev_err(&hub->sdev->dev, "failed to query base state: %d\n", status);
- return status;
- }
-
- if (opmode != SSAM_BAS_OPMODE_TABLET)
- *state = SSAM_BASE_HUB_CONNECTED;
- else
- *state = SSAM_BASE_HUB_DISCONNECTED;
-
- return 0;
-}
-
-static ssize_t ssam_base_hub_state_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct ssam_base_hub *hub = dev_get_drvdata(dev);
- bool connected = hub->state == SSAM_BASE_HUB_CONNECTED;
-
- return sysfs_emit(buf, "%d\n", connected);
-}
-
-static struct device_attribute ssam_base_hub_attr_state =
- __ATTR(state, 0444, ssam_base_hub_state_show, NULL);
-
-static struct attribute *ssam_base_hub_attrs[] = {
- &ssam_base_hub_attr_state.attr,
+ &ssam_node_kip_tablet_switch,
+ &ssam_node_hid_kip_keyboard,
+ &ssam_node_hid_kip_penstash,
+ &ssam_node_hid_kip_touchpad,
+ &ssam_node_hid_kip_iid5,
NULL,
};
-static const struct attribute_group ssam_base_hub_group = {
- .attrs = ssam_base_hub_attrs,
-};
-
-static void ssam_base_hub_update_workfn(struct work_struct *work)
-{
- struct ssam_base_hub *hub = container_of(work, struct ssam_base_hub, update_work.work);
- struct fwnode_handle *node = dev_fwnode(&hub->sdev->dev);
- enum ssam_base_hub_state state;
- int status = 0;
-
- status = ssam_base_hub_query_state(hub, &state);
- if (status)
- return;
-
- if (hub->state == state)
- return;
- hub->state = state;
-
- if (hub->state == SSAM_BASE_HUB_CONNECTED)
- status = ssam_hub_register_clients(&hub->sdev->dev, hub->sdev->ctrl, node);
- else
- ssam_remove_clients(&hub->sdev->dev);
-
- if (status)
- dev_err(&hub->sdev->dev, "failed to update base-hub devices: %d\n", status);
-}
-
-static u32 ssam_base_hub_notif(struct ssam_event_notifier *nf, const struct ssam_event *event)
-{
- struct ssam_base_hub *hub = container_of(nf, struct ssam_base_hub, notif);
- unsigned long delay;
-
- if (event->command_id != SSAM_EVENT_BAS_CID_CONNECTION)
- return 0;
-
- if (event->length < 1) {
- dev_err(&hub->sdev->dev, "unexpected payload size: %u\n", event->length);
- return 0;
- }
-
- /*
- * Delay update when the base is being connected to give devices/EC
- * some time to set up.
- */
- delay = event->data[0] ? SSAM_BASE_UPDATE_CONNECT_DELAY : 0;
-
- schedule_delayed_work(&hub->update_work, delay);
-
- /*
- * Do not return SSAM_NOTIF_HANDLED: The event should be picked up and
- * consumed by the detachment system driver. We're just a (more or less)
- * silent observer.
- */
- return 0;
-}
-
-static int __maybe_unused ssam_base_hub_resume(struct device *dev)
-{
- struct ssam_base_hub *hub = dev_get_drvdata(dev);
-
- schedule_delayed_work(&hub->update_work, 0);
- return 0;
-}
-static SIMPLE_DEV_PM_OPS(ssam_base_hub_pm_ops, NULL, ssam_base_hub_resume);
-
-static int ssam_base_hub_probe(struct ssam_device *sdev)
-{
- struct ssam_base_hub *hub;
- int status;
-
- hub = devm_kzalloc(&sdev->dev, sizeof(*hub), GFP_KERNEL);
- if (!hub)
- return -ENOMEM;
-
- hub->sdev = sdev;
- hub->state = SSAM_BASE_HUB_UNINITIALIZED;
-
- hub->notif.base.priority = INT_MAX; /* This notifier should run first. */
- hub->notif.base.fn = ssam_base_hub_notif;
- hub->notif.event.reg = SSAM_EVENT_REGISTRY_SAM;
- hub->notif.event.id.target_category = SSAM_SSH_TC_BAS,
- hub->notif.event.id.instance = 0,
- hub->notif.event.mask = SSAM_EVENT_MASK_NONE;
- hub->notif.event.flags = SSAM_EVENT_SEQUENCED;
-
- INIT_DELAYED_WORK(&hub->update_work, ssam_base_hub_update_workfn);
-
- ssam_device_set_drvdata(sdev, hub);
-
- status = ssam_notifier_register(sdev->ctrl, &hub->notif);
- if (status)
- return status;
-
- status = sysfs_create_group(&sdev->dev.kobj, &ssam_base_hub_group);
- if (status)
- goto err;
-
- schedule_delayed_work(&hub->update_work, 0);
- return 0;
-
-err:
- ssam_notifier_unregister(sdev->ctrl, &hub->notif);
- cancel_delayed_work_sync(&hub->update_work);
- ssam_remove_clients(&sdev->dev);
- return status;
-}
-
-static void ssam_base_hub_remove(struct ssam_device *sdev)
-{
- struct ssam_base_hub *hub = ssam_device_get_drvdata(sdev);
-
- sysfs_remove_group(&sdev->dev.kobj, &ssam_base_hub_group);
-
- ssam_notifier_unregister(sdev->ctrl, &hub->notif);
- cancel_delayed_work_sync(&hub->update_work);
- ssam_remove_clients(&sdev->dev);
-}
-
-static const struct ssam_device_id ssam_base_hub_match[] = {
- { SSAM_VDEV(HUB, 0x02, SSAM_ANY_IID, 0x00) },
- { },
-};
-
-static struct ssam_device_driver ssam_base_hub_driver = {
- .probe = ssam_base_hub_probe,
- .remove = ssam_base_hub_remove,
- .match_table = ssam_base_hub_match,
- .driver = {
- .name = "surface_aggregator_base_hub",
- .probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .pm = &ssam_base_hub_pm_ops,
- },
-};
-
/* -- SSAM platform/meta-hub driver. ---------------------------------------- */
@@ -597,7 +366,7 @@ static int ssam_platform_hub_probe(struct platform_device *pdev)
set_secondary_fwnode(&pdev->dev, root);
- status = ssam_hub_register_clients(&pdev->dev, ctrl, root);
+ status = __ssam_register_clients(&pdev->dev, ctrl, root);
if (status) {
set_secondary_fwnode(&pdev->dev, NULL);
software_node_unregister_node_group(nodes);
@@ -626,32 +395,7 @@ static struct platform_driver ssam_platform_hub_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
-
-
-/* -- Module initialization. ------------------------------------------------ */
-
-static int __init ssam_device_hub_init(void)
-{
- int status;
-
- status = platform_driver_register(&ssam_platform_hub_driver);
- if (status)
- return status;
-
- status = ssam_device_driver_register(&ssam_base_hub_driver);
- if (status)
- platform_driver_unregister(&ssam_platform_hub_driver);
-
- return status;
-}
-module_init(ssam_device_hub_init);
-
-static void __exit ssam_device_hub_exit(void)
-{
- ssam_device_driver_unregister(&ssam_base_hub_driver);
- platform_driver_unregister(&ssam_platform_hub_driver);
-}
-module_exit(ssam_device_hub_exit);
+module_platform_driver(ssam_platform_hub_driver);
MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
MODULE_DESCRIPTION("Device-registry for Surface System Aggregator Module");
diff --git a/drivers/platform/surface/surface_aggregator_tabletsw.c b/drivers/platform/surface/surface_aggregator_tabletsw.c
new file mode 100644
index 000000000000..27d95a6a7851
--- /dev/null
+++ b/drivers/platform/surface/surface_aggregator_tabletsw.c
@@ -0,0 +1,533 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Surface System Aggregator Module (SSAM) tablet mode switch driver.
+ *
+ * Copyright (C) 2022 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/input.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <linux/surface_aggregator/controller.h>
+#include <linux/surface_aggregator/device.h>
+
+
+/* -- SSAM generic tablet switch driver framework. -------------------------- */
+
+struct ssam_tablet_sw;
+
+struct ssam_tablet_sw_ops {
+ int (*get_state)(struct ssam_tablet_sw *sw, u32 *state);
+ const char *(*state_name)(struct ssam_tablet_sw *sw, u32 state);
+ bool (*state_is_tablet_mode)(struct ssam_tablet_sw *sw, u32 state);
+};
+
+struct ssam_tablet_sw {
+ struct ssam_device *sdev;
+
+ u32 state;
+ struct work_struct update_work;
+ struct input_dev *mode_switch;
+
+ struct ssam_tablet_sw_ops ops;
+ struct ssam_event_notifier notif;
+};
+
+struct ssam_tablet_sw_desc {
+ struct {
+ const char *name;
+ const char *phys;
+ } dev;
+
+ struct {
+ u32 (*notify)(struct ssam_event_notifier *nf, const struct ssam_event *event);
+ int (*get_state)(struct ssam_tablet_sw *sw, u32 *state);
+ const char *(*state_name)(struct ssam_tablet_sw *sw, u32 state);
+ bool (*state_is_tablet_mode)(struct ssam_tablet_sw *sw, u32 state);
+ } ops;
+
+ struct {
+ struct ssam_event_registry reg;
+ struct ssam_event_id id;
+ enum ssam_event_mask mask;
+ u8 flags;
+ } event;
+};
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ssam_tablet_sw *sw = dev_get_drvdata(dev);
+ const char *state = sw->ops.state_name(sw, sw->state);
+
+ return sysfs_emit(buf, "%s\n", state);
+}
+static DEVICE_ATTR_RO(state);
+
+static struct attribute *ssam_tablet_sw_attrs[] = {
+ &dev_attr_state.attr,
+ NULL,
+};
+
+static const struct attribute_group ssam_tablet_sw_group = {
+ .attrs = ssam_tablet_sw_attrs,
+};
+
+static void ssam_tablet_sw_update_workfn(struct work_struct *work)
+{
+ struct ssam_tablet_sw *sw = container_of(work, struct ssam_tablet_sw, update_work);
+ int tablet, status;
+ u32 state;
+
+ status = sw->ops.get_state(sw, &state);
+ if (status)
+ return;
+
+ if (sw->state == state)
+ return;
+ sw->state = state;
+
+ /* Send SW_TABLET_MODE event. */
+ tablet = sw->ops.state_is_tablet_mode(sw, state);
+ input_report_switch(sw->mode_switch, SW_TABLET_MODE, tablet);
+ input_sync(sw->mode_switch);
+}
+
+static int __maybe_unused ssam_tablet_sw_resume(struct device *dev)
+{
+ struct ssam_tablet_sw *sw = dev_get_drvdata(dev);
+
+ schedule_work(&sw->update_work);
+ return 0;
+}
+static SIMPLE_DEV_PM_OPS(ssam_tablet_sw_pm_ops, NULL, ssam_tablet_sw_resume);
+
+static int ssam_tablet_sw_probe(struct ssam_device *sdev)
+{
+ const struct ssam_tablet_sw_desc *desc;
+ struct ssam_tablet_sw *sw;
+ int tablet, status;
+
+ desc = ssam_device_get_match_data(sdev);
+ if (!desc) {
+ WARN(1, "no driver match data specified");
+ return -EINVAL;
+ }
+
+ sw = devm_kzalloc(&sdev->dev, sizeof(*sw), GFP_KERNEL);
+ if (!sw)
+ return -ENOMEM;
+
+ sw->sdev = sdev;
+
+ sw->ops.get_state = desc->ops.get_state;
+ sw->ops.state_name = desc->ops.state_name;
+ sw->ops.state_is_tablet_mode = desc->ops.state_is_tablet_mode;
+
+ INIT_WORK(&sw->update_work, ssam_tablet_sw_update_workfn);
+
+ ssam_device_set_drvdata(sdev, sw);
+
+ /* Get initial state. */
+ status = sw->ops.get_state(sw, &sw->state);
+ if (status)
+ return status;
+
+ /* Set up tablet mode switch. */
+ sw->mode_switch = devm_input_allocate_device(&sdev->dev);
+ if (!sw->mode_switch)
+ return -ENOMEM;
+
+ sw->mode_switch->name = desc->dev.name;
+ sw->mode_switch->phys = desc->dev.phys;
+ sw->mode_switch->id.bustype = BUS_HOST;
+ sw->mode_switch->dev.parent = &sdev->dev;
+
+ tablet = sw->ops.state_is_tablet_mode(sw, sw->state);
+ input_set_capability(sw->mode_switch, EV_SW, SW_TABLET_MODE);
+ input_report_switch(sw->mode_switch, SW_TABLET_MODE, tablet);
+
+ status = input_register_device(sw->mode_switch);
+ if (status)
+ return status;
+
+ /* Set up notifier. */
+ sw->notif.base.priority = 0;
+ sw->notif.base.fn = desc->ops.notify;
+ sw->notif.event.reg = desc->event.reg;
+ sw->notif.event.id = desc->event.id;
+ sw->notif.event.mask = desc->event.mask;
+ sw->notif.event.flags = SSAM_EVENT_SEQUENCED;
+
+ status = ssam_device_notifier_register(sdev, &sw->notif);
+ if (status)
+ return status;
+
+ status = sysfs_create_group(&sdev->dev.kobj, &ssam_tablet_sw_group);
+ if (status)
+ goto err;
+
+ /* We might have missed events during setup, so check again. */
+ schedule_work(&sw->update_work);
+ return 0;
+
+err:
+ ssam_device_notifier_unregister(sdev, &sw->notif);
+ cancel_work_sync(&sw->update_work);
+ return status;
+}
+
+static void ssam_tablet_sw_remove(struct ssam_device *sdev)
+{
+ struct ssam_tablet_sw *sw = ssam_device_get_drvdata(sdev);
+
+ sysfs_remove_group(&sdev->dev.kobj, &ssam_tablet_sw_group);
+
+ ssam_device_notifier_unregister(sdev, &sw->notif);
+ cancel_work_sync(&sw->update_work);
+}
+
+
+/* -- SSAM KIP tablet switch implementation. -------------------------------- */
+
+#define SSAM_EVENT_KIP_CID_COVER_STATE_CHANGED 0x1d
+
+enum ssam_kip_cover_state {
+ SSAM_KIP_COVER_STATE_DISCONNECTED = 0x01,
+ SSAM_KIP_COVER_STATE_CLOSED = 0x02,
+ SSAM_KIP_COVER_STATE_LAPTOP = 0x03,
+ SSAM_KIP_COVER_STATE_FOLDED_CANVAS = 0x04,
+ SSAM_KIP_COVER_STATE_FOLDED_BACK = 0x05,
+};
+
+static const char *ssam_kip_cover_state_name(struct ssam_tablet_sw *sw, u32 state)
+{
+ switch (state) {
+ case SSAM_KIP_COVER_STATE_DISCONNECTED:
+ return "disconnected";
+
+ case SSAM_KIP_COVER_STATE_CLOSED:
+ return "closed";
+
+ case SSAM_KIP_COVER_STATE_LAPTOP:
+ return "laptop";
+
+ case SSAM_KIP_COVER_STATE_FOLDED_CANVAS:
+ return "folded-canvas";
+
+ case SSAM_KIP_COVER_STATE_FOLDED_BACK:
+ return "folded-back";
+
+ default:
+ dev_warn(&sw->sdev->dev, "unknown KIP cover state: %u\n", state);
+ return "<unknown>";
+ }
+}
+
+static bool ssam_kip_cover_state_is_tablet_mode(struct ssam_tablet_sw *sw, u32 state)
+{
+ switch (state) {
+ case SSAM_KIP_COVER_STATE_DISCONNECTED:
+ case SSAM_KIP_COVER_STATE_FOLDED_CANVAS:
+ case SSAM_KIP_COVER_STATE_FOLDED_BACK:
+ return true;
+
+ case SSAM_KIP_COVER_STATE_CLOSED:
+ case SSAM_KIP_COVER_STATE_LAPTOP:
+ return false;
+
+ default:
+ dev_warn(&sw->sdev->dev, "unknown KIP cover state: %d\n", sw->state);
+ return true;
+ }
+}
+
+SSAM_DEFINE_SYNC_REQUEST_R(__ssam_kip_get_cover_state, u8, {
+ .target_category = SSAM_SSH_TC_KIP,
+ .target_id = 0x01,
+ .command_id = 0x1d,
+ .instance_id = 0x00,
+});
+
+static int ssam_kip_get_cover_state(struct ssam_tablet_sw *sw, u32 *state)
+{
+ int status;
+ u8 raw;
+
+ status = ssam_retry(__ssam_kip_get_cover_state, sw->sdev->ctrl, &raw);
+ if (status < 0) {
+ dev_err(&sw->sdev->dev, "failed to query KIP lid state: %d\n", status);
+ return status;
+ }
+
+ *state = raw;
+ return 0;
+}
+
+static u32 ssam_kip_sw_notif(struct ssam_event_notifier *nf, const struct ssam_event *event)
+{
+ struct ssam_tablet_sw *sw = container_of(nf, struct ssam_tablet_sw, notif);
+
+ if (event->command_id != SSAM_EVENT_KIP_CID_COVER_STATE_CHANGED)
+ return 0; /* Return "unhandled". */
+
+ if (event->length < 1)
+ dev_warn(&sw->sdev->dev, "unexpected payload size: %u\n", event->length);
+
+ schedule_work(&sw->update_work);
+ return SSAM_NOTIF_HANDLED;
+}
+
+static const struct ssam_tablet_sw_desc ssam_kip_sw_desc = {
+ .dev = {
+ .name = "Microsoft Surface KIP Tablet Mode Switch",
+ .phys = "ssam/01:0e:01:00:01/input0",
+ },
+ .ops = {
+ .notify = ssam_kip_sw_notif,
+ .get_state = ssam_kip_get_cover_state,
+ .state_name = ssam_kip_cover_state_name,
+ .state_is_tablet_mode = ssam_kip_cover_state_is_tablet_mode,
+ },
+ .event = {
+ .reg = SSAM_EVENT_REGISTRY_SAM,
+ .id = {
+ .target_category = SSAM_SSH_TC_KIP,
+ .instance = 0,
+ },
+ .mask = SSAM_EVENT_MASK_TARGET,
+ },
+};
+
+
+/* -- SSAM POS tablet switch implementation. -------------------------------- */
+
+static bool tablet_mode_in_slate_state = true;
+module_param(tablet_mode_in_slate_state, bool, 0644);
+MODULE_PARM_DESC(tablet_mode_in_slate_state, "Enable tablet mode in slate device posture, default is 'true'");
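(As with any module parameter, this can presumably be set at boot via
surface_aggregator_tabletsw.tablet_mode_in_slate_state=<0|1> on the kernel
command line, or changed at runtime under
/sys/module/surface_aggregator_tabletsw/parameters/, since it is registered
with mode 0644.)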
+
+#define SSAM_EVENT_POS_CID_POSTURE_CHANGED 0x03
+#define SSAM_POS_MAX_SOURCES 4
+
+enum ssam_pos_state {
+ SSAM_POS_POSTURE_LID_CLOSED = 0x00,
+ SSAM_POS_POSTURE_LAPTOP = 0x01,
+ SSAM_POS_POSTURE_SLATE = 0x02,
+ SSAM_POS_POSTURE_TABLET = 0x03,
+};
+
+struct ssam_sources_list {
+ __le32 count;
+ __le32 id[SSAM_POS_MAX_SOURCES];
+} __packed;
+
+static const char *ssam_pos_state_name(struct ssam_tablet_sw *sw, u32 state)
+{
+ switch (state) {
+ case SSAM_POS_POSTURE_LID_CLOSED:
+ return "closed";
+
+ case SSAM_POS_POSTURE_LAPTOP:
+ return "laptop";
+
+ case SSAM_POS_POSTURE_SLATE:
+ return "slate";
+
+ case SSAM_POS_POSTURE_TABLET:
+ return "tablet";
+
+ default:
+ dev_warn(&sw->sdev->dev, "unknown device posture: %u\n", state);
+ return "<unknown>";
+ }
+}
+
+static bool ssam_pos_state_is_tablet_mode(struct ssam_tablet_sw *sw, u32 state)
+{
+ switch (state) {
+ case SSAM_POS_POSTURE_LAPTOP:
+ case SSAM_POS_POSTURE_LID_CLOSED:
+ return false;
+
+ case SSAM_POS_POSTURE_SLATE:
+ return tablet_mode_in_slate_state;
+
+ case SSAM_POS_POSTURE_TABLET:
+ return true;
+
+ default:
+ dev_warn(&sw->sdev->dev, "unknown device posture: %u\n", state);
+ return true;
+ }
+}
+
+static int ssam_pos_get_sources_list(struct ssam_tablet_sw *sw, struct ssam_sources_list *sources)
+{
+ struct ssam_request rqst;
+ struct ssam_response rsp;
+ int status;
+
+ rqst.target_category = SSAM_SSH_TC_POS;
+ rqst.target_id = 0x01;
+ rqst.command_id = 0x01;
+ rqst.instance_id = 0x00;
+ rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
+ rqst.length = 0;
+ rqst.payload = NULL;
+
+ rsp.capacity = sizeof(*sources);
+ rsp.length = 0;
+ rsp.pointer = (u8 *)sources;
+
+ status = ssam_retry(ssam_request_sync_onstack, sw->sdev->ctrl, &rqst, &rsp, 0);
+ if (status)
+ return status;
+
+ /* We need at least the 'sources->count' field. */
+ if (rsp.length < sizeof(__le32)) {
+ dev_err(&sw->sdev->dev, "received source list response is too small\n");
+ return -EPROTO;
+ }
+
+ /* Make sure 'sources->count' matches with the response length. */
+ if (get_unaligned_le32(&sources->count) * sizeof(__le32) + sizeof(__le32) != rsp.length) {
+ dev_err(&sw->sdev->dev, "mismatch between number of sources and response size\n");
+ return -EPROTO;
+ }
+
+ return 0;
+}
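In other words, a well-formed response is a 4-byte count followed by count
4-byte source IDs; the common single-source case is 8 bytes total, and any
response where count * 4 + 4 != rsp.length is rejected with -EPROTO.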
+
+static int ssam_pos_get_source(struct ssam_tablet_sw *sw, u32 *source_id)
+{
+ struct ssam_sources_list sources = {};
+ int status;
+
+ status = ssam_pos_get_sources_list(sw, &sources);
+ if (status)
+ return status;
+
+ if (get_unaligned_le32(&sources.count) == 0) {
+ dev_err(&sw->sdev->dev, "no posture sources found\n");
+ return -ENODEV;
+ }
+
+ /*
+ * We currently don't know what to do with more than one posture
+ * source. At the moment, only one source seems to be used/provided.
+ * The WARN_ON() here should hopefully let us know quickly once there
+ * is a device that provides multiple sources, at which point we can
+ * then try to figure out how to handle them.
+ */
+ WARN_ON(get_unaligned_le32(&sources.count) > 1);
+
+ *source_id = get_unaligned_le32(&sources.id[0]);
+ return 0;
+}
+
+SSAM_DEFINE_SYNC_REQUEST_WR(__ssam_pos_get_posture_for_source, __le32, __le32, {
+ .target_category = SSAM_SSH_TC_POS,
+ .target_id = 0x01,
+ .command_id = 0x02,
+ .instance_id = 0x00,
+});
+
+static int ssam_pos_get_posture_for_source(struct ssam_tablet_sw *sw, u32 source_id, u32 *posture)
+{
+ __le32 source_le = cpu_to_le32(source_id);
+ __le32 rspval_le = 0;
+ int status;
+
+ status = ssam_retry(__ssam_pos_get_posture_for_source, sw->sdev->ctrl,
+ &source_le, &rspval_le);
+ if (status)
+ return status;
+
+ *posture = le32_to_cpu(rspval_le);
+ return 0;
+}
+
+static int ssam_pos_get_posture(struct ssam_tablet_sw *sw, u32 *state)
+{
+ u32 source_id;
+ int status;
+
+ status = ssam_pos_get_source(sw, &source_id);
+ if (status) {
+ dev_err(&sw->sdev->dev, "failed to get posture source ID: %d\n", status);
+ return status;
+ }
+
+ status = ssam_pos_get_posture_for_source(sw, source_id, state);
+ if (status) {
+ dev_err(&sw->sdev->dev, "failed to get posture value for source %u: %d\n",
+ source_id, status);
+ return status;
+ }
+
+ return 0;
+}
+
+static u32 ssam_pos_sw_notif(struct ssam_event_notifier *nf, const struct ssam_event *event)
+{
+ struct ssam_tablet_sw *sw = container_of(nf, struct ssam_tablet_sw, notif);
+
+ if (event->command_id != SSAM_EVENT_POS_CID_POSTURE_CHANGED)
+ return 0; /* Return "unhandled". */
+
+ if (event->length != sizeof(__le32) * 3)
+ dev_warn(&sw->sdev->dev, "unexpected payload size: %u\n", event->length);
+
+ schedule_work(&sw->update_work);
+ return SSAM_NOTIF_HANDLED;
+}
+
+static const struct ssam_tablet_sw_desc ssam_pos_sw_desc = {
+ .dev = {
+ .name = "Microsoft Surface POS Tablet Mode Switch",
+ .phys = "ssam/01:26:01:00:01/input0",
+ },
+ .ops = {
+ .notify = ssam_pos_sw_notif,
+ .get_state = ssam_pos_get_posture,
+ .state_name = ssam_pos_state_name,
+ .state_is_tablet_mode = ssam_pos_state_is_tablet_mode,
+ },
+ .event = {
+ .reg = SSAM_EVENT_REGISTRY_SAM,
+ .id = {
+ .target_category = SSAM_SSH_TC_POS,
+ .instance = 0,
+ },
+ .mask = SSAM_EVENT_MASK_TARGET,
+ },
+};
+
+
+/* -- Driver registration. -------------------------------------------------- */
+
+static const struct ssam_device_id ssam_tablet_sw_match[] = {
+ { SSAM_SDEV(KIP, 0x01, 0x00, 0x01), (unsigned long)&ssam_kip_sw_desc },
+ { SSAM_SDEV(POS, 0x01, 0x00, 0x01), (unsigned long)&ssam_pos_sw_desc },
+ { },
+};
+MODULE_DEVICE_TABLE(ssam, ssam_tablet_sw_match);
+
+static struct ssam_device_driver ssam_tablet_sw_driver = {
+ .probe = ssam_tablet_sw_probe,
+ .remove = ssam_tablet_sw_remove,
+ .match_table = ssam_tablet_sw_match,
+ .driver = {
+ .name = "surface_aggregator_tablet_mode_switch",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .pm = &ssam_tablet_sw_pm_ops,
+ },
+};
+module_ssam_device_driver(ssam_tablet_sw_driver);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("Tablet mode switch driver for Surface devices using the Surface Aggregator Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/surface/surface_dtx.c b/drivers/platform/surface/surface_dtx.c
index 1203b9a82993..ed36944467f9 100644
--- a/drivers/platform/surface/surface_dtx.c
+++ b/drivers/platform/surface/surface_dtx.c
@@ -8,7 +8,7 @@
* acknowledge (to speed things up), abort (e.g. in case the dGPU is still in
* use), or request detachment via user-space.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <linux/fs.h>
diff --git a/drivers/platform/surface/surface_gpe.c b/drivers/platform/surface/surface_gpe.c
index ec66fde28e75..c219b840d491 100644
--- a/drivers/platform/surface/surface_gpe.c
+++ b/drivers/platform/surface/surface_gpe.c
@@ -4,7 +4,7 @@
* properly configuring the respective GPEs. Required for wakeup via lid on
* newer Intel-based Microsoft Surface devices.
*
- * Copyright (C) 2020 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2020-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -172,6 +172,18 @@ static const struct dmi_system_id dmi_lid_device_table[] = {
.driver_data = (void *)lid_device_props_l4D,
},
{
+ .ident = "Surface Laptop 4 (Intel 13\")",
+ .matches = {
+ /*
+ * We match for SKU here due to different variants: The
+ * AMD (15") version does not rely on GPEs.
+ */
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Laptop_4_1950:1951"),
+ },
+ .driver_data = (void *)lid_device_props_l4B,
+ },
+ {
.ident = "Surface Laptop Studio",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
diff --git a/drivers/platform/surface/surface_hotplug.c b/drivers/platform/surface/surface_hotplug.c
index cfcc15cfbacb..f004a2495201 100644
--- a/drivers/platform/surface/surface_hotplug.c
+++ b/drivers/platform/surface/surface_hotplug.c
@@ -10,7 +10,7 @@
* Event signaling is handled via ACPI, which will generate the appropriate
* device-check notifications to be picked up by the PCIe hot-plug driver.
*
- * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <linux/acpi.h>
diff --git a/drivers/platform/surface/surface_platform_profile.c b/drivers/platform/surface/surface_platform_profile.c
index 6373d3b5eb7f..fbf2e11fd6ce 100644
--- a/drivers/platform/surface/surface_platform_profile.c
+++ b/drivers/platform/surface/surface_platform_profile.c
@@ -3,7 +3,7 @@
* Surface Platform Profile / Performance Mode driver for Surface System
* Aggregator Module (thermal subsystem).
*
- * Copyright (C) 2021 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2021-2022 Maximilian Luz <luzmaximilian@gmail.com>
*/
#include <asm/unaligned.h>
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index bc4013e950ed..f2f98e942cf2 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -177,17 +177,15 @@ config ACER_WIRELESS
config ACER_WMI
tristate "Acer WMI Laptop Extras"
- depends on ACPI
- select LEDS_CLASS
- select NEW_LEDS
depends on BACKLIGHT_CLASS_DEVICE
depends on SERIO_I8042
depends on INPUT
depends on RFKILL || RFKILL = n
depends on ACPI_WMI
+ select ACPI_VIDEO
select INPUT_SPARSEKMAP
- # Acer WMI depends on ACPI_VIDEO when ACPI is enabled
- select ACPI_VIDEO if ACPI
+ select LEDS_CLASS
+ select NEW_LEDS
help
This is a driver for newer Acer (and Wistron) laptops. It adds
wireless radio and bluetooth control, and on some laptops,
@@ -196,32 +194,7 @@ config ACER_WMI
If you have an ACPI-WMI compatible Acer/ Wistron laptop, say Y or M
here.
-config AMD_PMC
- tristate "AMD SoC PMC driver"
- depends on ACPI && PCI && RTC_CLASS
- help
- The driver provides support for AMD Power Management Controller
- primarily responsible for S2Idle transactions that are driven from
- a platform firmware running on SMU. This driver also provides a debug
- mechanism to investigate the S2Idle transactions and failures.
-
- Say Y or M here if you have a notebook powered by AMD RYZEN CPU/APU.
-
- If you choose to compile this driver as a module the module will be
- called amd-pmc.
-
-config AMD_HSMP
- tristate "AMD HSMP Driver"
- depends on AMD_NB && X86_64
- help
- The driver provides a way for user space tools to monitor and manage
- system management functionality on EPYC server CPUs from AMD.
-
- Host System Management Port (HSMP) interface is a mailbox interface
- between the x86 core and the System Management Unit (SMU) firmware.
-
- If you choose to compile this driver as a module the module will be
- called amd_hsmp.
+source "drivers/platform/x86/amd/Kconfig"
config ADV_SWBUTTON
tristate "Advantech ACPI Software Button Driver"
@@ -300,6 +273,8 @@ config ASUS_WMI
select INPUT_SPARSEKMAP
select LEDS_CLASS
select NEW_LEDS
+ select LEDS_TRIGGERS
+ select LEDS_TRIGGER_AUDIO
select ACPI_PLATFORM_PROFILE
help
Say Y here if you have a WMI aware Asus laptop (like Eee PCs or new
@@ -1164,7 +1139,14 @@ config WINMATE_FM07_KEYS
endif # X86_PLATFORM_DEVICES
-config PMC_ATOM
- def_bool y
- depends on PCI
- select COMMON_CLK
+config P2SB
+ bool "Primary to Sideband (P2SB) bridge access support"
+ depends on PCI && X86
+ help
+ The Primary to Sideband (P2SB) bridge is an interface to some
+ PCI devices connected through it. In particular, the SPI NOR
+ controller in the Intel Apollo Lake SoC is one such device.
+
+ The main purpose of this library is to unhide the P2SB device on
+ platforms where firmware keeps it hidden, so that the devices
+ behind it can be accessed.
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 4a59f47a46e2..5a428caa654a 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -23,8 +23,7 @@ obj-$(CONFIG_ACER_WIRELESS) += acer-wireless.o
obj-$(CONFIG_ACER_WMI) += acer-wmi.o
# AMD
-obj-$(CONFIG_AMD_PMC) += amd-pmc.o
-obj-$(CONFIG_AMD_HSMP) += amd_hsmp.o
+obj-y += amd/
# Advantech
obj-$(CONFIG_ADV_SWBUTTON) += adv_swbutton.o
@@ -120,13 +119,17 @@ obj-$(CONFIG_X86_ANDROID_TABLETS) += x86-android-tablets.o
# Intel uncore drivers
obj-$(CONFIG_INTEL_IPS) += intel_ips.o
+# Intel miscellaneous drivers
+intel_p2sb-y := p2sb.o
+obj-$(CONFIG_P2SB) += intel_p2sb.o
+
# Intel PMIC / PMC / P-Unit devices
obj-$(CONFIG_INTEL_SCU_IPC) += intel_scu_ipc.o
obj-$(CONFIG_INTEL_SCU_PCI) += intel_scu_pcidrv.o
obj-$(CONFIG_INTEL_SCU_PLATFORM) += intel_scu_pltdrv.o
obj-$(CONFIG_INTEL_SCU_WDT) += intel_scu_wdt.o
obj-$(CONFIG_INTEL_SCU_IPC_UTIL) += intel_scu_ipcutil.o
-obj-$(CONFIG_PMC_ATOM) += pmc_atom.o
+obj-$(CONFIG_X86_INTEL_LPSS) += pmc_atom.o
# Siemens Simatic Industrial PCs
obj-$(CONFIG_SIEMENS_SIMATIC_IPC) += simatic-ipc.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 9c6943e401a6..e0230ea0cb7e 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -1615,12 +1615,7 @@ static int read_brightness(struct backlight_device *bd)
static int update_bl_status(struct backlight_device *bd)
{
- int intensity = bd->props.brightness;
-
- if (bd->props.power != FB_BLANK_UNBLANK)
- intensity = 0;
- if (bd->props.fb_blank != FB_BLANK_UNBLANK)
- intensity = 0;
+ int intensity = backlight_get_brightness(bd);
set_u32(intensity, ACER_CAP_BRIGHTNESS);
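This and the following backlight hunks rely on the generic
backlight_get_brightness() helper, which folds the blanking checks these
drivers used to open-code. As a rough sketch of the helper (assumed shape,
not this driver's code):

	static inline int backlight_get_brightness(const struct backlight_device *bd)
	{
		/* 0 while the display is blanked/suspended, else the requested level. */
		if (backlight_is_blank(bd))
			return 0;
		return bd->props.brightness;
	}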
diff --git a/drivers/platform/x86/amd/Kconfig b/drivers/platform/x86/amd/Kconfig
new file mode 100644
index 000000000000..c0d0a3c5170c
--- /dev/null
+++ b/drivers/platform/x86/amd/Kconfig
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# AMD x86 Platform Specific Drivers
+#
+
+config AMD_PMC
+ tristate "AMD SoC PMC driver"
+ depends on ACPI && PCI && RTC_CLASS
+ help
+ The driver provides support for AMD Power Management Controller
+ primarily responsible for S2Idle transactions that are driven from
+ a platform firmware running on SMU. This driver also provides a debug
+ mechanism to investigate the S2Idle transactions and failures.
+
+ Say Y or M here if you have a notebook powered by AMD RYZEN CPU/APU.
+
+ If you choose to compile this driver as a module the module will be
+ called amd-pmc.
+
+config AMD_HSMP
+ tristate "AMD HSMP Driver"
+ depends on AMD_NB && X86_64
+ help
+ The driver provides a way for user space tools to monitor and manage
+ system management functionality on EPYC server CPUs from AMD.
+
+ Host System Management Port (HSMP) interface is a mailbox interface
+ between the x86 core and the System Management Unit (SMU) firmware.
+
+ If you choose to compile this driver as a module the module will be
+ called amd_hsmp.
diff --git a/drivers/platform/x86/amd/Makefile b/drivers/platform/x86/amd/Makefile
new file mode 100644
index 000000000000..a03fbb08e808
--- /dev/null
+++ b/drivers/platform/x86/amd/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for drivers/platform/x86/amd
+# AMD x86 Platform-Specific Drivers
+#
+
+amd-pmc-y := pmc.o
+obj-$(CONFIG_AMD_PMC) += amd-pmc.o
+amd_hsmp-y := hsmp.o
+obj-$(CONFIG_AMD_HSMP) += amd_hsmp.o
diff --git a/drivers/platform/x86/amd_hsmp.c b/drivers/platform/x86/amd/hsmp.c
index a0c54b838c11..a0c54b838c11 100644
--- a/drivers/platform/x86/amd_hsmp.c
+++ b/drivers/platform/x86/amd/hsmp.c
diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd/pmc.c
index 700eb19e8450..700eb19e8450 100644
--- a/drivers/platform/x86/amd-pmc.c
+++ b/drivers/platform/x86/amd/pmc.c
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
index 57553f9b4d1d..ffe98a18440b 100644
--- a/drivers/platform/x86/apple-gmux.c
+++ b/drivers/platform/x86/apple-gmux.c
@@ -291,10 +291,7 @@ static int gmux_get_brightness(struct backlight_device *bd)
static int gmux_update_status(struct backlight_device *bd)
{
struct apple_gmux_data *gmux_data = bl_get_data(bd);
- u32 brightness = bd->props.brightness;
-
- if (bd->props.state & BL_CORE_SUSPENDED)
- return 0;
+ u32 brightness = backlight_get_brightness(bd);
gmux_write32(gmux_data, GMUX_PORT_BRIGHTNESS, brightness);
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 62ce198a3463..89b604e04d7f 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -208,6 +208,7 @@ struct asus_wmi {
int kbd_led_wk;
struct led_classdev lightbar_led;
int lightbar_led_wk;
+ struct led_classdev micmute_led;
struct workqueue_struct *led_workqueue;
struct work_struct tpd_led_work;
struct work_struct wlan_led_work;
@@ -1028,12 +1029,23 @@ static enum led_brightness lightbar_led_get(struct led_classdev *led_cdev)
return result & ASUS_WMI_DSTS_LIGHTBAR_MASK;
}
+static int micmute_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ int state = brightness != LED_OFF;
+ int err;
+
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_MICMUTE_LED, state, NULL);
+ return err < 0 ? err : 0;
+}
+
static void asus_wmi_led_exit(struct asus_wmi *asus)
{
led_classdev_unregister(&asus->kbd_led);
led_classdev_unregister(&asus->tpd_led);
led_classdev_unregister(&asus->wlan_led);
led_classdev_unregister(&asus->lightbar_led);
+ led_classdev_unregister(&asus->micmute_led);
if (asus->led_workqueue)
destroy_workqueue(asus->led_workqueue);
@@ -1105,6 +1117,19 @@ static int asus_wmi_led_init(struct asus_wmi *asus)
&asus->lightbar_led);
}
+ if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_MICMUTE_LED)) {
+ asus->micmute_led.name = "asus::micmute";
+ asus->micmute_led.max_brightness = 1;
+ asus->micmute_led.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE);
+ asus->micmute_led.brightness_set_blocking = micmute_led_set;
+ asus->micmute_led.default_trigger = "audio-micmute";
+
+ rv = led_classdev_register(&asus->platform_device->dev,
+ &asus->micmute_led);
+ if (rv)
+ goto error;
+ }
+
error:
if (rv)
asus_wmi_led_exit(asus);
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index ab610376fdad..0942f50bd793 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -324,9 +324,7 @@ static int bl_update_status(struct backlight_device *b)
if (ret)
return ret;
- set_backlight_state((b->props.power == FB_BLANK_UNBLANK)
- && !(b->props.state & BL_CORE_SUSPENDED)
- && !(b->props.state & BL_CORE_FBBLANK));
+ set_backlight_state(!backlight_is_blank(b));
return 0;
}
diff --git a/drivers/platform/x86/dell/Kconfig b/drivers/platform/x86/dell/Kconfig
index fe224a54f24c..25421e061c47 100644
--- a/drivers/platform/x86/dell/Kconfig
+++ b/drivers/platform/x86/dell/Kconfig
@@ -5,7 +5,6 @@
menuconfig X86_PLATFORM_DRIVERS_DELL
bool "Dell X86 Platform Specific Device Drivers"
- depends on X86_PLATFORM_DEVICES
help
Say Y here to get to see options for device drivers for various
Dell x86 platforms, including vendor-specific laptop extension drivers.
diff --git a/drivers/platform/x86/intel/pmt/class.c b/drivers/platform/x86/intel/pmt/class.c
index 1c9e3f3ea41c..53d7fd2943b4 100644
--- a/drivers/platform/x86/intel/pmt/class.c
+++ b/drivers/platform/x86/intel/pmt/class.c
@@ -20,25 +20,16 @@
#define PMT_XA_MAX INT_MAX
#define PMT_XA_LIMIT XA_LIMIT(PMT_XA_START, PMT_XA_MAX)
-/*
- * Early implementations of PMT on client platforms have some
- * differences from the server platforms (which use the Out Of Band
- * Management Services Module OOBMSM). This list tracks those
- * platforms as needed to handle those differences. Newer client
- * platforms are expected to be fully compatible with server.
- */
-static const struct pci_device_id pmt_telem_early_client_pci_ids[] = {
- { PCI_VDEVICE(INTEL, 0x467d) }, /* ADL */
- { PCI_VDEVICE(INTEL, 0x490e) }, /* DG1 */
- { PCI_VDEVICE(INTEL, 0x9a0d) }, /* TGL */
- { }
-};
-
bool intel_pmt_is_early_client_hw(struct device *dev)
{
- struct pci_dev *parent = to_pci_dev(dev->parent);
+ struct intel_vsec_device *ivdev = dev_to_ivdev(dev);
- return !!pci_match_id(pmt_telem_early_client_pci_ids, parent);
+ /*
+ * Early implementations of PMT on client platforms have some
+ * differences from the server platforms (which use the Out Of Band
+ * Management Services Module OOBMSM).
+ */
+ return !!(ivdev->info->quirks & VSEC_QUIRK_EARLY_HW);
}
EXPORT_SYMBOL_GPL(intel_pmt_is_early_client_hw);
diff --git a/drivers/platform/x86/intel/pmt/telemetry.c b/drivers/platform/x86/intel/pmt/telemetry.c
index f73ecfd4a309..5e4009c05ecf 100644
--- a/drivers/platform/x86/intel/pmt/telemetry.c
+++ b/drivers/platform/x86/intel/pmt/telemetry.c
@@ -23,12 +23,19 @@
#define TELEM_GUID_OFFSET 0x4
#define TELEM_BASE_OFFSET 0x8
#define TELEM_ACCESS(v) ((v) & GENMASK(3, 0))
+#define TELEM_TYPE(v) (((v) & GENMASK(7, 4)) >> 4)
/* size is in bytes */
#define TELEM_SIZE(v) (((v) & GENMASK(27, 12)) >> 10)
/* Used by client hardware to identify a fixed telemetry entry */
#define TELEM_CLIENT_FIXED_BLOCK_GUID 0x10000000
+enum telem_type {
+ TELEM_TYPE_PUNIT = 0,
+ TELEM_TYPE_CRASHLOG,
+ TELEM_TYPE_PUNIT_FIXED,
+};
+
struct pmt_telem_priv {
int num_entries;
struct intel_pmt_entry entry[];
@@ -39,10 +46,15 @@ static bool pmt_telem_region_overlaps(struct intel_pmt_entry *entry,
{
u32 guid = readl(entry->disc_table + TELEM_GUID_OFFSET);
- if (guid != TELEM_CLIENT_FIXED_BLOCK_GUID)
- return false;
+ if (intel_pmt_is_early_client_hw(dev)) {
+ u32 type = TELEM_TYPE(readl(entry->disc_table));
+
+ if ((type == TELEM_TYPE_PUNIT_FIXED) ||
+ (guid == TELEM_CLIENT_FIXED_BLOCK_GUID))
+ return true;
+ }
- return intel_pmt_is_early_client_hw(dev);
+ return false;
}
static int pmt_telem_header_decode(struct intel_pmt_entry *entry,
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
index e8424e70d81d..fd102678c75f 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
@@ -277,29 +277,38 @@ static int isst_if_get_platform_info(void __user *argp)
return 0;
}
+#define ISST_MAX_BUS_NUMBER 2
struct isst_if_cpu_info {
/* For BUS 0 and BUS 1 only, which we need for PUNIT interface */
- int bus_info[2];
- struct pci_dev *pci_dev[2];
+ int bus_info[ISST_MAX_BUS_NUMBER];
+ struct pci_dev *pci_dev[ISST_MAX_BUS_NUMBER];
int punit_cpu_id;
int numa_node;
};
+struct isst_if_pkg_info {
+ struct pci_dev *pci_dev[ISST_MAX_BUS_NUMBER];
+};
+
static struct isst_if_cpu_info *isst_cpu_info;
+static struct isst_if_pkg_info *isst_pkg_info;
+
#define ISST_MAX_PCI_DOMAINS 8
static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
{
struct pci_dev *matched_pci_dev = NULL;
struct pci_dev *pci_dev = NULL;
- int no_matches = 0;
+ int no_matches = 0, pkg_id;
int i, bus_number;
- if (bus_no < 0 || bus_no > 1 || cpu < 0 || cpu >= nr_cpu_ids ||
- cpu >= num_possible_cpus())
+ if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER || cpu < 0 ||
+ cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
return NULL;
+ pkg_id = topology_physical_package_id(cpu);
+
bus_number = isst_cpu_info[cpu].bus_info[bus_no];
if (bus_number < 0)
return NULL;
@@ -324,6 +333,8 @@ static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn
}
if (node == isst_cpu_info[cpu].numa_node) {
+ isst_pkg_info[pkg_id].pci_dev[bus_no] = _pci_dev;
+
pci_dev = _pci_dev;
break;
}
@@ -342,6 +353,10 @@ static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn
if (!pci_dev && no_matches == 1)
pci_dev = matched_pci_dev;
+ /* Return pci_dev pointer for any matched CPU in the package */
+ if (!pci_dev)
+ pci_dev = isst_pkg_info[pkg_id].pci_dev[bus_no];
+
return pci_dev;
}
@@ -361,8 +376,8 @@ struct pci_dev *isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
{
struct pci_dev *pci_dev;
- if (bus_no < 0 || bus_no > 1 || cpu < 0 || cpu >= nr_cpu_ids ||
- cpu >= num_possible_cpus())
+ if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER || cpu < 0 ||
+ cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
return NULL;
pci_dev = isst_cpu_info[cpu].pci_dev[bus_no];
@@ -417,10 +432,19 @@ static int isst_if_cpu_info_init(void)
if (!isst_cpu_info)
return -ENOMEM;
+ isst_pkg_info = kcalloc(topology_max_packages(),
+ sizeof(*isst_pkg_info),
+ GFP_KERNEL);
+ if (!isst_pkg_info) {
+ kfree(isst_cpu_info);
+ return -ENOMEM;
+ }
+
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
"platform/x86/isst-if:online",
isst_if_cpu_online, NULL);
if (ret < 0) {
+ kfree(isst_pkg_info);
kfree(isst_cpu_info);
return ret;
}
@@ -433,6 +457,7 @@ static int isst_if_cpu_info_init(void)
static void isst_if_cpu_info_exit(void)
{
cpuhp_remove_state(isst_if_online_id);
+ kfree(isst_pkg_info);
kfree(isst_cpu_info);
};
diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c
index bed436bf181f..bb81b8b1f7e9 100644
--- a/drivers/platform/x86/intel/vsec.c
+++ b/drivers/platform/x86/intel/vsec.c
@@ -15,6 +15,7 @@
#include <linux/auxiliary_bus.h>
#include <linux/bits.h>
+#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/module.h>
@@ -30,9 +31,13 @@
#define INTEL_DVSEC_TABLE_BAR(x) ((x) & GENMASK(2, 0))
#define INTEL_DVSEC_TABLE_OFFSET(x) ((x) & GENMASK(31, 3))
#define TABLE_OFFSET_SHIFT 3
+#define PMT_XA_START 0
+#define PMT_XA_MAX INT_MAX
+#define PMT_XA_LIMIT XA_LIMIT(PMT_XA_START, PMT_XA_MAX)
static DEFINE_IDA(intel_vsec_ida);
static DEFINE_IDA(intel_vsec_sdsi_ida);
+static DEFINE_XARRAY_ALLOC(auxdev_array);
/**
* struct intel_vsec_header - Common fields of Intel VSEC and DVSEC registers.
@@ -54,12 +59,6 @@ struct intel_vsec_header {
u32 offset;
};
-/* Platform specific data */
-struct intel_vsec_platform_info {
- struct intel_vsec_header **capabilities;
- unsigned long quirks;
-};
-
enum intel_vsec_id {
VSEC_ID_TELEMETRY = 2,
VSEC_ID_WATCHER = 3,
@@ -138,7 +137,7 @@ static int intel_vsec_add_aux(struct pci_dev *pdev, struct intel_vsec_device *in
const char *name)
{
struct auxiliary_device *auxdev = &intel_vsec_dev->auxdev;
- int ret;
+ int ret, id;
ret = ida_alloc(intel_vsec_dev->ida, GFP_KERNEL);
if (ret < 0) {
@@ -165,14 +164,26 @@ static int intel_vsec_add_aux(struct pci_dev *pdev, struct intel_vsec_device *in
return ret;
}
- return devm_add_action_or_reset(&pdev->dev, intel_vsec_remove_aux, auxdev);
+ ret = devm_add_action_or_reset(&pdev->dev, intel_vsec_remove_aux,
+ auxdev);
+ if (ret < 0)
+ return ret;
+
+ /* Add auxdev to list */
+ ret = xa_alloc(&auxdev_array, &id, intel_vsec_dev, PMT_XA_LIMIT,
+ GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ return 0;
}
static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *header,
- unsigned long quirks)
+ struct intel_vsec_platform_info *info)
{
struct intel_vsec_device *intel_vsec_dev;
struct resource *res, *tmp;
+ unsigned long quirks = info->quirks;
int i;
if (!intel_vsec_allowed(header->id) || intel_vsec_disabled(header->id, quirks))
@@ -216,7 +227,7 @@ static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *he
intel_vsec_dev->pcidev = pdev;
intel_vsec_dev->resource = res;
intel_vsec_dev->num_resources = header->num_entries;
- intel_vsec_dev->quirks = quirks;
+ intel_vsec_dev->info = info;
if (header->id == VSEC_ID_SDSI)
intel_vsec_dev->ida = &intel_vsec_sdsi_ida;
@@ -226,14 +237,15 @@ static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *he
return intel_vsec_add_aux(pdev, intel_vsec_dev, intel_vsec_name(header->id));
}
-static bool intel_vsec_walk_header(struct pci_dev *pdev, unsigned long quirks,
- struct intel_vsec_header **header)
+static bool intel_vsec_walk_header(struct pci_dev *pdev,
+ struct intel_vsec_platform_info *info)
{
+ struct intel_vsec_header **header = info->capabilities;
bool have_devices = false;
int ret;
for ( ; *header; header++) {
- ret = intel_vsec_add_dev(pdev, *header, quirks);
+ ret = intel_vsec_add_dev(pdev, *header, info);
if (ret)
dev_info(&pdev->dev, "Could not add device for DVSEC id %d\n",
(*header)->id);
@@ -244,7 +256,8 @@ static bool intel_vsec_walk_header(struct pci_dev *pdev, unsigned long quirks,
return have_devices;
}
-static bool intel_vsec_walk_dvsec(struct pci_dev *pdev, unsigned long quirks)
+static bool intel_vsec_walk_dvsec(struct pci_dev *pdev,
+ struct intel_vsec_platform_info *info)
{
bool have_devices = false;
int pos = 0;
@@ -283,7 +296,7 @@ static bool intel_vsec_walk_dvsec(struct pci_dev *pdev, unsigned long quirks)
pci_read_config_dword(pdev, pos + PCI_DVSEC_HEADER2, &hdr);
header.id = PCI_DVSEC_HEADER2_ID(hdr);
- ret = intel_vsec_add_dev(pdev, &header, quirks);
+ ret = intel_vsec_add_dev(pdev, &header, info);
if (ret)
continue;
@@ -293,7 +306,8 @@ static bool intel_vsec_walk_dvsec(struct pci_dev *pdev, unsigned long quirks)
return have_devices;
}
-static bool intel_vsec_walk_vsec(struct pci_dev *pdev, unsigned long quirks)
+static bool intel_vsec_walk_vsec(struct pci_dev *pdev,
+ struct intel_vsec_platform_info *info)
{
bool have_devices = false;
int pos = 0;
@@ -327,7 +341,7 @@ static bool intel_vsec_walk_vsec(struct pci_dev *pdev, unsigned long quirks)
header.tbir = INTEL_DVSEC_TABLE_BAR(table);
header.offset = INTEL_DVSEC_TABLE_OFFSET(table);
- ret = intel_vsec_add_dev(pdev, &header, quirks);
+ ret = intel_vsec_add_dev(pdev, &header, info);
if (ret)
continue;
@@ -341,25 +355,25 @@ static int intel_vsec_pci_probe(struct pci_dev *pdev, const struct pci_device_id
{
struct intel_vsec_platform_info *info;
bool have_devices = false;
- unsigned long quirks = 0;
int ret;
ret = pcim_enable_device(pdev);
if (ret)
return ret;
+ pci_save_state(pdev);
info = (struct intel_vsec_platform_info *)id->driver_data;
- if (info)
- quirks = info->quirks;
+ if (!info)
+ return -EINVAL;
- if (intel_vsec_walk_dvsec(pdev, quirks))
+ if (intel_vsec_walk_dvsec(pdev, info))
have_devices = true;
- if (intel_vsec_walk_vsec(pdev, quirks))
+ if (intel_vsec_walk_vsec(pdev, info))
have_devices = true;
if (info && (info->quirks & VSEC_QUIRK_NO_DVSEC) &&
- intel_vsec_walk_header(pdev, quirks, info->capabilities))
+ intel_vsec_walk_header(pdev, info))
have_devices = true;
if (!have_devices)
@@ -370,7 +384,8 @@ static int intel_vsec_pci_probe(struct pci_dev *pdev, const struct pci_device_id
/* TGL info */
static const struct intel_vsec_platform_info tgl_info = {
- .quirks = VSEC_QUIRK_NO_WATCHER | VSEC_QUIRK_NO_CRASHLOG | VSEC_QUIRK_TABLE_SHIFT,
+ .quirks = VSEC_QUIRK_NO_WATCHER | VSEC_QUIRK_NO_CRASHLOG |
+ VSEC_QUIRK_TABLE_SHIFT | VSEC_QUIRK_EARLY_HW,
};
/* DG1 info */
@@ -390,26 +405,89 @@ static struct intel_vsec_header *dg1_capabilities[] = {
static const struct intel_vsec_platform_info dg1_info = {
.capabilities = dg1_capabilities,
- .quirks = VSEC_QUIRK_NO_DVSEC,
+ .quirks = VSEC_QUIRK_NO_DVSEC | VSEC_QUIRK_EARLY_HW,
};
#define PCI_DEVICE_ID_INTEL_VSEC_ADL 0x467d
#define PCI_DEVICE_ID_INTEL_VSEC_DG1 0x490e
#define PCI_DEVICE_ID_INTEL_VSEC_OOBMSM 0x09a7
+#define PCI_DEVICE_ID_INTEL_VSEC_RPL 0xa77d
#define PCI_DEVICE_ID_INTEL_VSEC_TGL 0x9a0d
static const struct pci_device_id intel_vsec_pci_ids[] = {
{ PCI_DEVICE_DATA(INTEL, VSEC_ADL, &tgl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_DG1, &dg1_info) },
- { PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM, NULL) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM, &(struct intel_vsec_platform_info) {}) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_RPL, &tgl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_TGL, &tgl_info) },
{ }
};
MODULE_DEVICE_TABLE(pci, intel_vsec_pci_ids);
+static pci_ers_result_t intel_vsec_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ pci_ers_result_t status = PCI_ERS_RESULT_NEED_RESET;
+
+ dev_info(&pdev->dev, "PCI error detected, state %d", state);
+
+ if (state == pci_channel_io_perm_failure)
+ status = PCI_ERS_RESULT_DISCONNECT;
+ else
+ pci_disable_device(pdev);
+
+ return status;
+}
+
+static pci_ers_result_t intel_vsec_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct intel_vsec_device *intel_vsec_dev;
+ pci_ers_result_t status = PCI_ERS_RESULT_DISCONNECT;
+ const struct pci_device_id *pci_dev_id;
+ unsigned long index;
+
+ dev_info(&pdev->dev, "Resetting PCI slot\n");
+
+ msleep(2000);
+ if (pci_enable_device(pdev)) {
+ dev_info(&pdev->dev,
+ "Failed to re-enable PCI device after reset.\n");
+ goto out;
+ }
+
+ status = PCI_ERS_RESULT_RECOVERED;
+
+ xa_for_each(&auxdev_array, index, intel_vsec_dev) {
+ /* Skip entries that belong to a different PCI device */
+ if (pdev != intel_vsec_dev->pcidev)
+ continue;
+ devm_release_action(&pdev->dev, intel_vsec_remove_aux,
+ &intel_vsec_dev->auxdev);
+ }
+ pci_disable_device(pdev);
+ pci_restore_state(pdev);
+ pci_dev_id = pci_match_id(intel_vsec_pci_ids, pdev);
+ intel_vsec_pci_probe(pdev, pci_dev_id);
+
+out:
+ return status;
+}
+
+static void intel_vsec_pci_resume(struct pci_dev *pdev)
+{
+ dev_info(&pdev->dev, "Done resuming PCI device\n");
+}
+
+static const struct pci_error_handlers intel_vsec_pci_err_handlers = {
+ .error_detected = intel_vsec_pci_error_detected,
+ .slot_reset = intel_vsec_pci_slot_reset,
+ .resume = intel_vsec_pci_resume,
+};
+
static struct pci_driver intel_vsec_pci_driver = {
.name = "intel_vsec",
.id_table = intel_vsec_pci_ids,
.probe = intel_vsec_pci_probe,
+ .err_handler = &intel_vsec_pci_err_handlers,
};
module_pci_driver(intel_vsec_pci_driver);
diff --git a/drivers/platform/x86/intel/vsec.h b/drivers/platform/x86/intel/vsec.h
index 4cc36678e8c5..3deeb05cf394 100644
--- a/drivers/platform/x86/intel/vsec.h
+++ b/drivers/platform/x86/intel/vsec.h
@@ -20,6 +20,15 @@ enum intel_vsec_quirks {
/* DVSEC not present (provided in driver data) */
VSEC_QUIRK_NO_DVSEC = BIT(3),
+
+ /* Platforms requiring quirk in the auxiliary driver */
+ VSEC_QUIRK_EARLY_HW = BIT(4),
+};
+
+/* Platform specific data */
+struct intel_vsec_platform_info {
+ struct intel_vsec_header **capabilities;
+ unsigned long quirks;
};
struct intel_vsec_device {
@@ -27,7 +36,7 @@ struct intel_vsec_device {
struct pci_dev *pcidev;
struct resource *resource;
struct ida *ida;
- unsigned long quirks;
+ struct intel_vsec_platform_info *info;
int num_resources;
};
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index 447044fdcb77..5e072a0666f4 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -34,6 +34,7 @@
#define MLXPLAT_CPLD_LPC_REG_CPLD3_PN1_OFFSET 0x09
#define MLXPLAT_CPLD_LPC_REG_CPLD4_PN_OFFSET 0x0a
#define MLXPLAT_CPLD_LPC_REG_CPLD4_PN1_OFFSET 0x0b
+#define MLXPLAT_CPLD_LPC_REG_RESET_GP2_OFFSET 0x19
#define MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET 0x1c
#define MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET 0x1d
#define MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET 0x1e
@@ -66,9 +67,15 @@
#define MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET 0x43
#define MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET 0x44
#define MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET 0x45
+#define MLXPLAT_CPLD_LPC_REG_GWP_OFFSET 0x4a
+#define MLXPLAT_CPLD_LPC_REG_GWP_EVENT_OFFSET 0x4b
+#define MLXPLAT_CPLD_LPC_REG_GWP_MASK_OFFSET 0x4c
#define MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET 0x50
#define MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET 0x51
#define MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET 0x52
+#define MLXPLAT_CPLD_LPC_REG_ASIC2_HEALTH_OFFSET 0x53
+#define MLXPLAT_CPLD_LPC_REG_ASIC2_EVENT_OFFSET 0x54
+#define MLXPLAT_CPLD_LPC_REG_ASIC2_MASK_OFFSET 0x55
#define MLXPLAT_CPLD_LPC_REG_AGGRLC_OFFSET 0x56
#define MLXPLAT_CPLD_LPC_REG_AGGRLC_MASK_OFFSET 0x57
#define MLXPLAT_CPLD_LPC_REG_PSU_OFFSET 0x58
@@ -143,6 +150,7 @@
#define MLXPLAT_CPLD_LPC_REG_SLOT_QTY_OFFSET 0xfa
#define MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET 0xfb
#define MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET 0xfc
+#define MLXPLAT_CPLD_LPC_REG_CONFIG3_OFFSET 0xfd
#define MLXPLAT_CPLD_LPC_IO_RANGE 0x100
#define MLXPLAT_CPLD_LPC_I2C_CH1_OFF 0xdb
#define MLXPLAT_CPLD_LPC_I2C_CH2_OFF 0xda
@@ -193,6 +201,7 @@
MLXPLAT_CPLD_AGGR_MASK_LC_ACT | \
MLXPLAT_CPLD_AGGR_MASK_LC_SDWN)
#define MLXPLAT_CPLD_LOW_AGGR_MASK_LOW 0xc1
+#define MLXPLAT_CPLD_LOW_AGGR_MASK_ASIC2 BIT(2)
#define MLXPLAT_CPLD_LOW_AGGR_MASK_I2C BIT(6)
#define MLXPLAT_CPLD_PSU_MASK GENMASK(1, 0)
#define MLXPLAT_CPLD_PWR_MASK GENMASK(1, 0)
@@ -204,6 +213,7 @@
#define MLXPLAT_CPLD_LED_LO_NIBBLE_MASK GENMASK(7, 4)
#define MLXPLAT_CPLD_LED_HI_NIBBLE_MASK GENMASK(3, 0)
#define MLXPLAT_CPLD_VOLTREG_UPD_MASK GENMASK(5, 4)
+#define MLXPLAT_CPLD_GWP_MASK GENMASK(0, 0)
#define MLXPLAT_CPLD_I2C_CAP_BIT 0x04
#define MLXPLAT_CPLD_I2C_CAP_MASK GENMASK(5, MLXPLAT_CPLD_I2C_CAP_BIT)
@@ -588,6 +598,15 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_asic_items_data[] = {
},
};
+static struct mlxreg_core_data mlxplat_mlxcpld_default_asic2_items_data[] = {
+ {
+ .label = "asic2",
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC2_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
static struct mlxreg_core_item mlxplat_mlxcpld_default_items[] = {
{
.data = mlxplat_mlxcpld_default_psu_items_data,
@@ -1151,6 +1170,15 @@ static struct mlxreg_core_item mlxplat_mlxcpld_ext_items[] = {
.inversed = 0,
.health = true,
},
+ {
+ .data = mlxplat_mlxcpld_default_asic2_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC2_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic2_items_data),
+ .inversed = 0,
+ .health = true,
+ }
};
static
@@ -1160,7 +1188,7 @@ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_ext_data = {
.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
.mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF | MLXPLAT_CPLD_AGGR_MASK_COMEX,
.cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
- .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
+ .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW | MLXPLAT_CPLD_LOW_AGGR_MASK_ASIC2,
};
static struct mlxreg_core_data mlxplat_mlxcpld_modular_pwr_items_data[] = {
@@ -2004,6 +2032,38 @@ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_modular_data = {
.mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
};
+/* Platform hotplug for NVLink blade systems family data */
+static struct mlxreg_core_data mlxplat_mlxcpld_global_wp_items_data[] = {
+ {
+ .label = "global_wp_grant",
+ .reg = MLXPLAT_CPLD_LPC_REG_GWP_OFFSET,
+ .mask = MLXPLAT_CPLD_GWP_MASK,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
+static struct mlxreg_core_item mlxplat_mlxcpld_nvlink_blade_items[] = {
+ {
+ .data = mlxplat_mlxcpld_global_wp_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_GWP_OFFSET,
+ .mask = MLXPLAT_CPLD_GWP_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_global_wp_items_data),
+ .inversed = 0,
+ .health = false,
+ },
+};
+
+static
+struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_nvlink_blade_data = {
+ .items = mlxplat_mlxcpld_nvlink_blade_items,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_nvlink_blade_items),
+ .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
+ .mask = MLXPLAT_CPLD_AGGR_MASK_COMEX,
+ .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
+ .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
+};
+
/* Platform led default data */
static struct mlxreg_core_data mlxplat_mlxcpld_default_led_data[] = {
{
@@ -2102,6 +2162,25 @@ static struct mlxreg_core_platform_data mlxplat_default_led_wc_data = {
.counter = ARRAY_SIZE(mlxplat_mlxcpld_default_led_wc_data),
};
+/* Platform led default data for water cooling Ethernet switch blade */
+static struct mlxreg_core_data mlxplat_mlxcpld_default_led_eth_wc_blade_data[] = {
+ {
+ .label = "status:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "status:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_default_led_eth_wc_blade_data = {
+ .data = mlxplat_mlxcpld_default_led_eth_wc_blade_data,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_default_led_eth_wc_blade_data),
+};
+
/* Platform led MSN21xx system family data */
static struct mlxreg_core_data mlxplat_mlxcpld_msn21xx_led_data[] = {
{
@@ -2857,6 +2936,18 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.mode = 0444,
},
{
+ .label = "asic_reset",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_GP2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0200,
+ },
+ {
+ .label = "asic2_reset",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_GP2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0200,
+ },
+ {
.label = "reset_long_pb",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
@@ -2996,6 +3087,13 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.mode = 0444,
},
{
+ .label = "asic2_health",
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC2_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .bit = 1,
+ .mode = 0444,
+ },
+ {
.label = "fan_dir",
.reg = MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION,
.bit = GENMASK(7, 0),
@@ -3057,6 +3155,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.mode = 0444,
},
{
+ .label = "config3",
+ .reg = MLXPLAT_CPLD_LPC_REG_CONFIG3_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
.label = "ufm_version",
.reg = MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET,
.bit = GENMASK(7, 0),
@@ -3535,6 +3639,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_modular_regs_io_data[] = {
.mode = 0444,
},
{
+ .label = "config3",
+ .reg = MLXPLAT_CPLD_LPC_REG_CONFIG3_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
.label = "ufm_version",
.reg = MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET,
.bit = GENMASK(7, 0),
@@ -3547,6 +3657,209 @@ static struct mlxreg_core_platform_data mlxplat_modular_regs_io_data = {
.counter = ARRAY_SIZE(mlxplat_mlxcpld_modular_regs_io_data),
};
+/* Platform register access for NVLink blade systems family data */
+static struct mlxreg_core_data mlxplat_mlxcpld_nvlink_blade_regs_io_data[] = {
+ {
+ .label = "cpld1_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_VER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld1_pn",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_PN_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "cpld1_version_min",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_MVER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_aux_pwr_or_ref",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_from_comex",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_comex_pwr_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_platform",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_soc",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_comex_wd",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_voltmon_upgrade_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_system",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_sw_pwr_off",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_comex_thermal",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_reload_bios",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_ac_pwr_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0444,
+ },
+ {
+ .label = "pwr_cycle",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0200,
+ },
+ {
+ .label = "pwr_down",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0200,
+ },
+ {
+ .label = "global_wp_request",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0644,
+ },
+ {
+ .label = "jtag_enable",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0644,
+ },
+ {
+ .label = "comm_chnl_ready",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0200,
+ },
+ {
+ .label = "bios_safe_mode",
+ .reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0444,
+ },
+ {
+ .label = "bios_active_image",
+ .reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
+ .label = "bios_auth_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0444,
+ },
+ {
+ .label = "bios_upgrade_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(7),
+ .mode = 0444,
+ },
+ {
+ .label = "voltreg_update_status",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET,
+ .mask = MLXPLAT_CPLD_VOLTREG_UPD_MASK,
+ .bit = 5,
+ .mode = 0444,
+ },
+ {
+ .label = "vpd_wp",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0644,
+ },
+ {
+ .label = "pcie_asic_reset_dis",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0644,
+ },
+ {
+ .label = "global_wp_response",
+ .reg = MLXPLAT_CPLD_LPC_REG_GWP_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0444,
+ },
+ {
+ .label = "config1",
+ .reg = MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "config2",
+ .reg = MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "config3",
+ .reg = MLXPLAT_CPLD_LPC_REG_CONFIG3_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "ufm_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_nvlink_blade_regs_io_data = {
+ .data = mlxplat_mlxcpld_nvlink_blade_regs_io_data,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_nvlink_blade_regs_io_data),
+};
+
/* Platform FAN default */
static struct mlxreg_core_data mlxplat_mlxcpld_default_fan_data[] = {
{
@@ -3932,8 +4245,12 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GWP_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GWP_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC2_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC2_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWR_EVENT_OFFSET:
@@ -4023,9 +4340,15 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GWP_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GWP_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GWP_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC2_HEALTH_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC2_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC2_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_MASK_OFFSET:
@@ -4100,6 +4423,7 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_SLOT_QTY_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CONFIG3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET:
return true;
}
@@ -4150,9 +4474,15 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GWP_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GWP_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GWP_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC2_HEALTH_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC2_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC2_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_MASK_OFFSET:
@@ -4221,6 +4551,7 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_SLOT_QTY_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CONFIG3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET:
return true;
}
@@ -4417,6 +4748,31 @@ static int __init mlxplat_dmi_default_wc_matched(const struct dmi_system_id *dmi
return 1;
}
+static int __init mlxplat_dmi_default_eth_wc_blade_matched(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
+ mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
+ mlxplat_mux_data[i].n_values =
+ ARRAY_SIZE(mlxplat_msn21xx_channels);
+ }
+ mlxplat_hotplug = &mlxplat_mlxcpld_default_wc_data;
+ mlxplat_hotplug->deferred_nr =
+ mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
+ mlxplat_led = &mlxplat_default_led_eth_wc_blade_data;
+ mlxplat_regs_io = &mlxplat_default_ng_regs_io_data;
+ for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++)
+ mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i];
+ mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
+ mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng;
+
+ return 1;
+}
+
static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi)
{
int i;
@@ -4579,6 +4935,28 @@ static int __init mlxplat_dmi_modular_matched(const struct dmi_system_id *dmi)
return 1;
}
+static int __init mlxplat_dmi_nvlink_blade_matched(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ mlxplat_hotplug = &mlxplat_mlxcpld_nvlink_blade_data;
+ mlxplat_hotplug->deferred_nr =
+ mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
+ for (i = 0; i < mlxplat_mux_num; i++) {
+ mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
+ mlxplat_mux_data[i].n_values =
+ ARRAY_SIZE(mlxplat_msn21xx_channels);
+ }
+ mlxplat_regs_io = &mlxplat_nvlink_blade_regs_io_data;
+ mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
+ mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng400;
+
+ return 1;
+}
+
static const struct dmi_system_id mlxplat_dmi_table[] __initconst = {
{
.callback = mlxplat_dmi_default_wc_matched,
@@ -4612,6 +4990,13 @@ static const struct dmi_system_id mlxplat_dmi_table[] __initconst = {
},
},
{
+ .callback = mlxplat_dmi_default_eth_wc_blade_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "VMOD0005"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "HI139"),
+ },
+ },
+ {
.callback = mlxplat_dmi_qmb7xx_matched,
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "VMOD0005"),
@@ -4642,6 +5027,12 @@ static const struct dmi_system_id mlxplat_dmi_table[] __initconst = {
},
},
{
+ .callback = mlxplat_dmi_nvlink_blade_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "VMOD0015"),
+ },
+ },
+ {
.callback = mlxplat_dmi_msn274x_matched,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
@@ -4830,22 +5221,20 @@ static int __init mlxplat_init(void)
nr = (nr == mlxplat_max_adap_num) ? -1 : nr;
if (mlxplat_i2c)
mlxplat_i2c->regmap = priv->regmap;
- priv->pdev_i2c = platform_device_register_resndata(
- &mlxplat_dev->dev, "i2c_mlxcpld",
- nr, mlxplat_mlxcpld_resources,
- ARRAY_SIZE(mlxplat_mlxcpld_resources),
- mlxplat_i2c, sizeof(*mlxplat_i2c));
+ priv->pdev_i2c = platform_device_register_resndata(&mlxplat_dev->dev, "i2c_mlxcpld",
+ nr, mlxplat_mlxcpld_resources,
+ ARRAY_SIZE(mlxplat_mlxcpld_resources),
+ mlxplat_i2c, sizeof(*mlxplat_i2c));
if (IS_ERR(priv->pdev_i2c)) {
err = PTR_ERR(priv->pdev_i2c);
goto fail_alloc;
}
for (i = 0; i < mlxplat_mux_num; i++) {
- priv->pdev_mux[i] = platform_device_register_resndata(
- &priv->pdev_i2c->dev,
- "i2c-mux-reg", i, NULL,
- 0, &mlxplat_mux_data[i],
- sizeof(mlxplat_mux_data[i]));
+ priv->pdev_mux[i] = platform_device_register_resndata(&priv->pdev_i2c->dev,
+ "i2c-mux-reg", i, NULL, 0,
+ &mlxplat_mux_data[i],
+ sizeof(mlxplat_mux_data[i]));
if (IS_ERR(priv->pdev_mux[i])) {
err = PTR_ERR(priv->pdev_mux[i]);
goto fail_platform_mux_register;
@@ -4853,16 +5242,18 @@ static int __init mlxplat_init(void)
}
/* Add hotplug driver */
- mlxplat_hotplug->regmap = priv->regmap;
- priv->pdev_hotplug = platform_device_register_resndata(
- &mlxplat_dev->dev, "mlxreg-hotplug",
- PLATFORM_DEVID_NONE,
- mlxplat_mlxcpld_resources,
- ARRAY_SIZE(mlxplat_mlxcpld_resources),
- mlxplat_hotplug, sizeof(*mlxplat_hotplug));
- if (IS_ERR(priv->pdev_hotplug)) {
- err = PTR_ERR(priv->pdev_hotplug);
- goto fail_platform_mux_register;
+ if (mlxplat_hotplug) {
+ mlxplat_hotplug->regmap = priv->regmap;
+ priv->pdev_hotplug =
+ platform_device_register_resndata(&mlxplat_dev->dev,
+ "mlxreg-hotplug", PLATFORM_DEVID_NONE,
+ mlxplat_mlxcpld_resources,
+ ARRAY_SIZE(mlxplat_mlxcpld_resources),
+ mlxplat_hotplug, sizeof(*mlxplat_hotplug));
+ if (IS_ERR(priv->pdev_hotplug)) {
+ err = PTR_ERR(priv->pdev_hotplug);
+ goto fail_platform_mux_register;
+ }
}
/* Set default registers. */
@@ -4875,24 +5266,26 @@ static int __init mlxplat_init(void)
}
/* Add LED driver. */
- mlxplat_led->regmap = priv->regmap;
- priv->pdev_led = platform_device_register_resndata(
- &mlxplat_dev->dev, "leds-mlxreg",
- PLATFORM_DEVID_NONE, NULL, 0,
- mlxplat_led, sizeof(*mlxplat_led));
- if (IS_ERR(priv->pdev_led)) {
- err = PTR_ERR(priv->pdev_led);
- goto fail_platform_hotplug_register;
+ if (mlxplat_led) {
+ mlxplat_led->regmap = priv->regmap;
+ priv->pdev_led =
+ platform_device_register_resndata(&mlxplat_dev->dev, "leds-mlxreg",
+ PLATFORM_DEVID_NONE, NULL, 0, mlxplat_led,
+ sizeof(*mlxplat_led));
+ if (IS_ERR(priv->pdev_led)) {
+ err = PTR_ERR(priv->pdev_led);
+ goto fail_platform_hotplug_register;
+ }
}
/* Add registers io access driver. */
if (mlxplat_regs_io) {
mlxplat_regs_io->regmap = priv->regmap;
- priv->pdev_io_regs = platform_device_register_resndata(
- &mlxplat_dev->dev, "mlxreg-io",
- PLATFORM_DEVID_NONE, NULL, 0,
- mlxplat_regs_io,
- sizeof(*mlxplat_regs_io));
+ priv->pdev_io_regs = platform_device_register_resndata(&mlxplat_dev->dev,
+ "mlxreg-io",
+ PLATFORM_DEVID_NONE, NULL,
+ 0, mlxplat_regs_io,
+ sizeof(*mlxplat_regs_io));
if (IS_ERR(priv->pdev_io_regs)) {
err = PTR_ERR(priv->pdev_io_regs);
goto fail_platform_led_register;
@@ -4902,11 +5295,10 @@ static int __init mlxplat_init(void)
/* Add FAN driver. */
if (mlxplat_fan) {
mlxplat_fan->regmap = priv->regmap;
- priv->pdev_fan = platform_device_register_resndata(
- &mlxplat_dev->dev, "mlxreg-fan",
- PLATFORM_DEVID_NONE, NULL, 0,
- mlxplat_fan,
- sizeof(*mlxplat_fan));
+ priv->pdev_fan = platform_device_register_resndata(&mlxplat_dev->dev, "mlxreg-fan",
+ PLATFORM_DEVID_NONE, NULL, 0,
+ mlxplat_fan,
+ sizeof(*mlxplat_fan));
if (IS_ERR(priv->pdev_fan)) {
err = PTR_ERR(priv->pdev_fan);
goto fail_platform_io_regs_register;
@@ -4920,11 +5312,10 @@ static int __init mlxplat_init(void)
for (j = 0; j < MLXPLAT_CPLD_WD_MAX_DEVS; j++) {
if (mlxplat_wd_data[j]) {
mlxplat_wd_data[j]->regmap = priv->regmap;
- priv->pdev_wd[j] = platform_device_register_resndata(
- &mlxplat_dev->dev, "mlx-wdt",
- j, NULL, 0,
- mlxplat_wd_data[j],
- sizeof(*mlxplat_wd_data[j]));
+ priv->pdev_wd[j] =
+ platform_device_register_resndata(&mlxplat_dev->dev, "mlx-wdt", j,
+ NULL, 0, mlxplat_wd_data[j],
+ sizeof(*mlxplat_wd_data[j]));
if (IS_ERR(priv->pdev_wd[j])) {
err = PTR_ERR(priv->pdev_wd[j]);
goto fail_platform_wd_register;
@@ -4949,9 +5340,11 @@ fail_platform_io_regs_register:
if (mlxplat_regs_io)
platform_device_unregister(priv->pdev_io_regs);
fail_platform_led_register:
- platform_device_unregister(priv->pdev_led);
+ if (mlxplat_led)
+ platform_device_unregister(priv->pdev_led);
fail_platform_hotplug_register:
- platform_device_unregister(priv->pdev_hotplug);
+ if (mlxplat_hotplug)
+ platform_device_unregister(priv->pdev_hotplug);
fail_platform_mux_register:
while (--i >= 0)
platform_device_unregister(priv->pdev_mux[i]);
@@ -4974,8 +5367,10 @@ static void __exit mlxplat_exit(void)
platform_device_unregister(priv->pdev_fan);
if (priv->pdev_io_regs)
platform_device_unregister(priv->pdev_io_regs);
- platform_device_unregister(priv->pdev_led);
- platform_device_unregister(priv->pdev_hotplug);
+ if (priv->pdev_led)
+ platform_device_unregister(priv->pdev_led);
+ if (priv->pdev_hotplug)
+ platform_device_unregister(priv->pdev_hotplug);
for (i = mlxplat_mux_num - 1; i >= 0 ; i--)
platform_device_unregister(priv->pdev_mux[i]);
diff --git a/drivers/platform/x86/p2sb.c b/drivers/platform/x86/p2sb.c
new file mode 100644
index 000000000000..fb2e141f3eb8
--- /dev/null
+++ b/drivers/platform/x86/p2sb.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Primary to Sideband (P2SB) bridge access support
+ *
+ * Copyright (c) 2017, 2021-2022 Intel Corporation.
+ *
+ * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Jonathan Yong <jonathan.yong@intel.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/export.h>
+#include <linux/pci.h>
+#include <linux/platform_data/x86/p2sb.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+
+#define P2SBC 0xe0
+#define P2SBC_HIDE BIT(8)
+
+static const struct x86_cpu_id p2sb_cpu_ids[] = {
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, PCI_DEVFN(13, 0)),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, PCI_DEVFN(31, 1)),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_D, PCI_DEVFN(31, 1)),
+ X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, PCI_DEVFN(31, 1)),
+ X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, PCI_DEVFN(31, 1)),
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, PCI_DEVFN(31, 1)),
+ X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, PCI_DEVFN(31, 1)),
+ {}
+};
+
+static int p2sb_get_devfn(unsigned int *devfn)
+{
+ const struct x86_cpu_id *id;
+
+ id = x86_match_cpu(p2sb_cpu_ids);
+ if (!id)
+ return -ENODEV;
+
+ *devfn = (unsigned int)id->driver_data;
+ return 0;
+}
+
+static int p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
+{
+ /* Copy resource from the first BAR of the device in question */
+ *mem = pdev->resource[0];
+ return 0;
+}
+
+static int p2sb_scan_and_read(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+{
+ struct pci_dev *pdev;
+ int ret;
+
+ pdev = pci_scan_single_device(bus, devfn);
+ if (!pdev)
+ return -ENODEV;
+
+ ret = p2sb_read_bar0(pdev, mem);
+
+ pci_stop_and_remove_bus_device(pdev);
+ return ret;
+}
+
+/**
+ * p2sb_bar - Get Primary to Sideband (P2SB) bridge device BAR
+ * @bus: PCI bus to communicate with
+ * @devfn: PCI slot and function to communicate with
+ * @mem: memory resource to be filled in
+ *
+ * The BIOS prevents the P2SB device from being enumerated by the PCI
+ * subsystem, so we need to unhide it, look up the BAR, and hide it again.
+ *
+ * If @bus is NULL, bus 0 in domain 0 will be used.
+ * If @devfn is 0, it will be replaced by the devfn of the P2SB device.
+ *
+ * Caller must provide a valid pointer to @mem.
+ *
+ * Locking is handled by pci_rescan_remove_lock mutex.
+ *
+ * Return:
+ * 0 on success or appropriate errno value on error.
+ */
+int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+{
+ struct pci_dev *pdev_p2sb;
+ unsigned int devfn_p2sb;
+ u32 value = P2SBC_HIDE;
+ int ret;
+
+ /* Get devfn for P2SB device itself */
+ ret = p2sb_get_devfn(&devfn_p2sb);
+ if (ret)
+ return ret;
+
+ /* if @bus is NULL, use bus 0 in domain 0 */
+ bus = bus ?: pci_find_bus(0, 0);
+
+ /*
+ * Prevent a concurrent PCI bus scan from seeing the P2SB device and
+ * prevent its removal via sysfs while it is temporarily exposed.
+ */
+ pci_lock_rescan_remove();
+
+ /* Unhide the P2SB device, if needed */
+ pci_bus_read_config_dword(bus, devfn_p2sb, P2SBC, &value);
+ if (value & P2SBC_HIDE)
+ pci_bus_write_config_dword(bus, devfn_p2sb, P2SBC, 0);
+
+ pdev_p2sb = pci_scan_single_device(bus, devfn_p2sb);
+ if (devfn)
+ ret = p2sb_scan_and_read(bus, devfn, mem);
+ else
+ ret = p2sb_read_bar0(pdev_p2sb, mem);
+ pci_stop_and_remove_bus_device(pdev_p2sb);
+
+ /* Hide the P2SB device, if it was hidden */
+ if (value & P2SBC_HIDE)
+ pci_bus_write_config_dword(bus, devfn_p2sb, P2SBC, P2SBC_HIDE);
+
+ pci_unlock_rescan_remove();
+
+ if (ret)
+ return ret;
+
+ if (mem->flags == 0)
+ return -ENODEV;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(p2sb_bar);
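
For reference, a minimal caller sketch of the p2sb_bar() helper exported above (not part of this patch): per the kernel-doc, a NULL bus means bus 0 in domain 0 and a devfn of 0 asks for BAR0 of the P2SB bridge itself. The function name and the surrounding comments are illustrative assumptions, not existing kernel code.

#include <linux/ioport.h>
#include <linux/platform_data/x86/p2sb.h>

/* Hypothetical consumer: fetch the MMIO window behind the hidden P2SB bridge. */
static int example_get_p2sb_bar0(struct resource *res)
{
	int ret;

	/* NULL bus => bus 0 in domain 0; devfn 0 => the P2SB device itself */
	ret = p2sb_bar(NULL, 0, res);
	if (ret)
		return ret;

	/* res now describes the bridge's BAR0; a caller would typically ioremap() a slice of it */
	return 0;
}
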
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index 615e39cbbbf1..d9a095d2c0eb 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -998,19 +998,23 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
pr_err("Couldn't retrieve BIOS data\n");
goto out_input;
}
- /* initialize backlight */
- memset(&props, 0, sizeof(struct backlight_properties));
- props.type = BACKLIGHT_PLATFORM;
- props.max_brightness = pcc->sinf[SINF_AC_MAX_BRIGHT];
- pcc->backlight = backlight_device_register("panasonic", NULL, pcc,
- &pcc_backlight_ops, &props);
- if (IS_ERR(pcc->backlight)) {
- result = PTR_ERR(pcc->backlight);
- goto out_input;
- }
- /* read the initial brightness setting from the hardware */
- pcc->backlight->props.brightness = pcc->sinf[SINF_AC_CUR_BRIGHT];
+ if (acpi_video_get_backlight_type() == acpi_backlight_vendor) {
+ /* initialize backlight */
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
+ props.max_brightness = pcc->sinf[SINF_AC_MAX_BRIGHT];
+
+ pcc->backlight = backlight_device_register("panasonic", NULL, pcc,
+ &pcc_backlight_ops, &props);
+ if (IS_ERR(pcc->backlight)) {
+ result = PTR_ERR(pcc->backlight);
+ goto out_input;
+ }
+
+ /* read the initial brightness setting from the hardware */
+ pcc->backlight->props.brightness = pcc->sinf[SINF_AC_CUR_BRIGHT];
+ }
/* Reset initial sticky key mode since the hardware register state is not consistent */
acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, 0);
diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
index b8b1ed1406de..154317e9910d 100644
--- a/drivers/platform/x86/pmc_atom.c
+++ b/drivers/platform/x86/pmc_atom.c
@@ -389,21 +389,16 @@ static const struct dmi_system_id critclk_systems[] = {
},
},
{
- /* pmc_plt_clk0 - 3 are used for the 4 ethernet controllers */
- .ident = "Lex 3I380D",
+ /*
+ * Lex System / Lex Computech Co. makes a lot of Bay Trail
+ * based embedded boards which often come with multiple
+ * ethernet controllers using multiple pmc_plt_clks. See:
+ * https://www.lex.com.tw/products/embedded-ipc-board/
+ */
+ .ident = "Lex BayTrail",
.callback = dmi_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Lex BayTrail"),
- DMI_MATCH(DMI_PRODUCT_NAME, "3I380D"),
- },
- },
- {
- /* pmc_plt_clk* - are used for ethernet controllers */
- .ident = "Lex 2I385SW",
- .callback = dmi_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Lex BayTrail"),
- DMI_MATCH(DMI_PRODUCT_NAME, "2I385SW"),
},
},
{
diff --git a/drivers/platform/x86/serial-multi-instantiate.c b/drivers/platform/x86/serial-multi-instantiate.c
index 1e8063b7c169..5362f1a7b77c 100644
--- a/drivers/platform/x86/serial-multi-instantiate.c
+++ b/drivers/platform/x86/serial-multi-instantiate.c
@@ -61,36 +61,35 @@ static int smi_get_irq(struct platform_device *pdev, struct acpi_device *adev,
default:
return 0;
}
-
if (ret < 0)
- dev_err_probe(&pdev->dev, ret, "Error requesting irq at index %d: %d\n",
- inst->irq_idx, ret);
+ return dev_err_probe(&pdev->dev, ret, "Error requesting irq at index %d\n",
+ inst->irq_idx);
return ret;
}
static void smi_devs_unregister(struct smi *smi)
{
- while (smi->i2c_num > 0)
- i2c_unregister_device(smi->i2c_devs[--smi->i2c_num]);
+ while (smi->i2c_num--)
+ i2c_unregister_device(smi->i2c_devs[smi->i2c_num]);
- while (smi->spi_num > 0)
- spi_unregister_device(smi->spi_devs[--smi->spi_num]);
+ while (smi->spi_num--)
+ spi_unregister_device(smi->spi_devs[smi->spi_num]);
}
/**
* smi_spi_probe - Instantiate multiple SPI devices from inst array
* @pdev: Platform device
- * @adev: ACPI device
* @smi: Internal struct for Serial multi instantiate driver
* @inst_array: Array of instances to probe
*
 * Returns the number of SPI devices instantiated, zero if none are found, or a negative error code.
*/
-static int smi_spi_probe(struct platform_device *pdev, struct acpi_device *adev, struct smi *smi,
+static int smi_spi_probe(struct platform_device *pdev, struct smi *smi,
const struct smi_instance *inst_array)
{
struct device *dev = &pdev->dev;
+ struct acpi_device *adev = ACPI_COMPANION(dev);
struct spi_controller *ctlr;
struct spi_device *spi_dev;
char name[50];
@@ -99,8 +98,8 @@ static int smi_spi_probe(struct platform_device *pdev, struct acpi_device *adev,
ret = acpi_spi_count_resources(adev);
if (ret < 0)
return ret;
- else if (!ret)
- return -ENODEV;
+ if (!ret)
+ return -ENOENT;
count = ret;
@@ -112,9 +111,8 @@ static int smi_spi_probe(struct platform_device *pdev, struct acpi_device *adev,
spi_dev = acpi_spi_device_alloc(NULL, adev, i);
if (IS_ERR(spi_dev)) {
- ret = PTR_ERR(spi_dev);
- dev_err_probe(dev, ret, "failed to allocate SPI device %s from ACPI: %d\n",
- dev_name(&adev->dev), ret);
+ ret = dev_err_probe(dev, PTR_ERR(spi_dev), "failed to allocate SPI device %s from ACPI\n",
+ dev_name(&adev->dev));
goto error;
}
@@ -135,9 +133,8 @@ static int smi_spi_probe(struct platform_device *pdev, struct acpi_device *adev,
ret = spi_add_device(spi_dev);
if (ret) {
- dev_err_probe(&ctlr->dev, ret,
- "failed to add SPI device %s from ACPI: %d\n",
- dev_name(&adev->dev), ret);
+ dev_err_probe(&ctlr->dev, ret, "failed to add SPI device %s from ACPI\n",
+ dev_name(&adev->dev));
spi_dev_put(spi_dev);
goto error;
}
@@ -166,25 +163,25 @@ error:
/**
* smi_i2c_probe - Instantiate multiple I2C devices from inst array
* @pdev: Platform device
- * @adev: ACPI device
* @smi: Internal struct for Serial multi instantiate driver
* @inst_array: Array of instances to probe
*
 * Returns the number of I2C devices instantiated, zero if none are found, or a negative error code.
*/
-static int smi_i2c_probe(struct platform_device *pdev, struct acpi_device *adev, struct smi *smi,
+static int smi_i2c_probe(struct platform_device *pdev, struct smi *smi,
const struct smi_instance *inst_array)
{
struct i2c_board_info board_info = {};
struct device *dev = &pdev->dev;
+ struct acpi_device *adev = ACPI_COMPANION(dev);
char name[32];
int i, ret, count;
ret = i2c_acpi_client_count(adev);
if (ret < 0)
return ret;
- else if (!ret)
- return -ENODEV;
+ if (!ret)
+ return -ENOENT;
count = ret;
@@ -230,12 +227,8 @@ static int smi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct smi_node *node;
- struct acpi_device *adev;
struct smi *smi;
-
- adev = ACPI_COMPANION(dev);
- if (!adev)
- return -ENODEV;
+ int ret;
node = device_get_match_data(dev);
if (!node) {
@@ -251,19 +244,25 @@ static int smi_probe(struct platform_device *pdev)
switch (node->bus_type) {
case SMI_I2C:
- return smi_i2c_probe(pdev, adev, smi, node->instances);
+ return smi_i2c_probe(pdev, smi, node->instances);
case SMI_SPI:
- return smi_spi_probe(pdev, adev, smi, node->instances);
+ return smi_spi_probe(pdev, smi, node->instances);
case SMI_AUTO_DETECT:
- if (i2c_acpi_client_count(adev) > 0)
- return smi_i2c_probe(pdev, adev, smi, node->instances);
- else
- return smi_spi_probe(pdev, adev, smi, node->instances);
+ /*
+ * For backwards compatibility with existing nodes, I2C is
+ * checked first and, if such entries are found, ONLY I2C
+ * devices are created. Some nodes already handled by this
+ * driver may also contain unrelated SpiSerialBus entries
+ * that were previously ignored, and this preserves that
+ * behavior.
+ */
+ ret = smi_i2c_probe(pdev, smi, node->instances);
+ if (ret != -ENOENT)
+ return ret;
+ return smi_spi_probe(pdev, smi, node->instances);
default:
return -EINVAL;
}
-
- return 0; /* never reached */
}
static int smi_remove(struct platform_device *pdev)
@@ -325,10 +324,11 @@ static const struct smi_node cs35l41_hda = {
static const struct acpi_device_id smi_acpi_ids[] = {
{ "BSG1160", (unsigned long)&bsg1160_data },
{ "BSG2150", (unsigned long)&bsg2150_data },
- { "INT3515", (unsigned long)&int3515_data },
{ "CSC3551", (unsigned long)&cs35l41_hda },
+ { "INT3515", (unsigned long)&int3515_data },
/* Non-conforming _HID for Cirrus Logic already released */
{ "CLSA0100", (unsigned long)&cs35l41_hda },
+ { "CLSA0101", (unsigned long)&cs35l41_hda },
{ }
};
MODULE_DEVICE_TABLE(acpi, smi_acpi_ids);
diff --git a/drivers/platform/x86/simatic-ipc.c b/drivers/platform/x86/simatic-ipc.c
index b599cda5ba3c..ca3647b751d5 100644
--- a/drivers/platform/x86/simatic-ipc.c
+++ b/drivers/platform/x86/simatic-ipc.c
@@ -51,6 +51,7 @@ static int register_platform_devices(u32 station_id)
{
u8 ledmode = SIMATIC_IPC_DEVICE_NONE;
u8 wdtmode = SIMATIC_IPC_DEVICE_NONE;
+ char *pdevname = KBUILD_MODNAME "_leds";
int i;
platform_data.devmode = SIMATIC_IPC_DEVICE_NONE;
@@ -64,10 +65,12 @@ static int register_platform_devices(u32 station_id)
}
if (ledmode != SIMATIC_IPC_DEVICE_NONE) {
+ if (ledmode == SIMATIC_IPC_DEVICE_127E)
+ pdevname = KBUILD_MODNAME "_leds_gpio";
platform_data.devmode = ledmode;
ipc_led_platform_device =
platform_device_register_data(NULL,
- KBUILD_MODNAME "_leds", PLATFORM_DEVID_NONE,
+ pdevname, PLATFORM_DEVID_NONE,
&platform_data,
sizeof(struct simatic_ipc_platform));
if (IS_ERR(ipc_led_platform_device))
@@ -101,44 +104,6 @@ static int register_platform_devices(u32 station_id)
return 0;
}
-/* FIXME: this should eventually be done with generic P2SB discovery code
- * the individual drivers for watchdogs and LEDs access memory that implements
- * GPIO, but pinctrl will not come up because of missing ACPI entries
- *
- * While there is no conflict a cleaner solution would be to somehow bring up
- * pinctrl even with these ACPI entries missing, and base the drivers on pinctrl.
- * After which the following function could be dropped, together with the code
- * poking the memory.
- */
-/*
- * Get membase address from PCI, used in leds and wdt module. Here we read
- * the bar0. The final address calculation is done in the appropriate modules
- */
-u32 simatic_ipc_get_membase0(unsigned int p2sb)
-{
- struct pci_bus *bus;
- u32 bar0 = 0;
- /*
- * The GPIO memory is in bar0 of the hidden P2SB device.
- * Unhide the device to have a quick look at it, before we hide it
- * again.
- * Also grab the pci rescan lock so that device does not get discovered
- * and remapped while it is visible.
- * This code is inspired by drivers/mfd/lpc_ich.c
- */
- bus = pci_find_bus(0, 0);
- pci_lock_rescan_remove();
- pci_bus_write_config_byte(bus, p2sb, 0xE1, 0x0);
- pci_bus_read_config_dword(bus, p2sb, PCI_BASE_ADDRESS_0, &bar0);
-
- bar0 &= ~0xf;
- pci_bus_write_config_byte(bus, p2sb, 0xE1, 0x1);
- pci_unlock_rescan_remove();
-
- return bar0;
-}
-EXPORT_SYMBOL(simatic_ipc_get_membase0);
-
static int __init simatic_ipc_init_module(void)
{
const struct dmi_system_id *match;
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index d8d0c0bed5e9..07ef05f727a2 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -4341,7 +4341,7 @@ sony_pic_read_possible_resource(struct acpi_resource *resource, void *context)
{
struct acpi_resource_irq *p = &resource->data.irq;
struct sony_pic_irq *interrupt = NULL;
- if (!p || !p->interrupt_count) {
+ if (!p->interrupt_count) {
/*
* IRQ descriptors may have no IRQ# bits set,
* particularly those those w/ _STA disabled
@@ -4374,11 +4374,6 @@ sony_pic_read_possible_resource(struct acpi_resource *resource, void *context)
struct acpi_resource_io *io = &resource->data.io;
struct sony_pic_ioport *ioport =
list_first_entry(&dev->ioports, struct sony_pic_ioport, list);
- if (!io) {
- dprintk("Blank IO resource\n");
- return AE_OK;
- }
-
if (!ioport->io1.minimum) {
memcpy(&ioport->io1, io, sizeof(*io));
dprintk("IO1 at 0x%.4x (0x%.2x)\n", ioport->io1.minimum,
diff --git a/drivers/platform/x86/system76_acpi.c b/drivers/platform/x86/system76_acpi.c
index 7299ad08c838..958df41ad509 100644
--- a/drivers/platform/x86/system76_acpi.c
+++ b/drivers/platform/x86/system76_acpi.c
@@ -339,7 +339,7 @@ static ssize_t kb_led_color_show(
struct led_classdev *led;
struct system76_data *data;
- led = (struct led_classdev *)dev->driver_data;
+ led = dev_get_drvdata(dev);
data = container_of(led, struct system76_data, kb_led);
return sysfs_emit(buf, "%06X\n", data->kb_color);
}
@@ -356,7 +356,7 @@ static ssize_t kb_led_color_store(
unsigned int val;
int ret;
- led = (struct led_classdev *)dev->driver_data;
+ led = dev_get_drvdata(dev);
data = container_of(led, struct system76_data, kb_led);
ret = kstrtouint(buf, 16, &val);
if (ret)
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 502dcd1c33b7..22d4e8633e30 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -34,46 +34,51 @@
* thanks to Chris Wright <chrisw@osdl.org>
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/sched.h>
-#include <linux/sched/signal.h>
-#include <linux/kthread.h>
-#include <linux/freezer.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/nvram.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/sysfs.h>
+#include <linux/acpi.h>
#include <linux/backlight.h>
#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/dmi.h>
#include <linux/fb.h>
-#include <linux/platform_device.h>
+#include <linux/freezer.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
+#include <linux/init.h>
#include <linux/input.h>
-#include <linux/leds.h>
-#include <linux/rfkill.h>
-#include <linux/dmi.h>
#include <linux/jiffies.h>
-#include <linux/workqueue.h>
-#include <linux/acpi.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/leds.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/nvram.h>
#include <linux/pci.h>
-#include <linux/power_supply.h>
+#include <linux/platform_device.h>
#include <linux/platform_profile.h>
-#include <sound/core.h>
-#include <sound/control.h>
-#include <sound/initval.h>
+#include <linux/power_supply.h>
+#include <linux/proc_fs.h>
+#include <linux/rfkill.h>
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/string_helpers.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+
#include <acpi/battery.h>
#include <acpi/video.h>
+
#include <drm/drm_privacy_screen_driver.h>
+
+#include <sound/control.h>
+#include <sound/core.h>
+#include <sound/initval.h>
+
#include "dual_accel_detect.h"
/* ThinkPad CMOS commands */
@@ -159,6 +164,7 @@ enum tpacpi_hkey_event_t {
TP_HKEY_EV_VOL_DOWN = 0x1016, /* Volume down or unmute */
TP_HKEY_EV_VOL_MUTE = 0x1017, /* Mixer output mute */
TP_HKEY_EV_PRIVACYGUARD_TOGGLE = 0x130f, /* Toggle priv.guard on/off */
+ TP_HKEY_EV_AMT_TOGGLE = 0x131a, /* Toggle AMT on/off */
/* Reasons for waking up from S3/S4 */
TP_HKEY_EV_WKUP_S3_UNDOCK = 0x2304, /* undock requested, S3 */
@@ -257,8 +263,6 @@ enum tpacpi_hkey_event_t {
#define TPACPI_DBG_BRGHT 0x0020
#define TPACPI_DBG_MIXER 0x0040
-#define onoff(status, bit) ((status) & (1 << (bit)) ? "on" : "off")
-#define enabled(status, bit) ((status) & (1 << (bit)) ? "enabled" : "disabled")
#define strlencmp(a, b) (strncmp((a), (b), strlen(b)))
@@ -1312,9 +1316,7 @@ static int tpacpi_rfk_procfs_read(const enum tpacpi_rfk_id id, struct seq_file *
return status;
}
- seq_printf(m, "status:\t\t%s\n",
- (status == TPACPI_RFK_RADIO_ON) ?
- "enabled" : "disabled");
+ seq_printf(m, "status:\t\t%s\n", str_enabled_disabled(status == TPACPI_RFK_RADIO_ON));
seq_printf(m, "commands:\tenable, disable\n");
}
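For reference, the str_*() conversions introduced throughout this file come from <linux/string_helpers.h>; a minimal sketch of what they boil down to (paraphrased here, not part of this patch):

static inline const char *ex_str_enabled_disabled(bool v)
{
	return v ? "enabled" : "disabled";
}

static inline const char *ex_str_enable_disable(bool v)
{
	return v ? "enable" : "disable";
}

So the seq_printf() above prints "enabled" exactly when status equals TPACPI_RFK_RADIO_ON, with no behaviour change from the old open-coded ternary.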
@@ -1341,8 +1343,7 @@ static int tpacpi_rfk_procfs_write(const enum tpacpi_rfk_id id, char *buf)
if (status != -1) {
tpacpi_disclose_usertask("procfs", "attempt to %s %s\n",
- (status == TPACPI_RFK_RADIO_ON) ?
- "enable" : "disable",
+ str_enable_disable(status == TPACPI_RFK_RADIO_ON),
tpacpi_rfkill_names[id]);
res = (tpacpi_rfkill_switches[id]->ops->set_status)(status);
tpacpi_rfk_update_swstate(tpacpi_rfkill_switches[id]);
@@ -3499,8 +3500,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
if (acpi_evalf(hkey_handle, &status, "WLSW", "qd")) {
tp_features.hotkey_wlsw = 1;
radiosw_state = !!status;
- pr_info("radio switch found; radios are %s\n",
- enabled(status, 0));
+ pr_info("radio switch found; radios are %s\n", str_enabled_disabled(status & BIT(0)));
}
tabletsw_state = hotkey_init_tablet_mode();
@@ -3735,6 +3735,7 @@ static bool hotkey_notify_extended_hotkey(const u32 hkey)
switch (hkey) {
case TP_HKEY_EV_PRIVACYGUARD_TOGGLE:
+ case TP_HKEY_EV_AMT_TOGGLE:
tpacpi_driver_event(hkey);
return true;
}
@@ -4159,7 +4160,7 @@ static int hotkey_read(struct seq_file *m)
if (res)
return res;
- seq_printf(m, "status:\t\t%s\n", enabled(status, 0));
+ seq_printf(m, "status:\t\t%s\n", str_enabled_disabled(status & BIT(0)));
if (hotkey_all_mask) {
seq_printf(m, "mask:\t\t0x%08x\n", hotkey_user_mask);
seq_printf(m, "commands:\tenable, disable, reset, <mask>\n");
@@ -4292,9 +4293,8 @@ static int bluetooth_set_status(enum tpacpi_rfkill_state state)
{
int status;
- vdbg_printk(TPACPI_DBG_RFKILL,
- "will attempt to %s bluetooth\n",
- (state == TPACPI_RFK_RADIO_ON) ? "enable" : "disable");
+ vdbg_printk(TPACPI_DBG_RFKILL, "will attempt to %s bluetooth\n",
+ str_enable_disable(state == TPACPI_RFK_RADIO_ON));
#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
if (dbg_bluetoothemul) {
@@ -4659,9 +4659,8 @@ static int wan_set_status(enum tpacpi_rfkill_state state)
{
int status;
- vdbg_printk(TPACPI_DBG_RFKILL,
- "will attempt to %s wwan\n",
- (state == TPACPI_RFK_RADIO_ON) ? "enable" : "disable");
+ vdbg_printk(TPACPI_DBG_RFKILL, "will attempt to %s wwan\n",
+ str_enable_disable(state == TPACPI_RFK_RADIO_ON));
#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
if (dbg_wwanemul) {
@@ -4837,9 +4836,8 @@ static int uwb_set_status(enum tpacpi_rfkill_state state)
{
int status;
- vdbg_printk(TPACPI_DBG_RFKILL,
- "will attempt to %s UWB\n",
- (state == TPACPI_RFK_RADIO_ON) ? "enable" : "disable");
+ vdbg_printk(TPACPI_DBG_RFKILL, "will attempt to %s UWB\n",
+ str_enable_disable(state == TPACPI_RFK_RADIO_ON));
#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
if (dbg_uwbemul) {
@@ -5193,11 +5191,11 @@ static int video_read(struct seq_file *m)
return autosw;
seq_printf(m, "status:\t\tsupported\n");
- seq_printf(m, "lcd:\t\t%s\n", enabled(status, 0));
- seq_printf(m, "crt:\t\t%s\n", enabled(status, 1));
+ seq_printf(m, "lcd:\t\t%s\n", str_enabled_disabled(status & BIT(0)));
+ seq_printf(m, "crt:\t\t%s\n", str_enabled_disabled(status & BIT(1)));
if (video_supported == TPACPI_VIDEO_NEW)
- seq_printf(m, "dvi:\t\t%s\n", enabled(status, 3));
- seq_printf(m, "auto:\t\t%s\n", enabled(autosw, 0));
+ seq_printf(m, "dvi:\t\t%s\n", str_enabled_disabled(status & BIT(3)));
+ seq_printf(m, "auto:\t\t%s\n", str_enabled_disabled(autosw & BIT(0)));
seq_printf(m, "commands:\tlcd_enable, lcd_disable\n");
seq_printf(m, "commands:\tcrt_enable, crt_disable\n");
if (video_supported == TPACPI_VIDEO_NEW)
@@ -5628,7 +5626,7 @@ static int light_read(struct seq_file *m)
status = light_get_status();
if (status < 0)
return status;
- seq_printf(m, "status:\t\t%s\n", onoff(status, 0));
+ seq_printf(m, "status:\t\t%s\n", str_on_off(status & BIT(0)));
seq_printf(m, "commands:\ton, off\n");
}
@@ -6084,9 +6082,7 @@ static int __init led_init(struct ibm_init_struct *iibm)
return 0;
}
-#define str_led_status(s) \
- ((s) == TPACPI_LED_OFF ? "off" : \
- ((s) == TPACPI_LED_ON ? "on" : "blinking"))
+#define str_led_status(s) ((s) >= TPACPI_LED_BLINK ? "blinking" : str_on_off(s))
static int led_read(struct seq_file *m)
{
@@ -6103,8 +6099,7 @@ static int led_read(struct seq_file *m)
status = led_get_status(i);
if (status < 0)
return -EIO;
- seq_printf(m, "%d:\t\t%s\n",
- i, str_led_status(status));
+ seq_printf(m, "%d:\t\t%s\n", i, str_led_status(status));
}
}
@@ -6797,10 +6792,7 @@ static int brightness_set(unsigned int value)
static int brightness_update_status(struct backlight_device *bd)
{
- unsigned int level =
- (bd->props.fb_blank == FB_BLANK_UNBLANK &&
- bd->props.power == FB_BLANK_UNBLANK) ?
- bd->props.brightness : 0;
+ int level = backlight_get_brightness(bd);
dbg_printk(TPACPI_DBG_BRGHT,
"backlight: attempt to set level to %d\n",
@@ -7830,8 +7822,7 @@ static int volume_read(struct seq_file *m)
seq_printf(m, "level:\t\t%d\n",
status & TP_EC_AUDIO_LVL_MSK);
- seq_printf(m, "mute:\t\t%s\n",
- onoff(status, TP_EC_AUDIO_MUTESW));
+ seq_printf(m, "mute:\t\t%s\n", str_on_off(status & BIT(TP_EC_AUDIO_MUTESW)));
if (volume_control_allowed) {
seq_printf(m, "commands:\tunmute, mute\n");
@@ -9060,7 +9051,7 @@ static int fan_read(struct seq_file *m)
seq_printf(m, "status:\t\t%s\n"
"level:\t\t%d\n",
- (status != 0) ? "enabled" : "disabled", status);
+ str_enabled_disabled(status), status);
break;
case TPACPI_FAN_RD_TPEC:
@@ -9069,8 +9060,7 @@ static int fan_read(struct seq_file *m)
if (rc)
return rc;
- seq_printf(m, "status:\t\t%s\n",
- (status != 0) ? "enabled" : "disabled");
+ seq_printf(m, "status:\t\t%s\n", str_enabled_disabled(status));
rc = fan_get_speed(&speed);
if (rc < 0)
@@ -10268,6 +10258,7 @@ static struct ibm_struct proxsensor_driver_data = {
#define DYTC_CMD_FUNC_CAP 3 /* To get DYTC capabilities */
#define DYTC_FC_MMC 27 /* MMC Mode supported */
#define DYTC_FC_PSC 29 /* PSC Mode supported */
+#define DYTC_FC_AMT 31 /* AMT mode supported */
#define DYTC_GET_FUNCTION_BIT 8 /* Bits 8-11 - function setting */
#define DYTC_GET_MODE_BIT 12 /* Bits 12-15 - mode setting */
@@ -10280,6 +10271,10 @@ static struct ibm_struct proxsensor_driver_data = {
#define DYTC_FUNCTION_CQL 1 /* Function = 1, lap mode */
#define DYTC_FUNCTION_MMC 11 /* Function = 11, MMC mode */
#define DYTC_FUNCTION_PSC 13 /* Function = 13, PSC mode */
+#define DYTC_FUNCTION_AMT 15 /* Function = 15, AMT mode */
+
+#define DYTC_MODE_AMT_ENABLE 0x1 /* Enable AMT (in balanced mode) */
+#define DYTC_MODE_AMT_DISABLE 0xF /* Disable AMT (in other modes) */
#define DYTC_MODE_MMC_PERFORM 2 /* High power mode aka performance */
#define DYTC_MODE_MMC_LOWPOWER 3 /* Low power mode */
@@ -10300,6 +10295,8 @@ static struct ibm_struct proxsensor_driver_data = {
#define DYTC_DISABLE_CQL DYTC_SET_COMMAND(DYTC_FUNCTION_CQL, DYTC_MODE_MMC_BALANCE, 0)
#define DYTC_ENABLE_CQL DYTC_SET_COMMAND(DYTC_FUNCTION_CQL, DYTC_MODE_MMC_BALANCE, 1)
+static int dytc_control_amt(bool enable);
+static bool dytc_amt_active;
static enum platform_profile_option dytc_current_profile;
static atomic_t dytc_ignore_event = ATOMIC_INIT(0);
@@ -10382,6 +10379,30 @@ static int dytc_profile_get(struct platform_profile_handler *pprof,
return 0;
}
+static int dytc_control_amt(bool enable)
+{
+ int dummy;
+ int err;
+ int cmd;
+
+ if (!(dytc_capabilities & BIT(DYTC_FC_AMT))) {
+ pr_warn("Attempting to toggle AMT on a system that doesn't advertise support\n");
+ return -ENODEV;
+ }
+
+ if (enable)
+ cmd = DYTC_SET_COMMAND(DYTC_FUNCTION_AMT, DYTC_MODE_AMT_ENABLE, enable);
+ else
+ cmd = DYTC_SET_COMMAND(DYTC_FUNCTION_AMT, DYTC_MODE_AMT_DISABLE, enable);
+
+ pr_debug("%sabling AMT (cmd 0x%x)", enable ? "en":"dis", cmd);
+ err = dytc_command(cmd, &dummy);
+ if (err)
+ return err;
+ dytc_amt_active = enable;
+ return 0;
+}
+
/*
* Helper function - check if we are in CQL mode and if we are
* - disable CQL,
@@ -10464,6 +10485,9 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
err = dytc_command(DYTC_SET_COMMAND(DYTC_FUNCTION_PSC, perfmode, 1), &output);
if (err)
goto unlock;
+ /* system supports AMT, activate it when on balanced */
+ if (dytc_capabilities & BIT(DYTC_FC_AMT))
+ dytc_control_amt(profile == PLATFORM_PROFILE_BALANCED);
}
/* Success - update current profile */
dytc_current_profile = profile;
@@ -10568,6 +10592,11 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
/* Ensure initial values are correct */
dytc_profile_refresh();
+ /* Set AMT correctly now we know current profile */
+ if ((dytc_capabilities & BIT(DYTC_FC_PSC)) &&
+ (dytc_capabilities & BIT(DYTC_FC_AMT)))
+ dytc_control_amt(dytc_current_profile == PLATFORM_PROFILE_BALANCED);
+
return 0;
}
@@ -11011,6 +11040,15 @@ static void tpacpi_driver_event(const unsigned int hkey_event)
if (changed)
drm_privacy_screen_call_notifier_chain(lcdshadow_dev);
}
+ if (hkey_event == TP_HKEY_EV_AMT_TOGGLE) {
+ /* If we're enabling AMT we need to force balanced mode */
+ if (!dytc_amt_active)
+ /* This will also set AMT mode enabled */
+ dytc_profile_set(NULL, PLATFORM_PROFILE_BALANCED);
+ else
+ dytc_control_amt(!dytc_amt_active);
+ }
+
}
static void hotkey_driver_event(const unsigned int scancode)
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
index 2fa0f7d55259..8f7695624c8c 100644
--- a/drivers/pnp/resource.c
+++ b/drivers/pnp/resource.c
@@ -17,6 +17,7 @@
#include <asm/dma.h>
#include <asm/irq.h>
#include <linux/pci.h>
+#include <linux/libata.h>
#include <linux/ioport.h>
#include <linux/init.h>
@@ -322,8 +323,8 @@ static int pci_dev_uses_irq(struct pnp_dev *pnp, struct pci_dev *pci,
* treat the compatibility IRQs as busy.
*/
if ((progif & 0x5) != 0x5)
- if (pci_get_legacy_ide_irq(pci, 0) == irq ||
- pci_get_legacy_ide_irq(pci, 1) == irq) {
+ if (ATA_PRIMARY_IRQ(pci) == irq ||
+ ATA_SECONDARY_IRQ(pci) == irq) {
pnp_dbg(&pnp->dev, " legacy IDE device %s "
"using irq %d\n", pci_name(pci), irq);
return 1;
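ATA_PRIMARY_IRQ() and ATA_SECONDARY_IRQ() are supplied by <linux/libata.h> and stand in for the removed pci_get_legacy_ide_irq(); on a typical platform with legacy IDE support they resolve to the classic channel IRQs, roughly as sketched below (illustrative values only, the real definitions depend on the kernel configuration):

#define EX_ATA_PRIMARY_IRQ(pdev)	14	/* legacy primary IDE channel */
#define EX_ATA_SECONDARY_IRQ(pdev)	15	/* legacy secondary IDE channel */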
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index 4b563db3ab3e..a8c46ba5878f 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -297,4 +297,10 @@ config NVMEM_REBOOT_MODE
then the bootloader can read it and take different
action according to the mode.
+config POWER_MLXBF
+ tristate "Mellanox BlueField power handling driver"
+ depends on (GPIO_MLXBF2 && ACPI)
+ help
+ This driver supports reset or low power mode handling for Mellanox BlueField.
+
endif
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index f606a2f60539..0a39424fc558 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -35,3 +35,4 @@ obj-$(CONFIG_REBOOT_MODE) += reboot-mode.o
obj-$(CONFIG_SYSCON_REBOOT_MODE) += syscon-reboot-mode.o
obj-$(CONFIG_POWER_RESET_SC27XX) += sc27xx-poweroff.o
obj-$(CONFIG_NVMEM_REBOOT_MODE) += nvmem-reboot-mode.o
+obj-$(CONFIG_POWER_MLXBF) += pwr-mlxbf.o
diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c
index 64def79d557a..741e44a017c3 100644
--- a/drivers/power/reset/at91-reset.c
+++ b/drivers/power/reset/at91-reset.c
@@ -17,10 +17,13 @@
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
+#include <linux/reset-controller.h>
#include <soc/at91/at91sam9_ddrsdr.h>
#include <soc/at91/at91sam9_sdramc.h>
+#include <dt-bindings/reset/sama7g5-reset.h>
+
#define AT91_RSTC_CR 0x00 /* Reset Controller Control Register */
#define AT91_RSTC_PROCRST BIT(0) /* Processor Reset */
#define AT91_RSTC_PERRST BIT(2) /* Peripheral Reset */
@@ -39,6 +42,17 @@
#define AT91_RSTC_URSTIEN BIT(4) /* User Reset Interrupt Enable */
#define AT91_RSTC_ERSTL GENMASK(11, 8) /* External Reset Length */
+/**
+ * enum reset_type - reset types
+ * @RESET_TYPE_GENERAL: first power-up reset
+ * @RESET_TYPE_WAKEUP: return from backup mode
+ * @RESET_TYPE_WATCHDOG: watchdog fault
+ * @RESET_TYPE_SOFTWARE: processor reset required by software
+ * @RESET_TYPE_USER: NRST pin detected low
+ * @RESET_TYPE_CPU_FAIL: CPU clock failure detection
+ * @RESET_TYPE_XTAL_FAIL: 32KHz crystal failure detection fault
+ * @RESET_TYPE_ULP2: ULP2 reset
+ */
enum reset_type {
RESET_TYPE_GENERAL = 0,
RESET_TYPE_WAKEUP = 1,
@@ -50,15 +64,48 @@ enum reset_type {
RESET_TYPE_ULP2 = 8,
};
+/**
+ * struct at91_reset - AT91 reset specific data structure
+ * @rstc_base: base address for system reset
+ * @ramc_base: array with base addresses of RAM controllers
+ * @dev_base: base address for devices reset
+ * @sclk: slow clock
+ * @data: platform specific reset data
+ * @rcdev: reset controller device
+ * @lock: lock for devices reset register access
+ * @nb: reset notifier block
+ * @args: SoC specific system reset arguments
+ * @ramc_lpr: SDRAM Controller Low Power Register
+ */
struct at91_reset {
void __iomem *rstc_base;
void __iomem *ramc_base[2];
+ void __iomem *dev_base;
struct clk *sclk;
+ const struct at91_reset_data *data;
+ struct reset_controller_dev rcdev;
+ spinlock_t lock;
struct notifier_block nb;
u32 args;
u32 ramc_lpr;
};
+#define to_at91_reset(r) container_of(r, struct at91_reset, rcdev)
+
+/**
+ * struct at91_reset_data - AT91 reset data
+ * @reset_args: SoC specific system reset arguments
+ * @n_device_reset: number of device resets
+ * @device_reset_min_id: min id for device reset
+ * @device_reset_max_id: max id for device reset
+ */
+struct at91_reset_data {
+ u32 reset_args;
+ u32 n_device_reset;
+ u8 device_reset_min_id;
+ u8 device_reset_max_id;
+};
+
/*
* unless the SDRAM is cleanly shutdown before we hit the
* reset register it can be left driving the data bus and
@@ -95,7 +142,7 @@ static int at91_reset(struct notifier_block *this, unsigned long mode,
"r" (reset->rstc_base),
"r" (1),
"r" cpu_to_le32(AT91_DDRSDRC_LPCB_POWER_DOWN),
- "r" (reset->args),
+ "r" (reset->data->reset_args),
"r" (reset->ramc_lpr)
: "r4");
@@ -153,34 +200,133 @@ static const struct of_device_id at91_ramc_of_match[] = {
{ /* sentinel */ }
};
+static const struct at91_reset_data sam9260 = {
+ .reset_args = AT91_RSTC_KEY | AT91_RSTC_PERRST | AT91_RSTC_PROCRST,
+};
+
+static const struct at91_reset_data samx7 = {
+ .reset_args = AT91_RSTC_KEY | AT91_RSTC_PROCRST,
+};
+
+static const struct at91_reset_data sama7g5 = {
+ .reset_args = AT91_RSTC_KEY | AT91_RSTC_PROCRST,
+ .n_device_reset = 3,
+ .device_reset_min_id = SAMA7G5_RESET_USB_PHY1,
+ .device_reset_max_id = SAMA7G5_RESET_USB_PHY3,
+};
+
static const struct of_device_id at91_reset_of_match[] = {
{
.compatible = "atmel,at91sam9260-rstc",
- .data = (void *)(AT91_RSTC_KEY | AT91_RSTC_PERRST |
- AT91_RSTC_PROCRST),
+ .data = &sam9260,
},
{
.compatible = "atmel,at91sam9g45-rstc",
- .data = (void *)(AT91_RSTC_KEY | AT91_RSTC_PERRST |
- AT91_RSTC_PROCRST)
+ .data = &sam9260,
},
{
.compatible = "atmel,sama5d3-rstc",
- .data = (void *)(AT91_RSTC_KEY | AT91_RSTC_PERRST |
- AT91_RSTC_PROCRST)
+ .data = &sam9260,
},
{
.compatible = "atmel,samx7-rstc",
- .data = (void *)(AT91_RSTC_KEY | AT91_RSTC_PROCRST)
+ .data = &samx7,
},
{
.compatible = "microchip,sam9x60-rstc",
- .data = (void *)(AT91_RSTC_KEY | AT91_RSTC_PROCRST)
+ .data = &samx7,
+ },
+ {
+ .compatible = "microchip,sama7g5-rstc",
+ .data = &sama7g5,
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, at91_reset_of_match);
+static int at91_reset_update(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
+{
+ struct at91_reset *reset = to_at91_reset(rcdev);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&reset->lock, flags);
+ val = readl_relaxed(reset->dev_base);
+ if (assert)
+ val |= BIT(id);
+ else
+ val &= ~BIT(id);
+ writel_relaxed(val, reset->dev_base);
+ spin_unlock_irqrestore(&reset->lock, flags);
+
+ return 0;
+}
+
+static int at91_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return at91_reset_update(rcdev, id, true);
+}
+
+static int at91_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return at91_reset_update(rcdev, id, false);
+}
+
+static int at91_reset_dev_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct at91_reset *reset = to_at91_reset(rcdev);
+ u32 val;
+
+ val = readl_relaxed(reset->dev_base);
+
+ return !!(val & BIT(id));
+}
+
+static const struct reset_control_ops at91_reset_ops = {
+ .assert = at91_reset_assert,
+ .deassert = at91_reset_deassert,
+ .status = at91_reset_dev_status,
+};
+
+static int at91_reset_of_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ struct at91_reset *reset = to_at91_reset(rcdev);
+
+ if (!reset->data->n_device_reset ||
+ (reset_spec->args[0] < reset->data->device_reset_min_id ||
+ reset_spec->args[0] > reset->data->device_reset_max_id))
+ return -EINVAL;
+
+ return reset_spec->args[0];
+}
+
+static int at91_rcdev_init(struct at91_reset *reset,
+ struct platform_device *pdev)
+{
+ if (!reset->data->n_device_reset)
+ return 0;
+
+ reset->dev_base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 1,
+ NULL);
+ if (IS_ERR(reset->dev_base))
+ return -ENODEV;
+
+ spin_lock_init(&reset->lock);
+ reset->rcdev.ops = &at91_reset_ops;
+ reset->rcdev.owner = THIS_MODULE;
+ reset->rcdev.of_node = pdev->dev.of_node;
+ reset->rcdev.nr_resets = reset->data->n_device_reset;
+ reset->rcdev.of_reset_n_cells = 1;
+ reset->rcdev.of_xlate = at91_reset_of_xlate;
+
+ return devm_reset_controller_register(&pdev->dev, &reset->rcdev);
+}
+
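With the reset controller registered, a peripheral driver on sama7g5 could pick up one of the new device resets through the generic reset API; a minimal consumer sketch (not part of this patch, assuming a device-tree phandle such as resets = <&rstc SAMA7G5_RESET_USB_PHY1>):

	struct reset_control *rst;

	rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	reset_control_assert(rst);	/* sets the matching bit via at91_reset_update() */
	/* ... reprogram the block ... */
	reset_control_deassert(rst);	/* clears it again */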
static int __init at91_reset_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
@@ -212,10 +358,12 @@ static int __init at91_reset_probe(struct platform_device *pdev)
}
}
- match = of_match_node(at91_reset_of_match, pdev->dev.of_node);
+ reset->data = device_get_match_data(&pdev->dev);
+ if (!reset->data)
+ return -ENODEV;
+
reset->nb.notifier_call = at91_reset;
reset->nb.priority = 192;
- reset->args = (u32)match->data;
reset->sclk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(reset->sclk))
@@ -229,6 +377,10 @@ static int __init at91_reset_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, reset);
+ ret = at91_rcdev_init(reset, pdev);
+ if (ret)
+ goto disable_clk;
+
if (of_device_is_compatible(pdev->dev.of_node, "microchip,sam9x60-rstc")) {
u32 val = readl(reset->rstc_base + AT91_RSTC_MR);
@@ -237,14 +389,16 @@ static int __init at91_reset_probe(struct platform_device *pdev)
}
ret = register_restart_handler(&reset->nb);
- if (ret) {
- clk_disable_unprepare(reset->sclk);
- return ret;
- }
+ if (ret)
+ goto disable_clk;
at91_reset_status(pdev, reset->rstc_base);
return 0;
+
+disable_clk:
+ clk_disable_unprepare(reset->sclk);
+ return ret;
}
static int __exit at91_reset_remove(struct platform_device *pdev)
diff --git a/drivers/power/reset/pwr-mlxbf.c b/drivers/power/reset/pwr-mlxbf.c
new file mode 100644
index 000000000000..12dedf841a44
--- /dev/null
+++ b/drivers/power/reset/pwr-mlxbf.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0-only or BSD-3-Clause
+
+/*
+ * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES.
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/devm-helpers.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/reboot.h>
+#include <linux/types.h>
+
+struct pwr_mlxbf {
+ struct work_struct send_work;
+ const char *hid;
+};
+
+static void pwr_mlxbf_send_work(struct work_struct *work)
+{
+ acpi_bus_generate_netlink_event("button/power.*", "Power Button", 0x80, 1);
+}
+
+static irqreturn_t pwr_mlxbf_irq(int irq, void *ptr)
+{
+ const char *rst_pwr_hid = "MLNXBF24";
+ const char *low_pwr_hid = "MLNXBF29";
+ struct pwr_mlxbf *priv = ptr;
+
+ if (!strncmp(priv->hid, rst_pwr_hid, 8))
+ emergency_restart();
+
+ if (!strncmp(priv->hid, low_pwr_hid, 8))
+ schedule_work(&priv->send_work);
+
+ return IRQ_HANDLED;
+}
+
+static int pwr_mlxbf_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct acpi_device *adev;
+ struct pwr_mlxbf *priv;
+ const char *hid;
+ int irq, err;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ adev = ACPI_COMPANION(dev);
+ if (!adev)
+ return -ENXIO;
+
+ hid = acpi_device_hid(adev);
+ priv->hid = hid;
+
+ irq = acpi_dev_gpio_irq_get(ACPI_COMPANION(dev), 0);
+ if (irq < 0)
+ return dev_err_probe(dev, irq, "Error getting %s irq.\n", priv->hid);
+
+ err = devm_work_autocancel(dev, &priv->send_work, pwr_mlxbf_send_work);
+ if (err)
+ return err;
+
+ err = devm_request_irq(dev, irq, pwr_mlxbf_irq, 0, hid, priv);
+ if (err)
+ dev_err(dev, "Failed request of %s irq\n", priv->hid);
+
+ return err;
+}
+
+static const struct acpi_device_id __maybe_unused pwr_mlxbf_acpi_match[] = {
+ { "MLNXBF24", 0 },
+ { "MLNXBF29", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, pwr_mlxbf_acpi_match);
+
+static struct platform_driver pwr_mlxbf_driver = {
+ .driver = {
+ .name = "pwr_mlxbf",
+ .acpi_match_table = pwr_mlxbf_acpi_match,
+ },
+ .probe = pwr_mlxbf_probe,
+};
+
+module_platform_driver(pwr_mlxbf_driver);
+
+MODULE_DESCRIPTION("Mellanox BlueField power driver");
+MODULE_AUTHOR("Asmaa Mnebhi <asmaa@nvidia.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/power/supply/ab8500-chargalg.h b/drivers/power/supply/ab8500-chargalg.h
index f47a0061c36a..8534d067ba95 100644
--- a/drivers/power/supply/ab8500-chargalg.h
+++ b/drivers/power/supply/ab8500-chargalg.h
@@ -34,7 +34,6 @@ struct ux500_charger_ops {
* @max_out_volt_uv maximum output charger voltage in uV
* @max_out_curr_ua maximum output charger current in uA
* @enabled indicates if this charger is used or not
- * @external external charger unit (pm2xxx)
*/
struct ux500_charger {
struct power_supply *psy;
@@ -43,9 +42,6 @@ struct ux500_charger {
int max_out_curr_ua;
int wdt_refresh;
bool enabled;
- bool external;
};
-extern struct blocking_notifier_head charger_notifier_list;
-
#endif /* _AB8500_CHARGALG_H_ */
diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c
index b7e842dff567..863fabe05bdc 100644
--- a/drivers/power/supply/ab8500_btemp.c
+++ b/drivers/power/supply/ab8500_btemp.c
@@ -697,7 +697,6 @@ static void ab8500_btemp_unbind(struct device *dev, struct device *master,
/* Delete the work queue */
destroy_workqueue(di->btemp_wq);
- flush_scheduled_work();
}
static const struct component_ops ab8500_btemp_component_ops = {
diff --git a/drivers/power/supply/ab8500_chargalg.c b/drivers/power/supply/ab8500_chargalg.c
index 431bbc352d1b..ae4be553f424 100644
--- a/drivers/power/supply/ab8500_chargalg.c
+++ b/drivers/power/supply/ab8500_chargalg.c
@@ -246,9 +246,6 @@ struct ab8500_chargalg {
struct kobject chargalg_kobject;
};
-/*External charger prepare notifier*/
-BLOCKING_NOTIFIER_HEAD(charger_notifier_list);
-
/* Main battery properties */
static enum power_supply_property ab8500_chargalg_props[] = {
POWER_SUPPLY_PROP_STATUS,
@@ -343,8 +340,7 @@ static int ab8500_chargalg_check_charger_enable(struct ab8500_chargalg *di)
return di->usb_chg->ops.check_enable(di->usb_chg,
bi->constant_charge_voltage_max_uv,
bi->constant_charge_current_max_ua);
- } else if ((di->chg_info.charger_type & AC_CHG) &&
- !(di->ac_chg->external)) {
+ } else if (di->chg_info.charger_type & AC_CHG) {
return di->ac_chg->ops.check_enable(di->ac_chg,
bi->constant_charge_voltage_max_uv,
bi->constant_charge_current_max_ua);
@@ -473,15 +469,6 @@ static int ab8500_chargalg_kick_watchdog(struct ab8500_chargalg *di)
/* Check if charger exists and kick watchdog if charging */
if (di->ac_chg && di->ac_chg->ops.kick_wd &&
di->chg_info.online_chg & AC_CHG) {
- /*
- * If AB charger watchdog expired, pm2xxx charging
- * gets disabled. To be safe, kick both AB charger watchdog
- * and pm2xxx watchdog.
- */
- if (di->ac_chg->external &&
- di->usb_chg && di->usb_chg->ops.kick_wd)
- di->usb_chg->ops.kick_wd(di->usb_chg);
-
return di->ac_chg->ops.kick_wd(di->ac_chg);
} else if (di->usb_chg && di->usb_chg->ops.kick_wd &&
di->chg_info.online_chg & USB_CHG)
@@ -517,14 +504,6 @@ static int ab8500_chargalg_ac_en(struct ab8500_chargalg *di, int enable,
di->chg_info.ac_iset_ua = iset_ua;
di->chg_info.ac_vset_uv = vset_uv;
- /* Enable external charger */
- if (enable && di->ac_chg->external &&
- !ab8500_chargalg_ex_ac_enable_toggle) {
- blocking_notifier_call_chain(&charger_notifier_list,
- 0, di->dev);
- ab8500_chargalg_ex_ac_enable_toggle++;
- }
-
return di->ac_chg->ops.enable(di->ac_chg, enable, vset_uv, iset_ua);
}
@@ -1217,6 +1196,34 @@ static void ab8500_chargalg_external_power_changed(struct power_supply *psy)
}
/**
+ * ab8500_chargalg_time_to_restart() - time to restart CC/CV charging?
+ * @di: charging algorithm state
+ *
+ * This checks if the voltage or capacity of the battery has fallen so
+ * low that we need to restart the CC/CV charge cycle.
+ */
+static bool ab8500_chargalg_time_to_restart(struct ab8500_chargalg *di)
+{
+ struct power_supply_battery_info *bi = di->bm->bi;
+
+ /* Sanity check - these need to have some reasonable values */
+ if (!di->batt_data.volt_uv || !di->batt_data.percent)
+ return false;
+
+ /* Some batteries tell us at which voltage we should restart charging */
+ if (bi->charge_restart_voltage_uv > 0) {
+ if (di->batt_data.volt_uv <= bi->charge_restart_voltage_uv)
+ return true;
+ /* Else we restart as we reach a certain capacity */
+ } else {
+ if (di->batt_data.percent <= AB8500_RECHARGE_CAP)
+ return true;
+ }
+
+ return false;
+}
+
+/**
* ab8500_chargalg_algorithm() - Main function for the algorithm
* @di: pointer to the ab8500_chargalg structure
*
@@ -1459,7 +1466,7 @@ static void ab8500_chargalg_algorithm(struct ab8500_chargalg *di)
fallthrough;
case STATE_WAIT_FOR_RECHARGE:
- if (di->batt_data.percent <= AB8500_RECHARGE_CAP)
+ if (ab8500_chargalg_time_to_restart(di))
ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
break;
@@ -1486,6 +1493,14 @@ static void ab8500_chargalg_algorithm(struct ab8500_chargalg *di)
ab8500_chargalg_stop_maintenance_timer(di);
ab8500_chargalg_state_to(di, STATE_MAINTENANCE_B_INIT);
}
+ /*
+ * This happens if the voltage drops too quickly during
+ * maintenance charging, especially in older batteries.
+ */
+ if (ab8500_chargalg_time_to_restart(di)) {
+ ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ dev_info(di->dev, "restarted charging from maintenance state A - battery getting old?\n");
+ }
break;
case STATE_MAINTENANCE_B_INIT:
@@ -1510,6 +1525,14 @@ static void ab8500_chargalg_algorithm(struct ab8500_chargalg *di)
ab8500_chargalg_stop_maintenance_timer(di);
ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
}
+ /*
+ * This happens if the voltage drops too quickly during
+ * maintenance charging, especially in older batteries.
+ */
+ if (ab8500_chargalg_time_to_restart(di)) {
+ ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
+ dev_info(di->dev, "restarted charging from maintenance state B - battery getting old?\n");
+ }
break;
case STATE_TEMP_LOWHIGH_INIT:
@@ -1746,7 +1769,6 @@ static void ab8500_chargalg_unbind(struct device *dev, struct device *master,
/* Delete the work queue */
destroy_workqueue(di->chargalg_wq);
- flush_scheduled_work();
}
static const struct component_ops ab8500_chargalg_component_ops = {
diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index d04d087caa50..c19c50442761 100644
--- a/drivers/power/supply/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -1716,29 +1716,6 @@ static int ab8500_charger_usb_en(struct ux500_charger *charger,
return ret;
}
-static int ab8500_external_charger_prepare(struct notifier_block *charger_nb,
- unsigned long event, void *data)
-{
- int ret;
- struct device *dev = data;
- /*Toggle External charger control pin*/
- ret = abx500_set_register_interruptible(dev, AB8500_SYS_CTRL1_BLOCK,
- AB8500_SYS_CHARGER_CONTROL_REG,
- EXTERNAL_CHARGER_DISABLE_REG_VAL);
- if (ret < 0) {
- dev_err(dev, "write reg failed %d\n", ret);
- goto out;
- }
- ret = abx500_set_register_interruptible(dev, AB8500_SYS_CTRL1_BLOCK,
- AB8500_SYS_CHARGER_CONTROL_REG,
- EXTERNAL_CHARGER_ENABLE_REG_VAL);
- if (ret < 0)
- dev_err(dev, "Write reg failed %d\n", ret);
-
-out:
- return ret;
-}
-
/**
* ab8500_charger_usb_check_enable() - enable usb charging
* @charger: pointer to the ux500_charger structure
@@ -3316,10 +3293,6 @@ static int __maybe_unused ab8500_charger_suspend(struct device *dev)
return 0;
}
-static struct notifier_block charger_nb = {
- .notifier_call = ab8500_external_charger_prepare,
-};
-
static char *supply_interface[] = {
"ab8500_chargalg",
"ab8500_fg",
@@ -3378,6 +3351,7 @@ static int ab8500_charger_bind(struct device *dev)
ret = component_bind_all(dev, di);
if (ret) {
dev_err(dev, "can't bind component devices\n");
+ destroy_workqueue(di->charger_wq);
return ret;
}
@@ -3404,8 +3378,6 @@ static void ab8500_charger_unbind(struct device *dev)
/* Delete the work queue */
destroy_workqueue(di->charger_wq);
- flush_scheduled_work();
-
/* Unbind fg, btemp, algorithm */
component_unbind_all(dev, di);
}
@@ -3540,7 +3512,6 @@ static int ab8500_charger_probe(struct platform_device *pdev)
*/
if (!is_ab8505(di->parent))
di->ac_chg.enabled = true;
- di->ac_chg.external = false;
/* USB supply */
/* ux500_charger sub-class */
@@ -3553,7 +3524,6 @@ static int ab8500_charger_probe(struct platform_device *pdev)
di->usb_chg.max_out_curr_ua =
ab8500_charge_output_curr_map[ARRAY_SIZE(ab8500_charge_output_curr_map) - 1];
di->usb_chg.wdt_refresh = CHG_WD_INTERVAL;
- di->usb_chg.external = false;
di->usb_state.usb_current_ua = -1;
mutex_init(&di->charger_attached_mutex);
@@ -3677,17 +3647,11 @@ static int ab8500_charger_probe(struct platform_device *pdev)
goto remove_ab8500_bm;
}
- /* Notifier for external charger enabling */
- if (!di->ac_chg.enabled)
- blocking_notifier_chain_register(
- &charger_notifier_list, &charger_nb);
-
-
di->usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
if (IS_ERR_OR_NULL(di->usb_phy)) {
dev_err(dev, "failed to get usb transceiver\n");
ret = -EINVAL;
- goto out_charger_notifier;
+ goto remove_ab8500_bm;
}
di->nb.notifier_call = ab8500_charger_usb_notifier_call;
ret = usb_register_notifier(di->usb_phy, &di->nb);
@@ -3696,7 +3660,6 @@ static int ab8500_charger_probe(struct platform_device *pdev)
goto put_usb_phy;
}
-
ret = component_master_add_with_match(&pdev->dev,
&ab8500_charger_comp_ops,
match);
@@ -3711,10 +3674,6 @@ free_notifier:
usb_unregister_notifier(di->usb_phy, &di->nb);
put_usb_phy:
usb_put_phy(di->usb_phy);
-out_charger_notifier:
- if (!di->ac_chg.enabled)
- blocking_notifier_chain_unregister(
- &charger_notifier_list, &charger_nb);
remove_ab8500_bm:
ab8500_bm_of_remove(di->usb_chg.psy, di->bm);
return ret;
@@ -3729,9 +3688,6 @@ static int ab8500_charger_remove(struct platform_device *pdev)
usb_unregister_notifier(di->usb_phy, &di->nb);
ab8500_bm_of_remove(di->usb_chg.psy, di->bm);
usb_put_phy(di->usb_phy);
- if (!di->ac_chg.enabled)
- blocking_notifier_chain_unregister(
- &charger_notifier_list, &charger_nb);
return 0;
}
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index 4339fa9ff009..c6c9804280db 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -412,7 +412,7 @@ static int ab8500_fg_add_cap_sample(struct ab8500_fg *di, int sample)
* ab8500_fg_clear_cap_samples() - Clear average filter
* @di: pointer to the ab8500_fg structure
*
- * The capacity filter is is reset to zero.
+ * The capacity filter is reset to zero.
*/
static void ab8500_fg_clear_cap_samples(struct ab8500_fg *di)
{
@@ -3234,7 +3234,6 @@ static int ab8500_fg_remove(struct platform_device *pdev)
struct ab8500_fg *di = platform_get_drvdata(pdev);
destroy_workqueue(di->fg_wq);
- flush_scheduled_work();
component_del(&pdev->dev, &ab8500_fg_component_ops);
list_del(&di->node);
ab8500_fg_sysfs_exit(di);
diff --git a/drivers/power/supply/bq24257_charger.c b/drivers/power/supply/bq24257_charger.c
index 96cb3290bcaa..ecba9ab86faf 100644
--- a/drivers/power/supply/bq24257_charger.c
+++ b/drivers/power/supply/bq24257_charger.c
@@ -287,7 +287,7 @@ static int bq24257_set_input_current_limit(struct bq24257_device *bq,
{
/*
* Address the case where the user manually sets an input current limit
- * while the charger auto-detection mechanism is is active. In this
+ * while the charger auto-detection mechanism is active. In this
* case we want to abort and go straight to the user-specified value.
*/
if (bq->iilimit_autoset_enable)
diff --git a/drivers/power/supply/cros_peripheral_charger.c b/drivers/power/supply/cros_peripheral_charger.c
index 9fe6d826148d..1379afd9698d 100644
--- a/drivers/power/supply/cros_peripheral_charger.c
+++ b/drivers/power/supply/cros_peripheral_charger.c
@@ -63,7 +63,7 @@ static int cros_pchg_ec_command(const struct charger_data *charger,
struct cros_ec_command *msg;
int ret;
- msg = kzalloc(sizeof(*msg) + max(outsize, insize), GFP_KERNEL);
+ msg = kzalloc(struct_size(msg, data, max(outsize, insize)), GFP_KERNEL);
if (!msg)
return -ENOMEM;
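struct_size() comes from <linux/overflow.h>; for a struct with a trailing flexible array it evaluates to roughly the expression below, except that the multiply and add saturate instead of wrapping on overflow, which is why it replaces the open-coded sizeof(*msg) + max(outsize, insize):

/* struct_size(msg, data, n) ~= sizeof(*msg) + n * sizeof(msg->data[0]) */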
diff --git a/drivers/power/supply/goldfish_battery.c b/drivers/power/supply/goldfish_battery.c
index bf1754355c9f..a58d713d75ce 100644
--- a/drivers/power/supply/goldfish_battery.c
+++ b/drivers/power/supply/goldfish_battery.c
@@ -221,10 +221,8 @@ static int goldfish_battery_probe(struct platform_device *pdev)
}
data->irq = platform_get_irq(pdev, 0);
- if (data->irq < 0) {
- dev_err(&pdev->dev, "platform_get_irq failed\n");
+ if (data->irq < 0)
return -ENODEV;
- }
ret = devm_request_irq(&pdev->dev, data->irq,
goldfish_battery_interrupt,
diff --git a/drivers/power/supply/lp8788-charger.c b/drivers/power/supply/lp8788-charger.c
index 397e5a03b7d9..56c57529c228 100644
--- a/drivers/power/supply/lp8788-charger.c
+++ b/drivers/power/supply/lp8788-charger.c
@@ -376,7 +376,7 @@ static int lp8788_update_charger_params(struct platform_device *pdev,
return 0;
}
- /* settting charging parameters */
+ /* setting charging parameters */
for (i = 0; i < pdata->num_chg_params; i++) {
param = pdata->chg_params + i;
diff --git a/drivers/power/supply/max77976_charger.c b/drivers/power/supply/max77976_charger.c
index 8b6c8cfa7503..4fed74511931 100644
--- a/drivers/power/supply/max77976_charger.c
+++ b/drivers/power/supply/max77976_charger.c
@@ -3,7 +3,7 @@
* max77976_charger.c - Driver for the Maxim MAX77976 battery charger
*
* Copyright (C) 2021 Luca Ceresoli
- * Author: Luca Ceresoli <luca@lucaceresoli.net>
+ * Author: Luca Ceresoli <luca.ceresoli@bootlin.com>
*/
#include <linux/i2c.h>
@@ -504,6 +504,6 @@ static struct i2c_driver max77976_driver = {
};
module_i2c_driver(max77976_driver);
-MODULE_AUTHOR("Luca Ceresoli <luca@lucaceresoli.net>");
+MODULE_AUTHOR("Luca Ceresoli <luca.ceresoli@bootlin.com>");
MODULE_DESCRIPTION("Maxim MAX77976 charger driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/olpc_battery.c b/drivers/power/supply/olpc_battery.c
index e0476ec06601..a5da20ffd685 100644
--- a/drivers/power/supply/olpc_battery.c
+++ b/drivers/power/supply/olpc_battery.c
@@ -635,6 +635,7 @@ static int olpc_battery_probe(struct platform_device *pdev)
struct power_supply_config bat_psy_cfg = {};
struct power_supply_config ac_psy_cfg = {};
struct olpc_battery_data *data;
+ struct device_node *np;
uint8_t status;
uint8_t ecver;
int ret;
@@ -649,7 +650,9 @@ static int olpc_battery_probe(struct platform_device *pdev)
if (ret)
return ret;
- if (of_find_compatible_node(NULL, NULL, "olpc,xo1.75-ec")) {
+ np = of_find_compatible_node(NULL, NULL, "olpc,xo1.75-ec");
+ if (np) {
+ of_node_put(np);
/* XO 1.75 */
data->new_proto = true;
data->little_endian = true;
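Background on the refcount handling here (generic OF API behaviour, not specific to this driver):

/* of_find_compatible_node() returns the matched node with its reference
 * count elevated, so the added of_node_put(np) drops that reference once
 * the "is this an XO 1.75?" check is done; previously it was leaked on
 * every probe. */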
diff --git a/drivers/power/supply/pm2301_charger.h b/drivers/power/supply/pm2301_charger.h
deleted file mode 100644
index 74397e377982..000000000000
--- a/drivers/power/supply/pm2301_charger.h
+++ /dev/null
@@ -1,492 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson SA 2012
- *
- * PM2301 power supply interface
- */
-
-#ifndef PM2301_CHARGER_H
-#define PM2301_CHARGER_H
-
-/* Watchdog timeout constant */
-#define WD_TIMER 0x30 /* 4min */
-#define WD_KICK_INTERVAL (30 * HZ)
-
-#define PM2XXX_NUM_INT_REG 0x6
-
-/* Constant voltage/current */
-#define PM2XXX_CONST_CURR 0x0
-#define PM2XXX_CONST_VOLT 0x1
-
-/* Lowest charger voltage is 3.39V -> 0x4E */
-#define LOW_VOLT_REG 0x4E
-
-#define PM2XXX_BATT_CTRL_REG1 0x00
-#define PM2XXX_BATT_CTRL_REG2 0x01
-#define PM2XXX_BATT_CTRL_REG3 0x02
-#define PM2XXX_BATT_CTRL_REG4 0x03
-#define PM2XXX_BATT_CTRL_REG5 0x04
-#define PM2XXX_BATT_CTRL_REG6 0x05
-#define PM2XXX_BATT_CTRL_REG7 0x06
-#define PM2XXX_BATT_CTRL_REG8 0x07
-#define PM2XXX_NTC_CTRL_REG1 0x08
-#define PM2XXX_NTC_CTRL_REG2 0x09
-#define PM2XXX_BATT_CTRL_REG9 0x0A
-#define PM2XXX_BATT_STAT_REG1 0x0B
-#define PM2XXX_INP_VOLT_VPWR2 0x11
-#define PM2XXX_INP_DROP_VPWR2 0x13
-#define PM2XXX_INP_VOLT_VPWR1 0x15
-#define PM2XXX_INP_DROP_VPWR1 0x17
-#define PM2XXX_INP_MODE_VPWR 0x18
-#define PM2XXX_BATT_WD_KICK 0x70
-#define PM2XXX_DEV_VER_STAT 0x0C
-#define PM2XXX_THERM_WARN_CTRL_REG 0x20
-#define PM2XXX_BATT_DISC_REG 0x21
-#define PM2XXX_BATT_LOW_LEV_COMP_REG 0x22
-#define PM2XXX_BATT_LOW_LEV_VAL_REG 0x23
-#define PM2XXX_I2C_PAD_CTRL_REG 0x24
-#define PM2XXX_SW_CTRL_REG 0x26
-#define PM2XXX_LED_CTRL_REG 0x28
-
-#define PM2XXX_REG_INT1 0x40
-#define PM2XXX_MASK_REG_INT1 0x50
-#define PM2XXX_SRCE_REG_INT1 0x60
-#define PM2XXX_REG_INT2 0x41
-#define PM2XXX_MASK_REG_INT2 0x51
-#define PM2XXX_SRCE_REG_INT2 0x61
-#define PM2XXX_REG_INT3 0x42
-#define PM2XXX_MASK_REG_INT3 0x52
-#define PM2XXX_SRCE_REG_INT3 0x62
-#define PM2XXX_REG_INT4 0x43
-#define PM2XXX_MASK_REG_INT4 0x53
-#define PM2XXX_SRCE_REG_INT4 0x63
-#define PM2XXX_REG_INT5 0x44
-#define PM2XXX_MASK_REG_INT5 0x54
-#define PM2XXX_SRCE_REG_INT5 0x64
-#define PM2XXX_REG_INT6 0x45
-#define PM2XXX_MASK_REG_INT6 0x55
-#define PM2XXX_SRCE_REG_INT6 0x65
-
-#define VPWR_OVV 0x0
-#define VSYSTEM_OVV 0x1
-
-/* control Reg 1 */
-#define PM2XXX_CH_RESUME_EN 0x1
-#define PM2XXX_CH_RESUME_DIS 0x0
-
-/* control Reg 2 */
-#define PM2XXX_CH_AUTO_RESUME_EN 0X2
-#define PM2XXX_CH_AUTO_RESUME_DIS 0X0
-#define PM2XXX_CHARGER_ENA 0x4
-#define PM2XXX_CHARGER_DIS 0x0
-
-/* control Reg 3 */
-#define PM2XXX_CH_WD_CC_PHASE_OFF 0x0
-#define PM2XXX_CH_WD_CC_PHASE_5MIN 0x1
-#define PM2XXX_CH_WD_CC_PHASE_10MIN 0x2
-#define PM2XXX_CH_WD_CC_PHASE_30MIN 0x3
-#define PM2XXX_CH_WD_CC_PHASE_60MIN 0x4
-#define PM2XXX_CH_WD_CC_PHASE_120MIN 0x5
-#define PM2XXX_CH_WD_CC_PHASE_240MIN 0x6
-#define PM2XXX_CH_WD_CC_PHASE_360MIN 0x7
-
-#define PM2XXX_CH_WD_CV_PHASE_OFF (0x0<<3)
-#define PM2XXX_CH_WD_CV_PHASE_5MIN (0x1<<3)
-#define PM2XXX_CH_WD_CV_PHASE_10MIN (0x2<<3)
-#define PM2XXX_CH_WD_CV_PHASE_30MIN (0x3<<3)
-#define PM2XXX_CH_WD_CV_PHASE_60MIN (0x4<<3)
-#define PM2XXX_CH_WD_CV_PHASE_120MIN (0x5<<3)
-#define PM2XXX_CH_WD_CV_PHASE_240MIN (0x6<<3)
-#define PM2XXX_CH_WD_CV_PHASE_360MIN (0x7<<3)
-
-/* control Reg 4 */
-#define PM2XXX_CH_WD_PRECH_PHASE_OFF 0x0
-#define PM2XXX_CH_WD_PRECH_PHASE_1MIN 0x1
-#define PM2XXX_CH_WD_PRECH_PHASE_5MIN 0x2
-#define PM2XXX_CH_WD_PRECH_PHASE_10MIN 0x3
-#define PM2XXX_CH_WD_PRECH_PHASE_30MIN 0x4
-#define PM2XXX_CH_WD_PRECH_PHASE_60MIN 0x5
-#define PM2XXX_CH_WD_PRECH_PHASE_120MIN 0x6
-#define PM2XXX_CH_WD_PRECH_PHASE_240MIN 0x7
-
-/* control Reg 5 */
-#define PM2XXX_CH_WD_AUTO_TIMEOUT_NONE 0x0
-#define PM2XXX_CH_WD_AUTO_TIMEOUT_20MIN 0x1
-
-/* control Reg 6 */
-#define PM2XXX_DIR_CH_CC_CURRENT_MASK 0x0F
-#define PM2XXX_DIR_CH_CC_CURRENT_200MA 0x0
-#define PM2XXX_DIR_CH_CC_CURRENT_400MA 0x2
-#define PM2XXX_DIR_CH_CC_CURRENT_600MA 0x3
-#define PM2XXX_DIR_CH_CC_CURRENT_800MA 0x4
-#define PM2XXX_DIR_CH_CC_CURRENT_1000MA 0x5
-#define PM2XXX_DIR_CH_CC_CURRENT_1200MA 0x6
-#define PM2XXX_DIR_CH_CC_CURRENT_1400MA 0x7
-#define PM2XXX_DIR_CH_CC_CURRENT_1600MA 0x8
-#define PM2XXX_DIR_CH_CC_CURRENT_1800MA 0x9
-#define PM2XXX_DIR_CH_CC_CURRENT_2000MA 0xA
-#define PM2XXX_DIR_CH_CC_CURRENT_2200MA 0xB
-#define PM2XXX_DIR_CH_CC_CURRENT_2400MA 0xC
-#define PM2XXX_DIR_CH_CC_CURRENT_2600MA 0xD
-#define PM2XXX_DIR_CH_CC_CURRENT_2800MA 0xE
-#define PM2XXX_DIR_CH_CC_CURRENT_3000MA 0xF
-
-#define PM2XXX_CH_PRECH_CURRENT_MASK 0x30
-#define PM2XXX_CH_PRECH_CURRENT_25MA (0x0<<4)
-#define PM2XXX_CH_PRECH_CURRENT_50MA (0x1<<4)
-#define PM2XXX_CH_PRECH_CURRENT_75MA (0x2<<4)
-#define PM2XXX_CH_PRECH_CURRENT_100MA (0x3<<4)
-
-#define PM2XXX_CH_EOC_CURRENT_MASK 0xC0
-#define PM2XXX_CH_EOC_CURRENT_100MA (0x0<<6)
-#define PM2XXX_CH_EOC_CURRENT_150MA (0x1<<6)
-#define PM2XXX_CH_EOC_CURRENT_300MA (0x2<<6)
-#define PM2XXX_CH_EOC_CURRENT_400MA (0x3<<6)
-
-/* control Reg 7 */
-#define PM2XXX_CH_PRECH_VOL_2_5 0x0
-#define PM2XXX_CH_PRECH_VOL_2_7 0x1
-#define PM2XXX_CH_PRECH_VOL_2_9 0x2
-#define PM2XXX_CH_PRECH_VOL_3_1 0x3
-
-#define PM2XXX_CH_VRESUME_VOL_3_2 (0x0<<2)
-#define PM2XXX_CH_VRESUME_VOL_3_4 (0x1<<2)
-#define PM2XXX_CH_VRESUME_VOL_3_6 (0x2<<2)
-#define PM2XXX_CH_VRESUME_VOL_3_8 (0x3<<2)
-
-/* control Reg 8 */
-#define PM2XXX_CH_VOLT_MASK 0x3F
-#define PM2XXX_CH_VOLT_3_5 0x0
-#define PM2XXX_CH_VOLT_3_5225 0x1
-#define PM2XXX_CH_VOLT_3_6 0x4
-#define PM2XXX_CH_VOLT_3_7 0x8
-#define PM2XXX_CH_VOLT_4_0 0x14
-#define PM2XXX_CH_VOLT_4_175 0x1B
-#define PM2XXX_CH_VOLT_4_2 0x1C
-#define PM2XXX_CH_VOLT_4_275 0x1F
-#define PM2XXX_CH_VOLT_4_3 0x20
-
-/*NTC control register 1*/
-#define PM2XXX_BTEMP_HIGH_TH_45 0x0
-#define PM2XXX_BTEMP_HIGH_TH_50 0x1
-#define PM2XXX_BTEMP_HIGH_TH_55 0x2
-#define PM2XXX_BTEMP_HIGH_TH_60 0x3
-#define PM2XXX_BTEMP_HIGH_TH_65 0x4
-
-#define PM2XXX_BTEMP_LOW_TH_N5 (0x0<<3)
-#define PM2XXX_BTEMP_LOW_TH_0 (0x1<<3)
-#define PM2XXX_BTEMP_LOW_TH_5 (0x2<<3)
-#define PM2XXX_BTEMP_LOW_TH_10 (0x3<<3)
-
-/*NTC control register 2*/
-#define PM2XXX_NTC_BETA_COEFF_3477 0x0
-#define PM2XXX_NTC_BETA_COEFF_3964 0x1
-
-#define PM2XXX_NTC_RES_10K (0x0<<2)
-#define PM2XXX_NTC_RES_47K (0x1<<2)
-#define PM2XXX_NTC_RES_100K (0x2<<2)
-#define PM2XXX_NTC_RES_NO_NTC (0x3<<2)
-
-/* control Reg 9 */
-#define PM2XXX_CH_CC_MODEDROP_EN 1
-#define PM2XXX_CH_CC_MODEDROP_DIS 0
-
-#define PM2XXX_CH_CC_REDUCED_CURRENT_100MA (0x0<<1)
-#define PM2XXX_CH_CC_REDUCED_CURRENT_200MA (0x1<<1)
-#define PM2XXX_CH_CC_REDUCED_CURRENT_400MA (0x2<<1)
-#define PM2XXX_CH_CC_REDUCED_CURRENT_IDENT (0x3<<1)
-
-#define PM2XXX_CHARCHING_INFO_DIS (0<<3)
-#define PM2XXX_CHARCHING_INFO_EN (1<<3)
-
-#define PM2XXX_CH_150MV_DROP_300MV (0<<4)
-#define PM2XXX_CH_150MV_DROP_150MV (1<<4)
-
-
-/* charger status register */
-#define PM2XXX_CHG_STATUS_OFF 0x0
-#define PM2XXX_CHG_STATUS_ON 0x1
-#define PM2XXX_CHG_STATUS_FULL 0x2
-#define PM2XXX_CHG_STATUS_ERR 0x3
-#define PM2XXX_CHG_STATUS_WAIT 0x4
-#define PM2XXX_CHG_STATUS_NOBAT 0x5
-
-/* Input charger voltage VPWR2 */
-#define PM2XXX_VPWR2_OVV_6_0 0x0
-#define PM2XXX_VPWR2_OVV_6_3 0x1
-#define PM2XXX_VPWR2_OVV_10 0x2
-#define PM2XXX_VPWR2_OVV_NONE 0x3
-
-/* Input charger drop VPWR2 */
-#define PM2XXX_VPWR2_HW_OPT_EN (0x1<<4)
-#define PM2XXX_VPWR2_HW_OPT_DIS (0x0<<4)
-
-#define PM2XXX_VPWR2_VALID_EN (0x1<<3)
-#define PM2XXX_VPWR2_VALID_DIS (0x0<<3)
-
-#define PM2XXX_VPWR2_DROP_EN (0x1<<2)
-#define PM2XXX_VPWR2_DROP_DIS (0x0<<2)
-
-/* Input charger voltage VPWR1 */
-#define PM2XXX_VPWR1_OVV_6_0 0x0
-#define PM2XXX_VPWR1_OVV_6_3 0x1
-#define PM2XXX_VPWR1_OVV_10 0x2
-#define PM2XXX_VPWR1_OVV_NONE 0x3
-
-/* Input charger drop VPWR1 */
-#define PM2XXX_VPWR1_HW_OPT_EN (0x1<<4)
-#define PM2XXX_VPWR1_HW_OPT_DIS (0x0<<4)
-
-#define PM2XXX_VPWR1_VALID_EN (0x1<<3)
-#define PM2XXX_VPWR1_VALID_DIS (0x0<<3)
-
-#define PM2XXX_VPWR1_DROP_EN (0x1<<2)
-#define PM2XXX_VPWR1_DROP_DIS (0x0<<2)
-
-/* Battery low level comparator control register */
-#define PM2XXX_VBAT_LOW_MONITORING_DIS 0x0
-#define PM2XXX_VBAT_LOW_MONITORING_ENA 0x1
-
-/* Battery low level value control register */
-#define PM2XXX_VBAT_LOW_LEVEL_2_3 0x0
-#define PM2XXX_VBAT_LOW_LEVEL_2_4 0x1
-#define PM2XXX_VBAT_LOW_LEVEL_2_5 0x2
-#define PM2XXX_VBAT_LOW_LEVEL_2_6 0x3
-#define PM2XXX_VBAT_LOW_LEVEL_2_7 0x4
-#define PM2XXX_VBAT_LOW_LEVEL_2_8 0x5
-#define PM2XXX_VBAT_LOW_LEVEL_2_9 0x6
-#define PM2XXX_VBAT_LOW_LEVEL_3_0 0x7
-#define PM2XXX_VBAT_LOW_LEVEL_3_1 0x8
-#define PM2XXX_VBAT_LOW_LEVEL_3_2 0x9
-#define PM2XXX_VBAT_LOW_LEVEL_3_3 0xA
-#define PM2XXX_VBAT_LOW_LEVEL_3_4 0xB
-#define PM2XXX_VBAT_LOW_LEVEL_3_5 0xC
-#define PM2XXX_VBAT_LOW_LEVEL_3_6 0xD
-#define PM2XXX_VBAT_LOW_LEVEL_3_7 0xE
-#define PM2XXX_VBAT_LOW_LEVEL_3_8 0xF
-#define PM2XXX_VBAT_LOW_LEVEL_3_9 0x10
-#define PM2XXX_VBAT_LOW_LEVEL_4_0 0x11
-#define PM2XXX_VBAT_LOW_LEVEL_4_1 0x12
-#define PM2XXX_VBAT_LOW_LEVEL_4_2 0x13
-
-/* SW CTRL */
-#define PM2XXX_SWCTRL_HW 0x0
-#define PM2XXX_SWCTRL_SW 0x1
-
-
-/* LED Driver Control */
-#define PM2XXX_LED_CURRENT_MASK 0x0C
-#define PM2XXX_LED_CURRENT_2_5MA (0X0<<2)
-#define PM2XXX_LED_CURRENT_1MA (0X1<<2)
-#define PM2XXX_LED_CURRENT_5MA (0X2<<2)
-#define PM2XXX_LED_CURRENT_10MA (0X3<<2)
-
-#define PM2XXX_LED_SELECT_MASK 0x02
-#define PM2XXX_LED_SELECT_EN (0X0<<1)
-#define PM2XXX_LED_SELECT_DIS (0X1<<1)
-
-#define PM2XXX_ANTI_OVERSHOOT_MASK 0x01
-#define PM2XXX_ANTI_OVERSHOOT_DIS 0X0
-#define PM2XXX_ANTI_OVERSHOOT_EN 0X1
-
-enum pm2xxx_reg_int1 {
- PM2XXX_INT1_ITVBATDISCONNECT = 0x02,
- PM2XXX_INT1_ITVBATLOWR = 0x04,
- PM2XXX_INT1_ITVBATLOWF = 0x08,
-};
-
-enum pm2xxx_mask_reg_int1 {
- PM2XXX_INT1_M_ITVBATDISCONNECT = 0x02,
- PM2XXX_INT1_M_ITVBATLOWR = 0x04,
- PM2XXX_INT1_M_ITVBATLOWF = 0x08,
-};
-
-enum pm2xxx_source_reg_int1 {
- PM2XXX_INT1_S_ITVBATDISCONNECT = 0x02,
- PM2XXX_INT1_S_ITVBATLOWR = 0x04,
- PM2XXX_INT1_S_ITVBATLOWF = 0x08,
-};
-
-enum pm2xxx_reg_int2 {
- PM2XXX_INT2_ITVPWR2PLUG = 0x01,
- PM2XXX_INT2_ITVPWR2UNPLUG = 0x02,
- PM2XXX_INT2_ITVPWR1PLUG = 0x04,
- PM2XXX_INT2_ITVPWR1UNPLUG = 0x08,
-};
-
-enum pm2xxx_mask_reg_int2 {
- PM2XXX_INT2_M_ITVPWR2PLUG = 0x01,
- PM2XXX_INT2_M_ITVPWR2UNPLUG = 0x02,
- PM2XXX_INT2_M_ITVPWR1PLUG = 0x04,
- PM2XXX_INT2_M_ITVPWR1UNPLUG = 0x08,
-};
-
-enum pm2xxx_source_reg_int2 {
- PM2XXX_INT2_S_ITVPWR2PLUG = 0x03,
- PM2XXX_INT2_S_ITVPWR1PLUG = 0x0c,
-};
-
-enum pm2xxx_reg_int3 {
- PM2XXX_INT3_ITCHPRECHARGEWD = 0x01,
- PM2XXX_INT3_ITCHCCWD = 0x02,
- PM2XXX_INT3_ITCHCVWD = 0x04,
- PM2XXX_INT3_ITAUTOTIMEOUTWD = 0x08,
-};
-
-enum pm2xxx_mask_reg_int3 {
- PM2XXX_INT3_M_ITCHPRECHARGEWD = 0x01,
- PM2XXX_INT3_M_ITCHCCWD = 0x02,
- PM2XXX_INT3_M_ITCHCVWD = 0x04,
- PM2XXX_INT3_M_ITAUTOTIMEOUTWD = 0x08,
-};
-
-enum pm2xxx_source_reg_int3 {
- PM2XXX_INT3_S_ITCHPRECHARGEWD = 0x01,
- PM2XXX_INT3_S_ITCHCCWD = 0x02,
- PM2XXX_INT3_S_ITCHCVWD = 0x04,
- PM2XXX_INT3_S_ITAUTOTIMEOUTWD = 0x08,
-};
-
-enum pm2xxx_reg_int4 {
- PM2XXX_INT4_ITBATTEMPCOLD = 0x01,
- PM2XXX_INT4_ITBATTEMPHOT = 0x02,
- PM2XXX_INT4_ITVPWR2OVV = 0x04,
- PM2XXX_INT4_ITVPWR1OVV = 0x08,
- PM2XXX_INT4_ITCHARGINGON = 0x10,
- PM2XXX_INT4_ITVRESUME = 0x20,
- PM2XXX_INT4_ITBATTFULL = 0x40,
- PM2XXX_INT4_ITCVPHASE = 0x80,
-};
-
-enum pm2xxx_mask_reg_int4 {
- PM2XXX_INT4_M_ITBATTEMPCOLD = 0x01,
- PM2XXX_INT4_M_ITBATTEMPHOT = 0x02,
- PM2XXX_INT4_M_ITVPWR2OVV = 0x04,
- PM2XXX_INT4_M_ITVPWR1OVV = 0x08,
- PM2XXX_INT4_M_ITCHARGINGON = 0x10,
- PM2XXX_INT4_M_ITVRESUME = 0x20,
- PM2XXX_INT4_M_ITBATTFULL = 0x40,
- PM2XXX_INT4_M_ITCVPHASE = 0x80,
-};
-
-enum pm2xxx_source_reg_int4 {
- PM2XXX_INT4_S_ITBATTEMPCOLD = 0x01,
- PM2XXX_INT4_S_ITBATTEMPHOT = 0x02,
- PM2XXX_INT4_S_ITVPWR2OVV = 0x04,
- PM2XXX_INT4_S_ITVPWR1OVV = 0x08,
- PM2XXX_INT4_S_ITCHARGINGON = 0x10,
- PM2XXX_INT4_S_ITVRESUME = 0x20,
- PM2XXX_INT4_S_ITBATTFULL = 0x40,
- PM2XXX_INT4_S_ITCVPHASE = 0x80,
-};
-
-enum pm2xxx_reg_int5 {
- PM2XXX_INT5_ITTHERMALSHUTDOWNRISE = 0x01,
- PM2XXX_INT5_ITTHERMALSHUTDOWNFALL = 0x02,
- PM2XXX_INT5_ITTHERMALWARNINGRISE = 0x04,
- PM2XXX_INT5_ITTHERMALWARNINGFALL = 0x08,
- PM2XXX_INT5_ITVSYSTEMOVV = 0x10,
-};
-
-enum pm2xxx_mask_reg_int5 {
- PM2XXX_INT5_M_ITTHERMALSHUTDOWNRISE = 0x01,
- PM2XXX_INT5_M_ITTHERMALSHUTDOWNFALL = 0x02,
- PM2XXX_INT5_M_ITTHERMALWARNINGRISE = 0x04,
- PM2XXX_INT5_M_ITTHERMALWARNINGFALL = 0x08,
- PM2XXX_INT5_M_ITVSYSTEMOVV = 0x10,
-};
-
-enum pm2xxx_source_reg_int5 {
- PM2XXX_INT5_S_ITTHERMALSHUTDOWNRISE = 0x01,
- PM2XXX_INT5_S_ITTHERMALSHUTDOWNFALL = 0x02,
- PM2XXX_INT5_S_ITTHERMALWARNINGRISE = 0x04,
- PM2XXX_INT5_S_ITTHERMALWARNINGFALL = 0x08,
- PM2XXX_INT5_S_ITVSYSTEMOVV = 0x10,
-};
-
-enum pm2xxx_reg_int6 {
- PM2XXX_INT6_ITVPWR2DROP = 0x01,
- PM2XXX_INT6_ITVPWR1DROP = 0x02,
- PM2XXX_INT6_ITVPWR2VALIDRISE = 0x04,
- PM2XXX_INT6_ITVPWR2VALIDFALL = 0x08,
- PM2XXX_INT6_ITVPWR1VALIDRISE = 0x10,
- PM2XXX_INT6_ITVPWR1VALIDFALL = 0x20,
-};
-
-enum pm2xxx_mask_reg_int6 {
- PM2XXX_INT6_M_ITVPWR2DROP = 0x01,
- PM2XXX_INT6_M_ITVPWR1DROP = 0x02,
- PM2XXX_INT6_M_ITVPWR2VALIDRISE = 0x04,
- PM2XXX_INT6_M_ITVPWR2VALIDFALL = 0x08,
- PM2XXX_INT6_M_ITVPWR1VALIDRISE = 0x10,
- PM2XXX_INT6_M_ITVPWR1VALIDFALL = 0x20,
-};
-
-enum pm2xxx_source_reg_int6 {
- PM2XXX_INT6_S_ITVPWR2DROP = 0x01,
- PM2XXX_INT6_S_ITVPWR1DROP = 0x02,
- PM2XXX_INT6_S_ITVPWR2VALIDRISE = 0x04,
- PM2XXX_INT6_S_ITVPWR2VALIDFALL = 0x08,
- PM2XXX_INT6_S_ITVPWR1VALIDRISE = 0x10,
- PM2XXX_INT6_S_ITVPWR1VALIDFALL = 0x20,
-};
-
-struct pm2xxx_charger_info {
- int charger_connected;
- int charger_online;
- int cv_active;
- bool wd_expired;
-};
-
-struct pm2xxx_charger_event_flags {
- bool mainextchnotok;
- bool main_thermal_prot;
- bool ovv;
- bool chgwdexp;
-};
-
-struct pm2xxx_interrupts {
- u8 reg[PM2XXX_NUM_INT_REG];
- int (*handler[PM2XXX_NUM_INT_REG])(void *, int);
-};
-
-struct pm2xxx_config {
- struct i2c_client *pm2xxx_i2c;
- struct i2c_device_id *pm2xxx_id;
-};
-
-struct pm2xxx_irq {
- char *name;
- irqreturn_t (*isr)(int irq, void *data);
-};
-
-struct pm2xxx_charger {
- struct device *dev;
- u8 chip_id;
- bool vddadc_en_ac;
- struct pm2xxx_config config;
- bool ac_conn;
- unsigned int gpio_irq;
- int vbat;
- int old_vbat;
- int failure_case;
- int failure_input_ovv;
- unsigned int lpn_pin;
- struct pm2xxx_interrupts *pm2_int;
- struct regulator *regu;
- struct pm2xxx_bm_data *bat;
- struct mutex lock;
- struct ab8500 *parent;
- struct pm2xxx_charger_info ac;
- struct pm2xxx_charger_platform_data *pdata;
- struct workqueue_struct *charger_wq;
- struct delayed_work check_vbat_work;
- struct work_struct ac_work;
- struct work_struct check_main_thermal_prot_work;
- struct delayed_work check_hw_failure_work;
- struct ux500_charger ac_chg;
- struct power_supply_desc ac_chg_desc;
- struct pm2xxx_charger_event_flags flags;
-};
-
-#endif /* PM2301_CHARGER_H */
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index 470253c337c7..4b5fb172fa99 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -263,13 +263,13 @@ static int power_supply_check_supplies(struct power_supply *psy)
return 0;
/* All supplies found, allocate char ** array for filling */
- psy->supplied_from = devm_kzalloc(&psy->dev, sizeof(psy->supplied_from),
+ psy->supplied_from = devm_kzalloc(&psy->dev, sizeof(*psy->supplied_from),
GFP_KERNEL);
if (!psy->supplied_from)
return -ENOMEM;
*psy->supplied_from = devm_kcalloc(&psy->dev,
- cnt - 1, sizeof(char *),
+ cnt - 1, sizeof(**psy->supplied_from),
GFP_KERNEL);
if (!*psy->supplied_from)
return -ENOMEM;
diff --git a/drivers/power/supply/surface_battery.c b/drivers/power/supply/surface_battery.c
index 5ec2e6bb2465..540707882bb0 100644
--- a/drivers/power/supply/surface_battery.c
+++ b/drivers/power/supply/surface_battery.c
@@ -802,7 +802,7 @@ static int spwr_battery_register(struct spwr_battery_device *bat)
if (IS_ERR(bat->psy))
return PTR_ERR(bat->psy);
- return ssam_notifier_register(bat->sdev->ctrl, &bat->notif);
+ return ssam_device_notifier_register(bat->sdev, &bat->notif);
}
@@ -837,7 +837,7 @@ static void surface_battery_remove(struct ssam_device *sdev)
{
struct spwr_battery_device *bat = ssam_device_get_drvdata(sdev);
- ssam_notifier_unregister(sdev->ctrl, &bat->notif);
+ ssam_device_notifier_unregister(sdev, &bat->notif);
cancel_delayed_work_sync(&bat->update_work);
}
diff --git a/drivers/power/supply/surface_charger.c b/drivers/power/supply/surface_charger.c
index a060c36c7766..59182d55742d 100644
--- a/drivers/power/supply/surface_charger.c
+++ b/drivers/power/supply/surface_charger.c
@@ -216,7 +216,7 @@ static int spwr_ac_register(struct spwr_ac_device *ac)
if (IS_ERR(ac->psy))
return PTR_ERR(ac->psy);
- return ssam_notifier_register(ac->sdev->ctrl, &ac->notif);
+ return ssam_device_notifier_register(ac->sdev, &ac->notif);
}
@@ -251,7 +251,7 @@ static void surface_ac_remove(struct ssam_device *sdev)
{
struct spwr_ac_device *ac = ssam_device_get_drvdata(sdev);
- ssam_notifier_unregister(sdev->ctrl, &ac->notif);
+ ssam_device_notifier_unregister(sdev, &ac->notif);
}
static const struct spwr_psy_properties spwr_psy_props_adp1 = {
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 7150b1d0159e..d8373cb04f90 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -4784,10 +4784,10 @@ int regulator_bulk_get(struct device *dev, int num_consumers,
consumers[i].consumer = regulator_get(dev,
consumers[i].supply);
if (IS_ERR(consumers[i].consumer)) {
- consumers[i].consumer = NULL;
ret = dev_err_probe(dev, PTR_ERR(consumers[i].consumer),
"Failed to get supply '%s'",
consumers[i].supply);
+ consumers[i].consumer = NULL;
goto err;
}
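The reordering matters because dev_err_probe() must see the original error pointer; a condensed view of why (sketch, not code from the patch):

/* old order: the consumer pointer was cleared first, so dev_err_probe()
 *            was handed PTR_ERR(NULL) == 0 and ret ended up 0, turning a
 *            real failure into an apparent success
 * new order: the real error is captured first, and the pointer is only
 *            cleared afterwards for the regulator_put() cleanup loop */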
diff --git a/drivers/regulator/cros-ec-regulator.c b/drivers/regulator/cros-ec-regulator.c
index c4754f3cf233..1591636f86c3 100644
--- a/drivers/regulator/cros-ec-regulator.c
+++ b/drivers/regulator/cros-ec-regulator.c
@@ -22,36 +22,6 @@ struct cros_ec_regulator_data {
u16 num_voltages;
};
-static int cros_ec_cmd(struct cros_ec_device *ec, u32 version, u32 command,
- void *outdata, u32 outsize, void *indata, u32 insize)
-{
- struct cros_ec_command *msg;
- int ret;
-
- msg = kzalloc(sizeof(*msg) + max(outsize, insize), GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- msg->version = version;
- msg->command = command;
- msg->outsize = outsize;
- msg->insize = insize;
-
- if (outdata && outsize > 0)
- memcpy(msg->data, outdata, outsize);
-
- ret = cros_ec_cmd_xfer_status(ec, msg);
- if (ret < 0)
- goto cleanup;
-
- if (insize)
- memcpy(indata, msg->data, insize);
-
-cleanup:
- kfree(msg);
- return ret;
-}
-
static int cros_ec_regulator_enable(struct regulator_dev *dev)
{
struct cros_ec_regulator_data *data = rdev_get_drvdata(dev);
@@ -61,7 +31,7 @@ static int cros_ec_regulator_enable(struct regulator_dev *dev)
};
return cros_ec_cmd(data->ec_dev, 0, EC_CMD_REGULATOR_ENABLE, &cmd,
- sizeof(cmd), NULL, 0);
+ sizeof(cmd), NULL, 0);
}
static int cros_ec_regulator_disable(struct regulator_dev *dev)
@@ -73,7 +43,7 @@ static int cros_ec_regulator_disable(struct regulator_dev *dev)
};
return cros_ec_cmd(data->ec_dev, 0, EC_CMD_REGULATOR_ENABLE, &cmd,
- sizeof(cmd), NULL, 0);
+ sizeof(cmd), NULL, 0);
}
static int cros_ec_regulator_is_enabled(struct regulator_dev *dev)
@@ -161,7 +131,7 @@ static int cros_ec_regulator_init_info(struct device *dev,
int ret;
ret = cros_ec_cmd(data->ec_dev, 0, EC_CMD_REGULATOR_GET_INFO, &cmd,
- sizeof(cmd), &resp, sizeof(resp));
+ sizeof(cmd), &resp, sizeof(resp));
if (ret < 0)
return ret;
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
index 4a3352821b1d..38383e7de3c1 100644
--- a/drivers/remoteproc/imx_rproc.c
+++ b/drivers/remoteproc/imx_rproc.c
@@ -594,16 +594,17 @@ static int imx_rproc_addr_init(struct imx_rproc *priv,
node = of_parse_phandle(np, "memory-region", a);
/* Not map vdevbuffer, vdevring region */
- if (!strncmp(node->name, "vdev", strlen("vdev")))
+ if (!strncmp(node->name, "vdev", strlen("vdev"))) {
+ of_node_put(node);
continue;
+ }
err = of_address_to_resource(node, 0, &res);
+ of_node_put(node);
if (err) {
dev_err(dev, "unable to resolve memory region\n");
return err;
}
- of_node_put(node);
-
if (b >= IMX_RPROC_MEM_MAX)
break;
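
The imx_rproc change above is a reference-count fix: every of_parse_phandle() result must be dropped with of_node_put() on every exit path, including the early continue. A reduced sketch of the rule (property name and "vdev" check taken from the hunk, everything else illustrative):

#include <linux/of.h>
#include <linux/string.h>

static void example_walk_memory_regions(struct device_node *np, int count)
{
	struct device_node *node;
	int i;

	for (i = 0; i < count; i++) {
		node = of_parse_phandle(np, "memory-region", i);
		if (!node)
			continue;

		/* skip vdev regions, but still release the reference */
		if (!strncmp(node->name, "vdev", strlen("vdev"))) {
			of_node_put(node);
			continue;
		}

		/* ... translate and record the region ... */
		of_node_put(node);
	}
}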
diff --git a/drivers/remoteproc/keystone_remoteproc.c b/drivers/remoteproc/keystone_remoteproc.c
index 54781f553f4e..594a9b43b7ae 100644
--- a/drivers/remoteproc/keystone_remoteproc.c
+++ b/drivers/remoteproc/keystone_remoteproc.c
@@ -410,10 +410,9 @@ static int keystone_rproc_probe(struct platform_device *pdev)
/* enable clock for accessing DSP internal memories */
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
dev_err(dev, "failed to enable clock, status = %d\n", ret);
- pm_runtime_put_noidle(dev);
goto disable_rpm;
}
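
For reference, pm_runtime_resume_and_get() already drops the usage count when the resume fails, which is why the explicit pm_runtime_put_noidle() disappears from the error path here and in the qcom_q6v5_adsp hunk further down. A minimal sketch of the resulting pattern:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int example_use_device(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;	/* usage count already balanced on failure */

	/* ... access the now-powered device ... */

	pm_runtime_put(dev);
	return 0;
}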
diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c
index 47b2a40e1b4a..d421a2ccaa1e 100644
--- a/drivers/remoteproc/mtk_scp.c
+++ b/drivers/remoteproc/mtk_scp.c
@@ -401,6 +401,14 @@ static int mt8186_scp_before_load(struct mtk_scp *scp)
writel(0x0, scp->reg_base + MT8186_SCP_L1_SRAM_PD_P1);
writel(0x0, scp->reg_base + MT8186_SCP_L1_SRAM_PD_p2);
+ /*
+ * Set I-cache and D-cache size before loading SCP FW.
+ * SCP SRAM logical address may change when cache size setting differs.
+ */
+ writel(MT8183_SCP_CACHE_CON_WAYEN | MT8183_SCP_CACHESIZE_8KB,
+ scp->reg_base + MT8183_SCP_CACHE_CON);
+ writel(MT8183_SCP_CACHESIZE_8KB, scp->reg_base + MT8183_SCP_DCACHE_CON);
+
return 0;
}
@@ -943,7 +951,19 @@ static const struct mtk_scp_of_data mt8186_of_data = {
.scp_da_to_va = mt8183_scp_da_to_va,
.host_to_scp_reg = MT8183_HOST_TO_SCP,
.host_to_scp_int_bit = MT8183_HOST_IPC_INT_BIT,
- .ipi_buf_offset = 0x7bdb0,
+ .ipi_buf_offset = 0x3bdb0,
+};
+
+static const struct mtk_scp_of_data mt8188_of_data = {
+ .scp_clk_get = mt8195_scp_clk_get,
+ .scp_before_load = mt8192_scp_before_load,
+ .scp_irq_handler = mt8192_scp_irq_handler,
+ .scp_reset_assert = mt8192_scp_reset_assert,
+ .scp_reset_deassert = mt8192_scp_reset_deassert,
+ .scp_stop = mt8192_scp_stop,
+ .scp_da_to_va = mt8192_scp_da_to_va,
+ .host_to_scp_reg = MT8192_GIPC_IN_SET,
+ .host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
};
static const struct mtk_scp_of_data mt8192_of_data = {
@@ -973,6 +993,7 @@ static const struct mtk_scp_of_data mt8195_of_data = {
static const struct of_device_id mtk_scp_of_match[] = {
{ .compatible = "mediatek,mt8183-scp", .data = &mt8183_of_data },
{ .compatible = "mediatek,mt8186-scp", .data = &mt8186_of_data },
+ { .compatible = "mediatek,mt8188-scp", .data = &mt8188_of_data },
{ .compatible = "mediatek,mt8192-scp", .data = &mt8192_of_data },
{ .compatible = "mediatek,mt8195-scp", .data = &mt8195_of_data },
{},
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
index 32a588fefbdc..430fab0266ed 100644
--- a/drivers/remoteproc/omap_remoteproc.c
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -243,7 +243,7 @@ static inline int omap_rproc_get_timer_irq(struct omap_rproc_timer *timer)
* omap_rproc_ack_timer_irq() - acknowledge a timer irq
* @timer: handle to a OMAP rproc timer
*
- * This function is used to clear the irq associated with a watchdog timer. The
+ * This function is used to clear the irq associated with a watchdog timer.
* The function is called by the OMAP remoteproc upon a watchdog event on the
* remote processor to clear the interrupt status of the watchdog timer.
*/
@@ -303,7 +303,7 @@ static irqreturn_t omap_rproc_watchdog_isr(int irq, void *data)
* @configure: boolean flag used to acquire and configure the timer handle
*
* This function is used primarily to enable the timers associated with
- * a remoteproc. The configure flag is provided to allow the driver to
+ * a remoteproc. The configure flag is provided to allow the driver
* to either acquire and start a timer (during device initialization) or
* to just start a timer (during a resume operation).
*
@@ -443,7 +443,7 @@ free_timers:
* @configure: boolean flag used to release the timer handle
*
* This function is used primarily to disable the timers associated with
- * a remoteproc. The configure flag is provided to allow the driver to
+ * a remoteproc. The configure flag is provided to allow the driver
* to either stop and release a timer (during device shutdown) or to just
* stop a timer (during a suspend operation).
*
diff --git a/drivers/remoteproc/pru_rproc.c b/drivers/remoteproc/pru_rproc.c
index 1777a01fa84e..128bf9912f2c 100644
--- a/drivers/remoteproc/pru_rproc.c
+++ b/drivers/remoteproc/pru_rproc.c
@@ -897,6 +897,7 @@ static const struct of_device_id pru_rproc_match[] = {
{ .compatible = "ti,j721e-pru", .data = &k3_pru_data },
{ .compatible = "ti,j721e-rtu", .data = &k3_rtu_data },
{ .compatible = "ti,j721e-tx-pru", .data = &k3_tx_pru_data },
+ { .compatible = "ti,am625-pru", .data = &k3_pru_data },
{},
};
MODULE_DEVICE_TABLE(of, pru_rproc_match);
diff --git a/drivers/remoteproc/qcom_common.c b/drivers/remoteproc/qcom_common.c
index 4b91e3c9eafa..020349f8979d 100644
--- a/drivers/remoteproc/qcom_common.c
+++ b/drivers/remoteproc/qcom_common.c
@@ -50,7 +50,7 @@ struct minidump_region {
};
/**
- * struct minidump_subsystem_toc: Subsystem's SMEM Table of content
+ * struct minidump_subsystem - Subsystem's SMEM Table of content
* @status : Subsystem toc init status
* @enabled : if set to 1, this region would be copied during coredump
* @encryption_status: Encryption status for this subsystem
@@ -68,7 +68,7 @@ struct minidump_subsystem {
};
/**
- * struct minidump_global_toc: Global Table of Content
+ * struct minidump_global_toc - Global Table of Content
* @status : Global Minidump init status
* @md_revision : Minidump revision
* @enabled : Minidump enable status
diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c
index 5280ec9b5449..497acfb33f8f 100644
--- a/drivers/remoteproc/qcom_q6v5.c
+++ b/drivers/remoteproc/qcom_q6v5.c
@@ -112,6 +112,7 @@ static irqreturn_t q6v5_wdog_interrupt(int irq, void *data)
else
dev_err(q6v5->dev, "watchdog without message\n");
+ q6v5->running = false;
rproc_report_crash(q6v5->rproc, RPROC_WATCHDOG);
return IRQ_HANDLED;
@@ -123,6 +124,9 @@ static irqreturn_t q6v5_fatal_interrupt(int irq, void *data)
size_t len;
char *msg;
+ if (!q6v5->running)
+ return IRQ_HANDLED;
+
msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, q6v5->crash_reason, &len);
if (!IS_ERR(msg) && len > 0 && msg[0])
dev_err(q6v5->dev, "fatal error received: %s\n", msg);
diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c
index 2f3b9f54251e..4c9a1b99cd51 100644
--- a/drivers/remoteproc/qcom_q6v5_adsp.c
+++ b/drivers/remoteproc/qcom_q6v5_adsp.c
@@ -175,9 +175,8 @@ static int qcom_rproc_pds_enable(struct qcom_adsp *adsp, struct device **pds,
for (i = 0; i < pd_count; i++) {
dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
- ret = pm_runtime_get_sync(pds[i]);
+ ret = pm_runtime_resume_and_get(pds[i]);
if (ret < 0) {
- pm_runtime_put_noidle(pds[i]);
dev_pm_genpd_set_performance_state(pds[i], 0);
goto unroll_pd_votes;
}
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index af217de75e4d..fddb63cffee0 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/devcoredump.h>
+#include <linux/dma-map-ops.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -932,27 +933,52 @@ static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw,
const char *fw_name)
{
- unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
+ unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS | DMA_ATTR_NO_KERNEL_MAPPING;
+ unsigned long flags = VM_DMA_COHERENT | VM_FLUSH_RESET_PERMS;
+ struct page **pages;
+ struct page *page;
dma_addr_t phys;
void *metadata;
int mdata_perm;
int xferop_ret;
size_t size;
- void *ptr;
+ void *vaddr;
+ int count;
int ret;
+ int i;
metadata = qcom_mdt_read_metadata(fw, &size, fw_name, qproc->dev);
if (IS_ERR(metadata))
return PTR_ERR(metadata);
- ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
- if (!ptr) {
+ page = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
+ if (!page) {
kfree(metadata);
dev_err(qproc->dev, "failed to allocate mdt buffer\n");
return -ENOMEM;
}
- memcpy(ptr, metadata, size);
+ count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ pages = kmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
+ if (!pages) {
+ ret = -ENOMEM;
+ goto free_dma_attrs;
+ }
+
+ for (i = 0; i < count; i++)
+ pages[i] = nth_page(page, i);
+
+ vaddr = vmap(pages, count, flags, pgprot_dmacoherent(PAGE_KERNEL));
+ kfree(pages);
+ if (!vaddr) {
+ dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n", &phys, size);
+ ret = -EBUSY;
+ goto free_dma_attrs;
+ }
+
+ memcpy(vaddr, metadata, size);
+
+ vunmap(vaddr);
/* Hypervisor mapping to access metadata by modem */
mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
@@ -982,7 +1008,7 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw,
"mdt buffer not reclaimed system may become unstable\n");
free_dma_attrs:
- dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs);
+ dma_free_attrs(qproc->dev, size, page, phys, dma_attrs);
kfree(metadata);
return ret < 0 ? ret : 0;
@@ -1102,6 +1128,9 @@ static int q6v5_mba_load(struct q6v5 *qproc)
if (ret)
goto reclaim_mba;
+ if (qproc->has_mba_logs)
+ qcom_pil_info_store("mba", qproc->mba_phys, MBA_LOG_SIZE);
+
ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
if (ret == -ETIMEDOUT) {
dev_err(qproc->dev, "MBA boot timed out\n");
@@ -1594,11 +1623,19 @@ static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
return ret;
}
+static unsigned long q6v5_panic(struct rproc *rproc)
+{
+ struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
+
+ return qcom_q6v5_panic(&qproc->q6v5);
+}
+
static const struct rproc_ops q6v5_ops = {
.start = q6v5_start,
.stop = q6v5_stop,
.parse_fw = qcom_q6v5_register_dump_segments,
.load = q6v5_load,
+ .panic = q6v5_panic,
};
static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
@@ -2188,6 +2225,11 @@ static const struct rproc_hexagon_res msm8996_mss = {
"mnoc_axi",
NULL
},
+ .proxy_pd_names = (char*[]){
+ "mx",
+ "cx",
+ NULL
+ },
.need_mem_protection = true,
.has_alt_reset = false,
.has_mba_logs = false,
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index 6ae39c5653b1..6afd0941e552 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -8,6 +8,7 @@
*/
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -29,6 +30,8 @@
#include "qcom_q6v5.h"
#include "remoteproc_internal.h"
+#define ADSP_DECRYPT_SHUTDOWN_DELAY_MS 100
+
struct adsp_data {
int crash_reason_smem;
const char *firmware_name;
@@ -36,6 +39,7 @@ struct adsp_data {
unsigned int minidump_id;
bool has_aggre2_clk;
bool auto_boot;
+ bool decrypt_shutdown;
char **proxy_pd_names;
@@ -65,6 +69,7 @@ struct qcom_adsp {
unsigned int minidump_id;
int crash_reason_smem;
bool has_aggre2_clk;
+ bool decrypt_shutdown;
const char *info_name;
struct completion start_done;
@@ -87,6 +92,9 @@ static void adsp_minidump(struct rproc *rproc)
{
struct qcom_adsp *adsp = rproc->priv;
+ if (rproc->dump_conf == RPROC_COREDUMP_DISABLED)
+ return;
+
qcom_minidump(rproc, adsp->minidump_id);
}
@@ -128,6 +136,19 @@ static void adsp_pds_disable(struct qcom_adsp *adsp, struct device **pds,
}
}
+static int adsp_shutdown_poll_decrypt(struct qcom_adsp *adsp)
+{
+ unsigned int retry_num = 50;
+ int ret;
+
+ do {
+ msleep(ADSP_DECRYPT_SHUTDOWN_DELAY_MS);
+ ret = qcom_scm_pas_shutdown(adsp->pas_id);
+ } while (ret == -EINVAL && --retry_num);
+
+ return ret;
+}
+
static int adsp_unprepare(struct rproc *rproc)
{
struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
@@ -185,13 +206,17 @@ static int adsp_start(struct rproc *rproc)
if (ret)
goto disable_xo_clk;
- ret = regulator_enable(adsp->cx_supply);
- if (ret)
- goto disable_aggre2_clk;
+ if (adsp->cx_supply) {
+ ret = regulator_enable(adsp->cx_supply);
+ if (ret)
+ goto disable_aggre2_clk;
+ }
- ret = regulator_enable(adsp->px_supply);
- if (ret)
- goto disable_cx_supply;
+ if (adsp->px_supply) {
+ ret = regulator_enable(adsp->px_supply);
+ if (ret)
+ goto disable_cx_supply;
+ }
ret = qcom_scm_pas_auth_and_reset(adsp->pas_id);
if (ret) {
@@ -212,9 +237,11 @@ static int adsp_start(struct rproc *rproc)
return 0;
disable_px_supply:
- regulator_disable(adsp->px_supply);
+ if (adsp->px_supply)
+ regulator_disable(adsp->px_supply);
disable_cx_supply:
- regulator_disable(adsp->cx_supply);
+ if (adsp->cx_supply)
+ regulator_disable(adsp->cx_supply);
disable_aggre2_clk:
clk_disable_unprepare(adsp->aggre2_clk);
disable_xo_clk:
@@ -231,8 +258,10 @@ static void qcom_pas_handover(struct qcom_q6v5 *q6v5)
{
struct qcom_adsp *adsp = container_of(q6v5, struct qcom_adsp, q6v5);
- regulator_disable(adsp->px_supply);
- regulator_disable(adsp->cx_supply);
+ if (adsp->px_supply)
+ regulator_disable(adsp->px_supply);
+ if (adsp->cx_supply)
+ regulator_disable(adsp->cx_supply);
clk_disable_unprepare(adsp->aggre2_clk);
clk_disable_unprepare(adsp->xo);
adsp_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
@@ -249,6 +278,9 @@ static int adsp_stop(struct rproc *rproc)
dev_err(adsp->dev, "timed out on wait\n");
ret = qcom_scm_pas_shutdown(adsp->pas_id);
+ if (ret && adsp->decrypt_shutdown)
+ ret = adsp_shutdown_poll_decrypt(adsp);
+
if (ret)
dev_err(adsp->dev, "failed to shutdown: %d\n", ret);
@@ -268,6 +300,9 @@ static void *adsp_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iom
if (offset < 0 || offset + len > adsp->mem_size)
return NULL;
+ if (is_iomem)
+ *is_iomem = true;
+
return adsp->mem_region + offset;
}
@@ -326,14 +361,26 @@ static int adsp_init_clock(struct qcom_adsp *adsp)
static int adsp_init_regulator(struct qcom_adsp *adsp)
{
- adsp->cx_supply = devm_regulator_get(adsp->dev, "cx");
- if (IS_ERR(adsp->cx_supply))
- return PTR_ERR(adsp->cx_supply);
+ adsp->cx_supply = devm_regulator_get_optional(adsp->dev, "cx");
+ if (IS_ERR(adsp->cx_supply)) {
+ if (PTR_ERR(adsp->cx_supply) == -ENODEV)
+ adsp->cx_supply = NULL;
+ else
+ return PTR_ERR(adsp->cx_supply);
+ }
- regulator_set_load(adsp->cx_supply, 100000);
+ if (adsp->cx_supply)
+ regulator_set_load(adsp->cx_supply, 100000);
- adsp->px_supply = devm_regulator_get(adsp->dev, "px");
- return PTR_ERR_OR_ZERO(adsp->px_supply);
+ adsp->px_supply = devm_regulator_get_optional(adsp->dev, "px");
+ if (IS_ERR(adsp->px_supply)) {
+ if (PTR_ERR(adsp->px_supply) == -ENODEV)
+ adsp->px_supply = NULL;
+ else
+ return PTR_ERR(adsp->px_supply);
+ }
+
+ return 0;
}
static int adsp_pds_attach(struct device *dev, struct device **devs,
@@ -459,9 +506,12 @@ static int adsp_probe(struct platform_device *pdev)
adsp->pas_id = desc->pas_id;
adsp->has_aggre2_clk = desc->has_aggre2_clk;
adsp->info_name = desc->sysmon_name;
+ adsp->decrypt_shutdown = desc->decrypt_shutdown;
platform_set_drvdata(pdev, adsp);
- device_wakeup_enable(adsp->dev);
+ ret = device_init_wakeup(adsp->dev, true);
+ if (ret)
+ goto free_rproc;
ret = adsp_alloc_memory_region(adsp);
if (ret)
@@ -877,6 +927,25 @@ static const struct adsp_data sdx55_mpss_resource = {
.ssctl_id = 0x22,
};
+static const struct adsp_data sm8450_mpss_resource = {
+ .crash_reason_smem = 421,
+ .firmware_name = "modem.mdt",
+ .pas_id = 4,
+ .minidump_id = 3,
+ .has_aggre2_clk = false,
+ .auto_boot = false,
+ .decrypt_shutdown = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mss",
+ NULL
+ },
+ .load_state = "modem",
+ .ssr_name = "mpss",
+ .sysmon_name = "modem",
+ .ssctl_id = 0x12,
+};
+
static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,msm8226-adsp-pil", .data = &adsp_resource_init},
{ .compatible = "qcom,msm8974-adsp-pil", .data = &adsp_resource_init},
@@ -916,7 +985,7 @@ static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,sm8450-adsp-pas", .data = &sm8350_adsp_resource},
{ .compatible = "qcom,sm8450-cdsp-pas", .data = &sm8350_cdsp_resource},
{ .compatible = "qcom,sm8450-slpi-pas", .data = &sm8350_slpi_resource},
- { .compatible = "qcom,sm8450-mpss-pas", .data = &mpss_resource_init},
+ { .compatible = "qcom,sm8450-mpss-pas", .data = &sm8450_mpss_resource},
{ },
};
MODULE_DEVICE_TABLE(of, adsp_of_match);
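
The optional-supply handling introduced above follows the usual devm_regulator_get_optional() convention: -ENODEV means the board simply has no such supply and is mapped to a NULL handle, while any other error aborts the probe; the later enable/disable calls are then guarded on a non-NULL handle. A sketch of that pattern, with "cx" as the example supply name:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int example_get_optional_supply(struct device *dev,
				       struct regulator **supply)
{
	*supply = devm_regulator_get_optional(dev, "cx");
	if (IS_ERR(*supply)) {
		if (PTR_ERR(*supply) == -ENODEV)
			*supply = NULL;	/* supply not wired on this board */
		else
			return PTR_ERR(*supply);
	}

	return 0;
}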
diff --git a/drivers/remoteproc/qcom_sysmon.c b/drivers/remoteproc/qcom_sysmon.c
index 9fca81492863..57dde2a69b9d 100644
--- a/drivers/remoteproc/qcom_sysmon.c
+++ b/drivers/remoteproc/qcom_sysmon.c
@@ -41,6 +41,7 @@ struct qcom_sysmon {
struct completion comp;
struct completion ind_comp;
struct completion shutdown_comp;
+ struct completion ssctl_comp;
struct mutex lock;
bool ssr_ack;
@@ -445,6 +446,8 @@ static int ssctl_new_server(struct qmi_handle *qmi, struct qmi_service *svc)
svc->priv = sysmon;
+ complete(&sysmon->ssctl_comp);
+
return 0;
}
@@ -501,6 +504,7 @@ static int sysmon_start(struct rproc_subdev *subdev)
.ssr_event = SSCTL_SSR_EVENT_AFTER_POWERUP
};
+ reinit_completion(&sysmon->ssctl_comp);
mutex_lock(&sysmon->state_lock);
sysmon->state = SSCTL_SSR_EVENT_AFTER_POWERUP;
blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)&event);
@@ -508,10 +512,12 @@ static int sysmon_start(struct rproc_subdev *subdev)
mutex_lock(&sysmon_lock);
list_for_each_entry(target, &sysmon_list, node) {
- if (target == sysmon)
+ mutex_lock(&target->state_lock);
+ if (target == sysmon || target->state != SSCTL_SSR_EVENT_AFTER_POWERUP) {
+ mutex_unlock(&target->state_lock);
continue;
+ }
- mutex_lock(&target->state_lock);
event.subsys_name = target->name;
event.ssr_event = target->state;
@@ -545,6 +551,11 @@ static void sysmon_stop(struct rproc_subdev *subdev, bool crashed)
if (crashed)
return;
+ if (sysmon->ssctl_instance) {
+ if (!wait_for_completion_timeout(&sysmon->ssctl_comp, HZ / 2))
+ dev_err(sysmon->dev, "timeout waiting for ssctl service\n");
+ }
+
if (sysmon->ssctl_version)
sysmon->shutdown_acked = ssctl_request_shutdown(sysmon);
else if (sysmon->ept)
@@ -631,6 +642,7 @@ struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
init_completion(&sysmon->comp);
init_completion(&sysmon->ind_comp);
init_completion(&sysmon->shutdown_comp);
+ init_completion(&sysmon->ssctl_comp);
mutex_init(&sysmon->lock);
mutex_init(&sysmon->state_lock);
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
index 9a223d394087..68f37296b151 100644
--- a/drivers/remoteproc/qcom_wcnss.c
+++ b/drivers/remoteproc/qcom_wcnss.c
@@ -467,6 +467,7 @@ static int wcnss_request_irq(struct qcom_wcnss *wcnss,
irq_handler_t thread_fn)
{
int ret;
+ int irq_number;
ret = platform_get_irq_byname(pdev, name);
if (ret < 0 && optional) {
@@ -477,14 +478,19 @@ static int wcnss_request_irq(struct qcom_wcnss *wcnss,
return ret;
}
+ irq_number = ret;
+
ret = devm_request_threaded_irq(&pdev->dev, ret,
NULL, thread_fn,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
"wcnss", wcnss);
- if (ret)
+ if (ret) {
dev_err(&pdev->dev, "request %s IRQ failed\n", name);
+ return ret;
+ }
- return ret;
+ /* Return the IRQ number if the IRQ was successfully acquired */
+ return irq_number;
}
static int wcnss_alloc_memory_region(struct qcom_wcnss *wcnss)
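
The wcnss_request_irq() fix above distinguishes two return values: platform_get_irq_byname() yields the IRQ number, while devm_request_threaded_irq() yields 0 on success, so the helper must remember the number if its callers expect it back. A condensed sketch of that shape (names hypothetical):

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static int example_request_irq(struct platform_device *pdev, const char *name,
			       irq_handler_t thread_fn, void *data)
{
	int irq, ret;

	irq = platform_get_irq_byname(pdev, name);
	if (irq < 0)
		return irq;

	ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, thread_fn,
					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
					name, data);
	if (ret)
		return ret;

	return irq;	/* callers get the IRQ number, not 0 */
}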
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 02a04ab34a23..e5279ed9a8d7 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -59,6 +59,7 @@ static int rproc_release_carveout(struct rproc *rproc,
/* Unique indices for remoteproc devices */
static DEFINE_IDA(rproc_dev_index);
+static struct workqueue_struct *rproc_recovery_wq;
static const char * const rproc_crash_names[] = {
[RPROC_MMUFAULT] = "mmufault",
@@ -334,7 +335,7 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
size_t size;
/* actual size of vring (in bytes) */
- size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));
+ size = PAGE_ALIGN(vring_size(rvring->num, rvring->align));
rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;
@@ -401,7 +402,7 @@ rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)
return -EINVAL;
}
- rvring->len = vring->num;
+ rvring->num = vring->num;
rvring->align = vring->align;
rvring->rvdev = rvdev;
@@ -461,6 +462,7 @@ static void rproc_rvdev_release(struct device *dev)
struct rproc_vdev *rvdev = container_of(dev, struct rproc_vdev, dev);
of_reserved_mem_device_release(dev);
+ dma_release_coherent_memory(dev);
kfree(rvdev);
}
@@ -970,7 +972,7 @@ static int rproc_handle_carveout(struct rproc *rproc,
return 0;
}
- /* Register carveout in in list */
+ /* Register carveout in list */
carveout = rproc_mem_entry_init(dev, NULL, 0, rsc->len, rsc->da,
rproc_alloc_carveout,
rproc_release_carveout, rsc->name);
@@ -2434,7 +2436,7 @@ static void rproc_type_release(struct device *dev)
idr_destroy(&rproc->notifyids);
if (rproc->index >= 0)
- ida_simple_remove(&rproc_dev_index, rproc->index);
+ ida_free(&rproc_dev_index, rproc->index);
kfree_const(rproc->firmware);
kfree_const(rproc->name);
@@ -2551,9 +2553,9 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
goto put_device;
/* Assign a unique device index and name */
- rproc->index = ida_simple_get(&rproc_dev_index, 0, 0, GFP_KERNEL);
+ rproc->index = ida_alloc(&rproc_dev_index, GFP_KERNEL);
if (rproc->index < 0) {
- dev_err(dev, "ida_simple_get failed: %d\n", rproc->index);
+ dev_err(dev, "ida_alloc failed: %d\n", rproc->index);
goto put_device;
}
@@ -2762,8 +2764,7 @@ void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type)
dev_err(&rproc->dev, "crash detected in %s: type %s\n",
rproc->name, rproc_crash_to_string(type));
- /* Have a worker handle the error; ensure system is not suspended */
- queue_work(system_freezable_wq, &rproc->crash_handler);
+ queue_work(rproc_recovery_wq, &rproc->crash_handler);
}
EXPORT_SYMBOL(rproc_report_crash);
@@ -2812,6 +2813,13 @@ static void __exit rproc_exit_panic(void)
static int __init remoteproc_init(void)
{
+ rproc_recovery_wq = alloc_workqueue("rproc_recovery_wq",
+ WQ_UNBOUND | WQ_FREEZABLE, 0);
+ if (!rproc_recovery_wq) {
+ pr_err("remoteproc: creation of rproc_recovery_wq failed\n");
+ return -ENOMEM;
+ }
+
rproc_init_sysfs();
rproc_init_debugfs();
rproc_init_cdev();
@@ -2825,9 +2833,13 @@ static void __exit remoteproc_exit(void)
{
ida_destroy(&rproc_dev_index);
+ if (!rproc_recovery_wq)
+ return;
+
rproc_exit_panic();
rproc_exit_debugfs();
rproc_exit_sysfs();
+ destroy_workqueue(rproc_recovery_wq);
}
module_exit(remoteproc_exit);
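
The ida_simple_get()/ida_simple_remove() calls converted above map directly onto the newer API when no upper bound is required. A minimal sketch of the replacement pair:

#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDA(example_ida);

static int example_get_index(void)
{
	/* was: ida_simple_get(&example_ida, 0, 0, GFP_KERNEL) */
	return ida_alloc(&example_ida, GFP_KERNEL);
}

static void example_put_index(int index)
{
	/* was: ida_simple_remove(&example_ida, index) */
	ida_free(&example_ida, index);
}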
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index 70ab496d0431..0f7706e23eb9 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -87,7 +87,7 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
struct fw_rsc_vdev *rsc;
struct virtqueue *vq;
void *addr;
- int len, size;
+ int num, size;
/* we're temporarily limited to two virtqueues per rvdev */
if (id >= ARRAY_SIZE(rvdev->vring))
@@ -104,20 +104,20 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
rvring = &rvdev->vring[id];
addr = mem->va;
- len = rvring->len;
+ num = rvring->num;
/* zero vring */
- size = vring_size(len, rvring->align);
+ size = vring_size(num, rvring->align);
memset(addr, 0, size);
dev_dbg(dev, "vring%d: va %pK qsz %d notifyid %d\n",
- id, addr, len, rvring->notifyid);
+ id, addr, num, rvring->notifyid);
/*
* Create the new vq, and tell virtio we're not interested in
* the 'weak' smp barriers, since we're talking with a real device.
*/
- vq = vring_new_virtqueue(id, len, rvring->align, vdev, false, ctx,
+ vq = vring_new_virtqueue(id, num, rvring->align, vdev, false, ctx,
addr, rproc_virtio_notify, callback, name);
if (!vq) {
dev_err(dev, "vring_new_virtqueue %s failed\n", name);
@@ -125,6 +125,8 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
return ERR_PTR(-ENOMEM);
}
+ vq->num_max = num;
+
rvring->vq = vq;
vq->priv = rvring;
diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
index 4840ad906018..0481926c6975 100644
--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
@@ -1655,6 +1655,7 @@ static int k3_r5_cluster_of_init(struct platform_device *pdev)
if (!cpdev) {
ret = -ENODEV;
dev_err(dev, "could not get R5 core platform device\n");
+ of_node_put(child);
goto fail;
}
@@ -1663,6 +1664,7 @@ static int k3_r5_cluster_of_init(struct platform_device *pdev)
dev_err(dev, "k3_r5_core_of_init failed, ret = %d\n",
ret);
put_device(&cpdev->dev);
+ of_node_put(child);
goto fail;
}
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 48d94649ea82..806773e88832 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -17,7 +17,7 @@ if RESET_CONTROLLER
config RESET_A10SR
tristate "Altera Arria10 System Resource Reset"
- depends on MFD_ALTERA_A10SR
+ depends on MFD_ALTERA_A10SR || COMPILE_TEST
help
This option enables support for the external reset functions for
peripheral PHYs on the Altera Arria10 System Resource Chip.
@@ -200,8 +200,9 @@ config RESET_SCMI
firmware controlling all the reset signals.
config RESET_SIMPLE
- bool "Simple Reset Controller Driver" if COMPILE_TEST
+ bool "Simple Reset Controller Driver" if COMPILE_TEST || EXPERT
default ARCH_ASPEED || ARCH_BCM4908 || ARCH_BITMAIN || ARCH_REALTEK || ARCH_STM32 || (ARCH_INTEL_SOCFPGA && ARM64) || ARCH_SUNXI || ARC
+ depends on HAS_IOMEM
help
This enables a simple reset controller driver for reset lines that
can be asserted and deasserted by toggling bits in a contiguous,
@@ -265,6 +266,14 @@ config RESET_TI_SYSCON
you wish to use the reset framework for such memory-mapped devices,
say Y here. Otherwise, say N.
+config RESET_TI_TPS380X
+ tristate "TI TPS380x Reset Driver"
+ select GPIOLIB
+ help
+ This enables the reset driver support for TI TPS380x devices. If
+ you wish to use the reset framework for such devices, say Y here.
+ Otherwise, say N.
+
config RESET_TN48M_CPLD
tristate "Delta Networks TN48M switch CPLD reset controller"
depends on MFD_TN48M_CPLD || COMPILE_TEST
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index 3ff378f43348..cd5cf8e7c6a7 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_RESET_SUNPLUS) += reset-sunplus.o
obj-$(CONFIG_RESET_SUNXI) += reset-sunxi.o
obj-$(CONFIG_RESET_TI_SCI) += reset-ti-sci.o
obj-$(CONFIG_RESET_TI_SYSCON) += reset-ti-syscon.o
+obj-$(CONFIG_RESET_TI_TPS380X) += reset-tps380x.o
obj-$(CONFIG_RESET_TN48M_CPLD) += reset-tn48m.o
obj-$(CONFIG_RESET_UNIPHIER) += reset-uniphier.o
obj-$(CONFIG_RESET_UNIPHIER_GLUE) += reset-uniphier-glue.o
diff --git a/drivers/reset/reset-tps380x.c b/drivers/reset/reset-tps380x.c
new file mode 100644
index 000000000000..09d511f069ba
--- /dev/null
+++ b/drivers/reset/reset-tps380x.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * TI TPS380x Supply Voltage Supervisor and Reset Controller Driver
+ *
+ * Copyright (C) 2022 Pengutronix, Marco Felsch <kernel@pengutronix.de>
+ *
+ * Based on Simple Reset Controller Driver
+ *
+ * Copyright (C) 2017 Pengutronix, Philipp Zabel <kernel@pengutronix.de>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/reset-controller.h>
+
+struct tps380x_reset {
+ struct reset_controller_dev rcdev;
+ struct gpio_desc *reset_gpio;
+ unsigned int reset_ms;
+};
+
+struct tps380x_reset_devdata {
+ unsigned int min_reset_ms;
+ unsigned int typ_reset_ms;
+ unsigned int max_reset_ms;
+};
+
+static inline
+struct tps380x_reset *to_tps380x_reset(struct reset_controller_dev *rcdev)
+{
+ return container_of(rcdev, struct tps380x_reset, rcdev);
+}
+
+static int
+tps380x_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct tps380x_reset *tps380x = to_tps380x_reset(rcdev);
+
+ gpiod_set_value_cansleep(tps380x->reset_gpio, 1);
+
+ return 0;
+}
+
+static int
+tps380x_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct tps380x_reset *tps380x = to_tps380x_reset(rcdev);
+
+ gpiod_set_value_cansleep(tps380x->reset_gpio, 0);
+ msleep(tps380x->reset_ms);
+
+ return 0;
+}
+
+static const struct reset_control_ops reset_tps380x_ops = {
+ .assert = tps380x_reset_assert,
+ .deassert = tps380x_reset_deassert,
+};
+
+static int tps380x_reset_of_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ /* No special handling needed, we have only one reset line per device */
+ return 0;
+}
+
+static int tps380x_reset_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct tps380x_reset_devdata *devdata;
+ struct tps380x_reset *tps380x;
+
+ devdata = device_get_match_data(dev);
+ if (!devdata)
+ return -EINVAL;
+
+ tps380x = devm_kzalloc(dev, sizeof(*tps380x), GFP_KERNEL);
+ if (!tps380x)
+ return -ENOMEM;
+
+ tps380x->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(tps380x->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(tps380x->reset_gpio),
+ "Failed to get GPIO\n");
+
+ tps380x->reset_ms = devdata->max_reset_ms;
+
+ tps380x->rcdev.ops = &reset_tps380x_ops;
+ tps380x->rcdev.owner = THIS_MODULE;
+ tps380x->rcdev.dev = dev;
+ tps380x->rcdev.of_node = dev->of_node;
+ tps380x->rcdev.of_reset_n_cells = 0;
+ tps380x->rcdev.of_xlate = tps380x_reset_of_xlate;
+ tps380x->rcdev.nr_resets = 1;
+
+ return devm_reset_controller_register(dev, &tps380x->rcdev);
+}
+
+static const struct tps380x_reset_devdata tps3801_reset_data = {
+ .min_reset_ms = 120,
+ .typ_reset_ms = 200,
+ .max_reset_ms = 280,
+};
+
+static const struct of_device_id tps380x_reset_dt_ids[] = {
+ { .compatible = "ti,tps3801", .data = &tps3801_reset_data },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, tps380x_reset_dt_ids);
+
+static struct platform_driver tps380x_reset_driver = {
+ .probe = tps380x_reset_probe,
+ .driver = {
+ .name = "tps380x-reset",
+ .of_match_table = tps380x_reset_dt_ids,
+ },
+};
+module_platform_driver(tps380x_reset_driver);
+
+MODULE_AUTHOR("Marco Felsch <kernel@pengutronix.de>");
+MODULE_DESCRIPTION("TI TPS380x Supply Voltage Supervisor and Reset Driver");
+MODULE_LICENSE("GPL v2");
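
A hypothetical consumer of the new tps380x driver would take the supervised reset line through the generic reset-controller consumer API; the msleep() in tps380x_reset_deassert() then guarantees the part's worst-case reset time before the consumer proceeds. Sketch only:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

static int example_consumer_probe(struct device *dev)
{
	struct reset_control *rst;

	rst = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/* sleeps for the device's reset_ms before returning */
	return reset_control_deassert(rst);
}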
diff --git a/drivers/rpmsg/mtk_rpmsg.c b/drivers/rpmsg/mtk_rpmsg.c
index 5b4404b8be4c..d1213c33da20 100644
--- a/drivers/rpmsg/mtk_rpmsg.c
+++ b/drivers/rpmsg/mtk_rpmsg.c
@@ -234,7 +234,9 @@ static void mtk_register_device_work_function(struct work_struct *register_work)
if (info->registered)
continue;
+ mutex_unlock(&subdev->channels_lock);
ret = mtk_rpmsg_register_device(subdev, &info->info);
+ mutex_lock(&subdev->channels_lock);
if (ret) {
dev_err(&pdev->dev, "Can't create rpmsg_device\n");
continue;
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index 07586514991f..115c0a1eddb1 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -98,8 +98,6 @@ struct glink_core_rx_intent {
struct qcom_glink {
struct device *dev;
- const char *name;
-
struct mbox_client mbox_client;
struct mbox_chan *mbox_chan;
@@ -1546,7 +1544,7 @@ static void qcom_glink_rx_close(struct qcom_glink *glink, unsigned int rcid)
cancel_work_sync(&channel->intent_work);
if (channel->rpdev) {
- strncpy(chinfo.name, channel->name, sizeof(chinfo.name));
+ strscpy_pad(chinfo.name, channel->name, sizeof(chinfo.name));
chinfo.src = RPMSG_ADDR_ANY;
chinfo.dst = RPMSG_ADDR_ANY;
@@ -1674,7 +1672,7 @@ static ssize_t rpmsg_name_show(struct device *dev,
if (ret < 0)
name = dev->of_node->name;
- return snprintf(buf, RPMSG_NAME_SIZE, "%s\n", name);
+ return sysfs_emit(buf, "%s\n", name);
}
static DEVICE_ATTR_RO(rpmsg_name);
@@ -1755,10 +1753,6 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev,
if (ret)
dev_err(dev, "failed to add groups\n");
- ret = of_property_read_string(dev->of_node, "label", &glink->name);
- if (ret < 0)
- glink->name = dev->of_node->name;
-
glink->mbox_client.dev = dev;
glink->mbox_client.knows_txdone = true;
glink->mbox_chan = mbox_request_channel(&glink->mbox_client, 0);
diff --git a/drivers/rpmsg/qcom_glink_ssr.c b/drivers/rpmsg/qcom_glink_ssr.c
index dea929c6045d..776d64446879 100644
--- a/drivers/rpmsg/qcom_glink_ssr.c
+++ b/drivers/rpmsg/qcom_glink_ssr.c
@@ -39,7 +39,7 @@ struct cleanup_done_msg {
__le32 seq_num;
};
-/**
+/*
* G-Link SSR protocol commands
*/
#define GLINK_SSR_DO_CLEANUP 0
diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
index 1957b27c4cf3..1044cf03c542 100644
--- a/drivers/rpmsg/qcom_smd.c
+++ b/drivers/rpmsg/qcom_smd.c
@@ -729,11 +729,11 @@ static int qcom_smd_write_fifo(struct qcom_smd_channel *channel,
}
/**
- * qcom_smd_send - write data to smd channel
+ * __qcom_smd_send - write data to smd channel
* @channel: channel handle
* @data: buffer of data to write
* @len: number of bytes to write
- * @wait: flag to indicate if write has ca wait
+ * @wait: flag to indicate if write can wait
*
* This is a blocking write of len bytes into the channel's tx ring buffer and
* signal the remote end. It will sleep until there is enough space available
@@ -1089,7 +1089,7 @@ static int qcom_smd_create_device(struct qcom_smd_channel *channel)
/* Assign public information to the rpmsg_device */
rpdev = &qsdev->rpdev;
- strncpy(rpdev->id.name, channel->name, RPMSG_NAME_SIZE);
+ strscpy_pad(rpdev->id.name, channel->name, RPMSG_NAME_SIZE);
rpdev->src = RPMSG_ADDR_ANY;
rpdev->dst = RPMSG_ADDR_ANY;
@@ -1323,7 +1323,7 @@ static void qcom_channel_state_worker(struct work_struct *work)
spin_unlock_irqrestore(&edge->channels_lock, flags);
- strncpy(chinfo.name, channel->name, sizeof(chinfo.name));
+ strscpy_pad(chinfo.name, channel->name, sizeof(chinfo.name));
chinfo.src = RPMSG_ADDR_ANY;
chinfo.dst = RPMSG_ADDR_ANY;
rpmsg_unregister_device(&edge->dev, &chinfo);
@@ -1383,6 +1383,7 @@ static int qcom_smd_parse_edge(struct device *dev,
}
edge->ipc_regmap = syscon_node_to_regmap(syscon_np);
+ of_node_put(syscon_np);
if (IS_ERR(edge->ipc_regmap)) {
ret = PTR_ERR(edge->ipc_regmap);
goto put_node;
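
The strncpy() to strscpy_pad() conversions above swap a copy that can leave the destination unterminated for one that always NUL-terminates and zero-fills the rest of the buffer, which is what the fixed-size rpmsg name fields expect. A one-line sketch:

#include <linux/string.h>
#include <linux/types.h>

static void example_set_channel_name(char *dst, size_t dst_size, const char *src)
{
	/* always NUL-terminated, remainder of dst zeroed; truncation reported as -E2BIG */
	strscpy_pad(dst, src, dst_size);
}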
diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
index b6183d4f62a2..4f2189111494 100644
--- a/drivers/rpmsg/rpmsg_char.c
+++ b/drivers/rpmsg/rpmsg_char.c
@@ -120,8 +120,11 @@ static int rpmsg_eptdev_open(struct inode *inode, struct file *filp)
struct rpmsg_device *rpdev = eptdev->rpdev;
struct device *dev = &eptdev->dev;
- if (eptdev->ept)
+ mutex_lock(&eptdev->ept_lock);
+ if (eptdev->ept) {
+ mutex_unlock(&eptdev->ept_lock);
return -EBUSY;
+ }
get_device(dev);
@@ -137,11 +140,13 @@ static int rpmsg_eptdev_open(struct inode *inode, struct file *filp)
if (!ept) {
dev_err(dev, "failed to open %s\n", eptdev->chinfo.name);
put_device(dev);
+ mutex_unlock(&eptdev->ept_lock);
return -EINVAL;
}
eptdev->ept = ept;
filp->private_data = eptdev;
+ mutex_unlock(&eptdev->ept_lock);
return 0;
}
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index 290c1f02da10..d6dde00efdae 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -604,7 +604,7 @@ int rpmsg_register_device_override(struct rpmsg_device *rpdev,
int ret;
if (driver_override)
- strcpy(rpdev->id.name, driver_override);
+ strscpy_pad(rpdev->id.name, driver_override, RPMSG_NAME_SIZE);
dev_set_name(dev, "%s.%s.%d.%d", dev_name(dev->parent),
rpdev->id.name, rpdev->src, rpdev->dst);
@@ -618,6 +618,7 @@ int rpmsg_register_device_override(struct rpmsg_device *rpdev,
strlen(driver_override));
if (ret) {
dev_err(dev, "device_set_override failed: %d\n", ret);
+ put_device(dev);
return ret;
}
}
diff --git a/drivers/rpmsg/rpmsg_internal.h b/drivers/rpmsg/rpmsg_internal.h
index a22cd4abe7d1..39b646d0d40d 100644
--- a/drivers/rpmsg/rpmsg_internal.h
+++ b/drivers/rpmsg/rpmsg_internal.h
@@ -41,8 +41,8 @@ struct rpmsg_device_ops {
rpmsg_rx_cb_t cb, void *priv,
struct rpmsg_channel_info chinfo);
- int (*announce_create)(struct rpmsg_device *ept);
- int (*announce_destroy)(struct rpmsg_device *ept);
+ int (*announce_create)(struct rpmsg_device *rpdev);
+ int (*announce_destroy)(struct rpmsg_device *rpdev);
};
/**
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index a00f901b5c1d..b8de25118ad0 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -383,6 +383,16 @@ config RTC_DRV_MAX77686
This driver can also be built as a module. If so, the module
will be called rtc-max77686.
+config RTC_DRV_NCT3018Y
+ tristate "Nuvoton NCT3018Y"
+ depends on OF
+ help
+ If you say yes here you get support for the Nuvoton NCT3018Y I2C RTC
+ chip.
+
+ This driver can also be built as a module, if so, the module will be
+ called "rtc-nct3018y".
+
config RTC_DRV_RK808
tristate "Rockchip RK805/RK808/RK809/RK817/RK818 RTC"
depends on MFD_RK808
@@ -1478,16 +1488,6 @@ config RTC_DRV_SUNPLUS
This driver can also be built as a module. If so, the module
will be called rtc-sunplus.
-config RTC_DRV_VR41XX
- tristate "NEC VR41XX"
- depends on CPU_VR41XX || COMPILE_TEST
- help
- If you say Y here you will get access to the real time clock
- built into your NEC VR41XX CPU.
-
- To compile this driver as a module, choose M here: the
- module will be called rtc-vr41xx.
-
config RTC_DRV_PL030
tristate "ARM AMBA PL030 RTC"
depends on ARM_AMBA
@@ -1929,6 +1929,17 @@ config RTC_DRV_ASPEED
This driver can also be built as a module, if so, the module
will be called "rtc-aspeed".
+config RTC_DRV_TI_K3
+ tristate "TI K3 RTC"
+ depends on ARCH_K3 || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+ If you say yes here you get support for the Texas Instruments
+ Real Time Clock for K3 architecture.
+
+ This driver can also be built as a module, if so, the module
+ will be called "rtc-ti-k3".
+
comment "HID Sensor RTC drivers"
config RTC_DRV_HID_SENSOR_TIME
@@ -1973,4 +1984,14 @@ config RTC_DRV_MSC313
This driver can also be built as a module, if so, the module
will be called "rtc-msc313".
+config RTC_DRV_POLARFIRE_SOC
+ tristate "Microchip PolarFire SoC built-in RTC"
+ depends on SOC_MICROCHIP_POLARFIRE
+ help
+ If you say yes here you will get support for the
+ built-in RTC on PolarFire SoC.
+
+ This driver can also be built as a module, if so, the module
+ will be called "rtc-mpfs".
+
endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index fb04467b652d..aab22bc63432 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -112,6 +112,7 @@ obj-$(CONFIG_RTC_DRV_MV) += rtc-mv.o
obj-$(CONFIG_RTC_DRV_MXC) += rtc-mxc.o
obj-$(CONFIG_RTC_DRV_MXC_V2) += rtc-mxc_v2.o
obj-$(CONFIG_RTC_DRV_GAMECUBE) += rtc-gamecube.o
+obj-$(CONFIG_RTC_DRV_NCT3018Y) += rtc-nct3018y.o
obj-$(CONFIG_RTC_DRV_NTXEC) += rtc-ntxec.o
obj-$(CONFIG_RTC_DRV_OMAP) += rtc-omap.o
obj-$(CONFIG_RTC_DRV_OPAL) += rtc-opal.o
@@ -130,6 +131,7 @@ obj-$(CONFIG_RTC_DRV_PIC32) += rtc-pic32.o
obj-$(CONFIG_RTC_DRV_PL030) += rtc-pl030.o
obj-$(CONFIG_RTC_DRV_PL031) += rtc-pl031.o
obj-$(CONFIG_RTC_DRV_PM8XXX) += rtc-pm8xxx.o
+obj-$(CONFIG_RTC_DRV_POLARFIRE_SOC) += rtc-mpfs.o
obj-$(CONFIG_RTC_DRV_PS3) += rtc-ps3.o
obj-$(CONFIG_RTC_DRV_PXA) += rtc-pxa.o
obj-$(CONFIG_RTC_DRV_R7301) += rtc-r7301.o
@@ -172,11 +174,11 @@ obj-$(CONFIG_RTC_DRV_SUNPLUS) += rtc-sunplus.o
obj-$(CONFIG_RTC_DRV_SUNXI) += rtc-sunxi.o
obj-$(CONFIG_RTC_DRV_TEGRA) += rtc-tegra.o
obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o
+obj-$(CONFIG_RTC_DRV_TI_K3) += rtc-ti-k3.o
obj-$(CONFIG_RTC_DRV_TPS6586X) += rtc-tps6586x.o
obj-$(CONFIG_RTC_DRV_TPS65910) += rtc-tps65910.o
obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl.o
obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o
-obj-$(CONFIG_RTC_DRV_VR41XX) += rtc-vr41xx.o
obj-$(CONFIG_RTC_DRV_VT8500) += rtc-vt8500.o
obj-$(CONFIG_RTC_DRV_WILCO_EC) += rtc-wilco-ec.o
obj-$(CONFIG_RTC_DRV_WM831X) += rtc-wm831x.o
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 3c8eec2218df..e48223c00c67 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -36,7 +36,7 @@ static void rtc_device_release(struct device *dev)
cancel_work_sync(&rtc->irqwork);
- ida_simple_remove(&rtc_ida, rtc->id);
+ ida_free(&rtc_ida, rtc->id);
mutex_destroy(&rtc->ops_lock);
kfree(rtc);
}
@@ -262,7 +262,7 @@ static int rtc_device_get_id(struct device *dev)
}
if (id < 0)
- id = ida_simple_get(&rtc_ida, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&rtc_ida, GFP_KERNEL);
return id;
}
@@ -368,7 +368,7 @@ struct rtc_device *devm_rtc_allocate_device(struct device *dev)
rtc = rtc_allocate_device();
if (!rtc) {
- ida_simple_remove(&rtc_ida, id);
+ ida_free(&rtc_ida, id);
return ERR_PTR(-ENOMEM);
}
diff --git a/drivers/rtc/dev.c b/drivers/rtc/dev.c
index 69325aeede1a..4aad9bb99868 100644
--- a/drivers/rtc/dev.c
+++ b/drivers/rtc/dev.c
@@ -96,7 +96,7 @@ static int clear_uie(struct rtc_device *rtc)
}
if (rtc->uie_task_active) {
spin_unlock_irq(&rtc->irq_lock);
- flush_scheduled_work();
+ flush_work(&rtc->uie_task);
spin_lock_irq(&rtc->irq_lock);
}
rtc->uie_irq_active = 0;
@@ -566,9 +566,3 @@ void __init rtc_dev_init(void)
if (err < 0)
pr_err("failed to allocate char dev region\n");
}
-
-void __exit rtc_dev_exit(void)
-{
- if (rtc_devt)
- unregister_chrdev_region(rtc_devt, RTC_DEV_MAX);
-}
diff --git a/drivers/rtc/rtc-ab-b5ze-s3.c b/drivers/rtc/rtc-ab-b5ze-s3.c
index 6e3e320dc727..f2b0971d2c65 100644
--- a/drivers/rtc/rtc-ab-b5ze-s3.c
+++ b/drivers/rtc/rtc-ab-b5ze-s3.c
@@ -817,8 +817,7 @@ static const struct regmap_config abb5zes3_rtc_regmap_config = {
.val_bits = 8,
};
-static int abb5zes3_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int abb5zes3_probe(struct i2c_client *client)
{
struct abb5zes3_rtc_data *data = NULL;
struct device *dev = &client->dev;
@@ -945,7 +944,7 @@ static struct i2c_driver abb5zes3_driver = {
.pm = &abb5zes3_rtc_pm_ops,
.of_match_table = of_match_ptr(abb5zes3_dt_match),
},
- .probe = abb5zes3_probe,
+ .probe_new = abb5zes3_probe,
.id_table = abb5zes3_id,
};
module_i2c_driver(abb5zes3_driver);
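
The rtc conversions above and below all take the same shape: the probe callback loses its unused struct i2c_device_id argument and the driver registers it through .probe_new. A skeletal sketch of the converted form (driver and id names are placeholders; a driver that still needs the matched id would look it up from the client, e.g. via i2c_client_get_device_id() where that helper is available):

#include <linux/i2c.h>
#include <linux/module.h>

static int example_probe(struct i2c_client *client)
{
	/* ... allocate and register the RTC ... */
	return 0;
}

static const struct i2c_device_id example_id[] = {
	{ "example-rtc", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, example_id);

static struct i2c_driver example_driver = {
	.driver = {
		.name = "example-rtc",
	},
	.probe_new = example_probe,
	.id_table = example_id,
};
module_i2c_driver(example_driver);

MODULE_LICENSE("GPL");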
diff --git a/drivers/rtc/rtc-ab-eoz9.c b/drivers/rtc/rtc-ab-eoz9.c
index e188ab517f1e..2f8deb8c4cd3 100644
--- a/drivers/rtc/rtc-ab-eoz9.c
+++ b/drivers/rtc/rtc-ab-eoz9.c
@@ -495,8 +495,7 @@ static void abeoz9_hwmon_register(struct device *dev,
#endif
-static int abeoz9_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int abeoz9_probe(struct i2c_client *client)
{
struct abeoz9_rtc_data *data = NULL;
struct device *dev = &client->dev;
@@ -580,7 +579,7 @@ static struct i2c_driver abeoz9_driver = {
.name = "rtc-ab-eoz9",
.of_match_table = of_match_ptr(abeoz9_dt_match),
},
- .probe = abeoz9_probe,
+ .probe_new = abeoz9_probe,
.id_table = abeoz9_id,
};
diff --git a/drivers/rtc/rtc-bq32k.c b/drivers/rtc/rtc-bq32k.c
index 2235c968842d..e0bbb11d912e 100644
--- a/drivers/rtc/rtc-bq32k.c
+++ b/drivers/rtc/rtc-bq32k.c
@@ -249,8 +249,7 @@ static void bq32k_sysfs_unregister(struct device *dev)
device_remove_file(dev, &dev_attr_trickle_charge_bypass);
}
-static int bq32k_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int bq32k_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct rtc_device *rtc;
@@ -322,7 +321,7 @@ static struct i2c_driver bq32k_driver = {
.name = "bq32k",
.of_match_table = of_match_ptr(bq32k_of_match),
},
- .probe = bq32k_probe,
+ .probe_new = bq32k_probe,
.remove = bq32k_remove,
.id_table = bq32k_id,
};
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 7c006c2b125f..bdb1df843c78 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -1260,9 +1260,6 @@ static void use_acpi_alarm_quirks(void)
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return;
- if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
- return;
-
if (!is_hpet_enabled())
return;
diff --git a/drivers/rtc/rtc-core.h b/drivers/rtc/rtc-core.h
index 0abf98983e13..4b10a1b8f370 100644
--- a/drivers/rtc/rtc-core.h
+++ b/drivers/rtc/rtc-core.h
@@ -2,7 +2,6 @@
#ifdef CONFIG_RTC_INTF_DEV
extern void __init rtc_dev_init(void);
-extern void __exit rtc_dev_exit(void);
extern void rtc_dev_prepare(struct rtc_device *rtc);
#else
@@ -11,10 +10,6 @@ static inline void rtc_dev_init(void)
{
}
-static inline void rtc_dev_exit(void)
-{
-}
-
static inline void rtc_dev_prepare(struct rtc_device *rtc)
{
}
diff --git a/drivers/rtc/rtc-cros-ec.c b/drivers/rtc/rtc-cros-ec.c
index 70626793ca69..887f5193e253 100644
--- a/drivers/rtc/rtc-cros-ec.c
+++ b/drivers/rtc/rtc-cros-ec.c
@@ -375,10 +375,8 @@ static int cros_ec_rtc_remove(struct platform_device *pdev)
ret = blocking_notifier_chain_unregister(
&cros_ec_rtc->cros_ec->event_notifier,
&cros_ec_rtc->notifier);
- if (ret) {
+ if (ret)
dev_err(dev, "failed to unregister notifier\n");
- return ret;
- }
return 0;
}
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 8db5a631bca8..b19de5100b1a 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -467,8 +467,7 @@ static const struct watchdog_ops ds1374_wdt_ops = {
*
*****************************************************************************
*/
-static int ds1374_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ds1374_probe(struct i2c_client *client)
{
struct ds1374 *ds1374;
int ret;
@@ -575,7 +574,7 @@ static struct i2c_driver ds1374_driver = {
.of_match_table = of_match_ptr(ds1374_of_match),
.pm = &ds1374_pm,
},
- .probe = ds1374_probe,
+ .probe_new = ds1374_probe,
.remove = ds1374_remove,
.id_table = ds1374_id,
};
diff --git a/drivers/rtc/rtc-ds1672.c b/drivers/rtc/rtc-ds1672.c
index 4cd8efbef6cf..a3bb2cd9c881 100644
--- a/drivers/rtc/rtc-ds1672.c
+++ b/drivers/rtc/rtc-ds1672.c
@@ -106,8 +106,7 @@ static const struct rtc_class_ops ds1672_rtc_ops = {
.set_time = ds1672_set_time,
};
-static int ds1672_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ds1672_probe(struct i2c_client *client)
{
int err = 0;
struct rtc_device *rtc;
@@ -150,7 +149,7 @@ static struct i2c_driver ds1672_driver = {
.name = "rtc-ds1672",
.of_match_table = of_match_ptr(ds1672_of_match),
},
- .probe = &ds1672_probe,
+ .probe_new = ds1672_probe,
.id_table = ds1672_id,
};
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index 168bc27f1f5a..dd31a60c1fc6 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -566,8 +566,7 @@ static const struct dev_pm_ops ds3232_pm_ops = {
#if IS_ENABLED(CONFIG_I2C)
-static int ds3232_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ds3232_i2c_probe(struct i2c_client *client)
{
struct regmap *regmap;
static const struct regmap_config config = {
@@ -604,7 +603,7 @@ static struct i2c_driver ds3232_driver = {
.of_match_table = of_match_ptr(ds3232_of_match),
.pm = &ds3232_pm_ops,
},
- .probe = ds3232_i2c_probe,
+ .probe_new = ds3232_i2c_probe,
.id_table = ds3232_id,
};
diff --git a/drivers/rtc/rtc-em3027.c b/drivers/rtc/rtc-em3027.c
index 9f176bce48ba..53f9f9391a5f 100644
--- a/drivers/rtc/rtc-em3027.c
+++ b/drivers/rtc/rtc-em3027.c
@@ -111,8 +111,7 @@ static const struct rtc_class_ops em3027_rtc_ops = {
.set_time = em3027_set_time,
};
-static int em3027_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int em3027_probe(struct i2c_client *client)
{
struct rtc_device *rtc;
@@ -148,7 +147,7 @@ static struct i2c_driver em3027_driver = {
.name = "rtc-em3027",
.of_match_table = of_match_ptr(em3027_of_match),
},
- .probe = &em3027_probe,
+ .probe_new = em3027_probe,
.id_table = em3027_id,
};
diff --git a/drivers/rtc/rtc-fm3130.c b/drivers/rtc/rtc-fm3130.c
index 677ec2da13d8..f59bb81f23c0 100644
--- a/drivers/rtc/rtc-fm3130.c
+++ b/drivers/rtc/rtc-fm3130.c
@@ -340,8 +340,7 @@ static const struct rtc_class_ops fm3130_rtc_ops = {
static struct i2c_driver fm3130_driver;
-static int fm3130_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int fm3130_probe(struct i2c_client *client)
{
struct fm3130 *fm3130;
int err = -ENODEV;
@@ -518,7 +517,7 @@ static struct i2c_driver fm3130_driver = {
.driver = {
.name = "rtc-fm3130",
},
- .probe = fm3130_probe,
+ .probe_new = fm3130_probe,
.id_table = fm3130_id,
};
diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
index 90e602e99d03..cc710d682121 100644
--- a/drivers/rtc/rtc-hym8563.c
+++ b/drivers/rtc/rtc-hym8563.c
@@ -495,8 +495,7 @@ static int hym8563_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(hym8563_pm_ops, hym8563_suspend, hym8563_resume);
-static int hym8563_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int hym8563_probe(struct i2c_client *client)
{
struct hym8563 *hym8563;
int ret;
@@ -572,7 +571,7 @@ static struct i2c_driver hym8563_driver = {
.pm = &hym8563_pm_ops,
.of_match_table = hym8563_dt_idtable,
},
- .probe = hym8563_probe,
+ .probe_new = hym8563_probe,
.id_table = hym8563_id,
};
diff --git a/drivers/rtc/rtc-isl12022.c b/drivers/rtc/rtc-isl12022.c
index 961bd5d1d109..79461ded1a48 100644
--- a/drivers/rtc/rtc-isl12022.c
+++ b/drivers/rtc/rtc-isl12022.c
@@ -232,8 +232,7 @@ static const struct rtc_class_ops isl12022_rtc_ops = {
.set_time = isl12022_rtc_set_time,
};
-static int isl12022_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int isl12022_probe(struct i2c_client *client)
{
struct isl12022 *isl12022;
@@ -275,7 +274,7 @@ static struct i2c_driver isl12022_driver = {
.of_match_table = of_match_ptr(isl12022_dt_match),
#endif
},
- .probe = isl12022_probe,
+ .probe_new = isl12022_probe,
.id_table = isl12022_id,
};
diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
index 182dfa605515..f448a525333e 100644
--- a/drivers/rtc/rtc-isl1208.c
+++ b/drivers/rtc/rtc-isl1208.c
@@ -880,10 +880,14 @@ isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id)
if (rc)
return rc;
- if (client->irq > 0)
+ if (client->irq > 0) {
rc = isl1208_setup_irq(client, client->irq);
- if (rc)
- return rc;
+ if (rc)
+ return rc;
+
+ } else {
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, isl1208->rtc->features);
+ }
if (evdet_irq > 0 && evdet_irq != client->irq)
rc = isl1208_setup_irq(client, evdet_irq);
diff --git a/drivers/rtc/rtc-max6900.c b/drivers/rtc/rtc-max6900.c
index 4beadfa41644..0a33851cc51f 100644
--- a/drivers/rtc/rtc-max6900.c
+++ b/drivers/rtc/rtc-max6900.c
@@ -197,8 +197,7 @@ static const struct rtc_class_ops max6900_rtc_ops = {
.set_time = max6900_rtc_set_time,
};
-static int
-max6900_probe(struct i2c_client *client, const struct i2c_device_id *id)
+static int max6900_probe(struct i2c_client *client)
{
struct rtc_device *rtc;
@@ -225,7 +224,7 @@ static struct i2c_driver max6900_driver = {
.driver = {
.name = "rtc-max6900",
},
- .probe = max6900_probe,
+ .probe_new = max6900_probe,
.id_table = max6900_id,
};
diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c
index 522449b25921..f1c09f1db044 100644
--- a/drivers/rtc/rtc-mc146818-lib.c
+++ b/drivers/rtc/rtc-mc146818-lib.c
@@ -21,13 +21,13 @@ bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
unsigned long flags;
unsigned char seconds;
- for (i = 0; i < 10; i++) {
+ for (i = 0; i < 100; i++) {
spin_lock_irqsave(&rtc_lock, flags);
/*
* Check whether there is an update in progress during which the
* readout is unspecified. The maximum update time is ~2ms. Poll
- * every msec for completion.
+ * every 100 usec for completion.
*
* Store the second value before checking UIP so a long lasting
* NMI which happens to hit after the UIP check cannot make
@@ -37,7 +37,7 @@ bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) {
spin_unlock_irqrestore(&rtc_lock, flags);
- mdelay(1);
+ udelay(100);
continue;
}
@@ -56,7 +56,7 @@ bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
*/
if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) {
spin_unlock_irqrestore(&rtc_lock, flags);
- mdelay(1);
+ udelay(100);
continue;
}
diff --git a/drivers/rtc/rtc-mpfs.c b/drivers/rtc/rtc-mpfs.c
new file mode 100644
index 000000000000..f14d1925e0c9
--- /dev/null
+++ b/drivers/rtc/rtc-mpfs.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip MPFS RTC driver
+ *
+ * Copyright (c) 2021-2022 Microchip Corporation. All rights reserved.
+ *
+ * Author: Daire McNamara <daire.mcnamara@microchip.com>
+ * & Conor Dooley <conor.dooley@microchip.com>
+ */
+#include "linux/bits.h"
+#include "linux/iopoll.h"
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
+#include <linux/slab.h>
+#include <linux/rtc.h>
+
+#define CONTROL_REG 0x00
+#define MODE_REG 0x04
+#define PRESCALER_REG 0x08
+#define ALARM_LOWER_REG 0x0c
+#define ALARM_UPPER_REG 0x10
+#define COMPARE_LOWER_REG 0x14
+#define COMPARE_UPPER_REG 0x18
+#define DATETIME_LOWER_REG 0x20
+#define DATETIME_UPPER_REG 0x24
+
+#define CONTROL_RUNNING_BIT BIT(0)
+#define CONTROL_START_BIT BIT(0)
+#define CONTROL_STOP_BIT BIT(1)
+#define CONTROL_ALARM_ON_BIT BIT(2)
+#define CONTROL_ALARM_OFF_BIT BIT(3)
+#define CONTROL_RESET_BIT BIT(4)
+#define CONTROL_UPLOAD_BIT BIT(5)
+#define CONTROL_DOWNLOAD_BIT BIT(6)
+#define CONTROL_MATCH_BIT BIT(7)
+#define CONTROL_WAKEUP_CLR_BIT BIT(8)
+#define CONTROL_WAKEUP_SET_BIT BIT(9)
+#define CONTROL_UPDATED_BIT BIT(10)
+
+#define MODE_CLOCK_CALENDAR BIT(0)
+#define MODE_WAKE_EN BIT(1)
+#define MODE_WAKE_RESET BIT(2)
+#define MODE_WAKE_CONTINUE BIT(3)
+
+#define MAX_PRESCALER_COUNT GENMASK(25, 0)
+#define DATETIME_UPPER_MASK GENMASK(29, 0)
+#define ALARM_UPPER_MASK GENMASK(10, 0)
+
+#define UPLOAD_TIMEOUT_US 50
+
+struct mpfs_rtc_dev {
+ struct rtc_device *rtc;
+ void __iomem *base;
+};
+
+static void mpfs_rtc_start(struct mpfs_rtc_dev *rtcdev)
+{
+ u32 ctrl;
+
+ ctrl = readl(rtcdev->base + CONTROL_REG);
+ ctrl &= ~CONTROL_STOP_BIT;
+ ctrl |= CONTROL_START_BIT;
+ writel(ctrl, rtcdev->base + CONTROL_REG);
+}
+
+static void mpfs_rtc_clear_irq(struct mpfs_rtc_dev *rtcdev)
+{
+ u32 val = readl(rtcdev->base + CONTROL_REG);
+
+ val &= ~(CONTROL_ALARM_ON_BIT | CONTROL_STOP_BIT);
+ val |= CONTROL_ALARM_OFF_BIT;
+ writel(val, rtcdev->base + CONTROL_REG);
+ /*
+ * Ensure that the posted write to the CONTROL_REG register completed before
+ * returning from this function. Not doing this may result in the interrupt
+ * only being cleared some time after this function returns.
+ */
+ (void)readl(rtcdev->base + CONTROL_REG);
+}
+
+static int mpfs_rtc_readtime(struct device *dev, struct rtc_time *tm)
+{
+ struct mpfs_rtc_dev *rtcdev = dev_get_drvdata(dev);
+ u64 time;
+
+ time = readl(rtcdev->base + DATETIME_LOWER_REG);
+ time |= ((u64)readl(rtcdev->base + DATETIME_UPPER_REG) & DATETIME_UPPER_MASK) << 32;
+ rtc_time64_to_tm(time, tm);
+
+ return 0;
+}
+
+static int mpfs_rtc_settime(struct device *dev, struct rtc_time *tm)
+{
+ struct mpfs_rtc_dev *rtcdev = dev_get_drvdata(dev);
+ u32 ctrl, prog;
+ u64 time;
+ int ret;
+
+ time = rtc_tm_to_time64(tm);
+
+ writel((u32)time, rtcdev->base + DATETIME_LOWER_REG);
+ writel((u32)(time >> 32) & DATETIME_UPPER_MASK, rtcdev->base + DATETIME_UPPER_REG);
+
+ ctrl = readl(rtcdev->base + CONTROL_REG);
+ ctrl &= ~CONTROL_STOP_BIT;
+ ctrl |= CONTROL_UPLOAD_BIT;
+ writel(ctrl, rtcdev->base + CONTROL_REG);
+
+ ret = read_poll_timeout(readl, prog, prog & CONTROL_UPLOAD_BIT, 0, UPLOAD_TIMEOUT_US,
+ false, rtcdev->base + CONTROL_REG);
+ if (ret) {
+ dev_err(dev, "timed out uploading time to rtc");
+ return ret;
+ }
+ mpfs_rtc_start(rtcdev);
+
+ return 0;
+}
+
+static int mpfs_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct mpfs_rtc_dev *rtcdev = dev_get_drvdata(dev);
+ u32 mode = readl(rtcdev->base + MODE_REG);
+ u64 time;
+
+ alrm->enabled = mode & MODE_WAKE_EN;
+
+ time = readl(rtcdev->base + ALARM_LOWER_REG);
+ time |= ((u64)readl(rtcdev->base + ALARM_UPPER_REG) & ALARM_UPPER_MASK) << 32;
+ rtc_time64_to_tm(time, &alrm->time);
+
+ return 0;
+}
+
+static int mpfs_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct mpfs_rtc_dev *rtcdev = dev_get_drvdata(dev);
+ u32 mode, ctrl;
+ u64 time;
+
+ /* Disable the alarm before updating */
+ ctrl = readl(rtcdev->base + CONTROL_REG);
+ ctrl |= CONTROL_ALARM_OFF_BIT;
+ writel(ctrl, rtcdev->base + CONTROL_REG);
+
+ time = rtc_tm_to_time64(&alrm->time);
+
+ writel((u32)time, rtcdev->base + ALARM_LOWER_REG);
+ writel((u32)(time >> 32) & ALARM_UPPER_MASK, rtcdev->base + ALARM_UPPER_REG);
+
+ /* Bypass compare register in alarm mode */
+ writel(GENMASK(31, 0), rtcdev->base + COMPARE_LOWER_REG);
+ writel(GENMASK(29, 0), rtcdev->base + COMPARE_UPPER_REG);
+
+ /* Configure the RTC to enable the alarm. */
+ ctrl = readl(rtcdev->base + CONTROL_REG);
+ mode = readl(rtcdev->base + MODE_REG);
+ if (alrm->enabled) {
+ mode = MODE_WAKE_EN | MODE_WAKE_CONTINUE;
+ /* Enable the alarm */
+ ctrl &= ~CONTROL_ALARM_OFF_BIT;
+ ctrl |= CONTROL_ALARM_ON_BIT;
+ }
+ ctrl &= ~CONTROL_STOP_BIT;
+ ctrl |= CONTROL_START_BIT;
+ writel(ctrl, rtcdev->base + CONTROL_REG);
+ writel(mode, rtcdev->base + MODE_REG);
+
+ return 0;
+}
+
+static int mpfs_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct mpfs_rtc_dev *rtcdev = dev_get_drvdata(dev);
+ u32 ctrl;
+
+ ctrl = readl(rtcdev->base + CONTROL_REG);
+ ctrl &= ~(CONTROL_ALARM_ON_BIT | CONTROL_ALARM_OFF_BIT | CONTROL_STOP_BIT);
+
+ if (enabled)
+ ctrl |= CONTROL_ALARM_ON_BIT;
+ else
+ ctrl |= CONTROL_ALARM_OFF_BIT;
+
+ writel(ctrl, rtcdev->base + CONTROL_REG);
+
+ return 0;
+}
+
+static inline struct clk *mpfs_rtc_init_clk(struct device *dev)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = devm_clk_get(dev, "rtc");
+ if (IS_ERR(clk))
+ return clk;
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ERR_PTR(ret);
+
+ devm_add_action_or_reset(dev, (void (*) (void *))clk_disable_unprepare, clk);
+ return clk;
+}
+
+static irqreturn_t mpfs_rtc_wakeup_irq_handler(int irq, void *dev)
+{
+ struct mpfs_rtc_dev *rtcdev = dev;
+
+ mpfs_rtc_clear_irq(rtcdev);
+
+ rtc_update_irq(rtcdev->rtc, 1, RTC_IRQF | RTC_AF);
+
+ return IRQ_HANDLED;
+}
+
+static const struct rtc_class_ops mpfs_rtc_ops = {
+ .read_time = mpfs_rtc_readtime,
+ .set_time = mpfs_rtc_settime,
+ .read_alarm = mpfs_rtc_readalarm,
+ .set_alarm = mpfs_rtc_setalarm,
+ .alarm_irq_enable = mpfs_rtc_alarm_irq_enable,
+};
+
+static int mpfs_rtc_probe(struct platform_device *pdev)
+{
+ struct mpfs_rtc_dev *rtcdev;
+ struct clk *clk;
+ u32 prescaler;
+ int wakeup_irq, ret;
+
+ rtcdev = devm_kzalloc(&pdev->dev, sizeof(struct mpfs_rtc_dev), GFP_KERNEL);
+ if (!rtcdev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, rtcdev);
+
+ rtcdev->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtcdev->rtc))
+ return PTR_ERR(rtcdev->rtc);
+
+ rtcdev->rtc->ops = &mpfs_rtc_ops;
+
+ /* range is capped by the alarm registers: lower holds bits 31:0, upper bits 42:32 */
+ rtcdev->rtc->range_max = GENMASK_ULL(42, 0);
+
+ clk = mpfs_rtc_init_clk(&pdev->dev);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ rtcdev->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rtcdev->base)) {
+ dev_dbg(&pdev->dev, "invalid ioremap resources\n");
+ return PTR_ERR(rtcdev->base);
+ }
+
+ wakeup_irq = platform_get_irq(pdev, 0);
+ if (wakeup_irq <= 0) {
+ dev_dbg(&pdev->dev, "could not get wakeup irq\n");
+ return wakeup_irq;
+ }
+ ret = devm_request_irq(&pdev->dev, wakeup_irq, mpfs_rtc_wakeup_irq_handler, 0,
+ dev_name(&pdev->dev), rtcdev);
+ if (ret) {
+ dev_dbg(&pdev->dev, "could not request wakeup irq\n");
+ return ret;
+ }
+
+ /* prescaler hardware adds 1 to reg value */
+ prescaler = clk_get_rate(devm_clk_get(&pdev->dev, "rtcref")) - 1;
+
+ if (prescaler > MAX_PRESCALER_COUNT) {
+ dev_dbg(&pdev->dev, "invalid prescaler %u\n", prescaler);
+ return -EINVAL;
+ }
+
+ writel(prescaler, rtcdev->base + PRESCALER_REG);
+ dev_info(&pdev->dev, "prescaler set to: 0x%X\n", prescaler);
+
+ device_init_wakeup(&pdev->dev, true);
+ ret = dev_pm_set_wake_irq(&pdev->dev, wakeup_irq);
+ if (ret)
+ dev_err(&pdev->dev, "failed to enable irq wake\n");
+
+ return devm_rtc_register_device(rtcdev->rtc);
+}
+
+static int mpfs_rtc_remove(struct platform_device *pdev)
+{
+ dev_pm_clear_wake_irq(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id mpfs_rtc_of_match[] = {
+ { .compatible = "microchip,mpfs-rtc" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, mpfs_rtc_of_match);
+
+static struct platform_driver mpfs_rtc_driver = {
+ .probe = mpfs_rtc_probe,
+ .remove = mpfs_rtc_remove,
+ .driver = {
+ .name = "mpfs_rtc",
+ .of_match_table = mpfs_rtc_of_match,
+ },
+};
+
+module_platform_driver(mpfs_rtc_driver);
+
+MODULE_DESCRIPTION("Real time clock for Microchip Polarfire SoC");
+MODULE_AUTHOR("Daire McNamara <daire.mcnamara@microchip.com>");
+MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
+MODULE_LICENSE("GPL");
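
As a rough illustration (a user-space sketch, not part of the patch), this is how the driver above splits a 64-bit seconds count across the DATETIME_LOWER/DATETIME_UPPER register pair, mirroring mpfs_rtc_settime()/mpfs_rtc_readtime(); the 30-bit upper mask follows DATETIME_UPPER_MASK and the timestamp value is an arbitrary example:

    #include <stdint.h>
    #include <stdio.h>

    #define DATETIME_UPPER_MASK ((1u << 30) - 1)   /* GENMASK(29, 0) in the driver */

    int main(void)
    {
        uint64_t seconds = 1700000000ULL;                    /* arbitrary example timestamp */
        uint32_t lower = (uint32_t)seconds;                  /* written to DATETIME_LOWER_REG */
        uint32_t upper = (uint32_t)(seconds >> 32) & DATETIME_UPPER_MASK; /* DATETIME_UPPER_REG */
        uint64_t readback = ((uint64_t)upper << 32) | lower; /* what readtime reassembles */

        printf("lower=0x%08x upper=0x%08x readback=%llu\n",
               lower, upper, (unsigned long long)readback);
        return 0;
    }
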
diff --git a/drivers/rtc/rtc-nct3018y.c b/drivers/rtc/rtc-nct3018y.c
new file mode 100644
index 000000000000..d43acd3920ed
--- /dev/null
+++ b/drivers/rtc/rtc-nct3018y.c
@@ -0,0 +1,553 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2022 Nuvoton Technology Corporation
+
+#include <linux/bcd.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/rtc.h>
+#include <linux/slab.h>
+
+#define NCT3018Y_REG_SC 0x00 /* seconds */
+#define NCT3018Y_REG_SCA 0x01 /* alarm */
+#define NCT3018Y_REG_MN 0x02
+#define NCT3018Y_REG_MNA 0x03 /* alarm */
+#define NCT3018Y_REG_HR 0x04
+#define NCT3018Y_REG_HRA 0x05 /* alarm */
+#define NCT3018Y_REG_DW 0x06
+#define NCT3018Y_REG_DM 0x07
+#define NCT3018Y_REG_MO 0x08
+#define NCT3018Y_REG_YR 0x09
+#define NCT3018Y_REG_CTRL 0x0A /* timer control */
+#define NCT3018Y_REG_ST 0x0B /* status */
+#define NCT3018Y_REG_CLKO 0x0C /* clock out */
+
+#define NCT3018Y_BIT_AF BIT(7)
+#define NCT3018Y_BIT_ST BIT(7)
+#define NCT3018Y_BIT_DM BIT(6)
+#define NCT3018Y_BIT_HF BIT(5)
+#define NCT3018Y_BIT_DSM BIT(4)
+#define NCT3018Y_BIT_AIE BIT(3)
+#define NCT3018Y_BIT_OFIE BIT(2)
+#define NCT3018Y_BIT_CIE BIT(1)
+#define NCT3018Y_BIT_TWO BIT(0)
+
+#define NCT3018Y_REG_BAT_MASK 0x07
+#define NCT3018Y_REG_CLKO_F_MASK 0x03 /* frequency mask */
+#define NCT3018Y_REG_CLKO_CKE 0x80 /* clock out enabled */
+
+struct nct3018y {
+ struct rtc_device *rtc;
+ struct i2c_client *client;
+#ifdef CONFIG_COMMON_CLK
+ struct clk_hw clkout_hw;
+#endif
+};
+
+static int nct3018y_set_alarm_mode(struct i2c_client *client, bool on)
+{
+ int err, flags;
+
+ dev_dbg(&client->dev, "%s:on:%d\n", __func__, on);
+
+ flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CTRL);
+ if (flags < 0) {
+ dev_dbg(&client->dev,
+ "Failed to read NCT3018Y_REG_CTRL\n");
+ return flags;
+ }
+
+ if (on)
+ flags |= NCT3018Y_BIT_AIE;
+ else
+ flags &= ~NCT3018Y_BIT_AIE;
+
+ flags |= NCT3018Y_BIT_CIE;
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_CTRL, flags);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_CTRL\n");
+ return err;
+ }
+
+ flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_ST);
+ if (flags < 0) {
+ dev_dbg(&client->dev,
+ "Failed to read NCT3018Y_REG_ST\n");
+ return flags;
+ }
+
+ flags &= ~(NCT3018Y_BIT_AF);
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_ST, flags);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_ST\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int nct3018y_get_alarm_mode(struct i2c_client *client, unsigned char *alarm_enable,
+ unsigned char *alarm_flag)
+{
+ int flags;
+
+ if (alarm_enable) {
+ dev_dbg(&client->dev, "%s:NCT3018Y_REG_CTRL\n", __func__);
+ flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CTRL);
+ if (flags < 0)
+ return flags;
+ *alarm_enable = flags & NCT3018Y_BIT_AIE;
+ }
+
+ if (alarm_flag) {
+ dev_dbg(&client->dev, "%s:NCT3018Y_REG_ST\n", __func__);
+ flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_ST);
+ if (flags < 0)
+ return flags;
+ *alarm_flag = flags & NCT3018Y_BIT_AF;
+ }
+
+ dev_dbg(&client->dev, "%s:alarm_enable:%x alarm_flag:%x\n", __func__,
+ alarm_enable ? *alarm_enable : 0, alarm_flag ? *alarm_flag : 0);
+
+ return 0;
+}
+
+static irqreturn_t nct3018y_irq(int irq, void *dev_id)
+{
+ struct nct3018y *nct3018y = i2c_get_clientdata(dev_id);
+ struct i2c_client *client = nct3018y->client;
+ int err;
+ unsigned char alarm_flag;
+ unsigned char alarm_enable;
+
+ dev_dbg(&client->dev, "%s:irq:%d\n", __func__, irq);
+ err = nct3018y_get_alarm_mode(nct3018y->client, &alarm_enable, &alarm_flag);
+ if (err)
+ return IRQ_NONE;
+
+ if (alarm_flag) {
+ dev_dbg(&client->dev, "%s:alarm flag:%x\n",
+ __func__, alarm_flag);
+ rtc_update_irq(nct3018y->rtc, 1, RTC_IRQF | RTC_AF);
+ nct3018y_set_alarm_mode(nct3018y->client, 0);
+ dev_dbg(&client->dev, "%s:IRQ_HANDLED\n", __func__);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+/*
+ * In the routines that deal directly with the nct3018y hardware, we use
+ * rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch.
+ */
+static int nct3018y_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ unsigned char buf[10];
+ int err;
+
+ err = i2c_smbus_read_i2c_block_data(client, NCT3018Y_REG_ST, 1, buf);
+ if (err < 0)
+ return err;
+
+ if (!buf[0]) {
+ dev_dbg(&client->dev, "voltage <= 1.7V, date/time is not reliable\n");
+ return -EINVAL;
+ }
+
+ err = i2c_smbus_read_i2c_block_data(client, NCT3018Y_REG_SC, sizeof(buf), buf);
+ if (err < 0)
+ return err;
+
+ tm->tm_sec = bcd2bin(buf[0] & 0x7F);
+ tm->tm_min = bcd2bin(buf[2] & 0x7F);
+ tm->tm_hour = bcd2bin(buf[4] & 0x3F);
+ tm->tm_wday = buf[6] & 0x07;
+ tm->tm_mday = bcd2bin(buf[7] & 0x3F);
+ tm->tm_mon = bcd2bin(buf[8] & 0x1F) - 1;
+ tm->tm_year = bcd2bin(buf[9]) + 100;
+
+ return 0;
+}
+
+static int nct3018y_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ unsigned char buf[4] = {0};
+ int err;
+
+ buf[0] = bin2bcd(tm->tm_sec);
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_SC, buf[0]);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_SC\n");
+ return err;
+ }
+
+ buf[0] = bin2bcd(tm->tm_min);
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_MN, buf[0]);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_MN\n");
+ return err;
+ }
+
+ buf[0] = bin2bcd(tm->tm_hour);
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_HR, buf[0]);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_HR\n");
+ return err;
+ }
+
+ buf[0] = tm->tm_wday & 0x07;
+ buf[1] = bin2bcd(tm->tm_mday);
+ buf[2] = bin2bcd(tm->tm_mon + 1);
+ buf[3] = bin2bcd(tm->tm_year - 100);
+ err = i2c_smbus_write_i2c_block_data(client, NCT3018Y_REG_DW,
+ sizeof(buf), buf);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write day, month and year\n");
+ return -EIO;
+ }
+
+ return err;
+}
+
+static int nct3018y_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *tm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ unsigned char buf[5];
+ int err;
+
+ err = i2c_smbus_read_i2c_block_data(client, NCT3018Y_REG_SCA,
+ sizeof(buf), buf);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to read date\n");
+ return -EIO;
+ }
+
+ dev_dbg(&client->dev, "%s: raw data is sec=%02x, min=%02x hr=%02x\n",
+ __func__, buf[0], buf[2], buf[4]);
+
+ tm->time.tm_sec = bcd2bin(buf[0] & 0x7F);
+ tm->time.tm_min = bcd2bin(buf[2] & 0x7F);
+ tm->time.tm_hour = bcd2bin(buf[4] & 0x3F);
+
+ err = nct3018y_get_alarm_mode(client, &tm->enabled, &tm->pending);
+ if (err < 0)
+ return err;
+
+ dev_dbg(&client->dev, "%s:s=%d m=%d, hr=%d, enabled=%d, pending=%d\n",
+ __func__, tm->time.tm_sec, tm->time.tm_min,
+ tm->time.tm_hour, tm->enabled, tm->pending);
+
+ return 0;
+}
+
+static int nct3018y_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int err;
+
+ dev_dbg(dev, "%s, sec=%d, min=%d hour=%d tm->enabled:%d\n",
+ __func__, tm->time.tm_sec, tm->time.tm_min, tm->time.tm_hour,
+ tm->enabled);
+
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_SCA, bin2bcd(tm->time.tm_sec));
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_SCA\n");
+ return err;
+ }
+
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_MNA, bin2bcd(tm->time.tm_min));
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_MNA\n");
+ return err;
+ }
+
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_HRA, bin2bcd(tm->time.tm_hour));
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_HRA\n");
+ return err;
+ }
+
+ return nct3018y_set_alarm_mode(client, tm->enabled);
+}
+
+static int nct3018y_irq_enable(struct device *dev, unsigned int enabled)
+{
+ dev_dbg(dev, "%s: alarm enable=%d\n", __func__, enabled);
+
+ return nct3018y_set_alarm_mode(to_i2c_client(dev), enabled);
+}
+
+static int nct3018y_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int status, flags = 0;
+
+ switch (cmd) {
+ case RTC_VL_READ:
+ status = i2c_smbus_read_byte_data(client, NCT3018Y_REG_ST);
+ if (status < 0)
+ return status;
+
+ if (!(status & NCT3018Y_REG_BAT_MASK))
+ flags |= RTC_VL_DATA_INVALID;
+
+ return put_user(flags, (unsigned int __user *)arg);
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+#ifdef CONFIG_COMMON_CLK
+/*
+ * Handling of the clkout
+ */
+
+#define clkout_hw_to_nct3018y(_hw) container_of(_hw, struct nct3018y, clkout_hw)
+
+static const int clkout_rates[] = {
+ 32768,
+ 1024,
+ 32,
+ 1,
+};
+
+static unsigned long nct3018y_clkout_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct nct3018y *nct3018y = clkout_hw_to_nct3018y(hw);
+ struct i2c_client *client = nct3018y->client;
+ int flags;
+
+ flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CLKO);
+ if (flags < 0)
+ return 0;
+
+ flags &= NCT3018Y_REG_CLKO_F_MASK;
+ return clkout_rates[flags];
+}
+
+static long nct3018y_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(clkout_rates); i++)
+ if (clkout_rates[i] <= rate)
+ return clkout_rates[i];
+
+ return 0;
+}
+
+static int nct3018y_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct nct3018y *nct3018y = clkout_hw_to_nct3018y(hw);
+ struct i2c_client *client = nct3018y->client;
+ int i, flags;
+
+ flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CLKO);
+ if (flags < 0)
+ return flags;
+
+ for (i = 0; i < ARRAY_SIZE(clkout_rates); i++)
+ if (clkout_rates[i] == rate) {
+ flags &= ~NCT3018Y_REG_CLKO_F_MASK;
+ flags |= i;
+ return i2c_smbus_write_byte_data(client, NCT3018Y_REG_CLKO, flags);
+ }
+
+ return -EINVAL;
+}
+
+static int nct3018y_clkout_control(struct clk_hw *hw, bool enable)
+{
+ struct nct3018y *nct3018y = clkout_hw_to_nct3018y(hw);
+ struct i2c_client *client = nct3018y->client;
+ int flags;
+
+ flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CLKO);
+ if (flags < 0)
+ return flags;
+
+ if (enable)
+ flags |= NCT3018Y_REG_CLKO_CKE;
+ else
+ flags &= ~NCT3018Y_REG_CLKO_CKE;
+
+ return i2c_smbus_write_byte_data(client, NCT3018Y_REG_CLKO, flags);
+}
+
+static int nct3018y_clkout_prepare(struct clk_hw *hw)
+{
+ return nct3018y_clkout_control(hw, 1);
+}
+
+static void nct3018y_clkout_unprepare(struct clk_hw *hw)
+{
+ nct3018y_clkout_control(hw, 0);
+}
+
+static int nct3018y_clkout_is_prepared(struct clk_hw *hw)
+{
+ struct nct3018y *nct3018y = clkout_hw_to_nct3018y(hw);
+ struct i2c_client *client = nct3018y->client;
+ int flags;
+
+ flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CLKO);
+ if (flags < 0)
+ return flags;
+
+ return flags & NCT3018Y_REG_CLKO_CKE;
+}
+
+static const struct clk_ops nct3018y_clkout_ops = {
+ .prepare = nct3018y_clkout_prepare,
+ .unprepare = nct3018y_clkout_unprepare,
+ .is_prepared = nct3018y_clkout_is_prepared,
+ .recalc_rate = nct3018y_clkout_recalc_rate,
+ .round_rate = nct3018y_clkout_round_rate,
+ .set_rate = nct3018y_clkout_set_rate,
+};
+
+static struct clk *nct3018y_clkout_register_clk(struct nct3018y *nct3018y)
+{
+ struct i2c_client *client = nct3018y->client;
+ struct device_node *node = client->dev.of_node;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ init.name = "nct3018y-clkout";
+ init.ops = &nct3018y_clkout_ops;
+ init.flags = 0;
+ init.parent_names = NULL;
+ init.num_parents = 0;
+ nct3018y->clkout_hw.init = &init;
+
+ /* optional override of the clockname */
+ of_property_read_string(node, "clock-output-names", &init.name);
+
+ /* register the clock */
+ clk = devm_clk_register(&client->dev, &nct3018y->clkout_hw);
+
+ if (!IS_ERR(clk))
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+
+ return clk;
+}
+#endif
+
+static const struct rtc_class_ops nct3018y_rtc_ops = {
+ .read_time = nct3018y_rtc_read_time,
+ .set_time = nct3018y_rtc_set_time,
+ .read_alarm = nct3018y_rtc_read_alarm,
+ .set_alarm = nct3018y_rtc_set_alarm,
+ .alarm_irq_enable = nct3018y_irq_enable,
+ .ioctl = nct3018y_ioctl,
+};
+
+static int nct3018y_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct nct3018y *nct3018y;
+ int err, flags;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
+ I2C_FUNC_SMBUS_BYTE |
+ I2C_FUNC_SMBUS_BLOCK_DATA))
+ return -ENODEV;
+
+ nct3018y = devm_kzalloc(&client->dev, sizeof(struct nct3018y),
+ GFP_KERNEL);
+ if (!nct3018y)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, nct3018y);
+ nct3018y->client = client;
+ device_set_wakeup_capable(&client->dev, 1);
+
+ flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CTRL);
+ if (flags < 0) {
+ dev_dbg(&client->dev, "%s: read error\n", __func__);
+ return flags;
+ } else if (flags & NCT3018Y_BIT_TWO) {
+ dev_dbg(&client->dev, "%s: NCT3018Y_BIT_TWO is set\n", __func__);
+ }
+
+ flags = NCT3018Y_BIT_TWO;
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_CTRL, flags);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_CTRL\n");
+ return err;
+ }
+
+ flags = 0;
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_ST, flags);
+ if (err < 0) {
+ dev_dbg(&client->dev, "%s: write error\n", __func__);
+ return err;
+ }
+
+ nct3018y->rtc = devm_rtc_allocate_device(&client->dev);
+ if (IS_ERR(nct3018y->rtc))
+ return PTR_ERR(nct3018y->rtc);
+
+ nct3018y->rtc->ops = &nct3018y_rtc_ops;
+ nct3018y->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ nct3018y->rtc->range_max = RTC_TIMESTAMP_END_2099;
+
+ if (client->irq > 0) {
+ err = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, nct3018y_irq,
+ IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
+ "nct3018y", client);
+ if (err) {
+ dev_dbg(&client->dev, "unable to request IRQ %d\n", client->irq);
+ return err;
+ }
+ } else {
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, nct3018y->rtc->features);
+ clear_bit(RTC_FEATURE_ALARM, nct3018y->rtc->features);
+ }
+
+#ifdef CONFIG_COMMON_CLK
+ /* register clk in common clk framework */
+ nct3018y_clkout_register_clk(nct3018y);
+#endif
+
+ return devm_rtc_register_device(nct3018y->rtc);
+}
+
+static const struct i2c_device_id nct3018y_id[] = {
+ { "nct3018y", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, nct3018y_id);
+
+static const struct of_device_id nct3018y_of_match[] = {
+ { .compatible = "nuvoton,nct3018y" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, nct3018y_of_match);
+
+static struct i2c_driver nct3018y_driver = {
+ .driver = {
+ .name = "rtc-nct3018y",
+ .of_match_table = of_match_ptr(nct3018y_of_match),
+ },
+ .probe = nct3018y_probe,
+ .id_table = nct3018y_id,
+};
+
+module_i2c_driver(nct3018y_driver);
+
+MODULE_AUTHOR("Medad CChien <ctcchien@nuvoton.com>");
+MODULE_AUTHOR("Mia Lin <mimi05633@gmail.com>");
+MODULE_DESCRIPTION("Nuvoton NCT3018Y RTC driver");
+MODULE_LICENSE("GPL");
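
As a rough illustration (a user-space sketch, not part of the patch), this shows the BCD packing used by nct3018y_rtc_read_time()/nct3018y_rtc_set_time() above; the kernel provides bcd2bin()/bin2bcd() in <linux/bcd.h>, re-implemented here only so the arithmetic is visible:

    #include <stdint.h>
    #include <stdio.h>

    /* user-space equivalents of the kernel's bin2bcd()/bcd2bin() helpers */
    static uint8_t bin2bcd(uint8_t val)
    {
        return (uint8_t)(((val / 10) << 4) | (val % 10));
    }

    static uint8_t bcd2bin(uint8_t val)
    {
        return (uint8_t)((val >> 4) * 10 + (val & 0x0f));
    }

    int main(void)
    {
        uint8_t sec = 59;
        uint8_t reg = bin2bcd(sec);   /* 0x59, as stored in NCT3018Y_REG_SC */

        printf("sec=%u reg=0x%02x back=%u\n", sec, reg, bcd2bin(reg & 0x7f));
        return 0;
    }
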
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
index b1b1943de844..6174b3fd4b98 100644
--- a/drivers/rtc/rtc-pcf8523.c
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -390,8 +390,7 @@ static const struct regmap_config regmap_config = {
.max_register = 0x13,
};
-static int pcf8523_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int pcf8523_probe(struct i2c_client *client)
{
struct pcf8523 *pcf8523;
struct rtc_device *rtc;
@@ -485,7 +484,7 @@ static struct i2c_driver pcf8523_driver = {
.name = "rtc-pcf8523",
.of_match_table = pcf8523_of_match,
},
- .probe = pcf8523_probe,
+ .probe_new = pcf8523_probe,
.id_table = pcf8523_id,
};
module_i2c_driver(pcf8523_driver);
diff --git a/drivers/rtc/rtc-pcf85363.c b/drivers/rtc/rtc-pcf85363.c
index bb3e9ba75f6c..c05b722f0060 100644
--- a/drivers/rtc/rtc-pcf85363.c
+++ b/drivers/rtc/rtc-pcf85363.c
@@ -350,8 +350,7 @@ static const struct pcf85x63_config pcf_85363_config = {
.num_nvram = 2
};
-static int pcf85363_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int pcf85363_probe(struct i2c_client *client)
{
struct pcf85363 *pcf85363;
const struct pcf85x63_config *config = &pcf_85363_config;
@@ -436,7 +435,7 @@ static struct i2c_driver pcf85363_driver = {
.name = "pcf85363",
.of_match_table = of_match_ptr(dev_ids),
},
- .probe = pcf85363_probe,
+ .probe_new = pcf85363_probe,
};
module_i2c_driver(pcf85363_driver);
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index 9d06813e2e6d..11fa9788558b 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -509,8 +509,7 @@ static const struct rtc_class_ops pcf8563_rtc_ops = {
.alarm_irq_enable = pcf8563_irq_enable,
};
-static int pcf8563_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int pcf8563_probe(struct i2c_client *client)
{
struct pcf8563 *pcf8563;
int err;
@@ -606,7 +605,7 @@ static struct i2c_driver pcf8563_driver = {
.name = "rtc-pcf8563",
.of_match_table = of_match_ptr(pcf8563_of_match),
},
- .probe = pcf8563_probe,
+ .probe_new = pcf8563_probe,
.id_table = pcf8563_id,
};
diff --git a/drivers/rtc/rtc-pcf8583.c b/drivers/rtc/rtc-pcf8583.c
index c80ca20e5d8d..87074d178274 100644
--- a/drivers/rtc/rtc-pcf8583.c
+++ b/drivers/rtc/rtc-pcf8583.c
@@ -275,8 +275,7 @@ static const struct rtc_class_ops pcf8583_rtc_ops = {
.set_time = pcf8583_rtc_set_time,
};
-static int pcf8583_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int pcf8583_probe(struct i2c_client *client)
{
struct pcf8583 *pcf8583;
@@ -307,7 +306,7 @@ static struct i2c_driver pcf8583_driver = {
.driver = {
.name = "pcf8583",
},
- .probe = pcf8583_probe,
+ .probe_new = pcf8583_probe,
.id_table = pcf8583_id,
};
diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c
index 8cb84c9595fc..eb483a30bd92 100644
--- a/drivers/rtc/rtc-rv3029c2.c
+++ b/drivers/rtc/rtc-rv3029c2.c
@@ -784,8 +784,7 @@ static const struct regmap_config config = {
#if IS_ENABLED(CONFIG_I2C)
-static int rv3029_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int rv3029_i2c_probe(struct i2c_client *client)
{
struct regmap *regmap;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK |
@@ -819,7 +818,7 @@ static struct i2c_driver rv3029_driver = {
.name = "rv3029",
.of_match_table = of_match_ptr(rv3029_of_match),
},
- .probe = rv3029_i2c_probe,
+ .probe_new = rv3029_i2c_probe,
.id_table = rv3029_id,
};
diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c
index f69e0b1137cd..3527a0521e9b 100644
--- a/drivers/rtc/rtc-rv8803.c
+++ b/drivers/rtc/rtc-rv8803.c
@@ -9,6 +9,7 @@
#include <linux/bcd.h>
#include <linux/bitops.h>
+#include <linux/bitfield.h>
#include <linux/log2.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
@@ -33,6 +34,7 @@
#define RV8803_EXT 0x0D
#define RV8803_FLAG 0x0E
#define RV8803_CTRL 0x0F
+#define RV8803_OSC_OFFSET 0x2C
#define RV8803_EXT_WADA BIT(6)
@@ -49,12 +51,15 @@
#define RV8803_CTRL_TIE BIT(4)
#define RV8803_CTRL_UIE BIT(5)
+#define RX8803_CTRL_CSEL GENMASK(7, 6)
+
#define RX8900_BACKUP_CTRL 0x18
#define RX8900_FLAG_SWOFF BIT(2)
#define RX8900_FLAG_VDETOFF BIT(3)
enum rv8803_type {
rv_8803,
+ rx_8803,
rx_8804,
rx_8900
};
@@ -64,6 +69,7 @@ struct rv8803_data {
struct rtc_device *rtc;
struct mutex flags_lock;
u8 ctrl;
+ u8 backup;
enum rv8803_type type;
};
@@ -136,6 +142,44 @@ static int rv8803_write_regs(const struct i2c_client *client,
return ret;
}
+static int rv8803_regs_init(struct rv8803_data *rv8803)
+{
+ int ret;
+
+ ret = rv8803_write_reg(rv8803->client, RV8803_OSC_OFFSET, 0x00);
+ if (ret)
+ return ret;
+
+ ret = rv8803_write_reg(rv8803->client, RV8803_CTRL,
+ FIELD_PREP(RX8803_CTRL_CSEL, 1)); /* 2s */
+ if (ret)
+ return ret;
+
+ ret = rv8803_write_regs(rv8803->client, RV8803_ALARM_MIN, 3,
+ (u8[]){ 0, 0, 0 });
+ if (ret)
+ return ret;
+
+ return rv8803_write_reg(rv8803->client, RV8803_RAM, 0x00);
+}
+
+static int rv8803_regs_configure(struct rv8803_data *rv8803);
+
+static int rv8803_regs_reset(struct rv8803_data *rv8803)
+{
+ /*
+ * The RV-8803 resets all registers to POR defaults after voltage-loss,
+ * the Epson RTCs don't, so we manually reset the remainder here.
+ */
+ if (rv8803->type == rx_8803 || rv8803->type == rx_8900) {
+ int ret = rv8803_regs_init(rv8803);
+ if (ret)
+ return ret;
+ }
+
+ return rv8803_regs_configure(rv8803);
+}
+
static irqreturn_t rv8803_handle_irq(int irq, void *dev_id)
{
struct i2c_client *client = dev_id;
@@ -269,6 +313,14 @@ static int rv8803_set_time(struct device *dev, struct rtc_time *tm)
return flags;
}
+ if (flags & RV8803_FLAG_V2F) {
+ ret = rv8803_regs_reset(rv8803);
+ if (ret) {
+ mutex_unlock(&rv8803->flags_lock);
+ return ret;
+ }
+ }
+
ret = rv8803_write_reg(rv8803->client, RV8803_FLAG,
flags & ~(RV8803_FLAG_V1F | RV8803_FLAG_V2F));
@@ -498,18 +550,32 @@ static int rx8900_trickle_charger_init(struct rv8803_data *rv8803)
if (err < 0)
return err;
- flags = ~(RX8900_FLAG_VDETOFF | RX8900_FLAG_SWOFF) & (u8)err;
-
- if (of_property_read_bool(node, "epson,vdet-disable"))
- flags |= RX8900_FLAG_VDETOFF;
-
- if (of_property_read_bool(node, "trickle-diode-disable"))
- flags |= RX8900_FLAG_SWOFF;
+ flags = (u8)err;
+ flags &= ~(RX8900_FLAG_VDETOFF | RX8900_FLAG_SWOFF);
+ flags |= rv8803->backup;
return i2c_smbus_write_byte_data(rv8803->client, RX8900_BACKUP_CTRL,
flags);
}
+/* configure registers with values different from the Power-On reset defaults */
+static int rv8803_regs_configure(struct rv8803_data *rv8803)
+{
+ int err;
+
+ err = rv8803_write_reg(rv8803->client, RV8803_EXT, RV8803_EXT_WADA);
+ if (err)
+ return err;
+
+ err = rx8900_trickle_charger_init(rv8803);
+ if (err) {
+ dev_err(&rv8803->client->dev, "failed to init charger\n");
+ return err;
+ }
+
+ return 0;
+}
+
static int rv8803_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -576,15 +642,15 @@ static int rv8803_probe(struct i2c_client *client,
if (!client->irq)
clear_bit(RTC_FEATURE_ALARM, rv8803->rtc->features);
- err = rv8803_write_reg(rv8803->client, RV8803_EXT, RV8803_EXT_WADA);
- if (err)
- return err;
+ if (of_property_read_bool(client->dev.of_node, "epson,vdet-disable"))
+ rv8803->backup |= RX8900_FLAG_VDETOFF;
- err = rx8900_trickle_charger_init(rv8803);
- if (err) {
- dev_err(&client->dev, "failed to init charger\n");
+ if (of_property_read_bool(client->dev.of_node, "trickle-diode-disable"))
+ rv8803->backup |= RX8900_FLAG_SWOFF;
+
+ err = rv8803_regs_configure(rv8803);
+ if (err)
return err;
- }
rv8803->rtc->ops = &rv8803_rtc_ops;
rv8803->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
@@ -603,7 +669,7 @@ static int rv8803_probe(struct i2c_client *client,
static const struct i2c_device_id rv8803_id[] = {
{ "rv8803", rv_8803 },
{ "rv8804", rx_8804 },
- { "rx8803", rv_8803 },
+ { "rx8803", rx_8803 },
{ "rx8900", rx_8900 },
{ }
};
@@ -616,7 +682,7 @@ static const __maybe_unused struct of_device_id rv8803_of_match[] = {
},
{
.compatible = "epson,rx8803",
- .data = (void *)rv_8803
+ .data = (void *)rx_8803
},
{
.compatible = "epson,rx8804",
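
As a rough illustration (a user-space sketch, not part of the patch), this shows what FIELD_PREP(RX8803_CTRL_CSEL, 1) in rv8803_regs_init() above evaluates to; GENMASK() and FIELD_PREP() are approximated for a 32-bit word so the bit placement is visible:

    #include <stdint.h>
    #include <stdio.h>

    /* simplified 32-bit stand-ins for the kernel's GENMASK()/FIELD_PREP() */
    #define GENMASK(h, l)         (((~0u) << (l)) & (~0u >> (31 - (h))))
    #define FIELD_PREP(mask, val) (((unsigned int)(val) << __builtin_ctz(mask)) & (mask))

    int main(void)
    {
        unsigned int csel = GENMASK(7, 6);   /* RX8803_CTRL_CSEL, bits 7:6 */

        /* the value 1 lands at bit 6 (0x40), selecting the 2 s compensation interval */
        printf("CSEL mask=0x%02x, FIELD_PREP(CSEL, 1)=0x%02x\n",
               csel, FIELD_PREP(csel, 1));
        return 0;
    }
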
diff --git a/drivers/rtc/rtc-rx6110.c b/drivers/rtc/rtc-rx6110.c
index 758fd6e11a15..cc634558b928 100644
--- a/drivers/rtc/rtc-rx6110.c
+++ b/drivers/rtc/rtc-rx6110.c
@@ -419,8 +419,7 @@ static struct regmap_config regmap_i2c_config = {
.read_flag_mask = 0x80,
};
-static int rx6110_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int rx6110_i2c_probe(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
struct rx6110_data *rx6110;
@@ -464,7 +463,7 @@ static struct i2c_driver rx6110_i2c_driver = {
.name = RX6110_DRIVER_NAME,
.acpi_match_table = rx6110_i2c_acpi_match,
},
- .probe = rx6110_i2c_probe,
+ .probe_new = rx6110_i2c_probe,
.id_table = rx6110_i2c_id,
};
diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
index b32117ccd74b..dde86f3e2a4b 100644
--- a/drivers/rtc/rtc-rx8025.c
+++ b/drivers/rtc/rtc-rx8025.c
@@ -55,6 +55,8 @@
#define RX8025_BIT_CTRL2_XST BIT(5)
#define RX8025_BIT_CTRL2_VDET BIT(6)
+#define RX8035_BIT_HOUR_1224 BIT(7)
+
/* Clock precision adjustment */
#define RX8025_ADJ_RESOLUTION 3050 /* in ppb */
#define RX8025_ADJ_DATA_MAX 62
@@ -78,6 +80,7 @@ struct rx8025_data {
struct rtc_device *rtc;
enum rx_model model;
u8 ctrl1;
+ int is_24;
};
static s32 rx8025_read_reg(const struct i2c_client *client, u8 number)
@@ -226,7 +229,7 @@ static int rx8025_get_time(struct device *dev, struct rtc_time *dt)
dt->tm_sec = bcd2bin(date[RX8025_REG_SEC] & 0x7f);
dt->tm_min = bcd2bin(date[RX8025_REG_MIN] & 0x7f);
- if (rx8025->ctrl1 & RX8025_BIT_CTRL1_1224)
+ if (rx8025->is_24)
dt->tm_hour = bcd2bin(date[RX8025_REG_HOUR] & 0x3f);
else
dt->tm_hour = bcd2bin(date[RX8025_REG_HOUR] & 0x1f) % 12
@@ -254,7 +257,7 @@ static int rx8025_set_time(struct device *dev, struct rtc_time *dt)
*/
date[RX8025_REG_SEC] = bin2bcd(dt->tm_sec);
date[RX8025_REG_MIN] = bin2bcd(dt->tm_min);
- if (rx8025->ctrl1 & RX8025_BIT_CTRL1_1224)
+ if (rx8025->is_24)
date[RX8025_REG_HOUR] = bin2bcd(dt->tm_hour);
else
date[RX8025_REG_HOUR] = (dt->tm_hour >= 12 ? 0x20 : 0)
@@ -279,6 +282,7 @@ static int rx8025_init_client(struct i2c_client *client)
struct rx8025_data *rx8025 = i2c_get_clientdata(client);
u8 ctrl[2], ctrl2;
int need_clear = 0;
+ int hour_reg;
int err;
err = rx8025_read_regs(client, RX8025_REG_CTRL1, 2, ctrl);
@@ -303,6 +307,16 @@ static int rx8025_init_client(struct i2c_client *client)
err = rx8025_write_reg(client, RX8025_REG_CTRL2, ctrl2);
}
+
+ if (rx8025->model == model_rx_8035) {
+ /* In RX-8035, 12/24 flag is in the hour register */
+ hour_reg = rx8025_read_reg(client, RX8025_REG_HOUR);
+ if (hour_reg < 0)
+ return hour_reg;
+ rx8025->is_24 = (hour_reg & RX8035_BIT_HOUR_1224);
+ } else {
+ rx8025->is_24 = (ctrl[0] & RX8025_BIT_CTRL1_1224);
+ }
out:
return err;
}
@@ -329,7 +343,7 @@ static int rx8025_read_alarm(struct device *dev, struct rtc_wkalrm *t)
/* Hardware alarms precision is 1 minute! */
t->time.tm_sec = 0;
t->time.tm_min = bcd2bin(ald[0] & 0x7f);
- if (rx8025->ctrl1 & RX8025_BIT_CTRL1_1224)
+ if (rx8025->is_24)
t->time.tm_hour = bcd2bin(ald[1] & 0x3f);
else
t->time.tm_hour = bcd2bin(ald[1] & 0x1f) % 12
@@ -350,7 +364,7 @@ static int rx8025_set_alarm(struct device *dev, struct rtc_wkalrm *t)
int err;
ald[0] = bin2bcd(t->time.tm_min);
- if (rx8025->ctrl1 & RX8025_BIT_CTRL1_1224)
+ if (rx8025->is_24)
ald[1] = bin2bcd(t->time.tm_hour);
else
ald[1] = (t->time.tm_hour >= 12 ? 0x20 : 0)
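
As a rough illustration (a user-space sketch, not part of the patch), this shows the 12-hour decode that rx8025_get_time() above applies when is_24 is not set; bit 5 of the hour register is treated as the PM flag and the low five bits as the BCD hour:

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t bcd2bin(uint8_t val)   /* user-space stand-in for the kernel helper */
    {
        return (uint8_t)((val >> 4) * 10 + (val & 0x0f));
    }

    int main(void)
    {
        uint8_t reg = 0x32;   /* PM flag (bit 5) set, BCD hour "12" -> 12:00 in 24-hour time */
        int hour = bcd2bin(reg & 0x1f) % 12 + ((reg & 0x20) ? 12 : 0);

        printf("hour register 0x%02x -> %d\n", reg, hour);
        return 0;
    }
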
diff --git a/drivers/rtc/rtc-rx8581.c b/drivers/rtc/rtc-rx8581.c
index aed4898a0ff4..14edb7534c97 100644
--- a/drivers/rtc/rtc-rx8581.c
+++ b/drivers/rtc/rtc-rx8581.c
@@ -248,8 +248,7 @@ static const struct rx85x1_config rx8571_config = {
.num_nvram = 2
};
-static int rx8581_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int rx8581_probe(struct i2c_client *client)
{
struct rx8581 *rx8581;
const struct rx85x1_config *config = &rx8581_config;
@@ -326,7 +325,7 @@ static struct i2c_driver rx8581_driver = {
.name = "rtc-rx8581",
.of_match_table = of_match_ptr(rx8581_of_match),
},
- .probe = rx8581_probe,
+ .probe_new = rx8581_probe,
.id_table = rx8581_id,
};
diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c
index 26278c770731..81d97b1d3159 100644
--- a/drivers/rtc/rtc-s35390a.c
+++ b/drivers/rtc/rtc-s35390a.c
@@ -420,8 +420,7 @@ static const struct rtc_class_ops s35390a_rtc_ops = {
.ioctl = s35390a_rtc_ioctl,
};
-static int s35390a_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int s35390a_probe(struct i2c_client *client)
{
int err, err_read;
unsigned int i;
@@ -502,7 +501,7 @@ static struct i2c_driver s35390a_driver = {
.name = "rtc-s35390a",
.of_match_table = of_match_ptr(s35390a_of_match),
},
- .probe = s35390a_probe,
+ .probe_new = s35390a_probe,
.id_table = s35390a_id,
};
diff --git a/drivers/rtc/rtc-sd3078.c b/drivers/rtc/rtc-sd3078.c
index 24e8528e23ec..e2f90d768ca8 100644
--- a/drivers/rtc/rtc-sd3078.c
+++ b/drivers/rtc/rtc-sd3078.c
@@ -163,8 +163,7 @@ static const struct regmap_config regmap_config = {
.max_register = 0x11,
};
-static int sd3078_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int sd3078_probe(struct i2c_client *client)
{
int ret;
struct sd3078 *sd3078;
@@ -218,7 +217,7 @@ static struct i2c_driver sd3078_driver = {
.name = "sd3078",
.of_match_table = of_match_ptr(rtc_dt_match),
},
- .probe = sd3078_probe,
+ .probe_new = sd3078_probe,
.id_table = sd3078_id,
};
diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c
index d4777b01ab22..736fe535cd45 100644
--- a/drivers/rtc/rtc-spear.c
+++ b/drivers/rtc/rtc-spear.c
@@ -388,7 +388,7 @@ static int spear_rtc_probe(struct platform_device *pdev)
config->rtc->ops = &spear_rtc_ops;
config->rtc->range_min = RTC_TIMESTAMP_BEGIN_0000;
- config->rtc->range_min = RTC_TIMESTAMP_END_9999;
+ config->rtc->range_max = RTC_TIMESTAMP_END_9999;
status = devm_rtc_register_device(config->rtc);
if (status)
diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
index 57540727ce1c..ed5516089e9a 100644
--- a/drivers/rtc/rtc-sun6i.c
+++ b/drivers/rtc/rtc-sun6i.c
@@ -875,6 +875,8 @@ static const struct of_device_id sun6i_rtc_dt_ids[] = {
{ .compatible = "allwinner,sun50i-h6-rtc" },
{ .compatible = "allwinner,sun50i-h616-rtc",
.data = (void *)RTC_LINEAR_DAY },
+ { .compatible = "allwinner,sun50i-r329-rtc",
+ .data = (void *)RTC_LINEAR_DAY },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sun6i_rtc_dt_ids);
diff --git a/drivers/rtc/rtc-ti-k3.c b/drivers/rtc/rtc-ti-k3.c
new file mode 100644
index 000000000000..7a0f181d3fef
--- /dev/null
+++ b/drivers/rtc/rtc-ti-k3.c
@@ -0,0 +1,680 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments K3 RTC driver
+ *
+ * Copyright (C) 2021-2022 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/rtc.h>
+
+/* Registers */
+#define REG_K3RTC_S_CNT_LSW 0x08
+#define REG_K3RTC_S_CNT_MSW 0x0c
+#define REG_K3RTC_COMP 0x10
+#define REG_K3RTC_ON_OFF_S_CNT_LSW 0x20
+#define REG_K3RTC_ON_OFF_S_CNT_MSW 0x24
+#define REG_K3RTC_SCRATCH0 0x30
+#define REG_K3RTC_SCRATCH7 0x4c
+#define REG_K3RTC_GENERAL_CTL 0x50
+#define REG_K3RTC_IRQSTATUS_RAW_SYS 0x54
+#define REG_K3RTC_IRQSTATUS_SYS 0x58
+#define REG_K3RTC_IRQENABLE_SET_SYS 0x5c
+#define REG_K3RTC_IRQENABLE_CLR_SYS 0x60
+#define REG_K3RTC_SYNCPEND 0x68
+#define REG_K3RTC_KICK0 0x70
+#define REG_K3RTC_KICK1 0x74
+
+/* Freeze when lsw is read and unfreeze when msw is read */
+#define K3RTC_CNT_FMODE_S_CNT_VALUE (0x2 << 24)
+
+/* Magic values for lock/unlock */
+#define K3RTC_KICK0_UNLOCK_VALUE 0x83e70b13
+#define K3RTC_KICK1_UNLOCK_VALUE 0x95a4f1e0
+
+/* Multiplier for ppb conversions */
+#define K3RTC_PPB_MULT (1000000000LL)
+/* Min and max values supported with 'offset' interface (swapped sign) */
+#define K3RTC_MIN_OFFSET (-277761)
+#define K3RTC_MAX_OFFSET (277778)
+
+/**
+ * struct ti_k3_rtc_soc_data - Private per-compatible data for ti-k3-rtc
+ * @unlock_irq_erratum: Has erratum for unlock infinite IRQs (erratum i2327)
+ */
+struct ti_k3_rtc_soc_data {
+ const bool unlock_irq_erratum;
+};
+
+static const struct regmap_config ti_k3_rtc_regmap_config = {
+ .name = "peripheral-registers",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = REG_K3RTC_KICK1,
+};
+
+enum ti_k3_rtc_fields {
+ K3RTC_KICK0,
+ K3RTC_KICK1,
+ K3RTC_S_CNT_LSW,
+ K3RTC_S_CNT_MSW,
+ K3RTC_O32K_OSC_DEP_EN,
+ K3RTC_UNLOCK,
+ K3RTC_CNT_FMODE,
+ K3RTC_PEND,
+ K3RTC_RELOAD_FROM_BBD,
+ K3RTC_COMP,
+
+ K3RTC_ALM_S_CNT_LSW,
+ K3RTC_ALM_S_CNT_MSW,
+ K3RTC_IRQ_STATUS_RAW,
+ K3RTC_IRQ_STATUS,
+ K3RTC_IRQ_ENABLE_SET,
+ K3RTC_IRQ_ENABLE_CLR,
+
+ K3RTC_IRQ_STATUS_ALT,
+ K3RTC_IRQ_ENABLE_CLR_ALT,
+
+ K3_RTC_MAX_FIELDS
+};
+
+static const struct reg_field ti_rtc_reg_fields[] = {
+ [K3RTC_KICK0] = REG_FIELD(REG_K3RTC_KICK0, 0, 31),
+ [K3RTC_KICK1] = REG_FIELD(REG_K3RTC_KICK1, 0, 31),
+ [K3RTC_S_CNT_LSW] = REG_FIELD(REG_K3RTC_S_CNT_LSW, 0, 31),
+ [K3RTC_S_CNT_MSW] = REG_FIELD(REG_K3RTC_S_CNT_MSW, 0, 15),
+ [K3RTC_O32K_OSC_DEP_EN] = REG_FIELD(REG_K3RTC_GENERAL_CTL, 21, 21),
+ [K3RTC_UNLOCK] = REG_FIELD(REG_K3RTC_GENERAL_CTL, 23, 23),
+ [K3RTC_CNT_FMODE] = REG_FIELD(REG_K3RTC_GENERAL_CTL, 24, 25),
+ [K3RTC_PEND] = REG_FIELD(REG_K3RTC_SYNCPEND, 0, 1),
+ [K3RTC_RELOAD_FROM_BBD] = REG_FIELD(REG_K3RTC_SYNCPEND, 31, 31),
+ [K3RTC_COMP] = REG_FIELD(REG_K3RTC_COMP, 0, 31),
+
+ /* We use the on-to-off transition as the alarm trigger */
+ [K3RTC_ALM_S_CNT_LSW] = REG_FIELD(REG_K3RTC_ON_OFF_S_CNT_LSW, 0, 31),
+ [K3RTC_ALM_S_CNT_MSW] = REG_FIELD(REG_K3RTC_ON_OFF_S_CNT_MSW, 0, 15),
+ [K3RTC_IRQ_STATUS_RAW] = REG_FIELD(REG_K3RTC_IRQSTATUS_RAW_SYS, 0, 0),
+ [K3RTC_IRQ_STATUS] = REG_FIELD(REG_K3RTC_IRQSTATUS_SYS, 0, 0),
+ [K3RTC_IRQ_ENABLE_SET] = REG_FIELD(REG_K3RTC_IRQENABLE_SET_SYS, 0, 0),
+ [K3RTC_IRQ_ENABLE_CLR] = REG_FIELD(REG_K3RTC_IRQENABLE_CLR_SYS, 0, 0),
+ /* The off-to-on transition is the alternate trigger */
+ [K3RTC_IRQ_STATUS_ALT] = REG_FIELD(REG_K3RTC_IRQSTATUS_SYS, 1, 1),
+ [K3RTC_IRQ_ENABLE_CLR_ALT] = REG_FIELD(REG_K3RTC_IRQENABLE_CLR_SYS, 1, 1),
+};
+
+/**
+ * struct ti_k3_rtc - Private data for ti-k3-rtc
+ * @irq: IRQ
+ * @sync_timeout_us: data sync timeout period in microseconds
+ * @rate_32k: 32k clock rate in Hz
+ * @rtc_dev: rtc device
+ * @regmap: rtc mmio regmap
+ * @r_fields: rtc register fields
+ * @soc: SoC compatible match data
+ */
+struct ti_k3_rtc {
+ unsigned int irq;
+ u32 sync_timeout_us;
+ unsigned long rate_32k;
+ struct rtc_device *rtc_dev;
+ struct regmap *regmap;
+ struct regmap_field *r_fields[K3_RTC_MAX_FIELDS];
+ const struct ti_k3_rtc_soc_data *soc;
+};
+
+static int k3rtc_field_read(struct ti_k3_rtc *priv, enum ti_k3_rtc_fields f)
+{
+ int ret;
+ int val;
+
+ ret = regmap_field_read(priv->r_fields[f], &val);
+ /*
+ * We shouldn't see regmap fail on MMIO reads. That could happen if the
+ * clock context were lost, but that is not the case here.
+ */
+ if (WARN_ON_ONCE(ret))
+ return ret;
+ return val;
+}
+
+static void k3rtc_field_write(struct ti_k3_rtc *priv, enum ti_k3_rtc_fields f, u32 val)
+{
+ regmap_field_write(priv->r_fields[f], val);
+}
+
+/**
+ * k3rtc_fence - Ensure a register sync took place between the two domains
+ * @priv: pointer to priv data
+ *
+ * Return: 0 if the sync took place, else returns -ETIMEDOUT
+ */
+static int k3rtc_fence(struct ti_k3_rtc *priv)
+{
+ int ret;
+
+ ret = regmap_field_read_poll_timeout(priv->r_fields[K3RTC_PEND], ret,
+ !ret, 2, priv->sync_timeout_us);
+
+ return ret;
+}
+
+static inline int k3rtc_check_unlocked(struct ti_k3_rtc *priv)
+{
+ int ret;
+
+ ret = k3rtc_field_read(priv, K3RTC_UNLOCK);
+ if (ret < 0)
+ return ret;
+
+ return (ret) ? 0 : 1;
+}
+
+static int k3rtc_unlock_rtc(struct ti_k3_rtc *priv)
+{
+ int ret;
+
+ ret = k3rtc_check_unlocked(priv);
+ if (!ret)
+ return ret;
+
+ k3rtc_field_write(priv, K3RTC_KICK0, K3RTC_KICK0_UNLOCK_VALUE);
+ k3rtc_field_write(priv, K3RTC_KICK1, K3RTC_KICK1_UNLOCK_VALUE);
+
+ /* Skip fence since we are going to check the unlock bit as fence */
+ ret = regmap_field_read_poll_timeout(priv->r_fields[K3RTC_UNLOCK], ret,
+ !ret, 2, priv->sync_timeout_us);
+
+ return ret;
+}
+
+static int k3rtc_configure(struct device *dev)
+{
+ int ret;
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+
+ /*
+ * HWBUG: The compare state machine is broken if the RTC module is NOT
+ * unlocked within one second of boot - which is a pretty long time from
+ * the perspective of a Linux driver (module load, a u-boot shell, etc.
+ * can all take much longer than this).
+ *
+ * If that happens, assume the RTC module is unusable.
+ */
+ if (priv->soc->unlock_irq_erratum) {
+ ret = k3rtc_check_unlocked(priv);
+ /* If there is an error OR if we are locked, return error */
+ if (ret) {
+ dev_err(dev,
+ HW_ERR "Erratum i2327 unlock QUIRK! Cannot operate!!\n");
+ return -EFAULT;
+ }
+ } else {
+ /* May need to explicitly unlock first time */
+ ret = k3rtc_unlock_rtc(priv);
+ if (ret) {
+ dev_err(dev, "Failed to unlock(%d)!\n", ret);
+ return ret;
+ }
+ }
+
+ /* Enable Shadow register sync on 32k clock boundary */
+ k3rtc_field_write(priv, K3RTC_O32K_OSC_DEP_EN, 0x1);
+
+ /*
+ * Wait at least one clock sync time before proceeding with further programming.
+ * This ensures that the 32k based sync is active.
+ */
+ usleep_range(priv->sync_timeout_us, priv->sync_timeout_us + 5);
+
+ /* Fence here to make sure the preceding writes have synced */
+ ret = k3rtc_fence(priv);
+ if (ret) {
+ dev_err(dev,
+ "Failed fence osc_dep enable(%d) - is 32k clk working?!\n", ret);
+ return ret;
+ }
+
+ /*
+ * FMODE setting: Reading the lower seconds freezes the value in the
+ * higher-seconds register. This also implies that we must *ALWAYS* read
+ * the lower seconds prior to reading the higher seconds.
+ */
+ k3rtc_field_write(priv, K3RTC_CNT_FMODE, K3RTC_CNT_FMODE_S_CNT_VALUE);
+
+ /* Clear any spurious IRQ sources */
+ k3rtc_field_write(priv, K3RTC_IRQ_STATUS_ALT, 0x1);
+ k3rtc_field_write(priv, K3RTC_IRQ_STATUS, 0x1);
+ /* Disable all IRQs */
+ k3rtc_field_write(priv, K3RTC_IRQ_ENABLE_CLR_ALT, 0x1);
+ k3rtc_field_write(priv, K3RTC_IRQ_ENABLE_CLR, 0x1);
+
+ /* And sync the writes in */
+ return k3rtc_fence(priv);
+}
+
+static int ti_k3_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ u32 seconds_lo, seconds_hi;
+
+ seconds_lo = k3rtc_field_read(priv, K3RTC_S_CNT_LSW);
+ seconds_hi = k3rtc_field_read(priv, K3RTC_S_CNT_MSW);
+
+ rtc_time64_to_tm((((time64_t)seconds_hi) << 32) | (time64_t)seconds_lo, tm);
+
+ return 0;
+}
+
+static int ti_k3_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ time64_t seconds;
+
+ seconds = rtc_tm_to_time64(tm);
+
+ /*
+ * Read operation on LSW will freeze the RTC, so to update
+ * the time, we cannot use field operations. Just write since the
+ * reserved bits are ignored.
+ */
+ regmap_write(priv->regmap, REG_K3RTC_S_CNT_LSW, seconds);
+ regmap_write(priv->regmap, REG_K3RTC_S_CNT_MSW, seconds >> 32);
+
+ return k3rtc_fence(priv);
+}
+
+static int ti_k3_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ u32 reg;
+ u32 offset = enabled ? K3RTC_IRQ_ENABLE_SET : K3RTC_IRQ_ENABLE_CLR;
+
+ reg = k3rtc_field_read(priv, K3RTC_IRQ_ENABLE_SET);
+ if ((enabled && reg) || (!enabled && !reg))
+ return 0;
+
+ k3rtc_field_write(priv, offset, 0x1);
+
+ /*
+ * Ensure the write sync is through - NOTE: it should be OK for the
+ * ISR to fire while we are checking sync (which should be done in a 32k
+ * cycle or so).
+ */
+ return k3rtc_fence(priv);
+}
+
+static int ti_k3_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ u32 seconds_lo, seconds_hi;
+
+ seconds_lo = k3rtc_field_read(priv, K3RTC_ALM_S_CNT_LSW);
+ seconds_hi = k3rtc_field_read(priv, K3RTC_ALM_S_CNT_MSW);
+
+ rtc_time64_to_tm((((time64_t)seconds_hi) << 32) | (time64_t)seconds_lo, &alarm->time);
+
+ alarm->enabled = k3rtc_field_read(priv, K3RTC_IRQ_ENABLE_SET);
+
+ return 0;
+}
+
+static int ti_k3_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ time64_t seconds;
+ int ret;
+
+ seconds = rtc_tm_to_time64(&alarm->time);
+
+ k3rtc_field_write(priv, K3RTC_ALM_S_CNT_LSW, seconds);
+ k3rtc_field_write(priv, K3RTC_ALM_S_CNT_MSW, (seconds >> 32));
+
+ /* Make sure the alarm time is synced in */
+ ret = k3rtc_fence(priv);
+ if (ret) {
+ dev_err(dev, "Failed to fence(%d)! Potential config issue?\n", ret);
+ return ret;
+ }
+
+ /* Alarm IRQ enable will do a sync */
+ return ti_k3_rtc_alarm_irq_enable(dev, alarm->enabled);
+}
+
+static int ti_k3_rtc_read_offset(struct device *dev, long *offset)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ u32 ticks_per_hr = priv->rate_32k * 3600;
+ int comp;
+ s64 tmp;
+
+ comp = k3rtc_field_read(priv, K3RTC_COMP);
+
+ /* Convert from RTC calibration register format to ppb format */
+ tmp = comp * (s64)K3RTC_PPB_MULT;
+ if (tmp < 0)
+ tmp -= ticks_per_hr / 2LL;
+ else
+ tmp += ticks_per_hr / 2LL;
+ tmp = div_s64(tmp, ticks_per_hr);
+
+ /* Offset value operates in negative way, so swap sign */
+ *offset = (long)-tmp;
+
+ return 0;
+}
+
+static int ti_k3_rtc_set_offset(struct device *dev, long offset)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ u32 ticks_per_hr = priv->rate_32k * 3600;
+ int comp;
+ s64 tmp;
+
+ /* Make sure offset value is within supported range */
+ if (offset < K3RTC_MIN_OFFSET || offset > K3RTC_MAX_OFFSET)
+ return -ERANGE;
+
+ /* Convert from ppb format to RTC calibration register format */
+ tmp = offset * (s64)ticks_per_hr;
+ if (tmp < 0)
+ tmp -= K3RTC_PPB_MULT / 2LL;
+ else
+ tmp += K3RTC_PPB_MULT / 2LL;
+ tmp = div_s64(tmp, K3RTC_PPB_MULT);
+
+ /* Offset value operates in negative way, so swap sign */
+ comp = (int)-tmp;
+
+ k3rtc_field_write(priv, K3RTC_COMP, comp);
+
+ return k3rtc_fence(priv);
+}
+
+static irqreturn_t ti_k3_rtc_interrupt(s32 irq, void *dev_id)
+{
+ struct device *dev = dev_id;
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ u32 reg;
+ int ret;
+
+ /*
+ * IRQ assertion can be very fast, however, the IRQ Status clear
+ * de-assert depends on 32k clock edge in the 32k domain
+ * If we clear the status prior to the first 32k clock edge,
+ * the status bit is cleared, but the IRQ stays re-asserted.
+ *
+ * To prevent this condition, we need to wait for clock sync time.
+ * We can either do that by polling the 32k observability signal for
+ * a toggle OR we could just sleep and let the processor do other
+ * stuff.
+ */
+ usleep_range(priv->sync_timeout_us, priv->sync_timeout_us + 2);
+
+ /* Let's make sure this is a valid interrupt */
+ reg = k3rtc_field_read(priv, K3RTC_IRQ_STATUS);
+
+ if (!reg) {
+ u32 raw = k3rtc_field_read(priv, K3RTC_IRQ_STATUS_RAW);
+
+ dev_err(dev,
+ HW_ERR
+ "Erratum i2327/IRQ trig: status: 0x%08x / 0x%08x\n", reg, raw);
+ return IRQ_NONE;
+ }
+
+ /*
+ * Write 1 to clear status reg
+ * We cannot use a field operation here due to a potential race between
+ * 32k domain and vbus domain.
+ */
+ regmap_write(priv->regmap, REG_K3RTC_IRQSTATUS_SYS, 0x1);
+
+ /* Sync the write in */
+ ret = k3rtc_fence(priv);
+ if (ret) {
+ dev_err(dev, "Failed to fence irq status clr(%d)!\n", ret);
+ return IRQ_NONE;
+ }
+
+ /*
+ * Force the 32k status to be reloaded back in to ensure status is
+ * reflected back correctly.
+ */
+ k3rtc_field_write(priv, K3RTC_RELOAD_FROM_BBD, 0x1);
+
+ /* Ensure the write sync is through */
+ ret = k3rtc_fence(priv);
+ if (ret) {
+ dev_err(dev, "Failed to fence reload from bbd(%d)!\n", ret);
+ return IRQ_NONE;
+ }
+
+ /* Now we ensure that the status bit is cleared */
+ ret = regmap_field_read_poll_timeout(priv->r_fields[K3RTC_IRQ_STATUS],
+ ret, !ret, 2, priv->sync_timeout_us);
+ if (ret) {
+ dev_err(dev, "Time out waiting for status clear\n");
+ return IRQ_NONE;
+ }
+
+ /* Notify RTC core on event */
+ rtc_update_irq(priv->rtc_dev, 1, RTC_IRQF | RTC_AF);
+
+ return IRQ_HANDLED;
+}
+
+static const struct rtc_class_ops ti_k3_rtc_ops = {
+ .read_time = ti_k3_rtc_read_time,
+ .set_time = ti_k3_rtc_set_time,
+ .read_alarm = ti_k3_rtc_read_alarm,
+ .set_alarm = ti_k3_rtc_set_alarm,
+ .read_offset = ti_k3_rtc_read_offset,
+ .set_offset = ti_k3_rtc_set_offset,
+ .alarm_irq_enable = ti_k3_rtc_alarm_irq_enable,
+};
+
+static int ti_k3_rtc_scratch_read(void *priv_data, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct ti_k3_rtc *priv = (struct ti_k3_rtc *)priv_data;
+
+ return regmap_bulk_read(priv->regmap, REG_K3RTC_SCRATCH0 + offset, val, bytes / 4);
+}
+
+static int ti_k3_rtc_scratch_write(void *priv_data, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct ti_k3_rtc *priv = (struct ti_k3_rtc *)priv_data;
+ int ret;
+
+ ret = regmap_bulk_write(priv->regmap, REG_K3RTC_SCRATCH0 + offset, val, bytes / 4);
+ if (ret)
+ return ret;
+
+ return k3rtc_fence(priv);
+}
+
+static struct nvmem_config ti_k3_rtc_nvmem_config = {
+ .name = "ti_k3_rtc_scratch",
+ .word_size = 4,
+ .stride = 4,
+ .size = REG_K3RTC_SCRATCH7 - REG_K3RTC_SCRATCH0 + 4,
+ .reg_read = ti_k3_rtc_scratch_read,
+ .reg_write = ti_k3_rtc_scratch_write,
+};
+
+static int k3rtc_get_32kclk(struct device *dev, struct ti_k3_rtc *priv)
+{
+ int ret;
+ struct clk *clk;
+
+ clk = devm_clk_get(dev, "osc32k");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, (void (*)(void *))clk_disable_unprepare, clk);
+ if (ret)
+ return ret;
+
+ priv->rate_32k = clk_get_rate(clk);
+
+ /* Warn if we are not running from an exact 32768 Hz clock; the sync timeout below is derived from the actual rate */
+ if (priv->rate_32k != 32768)
+ dev_warn(dev, "Clock rate %lu is not 32768! Could misbehave!\n",
+ priv->rate_32k);
+
+ /*
+ * Sync timeout should be two 32k clk sync cycles = ~61 us. We double
+ * it to accommodate intermediate bus segment and CPU frequency
+ * deltas.
+ */
+ priv->sync_timeout_us = (u32)(DIV_ROUND_UP_ULL(1000000, priv->rate_32k) * 4);
+
+ return ret;
+}
+
+static int k3rtc_get_vbusclk(struct device *dev, struct ti_k3_rtc *priv)
+{
+ int ret;
+ struct clk *clk;
+
+ /* Note: VBUS isn't a context clock, it is needed for hardware operation */
+ clk = devm_clk_get(dev, "vbus");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, (void (*)(void *))clk_disable_unprepare, clk);
+}
+
+static int ti_k3_rtc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ti_k3_rtc *priv;
+ void __iomem *rtc_base;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(struct ti_k3_rtc), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ rtc_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rtc_base))
+ return PTR_ERR(rtc_base);
+
+ priv->regmap = devm_regmap_init_mmio(dev, rtc_base, &ti_k3_rtc_regmap_config);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ ret = devm_regmap_field_bulk_alloc(dev, priv->regmap, priv->r_fields,
+ ti_rtc_reg_fields, K3_RTC_MAX_FIELDS);
+ if (ret)
+ return ret;
+
+ ret = k3rtc_get_32kclk(dev, priv);
+ if (ret)
+ return ret;
+ ret = k3rtc_get_vbusclk(dev, priv);
+ if (ret)
+ return ret;
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return ret;
+ priv->irq = (unsigned int)ret;
+
+ priv->rtc_dev = devm_rtc_allocate_device(dev);
+ if (IS_ERR(priv->rtc_dev))
+ return PTR_ERR(priv->rtc_dev);
+
+ priv->soc = of_device_get_match_data(dev);
+
+ priv->rtc_dev->ops = &ti_k3_rtc_ops;
+ priv->rtc_dev->range_max = (1ULL << 48) - 1; /* 48-bit seconds */
+ ti_k3_rtc_nvmem_config.priv = priv;
+
+ ret = devm_request_threaded_irq(dev, priv->irq, NULL,
+ ti_k3_rtc_interrupt,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ dev_name(dev), dev);
+ if (ret) {
+ dev_err(dev, "Could not request IRQ: %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = k3rtc_configure(dev);
+ if (ret)
+ return ret;
+
+ if (device_property_present(dev, "wakeup-source"))
+ device_init_wakeup(dev, true);
+ else
+ device_set_wakeup_capable(dev, true);
+
+ ret = devm_rtc_register_device(priv->rtc_dev);
+ if (ret)
+ return ret;
+
+ return devm_rtc_nvmem_register(priv->rtc_dev, &ti_k3_rtc_nvmem_config);
+}
+
+static const struct ti_k3_rtc_soc_data ti_k3_am62_data = {
+ .unlock_irq_erratum = true,
+};
+
+static const struct of_device_id ti_k3_rtc_of_match_table[] = {
+ {.compatible = "ti,am62-rtc", .data = &ti_k3_am62_data},
+ {}
+};
+MODULE_DEVICE_TABLE(of, ti_k3_rtc_of_match_table);
+
+static int __maybe_unused ti_k3_rtc_suspend(struct device *dev)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(priv->irq);
+ return 0;
+}
+
+static int __maybe_unused ti_k3_rtc_resume(struct device *dev)
+{
+ struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(priv->irq);
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ti_k3_rtc_pm_ops, ti_k3_rtc_suspend, ti_k3_rtc_resume);
+
+static struct platform_driver ti_k3_rtc_driver = {
+ .probe = ti_k3_rtc_probe,
+ .driver = {
+ .name = "rtc-ti-k3",
+ .of_match_table = ti_k3_rtc_of_match_table,
+ .pm = &ti_k3_rtc_pm_ops,
+ },
+};
+module_platform_driver(ti_k3_rtc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TI K3 RTC driver");
+MODULE_AUTHOR("Nishanth Menon");
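
As a rough illustration (a user-space sketch, not part of the patch), this mirrors the ppb <-> COMP register conversion performed by ti_k3_rtc_read_offset()/ti_k3_rtc_set_offset() above, assuming the nominal 32768 Hz clock; the -10000 ppb request is an arbitrary example and the small round-trip error reflects the register's roughly 8.5 ppb resolution:

    #include <stdint.h>
    #include <stdio.h>

    #define K3RTC_PPB_MULT 1000000000LL

    int main(void)
    {
        int64_t ticks_per_hr = 32768LL * 3600;   /* 32k ticks per hour */
        long offset = -10000;                    /* requested adjustment in ppb */

        /* ppb -> COMP register value: round to nearest, then swap sign */
        int64_t tmp = (int64_t)offset * ticks_per_hr;
        tmp += (tmp < 0) ? -(K3RTC_PPB_MULT / 2) : (K3RTC_PPB_MULT / 2);
        int comp = (int)-(tmp / K3RTC_PPB_MULT);

        /* COMP register value -> ppb: the inverse conversion */
        int64_t back = (int64_t)comp * K3RTC_PPB_MULT;
        back += (back < 0) ? -(ticks_per_hr / 2) : (ticks_per_hr / 2);
        long readback = (long)-(back / ticks_per_hr);

        printf("offset=%ld ppb -> COMP=%d -> %ld ppb\n", offset, comp, readback);
        return 0;
    }
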
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
deleted file mode 100644
index 5a9f9ad86d32..000000000000
--- a/drivers/rtc/rtc-vr41xx.c
+++ /dev/null
@@ -1,363 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Driver for NEC VR4100 series Real Time Clock unit.
- *
- * Copyright (C) 2003-2008 Yoichi Yuasa <yuasa@linux-mips.org>
- */
-#include <linux/compat.h>
-#include <linux/err.h>
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/rtc.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-#include <linux/uaccess.h>
-#include <linux/log2.h>
-
-#include <asm/div64.h>
-
-MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
-MODULE_DESCRIPTION("NEC VR4100 series RTC driver");
-MODULE_LICENSE("GPL v2");
-
-/* RTC 1 registers */
-#define ETIMELREG 0x00
-#define ETIMEMREG 0x02
-#define ETIMEHREG 0x04
-/* RFU */
-#define ECMPLREG 0x08
-#define ECMPMREG 0x0a
-#define ECMPHREG 0x0c
-/* RFU */
-#define RTCL1LREG 0x10
-#define RTCL1HREG 0x12
-#define RTCL1CNTLREG 0x14
-#define RTCL1CNTHREG 0x16
-#define RTCL2LREG 0x18
-#define RTCL2HREG 0x1a
-#define RTCL2CNTLREG 0x1c
-#define RTCL2CNTHREG 0x1e
-
-/* RTC 2 registers */
-#define TCLKLREG 0x00
-#define TCLKHREG 0x02
-#define TCLKCNTLREG 0x04
-#define TCLKCNTHREG 0x06
-/* RFU */
-#define RTCINTREG 0x1e
- #define TCLOCK_INT 0x08
- #define RTCLONG2_INT 0x04
- #define RTCLONG1_INT 0x02
- #define ELAPSEDTIME_INT 0x01
-
-#define RTC_FREQUENCY 32768
-#define MAX_PERIODIC_RATE 6553
-
-static void __iomem *rtc1_base;
-static void __iomem *rtc2_base;
-
-#define rtc1_read(offset) readw(rtc1_base + (offset))
-#define rtc1_write(offset, value) writew((value), rtc1_base + (offset))
-
-#define rtc2_read(offset) readw(rtc2_base + (offset))
-#define rtc2_write(offset, value) writew((value), rtc2_base + (offset))
-
-/* 32-bit compat for ioctls that nobody else uses */
-#define RTC_EPOCH_READ32 _IOR('p', 0x0d, __u32)
-
-static unsigned long epoch = 1970; /* Jan 1 1970 00:00:00 */
-
-static DEFINE_SPINLOCK(rtc_lock);
-static char rtc_name[] = "RTC";
-static unsigned long periodic_count;
-static unsigned int alarm_enabled;
-static int aie_irq;
-static int pie_irq;
-
-static inline time64_t read_elapsed_second(void)
-{
-
- unsigned long first_low, first_mid, first_high;
-
- unsigned long second_low, second_mid, second_high;
-
- do {
- first_low = rtc1_read(ETIMELREG);
- first_mid = rtc1_read(ETIMEMREG);
- first_high = rtc1_read(ETIMEHREG);
- second_low = rtc1_read(ETIMELREG);
- second_mid = rtc1_read(ETIMEMREG);
- second_high = rtc1_read(ETIMEHREG);
- } while (first_low != second_low || first_mid != second_mid ||
- first_high != second_high);
-
- return ((u64)first_high << 17) | (first_mid << 1) | (first_low >> 15);
-}
-
-static inline void write_elapsed_second(time64_t sec)
-{
- spin_lock_irq(&rtc_lock);
-
- rtc1_write(ETIMELREG, (uint16_t)(sec << 15));
- rtc1_write(ETIMEMREG, (uint16_t)(sec >> 1));
- rtc1_write(ETIMEHREG, (uint16_t)(sec >> 17));
-
- spin_unlock_irq(&rtc_lock);
-}
-
-static int vr41xx_rtc_read_time(struct device *dev, struct rtc_time *time)
-{
- time64_t epoch_sec, elapsed_sec;
-
- epoch_sec = mktime64(epoch, 1, 1, 0, 0, 0);
- elapsed_sec = read_elapsed_second();
-
- rtc_time64_to_tm(epoch_sec + elapsed_sec, time);
-
- return 0;
-}
-
-static int vr41xx_rtc_set_time(struct device *dev, struct rtc_time *time)
-{
- time64_t epoch_sec, current_sec;
-
- epoch_sec = mktime64(epoch, 1, 1, 0, 0, 0);
- current_sec = rtc_tm_to_time64(time);
-
- write_elapsed_second(current_sec - epoch_sec);
-
- return 0;
-}
-
-static int vr41xx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
-{
- unsigned long low, mid, high;
- struct rtc_time *time = &wkalrm->time;
-
- spin_lock_irq(&rtc_lock);
-
- low = rtc1_read(ECMPLREG);
- mid = rtc1_read(ECMPMREG);
- high = rtc1_read(ECMPHREG);
- wkalrm->enabled = alarm_enabled;
-
- spin_unlock_irq(&rtc_lock);
-
- rtc_time64_to_tm((high << 17) | (mid << 1) | (low >> 15), time);
-
- return 0;
-}
-
-static int vr41xx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
-{
- time64_t alarm_sec;
-
- alarm_sec = rtc_tm_to_time64(&wkalrm->time);
-
- spin_lock_irq(&rtc_lock);
-
- if (alarm_enabled)
- disable_irq(aie_irq);
-
- rtc1_write(ECMPLREG, (uint16_t)(alarm_sec << 15));
- rtc1_write(ECMPMREG, (uint16_t)(alarm_sec >> 1));
- rtc1_write(ECMPHREG, (uint16_t)(alarm_sec >> 17));
-
- if (wkalrm->enabled)
- enable_irq(aie_irq);
-
- alarm_enabled = wkalrm->enabled;
-
- spin_unlock_irq(&rtc_lock);
-
- return 0;
-}
-
-static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
-{
- switch (cmd) {
- case RTC_EPOCH_READ:
- return put_user(epoch, (unsigned long __user *)arg);
-#ifdef CONFIG_64BIT
- case RTC_EPOCH_READ32:
- return put_user(epoch, (unsigned int __user *)arg);
-#endif
- case RTC_EPOCH_SET:
- /* Doesn't support before 1900 */
- if (arg < 1900)
- return -EINVAL;
- epoch = arg;
- break;
- default:
- return -ENOIOCTLCMD;
- }
-
- return 0;
-}
-
-static int vr41xx_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
-{
- spin_lock_irq(&rtc_lock);
- if (enabled) {
- if (!alarm_enabled) {
- enable_irq(aie_irq);
- alarm_enabled = 1;
- }
- } else {
- if (alarm_enabled) {
- disable_irq(aie_irq);
- alarm_enabled = 0;
- }
- }
- spin_unlock_irq(&rtc_lock);
- return 0;
-}
-
-static irqreturn_t elapsedtime_interrupt(int irq, void *dev_id)
-{
- struct platform_device *pdev = (struct platform_device *)dev_id;
- struct rtc_device *rtc = platform_get_drvdata(pdev);
-
- rtc2_write(RTCINTREG, ELAPSEDTIME_INT);
-
- rtc_update_irq(rtc, 1, RTC_AF);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t rtclong1_interrupt(int irq, void *dev_id)
-{
- struct platform_device *pdev = (struct platform_device *)dev_id;
- struct rtc_device *rtc = platform_get_drvdata(pdev);
- unsigned long count = periodic_count;
-
- rtc2_write(RTCINTREG, RTCLONG1_INT);
-
- rtc1_write(RTCL1LREG, count);
- rtc1_write(RTCL1HREG, count >> 16);
-
- rtc_update_irq(rtc, 1, RTC_PF);
-
- return IRQ_HANDLED;
-}
-
-static const struct rtc_class_ops vr41xx_rtc_ops = {
- .ioctl = vr41xx_rtc_ioctl,
- .read_time = vr41xx_rtc_read_time,
- .set_time = vr41xx_rtc_set_time,
- .read_alarm = vr41xx_rtc_read_alarm,
- .set_alarm = vr41xx_rtc_set_alarm,
- .alarm_irq_enable = vr41xx_rtc_alarm_irq_enable,
-};
-
-static int rtc_probe(struct platform_device *pdev)
-{
- struct resource *res;
- struct rtc_device *rtc;
- int retval;
-
- if (pdev->num_resources != 4)
- return -EBUSY;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EBUSY;
-
- rtc1_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!rtc1_base)
- return -EBUSY;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res) {
- retval = -EBUSY;
- goto err_rtc1_iounmap;
- }
-
- rtc2_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!rtc2_base) {
- retval = -EBUSY;
- goto err_rtc1_iounmap;
- }
-
- rtc = devm_rtc_allocate_device(&pdev->dev);
- if (IS_ERR(rtc)) {
- retval = PTR_ERR(rtc);
- goto err_iounmap_all;
- }
-
- rtc->ops = &vr41xx_rtc_ops;
-
- /* 48-bit counter at 32.768 kHz */
- rtc->range_max = (1ULL << 33) - 1;
- rtc->max_user_freq = MAX_PERIODIC_RATE;
-
- spin_lock_irq(&rtc_lock);
-
- rtc1_write(ECMPLREG, 0);
- rtc1_write(ECMPMREG, 0);
- rtc1_write(ECMPHREG, 0);
- rtc1_write(RTCL1LREG, 0);
- rtc1_write(RTCL1HREG, 0);
-
- spin_unlock_irq(&rtc_lock);
-
- aie_irq = platform_get_irq(pdev, 0);
- if (aie_irq <= 0) {
- retval = -EBUSY;
- goto err_iounmap_all;
- }
-
- retval = devm_request_irq(&pdev->dev, aie_irq, elapsedtime_interrupt, 0,
- "elapsed_time", pdev);
- if (retval < 0)
- goto err_iounmap_all;
-
- pie_irq = platform_get_irq(pdev, 1);
- if (pie_irq <= 0) {
- retval = -EBUSY;
- goto err_iounmap_all;
- }
-
- retval = devm_request_irq(&pdev->dev, pie_irq, rtclong1_interrupt, 0,
- "rtclong1", pdev);
- if (retval < 0)
- goto err_iounmap_all;
-
- platform_set_drvdata(pdev, rtc);
-
- disable_irq(aie_irq);
- disable_irq(pie_irq);
-
- dev_info(&pdev->dev, "Real Time Clock of NEC VR4100 series\n");
-
- retval = devm_rtc_register_device(rtc);
- if (retval)
- goto err_iounmap_all;
-
- return 0;
-
-err_iounmap_all:
- rtc2_base = NULL;
-
-err_rtc1_iounmap:
- rtc1_base = NULL;
-
- return retval;
-}
-
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:RTC");
-
-static struct platform_driver rtc_platform_driver = {
- .probe = rtc_probe,
- .driver = {
- .name = rtc_name,
- },
-};
-
-module_platform_driver(rtc_platform_driver);
diff --git a/drivers/rtc/rtc-x1205.c b/drivers/rtc/rtc-x1205.c
index d1d5a44d9122..ba0d22a5b421 100644
--- a/drivers/rtc/rtc-x1205.c
+++ b/drivers/rtc/rtc-x1205.c
@@ -614,8 +614,7 @@ static void x1205_sysfs_unregister(struct device *dev)
}
-static int x1205_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int x1205_probe(struct i2c_client *client)
{
int err = 0;
unsigned char sr;
@@ -681,7 +680,7 @@ static struct i2c_driver x1205_driver = {
.name = "rtc-x1205",
.of_match_table = x1205_dt_ids,
},
- .probe = x1205_probe,
+ .probe_new = x1205_probe,
.remove = x1205_remove,
.id_table = x1205_id,
};
diff --git a/drivers/rtc/rtc-zynqmp.c b/drivers/rtc/rtc-zynqmp.c
index f440bb52be92..c9b85c838ebe 100644
--- a/drivers/rtc/rtc-zynqmp.c
+++ b/drivers/rtc/rtc-zynqmp.c
@@ -6,6 +6,7 @@
*
*/
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -36,17 +37,23 @@
#define RTC_OSC_EN BIT(24)
#define RTC_BATT_EN BIT(31)
-#define RTC_CALIB_DEF 0x198233
+#define RTC_CALIB_DEF 0x7FFF
#define RTC_CALIB_MASK 0x1FFFFF
#define RTC_ALRM_MASK BIT(1)
#define RTC_MSEC 1000
+#define RTC_FR_MASK 0xF0000
+#define RTC_FR_MAX_TICKS 16
+#define RTC_PPB 1000000000LL
+#define RTC_MIN_OFFSET -32768000
+#define RTC_MAX_OFFSET 32767000
struct xlnx_rtc_dev {
struct rtc_device *rtc;
void __iomem *reg_base;
int alarm_irq;
int sec_irq;
- unsigned int calibval;
+ struct clk *rtc_clk;
+ unsigned int freq;
};
static int xlnx_rtc_set_time(struct device *dev, struct rtc_time *tm)
@@ -61,13 +68,6 @@ static int xlnx_rtc_set_time(struct device *dev, struct rtc_time *tm)
*/
new_time = rtc_tm_to_time64(tm) + 1;
- /*
- * Writing into calibration register will clear the Tick Counter and
- * force the next second to be signaled exactly in 1 second period
- */
- xrtcdev->calibval &= RTC_CALIB_MASK;
- writel(xrtcdev->calibval, (xrtcdev->reg_base + RTC_CALIB_WR));
-
writel(new_time, xrtcdev->reg_base + RTC_SET_TM_WR);
/*
@@ -173,15 +173,76 @@ static void xlnx_init_rtc(struct xlnx_rtc_dev *xrtcdev)
rtc_ctrl = readl(xrtcdev->reg_base + RTC_CTRL);
rtc_ctrl |= RTC_BATT_EN;
writel(rtc_ctrl, xrtcdev->reg_base + RTC_CTRL);
+}
- /*
- * Based on crystal freq of 33.330 KHz
- * set the seconds counter and enable, set fractions counter
- * to default value suggested as per design spec
- * to correct RTC delay in frequency over period of time.
+static int xlnx_rtc_read_offset(struct device *dev, long *offset)
+{
+ struct xlnx_rtc_dev *xrtcdev = dev_get_drvdata(dev);
+ unsigned long long rtc_ppb = RTC_PPB;
+ unsigned int tick_mult = do_div(rtc_ppb, xrtcdev->freq);
+ unsigned int calibval;
+ long offset_val;
+
+ calibval = readl(xrtcdev->reg_base + RTC_CALIB_RD);
+ /* Offset with seconds ticks */
+ offset_val = calibval & RTC_TICK_MASK;
+ offset_val = offset_val - RTC_CALIB_DEF;
+ offset_val = offset_val * tick_mult;
+
+ /* Offset with fractional ticks */
+ if (calibval & RTC_FR_EN)
+ offset_val += ((calibval & RTC_FR_MASK) >> RTC_FR_DATSHIFT)
+ * (tick_mult / RTC_FR_MAX_TICKS);
+ *offset = offset_val;
+
+ return 0;
+}
+
+static int xlnx_rtc_set_offset(struct device *dev, long offset)
+{
+ struct xlnx_rtc_dev *xrtcdev = dev_get_drvdata(dev);
+ unsigned long long rtc_ppb = RTC_PPB;
+ unsigned int tick_mult = do_div(rtc_ppb, xrtcdev->freq);
+ unsigned char fract_tick = 0;
+ unsigned int calibval;
+ short int max_tick;
+ int fract_offset;
+
+ if (offset < RTC_MIN_OFFSET || offset > RTC_MAX_OFFSET)
+ return -ERANGE;
+
+ /* Number ticks for given offset */
+ max_tick = div_s64_rem(offset, tick_mult, &fract_offset);
+
+ /* Number fractional ticks for given offset */
+ if (fract_offset) {
+ if (fract_offset < 0) {
+ fract_offset = fract_offset + tick_mult;
+ max_tick--;
+ }
+ if (fract_offset > (tick_mult / RTC_FR_MAX_TICKS)) {
+ for (fract_tick = 1; fract_tick < 16; fract_tick++) {
+ if (fract_offset <=
+ (fract_tick *
+ (tick_mult / RTC_FR_MAX_TICKS)))
+ break;
+ }
+ }
+ }
+
+ /* Zynqmp RTC uses second and fractional tick
+ * counters for compensation
*/
- xrtcdev->calibval &= RTC_CALIB_MASK;
- writel(xrtcdev->calibval, (xrtcdev->reg_base + RTC_CALIB_WR));
+ calibval = max_tick + RTC_CALIB_DEF;
+
+ if (fract_tick)
+ calibval |= RTC_FR_EN;
+
+ calibval |= (fract_tick << RTC_FR_DATSHIFT);
+
+ writel(calibval, (xrtcdev->reg_base + RTC_CALIB_WR));
+
+ return 0;
}
static const struct rtc_class_ops xlnx_rtc_ops = {
@@ -190,6 +251,8 @@ static const struct rtc_class_ops xlnx_rtc_ops = {
.read_alarm = xlnx_rtc_read_alarm,
.set_alarm = xlnx_rtc_set_alarm,
.alarm_irq_enable = xlnx_rtc_alarm_irq_enable,
+ .read_offset = xlnx_rtc_read_offset,
+ .set_offset = xlnx_rtc_set_offset,
};
static irqreturn_t xlnx_rtc_interrupt(int irq, void *id)
@@ -255,10 +318,22 @@ static int xlnx_rtc_probe(struct platform_device *pdev)
return ret;
}
- ret = of_property_read_u32(pdev->dev.of_node, "calibration",
- &xrtcdev->calibval);
- if (ret)
- xrtcdev->calibval = RTC_CALIB_DEF;
+ /* Getting the rtc_clk info */
+ xrtcdev->rtc_clk = devm_clk_get_optional(&pdev->dev, "rtc_clk");
+ if (IS_ERR(xrtcdev->rtc_clk)) {
+ if (PTR_ERR(xrtcdev->rtc_clk) != -EPROBE_DEFER)
+ dev_warn(&pdev->dev, "Device clock not found.\n");
+ }
+ xrtcdev->freq = clk_get_rate(xrtcdev->rtc_clk);
+ if (!xrtcdev->freq) {
+ ret = of_property_read_u32(pdev->dev.of_node, "calibration",
+ &xrtcdev->freq);
+ if (ret)
+ xrtcdev->freq = RTC_CALIB_DEF;
+ }
+ ret = readl(xrtcdev->reg_base + RTC_CALIB_RD);
+ if (!ret)
+ writel(xrtcdev->freq, (xrtcdev->reg_base + RTC_CALIB_WR));
xlnx_init_rtc(xrtcdev);
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 4df8bf6505fc..ea82821599f6 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1725,7 +1725,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
dasd_put_device(device);
}
- /* check for for attention message */
+ /* check for attention message */
if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
device = dasd_device_from_cdev_locked(cdev);
if (!IS_ERR(device)) {
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index e9edf3b6ed7c..94ee59864971 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -639,6 +639,7 @@ static void dasd_diag_setup_blk_queue(struct dasd_block *block)
/* With page sized segments each segment can be translated into one idaw/tidaw */
blk_queue_max_segment_size(q, PAGE_SIZE);
blk_queue_segment_boundary(q, PAGE_SIZE - 1);
+ blk_queue_dma_alignment(q, PAGE_SIZE - 1);
}
static int dasd_diag_pe_handler(struct dasd_device *device,
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 836838f7d686..3cc93e2e4e15 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -6626,6 +6626,7 @@ static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
/* With page sized segments each segment can be translated into one idaw/tidaw */
blk_queue_max_segment_size(q, PAGE_SIZE);
blk_queue_segment_boundary(q, PAGE_SIZE - 1);
+ blk_queue_dma_alignment(q, PAGE_SIZE - 1);
}
static struct ccw_driver dasd_eckd_driver = {
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 4d8d1759775a..5187705bd0f3 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -863,7 +863,7 @@ dcssblk_submit_bio(struct bio *bio)
unsigned long source_addr;
unsigned long bytes_done;
- blk_queue_split(&bio);
+ bio = bio_split_to_limits(bio);
bytes_done = 0;
dev_info = bio->bi_bdev->bd_disk->private_data;
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index 57f41efb8043..7d1749b0d378 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -89,7 +89,7 @@ config HMC_DRV
Management Console (HMC) drive CD/DVD-ROM. It is available as a
module, called 'hmcdrv', and also as kernel built-in. There is one
optional parameter for this module: cachesize=N, which modifies the
- transfer cache size from it's default value 0.5MB to N bytes. If N
+ transfer cache size from its default value 0.5MB to N bytes. If N
is zero, then no caching is performed.
config SCLP_OFB
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 38cc1565d6ae..751945fb6793 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -548,7 +548,7 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
case 0x2e:
/*
* Not capable. This indicates either that the drive fails
- * reading the format id mark or that that format specified
+ * reading the format id mark or that format specified
* is not supported by the drive.
*/
dev_warn (&device->cdev->dev, "The tape unit cannot process "
diff --git a/drivers/s390/char/uvdevice.c b/drivers/s390/char/uvdevice.c
index 66505d7166a6..1d40457c7b10 100644
--- a/drivers/s390/char/uvdevice.c
+++ b/drivers/s390/char/uvdevice.c
@@ -27,6 +27,7 @@
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
+#include <linux/cpufeature.h>
#include <asm/uvdevice.h>
#include <asm/uv.h>
@@ -244,12 +245,10 @@ static void __exit uvio_dev_exit(void)
static int __init uvio_dev_init(void)
{
- if (!test_facility(158))
- return -ENXIO;
return misc_register(&uvio_dev_miscdev);
}
-module_init(uvio_dev_init);
+module_cpu_feature_match(S390_CPU_FEATURE_UV, uvio_dev_init);
module_exit(uvio_dev_exit);
MODULE_AUTHOR("IBM Corporation");
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 516783ba950f..f6da215ccf9f 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -17,6 +17,7 @@
#include <linux/debugfs.h>
#include <linux/panic_notifier.h>
#include <linux/reboot.h>
+#include <linux/uio.h>
#include <asm/asm-offsets.h>
#include <asm/ipl.h>
@@ -50,36 +51,41 @@ static struct dentry *zcore_reipl_file;
static struct dentry *zcore_hsa_file;
static struct ipl_parameter_block *zcore_ipl_block;
+static DEFINE_MUTEX(hsa_buf_mutex);
static char hsa_buf[PAGE_SIZE] __aligned(PAGE_SIZE);
/*
- * Copy memory from HSA to user memory (not reentrant):
+ * Copy memory from HSA to iterator (not reentrant):
*
- * @dest: User buffer where memory should be copied to
+ * @iter: Iterator where memory should be copied to
* @src: Start address within HSA where data should be copied
* @count: Size of buffer, which should be copied
*/
-int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count)
+size_t memcpy_hsa_iter(struct iov_iter *iter, unsigned long src, size_t count)
{
- unsigned long offset, bytes;
+ size_t bytes, copied, res = 0;
+ unsigned long offset;
if (!hsa_available)
- return -ENODATA;
+ return 0;
+ mutex_lock(&hsa_buf_mutex);
while (count) {
if (sclp_sdias_copy(hsa_buf, src / PAGE_SIZE + 2, 1)) {
TRACE("sclp_sdias_copy() failed\n");
- return -EIO;
+ break;
}
offset = src % PAGE_SIZE;
bytes = min(PAGE_SIZE - offset, count);
- if (copy_to_user(dest, hsa_buf + offset, bytes))
- return -EFAULT;
- src += bytes;
- dest += bytes;
- count -= bytes;
+ copied = copy_to_iter(hsa_buf + offset, bytes, iter);
+ count -= copied;
+ src += copied;
+ res += copied;
+ if (copied < bytes)
+ break;
}
- return 0;
+ mutex_unlock(&hsa_buf_mutex);
+ return res;
}
/*
@@ -89,25 +95,16 @@ int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count)
* @src: Start address within HSA where data should be copied
* @count: Size of buffer, which should be copied
*/
-int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count)
+static inline int memcpy_hsa_kernel(void *dst, unsigned long src, size_t count)
{
- unsigned long offset, bytes;
+ struct iov_iter iter;
+ struct kvec kvec;
- if (!hsa_available)
- return -ENODATA;
-
- while (count) {
- if (sclp_sdias_copy(hsa_buf, src / PAGE_SIZE + 2, 1)) {
- TRACE("sclp_sdias_copy() failed\n");
- return -EIO;
- }
- offset = src % PAGE_SIZE;
- bytes = min(PAGE_SIZE - offset, count);
- memcpy(dest, hsa_buf + offset, bytes);
- src += bytes;
- dest += bytes;
- count -= bytes;
- }
+ kvec.iov_base = dst;
+ kvec.iov_len = count;
+ iov_iter_kvec(&iter, WRITE, &kvec, 1, count);
+ if (memcpy_hsa_iter(&iter, src, count) < count)
+ return -EIO;
return 0;
}
diff --git a/drivers/s390/cio/vfio_ccw_async.c b/drivers/s390/cio/vfio_ccw_async.c
index 7a838e3d7c0f..420d89ba7f83 100644
--- a/drivers/s390/cio/vfio_ccw_async.c
+++ b/drivers/s390/cio/vfio_ccw_async.c
@@ -8,7 +8,6 @@
*/
#include <linux/vfio.h>
-#include <linux/mdev.h>
#include "vfio_ccw_private.h"
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index 0c2be9421ab7..7b02e97f4b29 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -11,6 +11,7 @@
#include <linux/ratelimit.h>
#include <linux/mm.h>
#include <linux/slab.h>
+#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/vfio.h>
#include <asm/idals.h>
@@ -18,13 +19,11 @@
#include "vfio_ccw_cp.h"
#include "vfio_ccw_private.h"
-struct pfn_array {
- /* Starting guest physical I/O address. */
- unsigned long pa_iova;
- /* Array that stores PFNs of the pages need to pin. */
- unsigned long *pa_iova_pfn;
- /* Array that receives PFNs of the pages pinned. */
- unsigned long *pa_pfn;
+struct page_array {
+ /* Array that stores pages need to pin. */
+ dma_addr_t *pa_iova;
+ /* Array that receives the pinned pages. */
+ struct page **pa_page;
/* Number of pages pinned from @pa_iova. */
int pa_nr;
};
@@ -37,116 +36,158 @@ struct ccwchain {
/* Count of the valid ccws in chain. */
int ch_len;
/* Pinned PAGEs for the original data. */
- struct pfn_array *ch_pa;
+ struct page_array *ch_pa;
};
/*
- * pfn_array_alloc() - alloc memory for PFNs
- * @pa: pfn_array on which to perform the operation
+ * page_array_alloc() - alloc memory for page array
+ * @pa: page_array on which to perform the operation
* @iova: target guest physical address
* @len: number of bytes that should be pinned from @iova
*
- * Attempt to allocate memory for PFNs.
+ * Attempt to allocate memory for page array.
*
- * Usage of pfn_array:
- * We expect (pa_nr == 0) and (pa_iova_pfn == NULL), any field in
+ * Usage of page_array:
+ * We expect (pa_nr == 0) and (pa_iova == NULL), any field in
* this structure will be filled in by this function.
*
* Returns:
- * 0 if PFNs are allocated
- * -EINVAL if pa->pa_nr is not initially zero, or pa->pa_iova_pfn is not NULL
+ * 0 if page array is allocated
+ * -EINVAL if pa->pa_nr is not initially zero, or pa->pa_iova is not NULL
* -ENOMEM if alloc failed
*/
-static int pfn_array_alloc(struct pfn_array *pa, u64 iova, unsigned int len)
+static int page_array_alloc(struct page_array *pa, u64 iova, unsigned int len)
{
int i;
- if (pa->pa_nr || pa->pa_iova_pfn)
+ if (pa->pa_nr || pa->pa_iova)
return -EINVAL;
- pa->pa_iova = iova;
-
pa->pa_nr = ((iova & ~PAGE_MASK) + len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
if (!pa->pa_nr)
return -EINVAL;
- pa->pa_iova_pfn = kcalloc(pa->pa_nr,
- sizeof(*pa->pa_iova_pfn) +
- sizeof(*pa->pa_pfn),
- GFP_KERNEL);
- if (unlikely(!pa->pa_iova_pfn)) {
+ pa->pa_iova = kcalloc(pa->pa_nr,
+ sizeof(*pa->pa_iova) + sizeof(*pa->pa_page),
+ GFP_KERNEL);
+ if (unlikely(!pa->pa_iova)) {
pa->pa_nr = 0;
return -ENOMEM;
}
- pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;
+ pa->pa_page = (struct page **)&pa->pa_iova[pa->pa_nr];
- pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
- pa->pa_pfn[0] = -1ULL;
+ pa->pa_iova[0] = iova;
+ pa->pa_page[0] = NULL;
for (i = 1; i < pa->pa_nr; i++) {
- pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;
- pa->pa_pfn[i] = -1ULL;
+ pa->pa_iova[i] = pa->pa_iova[i - 1] + PAGE_SIZE;
+ pa->pa_page[i] = NULL;
}
return 0;
}
/*
- * pfn_array_pin() - Pin user pages in memory
- * @pa: pfn_array on which to perform the operation
+ * page_array_unpin() - Unpin user pages in memory
+ * @pa: page_array on which to perform the operation
+ * @vdev: the vfio device to perform the operation
+ * @pa_nr: number of user pages to unpin
+ *
+ * Only unpin if any pages were pinned to begin with, i.e. pa_nr > 0,
+ * otherwise only clear pa->pa_nr
+ */
+static void page_array_unpin(struct page_array *pa,
+ struct vfio_device *vdev, int pa_nr)
+{
+ int unpinned = 0, npage = 1;
+
+ while (unpinned < pa_nr) {
+ dma_addr_t *first = &pa->pa_iova[unpinned];
+ dma_addr_t *last = &first[npage];
+
+ if (unpinned + npage < pa_nr &&
+ *first + npage * PAGE_SIZE == *last) {
+ npage++;
+ continue;
+ }
+
+ vfio_unpin_pages(vdev, *first, npage);
+ unpinned += npage;
+ npage = 1;
+ }
+
+ pa->pa_nr = 0;
+}
+
+/*
+ * page_array_pin() - Pin user pages in memory
+ * @pa: page_array on which to perform the operation
* @mdev: the mediated device to perform pin operations
*
* Returns number of pages pinned upon success.
* If the pin request partially succeeds, or fails completely,
* all pages are left unpinned and a negative error value is returned.
*/
-static int pfn_array_pin(struct pfn_array *pa, struct vfio_device *vdev)
+static int page_array_pin(struct page_array *pa, struct vfio_device *vdev)
{
+ int pinned = 0, npage = 1;
int ret = 0;
- ret = vfio_pin_pages(vdev, pa->pa_iova_pfn, pa->pa_nr,
- IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);
+ while (pinned < pa->pa_nr) {
+ dma_addr_t *first = &pa->pa_iova[pinned];
+ dma_addr_t *last = &first[npage];
- if (ret < 0) {
- goto err_out;
- } else if (ret > 0 && ret != pa->pa_nr) {
- vfio_unpin_pages(vdev, pa->pa_iova_pfn, ret);
- ret = -EINVAL;
- goto err_out;
+ if (pinned + npage < pa->pa_nr &&
+ *first + npage * PAGE_SIZE == *last) {
+ npage++;
+ continue;
+ }
+
+ ret = vfio_pin_pages(vdev, *first, npage,
+ IOMMU_READ | IOMMU_WRITE,
+ &pa->pa_page[pinned]);
+ if (ret < 0) {
+ goto err_out;
+ } else if (ret > 0 && ret != npage) {
+ pinned += ret;
+ ret = -EINVAL;
+ goto err_out;
+ }
+ pinned += npage;
+ npage = 1;
}
return ret;
err_out:
- pa->pa_nr = 0;
-
+ page_array_unpin(pa, vdev, pinned);
return ret;
}
/* Unpin the pages before releasing the memory. */
-static void pfn_array_unpin_free(struct pfn_array *pa, struct vfio_device *vdev)
+static void page_array_unpin_free(struct page_array *pa, struct vfio_device *vdev)
{
- /* Only unpin if any pages were pinned to begin with */
- if (pa->pa_nr)
- vfio_unpin_pages(vdev, pa->pa_iova_pfn, pa->pa_nr);
- pa->pa_nr = 0;
- kfree(pa->pa_iova_pfn);
+ page_array_unpin(pa, vdev, pa->pa_nr);
+ kfree(pa->pa_iova);
}
-static bool pfn_array_iova_pinned(struct pfn_array *pa, unsigned long iova)
+static bool page_array_iova_pinned(struct page_array *pa, u64 iova, u64 length)
{
- unsigned long iova_pfn = iova >> PAGE_SHIFT;
+ u64 iova_pfn_start = iova >> PAGE_SHIFT;
+ u64 iova_pfn_end = (iova + length - 1) >> PAGE_SHIFT;
+ u64 pfn;
int i;
- for (i = 0; i < pa->pa_nr; i++)
- if (pa->pa_iova_pfn[i] == iova_pfn)
+ for (i = 0; i < pa->pa_nr; i++) {
+ pfn = pa->pa_iova[i] >> PAGE_SHIFT;
+ if (pfn >= iova_pfn_start && pfn <= iova_pfn_end)
return true;
+ }
return false;
}
-/* Create the list of IDAL words for a pfn_array. */
-static inline void pfn_array_idal_create_words(
- struct pfn_array *pa,
- unsigned long *idaws)
+/* Create the list of IDAL words for a page_array. */
+static inline void page_array_idal_create_words(struct page_array *pa,
+ unsigned long *idaws)
{
int i;
@@ -159,10 +200,10 @@ static inline void pfn_array_idal_create_words(
*/
for (i = 0; i < pa->pa_nr; i++)
- idaws[i] = pa->pa_pfn[i] << PAGE_SHIFT;
+ idaws[i] = page_to_phys(pa->pa_page[i]);
/* Adjust the first IDAW, since it may not start on a page boundary */
- idaws[0] += pa->pa_iova & (PAGE_SIZE - 1);
+ idaws[0] += pa->pa_iova[0] & (PAGE_SIZE - 1);
}
static void convert_ccw0_to_ccw1(struct ccw1 *source, unsigned long len)
@@ -194,24 +235,24 @@ static void convert_ccw0_to_ccw1(struct ccw1 *source, unsigned long len)
static long copy_from_iova(struct vfio_device *vdev, void *to, u64 iova,
unsigned long n)
{
- struct pfn_array pa = {0};
- u64 from;
+ struct page_array pa = {0};
int i, ret;
unsigned long l, m;
- ret = pfn_array_alloc(&pa, iova, n);
+ ret = page_array_alloc(&pa, iova, n);
if (ret < 0)
return ret;
- ret = pfn_array_pin(&pa, vdev);
+ ret = page_array_pin(&pa, vdev);
if (ret < 0) {
- pfn_array_unpin_free(&pa, vdev);
+ page_array_unpin_free(&pa, vdev);
return ret;
}
l = n;
for (i = 0; i < pa.pa_nr; i++) {
- from = pa.pa_pfn[i] << PAGE_SHIFT;
+ void *from = kmap_local_page(pa.pa_page[i]);
+
m = PAGE_SIZE;
if (i == 0) {
from += iova & (PAGE_SIZE - 1);
@@ -219,14 +260,15 @@ static long copy_from_iova(struct vfio_device *vdev, void *to, u64 iova,
}
m = min(l, m);
- memcpy(to + (n - l), (void *)from, m);
+ memcpy(to + (n - l), from, m);
+ kunmap_local(from);
l -= m;
if (l == 0)
break;
}
- pfn_array_unpin_free(&pa, vdev);
+ page_array_unpin_free(&pa, vdev);
return l;
}
@@ -329,7 +371,7 @@ static struct ccwchain *ccwchain_alloc(struct channel_program *cp, int len)
chain->ch_ccw = (struct ccw1 *)data;
data = (u8 *)(chain->ch_ccw) + sizeof(*chain->ch_ccw) * len;
- chain->ch_pa = (struct pfn_array *)data;
+ chain->ch_pa = (struct page_array *)data;
chain->ch_len = len;
@@ -513,7 +555,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
struct vfio_device *vdev =
&container_of(cp, struct vfio_ccw_private, cp)->vdev;
struct ccw1 *ccw;
- struct pfn_array *pa;
+ struct page_array *pa;
u64 iova;
unsigned long *idaws;
int ret;
@@ -547,13 +589,13 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
}
/*
- * Allocate an array of pfn's for pages to pin/translate.
+ * Allocate an array of pages to pin/translate.
* The number of pages is actually the count of the idaws
 * required for the data transfer, since we only support
* 4K IDAWs today.
*/
pa = chain->ch_pa + idx;
- ret = pfn_array_alloc(pa, iova, bytes);
+ ret = page_array_alloc(pa, iova, bytes);
if (ret < 0)
goto out_free_idaws;
@@ -564,21 +606,21 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
goto out_unpin;
/*
- * Copy guest IDAWs into pfn_array, in case the memory they
+ * Copy guest IDAWs into page_array, in case the memory they
* occupy is not contiguous.
*/
for (i = 0; i < idaw_nr; i++)
- pa->pa_iova_pfn[i] = idaws[i] >> PAGE_SHIFT;
+ pa->pa_iova[i] = idaws[i];
} else {
/*
- * No action is required here; the iova addresses in pfn_array
- * were initialized sequentially in pfn_array_alloc() beginning
+ * No action is required here; the iova addresses in page_array
+ * were initialized sequentially in page_array_alloc() beginning
* with the contents of ccw->cda.
*/
}
if (ccw_does_data_transfer(ccw)) {
- ret = pfn_array_pin(pa, vdev);
+ ret = page_array_pin(pa, vdev);
if (ret < 0)
goto out_unpin;
} else {
@@ -588,13 +630,13 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
ccw->cda = (__u32) virt_to_phys(idaws);
ccw->flags |= CCW_FLAG_IDA;
- /* Populate the IDAL with pinned/translated addresses from pfn */
- pfn_array_idal_create_words(pa, idaws);
+ /* Populate the IDAL with pinned/translated addresses from page */
+ page_array_idal_create_words(pa, idaws);
return 0;
out_unpin:
- pfn_array_unpin_free(pa, vdev);
+ page_array_unpin_free(pa, vdev);
out_free_idaws:
kfree(idaws);
out_init:
@@ -700,7 +742,7 @@ void cp_free(struct channel_program *cp)
cp->initialized = false;
list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) {
for (i = 0; i < chain->ch_len; i++) {
- pfn_array_unpin_free(chain->ch_pa + i, vdev);
+ page_array_unpin_free(chain->ch_pa + i, vdev);
ccwchain_cda_free(chain, i);
}
ccwchain_free(chain);
@@ -862,11 +904,12 @@ void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
* cp_iova_pinned() - check if an iova is pinned for a ccw chain.
* @cp: channel_program on which to perform the operation
* @iova: the iova to check
+ * @length: the length to check from @iova
*
* If the @iova is currently pinned for the ccw chain, return true;
* else return false.
*/
-bool cp_iova_pinned(struct channel_program *cp, u64 iova)
+bool cp_iova_pinned(struct channel_program *cp, u64 iova, u64 length)
{
struct ccwchain *chain;
int i;
@@ -876,7 +919,7 @@ bool cp_iova_pinned(struct channel_program *cp, u64 iova)
list_for_each_entry(chain, &cp->ccwchain_list, next) {
for (i = 0; i < chain->ch_len; i++)
- if (pfn_array_iova_pinned(chain->ch_pa + i, iova))
+ if (page_array_iova_pinned(chain->ch_pa + i, iova, length))
return true;
}
diff --git a/drivers/s390/cio/vfio_ccw_cp.h b/drivers/s390/cio/vfio_ccw_cp.h
index e4c436199b4c..54d26e242533 100644
--- a/drivers/s390/cio/vfio_ccw_cp.h
+++ b/drivers/s390/cio/vfio_ccw_cp.h
@@ -41,11 +41,11 @@ struct channel_program {
struct ccw1 *guest_cp;
};
-extern int cp_init(struct channel_program *cp, union orb *orb);
-extern void cp_free(struct channel_program *cp);
-extern int cp_prefetch(struct channel_program *cp);
-extern union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm);
-extern void cp_update_scsw(struct channel_program *cp, union scsw *scsw);
-extern bool cp_iova_pinned(struct channel_program *cp, u64 iova);
+int cp_init(struct channel_program *cp, union orb *orb);
+void cp_free(struct channel_program *cp);
+int cp_prefetch(struct channel_program *cp);
+union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm);
+void cp_update_scsw(struct channel_program *cp, union scsw *scsw);
+bool cp_iova_pinned(struct channel_program *cp, u64 iova, u64 length);
#endif
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index ee182cfb467d..86d9e428357b 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -14,7 +14,6 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
-#include <linux/uuid.h>
#include <linux/mdev.h>
#include <asm/isc.h>
@@ -42,13 +41,6 @@ int vfio_ccw_sch_quiesce(struct subchannel *sch)
DECLARE_COMPLETION_ONSTACK(completion);
int iretry, ret = 0;
- spin_lock_irq(sch->lock);
- if (!sch->schib.pmcw.ena)
- goto out_unlock;
- ret = cio_disable_subchannel(sch);
- if (ret != -EBUSY)
- goto out_unlock;
-
iretry = 255;
do {
@@ -75,9 +67,7 @@ int vfio_ccw_sch_quiesce(struct subchannel *sch)
spin_lock_irq(sch->lock);
ret = cio_disable_subchannel(sch);
} while (ret == -EBUSY);
-out_unlock:
- private->state = VFIO_CCW_STATE_NOT_OPER;
- spin_unlock_irq(sch->lock);
+
return ret;
}
@@ -107,9 +97,10 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
/*
* Reset to IDLE only if processing of a channel program
* has finished. Do not overwrite a possible processing
- * state if the final interrupt was for HSCH or CSCH.
+ * state if the interrupt was unsolicited, or if the final
+ * interrupt was for HSCH or CSCH.
*/
- if (private->mdev && cp_is_finished)
+ if (cp_is_finished)
private->state = VFIO_CCW_STATE_IDLE;
if (private->io_trigger)
@@ -147,7 +138,7 @@ static struct vfio_ccw_private *vfio_ccw_alloc_private(struct subchannel *sch)
private->sch = sch;
mutex_init(&private->io_mutex);
- private->state = VFIO_CCW_STATE_NOT_OPER;
+ private->state = VFIO_CCW_STATE_STANDBY;
INIT_LIST_HEAD(&private->crw);
INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
INIT_WORK(&private->crw_work, vfio_ccw_crw_todo);
@@ -231,26 +222,15 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
dev_set_drvdata(&sch->dev, private);
- spin_lock_irq(sch->lock);
- sch->isc = VFIO_CCW_ISC;
- ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
- spin_unlock_irq(sch->lock);
+ ret = mdev_register_device(&sch->dev, &vfio_ccw_mdev_driver);
if (ret)
goto out_free;
- private->state = VFIO_CCW_STATE_STANDBY;
-
- ret = vfio_ccw_mdev_reg(sch);
- if (ret)
- goto out_disable;
-
VFIO_CCW_MSG_EVENT(4, "bound to subchannel %x.%x.%04x\n",
sch->schid.cssid, sch->schid.ssid,
sch->schid.sch_no);
return 0;
-out_disable:
- cio_disable_subchannel(sch);
out_free:
dev_set_drvdata(&sch->dev, NULL);
vfio_ccw_free_private(private);
@@ -261,8 +241,7 @@ static void vfio_ccw_sch_remove(struct subchannel *sch)
{
struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
- vfio_ccw_sch_quiesce(sch);
- vfio_ccw_mdev_unreg(sch);
+ mdev_unregister_device(&sch->dev);
dev_set_drvdata(&sch->dev, NULL);
@@ -275,7 +254,10 @@ static void vfio_ccw_sch_remove(struct subchannel *sch)
static void vfio_ccw_sch_shutdown(struct subchannel *sch)
{
- vfio_ccw_sch_quiesce(sch);
+ struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
+
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
}
/**
@@ -301,19 +283,11 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
if (work_pending(&sch->todo_work))
goto out_unlock;
- if (cio_update_schib(sch)) {
- vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
- rc = 0;
- goto out_unlock;
- }
-
- private = dev_get_drvdata(&sch->dev);
- if (private->state == VFIO_CCW_STATE_NOT_OPER) {
- private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
- VFIO_CCW_STATE_STANDBY;
- }
rc = 0;
+ if (cio_update_schib(sch))
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
+
out_unlock:
spin_unlock_irqrestore(sch->lock, flags);
@@ -358,8 +332,8 @@ static int vfio_ccw_chp_event(struct subchannel *sch,
return 0;
trace_vfio_ccw_chp_event(private->sch->schid, mask, event);
- VFIO_CCW_MSG_EVENT(2, "%pUl (%x.%x.%04x): mask=0x%x event=%d\n",
- mdev_uuid(private->mdev), sch->schid.cssid,
+ VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: mask=0x%x event=%d\n",
+ sch->schid.cssid,
sch->schid.ssid, sch->schid.sch_no,
mask, event);
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index 8483a266051c..a59c758869f8 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -10,7 +10,8 @@
*/
#include <linux/vfio.h>
-#include <linux/mdev.h>
+
+#include <asm/isc.h>
#include "ioasm.h"
#include "vfio_ccw_private.h"
@@ -161,8 +162,12 @@ static void fsm_notoper(struct vfio_ccw_private *private,
{
struct subchannel *sch = private->sch;
- VFIO_CCW_TRACE_EVENT(2, "notoper");
- VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));
+ VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: notoper event %x state %x\n",
+ sch->schid.cssid,
+ sch->schid.ssid,
+ sch->schid.sch_no,
+ event,
+ private->state);
/*
* TODO:
@@ -170,6 +175,9 @@ static void fsm_notoper(struct vfio_ccw_private *private,
*/
css_sched_sch_todo(sch, SCH_TODO_UNREG);
private->state = VFIO_CCW_STATE_NOT_OPER;
+
+ /* This is usually handled during CLOSE event */
+ cp_free(&private->cp);
}
/*
@@ -242,7 +250,6 @@ static void fsm_io_request(struct vfio_ccw_private *private,
union orb *orb;
union scsw *scsw = &private->scsw;
struct ccw_io_region *io_region = private->io_region;
- struct mdev_device *mdev = private->mdev;
char *errstr = "request";
struct subchannel_id schid = get_schid(private);
@@ -256,8 +263,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
if (orb->tm.b) {
io_region->ret_code = -EOPNOTSUPP;
VFIO_CCW_MSG_EVENT(2,
- "%pUl (%x.%x.%04x): transport mode\n",
- mdev_uuid(mdev), schid.cssid,
+ "sch %x.%x.%04x: transport mode\n",
+ schid.cssid,
schid.ssid, schid.sch_no);
errstr = "transport mode";
goto err_out;
@@ -265,8 +272,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
io_region->ret_code = cp_init(&private->cp, orb);
if (io_region->ret_code) {
VFIO_CCW_MSG_EVENT(2,
- "%pUl (%x.%x.%04x): cp_init=%d\n",
- mdev_uuid(mdev), schid.cssid,
+ "sch %x.%x.%04x: cp_init=%d\n",
+ schid.cssid,
schid.ssid, schid.sch_no,
io_region->ret_code);
errstr = "cp init";
@@ -276,8 +283,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
io_region->ret_code = cp_prefetch(&private->cp);
if (io_region->ret_code) {
VFIO_CCW_MSG_EVENT(2,
- "%pUl (%x.%x.%04x): cp_prefetch=%d\n",
- mdev_uuid(mdev), schid.cssid,
+ "sch %x.%x.%04x: cp_prefetch=%d\n",
+ schid.cssid,
schid.ssid, schid.sch_no,
io_region->ret_code);
errstr = "cp prefetch";
@@ -289,8 +296,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
io_region->ret_code = fsm_io_helper(private);
if (io_region->ret_code) {
VFIO_CCW_MSG_EVENT(2,
- "%pUl (%x.%x.%04x): fsm_io_helper=%d\n",
- mdev_uuid(mdev), schid.cssid,
+ "sch %x.%x.%04x: fsm_io_helper=%d\n",
+ schid.cssid,
schid.ssid, schid.sch_no,
io_region->ret_code);
errstr = "cp fsm_io_helper";
@@ -300,16 +307,16 @@ static void fsm_io_request(struct vfio_ccw_private *private,
return;
} else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) {
VFIO_CCW_MSG_EVENT(2,
- "%pUl (%x.%x.%04x): halt on io_region\n",
- mdev_uuid(mdev), schid.cssid,
+ "sch %x.%x.%04x: halt on io_region\n",
+ schid.cssid,
schid.ssid, schid.sch_no);
/* halt is handled via the async cmd region */
io_region->ret_code = -EOPNOTSUPP;
goto err_out;
} else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
VFIO_CCW_MSG_EVENT(2,
- "%pUl (%x.%x.%04x): clear on io_region\n",
- mdev_uuid(mdev), schid.cssid,
+ "sch %x.%x.%04x: clear on io_region\n",
+ schid.cssid,
schid.ssid, schid.sch_no);
/* clear is handled via the async cmd region */
io_region->ret_code = -EOPNOTSUPP;
@@ -366,6 +373,54 @@ static void fsm_irq(struct vfio_ccw_private *private,
complete(private->completion);
}
+static void fsm_open(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ struct subchannel *sch = private->sch;
+ int ret;
+
+ spin_lock_irq(sch->lock);
+ sch->isc = VFIO_CCW_ISC;
+ ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+ if (ret)
+ goto err_unlock;
+
+ private->state = VFIO_CCW_STATE_IDLE;
+ spin_unlock_irq(sch->lock);
+ return;
+
+err_unlock:
+ spin_unlock_irq(sch->lock);
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
+}
+
+static void fsm_close(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ struct subchannel *sch = private->sch;
+ int ret;
+
+ spin_lock_irq(sch->lock);
+
+ if (!sch->schib.pmcw.ena)
+ goto err_unlock;
+
+ ret = cio_disable_subchannel(sch);
+ if (ret == -EBUSY)
+ ret = vfio_ccw_sch_quiesce(sch);
+ if (ret)
+ goto err_unlock;
+
+ private->state = VFIO_CCW_STATE_STANDBY;
+ spin_unlock_irq(sch->lock);
+ cp_free(&private->cp);
+ return;
+
+err_unlock:
+ spin_unlock_irq(sch->lock);
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
+}
+
/*
* Device statemachine
*/
@@ -375,29 +430,39 @@ fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS] = {
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_error,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_error,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_disabled_irq,
+ [VFIO_CCW_EVENT_OPEN] = fsm_nop,
+ [VFIO_CCW_EVENT_CLOSE] = fsm_nop,
},
[VFIO_CCW_STATE_STANDBY] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_error,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_error,
- [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ [VFIO_CCW_EVENT_INTERRUPT] = fsm_disabled_irq,
+ [VFIO_CCW_EVENT_OPEN] = fsm_open,
+ [VFIO_CCW_EVENT_CLOSE] = fsm_notoper,
},
[VFIO_CCW_STATE_IDLE] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_request,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_request,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ [VFIO_CCW_EVENT_OPEN] = fsm_notoper,
+ [VFIO_CCW_EVENT_CLOSE] = fsm_close,
},
[VFIO_CCW_STATE_CP_PROCESSING] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_retry,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_retry,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ [VFIO_CCW_EVENT_OPEN] = fsm_notoper,
+ [VFIO_CCW_EVENT_CLOSE] = fsm_close,
},
[VFIO_CCW_STATE_CP_PENDING] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_busy,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_request,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ [VFIO_CCW_EVENT_OPEN] = fsm_notoper,
+ [VFIO_CCW_EVENT_CLOSE] = fsm_close,
},
};
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index b49e2e9db2dc..4a806a2273b5 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -21,54 +21,28 @@ static const struct vfio_device_ops vfio_ccw_dev_ops;
static int vfio_ccw_mdev_reset(struct vfio_ccw_private *private)
{
- struct subchannel *sch;
- int ret;
-
- sch = private->sch;
/*
- * TODO:
- * In the cureent stage, some things like "no I/O running" and "no
- * interrupt pending" are clear, but we are not sure what other state
- * we need to care about.
- * There are still a lot more instructions need to be handled. We
- * should come back here later.
+ * If the FSM state is seen as Not Operational after closing
+ * and re-opening the mdev, return an error.
*/
- ret = vfio_ccw_sch_quiesce(sch);
- if (ret)
- return ret;
-
- ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
- if (!ret)
- private->state = VFIO_CCW_STATE_IDLE;
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_OPEN);
+ if (private->state == VFIO_CCW_STATE_NOT_OPER)
+ return -EINVAL;
- return ret;
+ return 0;
}
-static int vfio_ccw_mdev_notifier(struct notifier_block *nb,
- unsigned long action,
- void *data)
+static void vfio_ccw_dma_unmap(struct vfio_device *vdev, u64 iova, u64 length)
{
struct vfio_ccw_private *private =
- container_of(nb, struct vfio_ccw_private, nb);
-
- /*
- * Vendor drivers MUST unpin pages in response to an
- * invalidation.
- */
- if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
- struct vfio_iommu_type1_dma_unmap *unmap = data;
-
- if (!cp_iova_pinned(&private->cp, unmap->iova))
- return NOTIFY_OK;
+ container_of(vdev, struct vfio_ccw_private, vdev);
- if (vfio_ccw_mdev_reset(private))
- return NOTIFY_BAD;
+ /* Drivers MUST unpin pages in response to an invalidation. */
+ if (!cp_iova_pinned(&private->cp, iova, length))
+ return;
- cp_free(&private->cp);
- return NOTIFY_OK;
- }
-
- return NOTIFY_DONE;
+ vfio_ccw_mdev_reset(private);
}
static ssize_t name_show(struct mdev_type *mtype,
@@ -128,11 +102,8 @@ static int vfio_ccw_mdev_probe(struct mdev_device *mdev)
vfio_init_group_dev(&private->vdev, &mdev->dev,
&vfio_ccw_dev_ops);
- private->mdev = mdev;
- private->state = VFIO_CCW_STATE_IDLE;
-
- VFIO_CCW_MSG_EVENT(2, "mdev %pUl, sch %x.%x.%04x: create\n",
- mdev_uuid(mdev), private->sch->schid.cssid,
+ VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: create\n",
+ private->sch->schid.cssid,
private->sch->schid.ssid,
private->sch->schid.sch_no);
@@ -145,8 +116,6 @@ static int vfio_ccw_mdev_probe(struct mdev_device *mdev)
err_atomic:
vfio_uninit_group_dev(&private->vdev);
atomic_inc(&private->avail);
- private->mdev = NULL;
- private->state = VFIO_CCW_STATE_IDLE;
return ret;
}
@@ -154,23 +123,14 @@ static void vfio_ccw_mdev_remove(struct mdev_device *mdev)
{
struct vfio_ccw_private *private = dev_get_drvdata(mdev->dev.parent);
- VFIO_CCW_MSG_EVENT(2, "mdev %pUl, sch %x.%x.%04x: remove\n",
- mdev_uuid(mdev), private->sch->schid.cssid,
+ VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: remove\n",
+ private->sch->schid.cssid,
private->sch->schid.ssid,
private->sch->schid.sch_no);
vfio_unregister_group_dev(&private->vdev);
- if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
- (private->state != VFIO_CCW_STATE_STANDBY)) {
- if (!vfio_ccw_sch_quiesce(private->sch))
- private->state = VFIO_CCW_STATE_STANDBY;
- /* The state will be NOT_OPER on error. */
- }
-
vfio_uninit_group_dev(&private->vdev);
- cp_free(&private->cp);
- private->mdev = NULL;
atomic_inc(&private->avail);
}
@@ -178,19 +138,15 @@ static int vfio_ccw_mdev_open_device(struct vfio_device *vdev)
{
struct vfio_ccw_private *private =
container_of(vdev, struct vfio_ccw_private, vdev);
- unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
int ret;
- private->nb.notifier_call = vfio_ccw_mdev_notifier;
-
- ret = vfio_register_notifier(vdev, VFIO_IOMMU_NOTIFY,
- &events, &private->nb);
- if (ret)
- return ret;
+ /* Device cannot simply be opened again from this state */
+ if (private->state == VFIO_CCW_STATE_NOT_OPER)
+ return -EINVAL;
ret = vfio_ccw_register_async_dev_regions(private);
if (ret)
- goto out_unregister;
+ return ret;
ret = vfio_ccw_register_schib_dev_regions(private);
if (ret)
@@ -200,11 +156,16 @@ static int vfio_ccw_mdev_open_device(struct vfio_device *vdev)
if (ret)
goto out_unregister;
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_OPEN);
+ if (private->state == VFIO_CCW_STATE_NOT_OPER) {
+ ret = -EINVAL;
+ goto out_unregister;
+ }
+
return ret;
out_unregister:
vfio_ccw_unregister_dev_regions(private);
- vfio_unregister_notifier(vdev, VFIO_IOMMU_NOTIFY, &private->nb);
return ret;
}
@@ -213,16 +174,8 @@ static void vfio_ccw_mdev_close_device(struct vfio_device *vdev)
struct vfio_ccw_private *private =
container_of(vdev, struct vfio_ccw_private, vdev);
- if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
- (private->state != VFIO_CCW_STATE_STANDBY)) {
- if (!vfio_ccw_mdev_reset(private))
- private->state = VFIO_CCW_STATE_STANDBY;
- /* The state will be NOT_OPER on error. */
- }
-
- cp_free(&private->cp);
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
vfio_ccw_unregister_dev_regions(private);
- vfio_unregister_notifier(vdev, VFIO_IOMMU_NOTIFY, &private->nb);
}
static ssize_t vfio_ccw_mdev_read_io_region(struct vfio_ccw_private *private,
@@ -645,6 +598,7 @@ static const struct vfio_device_ops vfio_ccw_dev_ops = {
.write = vfio_ccw_mdev_write,
.ioctl = vfio_ccw_mdev_ioctl,
.request = vfio_ccw_mdev_request,
+ .dma_unmap = vfio_ccw_dma_unmap,
};
struct mdev_driver vfio_ccw_mdev_driver = {
@@ -657,13 +611,3 @@ struct mdev_driver vfio_ccw_mdev_driver = {
.remove = vfio_ccw_mdev_remove,
.supported_type_groups = mdev_type_groups,
};
-
-int vfio_ccw_mdev_reg(struct subchannel *sch)
-{
- return mdev_register_device(&sch->dev, &vfio_ccw_mdev_driver);
-}
-
-void vfio_ccw_mdev_unreg(struct subchannel *sch)
-{
- mdev_unregister_device(&sch->dev);
-}
diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h
index 7272eb788612..cd24b7fada91 100644
--- a/drivers/s390/cio/vfio_ccw_private.h
+++ b/drivers/s390/cio/vfio_ccw_private.h
@@ -73,8 +73,6 @@ struct vfio_ccw_crw {
* @state: internal state of the device
* @completion: synchronization helper of the I/O completion
* @avail: available for creating a mediated device
- * @mdev: pointer to the mediated device
- * @nb: notifier for vfio events
* @io_region: MMIO region to input/output I/O arguments/results
* @io_mutex: protect against concurrent update of I/O regions
* @region: additional regions for other subchannel operations
@@ -97,8 +95,6 @@ struct vfio_ccw_private {
int state;
struct completion *completion;
atomic_t avail;
- struct mdev_device *mdev;
- struct notifier_block nb;
struct ccw_io_region *io_region;
struct mutex io_mutex;
struct vfio_ccw_region *region;
@@ -119,10 +115,7 @@ struct vfio_ccw_private {
struct work_struct crw_work;
} __aligned(8);
-extern int vfio_ccw_mdev_reg(struct subchannel *sch);
-extern void vfio_ccw_mdev_unreg(struct subchannel *sch);
-
-extern int vfio_ccw_sch_quiesce(struct subchannel *sch);
+int vfio_ccw_sch_quiesce(struct subchannel *sch);
extern struct mdev_driver vfio_ccw_mdev_driver;
@@ -147,6 +140,8 @@ enum vfio_ccw_event {
VFIO_CCW_EVENT_IO_REQ,
VFIO_CCW_EVENT_INTERRUPT,
VFIO_CCW_EVENT_ASYNC_REQ,
+ VFIO_CCW_EVENT_OPEN,
+ VFIO_CCW_EVENT_CLOSE,
/* last element! */
NR_VFIO_CCW_EVENTS
};
@@ -158,7 +153,7 @@ typedef void (fsm_func_t)(struct vfio_ccw_private *, enum vfio_ccw_event);
extern fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS];
static inline void vfio_ccw_fsm_event(struct vfio_ccw_private *private,
- int event)
+ enum vfio_ccw_event event)
{
trace_vfio_ccw_fsm_event(private->sch->schid, private->state, event);
vfio_ccw_jumptable[private->state][event](private, event);
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 95cfdeab5baf..59ac98f2bd27 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -838,6 +838,17 @@ static void ap_bus_revise_bindings(void)
bus_for_each_dev(&ap_bus_type, NULL, NULL, __ap_revise_reserved);
}
+/**
+ * ap_owned_by_def_drv: indicates whether an AP adapter is reserved for the
+ * default host driver or not.
+ * @card: the APID of the adapter card to check
+ * @queue: the APQI of the queue to check
+ *
+ * Note: the ap_perms_mutex must be locked by the caller of this function.
+ *
+ * Return: an int specifying whether the AP adapter is reserved for the host (1)
+ * or not (0).
+ */
int ap_owned_by_def_drv(int card, int queue)
{
int rc = 0;
@@ -845,25 +856,31 @@ int ap_owned_by_def_drv(int card, int queue)
if (card < 0 || card >= AP_DEVICES || queue < 0 || queue >= AP_DOMAINS)
return -EINVAL;
- mutex_lock(&ap_perms_mutex);
-
if (test_bit_inv(card, ap_perms.apm) &&
test_bit_inv(queue, ap_perms.aqm))
rc = 1;
- mutex_unlock(&ap_perms_mutex);
-
return rc;
}
EXPORT_SYMBOL(ap_owned_by_def_drv);
+/**
+ * ap_apqn_in_matrix_owned_by_def_drv: indicates whether every APQN contained in
+ * a set is reserved for the host drivers
+ * or not.
+ * @apm: a bitmap specifying a set of APIDs comprising the APQNs to check
+ * @aqm: a bitmap specifying a set of APQIs comprising the APQNs to check
+ *
+ * Note: the ap_perms_mutex must be locked by the caller of this function.
+ *
+ * Return: an int specifying whether each APQN is reserved for the host (1) or
+ * not (0)
+ */
int ap_apqn_in_matrix_owned_by_def_drv(unsigned long *apm,
unsigned long *aqm)
{
int card, queue, rc = 0;
- mutex_lock(&ap_perms_mutex);
-
for (card = 0; !rc && card < AP_DEVICES; card++)
if (test_bit_inv(card, apm) &&
test_bit_inv(card, ap_perms.apm))
@@ -872,8 +889,6 @@ int ap_apqn_in_matrix_owned_by_def_drv(unsigned long *apm,
test_bit_inv(queue, ap_perms.aqm))
rc = 1;
- mutex_unlock(&ap_perms_mutex);
-
return rc;
}
EXPORT_SYMBOL(ap_apqn_in_matrix_owned_by_def_drv);
@@ -2071,6 +2086,9 @@ static inline void ap_scan_adapter(int ap)
*/
static bool ap_get_configuration(void)
{
+ if (!ap_qci_info) /* QCI not supported */
+ return false;
+
memcpy(ap_qci_info_old, ap_qci_info, sizeof(*ap_qci_info));
ap_fetch_qci_info(ap_qci_info);
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 0c40af157df2..0f17933954fb 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -148,12 +148,16 @@ struct ap_driver {
/*
* Called at the start of the ap bus scan function when
* the crypto config information (qci) has changed.
+ * This callback is not invoked if there is no AP
+ * QCI support available.
*/
void (*on_config_changed)(struct ap_config_info *new_config_info,
struct ap_config_info *old_config_info);
/*
* Called at the end of the ap bus scan function when
* the crypto config information (qci) has changed.
+ * This callback is not invoked if there is no AP
+ * QCI support available.
*/
void (*on_scan_complete)(struct ap_config_info *new_config_info,
struct ap_config_info *old_config_info);
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index c48b0db824e3..a32457b4cbb8 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -34,7 +34,7 @@ static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
qirqctrl.ir = 1;
qirqctrl.isc = AP_ISC;
- status = ap_aqic(aq->qid, qirqctrl, ind);
+ status = ap_aqic(aq->qid, qirqctrl, virt_to_phys(ind));
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
case AP_RESPONSE_OTHERWISE_CHANGED:
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 7329caa7d467..5a05d1cdfec2 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -2115,5 +2115,5 @@ static void __exit pkey_exit(void)
pkey_debug_exit();
}
-module_cpu_feature_match(MSA, pkey_init);
+module_cpu_feature_match(S390_CPU_FEATURE_MSA, pkey_init);
module_exit(pkey_exit);
diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c
index 4ac9c6521ec1..f43cfeabd2cc 100644
--- a/drivers/s390/crypto/vfio_ap_drv.c
+++ b/drivers/s390/crypto/vfio_ap_drv.c
@@ -18,9 +18,6 @@
#define VFIO_AP_ROOT_NAME "vfio_ap"
#define VFIO_AP_DEV_NAME "matrix"
-#define AP_QUEUE_ASSIGNED "assigned"
-#define AP_QUEUE_UNASSIGNED "unassigned"
-#define AP_QUEUE_IN_USE "in use"
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("VFIO AP device driver, Copyright IBM Corp. 2018");
@@ -46,120 +43,12 @@ static struct ap_device_id ap_queue_ids[] = {
{ /* end of sibling */ },
};
-static struct ap_matrix_mdev *vfio_ap_mdev_for_queue(struct vfio_ap_queue *q)
-{
- struct ap_matrix_mdev *matrix_mdev;
- unsigned long apid = AP_QID_CARD(q->apqn);
- unsigned long apqi = AP_QID_QUEUE(q->apqn);
-
- list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
- if (test_bit_inv(apid, matrix_mdev->matrix.apm) &&
- test_bit_inv(apqi, matrix_mdev->matrix.aqm))
- return matrix_mdev;
- }
-
- return NULL;
-}
-
-static ssize_t status_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- ssize_t nchars = 0;
- struct vfio_ap_queue *q;
- struct ap_matrix_mdev *matrix_mdev;
- struct ap_device *apdev = to_ap_dev(dev);
-
- mutex_lock(&matrix_dev->lock);
- q = dev_get_drvdata(&apdev->device);
- matrix_mdev = vfio_ap_mdev_for_queue(q);
-
- if (matrix_mdev) {
- if (matrix_mdev->kvm)
- nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
- AP_QUEUE_IN_USE);
- else
- nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
- AP_QUEUE_ASSIGNED);
- } else {
- nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
- AP_QUEUE_UNASSIGNED);
- }
-
- mutex_unlock(&matrix_dev->lock);
-
- return nchars;
-}
-
-static DEVICE_ATTR_RO(status);
-
-static struct attribute *vfio_queue_attrs[] = {
- &dev_attr_status.attr,
- NULL,
-};
-
-static const struct attribute_group vfio_queue_attr_group = {
- .attrs = vfio_queue_attrs,
-};
-
-/**
- * vfio_ap_queue_dev_probe: Allocate a vfio_ap_queue structure and associate it
- * with the device as driver_data.
- *
- * @apdev: the AP device being probed
- *
- * Return: returns 0 if the probe succeeded; otherwise, returns an error if
- * storage could not be allocated for a vfio_ap_queue object or the
- * sysfs 'status' attribute could not be created for the queue device.
- */
-static int vfio_ap_queue_dev_probe(struct ap_device *apdev)
-{
- int ret;
- struct vfio_ap_queue *q;
-
- q = kzalloc(sizeof(*q), GFP_KERNEL);
- if (!q)
- return -ENOMEM;
-
- mutex_lock(&matrix_dev->lock);
- dev_set_drvdata(&apdev->device, q);
- q->apqn = to_ap_queue(&apdev->device)->qid;
- q->saved_isc = VFIO_AP_ISC_INVALID;
-
- ret = sysfs_create_group(&apdev->device.kobj, &vfio_queue_attr_group);
- if (ret) {
- dev_set_drvdata(&apdev->device, NULL);
- kfree(q);
- }
-
- mutex_unlock(&matrix_dev->lock);
-
- return ret;
-}
-
-/**
- * vfio_ap_queue_dev_remove: Free the associated vfio_ap_queue structure.
- *
- * @apdev: the AP device being removed
- *
- * Takes the matrix lock to avoid actions on this device while doing the remove.
- */
-static void vfio_ap_queue_dev_remove(struct ap_device *apdev)
-{
- struct vfio_ap_queue *q;
-
- mutex_lock(&matrix_dev->lock);
- sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group);
- q = dev_get_drvdata(&apdev->device);
- vfio_ap_mdev_reset_queue(q, 1);
- dev_set_drvdata(&apdev->device, NULL);
- kfree(q);
- mutex_unlock(&matrix_dev->lock);
-}
-
static struct ap_driver vfio_ap_drv = {
- .probe = vfio_ap_queue_dev_probe,
- .remove = vfio_ap_queue_dev_remove,
+ .probe = vfio_ap_mdev_probe_queue,
+ .remove = vfio_ap_mdev_remove_queue,
+ .in_use = vfio_ap_mdev_resource_in_use,
+ .on_config_changed = vfio_ap_on_cfg_changed,
+ .on_scan_complete = vfio_ap_on_scan_complete,
.ids = ap_queue_ids,
};
@@ -212,8 +101,9 @@ static int vfio_ap_matrix_dev_create(void)
goto matrix_alloc_err;
}
- mutex_init(&matrix_dev->lock);
+ mutex_init(&matrix_dev->mdevs_lock);
INIT_LIST_HEAD(&matrix_dev->mdev_list);
+ mutex_init(&matrix_dev->guests_lock);
dev_set_name(&matrix_dev->device, "%s", VFIO_AP_DEV_NAME);
matrix_dev->device.parent = root_device;
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index a7d2a95796d3..6c8c41fac4e1 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -26,44 +26,193 @@
#define VFIO_AP_MDEV_TYPE_HWVIRT "passthrough"
#define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device"
-static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev);
+#define AP_QUEUE_ASSIGNED "assigned"
+#define AP_QUEUE_UNASSIGNED "unassigned"
+#define AP_QUEUE_IN_USE "in use"
+
+static int vfio_ap_mdev_reset_queues(struct ap_queue_table *qtable);
static struct vfio_ap_queue *vfio_ap_find_queue(int apqn);
static const struct vfio_device_ops vfio_ap_matrix_dev_ops;
+static int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q, unsigned int retry);
+
+/**
+ * get_update_locks_for_kvm: Acquire the locks required to dynamically update a
+ * KVM guest's APCB in the proper order.
+ *
+ * @kvm: a pointer to a struct kvm object containing the KVM guest's APCB.
+ *
+ * The proper locking order is:
+ * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
+ * guest's APCB.
+ * 2. kvm->lock: required to update a guest's APCB
+ * 3. matrix_dev->mdevs_lock: required to access data stored in a matrix_mdev
+ *
+ * Note: If @kvm is NULL, the KVM lock will not be taken.
+ */
+static inline void get_update_locks_for_kvm(struct kvm *kvm)
+{
+ mutex_lock(&matrix_dev->guests_lock);
+ if (kvm)
+ mutex_lock(&kvm->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+}
+
+/**
+ * release_update_locks_for_kvm: Release the locks used to dynamically update a
+ * KVM guest's APCB in the proper order.
+ *
+ * @kvm: a pointer to a struct kvm object containing the KVM guest's APCB.
+ *
+ * The proper unlocking order is:
+ * 1. matrix_dev->mdevs_lock
+ * 2. kvm->lock
+ * 3. matrix_dev->guests_lock
+ *
+ * Note: If @kvm is NULL, the KVM lock will not be released.
+ */
+static inline void release_update_locks_for_kvm(struct kvm *kvm)
+{
+ mutex_unlock(&matrix_dev->mdevs_lock);
+ if (kvm)
+ mutex_unlock(&kvm->lock);
+ mutex_unlock(&matrix_dev->guests_lock);
+}
+
+/**
+ * get_update_locks_for_mdev: Acquire the locks required to dynamically update a
+ * KVM guest's APCB in the proper order.
+ *
+ * @matrix_mdev: a pointer to a struct ap_matrix_mdev object containing the AP
+ * configuration data to use to update a KVM guest's APCB.
+ *
+ * The proper locking order is:
+ * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
+ * guest's APCB.
+ * 2. matrix_mdev->kvm->lock: required to update a guest's APCB
+ * 3. matrix_dev->mdevs_lock: required to access data stored in a matrix_mdev
+ *
+ * Note: If @matrix_mdev is NULL or is not attached to a KVM guest, the KVM
+ * lock will not be taken.
+ */
+static inline void get_update_locks_for_mdev(struct ap_matrix_mdev *matrix_mdev)
+{
+ mutex_lock(&matrix_dev->guests_lock);
+ if (matrix_mdev && matrix_mdev->kvm)
+ mutex_lock(&matrix_mdev->kvm->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+}
+
+/**
+ * release_update_locks_for_mdev: Release the locks used to dynamically update a
+ * KVM guest's APCB in the proper order.
+ *
+ * @matrix_mdev: a pointer to a struct ap_matrix_mdev object containing the AP
+ * configuration data to use to update a KVM guest's APCB.
+ *
+ * The proper unlocking order is:
+ * 1. matrix_dev->mdevs_lock
+ * 2. matrix_mdev->kvm->lock
+ * 3. matrix_dev->guests_lock
+ *
+ * Note: If @matrix_mdev is NULL or is not attached to a KVM guest, the KVM
+ * lock will not be released.
+ */
+static inline void release_update_locks_for_mdev(struct ap_matrix_mdev *matrix_mdev)
+{
+ mutex_unlock(&matrix_dev->mdevs_lock);
+ if (matrix_mdev && matrix_mdev->kvm)
+ mutex_unlock(&matrix_mdev->kvm->lock);
+ mutex_unlock(&matrix_dev->guests_lock);
+}
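
For readers tracing the new locking scheme, here is a minimal user-space sketch (not part of the patch) of the acquire/release ordering documented above: guests_lock first, then the per-guest lock, then mdevs_lock, released in reverse order. The pthread mutexes and the toy_* names are stand-ins invented purely for illustration.

/* User-space model of the lock ordering documented above; all names here
 * are hypothetical and only mirror the acquire/release discipline. */
#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t guests_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t mdevs_lock  = PTHREAD_MUTEX_INITIALIZER;

struct toy_guest { pthread_mutex_t lock; };

static void get_update_locks(struct toy_guest *guest)
{
        pthread_mutex_lock(&guests_lock);         /* 1. guests_lock         */
        if (guest)
                pthread_mutex_lock(&guest->lock); /* 2. per-guest (kvm) lock */
        pthread_mutex_lock(&mdevs_lock);          /* 3. mdevs_lock          */
}

static void release_update_locks(struct toy_guest *guest)
{
        pthread_mutex_unlock(&mdevs_lock);          /* 3 */
        if (guest)
                pthread_mutex_unlock(&guest->lock); /* 2 */
        pthread_mutex_unlock(&guests_lock);         /* 1 */
}

int main(void)
{
        static struct toy_guest g = { .lock = PTHREAD_MUTEX_INITIALIZER };

        get_update_locks(&g);    /* critical section: update the guest's APCB */
        release_update_locks(&g);
        get_update_locks(NULL);  /* mdev not attached to a guest: skip step 2 */
        release_update_locks(NULL);
        return 0;
}
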
-static int match_apqn(struct device *dev, const void *data)
+/**
+ * get_update_locks_by_apqn: Find the mdev to which an APQN is assigned and
+ * acquire the locks required to update the APCB of
+ * the KVM guest to which the mdev is attached.
+ *
+ * @apqn: the APQN of a queue device.
+ *
+ * The proper locking order is:
+ * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
+ * guest's APCB.
+ * 2. matrix_mdev->kvm->lock: required to update a guest's APCB
+ * 3. matrix_dev->mdevs_lock: required to access data stored in a matrix_mdev
+ *
+ * Note: If @apqn is not assigned to a matrix_mdev, the matrix_mdev->kvm->lock
+ * will not be taken.
+ *
+ * Return: the ap_matrix_mdev object to which @apqn is assigned or NULL if @apqn
+ * is not assigned to an ap_matrix_mdev.
+ */
+static struct ap_matrix_mdev *get_update_locks_by_apqn(int apqn)
{
- struct vfio_ap_queue *q = dev_get_drvdata(dev);
+ struct ap_matrix_mdev *matrix_mdev;
+
+ mutex_lock(&matrix_dev->guests_lock);
+
+ list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
+ if (test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm) &&
+ test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm)) {
+ if (matrix_mdev->kvm)
+ mutex_lock(&matrix_mdev->kvm->lock);
+
+ mutex_lock(&matrix_dev->mdevs_lock);
+
+ return matrix_mdev;
+ }
+ }
- return (q->apqn == *(int *)(data)) ? 1 : 0;
+ mutex_lock(&matrix_dev->mdevs_lock);
+
+ return NULL;
}
/**
- * vfio_ap_get_queue - retrieve a queue with a specific APQN from a list
- * @matrix_mdev: the associated mediated matrix
- * @apqn: The queue APQN
+ * get_update_locks_for_queue: get the locks required to update the APCB of the
+ * KVM guest to which the matrix mdev linked to a
+ * vfio_ap_queue object is attached.
+ *
+ * @q: a pointer to a vfio_ap_queue object.
*
- * Retrieve a queue with a specific APQN from the list of the
- * devices of the vfio_ap_drv.
- * Verify that the APID and the APQI are set in the matrix.
+ * The proper locking order is:
+ * 1. q->matrix_dev->guests_lock: required to use the KVM pointer to update a
+ * KVM guest's APCB.
+ * 2. q->matrix_mdev->kvm->lock: required to update a guest's APCB
+ * 3. matrix_dev->mdevs_lock: required to access data stored in matrix_mdev
+ *
+ * Note: if @q is not linked to an ap_matrix_mdev object, the KVM lock
+ * will not be taken.
+ */
+static inline void get_update_locks_for_queue(struct vfio_ap_queue *q)
+{
+ mutex_lock(&matrix_dev->guests_lock);
+ if (q->matrix_mdev && q->matrix_mdev->kvm)
+ mutex_lock(&q->matrix_mdev->kvm->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+}
+
+/**
+ * vfio_ap_mdev_get_queue - retrieve a queue with a specific APQN from a
+ * hash table of queues assigned to a matrix mdev
+ * @matrix_mdev: the matrix mdev
+ * @apqn: The APQN of a queue device
*
- * Return: the pointer to the associated vfio_ap_queue
+ * Return: the pointer to the vfio_ap_queue struct representing the queue or
+ * NULL if the queue is not assigned to @matrix_mdev
*/
-static struct vfio_ap_queue *vfio_ap_get_queue(
+static struct vfio_ap_queue *vfio_ap_mdev_get_queue(
struct ap_matrix_mdev *matrix_mdev,
int apqn)
{
struct vfio_ap_queue *q;
- if (!test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm))
- return NULL;
- if (!test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm))
- return NULL;
-
- q = vfio_ap_find_queue(apqn);
- if (q)
- q->matrix_mdev = matrix_mdev;
+ hash_for_each_possible(matrix_mdev->qtable.queues, q, mdev_qnode,
+ apqn) {
+ if (q && q->apqn == apqn)
+ return q;
+ }
- return q;
+ return NULL;
}
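
As an aside, a small user-space sketch (not part of the patch) of the lookup above: queues keyed by APQN in a hash table, assuming the conventional packing of an 8-bit adapter number (APID) and an 8-bit domain index (APQI) into a 16-bit APQN. The MKQID/QID_* helpers and the toy_* names are hypothetical.

/* Illustrative model of hashing queues by APQN, as vfio_ap_mdev_get_queue()
 * does against matrix_mdev->qtable.queues.  Not part of the patch. */
#include <stdio.h>

#define MKQID(apid, apqi)  ((((apid) & 0xff) << 8) | ((apqi) & 0xff))
#define QID_CARD(apqn)     (((apqn) >> 8) & 0xff)
#define QID_QUEUE(apqn)    ((apqn) & 0xff)

#define NR_BUCKETS 16

struct toy_queue {
        int apqn;
        struct toy_queue *next;   /* hash-chain link, like mdev_qnode */
};

static struct toy_queue *buckets[NR_BUCKETS];

static void link_queue(struct toy_queue *q)
{
        unsigned int b = q->apqn % NR_BUCKETS;

        q->next = buckets[b];
        buckets[b] = q;
}

static struct toy_queue *get_queue(int apqn)
{
        struct toy_queue *q;

        for (q = buckets[apqn % NR_BUCKETS]; q; q = q->next)
                if (q->apqn == apqn)
                        return q;
        return NULL;
}

int main(void)
{
        struct toy_queue q = { .apqn = MKQID(0x04, 0x21) };

        link_queue(&q);
        if (get_queue(MKQID(0x04, 0x21)))
                printf("found %02x.%04x\n",
                       QID_CARD(q.apqn), QID_QUEUE(q.apqn));
        return 0;
}
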
/**
@@ -112,7 +261,7 @@ static void vfio_ap_wait_for_irqclear(int apqn)
*
* Unregisters the ISC in the GIB when the saved ISC is not invalid.
* Unpins the guest's page holding the NIB when it exists.
- * Resets the saved_pfn and saved_isc to invalid values.
+ * Resets the saved_iova and saved_isc to invalid values.
*/
static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
{
@@ -123,9 +272,9 @@ static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc);
q->saved_isc = VFIO_AP_ISC_INVALID;
}
- if (q->saved_pfn && !WARN_ON(!q->matrix_mdev)) {
- vfio_unpin_pages(&q->matrix_mdev->vdev, &q->saved_pfn, 1);
- q->saved_pfn = 0;
+ if (q->saved_iova && !WARN_ON(!q->matrix_mdev)) {
+ vfio_unpin_pages(&q->matrix_mdev->vdev, q->saved_iova, 1);
+ q->saved_iova = 0;
}
}
@@ -154,7 +303,7 @@ static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
int retries = 5;
do {
- status = ap_aqic(q->apqn, aqic_gisa, NULL);
+ status = ap_aqic(q->apqn, aqic_gisa, 0);
switch (status.response_code) {
case AP_RESPONSE_OTHERWISE_CHANGED:
case AP_RESPONSE_NORMAL:
@@ -180,7 +329,6 @@ static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
status.response_code);
end_free:
vfio_ap_free_aqic_resources(q);
- q->matrix_mdev = NULL;
return status;
}
@@ -189,27 +337,19 @@ end_free:
*
* @vcpu: the object representing the vcpu executing the PQAP(AQIC) instruction.
* @nib: the location for storing the nib address.
- * @g_pfn: the location for storing the page frame number of the page containing
- * the nib.
*
* When the PQAP(AQIC) instruction is executed, general register 2 contains the
* address of the notification indicator byte (nib) used for IRQ notification.
- * This function parses the nib from gr2 and calculates the page frame
- * number for the guest of the page containing the nib. The values are
- * stored in @nib and @g_pfn respectively.
- *
- * The g_pfn of the nib is then validated to ensure the nib address is valid.
+ * This function parses and validates the nib from gr2.
*
* Return: returns zero if the nib address is a valid; otherwise, returns
* -EINVAL.
*/
-static int vfio_ap_validate_nib(struct kvm_vcpu *vcpu, unsigned long *nib,
- unsigned long *g_pfn)
+static int vfio_ap_validate_nib(struct kvm_vcpu *vcpu, dma_addr_t *nib)
{
*nib = vcpu->run->s.regs.gprs[2];
- *g_pfn = *nib >> PAGE_SHIFT;
- if (kvm_is_error_hva(gfn_to_hva(vcpu->kvm, *g_pfn)))
+ if (kvm_is_error_hva(gfn_to_hva(vcpu->kvm, *nib >> PAGE_SHIFT)))
return -EINVAL;
return 0;
@@ -239,33 +379,34 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
int isc,
struct kvm_vcpu *vcpu)
{
- unsigned long nib;
struct ap_qirq_ctrl aqic_gisa = {};
struct ap_queue_status status = {};
struct kvm_s390_gisa *gisa;
+ struct page *h_page;
int nisc;
struct kvm *kvm;
- unsigned long h_nib, g_pfn, h_pfn;
+ phys_addr_t h_nib;
+ dma_addr_t nib;
int ret;
/* Verify that the notification indicator byte address is valid */
- if (vfio_ap_validate_nib(vcpu, &nib, &g_pfn)) {
- VFIO_AP_DBF_WARN("%s: invalid NIB address: nib=%#lx, g_pfn=%#lx, apqn=%#04x\n",
- __func__, nib, g_pfn, q->apqn);
+ if (vfio_ap_validate_nib(vcpu, &nib)) {
+ VFIO_AP_DBF_WARN("%s: invalid NIB address: nib=%pad, apqn=%#04x\n",
+ __func__, &nib, q->apqn);
status.response_code = AP_RESPONSE_INVALID_ADDRESS;
return status;
}
- ret = vfio_pin_pages(&q->matrix_mdev->vdev, &g_pfn, 1,
- IOMMU_READ | IOMMU_WRITE, &h_pfn);
+ ret = vfio_pin_pages(&q->matrix_mdev->vdev, nib, 1,
+ IOMMU_READ | IOMMU_WRITE, &h_page);
switch (ret) {
case 1:
break;
default:
VFIO_AP_DBF_WARN("%s: vfio_pin_pages failed: rc=%d,"
- "nib=%#lx, g_pfn=%#lx, apqn=%#04x\n",
- __func__, ret, nib, g_pfn, q->apqn);
+ "nib=%pad, apqn=%#04x\n",
+ __func__, ret, &nib, q->apqn);
status.response_code = AP_RESPONSE_INVALID_ADDRESS;
return status;
@@ -274,7 +415,7 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
kvm = q->matrix_mdev->kvm;
gisa = kvm->arch.gisa_int.origin;
- h_nib = (h_pfn << PAGE_SHIFT) | (nib & ~PAGE_MASK);
+ h_nib = page_to_phys(h_page) | (nib & ~PAGE_MASK);
aqic_gisa.gisc = isc;
nisc = kvm_s390_gisc_register(kvm, isc);
@@ -290,17 +431,17 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
aqic_gisa.ir = 1;
aqic_gisa.gisa = (uint64_t)gisa >> 4;
- status = ap_aqic(q->apqn, aqic_gisa, (void *)h_nib);
+ status = ap_aqic(q->apqn, aqic_gisa, h_nib);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
/* See if we did clear older IRQ configuration */
vfio_ap_free_aqic_resources(q);
- q->saved_pfn = g_pfn;
+ q->saved_iova = nib;
q->saved_isc = isc;
break;
case AP_RESPONSE_OTHERWISE_CHANGED:
/* We could not modify IRQ settings: clear new configuration */
- vfio_unpin_pages(&q->matrix_mdev->vdev, &g_pfn, 1);
+ vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
kvm_s390_gisc_unregister(kvm, isc);
break;
default:
@@ -406,10 +547,12 @@ static int handle_pqap(struct kvm_vcpu *vcpu)
return -EOPNOTSUPP;
}
- mutex_lock(&matrix_dev->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+
if (!vcpu->kvm->arch.crypto.pqap_hook) {
VFIO_AP_DBF_WARN("%s: PQAP(AQIC) hook not registered with the vfio_ap driver: apqn=0x%04x\n",
__func__, apqn);
+
goto out_unlock;
}
@@ -425,7 +568,7 @@ static int handle_pqap(struct kvm_vcpu *vcpu)
goto out_unlock;
}
- q = vfio_ap_get_queue(matrix_mdev, apqn);
+ q = vfio_ap_mdev_get_queue(matrix_mdev, apqn);
if (!q) {
VFIO_AP_DBF_WARN("%s: Queue %02x.%04x not bound to the vfio_ap driver\n",
__func__, AP_QID_CARD(apqn),
@@ -444,7 +587,7 @@ static int handle_pqap(struct kvm_vcpu *vcpu)
out_unlock:
memcpy(&vcpu->run->s.regs.gprs[1], &qstatus, sizeof(qstatus));
vcpu->run->s.regs.gprs[1] >>= 32;
- mutex_unlock(&matrix_dev->lock);
+ mutex_unlock(&matrix_dev->mdevs_lock);
return 0;
}
@@ -456,6 +599,91 @@ static void vfio_ap_matrix_init(struct ap_config_info *info,
matrix->adm_max = info->apxa ? info->Nd : 15;
}
+static void vfio_ap_mdev_update_guest_apcb(struct ap_matrix_mdev *matrix_mdev)
+{
+ if (matrix_mdev->kvm)
+ kvm_arch_crypto_set_masks(matrix_mdev->kvm,
+ matrix_mdev->shadow_apcb.apm,
+ matrix_mdev->shadow_apcb.aqm,
+ matrix_mdev->shadow_apcb.adm);
+}
+
+static bool vfio_ap_mdev_filter_cdoms(struct ap_matrix_mdev *matrix_mdev)
+{
+ DECLARE_BITMAP(prev_shadow_adm, AP_DOMAINS);
+
+ bitmap_copy(prev_shadow_adm, matrix_mdev->shadow_apcb.adm, AP_DOMAINS);
+ bitmap_and(matrix_mdev->shadow_apcb.adm, matrix_mdev->matrix.adm,
+ (unsigned long *)matrix_dev->info.adm, AP_DOMAINS);
+
+ return !bitmap_equal(prev_shadow_adm, matrix_mdev->shadow_apcb.adm,
+ AP_DOMAINS);
+}
+
+/*
+ * vfio_ap_mdev_filter_matrix - filter the APQNs assigned to the matrix mdev
+ * to ensure no queue devices are passed through to
+ * the guest that are not bound to the vfio_ap
+ * device driver.
+ *
+ * @matrix_mdev: the matrix mdev whose matrix is to be filtered.
+ *
+ * Note: If an APQN references a queue device that is not bound to the vfio_ap
+ * driver, its APID will be filtered from the guest's APCB. The matrix
+ * structure precludes filtering an individual APQN, so its APID will be
+ * filtered.
+ *
+ * Return: a boolean value indicating whether the KVM guest's APCB was changed
+ * by the filtering or not.
+ */
+static bool vfio_ap_mdev_filter_matrix(unsigned long *apm, unsigned long *aqm,
+ struct ap_matrix_mdev *matrix_mdev)
+{
+ unsigned long apid, apqi, apqn;
+ DECLARE_BITMAP(prev_shadow_apm, AP_DEVICES);
+ DECLARE_BITMAP(prev_shadow_aqm, AP_DOMAINS);
+ struct vfio_ap_queue *q;
+
+ bitmap_copy(prev_shadow_apm, matrix_mdev->shadow_apcb.apm, AP_DEVICES);
+ bitmap_copy(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS);
+ vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb);
+
+ /*
+ * Copy the adapters, domains and control domains to the shadow_apcb
+ * from the matrix mdev, but only those that are assigned to the host's
+ * AP configuration.
+ */
+ bitmap_and(matrix_mdev->shadow_apcb.apm, matrix_mdev->matrix.apm,
+ (unsigned long *)matrix_dev->info.apm, AP_DEVICES);
+ bitmap_and(matrix_mdev->shadow_apcb.aqm, matrix_mdev->matrix.aqm,
+ (unsigned long *)matrix_dev->info.aqm, AP_DOMAINS);
+
+ for_each_set_bit_inv(apid, apm, AP_DEVICES) {
+ for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) {
+ /*
+ * If the APQN is not bound to the vfio_ap device
+ * driver, then we can't assign it to the guest's
+ * AP configuration. The AP architecture won't
+ * allow filtering of a single APQN, so let's filter
+ * the APID since an adapter represents a physical
+ * hardware device.
+ */
+ apqn = AP_MKQID(apid, apqi);
+ q = vfio_ap_mdev_get_queue(matrix_mdev, apqn);
+ if (!q || q->reset_rc) {
+ clear_bit_inv(apid,
+ matrix_mdev->shadow_apcb.apm);
+ break;
+ }
+ }
+ }
+
+ return !bitmap_equal(prev_shadow_apm, matrix_mdev->shadow_apcb.apm,
+ AP_DEVICES) ||
+ !bitmap_equal(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm,
+ AP_DOMAINS);
+}
+
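
The filtering logic can be illustrated with a short user-space sketch (not part of the patch): intersect the mdev-assigned masks with the host configuration, then drop any adapter that has an assigned domain whose queue is not bound to the driver. Plain bool arrays stand in for the kernel bitmaps, and queue_bound() is a hypothetical stand-in for the vfio_ap_mdev_get_queue() lookup.

#include <stdbool.h>
#include <stdio.h>

#define NR_ADAPTERS 8
#define NR_DOMAINS  8

static bool queue_bound(int apid, int apqi)
{
        return !(apid == 2 && apqi == 1);   /* pretend 02.0001 is unbound */
}

static void filter_matrix(const bool assigned_apm[NR_ADAPTERS],
                          const bool assigned_aqm[NR_DOMAINS],
                          const bool host_apm[NR_ADAPTERS],
                          const bool host_aqm[NR_DOMAINS],
                          bool shadow_apm[NR_ADAPTERS],
                          bool shadow_aqm[NR_DOMAINS])
{
        int apid, apqi;

        /* Keep only what is assigned to both the mdev and the host config. */
        for (apid = 0; apid < NR_ADAPTERS; apid++)
                shadow_apm[apid] = assigned_apm[apid] && host_apm[apid];
        for (apqi = 0; apqi < NR_DOMAINS; apqi++)
                shadow_aqm[apqi] = assigned_aqm[apqi] && host_aqm[apqi];

        for (apid = 0; apid < NR_ADAPTERS; apid++) {
                if (!shadow_apm[apid])
                        continue;
                for (apqi = 0; apqi < NR_DOMAINS; apqi++) {
                        if (shadow_aqm[apqi] && !queue_bound(apid, apqi)) {
                                /* can't filter one APQN, so drop the APID */
                                shadow_apm[apid] = false;
                                break;
                        }
                }
        }
}

int main(void)
{
        bool apm[NR_ADAPTERS] = { [1] = true, [2] = true };
        bool aqm[NR_DOMAINS]  = { [1] = true };
        bool host_apm[NR_ADAPTERS] = { [1] = true, [2] = true };
        bool host_aqm[NR_DOMAINS]  = { [1] = true };
        bool s_apm[NR_ADAPTERS] = { false }, s_aqm[NR_DOMAINS] = { false };
        int apid;

        filter_matrix(apm, aqm, host_apm, host_aqm, s_apm, s_aqm);
        for (apid = 0; apid < NR_ADAPTERS; apid++)
                if (s_apm[apid])
                        printf("adapter %02x passed through\n", apid);
        return 0;
}
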
static int vfio_ap_mdev_probe(struct mdev_device *mdev)
{
struct ap_matrix_mdev *matrix_mdev;
@@ -475,20 +703,19 @@ static int vfio_ap_mdev_probe(struct mdev_device *mdev)
matrix_mdev->mdev = mdev;
vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
matrix_mdev->pqap_hook = handle_pqap;
- mutex_lock(&matrix_dev->lock);
- list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
- mutex_unlock(&matrix_dev->lock);
+ vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb);
+ hash_init(matrix_mdev->qtable.queues);
ret = vfio_register_emulated_iommu_dev(&matrix_mdev->vdev);
if (ret)
goto err_list;
dev_set_drvdata(&mdev->dev, matrix_mdev);
+ mutex_lock(&matrix_dev->mdevs_lock);
+ list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
+ mutex_unlock(&matrix_dev->mdevs_lock);
return 0;
err_list:
- mutex_lock(&matrix_dev->lock);
- list_del(&matrix_mdev->node);
- mutex_unlock(&matrix_dev->lock);
vfio_uninit_group_dev(&matrix_mdev->vdev);
kfree(matrix_mdev);
err_dec_available:
@@ -496,16 +723,62 @@ err_dec_available:
return ret;
}
+static void vfio_ap_mdev_link_queue(struct ap_matrix_mdev *matrix_mdev,
+ struct vfio_ap_queue *q)
+{
+ if (q) {
+ q->matrix_mdev = matrix_mdev;
+ hash_add(matrix_mdev->qtable.queues, &q->mdev_qnode, q->apqn);
+ }
+}
+
+static void vfio_ap_mdev_link_apqn(struct ap_matrix_mdev *matrix_mdev, int apqn)
+{
+ struct vfio_ap_queue *q;
+
+ q = vfio_ap_find_queue(apqn);
+ vfio_ap_mdev_link_queue(matrix_mdev, q);
+}
+
+static void vfio_ap_unlink_queue_fr_mdev(struct vfio_ap_queue *q)
+{
+ hash_del(&q->mdev_qnode);
+}
+
+static void vfio_ap_unlink_mdev_fr_queue(struct vfio_ap_queue *q)
+{
+ q->matrix_mdev = NULL;
+}
+
+static void vfio_ap_mdev_unlink_fr_queues(struct ap_matrix_mdev *matrix_mdev)
+{
+ struct vfio_ap_queue *q;
+ unsigned long apid, apqi;
+
+ for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) {
+ for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
+ AP_DOMAINS) {
+ q = vfio_ap_mdev_get_queue(matrix_mdev,
+ AP_MKQID(apid, apqi));
+ if (q)
+ q->matrix_mdev = NULL;
+ }
+ }
+}
+
static void vfio_ap_mdev_remove(struct mdev_device *mdev)
{
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(&mdev->dev);
vfio_unregister_group_dev(&matrix_mdev->vdev);
- mutex_lock(&matrix_dev->lock);
- vfio_ap_mdev_reset_queues(matrix_mdev);
+ mutex_lock(&matrix_dev->guests_lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+ vfio_ap_mdev_reset_queues(&matrix_mdev->qtable);
+ vfio_ap_mdev_unlink_fr_queues(matrix_mdev);
list_del(&matrix_mdev->node);
- mutex_unlock(&matrix_dev->lock);
+ mutex_unlock(&matrix_dev->mdevs_lock);
+ mutex_unlock(&matrix_dev->guests_lock);
vfio_uninit_group_dev(&matrix_mdev->vdev);
kfree(matrix_mdev);
atomic_inc(&matrix_dev->available_instances);
@@ -554,141 +827,48 @@ static struct attribute_group *vfio_ap_mdev_type_groups[] = {
NULL,
};
-struct vfio_ap_queue_reserved {
- unsigned long *apid;
- unsigned long *apqi;
- bool reserved;
-};
-
-/**
- * vfio_ap_has_queue - determines if the AP queue containing the target in @data
- *
- * @dev: an AP queue device
- * @data: a struct vfio_ap_queue_reserved reference
- *
- * Flags whether the AP queue device (@dev) has a queue ID containing the APQN,
- * apid or apqi specified in @data:
- *
- * - If @data contains both an apid and apqi value, then @data will be flagged
- * as reserved if the APID and APQI fields for the AP queue device matches
- *
- * - If @data contains only an apid value, @data will be flagged as
- * reserved if the APID field in the AP queue device matches
- *
- * - If @data contains only an apqi value, @data will be flagged as
- * reserved if the APQI field in the AP queue device matches
- *
- * Return: 0 to indicate the input to function succeeded. Returns -EINVAL if
- * @data does not contain either an apid or apqi.
- */
-static int vfio_ap_has_queue(struct device *dev, void *data)
-{
- struct vfio_ap_queue_reserved *qres = data;
- struct ap_queue *ap_queue = to_ap_queue(dev);
- ap_qid_t qid;
- unsigned long id;
-
- if (qres->apid && qres->apqi) {
- qid = AP_MKQID(*qres->apid, *qres->apqi);
- if (qid == ap_queue->qid)
- qres->reserved = true;
- } else if (qres->apid && !qres->apqi) {
- id = AP_QID_CARD(ap_queue->qid);
- if (id == *qres->apid)
- qres->reserved = true;
- } else if (!qres->apid && qres->apqi) {
- id = AP_QID_QUEUE(ap_queue->qid);
- if (id == *qres->apqi)
- qres->reserved = true;
- } else {
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * vfio_ap_verify_queue_reserved - verifies that the AP queue containing
- * @apid or @aqpi is reserved
- *
- * @apid: an AP adapter ID
- * @apqi: an AP queue index
- *
- * Verifies that the AP queue with @apid/@apqi is reserved by the VFIO AP device
- * driver according to the following rules:
- *
- * - If both @apid and @apqi are not NULL, then there must be an AP queue
- * device bound to the vfio_ap driver with the APQN identified by @apid and
- * @apqi
- *
- * - If only @apid is not NULL, then there must be an AP queue device bound
- * to the vfio_ap driver with an APQN containing @apid
- *
- * - If only @apqi is not NULL, then there must be an AP queue device bound
- * to the vfio_ap driver with an APQN containing @apqi
- *
- * Return: 0 if the AP queue is reserved; otherwise, returns -EADDRNOTAVAIL.
- */
-static int vfio_ap_verify_queue_reserved(unsigned long *apid,
- unsigned long *apqi)
-{
- int ret;
- struct vfio_ap_queue_reserved qres;
-
- qres.apid = apid;
- qres.apqi = apqi;
- qres.reserved = false;
-
- ret = driver_for_each_device(&matrix_dev->vfio_ap_drv->driver, NULL,
- &qres, vfio_ap_has_queue);
- if (ret)
- return ret;
-
- if (qres.reserved)
- return 0;
-
- return -EADDRNOTAVAIL;
-}
+#define MDEV_SHARING_ERR "Userspace may not re-assign queue %02lx.%04lx " \
+ "already assigned to %s"
-static int
-vfio_ap_mdev_verify_queues_reserved_for_apid(struct ap_matrix_mdev *matrix_mdev,
- unsigned long apid)
+static void vfio_ap_mdev_log_sharing_err(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long *apm,
+ unsigned long *aqm)
{
- int ret;
- unsigned long apqi;
- unsigned long nbits = matrix_mdev->matrix.aqm_max + 1;
-
- if (find_first_bit_inv(matrix_mdev->matrix.aqm, nbits) >= nbits)
- return vfio_ap_verify_queue_reserved(&apid, NULL);
-
- for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, nbits) {
- ret = vfio_ap_verify_queue_reserved(&apid, &apqi);
- if (ret)
- return ret;
- }
+ unsigned long apid, apqi;
+ const struct device *dev = mdev_dev(matrix_mdev->mdev);
+ const char *mdev_name = dev_name(dev);
- return 0;
+ for_each_set_bit_inv(apid, apm, AP_DEVICES)
+ for_each_set_bit_inv(apqi, aqm, AP_DOMAINS)
+ dev_warn(dev, MDEV_SHARING_ERR, apid, apqi, mdev_name);
}
/**
- * vfio_ap_mdev_verify_no_sharing - verifies that the AP matrix is not configured
+ * vfio_ap_mdev_verify_no_sharing - verify APQNs are not shared by matrix mdevs
*
- * @matrix_mdev: the mediated matrix device
+ * @mdev_apm: mask indicating the APIDs of the APQNs to be verified
+ * @mdev_aqm: mask indicating the APQIs of the APQNs to be verified
*
- * Verifies that the APQNs derived from the cross product of the AP adapter IDs
- * and AP queue indexes comprising the AP matrix are not configured for another
+ * Verifies that each APQN derived from the Cartesian product of a bitmap of
+ * AP adapter IDs and AP queue indexes is not configured for any matrix
* mediated device. AP queue sharing is not allowed.
*
- * Return: 0 if the APQNs are not shared; otherwise returns -EADDRINUSE.
+ * Return: 0 if the APQNs are not shared; otherwise return -EADDRINUSE.
*/
-static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
+static int vfio_ap_mdev_verify_no_sharing(unsigned long *mdev_apm,
+ unsigned long *mdev_aqm)
{
- struct ap_matrix_mdev *lstdev;
+ struct ap_matrix_mdev *matrix_mdev;
DECLARE_BITMAP(apm, AP_DEVICES);
DECLARE_BITMAP(aqm, AP_DOMAINS);
- list_for_each_entry(lstdev, &matrix_dev->mdev_list, node) {
- if (matrix_mdev == lstdev)
+ list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
+ /*
+ * If the input apm and aqm are fields of the matrix_mdev
+ * object, then move on to the next matrix_mdev.
+ */
+ if (mdev_apm == matrix_mdev->matrix.apm &&
+ mdev_aqm == matrix_mdev->matrix.aqm)
continue;
memset(apm, 0, sizeof(apm));
@@ -698,14 +878,16 @@ static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
* We work on full longs, as we can only exclude the leftover
* bits in non-inverse order. The leftover is all zeros.
*/
- if (!bitmap_and(apm, matrix_mdev->matrix.apm,
- lstdev->matrix.apm, AP_DEVICES))
+ if (!bitmap_and(apm, mdev_apm, matrix_mdev->matrix.apm,
+ AP_DEVICES))
continue;
- if (!bitmap_and(aqm, matrix_mdev->matrix.aqm,
- lstdev->matrix.aqm, AP_DOMAINS))
+ if (!bitmap_and(aqm, mdev_aqm, matrix_mdev->matrix.aqm,
+ AP_DOMAINS))
continue;
+ vfio_ap_mdev_log_sharing_err(matrix_mdev, apm, aqm);
+
return -EADDRINUSE;
}
@@ -713,6 +895,41 @@ static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
}
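
The sharing rule verified above reduces to a pair of mask intersections: two matrix mdevs share an APQN exactly when both their adapter masks and their domain masks overlap. A minimal user-space sketch (not part of the patch), with 64-bit masks standing in for the kernel bitmaps and all names invented:

#include <stdint.h>
#include <stdio.h>

struct toy_mdev {
        uint64_t apm;   /* bit n set => adapter n assigned */
        uint64_t aqm;   /* bit n set => domain n assigned  */
};

/* Return 0 when no APQN is shared, -1 (like -EADDRINUSE) otherwise. */
static int verify_no_sharing(const struct toy_mdev *mdev,
                             const struct toy_mdev *others, int nr_others)
{
        int i;

        for (i = 0; i < nr_others; i++) {
                if (&others[i] == mdev)
                        continue;
                if ((mdev->apm & others[i].apm) &&
                    (mdev->aqm & others[i].aqm))
                        return -1;
        }
        return 0;
}

int main(void)
{
        struct toy_mdev mdevs[2] = {
                { .apm = 1ULL << 4, .aqm = 1ULL << 1 }, /* 04.0001 */
                { .apm = 1ULL << 4, .aqm = 1ULL << 2 }, /* 04.0002 */
        };

        /* Same adapter, different domains: no APQN is shared, prints 0. */
        printf("%d\n", verify_no_sharing(&mdevs[0], mdevs, 2));
        return 0;
}
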
/**
+ * vfio_ap_mdev_validate_masks - verify that the APQNs assigned to the mdev are
+ * not reserved for the default zcrypt driver and
+ * are not assigned to another mdev.
+ *
+ * @matrix_mdev: the mdev to which the APQNs being validated are assigned.
+ *
+ * Return: One of the following values:
+ * o the error returned from the ap_apqn_in_matrix_owned_by_def_drv() function,
+ * most likely -EBUSY indicating the ap_perms_mutex lock is already held.
+ * o EADDRNOTAVAIL if an APQN assigned to @matrix_mdev is reserved for the
+ * zcrypt default driver.
+ * o EADDRINUSE if an APQN assigned to @matrix_mdev is assigned to another mdev
+ * o A zero indicating validation succeeded.
+ */
+static int vfio_ap_mdev_validate_masks(struct ap_matrix_mdev *matrix_mdev)
+{
+ if (ap_apqn_in_matrix_owned_by_def_drv(matrix_mdev->matrix.apm,
+ matrix_mdev->matrix.aqm))
+ return -EADDRNOTAVAIL;
+
+ return vfio_ap_mdev_verify_no_sharing(matrix_mdev->matrix.apm,
+ matrix_mdev->matrix.aqm);
+}
+
+static void vfio_ap_mdev_link_adapter(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apid)
+{
+ unsigned long apqi;
+
+ for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS)
+ vfio_ap_mdev_link_apqn(matrix_mdev,
+ AP_MKQID(apid, apqi));
+}
+
+/**
* assign_adapter_store - parses the APID from @buf and sets the
* corresponding bit in the mediated matrix device's APM
*
@@ -741,6 +958,10 @@ static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
* An APQN derived from the cross product of the APID being assigned
* and the APQIs previously assigned is being used by another mediated
* matrix device
+ *
+ * 5. -EAGAIN
+ * A lock required to validate the mdev's AP configuration could not
+ * be obtained.
*/
static ssize_t assign_adapter_store(struct device *dev,
struct device_attribute *attr,
@@ -748,15 +969,11 @@ static ssize_t assign_adapter_store(struct device *dev,
{
int ret;
unsigned long apid;
+ DECLARE_BITMAP(apm_delta, AP_DEVICES);
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
- mutex_lock(&matrix_dev->lock);
-
- /* If the KVM guest is running, disallow assignment of adapter */
- if (matrix_mdev->kvm) {
- ret = -EBUSY;
- goto done;
- }
+ mutex_lock(&ap_perms_mutex);
+ get_update_locks_for_mdev(matrix_mdev);
ret = kstrtoul(buf, 0, &apid);
if (ret)
@@ -767,33 +984,97 @@ static ssize_t assign_adapter_store(struct device *dev,
goto done;
}
- /*
- * Set the bit in the AP mask (APM) corresponding to the AP adapter
- * number (APID). The bits in the mask, from most significant to least
- * significant bit, correspond to APIDs 0-255.
- */
- ret = vfio_ap_mdev_verify_queues_reserved_for_apid(matrix_mdev, apid);
- if (ret)
+ set_bit_inv(apid, matrix_mdev->matrix.apm);
+
+ ret = vfio_ap_mdev_validate_masks(matrix_mdev);
+ if (ret) {
+ clear_bit_inv(apid, matrix_mdev->matrix.apm);
goto done;
+ }
- set_bit_inv(apid, matrix_mdev->matrix.apm);
+ vfio_ap_mdev_link_adapter(matrix_mdev, apid);
+ memset(apm_delta, 0, sizeof(apm_delta));
+ set_bit_inv(apid, apm_delta);
- ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev);
- if (ret)
- goto share_err;
+ if (vfio_ap_mdev_filter_matrix(apm_delta,
+ matrix_mdev->matrix.aqm, matrix_mdev))
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
ret = count;
- goto done;
-
-share_err:
- clear_bit_inv(apid, matrix_mdev->matrix.apm);
done:
- mutex_unlock(&matrix_dev->lock);
+ release_update_locks_for_mdev(matrix_mdev);
+ mutex_unlock(&ap_perms_mutex);
return ret;
}
static DEVICE_ATTR_WO(assign_adapter);
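
From user space, assignment goes through the mdev's sysfs attributes; a usage sketch (not part of the patch) that writes an adapter number to assign_adapter follows. The mdev UUID and the exact sysfs path are placeholders, and the write may fail with the errors listed in the kernel-doc above, for example EADDRINUSE when another mdev already owns an overlapping APQN.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        /* Placeholder path and UUID; adjust for a real system. */
        const char *attr =
                "/sys/devices/vfio_ap/matrix/"
                "11111111-2222-3333-4444-555555555555/assign_adapter";
        const char *apid = "0x05\n";
        int fd = open(attr, O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, apid, strlen(apid)) < 0)
                perror("write");   /* e.g. EADDRINUSE, EADDRNOTAVAIL, ... */
        close(fd);
        return 0;
}
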
+static struct vfio_ap_queue
+*vfio_ap_unlink_apqn_fr_mdev(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apid, unsigned long apqi)
+{
+ struct vfio_ap_queue *q = NULL;
+
+ q = vfio_ap_mdev_get_queue(matrix_mdev, AP_MKQID(apid, apqi));
+ /* If the queue is assigned to the matrix mdev, unlink it. */
+ if (q)
+ vfio_ap_unlink_queue_fr_mdev(q);
+
+ return q;
+}
+
+/**
+ * vfio_ap_mdev_unlink_adapter - unlink all queues associated with unassigned
+ * adapter from the matrix mdev to which the
+ * adapter was assigned.
+ * @matrix_mdev: the matrix mediated device to which the adapter was assigned.
+ * @apid: the APID of the unassigned adapter.
+ * @qtable: table for storing queues associated with unassigned adapter.
+ */
+static void vfio_ap_mdev_unlink_adapter(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apid,
+ struct ap_queue_table *qtable)
+{
+ unsigned long apqi;
+ struct vfio_ap_queue *q;
+
+ for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS) {
+ q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi);
+
+ if (q && qtable) {
+ if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
+ test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
+ hash_add(qtable->queues, &q->mdev_qnode,
+ q->apqn);
+ }
+ }
+}
+
+static void vfio_ap_mdev_hot_unplug_adapter(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apid)
+{
+ int loop_cursor;
+ struct vfio_ap_queue *q;
+ struct ap_queue_table *qtable = kzalloc(sizeof(*qtable), GFP_KERNEL);
+
+ hash_init(qtable->queues);
+ vfio_ap_mdev_unlink_adapter(matrix_mdev, apid, qtable);
+
+ if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm)) {
+ clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ }
+
+ vfio_ap_mdev_reset_queues(qtable);
+
+ hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
+ vfio_ap_unlink_mdev_fr_queue(q);
+ hash_del(&q->mdev_qnode);
+ }
+
+ kfree(qtable);
+}
+
/**
* unassign_adapter_store - parses the APID from @buf and clears the
* corresponding bit in the mediated matrix device's APM
@@ -817,13 +1098,7 @@ static ssize_t unassign_adapter_store(struct device *dev,
unsigned long apid;
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
- mutex_lock(&matrix_dev->lock);
-
- /* If the KVM guest is running, disallow unassignment of adapter */
- if (matrix_mdev->kvm) {
- ret = -EBUSY;
- goto done;
- }
+ get_update_locks_for_mdev(matrix_mdev);
ret = kstrtoul(buf, 0, &apid);
if (ret)
@@ -835,31 +1110,22 @@ static ssize_t unassign_adapter_store(struct device *dev,
}
clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm);
+ vfio_ap_mdev_hot_unplug_adapter(matrix_mdev, apid);
ret = count;
done:
- mutex_unlock(&matrix_dev->lock);
+ release_update_locks_for_mdev(matrix_mdev);
return ret;
}
static DEVICE_ATTR_WO(unassign_adapter);
-static int
-vfio_ap_mdev_verify_queues_reserved_for_apqi(struct ap_matrix_mdev *matrix_mdev,
- unsigned long apqi)
+static void vfio_ap_mdev_link_domain(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apqi)
{
- int ret;
unsigned long apid;
- unsigned long nbits = matrix_mdev->matrix.apm_max + 1;
-
- if (find_first_bit_inv(matrix_mdev->matrix.apm, nbits) >= nbits)
- return vfio_ap_verify_queue_reserved(NULL, &apqi);
- for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, nbits) {
- ret = vfio_ap_verify_queue_reserved(&apid, &apqi);
- if (ret)
- return ret;
- }
-
- return 0;
+ for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES)
+ vfio_ap_mdev_link_apqn(matrix_mdev,
+ AP_MKQID(apid, apqi));
}
/**
@@ -891,6 +1157,10 @@ vfio_ap_mdev_verify_queues_reserved_for_apqi(struct ap_matrix_mdev *matrix_mdev,
* An APQN derived from the cross product of the APQI being assigned
* and the APIDs previously assigned is being used by another mediated
* matrix device
+ *
+ * 5. -EAGAIN
+ * The lock required to validate the mdev's AP configuration could not
+ * be obtained.
*/
static ssize_t assign_domain_store(struct device *dev,
struct device_attribute *attr,
@@ -898,47 +1168,89 @@ static ssize_t assign_domain_store(struct device *dev,
{
int ret;
unsigned long apqi;
+ DECLARE_BITMAP(aqm_delta, AP_DOMAINS);
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
- unsigned long max_apqi = matrix_mdev->matrix.aqm_max;
-
- mutex_lock(&matrix_dev->lock);
- /* If the KVM guest is running, disallow assignment of domain */
- if (matrix_mdev->kvm) {
- ret = -EBUSY;
- goto done;
- }
+ mutex_lock(&ap_perms_mutex);
+ get_update_locks_for_mdev(matrix_mdev);
ret = kstrtoul(buf, 0, &apqi);
if (ret)
goto done;
- if (apqi > max_apqi) {
+
+ if (apqi > matrix_mdev->matrix.aqm_max) {
ret = -ENODEV;
goto done;
}
- ret = vfio_ap_mdev_verify_queues_reserved_for_apqi(matrix_mdev, apqi);
- if (ret)
+ set_bit_inv(apqi, matrix_mdev->matrix.aqm);
+
+ ret = vfio_ap_mdev_validate_masks(matrix_mdev);
+ if (ret) {
+ clear_bit_inv(apqi, matrix_mdev->matrix.aqm);
goto done;
+ }
- set_bit_inv(apqi, matrix_mdev->matrix.aqm);
+ vfio_ap_mdev_link_domain(matrix_mdev, apqi);
+ memset(aqm_delta, 0, sizeof(aqm_delta));
+ set_bit_inv(apqi, aqm_delta);
- ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev);
- if (ret)
- goto share_err;
+ if (vfio_ap_mdev_filter_matrix(matrix_mdev->matrix.apm, aqm_delta,
+ matrix_mdev))
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
ret = count;
- goto done;
-
-share_err:
- clear_bit_inv(apqi, matrix_mdev->matrix.aqm);
done:
- mutex_unlock(&matrix_dev->lock);
+ release_update_locks_for_mdev(matrix_mdev);
+ mutex_unlock(&ap_perms_mutex);
return ret;
}
static DEVICE_ATTR_WO(assign_domain);
+static void vfio_ap_mdev_unlink_domain(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apqi,
+ struct ap_queue_table *qtable)
+{
+ unsigned long apid;
+ struct vfio_ap_queue *q;
+
+ for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) {
+ q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi);
+
+ if (q && qtable) {
+ if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
+ test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
+ hash_add(qtable->queues, &q->mdev_qnode,
+ q->apqn);
+ }
+ }
+}
+
+static void vfio_ap_mdev_hot_unplug_domain(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apqi)
+{
+ int loop_cursor;
+ struct vfio_ap_queue *q;
+ struct ap_queue_table *qtable = kzalloc(sizeof(*qtable), GFP_KERNEL);
+
+ hash_init(qtable->queues);
+ vfio_ap_mdev_unlink_domain(matrix_mdev, apqi, qtable);
+
+ if (test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
+ clear_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm);
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ }
+
+ vfio_ap_mdev_reset_queues(qtable);
+
+ hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
+ vfio_ap_unlink_mdev_fr_queue(q);
+ hash_del(&q->mdev_qnode);
+ }
+
+ kfree(qtable);
+}
/**
* unassign_domain_store - parses the APQI from @buf and clears the
@@ -963,13 +1275,7 @@ static ssize_t unassign_domain_store(struct device *dev,
unsigned long apqi;
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
- mutex_lock(&matrix_dev->lock);
-
- /* If the KVM guest is running, disallow unassignment of domain */
- if (matrix_mdev->kvm) {
- ret = -EBUSY;
- goto done;
- }
+ get_update_locks_for_mdev(matrix_mdev);
ret = kstrtoul(buf, 0, &apqi);
if (ret)
@@ -981,10 +1287,11 @@ static ssize_t unassign_domain_store(struct device *dev,
}
clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm);
+ vfio_ap_mdev_hot_unplug_domain(matrix_mdev, apqi);
ret = count;
done:
- mutex_unlock(&matrix_dev->lock);
+ release_update_locks_for_mdev(matrix_mdev);
return ret;
}
static DEVICE_ATTR_WO(unassign_domain);
@@ -1011,13 +1318,7 @@ static ssize_t assign_control_domain_store(struct device *dev,
unsigned long id;
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
- mutex_lock(&matrix_dev->lock);
-
- /* If the KVM guest is running, disallow assignment of control domain */
- if (matrix_mdev->kvm) {
- ret = -EBUSY;
- goto done;
- }
+ get_update_locks_for_mdev(matrix_mdev);
ret = kstrtoul(buf, 0, &id);
if (ret)
@@ -1034,9 +1335,12 @@ static ssize_t assign_control_domain_store(struct device *dev,
* number of control domains that can be assigned.
*/
set_bit_inv(id, matrix_mdev->matrix.adm);
+ if (vfio_ap_mdev_filter_cdoms(matrix_mdev))
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+
ret = count;
done:
- mutex_unlock(&matrix_dev->lock);
+ release_update_locks_for_mdev(matrix_mdev);
return ret;
}
static DEVICE_ATTR_WO(assign_control_domain);
@@ -1062,28 +1366,28 @@ static ssize_t unassign_control_domain_store(struct device *dev,
int ret;
unsigned long domid;
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
- unsigned long max_domid = matrix_mdev->matrix.adm_max;
-
- mutex_lock(&matrix_dev->lock);
- /* If a KVM guest is running, disallow unassignment of control domain */
- if (matrix_mdev->kvm) {
- ret = -EBUSY;
- goto done;
- }
+ get_update_locks_for_mdev(matrix_mdev);
ret = kstrtoul(buf, 0, &domid);
if (ret)
goto done;
- if (domid > max_domid) {
+
+ if (domid > matrix_mdev->matrix.adm_max) {
ret = -ENODEV;
goto done;
}
clear_bit_inv(domid, matrix_mdev->matrix.adm);
+
+ if (test_bit_inv(domid, matrix_mdev->shadow_apcb.adm)) {
+ clear_bit_inv(domid, matrix_mdev->shadow_apcb.adm);
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ }
+
ret = count;
done:
- mutex_unlock(&matrix_dev->lock);
+ release_update_locks_for_mdev(matrix_mdev);
return ret;
}
static DEVICE_ATTR_WO(unassign_control_domain);
@@ -1099,40 +1403,36 @@ static ssize_t control_domains_show(struct device *dev,
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
unsigned long max_domid = matrix_mdev->matrix.adm_max;
- mutex_lock(&matrix_dev->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
for_each_set_bit_inv(id, matrix_mdev->matrix.adm, max_domid + 1) {
n = sprintf(bufpos, "%04lx\n", id);
bufpos += n;
nchars += n;
}
- mutex_unlock(&matrix_dev->lock);
+ mutex_unlock(&matrix_dev->mdevs_lock);
return nchars;
}
static DEVICE_ATTR_RO(control_domains);
-static ssize_t matrix_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t vfio_ap_mdev_matrix_show(struct ap_matrix *matrix, char *buf)
{
- struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
char *bufpos = buf;
unsigned long apid;
unsigned long apqi;
unsigned long apid1;
unsigned long apqi1;
- unsigned long napm_bits = matrix_mdev->matrix.apm_max + 1;
- unsigned long naqm_bits = matrix_mdev->matrix.aqm_max + 1;
+ unsigned long napm_bits = matrix->apm_max + 1;
+ unsigned long naqm_bits = matrix->aqm_max + 1;
int nchars = 0;
int n;
- apid1 = find_first_bit_inv(matrix_mdev->matrix.apm, napm_bits);
- apqi1 = find_first_bit_inv(matrix_mdev->matrix.aqm, naqm_bits);
-
- mutex_lock(&matrix_dev->lock);
+ apid1 = find_first_bit_inv(matrix->apm, napm_bits);
+ apqi1 = find_first_bit_inv(matrix->aqm, naqm_bits);
if ((apid1 < napm_bits) && (apqi1 < naqm_bits)) {
- for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, napm_bits) {
- for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
+ for_each_set_bit_inv(apid, matrix->apm, napm_bits) {
+ for_each_set_bit_inv(apqi, matrix->aqm,
naqm_bits) {
n = sprintf(bufpos, "%02lx.%04lx\n", apid,
apqi);
@@ -1141,25 +1441,50 @@ static ssize_t matrix_show(struct device *dev, struct device_attribute *attr,
}
}
} else if (apid1 < napm_bits) {
- for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, napm_bits) {
+ for_each_set_bit_inv(apid, matrix->apm, napm_bits) {
n = sprintf(bufpos, "%02lx.\n", apid);
bufpos += n;
nchars += n;
}
} else if (apqi1 < naqm_bits) {
- for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, naqm_bits) {
+ for_each_set_bit_inv(apqi, matrix->aqm, naqm_bits) {
n = sprintf(bufpos, ".%04lx\n", apqi);
bufpos += n;
nchars += n;
}
}
- mutex_unlock(&matrix_dev->lock);
+ return nchars;
+}
+
+static ssize_t matrix_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t nchars;
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
+
+ mutex_lock(&matrix_dev->mdevs_lock);
+ nchars = vfio_ap_mdev_matrix_show(&matrix_mdev->matrix, buf);
+ mutex_unlock(&matrix_dev->mdevs_lock);
return nchars;
}
static DEVICE_ATTR_RO(matrix);
+static ssize_t guest_matrix_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t nchars;
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
+
+ mutex_lock(&matrix_dev->mdevs_lock);
+ nchars = vfio_ap_mdev_matrix_show(&matrix_mdev->shadow_apcb, buf);
+ mutex_unlock(&matrix_dev->mdevs_lock);
+
+ return nchars;
+}
+static DEVICE_ATTR_RO(guest_matrix);
+
static struct attribute *vfio_ap_mdev_attrs[] = {
&dev_attr_assign_adapter.attr,
&dev_attr_unassign_adapter.attr,
@@ -1169,6 +1494,7 @@ static struct attribute *vfio_ap_mdev_attrs[] = {
&dev_attr_unassign_control_domain.attr,
&dev_attr_control_domains.attr,
&dev_attr_matrix.attr,
+ &dev_attr_guest_matrix.attr,
NULL,
};
@@ -1201,59 +1527,32 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
up_write(&kvm->arch.crypto.pqap_hook_rwsem);
- mutex_lock(&kvm->lock);
- mutex_lock(&matrix_dev->lock);
+ get_update_locks_for_kvm(kvm);
list_for_each_entry(m, &matrix_dev->mdev_list, node) {
if (m != matrix_mdev && m->kvm == kvm) {
- mutex_unlock(&kvm->lock);
- mutex_unlock(&matrix_dev->lock);
+ release_update_locks_for_kvm(kvm);
return -EPERM;
}
}
kvm_get_kvm(kvm);
matrix_mdev->kvm = kvm;
- kvm_arch_crypto_set_masks(kvm,
- matrix_mdev->matrix.apm,
- matrix_mdev->matrix.aqm,
- matrix_mdev->matrix.adm);
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
- mutex_unlock(&kvm->lock);
- mutex_unlock(&matrix_dev->lock);
+ release_update_locks_for_kvm(kvm);
}
return 0;
}
-/**
- * vfio_ap_mdev_iommu_notifier - IOMMU notifier callback
- *
- * @nb: The notifier block
- * @action: Action to be taken
- * @data: data associated with the request
- *
- * For an UNMAP request, unpin the guest IOVA (the NIB guest address we
- * pinned before). Other requests are ignored.
- *
- * Return: for an UNMAP request, NOFITY_OK; otherwise NOTIFY_DONE.
- */
-static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
+static void vfio_ap_mdev_dma_unmap(struct vfio_device *vdev, u64 iova,
+ u64 length)
{
- struct ap_matrix_mdev *matrix_mdev;
-
- matrix_mdev = container_of(nb, struct ap_matrix_mdev, iommu_notifier);
-
- if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
- struct vfio_iommu_type1_dma_unmap *unmap = data;
- unsigned long g_pfn = unmap->iova >> PAGE_SHIFT;
-
- vfio_unpin_pages(&matrix_mdev->vdev, &g_pfn, 1);
- return NOTIFY_OK;
- }
+ struct ap_matrix_mdev *matrix_mdev =
+ container_of(vdev, struct ap_matrix_mdev, vdev);
- return NOTIFY_DONE;
+ vfio_unpin_pages(&matrix_mdev->vdev, iova, 1);
}
/**
@@ -1271,36 +1570,36 @@ static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
kvm->arch.crypto.pqap_hook = NULL;
up_write(&kvm->arch.crypto.pqap_hook_rwsem);
- mutex_lock(&kvm->lock);
- mutex_lock(&matrix_dev->lock);
+ get_update_locks_for_kvm(kvm);
kvm_arch_crypto_clear_masks(kvm);
- vfio_ap_mdev_reset_queues(matrix_mdev);
+ vfio_ap_mdev_reset_queues(&matrix_mdev->qtable);
kvm_put_kvm(kvm);
matrix_mdev->kvm = NULL;
- mutex_unlock(&kvm->lock);
- mutex_unlock(&matrix_dev->lock);
+ release_update_locks_for_kvm(kvm);
}
}
static struct vfio_ap_queue *vfio_ap_find_queue(int apqn)
{
- struct device *dev;
+ struct ap_queue *queue;
struct vfio_ap_queue *q = NULL;
- dev = driver_find_device(&matrix_dev->vfio_ap_drv->driver, NULL,
- &apqn, match_apqn);
- if (dev) {
- q = dev_get_drvdata(dev);
- put_device(dev);
- }
+ queue = ap_get_qdev(apqn);
+ if (!queue)
+ return NULL;
+
+ if (queue->ap_dev.device.driver == &matrix_dev->vfio_ap_drv->driver)
+ q = dev_get_drvdata(&queue->ap_dev.device);
+
+ put_device(&queue->ap_dev.device);
return q;
}
-int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q,
- unsigned int retry)
+static int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q,
+ unsigned int retry)
{
struct ap_queue_status status;
int ret;
@@ -1308,9 +1607,9 @@ int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q,
if (!q)
return 0;
-
retry_zapq:
status = ap_zapq(q->apqn);
+ q->reset_rc = status.response_code;
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
ret = 0;
@@ -1325,12 +1624,17 @@ retry_zapq:
case AP_RESPONSE_Q_NOT_AVAIL:
case AP_RESPONSE_DECONFIGURED:
case AP_RESPONSE_CHECKSTOPPED:
- WARN_ON_ONCE(status.irq_enabled);
+ WARN_ONCE(status.irq_enabled,
+ "PQAP/ZAPQ for %02x.%04x failed with rc=%u while IRQ enabled",
+ AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn),
+ status.response_code);
ret = -EBUSY;
goto free_resources;
default:
/* things are really broken, give up */
- WARN(true, "PQAP/ZAPQ completed with invalid rc (%x)\n",
+ WARN(true,
+ "PQAP/ZAPQ for %02x.%04x failed with invalid rc=%u\n",
+ AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn),
status.response_code);
return -EIO;
}
@@ -1342,7 +1646,8 @@ retry_zapq:
msleep(20);
status = ap_tapq(q->apqn, NULL);
}
- WARN_ON_ONCE(retry2 <= 0);
+ WARN_ONCE(retry2 <= 0, "unable to verify reset of queue %02x.%04x",
+ AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn));
free_resources:
vfio_ap_free_aqic_resources(q);
@@ -1350,27 +1655,20 @@ free_resources:
return ret;
}
-static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev)
+static int vfio_ap_mdev_reset_queues(struct ap_queue_table *qtable)
{
- int ret;
- int rc = 0;
- unsigned long apid, apqi;
+ int ret, loop_cursor, rc = 0;
struct vfio_ap_queue *q;
- for_each_set_bit_inv(apid, matrix_mdev->matrix.apm,
- matrix_mdev->matrix.apm_max + 1) {
- for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
- matrix_mdev->matrix.aqm_max + 1) {
- q = vfio_ap_find_queue(AP_MKQID(apid, apqi));
- ret = vfio_ap_mdev_reset_queue(q, 1);
- /*
- * Regardless whether a queue turns out to be busy, or
- * is not operational, we need to continue resetting
- * the remaining queues.
- */
- if (ret)
- rc = ret;
- }
+ hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
+ ret = vfio_ap_mdev_reset_queue(q, 1);
+ /*
+ * Regardless whether a queue turns out to be busy, or
+ * is not operational, we need to continue resetting
+ * the remaining queues.
+ */
+ if (ret)
+ rc = ret;
}
return rc;
@@ -1380,27 +1678,11 @@ static int vfio_ap_mdev_open_device(struct vfio_device *vdev)
{
struct ap_matrix_mdev *matrix_mdev =
container_of(vdev, struct ap_matrix_mdev, vdev);
- unsigned long events;
- int ret;
if (!vdev->kvm)
return -EINVAL;
- ret = vfio_ap_mdev_set_kvm(matrix_mdev, vdev->kvm);
- if (ret)
- return ret;
-
- matrix_mdev->iommu_notifier.notifier_call = vfio_ap_mdev_iommu_notifier;
- events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
- ret = vfio_register_notifier(vdev, VFIO_IOMMU_NOTIFY, &events,
- &matrix_mdev->iommu_notifier);
- if (ret)
- goto err_kvm;
- return 0;
-
-err_kvm:
- vfio_ap_mdev_unset_kvm(matrix_mdev);
- return ret;
+ return vfio_ap_mdev_set_kvm(matrix_mdev, vdev->kvm);
}
static void vfio_ap_mdev_close_device(struct vfio_device *vdev)
@@ -1408,8 +1690,6 @@ static void vfio_ap_mdev_close_device(struct vfio_device *vdev)
struct ap_matrix_mdev *matrix_mdev =
container_of(vdev, struct ap_matrix_mdev, vdev);
- vfio_unregister_notifier(vdev, VFIO_IOMMU_NOTIFY,
- &matrix_mdev->iommu_notifier);
vfio_ap_mdev_unset_kvm(matrix_mdev);
}
@@ -1440,27 +1720,84 @@ static ssize_t vfio_ap_mdev_ioctl(struct vfio_device *vdev,
container_of(vdev, struct ap_matrix_mdev, vdev);
int ret;
- mutex_lock(&matrix_dev->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
switch (cmd) {
case VFIO_DEVICE_GET_INFO:
ret = vfio_ap_mdev_get_device_info(arg);
break;
case VFIO_DEVICE_RESET:
- ret = vfio_ap_mdev_reset_queues(matrix_mdev);
+ ret = vfio_ap_mdev_reset_queues(&matrix_mdev->qtable);
break;
default:
ret = -EOPNOTSUPP;
break;
}
- mutex_unlock(&matrix_dev->lock);
+ mutex_unlock(&matrix_dev->mdevs_lock);
return ret;
}
+static struct ap_matrix_mdev *vfio_ap_mdev_for_queue(struct vfio_ap_queue *q)
+{
+ struct ap_matrix_mdev *matrix_mdev;
+ unsigned long apid = AP_QID_CARD(q->apqn);
+ unsigned long apqi = AP_QID_QUEUE(q->apqn);
+
+ list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
+ if (test_bit_inv(apid, matrix_mdev->matrix.apm) &&
+ test_bit_inv(apqi, matrix_mdev->matrix.aqm))
+ return matrix_mdev;
+ }
+
+ return NULL;
+}
+
+static ssize_t status_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t nchars = 0;
+ struct vfio_ap_queue *q;
+ struct ap_matrix_mdev *matrix_mdev;
+ struct ap_device *apdev = to_ap_dev(dev);
+
+ mutex_lock(&matrix_dev->mdevs_lock);
+ q = dev_get_drvdata(&apdev->device);
+ matrix_mdev = vfio_ap_mdev_for_queue(q);
+
+ if (matrix_mdev) {
+ if (matrix_mdev->kvm)
+ nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
+ AP_QUEUE_IN_USE);
+ else
+ nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
+ AP_QUEUE_ASSIGNED);
+ } else {
+ nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
+ AP_QUEUE_UNASSIGNED);
+ }
+
+ mutex_unlock(&matrix_dev->mdevs_lock);
+
+ return nchars;
+}
+
+static DEVICE_ATTR_RO(status);
+
+static struct attribute *vfio_queue_attrs[] = {
+ &dev_attr_status.attr,
+ NULL,
+};
+
+static const struct attribute_group vfio_queue_attr_group = {
+ .attrs = vfio_queue_attrs,
+};
+
static const struct vfio_device_ops vfio_ap_matrix_dev_ops = {
.open_device = vfio_ap_mdev_open_device,
.close_device = vfio_ap_mdev_close_device,
.ioctl = vfio_ap_mdev_ioctl,
+ .dma_unmap = vfio_ap_mdev_dma_unmap,
};
static struct mdev_driver vfio_ap_matrix_driver = {
@@ -1500,3 +1837,432 @@ void vfio_ap_mdev_unregister(void)
mdev_unregister_device(&matrix_dev->device);
mdev_unregister_driver(&vfio_ap_matrix_driver);
}
+
+int vfio_ap_mdev_probe_queue(struct ap_device *apdev)
+{
+ int ret;
+ struct vfio_ap_queue *q;
+ struct ap_matrix_mdev *matrix_mdev;
+
+ ret = sysfs_create_group(&apdev->device.kobj, &vfio_queue_attr_group);
+ if (ret)
+ return ret;
+
+ q = kzalloc(sizeof(*q), GFP_KERNEL);
+ if (!q)
+ return -ENOMEM;
+
+ q->apqn = to_ap_queue(&apdev->device)->qid;
+ q->saved_isc = VFIO_AP_ISC_INVALID;
+ matrix_mdev = get_update_locks_by_apqn(q->apqn);
+
+ if (matrix_mdev) {
+ vfio_ap_mdev_link_queue(matrix_mdev, q);
+
+ if (vfio_ap_mdev_filter_matrix(matrix_mdev->matrix.apm,
+ matrix_mdev->matrix.aqm,
+ matrix_mdev))
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ }
+ dev_set_drvdata(&apdev->device, q);
+ release_update_locks_for_mdev(matrix_mdev);
+
+ return 0;
+}
+
+void vfio_ap_mdev_remove_queue(struct ap_device *apdev)
+{
+ unsigned long apid, apqi;
+ struct vfio_ap_queue *q;
+ struct ap_matrix_mdev *matrix_mdev;
+
+ sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group);
+ q = dev_get_drvdata(&apdev->device);
+ get_update_locks_for_queue(q);
+ matrix_mdev = q->matrix_mdev;
+
+ if (matrix_mdev) {
+ vfio_ap_unlink_queue_fr_mdev(q);
+
+ apid = AP_QID_CARD(q->apqn);
+ apqi = AP_QID_QUEUE(q->apqn);
+
+ /*
+ * If the queue is assigned to the guest's APCB, then remove
+ * the adapter's APID from the APCB and hot unplug it from the guest.
+ */
+ if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
+ test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
+ clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ }
+ }
+
+ vfio_ap_mdev_reset_queue(q, 1);
+ dev_set_drvdata(&apdev->device, NULL);
+ kfree(q);
+ release_update_locks_for_mdev(matrix_mdev);
+}
+
+/**
+ * vfio_ap_mdev_resource_in_use: check whether any of a set of APQNs is
+ * assigned to a mediated device under the control
+ * of the vfio_ap device driver.
+ *
+ * @apm: a bitmap specifying a set of APIDs comprising the APQNs to check.
+ * @aqm: a bitmap specifying a set of APQIs comprising the APQNs to check.
+ *
+ * Return:
+ * * -EADDRINUSE if one or more of the APQNs specified via @apm/@aqm are
+ * assigned to a mediated device under the control of the vfio_ap
+ * device driver.
+ * * Otherwise, return 0.
+ */
+int vfio_ap_mdev_resource_in_use(unsigned long *apm, unsigned long *aqm)
+{
+ int ret;
+
+ mutex_lock(&matrix_dev->guests_lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+ ret = vfio_ap_mdev_verify_no_sharing(apm, aqm);
+ mutex_unlock(&matrix_dev->mdevs_lock);
+ mutex_unlock(&matrix_dev->guests_lock);
+
+ return ret;
+}
+
+/**
+ * vfio_ap_mdev_hot_unplug_cfg - hot unplug the adapters, domains and control
+ * domains that have been removed from the host's
+ * AP configuration from a guest.
+ *
+ * @matrix_mdev: an ap_matrix_mdev object attached to a KVM guest.
+ * @aprem: the adapters that have been removed from the host's AP configuration
+ * @aqrem: the domains that have been removed from the host's AP configuration
+ * @cdrem: the control domains that have been removed from the host's AP
+ * configuration.
+ */
+static void vfio_ap_mdev_hot_unplug_cfg(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long *aprem,
+ unsigned long *aqrem,
+ unsigned long *cdrem)
+{
+ int do_hotplug = 0;
+
+ if (!bitmap_empty(aprem, AP_DEVICES)) {
+ do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.apm,
+ matrix_mdev->shadow_apcb.apm,
+ aprem, AP_DEVICES);
+ }
+
+ if (!bitmap_empty(aqrem, AP_DOMAINS)) {
+ do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.aqm,
+ matrix_mdev->shadow_apcb.aqm,
+ aqrem, AP_DEVICES);
+ }
+
+ if (!bitmap_empty(cdrem, AP_DOMAINS))
+ do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.adm,
+ matrix_mdev->shadow_apcb.adm,
+ cdrem, AP_DOMAINS);
+
+ if (do_hotplug)
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+}
+
+/**
+ * vfio_ap_mdev_cfg_remove - determines which guests are using the adapters,
+ * domains and control domains that have been removed
+ * from the host AP configuration and unplugs them
+ * from those guests.
+ *
+ * @ap_remove: bitmap specifying which adapters have been removed from the host
+ * config.
+ * @aq_remove: bitmap specifying which domains have been removed from the host
+ * config.
+ * @cd_remove: bitmap specifying which control domains have been removed from
+ * the host config.
+ */
+static void vfio_ap_mdev_cfg_remove(unsigned long *ap_remove,
+ unsigned long *aq_remove,
+ unsigned long *cd_remove)
+{
+ struct ap_matrix_mdev *matrix_mdev;
+ DECLARE_BITMAP(aprem, AP_DEVICES);
+ DECLARE_BITMAP(aqrem, AP_DOMAINS);
+ DECLARE_BITMAP(cdrem, AP_DOMAINS);
+ int do_remove = 0;
+
+ list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
+ mutex_lock(&matrix_mdev->kvm->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+
+ do_remove |= bitmap_and(aprem, ap_remove,
+ matrix_mdev->matrix.apm,
+ AP_DEVICES);
+ do_remove |= bitmap_and(aqrem, aq_remove,
+ matrix_mdev->matrix.aqm,
+ AP_DOMAINS);
+ do_remove |= bitmap_andnot(cdrem, cd_remove,
+ matrix_mdev->matrix.adm,
+ AP_DOMAINS);
+
+ if (do_remove)
+ vfio_ap_mdev_hot_unplug_cfg(matrix_mdev, aprem, aqrem,
+ cdrem);
+
+ mutex_unlock(&matrix_dev->mdevs_lock);
+ mutex_unlock(&matrix_mdev->kvm->lock);
+ }
+}
+
+/**
+ * vfio_ap_mdev_on_cfg_remove - responds to the removal of adapters, domains and
+ * control domains from the host AP configuration
+ * by unplugging them from the guests that are
+ * using them.
+ * @cur_config_info: the current host AP configuration information
+ * @prev_config_info: the previous host AP configuration information
+ */
+static void vfio_ap_mdev_on_cfg_remove(struct ap_config_info *cur_config_info,
+ struct ap_config_info *prev_config_info)
+{
+ int do_remove;
+ DECLARE_BITMAP(aprem, AP_DEVICES);
+ DECLARE_BITMAP(aqrem, AP_DOMAINS);
+ DECLARE_BITMAP(cdrem, AP_DOMAINS);
+
+ do_remove = bitmap_andnot(aprem,
+ (unsigned long *)prev_config_info->apm,
+ (unsigned long *)cur_config_info->apm,
+ AP_DEVICES);
+ do_remove |= bitmap_andnot(aqrem,
+ (unsigned long *)prev_config_info->aqm,
+ (unsigned long *)cur_config_info->aqm,
+ AP_DEVICES);
+ do_remove |= bitmap_andnot(cdrem,
+ (unsigned long *)prev_config_info->adm,
+ (unsigned long *)cur_config_info->adm,
+ AP_DEVICES);
+
+ if (do_remove)
+ vfio_ap_mdev_cfg_remove(aprem, aqrem, cdrem);
+}
+
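
The delta computation above is plain set arithmetic: adapters present in the previous host configuration but absent from the current one were removed (prev AND NOT cur), and the mirrored expression yields the additions handled by vfio_ap_mdev_on_cfg_add() further down. A small user-space sketch (not part of the patch); note that the kernel bitmaps use the inverted (*_inv) bit numbering, while this sketch uses ordinary LSB-first masks.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t prev_apm = 0x1b;  /* adapters 0, 1, 3, 4 in the old config */
        uint64_t cur_apm  = 0x16;  /* adapters 1, 2, 4 in the new config    */

        uint64_t removed = prev_apm & ~cur_apm;  /* 0x09: adapters 0 and 3 */
        uint64_t added   = cur_apm & ~prev_apm;  /* 0x04: adapter 2        */

        printf("removed=%#lx added=%#lx\n",
               (unsigned long)removed, (unsigned long)added);
        return 0;
}
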
+/**
+ * vfio_ap_filter_apid_by_qtype: filter APIDs from an AP mask for adapters that
+ * are older than AP type 10 (CEX4).
+ * @apm: a bitmap of the APIDs to examine
+ * @aqm: a bitmap of the APQIs of the queues to query for the AP type.
+ */
+static void vfio_ap_filter_apid_by_qtype(unsigned long *apm, unsigned long *aqm)
+{
+ bool apid_cleared;
+ struct ap_queue_status status;
+ unsigned long apid, apqi, info;
+ int qtype, qtype_mask = 0xff000000;
+
+ for_each_set_bit_inv(apid, apm, AP_DEVICES) {
+ apid_cleared = false;
+
+ for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) {
+ status = ap_test_queue(AP_MKQID(apid, apqi), 1, &info);
+ switch (status.response_code) {
+ /*
+ * According to the architecture in each case
+ * below, the queue's info should be filled.
+ */
+ case AP_RESPONSE_NORMAL:
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ case AP_RESPONSE_BUSY:
+ qtype = info & qtype_mask;
+
+ /*
+ * The vfio_ap device driver only
+ * supports CEX4 and newer adapters, so
+ * remove the APID if the adapter is
+ * older than a CEX4.
+ */
+ if (qtype < AP_DEVICE_TYPE_CEX4) {
+ clear_bit_inv(apid, apm);
+ apid_cleared = true;
+ }
+
+ break;
+
+ default:
+ /*
+ * If we don't know the adapter type,
+ * clear its APID since it can't be
+ * determined whether the vfio_ap
+ * device driver supports it.
+ */
+ clear_bit_inv(apid, apm);
+ apid_cleared = true;
+ break;
+ }
+
+ /*
+ * If we've already cleared the APID from the apm, there
+ * is no need to continue examining the remaining AP
+ * queues to determine the type of the adapter.
+ */
+ if (apid_cleared)
+ break;
+ }
+ }
+}
+
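
The filtering loop above probes one queue per adapter and clears any APID whose type cannot be confirmed as CEX4 or newer. Below is a minimal userspace sketch of that walk-the-set-bits, probe, clear-on-failure pattern; the supported() predicate is a made-up stand-in for the TAPQ-based type check and is not a real API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool supported(unsigned int apid)
{
	return (apid % 2) == 0;			/* fake probe result */
}

static void filter_apids(uint64_t *apm)
{
	for (unsigned int apid = 0; apid < 64; apid++) {
		if (!(*apm & (1ULL << apid)))
			continue;		/* adapter not in the mask */
		if (!supported(apid))
			*apm &= ~(1ULL << apid);	/* drop unsupported adapter */
	}
}

int main(void)
{
	uint64_t apm = 0x0f;			/* adapters 0-3 assigned */

	filter_apids(&apm);
	printf("filtered apm: 0x%llx\n", (unsigned long long)apm);
	return 0;
}
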
+/**
+ * vfio_ap_mdev_cfg_add - store bitmaps specifying the adapters, domains and
+ * control domains that have been added to the host's
+ * AP configuration for each matrix mdev to which they
+ * are assigned.
+ *
+ * @apm_add: a bitmap specifying the adapters that have been added to the AP
+ * configuration.
+ * @aqm_add: a bitmap specifying the domains that have been added to the AP
+ * configuration.
+ * @adm_add: a bitmap specifying the control domains that have been added to the
+ * AP configuration.
+ */
+static void vfio_ap_mdev_cfg_add(unsigned long *apm_add, unsigned long *aqm_add,
+ unsigned long *adm_add)
+{
+ struct ap_matrix_mdev *matrix_mdev;
+
+ if (list_empty(&matrix_dev->mdev_list))
+ return;
+
+ vfio_ap_filter_apid_by_qtype(apm_add, aqm_add);
+
+ list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
+ bitmap_and(matrix_mdev->apm_add,
+ matrix_mdev->matrix.apm, apm_add, AP_DEVICES);
+ bitmap_and(matrix_mdev->aqm_add,
+ matrix_mdev->matrix.aqm, aqm_add, AP_DOMAINS);
+ bitmap_and(matrix_mdev->adm_add,
+ matrix_mdev->matrix.adm, adm_add, AP_DOMAINS);
+ }
+}
+
+/**
+ * vfio_ap_mdev_on_cfg_add - responds to the addition of adapters, domains and
+ * control domains to the host AP configuration
+ * by updating the bitmaps that specify what adapters,
+ * domains and control domains have been added so they
+ * can be hot plugged into the guest when the AP bus
+ * scan completes (see vfio_ap_on_scan_complete
+ * function).
+ * @cur_config_info: the current AP configuration information
+ * @prev_config_info: the previous AP configuration information
+ */
+static void vfio_ap_mdev_on_cfg_add(struct ap_config_info *cur_config_info,
+ struct ap_config_info *prev_config_info)
+{
+ bool do_add;
+ DECLARE_BITMAP(apm_add, AP_DEVICES);
+ DECLARE_BITMAP(aqm_add, AP_DOMAINS);
+ DECLARE_BITMAP(adm_add, AP_DOMAINS);
+
+ do_add = bitmap_andnot(apm_add,
+ (unsigned long *)cur_config_info->apm,
+ (unsigned long *)prev_config_info->apm,
+ AP_DEVICES);
+ do_add |= bitmap_andnot(aqm_add,
+ (unsigned long *)cur_config_info->aqm,
+ (unsigned long *)prev_config_info->aqm,
+ AP_DOMAINS);
+ do_add |= bitmap_andnot(adm_add,
+ (unsigned long *)cur_config_info->adm,
+ (unsigned long *)prev_config_info->adm,
+ AP_DOMAINS);
+
+ if (do_add)
+ vfio_ap_mdev_cfg_add(apm_add, aqm_add, adm_add);
+}
+
+/**
+ * vfio_ap_on_cfg_changed - handles notification of changes to the host AP
+ * configuration.
+ *
+ * @cur_cfg_info: the current host AP configuration
+ * @prev_cfg_info: the previous host AP configuration
+ */
+void vfio_ap_on_cfg_changed(struct ap_config_info *cur_cfg_info,
+ struct ap_config_info *prev_cfg_info)
+{
+ if (!cur_cfg_info || !prev_cfg_info)
+ return;
+
+ mutex_lock(&matrix_dev->guests_lock);
+
+ vfio_ap_mdev_on_cfg_remove(cur_cfg_info, prev_cfg_info);
+ vfio_ap_mdev_on_cfg_add(cur_cfg_info, prev_cfg_info);
+ memcpy(&matrix_dev->info, cur_cfg_info, sizeof(*cur_cfg_info));
+
+ mutex_unlock(&matrix_dev->guests_lock);
+}
+
+static void vfio_ap_mdev_hot_plug_cfg(struct ap_matrix_mdev *matrix_mdev)
+{
+ bool do_hotplug = false;
+ int filter_domains = 0;
+ int filter_adapters = 0;
+ DECLARE_BITMAP(apm, AP_DEVICES);
+ DECLARE_BITMAP(aqm, AP_DOMAINS);
+
+ mutex_lock(&matrix_mdev->kvm->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+
+ filter_adapters = bitmap_and(apm, matrix_mdev->matrix.apm,
+ matrix_mdev->apm_add, AP_DEVICES);
+ filter_domains = bitmap_and(aqm, matrix_mdev->matrix.aqm,
+ matrix_mdev->aqm_add, AP_DOMAINS);
+
+ if (filter_adapters && filter_domains)
+ do_hotplug |= vfio_ap_mdev_filter_matrix(apm, aqm, matrix_mdev);
+ else if (filter_adapters)
+ do_hotplug |=
+ vfio_ap_mdev_filter_matrix(apm,
+ matrix_mdev->shadow_apcb.aqm,
+ matrix_mdev);
+ else
+ do_hotplug |=
+ vfio_ap_mdev_filter_matrix(matrix_mdev->shadow_apcb.apm,
+ aqm, matrix_mdev);
+
+ if (bitmap_intersects(matrix_mdev->matrix.adm, matrix_mdev->adm_add,
+ AP_DOMAINS))
+ do_hotplug |= vfio_ap_mdev_filter_cdoms(matrix_mdev);
+
+ if (do_hotplug)
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+
+ mutex_unlock(&matrix_dev->mdevs_lock);
+ mutex_unlock(&matrix_mdev->kvm->lock);
+}
+
+void vfio_ap_on_scan_complete(struct ap_config_info *new_config_info,
+ struct ap_config_info *old_config_info)
+{
+ struct ap_matrix_mdev *matrix_mdev;
+
+ mutex_lock(&matrix_dev->guests_lock);
+
+ list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
+ if (bitmap_empty(matrix_mdev->apm_add, AP_DEVICES) &&
+ bitmap_empty(matrix_mdev->aqm_add, AP_DOMAINS) &&
+ bitmap_empty(matrix_mdev->adm_add, AP_DOMAINS))
+ continue;
+
+ vfio_ap_mdev_hot_plug_cfg(matrix_mdev);
+ bitmap_clear(matrix_mdev->apm_add, 0, AP_DEVICES);
+ bitmap_clear(matrix_mdev->aqm_add, 0, AP_DOMAINS);
+ bitmap_clear(matrix_mdev->adm_add, 0, AP_DOMAINS);
+ }
+
+ mutex_unlock(&matrix_dev->guests_lock);
+}
diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h
index a26efd804d0d..d782cf463eab 100644
--- a/drivers/s390/crypto/vfio_ap_private.h
+++ b/drivers/s390/crypto/vfio_ap_private.h
@@ -19,6 +19,7 @@
#include <linux/mutex.h>
#include <linux/kvm_host.h>
#include <linux/vfio.h>
+#include <linux/hashtable.h>
#include "ap_bus.h"
@@ -32,20 +33,26 @@
* @available_instances: number of mediated matrix devices that can be created
* @info: the struct containing the output from the PQAP(QCI) instruction
* @mdev_list: the list of mediated matrix devices created
- * @lock: mutex for locking the AP matrix device. This lock will be
+ * @mdevs_lock: mutex for locking the AP matrix device. This lock will be
* taken every time we fiddle with state managed by the vfio_ap
* driver, be it using @mdev_list or writing the state of a
* single ap_matrix_mdev device. It's quite coarse but we don't
* expect much contention.
* @vfio_ap_drv: the vfio_ap device driver
+ * @guests_lock: mutex for controlling access to a guest that is using AP
+ * devices passed through by the vfio_ap device driver. This lock
+ * will be taken when the AP devices are plugged into or unplugged
+ * from a guest, and when an ap_matrix_mdev device is added to or
+ * removed from @mdev_list or the list is iterated.
*/
struct ap_matrix_dev {
struct device device;
atomic_t available_instances;
struct ap_config_info info;
struct list_head mdev_list;
- struct mutex lock;
+ struct mutex mdevs_lock; /* serializes access to each ap_matrix_mdev */
struct ap_driver *vfio_ap_drv;
+ struct mutex guests_lock; /* serializes access to each KVM guest */
};
extern struct ap_matrix_dev *matrix_dev;
@@ -75,48 +82,77 @@ struct ap_matrix {
};
/**
+ * struct ap_queue_table - a table of queue objects.
+ *
+ * @queues: a hashtable of queues (struct vfio_ap_queue).
+ */
+struct ap_queue_table {
+ DECLARE_HASHTABLE(queues, 8);
+};
+
+/**
* struct ap_matrix_mdev - Contains the data associated with a matrix mediated
* device.
* @vdev: the vfio device
* @node: allows the ap_matrix_mdev struct to be added to a list
* @matrix: the adapters, usage domains and control domains assigned to the
* mediated matrix device.
- * @iommu_notifier: notifier block used for specifying callback function for
- * handling the VFIO_IOMMU_NOTIFY_DMA_UNMAP even
+ * @shadow_apcb: the shadow copy of the APCB field of the KVM guest's CRYCB
* @kvm: the struct holding guest's state
* @pqap_hook: the function pointer to the interception handler for the
* PQAP(AQIC) instruction.
* @mdev: the mediated device
+ * @qtable: table of queues (struct vfio_ap_queue) assigned to the mdev
+ * @apm_add: bitmap of APIDs added to the host's AP configuration
+ * @aqm_add: bitmap of APQIs added to the host's AP configuration
+ * @adm_add: bitmap of control domain numbers added to the host's AP
+ * configuration
*/
struct ap_matrix_mdev {
struct vfio_device vdev;
struct list_head node;
struct ap_matrix matrix;
- struct notifier_block iommu_notifier;
+ struct ap_matrix shadow_apcb;
struct kvm *kvm;
crypto_hook pqap_hook;
struct mdev_device *mdev;
+ struct ap_queue_table qtable;
+ DECLARE_BITMAP(apm_add, AP_DEVICES);
+ DECLARE_BITMAP(aqm_add, AP_DOMAINS);
+ DECLARE_BITMAP(adm_add, AP_DOMAINS);
};
/**
* struct vfio_ap_queue - contains the data associated with a queue bound to the
* vfio_ap device driver
* @matrix_mdev: the matrix mediated device
- * @saved_pfn: the guest PFN pinned for the guest
+ * @saved_iova: the notification indicator byte (nib) address
* @apqn: the APQN of the AP queue device
* @saved_isc: the guest ISC registered with the GIB interface
+ * @mdev_qnode: allows the vfio_ap_queue struct to be added to a hashtable
+ * @reset_rc: the status response code from the last reset of the queue
*/
struct vfio_ap_queue {
struct ap_matrix_mdev *matrix_mdev;
- unsigned long saved_pfn;
+ dma_addr_t saved_iova;
int apqn;
#define VFIO_AP_ISC_INVALID 0xff
unsigned char saved_isc;
+ struct hlist_node mdev_qnode;
+ unsigned int reset_rc;
};
int vfio_ap_mdev_register(void);
void vfio_ap_mdev_unregister(void);
-int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q,
- unsigned int retry);
+
+int vfio_ap_mdev_probe_queue(struct ap_device *queue);
+void vfio_ap_mdev_remove_queue(struct ap_device *queue);
+
+int vfio_ap_mdev_resource_in_use(unsigned long *apm, unsigned long *aqm);
+
+void vfio_ap_on_cfg_changed(struct ap_config_info *new_config_info,
+ struct ap_config_info *old_config_info);
+void vfio_ap_on_scan_complete(struct ap_config_info *new_config_info,
+ struct ap_config_info *old_config_info);
#endif /* _VFIO_AP_PRIVATE_H_ */
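
The new qtable member and mdev_qnode hlist node use the generic helpers from <linux/hashtable.h>. Below is a kernel-context sketch of the declare/add/lookup sequence; it assumes (this is not taken from the patch) that queues are keyed by their APQN, and the names are illustrative.

#include <linux/hashtable.h>

struct demo_queue {
	int apqn;
	struct hlist_node node;
};

static DEFINE_HASHTABLE(demo_queues, 8);	/* 2^8 buckets, same order as the qtable above */

static void demo_link(struct demo_queue *q)
{
	hash_add(demo_queues, &q->node, q->apqn);	/* key by APQN */
}

static struct demo_queue *demo_find(int apqn)
{
	struct demo_queue *q;

	hash_for_each_possible(demo_queues, q, node, apqn)
		if (q->apqn == apqn)
			return q;

	return NULL;
}
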
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 35d4b398c197..8bd9fd51208c 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -763,6 +763,49 @@ static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
ipa_name, com, CARD_DEVID(card));
}
+static void qeth_default_link_info(struct qeth_card *card)
+{
+ struct qeth_link_info *link_info = &card->info.link_info;
+
+ QETH_CARD_TEXT(card, 2, "dftlinfo");
+ link_info->duplex = DUPLEX_FULL;
+
+ if (IS_IQD(card) || IS_VM_NIC(card)) {
+ link_info->speed = SPEED_10000;
+ link_info->port = PORT_FIBRE;
+ link_info->link_mode = QETH_LINK_MODE_FIBRE_SHORT;
+ } else {
+ switch (card->info.link_type) {
+ case QETH_LINK_TYPE_FAST_ETH:
+ case QETH_LINK_TYPE_LANE_ETH100:
+ link_info->speed = SPEED_100;
+ link_info->port = PORT_TP;
+ break;
+ case QETH_LINK_TYPE_GBIT_ETH:
+ case QETH_LINK_TYPE_LANE_ETH1000:
+ link_info->speed = SPEED_1000;
+ link_info->port = PORT_FIBRE;
+ break;
+ case QETH_LINK_TYPE_10GBIT_ETH:
+ link_info->speed = SPEED_10000;
+ link_info->port = PORT_FIBRE;
+ break;
+ case QETH_LINK_TYPE_25GBIT_ETH:
+ link_info->speed = SPEED_25000;
+ link_info->port = PORT_FIBRE;
+ break;
+ default:
+ dev_info(&card->gdev->dev,
+ "Unknown link type %x\n",
+ card->info.link_type);
+ link_info->speed = SPEED_UNKNOWN;
+ link_info->port = PORT_OTHER;
+ }
+
+ link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
+ }
+}
+
static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
struct qeth_ipa_cmd *cmd)
{
@@ -790,6 +833,7 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
netdev_name(card->dev), card->info.chpid);
qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
netif_carrier_off(card->dev);
+ qeth_default_link_info(card);
}
return NULL;
case IPA_CMD_STARTLAN:
@@ -4744,92 +4788,6 @@ out_free:
return rc;
}
-static int qeth_query_card_info_cb(struct qeth_card *card,
- struct qeth_reply *reply, unsigned long data)
-{
- struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
- struct qeth_link_info *link_info = reply->param;
- struct qeth_query_card_info *card_info;
-
- QETH_CARD_TEXT(card, 2, "qcrdincb");
- if (qeth_setadpparms_inspect_rc(cmd))
- return -EIO;
-
- card_info = &cmd->data.setadapterparms.data.card_info;
- netdev_dbg(card->dev,
- "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n",
- card_info->card_type, card_info->port_mode,
- card_info->port_speed);
-
- switch (card_info->port_mode) {
- case CARD_INFO_PORTM_FULLDUPLEX:
- link_info->duplex = DUPLEX_FULL;
- break;
- case CARD_INFO_PORTM_HALFDUPLEX:
- link_info->duplex = DUPLEX_HALF;
- break;
- default:
- link_info->duplex = DUPLEX_UNKNOWN;
- }
-
- switch (card_info->card_type) {
- case CARD_INFO_TYPE_1G_COPPER_A:
- case CARD_INFO_TYPE_1G_COPPER_B:
- link_info->speed = SPEED_1000;
- link_info->port = PORT_TP;
- break;
- case CARD_INFO_TYPE_1G_FIBRE_A:
- case CARD_INFO_TYPE_1G_FIBRE_B:
- link_info->speed = SPEED_1000;
- link_info->port = PORT_FIBRE;
- break;
- case CARD_INFO_TYPE_10G_FIBRE_A:
- case CARD_INFO_TYPE_10G_FIBRE_B:
- link_info->speed = SPEED_10000;
- link_info->port = PORT_FIBRE;
- break;
- default:
- switch (card_info->port_speed) {
- case CARD_INFO_PORTS_10M:
- link_info->speed = SPEED_10;
- break;
- case CARD_INFO_PORTS_100M:
- link_info->speed = SPEED_100;
- break;
- case CARD_INFO_PORTS_1G:
- link_info->speed = SPEED_1000;
- break;
- case CARD_INFO_PORTS_10G:
- link_info->speed = SPEED_10000;
- break;
- case CARD_INFO_PORTS_25G:
- link_info->speed = SPEED_25000;
- break;
- default:
- link_info->speed = SPEED_UNKNOWN;
- }
-
- link_info->port = PORT_OTHER;
- }
-
- return 0;
-}
-
-int qeth_query_card_info(struct qeth_card *card,
- struct qeth_link_info *link_info)
-{
- struct qeth_cmd_buffer *iob;
-
- QETH_CARD_TEXT(card, 2, "qcrdinfo");
- if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
- return -EOPNOTSUPP;
- iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0);
- if (!iob)
- return -ENOMEM;
-
- return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, link_info);
-}
-
static int qeth_init_link_info_oat_cb(struct qeth_card *card,
struct qeth_reply *reply_priv,
unsigned long data)
@@ -4839,6 +4797,7 @@ static int qeth_init_link_info_oat_cb(struct qeth_card *card,
struct qeth_query_oat_physical_if *phys_if;
struct qeth_query_oat_reply *reply;
+ QETH_CARD_TEXT(card, 2, "qoatincb");
if (qeth_setadpparms_inspect_rc(cmd))
return -EIO;
@@ -4918,41 +4877,7 @@ static int qeth_init_link_info_oat_cb(struct qeth_card *card,
static void qeth_init_link_info(struct qeth_card *card)
{
- card->info.link_info.duplex = DUPLEX_FULL;
-
- if (IS_IQD(card) || IS_VM_NIC(card)) {
- card->info.link_info.speed = SPEED_10000;
- card->info.link_info.port = PORT_FIBRE;
- card->info.link_info.link_mode = QETH_LINK_MODE_FIBRE_SHORT;
- } else {
- switch (card->info.link_type) {
- case QETH_LINK_TYPE_FAST_ETH:
- case QETH_LINK_TYPE_LANE_ETH100:
- card->info.link_info.speed = SPEED_100;
- card->info.link_info.port = PORT_TP;
- break;
- case QETH_LINK_TYPE_GBIT_ETH:
- case QETH_LINK_TYPE_LANE_ETH1000:
- card->info.link_info.speed = SPEED_1000;
- card->info.link_info.port = PORT_FIBRE;
- break;
- case QETH_LINK_TYPE_10GBIT_ETH:
- card->info.link_info.speed = SPEED_10000;
- card->info.link_info.port = PORT_FIBRE;
- break;
- case QETH_LINK_TYPE_25GBIT_ETH:
- card->info.link_info.speed = SPEED_25000;
- card->info.link_info.port = PORT_FIBRE;
- break;
- default:
- dev_info(&card->gdev->dev, "Unknown link type %x\n",
- card->info.link_type);
- card->info.link_info.speed = SPEED_UNKNOWN;
- card->info.link_info.port = PORT_OTHER;
- }
-
- card->info.link_info.link_mode = QETH_LINK_MODE_UNKNOWN;
- }
+ qeth_default_link_info(card);
/* Get more accurate data via QUERY OAT: */
if (qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
@@ -5461,6 +5386,7 @@ int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
qeth_clear_working_pool_list(card);
qeth_flush_local_addrs(card);
card->info.promisc_mode = 0;
+ qeth_default_link_info(card);
rc = qeth_stop_channel(&card->data);
rc2 = qeth_stop_channel(&card->write);
diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c
index b0b36b2132fe..9eba0a32e9f9 100644
--- a/drivers/s390/net/qeth_ethtool.c
+++ b/drivers/s390/net/qeth_ethtool.c
@@ -428,8 +428,8 @@ static int qeth_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct qeth_card *card = netdev->ml_priv;
- struct qeth_link_info link_info;
+ QETH_CARD_TEXT(card, 4, "ethtglks");
cmd->base.speed = card->info.link_info.speed;
cmd->base.duplex = card->info.link_info.duplex;
cmd->base.port = card->info.link_info.port;
@@ -439,16 +439,6 @@ static int qeth_get_link_ksettings(struct net_device *netdev,
cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
- /* Check if we can obtain more accurate information. */
- if (!qeth_query_card_info(card, &link_info)) {
- if (link_info.speed != SPEED_UNKNOWN)
- cmd->base.speed = link_info.speed;
- if (link_info.duplex != DUPLEX_UNKNOWN)
- cmd->base.duplex = link_info.duplex;
- if (link_info.port != PORT_OTHER)
- cmd->base.port = link_info.port;
- }
-
qeth_set_ethtool_link_modes(cmd, card->info.link_info.link_mode);
return 0;
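
With the on-demand QUERY CARD INFO path removed, get_link_ksettings now reports whatever qeth_default_link_info() and the OAT query cached in card->info.link_info. One quick way to observe the values the driver exports is the legacy ethtool ioctl from userspace, sketched below for illustration only; running ethtool <iface> reports the same data.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
	struct ifreq ifr;
	int fd;

	if (argc < 2)
		return 1;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, argv[1], IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ecmd;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("%s: speed %u Mb/s, duplex %u, port %u\n", argv[1],
		       ethtool_cmd_speed(&ecmd), ecmd.duplex, ecmd.port);

	close(fd);
	return 0;
}
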
diff --git a/drivers/s390/scsi/zfcp_diag.h b/drivers/s390/scsi/zfcp_diag.h
index da55133da8fe..15c25fefe91a 100644
--- a/drivers/s390/scsi/zfcp_diag.h
+++ b/drivers/s390/scsi/zfcp_diag.h
@@ -2,7 +2,7 @@
/*
* zfcp device driver
*
- * Definitions for handling diagnostics in the the zfcp device driver.
+ * Definitions for handling diagnostics in the zfcp device driver.
*
* Copyright IBM Corp. 2018, 2020
*/
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 511bf8e0a436..b61acbb09be3 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -145,27 +145,33 @@ void zfcp_fc_enqueue_event(struct zfcp_adapter *adapter,
static int zfcp_fc_wka_port_get(struct zfcp_fc_wka_port *wka_port)
{
+ int ret = -EIO;
+
if (mutex_lock_interruptible(&wka_port->mutex))
return -ERESTARTSYS;
if (wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE ||
wka_port->status == ZFCP_FC_WKA_PORT_CLOSING) {
wka_port->status = ZFCP_FC_WKA_PORT_OPENING;
- if (zfcp_fsf_open_wka_port(wka_port))
+ if (zfcp_fsf_open_wka_port(wka_port)) {
+ /* could not even send request, nothing to wait for */
wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
+ goto out;
+ }
}
- mutex_unlock(&wka_port->mutex);
-
- wait_event(wka_port->completion_wq,
+ wait_event(wka_port->opened,
wka_port->status == ZFCP_FC_WKA_PORT_ONLINE ||
wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE);
if (wka_port->status == ZFCP_FC_WKA_PORT_ONLINE) {
atomic_inc(&wka_port->refcount);
- return 0;
+ ret = 0;
+ goto out;
}
- return -EIO;
+out:
+ mutex_unlock(&wka_port->mutex);
+ return ret;
}
static void zfcp_fc_wka_port_offline(struct work_struct *work)
@@ -181,9 +187,12 @@ static void zfcp_fc_wka_port_offline(struct work_struct *work)
wka_port->status = ZFCP_FC_WKA_PORT_CLOSING;
if (zfcp_fsf_close_wka_port(wka_port)) {
+ /* could not even send request, nothing to wait for */
wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
- wake_up(&wka_port->completion_wq);
+ goto out;
}
+ wait_event(wka_port->closed,
+ wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE);
out:
mutex_unlock(&wka_port->mutex);
}
@@ -193,13 +202,15 @@ static void zfcp_fc_wka_port_put(struct zfcp_fc_wka_port *wka_port)
if (atomic_dec_return(&wka_port->refcount) != 0)
return;
/* wait 10 milliseconds, other reqs might pop in */
- schedule_delayed_work(&wka_port->work, HZ / 100);
+ queue_delayed_work(wka_port->adapter->work_queue, &wka_port->work,
+ msecs_to_jiffies(10));
}
static void zfcp_fc_wka_port_init(struct zfcp_fc_wka_port *wka_port, u32 d_id,
struct zfcp_adapter *adapter)
{
- init_waitqueue_head(&wka_port->completion_wq);
+ init_waitqueue_head(&wka_port->opened);
+ init_waitqueue_head(&wka_port->closed);
wka_port->adapter = adapter;
wka_port->d_id = d_id;
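
Splitting completion_wq into separate opened and closed wait queues means each requester sleeps only until its own operation finishes, and the close path now waits for the port to actually go offline instead of returning early; the delayed-work change keeps the same 10 ms grace period (HZ / 100) but states it explicitly with msecs_to_jiffies(). A condensed kernel-context sketch of the pattern follows; the names are illustrative and not taken from the driver.

#include <linux/wait.h>

struct demo_port {
	wait_queue_head_t opened;	/* open requesters sleep here */
	wait_queue_head_t closed;	/* close requesters sleep here */
	int status;			/* 0 offline, 1 opening, 2 online, 3 closing */
};

static void demo_init(struct demo_port *p)
{
	init_waitqueue_head(&p->opened);
	init_waitqueue_head(&p->closed);
	p->status = 0;
}

static void demo_open_done(struct demo_port *p, bool online)
{
	p->status = online ? 2 : 0;
	wake_up(&p->opened);		/* wakes only the open waiters */
}

static void demo_wait_for_open(struct demo_port *p)
{
	/* close completions no longer wake this sleeper spuriously */
	wait_event(p->opened, p->status == 2 || p->status == 0);
}
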
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index 8aaf409ce9cb..97755407ce1b 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -185,7 +185,8 @@ enum zfcp_fc_wka_status {
/**
* struct zfcp_fc_wka_port - representation of well-known-address (WKA) FC port
* @adapter: Pointer to adapter structure this WKA port belongs to
- * @completion_wq: Wait for completion of open/close command
+ * @opened: Wait for completion of open command
+ * @closed: Wait for completion of close command
* @status: Current status of WKA port
* @refcount: Reference count to keep port open as long as it is in use
* @d_id: FC destination id or well-known-address
@@ -195,7 +196,8 @@ enum zfcp_fc_wka_status {
*/
struct zfcp_fc_wka_port {
struct zfcp_adapter *adapter;
- wait_queue_head_t completion_wq;
+ wait_queue_head_t opened;
+ wait_queue_head_t closed;
enum zfcp_fc_wka_status status;
atomic_t refcount;
u32 d_id;
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 4f1e4385ce58..19223b075568 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -1907,7 +1907,7 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
}
out:
- wake_up(&wka_port->completion_wq);
+ wake_up(&wka_port->opened);
}
/**
@@ -1966,7 +1966,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
}
wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
- wake_up(&wka_port->completion_wq);
+ wake_up(&wka_port->closed);
}
/**
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index dbf3e50444e6..cb67fa80fb12 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -672,7 +672,7 @@ ZFCP_DEFINE_SCSI_ATTR(zfcp_in_recovery, "%d\n",
ZFCP_DEFINE_SCSI_ATTR(zfcp_status, "0x%08x\n",
atomic_read(&zfcp_sdev->status));
-struct attribute *zfcp_sdev_attrs[] = {
+static struct attribute *zfcp_sdev_attrs[] = {
&dev_attr_fcp_lun.attr,
&dev_attr_wwpn.attr,
&dev_attr_hba_id.attr,
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index aa96f67dd0b1..a10dbe632ef9 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -532,6 +532,9 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
err = -ENOMEM;
goto out_err;
}
+
+ vq->num_max = info->num;
+
/* it may have been reduced */
info->num = virtqueue_get_vring_size(vq);
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index a897c8f914cf..f2abffce2659 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -2515,12 +2515,26 @@ static int blogic_resultcode(struct blogic_adapter *adapter,
return (hoststatus << 16) | tgt_status;
}
+/*
+ * turn the dma address from an inbox into a ccb pointer
+ * This is rather inefficient.
+ */
+static struct blogic_ccb *
+blogic_inbox_to_ccb(struct blogic_adapter *adapter, struct blogic_inbox *inbox)
+{
+ struct blogic_ccb *ccb;
+
+ for (ccb = adapter->all_ccbs; ccb; ccb = ccb->next_all)
+ if (inbox->ccb == ccb->dma_handle)
+ break;
+
+ return ccb;
+}
/*
blogic_scan_inbox scans the Incoming Mailboxes saving any
Incoming Mailbox entries for completion processing.
*/
-
static void blogic_scan_inbox(struct blogic_adapter *adapter)
{
/*
@@ -2540,17 +2554,14 @@ static void blogic_scan_inbox(struct blogic_adapter *adapter)
enum blogic_cmplt_code comp_code;
while ((comp_code = next_inbox->comp_code) != BLOGIC_INBOX_FREE) {
- /*
- We are only allowed to do this because we limit our
- architectures we run on to machines where bus_to_virt(
- actually works. There *needs* to be a dma_addr_to_virt()
- in the new PCI DMA mapping interface to replace
- bus_to_virt() or else this code is going to become very
- innefficient.
- */
- struct blogic_ccb *ccb =
- (struct blogic_ccb *) bus_to_virt(next_inbox->ccb);
- if (comp_code != BLOGIC_CMD_NOTFOUND) {
+ struct blogic_ccb *ccb = blogic_inbox_to_ccb(adapter, next_inbox);
+ if (!ccb) {
+ /*
+ * This should never happen, unless the CCB list is
+ * corrupted in memory.
+ */
+ blogic_warn("Could not find CCB for dma address %x\n", adapter, next_inbox->ccb);
+ } else if (comp_code != BLOGIC_CMD_NOTFOUND) {
if (ccb->status == BLOGIC_CCB_ACTIVE ||
ccb->status == BLOGIC_CCB_RESET) {
/*
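
The new blogic_inbox_to_ccb() helper replaces the old bus_to_virt() shortcut: instead of trusting that a bus address can be turned back into a kernel pointer, the driver walks its own CCB list and matches the DMA handle the adapter reported. The userspace sketch below is illustrative only; it shows the same O(n) reverse lookup and the NULL result the caller now has to handle.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct ccb {
	uint32_t dma_handle;		/* address the adapter hands back */
	struct ccb *next_all;
	int id;
};

/* same idea as blogic_inbox_to_ccb(): walk the list, match the handle */
static struct ccb *handle_to_ccb(struct ccb *all_ccbs, uint32_t handle)
{
	struct ccb *ccb;

	for (ccb = all_ccbs; ccb; ccb = ccb->next_all)
		if (ccb->dma_handle == handle)
			return ccb;

	return NULL;			/* corrupted mailbox; caller must cope */
}

int main(void)
{
	struct ccb c2 = { .dma_handle = 0x2000, .next_all = NULL, .id = 2 };
	struct ccb c1 = { .dma_handle = 0x1000, .next_all = &c2, .id = 1 };
	struct ccb *hit = handle_to_ccb(&c1, 0x2000);

	printf("found CCB %d\n", hit ? hit->id : -1);
	return 0;
}
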
diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c
index 90253208a72f..3d9c56ac8224 100644
--- a/drivers/scsi/FlashPoint.c
+++ b/drivers/scsi/FlashPoint.c
@@ -1712,7 +1712,7 @@ static unsigned char FlashPoint_InterruptPending(void *pCurrCard)
static int FlashPoint_HandleInterrupt(void *pcard)
{
struct sccb *currSCCB;
- unsigned char thisCard, result, bm_status, bm_int_st;
+ unsigned char thisCard, result, bm_status;
unsigned short hp_int;
unsigned char i, target;
struct sccb_card *pCurrCard = pcard;
@@ -1723,7 +1723,7 @@ static int FlashPoint_HandleInterrupt(void *pcard)
MDISABLE_INT(ioport);
- if ((bm_int_st = RD_HARPOON(ioport + hp_int_status)) & EXT_STATUS_ON)
+ if (RD_HARPOON(ioport + hp_int_status) & EXT_STATUS_ON)
bm_status = RD_HARPOON(ioport + hp_ext_status) &
(unsigned char)BAD_EXT_STATUS;
else
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index a9fe5152addd..955cb69a5418 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -458,17 +458,6 @@ config SCSI_MVUMI
To compile this driver as a module, choose M here: the
module will be called mvumi.
-config SCSI_DPT_I2O
- tristate "Adaptec I2O RAID support "
- depends on SCSI && PCI && VIRT_TO_BUS
- help
- This driver supports all of Adaptec's I2O based RAID controllers as
- well as the DPT SmartRaid V cards. This is an Adaptec maintained
- driver by Deanna Bonds. See <file:Documentation/scsi/dpti.rst>.
-
- To compile this driver as a module, choose M here: the
- module will be called dpt_i2o.
-
config SCSI_ADVANSYS
tristate "AdvanSys SCSI support"
depends on SCSI
@@ -513,7 +502,7 @@ config SCSI_HPTIOP
config SCSI_BUSLOGIC
tristate "BusLogic SCSI support"
- depends on PCI && SCSI && VIRT_TO_BUS
+ depends on PCI && SCSI
help
This is support for BusLogic MultiMaster and FlashPoint SCSI Host
Adapters. Consult the SCSI-HOWTO, available from
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 2ad3bc052531..f055bfd54a68 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -63,7 +63,6 @@ obj-$(CONFIG_BVME6000_SCSI) += 53c700.o bvme6000_scsi.o
obj-$(CONFIG_SCSI_SIM710) += 53c700.o sim710.o
obj-$(CONFIG_SCSI_ADVANSYS) += advansys.o
obj-$(CONFIG_SCSI_BUSLOGIC) += BusLogic.o
-obj-$(CONFIG_SCSI_DPT_I2O) += dpt_i2o.o
obj-$(CONFIG_SCSI_ARCMSR) += arcmsr/
obj-$(CONFIG_SCSI_AHA152X) += aha152x.o
obj-$(CONFIG_SCSI_AHA1542) += aha1542.o
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index cf703a1ecdda..74312400468b 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -24,8 +24,11 @@
struct a2091_hostdata {
struct WD33C93_hostdata wh;
struct a2091_scsiregs *regs;
+ struct device *dev;
};
+#define DMA_DIR(d) ((d == DATA_OUT_DIR) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
+
static irqreturn_t a2091_intr(int irq, void *data)
{
struct Scsi_Host *instance = data;
@@ -45,15 +48,31 @@ static irqreturn_t a2091_intr(int irq, void *data)
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
+ unsigned long len = scsi_pointer->this_residual;
struct Scsi_Host *instance = cmd->device->host;
struct a2091_hostdata *hdata = shost_priv(instance);
struct WD33C93_hostdata *wh = &hdata->wh;
struct a2091_scsiregs *regs = hdata->regs;
unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
- unsigned long addr = virt_to_bus(scsi_pointer->ptr);
+ dma_addr_t addr;
+
+ addr = dma_map_single(hdata->dev, scsi_pointer->ptr,
+ len, DMA_DIR(dir_in));
+ if (dma_mapping_error(hdata->dev, addr)) {
+ dev_warn(hdata->dev, "cannot map SCSI data block %p\n",
+ scsi_pointer->ptr);
+ return 1;
+ }
+ scsi_pointer->dma_handle = addr;
/* don't allow DMA if the physical address is bad */
if (addr & A2091_XFER_MASK) {
+ /* drop useless mapping */
+ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+ scsi_pointer->this_residual,
+ DMA_DIR(dir_in));
+ scsi_pointer->dma_handle = (dma_addr_t) NULL;
+
wh->dma_bounce_len = (scsi_pointer->this_residual + 511) & ~0x1ff;
wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len,
GFP_KERNEL);
@@ -64,8 +83,21 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
return 1;
}
- /* get the physical address of the bounce buffer */
- addr = virt_to_bus(wh->dma_bounce_buffer);
+ if (!dir_in) {
+ /* copy to bounce buffer for a write */
+ memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr,
+ scsi_pointer->this_residual);
+ }
+
+ /* will flush/invalidate cache for us */
+ addr = dma_map_single(hdata->dev, wh->dma_bounce_buffer,
+ wh->dma_bounce_len, DMA_DIR(dir_in));
+ /* can't map buffer; use PIO */
+ if (dma_mapping_error(hdata->dev, addr)) {
+ dev_warn(hdata->dev, "cannot map bounce buffer %p\n",
+ wh->dma_bounce_buffer);
+ return 1;
+ }
/* the bounce buffer may not be in the first 16M of physmem */
if (addr & A2091_XFER_MASK) {
@@ -76,11 +108,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
return 1;
}
- if (!dir_in) {
- /* copy to bounce buffer for a write */
- memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr,
- scsi_pointer->this_residual);
- }
+ scsi_pointer->dma_handle = addr;
}
/* setup dma direction */
@@ -95,13 +123,8 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
/* setup DMA *physical* address */
regs->ACR = addr;
- if (dir_in) {
- /* invalidate any cache */
- cache_clear(addr, scsi_pointer->this_residual);
- } else {
- /* push any dirty cache */
- cache_push(addr, scsi_pointer->this_residual);
- }
+ /* no more cache flush here - dma_map_single() takes care */
+
/* start DMA */
regs->ST_DMA = 1;
@@ -142,6 +165,10 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
/* restore the CONTROL bits (minus the direction flag) */
regs->CNTR = CNTR_PDMD | CNTR_INTEN;
+ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+ scsi_pointer->this_residual,
+ DMA_DIR(wh->dma_dir));
+
/* copy from a bounce buffer, if necessary */
if (status && wh->dma_bounce_buffer) {
if (wh->dma_dir)
@@ -178,6 +205,11 @@ static int a2091_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
wd33c93_regs wdregs;
struct a2091_hostdata *hdata;
+ if (dma_set_mask_and_coherent(&z->dev, DMA_BIT_MASK(24))) {
+ dev_warn(&z->dev, "cannot use 24 bit DMA\n");
+ return -ENODEV;
+ }
+
if (!request_mem_region(z->resource.start, 256, "wd33c93"))
return -EBUSY;
@@ -198,6 +230,7 @@ static int a2091_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
wdregs.SCMD = &regs->SCMD;
hdata = shost_priv(instance);
+ hdata->dev = &z->dev;
hdata->wh.no_sync = 0xff;
hdata->wh.fast = 0;
hdata->wh.dma_mode = CTRL_DMA;
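
The a2091 conversion drops virt_to_bus() and the manual cache_clear()/cache_push() calls in favour of the streaming DMA API, which both produces a device-usable address and takes care of cache maintenance. Below is a condensed kernel-context sketch of the map, check, use, unmap lifecycle the patch follows; the function and variable names are illustrative, not driver code.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int demo_dma_xfer(struct device *dev, void *buf, size_t len, bool to_device)
{
	enum dma_data_direction dir = to_device ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	dma_addr_t addr;

	addr = dma_map_single(dev, buf, len, dir);	/* flushes/invalidates caches */
	if (dma_mapping_error(dev, addr))
		return -EIO;			/* fall back to PIO, as dma_setup() does */

	/* ... program the controller with 'addr' and run the transfer ... */

	dma_unmap_single(dev, addr, len, dir);	/* required before the CPU touches buf again */
	return 0;
}
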
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index dd161885eed1..2c5cb1a02e86 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -7,6 +7,7 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <asm/page.h>
@@ -25,8 +26,11 @@
struct a3000_hostdata {
struct WD33C93_hostdata wh;
struct a3000_scsiregs *regs;
+ struct device *dev;
};
+#define DMA_DIR(d) ((d == DATA_OUT_DIR) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
+
static irqreturn_t a3000_intr(int irq, void *data)
{
struct Scsi_Host *instance = data;
@@ -49,20 +53,38 @@ static irqreturn_t a3000_intr(int irq, void *data)
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
+ unsigned long len = scsi_pointer->this_residual;
struct Scsi_Host *instance = cmd->device->host;
struct a3000_hostdata *hdata = shost_priv(instance);
struct WD33C93_hostdata *wh = &hdata->wh;
struct a3000_scsiregs *regs = hdata->regs;
unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
- unsigned long addr = virt_to_bus(scsi_pointer->ptr);
+ dma_addr_t addr;
+
+ addr = dma_map_single(hdata->dev, scsi_pointer->ptr,
+ len, DMA_DIR(dir_in));
+ if (dma_mapping_error(hdata->dev, addr)) {
+ dev_warn(hdata->dev, "cannot map SCSI data block %p\n",
+ scsi_pointer->ptr);
+ return 1;
+ }
+ scsi_pointer->dma_handle = addr;
/*
* if the physical address has the wrong alignment, or if
* physical address is bad, or if it is a write and at the
* end of a physical memory chunk, then allocate a bounce
* buffer
+ * MSch 20220629 - only wrong alignment tested - bounce
+ * buffer returned by kmalloc is guaranteed to be aligned
*/
if (addr & A3000_XFER_MASK) {
+ WARN_ONCE(1, "Invalid alignment for DMA!");
+ /* drop useless mapping */
+ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+ scsi_pointer->this_residual,
+ DMA_DIR(dir_in));
+
wh->dma_bounce_len = (scsi_pointer->this_residual + 511) & ~0x1ff;
wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len,
GFP_KERNEL);
@@ -70,6 +92,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
/* can't allocate memory; use PIO */
if (!wh->dma_bounce_buffer) {
wh->dma_bounce_len = 0;
+ scsi_pointer->dma_handle = (dma_addr_t) NULL;
return 1;
}
@@ -79,7 +102,15 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
scsi_pointer->this_residual);
}
- addr = virt_to_bus(wh->dma_bounce_buffer);
+ addr = dma_map_single(hdata->dev, wh->dma_bounce_buffer,
+ wh->dma_bounce_len, DMA_DIR(dir_in));
+ if (dma_mapping_error(hdata->dev, addr)) {
+ dev_warn(hdata->dev,
+ "cannot map bounce buffer %p\n",
+ wh->dma_bounce_buffer);
+ return 1;
+ }
+ scsi_pointer->dma_handle = addr;
}
/* setup dma direction */
@@ -94,13 +125,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
/* setup DMA *physical* address */
regs->ACR = addr;
- if (dir_in) {
- /* invalidate any cache */
- cache_clear(addr, scsi_pointer->this_residual);
- } else {
- /* push any dirty cache */
- cache_push(addr, scsi_pointer->this_residual);
- }
+ /* no more cache flush here - dma_map_single() takes care */
/* start DMA */
mb(); /* make sure setup is completed */
@@ -151,6 +176,10 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
regs->CNTR = CNTR_PDMD | CNTR_INTEN;
mb(); /* make sure CNTR is updated before next IO */
+ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+ scsi_pointer->this_residual,
+ DMA_DIR(wh->dma_dir));
+
/* copy from a bounce buffer, if necessary */
if (status && wh->dma_bounce_buffer) {
if (SCpnt) {
@@ -193,6 +222,11 @@ static int __init amiga_a3000_scsi_probe(struct platform_device *pdev)
wd33c93_regs wdregs;
struct a3000_hostdata *hdata;
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ dev_warn(&pdev->dev, "cannot use 32 bit DMA\n");
+ return -ENODEV;
+ }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
@@ -216,6 +250,7 @@ static int __init amiga_a3000_scsi_probe(struct platform_device *pdev)
wdregs.SCMD = &regs->SCMD;
hdata = shost_priv(instance);
+ hdata->dev = &pdev->dev;
hdata->wh.no_sync = 0xff;
hdata->wh.fast = 0;
hdata->wh.dma_mode = CTRL_DMA;
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 81462f4ddb90..4d4cb47b3846 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -1050,7 +1050,7 @@ static void get_container_serial_callback(void *context, struct fib * fibptr)
vpdpage83data.type1.productid));
/* Convert to ascii based serial number.
- * The LSB is the the end.
+ * The LSB is the end.
*/
for (i = 0; i < 8; i++) {
u8 temp =
diff --git a/drivers/scsi/aic94xx/aic94xx_dev.c b/drivers/scsi/aic94xx/aic94xx_dev.c
index 73506a459bf8..91d196f26b76 100644
--- a/drivers/scsi/aic94xx/aic94xx_dev.c
+++ b/drivers/scsi/aic94xx/aic94xx_dev.c
@@ -159,7 +159,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
flags |= OPEN_REQUIRED;
if ((dev->dev_type == SAS_SATA_DEV) ||
(dev->tproto & SAS_PROTOCOL_STP)) {
- struct smp_resp *rps_resp = &dev->sata_dev.rps_resp;
+ struct smp_rps_resp *rps_resp = &dev->sata_dev.rps_resp;
if (rps_resp->frame_type == SMP_RESPONSE &&
rps_resp->function == SMP_REPORT_PHY_SATA &&
rps_resp->result == SMP_RESP_FUNC_ACC) {
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 3bb0adefbe06..50a577ac3bb4 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -231,6 +231,7 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
cls_session = starget_to_session(scsi_target(sc->device));
session = cls_session->dd_data;
+completion_check:
/* check if we raced, task just got cleaned up under us */
spin_lock_bh(&session->back_lock);
if (!abrt_task || !abrt_task->sc) {
@@ -238,7 +239,13 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
return SUCCESS;
}
/* get a task ref till FW processes the req for the ICD used */
- __iscsi_get_task(abrt_task);
+ if (!iscsi_get_task(abrt_task)) {
+ spin_unlock(&session->back_lock);
+ /* We are just about to call iscsi_free_task so wait for it. */
+ udelay(5);
+ goto completion_check;
+ }
+
abrt_io_task = abrt_task->dd_data;
conn = abrt_task->conn;
beiscsi_conn = conn->dd_data;
@@ -323,7 +330,15 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
}
/* get a task ref till FW processes the req for the ICD used */
- __iscsi_get_task(task);
+ if (!iscsi_get_task(task)) {
+ /*
+ * The task has completed in the driver and is
+ * completing in libiscsi. Just ignore it here. When we
+ * call iscsi_eh_device_reset, it will wait for us.
+ */
+ continue;
+ }
+
io_task = task->dd_data;
/* mark WRB invalid which have been not processed by FW yet */
if (is_chip_be2_be3r(phba)) {
@@ -5745,7 +5760,7 @@ static void beiscsi_remove(struct pci_dev *pcidev)
cancel_work_sync(&phba->sess_work);
beiscsi_iface_destroy_default(phba);
- iscsi_host_remove(phba->shost);
+ iscsi_host_remove(phba->shost, false);
beiscsi_disable_port(phba, 1);
/* after cancelling boot_work */
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 15fbd09baa94..a3c800e04a2e 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -909,7 +909,7 @@ void bnx2i_free_hba(struct bnx2i_hba *hba)
{
struct Scsi_Host *shost = hba->shost;
- iscsi_host_remove(shost);
+ iscsi_host_remove(shost, false);
INIT_LIST_HEAD(&hba->ep_ofld_list);
INIT_LIST_HEAD(&hba->ep_active_list);
INIT_LIST_HEAD(&hba->ep_destroy_list);
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 908854869864..7ab29eaec6f3 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -63,7 +63,7 @@ static int verbose = 1;
module_param(verbose, int, 0644);
MODULE_PARM_DESC(verbose,"be verbose (default: on)");
-static int debug = 0;
+static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug,"enable/disable debug messages, also prints more "
"detailed sense codes on scsi errors (default: off)");
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 4365d52c6430..af281e271f88 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -328,7 +328,7 @@ void cxgbi_hbas_remove(struct cxgbi_device *cdev)
chba = cdev->hbas[i];
if (chba) {
cdev->hbas[i] = NULL;
- iscsi_host_remove(chba->shost);
+ iscsi_host_remove(chba->shost, false);
pci_dev_put(cdev->pdev);
iscsi_host_free(chba->shost);
}
@@ -1455,7 +1455,7 @@ void cxgbi_conn_tx_open(struct cxgbi_sock *csk)
if (conn) {
log_debug(1 << CXGBI_DBG_SOCK,
"csk 0x%p, cid %d.\n", csk, conn->id);
- iscsi_conn_queue_work(conn);
+ iscsi_conn_queue_xmit(conn);
}
}
EXPORT_SYMBOL_GPL(cxgbi_conn_tx_open);
diff --git a/drivers/scsi/cxlflash/ocxl_hw.c b/drivers/scsi/cxlflash/ocxl_hw.c
index 244fc27215dc..631eda2d467e 100644
--- a/drivers/scsi/cxlflash/ocxl_hw.c
+++ b/drivers/scsi/cxlflash/ocxl_hw.c
@@ -16,6 +16,7 @@
#include <linux/poll.h>
#include <linux/sched/signal.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <asm/xive.h>
#include <misc/ocxl.h>
diff --git a/drivers/scsi/dpt/dpti_i2o.h b/drivers/scsi/dpt/dpti_i2o.h
deleted file mode 100644
index e1fbbf55c09d..000000000000
--- a/drivers/scsi/dpt/dpti_i2o.h
+++ /dev/null
@@ -1,441 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-#ifndef _SCSI_I2O_H
-#define _SCSI_I2O_H
-
-/* I2O kernel space accessible structures/APIs
- *
- * (c) Copyright 1999, 2000 Red Hat Software
- *
- *************************************************************************
- *
- * This header file defined the I2O APIs/structures for use by
- * the I2O kernel modules.
- */
-
-#ifdef __KERNEL__ /* This file to be included by kernel only */
-
-#include <linux/i2o-dev.h>
-
-#include <linux/notifier.h>
-#include <linux/atomic.h>
-
-
-/*
- * Tunable parameters first
- */
-
-/* How many different OSM's are we allowing */
-#define MAX_I2O_MODULES 64
-
-#define I2O_EVT_CAPABILITY_OTHER 0x01
-#define I2O_EVT_CAPABILITY_CHANGED 0x02
-
-#define I2O_EVT_SENSOR_STATE_CHANGED 0x01
-
-//#ifdef __KERNEL__ /* ioctl stuff only thing exported to users */
-
-#define I2O_MAX_MANAGERS 4
-
-/*
- * I2O Interface Objects
- */
-
-#include <linux/wait.h>
-typedef wait_queue_head_t adpt_wait_queue_head_t;
-#define ADPT_DECLARE_WAIT_QUEUE_HEAD(wait) DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait)
-typedef wait_queue_entry_t adpt_wait_queue_entry_t;
-
-/*
- * message structures
- */
-
-struct i2o_message
-{
- u8 version_offset;
- u8 flags;
- u16 size;
- u32 target_tid:12;
- u32 init_tid:12;
- u32 function:8;
- u32 initiator_context;
- /* List follows */
-};
-
-struct adpt_device;
-struct _adpt_hba;
-struct i2o_device
-{
- struct i2o_device *next; /* Chain */
- struct i2o_device *prev;
-
- char dev_name[8]; /* linux /dev name if available */
- i2o_lct_entry lct_data;/* Device LCT information */
- u32 flags;
- struct proc_dir_entry* proc_entry; /* /proc dir */
- struct adpt_device *owner;
- struct _adpt_hba *controller; /* Controlling IOP */
-};
-
-/*
- * Each I2O controller has one of these objects
- */
-
-struct i2o_controller
-{
- char name[16];
- int unit;
- int type;
- int enabled;
-
- struct notifier_block *event_notifer; /* Events */
- atomic_t users;
- struct i2o_device *devices; /* I2O device chain */
- struct i2o_controller *next; /* Controller chain */
-
-};
-
-/*
- * I2O System table entry
- */
-struct i2o_sys_tbl_entry
-{
- u16 org_id;
- u16 reserved1;
- u32 iop_id:12;
- u32 reserved2:20;
- u16 seg_num:12;
- u16 i2o_version:4;
- u8 iop_state;
- u8 msg_type;
- u16 frame_size;
- u16 reserved3;
- u32 last_changed;
- u32 iop_capabilities;
- u32 inbound_low;
- u32 inbound_high;
-};
-
-struct i2o_sys_tbl
-{
- u8 num_entries;
- u8 version;
- u16 reserved1;
- u32 change_ind;
- u32 reserved2;
- u32 reserved3;
- struct i2o_sys_tbl_entry iops[];
-};
-
-/*
- * I2O classes / subclasses
- */
-
-/* Class ID and Code Assignments
- * (LCT.ClassID.Version field)
- */
-#define I2O_CLASS_VERSION_10 0x00
-#define I2O_CLASS_VERSION_11 0x01
-
-/* Class code names
- * (from v1.5 Table 6-1 Class Code Assignments.)
- */
-
-#define I2O_CLASS_EXECUTIVE 0x000
-#define I2O_CLASS_DDM 0x001
-#define I2O_CLASS_RANDOM_BLOCK_STORAGE 0x010
-#define I2O_CLASS_SEQUENTIAL_STORAGE 0x011
-#define I2O_CLASS_LAN 0x020
-#define I2O_CLASS_WAN 0x030
-#define I2O_CLASS_FIBRE_CHANNEL_PORT 0x040
-#define I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL 0x041
-#define I2O_CLASS_SCSI_PERIPHERAL 0x051
-#define I2O_CLASS_ATE_PORT 0x060
-#define I2O_CLASS_ATE_PERIPHERAL 0x061
-#define I2O_CLASS_FLOPPY_CONTROLLER 0x070
-#define I2O_CLASS_FLOPPY_DEVICE 0x071
-#define I2O_CLASS_BUS_ADAPTER_PORT 0x080
-#define I2O_CLASS_PEER_TRANSPORT_AGENT 0x090
-#define I2O_CLASS_PEER_TRANSPORT 0x091
-
-/* Rest of 0x092 - 0x09f reserved for peer-to-peer classes
- */
-
-#define I2O_CLASS_MATCH_ANYCLASS 0xffffffff
-
-/* Subclasses
- */
-
-#define I2O_SUBCLASS_i960 0x001
-#define I2O_SUBCLASS_HDM 0x020
-#define I2O_SUBCLASS_ISM 0x021
-
-/* Operation functions */
-
-#define I2O_PARAMS_FIELD_GET 0x0001
-#define I2O_PARAMS_LIST_GET 0x0002
-#define I2O_PARAMS_MORE_GET 0x0003
-#define I2O_PARAMS_SIZE_GET 0x0004
-#define I2O_PARAMS_TABLE_GET 0x0005
-#define I2O_PARAMS_FIELD_SET 0x0006
-#define I2O_PARAMS_LIST_SET 0x0007
-#define I2O_PARAMS_ROW_ADD 0x0008
-#define I2O_PARAMS_ROW_DELETE 0x0009
-#define I2O_PARAMS_TABLE_CLEAR 0x000A
-
-/*
- * I2O serial number conventions / formats
- * (circa v1.5)
- */
-
-#define I2O_SNFORMAT_UNKNOWN 0
-#define I2O_SNFORMAT_BINARY 1
-#define I2O_SNFORMAT_ASCII 2
-#define I2O_SNFORMAT_UNICODE 3
-#define I2O_SNFORMAT_LAN48_MAC 4
-#define I2O_SNFORMAT_WAN 5
-
-/* Plus new in v2.0 (Yellowstone pdf doc)
- */
-
-#define I2O_SNFORMAT_LAN64_MAC 6
-#define I2O_SNFORMAT_DDM 7
-#define I2O_SNFORMAT_IEEE_REG64 8
-#define I2O_SNFORMAT_IEEE_REG128 9
-#define I2O_SNFORMAT_UNKNOWN2 0xff
-
-/* Transaction Reply Lists (TRL) Control Word structure */
-
-#define TRL_SINGLE_FIXED_LENGTH 0x00
-#define TRL_SINGLE_VARIABLE_LENGTH 0x40
-#define TRL_MULTIPLE_FIXED_LENGTH 0x80
-
-/*
- * Messaging API values
- */
-
-#define I2O_CMD_ADAPTER_ASSIGN 0xB3
-#define I2O_CMD_ADAPTER_READ 0xB2
-#define I2O_CMD_ADAPTER_RELEASE 0xB5
-#define I2O_CMD_BIOS_INFO_SET 0xA5
-#define I2O_CMD_BOOT_DEVICE_SET 0xA7
-#define I2O_CMD_CONFIG_VALIDATE 0xBB
-#define I2O_CMD_CONN_SETUP 0xCA
-#define I2O_CMD_DDM_DESTROY 0xB1
-#define I2O_CMD_DDM_ENABLE 0xD5
-#define I2O_CMD_DDM_QUIESCE 0xC7
-#define I2O_CMD_DDM_RESET 0xD9
-#define I2O_CMD_DDM_SUSPEND 0xAF
-#define I2O_CMD_DEVICE_ASSIGN 0xB7
-#define I2O_CMD_DEVICE_RELEASE 0xB9
-#define I2O_CMD_HRT_GET 0xA8
-#define I2O_CMD_ADAPTER_CLEAR 0xBE
-#define I2O_CMD_ADAPTER_CONNECT 0xC9
-#define I2O_CMD_ADAPTER_RESET 0xBD
-#define I2O_CMD_LCT_NOTIFY 0xA2
-#define I2O_CMD_OUTBOUND_INIT 0xA1
-#define I2O_CMD_PATH_ENABLE 0xD3
-#define I2O_CMD_PATH_QUIESCE 0xC5
-#define I2O_CMD_PATH_RESET 0xD7
-#define I2O_CMD_STATIC_MF_CREATE 0xDD
-#define I2O_CMD_STATIC_MF_RELEASE 0xDF
-#define I2O_CMD_STATUS_GET 0xA0
-#define I2O_CMD_SW_DOWNLOAD 0xA9
-#define I2O_CMD_SW_UPLOAD 0xAB
-#define I2O_CMD_SW_REMOVE 0xAD
-#define I2O_CMD_SYS_ENABLE 0xD1
-#define I2O_CMD_SYS_MODIFY 0xC1
-#define I2O_CMD_SYS_QUIESCE 0xC3
-#define I2O_CMD_SYS_TAB_SET 0xA3
-
-#define I2O_CMD_UTIL_NOP 0x00
-#define I2O_CMD_UTIL_ABORT 0x01
-#define I2O_CMD_UTIL_CLAIM 0x09
-#define I2O_CMD_UTIL_RELEASE 0x0B
-#define I2O_CMD_UTIL_PARAMS_GET 0x06
-#define I2O_CMD_UTIL_PARAMS_SET 0x05
-#define I2O_CMD_UTIL_EVT_REGISTER 0x13
-#define I2O_CMD_UTIL_EVT_ACK 0x14
-#define I2O_CMD_UTIL_CONFIG_DIALOG 0x10
-#define I2O_CMD_UTIL_DEVICE_RESERVE 0x0D
-#define I2O_CMD_UTIL_DEVICE_RELEASE 0x0F
-#define I2O_CMD_UTIL_LOCK 0x17
-#define I2O_CMD_UTIL_LOCK_RELEASE 0x19
-#define I2O_CMD_UTIL_REPLY_FAULT_NOTIFY 0x15
-
-#define I2O_CMD_SCSI_EXEC 0x81
-#define I2O_CMD_SCSI_ABORT 0x83
-#define I2O_CMD_SCSI_BUSRESET 0x27
-
-#define I2O_CMD_BLOCK_READ 0x30
-#define I2O_CMD_BLOCK_WRITE 0x31
-#define I2O_CMD_BLOCK_CFLUSH 0x37
-#define I2O_CMD_BLOCK_MLOCK 0x49
-#define I2O_CMD_BLOCK_MUNLOCK 0x4B
-#define I2O_CMD_BLOCK_MMOUNT 0x41
-#define I2O_CMD_BLOCK_MEJECT 0x43
-
-#define I2O_PRIVATE_MSG 0xFF
-
-/*
- * Init Outbound Q status
- */
-
-#define I2O_CMD_OUTBOUND_INIT_IN_PROGRESS 0x01
-#define I2O_CMD_OUTBOUND_INIT_REJECTED 0x02
-#define I2O_CMD_OUTBOUND_INIT_FAILED 0x03
-#define I2O_CMD_OUTBOUND_INIT_COMPLETE 0x04
-
-/*
- * I2O Get Status State values
- */
-
-#define ADAPTER_STATE_INITIALIZING 0x01
-#define ADAPTER_STATE_RESET 0x02
-#define ADAPTER_STATE_HOLD 0x04
-#define ADAPTER_STATE_READY 0x05
-#define ADAPTER_STATE_OPERATIONAL 0x08
-#define ADAPTER_STATE_FAILED 0x10
-#define ADAPTER_STATE_FAULTED 0x11
-
-/* I2O API function return values */
-
-#define I2O_RTN_NO_ERROR 0
-#define I2O_RTN_NOT_INIT 1
-#define I2O_RTN_FREE_Q_EMPTY 2
-#define I2O_RTN_TCB_ERROR 3
-#define I2O_RTN_TRANSACTION_ERROR 4
-#define I2O_RTN_ADAPTER_ALREADY_INIT 5
-#define I2O_RTN_MALLOC_ERROR 6
-#define I2O_RTN_ADPTR_NOT_REGISTERED 7
-#define I2O_RTN_MSG_REPLY_TIMEOUT 8
-#define I2O_RTN_NO_STATUS 9
-#define I2O_RTN_NO_FIRM_VER 10
-#define I2O_RTN_NO_LINK_SPEED 11
-
-/* Reply message status defines for all messages */
-
-#define I2O_REPLY_STATUS_SUCCESS 0x00
-#define I2O_REPLY_STATUS_ABORT_DIRTY 0x01
-#define I2O_REPLY_STATUS_ABORT_NO_DATA_TRANSFER 0x02
-#define I2O_REPLY_STATUS_ABORT_PARTIAL_TRANSFER 0x03
-#define I2O_REPLY_STATUS_ERROR_DIRTY 0x04
-#define I2O_REPLY_STATUS_ERROR_NO_DATA_TRANSFER 0x05
-#define I2O_REPLY_STATUS_ERROR_PARTIAL_TRANSFER 0x06
-#define I2O_REPLY_STATUS_PROCESS_ABORT_DIRTY 0x08
-#define I2O_REPLY_STATUS_PROCESS_ABORT_NO_DATA_TRANSFER 0x09
-#define I2O_REPLY_STATUS_PROCESS_ABORT_PARTIAL_TRANSFER 0x0A
-#define I2O_REPLY_STATUS_TRANSACTION_ERROR 0x0B
-#define I2O_REPLY_STATUS_PROGRESS_REPORT 0x80
-
-/* Status codes and Error Information for Parameter functions */
-
-#define I2O_PARAMS_STATUS_SUCCESS 0x00
-#define I2O_PARAMS_STATUS_BAD_KEY_ABORT 0x01
-#define I2O_PARAMS_STATUS_BAD_KEY_CONTINUE 0x02
-#define I2O_PARAMS_STATUS_BUFFER_FULL 0x03
-#define I2O_PARAMS_STATUS_BUFFER_TOO_SMALL 0x04
-#define I2O_PARAMS_STATUS_FIELD_UNREADABLE 0x05
-#define I2O_PARAMS_STATUS_FIELD_UNWRITEABLE 0x06
-#define I2O_PARAMS_STATUS_INSUFFICIENT_FIELDS 0x07
-#define I2O_PARAMS_STATUS_INVALID_GROUP_ID 0x08
-#define I2O_PARAMS_STATUS_INVALID_OPERATION 0x09
-#define I2O_PARAMS_STATUS_NO_KEY_FIELD 0x0A
-#define I2O_PARAMS_STATUS_NO_SUCH_FIELD 0x0B
-#define I2O_PARAMS_STATUS_NON_DYNAMIC_GROUP 0x0C
-#define I2O_PARAMS_STATUS_OPERATION_ERROR 0x0D
-#define I2O_PARAMS_STATUS_SCALAR_ERROR 0x0E
-#define I2O_PARAMS_STATUS_TABLE_ERROR 0x0F
-#define I2O_PARAMS_STATUS_WRONG_GROUP_TYPE 0x10
-
-/* DetailedStatusCode defines for Executive, DDM, Util and Transaction error
- * messages: Table 3-2 Detailed Status Codes.*/
-
-#define I2O_DSC_SUCCESS 0x0000
-#define I2O_DSC_BAD_KEY 0x0002
-#define I2O_DSC_TCL_ERROR 0x0003
-#define I2O_DSC_REPLY_BUFFER_FULL 0x0004
-#define I2O_DSC_NO_SUCH_PAGE 0x0005
-#define I2O_DSC_INSUFFICIENT_RESOURCE_SOFT 0x0006
-#define I2O_DSC_INSUFFICIENT_RESOURCE_HARD 0x0007
-#define I2O_DSC_CHAIN_BUFFER_TOO_LARGE 0x0009
-#define I2O_DSC_UNSUPPORTED_FUNCTION 0x000A
-#define I2O_DSC_DEVICE_LOCKED 0x000B
-#define I2O_DSC_DEVICE_RESET 0x000C
-#define I2O_DSC_INAPPROPRIATE_FUNCTION 0x000D
-#define I2O_DSC_INVALID_INITIATOR_ADDRESS 0x000E
-#define I2O_DSC_INVALID_MESSAGE_FLAGS 0x000F
-#define I2O_DSC_INVALID_OFFSET 0x0010
-#define I2O_DSC_INVALID_PARAMETER 0x0011
-#define I2O_DSC_INVALID_REQUEST 0x0012
-#define I2O_DSC_INVALID_TARGET_ADDRESS 0x0013
-#define I2O_DSC_MESSAGE_TOO_LARGE 0x0014
-#define I2O_DSC_MESSAGE_TOO_SMALL 0x0015
-#define I2O_DSC_MISSING_PARAMETER 0x0016
-#define I2O_DSC_TIMEOUT 0x0017
-#define I2O_DSC_UNKNOWN_ERROR 0x0018
-#define I2O_DSC_UNKNOWN_FUNCTION 0x0019
-#define I2O_DSC_UNSUPPORTED_VERSION 0x001A
-#define I2O_DSC_DEVICE_BUSY 0x001B
-#define I2O_DSC_DEVICE_NOT_AVAILABLE 0x001C
-
-/* Device Claim Types */
-#define I2O_CLAIM_PRIMARY 0x01000000
-#define I2O_CLAIM_MANAGEMENT 0x02000000
-#define I2O_CLAIM_AUTHORIZED 0x03000000
-#define I2O_CLAIM_SECONDARY 0x04000000
-
-/* Message header defines for VersionOffset */
-#define I2OVER15 0x0001
-#define I2OVER20 0x0002
-/* Default is 1.5, FIXME: Need support for both 1.5 and 2.0 */
-#define I2OVERSION I2OVER15
-#define SGL_OFFSET_0 I2OVERSION
-#define SGL_OFFSET_4 (0x0040 | I2OVERSION)
-#define SGL_OFFSET_5 (0x0050 | I2OVERSION)
-#define SGL_OFFSET_6 (0x0060 | I2OVERSION)
-#define SGL_OFFSET_7 (0x0070 | I2OVERSION)
-#define SGL_OFFSET_8 (0x0080 | I2OVERSION)
-#define SGL_OFFSET_9 (0x0090 | I2OVERSION)
-#define SGL_OFFSET_10 (0x00A0 | I2OVERSION)
-#define SGL_OFFSET_12 (0x00C0 | I2OVERSION)
-
-#define TRL_OFFSET_5 (0x0050 | I2OVERSION)
-#define TRL_OFFSET_6 (0x0060 | I2OVERSION)
-
- /* msg header defines for MsgFlags */
-#define MSG_STATIC 0x0100
-#define MSG_64BIT_CNTXT 0x0200
-#define MSG_MULTI_TRANS 0x1000
-#define MSG_FAIL 0x2000
-#define MSG_LAST 0x4000
-#define MSG_REPLY 0x8000
-
- /* minimum size msg */
-#define THREE_WORD_MSG_SIZE 0x00030000
-#define FOUR_WORD_MSG_SIZE 0x00040000
-#define FIVE_WORD_MSG_SIZE 0x00050000
-#define SIX_WORD_MSG_SIZE 0x00060000
-#define SEVEN_WORD_MSG_SIZE 0x00070000
-#define EIGHT_WORD_MSG_SIZE 0x00080000
-#define NINE_WORD_MSG_SIZE 0x00090000
-#define TEN_WORD_MSG_SIZE 0x000A0000
-#define I2O_MESSAGE_SIZE(x) ((x)<<16)
-
-
-/* Special TID Assignments */
-
-#define ADAPTER_TID 0
-#define HOST_TID 1
-
-#define MSG_FRAME_SIZE 128
-#define NMBR_MSG_FRAMES 128
-
-#define MSG_POOL_SIZE 16384
-
-#define I2O_POST_WAIT_OK 0
-#define I2O_POST_WAIT_TIMEOUT -ETIMEDOUT
-
-
-#endif /* __KERNEL__ */
-
-#endif /* _SCSI_I2O_H */
diff --git a/drivers/scsi/dpt/dpti_ioctl.h b/drivers/scsi/dpt/dpti_ioctl.h
deleted file mode 100644
index 25e9251f8c78..000000000000
--- a/drivers/scsi/dpt/dpti_ioctl.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/***************************************************************************
- dpti_ioctl.h - description
- -------------------
- begin : Thu Sep 7 2000
- copyright : (C) 2001 by Adaptec
-
- See Documentation/scsi/dpti.rst for history, notes, license info
- and credits
- ***************************************************************************/
-
-/***************************************************************************
- * *
- * *
- ***************************************************************************/
-
-/***************************************************************************
- * This file is generated from osd_unix.h *
- * *************************************************************************/
-
-#ifndef _dpti_ioctl_h
-#define _dpti_ioctl_h
-
-// IOCTL interface commands
-
-#ifndef _IOWR
-# define _IOWR(x,y,z) (((x)<<8)|y)
-#endif
-#ifndef _IOW
-# define _IOW(x,y,z) (((x)<<8)|y)
-#endif
-#ifndef _IOR
-# define _IOR(x,y,z) (((x)<<8)|y)
-#endif
-#ifndef _IO
-# define _IO(x,y) (((x)<<8)|y)
-#endif
-/* EATA PassThrough Command */
-#define EATAUSRCMD _IOWR('D',65,EATA_CP)
-/* Set Debug Level If Enabled */
-#define DPT_DEBUG _IOW('D',66,int)
-/* Get Signature Structure */
-#define DPT_SIGNATURE _IOR('D',67,dpt_sig_S)
-#if defined __bsdi__
-#define DPT_SIGNATURE_PACKED _IOR('D',67,dpt_sig_S_Packed)
-#endif
-/* Get Number Of DPT Adapters */
-#define DPT_NUMCTRLS _IOR('D',68,int)
-/* Get Adapter Info Structure */
-#define DPT_CTRLINFO _IOR('D',69,CtrlInfo)
-/* Get Statistics If Enabled */
-#define DPT_STATINFO _IO('D',70)
-/* Clear Stats If Enabled */
-#define DPT_CLRSTAT _IO('D',71)
-/* Get System Info Structure */
-#define DPT_SYSINFO _IOR('D',72,sysInfo_S)
-/* Set Timeout Value */
-#define DPT_TIMEOUT _IO('D',73)
-/* Get config Data */
-#define DPT_CONFIG _IO('D',74)
-/* Get Blink LED Code */
-#define DPT_BLINKLED _IOR('D',75,int)
-/* Get Statistical information (if available) */
-#define DPT_STATS_INFO _IOR('D',80,STATS_DATA)
-/* Clear the statistical information */
-#define DPT_STATS_CLEAR _IO('D',81)
-/* Get Performance metrics */
-#define DPT_PERF_INFO _IOR('D',82,dpt_perf_t)
-/* Send an I2O command */
-#define I2OUSRCMD _IO('D',76)
-/* Inform driver to re-acquire LCT information */
-#define I2ORESCANCMD _IO('D',77)
-/* Inform driver to reset adapter */
-#define I2ORESETCMD _IO('D',78)
-/* See if the target is mounted */
-#define DPT_TARGET_BUSY _IOR('D',79, TARGET_BUSY_T)
-
-
- /* Structure Returned From Get Controller Info */
-
-typedef struct {
- uCHAR state; /* Operational state */
- uCHAR id; /* Host adapter SCSI id */
- int vect; /* Interrupt vector number */
- int base; /* Base I/O address */
- int njobs; /* # of jobs sent to HA */
- int qdepth; /* Controller queue depth. */
- int wakebase; /* mpx wakeup base index. */
- uINT SGsize; /* Scatter/Gather list size. */
- unsigned heads; /* heads for drives on cntlr. */
- unsigned sectors; /* sectors for drives on cntlr. */
- uCHAR do_drive32; /* Flag for Above 16 MB Ability */
- uCHAR BusQuiet; /* SCSI Bus Quiet Flag */
- char idPAL[4]; /* 4 Bytes Of The ID Pal */
- uCHAR primary; /* 1 For Primary, 0 For Secondary */
- uCHAR eataVersion; /* EATA Version */
- uINT cpLength; /* EATA Command Packet Length */
- uINT spLength; /* EATA Status Packet Length */
- uCHAR drqNum; /* DRQ Index (0,5,6,7) */
- uCHAR flag1; /* EATA Flags 1 (Byte 9) */
- uCHAR flag2; /* EATA Flags 2 (Byte 30) */
-} CtrlInfo;
-
-typedef struct {
- uSHORT length; // Remaining length of this
- uSHORT drvrHBAnum; // Relative HBA # used by the driver
- uINT baseAddr; // Base I/O address
- uSHORT blinkState; // Blink LED state (0=Not in blink LED)
- uCHAR pciBusNum; // PCI Bus # (Optional)
- uCHAR pciDeviceNum; // PCI Device # (Optional)
- uSHORT hbaFlags; // Miscellaneous HBA flags
- uSHORT Interrupt; // Interrupt set for this device.
-# if (defined(_DPT_ARC))
- uINT baseLength;
- ADAPTER_OBJECT *AdapterObject;
- LARGE_INTEGER DmaLogicalAddress;
- PVOID DmaVirtualAddress;
- LARGE_INTEGER ReplyLogicalAddress;
- PVOID ReplyVirtualAddress;
-# else
- uINT reserved1; // Reserved for future expansion
- uINT reserved2; // Reserved for future expansion
- uINT reserved3; // Reserved for future expansion
-# endif
-} drvrHBAinfo_S;
-
-typedef struct TARGET_BUSY
-{
- uLONG channel;
- uLONG id;
- uLONG lun;
- uLONG isBusy;
-} TARGET_BUSY_T;
-
-#endif
-
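An illustrative sketch, not part of the removed sources: user space reached the commands above through the character device the driver registers under DPTI_I2O_MAJOR. The /dev/dpti0 node name and the error handling below are assumptions; the ioctl number simply mirrors the DPT_BLINKLED definition above.

/* Hypothetical user-space caller for the DPT_BLINKLED ioctl sketched above. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>	/* _IOR */
#include <unistd.h>

#define DPT_BLINKLED _IOR('D', 75, int)	/* mirrors the header definition above */

int main(void)
{
	int led = 0;
	int fd = open("/dev/dpti0", O_RDWR);	/* node name is an assumption */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, DPT_BLINKLED, &led) < 0)	/* driver fills in the LED code */
		perror("DPT_BLINKLED");
	else
		printf("blink LED code: 0x%x\n", led);
	close(fd);
	return 0;
}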
diff --git a/drivers/scsi/dpt/dptsig.h b/drivers/scsi/dpt/dptsig.h
deleted file mode 100644
index a6644b332b53..000000000000
--- a/drivers/scsi/dpt/dptsig.h
+++ /dev/null
@@ -1,336 +0,0 @@
-/* BSDI dptsig.h,v 1.7 1998/06/03 19:15:00 karels Exp */
-
-/*
- * Copyright (c) 1996-1999 Distributed Processing Technology Corporation
- * All rights reserved.
- *
- * Redistribution and use in source form, with or without modification, are
- * permitted provided that redistributions of source code must retain the
- * above copyright notice, this list of conditions and the following disclaimer.
- *
- * This software is provided `as is' by Distributed Processing Technology and
- * any express or implied warranties, including, but not limited to, the
- * implied warranties of merchantability and fitness for a particular purpose,
- * are disclaimed. In no event shall Distributed Processing Technology be
- * liable for any direct, indirect, incidental, special, exemplary or
- * consequential damages (including, but not limited to, procurement of
- * substitute goods or services; loss of use, data, or profits; or business
- * interruptions) however caused and on any theory of liability, whether in
- * contract, strict liability, or tort (including negligence or otherwise)
- * arising in any way out of the use of this driver software, even if advised
- * of the possibility of such damage.
- *
- */
-
-#ifndef __DPTSIG_H_
-#define __DPTSIG_H_
-#ifdef _SINIX_ADDON
-#include "dpt.h"
-#endif
-/* DPT SIGNATURE SPEC AND HEADER FILE */
-/* Signature Version 1 (sorry no 'A') */
-
-/* to make sure we are talking the same size under all OS's */
-typedef unsigned char sigBYTE;
-typedef unsigned short sigWORD;
-typedef unsigned int sigINT;
-
-/*
- * use sigWORDLittleEndian for:
- * dsCapabilities
- * dsDeviceSupp
- * dsAdapterSupp
- * dsApplication
- * use sigLONGLittleEndian for:
- * dsOS
- * so that the sig can be standardised to Little Endian
- */
-#if (defined(_DPT_BIG_ENDIAN))
-# define sigWORDLittleEndian(x) ((((x)&0xFF)<<8)|(((x)>>8)&0xFF))
-# define sigLONGLittleEndian(x) \
- ((((x)&0xFF)<<24) | \
- (((x)&0xFF00)<<8) | \
- (((x)&0xFF0000L)>>8) | \
- (((x)&0xFF000000L)>>24))
-#else
-# define sigWORDLittleEndian(x) (x)
-# define sigLONGLittleEndian(x) (x)
-#endif
-
-/* must make sure the structure is not word or double-word aligned */
-/* --------------------------------------------------------------- */
-/* Borland will ignore the following pragma: */
-/* Word alignment is OFF by default.  If in the IDE, make              */
-/* sure that Options | Compiler | Code Generation | Word Alignment */
-/* is not checked. If using BCC, do not use the -a option. */
-
-#ifndef NO_PACK
-#if defined (_DPT_AIX)
-#pragma options align=packed
-#else
-#pragma pack(1)
-#endif /* aix */
-#endif
-/* For the Macintosh */
-#ifdef STRUCTALIGNMENTSUPPORTED
-#pragma options align=mac68k
-#endif
-
-
-/* Current Signature Version - sigBYTE dsSigVersion; */
-/* ------------------------------------------------------------------ */
-#define SIG_VERSION 1
-
-/* Processor Family - sigBYTE dsProcessorFamily; DISTINCT VALUES */
-/* ------------------------------------------------------------------ */
-/* What type of processor the file is meant to run on. */
-/* This will let us know whether to read sigWORDs as high/low or low/high. */
-#define PROC_INTEL 0x00 /* Intel 80x86/ia64 */
-#define PROC_MOTOROLA 0x01 /* Motorola 68K */
-#define PROC_MIPS4000 0x02 /* MIPS RISC 4000 */
-#define PROC_ALPHA 0x03 /* DEC Alpha */
-#define PROC_POWERPC 0x04 /* IBM Power PC */
-#define PROC_i960 0x05 /* Intel i960 */
-#define PROC_ULTRASPARC 0x06 /* SPARC processor */
-
-/* Specific Minimum Processor - sigBYTE dsProcessor;    FLAG BITS */
-/* ------------------------------------------------------------------ */
-/* Different bit definitions dependent on processor_family */
-
-/* PROC_INTEL: */
-#define PROC_8086 0x01 /* Intel 8086 */
-#define PROC_286 0x02 /* Intel 80286 */
-#define PROC_386 0x04 /* Intel 80386 */
-#define PROC_486 0x08 /* Intel 80486 */
-#define PROC_PENTIUM 0x10 /* Intel 586 aka P5 aka Pentium */
-#define PROC_SEXIUM 0x20 /* Intel 686 aka P6 aka Pentium Pro or MMX */
-#define PROC_IA64 0x40 /* Intel IA64 processor */
-
-/* PROC_i960: */
-#define PROC_960RX 0x01 /* Intel 80960RC/RD */
-#define PROC_960HX 0x02 /* Intel 80960HA/HD/HT */
-
-/* PROC_MOTOROLA: */
-#define PROC_68000 0x01 /* Motorola 68000 */
-#define PROC_68010 0x02 /* Motorola 68010 */
-#define PROC_68020 0x04 /* Motorola 68020 */
-#define PROC_68030 0x08 /* Motorola 68030 */
-#define PROC_68040 0x10 /* Motorola 68040 */
-
-/* PROC_POWERPC */
-#define PROC_PPC601 0x01 /* PowerPC 601 */
-#define PROC_PPC603 0x02 /* PowerPC 603 */
-#define PROC_PPC604 0x04 /* PowerPC 604 */
-
-/* PROC_MIPS4000: */
-#define PROC_R4000 0x01 /* MIPS R4000 */
-
-/* Filetype - sigBYTE dsFiletype; DISTINCT VALUES */
-/* ------------------------------------------------------------------ */
-#define FT_EXECUTABLE 0 /* Executable Program */
-#define FT_SCRIPT 1 /* Script/Batch File??? */
-#define FT_HBADRVR 2 /* HBA Driver */
-#define FT_OTHERDRVR 3 /* Other Driver */
-#define FT_IFS 4 /* Installable Filesystem Driver */
-#define FT_ENGINE 5 /* DPT Engine */
-#define FT_COMPDRVR 6 /* Compressed Driver Disk */
-#define FT_LANGUAGE 7 /* Foreign Language file */
-#define FT_FIRMWARE 8 /* Downloadable or actual Firmware */
-#define FT_COMMMODL 9 /* Communications Module */
-#define FT_INT13 10 /* INT 13 style HBA Driver */
-#define FT_HELPFILE 11 /* Help file */
-#define FT_LOGGER 12 /* Event Logger */
-#define FT_INSTALL 13 /* An Install Program */
-#define FT_LIBRARY 14 /* Storage Manager Real-Mode Calls */
-#define FT_RESOURCE 15 /* Storage Manager Resource File */
-#define FT_MODEM_DB 16 /* Storage Manager Modem Database */
-
-/* Filetype flags - sigBYTE dsFiletypeFlags; FLAG BITS */
-/* ------------------------------------------------------------------ */
-#define FTF_DLL 0x01 /* Dynamic Link Library */
-#define FTF_NLM 0x02 /* Netware Loadable Module */
-#define FTF_OVERLAYS 0x04 /* Uses overlays */
-#define FTF_DEBUG 0x08 /* Debug version */
-#define FTF_TSR 0x10 /* TSR */
-#define FTF_SYS 0x20 /* DOS Loadable driver */
-#define FTF_PROTECTED 0x40 /* Runs in protected mode */
-#define FTF_APP_SPEC 0x80 /* Application Specific */
-#define FTF_ROM (FTF_SYS|FTF_TSR) /* Special Case */
-
-/* OEM - sigBYTE dsOEM; DISTINCT VALUES */
-/* ------------------------------------------------------------------ */
-#define OEM_DPT 0 /* DPT */
-#define OEM_ATT 1 /* ATT */
-#define OEM_NEC 2 /* NEC */
-#define OEM_ALPHA 3 /* Alphatronix */
-#define OEM_AST 4 /* AST */
-#define OEM_OLIVETTI 5 /* Olivetti */
-#define OEM_SNI 6 /* Siemens/Nixdorf */
-#define OEM_SUN 7 /* SUN Microsystems */
-
-/* Operating System - sigLONG dsOS; FLAG BITS */
-/* ------------------------------------------------------------------ */
-#define OS_DOS 0x00000001 /* PC/MS-DOS */
-#define OS_WINDOWS 0x00000002 /* Microsoft Windows 3.x */
-#define OS_WINDOWS_NT 0x00000004 /* Microsoft Windows NT */
-#define OS_OS2M 0x00000008 /* OS/2 1.2.x,MS 1.3.0,IBM 1.3.x - Monolithic */
-#define OS_OS2L 0x00000010 /* Microsoft OS/2 1.301 - LADDR */
-#define OS_OS22x 0x00000020 /* IBM OS/2 2.x */
-#define OS_NW286 0x00000040 /* Novell NetWare 286 */
-#define OS_NW386 0x00000080 /* Novell NetWare 386 */
-#define OS_GEN_UNIX 0x00000100 /* Generic Unix */
-#define OS_SCO_UNIX 0x00000200 /* SCO Unix */
-#define OS_ATT_UNIX 0x00000400 /* ATT Unix */
-#define OS_UNIXWARE 0x00000800 /* USL Unix */
-#define OS_INT_UNIX 0x00001000 /* Interactive Unix */
-#define OS_SOLARIS 0x00002000 /* SunSoft Solaris */
-#define OS_QNX 0x00004000 /* QNX for Tom Moch */
-#define OS_NEXTSTEP 0x00008000 /* NeXTSTEP/OPENSTEP/MACH */
-#define OS_BANYAN 0x00010000 /* Banyan Vines */
-#define OS_OLIVETTI_UNIX 0x00020000 /* Olivetti Unix */
-#define OS_MAC_OS 0x00040000 /* Mac OS */
-#define OS_WINDOWS_95 0x00080000 /* Microsoft Windows '95 */
-#define OS_NW4x 0x00100000 /* Novell Netware 4.x */
-#define OS_BSDI_UNIX 0x00200000 /* BSDi Unix BSD/OS 2.0 and up */
-#define OS_AIX_UNIX 0x00400000 /* AIX Unix */
-#define OS_FREE_BSD 0x00800000 /* FreeBSD Unix */
-#define OS_LINUX 0x01000000 /* Linux */
-#define OS_DGUX_UNIX 0x02000000 /* Data General Unix */
-#define OS_SINIX_N 0x04000000 /* SNI SINIX-N */
-#define OS_PLAN9 0x08000000 /* ATT Plan 9 */
-#define OS_TSX 0x10000000 /* SNH TSX-32 */
-
-#define OS_OTHER 0x80000000 /* Other */
-
-/* Capabilities - sigWORD dsCapabilities; FLAG BITS */
-/* ------------------------------------------------------------------ */
-#define CAP_RAID0 0x0001 /* RAID-0 */
-#define CAP_RAID1 0x0002 /* RAID-1 */
-#define CAP_RAID3 0x0004 /* RAID-3 */
-#define CAP_RAID5 0x0008 /* RAID-5 */
-#define CAP_SPAN 0x0010 /* Spanning */
-#define CAP_PASS 0x0020 /* Provides passthrough */
-#define CAP_OVERLAP 0x0040 /* Passthrough supports overlapped commands */
-#define CAP_ASPI 0x0080 /* Supports ASPI Command Requests */
-#define CAP_ABOVE16MB 0x0100 /* ISA Driver supports greater than 16MB */
-#define CAP_EXTEND 0x8000 /* Extended info appears after description */
-#ifdef SNI_MIPS
-#define CAP_CACHEMODE 0x1000 /* dpt_force_cache is set in driver */
-#endif
-
-/* Devices Supported - sigWORD dsDeviceSupp; FLAG BITS */
-/* ------------------------------------------------------------------ */
-#define DEV_DASD 0x0001 /* DASD (hard drives) */
-#define DEV_TAPE 0x0002 /* Tape drives */
-#define DEV_PRINTER 0x0004 /* Printers */
-#define DEV_PROC 0x0008 /* Processors */
-#define DEV_WORM 0x0010 /* WORM drives */
-#define DEV_CDROM 0x0020 /* CD-ROM drives */
-#define DEV_SCANNER 0x0040 /* Scanners */
-#define DEV_OPTICAL 0x0080 /* Optical Drives */
-#define DEV_JUKEBOX 0x0100 /* Jukebox */
-#define DEV_COMM 0x0200 /* Communications Devices */
-#define DEV_OTHER 0x0400 /* Other Devices */
-#define DEV_ALL 0xFFFF /* All SCSI Devices */
-
-/* Adapters Families Supported - sigWORD dsAdapterSupp; FLAG BITS */
-/* ------------------------------------------------------------------ */
-#define ADF_2001 0x0001 /* PM2001 */
-#define ADF_2012A 0x0002 /* PM2012A */
-#define ADF_PLUS_ISA 0x0004 /* PM2011,PM2021 */
-#define ADF_PLUS_EISA 0x0008 /* PM2012B,PM2022 */
-#define ADF_SC3_ISA 0x0010 /* PM2021 */
-#define ADF_SC3_EISA 0x0020 /* PM2022,PM2122, etc */
-#define ADF_SC3_PCI 0x0040 /* SmartCache III PCI */
-#define ADF_SC4_ISA 0x0080 /* SmartCache IV ISA */
-#define ADF_SC4_EISA 0x0100 /* SmartCache IV EISA */
-#define ADF_SC4_PCI 0x0200 /* SmartCache IV PCI */
-#define ADF_SC5_PCI 0x0400 /* Fifth Generation I2O products */
-/*
- * Combinations of products
- */
-#define ADF_ALL_2000 (ADF_2001|ADF_2012A)
-#define ADF_ALL_PLUS (ADF_PLUS_ISA|ADF_PLUS_EISA)
-#define ADF_ALL_SC3 (ADF_SC3_ISA|ADF_SC3_EISA|ADF_SC3_PCI)
-#define ADF_ALL_SC4 (ADF_SC4_ISA|ADF_SC4_EISA|ADF_SC4_PCI)
-#define ADF_ALL_SC5 (ADF_SC5_PCI)
-/* All EATA Caching Products */
-#define ADF_ALL_CACHE (ADF_ALL_PLUS|ADF_ALL_SC3|ADF_ALL_SC4)
-/* All EATA Bus Mastering Products */
-#define ADF_ALL_MASTER (ADF_2012A|ADF_ALL_CACHE)
-/* All EATA Adapter Products */
-#define ADF_ALL_EATA (ADF_2001|ADF_ALL_MASTER)
-#define ADF_ALL ADF_ALL_EATA
-
-/* Application - sigWORD dsApplication; FLAG BITS */
-/* ------------------------------------------------------------------ */
-#define APP_DPTMGR 0x0001 /* DPT Storage Manager */
-#define APP_ENGINE 0x0002 /* DPT Engine */
-#define APP_SYTOS 0x0004 /* Sytron Sytos Plus */
-#define APP_CHEYENNE 0x0008 /* Cheyenne ARCServe + ARCSolo */
-#define APP_MSCDEX 0x0010 /* Microsoft CD-ROM extensions */
-#define APP_NOVABACK 0x0020 /* NovaStor Novaback */
-#define APP_AIM 0x0040 /* Archive Information Manager */
-
-/* Requirements - sigBYTE dsRequirements; FLAG BITS */
-/* ------------------------------------------------------------------ */
-#define REQ_SMARTROM 0x01 /* Requires SmartROM to be present */
-#define REQ_DPTDDL 0x02 /* Requires DPTDDL.SYS to be loaded */
-#define REQ_HBA_DRIVER 0x04 /* Requires an HBA driver to be loaded */
-#define REQ_ASPI_TRAN       0x08    /* Requires an ASPI Transport Module */
-#define REQ_ENGINE 0x10 /* Requires a DPT Engine to be loaded */
-#define REQ_COMM_ENG 0x20 /* Requires a DPT Communications Engine */
-
-/*
- * You may adjust dsDescription_size with an override to a value less than
- * 50 so that the structure allocates less real space.
- */
-#if (!defined(dsDescription_size))
-# define dsDescription_size 50
-#endif
-
-typedef struct dpt_sig {
- char dsSignature[6]; /* ALWAYS "dPtSiG" */
- sigBYTE dsSigVersion; /* signature version (currently 1) */
- sigBYTE dsProcessorFamily; /* what type of processor */
- sigBYTE dsProcessor; /* precise processor */
- sigBYTE dsFiletype; /* type of file */
- sigBYTE dsFiletypeFlags; /* flags to specify load type, etc. */
- sigBYTE dsOEM; /* OEM file was created for */
- sigINT dsOS; /* which Operating systems */
- sigWORD dsCapabilities; /* RAID levels, etc. */
- sigWORD dsDeviceSupp; /* Types of SCSI devices supported */
- sigWORD dsAdapterSupp; /* DPT adapter families supported */
- sigWORD dsApplication; /* applications file is for */
- sigBYTE dsRequirements; /* Other driver dependencies */
- sigBYTE dsVersion; /* 1 */
- sigBYTE dsRevision; /* 'J' */
- sigBYTE dsSubRevision; /* '9' ' ' if N/A */
- sigBYTE dsMonth; /* creation month */
- sigBYTE dsDay; /* creation day */
- sigBYTE dsYear; /* creation year since 1980 (1993=13) */
- /* description (NULL terminated) */
- char dsDescription[dsDescription_size];
-} dpt_sig_S;
-/* 32 bytes minimum - with no description. Put NULL at description[0] */
-/* 81 bytes maximum - with 49 character description plus NULL. */
-
-/* This line added at Roycroft's request */
-/* Microsoft's NT compiler gets confused if you do a pack and don't */
-/* restore it. */
-
-#ifndef NO_UNPACK
-#if defined (_DPT_AIX)
-#pragma options align=reset
-#elif defined (UNPACK_FOUR)
-#pragma pack(4)
-#else
-#pragma pack()
-#endif /* aix */
-#endif
-/* For the Macintosh */
-#ifdef STRUCTALIGNMENTSUPPORTED
-#pragma options align=reset
-#endif
-
-#endif
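An illustrative sketch, not from the removed sources: the comment near the top of dptsig.h says dsCapabilities, dsDeviceSupp, dsAdapterSupp, dsApplication and dsOS are to be stored little-endian via sigWORDLittleEndian()/sigLONGLittleEndian(). The helper name below is hypothetical.

/* Hypothetical helper: normalize a dpt_sig_S to the little-endian layout the
 * signature spec expects; both macros are no-ops on little-endian hosts. */
static void dpt_sig_to_little_endian(dpt_sig_S *sig)
{
	sig->dsCapabilities = sigWORDLittleEndian(sig->dsCapabilities);
	sig->dsDeviceSupp   = sigWORDLittleEndian(sig->dsDeviceSupp);
	sig->dsAdapterSupp  = sigWORDLittleEndian(sig->dsAdapterSupp);
	sig->dsApplication  = sigWORDLittleEndian(sig->dsApplication);
	sig->dsOS           = sigLONGLittleEndian(sig->dsOS);
}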
diff --git a/drivers/scsi/dpt/osd_defs.h b/drivers/scsi/dpt/osd_defs.h
deleted file mode 100644
index de3ae5722982..000000000000
--- a/drivers/scsi/dpt/osd_defs.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/* BSDI osd_defs.h,v 1.4 1998/06/03 19:14:58 karels Exp */
-/*
- * Copyright (c) 1996-1999 Distributed Processing Technology Corporation
- * All rights reserved.
- *
- * Redistribution and use in source form, with or without modification, are
- * permitted provided that redistributions of source code must retain the
- * above copyright notice, this list of conditions and the following disclaimer.
- *
- * This software is provided `as is' by Distributed Processing Technology and
- * any express or implied warranties, including, but not limited to, the
- * implied warranties of merchantability and fitness for a particular purpose,
- * are disclaimed. In no event shall Distributed Processing Technology be
- * liable for any direct, indirect, incidental, special, exemplary or
- * consequential damages (including, but not limited to, procurement of
- * substitute goods or services; loss of use, data, or profits; or business
- * interruptions) however caused and on any theory of liability, whether in
- * contract, strict liability, or tort (including negligence or otherwise)
- * arising in any way out of the use of this driver software, even if advised
- * of the possibility of such damage.
- *
- */
-
-#ifndef _OSD_DEFS_H
-#define _OSD_DEFS_H
-
-/*File - OSD_DEFS.H
- ****************************************************************************
- *
- *Description:
- *
- * This file contains the OS dependent defines. This file is included
- *in osd_util.h and provides the OS specific defines for that file.
- *
- *Copyright Distributed Processing Technology, Corp.
- * 140 Candace Dr.
- * Maitland, Fl. 32751 USA
- * Phone: (407) 830-5522 Fax: (407) 260-5366
- * All Rights Reserved
- *
- *Author: Doug Anderson
- *Date: 1/31/94
- *
- *Editors:
- *
- *Remarks:
- *
- *
- *****************************************************************************/
-
-
-/*Definitions - Defines & Constants ----------------------------------------- */
-
- /* Define the operating system */
-#if (defined(__linux__))
-# define _DPT_LINUX
-#elif (defined(__bsdi__))
-# define _DPT_BSDI
-#elif (defined(__FreeBSD__))
-# define _DPT_FREE_BSD
-#else
-# define _DPT_SCO
-#endif
-
-#if defined (ZIL_CURSES)
-#define _DPT_CURSES
-#else
-#define _DPT_MOTIF
-#endif
-
- /* Redefine 'far' to nothing - no far pointer type required in UNIX */
-#define far
-
- /* Define the mutually exclusive semaphore type */
-#define SEMAPHORE_T unsigned int *
- /* Define a handle to a DLL */
-#define DLL_HANDLE_T unsigned int *
-
-#endif
diff --git a/drivers/scsi/dpt/osd_util.h b/drivers/scsi/dpt/osd_util.h
deleted file mode 100644
index b2613c2eaac7..000000000000
--- a/drivers/scsi/dpt/osd_util.h
+++ /dev/null
@@ -1,358 +0,0 @@
-/* BSDI osd_util.h,v 1.8 1998/06/03 19:14:58 karels Exp */
-
-/*
- * Copyright (c) 1996-1999 Distributed Processing Technology Corporation
- * All rights reserved.
- *
- * Redistribution and use in source form, with or without modification, are
- * permitted provided that redistributions of source code must retain the
- * above copyright notice, this list of conditions and the following disclaimer.
- *
- * This software is provided `as is' by Distributed Processing Technology and
- * any express or implied warranties, including, but not limited to, the
- * implied warranties of merchantability and fitness for a particular purpose,
- * are disclaimed. In no event shall Distributed Processing Technology be
- * liable for any direct, indirect, incidental, special, exemplary or
- * consequential damages (including, but not limited to, procurement of
- * substitute goods or services; loss of use, data, or profits; or business
- * interruptions) however caused and on any theory of liability, whether in
- * contract, strict liability, or tort (including negligence or otherwise)
- * arising in any way out of the use of this driver software, even if advised
- * of the possibility of such damage.
- *
- */
-
-#ifndef __OSD_UTIL_H
-#define __OSD_UTIL_H
-
-/*File - OSD_UTIL.H
- ****************************************************************************
- *
- *Description:
- *
- * This file contains defines and function prototypes that are
- *operating system dependent. The resources defined in this file
- *are not specific to any particular application.
- *
- *Copyright Distributed Processing Technology, Corp.
- * 140 Candace Dr.
- * Maitland, Fl. 32751 USA
- * Phone: (407) 830-5522 Fax: (407) 260-5366
- * All Rights Reserved
- *
- *Author: Doug Anderson
- *Date: 1/7/94
- *
- *Editors:
- *
- *Remarks:
- *
- *
- *****************************************************************************/
-
-
-/*Definitions - Defines & Constants ----------------------------------------- */
-
-/*----------------------------- */
-/* Operating system selections: */
-/*----------------------------- */
-
-/*#define _DPT_MSDOS */
-/*#define _DPT_WIN_3X */
-/*#define _DPT_WIN_4X */
-/*#define _DPT_WIN_NT */
-/*#define _DPT_NETWARE */
-/*#define _DPT_OS2 */
-/*#define _DPT_SCO */
-/*#define _DPT_UNIXWARE */
-/*#define _DPT_SOLARIS */
-/*#define _DPT_NEXTSTEP */
-/*#define _DPT_BANYAN */
-
-/*-------------------------------- */
-/* Include the OS specific defines */
-/*-------------------------------- */
-
-/*#define OS_SELECTION From Above List */
-/*#define SEMAPHORE_T ??? */
-/*#define DLL_HANDLE_T ??? */
-
-#if (defined(KERNEL) && (defined(__FreeBSD__) || defined(__bsdi__)))
-# include "i386/isa/dpt_osd_defs.h"
-#else
-# include "osd_defs.h"
-#endif
-
-#ifndef DPT_UNALIGNED
- #define DPT_UNALIGNED
-#endif
-
-#ifndef DPT_EXPORT
- #define DPT_EXPORT
-#endif
-
-#ifndef DPT_IMPORT
- #define DPT_IMPORT
-#endif
-
-#ifndef DPT_RUNTIME_IMPORT
- #define DPT_RUNTIME_IMPORT DPT_IMPORT
-#endif
-
-/*--------------------- */
-/* OS dependent defines */
-/*--------------------- */
-
-#if defined (_DPT_MSDOS) || defined (_DPT_WIN_3X)
- #define _DPT_16_BIT
-#else
- #define _DPT_32_BIT
-#endif
-
-#if defined (_DPT_SCO) || defined (_DPT_UNIXWARE) || defined (_DPT_SOLARIS) || defined (_DPT_AIX) || defined (SNI_MIPS) || defined (_DPT_BSDI) || defined (_DPT_FREE_BSD) || defined(_DPT_LINUX)
- #define _DPT_UNIX
-#endif
-
-#if defined (_DPT_WIN_3x) || defined (_DPT_WIN_4X) || defined (_DPT_WIN_NT) \
- || defined (_DPT_OS2)
- #define _DPT_DLL_SUPPORT
-#endif
-
-#if !defined (_DPT_MSDOS) && !defined (_DPT_WIN_3X) && !defined (_DPT_NETWARE)
- #define _DPT_PREEMPTIVE
-#endif
-
-#if !defined (_DPT_MSDOS) && !defined (_DPT_WIN_3X)
- #define _DPT_MULTI_THREADED
-#endif
-
-#if !defined (_DPT_MSDOS)
- #define _DPT_MULTI_TASKING
-#endif
-
- /* These exist for platforms that */
- /* chunk when accessing mis-aligned */
- /* data */
-#if defined (SNI_MIPS) || defined (_DPT_SOLARIS)
- #if defined (_DPT_BIG_ENDIAN)
- #if !defined (_DPT_STRICT_ALIGN)
- #define _DPT_STRICT_ALIGN
- #endif
- #endif
-#endif
-
- /* Determine if in C or C++ mode */
-#ifdef __cplusplus
- #define _DPT_CPP
-#else
- #define _DPT_C
-#endif
-
-/*-------------------------------------------------------------------*/
-/* Under Solaris the compiler refuses to accept code like: */
-/* { {"DPT"}, 0, NULL .... }, */
-/* and complains about the {"DPT"} part by saying "cannot use { } */
-/* to initialize char*". */
-/* */
-/* By defining these ugly macros we can get around this and also */
-/* not have to copy and #ifdef large sections of code. I know that */
-/* these macros are *really* ugly, but they should help reduce */
-/* maintenance in the long run. */
-/* */
-/*-------------------------------------------------------------------*/
-#if !defined (DPTSQO)
- #if defined (_DPT_SOLARIS)
- #define DPTSQO
- #define DPTSQC
- #else
- #define DPTSQO {
- #define DPTSQC }
- #endif /* solaris */
-#endif /* DPTSQO */
-
-
-/*---------------------- */
-/* OS dependent typedefs */
-/*---------------------- */
-
-#if defined (_DPT_MSDOS) || defined (_DPT_SCO)
- #define BYTE unsigned char
- #define WORD unsigned short
-#endif
-
-#ifndef _DPT_TYPEDEFS
- #define _DPT_TYPEDEFS
- typedef unsigned char uCHAR;
- typedef unsigned short uSHORT;
- typedef unsigned int uINT;
- typedef unsigned long uLONG;
-
- typedef union {
- uCHAR u8[4];
- uSHORT u16[2];
- uLONG u32;
- } access_U;
-#endif
-
-#if !defined (NULL)
- #define NULL 0
-#endif
-
-
-/*Prototypes - function ----------------------------------------------------- */
-
-#ifdef __cplusplus
- extern "C" { /* Declare all these functions as "C" functions */
-#endif
-
-/*------------------------ */
-/* Byte reversal functions */
-/*------------------------ */
-
- /* Reverses the byte ordering of a 2 byte variable */
-#if (!defined(osdSwap2))
- uSHORT osdSwap2(DPT_UNALIGNED uSHORT *);
-#endif // !osdSwap2
-
- /* Reverses the byte ordering of a 4 byte variable and shifts left 8 bits */
-#if (!defined(osdSwap3))
- uLONG osdSwap3(DPT_UNALIGNED uLONG *);
-#endif // !osdSwap3
-
-
-#ifdef _DPT_NETWARE
- #include "novpass.h" /* For DPT_Bswapl() prototype */
- /* Inline the byte swap */
- #ifdef __cplusplus
- inline uLONG osdSwap4(uLONG *inLong) {
- return *inLong = DPT_Bswapl(*inLong);
- }
- #else
- #define osdSwap4(inLong) DPT_Bswapl(inLong)
- #endif // cplusplus
-#else
- /* Reverses the byte ordering of a 4 byte variable */
-# if (!defined(osdSwap4))
- uLONG osdSwap4(DPT_UNALIGNED uLONG *);
-# endif // !osdSwap4
-
- /* The following functions ALWAYS swap regardless of the *
- * presence of DPT_BIG_ENDIAN */
-
- uSHORT trueSwap2(DPT_UNALIGNED uSHORT *);
- uLONG trueSwap4(DPT_UNALIGNED uLONG *);
-
-#endif // netware
-
-
-/*-------------------------------------*
- * Network order swap functions *
- * *
- * These functions/macros will be used *
- * by the structure insert()/extract() *
- * functions. *
- *
- * We will enclose all structure *
- * portability modifications inside *
- * #ifdefs. When we are ready, we *
- * will #define DPT_PORTABLE to begin *
- * using the modifications. *
- *-------------------------------------*/
-uLONG netSwap4(uLONG val);
-
-#if defined (_DPT_BIG_ENDIAN)
-
-// for big-endian we need to swap
-
-#ifndef NET_SWAP_2
-#define NET_SWAP_2(x) (((x) >> 8) | ((x) << 8))
-#endif // NET_SWAP_2
-
-#ifndef NET_SWAP_4
-#define NET_SWAP_4(x) netSwap4((x))
-#endif // NET_SWAP_4
-
-#else
-
-// for little-endian we don't need to do anything
-
-#ifndef NET_SWAP_2
-#define NET_SWAP_2(x) (x)
-#endif // NET_SWAP_2
-
-#ifndef NET_SWAP_4
-#define NET_SWAP_4(x) (x)
-#endif // NET_SWAP_4
-
-#endif // big endian
-
-
-
-/*----------------------------------- */
-/* Run-time loadable module functions */
-/*----------------------------------- */
-
- /* Loads the specified run-time loadable DLL */
-DLL_HANDLE_T osdLoadModule(uCHAR *);
- /* Unloads the specified run-time loadable DLL */
-uSHORT osdUnloadModule(DLL_HANDLE_T);
- /* Returns a pointer to a function inside a run-time loadable DLL */
-void * osdGetFnAddr(DLL_HANDLE_T,uCHAR *);
-
-/*--------------------------------------- */
-/* Mutually exclusive semaphore functions */
-/*--------------------------------------- */
-
- /* Create a named semaphore */
-SEMAPHORE_T osdCreateNamedSemaphore(char *);
- /* Create a mutually exclusive semaphore */
-SEMAPHORE_T osdCreateSemaphore(void);
- /* create an event semaphore */
-SEMAPHORE_T osdCreateEventSemaphore(void);
- /* create a named event semaphore */
-SEMAPHORE_T osdCreateNamedEventSemaphore(char *);
-
- /* Destroy the specified mutually exclusive semaphore object */
-uSHORT osdDestroySemaphore(SEMAPHORE_T);
- /* Request access to the specified mutually exclusive semaphore */
-uLONG osdRequestSemaphore(SEMAPHORE_T,uLONG);
- /* Release access to the specified mutually exclusive semaphore */
-uSHORT osdReleaseSemaphore(SEMAPHORE_T);
- /* wait for an event to happen */
-uLONG osdWaitForEventSemaphore(SEMAPHORE_T, uLONG);
- /* signal an event */
-uLONG osdSignalEventSemaphore(SEMAPHORE_T);
- /* reset the event */
-uLONG osdResetEventSemaphore(SEMAPHORE_T);
-
-/*----------------- */
-/* Thread functions */
-/*----------------- */
-
- /* Releases control to the task switcher in non-preemptive */
- /* multitasking operating systems. */
-void osdSwitchThreads(void);
-
- /* Starts a thread function */
-uLONG osdStartThread(void *,void *);
-
-/* what is my thread id */
-uLONG osdGetThreadID(void);
-
-/* wakes up the specified thread */
-void osdWakeThread(uLONG);
-
-/* osd sleep for x milliseconds */
-void osdSleep(uLONG);
-
-#define DPT_THREAD_PRIORITY_LOWEST 0x00
-#define DPT_THREAD_PRIORITY_NORMAL 0x01
-#define DPT_THREAD_PRIORITY_HIGHEST 0x02
-
-uCHAR osdSetThreadPriority(uLONG tid, uCHAR priority);
-
-#ifdef __cplusplus
-   } /* end the extern "C" declaration */
-#endif
-
-#endif  /* __OSD_UTIL_H */
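An illustrative sketch, not from the removed sources: NET_SWAP_2()/NET_SWAP_4() above are meant for the structure insert()/extract() helpers described in the comment block, collapsing to no-ops on little-endian hosts. The helper below is hypothetical; memcpy comes from <string.h> in user space or <linux/string.h> in the kernel.

/* Hypothetical helper: store a host-order 16-bit value into a packed buffer
 * in the standardized little-endian byte order. */
static void dpt_insert_u16(uCHAR *dst, uSHORT val)
{
	uSHORT wire = NET_SWAP_2(val);	/* byte swap only on big-endian builds */

	memcpy(dst, &wire, sizeof(wire));
}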
diff --git a/drivers/scsi/dpt/sys_info.h b/drivers/scsi/dpt/sys_info.h
deleted file mode 100644
index a4aa1c31ff72..000000000000
--- a/drivers/scsi/dpt/sys_info.h
+++ /dev/null
@@ -1,417 +0,0 @@
-/* BSDI sys_info.h,v 1.6 1998/06/03 19:14:59 karels Exp */
-
-/*
- * Copyright (c) 1996-1999 Distributed Processing Technology Corporation
- * All rights reserved.
- *
- * Redistribution and use in source form, with or without modification, are
- * permitted provided that redistributions of source code must retain the
- * above copyright notice, this list of conditions and the following disclaimer.
- *
- * This software is provided `as is' by Distributed Processing Technology and
- * any express or implied warranties, including, but not limited to, the
- * implied warranties of merchantability and fitness for a particular purpose,
- * are disclaimed. In no event shall Distributed Processing Technology be
- * liable for any direct, indirect, incidental, special, exemplary or
- * consequential damages (including, but not limited to, procurement of
- * substitute goods or services; loss of use, data, or profits; or business
- * interruptions) however caused and on any theory of liability, whether in
- * contract, strict liability, or tort (including negligence or otherwise)
- * arising in any way out of the use of this driver software, even if advised
- * of the possibility of such damage.
- *
- */
-
-#ifndef __SYS_INFO_H
-#define __SYS_INFO_H
-
-/*File - SYS_INFO.H
- ****************************************************************************
- *
- *Description:
- *
- * This file contains structure definitions for the OS dependent
- *layer system information buffers.
- *
- *Copyright Distributed Processing Technology, Corp.
- * 140 Candace Dr.
- * Maitland, Fl. 32751 USA
- * Phone: (407) 830-5522 Fax: (407) 260-5366
- * All Rights Reserved
- *
- *Author: Don Kemper
- *Date: 5/10/94
- *
- *Editors:
- *
- *Remarks:
- *
- *
- *****************************************************************************/
-
-
-/*Include Files ------------------------------------------------------------- */
-
-#include "osd_util.h"
-
-#ifndef NO_PACK
-#if defined (_DPT_AIX)
-#pragma options align=packed
-#else
-#pragma pack(1)
-#endif /* aix */
-#endif // no unpack
-
-
-/*struct - driveParam_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the drive parameters seen during
- *booting.
- *
- *---------------------------------------------------------------------------*/
-
-#ifdef __cplusplus
- struct driveParam_S {
-#else
- typedef struct {
-#endif
-
- uSHORT cylinders; /* Up to 1024 */
- uCHAR heads; /* Up to 255 */
- uCHAR sectors; /* Up to 63 */
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } driveParam_S;
-#endif
-/*driveParam_S - end */
-
-
-/*struct - sysInfo_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the command system information that
- *should be returned by every OS dependent layer.
- *
- *---------------------------------------------------------------------------*/
-
-/*flags - bit definitions */
-#define SI_CMOS_Valid 0x0001
-#define SI_NumDrivesValid 0x0002
-#define SI_ProcessorValid 0x0004
-#define SI_MemorySizeValid 0x0008
-#define SI_DriveParamsValid 0x0010
-#define SI_SmartROMverValid 0x0020
-#define SI_OSversionValid 0x0040
-#define SI_OSspecificValid 0x0080 /* 1 if OS structure returned */
-#define SI_BusTypeValid 0x0100
-
-#define SI_ALL_VALID 0x0FFF /* All Std SysInfo is valid */
-#define SI_NO_SmartROM 0x8000
-
-/*busType - definitions */
-#define SI_ISA_BUS 0x00
-#define SI_MCA_BUS 0x01
-#define SI_EISA_BUS 0x02
-#define SI_PCI_BUS 0x04
-
-#ifdef __cplusplus
- struct sysInfo_S {
-#else
- typedef struct {
-#endif
-
- uCHAR drive0CMOS; /* CMOS Drive 0 Type */
- uCHAR drive1CMOS; /* CMOS Drive 1 Type */
- uCHAR numDrives; /* 0040:0075 contents */
- uCHAR processorFamily; /* Same as DPTSIG's definition */
- uCHAR processorType; /* Same as DPTSIG's definition */
- uCHAR smartROMMajorVersion;
- uCHAR smartROMMinorVersion; /* SmartROM version */
- uCHAR smartROMRevision;
- uSHORT flags; /* See bit definitions above */
- uSHORT conventionalMemSize; /* in KB */
- uINT extendedMemSize; /* in KB */
- uINT osType; /* Same as DPTSIG's definition */
- uCHAR osMajorVersion;
- uCHAR osMinorVersion; /* The OS version */
- uCHAR osRevision;
-#ifdef _SINIX_ADDON
-   uCHAR    busType;               /* See definitions above */
- uSHORT osSubRevision;
- uCHAR pad[2]; /* For alignment */
-#else
- uCHAR osSubRevision;
-   uCHAR    busType;               /* See definitions above */
- uCHAR pad[3]; /* For alignment */
-#endif
- driveParam_S drives[16]; /* SmartROM Logical Drives */
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } sysInfo_S;
-#endif
-/*sysInfo_S - end */
-
-
-/*struct - DOS_Info_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the system information specific to a
- *DOS workstation.
- *
- *---------------------------------------------------------------------------*/
-
-/*flags - bit definitions */
-#define DI_DOS_HIGH 0x01 /* DOS is loaded high */
-#define DI_DPMI_VALID 0x02 /* DPMI version is valid */
-
-#ifdef __cplusplus
- struct DOS_Info_S {
-#else
- typedef struct {
-#endif
-
- uCHAR flags; /* See bit definitions above */
- uSHORT driverLocation; /* SmartROM BIOS address */
- uSHORT DOS_version;
- uSHORT DPMI_version;
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } DOS_Info_S;
-#endif
-/*DOS_Info_S - end */
-
-
-/*struct - Netware_Info_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the system information specific to a
- *Netware machine.
- *
- *---------------------------------------------------------------------------*/
-
-#ifdef __cplusplus
- struct Netware_Info_S {
-#else
- typedef struct {
-#endif
-
- uCHAR driverName[13]; /* ie PM12NW31.DSK */
- uCHAR serverName[48];
- uCHAR netwareVersion; /* The Netware OS version */
- uCHAR netwareSubVersion;
- uCHAR netwareRevision;
- uSHORT maxConnections; /* Probably 250 or 1000 */
- uSHORT connectionsInUse;
- uSHORT maxVolumes;
- uCHAR unused;
- uCHAR SFTlevel;
- uCHAR TTSlevel;
-
- uCHAR clibMajorVersion; /* The CLIB.NLM version */
- uCHAR clibMinorVersion;
- uCHAR clibRevision;
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } Netware_Info_S;
-#endif
-/*Netware_Info_S - end */
-
-
-/*struct - OS2_Info_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the system information specific to an
- *OS/2 machine.
- *
- *---------------------------------------------------------------------------*/
-
-#ifdef __cplusplus
- struct OS2_Info_S {
-#else
- typedef struct {
-#endif
-
- uCHAR something;
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } OS2_Info_S;
-#endif
-/*OS2_Info_S - end */
-
-
-/*struct - WinNT_Info_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the system information specific to a
- *Windows NT machine.
- *
- *---------------------------------------------------------------------------*/
-
-#ifdef __cplusplus
- struct WinNT_Info_S {
-#else
- typedef struct {
-#endif
-
- uCHAR something;
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } WinNT_Info_S;
-#endif
-/*WinNT_Info_S - end */
-
-
-/*struct - SCO_Info_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the system information specific to an
- *SCO UNIX machine.
- *
- *---------------------------------------------------------------------------*/
-
-#ifdef __cplusplus
- struct SCO_Info_S {
-#else
- typedef struct {
-#endif
-
- uCHAR something;
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } SCO_Info_S;
-#endif
-/*SCO_Info_S - end */
-
-
-/*struct - USL_Info_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the system information specific to a
- *USL UNIX machine.
- *
- *---------------------------------------------------------------------------*/
-
-#ifdef __cplusplus
- struct USL_Info_S {
-#else
- typedef struct {
-#endif
-
- uCHAR something;
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } USL_Info_S;
-#endif
-/*USL_Info_S - end */
-
-
- /* Restore default structure packing */
-#ifndef NO_UNPACK
-#if defined (_DPT_AIX)
-#pragma options align=reset
-#elif defined (UNPACK_FOUR)
-#pragma pack(4)
-#else
-#pragma pack()
-#endif /* aix */
-#endif // no unpack
-
-#endif // __SYS_INFO_H
-
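An illustrative sketch, not from the removed sources: the SI_* bits in sysInfo_S.flags tell a consumer which of the optional fields were actually filled in by the OS dependent layer. The function below is hypothetical and assumes a kernel context where printk() is available.

/* Hypothetical consumer: report the OS version only when its validity bit
 * is set in the flags word. */
static void dpt_report_os_version(const sysInfo_S *si)
{
	if (si->flags & SI_OSversionValid)
		printk(KERN_INFO "dpti: OS version %d.%d\n",
		       si->osMajorVersion, si->osMinorVersion);
	else
		printk(KERN_INFO "dpti: OS version not reported\n");
}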
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
deleted file mode 100644
index 2e9155ba7408..000000000000
--- a/drivers/scsi/dpt_i2o.c
+++ /dev/null
@@ -1,3545 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/***************************************************************************
- dpti.c - description
- -------------------
- begin : Thu Sep 7 2000
- copyright : (C) 2000 by Adaptec
-
- July 30, 2001 First version being submitted
- for inclusion in the kernel. V2.4
-
- See Documentation/scsi/dpti.rst for history, notes, license info
- and credits
- ***************************************************************************/
-
-/***************************************************************************
- * *
- * *
- ***************************************************************************/
-/***************************************************************************
- * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
- - Support 2.6 kernel and DMA-mapping
- - ioctl fix for raid tools
- - use schedule_timeout in long long loop
- **************************************************************************/
-
-/*#define DEBUG 1 */
-/*#define UARTDELAY 1 */
-
-#include <linux/module.h>
-#include <linux/pgtable.h>
-
-MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
-MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
-
-////////////////////////////////////////////////////////////////
-
-#include <linux/ioctl.h> /* For SCSI-Passthrough */
-#include <linux/uaccess.h>
-
-#include <linux/stat.h>
-#include <linux/slab.h> /* for kmalloc() */
-#include <linux/pci.h> /* for PCI support */
-#include <linux/proc_fs.h>
-#include <linux/blkdev.h>
-#include <linux/delay.h> /* for udelay */
-#include <linux/interrupt.h>
-#include <linux/kernel.h> /* for printk */
-#include <linux/sched.h>
-#include <linux/reboot.h>
-#include <linux/spinlock.h>
-#include <linux/dma-mapping.h>
-
-#include <linux/timer.h>
-#include <linux/string.h>
-#include <linux/ioport.h>
-#include <linux/mutex.h>
-
-#include <asm/processor.h> /* for boot_cpu_data */
-#include <asm/io.h> /* for virt_to_bus, etc. */
-
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_tcq.h>
-
-#include "dpt/dptsig.h"
-#include "dpti.h"
-
-/*============================================================================
- * Create a binary signature - this is read by dptsig
- * Needed for our management apps
- *============================================================================
- */
-static DEFINE_MUTEX(adpt_mutex);
-static dpt_sig_S DPTI_sig = {
- {'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
-#ifdef __i386__
- PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
-#elif defined(__ia64__)
- PROC_INTEL, PROC_IA64,
-#elif defined(__sparc__)
- PROC_ULTRASPARC, PROC_ULTRASPARC,
-#elif defined(__alpha__)
- PROC_ALPHA, PROC_ALPHA,
-#else
- (-1),(-1),
-#endif
- FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
- ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
- DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
-};
-
-
-
-
-/*============================================================================
- * Globals
- *============================================================================
- */
-
-static DEFINE_MUTEX(adpt_configuration_lock);
-
-static struct i2o_sys_tbl *sys_tbl;
-static dma_addr_t sys_tbl_pa;
-static int sys_tbl_ind;
-static int sys_tbl_len;
-
-static adpt_hba* hba_chain = NULL;
-static int hba_count = 0;
-
-static struct class *adpt_sysfs_class;
-
-static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
-#ifdef CONFIG_COMPAT
-static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
-#endif
-
-static const struct file_operations adpt_fops = {
- .unlocked_ioctl = adpt_unlocked_ioctl,
- .open = adpt_open,
- .release = adpt_close,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = compat_adpt_ioctl,
-#endif
- .llseek = noop_llseek,
-};
-
-/* Structures and definitions for synchronous message posting.
- * See adpt_i2o_post_wait() for description
- * */
-struct adpt_i2o_post_wait_data
-{
- int status;
- u32 id;
- adpt_wait_queue_head_t *wq;
- struct adpt_i2o_post_wait_data *next;
-};
-
-static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
-static u32 adpt_post_wait_id = 0;
-static DEFINE_SPINLOCK(adpt_post_wait_lock);
-
-
-/*============================================================================
- * Functions
- *============================================================================
- */
-
-static inline int dpt_dma64(adpt_hba *pHba)
-{
- return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
-}
-
-static inline u32 dma_high(dma_addr_t addr)
-{
- return upper_32_bits(addr);
-}
-
-static inline u32 dma_low(dma_addr_t addr)
-{
- return (u32)addr;
-}
-
-static u8 adpt_read_blink_led(adpt_hba* host)
-{
- if (host->FwDebugBLEDflag_P) {
- if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
- return readb(host->FwDebugBLEDvalue_P);
- }
- }
- return 0;
-}
-
-/*============================================================================
- * Scsi host template interface functions
- *============================================================================
- */
-
-#ifdef MODULE
-static struct pci_device_id dptids[] = {
- { PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
- { PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
- { 0, }
-};
-#endif
-
-MODULE_DEVICE_TABLE(pci,dptids);
-
-static int adpt_detect(struct scsi_host_template* sht)
-{
- struct pci_dev *pDev = NULL;
- adpt_hba *pHba;
- adpt_hba *next;
-
- PINFO("Detecting Adaptec I2O RAID controllers...\n");
-
-	/* search for all Adaptec I2O RAID cards */
- while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
- if(pDev->device == PCI_DPT_DEVICE_ID ||
- pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
- if(adpt_install_hba(sht, pDev) ){
- PERROR("Could not Init an I2O RAID device\n");
- PERROR("Will not try to detect others.\n");
- return hba_count-1;
- }
- pci_dev_get(pDev);
- }
- }
-
- /* In INIT state, Activate IOPs */
- for (pHba = hba_chain; pHba; pHba = next) {
- next = pHba->next;
-		// Activate does get status, init outbound, and get hrt
- if (adpt_i2o_activate_hba(pHba) < 0) {
- adpt_i2o_delete_hba(pHba);
- }
- }
-
-
- /* Active IOPs in HOLD state */
-
-rebuild_sys_tab:
- if (hba_chain == NULL)
- return 0;
-
- /*
- * If build_sys_table fails, we kill everything and bail
- * as we can't init the IOPs w/o a system table
- */
- if (adpt_i2o_build_sys_table() < 0) {
- adpt_i2o_sys_shutdown();
- return 0;
- }
-
- PDEBUG("HBA's in HOLD state\n");
-
-	/* If an IOP doesn't come online, we need to rebuild the System table */
- for (pHba = hba_chain; pHba; pHba = pHba->next) {
- if (adpt_i2o_online_hba(pHba) < 0) {
- adpt_i2o_delete_hba(pHba);
- goto rebuild_sys_tab;
- }
- }
-
- /* Active IOPs now in OPERATIONAL state */
- PDEBUG("HBA's in OPERATIONAL state\n");
-
- printk("dpti: If you have a lot of devices this could take a few minutes.\n");
- for (pHba = hba_chain; pHba; pHba = next) {
- next = pHba->next;
- printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
- if (adpt_i2o_lct_get(pHba) < 0){
- adpt_i2o_delete_hba(pHba);
- continue;
- }
-
- if (adpt_i2o_parse_lct(pHba) < 0){
- adpt_i2o_delete_hba(pHba);
- continue;
- }
- adpt_inquiry(pHba);
- }
-
- adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
- if (IS_ERR(adpt_sysfs_class)) {
- printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
- adpt_sysfs_class = NULL;
- }
-
- for (pHba = hba_chain; pHba; pHba = next) {
- next = pHba->next;
- if (adpt_scsi_host_alloc(pHba, sht) < 0){
- adpt_i2o_delete_hba(pHba);
- continue;
- }
- pHba->initialized = TRUE;
- pHba->state &= ~DPTI_STATE_RESET;
- if (adpt_sysfs_class) {
- struct device *dev = device_create(adpt_sysfs_class,
- NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
- "dpti%d", pHba->unit);
- if (IS_ERR(dev)) {
- printk(KERN_WARNING"dpti%d: unable to "
- "create device in dpt_i2o class\n",
- pHba->unit);
- }
- }
- }
-
- // Register our control device node
- // nodes will need to be created in /dev to access this
-	// the nodes cannot be created from within the driver
- if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
- adpt_i2o_sys_shutdown();
- return 0;
- }
- return hba_count;
-}
-
-
-static void adpt_release(adpt_hba *pHba)
-{
- struct Scsi_Host *shost = pHba->host;
-
- scsi_remove_host(shost);
-// adpt_i2o_quiesce_hba(pHba);
- adpt_i2o_delete_hba(pHba);
- scsi_host_put(shost);
-}
-
-
-static void adpt_inquiry(adpt_hba* pHba)
-{
- u32 msg[17];
- u32 *mptr;
- u32 *lenptr;
- int direction;
- int scsidir;
- u32 len;
- u32 reqlen;
- u8* buf;
- dma_addr_t addr;
- u8 scb[16];
- s32 rcode;
-
- memset(msg, 0, sizeof(msg));
- buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
- if(!buf){
- printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
- return;
- }
- memset((void*)buf, 0, 36);
-
- len = 36;
- direction = 0x00000000;
- scsidir =0x40000000; // DATA IN (iop<--dev)
-
- if (dpt_dma64(pHba))
- reqlen = 17; // SINGLE SGE, 64 bit
- else
- reqlen = 14; // SINGLE SGE, 32 bit
- /* Stick the headers on */
- msg[0] = reqlen<<16 | SGL_OFFSET_12;
- msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
- msg[2] = 0;
- msg[3] = 0;
- // Adaptec/DPT Private stuff
- msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
- msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
- /* Direction, disconnect ok | sense data | simple queue , CDBLen */
- // I2O_SCB_FLAG_ENABLE_DISCONNECT |
- // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
- // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
- msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;
-
- mptr=msg+7;
-
- memset(scb, 0, sizeof(scb));
- // Write SCSI command into the message - always 16 byte block
- scb[0] = INQUIRY;
- scb[1] = 0;
- scb[2] = 0;
- scb[3] = 0;
- scb[4] = 36;
- scb[5] = 0;
- // Don't care about the rest of scb
-
- memcpy(mptr, scb, sizeof(scb));
- mptr+=4;
- lenptr=mptr++; /* Remember me - fill in when we know */
-
- /* Now fill in the SGList and command */
- *lenptr = len;
- if (dpt_dma64(pHba)) {
- *mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
- *mptr++ = 1 << PAGE_SHIFT;
- *mptr++ = 0xD0000000|direction|len;
- *mptr++ = dma_low(addr);
- *mptr++ = dma_high(addr);
- } else {
- *mptr++ = 0xD0000000|direction|len;
- *mptr++ = addr;
- }
-
-	// Send it on its way
- rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
- if (rcode != 0) {
- sprintf(pHba->detail, "Adaptec I2O RAID");
- printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
- if (rcode != -ETIME && rcode != -EINTR)
- dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
- } else {
- memset(pHba->detail, 0, sizeof(pHba->detail));
- memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
- memcpy(&(pHba->detail[16]), " Model: ", 8);
- memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
- memcpy(&(pHba->detail[40]), " FW: ", 4);
- memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
- pHba->detail[48] = '\0'; /* precautionary */
- dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
- }
- adpt_i2o_status_get(pHba);
- return ;
-}
-
-
-static int adpt_slave_configure(struct scsi_device * device)
-{
- struct Scsi_Host *host = device->host;
-
- if (host->can_queue && device->tagged_supported) {
- scsi_change_queue_depth(device,
- host->can_queue - 1);
- }
- return 0;
-}
-
-static int adpt_queue_lck(struct scsi_cmnd *cmd)
-{
- adpt_hba* pHba = NULL;
- struct adpt_device* pDev = NULL; /* dpt per device information */
-
- /*
- * SCSI REQUEST_SENSE commands will be executed automatically by the
- * Host Adapter for any errors, so they should not be executed
- * explicitly unless the Sense Data is zero indicating that no error
- * occurred.
- */
-
- if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
- cmd->result = (DID_OK << 16);
- scsi_done(cmd);
- return 0;
- }
-
- pHba = (adpt_hba*)cmd->device->host->hostdata[0];
- if (!pHba) {
- return FAILED;
- }
-
- rmb();
- if ((pHba->state) & DPTI_STATE_RESET)
- return SCSI_MLQUEUE_HOST_BUSY;
-
-	// TODO if the cmd->device is offline then I may need to issue a bus rescan
- // followed by a get_lct to see if the device is there anymore
- if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
- /*
- * First command request for this device. Set up a pointer
- * to the device structure. This should be a TEST_UNIT_READY
- * command from scan_scsis_single.
- */
- if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun)) == NULL) {
- // TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
- // with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
- cmd->result = (DID_NO_CONNECT << 16);
- scsi_done(cmd);
- return 0;
- }
- cmd->device->hostdata = pDev;
- }
- pDev->pScsi_dev = cmd->device;
-
- /*
-	 * If we are being called while the device is being reset,
- * delay processing of the command until later.
- */
- if (pDev->state & DPTI_DEV_RESET ) {
- return FAILED;
- }
- return adpt_scsi_to_i2o(pHba, cmd, pDev);
-}
-
-static DEF_SCSI_QCMD(adpt_queue)
-
-static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
- sector_t capacity, int geom[])
-{
- int heads=-1;
- int sectors=-1;
- int cylinders=-1;
-
-	// *** First let's set the default geometry ****
-
-	// If the capacity is less than 0x2000
- if (capacity < 0x2000 ) { // floppy
- heads = 18;
- sectors = 2;
- }
- // else if between 0x2000 and 0x20000
- else if (capacity < 0x20000) {
- heads = 64;
- sectors = 32;
- }
- // else if between 0x20000 and 0x40000
- else if (capacity < 0x40000) {
- heads = 65;
- sectors = 63;
- }
-	// else if between 0x40000 and 0x80000
- else if (capacity < 0x80000) {
- heads = 128;
- sectors = 63;
- }
- // else if greater than 0x80000
- else {
- heads = 255;
- sectors = 63;
- }
- cylinders = sector_div(capacity, heads * sectors);
-
- // Special case if CDROM
- if(sdev->type == 5) { // CDROM
- heads = 252;
- sectors = 63;
- cylinders = 1111;
- }
-
- geom[0] = heads;
- geom[1] = sectors;
- geom[2] = cylinders;
-
- PDEBUG("adpt_bios_param: exit\n");
- return 0;
-}
-
-
-static const char *adpt_info(struct Scsi_Host *host)
-{
- adpt_hba* pHba;
-
- pHba = (adpt_hba *) host->hostdata[0];
- return (char *) (pHba->detail);
-}
-
-static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
-{
- struct adpt_device* d;
- int id;
- int chan;
- adpt_hba* pHba;
- int unit;
-
- // Find HBA (host bus adapter) we are looking for
- mutex_lock(&adpt_configuration_lock);
- for (pHba = hba_chain; pHba; pHba = pHba->next) {
- if (pHba->host == host) {
- break; /* found adapter */
- }
- }
- mutex_unlock(&adpt_configuration_lock);
- if (pHba == NULL) {
- return 0;
- }
- host = pHba->host;
-
- seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
- seq_printf(m, "%s\n", pHba->detail);
- seq_printf(m, "SCSI Host=scsi%d Control Node=/dev/%s irq=%d\n",
- pHba->host->host_no, pHba->name, host->irq);
- seq_printf(m, "\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n",
- host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);
-
- seq_puts(m, "Devices:\n");
- for(chan = 0; chan < MAX_CHANNEL; chan++) {
- for(id = 0; id < MAX_ID; id++) {
- d = pHba->channel[chan].device[id];
- while(d) {
- seq_printf(m,"\t%-24.24s", d->pScsi_dev->vendor);
- seq_printf(m," Rev: %-8.8s\n", d->pScsi_dev->rev);
-
- unit = d->pI2o_dev->lct_data.tid;
- seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%llu) (%s)\n\n",
- unit, (int)d->scsi_channel, (int)d->scsi_id, d->scsi_lun,
- scsi_device_online(d->pScsi_dev)? "online":"offline");
- d = d->next_lun;
- }
- }
- }
- return 0;
-}
-
-/*
- * Turn a pointer to ioctl reply data into a u32 'context'
- */
-static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
-{
-#if BITS_PER_LONG == 32
- return (u32)(unsigned long)reply;
-#else
- ulong flags = 0;
- u32 nr, i;
-
- spin_lock_irqsave(pHba->host->host_lock, flags);
- nr = ARRAY_SIZE(pHba->ioctl_reply_context);
- for (i = 0; i < nr; i++) {
- if (pHba->ioctl_reply_context[i] == NULL) {
- pHba->ioctl_reply_context[i] = reply;
- break;
- }
- }
- spin_unlock_irqrestore(pHba->host->host_lock, flags);
- if (i >= nr) {
- printk(KERN_WARNING"%s: Too many outstanding "
- "ioctl commands\n", pHba->name);
- return (u32)-1;
- }
-
- return i;
-#endif
-}
-
-/*
- * Go from a u32 'context' to a pointer to ioctl reply data.
- */
-static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
-{
-#if BITS_PER_LONG == 32
- return (void *)(unsigned long)context;
-#else
- void *p = pHba->ioctl_reply_context[context];
- pHba->ioctl_reply_context[context] = NULL;
-
- return p;
-#endif
-}
-
-/*===========================================================================
- * Error Handling routines
- *===========================================================================
- */
-
-static int adpt_abort(struct scsi_cmnd * cmd)
-{
- adpt_hba* pHba = NULL; /* host bus adapter structure */
- struct adpt_device* dptdevice; /* dpt per device information */
- u32 msg[5];
- int rcode;
-
- pHba = (adpt_hba*) cmd->device->host->hostdata[0];
- printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
- if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
- printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
- return FAILED;
- }
-
- memset(msg, 0, sizeof(msg));
- msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
- msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
- msg[2] = 0;
- msg[3]= 0;
-	/* Add 1 to avoid firmware treating it as an invalid command */
- msg[4] = scsi_cmd_to_rq(cmd)->tag + 1;
- if (pHba->host)
- spin_lock_irq(pHba->host->host_lock);
- rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
- if (pHba->host)
- spin_unlock_irq(pHba->host->host_lock);
- if (rcode != 0) {
- if(rcode == -EOPNOTSUPP ){
- printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
- return FAILED;
- }
- printk(KERN_INFO"%s: Abort failed.\n",pHba->name);
- return FAILED;
- }
- printk(KERN_INFO"%s: Abort complete.\n",pHba->name);
- return SUCCESS;
-}
-
-
-#define I2O_DEVICE_RESET 0x27
-// This is the same for BLK and SCSI devices
-// NOTE this is wrong in the i2o.h definitions
-// This is not currently supported by our adapter but we issue it anyway
-static int adpt_device_reset(struct scsi_cmnd* cmd)
-{
- adpt_hba* pHba;
- u32 msg[4];
- u32 rcode;
- int old_state;
- struct adpt_device* d = cmd->device->hostdata;
-
- pHba = (void*) cmd->device->host->hostdata[0];
- printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
- if (!d) {
- printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
- return FAILED;
- }
- memset(msg, 0, sizeof(msg));
- msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
- msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
- msg[2] = 0;
- msg[3] = 0;
-
- if (pHba->host)
- spin_lock_irq(pHba->host->host_lock);
- old_state = d->state;
- d->state |= DPTI_DEV_RESET;
- rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
- d->state = old_state;
- if (pHba->host)
- spin_unlock_irq(pHba->host->host_lock);
- if (rcode != 0) {
- if(rcode == -EOPNOTSUPP ){
- printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
- return FAILED;
- }
- printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
- return FAILED;
- } else {
- printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
- return SUCCESS;
- }
-}
-
-
-#define I2O_HBA_BUS_RESET 0x87
-// This version of bus reset is called by the eh_error_handler
-static int adpt_bus_reset(struct scsi_cmnd* cmd)
-{
- adpt_hba* pHba;
- u32 msg[4];
- u32 rcode;
-
- pHba = (adpt_hba*)cmd->device->host->hostdata[0];
- memset(msg, 0, sizeof(msg));
- printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
- msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
- msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
- msg[2] = 0;
- msg[3] = 0;
- if (pHba->host)
- spin_lock_irq(pHba->host->host_lock);
- rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
- if (pHba->host)
- spin_unlock_irq(pHba->host->host_lock);
- if (rcode != 0) {
- printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
- return FAILED;
- } else {
- printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
- return SUCCESS;
- }
-}
-
-// This version of reset is called by the eh_error_handler
-static int __adpt_reset(struct scsi_cmnd* cmd)
-{
- adpt_hba* pHba;
- int rcode;
- char name[32];
-
- pHba = (adpt_hba*)cmd->device->host->hostdata[0];
- strncpy(name, pHba->name, sizeof(name));
- printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n", name, cmd->device->channel, pHba->channel[cmd->device->channel].tid);
- rcode = adpt_hba_reset(pHba);
- if(rcode == 0){
- printk(KERN_WARNING"%s: HBA reset complete\n", name);
- return SUCCESS;
- } else {
- printk(KERN_WARNING"%s: HBA reset failed (%x)\n", name, rcode);
- return FAILED;
- }
-}
-
-static int adpt_reset(struct scsi_cmnd* cmd)
-{
- int rc;
-
- spin_lock_irq(cmd->device->host->host_lock);
- rc = __adpt_reset(cmd);
- spin_unlock_irq(cmd->device->host->host_lock);
-
- return rc;
-}
-
-// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
-static int adpt_hba_reset(adpt_hba* pHba)
-{
- int rcode;
-
- pHba->state |= DPTI_STATE_RESET;
-
- // Activate does get status, init outbound, and get hrt
- if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
- printk(KERN_ERR "%s: Could not activate\n", pHba->name);
- adpt_i2o_delete_hba(pHba);
- return rcode;
- }
-
- if ((rcode=adpt_i2o_build_sys_table()) < 0) {
- adpt_i2o_delete_hba(pHba);
- return rcode;
- }
- PDEBUG("%s: in HOLD state\n",pHba->name);
-
- if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
- adpt_i2o_delete_hba(pHba);
- return rcode;
- }
- PDEBUG("%s: in OPERATIONAL state\n",pHba->name);
-
- if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
- adpt_i2o_delete_hba(pHba);
- return rcode;
- }
-
- if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
- adpt_i2o_delete_hba(pHba);
- return rcode;
- }
- pHba->state &= ~DPTI_STATE_RESET;
-
- scsi_host_complete_all_commands(pHba->host, DID_RESET);
- return 0; /* return success */
-}
-
-/*===========================================================================
- *
- *===========================================================================
- */
-
-
-static void adpt_i2o_sys_shutdown(void)
-{
- adpt_hba *pHba, *pNext;
- struct adpt_i2o_post_wait_data *p1, *old;
-
- printk(KERN_INFO "Shutting down Adaptec I2O controllers.\n");
- printk(KERN_INFO " This could take a few minutes if there are many devices attached\n");
- /* Delete all IOPs from the controller chain */
- /* They should have already been released by the
- * scsi-core
- */
- for (pHba = hba_chain; pHba; pHba = pNext) {
- pNext = pHba->next;
- adpt_i2o_delete_hba(pHba);
- }
-
- /* Remove any timedout entries from the wait queue. */
-// spin_lock_irqsave(&adpt_post_wait_lock, flags);
- /* Nothing should be outstanding at this point so just
- * free them
- */
- for(p1 = adpt_post_wait_queue; p1;) {
- old = p1;
- p1 = p1->next;
- kfree(old);
- }
-// spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
- adpt_post_wait_queue = NULL;
-
- printk(KERN_INFO "Adaptec I2O controllers down.\n");
-}
-
-static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
-{
-
- adpt_hba* pHba = NULL;
- adpt_hba* p = NULL;
- ulong base_addr0_phys = 0;
- ulong base_addr1_phys = 0;
- u32 hba_map0_area_size = 0;
- u32 hba_map1_area_size = 0;
- void __iomem *base_addr_virt = NULL;
- void __iomem *msg_addr_virt = NULL;
- int dma64 = 0;
-
- int raptorFlag = FALSE;
-
- if(pci_enable_device(pDev)) {
- return -EINVAL;
- }
-
- if (pci_request_regions(pDev, "dpt_i2o")) {
- PERROR("dpti: adpt_config_hba: pci request region failed\n");
- return -EINVAL;
- }
-
- pci_set_master(pDev);
-
- /*
- * See if we should enable dma64 mode.
- */
- if (sizeof(dma_addr_t) > 4 &&
- dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32) &&
- dma_set_mask(&pDev->dev, DMA_BIT_MASK(64)) == 0)
- dma64 = 1;
-
- if (!dma64 && dma_set_mask(&pDev->dev, DMA_BIT_MASK(32)) != 0)
- return -EINVAL;
-
- /* adapter only supports message blocks below 4GB */
- dma_set_coherent_mask(&pDev->dev, DMA_BIT_MASK(32));
-
- base_addr0_phys = pci_resource_start(pDev,0);
- hba_map0_area_size = pci_resource_len(pDev,0);
-
- // Check if standard PCI card or single BAR Raptor
- if(pDev->device == PCI_DPT_DEVICE_ID){
- if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
- // Raptor card with this device id needs 4M
- hba_map0_area_size = 0x400000;
- } else { // Not Raptor - it is a PCI card
- if(hba_map0_area_size > 0x100000 ){
- hba_map0_area_size = 0x100000;
- }
- }
- } else {// Raptor split BAR config
- // Use BAR1 in this configuration
- base_addr1_phys = pci_resource_start(pDev,1);
- hba_map1_area_size = pci_resource_len(pDev,1);
- raptorFlag = TRUE;
- }
-
-#if BITS_PER_LONG == 64
- /*
- * The original Adaptec 64 bit driver has this comment here:
- * "x86_64 machines need more optimal mappings"
- *
- * I assume some HBAs report ridiculously large mappings
- * and we need to limit them on platforms with IOMMUs.
- */
- if (raptorFlag == TRUE) {
- if (hba_map0_area_size > 128)
- hba_map0_area_size = 128;
- if (hba_map1_area_size > 524288)
- hba_map1_area_size = 524288;
- } else {
- if (hba_map0_area_size > 524288)
- hba_map0_area_size = 524288;
- }
-#endif
-
- base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
- if (!base_addr_virt) {
- pci_release_regions(pDev);
- PERROR("dpti: adpt_config_hba: io remap failed\n");
- return -EINVAL;
- }
-
- if(raptorFlag == TRUE) {
- msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
- if (!msg_addr_virt) {
- PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
- iounmap(base_addr_virt);
- pci_release_regions(pDev);
- return -EINVAL;
- }
- } else {
- msg_addr_virt = base_addr_virt;
- }
-
- // Allocate and zero the data structure
- pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
- if (!pHba) {
- if (msg_addr_virt != base_addr_virt)
- iounmap(msg_addr_virt);
- iounmap(base_addr_virt);
- pci_release_regions(pDev);
- return -ENOMEM;
- }
-
- mutex_lock(&adpt_configuration_lock);
-
- if(hba_chain != NULL){
- for(p = hba_chain; p->next; p = p->next);
- p->next = pHba;
- } else {
- hba_chain = pHba;
- }
- pHba->next = NULL;
- pHba->unit = hba_count;
- sprintf(pHba->name, "dpti%d", hba_count);
- hba_count++;
-
- mutex_unlock(&adpt_configuration_lock);
-
- pHba->pDev = pDev;
- pHba->base_addr_phys = base_addr0_phys;
-
- // Set up the Virtual Base Address of the I2O Device
- pHba->base_addr_virt = base_addr_virt;
- pHba->msg_addr_virt = msg_addr_virt;
- pHba->irq_mask = base_addr_virt+0x30;
- pHba->post_port = base_addr_virt+0x40;
- pHba->reply_port = base_addr_virt+0x44;
-
- pHba->hrt = NULL;
- pHba->lct = NULL;
- pHba->lct_size = 0;
- pHba->status_block = NULL;
- pHba->post_count = 0;
- pHba->state = DPTI_STATE_RESET;
- pHba->pDev = pDev;
- pHba->devices = NULL;
- pHba->dma64 = dma64;
-
- // Initializing the spinlocks
- spin_lock_init(&pHba->state_lock);
-
- if(raptorFlag == 0){
- printk(KERN_INFO "Adaptec I2O RAID controller"
- " %d at %p size=%x irq=%d%s\n",
- hba_count-1, base_addr_virt,
- hba_map0_area_size, pDev->irq,
- dma64 ? " (64-bit DMA)" : "");
- } else {
- printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
- hba_count-1, pDev->irq,
- dma64 ? " (64-bit DMA)" : "");
- printk(KERN_INFO" BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
- printk(KERN_INFO" BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
- }
-
- if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
- printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
- adpt_i2o_delete_hba(pHba);
- return -EINVAL;
- }
-
- return 0;
-}
-
-
-static void adpt_i2o_delete_hba(adpt_hba* pHba)
-{
- adpt_hba* p1;
- adpt_hba* p2;
- struct i2o_device* d;
- struct i2o_device* next;
- int i;
- int j;
- struct adpt_device* pDev;
- struct adpt_device* pNext;
-
-
- mutex_lock(&adpt_configuration_lock);
- if(pHba->host){
- free_irq(pHba->host->irq, pHba);
- }
- p2 = NULL;
- for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
- if(p1 == pHba) {
- if(p2) {
- p2->next = p1->next;
- } else {
- hba_chain = p1->next;
- }
- break;
- }
- }
-
- hba_count--;
- mutex_unlock(&adpt_configuration_lock);
-
- iounmap(pHba->base_addr_virt);
- pci_release_regions(pHba->pDev);
- if(pHba->msg_addr_virt != pHba->base_addr_virt){
- iounmap(pHba->msg_addr_virt);
- }
- if(pHba->FwDebugBuffer_P)
- iounmap(pHba->FwDebugBuffer_P);
- if(pHba->hrt) {
- dma_free_coherent(&pHba->pDev->dev,
- pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
- pHba->hrt, pHba->hrt_pa);
- }
- if(pHba->lct) {
- dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
- pHba->lct, pHba->lct_pa);
- }
- if(pHba->status_block) {
- dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
- pHba->status_block, pHba->status_block_pa);
- }
- if(pHba->reply_pool) {
- dma_free_coherent(&pHba->pDev->dev,
- pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
- pHba->reply_pool, pHba->reply_pool_pa);
- }
-
- for(d = pHba->devices; d ; d = next){
- next = d->next;
- kfree(d);
- }
- for(i = 0 ; i < pHba->top_scsi_channel ; i++){
- for(j = 0; j < MAX_ID; j++){
- if(pHba->channel[i].device[j] != NULL){
- for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
- pNext = pDev->next_lun;
- kfree(pDev);
- }
- }
- }
- }
- pci_dev_put(pHba->pDev);
- if (adpt_sysfs_class)
- device_destroy(adpt_sysfs_class,
- MKDEV(DPTI_I2O_MAJOR, pHba->unit));
- kfree(pHba);
-
- if(hba_count <= 0){
- unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
- if (adpt_sysfs_class) {
- class_destroy(adpt_sysfs_class);
- adpt_sysfs_class = NULL;
- }
- }
-}
-
-static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun)
-{
- struct adpt_device* d;
-
- if (chan >= MAX_CHANNEL)
- return NULL;
-
- d = pHba->channel[chan].device[id];
- if(!d || d->tid == 0) {
- return NULL;
- }
-
- /* If it is the only lun at that address then this should match*/
- if(d->scsi_lun == lun){
- return d;
- }
-
- /* else we need to look through all the luns */
- for(d=d->next_lun ; d ; d = d->next_lun){
- if(d->scsi_lun == lun){
- return d;
- }
- }
- return NULL;
-}
-
-
-static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
-{
- // I used my own version of the WAIT_QUEUE_HEAD
- // to handle some version differences
- // When embedded in the kernel this could go back to the vanilla one
- ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
- int status = 0;
- ulong flags = 0;
- struct adpt_i2o_post_wait_data *p1, *p2;
- struct adpt_i2o_post_wait_data *wait_data =
- kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
- DECLARE_WAITQUEUE(wait, current);
-
- if (!wait_data)
- return -ENOMEM;
-
- /*
- * The spin locking is needed to keep anyone from playing
- * with the queue pointers and id while we do the same
- */
- spin_lock_irqsave(&adpt_post_wait_lock, flags);
- // TODO we need a MORE unique way of getting ids
- // to support async LCT get
- wait_data->next = adpt_post_wait_queue;
- adpt_post_wait_queue = wait_data;
- adpt_post_wait_id++;
- adpt_post_wait_id &= 0x7fff;
- wait_data->id = adpt_post_wait_id;
- spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
-
- wait_data->wq = &adpt_wq_i2o_post;
- wait_data->status = -ETIMEDOUT;
-
- add_wait_queue(&adpt_wq_i2o_post, &wait);
-
- msg[2] |= 0x80000000 | ((u32)wait_data->id);
- timeout *= HZ;
- if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
- set_current_state(TASK_INTERRUPTIBLE);
- if(pHba->host)
- spin_unlock_irq(pHba->host->host_lock);
- if (!timeout)
- schedule();
- else{
- timeout = schedule_timeout(timeout);
- if (timeout == 0) {
- // I/O issued, but cannot get result in
- // specified time. Freeing resources is
- // dangerous.
- status = -ETIME;
- }
- }
- if(pHba->host)
- spin_lock_irq(pHba->host->host_lock);
- }
- remove_wait_queue(&adpt_wq_i2o_post, &wait);
-
- if(status == -ETIMEDOUT){
- printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
- // We will have to free the wait_data memory during shutdown
- return status;
- }
-
- /* Remove the entry from the queue. */
- p2 = NULL;
- spin_lock_irqsave(&adpt_post_wait_lock, flags);
- for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
- if(p1 == wait_data) {
- if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
- status = -EOPNOTSUPP;
- }
- if(p2) {
- p2->next = p1->next;
- } else {
- adpt_post_wait_queue = p1->next;
- }
- break;
- }
- }
- spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
-
- kfree(wait_data);
-
- return status;
-}
-
-
-static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
-{
-
- u32 m = EMPTY_QUEUE;
- u32 __iomem *msg;
- ulong timeout = jiffies + 30*HZ;
- do {
- rmb();
- m = readl(pHba->post_port);
- if (m != EMPTY_QUEUE) {
- break;
- }
- if(time_after(jiffies,timeout)){
- printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
- return -ETIMEDOUT;
- }
- schedule_timeout_uninterruptible(1);
- } while(m == EMPTY_QUEUE);
-
- msg = pHba->msg_addr_virt + m;
- memcpy_toio(msg, data, len);
- wmb();
-
- //post message
- writel(m, pHba->post_port);
- wmb();
-
- return 0;
-}
-
-
-static void adpt_i2o_post_wait_complete(u32 context, int status)
-{
- struct adpt_i2o_post_wait_data *p1 = NULL;
- /*
- * We need to search through the adpt_post_wait
- * queue to see if the given message is still
- * outstanding. If not, it means that the IOP
- * took longer to respond to the message than we
- * had allowed and the timer has already expired.
- * Not much we can do about that except log
- * it for debug purposes, increase the timeout, and recompile.
- *
- * Lock needed to keep anyone from moving queue pointers
- * around while we're looking through them.
- */
-
- context &= 0x7fff;
-
- spin_lock(&adpt_post_wait_lock);
- for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
- if(p1->id == context) {
- p1->status = status;
- spin_unlock(&adpt_post_wait_lock);
- wake_up_interruptible(p1->wq);
- return;
- }
- }
- spin_unlock(&adpt_post_wait_lock);
- // If this happens we lose commands that probably really completed
- printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
- printk(KERN_DEBUG" Tasks in wait queue:\n");
- for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
- printk(KERN_DEBUG" %d\n",p1->id);
- }
- return;
-}
-
-static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
-{
- u32 msg[8];
- u8* status;
- dma_addr_t addr;
- u32 m = EMPTY_QUEUE ;
- ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);
-
- if(pHba->initialized == FALSE) { // First time reset should be quick
- timeout = jiffies + (25*HZ);
- } else {
- adpt_i2o_quiesce_hba(pHba);
- }
-
- do {
- rmb();
- m = readl(pHba->post_port);
- if (m != EMPTY_QUEUE) {
- break;
- }
- if(time_after(jiffies,timeout)){
- printk(KERN_WARNING"Timeout waiting for message!\n");
- return -ETIMEDOUT;
- }
- schedule_timeout_uninterruptible(1);
- } while (m == EMPTY_QUEUE);
-
- status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
- if(status == NULL) {
- adpt_send_nop(pHba, m);
- printk(KERN_ERR"IOP reset failed - no free memory.\n");
- return -ENOMEM;
- }
-
- msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
- msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
- msg[2]=0;
- msg[3]=0;
- msg[4]=0;
- msg[5]=0;
- msg[6]=dma_low(addr);
- msg[7]=dma_high(addr);
-
- memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
- wmb();
- writel(m, pHba->post_port);
- wmb();
-
- while(*status == 0){
- if(time_after(jiffies,timeout)){
- printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
- /* We lose 4 bytes of "status" here, but we cannot
- free these because the controller may wake up and corrupt
- those bytes at any time */
- /* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
- return -ETIMEDOUT;
- }
- rmb();
- schedule_timeout_uninterruptible(1);
- }
-
- if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
- PDEBUG("%s: Reset in progress...\n", pHba->name);
- // Here we wait for a message frame to become available,
- // indicating that the reset has finished
- do {
- rmb();
- m = readl(pHba->post_port);
- if (m != EMPTY_QUEUE) {
- break;
- }
- if(time_after(jiffies,timeout)){
- printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
- /* We lose 4 bytes of "status" here, but we
- cannot free these because the controller may
- wake up and corrupt those bytes at any time */
- /* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
- return -ETIMEDOUT;
- }
- schedule_timeout_uninterruptible(1);
- } while (m == EMPTY_QUEUE);
- // Flush the offset
- adpt_send_nop(pHba, m);
- }
- adpt_i2o_status_get(pHba);
- if(*status == 0x02 ||
- pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
- printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
- pHba->name);
- } else {
- PDEBUG("%s: Reset completed.\n", pHba->name);
- }
-
- dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
-#ifdef UARTDELAY
- // This delay is to allow someone attached to the card through the debug UART to
- // set up the dump levels that they want before the rest of the initialization sequence
- adpt_delay(20000);
-#endif
- return 0;
-}
-
-
-static int adpt_i2o_parse_lct(adpt_hba* pHba)
-{
- int i;
- int max;
- int tid;
- struct i2o_device *d;
- i2o_lct *lct = pHba->lct;
- u8 bus_no = 0;
- s16 scsi_id;
- u64 scsi_lun;
- u32 buf[10]; // larger than 7, or 8 ...
- struct adpt_device* pDev;
-
- if (lct == NULL) {
- printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
- return -1;
- }
-
- max = lct->table_size;
- max -= 3;
- max /= 9;
-
- for(i=0;i<max;i++) {
- if( lct->lct_entry[i].user_tid != 0xfff){
- /*
- * If we have hidden devices, we need to inform the upper layers about
- * the possible maximum id reference to handle device access when
- * an array is disassembled. This code has no other purpose but to
- * allow us future access to devices that are currently hidden
- * behind arrays, hotspares or have not been configured (JBOD mode).
- */
- if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
- lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
- lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
- continue;
- }
- tid = lct->lct_entry[i].tid;
- // I2O_DPT_DEVICE_INFO_GROUP_NO;
- if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
- continue;
- }
- bus_no = buf[0]>>16;
- scsi_id = buf[1];
- scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
- if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
- printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
- continue;
- }
- if (scsi_id >= MAX_ID){
- printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, bus_no);
- continue;
- }
- if(bus_no > pHba->top_scsi_channel){
- pHba->top_scsi_channel = bus_no;
- }
- if(scsi_id > pHba->top_scsi_id){
- pHba->top_scsi_id = scsi_id;
- }
- if(scsi_lun > pHba->top_scsi_lun){
- pHba->top_scsi_lun = scsi_lun;
- }
- continue;
- }
- d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
- if(d==NULL)
- {
- printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
- return -ENOMEM;
- }
-
- d->controller = pHba;
- d->next = NULL;
-
- memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
-
- d->flags = 0;
- tid = d->lct_data.tid;
- adpt_i2o_report_hba_unit(pHba, d);
- adpt_i2o_install_device(pHba, d);
- }
- bus_no = 0;
- for(d = pHba->devices; d ; d = d->next) {
- if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
- d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){
- tid = d->lct_data.tid;
- // TODO get the bus_no from hrt - but for now they are in order
- //bus_no =
- if(bus_no > pHba->top_scsi_channel){
- pHba->top_scsi_channel = bus_no;
- }
- pHba->channel[bus_no].type = d->lct_data.class_id;
- pHba->channel[bus_no].tid = tid;
- if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
- {
- pHba->channel[bus_no].scsi_id = buf[1];
- PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
- }
- // TODO remove - this is just until we get it from the hrt
- bus_no++;
- if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
- printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
- break;
- }
- }
- }
-
- // Setup adpt_device table
- for(d = pHba->devices; d ; d = d->next) {
- if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
- d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
- d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
-
- tid = d->lct_data.tid;
- scsi_id = -1;
- // I2O_DPT_DEVICE_INFO_GROUP_NO;
- if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
- bus_no = buf[0]>>16;
- scsi_id = buf[1];
- scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
- if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
- continue;
- }
- if (scsi_id >= MAX_ID) {
- continue;
- }
- if( pHba->channel[bus_no].device[scsi_id] == NULL){
- pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
- if(pDev == NULL) {
- return -ENOMEM;
- }
- pHba->channel[bus_no].device[scsi_id] = pDev;
- } else {
- for( pDev = pHba->channel[bus_no].device[scsi_id];
- pDev->next_lun; pDev = pDev->next_lun){
- }
- pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
- if(pDev->next_lun == NULL) {
- return -ENOMEM;
- }
- pDev = pDev->next_lun;
- }
- pDev->tid = tid;
- pDev->scsi_channel = bus_no;
- pDev->scsi_id = scsi_id;
- pDev->scsi_lun = scsi_lun;
- pDev->pI2o_dev = d;
- d->owner = pDev;
- pDev->type = (buf[0])&0xff;
- pDev->flags = (buf[0]>>8)&0xff;
- if(scsi_id > pHba->top_scsi_id){
- pHba->top_scsi_id = scsi_id;
- }
- if(scsi_lun > pHba->top_scsi_lun){
- pHba->top_scsi_lun = scsi_lun;
- }
- }
- if(scsi_id == -1){
- printk(KERN_WARNING"Could not find SCSI ID for %s\n",
- d->lct_data.identity_tag);
- }
- }
- }
- return 0;
-}
-
-
-/*
- * Each I2O controller has a chain of devices on it - these match
- * the useful parts of the LCT of the board.
- */
-
-static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
-{
- mutex_lock(&adpt_configuration_lock);
- d->controller=pHba;
- d->owner=NULL;
- d->next=pHba->devices;
- d->prev=NULL;
- if (pHba->devices != NULL){
- pHba->devices->prev=d;
- }
- pHba->devices=d;
- *d->dev_name = 0;
-
- mutex_unlock(&adpt_configuration_lock);
- return 0;
-}
-
-static int adpt_open(struct inode *inode, struct file *file)
-{
- int minor;
- adpt_hba* pHba;
-
- mutex_lock(&adpt_mutex);
- //TODO check for root access
- //
- minor = iminor(inode);
- if (minor >= hba_count) {
- mutex_unlock(&adpt_mutex);
- return -ENXIO;
- }
- mutex_lock(&adpt_configuration_lock);
- for (pHba = hba_chain; pHba; pHba = pHba->next) {
- if (pHba->unit == minor) {
- break; /* found adapter */
- }
- }
- if (pHba == NULL) {
- mutex_unlock(&adpt_configuration_lock);
- mutex_unlock(&adpt_mutex);
- return -ENXIO;
- }
-
-// if(pHba->in_use){
- // mutex_unlock(&adpt_configuration_lock);
-// return -EBUSY;
-// }
-
- pHba->in_use = 1;
- mutex_unlock(&adpt_configuration_lock);
- mutex_unlock(&adpt_mutex);
-
- return 0;
-}
-
-static int adpt_close(struct inode *inode, struct file *file)
-{
- int minor;
- adpt_hba* pHba;
-
- minor = iminor(inode);
- if (minor >= hba_count) {
- return -ENXIO;
- }
- mutex_lock(&adpt_configuration_lock);
- for (pHba = hba_chain; pHba; pHba = pHba->next) {
- if (pHba->unit == minor) {
- break; /* found adapter */
- }
- }
- mutex_unlock(&adpt_configuration_lock);
- if (pHba == NULL) {
- return -ENXIO;
- }
-
- pHba->in_use = 0;
-
- return 0;
-}
-
-
-static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
-{
- u32 msg[MAX_MESSAGE_SIZE];
- u32* reply = NULL;
- u32 size = 0;
- u32 reply_size = 0;
- u32 __user *user_msg = arg;
- u32 __user * user_reply = NULL;
- void **sg_list = NULL;
- u32 sg_offset = 0;
- u32 sg_count = 0;
- int sg_index = 0;
- u32 i = 0;
- u32 rcode = 0;
- void *p = NULL;
- dma_addr_t addr;
- ulong flags = 0;
-
- memset(&msg, 0, MAX_MESSAGE_SIZE*4);
- // get user msg size in u32s
- if(get_user(size, &user_msg[0])){
- return -EFAULT;
- }
- size = size>>16;
-
- user_reply = &user_msg[size];
- if(size > MAX_MESSAGE_SIZE){
- return -EFAULT;
- }
- size *= 4; // Convert to bytes
-
- /* Copy in the user's I2O command */
- if(copy_from_user(msg, user_msg, size)) {
- return -EFAULT;
- }
- get_user(reply_size, &user_reply[0]);
- reply_size = reply_size>>16;
- if(reply_size > REPLY_FRAME_SIZE){
- reply_size = REPLY_FRAME_SIZE;
- }
- reply_size *= 4;
- reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
- if(reply == NULL) {
- printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
- return -ENOMEM;
- }
- sg_offset = (msg[0]>>4)&0xf;
- msg[2] = 0x40000000; // IOCTL context
- msg[3] = adpt_ioctl_to_context(pHba, reply);
- if (msg[3] == (u32)-1) {
- rcode = -EBUSY;
- goto free;
- }
-
- sg_list = kcalloc(pHba->sg_tablesize, sizeof(*sg_list), GFP_KERNEL);
- if (!sg_list) {
- rcode = -ENOMEM;
- goto free;
- }
- if(sg_offset) {
- // TODO add 64 bit API
- struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
- sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
- if (sg_count > pHba->sg_tablesize){
- printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
- rcode = -EINVAL;
- goto free;
- }
-
- for(i = 0; i < sg_count; i++) {
- int sg_size;
-
- if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
- printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count);
- rcode = -EINVAL;
- goto cleanup;
- }
- sg_size = sg[i].flag_count & 0xffffff;
- /* Allocate memory for the transfer */
- p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
- if(!p) {
- printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
- pHba->name,sg_size,i,sg_count);
- rcode = -ENOMEM;
- goto cleanup;
- }
- sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
- /* Copy in the user's SG buffer if necessary */
- if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
- // sg_simple_element API is 32 bit
- if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
- printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
- rcode = -EFAULT;
- goto cleanup;
- }
- }
- /* sg_simple_element API is 32 bit, but addr < 4GB */
- sg[i].addr_bus = addr;
- }
- }
-
- do {
- /*
- * Stop any new commands from entering the
- * controller while processing the ioctl
- */
- if (pHba->host) {
- scsi_block_requests(pHba->host);
- spin_lock_irqsave(pHba->host->host_lock, flags);
- }
- rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
- if (rcode != 0)
- printk("adpt_i2o_passthru: post wait failed %d %p\n",
- rcode, reply);
- if (pHba->host) {
- spin_unlock_irqrestore(pHba->host->host_lock, flags);
- scsi_unblock_requests(pHba->host);
- }
- } while (rcode == -ETIMEDOUT);
-
- if(rcode){
- goto cleanup;
- }
-
- if(sg_offset) {
- /* Copy the Scatter Gather buffers back to user space */
- u32 j;
- // TODO add 64 bit API
- struct sg_simple_element* sg;
- int sg_size;
-
- // re-acquire the original message to correctly handle the sg copy operation
- memset(&msg, 0, MAX_MESSAGE_SIZE*4);
- // get user msg size in u32s
- if(get_user(size, &user_msg[0])){
- rcode = -EFAULT;
- goto cleanup;
- }
- size = size>>16;
- size *= 4;
- if (size > MAX_MESSAGE_SIZE) {
- rcode = -EINVAL;
- goto cleanup;
- }
- /* Copy in the user's I2O command */
- if (copy_from_user (msg, user_msg, size)) {
- rcode = -EFAULT;
- goto cleanup;
- }
- sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
-
- // TODO add 64 bit API
- sg = (struct sg_simple_element*)(msg + sg_offset);
- for (j = 0; j < sg_count; j++) {
- /* Copy out the SG list to user's buffer if necessary */
- if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
- sg_size = sg[j].flag_count & 0xffffff;
- // sg_simple_element API is 32 bit
- if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
- printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
- rcode = -EFAULT;
- goto cleanup;
- }
- }
- }
- }
-
- /* Copy back the reply to user space */
- if (reply_size) {
- // we wrote our own values for context - now restore the user supplied ones
- if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
- printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
- rcode = -EFAULT;
- }
- if(copy_to_user(user_reply, reply, reply_size)) {
- printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
- rcode = -EFAULT;
- }
- }
-
-
-cleanup:
- if (rcode != -ETIME && rcode != -EINTR) {
- struct sg_simple_element *sg =
- (struct sg_simple_element*) (msg +sg_offset);
- while(sg_index) {
- if(sg_list[--sg_index]) {
- dma_free_coherent(&pHba->pDev->dev,
- sg[sg_index].flag_count & 0xffffff,
- sg_list[sg_index],
- sg[sg_index].addr_bus);
- }
- }
- }
-
-free:
- kfree(sg_list);
- kfree(reply);
- return rcode;
-}
-
-#if defined __ia64__
-static void adpt_ia64_info(sysInfo_S* si)
-{
- // This is all the info we need for now
- // We will add more info as our new
- // management utility requires it
- si->processorType = PROC_IA64;
-}
-#endif
-
-#if defined __sparc__
-static void adpt_sparc_info(sysInfo_S* si)
-{
- // This is all the info we need for now
- // We will add more info as our new
- // management utility requires it
- si->processorType = PROC_ULTRASPARC;
-}
-#endif
-#if defined __alpha__
-static void adpt_alpha_info(sysInfo_S* si)
-{
- // This is all the info we need for now
- // We will add more info as our new
- // management utility requires it
- si->processorType = PROC_ALPHA;
-}
-#endif
-
-#if defined __i386__
-
-#include <uapi/asm/vm86.h>
-
-static void adpt_i386_info(sysInfo_S* si)
-{
- // This is all the info we need for now
- // We will add more info as our new
- // management utility requires it
- switch (boot_cpu_data.x86) {
- case CPU_386:
- si->processorType = PROC_386;
- break;
- case CPU_486:
- si->processorType = PROC_486;
- break;
- case CPU_586:
- si->processorType = PROC_PENTIUM;
- break;
- default: // Just in case
- si->processorType = PROC_PENTIUM;
- break;
- }
-}
-#endif
-
-/*
- * This routine returns information about the system. This does not affect
- * any logic, and if the info is wrong it doesn't matter.
- */
-
-/* Get all the info we cannot get from kernel services */
-static int adpt_system_info(void __user *buffer)
-{
- sysInfo_S si;
-
- memset(&si, 0, sizeof(si));
-
- si.osType = OS_LINUX;
- si.osMajorVersion = 0;
- si.osMinorVersion = 0;
- si.osRevision = 0;
- si.busType = SI_PCI_BUS;
- si.processorFamily = DPTI_sig.dsProcessorFamily;
-
-#if defined __i386__
- adpt_i386_info(&si);
-#elif defined (__ia64__)
- adpt_ia64_info(&si);
-#elif defined(__sparc__)
- adpt_sparc_info(&si);
-#elif defined (__alpha__)
- adpt_alpha_info(&si);
-#else
- si.processorType = 0xff ;
-#endif
- if (copy_to_user(buffer, &si, sizeof(si))){
- printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
-static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
-{
- int minor;
- int error = 0;
- adpt_hba* pHba;
- ulong flags = 0;
- void __user *argp = (void __user *)arg;
-
- minor = iminor(inode);
- if (minor >= DPTI_MAX_HBA){
- return -ENXIO;
- }
- mutex_lock(&adpt_configuration_lock);
- for (pHba = hba_chain; pHba; pHba = pHba->next) {
- if (pHba->unit == minor) {
- break; /* found adapter */
- }
- }
- mutex_unlock(&adpt_configuration_lock);
- if(pHba == NULL){
- return -ENXIO;
- }
-
- while((volatile u32) pHba->state & DPTI_STATE_RESET )
- schedule_timeout_uninterruptible(2);
-
- switch (cmd) {
- // TODO: handle 3 cases
- case DPT_SIGNATURE:
- if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
- return -EFAULT;
- }
- break;
- case I2OUSRCMD:
- return adpt_i2o_passthru(pHba, argp);
-
- case DPT_CTRLINFO:{
- drvrHBAinfo_S HbaInfo;
-
-#define FLG_OSD_PCI_VALID 0x0001
-#define FLG_OSD_DMA 0x0002
-#define FLG_OSD_I2O 0x0004
- memset(&HbaInfo, 0, sizeof(HbaInfo));
- HbaInfo.drvrHBAnum = pHba->unit;
- HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
- HbaInfo.blinkState = adpt_read_blink_led(pHba);
- HbaInfo.pciBusNum = pHba->pDev->bus->number;
- HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
- HbaInfo.Interrupt = pHba->pDev->irq;
- HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
- if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
- printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
- return -EFAULT;
- }
- break;
- }
- case DPT_SYSINFO:
- return adpt_system_info(argp);
- case DPT_BLINKLED:{
- u32 value;
- value = (u32)adpt_read_blink_led(pHba);
- if (copy_to_user(argp, &value, sizeof(value))) {
- return -EFAULT;
- }
- break;
- }
- case I2ORESETCMD: {
- struct Scsi_Host *shost = pHba->host;
-
- if (shost)
- spin_lock_irqsave(shost->host_lock, flags);
- adpt_hba_reset(pHba);
- if (shost)
- spin_unlock_irqrestore(shost->host_lock, flags);
- break;
- }
- case I2ORESCANCMD:
- adpt_rescan(pHba);
- break;
- default:
- return -EINVAL;
- }
-
- return error;
-}
-
-static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
-{
- struct inode *inode;
- long ret;
-
- inode = file_inode(file);
-
- mutex_lock(&adpt_mutex);
- ret = adpt_ioctl(inode, file, cmd, arg);
- mutex_unlock(&adpt_mutex);
-
- return ret;
-}
-
-#ifdef CONFIG_COMPAT
-static long compat_adpt_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct inode *inode;
- long ret;
-
- inode = file_inode(file);
-
- mutex_lock(&adpt_mutex);
-
- switch(cmd) {
- case DPT_SIGNATURE:
- case I2OUSRCMD:
- case DPT_CTRLINFO:
- case DPT_SYSINFO:
- case DPT_BLINKLED:
- case I2ORESETCMD:
- case I2ORESCANCMD:
- case (DPT_TARGET_BUSY & 0xFFFF):
- case DPT_TARGET_BUSY:
- ret = adpt_ioctl(inode, file, cmd, arg);
- break;
- default:
- ret = -ENOIOCTLCMD;
- }
-
- mutex_unlock(&adpt_mutex);
-
- return ret;
-}
-#endif
-
-static irqreturn_t adpt_isr(int irq, void *dev_id)
-{
- struct scsi_cmnd* cmd;
- adpt_hba* pHba = dev_id;
- u32 m;
- void __iomem *reply;
- u32 status=0;
- u32 context;
- ulong flags = 0;
- int handled = 0;
-
- if (pHba == NULL){
- printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
- return IRQ_NONE;
- }
- if(pHba->host)
- spin_lock_irqsave(pHba->host->host_lock, flags);
-
- while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
- m = readl(pHba->reply_port);
- if(m == EMPTY_QUEUE){
- // Try twice then give up
- rmb();
- m = readl(pHba->reply_port);
- if(m == EMPTY_QUEUE){
- // This really should not happen
- printk(KERN_ERR"dpti: Could not get reply frame\n");
- goto out;
- }
- }
- if (pHba->reply_pool_pa <= m &&
- m < pHba->reply_pool_pa +
- (pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
- reply = (u8 *)pHba->reply_pool +
- (m - pHba->reply_pool_pa);
- } else {
- /* Ick, we should *never* be here */
- printk(KERN_ERR "dpti: reply frame not from pool\n");
- reply = (u8 *)bus_to_virt(m);
- }
-
- if (readl(reply) & MSG_FAIL) {
- u32 old_m = readl(reply+28);
- void __iomem *msg;
- u32 old_context;
- PDEBUG("%s: Failed message\n",pHba->name);
- if(old_m >= 0x100000){
- printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
- writel(m,pHba->reply_port);
- continue;
- }
- // Transaction context is 0 in failed reply frame
- msg = pHba->msg_addr_virt + old_m;
- old_context = readl(msg+12);
- writel(old_context, reply+12);
- adpt_send_nop(pHba, old_m);
- }
- context = readl(reply+8);
- if(context & 0x40000000){ // IOCTL
- void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
- if( p != NULL) {
- memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
- }
- // All IOCTLs will also be post wait
- }
- if(context & 0x80000000){ // Post wait message
- status = readl(reply+16);
- if(status >> 24){
- status &= 0xffff; /* Get detail status */
- } else {
- status = I2O_POST_WAIT_OK;
- }
- if(!(context & 0x40000000)) {
- /*
- * The request tag is one less than the command tag
- * as the firmware might treat a 0 tag as invalid
- */
- cmd = scsi_host_find_tag(pHba->host,
- readl(reply + 12) - 1);
- if(cmd != NULL) {
- printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
- }
- }
- adpt_i2o_post_wait_complete(context, status);
- } else { // SCSI message
- /*
- * The request tag is one less than the command tag
- * as the firmware might treat a 0 tag as invalid
- */
- cmd = scsi_host_find_tag(pHba->host,
- readl(reply + 12) - 1);
- if(cmd != NULL){
- scsi_dma_unmap(cmd);
- adpt_i2o_scsi_complete(reply, cmd);
- }
- }
- writel(m, pHba->reply_port);
- wmb();
- rmb();
- }
- handled = 1;
-out: if(pHba->host)
- spin_unlock_irqrestore(pHba->host->host_lock, flags);
- return IRQ_RETVAL(handled);
-}
-
-static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
-{
- int i;
- u32 msg[MAX_MESSAGE_SIZE];
- u32* mptr;
- u32* lptr;
- u32 *lenptr;
- int direction;
- int scsidir;
- int nseg;
- u32 len;
- u32 reqlen;
- s32 rcode;
- dma_addr_t addr;
-
- memset(msg, 0 , sizeof(msg));
- len = scsi_bufflen(cmd);
- direction = 0x00000000;
-
- scsidir = 0x00000000; // DATA NO XFER
- if(len) {
- /*
- * Set SCBFlags to indicate if data is being transferred
- * in or out, or no data transfer
- * Note: No need to verify the index is negative since
- * cmd->cmnd[0] is an unsigned char
- */
- switch(cmd->sc_data_direction){
- case DMA_FROM_DEVICE:
- scsidir =0x40000000; // DATA IN (iop<--dev)
- break;
- case DMA_TO_DEVICE:
- direction=0x04000000; // SGL OUT
- scsidir =0x80000000; // DATA OUT (iop-->dev)
- break;
- case DMA_NONE:
- break;
- case DMA_BIDIRECTIONAL:
- scsidir =0x40000000; // DATA IN (iop<--dev)
- // Assume In - and continue;
- break;
- default:
- printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
- pHba->name, cmd->cmnd[0]);
- cmd->result = (DID_ERROR <<16);
- scsi_done(cmd);
- return 0;
- }
- }
- // msg[0] is set later
- // I2O_CMD_SCSI_EXEC
- msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
- msg[2] = 0;
- /* Add 1 to avoid the firmware treating it as an invalid command */
- msg[3] = scsi_cmd_to_rq(cmd)->tag + 1;
- // Our cards use the transaction context as the tag for queueing
- // Adaptec/DPT Private stuff
- msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
- msg[5] = d->tid;
- /* Direction, disconnect ok | sense data | simple queue , CDBLen */
- // I2O_SCB_FLAG_ENABLE_DISCONNECT |
- // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
- // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
- msg[6] = scsidir|0x20a00000|cmd->cmd_len;
-
- mptr=msg+7;
-
- // Write SCSI command into the message - always a 16-byte block
- memset(mptr, 0, 16);
- memcpy(mptr, cmd->cmnd, cmd->cmd_len);
- mptr+=4;
- lenptr=mptr++; /* Remember me - fill in when we know */
- if (dpt_dma64(pHba)) {
- reqlen = 16; // SINGLE SGE
- *mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
- *mptr++ = 1 << PAGE_SHIFT;
- } else {
- reqlen = 14; // SINGLE SGE
- }
- /* Now fill in the SGList and command */
-
- nseg = scsi_dma_map(cmd);
- BUG_ON(nseg < 0);
- if (nseg) {
- struct scatterlist *sg;
-
- len = 0;
- scsi_for_each_sg(cmd, sg, nseg, i) {
- lptr = mptr;
- *mptr++ = direction|0x10000000|sg_dma_len(sg);
- len+=sg_dma_len(sg);
- addr = sg_dma_address(sg);
- *mptr++ = dma_low(addr);
- if (dpt_dma64(pHba))
- *mptr++ = dma_high(addr);
- /* Make this an end of list */
- if (i == nseg - 1)
- *lptr = direction|0xD0000000|sg_dma_len(sg);
- }
- reqlen = mptr - msg;
- *lenptr = len;
-
- if(cmd->underflow && len != cmd->underflow){
- printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
- len, cmd->underflow);
- }
- } else {
- *lenptr = len = 0;
- reqlen = 12;
- }
-
- /* Stick the headers on */
- msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
-
- // Send it on its way
- rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
- if (rcode == 0) {
- return 0;
- }
- return rcode;
-}
-
-
-static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
-{
- struct Scsi_Host *host;
-
- host = scsi_host_alloc(sht, sizeof(adpt_hba*));
- if (host == NULL) {
- printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
- return -1;
- }
- host->hostdata[0] = (unsigned long)pHba;
- pHba->host = host;
-
- host->irq = pHba->pDev->irq;
- /* no IO ports, so don't have to set host->io_port and
- * host->n_io_port
- */
- host->io_port = 0;
- host->n_io_port = 0;
- /* see comments in scsi_host.h */
- host->max_id = 16;
- host->max_lun = 256;
- host->max_channel = pHba->top_scsi_channel + 1;
- host->cmd_per_lun = 1;
- host->unique_id = (u32)sys_tbl_pa + pHba->unit;
- host->sg_tablesize = pHba->sg_tablesize;
- host->can_queue = pHba->post_fifo_size;
-
- return 0;
-}
-
-
-static void adpt_i2o_scsi_complete(void __iomem *reply, struct scsi_cmnd *cmd)
-{
- adpt_hba* pHba;
- u32 hba_status;
- u32 dev_status;
- u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
- // I know this would look cleaner if I just read bytes
- // but the model I have been using for all the rest of the
- // io is in 4 byte words - so I keep that model
- u16 detailed_status = readl(reply+16) &0xffff;
- dev_status = (detailed_status & 0xff);
- hba_status = detailed_status >> 8;
-
- // calculate resid for sg
- scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));
-
- pHba = (adpt_hba*) cmd->device->host->hostdata[0];
-
- cmd->sense_buffer[0] = '\0'; // initialize sense valid flag to false
-
- if(!(reply_flags & MSG_FAIL)) {
- switch(detailed_status & I2O_SCSI_DSC_MASK) {
- case I2O_SCSI_DSC_SUCCESS:
- cmd->result = (DID_OK << 16);
- // handle underflow
- if (readl(reply+20) < cmd->underflow) {
- cmd->result = (DID_ERROR <<16);
- printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
- }
- break;
- case I2O_SCSI_DSC_REQUEST_ABORTED:
- cmd->result = (DID_ABORT << 16);
- break;
- case I2O_SCSI_DSC_PATH_INVALID:
- case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
- case I2O_SCSI_DSC_SELECTION_TIMEOUT:
- case I2O_SCSI_DSC_COMMAND_TIMEOUT:
- case I2O_SCSI_DSC_NO_ADAPTER:
- case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
- printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%llu) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
- pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
- cmd->result = (DID_TIME_OUT << 16);
- break;
- case I2O_SCSI_DSC_ADAPTER_BUSY:
- case I2O_SCSI_DSC_BUS_BUSY:
- cmd->result = (DID_BUS_BUSY << 16);
- break;
- case I2O_SCSI_DSC_SCSI_BUS_RESET:
- case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
- cmd->result = (DID_RESET << 16);
- break;
- case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
- printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
- cmd->result = (DID_PARITY << 16);
- break;
- case I2O_SCSI_DSC_UNABLE_TO_ABORT:
- case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
- case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
- case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
- case I2O_SCSI_DSC_AUTOSENSE_FAILED:
- case I2O_SCSI_DSC_DATA_OVERRUN:
- case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
- case I2O_SCSI_DSC_SEQUENCE_FAILURE:
- case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
- case I2O_SCSI_DSC_PROVIDE_FAILURE:
- case I2O_SCSI_DSC_REQUEST_TERMINATED:
- case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
- case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
- case I2O_SCSI_DSC_MESSAGE_RECEIVED:
- case I2O_SCSI_DSC_INVALID_CDB:
- case I2O_SCSI_DSC_LUN_INVALID:
- case I2O_SCSI_DSC_SCSI_TID_INVALID:
- case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
- case I2O_SCSI_DSC_NO_NEXUS:
- case I2O_SCSI_DSC_CDB_RECEIVED:
- case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
- case I2O_SCSI_DSC_QUEUE_FROZEN:
- case I2O_SCSI_DSC_REQUEST_INVALID:
- default:
- printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
- pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
- hba_status, dev_status, cmd->cmnd[0]);
- cmd->result = (DID_ERROR << 16);
- break;
- }
-
- // copy over the request sense data if it was a check
- // condition status
- if (dev_status == SAM_STAT_CHECK_CONDITION) {
- u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
- // Copy over the sense data
- memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
- if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
- cmd->sense_buffer[2] == DATA_PROTECT ){
- /* This is to handle a failed array */
- cmd->result = (DID_TIME_OUT << 16);
- printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
- pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
- hba_status, dev_status, cmd->cmnd[0]);
-
- }
- }
- } else {
- /* In this condition we could not talk to the tid;
- * the card rejected it. We should signal a retry
- * for a limited number of retries.
- */
- cmd->result = (DID_TIME_OUT << 16);
- printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%llu) tid=%d, cmd=0x%x\n",
- pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
- ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
- }
-
- cmd->result |= (dev_status);
-
- scsi_done(cmd);
-}
-
-
-static s32 adpt_rescan(adpt_hba* pHba)
-{
- s32 rcode;
- ulong flags = 0;
-
- if(pHba->host)
- spin_lock_irqsave(pHba->host->host_lock, flags);
- if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
- goto out;
- if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
- goto out;
- rcode = 0;
-out: if(pHba->host)
- spin_unlock_irqrestore(pHba->host->host_lock, flags);
- return rcode;
-}
-
-
-static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
-{
- int i;
- int max;
- int tid;
- struct i2o_device *d;
- i2o_lct *lct = pHba->lct;
- u8 bus_no = 0;
- s16 scsi_id;
- u64 scsi_lun;
- u32 buf[10]; // at least 8 u32's
- struct adpt_device* pDev = NULL;
- struct i2o_device* pI2o_dev = NULL;
-
- if (lct == NULL) {
- printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
- return -1;
- }
-
- max = lct->table_size;
- max -= 3;
- max /= 9;
-
- // Mark each drive as unscanned
- for (d = pHba->devices; d; d = d->next) {
- pDev =(struct adpt_device*) d->owner;
- if(!pDev){
- continue;
- }
- pDev->state |= DPTI_DEV_UNSCANNED;
- }
-
- printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
-
- for(i=0;i<max;i++) {
- if( lct->lct_entry[i].user_tid != 0xfff){
- continue;
- }
-
- if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
- lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
- lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
- tid = lct->lct_entry[i].tid;
- if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
- printk(KERN_ERR"%s: Could not query device\n",pHba->name);
- continue;
- }
- bus_no = buf[0]>>16;
- if (bus_no >= MAX_CHANNEL) { /* Something wrong skip it */
- printk(KERN_WARNING
- "%s: Channel number %d out of range\n",
- pHba->name, bus_no);
- continue;
- }
-
- scsi_id = buf[1];
- scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
- pDev = pHba->channel[bus_no].device[scsi_id];
- /* da lun */
- while(pDev) {
- if(pDev->scsi_lun == scsi_lun) {
- break;
- }
- pDev = pDev->next_lun;
- }
- if(!pDev ) { // Something new, add it
- d = kmalloc(sizeof(struct i2o_device),
- GFP_ATOMIC);
- if(d==NULL)
- {
- printk(KERN_CRIT "Out of memory for I2O device data.\n");
- return -ENOMEM;
- }
-
- d->controller = pHba;
- d->next = NULL;
-
- memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
-
- d->flags = 0;
- adpt_i2o_report_hba_unit(pHba, d);
- adpt_i2o_install_device(pHba, d);
-
- pDev = pHba->channel[bus_no].device[scsi_id];
- if( pDev == NULL){
- pDev =
- kzalloc(sizeof(struct adpt_device),
- GFP_ATOMIC);
- if(pDev == NULL) {
- return -ENOMEM;
- }
- pHba->channel[bus_no].device[scsi_id] = pDev;
- } else {
- while (pDev->next_lun) {
- pDev = pDev->next_lun;
- }
- pDev = pDev->next_lun =
- kzalloc(sizeof(struct adpt_device),
- GFP_ATOMIC);
- if(pDev == NULL) {
- return -ENOMEM;
- }
- }
- pDev->tid = d->lct_data.tid;
- pDev->scsi_channel = bus_no;
- pDev->scsi_id = scsi_id;
- pDev->scsi_lun = scsi_lun;
- pDev->pI2o_dev = d;
- d->owner = pDev;
- pDev->type = (buf[0])&0xff;
- pDev->flags = (buf[0]>>8)&0xff;
- // Too late, SCSI system has made up its mind, but what the hey ...
- if(scsi_id > pHba->top_scsi_id){
- pHba->top_scsi_id = scsi_id;
- }
- if(scsi_lun > pHba->top_scsi_lun){
- pHba->top_scsi_lun = scsi_lun;
- }
- continue;
- } // end of new i2o device
-
- // We found an old device - check it
- while(pDev) {
- if(pDev->scsi_lun == scsi_lun) {
- if(!scsi_device_online(pDev->pScsi_dev)) {
- printk(KERN_WARNING"%s: Setting device (%d,%d,%llu) back online\n",
- pHba->name,bus_no,scsi_id,scsi_lun);
- if (pDev->pScsi_dev) {
- scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
- }
- }
- d = pDev->pI2o_dev;
- if(d->lct_data.tid != tid) { // something changed
- pDev->tid = tid;
- memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
- if (pDev->pScsi_dev) {
- pDev->pScsi_dev->changed = TRUE;
- pDev->pScsi_dev->removable = TRUE;
- }
- }
- // Found it - mark it scanned
- pDev->state = DPTI_DEV_ONLINE;
- break;
- }
- pDev = pDev->next_lun;
- }
- }
- }
- for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
- pDev =(struct adpt_device*) pI2o_dev->owner;
- if(!pDev){
- continue;
- }
- // Take offline any drives that previously existed but could not be found
- // in the LCT table
- if (pDev->state & DPTI_DEV_UNSCANNED){
- pDev->state = DPTI_DEV_OFFLINE;
- printk(KERN_WARNING"%s: Device (%d,%d,%llu) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
- if (pDev->pScsi_dev) {
- scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
- }
- }
- }
- return 0;
-}
-
-/*============================================================================
- * Routines from i2o subsystem
- *============================================================================
- */
-
-
-
-/*
- * Bring an I2O controller into HOLD state. See the spec.
- */
-static int adpt_i2o_activate_hba(adpt_hba* pHba)
-{
- int rcode;
-
- if(pHba->initialized ) {
- if (adpt_i2o_status_get(pHba) < 0) {
- if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
- printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
- return rcode;
- }
- if (adpt_i2o_status_get(pHba) < 0) {
- printk(KERN_INFO "HBA not responding.\n");
- return -1;
- }
- }
-
- if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
- printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
- return -1;
- }
-
- if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
- pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
- pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
- pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
- adpt_i2o_reset_hba(pHba);
- if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
- printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
- return -1;
- }
- }
- } else {
- if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
- printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
- return rcode;
- }
-
- }
-
- if (adpt_i2o_init_outbound_q(pHba) < 0) {
- return -1;
- }
-
- /* In HOLD state */
-
- if (adpt_i2o_hrt_get(pHba) < 0) {
- return -1;
- }
-
- return 0;
-}
-
-/*
- * Bring a controller online into OPERATIONAL state.
- */
-
-static int adpt_i2o_online_hba(adpt_hba* pHba)
-{
- if (adpt_i2o_systab_send(pHba) < 0)
- return -1;
- /* In READY state */
-
- if (adpt_i2o_enable_hba(pHba) < 0)
- return -1;
-
- /* In OPERATIONAL state */
- return 0;
-}
-
-static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
-{
- u32 __iomem *msg;
- ulong timeout = jiffies + 5*HZ;
-
- while(m == EMPTY_QUEUE){
- rmb();
- m = readl(pHba->post_port);
- if(m != EMPTY_QUEUE){
- break;
- }
- if(time_after(jiffies,timeout)){
- printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
- return 2;
- }
- schedule_timeout_uninterruptible(1);
- }
- msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
- writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
- writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
- writel( 0,&msg[2]);
- wmb();
-
- writel(m, pHba->post_port);
- wmb();
- return 0;
-}
-
-static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
-{
- u8 *status;
- dma_addr_t addr;
- u32 __iomem *msg = NULL;
- int i;
- ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
- u32 m;
-
- do {
- rmb();
- m = readl(pHba->post_port);
- if (m != EMPTY_QUEUE) {
- break;
- }
-
- if(time_after(jiffies,timeout)){
- printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
- return -ETIMEDOUT;
- }
- schedule_timeout_uninterruptible(1);
- } while(m == EMPTY_QUEUE);
-
- msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
-
- status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
- if (!status) {
- adpt_send_nop(pHba, m);
- printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
- pHba->name);
- return -ENOMEM;
- }
-
- writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
- writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
- writel(0, &msg[2]);
- writel(0x0106, &msg[3]); /* Transaction context */
- writel(4096, &msg[4]); /* Host page frame size */
- writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]); /* Outbound msg frame size and Initcode */
- writel(0xD0000004, &msg[6]); /* Simple SG LE, EOB */
- writel((u32)addr, &msg[7]);
-
- writel(m, pHba->post_port);
- wmb();
-
- // Wait for the reply status to come back
- do {
- if (*status) {
- if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
- break;
- }
- }
- rmb();
- if(time_after(jiffies,timeout)){
- printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
- /* We lose 4 bytes of "status" here, but we
- cannot free these because the controller may
- wake up and corrupt those bytes at any time */
- /* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
- return -ETIMEDOUT;
- }
- schedule_timeout_uninterruptible(1);
- } while (1);
-
- // If the command was successful, fill the fifo with our reply
- // message packets
- if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
- dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
- return -2;
- }
- dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
-
- if(pHba->reply_pool != NULL) {
- dma_free_coherent(&pHba->pDev->dev,
- pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
- pHba->reply_pool, pHba->reply_pool_pa);
- }
-
- pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
- pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
- &pHba->reply_pool_pa, GFP_KERNEL);
- if (!pHba->reply_pool) {
- printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
- return -ENOMEM;
- }
-
- for(i = 0; i < pHba->reply_fifo_size; i++) {
- writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
- pHba->reply_port);
- wmb();
- }
- adpt_i2o_status_get(pHba);
- return 0;
-}
-
-
-/*
- * I2O System Table. Contains information about
- * all the IOPs in the system. Used to inform IOPs
- * about each other's existence.
- *
- * sys_tbl_ver is the CurrentChangeIndicator that is
- * used by IOPs to track changes.
- */
-
-
-
-static s32 adpt_i2o_status_get(adpt_hba* pHba)
-{
- ulong timeout;
- u32 m;
- u32 __iomem *msg;
- u8 *status_block=NULL;
-
- if(pHba->status_block == NULL) {
- pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
- sizeof(i2o_status_block),
- &pHba->status_block_pa, GFP_KERNEL);
- if(pHba->status_block == NULL) {
- printk(KERN_ERR
- "dpti%d: Get Status Block failed; Out of memory. \n",
- pHba->unit);
- return -ENOMEM;
- }
- }
- memset(pHba->status_block, 0, sizeof(i2o_status_block));
- status_block = (u8*)(pHba->status_block);
- timeout = jiffies+TMOUT_GETSTATUS*HZ;
- do {
- rmb();
- m = readl(pHba->post_port);
- if (m != EMPTY_QUEUE) {
- break;
- }
- if(time_after(jiffies,timeout)){
- printk(KERN_ERR "%s: Timeout waiting for message !\n",
- pHba->name);
- return -ETIMEDOUT;
- }
- schedule_timeout_uninterruptible(1);
- } while(m==EMPTY_QUEUE);
-
-
- msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
-
- writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
- writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
- writel(1, &msg[2]);
- writel(0, &msg[3]);
- writel(0, &msg[4]);
- writel(0, &msg[5]);
- writel( dma_low(pHba->status_block_pa), &msg[6]);
- writel( dma_high(pHba->status_block_pa), &msg[7]);
- writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
-
- //post message
- writel(m, pHba->post_port);
- wmb();
-
- while(status_block[87]!=0xff){
- if(time_after(jiffies,timeout)){
- printk(KERN_ERR"dpti%d: Get status timeout.\n",
- pHba->unit);
- return -ETIMEDOUT;
- }
- rmb();
- schedule_timeout_uninterruptible(1);
- }
-
- // Set up our number of outbound and inbound messages
- pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
- if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
- pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
- }
-
- pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
- if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
- pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
- }
-
- // Calculate the Scatter Gather list size
- if (dpt_dma64(pHba)) {
- pHba->sg_tablesize
- = ((pHba->status_block->inbound_frame_size * 4
- - 14 * sizeof(u32))
- / (sizeof(struct sg_simple_element) + sizeof(u32)));
- } else {
- pHba->sg_tablesize
- = ((pHba->status_block->inbound_frame_size * 4
- - 12 * sizeof(u32))
- / sizeof(struct sg_simple_element));
- }
- if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
- pHba->sg_tablesize = SG_LIST_ELEMENTS;
- }
-
-
-#ifdef DEBUG
- printk("dpti%d: State = ",pHba->unit);
- switch(pHba->status_block->iop_state) {
- case 0x01:
- printk("INIT\n");
- break;
- case 0x02:
- printk("RESET\n");
- break;
- case 0x04:
- printk("HOLD\n");
- break;
- case 0x05:
- printk("READY\n");
- break;
- case 0x08:
- printk("OPERATIONAL\n");
- break;
- case 0x10:
- printk("FAILED\n");
- break;
- case 0x11:
- printk("FAULTED\n");
- break;
- default:
- printk("%x (unknown!!)\n",pHba->status_block->iop_state);
- }
-#endif
- return 0;
-}
-
-/*
- * Get the IOP's Logical Configuration Table
- */
-static int adpt_i2o_lct_get(adpt_hba* pHba)
-{
- u32 msg[8];
- int ret;
- u32 buf[16];
-
- if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
- pHba->lct_size = pHba->status_block->expected_lct_size;
- }
- do {
- if (pHba->lct == NULL) {
- pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
- pHba->lct_size, &pHba->lct_pa,
- GFP_ATOMIC);
- if(pHba->lct == NULL) {
- printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
- pHba->name);
- return -ENOMEM;
- }
- }
- memset(pHba->lct, 0, pHba->lct_size);
-
- msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
- msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
- msg[2] = 0;
- msg[3] = 0;
- msg[4] = 0xFFFFFFFF; /* All devices */
- msg[5] = 0x00000000; /* Report now */
- msg[6] = 0xD0000000|pHba->lct_size;
- msg[7] = (u32)pHba->lct_pa;
-
- if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
- printk(KERN_ERR "%s: LCT Get failed (status=%#10x.\n",
- pHba->name, ret);
- printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
- return ret;
- }
-
- if ((pHba->lct->table_size << 2) > pHba->lct_size) {
- pHba->lct_size = pHba->lct->table_size << 2;
- dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
- pHba->lct, pHba->lct_pa);
- pHba->lct = NULL;
- }
- } while (pHba->lct == NULL);
-
- PDEBUG("%s: Hardware resource table read.\n", pHba->name);
-
-
- // I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
- if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
- pHba->FwDebugBufferSize = buf[1];
- pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
- pHba->FwDebugBufferSize);
- if (pHba->FwDebugBuffer_P) {
- pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P +
- FW_DEBUG_FLAGS_OFFSET;
- pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
- FW_DEBUG_BLED_OFFSET;
- pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
- pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
- FW_DEBUG_STR_LENGTH_OFFSET;
- pHba->FwDebugBuffer_P += buf[2];
- pHba->FwDebugFlags = 0;
- }
- }
-
- return 0;
-}
-
-static int adpt_i2o_build_sys_table(void)
-{
- adpt_hba* pHba = hba_chain;
- int count = 0;
-
- if (sys_tbl)
- dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
- sys_tbl, sys_tbl_pa);
-
- sys_tbl_len = sizeof(struct i2o_sys_tbl) + // Header + IOPs
- (hba_count) * sizeof(struct i2o_sys_tbl_entry);
-
- sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
- sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
- if (!sys_tbl) {
- printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
- return -ENOMEM;
- }
-
- sys_tbl->num_entries = hba_count;
- sys_tbl->version = I2OVERSION;
- sys_tbl->change_ind = sys_tbl_ind++;
-
- for(pHba = hba_chain; pHba; pHba = pHba->next) {
- u64 addr;
- // Get updated Status Block so we have the latest information
- if (adpt_i2o_status_get(pHba)) {
- sys_tbl->num_entries--;
- continue; // try next one
- }
-
- sys_tbl->iops[count].org_id = pHba->status_block->org_id;
- sys_tbl->iops[count].iop_id = pHba->unit + 2;
- sys_tbl->iops[count].seg_num = 0;
- sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
- sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
- sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
- sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
- sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
- sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
- addr = pHba->base_addr_phys + 0x40;
- sys_tbl->iops[count].inbound_low = dma_low(addr);
- sys_tbl->iops[count].inbound_high = dma_high(addr);
-
- count++;
- }
-
-#ifdef DEBUG
-{
- u32 *table = (u32*)sys_tbl;
- printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
- for(count = 0; count < (sys_tbl_len >>2); count++) {
- printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
- count, table[count]);
- }
-}
-#endif
-
- return 0;
-}
-
-
-/*
- * Dump the information block associated with a given unit (TID)
- */
-
-static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
-{
- char buf[64];
- int unit = d->lct_data.tid;
-
- printk(KERN_INFO "TID %3.3d ", unit);
-
- if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
- {
- buf[16]=0;
- printk(" Vendor: %-12.12s", buf);
- }
- if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
- {
- buf[16]=0;
- printk(" Device: %-12.12s", buf);
- }
- if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
- {
- buf[8]=0;
- printk(" Rev: %-12.12s\n", buf);
- }
-#ifdef DEBUG
- printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
- printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
- printk(KERN_INFO "\tFlags: ");
-
- if(d->lct_data.device_flags&(1<<0))
- printk("C"); // ConfigDialog requested
- if(d->lct_data.device_flags&(1<<1))
- printk("U"); // Multi-user capable
- if(!(d->lct_data.device_flags&(1<<4)))
- printk("P"); // Peer service enabled!
- if(!(d->lct_data.device_flags&(1<<5)))
- printk("M"); // Mgmt service enabled!
- printk("\n");
-#endif
-}
-
-#ifdef DEBUG
-/*
- * Do i2o class name lookup
- */
-static const char *adpt_i2o_get_class_name(int class)
-{
- int idx = 16;
- static char *i2o_class_name[] = {
- "Executive",
- "Device Driver Module",
- "Block Device",
- "Tape Device",
- "LAN Interface",
- "WAN Interface",
- "Fibre Channel Port",
- "Fibre Channel Device",
- "SCSI Device",
- "ATE Port",
- "ATE Device",
- "Floppy Controller",
- "Floppy Device",
- "Secondary Bus Port",
- "Peer Transport Agent",
- "Peer Transport",
- "Unknown"
- };
-
- switch(class&0xFFF) {
- case I2O_CLASS_EXECUTIVE:
- idx = 0; break;
- case I2O_CLASS_DDM:
- idx = 1; break;
- case I2O_CLASS_RANDOM_BLOCK_STORAGE:
- idx = 2; break;
- case I2O_CLASS_SEQUENTIAL_STORAGE:
- idx = 3; break;
- case I2O_CLASS_LAN:
- idx = 4; break;
- case I2O_CLASS_WAN:
- idx = 5; break;
- case I2O_CLASS_FIBRE_CHANNEL_PORT:
- idx = 6; break;
- case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
- idx = 7; break;
- case I2O_CLASS_SCSI_PERIPHERAL:
- idx = 8; break;
- case I2O_CLASS_ATE_PORT:
- idx = 9; break;
- case I2O_CLASS_ATE_PERIPHERAL:
- idx = 10; break;
- case I2O_CLASS_FLOPPY_CONTROLLER:
- idx = 11; break;
- case I2O_CLASS_FLOPPY_DEVICE:
- idx = 12; break;
- case I2O_CLASS_BUS_ADAPTER_PORT:
- idx = 13; break;
- case I2O_CLASS_PEER_TRANSPORT_AGENT:
- idx = 14; break;
- case I2O_CLASS_PEER_TRANSPORT:
- idx = 15; break;
- }
- return i2o_class_name[idx];
-}
-#endif
-
-
-static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
-{
- u32 msg[6];
- int ret, size = sizeof(i2o_hrt);
-
- do {
- if (pHba->hrt == NULL) {
- pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
- size, &pHba->hrt_pa, GFP_KERNEL);
- if (pHba->hrt == NULL) {
- printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
- return -ENOMEM;
- }
- }
-
- msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
- msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
- msg[2]= 0;
- msg[3]= 0;
- msg[4]= (0xD0000000 | size); /* Simple transaction */
- msg[5]= (u32)pHba->hrt_pa; /* Dump it here */
-
- if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
- printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
- return ret;
- }
-
- if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
- int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
- dma_free_coherent(&pHba->pDev->dev, size,
- pHba->hrt, pHba->hrt_pa);
- size = newsize;
- pHba->hrt = NULL;
- }
- } while(pHba->hrt == NULL);
- return 0;
-}
-
-/*
- * Query one scalar group value or a whole scalar group.
- */
-static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
- int group, int field, void *buf, int buflen)
-{
- u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
- u8 *opblk_va;
- dma_addr_t opblk_pa;
- u8 *resblk_va;
- dma_addr_t resblk_pa;
-
- int size;
-
- /* 8 bytes for header */
- resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
- sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
- if (resblk_va == NULL) {
- printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
- return -ENOMEM;
- }
-
- opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
- sizeof(opblk), &opblk_pa, GFP_KERNEL);
- if (opblk_va == NULL) {
- dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
- resblk_va, resblk_pa);
- printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
- pHba->name);
- return -ENOMEM;
- }
- if (field == -1) /* whole group */
- opblk[4] = -1;
-
- memcpy(opblk_va, opblk, sizeof(opblk));
- size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
- opblk_va, opblk_pa, sizeof(opblk),
- resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
- dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
- if (size == -ETIME) {
- dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
- resblk_va, resblk_pa);
- printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
- return -ETIME;
- } else if (size == -EINTR) {
- dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
- resblk_va, resblk_pa);
- printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
- return -EINTR;
- }
-
- memcpy(buf, resblk_va+8, buflen); /* cut off header */
-
- dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
- resblk_va, resblk_pa);
- if (size < 0)
- return size;
-
- return buflen;
-}
-
-
-/* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
- *
- * This function can be used for all UtilParamsGet/Set operations.
- * The OperationBlock is given in opblk-buffer,
- * and results are returned in resblk-buffer.
- * Note that the minimum sized resblk is 8 bytes and contains
- * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
- */
-static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
- void *opblk_va, dma_addr_t opblk_pa, int oplen,
- void *resblk_va, dma_addr_t resblk_pa, int reslen)
-{
- u32 msg[9];
- u32 *res = (u32 *)resblk_va;
- int wait_status;
-
- msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
- msg[1] = cmd << 24 | HOST_TID << 12 | tid;
- msg[2] = 0;
- msg[3] = 0;
- msg[4] = 0;
- msg[5] = 0x54000000 | oplen; /* OperationBlock */
- msg[6] = (u32)opblk_pa;
- msg[7] = 0xD0000000 | reslen; /* ResultBlock */
- msg[8] = (u32)resblk_pa;
-
- if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
- printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
- return wait_status; /* -DetailedStatus */
- }
-
- if (res[1]&0x00FF0000) { /* BlockStatus != SUCCESS */
- printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
- "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
- pHba->name,
- (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
- : "PARAMS_GET",
- res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
- return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
- }
-
- return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
-}
-
-
-static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
-{
- u32 msg[4];
- int ret;
-
- adpt_i2o_status_get(pHba);
-
- /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
-
- if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
- (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
- return 0;
- }
-
- msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
- msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
- msg[2] = 0;
- msg[3] = 0;
-
- if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
- printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
- pHba->unit, -ret);
- } else {
- printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
- }
-
- adpt_i2o_status_get(pHba);
- return ret;
-}
-
-
-/*
- * Enable IOP. Allows the IOP to resume external operations.
- */
-static int adpt_i2o_enable_hba(adpt_hba* pHba)
-{
- u32 msg[4];
- int ret;
-
- adpt_i2o_status_get(pHba);
- if(!pHba->status_block){
- return -ENOMEM;
- }
- /* Enable only allowed on READY state */
- if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
- return 0;
-
- if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
- return -EINVAL;
-
- msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
- msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
- msg[2]= 0;
- msg[3]= 0;
-
- if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
- printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
- pHba->name, ret);
- } else {
- PDEBUG("%s: Enabled.\n", pHba->name);
- }
-
- adpt_i2o_status_get(pHba);
- return ret;
-}
-
-
-static int adpt_i2o_systab_send(adpt_hba* pHba)
-{
- u32 msg[12];
- int ret;
-
- msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
- msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
- msg[2] = 0;
- msg[3] = 0;
- msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
- msg[5] = 0; /* Segment 0 */
-
- /*
- * Provide three SGL-elements:
- * System table (SysTab), Private memory space declaration and
- * Private i/o space declaration
- */
- msg[6] = 0x54000000 | sys_tbl_len;
- msg[7] = (u32)sys_tbl_pa;
- msg[8] = 0x54000000 | 0;
- msg[9] = 0;
- msg[10] = 0xD4000000 | 0;
- msg[11] = 0;
-
- if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
- printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
- pHba->name, ret);
- }
-#ifdef DEBUG
- else {
- PINFO("%s: SysTab set.\n", pHba->name);
- }
-#endif
-
- return ret;
-}
-
-
-/*============================================================================
- *
- *============================================================================
- */
-
-
-#ifdef UARTDELAY
-
-static static void adpt_delay(int millisec)
-{
- int i;
- for (i = 0; i < millisec; i++) {
- udelay(1000); /* delay for one millisecond */
- }
-}
-
-#endif
-
-static struct scsi_host_template driver_template = {
- .module = THIS_MODULE,
- .name = "dpt_i2o",
- .proc_name = "dpt_i2o",
- .show_info = adpt_show_info,
- .info = adpt_info,
- .queuecommand = adpt_queue,
- .eh_abort_handler = adpt_abort,
- .eh_device_reset_handler = adpt_device_reset,
- .eh_bus_reset_handler = adpt_bus_reset,
- .eh_host_reset_handler = adpt_reset,
- .bios_param = adpt_bios_param,
- .slave_configure = adpt_slave_configure,
- .can_queue = MAX_TO_IOP_MESSAGES,
- .this_id = 7,
-};
-
-static int __init adpt_init(void)
-{
- int error;
- adpt_hba *pHba, *next;
-
- printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
-
- error = adpt_detect(&driver_template);
- if (error < 0)
- return error;
- if (hba_chain == NULL)
- return -ENODEV;
-
- for (pHba = hba_chain; pHba; pHba = pHba->next) {
- error = scsi_add_host(pHba->host, &pHba->pDev->dev);
- if (error)
- goto fail;
- scsi_scan_host(pHba->host);
- }
- return 0;
-fail:
- for (pHba = hba_chain; pHba; pHba = next) {
- next = pHba->next;
- scsi_remove_host(pHba->host);
- }
- return error;
-}
-
-static void __exit adpt_exit(void)
-{
- adpt_hba *pHba, *next;
-
- for (pHba = hba_chain; pHba; pHba = next) {
- next = pHba->next;
- adpt_release(pHba);
- }
-}
-
-module_init(adpt_init);
-module_exit(adpt_exit);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/dpti.h b/drivers/scsi/dpti.h
deleted file mode 100644
index 8a079e8d7f65..000000000000
--- a/drivers/scsi/dpti.h
+++ /dev/null
@@ -1,331 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/***************************************************************************
- dpti.h - description
- -------------------
- begin : Thu Sep 7 2000
- copyright : (C) 2001 by Adaptec
-
- See Documentation/scsi/dpti.rst for history, notes, license info
- and credits
- ***************************************************************************/
-
-/***************************************************************************
- * *
- * *
- ***************************************************************************/
-
-#ifndef _DPT_H
-#define _DPT_H
-
-#define MAX_TO_IOP_MESSAGES (255)
-#define MAX_FROM_IOP_MESSAGES (255)
-
-
-/*
- * SCSI interface function Prototypes
- */
-
-static int adpt_detect(struct scsi_host_template * sht);
-static int adpt_queue(struct Scsi_Host *h, struct scsi_cmnd * cmd);
-static int adpt_abort(struct scsi_cmnd * cmd);
-static int adpt_reset(struct scsi_cmnd* cmd);
-static int adpt_slave_configure(struct scsi_device *);
-
-static const char *adpt_info(struct Scsi_Host *pSHost);
-static int adpt_bios_param(struct scsi_device * sdev, struct block_device *dev,
- sector_t, int geom[]);
-
-static int adpt_bus_reset(struct scsi_cmnd* cmd);
-static int adpt_device_reset(struct scsi_cmnd* cmd);
-
-
-/*
- * struct scsi_host_template (see scsi/scsi_host.h)
- */
-
-#define DPT_DRIVER_NAME "Adaptec I2O RAID"
-
-#ifndef HOSTS_C
-
-#include "dpt/sys_info.h"
-#include <linux/wait.h>
-#include "dpt/dpti_i2o.h"
-#include "dpt/dpti_ioctl.h"
-
-#define DPT_I2O_VERSION "2.4 Build 5go"
-#define DPT_VERSION 2
-#define DPT_REVISION '4'
-#define DPT_SUBREVISION '5'
-#define DPT_BETA ""
-#define DPT_MONTH 8
-#define DPT_DAY 7
-#define DPT_YEAR (2001-1980)
-
-#define DPT_DRIVER "dpt_i2o"
-#define DPTI_I2O_MAJOR (151)
-#define DPT_ORGANIZATION_ID (0x1B) /* For Private Messages */
-#define DPTI_MAX_HBA (16)
-#define MAX_CHANNEL (5) // Maximum Channel # Supported
-#define MAX_ID (128) // Maximum Target ID Supported
-
-/* Sizes in 4 byte words */
-#define REPLY_FRAME_SIZE (17)
-#define MAX_MESSAGE_SIZE (128)
-#define SG_LIST_ELEMENTS (56)
-
-#define EMPTY_QUEUE 0xffffffff
-#define I2O_INTERRUPT_PENDING_B (0x08)
-
-#define PCI_DPT_VENDOR_ID (0x1044) // DPT PCI Vendor ID
-#define PCI_DPT_DEVICE_ID (0xA501) // DPT PCI I2O Device ID
-#define PCI_DPT_RAPTOR_DEVICE_ID (0xA511)
-
-/* Debugging macro from Linux Device Drivers - Rubini */
-#undef PDEBUG
-#ifdef DEBUG
-//TODO add debug level switch
-# define PDEBUG(fmt, args...) printk(KERN_DEBUG "dpti: " fmt, ##args)
-# define PDEBUGV(fmt, args...) printk(KERN_DEBUG "dpti: " fmt, ##args)
-#else
-# define PDEBUG(fmt, args...) /* not debugging: nothing */
-# define PDEBUGV(fmt, args...) /* not debugging: nothing */
-#endif
-
-#define PERROR(fmt, args...) printk(KERN_ERR fmt, ##args)
-#define PWARN(fmt, args...) printk(KERN_WARNING fmt, ##args)
-#define PINFO(fmt, args...) printk(KERN_INFO fmt, ##args)
-#define PCRIT(fmt, args...) printk(KERN_CRIT fmt, ##args)
-
-#define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGTERM))
-
-// Command timeouts
-#define FOREVER (0)
-#define TMOUT_INQUIRY (20)
-#define TMOUT_FLUSH (360/45)
-#define TMOUT_ABORT (30)
-#define TMOUT_SCSI (300)
-#define TMOUT_IOPRESET (360)
-#define TMOUT_GETSTATUS (15)
-#define TMOUT_INITOUTBOUND (15)
-#define TMOUT_LCT (360)
-
-
-#define I2O_SCSI_DEVICE_DSC_MASK 0x00FF
-
-#define I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION 0x000A
-
-#define I2O_SCSI_DSC_MASK 0xFF00
-#define I2O_SCSI_DSC_SUCCESS 0x0000
-#define I2O_SCSI_DSC_REQUEST_ABORTED 0x0200
-#define I2O_SCSI_DSC_UNABLE_TO_ABORT 0x0300
-#define I2O_SCSI_DSC_COMPLETE_WITH_ERROR 0x0400
-#define I2O_SCSI_DSC_ADAPTER_BUSY 0x0500
-#define I2O_SCSI_DSC_REQUEST_INVALID 0x0600
-#define I2O_SCSI_DSC_PATH_INVALID 0x0700
-#define I2O_SCSI_DSC_DEVICE_NOT_PRESENT 0x0800
-#define I2O_SCSI_DSC_UNABLE_TO_TERMINATE 0x0900
-#define I2O_SCSI_DSC_SELECTION_TIMEOUT 0x0A00
-#define I2O_SCSI_DSC_COMMAND_TIMEOUT 0x0B00
-#define I2O_SCSI_DSC_MR_MESSAGE_RECEIVED 0x0D00
-#define I2O_SCSI_DSC_SCSI_BUS_RESET 0x0E00
-#define I2O_SCSI_DSC_PARITY_ERROR_FAILURE 0x0F00
-#define I2O_SCSI_DSC_AUTOSENSE_FAILED 0x1000
-#define I2O_SCSI_DSC_NO_ADAPTER 0x1100
-#define I2O_SCSI_DSC_DATA_OVERRUN 0x1200
-#define I2O_SCSI_DSC_UNEXPECTED_BUS_FREE 0x1300
-#define I2O_SCSI_DSC_SEQUENCE_FAILURE 0x1400
-#define I2O_SCSI_DSC_REQUEST_LENGTH_ERROR 0x1500
-#define I2O_SCSI_DSC_PROVIDE_FAILURE 0x1600
-#define I2O_SCSI_DSC_BDR_MESSAGE_SENT 0x1700
-#define I2O_SCSI_DSC_REQUEST_TERMINATED 0x1800
-#define I2O_SCSI_DSC_IDE_MESSAGE_SENT 0x3300
-#define I2O_SCSI_DSC_RESOURCE_UNAVAILABLE 0x3400
-#define I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT 0x3500
-#define I2O_SCSI_DSC_MESSAGE_RECEIVED 0x3600
-#define I2O_SCSI_DSC_INVALID_CDB 0x3700
-#define I2O_SCSI_DSC_LUN_INVALID 0x3800
-#define I2O_SCSI_DSC_SCSI_TID_INVALID 0x3900
-#define I2O_SCSI_DSC_FUNCTION_UNAVAILABLE 0x3A00
-#define I2O_SCSI_DSC_NO_NEXUS 0x3B00
-#define I2O_SCSI_DSC_SCSI_IID_INVALID 0x3C00
-#define I2O_SCSI_DSC_CDB_RECEIVED 0x3D00
-#define I2O_SCSI_DSC_LUN_ALREADY_ENABLED 0x3E00
-#define I2O_SCSI_DSC_BUS_BUSY 0x3F00
-#define I2O_SCSI_DSC_QUEUE_FROZEN 0x4000
-
-
-#ifndef TRUE
-#define TRUE 1
-#define FALSE 0
-#endif
-
-#define HBA_FLAGS_INSTALLED_B 0x00000001 // Adapter Was Installed
-#define HBA_FLAGS_BLINKLED_B 0x00000002 // Adapter In Blink LED State
-#define HBA_FLAGS_IN_RESET 0x00000040 /* in reset */
-#define HBA_HOSTRESET_FAILED 0x00000080 /* adpt_resethost failed */
-
-
-// Device state flags
-#define DPTI_DEV_ONLINE 0x00
-#define DPTI_DEV_UNSCANNED 0x01
-#define DPTI_DEV_RESET 0x02
-#define DPTI_DEV_OFFLINE 0x04
-
-
-struct adpt_device {
- struct adpt_device* next_lun;
- u32 flags;
- u32 type;
- u32 capacity;
- u32 block_size;
- u8 scsi_channel;
- u8 scsi_id;
- u64 scsi_lun;
- u8 state;
- u16 tid;
- struct i2o_device* pI2o_dev;
- struct scsi_device *pScsi_dev;
-};
-
-struct adpt_channel {
- struct adpt_device* device[MAX_ID]; /* used as an array of 128 scsi ids */
- u8 scsi_id;
- u8 type;
- u16 tid;
- u32 state;
- struct i2o_device* pI2o_dev;
-};
-
-// HBA state flags
-#define DPTI_STATE_RESET (0x01)
-
-typedef struct _adpt_hba {
- struct _adpt_hba *next;
- struct pci_dev *pDev;
- struct Scsi_Host *host;
- u32 state;
- spinlock_t state_lock;
- int unit;
- int host_no; /* SCSI host number */
- u8 initialized;
- u8 in_use; /* is the management node open*/
-
- char name[32];
- char detail[55];
-
- void __iomem *base_addr_virt;
- void __iomem *msg_addr_virt;
- ulong base_addr_phys;
- void __iomem *post_port;
- void __iomem *reply_port;
- void __iomem *irq_mask;
- u16 post_count;
- u32 post_fifo_size;
- u32 reply_fifo_size;
- u32* reply_pool;
- dma_addr_t reply_pool_pa;
- u32 sg_tablesize; // Scatter/Gather List Size.
- u8 top_scsi_channel;
- u8 top_scsi_id;
- u64 top_scsi_lun;
- u8 dma64;
-
- i2o_status_block* status_block;
- dma_addr_t status_block_pa;
- i2o_hrt* hrt;
- dma_addr_t hrt_pa;
- i2o_lct* lct;
- dma_addr_t lct_pa;
- uint lct_size;
- struct i2o_device* devices;
- struct adpt_channel channel[MAX_CHANNEL];
- struct proc_dir_entry* proc_entry; /* /proc dir */
-
- void __iomem *FwDebugBuffer_P; // Virtual Address Of FW Debug Buffer
- u32 FwDebugBufferSize; // FW Debug Buffer Size In Bytes
- void __iomem *FwDebugStrLength_P;// Virtual Addr Of FW Debug String Len
- void __iomem *FwDebugFlags_P; // Virtual Address Of FW Debug Flags
- void __iomem *FwDebugBLEDflag_P;// Virtual Addr Of FW Debug BLED
- void __iomem *FwDebugBLEDvalue_P;// Virtual Addr Of FW Debug BLED
- u32 FwDebugFlags;
- u32 *ioctl_reply_context[4];
-} adpt_hba;
-
-struct sg_simple_element {
- u32 flag_count;
- u32 addr_bus;
-};
-
-/*
- * Function Prototypes
- */
-
-static void adpt_i2o_sys_shutdown(void);
-static int adpt_init(void);
-static int adpt_i2o_build_sys_table(void);
-static irqreturn_t adpt_isr(int irq, void *dev_id);
-
-static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d);
-static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
- int group, int field, void *buf, int buflen);
-#ifdef DEBUG
-static const char *adpt_i2o_get_class_name(int class);
-#endif
-static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
- void *opblk, dma_addr_t opblk_pa, int oplen,
- void *resblk, dma_addr_t resblk_pa, int reslen);
-static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout);
-static int adpt_i2o_lct_get(adpt_hba* pHba);
-static int adpt_i2o_parse_lct(adpt_hba* pHba);
-static int adpt_i2o_activate_hba(adpt_hba* pHba);
-static int adpt_i2o_enable_hba(adpt_hba* pHba);
-static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d);
-static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len);
-static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba);
-static s32 adpt_i2o_status_get(adpt_hba* pHba);
-static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba);
-static s32 adpt_i2o_hrt_get(adpt_hba* pHba);
-static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* dptdevice);
-static void adpt_i2o_scsi_complete(void __iomem *reply, struct scsi_cmnd *cmd);
-static s32 adpt_scsi_host_alloc(adpt_hba* pHba,struct scsi_host_template * sht);
-static s32 adpt_hba_reset(adpt_hba* pHba);
-static s32 adpt_i2o_reset_hba(adpt_hba* pHba);
-static s32 adpt_rescan(adpt_hba* pHba);
-static s32 adpt_i2o_reparse_lct(adpt_hba* pHba);
-static s32 adpt_send_nop(adpt_hba*pHba,u32 m);
-static void adpt_i2o_delete_hba(adpt_hba* pHba);
-static void adpt_inquiry(adpt_hba* pHba);
-static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun);
-static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev) ;
-static int adpt_i2o_online_hba(adpt_hba* pHba);
-static void adpt_i2o_post_wait_complete(u32, int);
-static int adpt_i2o_systab_send(adpt_hba* pHba);
-
-static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg);
-static int adpt_open(struct inode *inode, struct file *file);
-static int adpt_close(struct inode *inode, struct file *file);
-
-
-#ifdef UARTDELAY
-static void adpt_delay(int millisec);
-#endif
-
-#define PRINT_BUFFER_SIZE 512
-
-#define HBA_FLAGS_DBG_FLAGS_MASK 0xffff0000 // Mask for debug flags
-#define HBA_FLAGS_DBG_KERNEL_PRINT_B 0x00010000 // Kernel Debugger Print
-#define HBA_FLAGS_DBG_FW_PRINT_B 0x00020000 // Firmware Debugger Print
-#define HBA_FLAGS_DBG_FUNCTION_ENTRY_B 0x00040000 // Function Entry Point
-#define HBA_FLAGS_DBG_FUNCTION_EXIT_B 0x00080000 // Function Exit
-#define HBA_FLAGS_DBG_ERROR_B 0x00100000 // Error Conditions
-#define HBA_FLAGS_DBG_INIT_B 0x00200000 // Init Prints
-#define HBA_FLAGS_DBG_OS_COMMANDS_B 0x00400000 // OS Command Info
-#define HBA_FLAGS_DBG_SCAN_B 0x00800000 // Device Scan
-
-#define FW_DEBUG_STR_LENGTH_OFFSET 0
-#define FW_DEBUG_FLAGS_OFFSET 4
-#define FW_DEBUG_BLED_OFFSET 8
-
-#define FW_DEBUG_FLAGS_NO_HEADERS_B 0x01
-#endif /* !HOSTS_C */
-#endif /* _DPT_H */
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index c2a59109857a..6ec296321ffc 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1488,7 +1488,6 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
fh = fc_frame_header_get(fp);
skb = fp_skb(fp);
- wlen = skb->len / FCOE_WORD_TO_BYTE;
if (!lport->link_up) {
kfree_skb(skb);
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 9eba910aaa86..1077110ab273 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -544,6 +544,39 @@ static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id)
vnic_dev_set_default_vlan(fnic->vdev, vlan_id);
}
+static int fnic_scsi_drv_init(struct fnic *fnic)
+{
+ struct Scsi_Host *host = fnic->lport->host;
+
+ /* Configure maximum outstanding IO reqs*/
+ if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD)
+ host->can_queue = min_t(u32, FNIC_MAX_IO_REQ,
+ max_t(u32, FNIC_MIN_IO_REQ,
+ fnic->config.io_throttle_count));
+
+ fnic->fnic_max_tag_id = host->can_queue;
+ host->max_lun = fnic->config.luns_per_tgt;
+ host->max_id = FNIC_MAX_FCP_TARGET;
+ host->max_cmd_len = FCOE_MAX_CMD_LEN;
+
+ host->nr_hw_queues = fnic->wq_copy_count;
+ if (host->nr_hw_queues > 1)
+ shost_printk(KERN_ERR, host,
+ "fnic: blk-mq is not supported");
+
+ host->nr_hw_queues = fnic->wq_copy_count = 1;
+
+ shost_printk(KERN_INFO, host,
+ "fnic: can_queue: %d max_lun: %llu",
+ host->can_queue, host->max_lun);
+
+ shost_printk(KERN_INFO, host,
+ "fnic: max_id: %d max_cmd_len: %d nr_hw_queues: %d",
+ host->max_id, host->max_cmd_len, host->nr_hw_queues);
+
+ return 0;
+}
+
static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct Scsi_Host *host;
@@ -684,17 +717,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_dev_close;
}
- /* Configure Maximum Outstanding IO reqs*/
- if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD) {
- host->can_queue = min_t(u32, FNIC_MAX_IO_REQ,
- max_t(u32, FNIC_MIN_IO_REQ,
- fnic->config.io_throttle_count));
- }
- fnic->fnic_max_tag_id = host->can_queue;
-
- host->max_lun = fnic->config.luns_per_tgt;
- host->max_id = FNIC_MAX_FCP_TARGET;
- host->max_cmd_len = FCOE_MAX_CMD_LEN;
+ fnic_scsi_drv_init(fnic);
fnic_get_res_counts(fnic);
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index 2f6c56aabe1d..7d56a236a011 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -26,8 +26,12 @@
struct gvp11_hostdata {
struct WD33C93_hostdata wh;
struct gvp11_scsiregs *regs;
+ struct device *dev;
};
+#define DMA_DIR(d) ((d == DATA_OUT_DIR) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
+#define TO_DMA_MASK(m) (~((unsigned long long)m & 0xffffffff))
+
static irqreturn_t gvp11_intr(int irq, void *data)
{
struct Scsi_Host *instance = data;
@@ -54,17 +58,33 @@ void gvp11_setup(char *str, int *ints)
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
+ unsigned long len = scsi_pointer->this_residual;
struct Scsi_Host *instance = cmd->device->host;
struct gvp11_hostdata *hdata = shost_priv(instance);
struct WD33C93_hostdata *wh = &hdata->wh;
struct gvp11_scsiregs *regs = hdata->regs;
unsigned short cntr = GVP11_DMAC_INT_ENABLE;
- unsigned long addr = virt_to_bus(scsi_pointer->ptr);
+ dma_addr_t addr;
int bank_mask;
static int scsi_alloc_out_of_range = 0;
+ addr = dma_map_single(hdata->dev, scsi_pointer->ptr,
+ len, DMA_DIR(dir_in));
+ if (dma_mapping_error(hdata->dev, addr)) {
+ dev_warn(hdata->dev, "cannot map SCSI data block %p\n",
+ scsi_pointer->ptr);
+ return 1;
+ }
+ scsi_pointer->dma_handle = addr;
+
/* use bounce buffer if the physical address is bad */
if (addr & wh->dma_xfer_mask) {
+ /* drop useless mapping */
+ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+ scsi_pointer->this_residual,
+ DMA_DIR(dir_in));
+ scsi_pointer->dma_handle = (dma_addr_t) NULL;
+
wh->dma_bounce_len = (scsi_pointer->this_residual + 511) & ~0x1ff;
if (!scsi_alloc_out_of_range) {
@@ -87,10 +107,32 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
wh->dma_buffer_pool = BUF_CHIP_ALLOCED;
}
- /* check if the address of the bounce buffer is OK */
- addr = virt_to_bus(wh->dma_bounce_buffer);
+ if (!dir_in) {
+ /* copy to bounce buffer for a write */
+ memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr,
+ scsi_pointer->this_residual);
+ }
+
+ if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED) {
+ /* will flush/invalidate cache for us */
+ addr = dma_map_single(hdata->dev,
+ wh->dma_bounce_buffer,
+ wh->dma_bounce_len,
+ DMA_DIR(dir_in));
+ /* can't map buffer; use PIO */
+ if (dma_mapping_error(hdata->dev, addr)) {
+ dev_warn(hdata->dev,
+ "cannot map bounce buffer %p\n",
+ wh->dma_bounce_buffer);
+ return 1;
+ }
+ }
if (addr & wh->dma_xfer_mask) {
+ /* drop useless mapping */
+ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+ scsi_pointer->this_residual,
+ DMA_DIR(dir_in));
/* fall back to Chip RAM if address out of range */
if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED) {
kfree(wh->dma_bounce_buffer);
@@ -108,15 +150,19 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
return 1;
}
- addr = virt_to_bus(wh->dma_bounce_buffer);
+ if (!dir_in) {
+ /* copy to bounce buffer for a write */
+ memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr,
+ scsi_pointer->this_residual);
+ }
+ /* chip RAM can be mapped to phys. address directly */
+ addr = virt_to_phys(wh->dma_bounce_buffer);
+ /* no need to flush/invalidate cache */
wh->dma_buffer_pool = BUF_CHIP_ALLOCED;
}
+ /* finally, have OK mapping (punted for PIO else) */
+ scsi_pointer->dma_handle = addr;
- if (!dir_in) {
- /* copy to bounce buffer for a write */
- memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr,
- scsi_pointer->this_residual);
- }
}
/* setup dma direction */
@@ -129,13 +175,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
/* setup DMA *physical* address */
regs->ACR = addr;
- if (dir_in) {
- /* invalidate any cache */
- cache_clear(addr, scsi_pointer->this_residual);
- } else {
- /* push any dirty cache */
- cache_push(addr, scsi_pointer->this_residual);
- }
+ /* no more cache flush here - dma_map_single() takes care */
bank_mask = (~wh->dma_xfer_mask >> 18) & 0x01c0;
if (bank_mask)
@@ -161,6 +201,11 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
/* remove write bit from CONTROL bits */
regs->CNTR = GVP11_DMAC_INT_ENABLE;
+ if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED)
+ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+ scsi_pointer->this_residual,
+ DMA_DIR(wh->dma_dir));
+
/* copy from a bounce buffer, if necessary */
if (status && wh->dma_bounce_buffer) {
if (wh->dma_dir && SCpnt)
@@ -287,6 +332,13 @@ static int gvp11_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
default_dma_xfer_mask = ent->driver_data;
+ if (dma_set_mask_and_coherent(&z->dev,
+ TO_DMA_MASK(default_dma_xfer_mask))) {
+ dev_warn(&z->dev, "cannot use DMA mask %llx\n",
+ TO_DMA_MASK(default_dma_xfer_mask));
+ return -ENODEV;
+ }
+
/*
* Rumors state that some GVP ram boards use the same product
* code as the SCSI controllers. Therefore if the board-size
@@ -327,9 +379,16 @@ static int gvp11_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
wdregs.SCMD = &regs->SCMD;
hdata = shost_priv(instance);
- if (gvp11_xfer_mask)
+ if (gvp11_xfer_mask) {
hdata->wh.dma_xfer_mask = gvp11_xfer_mask;
- else
+ if (dma_set_mask_and_coherent(&z->dev,
+ TO_DMA_MASK(gvp11_xfer_mask))) {
+ dev_warn(&z->dev, "cannot use DMA mask %llx\n",
+ TO_DMA_MASK(gvp11_xfer_mask));
+ error = -ENODEV;
+ goto fail_check_or_alloc;
+ }
+ } else
hdata->wh.dma_xfer_mask = default_dma_xfer_mask;
hdata->wh.no_sync = 0xff;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 764e859d0106..33af5b8dede2 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -219,10 +219,15 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
task->lldd_task = NULL;
if (!sas_protocol_ata(task->task_proto)) {
- if (slot->n_elem)
- dma_unmap_sg(dev, task->scatter,
- task->num_scatter,
- task->data_dir);
+ if (slot->n_elem) {
+ if (task->task_proto & SAS_PROTOCOL_SSP)
+ dma_unmap_sg(dev, task->scatter,
+ task->num_scatter,
+ task->data_dir);
+ else
+ dma_unmap_sg(dev, &task->smp_task.smp_req,
+ 1, DMA_TO_DEVICE);
+ }
if (slot->n_elem_dif) {
struct sas_ssp_task *ssp_task = &task->ssp_task;
struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
@@ -269,28 +274,23 @@ static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
}
static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba,
- struct sas_task *task, int n_elem,
- int n_elem_req)
+ struct sas_task *task, int n_elem)
{
struct device *dev = hisi_hba->dev;
- if (!sas_protocol_ata(task->task_proto)) {
+ if (!sas_protocol_ata(task->task_proto) && n_elem) {
if (task->num_scatter) {
- if (n_elem)
- dma_unmap_sg(dev, task->scatter,
- task->num_scatter,
- task->data_dir);
+ dma_unmap_sg(dev, task->scatter, task->num_scatter,
+ task->data_dir);
} else if (task->task_proto & SAS_PROTOCOL_SMP) {
- if (n_elem_req)
- dma_unmap_sg(dev, &task->smp_task.smp_req,
- 1, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, &task->smp_task.smp_req,
+ 1, DMA_TO_DEVICE);
}
}
}
static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
- struct sas_task *task, int *n_elem,
- int *n_elem_req)
+ struct sas_task *task, int *n_elem)
{
struct device *dev = hisi_hba->dev;
int rc;
@@ -308,9 +308,9 @@ static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
goto prep_out;
}
} else if (task->task_proto & SAS_PROTOCOL_SMP) {
- *n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
- 1, DMA_TO_DEVICE);
- if (!*n_elem_req) {
+ *n_elem = dma_map_sg(dev, &task->smp_task.smp_req,
+ 1, DMA_TO_DEVICE);
+ if (!*n_elem) {
rc = -ENOMEM;
goto prep_out;
}
@@ -332,8 +332,7 @@ static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
err_out_dma_unmap:
/* It would be better to call dma_unmap_sg() here, but it's messy */
- hisi_sas_dma_unmap(hisi_hba, task, *n_elem,
- *n_elem_req);
+ hisi_sas_dma_unmap(hisi_hba, task, *n_elem);
prep_out:
return rc;
}
@@ -457,7 +456,7 @@ void hisi_sas_task_deliver(struct hisi_hba *hisi_hba,
static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
- int n_elem = 0, n_elem_dif = 0, n_elem_req = 0;
+ int n_elem = 0, n_elem_dif = 0;
struct domain_device *device = task->dev;
struct asd_sas_port *sas_port = device->port;
struct hisi_sas_device *sas_dev = device->lldd_dev;
@@ -568,8 +567,7 @@ static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
return -EINVAL;
}
- rc = hisi_sas_dma_map(hisi_hba, task, &n_elem,
- &n_elem_req);
+ rc = hisi_sas_dma_map(hisi_hba, task, &n_elem);
if (rc < 0)
goto prep_out;
@@ -605,8 +603,7 @@ err_out_dif_dma_unmap:
if (!sas_protocol_ata(task->task_proto))
hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif);
err_out_dma_unmap:
- hisi_sas_dma_unmap(hisi_hba, task, n_elem,
- n_elem_req);
+ hisi_sas_dma_unmap(hisi_hba, task, n_elem);
prep_out:
dev_err(dev, "task exec: failed[%d]!\n", rc);
return rc;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 4582791def32..349546bacb2b 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1282,8 +1282,6 @@ static void slot_complete_v1_hw(struct hisi_hba *hisi_hba,
ts->stat = SAS_SAM_STAT_GOOD;
- dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
- DMA_TO_DEVICE);
memcpy(to + sg_resp->offset,
hisi_sas_status_buf_addr_mem(slot) +
sizeof(struct hisi_sas_err_record),
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 455d49299ddf..70e401fd432a 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -805,8 +805,8 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba,
return -SAS_QUEUE_FULL;
}
/*
- * SAS IPTT bit0 should be 1, and SATA IPTT bit0 should be 0.
- */
+ * SAS IPTT bit0 should be 1, and SATA IPTT bit0 should be 0.
+ */
if (sata_dev ^ (start & 1))
break;
start++;
@@ -2428,8 +2428,6 @@ static void slot_complete_v2_hw(struct hisi_hba *hisi_hba,
ts->stat = SAS_SAM_STAT_GOOD;
- dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
- DMA_TO_DEVICE);
memcpy(to + sg_resp->offset,
hisi_sas_status_buf_addr_mem(slot) +
sizeof(struct hisi_sas_err_record),
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index eb86afb21aab..efe8c5be5870 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -481,6 +481,9 @@ struct hisi_sas_err_record_v3 {
#define RX_DATA_LEN_UNDERFLOW_OFF 6
#define RX_DATA_LEN_UNDERFLOW_MSK (1 << RX_DATA_LEN_UNDERFLOW_OFF)
+#define RX_FIS_STATUS_ERR_OFF 0
+#define RX_FIS_STATUS_ERR_MSK (1 << RX_FIS_STATUS_ERR_OFF)
+
#define HISI_SAS_COMMAND_ENTRIES_V3_HW 4096
#define HISI_SAS_MSI_COUNT_V3_HW 32
@@ -2161,6 +2164,7 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
hisi_sas_status_buf_addr_mem(slot);
u32 dma_rx_err_type = le32_to_cpu(record->dma_rx_err_type);
u32 trans_tx_fail_type = le32_to_cpu(record->trans_tx_fail_type);
+ u16 sipc_rx_err_type = le16_to_cpu(record->sipc_rx_err_type);
u32 dw3 = le32_to_cpu(complete_hdr->dw3);
switch (task->task_proto) {
@@ -2188,7 +2192,10 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
case SAS_PROTOCOL_SATA:
case SAS_PROTOCOL_STP:
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
- if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
+ if ((complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK) &&
+ (sipc_rx_err_type & RX_FIS_STATUS_ERR_MSK)) {
+ ts->stat = SAS_PROTO_RESPONSE;
+ } else if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
ts->residual = trans_tx_fail_type;
ts->stat = SAS_DATA_UNDERRUN;
} else if (dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
@@ -2311,8 +2318,6 @@ static void slot_complete_v3_hw(struct hisi_hba *hisi_hba,
ts->stat = SAS_SAM_STAT_GOOD;
- dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
- DMA_TO_DEVICE);
memcpy(to + sg_resp->offset,
hisi_sas_status_buf_addr_mem(slot) +
sizeof(struct hisi_sas_err_record),
@@ -2778,16 +2783,13 @@ static DEVICE_ATTR_RW(intr_coal_count_v3_hw);
static int slave_configure_v3_hw(struct scsi_device *sdev)
{
struct Scsi_Host *shost = dev_to_shost(&sdev->sdev_gendev);
- struct domain_device *ddev = sdev_to_domain_dev(sdev);
struct hisi_hba *hisi_hba = shost_priv(shost);
+ int ret = hisi_sas_slave_configure(sdev);
struct device *dev = hisi_hba->dev;
- int ret = sas_slave_configure(sdev);
unsigned int max_sectors;
if (ret)
return ret;
- if (!dev_is_sata(ddev))
- sas_change_queue_depth(sdev, 64);
if (sdev->type == TYPE_ENCLOSURE)
return 0;
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 315c7ac730e9..0738238ed6cc 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -190,6 +190,15 @@ void scsi_remove_host(struct Scsi_Host *shost)
transport_unregister_device(&shost->shost_gendev);
device_unregister(&shost->shost_dev);
device_del(&shost->shost_gendev);
+
+ /*
+ * After scsi_remove_host() has returned the scsi LLD module can be
+ * unloaded and/or the host resources can be released. Hence wait until
+ * the dependent SCSI targets and devices are gone before returning.
+ */
+ wait_event(shost->targets_wq, atomic_read(&shost->target_count) == 0);
+
+ scsi_mq_destroy_tags(shost);
}
EXPORT_SYMBOL(scsi_remove_host);
@@ -236,6 +245,11 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
shost->dma_dev = dma_dev;
+ if (dma_dev->dma_mask) {
+ shost->max_sectors = min_t(unsigned int, shost->max_sectors,
+ dma_max_mapping_size(dma_dev) >> SECTOR_SHIFT);
+ }
+
error = scsi_mq_setup_tags(shost);
if (error)
goto fail;
@@ -295,8 +309,8 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
return error;
/*
- * Any host allocation in this function will be freed in
- * scsi_host_dev_release().
+ * Any resources associated with the SCSI host in this function except
+ * the tag set will be freed by scsi_host_dev_release().
*/
out_del_dev:
device_del(&shost->shost_dev);
@@ -312,6 +326,7 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
pm_runtime_disable(&shost->shost_gendev);
pm_runtime_set_suspended(&shost->shost_gendev);
pm_runtime_put_noidle(&shost->shost_gendev);
+ scsi_mq_destroy_tags(shost);
fail:
return error;
}
@@ -345,12 +360,9 @@ static void scsi_host_dev_release(struct device *dev)
kfree(dev_name(&shost->shost_dev));
}
- if (shost->tag_set.tags)
- scsi_mq_destroy_tags(shost);
-
kfree(shost->shost_data);
- ida_simple_remove(&host_index_ida, shost->host_no);
+ ida_free(&host_index_ida, shost->host_no);
if (shost->shost_state != SHOST_CREATED)
put_device(parent);
@@ -394,8 +406,9 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
INIT_LIST_HEAD(&shost->starved_list);
init_waitqueue_head(&shost->host_wait);
mutex_init(&shost->scan_mutex);
+ init_waitqueue_head(&shost->targets_wq);
- index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL);
+ index = ida_alloc(&host_index_ida, GFP_KERNEL);
if (index < 0) {
kfree(shost);
return NULL;
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 9fee70d6434a..29b1bd755afe 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -52,6 +52,10 @@ static struct iscsi_transport iscsi_sw_tcp_transport;
static unsigned int iscsi_max_lun = ~0;
module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
+static bool iscsi_recv_from_iscsi_q;
+module_param_named(recv_from_iscsi_q, iscsi_recv_from_iscsi_q, bool, 0644);
+MODULE_PARM_DESC(recv_from_iscsi_q, "Set to true to read iSCSI data/headers from the iscsi_q workqueue. The default is false which will perform reads from the network softirq context.");
+
static int iscsi_sw_tcp_dbg;
module_param_named(debug_iscsi_tcp, iscsi_sw_tcp_dbg, int,
S_IRUGO | S_IWUSR);
@@ -122,20 +126,13 @@ static inline int iscsi_sw_sk_state_check(struct sock *sk)
return 0;
}
-static void iscsi_sw_tcp_data_ready(struct sock *sk)
+static void iscsi_sw_tcp_recv_data(struct iscsi_conn *conn)
{
- struct iscsi_conn *conn;
- struct iscsi_tcp_conn *tcp_conn;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+ struct sock *sk = tcp_sw_conn->sock->sk;
read_descriptor_t rd_desc;
- read_lock_bh(&sk->sk_callback_lock);
- conn = sk->sk_user_data;
- if (!conn) {
- read_unlock_bh(&sk->sk_callback_lock);
- return;
- }
- tcp_conn = conn->dd_data;
-
/*
* Use rd_desc to pass 'conn' to iscsi_tcp_recv.
* We set count to 1 because we want the network layer to
@@ -144,13 +141,48 @@ static void iscsi_sw_tcp_data_ready(struct sock *sk)
*/
rd_desc.arg.data = conn;
rd_desc.count = 1;
- tcp_read_sock(sk, &rd_desc, iscsi_sw_tcp_recv);
- iscsi_sw_sk_state_check(sk);
+ tcp_read_sock(sk, &rd_desc, iscsi_sw_tcp_recv);
/* If we had to (atomically) map a highmem page,
* unmap it now. */
iscsi_tcp_segment_unmap(&tcp_conn->in.segment);
+
+ iscsi_sw_sk_state_check(sk);
+}
+
+static void iscsi_sw_tcp_recv_data_work(struct work_struct *work)
+{
+ struct iscsi_conn *conn = container_of(work, struct iscsi_conn,
+ recvwork);
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+ struct sock *sk = tcp_sw_conn->sock->sk;
+
+ lock_sock(sk);
+ iscsi_sw_tcp_recv_data(conn);
+ release_sock(sk);
+}
+
+static void iscsi_sw_tcp_data_ready(struct sock *sk)
+{
+ struct iscsi_sw_tcp_conn *tcp_sw_conn;
+ struct iscsi_tcp_conn *tcp_conn;
+ struct iscsi_conn *conn;
+
+ read_lock_bh(&sk->sk_callback_lock);
+ conn = sk->sk_user_data;
+ if (!conn) {
+ read_unlock_bh(&sk->sk_callback_lock);
+ return;
+ }
+ tcp_conn = conn->dd_data;
+ tcp_sw_conn = tcp_conn->dd_data;
+
+ if (tcp_sw_conn->queue_recv)
+ iscsi_conn_queue_recv(conn);
+ else
+ iscsi_sw_tcp_recv_data(conn);
read_unlock_bh(&sk->sk_callback_lock);
}
@@ -205,7 +237,7 @@ static void iscsi_sw_tcp_write_space(struct sock *sk)
old_write_space(sk);
ISCSI_SW_TCP_DBG(conn, "iscsi_write_space\n");
- iscsi_conn_queue_work(conn);
+ iscsi_conn_queue_xmit(conn);
}
static void iscsi_sw_tcp_conn_set_callbacks(struct iscsi_conn *conn)
@@ -274,7 +306,10 @@ static int iscsi_sw_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
copy = segment->size - offset;
if (segment->total_copied + segment->size < segment->total_size)
- flags |= MSG_MORE;
+ flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
+
+ if (tcp_sw_conn->queue_recv)
+ flags |= MSG_DONTWAIT;
/* Use sendpage if we can; else fall back to sendmsg */
if (!segment->data) {
@@ -557,6 +592,8 @@ iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
conn = cls_conn->dd_data;
tcp_conn = conn->dd_data;
tcp_sw_conn = tcp_conn->dd_data;
+ INIT_WORK(&conn->recvwork, iscsi_sw_tcp_recv_data_work);
+ tcp_sw_conn->queue_recv = iscsi_recv_from_iscsi_q;
tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm))
@@ -610,6 +647,8 @@ static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
iscsi_sw_tcp_conn_restore_callbacks(conn);
sock_put(sock->sk);
+ iscsi_suspend_rx(conn);
+
spin_lock_bh(&session->frwd_lock);
tcp_sw_conn->sock = NULL;
spin_unlock_bh(&session->frwd_lock);
@@ -898,7 +937,7 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
remove_session:
iscsi_session_teardown(cls_session);
remove_host:
- iscsi_host_remove(shost);
+ iscsi_host_remove(shost, false);
free_host:
iscsi_host_free(shost);
return NULL;
@@ -915,7 +954,7 @@ static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session)
iscsi_tcp_r2tpool_free(cls_session->dd_data);
iscsi_session_teardown(cls_session);
- iscsi_host_remove(shost);
+ iscsi_host_remove(shost, false);
iscsi_host_free(shost);
}
@@ -1003,7 +1042,6 @@ static struct scsi_host_template iscsi_sw_tcp_sht = {
.eh_target_reset_handler = iscsi_eh_recover_target,
.dma_boundary = PAGE_SIZE - 1,
.slave_configure = iscsi_sw_tcp_slave_configure,
- .target_alloc = iscsi_target_alloc,
.proc_name = "iscsi_tcp",
.this_id = -1,
.track_queue_depth = 1,
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 791453195099..850a018aefb9 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -28,6 +28,8 @@ struct iscsi_sw_tcp_send {
struct iscsi_sw_tcp_conn {
struct socket *sock;
+ struct work_struct recvwork;
+ bool queue_recv;
struct iscsi_sw_tcp_send out;
/* old values for socket callbacks */
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 797abf4f5399..d95f4bcdeb2e 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -83,7 +83,9 @@ MODULE_PARM_DESC(debug_libiscsi_eh,
"%s " dbg_fmt, __func__, ##arg); \
} while (0);
-inline void iscsi_conn_queue_work(struct iscsi_conn *conn)
+#define ISCSI_CMD_COMPL_WAIT 5
+
+inline void iscsi_conn_queue_xmit(struct iscsi_conn *conn)
{
struct Scsi_Host *shost = conn->session->host;
struct iscsi_host *ihost = shost_priv(shost);
@@ -91,7 +93,17 @@ inline void iscsi_conn_queue_work(struct iscsi_conn *conn)
if (ihost->workq)
queue_work(ihost->workq, &conn->xmitwork);
}
-EXPORT_SYMBOL_GPL(iscsi_conn_queue_work);
+EXPORT_SYMBOL_GPL(iscsi_conn_queue_xmit);
+
+inline void iscsi_conn_queue_recv(struct iscsi_conn *conn)
+{
+ struct Scsi_Host *shost = conn->session->host;
+ struct iscsi_host *ihost = shost_priv(shost);
+
+ if (ihost->workq && !test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))
+ queue_work(ihost->workq, &conn->recvwork);
+}
+EXPORT_SYMBOL_GPL(iscsi_conn_queue_recv);
static void __iscsi_update_cmdsn(struct iscsi_session *session,
uint32_t exp_cmdsn, uint32_t max_cmdsn)
@@ -472,12 +484,18 @@ static void iscsi_free_task(struct iscsi_task *task)
}
}
-void __iscsi_get_task(struct iscsi_task *task)
+bool iscsi_get_task(struct iscsi_task *task)
{
- refcount_inc(&task->refcount);
+ return refcount_inc_not_zero(&task->refcount);
}
-EXPORT_SYMBOL_GPL(__iscsi_get_task);
+EXPORT_SYMBOL_GPL(iscsi_get_task);
+/**
+ * __iscsi_put_task - drop the refcount on a task
+ * @task: iscsi_task to drop the refcount on
+ *
+ * The back_lock must be held when calling in case it frees the task.
+ */
void __iscsi_put_task(struct iscsi_task *task)
{
if (refcount_dec_and_test(&task->refcount))
@@ -489,10 +507,11 @@ void iscsi_put_task(struct iscsi_task *task)
{
struct iscsi_session *session = task->conn->session;
- /* regular RX path uses back_lock */
- spin_lock_bh(&session->back_lock);
- __iscsi_put_task(task);
- spin_unlock_bh(&session->back_lock);
+ if (refcount_dec_and_test(&task->refcount)) {
+ spin_lock_bh(&session->back_lock);
+ iscsi_free_task(task);
+ spin_unlock_bh(&session->back_lock);
+ }
}
EXPORT_SYMBOL_GPL(iscsi_put_task);
@@ -557,16 +576,19 @@ static bool cleanup_queued_task(struct iscsi_task *task)
struct iscsi_conn *conn = task->conn;
bool early_complete = false;
- /* Bad target might have completed task while it was still running */
+ /*
+ * We might have raced where we handled a R2T early and got a response
+ * but have not yet taken the task off the requeue list, then a TMF or
+ * recovery happened and so we can still see it here.
+ */
if (task->state == ISCSI_TASK_COMPLETED)
early_complete = true;
if (!list_empty(&task->running)) {
list_del_init(&task->running);
/*
- * If it's on a list but still running, this could be from
- * a bad target sending a rsp early, cleanup from a TMF, or
- * session recovery.
+ * If it's on a list but still running this could be cleanup
+ * from a TMF or session recovery.
*/
if (task->state == ISCSI_TASK_RUNNING ||
task->state == ISCSI_TASK_COMPLETED)
@@ -587,20 +609,17 @@ static bool cleanup_queued_task(struct iscsi_task *task)
}
/*
- * session frwd lock must be held and if not called for a task that is still
- * pending or from the xmit thread, then xmit thread must be suspended
+ * session back and frwd lock must be held and if not called for a task that
+ * is still pending or from the xmit thread, then xmit thread must be suspended
*/
-static void fail_scsi_task(struct iscsi_task *task, int err)
+static void __fail_scsi_task(struct iscsi_task *task, int err)
{
struct iscsi_conn *conn = task->conn;
struct scsi_cmnd *sc;
int state;
- spin_lock_bh(&conn->session->back_lock);
- if (cleanup_queued_task(task)) {
- spin_unlock_bh(&conn->session->back_lock);
+ if (cleanup_queued_task(task))
return;
- }
if (task->state == ISCSI_TASK_PENDING) {
/*
@@ -619,7 +638,15 @@ static void fail_scsi_task(struct iscsi_task *task, int err)
sc->result = err << 16;
scsi_set_resid(sc, scsi_bufflen(sc));
iscsi_complete_task(task, state);
- spin_unlock_bh(&conn->session->back_lock);
+}
+
+static void fail_scsi_task(struct iscsi_task *task, int err)
+{
+ struct iscsi_session *session = task->conn->session;
+
+ spin_lock_bh(&session->back_lock);
+ __fail_scsi_task(task, err);
+ spin_unlock_bh(&session->back_lock);
}
static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
@@ -668,12 +695,18 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
return 0;
}
+/**
+ * iscsi_alloc_mgmt_task - allocate and setup a mgmt task.
+ * @conn: iscsi conn that the task will be sent on.
+ * @hdr: iscsi pdu that will be sent.
+ * @data: buffer for data segment if needed.
+ * @data_size: length of data in bytes.
+ */
static struct iscsi_task *
-__iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+iscsi_alloc_mgmt_task(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
char *data, uint32_t data_size)
{
struct iscsi_session *session = conn->session;
- struct iscsi_host *ihost = shost_priv(session->host);
uint8_t opcode = hdr->opcode & ISCSI_OPCODE_MASK;
struct iscsi_task *task;
itt_t itt;
@@ -754,28 +787,57 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
task->conn->session->age);
}
- if (unlikely(READ_ONCE(conn->ping_task) == INVALID_SCSI_TASK))
- WRITE_ONCE(conn->ping_task, task);
+ return task;
+
+free_task:
+ iscsi_put_task(task);
+ return NULL;
+}
+
+/**
+ * iscsi_send_mgmt_task - Send task created with iscsi_alloc_mgmt_task.
+ * @task: iscsi task to send.
+ *
+ * On failure this returns a non-zero error code, and the driver must free
+ * the task with iscsi_put_task;
+ */
+static int iscsi_send_mgmt_task(struct iscsi_task *task)
+{
+ struct iscsi_conn *conn = task->conn;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_host *ihost = shost_priv(conn->session->host);
+ int rc = 0;
if (!ihost->workq) {
- if (iscsi_prep_mgmt_task(conn, task))
- goto free_task;
+ rc = iscsi_prep_mgmt_task(conn, task);
+ if (rc)
+ return rc;
- if (session->tt->xmit_task(task))
- goto free_task;
+ rc = session->tt->xmit_task(task);
+ if (rc)
+ return rc;
} else {
list_add_tail(&task->running, &conn->mgmtqueue);
- iscsi_conn_queue_work(conn);
+ iscsi_conn_queue_xmit(conn);
}
- return task;
+ return 0;
+}
-free_task:
- /* regular RX path uses back_lock */
- spin_lock(&session->back_lock);
- __iscsi_put_task(task);
- spin_unlock(&session->back_lock);
- return NULL;
+static int __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ char *data, uint32_t data_size)
+{
+ struct iscsi_task *task;
+ int rc;
+
+ task = iscsi_alloc_mgmt_task(conn, hdr, data, data_size);
+ if (!task)
+ return -ENOMEM;
+
+ rc = iscsi_send_mgmt_task(task);
+ if (rc)
+ iscsi_put_task(task);
+ return rc;
}
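
Splitting __iscsi_conn_send_pdu() into iscsi_alloc_mgmt_task() plus iscsi_send_mgmt_task() leaves the caller holding the task reference across the send attempt, so iscsi_send_nopout() can publish the task in conn->ping_task before transmission and simply clear and free it if the send fails, which is what makes the INVALID_SCSI_TASK placeholder unnecessary. A rough user-space model of that allocate-publish-send-or-unwind ordering, using C11 atomics in place of READ_ONCE/WRITE_ONCE (the names below are made up for the sketch):

#include <stdatomic.h>
#include <stddef.h>
#include <stdlib.h>

struct task { int id; };

static _Atomic(struct task *) ping_task;

static int xmit(struct task *t) { (void)t; return 0; }	/* stub transport */

int send_ping(void)
{
	struct task *t = calloc(1, sizeof(*t));	/* "alloc_mgmt_task" step */

	if (!t)
		return -1;

	atomic_store(&ping_task, t);	/* publish before sending */

	if (xmit(t)) {			/* "send_mgmt_task" step failed */
		atomic_store(&ping_task, NULL);
		free(t);		/* caller still owns the task */
		return -1;
	}
	return 0;
}
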
int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
@@ -786,7 +848,7 @@ int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
int err = 0;
spin_lock_bh(&session->frwd_lock);
- if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
+ if (__iscsi_conn_send_pdu(conn, hdr, data, data_size))
err = -EPERM;
spin_unlock_bh(&session->frwd_lock);
return err;
@@ -959,7 +1021,6 @@ static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
if (!rhdr) {
if (READ_ONCE(conn->ping_task))
return -EINVAL;
- WRITE_ONCE(conn->ping_task, INVALID_SCSI_TASK);
}
memset(&hdr, 0, sizeof(struct iscsi_nopout));
@@ -973,10 +1034,18 @@ static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
} else
hdr.ttt = RESERVED_ITT;
- task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
- if (!task) {
+ task = iscsi_alloc_mgmt_task(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
+ if (!task)
+ return -ENOMEM;
+
+ if (!rhdr)
+ WRITE_ONCE(conn->ping_task, task);
+
+ if (iscsi_send_mgmt_task(task)) {
if (!rhdr)
WRITE_ONCE(conn->ping_task, NULL);
+ iscsi_put_task(task);
+
iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
return -EIO;
} else if (!rhdr) {
@@ -1434,11 +1503,17 @@ static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task,
{
int rc;
- spin_lock_bh(&conn->session->back_lock);
-
if (!conn->task) {
- /* Take a ref so we can access it after xmit_task() */
- __iscsi_get_task(task);
+ /*
+ * Take a ref so we can access it after xmit_task().
+ *
+ * This should never fail because the failure paths will have
+ * stopped the xmit thread.
+ */
+ if (!iscsi_get_task(task)) {
+ WARN_ON_ONCE(1);
+ return 0;
+ }
} else {
/* Already have a ref from when we failed to send it last call */
conn->task = NULL;
@@ -1449,7 +1524,7 @@ static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task,
* case a bad target sends a cmd rsp before we have handled the task.
*/
if (was_requeue)
- __iscsi_put_task(task);
+ iscsi_put_task(task);
/*
* Do this after dropping the extra ref because if this was a requeue
@@ -1461,10 +1536,8 @@ static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task,
* task and get woken up again.
*/
conn->task = task;
- spin_unlock_bh(&conn->session->back_lock);
return -ENODATA;
}
- spin_unlock_bh(&conn->session->back_lock);
spin_unlock_bh(&conn->session->frwd_lock);
rc = conn->session->tt->xmit_task(task);
@@ -1472,20 +1545,16 @@ static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task,
if (!rc) {
/* done with this task */
task->last_xfer = jiffies;
- }
- /* regular RX path uses back_lock */
- spin_lock(&conn->session->back_lock);
- if (rc && task->state == ISCSI_TASK_RUNNING) {
+ } else {
/*
* get an extra ref that is released next time we access it
* as conn->task above.
*/
- __iscsi_get_task(task);
+ iscsi_get_task(task);
conn->task = task;
}
- __iscsi_put_task(task);
- spin_unlock(&conn->session->back_lock);
+ iscsi_put_task(task);
return rc;
}
@@ -1513,7 +1582,7 @@ void iscsi_requeue_task(struct iscsi_task *task)
*/
iscsi_put_task(task);
}
- iscsi_conn_queue_work(conn);
+ iscsi_conn_queue_xmit(conn);
spin_unlock_bh(&conn->session->frwd_lock);
}
EXPORT_SYMBOL_GPL(iscsi_requeue_task);
@@ -1567,6 +1636,28 @@ check_mgmt:
goto done;
}
+check_requeue:
+ while (!list_empty(&conn->requeue)) {
+ /*
+ * we always do fastlogout - conn stop code will clean up.
+ */
+ if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
+ break;
+
+ task = list_entry(conn->requeue.next, struct iscsi_task,
+ running);
+
+ if (iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_DATA_OUT))
+ break;
+
+ list_del_init(&task->running);
+ rc = iscsi_xmit_task(conn, task, true);
+ if (rc)
+ goto done;
+ if (!list_empty(&conn->mgmtqueue))
+ goto check_mgmt;
+ }
+
/* process pending command queue */
while (!list_empty(&conn->cmdqueue)) {
task = list_entry(conn->cmdqueue.next, struct iscsi_task,
@@ -1594,28 +1685,10 @@ check_mgmt:
*/
if (!list_empty(&conn->mgmtqueue))
goto check_mgmt;
+ if (!list_empty(&conn->requeue))
+ goto check_requeue;
}
- while (!list_empty(&conn->requeue)) {
- /*
- * we always do fastlogout - conn stop code will clean up.
- */
- if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
- break;
-
- task = list_entry(conn->requeue.next, struct iscsi_task,
- running);
-
- if (iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_DATA_OUT))
- break;
-
- list_del_init(&task->running);
- rc = iscsi_xmit_task(conn, task, true);
- if (rc)
- goto done;
- if (!list_empty(&conn->mgmtqueue))
- goto check_mgmt;
- }
spin_unlock_bh(&conn->session->frwd_lock);
return -ENODATA;
@@ -1782,7 +1855,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
}
} else {
list_add_tail(&task->running, &conn->cmdqueue);
- iscsi_conn_queue_work(conn);
+ iscsi_conn_queue_xmit(conn);
}
session->queued_cmdsn++;
@@ -1843,11 +1916,8 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
__must_hold(&session->frwd_lock)
{
struct iscsi_session *session = conn->session;
- struct iscsi_task *task;
- task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
- NULL, 0);
- if (!task) {
+ if (__iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0)) {
spin_unlock_bh(&session->frwd_lock);
iscsi_conn_printk(KERN_ERR, conn, "Could not send TMF.\n");
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
@@ -1895,6 +1965,7 @@ static void fail_scsi_tasks(struct iscsi_conn *conn, u64 lun, int error)
struct iscsi_task *task;
int i;
+restart_cmd_loop:
spin_lock_bh(&session->back_lock);
for (i = 0; i < session->cmds_max; i++) {
task = session->cmds[i];
@@ -1903,22 +1974,25 @@ static void fail_scsi_tasks(struct iscsi_conn *conn, u64 lun, int error)
if (lun != -1 && lun != task->sc->device->lun)
continue;
-
- __iscsi_get_task(task);
- spin_unlock_bh(&session->back_lock);
+ /*
+ * The cmd is completing, but if this is called from an eh
+ * callout path then scsi-ml owns the cmd once we return. Wait
+ * for the completion path to finish freeing the cmd.
+ */
+ if (!iscsi_get_task(task)) {
+ spin_unlock_bh(&session->back_lock);
+ spin_unlock_bh(&session->frwd_lock);
+ udelay(ISCSI_CMD_COMPL_WAIT);
+ spin_lock_bh(&session->frwd_lock);
+ goto restart_cmd_loop;
+ }
ISCSI_DBG_SESSION(session,
"failing sc %p itt 0x%x state %d\n",
task->sc, task->itt, task->state);
- fail_scsi_task(task, error);
-
- spin_unlock_bh(&session->frwd_lock);
- iscsi_put_task(task);
- spin_lock_bh(&session->frwd_lock);
-
- spin_lock_bh(&session->back_lock);
+ __fail_scsi_task(task, error);
+ __iscsi_put_task(task);
}
-
spin_unlock_bh(&session->back_lock);
}
@@ -1943,7 +2017,7 @@ EXPORT_SYMBOL_GPL(iscsi_suspend_queue);
/**
* iscsi_suspend_tx - suspend iscsi_data_xmit
- * @conn: iscsi conn tp stop processing IO on.
+ * @conn: iscsi conn to stop processing IO on.
*
* This function sets the suspend bit to prevent iscsi_data_xmit
* from sending new IO, and if work is queued on the xmit thread
@@ -1956,15 +2030,30 @@ void iscsi_suspend_tx(struct iscsi_conn *conn)
set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
if (ihost->workq)
- flush_workqueue(ihost->workq);
+ flush_work(&conn->xmitwork);
}
EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
static void iscsi_start_tx(struct iscsi_conn *conn)
{
clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
- iscsi_conn_queue_work(conn);
+ iscsi_conn_queue_xmit(conn);
+}
+
+/**
+ * iscsi_suspend_rx - Prevent recvwork from running again.
+ * @conn: iscsi conn to stop.
+ */
+void iscsi_suspend_rx(struct iscsi_conn *conn)
+{
+ struct Scsi_Host *shost = conn->session->host;
+ struct iscsi_host *ihost = shost_priv(shost);
+
+ set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
+ if (ihost->workq)
+ flush_work(&conn->recvwork);
}
+EXPORT_SYMBOL_GPL(iscsi_suspend_rx);
/*
* We want to make sure a ping is in flight. It has timed out.
@@ -2008,7 +2097,16 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
spin_unlock(&session->back_lock);
goto done;
}
- __iscsi_get_task(task);
+ if (!iscsi_get_task(task)) {
+ /*
+ * Racing with the completion path right now, so give it more
+ * time so that path can complete it like normal.
+ */
+ rc = BLK_EH_RESET_TIMER;
+ task = NULL;
+ spin_unlock(&session->back_lock);
+ goto done;
+ }
spin_unlock(&session->back_lock);
if (session->state != ISCSI_STATE_LOGGED_IN) {
@@ -2257,6 +2355,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
ISCSI_DBG_EH(session, "aborting sc %p\n", sc);
+completion_check:
mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->frwd_lock);
/*
@@ -2296,13 +2395,20 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
return SUCCESS;
}
+ if (!iscsi_get_task(task)) {
+ spin_unlock(&session->back_lock);
+ spin_unlock_bh(&session->frwd_lock);
+ mutex_unlock(&session->eh_mutex);
+ /* We are just about to call iscsi_free_task so wait for it. */
+ udelay(ISCSI_CMD_COMPL_WAIT);
+ goto completion_check;
+ }
+
+ ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n", sc, task->itt);
conn = session->leadconn;
iscsi_get_conn(conn->cls_conn);
conn->eh_abort_cnt++;
age = session->age;
-
- ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n", sc, task->itt);
- __iscsi_get_task(task);
spin_unlock(&session->back_lock);
if (task->state == ISCSI_TASK_PENDING) {
@@ -2828,11 +2934,12 @@ static void iscsi_notify_host_removed(struct iscsi_cls_session *cls_session)
/**
* iscsi_host_remove - remove host and sessions
* @shost: scsi host
+ * @is_shutdown: true if called from a driver shutdown callout
*
* If there are any sessions left, this will initiate the removal and wait
* for the completion.
*/
-void iscsi_host_remove(struct Scsi_Host *shost)
+void iscsi_host_remove(struct Scsi_Host *shost, bool is_shutdown)
{
struct iscsi_host *ihost = shost_priv(shost);
unsigned long flags;
@@ -2841,7 +2948,11 @@ void iscsi_host_remove(struct Scsi_Host *shost)
ihost->state = ISCSI_HOST_REMOVED;
spin_unlock_irqrestore(&ihost->lock, flags);
- iscsi_host_for_each_session(shost, iscsi_notify_host_removed);
+ if (!is_shutdown)
+ iscsi_host_for_each_session(shost, iscsi_notify_host_removed);
+ else
+ iscsi_host_for_each_session(shost, iscsi_force_destroy_session);
+
wait_event_interruptible(ihost->session_removal_wq,
ihost->num_sessions == 0);
if (signal_pending(current))
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 883005757ddb..c182aa83f2c9 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -558,7 +558,11 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
return 0;
}
task->last_xfer = jiffies;
- __iscsi_get_task(task);
+ if (!iscsi_get_task(task)) {
+ spin_unlock(&session->back_lock);
+ /* Let the path that got the early rsp complete it */
+ return 0;
+ }
tcp_conn = conn->dd_data;
rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
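
The change running through both libiscsi.c and libiscsi_tcp.c above is that iscsi_get_task() may now fail: it only takes a reference if the count has not already dropped to zero, so timeout, abort and R2T paths that race with command completion back off instead of resurrecting a task that is being freed. The semantics match refcount_inc_not_zero()/kref_get_unless_zero(); a small user-space sketch of the idea with C11 atomics, assuming the last put frees the object (illustrative names only):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct task {
	atomic_int refcount;	/* 1 while the task is alive */
};

/* Take a reference only if the task has not already started dying. */
bool task_get(struct task *t)
{
	int old = atomic_load(&t->refcount);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&t->refcount, &old, old + 1))
			return true;	/* got a reference */
	}
	return false;			/* completion path already won */
}

void task_put(struct task *t)
{
	if (atomic_fetch_sub(&t->refcount, 1) == 1)
		free(t);		/* last reference: release the task */
}
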
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 260e735d06fa..fa2209080cc2 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -175,13 +175,13 @@ static enum sas_device_type to_dev_type(struct discover_resp *dr)
return dr->attached_dev_type;
}
-static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
+static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
+ struct smp_disc_resp *disc_resp)
{
enum sas_device_type dev_type;
enum sas_linkrate linkrate;
u8 sas_addr[SAS_ADDR_SIZE];
- struct smp_resp *resp = rsp;
- struct discover_resp *dr = &resp->disc;
+ struct discover_resp *dr = &disc_resp->disc;
struct sas_ha_struct *ha = dev->port->ha;
struct expander_device *ex = &dev->ex_dev;
struct ex_phy *phy = &ex->ex_phy[phy_id];
@@ -198,7 +198,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
BUG_ON(!phy->phy);
}
- switch (resp->result) {
+ switch (disc_resp->result) {
case SMP_RESP_PHY_VACANT:
phy->phy_state = PHY_VACANT;
break;
@@ -347,12 +347,13 @@ struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id)
}
#define DISCOVER_REQ_SIZE 16
-#define DISCOVER_RESP_SIZE 56
+#define DISCOVER_RESP_SIZE sizeof(struct smp_disc_resp)
static int sas_ex_phy_discover_helper(struct domain_device *dev, u8 *disc_req,
- u8 *disc_resp, int single)
+ struct smp_disc_resp *disc_resp,
+ int single)
{
- struct discover_resp *dr;
+ struct discover_resp *dr = &disc_resp->disc;
int res;
disc_req[9] = single;
@@ -361,7 +362,6 @@ static int sas_ex_phy_discover_helper(struct domain_device *dev, u8 *disc_req,
disc_resp, DISCOVER_RESP_SIZE);
if (res)
return res;
- dr = &((struct smp_resp *)disc_resp)->disc;
if (memcmp(dev->sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE) == 0) {
pr_notice("Found loopback topology, just ignore it!\n");
return 0;
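
In the libsas hunks the catch-all struct smp_resp and the hard-coded response lengths (56, 32, 60) give way to per-function response structs and sizeof()-derived sizes, so the allocated buffer can never drift out of sync with the structure it is parsed into. A trivial illustration of the idea (the type and field names here are invented for the example):

#include <stdint.h>
#include <stdlib.h>

struct disc_resp {			/* illustrative response layout */
	uint8_t frame_type;
	uint8_t function;
	uint8_t result;
	uint8_t payload[53];
};

#define DISC_RESP_SIZE	sizeof(struct disc_resp)	/* not a magic "56" */

struct disc_resp *alloc_disc_resp(void)
{
	/* Allocation tracks the struct automatically if it ever changes. */
	return calloc(1, DISC_RESP_SIZE);
}
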
@@ -375,7 +375,7 @@ int sas_ex_phy_discover(struct domain_device *dev, int single)
struct expander_device *ex = &dev->ex_dev;
int res = 0;
u8 *disc_req;
- u8 *disc_resp;
+ struct smp_disc_resp *disc_resp;
disc_req = alloc_smp_req(DISCOVER_REQ_SIZE);
if (!disc_req)
@@ -429,27 +429,14 @@ static int sas_expander_discover(struct domain_device *dev)
#define MAX_EXPANDER_PHYS 128
-static void ex_assign_report_general(struct domain_device *dev,
- struct smp_resp *resp)
-{
- struct report_general_resp *rg = &resp->rg;
-
- dev->ex_dev.ex_change_count = be16_to_cpu(rg->change_count);
- dev->ex_dev.max_route_indexes = be16_to_cpu(rg->route_indexes);
- dev->ex_dev.num_phys = min(rg->num_phys, (u8)MAX_EXPANDER_PHYS);
- dev->ex_dev.t2t_supp = rg->t2t_supp;
- dev->ex_dev.conf_route_table = rg->conf_route_table;
- dev->ex_dev.configuring = rg->configuring;
- memcpy(dev->ex_dev.enclosure_logical_id, rg->enclosure_logical_id, 8);
-}
-
#define RG_REQ_SIZE 8
-#define RG_RESP_SIZE 32
+#define RG_RESP_SIZE sizeof(struct smp_rg_resp)
static int sas_ex_general(struct domain_device *dev)
{
u8 *rg_req;
- struct smp_resp *rg_resp;
+ struct smp_rg_resp *rg_resp;
+ struct report_general_resp *rg;
int res;
int i;
@@ -480,7 +467,15 @@ static int sas_ex_general(struct domain_device *dev)
goto out;
}
- ex_assign_report_general(dev, rg_resp);
+ rg = &rg_resp->rg;
+ dev->ex_dev.ex_change_count = be16_to_cpu(rg->change_count);
+ dev->ex_dev.max_route_indexes = be16_to_cpu(rg->route_indexes);
+ dev->ex_dev.num_phys = min(rg->num_phys, (u8)MAX_EXPANDER_PHYS);
+ dev->ex_dev.t2t_supp = rg->t2t_supp;
+ dev->ex_dev.conf_route_table = rg->conf_route_table;
+ dev->ex_dev.configuring = rg->configuring;
+ memcpy(dev->ex_dev.enclosure_logical_id,
+ rg->enclosure_logical_id, 8);
if (dev->ex_dev.configuring) {
pr_debug("RG: ex %016llx self-configuring...\n",
@@ -681,10 +676,10 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
#ifdef CONFIG_SCSI_SAS_ATA
#define RPS_REQ_SIZE 16
-#define RPS_RESP_SIZE 60
+#define RPS_RESP_SIZE sizeof(struct smp_rps_resp)
int sas_get_report_phy_sata(struct domain_device *dev, int phy_id,
- struct smp_resp *rps_resp)
+ struct smp_rps_resp *rps_resp)
{
int res;
u8 *rps_req = alloc_smp_req(RPS_REQ_SIZE);
@@ -1657,7 +1652,7 @@ out_err:
/* ---------- Domain revalidation ---------- */
static int sas_get_phy_discover(struct domain_device *dev,
- int phy_id, struct smp_resp *disc_resp)
+ int phy_id, struct smp_disc_resp *disc_resp)
{
int res;
u8 *disc_req;
@@ -1673,10 +1668,8 @@ static int sas_get_phy_discover(struct domain_device *dev,
disc_resp, DISCOVER_RESP_SIZE);
if (res)
goto out;
- else if (disc_resp->result != SMP_RESP_FUNC_ACC) {
+ if (disc_resp->result != SMP_RESP_FUNC_ACC)
res = disc_resp->result;
- goto out;
- }
out:
kfree(disc_req);
return res;
@@ -1686,7 +1679,7 @@ static int sas_get_phy_change_count(struct domain_device *dev,
int phy_id, int *pcc)
{
int res;
- struct smp_resp *disc_resp;
+ struct smp_disc_resp *disc_resp;
disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);
if (!disc_resp)
@@ -1704,19 +1697,17 @@ static int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id,
u8 *sas_addr, enum sas_device_type *type)
{
int res;
- struct smp_resp *disc_resp;
- struct discover_resp *dr;
+ struct smp_disc_resp *disc_resp;
disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);
if (!disc_resp)
return -ENOMEM;
- dr = &disc_resp->disc;
res = sas_get_phy_discover(dev, phy_id, disc_resp);
if (res == 0) {
memcpy(sas_addr, disc_resp->disc.attached_sas_addr,
SAS_ADDR_SIZE);
- *type = to_dev_type(dr);
+ *type = to_dev_type(&disc_resp->disc);
if (*type == 0)
memset(sas_addr, 0, SAS_ADDR_SIZE);
}
@@ -1760,7 +1751,7 @@ static int sas_get_ex_change_count(struct domain_device *dev, int *ecc)
{
int res;
u8 *rg_req;
- struct smp_resp *rg_resp;
+ struct smp_rg_resp *rg_resp;
rg_req = alloc_smp_req(RG_REQ_SIZE);
if (!rg_req)
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index dc35f0f8eae3..e4f77072a58d 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -531,6 +531,7 @@ static int queue_phy_reset(struct sas_phy *phy, int hard_reset)
if (!d)
return -ENOMEM;
+ pm_runtime_get_sync(ha->dev);
/* libsas workqueue coordinates ata-eh reset with discovery */
mutex_lock(&d->event_lock);
d->reset_result = 0;
@@ -544,6 +545,7 @@ static int queue_phy_reset(struct sas_phy *phy, int hard_reset)
if (rc == 0)
rc = d->reset_result;
mutex_unlock(&d->event_lock);
+ pm_runtime_put_sync(ha->dev);
return rc;
}
@@ -558,6 +560,7 @@ static int queue_phy_enable(struct sas_phy *phy, int enable)
if (!d)
return -ENOMEM;
+ pm_runtime_get_sync(ha->dev);
/* libsas workqueue coordinates ata-eh reset with discovery */
mutex_lock(&d->event_lock);
d->enable_result = 0;
@@ -571,6 +574,7 @@ static int queue_phy_enable(struct sas_phy *phy, int enable)
if (rc == 0)
rc = d->enable_result;
mutex_unlock(&d->event_lock);
+ pm_runtime_put_sync(ha->dev);
return rc;
}
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 13d0ffaada93..8d0ad3abc7b5 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -83,7 +83,7 @@ struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy);
struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id);
int sas_ex_phy_discover(struct domain_device *dev, int single);
int sas_get_report_phy_sata(struct domain_device *dev, int phy_id,
- struct smp_resp *rps_resp);
+ struct smp_rps_resp *rps_resp);
int sas_try_ata_reset(struct asd_sas_phy *phy);
void sas_hae_reset(struct work_struct *work);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index da9070cdad91..e6a083d098a1 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -48,9 +48,6 @@ struct lpfc_sli2_slim;
the NameServer before giving up. */
#define LPFC_CMD_PER_LUN 3 /* max outstanding cmds per lun */
#define LPFC_DEFAULT_SG_SEG_CNT 64 /* sg element count per scsi cmnd */
-#define LPFC_DEFAULT_MENLO_SG_SEG_CNT 128 /* sg element count per scsi
- cmnd for menlo needs nearly twice as for firmware
- downloads using bsg */
#define LPFC_DEFAULT_XPSGL_SIZE 256
#define LPFC_MAX_SG_TABLESIZE 0xffff
@@ -604,7 +601,6 @@ struct lpfc_vport {
#define FC_VFI_REGISTERED 0x800000 /* VFI is registered */
#define FC_FDISC_COMPLETED 0x1000000/* FDISC completed */
#define FC_DISC_DELAYED 0x2000000/* Delay NPort discovery */
-#define FC_RSCN_MEMENTO 0x4000000/* RSCN cmd processed */
uint32_t ct_flags;
#define FC_CT_RFF_ID 0x1 /* RFF_ID accepted by switch */
@@ -987,7 +983,8 @@ struct lpfc_hba {
u8 last_seq, u8 cr_cx_cmd);
void (*__lpfc_sli_prep_abort_xri)(struct lpfc_iocbq *cmdiocbq,
u16 ulp_context, u16 iotag,
- u8 ulp_class, u16 cqid, bool ia);
+ u8 ulp_class, u16 cqid, bool ia,
+ bool wqec);
/* expedite pool */
struct lpfc_epd_pool epd_pool;
@@ -1439,8 +1436,6 @@ struct lpfc_hba {
*/
#define QUE_BUFTAG_BIT (1<<31)
uint32_t buffer_tag_count;
- int wait_4_mlo_maint_flg;
- wait_queue_head_t wait_4_mlo_m_q;
/* data structure used for latency data collection */
#define LPFC_NO_BUCKET 0
#define LPFC_LINEAR_BUCKET 1
@@ -1475,8 +1470,6 @@ struct lpfc_hba {
/* RAS Support */
struct lpfc_ras_fwlog ras_fwlog;
- uint8_t menlo_flag; /* menlo generic flags */
-#define HBA_MENLO_SUPPORT 0x1 /* HBA supports menlo commands */
uint32_t iocb_cnt;
uint32_t iocb_max;
atomic_t sdev_cnt;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 3caaa7c4af48..09cf2cd0ae60 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -922,25 +922,6 @@ lpfc_programtype_show(struct device *dev, struct device_attribute *attr,
}
/**
- * lpfc_mlomgmt_show - Return the Menlo Maintenance sli flag
- * @dev: class converted to a Scsi_host structure.
- * @attr: device attribute, not used.
- * @buf: on return contains the Menlo Maintenance sli flag.
- *
- * Returns: size of formatted string.
- **/
-static ssize_t
-lpfc_mlomgmt_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
-
- return scnprintf(buf, PAGE_SIZE, "%d\n",
- (phba->sli.sli_flag & LPFC_MENLO_MAINT));
-}
-
-/**
* lpfc_vportnum_show - Return the port number in ascii of the hba
* @dev: class converted to a Scsi_host structure.
* @attr: device attribute, not used.
@@ -1109,10 +1090,7 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
"Unknown\n");
break;
}
- if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
- len += scnprintf(buf + len, PAGE_SIZE-len,
- " Menlo Maint Mode\n");
- else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+ if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
if (vport->fc_flag & FC_PUBLIC_LOOP)
len += scnprintf(buf + len, PAGE_SIZE-len,
" Public Loop\n");
@@ -2827,7 +2805,6 @@ static DEVICE_ATTR(option_rom_version, S_IRUGO,
lpfc_option_rom_version_show, NULL);
static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
lpfc_num_discovered_ports_show, NULL);
-static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL);
static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL);
static DEVICE_ATTR_RO(lpfc_drvr_version);
static DEVICE_ATTR_RO(lpfc_enable_fip);
@@ -6220,7 +6197,6 @@ static struct attribute *lpfc_hba_attrs[] = {
&dev_attr_option_rom_version.attr,
&dev_attr_link_state.attr,
&dev_attr_num_discovered_ports.attr,
- &dev_attr_menlo_mgmt_mode.attr,
&dev_attr_lpfc_drvr_version.attr,
&dev_attr_lpfc_enable_fip.attr,
&dev_attr_lpfc_temp_sensor.attr,
@@ -7396,7 +7372,6 @@ lpfc_get_hba_function_mode(struct lpfc_hba *phba)
case PCI_DEVICE_ID_LANCER_FCOE:
case PCI_DEVICE_ID_LANCER_FCOE_VF:
case PCI_DEVICE_ID_ZEPHYR_DCSP:
- case PCI_DEVICE_ID_HORNET:
case PCI_DEVICE_ID_TIGERSHARK:
case PCI_DEVICE_ID_TOMCAT:
phba->hba_flag |= HBA_FCOE_MODE;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 676e7d54b97a..9be3bb01a8ec 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -88,17 +88,9 @@ struct lpfc_bsg_mbox {
uint32_t outExtWLen; /* from app */
};
-#define MENLO_DID 0x0000FC0E
-
-struct lpfc_bsg_menlo {
- struct lpfc_iocbq *cmdiocbq;
- struct lpfc_dmabuf *rmp;
-};
-
#define TYPE_EVT 1
#define TYPE_IOCB 2
#define TYPE_MBOX 3
-#define TYPE_MENLO 4
struct bsg_job_data {
uint32_t type;
struct bsg_job *set_job; /* job waiting for this iocb to finish */
@@ -106,7 +98,6 @@ struct bsg_job_data {
struct lpfc_bsg_event *evt;
struct lpfc_bsg_iocb iocb;
struct lpfc_bsg_mbox mbox;
- struct lpfc_bsg_menlo menlo;
} context_un;
};
@@ -3502,15 +3493,6 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
"1226 mbox: set_variable 0x%x, 0x%x\n",
mb->un.varWords[0],
mb->un.varWords[1]);
- if ((mb->un.varWords[0] == SETVAR_MLOMNT)
- && (mb->un.varWords[1] == 1)) {
- phba->wait_4_mlo_maint_flg = 1;
- } else if (mb->un.varWords[0] == SETVAR_MLORST) {
- spin_lock_irq(&phba->hbalock);
- phba->link_flag &= ~LS_LOOPBACK_MODE;
- spin_unlock_irq(&phba->hbalock);
- phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
- }
break;
case MBX_READ_SPARM64:
case MBX_REG_LOGIN:
@@ -4992,283 +4974,6 @@ lpfc_bsg_mbox_cmd(struct bsg_job *job)
return rc;
}
-/**
- * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
- * @phba: Pointer to HBA context object.
- * @cmdiocbq: Pointer to command iocb.
- * @rspiocbq: Pointer to response iocb.
- *
- * This function is the completion handler for iocbs issued using
- * lpfc_menlo_cmd function. This function is called by the
- * ring event handler function without any lock held. This function
- * can be called from both worker thread context and interrupt
- * context. This function also can be called from another thread which
- * cleans up the SLI layer objects.
- * This function copies the contents of the response iocb to the
- * response iocb memory object provided by the caller of
- * lpfc_sli_issue_iocb_wait and then wakes up the thread which
- * sleeps for the iocb completion.
- **/
-static void
-lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
- struct lpfc_iocbq *cmdiocbq,
- struct lpfc_iocbq *rspiocbq)
-{
- struct bsg_job_data *dd_data;
- struct bsg_job *job;
- struct fc_bsg_reply *bsg_reply;
- IOCB_t *rsp;
- struct lpfc_dmabuf *bmp, *cmp, *rmp;
- struct lpfc_bsg_menlo *menlo;
- unsigned long flags;
- struct menlo_response *menlo_resp;
- unsigned int rsp_size;
- int rc = 0;
-
- dd_data = cmdiocbq->context_un.dd_data;
- cmp = cmdiocbq->cmd_dmabuf;
- bmp = cmdiocbq->bpl_dmabuf;
- menlo = &dd_data->context_un.menlo;
- rmp = menlo->rmp;
- rsp = &rspiocbq->iocb;
-
- /* Determine if job has been aborted */
- spin_lock_irqsave(&phba->ct_ev_lock, flags);
- job = dd_data->set_job;
- if (job) {
- bsg_reply = job->reply;
- /* Prevent timeout handling from trying to abort job */
- job->dd_data = NULL;
- }
- spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-
- /* Copy the job data or set the failing status for the job */
-
- if (job) {
- /* always return the xri, this would be used in the case
- * of a menlo download to allow the data to be sent as a
- * continuation of the exchange.
- */
-
- menlo_resp = (struct menlo_response *)
- bsg_reply->reply_data.vendor_reply.vendor_rsp;
- menlo_resp->xri = rsp->ulpContext;
- if (rsp->ulpStatus) {
- if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
- switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
- case IOERR_SEQUENCE_TIMEOUT:
- rc = -ETIMEDOUT;
- break;
- case IOERR_INVALID_RPI:
- rc = -EFAULT;
- break;
- default:
- rc = -EACCES;
- break;
- }
- } else {
- rc = -EACCES;
- }
- } else {
- rsp_size = rsp->un.genreq64.bdl.bdeSize;
- bsg_reply->reply_payload_rcv_len =
- lpfc_bsg_copy_data(rmp, &job->reply_payload,
- rsp_size, 0);
- }
-
- }
-
- lpfc_sli_release_iocbq(phba, cmdiocbq);
- lpfc_free_bsg_buffers(phba, cmp);
- lpfc_free_bsg_buffers(phba, rmp);
- lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
- kfree(bmp);
- kfree(dd_data);
-
- /* Complete the job if active */
-
- if (job) {
- bsg_reply->result = rc;
- bsg_job_done(job, bsg_reply->result,
- bsg_reply->reply_payload_rcv_len);
- }
-
- return;
-}
-
-/**
- * lpfc_menlo_cmd - send an ioctl for menlo hardware
- * @job: fc_bsg_job to handle
- *
- * This function issues a gen request 64 CR ioctl for all menlo cmd requests,
- * all the command completions will return the xri for the command.
- * For menlo data requests a gen request 64 CX is used to continue the exchange
- * supplied in the menlo request header xri field.
- **/
-static int
-lpfc_menlo_cmd(struct bsg_job *job)
-{
- struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
- struct fc_bsg_request *bsg_request = job->request;
- struct fc_bsg_reply *bsg_reply = job->reply;
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_iocbq *cmdiocbq;
- IOCB_t *cmd;
- int rc = 0;
- struct menlo_command *menlo_cmd;
- struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
- int request_nseg;
- int reply_nseg;
- struct bsg_job_data *dd_data;
- struct ulp_bde64 *bpl = NULL;
-
- /* in case no data is returned return just the return code */
- bsg_reply->reply_payload_rcv_len = 0;
-
- if (job->request_len <
- sizeof(struct fc_bsg_request) +
- sizeof(struct menlo_command)) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2784 Received MENLO_CMD request below "
- "minimum size\n");
- rc = -ERANGE;
- goto no_dd_data;
- }
-
- if (job->reply_len < sizeof(*bsg_reply) +
- sizeof(struct menlo_response)) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2785 Received MENLO_CMD reply below "
- "minimum size\n");
- rc = -ERANGE;
- goto no_dd_data;
- }
-
- if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2786 Adapter does not support menlo "
- "commands\n");
- rc = -EPERM;
- goto no_dd_data;
- }
-
- menlo_cmd = (struct menlo_command *)
- bsg_request->rqst_data.h_vendor.vendor_cmd;
-
- /* allocate our bsg tracking structure */
- dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
- if (!dd_data) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2787 Failed allocation of dd_data\n");
- rc = -ENOMEM;
- goto no_dd_data;
- }
-
- bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!bmp) {
- rc = -ENOMEM;
- goto free_dd;
- }
-
- bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
- if (!bmp->virt) {
- rc = -ENOMEM;
- goto free_bmp;
- }
-
- INIT_LIST_HEAD(&bmp->list);
-
- bpl = (struct ulp_bde64 *)bmp->virt;
- request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
- cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
- 1, bpl, &request_nseg);
- if (!cmp) {
- rc = -ENOMEM;
- goto free_bmp;
- }
- lpfc_bsg_copy_data(cmp, &job->request_payload,
- job->request_payload.payload_len, 1);
-
- bpl += request_nseg;
- reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
- rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
- bpl, &reply_nseg);
- if (!rmp) {
- rc = -ENOMEM;
- goto free_cmp;
- }
-
- cmdiocbq = lpfc_sli_get_iocbq(phba);
- if (!cmdiocbq) {
- rc = -ENOMEM;
- goto free_rmp;
- }
-
- cmd = &cmdiocbq->iocb;
- cmd->un.genreq64.bdl.ulpIoTag32 = 0;
- cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
- cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
- cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- cmd->un.genreq64.bdl.bdeSize =
- (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
- cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
- cmd->un.genreq64.w5.hcsw.Dfctl = 0;
- cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
- cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
- cmd->ulpBdeCount = 1;
- cmd->ulpClass = CLASS3;
- cmd->ulpOwner = OWN_CHIP;
- cmd->ulpLe = 1; /* Limited Edition */
- cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
- cmdiocbq->vport = phba->pport;
- /* We want the firmware to timeout before we do */
- cmd->ulpTimeout = MENLO_TIMEOUT - 5;
- cmdiocbq->cmd_cmpl = lpfc_bsg_menlo_cmd_cmp;
- cmdiocbq->context_un.dd_data = dd_data;
- cmdiocbq->cmd_dmabuf = cmp;
- cmdiocbq->bpl_dmabuf = bmp;
- if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
- cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
- cmd->ulpPU = MENLO_PU; /* 3 */
- cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
- cmd->ulpContext = MENLO_CONTEXT; /* 0 */
- } else {
- cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
- cmd->ulpPU = 1;
- cmd->un.ulpWord[4] = 0;
- cmd->ulpContext = menlo_cmd->xri;
- }
-
- dd_data->type = TYPE_MENLO;
- dd_data->set_job = job;
- dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
- dd_data->context_un.menlo.rmp = rmp;
- job->dd_data = dd_data;
-
- rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
- MENLO_TIMEOUT - 5);
- if (rc == IOCB_SUCCESS)
- return 0; /* done for now */
-
- lpfc_sli_release_iocbq(phba, cmdiocbq);
-
-free_rmp:
- lpfc_free_bsg_buffers(phba, rmp);
-free_cmp:
- lpfc_free_bsg_buffers(phba, cmp);
-free_bmp:
- if (bmp->virt)
- lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
- kfree(bmp);
-free_dd:
- kfree(dd_data);
-no_dd_data:
- /* make error code available to userspace */
- bsg_reply->result = rc;
- job->dd_data = NULL;
- return rc;
-}
-
static int
lpfc_forced_link_speed(struct bsg_job *job)
{
@@ -5823,10 +5528,6 @@ lpfc_bsg_hst_vendor(struct bsg_job *job)
case LPFC_BSG_VENDOR_MBOX:
rc = lpfc_bsg_mbox_cmd(job);
break;
- case LPFC_BSG_VENDOR_MENLO_CMD:
- case LPFC_BSG_VENDOR_MENLO_DATA:
- rc = lpfc_menlo_cmd(job);
- break;
case LPFC_BSG_VENDOR_FORCED_LINK_SPEED:
rc = lpfc_forced_link_speed(job);
break;
@@ -5979,31 +5680,6 @@ lpfc_bsg_timeout(struct bsg_job *job)
phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
break;
- case TYPE_MENLO:
- /* Check to see if IOCB was issued to the port or not. If not,
- * remove it from the txq queue and call cancel iocbs.
- * Otherwise, call abort iotag.
- */
- cmdiocb = dd_data->context_un.menlo.cmdiocbq;
- spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-
- spin_lock_irqsave(&phba->hbalock, flags);
- list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
- list) {
- if (check_iocb == cmdiocb) {
- list_move_tail(&check_iocb->list, &completions);
- break;
- }
- }
- if (list_empty(&completions))
- lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb, NULL);
- spin_unlock_irqrestore(&phba->hbalock, flags);
- if (!list_empty(&completions)) {
- lpfc_sli_cancel_iocbs(phba, &completions,
- IOSTAT_LOCAL_REJECT,
- IOERR_SLI_ABORTED);
- }
- break;
default:
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
break;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index 749d6c43cfce..3c04ca2d7455 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2010-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -33,8 +33,6 @@
#define LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK 5
#define LPFC_BSG_VENDOR_GET_MGMT_REV 6
#define LPFC_BSG_VENDOR_MBOX 7
-#define LPFC_BSG_VENDOR_MENLO_CMD 8
-#define LPFC_BSG_VENDOR_MENLO_DATA 9
#define LPFC_BSG_VENDOR_DIAG_MODE_END 10
#define LPFC_BSG_VENDOR_LINK_DIAG_TEST 11
#define LPFC_BSG_VENDOR_FORCED_LINK_SPEED 14
@@ -131,16 +129,6 @@ struct dfc_mbox_req {
uint32_t extSeqNum;
};
-/* Used for menlo command or menlo data. The xri is only used for menlo data */
-struct menlo_command {
- uint32_t cmd;
- uint32_t xri;
-};
-
-struct menlo_response {
- uint32_t xri; /* return the xri of the iocb exchange */
-};
-
/*
* macros and data structures for handling sli-config mailbox command
* pass-through support, this header file is shared between user and
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index f5d74958b664..bcad91204328 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -370,7 +370,7 @@ void lpfc_sli_prep_xmit_seq64(struct lpfc_hba *phba,
u8 cr_cx_cmd);
void lpfc_sli_prep_abort_xri(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
u16 ulp_context, u16 iotag, u8 ulp_class, u16 cqid,
- bool ia);
+ bool ia, bool wqec);
struct lpfc_sglq *__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xri);
struct lpfc_sglq *__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba,
struct lpfc_iocbq *piocbq);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 7b24c932e812..5037ea09a810 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2007-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -2607,8 +2607,8 @@ lpfc_debugfs_multixripools_write(struct file *file, const char __user *buf,
struct lpfc_sli4_hdw_queue *qp;
struct lpfc_multixri_pool *multixri_pool;
- if (nbytes > 64)
- nbytes = 64;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
@@ -2688,8 +2688,8 @@ lpfc_debugfs_nvmestat_write(struct file *file, const char __user *buf,
if (!phba->targetport)
return -ENXIO;
- if (nbytes > 64)
- nbytes = 64;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
@@ -2826,8 +2826,8 @@ lpfc_debugfs_ioktime_write(struct file *file, const char __user *buf,
char mybuf[64];
char *pbuf;
- if (nbytes > 64)
- nbytes = 64;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
@@ -2954,8 +2954,8 @@ lpfc_debugfs_nvmeio_trc_write(struct file *file, const char __user *buf,
char mybuf[64];
char *pbuf;
- if (nbytes > 63)
- nbytes = 63;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
@@ -3060,8 +3060,8 @@ lpfc_debugfs_hdwqstat_write(struct file *file, const char __user *buf,
char *pbuf;
int i;
- if (nbytes > 64)
- nbytes = 64;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
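
All of the lpfc debugfs write handlers above copy user input into a local char mybuf[64]; clamping nbytes to sizeof(mybuf) - 1 instead of a literal 64 or 63 guarantees room for the NUL terminator and keeps working if the buffer size ever changes. A user-space sketch of the same clamp-and-terminate pattern (memcpy stands in for copy_from_user() here):

#include <stddef.h>
#include <string.h>

/* Copy at most dst_size - 1 bytes into dst and always NUL-terminate it. */
size_t copy_clamped(char *dst, size_t dst_size, const char *src, size_t nbytes)
{
	if (nbytes > dst_size - 1)
		nbytes = dst_size - 1;

	memset(dst, 0, dst_size);
	memcpy(dst, src, nbytes);
	return nbytes;
}

/* Usage: char mybuf[64]; copy_clamped(mybuf, sizeof(mybuf), buf, nbytes); */
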
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 3fababb7c181..9e69de9eb992 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1790,18 +1790,20 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
/* Move this back to NPR state */
if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
- /* The new_ndlp is replacing ndlp totally, so we need
- * to put ndlp on UNUSED list and try to free it.
+ /* The ndlp doesn't have a portname yet, but does have an
+ * NPort ID. The new_ndlp portname matches the Rport's
+ * portname. Reinstantiate the new_ndlp and reset the ndlp.
*/
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3179 PLOGI confirm NEW: %x %x\n",
new_ndlp->nlp_DID, keepDID);
/* Two ndlps cannot have the same did on the nodelist.
- * Note: for this case, ndlp has a NULL WWPN so setting
- * the nlp_fc4_type isn't required.
+ * The KeepDID and keep_nlp_fc4_type need to be swapped
+ * because ndlp is in flight with no WWPN.
*/
ndlp->nlp_DID = keepDID;
+ ndlp->nlp_fc4_type = keep_nlp_fc4_type;
lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
if (phba->sli_rev == LPFC_SLI_REV4 &&
active_rrqs_xri_bitmap)
@@ -1816,9 +1818,8 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
lpfc_unreg_rpi(vport, ndlp);
- /* Two ndlps cannot have the same did and the fc4
- * type must be transferred because the ndlp is in
- * flight.
+ /* The ndlp and new_ndlp both have WWPNs but are swapping
+ * NPort Ids and attributes.
*/
ndlp->nlp_DID = keepDID;
ndlp->nlp_fc4_type = keep_nlp_fc4_type;
@@ -1886,7 +1887,6 @@ lpfc_end_rscn(struct lpfc_vport *vport)
else {
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_RSCN_MODE;
- vport->fc_flag |= FC_RSCN_MEMENTO;
spin_unlock_irq(shost->host_lock);
}
}
@@ -2434,14 +2434,13 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
u32 local_nlp_type, elscmd;
/*
- * If discovery was kicked off from RSCN mode,
- * the FC4 types supported from a
+ * If we are in RSCN mode, the FC4 types supported from a
* previous GFT_ID command may not be accurate. So, if we
* are a NVME Initiator, always look for the possibility of
* the remote NPort beng a NVME Target.
*/
if (phba->sli_rev == LPFC_SLI_REV4 &&
- vport->fc_flag & (FC_RSCN_MODE | FC_RSCN_MEMENTO) &&
+ vport->fc_flag & FC_RSCN_MODE &&
vport->nvmei_support)
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
local_nlp_type = ndlp->nlp_fc4_type;
@@ -4571,15 +4570,6 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
case IOSTAT_LOCAL_REJECT:
switch ((ulp_word4 & IOERR_PARAM_MASK)) {
case IOERR_LOOP_OPEN_FAILURE:
- if (cmd == ELS_CMD_FLOGI) {
- if (PCI_DEVICE_ID_HORNET ==
- phba->pcidev->device) {
- phba->fc_topology = LPFC_TOPOLOGY_LOOP;
- phba->pport->fc_myDID = 0;
- phba->alpa_map[0] = 0;
- phba->alpa_map[1] = 0;
- }
- }
if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
delay = 1000;
retry = 1;
@@ -7915,7 +7905,6 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
!(vport->fc_flag & FC_RSCN_DISCOVERY)) {
vport->fc_flag |= FC_RSCN_MODE;
- vport->fc_flag &= ~FC_RSCN_MEMENTO;
spin_unlock_irq(shost->host_lock);
if (rscn_cnt) {
cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
@@ -7965,7 +7954,6 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_RSCN_MODE;
- vport->fc_flag &= ~FC_RSCN_MEMENTO;
spin_unlock_irq(shost->host_lock);
vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
/* Indicate we are done walking fc_rscn_id_list on this vport */
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index fb36f26170e4..2645def612e6 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1354,8 +1354,7 @@ lpfc_linkup_port(struct lpfc_vport *vport)
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
- FC_RSCN_MEMENTO | FC_RSCN_MODE |
- FC_NLP_MORE | FC_RSCN_DISCOVERY);
+ FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
vport->fc_flag |= FC_NDISC_ACTIVE;
vport->fc_ns_retry = 0;
spin_unlock_irq(shost->host_lock);
@@ -3763,18 +3762,8 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
phba->fc_eventTag = la->eventTag;
- if (phba->sli_rev < LPFC_SLI_REV4) {
- spin_lock_irqsave(&phba->hbalock, iflags);
- if (bf_get(lpfc_mbx_read_top_mm, la))
- phba->sli.sli_flag |= LPFC_MENLO_MAINT;
- else
- phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- }
-
phba->link_events++;
- if ((attn_type == LPFC_ATT_LINK_UP) &&
- !(phba->sli.sli_flag & LPFC_MENLO_MAINT)) {
+ if (attn_type == LPFC_ATT_LINK_UP) {
phba->fc_stat.LinkUp++;
if (phba->link_flag & LS_LOOPBACK_MODE) {
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@@ -3788,15 +3777,13 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1303 Link Up Event x%x received "
- "Data: x%x x%x x%x x%x x%x x%x %d\n",
+ "Data: x%x x%x x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag,
bf_get(lpfc_mbx_read_top_alpa_granted,
la),
bf_get(lpfc_mbx_read_top_link_spd, la),
phba->alpa_map[0],
- bf_get(lpfc_mbx_read_top_mm, la),
- bf_get(lpfc_mbx_read_top_fa, la),
- phba->wait_4_mlo_maint_flg);
+ bf_get(lpfc_mbx_read_top_fa, la));
}
lpfc_mbx_process_link_up(phba, la);
@@ -3816,58 +3803,25 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
else if (attn_type == LPFC_ATT_UNEXP_WWPN)
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1313 Link Down Unexpected FA WWPN Event x%x "
- "received Data: x%x x%x x%x x%x x%x\n",
+ "received Data: x%x x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag,
phba->pport->port_state, vport->fc_flag,
- bf_get(lpfc_mbx_read_top_mm, la),
bf_get(lpfc_mbx_read_top_fa, la));
else
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1305 Link Down Event x%x received "
- "Data: x%x x%x x%x x%x x%x\n",
+ "Data: x%x x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag,
phba->pport->port_state, vport->fc_flag,
- bf_get(lpfc_mbx_read_top_mm, la),
bf_get(lpfc_mbx_read_top_fa, la));
lpfc_mbx_issue_link_down(phba);
}
- if (phba->sli.sli_flag & LPFC_MENLO_MAINT &&
- attn_type == LPFC_ATT_LINK_UP) {
- if (phba->link_state != LPFC_LINK_DOWN) {
- phba->fc_stat.LinkDown++;
- lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
- "1312 Link Down Event x%x received "
- "Data: x%x x%x x%x\n",
- la->eventTag, phba->fc_eventTag,
- phba->pport->port_state, vport->fc_flag);
- lpfc_mbx_issue_link_down(phba);
- } else
- lpfc_enable_la(phba);
-
- lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
- "1310 Menlo Maint Mode Link up Event x%x rcvd "
- "Data: x%x x%x x%x\n",
- la->eventTag, phba->fc_eventTag,
- phba->pport->port_state, vport->fc_flag);
- /*
- * The cmnd that triggered this will be waiting for this
- * signal.
- */
- /* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
- if (phba->wait_4_mlo_maint_flg) {
- phba->wait_4_mlo_maint_flg = 0;
- wake_up_interruptible(&phba->wait_4_mlo_m_q);
- }
- }
if ((phba->sli_rev < LPFC_SLI_REV4) &&
- bf_get(lpfc_mbx_read_top_fa, la)) {
- if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
- lpfc_issue_clear_la(phba, vport);
+ bf_get(lpfc_mbx_read_top_fa, la))
lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
"1311 fa %d\n",
bf_get(lpfc_mbx_read_top_fa, la));
- }
lpfc_mbx_cmpl_read_topology_free_mbuf:
lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 7b8cf678abb5..071983e2cdfe 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1728,7 +1728,6 @@ struct lpfc_fdmi_reg_portattr {
#define PCI_DEVICE_ID_HELIOS_SCSP 0xfd11
#define PCI_DEVICE_ID_HELIOS_DCSP 0xfd12
#define PCI_DEVICE_ID_ZEPHYR 0xfe00
-#define PCI_DEVICE_ID_HORNET 0xfe05
#define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11
#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12
#define PCI_VENDOR_ID_SERVERENGINE 0x19a2
@@ -1773,7 +1772,6 @@ struct lpfc_fdmi_reg_portattr {
#define ZEPHYR_JEDEC_ID 0x0577
#define VIPER_JEDEC_ID 0x4838
#define SATURN_JEDEC_ID 0x1004
-#define HORNET_JDEC_ID 0x2057706D
#define JEDEC_ID_MASK 0x0FFFF000
#define JEDEC_ID_SHIFT 12
@@ -3074,7 +3072,6 @@ struct lpfc_mbx_read_top {
#define lpfc_mbx_read_top_topology_WORD word3
#define LPFC_TOPOLOGY_PT_PT 0x01 /* Topology is pt-pt / pt-fabric */
#define LPFC_TOPOLOGY_LOOP 0x02 /* Topology is FC-AL */
-#define LPFC_TOPOLOGY_MM 0x05 /* maint mode zephtr to menlo */
/* store the LILP AL_PA position map into */
struct ulp_bde64 lilpBde64;
#define LPFC_ALPA_MAP_SIZE 128
@@ -4423,11 +4420,4 @@ lpfc_error_lost_link(u32 ulp_status, u32 ulp_word4)
ulp_word4 == IOERR_SLI_DOWN));
}
-#define MENLO_TRANSPORT_TYPE 0xfe
-#define MENLO_CONTEXT 0
-#define MENLO_PU 3
-#define MENLO_TIMEOUT 30
-#define SETVAR_MLOMNT 0x103107
-#define SETVAR_MLORST 0x103007
-
#define BPL_ALIGN_SZ 8 /* 8 byte alignment for bpl and mbufs */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index f024415731ac..4527fef23ae7 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -4736,7 +4736,6 @@ struct create_xri_wqe {
uint32_t rsvd_12_15[4]; /* word 12-15 */
};
-#define INHIBIT_ABORT 1
#define T_REQUEST_TAG 3
#define T_XRI_TAG 1
diff --git a/drivers/scsi/lpfc/lpfc_ids.h b/drivers/scsi/lpfc/lpfc_ids.h
index a1b9be245560..0b1616e93cf4 100644
--- a/drivers/scsi/lpfc/lpfc_ids.h
+++ b/drivers/scsi/lpfc/lpfc_ids.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -60,8 +60,6 @@ const struct pci_device_id lpfc_id_table[] = {
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
- PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 750dd1e9f2cc..c69c5a0979ec 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -375,6 +375,9 @@ lpfc_update_vport_wwn(struct lpfc_vport *vport)
if (phba->sli_rev == LPFC_SLI_REV4 &&
vport->port_type == LPFC_PHYSICAL_PORT &&
phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_FABRIC) {
+ if (!(phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG))
+ phba->sli4_hba.fawwpn_flag &=
+ ~LPFC_FAWWPN_FABRIC;
lpfc_printf_log(phba, KERN_INFO,
LOG_SLI | LOG_DISCOVERY | LOG_ELS,
"2701 FA-PWWN change WWPN from %llx to "
@@ -2682,11 +2685,6 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
case PCI_DEVICE_ID_SAT_S:
m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
break;
- case PCI_DEVICE_ID_HORNET:
- m = (typeof(m)){"LP21000", "PCIe",
- "Obsolete, Unsupported FCoE Adapter"};
- GE = 1;
- break;
case PCI_DEVICE_ID_PROTEUS_VF:
m = (typeof(m)){"LPev12000", "PCIe IOV",
"Obsolete, Unsupported Fibre Channel Adapter"};
@@ -7692,7 +7690,6 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
INIT_LIST_HEAD(&phba->port_list);
INIT_LIST_HEAD(&phba->work_list);
- init_waitqueue_head(&phba->wait_4_mlo_m_q);
/* Initialize the wait queue head for the kernel thread */
init_waitqueue_head(&phba->work_waitq);
@@ -7776,13 +7773,6 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
if (rc)
return -ENODEV;
- if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
- phba->menlo_flag |= HBA_MENLO_SUPPORT;
- /* check for menlo minimum sg count */
- if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
- phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
- }
-
if (!phba->sli.sli3_ring)
phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
sizeof(struct lpfc_sli_ring),
@@ -7958,6 +7948,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* The lpfc_wq workqueue for deferred irq use */
phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
+ if (!phba->wq)
+ return -ENOMEM;
/*
* Initialize timers used by driver
@@ -9975,7 +9967,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
"configured on\n");
phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_CONFIG;
} else {
- phba->sli4_hba.fawwpn_flag = 0;
+ /* Clear FW configured flag, preserve driver flag */
+ phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_CONFIG;
}
phba->sli4_hba.conf_trunk =
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index cd10ee6482fc..152245f7cacc 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -2824,6 +2824,7 @@ lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
wcqep->word0 = 0;
bf_set(lpfc_wcqe_c_status, wcqep, stat);
wcqep->parameter = param;
+ wcqep->total_data_placed = 0;
wcqep->word3 = 0; /* xb is 0 */
/* Call release with XB=1 to queue the IO into the abort list. */
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index c0ee0b39075d..f7cfac0da9b6 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -722,7 +722,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
struct lpfc_nvmet_tgtport *tgtp;
struct nvmefc_tgt_fcp_req *rsp;
struct lpfc_async_xchg_ctx *ctxp;
- uint32_t status, result, op, start_clean, logerr;
+ uint32_t status, result, op, logerr;
struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
int id;
@@ -820,9 +820,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
} else {
ctxp->entry_cnt++;
- start_clean = offsetof(struct lpfc_iocbq, cmd_flag);
- memset(((char *)cmdwqe) + start_clean, 0,
- (sizeof(struct lpfc_iocbq) - start_clean));
+ memset_startat(cmdwqe, 0, cmd_flag);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (ctxp->ts_cmd_nvme) {
ctxp->ts_isr_data = cmdwqe->isr_timestamp;
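
memset_startat(), from <linux/string.h>, zeroes a structure from a named member through to the end of the struct, which is exactly what the removed offsetof() arithmetic did by hand, only without the chance of the start member and the length getting out of step. A user-space approximation of the helper, shown only to illustrate the semantics (the struct here is made up):

#include <stddef.h>
#include <string.h>

/* Simplified model: zero everything from 'member' to the end of *obj. */
#define memset_startat(obj, val, member)				\
	memset((char *)(obj) + offsetof(typeof(*(obj)), member), (val),\
	       sizeof(*(obj)) - offsetof(typeof(*(obj)), member))

struct iocb_model {
	int list_head;		/* preserved across reuse */
	int cmd_flag;		/* zeroed from here onward */
	int payload[8];
};

void reset_for_reuse(struct iocb_model *q)
{
	memset_startat(q, 0, cmd_flag);	/* clears cmd_flag..end of struct */
}
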
@@ -3337,46 +3335,6 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
return 1;
}
-/**
- * lpfc_nvmet_prep_abort_wqe - set up 'abort' work queue entry.
- * @pwqeq: Pointer to command iocb.
- * @xritag: Tag that uniqely identifies the local exchange resource.
- * @opt: Option bits -
- * bit 0 = inhibit sending abts on the link
- *
- * This function is called with hbalock held.
- **/
-static void
-lpfc_nvmet_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt)
-{
- union lpfc_wqe128 *wqe = &pwqeq->wqe;
-
- /* WQEs are reused. Clear stale data and set key fields to
- * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
- */
- memset(wqe, 0, sizeof(*wqe));
-
- if (opt & INHIBIT_ABORT)
- bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
- /* Abort specified xri tag, with the mask deliberately zeroed */
- bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
-
- bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
-
- /* Abort the I/O associated with this outstanding exchange ID. */
- wqe->abort_cmd.wqe_com.abort_tag = xritag;
-
- /* iotag for the wqe completion. */
- bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, pwqeq->iotag);
-
- bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
-
- bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
- bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
-}
-
static int
lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
struct lpfc_async_xchg_ctx *ctxp,
@@ -3386,7 +3344,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
struct lpfc_iocbq *abts_wqeq;
struct lpfc_nodelist *ndlp;
unsigned long flags;
- u8 opt;
+ bool ia;
int rc;
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
@@ -3426,7 +3384,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
}
abts_wqeq = ctxp->abort_wqeq;
ctxp->state = LPFC_NVME_STE_ABORT;
- opt = (ctxp->flag & LPFC_NVME_ABTS_RCV) ? INHIBIT_ABORT : 0;
+ ia = (ctxp->flag & LPFC_NVME_ABTS_RCV) ? true : false;
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
/* Announce entry to new IO submit field. */
@@ -3472,7 +3430,9 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
/* Ready - mark outstanding as aborted by driver. */
abts_wqeq->cmd_flag |= LPFC_DRIVER_ABORTED;
- lpfc_nvmet_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt);
+ lpfc_sli_prep_abort_xri(phba, abts_wqeq, ctxp->wqeq->sli4_xritag,
+ abts_wqeq->iotag, CLASS3,
+ LPFC_WQE_CQ_ID_DEFAULT, ia, true);
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index ba5e4016262e..084c0f9fdc3a 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -5456,7 +5456,6 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
cur_iocbq->cmd_flag |= LPFC_IO_VMID;
}
}
- atomic_inc(&ndlp->cmd_pending);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 80ac3a051c19..608016725db9 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -2003,10 +2003,12 @@ initpath:
sync_buf->cmd_flag |= LPFC_IO_CMF;
ret_val = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], sync_buf);
- if (ret_val)
+ if (ret_val) {
lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
"6214 Cannot issue CMF_SYNC_WQE: x%x\n",
ret_val);
+ __lpfc_sli_release_iocbq(phba, sync_buf);
+ }
out_unlock:
spin_unlock_irqrestore(&phba->hbalock, iflags);
return ret_val;
@@ -5263,7 +5265,8 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
phba->pport->stopped = 0;
phba->link_state = LPFC_INIT_START;
phba->hba_flag = 0;
- phba->sli4_hba.fawwpn_flag = 0;
+ /* Preserve FA-PWWN expectation */
+ phba->sli4_hba.fawwpn_flag &= LPFC_FAWWPN_FABRIC;
spin_unlock_irq(&phba->hbalock);
memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
@@ -6052,6 +6055,10 @@ lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
/* obtain link type and link number via READ_CONFIG */
phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
lpfc_sli4_read_config(phba);
+
+ if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG)
+ phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
+
if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
goto retrieve_ppname;
@@ -10216,16 +10223,6 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
* can be issued if the link is not up.
*/
switch (piocb->iocb.ulpCommand) {
- case CMD_GEN_REQUEST64_CR:
- case CMD_GEN_REQUEST64_CX:
- if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
- (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
- FC_RCTL_DD_UNSOL_CMD) ||
- (piocb->iocb.un.genreq64.w5.hcsw.Type !=
- MENLO_TRANSPORT_TYPE))
-
- goto iocb_busy;
- break;
case CMD_QUE_RING_BUF_CN:
case CMD_QUE_RING_BUF64_CN:
/*
@@ -10549,6 +10546,7 @@ __lpfc_sli_prep_els_req_rsp_s3(struct lpfc_iocbq *cmdiocbq,
cmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
cmd->un.genreq64.xmit_els_remoteID = did; /* DID */
cmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
+ cmd->ulpPU = PARM_NPIV_DID;
}
cmd->ulpBdeCount = 1;
cmd->ulpLe = 1;
@@ -10855,7 +10853,8 @@ lpfc_sli_prep_xmit_seq64(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
static void
__lpfc_sli_prep_abort_xri_s3(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
- u16 iotag, u8 ulp_class, u16 cqid, bool ia)
+ u16 iotag, u8 ulp_class, u16 cqid, bool ia,
+ bool wqec)
{
IOCB_t *icmd = NULL;
@@ -10884,7 +10883,8 @@ __lpfc_sli_prep_abort_xri_s3(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
static void
__lpfc_sli_prep_abort_xri_s4(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
- u16 iotag, u8 ulp_class, u16 cqid, bool ia)
+ u16 iotag, u8 ulp_class, u16 cqid, bool ia,
+ bool wqec)
{
union lpfc_wqe128 *wqe;
@@ -10911,6 +10911,8 @@ __lpfc_sli_prep_abort_xri_s4(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
/* Word 11 */
+ if (wqec)
+ bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, cqid);
bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
}
@@ -10918,10 +10920,10 @@ __lpfc_sli_prep_abort_xri_s4(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
void
lpfc_sli_prep_abort_xri(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
u16 ulp_context, u16 iotag, u8 ulp_class, u16 cqid,
- bool ia)
+ bool ia, bool wqec)
{
phba->__lpfc_sli_prep_abort_xri(cmdiocbq, ulp_context, iotag, ulp_class,
- cqid, ia);
+ cqid, ia, wqec);
}
/**
@@ -12199,7 +12201,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_sli_prep_abort_xri(phba, abtsiocbp, ulp_context, iotag,
cmdiocb->iocb.ulpClass,
- LPFC_WQE_CQ_ID_DEFAULT, ia);
+ LPFC_WQE_CQ_ID_DEFAULT, ia, false);
abtsiocbp->vport = vport;
@@ -12659,7 +12661,7 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
lpfc_sli_prep_abort_xri(phba, abtsiocbq, ulp_context, iotag,
iocbq->iocb.ulpClass, cqid,
- ia);
+ ia, false);
abtsiocbq->vport = vport;
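Editor's note on the hunk above: the new wqec argument lets the NVMET abort path request a completion queue entry on the abort WQE, while the SCSI and ELS abort callers pass false and keep the old behaviour; lpfc_sli_prep_abort_xri() simply forwards the flag to the SLI-3 or SLI-4 prep routine. Below is a minimal standalone model of that conditional bit set; the bit position and helper name are invented for illustration and do not reflect the real word-11 layout.

/* Simplified model of the wqec handling added to
 * __lpfc_sli_prep_abort_xri_s4(): the WQEC bit is set only when the
 * caller asks for a CQE on the abort WQE. Bit layout is illustrative. */
#include <stdio.h>
#include <stdbool.h>

#define MODEL_WQE_WQEC (1U << 24)	/* made-up bit position for the sketch */

static unsigned int prep_abort_word11(unsigned int cqid, bool wqec)
{
	unsigned int word11 = cqid & 0xffff;	/* completion queue id */

	if (wqec)
		word11 |= MODEL_WQE_WQEC;	/* request a CQE for this abort */
	return word11;
}

int main(void)
{
	printf("nvmet abort word11: 0x%x, scsi abort word11: 0x%x\n",
	       prep_abort_word11(0x3, true), prep_abort_word11(0x3, false));
	return 0;
}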
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 0af6860b8936..cd33dfec758c 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -355,7 +355,6 @@ struct lpfc_sli {
#define LPFC_SLI_ACTIVE 0x200 /* SLI in firmware is active */
#define LPFC_PROCESS_LA 0x400 /* Able to process link attention */
#define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */
-#define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */
#define LPFC_SLI_ASYNC_MBX_BLK 0x2000 /* Async mailbox is blocked */
#define LPFC_SLI_SUPPRESS_RSP 0x4000 /* Suppress RSP feature is supported */
#define LPFC_SLI_USE_EQDR 0x8000 /* EQ Delay Register is supported */
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 2ab6f7db64d8..63eba9928e4b 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.2.0.4"
+#define LPFC_DRIVER_VERSION "14.2.0.5"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 2a339d4a7e9d..157c3bdb50be 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -181,7 +181,7 @@ MODULE_PARM_DESC(cmd_per_lun,
* This would result in non-disk devices being skipped during driver load
* time. These can be later added though, using /proc/scsi/scsi
*/
-static unsigned int megaraid_fast_load = 0;
+static unsigned int megaraid_fast_load;
module_param_named(fast_load, megaraid_fast_load, int, 0);
MODULE_PARM_DESC(fast_load,
"Faster loading of the driver, skips physical devices! (default=0)");
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 0917b05059b4..f6c37a97544e 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3950,9 +3950,9 @@ process_fw_state_change_wq(struct work_struct *work)
u32 wait;
unsigned long flags;
- if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
+ if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
dev_notice(&instance->pdev->dev, "error, recovery st %x\n",
- atomic_read(&instance->adprecovery));
+ atomic_read(&instance->adprecovery));
return ;
}
@@ -7153,22 +7153,18 @@ static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
switch (instance->adapter_type) {
case MFI_SERIES:
if (megasas_alloc_mfi_ctrl_mem(instance))
- goto fail;
+ return -ENOMEM;
break;
case AERO_SERIES:
case VENTURA_SERIES:
case THUNDERBOLT_SERIES:
case INVADER_SERIES:
if (megasas_alloc_fusion_context(instance))
- goto fail;
+ return -ENOMEM;
break;
}
return 0;
- fail:
- kfree(instance->reply_map);
- instance->reply_map = NULL;
- return -ENOMEM;
}
/*
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 5b5885d9732b..09c5fe37754c 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -3199,7 +3199,6 @@ megasas_build_io_fusion(struct megasas_instance *instance,
struct megasas_cmd_fusion *cmd)
{
int sge_count;
- u8 cmd_type;
u16 pd_index = 0;
u8 drive_type = 0;
struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request;
@@ -3225,7 +3224,7 @@ megasas_build_io_fusion(struct megasas_instance *instance,
*/
io_request->IoFlags = cpu_to_le16(scp->cmd_len);
- switch (cmd_type = megasas_cmd_type(scp)) {
+ switch (megasas_cmd_type(scp)) {
case READ_WRITE_LDIO:
megasas_build_ldio_fusion(instance, scp, cmd);
break;
@@ -5311,7 +5310,6 @@ megasas_alloc_fusion_context(struct megasas_instance *instance)
if (!fusion->log_to_span) {
dev_err(&instance->pdev->dev, "Failed from %s %d\n",
__func__, __LINE__);
- kfree(instance->ctrl_context);
return -ENOMEM;
}
}
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index 322d3ad38159..84b541a57b7b 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -38,7 +38,7 @@
#include <asm/irq.h>
#include <asm/hydra.h>
#include <asm/processor.h>
-#include <asm/machdep.h>
+#include <asm/setup.h>
#include <asm/pmac_feature.h>
#include <asm/macio.h>
@@ -1882,11 +1882,6 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
goto out_release;
}
- /* Old junk for root discovery, that will die ultimately */
-#if !defined(MODULE)
- note_scsi_host(mesh, mesh_host);
-#endif
-
mesh_host->base = macio_resource_start(mdev, 0);
mesh_host->irq = macio_irq(mdev, 0);
ms = (struct mesh_state *) mesh_host->hostdata;
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
index 0e1cb4aa4ca2..0935b2e80662 100644
--- a/drivers/scsi/mpi3mr/mpi3mr.h
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -66,12 +66,14 @@ extern atomic64_t event_counter;
#define MPI3MR_NAME_LENGTH 32
#define IOCNAME "%s: "
+#define MPI3MR_MAX_SECTORS 2048
+
/* Definitions for internal SGL and Chain SGL buffers */
#define MPI3MR_PAGE_SIZE_4K 4096
#define MPI3MR_SG_DEPTH (MPI3MR_PAGE_SIZE_4K / sizeof(struct mpi3_sge_common))
/* Definitions for MAX values for shost */
-#define MPI3MR_MAX_CMDS_LUN 7
+#define MPI3MR_MAX_CMDS_LUN 128
#define MPI3MR_MAX_CDB_LENGTH 32
/* Admin queue management definitions */
@@ -333,6 +335,12 @@ struct mpi3mr_ioc_facts {
u8 sge_mod_mask;
u8 sge_mod_value;
u8 sge_mod_shift;
+ u8 max_dev_per_tg;
+ u16 max_io_throttle_group;
+ u16 io_throttle_data_length;
+ u16 io_throttle_low;
+ u16 io_throttle_high;
+
};
/**
@@ -425,6 +433,31 @@ struct mpi3mr_intr_info {
};
/**
+ * struct mpi3mr_throttle_group_info - Throttle group info
+ *
+ * @io_divert: Flag indicates io divert is on or off for the TG
+ * @need_qd_reduction: Flag to indicate QD reduction is needed
+ * @qd_reduction: Queue Depth reduction in units of 10%
+ * @fw_qd: QueueDepth value reported by the firmware
+ * @modified_qd: Modified QueueDepth value due to throttling
+ * @id: Throttle Group ID.
+ * @high: High limit to turn on throttling in 512 byte blocks
+ * @low: Low limit to turn off throttling in 512 byte blocks
+ * @pend_large_data_sz: Counter to track pending large data
+ */
+struct mpi3mr_throttle_group_info {
+ u8 io_divert;
+ u8 need_qd_reduction;
+ u8 qd_reduction;
+ u16 fw_qd;
+ u16 modified_qd;
+ u16 id;
+ u32 high;
+ u32 low;
+ atomic_t pend_large_data_sz;
+};
+
+/**
* struct tgt_dev_sas_sata - SAS/SATA device specific
* information cached from firmware given data
*
@@ -457,22 +490,33 @@ struct tgt_dev_pcie {
};
/**
- * struct tgt_dev_volume - virtual device specific information
+ * struct tgt_dev_vd - virtual device specific information
* cached from firmware given data
*
* @state: State of the VD
+ * @tg_qd_reduction: Queue Depth reduction in units of 10%
+ * @tg_id: VDs throttle group ID
+ * @high: High limit to turn on throttling in 512 byte blocks
+ * @low: Low limit to turn off throttling in 512 byte blocks
+ * @tg: Pointer to throttle group info
*/
-struct tgt_dev_volume {
+struct tgt_dev_vd {
u8 state;
+ u8 tg_qd_reduction;
+ u16 tg_id;
+ u32 tg_high;
+ u32 tg_low;
+ struct mpi3mr_throttle_group_info *tg;
};
+
/**
* union _form_spec_inf - union of device specific information
*/
union _form_spec_inf {
struct tgt_dev_sas_sata sas_sata_inf;
struct tgt_dev_pcie pcie_inf;
- struct tgt_dev_volume vol_inf;
+ struct tgt_dev_vd vd_inf;
};
@@ -490,6 +534,7 @@ union _form_spec_inf {
* @dev_type: SAS/SATA/PCIE device type
* @is_hidden: Should be exposed to upper layers or not
* @host_exposed: Already exposed to host or not
+ * @io_throttle_enabled: I/O throttling needed or not
* @q_depth: Device specific Queue Depth
* @wwid: World wide ID
* @dev_spec: Device type specific information
@@ -506,6 +551,7 @@ struct mpi3mr_tgt_dev {
u8 dev_type;
u8 is_hidden;
u8 host_exposed;
+ u8 io_throttle_enabled;
u16 q_depth;
u64 wwid;
union _form_spec_inf dev_spec;
@@ -557,6 +603,9 @@ static inline void mpi3mr_tgtdev_put(struct mpi3mr_tgt_dev *s)
* @dev_removed: Device removed in the Firmware
* @dev_removedelay: Device is waiting to be removed in FW
* @dev_type: Device type
+ * @io_throttle_enabled: I/O throttling needed or not
+ * @io_divert: Flag indicates io divert is on or off for the dev
+ * @throttle_group: Pointer to throttle group info
* @tgt_dev: Internal target device pointer
* @pend_count: Counter to track pending I/Os during error
* handling
@@ -570,6 +619,9 @@ struct mpi3mr_stgt_priv_data {
u8 dev_removed;
u8 dev_removedelay;
u8 dev_type;
+ u8 io_throttle_enabled;
+ u8 io_divert;
+ struct mpi3mr_throttle_group_info *throttle_group;
struct mpi3mr_tgt_dev *tgt_dev;
u32 pend_count;
};
@@ -796,6 +848,12 @@ struct scmd_priv {
* @logdata_buf: Circular buffer to store log data entries
* @logdata_buf_idx: Index of entry in buffer to store
* @logdata_entry_sz: log data entry size
+ * @pend_large_data_sz: Counter to track pending large data
+ * @io_throttle_data_length: I/O size to track in 512b blocks
+ * @io_throttle_high: I/O size to start throttle in 512b blocks
+ * @io_throttle_low: I/O size to stop throttle in 512b blocks
+ * @num_io_throttle_group: Maximum number of throttle groups
+ * @throttle_groups: Pointer to throttle group info structures
*/
struct mpi3mr_ioc {
struct list_head list;
@@ -960,6 +1018,13 @@ struct mpi3mr_ioc {
u8 *logdata_buf;
u16 logdata_buf_idx;
u16 logdata_entry_sz;
+
+ atomic_t pend_large_data_sz;
+ u32 io_throttle_data_length;
+ u32 io_throttle_high;
+ u32 io_throttle_low;
+ u16 num_io_throttle_group;
+ struct mpi3mr_throttle_group_info *throttle_groups;
};
/**
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index f1d4ea8ba989..0866dfd43318 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -2785,6 +2785,27 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
mrioc->facts.shutdown_timeout =
le16_to_cpu(facts_data->shutdown_timeout);
+ mrioc->facts.max_dev_per_tg =
+ facts_data->max_devices_per_throttle_group;
+ mrioc->facts.io_throttle_data_length =
+ le16_to_cpu(facts_data->io_throttle_data_length);
+ mrioc->facts.max_io_throttle_group =
+ le16_to_cpu(facts_data->max_io_throttle_group);
+ mrioc->facts.io_throttle_low = le16_to_cpu(facts_data->io_throttle_low);
+ mrioc->facts.io_throttle_high =
+ le16_to_cpu(facts_data->io_throttle_high);
+
+ /* Store in 512b block count */
+ if (mrioc->facts.io_throttle_data_length)
+ mrioc->io_throttle_data_length =
+ (mrioc->facts.io_throttle_data_length * 2 * 4);
+ else
+ /* set the length to 1MB + 1K to disable throttle */
+ mrioc->io_throttle_data_length = MPI3MR_MAX_SECTORS + 2;
+
+ mrioc->io_throttle_high = (mrioc->facts.io_throttle_high * 2 * 1024);
+ mrioc->io_throttle_low = (mrioc->facts.io_throttle_low * 2 * 1024);
+
ioc_info(mrioc, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),",
mrioc->facts.ioc_num, mrioc->facts.max_op_req_q,
mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle);
@@ -2798,6 +2819,13 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x\n",
mrioc->facts.dma_mask, (facts_flags &
MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK));
+ ioc_info(mrioc,
+ "max_dev_per_throttle_group(%d), max_throttle_groups(%d)\n",
+ mrioc->facts.max_dev_per_tg, mrioc->facts.max_io_throttle_group);
+ ioc_info(mrioc,
+ "io_throttle_data_len(%dKiB), io_throttle_high(%dMiB), io_throttle_low(%dMiB)\n",
+ mrioc->facts.io_throttle_data_length * 4,
+ mrioc->facts.io_throttle_high, mrioc->facts.io_throttle_low);
}
/**
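Editor's note on the IOC facts hunk above: the conversions turn the reported throttle values into 512-byte block counts. io_throttle_data_length is reported in 4 KiB units (hence * 2 * 4), the high/low watermarks are reported in MiB (hence * 2 * 1024), and a zero report sets the track length just above the 1 MiB maximum I/O size so throttling never triggers. A minimal standalone sketch of that arithmetic follows; the sample facts values are made up for illustration.

/* Standalone model of the mpi3mr throttle-threshold unit conversion. */
#include <stdio.h>

#define MPI3MR_MAX_SECTORS 2048 /* 1 MiB in 512-byte sectors */

int main(void)
{
	unsigned int facts_data_len_4k = 256; /* 4 KiB units -> 1 MiB */
	unsigned int facts_high_mb = 8;       /* MiB */
	unsigned int facts_low_mb = 4;        /* MiB */

	/* 4 KiB units -> 512-byte blocks: * 4 (to KiB) * 2 (to blocks) */
	unsigned int data_len_blks = facts_data_len_4k ?
		facts_data_len_4k * 2 * 4 :
		MPI3MR_MAX_SECTORS + 2; /* above max I/O size, never triggers */

	/* MiB -> 512-byte blocks: * 1024 (to KiB) * 2 (to blocks) */
	unsigned int high_blks = facts_high_mb * 2 * 1024;
	unsigned int low_blks = facts_low_mb * 2 * 1024;

	printf("track >= %u blks, divert at %u blks, resume at %u blks\n",
	       data_len_blks, high_blks, low_blks);
	return 0;
}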
@@ -3666,6 +3694,7 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc)
int retval = 0;
u8 retry = 0;
struct mpi3_ioc_facts_data facts_data;
+ u32 sz;
retry_init:
retval = mpi3mr_bring_ioc_ready(mrioc);
@@ -3691,6 +3720,9 @@ retry_init:
mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD;
+ mrioc->num_io_throttle_group = mrioc->facts.max_io_throttle_group;
+ atomic_set(&mrioc->pend_large_data_sz, 0);
+
if (reset_devices)
mrioc->max_host_ios = min_t(int, mrioc->max_host_ios,
MPI3MR_HOST_IOS_KDUMP);
@@ -3760,6 +3792,15 @@ retry_init:
}
}
+ if (!mrioc->throttle_groups && mrioc->num_io_throttle_group) {
+ dprint_init(mrioc, "allocating memory for throttle groups\n");
+ sz = sizeof(struct mpi3mr_throttle_group_info);
+ mrioc->throttle_groups = (struct mpi3mr_throttle_group_info *)
+ kcalloc(mrioc->num_io_throttle_group, sz, GFP_KERNEL);
+ if (!mrioc->throttle_groups)
+ goto out_failed_noretry;
+ }
+
retval = mpi3mr_enable_events(mrioc);
if (retval) {
ioc_err(mrioc, "failed to enable events %d\n",
@@ -3981,6 +4022,7 @@ static void mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
{
u16 i;
+ struct mpi3mr_throttle_group_info *tg;
mrioc->change_count = 0;
mrioc->active_poll_qcount = 0;
@@ -4029,6 +4071,22 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
spin_lock_init(&mrioc->req_qinfo[i].q_lock);
mpi3mr_memset_op_req_q_buffers(mrioc, i);
}
+
+ atomic_set(&mrioc->pend_large_data_sz, 0);
+ if (mrioc->throttle_groups) {
+ tg = mrioc->throttle_groups;
+ for (i = 0; i < mrioc->num_io_throttle_group; i++, tg++) {
+ tg->id = 0;
+ tg->fw_qd = 0;
+ tg->modified_qd = 0;
+ tg->io_divert = 0;
+ tg->need_qd_reduction = 0;
+ tg->high = 0;
+ tg->low = 0;
+ tg->qd_reduction = 0;
+ atomic_set(&tg->pend_large_data_sz, 0);
+ }
+ }
}
/**
@@ -4663,6 +4721,15 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
ioc_err(mrioc, "Failed to issue soft reset to the ioc\n");
goto out;
}
+ if (mrioc->num_io_throttle_group !=
+ mrioc->facts.max_io_throttle_group) {
+ ioc_err(mrioc,
+ "max io throttle group doesn't match old(%d), new(%d)\n",
+ mrioc->num_io_throttle_group,
+ mrioc->facts.max_io_throttle_group);
+ retval = -EPERM;
+ goto out;
+ }
mpi3mr_flush_delayed_cmd_lists(mrioc);
mpi3mr_flush_drv_cmds(mrioc);
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index 59a18769a4fe..bfa1165e23b6 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -38,6 +38,8 @@ MODULE_PARM_DESC(logging_level,
static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx);
+#define MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION (0xFFFF)
+
/**
* mpi3mr_host_tag_for_scmd - Get host tag for a scmd
* @mrioc: Adapter instance reference
@@ -355,6 +357,50 @@ void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc)
}
/**
+ * mpi3mr_queue_qd_reduction_event - Queue TG QD reduction event
+ * @mrioc: Adapter instance reference
+ * @tg: Throttle group information pointer
+ *
+ * Accessor to queue a synthetically generated driver event to
+ * the event worker thread; the driver event will be used to
+ * reduce the QD of all VDs in the TG from the worker thread.

+ *
+ * Return: None.
+ */
+static void mpi3mr_queue_qd_reduction_event(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_throttle_group_info *tg)
+{
+ struct mpi3mr_fwevt *fwevt;
+ u16 sz = sizeof(struct mpi3mr_throttle_group_info *);
+
+ /*
+ * If the QD reduction event is already queued due to throttle and if
+ * the QD is not restored through device info change event
+ * then don't queue further reduction events
+ */
+ if (tg->fw_qd != tg->modified_qd)
+ return;
+
+ fwevt = mpi3mr_alloc_fwevt(sz);
+ if (!fwevt) {
+ ioc_warn(mrioc, "failed to queue TG QD reduction event\n");
+ return;
+ }
+ *(struct mpi3mr_throttle_group_info **)fwevt->event_data = tg;
+ fwevt->mrioc = mrioc;
+ fwevt->event_id = MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION;
+ fwevt->send_ack = 0;
+ fwevt->process_evt = 1;
+ fwevt->evt_ctx = 0;
+ fwevt->event_data_size = sz;
+ tg->modified_qd = max_t(u16, (tg->fw_qd * tg->qd_reduction) / 10, 8);
+
+ dprint_event_bh(mrioc, "qd reduction event queued for tg_id(%d)\n",
+ tg->id);
+ mpi3mr_fwevt_add_to_list(mrioc, fwevt);
+}
+
+/**
* mpi3mr_invalidate_devhandles -Invalidate device handles
* @mrioc: Adapter instance reference
*
@@ -373,6 +419,9 @@ void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc)
if (tgtdev->starget && tgtdev->starget->hostdata) {
tgt_priv = tgtdev->starget->hostdata;
tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
+ tgt_priv->io_throttle_enabled = 0;
+ tgt_priv->io_divert = 0;
+ tgt_priv->throttle_group = NULL;
}
}
}
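Editor's note on the hunk above: the queue depth applied by the TG QD reduction event is the firmware queue depth scaled by qd_reduction (in units of 10%), floored at 8, as in the max_t() call in mpi3mr_queue_qd_reduction_event(). A small user-space model of that calculation (function name and example values are illustrative, not driver code):

/* Model of the mpi3mr throttle-group QD reduction: fw_qd is scaled by
 * qd_reduction tenths with a floor of 8. */
#include <stdio.h>

static unsigned short reduced_qd(unsigned short fw_qd, unsigned short qd_reduction)
{
	unsigned short qd = (fw_qd * qd_reduction) / 10;

	return qd > 8 ? qd : 8;
}

int main(void)
{
	/* fw_qd=128 reduced to 30% -> 38; fw_qd=16 at 30% hits the floor of 8 */
	printf("%u %u\n", reduced_qd(128, 3), reduced_qd(16, 3));
	return 0;
}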
@@ -710,6 +759,35 @@ static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_from_tgtpriv(
}
/**
+ * mpi3mr_set_io_divert_for_all_vd_in_tg -set divert for TG VDs
+ * @mrioc: Adapter instance reference
+ * @tg: Throttle group information pointer
+ * @divert_value: 1 or 0
+ *
+ * Accessor to set the io_divert flag to the given value for each
+ * device associated with the given throttle group.
+ *
+ * Return: None.
+ */
+static void mpi3mr_set_io_divert_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_throttle_group_info *tg, u8 divert_value)
+{
+ unsigned long flags;
+ struct mpi3mr_tgt_dev *tgtdev;
+ struct mpi3mr_stgt_priv_data *tgt_priv;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
+ if (tgtdev->starget && tgtdev->starget->hostdata) {
+ tgt_priv = tgtdev->starget->hostdata;
+ if (tgt_priv->throttle_group == tg)
+ tgt_priv->io_divert = divert_value;
+ }
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+}
+
+/**
* mpi3mr_print_device_event_notice - print notice related to post processing of
* device event after controller reset.
*
@@ -840,6 +918,7 @@ static int mpi3mr_change_queue_depth(struct scsi_device *sdev,
else if (!q_depth)
q_depth = MPI3MR_DEFAULT_SDEV_QD;
retval = scsi_change_queue_depth(sdev, q_depth);
+ sdev->max_queue_depth = sdev->queue_depth;
return retval;
}
@@ -926,6 +1005,7 @@ void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
* @mrioc: Adapter instance reference
* @tgtdev: Target device internal structure
* @dev_pg0: New device page0
+ * @is_added: Flag to indicate the device is just added
*
* Update the information from the device page0 into the driver
* cached target device structure.
@@ -933,10 +1013,11 @@ void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
* Return: Nothing.
*/
static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
- struct mpi3mr_tgt_dev *tgtdev, struct mpi3_device_page0 *dev_pg0)
+ struct mpi3mr_tgt_dev *tgtdev, struct mpi3_device_page0 *dev_pg0,
+ bool is_added)
{
u16 flags = 0;
- struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
u8 prot_mask = 0;
tgtdev->perst_id = le16_to_cpu(dev_pg0->persistent_id);
@@ -951,12 +1032,19 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
flags = le16_to_cpu(dev_pg0->flags);
tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN);
+ if (is_added == true)
+ tgtdev->io_throttle_enabled =
+ (flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0;
+
+
if (tgtdev->starget && tgtdev->starget->hostdata) {
scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
tgtdev->starget->hostdata;
scsi_tgt_priv_data->perst_id = tgtdev->perst_id;
scsi_tgt_priv_data->dev_handle = tgtdev->dev_handle;
scsi_tgt_priv_data->dev_type = tgtdev->dev_type;
+ scsi_tgt_priv_data->io_throttle_enabled =
+ tgtdev->io_throttle_enabled;
}
switch (dev_pg0->access_status) {
@@ -1034,10 +1122,32 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
{
struct mpi3_device0_vd_format *vdinf =
&dev_pg0->device_specific.vd_format;
+ struct mpi3mr_throttle_group_info *tg = NULL;
+ u16 vdinf_io_throttle_group =
+ le16_to_cpu(vdinf->io_throttle_group);
- tgtdev->dev_spec.vol_inf.state = vdinf->vd_state;
+ tgtdev->dev_spec.vd_inf.state = vdinf->vd_state;
if (vdinf->vd_state == MPI3_DEVICE0_VD_STATE_OFFLINE)
tgtdev->is_hidden = 1;
+ tgtdev->dev_spec.vd_inf.tg_id = vdinf_io_throttle_group;
+ tgtdev->dev_spec.vd_inf.tg_high =
+ le16_to_cpu(vdinf->io_throttle_group_high) * 2048;
+ tgtdev->dev_spec.vd_inf.tg_low =
+ le16_to_cpu(vdinf->io_throttle_group_low) * 2048;
+ if (vdinf_io_throttle_group < mrioc->num_io_throttle_group) {
+ tg = mrioc->throttle_groups + vdinf_io_throttle_group;
+ tg->id = vdinf_io_throttle_group;
+ tg->high = tgtdev->dev_spec.vd_inf.tg_high;
+ tg->low = tgtdev->dev_spec.vd_inf.tg_low;
+ tg->qd_reduction =
+ tgtdev->dev_spec.vd_inf.tg_qd_reduction;
+ if (is_added == true)
+ tg->fw_qd = tgtdev->q_depth;
+ tg->modified_qd = tgtdev->q_depth;
+ }
+ tgtdev->dev_spec.vd_inf.tg = tg;
+ if (scsi_tgt_priv_data)
+ scsi_tgt_priv_data->throttle_group = tg;
break;
}
default:
@@ -1134,7 +1244,7 @@ static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_ioc *mrioc,
tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
if (!tgtdev)
goto out;
- mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0);
+ mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, false);
if (!tgtdev->is_hidden && !tgtdev->host_exposed)
mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
if (tgtdev->is_hidden && tgtdev->host_exposed)
@@ -1428,6 +1538,60 @@ static void mpi3mr_logdata_evt_bh(struct mpi3mr_ioc *mrioc,
}
/**
+ * mpi3mr_update_sdev_qd - Update SCSI device queue depth
+ * @sdev: SCSI device reference
+ * @data: Queue depth reference
+ *
+ * This is an iterator function called for each SCSI device in a
+ * target in order to update that device's queue depth.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_update_sdev_qd(struct scsi_device *sdev, void *data)
+{
+ u16 *q_depth = (u16 *)data;
+
+ scsi_change_queue_depth(sdev, (int)*q_depth);
+ sdev->max_queue_depth = sdev->queue_depth;
+}
+
+/**
+ * mpi3mr_set_qd_for_all_vd_in_tg -set QD for TG VDs
+ * @mrioc: Adapter instance reference
+ * @tg: Throttle group information pointer
+ *
+ * Accessor to reduce QD for each device associated with the
+ * given throttle group.
+ *
+ * Return: None.
+ */
+static void mpi3mr_set_qd_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_throttle_group_info *tg)
+{
+ unsigned long flags;
+ struct mpi3mr_tgt_dev *tgtdev;
+ struct mpi3mr_stgt_priv_data *tgt_priv;
+
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
+ if (tgtdev->starget && tgtdev->starget->hostdata) {
+ tgt_priv = tgtdev->starget->hostdata;
+ if (tgt_priv->throttle_group == tg) {
+ dprint_event_bh(mrioc,
+ "updating qd due to throttling for persist_id(%d) original_qd(%d), reduced_qd (%d)\n",
+ tgt_priv->perst_id, tgtdev->q_depth,
+ tg->modified_qd);
+ starget_for_each_device(tgtdev->starget,
+ (void *)&tg->modified_qd,
+ mpi3mr_update_sdev_qd);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+}
+
+/**
* mpi3mr_fwevt_bh - Firmware event bottomhalf handler
* @mrioc: Adapter instance reference
* @fwevt: Firmware event reference
@@ -1484,6 +1648,20 @@ static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
mpi3mr_logdata_evt_bh(mrioc, fwevt);
break;
}
+ case MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION:
+ {
+ struct mpi3mr_throttle_group_info *tg;
+
+ tg = *(struct mpi3mr_throttle_group_info **)fwevt->event_data;
+ dprint_event_bh(mrioc,
+ "qd reduction event processed for tg_id(%d) reduction_needed(%d)\n",
+ tg->id, tg->need_qd_reduction);
+ if (tg->need_qd_reduction) {
+ mpi3mr_set_qd_for_all_vd_in_tg(mrioc, tg);
+ tg->need_qd_reduction = 0;
+ }
+ break;
+ }
default:
break;
}
@@ -1540,13 +1718,13 @@ static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc,
perst_id = le16_to_cpu(dev_pg0->persistent_id);
tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
if (tgtdev) {
- mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0);
+ mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true);
mpi3mr_tgtdev_put(tgtdev);
} else {
tgtdev = mpi3mr_alloc_tgtdev();
if (!tgtdev)
return -ENOMEM;
- mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0);
+ mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true);
mpi3mr_tgtdev_add_to_list(mrioc, tgtdev);
}
@@ -2558,6 +2736,11 @@ void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
u32 xfer_count = 0, sense_count = 0, resp_data = 0;
u16 dev_handle = 0xFFFF;
struct scsi_sense_hdr sshdr;
+ struct mpi3mr_stgt_priv_data *stgt_priv_data = NULL;
+ struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL;
+ u32 ioc_pend_data_len = 0, tg_pend_data_len = 0, data_len_blks = 0;
+ struct mpi3mr_throttle_group_info *tg = NULL;
+ u8 throttle_enabled_dev = 0;
*reply_dma = 0;
reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
@@ -2614,6 +2797,51 @@ void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
goto out;
}
priv = scsi_cmd_priv(scmd);
+
+ data_len_blks = scsi_bufflen(scmd) >> 9;
+ sdev_priv_data = scmd->device->hostdata;
+ if (sdev_priv_data) {
+ stgt_priv_data = sdev_priv_data->tgt_priv_data;
+ if (stgt_priv_data) {
+ tg = stgt_priv_data->throttle_group;
+ throttle_enabled_dev =
+ stgt_priv_data->io_throttle_enabled;
+ }
+ }
+ if (unlikely((data_len_blks >= mrioc->io_throttle_data_length) &&
+ throttle_enabled_dev)) {
+ ioc_pend_data_len = atomic_sub_return(data_len_blks,
+ &mrioc->pend_large_data_sz);
+ if (tg) {
+ tg_pend_data_len = atomic_sub_return(data_len_blks,
+ &tg->pend_large_data_sz);
+ if (tg->io_divert && ((ioc_pend_data_len <=
+ mrioc->io_throttle_low) &&
+ (tg_pend_data_len <= tg->low))) {
+ tg->io_divert = 0;
+ mpi3mr_set_io_divert_for_all_vd_in_tg(
+ mrioc, tg, 0);
+ }
+ } else {
+ if (ioc_pend_data_len <= mrioc->io_throttle_low)
+ stgt_priv_data->io_divert = 0;
+ }
+ } else if (unlikely((stgt_priv_data && stgt_priv_data->io_divert))) {
+ ioc_pend_data_len = atomic_read(&mrioc->pend_large_data_sz);
+ if (!tg) {
+ if (ioc_pend_data_len <= mrioc->io_throttle_low)
+ stgt_priv_data->io_divert = 0;
+
+ } else if (ioc_pend_data_len <= mrioc->io_throttle_low) {
+ tg_pend_data_len = atomic_read(&tg->pend_large_data_sz);
+ if (tg->io_divert && (tg_pend_data_len <= tg->low)) {
+ tg->io_divert = 0;
+ mpi3mr_set_io_divert_for_all_vd_in_tg(
+ mrioc, tg, 0);
+ }
+ }
+ }
+
if (success_desc) {
scmd->result = DID_OK << 16;
goto out_success;
@@ -3834,6 +4062,11 @@ static int mpi3mr_target_alloc(struct scsi_target *starget)
tgt_dev->starget = starget;
atomic_set(&scsi_tgt_priv_data->block_io, 0);
retval = 0;
+ scsi_tgt_priv_data->io_throttle_enabled =
+ tgt_dev->io_throttle_enabled;
+ if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_VD)
+ scsi_tgt_priv_data->throttle_group =
+ tgt_dev->dev_spec.vd_inf.tg;
} else
retval = -ENXIO;
spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
@@ -3989,10 +4222,13 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost,
int retval = 0;
u16 dev_handle;
u16 host_tag;
- u32 scsiio_flags = 0;
+ u32 scsiio_flags = 0, data_len_blks = 0;
struct request *rq = scsi_cmd_to_rq(scmd);
int iprio_class;
u8 is_pcie_dev = 0;
+ u32 tracked_io_sz = 0;
+ u32 ioc_pend_data_len = 0, tg_pend_data_len = 0;
+ struct mpi3mr_throttle_group_info *tg = NULL;
if (mrioc->unrecoverable) {
scmd->result = DID_ERROR << 16;
@@ -4096,11 +4332,50 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost,
goto out;
}
op_req_q = &mrioc->req_qinfo[scmd_priv_data->req_q_idx];
+ data_len_blks = scsi_bufflen(scmd) >> 9;
+ if ((data_len_blks >= mrioc->io_throttle_data_length) &&
+ stgt_priv_data->io_throttle_enabled) {
+ tracked_io_sz = data_len_blks;
+ tg = stgt_priv_data->throttle_group;
+ if (tg) {
+ ioc_pend_data_len = atomic_add_return(data_len_blks,
+ &mrioc->pend_large_data_sz);
+ tg_pend_data_len = atomic_add_return(data_len_blks,
+ &tg->pend_large_data_sz);
+ if (!tg->io_divert && ((ioc_pend_data_len >=
+ mrioc->io_throttle_high) ||
+ (tg_pend_data_len >= tg->high))) {
+ tg->io_divert = 1;
+ tg->need_qd_reduction = 1;
+ mpi3mr_set_io_divert_for_all_vd_in_tg(mrioc,
+ tg, 1);
+ mpi3mr_queue_qd_reduction_event(mrioc, tg);
+ }
+ } else {
+ ioc_pend_data_len = atomic_add_return(data_len_blks,
+ &mrioc->pend_large_data_sz);
+ if (ioc_pend_data_len >= mrioc->io_throttle_high)
+ stgt_priv_data->io_divert = 1;
+ }
+ }
+
+ if (stgt_priv_data->io_divert) {
+ scsiio_req->msg_flags |=
+ MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE;
+ scsiio_flags |= MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING;
+ }
+ scsiio_req->flags = cpu_to_le32(scsiio_flags);
if (mpi3mr_op_request_post(mrioc, op_req_q,
scmd_priv_data->mpi3mr_scsiio_req)) {
mpi3mr_clear_scmd_priv(mrioc, scmd);
retval = SCSI_MLQUEUE_HOST_BUSY;
+ if (tracked_io_sz) {
+ atomic_sub(tracked_io_sz, &mrioc->pend_large_data_sz);
+ if (tg)
+ atomic_sub(tracked_io_sz,
+ &tg->pend_large_data_sz);
+ }
goto out;
}
@@ -4313,6 +4588,8 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
shost->max_channel = 0;
shost->max_id = 0xFFFFFFFF;
+ shost->host_tagset = 1;
+
if (prot_mask >= 0)
scsi_host_set_prot(shost, prot_mask);
else {
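Editor's note on the mpi3mr_os.c changes above: the submit and completion paths implement a simple hysteresis. Large I/Os (at least io_throttle_data_length blocks) are added to the per-IOC and per-TG pending counters at queuecommand time, divert is switched on once a counter crosses its high watermark, and it is switched off on the completion path only when the counters have dropped back to the low watermark. The sketch below is a single-threaded standalone model of that logic; the counter and threshold values are illustrative, and the real driver uses atomic counters per IOC and per throttle group.

/* Single-threaded model of the mpi3mr large-I/O divert hysteresis. */
#include <stdio.h>
#include <stdbool.h>

static unsigned int pend_blks;	/* pending large-I/O size in 512-byte blocks */
static bool io_divert;

static void submit_large_io(unsigned int blks, unsigned int high)
{
	pend_blks += blks;
	if (!io_divert && pend_blks >= high)
		io_divert = true;	/* start diverting to firmware */
}

static void complete_large_io(unsigned int blks, unsigned int low)
{
	pend_blks -= blks;
	if (io_divert && pend_blks <= low)
		io_divert = false;	/* resume the normal fast path */
}

int main(void)
{
	const unsigned int high = 16384, low = 8192; /* blocks */
	unsigned int i;

	for (i = 0; i < 10; i++)
		submit_large_io(2048, high);	/* 10 x 1 MiB outstanding */
	printf("divert after submits: %d\n", io_divert);

	for (i = 0; i < 7; i++)
		complete_large_io(2048, low);
	printf("divert after completions: %d\n", io_divert);
	return 0;
}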
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 9a1ae52bb621..565339a0811d 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -873,7 +873,7 @@ mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
* @fault_code: fault code
*/
void
-mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
+mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
{
ioc_err(ioc, "fault_state(0x%04x)!\n", fault_code);
}
@@ -1057,7 +1057,7 @@ _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
desc = "config no defaults";
break;
case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
- desc = "config cant commit";
+ desc = "config can't commit";
break;
/****************************************************************************
@@ -1321,7 +1321,7 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
* @log_info: log info
*/
static void
-_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info)
+_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc, u32 log_info)
{
union loginfo_type {
u32 loginfo;
@@ -1393,7 +1393,7 @@ _base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
(ioc->logging_level & MPT_DEBUG_REPLY)) {
- _base_sas_ioc_info(ioc , mpi_reply,
+ _base_sas_ioc_info(ioc, mpi_reply,
mpt3sas_base_get_msg_frame(ioc, smid));
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 5e8887fa02c8..def37a7e5980 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -5294,7 +5294,7 @@ _scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
}
/**
- * _scsih_scsi_ioc_info - translated non-succesfull SCSI_IO request
+ * _scsih_scsi_ioc_info - translated non-successful SCSI_IO request
* @ioc: per adapter object
* @scmd: pointer to scsi command object
* @mpi_reply: reply mf payload returned from firmware
@@ -12410,7 +12410,6 @@ scsih_suspend(struct device *dev)
return rc;
mpt3sas_base_stop_watchdog(ioc);
- flush_scheduled_work();
scsi_block_requests(shost);
_scsih_nvme_shutdown(ioc);
ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state\n",
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 991eb01bb1e0..91d78d0a38fe 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -699,6 +699,10 @@ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
return 0;
}
+static void pm8001_chip_post_init(struct pm8001_hba_info *pm8001_ha)
+{
+}
+
static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
{
u32 max_wait_count;
@@ -3134,7 +3138,7 @@ int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
*
* when HBA driver received the identify done event or initiate FIS received
* event(for SATA), it will invoke this function to notify the sas layer that
- * the sas toplogy has formed, please discover the the whole sas domain,
+ * the sas topology has formed, please discover the whole sas domain,
* while receive a broadcast(change) primitive just tell the sas
* layer to discover the changed domain rather than the whole domain.
*/
@@ -4934,6 +4938,7 @@ pm8001_chip_sas_re_initialization(struct pm8001_hba_info *pm8001_ha)
const struct pm8001_dispatch pm8001_8001_dispatch = {
.name = "pmc8001",
.chip_init = pm8001_chip_init,
+ .chip_post_init = pm8001_chip_post_init,
.chip_soft_rst = pm8001_chip_soft_rst,
.chip_rst = pm8001_hw_chip_rst,
.chip_iounmap = pm8001_chip_iounmap,
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 01f2f41928eb..a0028e130a7e 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -56,7 +56,7 @@ MODULE_PARM_DESC(link_rate, "Enable link rate.\n"
" 8: Link rate 12.0G\n");
static struct scsi_transport_template *pm8001_stt;
-static int pm8001_init_ccb_tag(struct pm8001_hba_info *, struct Scsi_Host *, struct pci_dev *);
+static int pm8001_init_ccb_tag(struct pm8001_hba_info *);
/*
* chip info structure to identify chip key functionality as
@@ -81,6 +81,18 @@ LIST_HEAD(hba_list);
struct workqueue_struct *pm8001_wq;
+static int pm8001_map_queues(struct Scsi_Host *shost)
+{
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+
+ if (pm8001_ha->number_of_intr > 1)
+ blk_mq_pci_map_queues(qmap, pm8001_ha->pdev, 1);
+
+ return blk_mq_map_queues(qmap);
+}
+
/*
* The main structure which LLDD must register for scsi core.
*/
@@ -109,6 +121,8 @@ static struct scsi_host_template pm8001_sht = {
#endif
.shost_groups = pm8001_host_groups,
.track_queue_depth = 1,
+ .cmd_per_lun = 32,
+ .map_queues = pm8001_map_queues,
};
/*
@@ -607,12 +621,8 @@ static int pm8001_prep_sas_ha_init(struct Scsi_Host *shost,
shost->transportt = pm8001_stt;
shost->max_id = PM8001_MAX_DEVICES;
- shost->max_lun = 8;
- shost->max_channel = 0;
shost->unique_id = pm8001_id;
shost->max_cmd_len = 16;
- shost->can_queue = PM8001_CAN_QUEUE;
- shost->cmd_per_lun = 32;
return 0;
exit_free1:
kfree(arr_port);
@@ -933,31 +943,35 @@ static int pm8001_configure_phy_settings(struct pm8001_hba_info *pm8001_ha)
*/
static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
{
- u32 number_of_intr;
- int rc, cpu_online_count;
unsigned int allocated_irq_vectors;
+ int rc;
/* SPCv controllers supports 64 msi-x */
if (pm8001_ha->chip_id == chip_8001) {
- number_of_intr = 1;
+ rc = pci_alloc_irq_vectors(pm8001_ha->pdev, 1, 1,
+ PCI_IRQ_MSIX);
} else {
- number_of_intr = PM8001_MAX_MSIX_VEC;
+ /*
+ * Queue index #0 is always used for housekeeping, so don't
+ * include it in the affinity spreading.
+ */
+ struct irq_affinity desc = {
+ .pre_vectors = 1,
+ };
+ rc = pci_alloc_irq_vectors_affinity(
+ pm8001_ha->pdev, 2, PM8001_MAX_MSIX_VEC,
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc);
}
- cpu_online_count = num_online_cpus();
- number_of_intr = min_t(int, cpu_online_count, number_of_intr);
- rc = pci_alloc_irq_vectors(pm8001_ha->pdev, number_of_intr,
- number_of_intr, PCI_IRQ_MSIX);
allocated_irq_vectors = rc;
if (rc < 0)
return rc;
/* Assigns the number of interrupts */
- number_of_intr = min_t(int, allocated_irq_vectors, number_of_intr);
- pm8001_ha->number_of_intr = number_of_intr;
+ pm8001_ha->number_of_intr = allocated_irq_vectors;
/* Maximum queue number updating in HBA structure */
- pm8001_ha->max_q_num = number_of_intr;
+ pm8001_ha->max_q_num = allocated_irq_vectors;
pm8001_dbg(pm8001_ha, INIT,
"pci_alloc_irq_vectors request ret:%d no of intr %d\n",
@@ -1124,10 +1138,23 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
goto err_out_ha_free;
}
- rc = pm8001_init_ccb_tag(pm8001_ha, shost, pdev);
+ rc = pm8001_init_ccb_tag(pm8001_ha);
if (rc)
goto err_out_enable;
+
+ PM8001_CHIP_DISP->chip_post_init(pm8001_ha);
+
+ if (pm8001_ha->number_of_intr > 1) {
+ shost->nr_hw_queues = pm8001_ha->number_of_intr - 1;
+ /*
+ * For now, ensure we're not sent too many commands by setting
+ * host_tagset. This is also required if we start using request
+ * tag.
+ */
+ shost->host_tagset = 1;
+ }
+
rc = scsi_add_host(shost, &pdev->dev);
if (rc)
goto err_out_ha_free;
@@ -1177,16 +1204,14 @@ err_out_enable:
/**
* pm8001_init_ccb_tag - allocate memory to CCB and tag.
* @pm8001_ha: our hba card information.
- * @shost: scsi host which has been allocated outside.
- * @pdev: pci device.
*/
-static int
-pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha, struct Scsi_Host *shost,
- struct pci_dev *pdev)
+static int pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha)
{
- int i = 0;
+ struct Scsi_Host *shost = pm8001_ha->shost;
+ struct device *dev = pm8001_ha->dev;
u32 max_out_io, ccb_count;
u32 can_queue;
+ int i;
max_out_io = pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io;
ccb_count = min_t(int, PM8001_MAX_CCB, max_out_io);
@@ -1209,7 +1234,7 @@ pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha, struct Scsi_Host *shost,
goto err_out_noccb;
}
for (i = 0; i < ccb_count; i++) {
- pm8001_ha->ccb_info[i].buf_prd = dma_alloc_coherent(&pdev->dev,
+ pm8001_ha->ccb_info[i].buf_prd = dma_alloc_coherent(dev,
sizeof(struct pm8001_prd) * PM8001_MAX_DMA_SG,
&pm8001_ha->ccb_info[i].ccb_dma_handle,
GFP_KERNEL);
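Editor's note on the pm8001_init.c changes above: with the affinity-managed MSI-X allocation, vector 0 stays reserved for housekeeping and the remaining vectors back the blk-mq hardware queues, so shost->nr_hw_queues becomes number_of_intr - 1 on multi-vector controllers while single-vector (8001) controllers keep one queue. A small standalone sketch of that derivation; the helper name and example counts are illustrative.

/* Model of the pm8001 queue-count derivation after MSI-X allocation:
 * vector 0 is housekeeping only, the rest map 1:1 to hardware queues. */
#include <stdio.h>

static unsigned int nr_hw_queues(unsigned int allocated_vectors)
{
	/* Single-vector controllers keep the legacy single queue. */
	return allocated_vectors > 1 ? allocated_vectors - 1 : 1;
}

int main(void)
{
	printf("1 vector   -> %u hw queue(s)\n", nr_hw_queues(1));
	printf("64 vectors -> %u hw queue(s)\n", nr_hw_queues(64));
	return 0;
}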
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 3a863d776724..8e3f2f9ddaac 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -66,7 +66,11 @@ static int pm8001_find_tag(struct sas_task *task, u32 *tag)
void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
{
void *bitmap = pm8001_ha->tags;
- clear_bit(tag, bitmap);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags);
+ __clear_bit(tag, bitmap);
+ spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
}
/**
@@ -76,9 +80,9 @@ void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
*/
int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
{
- unsigned int tag;
void *bitmap = pm8001_ha->tags;
unsigned long flags;
+ unsigned int tag;
spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags);
tag = find_first_zero_bit(bitmap, pm8001_ha->tags_num);
@@ -86,7 +90,7 @@ int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
return -SAS_QUEUE_FULL;
}
- set_bit(tag, bitmap);
+ __set_bit(tag, bitmap);
spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
*tag_out = tag;
return 0;
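Editor's note on the pm8001_sas.c hunks above: the switch from set_bit()/clear_bit() to __set_bit()/__clear_bit() is safe because both tag paths now run entirely under bitmap_lock, making the atomic bit operations redundant. Below is a user-space analogue of that pattern with a mutex standing in for the spinlock; the names and the 64-tag bitmap are invented for the sketch.

/* User-space analogue of the pm8001 tag bitmap: non-atomic bit ops are
 * fine because every access is serialized by a lock. Build with -lpthread. */
#include <pthread.h>
#include <stdio.h>

#define TAGS_NUM 64

static unsigned long tag_bitmap;	/* one bit per tag */
static pthread_mutex_t bitmap_lock = PTHREAD_MUTEX_INITIALIZER;

static int tag_alloc(unsigned int *tag_out)
{
	unsigned int tag;
	int ret = -1;

	pthread_mutex_lock(&bitmap_lock);
	for (tag = 0; tag < TAGS_NUM; tag++) {
		if (!(tag_bitmap & (1UL << tag))) {
			tag_bitmap |= 1UL << tag;	/* __set_bit() */
			*tag_out = tag;
			ret = 0;
			break;
		}
	}
	pthread_mutex_unlock(&bitmap_lock);
	return ret;
}

static void tag_free(unsigned int tag)
{
	pthread_mutex_lock(&bitmap_lock);
	tag_bitmap &= ~(1UL << tag);		/* __clear_bit() */
	pthread_mutex_unlock(&bitmap_lock);
}

int main(void)
{
	unsigned int tag;

	if (!tag_alloc(&tag))
		printf("allocated tag %u\n", tag);
	tag_free(tag);
	return 0;
}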
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 060ab680a7ed..c5e3f380a01c 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -55,6 +55,8 @@
#include <scsi/scsi_tcq.h>
#include <scsi/sas_ata.h>
#include <linux/atomic.h>
+#include <linux/blk-mq.h>
+#include <linux/blk-mq-pci.h>
#include "pm8001_defs.h"
#define DRV_NAME "pm80xx"
@@ -172,6 +174,7 @@ struct forensic_data {
struct pm8001_dispatch {
char *name;
int (*chip_init)(struct pm8001_hba_info *pm8001_ha);
+ void (*chip_post_init)(struct pm8001_hba_info *pm8001_ha);
int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha);
void (*chip_rst)(struct pm8001_hba_info *pm8001_ha);
int (*chip_ioremap)(struct pm8001_hba_info *pm8001_ha);
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 303cd05fec50..f8b8624458f7 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -1469,11 +1469,18 @@ static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
} else
return -EBUSY;
+ return 0;
+}
+
+static void pm80xx_chip_post_init(struct pm8001_hba_info *pm8001_ha)
+{
/* send SAS protocol timer configuration page to FW */
- ret = pm80xx_set_sas_protocol_timer_config(pm8001_ha);
+ pm80xx_set_sas_protocol_timer_config(pm8001_ha);
/* Check for encryption */
if (pm8001_ha->chip->encrypt) {
+ int ret;
+
pm8001_dbg(pm8001_ha, INIT, "Checking for encryption\n");
ret = pm80xx_get_encrypt_info(pm8001_ha);
if (ret == -1) {
@@ -1485,7 +1492,6 @@ static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
}
}
}
- return 0;
}
static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
@@ -4349,6 +4355,29 @@ static int check_enc_sat_cmd(struct sas_task *task)
return ret;
}
+static u32 pm80xx_chip_get_q_index(struct sas_task *task)
+{
+ struct scsi_cmnd *scmd = NULL;
+ u32 blk_tag;
+
+ if (task->uldd_task) {
+ struct ata_queued_cmd *qc;
+
+ if (dev_is_sata(task->dev)) {
+ qc = task->uldd_task;
+ scmd = qc->scsicmd;
+ } else {
+ scmd = task->uldd_task;
+ }
+ }
+
+ if (!scmd)
+ return 0;
+
+ blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
+ return blk_mq_unique_tag_to_hwq(blk_tag);
+}
+
/**
* pm80xx_chip_ssp_io_req - send an SSP task to FW
* @pm8001_ha: our hba card information.
@@ -4364,7 +4393,7 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
u32 tag = ccb->ccb_tag;
u64 phys_addr, end_addr;
u32 end_addr_high, end_addr_low;
- u32 q_index, cpu_id;
+ u32 q_index;
u32 opc = OPC_INB_SSPINIIOSTART;
memset(&ssp_cmd, 0, sizeof(ssp_cmd));
@@ -4385,8 +4414,7 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd,
task->ssp_task.cmd->cmd_len);
- cpu_id = smp_processor_id();
- q_index = (u32) (cpu_id) % (pm8001_ha->max_q_num);
+ q_index = pm80xx_chip_get_q_index(task);
/* Check if encryption is set */
if (pm8001_ha->chip->encrypt &&
@@ -4515,8 +4543,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
struct domain_device *dev = task->dev;
struct pm8001_device *pm8001_ha_dev = dev->lldd_dev;
struct ata_queued_cmd *qc = task->uldd_task;
- u32 tag = ccb->ccb_tag;
- u32 q_index, cpu_id;
+ u32 tag = ccb->ccb_tag, q_index;
struct sata_start_req sata_cmd;
u32 hdr_tag, ncg_tag = 0;
u64 phys_addr, end_addr;
@@ -4526,8 +4553,8 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
unsigned long flags;
u32 opc = OPC_INB_SATA_HOST_OPSTART;
memset(&sata_cmd, 0, sizeof(sata_cmd));
- cpu_id = smp_processor_id();
- q_index = (u32) (cpu_id) % (pm8001_ha->max_q_num);
+
+ q_index = pm80xx_chip_get_q_index(task);
if (task->data_dir == DMA_NONE && !task->ata_task.use_ncq) {
ATAP = 0x04; /* no data*/
@@ -5011,6 +5038,7 @@ void pm8001_set_phy_profile_single(struct pm8001_hba_info *pm8001_ha,
const struct pm8001_dispatch pm8001_80xx_dispatch = {
.name = "pmc80xx",
.chip_init = pm80xx_chip_init,
+ .chip_post_init = pm80xx_chip_post_init,
.chip_soft_rst = pm80xx_chip_soft_rst,
.chip_rst = pm80xx_hw_chip_rst,
.chip_iounmap = pm8001_chip_iounmap,
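Editor's note on the pm80xx_hwi.c changes above: pm80xx_chip_get_q_index() replaces the old "CPU id modulo queue count" with the hardware-queue index the block layer already chose for the request. blk_mq_unique_tag() packs the hwq index into the upper 16 bits of the tag and blk_mq_unique_tag_to_hwq() extracts it. The standalone model below mirrors that 16-bit split; the function names and sample values are invented for illustration.

/* Model of the blk-mq unique-tag encoding used by the new queue-index
 * helper: hwq index in the upper 16 bits, per-queue tag in the lower 16. */
#include <stdio.h>
#include <stdint.h>

#define UNIQUE_TAG_BITS 16

static uint32_t unique_tag(uint16_t hwq, uint16_t tag)
{
	return ((uint32_t)hwq << UNIQUE_TAG_BITS) | tag;
}

static uint16_t unique_tag_to_hwq(uint32_t utag)
{
	return utag >> UNIQUE_TAG_BITS;
}

int main(void)
{
	uint32_t utag = unique_tag(3, 42);	/* hw queue 3, tag 42 */

	printf("unique tag 0x%08x -> hwq %u\n", utag, unique_tag_to_hwq(utag));
	return 0;
}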
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 83ffba7f51da..cecfb2cb4c7b 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -2414,9 +2414,12 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
int rval;
u16 retry = 10;
- if (mode == QEDI_MODE_NORMAL || mode == QEDI_MODE_SHUTDOWN) {
- iscsi_host_remove(qedi->shost);
+ if (mode == QEDI_MODE_NORMAL)
+ iscsi_host_remove(qedi->shost, false);
+ else if (mode == QEDI_MODE_SHUTDOWN)
+ iscsi_host_remove(qedi->shost, true);
+ if (mode == QEDI_MODE_NORMAL || mode == QEDI_MODE_SHUTDOWN) {
if (qedi->tmf_thread) {
destroy_workqueue(qedi->tmf_thread);
qedi->tmf_thread = NULL;
@@ -2491,7 +2494,7 @@ static void qedi_board_disable_work(struct work_struct *work)
if (test_and_set_bit(QEDI_IN_SHUTDOWN, &qedi->flags))
return;
- __qedi_remove(qedi->pdev, QEDI_MODE_SHUTDOWN);
+ __qedi_remove(qedi->pdev, QEDI_MODE_NORMAL);
}
static void qedi_shutdown(struct pci_dev *pdev)
@@ -2791,7 +2794,7 @@ remove_host:
#ifdef CONFIG_DEBUG_FS
qedi_dbg_host_exit(&qedi->dbg_ctx);
#endif
- iscsi_host_remove(qedi->shost);
+ iscsi_host_remove(qedi->shost, false);
stop_iscsi_func:
qedi_ops->stop(qedi->cdev);
stop_slowpath:
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 3b3e4234f37a..fa1fcbfb946f 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2476,7 +2476,6 @@ static DEVICE_ATTR(port_speed, 0644, qla2x00_port_speed_show,
qla2x00_port_speed_store);
static DEVICE_ATTR(port_no, 0444, qla2x00_port_no_show, NULL);
static DEVICE_ATTR(fw_attr, 0444, qla2x00_fw_attr_show, NULL);
-static DEVICE_ATTR_RO(edif_doorbell);
static struct attribute *qla2x00_host_attrs[] = {
&dev_attr_driver_version.attr,
@@ -2521,7 +2520,6 @@ static struct attribute *qla2x00_host_attrs[] = {
&dev_attr_port_no.attr,
&dev_attr_fw_attr.attr,
&dev_attr_dport_diagnostics.attr,
- &dev_attr_edif_doorbell.attr,
&dev_attr_mpi_pause.attr,
&dev_attr_qlini_mode.attr,
&dev_attr_ql2xiniexchg.attr,
@@ -2716,17 +2714,27 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
if (!fcport)
return;
- /* Now that the rport has been deleted, set the fcport state to
- FCS_DEVICE_DEAD */
- qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD);
+ ql_dbg(ql_dbg_async, fcport->vha, 0x5101,
+ DBG_FCPORT_PRFMT(fcport, "dev_loss_tmo expiry, rport_state=%d",
+ rport->port_state));
+
+ /*
+ * Now that the rport has been deleted, set the fcport state to
+ * FCS_DEVICE_DEAD, if the fcport is still lost.
+ */
+ if (fcport->scan_state != QLA_FCPORT_FOUND)
+ qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD);
/*
* Transport has effectively 'deleted' the rport, clear
* all local references.
*/
spin_lock_irqsave(host->host_lock, flags);
- fcport->rport = fcport->drport = NULL;
- *((fc_port_t **)rport->dd_data) = NULL;
+ /* Confirm port has not reappeared before clearing pointers. */
+ if (rport->port_state != FC_PORTSTATE_ONLINE) {
+ fcport->rport = fcport->drport = NULL;
+ *((fc_port_t **)rport->dd_data) = NULL;
+ }
spin_unlock_irqrestore(host->host_lock, flags);
if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
@@ -2759,9 +2767,12 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
/*
* At this point all fcport's software-states are cleared. Perform any
* final cleanup of firmware resources (PCBs and XCBs).
+ *
+ * Attempt to cleanup only lost devices.
*/
if (fcport->loop_id != FC_NO_LOOP_ID) {
- if (IS_FWI2_CAPABLE(fcport->vha->hw)) {
+ if (IS_FWI2_CAPABLE(fcport->vha->hw) &&
+ fcport->scan_state != QLA_FCPORT_FOUND) {
if (fcport->loop_id != FC_NO_LOOP_ID)
fcport->logout_on_delete = 1;
@@ -2771,7 +2782,7 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
__LINE__);
qlt_schedule_sess_for_deletion(fcport);
}
- } else {
+ } else if (!IS_FWI2_CAPABLE(fcport->vha->hw)) {
qla2x00_port_logout(fcport->vha, fcport);
}
}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index c2f00f076f79..5db9bf69dcff 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -2425,6 +2425,89 @@ qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
}
static int
+qla2x00_do_dport_diagnostics_v2(struct bsg_job *bsg_job)
+{
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+ scsi_qla_host_t *vha = shost_priv(host);
+ int rval;
+ struct qla_dport_diag_v2 *dd;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ uint16_t options;
+
+ if (!IS_DPORT_CAPABLE(vha->hw))
+ return -EPERM;
+
+ dd = kzalloc(sizeof(*dd), GFP_KERNEL);
+ if (!dd)
+ return -ENOMEM;
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));
+
+ options = dd->options;
+
+ /* Check dport Test in progress */
+ if (options == QLA_GET_DPORT_RESULT_V2 &&
+ vha->dport_status & DPORT_DIAG_IN_PROGRESS) {
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_DPORT_DIAG_IN_PROCESS;
+ goto dportcomplete;
+ }
+
+ /* Check chip reset in progress and start/restart requests arrive */
+ if (vha->dport_status & DPORT_DIAG_CHIP_RESET_IN_PROGRESS &&
+ (options == QLA_START_DPORT_TEST_V2 ||
+ options == QLA_RESTART_DPORT_TEST_V2)) {
+ vha->dport_status &= ~DPORT_DIAG_CHIP_RESET_IN_PROGRESS;
+ }
+
+ /* Check chip reset in progress and get result request arrive */
+ if (vha->dport_status & DPORT_DIAG_CHIP_RESET_IN_PROGRESS &&
+ options == QLA_GET_DPORT_RESULT_V2) {
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_DPORT_DIAG_NOT_RUNNING;
+ goto dportcomplete;
+ }
+
+ rval = qla26xx_dport_diagnostics_v2(vha, dd, mcp);
+
+ if (rval == QLA_SUCCESS) {
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_OK;
+ if (options == QLA_START_DPORT_TEST_V2 ||
+ options == QLA_RESTART_DPORT_TEST_V2) {
+ dd->mbx1 = mcp->mb[0];
+ dd->mbx2 = mcp->mb[1];
+ vha->dport_status |= DPORT_DIAG_IN_PROGRESS;
+ } else if (options == QLA_GET_DPORT_RESULT_V2) {
+ dd->mbx1 = le16_to_cpu(vha->dport_data[1]);
+ dd->mbx2 = le16_to_cpu(vha->dport_data[2]);
+ }
+ } else {
+ dd->mbx1 = mcp->mb[0];
+ dd->mbx2 = mcp->mb[1];
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_DPORT_DIAG_ERR;
+ }
+
+dportcomplete:
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
+
+ bsg_reply->reply_payload_rcv_len = sizeof(*dd);
+ bsg_job->reply_len = sizeof(*bsg_reply);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+
+ kfree(dd);
+
+ return 0;
+}
+
+static int
qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
{
scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
@@ -2860,6 +2943,9 @@ qla2x00_process_vendor_specific(struct scsi_qla_host *vha, struct bsg_job *bsg_j
case QL_VND_DPORT_DIAGNOSTICS:
return qla2x00_do_dport_diagnostics(bsg_job);
+ case QL_VND_DPORT_DIAGNOSTICS_V2:
+ return qla2x00_do_dport_diagnostics_v2(bsg_job);
+
case QL_VND_EDIF_MGMT:
return qla_edif_app_mgmt(bsg_job);
@@ -2975,6 +3061,13 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job)
ql_log(ql_log_info, vha, 0x708b, "%s CMD timeout. bsg ptr %p.\n",
__func__, bsg_job);
+
+ if (qla2x00_isp_reg_stat(ha)) {
+ ql_log(ql_log_info, vha, 0x9007,
+ "PCI/Register disconnect.\n");
+ qla_pci_set_eeh_busy(vha);
+ }
+
/* find the bsg job from the active list of commands */
spin_lock_irqsave(&ha->hardware_lock, flags);
for (que = 0; que < ha->max_req_queues; que++) {
@@ -2992,7 +3085,8 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job)
sp->u.bsg_job == bsg_job) {
req->outstanding_cmds[cnt] = NULL;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (ha->isp_ops->abort_command(sp)) {
+
+ if (!ha->flags.eeh_busy && ha->isp_ops->abort_command(sp)) {
ql_log(ql_log_warn, vha, 0x7089,
"mbx abort_command failed.\n");
bsg_reply->result = -EIO;
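Editor's note on the new qla2x00_do_dport_diagnostics_v2() above: the handler keys off the requested option and two vha->dport_status flags. A get-result request while a test is in progress returns "in process", a start or restart request clears the chip-reset flag, and a get-result request right after a chip reset returns "not running"; only then is the mailbox command issued. The sketch below is a standalone decision-table model of those early checks; the helper name is invented, and the numeric codes mirror the defines added to qla_bsg.h further down.

/* Decision-table model of the dPort diagnostics v2 option handling;
 * only the early-exit checks are modeled here. */
#include <stdio.h>

#define QLA_GET_DPORT_RESULT_V2   0
#define QLA_RESTART_DPORT_TEST_V2 1
#define QLA_START_DPORT_TEST_V2   2

#define DPORT_DIAG_IN_PROGRESS            0x1
#define DPORT_DIAG_CHIP_RESET_IN_PROGRESS 0x2

#define EXT_STATUS_OK                      0
#define EXT_STATUS_DPORT_DIAG_IN_PROCESS  41
#define EXT_STATUS_DPORT_DIAG_NOT_RUNNING 42

static int dport_v2_precheck(unsigned int opt, unsigned int *status)
{
	if (opt == QLA_GET_DPORT_RESULT_V2 &&
	    (*status & DPORT_DIAG_IN_PROGRESS))
		return EXT_STATUS_DPORT_DIAG_IN_PROCESS;

	if ((*status & DPORT_DIAG_CHIP_RESET_IN_PROGRESS) &&
	    (opt == QLA_START_DPORT_TEST_V2 ||
	     opt == QLA_RESTART_DPORT_TEST_V2))
		*status &= ~DPORT_DIAG_CHIP_RESET_IN_PROGRESS;

	if ((*status & DPORT_DIAG_CHIP_RESET_IN_PROGRESS) &&
	    opt == QLA_GET_DPORT_RESULT_V2)
		return EXT_STATUS_DPORT_DIAG_NOT_RUNNING;

	return EXT_STATUS_OK;	/* proceed to the mailbox command */
}

int main(void)
{
	unsigned int status = DPORT_DIAG_CHIP_RESET_IN_PROGRESS;

	printf("get-result after reset: %d\n",
	       dport_v2_precheck(QLA_GET_DPORT_RESULT_V2, &status));
	printf("start clears flag, rc: %d\n",
	       dport_v2_precheck(QLA_START_DPORT_TEST_V2, &status));
	return 0;
}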
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index 6d2b0a7436c1..bb64b9c5a74b 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -37,6 +37,7 @@
#define QL_VND_GET_TGT_STATS 0x25
#define QL_VND_MANAGE_HOST_PORT 0x26
#define QL_VND_MBX_PASSTHRU 0x2B
+#define QL_VND_DPORT_DIAGNOSTICS_V2 0x2C
/* BSG Vendor specific subcode returns */
#define EXT_STATUS_OK 0
@@ -60,6 +61,9 @@
#define EXT_STATUS_TIMEOUT 30
#define EXT_STATUS_THREAD_FAILED 31
#define EXT_STATUS_DATA_CMP_FAILED 32
+#define EXT_STATUS_DPORT_DIAG_ERR 40
+#define EXT_STATUS_DPORT_DIAG_IN_PROCESS 41
+#define EXT_STATUS_DPORT_DIAG_NOT_RUNNING 42
/* BSG definations for interpreting CommandSent field */
#define INT_DEF_LB_LOOPBACK_CMD 0
@@ -288,6 +292,17 @@ struct qla_dport_diag {
uint8_t unused[62];
} __packed;
+#define QLA_GET_DPORT_RESULT_V2 0 /* Get Result */
+#define QLA_RESTART_DPORT_TEST_V2 1 /* Restart test */
+#define QLA_START_DPORT_TEST_V2 2 /* Start test */
+struct qla_dport_diag_v2 {
+ uint16_t options;
+ uint16_t mbx1;
+ uint16_t mbx2;
+ uint8_t unused[58];
+ uint8_t buf[1024]; /* Test Result */
+} __packed;
+
/* D_Port options */
#define QLA_DPORT_RESULT 0x0
#define QLA_DPORT_START 0x2
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index f1f6c740bdcd..feeb1666227f 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -383,5 +383,5 @@ ql_mask_match(uint level)
if (ql2xextended_error_logging == 1)
ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
- return (level & ql2xextended_error_logging) == level;
+ return level && ((level & ql2xextended_error_logging) == level);
}
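Editor's note on the qla_dbg.h change above: ql_mask_match() now also rejects a zero level, so a log call with level 0 can no longer match every mask. A one-function standalone model of the new check (names and sample masks are illustrative):

/* Model of the fixed ql_mask_match(): a zero level never matches, and a
 * non-zero level must be fully contained in the enabled mask. */
#include <stdio.h>
#include <stdbool.h>

static bool mask_match(unsigned int level, unsigned int enabled_mask)
{
	return level && ((level & enabled_mask) == level);
}

int main(void)
{
	printf("%d %d %d\n",
	       mask_match(0x0, 0xffff),		/* 0: level 0 never logs */
	       mask_match(0x4000, 0x7fff),	/* 1: fully enabled */
	       mask_match(0x4001, 0x4000));	/* 0: only partially enabled */
	return 0;
}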
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index e8f69c486be1..3ec6a200942e 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -78,7 +78,7 @@ typedef union {
#include "qla_nvme.h"
#define QLA2XXX_DRIVER_NAME "qla2xxx"
#define QLA2XXX_APIDEV "ql2xapidev"
-#define QLA2XXX_MANUFACTURER "QLogic Corporation"
+#define QLA2XXX_MANUFACTURER "Marvell Semiconductor, Inc."
/*
* We have MAILBOX_REGISTER_COUNT sized arrays in a few places,
@@ -1173,6 +1173,12 @@ static inline bool qla2xxx_is_valid_mbs(unsigned int mbs)
/* ISP mailbox loopback echo diagnostic error code */
#define MBS_LB_RESET 0x17
+
+/* AEN mailbox Port Diagnostics test */
+#define AEN_START_DIAG_TEST 0x0 /* start the diagnostics */
+#define AEN_DONE_DIAG_TEST_WITH_NOERR 0x1 /* Done with no errors */
+#define AEN_DONE_DIAG_TEST_WITH_ERR 0x2 /* Done with error.*/
+
/*
* Firmware options 1, 2, 3.
*/
@@ -2158,6 +2164,11 @@ typedef struct {
#define CS_IOCB_ERROR 0x31 /* Generic error for IOCB request
failure */
#define CS_REJECT_RECEIVED 0x4E /* Reject received */
+#define CS_EDIF_AUTH_ERROR 0x63 /* decrypt error */
+#define CS_EDIF_PAD_LEN_ERROR 0x65 /* pad > frame size, not 4byte align */
+#define CS_EDIF_INV_REQ 0x66 /* invalid request */
+#define CS_EDIF_SPI_ERROR 0x67 /* rx frame unable to locate sa */
+#define CS_EDIF_HDR_ERROR 0x69 /* data frame != expected len */
#define CS_BAD_PAYLOAD 0x80 /* Driver defined */
#define CS_UNKNOWN 0x81 /* Driver defined */
#define CS_RETRY 0x82 /* Driver defined */
@@ -2626,7 +2637,6 @@ typedef struct fc_port {
struct {
uint32_t enable:1; /* device is edif enabled/req'd */
uint32_t app_stop:2;
- uint32_t app_started:1;
uint32_t aes_gmac:1;
uint32_t app_sess_online:1;
uint32_t tx_sa_set:1;
@@ -2637,6 +2647,7 @@ typedef struct fc_port {
uint32_t rx_rekey_cnt;
uint64_t tx_bytes;
uint64_t rx_bytes;
+ uint8_t sess_down_acked;
uint8_t auth_state;
uint16_t authok:1;
uint16_t rekey_cnt;
@@ -3204,6 +3215,8 @@ struct ct_sns_rsp {
#define GFF_NVME_OFFSET 23 /* type = 28h */
struct {
uint8_t fc4_features[128];
+#define FC4_FF_TARGET BIT_0
+#define FC4_FF_INITIATOR BIT_1
} gff_id;
struct {
uint8_t reserved;
@@ -3975,6 +3988,7 @@ struct qla_hw_data {
/* SRB cache. */
#define SRB_MIN_REQ 128
mempool_t *srb_mempool;
+ u8 port_name[WWN_SIZE];
volatile struct {
uint32_t mbox_int :1;
@@ -4040,6 +4054,9 @@ struct qla_hw_data {
uint32_t n2n_fw_acc_sec:1;
uint32_t plogi_template_valid:1;
uint32_t port_isolated:1;
+ uint32_t eeh_flush:2;
+#define EEH_FLUSH_RDY 1
+#define EEH_FLUSH_DONE 2
} flags;
uint16_t max_exchg;
@@ -4074,6 +4091,7 @@ struct qla_hw_data {
uint32_t rsp_que_len;
uint32_t req_que_off;
uint32_t rsp_que_off;
+ unsigned long eeh_jif;
/* Multi queue data structs */
device_reg_t *mqiobase;
@@ -4256,8 +4274,8 @@ struct qla_hw_data {
#define IS_OEM_001(ha) ((ha)->device_type & DT_OEM_001)
#define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS)
#define IS_CT6_SUPPORTED(ha) ((ha)->device_type & DT_CT6_SUPPORTED)
-#define IS_MQUE_CAPABLE(ha) ((ha)->mqenable || IS_QLA83XX(ha) || \
- IS_QLA27XX(ha) || IS_QLA28XX(ha))
+#define IS_MQUE_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \
+ IS_QLA28XX(ha))
#define IS_BIDI_CAPABLE(ha) \
(IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
/* Bit 21 of fw_attributes decides the MCTP capabilities */
@@ -5012,6 +5030,10 @@ typedef struct scsi_qla_host {
u64 short_link_down_cnt;
struct edif_dbell e_dbell;
struct pur_core pur_cinfo;
+
+#define DPORT_DIAG_IN_PROGRESS BIT_0
+#define DPORT_DIAG_CHIP_RESET_IN_PROGRESS BIT_1
+ uint16_t dport_status;
} scsi_qla_host_t;
struct qla27xx_image_status {
@@ -5443,4 +5465,10 @@ struct ql_vnd_tgt_stats_resp {
#define IS_SESSION_DELETED(_fcport) (_fcport->disc_state == DSC_DELETE_PEND || \
_fcport->disc_state == DSC_DELETED)
+#define DBG_FCPORT_PRFMT(_fp, _fmt, _args...) \
+ "%s: %8phC: " _fmt " (state=%d disc_state=%d scan_state=%d loopid=0x%x deleted=%d flags=0x%x)\n", \
+ __func__, _fp->port_name, ##_args, atomic_read(&_fp->state), \
+ _fp->disc_state, _fp->scan_state, _fp->loop_id, _fp->deleted, \
+ _fp->flags
+
#endif
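
The FC4_FF_* bits added to the gff_id response above are what the eDIF app-start and getfcinfo paths further down test to decide whether a discovered port is a target. A small standalone sketch, mirroring those defines, of how the GFF_ID FC-4 Features byte is decoded (the feature values fed in are made up):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the new defines above; BIT_0/BIT_1 in the driver. */
#define FC4_FF_TARGET    (1u << 0)
#define FC4_FF_INITIATOR (1u << 1)

static void show(uint8_t fc4_feat)
{
	printf("features=0x%02x target=%c initiator=%c\n", fc4_feat,
	       (fc4_feat & FC4_FF_TARGET) ? 'y' : 'n',
	       (fc4_feat & FC4_FF_INITIATOR) ? 'y' : 'n');
}

int main(void)
{
	show(0x01);	/* target only */
	show(0x02);	/* initiator only */
	show(0x03);	/* both */
	return 0;
}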
diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c
index cb8145a9ac09..400a8b6f3982 100644
--- a/drivers/scsi/qla2xxx/qla_edif.c
+++ b/drivers/scsi/qla2xxx/qla_edif.c
@@ -52,6 +52,31 @@ const char *sc_to_str(uint16_t cmd)
return "unknown";
}
+static struct edb_node *qla_edb_getnext(scsi_qla_host_t *vha)
+{
+ unsigned long flags;
+ struct edb_node *edbnode = NULL;
+
+ spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
+
+ /* db nodes are fifo - no qualifications done */
+ if (!list_empty(&vha->e_dbell.head)) {
+ edbnode = list_first_entry(&vha->e_dbell.head,
+ struct edb_node, list);
+ list_del_init(&edbnode->list);
+ }
+
+ spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
+
+ return edbnode;
+}
+
+static void qla_edb_node_free(scsi_qla_host_t *vha, struct edb_node *node)
+{
+ list_del_init(&node->list);
+ kfree(node);
+}
+
static struct edif_list_entry *qla_edif_list_find_sa_index(fc_port_t *fcport,
uint16_t handle)
{
@@ -257,14 +282,8 @@ qla2x00_find_fcport_by_pid(scsi_qla_host_t *vha, port_id_t *id)
f = NULL;
list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
- if ((f->flags & FCF_FCSP_DEVICE)) {
- ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x2058,
- "Found secure fcport - nn %8phN pn %8phN portid=0x%x, 0x%x.\n",
- f->node_name, f->port_name,
- f->d_id.b24, id->b24);
- if (f->d_id.b24 == id->b24)
- return f;
- }
+ if (f->d_id.b24 == id->b24)
+ return f;
}
return NULL;
}
@@ -280,14 +299,19 @@ qla_edif_app_check(scsi_qla_host_t *vha, struct app_id appid)
{
/* check that the app is allowed/known to the driver */
- if (appid.app_vid == EDIF_APP_ID) {
- ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x911d, "%s app id ok\n", __func__);
- return true;
+ if (appid.app_vid != EDIF_APP_ID) {
+ ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app id not ok (%x)",
+ __func__, appid.app_vid);
+ return false;
+ }
+
+ if (appid.version != EDIF_VERSION1) {
+ ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app version is not ok (%x)",
+ __func__, appid.version);
+ return false;
}
- ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app id not ok (%x)",
- __func__, appid.app_vid);
- return false;
+ return true;
}
static void
@@ -486,16 +510,35 @@ qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
/* mark doorbell as active since an app is now present */
vha->e_dbell.db_flags |= EDB_ACTIVE;
} else {
- ql_dbg(ql_dbg_edif, vha, 0x911e, "%s doorbell already active\n",
- __func__);
+ goto out;
}
if (N2N_TOPO(vha->hw)) {
- if (vha->hw->flags.n2n_fw_acc_sec)
- set_bit(N2N_LINK_RESET, &vha->dpc_flags);
- else
+ list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list)
+ fcport->n2n_link_reset_cnt = 0;
+
+ if (vha->hw->flags.n2n_fw_acc_sec) {
+ list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list)
+ qla_edif_sa_ctl_init(vha, fcport);
+
+ /*
+ * While authentication app was not running, remote device
+ * could still try to login with this local port. Let's
+ * clear the state and try again.
+ */
+ qla2x00_wait_for_sess_deletion(vha);
+
+ /* bounce the link to get the other guy to relogin */
+ if (!vha->hw->flags.n2n_bigger) {
+ set_bit(N2N_LINK_RESET, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+ } else {
+ qla2x00_wait_for_hba_online(vha);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
- qla2xxx_wake_dpc(vha);
+ qla2xxx_wake_dpc(vha);
+ qla2x00_wait_for_hba_online(vha);
+ }
} else {
list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
ql_dbg(ql_dbg_edif, vha, 0x2058,
@@ -517,19 +560,31 @@ qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
if (atomic_read(&vha->loop_state) == LOOP_DOWN)
break;
- fcport->edif.app_started = 1;
fcport->login_retry = vha->hw->login_retry_count;
- /* no activity */
fcport->edif.app_stop = 0;
+ fcport->edif.app_sess_online = 0;
+
+ if (fcport->scan_state != QLA_FCPORT_FOUND)
+ continue;
+
+ if (fcport->port_type == FCT_UNKNOWN &&
+ !fcport->fc4_features)
+ rval = qla24xx_async_gffid(vha, fcport, true);
+
+ if (!rval && !(fcport->fc4_features & FC4_FF_TARGET ||
+ fcport->port_type & (FCT_TARGET|FCT_NVME_TARGET)))
+ continue;
+
+ rval = 0;
ql_dbg(ql_dbg_edif, vha, 0x911e,
"%s wwpn %8phC calling qla_edif_reset_auth_wait\n",
__func__, fcport->port_name);
- fcport->edif.app_sess_online = 0;
qlt_schedule_sess_for_deletion(fcport);
qla_edif_sa_ctl_init(vha, fcport);
}
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
}
if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) {
@@ -540,9 +595,11 @@ qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
__func__);
}
+out:
appreply.host_support_edif = vha->hw->flags.edif_enabled;
appreply.edif_enode_active = vha->pur_cinfo.enode_flags;
appreply.edif_edb_active = vha->e_dbell.db_flags;
+ appreply.version = EDIF_VERSION1;
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
@@ -610,9 +667,6 @@ qla_edif_app_stop(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
fcport->send_els_logo = 1;
qlt_schedule_sess_for_deletion(fcport);
-
- /* qla_edif_flush_sa_ctl_lists(fcport); */
- fcport->edif.app_started = 0;
}
}
@@ -672,6 +726,7 @@ qla_edif_app_authok(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
portid.b.area = appplogiok.u.d_id.b.area;
portid.b.al_pa = appplogiok.u.d_id.b.al_pa;
+ appplogireply.version = EDIF_VERSION1;
switch (appplogiok.type) {
case PL_TYPE_WWPN:
fcport = qla2x00_find_fcport_by_wwpn(vha,
@@ -864,6 +919,8 @@ qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
} else {
struct fc_port *fcport = NULL, *tf;
+ app_reply->version = EDIF_VERSION1;
+
list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
if (!(fcport->flags & FCF_FCSP_DEVICE))
continue;
@@ -880,9 +937,25 @@ qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
if (tdid.b24 != 0 && tdid.b24 != fcport->d_id.b24)
continue;
- app_reply->ports[pcnt].rekey_count =
- fcport->edif.rekey_cnt;
+ if (!N2N_TOPO(vha->hw)) {
+ if (fcport->scan_state != QLA_FCPORT_FOUND)
+ continue;
+
+ if (fcport->port_type == FCT_UNKNOWN &&
+ !fcport->fc4_features)
+ rval = qla24xx_async_gffid(vha, fcport,
+ true);
+ if (!rval &&
+ !(fcport->fc4_features & FC4_FF_TARGET ||
+ fcport->port_type &
+ (FCT_TARGET | FCT_NVME_TARGET)))
+ continue;
+ }
+
+ rval = 0;
+
+ app_reply->ports[pcnt].version = EDIF_VERSION1;
app_reply->ports[pcnt].remote_type =
VND_CMD_RTYPE_UNKNOWN;
if (fcport->port_type & (FCT_NVME_TARGET | FCT_TARGET))
@@ -979,6 +1052,8 @@ qla_edif_app_getstats(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
} else {
struct fc_port *fcport = NULL, *tf;
+ app_reply->version = EDIF_VERSION1;
+
list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
if (fcport->edif.enable) {
if (pcnt > app_req.num_ports)
@@ -1012,6 +1087,164 @@ qla_edif_app_getstats(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
return rval;
}
+static int32_t
+qla_edif_ack(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+ struct fc_port *fcport;
+ struct aen_complete_cmd ack;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, &ack, sizeof(ack));
+
+ ql_dbg(ql_dbg_edif, vha, 0x70cf,
+ "%s: %06x event_code %x\n",
+ __func__, ack.port_id.b24, ack.event_code);
+
+ fcport = qla2x00_find_fcport_by_pid(vha, &ack.port_id);
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+
+ if (!fcport) {
+ ql_dbg(ql_dbg_edif, vha, 0x70cf,
+ "%s: unable to find fcport %06x \n",
+ __func__, ack.port_id.b24);
+ return 0;
+ }
+
+ switch (ack.event_code) {
+ case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
+ fcport->edif.sess_down_acked = 1;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int qla_edif_consume_dbell(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ u32 sg_skip, reply_payload_len;
+ bool keep;
+ struct edb_node *dbnode = NULL;
+ struct edif_app_dbell ap;
+ int dat_size = 0;
+
+ sg_skip = 0;
+ reply_payload_len = bsg_job->reply_payload.payload_len;
+
+ while ((reply_payload_len - sg_skip) >= sizeof(struct edb_node)) {
+ dbnode = qla_edb_getnext(vha);
+ if (dbnode) {
+ keep = true;
+ dat_size = 0;
+ ap.event_code = dbnode->ntype;
+ switch (dbnode->ntype) {
+ case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
+ case VND_CMD_AUTH_STATE_NEEDED:
+ ap.port_id = dbnode->u.plogi_did;
+ dat_size += sizeof(ap.port_id);
+ break;
+ case VND_CMD_AUTH_STATE_ELS_RCVD:
+ ap.port_id = dbnode->u.els_sid;
+ dat_size += sizeof(ap.port_id);
+ break;
+ case VND_CMD_AUTH_STATE_SAUPDATE_COMPL:
+ ap.port_id = dbnode->u.sa_aen.port_id;
+ memcpy(&ap.event_data, &dbnode->u,
+ sizeof(struct edif_sa_update_aen));
+ dat_size += sizeof(struct edif_sa_update_aen);
+ break;
+ default:
+ keep = false;
+ ql_log(ql_log_warn, vha, 0x09102,
+ "%s unknown DB type=%d %p\n",
+ __func__, dbnode->ntype, dbnode);
+ break;
+ }
+ ap.event_data_size = dat_size;
+ /* 8 = sizeof(ap.event_code + ap.event_data_size) */
+ dat_size += 8;
+ if (keep)
+ sg_skip += sg_copy_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt,
+ &ap, dat_size, sg_skip, false);
+
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s Doorbell consumed : type=%d %p\n",
+ __func__, dbnode->ntype, dbnode);
+
+ kfree(dbnode);
+ } else {
+ break;
+ }
+ }
+
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+ bsg_reply->reply_payload_rcv_len = sg_skip;
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+
+ return 0;
+}
+
+static void __qla_edif_dbell_bsg_done(scsi_qla_host_t *vha, struct bsg_job *bsg_job,
+ u32 delay)
+{
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+
+ /* small sleep for doorbell events to accumulate */
+ if (delay)
+ msleep(delay);
+
+ qla_edif_consume_dbell(vha, bsg_job);
+
+ bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+}
+
+static void qla_edif_dbell_bsg_done(scsi_qla_host_t *vha)
+{
+ unsigned long flags;
+ struct bsg_job *prev_bsg_job = NULL;
+
+ spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
+ if (vha->e_dbell.dbell_bsg_job) {
+ prev_bsg_job = vha->e_dbell.dbell_bsg_job;
+ vha->e_dbell.dbell_bsg_job = NULL;
+ }
+ spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
+
+ if (prev_bsg_job)
+ __qla_edif_dbell_bsg_done(vha, prev_bsg_job, 0);
+}
+
+static int
+qla_edif_dbell_bsg(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+ unsigned long flags;
+ bool return_bsg = false;
+
+ /* flush previous dbell bsg */
+ qla_edif_dbell_bsg_done(vha);
+
+ spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
+ if (list_empty(&vha->e_dbell.head) && DBELL_ACTIVE(vha)) {
+ /*
+ * when the next db event happens, bsg_job will return.
+ * Otherwise, timer will return it.
+ */
+ vha->e_dbell.dbell_bsg_job = bsg_job;
+ vha->e_dbell.bsg_expire = jiffies + 10 * HZ;
+ } else {
+ return_bsg = true;
+ }
+ spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
+
+ if (return_bsg)
+ __qla_edif_dbell_bsg_done(vha, bsg_job, 1);
+
+ return 0;
+}
+
int32_t
qla_edif_app_mgmt(struct bsg_job *bsg_job)
{
@@ -1023,8 +1256,13 @@ qla_edif_app_mgmt(struct bsg_job *bsg_job)
bool done = true;
int32_t rval = 0;
uint32_t vnd_sc = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
+ u32 level = ql_dbg_edif;
+
+ /* doorbell is high traffic */
+ if (vnd_sc == QL_VND_SC_READ_DBELL)
+ level = 0;
- ql_dbg(ql_dbg_edif, vha, 0x911d, "%s vnd subcmd=%x\n",
+ ql_dbg(level, vha, 0x911d, "%s vnd subcmd=%x\n",
__func__, vnd_sc);
sg_copy_to_buffer(bsg_job->request_payload.sg_list,
@@ -1033,7 +1271,7 @@ qla_edif_app_mgmt(struct bsg_job *bsg_job)
if (!vha->hw->flags.edif_enabled ||
test_bit(VPORT_DELETE, &vha->dpc_flags)) {
- ql_dbg(ql_dbg_edif, vha, 0x911d,
+ ql_dbg(level, vha, 0x911d,
"%s edif not enabled or vp delete. bsg ptr done %p. dpc_flags %lx\n",
__func__, bsg_job, vha->dpc_flags);
@@ -1042,7 +1280,7 @@ qla_edif_app_mgmt(struct bsg_job *bsg_job)
}
if (!qla_edif_app_check(vha, appcheck)) {
- ql_dbg(ql_dbg_edif, vha, 0x911d,
+ ql_dbg(level, vha, 0x911d,
"%s app checked failed.\n",
__func__);
@@ -1074,6 +1312,13 @@ qla_edif_app_mgmt(struct bsg_job *bsg_job)
case QL_VND_SC_GET_STATS:
rval = qla_edif_app_getstats(vha, bsg_job);
break;
+ case QL_VND_SC_AEN_COMPLETE:
+ rval = qla_edif_ack(vha, bsg_job);
+ break;
+ case QL_VND_SC_READ_DBELL:
+ rval = qla_edif_dbell_bsg(vha, bsg_job);
+ done = false;
+ break;
default:
ql_dbg(ql_dbg_edif, vha, 0x911d, "%s unknown cmd=%x\n",
__func__,
@@ -1085,7 +1330,7 @@ qla_edif_app_mgmt(struct bsg_job *bsg_job)
done:
if (done) {
- ql_dbg(ql_dbg_user, vha, 0x7009,
+ ql_dbg(level, vha, 0x7009,
"%s: %d bsg ptr done %p\n", __func__, __LINE__, bsg_job);
bsg_job_done(bsg_job, bsg_reply->result,
bsg_reply->reply_payload_rcv_len);
@@ -1247,6 +1492,8 @@ qla24xx_check_sadb_avail_slot(struct bsg_job *bsg_job, fc_port_t *fcport,
#define QLA_SA_UPDATE_FLAGS_RX_KEY 0x0
#define QLA_SA_UPDATE_FLAGS_TX_KEY 0x2
+#define EDIF_MSLEEP_INTERVAL 100
+#define EDIF_RETRY_COUNT 50
int
qla24xx_sadb_update(struct bsg_job *bsg_job)
@@ -1259,7 +1506,7 @@ qla24xx_sadb_update(struct bsg_job *bsg_job)
struct edif_list_entry *edif_entry = NULL;
int found = 0;
int rval = 0;
- int result = 0;
+ int result = 0, cnt;
struct qla_sa_update_frame sa_frame;
struct srb_iocb *iocb_cmd;
port_id_t portid;
@@ -1500,11 +1747,23 @@ force_rx_delete:
sp->done = qla2x00_bsg_job_done;
iocb_cmd = &sp->u.iocb_cmd;
iocb_cmd->u.sa_update.sa_frame = sa_frame;
-
+ cnt = 0;
+retry:
rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS) {
+ switch (rval) {
+ case QLA_SUCCESS:
+ break;
+ case EAGAIN:
+ msleep(EDIF_MSLEEP_INTERVAL);
+ cnt++;
+ if (cnt < EDIF_RETRY_COUNT)
+ goto retry;
+
+ fallthrough;
+ default:
ql_log(ql_dbg_edif, vha, 0x70e3,
- "qla2x00_start_sp failed=%d.\n", rval);
+ "%s qla2x00_start_sp failed=%d.\n",
+ __func__, rval);
qla2x00_rel_sp(sp);
rval = -EIO;
@@ -1797,30 +2056,6 @@ qla_edb_init(scsi_qla_host_t *vha)
/* initialize lock which protects doorbell & init list */
spin_lock_init(&vha->e_dbell.db_lock);
INIT_LIST_HEAD(&vha->e_dbell.head);
-
- /* create and initialize doorbell */
- init_completion(&vha->e_dbell.dbell);
-}
-
-static void
-qla_edb_node_free(scsi_qla_host_t *vha, struct edb_node *node)
-{
- /*
- * releases the space held by this edb node entry
- * this function does _not_ free the edb node itself
- * NB: the edb node entry passed should not be on any list
- *
- * currently for doorbell there's no additional cleanup
- * needed, but here as a placeholder for furture use.
- */
-
- if (!node) {
- ql_dbg(ql_dbg_edif, vha, 0x09122,
- "%s error - no valid node passed\n", __func__);
- return;
- }
-
- node->ntype = N_UNDEF;
}
static void qla_edb_clear(scsi_qla_host_t *vha, port_id_t portid)
@@ -1867,11 +2102,8 @@ static void qla_edb_clear(scsi_qla_host_t *vha, port_id_t portid)
}
spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
- list_for_each_entry_safe(e, tmp, &edb_list, list) {
+ list_for_each_entry_safe(e, tmp, &edb_list, list)
qla_edb_node_free(vha, e);
- list_del_init(&e->list);
- kfree(e);
- }
}
/* function called when app is stopping */
@@ -1899,14 +2131,10 @@ qla_edb_stop(scsi_qla_host_t *vha)
"%s freeing edb_node type=%x\n",
__func__, node->ntype);
qla_edb_node_free(vha, node);
- list_del(&node->list);
-
- kfree(node);
}
spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
- /* wake up doorbell waiters - they'll be dismissed with error code */
- complete_all(&vha->e_dbell.dbell);
+ qla_edif_dbell_bsg_done(vha);
}
static struct edb_node *
@@ -1944,9 +2172,6 @@ qla_edb_node_add(scsi_qla_host_t *vha, struct edb_node *ptr)
list_add_tail(&ptr->list, &vha->e_dbell.head);
spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
- /* ring doorbell for waiters */
- complete(&vha->e_dbell.dbell);
-
return true;
}
@@ -2010,47 +2235,29 @@ qla_edb_eventcreate(scsi_qla_host_t *vha, uint32_t dbtype,
edbnode->u.sa_aen.port_id = fcport->d_id;
edbnode->u.sa_aen.status = data;
edbnode->u.sa_aen.key_type = data2;
+ edbnode->u.sa_aen.version = EDIF_VERSION1;
break;
default:
ql_dbg(ql_dbg_edif, vha, 0x09102,
"%s unknown type: %x\n", __func__, dbtype);
- qla_edb_node_free(vha, edbnode);
kfree(edbnode);
edbnode = NULL;
break;
}
- if (edbnode && (!qla_edb_node_add(vha, edbnode))) {
+ if (edbnode) {
+ if (!qla_edb_node_add(vha, edbnode)) {
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s unable to add dbnode\n", __func__);
+ kfree(edbnode);
+ return;
+ }
ql_dbg(ql_dbg_edif, vha, 0x09102,
- "%s unable to add dbnode\n", __func__);
- qla_edb_node_free(vha, edbnode);
- kfree(edbnode);
- return;
- }
- if (edbnode && fcport)
- fcport->edif.auth_state = dbtype;
- ql_dbg(ql_dbg_edif, vha, 0x09102,
- "%s Doorbell produced : type=%d %p\n", __func__, dbtype, edbnode);
-}
-
-static struct edb_node *
-qla_edb_getnext(scsi_qla_host_t *vha)
-{
- unsigned long flags;
- struct edb_node *edbnode = NULL;
-
- spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
-
- /* db nodes are fifo - no qualifications done */
- if (!list_empty(&vha->e_dbell.head)) {
- edbnode = list_first_entry(&vha->e_dbell.head,
- struct edb_node, list);
- list_del(&edbnode->list);
+ "%s Doorbell produced : type=%d %p\n", __func__, dbtype, edbnode);
+ qla_edif_dbell_bsg_done(vha);
+ if (fcport)
+ fcport->edif.auth_state = dbtype;
}
-
- spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
-
- return edbnode;
}
void
@@ -2078,89 +2285,14 @@ qla_edif_timer(scsi_qla_host_t *vha)
ha->edif_post_stop_cnt_down = 60;
}
}
-}
-/*
- * app uses separate thread to read this. It'll wait until the doorbell
- * is rung by the driver or the max wait time has expired
- */
-ssize_t
-edif_doorbell_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
- struct edb_node *dbnode = NULL;
- struct edif_app_dbell *ap = (struct edif_app_dbell *)buf;
- uint32_t dat_siz, buf_size, sz;
-
- /* TODO: app currently hardcoded to 256. Will transition to bsg */
- sz = 256;
-
- /* stop new threads from waiting if we're not init'd */
- if (DBELL_INACTIVE(vha)) {
- ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x09122,
- "%s error - edif db not enabled\n", __func__);
- return 0;
- }
-
- if (!vha->hw->flags.edif_enabled) {
- /* edif not enabled */
- ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x09122,
- "%s error - edif not enabled\n", __func__);
- return -1;
- }
-
- buf_size = 0;
- while ((sz - buf_size) >= sizeof(struct edb_node)) {
- /* remove the next item from the doorbell list */
- dat_siz = 0;
- dbnode = qla_edb_getnext(vha);
- if (dbnode) {
- ap->event_code = dbnode->ntype;
- switch (dbnode->ntype) {
- case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
- case VND_CMD_AUTH_STATE_NEEDED:
- ap->port_id = dbnode->u.plogi_did;
- dat_siz += sizeof(ap->port_id);
- break;
- case VND_CMD_AUTH_STATE_ELS_RCVD:
- ap->port_id = dbnode->u.els_sid;
- dat_siz += sizeof(ap->port_id);
- break;
- case VND_CMD_AUTH_STATE_SAUPDATE_COMPL:
- ap->port_id = dbnode->u.sa_aen.port_id;
- memcpy(ap->event_data, &dbnode->u,
- sizeof(struct edif_sa_update_aen));
- dat_siz += sizeof(struct edif_sa_update_aen);
- break;
- default:
- /* unknown node type, rtn unknown ntype */
- ap->event_code = VND_CMD_AUTH_STATE_UNDEF;
- memcpy(ap->event_data, &dbnode->ntype, 4);
- dat_siz += 4;
- break;
- }
-
- ql_dbg(ql_dbg_edif, vha, 0x09102,
- "%s Doorbell consumed : type=%d %p\n",
- __func__, dbnode->ntype, dbnode);
- /* we're done with the db node, so free it up */
- qla_edb_node_free(vha, dbnode);
- kfree(dbnode);
- } else {
- break;
- }
-
- ap->event_data_size = dat_siz;
- /* 8bytes = ap->event_code + ap->event_data_size */
- buf_size += dat_siz + 8;
- ap = (struct edif_app_dbell *)(buf + buf_size);
- }
- return buf_size;
+ if (vha->e_dbell.dbell_bsg_job && time_after_eq(jiffies, vha->e_dbell.bsg_expire))
+ qla_edif_dbell_bsg_done(vha);
}
static void qla_noop_sp_done(srb_t *sp, int res)
{
+ sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
/* ref: INIT */
kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
@@ -2185,7 +2317,8 @@ qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha, struct qla_work_evt *e)
if (!sa_ctl) {
ql_dbg(ql_dbg_edif, vha, 0x70e6,
"sa_ctl allocation failed\n");
- return -ENOMEM;
+ rval = -ENOMEM;
+ goto done;
}
fcport = sa_ctl->fcport;
@@ -2195,7 +2328,8 @@ qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha, struct qla_work_evt *e)
if (!sp) {
ql_dbg(ql_dbg_edif, vha, 0x70e6,
"SRB allocation failed\n");
- return -ENOMEM;
+ rval = -ENOMEM;
+ goto done;
}
fcport->flags |= FCF_ASYNC_SENT;
@@ -2224,10 +2358,17 @@ qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha, struct qla_work_evt *e)
rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
- rval = QLA_FUNCTION_FAILED;
+ if (rval != QLA_SUCCESS) {
+ goto done_free_sp;
+ }
return rval;
+done_free_sp:
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
+ fcport->flags &= ~FCF_ASYNC_SENT;
+done:
+ fcport->flags &= ~FCF_ASYNC_ACTIVE;
+ return rval;
}
void qla24xx_sa_update_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb)
@@ -2446,8 +2587,7 @@ void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp)
fcport = qla2x00_find_fcport_by_pid(host, &purex->pur_info.pur_sid);
- if (DBELL_INACTIVE(vha) ||
- (fcport && EDIF_SESSION_DOWN(fcport))) {
+ if (DBELL_INACTIVE(vha)) {
ql_dbg(ql_dbg_edif, host, 0x0910c, "%s e_dbell.db_flags =%x %06x\n",
__func__, host->e_dbell.db_flags,
fcport ? fcport->d_id.b24 : 0);
@@ -2457,6 +2597,22 @@ void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp)
return;
}
+ if (fcport && EDIF_SESSION_DOWN(fcport)) {
+ ql_dbg(ql_dbg_edif, host, 0x13b6,
+ "%s terminate exchange. Send logo to 0x%x\n",
+ __func__, a.did.b24);
+
+ a.tx_byte_count = a.tx_len = 0;
+ a.tx_addr = 0;
+ a.control_flags = EPD_RX_XCHG; /* EPD_RX_XCHG = terminate cmd */
+ qla_els_reject_iocb(host, (*rsp)->qpair, &a);
+ qla_enode_free(host, ptr);
+ /* send logo to let remote port know to tear down session */
+ fcport->send_els_logo = 1;
+ qlt_schedule_sess_for_deletion(fcport);
+ return;
+ }
+
/* add the local enode to the list */
qla_enode_add(host, ptr);
@@ -2832,6 +2988,12 @@ qla28xx_start_scsi_edif(srb_t *sp)
tot_dsds = nseg;
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+
+ sp->iores.res_type = RESOURCE_INI;
+ sp->iores.iocb_cnt = req_cnt;
+ if (qla_get_iocbs(sp->qpair, &sp->iores))
+ goto queuing_error;
+
if (req->cnt < (req_cnt + 2)) {
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
rd_reg_dword(req->req_q_out);
@@ -3023,6 +3185,7 @@ queuing_error:
mempool_free(sp->u.scmd.ct6_ctx, ha->ctx_mempool);
sp->u.scmd.ct6_ctx = NULL;
}
+ qla_put_iocbs(sp->qpair, &sp->iores);
spin_unlock_irqrestore(lock, flags);
return QLA_FUNCTION_FAILED;
@@ -3349,10 +3512,14 @@ int qla_edif_process_els(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
fc_port_t *fcport = NULL;
struct qla_hw_data *ha = vha->hw;
srb_t *sp;
- int rval = (DID_ERROR << 16);
+ int rval = (DID_ERROR << 16), cnt;
port_id_t d_id;
struct qla_bsg_auth_els_request *p =
(struct qla_bsg_auth_els_request *)bsg_job->request;
+ struct qla_bsg_auth_els_reply *rpl =
+ (struct qla_bsg_auth_els_reply *)bsg_job->reply;
+
+ rpl->version = EDIF_VERSION1;
d_id.b.al_pa = bsg_request->rqst_data.h_els.port_id[2];
d_id.b.area = bsg_request->rqst_data.h_els.port_id[1];
@@ -3371,7 +3538,7 @@ int qla_edif_process_els(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
if (qla_bsg_check(vha, bsg_job, fcport))
return 0;
- if (fcport->loop_id == FC_NO_LOOP_ID) {
+ if (EDIF_SESS_DELETE(fcport)) {
ql_dbg(ql_dbg_edif, vha, 0x910d,
"%s ELS code %x, no loop id.\n", __func__,
bsg_request->rqst_data.r_els.els_code);
@@ -3440,17 +3607,26 @@ int qla_edif_process_els(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
sp->free = qla2x00_bsg_sp_free;
sp->done = qla2x00_bsg_job_done;
+ cnt = 0;
+retry:
rval = qla2x00_start_sp(sp);
-
- ql_dbg(ql_dbg_edif, vha, 0x700a,
- "%s %s %8phN xchg %x ctlflag %x hdl %x reqlen %xh bsg ptr %p\n",
- __func__, sc_to_str(p->e.sub_cmd), fcport->port_name,
- p->e.extra_rx_xchg_address, p->e.extra_control_flags,
- sp->handle, sp->remap.req.len, bsg_job);
-
- if (rval != QLA_SUCCESS) {
+ switch (rval) {
+ case QLA_SUCCESS:
+ ql_dbg(ql_dbg_edif, vha, 0x700a,
+ "%s %s %8phN xchg %x ctlflag %x hdl %x reqlen %xh bsg ptr %p\n",
+ __func__, sc_to_str(p->e.sub_cmd), fcport->port_name,
+ p->e.extra_rx_xchg_address, p->e.extra_control_flags,
+ sp->handle, sp->remap.req.len, bsg_job);
+ break;
+ case EAGAIN:
+ msleep(EDIF_MSLEEP_INTERVAL);
+ cnt++;
+ if (cnt < EDIF_RETRY_COUNT)
+ goto retry;
+ fallthrough;
+ default:
ql_log(ql_log_warn, vha, 0x700e,
- "qla2x00_start_sp failed = %d\n", rval);
+ "%s qla2x00_start_sp failed = %d\n", __func__, rval);
SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
rval = -EIO;
goto done_free_remap_rsp;
@@ -3472,14 +3648,29 @@ done:
void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess)
{
+ u16 cnt = 0;
+
if (sess->edif.app_sess_online && DBELL_ACTIVE(vha)) {
ql_dbg(ql_dbg_disc, vha, 0xf09c,
"%s: sess %8phN send port_offline event\n",
__func__, sess->port_name);
sess->edif.app_sess_online = 0;
+ sess->edif.sess_down_acked = 0;
qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SESSION_SHUTDOWN,
sess->d_id.b24, 0, sess);
qla2x00_post_aen_work(vha, FCH_EVT_PORT_OFFLINE, sess->d_id.b24);
+
+ while (!READ_ONCE(sess->edif.sess_down_acked) &&
+ !test_bit(VPORT_DELETE, &vha->dpc_flags)) {
+ msleep(100);
+ cnt++;
+ if (cnt > 100)
+ break;
+ }
+ sess->edif.sess_down_acked = 0;
+ ql_dbg(ql_dbg_disc, vha, 0xf09c,
+ "%s: sess %8phN port_offline event completed\n",
+ __func__, sess->port_name);
}
}
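
The new EDIF_MSLEEP_INTERVAL/EDIF_RETRY_COUNT pair bounds the EAGAIN retry loops above (qla24xx_sadb_update and qla_edif_process_els) to roughly 100 ms x 50 = 5 s. A toy userspace sketch of that pattern; start_sp_stub is a made-up stand-in for qla2x00_start_sp, which is not reproduced here:

#include <errno.h>
#include <stdio.h>

#define EDIF_MSLEEP_INTERVAL 100
#define EDIF_RETRY_COUNT 50

/* Pretend the firmware queue frees up after a few attempts. */
static int start_sp_stub(int *attempts_left)
{
	return (*attempts_left)-- > 0 ? EAGAIN : 0;
}

int main(void)
{
	int busy_for = 3, cnt = 0, rval;

	do {
		rval = start_sp_stub(&busy_for);
		if (rval != EAGAIN)
			break;
		/* the driver sleeps here: msleep(EDIF_MSLEEP_INTERVAL) */
	} while (++cnt < EDIF_RETRY_COUNT);

	printf("rval=%d after %d retries (max wait ~%d ms)\n",
	       rval, cnt, EDIF_MSLEEP_INTERVAL * EDIF_RETRY_COUNT);
	return 0;
}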
diff --git a/drivers/scsi/qla2xxx/qla_edif.h b/drivers/scsi/qla2xxx/qla_edif.h
index a965ca8e47ce..7cdb89ccdc6e 100644
--- a/drivers/scsi/qla2xxx/qla_edif.h
+++ b/drivers/scsi/qla2xxx/qla_edif.h
@@ -51,7 +51,8 @@ struct edif_dbell {
enum db_flags_t db_flags;
spinlock_t db_lock;
struct list_head head;
- struct completion dbell;
+ struct bsg_job *dbell_bsg_job;
+ unsigned long bsg_expire;
};
#define SA_UPDATE_IOCB_TYPE 0x71 /* Security Association Update IOCB entry */
@@ -140,4 +141,8 @@ struct enode {
(DBELL_ACTIVE(_fcport->vha) && \
(_fcport->disc_state == DSC_LOGIN_AUTH_PEND))
+#define EDIF_SESS_DELETE(_s) \
+ (qla_ini_mode_enabled(_s->vha) && (_s->disc_state == DSC_DELETE_PEND || \
+ _s->disc_state == DSC_DELETED))
+
#endif /* __QLA_EDIF_H */
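
The dbell_bsg_job/bsg_expire pair replaces the completion removed above: a held doorbell BSG is completed either by the next doorbell event or by qla_edif_timer() once jiffies passes bsg_expire (set to jiffies + 10 * HZ). A 32-bit stand-in for the kernel's wrap-safe time_after_eq() comparison, with made-up HZ and tick values:

#include <stdint.h>
#include <stdio.h>

/* 32-bit analogue of the kernel macro: wrap-safe signed subtraction. */
typedef uint32_t jiffies_t;

static int time_after_eq(jiffies_t a, jiffies_t b)
{
	return (int32_t)(a - b) >= 0;
}

int main(void)
{
	jiffies_t HZ = 250;
	jiffies_t now = 0xfffffff0u;		/* close to wrap */
	jiffies_t expire = now + 10 * HZ;	/* wraps past zero */

	printf("expired now?        %d\n", time_after_eq(now, expire));
	printf("expired after ~11s? %d\n", time_after_eq(now + 11 * HZ, expire));
	return 0;
}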
diff --git a/drivers/scsi/qla2xxx/qla_edif_bsg.h b/drivers/scsi/qla2xxx/qla_edif_bsg.h
index 5a26c77157da..0931f4e4e127 100644
--- a/drivers/scsi/qla2xxx/qla_edif_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_edif_bsg.h
@@ -7,13 +7,15 @@
#ifndef __QLA_EDIF_BSG_H
#define __QLA_EDIF_BSG_H
+#define EDIF_VERSION1 1
+
/* BSG Vendor specific commands */
#define ELS_MAX_PAYLOAD 2112
#ifndef WWN_SIZE
#define WWN_SIZE 8
#endif
-#define VND_CMD_APP_RESERVED_SIZE 32
-
+#define VND_CMD_APP_RESERVED_SIZE 28
+#define VND_CMD_PAD_SIZE 3
enum auth_els_sub_cmd {
SEND_ELS = 0,
SEND_ELS_REPLY,
@@ -28,7 +30,9 @@ struct extra_auth_els {
#define BSG_CTL_FLAG_LS_ACC 1
#define BSG_CTL_FLAG_LS_RJT 2
#define BSG_CTL_FLAG_TRM 3
- uint8_t extra_rsvd[3];
+ uint8_t version;
+ uint8_t pad[2];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
struct qla_bsg_auth_els_request {
@@ -39,51 +43,46 @@ struct qla_bsg_auth_els_request {
struct qla_bsg_auth_els_reply {
struct fc_bsg_reply r;
uint32_t rx_xchg_address;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
};
struct app_id {
int app_vid;
- uint8_t app_key[32];
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
struct app_start_reply {
uint32_t host_support_edif;
uint32_t edif_enode_active;
uint32_t edif_edb_active;
- uint32_t reserved[VND_CMD_APP_RESERVED_SIZE];
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
struct app_start {
struct app_id app_info;
- uint32_t prli_to;
- uint32_t key_shred;
uint8_t app_start_flags;
- uint8_t reserved[VND_CMD_APP_RESERVED_SIZE - 1];
+ uint8_t version;
+ uint8_t pad[2];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
struct app_stop {
struct app_id app_info;
- char buf[16];
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
struct app_plogi_reply {
uint32_t prli_status;
- uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
-} __packed;
-
-#define RECFG_TIME 1
-#define RECFG_BYTES 2
-
-struct app_rekey_cfg {
- struct app_id app_info;
- uint8_t rekey_mode;
- port_id_t d_id;
- uint8_t force;
- union {
- int64_t bytes;
- int64_t time;
- } rky_units;
-
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
@@ -91,7 +90,9 @@ struct app_pinfo_req {
struct app_id app_info;
uint8_t num_ports;
port_id_t remote_pid;
- uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
struct app_pinfo {
@@ -103,11 +104,8 @@ struct app_pinfo {
#define VND_CMD_RTYPE_INITIATOR 2
uint8_t remote_state;
uint8_t auth_state;
- uint8_t rekey_mode;
- int64_t rekey_count;
- int64_t rekey_config_value;
- int64_t rekey_consumed_value;
-
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
@@ -120,6 +118,8 @@ struct app_pinfo {
struct app_pinfo_reply {
uint8_t port_count;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
struct app_pinfo ports[];
} __packed;
@@ -127,6 +127,8 @@ struct app_pinfo_reply {
struct app_sinfo_req {
struct app_id app_info;
uint8_t num_ports;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
@@ -140,6 +142,9 @@ struct app_sinfo {
struct app_stats_reply {
uint8_t elem_count;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
struct app_sinfo elem[];
} __packed;
@@ -163,9 +168,11 @@ struct qla_sa_update_frame {
uint8_t node_name[WWN_SIZE];
uint8_t port_name[WWN_SIZE];
port_id_t port_id;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved2[VND_CMD_APP_RESERVED_SIZE];
} __packed;
-// used for edif mgmt bsg interface
#define QL_VND_SC_UNDEF 0
#define QL_VND_SC_SA_UPDATE 1
#define QL_VND_SC_APP_START 2
@@ -175,6 +182,22 @@ struct qla_sa_update_frame {
#define QL_VND_SC_REKEY_CONFIG 6
#define QL_VND_SC_GET_FCINFO 7
#define QL_VND_SC_GET_STATS 8
+#define QL_VND_SC_AEN_COMPLETE 9
+#define QL_VND_SC_READ_DBELL 10
+
+/*
+ * bsg caller to provide empty buffer for doorbell events.
+ *
+ * sg_io_v4.din_xferp = empty buffer for door bell events
+ * sg_io_v4.dout_xferp = struct edif_read_dbell *buf
+ */
+struct edif_read_dbell {
+ struct app_id app_info;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+};
+
/* Application interface data structure for rtn data */
#define EXT_DEF_EVENT_DATA_SIZE 64
@@ -191,7 +214,9 @@ struct edif_sa_update_aen {
port_id_t port_id;
uint32_t key_type; /* Tx (1) or RX (2) */
uint32_t status; /* 0 success, 1 failed, 2 timeout, 3 error */
- uint8_t reserved[16];
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
#define QL_VND_SA_STAT_SUCCESS 0
@@ -212,9 +237,22 @@ struct auth_complete_cmd {
uint8_t wwpn[WWN_SIZE];
port_id_t d_id;
} u;
- uint32_t reserved[VND_CMD_APP_RESERVED_SIZE];
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __packed;
+
+struct aen_complete_cmd {
+ struct app_id app_info;
+ port_id_t port_id;
+ uint32_t event_code;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
#define RX_DELAY_DELETE_TIMEOUT 20
+#define FCH_EVT_VENDOR_UNIQUE_VPORT_DOWN 1
+
#endif /* QLA_EDIF_BSG_H */
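
All of the BSG structures above now follow one layout convention: a version byte, VND_CMD_PAD_SIZE bytes of explicit padding, and a VND_CMD_APP_RESERVED_SIZE reserved tail. A standalone re-declaration of app_id (assuming the 4-byte int of the driver's usual targets) that checks the resulting packed size:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define VND_CMD_APP_RESERVED_SIZE 28
#define VND_CMD_PAD_SIZE 3
#define EDIF_VERSION1 1

/* Local copy of the reworked layout; __attribute__((packed)) mirrors
 * the kernel's __packed. */
struct app_id {
	int app_vid;			/* compared against EDIF_APP_ID */
	uint8_t version;
	uint8_t pad[VND_CMD_PAD_SIZE];
	uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __attribute__((packed));

int main(void)
{
	struct app_id id = { .app_vid = 0, .version = EDIF_VERSION1 };

	/* 4 + 1 + 3 + 28 = 36 bytes, independent of compiler padding. */
	static_assert(sizeof(struct app_id) == 36, "unexpected app_id size");
	printf("app_id size=%zu version=%u\n", sizeof(id), id.version);
	return 0;
}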
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 0bb1d562f0bf..361015b5763e 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -807,7 +807,7 @@ struct els_entry_24xx {
#define EPD_ELS_COMMAND (0 << 13)
#define EPD_ELS_ACC (1 << 13)
#define EPD_ELS_RJT (2 << 13)
-#define EPD_RX_XCHG (3 << 13)
+#define EPD_RX_XCHG (3 << 13) /* terminate exchange */
#define ECF_CLR_PASSTHRU_PEND BIT_12
#define ECF_INCL_FRAME_HDR BIT_11
#define ECF_SEC_LOGIN BIT_3
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index dac27b5ff0ac..5dd2932382ee 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -193,6 +193,8 @@ extern int ql2xsecenable;
extern int ql2xenforce_iocb_limit;
extern int ql2xabts_wait_nvme;
extern u32 ql2xnvme_queues;
+extern int ql2xrspq_follow_inptr;
+extern int ql2xrspq_follow_inptr_legacy;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -335,6 +337,7 @@ extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *);
extern int qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha,
struct qla_work_evt *e);
void qla2x00_sp_release(struct kref *kref);
+void qla2x00_els_dcmd2_iocb_timeout(void *data);
/*
* Global Function Prototypes in qla_mbx.c source file.
@@ -433,7 +436,8 @@ extern int
qla2x00_get_resource_cnts(scsi_qla_host_t *);
extern int
-qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map);
+qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map,
+ u8 *num_entries);
extern int
qla2x00_get_link_status(scsi_qla_host_t *, uint16_t, struct link_statistics *,
@@ -554,6 +558,10 @@ qla2x00_dump_mctp_data(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t);
extern int
qla26xx_dport_diagnostics(scsi_qla_host_t *, void *, uint, uint);
+extern int
+qla26xx_dport_diagnostics_v2(scsi_qla_host_t *,
+ struct qla_dport_diag_v2 *, mbx_cmd_t *);
+
int qla24xx_send_mb_cmd(struct scsi_qla_host *, mbx_cmd_t *);
int qla24xx_gpdb_wait(struct scsi_qla_host *, fc_port_t *, u8);
int qla24xx_gidlist_wait(struct scsi_qla_host *, void *, dma_addr_t,
@@ -727,7 +735,7 @@ int qla24xx_async_gpsc(scsi_qla_host_t *, fc_port_t *);
void qla24xx_handle_gpsc_event(scsi_qla_host_t *, struct event_arg *);
int qla2x00_mgmt_svr_login(scsi_qla_host_t *);
void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea);
-int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport);
+int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport, bool);
int qla24xx_async_gpnft(scsi_qla_host_t *, u8, srb_t *);
void qla24xx_async_gpnft_done(scsi_qla_host_t *, srb_t *);
void qla24xx_async_gnnft_done(scsi_qla_host_t *, srb_t *);
@@ -989,7 +997,6 @@ fc_port_t *qla2x00_find_fcport_by_pid(scsi_qla_host_t *vha, port_id_t *id);
void qla_edb_eventcreate(scsi_qla_host_t *vha, uint32_t dbtype, uint32_t data, uint32_t data2,
fc_port_t *fcport);
void qla_edb_stop(scsi_qla_host_t *vha);
-ssize_t edif_doorbell_show(struct device *dev, struct device_attribute *attr, char *buf);
int32_t qla_edif_app_mgmt(struct bsg_job *bsg_job);
void qla_enode_init(scsi_qla_host_t *vha);
void qla_enode_stop(scsi_qla_host_t *vha);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index e811de2f6a25..64ab070b8716 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1596,7 +1596,6 @@ qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries,
unsigned int callopt)
{
struct qla_hw_data *ha = vha->hw;
- struct init_cb_24xx *icb24 = (void *)ha->init_cb;
struct new_utsname *p_sysid = utsname();
struct ct_fdmi_hba_attr *eiter;
uint16_t alen;
@@ -1617,7 +1616,7 @@ qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries,
eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
alen = scnprintf(
eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
- "%s", "QLogic Corporation");
+ "%s", QLA2XXX_MANUFACTURER);
alen += FDMI_ATTR_ALIGNMENT(alen);
alen += FDMI_ATTR_TYPELEN(eiter);
eiter->len = cpu_to_be16(alen);
@@ -1758,8 +1757,8 @@ qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries,
/* MAX CT Payload Length */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH);
- eiter->a.max_ct_len = cpu_to_be32(le16_to_cpu(IS_FWI2_CAPABLE(ha) ?
- icb24->frame_payload_size : ha->init_cb->frame_payload_size));
+ eiter->a.max_ct_len = cpu_to_be32(ha->frame_payload_size >> 2);
+
alen = sizeof(eiter->a.max_ct_len);
alen += FDMI_ATTR_TYPELEN(eiter);
eiter->len = cpu_to_be16(alen);
@@ -1851,7 +1850,6 @@ qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries,
unsigned int callopt)
{
struct qla_hw_data *ha = vha->hw;
- struct init_cb_24xx *icb24 = (void *)ha->init_cb;
struct new_utsname *p_sysid = utsname();
char *hostname = p_sysid ?
p_sysid->nodename : fc_host_system_hostname(vha->host);
@@ -1903,8 +1901,7 @@ qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries,
/* Max frame size. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
- eiter->a.max_frame_size = cpu_to_be32(le16_to_cpu(IS_FWI2_CAPABLE(ha) ?
- icb24->frame_payload_size : ha->init_cb->frame_payload_size));
+ eiter->a.max_frame_size = cpu_to_be32(ha->frame_payload_size);
alen = sizeof(eiter->a.max_frame_size);
alen += FDMI_ATTR_TYPELEN(eiter);
eiter->len = cpu_to_be16(alen);
@@ -3280,19 +3277,12 @@ done:
return rval;
}
-void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea)
-{
- fc_port_t *fcport = ea->fcport;
-
- qla24xx_post_gnl_work(vha, fcport);
-}
void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
{
struct scsi_qla_host *vha = sp->vha;
fc_port_t *fcport = sp->fcport;
struct ct_sns_rsp *ct_rsp;
- struct event_arg ea;
uint8_t fc4_scsi_feat;
uint8_t fc4_nvme_feat;
@@ -3300,10 +3290,10 @@ void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
"Async done-%s res %x ID %x. %8phC\n",
sp->name, res, fcport->d_id.b24, fcport->port_name);
- fcport->flags &= ~FCF_ASYNC_SENT;
- ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
+ ct_rsp = sp->u.iocb_cmd.u.ctarg.rsp;
fc4_scsi_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
fc4_nvme_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
+ sp->rc = res;
/*
* FC-GS-7, 5.2.3.12 FC-4 Features - format
@@ -3324,24 +3314,42 @@ void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
}
}
- memset(&ea, 0, sizeof(ea));
- ea.sp = sp;
- ea.fcport = sp->fcport;
- ea.rc = res;
+ if (sp->flags & SRB_WAKEUP_ON_COMP) {
+ complete(sp->comp);
+ } else {
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size,
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
- qla24xx_handle_gffid_event(vha, &ea);
- /* ref: INIT */
- kref_put(&sp->cmd_kref, qla2x00_sp_release);
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
+ /* we should not be here */
+ dump_stack();
+ }
}
/* Get FC4 Feature with Nport ID. */
-int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport)
+int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport, bool wait)
{
int rval = QLA_FUNCTION_FAILED;
struct ct_sns_req *ct_req;
srb_t *sp;
+ DECLARE_COMPLETION_ONSTACK(comp);
- if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ /* this routine only supports the blocking (wait) mode */
+ if (!vha->flags.online || !wait)
return rval;
/* ref: INIT */
@@ -3349,43 +3357,86 @@ int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport)
if (!sp)
return rval;
- fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_CT_PTHRU_CMD;
sp->name = "gffid";
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
qla24xx_async_gffid_sp_done);
+ sp->comp = &comp;
+ sp->u.iocb_cmd.timeout = qla2x00_els_dcmd2_iocb_timeout;
+
+ if (wait)
+ sp->flags = SRB_WAKEUP_ON_COMP;
+
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
+ sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size,
+ &sp->u.iocb_cmd.u.ctarg.req_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.req) {
+ ql_log(ql_log_warn, vha, 0xd041,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
+ sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
+ &sp->u.iocb_cmd.u.ctarg.rsp_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+ ql_log(ql_log_warn, vha, 0xd041,
+ "%s: Failed to allocate ct_sns response.\n",
+ __func__);
+ goto done_free_sp;
+ }
/* CT_IU preamble */
- ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFF_ID_CMD,
- GFF_ID_RSP_SIZE);
+ ct_req = qla2x00_prep_ct_req(sp->u.iocb_cmd.u.ctarg.req, GFF_ID_CMD, GFF_ID_RSP_SIZE);
ct_req->req.gff_id.port_id[0] = fcport->d_id.b.domain;
ct_req->req.gff_id.port_id[1] = fcport->d_id.b.area;
ct_req->req.gff_id.port_id[2] = fcport->d_id.b.al_pa;
- sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
- sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
- sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
- sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
sp->u.iocb_cmd.u.ctarg.req_size = GFF_ID_REQ_SIZE;
sp->u.iocb_cmd.u.ctarg.rsp_size = GFF_ID_RSP_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- ql_dbg(ql_dbg_disc, vha, 0x2132,
- "Async-%s hdl=%x %8phC.\n", sp->name,
- sp->handle, fcport->port_name);
-
rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
+
+ if (rval != QLA_SUCCESS) {
+ rval = QLA_FUNCTION_FAILED;
goto done_free_sp;
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x3074,
+ "Async-%s hdl=%x portid %06x\n",
+ sp->name, sp->handle, fcport->d_id.b24);
+ }
+
+ wait_for_completion(sp->comp);
+ rval = sp->rc;
- return rval;
done_free_sp:
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size,
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
+
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+
/* ref: INIT */
kref_put(&sp->cmd_kref, qla2x00_sp_release);
- fcport->flags &= ~FCF_ASYNC_SENT;
return rval;
}
@@ -3578,7 +3629,7 @@ login_logout:
do_delete) {
if (fcport->loop_id != FC_NO_LOOP_ID) {
if (fcport->flags & FCF_FCP2_DEVICE)
- fcport->logout_on_delete = 0;
+ continue;
ql_log(ql_log_warn, vha, 0x20f0,
"%s %d %8phC post del sess\n",
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 3f3417a3e891..e7fe0e52c11d 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -47,6 +47,7 @@ qla2x00_sp_timeout(struct timer_list *t)
{
srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
struct srb_iocb *iocb;
+ scsi_qla_host_t *vha = sp->vha;
WARN_ON(irqs_disabled());
iocb = &sp->u.iocb_cmd;
@@ -54,6 +55,12 @@ qla2x00_sp_timeout(struct timer_list *t)
/* ref: TMR */
kref_put(&sp->cmd_kref, qla2x00_sp_release);
+
+ if (vha && qla2x00_isp_reg_stat(vha->hw)) {
+ ql_log(ql_log_info, vha, 0x9008,
+ "PCI/Register disconnect.\n");
+ qla_pci_set_eeh_busy(vha);
+ }
}
void qla2x00_sp_free(srb_t *sp)
@@ -161,6 +168,7 @@ int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
struct srb_iocb *abt_iocb;
srb_t *sp;
int rval = QLA_FUNCTION_FAILED;
+ uint8_t bail;
/* ref: INIT for ABTS command */
sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
@@ -168,6 +176,7 @@ int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
if (!sp)
return QLA_MEMORY_ALLOC_FAILED;
+ QLA_VHA_MARK_BUSY(vha, bail);
abt_iocb = &sp->u.iocb_cmd;
sp->type = SRB_ABT_CMD;
sp->name = "abort";
@@ -1480,7 +1489,6 @@ static int qla_chk_secure_login(scsi_qla_host_t *vha, fc_port_t *fcport,
ql_dbg(ql_dbg_disc, vha, 0x20ef,
"%s %d %8phC EDIF: post DB_AUTH: AUTH needed\n",
__func__, __LINE__, fcport->port_name);
- fcport->edif.app_started = 1;
fcport->edif.app_sess_online = 1;
qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED,
@@ -1763,8 +1771,16 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
break;
case DSC_LOGIN_PEND:
- if (fcport->fw_login_state == DSC_LS_PLOGI_COMP)
+ if (vha->hw->flags.edif_enabled)
+ break;
+
+ if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
+ ql_dbg(ql_dbg_disc, vha, 0x2118,
+ "%s %d %8phC post %s PRLI\n",
+ __func__, __LINE__, fcport->port_name,
+ NVME_TARGET(vha->hw, fcport) ? "NVME" : "FC");
qla24xx_post_prli_work(vha, fcport);
+ }
break;
case DSC_UPD_FCPORT:
@@ -1818,7 +1834,8 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
case RSCN_PORT_ADDR:
fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
if (fcport) {
- if (fcport->flags & FCF_FCP2_DEVICE) {
+ if (fcport->flags & FCF_FCP2_DEVICE &&
+ atomic_read(&fcport->state) == FCS_ONLINE) {
ql_dbg(ql_dbg_disc, vha, 0x2115,
"Delaying session delete for FCP2 portid=%06x %8phC ",
fcport->d_id.b24, fcport->port_name);
@@ -1850,7 +1867,8 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
break;
case RSCN_AREA_ADDR:
list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (fcport->flags & FCF_FCP2_DEVICE)
+ if (fcport->flags & FCF_FCP2_DEVICE &&
+ atomic_read(&fcport->state) == FCS_ONLINE)
continue;
if ((ea->id.b24 & 0xffff00) == (fcport->d_id.b24 & 0xffff00)) {
@@ -1861,7 +1879,8 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
break;
case RSCN_DOM_ADDR:
list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (fcport->flags & FCF_FCP2_DEVICE)
+ if (fcport->flags & FCF_FCP2_DEVICE &&
+ atomic_read(&fcport->state) == FCS_ONLINE)
continue;
if ((ea->id.b24 & 0xff0000) == (fcport->d_id.b24 & 0xff0000)) {
@@ -1873,7 +1892,8 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
case RSCN_FAB_ADDR:
default:
list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (fcport->flags & FCF_FCP2_DEVICE)
+ if (fcport->flags & FCF_FCP2_DEVICE &&
+ atomic_read(&fcport->state) == FCS_ONLINE)
continue;
fcport->scan_needed = 1;
@@ -2000,12 +2020,14 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
struct srb_iocb *tm_iocb;
srb_t *sp;
int rval = QLA_FUNCTION_FAILED;
+ uint8_t bail;
/* ref: INIT */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
+ QLA_VHA_MARK_BUSY(vha, bail);
sp->type = SRB_TM_CMD;
sp->name = "tmf";
qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha),
@@ -2124,6 +2146,13 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
}
if (N2N_TOPO(vha->hw)) {
+ if (ea->fcport->n2n_link_reset_cnt ==
+ vha->hw->login_retry_count &&
+ ea->fcport->flags & FCF_FCSP_DEVICE) {
+ /* remote authentication app just started */
+ ea->fcport->n2n_link_reset_cnt = 0;
+ }
+
if (ea->fcport->n2n_link_reset_cnt <
vha->hw->login_retry_count) {
ea->fcport->n2n_link_reset_cnt++;
@@ -4509,6 +4538,8 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
BIT_6) != 0;
ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
(ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
+ /* Init_cb will be reused for other command(s). Save a backup copy of port_name */
+ memcpy(ha->port_name, ha->init_cb->port_name, WWN_SIZE);
}
/* ELS pass through payload is limit by frame size. */
@@ -5273,9 +5304,6 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
INIT_LIST_HEAD(&fcport->edif.tx_sa_list);
INIT_LIST_HEAD(&fcport->edif.rx_sa_list);
- if (vha->e_dbell.db_flags == EDB_ACTIVE)
- fcport->edif.app_started = 1;
-
spin_lock_init(&fcport->edif.indx_list_lock);
INIT_LIST_HEAD(&fcport->edif.edif_indx_list);
@@ -5488,6 +5516,22 @@ static int qla2x00_configure_n2n_loop(scsi_qla_host_t *vha)
return QLA_FUNCTION_FAILED;
}
+static void
+qla_reinitialize_link(scsi_qla_host_t *vha)
+{
+ int rval;
+
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+ rval = qla2x00_full_login_lip(vha);
+ if (rval == QLA_SUCCESS) {
+ ql_dbg(ql_dbg_disc, vha, 0xd050, "Link reinitialized\n");
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0xd051,
+ "Link reinitialization failed (%d)\n", rval);
+ }
+}
+
/*
* qla2x00_configure_local_loop
* Updates Fibre Channel Device Database with local loop devices.
@@ -5539,6 +5583,19 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
spin_unlock_irqrestore(&vha->work_lock, flags);
if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
+ u8 loop_map_entries = 0;
+ int rc;
+
+ rc = qla2x00_get_fcal_position_map(vha, NULL,
+ &loop_map_entries);
+ if (rc == QLA_SUCCESS && loop_map_entries > 1) {
+ /*
+ * There are devices that are still not logged
+ * in. Reinitialize to give them a chance.
+ */
+ qla_reinitialize_link(vha);
+ return QLA_FUNCTION_FAILED;
+ }
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
}
@@ -5767,8 +5824,6 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
if (atomic_read(&fcport->state) == FCS_ONLINE)
return;
- qla2x00_set_fcport_state(fcport, FCS_ONLINE);
-
rport_ids.node_name = wwn_to_u64(fcport->node_name);
rport_ids.port_name = wwn_to_u64(fcport->port_name);
rport_ids.port_id = fcport->d_id.b.domain << 16 |
@@ -5869,7 +5924,6 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
qla2x00_reg_remote_port(vha, fcport);
break;
case MODE_TARGET:
- qla2x00_set_fcport_state(fcport, FCS_ONLINE);
if (!vha->vha_tgt.qla_tgt->tgt_stop &&
!vha->vha_tgt.qla_tgt->tgt_stopped)
qlt_fc_port_added(vha, fcport);
@@ -5887,6 +5941,8 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
if (NVME_TARGET(vha->hw, fcport))
qla_nvme_register_remote(vha, fcport);
+ qla2x00_set_fcport_state(fcport, FCS_ONLINE);
+
if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {
if (fcport->id_changed) {
fcport->id_changed = 0;
@@ -7197,6 +7253,9 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
if (vha->flags.online) {
qla2x00_abort_isp_cleanup(vha);
+ vha->dport_status |= DPORT_DIAG_CHIP_RESET_IN_PROGRESS;
+ vha->dport_status &= ~DPORT_DIAG_IN_PROGRESS;
+
if (vha->hw->flags.port_isolated)
return status;
@@ -9657,6 +9716,12 @@ int qla2xxx_disable_port(struct Scsi_Host *host)
vha->hw->flags.port_isolated = 1;
+ if (qla2x00_isp_reg_stat(vha->hw)) {
+ ql_log(ql_log_info, vha, 0x9006,
+ "PCI/Register disconnect, exiting.\n");
+ qla_pci_set_eeh_busy(vha);
+ return FAILED;
+ }
if (qla2x00_chip_is_down(vha))
return 0;
@@ -9672,6 +9737,13 @@ int qla2xxx_enable_port(struct Scsi_Host *host)
{
scsi_qla_host_t *vha = shost_priv(host);
+ if (qla2x00_isp_reg_stat(vha->hw)) {
+ ql_log(ql_log_info, vha, 0x9001,
+ "PCI/Register disconnect, exiting.\n");
+ qla_pci_set_eeh_busy(vha);
+ return FAILED;
+ }
+
vha->hw->flags.port_isolated = 0;
/* Set the flag to 1, so that isp_abort can proceed */
vha->flags.online = 1;
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index e0fe9ddb4bd2..42ce4e1fe744 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2819,7 +2819,7 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
sp->vha->qla_stats.control_requests++;
}
-static void
+void
qla2x00_els_dcmd2_iocb_timeout(void *data)
{
srb_t *sp = data;
@@ -2882,6 +2882,9 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);
fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
+ /* For edif, set logout on delete to ensure any residual key from FW is flushed.*/
+ fcport->logout_on_delete = 1;
+ fcport->chip_reset = vha->hw->base_qpair->chip_reset;
if (sp->flags & SRB_WAKEUP_ON_COMP)
complete(&lio->u.els_plogi.comp);
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 21b31d6359c8..76e79f350a22 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1354,9 +1354,7 @@ skip_rio:
if (!vha->vp_idx) {
if (ha->flags.fawwpn_enabled &&
(ha->current_topology == ISP_CFG_F)) {
- void *wwpn = ha->init_cb->port_name;
-
- memcpy(vha->port_name, wwpn, WWN_SIZE);
+ memcpy(vha->port_name, ha->port_name, WWN_SIZE);
fc_host_port_name(vha->host) =
wwn_to_u64(vha->port_name);
ql_dbg(ql_dbg_init + ql_dbg_verbose,
@@ -1761,6 +1759,9 @@ global_port_update:
break;
case MBA_DPORT_DIAGNOSTICS:
+ if ((mb[1] & 0xF) == AEN_DONE_DIAG_TEST_WITH_NOERR ||
+ (mb[1] & 0xF) == AEN_DONE_DIAG_TEST_WITH_ERR)
+ vha->dport_status &= ~DPORT_DIAG_IN_PROGRESS;
ql_dbg(ql_dbg_async, vha, 0x5052,
"D-Port Diagnostics: %04x %04x %04x %04x\n",
mb[0], mb[1], mb[2], mb[3]);
@@ -2245,9 +2246,9 @@ qla24xx_els_ct_entry(scsi_qla_host_t *v, struct req_que *req,
res = DID_ERROR << 16;
}
- if (logit) {
- if (sp->remap.remapped &&
- ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_RJT) {
+ if (sp->remap.remapped &&
+ ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_RJT) {
+ if (logit) {
ql_dbg(ql_dbg_user, vha, 0x503f,
"%s IOCB Done LS_RJT hdl=%x comp_status=0x%x\n",
type, sp->handle, comp_status);
@@ -2259,18 +2260,24 @@ qla24xx_els_ct_entry(scsi_qla_host_t *v, struct req_que *req,
pkt)->total_byte_count),
e->s_id[0], e->s_id[2], e->s_id[1],
e->d_id[2], e->d_id[1], e->d_id[0]);
- } else {
- ql_log(ql_log_info, vha, 0x503f,
- "%s IOCB Done hdl=%x comp_status=0x%x\n",
- type, sp->handle, comp_status);
- ql_log(ql_log_info, vha, 0x503f,
- "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n",
- fw_status[1], fw_status[2],
- le32_to_cpu(((struct els_sts_entry_24xx *)
- pkt)->total_byte_count),
- e->s_id[0], e->s_id[2], e->s_id[1],
- e->d_id[2], e->d_id[1], e->d_id[0]);
}
+ if (sp->fcport && sp->fcport->flags & FCF_FCSP_DEVICE &&
+ sp->type == SRB_ELS_CMD_HST_NOLOGIN) {
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s rcv reject. Sched delete\n", __func__);
+ qlt_schedule_sess_for_deletion(sp->fcport);
+ }
+ } else if (logit) {
+ ql_log(ql_log_info, vha, 0x503f,
+ "%s IOCB Done hdl=%x comp_status=0x%x\n",
+ type, sp->handle, comp_status);
+ ql_log(ql_log_info, vha, 0x503f,
+ "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n",
+ fw_status[1], fw_status[2],
+ le32_to_cpu(((struct els_sts_entry_24xx *)
+ pkt)->total_byte_count),
+ e->s_id[0], e->s_id[2], e->s_id[1],
+ e->d_id[2], e->d_id[1], e->d_id[0]);
}
}
goto els_ct_done;
@@ -2639,7 +2646,7 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
}
if (unlikely(logit))
- ql_log(ql_dbg_io, fcport->vha, 0x5060,
+ ql_dbg(ql_dbg_io, fcport->vha, 0x5060,
"NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n",
sp->name, sp->handle, comp_status,
fd->transferred_length, le32_to_cpu(sts->residual_len),
@@ -3426,6 +3433,7 @@ check_scsi_status:
case CS_PORT_UNAVAILABLE:
case CS_TIMEOUT:
case CS_RESET:
+ case CS_EDIF_INV_REQ:
/*
* We are going to have the fc class block the rport
@@ -3496,7 +3504,7 @@ check_scsi_status:
out:
if (logit)
- ql_log(ql_dbg_io, fcport->vha, 0x3022,
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
"FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
comp_status, scsi_status, res, vha->host_no,
cp->device->id, cp->device->lun, fcport->d_id.b.domain,
@@ -3712,12 +3720,11 @@ void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
* Return: 0 all iocbs has arrived, xx- all iocbs have not arrived.
*/
static int qla_chk_cont_iocb_avail(struct scsi_qla_host *vha,
- struct rsp_que *rsp, response_t *pkt)
+ struct rsp_que *rsp, response_t *pkt, u32 rsp_q_in)
{
- int start_pkt_ring_index, end_pkt_ring_index, n_ring_index;
- response_t *end_pkt;
+ int start_pkt_ring_index;
+ u32 iocb_cnt = 0;
int rc = 0;
- u32 rsp_q_in;
if (pkt->entry_count == 1)
return rc;
@@ -3728,34 +3735,18 @@ static int qla_chk_cont_iocb_avail(struct scsi_qla_host *vha,
else
start_pkt_ring_index = rsp->ring_index - 1;
- if ((start_pkt_ring_index + pkt->entry_count) >= rsp->length)
- end_pkt_ring_index = start_pkt_ring_index + pkt->entry_count -
- rsp->length - 1;
+ if (rsp_q_in < start_pkt_ring_index)
+ /* q in ptr is wrapped */
+ iocb_cnt = rsp->length - start_pkt_ring_index + rsp_q_in;
else
- end_pkt_ring_index = start_pkt_ring_index + pkt->entry_count - 1;
+ iocb_cnt = rsp_q_in - start_pkt_ring_index;
- end_pkt = rsp->ring + end_pkt_ring_index;
-
- /* next pkt = end_pkt + 1 */
- n_ring_index = end_pkt_ring_index + 1;
- if (n_ring_index >= rsp->length)
- n_ring_index = 0;
-
- rsp_q_in = rsp->qpair->use_shadow_reg ? *rsp->in_ptr :
- rd_reg_dword(rsp->rsp_q_in);
-
- /* rsp_q_in is either wrapped or pointing beyond endpkt */
- if ((rsp_q_in < start_pkt_ring_index && rsp_q_in < n_ring_index) ||
- rsp_q_in >= n_ring_index)
- /* all IOCBs arrived. */
- rc = 0;
- else
+ if (iocb_cnt < pkt->entry_count)
rc = -EIO;
- ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x5091,
- "%s - ring %p pkt %p end pkt %p entry count %#x rsp_q_in %d rc %d\n",
- __func__, rsp->ring, pkt, end_pkt, pkt->entry_count,
- rsp_q_in, rc);
+ ql_dbg(ql_dbg_init, vha, 0x5091,
+ "%s - ring %p pkt %p entry count %d iocb_cnt %d rsp_q_in %d rc %d\n",
+ __func__, rsp->ring, pkt, pkt->entry_count, iocb_cnt, rsp_q_in, rc);
return rc;
}
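
The reworked qla_chk_cont_iocb_avail() above counts how many response-queue entries lie between the first entry of a multi-entry IOCB and the firmware's RSP IN pointer, wrapping at the end of the ring. A minimal standalone sketch of that arithmetic follows; it is illustrative only, not part of the patch, and the names are hypothetical.

#include <stdio.h>

/*
 * Entries available between start_index and the hardware IN pointer on a
 * circular queue of "length" entries, handling wrap-around the same way
 * the new iocb_cnt computation does.
 */
static unsigned int ring_entries_available(unsigned int start_index,
					   unsigned int q_in,
					   unsigned int length)
{
	if (q_in < start_index)		/* IN pointer wrapped past the end */
		return length - start_index + q_in;
	return q_in - start_index;
}

int main(void)
{
	/* 32-entry ring: start at 30, IN pointer wrapped to 2 -> 4 entries */
	printf("%u\n", ring_entries_available(30, 2, 32));
	/* no wrap: start at 5, IN pointer at 9 -> 4 entries */
	printf("%u\n", ring_entries_available(5, 9, 32));
	return 0;
}
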
@@ -3772,6 +3763,8 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
struct qla_hw_data *ha = vha->hw;
struct purex_entry_24xx *purex_entry;
struct purex_item *pure_item;
+ u16 rsp_in = 0, cur_ring_index;
+ int follow_inptr, is_shadow_hba;
if (!ha->flags.fw_started)
return;
@@ -3781,8 +3774,27 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
qla_cpu_update(rsp->qpair, smp_processor_id());
}
- while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
+#define __update_rsp_in(_update, _is_shadow_hba, _rsp, _rsp_in) \
+ do { \
+ if (_update) { \
+ _rsp_in = _is_shadow_hba ? *(_rsp)->in_ptr : \
+ rd_reg_dword_relaxed((_rsp)->rsp_q_in); \
+ } \
+ } while (0)
+
+ is_shadow_hba = IS_SHADOW_REG_CAPABLE(ha);
+ follow_inptr = is_shadow_hba ? ql2xrspq_follow_inptr :
+ ql2xrspq_follow_inptr_legacy;
+
+ __update_rsp_in(follow_inptr, is_shadow_hba, rsp, rsp_in);
+
+ while ((likely(follow_inptr &&
+ rsp->ring_index != rsp_in &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)) ||
+ (!follow_inptr &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)) {
pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
+ cur_ring_index = rsp->ring_index;
rsp->ring_index++;
if (rsp->ring_index == rsp->length) {
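
The hunk above makes the response-queue walk stop either when the driver's ring index catches up with the snapshotted RSP IN pointer (when ql2xrspq_follow_inptr or its legacy variant is set) or, as before, when an entry still carries the RESPONSE_PROCESSED signature. A small userspace model of that termination condition, with a fake ring and hypothetical names, is sketched here; it is an assumption-level illustration, not driver code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_LEN 8
#define RESPONSE_PROCESSED 0xDEADDEADu

struct entry { uint32_t signature; };

/* Walk the ring until the IN pointer (follow mode) or a processed entry. */
static void process_ring(struct entry *ring, unsigned int *ring_index,
			 unsigned int rsp_in, bool follow_inptr)
{
	while ((follow_inptr && *ring_index != rsp_in &&
		ring[*ring_index].signature != RESPONSE_PROCESSED) ||
	       (!follow_inptr &&
		ring[*ring_index].signature != RESPONSE_PROCESSED)) {
		printf("processing entry %u\n", *ring_index);
		ring[*ring_index].signature = RESPONSE_PROCESSED;
		*ring_index = (*ring_index + 1) % RING_LEN;
	}
}

int main(void)
{
	struct entry ring[RING_LEN] = { { 0 } };
	unsigned int idx = 0;

	process_ring(ring, &idx, 3, true);	/* stops once idx reaches 3 */
	printf("stopped at %u\n", idx);
	return 0;
}
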
@@ -3894,6 +3906,8 @@ process_err:
}
pure_item = qla27xx_copy_fpin_pkt(vha,
(void **)&pkt, &rsp);
+ __update_rsp_in(follow_inptr, is_shadow_hba,
+ rsp, rsp_in);
if (!pure_item)
break;
qla24xx_queue_purex_item(vha, pure_item,
@@ -3901,7 +3915,17 @@ process_err:
break;
case ELS_AUTH_ELS:
- if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *)pkt)) {
+ if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *)pkt, rsp_in)) {
+ /*
+ * ring_ptr and ring_index were
+ * pre-incremented above. Reset them
+ * back to current. Wait for next
+ * interrupt with all IOCBs to arrive
+ * and re-process.
+ */
+ rsp->ring_ptr = (response_t *)pkt;
+ rsp->ring_index = cur_ring_index;
+
ql_dbg(ql_dbg_init, vha, 0x5091,
"Defer processing ELS opcode %#x...\n",
purex_entry->els_frame_payload[3]);
@@ -4420,16 +4444,12 @@ msix_register_fail:
}
/* Enable MSI-X vector for response queue update for queue 0 */
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
- if (ha->msixbase && ha->mqiobase &&
- (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
- ql2xmqsupport))
- ha->mqenable = 1;
- } else
- if (ha->mqiobase &&
- (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
- ql2xmqsupport))
- ha->mqenable = 1;
+ if (IS_MQUE_CAPABLE(ha) &&
+ (ha->msixbase && ha->mqiobase && ha->max_qpairs))
+ ha->mqenable = 1;
+ else
+ ha->mqenable = 0;
+
ql_dbg(ql_dbg_multiq, vha, 0xc005,
"mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 892caf2475df..359595a64664 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -238,6 +238,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
ql_dbg(ql_dbg_mbx, vha, 0x1112,
"mbox[%d]<-0x%04x\n", cnt, *iptr);
wrt_reg_word(optr, *iptr);
+ } else {
+ wrt_reg_word(optr, 0);
}
mboxes >>= 1;
@@ -274,6 +276,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
atomic_inc(&ha->num_pend_mbx_stage3);
if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
mcp->tov * HZ)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x117a,
+ "cmd=%x Timeout.\n", command);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
if (chip_reset != ha->chip_reset) {
eeh_delay = ha->flags.eeh_busy ? 1 : 0;
@@ -286,12 +294,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
rval = QLA_ABORTED;
goto premature_exit;
}
- ql_dbg(ql_dbg_mbx, vha, 0x117a,
- "cmd=%x Timeout.\n", command);
- spin_lock_irqsave(&ha->hardware_lock, flags);
- clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
} else if (ha->flags.purge_mbox ||
chip_reset != ha->chip_reset) {
eeh_delay = ha->flags.eeh_busy ? 1 : 0;
@@ -3066,7 +3068,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
* Kernel context.
*/
int
-qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
+qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map,
+ u8 *num_entries)
{
int rval;
mbx_cmd_t mc;
@@ -3106,6 +3109,8 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
if (pos_map)
memcpy(pos_map, pmap, FCAL_MAP_SIZE);
+ if (num_entries)
+ *num_entries = pmap[0];
}
dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
@@ -6471,6 +6476,54 @@ qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
return rval;
}
+int
+qla26xx_dport_diagnostics_v2(scsi_qla_host_t *vha,
+ struct qla_dport_diag_v2 *dd, mbx_cmd_t *mcp)
+{
+ int rval;
+ dma_addr_t dd_dma;
+ uint size = sizeof(dd->buf);
+ uint16_t options = dd->options;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
+ "Entered %s.\n", __func__);
+
+ dd_dma = dma_map_single(&vha->hw->pdev->dev,
+ dd->buf, size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
+ ql_log(ql_log_warn, vha, 0x1194,
+ "Failed to map dma buffer.\n");
+ return QLA_MEMORY_ALLOC_FAILED;
+ }
+
+ memset(dd->buf, 0, size);
+
+ mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
+ mcp->mb[1] = options;
+ mcp->mb[2] = MSW(LSD(dd_dma));
+ mcp->mb[3] = LSW(LSD(dd_dma));
+ mcp->mb[6] = MSW(MSD(dd_dma));
+ mcp->mb[7] = LSW(MSD(dd_dma));
+ mcp->mb[8] = size;
+ mcp->out_mb = MBX_8 | MBX_7 | MBX_6 | MBX_3 | MBX_2 | MBX_1 | MBX_0;
+ mcp->in_mb = MBX_3 | MBX_2 | MBX_1 | MBX_0;
+ mcp->buf_size = size;
+ mcp->flags = MBX_DMA_IN;
+ mcp->tov = MBX_TOV_SECONDS * 4;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
+ "Done %s.\n", __func__);
+ }
+
+ dma_unmap_single(&vha->hw->pdev->dev, dd_dma, size, DMA_FROM_DEVICE);
+
+ return rval;
+}
+
static void qla2x00_async_mb_sp_done(srb_t *sp, int res)
{
sp->u.iocb_cmd.u.mbx.rc = res;
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 346d47b61c07..16a9f22bb860 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -166,9 +166,13 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
int ret = QLA_SUCCESS;
fc_port_t *fcport;
- if (vha->hw->flags.edif_enabled)
+ if (vha->hw->flags.edif_enabled) {
+ if (DBELL_ACTIVE(vha))
+ qla2x00_post_aen_work(vha, FCH_EVT_VENDOR_UNIQUE,
+ FCH_EVT_VENDOR_UNIQUE_VPORT_DOWN);
/* delete sessions and flush sa_indexes */
qla2x00_wait_for_sess_deletion(vha);
+ }
if (vha->hw->flags.fw_started)
ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 87c9404aa401..7450c3458be7 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -37,11 +37,6 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
(fcport->nvme_flag & NVME_FLAG_REGISTERED))
return 0;
- if (atomic_read(&fcport->state) == FCS_ONLINE)
- return 0;
-
- qla2x00_set_fcport_state(fcport, FCS_ONLINE);
-
fcport->nvme_flag &= ~NVME_FLAG_RESETTING;
memset(&req, 0, sizeof(struct nvme_fc_port_info));
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 73073fb08369..0bd0fd1042df 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -333,6 +333,21 @@ MODULE_PARM_DESC(ql2xabts_wait_nvme,
"To wait for ABTS response on I/O timeouts for NVMe. (default: 1)");
+u32 ql2xdelay_before_pci_error_handling = 5;
+module_param(ql2xdelay_before_pci_error_handling, uint, 0644);
+MODULE_PARM_DESC(ql2xdelay_before_pci_error_handling,
+	"Number of seconds to delay before qla begins PCI error self-handling (default: 5).\n");
+
+int ql2xrspq_follow_inptr = 1;
+module_param(ql2xrspq_follow_inptr, int, 0644);
+MODULE_PARM_DESC(ql2xrspq_follow_inptr,
+ "Follow RSP IN pointer for RSP updates for HBAs 27xx and newer (default: 1).");
+
+int ql2xrspq_follow_inptr_legacy = 1;
+module_param(ql2xrspq_follow_inptr_legacy, int, 0644);
+MODULE_PARM_DESC(ql2xrspq_follow_inptr_legacy,
+	"Follow RSP IN pointer for RSP updates for HBAs older than 27XX (default: 1).");
+
static void qla2x00_clear_drv_active(struct qla_hw_data *);
static void qla2x00_free_device(scsi_qla_host_t *);
static int qla2xxx_map_queues(struct Scsi_Host *shost);
@@ -1337,21 +1352,20 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
/*
* Returns: QLA_SUCCESS or QLA_FUNCTION_FAILED.
*/
-int
-qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
- uint64_t l, enum nexus_wait_type type)
+static int
+__qla2x00_eh_wait_for_pending_commands(struct qla_qpair *qpair, unsigned int t,
+ uint64_t l, enum nexus_wait_type type)
{
int cnt, match, status;
unsigned long flags;
- struct qla_hw_data *ha = vha->hw;
- struct req_que *req;
+ scsi_qla_host_t *vha = qpair->vha;
+ struct req_que *req = qpair->req;
srb_t *sp;
struct scsi_cmnd *cmd;
status = QLA_SUCCESS;
- spin_lock_irqsave(&ha->hardware_lock, flags);
- req = vha->req;
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
for (cnt = 1; status == QLA_SUCCESS &&
cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
@@ -1378,12 +1392,32 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
if (!match)
continue;
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
status = qla2x00_eh_wait_on_command(cmd);
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
}
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
+ return status;
+}
+
+int
+qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
+ uint64_t l, enum nexus_wait_type type)
+{
+ struct qla_qpair *qpair;
+ struct qla_hw_data *ha = vha->hw;
+ int i, status = QLA_SUCCESS;
+ status = __qla2x00_eh_wait_for_pending_commands(ha->base_qpair, t, l,
+ type);
+ for (i = 0; status == QLA_SUCCESS && i < ha->max_qpairs; i++) {
+ qpair = ha->queue_pair_map[i];
+ if (!qpair)
+ continue;
+ status = __qla2x00_eh_wait_for_pending_commands(qpair, t, l,
+ type);
+ }
return status;
}
@@ -1420,7 +1454,7 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
return err;
if (fcport->deleted)
- return SUCCESS;
+ return FAILED;
ql_log(ql_log_info, vha, 0x8009,
"DEVICE RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", vha->host_no,
@@ -1488,7 +1522,7 @@ qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
return err;
if (fcport->deleted)
- return SUCCESS;
+ return FAILED;
ql_log(ql_log_info, vha, 0x8009,
"TARGET RESET ISSUED nexus=%ld:%d cmd=%p.\n", vha->host_no,
@@ -5472,7 +5506,7 @@ qla2x00_do_work(struct scsi_qla_host *vha)
e->u.fcport.fcport, false);
break;
case QLA_EVT_SA_REPLACE:
- qla24xx_issue_sa_replace_iocb(vha, e);
+ rc = qla24xx_issue_sa_replace_iocb(vha, e);
break;
}
@@ -7238,6 +7272,44 @@ static void qla_heart_beat(struct scsi_qla_host *vha, u16 dpc_started)
}
}
+static void qla_wind_down_chip(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!ha->flags.eeh_busy)
+ return;
+ if (ha->pci_error_state)
+ /* system is trying to recover */
+ return;
+
+ /*
+ * Current system is not handling PCIE error. At this point, this is
+ * best effort to wind down the adapter.
+ */
+ if (time_after_eq(jiffies, ha->eeh_jif + ql2xdelay_before_pci_error_handling * HZ) &&
+ !ha->flags.eeh_flush) {
+ ql_log(ql_log_info, vha, 0x9009,
+ "PCI Error detected, attempting to reset hardware.\n");
+
+ ha->isp_ops->reset_chip(vha);
+ ha->isp_ops->disable_intrs(ha);
+
+ ha->flags.eeh_flush = EEH_FLUSH_RDY;
+ ha->eeh_jif = jiffies;
+
+ } else if (ha->flags.eeh_flush == EEH_FLUSH_RDY &&
+ time_after_eq(jiffies, ha->eeh_jif + 5 * HZ)) {
+ pci_clear_master(ha->pdev);
+
+ /* flush all command */
+ qla2x00_abort_isp_cleanup(vha);
+ ha->flags.eeh_flush = EEH_FLUSH_DONE;
+
+ ql_log(ql_log_info, vha, 0x900a,
+ "PCI Error handling complete, all IOs aborted.\n");
+ }
+}
+
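
qla_wind_down_chip() above stages recovery on two jiffies deadlines: once ql2xdelay_before_pci_error_handling seconds pass with no PCI error recovery in progress the chip is reset and interrupts are disabled, and five seconds after that all outstanding I/O is flushed. The following is a simplified userspace model of that two-stage state machine; the names are hypothetical and plain unsigned arithmetic stands in for time_after_eq().

#include <stdio.h>

enum eeh_flush_state { EEH_FLUSH_NONE, EEH_FLUSH_RDY, EEH_FLUSH_DONE };

static enum eeh_flush_state step(enum eeh_flush_state state,
				 unsigned long now, unsigned long *stamp,
				 unsigned long delay_secs, unsigned long hz)
{
	switch (state) {
	case EEH_FLUSH_NONE:
		if (now - *stamp >= delay_secs * hz) {
			printf("reset chip, disable interrupts\n");
			*stamp = now;		/* restart the 5 second clock */
			return EEH_FLUSH_RDY;
		}
		break;
	case EEH_FLUSH_RDY:
		if (now - *stamp >= 5 * hz) {
			printf("flush all outstanding commands\n");
			return EEH_FLUSH_DONE;
		}
		break;
	case EEH_FLUSH_DONE:
		break;
	}
	return state;
}

int main(void)
{
	unsigned long hz = 100, stamp = 0;
	enum eeh_flush_state s = EEH_FLUSH_NONE;

	/* one tick per "second"; the wind-down completes after ~10 seconds */
	for (unsigned long t = 0; t <= 12 * hz; t += hz)
		s = step(s, t, &stamp, 5, hz);
	return 0;
}
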
/**************************************************************************
* qla2x00_timer
*
@@ -7261,6 +7333,8 @@ qla2x00_timer(struct timer_list *t)
fc_port_t *fcport = NULL;
if (ha->flags.eeh_busy) {
+ qla_wind_down_chip(vha);
+
ql_dbg(ql_dbg_timer, vha, 0x6000,
"EEH = %d, restarting timer.\n",
ha->flags.eeh_busy);
@@ -7841,6 +7915,9 @@ void qla_pci_set_eeh_busy(struct scsi_qla_host *vha)
spin_lock_irqsave(&base_vha->work_lock, flags);
if (!ha->flags.eeh_busy) {
+ ha->eeh_jif = jiffies;
+ ha->flags.eeh_flush = 0;
+
ha->flags.eeh_busy = 1;
do_cleanup = true;
}
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index cb97f625970d..62666df1a59e 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -981,22 +981,6 @@ void qlt_free_session_done(struct work_struct *work)
sess->send_els_logo);
if (!IS_SW_RESV_ADDR(sess->d_id)) {
- if (ha->flags.edif_enabled &&
- (!own || own->iocb.u.isp24.status_subcode == ELS_PLOGI)) {
- sess->edif.authok = 0;
- if (!ha->flags.host_shutting_down) {
- ql_dbg(ql_dbg_edif, vha, 0x911e,
- "%s wwpn %8phC calling qla2x00_release_all_sadb\n",
- __func__, sess->port_name);
- qla2x00_release_all_sadb(vha, sess);
- } else {
- ql_dbg(ql_dbg_edif, vha, 0x911e,
- "%s bypassing release_all_sadb\n",
- __func__);
- }
- qla_edif_clear_appdata(vha, sess);
- qla_edif_sess_down(vha, sess);
- }
qla2x00_mark_device_lost(vha, sess, 0);
if (sess->send_els_logo) {
@@ -1042,6 +1026,25 @@ void qlt_free_session_done(struct work_struct *work)
sess->nvme_flag |= NVME_FLAG_DELETING;
qla_nvme_unregister_remote_port(sess);
}
+
+ if (ha->flags.edif_enabled &&
+ (!own || (own &&
+ own->iocb.u.isp24.status_subcode == ELS_PLOGI))) {
+ sess->edif.authok = 0;
+ if (!ha->flags.host_shutting_down) {
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s wwpn %8phC calling qla2x00_release_all_sadb\n",
+ __func__, sess->port_name);
+ qla2x00_release_all_sadb(vha, sess);
+ } else {
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s bypassing release_all_sadb\n",
+ __func__);
+ }
+
+ qla_edif_clear_appdata(vha, sess);
+ qla_edif_sess_down(vha, sess);
+ }
}
/*
@@ -6932,14 +6935,8 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha)
if (ha->flags.msix_enabled) {
if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
- if (IS_QLA2071(ha)) {
- /* 4 ports Baker: Enable Interrupt Handshake */
- icb->msix_atio = 0;
- icb->firmware_options_2 |= cpu_to_le32(BIT_26);
- } else {
- icb->msix_atio = cpu_to_le16(msix->entry);
- icb->firmware_options_2 &= cpu_to_le32(~BIT_26);
- }
+ icb->msix_atio = cpu_to_le16(msix->entry);
+ icb->firmware_options_2 &= cpu_to_le32(~BIT_26);
ql_dbg(ql_dbg_init, vha, 0xf072,
"Registering ICB vector 0x%x for atio que.\n",
msix->entry);
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index b09d7d2080c0..f3257d46b6d2 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -6,9 +6,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.02.07.400-k"
+#define QLA2XXX_VERSION "10.02.07.800-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 2
#define QLA_DRIVER_PATCH_VER 7
-#define QLA_DRIVER_BETA_VER 400
+#define QLA_DRIVER_BETA_VER 800
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index c59eac7a32f2..086ec5b5862d 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -586,10 +586,13 @@ EXPORT_SYMBOL(scsi_device_get);
*/
void scsi_device_put(struct scsi_device *sdev)
{
- struct module *mod = sdev->host->hostt->module;
-
+ /*
+ * Decreasing the module reference count before the device reference
+ * count is safe since scsi_remove_host() only returns after all
+ * devices have been removed.
+ */
+ module_put(sdev->host->hostt->module);
put_device(&sdev->sdev_gendev);
- module_put(mod);
}
EXPORT_SYMBOL(scsi_device_put);
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index b776cefc7cda..448748e3fba5 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -463,14 +463,12 @@ static void scsi_report_sense(struct scsi_device *sdev,
evt_type = SDEV_EVT_LUN_CHANGE_REPORTED;
scsi_report_lun_change(sdev);
sdev_printk(KERN_WARNING, sdev,
- "Warning! Received an indication that the "
"LUN assignments on this target have "
"changed. The Linux SCSI layer does not "
"automatically remap LUN assignments.\n");
} else if (sshdr->asc == 0x3f)
sdev_printk(KERN_WARNING, sdev,
- "Warning! Received an indication that the "
- "operating parameters on this target have "
+ "Operating parameters on this target have "
"changed. The Linux SCSI layer does not "
"automatically adjust these parameters.\n");
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 17a617db9ae0..ef08029a0079 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -75,13 +75,6 @@ int scsi_init_sense_cache(struct Scsi_Host *shost)
return ret;
}
-/*
- * When to reinvoke queueing after a resource shortage. It's 3 msecs to
- * not change behaviour from the previous unplug mechanism, experimentation
- * may prove this needs changing.
- */
-#define SCSI_QUEUE_DELAY 3
-
static void
scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
{
@@ -118,7 +111,7 @@ scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
}
}
-static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
+static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd, unsigned long msecs)
{
struct request *rq = scsi_cmd_to_rq(cmd);
@@ -128,7 +121,12 @@ static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
} else {
WARN_ON_ONCE(true);
}
- blk_mq_requeue_request(rq, true);
+
+ if (msecs) {
+ blk_mq_requeue_request(rq, false);
+ blk_mq_delay_kick_requeue_list(rq->q, msecs);
+ } else
+ blk_mq_requeue_request(rq, true);
}
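
With the new msecs argument, a caller of scsi_mq_requeue_cmd() either requeues and kicks the requeue list immediately (msecs == 0) or requeues without kicking and has the list kicked after the given delay, which the ALUA transition handling later in this patch uses with a 1000 ms delay. The snippet below is only a stand-in sketch of that caller-side choice; the helper here just prints what the real code would ask the block layer to do.

#include <stdio.h>

#define ALUA_TRANSITION_REPREP_DELAY 1000	/* msecs */

/* Stand-in for scsi_mq_requeue_cmd(): print the requested requeue mode. */
static void requeue_cmd(const char *why, unsigned long msecs)
{
	if (msecs)
		printf("%s: requeue, kick requeue list after %lu ms\n",
		       why, msecs);
	else
		printf("%s: requeue and kick the requeue list now\n", why);
}

int main(void)
{
	requeue_cmd("reprep", 0);
	requeue_cmd("ALUA state transition", ALUA_TRANSITION_REPREP_DELAY);
	return 0;
}
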
/**
@@ -658,14 +656,6 @@ static unsigned int scsi_rq_err_bytes(const struct request *rq)
return bytes;
}
-/* Helper for scsi_io_completion() when "reprep" action required. */
-static void scsi_io_completion_reprep(struct scsi_cmnd *cmd,
- struct request_queue *q)
-{
- /* A new command will be prepared and issued. */
- scsi_mq_requeue_cmd(cmd);
-}
-
static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd)
{
struct request *req = scsi_cmd_to_rq(cmd);
@@ -683,14 +673,21 @@ static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd)
return false;
}
+/*
+ * When ALUA transition state is returned, reprep the cmd to
+ * use the ALUA handler's transition timeout. Delay the reprep
+ * 1 sec to avoid aggressive retries of the target in that
+ * state.
+ */
+#define ALUA_TRANSITION_REPREP_DELAY 1000
+
/* Helper for scsi_io_completion() when special action required. */
static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
{
- struct request_queue *q = cmd->device->request_queue;
struct request *req = scsi_cmd_to_rq(cmd);
int level = 0;
- enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
- ACTION_DELAYED_RETRY} action;
+ enum {ACTION_FAIL, ACTION_REPREP, ACTION_DELAYED_REPREP,
+ ACTION_RETRY, ACTION_DELAYED_RETRY} action;
struct scsi_sense_hdr sshdr;
bool sense_valid;
bool sense_current = true; /* false implies "deferred sense" */
@@ -779,8 +776,8 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
action = ACTION_DELAYED_RETRY;
break;
case 0x0a: /* ALUA state transition */
- blk_stat = BLK_STS_TRANSPORT;
- fallthrough;
+ action = ACTION_DELAYED_REPREP;
+ break;
default:
action = ACTION_FAIL;
break;
@@ -839,7 +836,10 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
return;
fallthrough;
case ACTION_REPREP:
- scsi_io_completion_reprep(cmd, q);
+ scsi_mq_requeue_cmd(cmd, 0);
+ break;
+ case ACTION_DELAYED_REPREP:
+ scsi_mq_requeue_cmd(cmd, ALUA_TRANSITION_REPREP_DELAY);
break;
case ACTION_RETRY:
/* Retry the same command immediately */
@@ -933,7 +933,7 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
* command block will be released and the queue function will be goosed. If we
* are not done then we have to figure out what to do next:
*
- * a) We can call scsi_io_completion_reprep(). The request will be
+ * a) We can call scsi_mq_requeue_cmd(). The request will be
* unprepared and put back on the queue. Then a new command will
* be created for it. This should be used if we made forward
* progress, or if we want to switch from READ(10) to READ(6) for
@@ -949,7 +949,6 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
int result = cmd->result;
- struct request_queue *q = cmd->device->request_queue;
struct request *req = scsi_cmd_to_rq(cmd);
blk_status_t blk_stat = BLK_STS_OK;
@@ -986,7 +985,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
* request just queue the command up again.
*/
if (likely(result == 0))
- scsi_io_completion_reprep(cmd, q);
+ scsi_mq_requeue_cmd(cmd, 0);
else
scsi_io_completion_action(cmd, result);
}
@@ -1549,7 +1548,6 @@ static blk_status_t scsi_prepare_cmd(struct request *req)
scsi_init_command(sdev, cmd);
cmd->eh_eflags = 0;
- cmd->allowed = 0;
cmd->prot_type = 0;
cmd->prot_flags = 0;
cmd->submitter = 0;
@@ -1600,6 +1598,8 @@ static blk_status_t scsi_prepare_cmd(struct request *req)
return ret;
}
+ /* Usually overridden by the ULP */
+ cmd->allowed = 0;
memset(cmd->cmnd, 0, sizeof(cmd->cmnd));
return scsi_cmd_to_driver(cmd)->init_command(cmd);
}
@@ -1648,6 +1648,13 @@ static void scsi_mq_put_budget(struct request_queue *q, int budget_token)
sbitmap_put(&sdev->budget_map, budget_token);
}
+/*
+ * When to reinvoke queueing after a resource shortage. It's 3 msecs to
+ * not change behaviour from the previous unplug mechanism, experimentation
+ * may prove this needs changing.
+ */
+#define SCSI_QUEUE_DELAY 3
+
static int scsi_mq_get_budget(struct request_queue *q)
{
struct scsi_device *sdev = q->queuedata;
@@ -1876,10 +1883,6 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
}
- if (dev->dma_mask) {
- shost->max_sectors = min_t(unsigned int, shost->max_sectors,
- dma_max_mapping_size(dev) >> SECTOR_SHIFT);
- }
blk_queue_max_hw_sectors(q, shost->max_sectors);
blk_queue_segment_boundary(q, shost->dma_boundary);
dma_set_seg_boundary(dev, shost->dma_boundary);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 91ac901a6682..ac6059702d13 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -406,9 +406,14 @@ static void scsi_target_destroy(struct scsi_target *starget)
static void scsi_target_dev_release(struct device *dev)
{
struct device *parent = dev->parent;
+ struct Scsi_Host *shost = dev_to_shost(parent);
struct scsi_target *starget = to_scsi_target(dev);
kfree(starget);
+
+ if (atomic_dec_return(&shost->target_count) == 0)
+ wake_up(&shost->targets_wq);
+
put_device(parent);
}
@@ -521,6 +526,10 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
starget->state = STARGET_CREATED;
starget->scsi_level = SCSI_2;
starget->max_target_blocked = SCSI_DEFAULT_TARGET_BLOCKED;
+ init_waitqueue_head(&starget->sdev_wq);
+
+ atomic_inc(&shost->target_count);
+
retry:
spin_lock_irqsave(shost->host_lock, flags);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index aa70d9282161..9dad2fd5297f 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -443,18 +443,15 @@ static void scsi_device_cls_release(struct device *class_dev)
static void scsi_device_dev_release_usercontext(struct work_struct *work)
{
- struct scsi_device *sdev;
+ struct scsi_device *sdev = container_of(work, struct scsi_device,
+ ew.work);
+ struct scsi_target *starget = sdev->sdev_target;
struct device *parent;
struct list_head *this, *tmp;
struct scsi_vpd *vpd_pg80 = NULL, *vpd_pg83 = NULL;
struct scsi_vpd *vpd_pg0 = NULL, *vpd_pg89 = NULL;
struct scsi_vpd *vpd_pgb0 = NULL, *vpd_pgb1 = NULL, *vpd_pgb2 = NULL;
unsigned long flags;
- struct module *mod;
-
- sdev = container_of(work, struct scsi_device, ew.work);
-
- mod = sdev->host->hostt->module;
scsi_dh_release_device(sdev);
@@ -516,19 +513,16 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
kfree(sdev->inquiry);
kfree(sdev);
+ if (starget && atomic_dec_return(&starget->sdev_count) == 0)
+ wake_up(&starget->sdev_wq);
+
if (parent)
put_device(parent);
- module_put(mod);
}
static void scsi_device_dev_release(struct device *dev)
{
struct scsi_device *sdp = to_scsi_device(dev);
-
- /* Set module pointer as NULL in case of module unloading */
- if (!try_module_get(sdp->host->hostt->module))
- sdp->host->hostt->module = NULL;
-
execute_in_process_context(scsi_device_dev_release_usercontext,
&sdp->ew);
}
@@ -1535,6 +1529,14 @@ static void __scsi_remove_target(struct scsi_target *starget)
goto restart;
}
spin_unlock_irqrestore(shost->host_lock, flags);
+
+ /*
+ * After scsi_remove_target() returns its caller can remove resources
+ * associated with @starget, e.g. an rport or session. Wait until all
+ * devices associated with @starget have been removed to prevent that
+ * a SCSI error handling callback function triggers a use-after-free.
+ */
+ wait_event(starget->sdev_wq, atomic_read(&starget->sdev_count) == 0);
}
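
The wait added above pairs with the atomic_dec_return()/wake_up() added to scsi_device_dev_release_usercontext(): the last device release wakes whoever is tearing down the target, so the target's resources cannot be freed while a device still references them. A userspace sketch of that wait-for-last-put pattern, with pthread condition variables standing in for wait_event()/wake_up(), is shown here for illustration only.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int sdev_count = 3;

/* Model of a device release: drop the count, wake the waiter on last put. */
static void *release_one(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	if (--sdev_count == 0)
		pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t[3];

	for (int i = 0; i < 3; i++)
		pthread_create(&t[i], NULL, release_one, NULL);

	/* Model of wait_event(starget->sdev_wq, sdev_count == 0) */
	pthread_mutex_lock(&lock);
	while (sdev_count != 0)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	printf("all devices released, safe to free the target\n");

	for (int i = 0; i < 3; i++)
		pthread_join(t[i], NULL);
	return 0;
}
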
/**
@@ -1645,6 +1647,9 @@ void scsi_sysfs_device_initialize(struct scsi_device *sdev)
list_add_tail(&sdev->same_target_siblings, &starget->devices);
list_add_tail(&sdev->siblings, &shost->__devices);
spin_unlock_irqrestore(shost->host_lock, flags);
+
+ atomic_inc(&starget->sdev_count);
+
/*
* device can now only be removed via __scsi_remove_device() so hold
* the target. Target will be held in CREATED state until something
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 5d21f07456c6..cd3db9684e52 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1980,7 +1980,7 @@ static void __iscsi_unbind_session(struct work_struct *work)
scsi_remove_target(&session->dev);
if (session->ida_used)
- ida_simple_remove(&iscsi_sess_ida, target_id);
+ ida_free(&iscsi_sess_ida, target_id);
unbind_session_exit:
iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION);
@@ -2049,7 +2049,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
return -ENOMEM;
if (target_id == ISCSI_MAX_TARGET) {
- id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&iscsi_sess_ida, GFP_KERNEL);
if (id < 0) {
iscsi_cls_session_printk(KERN_ERR, session,
@@ -2088,7 +2088,7 @@ release_dev:
device_del(&session->dev);
release_ida:
if (session->ida_used)
- ida_simple_remove(&iscsi_sess_ida, session->target_id);
+ ida_free(&iscsi_sess_ida, session->target_id);
destroy_wq:
destroy_workqueue(session->workq);
return err;
@@ -2143,8 +2143,6 @@ static int iscsi_iter_destroy_conn_fn(struct device *dev, void *data)
return 0;
iscsi_remove_conn(iscsi_dev_to_conn(dev));
- iscsi_put_conn(iscsi_dev_to_conn(dev));
-
return 0;
}
@@ -2264,18 +2262,20 @@ static void iscsi_if_disconnect_bound_ep(struct iscsi_cls_conn *conn,
}
}
-static int iscsi_if_stop_conn(struct iscsi_transport *transport,
- struct iscsi_uevent *ev)
+static int iscsi_if_stop_conn(struct iscsi_cls_conn *conn, int flag)
{
- int flag = ev->u.stop_conn.flag;
- struct iscsi_cls_conn *conn;
-
- conn = iscsi_conn_lookup(ev->u.stop_conn.sid, ev->u.stop_conn.cid);
- if (!conn)
- return -EINVAL;
-
ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop.\n");
/*
+	 * For offload, iscsid may not know about the ep, e.g. when iscsid has
+	 * been restarted, or for a kernel-based session shutdown where iscsid
+	 * is not even up. For these cases, we do the disconnect now.
+ */
+ mutex_lock(&conn->ep_mutex);
+ if (conn->ep)
+ iscsi_if_disconnect_bound_ep(conn, conn->ep, true);
+ mutex_unlock(&conn->ep_mutex);
+
+ /*
* If this is a termination we have to call stop_conn with that flag
* so the correct states get set. If we haven't run the work yet try to
* avoid the extra run.
@@ -2285,16 +2285,6 @@ static int iscsi_if_stop_conn(struct iscsi_transport *transport,
iscsi_stop_conn(conn, flag);
} else {
/*
- * For offload, when iscsid is restarted it won't know about
- * existing endpoints so it can't do a ep_disconnect. We clean
- * it up here for userspace.
- */
- mutex_lock(&conn->ep_mutex);
- if (conn->ep)
- iscsi_if_disconnect_bound_ep(conn, conn->ep, true);
- mutex_unlock(&conn->ep_mutex);
-
- /*
* Figure out if it was the kernel or userspace initiating this.
*/
spin_lock_irq(&conn->lock);
@@ -2349,6 +2339,55 @@ static void iscsi_cleanup_conn_work_fn(struct work_struct *work)
ISCSI_DBG_TRANS_CONN(conn, "cleanup done.\n");
}
+static int iscsi_iter_force_destroy_conn_fn(struct device *dev, void *data)
+{
+ struct iscsi_transport *transport;
+ struct iscsi_cls_conn *conn;
+
+ if (!iscsi_is_conn_dev(dev))
+ return 0;
+
+ conn = iscsi_dev_to_conn(dev);
+ transport = conn->transport;
+
+ if (READ_ONCE(conn->state) != ISCSI_CONN_DOWN)
+ iscsi_if_stop_conn(conn, STOP_CONN_TERM);
+
+ transport->destroy_conn(conn);
+ return 0;
+}
+
+/**
+ * iscsi_force_destroy_session - destroy a session from the kernel
+ * @session: session to destroy
+ *
+ * Force the destruction of a session from the kernel. This should only be
+ * used when userspace is no longer running during system shutdown.
+ */
+void iscsi_force_destroy_session(struct iscsi_cls_session *session)
+{
+ struct iscsi_transport *transport = session->transport;
+ unsigned long flags;
+
+ WARN_ON_ONCE(system_state == SYSTEM_RUNNING);
+
+ spin_lock_irqsave(&sesslock, flags);
+ if (list_empty(&session->sess_list)) {
+ spin_unlock_irqrestore(&sesslock, flags);
+ /*
+ * Conn/ep is already freed. Session is being torn down via
+ * async path. For shutdown we don't care about it so return.
+ */
+ return;
+ }
+ spin_unlock_irqrestore(&sesslock, flags);
+
+ device_for_each_child(&session->dev, NULL,
+ iscsi_iter_force_destroy_conn_fn);
+ transport->destroy_session(session);
+}
+EXPORT_SYMBOL_GPL(iscsi_force_destroy_session);
+
void iscsi_free_session(struct iscsi_cls_session *session)
{
ISCSI_DBG_TRANS_SESSION(session, "Freeing session\n");
@@ -3720,7 +3759,12 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
case ISCSI_UEVENT_DESTROY_CONN:
return iscsi_if_destroy_conn(transport, ev);
case ISCSI_UEVENT_STOP_CONN:
- return iscsi_if_stop_conn(transport, ev);
+ conn = iscsi_conn_lookup(ev->u.stop_conn.sid,
+ ev->u.stop_conn.cid);
+ if (!conn)
+ return -EINVAL;
+
+ return iscsi_if_stop_conn(conn, ev->u.stop_conn.flag);
}
/*
@@ -4812,7 +4856,7 @@ free_priv:
}
EXPORT_SYMBOL_GPL(iscsi_register_transport);
-int iscsi_unregister_transport(struct iscsi_transport *tt)
+void iscsi_unregister_transport(struct iscsi_transport *tt)
{
struct iscsi_internal *priv;
unsigned long flags;
@@ -4835,8 +4879,6 @@ int iscsi_unregister_transport(struct iscsi_transport *tt)
sysfs_remove_group(&priv->dev.kobj, &iscsi_transport_group);
device_unregister(&priv->dev);
mutex_unlock(&rx_queue_mutex);
-
- return 0;
}
EXPORT_SYMBOL_GPL(iscsi_unregister_transport);
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 12bff64dade6..2f88c61216ee 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -225,6 +225,7 @@ static int sas_host_setup(struct transport_container *tc, struct device *dev,
{
struct Scsi_Host *shost = dev_to_shost(dev);
struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
+ struct device *dma_dev = shost->dma_dev;
INIT_LIST_HEAD(&sas_host->rphy_list);
mutex_init(&sas_host->lock);
@@ -236,6 +237,11 @@ static int sas_host_setup(struct transport_container *tc, struct device *dev,
dev_printk(KERN_ERR, dev, "fail to a bsg device %d\n",
shost->host_no);
+ if (dma_dev->dma_mask) {
+ shost->opt_sectors = min_t(unsigned int, shost->max_sectors,
+ dma_opt_mapping_size(dma_dev) >> SECTOR_SHIFT);
+ }
+
return 0;
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index eb02d939dd44..eb76ba055021 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -3296,6 +3296,13 @@ static int sd_revalidate_disk(struct gendisk *disk)
(sector_t)BLK_DEF_MAX_SECTORS);
}
+ /*
+ * Limit default to SCSI host optimal sector limit if set. There may be
+	 * an impact on performance when the size of a request exceeds this
+ * host limit.
+ */
+ rw_max = min_not_zero(rw_max, sdp->host->opt_sectors);
+
/* Do not exceed controller limit */
rw_max = min(rw_max, queue_max_hw_sectors(q));
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 118c7b4a8af2..340b050ad28d 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -195,7 +195,7 @@ static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
static Sg_fd *sg_add_sfp(Sg_device * sdp);
static void sg_remove_sfp(struct kref *);
-static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
+static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id, bool *busy);
static Sg_request *sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
static Sg_device *sg_get_dev(int dev);
@@ -444,6 +444,7 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
Sg_fd *sfp;
Sg_request *srp;
int req_pack_id = -1;
+ bool busy;
sg_io_hdr_t *hp;
struct sg_header *old_hdr;
int retval;
@@ -466,20 +467,16 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
if (retval)
return retval;
- srp = sg_get_rq_mark(sfp, req_pack_id);
+ srp = sg_get_rq_mark(sfp, req_pack_id, &busy);
if (!srp) { /* now wait on packet to arrive */
- if (atomic_read(&sdp->detaching))
- return -ENODEV;
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
retval = wait_event_interruptible(sfp->read_wait,
- (atomic_read(&sdp->detaching) ||
- (srp = sg_get_rq_mark(sfp, req_pack_id))));
- if (atomic_read(&sdp->detaching))
- return -ENODEV;
- if (retval)
- /* -ERESTARTSYS as signal hit process */
- return retval;
+ ((srp = sg_get_rq_mark(sfp, req_pack_id, &busy)) ||
+ (!busy && atomic_read(&sdp->detaching))));
+ if (!srp)
+ /* signal or detaching */
+ return retval ? retval : -ENODEV;
}
if (srp->header.interface_id != '\0')
return sg_new_read(sfp, buf, count, srp);
@@ -940,9 +937,7 @@ sg_ioctl_common(struct file *filp, Sg_device *sdp, Sg_fd *sfp,
if (result < 0)
return result;
result = wait_event_interruptible(sfp->read_wait,
- (srp_done(sfp, srp) || atomic_read(&sdp->detaching)));
- if (atomic_read(&sdp->detaching))
- return -ENODEV;
+ srp_done(sfp, srp));
write_lock_irq(&sfp->rq_list_lock);
if (srp->done) {
srp->done = 2;
@@ -2079,19 +2074,28 @@ sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
}
static Sg_request *
-sg_get_rq_mark(Sg_fd * sfp, int pack_id)
+sg_get_rq_mark(Sg_fd * sfp, int pack_id, bool *busy)
{
Sg_request *resp;
unsigned long iflags;
+ *busy = false;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
list_for_each_entry(resp, &sfp->rq_list, entry) {
- /* look for requests that are ready + not SG_IO owned */
- if ((1 == resp->done) && (!resp->sg_io_owned) &&
+ /* look for requests that are not SG_IO owned */
+ if ((!resp->sg_io_owned) &&
((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
- resp->done = 2; /* guard against other readers */
- write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
- return resp;
+ switch (resp->done) {
+ case 0: /* request active */
+ *busy = true;
+ break;
+ case 1: /* request done; response ready to return */
+ resp->done = 2; /* guard against other readers */
+ write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ return resp;
+ case 2: /* response already being returned */
+ break;
+ }
}
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
@@ -2145,6 +2149,15 @@ sg_remove_request(Sg_fd * sfp, Sg_request * srp)
res = 1;
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+
+ /*
+ * If the device is detaching, wakeup any readers in case we just
+ * removed the last response, which would leave nothing for them to
+ * return other than -ENODEV.
+ */
+ if (unlikely(atomic_read(&sfp->parentdp->detaching)))
+ wake_up_interruptible_all(&sfp->read_wait);
+
return res;
}
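
The two hunks above make sg_get_rq_mark() report, through a busy flag, whether a matching request is still in flight, and have sg_remove_request() wake readers when the device is detaching so a blocked read can return -ENODEV only once nothing remains pending. A small userspace model of the three-way done-state scan follows; the types and names are hypothetical and it is illustrative only.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct req { int done; int pack_id; };	/* 0 active, 1 ready, 2 claimed */

/* Return the first claimable request, noting whether any is still active. */
static struct req *find_ready(struct req *list, size_t n, int pack_id,
			      bool *busy)
{
	*busy = false;
	for (size_t i = 0; i < n; i++) {
		if (pack_id != -1 && list[i].pack_id != pack_id)
			continue;
		switch (list[i].done) {
		case 0:
			*busy = true;		/* request still in flight */
			break;
		case 1:
			list[i].done = 2;	/* guard against other readers */
			return &list[i];
		case 2:
			break;			/* another reader owns it */
		}
	}
	return NULL;
}

int main(void)
{
	struct req list[] = { { 0, 7 }, { 1, 7 } };
	bool busy;
	struct req *r = find_ready(list, 2, 7, &busy);

	printf("found=%d busy=%d\n", r != NULL, busy);
	return 0;
}
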
diff --git a/drivers/scsi/smartpqi/Kconfig b/drivers/scsi/smartpqi/Kconfig
index 6f83e2df4d64..973d240649ab 100644
--- a/drivers/scsi/smartpqi/Kconfig
+++ b/drivers/scsi/smartpqi/Kconfig
@@ -1,7 +1,7 @@
#
# Kernel configuration file for the SMARTPQI
#
-# Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
+# Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
# Copyright (c) 2017-2018 Microsemi Corporation
# Copyright (c) 2016 Microsemi Corporation
# Copyright (c) 2016 PMC-Sierra, Inc.
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index 2e40320129c0..e550b12e525a 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* driver for Microchip PQI-based storage controllers
- * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
@@ -293,7 +293,8 @@ struct pqi_raid_path_request {
u8 additional_cdb_bytes_usage : 3;
u8 reserved5 : 3;
u8 cdb[16];
- u8 reserved6[12];
+ u8 reserved6[11];
+ u8 ml_device_lun_number;
__le32 timeout;
struct pqi_sg_descriptor sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
};
@@ -467,7 +468,8 @@ struct pqi_task_management_request {
struct pqi_iu_header header;
__le16 request_id;
__le16 nexus_id;
- u8 reserved[2];
+ u8 reserved;
+ u8 ml_device_lun_number;
__le16 timeout;
u8 lun_number[8];
__le16 protocol_specific;
@@ -708,6 +710,7 @@ typedef u32 pqi_index_t;
#define SOP_TMF_COMPLETE 0x0
#define SOP_TMF_REJECTED 0x4
#define SOP_TMF_FUNCTION_SUCCEEDED 0x8
+#define SOP_RC_INCORRECT_LOGICAL_UNIT 0x9
/* additional CDB bytes usage field codes */
#define SOP_ADDITIONAL_CDB_BYTES_0 0 /* 16-byte CDB */
@@ -863,7 +866,8 @@ struct pqi_config_table_firmware_features {
#define PQI_FIRMWARE_FEATURE_UNIQUE_WWID_IN_REPORT_PHYS_LUN 16
#define PQI_FIRMWARE_FEATURE_FW_TRIAGE 17
#define PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5 18
-#define PQI_FIRMWARE_FEATURE_MAXIMUM 18
+#define PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT 21
+#define PQI_FIRMWARE_FEATURE_MAXIMUM 21
struct pqi_config_table_debug {
struct pqi_config_table_section_header header;
@@ -1081,6 +1085,8 @@ struct pqi_stream_data {
u32 last_accessed;
};
+#define PQI_MAX_LUNS_PER_DEVICE 256
+
struct pqi_scsi_dev {
int devtype; /* as reported by INQUIRY command */
u8 device_type; /* as reported by */
@@ -1124,6 +1130,7 @@ struct pqi_scsi_dev {
u8 phy_id;
u8 ncq_prio_enable;
u8 ncq_prio_support;
+ u8 multi_lun_device_lun_count;
bool raid_bypass_configured; /* RAID bypass configured */
bool raid_bypass_enabled; /* RAID bypass enabled */
u32 next_bypass_group[RAID_MAP_MAX_DATA_DISKS_PER_ROW];
@@ -1139,7 +1146,7 @@ struct pqi_scsi_dev {
struct list_head delete_list_entry;
struct pqi_stream_data stream_data[NUM_STREAMS_PER_LUN];
- atomic_t scsi_cmds_outstanding;
+ atomic_t scsi_cmds_outstanding[PQI_MAX_LUNS_PER_DEVICE];
atomic_t raid_bypass_cnt;
};
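
Making scsi_cmds_outstanding a per-LUN array lets later hunks in this patch wait on, and report, outstanding commands for a single LUN rather than the whole device. A trivial sketch of that indexing scheme, with plain ints in place of atomics and hypothetical names:

#include <stdio.h>

#define MAX_LUNS 256

struct dev { int cmds_outstanding[MAX_LUNS]; };

static void submit(struct dev *d, unsigned int lun)
{
	d->cmds_outstanding[lun]++;	/* atomic_inc() in the driver */
}

static void complete(struct dev *d, unsigned int lun)
{
	d->cmds_outstanding[lun]--;	/* atomic_dec() in the driver */
}

int main(void)
{
	struct dev d = { { 0 } };

	submit(&d, 0);
	submit(&d, 3);
	complete(&d, 0);
	printf("lun0=%d lun3=%d\n", d.cmds_outstanding[0], d.cmds_outstanding[3]);
	return 0;
}
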
@@ -1262,6 +1269,12 @@ struct pqi_event {
#define PQI_CTRL_PRODUCT_REVISION_A 0
#define PQI_CTRL_PRODUCT_REVISION_B 1
+enum pqi_ctrl_removal_state {
+ PQI_CTRL_PRESENT = 0,
+ PQI_CTRL_GRACEFUL_REMOVAL,
+ PQI_CTRL_SURPRISE_REMOVAL
+};
+
struct pqi_ctrl_info {
unsigned int ctrl_id;
struct pci_dev *pci_dev;
@@ -1332,12 +1345,13 @@ struct pqi_ctrl_info {
u8 tmf_iu_timeout_supported : 1;
u8 firmware_triage_supported : 1;
u8 rpl_extended_format_4_5_supported : 1;
+ u8 multi_lun_device_supported : 1;
u8 enable_r1_writes : 1;
u8 enable_r5_writes : 1;
u8 enable_r6_writes : 1;
u8 lv_drive_type_mix_valid : 1;
u8 enable_stream_detection : 1;
-
+ u8 disable_managed_interrupts : 1;
u8 ciss_report_log_flags;
u32 max_transfer_encrypted_sas_sata;
u32 max_transfer_encrypted_nvme;
@@ -1381,6 +1395,7 @@ struct pqi_ctrl_info {
struct work_struct ofa_quiesce_work;
u32 ofa_bytes_requested;
u16 ofa_cancel_reason;
+ enum pqi_ctrl_removal_state ctrl_removal_state;
};
enum pqi_ctrl_mode {
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 7c0d069a3158..7a8c2c75acba 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* driver for Microchip PQI-based storage controllers
- * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
@@ -33,11 +33,11 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "2.1.14-035"
+#define DRIVER_VERSION "2.1.18-045"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 1
-#define DRIVER_RELEASE 14
-#define DRIVER_REVISION 35
+#define DRIVER_RELEASE 18
+#define DRIVER_REVISION 45
#define DRIVER_NAME "Microchip SmartPQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -94,7 +94,8 @@ static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device, unsigned long timeout_msecs);
+ struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs);
+static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info);
/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1
@@ -174,6 +175,18 @@ module_param_named(hide_vsep,
pqi_hide_vsep, int, 0644);
MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives.");
+static int pqi_disable_managed_interrupts;
+module_param_named(disable_managed_interrupts,
+ pqi_disable_managed_interrupts, int, 0644);
+MODULE_PARM_DESC(disable_managed_interrupts,
+ "Disable the kernel automatically assigning SMP affinity to IRQs.");
+
+static unsigned int pqi_ctrl_ready_timeout_secs;
+module_param_named(ctrl_ready_timeout,
+ pqi_ctrl_ready_timeout_secs, uint, 0644);
+MODULE_PARM_DESC(ctrl_ready_timeout,
+ "Timeout in seconds for driver to wait for controller ready.");
+
static char *raid_levels[] = {
"RAID-0",
"RAID-4",
@@ -1597,7 +1610,9 @@ static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
&id_phys->alternate_paths_phys_connector,
sizeof(device->phys_connector));
device->bay = id_phys->phys_bay_in_box;
-
+ device->multi_lun_device_lun_count = id_phys->multi_lun_device_lun_count;
+ if (!device->multi_lun_device_lun_count)
+ device->multi_lun_device_lun_count = 1;
if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) &&
id_phys->phy_count)
device->phy_id =
@@ -1880,15 +1895,18 @@ static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
{
int rc;
+ int lun;
- rc = pqi_device_wait_for_pending_io(ctrl_info, device,
- PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
- if (rc)
- dev_err(&ctrl_info->pci_dev->dev,
- "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
- ctrl_info->scsi_host->host_no, device->bus,
- device->target, device->lun,
- atomic_read(&device->scsi_cmds_outstanding));
+ for (lun = 0; lun < device->multi_lun_device_lun_count; lun++) {
+ rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun,
+ PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
+ if (rc)
+ dev_err(&ctrl_info->pci_dev->dev,
+ "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
+ ctrl_info->scsi_host->host_no, device->bus,
+ device->target, lun,
+ atomic_read(&device->scsi_cmds_outstanding[lun]));
+ }
if (pqi_is_logical_device(device))
scsi_remove_device(device->sdev);
@@ -2020,6 +2038,23 @@ static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
}
+static bool pqi_raid_maps_equal(struct raid_map *raid_map1, struct raid_map *raid_map2)
+{
+ u32 raid_map1_size;
+ u32 raid_map2_size;
+
+ if (raid_map1 == NULL || raid_map2 == NULL)
+ return raid_map1 == raid_map2;
+
+ raid_map1_size = get_unaligned_le32(&raid_map1->structure_size);
+ raid_map2_size = get_unaligned_le32(&raid_map2->structure_size);
+
+ if (raid_map1_size != raid_map2_size)
+ return false;
+
+ return memcmp(raid_map1, raid_map2, raid_map1_size) == 0;
+}
+
/* Assumes the SCSI device list lock is held. */
static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
@@ -2033,49 +2068,51 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
existing_device->target_lun_valid = true;
}
- if (pqi_is_logical_device(existing_device) &&
- ctrl_info->logical_volume_rescan_needed)
- existing_device->rescan = true;
-
/* By definition, the scsi3addr and wwid fields are already the same. */
existing_device->is_physical_device = new_device->is_physical_device;
- existing_device->is_external_raid_device =
- new_device->is_external_raid_device;
- existing_device->is_expander_smp_device =
- new_device->is_expander_smp_device;
- existing_device->aio_enabled = new_device->aio_enabled;
- memcpy(existing_device->vendor, new_device->vendor,
- sizeof(existing_device->vendor));
- memcpy(existing_device->model, new_device->model,
- sizeof(existing_device->model));
+ memcpy(existing_device->vendor, new_device->vendor, sizeof(existing_device->vendor));
+ memcpy(existing_device->model, new_device->model, sizeof(existing_device->model));
existing_device->sas_address = new_device->sas_address;
- existing_device->raid_level = new_device->raid_level;
existing_device->queue_depth = new_device->queue_depth;
- existing_device->aio_handle = new_device->aio_handle;
- existing_device->volume_status = new_device->volume_status;
- existing_device->active_path_index = new_device->active_path_index;
- existing_device->phy_id = new_device->phy_id;
- existing_device->path_map = new_device->path_map;
- existing_device->bay = new_device->bay;
- existing_device->box_index = new_device->box_index;
- existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
- existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type;
- memcpy(existing_device->box, new_device->box,
- sizeof(existing_device->box));
- memcpy(existing_device->phys_connector, new_device->phys_connector,
- sizeof(existing_device->phys_connector));
- memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group));
- kfree(existing_device->raid_map);
- existing_device->raid_map = new_device->raid_map;
- existing_device->raid_bypass_configured =
- new_device->raid_bypass_configured;
- existing_device->raid_bypass_enabled =
- new_device->raid_bypass_enabled;
existing_device->device_offline = false;
- /* To prevent this from being freed later. */
- new_device->raid_map = NULL;
+ if (pqi_is_logical_device(existing_device)) {
+ existing_device->is_external_raid_device = new_device->is_external_raid_device;
+
+ if (existing_device->devtype == TYPE_DISK) {
+ existing_device->raid_level = new_device->raid_level;
+ existing_device->volume_status = new_device->volume_status;
+ if (ctrl_info->logical_volume_rescan_needed)
+ existing_device->rescan = true;
+ memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group));
+ if (!pqi_raid_maps_equal(existing_device->raid_map, new_device->raid_map)) {
+ kfree(existing_device->raid_map);
+ existing_device->raid_map = new_device->raid_map;
+ /* To prevent this from being freed later. */
+ new_device->raid_map = NULL;
+ }
+ existing_device->raid_bypass_configured = new_device->raid_bypass_configured;
+ existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled;
+ }
+ } else {
+ existing_device->aio_enabled = new_device->aio_enabled;
+ existing_device->aio_handle = new_device->aio_handle;
+ existing_device->is_expander_smp_device = new_device->is_expander_smp_device;
+ existing_device->active_path_index = new_device->active_path_index;
+ existing_device->phy_id = new_device->phy_id;
+ existing_device->path_map = new_device->path_map;
+ existing_device->bay = new_device->bay;
+ existing_device->box_index = new_device->box_index;
+ existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
+ existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type;
+ memcpy(existing_device->box, new_device->box, sizeof(existing_device->box));
+ memcpy(existing_device->phys_connector, new_device->phys_connector, sizeof(existing_device->phys_connector));
+
+ existing_device->multi_lun_device_lun_count = new_device->multi_lun_device_lun_count;
+ if (existing_device->multi_lun_device_lun_count == 0)
+ existing_device->multi_lun_device_lun_count = 1;
+ }
}
static inline void pqi_free_device(struct pqi_scsi_dev *device)
@@ -2505,23 +2542,6 @@ out:
return rc;
}
-static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
-{
- unsigned long flags;
- struct pqi_scsi_dev *device;
- struct pqi_scsi_dev *next;
-
- list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
- scsi_device_list_entry) {
- if (pqi_is_device_added(device))
- pqi_remove_device(ctrl_info, device);
- spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
- list_del(&device->scsi_device_list_entry);
- pqi_free_device(device);
- spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
- }
-}
-
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
int rc;
@@ -3322,6 +3342,9 @@ static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_inf
case SOP_TMF_REJECTED:
rc = -EAGAIN;
break;
+ case SOP_RC_INCORRECT_LOGICAL_UNIT:
+ rc = -ENODEV;
+ break;
default:
rc = -EIO;
break;
@@ -3663,6 +3686,20 @@ static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
return ack_event;
}
+static void pqi_disable_raid_bypass(struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned long flags;
+ struct pqi_scsi_dev *device;
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
+ if (device->raid_bypass_enabled)
+ device->raid_bypass_enabled = false;
+
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+}
+
static void pqi_event_worker(struct work_struct *work)
{
unsigned int i;
@@ -3690,6 +3727,8 @@ static void pqi_event_worker(struct work_struct *work)
rescan_needed = true;
if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE)
ctrl_info->logical_volume_rescan_needed = true;
+ else if (event->event_type == PQI_EVENT_TYPE_AIO_STATE_CHANGE)
+ pqi_disable_raid_bypass(ctrl_info);
}
if (ack_event)
pqi_acknowledge_event(ctrl_info, event);
@@ -3697,8 +3736,11 @@ static void pqi_event_worker(struct work_struct *work)
event++;
}
+#define PQI_RESCAN_WORK_FOR_EVENT_DELAY (5 * HZ)
+
if (rescan_needed)
- pqi_schedule_rescan_worker_delayed(ctrl_info);
+ pqi_schedule_rescan_worker_with_delay(ctrl_info,
+ PQI_RESCAN_WORK_FOR_EVENT_DELAY);
out:
pqi_ctrl_unbusy(ctrl_info);
@@ -3992,10 +4034,14 @@ static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
{
int num_vectors_enabled;
+ unsigned int flags = PCI_IRQ_MSIX;
+
+ if (!pqi_disable_managed_interrupts)
+ flags |= PCI_IRQ_AFFINITY;
num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
- PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
+ flags);
if (num_vectors_enabled < 0) {
dev_err(&ctrl_info->pci_dev->dev,
"MSI-X init failed with error %d\n",
@@ -5457,6 +5503,7 @@ static int pqi_raid_submit_scsi_cmd_with_io_request(
put_unaligned_le16(io_request->index, &request->request_id);
request->error_index = request->request_id;
memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number));
+ request->ml_device_lun_number = (u8)scmd->device->lun;
cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
memcpy(request->cdb, scmd->cmnd, cdb_length);
@@ -5484,10 +5531,10 @@ static int pqi_raid_submit_scsi_cmd_with_io_request(
}
switch (scmd->sc_data_direction) {
- case DMA_TO_DEVICE:
+ case DMA_FROM_DEVICE:
request->data_direction = SOP_READ_FLAG;
break;
- case DMA_FROM_DEVICE:
+ case DMA_TO_DEVICE:
request->data_direction = SOP_WRITE_FLAG;
break;
case DMA_NONE:
@@ -5621,7 +5668,9 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
int rc;
struct pqi_io_request *io_request;
struct pqi_aio_path_request *request;
+ struct pqi_scsi_dev *device;
+ device = scmd->device->hostdata;
io_request = pqi_alloc_io_request(ctrl_info);
io_request->io_complete_callback = pqi_aio_io_complete;
io_request->scmd = scmd;
@@ -5637,6 +5686,8 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
request->command_priority = io_high_prio;
put_unaligned_le16(io_request->index, &request->request_id);
request->error_index = request->request_id;
+ if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
+ put_unaligned_le64(((scmd->device->lun) << 8), &request->lun_number);
if (cdb_length > sizeof(request->cdb))
cdb_length = sizeof(request->cdb);
request->cdb_length = cdb_length;
@@ -5846,7 +5897,7 @@ void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
return;
}
- atomic_dec(&device->scsi_cmds_outstanding);
+ atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
}
static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
@@ -5941,7 +5992,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
return 0;
}
- atomic_inc(&device->scsi_cmds_outstanding);
+ atomic_inc(&device->scsi_cmds_outstanding[scmd->device->lun]);
ctrl_info = shost_to_hba(shost);
@@ -5987,7 +6038,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
out:
if (rc)
- atomic_dec(&device->scsi_cmds_outstanding);
+ atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
return rc;
}
@@ -6127,7 +6178,7 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
#define PQI_PENDING_IO_WARNING_TIMEOUT_SECS 10
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device, unsigned long timeout_msecs)
+ struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs)
{
int cmds_outstanding;
unsigned long start_jiffies;
@@ -6137,23 +6188,25 @@ static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
start_jiffies = jiffies;
warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
- while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding)) > 0) {
- pqi_check_ctrl_health(ctrl_info);
- if (pqi_ctrl_offline(ctrl_info))
- return -ENXIO;
+ while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun])) > 0) {
+ if (ctrl_info->ctrl_removal_state != PQI_CTRL_GRACEFUL_REMOVAL) {
+ pqi_check_ctrl_health(ctrl_info);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENXIO;
+ }
msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies);
if (msecs_waiting >= timeout_msecs) {
dev_err(&ctrl_info->pci_dev->dev,
"scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n",
ctrl_info->scsi_host->host_no, device->bus, device->target,
- device->lun, msecs_waiting / 1000, cmds_outstanding);
+ lun, msecs_waiting / 1000, cmds_outstanding);
return -ETIMEDOUT;
}
if (time_after(jiffies, warning_timeout)) {
dev_warn(&ctrl_info->pci_dev->dev,
"scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n",
ctrl_info->scsi_host->host_no, device->bus, device->target,
- device->lun, msecs_waiting / 1000, cmds_outstanding);
+ lun, msecs_waiting / 1000, cmds_outstanding);
warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + jiffies;
}
usleep_range(1000, 2000);
@@ -6173,7 +6226,7 @@ static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
#define PQI_LUN_RESET_POLL_COMPLETION_SECS 10
static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device, struct completion *wait)
+ struct pqi_scsi_dev *device, u8 lun, struct completion *wait)
{
int rc;
unsigned int wait_secs;
@@ -6195,10 +6248,10 @@ static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
}
wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS;
- cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding);
+ cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun]);
dev_warn(&ctrl_info->pci_dev->dev,
"scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete (%d command(s) outstanding)\n",
- ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun, wait_secs, cmds_outstanding);
+ ctrl_info->scsi_host->host_no, device->bus, device->target, lun, wait_secs, cmds_outstanding);
}
return rc;
@@ -6206,13 +6259,15 @@ static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
#define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS 30
-static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
+static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
{
int rc;
struct pqi_io_request *io_request;
DECLARE_COMPLETION_ONSTACK(wait);
struct pqi_task_management_request *request;
+ struct pqi_scsi_dev *device;
+ device = scmd->device->hostdata;
io_request = pqi_alloc_io_request(ctrl_info);
io_request->io_complete_callback = pqi_lun_reset_complete;
io_request->context = &wait;
@@ -6226,6 +6281,8 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *d
put_unaligned_le16(io_request->index, &request->request_id);
memcpy(request->lun_number, device->scsi3addr,
sizeof(request->lun_number));
+ if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
+ request->ml_device_lun_number = (u8)scmd->device->lun;
request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
if (ctrl_info->tmf_iu_timeout_supported)
put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout);
@@ -6233,7 +6290,7 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *d
pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
io_request);
- rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
+ rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, (u8)scmd->device->lun, &wait);
if (rc == 0)
rc = io_request->status;
@@ -6247,16 +6304,18 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *d
#define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS (10 * 60 * 1000)
#define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS (2 * 60 * 1000)
-static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
+static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
{
int reset_rc;
int wait_rc;
unsigned int retries;
unsigned long timeout_msecs;
+ struct pqi_scsi_dev *device;
+ device = scmd->device->hostdata;
for (retries = 0;;) {
- reset_rc = pqi_lun_reset(ctrl_info, device);
- if (reset_rc == 0 || ++retries > PQI_LUN_RESET_RETRIES)
+ reset_rc = pqi_lun_reset(ctrl_info, scmd);
+ if (reset_rc == 0 || reset_rc == -ENODEV || ++retries > PQI_LUN_RESET_RETRIES)
break;
msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
}
@@ -6264,18 +6323,19 @@ static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pq
timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS :
PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS;
- wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, timeout_msecs);
+ wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, scmd->device->lun, timeout_msecs);
if (wait_rc && reset_rc == 0)
reset_rc = wait_rc;
return reset_rc == 0 ? SUCCESS : FAILED;
}
-static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device)
+static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
{
int rc;
+ struct pqi_scsi_dev *device;
+ device = scmd->device->hostdata;
pqi_ctrl_block_requests(ctrl_info);
pqi_ctrl_wait_until_quiesced(ctrl_info);
pqi_fail_io_queued_for_device(ctrl_info, device);
@@ -6283,7 +6343,7 @@ static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
if (rc)
rc = FAILED;
else
- rc = pqi_lun_reset_with_retries(ctrl_info, device);
+ rc = pqi_lun_reset_with_retries(ctrl_info, scmd);
pqi_ctrl_unblock_requests(ctrl_info);
return rc;
@@ -6305,18 +6365,18 @@ static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
dev_err(&ctrl_info->pci_dev->dev,
"resetting scsi %d:%d:%d:%d due to cmd 0x%02x\n",
shost->host_no,
- device->bus, device->target, device->lun,
+ device->bus, device->target, (u32)scmd->device->lun,
scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff);
pqi_check_ctrl_health(ctrl_info);
if (pqi_ctrl_offline(ctrl_info))
rc = FAILED;
else
- rc = pqi_device_reset(ctrl_info, device);
+ rc = pqi_device_reset(ctrl_info, scmd);
dev_err(&ctrl_info->pci_dev->dev,
"reset of scsi %d:%d:%d:%d: %s\n",
- shost->host_no, device->bus, device->target, device->lun,
+ shost->host_no, device->bus, device->target, (u32)scmd->device->lun,
rc == SUCCESS ? "SUCCESS" : "FAILED");
mutex_unlock(&ctrl_info->lun_reset_mutex);
@@ -6405,6 +6465,35 @@ static int pqi_slave_configure(struct scsi_device *sdev)
return rc;
}
+static void pqi_slave_destroy(struct scsi_device *sdev)
+{
+ struct pqi_ctrl_info *ctrl_info;
+ struct pqi_scsi_dev *device;
+ int mutex_acquired;
+ unsigned long flags;
+
+ ctrl_info = shost_to_hba(sdev->host);
+
+ mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
+ if (!mutex_acquired)
+ return;
+
+ device = sdev->hostdata;
+ if (!device) {
+ mutex_unlock(&ctrl_info->scan_mutex);
+ return;
+ }
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+ list_del(&device->scsi_device_list_entry);
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+ mutex_unlock(&ctrl_info->scan_mutex);
+
+ pqi_dev_info(ctrl_info, "removed", device);
+ pqi_free_device(device);
+}
+
static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
{
struct pci_dev *pci_dev;
@@ -6919,6 +7008,9 @@ static ssize_t pqi_unique_id_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
@@ -6955,6 +7047,9 @@ static ssize_t pqi_lunid_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
@@ -6990,6 +7085,9 @@ static ssize_t pqi_path_info_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
@@ -7067,6 +7165,9 @@ static ssize_t pqi_sas_address_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
@@ -7093,6 +7194,9 @@ static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
@@ -7122,6 +7226,9 @@ static ssize_t pqi_raid_level_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
@@ -7152,6 +7259,9 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
@@ -7179,6 +7289,9 @@ static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
@@ -7268,6 +7381,7 @@ static struct scsi_host_template pqi_driver_template = {
.ioctl = pqi_ioctl,
.slave_alloc = pqi_slave_alloc,
.slave_configure = pqi_slave_configure,
+ .slave_destroy = pqi_slave_destroy,
.map_queues = pqi_map_queues,
.sdev_groups = pqi_sdev_groups,
.shost_groups = pqi_shost_groups,
@@ -7290,6 +7404,7 @@ static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
shost->this_id = -1;
shost->max_channel = PQI_MAX_BUS;
shost->max_cmd_len = MAX_COMMAND_SIZE;
+ shost->max_lun = PQI_MAX_LUNS_PER_DEVICE;
- shost->max_lun = ~0;
shost->max_id = ~0;
shost->max_sectors = ctrl_info->max_sectors;
@@ -7358,8 +7473,7 @@ static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
reset_reg.all_bits = readl(&pqi_registers->device_reset);
if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
break;
- pqi_check_ctrl_health(ctrl_info);
- if (pqi_ctrl_offline(ctrl_info)) {
+ if (!sis_is_firmware_running(ctrl_info)) {
rc = -ENXIO;
break;
}
@@ -7463,6 +7577,9 @@ static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
sizeof(identify->vendor_id));
ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';
+ dev_info(&ctrl_info->pci_dev->dev,
+ "Firmware version: %s\n", ctrl_info->firmware_version);
+
out:
kfree(identify);
@@ -7634,6 +7751,9 @@ static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
case PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5:
ctrl_info->rpl_extended_format_4_5_supported = firmware_feature->enabled;
break;
+ case PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT:
+ ctrl_info->multi_lun_device_supported = firmware_feature->enabled;
+ break;
}
pqi_firmware_feature_status(ctrl_info, firmware_feature);
@@ -7734,6 +7854,11 @@ static struct pqi_firmware_feature pqi_firmware_features[] = {
.feature_bit = PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5,
.feature_status = pqi_ctrl_update_feature_flags,
},
+ {
+ .feature_name = "Multi-LUN Target",
+ .feature_bit = PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
};
static void pqi_process_firmware_features(
@@ -7835,6 +7960,7 @@ static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info)
ctrl_info->tmf_iu_timeout_supported = false;
ctrl_info->firmware_triage_supported = false;
ctrl_info->rpl_extended_format_4_5_supported = false;
+ ctrl_info->multi_lun_device_supported = false;
}
static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
@@ -8491,6 +8617,7 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6;
ctrl_info->max_write_raid_1_10_2drive = ~0;
ctrl_info->max_write_raid_1_10_3drive = ~0;
+ ctrl_info->disable_managed_interrupts = pqi_disable_managed_interrupts;
return ctrl_info;
}
@@ -8508,7 +8635,6 @@ static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
{
- pqi_stop_heartbeat_timer(ctrl_info);
pqi_free_interrupts(ctrl_info);
if (ctrl_info->queue_memory_base)
dma_free_coherent(&ctrl_info->pci_dev->dev,
@@ -8533,9 +8659,15 @@ static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
{
+ ctrl_info->controller_online = false;
+ pqi_stop_heartbeat_timer(ctrl_info);
+ pqi_ctrl_block_requests(ctrl_info);
pqi_cancel_rescan_worker(ctrl_info);
pqi_cancel_update_time_worker(ctrl_info);
- pqi_remove_all_scsi_devices(ctrl_info);
+ if (ctrl_info->ctrl_removal_state == PQI_CTRL_SURPRISE_REMOVAL) {
+ pqi_fail_all_outstanding_requests(ctrl_info);
+ ctrl_info->pqi_mode_enabled = false;
+ }
pqi_unregister_scsi(ctrl_info);
if (ctrl_info->pqi_mode_enabled)
pqi_revert_to_sis_mode(ctrl_info);
@@ -8875,11 +9007,18 @@ error:
static void pqi_pci_remove(struct pci_dev *pci_dev)
{
struct pqi_ctrl_info *ctrl_info;
+ u16 vendor_id;
ctrl_info = pci_get_drvdata(pci_dev);
if (!ctrl_info)
return;
+ pci_read_config_word(ctrl_info->pci_dev, PCI_SUBSYSTEM_VENDOR_ID, &vendor_id);
+ if (vendor_id == 0xffff)
+ ctrl_info->ctrl_removal_state = PQI_CTRL_SURPRISE_REMOVAL;
+ else
+ ctrl_info->ctrl_removal_state = PQI_CTRL_GRACEFUL_REMOVAL;
+
pqi_remove_ctrl(ctrl_info);
}
@@ -8956,9 +9095,31 @@ static void pqi_process_lockup_action_param(void)
DRIVER_NAME_SHORT, pqi_lockup_action_param);
}
+#define PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS 30
+#define PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS (30 * 60)
+
+static void pqi_process_ctrl_ready_timeout_param(void)
+{
+ if (pqi_ctrl_ready_timeout_secs == 0)
+ return;
+
+ if (pqi_ctrl_ready_timeout_secs < PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS) {
+ pr_warn("%s: ctrl_ready_timeout parm of %u second(s) is less than minimum timeout of %d seconds - setting timeout to %d seconds\n",
+ DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS);
+ pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS;
+ } else if (pqi_ctrl_ready_timeout_secs > PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS) {
+ pr_warn("%s: ctrl_ready_timeout parm of %u seconds is greater than maximum timeout of %d seconds - setting timeout to %d seconds\n",
+ DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS);
+ pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS;
+ }
+
+ sis_ctrl_ready_timeout_secs = pqi_ctrl_ready_timeout_secs;
+}
+
static void pqi_process_module_params(void)
{
pqi_process_lockup_action_param();
+ pqi_process_ctrl_ready_timeout_param();
}
#if defined(CONFIG_PM)
@@ -9275,6 +9436,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0659)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x0800)
},
{
@@ -9739,6 +9904,46 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cc4, 0x0101)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cc4, 0x0201)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0220)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0221)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0520)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0522)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0620)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0621)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0622)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0623)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_ANY_ID, PCI_ANY_ID)
},
{ 0 }
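
The smartpqi hunks above replace the single per-device outstanding-command counter with an array indexed by LUN, so a LUN reset only has to drain I/O queued to the LUN being reset. A minimal user-space sketch of that bookkeeping follows; it is illustrative only (the struct, the MAX_LUNS bound and the helper names are invented here, not driver code):

/*
 * Illustrative user-space sketch, not smartpqi driver code.
 * One counter per LUN instead of one per device, so draining one LUN
 * does not wait on commands queued to its siblings.
 */
#include <stdatomic.h>
#include <stdio.h>

#define MAX_LUNS 8	/* illustrative bound; the driver's limit may differ */

struct demo_device {
	atomic_int cmds_outstanding[MAX_LUNS];
};

static void queue_cmd(struct demo_device *dev, unsigned int lun)
{
	atomic_fetch_add(&dev->cmds_outstanding[lun], 1);
}

static void complete_cmd(struct demo_device *dev, unsigned int lun)
{
	atomic_fetch_sub(&dev->cmds_outstanding[lun], 1);
}

/* Check whether a single LUN has drained, ignoring the other LUNs. */
static int lun_idle(struct demo_device *dev, unsigned int lun)
{
	return atomic_load(&dev->cmds_outstanding[lun]) == 0;
}

int main(void)
{
	struct demo_device dev = { 0 };

	queue_cmd(&dev, 3);
	queue_cmd(&dev, 5);
	complete_cmd(&dev, 3);
	printf("LUN 3 idle: %d, LUN 5 idle: %d\n",
	       lun_idle(&dev, 3), lun_idle(&dev, 5));
	return 0;
}
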
diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
index dea4ebaf1677..13e8c539010e 100644
--- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c
+++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* driver for Microchip PQI-based storage controllers
- * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c
index afc27adf68e9..5811fb3c22a9 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.c
+++ b/drivers/scsi/smartpqi/smartpqi_sis.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* driver for Microchip PQI-based storage controllers
- * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
@@ -86,6 +86,8 @@ struct sis_base_struct {
#pragma pack()
+unsigned int sis_ctrl_ready_timeout_secs = SIS_CTRL_READY_TIMEOUT_SECS;
+
static int sis_wait_for_ctrl_ready_with_timeout(struct pqi_ctrl_info *ctrl_info,
unsigned int timeout_secs)
{
@@ -122,7 +124,7 @@ static int sis_wait_for_ctrl_ready_with_timeout(struct pqi_ctrl_info *ctrl_info,
int sis_wait_for_ctrl_ready(struct pqi_ctrl_info *ctrl_info)
{
return sis_wait_for_ctrl_ready_with_timeout(ctrl_info,
- SIS_CTRL_READY_TIMEOUT_SECS);
+ sis_ctrl_ready_timeout_secs);
}
int sis_wait_for_ctrl_ready_resume(struct pqi_ctrl_info *ctrl_info)
@@ -138,7 +140,7 @@ bool sis_is_firmware_running(struct pqi_ctrl_info *ctrl_info)
status = readl(&ctrl_info->registers->sis_firmware_status);
- if (status & SIS_CTRL_KERNEL_PANIC)
+ if (status != ~0 && (status & SIS_CTRL_KERNEL_PANIC))
running = false;
else
running = true;
@@ -194,6 +196,7 @@ static int sis_send_sync_cmd(struct pqi_ctrl_info *ctrl_info,
/* Disable doorbell interrupts by masking all interrupts. */
writel(~0, &registers->sis_interrupt_mask);
+ usleep_range(1000, 2000);
/*
* Force the completion of the interrupt mask register write before
@@ -383,6 +386,7 @@ static int sis_wait_for_doorbell_bit_to_clear(
static inline int sis_set_doorbell_bit(struct pqi_ctrl_info *ctrl_info, u32 bit)
{
writel(bit, &ctrl_info->registers->sis_host_to_ctrl_doorbell);
+ usleep_range(1000, 2000);
return sis_wait_for_doorbell_bit_to_clear(ctrl_info, bit);
}
@@ -423,6 +427,7 @@ int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info)
void sis_write_driver_scratch(struct pqi_ctrl_info *ctrl_info, u32 value)
{
writel(value, &ctrl_info->registers->sis_driver_scratch);
+ usleep_range(1000, 2000);
}
u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info)
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h
index 5f3575261a8e..9dcbae96a5c6 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.h
+++ b/drivers/scsi/smartpqi/smartpqi_sis.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* driver for Microchip PQI-based storage controllers
- * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
@@ -32,4 +32,6 @@ void sis_soft_reset(struct pqi_ctrl_info *ctrl_info);
u32 sis_get_product_id(struct pqi_ctrl_info *ctrl_info);
int sis_wait_for_fw_triage_completion(struct pqi_ctrl_info *ctrl_info);
+extern unsigned int sis_ctrl_ready_timeout_secs;
+
#endif /* _SMARTPQI_SIS_H */
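
The new ctrl_ready_timeout module parameter feeds sis_ctrl_ready_timeout_secs after being clamped to the 30-second to 30-minute window shown in pqi_process_ctrl_ready_timeout_param() above. A small user-space sketch of the same clamping, with the constants copied from the hunk and a hypothetical helper name:

/* User-space sketch of the ctrl_ready_timeout clamping; helper name is mine. */
#include <stdio.h>

#define READY_TIMEOUT_MIN_SECS 30
#define READY_TIMEOUT_MAX_SECS (30 * 60)

static unsigned int clamp_ready_timeout(unsigned int secs)
{
	if (secs == 0)
		return 0;	/* 0 keeps the built-in default timeout */
	if (secs < READY_TIMEOUT_MIN_SECS)
		return READY_TIMEOUT_MIN_SECS;
	if (secs > READY_TIMEOUT_MAX_SECS)
		return READY_TIMEOUT_MAX_SECS;
	return secs;
}

int main(void)
{
	printf("%u %u %u\n", clamp_ready_timeout(5),
	       clamp_ready_timeout(120), clamp_ready_timeout(10000));
	return 0;
}
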
diff --git a/drivers/scsi/snic/snic_fwint.h b/drivers/scsi/snic/snic_fwint.h
index e6b3e8b431c0..2550ba964b03 100644
--- a/drivers/scsi/snic/snic_fwint.h
+++ b/drivers/scsi/snic/snic_fwint.h
@@ -145,7 +145,7 @@ struct snic_exch_ver_req {
* HBA Capabilities
* Bit 1: Reserved.
* Bit 2: Dynamic Discovery of LUNs.
- * Bit 3: Async event notifications on on tgt online/offline events.
+ * Bit 3: Async event notifications on tgt online/offline events.
* Bit 4: IO timeout support in FW.
* Bit 5-31: Reserved.
*/
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index fe000da11332..8ced292c4b96 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -2012,7 +2012,7 @@ static int storvsc_probe(struct hv_device *device,
*/
host_dev->handle_error_wq =
alloc_ordered_workqueue("storvsc_error_wq_%d",
- WQ_MEM_RECLAIM,
+ 0,
host->host_no);
if (!host_dev->handle_error_wq) {
ret = -ENOMEM;
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index 255a2d48d421..f0db17e34ea0 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -3598,7 +3598,7 @@ static void sym_sir_task_recovery(struct sym_hcb *np, int num)
}
/*
- * Gerard's alchemy:) that deals with with the data
+ * Gerard's alchemy:) that deals with the data
* pointer for both MDP and the residual calculation.
*
* I didn't want to bloat the code by more than 200
diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
index 28a8c0dda66c..a0ceeede450f 100644
--- a/drivers/soc/qcom/qcom-geni-se.c
+++ b/drivers/soc/qcom/qcom-geni-se.c
@@ -1,6 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Disable MMIO tracing to prevent excessive logging of unwanted MMIO traces */
+#define __DISABLE_TRACE_MMIO__
+
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/slab.h>
diff --git a/drivers/soc/tegra/common.c b/drivers/soc/tegra/common.c
index 32c346b72635..dff6d5ef4e46 100644
--- a/drivers/soc/tegra/common.c
+++ b/drivers/soc/tegra/common.c
@@ -107,30 +107,47 @@ int devm_tegra_core_dev_init_opp_table(struct device *dev,
{
u32 hw_version;
int err;
-
- err = devm_pm_opp_set_clkname(dev, NULL);
- if (err) {
- dev_err(dev, "failed to set OPP clk: %d\n", err);
- return err;
- }
-
- /* Tegra114+ doesn't support OPP yet */
- if (!of_machine_is_compatible("nvidia,tegra20") &&
- !of_machine_is_compatible("nvidia,tegra30"))
- return -ENODEV;
-
- if (of_machine_is_compatible("nvidia,tegra20"))
+ /*
+ * The clk's connection id to set is NULL and this is a NULL terminated
+ * array, hence two NULL entries.
+ */
+ const char *clk_names[] = { NULL, NULL };
+ struct dev_pm_opp_config config = {
+ /*
+ * For some devices we don't have any OPP table in the DT, and
+ * in order to use the same code path for all the devices, we
+ * create a dummy OPP table for them via this. The dummy OPP
+ * table is only capable of doing clk_set_rate() on invocation
+ * of dev_pm_opp_set_rate() and doesn't provide any other
+ * functionality.
+ */
+ .clk_names = clk_names,
+ };
+
+ if (of_machine_is_compatible("nvidia,tegra20")) {
hw_version = BIT(tegra_sku_info.soc_process_id);
- else
+ config.supported_hw = &hw_version;
+ config.supported_hw_count = 1;
+ } else if (of_machine_is_compatible("nvidia,tegra30")) {
hw_version = BIT(tegra_sku_info.soc_speedo_id);
+ config.supported_hw = &hw_version;
+ config.supported_hw_count = 1;
+ }
- err = devm_pm_opp_set_supported_hw(dev, &hw_version, 1);
+ err = devm_pm_opp_set_config(dev, &config);
if (err) {
- dev_err(dev, "failed to set OPP supported HW: %d\n", err);
+ dev_err(dev, "failed to set OPP config: %d\n", err);
return err;
}
/*
+ * Tegra114+ doesn't support OPP yet, return early for non tegra20/30
+ * case.
+ */
+ if (!config.supported_hw)
+ return -ENODEV;
+
+ /*
* Older device-trees have an empty OPP table, we will get
* -ENODEV from devm_pm_opp_of_add_table() in this case.
*/
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 5611d14d3ba2..6a4b8f7e7948 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -1384,7 +1384,7 @@ tegra_pmc_core_pd_opp_to_performance_state(struct generic_pm_domain *genpd,
static int tegra_pmc_core_pd_add(struct tegra_pmc *pmc, struct device_node *np)
{
struct generic_pm_domain *genpd;
- const char *rname = "core";
+ const char *rname[] = { "core", NULL};
int err;
genpd = devm_kzalloc(pmc->dev, sizeof(*genpd), GFP_KERNEL);
@@ -1395,7 +1395,7 @@ static int tegra_pmc_core_pd_add(struct tegra_pmc *pmc, struct device_node *np)
genpd->set_performance_state = tegra_pmc_core_pd_set_performance_state;
genpd->opp_to_performance_state = tegra_pmc_core_pd_opp_to_performance_state;
- err = devm_pm_opp_set_regulators(pmc->dev, &rname, 1);
+ err = devm_pm_opp_set_regulators(pmc->dev, rname);
if (err)
return dev_err_probe(pmc->dev, err,
"failed to set core OPP regulator\n");
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index 95ce292994cc..89d1d0d021fc 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -1004,9 +1004,18 @@ static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct sn
{
struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
struct sdw_intel *sdw = cdns_to_intel(cdns);
+ struct sdw_intel_link_res *res = sdw->link_res;
struct sdw_cdns_dma_data *dma;
int ret = 0;
+ /*
+ * The .trigger callback is used to send required IPC to audio
+ * firmware. The .free_stream callback will still be called
+ * by intel_free_stream() in the TRIGGER_SUSPEND case.
+ */
+ if (res->ops && res->ops->trigger)
+ res->ops->trigger(dai, cmd, substream->stream);
+
dma = snd_soc_dai_get_dma_data(dai, substream);
if (!dma) {
dev_err(dai->dev, "failed to get dma data in %s\n",
@@ -1114,9 +1123,10 @@ static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
};
static const struct snd_soc_component_driver dai_component = {
- .name = "soundwire",
- .probe = intel_component_probe,
- .suspend = intel_component_dais_suspend
+ .name = "soundwire",
+ .probe = intel_component_probe,
+ .suspend = intel_component_dais_suspend,
+ .legacy_dai_naming = 1,
};
static int intel_create_dai(struct sdw_cdns *cdns,
diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
index 0bc7daa7afc8..e4cb52e1fe26 100644
--- a/drivers/spi/spi-meson-spicc.c
+++ b/drivers/spi/spi-meson-spicc.c
@@ -156,6 +156,7 @@ struct meson_spicc_device {
void __iomem *base;
struct clk *core;
struct clk *pclk;
+ struct clk_divider pow2_div;
struct clk *clk;
struct spi_message *message;
struct spi_transfer *xfer;
@@ -168,6 +169,8 @@ struct meson_spicc_device {
unsigned long xfer_remain;
};
+#define pow2_clk_to_spicc(_div) container_of(_div, struct meson_spicc_device, pow2_div)
+
static void meson_spicc_oen_enable(struct meson_spicc_device *spicc)
{
u32 conf;
@@ -421,7 +424,7 @@ static int meson_spicc_prepare_message(struct spi_master *master,
{
struct meson_spicc_device *spicc = spi_master_get_devdata(master);
struct spi_device *spi = message->spi;
- u32 conf = 0;
+ u32 conf = readl_relaxed(spicc->base + SPICC_CONREG) & SPICC_DATARATE_MASK;
/* Store current message */
spicc->message = message;
@@ -458,8 +461,6 @@ static int meson_spicc_prepare_message(struct spi_master *master,
/* Select CS */
conf |= FIELD_PREP(SPICC_CS_MASK, spi->chip_select);
- /* Default Clock rate core/4 */
-
/* Default 8bit word */
conf |= FIELD_PREP(SPICC_BITLENGTH_MASK, 8 - 1);
@@ -476,12 +477,16 @@ static int meson_spicc_prepare_message(struct spi_master *master,
static int meson_spicc_unprepare_transfer(struct spi_master *master)
{
struct meson_spicc_device *spicc = spi_master_get_devdata(master);
+ u32 conf = readl_relaxed(spicc->base + SPICC_CONREG) & SPICC_DATARATE_MASK;
/* Disable all IRQs */
writel(0, spicc->base + SPICC_INTREG);
device_reset_optional(&spicc->pdev->dev);
+ /* Set default configuration, keeping datarate field */
+ writel_relaxed(conf, spicc->base + SPICC_CONREG);
+
return 0;
}
@@ -518,14 +523,60 @@ static void meson_spicc_cleanup(struct spi_device *spi)
* Clk path for G12A series:
* pclk -> pow2 fixed div -> pow2 div -> mux -> out
* pclk -> enh fixed div -> enh div -> mux -> out
+ *
+ * The pow2 divider is tied to the controller HW state, and the
+ * divider is only valid when the controller is initialized.
+ *
+ * A set of clock ops is added to make sure we don't read/set this
+ * clock rate while the controller is in an unknown state.
*/
-static int meson_spicc_clk_init(struct meson_spicc_device *spicc)
+static unsigned long meson_spicc_pow2_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
+
+ if (!spicc->master->cur_msg || !spicc->master->busy)
+ return 0;
+
+ return clk_divider_ops.recalc_rate(hw, parent_rate);
+}
+
+static int meson_spicc_pow2_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
+
+ if (!spicc->master->cur_msg || !spicc->master->busy)
+ return -EINVAL;
+
+ return clk_divider_ops.determine_rate(hw, req);
+}
+
+static int meson_spicc_pow2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
+
+ if (!spicc->master->cur_msg || !spicc->master->busy)
+ return -EINVAL;
+
+ return clk_divider_ops.set_rate(hw, rate, parent_rate);
+}
+
+const struct clk_ops meson_spicc_pow2_clk_ops = {
+ .recalc_rate = meson_spicc_pow2_recalc_rate,
+ .determine_rate = meson_spicc_pow2_determine_rate,
+ .set_rate = meson_spicc_pow2_set_rate,
+};
+
+static int meson_spicc_pow2_clk_init(struct meson_spicc_device *spicc)
{
struct device *dev = &spicc->pdev->dev;
- struct clk_fixed_factor *pow2_fixed_div, *enh_fixed_div;
- struct clk_divider *pow2_div, *enh_div;
- struct clk_mux *mux;
+ struct clk_fixed_factor *pow2_fixed_div;
struct clk_init_data init;
struct clk *clk;
struct clk_parent_data parent_data[2];
@@ -560,31 +611,45 @@ static int meson_spicc_clk_init(struct meson_spicc_device *spicc)
if (WARN_ON(IS_ERR(clk)))
return PTR_ERR(clk);
- pow2_div = devm_kzalloc(dev, sizeof(*pow2_div), GFP_KERNEL);
- if (!pow2_div)
- return -ENOMEM;
-
snprintf(name, sizeof(name), "%s#pow2_div", dev_name(dev));
init.name = name;
- init.ops = &clk_divider_ops;
- init.flags = CLK_SET_RATE_PARENT;
+ init.ops = &meson_spicc_pow2_clk_ops;
+ /*
+ * Set NOCACHE here to make sure we read the actual HW value
+ * since we reset the HW after each transfer.
+ */
+ init.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE;
parent_data[0].hw = &pow2_fixed_div->hw;
init.num_parents = 1;
- pow2_div->shift = 16,
- pow2_div->width = 3,
- pow2_div->flags = CLK_DIVIDER_POWER_OF_TWO,
- pow2_div->reg = spicc->base + SPICC_CONREG;
- pow2_div->hw.init = &init;
+ spicc->pow2_div.shift = 16,
+ spicc->pow2_div.width = 3,
+ spicc->pow2_div.flags = CLK_DIVIDER_POWER_OF_TWO,
+ spicc->pow2_div.reg = spicc->base + SPICC_CONREG;
+ spicc->pow2_div.hw.init = &init;
- clk = devm_clk_register(dev, &pow2_div->hw);
- if (WARN_ON(IS_ERR(clk)))
- return PTR_ERR(clk);
+ spicc->clk = devm_clk_register(dev, &spicc->pow2_div.hw);
+ if (WARN_ON(IS_ERR(spicc->clk)))
+ return PTR_ERR(spicc->clk);
- if (!spicc->data->has_enhance_clk_div) {
- spicc->clk = clk;
- return 0;
- }
+ return 0;
+}
+
+static int meson_spicc_enh_clk_init(struct meson_spicc_device *spicc)
+{
+ struct device *dev = &spicc->pdev->dev;
+ struct clk_fixed_factor *enh_fixed_div;
+ struct clk_divider *enh_div;
+ struct clk_mux *mux;
+ struct clk_init_data init;
+ struct clk *clk;
+ struct clk_parent_data parent_data[2];
+ char name[64];
+
+ memset(&init, 0, sizeof(init));
+ memset(&parent_data, 0, sizeof(parent_data));
+
+ init.parent_data = parent_data;
/* algorithm for enh div: rate = freq / 2 / (N + 1) */
@@ -637,7 +702,7 @@ static int meson_spicc_clk_init(struct meson_spicc_device *spicc)
snprintf(name, sizeof(name), "%s#sel", dev_name(dev));
init.name = name;
init.ops = &clk_mux_ops;
- parent_data[0].hw = &pow2_div->hw;
+ parent_data[0].hw = &spicc->pow2_div.hw;
parent_data[1].hw = &enh_div->hw;
init.num_parents = 2;
init.flags = CLK_SET_RATE_PARENT;
@@ -754,12 +819,20 @@ static int meson_spicc_probe(struct platform_device *pdev)
meson_spicc_oen_enable(spicc);
- ret = meson_spicc_clk_init(spicc);
+ ret = meson_spicc_pow2_clk_init(spicc);
if (ret) {
- dev_err(&pdev->dev, "clock registration failed\n");
+ dev_err(&pdev->dev, "pow2 clock registration failed\n");
goto out_clk;
}
+ if (spicc->data->has_enhance_clk_div) {
+ ret = meson_spicc_enh_clk_init(spicc);
+ if (ret) {
+ dev_err(&pdev->dev, "clock registration failed\n");
+ goto out_clk;
+ }
+ }
+
ret = devm_spi_register_master(&pdev->dev, master);
if (ret) {
dev_err(&pdev->dev, "spi master registration failed\n");
diff --git a/drivers/spi/spi-mpc52xx.c b/drivers/spi/spi-mpc52xx.c
index 3ebdce804b90..bc5e36fd4288 100644
--- a/drivers/spi/spi-mpc52xx.c
+++ b/drivers/spi/spi-mpc52xx.c
@@ -437,7 +437,7 @@ static int mpc52xx_spi_probe(struct platform_device *op)
ms->irq0 = irq_of_parse_and_map(op->dev.of_node, 0);
ms->irq1 = irq_of_parse_and_map(op->dev.of_node, 1);
ms->state = mpc52xx_spi_fsmstate_idle;
- ms->ipb_freq = mpc5xxx_get_bus_frequency(op->dev.of_node);
+ ms->ipb_freq = mpc5xxx_get_bus_frequency(&op->dev);
ms->gpio_cs_count = of_gpio_count(op->dev.of_node);
if (ms->gpio_cs_count > 0) {
master->num_chipselect = ms->gpio_cs_count;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 8f97a3eacdea..83da8862b8f2 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -95,7 +95,7 @@ static ssize_t driver_override_show(struct device *dev,
}
static DEVICE_ATTR_RW(driver_override);
-static struct spi_statistics *spi_alloc_pcpu_stats(struct device *dev)
+static struct spi_statistics __percpu *spi_alloc_pcpu_stats(struct device *dev)
{
struct spi_statistics __percpu *pcpu_stats;
@@ -162,7 +162,7 @@ static struct device_attribute dev_attr_spi_device_##field = { \
}
#define SPI_STATISTICS_SHOW_NAME(name, file, field) \
-static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
+static ssize_t spi_statistics_##name##_show(struct spi_statistics __percpu *stat, \
char *buf) \
{ \
ssize_t len; \
@@ -309,7 +309,7 @@ static const struct attribute_group *spi_master_groups[] = {
NULL,
};
-static void spi_statistics_add_transfer_stats(struct spi_statistics *pcpu_stats,
+static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats,
struct spi_transfer *xfer,
struct spi_controller *ctlr)
{
@@ -1275,8 +1275,8 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
struct spi_message *msg,
struct spi_transfer *xfer)
{
- struct spi_statistics *statm = ctlr->pcpu_statistics;
- struct spi_statistics *stats = msg->spi->pcpu_statistics;
+ struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
+ struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
u32 speed_hz = xfer->speed_hz;
unsigned long long ms;
@@ -1432,8 +1432,8 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
struct spi_transfer *xfer;
bool keep_cs = false;
int ret = 0;
- struct spi_statistics *statm = ctlr->pcpu_statistics;
- struct spi_statistics *stats = msg->spi->pcpu_statistics;
+ struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
+ struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
spi_set_cs(msg->spi, true, false);
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index e368f038ff5c..baf4da7bb3b4 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1004,8 +1004,10 @@ int iscsit_setup_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
unsigned char *buf)
{
int data_direction, payload_length;
+ struct iscsi_ecdb_ahdr *ecdb_ahdr;
struct iscsi_scsi_req *hdr;
int iscsi_task_attr;
+ unsigned char *cdb;
int sam_task_attr;
atomic_long_inc(&conn->sess->cmd_pdus);
@@ -1106,6 +1108,27 @@ int iscsit_setup_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
ISCSI_REASON_BOOKMARK_INVALID, buf);
}
+ cdb = hdr->cdb;
+
+ if (hdr->hlength) {
+ ecdb_ahdr = (struct iscsi_ecdb_ahdr *) (hdr + 1);
+ if (ecdb_ahdr->ahstype != ISCSI_AHSTYPE_CDB) {
+ pr_err("Additional Header Segment type %d not supported!\n",
+ ecdb_ahdr->ahstype);
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_CMD_NOT_SUPPORTED, buf);
+ }
+
+ cdb = kmalloc(be16_to_cpu(ecdb_ahdr->ahslength) + 15,
+ GFP_KERNEL);
+ if (cdb == NULL)
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
+ memcpy(cdb, hdr->cdb, ISCSI_CDB_SIZE);
+ memcpy(cdb + ISCSI_CDB_SIZE, ecdb_ahdr->ecdb,
+ be16_to_cpu(ecdb_ahdr->ahslength) - 1);
+ }
+
data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE :
(hdr->flags & ISCSI_FLAG_CMD_READ) ? DMA_FROM_DEVICE :
DMA_NONE;
@@ -1153,9 +1176,12 @@ int iscsit_setup_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
struct iscsi_datain_req *dr;
dr = iscsit_allocate_datain_req();
- if (!dr)
+ if (!dr) {
+ if (cdb != hdr->cdb)
+ kfree(cdb);
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
+ }
iscsit_attach_datain_req(cmd, dr);
}
@@ -1176,9 +1202,12 @@ int iscsit_setup_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
target_get_sess_cmd(&cmd->se_cmd, true);
cmd->se_cmd.tag = (__force u32)cmd->init_task_tag;
- cmd->sense_reason = target_cmd_init_cdb(&cmd->se_cmd, hdr->cdb,
+ cmd->sense_reason = target_cmd_init_cdb(&cmd->se_cmd, cdb,
GFP_KERNEL);
+ if (cdb != hdr->cdb)
+ kfree(cdb);
+
if (cmd->sense_reason) {
if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
return iscsit_add_reject_cmd(cmd,
@@ -4036,8 +4065,9 @@ static bool iscsi_target_check_conn_state(struct iscsit_conn *conn)
static void iscsit_get_rx_pdu(struct iscsit_conn *conn)
{
int ret;
- u8 *buffer, opcode;
+ u8 *buffer, *tmp_buf, opcode;
u32 checksum = 0, digest = 0;
+ struct iscsi_hdr *hdr;
struct kvec iov;
buffer = kcalloc(ISCSI_HDR_LEN, sizeof(*buffer), GFP_KERNEL);
@@ -4062,6 +4092,25 @@ static void iscsit_get_rx_pdu(struct iscsit_conn *conn)
break;
}
+ hdr = (struct iscsi_hdr *) buffer;
+ if (hdr->hlength) {
+ iov.iov_len = hdr->hlength * 4;
+ tmp_buf = krealloc(buffer,
+ ISCSI_HDR_LEN + iov.iov_len,
+ GFP_KERNEL);
+ if (!tmp_buf)
+ break;
+
+ buffer = tmp_buf;
+ iov.iov_base = &buffer[ISCSI_HDR_LEN];
+
+ ret = rx_data(conn, &iov, 1, iov.iov_len);
+ if (ret != iov.iov_len) {
+ iscsit_rx_thread_wait_for_tcp(conn);
+ break;
+ }
+ }
+
if (conn->conn_ops->HeaderDigest) {
iov.iov_base = &digest;
iov.iov_len = ISCSI_CRC_LEN;
@@ -4361,7 +4410,7 @@ int iscsit_close_connection(
spin_lock_bh(&sess->conn_lock);
atomic_dec(&sess->nconn);
- pr_debug("Decremented iSCSI connection count to %hu from node:"
+ pr_debug("Decremented iSCSI connection count to %d from node:"
" %s\n", atomic_read(&sess->nconn),
sess->sess_ops->InitiatorName);
/*
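
The iscsit_setup_scsi_cmd() change above builds the full CDB from the 16-byte header CDB plus an Extended CDB AHS whenever hdr->hlength is set. A hedged user-space sketch of that concatenation follows (illustrative names only; the AHS length field counts a reserved byte, hence the "- 1" when copying and the "+ 15" when sizing):

/* User-space sketch of extended-CDB assembly; not iscsit code. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BASE_CDB_SIZE 16

static unsigned char *build_extended_cdb(const unsigned char *base_cdb,
					 const unsigned char *ecdb,
					 unsigned short ahslength,
					 size_t *cdb_len)
{
	/* 16 base bytes + (ahslength - 1) extension bytes = ahslength + 15 */
	unsigned char *cdb = malloc(ahslength + BASE_CDB_SIZE - 1);

	if (!cdb)
		return NULL;
	memcpy(cdb, base_cdb, BASE_CDB_SIZE);
	memcpy(cdb + BASE_CDB_SIZE, ecdb, ahslength - 1);
	*cdb_len = BASE_CDB_SIZE + ahslength - 1;
	return cdb;
}

int main(void)
{
	unsigned char base[BASE_CDB_SIZE] = { 0x7f }; /* variable-length CDB opcode */
	unsigned char ecdb[4] = { 1, 2, 3, 4 };       /* extra CDB bytes from the AHS */
	size_t len = 0;
	/* ahslength = reserved byte + extension bytes */
	unsigned char *cdb = build_extended_cdb(base, ecdb, 1 + sizeof(ecdb), &len);

	printf("total CDB length: %zu\n", cdb ? len : 0);
	free(cdb);
	return 0;
}
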
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 6e5611d8f51b..c8a248bd11be 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -205,6 +205,38 @@ static struct iscsi_chap *chap_server_open(
return chap;
}
+static const char base64_lookup_table[] =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+
+static int chap_base64_decode(u8 *dst, const char *src, size_t len)
+{
+ int i, bits = 0, ac = 0;
+ const char *p;
+ u8 *cp = dst;
+
+ for (i = 0; i < len; i++) {
+ if (src[i] == '=')
+ return cp - dst;
+
+ p = strchr(base64_lookup_table, src[i]);
+ if (p == NULL || src[i] == 0)
+ return -2;
+
+ ac <<= 6;
+ ac += (p - base64_lookup_table);
+ bits += 6;
+ if (bits >= 8) {
+ *cp++ = (ac >> (bits - 8)) & 0xff;
+ ac &= ~(BIT(16) - BIT(bits - 8));
+ bits -= 8;
+ }
+ }
+ if (ac)
+ return -1;
+
+ return cp - dst;
+}
+
static int chap_server_compute_hash(
struct iscsit_conn *conn,
struct iscsi_node_auth *auth,
@@ -295,16 +327,27 @@ static int chap_server_compute_hash(
pr_err("Could not find CHAP_R.\n");
goto out;
}
- if (type != HEX) {
- pr_err("Could not find CHAP_R.\n");
- goto out;
- }
- if (strlen(chap_r) != chap->digest_size * 2) {
- pr_err("Malformed CHAP_R\n");
- goto out;
- }
- if (hex2bin(client_digest, chap_r, chap->digest_size) < 0) {
- pr_err("Malformed CHAP_R\n");
+
+ switch (type) {
+ case HEX:
+ if (strlen(chap_r) != chap->digest_size * 2) {
+ pr_err("Malformed CHAP_R\n");
+ goto out;
+ }
+ if (hex2bin(client_digest, chap_r, chap->digest_size) < 0) {
+ pr_err("Malformed CHAP_R: invalid HEX\n");
+ goto out;
+ }
+ break;
+ case BASE64:
+ if (chap_base64_decode(client_digest, chap_r, strlen(chap_r)) !=
+ chap->digest_size) {
+ pr_err("Malformed CHAP_R: invalid BASE64\n");
+ goto out;
+ }
+ break;
+ default:
+ pr_err("Could not find CHAP_R\n");
goto out;
}
@@ -373,7 +416,13 @@ static int chap_server_compute_hash(
/*
* Get CHAP_I.
*/
- if (extract_param(nr_in_ptr, "CHAP_I", 10, identifier, &type) < 0) {
+ ret = extract_param(nr_in_ptr, "CHAP_I", 10, identifier, &type);
+ if (ret == -ENOENT) {
+ pr_debug("Could not find CHAP_I. Initiator uses One way authentication.\n");
+ auth_ret = 0;
+ goto out;
+ }
+ if (ret < 0) {
pr_err("Could not find CHAP_I.\n");
goto out;
}
@@ -404,23 +453,46 @@ static int chap_server_compute_hash(
goto out;
}
- if (type != HEX) {
+ switch (type) {
+ case HEX:
+ initiatorchg_len = DIV_ROUND_UP(strlen(initiatorchg), 2);
+ if (!initiatorchg_len) {
+ pr_err("Unable to convert incoming challenge\n");
+ goto out;
+ }
+ if (initiatorchg_len > 1024) {
+ pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
+ goto out;
+ }
+
+ if (hex2bin(initiatorchg_binhex, initiatorchg,
+ initiatorchg_len) < 0) {
+ pr_err("Malformed CHAP_C: invalid HEX\n");
+ goto out;
+ }
+ break;
+ case BASE64:
+ initiatorchg_len = chap_base64_decode(initiatorchg_binhex,
+ initiatorchg,
+ strlen(initiatorchg));
+ if (initiatorchg_len < 0) {
+ pr_err("Malformed CHAP_C: invalid BASE64\n");
+ goto out;
+ }
+ if (!initiatorchg_len) {
+ pr_err("Unable to convert incoming challenge\n");
+ goto out;
+ }
+ if (initiatorchg_len > 1024) {
+ pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
+ goto out;
+ }
+ break;
+ default:
pr_err("Could not find CHAP_C.\n");
goto out;
}
- initiatorchg_len = DIV_ROUND_UP(strlen(initiatorchg), 2);
- if (!initiatorchg_len) {
- pr_err("Unable to convert incoming challenge\n");
- goto out;
- }
- if (initiatorchg_len > 1024) {
- pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
- goto out;
- }
- if (hex2bin(initiatorchg_binhex, initiatorchg, initiatorchg_len) < 0) {
- pr_err("Malformed CHAP_C\n");
- goto out;
- }
+
pr_debug("[server] Got CHAP_C=%s\n", initiatorchg);
/*
* During mutual authentication, the CHAP_C generated by the
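
chap_base64_decode() above shifts six bits per input character into an accumulator and emits a byte once eight bits are available. The following simplified user-space sketch shows the same accumulator idea; it drops the kernel's leftover-bits check and uses invented names, so it is not a drop-in equivalent:

/* Simplified user-space base64 decode sketch; illustration only. */
#include <stdio.h>
#include <string.h>

static const char b64_table[] =
	"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";

static int b64_decode(unsigned char *dst, const char *src, size_t len)
{
	unsigned int ac = 0;
	int bits = 0;
	unsigned char *cp = dst;
	const char *p;
	size_t i;

	for (i = 0; i < len; i++) {
		if (src[i] == '=')		/* padding terminates the value */
			return cp - dst;
		p = strchr(b64_table, src[i]);
		if (!p || src[i] == '\0')
			return -1;		/* character outside the alphabet */
		ac = (ac << 6) | (unsigned int)(p - b64_table);
		bits += 6;
		if (bits >= 8) {
			bits -= 8;
			*cp++ = (ac >> bits) & 0xff;
			ac &= (1u << bits) - 1;	/* keep only the leftover bits */
		}
	}
	return cp - dst;
}

int main(void)
{
	unsigned char out[16] = { 0 };
	int n = b64_decode(out, "Zm9vYmFy", 8);	/* decodes to "foobar" */

	printf("%d bytes: %.*s\n", n, n, out);
	return 0;
}
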
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index ce14540ba650..5d0f51822414 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -210,7 +210,7 @@ static struct se_tpg_np *lio_target_call_addnptotpg(
return ERR_PTR(ret);
}
- tpg = container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg);
+ tpg = to_iscsi_tpg(se_tpg);
ret = iscsit_get_tpg(tpg);
if (ret < 0)
return ERR_PTR(-EINVAL);
@@ -281,9 +281,7 @@ static ssize_t iscsi_nacl_attrib_##name##_show(struct config_item *item,\
char *page) \
{ \
struct se_node_acl *se_nacl = attrib_to_nacl(item); \
- struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \
- se_node_acl); \
- \
+ struct iscsi_node_acl *nacl = to_iscsi_nacl(se_nacl); \
return sprintf(page, "%u\n", nacl->node_attrib.name); \
} \
\
@@ -291,8 +289,7 @@ static ssize_t iscsi_nacl_attrib_##name##_store(struct config_item *item,\
const char *page, size_t count) \
{ \
struct se_node_acl *se_nacl = attrib_to_nacl(item); \
- struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \
- se_node_acl); \
+ struct iscsi_node_acl *nacl = to_iscsi_nacl(se_nacl); \
u32 val; \
int ret; \
\
@@ -317,6 +314,36 @@ ISCSI_NACL_ATTR(random_datain_pdu_offsets);
ISCSI_NACL_ATTR(random_datain_seq_offsets);
ISCSI_NACL_ATTR(random_r2t_offsets);
+static ssize_t iscsi_nacl_attrib_authentication_show(struct config_item *item,
+ char *page)
+{
+ struct se_node_acl *se_nacl = attrib_to_nacl(item);
+ struct iscsi_node_acl *nacl = to_iscsi_nacl(se_nacl);
+
+ return sprintf(page, "%d\n", nacl->node_attrib.authentication);
+}
+
+static ssize_t iscsi_nacl_attrib_authentication_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_node_acl *se_nacl = attrib_to_nacl(item);
+ struct iscsi_node_acl *nacl = to_iscsi_nacl(se_nacl);
+ s32 val;
+ int ret;
+
+ ret = kstrtos32(page, 0, &val);
+ if (ret)
+ return ret;
+ if (val != 0 && val != 1 && val != NA_AUTHENTICATION_INHERITED)
+ return -EINVAL;
+
+ nacl->node_attrib.authentication = val;
+
+ return count;
+}
+
+CONFIGFS_ATTR(iscsi_nacl_attrib_, authentication);
+
static struct configfs_attribute *lio_target_nacl_attrib_attrs[] = {
&iscsi_nacl_attrib_attr_dataout_timeout,
&iscsi_nacl_attrib_attr_dataout_timeout_retries,
@@ -326,6 +353,7 @@ static struct configfs_attribute *lio_target_nacl_attrib_attrs[] = {
&iscsi_nacl_attrib_attr_random_datain_pdu_offsets,
&iscsi_nacl_attrib_attr_random_datain_seq_offsets,
&iscsi_nacl_attrib_attr_random_r2t_offsets,
+ &iscsi_nacl_attrib_attr_authentication,
NULL,
};
@@ -377,15 +405,14 @@ static ssize_t iscsi_nacl_auth_##name##_show(struct config_item *item, \
char *page) \
{ \
struct se_node_acl *nacl = auth_to_nacl(item); \
- return __iscsi_nacl_auth_##name##_show(container_of(nacl, \
- struct iscsi_node_acl, se_node_acl), page); \
+ return __iscsi_nacl_auth_##name##_show(to_iscsi_nacl(nacl), page); \
} \
static ssize_t iscsi_nacl_auth_##name##_store(struct config_item *item, \
const char *page, size_t count) \
{ \
struct se_node_acl *nacl = auth_to_nacl(item); \
- return __iscsi_nacl_auth_##name##_store(container_of(nacl, \
- struct iscsi_node_acl, se_node_acl), page, count); \
+ return __iscsi_nacl_auth_##name##_store(to_iscsi_nacl(nacl), \
+ page, count); \
} \
\
CONFIGFS_ATTR(iscsi_nacl_auth_, name)
@@ -417,8 +444,7 @@ static ssize_t iscsi_nacl_auth_##name##_show(struct config_item *item, \
char *page) \
{ \
struct se_node_acl *nacl = auth_to_nacl(item); \
- return __iscsi_nacl_auth_##name##_show(container_of(nacl, \
- struct iscsi_node_acl, se_node_acl), page); \
+ return __iscsi_nacl_auth_##name##_show(to_iscsi_nacl(nacl), page); \
} \
\
CONFIGFS_ATTR_RO(iscsi_nacl_auth_, name)
@@ -623,8 +649,7 @@ static ssize_t lio_target_nacl_cmdsn_depth_store(struct config_item *item,
{
struct se_node_acl *se_nacl = acl_to_nacl(item);
struct se_portal_group *se_tpg = se_nacl->se_tpg;
- struct iscsi_portal_group *tpg = container_of(se_tpg,
- struct iscsi_portal_group, tpg_se_tpg);
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg);
struct config_item *acl_ci, *tpg_ci, *wwn_ci;
u32 cmdsn_depth = 0;
int ret;
@@ -700,8 +725,7 @@ static struct configfs_attribute *lio_target_initiator_attrs[] = {
static int lio_target_init_nodeacl(struct se_node_acl *se_nacl,
const char *name)
{
- struct iscsi_node_acl *acl =
- container_of(se_nacl, struct iscsi_node_acl, se_node_acl);
+ struct iscsi_node_acl *acl = to_iscsi_nacl(se_nacl);
config_group_init_type_name(&acl->node_stat_grps.iscsi_sess_stats_group,
"iscsi_sess_stats", &iscsi_stat_sess_cit);
@@ -720,8 +744,7 @@ static ssize_t iscsi_tpg_attrib_##name##_show(struct config_item *item, \
char *page) \
{ \
struct se_portal_group *se_tpg = attrib_to_tpg(item); \
- struct iscsi_portal_group *tpg = container_of(se_tpg, \
- struct iscsi_portal_group, tpg_se_tpg); \
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \
ssize_t rb; \
\
if (iscsit_get_tpg(tpg) < 0) \
@@ -736,8 +759,7 @@ static ssize_t iscsi_tpg_attrib_##name##_store(struct config_item *item,\
const char *page, size_t count) \
{ \
struct se_portal_group *se_tpg = attrib_to_tpg(item); \
- struct iscsi_portal_group *tpg = container_of(se_tpg, \
- struct iscsi_portal_group, tpg_se_tpg); \
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \
u32 val; \
int ret; \
\
@@ -800,8 +822,7 @@ static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
static ssize_t __iscsi_##prefix##_##name##_show(struct se_portal_group *se_tpg, \
char *page) \
{ \
- struct iscsi_portal_group *tpg = container_of(se_tpg, \
- struct iscsi_portal_group, tpg_se_tpg); \
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \
struct iscsi_node_auth *auth = &tpg->tpg_demo_auth; \
\
if (!capable(CAP_SYS_ADMIN)) \
@@ -813,8 +834,7 @@ static ssize_t __iscsi_##prefix##_##name##_show(struct se_portal_group *se_tpg,
static ssize_t __iscsi_##prefix##_##name##_store(struct se_portal_group *se_tpg,\
const char *page, size_t count) \
{ \
- struct iscsi_portal_group *tpg = container_of(se_tpg, \
- struct iscsi_portal_group, tpg_se_tpg); \
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \
struct iscsi_node_auth *auth = &tpg->tpg_demo_auth; \
\
if (!capable(CAP_SYS_ADMIN)) \
@@ -861,8 +881,7 @@ DEF_TPG_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET);
static ssize_t __iscsi_##prefix##_##name##_show(struct se_portal_group *se_tpg, \
char *page) \
{ \
- struct iscsi_portal_group *tpg = container_of(se_tpg, \
- struct iscsi_portal_group, tpg_se_tpg); \
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \
struct iscsi_node_auth *auth = &tpg->tpg_demo_auth; \
\
if (!capable(CAP_SYS_ADMIN)) \
@@ -900,8 +919,7 @@ static ssize_t iscsi_tpg_param_##name##_show(struct config_item *item, \
char *page) \
{ \
struct se_portal_group *se_tpg = param_to_tpg(item); \
- struct iscsi_portal_group *tpg = container_of(se_tpg, \
- struct iscsi_portal_group, tpg_se_tpg); \
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \
struct iscsi_param *param; \
ssize_t rb; \
\
@@ -923,8 +941,7 @@ static ssize_t iscsi_tpg_param_##name##_store(struct config_item *item, \
const char *page, size_t count) \
{ \
struct se_portal_group *se_tpg = param_to_tpg(item); \
- struct iscsi_portal_group *tpg = container_of(se_tpg, \
- struct iscsi_portal_group, tpg_se_tpg); \
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \
char *buf; \
int ret, len; \
\
@@ -1073,8 +1090,7 @@ free_out:
static int lio_target_tiqn_enabletpg(struct se_portal_group *se_tpg,
bool enable)
{
- struct iscsi_portal_group *tpg = container_of(se_tpg,
- struct iscsi_portal_group, tpg_se_tpg);
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg);
int ret;
ret = iscsit_get_tpg(tpg);
@@ -1106,7 +1122,7 @@ static void lio_target_tiqn_deltpg(struct se_portal_group *se_tpg)
struct iscsi_portal_group *tpg;
struct iscsi_tiqn *tiqn;
- tpg = container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg);
+ tpg = to_iscsi_tpg(se_tpg);
tiqn = tpg->tpg_tiqn;
/*
* iscsit_tpg_del_portal_group() assumes force=1
@@ -1416,46 +1432,41 @@ static void lio_aborted_task(struct se_cmd *se_cmd)
cmd->conn->conn_transport->iscsit_aborted_task(cmd->conn, cmd);
}
-static inline struct iscsi_portal_group *iscsi_tpg(struct se_portal_group *se_tpg)
-{
- return container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg);
-}
-
static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg)
{
- return iscsi_tpg(se_tpg)->tpg_tiqn->tiqn;
+ return to_iscsi_tpg(se_tpg)->tpg_tiqn->tiqn;
}
static u16 lio_tpg_get_tag(struct se_portal_group *se_tpg)
{
- return iscsi_tpg(se_tpg)->tpgt;
+ return to_iscsi_tpg(se_tpg)->tpgt;
}
static u32 lio_tpg_get_default_depth(struct se_portal_group *se_tpg)
{
- return iscsi_tpg(se_tpg)->tpg_attrib.default_cmdsn_depth;
+ return to_iscsi_tpg(se_tpg)->tpg_attrib.default_cmdsn_depth;
}
static int lio_tpg_check_demo_mode(struct se_portal_group *se_tpg)
{
- return iscsi_tpg(se_tpg)->tpg_attrib.generate_node_acls;
+ return to_iscsi_tpg(se_tpg)->tpg_attrib.generate_node_acls;
}
static int lio_tpg_check_demo_mode_cache(struct se_portal_group *se_tpg)
{
- return iscsi_tpg(se_tpg)->tpg_attrib.cache_dynamic_acls;
+ return to_iscsi_tpg(se_tpg)->tpg_attrib.cache_dynamic_acls;
}
static int lio_tpg_check_demo_mode_write_protect(
struct se_portal_group *se_tpg)
{
- return iscsi_tpg(se_tpg)->tpg_attrib.demo_mode_write_protect;
+ return to_iscsi_tpg(se_tpg)->tpg_attrib.demo_mode_write_protect;
}
static int lio_tpg_check_prod_mode_write_protect(
struct se_portal_group *se_tpg)
{
- return iscsi_tpg(se_tpg)->tpg_attrib.prod_mode_write_protect;
+ return to_iscsi_tpg(se_tpg)->tpg_attrib.prod_mode_write_protect;
}
static int lio_tpg_check_prot_fabric_only(
@@ -1465,9 +1476,9 @@ static int lio_tpg_check_prot_fabric_only(
* Only report fabric_prot_type if t10_pi has also been enabled
* for incoming ib_isert sessions.
*/
- if (!iscsi_tpg(se_tpg)->tpg_attrib.t10_pi)
+ if (!to_iscsi_tpg(se_tpg)->tpg_attrib.t10_pi)
return 0;
- return iscsi_tpg(se_tpg)->tpg_attrib.fabric_prot_type;
+ return to_iscsi_tpg(se_tpg)->tpg_attrib.fabric_prot_type;
}
/*
@@ -1504,16 +1515,14 @@ static void lio_tpg_close_session(struct se_session *se_sess)
static u32 lio_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
- return iscsi_tpg(se_tpg)->tpg_tiqn->tiqn_index;
+ return to_iscsi_tpg(se_tpg)->tpg_tiqn->tiqn_index;
}
static void lio_set_default_node_attributes(struct se_node_acl *se_acl)
{
- struct iscsi_node_acl *acl = container_of(se_acl, struct iscsi_node_acl,
- se_node_acl);
+ struct iscsi_node_acl *acl = to_iscsi_nacl(se_acl);
struct se_portal_group *se_tpg = se_acl->se_tpg;
- struct iscsi_portal_group *tpg = container_of(se_tpg,
- struct iscsi_portal_group, tpg_se_tpg);
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg);
acl->node_attrib.nacl = acl;
iscsit_set_default_node_attribues(acl, tpg);
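
The new per-ACL authentication attribute added above is tri-state: 0 and 1 force the behaviour for that initiator, while NA_AUTHENTICATION_INHERITED defers to the portal-group-wide setting, which is how iscsi_conn_auth_required() resolves it later in this series. A user-space sketch of that resolution logic follows; the inherited sentinel is assumed to be -1 here and the struct names are invented:

/* User-space sketch of tri-state auth inheritance; not target core code. */
#include <stdbool.h>
#include <stdio.h>

#define NA_AUTHENTICATION_INHERITED (-1)	/* assumed sentinel value */

struct demo_tpg  { bool authentication; };	/* portal-group-wide setting */
struct demo_nacl { int authentication; };	/* 0, 1 or inherited */

static bool auth_required(const struct demo_tpg *tpg,
			  const struct demo_nacl *nacl)
{
	/* Unknown or inheriting ACLs fall back to the portal group. */
	if (!nacl || nacl->authentication == NA_AUTHENTICATION_INHERITED)
		return tpg->authentication;
	return nacl->authentication;
}

int main(void)
{
	struct demo_tpg tpg = { .authentication = true };
	struct demo_nacl open_acl = { .authentication = 0 };
	struct demo_nacl inheriting_acl = { .authentication = NA_AUTHENTICATION_INHERITED };

	printf("open ACL needs CHAP: %d\n", auth_required(&tpg, &open_acl));
	printf("inheriting ACL needs CHAP: %d\n", auth_required(&tpg, &inheriting_acl));
	return 0;
}
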
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 6b94eecc4790..27e448c2d066 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -341,6 +341,7 @@ static int iscsi_login_zero_tsih_s2(
{
struct iscsi_node_attrib *na;
struct iscsit_session *sess = conn->sess;
+ struct iscsi_param *param;
bool iser = false;
sess->tpg = conn->tpg;
@@ -375,6 +376,18 @@ static int iscsi_login_zero_tsih_s2(
na = iscsit_tpg_get_node_attrib(sess);
/*
+ * If ACL allows non-authorized access in TPG with CHAP,
+ * then set None to AuthMethod.
+ */
+ param = iscsi_find_param_from_key(AUTHMETHOD, conn->param_list);
+ if (param && !strstr(param->value, NONE)) {
+ if (!iscsi_conn_auth_required(conn))
+ if (iscsi_change_param_sprintf(conn, "AuthMethod=%s",
+ NONE))
+ return -1;
+ }
+
+ /*
* Need to send TargetPortalGroupTag back in first login response
* on any iSCSI connection where the Initiator provides TargetName.
* See 5.3.1. Login Phase Start
@@ -715,7 +728,7 @@ void iscsi_post_login_handler(
list_add_tail(&conn->conn_list, &sess->sess_conn_list);
atomic_inc(&sess->nconn);
- pr_debug("Incremented iSCSI Connection count to %hu"
+ pr_debug("Incremented iSCSI Connection count to %d"
" from node: %s\n", atomic_read(&sess->nconn),
sess->sess_ops->InitiatorName);
spin_unlock_bh(&sess->conn_lock);
@@ -763,7 +776,7 @@ void iscsi_post_login_handler(
spin_lock_bh(&sess->conn_lock);
list_add_tail(&conn->conn_list, &sess->sess_conn_list);
atomic_inc(&sess->nconn);
- pr_debug("Incremented iSCSI Connection count to %hu from node:"
+ pr_debug("Incremented iSCSI Connection count to %d from node:"
" %s\n", atomic_read(&sess->nconn),
sess->sess_ops->InitiatorName);
spin_unlock_bh(&sess->conn_lock);
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index b34ac9ecac31..f2919319ad38 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -62,31 +62,34 @@ int extract_param(
int len;
if (!in_buf || !pattern || !out_buf || !type)
- return -1;
+ return -EINVAL;
ptr = strstr(in_buf, pattern);
if (!ptr)
- return -1;
+ return -ENOENT;
ptr = strstr(ptr, "=");
if (!ptr)
- return -1;
+ return -EINVAL;
ptr += 1;
if (*ptr == '0' && (*(ptr+1) == 'x' || *(ptr+1) == 'X')) {
ptr += 2; /* skip 0x */
*type = HEX;
+ } else if (*ptr == '0' && (*(ptr+1) == 'b' || *(ptr+1) == 'B')) {
+ ptr += 2; /* skip 0b */
+ *type = BASE64;
} else
*type = DECIMAL;
len = strlen_semi(ptr);
if (len < 0)
- return -1;
+ return -EINVAL;
if (len >= max_length) {
pr_err("Length of input: %d exceeds max_length:"
" %d\n", len, max_length);
- return -1;
+ return -EINVAL;
}
memcpy(out_buf, ptr, len);
out_buf[len] = '\0';
@@ -94,6 +97,31 @@ int extract_param(
return 0;
}
+static struct iscsi_node_auth *iscsi_get_node_auth(struct iscsit_conn *conn)
+{
+ struct iscsi_portal_group *tpg;
+ struct iscsi_node_acl *nacl;
+ struct se_node_acl *se_nacl;
+
+ if (conn->sess->sess_ops->SessionType)
+ return &iscsit_global->discovery_acl.node_auth;
+
+ se_nacl = conn->sess->se_sess->se_node_acl;
+ if (!se_nacl) {
+ pr_err("Unable to locate struct se_node_acl for CHAP auth\n");
+ return NULL;
+ }
+
+ if (se_nacl->dynamic_node_acl) {
+ tpg = to_iscsi_tpg(se_nacl->se_tpg);
+ return &tpg->tpg_demo_auth;
+ }
+
+ nacl = to_iscsi_nacl(se_nacl);
+
+ return &nacl->node_auth;
+}
+
static u32 iscsi_handle_authentication(
struct iscsit_conn *conn,
char *in_buf,
@@ -102,40 +130,11 @@ static u32 iscsi_handle_authentication(
int *out_length,
unsigned char *authtype)
{
- struct iscsit_session *sess = conn->sess;
struct iscsi_node_auth *auth;
- struct iscsi_node_acl *iscsi_nacl;
- struct iscsi_portal_group *iscsi_tpg;
- struct se_node_acl *se_nacl;
-
- if (!sess->sess_ops->SessionType) {
- /*
- * For SessionType=Normal
- */
- se_nacl = conn->sess->se_sess->se_node_acl;
- if (!se_nacl) {
- pr_err("Unable to locate struct se_node_acl for"
- " CHAP auth\n");
- return -1;
- }
-
- if (se_nacl->dynamic_node_acl) {
- iscsi_tpg = container_of(se_nacl->se_tpg,
- struct iscsi_portal_group, tpg_se_tpg);
-
- auth = &iscsi_tpg->tpg_demo_auth;
- } else {
- iscsi_nacl = container_of(se_nacl, struct iscsi_node_acl,
- se_node_acl);
- auth = &iscsi_nacl->node_auth;
- }
- } else {
- /*
- * For SessionType=Discovery
- */
- auth = &iscsit_global->discovery_acl.node_auth;
- }
+ auth = iscsi_get_node_auth(conn);
+ if (!auth)
+ return -1;
if (strstr("CHAP", authtype))
strcpy(conn->sess->auth_type, "CHAP");
@@ -815,6 +814,42 @@ static int iscsi_target_do_authentication(
return 0;
}
+bool iscsi_conn_auth_required(struct iscsit_conn *conn)
+{
+ struct iscsi_node_acl *nacl;
+ struct se_node_acl *se_nacl;
+
+ if (conn->sess->sess_ops->SessionType) {
+ /*
+ * For SessionType=Discovery
+ */
+ return conn->tpg->tpg_attrib.authentication;
+ }
+ /*
+ * For SessionType=Normal
+ */
+ se_nacl = conn->sess->se_sess->se_node_acl;
+ if (!se_nacl) {
+ pr_debug("Unknown ACL is trying to connect\n");
+ return true;
+ }
+
+ if (se_nacl->dynamic_node_acl) {
+ pr_debug("Dynamic ACL %s is trying to connect\n",
+ se_nacl->initiatorname);
+ return conn->tpg->tpg_attrib.authentication;
+ }
+
+ pr_debug("Known ACL %s is trying to connect\n",
+ se_nacl->initiatorname);
+
+ nacl = to_iscsi_nacl(se_nacl);
+ if (nacl->node_attrib.authentication == NA_AUTHENTICATION_INHERITED)
+ return conn->tpg->tpg_attrib.authentication;
+
+ return nacl->node_attrib.authentication;
+}
+
static int iscsi_target_handle_csg_zero(
struct iscsit_conn *conn,
struct iscsi_login *login)
@@ -876,22 +911,26 @@ static int iscsi_target_handle_csg_zero(
return -1;
if (!iscsi_check_negotiated_keys(conn->param_list)) {
- if (conn->tpg->tpg_attrib.authentication &&
- !strncmp(param->value, NONE, 4)) {
- pr_err("Initiator sent AuthMethod=None but"
- " Target is enforcing iSCSI Authentication,"
- " login failed.\n");
- iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
- ISCSI_LOGIN_STATUS_AUTH_FAILED);
- return -1;
- }
+ bool auth_required = iscsi_conn_auth_required(conn);
+
+ if (auth_required) {
+ if (!strncmp(param->value, NONE, 4)) {
+ pr_err("Initiator sent AuthMethod=None but"
+ " Target is enforcing iSCSI Authentication,"
+ " login failed.\n");
+ iscsit_tx_login_rsp(conn,
+ ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_AUTH_FAILED);
+ return -1;
+ }
- if (conn->tpg->tpg_attrib.authentication &&
- !login->auth_complete)
- return 0;
+ if (!login->auth_complete)
+ return 0;
- if (strncmp(param->value, NONE, 4) && !login->auth_complete)
- return 0;
+ if (strncmp(param->value, NONE, 4) &&
+ !login->auth_complete)
+ return 0;
+ }
if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE1) &&
(login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) {
@@ -906,6 +945,18 @@ do_auth:
return iscsi_target_do_authentication(conn, login);
}
+static bool iscsi_conn_authenticated(struct iscsit_conn *conn,
+ struct iscsi_login *login)
+{
+ if (!iscsi_conn_auth_required(conn))
+ return true;
+
+ if (login->auth_complete)
+ return true;
+
+ return false;
+}
+
static int iscsi_target_handle_csg_one(struct iscsit_conn *conn, struct iscsi_login *login)
{
int ret;
@@ -949,11 +1000,10 @@ static int iscsi_target_handle_csg_one(struct iscsit_conn *conn, struct iscsi_lo
return -1;
}
- if (!login->auth_complete &&
- conn->tpg->tpg_attrib.authentication) {
+ if (!iscsi_conn_authenticated(conn, login)) {
pr_err("Initiator is requesting CSG: 1, has not been"
- " successfully authenticated, and the Target is"
- " enforcing iSCSI Authentication, login failed.\n");
+ " successfully authenticated, and the Target is"
+ " enforcing iSCSI Authentication, login failed.\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_AUTH_FAILED);
return -1;
diff --git a/drivers/target/iscsi/iscsi_target_nego.h b/drivers/target/iscsi/iscsi_target_nego.h
index ed30b9ee75e6..41c3db3ddeaa 100644
--- a/drivers/target/iscsi/iscsi_target_nego.h
+++ b/drivers/target/iscsi/iscsi_target_nego.h
@@ -4,6 +4,7 @@
#define DECIMAL 0
#define HEX 1
+#define BASE64 2
struct iscsit_conn;
struct iscsi_login;
@@ -21,5 +22,5 @@ extern int iscsi_target_locate_portal(struct iscsi_np *, struct iscsit_conn *,
extern int iscsi_target_start_negotiation(
struct iscsi_login *, struct iscsit_conn *);
extern void iscsi_target_nego_release(struct iscsit_conn *);
-
+extern bool iscsi_conn_auth_required(struct iscsit_conn *conn);
#endif /* ISCSI_TARGET_NEGO_H */
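The per-connection authentication decision added by this series is concentrated in iscsi_conn_auth_required(), declared above and defined in iscsi_target_nego.c earlier in the diff. Below is a condensed, illustrative sketch of the precedence it implements; the struct and enum names are simplified placeholders, not the kernel types.

	/* Illustrative sketch only - mirrors the precedence of
	 * iscsi_conn_auth_required(); placeholder types, not patch content.
	 */
	enum node_auth { NODE_AUTH_INHERITED, NODE_AUTH_OFF, NODE_AUTH_ON };

	struct conn_view {
		bool discovery_session;		/* SessionType=Discovery */
		bool has_acl;			/* matching ACL was found */
		bool dynamic_acl;		/* generated/demo-mode ACL */
		bool tpg_authentication;	/* TPG-wide attribute */
		enum node_auth acl_authentication; /* new per-ACL attribute */
	};

	static bool auth_required(const struct conn_view *c)
	{
		if (c->discovery_session)	/* discovery follows the TPG setting */
			return c->tpg_authentication;
		if (!c->has_acl)		/* unknown initiator: always challenge */
			return true;
		if (c->dynamic_acl)		/* dynamic ACLs follow the TPG setting */
			return c->tpg_authentication;
		if (c->acl_authentication == NODE_AUTH_INHERITED)
			return c->tpg_authentication;
		return c->acl_authentication == NODE_AUTH_ON;
	}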
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.c b/drivers/target/iscsi/iscsi_target_nodeattrib.c
index 874cb33c9be0..d63efdefb18e 100644
--- a/drivers/target/iscsi/iscsi_target_nodeattrib.c
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.c
@@ -30,6 +30,7 @@ void iscsit_set_default_node_attribues(
{
struct iscsi_node_attrib *a = &acl->node_attrib;
+ a->authentication = NA_AUTHENTICATION_INHERITED;
a->dataout_timeout = NA_DATAOUT_TIMEOUT;
a->dataout_timeout_retries = NA_DATAOUT_TIMEOUT_RETRIES;
a->nopin_timeout = NA_NOPIN_TIMEOUT;
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 4339ee517434..3cac1aafef68 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -394,8 +394,7 @@ struct iscsi_node_attrib *iscsit_tpg_get_node_attrib(
{
struct se_session *se_sess = sess->se_sess;
struct se_node_acl *se_nacl = se_sess->se_node_acl;
- struct iscsi_node_acl *acl = container_of(se_nacl, struct iscsi_node_acl,
- se_node_acl);
+ struct iscsi_node_acl *acl = to_iscsi_nacl(se_nacl);
return &acl->node_attrib;
}
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index b56ef8af66e7..fb91423a4e2e 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -385,7 +385,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
/*
* Extract the RELATIVE TARGET PORT IDENTIFIER to identify
- * the Target Port in question for the the incoming
+ * the Target Port in question for the incoming
* SET_TARGET_PORT_GROUPS op.
*/
rtpi = get_unaligned_be16(ptr + 2);
@@ -934,8 +934,7 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
spin_lock(&lun->lun_deve_lock);
list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link) {
- lacl = rcu_dereference_check(se_deve->se_lun_acl,
- lockdep_is_held(&lun->lun_deve_lock));
+ lacl = se_deve->se_lun_acl;
/*
* spc4r37 p.242:
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index bbcbbfa72b07..416514c5c7ac 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -732,6 +732,7 @@ static ssize_t emulate_tpu_store(struct config_item *item,
const char *page, size_t count)
{
struct se_dev_attrib *da = to_attrib(item);
+ struct se_device *dev = da->da_dev;
bool flag;
int ret;
@@ -744,8 +745,11 @@ static ssize_t emulate_tpu_store(struct config_item *item,
* Discard supported is detected iblock_create_virtdevice().
*/
if (flag && !da->max_unmap_block_desc_count) {
- pr_err("Generic Block Discard not supported\n");
- return -ENOSYS;
+ if (!dev->transport->configure_unmap ||
+ !dev->transport->configure_unmap(dev)) {
+ pr_err("Generic Block Discard not supported\n");
+ return -ENOSYS;
+ }
}
da->emulate_tpu = flag;
@@ -758,6 +762,7 @@ static ssize_t emulate_tpws_store(struct config_item *item,
const char *page, size_t count)
{
struct se_dev_attrib *da = to_attrib(item);
+ struct se_device *dev = da->da_dev;
bool flag;
int ret;
@@ -770,8 +775,11 @@ static ssize_t emulate_tpws_store(struct config_item *item,
* Discard supported is detected iblock_create_virtdevice().
*/
if (flag && !da->max_unmap_block_desc_count) {
- pr_err("Generic Block Discard not supported\n");
- return -ENOSYS;
+ if (!dev->transport->configure_unmap ||
+ !dev->transport->configure_unmap(dev)) {
+ pr_err("Generic Block Discard not supported\n");
+ return -ENOSYS;
+ }
}
da->emulate_tpws = flag;
@@ -964,6 +972,7 @@ static ssize_t unmap_zeroes_data_store(struct config_item *item,
const char *page, size_t count)
{
struct se_dev_attrib *da = to_attrib(item);
+ struct se_device *dev = da->da_dev;
bool flag;
int ret;
@@ -982,10 +991,12 @@ static ssize_t unmap_zeroes_data_store(struct config_item *item,
* Discard supported is detected iblock_configure_device().
*/
if (flag && !da->max_unmap_block_desc_count) {
- pr_err("dev[%p]: Thin Provisioning LBPRZ will not be set"
- " because max_unmap_block_desc_count is zero\n",
- da->da_dev);
- return -ENOSYS;
+ if (!dev->transport->configure_unmap ||
+ !dev->transport->configure_unmap(dev)) {
+ pr_err("dev[%p]: Thin Provisioning LBPRZ will not be set because max_unmap_block_desc_count is zero\n",
+ da->da_dev);
+ return -ENOSYS;
+ }
}
da->unmap_zeroes_data = flag;
pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n",
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 25f33eb25337..b7f16ee8aa0e 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -75,7 +75,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd)
return TCM_WRITE_PROTECTED;
}
- se_lun = rcu_dereference(deve->se_lun);
+ se_lun = deve->se_lun;
if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
se_lun = NULL;
@@ -152,7 +152,7 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
rcu_read_lock();
deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
if (deve) {
- se_lun = rcu_dereference(deve->se_lun);
+ se_lun = deve->se_lun;
if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
se_lun = NULL;
@@ -216,7 +216,7 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(
rcu_read_lock();
hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
- lun = rcu_dereference(deve->se_lun);
+ lun = deve->se_lun;
if (!lun) {
pr_err("%s device entries device pointer is"
" NULL, but Initiator has access.\n",
@@ -243,11 +243,8 @@ void core_free_device_list_for_node(
struct se_dev_entry *deve;
mutex_lock(&nacl->lun_entry_mutex);
- hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
- struct se_lun *lun = rcu_dereference_check(deve->se_lun,
- lockdep_is_held(&nacl->lun_entry_mutex));
- core_disable_device_list_for_node(lun, deve, nacl, tpg);
- }
+ hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
+ core_disable_device_list_for_node(deve->se_lun, deve, nacl, tpg);
mutex_unlock(&nacl->lun_entry_mutex);
}
@@ -334,8 +331,7 @@ int core_enable_device_list_for_node(
mutex_lock(&nacl->lun_entry_mutex);
orig = target_nacl_find_deve(nacl, mapped_lun);
if (orig && orig->se_lun) {
- struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
- lockdep_is_held(&nacl->lun_entry_mutex));
+ struct se_lun *orig_lun = orig->se_lun;
if (orig_lun != lun) {
pr_err("Existing orig->se_lun doesn't match new lun"
@@ -355,8 +351,8 @@ int core_enable_device_list_for_node(
return -EINVAL;
}
- rcu_assign_pointer(new->se_lun, lun);
- rcu_assign_pointer(new->se_lun_acl, lun_acl);
+ new->se_lun = lun;
+ new->se_lun_acl = lun_acl;
hlist_del_rcu(&orig->link);
hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
mutex_unlock(&nacl->lun_entry_mutex);
@@ -374,8 +370,8 @@ int core_enable_device_list_for_node(
return 0;
}
- rcu_assign_pointer(new->se_lun, lun);
- rcu_assign_pointer(new->se_lun_acl, lun_acl);
+ new->se_lun = lun;
+ new->se_lun_acl = lun_acl;
hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
mutex_unlock(&nacl->lun_entry_mutex);
@@ -434,9 +430,6 @@ void core_disable_device_list_for_node(
kref_put(&orig->pr_kref, target_pr_kref_release);
wait_for_completion(&orig->pr_comp);
- rcu_assign_pointer(orig->se_lun, NULL);
- rcu_assign_pointer(orig->se_lun_acl, NULL);
-
kfree_rcu(orig, rcu_head);
core_scsi3_free_pr_reg_from_nacl(dev, nacl);
@@ -457,10 +450,7 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
mutex_lock(&nacl->lun_entry_mutex);
hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
- struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
- lockdep_is_held(&nacl->lun_entry_mutex));
-
- if (lun != tmp_lun)
+ if (lun != deve->se_lun)
continue;
core_disable_device_list_for_node(lun, deve, nacl, tpg);
@@ -960,6 +950,12 @@ int target_configure_device(struct se_device *dev)
ret = dev->transport->configure_device(dev);
if (ret)
goto out_free_index;
+
+ if (dev->transport->configure_unmap &&
+ dev->transport->configure_unmap(dev)) {
+ pr_debug("Discard support available, but disabled by default.\n");
+ }
+
/*
* XXX: there is not much point to have two different values here..
*/
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 6c8d8b051bfd..28aa643be5d5 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -86,6 +86,24 @@ static struct se_device *fd_alloc_device(struct se_hba *hba, const char *name)
return &fd_dev->dev;
}
+static bool fd_configure_unmap(struct se_device *dev)
+{
+ struct file *file = FD_DEV(dev)->fd_file;
+ struct inode *inode = file->f_mapping->host;
+
+ if (S_ISBLK(inode->i_mode))
+ return target_configure_unmap_from_queue(&dev->dev_attrib,
+ I_BDEV(inode));
+
+ /* Limit UNMAP emulation to 8k Number of LBAs (NoLB) */
+ dev->dev_attrib.max_unmap_lba_count = 0x2000;
+ /* Currently hardcoded to 1 in Linux/SCSI code. */
+ dev->dev_attrib.max_unmap_block_desc_count = 1;
+ dev->dev_attrib.unmap_granularity = 1;
+ dev->dev_attrib.unmap_granularity_alignment = 0;
+ return true;
+}
+
static int fd_configure_device(struct se_device *dev)
{
struct fd_dev *fd_dev = FD_DEV(dev);
@@ -149,10 +167,6 @@ static int fd_configure_device(struct se_device *dev)
" block_device blocks: %llu logical_block_size: %d\n",
dev_size, div_u64(dev_size, fd_dev->fd_block_size),
fd_dev->fd_block_size);
-
- if (target_configure_unmap_from_queue(&dev->dev_attrib, bdev))
- pr_debug("IFILE: BLOCK Discard support available,"
- " disabled by default\n");
/*
* Enable write same emulation for IBLOCK and use 0xFFFF as
* the smaller WRITE_SAME(10) only has a two-byte block count.
@@ -170,16 +184,6 @@ static int fd_configure_device(struct se_device *dev)
}
fd_dev->fd_block_size = FD_BLOCKSIZE;
- /*
- * Limit UNMAP emulation to 8k Number of LBAs (NoLB)
- */
- dev->dev_attrib.max_unmap_lba_count = 0x2000;
- /*
- * Currently hardcoded to 1 in Linux/SCSI code..
- */
- dev->dev_attrib.max_unmap_block_desc_count = 1;
- dev->dev_attrib.unmap_granularity = 1;
- dev->dev_attrib.unmap_granularity_alignment = 0;
/*
* Limit WRITE_SAME w/ UNMAP=0 emulation to 8k Number of LBAs (NoLB)
@@ -438,10 +442,6 @@ fd_execute_write_same(struct se_cmd *cmd)
unsigned int len = 0, i;
ssize_t ret;
- if (!nolb) {
- target_complete_cmd(cmd, SAM_STAT_GOOD);
- return 0;
- }
if (cmd->prot_op) {
pr_err("WRITE_SAME: Protection information with FILEIO"
" backends not supported\n");
@@ -927,6 +927,7 @@ static const struct target_backend_ops fileio_ops = {
.configure_device = fd_configure_device,
.destroy_device = fd_destroy_device,
.free_device = fd_free_device,
+ .configure_unmap = fd_configure_unmap,
.parse_cdb = fd_parse_cdb,
.set_configfs_dev_params = fd_set_configfs_dev_params,
.show_configfs_dev_params = fd_show_configfs_dev_params,
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 30712a12b151..8351c974cee3 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -76,6 +76,14 @@ free_dev:
return NULL;
}
+static bool iblock_configure_unmap(struct se_device *dev)
+{
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+
+ return target_configure_unmap_from_queue(&dev->dev_attrib,
+ ib_dev->ibd_bd);
+}
+
static int iblock_configure_device(struct se_device *dev)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
@@ -119,10 +127,6 @@ static int iblock_configure_device(struct se_device *dev)
dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
dev->dev_attrib.hw_queue_depth = q->nr_requests;
- if (target_configure_unmap_from_queue(&dev->dev_attrib, bd))
- pr_debug("IBLOCK: BLOCK Discard support available,"
- " disabled by default\n");
-
/*
* Enable write same emulation for IBLOCK and use 0xFFFF as
* the smaller WRITE_SAME(10) only has a two-byte block count.
@@ -903,6 +907,7 @@ static const struct target_backend_ops iblock_ops = {
.configure_device = iblock_configure_device,
.destroy_device = iblock_destroy_device,
.free_device = iblock_free_device,
+ .configure_unmap = iblock_configure_unmap,
.plug_device = iblock_plug_device,
.unplug_device = iblock_unplug_device,
.parse_cdb = iblock_parse_cdb,
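Discard configuration now lives behind a dedicated backend callback: target_backend_ops gains .configure_unmap, target_configure_device() probes it once at configure time, and the configfs emulate_tpu/emulate_tpws/unmap_zeroes_data stores retry it when no unmap limits have been reported yet. A minimal sketch of a backend implementation follows, assuming a hypothetical "foo" backend; the fileio and iblock hunks above are the in-tree examples.

	/* Hypothetical backend sketch - mirrors fd_configure_unmap() and
	 * iblock_configure_unmap() from this diff; "foo" is a placeholder name.
	 */
	static bool foo_configure_unmap(struct se_device *dev)
	{
		/* A block-backed store would derive the limits from the queue:
		 * return target_configure_unmap_from_queue(&dev->dev_attrib, bdev);
		 */

		/* An emulated store advertises fixed UNMAP limits instead. */
		dev->dev_attrib.max_unmap_lba_count = 0x2000;
		dev->dev_attrib.max_unmap_block_desc_count = 1;
		dev->dev_attrib.unmap_granularity = 1;
		dev->dev_attrib.unmap_granularity_alignment = 0;
		return true;	/* discard can be enabled for this device */
	}

	static const struct target_backend_ops foo_ops = {
		/* ...existing callbacks... */
		.configure_unmap	= foo_configure_unmap,
	};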
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 3829b61b56c1..a1d67554709f 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -739,8 +739,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
if (!deve_tmp->se_lun_acl)
continue;
- lacl_tmp = rcu_dereference_check(deve_tmp->se_lun_acl,
- lockdep_is_held(&lun_tmp->lun_deve_lock));
+ lacl_tmp = deve_tmp->se_lun_acl;
nacl_tmp = lacl_tmp->se_lun_nacl;
/*
* Skip the matching struct se_node_acl that is allocated
@@ -784,8 +783,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
* the original *pr_reg is processed in
* __core_scsi3_add_registration()
*/
- dest_lun = rcu_dereference_check(deve_tmp->se_lun,
- kref_read(&deve_tmp->pr_kref) != 0);
+ dest_lun = deve_tmp->se_lun;
pr_reg_atp = __core_scsi3_do_alloc_registration(dev,
nacl_tmp, dest_lun, deve_tmp,
@@ -1437,34 +1435,26 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
{
- struct se_lun_acl *lun_acl;
-
/*
* For nacl->dynamic_node_acl=1
*/
- lun_acl = rcu_dereference_check(se_deve->se_lun_acl,
- kref_read(&se_deve->pr_kref) != 0);
- if (!lun_acl)
+ if (!se_deve->se_lun_acl)
return 0;
- return target_depend_item(&lun_acl->se_lun_group.cg_item);
+ return target_depend_item(&se_deve->se_lun_acl->se_lun_group.cg_item);
}
static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
{
- struct se_lun_acl *lun_acl;
-
/*
* For nacl->dynamic_node_acl=1
*/
- lun_acl = rcu_dereference_check(se_deve->se_lun_acl,
- kref_read(&se_deve->pr_kref) != 0);
- if (!lun_acl) {
+ if (!se_deve->se_lun_acl) {
kref_put(&se_deve->pr_kref, target_pr_kref_release);
return;
}
- target_undepend_item(&lun_acl->se_lun_group.cg_item);
+ target_undepend_item(&se_deve->se_lun_acl->se_lun_group.cg_item);
kref_put(&se_deve->pr_kref, target_pr_kref_release);
}
@@ -1751,8 +1741,7 @@ core_scsi3_decode_spec_i_port(
* and then call __core_scsi3_add_registration() in the
* 2nd loop which will never fail.
*/
- dest_lun = rcu_dereference_check(dest_se_deve->se_lun,
- kref_read(&dest_se_deve->pr_kref) != 0);
+ dest_lun = dest_se_deve->se_lun;
dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev,
dest_node_acl, dest_lun, dest_se_deve,
@@ -3446,8 +3435,7 @@ after_iport_check:
dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
iport_ptr);
if (!dest_pr_reg) {
- struct se_lun *dest_lun = rcu_dereference_check(dest_se_deve->se_lun,
- kref_read(&dest_se_deve->pr_kref) != 0);
+ struct se_lun *dest_lun = dest_se_deve->se_lun;
spin_unlock(&dev->dev_reservation_lock);
if (core_scsi3_alloc_registration(cmd->se_dev, dest_node_acl,
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index f6132836eb38..1e3216de1e04 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -345,68 +345,6 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags, struct sbc_ops *op
return 0;
}
-static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success,
- int *post_ret)
-{
- unsigned char *buf, *addr;
- struct scatterlist *sg;
- unsigned int offset;
- sense_reason_t ret = TCM_NO_SENSE;
- int i, count;
-
- if (!success)
- return 0;
-
- /*
- * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
- *
- * 1) read the specified logical block(s);
- * 2) transfer logical blocks from the data-out buffer;
- * 3) XOR the logical blocks transferred from the data-out buffer with
- * the logical blocks read, storing the resulting XOR data in a buffer;
- * 4) if the DISABLE WRITE bit is set to zero, then write the logical
- * blocks transferred from the data-out buffer; and
- * 5) transfer the resulting XOR data to the data-in buffer.
- */
- buf = kmalloc(cmd->data_length, GFP_KERNEL);
- if (!buf) {
- pr_err("Unable to allocate xor_callback buf\n");
- return TCM_OUT_OF_RESOURCES;
- }
- /*
- * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
- * into the locally allocated *buf
- */
- sg_copy_to_buffer(cmd->t_data_sg,
- cmd->t_data_nents,
- buf,
- cmd->data_length);
-
- /*
- * Now perform the XOR against the BIDI read memory located at
- * cmd->t_mem_bidi_list
- */
-
- offset = 0;
- for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
- addr = kmap_atomic(sg_page(sg));
- if (!addr) {
- ret = TCM_OUT_OF_RESOURCES;
- goto out;
- }
-
- for (i = 0; i < sg->length; i++)
- *(addr + sg->offset + i) ^= *(buf + offset + i);
-
- offset += sg->length;
- kunmap_atomic(addr);
- }
-
-out:
- kfree(buf);
- return ret;
-}
-
static sense_reason_t
sbc_execute_rw(struct se_cmd *cmd)
{
@@ -933,47 +871,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
cmd->execute_cmd = sbc_execute_rw;
break;
- case XDWRITEREAD_10:
- if (cmd->data_direction != DMA_TO_DEVICE ||
- !(cmd->se_cmd_flags & SCF_BIDI))
- return TCM_INVALID_CDB_FIELD;
- sectors = transport_get_sectors_10(cdb);
-
- if (sbc_check_dpofua(dev, cmd, cdb))
- return TCM_INVALID_CDB_FIELD;
-
- cmd->t_task_lba = transport_lba_32(cdb);
- cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
-
- /*
- * Setup BIDI XOR callback to be run after I/O completion.
- */
- cmd->execute_cmd = sbc_execute_rw;
- cmd->transport_complete_callback = &xdreadwrite_callback;
- break;
case VARIABLE_LENGTH_CMD:
{
u16 service_action = get_unaligned_be16(&cdb[8]);
switch (service_action) {
- case XDWRITEREAD_32:
- sectors = transport_get_sectors_32(cdb);
-
- if (sbc_check_dpofua(dev, cmd, cdb))
- return TCM_INVALID_CDB_FIELD;
- /*
- * Use WRITE_32 and READ_32 opcodes for the emulated
- * XDWRITE_READ_32 logic.
- */
- cmd->t_task_lba = transport_lba_64_ext(cdb);
- cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
-
- /*
- * Setup BIDI XOR callback to be run during after I/O
- * completion.
- */
- cmd->execute_cmd = sbc_execute_rw;
- cmd->transport_complete_callback = &xdreadwrite_callback;
- break;
case WRITE_SAME_32:
sectors = transport_get_sectors_32(cdb);
if (!sectors) {
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index 62d15bcc3d93..f85ee5b0fd80 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -877,7 +877,6 @@ static ssize_t target_stat_auth_dev_show(struct config_item *item,
struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
- struct se_lun *lun;
ssize_t ret;
rcu_read_lock();
@@ -886,9 +885,9 @@ static ssize_t target_stat_auth_dev_show(struct config_item *item,
rcu_read_unlock();
return -ENODEV;
}
- lun = rcu_dereference(deve->se_lun);
+
/* scsiDeviceIndex */
- ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_index);
+ ret = snprintf(page, PAGE_SIZE, "%u\n", deve->se_lun->lun_index);
rcu_read_unlock();
return ret;
}
@@ -1217,7 +1216,6 @@ static ssize_t target_stat_iport_dev_show(struct config_item *item,
struct se_lun_acl *lacl = iport_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
- struct se_lun *lun;
ssize_t ret;
rcu_read_lock();
@@ -1226,9 +1224,9 @@ static ssize_t target_stat_iport_dev_show(struct config_item *item,
rcu_read_unlock();
return -ENODEV;
}
- lun = rcu_dereference(deve->se_lun);
+
/* scsiDeviceIndex */
- ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_index);
+ ret = snprintf(page, PAGE_SIZE, "%u\n", deve->se_lun->lun_index);
rcu_read_unlock();
return ret;
}
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 6bb20aa9c5bc..8713cda0c2fb 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -88,7 +88,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_session *sess,
struct se_device *this_dev;
int rc;
- this_lun = rcu_dereference(deve->se_lun);
+ this_lun = deve->se_lun;
this_dev = rcu_dereference_raw(this_lun->lun_se_dev);
rc = target_xcopy_locate_se_dev_e4_iter(this_dev, dev_wwn);
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index f2b1bcefcadd..1175f3a46859 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -326,6 +326,9 @@ struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx,
void *ret;
int id;
+ if (!access_ok((void __user *)addr, length))
+ return ERR_PTR(-EFAULT);
+
mutex_lock(&teedev->mutex);
id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL);
mutex_unlock(&teedev->mutex);
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 0e5cc948373c..e052dae614eb 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -221,7 +221,7 @@ config THERMAL_EMULATION
config THERMAL_MMIO
tristate "Generic Thermal MMIO driver"
- depends on OF || COMPILE_TEST
+ depends on OF
depends on HAS_IOMEM
help
This option enables the generic thermal MMIO driver that will use
@@ -496,7 +496,7 @@ config SPRD_THERMAL
config KHADAS_MCU_FAN_THERMAL
tristate "Khadas MCU controller FAN cooling support"
- depends on OF || COMPILE_TEST
+ depends on OF
depends on MFD_KHADAS_MCU
select MFD_CORE
select REGMAP
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 80d4e0676083..365489bf4b8c 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -527,7 +527,7 @@ static void int3400_setup_gddv(struct int3400_thermal_priv *priv)
priv->data_vault = kmemdup(obj->package.elements[0].buffer.pointer,
obj->package.elements[0].buffer.length,
GFP_KERNEL);
- if (!priv->data_vault)
+ if (ZERO_OR_NULL_PTR(priv->data_vault))
goto out_free;
bin_attr_data_vault.private = priv->data_vault;
@@ -597,7 +597,7 @@ static int int3400_thermal_probe(struct platform_device *pdev)
goto free_imok;
}
- if (priv->data_vault) {
+ if (!ZERO_OR_NULL_PTR(priv->data_vault)) {
result = sysfs_create_group(&pdev->dev.kobj,
&data_attribute_group);
if (result)
@@ -615,7 +615,8 @@ static int int3400_thermal_probe(struct platform_device *pdev)
free_sysfs:
cleanup_odvp(priv);
if (priv->data_vault) {
- sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group);
+ if (!ZERO_OR_NULL_PTR(priv->data_vault))
+ sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group);
kfree(priv->data_vault);
}
free_uuid:
@@ -647,7 +648,7 @@ static int int3400_thermal_remove(struct platform_device *pdev)
if (!priv->rel_misc_dev_res)
acpi_thermal_rel_misc_device_remove(priv->adev->handle);
- if (priv->data_vault)
+ if (!ZERO_OR_NULL_PTR(priv->data_vault))
sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group);
sysfs_remove_group(&pdev->dev.kobj, &uuid_attribute_group);
sysfs_remove_group(&pdev->dev.kobj, &imok_attribute_group);
diff --git a/drivers/thermal/intel/intel_tcc_cooling.c b/drivers/thermal/intel/intel_tcc_cooling.c
index a9596e7562ea..95adac427b6f 100644
--- a/drivers/thermal/intel/intel_tcc_cooling.c
+++ b/drivers/thermal/intel/intel_tcc_cooling.c
@@ -81,7 +81,9 @@ static const struct x86_cpu_id tcc_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, NULL),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, NULL),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, NULL),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, NULL),
{}
};
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 6a5d0ae5d7a4..50d50cec7774 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -1329,6 +1329,7 @@ free_tz:
kfree(tz);
return ERR_PTR(result);
}
+EXPORT_SYMBOL_GPL(thermal_zone_device_register_with_trips);
struct thermal_zone_device *thermal_zone_device_register(const char *type, int ntrips, int mask,
void *devdata, struct thermal_zone_device_ops *ops,
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index 5018459e8dd9..3a8d6e747c25 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -813,12 +813,13 @@ static const struct attribute_group cooling_device_stats_attr_group = {
static void cooling_device_stats_setup(struct thermal_cooling_device *cdev)
{
+ const struct attribute_group *stats_attr_group = NULL;
struct cooling_dev_stats *stats;
unsigned long states;
int var;
if (cdev->ops->get_max_state(cdev, &states))
- return;
+ goto out;
states++; /* Total number of states is highest state + 1 */
@@ -828,7 +829,7 @@ static void cooling_device_stats_setup(struct thermal_cooling_device *cdev)
stats = kzalloc(var, GFP_KERNEL);
if (!stats)
- return;
+ goto out;
stats->time_in_state = (ktime_t *)(stats + 1);
stats->trans_table = (unsigned int *)(stats->time_in_state + states);
@@ -838,9 +839,12 @@ static void cooling_device_stats_setup(struct thermal_cooling_device *cdev)
spin_lock_init(&stats->lock);
+ stats_attr_group = &cooling_device_stats_attr_group;
+
+out:
/* Fill the empty slot left in cooling_device_attr_groups */
var = ARRAY_SIZE(cooling_device_attr_groups) - 2;
- cooling_device_attr_groups[var] = &cooling_device_stats_attr_group;
+ cooling_device_attr_groups[var] = stats_attr_group;
}
static void cooling_device_stats_destroy(struct thermal_cooling_device *cdev)
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index afb2d373dd47..81e7f64c1739 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -12,7 +12,7 @@
* (non hardware specific) changes to serial.c.
*
* The port is registered with the tty driver as minor device 64, and
- * therefore other ports should should only use 65 upwards.
+ * therefore other ports should only use 65 upwards.
*
* Richard Lucock 28/12/99
*
@@ -51,6 +51,7 @@
#include <linux/seq_file.h>
#include <linux/serial.h>
#include <linux/serial_reg.h>
+#include <linux/serial_core.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
@@ -283,12 +284,12 @@ static void transmit_chars(struct serial_state *info)
amiga_custom.serdat = info->xmit.buf[info->xmit.tail++] | 0x100;
mb();
- info->xmit.tail = info->xmit.tail & (SERIAL_XMIT_SIZE-1);
+ info->xmit.tail = info->xmit.tail & (UART_XMIT_SIZE - 1);
info->icount.tx++;
if (CIRC_CNT(info->xmit.head,
info->xmit.tail,
- SERIAL_XMIT_SIZE) < WAKEUP_CHARS)
+ UART_XMIT_SIZE) < WAKEUP_CHARS)
tty_wakeup(info->tport.tty);
#ifdef SERIAL_DEBUG_INTR
@@ -708,13 +709,13 @@ static int rs_put_char(struct tty_struct *tty, unsigned char ch)
local_irq_save(flags);
if (CIRC_SPACE(info->xmit.head,
info->xmit.tail,
- SERIAL_XMIT_SIZE) == 0) {
+ UART_XMIT_SIZE) == 0) {
local_irq_restore(flags);
return 0;
}
info->xmit.buf[info->xmit.head++] = ch;
- info->xmit.head &= SERIAL_XMIT_SIZE-1;
+ info->xmit.head &= UART_XMIT_SIZE - 1;
local_irq_restore(flags);
return 1;
}
@@ -753,15 +754,14 @@ static int rs_write(struct tty_struct * tty, const unsigned char *buf, int count
while (1) {
c = CIRC_SPACE_TO_END(info->xmit.head,
info->xmit.tail,
- SERIAL_XMIT_SIZE);
+ UART_XMIT_SIZE);
if (count < c)
c = count;
if (c <= 0) {
break;
}
memcpy(info->xmit.buf + info->xmit.head, buf, c);
- info->xmit.head = ((info->xmit.head + c) &
- (SERIAL_XMIT_SIZE-1));
+ info->xmit.head = (info->xmit.head + c) & (UART_XMIT_SIZE - 1);
buf += c;
count -= c;
ret += c;
@@ -788,14 +788,14 @@ static unsigned int rs_write_room(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
- return CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
+ return CIRC_SPACE(info->xmit.head, info->xmit.tail, UART_XMIT_SIZE);
}
static unsigned int rs_chars_in_buffer(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
- return CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
+ return CIRC_CNT(info->xmit.head, info->xmit.tail, UART_XMIT_SIZE);
}
static void rs_flush_buffer(struct tty_struct *tty)
diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c
index 31dceb5039b5..e81701a66429 100644
--- a/drivers/tty/mips_ejtag_fdc.c
+++ b/drivers/tty/mips_ejtag_fdc.c
@@ -916,7 +916,7 @@ static int mips_ejtag_fdc_tty_probe(struct mips_cdmm_device *dev)
mips_ejtag_fdc_write(priv, REG_FDCFG, cfg);
/* Make each port's xmit FIFO big enough to fill FDC TX FIFO */
- priv->xmit_size = min(tx_fifo * 4, (unsigned int)SERIAL_XMIT_SIZE);
+ priv->xmit_size = min(tx_fifo * 4, (unsigned int)UART_XMIT_SIZE);
driver = tty_alloc_driver(NUM_TTY_CHANNELS, TTY_DRIVER_REAL_RAW);
if (IS_ERR(driver))
@@ -1222,7 +1222,7 @@ static void kgdbfdc_push_one(void)
/* Construct a word from any data in buffer */
word = mips_ejtag_fdc_encode(bufs, &kgdbfdc_wbuflen, 1);
- /* Relocate any remaining data to beginnning of buffer */
+ /* Relocate any remaining data to beginning of buffer */
kgdbfdc_wbuflen -= word.bytes;
for (i = 0; i < kgdbfdc_wbuflen; ++i)
kgdbfdc_wbuf[i] = kgdbfdc_wbuf[i + word.bytes];
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index fd4d24f61c46..caa5c14ed57f 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -5,6 +5,14 @@
*
* * THIS IS A DEVELOPMENT SNAPSHOT IT IS NOT A FINAL RELEASE *
*
+ * Outgoing path:
+ * tty -> DLCI fifo -> scheduler -> GSM MUX data queue ---o-> ldisc
+ * control message -> GSM MUX control queue --´
+ *
+ * Incoming path:
+ * ldisc -> gsm_queue() -o--> tty
+ * `-> gsm_control_response()
+ *
* TO DO:
* Mostly done: ioctls for setting modes/timing
* Partly done: hooks so you can pull off frames to non tty devs
@@ -210,6 +218,9 @@ struct gsm_mux {
/* Events on the GSM channel */
wait_queue_head_t event;
+ /* ldisc send work */
+ struct work_struct tx_work;
+
/* Bits for GSM mode decoding */
/* Framing Layer */
@@ -235,14 +246,17 @@ struct gsm_mux {
struct gsm_dlci *dlci[NUM_DLCI];
int old_c_iflag; /* termios c_iflag value before attach */
bool constipated; /* Asked by remote to shut up */
+ bool has_devices; /* Devices were registered */
spinlock_t tx_lock;
unsigned int tx_bytes; /* TX data outstanding */
#define TX_THRESH_HI 8192
#define TX_THRESH_LO 2048
- struct list_head tx_list; /* Pending data packets */
+ struct list_head tx_ctrl_list; /* Pending control packets */
+ struct list_head tx_data_list; /* Pending data packets */
/* Control messages */
+ struct timer_list kick_timer; /* Kick TX queuing on timeout */
struct timer_list t2_timer; /* Retransmit timer for commands */
int cretries; /* Command retry counter */
struct gsm_control *pending_cmd;/* Our current pending command */
@@ -369,6 +383,11 @@ static const u8 gsm_fcs8[256] = {
static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len);
static int gsm_modem_update(struct gsm_dlci *dlci, u8 brk);
+static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len,
+ u8 ctrl);
+static int gsm_send_packet(struct gsm_mux *gsm, struct gsm_msg *msg);
+static void gsmld_write_trigger(struct gsm_mux *gsm);
+static void gsmld_write_task(struct work_struct *work);
/**
* gsm_fcs_add - update FCS
@@ -420,6 +439,27 @@ static int gsm_read_ea(unsigned int *val, u8 c)
}
/**
+ * gsm_read_ea_val - read a value until EA
+ * @val: variable holding value
+ * @data: buffer of data
+ * @dlen: length of data
+ *
+ * Processes an EA value. Updates the passed variable and
+ * returns the processed data length.
+ */
+static unsigned int gsm_read_ea_val(unsigned int *val, const u8 *data, int dlen)
+{
+ unsigned int len = 0;
+
+ for (; dlen > 0; dlen--) {
+ len++;
+ if (gsm_read_ea(val, *data++))
+ break;
+ }
+ return len;
+}
+
+/**
* gsm_encode_modem - encode modem data bits
* @dlci: DLCI to encode from
*
@@ -464,6 +504,68 @@ static void gsm_hex_dump_bytes(const char *fname, const u8 *data,
}
/**
+ * gsm_register_devices - register all tty devices for a given mux index
+ *
+ * @driver: the tty driver that describes the tty devices
+ * @index: the mux number is used to calculate the minor numbers of the
+ * ttys for this mux and may differ from the position in the
+ * mux array.
+ */
+static int gsm_register_devices(struct tty_driver *driver, unsigned int index)
+{
+ struct device *dev;
+ int i;
+ unsigned int base;
+
+ if (!driver || index >= MAX_MUX)
+ return -EINVAL;
+
+ base = index * NUM_DLCI; /* first minor for this index */
+ for (i = 1; i < NUM_DLCI; i++) {
+ /* Don't register device 0 - this is the control channel
+ * and not a usable tty interface
+ */
+ dev = tty_register_device(gsm_tty_driver, base + i, NULL);
+ if (IS_ERR(dev)) {
+ if (debug & 8)
+ pr_info("%s failed to register device minor %u",
+ __func__, base + i);
+ for (i--; i >= 1; i--)
+ tty_unregister_device(gsm_tty_driver, base + i);
+ return PTR_ERR(dev);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * gsm_unregister_devices - unregister all tty devices for a given mux index
+ *
+ * @driver: the tty driver that describes the tty devices
+ * @index: the mux number is used to calculate the minor numbers of the
+ * ttys for this mux and may differ from the position in the
+ * mux array.
+ */
+static void gsm_unregister_devices(struct tty_driver *driver,
+ unsigned int index)
+{
+ int i;
+ unsigned int base;
+
+ if (!driver || index >= MAX_MUX)
+ return;
+
+ base = index * NUM_DLCI; /* first minor for this index */
+ for (i = 1; i < NUM_DLCI; i++) {
+ /* Don't unregister device 0 - this is the control
+ * channel and not a usable tty interface
+ */
+ tty_unregister_device(gsm_tty_driver, base + i);
+ }
+}
+
+/**
* gsm_print_packet - display a frame for debug
* @hdr: header to print before decode
* @addr: address EA from the frame
@@ -570,57 +672,73 @@ static int gsm_stuff_frame(const u8 *input, u8 *output, int len)
* @cr: command/response bit seen as initiator
* @control: control byte including PF bit
*
- * Format up and transmit a control frame. These do not go via the
- * queueing logic as they should be transmitted ahead of data when
- * they are needed.
- *
- * FIXME: Lock versus data TX path
+ * Format up and transmit a control frame. These should be transmitted
+ * ahead of data when they are needed.
*/
-
-static void gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
+static int gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
{
- int len;
- u8 cbuf[10];
- u8 ibuf[3];
+ struct gsm_msg *msg;
+ u8 *dp;
int ocr;
+ unsigned long flags;
+
+ msg = gsm_data_alloc(gsm, addr, 0, control);
+ if (!msg)
+ return -ENOMEM;
/* toggle C/R coding if not initiator */
ocr = cr ^ (gsm->initiator ? 0 : 1);
- switch (gsm->encoding) {
- case 0:
- cbuf[0] = GSM0_SOF;
- cbuf[1] = (addr << 2) | (ocr << 1) | EA;
- cbuf[2] = control;
- cbuf[3] = EA; /* Length of data = 0 */
- cbuf[4] = 0xFF - gsm_fcs_add_block(INIT_FCS, cbuf + 1, 3);
- cbuf[5] = GSM0_SOF;
- len = 6;
- break;
- case 1:
- case 2:
- /* Control frame + packing (but not frame stuffing) in mode 1 */
- ibuf[0] = (addr << 2) | (ocr << 1) | EA;
- ibuf[1] = control;
- ibuf[2] = 0xFF - gsm_fcs_add_block(INIT_FCS, ibuf, 2);
- /* Stuffing may double the size worst case */
- len = gsm_stuff_frame(ibuf, cbuf + 1, 3);
- /* Now add the SOF markers */
- cbuf[0] = GSM1_SOF;
- cbuf[len + 1] = GSM1_SOF;
- /* FIXME: we can omit the lead one in many cases */
- len += 2;
- break;
- default:
- WARN_ON(1);
- return;
- }
- gsmld_output(gsm, cbuf, len);
- if (!gsm->initiator) {
- cr = cr & gsm->initiator;
- control = control & ~PF;
+ msg->data -= 3;
+ dp = msg->data;
+ *dp++ = (addr << 2) | (ocr << 1) | EA;
+ *dp++ = control;
+
+ if (gsm->encoding == 0)
+ *dp++ = EA; /* Length of data = 0 */
+
+ *dp = 0xFF - gsm_fcs_add_block(INIT_FCS, msg->data, dp - msg->data);
+ msg->len = (dp - msg->data) + 1;
+
+ gsm_print_packet("Q->", addr, cr, control, NULL, 0);
+
+ spin_lock_irqsave(&gsm->tx_lock, flags);
+ list_add_tail(&msg->list, &gsm->tx_ctrl_list);
+ gsm->tx_bytes += msg->len;
+ spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ gsmld_write_trigger(gsm);
+
+ return 0;
+}
+
+/**
+ * gsm_dlci_clear_queues - remove outstanding data for a DLCI
+ * @gsm: mux
+ * @dlci: clear for this DLCI
+ *
+ * Clears the data queues for a given DLCI.
+ */
+static void gsm_dlci_clear_queues(struct gsm_mux *gsm, struct gsm_dlci *dlci)
+{
+ struct gsm_msg *msg, *nmsg;
+ int addr = dlci->addr;
+ unsigned long flags;
+
+ /* Clear DLCI write fifo first */
+ spin_lock_irqsave(&dlci->lock, flags);
+ kfifo_reset(&dlci->fifo);
+ spin_unlock_irqrestore(&dlci->lock, flags);
+
+ /* Clear data packets in MUX write queue */
+ spin_lock_irqsave(&gsm->tx_lock, flags);
+ list_for_each_entry_safe(msg, nmsg, &gsm->tx_data_list, list) {
+ if (msg->addr != addr)
+ continue;
+ gsm->tx_bytes -= msg->len;
+ list_del(&msg->list);
+ kfree(msg);
}
- gsm_print_packet("-->", addr, cr, control, NULL, 0);
+ spin_unlock_irqrestore(&gsm->tx_lock, flags);
}
/**
@@ -683,59 +801,151 @@ static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len,
}
/**
- * gsm_data_kick - poke the queue
+ * gsm_send_packet - sends a single packet
* @gsm: GSM Mux
- * @dlci: DLCI sending the data
+ * @msg: packet to send
*
- * The tty device has called us to indicate that room has appeared in
- * the transmit queue. Ram more data into the pipe if we have any
- * If we have been flow-stopped by a CMD_FCOFF, then we can only
- * send messages on DLCI0 until CMD_FCON
+ * The given packet is encoded and sent out. No memory is freed.
+ * The caller must hold the gsm tx lock.
+ */
+static int gsm_send_packet(struct gsm_mux *gsm, struct gsm_msg *msg)
+{
+ int len, ret;
+
+
+ if (gsm->encoding == 0) {
+ gsm->txframe[0] = GSM0_SOF;
+ memcpy(gsm->txframe + 1, msg->data, msg->len);
+ gsm->txframe[msg->len + 1] = GSM0_SOF;
+ len = msg->len + 2;
+ } else {
+ gsm->txframe[0] = GSM1_SOF;
+ len = gsm_stuff_frame(msg->data, gsm->txframe + 1, msg->len);
+ gsm->txframe[len + 1] = GSM1_SOF;
+ len += 2;
+ }
+
+ if (debug & 4)
+ gsm_hex_dump_bytes(__func__, gsm->txframe, len);
+ gsm_print_packet("-->", msg->addr, gsm->initiator, msg->ctrl, msg->data,
+ msg->len);
+
+ ret = gsmld_output(gsm, gsm->txframe, len);
+ if (ret <= 0)
+ return ret;
+ /* FIXME: Can eliminate one SOF in many more cases */
+ gsm->tx_bytes -= msg->len;
+
+ return 0;
+}
+
+/**
+ * gsm_is_flow_ctrl_msg - checks if flow control message
+ * @msg: message to check
*
- * FIXME: lock against link layer control transmissions
+ * Returns true if the given message is a flow control command of the
+ * control channel. False is returned in any other case.
*/
+static bool gsm_is_flow_ctrl_msg(struct gsm_msg *msg)
+{
+ unsigned int cmd;
+
+ if (msg->addr > 0)
+ return false;
+
+ switch (msg->ctrl & ~PF) {
+ case UI:
+ case UIH:
+ cmd = 0;
+ if (gsm_read_ea_val(&cmd, msg->data + 2, msg->len - 2) < 1)
+ break;
+ switch (cmd & ~PF) {
+ case CMD_FCOFF:
+ case CMD_FCON:
+ return true;
+ }
+ break;
+ }
+
+ return false;
+}
-static void gsm_data_kick(struct gsm_mux *gsm, struct gsm_dlci *dlci)
+/**
+ * gsm_data_kick - poke the queue
+ * @gsm: GSM Mux
+ *
+ * The tty device has called us to indicate that room has appeared in
+ * the transmit queue. Ram more data into the pipe if we have any.
+ * If we have been flow-stopped by a CMD_FCOFF, then we can only
+ * send messages on DLCI0 until CMD_FCON. The caller must hold
+ * the gsm tx lock.
+ */
+static int gsm_data_kick(struct gsm_mux *gsm)
{
struct gsm_msg *msg, *nmsg;
- int len;
+ struct gsm_dlci *dlci;
+ int ret;
- list_for_each_entry_safe(msg, nmsg, &gsm->tx_list, list) {
- if (gsm->constipated && msg->addr)
- continue;
- if (gsm->encoding != 0) {
- gsm->txframe[0] = GSM1_SOF;
- len = gsm_stuff_frame(msg->data,
- gsm->txframe + 1, msg->len);
- gsm->txframe[len + 1] = GSM1_SOF;
- len += 2;
- } else {
- gsm->txframe[0] = GSM0_SOF;
- memcpy(gsm->txframe + 1 , msg->data, msg->len);
- gsm->txframe[msg->len + 1] = GSM0_SOF;
- len = msg->len + 2;
- }
+ clear_bit(TTY_DO_WRITE_WAKEUP, &gsm->tty->flags);
- if (debug & 4)
- gsm_hex_dump_bytes(__func__, gsm->txframe, len);
- if (gsmld_output(gsm, gsm->txframe, len) <= 0)
+ /* Serialize control messages and control channel messages first */
+ list_for_each_entry_safe(msg, nmsg, &gsm->tx_ctrl_list, list) {
+ if (gsm->constipated && !gsm_is_flow_ctrl_msg(msg))
+ continue;
+ ret = gsm_send_packet(gsm, msg);
+ switch (ret) {
+ case -ENOSPC:
+ return -ENOSPC;
+ case -ENODEV:
+ /* ldisc not open */
+ gsm->tx_bytes -= msg->len;
+ list_del(&msg->list);
+ kfree(msg);
+ continue;
+ default:
+ if (ret >= 0) {
+ list_del(&msg->list);
+ kfree(msg);
+ }
break;
- /* FIXME: Can eliminate one SOF in many more cases */
- gsm->tx_bytes -= msg->len;
-
- list_del(&msg->list);
- kfree(msg);
+ }
+ }
- if (dlci) {
- tty_port_tty_wakeup(&dlci->port);
- } else {
- int i = 0;
+ if (gsm->constipated)
+ return -EAGAIN;
- for (i = 0; i < NUM_DLCI; i++)
- if (gsm->dlci[i])
- tty_port_tty_wakeup(&gsm->dlci[i]->port);
+ /* Serialize other channels */
+ if (list_empty(&gsm->tx_data_list))
+ return 0;
+ list_for_each_entry_safe(msg, nmsg, &gsm->tx_data_list, list) {
+ dlci = gsm->dlci[msg->addr];
+ /* Send only messages for DLCIs with valid state */
+ if (dlci->state != DLCI_OPEN) {
+ gsm->tx_bytes -= msg->len;
+ list_del(&msg->list);
+ kfree(msg);
+ continue;
+ }
+ ret = gsm_send_packet(gsm, msg);
+ switch (ret) {
+ case -ENOSPC:
+ return -ENOSPC;
+ case -ENODEV:
+ /* ldisc not open */
+ gsm->tx_bytes -= msg->len;
+ list_del(&msg->list);
+ kfree(msg);
+ continue;
+ default:
+ if (ret >= 0) {
+ list_del(&msg->list);
+ kfree(msg);
+ }
+ break;
}
}
+
+ return 1;
}
/**
@@ -784,9 +994,22 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
msg->data = dp;
/* Add to the actual output queue */
- list_add_tail(&msg->list, &gsm->tx_list);
+ switch (msg->ctrl & ~PF) {
+ case UI:
+ case UIH:
+ if (msg->addr > 0) {
+ list_add_tail(&msg->list, &gsm->tx_data_list);
+ break;
+ }
+ fallthrough;
+ default:
+ list_add_tail(&msg->list, &gsm->tx_ctrl_list);
+ break;
+ }
gsm->tx_bytes += msg->len;
- gsm_data_kick(gsm, dlci);
+
+ gsmld_write_trigger(gsm);
+ mod_timer(&gsm->kick_timer, jiffies + 10 * gsm->t1 * HZ / 100);
}
/**
@@ -823,41 +1046,48 @@ static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci)
{
struct gsm_msg *msg;
u8 *dp;
- int len, total_size, size;
- int h = dlci->adaption - 1;
+ int h, len, size;
- total_size = 0;
- while (1) {
- len = kfifo_len(&dlci->fifo);
- if (len == 0)
- return total_size;
-
- /* MTU/MRU count only the data bits */
- if (len > gsm->mtu)
- len = gsm->mtu;
-
- size = len + h;
-
- msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype);
- /* FIXME: need a timer or something to kick this so it can't
- get stuck with no work outstanding and no buffer free */
- if (msg == NULL)
- return -ENOMEM;
- dp = msg->data;
- switch (dlci->adaption) {
- case 1: /* Unstructured */
- break;
- case 2: /* Unstructed with modem bits.
- Always one byte as we never send inline break data */
- *dp++ = (gsm_encode_modem(dlci) << 1) | EA;
- break;
- }
- WARN_ON(kfifo_out_locked(&dlci->fifo, dp , len, &dlci->lock) != len);
- __gsm_data_queue(dlci, msg);
- total_size += size;
+ /* for modem bits without break data */
+ h = ((dlci->adaption == 1) ? 0 : 1);
+
+ len = kfifo_len(&dlci->fifo);
+ if (len == 0)
+ return 0;
+
+ /* MTU/MRU count only the data bits but watch adaption mode */
+ if ((len + h) > gsm->mtu)
+ len = gsm->mtu - h;
+
+ size = len + h;
+
+ msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype);
+ if (!msg)
+ return -ENOMEM;
+ dp = msg->data;
+ switch (dlci->adaption) {
+ case 1: /* Unstructured */
+ break;
+ case 2: /* Unstructured with modem bits.
+ * Always one byte as we never send inline break data
+ */
+ *dp++ = (gsm_encode_modem(dlci) << 1) | EA;
+ break;
+ default:
+ pr_err("%s: unsupported adaption %d\n", __func__,
+ dlci->adaption);
+ break;
}
+
+ WARN_ON(len != kfifo_out_locked(&dlci->fifo, dp, len,
+ &dlci->lock));
+
+ /* Notify upper layer about available send space. */
+ tty_port_tty_wakeup(&dlci->port);
+
+ __gsm_data_queue(dlci, msg);
/* Bytes of data we used up */
- return total_size;
+ return size;
}
/**
@@ -908,9 +1138,6 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
size = len + overhead;
msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype);
-
- /* FIXME: need a timer or something to kick this so it can't
- get stuck with no work outstanding and no buffer free */
if (msg == NULL) {
skb_queue_tail(&dlci->skb_list, dlci->skb);
dlci->skb = NULL;
@@ -1006,32 +1233,43 @@ static int gsm_dlci_modem_output(struct gsm_mux *gsm, struct gsm_dlci *dlci,
* renegotiate DLCI priorities with optional stuff. Needs optimising.
*/
-static void gsm_dlci_data_sweep(struct gsm_mux *gsm)
+static int gsm_dlci_data_sweep(struct gsm_mux *gsm)
{
- int len;
/* Priority ordering: We should do priority with RR of the groups */
- int i = 1;
-
- while (i < NUM_DLCI) {
- struct gsm_dlci *dlci;
+ int i, len, ret = 0;
+ bool sent;
+ struct gsm_dlci *dlci;
- if (gsm->tx_bytes > TX_THRESH_HI)
- break;
- dlci = gsm->dlci[i];
- if (dlci == NULL || dlci->constipated) {
- i++;
- continue;
+ while (gsm->tx_bytes < TX_THRESH_HI) {
+ for (sent = false, i = 1; i < NUM_DLCI; i++) {
+ dlci = gsm->dlci[i];
+ /* skip unused or blocked channel */
+ if (!dlci || dlci->constipated)
+ continue;
+ /* skip channels with invalid state */
+ if (dlci->state != DLCI_OPEN)
+ continue;
+ /* count the sent data per adaption */
+ if (dlci->adaption < 3 && !dlci->net)
+ len = gsm_dlci_data_output(gsm, dlci);
+ else
+ len = gsm_dlci_data_output_framed(gsm, dlci);
+ /* on error exit */
+ if (len < 0)
+ return ret;
+ if (len > 0) {
+ ret++;
+ sent = true;
+ /* The lower DLCs can starve the higher DLCs! */
+ break;
+ }
+ /* try next */
}
- if (dlci->adaption < 3 && !dlci->net)
- len = gsm_dlci_data_output(gsm, dlci);
- else
- len = gsm_dlci_data_output_framed(gsm, dlci);
- if (len < 0)
+ if (!sent)
break;
- /* DLCI empty - try the next */
- if (len == 0)
- i++;
- }
+ };
+
+ return ret;
}
/**
@@ -1277,7 +1515,6 @@ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command,
const u8 *data, int clen)
{
u8 buf[1];
- unsigned long flags;
switch (command) {
case CMD_CLD: {
@@ -1299,9 +1536,7 @@ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command,
gsm->constipated = false;
gsm_control_reply(gsm, CMD_FCON, NULL, 0);
/* Kick the link in case it is idling */
- spin_lock_irqsave(&gsm->tx_lock, flags);
- gsm_data_kick(gsm, NULL);
- spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ gsmld_write_trigger(gsm);
break;
case CMD_FCOFF:
/* Modem wants us to STFU */
@@ -1407,7 +1642,7 @@ static void gsm_control_retransmit(struct timer_list *t)
spin_lock_irqsave(&gsm->control_lock, flags);
ctrl = gsm->pending_cmd;
if (ctrl) {
- if (gsm->cretries == 0) {
+ if (gsm->cretries == 0 || !gsm->dlci[0] || gsm->dlci[0]->dead) {
gsm->pending_cmd = NULL;
ctrl->error = -ETIMEDOUT;
ctrl->done = 1;
@@ -1504,25 +1739,24 @@ static int gsm_control_wait(struct gsm_mux *gsm, struct gsm_control *control)
static void gsm_dlci_close(struct gsm_dlci *dlci)
{
- unsigned long flags;
-
del_timer(&dlci->t1);
if (debug & 8)
pr_debug("DLCI %d goes closed.\n", dlci->addr);
dlci->state = DLCI_CLOSED;
+ /* Prevent us from sending data before the link is up again */
+ dlci->constipated = true;
if (dlci->addr != 0) {
tty_port_tty_hangup(&dlci->port, false);
- spin_lock_irqsave(&dlci->lock, flags);
- kfifo_reset(&dlci->fifo);
- spin_unlock_irqrestore(&dlci->lock, flags);
+ gsm_dlci_clear_queues(dlci->gsm, dlci);
/* Ensure that gsmtty_open() can return. */
tty_port_set_initialized(&dlci->port, 0);
wake_up_interruptible(&dlci->port.open_wait);
} else
dlci->gsm->dead = true;
- wake_up(&dlci->gsm->event);
/* A DLCI 0 close is a MUX termination so we need to kick that
back to userspace somehow */
+ gsm_dlci_data_kick(dlci);
+ wake_up(&dlci->gsm->event);
}
/**
@@ -1539,11 +1773,13 @@ static void gsm_dlci_open(struct gsm_dlci *dlci)
del_timer(&dlci->t1);
/* This will let a tty open continue */
dlci->state = DLCI_OPEN;
+ dlci->constipated = false;
if (debug & 8)
pr_debug("DLCI %d goes open.\n", dlci->addr);
/* Send current modem state */
if (dlci->addr)
gsm_modem_update(dlci, 0);
+ gsm_dlci_data_kick(dlci);
wake_up(&dlci->gsm->event);
}
@@ -1569,8 +1805,8 @@ static void gsm_dlci_t1(struct timer_list *t)
switch (dlci->state) {
case DLCI_OPENING:
- dlci->retries--;
if (dlci->retries) {
+ dlci->retries--;
gsm_command(dlci->gsm, dlci->addr, SABM|PF);
mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
} else if (!dlci->addr && gsm->control == (DM | PF)) {
@@ -1585,8 +1821,8 @@ static void gsm_dlci_t1(struct timer_list *t)
break;
case DLCI_CLOSING:
- dlci->retries--;
if (dlci->retries) {
+ dlci->retries--;
gsm_command(dlci->gsm, dlci->addr, DISC|PF);
mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
} else
@@ -1620,6 +1856,25 @@ static void gsm_dlci_begin_open(struct gsm_dlci *dlci)
}
/**
+ * gsm_dlci_set_opening - change state to opening
+ * @dlci: DLCI to open
+ *
+ * Change internal state to wait for DLCI open from initiator side.
+ * We set off timers and responses upon reception of an SABM.
+ */
+static void gsm_dlci_set_opening(struct gsm_dlci *dlci)
+{
+ switch (dlci->state) {
+ case DLCI_CLOSED:
+ case DLCI_CLOSING:
+ dlci->state = DLCI_OPENING;
+ break;
+ default:
+ break;
+ }
+}
+
+/**
* gsm_dlci_begin_close - start channel open procedure
* @dlci: DLCI to open
*
@@ -1728,6 +1983,30 @@ static void gsm_dlci_command(struct gsm_dlci *dlci, const u8 *data, int len)
}
}
+/**
+ * gsm_kick_timer - transmit if possible
+ * @t: timer contained in our gsm object
+ *
+ * Transmit data from DLCIs if the queue is empty. We can't rely on
+ * a tty wakeup except when we filled the pipe so we need to fire off
+ * new data ourselves in other cases.
+ */
+static void gsm_kick_timer(struct timer_list *t)
+{
+ struct gsm_mux *gsm = from_timer(gsm, t, kick_timer);
+ unsigned long flags;
+ int sent = 0;
+
+ spin_lock_irqsave(&gsm->tx_lock, flags);
+ /* If we have nothing running then we need to fire up */
+ if (gsm->tx_bytes < TX_THRESH_LO)
+ sent = gsm_dlci_data_sweep(gsm);
+ spin_unlock_irqrestore(&gsm->tx_lock, flags);
+
+ if (sent && debug & 4)
+ pr_info("%s TX queue stalled\n", __func__);
+}
+
/*
* Allocate/Free DLCI channels
*/
@@ -1762,10 +2041,13 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
dlci->addr = addr;
dlci->adaption = gsm->adaption;
dlci->state = DLCI_CLOSED;
- if (addr)
+ if (addr) {
dlci->data = gsm_dlci_data;
- else
+ /* Prevent us from sending data before the link is up */
+ dlci->constipated = true;
+ } else {
dlci->data = gsm_dlci_command;
+ }
gsm->dlci[addr] = dlci;
return dlci;
}
@@ -1925,7 +2207,7 @@ static void gsm_queue(struct gsm_mux *gsm)
case UIH:
case UIH|PF:
if (dlci == NULL || dlci->state != DLCI_OPEN) {
- gsm_command(gsm, address, DM|PF);
+ gsm_response(gsm, address, DM|PF);
return;
}
dlci->data(dlci, gsm->buf, gsm->len);
@@ -2048,7 +2330,7 @@ static void gsm1_receive(struct gsm_mux *gsm, unsigned char c)
} else if ((c & ISO_IEC_646_MASK) == XOFF) {
gsm->constipated = false;
/* Kick the link in case it is idling */
- gsm_data_kick(gsm, NULL);
+ gsmld_write_trigger(gsm);
return;
}
if (c == GSM1_SOF) {
@@ -2176,18 +2458,29 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc)
}
/* Finish outstanding timers, making sure they are done */
+ del_timer_sync(&gsm->kick_timer);
del_timer_sync(&gsm->t2_timer);
+ /* Finish writing to ldisc */
+ flush_work(&gsm->tx_work);
+
/* Free up any link layer users and finally the control channel */
+ if (gsm->has_devices) {
+ gsm_unregister_devices(gsm_tty_driver, gsm->num);
+ gsm->has_devices = false;
+ }
for (i = NUM_DLCI - 1; i >= 0; i--)
if (gsm->dlci[i])
gsm_dlci_release(gsm->dlci[i]);
mutex_unlock(&gsm->mutex);
/* Now wipe the queues */
tty_ldisc_flush(gsm->tty);
- list_for_each_entry_safe(txq, ntxq, &gsm->tx_list, list)
+ list_for_each_entry_safe(txq, ntxq, &gsm->tx_ctrl_list, list)
+ kfree(txq);
+ INIT_LIST_HEAD(&gsm->tx_ctrl_list);
+ list_for_each_entry_safe(txq, ntxq, &gsm->tx_data_list, list)
kfree(txq);
- INIT_LIST_HEAD(&gsm->tx_list);
+ INIT_LIST_HEAD(&gsm->tx_data_list);
}
/**
@@ -2202,8 +2495,15 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc)
static int gsm_activate_mux(struct gsm_mux *gsm)
{
struct gsm_dlci *dlci;
+ int ret;
+
+ dlci = gsm_dlci_alloc(gsm, 0);
+ if (dlci == NULL)
+ return -ENOMEM;
+ timer_setup(&gsm->kick_timer, gsm_kick_timer, 0);
timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
+ INIT_WORK(&gsm->tx_work, gsmld_write_task);
init_waitqueue_head(&gsm->event);
spin_lock_init(&gsm->control_lock);
spin_lock_init(&gsm->tx_lock);
@@ -2213,9 +2513,11 @@ static int gsm_activate_mux(struct gsm_mux *gsm)
else
gsm->receive = gsm1_receive;
- dlci = gsm_dlci_alloc(gsm, 0);
- if (dlci == NULL)
- return -ENOMEM;
+ ret = gsm_register_devices(gsm_tty_driver, gsm->num);
+ if (ret)
+ return ret;
+
+ gsm->has_devices = true;
gsm->dead = false; /* Tty opens are now permissible */
return 0;
}
@@ -2308,7 +2610,8 @@ static struct gsm_mux *gsm_alloc_mux(void)
spin_lock_init(&gsm->lock);
mutex_init(&gsm->mutex);
kref_init(&gsm->ref);
- INIT_LIST_HEAD(&gsm->tx_list);
+ INIT_LIST_HEAD(&gsm->tx_ctrl_list);
+ INIT_LIST_HEAD(&gsm->tx_data_list);
gsm->t1 = T1;
gsm->t2 = T2;
@@ -2465,6 +2768,47 @@ static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len)
return gsm->tty->ops->write(gsm->tty, data, len);
}
+
+/**
+ * gsmld_write_trigger - schedule ldisc write task
+ * @gsm: our mux
+ */
+static void gsmld_write_trigger(struct gsm_mux *gsm)
+{
+ if (!gsm || !gsm->dlci[0] || gsm->dlci[0]->dead)
+ return;
+ schedule_work(&gsm->tx_work);
+}
+
+
+/**
+ * gsmld_write_task - ldisc write task
+ * @work: our tx write work
+ *
+ * Writes out data to the ldisc if possible. We are doing this here to
+ * avoid dead-locking. This returns if no space or data is left for output.
+ */
+static void gsmld_write_task(struct work_struct *work)
+{
+ struct gsm_mux *gsm = container_of(work, struct gsm_mux, tx_work);
+ unsigned long flags;
+ int i, ret;
+
+ /* All outstanding control channel and control messages and one data
+ * frame are sent.
+ */
+ ret = -ENODEV;
+ spin_lock_irqsave(&gsm->tx_lock, flags);
+ if (gsm->tty)
+ ret = gsm_data_kick(gsm);
+ spin_unlock_irqrestore(&gsm->tx_lock, flags);
+
+ if (ret >= 0)
+ for (i = 0; i < NUM_DLCI; i++)
+ if (gsm->dlci[i])
+ tty_port_tty_wakeup(&gsm->dlci[i]->port);
+}
+
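The trigger/task pair above is the heart of the deadlock fix: paths that hold gsm->tx_lock or run inside the receive path only schedule work, and the actual write to the ldisc happens later in process context. A minimal sketch of the wiring, assuming the n_gsm.c context; only INIT_WORK(), schedule_work() and flush_work() are taken from the patch itself and the helper names are illustrative:

static void example_init(struct gsm_mux *gsm)
{
	INIT_WORK(&gsm->tx_work, gsmld_write_task);	/* once, when the ldisc is opened */
}

static void example_kick_from_atomic(struct gsm_mux *gsm)
{
	schedule_work(&gsm->tx_work);	/* safe under spinlocks; the write runs later */
}

static void example_cleanup(struct gsm_mux *gsm)
{
	flush_work(&gsm->tx_work);	/* no write may race with mux teardown */
}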
/**
* gsmld_attach_gsm - mode set up
* @tty: our tty structure
@@ -2475,39 +2819,14 @@ static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len)
* will need moving to an ioctl path.
*/
-static int gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
+static void gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
{
- unsigned int base;
- int ret, i;
-
gsm->tty = tty_kref_get(tty);
/* Turn off tty XON/XOFF handling to handle it explicitly. */
gsm->old_c_iflag = tty->termios.c_iflag;
tty->termios.c_iflag &= (IXON | IXOFF);
- ret = gsm_activate_mux(gsm);
- if (ret != 0)
- tty_kref_put(gsm->tty);
- else {
- /* Don't register device 0 - this is the control channel and not
- a usable tty interface */
- base = mux_num_to_base(gsm); /* Base for this MUX */
- for (i = 1; i < NUM_DLCI; i++) {
- struct device *dev;
-
- dev = tty_register_device(gsm_tty_driver,
- base + i, NULL);
- if (IS_ERR(dev)) {
- for (i--; i >= 1; i--)
- tty_unregister_device(gsm_tty_driver,
- base + i);
- return PTR_ERR(dev);
- }
- }
- }
- return ret;
}
-
/**
* gsmld_detach_gsm - stop doing 0710 mux
* @tty: tty attached to the mux
@@ -2518,12 +2837,7 @@ static int gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
static void gsmld_detach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
{
- unsigned int base = mux_num_to_base(gsm); /* Base for this MUX */
- int i;
-
WARN_ON(tty != gsm->tty);
- for (i = 1; i < NUM_DLCI; i++)
- tty_unregister_device(gsm_tty_driver, base + i);
/* Restore tty XON/XOFF handling. */
gsm->tty->termios.c_iflag = gsm->old_c_iflag;
tty_kref_put(gsm->tty);
@@ -2615,7 +2929,6 @@ static void gsmld_close(struct tty_struct *tty)
static int gsmld_open(struct tty_struct *tty)
{
struct gsm_mux *gsm;
- int ret;
if (tty->ops->write == NULL)
return -EINVAL;
@@ -2631,12 +2944,13 @@ static int gsmld_open(struct tty_struct *tty)
/* Attach the initial passive connection */
gsm->encoding = 1;
- ret = gsmld_attach_gsm(tty, gsm);
- if (ret != 0) {
- gsm_cleanup_mux(gsm, false);
- mux_put(gsm);
- }
- return ret;
+ gsmld_attach_gsm(tty, gsm);
+
+ timer_setup(&gsm->kick_timer, gsm_kick_timer, 0);
+ timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
+ INIT_WORK(&gsm->tx_work, gsmld_write_task);
+
+ return 0;
}
/**
@@ -2651,16 +2965,9 @@ static int gsmld_open(struct tty_struct *tty)
static void gsmld_write_wakeup(struct tty_struct *tty)
{
struct gsm_mux *gsm = tty->disc_data;
- unsigned long flags;
/* Queue poll */
- clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
- spin_lock_irqsave(&gsm->tx_lock, flags);
- gsm_data_kick(gsm, NULL);
- if (gsm->tx_bytes < TX_THRESH_LO) {
- gsm_dlci_data_sweep(gsm);
- }
- spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ gsmld_write_trigger(gsm);
}
/**
@@ -2704,11 +3011,24 @@ static ssize_t gsmld_read(struct tty_struct *tty, struct file *file,
static ssize_t gsmld_write(struct tty_struct *tty, struct file *file,
const unsigned char *buf, size_t nr)
{
- int space = tty_write_room(tty);
+ struct gsm_mux *gsm = tty->disc_data;
+ unsigned long flags;
+ int space;
+ int ret;
+
+ if (!gsm)
+ return -ENODEV;
+
+ ret = -ENOBUFS;
+ spin_lock_irqsave(&gsm->tx_lock, flags);
+ space = tty_write_room(tty);
if (space >= nr)
- return tty->ops->write(tty, buf, nr);
- set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
- return -ENOBUFS;
+ ret = tty->ops->write(tty, buf, nr);
+ else
+ set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ spin_unlock_irqrestore(&gsm->tx_lock, flags);
+
+ return ret;
}
/**
@@ -2733,12 +3053,15 @@ static __poll_t gsmld_poll(struct tty_struct *tty, struct file *file,
poll_wait(file, &tty->read_wait, wait);
poll_wait(file, &tty->write_wait, wait);
+
+ if (gsm->dead)
+ mask |= EPOLLHUP;
if (tty_hung_up_p(file))
mask |= EPOLLHUP;
+ if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
+ mask |= EPOLLHUP;
if (!tty_is_writelocked(tty) && tty_write_room(tty) > 0)
mask |= EPOLLOUT | EPOLLWRNORM;
- if (gsm->dead)
- mask |= EPOLLHUP;
return mask;
}
@@ -3174,6 +3497,8 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
/* Start sending off SABM messages */
if (gsm->initiator)
gsm_dlci_begin_open(dlci);
+ else
+ gsm_dlci_set_opening(dlci);
/* And wait for virtual carrier */
return tty_port_block_til_ready(port, tty, filp);
}
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 640c9e871044..3afdd9033a9c 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -118,6 +118,9 @@ struct n_tty_data {
size_t read_tail;
size_t line_start;
+ /* # of chars looked ahead (to find software flow control chars) */
+ size_t lookahead_count;
+
/* protected by output lock */
unsigned int column;
unsigned int canon_column;
@@ -333,6 +336,8 @@ static void reset_buffer_flags(struct n_tty_data *ldata)
ldata->erasing = 0;
bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE);
ldata->push = 0;
+
+ ldata->lookahead_count = 0;
}
static void n_tty_packet_mode_flush(struct tty_struct *tty)
@@ -1225,12 +1230,30 @@ static bool n_tty_is_char_flow_ctrl(struct tty_struct *tty, unsigned char c)
return c == START_CHAR(tty) || c == STOP_CHAR(tty);
}
-/* Returns true if c is consumed as flow-control character */
-static bool n_tty_receive_char_flow_ctrl(struct tty_struct *tty, unsigned char c)
+/**
+ * n_tty_receive_char_flow_ctrl - receive flow control chars
+ * @tty: terminal device
+ * @c: character
+ * @lookahead_done: lookahead has processed this character already
+ *
+ * Receive and process flow control character actions.
+ *
+ * In case lookahead for flow control chars already handled the character in
+ * advance to the normal receive, the actions are skipped during normal
+ * receive.
+ *
+ * Returns true if @c is consumed as a flow-control character; in that case
+ * the character must not be treated as a normal character.
+ */
+static bool n_tty_receive_char_flow_ctrl(struct tty_struct *tty, unsigned char c,
+ bool lookahead_done)
{
if (!n_tty_is_char_flow_ctrl(tty, c))
return false;
+ if (lookahead_done)
+ return true;
+
if (c == START_CHAR(tty)) {
start_tty(tty);
process_echoes(tty);
@@ -1242,11 +1265,12 @@ static bool n_tty_receive_char_flow_ctrl(struct tty_struct *tty, unsigned char c
return true;
}
-static void n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
+static void n_tty_receive_char_special(struct tty_struct *tty, unsigned char c,
+ bool lookahead_done)
{
struct n_tty_data *ldata = tty->disc_data;
- if (I_IXON(tty) && n_tty_receive_char_flow_ctrl(tty, c))
+ if (I_IXON(tty) && n_tty_receive_char_flow_ctrl(tty, c, lookahead_done))
return;
if (L_ISIG(tty)) {
@@ -1401,7 +1425,8 @@ static void n_tty_receive_char(struct tty_struct *tty, unsigned char c)
put_tty_queue(c, ldata);
}
-static void n_tty_receive_char_closing(struct tty_struct *tty, unsigned char c)
+static void n_tty_receive_char_closing(struct tty_struct *tty, unsigned char c,
+ bool lookahead_done)
{
if (I_ISTRIP(tty))
c &= 0x7f;
@@ -1409,12 +1434,10 @@ static void n_tty_receive_char_closing(struct tty_struct *tty, unsigned char c)
c = tolower(c);
if (I_IXON(tty)) {
- if (c == STOP_CHAR(tty))
- stop_tty(tty);
- else if (c == START_CHAR(tty) ||
- (tty->flow.stopped && !tty->flow.tco_stopped && I_IXANY(tty) &&
- c != INTR_CHAR(tty) && c != QUIT_CHAR(tty) &&
- c != SUSP_CHAR(tty))) {
+ if (!n_tty_receive_char_flow_ctrl(tty, c, lookahead_done) &&
+ tty->flow.stopped && !tty->flow.tco_stopped && I_IXANY(tty) &&
+ c != INTR_CHAR(tty) && c != QUIT_CHAR(tty) &&
+ c != SUSP_CHAR(tty)) {
start_tty(tty);
process_echoes(tty);
}
@@ -1457,6 +1480,27 @@ n_tty_receive_char_lnext(struct tty_struct *tty, unsigned char c, char flag)
n_tty_receive_char_flagged(tty, c, flag);
}
+/* Caller must ensure count > 0 */
+static void n_tty_lookahead_flow_ctrl(struct tty_struct *tty, const unsigned char *cp,
+ const unsigned char *fp, unsigned int count)
+{
+ struct n_tty_data *ldata = tty->disc_data;
+ unsigned char flag = TTY_NORMAL;
+
+ ldata->lookahead_count += count;
+
+ if (!I_IXON(tty))
+ return;
+
+ while (count--) {
+ if (fp)
+ flag = *fp++;
+ if (likely(flag == TTY_NORMAL))
+ n_tty_receive_char_flow_ctrl(tty, *cp, false);
+ cp++;
+ }
+}
+
static void
n_tty_receive_buf_real_raw(struct tty_struct *tty, const unsigned char *cp,
const char *fp, int count)
@@ -1496,7 +1540,7 @@ n_tty_receive_buf_raw(struct tty_struct *tty, const unsigned char *cp,
static void
n_tty_receive_buf_closing(struct tty_struct *tty, const unsigned char *cp,
- const char *fp, int count)
+ const char *fp, int count, bool lookahead_done)
{
char flag = TTY_NORMAL;
@@ -1504,12 +1548,12 @@ n_tty_receive_buf_closing(struct tty_struct *tty, const unsigned char *cp,
if (fp)
flag = *fp++;
if (likely(flag == TTY_NORMAL))
- n_tty_receive_char_closing(tty, *cp++);
+ n_tty_receive_char_closing(tty, *cp++, lookahead_done);
}
}
static void n_tty_receive_buf_standard(struct tty_struct *tty,
- const unsigned char *cp, const char *fp, int count)
+ const unsigned char *cp, const char *fp, int count, bool lookahead_done)
{
struct n_tty_data *ldata = tty->disc_data;
char flag = TTY_NORMAL;
@@ -1540,7 +1584,7 @@ static void n_tty_receive_buf_standard(struct tty_struct *tty,
}
if (test_bit(c, ldata->char_map))
- n_tty_receive_char_special(tty, c);
+ n_tty_receive_char_special(tty, c, lookahead_done);
else
n_tty_receive_char(tty, c);
}
@@ -1551,21 +1595,30 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp,
{
struct n_tty_data *ldata = tty->disc_data;
bool preops = I_ISTRIP(tty) || (I_IUCLC(tty) && L_IEXTEN(tty));
+ size_t la_count = min_t(size_t, ldata->lookahead_count, count);
if (ldata->real_raw)
n_tty_receive_buf_real_raw(tty, cp, fp, count);
else if (ldata->raw || (L_EXTPROC(tty) && !preops))
n_tty_receive_buf_raw(tty, cp, fp, count);
- else if (tty->closing && !L_EXTPROC(tty))
- n_tty_receive_buf_closing(tty, cp, fp, count);
- else {
- n_tty_receive_buf_standard(tty, cp, fp, count);
+ else if (tty->closing && !L_EXTPROC(tty)) {
+ if (la_count > 0)
+ n_tty_receive_buf_closing(tty, cp, fp, la_count, true);
+ if (count > la_count)
+ n_tty_receive_buf_closing(tty, cp, fp, count - la_count, false);
+ } else {
+ if (la_count > 0)
+ n_tty_receive_buf_standard(tty, cp, fp, la_count, true);
+ if (count > la_count)
+ n_tty_receive_buf_standard(tty, cp, fp, count - la_count, false);
flush_echoes(tty);
if (tty->ops->flush_chars)
tty->ops->flush_chars(tty);
}
+ ldata->lookahead_count -= la_count;
+
if (ldata->icanon && !L_EXTPROC(tty))
return;
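The split above guarantees each byte triggers its software flow-control action exactly once: the first la_count bytes of this flush were already inspected by n_tty_lookahead_flow_ctrl() while the reader was throttled, so they are replayed with lookahead_done set and only the remainder gets the full treatment. A hypothetical helper restating that arithmetic:

static void example_split(struct n_tty_data *ldata, size_t count)
{
	size_t la_count = min_t(size_t, ldata->lookahead_count, count);

	/* bytes [0, la_count): lookahead_done == true, skip start/stop actions */
	/* bytes [la_count, count): lookahead_done == false, full handling */

	ldata->lookahead_count -= la_count;
}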
@@ -2446,6 +2499,7 @@ static struct tty_ldisc_ops n_tty_ops = {
.receive_buf = n_tty_receive_buf,
.write_wakeup = n_tty_write_wakeup,
.receive_buf2 = n_tty_receive_buf2,
+ .lookahead_buf = n_tty_lookahead_flow_ctrl,
};
/**
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index 696030cfcb09..287153d32536 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -123,6 +123,26 @@ static inline void serial_out(struct uart_8250_port *up, int offset, int value)
up->port.serial_out(&up->port, offset, value);
}
+/**
+ * serial_lsr_in - Read LSR register and preserve flags across reads
+ * @up: uart 8250 port
+ *
+ * Read LSR register and handle saving non-preserved flags across reads.
+ * The flags that are not preserved across reads are stored into
+ * up->lsr_saved_flags.
+ *
+ * Returns LSR value or'ed with the preserved flags (if any).
+ */
+static inline u16 serial_lsr_in(struct uart_8250_port *up)
+{
+ u16 lsr = up->lsr_saved_flags;
+
+ lsr |= serial_in(up, UART_LSR);
+ up->lsr_saved_flags = lsr & up->lsr_save_mask;
+
+ return lsr;
+}
+
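Several LSR bits (break, parity, framing and overrun errors) are cleared by the read itself, so a raw serial_in(up, UART_LSR) in one code path silently discards state another path still needs; the helper above accumulates those bits in lsr_saved_flags, and the rest of this series converts the open-coded reads to it. A hypothetical caller, assuming only helpers introduced by this series (uart_lsr_tx_empty() shows up later in the diff):

static bool example_tx_idle(struct uart_8250_port *up)
{
	u16 lsr = serial_lsr_in(up);	/* current LSR or'ed with previously saved flags */

	return uart_lsr_tx_empty(lsr);	/* true only when both THRE and TEMT are set */
}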
/*
* For the 16C950
*/
@@ -183,10 +203,12 @@ void serial8250_rpm_put(struct uart_8250_port *p);
void serial8250_rpm_get_tx(struct uart_8250_port *p);
void serial8250_rpm_put_tx(struct uart_8250_port *p);
-int serial8250_em485_config(struct uart_port *port, struct serial_rs485 *rs485);
+int serial8250_em485_config(struct uart_port *port, struct ktermios *termios,
+ struct serial_rs485 *rs485);
void serial8250_em485_start_tx(struct uart_8250_port *p);
void serial8250_em485_stop_tx(struct uart_8250_port *p);
void serial8250_em485_destroy(struct uart_8250_port *p);
+extern struct serial_rs485 serial8250_em485_supported;
/* MCR <-> TIOCM conversion */
static inline int serial8250_TIOCM_to_MCR(int tiocm)
diff --git a/drivers/tty/serial/8250/8250_bcm2835aux.c b/drivers/tty/serial/8250/8250_bcm2835aux.c
index 2a1226a78a0c..15a2387a5b25 100644
--- a/drivers/tty/serial/8250/8250_bcm2835aux.c
+++ b/drivers/tty/serial/8250/8250_bcm2835aux.c
@@ -108,6 +108,7 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev)
up.port.flags = UPF_SHARE_IRQ | UPF_FIXED_PORT | UPF_FIXED_TYPE |
UPF_SKIP_TEST | UPF_IOREMAP;
up.port.rs485_config = serial8250_em485_config;
+ up.port.rs485_supported = serial8250_em485_supported;
up.rs485_start_tx = bcm2835aux_rs485_start_tx;
up.rs485_stop_tx = bcm2835aux_rs485_stop_tx;
@@ -166,8 +167,10 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev)
uartclk = clk_get_rate(data->clk);
if (!uartclk) {
ret = device_property_read_u32(&pdev->dev, "clock-frequency", &uartclk);
- if (ret)
- return dev_err_probe(&pdev->dev, ret, "could not get clk rate\n");
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "could not get clk rate\n");
+ goto dis_clk;
+ }
}
/* the HW-clock divider for bcm2835aux is 8,
diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c
index 9b878d023dac..8efdc271eb75 100644
--- a/drivers/tty/serial/8250/8250_bcm7271.c
+++ b/drivers/tty/serial/8250/8250_bcm7271.c
@@ -1139,16 +1139,19 @@ static int __maybe_unused brcmuart_suspend(struct device *dev)
struct brcmuart_priv *priv = dev_get_drvdata(dev);
struct uart_8250_port *up = serial8250_get_port(priv->line);
struct uart_port *port = &up->port;
-
- serial8250_suspend_port(priv->line);
- clk_disable_unprepare(priv->baud_mux_clk);
+ unsigned long flags;
/*
* This will prevent resume from enabling RTS before the
- * baud rate has been resored.
+ * baud rate has been restored.
*/
+ spin_lock_irqsave(&port->lock, flags);
priv->saved_mctrl = port->mctrl;
- port->mctrl = 0;
+ port->mctrl &= ~TIOCM_RTS;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ serial8250_suspend_port(priv->line);
+ clk_disable_unprepare(priv->baud_mux_clk);
return 0;
}
@@ -1158,6 +1161,7 @@ static int __maybe_unused brcmuart_resume(struct device *dev)
struct brcmuart_priv *priv = dev_get_drvdata(dev);
struct uart_8250_port *up = serial8250_get_port(priv->line);
struct uart_port *port = &up->port;
+ unsigned long flags;
int ret;
ret = clk_prepare_enable(priv->baud_mux_clk);
@@ -1180,7 +1184,15 @@ static int __maybe_unused brcmuart_resume(struct device *dev)
start_rx_dma(serial8250_get_port(priv->line));
}
serial8250_resume_port(priv->line);
- port->mctrl = priv->saved_mctrl;
+
+ if (priv->saved_mctrl & TIOCM_RTS) {
+ /* Restore RTS */
+ spin_lock_irqsave(&port->lock, flags);
+ port->mctrl |= TIOCM_RTS;
+ port->ops->set_mctrl(port, port->mctrl);
+ spin_unlock_irqrestore(&port->lock, flags);
+ }
+
return 0;
}
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 3f56dbc9432b..2e83e7367441 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -277,8 +277,7 @@ static void serial8250_backup_timeout(struct timer_list *t)
* the "Diva" UART used on the management processor on many HP
* ia64 and parisc boxes.
*/
- lsr = serial_in(up, UART_LSR);
- up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
+ lsr = serial_lsr_in(up);
if ((iir & UART_IIR_NO_INT) && (up->ier & UART_IER_THRI) &&
(!uart_circ_empty(&up->port.state->xmit) || up->port.x_char) &&
(lsr & UART_LSR_THRE)) {
@@ -1008,9 +1007,11 @@ int serial8250_register_8250_port(const struct uart_8250_port *up)
uart->port.throttle = up->port.throttle;
uart->port.unthrottle = up->port.unthrottle;
uart->port.rs485_config = up->port.rs485_config;
+ uart->port.rs485_supported = up->port.rs485_supported;
uart->port.rs485 = up->port.rs485;
uart->rs485_start_tx = up->rs485_start_tx;
uart->rs485_stop_tx = up->rs485_stop_tx;
+ uart->lsr_save_mask = up->lsr_save_mask;
uart->dma = up->dma;
/* Take tx_loadsz from fifosize if it wasn't set separately */
@@ -1098,6 +1099,9 @@ int serial8250_register_8250_port(const struct uart_8250_port *up)
ret = 0;
}
+ if (!uart->lsr_save_mask)
+ uart->lsr_save_mask = LSR_SAVE_FLAGS; /* Use default LSR mask */
+
/* Initialise interrupt backoff work if required */
if (up->overrun_backoff_time_ms > 0) {
uart->overrun_backoff_time_ms =
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index bb6aca07ab56..a604b42e4458 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -9,26 +9,27 @@
* LCR is written whilst busy. If it is, then a busy detect interrupt is
* raised, the LCR needs to be rewritten and the uart status register read.
*/
+#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/serial_8250.h>
-#include <linux/serial_reg.h>
+#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/property.h>
-#include <linux/workqueue.h>
-#include <linux/notifier.h>
-#include <linux/slab.h>
-#include <linux/acpi.h>
-#include <linux/clk.h>
#include <linux/reset.h>
-#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
#include <asm/byteorder.h>
+#include <linux/serial_8250.h>
+#include <linux/serial_reg.h>
+
#include "8250_dwlib.h"
/* Offsets for the DesignWare specific registers */
@@ -82,8 +83,21 @@ static inline int dw8250_modify_msr(struct uart_port *p, int offset, int value)
static void dw8250_force_idle(struct uart_port *p)
{
struct uart_8250_port *up = up_to_u8250p(p);
+ unsigned int lsr;
serial8250_clear_and_reinit_fifos(up);
+
+ /*
+ * With PSLVERR_RESP_EN parameter set to 1, the device generates an
+ * error response when an attempt to read an empty RBR with FIFO
+ * enabled.
+ */
+ if (up->fcr & UART_FCR_ENABLE_FIFO) {
+ lsr = p->serial_in(p, UART_LSR);
+ if (!(lsr & UART_LSR_DR))
+ return;
+ }
+
(void)p->serial_in(p, UART_RX);
}
@@ -122,12 +136,15 @@ static void dw8250_check_lcr(struct uart_port *p, int value)
/* Returns once the transmitter is empty or we run out of retries */
static void dw8250_tx_wait_empty(struct uart_port *p)
{
+ struct uart_8250_port *up = up_to_u8250p(p);
unsigned int tries = 20000;
unsigned int delay_threshold = tries - 1000;
unsigned int lsr;
while (tries--) {
lsr = readb (p->membase + (UART_LSR << p->regshift));
+ up->lsr_saved_flags |= lsr & up->lsr_save_mask;
+
if (lsr & UART_LSR_TEMT)
break;
@@ -140,29 +157,23 @@ static void dw8250_tx_wait_empty(struct uart_port *p)
}
}
-static void dw8250_serial_out38x(struct uart_port *p, int offset, int value)
+static void dw8250_serial_out(struct uart_port *p, int offset, int value)
{
struct dw8250_data *d = to_dw8250_data(p->private_data);
- /* Allow the TX to drain before we reconfigure */
- if (offset == UART_LCR)
- dw8250_tx_wait_empty(p);
-
writeb(value, p->membase + (offset << p->regshift));
if (offset == UART_LCR && !d->uart_16550_compatible)
dw8250_check_lcr(p, value);
}
-
-static void dw8250_serial_out(struct uart_port *p, int offset, int value)
+static void dw8250_serial_out38x(struct uart_port *p, int offset, int value)
{
- struct dw8250_data *d = to_dw8250_data(p->private_data);
-
- writeb(value, p->membase + (offset << p->regshift));
+ /* Allow the TX to drain before we reconfigure */
+ if (offset == UART_LCR)
+ dw8250_tx_wait_empty(p);
- if (offset == UART_LCR && !d->uart_16550_compatible)
- dw8250_check_lcr(p, value);
+ dw8250_serial_out(p, offset, value);
}
static unsigned int dw8250_serial_in(struct uart_port *p, int offset)
@@ -253,7 +264,7 @@ static int dw8250_handle_irq(struct uart_port *p)
*/
if (!up->dma && rx_timeout) {
spin_lock_irqsave(&p->lock, flags);
- status = p->serial_in(p, UART_LSR);
+ status = serial_lsr_in(up);
if (!(status & (UART_LSR_DR | UART_LSR_BI)))
(void) p->serial_in(p, UART_RX);
@@ -263,7 +274,10 @@ static int dw8250_handle_irq(struct uart_port *p)
/* Manually stop the Rx DMA transfer when acting as flow controller */
if (quirks & DW_UART_QUIRK_IS_DMA_FC && up->dma && up->dma->rx_running && rx_timeout) {
- status = p->serial_in(p, UART_LSR);
+ spin_lock_irqsave(&p->lock, flags);
+ status = serial_lsr_in(up);
+ spin_unlock_irqrestore(&p->lock, flags);
+
if (status & (UART_LSR_DR | UART_LSR_BI)) {
dw8250_writel_ext(p, RZN1_UART_RDMACR, 0);
dw8250_writel_ext(p, DW_UART_DMASA, 1);
@@ -688,7 +702,6 @@ static int dw8250_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int dw8250_suspend(struct device *dev)
{
struct dw8250_data *data = dev_get_drvdata(dev);
@@ -706,9 +719,7 @@ static int dw8250_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
-#ifdef CONFIG_PM
static int dw8250_runtime_suspend(struct device *dev)
{
struct dw8250_data *data = dev_get_drvdata(dev);
@@ -730,11 +741,10 @@ static int dw8250_runtime_resume(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops dw8250_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(dw8250_suspend, dw8250_resume)
- SET_RUNTIME_PM_OPS(dw8250_runtime_suspend, dw8250_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(dw8250_suspend, dw8250_resume)
+ RUNTIME_PM_OPS(dw8250_runtime_suspend, dw8250_runtime_resume, NULL)
};
static const struct dw8250_platform_data dw8250_dw_apb = {
@@ -792,7 +802,7 @@ MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match);
static struct platform_driver dw8250_platform_driver = {
.driver = {
.name = "dw-apb-uart",
- .pm = &dw8250_pm_ops,
+ .pm = pm_ptr(&dw8250_pm_ops),
.of_match_table = dw8250_of_match,
.acpi_match_table = dw8250_acpi_match,
},
diff --git a/drivers/tty/serial/8250/8250_dwlib.c b/drivers/tty/serial/8250/8250_dwlib.c
index fbabfdd8c7b8..dbe4d44f60d4 100644
--- a/drivers/tty/serial/8250/8250_dwlib.c
+++ b/drivers/tty/serial/8250/8250_dwlib.c
@@ -3,8 +3,10 @@
#include <linux/bitops.h>
#include <linux/bitfield.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/kernel.h>
+#include <linux/math.h>
#include <linux/property.h>
#include <linux/serial_8250.h>
#include <linux/serial_core.h>
@@ -16,9 +18,18 @@
#define DW_UART_DE_EN 0xb0 /* Driver Output Enable Register */
#define DW_UART_RE_EN 0xb4 /* Receiver Output Enable Register */
#define DW_UART_DLF 0xc0 /* Divisor Latch Fraction Register */
+#define DW_UART_RAR 0xc4 /* Receive Address Register */
+#define DW_UART_TAR 0xc8 /* Transmit Address Register */
+#define DW_UART_LCR_EXT 0xcc /* Line Extended Control Register */
#define DW_UART_CPR 0xf4 /* Component Parameter Register */
#define DW_UART_UCV 0xf8 /* UART Component Version */
+/* Receive / Transmit Address Register bits */
+#define DW_UART_ADDR_MASK GENMASK(7, 0)
+
+/* Line Status Register bits */
+#define DW_UART_LSR_ADDR_RCVD BIT(8)
+
/* Transceiver Control Register bits */
#define DW_UART_TCR_RS485_EN BIT(0)
#define DW_UART_TCR_RE_POL BIT(1)
@@ -28,22 +39,28 @@
#define DW_UART_TCR_XFER_MODE_SW_DE_OR_RE FIELD_PREP(DW_UART_TCR_XFER_MODE, 1)
#define DW_UART_TCR_XFER_MODE_DE_OR_RE FIELD_PREP(DW_UART_TCR_XFER_MODE, 2)
+/* Line Extended Control Register bits */
+#define DW_UART_LCR_EXT_DLS_E BIT(0)
+#define DW_UART_LCR_EXT_ADDR_MATCH BIT(1)
+#define DW_UART_LCR_EXT_SEND_ADDR BIT(2)
+#define DW_UART_LCR_EXT_TRANSMIT_MODE BIT(3)
+
/* Component Parameter Register bits */
-#define DW_UART_CPR_ABP_DATA_WIDTH (3 << 0)
-#define DW_UART_CPR_AFCE_MODE (1 << 4)
-#define DW_UART_CPR_THRE_MODE (1 << 5)
-#define DW_UART_CPR_SIR_MODE (1 << 6)
-#define DW_UART_CPR_SIR_LP_MODE (1 << 7)
-#define DW_UART_CPR_ADDITIONAL_FEATURES (1 << 8)
-#define DW_UART_CPR_FIFO_ACCESS (1 << 9)
-#define DW_UART_CPR_FIFO_STAT (1 << 10)
-#define DW_UART_CPR_SHADOW (1 << 11)
-#define DW_UART_CPR_ENCODED_PARMS (1 << 12)
-#define DW_UART_CPR_DMA_EXTRA (1 << 13)
-#define DW_UART_CPR_FIFO_MODE (0xff << 16)
+#define DW_UART_CPR_ABP_DATA_WIDTH GENMASK(1, 0)
+#define DW_UART_CPR_AFCE_MODE BIT(4)
+#define DW_UART_CPR_THRE_MODE BIT(5)
+#define DW_UART_CPR_SIR_MODE BIT(6)
+#define DW_UART_CPR_SIR_LP_MODE BIT(7)
+#define DW_UART_CPR_ADDITIONAL_FEATURES BIT(8)
+#define DW_UART_CPR_FIFO_ACCESS BIT(9)
+#define DW_UART_CPR_FIFO_STAT BIT(10)
+#define DW_UART_CPR_SHADOW BIT(11)
+#define DW_UART_CPR_ENCODED_PARMS BIT(12)
+#define DW_UART_CPR_DMA_EXTRA BIT(13)
+#define DW_UART_CPR_FIFO_MODE GENMASK(23, 16)
/* Helper for FIFO size calculation */
-#define DW_UART_CPR_FIFO_SIZE(a) (((a >> 16) & 0xff) * 16)
+#define DW_UART_CPR_FIFO_SIZE(a) (FIELD_GET(DW_UART_CPR_FIFO_MODE, (a)) * 16)
/*
* divisor = div(I) + div(F)
@@ -82,10 +99,85 @@ void dw8250_do_set_termios(struct uart_port *p, struct ktermios *termios, struct
p->status |= UPSTAT_AUTOCTS;
serial8250_do_set_termios(p, termios, old);
+
+ /* Filter addresses which have 9th bit set */
+ p->ignore_status_mask |= DW_UART_LSR_ADDR_RCVD;
+ p->read_status_mask |= DW_UART_LSR_ADDR_RCVD;
}
EXPORT_SYMBOL_GPL(dw8250_do_set_termios);
-static int dw8250_rs485_config(struct uart_port *p, struct serial_rs485 *rs485)
+/*
+ * Wait until re is de-asserted for sure. An ongoing receive will keep
+ * re asserted until end of frame. Without BUSY indication available,
+ * the only available course of action is to wait for the time it takes
+ * to receive one frame (there might be nothing to receive, but without
+ * BUSY the driver cannot know).
+ */
+static void dw8250_wait_re_deassert(struct uart_port *p)
+{
+ ndelay(p->frame_time);
+}
+
+static void dw8250_update_rar(struct uart_port *p, u32 addr)
+{
+ u32 re_en = dw8250_readl_ext(p, DW_UART_RE_EN);
+
+ /*
+ * RAR shouldn't be changed while receiving. Thus, de-assert RE_EN
+ * if asserted and wait.
+ */
+ if (re_en)
+ dw8250_writel_ext(p, DW_UART_RE_EN, 0);
+ dw8250_wait_re_deassert(p);
+ dw8250_writel_ext(p, DW_UART_RAR, addr);
+ if (re_en)
+ dw8250_writel_ext(p, DW_UART_RE_EN, re_en);
+}
+
+static void dw8250_rs485_set_addr(struct uart_port *p, struct serial_rs485 *rs485,
+ struct ktermios *termios)
+{
+ u32 lcr = dw8250_readl_ext(p, DW_UART_LCR_EXT);
+
+ if (rs485->flags & SER_RS485_ADDRB) {
+ lcr |= DW_UART_LCR_EXT_DLS_E;
+ if (termios)
+ termios->c_cflag |= ADDRB;
+
+ if (rs485->flags & SER_RS485_ADDR_RECV) {
+ u32 delta = p->rs485.flags ^ rs485->flags;
+
+ /*
+ * rs485 (param) is equal to uart_port's rs485 only during init
+ * (during init, delta is not yet applicable).
+ */
+ if (unlikely(&p->rs485 == rs485))
+ delta = rs485->flags;
+
+ if ((delta & SER_RS485_ADDR_RECV) ||
+ (p->rs485.addr_recv != rs485->addr_recv))
+ dw8250_update_rar(p, rs485->addr_recv);
+ lcr |= DW_UART_LCR_EXT_ADDR_MATCH;
+ } else {
+ lcr &= ~DW_UART_LCR_EXT_ADDR_MATCH;
+ }
+ if (rs485->flags & SER_RS485_ADDR_DEST) {
+ /*
+ * Don't skip writes here as another endpoint could
+ * have changed communication line's destination
+ * address in between.
+ */
+ dw8250_writel_ext(p, DW_UART_TAR, rs485->addr_dest);
+ lcr |= DW_UART_LCR_EXT_SEND_ADDR;
+ }
+ } else {
+ lcr = 0;
+ }
+ dw8250_writel_ext(p, DW_UART_LCR_EXT, lcr);
+}
+
+static int dw8250_rs485_config(struct uart_port *p, struct ktermios *termios,
+ struct serial_rs485 *rs485)
{
u32 tcr;
@@ -93,25 +185,17 @@ static int dw8250_rs485_config(struct uart_port *p, struct serial_rs485 *rs485)
tcr &= ~DW_UART_TCR_XFER_MODE;
if (rs485->flags & SER_RS485_ENABLED) {
- /* Clear unsupported flags. */
- rs485->flags &= SER_RS485_ENABLED | SER_RS485_RX_DURING_TX |
- SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND;
tcr |= DW_UART_TCR_RS485_EN;
- if (rs485->flags & SER_RS485_RX_DURING_TX) {
+ if (rs485->flags & SER_RS485_RX_DURING_TX)
tcr |= DW_UART_TCR_XFER_MODE_DE_DURING_RE;
- } else {
- /* HW does not support same DE level for tx and rx */
- if (!(rs485->flags & SER_RS485_RTS_ON_SEND) ==
- !(rs485->flags & SER_RS485_RTS_AFTER_SEND))
- return -EINVAL;
-
+ else
tcr |= DW_UART_TCR_XFER_MODE_DE_OR_RE;
- }
dw8250_writel_ext(p, DW_UART_DE_EN, 1);
dw8250_writel_ext(p, DW_UART_RE_EN, 1);
} else {
- rs485->flags = 0;
+ if (termios)
+ termios->c_cflag &= ~ADDRB;
tcr &= ~DW_UART_TCR_RS485_EN;
}
@@ -127,10 +211,9 @@ static int dw8250_rs485_config(struct uart_port *p, struct serial_rs485 *rs485)
dw8250_writel_ext(p, DW_UART_TCR, tcr);
- rs485->delay_rts_before_send = 0;
- rs485->delay_rts_after_send = 0;
-
- p->rs485 = *rs485;
+ /* Addressing mode can only be set up after TCR */
+ if (rs485->flags & SER_RS485_ENABLED)
+ dw8250_rs485_set_addr(p, rs485, termios);
return 0;
}
@@ -149,6 +232,12 @@ static bool dw8250_detect_rs485_hw(struct uart_port *p)
return reg;
}
+static const struct serial_rs485 dw8250_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RX_DURING_TX | SER_RS485_RTS_ON_SEND |
+ SER_RS485_RTS_AFTER_SEND | SER_RS485_ADDRB | SER_RS485_ADDR_RECV |
+ SER_RS485_ADDR_DEST,
+};
+
void dw8250_setup_port(struct uart_port *p)
{
struct dw8250_port_data *pd = p->private_data;
@@ -159,8 +248,11 @@ void dw8250_setup_port(struct uart_port *p)
pd->hw_rs485_support = dw8250_detect_rs485_hw(p);
if (pd->hw_rs485_support) {
p->rs485_config = dw8250_rs485_config;
+ up->lsr_save_mask = LSR_SAVE_FLAGS | DW_UART_LSR_ADDR_RCVD;
+ p->rs485_supported = dw8250_rs485_supported;
} else {
p->rs485_config = serial8250_em485_config;
+ p->rs485_supported = serial8250_em485_supported;
up->rs485_start_tx = serial8250_em485_start_tx;
up->rs485_stop_tx = serial8250_em485_stop_tx;
}
diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
index e52585064565..f271becfc46c 100644
--- a/drivers/tty/serial/8250/8250_early.c
+++ b/drivers/tty/serial/8250/8250_early.c
@@ -84,8 +84,6 @@ static void serial8250_early_out(struct uart_port *port, int offset, int value)
}
}
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
static void serial_putc(struct uart_port *port, unsigned char c)
{
unsigned int status;
@@ -94,7 +92,7 @@ static void serial_putc(struct uart_port *port, unsigned char c)
for (;;) {
status = serial8250_early_in(port, UART_LSR);
- if ((status & BOTH_EMPTY) == BOTH_EMPTY)
+ if (uart_lsr_tx_empty(status))
break;
cpu_relax();
}
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index 7292917ac878..314a05e009df 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -112,7 +112,9 @@
struct exar8250;
struct exar8250_platform {
- int (*rs485_config)(struct uart_port *, struct serial_rs485 *);
+ int (*rs485_config)(struct uart_port *port, struct ktermios *termios,
+ struct serial_rs485 *rs485);
+ const struct serial_rs485 *rs485_supported;
int (*register_gpio)(struct pci_dev *, struct uart_8250_port *);
void (*unregister_gpio)(struct uart_8250_port *);
};
@@ -194,11 +196,11 @@ static int xr17v35x_startup(struct uart_port *port)
static void exar_shutdown(struct uart_port *port)
{
- unsigned char lsr;
bool tx_complete = false;
struct uart_8250_port *up = up_to_u8250p(port);
struct circ_buf *xmit = &port->state->xmit;
int i = 0;
+ u16 lsr;
do {
lsr = serial_in(up, UART_LSR);
@@ -408,7 +410,7 @@ static void xr17v35x_unregister_gpio(struct uart_8250_port *port)
port->port.private_data = NULL;
}
-static int generic_rs485_config(struct uart_port *port,
+static int generic_rs485_config(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
bool is_rs485 = !!(rs485->flags & SER_RS485_ENABLED);
@@ -426,18 +428,21 @@ static int generic_rs485_config(struct uart_port *port,
if (is_rs485)
writeb(UART_EXAR_RS485_DLY(4), p + UART_MSR);
- port->rs485 = *rs485;
-
return 0;
}
+static const struct serial_rs485 generic_rs485_supported = {
+ .flags = SER_RS485_ENABLED,
+};
+
static const struct exar8250_platform exar8250_default_platform = {
.register_gpio = xr17v35x_register_gpio,
.unregister_gpio = xr17v35x_unregister_gpio,
.rs485_config = generic_rs485_config,
+ .rs485_supported = &generic_rs485_supported,
};
-static int iot2040_rs485_config(struct uart_port *port,
+static int iot2040_rs485_config(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
bool is_rs485 = !!(rs485->flags & SER_RS485_ENABLED);
@@ -467,9 +472,13 @@ static int iot2040_rs485_config(struct uart_port *port,
value |= mode;
writeb(value, p + UART_EXAR_MPIOLVL_7_0);
- return generic_rs485_config(port, rs485);
+ return generic_rs485_config(port, termios, rs485);
}
+static const struct serial_rs485 iot2040_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RX_DURING_TX | SER_RS485_TERMINATE_BUS,
+};
+
static const struct property_entry iot2040_gpio_properties[] = {
PROPERTY_ENTRY_U32("exar,first-pin", 10),
PROPERTY_ENTRY_U32("ngpios", 1),
@@ -498,6 +507,7 @@ static int iot2040_register_gpio(struct pci_dev *pcidev,
static const struct exar8250_platform iot2040_platform = {
.rs485_config = iot2040_rs485_config,
+ .rs485_supported = &iot2040_rs485_supported,
.register_gpio = iot2040_register_gpio,
.unregister_gpio = xr17v35x_unregister_gpio,
};
@@ -540,6 +550,7 @@ pci_xr17v35x_setup(struct exar8250 *priv, struct pci_dev *pcidev,
port->port.uartclk = baud * 16;
port->port.rs485_config = platform->rs485_config;
+ port->port.rs485_supported = *(platform->rs485_supported);
/*
* Setup the UART clock for the devices on expansion slot to
diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
index dba5950b8d0e..65b6b3cbaff6 100644
--- a/drivers/tty/serial/8250/8250_fintek.c
+++ b/drivers/tty/serial/8250/8250_fintek.c
@@ -191,7 +191,7 @@ static int fintek_8250_get_ldn_range(struct fintek_8250 *pdata, int *min,
return -ENODEV;
}
-static int fintek_8250_rs485_config(struct uart_port *port,
+static int fintek_8250_rs485_config(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
uint8_t config = 0;
@@ -206,19 +206,7 @@ static int fintek_8250_rs485_config(struct uart_port *port,
if (!(rs485->flags & SER_RS485_RTS_ON_SEND) ==
!(rs485->flags & SER_RS485_RTS_AFTER_SEND))
return -EINVAL;
- memset(rs485->padding, 0, sizeof(rs485->padding));
config |= RS485_URA;
- } else {
- memset(rs485, 0, sizeof(*rs485));
- }
-
- rs485->flags &= SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND |
- SER_RS485_RTS_AFTER_SEND;
-
- /* Only the first port supports delays */
- if (pdata->index) {
- rs485->delay_rts_before_send = 0;
- rs485->delay_rts_after_send = 0;
}
if (rs485->delay_rts_before_send) {
@@ -241,8 +229,6 @@ static int fintek_8250_rs485_config(struct uart_port *port,
sio_write_reg(pdata, RS485, config);
fintek_8250_exit_key(pdata->base_port);
- port->rs485 = *rs485;
-
return 0;
}
@@ -424,6 +410,17 @@ static int probe_setup_port(struct fintek_8250 *pdata,
return -ENODEV;
}
+/* Only the first port supports delays */
+static const struct serial_rs485 fintek_8250_rs485_supported_port0 = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1,
+};
+
+static const struct serial_rs485 fintek_8250_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND,
+};
+
static void fintek_8250_set_rs485_handler(struct uart_8250_port *uart)
{
struct fintek_8250 *pdata = uart->port.private_data;
@@ -435,6 +432,10 @@ static void fintek_8250_set_rs485_handler(struct uart_8250_port *uart)
case CHIP_ID_F81866:
case CHIP_ID_F81865:
uart->port.rs485_config = fintek_8250_rs485_config;
+ if (!pdata->index)
+ uart->port.rs485_supported = fintek_8250_rs485_supported_port0;
+ else
+ uart->port.rs485_supported = fintek_8250_rs485_supported;
break;
default: /* No RS485 Auto direction functional */
diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c
index 9c01c531349d..8aad15622a2e 100644
--- a/drivers/tty/serial/8250/8250_fsl.c
+++ b/drivers/tty/serial/8250/8250_fsl.c
@@ -25,8 +25,8 @@
int fsl8250_handle_irq(struct uart_port *port)
{
- unsigned char lsr, orig_lsr;
unsigned long flags;
+ u16 lsr, orig_lsr;
unsigned int iir;
struct uart_8250_port *up = up_to_u8250p(port);
@@ -77,7 +77,7 @@ int fsl8250_handle_irq(struct uart_port *port)
if ((lsr & UART_LSR_THRE) && (up->ier & UART_IER_THRI))
serial8250_tx_chars(up);
- up->lsr_saved_flags = orig_lsr;
+ up->lsr_saved_flags |= orig_lsr & UART_LSR_BI;
uart_unlock_and_check_sysrq_irqrestore(&up->port, flags);
diff --git a/drivers/tty/serial/8250/8250_ingenic.c b/drivers/tty/serial/8250/8250_ingenic.c
index cff91aa03f29..2b2f5d8d24b9 100644
--- a/drivers/tty/serial/8250/8250_ingenic.c
+++ b/drivers/tty/serial/8250/8250_ingenic.c
@@ -54,7 +54,7 @@ static void early_out(struct uart_port *port, int offset, uint8_t value)
static void ingenic_early_console_putc(struct uart_port *port, unsigned char c)
{
- uint8_t lsr;
+ u16 lsr;
do {
lsr = early_in(port, UART_LSR);
diff --git a/drivers/tty/serial/8250/8250_lpc18xx.c b/drivers/tty/serial/8250/8250_lpc18xx.c
index 570e25d6f37e..6dc85aaba5d0 100644
--- a/drivers/tty/serial/8250/8250_lpc18xx.c
+++ b/drivers/tty/serial/8250/8250_lpc18xx.c
@@ -32,7 +32,7 @@ struct lpc18xx_uart_data {
int line;
};
-static int lpc18xx_rs485_config(struct uart_port *port,
+static int lpc18xx_rs485_config(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
struct uart_8250_port *up = up_to_u8250p(port);
@@ -40,24 +40,12 @@ static int lpc18xx_rs485_config(struct uart_port *port,
u32 rs485_dly_reg = 0;
unsigned baud_clk;
- if (rs485->flags & SER_RS485_ENABLED)
- memset(rs485->padding, 0, sizeof(rs485->padding));
- else
- memset(rs485, 0, sizeof(*rs485));
-
- rs485->flags &= SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND |
- SER_RS485_RTS_AFTER_SEND;
-
if (rs485->flags & SER_RS485_ENABLED) {
rs485_ctrl_reg |= LPC18XX_UART_RS485CTRL_NMMEN |
LPC18XX_UART_RS485CTRL_DCTRL;
- if (rs485->flags & SER_RS485_RTS_ON_SEND) {
+ if (rs485->flags & SER_RS485_RTS_ON_SEND)
rs485_ctrl_reg |= LPC18XX_UART_RS485CTRL_OINV;
- rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
- } else {
- rs485->flags |= SER_RS485_RTS_AFTER_SEND;
- }
}
if (rs485->delay_rts_after_send) {
@@ -73,14 +61,9 @@ static int lpc18xx_rs485_config(struct uart_port *port,
/ baud_clk;
}
- /* Delay RTS before send not supported */
- rs485->delay_rts_before_send = 0;
-
serial_out(up, LPC18XX_UART_RS485CTRL, rs485_ctrl_reg);
serial_out(up, LPC18XX_UART_RS485DLY, rs485_dly_reg);
- port->rs485 = *rs485;
-
return 0;
}
@@ -98,6 +81,12 @@ static void lpc18xx_uart_serial_out(struct uart_port *p, int offset, int value)
writel(value, p->membase + offset);
}
+static const struct serial_rs485 lpc18xx_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND,
+ .delay_rts_after_send = 1,
+ /* Delay RTS before send is not supported */
+};
+
static int lpc18xx_serial_probe(struct platform_device *pdev)
{
struct lpc18xx_uart_data *data;
@@ -168,6 +157,7 @@ static int lpc18xx_serial_probe(struct platform_device *pdev)
uart.port.uartclk = clk_get_rate(data->clk_uart);
uart.port.private_data = data;
uart.port.rs485_config = lpc18xx_rs485_config;
+ uart.port.rs485_supported = lpc18xx_rs485_supported;
uart.port.serial_out = lpc18xx_uart_serial_out;
uart.dma = &data->dma;
diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c
index 0f5af061e0b4..4ba43bef9933 100644
--- a/drivers/tty/serial/8250/8250_lpss.c
+++ b/drivers/tty/serial/8250/8250_lpss.c
@@ -330,7 +330,7 @@ static int lpss8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
uart.port.irq = pci_irq_vector(pdev, 0);
uart.port.private_data = &lpss->data;
uart.port.type = PORT_16550A;
- uart.port.iotype = UPIO_MEM;
+ uart.port.iotype = UPIO_MEM32;
uart.port.regshift = 2;
uart.port.uartclk = lpss->board->base_baud * 16;
uart.port.flags = UPF_SHARE_IRQ | UPF_FIXED_PORT | UPF_FIXED_TYPE;
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index 5a699a1aa79c..1b461fba15a3 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -165,6 +165,7 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
port->dev = &ofdev->dev;
port->rs485_config = serial8250_em485_config;
+ port->rs485_supported = serial8250_em485_supported;
up->rs485_start_tx = serial8250_em485_start_tx;
up->rs485_stop_tx = serial8250_em485_stop_tx;
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index ac8bfa042391..0dcecbbc3967 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -1115,8 +1115,7 @@ static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir)
return omap_8250_rx_dma(up);
}
-static unsigned char omap_8250_handle_rx_dma(struct uart_8250_port *up,
- u8 iir, unsigned char status)
+static u16 omap_8250_handle_rx_dma(struct uart_8250_port *up, u8 iir, u16 status)
{
if ((status & (UART_LSR_DR | UART_LSR_BI)) &&
(iir & UART_IIR_RDI)) {
@@ -1130,7 +1129,7 @@ static unsigned char omap_8250_handle_rx_dma(struct uart_8250_port *up,
}
static void am654_8250_handle_rx_dma(struct uart_8250_port *up, u8 iir,
- unsigned char status)
+ u16 status)
{
/*
* Queue a new transfer if FIFO has data.
@@ -1164,7 +1163,7 @@ static int omap_8250_dma_handle_irq(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
struct omap8250_priv *priv = up->port.private_data;
- unsigned char status;
+ u16 status;
u8 iir;
serial8250_rpm_get(up);
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index a17619db7939..6f66dc2ebacc 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1553,7 +1553,7 @@ pci_brcm_trumanage_setup(struct serial_private *priv,
#define FINTEK_RTS_INVERT BIT(5)
/* We should do proper H/W transceiver setting before change to RS485 mode */
-static int pci_fintek_rs485_config(struct uart_port *port,
+static int pci_fintek_rs485_config(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
struct pci_dev *pci_dev = to_pci_dev(port->dev);
@@ -1562,16 +1562,6 @@ static int pci_fintek_rs485_config(struct uart_port *port,
pci_read_config_byte(pci_dev, 0x40 + 8 * *index + 7, &setting);
- if (!rs485)
- rs485 = &port->rs485;
- else if (rs485->flags & SER_RS485_ENABLED)
- memset(rs485->padding, 0, sizeof(rs485->padding));
- else
- memset(rs485, 0, sizeof(*rs485));
-
- /* F81504/508/512 not support RTS delay before or after send */
- rs485->flags &= SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND;
-
if (rs485->flags & SER_RS485_ENABLED) {
/* Enable RTS H/W control mode */
setting |= FINTEK_RTS_CONTROL_BY_HW;
@@ -1583,9 +1573,6 @@ static int pci_fintek_rs485_config(struct uart_port *port,
/* RTS driving low on TX */
setting |= FINTEK_RTS_INVERT;
}
-
- rs485->delay_rts_after_send = 0;
- rs485->delay_rts_before_send = 0;
} else {
/* Disable RTS H/W control mode */
setting &= ~(FINTEK_RTS_CONTROL_BY_HW | FINTEK_RTS_INVERT);
@@ -1593,12 +1580,14 @@ static int pci_fintek_rs485_config(struct uart_port *port,
pci_write_config_byte(pci_dev, 0x40 + 8 * *index + 7, setting);
- if (rs485 != &port->rs485)
- port->rs485 = *rs485;
-
return 0;
}
+static const struct serial_rs485 pci_fintek_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND,
+ /* F81504/508/512 does not support RTS delay before or after send */
+};
+
static int pci_fintek_setup(struct serial_private *priv,
const struct pciserial_board *board,
struct uart_8250_port *port, int idx)
@@ -1618,6 +1607,7 @@ static int pci_fintek_setup(struct serial_private *priv,
port->port.iotype = UPIO_PORT;
port->port.iobase = iobase;
port->port.rs485_config = pci_fintek_rs485_config;
+ port->port.rs485_supported = pci_fintek_rs485_supported;
data = devm_kzalloc(&pdev->dev, sizeof(u8), GFP_KERNEL);
if (!data)
@@ -1689,7 +1679,7 @@ static int pci_fintek_init(struct pci_dev *dev)
* pciserial_resume_ports()
*/
port = serial8250_get_port(priv->line[i]);
- pci_fintek_rs485_config(&port->port, NULL);
+ uart_rs485_config(&port->port);
} else {
/* First init without port data
* force init to RS232 Mode
@@ -5077,6 +5067,115 @@ static const struct pci_device_id serial_pci_tbl[] = {
0, 0,
pbn_b2_4_115200 },
/*
+ * Brainboxes PX-101
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4005,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b0_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x4019,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_2_15625000 },
+ /*
+ * Brainboxes PX-235/246
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4004,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b0_1_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x4016,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_1_15625000 },
+ /*
+ * Brainboxes PX-203/PX-257
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4006,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b0_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x4015,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_4_15625000 },
+ /*
+ * Brainboxes PX-260/PX-701
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x400A,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_4_15625000 },
+ /*
+ * Brainboxes PX-310
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x400E,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_2_15625000 },
+ /*
+ * Brainboxes PX-313
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x400C,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_2_15625000 },
+ /*
+ * Brainboxes PX-320/324/PX-376/PX-387
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x400B,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_1_15625000 },
+ /*
+ * Brainboxes PX-335/346
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x400F,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_4_15625000 },
+ /*
+ * Brainboxes PX-368
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4010,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_4_15625000 },
+ /*
+ * Brainboxes PX-420
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4000,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b0_4_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x4011,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_4_15625000 },
+ /*
+ * Brainboxes PX-803
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4009,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b0_1_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x401E,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_1_15625000 },
+ /*
+ * Brainboxes PX-846
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4008,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b0_1_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x4017,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_1_15625000 },
+
+ /*
* Perle PCI-RAS cards
*/
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
diff --git a/drivers/tty/serial/8250/8250_pericom.c b/drivers/tty/serial/8250/8250_pericom.c
index 95ff10f25d58..b8d5b7714a9d 100644
--- a/drivers/tty/serial/8250/8250_pericom.c
+++ b/drivers/tty/serial/8250/8250_pericom.c
@@ -73,7 +73,7 @@ static void pericom_do_set_divisor(struct uart_port *port, unsigned int baud,
struct uart_8250_port *up = up_to_u8250p(port);
int lcr = serial_port_in(port, UART_LCR);
- serial_port_out(port, UART_LCR, lcr | 0x80);
+ serial_port_out(port, UART_LCR, lcr | UART_LCR_DLAB);
serial_dl_write(up, divisor);
serial_port_out(port, 2, 16 - scr);
serial_port_out(port, UART_LCR, lcr);
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 3c36a06a20b0..39b35a61958c 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -50,8 +50,6 @@
#define DEBUG_AUTOCONF(fmt...) do { } while (0)
#endif
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
/*
* Here we define the default xmit fifo size used for each type of UART.
*/
@@ -336,27 +334,29 @@ static void default_serial_dl_write(struct uart_8250_port *up, int value)
#ifdef CONFIG_SERIAL_8250_RT288X
+#define UART_REG_UNMAPPED -1
+
/* Au1x00/RT288x UART hardware has a weird register layout */
static const s8 au_io_in_map[8] = {
- 0, /* UART_RX */
- 2, /* UART_IER */
- 3, /* UART_IIR */
- 5, /* UART_LCR */
- 6, /* UART_MCR */
- 7, /* UART_LSR */
- 8, /* UART_MSR */
- -1, /* UART_SCR (unmapped) */
+ [UART_RX] = 0,
+ [UART_IER] = 2,
+ [UART_IIR] = 3,
+ [UART_LCR] = 5,
+ [UART_MCR] = 6,
+ [UART_LSR] = 7,
+ [UART_MSR] = 8,
+ [UART_SCR] = UART_REG_UNMAPPED,
};
static const s8 au_io_out_map[8] = {
- 1, /* UART_TX */
- 2, /* UART_IER */
- 4, /* UART_FCR */
- 5, /* UART_LCR */
- 6, /* UART_MCR */
- -1, /* UART_LSR (unmapped) */
- -1, /* UART_MSR (unmapped) */
- -1, /* UART_SCR (unmapped) */
+ [UART_TX] = 1,
+ [UART_IER] = 2,
+ [UART_FCR] = 4,
+ [UART_LCR] = 5,
+ [UART_MCR] = 6,
+ [UART_LSR] = UART_REG_UNMAPPED,
+ [UART_MSR] = UART_REG_UNMAPPED,
+ [UART_SCR] = UART_REG_UNMAPPED,
};
unsigned int au_serial_in(struct uart_port *p, int offset)
@@ -364,7 +364,7 @@ unsigned int au_serial_in(struct uart_port *p, int offset)
if (offset >= ARRAY_SIZE(au_io_in_map))
return UINT_MAX;
offset = au_io_in_map[offset];
- if (offset < 0)
+ if (offset == UART_REG_UNMAPPED)
return UINT_MAX;
return __raw_readl(p->membase + (offset << p->regshift));
}
@@ -374,7 +374,7 @@ void au_serial_out(struct uart_port *p, int offset, int value)
if (offset >= ARRAY_SIZE(au_io_out_map))
return;
offset = au_io_out_map[offset];
- if (offset < 0)
+ if (offset == UART_REG_UNMAPPED)
return;
__raw_writel(value, p->membase + (offset << p->regshift));
}
@@ -647,6 +647,14 @@ void serial8250_em485_destroy(struct uart_8250_port *p)
}
EXPORT_SYMBOL_GPL(serial8250_em485_destroy);
+struct serial_rs485 serial8250_em485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
+ SER_RS485_TERMINATE_BUS | SER_RS485_RX_DURING_TX,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1,
+};
+EXPORT_SYMBOL_GPL(serial8250_em485_supported);
+
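This definition completes a pattern repeated throughout the series: drivers advertise their capabilities through port->rs485_supported and let serial_core sanitize struct serial_rs485, while the rs485_config() callback only programs the hardware. A sketch of how an 8250 driver opts into the software-emulated RS485 handling, mirroring the 8250_of and bcm2835aux hunks earlier in this diff (the function name is illustrative):

static void example_enable_em485(struct uart_8250_port *up)
{
	up->port.rs485_config = serial8250_em485_config;	/* called by core after validation */
	up->port.rs485_supported = serial8250_em485_supported;	/* flags/delays the core accepts */
	up->rs485_start_tx = serial8250_em485_start_tx;
	up->rs485_stop_tx = serial8250_em485_stop_tx;
}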
/**
* serial8250_em485_config() - generic ->rs485_config() callback
* @port: uart port
@@ -656,7 +664,8 @@ EXPORT_SYMBOL_GPL(serial8250_em485_destroy);
* if the uart is incapable of driving RTS as a Transmit Enable signal in
* hardware, relying on software emulation instead.
*/
-int serial8250_em485_config(struct uart_port *port, struct serial_rs485 *rs485)
+int serial8250_em485_config(struct uart_port *port, struct ktermios *termios,
+ struct serial_rs485 *rs485)
{
struct uart_8250_port *up = up_to_u8250p(port);
@@ -667,29 +676,12 @@ int serial8250_em485_config(struct uart_port *port, struct serial_rs485 *rs485)
rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
}
- /* clamp the delays to [0, 100ms] */
- rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
- rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U);
-
- memset(rs485->padding, 0, sizeof(rs485->padding));
- port->rs485 = *rs485;
-
- gpiod_set_value(port->rs485_term_gpio,
- rs485->flags & SER_RS485_TERMINATE_BUS);
-
/*
* Both serial8250_em485_init() and serial8250_em485_destroy()
* are idempotent.
*/
- if (rs485->flags & SER_RS485_ENABLED) {
- int ret = serial8250_em485_init(up);
-
- if (ret) {
- rs485->flags &= ~SER_RS485_ENABLED;
- port->rs485.flags &= ~SER_RS485_ENABLED;
- }
- return ret;
- }
+ if (rs485->flags & SER_RS485_ENABLED)
+ return serial8250_em485_init(up);
serial8250_em485_destroy(up);
return 0;
@@ -849,7 +841,7 @@ static int size_fifo(struct uart_8250_port *up)
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
old_dl = serial_dl_read(up);
serial_dl_write(up, 0x0001);
- serial_out(up, UART_LCR, 0x03);
+ serial_out(up, UART_LCR, UART_LCR_WLEN8);
for (count = 0; count < 256; count++)
serial_out(up, UART_TX, count);
mdelay(20);/* FIXME - schedule_timeout */
@@ -1503,18 +1495,12 @@ static void __stop_tx_rs485(struct uart_8250_port *p, u64 stop_delay)
}
}
-static inline void __do_stop_tx(struct uart_8250_port *p)
-{
- if (serial8250_clear_THRI(p))
- serial8250_rpm_put_tx(p);
-}
-
static inline void __stop_tx(struct uart_8250_port *p)
{
struct uart_8250_em485 *em485 = p->em485;
if (em485) {
- unsigned char lsr = serial_in(p, UART_LSR);
+ u16 lsr = serial_lsr_in(p);
u64 stop_delay = 0;
p->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
@@ -1522,7 +1508,7 @@ static inline void __stop_tx(struct uart_8250_port *p)
if (!(lsr & UART_LSR_THRE))
return;
/*
- * To provide required timeing and allow FIFO transfer,
+ * To provide required timing and allow FIFO transfer,
* __stop_tx_rs485() must be called only when both FIFO and
* shift register are empty. The device driver should either
* enable interrupt on TEMT or set UART_CAP_NOTEMT that will
@@ -1544,7 +1530,9 @@ static inline void __stop_tx(struct uart_8250_port *p)
__stop_tx_rs485(p, stop_delay);
}
- __do_stop_tx(p);
+
+ if (serial8250_clear_THRI(p))
+ serial8250_rpm_put_tx(p);
}
static void serial8250_stop_tx(struct uart_port *port)
@@ -1573,10 +1561,8 @@ static inline void __start_tx(struct uart_port *port)
if (serial8250_set_THRI(up)) {
if (up->bugs & UART_BUG_TXEN) {
- unsigned char lsr;
+ u16 lsr = serial_lsr_in(up);
- lsr = serial_in(up, UART_LSR);
- up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
if (lsr & UART_LSR_THRE)
serial8250_tx_chars(up);
}
@@ -1616,7 +1602,8 @@ void serial8250_em485_start_tx(struct uart_8250_port *up)
}
EXPORT_SYMBOL_GPL(serial8250_em485_start_tx);
-static inline void start_tx_rs485(struct uart_port *port)
+/* Returns false if start_tx_timer was set up to defer TX start */
+static bool start_tx_rs485(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
struct uart_8250_em485 *em485 = up->em485;
@@ -1644,11 +1631,11 @@ static inline void start_tx_rs485(struct uart_port *port)
em485->active_timer = &em485->start_tx_timer;
start_hrtimer_ms(&em485->start_tx_timer,
up->port.rs485.delay_rts_before_send);
- return;
+ return false;
}
}
- __start_tx(port);
+ return true;
}
static enum hrtimer_restart serial8250_em485_handle_start_tx(struct hrtimer *t)
@@ -1678,14 +1665,12 @@ static void serial8250_start_tx(struct uart_port *port)
serial8250_rpm_get_tx(up);
- if (em485 &&
- em485->active_timer == &em485->start_tx_timer)
- return;
-
- if (em485)
- start_tx_rs485(port);
- else
- __start_tx(port);
+ if (em485) {
+ if ((em485->active_timer == &em485->start_tx_timer) ||
+ !start_tx_rs485(port))
+ return;
+ }
+ __start_tx(port);
}
static void serial8250_throttle(struct uart_port *port)
@@ -1729,7 +1714,7 @@ static void serial8250_enable_ms(struct uart_port *port)
serial8250_rpm_put(up);
}
-void serial8250_read_char(struct uart_8250_port *up, unsigned char lsr)
+void serial8250_read_char(struct uart_8250_port *up, u16 lsr)
{
struct uart_port *port = &up->port;
unsigned char ch;
@@ -1792,11 +1777,13 @@ void serial8250_read_char(struct uart_8250_port *up, unsigned char lsr)
EXPORT_SYMBOL_GPL(serial8250_read_char);
/*
- * serial8250_rx_chars: processes according to the passed in LSR
- * value, and returns the remaining LSR bits not handled
- * by this Rx routine.
+ * serial8250_rx_chars - Read characters. The first LSR value must be passed in.
+ *
+ * Returns LSR bits. The caller should rely only on non-Rx related LSR bits
+ * (such as THRE) because the LSR value might come from an already consumed
+ * character.
*/
-unsigned char serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr)
+u16 serial8250_rx_chars(struct uart_8250_port *up, u16 lsr)
{
struct uart_port *port = &up->port;
int max_count = 256;
@@ -1852,7 +1839,7 @@ void serial8250_tx_chars(struct uart_8250_port *up)
if (uart_circ_empty(xmit))
break;
if ((up->capabilities & UART_CAP_HFIFO) &&
- (serial_in(up, UART_LSR) & BOTH_EMPTY) != BOTH_EMPTY)
+ !uart_lsr_tx_empty(serial_in(up, UART_LSR)))
break;
/* The BCM2835 MINI UART THRE bit is really a not-full bit. */
if ((up->capabilities & UART_CAP_MINI) &&
@@ -1916,17 +1903,17 @@ static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir)
*/
int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
{
- unsigned char status;
struct uart_8250_port *up = up_to_u8250p(port);
bool skip_rx = false;
unsigned long flags;
+ u16 status;
if (iir & UART_IIR_NO_INT)
return 0;
spin_lock_irqsave(&port->lock, flags);
- status = serial_port_in(port, UART_LSR);
+ status = serial_lsr_in(up);
/*
* If port is stopped and there are no error conditions in the
@@ -2002,18 +1989,17 @@ static unsigned int serial8250_tx_empty(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
unsigned long flags;
- unsigned int lsr;
+ u16 lsr;
serial8250_rpm_get(up);
spin_lock_irqsave(&port->lock, flags);
- lsr = serial_port_in(port, UART_LSR);
- up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
+ lsr = serial_lsr_in(up);
spin_unlock_irqrestore(&port->lock, flags);
serial8250_rpm_put(up);
- return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0;
+ return uart_lsr_tx_empty(lsr) ? TIOCSER_TEMT : 0;
}
unsigned int serial8250_do_get_mctrl(struct uart_port *port)
@@ -2084,9 +2070,7 @@ static void wait_for_lsr(struct uart_8250_port *up, int bits)
/* Wait up to 10ms for the character(s) to be sent. */
for (;;) {
- status = serial_in(up, UART_LSR);
-
- up->lsr_saved_flags |= status & LSR_SAVE_FLAGS;
+ status = serial_lsr_in(up);
if ((status & bits) == bits)
break;
@@ -2128,8 +2112,8 @@ static void wait_for_xmitr(struct uart_8250_port *up, int bits)
static int serial8250_get_poll_char(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
- unsigned char lsr;
int status;
+ u16 lsr;
serial8250_rpm_get(up);
@@ -2163,7 +2147,7 @@ static void serial8250_put_poll_char(struct uart_port *port,
else
serial_port_out(port, UART_IER, 0);
- wait_for_xmitr(up, BOTH_EMPTY);
+ wait_for_xmitr(up, UART_LSR_BOTH_EMPTY);
/*
* Send the character out.
*/
@@ -2173,7 +2157,7 @@ static void serial8250_put_poll_char(struct uart_port *port,
* Finally, wait for transmitter to become empty
* and restore the IER
*/
- wait_for_xmitr(up, BOTH_EMPTY);
+ wait_for_xmitr(up, UART_LSR_BOTH_EMPTY);
serial_port_out(port, UART_IER, ier);
serial8250_rpm_put(up);
}
@@ -2184,8 +2168,9 @@ int serial8250_do_startup(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
unsigned long flags;
- unsigned char lsr, iir;
+ unsigned char iir;
int retval;
+ u16 lsr;
if (!port->fifosize)
port->fifosize = uart_config[port->type].fifo_size;
@@ -2810,7 +2795,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
port->read_status_mask |= UART_LSR_BI;
/*
- * Characteres to ignore
+ * Characters to ignore
*/
port->ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
@@ -3203,7 +3188,7 @@ static void serial8250_config_port(struct uart_port *port, int flags)
autoconfig(up);
if (port->rs485.flags & SER_RS485_ENABLED)
- port->rs485_config(port, &port->rs485);
+ uart_rs485_config(port);
/* if access method is AU, it is a 16550 with a quirk */
if (port->type == PORT_16550A && port->iotype == UPIO_AU)
@@ -3445,7 +3430,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
* Finally, wait for transmitter to become empty
* and restore the IER
*/
- wait_for_xmitr(up, BOTH_EMPTY);
+ wait_for_xmitr(up, UART_LSR_BOTH_EMPTY);
if (em485) {
mdelay(port->rs485.delay_rts_after_send);
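The hunks above drop the driver-local (lsr & BOTH_EMPTY) == BOTH_EMPTY tests in favour of uart_lsr_tx_empty() and the UART_LSR_BOTH_EMPTY constant, which come from the shared serial headers rather than from this file. A minimal stand-alone restatement of what that helper checks, using the standard 16550 LSR bit values (the names mirror the kernel ones, but this sketch is only an illustration, not the header's actual contents):

    #include <stdbool.h>
    #include <stdint.h>

    #define UART_LSR_THRE  0x20   /* transmit-holding register empty */
    #define UART_LSR_TEMT  0x40   /* transmitter (shift register) empty */

    /* TX is only finished once both the holding/FIFO stage and the
     * shift register have drained. */
    #define UART_LSR_BOTH_EMPTY  (UART_LSR_TEMT | UART_LSR_THRE)

    static bool uart_lsr_tx_empty(uint16_t lsr)
    {
            return (lsr & UART_LSR_BOTH_EMPTY) == UART_LSR_BOTH_EMPTY;
    }

Callers such as wait_for_xmitr(up, UART_LSR_BOTH_EMPTY) in the hunks above poll the LSR until that condition holds before touching the IER.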
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index fdb6c4188695..d0b49e15fbf5 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -254,6 +254,7 @@ config SERIAL_8250_ASPEED_VUART
depends on SERIAL_8250
depends on OF
depends on REGMAP && MFD_SYSCON
+ depends on ARCH_ASPEED || COMPILE_TEST
help
If you want to use the virtual UART (VUART) device on Aspeed
BMC platforms, enable this option. This enables the 16550A-
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 7172cd1792df..877173907c53 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -324,6 +324,7 @@ config SERIAL_MAX310X
depends on SPI_MASTER
select SERIAL_CORE
select REGMAP_SPI if SPI_MASTER
+ select REGMAP_I2C if I2C
help
This selects support for an advanced UART from Maxim (Dallas).
Supported ICs are MAX3107, MAX3108, MAX3109, MAX14830.
@@ -889,23 +890,6 @@ config SERIAL_TXX9_STDSERIAL
bool "TX39XX/49XX SIO act as standard serial"
depends on !SERIAL_8250 && SERIAL_TXX9
-config SERIAL_VR41XX
- tristate "NEC VR4100 series Serial Interface Unit support"
- depends on CPU_VR41XX
- select SERIAL_CORE
- help
- If you have a NEC VR4100 series processor and you want to use
- Serial Interface Unit(SIU) or Debug Serial Interface Unit(DSIU)
- (not include VR4111/VR4121 DSIU), say Y. Otherwise, say N.
-
-config SERIAL_VR41XX_CONSOLE
- bool "Enable NEC VR4100 series Serial Interface Unit console"
- depends on SERIAL_VR41XX=y
- select SERIAL_CORE_CONSOLE
- help
- If you have a NEC VR4100 series processor and you want to use
- a console on a serial port, say Y. Otherwise, say N.
-
config SERIAL_JSM
tristate "Digi International NEO and Classic PCI Support"
depends on PCI
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index 61cc8de95571..238a9557b487 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -51,7 +51,6 @@ obj-$(CONFIG_SERIAL_SCCNXP) += sccnxp.o
obj-$(CONFIG_SERIAL_SC16IS7XX_CORE) += sc16is7xx.o
obj-$(CONFIG_SERIAL_JSM) += jsm/
obj-$(CONFIG_SERIAL_TXX9) += serial_txx9.o
-obj-$(CONFIG_SERIAL_VR41XX) += vr41xx_siu.o
obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o
obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o
obj-$(CONFIG_SERIAL_MSM) += msm_serial.o
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 16a21422ddce..15f0e4d88c5a 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2214,7 +2214,7 @@ static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
return ret;
}
-static int pl011_rs485_config(struct uart_port *port,
+static int pl011_rs485_config(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
struct uart_amba_port *uap =
@@ -2700,17 +2700,12 @@ static int pl011_find_free_port(void)
static int pl011_get_rs485_mode(struct uart_amba_port *uap)
{
struct uart_port *port = &uap->port;
- struct serial_rs485 *rs485 = &port->rs485;
int ret;
ret = uart_get_rs485_mode(port);
if (ret)
return ret;
- /* clamp the delays to [0, 100ms] */
- rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
- rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U);
-
return 0;
}
@@ -2770,6 +2765,13 @@ static int pl011_register_port(struct uart_amba_port *uap)
return ret;
}
+static const struct serial_rs485 pl011_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
+ SER_RS485_RX_DURING_TX,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1,
+};
+
static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
{
struct uart_amba_port *uap;
@@ -2796,6 +2798,7 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
uap->port.irq = dev->irq[0];
uap->port.ops = &amba_pl011_pops;
uap->port.rs485_config = pl011_rs485_config;
+ uap->port.rs485_supported = pl011_rs485_supported;
snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));
ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr);
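pl011 is one of several drivers in this diff that stop sanitizing struct serial_rs485 by hand: the driver now only describes what it can do through port->rs485_supported (the flag bits plus whether the RTS delays mean anything), and validation of userspace requests moves into the serial core behind uart_rs485_config(). A rough stand-alone model of that split follows; rs485_sanitize() is a hypothetical stand-in for the core's logic, not the actual serial_core implementation:

    #include <stdint.h>

    #define SER_RS485_ENABLED        (1 << 0)
    #define SER_RS485_RTS_ON_SEND    (1 << 1)
    #define SER_RS485_RTS_AFTER_SEND (1 << 2)
    #define SER_RS485_RX_DURING_TX   (1 << 4)

    struct serial_rs485 {
            uint32_t flags;
            uint32_t delay_rts_before_send;
            uint32_t delay_rts_after_send;
    };

    /* Hypothetical core-side sanitizer: keep only the flags the driver
     * advertised and zero any delay it did not claim to support. */
    static void rs485_sanitize(struct serial_rs485 *req,
                               const struct serial_rs485 *supported)
    {
            req->flags &= supported->flags;
            if (!supported->delay_rts_before_send)
                    req->delay_rts_before_send = 0;
            if (!supported->delay_rts_after_send)
                    req->delay_rts_after_send = 0;
    }

With that in place, a callback like pl011_rs485_config() only has to program the hardware; the serial_rs485 it receives is expected to already fit pl011_rs485_supported.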
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index 6269dbf93546..32caeac12985 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -580,18 +580,9 @@ static const struct uart_ops ar933x_uart_ops = {
.verify_port = ar933x_uart_verify_port,
};
-static int ar933x_config_rs485(struct uart_port *port,
+static int ar933x_config_rs485(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485conf)
{
- struct ar933x_uart_port *up =
- container_of(port, struct ar933x_uart_port, port);
-
- if ((rs485conf->flags & SER_RS485_ENABLED) &&
- !up->rts_gpiod) {
- dev_err(port->dev, "RS485 needs rts-gpio\n");
- return 1;
- }
- port->rs485 = *rs485conf;
return 0;
}
@@ -702,6 +693,11 @@ static struct uart_driver ar933x_uart_driver = {
.cons = NULL, /* filled in runtime */
};
+static const struct serial_rs485 ar933x_no_rs485 = {};
+static const struct serial_rs485 ar933x_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND,
+};
+
static int ar933x_uart_probe(struct platform_device *pdev)
{
struct ar933x_uart_port *up;
@@ -773,6 +769,7 @@ static int ar933x_uart_probe(struct platform_device *pdev)
port->fifosize = AR933X_UART_FIFO_SIZE;
port->ops = &ar933x_uart_ops;
port->rs485_config = ar933x_config_rs485;
+ port->rs485_supported = ar933x_rs485_supported;
baud = ar933x_uart_get_baud(port->uartclk, AR933X_UART_MAX_SCALE, 1);
up->min_baud = max_t(unsigned int, baud, AR933X_UART_MIN_BAUD);
@@ -792,10 +789,12 @@ static int ar933x_uart_probe(struct platform_device *pdev)
up->rts_gpiod = mctrl_gpio_to_gpiod(up->gpios, UART_GPIO_RTS);
- if ((port->rs485.flags & SER_RS485_ENABLED) &&
- !up->rts_gpiod) {
- dev_err(&pdev->dev, "lacking rts-gpio, disabling RS485\n");
- port->rs485.flags &= ~SER_RS485_ENABLED;
+ if (!up->rts_gpiod) {
+ port->rs485_supported = ar933x_no_rs485;
+ if (port->rs485.flags & SER_RS485_ENABLED) {
+ dev_err(&pdev->dev, "lacking rts-gpio, disabling RS485\n");
+ port->rs485.flags &= ~SER_RS485_ENABLED;
+ }
}
#ifdef CONFIG_SERIAL_AR933X_CONSOLE
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index dd1c7e4bd1c9..30ba9eef7b39 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -166,7 +166,6 @@ struct atmel_uart_port {
unsigned int fidi_min;
unsigned int fidi_max;
-#ifdef CONFIG_PM
struct {
u32 cr;
u32 mr;
@@ -177,7 +176,6 @@ struct atmel_uart_port {
u32 fmr;
u32 fimr;
} cache;
-#endif
int (*prepare_rx)(struct uart_port *port);
int (*prepare_tx)(struct uart_port *port);
@@ -285,7 +283,7 @@ static void atmel_tasklet_schedule(struct atmel_uart_port *atmel_port,
}
/* Enable or disable the rs485 support */
-static int atmel_config_rs485(struct uart_port *port,
+static int atmel_config_rs485(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485conf)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
@@ -2473,6 +2471,12 @@ static const struct uart_ops atmel_pops = {
#endif
};
+static const struct serial_rs485 atmel_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_AFTER_SEND | SER_RS485_RX_DURING_TX,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1,
+};
+
/*
* Configure the port from the platform device resource info.
*/
@@ -2494,6 +2498,7 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port,
port->mapbase = mpdev->resource[0].start;
port->irq = platform_get_irq(mpdev, 0);
port->rs485_config = atmel_config_rs485;
+ port->rs485_supported = atmel_rs485_supported;
port->iso7816_config = atmel_config_iso7816;
port->membase = NULL;
@@ -2503,24 +2508,7 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port,
if (ret)
return ret;
- /* for console, the clock could already be configured */
- if (!atmel_port->clk) {
- atmel_port->clk = clk_get(&mpdev->dev, "usart");
- if (IS_ERR(atmel_port->clk)) {
- ret = PTR_ERR(atmel_port->clk);
- atmel_port->clk = NULL;
- return ret;
- }
- ret = clk_prepare_enable(atmel_port->clk);
- if (ret) {
- clk_put(atmel_port->clk);
- atmel_port->clk = NULL;
- return ret;
- }
- port->uartclk = clk_get_rate(atmel_port->clk);
- clk_disable_unprepare(atmel_port->clk);
- /* only enable clock when USART is in use */
- }
+ port->uartclk = clk_get_rate(atmel_port->clk);
/*
* Use TXEMPTY for interrupt when rs485 or ISO7816 else TXRDY or
@@ -2629,7 +2617,6 @@ static void __init atmel_console_get_options(struct uart_port *port, int *baud,
static int __init atmel_console_setup(struct console *co, char *options)
{
- int ret;
struct uart_port *port = &atmel_ports[co->index].uart;
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
int baud = 115200;
@@ -2642,10 +2629,6 @@ static int __init atmel_console_setup(struct console *co, char *options)
return -ENODEV;
}
- ret = clk_prepare_enable(atmel_ports[co->index].clk);
- if (ret)
- return ret;
-
atmel_uart_writel(port, ATMEL_US_IDR, -1);
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
@@ -2711,7 +2694,6 @@ static struct uart_driver atmel_uart = {
.cons = ATMEL_CONSOLE_DEVICE,
};
-#ifdef CONFIG_PM
static bool atmel_serial_clk_will_stop(void)
{
#ifdef CONFIG_ARCH_AT91
@@ -2721,10 +2703,9 @@ static bool atmel_serial_clk_will_stop(void)
#endif
}
-static int atmel_serial_suspend(struct platform_device *pdev,
- pm_message_t state)
+static int __maybe_unused atmel_serial_suspend(struct device *dev)
{
- struct uart_port *port = platform_get_drvdata(pdev);
+ struct uart_port *port = dev_get_drvdata(dev);
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (uart_console(port) && console_suspend_enabled) {
@@ -2749,14 +2730,14 @@ static int atmel_serial_suspend(struct platform_device *pdev,
}
/* we can not wake up if we're running on slow clock */
- atmel_port->may_wakeup = device_may_wakeup(&pdev->dev);
+ atmel_port->may_wakeup = device_may_wakeup(dev);
if (atmel_serial_clk_will_stop()) {
unsigned long flags;
spin_lock_irqsave(&atmel_port->lock_suspended, flags);
atmel_port->suspended = true;
spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
- device_set_wakeup_enable(&pdev->dev, 0);
+ device_set_wakeup_enable(dev, 0);
}
uart_suspend_port(&atmel_uart, port);
@@ -2764,9 +2745,9 @@ static int atmel_serial_suspend(struct platform_device *pdev,
return 0;
}
-static int atmel_serial_resume(struct platform_device *pdev)
+static int __maybe_unused atmel_serial_resume(struct device *dev)
{
- struct uart_port *port = platform_get_drvdata(pdev);
+ struct uart_port *port = dev_get_drvdata(dev);
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
unsigned long flags;
@@ -2801,14 +2782,10 @@ static int atmel_serial_resume(struct platform_device *pdev)
spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
uart_resume_port(&atmel_uart, port);
- device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup);
+ device_set_wakeup_enable(dev, atmel_port->may_wakeup);
return 0;
}
-#else
-#define atmel_serial_suspend NULL
-#define atmel_serial_resume NULL
-#endif
static void atmel_serial_probe_fifos(struct atmel_uart_port *atmel_port,
struct platform_device *pdev)
@@ -2897,14 +2874,23 @@ static int atmel_serial_probe(struct platform_device *pdev)
atomic_set(&atmel_port->tasklet_shutdown, 0);
spin_lock_init(&atmel_port->lock_suspended);
+ atmel_port->clk = devm_clk_get(&pdev->dev, "usart");
+ if (IS_ERR(atmel_port->clk)) {
+ ret = PTR_ERR(atmel_port->clk);
+ goto err;
+ }
+ ret = clk_prepare_enable(atmel_port->clk);
+ if (ret)
+ goto err;
+
ret = atmel_init_port(atmel_port, pdev);
if (ret)
- goto err_clear_bit;
+ goto err_clk_disable_unprepare;
atmel_port->gpios = mctrl_gpio_init(&atmel_port->uart, 0);
if (IS_ERR(atmel_port->gpios)) {
ret = PTR_ERR(atmel_port->gpios);
- goto err_clear_bit;
+ goto err_clk_disable_unprepare;
}
if (!atmel_use_pdc_rx(&atmel_port->uart)) {
@@ -2913,7 +2899,7 @@ static int atmel_serial_probe(struct platform_device *pdev)
sizeof(struct atmel_uart_char),
GFP_KERNEL);
if (!data)
- goto err_alloc_ring;
+ goto err_clk_disable_unprepare;
atmel_port->rx_ring.buf = data;
}
@@ -2923,26 +2909,9 @@ static int atmel_serial_probe(struct platform_device *pdev)
if (ret)
goto err_add_port;
-#ifdef CONFIG_SERIAL_ATMEL_CONSOLE
- if (uart_console(&atmel_port->uart)
- && ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) {
- /*
- * The serial core enabled the clock for us, so undo
- * the clk_prepare_enable() in atmel_console_setup()
- */
- clk_disable_unprepare(atmel_port->clk);
- }
-#endif
-
device_init_wakeup(&pdev->dev, 1);
platform_set_drvdata(pdev, atmel_port);
- /*
- * The peripheral clock has been disabled by atmel_init_port():
- * enable it before accessing I/O registers
- */
- clk_prepare_enable(atmel_port->clk);
-
if (rs485_enabled) {
atmel_uart_writel(&atmel_port->uart, ATMEL_US_MR,
ATMEL_US_USMODE_NORMAL);
@@ -2966,12 +2935,8 @@ static int atmel_serial_probe(struct platform_device *pdev)
err_add_port:
kfree(atmel_port->rx_ring.buf);
atmel_port->rx_ring.buf = NULL;
-err_alloc_ring:
- if (!uart_console(&atmel_port->uart)) {
- clk_put(atmel_port->clk);
- atmel_port->clk = NULL;
- }
-err_clear_bit:
+err_clk_disable_unprepare:
+ clk_disable_unprepare(atmel_port->clk);
clear_bit(atmel_port->uart.line, atmel_ports_in_use);
err:
return ret;
@@ -3005,21 +2970,21 @@ static int atmel_serial_remove(struct platform_device *pdev)
clear_bit(port->line, atmel_ports_in_use);
- clk_put(atmel_port->clk);
- atmel_port->clk = NULL;
pdev->dev.of_node = NULL;
return ret;
}
+static SIMPLE_DEV_PM_OPS(atmel_serial_pm_ops, atmel_serial_suspend,
+ atmel_serial_resume);
+
static struct platform_driver atmel_serial_driver = {
.probe = atmel_serial_probe,
.remove = atmel_serial_remove,
- .suspend = atmel_serial_suspend,
- .resume = atmel_serial_resume,
.driver = {
.name = "atmel_usart_serial",
.of_match_table = of_match_ptr(atmel_serial_dt_ids),
+ .pm = pm_ptr(&atmel_serial_pm_ops),
},
};
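The atmel_serial portion also converts from the legacy platform_driver .suspend/.resume hooks to dev_pm_ops, which is why the CONFIG_PM #ifdefs around the register cache and the callbacks disappear: the callbacks are marked __maybe_unused and the ops are wired up with pm_ptr(), so unused callbacks can be dropped when power management is disabled. The same idiom reduced to a skeleton for a hypothetical "foo" platform driver (a sketch, not part of this patch):

    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/pm.h>

    static int foo_probe(struct platform_device *pdev)
    {
            return 0;
    }

    static int __maybe_unused foo_suspend(struct device *dev)
    {
            /* quiesce the hardware and save registers here */
            return 0;
    }

    static int __maybe_unused foo_resume(struct device *dev)
    {
            /* restore the state saved in foo_suspend() */
            return 0;
    }

    static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

    static struct platform_driver foo_driver = {
            .probe  = foo_probe,
            .driver = {
                    .name = "foo",
                    /* pm_ptr() compiles the ops away when CONFIG_PM=n */
                    .pm   = pm_ptr(&foo_pm_ops),
            },
    };
    module_platform_driver(foo_driver);

    MODULE_LICENSE("GPL");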
diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c
index 57c70851f22a..88d08ba1ca83 100644
--- a/drivers/tty/serial/earlycon.c
+++ b/drivers/tty/serial/earlycon.c
@@ -253,6 +253,9 @@ int __init of_setup_earlycon(const struct earlycon_id *match,
bool big_endian;
u64 addr;
+ if (early_con.flags & CON_ENABLED)
+ return -EALREADY;
+
spin_lock_init(&port->lock);
port->iotype = UPIO_MEM;
addr = of_flat_dt_translate_address(node);
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 0d6e62f6bb07..f6c33cd228c8 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -274,6 +274,8 @@ struct lpuart_port {
int rx_dma_rng_buf_len;
unsigned int dma_tx_nents;
wait_queue_head_t dma_wait;
+ bool is_cs7; /* Set to true when character size is 7 */
+ /* and the parity is enabled */
};
struct lpuart_soc_data {
@@ -990,12 +992,12 @@ static void lpuart32_rxint(struct lpuart_port *sport)
if (sr & (UARTSTAT_PE | UARTSTAT_OR | UARTSTAT_FE)) {
if (sr & UARTSTAT_PE) {
+ sport->port.icount.parity++;
+ } else if (sr & UARTSTAT_FE) {
if (is_break)
sport->port.icount.brk++;
else
- sport->port.icount.parity++;
- } else if (sr & UARTSTAT_FE) {
- sport->port.icount.frame++;
+ sport->port.icount.frame++;
}
if (sr & UARTSTAT_OR)
@@ -1010,18 +1012,21 @@ static void lpuart32_rxint(struct lpuart_port *sport)
sr &= sport->port.read_status_mask;
if (sr & UARTSTAT_PE) {
+ flg = TTY_PARITY;
+ } else if (sr & UARTSTAT_FE) {
if (is_break)
flg = TTY_BREAK;
else
- flg = TTY_PARITY;
- } else if (sr & UARTSTAT_FE) {
- flg = TTY_FRAME;
+ flg = TTY_FRAME;
}
if (sr & UARTSTAT_OR)
flg = TTY_OVERRUN;
}
+ if (sport->is_cs7)
+ rx &= 0x7F;
+
if (tty_insert_flip_char(port, rx, flg) == 0)
sport->port.icount.buf_overrun++;
}
@@ -1107,6 +1112,17 @@ static void lpuart_handle_sysrq(struct lpuart_port *sport)
}
}
+static int lpuart_tty_insert_flip_string(struct tty_port *port,
+ unsigned char *chars, size_t size, bool is_cs7)
+{
+ int i;
+
+ if (is_cs7)
+ for (i = 0; i < size; i++)
+ chars[i] &= 0x7F;
+ return tty_insert_flip_string(port, chars, size);
+}
+
static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
{
struct tty_port *port = &sport->port.state->port;
@@ -1217,7 +1233,8 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
if (ring->head < ring->tail) {
count = sport->rx_sgl.length - ring->tail;
- copied = tty_insert_flip_string(port, ring->buf + ring->tail, count);
+ copied = lpuart_tty_insert_flip_string(port, ring->buf + ring->tail,
+ count, sport->is_cs7);
if (copied != count)
sport->port.icount.buf_overrun++;
ring->tail = 0;
@@ -1227,7 +1244,8 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
/* Finally we read data from tail to head */
if (ring->tail < ring->head) {
count = ring->head - ring->tail;
- copied = tty_insert_flip_string(port, ring->buf + ring->tail, count);
+ copied = lpuart_tty_insert_flip_string(port, ring->buf + ring->tail,
+ count, sport->is_cs7);
if (copied != count)
sport->port.icount.buf_overrun++;
/* Wrap ring->head if needed */
@@ -1355,7 +1373,7 @@ static void lpuart_dma_rx_free(struct uart_port *port)
sport->dma_rx_cookie = -EINVAL;
}
-static int lpuart_config_rs485(struct uart_port *port,
+static int lpuart_config_rs485(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
struct lpuart_port *sport = container_of(port,
@@ -1365,11 +1383,6 @@ static int lpuart_config_rs485(struct uart_port *port,
~(UARTMODEM_TXRTSPOL | UARTMODEM_TXRTSE);
writeb(modem, sport->port.membase + UARTMODEM);
- /* clear unsupported configurations */
- rs485->delay_rts_before_send = 0;
- rs485->delay_rts_after_send = 0;
- rs485->flags &= ~SER_RS485_RX_DURING_TX;
-
if (rs485->flags & SER_RS485_ENABLED) {
/* Enable auto RS-485 RTS mode */
modem |= UARTMODEM_TXRTSE;
@@ -1390,7 +1403,7 @@ static int lpuart_config_rs485(struct uart_port *port,
return 0;
}
-static int lpuart32_config_rs485(struct uart_port *port,
+static int lpuart32_config_rs485(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
struct lpuart_port *sport = container_of(port,
@@ -1400,11 +1413,6 @@ static int lpuart32_config_rs485(struct uart_port *port,
& ~(UARTMODEM_TXRTSPOL | UARTMODEM_TXRTSE);
lpuart32_write(&sport->port, modem, UARTMODIR);
- /* clear unsupported configurations */
- rs485->delay_rts_before_send = 0;
- rs485->delay_rts_after_send = 0;
- rs485->flags &= ~SER_RS485_RX_DURING_TX;
-
if (rs485->flags & SER_RS485_ENABLED) {
/* Enable auto RS-485 RTS mode */
modem |= UARTMODEM_TXRTSE;
@@ -2076,6 +2084,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
ctrl = old_ctrl = lpuart32_read(&sport->port, UARTCTRL);
bd = lpuart32_read(&sport->port, UARTBAUD);
modem = lpuart32_read(&sport->port, UARTMODIR);
+ sport->is_cs7 = false;
/*
* only support CS8 and CS7, and for CS7 must enable PE.
* supported mode:
@@ -2194,6 +2203,9 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
lpuart32_write(&sport->port, ctrl, UARTCTRL);
/* restore control register */
+ if ((ctrl & (UARTCTRL_PE | UARTCTRL_M)) == UARTCTRL_PE)
+ sport->is_cs7 = true;
+
if (old && sport->lpuart_dma_rx_use) {
if (!lpuart_start_rx_dma(sport))
rx_dma_timer_init(sport);
@@ -2621,6 +2633,11 @@ static struct uart_driver lpuart_reg = {
.cons = LPUART_CONSOLE,
};
+static const struct serial_rs485 lpuart_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND,
+ /* delay_rts_* and RX_DURING_TX are not supported */
+};
+
static int lpuart_probe(struct platform_device *pdev)
{
const struct lpuart_soc_data *sdata = of_device_get_match_data(&pdev->dev);
@@ -2660,6 +2677,7 @@ static int lpuart_probe(struct platform_device *pdev)
sport->port.rs485_config = lpuart32_config_rs485;
else
sport->port.rs485_config = lpuart_config_rs485;
+ sport->port.rs485_supported = lpuart_rs485_supported;
sport->ipg_clk = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(sport->ipg_clk)) {
@@ -2717,14 +2735,7 @@ static int lpuart_probe(struct platform_device *pdev)
if (ret)
goto failed_get_rs485;
- if (sport->port.rs485.flags & SER_RS485_RX_DURING_TX)
- dev_err(&pdev->dev, "driver doesn't support RX during TX\n");
-
- if (sport->port.rs485.delay_rts_before_send ||
- sport->port.rs485.delay_rts_after_send)
- dev_err(&pdev->dev, "driver doesn't support RTS delays\n");
-
- sport->port.rs485_config(&sport->port, &sport->port.rs485);
+ uart_rs485_config(&sport->port);
ret = devm_request_irq(&pdev->dev, sport->port.irq, handler, 0,
DRIVER_NAME, sport);
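The new is_cs7 flag in fsl_lpuart exists because, presumably, the receive data register still carries the parity bit in bit 7 when the port runs with 7 data bits plus parity; both the interrupt path (lpuart32_rxint) and the DMA path (via lpuart_tty_insert_flip_string) now strip it before handing bytes to the TTY layer. The masking step on its own, as a tiny stand-alone model of the DMA-path helper (the name is made up here, and the tty_insert_flip_string() call is left out):

    #include <stddef.h>

    /* In 7-bit + parity mode, bit 7 of each received byte is the parity
     * bit and must not reach user space. */
    static void lpuart_mask_cs7(unsigned char *chars, size_t size, int is_cs7)
    {
            size_t i;

            if (!is_cs7)
                    return;
            for (i = 0; i < size; i++)
                    chars[i] &= 0x7F;
    }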
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 30edb35a6a15..522445a8f666 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1907,16 +1907,12 @@ static void imx_uart_poll_put_char(struct uart_port *port, unsigned char c)
#endif
/* called with port.lock taken and irqs off or from .probe without locking */
-static int imx_uart_rs485_config(struct uart_port *port,
+static int imx_uart_rs485_config(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485conf)
{
struct imx_port *sport = (struct imx_port *)port;
u32 ucr2;
- /* RTS is required to control the transmitter */
- if (!sport->have_rtscts && !sport->have_rtsgpio)
- rs485conf->flags &= ~SER_RS485_ENABLED;
-
if (rs485conf->flags & SER_RS485_ENABLED) {
/* Enable receiver if low-active RTS signal is requested */
if (sport->have_rtscts && !sport->have_rtsgpio &&
@@ -2200,6 +2196,14 @@ static enum hrtimer_restart imx_trigger_stop_tx(struct hrtimer *t)
return HRTIMER_NORESTART;
}
+static const struct serial_rs485 imx_no_rs485 = {}; /* No RS485 if no RTS */
+static const struct serial_rs485 imx_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
+ SER_RS485_RX_DURING_TX,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1,
+};
+
/* Default RX DMA buffer configuration */
#define RX_DMA_PERIODS 16
#define RX_DMA_PERIOD_LEN (PAGE_SIZE / 4)
@@ -2279,6 +2283,11 @@ static int imx_uart_probe(struct platform_device *pdev)
sport->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_IMX_CONSOLE);
sport->port.ops = &imx_uart_pops;
sport->port.rs485_config = imx_uart_rs485_config;
+ /* RTS is required to control the RS485 transmitter */
+ if (sport->have_rtscts || sport->have_rtsgpio)
+ sport->port.rs485_supported = imx_rs485_supported;
+ else
+ sport->port.rs485_supported = imx_no_rs485;
sport->port.flags = UPF_BOOT_AUTOCONF;
timer_setup(&sport->timer, imx_uart_timeout, 0);
@@ -2338,7 +2347,7 @@ static int imx_uart_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"low-active RTS not possible when receiver is off, enabling receiver\n");
- imx_uart_rs485_config(&sport->port, &sport->port.rs485);
+ uart_rs485_config(&sport->port);
/* Disable interrupts before requesting them */
ucr1 = imx_uart_readl(sport, UCR1);
diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
index 79b7db8580e0..7aa37be3216a 100644
--- a/drivers/tty/serial/kgdboc.c
+++ b/drivers/tty/serial/kgdboc.c
@@ -342,7 +342,7 @@ static int param_set_kgdboc_var(const char *kmessage,
/*
* Configure with the new params as long as init already ran.
* Note that we can get called before init if someone loads us
- * with "modprobe kgdboc kgdboc=..." or if they happen to use the
+ * with "modprobe kgdboc kgdboc=..." or if they happen to use
* the odd syntax of "kgdboc.kgdboc=..." on the kernel command.
*/
if (configured >= 0)
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index a0b6ea52d133..ab10ca4a45b5 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -14,6 +14,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gpio/driver.h>
+#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/property.h>
@@ -72,7 +73,8 @@
#define MAX310X_GLOBALCMD_REG MAX310X_REG_1F /* Global Command (WO) */
/* Extended registers */
-#define MAX310X_REVID_EXTREG MAX310X_REG_05 /* Revision ID */
+#define MAX310X_SPI_REVID_EXTREG MAX310X_REG_05 /* Revision ID */
+#define MAX310X_I2C_REVID_EXTREG (0x25) /* Revision ID */
/* IRQ register bits */
#define MAX310X_IRQ_LSR_BIT (1 << 0) /* LSR interrupt */
@@ -245,7 +247,17 @@
#define MAX14830_BRGCFG_CLKDIS_BIT (1 << 6) /* Clock Disable */
#define MAX14830_REV_ID (0xb0)
+struct max310x_if_cfg {
+ int (*extended_reg_enable)(struct device *dev, bool enable);
+
+ unsigned int rev_id_reg;
+};
+
struct max310x_devtype {
+ struct {
+ unsigned short min;
+ unsigned short max;
+ } slave_addr;
char name[9];
int nr;
u8 mode1;
@@ -258,9 +270,8 @@ struct max310x_one {
struct work_struct tx_work;
struct work_struct md_work;
struct work_struct rs_work;
+ struct regmap *regmap;
- u8 wr_header;
- u8 rd_header;
u8 rx_buf[MAX310X_FIFO_SIZE];
};
#define to_max310x_port(_port) \
@@ -268,6 +279,7 @@ struct max310x_one {
struct max310x_port {
const struct max310x_devtype *devtype;
+ const struct max310x_if_cfg *if_cfg;
struct regmap *regmap;
struct clk *clk;
#ifdef CONFIG_GPIOLIB
@@ -289,26 +301,26 @@ static DECLARE_BITMAP(max310x_lines, MAX310X_UART_NRMAX);
static u8 max310x_port_read(struct uart_port *port, u8 reg)
{
- struct max310x_port *s = dev_get_drvdata(port->dev);
+ struct max310x_one *one = to_max310x_port(port);
unsigned int val = 0;
- regmap_read(s->regmap, port->iobase + reg, &val);
+ regmap_read(one->regmap, reg, &val);
return val;
}
static void max310x_port_write(struct uart_port *port, u8 reg, u8 val)
{
- struct max310x_port *s = dev_get_drvdata(port->dev);
+ struct max310x_one *one = to_max310x_port(port);
- regmap_write(s->regmap, port->iobase + reg, val);
+ regmap_write(one->regmap, reg, val);
}
static void max310x_port_update(struct uart_port *port, u8 reg, u8 mask, u8 val)
{
- struct max310x_port *s = dev_get_drvdata(port->dev);
+ struct max310x_one *one = to_max310x_port(port);
- regmap_update_bits(s->regmap, port->iobase + reg, mask, val);
+ regmap_update_bits(one->regmap, reg, mask, val);
}
static int max3107_detect(struct device *dev)
@@ -357,13 +369,12 @@ static int max3109_detect(struct device *dev)
unsigned int val = 0;
int ret;
- ret = regmap_write(s->regmap, MAX310X_GLOBALCMD_REG,
- MAX310X_EXTREG_ENBL);
+ ret = s->if_cfg->extended_reg_enable(dev, true);
if (ret)
return ret;
- regmap_read(s->regmap, MAX310X_REVID_EXTREG, &val);
- regmap_write(s->regmap, MAX310X_GLOBALCMD_REG, MAX310X_EXTREG_DSBL);
+ regmap_read(s->regmap, s->if_cfg->rev_id_reg, &val);
+ s->if_cfg->extended_reg_enable(dev, false);
if (((val & MAX310x_REV_MASK) != MAX3109_REV_ID)) {
dev_err(dev,
"%s ID 0x%02x does not match\n", s->devtype->name, val);
@@ -388,13 +399,12 @@ static int max14830_detect(struct device *dev)
unsigned int val = 0;
int ret;
- ret = regmap_write(s->regmap, MAX310X_GLOBALCMD_REG,
- MAX310X_EXTREG_ENBL);
+ ret = s->if_cfg->extended_reg_enable(dev, true);
if (ret)
return ret;
- regmap_read(s->regmap, MAX310X_REVID_EXTREG, &val);
- regmap_write(s->regmap, MAX310X_GLOBALCMD_REG, MAX310X_EXTREG_DSBL);
+ regmap_read(s->regmap, s->if_cfg->rev_id_reg, &val);
+ s->if_cfg->extended_reg_enable(dev, false);
if (((val & MAX310x_REV_MASK) != MAX14830_REV_ID)) {
dev_err(dev,
"%s ID 0x%02x does not match\n", s->devtype->name, val);
@@ -419,6 +429,10 @@ static const struct max310x_devtype max3107_devtype = {
.mode1 = MAX310X_MODE1_AUTOSLEEP_BIT | MAX310X_MODE1_IRQSEL_BIT,
.detect = max3107_detect,
.power = max310x_power,
+ .slave_addr = {
+ .min = 0x2c,
+ .max = 0x2f,
+ },
};
static const struct max310x_devtype max3108_devtype = {
@@ -427,6 +441,10 @@ static const struct max310x_devtype max3108_devtype = {
.mode1 = MAX310X_MODE1_AUTOSLEEP_BIT,
.detect = max3108_detect,
.power = max310x_power,
+ .slave_addr = {
+ .min = 0x60,
+ .max = 0x6f,
+ },
};
static const struct max310x_devtype max3109_devtype = {
@@ -435,6 +453,10 @@ static const struct max310x_devtype max3109_devtype = {
.mode1 = MAX310X_MODE1_AUTOSLEEP_BIT,
.detect = max3109_detect,
.power = max310x_power,
+ .slave_addr = {
+ .min = 0x60,
+ .max = 0x6f,
+ },
};
static const struct max310x_devtype max14830_devtype = {
@@ -443,11 +465,15 @@ static const struct max310x_devtype max14830_devtype = {
.mode1 = MAX310X_MODE1_IRQSEL_BIT,
.detect = max14830_detect,
.power = max14830_power,
+ .slave_addr = {
+ .min = 0x60,
+ .max = 0x6f,
+ },
};
static bool max310x_reg_writeable(struct device *dev, unsigned int reg)
{
- switch (reg & 0x1f) {
+ switch (reg) {
case MAX310X_IRQSTS_REG:
case MAX310X_LSR_IRQSTS_REG:
case MAX310X_SPCHR_IRQSTS_REG:
@@ -464,7 +490,7 @@ static bool max310x_reg_writeable(struct device *dev, unsigned int reg)
static bool max310x_reg_volatile(struct device *dev, unsigned int reg)
{
- switch (reg & 0x1f) {
+ switch (reg) {
case MAX310X_RHR_REG:
case MAX310X_IRQSTS_REG:
case MAX310X_LSR_IRQSTS_REG:
@@ -486,7 +512,7 @@ static bool max310x_reg_volatile(struct device *dev, unsigned int reg)
static bool max310x_reg_precious(struct device *dev, unsigned int reg)
{
- switch (reg & 0x1f) {
+ switch (reg) {
case MAX310X_RHR_REG:
case MAX310X_IRQSTS_REG:
case MAX310X_SPCHR_IRQSTS_REG:
@@ -624,31 +650,15 @@ static u32 max310x_set_ref_clk(struct device *dev, struct max310x_port *s,
static void max310x_batch_write(struct uart_port *port, u8 *txbuf, unsigned int len)
{
struct max310x_one *one = to_max310x_port(port);
- struct spi_transfer xfer[] = {
- {
- .tx_buf = &one->wr_header,
- .len = sizeof(one->wr_header),
- }, {
- .tx_buf = txbuf,
- .len = len,
- }
- };
- spi_sync_transfer(to_spi_device(port->dev), xfer, ARRAY_SIZE(xfer));
+
+ regmap_raw_write(one->regmap, MAX310X_THR_REG, txbuf, len);
}
static void max310x_batch_read(struct uart_port *port, u8 *rxbuf, unsigned int len)
{
struct max310x_one *one = to_max310x_port(port);
- struct spi_transfer xfer[] = {
- {
- .tx_buf = &one->rd_header,
- .len = sizeof(one->rd_header),
- }, {
- .rx_buf = rxbuf,
- .len = len,
- }
- };
- spi_sync_transfer(to_spi_device(port->dev), xfer, ARRAY_SIZE(xfer));
+
+ regmap_raw_read(one->regmap, MAX310X_RHR_REG, rxbuf, len);
}
static void max310x_handle_rx(struct uart_port *port, unsigned int rxlen)
@@ -1026,7 +1036,7 @@ static void max310x_rs_proc(struct work_struct *ws)
MAX310X_MODE2_ECHOSUPR_BIT, mode2);
}
-static int max310x_rs485_config(struct uart_port *port,
+static int max310x_rs485_config(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
struct max310x_one *one = to_max310x_port(port);
@@ -1035,8 +1045,6 @@ static int max310x_rs485_config(struct uart_port *port,
(rs485->delay_rts_after_send > 0x0f))
return -ERANGE;
- rs485->flags &= SER_RS485_RTS_ON_SEND | SER_RS485_RX_DURING_TX |
- SER_RS485_ENABLED;
port->rs485 = *rs485;
schedule_work(&one->rs_work);
@@ -1249,16 +1257,24 @@ static int max310x_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
}
#endif
+static const struct serial_rs485 max310x_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RX_DURING_TX,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1,
+};
+
static int max310x_probe(struct device *dev, const struct max310x_devtype *devtype,
- struct regmap *regmap, int irq)
+ const struct max310x_if_cfg *if_cfg,
+ struct regmap *regmaps[], int irq)
{
int i, ret, fmin, fmax, freq;
struct max310x_port *s;
u32 uartclk = 0;
bool xtal;
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
+ for (i = 0; i < devtype->nr; i++)
+ if (IS_ERR(regmaps[i]))
+ return PTR_ERR(regmaps[i]);
/* Alloc port structure */
s = devm_kzalloc(dev, struct_size(s, p, devtype->nr), GFP_KERNEL);
@@ -1305,8 +1321,9 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
goto out_clk;
}
- s->regmap = regmap;
+ s->regmap = regmaps[0];
s->devtype = devtype;
+ s->if_cfg = if_cfg;
dev_set_drvdata(dev, s);
/* Check device to ensure we are talking to what we expect */
@@ -1315,22 +1332,18 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
goto out_clk;
for (i = 0; i < devtype->nr; i++) {
- unsigned int offs = i << 5;
-
/* Reset port */
- regmap_write(s->regmap, MAX310X_MODE2_REG + offs,
+ regmap_write(regmaps[i], MAX310X_MODE2_REG,
MAX310X_MODE2_RST_BIT);
/* Clear port reset */
- regmap_write(s->regmap, MAX310X_MODE2_REG + offs, 0);
+ regmap_write(regmaps[i], MAX310X_MODE2_REG, 0);
/* Wait for port startup */
do {
- regmap_read(s->regmap,
- MAX310X_BRGDIVLSB_REG + offs, &ret);
+ regmap_read(regmaps[i], MAX310X_BRGDIVLSB_REG, &ret);
} while (ret != 0x01);
- regmap_write(s->regmap, MAX310X_MODE1_REG + offs,
- devtype->mode1);
+ regmap_write(regmaps[i], MAX310X_MODE1_REG, devtype->mode1);
}
uartclk = max310x_set_ref_clk(dev, s, freq, xtal);
@@ -1353,11 +1366,14 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
s->p[i].port.fifosize = MAX310X_FIFO_SIZE;
s->p[i].port.flags = UPF_FIXED_TYPE | UPF_LOW_LATENCY;
s->p[i].port.iotype = UPIO_PORT;
- s->p[i].port.iobase = i * 0x20;
+ s->p[i].port.iobase = i;
s->p[i].port.membase = (void __iomem *)~0;
s->p[i].port.uartclk = uartclk;
s->p[i].port.rs485_config = max310x_rs485_config;
+ s->p[i].port.rs485_supported = max310x_rs485_supported;
s->p[i].port.ops = &max310x_ops;
+ s->p[i].regmap = regmaps[i];
+
/* Disable all interrupts */
max310x_port_write(&s->p[i].port, MAX310X_IRQEN_REG, 0);
/* Clear IRQ status register */
@@ -1368,10 +1384,6 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
INIT_WORK(&s->p[i].md_work, max310x_md_proc);
/* Initialize queue for changing RS485 mode */
INIT_WORK(&s->p[i].rs_work, max310x_rs_proc);
- /* Initialize SPI-transfer buffers */
- s->p[i].wr_header = (s->p[i].port.iobase + MAX310X_THR_REG) |
- MAX310X_WRITE_BIT;
- s->p[i].rd_header = (s->p[i].port.iobase + MAX310X_RHR_REG);
/* Register port */
ret = uart_add_one_port(&max310x_uart, &s->p[i].port);
@@ -1456,16 +1468,31 @@ static struct regmap_config regcfg = {
.val_bits = 8,
.write_flag_mask = MAX310X_WRITE_BIT,
.cache_type = REGCACHE_RBTREE,
+ .max_register = MAX310X_REG_1F,
.writeable_reg = max310x_reg_writeable,
.volatile_reg = max310x_reg_volatile,
.precious_reg = max310x_reg_precious,
};
#ifdef CONFIG_SPI_MASTER
+static int max310x_spi_extended_reg_enable(struct device *dev, bool enable)
+{
+ struct max310x_port *s = dev_get_drvdata(dev);
+
+ return regmap_write(s->regmap, MAX310X_GLOBALCMD_REG,
+ enable ? MAX310X_EXTREG_ENBL : MAX310X_EXTREG_DSBL);
+}
+
+static const struct max310x_if_cfg __maybe_unused max310x_spi_if_cfg = {
+ .extended_reg_enable = max310x_spi_extended_reg_enable,
+ .rev_id_reg = MAX310X_SPI_REVID_EXTREG,
+};
+
static int max310x_spi_probe(struct spi_device *spi)
{
const struct max310x_devtype *devtype;
- struct regmap *regmap;
+ struct regmap *regmaps[4];
+ unsigned int i;
int ret;
/* Setup SPI bus */
@@ -1480,10 +1507,14 @@ static int max310x_spi_probe(struct spi_device *spi)
if (!devtype)
devtype = (struct max310x_devtype *)spi_get_device_id(spi)->driver_data;
- regcfg.max_register = devtype->nr * 0x20 - 1;
- regmap = devm_regmap_init_spi(spi, &regcfg);
+ for (i = 0; i < devtype->nr; i++) {
+ u8 port_mask = i * 0x20;
+ regcfg.read_flag_mask = port_mask;
+ regcfg.write_flag_mask = port_mask | MAX310X_WRITE_BIT;
+ regmaps[i] = devm_regmap_init_spi(spi, &regcfg);
+ }
- return max310x_probe(&spi->dev, devtype, regmap, spi->irq);
+ return max310x_probe(&spi->dev, devtype, &max310x_spi_if_cfg, regmaps, spi->irq);
}
static void max310x_spi_remove(struct spi_device *spi)
@@ -1512,6 +1543,97 @@ static struct spi_driver max310x_spi_driver = {
};
#endif
+#ifdef CONFIG_I2C
+static int max310x_i2c_extended_reg_enable(struct device *dev, bool enable)
+{
+ return 0;
+}
+
+static struct regmap_config regcfg_i2c = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+ .writeable_reg = max310x_reg_writeable,
+ .volatile_reg = max310x_reg_volatile,
+ .precious_reg = max310x_reg_precious,
+ .max_register = MAX310X_I2C_REVID_EXTREG,
+};
+
+static const struct max310x_if_cfg max310x_i2c_if_cfg = {
+ .extended_reg_enable = max310x_i2c_extended_reg_enable,
+ .rev_id_reg = MAX310X_I2C_REVID_EXTREG,
+};
+
+static unsigned short max310x_i2c_slave_addr(unsigned short addr,
+ unsigned int nr)
+{
+ /*
+ * For MAX14830 and MAX3109, the slave address depends on what the
+ * A0 and A1 pins are tied to.
+ * See Table I2C Address Map of the datasheet.
+ * Based on that table, the following formulas were determined.
+ * UART1 - UART0 = 0x10
+ * UART2 - UART1 = 0x20 + 0x10
+ * UART3 - UART2 = 0x10
+ */
+
+ addr -= nr * 0x10;
+
+ if (nr >= 2)
+ addr -= 0x20;
+
+ return addr;
+}
+
+static int max310x_i2c_probe(struct i2c_client *client)
+{
+ const struct max310x_devtype *devtype =
+ device_get_match_data(&client->dev);
+ struct i2c_client *port_client;
+ struct regmap *regmaps[4];
+ unsigned int i;
+ u8 port_addr;
+
+ if (client->addr < devtype->slave_addr.min ||
+ client->addr > devtype->slave_addr.max)
+ return dev_err_probe(&client->dev, -EINVAL,
+ "Slave addr 0x%x outside of range [0x%x, 0x%x]\n",
+ client->addr, devtype->slave_addr.min,
+ devtype->slave_addr.max);
+
+ regmaps[0] = devm_regmap_init_i2c(client, &regcfg_i2c);
+
+ for (i = 1; i < devtype->nr; i++) {
+ port_addr = max310x_i2c_slave_addr(client->addr, i);
+ port_client = devm_i2c_new_dummy_device(&client->dev,
+ client->adapter,
+ port_addr);
+
+ regmaps[i] = devm_regmap_init_i2c(port_client, &regcfg_i2c);
+ }
+
+ return max310x_probe(&client->dev, devtype, &max310x_i2c_if_cfg,
+ regmaps, client->irq);
+}
+
+static int max310x_i2c_remove(struct i2c_client *client)
+{
+ max310x_remove(&client->dev);
+
+ return 0;
+}
+
+static struct i2c_driver max310x_i2c_driver = {
+ .driver = {
+ .name = MAX310X_NAME,
+ .of_match_table = max310x_dt_ids,
+ .pm = &max310x_pm_ops,
+ },
+ .probe_new = max310x_i2c_probe,
+ .remove = max310x_i2c_remove,
+};
+#endif
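Working the comment's formulas forward from the code above: if UART0 of a MAX3109/MAX14830 answers at I2C address A, max310x_i2c_slave_addr() yields A - 0x10 for UART1, A - 0x40 for UART2 and A - 0x50 for UART3 (the extra 0x20 only kicks in from the third port on). A compilable restatement of the same arithmetic, with 0x6c used purely as an illustrative UART0 address rather than a value taken from the datasheet:

    #include <stdio.h>

    /* Same arithmetic as max310x_i2c_slave_addr(): each further port sits
     * 0x10 lower, with a one-time extra 0x20 gap before the third port. */
    static unsigned short port_i2c_addr(unsigned short uart0_addr, unsigned int nr)
    {
            unsigned short addr = uart0_addr - nr * 0x10;

            if (nr >= 2)
                    addr -= 0x20;
            return addr;
    }

    int main(void)
    {
            unsigned int nr;

            for (nr = 0; nr < 4; nr++)
                    printf("UART%u -> 0x%02x\n", nr,
                           (unsigned int)port_i2c_addr(0x6c, nr));
            return 0;
    }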
+
static int __init max310x_uart_init(void)
{
int ret;
@@ -1525,15 +1647,35 @@ static int __init max310x_uart_init(void)
#ifdef CONFIG_SPI_MASTER
ret = spi_register_driver(&max310x_spi_driver);
if (ret)
- uart_unregister_driver(&max310x_uart);
+ goto err_spi_register;
+#endif
+
+#ifdef CONFIG_I2C
+ ret = i2c_add_driver(&max310x_i2c_driver);
+ if (ret)
+ goto err_i2c_register;
+#endif
+
+ return 0;
+
+#ifdef CONFIG_I2C
+err_i2c_register:
+ spi_unregister_driver(&max310x_spi_driver);
#endif
+err_spi_register:
+ uart_unregister_driver(&max310x_uart);
+
return ret;
}
module_init(max310x_uart_init);
static void __exit max310x_uart_exit(void)
{
+#ifdef CONFIG_I2C
+ i2c_del_driver(&max310x_i2c_driver);
+#endif
+
#ifdef CONFIG_SPI_MASTER
spi_unregister_driver(&max310x_spi_driver);
#endif
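One more note on the max310x rework above: each UART now owns its own regmap, so max310x_port_read()/max310x_port_write() address registers 0x00..0x1f directly instead of folding the port number into the register offset. For SPI this works by giving every per-port regmap its own read/write flag masks (port_mask = i * 0x20, plus the write bit), so regmap rather than the driver composes the command byte. A small model of the resulting SPI command byte, assuming the driver's usual bit 7 write flag:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX310X_WRITE_BIT 0x80   /* assumed: bit 7 marks a write access */

    /* What the per-port flag masks achieve: the port index lands in bits
     * 5-6 of the command byte, so register numbers stay 0x00..0x1f. */
    static uint8_t spi_cmd_byte(unsigned int port_nr, uint8_t reg, int is_write)
    {
            uint8_t cmd = (uint8_t)(port_nr * 0x20) | (reg & 0x1f);

            if (is_write)
                    cmd |= MAX310X_WRITE_BIT;
            return cmd;
    }

    int main(void)
    {
            /* UART2, register 0x0d, write access -> prints 0xcd */
            printf("0x%02x\n", spi_cmd_byte(2, 0x0d, 1));
            return 0;
    }

On the I2C side the same effect comes from devm_i2c_new_dummy_device(): every additional UART gets its own dummy client at the address computed by max310x_i2c_slave_addr(), with a regmap on top of that client.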
diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c
index 2aec62b5d6c4..f4aaaadd0742 100644
--- a/drivers/tty/serial/mcf.c
+++ b/drivers/tty/serial/mcf.c
@@ -431,7 +431,8 @@ static int mcf_verify_port(struct uart_port *port, struct serial_struct *ser)
/****************************************************************************/
/* Enable or disable the RS485 support */
-static int mcf_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
+static int mcf_config_rs485(struct uart_port *port, struct ktermios *termios,
+ struct serial_rs485 *rs485)
{
unsigned char mr1, mr2;
@@ -448,11 +449,14 @@ static int mcf_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
}
writeb(mr1, port->membase + MCFUART_UMR);
writeb(mr2, port->membase + MCFUART_UMR);
- port->rs485 = *rs485;
return 0;
}
+static const struct serial_rs485 mcf_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_AFTER_SEND,
+};
+
/****************************************************************************/
/*
@@ -502,6 +506,7 @@ int __init early_mcf_setup(struct mcf_platform_uart *platp)
port->uartclk = MCF_BUSCLK;
port->flags = UPF_BOOT_AUTOCONF;
port->rs485_config = mcf_config_rs485;
+ port->rs485_supported = mcf_rs485_supported;
port->ops = &mcf_uart_ops;
}
@@ -629,6 +634,7 @@ static int mcf_probe(struct platform_device *pdev)
port->ops = &mcf_uart_ops;
port->flags = UPF_BOOT_AUTOCONF;
port->rs485_config = mcf_config_rs485;
+ port->rs485_supported = mcf_rs485_supported;
port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MCF_CONSOLE);
uart_add_one_port(&mcf_driver, port);
diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
index 4869c0059c98..6c8db19fd572 100644
--- a/drivers/tty/serial/meson_uart.c
+++ b/drivers/tty/serial/meson_uart.c
@@ -162,7 +162,7 @@ static void meson_uart_start_tx(struct uart_port *port)
ch = xmit->buf[xmit->tail];
writel(ch, port->membase + AML_UART_WFIFO);
- xmit->tail = (xmit->tail+1) & (SERIAL_XMIT_SIZE - 1);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
}
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index e50f069b5ebb..3f1986c89694 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -1630,7 +1630,7 @@ mpc52xx_console_setup(struct console *co, char *options)
return ret;
}
- uartclk = mpc5xxx_get_bus_frequency(np);
+ uartclk = mpc5xxx_fwnode_get_bus_frequency(of_fwnode_handle(np));
if (uartclk == 0) {
pr_debug("Could not find uart clock frequency!\n");
return -EINVAL;
@@ -1747,7 +1747,7 @@ static int mpc52xx_uart_of_probe(struct platform_device *op)
/* set the uart clock to the input clock of the psc, the different
* prescalers are taken into account in the set_baudrate() methods
* of the respective chip */
- uartclk = mpc5xxx_get_bus_frequency(op->dev.of_node);
+ uartclk = mpc5xxx_get_bus_frequency(&op->dev);
if (uartclk == 0) {
dev_dbg(&op->dev, "Could not find uart clock frequency!\n");
return -EINVAL;
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index e676ec761f18..3159889ddae1 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -29,103 +29,103 @@
#include <linux/of_device.h>
#include <linux/wait.h>
-#define UART_MR1 0x0000
-
-#define UART_MR1_AUTO_RFR_LEVEL0 0x3F
-#define UART_MR1_AUTO_RFR_LEVEL1 0x3FF00
-#define UART_DM_MR1_AUTO_RFR_LEVEL1 0xFFFFFF00
-#define UART_MR1_RX_RDY_CTL BIT(7)
-#define UART_MR1_CTS_CTL BIT(6)
-
-#define UART_MR2 0x0004
-#define UART_MR2_ERROR_MODE BIT(6)
-#define UART_MR2_BITS_PER_CHAR 0x30
-#define UART_MR2_BITS_PER_CHAR_5 (0x0 << 4)
-#define UART_MR2_BITS_PER_CHAR_6 (0x1 << 4)
-#define UART_MR2_BITS_PER_CHAR_7 (0x2 << 4)
-#define UART_MR2_BITS_PER_CHAR_8 (0x3 << 4)
-#define UART_MR2_STOP_BIT_LEN_ONE (0x1 << 2)
-#define UART_MR2_STOP_BIT_LEN_TWO (0x3 << 2)
-#define UART_MR2_PARITY_MODE_NONE 0x0
-#define UART_MR2_PARITY_MODE_ODD 0x1
-#define UART_MR2_PARITY_MODE_EVEN 0x2
-#define UART_MR2_PARITY_MODE_SPACE 0x3
-#define UART_MR2_PARITY_MODE 0x3
-
-#define UART_CSR 0x0008
-
-#define UART_TF 0x000C
+#define MSM_UART_MR1 0x0000
+
+#define MSM_UART_MR1_AUTO_RFR_LEVEL0 0x3F
+#define MSM_UART_MR1_AUTO_RFR_LEVEL1 0x3FF00
+#define MSM_UART_DM_MR1_AUTO_RFR_LEVEL1 0xFFFFFF00
+#define MSM_UART_MR1_RX_RDY_CTL BIT(7)
+#define MSM_UART_MR1_CTS_CTL BIT(6)
+
+#define MSM_UART_MR2 0x0004
+#define MSM_UART_MR2_ERROR_MODE BIT(6)
+#define MSM_UART_MR2_BITS_PER_CHAR 0x30
+#define MSM_UART_MR2_BITS_PER_CHAR_5 (0x0 << 4)
+#define MSM_UART_MR2_BITS_PER_CHAR_6 (0x1 << 4)
+#define MSM_UART_MR2_BITS_PER_CHAR_7 (0x2 << 4)
+#define MSM_UART_MR2_BITS_PER_CHAR_8 (0x3 << 4)
+#define MSM_UART_MR2_STOP_BIT_LEN_ONE (0x1 << 2)
+#define MSM_UART_MR2_STOP_BIT_LEN_TWO (0x3 << 2)
+#define MSM_UART_MR2_PARITY_MODE_NONE 0x0
+#define MSM_UART_MR2_PARITY_MODE_ODD 0x1
+#define MSM_UART_MR2_PARITY_MODE_EVEN 0x2
+#define MSM_UART_MR2_PARITY_MODE_SPACE 0x3
+#define MSM_UART_MR2_PARITY_MODE 0x3
+
+#define MSM_UART_CSR 0x0008
+
+#define MSM_UART_TF 0x000C
#define UARTDM_TF 0x0070
-#define UART_CR 0x0010
-#define UART_CR_CMD_NULL (0 << 4)
-#define UART_CR_CMD_RESET_RX (1 << 4)
-#define UART_CR_CMD_RESET_TX (2 << 4)
-#define UART_CR_CMD_RESET_ERR (3 << 4)
-#define UART_CR_CMD_RESET_BREAK_INT (4 << 4)
-#define UART_CR_CMD_START_BREAK (5 << 4)
-#define UART_CR_CMD_STOP_BREAK (6 << 4)
-#define UART_CR_CMD_RESET_CTS (7 << 4)
-#define UART_CR_CMD_RESET_STALE_INT (8 << 4)
-#define UART_CR_CMD_PACKET_MODE (9 << 4)
-#define UART_CR_CMD_MODE_RESET (12 << 4)
-#define UART_CR_CMD_SET_RFR (13 << 4)
-#define UART_CR_CMD_RESET_RFR (14 << 4)
-#define UART_CR_CMD_PROTECTION_EN (16 << 4)
-#define UART_CR_CMD_STALE_EVENT_DISABLE (6 << 8)
-#define UART_CR_CMD_STALE_EVENT_ENABLE (80 << 4)
-#define UART_CR_CMD_FORCE_STALE (4 << 8)
-#define UART_CR_CMD_RESET_TX_READY (3 << 8)
-#define UART_CR_TX_DISABLE BIT(3)
-#define UART_CR_TX_ENABLE BIT(2)
-#define UART_CR_RX_DISABLE BIT(1)
-#define UART_CR_RX_ENABLE BIT(0)
-#define UART_CR_CMD_RESET_RXBREAK_START ((1 << 11) | (2 << 4))
-
-#define UART_IMR 0x0014
-#define UART_IMR_TXLEV BIT(0)
-#define UART_IMR_RXSTALE BIT(3)
-#define UART_IMR_RXLEV BIT(4)
-#define UART_IMR_DELTA_CTS BIT(5)
-#define UART_IMR_CURRENT_CTS BIT(6)
-#define UART_IMR_RXBREAK_START BIT(10)
-
-#define UART_IPR_RXSTALE_LAST 0x20
-#define UART_IPR_STALE_LSB 0x1F
-#define UART_IPR_STALE_TIMEOUT_MSB 0x3FF80
-#define UART_DM_IPR_STALE_TIMEOUT_MSB 0xFFFFFF80
-
-#define UART_IPR 0x0018
-#define UART_TFWR 0x001C
-#define UART_RFWR 0x0020
-#define UART_HCR 0x0024
-
-#define UART_MREG 0x0028
-#define UART_NREG 0x002C
-#define UART_DREG 0x0030
-#define UART_MNDREG 0x0034
-#define UART_IRDA 0x0038
-#define UART_MISR_MODE 0x0040
-#define UART_MISR_RESET 0x0044
-#define UART_MISR_EXPORT 0x0048
-#define UART_MISR_VAL 0x004C
-#define UART_TEST_CTRL 0x0050
-
-#define UART_SR 0x0008
-#define UART_SR_HUNT_CHAR BIT(7)
-#define UART_SR_RX_BREAK BIT(6)
-#define UART_SR_PAR_FRAME_ERR BIT(5)
-#define UART_SR_OVERRUN BIT(4)
-#define UART_SR_TX_EMPTY BIT(3)
-#define UART_SR_TX_READY BIT(2)
-#define UART_SR_RX_FULL BIT(1)
-#define UART_SR_RX_READY BIT(0)
-
-#define UART_RF 0x000C
+#define MSM_UART_CR 0x0010
+#define MSM_UART_CR_CMD_NULL (0 << 4)
+#define MSM_UART_CR_CMD_RESET_RX (1 << 4)
+#define MSM_UART_CR_CMD_RESET_TX (2 << 4)
+#define MSM_UART_CR_CMD_RESET_ERR (3 << 4)
+#define MSM_UART_CR_CMD_RESET_BREAK_INT (4 << 4)
+#define MSM_UART_CR_CMD_START_BREAK (5 << 4)
+#define MSM_UART_CR_CMD_STOP_BREAK (6 << 4)
+#define MSM_UART_CR_CMD_RESET_CTS (7 << 4)
+#define MSM_UART_CR_CMD_RESET_STALE_INT (8 << 4)
+#define MSM_UART_CR_CMD_PACKET_MODE (9 << 4)
+#define MSM_UART_CR_CMD_MODE_RESET (12 << 4)
+#define MSM_UART_CR_CMD_SET_RFR (13 << 4)
+#define MSM_UART_CR_CMD_RESET_RFR (14 << 4)
+#define MSM_UART_CR_CMD_PROTECTION_EN (16 << 4)
+#define MSM_UART_CR_CMD_STALE_EVENT_DISABLE (6 << 8)
+#define MSM_UART_CR_CMD_STALE_EVENT_ENABLE (80 << 4)
+#define MSM_UART_CR_CMD_FORCE_STALE (4 << 8)
+#define MSM_UART_CR_CMD_RESET_TX_READY (3 << 8)
+#define MSM_UART_CR_TX_DISABLE BIT(3)
+#define MSM_UART_CR_TX_ENABLE BIT(2)
+#define MSM_UART_CR_RX_DISABLE BIT(1)
+#define MSM_UART_CR_RX_ENABLE BIT(0)
+#define MSM_UART_CR_CMD_RESET_RXBREAK_START ((1 << 11) | (2 << 4))
+
+#define MSM_UART_IMR 0x0014
+#define MSM_UART_IMR_TXLEV BIT(0)
+#define MSM_UART_IMR_RXSTALE BIT(3)
+#define MSM_UART_IMR_RXLEV BIT(4)
+#define MSM_UART_IMR_DELTA_CTS BIT(5)
+#define MSM_UART_IMR_CURRENT_CTS BIT(6)
+#define MSM_UART_IMR_RXBREAK_START BIT(10)
+
+#define MSM_UART_IPR_RXSTALE_LAST 0x20
+#define MSM_UART_IPR_STALE_LSB 0x1F
+#define MSM_UART_IPR_STALE_TIMEOUT_MSB 0x3FF80
+#define MSM_UART_DM_IPR_STALE_TIMEOUT_MSB 0xFFFFFF80
+
+#define MSM_UART_IPR 0x0018
+#define MSM_UART_TFWR 0x001C
+#define MSM_UART_RFWR 0x0020
+#define MSM_UART_HCR 0x0024
+
+#define MSM_UART_MREG 0x0028
+#define MSM_UART_NREG 0x002C
+#define MSM_UART_DREG 0x0030
+#define MSM_UART_MNDREG 0x0034
+#define MSM_UART_IRDA 0x0038
+#define MSM_UART_MISR_MODE 0x0040
+#define MSM_UART_MISR_RESET 0x0044
+#define MSM_UART_MISR_EXPORT 0x0048
+#define MSM_UART_MISR_VAL 0x004C
+#define MSM_UART_TEST_CTRL 0x0050
+
+#define MSM_UART_SR 0x0008
+#define MSM_UART_SR_HUNT_CHAR BIT(7)
+#define MSM_UART_SR_RX_BREAK BIT(6)
+#define MSM_UART_SR_PAR_FRAME_ERR BIT(5)
+#define MSM_UART_SR_OVERRUN BIT(4)
+#define MSM_UART_SR_TX_EMPTY BIT(3)
+#define MSM_UART_SR_TX_READY BIT(2)
+#define MSM_UART_SR_RX_FULL BIT(1)
+#define MSM_UART_SR_RX_READY BIT(0)
+
+#define MSM_UART_RF 0x000C
#define UARTDM_RF 0x0070
-#define UART_MISR 0x0010
-#define UART_ISR 0x0014
-#define UART_ISR_TX_READY BIT(7)
+#define MSM_UART_MISR 0x0010
+#define MSM_UART_ISR 0x0014
+#define MSM_UART_ISR_TX_READY BIT(7)
#define UARTDM_RXFS 0x50
#define UARTDM_RXFS_BUF_SHIFT 0x7
@@ -181,7 +181,10 @@ struct msm_port {
struct msm_dma rx_dma;
};
-#define UART_TO_MSM(uart_port) container_of(uart_port, struct msm_port, uart)
+static inline struct msm_port *to_msm_port(struct uart_port *up)
+{
+ return container_of(up, struct msm_port, uart);
+}
static
void msm_write(struct uart_port *port, unsigned int val, unsigned int off)
@@ -200,10 +203,10 @@ unsigned int msm_read(struct uart_port *port, unsigned int off)
*/
static void msm_serial_set_mnd_regs_tcxo(struct uart_port *port)
{
- msm_write(port, 0x06, UART_MREG);
- msm_write(port, 0xF1, UART_NREG);
- msm_write(port, 0x0F, UART_DREG);
- msm_write(port, 0x1A, UART_MNDREG);
+ msm_write(port, 0x06, MSM_UART_MREG);
+ msm_write(port, 0xF1, MSM_UART_NREG);
+ msm_write(port, 0x0F, MSM_UART_DREG);
+ msm_write(port, 0x1A, MSM_UART_MNDREG);
port->uartclk = 1843200;
}
@@ -212,16 +215,16 @@ static void msm_serial_set_mnd_regs_tcxo(struct uart_port *port)
*/
static void msm_serial_set_mnd_regs_tcxoby4(struct uart_port *port)
{
- msm_write(port, 0x18, UART_MREG);
- msm_write(port, 0xF6, UART_NREG);
- msm_write(port, 0x0F, UART_DREG);
- msm_write(port, 0x0A, UART_MNDREG);
+ msm_write(port, 0x18, MSM_UART_MREG);
+ msm_write(port, 0xF6, MSM_UART_NREG);
+ msm_write(port, 0x0F, MSM_UART_DREG);
+ msm_write(port, 0x0A, MSM_UART_MNDREG);
port->uartclk = 1843200;
}
static void msm_serial_set_mnd_regs(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
/*
* These registers don't exist so we change the clk input rate
@@ -392,35 +395,35 @@ static inline void msm_wait_for_xmitr(struct uart_port *port)
{
unsigned int timeout = 500000;
- while (!(msm_read(port, UART_SR) & UART_SR_TX_EMPTY)) {
- if (msm_read(port, UART_ISR) & UART_ISR_TX_READY)
+ while (!(msm_read(port, MSM_UART_SR) & MSM_UART_SR_TX_EMPTY)) {
+ if (msm_read(port, MSM_UART_ISR) & MSM_UART_ISR_TX_READY)
break;
udelay(1);
if (!timeout--)
break;
}
- msm_write(port, UART_CR_CMD_RESET_TX_READY, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_TX_READY, MSM_UART_CR);
}
static void msm_stop_tx(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
- msm_port->imr &= ~UART_IMR_TXLEV;
- msm_write(port, msm_port->imr, UART_IMR);
+ msm_port->imr &= ~MSM_UART_IMR_TXLEV;
+ msm_write(port, msm_port->imr, MSM_UART_IMR);
}
static void msm_start_tx(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
struct msm_dma *dma = &msm_port->tx_dma;
/* Already started in DMA mode */
if (dma->count)
return;
- msm_port->imr |= UART_IMR_TXLEV;
- msm_write(port, msm_port->imr, UART_IMR);
+ msm_port->imr |= MSM_UART_IMR_TXLEV;
+ msm_write(port, msm_port->imr, MSM_UART_IMR);
}
static void msm_reset_dm_count(struct uart_port *port, int count)
@@ -456,8 +459,8 @@ static void msm_complete_tx_dma(void *args)
msm_write(port, val, UARTDM_DMEN);
if (msm_port->is_uartdm > UARTDM_1P3) {
- msm_write(port, UART_CR_CMD_RESET_TX, UART_CR);
- msm_write(port, UART_CR_TX_ENABLE, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_TX, MSM_UART_CR);
+ msm_write(port, MSM_UART_CR_TX_ENABLE, MSM_UART_CR);
}
count = dma->count - state.residue;
@@ -468,8 +471,8 @@ static void msm_complete_tx_dma(void *args)
xmit->tail &= UART_XMIT_SIZE - 1;
/* Restore "Tx FIFO below watermark" interrupt */
- msm_port->imr |= UART_IMR_TXLEV;
- msm_write(port, msm_port->imr, UART_IMR);
+ msm_port->imr |= MSM_UART_IMR_TXLEV;
+ msm_write(port, msm_port->imr, MSM_UART_IMR);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
@@ -516,8 +519,8 @@ static int msm_handle_tx_dma(struct msm_port *msm_port, unsigned int count)
* Using DMA complete for Tx FIFO reload, no need for
* "Tx FIFO below watermark" one, disable it
*/
- msm_port->imr &= ~UART_IMR_TXLEV;
- msm_write(port, msm_port->imr, UART_IMR);
+ msm_port->imr &= ~MSM_UART_IMR_TXLEV;
+ msm_write(port, msm_port->imr, MSM_UART_IMR);
dma->count = count;
@@ -559,10 +562,10 @@ static void msm_complete_rx_dma(void *args)
val &= ~dma->enable_bit;
msm_write(port, val, UARTDM_DMEN);
- if (msm_read(port, UART_SR) & UART_SR_OVERRUN) {
+ if (msm_read(port, MSM_UART_SR) & MSM_UART_SR_OVERRUN) {
port->icount.overrun++;
tty_insert_flip_char(tport, 0, TTY_OVERRUN);
- msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_ERR, MSM_UART_CR);
}
count = msm_read(port, UARTDM_RX_TOTAL_SNAP);
@@ -584,7 +587,7 @@ static void msm_complete_rx_dma(void *args)
continue;
}
- if (!(port->read_status_mask & UART_SR_RX_BREAK))
+ if (!(port->read_status_mask & MSM_UART_SR_RX_BREAK))
flag = TTY_NORMAL;
spin_unlock_irqrestore(&port->lock, flags);
@@ -638,23 +641,23 @@ static void msm_start_rx_dma(struct msm_port *msm_port)
* Using DMA for FIFO off-load, no need for "Rx FIFO over
* watermark" or "stale" interrupts, disable them
*/
- msm_port->imr &= ~(UART_IMR_RXLEV | UART_IMR_RXSTALE);
+ msm_port->imr &= ~(MSM_UART_IMR_RXLEV | MSM_UART_IMR_RXSTALE);
/*
 * When the DMA engine is ADM3 (implied by UARTDM <= v1.3), we need
 * RXSTALE to flush the input DMA FIFO to memory
*/
if (msm_port->is_uartdm < UARTDM_1P4)
- msm_port->imr |= UART_IMR_RXSTALE;
+ msm_port->imr |= MSM_UART_IMR_RXSTALE;
- msm_write(uart, msm_port->imr, UART_IMR);
+ msm_write(uart, msm_port->imr, MSM_UART_IMR);
dma->count = UARTDM_RX_SIZE;
dma_async_issue_pending(dma->chan);
- msm_write(uart, UART_CR_CMD_RESET_STALE_INT, UART_CR);
- msm_write(uart, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
+ msm_write(uart, MSM_UART_CR_CMD_RESET_STALE_INT, MSM_UART_CR);
+ msm_write(uart, MSM_UART_CR_CMD_STALE_EVENT_ENABLE, MSM_UART_CR);
val = msm_read(uart, UARTDM_DMEN);
val |= dma->enable_bit;
@@ -676,25 +679,25 @@ sw_mode:
* Switch from DMA to SW/FIFO mode. After clearing Rx BAM (UARTDM_DMEN),
* receiver must be reset.
*/
- msm_write(uart, UART_CR_CMD_RESET_RX, UART_CR);
- msm_write(uart, UART_CR_RX_ENABLE, UART_CR);
+ msm_write(uart, MSM_UART_CR_CMD_RESET_RX, MSM_UART_CR);
+ msm_write(uart, MSM_UART_CR_RX_ENABLE, MSM_UART_CR);
- msm_write(uart, UART_CR_CMD_RESET_STALE_INT, UART_CR);
+ msm_write(uart, MSM_UART_CR_CMD_RESET_STALE_INT, MSM_UART_CR);
msm_write(uart, 0xFFFFFF, UARTDM_DMRX);
- msm_write(uart, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
+ msm_write(uart, MSM_UART_CR_CMD_STALE_EVENT_ENABLE, MSM_UART_CR);
/* Re-enable RX interrupts */
- msm_port->imr |= (UART_IMR_RXLEV | UART_IMR_RXSTALE);
- msm_write(uart, msm_port->imr, UART_IMR);
+ msm_port->imr |= MSM_UART_IMR_RXLEV | MSM_UART_IMR_RXSTALE;
+ msm_write(uart, msm_port->imr, MSM_UART_IMR);
}
static void msm_stop_rx(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
struct msm_dma *dma = &msm_port->rx_dma;
- msm_port->imr &= ~(UART_IMR_RXLEV | UART_IMR_RXSTALE);
- msm_write(port, msm_port->imr, UART_IMR);
+ msm_port->imr &= ~(MSM_UART_IMR_RXLEV | MSM_UART_IMR_RXSTALE);
+ msm_write(port, msm_port->imr, MSM_UART_IMR);
if (dma->chan)
msm_stop_dma(port, dma);
@@ -702,10 +705,10 @@ static void msm_stop_rx(struct uart_port *port)
static void msm_enable_ms(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
- msm_port->imr |= UART_IMR_DELTA_CTS;
- msm_write(port, msm_port->imr, UART_IMR);
+ msm_port->imr |= MSM_UART_IMR_DELTA_CTS;
+ msm_write(port, msm_port->imr, MSM_UART_IMR);
}
static void msm_handle_rx_dm(struct uart_port *port, unsigned int misr)
@@ -714,20 +717,20 @@ static void msm_handle_rx_dm(struct uart_port *port, unsigned int misr)
struct tty_port *tport = &port->state->port;
unsigned int sr;
int count = 0;
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
- if ((msm_read(port, UART_SR) & UART_SR_OVERRUN)) {
+ if ((msm_read(port, MSM_UART_SR) & MSM_UART_SR_OVERRUN)) {
port->icount.overrun++;
tty_insert_flip_char(tport, 0, TTY_OVERRUN);
- msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_ERR, MSM_UART_CR);
}
- if (misr & UART_IMR_RXSTALE) {
+ if (misr & MSM_UART_IMR_RXSTALE) {
count = msm_read(port, UARTDM_RX_TOTAL_SNAP) -
msm_port->old_snap_state;
msm_port->old_snap_state = 0;
} else {
- count = 4 * (msm_read(port, UART_RFWR));
+ count = 4 * (msm_read(port, MSM_UART_RFWR));
msm_port->old_snap_state += count;
}
@@ -739,8 +742,8 @@ static void msm_handle_rx_dm(struct uart_port *port, unsigned int misr)
unsigned char buf[4];
int sysrq, r_count, i;
- sr = msm_read(port, UART_SR);
- if ((sr & UART_SR_RX_READY) == 0) {
+ sr = msm_read(port, MSM_UART_SR);
+ if ((sr & MSM_UART_SR_RX_READY) == 0) {
msm_port->old_snap_state -= count;
break;
}
@@ -759,7 +762,7 @@ static void msm_handle_rx_dm(struct uart_port *port, unsigned int misr)
continue;
}
- if (!(port->read_status_mask & UART_SR_RX_BREAK))
+ if (!(port->read_status_mask & MSM_UART_SR_RX_BREAK))
flag = TTY_NORMAL;
spin_unlock(&port->lock);
@@ -773,10 +776,10 @@ static void msm_handle_rx_dm(struct uart_port *port, unsigned int misr)
tty_flip_buffer_push(tport);
- if (misr & (UART_IMR_RXSTALE))
- msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
+ if (misr & (MSM_UART_IMR_RXSTALE))
+ msm_write(port, MSM_UART_CR_CMD_RESET_STALE_INT, MSM_UART_CR);
msm_write(port, 0xFFFFFF, UARTDM_DMRX);
- msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_STALE_EVENT_ENABLE, MSM_UART_CR);
/* Try to use DMA */
msm_start_rx_dma(msm_port);
@@ -792,25 +795,25 @@ static void msm_handle_rx(struct uart_port *port)
* Handle overrun. My understanding of the hardware is that overrun
* is not tied to the RX buffer, so we handle the case out of band.
*/
- if ((msm_read(port, UART_SR) & UART_SR_OVERRUN)) {
+ if ((msm_read(port, MSM_UART_SR) & MSM_UART_SR_OVERRUN)) {
port->icount.overrun++;
tty_insert_flip_char(tport, 0, TTY_OVERRUN);
- msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_ERR, MSM_UART_CR);
}
/* and now the main RX loop */
- while ((sr = msm_read(port, UART_SR)) & UART_SR_RX_READY) {
+ while ((sr = msm_read(port, MSM_UART_SR)) & MSM_UART_SR_RX_READY) {
unsigned int c;
char flag = TTY_NORMAL;
int sysrq;
- c = msm_read(port, UART_RF);
+ c = msm_read(port, MSM_UART_RF);
- if (sr & UART_SR_RX_BREAK) {
+ if (sr & MSM_UART_SR_RX_BREAK) {
port->icount.brk++;
if (uart_handle_break(port))
continue;
- } else if (sr & UART_SR_PAR_FRAME_ERR) {
+ } else if (sr & MSM_UART_SR_PAR_FRAME_ERR) {
port->icount.frame++;
} else {
port->icount.rx++;
@@ -819,9 +822,9 @@ static void msm_handle_rx(struct uart_port *port)
/* Mask conditions we're ignoring. */
sr &= port->read_status_mask;
- if (sr & UART_SR_RX_BREAK)
+ if (sr & MSM_UART_SR_RX_BREAK)
flag = TTY_BREAK;
- else if (sr & UART_SR_PAR_FRAME_ERR)
+ else if (sr & MSM_UART_SR_PAR_FRAME_ERR)
flag = TTY_FRAME;
spin_unlock(&port->lock);
@@ -837,7 +840,7 @@ static void msm_handle_rx(struct uart_port *port)
static void msm_handle_tx_pio(struct uart_port *port, unsigned int tx_count)
{
struct circ_buf *xmit = &port->state->xmit;
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
unsigned int num_chars;
unsigned int tf_pointer = 0;
void __iomem *tf;
@@ -845,7 +848,7 @@ static void msm_handle_tx_pio(struct uart_port *port, unsigned int tx_count)
if (msm_port->is_uartdm)
tf = port->membase + UARTDM_TF;
else
- tf = port->membase + UART_TF;
+ tf = port->membase + MSM_UART_TF;
if (tx_count && msm_port->is_uartdm)
msm_reset_dm_count(port, tx_count);
@@ -854,7 +857,7 @@ static void msm_handle_tx_pio(struct uart_port *port, unsigned int tx_count)
int i;
char buf[4] = { 0 };
- if (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
+ if (!(msm_read(port, MSM_UART_SR) & MSM_UART_SR_TX_READY))
break;
if (msm_port->is_uartdm)
@@ -883,7 +886,7 @@ static void msm_handle_tx_pio(struct uart_port *port, unsigned int tx_count)
static void msm_handle_tx(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
struct circ_buf *xmit = &msm_port->uart.state->xmit;
struct msm_dma *dma = &msm_port->tx_dma;
unsigned int pio_count, dma_count, dma_min;
@@ -895,7 +898,7 @@ static void msm_handle_tx(struct uart_port *port)
if (msm_port->is_uartdm)
tf = port->membase + UARTDM_TF;
else
- tf = port->membase + UART_TF;
+ tf = port->membase + MSM_UART_TF;
buf[0] = port->x_char;
@@ -939,7 +942,7 @@ static void msm_handle_tx(struct uart_port *port)
static void msm_handle_delta_cts(struct uart_port *port)
{
- msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_CTS, MSM_UART_CR);
port->icount.cts++;
wake_up_interruptible(&port->state->port.delta_msr_wait);
}
@@ -947,27 +950,27 @@ static void msm_handle_delta_cts(struct uart_port *port)
static irqreturn_t msm_uart_irq(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
struct msm_dma *dma = &msm_port->rx_dma;
unsigned long flags;
unsigned int misr;
u32 val;
spin_lock_irqsave(&port->lock, flags);
- misr = msm_read(port, UART_MISR);
- msm_write(port, 0, UART_IMR); /* disable interrupt */
+ misr = msm_read(port, MSM_UART_MISR);
+ msm_write(port, 0, MSM_UART_IMR); /* disable interrupt */
- if (misr & UART_IMR_RXBREAK_START) {
+ if (misr & MSM_UART_IMR_RXBREAK_START) {
msm_port->break_detected = true;
- msm_write(port, UART_CR_CMD_RESET_RXBREAK_START, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_RXBREAK_START, MSM_UART_CR);
}
- if (misr & (UART_IMR_RXLEV | UART_IMR_RXSTALE)) {
+ if (misr & (MSM_UART_IMR_RXLEV | MSM_UART_IMR_RXSTALE)) {
if (dma->count) {
- val = UART_CR_CMD_STALE_EVENT_DISABLE;
- msm_write(port, val, UART_CR);
- val = UART_CR_CMD_RESET_STALE_INT;
- msm_write(port, val, UART_CR);
+ val = MSM_UART_CR_CMD_STALE_EVENT_DISABLE;
+ msm_write(port, val, MSM_UART_CR);
+ val = MSM_UART_CR_CMD_RESET_STALE_INT;
+ msm_write(port, val, MSM_UART_CR);
/*
* Flush DMA input fifo to memory, this will also
* trigger DMA RX completion
@@ -979,12 +982,12 @@ static irqreturn_t msm_uart_irq(int irq, void *dev_id)
msm_handle_rx(port);
}
}
- if (misr & UART_IMR_TXLEV)
+ if (misr & MSM_UART_IMR_TXLEV)
msm_handle_tx(port);
- if (misr & UART_IMR_DELTA_CTS)
+ if (misr & MSM_UART_IMR_DELTA_CTS)
msm_handle_delta_cts(port);
- msm_write(port, msm_port->imr, UART_IMR); /* restore interrupt */
+ msm_write(port, msm_port->imr, MSM_UART_IMR); /* restore interrupt */
spin_unlock_irqrestore(&port->lock, flags);
return IRQ_HANDLED;
@@ -992,7 +995,7 @@ static irqreturn_t msm_uart_irq(int irq, void *dev_id)
static unsigned int msm_tx_empty(struct uart_port *port)
{
- return (msm_read(port, UART_SR) & UART_SR_TX_EMPTY) ? TIOCSER_TEMT : 0;
+ return (msm_read(port, MSM_UART_SR) & MSM_UART_SR_TX_EMPTY) ? TIOCSER_TEMT : 0;
}
static unsigned int msm_get_mctrl(struct uart_port *port)
@@ -1002,19 +1005,19 @@ static unsigned int msm_get_mctrl(struct uart_port *port)
static void msm_reset(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
unsigned int mr;
/* reset everything */
- msm_write(port, UART_CR_CMD_RESET_RX, UART_CR);
- msm_write(port, UART_CR_CMD_RESET_TX, UART_CR);
- msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
- msm_write(port, UART_CR_CMD_RESET_BREAK_INT, UART_CR);
- msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
- msm_write(port, UART_CR_CMD_RESET_RFR, UART_CR);
- mr = msm_read(port, UART_MR1);
- mr &= ~UART_MR1_RX_RDY_CTL;
- msm_write(port, mr, UART_MR1);
+ msm_write(port, MSM_UART_CR_CMD_RESET_RX, MSM_UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_TX, MSM_UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_ERR, MSM_UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_BREAK_INT, MSM_UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_CTS, MSM_UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_RFR, MSM_UART_CR);
+ mr = msm_read(port, MSM_UART_MR1);
+ mr &= ~MSM_UART_MR1_RX_RDY_CTL;
+ msm_write(port, mr, MSM_UART_MR1);
/* Disable DM modes */
if (msm_port->is_uartdm)
@@ -1025,24 +1028,24 @@ static void msm_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
unsigned int mr;
- mr = msm_read(port, UART_MR1);
+ mr = msm_read(port, MSM_UART_MR1);
if (!(mctrl & TIOCM_RTS)) {
- mr &= ~UART_MR1_RX_RDY_CTL;
- msm_write(port, mr, UART_MR1);
- msm_write(port, UART_CR_CMD_RESET_RFR, UART_CR);
+ mr &= ~MSM_UART_MR1_RX_RDY_CTL;
+ msm_write(port, mr, MSM_UART_MR1);
+ msm_write(port, MSM_UART_CR_CMD_RESET_RFR, MSM_UART_CR);
} else {
- mr |= UART_MR1_RX_RDY_CTL;
- msm_write(port, mr, UART_MR1);
+ mr |= MSM_UART_MR1_RX_RDY_CTL;
+ msm_write(port, mr, MSM_UART_MR1);
}
}
static void msm_break_ctl(struct uart_port *port, int break_ctl)
{
if (break_ctl)
- msm_write(port, UART_CR_CMD_START_BREAK, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_START_BREAK, MSM_UART_CR);
else
- msm_write(port, UART_CR_CMD_STOP_BREAK, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_STOP_BREAK, MSM_UART_CR);
}
struct msm_baud_map {
@@ -1055,7 +1058,7 @@ static const struct msm_baud_map *
msm_find_best_baud(struct uart_port *port, unsigned int baud,
unsigned long *rate)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
unsigned int divisor, result;
unsigned long target, old, best_rate = 0, diff, best_diff = ULONG_MAX;
const struct msm_baud_map *entry, *end, *best;
@@ -1124,7 +1127,7 @@ static int msm_set_baud_rate(struct uart_port *port, unsigned int baud,
unsigned long *saved_flags)
{
unsigned int rxstale, watermark, mask;
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
const struct msm_baud_map *entry;
unsigned long flags, rate;
@@ -1139,45 +1142,45 @@ static int msm_set_baud_rate(struct uart_port *port, unsigned int baud,
*saved_flags = flags;
port->uartclk = rate;
- msm_write(port, entry->code, UART_CSR);
+ msm_write(port, entry->code, MSM_UART_CSR);
/* RX stale watermark */
rxstale = entry->rxstale;
- watermark = UART_IPR_STALE_LSB & rxstale;
+ watermark = MSM_UART_IPR_STALE_LSB & rxstale;
if (msm_port->is_uartdm) {
- mask = UART_DM_IPR_STALE_TIMEOUT_MSB;
+ mask = MSM_UART_DM_IPR_STALE_TIMEOUT_MSB;
} else {
- watermark |= UART_IPR_RXSTALE_LAST;
- mask = UART_IPR_STALE_TIMEOUT_MSB;
+ watermark |= MSM_UART_IPR_RXSTALE_LAST;
+ mask = MSM_UART_IPR_STALE_TIMEOUT_MSB;
}
watermark |= mask & (rxstale << 2);
- msm_write(port, watermark, UART_IPR);
+ msm_write(port, watermark, MSM_UART_IPR);
/* set RX watermark */
watermark = (port->fifosize * 3) / 4;
- msm_write(port, watermark, UART_RFWR);
+ msm_write(port, watermark, MSM_UART_RFWR);
/* set TX watermark */
- msm_write(port, 10, UART_TFWR);
+ msm_write(port, 10, MSM_UART_TFWR);
- msm_write(port, UART_CR_CMD_PROTECTION_EN, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_PROTECTION_EN, MSM_UART_CR);
msm_reset(port);
/* Enable RX and TX */
- msm_write(port, UART_CR_TX_ENABLE | UART_CR_RX_ENABLE, UART_CR);
+ msm_write(port, MSM_UART_CR_TX_ENABLE | MSM_UART_CR_RX_ENABLE, MSM_UART_CR);
/* turn on RX and CTS interrupts */
- msm_port->imr = UART_IMR_RXLEV | UART_IMR_RXSTALE |
- UART_IMR_CURRENT_CTS | UART_IMR_RXBREAK_START;
+ msm_port->imr = MSM_UART_IMR_RXLEV | MSM_UART_IMR_RXSTALE |
+ MSM_UART_IMR_CURRENT_CTS | MSM_UART_IMR_RXBREAK_START;
- msm_write(port, msm_port->imr, UART_IMR);
+ msm_write(port, msm_port->imr, MSM_UART_IMR);
if (msm_port->is_uartdm) {
- msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_STALE_INT, MSM_UART_CR);
msm_write(port, 0xFFFFFF, UARTDM_DMRX);
- msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_STALE_EVENT_ENABLE, MSM_UART_CR);
}
return baud;
@@ -1185,7 +1188,7 @@ static int msm_set_baud_rate(struct uart_port *port, unsigned int baud,
static void msm_init_clock(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
clk_prepare_enable(msm_port->clk);
clk_prepare_enable(msm_port->pclk);
@@ -1194,7 +1197,7 @@ static void msm_init_clock(struct uart_port *port)
static int msm_startup(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
unsigned int data, rfr_level, mask;
int ret;
@@ -1209,18 +1212,18 @@ static int msm_startup(struct uart_port *port)
rfr_level = port->fifosize;
/* set automatic RFR level */
- data = msm_read(port, UART_MR1);
+ data = msm_read(port, MSM_UART_MR1);
if (msm_port->is_uartdm)
- mask = UART_DM_MR1_AUTO_RFR_LEVEL1;
+ mask = MSM_UART_DM_MR1_AUTO_RFR_LEVEL1;
else
- mask = UART_MR1_AUTO_RFR_LEVEL1;
+ mask = MSM_UART_MR1_AUTO_RFR_LEVEL1;
data &= ~mask;
- data &= ~UART_MR1_AUTO_RFR_LEVEL0;
+ data &= ~MSM_UART_MR1_AUTO_RFR_LEVEL0;
data |= mask & (rfr_level << 2);
- data |= UART_MR1_AUTO_RFR_LEVEL0 & rfr_level;
- msm_write(port, data, UART_MR1);
+ data |= MSM_UART_MR1_AUTO_RFR_LEVEL0 & rfr_level;
+ msm_write(port, data, MSM_UART_MR1);
if (msm_port->is_uartdm) {
msm_request_tx_dma(msm_port, msm_port->uart.mapbase);
@@ -1246,10 +1249,10 @@ err_irq:
static void msm_shutdown(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
msm_port->imr = 0;
- msm_write(port, 0, UART_IMR); /* disable interrupts */
+ msm_write(port, 0, MSM_UART_IMR); /* disable interrupts */
if (msm_port->is_uartdm)
msm_release_dma(msm_port);
@@ -1262,7 +1265,7 @@ static void msm_shutdown(struct uart_port *port)
static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
struct msm_dma *dma = &msm_port->rx_dma;
unsigned long flags;
unsigned int baud, mr;
@@ -1279,60 +1282,60 @@ static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
tty_termios_encode_baud_rate(termios, baud, baud);
/* calculate parity */
- mr = msm_read(port, UART_MR2);
- mr &= ~UART_MR2_PARITY_MODE;
+ mr = msm_read(port, MSM_UART_MR2);
+ mr &= ~MSM_UART_MR2_PARITY_MODE;
if (termios->c_cflag & PARENB) {
if (termios->c_cflag & PARODD)
- mr |= UART_MR2_PARITY_MODE_ODD;
+ mr |= MSM_UART_MR2_PARITY_MODE_ODD;
else if (termios->c_cflag & CMSPAR)
- mr |= UART_MR2_PARITY_MODE_SPACE;
+ mr |= MSM_UART_MR2_PARITY_MODE_SPACE;
else
- mr |= UART_MR2_PARITY_MODE_EVEN;
+ mr |= MSM_UART_MR2_PARITY_MODE_EVEN;
}
/* calculate bits per char */
- mr &= ~UART_MR2_BITS_PER_CHAR;
+ mr &= ~MSM_UART_MR2_BITS_PER_CHAR;
switch (termios->c_cflag & CSIZE) {
case CS5:
- mr |= UART_MR2_BITS_PER_CHAR_5;
+ mr |= MSM_UART_MR2_BITS_PER_CHAR_5;
break;
case CS6:
- mr |= UART_MR2_BITS_PER_CHAR_6;
+ mr |= MSM_UART_MR2_BITS_PER_CHAR_6;
break;
case CS7:
- mr |= UART_MR2_BITS_PER_CHAR_7;
+ mr |= MSM_UART_MR2_BITS_PER_CHAR_7;
break;
case CS8:
default:
- mr |= UART_MR2_BITS_PER_CHAR_8;
+ mr |= MSM_UART_MR2_BITS_PER_CHAR_8;
break;
}
/* calculate stop bits */
- mr &= ~(UART_MR2_STOP_BIT_LEN_ONE | UART_MR2_STOP_BIT_LEN_TWO);
+ mr &= ~(MSM_UART_MR2_STOP_BIT_LEN_ONE | MSM_UART_MR2_STOP_BIT_LEN_TWO);
if (termios->c_cflag & CSTOPB)
- mr |= UART_MR2_STOP_BIT_LEN_TWO;
+ mr |= MSM_UART_MR2_STOP_BIT_LEN_TWO;
else
- mr |= UART_MR2_STOP_BIT_LEN_ONE;
+ mr |= MSM_UART_MR2_STOP_BIT_LEN_ONE;
/* set parity, bits per char, and stop bit */
- msm_write(port, mr, UART_MR2);
+ msm_write(port, mr, MSM_UART_MR2);
/* calculate and set hardware flow control */
- mr = msm_read(port, UART_MR1);
- mr &= ~(UART_MR1_CTS_CTL | UART_MR1_RX_RDY_CTL);
+ mr = msm_read(port, MSM_UART_MR1);
+ mr &= ~(MSM_UART_MR1_CTS_CTL | MSM_UART_MR1_RX_RDY_CTL);
if (termios->c_cflag & CRTSCTS) {
- mr |= UART_MR1_CTS_CTL;
- mr |= UART_MR1_RX_RDY_CTL;
+ mr |= MSM_UART_MR1_CTS_CTL;
+ mr |= MSM_UART_MR1_RX_RDY_CTL;
}
- msm_write(port, mr, UART_MR1);
+ msm_write(port, mr, MSM_UART_MR1);
/* Configure status bits to ignore based on termio flags. */
port->read_status_mask = 0;
if (termios->c_iflag & INPCK)
- port->read_status_mask |= UART_SR_PAR_FRAME_ERR;
+ port->read_status_mask |= MSM_UART_SR_PAR_FRAME_ERR;
if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
- port->read_status_mask |= UART_SR_RX_BREAK;
+ port->read_status_mask |= MSM_UART_SR_RX_BREAK;
uart_update_timeout(port, termios->c_cflag, baud);
@@ -1416,7 +1419,7 @@ static int msm_verify_port(struct uart_port *port, struct serial_struct *ser)
static void msm_power(struct uart_port *port, unsigned int state,
unsigned int oldstate)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
switch (state) {
case 0:
@@ -1435,10 +1438,10 @@ static void msm_power(struct uart_port *port, unsigned int state,
#ifdef CONFIG_CONSOLE_POLL
static int msm_poll_get_char_single(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
- unsigned int rf_reg = msm_port->is_uartdm ? UARTDM_RF : UART_RF;
+ struct msm_port *msm_port = to_msm_port(port);
+ unsigned int rf_reg = msm_port->is_uartdm ? UARTDM_RF : MSM_UART_RF;
- if (!(msm_read(port, UART_SR) & UART_SR_RX_READY))
+ if (!(msm_read(port, MSM_UART_SR) & MSM_UART_SR_RX_READY))
return NO_POLL_CHAR;
return msm_read(port, rf_reg) & 0xff;
@@ -1456,7 +1459,7 @@ static int msm_poll_get_char_dm(struct uart_port *port)
c = sp[sizeof(slop) - count];
count--;
/* Or if FIFO is empty */
- } else if (!(msm_read(port, UART_SR) & UART_SR_RX_READY)) {
+ } else if (!(msm_read(port, MSM_UART_SR) & MSM_UART_SR_RX_READY)) {
/*
* If RX packing buffer has less than a word, force stale to
* push contents into RX FIFO
@@ -1464,14 +1467,13 @@ static int msm_poll_get_char_dm(struct uart_port *port)
count = msm_read(port, UARTDM_RXFS);
count = (count >> UARTDM_RXFS_BUF_SHIFT) & UARTDM_RXFS_BUF_MASK;
if (count) {
- msm_write(port, UART_CR_CMD_FORCE_STALE, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_FORCE_STALE, MSM_UART_CR);
slop = msm_read(port, UARTDM_RF);
c = sp[0];
count--;
- msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_RESET_STALE_INT, MSM_UART_CR);
msm_write(port, 0xFFFFFF, UARTDM_DMRX);
- msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE,
- UART_CR);
+ msm_write(port, MSM_UART_CR_CMD_STALE_EVENT_ENABLE, MSM_UART_CR);
} else {
c = NO_POLL_CHAR;
}
@@ -1489,11 +1491,11 @@ static int msm_poll_get_char(struct uart_port *port)
{
u32 imr;
int c;
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
/* Disable all interrupts */
- imr = msm_read(port, UART_IMR);
- msm_write(port, 0, UART_IMR);
+ imr = msm_read(port, MSM_UART_IMR);
+ msm_write(port, 0, MSM_UART_IMR);
if (msm_port->is_uartdm)
c = msm_poll_get_char_dm(port);
@@ -1501,7 +1503,7 @@ static int msm_poll_get_char(struct uart_port *port)
c = msm_poll_get_char_single(port);
/* Enable interrupts */
- msm_write(port, imr, UART_IMR);
+ msm_write(port, imr, MSM_UART_IMR);
return c;
}
@@ -1509,28 +1511,28 @@ static int msm_poll_get_char(struct uart_port *port)
static void msm_poll_put_char(struct uart_port *port, unsigned char c)
{
u32 imr;
- struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_port *msm_port = to_msm_port(port);
/* Disable all interrupts */
- imr = msm_read(port, UART_IMR);
- msm_write(port, 0, UART_IMR);
+ imr = msm_read(port, MSM_UART_IMR);
+ msm_write(port, 0, MSM_UART_IMR);
if (msm_port->is_uartdm)
msm_reset_dm_count(port, 1);
/* Wait until FIFO is empty */
- while (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
+ while (!(msm_read(port, MSM_UART_SR) & MSM_UART_SR_TX_READY))
cpu_relax();
/* Write a character */
- msm_write(port, c, msm_port->is_uartdm ? UARTDM_TF : UART_TF);
+ msm_write(port, c, msm_port->is_uartdm ? UARTDM_TF : MSM_UART_TF);
/* Wait until FIFO is empty */
- while (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
+ while (!(msm_read(port, MSM_UART_SR) & MSM_UART_SR_TX_READY))
cpu_relax();
/* Enable interrupts */
- msm_write(port, imr, UART_IMR);
+ msm_write(port, imr, MSM_UART_IMR);
}
#endif
@@ -1588,7 +1590,7 @@ static struct msm_port msm_uart_ports[] = {
},
};
-#define UART_NR ARRAY_SIZE(msm_uart_ports)
+#define MSM_UART_NR ARRAY_SIZE(msm_uart_ports)
static inline struct uart_port *msm_get_port_from_line(unsigned int line)
{
@@ -1609,7 +1611,7 @@ static void __msm_console_write(struct uart_port *port, const char *s,
if (is_uartdm)
tf = port->membase + UARTDM_TF;
else
- tf = port->membase + UART_TF;
+ tf = port->membase + MSM_UART_TF;
/* Account for newlines that will get a carriage return added */
for (i = 0; i < count; i++)
@@ -1655,7 +1657,7 @@ static void __msm_console_write(struct uart_port *port, const char *s,
}
}
- while (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
+ while (!(msm_read(port, MSM_UART_SR) & MSM_UART_SR_TX_READY))
cpu_relax();
iowrite32_rep(tf, buf, 1);
@@ -1674,10 +1676,10 @@ static void msm_console_write(struct console *co, const char *s,
struct uart_port *port;
struct msm_port *msm_port;
- BUG_ON(co->index < 0 || co->index >= UART_NR);
+ BUG_ON(co->index < 0 || co->index >= MSM_UART_NR);
port = msm_get_port_from_line(co->index);
- msm_port = UART_TO_MSM(port);
+ msm_port = to_msm_port(port);
__msm_console_write(port, s, count, msm_port->is_uartdm);
}
@@ -1690,7 +1692,7 @@ static int msm_console_setup(struct console *co, char *options)
int parity = 'n';
int flow = 'n';
- if (unlikely(co->index >= UART_NR || co->index < 0))
+ if (unlikely(co->index >= MSM_UART_NR || co->index < 0))
return -ENXIO;
port = msm_get_port_from_line(co->index);
@@ -1771,7 +1773,7 @@ static struct uart_driver msm_uart_driver = {
.owner = THIS_MODULE,
.driver_name = "msm_serial",
.dev_name = "ttyMSM",
- .nr = UART_NR,
+ .nr = MSM_UART_NR,
.cons = MSM_CONSOLE,
};
@@ -1801,14 +1803,14 @@ static int msm_serial_probe(struct platform_device *pdev)
if (line < 0)
line = atomic_inc_return(&msm_uart_next_id) - 1;
- if (unlikely(line < 0 || line >= UART_NR))
+ if (unlikely(line < 0 || line >= MSM_UART_NR))
return -ENXIO;
dev_info(&pdev->dev, "msm_serial: detected port #%d\n", line);
port = msm_get_port_from_line(line);
port->dev = &pdev->dev;
- msm_port = UART_TO_MSM(port);
+ msm_port = to_msm_port(port);
id = of_match_device(msm_uartdm_table, &pdev->dev);
if (id)
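The hunks above rename the msm_serial register and bit macros from UART_* to MSM_UART_* and drop the old UART_TO_MSM() macro in favour of a to_msm_port() helper. The helper itself is outside this excerpt; as a minimal sketch (the body is an assumption based on the msm_port->uart accesses visible above, not a quote from the patch), it is presumably the usual container_of() accessor:

static inline struct msm_port *to_msm_port(struct uart_port *up)
{
	/* assumes struct msm_port embeds its struct uart_port as the 'uart' member */
	return container_of(up, struct msm_port, uart);
}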
diff --git a/drivers/tty/serial/mux.c b/drivers/tty/serial/mux.c
index 643dfbcc43f9..0ba0f4d9459d 100644
--- a/drivers/tty/serial/mux.c
+++ b/drivers/tty/serial/mux.c
@@ -481,12 +481,6 @@ static int __init mux_probe(struct parisc_device *dev)
port->line = port_cnt;
port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MUX_CONSOLE);
- /* The port->timeout needs to match what is present in
- * uart_wait_until_sent in serial_core.c. Otherwise
- * the time spent in msleep_interruptable will be very
- * long, causing the appearance of a console hang.
- */
- port->timeout = HZ / 50;
spin_lock_init(&port->lock);
status = uart_add_one_port(&mux_driver, port);
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index 93489fe334d0..65eaecd10b7c 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -265,6 +265,7 @@ static void mvebu_uart_rx_chars(struct uart_port *port, unsigned int status)
struct tty_port *tport = &port->state->port;
unsigned char ch = 0;
char flag = 0;
+ int ret;
do {
if (status & STAT_RX_RDY(port)) {
@@ -277,6 +278,16 @@ static void mvebu_uart_rx_chars(struct uart_port *port, unsigned int status)
port->icount.parity++;
}
+ /*
+ * For UART2, error bits are not cleared on buffer read,
+ * which causes an interrupt loop and a system hang.
+ */
+ if (IS_EXTENDED(port) && (status & STAT_BRK_ERR)) {
+ ret = readl(port->membase + UART_STAT);
+ ret |= STAT_BRK_ERR;
+ writel(ret, port->membase + UART_STAT);
+ }
+
if (status & STAT_BRK_DET) {
port->icount.brk++;
status &= ~(STAT_FRM_ERR | STAT_PAR_ERR);
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 46f4d4cacb6e..0aa666e247d5 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/console.h>
+#include <linux/serial.h>
#include <linux/serial_reg.h>
#include <linux/delay.h>
#include <linux/slab.h>
@@ -1102,8 +1103,6 @@ serial_omap_type(struct uart_port *port)
return up->name;
}
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
static void __maybe_unused wait_for_xmitr(struct uart_omap_port *up)
{
unsigned int status, tmout = 10000;
@@ -1118,7 +1117,7 @@ static void __maybe_unused wait_for_xmitr(struct uart_omap_port *up)
if (--tmout == 0)
break;
udelay(1);
- } while ((status & BOTH_EMPTY) != BOTH_EMPTY);
+ } while (!uart_lsr_tx_empty(status));
/* Wait up to 1s for flow control if necessary */
if (up->port.flags & UPF_CONS_FLOW) {
@@ -1186,7 +1185,7 @@ static void omap_serial_early_putc(struct uart_port *port, unsigned char c)
for (;;) {
status = omap_serial_early_in(port, UART_LSR);
- if ((status & BOTH_EMPTY) == BOTH_EMPTY)
+ if (uart_lsr_tx_empty(status))
break;
cpu_relax();
}
@@ -1325,7 +1324,8 @@ static inline void serial_omap_add_console_port(struct uart_omap_port *up)
/* Enable or disable the rs485 support */
static int
-serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
+serial_omap_config_rs485(struct uart_port *port, struct ktermios *termios,
+ struct serial_rs485 *rs485)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned int mode;
@@ -1559,6 +1559,13 @@ static int serial_omap_probe_rs485(struct uart_omap_port *up,
return 0;
}
+static const struct serial_rs485 serial_omap_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
+ SER_RS485_RX_DURING_TX,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1,
+};
+
static int serial_omap_probe(struct platform_device *pdev)
{
struct omap_uart_port_info *omap_up_info = dev_get_platdata(&pdev->dev);
@@ -1636,6 +1643,7 @@ static int serial_omap_probe(struct platform_device *pdev)
up->port.flags = omap_up_info->flags;
up->port.uartclk = omap_up_info->uartclk;
up->port.rs485_config = serial_omap_config_rs485;
+ up->port.rs485_supported = serial_omap_rs485_supported;
if (!up->port.uartclk) {
up->port.uartclk = DEFAULT_CLK_SPEED;
dev_warn(&pdev->dev,
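The omap-serial hunks above, like the pch_uart and pxa hunks below, drop the driver-local BOTH_EMPTY macro and test the LSR through uart_lsr_tx_empty()/UART_LSR_BOTH_EMPTY instead. Those helpers are not part of this excerpt; a hedged sketch of what they presumably look like, keeping the BOTH_EMPTY value shown in the removed lines:

/* presumably provided by <linux/serial.h> after this series */
#define UART_LSR_BOTH_EMPTY	(UART_LSR_TEMT | UART_LSR_THRE)

static inline bool uart_lsr_tx_empty(u16 lsr)
{
	/* true once both the shift register and the holding register are empty */
	return (lsr & UART_LSR_BOTH_EMPTY) == UART_LSR_BOTH_EMPTY;
}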
diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c
index 44d20e5a7dd3..888e17e3f25f 100644
--- a/drivers/tty/serial/owl-uart.c
+++ b/drivers/tty/serial/owl-uart.c
@@ -201,7 +201,7 @@ static void owl_uart_send_chars(struct uart_port *port)
ch = xmit->buf[xmit->tail];
owl_uart_write(port, ch, OWL_UART_TXDAT);
- xmit->tail = (xmit->tail + 1) & (SERIAL_XMIT_SIZE - 1);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
}
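The owl-uart change above (and the matching rda-uart hunk further down) advances the transmit ring tail with UART_XMIT_SIZE instead of the equivalent SERIAL_XMIT_SIZE. Both are powers of two, which is what makes the mask-based wrap valid; a small standalone illustration follows (the 4096 value is an assumption, matching a 4 KiB page-sized buffer, and is not taken from the patch):

#include <stdio.h>

#define UART_XMIT_SIZE 4096	/* must be a power of two for the mask trick */

int main(void)
{
	unsigned int tail = UART_XMIT_SIZE - 2;
	int i;

	/* the tail wraps back to 0 instead of running past the buffer */
	for (i = 0; i < 4; i++) {
		printf("tail = %u\n", tail);
		tail = (tail + 1) & (UART_XMIT_SIZE - 1);
	}
	return 0;
}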
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 3b26524d48e3..8a9065e4a903 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -3,6 +3,7 @@
*Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
*/
#include <linux/kernel.h>
+#include <linux/serial.h>
#include <linux/serial_reg.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -189,8 +190,6 @@ enum {
#define PCH_UART_HAL_LOOP (PCH_UART_MCR_LOOP)
#define PCH_UART_HAL_AFE (PCH_UART_MCR_AFE)
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
#define DEFAULT_UARTCLK 1843200 /* 1.8432 MHz */
#define CMITC_UARTCLK 192000000 /* 192.0000 MHz */
#define FRI2_64_UARTCLK 64000000 /* 64.0000 MHz */
@@ -1516,7 +1515,7 @@ static void pch_uart_put_poll_char(struct uart_port *port,
* Finally, wait for transmitter to become empty
* and restore the IER
*/
- wait_for_xmitr(priv, BOTH_EMPTY);
+ wait_for_xmitr(priv, UART_LSR_BOTH_EMPTY);
iowrite8(ier, priv->membase + UART_IER);
}
#endif /* CONFIG_CONSOLE_POLL */
@@ -1602,7 +1601,7 @@ pch_console_write(struct console *co, const char *s, unsigned int count)
* Finally, wait for transmitter to become empty
* and restore the IER
*/
- wait_for_xmitr(priv, BOTH_EMPTY);
+ wait_for_xmitr(priv, UART_LSR_BOTH_EMPTY);
iowrite8(ier, priv->membase + UART_IER);
if (port_locked)
diff --git a/drivers/tty/serial/pic32_uart.c b/drivers/tty/serial/pic32_uart.c
index b399aac530fe..f418f1de66b3 100644
--- a/drivers/tty/serial/pic32_uart.c
+++ b/drivers/tty/serial/pic32_uart.c
@@ -503,7 +503,7 @@ static int pic32_uart_startup(struct uart_port *port)
if (!sport->irq_fault_name) {
dev_err(port->dev, "%s: kasprintf err!", __func__);
ret = -ENOMEM;
- goto out_done;
+ goto out_disable_clk;
}
irq_set_status_flags(sport->irq_fault, IRQ_NOAUTOEN);
ret = request_irq(sport->irq_fault, pic32_uart_fault_interrupt,
@@ -579,6 +579,8 @@ out_r:
out_f:
free_irq(sport->irq_fault, port);
kfree(sport->irq_fault_name);
+out_disable_clk:
+ clk_disable_unprepare(sport->clk);
out_done:
return ret;
}
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 3133446e806c..f63257b8e872 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -52,7 +52,6 @@
#ifdef CONFIG_PPC_PMAC
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
-#include <asm/dbdma.h>
#include <asm/macio.h>
#else
#include <linux/platform_device.h>
diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
index e80ba8e10407..9309ffd87c8e 100644
--- a/drivers/tty/serial/pxa.c
+++ b/drivers/tty/serial/pxa.c
@@ -23,6 +23,7 @@
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
+#include <linux/serial.h>
#include <linux/serial_reg.h>
#include <linux/circ_buf.h>
#include <linux/delay.h>
@@ -575,8 +576,6 @@ static struct uart_driver serial_pxa_reg;
#ifdef CONFIG_SERIAL_PXA_CONSOLE
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
/*
* Wait for transmitter & holding register to empty
*/
@@ -594,7 +593,7 @@ static void wait_for_xmitr(struct uart_pxa_port *up)
if (--tmout == 0)
break;
udelay(1);
- } while ((status & BOTH_EMPTY) != BOTH_EMPTY);
+ } while (!uart_lsr_tx_empty(status));
/* Wait up to 1s for flow control if necessary */
if (up->port.flags & UPF_CONS_FLOW) {
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index f8f950641ad9..f4698a064a4d 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -1,6 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.
+/* Disable MMIO tracing to prevent excessive logging of unwanted MMIO traces */
+#define __DISABLE_TRACE_MMIO__
+
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/io.h>
@@ -940,52 +943,63 @@ static int qcom_geni_serial_startup(struct uart_port *uport)
return 0;
}
-static unsigned long get_clk_div_rate(struct clk *clk, unsigned int baud,
- unsigned int sampling_rate, unsigned int *clk_div)
+static unsigned long find_clk_rate_in_tol(struct clk *clk, unsigned int desired_clk,
+ unsigned int *clk_div, unsigned int percent_tol)
{
- unsigned long ser_clk;
- unsigned long desired_clk;
- unsigned long freq, prev;
+ unsigned long freq;
unsigned long div, maxdiv;
- int64_t mult;
-
- desired_clk = baud * sampling_rate;
- if (!desired_clk) {
- pr_err("%s: Invalid frequency\n", __func__);
- return 0;
- }
+ u64 mult;
+ unsigned long offset, abs_tol, achieved;
+ abs_tol = div_u64((u64)desired_clk * percent_tol, 100);
maxdiv = CLK_DIV_MSK >> CLK_DIV_SHFT;
- prev = 0;
-
- for (div = 1; div <= maxdiv; div++) {
- mult = div * desired_clk;
- if (mult > ULONG_MAX)
+ div = 1;
+ while (div <= maxdiv) {
+ mult = (u64)div * desired_clk;
+ if (mult != (unsigned long)mult)
break;
- freq = clk_round_rate(clk, (unsigned long)mult);
- if (!(freq % desired_clk)) {
- ser_clk = freq;
- break;
- }
+ offset = div * abs_tol;
+ freq = clk_round_rate(clk, mult - offset);
- if (!prev)
- ser_clk = freq;
- else if (prev == freq)
+ /* Can only get lower if we're done */
+ if (freq < mult - offset)
break;
- prev = freq;
- }
+ /*
+ * Re-calculate div in case rounding skipped rates but we
+ * ended up at a good one, then check for a match.
+ */
+ div = DIV_ROUND_CLOSEST(freq, desired_clk);
+ achieved = DIV_ROUND_CLOSEST(freq, div);
+ if (achieved <= desired_clk + abs_tol &&
+ achieved >= desired_clk - abs_tol) {
+ *clk_div = div;
+ return freq;
+ }
- if (!ser_clk) {
- pr_err("%s: Can't find matching DFS entry for baud %d\n",
- __func__, baud);
- return ser_clk;
+ div = DIV_ROUND_UP(freq, desired_clk);
}
- *clk_div = ser_clk / desired_clk;
- if (!(*clk_div))
- *clk_div = 1;
+ return 0;
+}
+
+static unsigned long get_clk_div_rate(struct clk *clk, unsigned int baud,
+ unsigned int sampling_rate, unsigned int *clk_div)
+{
+ unsigned long ser_clk;
+ unsigned long desired_clk;
+
+ desired_clk = baud * sampling_rate;
+ if (!desired_clk)
+ return 0;
+
+ /*
+ * try to find a clock rate within 2% tolerance, then within 5%
+ */
+ ser_clk = find_clk_rate_in_tol(clk, desired_clk, clk_div, 2);
+ if (!ser_clk)
+ ser_clk = find_clk_rate_in_tol(clk, desired_clk, clk_div, 5);
return ser_clk;
}
@@ -1020,8 +1034,15 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
clk_rate = get_clk_div_rate(port->se.clk, baud,
sampling_rate, &clk_div);
- if (!clk_rate)
+ if (!clk_rate) {
+ dev_err(port->se.dev,
+ "Couldn't find suitable clock rate for %u\n",
+ baud * sampling_rate);
goto out_restart_rx;
+ }
+
+ dev_dbg(port->se.dev, "desired_rate-%u, clk_rate-%lu, clk_div-%u\n",
+ baud * sampling_rate, clk_rate, clk_div);
uport->uartclk = clk_rate;
dev_pm_opp_set_rate(uport->dev, clk_rate);
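The get_clk_div_rate()/find_clk_rate_in_tol() rework above accepts any source clock whose per-divisor rate falls within 2% (and, failing that, 5%) of baud * sampling_rate, rather than requiring an exact multiple. A hedged worked example, with numbers chosen purely for illustration: for baud = 115200 and a sampling rate of 16, desired_clk = 1843200 Hz, so the 2% pass accepts achieved rates between 1806336 Hz and 1880064 Hz. The acceptance test reduces to:

#include <stdbool.h>
#include <stdint.h>

/* achieved = clk_round_rate() result divided by the candidate divisor */
static bool rate_within_tol(uint64_t achieved, uint64_t desired,
			    unsigned int percent_tol)
{
	uint64_t abs_tol = desired * percent_tol / 100;

	return achieved >= desired - abs_tol && achieved <= desired + abs_tol;
}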
diff --git a/drivers/tty/serial/rda-uart.c b/drivers/tty/serial/rda-uart.c
index f556b4955f59..feb2054aba37 100644
--- a/drivers/tty/serial/rda-uart.c
+++ b/drivers/tty/serial/rda-uart.c
@@ -353,7 +353,7 @@ static void rda_uart_send_chars(struct uart_port *port)
ch = xmit->buf[xmit->tail];
rda_uart_write(port, ch, RDA_UART_RXTX_BUFFER);
- xmit->tail = (xmit->tail + 1) & (SERIAL_XMIT_SIZE - 1);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
}
diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
index 1afe47b62ad5..b7a4b47ce74e 100644
--- a/drivers/tty/serial/samsung_tty.c
+++ b/drivers/tty/serial/samsung_tty.c
@@ -48,6 +48,12 @@
#define S3C24XX_SERIAL_MAJOR 204
#define S3C24XX_SERIAL_MINOR 64
+#ifdef CONFIG_ARM64
+#define UART_NR 12
+#else
+#define UART_NR CONFIG_SERIAL_SAMSUNG_UARTS
+#endif
+
#define S3C24XX_TX_PIO 1
#define S3C24XX_TX_DMA 2
#define S3C24XX_RX_PIO 1
@@ -87,7 +93,7 @@ struct s3c24xx_uart_info {
struct s3c24xx_serial_drv_data {
const struct s3c24xx_uart_info info;
const struct s3c2410_uartcfg def_cfg;
- const unsigned int fifosize[CONFIG_SERIAL_SAMSUNG_UARTS];
+ const unsigned int fifosize[UART_NR];
};
struct s3c24xx_uart_dma {
@@ -1011,6 +1017,7 @@ static unsigned int s3c24xx_serial_get_mctrl(struct uart_port *port)
static void s3c24xx_serial_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
unsigned int umcon = rd_regl(port, S3C2410_UMCON);
+ unsigned int ucon = rd_regl(port, S3C2410_UCON);
if (mctrl & TIOCM_RTS)
umcon |= S3C2410_UMCOM_RTS_LOW;
@@ -1018,6 +1025,13 @@ static void s3c24xx_serial_set_mctrl(struct uart_port *port, unsigned int mctrl)
umcon &= ~S3C2410_UMCOM_RTS_LOW;
wr_regl(port, S3C2410_UMCON, umcon);
+
+ if (mctrl & TIOCM_LOOP)
+ ucon |= S3C2410_UCON_LOOPBACK;
+ else
+ ucon &= ~S3C2410_UCON_LOOPBACK;
+
+ wr_regl(port, S3C2410_UCON, ucon);
}
static void s3c24xx_serial_break_ctl(struct uart_port *port, int break_state)
@@ -1801,67 +1815,27 @@ static const struct uart_ops apple_s5l_serial_ops = {
static struct uart_driver s3c24xx_uart_drv = {
.owner = THIS_MODULE,
.driver_name = "s3c2410_serial",
- .nr = CONFIG_SERIAL_SAMSUNG_UARTS,
+ .nr = UART_NR,
.cons = S3C24XX_SERIAL_CONSOLE,
.dev_name = S3C24XX_SERIAL_NAME,
.major = S3C24XX_SERIAL_MAJOR,
.minor = S3C24XX_SERIAL_MINOR,
};
-#define __PORT_LOCK_UNLOCKED(i) \
- __SPIN_LOCK_UNLOCKED(s3c24xx_serial_ports[i].port.lock)
-static struct s3c24xx_uart_port
-s3c24xx_serial_ports[CONFIG_SERIAL_SAMSUNG_UARTS] = {
- [0] = {
- .port = {
- .lock = __PORT_LOCK_UNLOCKED(0),
- .iotype = UPIO_MEM,
- .uartclk = 0,
- .fifosize = 16,
- .ops = &s3c24xx_serial_ops,
- .flags = UPF_BOOT_AUTOCONF,
- .line = 0,
- }
- },
- [1] = {
- .port = {
- .lock = __PORT_LOCK_UNLOCKED(1),
- .iotype = UPIO_MEM,
- .uartclk = 0,
- .fifosize = 16,
- .ops = &s3c24xx_serial_ops,
- .flags = UPF_BOOT_AUTOCONF,
- .line = 1,
- }
- },
-#if CONFIG_SERIAL_SAMSUNG_UARTS > 2
- [2] = {
- .port = {
- .lock = __PORT_LOCK_UNLOCKED(2),
- .iotype = UPIO_MEM,
- .uartclk = 0,
- .fifosize = 16,
- .ops = &s3c24xx_serial_ops,
- .flags = UPF_BOOT_AUTOCONF,
- .line = 2,
- }
- },
-#endif
-#if CONFIG_SERIAL_SAMSUNG_UARTS > 3
- [3] = {
- .port = {
- .lock = __PORT_LOCK_UNLOCKED(3),
- .iotype = UPIO_MEM,
- .uartclk = 0,
- .fifosize = 16,
- .ops = &s3c24xx_serial_ops,
- .flags = UPF_BOOT_AUTOCONF,
- .line = 3,
- }
- }
-#endif
-};
-#undef __PORT_LOCK_UNLOCKED
+static struct s3c24xx_uart_port s3c24xx_serial_ports[UART_NR];
+
+static void s3c24xx_serial_init_port_default(int index)
+{
+ struct uart_port *port = &s3c24xx_serial_ports[index].port;
+
+ spin_lock_init(&port->lock);
+
+ port->iotype = UPIO_MEM;
+ port->uartclk = 0;
+ port->fifosize = 16;
+ port->ops = &s3c24xx_serial_ops;
+ port->flags = UPF_BOOT_AUTOCONF;
+ port->line = index;
+}
/* s3c24xx_serial_resetport
*
@@ -2177,6 +2151,8 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
}
ourport = &s3c24xx_serial_ports[index];
+ s3c24xx_serial_init_port_default(index);
+
ourport->drv_data = s3c24xx_get_driver_data(pdev);
if (!ourport->drv_data) {
dev_err(&pdev->dev, "could not find driver data\n");
@@ -2575,7 +2551,7 @@ s3c24xx_serial_console_setup(struct console *co, char *options)
/* is this a valid port */
- if (co->index == -1 || co->index >= CONFIG_SERIAL_SAMSUNG_UARTS)
+ if (co->index == -1 || co->index >= UART_NR)
co->index = 0;
port = &s3c24xx_serial_ports[co->index].port;
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 8472bf70477c..259e08cc347c 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1127,7 +1127,7 @@ static void sc16is7xx_set_termios(struct uart_port *port,
spin_unlock_irqrestore(&port->lock, flags);
}
-static int sc16is7xx_config_rs485(struct uart_port *port,
+static int sc16is7xx_config_rs485(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
@@ -1143,7 +1143,6 @@ static int sc16is7xx_config_rs485(struct uart_port *port,
return -EINVAL;
}
- port->rs485 = *rs485;
one->config.flags |= SC16IS7XX_RECONF_RS485;
kthread_queue_work(&s->kworker, &one->reg_work);
@@ -1354,6 +1353,12 @@ static int sc16is7xx_gpio_direction_output(struct gpio_chip *chip,
}
#endif
+static const struct serial_rs485 sc16is7xx_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_AFTER_SEND,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1, /* Not supported but keep returning -EINVAL */
+};
+
static int sc16is7xx_probe(struct device *dev,
const struct sc16is7xx_devtype *devtype,
struct regmap *regmap, int irq)
@@ -1456,6 +1461,7 @@ static int sc16is7xx_probe(struct device *dev,
s->p[i].port.iotype = UPIO_PORT;
s->p[i].port.uartclk = freq;
s->p[i].port.rs485_config = sc16is7xx_config_rs485;
+ s->p[i].port.rs485_supported = sc16is7xx_rs485_supported;
s->p[i].port.ops = &sc16is7xx_ops;
s->p[i].old_mctrl = 0;
s->p[i].port.line = sc16is7xx_alloc_line();
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index d942ab152f5a..ad4f3567ff90 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -441,7 +441,7 @@ static char tegra_uart_decode_rx_error(struct tegra_uart_port *tup,
if (unlikely(lsr & TEGRA_UART_LSR_ANY)) {
if (lsr & UART_LSR_OE) {
- /* Overrrun error */
+ /* Overrun error */
flag = TTY_OVERRUN;
tup->uport.icount.overrun++;
dev_dbg(tup->uport.dev, "Got overrun errors\n");
@@ -1080,7 +1080,7 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
tup->rx_in_progress = 1;
/*
- * Enable IE_RXS for the receive status interrupts like line errros.
+ * Enable IE_RXS for the receive status interrupts like line errors.
* Enable IE_RX_TIMEOUT to get the bytes which cannot be DMA'd.
*
* EORD is different interrupt than RX_TIMEOUT - RX_TIMEOUT occurs when
@@ -1667,6 +1667,7 @@ static int __init tegra_uart_init(void)
node = of_find_matching_node(NULL, tegra_uart_of_match);
if (node)
match = of_match_node(tegra_uart_of_match, node);
+ of_node_put(node);
if (match)
cdata = match->data;
if (cdata)
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 3dc926d6c00a..12c87cd201a7 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -97,9 +97,16 @@ static inline struct uart_port *uart_port_check(struct uart_state *state)
return state->uart_port;
}
-/*
- * This routine is used by the interrupt handler to schedule processing in
- * the software interrupt portion of the driver.
+/**
+ * uart_write_wakeup - schedule write processing
+ * @port: port to be processed
+ *
+ * This routine is used by the interrupt handler to schedule processing in the
+ * software interrupt portion of the driver. A driver is expected to call this
+ * function when the number of characters in the transmit buffer has dropped
+ * below a threshold.
+ *
+ * Locking: @port->lock should be held
*/
void uart_write_wakeup(struct uart_port *port)
{
@@ -327,13 +334,16 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
}
/**
- * uart_update_timeout - update per-port FIFO timeout.
- * @port: uart_port structure describing the port
- * @cflag: termios cflag value
- * @baud: speed of the port
+ * uart_update_timeout - update per-port frame timing information
+ * @port: uart_port structure describing the port
+ * @cflag: termios cflag value
+ * @baud: speed of the port
*
- * Set the port FIFO timeout value. The @cflag value should
- * reflect the actual hardware settings.
+ * Set the @port frame timing information from which the FIFO timeout value is
+ * derived. The @cflag value should reflect the actual hardware settings, as the
+ * number of bits, parity, stop bits and baud rate are taken into account here.
+ *
+ * Locking: caller is expected to take @port->lock
*/
void
uart_update_timeout(struct uart_port *port, unsigned int cflag,
@@ -343,35 +353,30 @@ uart_update_timeout(struct uart_port *port, unsigned int cflag,
u64 frame_time;
frame_time = (u64)size * NSEC_PER_SEC;
- size *= port->fifosize;
-
- /*
- * Figure the timeout to send the above number of bits.
- * Add .02 seconds of slop
- */
- port->timeout = (HZ * size) / baud + HZ/50;
port->frame_time = DIV64_U64_ROUND_UP(frame_time, baud);
}
EXPORT_SYMBOL(uart_update_timeout);
/**
- * uart_get_baud_rate - return baud rate for a particular port
- * @port: uart_port structure describing the port in question.
- * @termios: desired termios settings.
- * @old: old termios (or NULL)
- * @min: minimum acceptable baud rate
- * @max: maximum acceptable baud rate
+ * uart_get_baud_rate - return baud rate for a particular port
+ * @port: uart_port structure describing the port in question.
+ * @termios: desired termios settings
+ * @old: old termios (or %NULL)
+ * @min: minimum acceptable baud rate
+ * @max: maximum acceptable baud rate
+ *
+ * Decode the termios structure into a numeric baud rate, taking account of the
+ * magic 38400 baud rate (with spd_* flags), and mapping the %B0 rate to 9600
+ * baud.
*
- * Decode the termios structure into a numeric baud rate,
- * taking account of the magic 38400 baud rate (with spd_*
- * flags), and mapping the %B0 rate to 9600 baud.
+ * If the new baud rate is invalid, try the @old termios setting. If it's still
+ * invalid, we try 9600 baud.
*
- * If the new baud rate is invalid, try the old termios setting.
- * If it's still invalid, we try 9600 baud.
+ * The @termios structure is updated to reflect the baud rate we're actually
+ * going to be using. Don't do this for the case where B0 is requested ("hang
+ * up").
*
- * Update the @termios structure to reflect the baud rate
- * we're actually going to be using. Don't do this for the case
- * where B0 is requested ("hang up").
+ * Locking: caller dependent
*/
unsigned int
uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
@@ -456,11 +461,17 @@ uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
EXPORT_SYMBOL(uart_get_baud_rate);
/**
- * uart_get_divisor - return uart clock divisor
- * @port: uart_port structure describing the port.
- * @baud: desired baud rate
+ * uart_get_divisor - return uart clock divisor
+ * @port: uart_port structure describing the port
+ * @baud: desired baud rate
+ *
+ * Calculate the divisor (baud_base / baud) for the specified @baud,
+ * appropriately rounded.
*
- * Calculate the uart clock divisor for the port.
+ * If 38400 baud and custom divisor is selected, return the custom divisor
+ * instead.
+ *
+ * Locking: caller dependent
*/
unsigned int
uart_get_divisor(struct uart_port *port, unsigned int baud)
@@ -1023,10 +1034,10 @@ static int uart_set_info_user(struct tty_struct *tty, struct serial_struct *ss)
}
/**
- * uart_get_lsr_info - get line status register info
- * @tty: tty associated with the UART
- * @state: UART being queried
- * @value: returned modem value
+ * uart_get_lsr_info - get line status register info
+ * @tty: tty associated with the UART
+ * @state: UART being queried
+ * @value: returned modem value
*/
static int uart_get_lsr_info(struct tty_struct *tty,
struct uart_state *state, unsigned int __user *value)
@@ -1276,6 +1287,126 @@ static int uart_get_icount(struct tty_struct *tty,
return 0;
}
+#define SER_RS485_LEGACY_FLAGS (SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | \
+ SER_RS485_RTS_AFTER_SEND | SER_RS485_RX_DURING_TX | \
+ SER_RS485_TERMINATE_BUS)
+
+static int uart_check_rs485_flags(struct uart_port *port, struct serial_rs485 *rs485)
+{
+ u32 flags = rs485->flags;
+
+ /* Don't return -EINVAL for unsupported legacy flags */
+ flags &= ~SER_RS485_LEGACY_FLAGS;
+
+ /*
+ * For any bit outside of the legacy ones that is not supported by
+ * the driver, return -EINVAL.
+ */
+ if (flags & ~port->rs485_supported.flags)
+ return -EINVAL;
+
+ /* Asking for address w/o addressing mode? */
+ if (!(rs485->flags & SER_RS485_ADDRB) &&
+ (rs485->flags & (SER_RS485_ADDR_RECV|SER_RS485_ADDR_DEST)))
+ return -EINVAL;
+
+ /* Address given but not enabled? */
+ if (!(rs485->flags & SER_RS485_ADDR_RECV) && rs485->addr_recv)
+ return -EINVAL;
+ if (!(rs485->flags & SER_RS485_ADDR_DEST) && rs485->addr_dest)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void uart_sanitize_serial_rs485_delays(struct uart_port *port,
+ struct serial_rs485 *rs485)
+{
+ if (!port->rs485_supported.delay_rts_before_send) {
+ if (rs485->delay_rts_before_send) {
+ dev_warn_ratelimited(port->dev,
+ "%s (%d): RTS delay before sending not supported\n",
+ port->name, port->line);
+ }
+ rs485->delay_rts_before_send = 0;
+ } else if (rs485->delay_rts_before_send > RS485_MAX_RTS_DELAY) {
+ rs485->delay_rts_before_send = RS485_MAX_RTS_DELAY;
+ dev_warn_ratelimited(port->dev,
+ "%s (%d): RTS delay before sending clamped to %u ms\n",
+ port->name, port->line, rs485->delay_rts_before_send);
+ }
+
+ if (!port->rs485_supported.delay_rts_after_send) {
+ if (rs485->delay_rts_after_send) {
+ dev_warn_ratelimited(port->dev,
+ "%s (%d): RTS delay after sending not supported\n",
+ port->name, port->line);
+ }
+ rs485->delay_rts_after_send = 0;
+ } else if (rs485->delay_rts_after_send > RS485_MAX_RTS_DELAY) {
+ rs485->delay_rts_after_send = RS485_MAX_RTS_DELAY;
+ dev_warn_ratelimited(port->dev,
+ "%s (%d): RTS delay after sending clamped to %u ms\n",
+ port->name, port->line, rs485->delay_rts_after_send);
+ }
+}
+
+static void uart_sanitize_serial_rs485(struct uart_port *port, struct serial_rs485 *rs485)
+{
+ u32 supported_flags = port->rs485_supported.flags;
+
+ if (!(rs485->flags & SER_RS485_ENABLED)) {
+ memset(rs485, 0, sizeof(*rs485));
+ return;
+ }
+
+ /* Pick sane settings if the user hasn't */
+ if ((supported_flags & (SER_RS485_RTS_ON_SEND|SER_RS485_RTS_AFTER_SEND)) &&
+ !(rs485->flags & SER_RS485_RTS_ON_SEND) ==
+ !(rs485->flags & SER_RS485_RTS_AFTER_SEND)) {
+ dev_warn_ratelimited(port->dev,
+ "%s (%d): invalid RTS setting, using RTS_ON_SEND instead\n",
+ port->name, port->line);
+ rs485->flags |= SER_RS485_RTS_ON_SEND;
+ rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
+ supported_flags |= SER_RS485_RTS_ON_SEND|SER_RS485_RTS_AFTER_SEND;
+ }
+
+ rs485->flags &= supported_flags;
+
+ uart_sanitize_serial_rs485_delays(port, rs485);
+
+ /* Return clean padding area to userspace */
+ memset(rs485->padding0, 0, sizeof(rs485->padding0));
+ memset(rs485->padding1, 0, sizeof(rs485->padding1));
+}
+
+static void uart_set_rs485_termination(struct uart_port *port,
+ const struct serial_rs485 *rs485)
+{
+ if (!(rs485->flags & SER_RS485_ENABLED))
+ return;
+
+ gpiod_set_value_cansleep(port->rs485_term_gpio,
+ !!(rs485->flags & SER_RS485_TERMINATE_BUS));
+}
+
+int uart_rs485_config(struct uart_port *port)
+{
+ struct serial_rs485 *rs485 = &port->rs485;
+ int ret;
+
+ uart_sanitize_serial_rs485(port, rs485);
+ uart_set_rs485_termination(port, rs485);
+
+ ret = port->rs485_config(port, NULL, rs485);
+ if (ret)
+ memset(rs485, 0, sizeof(*rs485));
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(uart_rs485_config);
+
static int uart_get_rs485_config(struct uart_port *port,
struct serial_rs485 __user *rs485)
{
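The block of added lines above moves RS485 flag validation, delay clamping and bus-termination handling into serial_core, and rejects with -EINVAL any non-legacy flag the driver does not advertise in rs485_supported. A hedged userspace sketch of how that check is exercised through the TIOCSRS485 ioctl (the device node and the 0x42 address are hypothetical, chosen only for the example):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/serial.h>

int main(void)
{
	struct serial_rs485 rs485;
	int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);	/* hypothetical port */

	if (fd < 0)
		return 1;

	memset(&rs485, 0, sizeof(rs485));
	rs485.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND |
		      SER_RS485_ADDRB | SER_RS485_ADDR_RECV;
	rs485.addr_recv = 0x42;	/* hypothetical receive filter address */

	/* fails with EINVAL unless the driver advertises the addressing flags */
	if (ioctl(fd, TIOCSRS485, &rs485) < 0)
		perror("TIOCSRS485");

	return 0;
}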
@@ -1292,7 +1423,7 @@ static int uart_get_rs485_config(struct uart_port *port,
return 0;
}
-static int uart_set_rs485_config(struct uart_port *port,
+static int uart_set_rs485_config(struct tty_struct *tty, struct uart_port *port,
struct serial_rs485 __user *rs485_user)
{
struct serial_rs485 rs485;
@@ -1305,34 +1436,14 @@ static int uart_set_rs485_config(struct uart_port *port,
if (copy_from_user(&rs485, rs485_user, sizeof(*rs485_user)))
return -EFAULT;
- /* pick sane settings if the user hasn't */
- if (!(rs485.flags & SER_RS485_RTS_ON_SEND) ==
- !(rs485.flags & SER_RS485_RTS_AFTER_SEND)) {
- dev_warn_ratelimited(port->dev,
- "%s (%d): invalid RTS setting, using RTS_ON_SEND instead\n",
- port->name, port->line);
- rs485.flags |= SER_RS485_RTS_ON_SEND;
- rs485.flags &= ~SER_RS485_RTS_AFTER_SEND;
- }
-
- if (rs485.delay_rts_before_send > RS485_MAX_RTS_DELAY) {
- rs485.delay_rts_before_send = RS485_MAX_RTS_DELAY;
- dev_warn_ratelimited(port->dev,
- "%s (%d): RTS delay before sending clamped to %u ms\n",
- port->name, port->line, rs485.delay_rts_before_send);
- }
-
- if (rs485.delay_rts_after_send > RS485_MAX_RTS_DELAY) {
- rs485.delay_rts_after_send = RS485_MAX_RTS_DELAY;
- dev_warn_ratelimited(port->dev,
- "%s (%d): RTS delay after sending clamped to %u ms\n",
- port->name, port->line, rs485.delay_rts_after_send);
- }
- /* Return clean padding area to userspace */
- memset(rs485.padding, 0, sizeof(rs485.padding));
+ ret = uart_check_rs485_flags(port, &rs485);
+ if (ret)
+ return ret;
+ uart_sanitize_serial_rs485(port, &rs485);
+ uart_set_rs485_termination(port, &rs485);
spin_lock_irqsave(&port->lock, flags);
- ret = port->rs485_config(port, &rs485);
+ ret = port->rs485_config(port, &tty->termios, &rs485);
if (!ret)
port->rs485 = rs485;
spin_unlock_irqrestore(&port->lock, flags);
@@ -1441,6 +1552,10 @@ uart_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
if (ret != -ENOIOCTLCMD)
goto out;
+ /* rs485_config requires more locking than others */
+ if (cmd == TIOCGRS485)
+ down_write(&tty->termios_rwsem);
+
mutex_lock(&port->mutex);
uport = uart_port_check(state);
@@ -1464,7 +1579,7 @@ uart_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
break;
case TIOCSRS485:
- ret = uart_set_rs485_config(uport, uarg);
+ ret = uart_set_rs485_config(tty, uport, uarg);
break;
case TIOCSISO7816:
@@ -1481,6 +1596,8 @@ uart_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
}
out_up:
mutex_unlock(&port->mutex);
+ if (cmd == TIOCGRS485)
+ up_write(&tty->termios_rwsem);
out:
return ret;
}
@@ -1628,7 +1745,7 @@ static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
{
struct uart_state *state = tty->driver_data;
struct uart_port *port;
- unsigned long char_time, expire;
+ unsigned long char_time, expire, fifo_timeout;
port = uart_port_ref(state);
if (!port)
@@ -1658,12 +1775,13 @@ static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
* amount of time to send the entire FIFO, it probably won't
* ever clear. This assumes the UART isn't doing flow
* control, which is currently the case. Hence, if it ever
- * takes longer than port->timeout, this is probably due to a
+ * takes longer than FIFO timeout, this is probably due to a
* UART bug of some kind. So, we clamp the timeout parameter at
- * 2*port->timeout.
+ * 2 * FIFO timeout.
*/
- if (timeout == 0 || timeout > 2 * port->timeout)
- timeout = 2 * port->timeout;
+ fifo_timeout = uart_fifo_timeout(port);
+ if (timeout == 0 || timeout > 2 * fifo_timeout)
+ timeout = 2 * fifo_timeout;
}
expire = jiffies + timeout;
@@ -1949,11 +2067,11 @@ static void uart_port_spin_lock_init(struct uart_port *port)
#if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
/**
- * uart_console_write - write a console message to a serial port
- * @port: the port to write the message
- * @s: array of characters
- * @count: number of characters in string to write
- * @putchar: function to write character to port
+ * uart_console_write - write a console message to a serial port
+ * @port: the port to write the message
+ * @s: array of characters
+ * @count: number of characters in string to write
+ * @putchar: function to write character to port
*/
void uart_console_write(struct uart_port *port, const char *s,
unsigned int count,
@@ -1969,10 +2087,15 @@ void uart_console_write(struct uart_port *port, const char *s,
}
EXPORT_SYMBOL_GPL(uart_console_write);
-/*
- * Check whether an invalid uart number has been specified, and
- * if so, search for the first available port that does have
- * console support.
+/**
+ * uart_get_console - get uart port for console
+ * @ports: ports to search in
+ * @nr: number of @ports
+ * @co: console to search for
+ * Returns: uart_port for the console @co
+ *
+ * Check whether an invalid uart number has been specified (as @co->index), and
+ * if so, search for the first available port that does have console support.
*/
struct uart_port * __init
uart_get_console(struct uart_port *ports, int nr, struct console *co)
@@ -1992,24 +2115,23 @@ uart_get_console(struct uart_port *ports, int nr, struct console *co)
}
/**
- * uart_parse_earlycon - Parse earlycon options
- * @p: ptr to 2nd field (ie., just beyond '<name>,')
- * @iotype: ptr for decoded iotype (out)
- * @addr: ptr for decoded mapbase/iobase (out)
- * @options: ptr for <options> field; NULL if not present (out)
- *
- * Decodes earlycon kernel command line parameters of the form
- * earlycon=<name>,io|mmio|mmio16|mmio32|mmio32be|mmio32native,<addr>,<options>
- * console=<name>,io|mmio|mmio16|mmio32|mmio32be|mmio32native,<addr>,<options>
+ * uart_parse_earlycon - Parse earlycon options
+ * @p: ptr to 2nd field (ie., just beyond '<name>,')
+ * @iotype: ptr for decoded iotype (out)
+ * @addr: ptr for decoded mapbase/iobase (out)
+ * @options: ptr for <options> field; %NULL if not present (out)
*
- * The optional form
+ * Decodes earlycon kernel command line parameters of the form:
+ * * earlycon=<name>,io|mmio|mmio16|mmio32|mmio32be|mmio32native,<addr>,<options>
+ * * console=<name>,io|mmio|mmio16|mmio32|mmio32be|mmio32native,<addr>,<options>
*
- * earlycon=<name>,0x<addr>,<options>
- * console=<name>,0x<addr>,<options>
+ * The optional form:
+ * * earlycon=<name>,0x<addr>,<options>
+ * * console=<name>,0x<addr>,<options>
*
- * is also accepted; the returned @iotype will be UPIO_MEM.
+ * is also accepted; the returned @iotype will be %UPIO_MEM.
*
- * Returns 0 on success or -EINVAL on failure
+ * Returns: 0 on success or -%EINVAL on failure
*/
int uart_parse_earlycon(char *p, unsigned char *iotype, resource_size_t *addr,
char **options)
@@ -2054,16 +2176,16 @@ int uart_parse_earlycon(char *p, unsigned char *iotype, resource_size_t *addr,
EXPORT_SYMBOL_GPL(uart_parse_earlycon);
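
As a quick, hedged illustration of the documented format (the buffer contents below are hypothetical, not taken from this patch):

	char buf[] = "mmio32,0xfe201000,115200n8";	/* the part after "earlycon=<name>," */
	unsigned char iotype;
	resource_size_t addr;
	char *options;

	if (!uart_parse_earlycon(buf, &iotype, &addr, &options)) {
		/* iotype is expected to come back as UPIO_MEM32, addr as
		 * 0xfe201000, and options to point at "115200n8" */
	}
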
/**
- * uart_parse_options - Parse serial port baud/parity/bits/flow control.
- * @options: pointer to option string
- * @baud: pointer to an 'int' variable for the baud rate.
- * @parity: pointer to an 'int' variable for the parity.
- * @bits: pointer to an 'int' variable for the number of data bits.
- * @flow: pointer to an 'int' variable for the flow control character.
+ * uart_parse_options - Parse serial port baud/parity/bits/flow control.
+ * @options: pointer to option string
+ * @baud: pointer to an 'int' variable for the baud rate.
+ * @parity: pointer to an 'int' variable for the parity.
+ * @bits: pointer to an 'int' variable for the number of data bits.
+ * @flow: pointer to an 'int' variable for the flow control character.
*
- * uart_parse_options decodes a string containing the serial console
- * options. The format of the string is <baud><parity><bits><flow>,
- * eg: 115200n8r
+ * uart_parse_options() decodes a string containing the serial console
+ * options. The format of the string is <baud><parity><bits><flow>,
+ * eg: 115200n8r
*/
void
uart_parse_options(const char *options, int *baud, int *parity,
@@ -2084,13 +2206,13 @@ uart_parse_options(const char *options, int *baud, int *parity,
EXPORT_SYMBOL_GPL(uart_parse_options);
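
A short worked example of the option string format described above, using the value from the kernel-doc:

	int baud, parity, bits, flow;

	uart_parse_options("115200n8r", &baud, &parity, &bits, &flow);
	/* baud == 115200, parity == 'n', bits == 8, flow == 'r' */
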
/**
- * uart_set_options - setup the serial console parameters
- * @port: pointer to the serial ports uart_port structure
- * @co: console pointer
- * @baud: baud rate
- * @parity: parity character - 'n' (none), 'o' (odd), 'e' (even)
- * @bits: number of data bits
- * @flow: flow control character - 'r' (rts)
+ * uart_set_options - setup the serial console parameters
+ * @port: pointer to the serial ports uart_port structure
+ * @co: console pointer
+ * @baud: baud rate
+ * @parity: parity character - 'n' (none), 'o' (odd), 'e' (even)
+ * @bits: number of data bits
+ * @flow: flow control character - 'r' (rts)
*/
int
uart_set_options(struct uart_port *port, struct console *co,
@@ -2577,17 +2699,19 @@ static const struct tty_port_operations uart_port_ops = {
};
/**
- * uart_register_driver - register a driver with the uart core layer
- * @drv: low level driver structure
+ * uart_register_driver - register a driver with the uart core layer
+ * @drv: low level driver structure
+ *
+ * Register a uart driver with the core driver. We in turn register with the
+ * tty layer, and initialise the core driver per-port state.
*
- * Register a uart driver with the core driver. We in turn register
- * with the tty layer, and initialise the core driver per-port state.
+ * We have a proc file in /proc/tty/driver which is named after the normal
+ * driver.
*
- * We have a proc file in /proc/tty/driver which is named after the
- * normal driver.
+ * @drv->port should be %NULL, and the per-port structures should be registered
+ * using uart_add_one_port() after this call has succeeded.
*
- * drv->port should be NULL, and the per-port structures should be
- * registered using uart_add_one_port after this call has succeeded.
+ * Locking: none, Interrupts: enabled
*/
int uart_register_driver(struct uart_driver *drv)
{
@@ -2651,13 +2775,14 @@ out:
EXPORT_SYMBOL(uart_register_driver);
/**
- * uart_unregister_driver - remove a driver from the uart core layer
- * @drv: low level driver structure
+ * uart_unregister_driver - remove a driver from the uart core layer
+ * @drv: low level driver structure
*
- * Remove all references to a driver from the core driver. The low
- * level driver must have removed all its ports via the
- * uart_remove_one_port() if it registered them with uart_add_one_port().
- * (ie, drv->port == NULL)
+ * Remove all references to a driver from the core driver. The low level
+ * driver must have removed all its ports via the uart_remove_one_port() if it
+ * registered them with uart_add_one_port(). (I.e. @drv->port is %NULL.)
+ *
+ * Locking: none, Interrupts: enabled
*/
void uart_unregister_driver(struct uart_driver *drv)
{
@@ -2906,16 +3031,15 @@ static const struct attribute_group tty_dev_attr_group = {
};
/**
- * uart_add_one_port - attach a driver-defined port structure
- * @drv: pointer to the uart low level driver structure for this port
- * @uport: uart port structure to use for this port.
+ * uart_add_one_port - attach a driver-defined port structure
+ * @drv: pointer to the uart low level driver structure for this port
+ * @uport: uart port structure to use for this port.
*
- * Context: task context, might sleep
+ * Context: task context, might sleep
*
- * This allows the driver to register its own uart_port structure
- * with the core driver. The main purpose is to allow the low
- * level uart drivers to expand uart_port, rather than having yet
- * more levels of structures.
+ * This allows the driver @drv to register its own uart_port structure with the
+ * core driver. The main purpose is to allow the low level uart drivers to
+ * expand uart_port, rather than having yet more levels of structures.
*/
int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
{
@@ -3010,15 +3134,14 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
EXPORT_SYMBOL(uart_add_one_port);
/**
- * uart_remove_one_port - detach a driver defined port structure
- * @drv: pointer to the uart low level driver structure for this port
- * @uport: uart port structure for this port
+ * uart_remove_one_port - detach a driver defined port structure
+ * @drv: pointer to the uart low level driver structure for this port
+ * @uport: uart port structure for this port
*
- * Context: task context, might sleep
+ * Context: task context, might sleep
*
- * This unhooks (and hangs up) the specified port structure from the
- * core driver. No further calls will be made to the low-level code
- * for this port.
+ * This unhooks (and hangs up) the specified port structure from the core
+ * driver. No further calls will be made to the low-level code for this port.
*/
int uart_remove_one_port(struct uart_driver *drv, struct uart_port *uport)
{
@@ -3090,8 +3213,13 @@ out:
}
EXPORT_SYMBOL(uart_remove_one_port);
-/*
- * Are the two ports equivalent?
+/**
+ * uart_match_port - are the two ports equivalent?
+ * @port1: first port
+ * @port2: second port
+ *
+ * This utility function can be used to determine whether two uart_port
+ * structures describe the same port.
*/
bool uart_match_port(const struct uart_port *port1,
const struct uart_port *port2)
@@ -3119,11 +3247,11 @@ bool uart_match_port(const struct uart_port *port1,
EXPORT_SYMBOL(uart_match_port);
/**
- * uart_handle_dcd_change - handle a change of carrier detect state
- * @uport: uart_port structure for the open port
- * @status: new carrier detect status, nonzero if active
+ * uart_handle_dcd_change - handle a change of carrier detect state
+ * @uport: uart_port structure for the open port
+ * @status: new carrier detect status, nonzero if active
*
- * Caller must hold uport->lock
+ * Caller must hold uport->lock.
*/
void uart_handle_dcd_change(struct uart_port *uport, unsigned int status)
{
@@ -3154,11 +3282,11 @@ void uart_handle_dcd_change(struct uart_port *uport, unsigned int status)
EXPORT_SYMBOL_GPL(uart_handle_dcd_change);
/**
- * uart_handle_cts_change - handle a change of clear-to-send state
- * @uport: uart_port structure for the open port
- * @status: new clear to send status, nonzero if active
+ * uart_handle_cts_change - handle a change of clear-to-send state
+ * @uport: uart_port structure for the open port
+ * @status: new clear to send status, nonzero if active
*
- * Caller must hold uport->lock
+ * Caller must hold uport->lock.
*/
void uart_handle_cts_change(struct uart_port *uport, unsigned int status)
{
@@ -3229,15 +3357,15 @@ static void uart_sysrq_on(struct work_struct *w)
static DECLARE_WORK(sysrq_enable_work, uart_sysrq_on);
/**
- * uart_try_toggle_sysrq - Enables SysRq from serial line
- * @port: uart_port structure where char(s) after BREAK met
- * @ch: new character in the sequence after received BREAK
+ * uart_try_toggle_sysrq - Enables SysRq from serial line
+ * @port: uart_port structure where char(s) after BREAK met
+ * @ch: new character in the sequence after received BREAK
*
- * Enables magic SysRq when the required sequence is met on port
- * (see CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE).
+ * Enables magic SysRq when the required sequence is met on port
+ * (see CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE).
*
- * Returns false if @ch is out of enabling sequence and should be
- * handled some other way, true if @ch was consumed.
+ * Returns: %false if @ch is out of enabling sequence and should be
+ * handled some other way, %true if @ch was consumed.
*/
bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch)
{
@@ -3289,6 +3417,8 @@ int uart_get_rs485_mode(struct uart_port *port)
rs485conf->delay_rts_after_send = 0;
}
+ uart_sanitize_serial_rs485_delays(port, rs485conf);
+
/*
* Clear full-duplex and enabled flags, set RTS polarity to active high
* to get to a defined state with the following properties:
@@ -3321,10 +3451,20 @@ int uart_get_rs485_mode(struct uart_port *port)
port->rs485_term_gpio = NULL;
return dev_err_probe(dev, ret, "Cannot get rs485-term-gpios\n");
}
+ if (port->rs485_term_gpio)
+ port->rs485_supported.flags |= SER_RS485_TERMINATE_BUS;
return 0;
}
EXPORT_SYMBOL_GPL(uart_get_rs485_mode);
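
With an rs485-term-gpios property present, ports probed through this helper now also advertise bus termination; a hedged userspace fragment, reusing the TIOCSRS485 flow sketched earlier:

	rs485.flags |= SER_RS485_TERMINATE_BUS;	/* request on-board RS-485 bus termination */
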
+/* Compile-time assertions for serial_rs485 layout */
+static_assert(offsetof(struct serial_rs485, padding) ==
+ (offsetof(struct serial_rs485, delay_rts_after_send) + sizeof(__u32)));
+static_assert(offsetof(struct serial_rs485, padding1) ==
+ offsetof(struct serial_rs485, padding[1]));
+static_assert((offsetof(struct serial_rs485, padding[4]) + sizeof(__u32)) ==
+ sizeof(struct serial_rs485));
+
MODULE_DESCRIPTION("Serial driver core");
MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c
index 1663b3afc3a0..7d5aaa8d422b 100644
--- a/drivers/tty/serial/serial_mctrl_gpio.c
+++ b/drivers/tty/serial/serial_mctrl_gpio.c
@@ -42,6 +42,13 @@ static bool mctrl_gpio_flags_is_dir_out(unsigned int idx)
return mctrl_gpios_desc[idx].flags & GPIOD_FLAGS_BIT_DIR_OUT;
}
+/**
+ * mctrl_gpio_set - set gpios according to mctrl state
+ * @gpios: gpios to set
+ * @mctrl: state to set
+ *
+ * Set the gpios according to the mctrl state.
+ */
void mctrl_gpio_set(struct mctrl_gpios *gpios, unsigned int mctrl)
{
enum mctrl_gpio_idx i;
@@ -63,6 +70,12 @@ void mctrl_gpio_set(struct mctrl_gpios *gpios, unsigned int mctrl)
}
EXPORT_SYMBOL_GPL(mctrl_gpio_set);
+/**
+ * mctrl_gpio_to_gpiod - obtain gpio_desc of modem line index
+ * @gpios: gpios to look into
+ * @gidx: index of the modem line
+ * Returns: the gpio_desc structure associated to the modem line index
+ */
struct gpio_desc *mctrl_gpio_to_gpiod(struct mctrl_gpios *gpios,
enum mctrl_gpio_idx gidx)
{
@@ -73,6 +86,14 @@ struct gpio_desc *mctrl_gpio_to_gpiod(struct mctrl_gpios *gpios,
}
EXPORT_SYMBOL_GPL(mctrl_gpio_to_gpiod);
+/**
+ * mctrl_gpio_get - update mctrl with the gpios values.
+ * @gpios: gpios to get the info from
+ * @mctrl: mctrl to set
+ * Returns: modified mctrl (the same value as in @mctrl)
+ *
+ * Update mctrl with the gpios values.
+ */
unsigned int mctrl_gpio_get(struct mctrl_gpios *gpios, unsigned int *mctrl)
{
enum mctrl_gpio_idx i;
@@ -189,6 +210,17 @@ static irqreturn_t mctrl_gpio_irq_handle(int irq, void *context)
return IRQ_HANDLED;
}
+/**
+ * mctrl_gpio_init - initialize uart gpios
+ * @port: port to initialize gpios for
+ * @idx: index of the gpio in the @port's device
+ *
+ * This will get the {cts,rts,...}-gpios from device tree if they are present
+ * and request them, set direction etc, and return an allocated structure.
+ * `devm_*` functions are used, so there's no need to call mctrl_gpio_free().
+ * As this sets up the irq handling, make sure to not handle changes to the
+ * gpio input lines in your driver, too.
+ */
struct mctrl_gpios *mctrl_gpio_init(struct uart_port *port, unsigned int idx)
{
struct mctrl_gpios *gpios;
@@ -235,6 +267,14 @@ struct mctrl_gpios *mctrl_gpio_init(struct uart_port *port, unsigned int idx)
}
EXPORT_SYMBOL_GPL(mctrl_gpio_init);
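
A hedged sketch of how a serial driver typically consumes these helpers; the wrapper struct and variable names are illustrative only:

	struct my_uart {
		struct uart_port	port;
		struct mctrl_gpios	*gpios;
	};

	/* in probe: request the {cts,rts,...}-gpios for port index 0 */
	s->gpios = mctrl_gpio_init(&s->port, 0);
	if (IS_ERR(s->gpios))
		return PTR_ERR(s->gpios);

	/* later, e.g. from the set_mctrl() and enable_ms() callbacks */
	mctrl_gpio_set(s->gpios, mctrl);	/* drive the output lines (RTS, DTR, ...) */
	mctrl_gpio_enable_ms(s->gpios);		/* enable irqs on the input lines (CTS, DCD, ...) */
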
+/**
+ * mctrl_gpio_free - explicitly free uart gpios
+ * @dev: uart port's device
+ * @gpios: gpios structure to be freed
+ *
+ * This will free the requested gpios in mctrl_gpio_init(). As `devm_*`
+ * functions are used, there's generally no need to call this function.
+ */
void mctrl_gpio_free(struct device *dev, struct mctrl_gpios *gpios)
{
enum mctrl_gpio_idx i;
@@ -253,6 +293,10 @@ void mctrl_gpio_free(struct device *dev, struct mctrl_gpios *gpios)
}
EXPORT_SYMBOL_GPL(mctrl_gpio_free);
+/**
+ * mctrl_gpio_enable_ms - enable irqs and handling of changes to the ms lines
+ * @gpios: gpios to enable
+ */
void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios)
{
enum mctrl_gpio_idx i;
@@ -278,6 +322,10 @@ void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios)
}
EXPORT_SYMBOL_GPL(mctrl_gpio_enable_ms);
+/**
+ * mctrl_gpio_disable_ms - disable irqs and handling of changes to the ms lines
+ * @gpios: gpios to disable
+ */
void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios)
{
enum mctrl_gpio_idx i;
diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c
index c0869b080cc3..5c3a07546a58 100644
--- a/drivers/tty/serial/sifive.c
+++ b/drivers/tty/serial/sifive.c
@@ -4,16 +4,6 @@
* Copyright (C) 2018 Paul Walmsley <paul@pwsan.com>
* Copyright (C) 2018-2019 SiFive
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Based partially on:
* - drivers/tty/serial/pxa.c
* - drivers/tty/serial/amba-pl011.c
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index 1b0da603ab54..cce42f4c9bc2 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -17,7 +17,6 @@
#include <linux/tty_flip.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
-#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/serial_core.h>
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 0973b03eeeaa..2c85dbf165c4 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -35,6 +35,75 @@
#include "serial_mctrl_gpio.h"
#include "stm32-usart.h"
+
+/* Register offsets */
+static struct stm32_usart_info stm32f4_info = {
+ .ofs = {
+ .isr = 0x00,
+ .rdr = 0x04,
+ .tdr = 0x04,
+ .brr = 0x08,
+ .cr1 = 0x0c,
+ .cr2 = 0x10,
+ .cr3 = 0x14,
+ .gtpr = 0x18,
+ .rtor = UNDEF_REG,
+ .rqr = UNDEF_REG,
+ .icr = UNDEF_REG,
+ },
+ .cfg = {
+ .uart_enable_bit = 13,
+ .has_7bits_data = false,
+ .fifosize = 1,
+ }
+};
+
+static struct stm32_usart_info stm32f7_info = {
+ .ofs = {
+ .cr1 = 0x00,
+ .cr2 = 0x04,
+ .cr3 = 0x08,
+ .brr = 0x0c,
+ .gtpr = 0x10,
+ .rtor = 0x14,
+ .rqr = 0x18,
+ .isr = 0x1c,
+ .icr = 0x20,
+ .rdr = 0x24,
+ .tdr = 0x28,
+ },
+ .cfg = {
+ .uart_enable_bit = 0,
+ .has_7bits_data = true,
+ .has_swap = true,
+ .fifosize = 1,
+ }
+};
+
+static struct stm32_usart_info stm32h7_info = {
+ .ofs = {
+ .cr1 = 0x00,
+ .cr2 = 0x04,
+ .cr3 = 0x08,
+ .brr = 0x0c,
+ .gtpr = 0x10,
+ .rtor = 0x14,
+ .rqr = 0x18,
+ .isr = 0x1c,
+ .icr = 0x20,
+ .rdr = 0x24,
+ .tdr = 0x28,
+ },
+ .cfg = {
+ .uart_enable_bit = 0,
+ .has_7bits_data = true,
+ .has_swap = true,
+ .has_wakeup = true,
+ .has_fifo = true,
+ .fifosize = 16,
+ }
+};
+
static void stm32_usart_stop_tx(struct uart_port *port);
static void stm32_usart_transmit_chars(struct uart_port *port);
static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch);
@@ -99,7 +168,7 @@ static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE,
*cr1 |= rs485_deat_dedt;
}
-static int stm32_usart_config_rs485(struct uart_port *port,
+static int stm32_usart_config_rs485(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485conf)
{
struct stm32_port *stm32_port = to_stm32_port(port);
@@ -1377,6 +1446,13 @@ static void stm32_usart_deinit_port(struct stm32_port *stm32port)
clk_disable_unprepare(stm32port->clk);
}
+static const struct serial_rs485 stm32_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
+ SER_RS485_RX_DURING_TX,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1,
+};
+
static int stm32_usart_init_port(struct stm32_port *stm32port,
struct platform_device *pdev)
{
@@ -1396,6 +1472,7 @@ static int stm32_usart_init_port(struct stm32_port *stm32port,
port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_STM32_CONSOLE);
port->irq = irq;
port->rs485_config = stm32_usart_config_rs485;
+ port->rs485_supported = stm32_rs485_supported;
ret = stm32_usart_init_rs485(port, pdev);
if (ret)
diff --git a/drivers/tty/serial/stm32-usart.h b/drivers/tty/serial/stm32-usart.h
index ee69c203b926..0ec41a732c88 100644
--- a/drivers/tty/serial/stm32-usart.h
+++ b/drivers/tty/serial/stm32-usart.h
@@ -38,74 +38,6 @@ struct stm32_usart_info {
#define UNDEF_REG 0xff
-/* Register offsets */
-struct stm32_usart_info stm32f4_info = {
- .ofs = {
- .isr = 0x00,
- .rdr = 0x04,
- .tdr = 0x04,
- .brr = 0x08,
- .cr1 = 0x0c,
- .cr2 = 0x10,
- .cr3 = 0x14,
- .gtpr = 0x18,
- .rtor = UNDEF_REG,
- .rqr = UNDEF_REG,
- .icr = UNDEF_REG,
- },
- .cfg = {
- .uart_enable_bit = 13,
- .has_7bits_data = false,
- .fifosize = 1,
- }
-};
-
-struct stm32_usart_info stm32f7_info = {
- .ofs = {
- .cr1 = 0x00,
- .cr2 = 0x04,
- .cr3 = 0x08,
- .brr = 0x0c,
- .gtpr = 0x10,
- .rtor = 0x14,
- .rqr = 0x18,
- .isr = 0x1c,
- .icr = 0x20,
- .rdr = 0x24,
- .tdr = 0x28,
- },
- .cfg = {
- .uart_enable_bit = 0,
- .has_7bits_data = true,
- .has_swap = true,
- .fifosize = 1,
- }
-};
-
-struct stm32_usart_info stm32h7_info = {
- .ofs = {
- .cr1 = 0x00,
- .cr2 = 0x04,
- .cr3 = 0x08,
- .brr = 0x0c,
- .gtpr = 0x10,
- .rtor = 0x14,
- .rqr = 0x18,
- .isr = 0x1c,
- .icr = 0x20,
- .rdr = 0x24,
- .tdr = 0x28,
- },
- .cfg = {
- .uart_enable_bit = 0,
- .has_7bits_data = true,
- .has_swap = true,
- .has_wakeup = true,
- .has_fifo = true,
- .fifosize = 16,
- }
-};
-
/* USART_SR (F4) / USART_ISR (F7) */
#define USART_SR_PE BIT(0)
#define USART_SR_FE BIT(1)
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index fff50b5b82eb..84d545e5a8c7 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -1249,8 +1249,6 @@ static int sunsu_kbd_ms_init(struct uart_sunsu_port *up)
#ifdef CONFIG_SERIAL_SUNSU_CONSOLE
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
/*
* Wait for transmitter & holding register to empty
*/
@@ -1268,7 +1266,7 @@ static void wait_for_xmitr(struct uart_sunsu_port *up)
if (--tmout == 0)
break;
udelay(1);
- } while ((status & BOTH_EMPTY) != BOTH_EMPTY);
+ } while (!uart_lsr_tx_empty(status));
/* Wait up to 1s for flow control if necessary */
if (up->port.flags & UPF_CONS_FLOW) {
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index 6000853973c1..3cc9ef08455c 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -1137,6 +1137,8 @@ static unsigned int soc_info(unsigned int *rev_h, unsigned int *rev_l)
/* No compatible property, so try the name. */
soc_string = np->name;
+ of_node_put(np);
+
/* Extract the SOC number from the "PowerPC," string */
if ((sscanf(soc_string, "PowerPC,%u", &soc) != 1) || !soc)
return 0;
diff --git a/drivers/tty/serial/vr41xx_siu.c b/drivers/tty/serial/vr41xx_siu.c
deleted file mode 100644
index e0bf003ca3a1..000000000000
--- a/drivers/tty/serial/vr41xx_siu.c
+++ /dev/null
@@ -1,934 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Driver for NEC VR4100 series Serial Interface Unit.
- *
- * Copyright (C) 2004-2008 Yoichi Yuasa <yuasa@linux-mips.org>
- *
- * Based on drivers/serial/8250.c, by Russell King.
- */
-
-#include <linux/console.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/serial.h>
-#include <linux/serial_core.h>
-#include <linux/serial_reg.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-
-#include <linux/io.h>
-#include <asm/vr41xx/siu.h>
-#include <asm/vr41xx/vr41xx.h>
-
-#define SIU_BAUD_BASE 1152000
-#define SIU_MAJOR 204
-#define SIU_MINOR_BASE 82
-
-#define RX_MAX_COUNT 256
-#define TX_MAX_COUNT 15
-
-#define SIUIRSEL 0x08
- #define TMICMODE 0x20
- #define TMICTX 0x10
- #define IRMSEL 0x0c
- #define IRMSEL_HP 0x08
- #define IRMSEL_TEMIC 0x04
- #define IRMSEL_SHARP 0x00
- #define IRUSESEL 0x02
- #define SIRSEL 0x01
-
-static struct uart_port siu_uart_ports[SIU_PORTS_MAX] = {
- [0 ... SIU_PORTS_MAX-1] = {
- .lock = __SPIN_LOCK_UNLOCKED(siu_uart_ports->lock),
- .irq = 0,
- },
-};
-
-#ifdef CONFIG_SERIAL_VR41XX_CONSOLE
-static uint8_t lsr_break_flag[SIU_PORTS_MAX];
-#endif
-
-#define siu_read(port, offset) readb((port)->membase + (offset))
-#define siu_write(port, offset, value) writeb((value), (port)->membase + (offset))
-
-void vr41xx_select_siu_interface(siu_interface_t interface)
-{
- struct uart_port *port;
- unsigned long flags;
- uint8_t irsel;
-
- port = &siu_uart_ports[0];
-
- spin_lock_irqsave(&port->lock, flags);
-
- irsel = siu_read(port, SIUIRSEL);
- if (interface == SIU_INTERFACE_IRDA)
- irsel |= SIRSEL;
- else
- irsel &= ~SIRSEL;
- siu_write(port, SIUIRSEL, irsel);
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-EXPORT_SYMBOL_GPL(vr41xx_select_siu_interface);
-
-void vr41xx_use_irda(irda_use_t use)
-{
- struct uart_port *port;
- unsigned long flags;
- uint8_t irsel;
-
- port = &siu_uart_ports[0];
-
- spin_lock_irqsave(&port->lock, flags);
-
- irsel = siu_read(port, SIUIRSEL);
- if (use == FIR_USE_IRDA)
- irsel |= IRUSESEL;
- else
- irsel &= ~IRUSESEL;
- siu_write(port, SIUIRSEL, irsel);
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-EXPORT_SYMBOL_GPL(vr41xx_use_irda);
-
-void vr41xx_select_irda_module(irda_module_t module, irda_speed_t speed)
-{
- struct uart_port *port;
- unsigned long flags;
- uint8_t irsel;
-
- port = &siu_uart_ports[0];
-
- spin_lock_irqsave(&port->lock, flags);
-
- irsel = siu_read(port, SIUIRSEL);
- irsel &= ~(IRMSEL | TMICTX | TMICMODE);
- switch (module) {
- case SHARP_IRDA:
- irsel |= IRMSEL_SHARP;
- break;
- case TEMIC_IRDA:
- irsel |= IRMSEL_TEMIC | TMICMODE;
- if (speed == IRDA_TX_4MBPS)
- irsel |= TMICTX;
- break;
- case HP_IRDA:
- irsel |= IRMSEL_HP;
- break;
- default:
- break;
- }
- siu_write(port, SIUIRSEL, irsel);
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-EXPORT_SYMBOL_GPL(vr41xx_select_irda_module);
-
-static inline void siu_clear_fifo(struct uart_port *port)
-{
- siu_write(port, UART_FCR, UART_FCR_ENABLE_FIFO);
- siu_write(port, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR |
- UART_FCR_CLEAR_XMIT);
- siu_write(port, UART_FCR, 0);
-}
-
-static inline unsigned long siu_port_size(struct uart_port *port)
-{
- switch (port->type) {
- case PORT_VR41XX_SIU:
- return 11UL;
- case PORT_VR41XX_DSIU:
- return 8UL;
- }
-
- return 0;
-}
-
-static inline unsigned int siu_check_type(struct uart_port *port)
-{
- if (port->line == 0)
- return PORT_VR41XX_SIU;
- if (port->line == 1 && port->irq)
- return PORT_VR41XX_DSIU;
-
- return PORT_UNKNOWN;
-}
-
-static inline const char *siu_type_name(struct uart_port *port)
-{
- switch (port->type) {
- case PORT_VR41XX_SIU:
- return "SIU";
- case PORT_VR41XX_DSIU:
- return "DSIU";
- }
-
- return NULL;
-}
-
-static unsigned int siu_tx_empty(struct uart_port *port)
-{
- uint8_t lsr;
-
- lsr = siu_read(port, UART_LSR);
- if (lsr & UART_LSR_TEMT)
- return TIOCSER_TEMT;
-
- return 0;
-}
-
-static void siu_set_mctrl(struct uart_port *port, unsigned int mctrl)
-{
- uint8_t mcr = 0;
-
- if (mctrl & TIOCM_DTR)
- mcr |= UART_MCR_DTR;
- if (mctrl & TIOCM_RTS)
- mcr |= UART_MCR_RTS;
- if (mctrl & TIOCM_OUT1)
- mcr |= UART_MCR_OUT1;
- if (mctrl & TIOCM_OUT2)
- mcr |= UART_MCR_OUT2;
- if (mctrl & TIOCM_LOOP)
- mcr |= UART_MCR_LOOP;
-
- siu_write(port, UART_MCR, mcr);
-}
-
-static unsigned int siu_get_mctrl(struct uart_port *port)
-{
- uint8_t msr;
- unsigned int mctrl = 0;
-
- msr = siu_read(port, UART_MSR);
- if (msr & UART_MSR_DCD)
- mctrl |= TIOCM_CAR;
- if (msr & UART_MSR_RI)
- mctrl |= TIOCM_RNG;
- if (msr & UART_MSR_DSR)
- mctrl |= TIOCM_DSR;
- if (msr & UART_MSR_CTS)
- mctrl |= TIOCM_CTS;
-
- return mctrl;
-}
-
-static void siu_stop_tx(struct uart_port *port)
-{
- unsigned long flags;
- uint8_t ier;
-
- spin_lock_irqsave(&port->lock, flags);
-
- ier = siu_read(port, UART_IER);
- ier &= ~UART_IER_THRI;
- siu_write(port, UART_IER, ier);
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static void siu_start_tx(struct uart_port *port)
-{
- unsigned long flags;
- uint8_t ier;
-
- spin_lock_irqsave(&port->lock, flags);
-
- ier = siu_read(port, UART_IER);
- ier |= UART_IER_THRI;
- siu_write(port, UART_IER, ier);
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static void siu_stop_rx(struct uart_port *port)
-{
- unsigned long flags;
- uint8_t ier;
-
- spin_lock_irqsave(&port->lock, flags);
-
- ier = siu_read(port, UART_IER);
- ier &= ~UART_IER_RLSI;
- siu_write(port, UART_IER, ier);
-
- port->read_status_mask &= ~UART_LSR_DR;
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static void siu_enable_ms(struct uart_port *port)
-{
- unsigned long flags;
- uint8_t ier;
-
- spin_lock_irqsave(&port->lock, flags);
-
- ier = siu_read(port, UART_IER);
- ier |= UART_IER_MSI;
- siu_write(port, UART_IER, ier);
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static void siu_break_ctl(struct uart_port *port, int ctl)
-{
- unsigned long flags;
- uint8_t lcr;
-
- spin_lock_irqsave(&port->lock, flags);
-
- lcr = siu_read(port, UART_LCR);
- if (ctl == -1)
- lcr |= UART_LCR_SBC;
- else
- lcr &= ~UART_LCR_SBC;
- siu_write(port, UART_LCR, lcr);
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static inline void receive_chars(struct uart_port *port, uint8_t *status)
-{
- uint8_t lsr, ch;
- char flag;
- int max_count = RX_MAX_COUNT;
-
- lsr = *status;
-
- do {
- ch = siu_read(port, UART_RX);
- port->icount.rx++;
- flag = TTY_NORMAL;
-
-#ifdef CONFIG_SERIAL_VR41XX_CONSOLE
- lsr |= lsr_break_flag[port->line];
- lsr_break_flag[port->line] = 0;
-#endif
- if (unlikely(lsr & (UART_LSR_BI | UART_LSR_FE |
- UART_LSR_PE | UART_LSR_OE))) {
- if (lsr & UART_LSR_BI) {
- lsr &= ~(UART_LSR_FE | UART_LSR_PE);
- port->icount.brk++;
-
- if (uart_handle_break(port))
- goto ignore_char;
- }
-
- if (lsr & UART_LSR_FE)
- port->icount.frame++;
- if (lsr & UART_LSR_PE)
- port->icount.parity++;
- if (lsr & UART_LSR_OE)
- port->icount.overrun++;
-
- lsr &= port->read_status_mask;
- if (lsr & UART_LSR_BI)
- flag = TTY_BREAK;
- if (lsr & UART_LSR_FE)
- flag = TTY_FRAME;
- if (lsr & UART_LSR_PE)
- flag = TTY_PARITY;
- }
-
- if (uart_handle_sysrq_char(port, ch))
- goto ignore_char;
-
- uart_insert_char(port, lsr, UART_LSR_OE, ch, flag);
-
- ignore_char:
- lsr = siu_read(port, UART_LSR);
- } while ((lsr & UART_LSR_DR) && (max_count-- > 0));
-
- tty_flip_buffer_push(&port->state->port);
-
- *status = lsr;
-}
-
-static inline void check_modem_status(struct uart_port *port)
-{
- uint8_t msr;
-
- msr = siu_read(port, UART_MSR);
- if ((msr & UART_MSR_ANY_DELTA) == 0)
- return;
- if (msr & UART_MSR_DDCD)
- uart_handle_dcd_change(port, msr & UART_MSR_DCD);
- if (msr & UART_MSR_TERI)
- port->icount.rng++;
- if (msr & UART_MSR_DDSR)
- port->icount.dsr++;
- if (msr & UART_MSR_DCTS)
- uart_handle_cts_change(port, msr & UART_MSR_CTS);
-
- wake_up_interruptible(&port->state->port.delta_msr_wait);
-}
-
-static inline void transmit_chars(struct uart_port *port)
-{
- struct circ_buf *xmit;
- int max_count = TX_MAX_COUNT;
-
- xmit = &port->state->xmit;
-
- if (port->x_char) {
- siu_write(port, UART_TX, port->x_char);
- port->icount.tx++;
- port->x_char = 0;
- return;
- }
-
- if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
- siu_stop_tx(port);
- return;
- }
-
- do {
- siu_write(port, UART_TX, xmit->buf[xmit->tail]);
- xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
- port->icount.tx++;
- if (uart_circ_empty(xmit))
- break;
- } while (max_count-- > 0);
-
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
- uart_write_wakeup(port);
-
- if (uart_circ_empty(xmit))
- siu_stop_tx(port);
-}
-
-static irqreturn_t siu_interrupt(int irq, void *dev_id)
-{
- struct uart_port *port;
- uint8_t iir, lsr;
-
- port = (struct uart_port *)dev_id;
-
- iir = siu_read(port, UART_IIR);
- if (iir & UART_IIR_NO_INT)
- return IRQ_NONE;
-
- lsr = siu_read(port, UART_LSR);
- if (lsr & UART_LSR_DR)
- receive_chars(port, &lsr);
-
- check_modem_status(port);
-
- if (lsr & UART_LSR_THRE)
- transmit_chars(port);
-
- return IRQ_HANDLED;
-}
-
-static int siu_startup(struct uart_port *port)
-{
- int retval;
-
- if (port->membase == NULL)
- return -ENODEV;
-
- siu_clear_fifo(port);
-
- (void)siu_read(port, UART_LSR);
- (void)siu_read(port, UART_RX);
- (void)siu_read(port, UART_IIR);
- (void)siu_read(port, UART_MSR);
-
- if (siu_read(port, UART_LSR) == 0xff)
- return -ENODEV;
-
- retval = request_irq(port->irq, siu_interrupt, 0, siu_type_name(port), port);
- if (retval)
- return retval;
-
- if (port->type == PORT_VR41XX_DSIU)
- vr41xx_enable_dsiuint(DSIUINT_ALL);
-
- siu_write(port, UART_LCR, UART_LCR_WLEN8);
-
- spin_lock_irq(&port->lock);
- siu_set_mctrl(port, port->mctrl);
- spin_unlock_irq(&port->lock);
-
- siu_write(port, UART_IER, UART_IER_RLSI | UART_IER_RDI);
-
- (void)siu_read(port, UART_LSR);
- (void)siu_read(port, UART_RX);
- (void)siu_read(port, UART_IIR);
- (void)siu_read(port, UART_MSR);
-
- return 0;
-}
-
-static void siu_shutdown(struct uart_port *port)
-{
- unsigned long flags;
- uint8_t lcr;
-
- siu_write(port, UART_IER, 0);
-
- spin_lock_irqsave(&port->lock, flags);
-
- port->mctrl &= ~TIOCM_OUT2;
- siu_set_mctrl(port, port->mctrl);
-
- spin_unlock_irqrestore(&port->lock, flags);
-
- lcr = siu_read(port, UART_LCR);
- lcr &= ~UART_LCR_SBC;
- siu_write(port, UART_LCR, lcr);
-
- siu_clear_fifo(port);
-
- (void)siu_read(port, UART_RX);
-
- if (port->type == PORT_VR41XX_DSIU)
- vr41xx_disable_dsiuint(DSIUINT_ALL);
-
- free_irq(port->irq, port);
-}
-
-static void siu_set_termios(struct uart_port *port, struct ktermios *new,
- struct ktermios *old)
-{
- tcflag_t c_cflag, c_iflag;
- uint8_t lcr, fcr, ier;
- unsigned int baud, quot;
- unsigned long flags;
-
- c_cflag = new->c_cflag;
- lcr = UART_LCR_WLEN(tty_get_char_size(c_cflag));
-
- if (c_cflag & CSTOPB)
- lcr |= UART_LCR_STOP;
- if (c_cflag & PARENB)
- lcr |= UART_LCR_PARITY;
- if ((c_cflag & PARODD) != PARODD)
- lcr |= UART_LCR_EPAR;
- if (c_cflag & CMSPAR)
- lcr |= UART_LCR_SPAR;
-
- baud = uart_get_baud_rate(port, new, old, 0, port->uartclk/16);
- quot = uart_get_divisor(port, baud);
-
- fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10;
-
- spin_lock_irqsave(&port->lock, flags);
-
- uart_update_timeout(port, c_cflag, baud);
-
- c_iflag = new->c_iflag;
-
- port->read_status_mask = UART_LSR_THRE | UART_LSR_OE | UART_LSR_DR;
- if (c_iflag & INPCK)
- port->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
- if (c_iflag & (IGNBRK | BRKINT | PARMRK))
- port->read_status_mask |= UART_LSR_BI;
-
- port->ignore_status_mask = 0;
- if (c_iflag & IGNPAR)
- port->ignore_status_mask |= UART_LSR_FE | UART_LSR_PE;
- if (c_iflag & IGNBRK) {
- port->ignore_status_mask |= UART_LSR_BI;
- if (c_iflag & IGNPAR)
- port->ignore_status_mask |= UART_LSR_OE;
- }
-
- if ((c_cflag & CREAD) == 0)
- port->ignore_status_mask |= UART_LSR_DR;
-
- ier = siu_read(port, UART_IER);
- ier &= ~UART_IER_MSI;
- if (UART_ENABLE_MS(port, c_cflag))
- ier |= UART_IER_MSI;
- siu_write(port, UART_IER, ier);
-
- siu_write(port, UART_LCR, lcr | UART_LCR_DLAB);
-
- siu_write(port, UART_DLL, (uint8_t)quot);
- siu_write(port, UART_DLM, (uint8_t)(quot >> 8));
-
- siu_write(port, UART_LCR, lcr);
-
- siu_write(port, UART_FCR, fcr);
-
- siu_set_mctrl(port, port->mctrl);
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static void siu_pm(struct uart_port *port, unsigned int state, unsigned int oldstate)
-{
- switch (state) {
- case 0:
- switch (port->type) {
- case PORT_VR41XX_SIU:
- vr41xx_supply_clock(SIU_CLOCK);
- break;
- case PORT_VR41XX_DSIU:
- vr41xx_supply_clock(DSIU_CLOCK);
- break;
- }
- break;
- case 3:
- switch (port->type) {
- case PORT_VR41XX_SIU:
- vr41xx_mask_clock(SIU_CLOCK);
- break;
- case PORT_VR41XX_DSIU:
- vr41xx_mask_clock(DSIU_CLOCK);
- break;
- }
- break;
- }
-}
-
-static const char *siu_type(struct uart_port *port)
-{
- return siu_type_name(port);
-}
-
-static void siu_release_port(struct uart_port *port)
-{
- unsigned long size;
-
- if (port->flags & UPF_IOREMAP) {
- iounmap(port->membase);
- port->membase = NULL;
- }
-
- size = siu_port_size(port);
- release_mem_region(port->mapbase, size);
-}
-
-static int siu_request_port(struct uart_port *port)
-{
- unsigned long size;
- struct resource *res;
-
- size = siu_port_size(port);
- res = request_mem_region(port->mapbase, size, siu_type_name(port));
- if (res == NULL)
- return -EBUSY;
-
- if (port->flags & UPF_IOREMAP) {
- port->membase = ioremap(port->mapbase, size);
- if (port->membase == NULL) {
- release_resource(res);
- return -ENOMEM;
- }
- }
-
- return 0;
-}
-
-static void siu_config_port(struct uart_port *port, int flags)
-{
- if (flags & UART_CONFIG_TYPE) {
- port->type = siu_check_type(port);
- (void)siu_request_port(port);
- }
-}
-
-static int siu_verify_port(struct uart_port *port, struct serial_struct *serial)
-{
- if (port->type != PORT_VR41XX_SIU && port->type != PORT_VR41XX_DSIU)
- return -EINVAL;
- if (port->irq != serial->irq)
- return -EINVAL;
- if (port->iotype != serial->io_type)
- return -EINVAL;
- if (port->mapbase != (unsigned long)serial->iomem_base)
- return -EINVAL;
-
- return 0;
-}
-
-static const struct uart_ops siu_uart_ops = {
- .tx_empty = siu_tx_empty,
- .set_mctrl = siu_set_mctrl,
- .get_mctrl = siu_get_mctrl,
- .stop_tx = siu_stop_tx,
- .start_tx = siu_start_tx,
- .stop_rx = siu_stop_rx,
- .enable_ms = siu_enable_ms,
- .break_ctl = siu_break_ctl,
- .startup = siu_startup,
- .shutdown = siu_shutdown,
- .set_termios = siu_set_termios,
- .pm = siu_pm,
- .type = siu_type,
- .release_port = siu_release_port,
- .request_port = siu_request_port,
- .config_port = siu_config_port,
- .verify_port = siu_verify_port,
-};
-
-static int siu_init_ports(struct platform_device *pdev)
-{
- struct uart_port *port;
- struct resource *res;
- int *type = dev_get_platdata(&pdev->dev);
- int i;
-
- if (!type)
- return 0;
-
- port = siu_uart_ports;
- for (i = 0; i < SIU_PORTS_MAX; i++) {
- port->type = type[i];
- if (port->type == PORT_UNKNOWN)
- continue;
- port->irq = platform_get_irq(pdev, i);
- port->uartclk = SIU_BAUD_BASE * 16;
- port->fifosize = 16;
- port->regshift = 0;
- port->iotype = UPIO_MEM;
- port->flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF;
- port->line = i;
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- port->mapbase = res->start;
- port++;
- }
-
- return i;
-}
-
-#ifdef CONFIG_SERIAL_VR41XX_CONSOLE
-
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
-static void wait_for_xmitr(struct uart_port *port)
-{
- int timeout = 10000;
- uint8_t lsr, msr;
-
- do {
- lsr = siu_read(port, UART_LSR);
- if (lsr & UART_LSR_BI)
- lsr_break_flag[port->line] = UART_LSR_BI;
-
- if ((lsr & BOTH_EMPTY) == BOTH_EMPTY)
- break;
- } while (timeout-- > 0);
-
- if (port->flags & UPF_CONS_FLOW) {
- timeout = 1000000;
-
- do {
- msr = siu_read(port, UART_MSR);
- if ((msr & UART_MSR_CTS) != 0)
- break;
- } while (timeout-- > 0);
- }
-}
-
-static void siu_console_putchar(struct uart_port *port, unsigned char ch)
-{
- wait_for_xmitr(port);
- siu_write(port, UART_TX, ch);
-}
-
-static void siu_console_write(struct console *con, const char *s, unsigned count)
-{
- struct uart_port *port;
- uint8_t ier;
-
- port = &siu_uart_ports[con->index];
-
- ier = siu_read(port, UART_IER);
- siu_write(port, UART_IER, 0);
-
- uart_console_write(port, s, count, siu_console_putchar);
-
- wait_for_xmitr(port);
- siu_write(port, UART_IER, ier);
-}
-
-static int __init siu_console_setup(struct console *con, char *options)
-{
- struct uart_port *port;
- int baud = 9600;
- int parity = 'n';
- int bits = 8;
- int flow = 'n';
-
- if (con->index >= SIU_PORTS_MAX)
- con->index = 0;
-
- port = &siu_uart_ports[con->index];
- if (port->membase == NULL) {
- if (port->mapbase == 0)
- return -ENODEV;
- port->membase = ioremap(port->mapbase, siu_port_size(port));
- }
-
- if (port->type == PORT_VR41XX_SIU)
- vr41xx_select_siu_interface(SIU_INTERFACE_RS232C);
-
- if (options != NULL)
- uart_parse_options(options, &baud, &parity, &bits, &flow);
-
- return uart_set_options(port, con, baud, parity, bits, flow);
-}
-
-static struct uart_driver siu_uart_driver;
-
-static struct console siu_console = {
- .name = "ttyVR",
- .write = siu_console_write,
- .device = uart_console_device,
- .setup = siu_console_setup,
- .flags = CON_PRINTBUFFER,
- .index = -1,
- .data = &siu_uart_driver,
-};
-
-static int siu_console_init(void)
-{
- struct uart_port *port;
- int i;
-
- for (i = 0; i < SIU_PORTS_MAX; i++) {
- port = &siu_uart_ports[i];
- port->ops = &siu_uart_ops;
- }
-
- register_console(&siu_console);
-
- return 0;
-}
-
-console_initcall(siu_console_init);
-
-void __init vr41xx_siu_early_setup(struct uart_port *port)
-{
- if (port->type == PORT_UNKNOWN)
- return;
-
- siu_uart_ports[port->line].line = port->line;
- siu_uart_ports[port->line].type = port->type;
- siu_uart_ports[port->line].uartclk = SIU_BAUD_BASE * 16;
- siu_uart_ports[port->line].mapbase = port->mapbase;
- siu_uart_ports[port->line].ops = &siu_uart_ops;
-}
-
-#define SERIAL_VR41XX_CONSOLE &siu_console
-#else
-#define SERIAL_VR41XX_CONSOLE NULL
-#endif
-
-static struct uart_driver siu_uart_driver = {
- .owner = THIS_MODULE,
- .driver_name = "SIU",
- .dev_name = "ttyVR",
- .major = SIU_MAJOR,
- .minor = SIU_MINOR_BASE,
- .cons = SERIAL_VR41XX_CONSOLE,
-};
-
-static int siu_probe(struct platform_device *dev)
-{
- struct uart_port *port;
- int num, i, retval;
-
- num = siu_init_ports(dev);
- if (num <= 0)
- return -ENODEV;
-
- siu_uart_driver.nr = num;
- retval = uart_register_driver(&siu_uart_driver);
- if (retval)
- return retval;
-
- for (i = 0; i < num; i++) {
- port = &siu_uart_ports[i];
- port->ops = &siu_uart_ops;
- port->dev = &dev->dev;
- port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_VR41XX_CONSOLE);
-
- retval = uart_add_one_port(&siu_uart_driver, port);
- if (retval < 0) {
- port->dev = NULL;
- break;
- }
- }
-
- if (i == 0 && retval < 0) {
- uart_unregister_driver(&siu_uart_driver);
- return retval;
- }
-
- return 0;
-}
-
-static int siu_remove(struct platform_device *dev)
-{
- struct uart_port *port;
- int i;
-
- for (i = 0; i < siu_uart_driver.nr; i++) {
- port = &siu_uart_ports[i];
- if (port->dev == &dev->dev) {
- uart_remove_one_port(&siu_uart_driver, port);
- port->dev = NULL;
- }
- }
-
- uart_unregister_driver(&siu_uart_driver);
-
- return 0;
-}
-
-static int siu_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct uart_port *port;
- int i;
-
- for (i = 0; i < siu_uart_driver.nr; i++) {
- port = &siu_uart_ports[i];
- if ((port->type == PORT_VR41XX_SIU ||
- port->type == PORT_VR41XX_DSIU) && port->dev == &dev->dev)
- uart_suspend_port(&siu_uart_driver, port);
-
- }
-
- return 0;
-}
-
-static int siu_resume(struct platform_device *dev)
-{
- struct uart_port *port;
- int i;
-
- for (i = 0; i < siu_uart_driver.nr; i++) {
- port = &siu_uart_ports[i];
- if ((port->type == PORT_VR41XX_SIU ||
- port->type == PORT_VR41XX_DSIU) && port->dev == &dev->dev)
- uart_resume_port(&siu_uart_driver, port);
- }
-
- return 0;
-}
-
-static struct platform_driver siu_device_driver = {
- .probe = siu_probe,
- .remove = siu_remove,
- .suspend = siu_suspend,
- .resume = siu_resume,
- .driver = {
- .name = "SIU",
- },
-};
-
-module_platform_driver(siu_device_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:SIU");
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 595d8b49c745..9fdecc795b6b 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -5,6 +5,7 @@
#include <linux/types.h>
#include <linux/errno.h>
+#include <linux/minmax.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
@@ -104,6 +105,7 @@ static void tty_buffer_reset(struct tty_buffer *p, size_t size)
p->size = size;
p->next = NULL;
p->commit = 0;
+ p->lookahead = 0;
p->read = 0;
p->flags = 0;
}
@@ -234,6 +236,7 @@ void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld)
buf->head = next;
}
buf->head->read = buf->head->commit;
+ buf->head->lookahead = buf->head->read;
if (ld && ld->ops->flush_buffer)
ld->ops->flush_buffer(tty);
@@ -276,13 +279,15 @@ static int __tty_buffer_request_room(struct tty_port *port, size_t size,
if (n != NULL) {
n->flags = flags;
buf->tail = n;
- /* paired w/ acquire in flush_to_ldisc(); ensures
- * flush_to_ldisc() sees buffer data.
+ /*
+ * Paired w/ acquire in flush_to_ldisc() and lookahead_bufs()
+ * ensures they see all buffer data.
*/
smp_store_release(&b->commit, b->used);
- /* paired w/ acquire in flush_to_ldisc(); ensures the
- * latest commit value can be read before the head is
- * advanced to the next buffer
+ /*
+ * Paired w/ acquire in flush_to_ldisc() and lookahead_bufs()
+ * ensures the latest commit value can be read before the head
+ * is advanced to the next buffer.
*/
smp_store_release(&b->next, n);
} else if (change)
@@ -459,6 +464,40 @@ int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p,
}
EXPORT_SYMBOL_GPL(tty_ldisc_receive_buf);
+static void lookahead_bufs(struct tty_port *port, struct tty_buffer *head)
+{
+ head->lookahead = max(head->lookahead, head->read);
+
+ while (head) {
+ struct tty_buffer *next;
+ unsigned char *p, *f = NULL;
+ unsigned int count;
+
+ /*
+ * Paired w/ release in __tty_buffer_request_room();
+ * ensures commit value read is not stale if the head
+ * is advancing to the next buffer.
+ */
+ next = smp_load_acquire(&head->next);
+ /*
+ * Paired w/ release in __tty_buffer_request_room() or in
+ * tty_buffer_flush(); ensures we see the committed buffer data.
+ */
+ count = smp_load_acquire(&head->commit) - head->lookahead;
+ if (!count) {
+ head = next;
+ continue;
+ }
+
+ p = char_buf_ptr(head, head->lookahead);
+ if (~head->flags & TTYB_NORMAL)
+ f = flag_buf_ptr(head, head->lookahead);
+
+ port->client_ops->lookahead_buf(port, p, f, count);
+ head->lookahead += count;
+ }
+}
+
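
One reading of the new hook (hedged, not text from the patch): when the line discipline is throttled, receive_buf() may take fewer bytes than are pending; lookahead_bufs() then walks the data that has been committed but not yet read and hands it to the port's lookahead_buf() client operation, so characters that must be acted on immediately (flow control, for instance) are still seen without being consumed. A hypothetical line-discipline hook, only to show the shape of the contract; the helper it calls does not exist in this patch:

	static void my_ldisc_lookahead_buf(struct tty_struct *tty,
					   const unsigned char *cp,
					   const unsigned char *fp,
					   unsigned int count)
	{
		/* Inspect only: the same bytes are delivered again later
		 * through the regular receive_buf() path. */
		while (count--)
			my_scan_for_flow_ctrl(tty, *cp++);	/* hypothetical helper */
	}
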
static int
receive_buf(struct tty_port *port, struct tty_buffer *head, int count)
{
@@ -496,7 +535,7 @@ static void flush_to_ldisc(struct work_struct *work)
while (1) {
struct tty_buffer *head = buf->head;
struct tty_buffer *next;
- int count;
+ int count, rcvd;
/* Ldisc or user is trying to gain exclusive access */
if (atomic_read(&buf->priority))
@@ -519,10 +558,12 @@ static void flush_to_ldisc(struct work_struct *work)
continue;
}
- count = receive_buf(port, head, count);
- if (!count)
+ rcvd = receive_buf(port, head, count);
+ head->read += rcvd;
+ if (rcvd < count)
+ lookahead_bufs(port, head);
+ if (!rcvd)
break;
- head->read += count;
if (need_resched())
cond_resched();
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 8fec1d8648f5..82a8855981f7 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1663,7 +1663,7 @@ void tty_kclose(struct tty_struct *tty)
*/
tty_ldisc_release(tty);
- /* Wait for pending work before tty destruction commmences */
+ /* Wait for pending work before tty destruction commences */
tty_flush_works(tty);
tty_debug_hangup(tty, "freeing structure\n");
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index adae687f654b..2a76b330e108 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -319,6 +319,8 @@ unsigned char tty_get_frame_size(unsigned int cflag)
bits++;
if (cflag & PARENB)
bits++;
+ if (cflag & ADDRB)
+ bits++;
return bits;
}
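
A hedged worked example of the frame-size change (assuming, as in the unchanged start of this function, that one start bit, one stop bit and the data bits are already counted): an 8N1 frame with ADDRB set now accounts for 1 + 8 + 1 + 1 = 11 bits per character, the extra bit being the RS-485 address bit.
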
@@ -353,6 +355,8 @@ int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios)
old_termios = tty->termios;
tty->termios = *new_termios;
unset_locked_termios(tty, &old_termios);
+ /* Reset any ADDRB changes, ADDRB is changed through ->rs485_config() */
+ tty->termios.c_cflag ^= (tty->termios.c_cflag ^ old_termios.c_cflag) & ADDRB;
if (tty->ops->set_termios)
tty->ops->set_termios(tty, &old_termios);
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index 880608a65773..dce08a6d7b5e 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -43,6 +43,26 @@ static int tty_port_default_receive_buf(struct tty_port *port,
return ret;
}
+static void tty_port_default_lookahead_buf(struct tty_port *port, const unsigned char *p,
+ const unsigned char *f, unsigned int count)
+{
+ struct tty_struct *tty;
+ struct tty_ldisc *disc;
+
+ tty = READ_ONCE(port->itty);
+ if (!tty)
+ return;
+
+ disc = tty_ldisc_ref(tty);
+ if (!disc)
+ return;
+
+ if (disc->ops->lookahead_buf)
+ disc->ops->lookahead_buf(disc->tty, p, f, count);
+
+ tty_ldisc_deref(disc);
+}
+
static void tty_port_default_wakeup(struct tty_port *port)
{
struct tty_struct *tty = tty_port_tty_get(port);
@@ -55,6 +75,7 @@ static void tty_port_default_wakeup(struct tty_port *port)
const struct tty_port_client_operations tty_port_default_client_ops = {
.receive_buf = tty_port_default_receive_buf,
+ .lookahead_buf = tty_port_default_lookahead_buf,
.write_wakeup = tty_port_default_wakeup,
};
EXPORT_SYMBOL_GPL(tty_port_default_client_ops);
diff --git a/drivers/tty/vt/Makefile b/drivers/tty/vt/Makefile
index fe30ce512819..b3dfe9d5717e 100644
--- a/drivers/tty/vt/Makefile
+++ b/drivers/tty/vt/Makefile
@@ -30,6 +30,6 @@ $(obj)/defkeymap.o: $(obj)/defkeymap.c
ifdef GENERATE_KEYMAP
$(obj)/defkeymap.c: $(obj)/%.c: $(src)/%.map
- loadkeys --mktable $< > $@
+ loadkeys --mktable --unicode $< > $@
endif
diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c
index d815ac98b39e..f02d21e2a96e 100644
--- a/drivers/tty/vt/consolemap.c
+++ b/drivers/tty/vt/consolemap.c
@@ -23,6 +23,8 @@
* stack overflow.
*/
+#include <linux/bitfield.h>
+#include <linux/bits.h>
#include <linux/module.h>
#include <linux/kd.h>
#include <linux/errno.h>
@@ -36,9 +38,9 @@
#include <linux/vt_kern.h>
#include <linux/string.h>
-static unsigned short translations[][256] = {
+static unsigned short translations[][E_TABSZ] = {
/* 8-bit Latin-1 mapped to Unicode -- trivial mapping */
- {
+ [LAT1_MAP] = {
0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017,
@@ -71,9 +73,9 @@ static unsigned short translations[][256] = {
0x00e8, 0x00e9, 0x00ea, 0x00eb, 0x00ec, 0x00ed, 0x00ee, 0x00ef,
0x00f0, 0x00f1, 0x00f2, 0x00f3, 0x00f4, 0x00f5, 0x00f6, 0x00f7,
0x00f8, 0x00f9, 0x00fa, 0x00fb, 0x00fc, 0x00fd, 0x00fe, 0x00ff
- },
+ },
/* VT100 graphics mapped to Unicode */
- {
+ [GRAF_MAP] = {
0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017,
@@ -108,8 +110,8 @@ static unsigned short translations[][256] = {
0x00f8, 0x00f9, 0x00fa, 0x00fb, 0x00fc, 0x00fd, 0x00fe, 0x00ff
},
/* IBM Codepage 437 mapped to Unicode */
- {
- 0x0000, 0x263a, 0x263b, 0x2665, 0x2666, 0x2663, 0x2660, 0x2022,
+ [IBMPC_MAP] = {
+ 0x0000, 0x263a, 0x263b, 0x2665, 0x2666, 0x2663, 0x2660, 0x2022,
0x25d8, 0x25cb, 0x25d9, 0x2642, 0x2640, 0x266a, 0x266b, 0x263c,
0x25b6, 0x25c0, 0x2195, 0x203c, 0x00b6, 0x00a7, 0x25ac, 0x21a8,
0x2191, 0x2193, 0x2192, 0x2190, 0x221f, 0x2194, 0x25b2, 0x25bc,
@@ -141,9 +143,9 @@ static unsigned short translations[][256] = {
0x03a6, 0x0398, 0x03a9, 0x03b4, 0x221e, 0x03c6, 0x03b5, 0x2229,
0x2261, 0x00b1, 0x2265, 0x2264, 0x2320, 0x2321, 0x00f7, 0x2248,
0x00b0, 0x2219, 0x00b7, 0x221a, 0x207f, 0x00b2, 0x25a0, 0x00a0
- },
+ },
/* User mapping -- default to codes for direct font mapping */
- {
+ [USER_MAP] = {
0xf000, 0xf001, 0xf002, 0xf003, 0xf004, 0xf005, 0xf006, 0xf007,
0xf008, 0xf009, 0xf00a, 0xf00b, 0xf00c, 0xf00d, 0xf00e, 0xf00f,
0xf010, 0xf011, 0xf012, 0xf013, 0xf014, 0xf015, 0xf016, 0xf017,
@@ -184,78 +186,105 @@ static unsigned short translations[][256] = {
#define MAX_GLYPH 512 /* Max possible glyph value */
-static int inv_translate[MAX_NR_CONSOLES];
+static enum translation_map inv_translate[MAX_NR_CONSOLES];
+
+#define UNI_DIRS 32U
+#define UNI_DIR_ROWS 32U
+#define UNI_ROW_GLYPHS 64U
+
+#define UNI_DIR_BITS GENMASK(15, 11)
+#define UNI_ROW_BITS GENMASK(10, 6)
+#define UNI_GLYPH_BITS GENMASK( 5, 0)
-struct uni_pagedir {
- u16 **uni_pgdir[32];
+#define UNI_DIR(uni) FIELD_GET(UNI_DIR_BITS, (uni))
+#define UNI_ROW(uni) FIELD_GET(UNI_ROW_BITS, (uni))
+#define UNI_GLYPH(uni) FIELD_GET(UNI_GLYPH_BITS, (uni))
+
+#define UNI(dir, row, glyph) (FIELD_PREP(UNI_DIR_BITS, (dir)) | \
+ FIELD_PREP(UNI_ROW_BITS, (row)) | \
+ FIELD_PREP(UNI_GLYPH_BITS, (glyph)))
+
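/*
 * Worked example of the packing above (illustrative only, not part of the
 * patch): UNI(4, 26, 18) == (4 << 11) | (26 << 6) | 18 == 0x2692, and in the
 * other direction UNI_DIR(0x2692) == 4, UNI_ROW(0x2692) == 26 and
 * UNI_GLYPH(0x2692) == 18.
 */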
+/**
+ * struct uni_pagedict -- unicode directory
+ *
+ * @uni_pgdir: 32*32*64 table with glyphs
+ * @refcount: reference count of this structure
+ * @sum: checksum
+ * @inverse_translations: best-effort inverse mapping
+ * @inverse_trans_unicode: best-effort inverse mapping to unicode
+ */
+struct uni_pagedict {
+ u16 **uni_pgdir[UNI_DIRS];
unsigned long refcount;
unsigned long sum;
- unsigned char *inverse_translations[4];
+ unsigned char *inverse_translations[LAST_MAP + 1];
u16 *inverse_trans_unicode;
};
-static struct uni_pagedir *dflt;
+static struct uni_pagedict *dflt;
-static void set_inverse_transl(struct vc_data *conp, struct uni_pagedir *p, int i)
+static void set_inverse_transl(struct vc_data *conp, struct uni_pagedict *dict,
+ enum translation_map m)
{
- int j, glyph;
- unsigned short *t = translations[i];
- unsigned char *q;
-
- if (!p) return;
- q = p->inverse_translations[i];
-
- if (!q) {
- q = p->inverse_translations[i] = kmalloc(MAX_GLYPH, GFP_KERNEL);
- if (!q) return;
+ unsigned short *t = translations[m];
+ unsigned char *inv;
+
+ if (!dict)
+ return;
+ inv = dict->inverse_translations[m];
+
+ if (!inv) {
+ inv = dict->inverse_translations[m] = kmalloc(MAX_GLYPH,
+ GFP_KERNEL);
+ if (!inv)
+ return;
}
- memset(q, 0, MAX_GLYPH);
+ memset(inv, 0, MAX_GLYPH);
- for (j = 0; j < E_TABSZ; j++) {
- glyph = conv_uni_to_pc(conp, t[j]);
- if (glyph >= 0 && glyph < MAX_GLYPH && q[glyph] < 32) {
+ for (unsigned int ch = 0; ch < ARRAY_SIZE(translations[m]); ch++) {
+ int glyph = conv_uni_to_pc(conp, t[ch]);
+ if (glyph >= 0 && glyph < MAX_GLYPH && inv[glyph] < 32) {
/* prefer '-' above SHY etc. */
- q[glyph] = j;
+ inv[glyph] = ch;
}
}
}
-static void set_inverse_trans_unicode(struct vc_data *conp,
- struct uni_pagedir *p)
+static void set_inverse_trans_unicode(struct uni_pagedict *dict)
{
- int i, j, k, glyph;
- u16 **p1, *p2;
- u16 *q;
-
- if (!p) return;
- q = p->inverse_trans_unicode;
- if (!q) {
- q = p->inverse_trans_unicode =
- kmalloc_array(MAX_GLYPH, sizeof(u16), GFP_KERNEL);
- if (!q)
+ unsigned int d, r, g;
+ u16 *inv;
+
+ if (!dict)
+ return;
+
+ inv = dict->inverse_trans_unicode;
+ if (!inv) {
+ inv = dict->inverse_trans_unicode = kmalloc_array(MAX_GLYPH,
+ sizeof(*inv), GFP_KERNEL);
+ if (!inv)
return;
}
- memset(q, 0, MAX_GLYPH * sizeof(u16));
+ memset(inv, 0, MAX_GLYPH * sizeof(*inv));
- for (i = 0; i < 32; i++) {
- p1 = p->uni_pgdir[i];
- if (!p1)
+ for (d = 0; d < UNI_DIRS; d++) {
+ u16 **dir = dict->uni_pgdir[d];
+ if (!dir)
continue;
- for (j = 0; j < 32; j++) {
- p2 = p1[j];
- if (!p2)
+ for (r = 0; r < UNI_DIR_ROWS; r++) {
+ u16 *row = dir[r];
+ if (!row)
continue;
- for (k = 0; k < 64; k++) {
- glyph = p2[k];
- if (glyph >= 0 && glyph < MAX_GLYPH
- && q[glyph] < 32)
- q[glyph] = (i << 11) + (j << 6) + k;
+ for (g = 0; g < UNI_ROW_GLYPHS; g++) {
+ u16 glyph = row[g];
+ if (glyph < MAX_GLYPH && inv[glyph] < 32)
+ inv[glyph] = UNI(d, r, g);
}
}
}
}
-unsigned short *set_translate(int m, struct vc_data *vc)
+unsigned short *set_translate(enum translation_map m, struct vc_data *vc)
{
inv_translate[vc->vc_num] = m;
return translations[m];
@@ -268,44 +297,45 @@ unsigned short *set_translate(int m, struct vc_data *vc)
* was active.
* Still, it is now possible to a certain extent to cut and paste non-ASCII.
*/
-u16 inverse_translate(const struct vc_data *conp, int glyph, int use_unicode)
+u16 inverse_translate(const struct vc_data *conp, u16 glyph, bool use_unicode)
{
- struct uni_pagedir *p;
- int m;
- if (glyph < 0 || glyph >= MAX_GLYPH)
+ struct uni_pagedict *p;
+ enum translation_map m;
+
+ if (glyph >= MAX_GLYPH)
return 0;
- else {
- p = *conp->vc_uni_pagedir_loc;
- if (!p)
+
+ p = *conp->uni_pagedict_loc;
+ if (!p)
+ return glyph;
+
+ if (use_unicode) {
+ if (!p->inverse_trans_unicode)
return glyph;
- else if (use_unicode) {
- if (!p->inverse_trans_unicode)
- return glyph;
- else
- return p->inverse_trans_unicode[glyph];
- } else {
- m = inv_translate[conp->vc_num];
- if (!p->inverse_translations[m])
- return glyph;
- else
- return p->inverse_translations[m][glyph];
- }
+
+ return p->inverse_trans_unicode[glyph];
}
+
+ m = inv_translate[conp->vc_num];
+ if (!p->inverse_translations[m])
+ return glyph;
+
+ return p->inverse_translations[m][glyph];
}
EXPORT_SYMBOL_GPL(inverse_translate);
static void update_user_maps(void)
{
int i;
- struct uni_pagedir *p, *q = NULL;
-
+ struct uni_pagedict *p, *q = NULL;
+
for (i = 0; i < MAX_NR_CONSOLES; i++) {
if (!vc_cons_allocated(i))
continue;
- p = *vc_cons[i].d->vc_uni_pagedir_loc;
+ p = *vc_cons[i].d->uni_pagedict_loc;
if (p && p != q) {
set_inverse_transl(vc_cons[i].d, p, USER_MAP);
- set_inverse_trans_unicode(vc_cons[i].d, p);
+ set_inverse_trans_unicode(p);
q = p;
}
}
@@ -321,15 +351,15 @@ static void update_user_maps(void)
*/
int con_set_trans_old(unsigned char __user * arg)
{
- int i;
unsigned short inbuf[E_TABSZ];
- unsigned char ubuf[E_TABSZ];
-
- if (copy_from_user(ubuf, arg, E_TABSZ))
- return -EFAULT;
+ unsigned int i;
+ unsigned char ch;
- for (i = 0; i < E_TABSZ ; i++)
- inbuf[i] = UNI_DIRECT_BASE | ubuf[i];
+ for (i = 0; i < ARRAY_SIZE(inbuf); i++) {
+ if (get_user(ch, &arg[i]))
+ return -EFAULT;
+ inbuf[i] = UNI_DIRECT_BASE | ch;
+ }
console_lock();
memcpy(translations[USER_MAP], inbuf, sizeof(inbuf));
@@ -345,7 +375,7 @@ int con_get_trans_old(unsigned char __user * arg)
unsigned char outbuf[E_TABSZ];
console_lock();
- for (i = 0; i < E_TABSZ ; i++)
+ for (i = 0; i < ARRAY_SIZE(outbuf); i++)
{
ch = conv_uni_to_pc(vc_cons[fg_console].d, p[i]);
outbuf[i] = (ch & ~0xff) ? 0 : ch;
@@ -381,7 +411,7 @@ int con_get_trans_new(ushort __user * arg)
}
/*
- * Unicode -> current font conversion
+ * Unicode -> current font conversion
*
* A font has at most 512 chars, usually 256.
* But one font position may represent several Unicode chars.
@@ -393,78 +423,82 @@ int con_get_trans_new(ushort __user * arg)
extern u8 dfont_unicount[]; /* Defined in console_defmap.c */
extern u16 dfont_unitable[];
-static void con_release_unimap(struct uni_pagedir *p)
+static void con_release_unimap(struct uni_pagedict *dict)
{
- u16 **p1;
- int i, j;
-
- if (p == dflt) dflt = NULL;
- for (i = 0; i < 32; i++) {
- p1 = p->uni_pgdir[i];
- if (p1 != NULL) {
- for (j = 0; j < 32; j++)
- kfree(p1[j]);
- kfree(p1);
+ unsigned int d, r;
+
+ if (dict == dflt)
+ dflt = NULL;
+
+ for (d = 0; d < UNI_DIRS; d++) {
+ u16 **dir = dict->uni_pgdir[d];
+ if (dir != NULL) {
+ for (r = 0; r < UNI_DIR_ROWS; r++)
+ kfree(dir[r]);
+ kfree(dir);
}
- p->uni_pgdir[i] = NULL;
+ dict->uni_pgdir[d] = NULL;
}
- for (i = 0; i < 4; i++) {
- kfree(p->inverse_translations[i]);
- p->inverse_translations[i] = NULL;
+
+ for (r = 0; r < ARRAY_SIZE(dict->inverse_translations); r++) {
+ kfree(dict->inverse_translations[r]);
+ dict->inverse_translations[r] = NULL;
}
- kfree(p->inverse_trans_unicode);
- p->inverse_trans_unicode = NULL;
+
+ kfree(dict->inverse_trans_unicode);
+ dict->inverse_trans_unicode = NULL;
}
/* Caller must hold the console lock */
void con_free_unimap(struct vc_data *vc)
{
- struct uni_pagedir *p;
+ struct uni_pagedict *p;
- p = *vc->vc_uni_pagedir_loc;
+ p = *vc->uni_pagedict_loc;
if (!p)
return;
- *vc->vc_uni_pagedir_loc = NULL;
+ *vc->uni_pagedict_loc = NULL;
if (--p->refcount)
return;
con_release_unimap(p);
kfree(p);
}
-
-static int con_unify_unimap(struct vc_data *conp, struct uni_pagedir *p)
+
+static int con_unify_unimap(struct vc_data *conp, struct uni_pagedict *dict1)
{
- int i, j, k;
- struct uni_pagedir *q;
-
- for (i = 0; i < MAX_NR_CONSOLES; i++) {
- if (!vc_cons_allocated(i))
+ struct uni_pagedict *dict2;
+ unsigned int cons, d, r;
+
+ for (cons = 0; cons < MAX_NR_CONSOLES; cons++) {
+ if (!vc_cons_allocated(cons))
continue;
- q = *vc_cons[i].d->vc_uni_pagedir_loc;
- if (!q || q == p || q->sum != p->sum)
+ dict2 = *vc_cons[cons].d->uni_pagedict_loc;
+ if (!dict2 || dict2 == dict1 || dict2->sum != dict1->sum)
continue;
- for (j = 0; j < 32; j++) {
- u16 **p1, **q1;
- p1 = p->uni_pgdir[j]; q1 = q->uni_pgdir[j];
- if (!p1 && !q1)
+ for (d = 0; d < UNI_DIRS; d++) {
+ u16 **dir1 = dict1->uni_pgdir[d];
+ u16 **dir2 = dict2->uni_pgdir[d];
+ if (!dir1 && !dir2)
continue;
- if (!p1 || !q1)
+ if (!dir1 || !dir2)
break;
- for (k = 0; k < 32; k++) {
- if (!p1[k] && !q1[k])
+ for (r = 0; r < UNI_DIR_ROWS; r++) {
+ if (!dir1[r] && !dir2[r])
continue;
- if (!p1[k] || !q1[k])
+ if (!dir1[r] || !dir2[r])
break;
- if (memcmp(p1[k], q1[k], 64*sizeof(u16)))
+ if (memcmp(dir1[r], dir2[r], UNI_ROW_GLYPHS *
+ sizeof(*dir1[r])))
break;
}
- if (k < 32)
+ if (r < UNI_DIR_ROWS)
break;
}
- if (j == 32) {
- q->refcount++;
- *conp->vc_uni_pagedir_loc = q;
- con_release_unimap(p);
- kfree(p);
+ if (d == UNI_DIRS) {
+ dict2->refcount++;
+ *conp->uni_pagedict_loc = dict2;
+ con_release_unimap(dict1);
+ kfree(dict1);
return 1;
}
}
@@ -472,55 +506,66 @@ static int con_unify_unimap(struct vc_data *conp, struct uni_pagedir *p)
}
static int
-con_insert_unipair(struct uni_pagedir *p, u_short unicode, u_short fontpos)
+con_insert_unipair(struct uni_pagedict *p, u_short unicode, u_short fontpos)
{
- int i, n;
- u16 **p1, *p2;
-
- p1 = p->uni_pgdir[n = unicode >> 11];
- if (!p1) {
- p1 = p->uni_pgdir[n] = kmalloc_array(32, sizeof(u16 *),
- GFP_KERNEL);
- if (!p1) return -ENOMEM;
- for (i = 0; i < 32; i++)
- p1[i] = NULL;
+ u16 **dir, *row;
+ unsigned int n;
+
+ n = UNI_DIR(unicode);
+ dir = p->uni_pgdir[n];
+ if (!dir) {
+ dir = p->uni_pgdir[n] = kcalloc(UNI_DIR_ROWS, sizeof(*dir),
+ GFP_KERNEL);
+ if (!dir)
+ return -ENOMEM;
}
- p2 = p1[n = (unicode >> 6) & 0x1f];
- if (!p2) {
- p2 = p1[n] = kmalloc_array(64, sizeof(u16), GFP_KERNEL);
- if (!p2) return -ENOMEM;
- memset(p2, 0xff, 64*sizeof(u16)); /* No glyphs for the characters (yet) */
+ n = UNI_ROW(unicode);
+ row = dir[n];
+ if (!row) {
+ row = dir[n] = kmalloc_array(UNI_ROW_GLYPHS, sizeof(*row),
+ GFP_KERNEL);
+ if (!row)
+ return -ENOMEM;
+ /* No glyphs for the characters (yet) */
+ memset(row, 0xff, UNI_ROW_GLYPHS * sizeof(*row));
}
- p2[unicode & 0x3f] = fontpos;
-
+ row[UNI_GLYPH(unicode)] = fontpos;
+
p->sum += (fontpos << 20U) + unicode;
return 0;
}
+static int con_allocate_new(struct vc_data *vc)
+{
+ struct uni_pagedict *new, *old = *vc->uni_pagedict_loc;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+
+ new->refcount = 1;
+ *vc->uni_pagedict_loc = new;
+
+ if (old)
+ old->refcount--;
+
+ return 0;
+}
+
/* Caller must hold the lock */
static int con_do_clear_unimap(struct vc_data *vc)
{
- struct uni_pagedir *p, *q;
-
- p = *vc->vc_uni_pagedir_loc;
- if (!p || --p->refcount) {
- q = kzalloc(sizeof(*p), GFP_KERNEL);
- if (!q) {
- if (p)
- p->refcount++;
- return -ENOMEM;
- }
- q->refcount=1;
- *vc->vc_uni_pagedir_loc = q;
- } else {
- if (p == dflt) dflt = NULL;
- p->refcount++;
- p->sum = 0;
- con_release_unimap(p);
- }
+ struct uni_pagedict *old = *vc->uni_pagedict_loc;
+
+ if (!old || old->refcount > 1)
+ return con_allocate_new(vc);
+
+ old->sum = 0;
+ con_release_unimap(old);
+
return 0;
}
@@ -532,91 +577,93 @@ int con_clear_unimap(struct vc_data *vc)
console_unlock();
return ret;
}
-
+
+static struct uni_pagedict *con_unshare_unimap(struct vc_data *vc,
+ struct uni_pagedict *old)
+{
+ struct uni_pagedict *new;
+ unsigned int d, r, g;
+ int ret;
+ u16 uni = 0;
+
+ ret = con_allocate_new(vc);
+ if (ret)
+ return ERR_PTR(ret);
+
+ new = *vc->uni_pagedict_loc;
+
+ /*
+ * uni_pgdir is a 32*32*64 table with rows allocated when its first
+ * entry is added. The unicode value must still be incremented for
+ * empty rows. We are copying entries from "old" to "new".
+ */
+ for (d = 0; d < UNI_DIRS; d++) {
+ u16 **dir = old->uni_pgdir[d];
+ if (!dir) {
+ /* Account for empty table */
+ uni += UNI_DIR_ROWS * UNI_ROW_GLYPHS;
+ continue;
+ }
+
+ for (r = 0; r < UNI_DIR_ROWS; r++) {
+ u16 *row = dir[r];
+ if (!row) {
+ /* Account for row of 64 empty entries */
+ uni += UNI_ROW_GLYPHS;
+ continue;
+ }
+
+ for (g = 0; g < UNI_ROW_GLYPHS; g++, uni++) {
+ if (row[g] == 0xffff)
+ continue;
+ /*
+ * Found one, copy entry for unicode uni with
+ * fontpos value row[g].
+ */
+ ret = con_insert_unipair(new, uni, row[g]);
+ if (ret) {
+ old->refcount++;
+ *vc->uni_pagedict_loc = old;
+ con_release_unimap(new);
+ kfree(new);
+ return ERR_PTR(ret);
+ }
+ }
+ }
+ }
+
+ return new;
+}
+
int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
{
- int err = 0, err1, i;
- struct uni_pagedir *p, *q;
+ int err = 0, err1;
+ struct uni_pagedict *dict;
struct unipair *unilist, *plist;
if (!ct)
return 0;
- unilist = vmemdup_user(list, array_size(sizeof(struct unipair), ct));
+ unilist = vmemdup_user(list, array_size(sizeof(*unilist), ct));
if (IS_ERR(unilist))
return PTR_ERR(unilist);
console_lock();
/* Save original vc_unipagdir_loc in case we allocate a new one */
- p = *vc->vc_uni_pagedir_loc;
-
- if (!p) {
+ dict = *vc->uni_pagedict_loc;
+ if (!dict) {
err = -EINVAL;
-
goto out_unlock;
}
-
- if (p->refcount > 1) {
- int j, k;
- u16 **p1, *p2, l;
-
- err1 = con_do_clear_unimap(vc);
- if (err1) {
- err = err1;
+
+ if (dict->refcount > 1) {
+ dict = con_unshare_unimap(vc, dict);
+ if (IS_ERR(dict)) {
+ err = PTR_ERR(dict);
goto out_unlock;
}
-
- /*
- * Since refcount was > 1, con_clear_unimap() allocated a
- * a new uni_pagedir for this vc. Re: p != q
- */
- q = *vc->vc_uni_pagedir_loc;
-
- /*
- * uni_pgdir is a 32*32*64 table with rows allocated
- * when its first entry is added. The unicode value must
- * still be incremented for empty rows. We are copying
- * entries from "p" (old) to "q" (new).
- */
- l = 0; /* unicode value */
- for (i = 0; i < 32; i++) {
- p1 = p->uni_pgdir[i];
- if (p1)
- for (j = 0; j < 32; j++) {
- p2 = p1[j];
- if (p2) {
- for (k = 0; k < 64; k++, l++)
- if (p2[k] != 0xffff) {
- /*
- * Found one, copy entry for unicode
- * l with fontpos value p2[k].
- */
- err1 = con_insert_unipair(q, l, p2[k]);
- if (err1) {
- p->refcount++;
- *vc->vc_uni_pagedir_loc = p;
- con_release_unimap(q);
- kfree(q);
- err = err1;
- goto out_unlock;
- }
- }
- } else {
- /* Account for row of 64 empty entries */
- l += 64;
- }
- }
- else
- /* Account for empty table */
- l += 32 * 64;
- }
-
- /*
- * Finished copying font table, set vc_uni_pagedir to new table
- */
- p = q;
- } else if (p == dflt) {
+ } else if (dict == dflt) {
dflt = NULL;
}
@@ -624,20 +671,20 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
* Insert user specified unicode pairs into new table.
*/
for (plist = unilist; ct; ct--, plist++) {
- err1 = con_insert_unipair(p, plist->unicode, plist->fontpos);
+ err1 = con_insert_unipair(dict, plist->unicode, plist->fontpos);
if (err1)
err = err1;
}
-
+
/*
* Merge with fontmaps of any other virtual consoles.
*/
- if (con_unify_unimap(vc, p))
+ if (con_unify_unimap(vc, dict))
goto out_unlock;
- for (i = 0; i <= 3; i++)
- set_inverse_transl(vc, p, i); /* Update inverse translations */
- set_inverse_trans_unicode(vc, p);
+ for (enum translation_map m = FIRST_MAP; m <= LAST_MAP; m++)
+ set_inverse_transl(vc, dict, m);
+ set_inverse_trans_unicode(dict);
out_unlock:
console_unlock();
@@ -652,55 +699,56 @@ out_unlock:
* Loads the unimap for the hardware font, as defined in uni_hash.tbl.
* The representation used was the most compact I could come up
* with. This routine is executed at video setup, and when the
- * PIO_FONTRESET ioctl is called.
+ * PIO_FONTRESET ioctl is called.
*
* The caller must hold the console lock
*/
int con_set_default_unimap(struct vc_data *vc)
{
- int i, j, err = 0, err1;
- u16 *q;
- struct uni_pagedir *p;
+ struct uni_pagedict *dict;
+ unsigned int fontpos, count;
+ int err = 0, err1;
+ u16 *dfont;
if (dflt) {
- p = *vc->vc_uni_pagedir_loc;
- if (p == dflt)
+ dict = *vc->uni_pagedict_loc;
+ if (dict == dflt)
return 0;
dflt->refcount++;
- *vc->vc_uni_pagedir_loc = dflt;
- if (p && !--p->refcount) {
- con_release_unimap(p);
- kfree(p);
+ *vc->uni_pagedict_loc = dflt;
+ if (dict && !--dict->refcount) {
+ con_release_unimap(dict);
+ kfree(dict);
}
return 0;
}
-
+
/* The default font is always 256 characters */
err = con_do_clear_unimap(vc);
if (err)
return err;
-
- p = *vc->vc_uni_pagedir_loc;
- q = dfont_unitable;
-
- for (i = 0; i < 256; i++)
- for (j = dfont_unicount[i]; j; j--) {
- err1 = con_insert_unipair(p, *(q++), i);
+
+ dict = *vc->uni_pagedict_loc;
+ dfont = dfont_unitable;
+
+ for (fontpos = 0; fontpos < 256U; fontpos++)
+ for (count = dfont_unicount[fontpos]; count; count--) {
+ err1 = con_insert_unipair(dict, *(dfont++), fontpos);
if (err1)
err = err1;
}
-
- if (con_unify_unimap(vc, p)) {
- dflt = *vc->vc_uni_pagedir_loc;
+
+ if (con_unify_unimap(vc, dict)) {
+ dflt = *vc->uni_pagedict_loc;
return err;
}
- for (i = 0; i <= 3; i++)
- set_inverse_transl(vc, p, i); /* Update all inverse translations */
- set_inverse_trans_unicode(vc, p);
- dflt = p;
+ for (enum translation_map m = FIRST_MAP; m <= LAST_MAP; m++)
+ set_inverse_transl(vc, dict, m);
+ set_inverse_trans_unicode(dict);
+ dflt = dict;
return err;
}
EXPORT_SYMBOL(con_set_default_unimap);
@@ -714,16 +762,16 @@ EXPORT_SYMBOL(con_set_default_unimap);
*/
int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc)
{
- struct uni_pagedir *q;
+ struct uni_pagedict *src;
- if (!*src_vc->vc_uni_pagedir_loc)
+ if (!*src_vc->uni_pagedict_loc)
return -EINVAL;
- if (*dst_vc->vc_uni_pagedir_loc == *src_vc->vc_uni_pagedir_loc)
+ if (*dst_vc->uni_pagedict_loc == *src_vc->uni_pagedict_loc)
return 0;
con_free_unimap(dst_vc);
- q = *src_vc->vc_uni_pagedir_loc;
- q->refcount++;
- *dst_vc->vc_uni_pagedir_loc = q;
+ src = *src_vc->uni_pagedict_loc;
+ src->refcount++;
+ *dst_vc->uni_pagedict_loc = src;
return 0;
}
EXPORT_SYMBOL(con_copy_unimap);
@@ -734,46 +782,53 @@ EXPORT_SYMBOL(con_copy_unimap);
* Read the console unicode data for this console. Called from the ioctl
* handlers.
*/
-int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, struct unipair __user *list)
+int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct,
+ struct unipair __user *list)
{
- int i, j, k, ret = 0;
ushort ect;
- u16 **p1, *p2;
- struct uni_pagedir *p;
+ struct uni_pagedict *dict;
struct unipair *unilist;
+ unsigned int d, r, g;
+ int ret = 0;
- unilist = kvmalloc_array(ct, sizeof(struct unipair), GFP_KERNEL);
+ unilist = kvmalloc_array(ct, sizeof(*unilist), GFP_KERNEL);
if (!unilist)
return -ENOMEM;
console_lock();
ect = 0;
- if (*vc->vc_uni_pagedir_loc) {
- p = *vc->vc_uni_pagedir_loc;
- for (i = 0; i < 32; i++) {
- p1 = p->uni_pgdir[i];
- if (p1)
- for (j = 0; j < 32; j++) {
- p2 = *(p1++);
- if (p2)
- for (k = 0; k < 64; k++, p2++) {
- if (*p2 >= MAX_GLYPH)
- continue;
- if (ect < ct) {
- unilist[ect].unicode =
- (i<<11)+(j<<6)+k;
- unilist[ect].fontpos = *p2;
- }
- ect++;
+ dict = *vc->uni_pagedict_loc;
+ if (!dict)
+ goto unlock;
+
+ for (d = 0; d < UNI_DIRS; d++) {
+ u16 **dir = dict->uni_pgdir[d];
+ if (!dir)
+ continue;
+
+ for (r = 0; r < UNI_DIR_ROWS; r++) {
+ u16 *row = dir[r];
+ if (!row)
+ continue;
+
+ for (g = 0; g < UNI_ROW_GLYPHS; g++, row++) {
+ if (*row >= MAX_GLYPH)
+ continue;
+ if (ect < ct) {
+ unilist[ect].unicode = UNI(d, r, g);
+ unilist[ect].fontpos = *row;
}
+ ect++;
}
}
}
+unlock:
console_unlock();
- if (copy_to_user(list, unilist, min(ect, ct) * sizeof(struct unipair)))
+ if (copy_to_user(list, unilist, min(ect, ct) * sizeof(*unilist)))
+ ret = -EFAULT;
+ if (put_user(ect, uct))
ret = -EFAULT;
- put_user(ect, uct);
kvfree(unilist);
return ret ? ret : (ect <= ct) ? 0 : -ENOMEM;
}
@@ -798,20 +853,18 @@ u32 conv_8bit_to_uni(unsigned char c)
int conv_uni_to_8bit(u32 uni)
{
int c;
- for (c = 0; c < 0x100; c++)
+ for (c = 0; c < ARRAY_SIZE(translations[USER_MAP]); c++)
if (translations[USER_MAP][c] == uni ||
(translations[USER_MAP][c] == (c | 0xf000) && uni == c))
return c;
return -1;
}
-int
-conv_uni_to_pc(struct vc_data *conp, long ucs)
+int conv_uni_to_pc(struct vc_data *conp, long ucs)
{
- int h;
- u16 **p1, *p2;
- struct uni_pagedir *p;
-
+ struct uni_pagedict *dict;
+ u16 **dir, *row, glyph;
+
/* Only 16-bit codes supported at this time */
if (ucs > 0xffff)
return -4; /* Not found */
@@ -826,17 +879,24 @@ conv_uni_to_pc(struct vc_data *conp, long ucs)
*/
else if ((ucs & ~UNI_DIRECT_MASK) == UNI_DIRECT_BASE)
return ucs & UNI_DIRECT_MASK;
-
- if (!*conp->vc_uni_pagedir_loc)
+
+ dict = *conp->uni_pagedict_loc;
+ if (!dict)
return -3;
- p = *conp->vc_uni_pagedir_loc;
- if ((p1 = p->uni_pgdir[ucs >> 11]) &&
- (p2 = p1[(ucs >> 6) & 0x1f]) &&
- (h = p2[ucs & 0x3f]) < MAX_GLYPH)
- return h;
+ dir = dict->uni_pgdir[UNI_DIR(ucs)];
+ if (!dir)
+ return -4;
- return -4; /* not found */
+ row = dir[UNI_ROW(ucs)];
+ if (!row)
+ return -4;
+
+ glyph = row[UNI_GLYPH(ucs)];
+ if (glyph >= MAX_GLYPH)
+ return -4;
+
+ return glyph;
}
/*
@@ -844,13 +904,13 @@ conv_uni_to_pc(struct vc_data *conp, long ucs)
* initialized. It must be possible to call kmalloc(..., GFP_KERNEL)
* from this function, hence the call from sys_setup.
*/
-void __init
+void __init
console_map_init(void)
{
int i;
-
+
for (i = 0; i < MAX_NR_CONSOLES; i++)
- if (vc_cons_allocated(i) && !*vc_cons[i].d->vc_uni_pagedir_loc)
+ if (vc_cons_allocated(i) && !*vc_cons[i].d->uni_pagedict_loc)
con_set_default_unimap(vc_cons[i].d);
}
diff --git a/drivers/tty/vt/defkeymap.c_shipped b/drivers/tty/vt/defkeymap.c_shipped
index 094d95bf0005..0c043e4f292e 100644
--- a/drivers/tty/vt/defkeymap.c_shipped
+++ b/drivers/tty/vt/defkeymap.c_shipped
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
-/* Do not edit this file! It was automatically generated by */
-/* loadkeys --mktable defkeymap.map > defkeymap.c */
+/* Do not edit this file! It was automatically generated by */
+/* loadkeys --mktable --unicode defkeymap.map > defkeymap.c */
#include <linux/types.h>
#include <linux/keyboard.h>
@@ -139,7 +139,7 @@ static unsigned short ctrl_alt_map[NR_KEYS] = {
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
};
-ushort *key_maps[MAX_NR_KEYMAPS] = {
+unsigned short *key_maps[MAX_NR_KEYMAPS] = {
plain_map, shift_map, altgr_map, NULL,
ctrl_map, shift_ctrl_map, NULL, NULL,
alt_map, NULL, NULL, NULL,
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index f7755e73696e..6ef22f01cc51 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -68,7 +68,8 @@ sel_pos(int n, bool unicode)
{
if (unicode)
return screen_glyph_unicode(vc_sel.cons, n / 2);
- return inverse_translate(vc_sel.cons, screen_glyph(vc_sel.cons, n), 0);
+ return inverse_translate(vc_sel.cons, screen_glyph(vc_sel.cons, n),
+ false);
}
/**
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index dfc1f4b445f3..ae9c926acd6f 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -344,7 +344,7 @@ static struct uni_screen *vc_uniscr_alloc(unsigned int cols, unsigned int rows)
/* allocate everything in one go */
memsize = cols * rows * sizeof(char32_t);
memsize += rows * sizeof(char32_t *);
- p = vmalloc(memsize);
+ p = vzalloc(memsize);
if (!p)
return NULL;
@@ -1063,10 +1063,10 @@ static void visual_init(struct vc_data *vc, int num, int init)
__module_get(vc->vc_sw->owner);
vc->vc_num = num;
vc->vc_display_fg = &master_display_fg;
- if (vc->vc_uni_pagedir_loc)
+ if (vc->uni_pagedict_loc)
con_free_unimap(vc);
- vc->vc_uni_pagedir_loc = &vc->vc_uni_pagedir;
- vc->vc_uni_pagedir = NULL;
+ vc->uni_pagedict_loc = &vc->uni_pagedict;
+ vc->uni_pagedict = NULL;
vc->vc_hi_font_mask = 0;
vc->vc_complement_mask = 0;
vc->vc_can_do_color = 0;
@@ -1136,7 +1136,7 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */
visual_init(vc, currcons, 1);
- if (!*vc->vc_uni_pagedir_loc)
+ if (!*vc->uni_pagedict_loc)
con_set_default_unimap(vc);
err = -EINVAL;
@@ -3939,7 +3939,7 @@ static ssize_t show_bind(struct device *dev, struct device_attribute *attr,
bind = con_is_bound(con->con);
console_unlock();
- return snprintf(buf, PAGE_SIZE, "%i\n", bind);
+ return sysfs_emit(buf, "%i\n", bind);
}
static ssize_t show_name(struct device *dev, struct device_attribute *attr,
@@ -3947,7 +3947,7 @@ static ssize_t show_name(struct device *dev, struct device_attribute *attr,
{
struct con_driver *con = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s %s\n",
+ return sysfs_emit(buf, "%s %s\n",
(con->flag & CON_DRIVER_FLAG_MODULE) ? "(M)" : "(S)",
con->desc);
@@ -4741,7 +4741,7 @@ u32 screen_glyph_unicode(const struct vc_data *vc, int n)
if (uniscr)
return uniscr->lines[n / vc->vc_cols][n % vc->vc_cols];
- return inverse_translate(vc, screen_glyph(vc, n * 2), 1);
+ return inverse_translate(vc, screen_glyph(vc, n * 2), true);
}
EXPORT_SYMBOL_GPL(screen_glyph_unicode);
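The vc_uniscr_alloc() hunk above switches from vmalloc() to vzalloc() while keeping the "allocate everything in one go" layout: a single block holding both the array of row pointers and the cell storage those pointers index into, so one free releases the whole screen buffer. Below is a rough user-space sketch of that idea, with calloc() standing in for vzalloc(); the names and the choice to place the pointer array up front are illustrative assumptions, not the kernel code.

#include <stdint.h>
#include <stdlib.h>

/* One allocation: row-pointer array at the head, cell storage behind it. */
static uint32_t **uniscr_alloc_sketch(unsigned int cols, unsigned int rows)
{
	size_t memsize = rows * sizeof(uint32_t *) +
			 (size_t)cols * rows * sizeof(uint32_t);
	uint32_t **lines = calloc(1, memsize);	/* zeroed, like vzalloc() */
	uint32_t *cell;
	unsigned int r;

	if (!lines)
		return NULL;

	cell = (uint32_t *)(lines + rows);
	for (r = 0; r < rows; r++, cell += cols)
		lines[r] = cell;

	return lines;	/* free(lines) releases pointers and cells alike */
}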
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index ffb01fc6de75..8f67db202d7b 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -215,7 +215,7 @@ static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba,
hba->vops->config_scaling_param(hba, p, data);
}
-extern struct ufs_pm_lvl_states ufs_pm_lvl_states[];
+extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[];
/**
* ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
@@ -234,8 +234,8 @@ static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask);
int ufshcd_write_ee_control(struct ufs_hba *hba);
-int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, u16 *other_mask,
- u16 set, u16 clr);
+int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask,
+ const u16 *other_mask, u16 set, u16 clr);
static inline int ufshcd_update_ee_drv_mask(struct ufs_hba *hba,
u16 set, u16 clr)
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 2b40174e93ac..a202d7d5240d 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -64,9 +64,6 @@
/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3
-/* Maximum retries for Hibern8 enter */
-#define UIC_HIBERN8_ENTER_RETRIES 3
-
/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5
@@ -175,7 +172,7 @@ enum {
#define ufshcd_clear_eh_in_progress(h) \
((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
-struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
+const struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
[UFS_PM_LVL_0] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
[UFS_PM_LVL_1] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
[UFS_PM_LVL_2] = {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
@@ -363,7 +360,7 @@ static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
}
static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
- struct uic_command *ucmd,
+ const struct uic_command *ucmd,
enum ufs_trace_str_t str_t)
{
u32 cmd;
@@ -443,11 +440,11 @@ static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
}
static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,
- char *err_name)
+ const char *err_name)
{
int i;
bool found = false;
- struct ufs_event_hist *e;
+ const struct ufs_event_hist *e;
if (id >= UFS_EVT_CNT)
return;
@@ -497,7 +494,7 @@ static void ufshcd_print_evt_hist(struct ufs_hba *hba)
static
void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
{
- struct ufshcd_lrb *lrbp;
+ const struct ufshcd_lrb *lrbp;
int prdt_length;
int tag;
@@ -553,7 +550,7 @@ static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
static void ufshcd_print_host_state(struct ufs_hba *hba)
{
- struct scsi_device *sdev_ufs = hba->ufs_device_wlun;
+ const struct scsi_device *sdev_ufs = hba->ufs_device_wlun;
dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
@@ -1109,7 +1106,7 @@ static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
*/
static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
{
- struct scsi_device *sdev;
+ const struct scsi_device *sdev;
u32 pending = 0;
lockdep_assert_held(hba->host->host_lock);
@@ -2080,14 +2077,15 @@ static inline int ufshcd_monitor_opcode2dir(u8 opcode)
static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp)
{
- struct ufs_hba_monitor *m = &hba->monitor;
+ const struct ufs_hba_monitor *m = &hba->monitor;
return (m->enabled && lrbp && lrbp->cmd &&
(!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) &&
ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp));
}
-static void ufshcd_start_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+static void ufshcd_start_monitor(struct ufs_hba *hba,
+ const struct ufshcd_lrb *lrbp)
{
int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
unsigned long flags;
@@ -2098,14 +2096,14 @@ static void ufshcd_start_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
-static void ufshcd_update_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+static void ufshcd_update_monitor(struct ufs_hba *hba, const struct ufshcd_lrb *lrbp)
{
int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
unsigned long flags;
spin_lock_irqsave(hba->host->host_lock, flags);
if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
- struct request *req = scsi_cmd_to_rq(lrbp->cmd);
+ const struct request *req = scsi_cmd_to_rq(lrbp->cmd);
struct ufs_hba_monitor *m = &hba->monitor;
ktime_t now, inc, lat;
@@ -2227,6 +2225,8 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
int err;
hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
+ if (hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS)
+ hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT;
/* nutrs and nutmrs are 0 based values */
hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
@@ -3095,7 +3095,7 @@ static int ufshcd_query_flag_retry(struct ufs_hba *hba,
if (ret)
dev_err(hba->dev,
- "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
+ "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retries\n",
__func__, opcode, idn, ret, retries);
return ret;
}
@@ -3263,7 +3263,7 @@ int ufshcd_query_attr_retry(struct ufs_hba *hba,
if (ret)
dev_err(hba->dev,
- "%s: query attribute, idn %d, failed with error %d after %d retires\n",
+ "%s: query attribute, idn %d, failed with error %d after %d retries\n",
__func__, idn, ret, QUERY_REQ_RETRIES);
return ret;
}
@@ -3834,7 +3834,7 @@ int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
{
int ret;
- if (agreed_gear != UFS_HS_G4)
+ if (agreed_gear < UFS_HS_G4)
adapt_val = PA_NO_ADAPT;
ret = ufshcd_dme_set(hba,
@@ -4123,7 +4123,7 @@ out_unlock:
*
* Returns 0 on success, non-zero value on failure
*/
-static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
+int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
{
struct uic_command uic_cmd = {0};
int ret;
@@ -4148,6 +4148,7 @@ static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
out:
return ret;
}
+EXPORT_SYMBOL_GPL(ufshcd_uic_change_pwr_mode);
int ufshcd_link_recovery(struct ufs_hba *hba)
{
@@ -4290,8 +4291,13 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
if (hba->max_pwr_info.is_valid)
return 0;
- pwr_info->pwr_tx = FAST_MODE;
- pwr_info->pwr_rx = FAST_MODE;
+ if (hba->quirks & UFSHCD_QUIRK_HIBERN_FASTAUTO) {
+ pwr_info->pwr_tx = FASTAUTO_MODE;
+ pwr_info->pwr_rx = FASTAUTO_MODE;
+ } else {
+ pwr_info->pwr_tx = FAST_MODE;
+ pwr_info->pwr_rx = FAST_MODE;
+ }
pwr_info->hs_rate = PA_HS_MODE_B;
/* Get the connected lane count */
@@ -4766,7 +4772,7 @@ link_startup:
* but we can't be sure if the link is up until link startup
* succeeds. So reset the local Uni-Pro and try again.
*/
- if (ret && ufshcd_hba_enable(hba)) {
+ if (ret && retries && ufshcd_hba_enable(hba)) {
ufshcd_update_evt_hist(hba,
UFS_EVT_LINK_STARTUP_FAIL,
(u32)ret);
@@ -4931,7 +4937,7 @@ static int ufshcd_get_lu_wp(struct ufs_hba *hba,
*
*/
static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
- struct scsi_device *sdev)
+ const struct scsi_device *sdev)
{
if (hba->dev_info.f_power_on_wp_en &&
!hba->dev_info.is_lu_power_on_wp) {
@@ -5450,8 +5456,8 @@ int ufshcd_write_ee_control(struct ufs_hba *hba)
return err;
}
-int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, u16 *other_mask,
- u16 set, u16 clr)
+int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask,
+ const u16 *other_mask, u16 set, u16 clr)
{
u16 new_mask, ee_ctrl_mask;
int err = 0;
@@ -7388,7 +7394,8 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
*
* Returns calculated max ICC level for specific regulator
*/
-static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
+static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan,
+ const char *buff)
{
int i;
int curr_uA;
@@ -7435,7 +7442,7 @@ static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
* Returns calculated ICC level
*/
static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
- u8 *desc_buf, int len)
+ const u8 *desc_buf, int len)
{
u32 icc_level = 0;
@@ -7585,7 +7592,7 @@ out:
return ret;
}
-static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf)
+static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf)
{
struct ufs_dev_info *dev_info = &hba->dev_info;
u8 lun;
@@ -7656,7 +7663,7 @@ wb_disabled:
hba->caps &= ~UFSHCD_CAP_WB_EN;
}
-static void ufshcd_temp_notif_probe(struct ufs_hba *hba, u8 *desc_buf)
+static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf)
{
struct ufs_dev_info *dev_info = &hba->dev_info;
u32 ext_ufs_feature;
@@ -7890,7 +7897,7 @@ static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
u32 granularity, peer_granularity;
u32 pa_tactivate, peer_pa_tactivate;
u32 pa_tactivate_us, peer_pa_tactivate_us;
- u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
+ static const u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
&granularity);
@@ -8007,7 +8014,7 @@ struct ufs_ref_clk {
enum ufs_ref_clk_freq val;
};
-static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
+static const struct ufs_ref_clk ufs_ref_clk_freqs[] = {
{19200000, REF_CLK_FREQ_19_2_MHZ},
{26000000, REF_CLK_FREQ_26_MHZ},
{38400000, REF_CLK_FREQ_38_4_MHZ},
@@ -8319,6 +8326,7 @@ static struct scsi_host_template ufshcd_driver_template = {
.cmd_per_lun = UFSHCD_CMD_PER_LUN,
.can_queue = UFSHCD_CAN_QUEUE,
.max_segment_size = PRDT_DATA_BYTE_COUNT_MAX,
+ .max_sectors = (1 << 20) / SECTOR_SIZE, /* 1 MiB */
.max_host_blocked = 1,
.track_queue_depth = 1,
.sdev_groups = ufshcd_driver_groups,
@@ -8449,7 +8457,7 @@ static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
}
-static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
+int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
{
int ret = 0;
@@ -8465,6 +8473,7 @@ static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
out:
return ret;
}
+EXPORT_SYMBOL_GPL(ufshcd_get_vreg);
static int ufshcd_init_vreg(struct ufs_hba *hba)
{
@@ -8558,6 +8567,19 @@ out:
return ret;
}
+static enum ufs_ref_clk_freq ufshcd_parse_ref_clk_property(struct ufs_hba *hba)
+{
+ u32 freq;
+ int ret = device_property_read_u32(hba->dev, "ref-clk-freq", &freq);
+
+ if (ret) {
+ dev_dbg(hba->dev, "Cannot query 'ref-clk-freq' property = %d", ret);
+ return REF_CLK_FREQ_INVAL;
+ }
+
+ return ufs_get_bref_clk_from_hz(freq);
+}
+
static int ufshcd_init_clocks(struct ufs_hba *hba)
{
int ret = 0;
@@ -8651,6 +8673,9 @@ static int ufshcd_hba_init(struct ufs_hba *hba)
if (err)
goto out_disable_hba_vreg;
+ if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
+ hba->dev_ref_clk_freq = ufshcd_parse_ref_clk_property(hba);
+
err = ufshcd_setup_clocks(hba, true);
if (err)
goto out_disable_hba_vreg;
@@ -8716,6 +8741,8 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
struct scsi_device *sdp;
unsigned long flags;
int ret, retries;
+ unsigned long deadline;
+ int32_t remaining;
spin_lock_irqsave(hba->host->host_lock, flags);
sdp = hba->ufs_device_wlun;
@@ -8748,9 +8775,14 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
* callbacks hence set the RQF_PM flag so that it doesn't resume the
* already suspended childs.
*/
+ deadline = jiffies + 10 * HZ;
for (retries = 3; retries > 0; --retries) {
+ ret = -ETIMEDOUT;
+ remaining = deadline - jiffies;
+ if (remaining <= 0)
+ break;
ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
- START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
+ remaining / HZ, 0, 0, RQF_PM, NULL);
if (!scsi_status_is_check_condition(ret) ||
!scsi_sense_valid(&sshdr) ||
sshdr.sense_key != UNIT_ATTENTION)
@@ -9484,12 +9516,8 @@ EXPORT_SYMBOL(ufshcd_runtime_resume);
int ufshcd_shutdown(struct ufs_hba *hba)
{
if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
- goto out;
+ ufshcd_suspend(hba);
- pm_runtime_get_sync(hba->dev);
-
- ufshcd_suspend(hba);
-out:
hba->is_powered = false;
/* allow force shutdown even in case of errors */
return 0;
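The ufshcd_set_dev_pwr_mode() change above bounds the three START STOP UNIT retries by one shared 10-second deadline, so each attempt is only given whatever time remains. A standalone sketch of that pattern follows, with time() standing in for jiffies and a stubbed issue_cmd() in place of scsi_execute(); both are placeholders, not the kernel API.

#include <stdbool.h>
#include <time.h>

#define OVERALL_TIMEOUT_S	10
#define MAX_RETRIES		3

/* Placeholder for the real command submission (e.g. scsi_execute()). */
static bool issue_cmd(int timeout_s)
{
	(void)timeout_s;
	return true;
}

static int send_with_deadline(void)
{
	time_t deadline = time(NULL) + OVERALL_TIMEOUT_S;
	int attempt;

	for (attempt = 0; attempt < MAX_RETRIES; attempt++) {
		int remaining = (int)(deadline - time(NULL));

		if (remaining <= 0)
			return -1;	/* overall deadline expired */
		if (issue_cmd(remaining))
			return 0;	/* command accepted */
		/* otherwise retry, e.g. after a UNIT ATTENTION */
	}
	return -1;
}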
diff --git a/drivers/ufs/host/Kconfig b/drivers/ufs/host/Kconfig
index 82590224da13..4cc2dbd79ed0 100644
--- a/drivers/ufs/host/Kconfig
+++ b/drivers/ufs/host/Kconfig
@@ -92,6 +92,18 @@ config SCSI_UFS_HISI
Select this if you have UFS controller on Hisilicon chipset.
If unsure, say N.
+config SCSI_UFS_RENESAS
+ tristate "Renesas specific hooks to UFS controller platform driver"
+ depends on (ARCH_RENESAS || COMPILE_TEST) && SCSI_UFSHCD_PLATFORM
+ help
+ This selects the Renesas specific additions to UFSHCD platform driver.
+ UFS host on Renesas needs some vendor specific configuration before
+ accessing the hardware.
+
+ Select this if you have UFS controller on Renesas chipset.
+
+ If unsure, say N.
+
config SCSI_UFS_TI_J721E
tristate "TI glue layer for Cadence UFS Controller"
depends on OF && HAS_IOMEM && (ARCH_K3 || COMPILE_TEST)
diff --git a/drivers/ufs/host/Makefile b/drivers/ufs/host/Makefile
index e4be54273c98..7717ca93e7d5 100644
--- a/drivers/ufs/host/Makefile
+++ b/drivers/ufs/host/Makefile
@@ -11,4 +11,5 @@ obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o
obj-$(CONFIG_SCSI_UFS_MEDIATEK) += ufs-mediatek.o
+obj-$(CONFIG_SCSI_UFS_RENESAS) += ufs-renesas.o
obj-$(CONFIG_SCSI_UFS_TI_J721E) += ti-j721e-ufs.o
diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
index a81d8cbd542f..c3628a8645a5 100644
--- a/drivers/ufs/host/ufs-exynos.c
+++ b/drivers/ufs/host/ufs-exynos.c
@@ -52,11 +52,12 @@
#define HCI_ERR_EN_DME_LAYER 0x88
#define HCI_CLKSTOP_CTRL 0xB0
#define REFCLKOUT_STOP BIT(4)
+#define MPHY_APBCLK_STOP BIT(3)
#define REFCLK_STOP BIT(2)
#define UNIPRO_MCLK_STOP BIT(1)
#define UNIPRO_PCLK_STOP BIT(0)
#define CLK_STOP_MASK (REFCLKOUT_STOP | REFCLK_STOP |\
- UNIPRO_MCLK_STOP |\
+ UNIPRO_MCLK_STOP | MPHY_APBCLK_STOP|\
UNIPRO_PCLK_STOP)
#define HCI_MISC 0xB4
#define REFCLK_CTRL_EN BIT(7)
@@ -135,15 +136,9 @@ enum {
/*
* UNIPRO registers
*/
-#define UNIPRO_COMP_VERSION 0x000
-#define UNIPRO_DME_PWR_REQ 0x090
-#define UNIPRO_DME_PWR_REQ_POWERMODE 0x094
-#define UNIPRO_DME_PWR_REQ_LOCALL2TIMER0 0x098
-#define UNIPRO_DME_PWR_REQ_LOCALL2TIMER1 0x09C
-#define UNIPRO_DME_PWR_REQ_LOCALL2TIMER2 0x0A0
-#define UNIPRO_DME_PWR_REQ_REMOTEL2TIMER0 0x0A4
-#define UNIPRO_DME_PWR_REQ_REMOTEL2TIMER1 0x0A8
-#define UNIPRO_DME_PWR_REQ_REMOTEL2TIMER2 0x0AC
+#define UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER0 0x78B8
+#define UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER1 0x78BC
+#define UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER2 0x78C0
/*
* UFS Protector registers
@@ -157,7 +152,6 @@ enum {
#define CNTR_DIV_VAL 40
-static struct exynos_ufs_drv_data exynos_ufs_drvs;
static void exynos_ufs_auto_ctrl_hcc(struct exynos_ufs *ufs, bool en);
static void exynos_ufs_ctrl_clkstop(struct exynos_ufs *ufs, bool en);
@@ -657,8 +651,9 @@ static void exynos_ufs_config_phy_cap_attr(struct exynos_ufs *ufs)
if (attr->rx_min_actv_time_cap)
ufshcd_dme_set(hba,
- UIC_ARG_MIB_SEL(RX_MIN_ACTIVATETIME_CAP,
- i), attr->rx_min_actv_time_cap);
+ UIC_ARG_MIB_SEL(
+ RX_MIN_ACTIVATETIME_CAPABILITY, i),
+ attr->rx_min_actv_time_cap);
if (attr->rx_hibern8_time_cap)
ufshcd_dme_set(hba,
@@ -910,9 +905,13 @@ static int exynos_ufs_phy_init(struct exynos_ufs *ufs)
if (ret) {
dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
__func__, ret);
- goto out_exit_phy;
+ return ret;
}
+ ret = phy_power_on(generic_phy);
+ if (ret)
+ goto out_exit_phy;
+
return 0;
out_exit_phy:
@@ -1174,10 +1173,6 @@ static int exynos_ufs_init(struct ufs_hba *hba)
goto out;
}
- ret = phy_power_on(ufs->phy);
- if (ret)
- goto phy_off;
-
exynos_ufs_priv_init(hba, ufs);
if (ufs->drv_data->drv_init) {
@@ -1195,8 +1190,6 @@ static int exynos_ufs_init(struct ufs_hba *hba)
exynos_ufs_config_smu(ufs);
return 0;
-phy_off:
- phy_power_off(ufs->phy);
out:
hba->priv = NULL;
return ret;
@@ -1473,7 +1466,100 @@ static int exynosauto_ufs_vh_init(struct ufs_hba *hba)
return 0;
}
-static struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
+static int fsd_ufs_pre_link(struct exynos_ufs *ufs)
+{
+ int i;
+ struct ufs_hba *hba = ufs->hba;
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_CLK_PERIOD),
+ DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x201), 0x12);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x40);
+
+ for_each_ufs_tx_lane(ufs, i) {
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xAA, i),
+ DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8F, i), 0x3F);
+ }
+
+ for_each_ufs_rx_lane(ufs, i) {
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x12, i),
+ DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x5C, i), 0x38);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0F, i), 0x0);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x65, i), 0x1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x69, i), 0x1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x21, i), 0x0);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x22, i), 0x0);
+ }
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x0);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_AUTOMODE_THLD), 0x4E20);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), 0x2e820183);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0x0);
+
+ exynos_ufs_establish_connt(ufs);
+
+ return 0;
+}
+
+static int fsd_ufs_post_link(struct exynos_ufs *ufs)
+{
+ int i;
+ struct ufs_hba *hba = ufs->hba;
+ u32 hw_cap_min_tactivate;
+ u32 peer_rx_min_actv_time_cap;
+ u32 max_rx_hibern8_time_cap;
+
+ ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0x8F, 4),
+ &hw_cap_min_tactivate); /* HW Capability of MIN_TACTIVATE */
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
+ &peer_rx_min_actv_time_cap); /* PA_TActivate */
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
+ &max_rx_hibern8_time_cap); /* PA_Hibern8Time */
+
+ if (peer_rx_min_actv_time_cap >= hw_cap_min_tactivate)
+ ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
+ peer_rx_min_actv_time_cap + 1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), max_rx_hibern8_time_cap + 1);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_MODE), 0x01);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_SAVECONFIGTIME), 0xFA);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_MODE), 0x00);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x40);
+
+ for_each_ufs_rx_lane(ufs, i) {
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x35, i), 0x05);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x73, i), 0x01);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x41, i), 0x02);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x42, i), 0xAC);
+ }
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x0);
+
+ return 0;
+}
+
+static int fsd_ufs_pre_pwr_change(struct exynos_ufs *ufs,
+ struct ufs_pa_layer_attr *pwr)
+{
+ struct ufs_hba *hba = ufs->hba;
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), 0x1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), 0x1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), 12000);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), 32000);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), 16000);
+
+ unipro_writel(ufs, 12000, UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER0);
+ unipro_writel(ufs, 32000, UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER1);
+ unipro_writel(ufs, 16000, UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER2);
+
+ return 0;
+}
+
+static const struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
.name = "exynos_ufs",
.init = exynos_ufs_init,
.hce_enable_notify = exynos_ufs_hce_enable_notify,
@@ -1514,9 +1600,14 @@ static int exynos_ufs_probe(struct platform_device *pdev)
static int exynos_ufs_remove(struct platform_device *pdev)
{
struct ufs_hba *hba = platform_get_drvdata(pdev);
+ struct exynos_ufs *ufs = ufshcd_get_variant(hba);
pm_runtime_get_sync(&(pdev)->dev);
ufshcd_remove(hba);
+
+ phy_power_off(ufs->phy);
+ phy_exit(ufs->phy);
+
return 0;
}
@@ -1545,7 +1636,7 @@ static struct exynos_ufs_uic_attr exynos7_uic_attr = {
.pa_dbg_option_suite = 0x30103,
};
-static struct exynos_ufs_drv_data exynosauto_ufs_drvs = {
+static const struct exynos_ufs_drv_data exynosauto_ufs_drvs = {
.uic_attr = &exynos7_uic_attr,
.quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
@@ -1561,7 +1652,7 @@ static struct exynos_ufs_drv_data exynosauto_ufs_drvs = {
.post_pwr_change = exynosauto_ufs_post_pwr_change,
};
-static struct exynos_ufs_drv_data exynosauto_ufs_vh_drvs = {
+static const struct exynos_ufs_drv_data exynosauto_ufs_vh_drvs = {
.vops = &ufs_hba_exynosauto_vh_ops,
.quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
@@ -1573,7 +1664,7 @@ static struct exynos_ufs_drv_data exynosauto_ufs_vh_drvs = {
.opts = EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX,
};
-static struct exynos_ufs_drv_data exynos_ufs_drvs = {
+static const struct exynos_ufs_drv_data exynos_ufs_drvs = {
.uic_attr = &exynos7_uic_attr,
.quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR |
@@ -1595,6 +1686,47 @@ static struct exynos_ufs_drv_data exynos_ufs_drvs = {
.post_pwr_change = exynos7_ufs_post_pwr_change,
};
+static struct exynos_ufs_uic_attr fsd_uic_attr = {
+ .tx_trailingclks = 0x10,
+ .tx_dif_p_nsec = 3000000, /* unit: ns */
+ .tx_dif_n_nsec = 1000000, /* unit: ns */
+ .tx_high_z_cnt_nsec = 20000, /* unit: ns */
+ .tx_base_unit_nsec = 100000, /* unit: ns */
+ .tx_gran_unit_nsec = 4000, /* unit: ns */
+ .tx_sleep_cnt = 1000, /* unit: ns */
+ .tx_min_activatetime = 0xa,
+ .rx_filler_enable = 0x2,
+ .rx_dif_p_nsec = 1000000, /* unit: ns */
+ .rx_hibern8_wait_nsec = 4000000, /* unit: ns */
+ .rx_base_unit_nsec = 100000, /* unit: ns */
+ .rx_gran_unit_nsec = 4000, /* unit: ns */
+ .rx_sleep_cnt = 1280, /* unit: ns */
+ .rx_stall_cnt = 320, /* unit: ns */
+ .rx_hs_g1_sync_len_cap = SYNC_LEN_COARSE(0xf),
+ .rx_hs_g2_sync_len_cap = SYNC_LEN_COARSE(0xf),
+ .rx_hs_g3_sync_len_cap = SYNC_LEN_COARSE(0xf),
+ .rx_hs_g1_prep_sync_len_cap = PREP_LEN(0xf),
+ .rx_hs_g2_prep_sync_len_cap = PREP_LEN(0xf),
+ .rx_hs_g3_prep_sync_len_cap = PREP_LEN(0xf),
+ .pa_dbg_option_suite = 0x2E820183,
+};
+
+static const struct exynos_ufs_drv_data fsd_ufs_drvs = {
+ .uic_attr = &fsd_uic_attr,
+ .quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
+ UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR |
+ UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
+ UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING |
+ UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR,
+ .opts = EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL |
+ EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
+ EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR |
+ EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX,
+ .pre_link = fsd_ufs_pre_link,
+ .post_link = fsd_ufs_post_link,
+ .pre_pwr_change = fsd_ufs_pre_pwr_change,
+};
+
static const struct of_device_id exynos_ufs_of_match[] = {
{ .compatible = "samsung,exynos7-ufs",
.data = &exynos_ufs_drvs },
@@ -1602,6 +1734,8 @@ static const struct of_device_id exynos_ufs_of_match[] = {
.data = &exynosauto_ufs_drvs },
{ .compatible = "samsung,exynosautov9-ufs-vh",
.data = &exynosauto_ufs_vh_drvs },
+ { .compatible = "tesla,fsd-ufs",
+ .data = &fsd_ufs_drvs },
{},
};
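fsd_ufs_pre_link() above programs PA_DBG_CLK_PERIOD as DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate), i.e. the M-PHY clock period in nanoseconds rounded up so the programmed period is never shorter than the real one. A small worked example of that arithmetic; the 166 MHz rate is purely illustrative.

#include <stdio.h>

#define NSEC_PER_SEC		1000000000UL
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long mclk_rate = 166000000UL;	/* illustrative 166 MHz clock */
	unsigned long period_ns = DIV_ROUND_UP(NSEC_PER_SEC, mclk_rate);

	/* 1e9 / 166e6 = 6.02..., rounded up to 7 ns */
	printf("%lu Hz -> %lu ns per cycle\n", mclk_rate, period_ns);
	return 0;
}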
diff --git a/drivers/ufs/host/ufs-exynos.h b/drivers/ufs/host/ufs-exynos.h
index 0b0a3d530ca6..a4bd6646d7f1 100644
--- a/drivers/ufs/host/ufs-exynos.h
+++ b/drivers/ufs/host/ufs-exynos.h
@@ -22,6 +22,7 @@
#define PA_DBG_RXPHY_CFGUPDT 0x9519
#define PA_DBG_MODE 0x9529
#define PA_DBG_SKIP_RESET_PHY 0x9539
+#define PA_DBG_AUTOMODE_THLD 0x9536
#define PA_DBG_OV_TM 0x9540
#define PA_DBG_SKIP_LINE_RESET 0x9541
#define PA_DBG_LINE_RESET_REQ 0x9543
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index beabc3ccd30b..c958279bdd8f 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -16,6 +16,7 @@
#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/pm_qos.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/sched/clock.h>
@@ -30,26 +31,11 @@
#define CREATE_TRACE_POINTS
#include "ufs-mediatek-trace.h"
-#define ufs_mtk_smc(cmd, val, res) \
- arm_smccc_smc(MTK_SIP_UFS_CONTROL, \
- cmd, val, 0, 0, 0, 0, 0, &(res))
-
-#define ufs_mtk_va09_pwr_ctrl(res, on) \
- ufs_mtk_smc(UFS_MTK_SIP_VA09_PWR_CTRL, on, res)
-
-#define ufs_mtk_crypto_ctrl(res, enable) \
- ufs_mtk_smc(UFS_MTK_SIP_CRYPTO_CTRL, enable, res)
-
-#define ufs_mtk_ref_clk_notify(on, res) \
- ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, on, res)
-
-#define ufs_mtk_device_reset_ctrl(high, res) \
- ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res)
-
static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
- { .wmanufacturerid = UFS_VENDOR_MICRON,
+ { .wmanufacturerid = UFS_ANY_VENDOR,
.model = UFS_ANY_MODEL,
- .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM },
+ .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM |
+ UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
.model = "H9HQ21AFAMZDAR",
.quirk = UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES },
@@ -82,6 +68,13 @@ static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
}
+static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ return (host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO);
+}
+
static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
u32 tmp;
@@ -191,6 +184,14 @@ static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
hba->ahit = 0;
}
+
+ /*
+ * Turn on CLK_CG early to bypass abnormal ERR_CHK signal
+ * to prevent host hang issue
+ */
+ ufshcd_writel(hba,
+ ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80,
+ REG_UFS_XOUFS_CTRL);
}
return 0;
@@ -244,8 +245,9 @@ static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
if (host->ref_clk_enabled == on)
return 0;
+ ufs_mtk_ref_clk_notify(on, PRE_CHANGE, res);
+
if (on) {
- ufs_mtk_ref_clk_notify(on, res);
ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
} else {
ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
@@ -267,7 +269,7 @@ static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);
- ufs_mtk_ref_clk_notify(host->ref_clk_enabled, res);
+ ufs_mtk_ref_clk_notify(host->ref_clk_enabled, POST_CHANGE, res);
return -ETIMEDOUT;
@@ -275,8 +277,8 @@ out:
host->ref_clk_enabled = on;
if (on)
ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);
- else
- ufs_mtk_ref_clk_notify(on, res);
+
+ ufs_mtk_ref_clk_notify(on, POST_CHANGE, res);
return 0;
}
@@ -579,20 +581,38 @@ static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
host->caps |= UFS_MTK_CAP_BROKEN_VCC;
+ if (of_property_read_bool(np, "mediatek,ufs-pmc-via-fastauto"))
+ host->caps |= UFS_MTK_CAP_PMC_VIA_FASTAUTO;
+
dev_info(hba->dev, "caps: 0x%x", host->caps);
}
-static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool up)
+static void ufs_mtk_boost_pm_qos(struct ufs_hba *hba, bool boost)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- ufs_mtk_boost_crypt(hba, up);
- ufs_mtk_setup_ref_clk(hba, up);
+ if (!host || !host->pm_qos_init)
+ return;
+
+ cpu_latency_qos_update_request(&host->pm_qos_req,
+ boost ? 0 : PM_QOS_DEFAULT_VALUE);
+}
- if (up)
+static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ if (on) {
phy_power_on(host->mphy);
- else
+ ufs_mtk_setup_ref_clk(hba, on);
+ ufs_mtk_boost_crypt(hba, on);
+ ufs_mtk_boost_pm_qos(hba, on);
+ } else {
+ ufs_mtk_boost_pm_qos(hba, on);
+ ufs_mtk_boost_crypt(hba, on);
+ ufs_mtk_setup_ref_clk(hba, on);
phy_power_off(host->mphy);
+ }
}
/**
@@ -637,9 +657,9 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
}
if (clk_pwr_off)
- ufs_mtk_scale_perf(hba, false);
+ ufs_mtk_pwr_ctrl(hba, false);
} else if (on && status == POST_CHANGE) {
- ufs_mtk_scale_perf(hba, true);
+ ufs_mtk_pwr_ctrl(hba, true);
}
return ret;
@@ -675,6 +695,73 @@ static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
return hba->ufs_version;
}
+#define MAX_VCC_NAME 30
+static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba)
+{
+ struct ufs_vreg_info *info = &hba->vreg_info;
+ struct device_node *np = hba->dev->of_node;
+ struct device *dev = hba->dev;
+ char vcc_name[MAX_VCC_NAME];
+ struct arm_smccc_res res;
+ int err, ver;
+
+ if (hba->vreg_info.vcc)
+ return 0;
+
+ if (of_property_read_bool(np, "mediatek,ufs-vcc-by-num")) {
+ ufs_mtk_get_vcc_num(res);
+ if (res.a1 > UFS_VCC_NONE && res.a1 < UFS_VCC_MAX)
+ snprintf(vcc_name, MAX_VCC_NAME, "vcc-opt%lu", res.a1);
+ else
+ return -ENODEV;
+ } else if (of_property_read_bool(np, "mediatek,ufs-vcc-by-ver")) {
+ ver = (hba->dev_info.wspecversion & 0xF00) >> 8;
+ snprintf(vcc_name, MAX_VCC_NAME, "vcc-ufs%u", ver);
+ } else {
+ return 0;
+ }
+
+ err = ufshcd_populate_vreg(dev, vcc_name, &info->vcc);
+ if (err)
+ return err;
+
+ err = ufshcd_get_vreg(dev, info->vcc);
+ if (err)
+ return err;
+
+ err = regulator_enable(info->vcc->reg);
+ if (!err) {
+ info->vcc->enabled = true;
+ dev_info(dev, "%s: %s enabled\n", __func__, vcc_name);
+ }
+
+ return err;
+}
+
+static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba)
+{
+ struct ufs_vreg_info *info = &hba->vreg_info;
+ struct ufs_vreg **vreg_on, **vreg_off;
+
+ if (hba->dev_info.wspecversion >= 0x0300) {
+ vreg_on = &info->vccq;
+ vreg_off = &info->vccq2;
+ } else {
+ vreg_on = &info->vccq2;
+ vreg_off = &info->vccq;
+ }
+
+ if (*vreg_on)
+ (*vreg_on)->always_on = true;
+
+ if (*vreg_off) {
+ regulator_disable((*vreg_off)->reg);
+ devm_kfree(hba->dev, (*vreg_off)->name);
+ devm_kfree(hba->dev, *vreg_off);
+ *vreg_off = NULL;
+ }
+}
+
/**
* ufs_mtk_init - find other essential mmio bases
* @hba: host controller instance
@@ -754,6 +841,26 @@ out:
return err;
}
+static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *dev_req_params)
+{
+ if (!ufs_mtk_is_pmc_via_fastauto(hba))
+ return false;
+
+ if (dev_req_params->hs_rate == hba->pwr_info.hs_rate)
+ return false;
+
+ if (dev_req_params->pwr_tx != FAST_MODE &&
+ dev_req_params->gear_tx < UFS_HS_G4)
+ return false;
+
+ if (dev_req_params->pwr_rx != FAST_MODE &&
+ dev_req_params->gear_rx < UFS_HS_G4)
+ return false;
+
+ return true;
+}
+
static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
@@ -763,8 +870,8 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
int ret;
ufshcd_init_pwr_dev_param(&host_cap);
- host_cap.hs_rx_gear = UFS_HS_G4;
- host_cap.hs_tx_gear = UFS_HS_G4;
+ host_cap.hs_rx_gear = UFS_HS_G5;
+ host_cap.hs_tx_gear = UFS_HS_G5;
ret = ufshcd_get_pwr_dev_param(&host_cap,
dev_max_params,
@@ -774,6 +881,32 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
__func__);
}
+ if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) {
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), UFS_HS_G1);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
+ dev_req_params->lane_tx);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
+ dev_req_params->lane_rx);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
+ dev_req_params->hs_rate);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE),
+ PA_NO_ADAPT);
+
+ ret = ufshcd_uic_change_pwr_mode(hba,
+ FASTAUTO_MODE << 4 | FASTAUTO_MODE);
+
+ if (ret) {
+ dev_err(hba->dev, "%s: HSG1B FASTAUTO failed ret=%d\n",
+ __func__, ret);
+ }
+ }
+
if (host->hw_ver.major >= 3) {
ret = ufshcd_dme_configure_adapt(hba,
dev_req_params->gear_tx,
@@ -963,6 +1096,11 @@ static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
{
int err;
+ /* Disable reset confirm feature by UniPro */
+ ufshcd_writel(hba,
+ (ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) & ~0x100),
+ REG_UFS_XOUFS_CTRL);
+
err = ufs_mtk_unipro_set_lpm(hba, true);
if (err) {
/* Resume UniPro state for following error recovery */
@@ -973,17 +1111,52 @@ static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
return 0;
}
-static void ufs_mtk_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
+static void ufs_mtk_vccqx_set_lpm(struct ufs_hba *hba, bool lpm)
+{
+ struct ufs_vreg *vccqx = NULL;
+
+ if (hba->vreg_info.vccq)
+ vccqx = hba->vreg_info.vccq;
+ else
+ vccqx = hba->vreg_info.vccq2;
+
+ regulator_set_mode(vccqx->reg,
+ lpm ? REGULATOR_MODE_IDLE : REGULATOR_MODE_NORMAL);
+}
+
+static void ufs_mtk_vsx_set_lpm(struct ufs_hba *hba, bool lpm)
+{
+ struct arm_smccc_res res;
+
+ ufs_mtk_device_pwr_ctrl(!lpm,
+ (unsigned long)hba->dev_info.wspecversion,
+ res);
+}
+
+static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
{
- if (!hba->vreg_info.vccq2 || !hba->vreg_info.vcc)
+ if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2)
+ return;
+
+ /* Skip if VCC is assumed always-on */
+ if (!hba->vreg_info.vcc)
+ return;
+
+ /* Bypass LPM when device is still active */
+ if (lpm && ufshcd_is_ufs_dev_active(hba))
+ return;
+
+ /* Bypass LPM if VCC is enabled */
+ if (lpm && hba->vreg_info.vcc->enabled)
return;
- if (lpm && !hba->vreg_info.vcc->enabled)
- regulator_set_mode(hba->vreg_info.vccq2->reg,
- REGULATOR_MODE_IDLE);
- else if (!lpm)
- regulator_set_mode(hba->vreg_info.vccq2->reg,
- REGULATOR_MODE_NORMAL);
+ if (lpm) {
+ ufs_mtk_vccqx_set_lpm(hba, lpm);
+ ufs_mtk_vsx_set_lpm(hba, lpm);
+ } else {
+ ufs_mtk_vsx_set_lpm(hba, lpm);
+ ufs_mtk_vccqx_set_lpm(hba, lpm);
+ }
}
static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
@@ -1026,7 +1199,6 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
* ufshcd_suspend() re-enabling regulators while vreg is still
* in low-power mode.
*/
- ufs_mtk_vreg_set_lpm(hba, true);
err = ufs_mtk_mphy_power_on(hba, false);
if (err)
goto fail;
@@ -1035,6 +1207,8 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
if (ufshcd_is_link_off(hba))
ufs_mtk_device_reset_ctrl(0, res);
+ ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, false, res);
+
return 0;
fail:
/*
@@ -1049,13 +1223,17 @@ fail:
static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
int err;
+ struct arm_smccc_res res;
+
+ if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
+ ufs_mtk_dev_vreg_set_lpm(hba, false);
+
+ ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, true, res);
err = ufs_mtk_mphy_power_on(hba, true);
if (err)
goto fail;
- ufs_mtk_vreg_set_lpm(hba, false);
-
if (ufshcd_is_link_hibern8(hba)) {
err = ufs_mtk_link_set_hpm(hba);
if (err)
@@ -1087,8 +1265,10 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
struct ufs_dev_info *dev_info = &hba->dev_info;
u16 mid = dev_info->wmanufacturerid;
- if (mid == UFS_VENDOR_SAMSUNG)
+ if (mid == UFS_VENDOR_SAMSUNG) {
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 10);
+ }
/*
* Decide waiting time before gating reference clock and
@@ -1104,7 +1284,6 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
else
ufs_mtk_setup_ref_clk_wait_us(hba,
REFCLK_DEFAULT_WAIT_US);
-
return 0;
}
@@ -1122,6 +1301,9 @@ static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
}
+
+ ufs_mtk_vreg_fix_vcc(hba);
+ ufs_mtk_vreg_fix_vccqx(hba);
}
static void ufs_mtk_event_notify(struct ufs_hba *hba,
@@ -1220,9 +1402,59 @@ static int ufs_mtk_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int ufs_mtk_system_suspend(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ufshcd_system_suspend(dev);
+ if (ret)
+ return ret;
+
+ ufs_mtk_dev_vreg_set_lpm(hba, true);
+
+ return 0;
+}
+
+static int ufs_mtk_system_resume(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ ufs_mtk_dev_vreg_set_lpm(hba, false);
+
+ return ufshcd_system_resume(dev);
+}
+#endif
+
+static int ufs_mtk_runtime_suspend(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = ufshcd_runtime_suspend(dev);
+ if (ret)
+ return ret;
+
+ ufs_mtk_dev_vreg_set_lpm(hba, true);
+
+ return 0;
+}
+
+static int ufs_mtk_runtime_resume(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ ufs_mtk_dev_vreg_set_lpm(hba, false);
+
+ return ufshcd_runtime_resume(dev);
+}
+
static const struct dev_pm_ops ufs_mtk_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
- SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(ufs_mtk_system_suspend,
+ ufs_mtk_system_resume)
+ SET_RUNTIME_PM_OPS(ufs_mtk_runtime_suspend,
+ ufs_mtk_runtime_resume, NULL)
.prepare = ufshcd_suspend_prepare,
.complete = ufshcd_resume_complete,
};
diff --git a/drivers/ufs/host/ufs-mediatek.h b/drivers/ufs/host/ufs-mediatek.h
index 414dca86c09f..aa26d415527b 100644
--- a/drivers/ufs/host/ufs-mediatek.h
+++ b/drivers/ufs/host/ufs-mediatek.h
@@ -7,11 +7,13 @@
#define _UFS_MEDIATEK_H
#include <linux/bitops.h>
+#include <linux/pm_qos.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>
/*
* Vendor specific UFSHCI Registers
*/
+#define REG_UFS_XOUFS_CTRL 0x140
#define REG_UFS_REFCLK_CTRL 0x144
#define REG_UFS_EXTREG 0x2100
#define REG_UFS_MPHYCTRL 0x2200
@@ -83,6 +85,9 @@ enum {
#define UFS_MTK_SIP_DEVICE_RESET BIT(1)
#define UFS_MTK_SIP_CRYPTO_CTRL BIT(2)
#define UFS_MTK_SIP_REF_CLK_NOTIFICATION BIT(3)
+#define UFS_MTK_SIP_HOST_PWR_CTRL BIT(5)
+#define UFS_MTK_SIP_GET_VCC_NUM BIT(6)
+#define UFS_MTK_SIP_DEVICE_PWR_CTRL BIT(7)
/*
* VS_DEBUGCLOCKENABLE
@@ -108,6 +113,7 @@ enum ufs_mtk_host_caps {
UFS_MTK_CAP_VA09_PWR_CTRL = 1 << 1,
UFS_MTK_CAP_DISABLE_AH8 = 1 << 2,
UFS_MTK_CAP_BROKEN_VCC = 1 << 3,
+ UFS_MTK_CAP_PMC_VIA_FASTAUTO = 1 << 6,
};
struct ufs_mtk_crypt_cfg {
@@ -126,6 +132,7 @@ struct ufs_mtk_hw_ver {
struct ufs_mtk_host {
struct phy *mphy;
+ struct pm_qos_request pm_qos_req;
struct regulator *reg_va09;
struct reset_control *hci_reset;
struct reset_control *unipro_reset;
@@ -135,6 +142,7 @@ struct ufs_mtk_host {
struct ufs_mtk_hw_ver hw_ver;
enum ufs_mtk_host_caps caps;
bool mphy_powered_on;
+ bool pm_qos_init;
bool unipro_lpm;
bool ref_clk_enabled;
u16 ref_clk_ungating_wait_us;
@@ -142,4 +150,70 @@ struct ufs_mtk_host {
u32 ip_ver;
};
+/*
+ * Multi-VCC by Numbering
+ */
+enum ufs_mtk_vcc_num {
+ UFS_VCC_NONE = 0,
+ UFS_VCC_1,
+ UFS_VCC_2,
+ UFS_VCC_MAX
+};
+
+/*
+ * Host Power Control options
+ */
+enum {
+ HOST_PWR_HCI = 0,
+ HOST_PWR_MPHY
+};
+
+/*
+ * SMC call wrapper function
+ */
+struct ufs_mtk_smc_arg {
+ unsigned long cmd;
+ struct arm_smccc_res *res;
+ unsigned long v1;
+ unsigned long v2;
+ unsigned long v3;
+ unsigned long v4;
+ unsigned long v5;
+ unsigned long v6;
+ unsigned long v7;
+};
+
+static void _ufs_mtk_smc(struct ufs_mtk_smc_arg s)
+{
+ arm_smccc_smc(MTK_SIP_UFS_CONTROL,
+ s.cmd, s.v1, s.v2, s.v3, s.v4, s.v5, s.v6, s.res);
+}
+
+#define ufs_mtk_smc(...) \
+ _ufs_mtk_smc((struct ufs_mtk_smc_arg) {__VA_ARGS__})
+
+/*
+ * SMC call interface
+ */
+#define ufs_mtk_va09_pwr_ctrl(res, on) \
+ ufs_mtk_smc(UFS_MTK_SIP_VA09_PWR_CTRL, &(res), on)
+
+#define ufs_mtk_crypto_ctrl(res, enable) \
+ ufs_mtk_smc(UFS_MTK_SIP_CRYPTO_CTRL, &(res), enable)
+
+#define ufs_mtk_ref_clk_notify(on, stage, res) \
+ ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, &(res), on, stage)
+
+#define ufs_mtk_device_reset_ctrl(high, res) \
+ ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, &(res), high)
+
+#define ufs_mtk_host_pwr_ctrl(opt, on, res) \
+ ufs_mtk_smc(UFS_MTK_SIP_HOST_PWR_CTRL, &(res), opt, on)
+
+#define ufs_mtk_get_vcc_num(res) \
+ ufs_mtk_smc(UFS_MTK_SIP_GET_VCC_NUM, &(res))
+
+#define ufs_mtk_device_pwr_ctrl(on, ufs_ver, res) \
+ ufs_mtk_smc(UFS_MTK_SIP_DEVICE_PWR_CTRL, &(res), on, ufs_ver)
+
#endif /* !_UFS_MEDIATEK_H */
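The ufs_mtk_smc() wrapper above packs its variadic arguments into a struct ufs_mtk_smc_arg compound literal, so each SIP helper is just a positional argument list in declaration order: .cmd, .res, then up to seven values. A minimal usage sketch, assuming only the definitions in this header (the surrounding function is hypothetical):

static void example_host_pwr_on(struct ufs_hba *hba)
{
	struct arm_smccc_res res;

	/*
	 * Expands to:
	 *   _ufs_mtk_smc((struct ufs_mtk_smc_arg){
	 *           UFS_MTK_SIP_HOST_PWR_CTRL, &res, HOST_PWR_HCI, true });
	 * i.e. .cmd, .res, .v1 and .v2 are filled positionally and the
	 * remaining v3..v7 members default to zero.
	 */
	ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, true, res);

	if (res.a0)
		dev_warn(hba->dev, "host power ctrl failed: %lu\n", res.a0);
}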
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index f10d4668814c..473fad83701e 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -58,19 +58,6 @@ static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
ufshcd_dump_regs(hba, offset, len * 4, prefix);
}
-static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
-{
- int err = 0;
-
- err = ufshcd_dme_get(hba,
- UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes);
- if (err)
- dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n",
- __func__, err);
-
- return err;
-}
-
static int ufs_qcom_host_clk_get(struct device *dev,
const char *name, struct clk **clk_out, bool optional)
{
@@ -194,13 +181,6 @@ out:
return err;
}
-static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
-{
- u32 tx_lanes;
-
- return ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
-}
-
static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
{
int err;
@@ -570,9 +550,6 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
err = ufshcd_disable_host_tx_lcc(hba);
break;
- case POST_CHANGE:
- ufs_qcom_link_startup_post_change(hba);
- break;
default:
break;
}
diff --git a/drivers/ufs/host/ufs-renesas.c b/drivers/ufs/host/ufs-renesas.c
new file mode 100644
index 000000000000..f8a5e79ed3b4
--- /dev/null
+++ b/drivers/ufs/host/ufs-renesas.c
@@ -0,0 +1,412 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Renesas UFS host controller driver
+ *
+ * Copyright (C) 2022 Renesas Electronics Corporation
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <ufs/ufshcd.h>
+
+#include "ufshcd-pltfrm.h"
+
+struct ufs_renesas_priv {
+ bool initialized; /* The hardware needs initialization once */
+};
+
+enum {
+ SET_PHY_INDEX_LO = 0,
+ SET_PHY_INDEX_HI,
+ TIMER_INDEX,
+ MAX_INDEX
+};
+
+enum ufs_renesas_init_param_mode {
+ MODE_RESTORE,
+ MODE_SET,
+ MODE_SAVE,
+ MODE_POLL,
+ MODE_WAIT,
+ MODE_WRITE,
+};
+
+#define PARAM_RESTORE(_reg, _index) \
+ { .mode = MODE_RESTORE, .reg = _reg, .index = _index }
+#define PARAM_SET(_index, _set) \
+ { .mode = MODE_SET, .index = _index, .u.set = _set }
+#define PARAM_SAVE(_reg, _mask, _index) \
+ { .mode = MODE_SAVE, .reg = _reg, .mask = (u32)(_mask), \
+ .index = _index }
+#define PARAM_POLL(_reg, _expected, _mask) \
+ { .mode = MODE_POLL, .reg = _reg, .u.expected = _expected, \
+ .mask = (u32)(_mask) }
+#define PARAM_WAIT(_delay_us) \
+ { .mode = MODE_WAIT, .u.delay_us = _delay_us }
+
+#define PARAM_WRITE(_reg, _val) \
+ { .mode = MODE_WRITE, .reg = _reg, .u.val = _val }
+
+#define PARAM_WRITE_D0_D4(_d0, _d4) \
+ PARAM_WRITE(0xd0, _d0), PARAM_WRITE(0xd4, _d4)
+
+#define PARAM_WRITE_800_80C_POLL(_addr, _data_800) \
+ PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), \
+ PARAM_WRITE_D0_D4(0x00000800, ((_data_800) << 16) | BIT(8) | (_addr)), \
+ PARAM_WRITE(0xd0, 0x0000080c), \
+ PARAM_POLL(0xd4, BIT(8), BIT(8))
+
+#define PARAM_RESTORE_800_80C_POLL(_index) \
+ PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), \
+ PARAM_WRITE(0xd0, 0x00000800), \
+ PARAM_RESTORE(0xd4, _index), \
+ PARAM_WRITE(0xd0, 0x0000080c), \
+ PARAM_POLL(0xd4, BIT(8), BIT(8))
+
+#define PARAM_WRITE_804_80C_POLL(_addr, _data_804) \
+ PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), \
+ PARAM_WRITE_D0_D4(0x00000804, ((_data_804) << 16) | BIT(8) | (_addr)), \
+ PARAM_WRITE(0xd0, 0x0000080c), \
+ PARAM_POLL(0xd4, BIT(8), BIT(8))
+
+#define PARAM_WRITE_828_82C_POLL(_data_828) \
+ PARAM_WRITE_D0_D4(0x0000082c, 0x0f000000), \
+ PARAM_WRITE_D0_D4(0x00000828, _data_828), \
+ PARAM_WRITE(0xd0, 0x0000082c), \
+ PARAM_POLL(0xd4, _data_828, _data_828)
+
+#define PARAM_WRITE_PHY(_addr16, _data16) \
+ PARAM_WRITE(0xf0, 1), \
+ PARAM_WRITE_800_80C_POLL(0x16, (_addr16) & 0xff), \
+ PARAM_WRITE_800_80C_POLL(0x17, ((_addr16) >> 8) & 0xff), \
+ PARAM_WRITE_800_80C_POLL(0x18, (_data16) & 0xff), \
+ PARAM_WRITE_800_80C_POLL(0x19, ((_data16) >> 8) & 0xff), \
+ PARAM_WRITE_800_80C_POLL(0x1c, 0x01), \
+ PARAM_WRITE_828_82C_POLL(0x0f000000), \
+ PARAM_WRITE(0xf0, 0)
+
+#define PARAM_SET_PHY(_addr16, _data16) \
+ PARAM_WRITE(0xf0, 1), \
+ PARAM_WRITE_800_80C_POLL(0x16, (_addr16) & 0xff), \
+ PARAM_WRITE_800_80C_POLL(0x17, ((_addr16) >> 8) & 0xff), \
+ PARAM_WRITE_800_80C_POLL(0x1c, 0x01), \
+ PARAM_WRITE_828_82C_POLL(0x0f000000), \
+ PARAM_WRITE_804_80C_POLL(0x1a, 0), \
+ PARAM_WRITE(0xd0, 0x00000808), \
+ PARAM_SAVE(0xd4, 0xff, SET_PHY_INDEX_LO), \
+ PARAM_WRITE_804_80C_POLL(0x1b, 0), \
+ PARAM_WRITE(0xd0, 0x00000808), \
+ PARAM_SAVE(0xd4, 0xff, SET_PHY_INDEX_HI), \
+ PARAM_WRITE_828_82C_POLL(0x0f000000), \
+ PARAM_WRITE(0xf0, 0), \
+ PARAM_WRITE(0xf0, 1), \
+ PARAM_WRITE_800_80C_POLL(0x16, (_addr16) & 0xff), \
+ PARAM_WRITE_800_80C_POLL(0x17, ((_addr16) >> 8) & 0xff), \
+ PARAM_SET(SET_PHY_INDEX_LO, ((_data16 & 0xff) << 16) | BIT(8) | 0x18), \
+ PARAM_RESTORE_800_80C_POLL(SET_PHY_INDEX_LO), \
+ PARAM_SET(SET_PHY_INDEX_HI, (((_data16 >> 8) & 0xff) << 16) | BIT(8) | 0x19), \
+ PARAM_RESTORE_800_80C_POLL(SET_PHY_INDEX_HI), \
+ PARAM_WRITE_800_80C_POLL(0x1c, 0x01), \
+ PARAM_WRITE_828_82C_POLL(0x0f000000), \
+ PARAM_WRITE(0xf0, 0)
+
+#define PARAM_INDIRECT_WRITE(_gpio, _addr, _data_800) \
+ PARAM_WRITE(0xf0, _gpio), \
+ PARAM_WRITE_800_80C_POLL(_addr, _data_800), \
+ PARAM_WRITE_828_82C_POLL(0x0f000000), \
+ PARAM_WRITE(0xf0, 0)
+
+#define PARAM_INDIRECT_POLL(_gpio, _addr, _expected, _mask) \
+ PARAM_WRITE(0xf0, _gpio), \
+ PARAM_WRITE_800_80C_POLL(_addr, 0), \
+ PARAM_WRITE(0xd0, 0x00000808), \
+ PARAM_POLL(0xd4, _expected, _mask), \
+ PARAM_WRITE(0xf0, 0)
+
+struct ufs_renesas_init_param {
+ enum ufs_renesas_init_param_mode mode;
+ u32 reg;
+ union {
+ u32 expected;
+ u32 delay_us;
+ u32 set;
+ u32 val;
+ } u;
+ u32 mask;
+ u32 index;
+};
+
+/* This setting is for SERIES B */
+static const struct ufs_renesas_init_param ufs_param[] = {
+ PARAM_WRITE(0xc0, 0x49425308),
+ PARAM_WRITE_D0_D4(0x00000104, 0x00000002),
+ PARAM_WAIT(1),
+ PARAM_WRITE_D0_D4(0x00000828, 0x00000200),
+ PARAM_WAIT(1),
+ PARAM_WRITE_D0_D4(0x00000828, 0x00000000),
+ PARAM_WRITE_D0_D4(0x00000104, 0x00000001),
+ PARAM_WRITE_D0_D4(0x00000940, 0x00000001),
+ PARAM_WAIT(1),
+ PARAM_WRITE_D0_D4(0x00000940, 0x00000000),
+
+ PARAM_WRITE(0xc0, 0x49425308),
+ PARAM_WRITE(0xc0, 0x41584901),
+
+ PARAM_WRITE_D0_D4(0x0000080c, 0x00000100),
+ PARAM_WRITE_D0_D4(0x00000804, 0x00000000),
+ PARAM_WRITE(0xd0, 0x0000080c),
+ PARAM_POLL(0xd4, BIT(8), BIT(8)),
+
+ PARAM_WRITE(REG_CONTROLLER_ENABLE, 0x00000001),
+
+ PARAM_WRITE(0xd0, 0x00000804),
+ PARAM_POLL(0xd4, BIT(8) | BIT(6) | BIT(0), BIT(8) | BIT(6) | BIT(0)),
+
+ PARAM_WRITE(0xd0, 0x00000d00),
+ PARAM_SAVE(0xd4, 0x0000ffff, TIMER_INDEX),
+ PARAM_WRITE(0xd4, 0x00000000),
+ PARAM_WRITE_D0_D4(0x0000082c, 0x0f000000),
+ PARAM_WRITE_D0_D4(0x00000828, 0x08000000),
+ PARAM_WRITE(0xd0, 0x0000082c),
+ PARAM_POLL(0xd4, BIT(27), BIT(27)),
+ PARAM_WRITE(0xd0, 0x00000d2c),
+ PARAM_POLL(0xd4, BIT(0), BIT(0)),
+
+ /* phy setup */
+ PARAM_INDIRECT_WRITE(1, 0x01, 0x001f),
+ PARAM_INDIRECT_WRITE(7, 0x5d, 0x0014),
+ PARAM_INDIRECT_WRITE(7, 0x5e, 0x0014),
+ PARAM_INDIRECT_WRITE(7, 0x0d, 0x0003),
+ PARAM_INDIRECT_WRITE(7, 0x0e, 0x0007),
+ PARAM_INDIRECT_WRITE(7, 0x5f, 0x0003),
+ PARAM_INDIRECT_WRITE(7, 0x60, 0x0003),
+ PARAM_INDIRECT_WRITE(7, 0x5b, 0x00a6),
+ PARAM_INDIRECT_WRITE(7, 0x5c, 0x0003),
+
+ PARAM_INDIRECT_POLL(7, 0x3c, 0, BIT(7)),
+ PARAM_INDIRECT_POLL(7, 0x4c, 0, BIT(4)),
+
+ PARAM_INDIRECT_WRITE(1, 0x32, 0x0080),
+ PARAM_INDIRECT_WRITE(1, 0x1f, 0x0001),
+ PARAM_INDIRECT_WRITE(0, 0x2c, 0x0001),
+ PARAM_INDIRECT_WRITE(0, 0x32, 0x0087),
+
+ PARAM_INDIRECT_WRITE(1, 0x4d, 0x0061),
+ PARAM_INDIRECT_WRITE(4, 0x9b, 0x0009),
+ PARAM_INDIRECT_WRITE(4, 0xa6, 0x0005),
+ PARAM_INDIRECT_WRITE(4, 0xa5, 0x0058),
+ PARAM_INDIRECT_WRITE(1, 0x39, 0x0027),
+ PARAM_INDIRECT_WRITE(1, 0x47, 0x004c),
+
+ PARAM_INDIRECT_WRITE(7, 0x0d, 0x0002),
+ PARAM_INDIRECT_WRITE(7, 0x0e, 0x0007),
+
+ PARAM_WRITE_PHY(0x0028, 0x0061),
+ PARAM_WRITE_PHY(0x4014, 0x0061),
+ PARAM_SET_PHY(0x401c, BIT(2)),
+ PARAM_WRITE_PHY(0x4000, 0x0000),
+ PARAM_WRITE_PHY(0x4001, 0x0000),
+
+ PARAM_WRITE_PHY(0x10ae, 0x0001),
+ PARAM_WRITE_PHY(0x10ad, 0x0000),
+ PARAM_WRITE_PHY(0x10af, 0x0001),
+ PARAM_WRITE_PHY(0x10b6, 0x0001),
+ PARAM_WRITE_PHY(0x10ae, 0x0000),
+
+ PARAM_WRITE_PHY(0x10ae, 0x0001),
+ PARAM_WRITE_PHY(0x10ad, 0x0000),
+ PARAM_WRITE_PHY(0x10af, 0x0002),
+ PARAM_WRITE_PHY(0x10b6, 0x0001),
+ PARAM_WRITE_PHY(0x10ae, 0x0000),
+
+ PARAM_WRITE_PHY(0x10ae, 0x0001),
+ PARAM_WRITE_PHY(0x10ad, 0x0080),
+ PARAM_WRITE_PHY(0x10af, 0x0000),
+ PARAM_WRITE_PHY(0x10b6, 0x0001),
+ PARAM_WRITE_PHY(0x10ae, 0x0000),
+
+ PARAM_WRITE_PHY(0x10ae, 0x0001),
+ PARAM_WRITE_PHY(0x10ad, 0x0080),
+ PARAM_WRITE_PHY(0x10af, 0x001a),
+ PARAM_WRITE_PHY(0x10b6, 0x0001),
+ PARAM_WRITE_PHY(0x10ae, 0x0000),
+
+ PARAM_INDIRECT_WRITE(7, 0x70, 0x0016),
+ PARAM_INDIRECT_WRITE(7, 0x71, 0x0016),
+ PARAM_INDIRECT_WRITE(7, 0x72, 0x0014),
+ PARAM_INDIRECT_WRITE(7, 0x73, 0x0014),
+ PARAM_INDIRECT_WRITE(7, 0x74, 0x0000),
+ PARAM_INDIRECT_WRITE(7, 0x75, 0x0000),
+ PARAM_INDIRECT_WRITE(7, 0x76, 0x0010),
+ PARAM_INDIRECT_WRITE(7, 0x77, 0x0010),
+ PARAM_INDIRECT_WRITE(7, 0x78, 0x00ff),
+ PARAM_INDIRECT_WRITE(7, 0x79, 0x0000),
+
+ PARAM_INDIRECT_WRITE(7, 0x19, 0x0007),
+
+ PARAM_INDIRECT_WRITE(7, 0x1a, 0x0007),
+
+ PARAM_INDIRECT_WRITE(7, 0x24, 0x000c),
+
+ PARAM_INDIRECT_WRITE(7, 0x25, 0x000c),
+
+ PARAM_INDIRECT_WRITE(7, 0x62, 0x0000),
+ PARAM_INDIRECT_WRITE(7, 0x63, 0x0000),
+ PARAM_INDIRECT_WRITE(7, 0x5d, 0x0014),
+ PARAM_INDIRECT_WRITE(7, 0x5e, 0x0017),
+ PARAM_INDIRECT_WRITE(7, 0x5d, 0x0004),
+ PARAM_INDIRECT_WRITE(7, 0x5e, 0x0017),
+ PARAM_INDIRECT_POLL(7, 0x55, 0, BIT(6)),
+ PARAM_INDIRECT_POLL(7, 0x41, 0, BIT(7)),
+ /* end of phy setup */
+
+ PARAM_WRITE(0xf0, 0),
+ PARAM_WRITE(0xd0, 0x00000d00),
+ PARAM_RESTORE(0xd4, TIMER_INDEX),
+};
+
+static void ufs_renesas_dbg_register_dump(struct ufs_hba *hba)
+{
+ ufshcd_dump_regs(hba, 0xc0, 0x40, "regs: 0xc0 + ");
+}
+
+static void ufs_renesas_reg_control(struct ufs_hba *hba,
+ const struct ufs_renesas_init_param *p)
+{
+ static u32 save[MAX_INDEX];
+ int ret;
+ u32 val;
+
+ WARN_ON(p->index >= MAX_INDEX);
+
+ switch (p->mode) {
+ case MODE_RESTORE:
+ ufshcd_writel(hba, save[p->index], p->reg);
+ break;
+ case MODE_SET:
+ save[p->index] |= p->u.set;
+ break;
+ case MODE_SAVE:
+ save[p->index] = ufshcd_readl(hba, p->reg) & p->mask;
+ break;
+ case MODE_POLL:
+ ret = readl_poll_timeout_atomic(hba->mmio_base + p->reg,
+ val,
+ (val & p->mask) == p->u.expected,
+ 10, 1000);
+ if (ret)
+ dev_err(hba->dev, "%s: poll failed %d (%08x, %08x, %08x)\n",
+ __func__, ret, val, p->mask, p->u.expected);
+ break;
+ case MODE_WAIT:
+ if (p->u.delay_us > 1000)
+ mdelay(DIV_ROUND_UP(p->u.delay_us, 1000));
+ else
+ udelay(p->u.delay_us);
+ break;
+ case MODE_WRITE:
+ ufshcd_writel(hba, p->u.val, p->reg);
+ break;
+ default:
+ break;
+ }
+}
+
+static void ufs_renesas_pre_init(struct ufs_hba *hba)
+{
+ const struct ufs_renesas_init_param *p = ufs_param;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ufs_param); i++)
+ ufs_renesas_reg_control(hba, &p[i]);
+}
+
+static int ufs_renesas_hce_enable_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ struct ufs_renesas_priv *priv = ufshcd_get_variant(hba);
+
+ if (priv->initialized)
+ return 0;
+
+ if (status == PRE_CHANGE)
+ ufs_renesas_pre_init(hba);
+
+ priv->initialized = true;
+
+ return 0;
+}
+
+static int ufs_renesas_setup_clocks(struct ufs_hba *hba, bool on,
+ enum ufs_notify_change_status status)
+{
+ if (on && status == PRE_CHANGE)
+ pm_runtime_get_sync(hba->dev);
+ else if (!on && status == POST_CHANGE)
+ pm_runtime_put(hba->dev);
+
+ return 0;
+}
+
+static int ufs_renesas_init(struct ufs_hba *hba)
+{
+ struct ufs_renesas_priv *priv;
+
+ priv = devm_kmalloc(hba->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ ufshcd_set_variant(hba, priv);
+
+ hba->quirks |= UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS | UFSHCD_QUIRK_HIBERN_FASTAUTO;
+
+ return 0;
+}
+
+static const struct ufs_hba_variant_ops ufs_renesas_vops = {
+ .name = "renesas",
+ .init = ufs_renesas_init,
+ .setup_clocks = ufs_renesas_setup_clocks,
+ .hce_enable_notify = ufs_renesas_hce_enable_notify,
+ .dbg_register_dump = ufs_renesas_dbg_register_dump,
+};
+
+static const struct of_device_id __maybe_unused ufs_renesas_of_match[] = {
+ { .compatible = "renesas,r8a779f0-ufs" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ufs_renesas_of_match);
+
+static int ufs_renesas_probe(struct platform_device *pdev)
+{
+ return ufshcd_pltfrm_init(pdev, &ufs_renesas_vops);
+}
+
+static int ufs_renesas_remove(struct platform_device *pdev)
+{
+ struct ufs_hba *hba = platform_get_drvdata(pdev);
+
+ ufshcd_remove(hba);
+
+ return 0;
+}
+
+static struct platform_driver ufs_renesas_platform = {
+ .probe = ufs_renesas_probe,
+ .remove = ufs_renesas_remove,
+ .driver = {
+ .name = "ufshcd-renesas",
+ .of_match_table = of_match_ptr(ufs_renesas_of_match),
+ },
+};
+module_platform_driver(ufs_renesas_platform);
+
+MODULE_AUTHOR("Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>");
+MODULE_DESCRIPTION("Renesas UFS host controller driver");
+MODULE_LICENSE("Dual MIT/GPL");
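The driver above keeps the whole PHY bring-up as a flat table of ufs_renesas_init_param entries, with ufs_renesas_reg_control() acting as a small interpreter that writes, polls, saves or restores registers according to each entry's mode. Extending the sequence therefore means appending table entries rather than adding code; a hypothetical extension (register offsets and values are made up for illustration):

/* Hypothetical extra step: one more indirect PHY write followed by a
 * poll. The PARAM_* macros expand to the same MODE_WRITE/MODE_POLL
 * entries the interpreter already understands. */
static const struct ufs_renesas_init_param extra_param[] = {
	PARAM_INDIRECT_WRITE(7, 0x20, 0x0001),
	PARAM_INDIRECT_POLL(7, 0x21, 0, BIT(0)),
};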
diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c
index 04166bda41da..1c91f43e15c8 100644
--- a/drivers/ufs/host/ufshcd-pci.c
+++ b/drivers/ufs/host/ufshcd-pci.c
@@ -24,7 +24,7 @@ struct ufs_host {
void (*late_init)(struct ufs_hba *hba);
};
-enum {
+enum intel_ufs_dsm_func_id {
INTEL_DSM_FNS = 0,
INTEL_DSM_RESET = 1,
};
@@ -42,6 +42,15 @@ static const guid_t intel_dsm_guid =
GUID_INIT(0x1A4832A0, 0x7D03, 0x43CA,
0xB0, 0x20, 0xF6, 0xDC, 0xD1, 0x2A, 0x19, 0x50);
+static bool __intel_dsm_supported(struct intel_host *host,
+ enum intel_ufs_dsm_func_id fn)
+{
+ return fn < 32 && fn >= 0 && (host->dsm_fns & (1u << fn));
+}
+
+#define INTEL_DSM_SUPPORTED(host, name) \
+ __intel_dsm_supported(host, INTEL_DSM_##name)
+
static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
unsigned int fn, u32 *result)
{
@@ -71,7 +80,7 @@ out:
static int intel_dsm(struct intel_host *intel_host, struct device *dev,
unsigned int fn, u32 *result)
{
- if (fn > 31 || !(intel_host->dsm_fns & (1 << fn)))
+ if (!__intel_dsm_supported(intel_host, fn))
return -EOPNOTSUPP;
return __intel_dsm(intel_host, dev, fn, result);
@@ -300,7 +309,7 @@ static int ufs_intel_device_reset(struct ufs_hba *hba)
{
struct intel_host *host = ufshcd_get_variant(hba);
- if (host->dsm_fns & INTEL_DSM_RESET) {
+ if (INTEL_DSM_SUPPORTED(host, RESET)) {
u32 result = 0;
int err;
@@ -342,7 +351,7 @@ static int ufs_intel_common_init(struct ufs_hba *hba)
return -ENOMEM;
ufshcd_set_variant(hba, host);
intel_dsm_init(host, hba->dev);
- if (host->dsm_fns & INTEL_DSM_RESET) {
+ if (INTEL_DSM_SUPPORTED(host, RESET)) {
if (hba->vops->device_reset)
hba->caps |= UFSHCD_CAP_DEEPSLEEP;
} else {
@@ -426,6 +435,7 @@ static int ufs_intel_adl_init(struct ufs_hba *hba)
{
hba->nop_out_timeout = 200;
hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
+ hba->caps |= UFSHCD_CAP_WB_EN;
return ufs_intel_common_init(hba);
}
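The INTEL_DSM_SUPPORTED() helper above is more than a cleanup: INTEL_DSM_RESET is a function index (1), not a bit mask, so the old test host->dsm_fns & INTEL_DSM_RESET really checked bit 0 (INTEL_DSM_FNS). A small standalone sketch of the distinction (helper name hypothetical):

/* dsm_fns is a bitmap of supported DSM functions, one bit per index. */
static bool dsm_fn_supported(u32 dsm_fns, unsigned int fn)
{
	return fn < 32 && (dsm_fns & (1u << fn));
}

/*
 * With dsm_fns == 0x2 (only function 1, RESET, advertised):
 *   dsm_fns & INTEL_DSM_RESET    -> 0x2 & 0x1 == 0, "unsupported" (wrong)
 *   dsm_fn_supported(dsm_fns, 1) -> true             (correct)
 */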
diff --git a/drivers/ufs/host/ufshcd-pltfrm.c b/drivers/ufs/host/ufshcd-pltfrm.c
index 173aea8e9997..5739ff007828 100644
--- a/drivers/ufs/host/ufshcd-pltfrm.c
+++ b/drivers/ufs/host/ufshcd-pltfrm.c
@@ -26,7 +26,7 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
int i;
struct device *dev = hba->dev;
struct device_node *np = dev->of_node;
- char *name;
+ const char *name;
u32 *clkfreq = NULL;
struct ufs_clk_info *clki;
int len = 0;
@@ -79,8 +79,8 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
}
for (i = 0; i < sz; i += 2) {
- ret = of_property_read_string_index(np,
- "clock-names", i/2, (const char **)&name);
+ ret = of_property_read_string_index(np, "clock-names", i/2,
+ &name);
if (ret)
goto out;
@@ -120,8 +120,8 @@ static bool phandle_exists(const struct device_node *np,
}
#define MAX_PROP_SIZE 32
-static int ufshcd_populate_vreg(struct device *dev, const char *name,
- struct ufs_vreg **out_vreg)
+int ufshcd_populate_vreg(struct device *dev, const char *name,
+ struct ufs_vreg **out_vreg)
{
char prop_name[MAX_PROP_SIZE];
struct ufs_vreg *vreg = NULL;
@@ -156,6 +156,7 @@ out:
*out_vreg = vreg;
return 0;
}
+EXPORT_SYMBOL_GPL(ufshcd_populate_vreg);
/**
* ufshcd_parse_regulator_info - get regulator info from device tree
@@ -219,8 +220,8 @@ static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
*
* Returns 0 on success, non-zero value on failure
*/
-int ufshcd_get_pwr_dev_param(struct ufs_dev_params *pltfrm_param,
- struct ufs_pa_layer_attr *dev_max,
+int ufshcd_get_pwr_dev_param(const struct ufs_dev_params *pltfrm_param,
+ const struct ufs_pa_layer_attr *dev_max,
struct ufs_pa_layer_attr *agreed_pwr)
{
int min_pltfrm_gear;
diff --git a/drivers/ufs/host/ufshcd-pltfrm.h b/drivers/ufs/host/ufshcd-pltfrm.h
index 43c2e412bd99..2e4ba2bfbcad 100644
--- a/drivers/ufs/host/ufshcd-pltfrm.h
+++ b/drivers/ufs/host/ufshcd-pltfrm.h
@@ -25,12 +25,14 @@ struct ufs_dev_params {
u32 desired_working_mode;
};
-int ufshcd_get_pwr_dev_param(struct ufs_dev_params *dev_param,
- struct ufs_pa_layer_attr *dev_max,
+int ufshcd_get_pwr_dev_param(const struct ufs_dev_params *dev_param,
+ const struct ufs_pa_layer_attr *dev_max,
struct ufs_pa_layer_attr *agreed_pwr);
void ufshcd_init_pwr_dev_param(struct ufs_dev_params *dev_param);
int ufshcd_pltfrm_init(struct platform_device *pdev,
const struct ufs_hba_variant_ops *vops);
void ufshcd_pltfrm_shutdown(struct platform_device *pdev);
+int ufshcd_populate_vreg(struct device *dev, const char *name,
+ struct ufs_vreg **out_vreg);
#endif /* UFSHCD_PLTFRM_H_ */
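Exporting ufshcd_populate_vreg() lets vendor glue look up an extra supply from the device tree without duplicating the property parsing (the MediaTek VCC/VCCQx fix-ups earlier in this series are the intended user). A hedged call-site sketch; the wrapping function and the "vcc" supply name are illustrative only:

static int example_lookup_vcc(struct ufs_hba *hba)
{
	struct ufs_vreg *vreg = NULL;
	int err;

	/*
	 * Looks up the optional "vcc-supply" phandle (plus the matching
	 * max-microamp limit) and allocates a struct ufs_vreg on success;
	 * vreg stays NULL if the supply is absent.
	 */
	err = ufshcd_populate_vreg(hba->dev, "vcc", &vreg);
	if (err)
		return err;

	if (vreg)
		hba->vreg_info.vcc = vreg;

	return 0;
}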
diff --git a/drivers/usb/chipidea/trace.h b/drivers/usb/chipidea/trace.h
index 1601fd86c4c1..ca0e65b48f0a 100644
--- a/drivers/usb/chipidea/trace.h
+++ b/drivers/usb/chipidea/trace.h
@@ -28,11 +28,11 @@ TRACE_EVENT(ci_log,
TP_ARGS(ci, vaf),
TP_STRUCT__entry(
__string(name, dev_name(ci->dev))
- __dynamic_array(char, msg, CHIPIDEA_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(name, dev_name(ci->dev));
- vsnprintf(__get_str(msg), CHIPIDEA_MSG_MAX, vaf->fmt, *vaf->va);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s: %s", __get_str(name), __get_str(msg))
);
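This hunk and the xhci/mtu3/musb ones further down are the same conversion: __vstring() sizes the trace entry's dynamic array from the format string and va_list instead of reserving a fixed *_MSG_MAX buffer, and __assign_vstr() formats into exactly that space at commit time. A minimal sketch of the pattern for a hypothetical tracepoint:

/* Hypothetical tracepoint; only the __vstring()/__assign_vstr() pair
 * differs from the fixed-size variant it replaces. */
TRACE_EVENT(example_log,
	TP_PROTO(struct device *dev, struct va_format *vaf),
	TP_ARGS(dev, vaf),
	TP_STRUCT__entry(
		__string(name, dev_name(dev))
		__vstring(msg, vaf->fmt, vaf->va)
	),
	TP_fast_assign(
		__assign_str(name, dev_name(dev));
		__assign_vstr(msg, vaf->fmt, vaf->va);
	),
	TP_printk("%s: %s", __get_str(name), __get_str(msg))
);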
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index a6a87c5d1b05..94b305bbd621 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1251,7 +1251,8 @@ void usb_hcd_unlink_urb_from_ep(struct usb_hcd *hcd, struct urb *urb)
EXPORT_SYMBOL_GPL(usb_hcd_unlink_urb_from_ep);
/*
- * Some usb host controllers can only perform dma using a small SRAM area.
+ * Some usb host controllers can only perform dma using a small SRAM area,
+ * or have restrictions on addressable DRAM.
* The usb core itself is however optimized for host controllers that can dma
* using regular system memory - like pci devices doing bus mastering.
*
@@ -3127,8 +3128,18 @@ int usb_hcd_setup_local_mem(struct usb_hcd *hcd, phys_addr_t phys_addr,
if (IS_ERR(hcd->localmem_pool))
return PTR_ERR(hcd->localmem_pool);
- local_mem = devm_memremap(hcd->self.sysdev, phys_addr,
- size, MEMREMAP_WC);
+ /*
+ * If a physical SRAM address was passed, map it; otherwise
+ * allocate system memory as a buffer.
+ */
+ if (phys_addr)
+ local_mem = devm_memremap(hcd->self.sysdev, phys_addr,
+ size, MEMREMAP_WC);
+ else
+ local_mem = dmam_alloc_attrs(hcd->self.sysdev, size, &dma,
+ GFP_KERNEL,
+ DMA_ATTR_WRITE_COMBINE);
+
if (IS_ERR(local_mem))
return PTR_ERR(local_mem);
diff --git a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c
index feca826d3f6a..75c2b28b3379 100644
--- a/drivers/usb/host/ohci-sa1111.c
+++ b/drivers/usb/host/ohci-sa1111.c
@@ -203,6 +203,31 @@ static int ohci_hcd_sa1111_probe(struct sa1111_dev *dev)
goto err1;
}
+ /*
+ * According to the "Intel StrongARM SA-1111 Microprocessor Companion
+ * Chip Specification Update" (June 2000), erratum #7, there is a
+ * significant bug in the SA1111 SDRAM shared memory controller. When
+ * accessing a region of memory above 1MB relative to the bank base,
+ * it is important that address bit 10 _NOT_ be asserted. Depending
+ * on the configuration of the RAM, bit 10 may correspond to one
+ * of several different (processor-relative) address bits.
+ *
+ * Section 4.6 of the "Intel StrongARM SA-1111 Development Module
+ * User's Guide" mentions that jumpers R51 and R52 control the
+ * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or
+ * SDRAM bank 1 on Neponset). The default configuration selects
+ * Assabet, so any address in bank 1 is necessarily invalid.
+ *
+ * As a workaround, use a bounce buffer in addressable memory
+ * as local_mem, relying on ZONE_DMA to provide an area that
+ * fits within the above constraints.
+ *
+ * SZ_64K is an estimate for what size this might need.
+ */
+ ret = usb_hcd_setup_local_mem(hcd, 0, 0, SZ_64K);
+ if (ret)
+ goto err1;
+
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
dev_dbg(&dev->dev, "request_mem_region failed\n");
ret = -EBUSY;
diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
index a5da02077297..61e93a3540a7 100644
--- a/drivers/usb/host/xhci-trace.h
+++ b/drivers/usb/host/xhci-trace.h
@@ -28,9 +28,9 @@
DECLARE_EVENT_CLASS(xhci_log_msg,
TP_PROTO(struct va_format *vaf),
TP_ARGS(vaf),
- TP_STRUCT__entry(__dynamic_array(char, msg, XHCI_MSG_MAX)),
+ TP_STRUCT__entry(__vstring(msg, vaf->fmt, vaf->va)),
TP_fast_assign(
- vsnprintf(__get_str(msg), XHCI_MSG_MAX, vaf->fmt, *vaf->va);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s", __get_str(msg))
);
diff --git a/drivers/usb/misc/sisusbvga/sisusb_con.c b/drivers/usb/misc/sisusbvga/sisusb_con.c
index dfa0d5ce6012..fcb95fb639e0 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_con.c
+++ b/drivers/usb/misc/sisusbvga/sisusb_con.c
@@ -248,7 +248,7 @@ sisusbcon_init(struct vc_data *c, int init)
*/
kref_get(&sisusb->kref);
- if (!*c->vc_uni_pagedir_loc)
+ if (!*c->uni_pagedict_loc)
con_set_default_unimap(c);
mutex_unlock(&sisusb->lock);
diff --git a/drivers/usb/mtu3/mtu3_trace.h b/drivers/usb/mtu3/mtu3_trace.h
index a09deae1146f..03d2a9bac27e 100644
--- a/drivers/usb/mtu3/mtu3_trace.h
+++ b/drivers/usb/mtu3/mtu3_trace.h
@@ -18,18 +18,16 @@
#include "mtu3.h"
-#define MTU3_MSG_MAX 256
-
TRACE_EVENT(mtu3_log,
TP_PROTO(struct device *dev, struct va_format *vaf),
TP_ARGS(dev, vaf),
TP_STRUCT__entry(
__string(name, dev_name(dev))
- __dynamic_array(char, msg, MTU3_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(name, dev_name(dev));
- vsnprintf(__get_str(msg), MTU3_MSG_MAX, vaf->fmt, *vaf->va);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s: %s", __get_str(name), __get_str(msg))
);
diff --git a/drivers/usb/musb/musb_trace.h b/drivers/usb/musb/musb_trace.h
index ec28b5716796..f246b14394c4 100644
--- a/drivers/usb/musb/musb_trace.h
+++ b/drivers/usb/musb/musb_trace.h
@@ -28,11 +28,11 @@ TRACE_EVENT(musb_log,
TP_ARGS(musb, vaf),
TP_STRUCT__entry(
__string(name, dev_name(musb->controller))
- __dynamic_array(char, msg, MUSB_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(name, dev_name(musb->controller));
- vsnprintf(__get_str(msg), MUSB_MSG_MAX, vaf->fmt, *vaf->va);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s: %s", __get_str(name), __get_str(msg))
);
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
index 48c4dadb0c7c..75a703b803a2 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.c
+++ b/drivers/vdpa/ifcvf/ifcvf_base.c
@@ -29,7 +29,6 @@ u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector)
{
struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
- cfg = hw->common_cfg;
vp_iowrite16(vector, &cfg->msix_config);
return vp_ioread16(&cfg->msix_config);
@@ -128,6 +127,7 @@ int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev)
break;
case VIRTIO_PCI_CAP_DEVICE_CFG:
hw->dev_cfg = get_cap_addr(hw, &cap);
+ hw->cap_dev_config_size = le32_to_cpu(cap.length);
IFCVF_DBG(pdev, "hw->dev_cfg = %p\n", hw->dev_cfg);
break;
}
@@ -233,15 +233,23 @@ int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features)
u32 ifcvf_get_config_size(struct ifcvf_hw *hw)
{
struct ifcvf_adapter *adapter;
+ u32 net_config_size = sizeof(struct virtio_net_config);
+ u32 blk_config_size = sizeof(struct virtio_blk_config);
+ u32 cap_size = hw->cap_dev_config_size;
u32 config_size;
adapter = vf_to_adapter(hw);
+ /* If the on-board device config space size is greater than
+ * the size of struct virtio_net/blk_config, return only the
+ * size defined by the spec. This is very unlikely to happen,
+ * but cap it as defensive programming.
+ */
switch (hw->dev_type) {
case VIRTIO_ID_NET:
- config_size = sizeof(struct virtio_net_config);
+ config_size = min(cap_size, net_config_size);
break;
case VIRTIO_ID_BLOCK:
- config_size = sizeof(struct virtio_blk_config);
+ config_size = min(cap_size, blk_config_size);
break;
default:
config_size = 0;
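The min() above is purely defensive: cap_dev_config_size is read from the device's VIRTIO_PCI_CAP_DEVICE_CFG capability, so capping it guarantees the core never copies past the structure the virtio spec defines even if the capability advertises something larger. In isolation (a sketch, not the driver's code):

	/* Never trust the device-provided length beyond the spec struct. */
	u32 cfg_size = min(hw->cap_dev_config_size,
			   (u32)sizeof(struct virtio_net_config));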
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
index 115b61f4924b..f5563f665cc6 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.h
+++ b/drivers/vdpa/ifcvf/ifcvf_base.h
@@ -87,6 +87,8 @@ struct ifcvf_hw {
int config_irq;
int vqs_reused_irq;
u16 nr_vring;
+ /* VIRTIO_PCI_CAP_DEVICE_CFG size */
+ u32 cap_dev_config_size;
};
struct ifcvf_adapter {
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index 0a5670729412..f9c0044c6442 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -685,7 +685,7 @@ static struct vdpa_notification_area ifcvf_get_vq_notification(struct vdpa_devic
}
/*
- * IFCVF currently does't have on-chip IOMMU, so not
+ * IFCVF currently doesn't have on-chip IOMMU, so not
* implemented set_map()/dma_map()/dma_unmap()
*/
static const struct vdpa_config_ops ifc_vdpa_ops = {
@@ -752,59 +752,36 @@ static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
{
struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
struct ifcvf_adapter *adapter;
+ struct vdpa_device *vdpa_dev;
struct pci_dev *pdev;
struct ifcvf_hw *vf;
- struct device *dev;
- int ret, i;
+ int ret;
ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
- if (ifcvf_mgmt_dev->adapter)
+ if (!ifcvf_mgmt_dev->adapter)
return -EOPNOTSUPP;
- pdev = ifcvf_mgmt_dev->pdev;
- dev = &pdev->dev;
- adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
- dev, &ifc_vdpa_ops, 1, 1, name, false);
- if (IS_ERR(adapter)) {
- IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
- return PTR_ERR(adapter);
- }
-
- ifcvf_mgmt_dev->adapter = adapter;
-
+ adapter = ifcvf_mgmt_dev->adapter;
vf = &adapter->vf;
- vf->dev_type = get_dev_type(pdev);
- vf->base = pcim_iomap_table(pdev);
+ pdev = adapter->pdev;
+ vdpa_dev = &adapter->vdpa;
- adapter->pdev = pdev;
- adapter->vdpa.dma_dev = &pdev->dev;
-
- ret = ifcvf_init_hw(vf, pdev);
- if (ret) {
- IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
- goto err;
- }
-
- for (i = 0; i < vf->nr_vring; i++)
- vf->vring[i].irq = -EINVAL;
-
- vf->hw_features = ifcvf_get_hw_features(vf);
- vf->config_size = ifcvf_get_config_size(vf);
+ if (name)
+ ret = dev_set_name(&vdpa_dev->dev, "%s", name);
+ else
+ ret = dev_set_name(&vdpa_dev->dev, "vdpa%u", vdpa_dev->index);
- adapter->vdpa.mdev = &ifcvf_mgmt_dev->mdev;
ret = _vdpa_register_device(&adapter->vdpa, vf->nr_vring);
if (ret) {
+ put_device(&adapter->vdpa.dev);
IFCVF_ERR(pdev, "Failed to register to vDPA bus");
- goto err;
+ return ret;
}
return 0;
-
-err:
- put_device(&adapter->vdpa.dev);
- return ret;
}
+
static void ifcvf_vdpa_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
{
struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
@@ -823,61 +800,94 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
struct device *dev = &pdev->dev;
+ struct ifcvf_adapter *adapter;
+ struct ifcvf_hw *vf;
u32 dev_type;
- int ret;
-
- ifcvf_mgmt_dev = kzalloc(sizeof(struct ifcvf_vdpa_mgmt_dev), GFP_KERNEL);
- if (!ifcvf_mgmt_dev) {
- IFCVF_ERR(pdev, "Failed to alloc memory for the vDPA management device\n");
- return -ENOMEM;
- }
-
- dev_type = get_dev_type(pdev);
- switch (dev_type) {
- case VIRTIO_ID_NET:
- ifcvf_mgmt_dev->mdev.id_table = id_table_net;
- break;
- case VIRTIO_ID_BLOCK:
- ifcvf_mgmt_dev->mdev.id_table = id_table_blk;
- break;
- default:
- IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", dev_type);
- ret = -EOPNOTSUPP;
- goto err;
- }
-
- ifcvf_mgmt_dev->mdev.ops = &ifcvf_vdpa_mgmt_dev_ops;
- ifcvf_mgmt_dev->mdev.device = dev;
- ifcvf_mgmt_dev->pdev = pdev;
+ int ret, i;
ret = pcim_enable_device(pdev);
if (ret) {
IFCVF_ERR(pdev, "Failed to enable device\n");
- goto err;
+ return ret;
}
-
ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4),
IFCVF_DRIVER_NAME);
if (ret) {
IFCVF_ERR(pdev, "Failed to request MMIO region\n");
- goto err;
+ return ret;
}
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret) {
IFCVF_ERR(pdev, "No usable DMA configuration\n");
- goto err;
+ return ret;
}
ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
if (ret) {
IFCVF_ERR(pdev,
"Failed for adding devres for freeing irq vectors\n");
- goto err;
+ return ret;
}
pci_set_master(pdev);
+ adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
+ dev, &ifc_vdpa_ops, 1, 1, NULL, false);
+ if (IS_ERR(adapter)) {
+ IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
+ return PTR_ERR(adapter);
+ }
+
+ vf = &adapter->vf;
+ vf->dev_type = get_dev_type(pdev);
+ vf->base = pcim_iomap_table(pdev);
+
+ adapter->pdev = pdev;
+ adapter->vdpa.dma_dev = &pdev->dev;
+
+ ret = ifcvf_init_hw(vf, pdev);
+ if (ret) {
+ IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
+ return ret;
+ }
+
+ for (i = 0; i < vf->nr_vring; i++)
+ vf->vring[i].irq = -EINVAL;
+
+ vf->hw_features = ifcvf_get_hw_features(vf);
+ vf->config_size = ifcvf_get_config_size(vf);
+
+ ifcvf_mgmt_dev = kzalloc(sizeof(struct ifcvf_vdpa_mgmt_dev), GFP_KERNEL);
+ if (!ifcvf_mgmt_dev) {
+ IFCVF_ERR(pdev, "Failed to alloc memory for the vDPA management device\n");
+ return -ENOMEM;
+ }
+
+ ifcvf_mgmt_dev->mdev.ops = &ifcvf_vdpa_mgmt_dev_ops;
+ ifcvf_mgmt_dev->mdev.device = dev;
+ ifcvf_mgmt_dev->adapter = adapter;
+
+ dev_type = get_dev_type(pdev);
+ switch (dev_type) {
+ case VIRTIO_ID_NET:
+ ifcvf_mgmt_dev->mdev.id_table = id_table_net;
+ break;
+ case VIRTIO_ID_BLOCK:
+ ifcvf_mgmt_dev->mdev.id_table = id_table_blk;
+ break;
+ default:
+ IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", dev_type);
+ ret = -EOPNOTSUPP;
+ goto err;
+ }
+
+ ifcvf_mgmt_dev->mdev.max_supported_vqs = vf->nr_vring;
+ ifcvf_mgmt_dev->mdev.supported_features = vf->hw_features;
+
+ adapter->vdpa.mdev = &ifcvf_mgmt_dev->mdev;
+
+
ret = vdpa_mgmtdev_register(&ifcvf_mgmt_dev->mdev);
if (ret) {
IFCVF_ERR(pdev,
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
index 44104093163b..6af9fdbb86b7 100644
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -70,6 +70,16 @@ struct mlx5_vdpa_wq_ent {
struct mlx5_vdpa_dev *mvdev;
};
+enum {
+ MLX5_VDPA_DATAVQ_GROUP,
+ MLX5_VDPA_CVQ_GROUP,
+ MLX5_VDPA_NUMVQ_GROUPS
+};
+
+enum {
+ MLX5_VDPA_NUM_AS = MLX5_VDPA_NUMVQ_GROUPS
+};
+
struct mlx5_vdpa_dev {
struct vdpa_device vdev;
struct mlx5_core_dev *mdev;
@@ -85,6 +95,7 @@ struct mlx5_vdpa_dev {
struct mlx5_vdpa_mr mr;
struct mlx5_control_vq cvq;
struct workqueue_struct *wq;
+ unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];
};
int mlx5_vdpa_alloc_pd(struct mlx5_vdpa_dev *dev, u32 *pdn, u16 uid);
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index e85c1d71f4ed..ed100a35e596 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -164,6 +164,7 @@ struct mlx5_vdpa_net {
bool setup;
u32 cur_num_vqs;
u32 rqt_size;
+ bool nb_registered;
struct notifier_block nb;
struct vdpa_callback config_cb;
struct mlx5_vdpa_wq_ent cvq_ent;
@@ -895,6 +896,7 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
if (err)
goto err_cmd;
+ mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT;
kfree(in);
mvq->virtq_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
@@ -922,6 +924,7 @@ static void destroy_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtq
mlx5_vdpa_warn(&ndev->mvdev, "destroy virtqueue 0x%x\n", mvq->virtq_id);
return;
}
+ mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_NONE;
umems_destroy(ndev, mvq);
}
@@ -1121,6 +1124,20 @@ err_cmd:
return err;
}
+static bool is_valid_state_change(int oldstate, int newstate)
+{
+ switch (oldstate) {
+ case MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT:
+ return newstate == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY;
+ case MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY:
+ return newstate == MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND;
+ case MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND:
+ case MLX5_VIRTIO_NET_Q_OBJECT_STATE_ERR:
+ default:
+ return false;
+ }
+}
+
static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int state)
{
int inlen = MLX5_ST_SZ_BYTES(modify_virtio_net_q_in);
@@ -1130,6 +1147,12 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
void *in;
int err;
+ if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_NONE)
+ return 0;
+
+ if (!is_valid_state_change(mvq->fw_state, state))
+ return -EINVAL;
+
in = kzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1440,7 +1463,7 @@ static int mlx5_vdpa_add_mac_vlan_rules(struct mlx5_vdpa_net *ndev, u8 *mac,
headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c, outer_headers.dmac_47_16);
dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v, outer_headers.dmac_47_16);
- memset(dmac_c, 0xff, ETH_ALEN);
+ eth_broadcast_addr(dmac_c);
ether_addr_copy(dmac_v, mac);
MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
if (tagged) {
@@ -1992,6 +2015,7 @@ static void mlx5_vdpa_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
struct mlx5_vdpa_virtqueue *mvq;
+ int err;
if (!mvdev->actual_features)
return;
@@ -2005,8 +2029,16 @@ static void mlx5_vdpa_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready
}
mvq = &ndev->vqs[idx];
- if (!ready)
+ if (!ready) {
suspend_vq(ndev, mvq);
+ } else {
+ err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
+ if (err) {
+ mlx5_vdpa_warn(mvdev, "modify VQ %d to ready failed (%d)\n", idx, err);
+ ready = false;
+ }
+ }
+
mvq->ready = ready;
}
@@ -2095,9 +2127,14 @@ static u32 mlx5_vdpa_get_vq_align(struct vdpa_device *vdev)
return PAGE_SIZE;
}
-static u32 mlx5_vdpa_get_vq_group(struct vdpa_device *vdpa, u16 idx)
+static u32 mlx5_vdpa_get_vq_group(struct vdpa_device *vdev, u16 idx)
{
- return 0;
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+
+ if (is_ctrl_vq_idx(mvdev, idx))
+ return MLX5_VDPA_CVQ_GROUP;
+
+ return MLX5_VDPA_DATAVQ_GROUP;
}
enum { MLX5_VIRTIO_NET_F_GUEST_CSUM = 1 << 9,
@@ -2511,6 +2548,15 @@ err_clear:
up_write(&ndev->reslock);
}
+static void init_group_to_asid_map(struct mlx5_vdpa_dev *mvdev)
+{
+ int i;
+
+ /* by default, all groups are mapped to asid 0 */
+ for (i = 0; i < MLX5_VDPA_NUMVQ_GROUPS; i++)
+ mvdev->group2asid[i] = 0;
+}
+
static int mlx5_vdpa_reset(struct vdpa_device *vdev)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
@@ -2529,7 +2575,9 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
ndev->mvdev.cvq.completed_desc = 0;
memset(ndev->event_cbs, 0, sizeof(*ndev->event_cbs) * (mvdev->max_vqs + 1));
ndev->mvdev.actual_features = 0;
+ init_group_to_asid_map(mvdev);
++mvdev->generation;
+
if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
if (mlx5_vdpa_create_mr(mvdev, NULL))
mlx5_vdpa_warn(mvdev, "create MR failed\n");
@@ -2567,26 +2615,63 @@ static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
return mvdev->generation;
}
-static int mlx5_vdpa_set_map(struct vdpa_device *vdev, unsigned int asid,
- struct vhost_iotlb *iotlb)
+static int set_map_control(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+{
+ u64 start = 0ULL, last = 0ULL - 1;
+ struct vhost_iotlb_map *map;
+ int err = 0;
+
+ spin_lock(&mvdev->cvq.iommu_lock);
+ vhost_iotlb_reset(mvdev->cvq.iotlb);
+
+ for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
+ map = vhost_iotlb_itree_next(map, start, last)) {
+ err = vhost_iotlb_add_range(mvdev->cvq.iotlb, map->start,
+ map->last, map->addr, map->perm);
+ if (err)
+ goto out;
+ }
+
+out:
+ spin_unlock(&mvdev->cvq.iommu_lock);
+ return err;
+}
+
+static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
{
- struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
- struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
bool change_map;
int err;
- down_write(&ndev->reslock);
-
err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map);
if (err) {
mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err);
- goto err;
+ return err;
}
if (change_map)
err = mlx5_vdpa_change_map(mvdev, iotlb);
-err:
+ return err;
+}
+
+static int mlx5_vdpa_set_map(struct vdpa_device *vdev, unsigned int asid,
+ struct vhost_iotlb *iotlb)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ int err = -EINVAL;
+
+ down_write(&ndev->reslock);
+ if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) {
+ err = set_map_data(mvdev, iotlb);
+ if (err)
+ goto out;
+ }
+
+ if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] == asid)
+ err = set_map_control(mvdev, iotlb);
+
+out:
up_write(&ndev->reslock);
return err;
}
@@ -2733,6 +2818,49 @@ out_err:
return err;
}
+static void mlx5_vdpa_cvq_suspend(struct mlx5_vdpa_dev *mvdev)
+{
+ struct mlx5_control_vq *cvq;
+
+ if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
+ return;
+
+ cvq = &mvdev->cvq;
+ cvq->ready = false;
+}
+
+static int mlx5_vdpa_suspend(struct vdpa_device *vdev)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ struct mlx5_vdpa_virtqueue *mvq;
+ int i;
+
+ down_write(&ndev->reslock);
+ mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
+ ndev->nb_registered = false;
+ flush_workqueue(ndev->mvdev.wq);
+ for (i = 0; i < ndev->cur_num_vqs; i++) {
+ mvq = &ndev->vqs[i];
+ suspend_vq(ndev, mvq);
+ }
+ mlx5_vdpa_cvq_suspend(mvdev);
+ up_write(&ndev->reslock);
+ return 0;
+}
+
+static int mlx5_set_group_asid(struct vdpa_device *vdev, u32 group,
+ unsigned int asid)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+
+ if (group >= MLX5_VDPA_NUMVQ_GROUPS)
+ return -EINVAL;
+
+ mvdev->group2asid[group] = asid;
+ return 0;
+}
+
static const struct vdpa_config_ops mlx5_vdpa_ops = {
.set_vq_address = mlx5_vdpa_set_vq_address,
.set_vq_num = mlx5_vdpa_set_vq_num,
@@ -2762,7 +2890,9 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
.set_config = mlx5_vdpa_set_config,
.get_generation = mlx5_vdpa_get_generation,
.set_map = mlx5_vdpa_set_map,
+ .set_group_asid = mlx5_set_group_asid,
.free = mlx5_vdpa_free,
+ .suspend = mlx5_vdpa_suspend,
};
static int query_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
@@ -2828,6 +2958,7 @@ static void init_mvqs(struct mlx5_vdpa_net *ndev)
mvq->index = i;
mvq->ndev = ndev;
mvq->fwqp.fw = true;
+ mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_NONE;
}
for (; i < ndev->mvdev.max_vqs; i++) {
mvq = &ndev->vqs[i];
@@ -2902,13 +3033,21 @@ static int event_handler(struct notifier_block *nb, unsigned long event, void *p
switch (eqe->sub_type) {
case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
+ down_read(&ndev->reslock);
+ if (!ndev->nb_registered) {
+ up_read(&ndev->reslock);
+ return NOTIFY_DONE;
+ }
wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC);
- if (!wqent)
+ if (!wqent) {
+ up_read(&ndev->reslock);
return NOTIFY_DONE;
+ }
wqent->mvdev = &ndev->mvdev;
INIT_WORK(&wqent->work, update_carrier);
queue_work(ndev->mvdev.wq, &wqent->work);
+ up_read(&ndev->reslock);
ret = NOTIFY_OK;
break;
default:
@@ -2982,7 +3121,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
}
ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
- 1, 1, name, false);
+ MLX5_VDPA_NUMVQ_GROUPS, MLX5_VDPA_NUM_AS, name, false);
if (IS_ERR(ndev))
return PTR_ERR(ndev);
@@ -3062,6 +3201,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
ndev->nb.notifier_call = event_handler;
mlx5_notifier_register(mdev, &ndev->nb);
+ ndev->nb_registered = true;
mvdev->vdev.mdev = &mgtdev->mgtdev;
err = _vdpa_register_device(&mvdev->vdev, max_vqs + 1);
if (err)
@@ -3093,7 +3233,10 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
struct workqueue_struct *wq;
- mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
+ if (ndev->nb_registered) {
+ mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
+ ndev->nb_registered = false;
+ }
wq = mvdev->wq;
mvdev->wq = NULL;
destroy_workqueue(wq);
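The net effect of the group/ASID plumbing above: the device now exposes two virtqueue groups (data and control), each of which user space may bind to its own address space, and mlx5_vdpa_set_map() applies an incoming IOTLB only to the group(s) whose ASID matches. A hedged sketch of the caller-side sequence; vdev, guest_iotlb and cvq_iotlb are placeholders, and vhost-vDPA would normally drive this through its ioctls rather than the ops directly:

	const struct vdpa_config_ops *ops = vdev->config;

	/* Put the control VQ group into address space 1; data VQs stay in 0. */
	ops->set_group_asid(vdev, MLX5_VDPA_CVQ_GROUP, 1);

	/* Routed to set_map_data() only, because the data group maps to ASID 0. */
	ops->set_map(vdev, 0, guest_iotlb);

	/* Only refreshes the CVQ's private IOTLB (ASID 1). */
	ops->set_map(vdev, 1, cvq_iotlb);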
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index ebf2f363fbe7..c06c02704461 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -824,11 +824,11 @@ static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *ms
config.mac))
return -EMSGSIZE;
- val_u16 = le16_to_cpu(config.status);
+ val_u16 = __virtio16_to_cpu(true, config.status);
if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_STATUS, val_u16))
return -EMSGSIZE;
- val_u16 = le16_to_cpu(config.mtu);
+ val_u16 = __virtio16_to_cpu(true, config.mtu);
if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MTU, val_u16))
return -EMSGSIZE;
@@ -846,17 +846,9 @@ vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid,
{
u32 device_id;
void *hdr;
- u8 status;
int err;
down_read(&vdev->cf_lock);
- status = vdev->config->get_status(vdev);
- if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
- NL_SET_ERR_MSG_MOD(extack, "Features negotiation not completed");
- err = -EAGAIN;
- goto out;
- }
-
hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
VDPA_CMD_DEV_CONFIG_GET);
if (!hdr) {
@@ -913,7 +905,7 @@ static int vdpa_fill_stats_rec(struct vdpa_device *vdev, struct sk_buff *msg,
}
vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config));
- max_vqp = le16_to_cpu(config.max_virtqueue_pairs);
+ max_vqp = __virtio16_to_cpu(true, config.max_virtqueue_pairs);
if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, max_vqp))
return -EMSGSIZE;
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index 0f2865899647..225b7f5d8be3 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -33,7 +33,7 @@ MODULE_PARM_DESC(batch_mapping, "Batched mapping 1 -Enable; 0 - Disable");
static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
- "Maximum number of iotlb entries. 0 means unlimited. (default: 2048)");
+ "Maximum number of iotlb entries for each address space. 0 means unlimited. (default: 2048)");
#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
#define VDPASIM_QUEUE_MAX 256
@@ -107,6 +107,7 @@ static void vdpasim_do_reset(struct vdpasim *vdpasim)
for (i = 0; i < vdpasim->dev_attr.nas; i++)
vhost_iotlb_reset(&vdpasim->iommu[i]);
+ vdpasim->running = true;
spin_unlock(&vdpasim->iommu_lock);
vdpasim->features = 0;
@@ -291,7 +292,7 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
goto err_iommu;
for (i = 0; i < vdpasim->dev_attr.nas; i++)
- vhost_iotlb_init(&vdpasim->iommu[i], 0, 0);
+ vhost_iotlb_init(&vdpasim->iommu[i], max_iotlb_entries, 0);
vdpasim->buffer = kvmalloc(dev_attr->buffer_size, GFP_KERNEL);
if (!vdpasim->buffer)
@@ -505,6 +506,17 @@ static int vdpasim_reset(struct vdpa_device *vdpa)
return 0;
}
+static int vdpasim_suspend(struct vdpa_device *vdpa)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+
+ spin_lock(&vdpasim->lock);
+ vdpasim->running = false;
+ spin_unlock(&vdpasim->lock);
+
+ return 0;
+}
+
static size_t vdpasim_get_config_size(struct vdpa_device *vdpa)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
@@ -694,6 +706,7 @@ static const struct vdpa_config_ops vdpasim_config_ops = {
.get_status = vdpasim_get_status,
.set_status = vdpasim_set_status,
.reset = vdpasim_reset,
+ .suspend = vdpasim_suspend,
.get_config_size = vdpasim_get_config_size,
.get_config = vdpasim_get_config,
.set_config = vdpasim_set_config,
@@ -726,6 +739,7 @@ static const struct vdpa_config_ops vdpasim_batch_config_ops = {
.get_status = vdpasim_get_status,
.set_status = vdpasim_set_status,
.reset = vdpasim_reset,
+ .suspend = vdpasim_suspend,
.get_config_size = vdpasim_get_config_size,
.get_config = vdpasim_get_config,
.set_config = vdpasim_set_config,
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.h b/drivers/vdpa/vdpa_sim/vdpa_sim.h
index 622782e92239..061986f30911 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.h
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.h
@@ -66,6 +66,7 @@ struct vdpasim {
u32 generation;
u64 features;
u32 groups;
+ bool running;
/* spinlock to synchronize iommu table */
spinlock_t iommu_lock;
};
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
index 42d401d43911..c8bfea3b7db2 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
@@ -25,31 +25,49 @@
#define DRV_LICENSE "GPL v2"
#define VDPASIM_BLK_FEATURES (VDPASIM_FEATURES | \
+ (1ULL << VIRTIO_BLK_F_FLUSH) | \
(1ULL << VIRTIO_BLK_F_SIZE_MAX) | \
(1ULL << VIRTIO_BLK_F_SEG_MAX) | \
(1ULL << VIRTIO_BLK_F_BLK_SIZE) | \
(1ULL << VIRTIO_BLK_F_TOPOLOGY) | \
- (1ULL << VIRTIO_BLK_F_MQ))
+ (1ULL << VIRTIO_BLK_F_MQ) | \
+ (1ULL << VIRTIO_BLK_F_DISCARD) | \
+ (1ULL << VIRTIO_BLK_F_WRITE_ZEROES))
#define VDPASIM_BLK_CAPACITY 0x40000
#define VDPASIM_BLK_SIZE_MAX 0x1000
#define VDPASIM_BLK_SEG_MAX 32
+#define VDPASIM_BLK_DWZ_MAX_SECTORS UINT_MAX
+
+/* 1 virtqueue, 1 address space, 1 virtqueue group */
#define VDPASIM_BLK_VQ_NUM 1
+#define VDPASIM_BLK_AS_NUM 1
+#define VDPASIM_BLK_GROUP_NUM 1
static char vdpasim_blk_id[VIRTIO_BLK_ID_BYTES] = "vdpa_blk_sim";
-static bool vdpasim_blk_check_range(u64 start_sector, size_t range_size)
+static bool vdpasim_blk_check_range(struct vdpasim *vdpasim, u64 start_sector,
+ u64 num_sectors, u64 max_sectors)
{
- u64 range_sectors = range_size >> SECTOR_SHIFT;
-
- if (range_size > VDPASIM_BLK_SIZE_MAX * VDPASIM_BLK_SEG_MAX)
- return false;
+ if (start_sector > VDPASIM_BLK_CAPACITY) {
+ dev_dbg(&vdpasim->vdpa.dev,
+ "starting sector exceeds the capacity - start: 0x%llx capacity: 0x%x\n",
+ start_sector, VDPASIM_BLK_CAPACITY);
+ }
- if (start_sector > VDPASIM_BLK_CAPACITY)
+ if (num_sectors > max_sectors) {
+ dev_dbg(&vdpasim->vdpa.dev,
+ "number of sectors exceeds the max allowed in a request - num: 0x%llx max: 0x%llx\n",
+ num_sectors, max_sectors);
return false;
+ }
- if (range_sectors > VDPASIM_BLK_CAPACITY - start_sector)
+ if (num_sectors > VDPASIM_BLK_CAPACITY - start_sector) {
+ dev_dbg(&vdpasim->vdpa.dev,
+ "request exceeds the capacity - start: 0x%llx num: 0x%llx capacity: 0x%x\n",
+ start_sector, num_sectors, VDPASIM_BLK_CAPACITY);
return false;
+ }
return true;
}
@@ -63,6 +81,7 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
{
size_t pushed = 0, to_pull, to_push;
struct virtio_blk_outhdr hdr;
+ bool handled = false;
ssize_t bytes;
loff_t offset;
u64 sector;
@@ -76,14 +95,14 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
return false;
if (vq->out_iov.used < 1 || vq->in_iov.used < 1) {
- dev_err(&vdpasim->vdpa.dev, "missing headers - out_iov: %u in_iov %u\n",
+ dev_dbg(&vdpasim->vdpa.dev, "missing headers - out_iov: %u in_iov %u\n",
vq->out_iov.used, vq->in_iov.used);
- return false;
+ goto err;
}
if (vq->in_iov.iov[vq->in_iov.used - 1].iov_len < 1) {
- dev_err(&vdpasim->vdpa.dev, "request in header too short\n");
- return false;
+ dev_dbg(&vdpasim->vdpa.dev, "request in header too short\n");
+ goto err;
}
/* The last byte is the status and we checked if the last iov has
@@ -96,8 +115,8 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
bytes = vringh_iov_pull_iotlb(&vq->vring, &vq->out_iov, &hdr,
sizeof(hdr));
if (bytes != sizeof(hdr)) {
- dev_err(&vdpasim->vdpa.dev, "request out header too short\n");
- return false;
+ dev_dbg(&vdpasim->vdpa.dev, "request out header too short\n");
+ goto err;
}
to_pull -= bytes;
@@ -107,12 +126,20 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
offset = sector << SECTOR_SHIFT;
status = VIRTIO_BLK_S_OK;
+ if (type != VIRTIO_BLK_T_IN && type != VIRTIO_BLK_T_OUT &&
+ sector != 0) {
+ dev_dbg(&vdpasim->vdpa.dev,
+ "sector must be 0 for %u request - sector: 0x%llx\n",
+ type, sector);
+ status = VIRTIO_BLK_S_IOERR;
+ goto err_status;
+ }
+
switch (type) {
case VIRTIO_BLK_T_IN:
- if (!vdpasim_blk_check_range(sector, to_push)) {
- dev_err(&vdpasim->vdpa.dev,
- "reading over the capacity - offset: 0x%llx len: 0x%zx\n",
- offset, to_push);
+ if (!vdpasim_blk_check_range(vdpasim, sector,
+ to_push >> SECTOR_SHIFT,
+ VDPASIM_BLK_SIZE_MAX * VDPASIM_BLK_SEG_MAX)) {
status = VIRTIO_BLK_S_IOERR;
break;
}
@@ -121,7 +148,7 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
vdpasim->buffer + offset,
to_push);
if (bytes < 0) {
- dev_err(&vdpasim->vdpa.dev,
+ dev_dbg(&vdpasim->vdpa.dev,
"vringh_iov_push_iotlb() error: %zd offset: 0x%llx len: 0x%zx\n",
bytes, offset, to_push);
status = VIRTIO_BLK_S_IOERR;
@@ -132,10 +159,9 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
break;
case VIRTIO_BLK_T_OUT:
- if (!vdpasim_blk_check_range(sector, to_pull)) {
- dev_err(&vdpasim->vdpa.dev,
- "writing over the capacity - offset: 0x%llx len: 0x%zx\n",
- offset, to_pull);
+ if (!vdpasim_blk_check_range(vdpasim, sector,
+ to_pull >> SECTOR_SHIFT,
+ VDPASIM_BLK_SIZE_MAX * VDPASIM_BLK_SEG_MAX)) {
status = VIRTIO_BLK_S_IOERR;
break;
}
@@ -144,7 +170,7 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
vdpasim->buffer + offset,
to_pull);
if (bytes < 0) {
- dev_err(&vdpasim->vdpa.dev,
+ dev_dbg(&vdpasim->vdpa.dev,
"vringh_iov_pull_iotlb() error: %zd offset: 0x%llx len: 0x%zx\n",
bytes, offset, to_pull);
status = VIRTIO_BLK_S_IOERR;
@@ -157,7 +183,7 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
vdpasim_blk_id,
VIRTIO_BLK_ID_BYTES);
if (bytes < 0) {
- dev_err(&vdpasim->vdpa.dev,
+ dev_dbg(&vdpasim->vdpa.dev,
"vringh_iov_push_iotlb() error: %zd\n", bytes);
status = VIRTIO_BLK_S_IOERR;
break;
@@ -166,13 +192,76 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
pushed += bytes;
break;
+ case VIRTIO_BLK_T_FLUSH:
+ /* nothing to do */
+ break;
+
+ case VIRTIO_BLK_T_DISCARD:
+ case VIRTIO_BLK_T_WRITE_ZEROES: {
+ struct virtio_blk_discard_write_zeroes range;
+ u32 num_sectors, flags;
+
+ if (to_pull != sizeof(range)) {
+ dev_dbg(&vdpasim->vdpa.dev,
+ "discard/write_zeroes header len: 0x%zx [expected: 0x%zx]\n",
+ to_pull, sizeof(range));
+ status = VIRTIO_BLK_S_IOERR;
+ break;
+ }
+
+ bytes = vringh_iov_pull_iotlb(&vq->vring, &vq->out_iov, &range,
+ to_pull);
+ if (bytes < 0) {
+ dev_dbg(&vdpasim->vdpa.dev,
+ "vringh_iov_pull_iotlb() error: %zd offset: 0x%llx len: 0x%zx\n",
+ bytes, offset, to_pull);
+ status = VIRTIO_BLK_S_IOERR;
+ break;
+ }
+
+ sector = le64_to_cpu(range.sector);
+ offset = sector << SECTOR_SHIFT;
+ num_sectors = le32_to_cpu(range.num_sectors);
+ flags = le32_to_cpu(range.flags);
+
+ if (type == VIRTIO_BLK_T_DISCARD && flags != 0) {
+ dev_dbg(&vdpasim->vdpa.dev,
+ "discard unexpected flags set - flags: 0x%x\n",
+ flags);
+ status = VIRTIO_BLK_S_UNSUPP;
+ break;
+ }
+
+ if (type == VIRTIO_BLK_T_WRITE_ZEROES &&
+ flags & ~VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP) {
+ dev_dbg(&vdpasim->vdpa.dev,
+ "write_zeroes unexpected flags set - flags: 0x%x\n",
+ flags);
+ status = VIRTIO_BLK_S_UNSUPP;
+ break;
+ }
+
+ if (!vdpasim_blk_check_range(vdpasim, sector, num_sectors,
+ VDPASIM_BLK_DWZ_MAX_SECTORS)) {
+ status = VIRTIO_BLK_S_IOERR;
+ break;
+ }
+
+ if (type == VIRTIO_BLK_T_WRITE_ZEROES) {
+ memset(vdpasim->buffer + offset, 0,
+ num_sectors << SECTOR_SHIFT);
+ }
+
+ break;
+ }
default:
- dev_warn(&vdpasim->vdpa.dev,
- "Unsupported request type %d\n", type);
+ dev_dbg(&vdpasim->vdpa.dev,
+ "Unsupported request type %d\n", type);
status = VIRTIO_BLK_S_IOERR;
break;
}
+err_status:
/* If some operations fail, we need to skip the remaining bytes
* to put the status in the last byte
*/
@@ -182,21 +271,25 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
/* Last byte is the status */
bytes = vringh_iov_push_iotlb(&vq->vring, &vq->in_iov, &status, 1);
if (bytes != 1)
- return false;
+ goto err;
pushed += bytes;
/* Make sure data is written before advancing index */
smp_wmb();
+ handled = true;
+
+err:
vringh_complete_iotlb(&vq->vring, vq->head, pushed);
- return true;
+ return handled;
}
static void vdpasim_blk_work(struct work_struct *work)
{
struct vdpasim *vdpasim = container_of(work, struct vdpasim, work);
+ bool reschedule = false;
int i;
spin_lock(&vdpasim->lock);
@@ -204,8 +297,12 @@ static void vdpasim_blk_work(struct work_struct *work)
if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
goto out;
+ if (!vdpasim->running)
+ goto out;
+
for (i = 0; i < VDPASIM_BLK_VQ_NUM; i++) {
struct vdpasim_virtqueue *vq = &vdpasim->vqs[i];
+ int reqs = 0;
if (!vq->ready)
continue;
@@ -218,10 +315,18 @@ static void vdpasim_blk_work(struct work_struct *work)
if (vringh_need_notify_iotlb(&vq->vring) > 0)
vringh_notify(&vq->vring);
local_bh_enable();
+
+ if (++reqs > 4) {
+ reschedule = true;
+ break;
+ }
}
}
out:
spin_unlock(&vdpasim->lock);
+
+ if (reschedule)
+ schedule_work(&vdpasim->work);
}
static void vdpasim_blk_get_config(struct vdpasim *vdpasim, void *config)
@@ -237,6 +342,17 @@ static void vdpasim_blk_get_config(struct vdpasim *vdpasim, void *config)
blk_config->min_io_size = cpu_to_vdpasim16(vdpasim, 1);
blk_config->opt_io_size = cpu_to_vdpasim32(vdpasim, 1);
blk_config->blk_size = cpu_to_vdpasim32(vdpasim, SECTOR_SIZE);
+ /* VIRTIO_BLK_F_DISCARD */
+ blk_config->discard_sector_alignment =
+ cpu_to_vdpasim32(vdpasim, SECTOR_SIZE);
+ blk_config->max_discard_sectors =
+ cpu_to_vdpasim32(vdpasim, VDPASIM_BLK_DWZ_MAX_SECTORS);
+ blk_config->max_discard_seg = cpu_to_vdpasim32(vdpasim, 1);
+ /* VIRTIO_BLK_F_WRITE_ZEROES */
+ blk_config->max_write_zeroes_sectors =
+ cpu_to_vdpasim32(vdpasim, VDPASIM_BLK_DWZ_MAX_SECTORS);
+ blk_config->max_write_zeroes_seg = cpu_to_vdpasim32(vdpasim, 1);
+
}
static void vdpasim_blk_mgmtdev_release(struct device *dev)
@@ -260,6 +376,8 @@ static int vdpasim_blk_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
dev_attr.id = VIRTIO_ID_BLOCK;
dev_attr.supported_features = VDPASIM_BLK_FEATURES;
dev_attr.nvqs = VDPASIM_BLK_VQ_NUM;
+ dev_attr.ngroups = VDPASIM_BLK_GROUP_NUM;
+ dev_attr.nas = VDPASIM_BLK_AS_NUM;
dev_attr.config_size = sizeof(struct virtio_blk_config);
dev_attr.get_config = vdpasim_blk_get_config;
dev_attr.work_fn = vdpasim_blk_work;
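The DISCARD/WRITE_ZEROES path above validates each request with vdpasim_blk_check_range() before touching the simulated disk, bounding both the start sector and the per-request length by VDPASIM_BLK_DWZ_MAX_SECTORS. The helper itself is introduced earlier in the series and is not visible in this excerpt; a minimal sketch of what such a check can look like (the capacity constant and exact comparisons are assumptions):

static bool vdpasim_blk_check_range(struct vdpasim *vdpasim, u64 start_sector,
				    u64 num_sectors, u64 max_sectors)
{
	if (start_sector > VDPASIM_BLK_CAPACITY)
		return false;

	/* reject requests that are too large or would wrap past the end */
	if (num_sectors > max_sectors ||
	    start_sector > VDPASIM_BLK_CAPACITY - num_sectors)
		return false;

	return true;
}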
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
index 5125976a4df8..886449e88502 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
@@ -154,6 +154,9 @@ static void vdpasim_net_work(struct work_struct *work)
spin_lock(&vdpasim->lock);
+ if (!vdpasim->running)
+ goto out;
+
if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
goto out;
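Both simulator work functions now bail out early when vdpasim->running is false, which is how the new suspend support is wired into the datapath. A hedged sketch of the suspend callback that clears the flag (the real implementation lives in the common vdpa_sim.c code, not shown in this excerpt, so the helper names are assumptions):

static int vdpasim_suspend(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->lock);
	/* the blk/net work functions test this under the same lock */
	vdpasim->running = false;
	spin_unlock(&vdpasim->lock);

	return 0;
}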
diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
index 6daa3978d290..e682bc7ee6c9 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.c
+++ b/drivers/vdpa/vdpa_user/iova_domain.c
@@ -138,18 +138,17 @@ static void do_bounce(phys_addr_t orig, void *addr, size_t size,
{
unsigned long pfn = PFN_DOWN(orig);
unsigned int offset = offset_in_page(orig);
- char *buffer;
+ struct page *page;
unsigned int sz = 0;
while (size) {
sz = min_t(size_t, PAGE_SIZE - offset, size);
- buffer = kmap_atomic(pfn_to_page(pfn));
+ page = pfn_to_page(pfn);
if (dir == DMA_TO_DEVICE)
- memcpy(addr, buffer + offset, sz);
+ memcpy_from_page(addr, page, offset, sz);
else
- memcpy(buffer + offset, addr, sz);
- kunmap_atomic(buffer);
+ memcpy_to_page(page, offset, addr, sz);
size -= sz;
pfn++;
@@ -179,8 +178,9 @@ static void vduse_domain_bounce(struct vduse_iova_domain *domain,
map->orig_phys == INVALID_PHYS_ADDR))
return;
- addr = page_address(map->bounce_page) + offset;
- do_bounce(map->orig_phys + offset, addr, sz, dir);
+ addr = kmap_local_page(map->bounce_page);
+ do_bounce(map->orig_phys + offset, addr + offset, sz, dir);
+ kunmap_local(addr);
size -= sz;
iova += sz;
}
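The do_bounce() change swaps open-coded kmap_atomic()/kunmap_atomic() pairs for the memcpy_from_page()/memcpy_to_page() helpers from <linux/highmem.h>, and vduse_domain_bounce() similarly moves to kmap_local_page(), so the copies no longer rely on page_address() and remain correct when a bounce page sits in highmem. For the DMA_TO_DEVICE direction the transformation is essentially:

/* before: open-coded atomic kmap */
buffer = kmap_atomic(pfn_to_page(pfn));
memcpy(addr, buffer + offset, sz);
kunmap_atomic(buffer);

/* after: the <linux/highmem.h> helper maps, copies and unmaps in one call */
memcpy_from_page(addr, pfn_to_page(pfn), offset, sz);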
@@ -213,21 +213,21 @@ vduse_domain_get_bounce_page(struct vduse_iova_domain *domain, u64 iova)
struct vduse_bounce_map *map;
struct page *page = NULL;
- spin_lock(&domain->iotlb_lock);
+ read_lock(&domain->bounce_lock);
map = &domain->bounce_maps[iova >> PAGE_SHIFT];
- if (!map->bounce_page)
+ if (domain->user_bounce_pages || !map->bounce_page)
goto out;
page = map->bounce_page;
get_page(page);
out:
- spin_unlock(&domain->iotlb_lock);
+ read_unlock(&domain->bounce_lock);
return page;
}
static void
-vduse_domain_free_bounce_pages(struct vduse_iova_domain *domain)
+vduse_domain_free_kernel_bounce_pages(struct vduse_iova_domain *domain)
{
struct vduse_bounce_map *map;
unsigned long pfn, bounce_pfns;
@@ -247,6 +247,73 @@ vduse_domain_free_bounce_pages(struct vduse_iova_domain *domain)
}
}
+int vduse_domain_add_user_bounce_pages(struct vduse_iova_domain *domain,
+ struct page **pages, int count)
+{
+ struct vduse_bounce_map *map;
+ int i, ret;
+
+ /* We don't support partial mapping for now */
+ if (count != (domain->bounce_size >> PAGE_SHIFT))
+ return -EINVAL;
+
+ write_lock(&domain->bounce_lock);
+ ret = -EEXIST;
+ if (domain->user_bounce_pages)
+ goto out;
+
+ for (i = 0; i < count; i++) {
+ map = &domain->bounce_maps[i];
+ if (map->bounce_page) {
+ /* Copy kernel page to user page if it's in use */
+ if (map->orig_phys != INVALID_PHYS_ADDR)
+ memcpy_to_page(pages[i], 0,
+ page_address(map->bounce_page),
+ PAGE_SIZE);
+ __free_page(map->bounce_page);
+ }
+ map->bounce_page = pages[i];
+ get_page(pages[i]);
+ }
+ domain->user_bounce_pages = true;
+ ret = 0;
+out:
+ write_unlock(&domain->bounce_lock);
+
+ return ret;
+}
+
+void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
+{
+ struct vduse_bounce_map *map;
+ unsigned long i, count;
+
+ write_lock(&domain->bounce_lock);
+ if (!domain->user_bounce_pages)
+ goto out;
+
+ count = domain->bounce_size >> PAGE_SHIFT;
+ for (i = 0; i < count; i++) {
+ struct page *page = NULL;
+
+ map = &domain->bounce_maps[i];
+ if (WARN_ON(!map->bounce_page))
+ continue;
+
+ /* Copy user page to kernel page if it's in use */
+ if (map->orig_phys != INVALID_PHYS_ADDR) {
+ page = alloc_page(GFP_ATOMIC | __GFP_NOFAIL);
+ memcpy_from_page(page_address(page),
+ map->bounce_page, 0, PAGE_SIZE);
+ }
+ put_page(map->bounce_page);
+ map->bounce_page = page;
+ }
+ domain->user_bounce_pages = false;
+out:
+ write_unlock(&domain->bounce_lock);
+}
+
void vduse_domain_reset_bounce_map(struct vduse_iova_domain *domain)
{
if (!domain->bounce_map)
@@ -322,13 +389,18 @@ dma_addr_t vduse_domain_map_page(struct vduse_iova_domain *domain,
if (vduse_domain_init_bounce_map(domain))
goto err;
+ read_lock(&domain->bounce_lock);
if (vduse_domain_map_bounce_page(domain, (u64)iova, (u64)size, pa))
- goto err;
+ goto err_unlock;
if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
vduse_domain_bounce(domain, iova, size, DMA_TO_DEVICE);
+ read_unlock(&domain->bounce_lock);
+
return iova;
+err_unlock:
+ read_unlock(&domain->bounce_lock);
err:
vduse_domain_free_iova(iovad, iova, size);
return DMA_MAPPING_ERROR;
@@ -340,10 +412,12 @@ void vduse_domain_unmap_page(struct vduse_iova_domain *domain,
{
struct iova_domain *iovad = &domain->stream_iovad;
+ read_lock(&domain->bounce_lock);
if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
vduse_domain_bounce(domain, dma_addr, size, DMA_FROM_DEVICE);
vduse_domain_unmap_bounce_page(domain, (u64)dma_addr, (u64)size);
+ read_unlock(&domain->bounce_lock);
vduse_domain_free_iova(iovad, dma_addr, size);
}
@@ -451,7 +525,8 @@ static int vduse_domain_release(struct inode *inode, struct file *file)
spin_lock(&domain->iotlb_lock);
vduse_iotlb_del_range(domain, 0, ULLONG_MAX);
- vduse_domain_free_bounce_pages(domain);
+ vduse_domain_remove_user_bounce_pages(domain);
+ vduse_domain_free_kernel_bounce_pages(domain);
spin_unlock(&domain->iotlb_lock);
put_iova_domain(&domain->stream_iovad);
put_iova_domain(&domain->consistent_iovad);
@@ -511,6 +586,7 @@ vduse_domain_create(unsigned long iova_limit, size_t bounce_size)
goto err_file;
domain->file = file;
+ rwlock_init(&domain->bounce_lock);
spin_lock_init(&domain->iotlb_lock);
init_iova_domain(&domain->stream_iovad,
PAGE_SIZE, IOVA_START_PFN);
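The new bounce_lock is deliberately a rwlock: the hot paths (vduse_domain_map_page(), vduse_domain_unmap_page(), vduse_domain_get_bounce_page()) only need a stable bounce_page pointer, so they take the lock for read and can run concurrently, while registering or removing user bounce pages takes it for write. Condensed to its essentials, with do_bounce_or_lookup() and swap_bounce_pages() as placeholders for the real bodies shown above:

/* DMA map/unmap and page-fault side: many concurrent readers */
read_lock(&domain->bounce_lock);
do_bounce_or_lookup(domain, iova);	/* bounce_page cannot change under us */
read_unlock(&domain->bounce_lock);

/* VDUSE_IOTLB_REG_UMEM / DEREG_UMEM side: exclusive writer */
write_lock(&domain->bounce_lock);
swap_bounce_pages(domain, pages);	/* copy in-use data, then replace pages */
write_unlock(&domain->bounce_lock);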
diff --git a/drivers/vdpa/vdpa_user/iova_domain.h b/drivers/vdpa/vdpa_user/iova_domain.h
index 2722d9b8e21a..4e0e50e7ac15 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.h
+++ b/drivers/vdpa/vdpa_user/iova_domain.h
@@ -14,6 +14,7 @@
#include <linux/iova.h>
#include <linux/dma-mapping.h>
#include <linux/vhost_iotlb.h>
+#include <linux/rwlock.h>
#define IOVA_START_PFN 1
@@ -34,6 +35,8 @@ struct vduse_iova_domain {
struct vhost_iotlb *iotlb;
spinlock_t iotlb_lock;
struct file *file;
+ bool user_bounce_pages;
+ rwlock_t bounce_lock;
};
int vduse_domain_set_map(struct vduse_iova_domain *domain,
@@ -61,6 +64,11 @@ void vduse_domain_free_coherent(struct vduse_iova_domain *domain, size_t size,
void vduse_domain_reset_bounce_map(struct vduse_iova_domain *domain);
+int vduse_domain_add_user_bounce_pages(struct vduse_iova_domain *domain,
+ struct page **pages, int count);
+
+void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain);
+
void vduse_domain_destroy(struct vduse_iova_domain *domain);
struct vduse_iova_domain *vduse_domain_create(unsigned long iova_limit,
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index 3bc27de58f46..41c0b29739f1 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -21,6 +21,8 @@
#include <linux/uio.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
+#include <linux/vmalloc.h>
+#include <linux/sched/mm.h>
#include <uapi/linux/vduse.h>
#include <uapi/linux/vdpa.h>
#include <uapi/linux/virtio_config.h>
@@ -64,6 +66,13 @@ struct vduse_vdpa {
struct vduse_dev *dev;
};
+struct vduse_umem {
+ unsigned long iova;
+ unsigned long npages;
+ struct page **pages;
+ struct mm_struct *mm;
+};
+
struct vduse_dev {
struct vduse_vdpa *vdev;
struct device *dev;
@@ -95,6 +104,8 @@ struct vduse_dev {
u8 status;
u32 vq_num;
u32 vq_align;
+ struct vduse_umem *umem;
+ struct mutex mem_lock;
};
struct vduse_dev_msg {
@@ -917,6 +928,102 @@ unlock:
return ret;
}
+static int vduse_dev_dereg_umem(struct vduse_dev *dev,
+ u64 iova, u64 size)
+{
+ int ret;
+
+ mutex_lock(&dev->mem_lock);
+ ret = -ENOENT;
+ if (!dev->umem)
+ goto unlock;
+
+ ret = -EINVAL;
+ if (dev->umem->iova != iova || size != dev->domain->bounce_size)
+ goto unlock;
+
+ vduse_domain_remove_user_bounce_pages(dev->domain);
+ unpin_user_pages_dirty_lock(dev->umem->pages,
+ dev->umem->npages, true);
+ atomic64_sub(dev->umem->npages, &dev->umem->mm->pinned_vm);
+ mmdrop(dev->umem->mm);
+ vfree(dev->umem->pages);
+ kfree(dev->umem);
+ dev->umem = NULL;
+ ret = 0;
+unlock:
+ mutex_unlock(&dev->mem_lock);
+ return ret;
+}
+
+static int vduse_dev_reg_umem(struct vduse_dev *dev,
+ u64 iova, u64 uaddr, u64 size)
+{
+ struct page **page_list = NULL;
+ struct vduse_umem *umem = NULL;
+ long pinned = 0;
+ unsigned long npages, lock_limit;
+ int ret;
+
+ if (!dev->domain->bounce_map ||
+ size != dev->domain->bounce_size ||
+ iova != 0 || uaddr & ~PAGE_MASK)
+ return -EINVAL;
+
+ mutex_lock(&dev->mem_lock);
+ ret = -EEXIST;
+ if (dev->umem)
+ goto unlock;
+
+ ret = -ENOMEM;
+ npages = size >> PAGE_SHIFT;
+ page_list = __vmalloc(array_size(npages, sizeof(struct page *)),
+ GFP_KERNEL_ACCOUNT);
+ umem = kzalloc(sizeof(*umem), GFP_KERNEL);
+ if (!page_list || !umem)
+ goto unlock;
+
+ mmap_read_lock(current->mm);
+
+ lock_limit = PFN_DOWN(rlimit(RLIMIT_MEMLOCK));
+ if (npages + atomic64_read(&current->mm->pinned_vm) > lock_limit)
+ goto out;
+
+ pinned = pin_user_pages(uaddr, npages, FOLL_LONGTERM | FOLL_WRITE,
+ page_list, NULL);
+ if (pinned != npages) {
+ ret = pinned < 0 ? pinned : -ENOMEM;
+ goto out;
+ }
+
+ ret = vduse_domain_add_user_bounce_pages(dev->domain,
+ page_list, pinned);
+ if (ret)
+ goto out;
+
+ atomic64_add(npages, &current->mm->pinned_vm);
+
+ umem->pages = page_list;
+ umem->npages = pinned;
+ umem->iova = iova;
+ umem->mm = current->mm;
+ mmgrab(current->mm);
+
+ dev->umem = umem;
+out:
+ if (ret && pinned > 0)
+ unpin_user_pages(page_list, pinned);
+
+ mmap_read_unlock(current->mm);
+unlock:
+ if (ret) {
+ vfree(page_list);
+ kfree(umem);
+ }
+ mutex_unlock(&dev->mem_lock);
+ return ret;
+}
+
static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -1089,6 +1196,77 @@ static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
ret = vduse_dev_queue_irq_work(dev, &dev->vqs[index].inject);
break;
}
+ case VDUSE_IOTLB_REG_UMEM: {
+ struct vduse_iova_umem umem;
+
+ ret = -EFAULT;
+ if (copy_from_user(&umem, argp, sizeof(umem)))
+ break;
+
+ ret = -EINVAL;
+ if (!is_mem_zero((const char *)umem.reserved,
+ sizeof(umem.reserved)))
+ break;
+
+ ret = vduse_dev_reg_umem(dev, umem.iova,
+ umem.uaddr, umem.size);
+ break;
+ }
+ case VDUSE_IOTLB_DEREG_UMEM: {
+ struct vduse_iova_umem umem;
+
+ ret = -EFAULT;
+ if (copy_from_user(&umem, argp, sizeof(umem)))
+ break;
+
+ ret = -EINVAL;
+ if (!is_mem_zero((const char *)umem.reserved,
+ sizeof(umem.reserved)))
+ break;
+
+ ret = vduse_dev_dereg_umem(dev, umem.iova,
+ umem.size);
+ break;
+ }
+ case VDUSE_IOTLB_GET_INFO: {
+ struct vduse_iova_info info;
+ struct vhost_iotlb_map *map;
+ struct vduse_iova_domain *domain = dev->domain;
+
+ ret = -EFAULT;
+ if (copy_from_user(&info, argp, sizeof(info)))
+ break;
+
+ ret = -EINVAL;
+ if (info.start > info.last)
+ break;
+
+ if (!is_mem_zero((const char *)info.reserved,
+ sizeof(info.reserved)))
+ break;
+
+ spin_lock(&domain->iotlb_lock);
+ map = vhost_iotlb_itree_first(domain->iotlb,
+ info.start, info.last);
+ if (map) {
+ info.start = map->start;
+ info.last = map->last;
+ info.capability = 0;
+ if (domain->bounce_map && map->start == 0 &&
+ map->last == domain->bounce_size - 1)
+ info.capability |= VDUSE_IOVA_CAP_UMEM;
+ }
+ spin_unlock(&domain->iotlb_lock);
+ if (!map)
+ break;
+
+ ret = -EFAULT;
+ if (copy_to_user(argp, &info, sizeof(info)))
+ break;
+
+ ret = 0;
+ break;
+ }
default:
ret = -ENOIOCTLCMD;
break;
@@ -1101,6 +1279,7 @@ static int vduse_dev_release(struct inode *inode, struct file *file)
{
struct vduse_dev *dev = file->private_data;
+ vduse_dev_dereg_umem(dev, 0, dev->domain->bounce_size);
spin_lock(&dev->msg_lock);
/* Make sure the inflight messages can be processed after reconnection */
list_splice_init(&dev->recv_list, &dev->send_list);
@@ -1163,6 +1342,7 @@ static struct vduse_dev *vduse_dev_create(void)
return NULL;
mutex_init(&dev->lock);
+ mutex_init(&dev->mem_lock);
spin_lock_init(&dev->msg_lock);
INIT_LIST_HEAD(&dev->send_list);
INIT_LIST_HEAD(&dev->recv_list);
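From userspace, the new ioctls let a VDUSE server substitute its own memory for the kernel bounce buffer: query the IOVA region with VDUSE_IOTLB_GET_INFO, check VDUSE_IOVA_CAP_UMEM, then register a page-aligned buffer covering the whole bounce area with VDUSE_IOTLB_REG_UMEM. A hedged sketch, with struct layouts taken from the uapi header added by this series (treat the field names as assumptions):

#include <sys/ioctl.h>
#include <linux/vduse.h>

static int vduse_register_umem(int dev_fd, void *buf, size_t bounce_size)
{
	struct vduse_iova_info info = { .start = 0, .last = ~0ULL };
	struct vduse_iova_umem umem = {
		.uaddr = (unsigned long)buf,	/* must be page aligned */
		.iova = 0,			/* bounce region starts at IOVA 0 */
		.size = bounce_size,		/* must span the whole bounce area */
	};

	if (ioctl(dev_fd, VDUSE_IOTLB_GET_INFO, &info) ||
	    !(info.capability & VDUSE_IOVA_CAP_UMEM))
		return -1;

	return ioctl(dev_fd, VDUSE_IOTLB_REG_UMEM, &umem);
}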
diff --git a/drivers/vfio/Makefile b/drivers/vfio/Makefile
index fee73f3d9480..1a32357592e3 100644
--- a/drivers/vfio/Makefile
+++ b/drivers/vfio/Makefile
@@ -1,6 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
vfio_virqfd-y := virqfd.o
+vfio-y += vfio_main.o
+
obj-$(CONFIG_VFIO) += vfio.o
obj-$(CONFIG_VFIO_VIRQFD) += vfio_virqfd.o
obj-$(CONFIG_VFIO_IOMMU_TYPE1) += vfio_iommu_type1.o
diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h b/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
index 4ad63ececb91..7a29f572f93d 100644
--- a/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
@@ -39,7 +39,7 @@ struct vfio_fsl_mc_device {
struct vfio_fsl_mc_irq *mc_irqs;
};
-extern int vfio_fsl_mc_set_irqs_ioctl(struct vfio_fsl_mc_device *vdev,
+int vfio_fsl_mc_set_irqs_ioctl(struct vfio_fsl_mc_device *vdev,
u32 flags, unsigned int index,
unsigned int start, unsigned int count,
void *data);
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
index 4def43f5f7b6..ea762e28c1cc 100644
--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
@@ -1185,7 +1185,7 @@ static int hisi_acc_vfio_pci_open_device(struct vfio_device *core_vdev)
if (ret)
return ret;
- if (core_vdev->ops->migration_set_state) {
+ if (core_vdev->mig_ops) {
ret = hisi_acc_vf_qm_init(hisi_acc_vdev);
if (ret) {
vfio_pci_core_disable(vdev);
@@ -1208,6 +1208,11 @@ static void hisi_acc_vfio_pci_close_device(struct vfio_device *core_vdev)
vfio_pci_core_close_device(core_vdev);
}
+static const struct vfio_migration_ops hisi_acc_vfio_pci_migrn_state_ops = {
+ .migration_set_state = hisi_acc_vfio_pci_set_device_state,
+ .migration_get_state = hisi_acc_vfio_pci_get_device_state,
+};
+
static const struct vfio_device_ops hisi_acc_vfio_pci_migrn_ops = {
.name = "hisi-acc-vfio-pci-migration",
.open_device = hisi_acc_vfio_pci_open_device,
@@ -1219,8 +1224,6 @@ static const struct vfio_device_ops hisi_acc_vfio_pci_migrn_ops = {
.mmap = hisi_acc_vfio_pci_mmap,
.request = vfio_pci_core_request,
.match = vfio_pci_core_match,
- .migration_set_state = hisi_acc_vfio_pci_set_device_state,
- .migration_get_state = hisi_acc_vfio_pci_get_device_state,
};
static const struct vfio_device_ops hisi_acc_vfio_pci_ops = {
@@ -1272,6 +1275,8 @@ static int hisi_acc_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device
if (!ret) {
vfio_pci_core_init_device(&hisi_acc_vdev->core_device, pdev,
&hisi_acc_vfio_pci_migrn_ops);
+ hisi_acc_vdev->core_device.vdev.mig_ops =
+ &hisi_acc_vfio_pci_migrn_state_ops;
} else {
pci_warn(pdev, "migration support failed, continue with generic interface\n");
vfio_pci_core_init_device(&hisi_acc_vdev->core_device, pdev,
diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c
index 9b9f33ca270a..dd5d7bfe0a49 100644
--- a/drivers/vfio/pci/mlx5/cmd.c
+++ b/drivers/vfio/pci/mlx5/cmd.c
@@ -88,6 +88,16 @@ static int mlx5fv_vf_event(struct notifier_block *nb,
return 0;
}
+void mlx5vf_cmd_close_migratable(struct mlx5vf_pci_core_device *mvdev)
+{
+ if (!mvdev->migrate_cap)
+ return;
+
+ mutex_lock(&mvdev->state_mutex);
+ mlx5vf_disable_fds(mvdev);
+ mlx5vf_state_mutex_unlock(mvdev);
+}
+
void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev)
{
if (!mvdev->migrate_cap)
@@ -98,7 +108,8 @@ void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev)
destroy_workqueue(mvdev->cb_wq);
}
-void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev)
+void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev,
+ const struct vfio_migration_ops *mig_ops)
{
struct pci_dev *pdev = mvdev->core_device.pdev;
int ret;
@@ -139,6 +150,7 @@ void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev)
mvdev->core_device.vdev.migration_flags =
VFIO_MIGRATION_STOP_COPY |
VFIO_MIGRATION_P2P;
+ mvdev->core_device.vdev.mig_ops = mig_ops;
end:
mlx5_vf_put_core_dev(mvdev->mdev);
diff --git a/drivers/vfio/pci/mlx5/cmd.h b/drivers/vfio/pci/mlx5/cmd.h
index 6c3112fdd8b1..8208f4701a90 100644
--- a/drivers/vfio/pci/mlx5/cmd.h
+++ b/drivers/vfio/pci/mlx5/cmd.h
@@ -62,8 +62,10 @@ int mlx5vf_cmd_suspend_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod);
int mlx5vf_cmd_resume_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod);
int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev,
size_t *state_size);
-void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev);
+void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev,
+ const struct vfio_migration_ops *mig_ops);
void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev);
+void mlx5vf_cmd_close_migratable(struct mlx5vf_pci_core_device *mvdev);
int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev,
struct mlx5_vf_migration_file *migf);
int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev,
diff --git a/drivers/vfio/pci/mlx5/main.c b/drivers/vfio/pci/mlx5/main.c
index 0558d0649ddb..a9b63d15c5d3 100644
--- a/drivers/vfio/pci/mlx5/main.c
+++ b/drivers/vfio/pci/mlx5/main.c
@@ -570,10 +570,15 @@ static void mlx5vf_pci_close_device(struct vfio_device *core_vdev)
struct mlx5vf_pci_core_device *mvdev = container_of(
core_vdev, struct mlx5vf_pci_core_device, core_device.vdev);
- mlx5vf_disable_fds(mvdev);
+ mlx5vf_cmd_close_migratable(mvdev);
vfio_pci_core_close_device(core_vdev);
}
+static const struct vfio_migration_ops mlx5vf_pci_mig_ops = {
+ .migration_set_state = mlx5vf_pci_set_device_state,
+ .migration_get_state = mlx5vf_pci_get_device_state,
+};
+
static const struct vfio_device_ops mlx5vf_pci_ops = {
.name = "mlx5-vfio-pci",
.open_device = mlx5vf_pci_open_device,
@@ -585,8 +590,6 @@ static const struct vfio_device_ops mlx5vf_pci_ops = {
.mmap = vfio_pci_core_mmap,
.request = vfio_pci_core_request,
.match = vfio_pci_core_match,
- .migration_set_state = mlx5vf_pci_set_device_state,
- .migration_get_state = mlx5vf_pci_get_device_state,
};
static int mlx5vf_pci_probe(struct pci_dev *pdev,
@@ -599,7 +602,7 @@ static int mlx5vf_pci_probe(struct pci_dev *pdev,
if (!mvdev)
return -ENOMEM;
vfio_pci_core_init_device(&mvdev->core_device, pdev, &mlx5vf_pci_ops);
- mlx5vf_cmd_set_migratable(mvdev);
+ mlx5vf_cmd_set_migratable(mvdev, &mlx5vf_pci_mig_ops);
dev_set_drvdata(&pdev->dev, &mvdev->core_device);
ret = vfio_pci_core_register_device(&mvdev->core_device);
if (ret)
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 9343f597182d..442d3ba4122b 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -222,7 +222,7 @@ static int vfio_default_config_write(struct vfio_pci_core_device *vdev, int pos,
memcpy(vdev->vconfig + pos, &virt_val, count);
}
- /* Non-virtualzed and writable bits go to hardware */
+ /* Non-virtualized and writable bits go to hardware */
if (write & ~virt) {
struct pci_dev *pdev = vdev->pdev;
__le32 phys_val = 0;
@@ -1728,7 +1728,7 @@ int vfio_config_init(struct vfio_pci_core_device *vdev)
/*
* Config space, caps and ecaps are all dword aligned, so we could
* use one byte per dword to record the type. However, there are
- * no requiremenst on the length of a capability, so the gap between
+ * no requirements on the length of a capability, so the gap between
* capabilities needs byte granularity.
*/
map = kmalloc(pdev->cfg_size, GFP_KERNEL);
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index 0d8d4cdfb2f0..c8d3b0450fb3 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -1868,6 +1868,13 @@ int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev)
if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
return -EINVAL;
+ if (vdev->vdev.mig_ops) {
+ if (!(vdev->vdev.mig_ops->migration_get_state &&
+ vdev->vdev.mig_ops->migration_set_state) ||
+ !(vdev->vdev.migration_flags & VFIO_MIGRATION_STOP_COPY))
+ return -EINVAL;
+ }
+
/*
* Prevent binding to PFs with VFs enabled, the VFs might be in use
* by the host or other users. We cannot capture the VFs if they
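With the migration callbacks moved out of vfio_device_ops into the separate vfio_migration_ops, the core now sanity-checks at registration time that any driver supplying mig_ops implements both state accessors and advertises at least VFIO_MIGRATION_STOP_COPY. The expected driver-side pattern, distilled from the hisi_acc and mlx5 conversions above (device and callback names here are hypothetical):

static const struct vfio_migration_ops my_mig_ops = {
	.migration_set_state = my_set_device_state,
	.migration_get_state = my_get_device_state,
};

/* in probe, before vfio_pci_core_register_device() */
vfio_pci_core_init_device(&mydev->core_device, pdev, &my_vfio_ops);
mydev->core_device.vdev.mig_ops = &my_mig_ops;
mydev->core_device.vdev.migration_flags = VFIO_MIGRATION_STOP_COPY;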
diff --git a/drivers/vfio/platform/vfio_platform_private.h b/drivers/vfio/platform/vfio_platform_private.h
index 520d2a8e8375..691b43f4b2b2 100644
--- a/drivers/vfio/platform/vfio_platform_private.h
+++ b/drivers/vfio/platform/vfio_platform_private.h
@@ -78,21 +78,20 @@ struct vfio_platform_reset_node {
vfio_platform_reset_fn_t of_reset;
};
-extern int vfio_platform_probe_common(struct vfio_platform_device *vdev,
- struct device *dev);
+int vfio_platform_probe_common(struct vfio_platform_device *vdev,
+ struct device *dev);
void vfio_platform_remove_common(struct vfio_platform_device *vdev);
-extern int vfio_platform_irq_init(struct vfio_platform_device *vdev);
-extern void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev);
+int vfio_platform_irq_init(struct vfio_platform_device *vdev);
+void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev);
-extern int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev,
- uint32_t flags, unsigned index,
- unsigned start, unsigned count,
- void *data);
+int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev,
+ uint32_t flags, unsigned index,
+ unsigned start, unsigned count, void *data);
-extern void __vfio_platform_register_reset(struct vfio_platform_reset_node *n);
-extern void vfio_platform_unregister_reset(const char *compat,
- vfio_platform_reset_fn_t fn);
+void __vfio_platform_register_reset(struct vfio_platform_reset_node *n);
+void vfio_platform_unregister_reset(const char *compat,
+ vfio_platform_reset_fn_t fn);
#define vfio_platform_register_reset(__compat, __reset) \
static struct vfio_platform_reset_node __reset ## _node = { \
.owner = THIS_MODULE, \
diff --git a/drivers/vfio/vfio.h b/drivers/vfio/vfio.h
index a67130221151..503bea6c843d 100644
--- a/drivers/vfio/vfio.h
+++ b/drivers/vfio/vfio.h
@@ -50,16 +50,15 @@ struct vfio_iommu_driver_ops {
struct iommu_group *group);
int (*pin_pages)(void *iommu_data,
struct iommu_group *group,
- unsigned long *user_pfn,
+ dma_addr_t user_iova,
int npage, int prot,
- unsigned long *phys_pfn);
- int (*unpin_pages)(void *iommu_data,
- unsigned long *user_pfn, int npage);
- int (*register_notifier)(void *iommu_data,
- unsigned long *events,
- struct notifier_block *nb);
- int (*unregister_notifier)(void *iommu_data,
- struct notifier_block *nb);
+ struct page **pages);
+ void (*unpin_pages)(void *iommu_data,
+ dma_addr_t user_iova, int npage);
+ void (*register_device)(void *iommu_data,
+ struct vfio_device *vdev);
+ void (*unregister_device)(void *iommu_data,
+ struct vfio_device *vdev);
int (*dma_rw)(void *iommu_data, dma_addr_t user_iova,
void *data, size_t count, bool write);
struct iommu_domain *(*group_iommu_domain)(void *iommu_data,
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 708a95e61831..169f07ac162d 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -378,8 +378,7 @@ static void tce_iommu_release(void *iommu_data)
kfree(container);
}
-static void tce_iommu_unuse_page(struct tce_container *container,
- unsigned long hpa)
+static void tce_iommu_unuse_page(unsigned long hpa)
{
struct page *page;
@@ -474,7 +473,7 @@ static int tce_iommu_clear(struct tce_container *container,
continue;
}
- tce_iommu_unuse_page(container, oldhpa);
+ tce_iommu_unuse_page(oldhpa);
}
iommu_tce_kill(tbl, firstentry, pages);
@@ -524,7 +523,7 @@ static long tce_iommu_build(struct tce_container *container,
ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i,
&hpa, &dirtmp);
if (ret) {
- tce_iommu_unuse_page(container, hpa);
+ tce_iommu_unuse_page(hpa);
pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
__func__, entry << tbl->it_page_shift,
tce, ret);
@@ -532,7 +531,7 @@ static long tce_iommu_build(struct tce_container *container,
}
if (dirtmp != DMA_NONE)
- tce_iommu_unuse_page(container, hpa);
+ tce_iommu_unuse_page(hpa);
tce += IOMMU_PAGE_SIZE(tbl);
}
@@ -1266,7 +1265,10 @@ static int tce_iommu_attach_group(void *iommu_data,
goto unlock_exit;
}
- /* Check if new group has the same iommu_ops (i.e. compatible) */
+ /*
+ * Check if new group has the same iommu_table_group_ops
+ * (i.e. compatible)
+ */
list_for_each_entry(tcegrp, &container->group_list, next) {
struct iommu_table_group *table_group_tmp;
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index c13b9290e357..db516c90a977 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -67,7 +67,8 @@ struct vfio_iommu {
struct list_head iova_list;
struct mutex lock;
struct rb_root dma_list;
- struct blocking_notifier_head notifier;
+ struct list_head device_list;
+ struct mutex device_list_lock;
unsigned int dma_avail;
unsigned int vaddr_invalid_count;
uint64_t pgsize_bitmap;
@@ -828,9 +829,9 @@ static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova,
static int vfio_iommu_type1_pin_pages(void *iommu_data,
struct iommu_group *iommu_group,
- unsigned long *user_pfn,
+ dma_addr_t user_iova,
int npage, int prot,
- unsigned long *phys_pfn)
+ struct page **pages)
{
struct vfio_iommu *iommu = iommu_data;
struct vfio_iommu_group *group;
@@ -840,7 +841,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
bool do_accounting;
dma_addr_t iova;
- if (!iommu || !user_pfn || !phys_pfn)
+ if (!iommu || !pages)
return -EINVAL;
/* Supported for v2 version only */
@@ -856,7 +857,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
again:
if (iommu->vaddr_invalid_count) {
for (i = 0; i < npage; i++) {
- iova = user_pfn[i] << PAGE_SHIFT;
+ iova = user_iova + PAGE_SIZE * i;
ret = vfio_find_dma_valid(iommu, iova, PAGE_SIZE, &dma);
if (ret < 0)
goto pin_done;
@@ -865,8 +866,8 @@ again:
}
}
- /* Fail if notifier list is empty */
- if (!iommu->notifier.head) {
+ /* Fail if no dma_umap notifier is registered */
+ if (list_empty(&iommu->device_list)) {
ret = -EINVAL;
goto pin_done;
}
@@ -879,9 +880,10 @@ again:
do_accounting = list_empty(&iommu->domain_list);
for (i = 0; i < npage; i++) {
+ unsigned long phys_pfn;
struct vfio_pfn *vpfn;
- iova = user_pfn[i] << PAGE_SHIFT;
+ iova = user_iova + PAGE_SIZE * i;
dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
if (!dma) {
ret = -EINVAL;
@@ -895,23 +897,25 @@ again:
vpfn = vfio_iova_get_vfio_pfn(dma, iova);
if (vpfn) {
- phys_pfn[i] = vpfn->pfn;
+ pages[i] = pfn_to_page(vpfn->pfn);
continue;
}
remote_vaddr = dma->vaddr + (iova - dma->iova);
- ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn[i],
+ ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn,
do_accounting);
if (ret)
goto pin_unwind;
- ret = vfio_add_to_pfn_list(dma, iova, phys_pfn[i]);
+ ret = vfio_add_to_pfn_list(dma, iova, phys_pfn);
if (ret) {
- if (put_pfn(phys_pfn[i], dma->prot) && do_accounting)
+ if (put_pfn(phys_pfn, dma->prot) && do_accounting)
vfio_lock_acct(dma, -1, true);
goto pin_unwind;
}
+ pages[i] = pfn_to_page(phys_pfn);
+
if (iommu->dirty_page_tracking) {
unsigned long pgshift = __ffs(iommu->pgsize_bitmap);
@@ -934,43 +938,38 @@ again:
goto pin_done;
pin_unwind:
- phys_pfn[i] = 0;
+ pages[i] = NULL;
for (j = 0; j < i; j++) {
dma_addr_t iova;
- iova = user_pfn[j] << PAGE_SHIFT;
+ iova = user_iova + PAGE_SIZE * j;
dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
vfio_unpin_page_external(dma, iova, do_accounting);
- phys_pfn[j] = 0;
+ pages[j] = NULL;
}
pin_done:
mutex_unlock(&iommu->lock);
return ret;
}
-static int vfio_iommu_type1_unpin_pages(void *iommu_data,
- unsigned long *user_pfn,
- int npage)
+static void vfio_iommu_type1_unpin_pages(void *iommu_data,
+ dma_addr_t user_iova, int npage)
{
struct vfio_iommu *iommu = iommu_data;
bool do_accounting;
int i;
- if (!iommu || !user_pfn || npage <= 0)
- return -EINVAL;
-
/* Supported for v2 version only */
- if (!iommu->v2)
- return -EACCES;
+ if (WARN_ON(!iommu->v2))
+ return;
mutex_lock(&iommu->lock);
do_accounting = list_empty(&iommu->domain_list);
for (i = 0; i < npage; i++) {
+ dma_addr_t iova = user_iova + PAGE_SIZE * i;
struct vfio_dma *dma;
- dma_addr_t iova;
- iova = user_pfn[i] << PAGE_SHIFT;
dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
if (!dma)
break;
@@ -979,7 +978,8 @@ static int vfio_iommu_type1_unpin_pages(void *iommu_data,
}
mutex_unlock(&iommu->lock);
- return i > 0 ? i : -EINVAL;
+
+ WARN_ON(i != npage);
}
static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain,
@@ -1287,6 +1287,35 @@ static int verify_bitmap_size(uint64_t npages, uint64_t bitmap_size)
return 0;
}
+/*
+ * Notify VFIO drivers using vfio_register_emulated_iommu_dev() to invalidate
+ * and unmap iovas within the range we're about to unmap. Drivers MUST unpin
+ * pages in response to an invalidation.
+ */
+static void vfio_notify_dma_unmap(struct vfio_iommu *iommu,
+ struct vfio_dma *dma)
+{
+ struct vfio_device *device;
+
+ if (list_empty(&iommu->device_list))
+ return;
+
+ /*
+ * The device is expected to call vfio_unpin_pages() for any IOVA it has
+ * pinned within the range. Since vfio_unpin_pages() will eventually
+ * call back down to this code and try to obtain the iommu->lock we must
+ * drop it.
+ */
+ mutex_lock(&iommu->device_list_lock);
+ mutex_unlock(&iommu->lock);
+
+ list_for_each_entry(device, &iommu->device_list, iommu_entry)
+ device->ops->dma_unmap(device, dma->iova, dma->size);
+
+ mutex_unlock(&iommu->device_list_lock);
+ mutex_lock(&iommu->lock);
+}
+
static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
struct vfio_iommu_type1_dma_unmap *unmap,
struct vfio_bitmap *bitmap)
@@ -1377,12 +1406,6 @@ again:
if (!iommu->v2 && iova > dma->iova)
break;
- /*
- * Task with same address space who mapped this iova range is
- * allowed to unmap the iova range.
- */
- if (dma->task->mm != current->mm)
- break;
if (invalidate_vaddr) {
if (dma->vaddr_invalid) {
@@ -1406,8 +1429,6 @@ again:
}
if (!RB_EMPTY_ROOT(&dma->pfn_list)) {
- struct vfio_iommu_type1_dma_unmap nb_unmap;
-
if (dma_last == dma) {
BUG_ON(++retries > 10);
} else {
@@ -1415,20 +1436,7 @@ again:
retries = 0;
}
- nb_unmap.iova = dma->iova;
- nb_unmap.size = dma->size;
-
- /*
- * Notify anyone (mdev vendor drivers) to invalidate and
- * unmap iovas within the range we're about to unmap.
- * Vendor drivers MUST unpin pages in response to an
- * invalidation.
- */
- mutex_unlock(&iommu->lock);
- blocking_notifier_call_chain(&iommu->notifier,
- VFIO_IOMMU_NOTIFY_DMA_UNMAP,
- &nb_unmap);
- mutex_lock(&iommu->lock);
+ vfio_notify_dma_unmap(iommu, dma);
goto again;
}
@@ -1679,18 +1687,6 @@ out_unlock:
return ret;
}
-static int vfio_bus_type(struct device *dev, void *data)
-{
- struct bus_type **bus = data;
-
- if (*bus && *bus != dev->bus)
- return -EINVAL;
-
- *bus = dev->bus;
-
- return 0;
-}
-
static int vfio_iommu_replay(struct vfio_iommu *iommu,
struct vfio_domain *domain)
{
@@ -2153,13 +2149,26 @@ static void vfio_iommu_iova_insert_copy(struct vfio_iommu *iommu,
list_splice_tail(iova_copy, iova);
}
+/* Redundantly walks non-present capabilities to simplify caller */
+static int vfio_iommu_device_capable(struct device *dev, void *data)
+{
+ return device_iommu_capable(dev, (enum iommu_cap)data);
+}
+
+static int vfio_iommu_domain_alloc(struct device *dev, void *data)
+{
+ struct iommu_domain **domain = data;
+
+ *domain = iommu_domain_alloc(dev->bus);
+ return 1; /* Don't iterate */
+}
+
static int vfio_iommu_type1_attach_group(void *iommu_data,
struct iommu_group *iommu_group, enum vfio_group_type type)
{
struct vfio_iommu *iommu = iommu_data;
struct vfio_iommu_group *group;
struct vfio_domain *domain, *d;
- struct bus_type *bus = NULL;
bool resv_msi, msi_remap;
phys_addr_t resv_msi_base = 0;
struct iommu_domain_geometry *geo;
@@ -2192,18 +2201,19 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
goto out_unlock;
}
- /* Determine bus_type in order to allocate a domain */
- ret = iommu_group_for_each_dev(iommu_group, &bus, vfio_bus_type);
- if (ret)
- goto out_free_group;
-
ret = -ENOMEM;
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
goto out_free_group;
+ /*
+ * Going via the iommu_group iterator avoids races, and trivially gives
+ * us a representative device for the IOMMU API call. We don't actually
+ * want to iterate beyond the first device (if any).
+ */
ret = -EIO;
- domain->domain = iommu_domain_alloc(bus);
+ iommu_group_for_each_dev(iommu_group, &domain->domain,
+ vfio_iommu_domain_alloc);
if (!domain->domain)
goto out_free_domain;
@@ -2258,7 +2268,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
list_add(&group->next, &domain->group_list);
msi_remap = irq_domain_check_msi_remap() ||
- iommu_capable(bus, IOMMU_CAP_INTR_REMAP);
+ iommu_group_for_each_dev(iommu_group, (void *)IOMMU_CAP_INTR_REMAP,
+ vfio_iommu_device_capable);
if (!allow_unsafe_interrupts && !msi_remap) {
pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
@@ -2478,7 +2489,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
if (list_empty(&iommu->emulated_iommu_groups) &&
list_empty(&iommu->domain_list)) {
- WARN_ON(iommu->notifier.head);
+ WARN_ON(!list_empty(&iommu->device_list));
vfio_iommu_unmap_unpin_all(iommu);
}
goto detach_group_done;
@@ -2510,7 +2521,8 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
if (list_empty(&domain->group_list)) {
if (list_is_singular(&iommu->domain_list)) {
if (list_empty(&iommu->emulated_iommu_groups)) {
- WARN_ON(iommu->notifier.head);
+ WARN_ON(!list_empty(
+ &iommu->device_list));
vfio_iommu_unmap_unpin_all(iommu);
} else {
vfio_iommu_unmap_unpin_reaccount(iommu);
@@ -2571,7 +2583,8 @@ static void *vfio_iommu_type1_open(unsigned long arg)
iommu->dma_avail = dma_entry_limit;
iommu->container_open = true;
mutex_init(&iommu->lock);
- BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);
+ mutex_init(&iommu->device_list_lock);
+ INIT_LIST_HEAD(&iommu->device_list);
init_waitqueue_head(&iommu->vaddr_wait);
iommu->pgsize_bitmap = PAGE_MASK;
INIT_LIST_HEAD(&iommu->emulated_iommu_groups);
@@ -3008,28 +3021,40 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
}
}
-static int vfio_iommu_type1_register_notifier(void *iommu_data,
- unsigned long *events,
- struct notifier_block *nb)
+static void vfio_iommu_type1_register_device(void *iommu_data,
+ struct vfio_device *vdev)
{
struct vfio_iommu *iommu = iommu_data;
- /* clear known events */
- *events &= ~VFIO_IOMMU_NOTIFY_DMA_UNMAP;
-
- /* refuse to register if still events remaining */
- if (*events)
- return -EINVAL;
+ if (!vdev->ops->dma_unmap)
+ return;
- return blocking_notifier_chain_register(&iommu->notifier, nb);
+ /*
+ * list_empty(&iommu->device_list) is tested under the iommu->lock while
+ * iteration for dma_unmap must be done under the device_list_lock.
+ * Holding both locks here allows avoiding the device_list_lock in
+ * several fast paths. See vfio_notify_dma_unmap()
+ */
+ mutex_lock(&iommu->lock);
+ mutex_lock(&iommu->device_list_lock);
+ list_add(&vdev->iommu_entry, &iommu->device_list);
+ mutex_unlock(&iommu->device_list_lock);
+ mutex_unlock(&iommu->lock);
}
-static int vfio_iommu_type1_unregister_notifier(void *iommu_data,
- struct notifier_block *nb)
+static void vfio_iommu_type1_unregister_device(void *iommu_data,
+ struct vfio_device *vdev)
{
struct vfio_iommu *iommu = iommu_data;
- return blocking_notifier_chain_unregister(&iommu->notifier, nb);
+ if (!vdev->ops->dma_unmap)
+ return;
+
+ mutex_lock(&iommu->lock);
+ mutex_lock(&iommu->device_list_lock);
+ list_del(&vdev->iommu_entry);
+ mutex_unlock(&iommu->device_list_lock);
+ mutex_unlock(&iommu->lock);
}
static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu,
@@ -3163,8 +3188,8 @@ static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
.detach_group = vfio_iommu_type1_detach_group,
.pin_pages = vfio_iommu_type1_pin_pages,
.unpin_pages = vfio_iommu_type1_unpin_pages,
- .register_notifier = vfio_iommu_type1_register_notifier,
- .unregister_notifier = vfio_iommu_type1_unregister_notifier,
+ .register_device = vfio_iommu_type1_register_device,
+ .unregister_device = vfio_iommu_type1_unregister_device,
.dma_rw = vfio_iommu_type1_dma_rw,
.group_iommu_domain = vfio_iommu_type1_group_iommu_domain,
.notify = vfio_iommu_type1_notify,
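For emulated-IOMMU (mdev-style) drivers, the notifier chain is replaced by an optional dma_unmap callback in vfio_device_ops: when a vfio_dma with pinned pages is being unmapped, type1 invokes the callback for every registered device, and the driver must unpin anything it holds inside that range. A hedged sketch of the driver side, with the op signature assumed to match the call site above and helper names hypothetical:

static void my_dma_unmap(struct vfio_device *vdev, u64 iova, u64 length)
{
	/*
	 * Walk this device's own pin tracking and call vfio_unpin_pages()
	 * for every run it pinned inside [iova, iova + length).
	 */
	my_unpin_tracked_pages(vdev, iova, length);
}

static const struct vfio_device_ops my_mdev_ops = {
	.name		= "my-mdev",
	.dma_unmap	= my_dma_unmap,
	/* ... open_device, close_device, read, write, ioctl, mmap ... */
};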
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio_main.c
index 521a3eabf0e1..7cb56c382c97 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio_main.c
@@ -231,6 +231,9 @@ int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops)
{
struct vfio_iommu_driver *driver, *tmp;
+ if (WARN_ON(!ops->register_device != !ops->unregister_device))
+ return -EINVAL;
+
driver = kzalloc(sizeof(*driver), GFP_KERNEL);
if (!driver)
return -ENOMEM;
@@ -504,7 +507,9 @@ static struct vfio_group *vfio_noiommu_group_alloc(struct device *dev,
if (IS_ERR(iommu_group))
return ERR_CAST(iommu_group);
- iommu_group_set_name(iommu_group, "vfio-noiommu");
+ ret = iommu_group_set_name(iommu_group, "vfio-noiommu");
+ if (ret)
+ goto out_put_group;
ret = iommu_group_add_device(iommu_group, dev);
if (ret)
goto out_put_group;
@@ -554,7 +559,7 @@ static struct vfio_group *vfio_group_find_or_alloc(struct device *dev)
* restore cache coherency. It has to be checked here because it is only
* valid for cases where we are using iommu groups.
*/
- if (!iommu_capable(dev->bus, IOMMU_CAP_CACHE_COHERENCY)) {
+ if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY)) {
iommu_group_put(iommu_group);
return ERR_PTR(-EINVAL);
}
@@ -1082,6 +1087,7 @@ static void vfio_device_unassign_container(struct vfio_device *device)
static struct file *vfio_device_open(struct vfio_device *device)
{
+ struct vfio_iommu_driver *iommu_driver;
struct file *filep;
int ret;
@@ -1112,6 +1118,12 @@ static struct file *vfio_device_open(struct vfio_device *device)
if (ret)
goto err_undo_count;
}
+
+ iommu_driver = device->group->container->iommu_driver;
+ if (iommu_driver && iommu_driver->ops->register_device)
+ iommu_driver->ops->register_device(
+ device->group->container->iommu_data, device);
+
up_read(&device->group->group_rwsem);
}
mutex_unlock(&device->dev_set->lock);
@@ -1146,13 +1158,19 @@ static struct file *vfio_device_open(struct vfio_device *device)
err_close_device:
mutex_lock(&device->dev_set->lock);
down_read(&device->group->group_rwsem);
- if (device->open_count == 1 && device->ops->close_device)
+ if (device->open_count == 1 && device->ops->close_device) {
device->ops->close_device(device);
+
+ iommu_driver = device->group->container->iommu_driver;
+ if (iommu_driver && iommu_driver->ops->unregister_device)
+ iommu_driver->ops->unregister_device(
+ device->group->container->iommu_data, device);
+ }
err_undo_count:
+ up_read(&device->group->group_rwsem);
device->open_count--;
if (device->open_count == 0 && device->kvm)
device->kvm = NULL;
- up_read(&device->group->group_rwsem);
mutex_unlock(&device->dev_set->lock);
module_put(device->dev->driver->owner);
err_unassign_container:
@@ -1342,12 +1360,18 @@ static const struct file_operations vfio_group_fops = {
static int vfio_device_fops_release(struct inode *inode, struct file *filep)
{
struct vfio_device *device = filep->private_data;
+ struct vfio_iommu_driver *iommu_driver;
mutex_lock(&device->dev_set->lock);
vfio_assert_device_open(device);
down_read(&device->group->group_rwsem);
if (device->open_count == 1 && device->ops->close_device)
device->ops->close_device(device);
+
+ iommu_driver = device->group->container->iommu_driver;
+ if (iommu_driver && iommu_driver->ops->unregister_device)
+ iommu_driver->ops->unregister_device(
+ device->group->container->iommu_data, device);
up_read(&device->group->group_rwsem);
device->open_count--;
if (device->open_count == 0)
@@ -1544,8 +1568,7 @@ vfio_ioctl_device_feature_mig_device_state(struct vfio_device *device,
struct file *filp = NULL;
int ret;
- if (!device->ops->migration_set_state ||
- !device->ops->migration_get_state)
+ if (!device->mig_ops)
return -ENOTTY;
ret = vfio_check_feature(flags, argsz,
@@ -1561,7 +1584,8 @@ vfio_ioctl_device_feature_mig_device_state(struct vfio_device *device,
if (flags & VFIO_DEVICE_FEATURE_GET) {
enum vfio_device_mig_state curr_state;
- ret = device->ops->migration_get_state(device, &curr_state);
+ ret = device->mig_ops->migration_get_state(device,
+ &curr_state);
if (ret)
return ret;
mig.device_state = curr_state;
@@ -1569,7 +1593,7 @@ vfio_ioctl_device_feature_mig_device_state(struct vfio_device *device,
}
/* Handle the VFIO_DEVICE_FEATURE_SET */
- filp = device->ops->migration_set_state(device, mig.device_state);
+ filp = device->mig_ops->migration_set_state(device, mig.device_state);
if (IS_ERR(filp) || !filp)
goto out_copy;
@@ -1592,8 +1616,7 @@ static int vfio_ioctl_device_feature_migration(struct vfio_device *device,
};
int ret;
- if (!device->ops->migration_set_state ||
- !device->ops->migration_get_state)
+ if (!device->mig_ops)
return -ENOTTY;
ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_GET,
@@ -1815,6 +1838,7 @@ struct vfio_info_cap_header *vfio_info_cap_add(struct vfio_info_cap *caps,
buf = krealloc(caps->buf, caps->size + size, GFP_KERNEL);
if (!buf) {
kfree(caps->buf);
+ caps->buf = NULL;
caps->size = 0;
return ERR_PTR(-ENOMEM);
}
@@ -1913,26 +1937,25 @@ int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, int num_irqs,
EXPORT_SYMBOL(vfio_set_irqs_validate_and_prepare);
/*
- * Pin a set of guest PFNs and return their associated host PFNs for local
+ * Pin contiguous user pages and return their associated host pages for local
* domain only.
* @device [in] : device
- * @user_pfn [in]: array of user/guest PFNs to be pinned.
- * @npage [in] : count of elements in user_pfn array. This count should not
- * be greater VFIO_PIN_PAGES_MAX_ENTRIES.
+ * @iova [in] : starting IOVA of user pages to be pinned.
+ * @npage [in] : count of pages to be pinned. This count should not
+ * be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
* @prot [in] : protection flags
- * @phys_pfn[out]: array of host PFNs
+ * @pages[out] : array of host pages
* Return error or number of pages pinned.
*/
-int vfio_pin_pages(struct vfio_device *device, unsigned long *user_pfn,
- int npage, int prot, unsigned long *phys_pfn)
+int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova,
+ int npage, int prot, struct page **pages)
{
struct vfio_container *container;
struct vfio_group *group = device->group;
struct vfio_iommu_driver *driver;
int ret;
- if (!user_pfn || !phys_pfn || !npage ||
- !vfio_assert_device_open(device))
+ if (!pages || !npage || !vfio_assert_device_open(device))
return -EINVAL;
if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
@@ -1946,8 +1969,8 @@ int vfio_pin_pages(struct vfio_device *device, unsigned long *user_pfn,
driver = container->iommu_driver;
if (likely(driver && driver->ops->pin_pages))
ret = driver->ops->pin_pages(container->iommu_data,
- group->iommu_group, user_pfn,
- npage, prot, phys_pfn);
+ group->iommu_group, iova,
+ npage, prot, pages);
else
ret = -ENOTTY;
@@ -1956,37 +1979,28 @@ int vfio_pin_pages(struct vfio_device *device, unsigned long *user_pfn,
EXPORT_SYMBOL(vfio_pin_pages);
/*
- * Unpin set of host PFNs for local domain only.
+ * Unpin contiguous host pages for local domain only.
* @device [in] : device
- * @user_pfn [in]: array of user/guest PFNs to be unpinned. Number of user/guest
- * PFNs should not be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
- * @npage [in] : count of elements in user_pfn array. This count should not
+ * @iova [in] : starting address of user pages to be unpinned.
+ * @npage [in] : count of pages to be unpinned. This count should not
* be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
- * Return error or number of pages unpinned.
*/
-int vfio_unpin_pages(struct vfio_device *device, unsigned long *user_pfn,
- int npage)
+void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage)
{
struct vfio_container *container;
struct vfio_iommu_driver *driver;
- int ret;
- if (!user_pfn || !npage || !vfio_assert_device_open(device))
- return -EINVAL;
+ if (WARN_ON(npage <= 0 || npage > VFIO_PIN_PAGES_MAX_ENTRIES))
+ return;
- if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
- return -E2BIG;
+ if (WARN_ON(!vfio_assert_device_open(device)))
+ return;
/* group->container cannot change while a vfio device is open */
container = device->group->container;
driver = container->iommu_driver;
- if (likely(driver && driver->ops->unpin_pages))
- ret = driver->ops->unpin_pages(container->iommu_data, user_pfn,
- npage);
- else
- ret = -ENOTTY;
- return ret;
+ driver->ops->unpin_pages(container->iommu_data, iova, npage);
}
EXPORT_SYMBOL(vfio_unpin_pages);
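For callers, vfio_pin_pages()/vfio_unpin_pages() now take a contiguous IOVA range instead of an array of PFNs and hand back struct page pointers instead of raw PFNs. A hedged usage sketch from a hypothetical emulated-IOMMU driver (error handling trimmed):

struct page *page;
void *va;
int ret;

/* pin one guest page at 'iova' for read/write DMA emulation */
ret = vfio_pin_pages(vdev, iova, 1, IOMMU_READ | IOMMU_WRITE, &page);
if (ret != 1)
	return ret < 0 ? ret : -EFAULT;

va = kmap_local_page(page);
/* ... access the guest page through 'va' ... */
kunmap_local(va);

vfio_unpin_pages(vdev, iova, 1);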
@@ -2001,13 +2015,13 @@ EXPORT_SYMBOL(vfio_unpin_pages);
* not a real device DMA, it is not necessary to pin the user space memory.
*
* @device [in] : VFIO device
- * @user_iova [in] : base IOVA of a user space buffer
+ * @iova [in] : base IOVA of a user space buffer
* @data [in] : pointer to kernel buffer
* @len [in] : kernel buffer length
* @write : indicate read or write
* Return error code on failure or 0 on success.
*/
-int vfio_dma_rw(struct vfio_device *device, dma_addr_t user_iova, void *data,
+int vfio_dma_rw(struct vfio_device *device, dma_addr_t iova, void *data,
size_t len, bool write)
{
struct vfio_container *container;
@@ -2023,97 +2037,13 @@ int vfio_dma_rw(struct vfio_device *device, dma_addr_t user_iova, void *data,
if (likely(driver && driver->ops->dma_rw))
ret = driver->ops->dma_rw(container->iommu_data,
- user_iova, data, len, write);
+ iova, data, len, write);
else
ret = -ENOTTY;
return ret;
}
EXPORT_SYMBOL(vfio_dma_rw);
-static int vfio_register_iommu_notifier(struct vfio_group *group,
- unsigned long *events,
- struct notifier_block *nb)
-{
- struct vfio_container *container;
- struct vfio_iommu_driver *driver;
- int ret;
-
- lockdep_assert_held_read(&group->group_rwsem);
-
- container = group->container;
- driver = container->iommu_driver;
- if (likely(driver && driver->ops->register_notifier))
- ret = driver->ops->register_notifier(container->iommu_data,
- events, nb);
- else
- ret = -ENOTTY;
-
- return ret;
-}
-
-static int vfio_unregister_iommu_notifier(struct vfio_group *group,
- struct notifier_block *nb)
-{
- struct vfio_container *container;
- struct vfio_iommu_driver *driver;
- int ret;
-
- lockdep_assert_held_read(&group->group_rwsem);
-
- container = group->container;
- driver = container->iommu_driver;
- if (likely(driver && driver->ops->unregister_notifier))
- ret = driver->ops->unregister_notifier(container->iommu_data,
- nb);
- else
- ret = -ENOTTY;
-
- return ret;
-}
-
-int vfio_register_notifier(struct vfio_device *device,
- enum vfio_notify_type type, unsigned long *events,
- struct notifier_block *nb)
-{
- struct vfio_group *group = device->group;
- int ret;
-
- if (!nb || !events || (*events == 0) ||
- !vfio_assert_device_open(device))
- return -EINVAL;
-
- switch (type) {
- case VFIO_IOMMU_NOTIFY:
- ret = vfio_register_iommu_notifier(group, events, nb);
- break;
- default:
- ret = -EINVAL;
- }
- return ret;
-}
-EXPORT_SYMBOL(vfio_register_notifier);
-
-int vfio_unregister_notifier(struct vfio_device *device,
- enum vfio_notify_type type,
- struct notifier_block *nb)
-{
- struct vfio_group *group = device->group;
- int ret;
-
- if (!nb || !vfio_assert_device_open(device))
- return -EINVAL;
-
- switch (type) {
- case VFIO_IOMMU_NOTIFY:
- ret = vfio_unregister_iommu_notifier(group, nb);
- break;
- default:
- ret = -EINVAL;
- }
- return ret;
-}
-EXPORT_SYMBOL(vfio_unregister_notifier);
-
/*
* Module/class support
*/
@@ -2159,13 +2089,17 @@ static int __init vfio_init(void)
if (ret)
goto err_alloc_chrdev;
- pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
-
#ifdef CONFIG_VFIO_NOIOMMU
- vfio_register_iommu_driver(&vfio_noiommu_ops);
+ ret = vfio_register_iommu_driver(&vfio_noiommu_ops);
#endif
+ if (ret)
+ goto err_driver_register;
+
+ pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
return 0;
+err_driver_register:
+ unregister_chrdev_region(vfio.group_devt, MINORMASK + 1);
err_alloc_chrdev:
class_destroy(vfio.class);
vfio.class = NULL;
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index ffd9e6c2ffc1..7ebf106d50c1 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -159,9 +159,13 @@ enum {
};
#define VHOST_SCSI_MAX_TARGET 256
-#define VHOST_SCSI_MAX_VQ 128
+#define VHOST_SCSI_MAX_IO_VQ 1024
#define VHOST_SCSI_MAX_EVENT 128
+static unsigned vhost_scsi_max_io_vqs = 128;
+module_param_named(max_io_vqs, vhost_scsi_max_io_vqs, uint, 0644);
+MODULE_PARM_DESC(max_io_vqs, "Set the max number of IO virtqueues a vhost scsi device can support. The default is 128. The max is 1024.");
+
struct vhost_scsi_virtqueue {
struct vhost_virtqueue vq;
/*
@@ -186,7 +190,9 @@ struct vhost_scsi {
char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
struct vhost_dev dev;
- struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];
+ struct vhost_scsi_virtqueue *vqs;
+ unsigned long *compl_bitmap;
+ struct vhost_scsi_inflight **old_inflight;
struct vhost_work vs_completion_work; /* cmd completion work item */
struct llist_head vs_completion_list; /* cmd completion queue */
@@ -245,7 +251,7 @@ static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
struct vhost_virtqueue *vq;
int idx, i;
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+ for (i = 0; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq;
mutex_lock(&vq->mutex);
@@ -533,7 +539,6 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
vs_completion_work);
- DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
struct virtio_scsi_cmd_resp v_rsp;
struct vhost_scsi_cmd *cmd, *t;
struct llist_node *llnode;
@@ -541,7 +546,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
struct iov_iter iov_iter;
int ret, vq;
- bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
+ bitmap_zero(vs->compl_bitmap, vs->dev.nvqs);
llnode = llist_del_all(&vs->vs_completion_list);
llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
se_cmd = &cmd->tvc_se_cmd;
@@ -566,7 +571,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
vq = q - vs->vqs;
- __set_bit(vq, signal);
+ __set_bit(vq, vs->compl_bitmap);
} else
pr_err("Faulted on virtio_scsi_cmd_resp\n");
@@ -574,8 +579,8 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
}
vq = -1;
- while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
- < VHOST_SCSI_MAX_VQ)
+ while ((vq = find_next_bit(vs->compl_bitmap, vs->dev.nvqs, vq + 1))
+ < vs->dev.nvqs)
vhost_signal(&vs->dev, &vs->vqs[vq].vq);
}
@@ -643,14 +648,12 @@ vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
size_t offset;
unsigned int npages = 0;
- bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
+ bytes = iov_iter_get_pages2(iter, pages, LONG_MAX,
VHOST_SCSI_PREALLOC_UPAGES, &offset);
/* No pages were pinned */
if (bytes <= 0)
return bytes < 0 ? bytes : -EFAULT;
- iov_iter_advance(iter, bytes);
-
while (bytes) {
unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes);
sg_set_page(sg++, pages[npages++], n, offset);
@@ -1421,26 +1424,25 @@ static void vhost_scsi_handle_kick(struct vhost_work *work)
/* Callers must hold dev mutex */
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
- struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
int i;
/* Init new inflight and remember the old inflight */
- vhost_scsi_init_inflight(vs, old_inflight);
+ vhost_scsi_init_inflight(vs, vs->old_inflight);
/*
* The inflight->kref was initialized to 1. We decrement it here to
* indicate the start of the flush operation so that it will reach 0
* when all the reqs are finished.
*/
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
- kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);
+ for (i = 0; i < vs->dev.nvqs; i++)
+ kref_put(&vs->old_inflight[i]->kref, vhost_scsi_done_inflight);
/* Flush both the vhost poll and vhost work */
vhost_dev_flush(&vs->dev);
/* Wait for all reqs issued before the flush to be finished */
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
- wait_for_completion(&old_inflight[i]->comp);
+ for (i = 0; i < vs->dev.nvqs; i++)
+ wait_for_completion(&vs->old_inflight[i]->comp);
}
static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
@@ -1603,7 +1605,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
sizeof(vs->vs_vhost_wwpn));
- for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
+ for (i = VHOST_SCSI_VQ_IO; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq;
if (!vhost_vq_is_setup(vq))
continue;
@@ -1613,7 +1615,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
goto destroy_vq_cmds;
}
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+ for (i = 0; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq;
mutex_lock(&vq->mutex);
vhost_vq_set_backend(vq, vs_tpg);
@@ -1715,7 +1717,7 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
target_undepend_item(&se_tpg->tpg_group.cg_item);
}
if (match) {
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+ for (i = 0; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq;
mutex_lock(&vq->mutex);
vhost_vq_set_backend(vq, NULL);
@@ -1724,7 +1726,7 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
/* Make sure cmds are not running before tearing them down. */
vhost_scsi_flush(vs);
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+ for (i = 0; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq;
vhost_scsi_destroy_vq_cmds(vq);
}
@@ -1764,7 +1766,7 @@ static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
return -EFAULT;
}
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+ for (i = 0; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq;
mutex_lock(&vq->mutex);
vq->acked_features = features;
@@ -1778,16 +1780,40 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
{
struct vhost_scsi *vs;
struct vhost_virtqueue **vqs;
- int r = -ENOMEM, i;
+ int r = -ENOMEM, i, nvqs = vhost_scsi_max_io_vqs;
vs = kvzalloc(sizeof(*vs), GFP_KERNEL);
if (!vs)
goto err_vs;
- vqs = kmalloc_array(VHOST_SCSI_MAX_VQ, sizeof(*vqs), GFP_KERNEL);
- if (!vqs)
+ if (nvqs > VHOST_SCSI_MAX_IO_VQ) {
+ pr_err("Invalid max_io_vqs of %d. Using %d.\n", nvqs,
+ VHOST_SCSI_MAX_IO_VQ);
+ nvqs = VHOST_SCSI_MAX_IO_VQ;
+ } else if (nvqs == 0) {
+ pr_err("Invalid max_io_vqs of %d. Using 1.\n", nvqs);
+ nvqs = 1;
+ }
+ nvqs += VHOST_SCSI_VQ_IO;
+
+ vs->compl_bitmap = bitmap_alloc(nvqs, GFP_KERNEL);
+ if (!vs->compl_bitmap)
+ goto err_compl_bitmap;
+
+ vs->old_inflight = kmalloc_array(nvqs, sizeof(*vs->old_inflight),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!vs->old_inflight)
+ goto err_inflight;
+
+ vs->vqs = kmalloc_array(nvqs, sizeof(*vs->vqs),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!vs->vqs)
goto err_vqs;
+ vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
+ if (!vqs)
+ goto err_local_vqs;
+
vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
@@ -1798,11 +1824,11 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
- for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
+ for (i = VHOST_SCSI_VQ_IO; i < nvqs; i++) {
vqs[i] = &vs->vqs[i].vq;
vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
}
- vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV,
+ vhost_dev_init(&vs->dev, vqs, nvqs, UIO_MAXIOV,
VHOST_SCSI_WEIGHT, 0, true, NULL);
vhost_scsi_init_inflight(vs, NULL);
@@ -1810,7 +1836,13 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
f->private_data = vs;
return 0;
+err_local_vqs:
+ kfree(vs->vqs);
err_vqs:
+ kfree(vs->old_inflight);
+err_inflight:
+ bitmap_free(vs->compl_bitmap);
+err_compl_bitmap:
kvfree(vs);
err_vs:
return r;
@@ -1828,6 +1860,9 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
vhost_dev_stop(&vs->dev);
vhost_dev_cleanup(&vs->dev);
kfree(vs->dev.vqs);
+ kfree(vs->vqs);
+ kfree(vs->old_inflight);
+ bitmap_free(vs->compl_bitmap);
kvfree(vs);
return 0;
}
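
The vhost-scsi hunks above replace the compile-time VHOST_SCSI_MAX_VQ loops with vs->dev.nvqs and size the virtqueue, old_inflight and completion-bitmap arrays from a new vhost_scsi_max_io_vqs module parameter (exposed as max_io_vqs, judging from the pr_err() messages), clamped before the two control/event queues are added. A standalone sketch of just that clamping, reusing the driver's constants:

	/* Mirrors the range check in vhost_scsi_open(); VHOST_SCSI_VQ_IO (2)
	 * accounts for the control and event queues placed before the I/O queues. */
	static int vhost_scsi_nvqs(int requested_io_vqs)
	{
		if (requested_io_vqs > VHOST_SCSI_MAX_IO_VQ)
			requested_io_vqs = VHOST_SCSI_MAX_IO_VQ;
		else if (requested_io_vqs == 0)
			requested_io_vqs = 1;
		return requested_io_vqs + VHOST_SCSI_VQ_IO;
	}

At module load time the queue count would be chosen with something like max_io_vqs=8 on the vhost_scsi module command line.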
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 23dcbfdfa13b..166044642fd5 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -347,6 +347,14 @@ static long vhost_vdpa_set_config(struct vhost_vdpa *v,
return 0;
}
+static bool vhost_vdpa_can_suspend(const struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ return ops->suspend;
+}
+
static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
{
struct vdpa_device *vdpa = v->vdpa;
@@ -470,6 +478,22 @@ static long vhost_vdpa_get_vqs_count(struct vhost_vdpa *v, u32 __user *argp)
return 0;
}
+/* After a successful return of this ioctl the device must not process more
+ * virtqueue descriptors. The device can still answer reads or writes of config
+ * fields as if it were not suspended. In particular, writing to "queue_enable"
+ * with a value of 1 will not make the device start processing buffers.
+ */
+static long vhost_vdpa_suspend(struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ if (!ops->suspend)
+ return -EOPNOTSUPP;
+
+ return ops->suspend(vdpa);
+}
+
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
void __user *argp)
{
@@ -577,7 +601,11 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
if (cmd == VHOST_SET_BACKEND_FEATURES) {
if (copy_from_user(&features, featurep, sizeof(features)))
return -EFAULT;
- if (features & ~VHOST_VDPA_BACKEND_FEATURES)
+ if (features & ~(VHOST_VDPA_BACKEND_FEATURES |
+ BIT_ULL(VHOST_BACKEND_F_SUSPEND)))
+ return -EOPNOTSUPP;
+ if ((features & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) &&
+ !vhost_vdpa_can_suspend(v))
return -EOPNOTSUPP;
vhost_set_backend_features(&v->vdev, features);
return 0;
@@ -628,6 +656,8 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
break;
case VHOST_GET_BACKEND_FEATURES:
features = VHOST_VDPA_BACKEND_FEATURES;
+ if (vhost_vdpa_can_suspend(v))
+ features |= BIT_ULL(VHOST_BACKEND_F_SUSPEND);
if (copy_to_user(featurep, &features, sizeof(features)))
r = -EFAULT;
break;
@@ -640,6 +670,9 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
case VHOST_VDPA_GET_VQS_COUNT:
r = vhost_vdpa_get_vqs_count(v, argp);
break;
+ case VHOST_VDPA_SUSPEND:
+ r = vhost_vdpa_suspend(v);
+ break;
default:
r = vhost_dev_ioctl(&v->vdev, cmd, argp);
if (r == -ENOIOCTLCMD)
@@ -1076,7 +1109,7 @@ static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
if (!bus)
return -EFAULT;
- if (!iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
+ if (!device_iommu_capable(dma_dev, IOMMU_CAP_CACHE_COHERENCY))
return -ENOTSUPP;
v->domain = iommu_domain_alloc(bus);
@@ -1363,6 +1396,7 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa)
err:
put_device(&v->dev);
+ ida_simple_remove(&vhost_vdpa_ida, v->minor);
return r;
}
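
The vhost-vdpa hunks above add a VHOST_VDPA_SUSPEND ioctl and gate it behind a VHOST_BACKEND_F_SUSPEND backend feature that is only advertised when the parent vdpa driver implements ->suspend(). A minimal userspace sketch of driving it, assuming those uAPI names from <linux/vhost.h>/<linux/vhost_types.h> and a hypothetical /dev/vhost-vdpa-0 path:

	#include <fcntl.h>
	#include <stdint.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/vhost.h>

	int suspend_vdpa(const char *path)
	{
		uint64_t features;
		int ret = -1;
		int fd = open(path, O_RDWR);

		if (fd < 0)
			return -1;
		/* Only request suspend if the backend advertises it. */
		if (!ioctl(fd, VHOST_GET_BACKEND_FEATURES, &features) &&
		    (features & (1ULL << VHOST_BACKEND_F_SUSPEND)) &&
		    !ioctl(fd, VHOST_SET_BACKEND_FEATURES, &features))
			/* The device stops processing descriptors once this succeeds. */
			ret = ioctl(fd, VHOST_VDPA_SUSPEND);
		close(fd);
		return ret;
	}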
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index eab55accf381..11f59dd06a74 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -1095,7 +1095,8 @@ EXPORT_SYMBOL(vringh_need_notify_kern);
#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
static int iotlb_translate(const struct vringh *vrh,
- u64 addr, u64 len, struct bio_vec iov[],
+ u64 addr, u64 len, u64 *translated,
+ struct bio_vec iov[],
int iov_size, u32 perm)
{
struct vhost_iotlb_map *map;
@@ -1136,43 +1137,76 @@ static int iotlb_translate(const struct vringh *vrh,
spin_unlock(vrh->iotlb_lock);
+ if (translated)
+ *translated = min(len, s);
+
return ret;
}
static inline int copy_from_iotlb(const struct vringh *vrh, void *dst,
void *src, size_t len)
{
- struct iov_iter iter;
- struct bio_vec iov[16];
- int ret;
+ u64 total_translated = 0;
- ret = iotlb_translate(vrh, (u64)(uintptr_t)src,
- len, iov, 16, VHOST_MAP_RO);
- if (ret < 0)
- return ret;
+ while (total_translated < len) {
+ struct bio_vec iov[16];
+ struct iov_iter iter;
+ u64 translated;
+ int ret;
- iov_iter_bvec(&iter, READ, iov, ret, len);
+ ret = iotlb_translate(vrh, (u64)(uintptr_t)src,
+ len - total_translated, &translated,
+ iov, ARRAY_SIZE(iov), VHOST_MAP_RO);
+ if (ret == -ENOBUFS)
+ ret = ARRAY_SIZE(iov);
+ else if (ret < 0)
+ return ret;
- ret = copy_from_iter(dst, len, &iter);
+ iov_iter_bvec(&iter, READ, iov, ret, translated);
- return ret;
+ ret = copy_from_iter(dst, translated, &iter);
+ if (ret < 0)
+ return ret;
+
+ src += translated;
+ dst += translated;
+ total_translated += translated;
+ }
+
+ return total_translated;
}
static inline int copy_to_iotlb(const struct vringh *vrh, void *dst,
void *src, size_t len)
{
- struct iov_iter iter;
- struct bio_vec iov[16];
- int ret;
+ u64 total_translated = 0;
- ret = iotlb_translate(vrh, (u64)(uintptr_t)dst,
- len, iov, 16, VHOST_MAP_WO);
- if (ret < 0)
- return ret;
+ while (total_translated < len) {
+ struct bio_vec iov[16];
+ struct iov_iter iter;
+ u64 translated;
+ int ret;
+
+ ret = iotlb_translate(vrh, (u64)(uintptr_t)dst,
+ len - total_translated, &translated,
+ iov, ARRAY_SIZE(iov), VHOST_MAP_WO);
+ if (ret == -ENOBUFS)
+ ret = ARRAY_SIZE(iov);
+ else if (ret < 0)
+ return ret;
- iov_iter_bvec(&iter, WRITE, iov, ret, len);
+ iov_iter_bvec(&iter, WRITE, iov, ret, translated);
+
+ ret = copy_to_iter(src, translated, &iter);
+ if (ret < 0)
+ return ret;
+
+ src += translated;
+ dst += translated;
+ total_translated += translated;
+ }
- return copy_to_iter(src, len, &iter);
+ return total_translated;
}
static inline int getu16_iotlb(const struct vringh *vrh,
@@ -1183,7 +1217,7 @@ static inline int getu16_iotlb(const struct vringh *vrh,
int ret;
/* Atomic read is needed for getu16 */
- ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
+ ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p), NULL,
&iov, 1, VHOST_MAP_RO);
if (ret < 0)
return ret;
@@ -1204,7 +1238,7 @@ static inline int putu16_iotlb(const struct vringh *vrh,
int ret;
/* Atomic write is needed for putu16 */
- ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
+ ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p), NULL,
&iov, 1, VHOST_MAP_WO);
if (ret < 0)
return ret;
diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c
index 2b9e2bbbb03e..fc02c5c16055 100644
--- a/drivers/video/backlight/lp855x_bl.c
+++ b/drivers/video/backlight/lp855x_bl.c
@@ -218,9 +218,8 @@ err:
static void lp855x_pwm_ctrl(struct lp855x *lp, int br, int max_br)
{
- unsigned int period = lp->pdata->period_ns;
- unsigned int duty = br * period / max_br;
struct pwm_device *pwm;
+ struct pwm_state state;
/* request pwm device with the consumer name */
if (!lp->pwm) {
@@ -230,18 +229,16 @@ static void lp855x_pwm_ctrl(struct lp855x *lp, int br, int max_br)
lp->pwm = pwm;
- /*
- * FIXME: pwm_apply_args() should be removed when switching to
- * the atomic PWM API.
- */
- pwm_apply_args(pwm);
+ pwm_init_state(lp->pwm, &state);
+ } else {
+ pwm_get_state(lp->pwm, &state);
}
- pwm_config(lp->pwm, duty, period);
- if (duty)
- pwm_enable(lp->pwm);
- else
- pwm_disable(lp->pwm);
+ state.period = lp->pdata->period_ns;
+ state.duty_cycle = div_u64(br * state.period, max_br);
+ state.enabled = state.duty_cycle;
+
+ pwm_apply_state(lp->pwm, &state);
}
static int lp855x_bl_update_status(struct backlight_device *bl)
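
The lp855x change above drops the legacy pwm_config()/pwm_enable() calls in favour of the atomic PWM API: the whole state (period, duty cycle, enable) is computed first and programmed in one pwm_apply_state() call. A minimal sketch of the same pattern for a generic consumer, assuming an already-requested pwm device and a period in nanoseconds from platform data:

	static int apply_brightness(struct pwm_device *pwm, int br, int max_br,
				    unsigned int period_ns)
	{
		struct pwm_state state;

		pwm_get_state(pwm, &state);		/* start from the current state */
		state.period = period_ns;
		state.duty_cycle = div_u64((u64)br * period_ns, max_br);
		state.enabled = state.duty_cycle > 0;	/* disable on zero brightness */

		return pwm_apply_state(pwm, &state);	/* program everything atomically */
	}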
diff --git a/drivers/video/backlight/platform_lcd.c b/drivers/video/backlight/platform_lcd.c
index b2bfbf070200..dc37494baf42 100644
--- a/drivers/video/backlight/platform_lcd.c
+++ b/drivers/video/backlight/platform_lcd.c
@@ -12,7 +12,6 @@
#include <linux/fb.h>
#include <linux/backlight.h>
#include <linux/lcd.h>
-#include <linux/of.h>
#include <linux/slab.h>
#include <video/platform_lcd.h>
@@ -133,19 +132,10 @@ static int platform_lcd_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(platform_lcd_pm_ops, platform_lcd_suspend,
platform_lcd_resume);
-#ifdef CONFIG_OF
-static const struct of_device_id platform_lcd_of_match[] = {
- { .compatible = "platform-lcd" },
- {},
-};
-MODULE_DEVICE_TABLE(of, platform_lcd_of_match);
-#endif
-
static struct platform_driver platform_lcd_driver = {
.driver = {
.name = "platform-lcd",
.pm = &platform_lcd_pm_ops,
- .of_match_table = of_match_ptr(platform_lcd_of_match),
},
.probe = platform_lcd_probe,
};
diff --git a/drivers/video/backlight/rt4831-backlight.c b/drivers/video/backlight/rt4831-backlight.c
index 42155c7d2db1..eb8c59e8713f 100644
--- a/drivers/video/backlight/rt4831-backlight.c
+++ b/drivers/video/backlight/rt4831-backlight.c
@@ -12,6 +12,7 @@
#define RT4831_REG_BLCFG 0x02
#define RT4831_REG_BLDIML 0x04
#define RT4831_REG_ENABLE 0x08
+#define RT4831_REG_BLOPT2 0x11
#define RT4831_BLMAX_BRIGHTNESS 2048
@@ -23,6 +24,11 @@
#define RT4831_BLDIML_MASK GENMASK(2, 0)
#define RT4831_BLDIMH_MASK GENMASK(10, 3)
#define RT4831_BLDIMH_SHIFT 3
+#define RT4831_BLOCP_MASK GENMASK(1, 0)
+
+#define RT4831_BLOCP_MINUA 900000
+#define RT4831_BLOCP_MAXUA 1800000
+#define RT4831_BLOCP_STEPUA 300000
struct rt4831_priv {
struct device *dev;
@@ -85,7 +91,7 @@ static int rt4831_parse_backlight_properties(struct rt4831_priv *priv,
{
struct device *dev = priv->dev;
u8 propval;
- u32 brightness;
+ u32 brightness, ocp_uA;
unsigned int val = 0;
int ret;
@@ -120,6 +126,31 @@ static int rt4831_parse_backlight_properties(struct rt4831_priv *priv,
if (ret)
return ret;
+ /*
+ * This OCP level is used to protect and limit the inductor current.
+ * If the inductor peak current reaches this level, the low-side MOSFET
+ * will be turned off and the output channel current may be limited.
+ * To deliver the configured channel current, the chosen inductor must
+ * be rated higher than the OCP level.
+ *
+ * Unlike the OVP level, whose 21V default suits most applications, an
+ * OCP level smaller than needed will also force the backlight channel
+ * output current below the register setting.
+ */
+ ret = device_property_read_u32(dev, "richtek,bled-ocp-microamp",
+ &ocp_uA);
+ if (!ret) {
+ ocp_uA = clamp_val(ocp_uA, RT4831_BLOCP_MINUA,
+ RT4831_BLOCP_MAXUA);
+ val = DIV_ROUND_UP(ocp_uA - RT4831_BLOCP_MINUA,
+ RT4831_BLOCP_STEPUA);
+ ret = regmap_update_bits(priv->regmap, RT4831_REG_BLOPT2,
+ RT4831_BLOCP_MASK, val);
+ if (ret)
+ return ret;
+ }
+
ret = device_property_read_u8(dev, "richtek,channel-use", &propval);
if (ret) {
dev_err(dev, "richtek,channel-use DT property missing\n");
diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c
index bd4dc97d4d34..db568f67e4dc 100644
--- a/drivers/video/console/sticore.c
+++ b/drivers/video/console/sticore.c
@@ -290,7 +290,7 @@ static char default_sti_path[21] __read_mostly;
static int __init sti_setup(char *str)
{
if (str)
- strlcpy (default_sti_path, str, sizeof (default_sti_path));
+ strscpy(default_sti_path, str, sizeof(default_sti_path));
return 1;
}
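
This and the later strlcpy() → strscpy() conversions in this series keep the same bounded, NUL-terminated copy but switch to an interface whose return value reports truncation instead of the source length. A small sketch of the difference, assuming a 21-byte buffer like default_sti_path and a hypothetical boot_arg string:

	char buf[21];
	ssize_t n;

	n = strscpy(buf, boot_arg, sizeof(buf));
	if (n == -E2BIG)			/* strscpy() flags truncation directly; */
		pr_warn("sti path truncated\n");	/* strlcpy() would only return strlen(boot_arg) */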
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 576612f18d59..fcdf017e2665 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -75,7 +75,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines);
static int vgacon_set_origin(struct vc_data *c);
static void vgacon_save_screen(struct vc_data *c);
static void vgacon_invert_region(struct vc_data *c, u16 * p, int count);
-static struct uni_pagedir *vgacon_uni_pagedir;
+static struct uni_pagedict *vgacon_uni_pagedir;
static int vgacon_refcount;
/* Description of the hardware situation */
@@ -342,7 +342,7 @@ static const char *vgacon_startup(void)
static void vgacon_init(struct vc_data *c, int init)
{
- struct uni_pagedir *p;
+ struct uni_pagedict *p;
/*
* We cannot be loaded as a module, therefore init will be 1
@@ -367,10 +367,10 @@ static void vgacon_init(struct vc_data *c, int init)
c->vc_complement_mask = 0x7700;
if (vga_512_chars)
c->vc_hi_font_mask = 0x0800;
- p = *c->vc_uni_pagedir_loc;
- if (c->vc_uni_pagedir_loc != &vgacon_uni_pagedir) {
+ p = *c->uni_pagedict_loc;
+ if (c->uni_pagedict_loc != &vgacon_uni_pagedir) {
con_free_unimap(c);
- c->vc_uni_pagedir_loc = &vgacon_uni_pagedir;
+ c->uni_pagedict_loc = &vgacon_uni_pagedir;
vgacon_refcount++;
}
if (!vgacon_uni_pagedir && p)
@@ -392,7 +392,7 @@ static void vgacon_deinit(struct vc_data *c)
if (!--vgacon_refcount)
con_free_unimap(c);
- c->vc_uni_pagedir_loc = &c->vc_uni_pagedir;
+ c->uni_pagedict_loc = &c->uni_pagedict;
con_set_default_unimap(c);
}
diff --git a/drivers/video/fbdev/68328fb.c b/drivers/video/fbdev/68328fb.c
index 9811f1bad8d4..7db03ed77c76 100644
--- a/drivers/video/fbdev/68328fb.c
+++ b/drivers/video/fbdev/68328fb.c
@@ -84,9 +84,6 @@ static const struct fb_fix_screeninfo mc68x328fb_fix __initconst = {
/*
* Interface used by the world
*/
-int mc68x328fb_init(void);
-int mc68x328fb_setup(char *);
-
static int mc68x328fb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info);
static int mc68x328fb_set_par(struct fb_info *info);
@@ -403,7 +400,7 @@ static int mc68x328fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
#endif
}
-int __init mc68x328fb_setup(char *options)
+static int __init mc68x328fb_setup(char *options)
{
if (!options || !*options)
return 1;
@@ -414,7 +411,7 @@ int __init mc68x328fb_setup(char *options)
* Initialisation
*/
-int __init mc68x328fb_init(void)
+static int __init mc68x328fb_init(void)
{
#ifndef MODULE
char *option = NULL;
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index 8080116aea84..f65c96d1394d 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -698,16 +698,18 @@ static int clcdfb_of_init_display(struct clcd_fb *fb)
return -ENODEV;
panel = of_graph_get_remote_port_parent(endpoint);
- if (!panel)
- return -ENODEV;
+ if (!panel) {
+ err = -ENODEV;
+ goto out_endpoint_put;
+ }
err = clcdfb_of_get_backlight(&fb->dev->dev, fb->panel);
if (err)
- return err;
+ goto out_panel_put;
err = clcdfb_of_get_mode(&fb->dev->dev, panel, fb->panel);
if (err)
- return err;
+ goto out_panel_put;
err = of_property_read_u32(fb->dev->dev.of_node, "max-memory-bandwidth",
&max_bandwidth);
@@ -736,11 +738,21 @@ static int clcdfb_of_init_display(struct clcd_fb *fb)
if (of_property_read_u32_array(endpoint,
"arm,pl11x,tft-r0g0b0-pads",
- tft_r0b0g0, ARRAY_SIZE(tft_r0b0g0)) != 0)
- return -ENOENT;
+ tft_r0b0g0, ARRAY_SIZE(tft_r0b0g0)) != 0) {
+ err = -ENOENT;
+ goto out_panel_put;
+ }
+
+ of_node_put(panel);
+ of_node_put(endpoint);
return clcdfb_of_init_tft_panel(fb, tft_r0b0g0[0],
tft_r0b0g0[1], tft_r0b0g0[2]);
+out_panel_put:
+ of_node_put(panel);
+out_endpoint_put:
+ of_node_put(endpoint);
+ return err;
}
static int clcdfb_of_vram_setup(struct clcd_fb *fb)
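
The clcdfb_of_init_display() rework above is the usual device-tree reference-counting shape: every node obtained from of_graph_get_next_endpoint()/of_graph_get_remote_port_parent() is released on every exit path via goto labels in reverse acquisition order. A condensed sketch of that pattern, with a hypothetical get_mode() standing in for the driver-specific steps:

	#include <linux/of_graph.h>

	static int init_display(struct device_node *np)
	{
		struct device_node *endpoint, *panel;
		int err;

		endpoint = of_graph_get_next_endpoint(np, NULL);
		if (!endpoint)
			return -ENODEV;

		panel = of_graph_get_remote_port_parent(endpoint);
		if (!panel) {
			err = -ENODEV;
			goto out_endpoint_put;
		}

		err = get_mode(panel);		/* hypothetical driver-specific step */

		of_node_put(panel);		/* drop references in reverse order */
	out_endpoint_put:
		of_node_put(endpoint);
		return err;
	}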
diff --git a/drivers/video/fbdev/amifb.c b/drivers/video/fbdev/amifb.c
index 6e07a97bbd31..d88265dbebf4 100644
--- a/drivers/video/fbdev/amifb.c
+++ b/drivers/video/fbdev/amifb.c
@@ -2540,27 +2540,16 @@ static int amifb_blank(int blank, struct fb_info *info)
static int amifb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
- if (var->vmode & FB_VMODE_YWRAP) {
- if (var->yoffset < 0 ||
- var->yoffset >= info->var.yres_virtual || var->xoffset)
- return -EINVAL;
- } else {
+ if (!(var->vmode & FB_VMODE_YWRAP)) {
/*
* TODO: There will be problems when xpan!=1, so some columns
* on the right side will never be seen
*/
if (var->xoffset + info->var.xres >
- upx(16 << maxfmode, info->var.xres_virtual) ||
- var->yoffset + info->var.yres > info->var.yres_virtual)
+ upx(16 << maxfmode, info->var.xres_virtual))
return -EINVAL;
}
ami_pan_var(var, info);
- info->var.xoffset = var->xoffset;
- info->var.yoffset = var->yoffset;
- if (var->vmode & FB_VMODE_YWRAP)
- info->var.vmode |= FB_VMODE_YWRAP;
- else
- info->var.vmode &= ~FB_VMODE_YWRAP;
return 0;
}
diff --git a/drivers/video/fbdev/arkfb.c b/drivers/video/fbdev/arkfb.c
index eb3e47c58c5f..a2a381631628 100644
--- a/drivers/video/fbdev/arkfb.c
+++ b/drivers/video/fbdev/arkfb.c
@@ -781,7 +781,12 @@ static int arkfb_set_par(struct fb_info *info)
return -EINVAL;
}
- ark_set_pixclock(info, (hdiv * info->var.pixclock) / hmul);
+ value = (hdiv * info->var.pixclock) / hmul;
+ if (!value) {
+ fb_dbg(info, "invalid pixclock\n");
+ value = 1;
+ }
+ ark_set_pixclock(info, value);
svga_set_timings(par->state.vgabase, &ark_timing_regs, &(info->var), hmul, hdiv,
(info->var.vmode & FB_VMODE_DOUBLE) ? 2 : 1,
(info->var.vmode & FB_VMODE_INTERLACED) ? 2 : 1,
@@ -792,6 +797,8 @@ static int arkfb_set_par(struct fb_info *info)
value = ((value * hmul / hdiv) / 8) - 5;
vga_wcrt(par->state.vgabase, 0x42, (value + 1) / 2);
+ if (screen_size > info->screen_size)
+ screen_size = info->screen_size;
memset_io(info->screen_base, 0x00, screen_size);
/* Device and screen back on */
svga_wcrt_mask(par->state.vgabase, 0x17, 0x80, 0x80);
diff --git a/drivers/video/fbdev/atafb.c b/drivers/video/fbdev/atafb.c
index 52a35b661643..2bc4089865e6 100644
--- a/drivers/video/fbdev/atafb.c
+++ b/drivers/video/fbdev/atafb.c
@@ -236,8 +236,6 @@ static int *MV300_reg = MV300_reg_8bit;
#endif /* ATAFB_EXT */
-static int inverse;
-
/*
* struct fb_ops {
* * open/release and usage marking
@@ -467,27 +465,27 @@ static struct fb_videomode atafb_modedb[] __initdata = {
{
/* 320x200, 15 kHz, 60 Hz (ST low) */
"st-low", 60, 320, 200, 32000, 32, 16, 31, 14, 96, 4,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ 0, FB_VMODE_NONINTERLACED
}, {
/* 640x200, 15 kHz, 60 Hz (ST medium) */
"st-mid", 60, 640, 200, 32000, 32, 16, 31, 14, 96, 4,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ 0, FB_VMODE_NONINTERLACED
}, {
/* 640x400, 30.25 kHz, 63.5 Hz (ST high) */
"st-high", 63, 640, 400, 32000, 128, 0, 40, 14, 128, 4,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ 0, FB_VMODE_NONINTERLACED
}, {
/* 320x480, 15 kHz, 60 Hz (TT low) */
"tt-low", 60, 320, 480, 31041, 120, 100, 8, 16, 140, 30,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ 0, FB_VMODE_NONINTERLACED
}, {
/* 640x480, 29 kHz, 57 Hz (TT medium) */
"tt-mid", 60, 640, 480, 31041, 120, 100, 8, 16, 140, 30,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ 0, FB_VMODE_NONINTERLACED
}, {
/* 1280x960, 72 kHz, 72 Hz (TT high) */
- "tt-high", 57, 1280, 960, 7760, 260, 60, 36, 4, 192, 4,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ "tt-high", 72, 1280, 960, 7760, 260, 60, 36, 4, 192, 4,
+ 0, FB_VMODE_NONINTERLACED
},
/*
@@ -496,12 +494,12 @@ static struct fb_videomode atafb_modedb[] __initdata = {
{
/* 640x480, 31 kHz, 60 Hz (VGA) */
- "vga", 63.5, 640, 480, 32000, 18, 42, 31, 11, 96, 3,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ "vga", 60, 640, 480, 39721, 42, 18, 31, 11, 100, 3,
+ 0, FB_VMODE_NONINTERLACED
}, {
/* 640x400, 31 kHz, 70 Hz (VGA) */
- "vga70", 70, 640, 400, 32000, 18, 42, 31, 11, 96, 3,
- FB_SYNC_VERT_HIGH_ACT | FB_SYNC_COMP_HIGH_ACT, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ "vga70", 70, 640, 400, 39721, 42, 18, 31, 11, 100, 3,
+ FB_SYNC_VERT_HIGH_ACT | FB_SYNC_COMP_HIGH_ACT, FB_VMODE_NONINTERLACED
},
/*
@@ -511,7 +509,7 @@ static struct fb_videomode atafb_modedb[] __initdata = {
{
/* 896x608, 31 kHz, 60 Hz (Falcon High) */
"falh", 60, 896, 608, 32000, 18, 42, 31, 1, 96,3,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ 0, FB_VMODE_NONINTERLACED
},
};
@@ -1010,10 +1008,6 @@ static int falcon_decode_var(struct fb_var_screeninfo *var,
else if (yres_virtual < yres)
yres_virtual = yres;
- /* backward bug-compatibility */
- if (var->pixclock > 1)
- var->pixclock -= 1;
-
par->hw.falcon.line_width = bpp * xres / 16;
par->hw.falcon.line_offset = bpp * (xres_virtual - xres) / 16;
@@ -1072,8 +1066,6 @@ static int falcon_decode_var(struct fb_var_screeninfo *var,
xstretch = 2; /* Double pixel width only for hicolor */
/* Default values are used for vert./hor. timing if no pixelclock given. */
if (var->pixclock == 0) {
- int linesize;
-
/* Choose master pixelclock depending on hor. timing */
plen = 1 * xstretch;
if ((plen * xres + f25.right + f25.hsync + f25.left) *
@@ -1092,7 +1084,6 @@ static int falcon_decode_var(struct fb_var_screeninfo *var,
left_margin = pclock->left / plen;
right_margin = pclock->right / plen;
hsync_len = pclock->hsync / plen;
- linesize = left_margin + xres + right_margin + hsync_len;
upper_margin = 31;
lower_margin = 11;
vsync_len = 3;
@@ -1641,7 +1632,7 @@ static irqreturn_t falcon_vbl_switcher(int irq, void *dummy)
static int falcon_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
- struct atafb_par *par = (struct atafb_par *)info->par;
+ struct atafb_par *par = info->par;
int xoffset;
int bpp = info->var.bits_per_pixel;
@@ -2208,6 +2199,10 @@ static int ext_setcolreg(unsigned int regno, unsigned int red,
if (regno > 255)
return 1;
+ red >>= 8;
+ green >>= 8;
+ blue >>= 8;
+
switch (external_card_type) {
case IS_VGA:
OUTB(0x3c8, regno);
@@ -2261,7 +2256,7 @@ static void set_screen_base(void *s_base)
static int pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
{
- struct atafb_par *par = (struct atafb_par *)info->par;
+ struct atafb_par *par = info->par;
if (!fbhw->set_screen_base ||
(!ATARIHW_PRESENT(EXTD_SHIFTER) && var->xoffset))
@@ -2407,55 +2402,19 @@ static void atafb_set_disp(struct fb_info *info)
static int
atafb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
{
- int xoffset = var->xoffset;
- int yoffset = var->yoffset;
- int err;
-
- if (var->vmode & FB_VMODE_YWRAP) {
- if (yoffset < 0 || yoffset >= info->var.yres_virtual || xoffset)
- return -EINVAL;
- } else {
- if (xoffset + info->var.xres > info->var.xres_virtual ||
- yoffset + info->var.yres > info->var.yres_virtual)
- return -EINVAL;
- }
-
- if (fbhw->pan_display) {
- err = fbhw->pan_display(var, info);
- if (err)
- return err;
- } else
+ if (!fbhw->pan_display)
return -EINVAL;
- info->var.xoffset = xoffset;
- info->var.yoffset = yoffset;
-
- if (var->vmode & FB_VMODE_YWRAP)
- info->var.vmode |= FB_VMODE_YWRAP;
- else
- info->var.vmode &= ~FB_VMODE_YWRAP;
-
- return 0;
+ return fbhw->pan_display(var, info);
}
/*
* generic drawing routines; imageblit needs updating for image depth > 1
*/
-#if BITS_PER_LONG == 32
-#define BYTES_PER_LONG 4
-#define SHIFT_PER_LONG 5
-#elif BITS_PER_LONG == 64
-#define BYTES_PER_LONG 8
-#define SHIFT_PER_LONG 6
-#else
-#define Please update me
-#endif
-
-
static void atafb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
- struct atafb_par *par = (struct atafb_par *)info->par;
+ struct atafb_par *par = info->par;
int x2, y2;
u32 width, height;
@@ -2498,7 +2457,7 @@ static void atafb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
static void atafb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
- struct atafb_par *par = (struct atafb_par *)info->par;
+ struct atafb_par *par = info->par;
int x2, y2;
u32 dx, dy, sx, sy, width, height;
int rev_copy = 0;
@@ -2552,10 +2511,8 @@ static void atafb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
static void atafb_imageblit(struct fb_info *info, const struct fb_image *image)
{
- struct atafb_par *par = (struct atafb_par *)info->par;
+ struct atafb_par *par = info->par;
int x2, y2;
- unsigned long *dst;
- int dst_idx;
const char *src;
u32 dx, dy, width, height, pitch;
@@ -2582,10 +2539,6 @@ static void atafb_imageblit(struct fb_info *info, const struct fb_image *image)
if (image->depth == 1) {
// used for font data
- dst = (unsigned long *)
- ((unsigned long)info->screen_base & ~(BYTES_PER_LONG - 1));
- dst_idx = ((unsigned long)info->screen_base & (BYTES_PER_LONG - 1)) * 8;
- dst_idx += dy * par->next_line * 8 + dx;
src = image->data;
pitch = (image->width + 7) / 8;
while (height--) {
@@ -2622,14 +2575,14 @@ atafb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
switch (cmd) {
#ifdef FBCMD_GET_CURRENTPAR
case FBCMD_GET_CURRENTPAR:
- if (copy_to_user((void *)arg, (void *)&current_par,
+ if (copy_to_user((void *)arg, &current_par,
sizeof(struct atafb_par)))
return -EFAULT;
return 0;
#endif
#ifdef FBCMD_SET_CURRENTPAR
case FBCMD_SET_CURRENTPAR:
- if (copy_from_user((void *)&current_par, (void *)arg,
+ if (copy_from_user(&current_par, (void *)arg,
sizeof(struct atafb_par)))
return -EFAULT;
ata_set_par(&current_par);
@@ -2695,7 +2648,7 @@ static int atafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
* hw par just decoded */
static int atafb_set_par(struct fb_info *info)
{
- struct atafb_par *par = (struct atafb_par *)info->par;
+ struct atafb_par *par = info->par;
/* Decode wanted screen parameters */
fbhw->decode_var(&info->var, par);
@@ -2981,7 +2934,7 @@ static void __init atafb_setup_user(char *spec)
}
}
-int __init atafb_setup(char *options)
+static int __init atafb_setup(char *options)
{
char *this_opt;
int temp;
@@ -2996,7 +2949,7 @@ int __init atafb_setup(char *options)
default_par = temp;
mode_option = this_opt;
} else if (!strcmp(this_opt, "inverse"))
- inverse = 1;
+ fb_invert_cmaps();
else if (!strncmp(this_opt, "hwscroll_", 9)) {
hwscroll = simple_strtoul(this_opt + 9, NULL, 10);
if (hwscroll < 0)
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index a3e6faed7745..14eb718bd67c 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -3891,7 +3891,7 @@ static int __init atyfb_setup(char *options)
&& (!strncmp(this_opt, "Mach64:", 7))) {
static unsigned char m64_num;
static char mach64_str[80];
- strlcpy(mach64_str, this_opt + 7, sizeof(mach64_str));
+ strscpy(mach64_str, this_opt + 7, sizeof(mach64_str));
if (!store_video_par(mach64_str, m64_num)) {
m64_num++;
mach64_count = m64_num;
diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c
index 6851f47613e1..a14a8d73035c 100644
--- a/drivers/video/fbdev/aty/radeon_base.c
+++ b/drivers/video/fbdev/aty/radeon_base.c
@@ -1980,7 +1980,7 @@ static int radeon_set_fbinfo(struct radeonfb_info *rinfo)
info->screen_base = rinfo->fb_base;
info->screen_size = rinfo->mapped_vram;
/* Fill fix common fields */
- strlcpy(info->fix.id, rinfo->name, sizeof(info->fix.id));
+ strscpy(info->fix.id, rinfo->name, sizeof(info->fix.id));
info->fix.smem_start = rinfo->fb_base_phys;
info->fix.smem_len = rinfo->video_ram;
info->fix.type = FB_TYPE_PACKED_PIXELS;
@@ -2094,34 +2094,34 @@ static void radeon_identify_vram(struct radeonfb_info *rinfo)
u32 tmp;
/* framebuffer size */
- if ((rinfo->family == CHIP_FAMILY_RS100) ||
+ if ((rinfo->family == CHIP_FAMILY_RS100) ||
(rinfo->family == CHIP_FAMILY_RS200) ||
(rinfo->family == CHIP_FAMILY_RS300) ||
(rinfo->family == CHIP_FAMILY_RC410) ||
(rinfo->family == CHIP_FAMILY_RS400) ||
(rinfo->family == CHIP_FAMILY_RS480) ) {
- u32 tom = INREG(NB_TOM);
- tmp = ((((tom >> 16) - (tom & 0xffff) + 1) << 6) * 1024);
-
- radeon_fifo_wait(6);
- OUTREG(MC_FB_LOCATION, tom);
- OUTREG(DISPLAY_BASE_ADDR, (tom & 0xffff) << 16);
- OUTREG(CRTC2_DISPLAY_BASE_ADDR, (tom & 0xffff) << 16);
- OUTREG(OV0_BASE_ADDR, (tom & 0xffff) << 16);
-
- /* This is supposed to fix the crtc2 noise problem. */
- OUTREG(GRPH2_BUFFER_CNTL, INREG(GRPH2_BUFFER_CNTL) & ~0x7f0000);
-
- if ((rinfo->family == CHIP_FAMILY_RS100) ||
- (rinfo->family == CHIP_FAMILY_RS200)) {
- /* This is to workaround the asic bug for RMX, some versions
- of BIOS doesn't have this register initialized correctly.
- */
- OUTREGP(CRTC_MORE_CNTL, CRTC_H_CUTOFF_ACTIVE_EN,
- ~CRTC_H_CUTOFF_ACTIVE_EN);
- }
- } else {
- tmp = INREG(CNFG_MEMSIZE);
+ u32 tom = INREG(NB_TOM);
+
+ tmp = ((((tom >> 16) - (tom & 0xffff) + 1) << 6) * 1024);
+ radeon_fifo_wait(6);
+ OUTREG(MC_FB_LOCATION, tom);
+ OUTREG(DISPLAY_BASE_ADDR, (tom & 0xffff) << 16);
+ OUTREG(CRTC2_DISPLAY_BASE_ADDR, (tom & 0xffff) << 16);
+ OUTREG(OV0_BASE_ADDR, (tom & 0xffff) << 16);
+
+ /* This is supposed to fix the crtc2 noise problem. */
+ OUTREG(GRPH2_BUFFER_CNTL, INREG(GRPH2_BUFFER_CNTL) & ~0x7f0000);
+
+ if ((rinfo->family == CHIP_FAMILY_RS100) ||
+ (rinfo->family == CHIP_FAMILY_RS200)) {
+ /* This is to workaround the asic bug for RMX, some versions
+ * of BIOS doesn't have this register initialized correctly.
+ */
+ OUTREGP(CRTC_MORE_CNTL, CRTC_H_CUTOFF_ACTIVE_EN,
+ ~CRTC_H_CUTOFF_ACTIVE_EN);
+ }
+ } else {
+ tmp = INREG(CNFG_MEMSIZE);
}
/* mem size is bits [28:0], mask off the rest */
diff --git a/drivers/video/fbdev/bw2.c b/drivers/video/fbdev/bw2.c
index e7702fe1fe7d..6403ae07970d 100644
--- a/drivers/video/fbdev/bw2.c
+++ b/drivers/video/fbdev/bw2.c
@@ -182,7 +182,7 @@ static int bw2_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
static void bw2_init_fix(struct fb_info *info, int linebytes)
{
- strlcpy(info->fix.id, "bwtwo", sizeof(info->fix.id));
+ strscpy(info->fix.id, "bwtwo", sizeof(info->fix.id));
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = FB_VISUAL_MONO01;
diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c
index 393894af26f8..2b00a9d554fc 100644
--- a/drivers/video/fbdev/chipsfb.c
+++ b/drivers/video/fbdev/chipsfb.c
@@ -430,6 +430,7 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
err_release_fb:
framebuffer_release(p);
err_disable:
+ pci_disable_device(dp);
err_out:
return rc;
}
diff --git a/drivers/video/fbdev/cirrusfb.c b/drivers/video/fbdev/cirrusfb.c
index 51e072c03e1c..2a9fa06881b5 100644
--- a/drivers/video/fbdev/cirrusfb.c
+++ b/drivers/video/fbdev/cirrusfb.c
@@ -1999,7 +1999,7 @@ static int cirrusfb_set_fbinfo(struct fb_info *info)
}
/* Fill fix common fields */
- strlcpy(info->fix.id, cirrusfb_board_info[cinfo->btype].name,
+ strscpy(info->fix.id, cirrusfb_board_info[cinfo->btype].name,
sizeof(info->fix.id));
/* monochrome: only 1 memory plane */
@@ -2301,7 +2301,7 @@ err_release_fb:
return error;
}
-void cirrusfb_zorro_unregister(struct zorro_dev *z)
+static void cirrusfb_zorro_unregister(struct zorro_dev *z)
{
struct fb_info *info = zorro_get_drvdata(z);
diff --git a/drivers/video/fbdev/clps711x-fb.c b/drivers/video/fbdev/clps711x-fb.c
index 771ce1f76951..a1061c2f1640 100644
--- a/drivers/video/fbdev/clps711x-fb.c
+++ b/drivers/video/fbdev/clps711x-fb.c
@@ -326,7 +326,7 @@ static int clps711x_fb_probe(struct platform_device *pdev)
info->var.vmode = FB_VMODE_NONINTERLACED;
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.accel = FB_ACCEL_NONE;
- strlcpy(info->fix.id, CLPS711X_FB_NAME, sizeof(info->fix.id));
+ strscpy(info->fix.id, CLPS711X_FB_NAME, sizeof(info->fix.id));
fb_videomode_to_var(&info->var, &cfb->mode);
ret = fb_alloc_cmap(&info->cmap, BIT(CLPS711X_FB_BPP_MAX), 0);
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index f114242b5a70..098b62f7b701 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -412,7 +412,7 @@ static int __init fb_console_setup(char *this_opt)
while ((options = strsep(&this_opt, ",")) != NULL) {
if (!strncmp(options, "font:", 5)) {
- strlcpy(fontname, options + 5, sizeof(fontname));
+ strscpy(fontname, options + 5, sizeof(fontname));
continue;
}
@@ -1060,9 +1060,9 @@ static void fbcon_init(struct vc_data *vc, int init)
vc->vc_complement_mask <<= 1;
}
- if (!*svc->vc_uni_pagedir_loc)
+ if (!*svc->uni_pagedict_loc)
con_set_default_unimap(svc);
- if (!*vc->vc_uni_pagedir_loc)
+ if (!*vc->uni_pagedict_loc)
con_copy_unimap(vc, svc);
ops = info->fbcon_par;
@@ -1384,9 +1384,9 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
vc->vc_complement_mask <<= 1;
}
- if (!*svc->vc_uni_pagedir_loc)
+ if (!*svc->uni_pagedict_loc)
con_set_default_unimap(svc);
- if (!*vc->vc_uni_pagedir_loc)
+ if (!*vc->uni_pagedict_loc)
con_copy_unimap(vc, svc);
cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
@@ -2401,15 +2401,21 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
struct fbcon_display *p = &fb_display[vc->vc_num];
- int resize;
+ int resize, ret, old_userfont, old_width, old_height, old_charcount;
char *old_data = NULL;
resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
if (p->userfont)
old_data = vc->vc_font.data;
vc->vc_font.data = (void *)(p->fontdata = data);
+ old_userfont = p->userfont;
if ((p->userfont = userfont))
REFCOUNT(data)++;
+
+ old_width = vc->vc_font.width;
+ old_height = vc->vc_font.height;
+ old_charcount = vc->vc_font.charcount;
+
vc->vc_font.width = w;
vc->vc_font.height = h;
vc->vc_font.charcount = charcount;
@@ -2425,7 +2431,9 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
cols /= w;
rows /= h;
- vc_resize(vc, cols, rows);
+ ret = vc_resize(vc, cols, rows);
+ if (ret)
+ goto err_out;
} else if (con_is_visible(vc)
&& vc->vc_mode == KD_TEXT) {
fbcon_clear_margins(vc, 0);
@@ -2435,6 +2443,21 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
if (old_data && (--REFCOUNT(old_data) == 0))
kfree(old_data - FONT_EXTRA_WORDS * sizeof(int));
return 0;
+
+err_out:
+ p->fontdata = old_data;
+ vc->vc_font.data = (void *)old_data;
+
+ if (userfont) {
+ p->userfont = old_userfont;
+ REFCOUNT(data)--;
+ }
+
+ vc->vc_font.width = old_width;
+ vc->vc_font.height = old_height;
+ vc->vc_font.charcount = old_charcount;
+
+ return ret;
}
/*
diff --git a/drivers/video/fbdev/core/fbsysfs.c b/drivers/video/fbdev/core/fbsysfs.c
index c2a60b187467..4d7f63892dcc 100644
--- a/drivers/video/fbdev/core/fbsysfs.c
+++ b/drivers/video/fbdev/core/fbsysfs.c
@@ -84,6 +84,10 @@ void framebuffer_release(struct fb_info *info)
if (WARN_ON(refcount_read(&info->count)))
return;
+#if IS_ENABLED(CONFIG_FB_BACKLIGHT)
+ mutex_destroy(&info->bl_curve_mutex);
+#endif
+
kfree(info->apertures);
kfree(info);
}
diff --git a/drivers/video/fbdev/cyber2000fb.c b/drivers/video/fbdev/cyber2000fb.c
index d45355b9a58c..8f041f9b14c7 100644
--- a/drivers/video/fbdev/cyber2000fb.c
+++ b/drivers/video/fbdev/cyber2000fb.c
@@ -1134,7 +1134,7 @@ int cyber2000fb_attach(struct cyberpro_info *info, int idx)
info->fb_size = int_cfb_info->fb.fix.smem_len;
info->info = int_cfb_info;
- strlcpy(info->dev_name, int_cfb_info->fb.fix.id,
+ strscpy(info->dev_name, int_cfb_info->fb.fix.id,
sizeof(info->dev_name));
}
@@ -1229,7 +1229,7 @@ static int cyber2000fb_ddc_getsda(void *data)
static int cyber2000fb_setup_ddc_bus(struct cfb_info *cfb)
{
- strlcpy(cfb->ddc_adapter.name, cfb->fb.fix.id,
+ strscpy(cfb->ddc_adapter.name, cfb->fb.fix.id,
sizeof(cfb->ddc_adapter.name));
cfb->ddc_adapter.owner = THIS_MODULE;
cfb->ddc_adapter.class = I2C_CLASS_DDC;
@@ -1304,7 +1304,7 @@ static int cyber2000fb_i2c_getscl(void *data)
static int cyber2000fb_i2c_register(struct cfb_info *cfb)
{
- strlcpy(cfb->i2c_adapter.name, cfb->fb.fix.id,
+ strscpy(cfb->i2c_adapter.name, cfb->fb.fix.id,
sizeof(cfb->i2c_adapter.name));
cfb->i2c_adapter.owner = THIS_MODULE;
cfb->i2c_adapter.algo_data = &cfb->i2c_algo;
@@ -1500,7 +1500,7 @@ static int cyber2000fb_setup(char *options)
if (strncmp(opt, "font:", 5) == 0) {
static char default_font_storage[40];
- strlcpy(default_font_storage, opt + 5,
+ strscpy(default_font_storage, opt + 5,
sizeof(default_font_storage));
default_font = default_font_storage;
continue;
diff --git a/drivers/video/fbdev/dnfb.c b/drivers/video/fbdev/dnfb.c
index 3688f9165848..18405c402ec1 100644
--- a/drivers/video/fbdev/dnfb.c
+++ b/drivers/video/fbdev/dnfb.c
@@ -280,7 +280,7 @@ static struct platform_device dnfb_device = {
.name = "dnfb",
};
-int __init dnfb_init(void)
+static int __init dnfb_init(void)
{
int ret;
diff --git a/drivers/video/fbdev/ffb.c b/drivers/video/fbdev/ffb.c
index b3d580e57221..7cba3969a970 100644
--- a/drivers/video/fbdev/ffb.c
+++ b/drivers/video/fbdev/ffb.c
@@ -883,7 +883,7 @@ static void ffb_init_fix(struct fb_info *info)
} else
ffb_type_name = "Elite 3D";
- strlcpy(info->fix.id, ffb_type_name, sizeof(info->fix.id));
+ strscpy(info->fix.id, ffb_type_name, sizeof(info->fix.id));
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = FB_VISUAL_TRUECOLOR;
diff --git a/drivers/video/fbdev/fm2fb.c b/drivers/video/fbdev/fm2fb.c
index 3b727d528fde..942e382cf1cf 100644
--- a/drivers/video/fbdev/fm2fb.c
+++ b/drivers/video/fbdev/fm2fb.c
@@ -293,7 +293,7 @@ static int fm2fb_probe(struct zorro_dev *z, const struct zorro_device_id *id)
return 0;
}
-int __init fm2fb_setup(char *options)
+static int __init fm2fb_setup(char *options)
{
char *this_opt;
@@ -309,7 +309,7 @@ int __init fm2fb_setup(char *options)
return 0;
}
-int __init fm2fb_init(void)
+static int __init fm2fb_init(void)
{
char *option = NULL;
diff --git a/drivers/video/fbdev/geode/gx1fb_core.c b/drivers/video/fbdev/geode/gx1fb_core.c
index 5d34d89fb665..e41204ecb0e3 100644
--- a/drivers/video/fbdev/geode/gx1fb_core.c
+++ b/drivers/video/fbdev/geode/gx1fb_core.c
@@ -410,13 +410,13 @@ static void __init gx1fb_setup(char *options)
continue;
if (!strncmp(this_opt, "mode:", 5))
- strlcpy(mode_option, this_opt + 5, sizeof(mode_option));
+ strscpy(mode_option, this_opt + 5, sizeof(mode_option));
else if (!strncmp(this_opt, "crt:", 4))
crt_option = !!simple_strtoul(this_opt + 4, NULL, 0);
else if (!strncmp(this_opt, "panel:", 6))
- strlcpy(panel_option, this_opt + 6, sizeof(panel_option));
+ strscpy(panel_option, this_opt + 6, sizeof(panel_option));
else
- strlcpy(mode_option, this_opt, sizeof(mode_option));
+ strscpy(mode_option, this_opt, sizeof(mode_option));
}
}
#endif
diff --git a/drivers/video/fbdev/gxt4500.c b/drivers/video/fbdev/gxt4500.c
index e5475ae1e158..94588b809ebf 100644
--- a/drivers/video/fbdev/gxt4500.c
+++ b/drivers/video/fbdev/gxt4500.c
@@ -650,7 +650,7 @@ static int gxt4500_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
cardtype = ent->driver_data;
par->refclk_ps = cardinfo[cardtype].refclk_ps;
info->fix = gxt4500_fix;
- strlcpy(info->fix.id, cardinfo[cardtype].cardname,
+ strscpy(info->fix.id, cardinfo[cardtype].cardname,
sizeof(info->fix.id));
info->pseudo_palette = par->pseudo_palette;
diff --git a/drivers/video/fbdev/hpfb.c b/drivers/video/fbdev/hpfb.c
index 8d418abdd767..cdd44e5deafe 100644
--- a/drivers/video/fbdev/hpfb.c
+++ b/drivers/video/fbdev/hpfb.c
@@ -375,7 +375,7 @@ static struct dio_driver hpfb_driver = {
.remove = hpfb_remove_one,
};
-int __init hpfb_init(void)
+static int __init hpfb_init(void)
{
unsigned int sid;
unsigned char i;
@@ -415,7 +415,7 @@ int __init hpfb_init(void)
return 0;
}
-void __exit hpfb_cleanup_module(void)
+static void __exit hpfb_cleanup_module(void)
{
dio_unregister_driver(&hpfb_driver);
}
diff --git a/drivers/video/fbdev/i740fb.c b/drivers/video/fbdev/i740fb.c
index 09dd85553d4f..bd30d8314b68 100644
--- a/drivers/video/fbdev/i740fb.c
+++ b/drivers/video/fbdev/i740fb.c
@@ -159,7 +159,7 @@ static int i740fb_setup_ddc_bus(struct fb_info *info)
{
struct i740fb_par *par = info->par;
- strlcpy(par->ddc_adapter.name, info->fix.id,
+ strscpy(par->ddc_adapter.name, info->fix.id,
sizeof(par->ddc_adapter.name));
par->ddc_adapter.owner = THIS_MODULE;
par->ddc_adapter.class = I2C_CLASS_DDC;
@@ -400,7 +400,7 @@ static int i740fb_decode_var(const struct fb_var_screeninfo *var,
u32 xres, right, hslen, left, xtotal;
u32 yres, lower, vslen, upper, ytotal;
u32 vxres, xoffset, vyres, yoffset;
- u32 bpp, base, dacspeed24, mem;
+ u32 bpp, base, dacspeed24, mem, freq;
u8 r7;
int i;
@@ -643,7 +643,12 @@ static int i740fb_decode_var(const struct fb_var_screeninfo *var,
par->atc[VGA_ATC_OVERSCAN] = 0;
/* Calculate VCLK that most closely matches the requested dot clock */
- i740_calc_vclk((((u32)1e9) / var->pixclock) * (u32)(1e3), par);
+ freq = (((u32)1e9) / var->pixclock) * (u32)(1e3);
+ if (freq < I740_RFREQ_FIX) {
+ fb_dbg(info, "invalid pixclock\n");
+ freq = I740_RFREQ_FIX;
+ }
+ i740_calc_vclk(freq, par);
/* Since we program the clocks ourselves, always use VCLK2. */
par->misc |= 0x0C;
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index a2f644c97f28..94f3bc637fc8 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -41,7 +41,18 @@
#include <video/of_videomode.h>
#include <video/videomode.h>
-#include <linux/platform_data/video-imxfb.h>
+#define PCR_TFT (1 << 31)
+#define PCR_BPIX_8 (3 << 25)
+#define PCR_BPIX_12 (4 << 25)
+#define PCR_BPIX_16 (5 << 25)
+#define PCR_BPIX_18 (6 << 25)
+
+struct imx_fb_videomode {
+ struct fb_videomode mode;
+ u32 pcr;
+ bool aus_mode;
+ unsigned char bpp;
+};
/*
* Complain if VAR is out of range.
@@ -656,7 +667,6 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf
static int imxfb_init_fbinfo(struct platform_device *pdev)
{
- struct imx_fb_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct fb_info *info = platform_get_drvdata(pdev);
struct imxfb_info *fbi = info->par;
struct device_node *np;
@@ -671,7 +681,7 @@ static int imxfb_init_fbinfo(struct platform_device *pdev)
fbi->devtype = pdev->id_entry->driver_data;
- strlcpy(info->fix.id, IMX_NAME, sizeof(info->fix.id));
+ strscpy(info->fix.id, IMX_NAME, sizeof(info->fix.id));
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.type_aux = 0;
@@ -690,25 +700,20 @@ static int imxfb_init_fbinfo(struct platform_device *pdev)
info->fbops = &imxfb_ops;
info->flags = FBINFO_FLAG_DEFAULT |
FBINFO_READS_FAST;
- if (pdata) {
- fbi->lscr1 = pdata->lscr1;
- fbi->dmacr = pdata->dmacr;
- fbi->pwmr = pdata->pwmr;
- } else {
- np = pdev->dev.of_node;
- info->var.grayscale = of_property_read_bool(np,
- "cmap-greyscale");
- fbi->cmap_inverse = of_property_read_bool(np, "cmap-inverse");
- fbi->cmap_static = of_property_read_bool(np, "cmap-static");
- fbi->lscr1 = IMXFB_LSCR1_DEFAULT;
+ np = pdev->dev.of_node;
+ info->var.grayscale = of_property_read_bool(np,
+ "cmap-greyscale");
+ fbi->cmap_inverse = of_property_read_bool(np, "cmap-inverse");
+ fbi->cmap_static = of_property_read_bool(np, "cmap-static");
- of_property_read_u32(np, "fsl,lpccr", &fbi->pwmr);
+ fbi->lscr1 = IMXFB_LSCR1_DEFAULT;
- of_property_read_u32(np, "fsl,lscr1", &fbi->lscr1);
+ of_property_read_u32(np, "fsl,lpccr", &fbi->pwmr);
- of_property_read_u32(np, "fsl,dmacr", &fbi->dmacr);
- }
+ of_property_read_u32(np, "fsl,lscr1", &fbi->lscr1);
+
+ of_property_read_u32(np, "fsl,dmacr", &fbi->dmacr);
return 0;
}
@@ -863,10 +868,10 @@ static int imxfb_probe(struct platform_device *pdev)
struct imxfb_info *fbi;
struct lcd_device *lcd;
struct fb_info *info;
- struct imx_fb_platform_data *pdata;
struct resource *res;
struct imx_fb_videomode *m;
const struct of_device_id *of_id;
+ struct device_node *display_np;
int ret, i;
int bytes_per_pixel;
@@ -884,8 +889,6 @@ static int imxfb_probe(struct platform_device *pdev)
if (!res)
return -ENODEV;
- pdata = dev_get_platdata(&pdev->dev);
-
info = framebuffer_alloc(sizeof(struct imxfb_info), &pdev->dev);
if (!info)
return -ENOMEM;
@@ -898,43 +901,34 @@ static int imxfb_probe(struct platform_device *pdev)
if (ret < 0)
goto failed_init;
- if (pdata) {
- if (!fb_mode)
- fb_mode = pdata->mode[0].mode.name;
-
- fbi->mode = pdata->mode;
- fbi->num_modes = pdata->num_modes;
- } else {
- struct device_node *display_np;
- fb_mode = NULL;
-
- display_np = of_parse_phandle(pdev->dev.of_node, "display", 0);
- if (!display_np) {
- dev_err(&pdev->dev, "No display defined in devicetree\n");
- ret = -EINVAL;
- goto failed_of_parse;
- }
+ fb_mode = NULL;
- /*
- * imxfb does not support more modes, we choose only the native
- * mode.
- */
- fbi->num_modes = 1;
-
- fbi->mode = devm_kzalloc(&pdev->dev,
- sizeof(struct imx_fb_videomode), GFP_KERNEL);
- if (!fbi->mode) {
- ret = -ENOMEM;
- of_node_put(display_np);
- goto failed_of_parse;
- }
+ display_np = of_parse_phandle(pdev->dev.of_node, "display", 0);
+ if (!display_np) {
+ dev_err(&pdev->dev, "No display defined in devicetree\n");
+ ret = -EINVAL;
+ goto failed_of_parse;
+ }
- ret = imxfb_of_read_mode(&pdev->dev, display_np, fbi->mode);
+ /*
+ * imxfb does not support more modes, we choose only the native
+ * mode.
+ */
+ fbi->num_modes = 1;
+
+ fbi->mode = devm_kzalloc(&pdev->dev,
+ sizeof(struct imx_fb_videomode), GFP_KERNEL);
+ if (!fbi->mode) {
+ ret = -ENOMEM;
of_node_put(display_np);
- if (ret)
- goto failed_of_parse;
+ goto failed_of_parse;
}
+ ret = imxfb_of_read_mode(&pdev->dev, display_np, fbi->mode);
+ of_node_put(display_np);
+ if (ret)
+ goto failed_of_parse;
+
/* Calculate maximum bytes used per pixel. In most cases this should
* be the same as m->bpp/8 */
m = &fbi->mode[0];
@@ -943,13 +937,6 @@ static int imxfb_probe(struct platform_device *pdev)
info->fix.smem_len = max_t(size_t, info->fix.smem_len,
m->mode.xres * m->mode.yres * bytes_per_pixel);
- res = request_mem_region(res->start, resource_size(res),
- DRIVER_NAME);
- if (!res) {
- ret = -EBUSY;
- goto failed_req;
- }
-
fbi->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(fbi->clk_ipg)) {
ret = PTR_ERR(fbi->clk_ipg);
@@ -983,10 +970,10 @@ static int imxfb_probe(struct platform_device *pdev)
goto failed_getclock;
}
- fbi->regs = ioremap(res->start, resource_size(res));
- if (fbi->regs == NULL) {
+ fbi->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(fbi->regs)) {
dev_err(&pdev->dev, "Cannot map frame buffer registers\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(fbi->regs);
goto failed_ioremap;
}
@@ -1001,13 +988,6 @@ static int imxfb_probe(struct platform_device *pdev)
info->fix.smem_start = fbi->map_dma;
- if (pdata && pdata->init) {
- ret = pdata->init(fbi->pdev);
- if (ret)
- goto failed_platform_init;
- }
-
-
INIT_LIST_HEAD(&info->modelist);
for (i = 0; i < fbi->num_modes; i++)
fb_add_videomode(&fbi->mode[i].mode, &info->modelist);
@@ -1059,17 +1039,12 @@ failed_lcd:
failed_register:
fb_dealloc_cmap(&info->cmap);
failed_cmap:
- if (pdata && pdata->exit)
- pdata->exit(fbi->pdev);
-failed_platform_init:
dma_free_wc(&pdev->dev, fbi->map_size, info->screen_buffer,
fbi->map_dma);
failed_map:
- iounmap(fbi->regs);
failed_ioremap:
failed_getclock:
release_mem_region(res->start, resource_size(res));
-failed_req:
failed_of_parse:
kfree(info->pseudo_palette);
failed_init:
@@ -1079,26 +1054,15 @@ failed_init:
static int imxfb_remove(struct platform_device *pdev)
{
- struct imx_fb_platform_data *pdata;
struct fb_info *info = platform_get_drvdata(pdev);
struct imxfb_info *fbi = info->par;
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
imxfb_disable_controller(fbi);
unregister_framebuffer(info);
fb_dealloc_cmap(&info->cmap);
- pdata = dev_get_platdata(&pdev->dev);
- if (pdata && pdata->exit)
- pdata->exit(fbi->pdev);
dma_free_wc(&pdev->dev, fbi->map_size, info->screen_buffer,
fbi->map_dma);
- iounmap(fbi->regs);
- release_mem_region(res->start, resource_size(res));
kfree(info->pseudo_palette);
framebuffer_release(info);
diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c
index 236521b19daf..68bba2688f4c 100644
--- a/drivers/video/fbdev/matrox/matroxfb_base.c
+++ b/drivers/video/fbdev/matrox/matroxfb_base.c
@@ -2383,9 +2383,9 @@ static int __init matroxfb_setup(char *options) {
else if (!strncmp(this_opt, "mem:", 4))
mem = simple_strtoul(this_opt+4, NULL, 0);
else if (!strncmp(this_opt, "mode:", 5))
- strlcpy(videomode, this_opt+5, sizeof(videomode));
+ strscpy(videomode, this_opt + 5, sizeof(videomode));
else if (!strncmp(this_opt, "outputs:", 8))
- strlcpy(outputs, this_opt+8, sizeof(outputs));
+ strscpy(outputs, this_opt + 8, sizeof(outputs));
else if (!strncmp(this_opt, "dfp:", 4)) {
dfp_type = simple_strtoul(this_opt+4, NULL, 0);
dfp = 1;
@@ -2455,7 +2455,7 @@ static int __init matroxfb_setup(char *options) {
else if (!strcmp(this_opt, "dfp"))
dfp = value;
else {
- strlcpy(videomode, this_opt, sizeof(videomode));
+ strscpy(videomode, this_opt, sizeof(videomode));
}
}
}
diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c
index b1acb1ebebe9..91001990e351 100644
--- a/drivers/video/fbdev/offb.c
+++ b/drivers/video/fbdev/offb.c
@@ -26,6 +26,7 @@
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
+#include <linux/platform_device.h>
#include <asm/io.h>
#ifdef CONFIG_PPC32
diff --git a/drivers/video/fbdev/omap/hwa742.c b/drivers/video/fbdev/omap/hwa742.c
index 9d9fe5c3a7a1..161fc65d6b57 100644
--- a/drivers/video/fbdev/omap/hwa742.c
+++ b/drivers/video/fbdev/omap/hwa742.c
@@ -489,7 +489,7 @@ static void hwa742_update_window_auto(struct timer_list *unused)
__hwa742_update_window_auto(false);
}
-int hwa742_update_window_async(struct fb_info *fbi,
+static int hwa742_update_window_async(struct fb_info *fbi,
struct omapfb_update_window *win,
void (*complete_callback)(void *arg),
void *complete_callback_data)
@@ -522,7 +522,6 @@ int hwa742_update_window_async(struct fb_info *fbi,
out:
return r;
}
-EXPORT_SYMBOL(hwa742_update_window_async);
static int hwa742_setup_plane(int plane, int channel_out,
unsigned long offset, int screen_width,
diff --git a/drivers/video/fbdev/omap/omapfb.h b/drivers/video/fbdev/omap/omapfb.h
index beb841ccb99c..ab1cb6e7f5f8 100644
--- a/drivers/video/fbdev/omap/omapfb.h
+++ b/drivers/video/fbdev/omap/omapfb.h
@@ -227,13 +227,4 @@ extern int omapfb_register_client(struct omapfb_notifier_block *nb,
omapfb_notifier_callback_t callback,
void *callback_data);
extern int omapfb_unregister_client(struct omapfb_notifier_block *nb);
-extern int omapfb_update_window_async(struct fb_info *fbi,
- struct omapfb_update_window *win,
- void (*callback)(void *),
- void *callback_data);
-extern int hwa742_update_window_async(struct fb_info *fbi,
- struct omapfb_update_window *win,
- void (*callback)(void *),
- void *callback_data);
-
#endif /* __OMAPFB_H */
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index 292fcb0a24fc..17cda5765683 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -668,7 +668,7 @@ static int omapfb_set_par(struct fb_info *fbi)
return r;
}
-int omapfb_update_window_async(struct fb_info *fbi,
+static int omapfb_update_window_async(struct fb_info *fbi,
struct omapfb_update_window *win,
void (*callback)(void *),
void *callback_data)
@@ -714,7 +714,6 @@ int omapfb_update_window_async(struct fb_info *fbi,
return fbdev->ctrl->update_window(fbi, win, callback, callback_data);
}
-EXPORT_SYMBOL(omapfb_update_window_async);
static int omapfb_update_win(struct fb_info *fbi,
struct omapfb_update_window *win)
@@ -1643,15 +1642,13 @@ static int omapfb_do_probe(struct platform_device *pdev,
goto cleanup;
}
fbdev->int_irq = platform_get_irq(pdev, 0);
- if (!fbdev->int_irq) {
- dev_err(&pdev->dev, "unable to get irq\n");
+ if (fbdev->int_irq < 0) {
r = ENXIO;
goto cleanup;
}
fbdev->ext_irq = platform_get_irq(pdev, 1);
- if (!fbdev->ext_irq) {
- dev_err(&pdev->dev, "unable to get irq\n");
+ if (fbdev->ext_irq < 0) {
r = ENXIO;
goto cleanup;
}
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
index afa688e754b9..5ccddcfce722 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
@@ -1331,7 +1331,7 @@ static void clear_fb_info(struct fb_info *fbi)
{
memset(&fbi->var, 0, sizeof(fbi->var));
memset(&fbi->fix, 0, sizeof(fbi->fix));
- strlcpy(fbi->fix.id, MODULE_NAME, sizeof(fbi->fix.id));
+ strscpy(fbi->fix.id, MODULE_NAME, sizeof(fbi->fix.id));
}
static int omapfb_free_all_fbmem(struct omapfb2_device *fbdev)
diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c
index d3be2c64f1c0..8fd79deb1e2a 100644
--- a/drivers/video/fbdev/pm2fb.c
+++ b/drivers/video/fbdev/pm2fb.c
@@ -617,6 +617,11 @@ static int pm2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
return -EINVAL;
}
+ if (!var->pixclock) {
+ DPRINTK("pixclock is zero\n");
+ return -EINVAL;
+ }
+
if (PICOS2KHZ(var->pixclock) > PM2_MAX_PIXCLOCK) {
DPRINTK("pixclock too high (%ldKHz)\n",
PICOS2KHZ(var->pixclock));
diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c
index e943300d23e8..d5d0bbd39213 100644
--- a/drivers/video/fbdev/pxa168fb.c
+++ b/drivers/video/fbdev/pxa168fb.c
@@ -640,7 +640,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
info->flags = FBINFO_DEFAULT | FBINFO_PARTIAL_PAN_OK |
FBINFO_HWACCEL_XPAN | FBINFO_HWACCEL_YPAN;
info->node = -1;
- strlcpy(info->fix.id, mi->id, 16);
+ strscpy(info->fix.id, mi->id, 16);
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.type_aux = 0;
info->fix.xpanstep = 0;
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index 66cfc3e9d3cf..696ac5431180 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -2042,7 +2042,7 @@ static int __init pxafb_setup_options(void)
return -ENODEV;
if (options)
- strlcpy(g_options, options, sizeof(g_options));
+ strscpy(g_options, options, sizeof(g_options));
return 0;
}
diff --git a/drivers/video/fbdev/q40fb.c b/drivers/video/fbdev/q40fb.c
index 079a2a7fb2c5..964bc88bb89c 100644
--- a/drivers/video/fbdev/q40fb.c
+++ b/drivers/video/fbdev/q40fb.c
@@ -133,7 +133,7 @@ static struct platform_device q40fb_device = {
.name = "q40fb",
};
-int __init q40fb_init(void)
+static int __init q40fb_init(void)
{
int ret = 0;
diff --git a/drivers/video/fbdev/s3fb.c b/drivers/video/fbdev/s3fb.c
index b93c8eb02336..67b63a753cb2 100644
--- a/drivers/video/fbdev/s3fb.c
+++ b/drivers/video/fbdev/s3fb.c
@@ -248,7 +248,7 @@ static int s3fb_setup_ddc_bus(struct fb_info *info)
{
struct s3fb_info *par = info->par;
- strlcpy(par->ddc_adapter.name, info->fix.id,
+ strscpy(par->ddc_adapter.name, info->fix.id,
sizeof(par->ddc_adapter.name));
par->ddc_adapter.owner = THIS_MODULE;
par->ddc_adapter.class = I2C_CLASS_DDC;
@@ -905,6 +905,8 @@ static int s3fb_set_par(struct fb_info *info)
value = clamp((htotal + hsstart + 1) / 2 + 2, hsstart + 4, htotal + 1);
svga_wcrt_multi(par->state.vgabase, s3_dtpc_regs, value);
+ if (screen_size > info->screen_size)
+ screen_size = info->screen_size;
memset_io(info->screen_base, 0x00, screen_size);
/* Device and screen back on */
svga_wcrt_mask(par->state.vgabase, 0x17, 0x80, 0x80);
diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c
index e31cf63b0a62..017c8efe8267 100644
--- a/drivers/video/fbdev/sa1100fb.c
+++ b/drivers/video/fbdev/sa1100fb.c
@@ -1224,47 +1224,6 @@ int __init sa1100fb_init(void)
return platform_driver_register(&sa1100fb_driver);
}
-int __init sa1100fb_setup(char *options)
-{
-#if 0
- char *this_opt;
-
- if (!options || !*options)
- return 0;
-
- while ((this_opt = strsep(&options, ",")) != NULL) {
-
- if (!strncmp(this_opt, "bpp:", 4))
- current_par.max_bpp =
- simple_strtoul(this_opt + 4, NULL, 0);
-
- if (!strncmp(this_opt, "lccr0:", 6))
- lcd_shadow.lccr0 =
- simple_strtoul(this_opt + 6, NULL, 0);
- if (!strncmp(this_opt, "lccr1:", 6)) {
- lcd_shadow.lccr1 =
- simple_strtoul(this_opt + 6, NULL, 0);
- current_par.max_xres =
- (lcd_shadow.lccr1 & 0x3ff) + 16;
- }
- if (!strncmp(this_opt, "lccr2:", 6)) {
- lcd_shadow.lccr2 =
- simple_strtoul(this_opt + 6, NULL, 0);
- current_par.max_yres =
- (lcd_shadow.
- lccr0 & LCCR0_SDS) ? ((lcd_shadow.
- lccr2 & 0x3ff) +
- 1) *
- 2 : ((lcd_shadow.lccr2 & 0x3ff) + 1);
- }
- if (!strncmp(this_opt, "lccr3:", 6))
- lcd_shadow.lccr3 =
- simple_strtoul(this_opt + 6, NULL, 0);
- }
-#endif
- return 0;
-}
-
module_init(sa1100fb_init);
MODULE_DESCRIPTION("StrongARM-1100/1110 framebuffer driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index cf2a90ecd64e..e770b4a356b5 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -355,7 +355,7 @@ static int simplefb_regulators_get(struct simplefb_par *par,
if (!p || p == prop->name)
continue;
- strlcpy(name, prop->name,
+ strscpy(name, prop->name,
strlen(prop->name) - strlen(SUPPLY_SUFFIX) + 1);
regulator = devm_regulator_get_optional(&pdev->dev, name);
if (IS_ERR(regulator)) {
diff --git a/drivers/video/fbdev/sis/init.c b/drivers/video/fbdev/sis/init.c
index b568c646a76c..2ba91d62af92 100644
--- a/drivers/video/fbdev/sis/init.c
+++ b/drivers/video/fbdev/sis/init.c
@@ -355,12 +355,12 @@ SiS_GetModeID(int VGAEngine, unsigned int VBFlags, int HDisplay, int VDisplay,
}
break;
case 400:
- if((!(VBFlags & CRT1_LCDA)) || ((LCDwidth >= 800) && (LCDwidth >= 600))) {
+ if((!(VBFlags & CRT1_LCDA)) || ((LCDwidth >= 800) && (LCDheight >= 600))) {
if(VDisplay == 300) ModeIndex = ModeIndex_400x300[Depth];
}
break;
case 512:
- if((!(VBFlags & CRT1_LCDA)) || ((LCDwidth >= 1024) && (LCDwidth >= 768))) {
+ if((!(VBFlags & CRT1_LCDA)) || ((LCDwidth >= 1024) && (LCDheight >= 768))) {
if(VDisplay == 384) ModeIndex = ModeIndex_512x384[Depth];
}
break;
diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c
index f28fd69d5eb7..c9e77429dfa3 100644
--- a/drivers/video/fbdev/sis/sis_main.c
+++ b/drivers/video/fbdev/sis/sis_main.c
@@ -649,37 +649,37 @@ sisfb_validate_mode(struct sis_video_info *ivideo, int myindex, u32 vbflags)
u16 xres=0, yres, myres;
#ifdef CONFIG_FB_SIS_300
- if(ivideo->sisvga_engine == SIS_300_VGA) {
- if(!(sisbios_mode[myindex].chipset & MD_SIS300))
+ if (ivideo->sisvga_engine == SIS_300_VGA) {
+ if (!(sisbios_mode[myindex].chipset & MD_SIS300))
return -1 ;
}
#endif
#ifdef CONFIG_FB_SIS_315
- if(ivideo->sisvga_engine == SIS_315_VGA) {
- if(!(sisbios_mode[myindex].chipset & MD_SIS315))
+ if (ivideo->sisvga_engine == SIS_315_VGA) {
+ if (!(sisbios_mode[myindex].chipset & MD_SIS315))
return -1;
}
#endif
myres = sisbios_mode[myindex].yres;
- switch(vbflags & VB_DISPTYPE_DISP2) {
+ switch (vbflags & VB_DISPTYPE_DISP2) {
case CRT2_LCD:
xres = ivideo->lcdxres; yres = ivideo->lcdyres;
- if((ivideo->SiS_Pr.SiS_CustomT != CUT_PANEL848) &&
- (ivideo->SiS_Pr.SiS_CustomT != CUT_PANEL856)) {
- if(sisbios_mode[myindex].xres > xres)
+ if ((ivideo->SiS_Pr.SiS_CustomT != CUT_PANEL848) &&
+ (ivideo->SiS_Pr.SiS_CustomT != CUT_PANEL856)) {
+ if (sisbios_mode[myindex].xres > xres)
return -1;
- if(myres > yres)
+ if (myres > yres)
return -1;
}
- if(ivideo->sisfb_fstn) {
- if(sisbios_mode[myindex].xres == 320) {
- if(myres == 240) {
- switch(sisbios_mode[myindex].mode_no[1]) {
+ if (ivideo->sisfb_fstn) {
+ if (sisbios_mode[myindex].xres == 320) {
+ if (myres == 240) {
+ switch (sisbios_mode[myindex].mode_no[1]) {
case 0x50: myindex = MODE_FSTN_8; break;
case 0x56: myindex = MODE_FSTN_16; break;
case 0x53: return -1;
@@ -688,7 +688,7 @@ sisfb_validate_mode(struct sis_video_info *ivideo, int myindex, u32 vbflags)
}
}
- if(SiS_GetModeID_LCD(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
+ if (SiS_GetModeID_LCD(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
sisbios_mode[myindex].yres, 0, ivideo->sisfb_fstn,
ivideo->SiS_Pr.SiS_CustomT, xres, yres, ivideo->vbflags2) < 0x14) {
return -1;
@@ -696,14 +696,14 @@ sisfb_validate_mode(struct sis_video_info *ivideo, int myindex, u32 vbflags)
break;
case CRT2_TV:
- if(SiS_GetModeID_TV(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
+ if (SiS_GetModeID_TV(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
sisbios_mode[myindex].yres, 0, ivideo->vbflags2) < 0x14) {
return -1;
}
break;
case CRT2_VGA:
- if(SiS_GetModeID_VGA2(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
+ if (SiS_GetModeID_VGA2(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres,
sisbios_mode[myindex].yres, 0, ivideo->vbflags2) < 0x14) {
return -1;
}
@@ -1872,7 +1872,7 @@ sisfb_get_fix(struct fb_fix_screeninfo *fix, int con, struct fb_info *info)
memset(fix, 0, sizeof(struct fb_fix_screeninfo));
- strlcpy(fix->id, ivideo->myid, sizeof(fix->id));
+ strscpy(fix->id, ivideo->myid, sizeof(fix->id));
mutex_lock(&info->mm_lock);
fix->smem_start = ivideo->video_base + ivideo->video_offset;
@@ -2204,82 +2204,88 @@ static bool sisfb_test_DDC1(struct sis_video_info *ivideo)
static void sisfb_sense_crt1(struct sis_video_info *ivideo)
{
- bool mustwait = false;
- u8 sr1F, cr17;
+ bool mustwait = false;
+ u8 sr1F, cr17;
#ifdef CONFIG_FB_SIS_315
- u8 cr63=0;
+ u8 cr63 = 0;
#endif
- u16 temp = 0xffff;
- int i;
+ u16 temp = 0xffff;
+ int i;
+
+ sr1F = SiS_GetReg(SISSR, 0x1F);
+ SiS_SetRegOR(SISSR, 0x1F, 0x04);
+ SiS_SetRegAND(SISSR, 0x1F, 0x3F);
- sr1F = SiS_GetReg(SISSR, 0x1F);
- SiS_SetRegOR(SISSR, 0x1F, 0x04);
- SiS_SetRegAND(SISSR, 0x1F, 0x3F);
- if(sr1F & 0xc0) mustwait = true;
+ if (sr1F & 0xc0)
+ mustwait = true;
#ifdef CONFIG_FB_SIS_315
- if(ivideo->sisvga_engine == SIS_315_VGA) {
- cr63 = SiS_GetReg(SISCR, ivideo->SiS_Pr.SiS_MyCR63);
- cr63 &= 0x40;
- SiS_SetRegAND(SISCR, ivideo->SiS_Pr.SiS_MyCR63, 0xBF);
- }
+ if (ivideo->sisvga_engine == SIS_315_VGA) {
+ cr63 = SiS_GetReg(SISCR, ivideo->SiS_Pr.SiS_MyCR63);
+ cr63 &= 0x40;
+ SiS_SetRegAND(SISCR, ivideo->SiS_Pr.SiS_MyCR63, 0xBF);
+ }
#endif
- cr17 = SiS_GetReg(SISCR, 0x17);
- cr17 &= 0x80;
- if(!cr17) {
- SiS_SetRegOR(SISCR, 0x17, 0x80);
- mustwait = true;
- SiS_SetReg(SISSR, 0x00, 0x01);
- SiS_SetReg(SISSR, 0x00, 0x03);
- }
+ cr17 = SiS_GetReg(SISCR, 0x17);
+ cr17 &= 0x80;
- if(mustwait) {
- for(i=0; i < 10; i++) sisfbwaitretracecrt1(ivideo);
- }
+ if (!cr17) {
+ SiS_SetRegOR(SISCR, 0x17, 0x80);
+ mustwait = true;
+ SiS_SetReg(SISSR, 0x00, 0x01);
+ SiS_SetReg(SISSR, 0x00, 0x03);
+ }
+ if (mustwait) {
+ for (i = 0; i < 10; i++)
+ sisfbwaitretracecrt1(ivideo);
+ }
#ifdef CONFIG_FB_SIS_315
- if(ivideo->chip >= SIS_330) {
- SiS_SetRegAND(SISCR, 0x32, ~0x20);
- if(ivideo->chip >= SIS_340) {
- SiS_SetReg(SISCR, 0x57, 0x4a);
- } else {
- SiS_SetReg(SISCR, 0x57, 0x5f);
- }
- SiS_SetRegOR(SISCR, 0x53, 0x02);
- while ((SiS_GetRegByte(SISINPSTAT)) & 0x01) break;
- while (!((SiS_GetRegByte(SISINPSTAT)) & 0x01)) break;
- if ((SiS_GetRegByte(SISMISCW)) & 0x10) temp = 1;
- SiS_SetRegAND(SISCR, 0x53, 0xfd);
- SiS_SetRegAND(SISCR, 0x57, 0x00);
- }
+ if (ivideo->chip >= SIS_330) {
+ SiS_SetRegAND(SISCR, 0x32, ~0x20);
+ if (ivideo->chip >= SIS_340)
+ SiS_SetReg(SISCR, 0x57, 0x4a);
+ else
+ SiS_SetReg(SISCR, 0x57, 0x5f);
+
+ SiS_SetRegOR(SISCR, 0x53, 0x02);
+ while ((SiS_GetRegByte(SISINPSTAT)) & 0x01)
+ break;
+ while (!((SiS_GetRegByte(SISINPSTAT)) & 0x01))
+ break;
+ if ((SiS_GetRegByte(SISMISCW)) & 0x10)
+ temp = 1;
+
+ SiS_SetRegAND(SISCR, 0x53, 0xfd);
+ SiS_SetRegAND(SISCR, 0x57, 0x00);
+ }
#endif
- if(temp == 0xffff) {
- i = 3;
- do {
- temp = SiS_HandleDDC(&ivideo->SiS_Pr, ivideo->vbflags,
- ivideo->sisvga_engine, 0, 0, NULL, ivideo->vbflags2);
- } while(((temp == 0) || (temp == 0xffff)) && i--);
+ if (temp == 0xffff) {
+ i = 3;
- if((temp == 0) || (temp == 0xffff)) {
- if(sisfb_test_DDC1(ivideo)) temp = 1;
- }
- }
+ do {
+ temp = SiS_HandleDDC(&ivideo->SiS_Pr, ivideo->vbflags,
+ ivideo->sisvga_engine, 0, 0, NULL, ivideo->vbflags2);
+ } while (((temp == 0) || (temp == 0xffff)) && i--);
- if((temp) && (temp != 0xffff)) {
- SiS_SetRegOR(SISCR, 0x32, 0x20);
- }
+ if ((temp == 0) || (temp == 0xffff)) {
+ if (sisfb_test_DDC1(ivideo))
+ temp = 1;
+ }
+ }
+
+ if ((temp) && (temp != 0xffff))
+ SiS_SetRegOR(SISCR, 0x32, 0x20);
#ifdef CONFIG_FB_SIS_315
- if(ivideo->sisvga_engine == SIS_315_VGA) {
- SiS_SetRegANDOR(SISCR, ivideo->SiS_Pr.SiS_MyCR63, 0xBF, cr63);
- }
+ if (ivideo->sisvga_engine == SIS_315_VGA)
+ SiS_SetRegANDOR(SISCR, ivideo->SiS_Pr.SiS_MyCR63, 0xBF, cr63);
#endif
- SiS_SetRegANDOR(SISCR, 0x17, 0x7F, cr17);
-
- SiS_SetReg(SISSR, 0x1F, sr1F);
+ SiS_SetRegANDOR(SISCR, 0x17, 0x7F, cr17);
+ SiS_SetReg(SISSR, 0x1F, sr1F);
}
/* Determine and detect attached devices on SiS30x */
@@ -2293,25 +2299,25 @@ static void SiS_SenseLCD(struct sis_video_info *ivideo)
ivideo->SiS_Pr.PanelSelfDetected = false;
/* LCD detection only for TMDS bridges */
- if(!(ivideo->vbflags2 & VB2_SISTMDSBRIDGE))
+ if (!(ivideo->vbflags2 & VB2_SISTMDSBRIDGE))
return;
- if(ivideo->vbflags2 & VB2_30xBDH)
+ if (ivideo->vbflags2 & VB2_30xBDH)
return;
/* If LCD already set up by BIOS, skip it */
reg = SiS_GetReg(SISCR, 0x32);
- if(reg & 0x08)
+ if (reg & 0x08)
return;
realcrtno = 1;
- if(ivideo->SiS_Pr.DDCPortMixup)
+ if (ivideo->SiS_Pr.DDCPortMixup)
realcrtno = 0;
/* Check DDC capabilities */
temp = SiS_HandleDDC(&ivideo->SiS_Pr, ivideo->vbflags, ivideo->sisvga_engine,
realcrtno, 0, &buffer[0], ivideo->vbflags2);
- if((!temp) || (temp == 0xffff) || (!(temp & 0x02)))
+ if ((!temp) || (temp == 0xffff) || (!(temp & 0x02)))
return;
/* Read DDC data */
@@ -2320,17 +2326,17 @@ static void SiS_SenseLCD(struct sis_video_info *ivideo)
temp = SiS_HandleDDC(&ivideo->SiS_Pr, ivideo->vbflags,
ivideo->sisvga_engine, realcrtno, 1,
&buffer[0], ivideo->vbflags2);
- } while((temp) && i--);
+ } while ((temp) && i--);
- if(temp)
+ if (temp)
return;
/* No digital device */
- if(!(buffer[0x14] & 0x80))
+ if (!(buffer[0x14] & 0x80))
return;
/* First detailed timing preferred timing? */
- if(!(buffer[0x18] & 0x02))
+ if (!(buffer[0x18] & 0x02))
return;
xres = buffer[0x38] | ((buffer[0x3a] & 0xf0) << 4);
@@ -2338,26 +2344,26 @@ static void SiS_SenseLCD(struct sis_video_info *ivideo)
switch(xres) {
case 1024:
- if(yres == 768)
+ if (yres == 768)
paneltype = 0x02;
break;
case 1280:
- if(yres == 1024)
+ if (yres == 1024)
paneltype = 0x03;
break;
case 1600:
- if((yres == 1200) && (ivideo->vbflags2 & VB2_30xC))
+ if ((yres == 1200) && (ivideo->vbflags2 & VB2_30xC))
paneltype = 0x0b;
break;
}
- if(!paneltype)
+ if (!paneltype)
return;
- if(buffer[0x23])
+ if (buffer[0x23])
cr37 |= 0x10;
- if((buffer[0x47] & 0x18) == 0x18)
+ if ((buffer[0x47] & 0x18) == 0x18)
cr37 |= ((((buffer[0x47] & 0x06) ^ 0x06) << 5) | 0x20);
else
cr37 |= 0xc0;
@@ -2372,31 +2378,34 @@ static void SiS_SenseLCD(struct sis_video_info *ivideo)
static int SISDoSense(struct sis_video_info *ivideo, u16 type, u16 test)
{
- int temp, mytest, result, i, j;
-
- for(j = 0; j < 10; j++) {
- result = 0;
- for(i = 0; i < 3; i++) {
- mytest = test;
- SiS_SetReg(SISPART4, 0x11, (type & 0x00ff));
- temp = (type >> 8) | (mytest & 0x00ff);
- SiS_SetRegANDOR(SISPART4, 0x10, 0xe0, temp);
- SiS_DDC2Delay(&ivideo->SiS_Pr, 0x1500);
- mytest >>= 8;
- mytest &= 0x7f;
- temp = SiS_GetReg(SISPART4, 0x03);
- temp ^= 0x0e;
- temp &= mytest;
- if(temp == mytest) result++;
+ int temp, mytest, result, i, j;
+
+ for (j = 0; j < 10; j++) {
+ result = 0;
+ for (i = 0; i < 3; i++) {
+ mytest = test;
+ SiS_SetReg(SISPART4, 0x11, (type & 0x00ff));
+ temp = (type >> 8) | (mytest & 0x00ff);
+ SiS_SetRegANDOR(SISPART4, 0x10, 0xe0, temp);
+ SiS_DDC2Delay(&ivideo->SiS_Pr, 0x1500);
+ mytest >>= 8;
+ mytest &= 0x7f;
+ temp = SiS_GetReg(SISPART4, 0x03);
+ temp ^= 0x0e;
+ temp &= mytest;
+ if (temp == mytest)
+ result++;
#if 1
- SiS_SetReg(SISPART4, 0x11, 0x00);
- SiS_SetRegAND(SISPART4, 0x10, 0xe0);
- SiS_DDC2Delay(&ivideo->SiS_Pr, 0x1000);
+ SiS_SetReg(SISPART4, 0x11, 0x00);
+ SiS_SetRegAND(SISPART4, 0x10, 0xe0);
+ SiS_DDC2Delay(&ivideo->SiS_Pr, 0x1000);
#endif
- }
- if((result == 0) || (result >= 2)) break;
- }
- return result;
+ }
+
+ if ((result == 0) || (result >= 2))
+ break;
+ }
+ return result;
}
static void SiS_Sense30x(struct sis_video_info *ivideo)
@@ -4262,18 +4271,17 @@ static int sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration,
unsigned int k, RankCapacity, PageCapacity, BankNumHigh, BankNumMid;
unsigned int PhysicalAdrOtherPage, PhysicalAdrHigh, PhysicalAdrHalfPage;
- for(k = 0; k < ARRAY_SIZE(SiS_DRAMType); k++) {
-
+ for (k = 0; k < ARRAY_SIZE(SiS_DRAMType); k++) {
RankCapacity = buswidth * SiS_DRAMType[k][3];
- if(RankCapacity != PseudoRankCapacity)
+ if (RankCapacity != PseudoRankCapacity)
continue;
- if((SiS_DRAMType[k][2] + SiS_DRAMType[k][0]) > PseudoAdrPinCount)
+ if ((SiS_DRAMType[k][2] + SiS_DRAMType[k][0]) > PseudoAdrPinCount)
continue;
BankNumHigh = RankCapacity * 16 * iteration - 1;
- if(iteration == 3) { /* Rank No */
+ if (iteration == 3) { /* Rank No */
BankNumMid = RankCapacity * 16 - 1;
} else {
BankNumMid = RankCapacity * 16 * iteration / 2 - 1;
@@ -4287,18 +4295,22 @@ static int sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration,
SiS_SetRegAND(SISSR, 0x15, 0xFB); /* Test */
SiS_SetRegOR(SISSR, 0x15, 0x04); /* Test */
sr14 = (SiS_DRAMType[k][3] * buswidth) - 1;
- if(buswidth == 4) sr14 |= 0x80;
- else if(buswidth == 2) sr14 |= 0x40;
+
+ if (buswidth == 4)
+ sr14 |= 0x80;
+ else if (buswidth == 2)
+ sr14 |= 0x40;
+
SiS_SetReg(SISSR, 0x13, SiS_DRAMType[k][4]);
SiS_SetReg(SISSR, 0x14, sr14);
BankNumHigh <<= 16;
BankNumMid <<= 16;
- if((BankNumHigh + PhysicalAdrHigh >= mapsize) ||
- (BankNumMid + PhysicalAdrHigh >= mapsize) ||
- (BankNumHigh + PhysicalAdrHalfPage >= mapsize) ||
- (BankNumHigh + PhysicalAdrOtherPage >= mapsize))
+ if ((BankNumHigh + PhysicalAdrHigh >= mapsize) ||
+ (BankNumMid + PhysicalAdrHigh >= mapsize) ||
+ (BankNumHigh + PhysicalAdrHalfPage >= mapsize) ||
+ (BankNumHigh + PhysicalAdrOtherPage >= mapsize))
continue;
/* Write data */
@@ -4312,7 +4324,7 @@ static int sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration,
(FBAddr + BankNumHigh + PhysicalAdrOtherPage));
/* Read data */
- if(readw(FBAddr + BankNumHigh + PhysicalAdrHigh) == PhysicalAdrHigh)
+ if (readw(FBAddr + BankNumHigh + PhysicalAdrHigh) == PhysicalAdrHigh)
return 1;
}
@@ -5867,7 +5879,7 @@ static int sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ivideo->cardnumber++;
}
- strlcpy(ivideo->myid, chipinfo->chip_name, sizeof(ivideo->myid));
+ strscpy(ivideo->myid, chipinfo->chip_name, sizeof(ivideo->myid));
ivideo->warncount = 0;
ivideo->chip_id = pdev->device;
@@ -6150,24 +6162,20 @@ static int sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#endif
#ifdef CONFIG_FB_SIS_315
- if(ivideo->sisvga_engine == SIS_315_VGA) {
+ if (ivideo->sisvga_engine == SIS_315_VGA) {
int result = 1;
- /* if((ivideo->chip == SIS_315H) ||
- (ivideo->chip == SIS_315) ||
- (ivideo->chip == SIS_315PRO) ||
- (ivideo->chip == SIS_330)) {
- sisfb_post_sis315330(pdev);
- } else */ if(ivideo->chip == XGI_20) {
+
+ if (ivideo->chip == XGI_20) {
result = sisfb_post_xgi(pdev);
ivideo->sisfb_can_post = 1;
- } else if((ivideo->chip == XGI_40) && ivideo->haveXGIROM) {
+ } else if ((ivideo->chip == XGI_40) && ivideo->haveXGIROM) {
result = sisfb_post_xgi(pdev);
ivideo->sisfb_can_post = 1;
} else {
printk(KERN_INFO "sisfb: Card is not "
"POSTed and sisfb can't do this either.\n");
}
- if(!result) {
+ if (!result) {
printk(KERN_ERR "sisfb: Failed to POST card\n");
ret = -ENODEV;
goto error_3;
diff --git a/drivers/video/fbdev/skeletonfb.c b/drivers/video/fbdev/skeletonfb.c
index d119b1d08007..8ab9a3fbd281 100644
--- a/drivers/video/fbdev/skeletonfb.c
+++ b/drivers/video/fbdev/skeletonfb.c
@@ -131,8 +131,6 @@ static struct fb_info info;
*/
static struct xxx_par __initdata current_par;
-int xxxfb_init(void);
-
/**
* xxxfb_open - Optional function. Called when the framebuffer is
* first accessed.
@@ -886,7 +884,7 @@ static struct pci_driver xxxfb_driver = {
MODULE_DEVICE_TABLE(pci, xxxfb_id_table);
-int __init xxxfb_init(void)
+static int __init xxxfb_init(void)
{
/*
* For kernel boot options (in 'video=xxxfb:<options>' format)
@@ -967,7 +965,7 @@ static struct platform_device *xxxfb_device;
* Only necessary if your driver takes special options,
* otherwise we fall back on the generic fb_setup().
*/
-int __init xxxfb_setup(char *options)
+static int __init xxxfb_setup(char *options)
{
/* Parse user specified options (`video=xxxfb:') */
}
diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c
index 6a52eba64559..fce6cfbadfd6 100644
--- a/drivers/video/fbdev/sm501fb.c
+++ b/drivers/video/fbdev/sm501fb.c
@@ -1719,7 +1719,7 @@ static int sm501fb_init_fb(struct fb_info *fb, enum sm501_controller head,
enable = 0;
}
- strlcpy(fb->fix.id, fbname, sizeof(fb->fix.id));
+ strscpy(fb->fix.id, fbname, sizeof(fb->fix.id));
memcpy(&par->ops,
(head == HEAD_CRT) ? &sm501fb_ops_crt : &sm501fb_ops_pnl,
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index 5c765655d000..52e4ed9da78c 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -450,7 +450,7 @@ static int ssd1307fb_init(struct ssd1307fb_par *par)
if (ret < 0)
return ret;
- /* Set Set Area Color Mode ON/OFF & Low Power Display Mode */
+ /* Set Area Color Mode ON/OFF & Low Power Display Mode */
if (par->area_color_enable || par->low_power) {
u32 mode;
diff --git a/drivers/video/fbdev/sstfb.c b/drivers/video/fbdev/sstfb.c
index 27d4b0ace2d6..cd4d640f9477 100644
--- a/drivers/video/fbdev/sstfb.c
+++ b/drivers/video/fbdev/sstfb.c
@@ -1382,7 +1382,7 @@ static int sstfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto fail;
}
sst_get_memsize(info, &fix->smem_len);
- strlcpy(fix->id, spec->name, sizeof(fix->id));
+ strscpy(fix->id, spec->name, sizeof(fix->id));
printk(KERN_INFO "%s (revision %d) with %s dac\n",
fix->id, par->revision, par->dac_sw.name);
diff --git a/drivers/video/fbdev/sunxvr1000.c b/drivers/video/fbdev/sunxvr1000.c
index 15b079505a00..490bd9a14763 100644
--- a/drivers/video/fbdev/sunxvr1000.c
+++ b/drivers/video/fbdev/sunxvr1000.c
@@ -80,7 +80,7 @@ static int gfb_set_fbinfo(struct gfb_info *gp)
info->pseudo_palette = gp->pseudo_palette;
/* Fill fix common fields */
- strlcpy(info->fix.id, "gfb", sizeof(info->fix.id));
+ strscpy(info->fix.id, "gfb", sizeof(info->fix.id));
info->fix.smem_start = gp->fb_base_phys;
info->fix.smem_len = gp->fb_size;
info->fix.type = FB_TYPE_PACKED_PIXELS;
diff --git a/drivers/video/fbdev/sunxvr2500.c b/drivers/video/fbdev/sunxvr2500.c
index 1d3bacd9d5ac..1279b02234f8 100644
--- a/drivers/video/fbdev/sunxvr2500.c
+++ b/drivers/video/fbdev/sunxvr2500.c
@@ -84,7 +84,7 @@ static int s3d_set_fbinfo(struct s3d_info *sp)
info->pseudo_palette = sp->pseudo_palette;
/* Fill fix common fields */
- strlcpy(info->fix.id, "s3d", sizeof(info->fix.id));
+ strscpy(info->fix.id, "s3d", sizeof(info->fix.id));
info->fix.smem_start = sp->fb_base_phys;
info->fix.smem_len = sp->fb_size;
info->fix.type = FB_TYPE_PACKED_PIXELS;
diff --git a/drivers/video/fbdev/sunxvr500.c b/drivers/video/fbdev/sunxvr500.c
index 9daf17b11106..f7b463633ba0 100644
--- a/drivers/video/fbdev/sunxvr500.c
+++ b/drivers/video/fbdev/sunxvr500.c
@@ -207,7 +207,7 @@ static int e3d_set_fbinfo(struct e3d_info *ep)
info->pseudo_palette = ep->pseudo_palette;
/* Fill fix common fields */
- strlcpy(info->fix.id, "e3d", sizeof(info->fix.id));
+ strscpy(info->fix.id, "e3d", sizeof(info->fix.id));
info->fix.smem_start = ep->fb_base_phys;
info->fix.smem_len = ep->fb_size;
info->fix.type = FB_TYPE_PACKED_PIXELS;
diff --git a/drivers/video/fbdev/tcx.c b/drivers/video/fbdev/tcx.c
index 1638a40fed22..01d87f53324d 100644
--- a/drivers/video/fbdev/tcx.c
+++ b/drivers/video/fbdev/tcx.c
@@ -333,7 +333,7 @@ tcx_init_fix(struct fb_info *info, int linebytes)
else
tcx_name = "TCX24";
- strlcpy(info->fix.id, tcx_name, sizeof(info->fix.id));
+ strscpy(info->fix.id, tcx_name, sizeof(info->fix.id));
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
diff --git a/drivers/video/fbdev/tdfxfb.c b/drivers/video/fbdev/tdfxfb.c
index 67e37a62b07c..8a8122f8bfeb 100644
--- a/drivers/video/fbdev/tdfxfb.c
+++ b/drivers/video/fbdev/tdfxfb.c
@@ -1264,7 +1264,7 @@ static int tdfxfb_setup_ddc_bus(struct tdfxfb_i2c_chan *chan, const char *name,
{
int rc;
- strlcpy(chan->adapter.name, name, sizeof(chan->adapter.name));
+ strscpy(chan->adapter.name, name, sizeof(chan->adapter.name));
chan->adapter.owner = THIS_MODULE;
chan->adapter.class = I2C_CLASS_DDC;
chan->adapter.algo_data = &chan->algo;
@@ -1293,7 +1293,7 @@ static int tdfxfb_setup_i2c_bus(struct tdfxfb_i2c_chan *chan, const char *name,
{
int rc;
- strlcpy(chan->adapter.name, name, sizeof(chan->adapter.name));
+ strscpy(chan->adapter.name, name, sizeof(chan->adapter.name));
chan->adapter.owner = THIS_MODULE;
chan->adapter.algo_data = &chan->algo;
chan->adapter.dev.parent = dev;
diff --git a/drivers/video/fbdev/tgafb.c b/drivers/video/fbdev/tgafb.c
index ae0cf5540636..1fff5fd7ab51 100644
--- a/drivers/video/fbdev/tgafb.c
+++ b/drivers/video/fbdev/tgafb.c
@@ -1344,7 +1344,7 @@ tgafb_init_fix(struct fb_info *info)
memory_size = 16777216;
}
- strlcpy(info->fix.id, tga_type_name, sizeof(info->fix.id));
+ strscpy(info->fix.id, tga_type_name, sizeof(info->fix.id));
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.type_aux = 0;
diff --git a/drivers/video/fbdev/tridentfb.c b/drivers/video/fbdev/tridentfb.c
index 319131bd72cf..cda095420ee8 100644
--- a/drivers/video/fbdev/tridentfb.c
+++ b/drivers/video/fbdev/tridentfb.c
@@ -270,7 +270,7 @@ static int tridentfb_setup_ddc_bus(struct fb_info *info)
{
struct tridentfb_par *par = info->par;
- strlcpy(par->ddc_adapter.name, info->fix.id,
+ strscpy(par->ddc_adapter.name, info->fix.id,
sizeof(par->ddc_adapter.name));
par->ddc_adapter.owner = THIS_MODULE;
par->ddc_adapter.class = I2C_CLASS_DDC;
diff --git a/drivers/video/fbdev/valkyriefb.c b/drivers/video/fbdev/valkyriefb.c
index a6c9d4f26669..1007023a5e88 100644
--- a/drivers/video/fbdev/valkyriefb.c
+++ b/drivers/video/fbdev/valkyriefb.c
@@ -90,11 +90,7 @@ struct fb_info_valkyrie {
u32 pseudo_palette[16];
};
-/*
- * Exported functions
- */
-int valkyriefb_init(void);
-int valkyriefb_setup(char*);
+static int valkyriefb_setup(char*);
static int valkyriefb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info);
@@ -302,7 +298,7 @@ static void __init valkyrie_choose_mode(struct fb_info_valkyrie *p)
default_vmode, default_cmode);
}
-int __init valkyriefb_init(void)
+static int __init valkyriefb_init(void)
{
struct fb_info_valkyrie *p;
unsigned long frame_buffer_phys, cmap_regs_phys;
@@ -549,7 +545,7 @@ static int __init valkyrie_init_info(struct fb_info *info,
/*
* Parse user specified options (`video=valkyriefb:')
*/
-int __init valkyriefb_setup(char *options)
+static int __init valkyriefb_setup(char *options)
{
char *this_opt;
diff --git a/drivers/video/fbdev/vt8623fb.c b/drivers/video/fbdev/vt8623fb.c
index a92a8c670cf0..4274c6efb249 100644
--- a/drivers/video/fbdev/vt8623fb.c
+++ b/drivers/video/fbdev/vt8623fb.c
@@ -507,6 +507,8 @@ static int vt8623fb_set_par(struct fb_info *info)
(info->var.vmode & FB_VMODE_DOUBLE) ? 2 : 1, 1,
1, info->node);
+ if (screen_size > info->screen_size)
+ screen_size = info->screen_size;
memset_io(info->screen_base, 0x00, screen_size);
/* Device and screen back on */
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 56c77f63cd22..0a53a61231c2 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -35,11 +35,12 @@ if VIRTIO_MENU
config VIRTIO_HARDEN_NOTIFICATION
bool "Harden virtio notification"
+ depends on BROKEN
help
Enable this to harden the device notifications and suppress
those that happen at a time where notifications are illegal.
- Experimental: Note that several drivers still have bugs that
+ Experimental: Note that several drivers still have issues that
may cause crashes or hangs when correct handling of
notifications is enforced; depending on the subset of
drivers and devices you use, this may or may not work.
@@ -126,9 +127,11 @@ config VIRTIO_MEM
This driver provides access to virtio-mem paravirtualized memory
devices, allowing to hotplug and hotunplug memory.
- This driver was only tested under x86-64 and arm64, but should
- theoretically work on all architectures that support memory hotplug
- and hotremove.
+ This driver currently only supports x86-64 and arm64. Although it
+ should compile on other architectures that implement memory
+ hot(un)plug, architecture-specific and/or common
+ code changes may be required for virtio-mem, kdump and kexec to work as
+ expected.
If unsure, say M.
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 14c142d77fba..828ced060742 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -428,7 +428,9 @@ int register_virtio_device(struct virtio_device *dev)
goto out;
dev->index = err;
- dev_set_name(&dev->dev, "virtio%u", dev->index);
+ err = dev_set_name(&dev->dev, "virtio%u", dev->index);
+ if (err)
+ goto out_ida_remove;
err = virtio_device_of_init(dev);
if (err)
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index bd360b91e9d3..3f78a3a1eb75 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -856,7 +856,7 @@ static int virtio_balloon_register_shrinker(struct virtio_balloon *vb)
vb->shrinker.count_objects = virtio_balloon_shrinker_count;
vb->shrinker.seeks = DEFAULT_SEEKS;
- return register_shrinker(&vb->shrinker);
+ return register_shrinker(&vb->shrinker, "virtio-balloon");
}
static int virtballoon_probe(struct virtio_device *vdev)
diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
index e07486f01999..0c2892ec6817 100644
--- a/drivers/virtio/virtio_mem.c
+++ b/drivers/virtio/virtio_mem.c
@@ -862,8 +862,7 @@ static void virtio_mem_sbm_notify_online(struct virtio_mem *vm,
unsigned long mb_id,
unsigned long start_pfn)
{
- const bool is_movable = page_zonenum(pfn_to_page(start_pfn)) ==
- ZONE_MOVABLE;
+ const bool is_movable = is_zone_movable_page(pfn_to_page(start_pfn));
int new_state;
switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
@@ -1158,8 +1157,7 @@ static void virtio_mem_fake_online(unsigned long pfn, unsigned long nr_pages)
*/
static int virtio_mem_fake_offline(unsigned long pfn, unsigned long nr_pages)
{
- const bool is_movable = page_zonenum(pfn_to_page(pfn)) ==
- ZONE_MOVABLE;
+ const bool is_movable = is_zone_movable_page(pfn_to_page(pfn));
int rc, retry_count;
/*
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 083ff1eb743d..3ff746e3f24a 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -403,6 +403,8 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int in
goto error_new_virtqueue;
}
+ vq->num_max = num;
+
/* Activate the queue */
writel(virtqueue_get_vring_size(vq), vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
if (vm_dev->version == 1) {
@@ -487,6 +489,9 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
if (err)
return err;
+ if (of_property_read_bool(vm_dev->pdev->dev.of_node, "wakeup-source"))
+ enable_irq_wake(irq);
+
for (i = 0; i < nvqs; ++i) {
if (!names[i]) {
vqs[i] = NULL;
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index ca51fcc9daab..ad258a9d3b9f 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -214,9 +214,15 @@ static void vp_del_vq(struct virtqueue *vq)
struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
unsigned long flags;
- spin_lock_irqsave(&vp_dev->lock, flags);
- list_del(&info->node);
- spin_unlock_irqrestore(&vp_dev->lock, flags);
+	/*
+	 * If the vq is still in the reset state (e.g. re-enabling it after
+	 * a reset failed), info->node was never put back on the queue list,
+	 * so don't remove it here. This prevents unexpected irqs.
+	 */
+ if (!vq->reset) {
+ spin_lock_irqsave(&vp_dev->lock, flags);
+ list_del(&info->node);
+ spin_unlock_irqrestore(&vp_dev->lock, flags);
+ }
vp_dev->del_vq(info);
kfree(info);
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index a5e5721145c7..2257f1b3d8ae 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -135,6 +135,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
if (!vq)
return ERR_PTR(-ENOMEM);
+ vq->num_max = num;
+
q_pfn = virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
if (q_pfn >> 32) {
dev_err(&vp_dev->pci_dev->dev,
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 623906b4996c..c3b9f2761849 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -34,6 +34,9 @@ static void vp_transport_features(struct virtio_device *vdev, u64 features)
if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) &&
pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV))
__virtio_set_bit(vdev, VIRTIO_F_SR_IOV);
+
+ if (features & BIT_ULL(VIRTIO_F_RING_RESET))
+ __virtio_set_bit(vdev, VIRTIO_F_RING_RESET);
}
/* virtio config->finalize_features() implementation */
@@ -176,6 +179,110 @@ static void vp_reset(struct virtio_device *vdev)
vp_synchronize_vectors(vdev);
}
+static int vp_active_vq(struct virtqueue *vq, u16 msix_vec)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+ struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+ unsigned long index;
+
+ index = vq->index;
+
+ /* activate the queue */
+ vp_modern_set_queue_size(mdev, index, virtqueue_get_vring_size(vq));
+ vp_modern_queue_address(mdev, index, virtqueue_get_desc_addr(vq),
+ virtqueue_get_avail_addr(vq),
+ virtqueue_get_used_addr(vq));
+
+ if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
+ msix_vec = vp_modern_queue_vector(mdev, index, msix_vec);
+ if (msix_vec == VIRTIO_MSI_NO_VECTOR)
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int vp_modern_disable_vq_and_reset(struct virtqueue *vq)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+ struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+ struct virtio_pci_vq_info *info;
+ unsigned long flags;
+
+ if (!virtio_has_feature(vq->vdev, VIRTIO_F_RING_RESET))
+ return -ENOENT;
+
+ vp_modern_set_queue_reset(mdev, vq->index);
+
+ info = vp_dev->vqs[vq->index];
+
+ /* delete vq from irq handler */
+ spin_lock_irqsave(&vp_dev->lock, flags);
+ list_del(&info->node);
+ spin_unlock_irqrestore(&vp_dev->lock, flags);
+
+ INIT_LIST_HEAD(&info->node);
+
+#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
+ __virtqueue_break(vq);
+#endif
+
+ /* For the case where vq has an exclusive irq, call synchronize_irq() to
+ * wait for completion.
+ *
+ * note: We can't use disable_irq() since it conflicts with the affinity
+ * managed IRQ that is used by some drivers.
+ */
+ if (vp_dev->per_vq_vectors && info->msix_vector != VIRTIO_MSI_NO_VECTOR)
+ synchronize_irq(pci_irq_vector(vp_dev->pci_dev, info->msix_vector));
+
+ vq->reset = true;
+
+ return 0;
+}
+
+static int vp_modern_enable_vq_after_reset(struct virtqueue *vq)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+ struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+ struct virtio_pci_vq_info *info;
+ unsigned long flags, index;
+ int err;
+
+ if (!vq->reset)
+ return -EBUSY;
+
+ index = vq->index;
+ info = vp_dev->vqs[index];
+
+ if (vp_modern_get_queue_reset(mdev, index))
+ return -EBUSY;
+
+ if (vp_modern_get_queue_enable(mdev, index))
+ return -EBUSY;
+
+ err = vp_active_vq(vq, info->msix_vector);
+ if (err)
+ return err;
+
+ if (vq->callback) {
+ spin_lock_irqsave(&vp_dev->lock, flags);
+ list_add(&info->node, &vp_dev->virtqueues);
+ spin_unlock_irqrestore(&vp_dev->lock, flags);
+ } else {
+ INIT_LIST_HEAD(&info->node);
+ }
+
+#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
+ __virtqueue_unbreak(vq);
+#endif
+
+ vp_modern_set_queue_enable(&vp_dev->mdev, index, true);
+ vq->reset = false;
+
+ return 0;
+}
+
static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
{
return vp_modern_config_vector(&vp_dev->mdev, vector);
@@ -218,32 +325,21 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
if (!vq)
return ERR_PTR(-ENOMEM);
- /* activate the queue */
- vp_modern_set_queue_size(mdev, index, virtqueue_get_vring_size(vq));
- vp_modern_queue_address(mdev, index, virtqueue_get_desc_addr(vq),
- virtqueue_get_avail_addr(vq),
- virtqueue_get_used_addr(vq));
+ vq->num_max = num;
+
+ err = vp_active_vq(vq, msix_vec);
+ if (err)
+ goto err;
vq->priv = (void __force *)vp_modern_map_vq_notify(mdev, index, NULL);
if (!vq->priv) {
err = -ENOMEM;
- goto err_map_notify;
- }
-
- if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
- msix_vec = vp_modern_queue_vector(mdev, index, msix_vec);
- if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
- err = -EBUSY;
- goto err_assign_vector;
- }
+ goto err;
}
return vq;
-err_assign_vector:
- if (!mdev->notify_base)
- pci_iounmap(mdev->pci_dev, (void __iomem __force *)vq->priv);
-err_map_notify:
+err:
vring_del_virtqueue(vq);
return ERR_PTR(err);
}
@@ -401,6 +497,8 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
.set_vq_affinity = vp_set_vq_affinity,
.get_vq_affinity = vp_get_vq_affinity,
.get_shm_region = vp_get_shm_region,
+ .disable_vq_and_reset = vp_modern_disable_vq_and_reset,
+ .enable_vq_after_reset = vp_modern_enable_vq_after_reset,
};
static const struct virtio_config_ops virtio_pci_config_ops = {
@@ -419,6 +517,8 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
.set_vq_affinity = vp_set_vq_affinity,
.get_vq_affinity = vp_get_vq_affinity,
.get_shm_region = vp_get_shm_region,
+ .disable_vq_and_reset = vp_modern_disable_vq_and_reset,
+ .enable_vq_after_reset = vp_modern_enable_vq_after_reset,
};
/* the PCI probing function */
diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c
index fa2a9445bb18..869cb46bef96 100644
--- a/drivers/virtio/virtio_pci_modern_dev.c
+++ b/drivers/virtio/virtio_pci_modern_dev.c
@@ -3,6 +3,7 @@
#include <linux/virtio_pci_modern.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/delay.h>
/*
* vp_modern_map_capability - map a part of virtio pci capability
@@ -475,6 +476,44 @@ void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
EXPORT_SYMBOL_GPL(vp_modern_set_status);
/*
+ * vp_modern_get_queue_reset - get the queue reset status
+ * @mdev: the modern virtio-pci device
+ * @index: queue index
+ */
+int vp_modern_get_queue_reset(struct virtio_pci_modern_device *mdev, u16 index)
+{
+ struct virtio_pci_modern_common_cfg __iomem *cfg;
+
+ cfg = (struct virtio_pci_modern_common_cfg __iomem *)mdev->common;
+
+ vp_iowrite16(index, &cfg->cfg.queue_select);
+ return vp_ioread16(&cfg->queue_reset);
+}
+EXPORT_SYMBOL_GPL(vp_modern_get_queue_reset);
+
+/*
+ * vp_modern_set_queue_reset - reset the queue
+ * @mdev: the modern virtio-pci device
+ * @index: queue index
+ */
+void vp_modern_set_queue_reset(struct virtio_pci_modern_device *mdev, u16 index)
+{
+ struct virtio_pci_modern_common_cfg __iomem *cfg;
+
+ cfg = (struct virtio_pci_modern_common_cfg __iomem *)mdev->common;
+
+ vp_iowrite16(index, &cfg->cfg.queue_select);
+ vp_iowrite16(1, &cfg->queue_reset);
+
+ while (vp_ioread16(&cfg->queue_reset))
+ msleep(1);
+
+ while (vp_ioread16(&cfg->cfg.queue_enable))
+ msleep(1);
+}
+EXPORT_SYMBOL_GPL(vp_modern_set_queue_reset);
+
+/*
* vp_modern_queue_vector - set the MSIX vector for a specific virtqueue
* @mdev: the modern virtio-pci device
* @index: queue index
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 643ca779fcc6..4620e9d79dde 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -85,6 +85,71 @@ struct vring_desc_extra {
u16 next; /* The next desc state in a list. */
};
+struct vring_virtqueue_split {
+ /* Actual memory layout for this queue. */
+ struct vring vring;
+
+ /* Last written value to avail->flags */
+ u16 avail_flags_shadow;
+
+ /*
+ * Last written value to avail->idx in
+ * guest byte order.
+ */
+ u16 avail_idx_shadow;
+
+ /* Per-descriptor state. */
+ struct vring_desc_state_split *desc_state;
+ struct vring_desc_extra *desc_extra;
+
+ /* DMA address and size information */
+ dma_addr_t queue_dma_addr;
+ size_t queue_size_in_bytes;
+
+ /*
+ * The parameters for creating vrings are reserved for creating new
+ * vring.
+ */
+ u32 vring_align;
+ bool may_reduce_num;
+};
+
+struct vring_virtqueue_packed {
+ /* Actual memory layout for this queue. */
+ struct {
+ unsigned int num;
+ struct vring_packed_desc *desc;
+ struct vring_packed_desc_event *driver;
+ struct vring_packed_desc_event *device;
+ } vring;
+
+ /* Driver ring wrap counter. */
+ bool avail_wrap_counter;
+
+ /* Avail used flags. */
+ u16 avail_used_flags;
+
+ /* Index of the next avail descriptor. */
+ u16 next_avail_idx;
+
+ /*
+ * Last written value to driver->flags in
+ * guest byte order.
+ */
+ u16 event_flags_shadow;
+
+ /* Per-descriptor state. */
+ struct vring_desc_state_packed *desc_state;
+ struct vring_desc_extra *desc_extra;
+
+ /* DMA address and size information */
+ dma_addr_t ring_dma_addr;
+ dma_addr_t driver_event_dma_addr;
+ dma_addr_t device_event_dma_addr;
+ size_t ring_size_in_bytes;
+ size_t event_size_in_bytes;
+};
+
struct vring_virtqueue {
struct virtqueue vq;
@@ -124,64 +189,10 @@ struct vring_virtqueue {
union {
/* Available for split ring */
- struct {
- /* Actual memory layout for this queue. */
- struct vring vring;
-
- /* Last written value to avail->flags */
- u16 avail_flags_shadow;
-
- /*
- * Last written value to avail->idx in
- * guest byte order.
- */
- u16 avail_idx_shadow;
-
- /* Per-descriptor state. */
- struct vring_desc_state_split *desc_state;
- struct vring_desc_extra *desc_extra;
-
- /* DMA address and size information */
- dma_addr_t queue_dma_addr;
- size_t queue_size_in_bytes;
- } split;
+ struct vring_virtqueue_split split;
/* Available for packed ring */
- struct {
- /* Actual memory layout for this queue. */
- struct {
- unsigned int num;
- struct vring_packed_desc *desc;
- struct vring_packed_desc_event *driver;
- struct vring_packed_desc_event *device;
- } vring;
-
- /* Driver ring wrap counter. */
- bool avail_wrap_counter;
-
- /* Avail used flags. */
- u16 avail_used_flags;
-
- /* Index of the next avail descriptor. */
- u16 next_avail_idx;
-
- /*
- * Last written value to driver->flags in
- * guest byte order.
- */
- u16 event_flags_shadow;
-
- /* Per-descriptor state. */
- struct vring_desc_state_packed *desc_state;
- struct vring_desc_extra *desc_extra;
-
- /* DMA address and size information */
- dma_addr_t ring_dma_addr;
- dma_addr_t driver_event_dma_addr;
- dma_addr_t device_event_dma_addr;
- size_t ring_size_in_bytes;
- size_t event_size_in_bytes;
- } packed;
+ struct vring_virtqueue_packed packed;
};
/* How to notify other side. FIXME: commonalize hcalls! */
@@ -200,6 +211,16 @@ struct vring_virtqueue {
#endif
};
+static struct virtqueue *__vring_new_virtqueue(unsigned int index,
+ struct vring_virtqueue_split *vring_split,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool context,
+ bool (*notify)(struct virtqueue *),
+ void (*callback)(struct virtqueue *),
+ const char *name);
+static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num);
+static void vring_free(struct virtqueue *_vq);
/*
* Helpers.
@@ -364,6 +385,24 @@ static int vring_mapping_error(const struct vring_virtqueue *vq,
return dma_mapping_error(vring_dma_dev(vq), addr);
}
+static void virtqueue_init(struct vring_virtqueue *vq, u32 num)
+{
+ vq->vq.num_free = num;
+
+ if (vq->packed_ring)
+ vq->last_used_idx = 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR);
+ else
+ vq->last_used_idx = 0;
+
+ vq->event_triggered = false;
+ vq->num_added = 0;
+
+#ifdef DEBUG
+ vq->in_use = false;
+ vq->last_add_time_valid = false;
+#endif
+}
+
/*
* Split ring specific functions - *_split().
@@ -907,28 +946,107 @@ static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
return NULL;
}
-static struct virtqueue *vring_create_virtqueue_split(
- unsigned int index,
- unsigned int num,
- unsigned int vring_align,
- struct virtio_device *vdev,
- bool weak_barriers,
- bool may_reduce_num,
- bool context,
- bool (*notify)(struct virtqueue *),
- void (*callback)(struct virtqueue *),
- const char *name)
+static void virtqueue_vring_init_split(struct vring_virtqueue_split *vring_split,
+ struct vring_virtqueue *vq)
+{
+ struct virtio_device *vdev;
+
+ vdev = vq->vq.vdev;
+
+ vring_split->avail_flags_shadow = 0;
+ vring_split->avail_idx_shadow = 0;
+
+ /* No callback? Tell other side not to bother us. */
+ if (!vq->vq.callback) {
+ vring_split->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
+ if (!vq->event)
+ vring_split->vring.avail->flags = cpu_to_virtio16(vdev,
+ vring_split->avail_flags_shadow);
+ }
+}
+
+static void virtqueue_reinit_split(struct vring_virtqueue *vq)
+{
+ int num;
+
+ num = vq->split.vring.num;
+
+ vq->split.vring.avail->flags = 0;
+ vq->split.vring.avail->idx = 0;
+
+ /* reset avail event */
+ vq->split.vring.avail->ring[num] = 0;
+
+ vq->split.vring.used->flags = 0;
+ vq->split.vring.used->idx = 0;
+
+ /* reset used event */
+ *(__virtio16 *)&(vq->split.vring.used->ring[num]) = 0;
+
+ virtqueue_init(vq, num);
+
+ virtqueue_vring_init_split(&vq->split, vq);
+}
+
+static void virtqueue_vring_attach_split(struct vring_virtqueue *vq,
+ struct vring_virtqueue_split *vring_split)
+{
+ vq->split = *vring_split;
+
+ /* Put everything in free lists. */
+ vq->free_head = 0;
+}
+
+static int vring_alloc_state_extra_split(struct vring_virtqueue_split *vring_split)
+{
+ struct vring_desc_state_split *state;
+ struct vring_desc_extra *extra;
+ u32 num = vring_split->vring.num;
+
+ state = kmalloc_array(num, sizeof(struct vring_desc_state_split), GFP_KERNEL);
+ if (!state)
+ goto err_state;
+
+ extra = vring_alloc_desc_extra(num);
+ if (!extra)
+ goto err_extra;
+
+ memset(state, 0, num * sizeof(struct vring_desc_state_split));
+
+ vring_split->desc_state = state;
+ vring_split->desc_extra = extra;
+ return 0;
+
+err_extra:
+ kfree(state);
+err_state:
+ return -ENOMEM;
+}
+
+static void vring_free_split(struct vring_virtqueue_split *vring_split,
+ struct virtio_device *vdev)
+{
+ vring_free_queue(vdev, vring_split->queue_size_in_bytes,
+ vring_split->vring.desc,
+ vring_split->queue_dma_addr);
+
+ kfree(vring_split->desc_state);
+ kfree(vring_split->desc_extra);
+}
+
+static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split,
+ struct virtio_device *vdev,
+ u32 num,
+ unsigned int vring_align,
+ bool may_reduce_num)
{
- struct virtqueue *vq;
void *queue = NULL;
dma_addr_t dma_addr;
- size_t queue_size_in_bytes;
- struct vring vring;
/* We assume num is a power of 2. */
if (num & (num - 1)) {
dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
- return NULL;
+ return -EINVAL;
}
/* TODO: allocate each queue chunk individually */
@@ -939,11 +1057,11 @@ static struct virtqueue *vring_create_virtqueue_split(
if (queue)
break;
if (!may_reduce_num)
- return NULL;
+ return -ENOMEM;
}
if (!num)
- return NULL;
+ return -ENOMEM;
if (!queue) {
/* Try to get a single page. You are my only hope! */
@@ -951,26 +1069,85 @@ static struct virtqueue *vring_create_virtqueue_split(
&dma_addr, GFP_KERNEL|__GFP_ZERO);
}
if (!queue)
- return NULL;
+ return -ENOMEM;
+
+ vring_init(&vring_split->vring, num, queue, vring_align);
+
+ vring_split->queue_dma_addr = dma_addr;
+ vring_split->queue_size_in_bytes = vring_size(num, vring_align);
+
+ vring_split->vring_align = vring_align;
+ vring_split->may_reduce_num = may_reduce_num;
+
+ return 0;
+}
+
+static struct virtqueue *vring_create_virtqueue_split(
+ unsigned int index,
+ unsigned int num,
+ unsigned int vring_align,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool may_reduce_num,
+ bool context,
+ bool (*notify)(struct virtqueue *),
+ void (*callback)(struct virtqueue *),
+ const char *name)
+{
+ struct vring_virtqueue_split vring_split = {};
+ struct virtqueue *vq;
+ int err;
- queue_size_in_bytes = vring_size(num, vring_align);
- vring_init(&vring, num, queue, vring_align);
+ err = vring_alloc_queue_split(&vring_split, vdev, num, vring_align,
+ may_reduce_num);
+ if (err)
+ return NULL;
- vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
- notify, callback, name);
+ vq = __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
+ context, notify, callback, name);
if (!vq) {
- vring_free_queue(vdev, queue_size_in_bytes, queue,
- dma_addr);
+ vring_free_split(&vring_split, vdev);
return NULL;
}
- to_vvq(vq)->split.queue_dma_addr = dma_addr;
- to_vvq(vq)->split.queue_size_in_bytes = queue_size_in_bytes;
to_vvq(vq)->we_own_ring = true;
return vq;
}
+static int virtqueue_resize_split(struct virtqueue *_vq, u32 num)
+{
+ struct vring_virtqueue_split vring_split = {};
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ struct virtio_device *vdev = _vq->vdev;
+ int err;
+
+ err = vring_alloc_queue_split(&vring_split, vdev, num,
+ vq->split.vring_align,
+ vq->split.may_reduce_num);
+ if (err)
+ goto err;
+
+ err = vring_alloc_state_extra_split(&vring_split);
+ if (err)
+ goto err_state_extra;
+
+ vring_free(&vq->vq);
+
+ virtqueue_vring_init_split(&vring_split, vq);
+
+ virtqueue_init(vq, vring_split.vring.num);
+ virtqueue_vring_attach_split(vq, &vring_split);
+
+ return 0;
+
+err_state_extra:
+ vring_free_split(&vring_split, vdev);
+err:
+ virtqueue_reinit_split(vq);
+ return -ENOMEM;
+}
+
/*
* Packed ring specific functions - *_packed().
@@ -1637,8 +1814,7 @@ static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
return NULL;
}
-static struct vring_desc_extra *vring_alloc_desc_extra(struct vring_virtqueue *vq,
- unsigned int num)
+static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num)
{
struct vring_desc_extra *desc_extra;
unsigned int i;
@@ -1656,19 +1832,32 @@ static struct vring_desc_extra *vring_alloc_desc_extra(struct vring_virtqueue *v
return desc_extra;
}
-static struct virtqueue *vring_create_virtqueue_packed(
- unsigned int index,
- unsigned int num,
- unsigned int vring_align,
- struct virtio_device *vdev,
- bool weak_barriers,
- bool may_reduce_num,
- bool context,
- bool (*notify)(struct virtqueue *),
- void (*callback)(struct virtqueue *),
- const char *name)
+static void vring_free_packed(struct vring_virtqueue_packed *vring_packed,
+ struct virtio_device *vdev)
+{
+ if (vring_packed->vring.desc)
+ vring_free_queue(vdev, vring_packed->ring_size_in_bytes,
+ vring_packed->vring.desc,
+ vring_packed->ring_dma_addr);
+
+ if (vring_packed->vring.driver)
+ vring_free_queue(vdev, vring_packed->event_size_in_bytes,
+ vring_packed->vring.driver,
+ vring_packed->driver_event_dma_addr);
+
+ if (vring_packed->vring.device)
+ vring_free_queue(vdev, vring_packed->event_size_in_bytes,
+ vring_packed->vring.device,
+ vring_packed->device_event_dma_addr);
+
+ kfree(vring_packed->desc_state);
+ kfree(vring_packed->desc_extra);
+}
+
+static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed,
+ struct virtio_device *vdev,
+ u32 num)
{
- struct vring_virtqueue *vq;
struct vring_packed_desc *ring;
struct vring_packed_desc_event *driver, *device;
dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
@@ -1680,7 +1869,11 @@ static struct virtqueue *vring_create_virtqueue_packed(
&ring_dma_addr,
GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
if (!ring)
- goto err_ring;
+ goto err;
+
+ vring_packed->vring.desc = ring;
+ vring_packed->ring_dma_addr = ring_dma_addr;
+ vring_packed->ring_size_in_bytes = ring_size_in_bytes;
event_size_in_bytes = sizeof(struct vring_packed_desc_event);
@@ -1688,13 +1881,112 @@ static struct virtqueue *vring_create_virtqueue_packed(
&driver_event_dma_addr,
GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
if (!driver)
- goto err_driver;
+ goto err;
+
+ vring_packed->vring.driver = driver;
+ vring_packed->event_size_in_bytes = event_size_in_bytes;
+ vring_packed->driver_event_dma_addr = driver_event_dma_addr;
device = vring_alloc_queue(vdev, event_size_in_bytes,
&device_event_dma_addr,
GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
if (!device)
- goto err_device;
+ goto err;
+
+ vring_packed->vring.device = device;
+ vring_packed->device_event_dma_addr = device_event_dma_addr;
+
+ vring_packed->vring.num = num;
+
+ return 0;
+
+err:
+ vring_free_packed(vring_packed, vdev);
+ return -ENOMEM;
+}
+
+static int vring_alloc_state_extra_packed(struct vring_virtqueue_packed *vring_packed)
+{
+ struct vring_desc_state_packed *state;
+ struct vring_desc_extra *extra;
+ u32 num = vring_packed->vring.num;
+
+ state = kmalloc_array(num, sizeof(struct vring_desc_state_packed), GFP_KERNEL);
+ if (!state)
+ goto err_desc_state;
+
+ memset(state, 0, num * sizeof(struct vring_desc_state_packed));
+
+ extra = vring_alloc_desc_extra(num);
+ if (!extra)
+ goto err_desc_extra;
+
+ vring_packed->desc_state = state;
+ vring_packed->desc_extra = extra;
+
+ return 0;
+
+err_desc_extra:
+ kfree(state);
+err_desc_state:
+ return -ENOMEM;
+}
+
+static void virtqueue_vring_init_packed(struct vring_virtqueue_packed *vring_packed,
+ bool callback)
+{
+ vring_packed->next_avail_idx = 0;
+ vring_packed->avail_wrap_counter = 1;
+ vring_packed->event_flags_shadow = 0;
+ vring_packed->avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
+
+ /* No callback? Tell other side not to bother us. */
+ if (!callback) {
+ vring_packed->event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
+ vring_packed->vring.driver->flags =
+ cpu_to_le16(vring_packed->event_flags_shadow);
+ }
+}
+
+static void virtqueue_vring_attach_packed(struct vring_virtqueue *vq,
+ struct vring_virtqueue_packed *vring_packed)
+{
+ vq->packed = *vring_packed;
+
+ /* Put everything in free lists. */
+ vq->free_head = 0;
+}
+
+static void virtqueue_reinit_packed(struct vring_virtqueue *vq)
+{
+ memset(vq->packed.vring.device, 0, vq->packed.event_size_in_bytes);
+ memset(vq->packed.vring.driver, 0, vq->packed.event_size_in_bytes);
+
+ /* we need to reset the desc.flags. For more, see is_used_desc_packed() */
+ memset(vq->packed.vring.desc, 0, vq->packed.ring_size_in_bytes);
+
+ virtqueue_init(vq, vq->packed.vring.num);
+ virtqueue_vring_init_packed(&vq->packed, !!vq->vq.callback);
+}
+
+static struct virtqueue *vring_create_virtqueue_packed(
+ unsigned int index,
+ unsigned int num,
+ unsigned int vring_align,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool may_reduce_num,
+ bool context,
+ bool (*notify)(struct virtqueue *),
+ void (*callback)(struct virtqueue *),
+ const char *name)
+{
+ struct vring_virtqueue_packed vring_packed = {};
+ struct vring_virtqueue *vq;
+ int err;
+
+ if (vring_alloc_queue_packed(&vring_packed, vdev, num))
+ goto err_ring;
vq = kmalloc(sizeof(*vq), GFP_KERNEL);
if (!vq)
@@ -1703,8 +1995,8 @@ static struct virtqueue *vring_create_virtqueue_packed(
vq->vq.callback = callback;
vq->vq.vdev = vdev;
vq->vq.name = name;
- vq->vq.num_free = num;
vq->vq.index = index;
+ vq->vq.reset = false;
vq->we_own_ring = true;
vq->notify = notify;
vq->weak_barriers = weak_barriers;
@@ -1713,15 +2005,8 @@ static struct virtqueue *vring_create_virtqueue_packed(
#else
vq->broken = false;
#endif
- vq->last_used_idx = 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR);
- vq->event_triggered = false;
- vq->num_added = 0;
vq->packed_ring = true;
vq->use_dma_api = vring_use_dma_api(vdev);
-#ifdef DEBUG
- vq->in_use = false;
- vq->last_add_time_valid = false;
-#endif
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
!context;
@@ -1730,65 +2015,58 @@ static struct virtqueue *vring_create_virtqueue_packed(
if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
vq->weak_barriers = false;
- vq->packed.ring_dma_addr = ring_dma_addr;
- vq->packed.driver_event_dma_addr = driver_event_dma_addr;
- vq->packed.device_event_dma_addr = device_event_dma_addr;
+ err = vring_alloc_state_extra_packed(&vring_packed);
+ if (err)
+ goto err_state_extra;
- vq->packed.ring_size_in_bytes = ring_size_in_bytes;
- vq->packed.event_size_in_bytes = event_size_in_bytes;
+ virtqueue_vring_init_packed(&vring_packed, !!callback);
- vq->packed.vring.num = num;
- vq->packed.vring.desc = ring;
- vq->packed.vring.driver = driver;
- vq->packed.vring.device = device;
-
- vq->packed.next_avail_idx = 0;
- vq->packed.avail_wrap_counter = 1;
- vq->packed.event_flags_shadow = 0;
- vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
-
- vq->packed.desc_state = kmalloc_array(num,
- sizeof(struct vring_desc_state_packed),
- GFP_KERNEL);
- if (!vq->packed.desc_state)
- goto err_desc_state;
-
- memset(vq->packed.desc_state, 0,
- num * sizeof(struct vring_desc_state_packed));
-
- /* Put everything in free lists. */
- vq->free_head = 0;
-
- vq->packed.desc_extra = vring_alloc_desc_extra(vq, num);
- if (!vq->packed.desc_extra)
- goto err_desc_extra;
-
- /* No callback? Tell other side not to bother us. */
- if (!callback) {
- vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
- vq->packed.vring.driver->flags =
- cpu_to_le16(vq->packed.event_flags_shadow);
- }
+ virtqueue_init(vq, num);
+ virtqueue_vring_attach_packed(vq, &vring_packed);
spin_lock(&vdev->vqs_list_lock);
list_add_tail(&vq->vq.list, &vdev->vqs);
spin_unlock(&vdev->vqs_list_lock);
return &vq->vq;
-err_desc_extra:
- kfree(vq->packed.desc_state);
-err_desc_state:
+err_state_extra:
kfree(vq);
err_vq:
- vring_free_queue(vdev, event_size_in_bytes, device, device_event_dma_addr);
-err_device:
- vring_free_queue(vdev, event_size_in_bytes, driver, driver_event_dma_addr);
-err_driver:
- vring_free_queue(vdev, ring_size_in_bytes, ring, ring_dma_addr);
+ vring_free_packed(&vring_packed, vdev);
err_ring:
return NULL;
}
+static int virtqueue_resize_packed(struct virtqueue *_vq, u32 num)
+{
+ struct vring_virtqueue_packed vring_packed = {};
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ struct virtio_device *vdev = _vq->vdev;
+ int err;
+
+ if (vring_alloc_queue_packed(&vring_packed, vdev, num))
+ goto err_ring;
+
+ err = vring_alloc_state_extra_packed(&vring_packed);
+ if (err)
+ goto err_state_extra;
+
+ vring_free(&vq->vq);
+
+ virtqueue_vring_init_packed(&vring_packed, !!vq->vq.callback);
+
+ virtqueue_init(vq, vring_packed.vring.num);
+ virtqueue_vring_attach_packed(vq, &vring_packed);
+
+ return 0;
+
+err_state_extra:
+ vring_free_packed(&vring_packed, vdev);
+err_ring:
+ virtqueue_reinit_packed(vq);
+ return -ENOMEM;
+}
+
/*
* Generic functions and exported symbols.
@@ -2131,8 +2409,8 @@ EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
* @_vq: the struct virtqueue we're talking about.
*
* Returns NULL or the "data" token handed to virtqueue_add_*().
- * This is not valid on an active queue; it is useful only for device
- * shutdown.
+ * This is not valid on an active queue; it is useful for device
+ * shutdown or when resetting the queue.
*/
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
@@ -2148,6 +2426,14 @@ static inline bool more_used(const struct vring_virtqueue *vq)
return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
}
+/**
+ * vring_interrupt - notify a virtqueue on an interrupt
+ * @irq: the IRQ number (ignored)
+ * @_vq: the struct virtqueue to notify
+ *
+ * Calls the callback function of @_vq to process the virtqueue
+ * notification.
+ */
irqreturn_t vring_interrupt(int irq, void *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
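For context on the kernel-doc added above: vring_interrupt() is the handler a transport routes the queue's interrupt to. A minimal sketch, assuming a hypothetical transport with one dedicated interrupt line per virtqueue (my_request_vq_irq() and the "my-vq" name are illustrative, not from this patch):

/* Sketch only: route a per-queue interrupt straight to the core handler. */
static int my_request_vq_irq(unsigned int irq, struct virtqueue *vq)
{
	/* vring_interrupt() runs vq's callback on each device notification. */
	return request_irq(irq, vring_interrupt, 0, "my-vq", vq);
}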
@@ -2180,16 +2466,17 @@ irqreturn_t vring_interrupt(int irq, void *_vq)
EXPORT_SYMBOL_GPL(vring_interrupt);
/* Only available for split ring */
-struct virtqueue *__vring_new_virtqueue(unsigned int index,
- struct vring vring,
- struct virtio_device *vdev,
- bool weak_barriers,
- bool context,
- bool (*notify)(struct virtqueue *),
- void (*callback)(struct virtqueue *),
- const char *name)
+static struct virtqueue *__vring_new_virtqueue(unsigned int index,
+ struct vring_virtqueue_split *vring_split,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool context,
+ bool (*notify)(struct virtqueue *),
+ void (*callback)(struct virtqueue *),
+ const char *name)
{
struct vring_virtqueue *vq;
+ int err;
if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
return NULL;
@@ -2202,8 +2489,8 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
vq->vq.callback = callback;
vq->vq.vdev = vdev;
vq->vq.name = name;
- vq->vq.num_free = vring.num;
vq->vq.index = index;
+ vq->vq.reset = false;
vq->we_own_ring = false;
vq->notify = notify;
vq->weak_barriers = weak_barriers;
@@ -2212,14 +2499,7 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
#else
vq->broken = false;
#endif
- vq->last_used_idx = 0;
- vq->event_triggered = false;
- vq->num_added = 0;
vq->use_dma_api = vring_use_dma_api(vdev);
-#ifdef DEBUG
- vq->in_use = false;
- vq->last_add_time_valid = false;
-#endif
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
!context;
@@ -2228,47 +2508,22 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
vq->weak_barriers = false;
- vq->split.queue_dma_addr = 0;
- vq->split.queue_size_in_bytes = 0;
-
- vq->split.vring = vring;
- vq->split.avail_flags_shadow = 0;
- vq->split.avail_idx_shadow = 0;
-
- /* No callback? Tell other side not to bother us. */
- if (!callback) {
- vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
- if (!vq->event)
- vq->split.vring.avail->flags = cpu_to_virtio16(vdev,
- vq->split.avail_flags_shadow);
+ err = vring_alloc_state_extra_split(vring_split);
+ if (err) {
+ kfree(vq);
+ return NULL;
}
- vq->split.desc_state = kmalloc_array(vring.num,
- sizeof(struct vring_desc_state_split), GFP_KERNEL);
- if (!vq->split.desc_state)
- goto err_state;
-
- vq->split.desc_extra = vring_alloc_desc_extra(vq, vring.num);
- if (!vq->split.desc_extra)
- goto err_extra;
+ virtqueue_vring_init_split(vring_split, vq);
- /* Put everything in free lists. */
- vq->free_head = 0;
- memset(vq->split.desc_state, 0, vring.num *
- sizeof(struct vring_desc_state_split));
+ virtqueue_init(vq, vring_split->vring.num);
+ virtqueue_vring_attach_split(vq, vring_split);
spin_lock(&vdev->vqs_list_lock);
list_add_tail(&vq->vq.list, &vdev->vqs);
spin_unlock(&vdev->vqs_list_lock);
return &vq->vq;
-
-err_extra:
- kfree(vq->split.desc_state);
-err_state:
- kfree(vq);
- return NULL;
}
-EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
struct virtqueue *vring_create_virtqueue(
unsigned int index,
@@ -2294,6 +2549,75 @@ struct virtqueue *vring_create_virtqueue(
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);
+/**
+ * virtqueue_resize - resize the vring of vq
+ * @_vq: the struct virtqueue we're talking about.
+ * @num: new ring size
+ * @recycle: callback to recycle buffers that are no longer used
+ *
+ * If a new vring really needs to be created, the current vq is put into the
+ * reset state. The passed callback is then invoked to recycle every buffer
+ * that is no longer used. The old vring is released only after the new vring
+ * has been created successfully.
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Returns zero or a negative error.
+ * 0: success.
+ * -ENOMEM: Failed to allocate a new ring; fall back to the original ring size.
+ *          The vq can still work normally.
+ * -EBUSY: Failed to sync with the device; the vq may not work properly.
+ * -ENOENT: Transport or device not supported.
+ * -E2BIG/-EINVAL: @num is invalid.
+ * -EPERM: Operation not permitted
+ *
+ */
+int virtqueue_resize(struct virtqueue *_vq, u32 num,
+ void (*recycle)(struct virtqueue *vq, void *buf))
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ struct virtio_device *vdev = vq->vq.vdev;
+ void *buf;
+ int err;
+
+ if (!vq->we_own_ring)
+ return -EPERM;
+
+ if (num > vq->vq.num_max)
+ return -E2BIG;
+
+ if (!num)
+ return -EINVAL;
+
+ if ((vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num) == num)
+ return 0;
+
+ if (!vdev->config->disable_vq_and_reset)
+ return -ENOENT;
+
+ if (!vdev->config->enable_vq_after_reset)
+ return -ENOENT;
+
+ err = vdev->config->disable_vq_and_reset(_vq);
+ if (err)
+ return err;
+
+ while ((buf = virtqueue_detach_unused_buf(_vq)) != NULL)
+ recycle(_vq, buf);
+
+ if (vq->packed_ring)
+ err = virtqueue_resize_packed(_vq, num);
+ else
+ err = virtqueue_resize_split(_vq, num);
+
+ if (vdev->config->enable_vq_after_reset(_vq))
+ return -EBUSY;
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(virtqueue_resize);
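
A minimal usage sketch (not taken from this patch), assuming a hypothetical driver whose buffers were allocated with kmalloc() and handed to virtqueue_add_*() as the data token; the recycle callback only ever sees buffers the device did not consume before the queue was reset:

static void my_recycle_buf(struct virtqueue *vq, void *buf)
{
	/* buf is the data token passed to virtqueue_add_*(); free it. */
	kfree(buf);
}

static int my_change_ring_size(struct virtqueue *vq, u32 new_num)
{
	int err;

	err = virtqueue_resize(vq, new_num, my_recycle_buf);
	if (err == -ENOMEM) {
		/* Allocation failed; the old ring is still usable. */
		return 0;
	}

	return err;
}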
+
/* Only available for split ring */
struct virtqueue *vring_new_virtqueue(unsigned int index,
unsigned int num,
@@ -2306,25 +2630,21 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
void (*callback)(struct virtqueue *vq),
const char *name)
{
- struct vring vring;
+ struct vring_virtqueue_split vring_split = {};
if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
return NULL;
- vring_init(&vring, num, pages, vring_align);
- return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
- notify, callback, name);
+ vring_init(&vring_split.vring, num, pages, vring_align);
+ return __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
+ context, notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
-void vring_del_virtqueue(struct virtqueue *_vq)
+static void vring_free(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
- spin_lock(&vq->vq.vdev->vqs_list_lock);
- list_del(&_vq->list);
- spin_unlock(&vq->vq.vdev->vqs_list_lock);
-
if (vq->we_own_ring) {
if (vq->packed_ring) {
vring_free_queue(vq->vq.vdev,
@@ -2355,6 +2675,18 @@ void vring_del_virtqueue(struct virtqueue *_vq)
kfree(vq->split.desc_state);
kfree(vq->split.desc_extra);
}
+}
+
+void vring_del_virtqueue(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ spin_lock(&vq->vq.vdev->vqs_list_lock);
+ list_del(&_vq->list);
+ spin_unlock(&vq->vq.vdev->vqs_list_lock);
+
+ vring_free(_vq);
+
kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);
@@ -2402,6 +2734,30 @@ unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
+/*
+ * This function should only be called by the core, not directly by the driver.
+ */
+void __virtqueue_break(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ /* Pairs with READ_ONCE() in virtqueue_is_broken(). */
+ WRITE_ONCE(vq->broken, true);
+}
+EXPORT_SYMBOL_GPL(__virtqueue_break);
+
+/*
+ * This function should only be called by the core, not directly by the driver.
+ */
+void __virtqueue_unbreak(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ /* Pairs with READ_ONCE() in virtqueue_is_broken(). */
+ WRITE_ONCE(vq->broken, false);
+}
+EXPORT_SYMBOL_GPL(__virtqueue_unbreak);
+
bool virtqueue_is_broken(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
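
As a sketch of how a transport core might use the new break/unbreak helpers around a per-queue reset (my_transport_reset_vq() and my_transport_reenable_vq() are hypothetical placeholders, not functions from this patch):

static int my_disable_vq_and_reset(struct virtqueue *vq)
{
	/* Make virtqueue_is_broken() return true while the queue is down. */
	__virtqueue_break(vq);

	return my_transport_reset_vq(vq);
}

static int my_enable_vq_after_reset(struct virtqueue *vq)
{
	int err;

	err = my_transport_reenable_vq(vq);
	if (!err)
		__virtqueue_unbreak(vq);	/* the vq is usable again */

	return err;
}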
diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
index c40f7deb6b5a..9670cc79371d 100644
--- a/drivers/virtio/virtio_vdpa.c
+++ b/drivers/virtio/virtio_vdpa.c
@@ -183,6 +183,8 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
goto error_new_virtqueue;
}
+ vq->num_max = max_num;
+
/* Setup virtqueue callback */
cb.callback = callback ? virtio_vdpa_virtqueue_cb : NULL;
cb.private = info;
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 32fd37698932..9295492d24f7 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -1647,6 +1647,7 @@ config SIEMENS_SIMATIC_IPC_WDT
tristate "Siemens Simatic IPC Watchdog"
depends on SIEMENS_SIMATIC_IPC
select WATCHDOG_CORE
+ select P2SB
help
This driver adds support for several watchdogs found in Industrial
PCs from Siemens.
@@ -1962,6 +1963,14 @@ config MEN_A21_WDT
# PPC64 Architecture
+config PSERIES_WDT
+ tristate "POWER Architecture Platform Watchdog Timer"
+ depends on PPC_PSERIES
+ select WATCHDOG_CORE
+ help
+ Driver for virtual watchdog timers provided by PAPR
+ hypervisors (e.g. PowerVM, KVM).
+
config WATCHDOG_RTAS
tristate "RTAS watchdog"
depends on PPC_RTAS
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index c324e9d820e9..cdeb119e6e61 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -187,6 +187,7 @@ obj-$(CONFIG_BOOKE_WDT) += booke_wdt.o
obj-$(CONFIG_MEN_A21_WDT) += mena21_wdt.o
# PPC64 Architecture
+obj-$(CONFIG_PSERIES_WDT) += pseries-wdt.o
obj-$(CONFIG_WATCHDOG_RTAS) += wdrtas.o
# S390 Architecture
diff --git a/drivers/watchdog/armada_37xx_wdt.c b/drivers/watchdog/armada_37xx_wdt.c
index 1635f421ef2c..854b1cc723cb 100644
--- a/drivers/watchdog/armada_37xx_wdt.c
+++ b/drivers/watchdog/armada_37xx_wdt.c
@@ -274,6 +274,8 @@ static int armada_37xx_wdt_probe(struct platform_device *pdev)
if (!res)
return -ENODEV;
dev->reg = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!dev->reg)
+ return -ENOMEM;
/* init clock */
dev->clk = devm_clk_get(&pdev->dev, NULL);
diff --git a/drivers/watchdog/bcm7038_wdt.c b/drivers/watchdog/bcm7038_wdt.c
index 1ffcf6aca6ae..9388838899ac 100644
--- a/drivers/watchdog/bcm7038_wdt.c
+++ b/drivers/watchdog/bcm7038_wdt.c
@@ -192,7 +192,6 @@ static int bcm7038_wdt_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int bcm7038_wdt_suspend(struct device *dev)
{
struct bcm7038_watchdog *wdt = dev_get_drvdata(dev);
@@ -212,10 +211,9 @@ static int bcm7038_wdt_resume(struct device *dev)
return 0;
}
-#endif
-static SIMPLE_DEV_PM_OPS(bcm7038_wdt_pm_ops, bcm7038_wdt_suspend,
- bcm7038_wdt_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(bcm7038_wdt_pm_ops,
+ bcm7038_wdt_suspend, bcm7038_wdt_resume);
static const struct of_device_id bcm7038_wdt_match[] = {
{ .compatible = "brcm,bcm6345-wdt" },
@@ -236,7 +234,7 @@ static struct platform_driver bcm7038_wdt_driver = {
.driver = {
.name = "bcm7038-wdt",
.of_match_table = bcm7038_wdt_match,
- .pm = &bcm7038_wdt_pm_ops,
+ .pm = pm_sleep_ptr(&bcm7038_wdt_pm_ops),
}
};
module_platform_driver(bcm7038_wdt_driver);
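
The same conversion recurs in several watchdog drivers below: DEFINE_SIMPLE_DEV_PM_OPS() replaces SIMPLE_DEV_PM_OPS() plus the #ifdef CONFIG_PM_SLEEP guards, and pm_sleep_ptr() evaluates to NULL when sleep support is disabled so the ops and callbacks can be discarded as dead code. A minimal sketch of the pattern, with hypothetical foo_suspend()/foo_resume() callbacks:

static int foo_suspend(struct device *dev)
{
	return 0;	/* quiesce the hardware here */
}

static int foo_resume(struct device *dev)
{
	return 0;	/* restart the hardware here */
}

static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		/* NULL when CONFIG_PM_SLEEP is off, without any #ifdef. */
		.pm	= pm_sleep_ptr(&foo_pm_ops),
	},
};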
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index 5e4dc1a0f2c6..75da5cd02615 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -74,7 +74,7 @@ static unsigned long long period_to_sec(unsigned int period)
/*
* This procedure will find the highest period which will give a timeout
* greater than the one required. e.g. for a bus speed of 66666666 and
- * and a parameter of 2 secs, then this procedure will return a value of 38.
+ * a parameter of 2 secs, then this procedure will return a value of 38.
*/
static unsigned int sec_to_period(unsigned int secs)
{
diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
index cd578843277e..52962e8d11a6 100644
--- a/drivers/watchdog/dw_wdt.c
+++ b/drivers/watchdog/dw_wdt.c
@@ -218,7 +218,7 @@ static int dw_wdt_set_timeout(struct watchdog_device *wdd, unsigned int top_s)
/*
* Set the new value in the watchdog. Some versions of dw_wdt
- * have have TOPINIT in the TIMEOUT_RANGE register (as per
+ * have TOPINIT in the TIMEOUT_RANGE register (as per
* CP_WDT_DUAL_TOP in WDT_COMP_PARAMS_1). On those we
* effectively get a pat of the watchdog right here.
*/
@@ -375,7 +375,6 @@ static irqreturn_t dw_wdt_irq(int irq, void *devid)
return IRQ_HANDLED;
}
-#ifdef CONFIG_PM_SLEEP
static int dw_wdt_suspend(struct device *dev)
{
struct dw_wdt *dw_wdt = dev_get_drvdata(dev);
@@ -410,9 +409,8 @@ static int dw_wdt_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
-static SIMPLE_DEV_PM_OPS(dw_wdt_pm_ops, dw_wdt_suspend, dw_wdt_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(dw_wdt_pm_ops, dw_wdt_suspend, dw_wdt_resume);
/*
* In case if DW WDT IP core is synthesized with fixed TOP feature disabled the
@@ -710,7 +708,7 @@ static struct platform_driver dw_wdt_driver = {
.driver = {
.name = "dw_wdt",
.of_match_table = of_match_ptr(dw_wdt_of_match),
- .pm = &dw_wdt_pm_ops,
+ .pm = pm_sleep_ptr(&dw_wdt_pm_ops),
},
};
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
index 7f59c680de25..6a16d3d0bb1e 100644
--- a/drivers/watchdog/f71808e_wdt.c
+++ b/drivers/watchdog/f71808e_wdt.c
@@ -634,7 +634,9 @@ static int __init fintek_wdt_init(void)
pdata.type = ret;
- platform_driver_register(&fintek_wdt_driver);
+ ret = platform_driver_register(&fintek_wdt_driver);
+ if (ret)
+ return ret;
wdt_res.name = "superio port";
wdt_res.flags = IORESOURCE_IO;
diff --git a/drivers/watchdog/max77620_wdt.c b/drivers/watchdog/max77620_wdt.c
index b76ad6ba0915..33835c0b06de 100644
--- a/drivers/watchdog/max77620_wdt.c
+++ b/drivers/watchdog/max77620_wdt.c
@@ -6,7 +6,7 @@
* Copyright (C) 2022 Luca Ceresoli
*
* Author: Laxman Dewangan <ldewangan@nvidia.com>
- * Author: Luca Ceresoli <luca@lucaceresoli.net>
+ * Author: Luca Ceresoli <luca.ceresoli@bootlin.com>
*/
#include <linux/err.h>
@@ -260,5 +260,5 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
"(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
-MODULE_AUTHOR("Luca Ceresoli <luca@lucaceresoli.net>");
+MODULE_AUTHOR("Luca Ceresoli <luca.ceresoli@bootlin.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
index f0d4e3cc7459..e97787536792 100644
--- a/drivers/watchdog/mtk_wdt.c
+++ b/drivers/watchdog/mtk_wdt.c
@@ -401,7 +401,6 @@ static int mtk_wdt_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int mtk_wdt_suspend(struct device *dev)
{
struct mtk_wdt_dev *mtk_wdt = dev_get_drvdata(dev);
@@ -423,7 +422,6 @@ static int mtk_wdt_resume(struct device *dev)
return 0;
}
-#endif
static const struct of_device_id mtk_wdt_dt_ids[] = {
{ .compatible = "mediatek,mt2712-wdt", .data = &mt2712_data },
@@ -437,16 +435,14 @@ static const struct of_device_id mtk_wdt_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, mtk_wdt_dt_ids);
-static const struct dev_pm_ops mtk_wdt_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(mtk_wdt_suspend,
- mtk_wdt_resume)
-};
+static DEFINE_SIMPLE_DEV_PM_OPS(mtk_wdt_pm_ops,
+ mtk_wdt_suspend, mtk_wdt_resume);
static struct platform_driver mtk_wdt_driver = {
.probe = mtk_wdt_probe,
.driver = {
.name = DRV_NAME,
- .pm = &mtk_wdt_pm_ops,
+ .pm = pm_sleep_ptr(&mtk_wdt_pm_ops),
.of_match_table = mtk_wdt_dt_ids,
},
};
diff --git a/drivers/watchdog/pc87413_wdt.c b/drivers/watchdog/pc87413_wdt.c
index 9f9a340427fc..c7f745caf203 100644
--- a/drivers/watchdog/pc87413_wdt.c
+++ b/drivers/watchdog/pc87413_wdt.c
@@ -442,7 +442,7 @@ static long pc87413_ioctl(struct file *file, unsigned int cmd,
}
}
-/* -- Notifier funtions -----------------------------------------*/
+/* -- Notifier functions -----------------------------------------*/
/**
* pc87413_notify_sys:
diff --git a/drivers/watchdog/pm8916_wdt.c b/drivers/watchdog/pm8916_wdt.c
index 0937b8d33104..f4bfbffaf49c 100644
--- a/drivers/watchdog/pm8916_wdt.c
+++ b/drivers/watchdog/pm8916_wdt.c
@@ -9,6 +9,12 @@
#include <linux/regmap.h>
#include <linux/watchdog.h>
+#define PON_POFF_REASON1 0x0c
+#define PON_POFF_REASON1_PMIC_WD BIT(2)
+#define PON_POFF_REASON2 0x0d
+#define PON_POFF_REASON2_UVLO BIT(5)
+#define PON_POFF_REASON2_OTST3 BIT(6)
+
#define PON_INT_RT_STS 0x10
#define PMIC_WD_BARK_STS_BIT BIT(6)
@@ -58,9 +64,8 @@ static int pm8916_wdt_ping(struct watchdog_device *wdev)
{
struct pm8916_wdt *wdt = watchdog_get_drvdata(wdev);
- return regmap_update_bits(wdt->regmap,
- wdt->baseaddr + PON_PMIC_WD_RESET_PET,
- WATCHDOG_PET_BIT, WATCHDOG_PET_BIT);
+ return regmap_write(wdt->regmap, wdt->baseaddr + PON_PMIC_WD_RESET_PET,
+ WATCHDOG_PET_BIT);
}
static int pm8916_wdt_configure_timers(struct watchdog_device *wdev)
@@ -111,12 +116,14 @@ static irqreturn_t pm8916_wdt_isr(int irq, void *arg)
}
static const struct watchdog_info pm8916_wdt_ident = {
- .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE |
+ WDIOF_OVERHEAT | WDIOF_CARDRESET | WDIOF_POWERUNDER,
.identity = "QCOM PM8916 PON WDT",
};
static const struct watchdog_info pm8916_wdt_pt_ident = {
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE |
+ WDIOF_OVERHEAT | WDIOF_CARDRESET | WDIOF_POWERUNDER |
WDIOF_PRETIMEOUT,
.identity = "QCOM PM8916 PON WDT",
};
@@ -135,7 +142,9 @@ static int pm8916_wdt_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct pm8916_wdt *wdt;
struct device *parent;
+ unsigned int val;
int err, irq;
+ u8 poff[2];
wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
@@ -176,6 +185,30 @@ static int pm8916_wdt_probe(struct platform_device *pdev)
wdt->wdev.info = &pm8916_wdt_ident;
}
+ err = regmap_bulk_read(wdt->regmap, wdt->baseaddr + PON_POFF_REASON1,
+ &poff, ARRAY_SIZE(poff));
+ if (err) {
+ dev_err(dev, "failed to read POFF reason: %d\n", err);
+ return err;
+ }
+
+ dev_dbg(dev, "POFF reason: %#x %#x\n", poff[0], poff[1]);
+ if (poff[0] & PON_POFF_REASON1_PMIC_WD)
+ wdt->wdev.bootstatus |= WDIOF_CARDRESET;
+ if (poff[1] & PON_POFF_REASON2_UVLO)
+ wdt->wdev.bootstatus |= WDIOF_POWERUNDER;
+ if (poff[1] & PON_POFF_REASON2_OTST3)
+ wdt->wdev.bootstatus |= WDIOF_OVERHEAT;
+
+ err = regmap_read(wdt->regmap, wdt->baseaddr + PON_PMIC_WD_RESET_S2_CTL2,
+ &val);
+ if (err) {
+ dev_err(dev, "failed to check if watchdog is active: %d\n", err);
+ return err;
+ }
+ if (val & S2_RESET_EN_BIT)
+ set_bit(WDOG_HW_RUNNING, &wdt->wdev.status);
+
/* Configure watchdog to hard-reset mode */
err = regmap_write(wdt->regmap,
wdt->baseaddr + PON_PMIC_WD_RESET_S2_CTL,
diff --git a/drivers/watchdog/pseries-wdt.c b/drivers/watchdog/pseries-wdt.c
new file mode 100644
index 000000000000..7f53b5293409
--- /dev/null
+++ b/drivers/watchdog/pseries-wdt.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2022 International Business Machines, Inc.
+ */
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/math.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/time64.h>
+#include <linux/watchdog.h>
+
+#define DRV_NAME "pseries-wdt"
+
+/*
+ * H_WATCHDOG Input
+ *
+ * R4: "flags":
+ *
+ * Bits 48-55: "operation"
+ */
+#define PSERIES_WDTF_OP_START 0x100UL /* start timer */
+#define PSERIES_WDTF_OP_STOP 0x200UL /* stop timer */
+#define PSERIES_WDTF_OP_QUERY 0x300UL /* query timer capabilities */
+
+/*
+ * Bits 56-63: "timeoutAction" (for "Start Watchdog" only)
+ */
+#define PSERIES_WDTF_ACTION_HARD_POWEROFF 0x1UL /* poweroff */
+#define PSERIES_WDTF_ACTION_HARD_RESTART 0x2UL /* restart */
+#define PSERIES_WDTF_ACTION_DUMP_RESTART 0x3UL /* dump + restart */
+
+/*
+ * H_WATCHDOG Output
+ *
+ * R3: Return code
+ *
+ * H_SUCCESS The operation completed.
+ *
+ * H_BUSY The hypervisor is too busy; retry the operation.
+ *
+ * H_PARAMETER The given "flags" are somehow invalid. Either the
+ * "operation" or "timeoutAction" is invalid, or a
+ * reserved bit is set.
+ *
+ * H_P2 The given "watchdogNumber" is zero or exceeds the
+ * supported maximum value.
+ *
+ * H_P3 The given "timeoutInMs" is below the supported
+ * minimum value.
+ *
+ * H_NOOP The given "watchdogNumber" is already stopped.
+ *
+ * H_HARDWARE The operation failed for ineffable reasons.
+ *
+ * H_FUNCTION The H_WATCHDOG hypercall is not supported by this
+ * hypervisor.
+ *
+ * R4:
+ *
+ * - For the "Query Watchdog Capabilities" operation, a 64-bit
+ * structure:
+ */
+#define PSERIES_WDTQ_MIN_TIMEOUT(cap) (((cap) >> 48) & 0xffff)
+#define PSERIES_WDTQ_MAX_NUMBER(cap) (((cap) >> 32) & 0xffff)
+
+static const unsigned long pseries_wdt_action[] = {
+ [0] = PSERIES_WDTF_ACTION_HARD_POWEROFF,
+ [1] = PSERIES_WDTF_ACTION_HARD_RESTART,
+ [2] = PSERIES_WDTF_ACTION_DUMP_RESTART,
+};
+
+#define WATCHDOG_ACTION 1
+static unsigned int action = WATCHDOG_ACTION;
+module_param(action, uint, 0444);
+MODULE_PARM_DESC(action, "Action taken when watchdog expires (default="
+ __MODULE_STRING(WATCHDOG_ACTION) ")");
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0444);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+#define WATCHDOG_TIMEOUT 60
+static unsigned int timeout = WATCHDOG_TIMEOUT;
+module_param(timeout, uint, 0444);
+MODULE_PARM_DESC(timeout, "Initial watchdog timeout in seconds (default="
+ __MODULE_STRING(WATCHDOG_TIMEOUT) ")");
+
+struct pseries_wdt {
+ struct watchdog_device wd;
+ unsigned long action;
+ unsigned long num; /* Watchdog numbers are 1-based */
+};
+
+static int pseries_wdt_start(struct watchdog_device *wdd)
+{
+ struct pseries_wdt *pw = watchdog_get_drvdata(wdd);
+ struct device *dev = wdd->parent;
+ unsigned long flags, msecs;
+ long rc;
+
+ flags = pw->action | PSERIES_WDTF_OP_START;
+ msecs = wdd->timeout * MSEC_PER_SEC;
+ rc = plpar_hcall_norets(H_WATCHDOG, flags, pw->num, msecs);
+ if (rc != H_SUCCESS) {
+ dev_crit(dev, "H_WATCHDOG: %ld: failed to start timer %lu",
+ rc, pw->num);
+ return -EIO;
+ }
+ return 0;
+}
+
+static int pseries_wdt_stop(struct watchdog_device *wdd)
+{
+ struct pseries_wdt *pw = watchdog_get_drvdata(wdd);
+ struct device *dev = wdd->parent;
+ long rc;
+
+ rc = plpar_hcall_norets(H_WATCHDOG, PSERIES_WDTF_OP_STOP, pw->num);
+ if (rc != H_SUCCESS && rc != H_NOOP) {
+ dev_crit(dev, "H_WATCHDOG: %ld: failed to stop timer %lu",
+ rc, pw->num);
+ return -EIO;
+ }
+ return 0;
+}
+
+static struct watchdog_info pseries_wdt_info = {
+ .identity = DRV_NAME,
+ .options = WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT
+ | WDIOF_PRETIMEOUT,
+};
+
+static const struct watchdog_ops pseries_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = pseries_wdt_start,
+ .stop = pseries_wdt_stop,
+};
+
+static int pseries_wdt_probe(struct platform_device *pdev)
+{
+ unsigned long ret[PLPAR_HCALL_BUFSIZE] = { 0 };
+ struct pseries_wdt *pw;
+ unsigned long cap;
+ long msecs, rc;
+ int err;
+
+ rc = plpar_hcall(H_WATCHDOG, ret, PSERIES_WDTF_OP_QUERY);
+ if (rc == H_FUNCTION)
+ return -ENODEV;
+ if (rc != H_SUCCESS)
+ return -EIO;
+ cap = ret[0];
+
+ pw = devm_kzalloc(&pdev->dev, sizeof(*pw), GFP_KERNEL);
+ if (!pw)
+ return -ENOMEM;
+
+ /*
+ * Assume watchdogNumber 1 for now. If we ever support
+ * multiple timers we will need to devise a way to choose a
+ * distinct watchdogNumber for each platform device at device
+ * registration time.
+ */
+ pw->num = 1;
+ if (PSERIES_WDTQ_MAX_NUMBER(cap) < pw->num)
+ return -ENODEV;
+
+ if (action >= ARRAY_SIZE(pseries_wdt_action))
+ return -EINVAL;
+ pw->action = pseries_wdt_action[action];
+
+ pw->wd.parent = &pdev->dev;
+ pw->wd.info = &pseries_wdt_info;
+ pw->wd.ops = &pseries_wdt_ops;
+ msecs = PSERIES_WDTQ_MIN_TIMEOUT(cap);
+ pw->wd.min_timeout = DIV_ROUND_UP(msecs, MSEC_PER_SEC);
+ pw->wd.max_timeout = UINT_MAX / 1000; /* from linux/watchdog.h */
+ pw->wd.timeout = timeout;
+ if (watchdog_init_timeout(&pw->wd, 0, NULL))
+ return -EINVAL;
+ watchdog_set_nowayout(&pw->wd, nowayout);
+ watchdog_stop_on_reboot(&pw->wd);
+ watchdog_stop_on_unregister(&pw->wd);
+ watchdog_set_drvdata(&pw->wd, pw);
+
+ err = devm_watchdog_register_device(&pdev->dev, &pw->wd);
+ if (err)
+ return err;
+
+ platform_set_drvdata(pdev, &pw->wd);
+
+ return 0;
+}
+
+static int pseries_wdt_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct watchdog_device *wd = platform_get_drvdata(pdev);
+
+ if (watchdog_active(wd))
+ return pseries_wdt_stop(wd);
+ return 0;
+}
+
+static int pseries_wdt_resume(struct platform_device *pdev)
+{
+ struct watchdog_device *wd = platform_get_drvdata(pdev);
+
+ if (watchdog_active(wd))
+ return pseries_wdt_start(wd);
+ return 0;
+}
+
+static const struct platform_device_id pseries_wdt_id[] = {
+ { .name = "pseries-wdt" },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, pseries_wdt_id);
+
+static struct platform_driver pseries_wdt_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ },
+ .id_table = pseries_wdt_id,
+ .probe = pseries_wdt_probe,
+ .resume = pseries_wdt_resume,
+ .suspend = pseries_wdt_suspend,
+};
+module_platform_driver(pseries_wdt_driver);
+
+MODULE_AUTHOR("Alexey Kardashevskiy");
+MODULE_AUTHOR("Scott Cheloha");
+MODULE_DESCRIPTION("POWER Architecture Platform Watchdog Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/realtek_otto_wdt.c b/drivers/watchdog/realtek_otto_wdt.c
index 60058a0c3ec4..2a5298c5e8e4 100644
--- a/drivers/watchdog/realtek_otto_wdt.c
+++ b/drivers/watchdog/realtek_otto_wdt.c
@@ -366,6 +366,7 @@ static const struct of_device_id otto_wdt_ids[] = {
{ .compatible = "realtek,rtl8380-wdt" },
{ .compatible = "realtek,rtl8390-wdt" },
{ .compatible = "realtek,rtl9300-wdt" },
+ { .compatible = "realtek,rtl9310-wdt" },
{ }
};
MODULE_DEVICE_TABLE(of, otto_wdt_ids);
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 6db22f2e3a4f..95919392927f 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -845,8 +845,6 @@ static void s3c2410wdt_shutdown(struct platform_device *dev)
s3c2410wdt_stop(&wdt->wdt_device);
}
-#ifdef CONFIG_PM_SLEEP
-
static int s3c2410wdt_suspend(struct device *dev)
{
int ret;
@@ -885,10 +883,9 @@ static int s3c2410wdt_resume(struct device *dev)
return 0;
}
-#endif
-static SIMPLE_DEV_PM_OPS(s3c2410wdt_pm_ops, s3c2410wdt_suspend,
- s3c2410wdt_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(s3c2410wdt_pm_ops,
+ s3c2410wdt_suspend, s3c2410wdt_resume);
static struct platform_driver s3c2410wdt_driver = {
.probe = s3c2410wdt_probe,
@@ -897,7 +894,7 @@ static struct platform_driver s3c2410wdt_driver = {
.id_table = s3c2410_wdt_ids,
.driver = {
.name = "s3c2410-wdt",
- .pm = &s3c2410wdt_pm_ops,
+ .pm = pm_sleep_ptr(&s3c2410wdt_pm_ops),
.of_match_table = of_match_ptr(s3c2410_wdt_match),
},
};
diff --git a/drivers/watchdog/sama5d4_wdt.c b/drivers/watchdog/sama5d4_wdt.c
index ec20ad4e534f..aeee934ca51b 100644
--- a/drivers/watchdog/sama5d4_wdt.c
+++ b/drivers/watchdog/sama5d4_wdt.c
@@ -339,7 +339,6 @@ static const struct of_device_id sama5d4_wdt_of_match[] = {
};
MODULE_DEVICE_TABLE(of, sama5d4_wdt_of_match);
-#ifdef CONFIG_PM_SLEEP
static int sama5d4_wdt_suspend_late(struct device *dev)
{
struct sama5d4_wdt *wdt = dev_get_drvdata(dev);
@@ -366,18 +365,17 @@ static int sama5d4_wdt_resume_early(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops sama5d4_wdt_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(sama5d4_wdt_suspend_late,
- sama5d4_wdt_resume_early)
+ LATE_SYSTEM_SLEEP_PM_OPS(sama5d4_wdt_suspend_late,
+ sama5d4_wdt_resume_early)
};
static struct platform_driver sama5d4_wdt_driver = {
.probe = sama5d4_wdt_probe,
.driver = {
.name = "sama5d4_wdt",
- .pm = &sama5d4_wdt_pm_ops,
+ .pm = pm_sleep_ptr(&sama5d4_wdt_pm_ops),
.of_match_table = sama5d4_wdt_of_match,
}
};
diff --git a/drivers/watchdog/simatic-ipc-wdt.c b/drivers/watchdog/simatic-ipc-wdt.c
index 8bac793c63fb..6599695dc672 100644
--- a/drivers/watchdog/simatic-ipc-wdt.c
+++ b/drivers/watchdog/simatic-ipc-wdt.c
@@ -16,6 +16,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/platform_data/x86/p2sb.h>
#include <linux/platform_data/x86/simatic-ipc-base.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
@@ -54,9 +55,9 @@ static struct resource io_resource_trigger =
DEFINE_RES_IO_NAMED(WD_TRIGGER_IOADR, SZ_1,
KBUILD_MODNAME " WD_TRIGGER_IOADR");
-/* the actual start will be discovered with pci, 0 is a placeholder */
+/* the actual start will be discovered with p2sb, 0 is a placeholder */
static struct resource mem_resource =
- DEFINE_RES_MEM_NAMED(0, SZ_4, "WD_RESET_BASE_ADR");
+ DEFINE_RES_MEM_NAMED(0, 0, "WD_RESET_BASE_ADR");
static u32 wd_timeout_table[] = {2, 4, 6, 8, 16, 32, 48, 64 };
static void __iomem *wd_reset_base_addr;
@@ -150,6 +151,7 @@ static int simatic_ipc_wdt_probe(struct platform_device *pdev)
struct simatic_ipc_platform *plat = pdev->dev.platform_data;
struct device *dev = &pdev->dev;
struct resource *res;
+ int ret;
switch (plat->devmode) {
case SIMATIC_IPC_DEVICE_227E:
@@ -190,15 +192,14 @@ static int simatic_ipc_wdt_probe(struct platform_device *pdev)
if (plat->devmode == SIMATIC_IPC_DEVICE_427E) {
res = &mem_resource;
- /* get GPIO base from PCI */
- res->start = simatic_ipc_get_membase0(PCI_DEVFN(0x1f, 1));
- if (res->start == 0)
- return -ENODEV;
+ ret = p2sb_bar(NULL, 0, res);
+ if (ret)
+ return ret;
/* do the final address calculation */
res->start = res->start + (GPIO_COMMUNITY0_PORT_ID << 16) +
PAD_CFG_DW0_GPP_A_23;
- res->end += res->start;
+ res->end = res->start + SZ_4 - 1;
wd_reset_base_addr = devm_ioremap_resource(dev, res);
if (IS_ERR(wd_reset_base_addr))
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
index 86ffb58fbc85..ae54dd33e233 100644
--- a/drivers/watchdog/sp5100_tco.c
+++ b/drivers/watchdog/sp5100_tco.c
@@ -402,6 +402,7 @@ out:
iounmap(addr);
release_resource(res);
+ kfree(res);
return ret;
}
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index f9479a3fe2a6..78ba36689eec 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/char/watchdog/sp805-wdt.c
*
@@ -341,6 +342,10 @@ static const struct amba_id sp805_wdt_ids[] = {
.id = 0x00141805,
.mask = 0x00ffffff,
},
+ {
+ .id = 0x001bb824,
+ .mask = 0x00ffffff,
+ },
{ 0, 0 },
};
diff --git a/drivers/watchdog/st_lpc_wdt.c b/drivers/watchdog/st_lpc_wdt.c
index 14ab6559c748..39abecdb9dd1 100644
--- a/drivers/watchdog/st_lpc_wdt.c
+++ b/drivers/watchdog/st_lpc_wdt.c
@@ -248,7 +248,6 @@ static int st_wdog_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int st_wdog_suspend(struct device *dev)
{
struct st_wdog *st_wdog = watchdog_get_drvdata(&st_wdog_dev);
@@ -285,16 +284,14 @@ static int st_wdog_resume(struct device *dev)
return 0;
}
-#endif
-static SIMPLE_DEV_PM_OPS(st_wdog_pm_ops,
- st_wdog_suspend,
- st_wdog_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(st_wdog_pm_ops,
+ st_wdog_suspend, st_wdog_resume);
static struct platform_driver st_wdog_driver = {
.driver = {
.name = "st-lpc-wdt",
- .pm = &st_wdog_pm_ops,
+ .pm = pm_sleep_ptr(&st_wdog_pm_ops),
.of_match_table = st_wdog_match,
},
.probe = st_wdog_probe,
diff --git a/drivers/watchdog/tegra_wdt.c b/drivers/watchdog/tegra_wdt.c
index dfe06e506cad..d5de6c0657a5 100644
--- a/drivers/watchdog/tegra_wdt.c
+++ b/drivers/watchdog/tegra_wdt.c
@@ -230,8 +230,7 @@ static int tegra_wdt_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int tegra_wdt_runtime_suspend(struct device *dev)
+static int tegra_wdt_suspend(struct device *dev)
{
struct tegra_wdt *wdt = dev_get_drvdata(dev);
@@ -241,7 +240,7 @@ static int tegra_wdt_runtime_suspend(struct device *dev)
return 0;
}
-static int tegra_wdt_runtime_resume(struct device *dev)
+static int tegra_wdt_resume(struct device *dev)
{
struct tegra_wdt *wdt = dev_get_drvdata(dev);
@@ -250,7 +249,6 @@ static int tegra_wdt_runtime_resume(struct device *dev)
return 0;
}
-#endif
static const struct of_device_id tegra_wdt_of_match[] = {
{ .compatible = "nvidia,tegra30-timer", },
@@ -258,16 +256,14 @@ static const struct of_device_id tegra_wdt_of_match[] = {
};
MODULE_DEVICE_TABLE(of, tegra_wdt_of_match);
-static const struct dev_pm_ops tegra_wdt_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(tegra_wdt_runtime_suspend,
- tegra_wdt_runtime_resume)
-};
+static DEFINE_SIMPLE_DEV_PM_OPS(tegra_wdt_pm_ops,
+ tegra_wdt_suspend, tegra_wdt_resume);
static struct platform_driver tegra_wdt_driver = {
.probe = tegra_wdt_probe,
.driver = {
.name = "tegra-wdt",
- .pm = &tegra_wdt_pm_ops,
+ .pm = pm_sleep_ptr(&tegra_wdt_pm_ops),
.of_match_table = tegra_wdt_of_match,
},
};
diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c
index e6f95e99156d..aeadaa07c891 100644
--- a/drivers/watchdog/wdat_wdt.c
+++ b/drivers/watchdog/wdat_wdt.c
@@ -467,7 +467,6 @@ static int wdat_wdt_probe(struct platform_device *pdev)
return devm_watchdog_register_device(dev, &wdat->wdd);
}
-#ifdef CONFIG_PM_SLEEP
static int wdat_wdt_suspend_noirq(struct device *dev)
{
struct wdat_wdt *wdat = dev_get_drvdata(dev);
@@ -528,18 +527,16 @@ static int wdat_wdt_resume_noirq(struct device *dev)
return wdat_wdt_start(&wdat->wdd);
}
-#endif
static const struct dev_pm_ops wdat_wdt_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(wdat_wdt_suspend_noirq,
- wdat_wdt_resume_noirq)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(wdat_wdt_suspend_noirq, wdat_wdt_resume_noirq)
};
static struct platform_driver wdat_wdt_driver = {
.probe = wdat_wdt_probe,
.driver = {
.name = "wdat_wdt",
- .pm = &wdat_wdt_pm_ops,
+ .pm = pm_sleep_ptr(&wdat_wdt_pm_ops),
},
};
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 5e8321f43cbd..c443f04aaad7 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -45,6 +45,7 @@
#include <asm/irq.h>
#include <asm/io_apic.h>
#include <asm/i8259.h>
+#include <asm/xen/cpuid.h>
#include <asm/xen/pci.h>
#endif
#include <asm/sync_bitops.h>
@@ -2184,6 +2185,7 @@ static struct irq_chip xen_percpu_chip __read_mostly = {
.irq_ack = ack_dynirq,
};
+#ifdef CONFIG_X86
#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
* channel notifications because we can receive vector callbacks on any
@@ -2196,11 +2198,48 @@ void xen_setup_callback_vector(void)
callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
if (xen_set_callback_via(callback_via)) {
pr_err("Request for Xen HVM callback vector failed\n");
- xen_have_vector_callback = 0;
+ xen_have_vector_callback = false;
}
}
}
+/*
+ * Set up per-vCPU vector-type callbacks. If this setup is unavailable,
+ * fall back to the global vector-type callback.
+ */
+static __init void xen_init_setup_upcall_vector(void)
+{
+ if (!xen_have_vector_callback)
+ return;
+
+ if ((cpuid_eax(xen_cpuid_base() + 4) & XEN_HVM_CPUID_UPCALL_VECTOR) &&
+ !xen_set_upcall_vector(0))
+ xen_percpu_upcall = true;
+ else if (xen_feature(XENFEAT_hvm_callback_vector))
+ xen_setup_callback_vector();
+ else
+ xen_have_vector_callback = false;
+}
+
+int xen_set_upcall_vector(unsigned int cpu)
+{
+ int rc;
+ xen_hvm_evtchn_upcall_vector_t op = {
+ .vector = HYPERVISOR_CALLBACK_VECTOR,
+ .vcpu = per_cpu(xen_vcpu_id, cpu),
+ };
+
+ rc = HYPERVISOR_hvm_op(HVMOP_set_evtchn_upcall_vector, &op);
+ if (rc)
+ return rc;
+
+	/* Trick the toolstack into thinking we are enlightened. */
+ if (!cpu)
+ rc = xen_set_callback_via(1);
+
+ return rc;
+}
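
xen_set_upcall_vector() is meant to be called once per vCPU; a hypothetical caller (not part of this hunk) wiring it into a CPU-hotplug online callback might look like:

static int example_xen_cpu_online(unsigned int cpu)
{
	int rc = 0;

	if (xen_percpu_upcall)
		rc = xen_set_upcall_vector(cpu);
	if (rc)
		pr_err("xen: failed to set upcall vector for CPU %u: %d\n",
		       cpu, rc);

	return rc;
}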
+
static __init void xen_alloc_callback_vector(void)
{
if (!xen_have_vector_callback)
@@ -2211,8 +2250,11 @@ static __init void xen_alloc_callback_vector(void)
}
#else
void xen_setup_callback_vector(void) {}
+static inline void xen_init_setup_upcall_vector(void) {}
+int xen_set_upcall_vector(unsigned int cpu) { return 0; }
static inline void xen_alloc_callback_vector(void) {}
-#endif
+#endif /* CONFIG_XEN_PVHVM */
+#endif /* CONFIG_X86 */
bool xen_fifo_events = true;
module_param_named(fifo_events, xen_fifo_events, bool, 0);
@@ -2272,10 +2314,9 @@ void __init xen_init_IRQ(void)
if (xen_initial_domain())
pci_xen_initial_domain();
}
- if (xen_feature(XENFEAT_hvm_callback_vector)) {
- xen_setup_callback_vector();
- xen_alloc_callback_vector();
- }
+ xen_init_setup_upcall_vector();
+ xen_alloc_callback_vector();
+
if (xen_hvm_domain()) {
native_init_IRQ();
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 3369734108af..e88e8f6f0a33 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -581,27 +581,30 @@ static int lock_pages(
struct privcmd_dm_op_buf kbufs[], unsigned int num,
struct page *pages[], unsigned int nr_pages, unsigned int *pinned)
{
- unsigned int i;
+ unsigned int i, off = 0;
- for (i = 0; i < num; i++) {
+ for (i = 0; i < num; ) {
unsigned int requested;
int page_count;
requested = DIV_ROUND_UP(
offset_in_page(kbufs[i].uptr) + kbufs[i].size,
- PAGE_SIZE);
+ PAGE_SIZE) - off;
if (requested > nr_pages)
return -ENOSPC;
page_count = pin_user_pages_fast(
- (unsigned long) kbufs[i].uptr,
+ (unsigned long)kbufs[i].uptr + off * PAGE_SIZE,
requested, FOLL_WRITE, pages);
- if (page_count < 0)
- return page_count;
+ if (page_count <= 0)
+ return page_count ? : -EFAULT;
*pinned += page_count;
nr_pages -= page_count;
pages += page_count;
+
+ off = (requested == page_count) ? 0 : off + page_count;
+ i += !off;
}
return 0;
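
A worked example of the partial-pin handling above (values are illustrative): suppose kbufs[i] spans 5 pages and pin_user_pages_fast() pins only 3 on the first call. Then requested is 5 and page_count is 3, so off becomes 3 and i is not advanced. On the next iteration requested is 5 - 3 = 2 pages, starting 3 pages into the buffer; once those 2 are pinned, requested == page_count, off resets to 0 and i advances to the next buffer. A result of 0 pinned pages is now treated as -EFAULT instead of silently succeeding.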
@@ -677,10 +680,8 @@ static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
}
rc = lock_pages(kbufs, kdata.num, pages, nr_pages, &pinned);
- if (rc < 0) {
- nr_pages = pinned;
+ if (rc < 0)
goto out;
- }
for (i = 0; i < kdata.num; i++) {
set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
@@ -692,7 +693,7 @@ static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
xen_preemptible_hcall_end();
out:
- unlock_pages(pages, nr_pages);
+ unlock_pages(pages, pinned);
kfree(xbufs);
kfree(pages);
kfree(kbufs);
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
index 3fbc21466a93..84e014490950 100644
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -159,7 +159,7 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
return XEN_PCI_ERR_op_failed;
}
- /* The value the guest needs is actually the IDT vector, not the
+ /* The value the guest needs is actually the IDT vector, not
* the local domain's IRQ number. */
op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 7a0c93acc2c5..d3dcda344989 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -1121,7 +1121,7 @@ static void scsiback_do_1lun_hotplug(struct vscsibk_info *info, int op,
"%s: writing %s", __func__, state);
return;
}
- strlcpy(phy, val, VSCSI_NAMELEN);
+ strscpy(phy, val, VSCSI_NAMELEN);
kfree(val);
/* virtual SCSI device */
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 597af455a522..0792fda49a15 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -128,7 +128,7 @@ static ssize_t xenbus_file_read(struct file *filp,
{
struct xenbus_file_priv *u = filp->private_data;
struct read_buffer *rb;
- unsigned i;
+ ssize_t i;
int ret;
mutex_lock(&u->reply_mutex);
@@ -148,7 +148,7 @@ again:
rb = list_entry(u->read_buffers.next, struct read_buffer, list);
i = 0;
while (i < len) {
- unsigned sz = min((unsigned)len - i, rb->len - rb->cons);
+ size_t sz = min_t(size_t, len - i, rb->len - rb->cons);
ret = copy_to_user(ubuf + i, &rb->msg[rb->cons], sz);
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
index 5abded97e1a7..9c09f89d8278 100644
--- a/drivers/xen/xenbus/xenbus_probe_backend.c
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -305,7 +305,7 @@ static int __init xenbus_probe_backend_init(void)
register_xenstore_notifier(&xenstore_notifier);
- if (register_shrinker(&backend_memory_shrinker))
+ if (register_shrinker(&backend_memory_shrinker, "xen-backend"))
pr_warn("shrinker registration failed\n");
return 0;
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index 07b010a68fcf..f44d5a64351e 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -40,7 +40,7 @@ static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename)
return -EINVAL;
}
- strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE);
+ strscpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE);
if (!strchr(bus_id, '/')) {
pr_warn("bus_id %s no slash\n", bus_id);
return -EINVAL;
diff --git a/fs/9p/fid.c b/fs/9p/fid.c
index baf2b152229e..23cf9b2fbfe4 100644
--- a/fs/9p/fid.c
+++ b/fs/9p/fid.c
@@ -28,14 +28,18 @@ static inline void __add_fid(struct dentry *dentry, struct p9_fid *fid)
/**
* v9fs_fid_add - add a fid to a dentry
* @dentry: dentry that the fid is being added to
- * @fid: fid to add
+ * @pfid: fid to add, NULLed out
*
*/
-void v9fs_fid_add(struct dentry *dentry, struct p9_fid *fid)
+void v9fs_fid_add(struct dentry *dentry, struct p9_fid **pfid)
{
+ struct p9_fid *fid = *pfid;
+
spin_lock(&dentry->d_lock);
__add_fid(dentry, fid);
spin_unlock(&dentry->d_lock);
+
+ *pfid = NULL;
}
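
As a sketch of the new calling convention (not a function from this patch): v9fs_fid_add() now takes ownership of the reference and NULLs the caller's pointer, so a shared error path can unconditionally put whatever the caller still holds.

static int example_instantiate(struct dentry *dentry, struct p9_fid *dfid,
			       const unsigned char *name)
{
	struct p9_fid *fid;
	int err = 0;

	fid = p9_client_walk(dfid, 1, &name, 1);
	if (IS_ERR(fid)) {
		err = PTR_ERR(fid);
		goto error;
	}

	v9fs_fid_add(dentry, &fid);	/* takes the reference; fid is now NULL */
error:
	p9_fid_put(fid);	/* no-op on NULL or an ERR_PTR value */
	return err;
}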
/**
@@ -56,7 +60,7 @@ static struct p9_fid *v9fs_fid_find_inode(struct inode *inode, kuid_t uid)
h = (struct hlist_head *)&inode->i_private;
hlist_for_each_entry(fid, h, ilist) {
if (uid_eq(fid->uid, uid)) {
- refcount_inc(&fid->count);
+ p9_fid_get(fid);
ret = fid;
break;
}
@@ -68,15 +72,19 @@ static struct p9_fid *v9fs_fid_find_inode(struct inode *inode, kuid_t uid)
/**
* v9fs_open_fid_add - add an open fid to an inode
* @inode: inode that the fid is being added to
- * @fid: fid to add
+ * @pfid: fid to add, NULLed out
*
*/
-void v9fs_open_fid_add(struct inode *inode, struct p9_fid *fid)
+void v9fs_open_fid_add(struct inode *inode, struct p9_fid **pfid)
{
+ struct p9_fid *fid = *pfid;
+
spin_lock(&inode->i_lock);
hlist_add_head(&fid->ilist, (struct hlist_head *)&inode->i_private);
spin_unlock(&inode->i_lock);
+
+ *pfid = NULL;
}
@@ -104,7 +112,7 @@ static struct p9_fid *v9fs_fid_find(struct dentry *dentry, kuid_t uid, int any)
hlist_for_each_entry(fid, h, dlist) {
if (any || uid_eq(fid->uid, uid)) {
ret = fid;
- refcount_inc(&ret->count);
+ p9_fid_get(ret);
break;
}
}
@@ -150,9 +158,9 @@ static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry,
{
struct dentry *ds;
const unsigned char **wnames, *uname;
- int i, n, l, clone, access;
+ int i, n, l, access;
struct v9fs_session_info *v9ses;
- struct p9_fid *fid, *old_fid;
+ struct p9_fid *fid, *root_fid, *old_fid;
v9ses = v9fs_dentry2v9ses(dentry);
access = v9ses->flags & V9FS_ACCESS_MASK;
@@ -169,17 +177,17 @@ static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry,
fid = v9fs_fid_find(ds, uid, any);
if (fid) {
/* Found the parent fid do a lookup with that */
- struct p9_fid *ofid = fid;
+ old_fid = fid;
- fid = p9_client_walk(ofid, 1, &dentry->d_name.name, 1);
- p9_client_clunk(ofid);
+ fid = p9_client_walk(old_fid, 1, &dentry->d_name.name, 1);
+ p9_fid_put(old_fid);
goto fid_out;
}
up_read(&v9ses->rename_sem);
/* start from the root and try to do a lookup */
- fid = v9fs_fid_find(dentry->d_sb->s_root, uid, any);
- if (!fid) {
+ root_fid = v9fs_fid_find(dentry->d_sb->s_root, uid, any);
+ if (!root_fid) {
/* the user is not attached to the fs yet */
if (access == V9FS_ACCESS_SINGLE)
return ERR_PTR(-EPERM);
@@ -194,12 +202,13 @@ static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry,
if (IS_ERR(fid))
return fid;
- refcount_inc(&fid->count);
- v9fs_fid_add(dentry->d_sb->s_root, fid);
+ root_fid = p9_fid_get(fid);
+ v9fs_fid_add(dentry->d_sb->s_root, &fid);
}
/* If we are root ourself just return that */
if (dentry->d_sb->s_root == dentry)
- return fid;
+ return root_fid;
+
/*
* Do a multipath walk with attached root.
* When walking parent we need to make sure we
@@ -211,19 +220,20 @@ static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry,
fid = ERR_PTR(n);
goto err_out;
}
- old_fid = fid;
- clone = 1;
+ fid = root_fid;
+ old_fid = root_fid;
i = 0;
while (i < n) {
l = min(n - i, P9_MAXWELEM);
/*
* We need to hold rename lock when doing a multipath
- * walk to ensure none of the patch component change
+ * walk to ensure none of the path components change
*/
- fid = p9_client_walk(fid, l, &wnames[i], clone);
+ fid = p9_client_walk(old_fid, l, &wnames[i],
+ old_fid == root_fid /* clone */);
/* non-cloning walk will return the same fid */
if (fid != old_fid) {
- p9_client_clunk(old_fid);
+ p9_fid_put(old_fid);
old_fid = fid;
}
if (IS_ERR(fid)) {
@@ -231,7 +241,6 @@ static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry,
goto err_out;
}
i += l;
- clone = 0;
}
kfree(wnames);
fid_out:
@@ -239,11 +248,11 @@ fid_out:
spin_lock(&dentry->d_lock);
if (d_unhashed(dentry)) {
spin_unlock(&dentry->d_lock);
- p9_client_clunk(fid);
+ p9_fid_put(fid);
fid = ERR_PTR(-ENOENT);
} else {
__add_fid(dentry, fid);
- refcount_inc(&fid->count);
+ p9_fid_get(fid);
spin_unlock(&dentry->d_lock);
}
}
@@ -300,7 +309,7 @@ struct p9_fid *v9fs_writeback_fid(struct dentry *dentry)
fid = clone_fid(ofid);
if (IS_ERR(fid))
goto error_out;
- p9_client_clunk(ofid);
+ p9_fid_put(ofid);
/*
* writeback fid will only be used to write back the
* dirty pages. We always request for the open fid in read-write
@@ -309,7 +318,7 @@ struct p9_fid *v9fs_writeback_fid(struct dentry *dentry)
*/
err = p9_client_open(fid, O_RDWR);
if (err < 0) {
- p9_client_clunk(fid);
+ p9_fid_put(fid);
fid = ERR_PTR(err);
goto error_out;
}
diff --git a/fs/9p/fid.h b/fs/9p/fid.h
index f7f33509e169..8a4e8cd12ca2 100644
--- a/fs/9p/fid.h
+++ b/fs/9p/fid.h
@@ -13,9 +13,9 @@ static inline struct p9_fid *v9fs_parent_fid(struct dentry *dentry)
{
return v9fs_fid_lookup(dentry->d_parent);
}
-void v9fs_fid_add(struct dentry *dentry, struct p9_fid *fid);
+void v9fs_fid_add(struct dentry *dentry, struct p9_fid **fid);
struct p9_fid *v9fs_writeback_fid(struct dentry *dentry);
-void v9fs_open_fid_add(struct inode *inode, struct p9_fid *fid);
+void v9fs_open_fid_add(struct inode *inode, struct p9_fid **fid);
static inline struct p9_fid *clone_fid(struct p9_fid *fid)
{
return IS_ERR(fid) ? fid : p9_client_walk(fid, 0, NULL, 1);
@@ -29,7 +29,7 @@ static inline struct p9_fid *v9fs_fid_clone(struct dentry *dentry)
return fid;
nfid = clone_fid(fid);
- p9_client_clunk(fid);
+ p9_fid_put(fid);
return nfid;
}
#endif
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index d0833fa69faf..47b9a1122f34 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -73,7 +73,7 @@ static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
BUG_ON(!fid);
}
- refcount_inc(&fid->count);
+ p9_fid_get(fid);
rreq->netfs_priv = fid;
return 0;
}
@@ -86,7 +86,7 @@ static void v9fs_free_request(struct netfs_io_request *rreq)
{
struct p9_fid *fid = rreq->netfs_priv;
- p9_client_clunk(fid);
+ p9_fid_put(fid);
}
/**
diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c
index 1c609e99d280..f89f01734587 100644
--- a/fs/9p/vfs_dentry.c
+++ b/fs/9p/vfs_dentry.c
@@ -54,7 +54,7 @@ static void v9fs_dentry_release(struct dentry *dentry)
p9_debug(P9_DEBUG_VFS, " dentry: %pd (%p)\n",
dentry, dentry);
hlist_for_each_safe(p, n, (struct hlist_head *)&dentry->d_fsdata)
- p9_client_clunk(hlist_entry(p, struct p9_fid, dlist));
+ p9_fid_put(hlist_entry(p, struct p9_fid, dlist));
dentry->d_fsdata = NULL;
}
@@ -85,7 +85,7 @@ static int v9fs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
retval = v9fs_refresh_inode_dotl(fid, inode);
else
retval = v9fs_refresh_inode(fid, inode);
- p9_client_clunk(fid);
+ p9_fid_put(fid);
if (retval == -ENOENT)
return 0;
diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
index 958680f7f23e..000fbaae9b18 100644
--- a/fs/9p/vfs_dir.c
+++ b/fs/9p/vfs_dir.c
@@ -218,7 +218,7 @@ int v9fs_dir_release(struct inode *inode, struct file *filp)
spin_lock(&inode->i_lock);
hlist_del(&fid->ilist);
spin_unlock(&inode->i_lock);
- p9_client_clunk(fid);
+ p9_fid_put(fid);
}
if ((filp->f_mode & FMODE_WRITE)) {
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 2573c08f335c..aec43ba83799 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -63,15 +63,16 @@ int v9fs_file_open(struct inode *inode, struct file *file)
err = p9_client_open(fid, omode);
if (err < 0) {
- p9_client_clunk(fid);
+ p9_fid_put(fid);
return err;
}
if ((file->f_flags & O_APPEND) &&
(!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses)))
generic_file_llseek(file, 0, SEEK_END);
+
+ file->private_data = fid;
}
- file->private_data = fid;
mutex_lock(&v9inode->v_mutex);
if ((v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) &&
!v9inode->writeback_fid &&
@@ -95,10 +96,10 @@ int v9fs_file_open(struct inode *inode, struct file *file)
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
fscache_use_cookie(v9fs_inode_cookie(v9inode),
file->f_mode & FMODE_WRITE);
- v9fs_open_fid_add(inode, fid);
+ v9fs_open_fid_add(inode, &fid);
return 0;
out_error:
- p9_client_clunk(file->private_data);
+ p9_fid_put(file->private_data);
file->private_data = NULL;
return err;
}
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 3d8297714772..4d1a4a8d9277 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -399,10 +399,8 @@ void v9fs_evict_inode(struct inode *inode)
fscache_relinquish_cookie(v9fs_inode_cookie(v9inode), false);
/* clunk the fid stashed in writeback_fid */
- if (v9inode->writeback_fid) {
- p9_client_clunk(v9inode->writeback_fid);
- v9inode->writeback_fid = NULL;
- }
+ p9_fid_put(v9inode->writeback_fid);
+ v9inode->writeback_fid = NULL;
}
static int v9fs_test_inode(struct inode *inode, void *data)
@@ -569,7 +567,7 @@ static int v9fs_remove(struct inode *dir, struct dentry *dentry, int flags)
if (v9fs_proto_dotl(v9ses))
retval = p9_client_unlinkat(dfid, dentry->d_name.name,
v9fs_at_to_dotl_flags(flags));
- p9_client_clunk(dfid);
+ p9_fid_put(dfid);
if (retval == -EOPNOTSUPP) {
/* Try the one based on path */
v9fid = v9fs_fid_clone(dentry);
@@ -633,14 +631,12 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
if (IS_ERR(ofid)) {
err = PTR_ERR(ofid);
p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
- p9_client_clunk(dfid);
- return ERR_PTR(err);
+ goto error;
}
err = p9_client_fcreate(ofid, name, perm, mode, extension);
if (err < 0) {
p9_debug(P9_DEBUG_VFS, "p9_client_fcreate failed %d\n", err);
- p9_client_clunk(dfid);
goto error;
}
@@ -651,8 +647,6 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
err = PTR_ERR(fid);
p9_debug(P9_DEBUG_VFS,
"p9_client_walk failed %d\n", err);
- fid = NULL;
- p9_client_clunk(dfid);
goto error;
}
/*
@@ -663,21 +657,17 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
err = PTR_ERR(inode);
p9_debug(P9_DEBUG_VFS,
"inode creation failed %d\n", err);
- p9_client_clunk(dfid);
goto error;
}
- v9fs_fid_add(dentry, fid);
+ v9fs_fid_add(dentry, &fid);
d_instantiate(dentry, inode);
}
- p9_client_clunk(dfid);
+ p9_fid_put(dfid);
return ofid;
error:
- if (ofid)
- p9_client_clunk(ofid);
-
- if (fid)
- p9_client_clunk(fid);
-
+ p9_fid_put(dfid);
+ p9_fid_put(ofid);
+ p9_fid_put(fid);
return ERR_PTR(err);
}
@@ -708,7 +698,7 @@ v9fs_vfs_create(struct user_namespace *mnt_userns, struct inode *dir,
return PTR_ERR(fid);
v9fs_invalidate_inode_attr(dir);
- p9_client_clunk(fid);
+ p9_fid_put(fid);
return 0;
}
@@ -744,7 +734,7 @@ static int v9fs_vfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
}
if (fid)
- p9_client_clunk(fid);
+ p9_fid_put(fid);
return err;
}
@@ -785,7 +775,7 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
*/
name = dentry->d_name.name;
fid = p9_client_walk(dfid, 1, &name, 1);
- p9_client_clunk(dfid);
+ p9_fid_put(dfid);
if (fid == ERR_PTR(-ENOENT))
inode = NULL;
else if (IS_ERR(fid))
@@ -804,11 +794,11 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
res = d_splice_alias(inode, dentry);
if (!IS_ERR(fid)) {
if (!res)
- v9fs_fid_add(dentry, fid);
+ v9fs_fid_add(dentry, &fid);
else if (!IS_ERR(res))
- v9fs_fid_add(res, fid);
+ v9fs_fid_add(res, &fid);
else
- p9_client_clunk(fid);
+ p9_fid_put(fid);
}
return res;
}
@@ -847,7 +837,6 @@ v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry,
v9fs_proto_dotu(v9ses)));
if (IS_ERR(fid)) {
err = PTR_ERR(fid);
- fid = NULL;
goto error;
}
@@ -882,7 +871,7 @@ v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry,
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
fscache_use_cookie(v9fs_inode_cookie(v9inode),
file->f_mode & FMODE_WRITE);
- v9fs_open_fid_add(inode, fid);
+ v9fs_open_fid_add(inode, &fid);
file->f_mode |= FMODE_CREATED;
out:
@@ -890,8 +879,7 @@ out:
return err;
error:
- if (fid)
- p9_client_clunk(fid);
+ p9_fid_put(fid);
goto out;
}
@@ -939,9 +927,9 @@ v9fs_vfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
struct inode *old_inode;
struct inode *new_inode;
struct v9fs_session_info *v9ses;
- struct p9_fid *oldfid, *dfid;
- struct p9_fid *olddirfid;
- struct p9_fid *newdirfid;
+ struct p9_fid *oldfid = NULL, *dfid = NULL;
+ struct p9_fid *olddirfid = NULL;
+ struct p9_fid *newdirfid = NULL;
struct p9_wstat wstat;
if (flags)
@@ -958,21 +946,22 @@ v9fs_vfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
dfid = v9fs_parent_fid(old_dentry);
olddirfid = clone_fid(dfid);
- if (dfid && !IS_ERR(dfid))
- p9_client_clunk(dfid);
+ p9_fid_put(dfid);
+ dfid = NULL;
if (IS_ERR(olddirfid)) {
retval = PTR_ERR(olddirfid);
- goto done;
+ goto error;
}
dfid = v9fs_parent_fid(new_dentry);
newdirfid = clone_fid(dfid);
- p9_client_clunk(dfid);
+ p9_fid_put(dfid);
+ dfid = NULL;
if (IS_ERR(newdirfid)) {
retval = PTR_ERR(newdirfid);
- goto clunk_olddir;
+ goto error;
}
down_write(&v9ses->rename_sem);
@@ -983,7 +972,7 @@ v9fs_vfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
retval = p9_client_rename(oldfid, newdirfid,
new_dentry->d_name.name);
if (retval != -EOPNOTSUPP)
- goto clunk_newdir;
+ goto error_locked;
}
if (old_dentry->d_parent != new_dentry->d_parent) {
/*
@@ -992,14 +981,14 @@ v9fs_vfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
p9_debug(P9_DEBUG_ERROR, "old dir and new dir are different\n");
retval = -EXDEV;
- goto clunk_newdir;
+ goto error_locked;
}
v9fs_blank_wstat(&wstat);
wstat.muid = v9ses->uname;
wstat.name = new_dentry->d_name.name;
retval = p9_client_wstat(oldfid, &wstat);
-clunk_newdir:
+error_locked:
if (!retval) {
if (new_inode) {
if (S_ISDIR(new_inode->i_mode))
@@ -1020,13 +1009,11 @@ clunk_newdir:
d_move(old_dentry, new_dentry);
}
up_write(&v9ses->rename_sem);
- p9_client_clunk(newdirfid);
-
-clunk_olddir:
- p9_client_clunk(olddirfid);
-done:
- p9_client_clunk(oldfid);
+error:
+ p9_fid_put(newdirfid);
+ p9_fid_put(olddirfid);
+ p9_fid_put(oldfid);
return retval;
}
@@ -1060,7 +1047,7 @@ v9fs_vfs_getattr(struct user_namespace *mnt_userns, const struct path *path,
return PTR_ERR(fid);
st = p9_client_stat(fid);
- p9_client_clunk(fid);
+ p9_fid_put(fid);
if (IS_ERR(st))
return PTR_ERR(st);
@@ -1136,7 +1123,7 @@ static int v9fs_vfs_setattr(struct user_namespace *mnt_userns,
retval = p9_client_wstat(fid, &wstat);
if (use_dentry)
- p9_client_clunk(fid);
+ p9_fid_put(fid);
if (retval < 0)
return retval;
@@ -1261,7 +1248,7 @@ static const char *v9fs_vfs_get_link(struct dentry *dentry,
return ERR_CAST(fid);
st = p9_client_stat(fid);
- p9_client_clunk(fid);
+ p9_fid_put(fid);
if (IS_ERR(st))
return ERR_CAST(st);
@@ -1308,7 +1295,7 @@ static int v9fs_vfs_mkspecial(struct inode *dir, struct dentry *dentry,
return PTR_ERR(fid);
v9fs_invalidate_inode_attr(dir);
- p9_client_clunk(fid);
+ p9_fid_put(fid);
return 0;
}
@@ -1364,7 +1351,7 @@ v9fs_vfs_link(struct dentry *old_dentry, struct inode *dir,
v9fs_refresh_inode(oldfid, d_inode(old_dentry));
v9fs_invalidate_inode_attr(dir);
}
- p9_client_clunk(oldfid);
+ p9_fid_put(oldfid);
return retval;
}
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index b6eb1160296c..5cfa4b4f070f 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -238,7 +238,7 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
struct inode *inode;
struct p9_fid *fid = NULL;
struct v9fs_inode *v9inode;
- struct p9_fid *dfid, *ofid, *inode_fid;
+ struct p9_fid *dfid = NULL, *ofid = NULL, *inode_fid = NULL;
struct v9fs_session_info *v9ses;
struct posix_acl *pacl = NULL, *dacl = NULL;
struct dentry *res = NULL;
@@ -274,7 +274,6 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
if (IS_ERR(ofid)) {
err = PTR_ERR(ofid);
p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
- p9_client_clunk(dfid);
goto out;
}
@@ -286,38 +285,34 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
if (err) {
p9_debug(P9_DEBUG_VFS, "Failed to get acl values in creat %d\n",
err);
- p9_client_clunk(dfid);
- goto error;
+ goto out;
}
err = p9_client_create_dotl(ofid, name, v9fs_open_to_dotl_flags(flags),
mode, gid, &qid);
if (err < 0) {
p9_debug(P9_DEBUG_VFS, "p9_client_open_dotl failed in creat %d\n",
err);
- p9_client_clunk(dfid);
- goto error;
+ goto out;
}
v9fs_invalidate_inode_attr(dir);
/* instantiate inode and assign the unopened fid to the dentry */
fid = p9_client_walk(dfid, 1, &name, 1);
- p9_client_clunk(dfid);
if (IS_ERR(fid)) {
err = PTR_ERR(fid);
p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
- fid = NULL;
- goto error;
+ goto out;
}
inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n", err);
- goto error;
+ goto out;
}
/* Now set the ACL based on the default value */
v9fs_set_create_acl(inode, fid, dacl, pacl);
- v9fs_fid_add(dentry, fid);
+ v9fs_fid_add(dentry, &fid);
d_instantiate(dentry, inode);
v9inode = V9FS_I(inode);
@@ -336,7 +331,7 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
if (IS_ERR(inode_fid)) {
err = PTR_ERR(inode_fid);
mutex_unlock(&v9inode->v_mutex);
- goto err_clunk_old_fid;
+ goto out;
}
v9inode->writeback_fid = (void *) inode_fid;
}
@@ -344,25 +339,20 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
/* Since we are opening a file, assign the open fid to the file */
err = finish_open(file, dentry, generic_file_open);
if (err)
- goto err_clunk_old_fid;
+ goto out;
file->private_data = ofid;
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
fscache_use_cookie(v9fs_inode_cookie(v9inode),
file->f_mode & FMODE_WRITE);
- v9fs_open_fid_add(inode, ofid);
+ v9fs_open_fid_add(inode, &ofid);
file->f_mode |= FMODE_CREATED;
out:
+ p9_fid_put(dfid);
+ p9_fid_put(ofid);
+ p9_fid_put(fid);
v9fs_put_acl(dacl, pacl);
dput(res);
return err;
-
-error:
- if (fid)
- p9_client_clunk(fid);
-err_clunk_old_fid:
- if (ofid)
- p9_client_clunk(ofid);
- goto out;
}
/**
@@ -400,7 +390,6 @@ static int v9fs_vfs_mkdir_dotl(struct user_namespace *mnt_userns,
if (IS_ERR(dfid)) {
err = PTR_ERR(dfid);
p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
- dfid = NULL;
goto error;
}
@@ -422,7 +411,6 @@ static int v9fs_vfs_mkdir_dotl(struct user_namespace *mnt_userns,
err = PTR_ERR(fid);
p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
err);
- fid = NULL;
goto error;
}
@@ -435,10 +423,9 @@ static int v9fs_vfs_mkdir_dotl(struct user_namespace *mnt_userns,
err);
goto error;
}
- v9fs_fid_add(dentry, fid);
+ v9fs_fid_add(dentry, &fid);
v9fs_set_create_acl(inode, fid, dacl, pacl);
d_instantiate(dentry, inode);
- fid = NULL;
err = 0;
} else {
/*
@@ -457,10 +444,9 @@ static int v9fs_vfs_mkdir_dotl(struct user_namespace *mnt_userns,
inc_nlink(dir);
v9fs_invalidate_inode_attr(dir);
error:
- if (fid)
- p9_client_clunk(fid);
+ p9_fid_put(fid);
v9fs_put_acl(dacl, pacl);
- p9_client_clunk(dfid);
+ p9_fid_put(dfid);
return err;
}
@@ -489,7 +475,7 @@ v9fs_vfs_getattr_dotl(struct user_namespace *mnt_userns,
*/
st = p9_client_getattr_dotl(fid, P9_STATS_ALL);
- p9_client_clunk(fid);
+ p9_fid_put(fid);
if (IS_ERR(st))
return PTR_ERR(st);
@@ -603,7 +589,7 @@ int v9fs_vfs_setattr_dotl(struct user_namespace *mnt_userns,
retval = p9_client_setattr(fid, &p9attr);
if (retval < 0) {
if (use_dentry)
- p9_client_clunk(fid);
+ p9_fid_put(fid);
return retval;
}
@@ -619,12 +605,12 @@ int v9fs_vfs_setattr_dotl(struct user_namespace *mnt_userns,
retval = v9fs_acl_chmod(inode, fid);
if (retval < 0) {
if (use_dentry)
- p9_client_clunk(fid);
+ p9_fid_put(fid);
return retval;
}
}
if (use_dentry)
- p9_client_clunk(fid);
+ p9_fid_put(fid);
return 0;
}
@@ -743,7 +729,6 @@ v9fs_vfs_symlink_dotl(struct user_namespace *mnt_userns, struct inode *dir,
err = PTR_ERR(fid);
p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
err);
- fid = NULL;
goto error;
}
@@ -755,9 +740,8 @@ v9fs_vfs_symlink_dotl(struct user_namespace *mnt_userns, struct inode *dir,
err);
goto error;
}
- v9fs_fid_add(dentry, fid);
+ v9fs_fid_add(dentry, &fid);
d_instantiate(dentry, inode);
- fid = NULL;
err = 0;
} else {
/* Not in cached mode. No need to populate inode with stat */
@@ -770,10 +754,8 @@ v9fs_vfs_symlink_dotl(struct user_namespace *mnt_userns, struct inode *dir,
}
error:
- if (fid)
- p9_client_clunk(fid);
-
- p9_client_clunk(dfid);
+ p9_fid_put(fid);
+ p9_fid_put(dfid);
return err;
}
@@ -803,14 +785,14 @@ v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
oldfid = v9fs_fid_lookup(old_dentry);
if (IS_ERR(oldfid)) {
- p9_client_clunk(dfid);
+ p9_fid_put(dfid);
return PTR_ERR(oldfid);
}
err = p9_client_link(dfid, oldfid, dentry->d_name.name);
- p9_client_clunk(dfid);
- p9_client_clunk(oldfid);
+ p9_fid_put(dfid);
+ p9_fid_put(oldfid);
if (err < 0) {
p9_debug(P9_DEBUG_VFS, "p9_client_link failed %d\n", err);
return err;
@@ -826,7 +808,7 @@ v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
return PTR_ERR(fid);
v9fs_refresh_inode_dotl(fid, d_inode(old_dentry));
- p9_client_clunk(fid);
+ p9_fid_put(fid);
}
ihold(d_inode(old_dentry));
d_instantiate(dentry, d_inode(old_dentry));
@@ -866,7 +848,6 @@ v9fs_vfs_mknod_dotl(struct user_namespace *mnt_userns, struct inode *dir,
if (IS_ERR(dfid)) {
err = PTR_ERR(dfid);
p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
- dfid = NULL;
goto error;
}
@@ -891,7 +872,6 @@ v9fs_vfs_mknod_dotl(struct user_namespace *mnt_userns, struct inode *dir,
err = PTR_ERR(fid);
p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
err);
- fid = NULL;
goto error;
}
@@ -905,9 +885,8 @@ v9fs_vfs_mknod_dotl(struct user_namespace *mnt_userns, struct inode *dir,
goto error;
}
v9fs_set_create_acl(inode, fid, dacl, pacl);
- v9fs_fid_add(dentry, fid);
+ v9fs_fid_add(dentry, &fid);
d_instantiate(dentry, inode);
- fid = NULL;
err = 0;
} else {
/*
@@ -923,10 +902,9 @@ v9fs_vfs_mknod_dotl(struct user_namespace *mnt_userns, struct inode *dir,
d_instantiate(dentry, inode);
}
error:
- if (fid)
- p9_client_clunk(fid);
+ p9_fid_put(fid);
v9fs_put_acl(dacl, pacl);
- p9_client_clunk(dfid);
+ p9_fid_put(dfid);
return err;
}
@@ -956,7 +934,7 @@ v9fs_vfs_get_link_dotl(struct dentry *dentry,
if (IS_ERR(fid))
return ERR_CAST(fid);
retval = p9_client_readlink(fid, &target);
- p9_client_clunk(fid);
+ p9_fid_put(fid);
if (retval)
return ERR_PTR(retval);
set_delayed_call(done, kfree_link, target);
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 97e23b4e6982..2d9ee073d12c 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -184,13 +184,13 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
retval = v9fs_get_acl(inode, fid);
if (retval)
goto release_sb;
- v9fs_fid_add(root, fid);
+ v9fs_fid_add(root, &fid);
p9_debug(P9_DEBUG_VFS, " simple set mount, return 0\n");
return dget(sb->s_root);
clunk_fid:
- p9_client_clunk(fid);
+ p9_fid_put(fid);
v9fs_session_close(v9ses);
free_session:
kfree(v9ses);
@@ -203,7 +203,7 @@ release_sb:
* attached the fid to dentry so it won't get clunked
* automatically.
*/
- p9_client_clunk(fid);
+ p9_fid_put(fid);
deactivate_locked_super(sb);
return ERR_PTR(retval);
}
@@ -270,7 +270,7 @@ static int v9fs_statfs(struct dentry *dentry, struct kstatfs *buf)
}
res = simple_statfs(dentry, buf);
done:
- p9_client_clunk(fid);
+ p9_fid_put(fid);
return res;
}
diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
index a824441b95a2..1f9298a4bd42 100644
--- a/fs/9p/xattr.c
+++ b/fs/9p/xattr.c
@@ -44,7 +44,7 @@ ssize_t v9fs_fid_xattr_get(struct p9_fid *fid, const char *name,
if (err)
retval = err;
}
- p9_client_clunk(attr_fid);
+ p9_fid_put(attr_fid);
return retval;
}
@@ -71,7 +71,7 @@ ssize_t v9fs_xattr_get(struct dentry *dentry, const char *name,
if (IS_ERR(fid))
return PTR_ERR(fid);
ret = v9fs_fid_xattr_get(fid, name, buffer, buffer_size);
- p9_client_clunk(fid);
+ p9_fid_put(fid);
return ret;
}
@@ -98,7 +98,7 @@ int v9fs_xattr_set(struct dentry *dentry, const char *name,
if (IS_ERR(fid))
return PTR_ERR(fid);
ret = v9fs_fid_xattr_set(fid, name, value, value_len, flags);
- p9_client_clunk(fid);
+ p9_fid_put(fid);
return ret;
}
@@ -128,7 +128,7 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
retval);
else
p9_client_write(fid, 0, &from, &retval);
- err = p9_client_clunk(fid);
+ err = p9_fid_put(fid);
if (!retval && err)
retval = err;
return retval;
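The 9p hunks above all make the same move: every p9_client_clunk() becomes p9_fid_put(), and v9fs_fid_add()/v9fs_open_fid_add() now take a struct p9_fid ** so they can steal the caller's reference. That lets the error paths collapse into a single exit label that unconditionally drops whatever is still held. A minimal sketch of the resulting idiom, assuming (as the converted paths rely on) that p9_fid_put() tolerates NULL and IS_ERR pointers; v9fs_parent_fid() is a made-up stand-in for the directory-fid lookup:

static int example_create(struct dentry *dentry, const unsigned char *name)
{
        struct p9_fid *dfid = NULL, *fid = NULL;
        int err = 0;

        dfid = v9fs_parent_fid(dentry);         /* hypothetical lookup helper */
        if (IS_ERR(dfid)) {
                err = PTR_ERR(dfid);
                goto out;
        }

        fid = p9_client_walk(dfid, 1, &name, 1);        /* takes a new reference */
        if (IS_ERR(fid)) {
                err = PTR_ERR(fid);
                goto out;
        }

        v9fs_fid_add(dentry, &fid);     /* hands the reference to the dentry */
out:
        p9_fid_put(fid);                /* assumed no-op on NULL/IS_ERR/consumed fids */
        p9_fid_put(dfid);
        return err;
}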
diff --git a/fs/Kconfig b/fs/Kconfig
index 5976eb33535f..a547307c1ae8 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -247,8 +247,7 @@ config HUGETLB_PAGE
#
# Select this config option from the architecture Kconfig, if it is preferred
-# to enable the feature of minimizing overhead of struct page associated with
-# each HugeTLB page.
+# to enable HugeTLB Vmemmap Optimization (HVO).
#
config ARCH_WANT_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
bool
@@ -259,14 +258,13 @@ config HUGETLB_PAGE_OPTIMIZE_VMEMMAP
depends on SPARSEMEM_VMEMMAP
config HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON
- bool "Default optimizing vmemmap pages of HugeTLB to on"
+ bool "HugeTLB Vmemmap Optimization (HVO) defaults to on"
default n
depends on HUGETLB_PAGE_OPTIMIZE_VMEMMAP
help
- When using HUGETLB_PAGE_OPTIMIZE_VMEMMAP, the optimizing unused vmemmap
- pages associated with each HugeTLB page is default off. Say Y here
- to enable optimizing vmemmap pages of HugeTLB by default. It can then
- be disabled on the command line via hugetlb_free_vmemmap=off.
+ The HugeTLB Vmemmap Optimization (HVO) defaults to off. Say Y here to
+ enable HVO by default. It can be disabled via hugetlb_free_vmemmap=off
+ (boot command line) or hugetlb_optimize_vmemmap (sysctl).
config MEMFD_CREATE
def_bool TMPFS || HUGETLBFS
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index 07ad744eef77..988c2ac7cece 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -158,7 +158,7 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
cell->name[i] = tolower(name[i]);
cell->name[i] = 0;
- atomic_set(&cell->ref, 1);
+ refcount_set(&cell->ref, 1);
atomic_set(&cell->active, 0);
INIT_WORK(&cell->manager, afs_manage_cell_work);
cell->volumes = RB_ROOT;
@@ -287,7 +287,7 @@ struct afs_cell *afs_lookup_cell(struct afs_net *net,
cell = candidate;
candidate = NULL;
atomic_set(&cell->active, 2);
- trace_afs_cell(cell->debug_id, atomic_read(&cell->ref), 2, afs_cell_trace_insert);
+ trace_afs_cell(cell->debug_id, refcount_read(&cell->ref), 2, afs_cell_trace_insert);
rb_link_node_rcu(&cell->net_node, parent, pp);
rb_insert_color(&cell->net_node, &net->cells);
up_write(&net->cells_lock);
@@ -295,7 +295,7 @@ struct afs_cell *afs_lookup_cell(struct afs_net *net,
afs_queue_cell(cell, afs_cell_trace_get_queue_new);
wait_for_cell:
- trace_afs_cell(cell->debug_id, atomic_read(&cell->ref), atomic_read(&cell->active),
+ trace_afs_cell(cell->debug_id, refcount_read(&cell->ref), atomic_read(&cell->active),
afs_cell_trace_wait);
_debug("wait_for_cell");
wait_var_event(&cell->state,
@@ -490,13 +490,13 @@ static void afs_cell_destroy(struct rcu_head *rcu)
{
struct afs_cell *cell = container_of(rcu, struct afs_cell, rcu);
struct afs_net *net = cell->net;
- int u;
+ int r;
_enter("%p{%s}", cell, cell->name);
- u = atomic_read(&cell->ref);
- ASSERTCMP(u, ==, 0);
- trace_afs_cell(cell->debug_id, u, atomic_read(&cell->active), afs_cell_trace_free);
+ r = refcount_read(&cell->ref);
+ ASSERTCMP(r, ==, 0);
+ trace_afs_cell(cell->debug_id, r, atomic_read(&cell->active), afs_cell_trace_free);
afs_put_vlserverlist(net, rcu_access_pointer(cell->vl_servers));
afs_unuse_cell(net, cell->alias_of, afs_cell_trace_unuse_alias);
@@ -539,13 +539,10 @@ void afs_cells_timer(struct timer_list *timer)
*/
struct afs_cell *afs_get_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
- int u;
+ int r;
- if (atomic_read(&cell->ref) <= 0)
- BUG();
-
- u = atomic_inc_return(&cell->ref);
- trace_afs_cell(cell->debug_id, u, atomic_read(&cell->active), reason);
+ __refcount_inc(&cell->ref, &r);
+ trace_afs_cell(cell->debug_id, r + 1, atomic_read(&cell->active), reason);
return cell;
}
@@ -556,12 +553,14 @@ void afs_put_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
if (cell) {
unsigned int debug_id = cell->debug_id;
- unsigned int u, a;
+ unsigned int a;
+ bool zero;
+ int r;
a = atomic_read(&cell->active);
- u = atomic_dec_return(&cell->ref);
- trace_afs_cell(debug_id, u, a, reason);
- if (u == 0) {
+ zero = __refcount_dec_and_test(&cell->ref, &r);
+ trace_afs_cell(debug_id, r - 1, a, reason);
+ if (zero) {
a = atomic_read(&cell->active);
WARN(a != 0, "Cell active count %u > 0\n", a);
call_rcu(&cell->rcu, afs_cell_destroy);
@@ -574,14 +573,12 @@ void afs_put_cell(struct afs_cell *cell, enum afs_cell_trace reason)
*/
struct afs_cell *afs_use_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
- int u, a;
-
- if (atomic_read(&cell->ref) <= 0)
- BUG();
+ int r, a;
- u = atomic_read(&cell->ref);
+ r = refcount_read(&cell->ref);
+ WARN_ON(r == 0);
a = atomic_inc_return(&cell->active);
- trace_afs_cell(cell->debug_id, u, a, reason);
+ trace_afs_cell(cell->debug_id, r, a, reason);
return cell;
}
@@ -593,7 +590,7 @@ void afs_unuse_cell(struct afs_net *net, struct afs_cell *cell, enum afs_cell_tr
{
unsigned int debug_id;
time64_t now, expire_delay;
- int u, a;
+ int r, a;
if (!cell)
return;
@@ -607,9 +604,9 @@ void afs_unuse_cell(struct afs_net *net, struct afs_cell *cell, enum afs_cell_tr
expire_delay = afs_cell_gc_delay;
debug_id = cell->debug_id;
- u = atomic_read(&cell->ref);
+ r = refcount_read(&cell->ref);
a = atomic_dec_return(&cell->active);
- trace_afs_cell(debug_id, u, a, reason);
+ trace_afs_cell(debug_id, r, a, reason);
WARN_ON(a == 0);
if (a == 1)
/* 'cell' may now be garbage collected. */
@@ -621,11 +618,11 @@ void afs_unuse_cell(struct afs_net *net, struct afs_cell *cell, enum afs_cell_tr
*/
void afs_see_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
- int u, a;
+ int r, a;
- u = atomic_read(&cell->ref);
+ r = refcount_read(&cell->ref);
a = atomic_read(&cell->active);
- trace_afs_cell(cell->debug_id, u, a, reason);
+ trace_afs_cell(cell->debug_id, r, a, reason);
}
/*
@@ -739,7 +736,7 @@ again:
active = 1;
if (atomic_try_cmpxchg_relaxed(&cell->active, &active, 0)) {
rb_erase(&cell->net_node, &net->cells);
- trace_afs_cell(cell->debug_id, atomic_read(&cell->ref), 0,
+ trace_afs_cell(cell->debug_id, refcount_read(&cell->ref), 0,
afs_cell_trace_unuse_delete);
smp_store_release(&cell->state, AFS_CELL_REMOVED);
}
@@ -866,7 +863,7 @@ void afs_manage_cells(struct work_struct *work)
bool sched_cell = false;
active = atomic_read(&cell->active);
- trace_afs_cell(cell->debug_id, atomic_read(&cell->ref),
+ trace_afs_cell(cell->debug_id, refcount_read(&cell->ref),
active, afs_cell_trace_manage);
ASSERTCMP(active, >=, 1);
@@ -874,7 +871,7 @@ void afs_manage_cells(struct work_struct *work)
if (purging) {
if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags)) {
active = atomic_dec_return(&cell->active);
- trace_afs_cell(cell->debug_id, atomic_read(&cell->ref),
+ trace_afs_cell(cell->debug_id, refcount_read(&cell->ref),
active, afs_cell_trace_unuse_pin);
}
}
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index a3f5de28be79..0a090d614e76 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -212,8 +212,8 @@ static void SRXAFSCB_CallBack(struct work_struct *work)
* to maintain cache coherency.
*/
if (call->server) {
- trace_afs_server(call->server,
- atomic_read(&call->server->ref),
+ trace_afs_server(call->server->debug_id,
+ refcount_read(&call->server->ref),
atomic_read(&call->server->active),
afs_server_trace_callback);
afs_break_callbacks(call->server, call->count, call->request);
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 64dab70d4a4f..6d3a3dbe4928 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -104,12 +104,14 @@ static int afs_inode_init_from_status(struct afs_operation *op,
inode->i_op = &afs_file_inode_operations;
inode->i_fop = &afs_file_operations;
inode->i_mapping->a_ops = &afs_file_aops;
+ mapping_set_large_folios(inode->i_mapping);
break;
case AFS_FTYPE_DIR:
inode->i_mode = S_IFDIR | (status->mode & S_IALLUGO);
inode->i_op = &afs_dir_inode_operations;
inode->i_fop = &afs_dir_file_operations;
inode->i_mapping->a_ops = &afs_dir_aops;
+ mapping_set_large_folios(inode->i_mapping);
break;
case AFS_FTYPE_SYMLINK:
/* Symlinks with a mode of 0644 are actually mountpoints. */
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index a6f25d9e75b5..64ad55494349 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -122,7 +122,7 @@ struct afs_call {
};
struct afs_operation *op;
unsigned int server_index;
- atomic_t usage;
+ refcount_t ref;
enum afs_call_state state;
spinlock_t state_lock;
int error; /* error code */
@@ -365,7 +365,7 @@ struct afs_cell {
struct hlist_node proc_link; /* /proc cell list link */
time64_t dns_expiry; /* Time AFSDB/SRV record expires */
time64_t last_inactive; /* Time of last drop of usage count */
- atomic_t ref; /* Struct refcount */
+ refcount_t ref; /* Struct refcount */
atomic_t active; /* Active usage counter */
unsigned long flags;
#define AFS_CELL_FL_NO_GC 0 /* The cell was added manually, don't auto-gc */
@@ -410,7 +410,7 @@ struct afs_vlserver {
#define AFS_VLSERVER_FL_IS_YFS 2 /* Server is YFS not AFS */
#define AFS_VLSERVER_FL_RESPONDING 3 /* VL server is responding */
rwlock_t lock; /* Lock on addresses */
- atomic_t usage;
+ refcount_t ref;
unsigned int rtt; /* Server's current RTT in uS */
/* Probe state */
@@ -446,7 +446,7 @@ struct afs_vlserver_entry {
struct afs_vlserver_list {
struct rcu_head rcu;
- atomic_t usage;
+ refcount_t ref;
u8 nr_servers;
u8 index; /* Server currently in use */
u8 preferred; /* Preferred server */
@@ -517,7 +517,7 @@ struct afs_server {
#define AFS_SERVER_FL_NO_IBULK 17 /* Fileserver doesn't support FS.InlineBulkStatus */
#define AFS_SERVER_FL_NO_RM2 18 /* Fileserver doesn't support YFS.RemoveFile2 */
#define AFS_SERVER_FL_HAS_FS64 19 /* Fileserver supports FS.{Fetch,Store}Data64 */
- atomic_t ref; /* Object refcount */
+ refcount_t ref; /* Object refcount */
atomic_t active; /* Active user count */
u32 addr_version; /* Address list version */
unsigned int rtt; /* Server's current RTT in uS */
@@ -571,7 +571,7 @@ struct afs_volume {
struct rcu_head rcu;
afs_volid_t vid; /* volume ID */
};
- atomic_t usage;
+ refcount_t ref;
time64_t update_at; /* Time at which to next update */
struct afs_cell *cell; /* Cell to which belongs (pins ref) */
struct rb_node cell_node; /* Link in cell->volumes */
@@ -1493,14 +1493,14 @@ extern int afs_end_vlserver_operation(struct afs_vl_cursor *);
*/
static inline struct afs_vlserver *afs_get_vlserver(struct afs_vlserver *vlserver)
{
- atomic_inc(&vlserver->usage);
+ refcount_inc(&vlserver->ref);
return vlserver;
}
static inline struct afs_vlserver_list *afs_get_vlserverlist(struct afs_vlserver_list *vllist)
{
if (vllist)
- atomic_inc(&vllist->usage);
+ refcount_inc(&vllist->ref);
return vllist;
}
diff --git a/fs/afs/proc.c b/fs/afs/proc.c
index e1b863449296..2a0c83d71565 100644
--- a/fs/afs/proc.c
+++ b/fs/afs/proc.c
@@ -47,7 +47,7 @@ static int afs_proc_cells_show(struct seq_file *m, void *v)
/* display one cell per line on subsequent lines */
seq_printf(m, "%3u %3u %6lld %2u %2u %s\n",
- atomic_read(&cell->ref),
+ refcount_read(&cell->ref),
atomic_read(&cell->active),
cell->dns_expiry - ktime_get_real_seconds(),
vllist ? vllist->nr_servers : 0,
@@ -217,7 +217,7 @@ static int afs_proc_cell_volumes_show(struct seq_file *m, void *v)
}
seq_printf(m, "%3d %08llx %s %s\n",
- atomic_read(&vol->usage), vol->vid,
+ refcount_read(&vol->ref), vol->vid,
afs_vol_types[vol->type],
vol->name);
@@ -388,7 +388,7 @@ static int afs_proc_servers_show(struct seq_file *m, void *v)
alist = rcu_dereference(server->addresses);
seq_printf(m, "%pU %3d %3d\n",
&server->uuid,
- atomic_read(&server->ref),
+ refcount_read(&server->ref),
atomic_read(&server->active));
seq_printf(m, " - info: fl=%lx rtt=%u brk=%x\n",
server->flags, server->rtt, server->cb_s_break);
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index a5434f3e57c6..d5c4785c862d 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -145,14 +145,14 @@ static struct afs_call *afs_alloc_call(struct afs_net *net,
call->type = type;
call->net = net;
call->debug_id = atomic_inc_return(&rxrpc_debug_id);
- atomic_set(&call->usage, 1);
+ refcount_set(&call->ref, 1);
INIT_WORK(&call->async_work, afs_process_async_call);
init_waitqueue_head(&call->waitq);
spin_lock_init(&call->state_lock);
call->iter = &call->def_iter;
o = atomic_inc_return(&net->nr_outstanding_calls);
- trace_afs_call(call, afs_call_trace_alloc, 1, o,
+ trace_afs_call(call->debug_id, afs_call_trace_alloc, 1, o,
__builtin_return_address(0));
return call;
}
@@ -163,14 +163,16 @@ static struct afs_call *afs_alloc_call(struct afs_net *net,
void afs_put_call(struct afs_call *call)
{
struct afs_net *net = call->net;
- int n = atomic_dec_return(&call->usage);
- int o = atomic_read(&net->nr_outstanding_calls);
+ unsigned int debug_id = call->debug_id;
+ bool zero;
+ int r, o;
- trace_afs_call(call, afs_call_trace_put, n, o,
+ zero = __refcount_dec_and_test(&call->ref, &r);
+ o = atomic_read(&net->nr_outstanding_calls);
+ trace_afs_call(debug_id, afs_call_trace_put, r - 1, o,
__builtin_return_address(0));
- ASSERTCMP(n, >=, 0);
- if (n == 0) {
+ if (zero) {
ASSERT(!work_pending(&call->async_work));
ASSERT(call->type->name != NULL);
@@ -185,7 +187,7 @@ void afs_put_call(struct afs_call *call)
afs_put_addrlist(call->alist);
kfree(call->request);
- trace_afs_call(call, afs_call_trace_free, 0, o,
+ trace_afs_call(call->debug_id, afs_call_trace_free, 0, o,
__builtin_return_address(0));
kfree(call);
@@ -198,9 +200,11 @@ void afs_put_call(struct afs_call *call)
static struct afs_call *afs_get_call(struct afs_call *call,
enum afs_call_trace why)
{
- int u = atomic_inc_return(&call->usage);
+ int r;
- trace_afs_call(call, why, u,
+ __refcount_inc(&call->ref, &r);
+
+ trace_afs_call(call->debug_id, why, r + 1,
atomic_read(&call->net->nr_outstanding_calls),
__builtin_return_address(0));
return call;
@@ -668,14 +672,13 @@ static void afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall,
unsigned long call_user_ID)
{
struct afs_call *call = (struct afs_call *)call_user_ID;
- int u;
+ int r;
trace_afs_notify_call(rxcall, call);
call->need_attention = true;
- u = atomic_fetch_add_unless(&call->usage, 1, 0);
- if (u != 0) {
- trace_afs_call(call, afs_call_trace_wake, u + 1,
+ if (__refcount_inc_not_zero(&call->ref, &r)) {
+ trace_afs_call(call->debug_id, afs_call_trace_wake, r + 1,
atomic_read(&call->net->nr_outstanding_calls),
__builtin_return_address(0));
diff --git a/fs/afs/server.c b/fs/afs/server.c
index 6e5b9a19b234..4981baf97835 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -228,7 +228,7 @@ static struct afs_server *afs_alloc_server(struct afs_cell *cell,
if (!server)
goto enomem;
- atomic_set(&server->ref, 1);
+ refcount_set(&server->ref, 1);
atomic_set(&server->active, 1);
server->debug_id = atomic_inc_return(&afs_server_debug_id);
RCU_INIT_POINTER(server->addresses, alist);
@@ -243,7 +243,7 @@ static struct afs_server *afs_alloc_server(struct afs_cell *cell,
server->rtt = UINT_MAX;
afs_inc_servers_outstanding(net);
- trace_afs_server(server, 1, 1, afs_server_trace_alloc);
+ trace_afs_server(server->debug_id, 1, 1, afs_server_trace_alloc);
_leave(" = %p", server);
return server;
@@ -352,9 +352,12 @@ void afs_servers_timer(struct timer_list *timer)
struct afs_server *afs_get_server(struct afs_server *server,
enum afs_server_trace reason)
{
- unsigned int u = atomic_inc_return(&server->ref);
+ unsigned int a;
+ int r;
- trace_afs_server(server, u, atomic_read(&server->active), reason);
+ __refcount_inc(&server->ref, &r);
+ a = atomic_read(&server->active);
+ trace_afs_server(server->debug_id, r + 1, a, reason);
return server;
}
@@ -364,14 +367,14 @@ struct afs_server *afs_get_server(struct afs_server *server,
static struct afs_server *afs_maybe_use_server(struct afs_server *server,
enum afs_server_trace reason)
{
- unsigned int r = atomic_fetch_add_unless(&server->ref, 1, 0);
unsigned int a;
+ int r;
- if (r == 0)
+ if (!__refcount_inc_not_zero(&server->ref, &r))
return NULL;
a = atomic_inc_return(&server->active);
- trace_afs_server(server, r, a, reason);
+ trace_afs_server(server->debug_id, r + 1, a, reason);
return server;
}
@@ -380,10 +383,13 @@ static struct afs_server *afs_maybe_use_server(struct afs_server *server,
*/
struct afs_server *afs_use_server(struct afs_server *server, enum afs_server_trace reason)
{
- unsigned int r = atomic_inc_return(&server->ref);
- unsigned int a = atomic_inc_return(&server->active);
+ unsigned int a;
+ int r;
- trace_afs_server(server, r, a, reason);
+ __refcount_inc(&server->ref, &r);
+ a = atomic_inc_return(&server->active);
+
+ trace_afs_server(server->debug_id, r + 1, a, reason);
return server;
}
@@ -393,14 +399,17 @@ struct afs_server *afs_use_server(struct afs_server *server, enum afs_server_tra
void afs_put_server(struct afs_net *net, struct afs_server *server,
enum afs_server_trace reason)
{
- unsigned int usage;
+ unsigned int a, debug_id = server->debug_id;
+ bool zero;
+ int r;
if (!server)
return;
- usage = atomic_dec_return(&server->ref);
- trace_afs_server(server, usage, atomic_read(&server->active), reason);
- if (unlikely(usage == 0))
+ a = atomic_read(&server->active);
+ zero = __refcount_dec_and_test(&server->ref, &r);
+ trace_afs_server(debug_id, r - 1, a, reason);
+ if (unlikely(zero))
__afs_put_server(net, server);
}
@@ -436,7 +445,7 @@ static void afs_server_rcu(struct rcu_head *rcu)
{
struct afs_server *server = container_of(rcu, struct afs_server, rcu);
- trace_afs_server(server, atomic_read(&server->ref),
+ trace_afs_server(server->debug_id, refcount_read(&server->ref),
atomic_read(&server->active), afs_server_trace_free);
afs_put_addrlist(rcu_access_pointer(server->addresses));
kfree(server);
@@ -487,7 +496,7 @@ static void afs_gc_servers(struct afs_net *net, struct afs_server *gc_list)
active = atomic_read(&server->active);
if (active == 0) {
- trace_afs_server(server, atomic_read(&server->ref),
+ trace_afs_server(server->debug_id, refcount_read(&server->ref),
active, afs_server_trace_gc);
next = rcu_dereference_protected(
server->uuid_next, lockdep_is_held(&net->fs_lock.lock));
@@ -553,7 +562,7 @@ void afs_manage_servers(struct work_struct *work)
_debug("manage %pU %u", &server->uuid, active);
if (purging) {
- trace_afs_server(server, atomic_read(&server->ref),
+ trace_afs_server(server->debug_id, refcount_read(&server->ref),
active, afs_server_trace_purging);
if (active != 0)
pr_notice("Can't purge s=%08x\n", server->debug_id);
@@ -633,7 +642,8 @@ static noinline bool afs_update_server_record(struct afs_operation *op,
_enter("");
- trace_afs_server(server, atomic_read(&server->ref), atomic_read(&server->active),
+ trace_afs_server(server->debug_id, refcount_read(&server->ref),
+ atomic_read(&server->active),
afs_server_trace_update);
alist = afs_vl_lookup_addrs(op->volume->cell, op->key, &server->uuid);
diff --git a/fs/afs/vl_list.c b/fs/afs/vl_list.c
index 38b2ba1d9ec0..acc48216136a 100644
--- a/fs/afs/vl_list.c
+++ b/fs/afs/vl_list.c
@@ -17,7 +17,7 @@ struct afs_vlserver *afs_alloc_vlserver(const char *name, size_t name_len,
vlserver = kzalloc(struct_size(vlserver, name, name_len + 1),
GFP_KERNEL);
if (vlserver) {
- atomic_set(&vlserver->usage, 1);
+ refcount_set(&vlserver->ref, 1);
rwlock_init(&vlserver->lock);
init_waitqueue_head(&vlserver->probe_wq);
spin_lock_init(&vlserver->probe_lock);
@@ -39,13 +39,9 @@ static void afs_vlserver_rcu(struct rcu_head *rcu)
void afs_put_vlserver(struct afs_net *net, struct afs_vlserver *vlserver)
{
- if (vlserver) {
- unsigned int u = atomic_dec_return(&vlserver->usage);
- //_debug("VL PUT %p{%u}", vlserver, u);
-
- if (u == 0)
- call_rcu(&vlserver->rcu, afs_vlserver_rcu);
- }
+ if (vlserver &&
+ refcount_dec_and_test(&vlserver->ref))
+ call_rcu(&vlserver->rcu, afs_vlserver_rcu);
}
struct afs_vlserver_list *afs_alloc_vlserver_list(unsigned int nr_servers)
@@ -54,7 +50,7 @@ struct afs_vlserver_list *afs_alloc_vlserver_list(unsigned int nr_servers)
vllist = kzalloc(struct_size(vllist, servers, nr_servers), GFP_KERNEL);
if (vllist) {
- atomic_set(&vllist->usage, 1);
+ refcount_set(&vllist->ref, 1);
rwlock_init(&vllist->lock);
}
@@ -64,10 +60,7 @@ struct afs_vlserver_list *afs_alloc_vlserver_list(unsigned int nr_servers)
void afs_put_vlserverlist(struct afs_net *net, struct afs_vlserver_list *vllist)
{
if (vllist) {
- unsigned int u = atomic_dec_return(&vllist->usage);
-
- //_debug("VLLS PUT %p{%u}", vllist, u);
- if (u == 0) {
+ if (refcount_dec_and_test(&vllist->ref)) {
int i;
for (i = 0; i < vllist->nr_servers; i++) {
diff --git a/fs/afs/volume.c b/fs/afs/volume.c
index cc665cef0abe..f4937029dcd7 100644
--- a/fs/afs/volume.c
+++ b/fs/afs/volume.c
@@ -52,7 +52,7 @@ static void afs_remove_volume_from_cell(struct afs_volume *volume)
struct afs_cell *cell = volume->cell;
if (!hlist_unhashed(&volume->proc_link)) {
- trace_afs_volume(volume->vid, atomic_read(&volume->usage),
+ trace_afs_volume(volume->vid, refcount_read(&volume->ref),
afs_volume_trace_remove);
write_seqlock(&cell->volume_lock);
hlist_del_rcu(&volume->proc_link);
@@ -87,7 +87,7 @@ static struct afs_volume *afs_alloc_volume(struct afs_fs_context *params,
volume->type_force = params->force;
volume->name_len = vldb->name_len;
- atomic_set(&volume->usage, 1);
+ refcount_set(&volume->ref, 1);
INIT_HLIST_NODE(&volume->proc_link);
rwlock_init(&volume->servers_lock);
rwlock_init(&volume->cb_v_break_lock);
@@ -228,7 +228,7 @@ static void afs_destroy_volume(struct afs_net *net, struct afs_volume *volume)
afs_remove_volume_from_cell(volume);
afs_put_serverlist(net, rcu_access_pointer(volume->servers));
afs_put_cell(volume->cell, afs_cell_trace_put_vol);
- trace_afs_volume(volume->vid, atomic_read(&volume->usage),
+ trace_afs_volume(volume->vid, refcount_read(&volume->ref),
afs_volume_trace_free);
kfree_rcu(volume, rcu);
@@ -242,8 +242,10 @@ struct afs_volume *afs_get_volume(struct afs_volume *volume,
enum afs_volume_trace reason)
{
if (volume) {
- int u = atomic_inc_return(&volume->usage);
- trace_afs_volume(volume->vid, u, reason);
+ int r;
+
+ __refcount_inc(&volume->ref, &r);
+ trace_afs_volume(volume->vid, r + 1, reason);
}
return volume;
}
@@ -257,9 +259,12 @@ void afs_put_volume(struct afs_net *net, struct afs_volume *volume,
{
if (volume) {
afs_volid_t vid = volume->vid;
- int u = atomic_dec_return(&volume->usage);
- trace_afs_volume(vid, u, reason);
- if (u == 0)
+ bool zero;
+ int r;
+
+ zero = __refcount_dec_and_test(&volume->ref, &r);
+ trace_afs_volume(vid, r - 1, reason);
+ if (zero)
afs_destroy_volume(net, volume);
}
}
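Taken together, the afs hunks above follow one conversion pattern: each bare atomic_t usage counter becomes a refcount_t, and the tracepoints keep reporting the post-operation count by switching to __refcount_inc()/__refcount_dec_and_test(), whose second argument returns the value the counter had before the operation. A generic sketch of that pattern (struct obj and trace_obj_ref() are invented for illustration; the refcount helpers are the real kernel API):

#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {
        refcount_t ref;
};

static struct obj *obj_get(struct obj *o)
{
        int r;

        __refcount_inc(&o->ref, &r);    /* r is the value before the increment */
        trace_obj_ref(o, r + 1);        /* hypothetical tracepoint: new count */
        return o;
}

static void obj_put(struct obj *o)
{
        bool zero;
        int r;

        zero = __refcount_dec_and_test(&o->ref, &r);
        trace_obj_ref(o, r - 1);        /* new count after the decrement */
        if (zero)
                kfree(o);               /* the afs code uses call_rcu()/kfree_rcu() here */
}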
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 2c885b22de34..9ebdd36eaf2f 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -91,7 +91,7 @@ try_again:
goto flush_conflicting_write;
}
- *_page = &folio->page;
+ *_page = folio_file_page(folio, pos / PAGE_SIZE);
_leave(" = 0");
return 0;
diff --git a/fs/attr.c b/fs/attr.c
index b5b8835ddf15..1552a5f23d6b 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -183,6 +183,8 @@ EXPORT_SYMBOL(setattr_prepare);
*/
int inode_newsize_ok(const struct inode *inode, loff_t offset)
{
+ if (offset < 0)
+ return -EINVAL;
if (inode->i_size < offset) {
unsigned long limit;
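With the two added lines, inode_newsize_ok() now rejects a negative requested size before it ever reaches the i_size comparison or the rlimit check. A small caller-side sketch of how setattr/truncate paths typically use it (the call shape is the usual one; which other error codes can come back depends on the caller's limits):

/* Validate a requested size before committing to a truncate. */
error = inode_newsize_ok(inode, attr->ia_size);
if (error)
        return error;   /* now includes -EINVAL when attr->ia_size < 0 */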
diff --git a/fs/autofs/autofs_i.h b/fs/autofs/autofs_i.h
index 918826eaceea..d5a44fa88acf 100644
--- a/fs/autofs/autofs_i.h
+++ b/fs/autofs/autofs_i.h
@@ -51,8 +51,6 @@ extern struct file_system_type autofs_fs_type;
*/
struct autofs_info {
struct dentry *dentry;
- struct inode *inode;
-
int flags;
struct completion expire_complete;
@@ -148,6 +146,11 @@ static inline int autofs_oz_mode(struct autofs_sb_info *sbi)
task_pgrp(current) == sbi->oz_pgrp);
}
+static inline bool autofs_empty(struct autofs_info *ino)
+{
+ return ino->count < 2;
+}
+
struct inode *autofs_get_inode(struct super_block *, umode_t);
void autofs_free_ino(struct autofs_info *);
diff --git a/fs/autofs/expire.c b/fs/autofs/expire.c
index b3fefd6237c3..038b3d2d9f57 100644
--- a/fs/autofs/expire.c
+++ b/fs/autofs/expire.c
@@ -371,7 +371,7 @@ static struct dentry *should_expire(struct dentry *dentry,
return NULL;
}
- if (simple_empty(dentry))
+ if (autofs_empty(ino))
return NULL;
/* Case 2: tree mount, expire iff entire tree is not busy */
diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c
index 9edf243713eb..affa70360b1f 100644
--- a/fs/autofs/inode.c
+++ b/fs/autofs/inode.c
@@ -20,6 +20,7 @@ struct autofs_info *autofs_new_ino(struct autofs_sb_info *sbi)
INIT_LIST_HEAD(&ino->expiring);
ino->last_used = jiffies;
ino->sbi = sbi;
+ ino->count = 1;
}
return ino;
}
diff --git a/fs/autofs/root.c b/fs/autofs/root.c
index 91fe4548c256..ca03c1cae2be 100644
--- a/fs/autofs/root.c
+++ b/fs/autofs/root.c
@@ -10,6 +10,7 @@
#include "autofs_i.h"
+static int autofs_dir_permission(struct user_namespace *, struct inode *, int);
static int autofs_dir_symlink(struct user_namespace *, struct inode *,
struct dentry *, const char *);
static int autofs_dir_unlink(struct inode *, struct dentry *);
@@ -50,6 +51,7 @@ const struct file_operations autofs_dir_operations = {
const struct inode_operations autofs_dir_inode_operations = {
.lookup = autofs_lookup,
+ .permission = autofs_dir_permission,
.unlink = autofs_dir_unlink,
.symlink = autofs_dir_symlink,
.mkdir = autofs_dir_mkdir,
@@ -77,6 +79,7 @@ static int autofs_dir_open(struct inode *inode, struct file *file)
{
struct dentry *dentry = file->f_path.dentry;
struct autofs_sb_info *sbi = autofs_sbi(dentry->d_sb);
+ struct autofs_info *ino = autofs_dentry_ino(dentry);
pr_debug("file=%p dentry=%p %pd\n", file, dentry, dentry);
@@ -93,7 +96,7 @@ static int autofs_dir_open(struct inode *inode, struct file *file)
* it.
*/
spin_lock(&sbi->lookup_lock);
- if (!path_is_mountpoint(&file->f_path) && simple_empty(dentry)) {
+ if (!path_is_mountpoint(&file->f_path) && autofs_empty(ino)) {
spin_unlock(&sbi->lookup_lock);
return -ENOENT;
}
@@ -288,9 +291,26 @@ static struct dentry *autofs_mountpoint_changed(struct path *path)
struct dentry *dentry = path->dentry;
struct autofs_sb_info *sbi = autofs_sbi(dentry->d_sb);
- /*
- * If this is an indirect mount the dentry could have gone away
- * as a result of an expire and a new one created.
+ /* If this is an indirect mount the dentry could have gone away
+ * and a new one created.
+ *
+ * This is unusual and I can't now remember the case for which
+ * it was originally added. But an example of how this can
+ * happen is an autofs indirect mount that has the "browse"
+ * option set and also has the "symlink" option in the autofs
+ * map entry. In this case the daemon will remove the browse
+ * directory and create a symlink as the mount, leaving the
+ * struct path stale.
+ *
+ * Another not so obvious case is when a mount in an autofs
+ * indirect mount that uses the "nobrowse" option is being
+ * expired at the same time as a path walk. If the mount has
+ * been umounted, but the mount point directory is seen before
+ * it becomes unhashed (during a lockless path walk) while a
+ * stat family system call is made, the mount won't be re-mounted
+ * as it should be. In this case the mount point that's been
+ * removed (by the daemon) will be stale and a new mount point
+ * dentry will be created.
*/
if (autofs_type_indirect(sbi->type) && d_unhashed(dentry)) {
struct dentry *parent = dentry->d_parent;
@@ -362,7 +382,7 @@ static struct vfsmount *autofs_d_automount(struct path *path)
* the mount never trigger mounts themselves (they have an
* autofs trigger mount mounted on them). But v4 pseudo direct
* mounts do need the leaves to trigger mounts. In this case
- * we have no choice but to use the list_empty() check and
+ * we have no choice but to use the autofs_empty() check and
* require user space behave.
*/
if (sbi->version > 4) {
@@ -371,7 +391,7 @@ static struct vfsmount *autofs_d_automount(struct path *path)
goto done;
}
} else {
- if (!simple_empty(dentry)) {
+ if (!autofs_empty(ino)) {
spin_unlock(&sbi->fs_lock);
goto done;
}
@@ -426,9 +446,8 @@ static int autofs_d_manage(const struct path *path, bool rcu_walk)
if (rcu_walk) {
/* We don't need fs_lock in rcu_walk mode,
- * just testing 'AUTOFS_INFO_NO_RCU' is enough.
- * simple_empty() takes a spinlock, so leave it
- * to last.
+ * just testing 'AUTOFS_INF_WANT_EXPIRE' is enough.
+ *
* We only return -EISDIR when certain this isn't
* a mount-trap.
*/
@@ -441,9 +460,7 @@ static int autofs_d_manage(const struct path *path, bool rcu_walk)
inode = d_inode_rcu(dentry);
if (inode && S_ISLNK(inode->i_mode))
return -EISDIR;
- if (list_empty(&dentry->d_subdirs))
- return 0;
- if (!simple_empty(dentry))
+ if (!autofs_empty(ino))
return -EISDIR;
return 0;
}
@@ -463,7 +480,7 @@ static int autofs_d_manage(const struct path *path, bool rcu_walk)
* we can avoid needless calls ->d_automount() and avoid
* an incorrect ELOOP error return.
*/
- if ((!path_is_mountpoint(path) && !simple_empty(dentry)) ||
+ if ((!path_is_mountpoint(path) && !autofs_empty(ino)) ||
(d_really_is_positive(dentry) && d_is_symlink(dentry)))
status = -EISDIR;
}
@@ -526,11 +543,30 @@ static struct dentry *autofs_lookup(struct inode *dir,
return NULL;
}
+static int autofs_dir_permission(struct user_namespace *mnt_userns,
+ struct inode *inode, int mask)
+{
+ if (mask & MAY_WRITE) {
+ struct autofs_sb_info *sbi = autofs_sbi(inode->i_sb);
+
+ if (!autofs_oz_mode(sbi))
+ return -EACCES;
+
+ /* autofs_oz_mode() needs to allow path walks when the
+ * autofs mount is catatonic but the state of an autofs
+ * file system needs to be preserved over restarts.
+ */
+ if (sbi->flags & AUTOFS_SBI_CATATONIC)
+ return -EACCES;
+ }
+
+ return generic_permission(mnt_userns, inode, mask);
+}
+
static int autofs_dir_symlink(struct user_namespace *mnt_userns,
struct inode *dir, struct dentry *dentry,
const char *symname)
{
- struct autofs_sb_info *sbi = autofs_sbi(dir->i_sb);
struct autofs_info *ino = autofs_dentry_ino(dentry);
struct autofs_info *p_ino;
struct inode *inode;
@@ -539,16 +575,6 @@ static int autofs_dir_symlink(struct user_namespace *mnt_userns,
pr_debug("%s <- %pd\n", symname, dentry);
- if (!autofs_oz_mode(sbi))
- return -EACCES;
-
- /* autofs_oz_mode() needs to allow path walks when the
- * autofs mount is catatonic but the state of an autofs
- * file system needs to be preserved over restarts.
- */
- if (sbi->flags & AUTOFS_SBI_CATATONIC)
- return -EACCES;
-
BUG_ON(!ino);
autofs_clean_ino(ino);
@@ -571,7 +597,6 @@ static int autofs_dir_symlink(struct user_namespace *mnt_userns,
d_add(dentry, inode);
dget(dentry);
- ino->count++;
p_ino = autofs_dentry_ino(dentry->d_parent);
p_ino->count++;
@@ -601,17 +626,6 @@ static int autofs_dir_unlink(struct inode *dir, struct dentry *dentry)
struct autofs_info *ino = autofs_dentry_ino(dentry);
struct autofs_info *p_ino;
- if (!autofs_oz_mode(sbi))
- return -EACCES;
-
- /* autofs_oz_mode() needs to allow path walks when the
- * autofs mount is catatonic but the state of an autofs
- * file system needs to be preserved over restarts.
- */
- if (sbi->flags & AUTOFS_SBI_CATATONIC)
- return -EACCES;
-
- ino->count--;
p_ino = autofs_dentry_ino(dentry->d_parent);
p_ino->count--;
dput(ino->dentry);
@@ -683,16 +697,6 @@ static int autofs_dir_rmdir(struct inode *dir, struct dentry *dentry)
pr_debug("dentry %p, removing %pd\n", dentry, dentry);
- if (!autofs_oz_mode(sbi))
- return -EACCES;
-
- /* autofs_oz_mode() needs to allow path walks when the
- * autofs mount is catatonic but the state of an autofs
- * file system needs to be preserved over restarts.
- */
- if (sbi->flags & AUTOFS_SBI_CATATONIC)
- return -EACCES;
-
if (ino->count != 1)
return -ENOTEMPTY;
@@ -704,7 +708,6 @@ static int autofs_dir_rmdir(struct inode *dir, struct dentry *dentry)
if (sbi->version < 5)
autofs_clear_leaf_automount_flags(dentry);
- ino->count--;
p_ino = autofs_dentry_ino(dentry->d_parent);
p_ino->count--;
dput(ino->dentry);
@@ -726,16 +729,6 @@ static int autofs_dir_mkdir(struct user_namespace *mnt_userns,
struct autofs_info *p_ino;
struct inode *inode;
- if (!autofs_oz_mode(sbi))
- return -EACCES;
-
- /* autofs_oz_mode() needs to allow path walks when the
- * autofs mount is catatonic but the state of an autofs
- * file system needs to be preserved over restarts.
- */
- if (sbi->flags & AUTOFS_SBI_CATATONIC)
- return -EACCES;
-
pr_debug("dentry %p, creating %pd\n", dentry, dentry);
BUG_ON(!ino);
@@ -753,7 +746,6 @@ static int autofs_dir_mkdir(struct user_namespace *mnt_userns,
autofs_set_leaf_automount_flags(dentry);
dget(dentry);
- ino->count++;
p_ino = autofs_dentry_ino(dentry->d_parent);
p_ino->count++;
inc_nlink(dir);
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index c3aecfb0a71d..e0375ba9d0fe 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -440,39 +440,26 @@ void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
btrfs_put_caching_control(caching_ctl);
}
-int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache)
+static int btrfs_caching_ctl_wait_done(struct btrfs_block_group *cache,
+ struct btrfs_caching_control *caching_ctl)
+{
+ wait_event(caching_ctl->wait, btrfs_block_group_done(cache));
+ return cache->cached == BTRFS_CACHE_ERROR ? -EIO : 0;
+}
+
+static int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache)
{
struct btrfs_caching_control *caching_ctl;
- int ret = 0;
+ int ret;
caching_ctl = btrfs_get_caching_control(cache);
if (!caching_ctl)
return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
-
- wait_event(caching_ctl->wait, btrfs_block_group_done(cache));
- if (cache->cached == BTRFS_CACHE_ERROR)
- ret = -EIO;
+ ret = btrfs_caching_ctl_wait_done(cache, caching_ctl);
btrfs_put_caching_control(caching_ctl);
return ret;
}
-static bool space_cache_v1_done(struct btrfs_block_group *cache)
-{
- bool ret;
-
- spin_lock(&cache->lock);
- ret = cache->cached != BTRFS_CACHE_FAST;
- spin_unlock(&cache->lock);
-
- return ret;
-}
-
-void btrfs_wait_space_cache_v1_finished(struct btrfs_block_group *cache,
- struct btrfs_caching_control *caching_ctl)
-{
- wait_event(caching_ctl->wait, space_cache_v1_done(cache));
-}
-
#ifdef CONFIG_BTRFS_DEBUG
static void fragment_free_space(struct btrfs_block_group *block_group)
{
@@ -750,9 +737,8 @@ done:
btrfs_put_block_group(block_group);
}
-int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only)
+int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait)
{
- DEFINE_WAIT(wait);
struct btrfs_fs_info *fs_info = cache->fs_info;
struct btrfs_caching_control *caching_ctl = NULL;
int ret = 0;
@@ -785,10 +771,7 @@ int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only
}
WARN_ON(cache->caching_ctl);
cache->caching_ctl = caching_ctl;
- if (btrfs_test_opt(fs_info, SPACE_CACHE))
- cache->cached = BTRFS_CACHE_FAST;
- else
- cache->cached = BTRFS_CACHE_STARTED;
+ cache->cached = BTRFS_CACHE_STARTED;
cache->has_caching_ctl = 1;
spin_unlock(&cache->lock);
@@ -801,8 +784,8 @@ int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only
btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
out:
- if (load_cache_only && caching_ctl)
- btrfs_wait_space_cache_v1_finished(cache, caching_ctl);
+ if (wait && caching_ctl)
+ ret = btrfs_caching_ctl_wait_done(cache, caching_ctl);
if (caching_ctl)
btrfs_put_caching_control(caching_ctl);
@@ -1640,9 +1623,11 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
div64_u64(zone_unusable * 100, bg->length));
trace_btrfs_reclaim_block_group(bg);
ret = btrfs_relocate_chunk(fs_info, bg->start);
- if (ret)
+ if (ret) {
+ btrfs_dec_block_group_ro(bg);
btrfs_err(fs_info, "error relocating chunk %llu",
bg->start);
+ }
next:
btrfs_put_block_group(bg);
@@ -3310,7 +3295,7 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
* space back to the block group, otherwise we will leak space.
*/
if (!alloc && !btrfs_block_group_done(cache))
- btrfs_cache_block_group(cache, 1);
+ btrfs_cache_block_group(cache, true);
byte_in_group = bytenr - cache->start;
WARN_ON(byte_in_group > cache->length);
diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
index 35e0e860cc0b..6b3cdc4cbc41 100644
--- a/fs/btrfs/block-group.h
+++ b/fs/btrfs/block-group.h
@@ -263,9 +263,7 @@ void btrfs_dec_nocow_writers(struct btrfs_block_group *bg);
void btrfs_wait_nocow_writers(struct btrfs_block_group *bg);
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
u64 num_bytes);
-int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache);
-int btrfs_cache_block_group(struct btrfs_block_group *cache,
- int load_cache_only);
+int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait);
void btrfs_put_caching_control(struct btrfs_caching_control *ctl);
struct btrfs_caching_control *btrfs_get_caching_control(
struct btrfs_block_group *cache);
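The two block-group hunks change the caching interface: btrfs_wait_block_group_cache_done() stops being exported and btrfs_cache_block_group() grows a bool wait argument in place of the old load_cache_only int. Callers that used to do the two-step cache-then-wait sequence collapse to one call, as the extent-tree hunks further below show; roughly:

/* Before: kick off caching, then wait separately for it to finish. */
btrfs_cache_block_group(cache, 1);
ret = btrfs_wait_block_group_cache_done(cache);
if (ret)
        goto out;

/* After: one call; with wait == true it returns only once the block
 * group is cached, or -EIO if caching ended in BTRFS_CACHE_ERROR. */
ret = btrfs_cache_block_group(cache, true);
if (ret)
        goto out;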
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 6e556031a8f3..ebfa35fe1c38 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -2075,6 +2075,9 @@ cow_done:
if (!p->skip_locking) {
level = btrfs_header_level(b);
+
+ btrfs_maybe_reset_lockdep_class(root, b);
+
if (level <= write_lock_level) {
btrfs_tree_lock(b);
p->locks[level] = BTRFS_WRITE_LOCK;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 4db85b9dc7ed..9ef162dbd4bc 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -505,7 +505,6 @@ struct btrfs_free_cluster {
enum btrfs_caching_type {
BTRFS_CACHE_NO,
BTRFS_CACHE_STARTED,
- BTRFS_CACHE_FAST,
BTRFS_CACHE_FINISHED,
BTRFS_CACHE_ERROR,
};
@@ -1173,6 +1172,8 @@ enum {
BTRFS_ROOT_ORPHAN_CLEANUP,
/* This root has a drop operation that was started previously. */
BTRFS_ROOT_UNFINISHED_DROP,
+ /* This reloc root needs to have its buffers lockdep class reset. */
+ BTRFS_ROOT_RESET_LOCKDEP_CLASS,
};
static inline void btrfs_wake_unfinished_drop(struct btrfs_fs_info *fs_info)
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index f43196a893ca..41cddd3ff059 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -165,7 +165,7 @@ no_valid_dev_replace_entry_found:
*/
if (btrfs_find_device(fs_info->fs_devices, &args)) {
btrfs_err(fs_info,
- "replace devid present without an active replace item");
+"replace without active item, run 'device scan --forget' on the target device");
ret = -EUCLEAN;
} else {
dev_replace->srcdev = NULL;
@@ -1129,8 +1129,7 @@ int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info)
up_write(&dev_replace->rwsem);
/* Scrub for replace must not be running in suspended state */
- ret = btrfs_scrub_cancel(fs_info);
- ASSERT(ret != -ENOTCONN);
+ btrfs_scrub_cancel(fs_info);
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 4c3166f3c725..820b1f1e6b67 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -87,88 +87,6 @@ struct async_submit_bio {
};
/*
- * Lockdep class keys for extent_buffer->lock's in this root. For a given
- * eb, the lockdep key is determined by the btrfs_root it belongs to and
- * the level the eb occupies in the tree.
- *
- * Different roots are used for different purposes and may nest inside each
- * other and they require separate keysets. As lockdep keys should be
- * static, assign keysets according to the purpose of the root as indicated
- * by btrfs_root->root_key.objectid. This ensures that all special purpose
- * roots have separate keysets.
- *
- * Lock-nesting across peer nodes is always done with the immediate parent
- * node locked thus preventing deadlock. As lockdep doesn't know this, use
- * subclass to avoid triggering lockdep warning in such cases.
- *
- * The key is set by the readpage_end_io_hook after the buffer has passed
- * csum validation but before the pages are unlocked. It is also set by
- * btrfs_init_new_buffer on freshly allocated blocks.
- *
- * We also add a check to make sure the highest level of the tree is the
- * same as our lockdep setup here. If BTRFS_MAX_LEVEL changes, this code
- * needs update as well.
- */
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# if BTRFS_MAX_LEVEL != 8
-# error
-# endif
-
-#define DEFINE_LEVEL(stem, level) \
- .names[level] = "btrfs-" stem "-0" #level,
-
-#define DEFINE_NAME(stem) \
- DEFINE_LEVEL(stem, 0) \
- DEFINE_LEVEL(stem, 1) \
- DEFINE_LEVEL(stem, 2) \
- DEFINE_LEVEL(stem, 3) \
- DEFINE_LEVEL(stem, 4) \
- DEFINE_LEVEL(stem, 5) \
- DEFINE_LEVEL(stem, 6) \
- DEFINE_LEVEL(stem, 7)
-
-static struct btrfs_lockdep_keyset {
- u64 id; /* root objectid */
- /* Longest entry: btrfs-free-space-00 */
- char names[BTRFS_MAX_LEVEL][20];
- struct lock_class_key keys[BTRFS_MAX_LEVEL];
-} btrfs_lockdep_keysets[] = {
- { .id = BTRFS_ROOT_TREE_OBJECTID, DEFINE_NAME("root") },
- { .id = BTRFS_EXTENT_TREE_OBJECTID, DEFINE_NAME("extent") },
- { .id = BTRFS_CHUNK_TREE_OBJECTID, DEFINE_NAME("chunk") },
- { .id = BTRFS_DEV_TREE_OBJECTID, DEFINE_NAME("dev") },
- { .id = BTRFS_CSUM_TREE_OBJECTID, DEFINE_NAME("csum") },
- { .id = BTRFS_QUOTA_TREE_OBJECTID, DEFINE_NAME("quota") },
- { .id = BTRFS_TREE_LOG_OBJECTID, DEFINE_NAME("log") },
- { .id = BTRFS_TREE_RELOC_OBJECTID, DEFINE_NAME("treloc") },
- { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, DEFINE_NAME("dreloc") },
- { .id = BTRFS_UUID_TREE_OBJECTID, DEFINE_NAME("uuid") },
- { .id = BTRFS_FREE_SPACE_TREE_OBJECTID, DEFINE_NAME("free-space") },
- { .id = 0, DEFINE_NAME("tree") },
-};
-
-#undef DEFINE_LEVEL
-#undef DEFINE_NAME
-
-void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
- int level)
-{
- struct btrfs_lockdep_keyset *ks;
-
- BUG_ON(level >= ARRAY_SIZE(ks->keys));
-
- /* find the matching keyset, id 0 is the default entry */
- for (ks = btrfs_lockdep_keysets; ks->id; ks++)
- if (ks->id == objectid)
- break;
-
- lockdep_set_class_and_name(&eb->lock,
- &ks->keys[level], ks->names[level]);
-}
-
-#endif
-
-/*
* Compute the csum of a btree block and store the result to provided buffer.
*/
static void csum_tree_block(struct extent_buffer *buf, u8 *result)
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 8993b428e09c..47ad8e0a2d33 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -137,14 +137,4 @@ int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags);
int btrfs_get_free_objectid(struct btrfs_root *root, u64 *objectid);
int btrfs_init_root_free_objectid(struct btrfs_root *root);
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-void btrfs_set_buffer_lockdep_class(u64 objectid,
- struct extent_buffer *eb, int level);
-#else
-static inline void btrfs_set_buffer_lockdep_class(u64 objectid,
- struct extent_buffer *eb, int level)
-{
-}
-#endif
-
#endif
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index ea3ec1e761e8..6914cd8024ba 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2551,17 +2551,10 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
return -EINVAL;
/*
- * pull in the free space cache (if any) so that our pin
- * removes the free space from the cache. We have load_only set
- * to one because the slow code to read in the free extents does check
- * the pinned extents.
+ * Fully cache the free space first so that our pin removes the free space
+ * from the cache.
*/
- btrfs_cache_block_group(cache, 1);
- /*
- * Make sure we wait until the cache is completely built in case it is
- * missing or is invalid and therefore needs to be rebuilt.
- */
- ret = btrfs_wait_block_group_cache_done(cache);
+ ret = btrfs_cache_block_group(cache, true);
if (ret)
goto out;
@@ -2584,12 +2577,7 @@ static int __exclude_logged_extent(struct btrfs_fs_info *fs_info,
if (!block_group)
return -EINVAL;
- btrfs_cache_block_group(block_group, 1);
- /*
- * Make sure we wait until the cache is completely built in case it is
- * missing or is invalid and therefore needs to be rebuilt.
- */
- ret = btrfs_wait_block_group_cache_done(block_group);
+ ret = btrfs_cache_block_group(block_group, true);
if (ret)
goto out;
@@ -4399,7 +4387,7 @@ have_block_group:
ffe_ctl->cached = btrfs_block_group_done(block_group);
if (unlikely(!ffe_ctl->cached)) {
ffe_ctl->have_caching_bg = true;
- ret = btrfs_cache_block_group(block_group, 0);
+ ret = btrfs_cache_block_group(block_group, false);
/*
* If we get ENOMEM here or something else we want to
@@ -4867,6 +4855,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *buf;
+ u64 lockdep_owner = owner;
buf = btrfs_find_create_tree_block(fs_info, bytenr, owner, level);
if (IS_ERR(buf))
@@ -4886,11 +4875,26 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
}
/*
+ * The reloc trees are just snapshots, so we need them to appear to be
+ * just like any other fs tree WRT lockdep.
+ *
+ * The exception however is in replace_path() in relocation, where we
+ * hold the lock on the original fs root and then search for the reloc
+ * root. At that point we need to make sure any reloc root buffers are
+ * set to the BTRFS_TREE_RELOC_OBJECTID lockdep class in order to make
+ * lockdep happy.
+ */
+ if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID &&
+ !test_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &root->state))
+ lockdep_owner = BTRFS_FS_TREE_OBJECTID;
+
+ /*
* This needs to stay, because we could allocate a freed block from an
* old tree into a new tree, so we need to make sure this new block is
* set to the appropriate level and owner.
*/
- btrfs_set_buffer_lockdep_class(owner, buf, level);
+ btrfs_set_buffer_lockdep_class(lockdep_owner, buf, level);
+
__btrfs_tree_lock(buf, nest);
btrfs_clean_tree_block(buf);
clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
@@ -6153,13 +6157,7 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
if (end - start >= range->minlen) {
if (!btrfs_block_group_done(cache)) {
- ret = btrfs_cache_block_group(cache, 0);
- if (ret) {
- bg_failed++;
- bg_ret = ret;
- continue;
- }
- ret = btrfs_wait_block_group_cache_done(cache);
+ ret = btrfs_cache_block_group(cache, true);
if (ret) {
bg_failed++;
bg_ret = ret;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index bfae67c593c5..cf4f19e80e2f 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3233,7 +3233,7 @@ static int btrfs_bio_add_page(struct btrfs_bio_ctrl *bio_ctrl,
u32 bio_size = bio->bi_iter.bi_size;
u32 real_size;
const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
- bool contig;
+ bool contig = false;
int ret;
ASSERT(bio);
@@ -3242,10 +3242,35 @@ static int btrfs_bio_add_page(struct btrfs_bio_ctrl *bio_ctrl,
if (bio_ctrl->compress_type != compress_type)
return 0;
- if (bio_ctrl->compress_type != BTRFS_COMPRESS_NONE)
+
+ if (bio->bi_iter.bi_size == 0) {
+ /* We can always add a page into an empty bio. */
+ contig = true;
+ } else if (bio_ctrl->compress_type == BTRFS_COMPRESS_NONE) {
+ struct bio_vec *bvec = bio_last_bvec_all(bio);
+
+ /*
+ * The contig check requires the following conditions to be met:
+ * 1) The pages belong to the same inode
+ * This is implied by the call chain.
+ *
+ * 2) The ranges have adjacent logical bytenrs
+ *
+ * 3) The ranges have adjacent file offsets
+ * This is required for the usage of btrfs_bio->file_offset.
+ */
+ if (bio_end_sector(bio) == sector &&
+ page_offset(bvec->bv_page) + bvec->bv_offset +
+ bvec->bv_len == page_offset(page) + pg_offset)
+ contig = true;
+ } else {
+ /*
+ * For compression, all IO should have its logical bytenr
+ * set to the starting bytenr of the compressed extent.
+ */
contig = bio->bi_iter.bi_sector == sector;
- else
- contig = bio_end_sector(bio) == sector;
+ }
+
if (!contig)
return 0;
@@ -6140,6 +6165,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
struct extent_buffer *exists = NULL;
struct page *p;
struct address_space *mapping = fs_info->btree_inode->i_mapping;
+ u64 lockdep_owner = owner_root;
int uptodate = 1;
int ret;
@@ -6164,7 +6190,15 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
eb = __alloc_extent_buffer(fs_info, start, len);
if (!eb)
return ERR_PTR(-ENOMEM);
- btrfs_set_buffer_lockdep_class(owner_root, eb, level);
+
+ /*
+ * The reloc trees are just snapshots, so we need them to appear to be
+ * just like any other fs tree WRT lockdep.
+ */
+ if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID)
+ lockdep_owner = BTRFS_FS_TREE_OBJECTID;
+
+ btrfs_set_buffer_lockdep_class(lockdep_owner, eb, level);
num_pages = num_extent_pages(eb);
for (i = 0; i < num_pages; i++, index++) {
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 66c822182ecc..5a3f6e0d9688 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -2482,6 +2482,7 @@ static int fill_holes(struct btrfs_trans_handle *trans,
btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
btrfs_set_file_extent_offset(leaf, fi, 0);
+ btrfs_set_file_extent_generation(leaf, fi, trans->transid);
btrfs_mark_buffer_dirty(leaf);
goto out;
}
@@ -2498,6 +2499,7 @@ static int fill_holes(struct btrfs_trans_handle *trans,
btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
btrfs_set_file_extent_offset(leaf, fi, 0);
+ btrfs_set_file_extent_generation(leaf, fi, trans->transid);
btrfs_mark_buffer_dirty(leaf);
goto out;
}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index f0c97d25b4a0..ad250892028d 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7694,6 +7694,20 @@ static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
bool unlock_extents = false;
/*
+ * We could potentially fault if we have a buffer > PAGE_SIZE, and if
+ * we're NOWAIT we may submit a bio for a partial range and return
+ * EIOCBQUEUED, which would result in an errant short read.
+ *
+ * The best way to handle this would be to allow for partial completions
+ * of iocbs, so we could submit the partial bio, return and fault in
+ * the rest of the pages, and then submit the io for the rest of the
+ * range. However we don't have that currently, so simply return
+ * -EAGAIN at this point so that the normal path is used.
+ */
+ if (!write && (flags & IOMAP_NOWAIT) && length > PAGE_SIZE)
+ return -EAGAIN;
+
+ /*
* Cap the size of reads to that usually seen in buffered I/O as we need
* to allocate a contiguous array for the checksums.
*/
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 33461b4f9c8b..9063072b399b 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -14,6 +14,93 @@
#include "locking.h"
/*
+ * Lockdep class keys for extent_buffer->lock's in this root. For a given
+ * eb, the lockdep key is determined by the btrfs_root it belongs to and
+ * the level the eb occupies in the tree.
+ *
+ * Different roots are used for different purposes and may nest inside each
+ * other and they require separate keysets. As lockdep keys should be
+ * static, assign keysets according to the purpose of the root as indicated
+ * by btrfs_root->root_key.objectid. This ensures that all special purpose
+ * roots have separate keysets.
+ *
+ * Lock-nesting across peer nodes is always done with the immediate parent
+ * node locked thus preventing deadlock. As lockdep doesn't know this, use
+ * subclass to avoid triggering lockdep warning in such cases.
+ *
+ * The key is set by the readpage_end_io_hook after the buffer has passed
+ * csum validation but before the pages are unlocked. It is also set by
+ * btrfs_init_new_buffer on freshly allocated blocks.
+ *
+ * We also add a check to make sure the highest level of the tree is the
+ * same as our lockdep setup here. If BTRFS_MAX_LEVEL changes, this code
+ * needs update as well.
+ */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#if BTRFS_MAX_LEVEL != 8
+#error
+#endif
+
+#define DEFINE_LEVEL(stem, level) \
+ .names[level] = "btrfs-" stem "-0" #level,
+
+#define DEFINE_NAME(stem) \
+ DEFINE_LEVEL(stem, 0) \
+ DEFINE_LEVEL(stem, 1) \
+ DEFINE_LEVEL(stem, 2) \
+ DEFINE_LEVEL(stem, 3) \
+ DEFINE_LEVEL(stem, 4) \
+ DEFINE_LEVEL(stem, 5) \
+ DEFINE_LEVEL(stem, 6) \
+ DEFINE_LEVEL(stem, 7)
+
+static struct btrfs_lockdep_keyset {
+ u64 id; /* root objectid */
+ /* Longest entry: btrfs-free-space-00 */
+ char names[BTRFS_MAX_LEVEL][20];
+ struct lock_class_key keys[BTRFS_MAX_LEVEL];
+} btrfs_lockdep_keysets[] = {
+ { .id = BTRFS_ROOT_TREE_OBJECTID, DEFINE_NAME("root") },
+ { .id = BTRFS_EXTENT_TREE_OBJECTID, DEFINE_NAME("extent") },
+ { .id = BTRFS_CHUNK_TREE_OBJECTID, DEFINE_NAME("chunk") },
+ { .id = BTRFS_DEV_TREE_OBJECTID, DEFINE_NAME("dev") },
+ { .id = BTRFS_CSUM_TREE_OBJECTID, DEFINE_NAME("csum") },
+ { .id = BTRFS_QUOTA_TREE_OBJECTID, DEFINE_NAME("quota") },
+ { .id = BTRFS_TREE_LOG_OBJECTID, DEFINE_NAME("log") },
+ { .id = BTRFS_TREE_RELOC_OBJECTID, DEFINE_NAME("treloc") },
+ { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, DEFINE_NAME("dreloc") },
+ { .id = BTRFS_UUID_TREE_OBJECTID, DEFINE_NAME("uuid") },
+ { .id = BTRFS_FREE_SPACE_TREE_OBJECTID, DEFINE_NAME("free-space") },
+ { .id = 0, DEFINE_NAME("tree") },
+};
+
+#undef DEFINE_LEVEL
+#undef DEFINE_NAME
+
+void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, int level)
+{
+ struct btrfs_lockdep_keyset *ks;
+
+ BUG_ON(level >= ARRAY_SIZE(ks->keys));
+
+ /* Find the matching keyset, id 0 is the default entry */
+ for (ks = btrfs_lockdep_keysets; ks->id; ks++)
+ if (ks->id == objectid)
+ break;
+
+ lockdep_set_class_and_name(&eb->lock, &ks->keys[level], ks->names[level]);
+}
+
+void btrfs_maybe_reset_lockdep_class(struct btrfs_root *root, struct extent_buffer *eb)
+{
+ if (test_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &root->state))
+ btrfs_set_buffer_lockdep_class(root->root_key.objectid,
+ eb, btrfs_header_level(eb));
+}
+
+#endif
+
+/*
* Extent buffer locking
* =====================
*
@@ -164,6 +251,8 @@ struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
while (1) {
eb = btrfs_root_node(root);
+
+ btrfs_maybe_reset_lockdep_class(root, eb);
btrfs_tree_lock(eb);
if (eb == root->node)
break;
@@ -185,6 +274,8 @@ struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
while (1) {
eb = btrfs_root_node(root);
+
+ btrfs_maybe_reset_lockdep_class(root, eb);
btrfs_tree_read_lock(eb);
if (eb == root->node)
break;
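
The keyset table above depends on the DEFINE_LEVEL/DEFINE_NAME macro expansion producing one name string per tree level, and on the terminating { .id = 0 } entry acting as the default in the lookup loop. A minimal userspace sketch of the same pattern follows; the "demo-" names and the objectid values are made up for illustration and are not part of the patch.

    #include <stdio.h>

    #define MAX_LEVEL 8

    #define DEFINE_LEVEL(stem, level) \
            .names[level] = "demo-" stem "-0" #level,

    #define DEFINE_NAME(stem) \
            DEFINE_LEVEL(stem, 0) DEFINE_LEVEL(stem, 1) \
            DEFINE_LEVEL(stem, 2) DEFINE_LEVEL(stem, 3) \
            DEFINE_LEVEL(stem, 4) DEFINE_LEVEL(stem, 5) \
            DEFINE_LEVEL(stem, 6) DEFINE_LEVEL(stem, 7)

    static struct keyset {
            unsigned long long id;          /* 0 terminates and is the default */
            char names[MAX_LEVEL][20];
    } keysets[] = {
            { .id = 1, DEFINE_NAME("root") },
            { .id = 2, DEFINE_NAME("extent") },
            { .id = 0, DEFINE_NAME("tree") },       /* default entry */
    };

    static const char *lookup(unsigned long long objectid, int level)
    {
            struct keyset *ks;

            /* Walk until the default entry; stop early on an exact match. */
            for (ks = keysets; ks->id; ks++)
                    if (ks->id == objectid)
                            break;
            return ks->names[level];
    }

    int main(void)
    {
            printf("%s\n", lookup(2, 3));   /* "demo-extent-03" */
            printf("%s\n", lookup(42, 1));  /* falls back to "demo-tree-01" */
            return 0;
    }

The fallback works because the sentinel entry is the only one with id 0, so the loop always stops on a valid element even for an unknown objectid.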
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index bbc45534ae9a..ab268be09bb5 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -131,4 +131,18 @@ void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock);
void btrfs_drew_read_lock(struct btrfs_drew_lock *lock);
void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock);
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, int level);
+void btrfs_maybe_reset_lockdep_class(struct btrfs_root *root, struct extent_buffer *eb);
+#else
+static inline void btrfs_set_buffer_lockdep_class(u64 objectid,
+ struct extent_buffer *eb, int level)
+{
+}
+static inline void btrfs_maybe_reset_lockdep_class(struct btrfs_root *root,
+ struct extent_buffer *eb)
+{
+}
+#endif
+
#endif
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index a6dc827e75af..45c02aba2492 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1326,7 +1326,9 @@ again:
btrfs_release_path(path);
path->lowest_level = level;
+ set_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state);
ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
+ clear_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state);
path->lowest_level = 0;
if (ret) {
if (ret > 0)
@@ -3573,7 +3575,12 @@ int prepare_to_relocate(struct reloc_control *rc)
*/
return PTR_ERR(trans);
}
- return btrfs_commit_transaction(trans);
+
+ ret = btrfs_commit_transaction(trans);
+ if (ret)
+ unset_reloc_control(rc);
+
+ return ret;
}
static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index a64b26b16904..d647cb2938c0 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -349,9 +349,10 @@ int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
key.offset = ref_id;
again:
ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
- if (ret < 0)
+ if (ret < 0) {
+ err = ret;
goto out;
- if (ret == 0) {
+ } else if (ret == 0) {
leaf = path->nodes[0];
ref = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_root_ref);
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 4c7089b1681b..f89beac3c665 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1816,6 +1816,8 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
error = -EBUSY;
} else {
snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
+ shrinker_debugfs_rename(&s->s_shrink, "sb-%s:%s", fs_type->name,
+ s->s_id);
btrfs_sb(s)->bdev_holder = fs_type;
if (!strstr(crc32c_impl(), "generic"))
set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index 9e0e0ae2288c..43f905ab0a18 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -1233,7 +1233,8 @@ static void extent_err(const struct extent_buffer *eb, int slot,
}
static int check_extent_item(struct extent_buffer *leaf,
- struct btrfs_key *key, int slot)
+ struct btrfs_key *key, int slot,
+ struct btrfs_key *prev_key)
{
struct btrfs_fs_info *fs_info = leaf->fs_info;
struct btrfs_extent_item *ei;
@@ -1453,6 +1454,26 @@ static int check_extent_item(struct extent_buffer *leaf,
total_refs, inline_refs);
return -EUCLEAN;
}
+
+ if ((prev_key->type == BTRFS_EXTENT_ITEM_KEY) ||
+ (prev_key->type == BTRFS_METADATA_ITEM_KEY)) {
+ u64 prev_end = prev_key->objectid;
+
+ if (prev_key->type == BTRFS_METADATA_ITEM_KEY)
+ prev_end += fs_info->nodesize;
+ else
+ prev_end += prev_key->offset;
+
+ if (unlikely(prev_end > key->objectid)) {
+ extent_err(leaf, slot,
+ "previous extent [%llu %u %llu] overlaps current extent [%llu %u %llu]",
+ prev_key->objectid, prev_key->type,
+ prev_key->offset, key->objectid, key->type,
+ key->offset);
+ return -EUCLEAN;
+ }
+ }
+
return 0;
}
@@ -1621,7 +1642,7 @@ static int check_leaf_item(struct extent_buffer *leaf,
break;
case BTRFS_EXTENT_ITEM_KEY:
case BTRFS_METADATA_ITEM_KEY:
- ret = check_extent_item(leaf, key, slot);
+ ret = check_extent_item(leaf, key, slot, prev_key);
break;
case BTRFS_TREE_BLOCK_REF_KEY:
case BTRFS_SHARED_DATA_REF_KEY:
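
The new overlap check in check_extent_item() computes where the previous extent ends: a metadata item spans one node, a data extent spans prev_key->offset bytes, and an overlap is reported when that end passes the current key's objectid. A small userspace sketch of the same arithmetic, with hypothetical key-type constants standing in for the kernel's:

    #include <stdio.h>

    /* Hypothetical stand-ins for the two extent key types. */
    enum { EXTENT_ITEM_KEY = 168, METADATA_ITEM_KEY = 169 };

    struct key { unsigned long long objectid; int type; unsigned long long offset; };

    /* End of the previous extent: metadata items span one node, data
     * extents span 'offset' bytes. Overlap when that end passes the
     * current extent's start (its objectid). */
    static int overlaps(const struct key *prev, const struct key *cur,
                        unsigned long long nodesize)
    {
            unsigned long long prev_end = prev->objectid;

            if (prev->type == METADATA_ITEM_KEY)
                    prev_end += nodesize;
            else
                    prev_end += prev->offset;
            return prev_end > cur->objectid;
    }

    int main(void)
    {
            struct key prev = { 1048576, EXTENT_ITEM_KEY, 65536 };
            struct key cur  = { 1097728, EXTENT_ITEM_KEY, 4096 };

            /* prev ends at 1114112 > 1097728, so this reports an overlap. */
            printf("overlap: %d\n", overlaps(&prev, &cur, 16384));
            return 0;
    }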
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index dcf75a8daa20..9205c4a5ca81 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -1146,7 +1146,9 @@ again:
extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
inode_objectid, parent_objectid, 0,
0);
- if (!IS_ERR_OR_NULL(extref)) {
+ if (IS_ERR(extref)) {
+ return PTR_ERR(extref);
+ } else if (extref) {
u32 item_size;
u32 cur_offset = 0;
unsigned long base;
@@ -1457,7 +1459,7 @@ static int add_link(struct btrfs_trans_handle *trans,
* on the inode will not free it. We will fixup the link count later.
*/
if (other_inode->i_nlink == 0)
- inc_nlink(other_inode);
+ set_nlink(other_inode, 1);
add_link:
ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
name, namelen, 0, ref_index);
@@ -1600,7 +1602,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
* free it. We will fixup the link count later.
*/
if (!ret && inode->i_nlink == 0)
- inc_nlink(inode);
+ set_nlink(inode, 1);
}
if (ret < 0)
goto out;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 272901514b0c..064ab2a79c80 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -2345,8 +2345,11 @@ int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info,
ret = btrfs_get_bdev_and_sb(path, FMODE_READ, fs_info->bdev_holder, 0,
&bdev, &disk_super);
- if (ret)
+ if (ret) {
+ btrfs_put_dev_args_from_path(args);
return ret;
+ }
+
args->devid = btrfs_stack_device_id(&disk_super->dev_item);
memcpy(args->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE);
if (btrfs_fs_incompat(fs_info, METADATA_UUID))
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index 7421abcf325a..5bb8d8c86311 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -371,6 +371,9 @@ static int btrfs_xattr_handler_set(const struct xattr_handler *handler,
const char *name, const void *buffer,
size_t size, int flags)
{
+ if (btrfs_root_readonly(BTRFS_I(inode)->root))
+ return -EROFS;
+
name = xattr_full_name(handler, name);
return btrfs_setxattr_trans(inode, name, buffer, size, flags);
}
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index d6e5916138e4..dcf701b05cc1 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -122,7 +122,7 @@ static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
* Reference snap context in folio->private. Also set
* PagePrivate so that we get invalidate_folio callback.
*/
- VM_BUG_ON_FOLIO(folio_test_private(folio), folio);
+ VM_WARN_ON_FOLIO(folio->private, folio);
folio_attach_private(folio, snapc);
return ceph_fscache_dirty_folio(mapping, folio);
@@ -237,7 +237,7 @@ static void finish_netfs_read(struct ceph_osd_request *req)
if (err >= 0 && err < subreq->len)
__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
- netfs_subreq_terminated(subreq, err, true);
+ netfs_subreq_terminated(subreq, err, false);
num_pages = calc_pages_for(osd_data->alignment, osd_data->length);
ceph_put_page_vector(osd_data->pages, num_pages, false);
@@ -313,8 +313,7 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
int err = 0;
u64 len = subreq->len;
- if (ci->i_inline_version != CEPH_INLINE_NONE &&
- ceph_netfs_issue_op_inline(subreq))
+ if (ceph_has_inline_data(ci) && ceph_netfs_issue_op_inline(subreq))
return;
req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, vino, subreq->start, &len,
@@ -329,7 +328,7 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
dout("%s: pos=%llu orig_len=%zu len=%llu\n", __func__, subreq->start, subreq->len, len);
iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages, subreq->start, len);
- err = iov_iter_get_pages_alloc(&iter, &pages, len, &page_off);
+ err = iov_iter_get_pages_alloc2(&iter, &pages, len, &page_off);
if (err < 0) {
dout("%s: iov_ter_get_pages_alloc returned %d\n", __func__, err);
goto out;
@@ -338,6 +337,7 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
/* should always give us a page-aligned read */
WARN_ON_ONCE(page_off);
len = err;
+ err = 0;
osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
req->r_callback = finish_netfs_read;
@@ -345,9 +345,7 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
req->r_inode = inode;
ihold(inode);
- err = ceph_osdc_start_request(req->r_osdc, req, false);
- if (err)
- iput(inode);
+ ceph_osdc_start_request(req->r_osdc, req);
out:
ceph_osdc_put_request(req);
if (err)
@@ -621,9 +619,8 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
dout("writepage %llu~%llu (%llu bytes)\n", page_off, len, len);
req->r_mtime = inode->i_mtime;
- err = ceph_osdc_start_request(osdc, req, true);
- if (!err)
- err = ceph_osdc_wait_request(osdc, req);
+ ceph_osdc_start_request(osdc, req);
+ err = ceph_osdc_wait_request(osdc, req);
ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
req->r_end_latency, len, err);
@@ -1151,8 +1148,7 @@ new_request:
}
req->r_mtime = inode->i_mtime;
- rc = ceph_osdc_start_request(&fsc->client->osdc, req, true);
- BUG_ON(rc);
+ ceph_osdc_start_request(&fsc->client->osdc, req);
req = NULL;
wbc->nr_to_write -= i;
@@ -1327,16 +1323,13 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
int r;
r = netfs_write_begin(&ci->netfs, file, inode->i_mapping, pos, len, &folio, NULL);
- if (r == 0)
- folio_wait_fscache(folio);
- if (r < 0) {
- if (folio)
- folio_put(folio);
- } else {
- WARN_ON_ONCE(!folio_test_locked(folio));
- *pagep = &folio->page;
- }
- return r;
+ if (r < 0)
+ return r;
+
+ folio_wait_fscache(folio);
+ WARN_ON_ONCE(!folio_test_locked(folio));
+ *pagep = &folio->page;
+ return 0;
}
/*
@@ -1439,7 +1432,7 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
inode, off, ceph_cap_string(got));
if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
- ci->i_inline_version == CEPH_INLINE_NONE) {
+ !ceph_has_inline_data(ci)) {
CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
ceph_add_rw_context(fi, &rw_ctx);
ret = filemap_fault(vmf);
@@ -1696,9 +1689,8 @@ int ceph_uninline_data(struct file *file)
}
req->r_mtime = inode->i_mtime;
- err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
- if (!err)
- err = ceph_osdc_wait_request(&fsc->client->osdc, req);
+ ceph_osdc_start_request(&fsc->client->osdc, req);
+ err = ceph_osdc_wait_request(&fsc->client->osdc, req);
ceph_osdc_put_request(req);
if (err < 0)
goto out_unlock;
@@ -1739,9 +1731,8 @@ int ceph_uninline_data(struct file *file)
}
req->r_mtime = inode->i_mtime;
- err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
- if (!err)
- err = ceph_osdc_wait_request(&fsc->client->osdc, req);
+ ceph_osdc_start_request(&fsc->client->osdc, req);
+ err = ceph_osdc_wait_request(&fsc->client->osdc, req);
ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
req->r_end_latency, len, err);
@@ -1912,15 +1903,13 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
osd_req_op_raw_data_in_pages(rd_req, 0, pages, PAGE_SIZE,
0, false, true);
- err = ceph_osdc_start_request(&fsc->client->osdc, rd_req, false);
+ ceph_osdc_start_request(&fsc->client->osdc, rd_req);
wr_req->r_mtime = ci->netfs.inode.i_mtime;
- err2 = ceph_osdc_start_request(&fsc->client->osdc, wr_req, false);
+ ceph_osdc_start_request(&fsc->client->osdc, wr_req);
- if (!err)
- err = ceph_osdc_wait_request(&fsc->client->osdc, rd_req);
- if (!err2)
- err2 = ceph_osdc_wait_request(&fsc->client->osdc, wr_req);
+ err = ceph_osdc_wait_request(&fsc->client->osdc, rd_req);
+ err2 = ceph_osdc_wait_request(&fsc->client->osdc, wr_req);
if (err >= 0 || err == -ENOENT)
have |= POOL_READ;
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index ac8fd5e7f540..53cfe026b3ea 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -602,8 +602,8 @@ static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
* @ci: inode to be moved
* @session: new auth caps session
*/
-static void change_auth_cap_ses(struct ceph_inode_info *ci,
- struct ceph_mds_session *session)
+void change_auth_cap_ses(struct ceph_inode_info *ci,
+ struct ceph_mds_session *session)
{
lockdep_assert_held(&ci->i_ceph_lock);
@@ -1978,14 +1978,15 @@ retry:
}
dout("check_caps %llx.%llx file_want %s used %s dirty %s flushing %s"
- " issued %s revoking %s retain %s %s%s\n", ceph_vinop(inode),
+ " issued %s revoking %s retain %s %s%s%s\n", ceph_vinop(inode),
ceph_cap_string(file_wanted),
ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps),
ceph_cap_string(ci->i_flushing_caps),
ceph_cap_string(issued), ceph_cap_string(revoking),
ceph_cap_string(retain),
(flags & CHECK_CAPS_AUTHONLY) ? " AUTHONLY" : "",
- (flags & CHECK_CAPS_FLUSH) ? " FLUSH" : "");
+ (flags & CHECK_CAPS_FLUSH) ? " FLUSH" : "",
+ (flags & CHECK_CAPS_NOINVAL) ? " NOINVAL" : "");
/*
* If we no longer need to hold onto old our caps, and we may
@@ -3005,7 +3006,7 @@ int ceph_get_caps(struct file *filp, int need, int want, loff_t endoff, int *got
}
if (S_ISREG(ci->netfs.inode.i_mode) &&
- ci->i_inline_version != CEPH_INLINE_NONE &&
+ ceph_has_inline_data(ci) &&
(_got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
i_size_read(inode) > 0) {
struct page *page =
@@ -3578,24 +3579,23 @@ static void handle_cap_grant(struct inode *inode,
fill_inline = true;
}
- if (ci->i_auth_cap == cap &&
- le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) {
- if (newcaps & ~extra_info->issued)
- wake = true;
+ if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) {
+ if (ci->i_auth_cap == cap) {
+ if (newcaps & ~extra_info->issued)
+ wake = true;
- if (ci->i_requested_max_size > max_size ||
- !(le32_to_cpu(grant->wanted) & CEPH_CAP_ANY_FILE_WR)) {
- /* re-request max_size if necessary */
- ci->i_requested_max_size = 0;
- wake = true;
- }
+ if (ci->i_requested_max_size > max_size ||
+ !(le32_to_cpu(grant->wanted) & CEPH_CAP_ANY_FILE_WR)) {
+ /* re-request max_size if necessary */
+ ci->i_requested_max_size = 0;
+ wake = true;
+ }
- ceph_kick_flushing_inode_caps(session, ci);
- spin_unlock(&ci->i_ceph_lock);
+ ceph_kick_flushing_inode_caps(session, ci);
+ }
up_read(&session->s_mdsc->snap_rwsem);
- } else {
- spin_unlock(&ci->i_ceph_lock);
}
+ spin_unlock(&ci->i_ceph_lock);
if (fill_inline)
ceph_fill_inline_data(inode, NULL, extra_info->inline_data,
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index eae417d71136..e7e2ebac330d 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -856,6 +856,10 @@ static int ceph_mknod(struct user_namespace *mnt_userns, struct inode *dir,
if (ceph_snap(dir) != CEPH_NOSNAP)
return -EROFS;
+ err = ceph_wait_on_conflict_unlink(dentry);
+ if (err)
+ return err;
+
if (ceph_quota_is_max_files_exceeded(dir)) {
err = -EDQUOT;
goto out;
@@ -918,6 +922,10 @@ static int ceph_symlink(struct user_namespace *mnt_userns, struct inode *dir,
if (ceph_snap(dir) != CEPH_NOSNAP)
return -EROFS;
+ err = ceph_wait_on_conflict_unlink(dentry);
+ if (err)
+ return err;
+
if (ceph_quota_is_max_files_exceeded(dir)) {
err = -EDQUOT;
goto out;
@@ -968,9 +976,13 @@ static int ceph_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
struct ceph_mds_request *req;
struct ceph_acl_sec_ctx as_ctx = {};
- int err = -EROFS;
+ int err;
int op;
+ err = ceph_wait_on_conflict_unlink(dentry);
+ if (err)
+ return err;
+
if (ceph_snap(dir) == CEPH_SNAPDIR) {
/* mkdir .snap/foo is a MKSNAP */
op = CEPH_MDS_OP_MKSNAP;
@@ -980,6 +992,7 @@ static int ceph_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
op = CEPH_MDS_OP_MKDIR;
} else {
+ err = -EROFS;
goto out;
}
@@ -1037,6 +1050,10 @@ static int ceph_link(struct dentry *old_dentry, struct inode *dir,
struct ceph_mds_request *req;
int err;
+ err = ceph_wait_on_conflict_unlink(dentry);
+ if (err)
+ return err;
+
if (ceph_snap(dir) != CEPH_NOSNAP)
return -EROFS;
@@ -1071,9 +1088,27 @@ static int ceph_link(struct dentry *old_dentry, struct inode *dir,
static void ceph_async_unlink_cb(struct ceph_mds_client *mdsc,
struct ceph_mds_request *req)
{
+ struct dentry *dentry = req->r_dentry;
+ struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
+ struct ceph_dentry_info *di = ceph_dentry(dentry);
int result = req->r_err ? req->r_err :
le32_to_cpu(req->r_reply_info.head->result);
+ if (!test_bit(CEPH_DENTRY_ASYNC_UNLINK_BIT, &di->flags))
+ pr_warn("%s dentry %p:%pd async unlink bit is not set\n",
+ __func__, dentry, dentry);
+
+ spin_lock(&fsc->async_unlink_conflict_lock);
+ hash_del_rcu(&di->hnode);
+ spin_unlock(&fsc->async_unlink_conflict_lock);
+
+ spin_lock(&dentry->d_lock);
+ di->flags &= ~CEPH_DENTRY_ASYNC_UNLINK;
+ wake_up_bit(&di->flags, CEPH_DENTRY_ASYNC_UNLINK_BIT);
+ spin_unlock(&dentry->d_lock);
+
+ synchronize_rcu();
+
if (result == -EJUKEBOX)
goto out;
@@ -1081,7 +1116,7 @@ static void ceph_async_unlink_cb(struct ceph_mds_client *mdsc,
if (result) {
int pathlen = 0;
u64 base = 0;
- char *path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
+ char *path = ceph_mdsc_build_path(dentry, &pathlen,
&base, 0);
/* mark error on parent + clear complete */
@@ -1089,13 +1124,13 @@ static void ceph_async_unlink_cb(struct ceph_mds_client *mdsc,
ceph_dir_clear_complete(req->r_parent);
/* drop the dentry -- we don't know its status */
- if (!d_unhashed(req->r_dentry))
- d_drop(req->r_dentry);
+ if (!d_unhashed(dentry))
+ d_drop(dentry);
/* mark inode itself for an error (since metadata is bogus) */
mapping_set_error(req->r_old_inode->i_mapping, result);
- pr_warn("ceph: async unlink failure path=(%llx)%s result=%d!\n",
+ pr_warn("async unlink failure path=(%llx)%s result=%d!\n",
base, IS_ERR(path) ? "<<bad>>" : path, result);
ceph_mdsc_free_path(path, pathlen);
}
@@ -1180,6 +1215,8 @@ retry:
if (try_async && op == CEPH_MDS_OP_UNLINK &&
(req->r_dir_caps = get_caps_for_async_unlink(dir, dentry))) {
+ struct ceph_dentry_info *di = ceph_dentry(dentry);
+
dout("async unlink on %llu/%.*s caps=%s", ceph_ino(dir),
dentry->d_name.len, dentry->d_name.name,
ceph_cap_string(req->r_dir_caps));
@@ -1187,6 +1224,16 @@ retry:
req->r_callback = ceph_async_unlink_cb;
req->r_old_inode = d_inode(dentry);
ihold(req->r_old_inode);
+
+ spin_lock(&dentry->d_lock);
+ di->flags |= CEPH_DENTRY_ASYNC_UNLINK;
+ spin_unlock(&dentry->d_lock);
+
+ spin_lock(&fsc->async_unlink_conflict_lock);
+ hash_add_rcu(fsc->async_unlink_conflict, &di->hnode,
+ dentry->d_name.hash);
+ spin_unlock(&fsc->async_unlink_conflict_lock);
+
err = ceph_mdsc_submit_request(mdsc, dir, req);
if (!err) {
/*
@@ -1195,10 +1242,20 @@ retry:
*/
drop_nlink(inode);
d_delete(dentry);
- } else if (err == -EJUKEBOX) {
- try_async = false;
- ceph_mdsc_put_request(req);
- goto retry;
+ } else {
+ spin_lock(&fsc->async_unlink_conflict_lock);
+ hash_del_rcu(&di->hnode);
+ spin_unlock(&fsc->async_unlink_conflict_lock);
+
+ spin_lock(&dentry->d_lock);
+ di->flags &= ~CEPH_DENTRY_ASYNC_UNLINK;
+ spin_unlock(&dentry->d_lock);
+
+ if (err == -EJUKEBOX) {
+ try_async = false;
+ ceph_mdsc_put_request(req);
+ goto retry;
+ }
}
} else {
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
@@ -1237,6 +1294,10 @@ static int ceph_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
(!ceph_quota_is_same_realm(old_dir, new_dir)))
return -EXDEV;
+ err = ceph_wait_on_conflict_unlink(new_dentry);
+ if (err)
+ return err;
+
dout("rename dir %p dentry %p to dir %p dentry %p\n",
old_dir, old_dentry, new_dir, new_dentry);
req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index da59e836a06e..04fd34557de8 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -95,12 +95,11 @@ static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
size_t start;
int idx = 0;
- bytes = iov_iter_get_pages(iter, pages, maxsize - size,
+ bytes = iov_iter_get_pages2(iter, pages, maxsize - size,
ITER_GET_BVECS_PAGES, &start);
if (bytes < 0)
return size ?: bytes;
- iov_iter_advance(iter, bytes);
size += bytes;
for ( ; bytes; idx++, bvec_idx++) {
@@ -241,8 +240,7 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,
INIT_LIST_HEAD(&fi->rw_contexts);
fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);
- if ((file->f_mode & FMODE_WRITE) &&
- ci->i_inline_version != CEPH_INLINE_NONE) {
+ if ((file->f_mode & FMODE_WRITE) && ceph_has_inline_data(ci)) {
ret = ceph_uninline_data(file);
if (ret < 0)
goto error;
@@ -569,7 +567,7 @@ static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
char *path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
&base, 0);
- pr_warn("ceph: async create failure path=(%llx)%s result=%d!\n",
+ pr_warn("async create failure path=(%llx)%s result=%d!\n",
base, IS_ERR(path) ? "<<bad>>" : path, result);
ceph_mdsc_free_path(path, pathlen);
@@ -612,6 +610,7 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
struct ceph_mds_reply_inode in = { };
struct ceph_mds_reply_info_in iinfo = { .in = &in };
struct ceph_inode_info *ci = ceph_inode(dir);
+ struct ceph_dentry_info *di = ceph_dentry(dentry);
struct inode *inode;
struct timespec64 now;
struct ceph_string *pool_ns;
@@ -657,10 +656,6 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
/* Directories always inherit the setgid bit. */
if (S_ISDIR(mode))
mode |= S_ISGID;
- else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
- !in_group_p(dir->i_gid) &&
- !capable_wrt_inode_uidgid(&init_user_ns, dir, CAP_FSETID))
- mode &= ~S_ISGID;
} else {
in.gid = cpu_to_le32(from_kgid(&init_user_ns, current_fsgid()));
}
@@ -714,6 +709,12 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
file->f_mode |= FMODE_CREATED;
ret = finish_open(file, dentry, ceph_open);
}
+
+ spin_lock(&dentry->d_lock);
+ di->flags &= ~CEPH_DENTRY_ASYNC_CREATE;
+ wake_up_bit(&di->flags, CEPH_DENTRY_ASYNC_CREATE_BIT);
+ spin_unlock(&dentry->d_lock);
+
return ret;
}
@@ -740,6 +741,15 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
if (dentry->d_name.len > NAME_MAX)
return -ENAMETOOLONG;
+ err = ceph_wait_on_conflict_unlink(dentry);
+ if (err)
+ return err;
+ /*
+ * Do not truncate the file, since atomic_open is called before the
+ * permission check. The caller will do the truncation afterward.
+ */
+ flags &= ~O_TRUNC;
+
if (flags & O_CREAT) {
if (ceph_quota_is_max_files_exceeded(dir))
return -EDQUOT;
@@ -786,9 +796,16 @@ retry:
(req->r_dir_caps =
try_prep_async_create(dir, dentry, &lo,
&req->r_deleg_ino))) {
+ struct ceph_dentry_info *di = ceph_dentry(dentry);
+
set_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags);
req->r_args.open.flags |= cpu_to_le32(CEPH_O_EXCL);
req->r_callback = ceph_async_create_cb;
+
+ spin_lock(&dentry->d_lock);
+ di->flags |= CEPH_DENTRY_ASYNC_CREATE;
+ spin_unlock(&dentry->d_lock);
+
err = ceph_mdsc_submit_request(mdsc, dir, req);
if (!err) {
err = ceph_finish_async_create(dir, dentry,
@@ -807,9 +824,7 @@ retry:
}
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
- err = ceph_mdsc_do_request(mdsc,
- (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
- req);
+ err = ceph_mdsc_do_request(mdsc, (flags & O_CREAT) ? dir : NULL, req);
if (err == -ENOENT) {
dentry = ceph_handle_snapdir(req, dentry);
if (IS_ERR(dentry)) {
@@ -965,9 +980,8 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_off,
false, false);
- ret = ceph_osdc_start_request(osdc, req, false);
- if (!ret)
- ret = ceph_osdc_wait_request(osdc, req);
+ ceph_osdc_start_request(osdc, req);
+ ret = ceph_osdc_wait_request(osdc, req);
ceph_update_read_metrics(&fsc->mdsc->metric,
req->r_start_latency,
@@ -1230,7 +1244,7 @@ static void ceph_aio_retry_work(struct work_struct *work)
req->r_inode = inode;
req->r_priv = aio_req;
- ret = ceph_osdc_start_request(req->r_osdc, req, false);
+ ceph_osdc_start_request(req->r_osdc, req);
out:
if (ret < 0) {
req->r_result = ret;
@@ -1262,7 +1276,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
size_t count = iov_iter_count(iter);
loff_t pos = iocb->ki_pos;
bool write = iov_iter_rw(iter) == WRITE;
- bool should_dirty = !write && iter_is_iovec(iter);
+ bool should_dirty = !write && user_backed_iter(iter);
if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
return -EROFS;
@@ -1367,9 +1381,8 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
continue;
}
- ret = ceph_osdc_start_request(req->r_osdc, req, false);
- if (!ret)
- ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
+ ceph_osdc_start_request(req->r_osdc, req);
+ ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
if (write)
ceph_update_write_metrics(metric, req->r_start_latency,
@@ -1432,8 +1445,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
r_private_item);
list_del_init(&req->r_private_item);
if (ret >= 0)
- ret = ceph_osdc_start_request(req->r_osdc,
- req, false);
+ ceph_osdc_start_request(req->r_osdc, req);
if (ret < 0) {
req->r_result = ret;
ceph_aio_complete_req(req);
@@ -1546,9 +1558,8 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
false, true);
req->r_mtime = mtime;
- ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
- if (!ret)
- ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
+ ceph_osdc_start_request(&fsc->client->osdc, req);
+ ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
req->r_end_latency, len, ret);
@@ -1632,7 +1643,7 @@ again:
inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
ceph_cap_string(got));
- if (ci->i_inline_version == CEPH_INLINE_NONE) {
+ if (!ceph_has_inline_data(ci)) {
if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
ret = ceph_direct_read_write(iocb, to,
NULL, NULL);
@@ -1895,7 +1906,7 @@ retry_snap:
if (dirty)
__mark_inode_dirty(inode, dirty);
if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos))
- ceph_check_caps(ci, 0, NULL);
+ ceph_check_caps(ci, CHECK_CAPS_FLUSH, NULL);
}
dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
@@ -1935,57 +1946,15 @@ out_unlocked:
*/
static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
{
- struct inode *inode = file->f_mapping->host;
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
- loff_t i_size;
- loff_t ret;
-
- inode_lock(inode);
-
if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
+ struct inode *inode = file_inode(file);
+ int ret;
+
ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
if (ret < 0)
- goto out;
- }
-
- i_size = i_size_read(inode);
- switch (whence) {
- case SEEK_END:
- offset += i_size;
- break;
- case SEEK_CUR:
- /*
- * Here we special-case the lseek(fd, 0, SEEK_CUR)
- * position-querying operation. Avoid rewriting the "same"
- * f_pos value back to the file because a concurrent read(),
- * write() or lseek() might have altered it
- */
- if (offset == 0) {
- ret = file->f_pos;
- goto out;
- }
- offset += file->f_pos;
- break;
- case SEEK_DATA:
- if (offset < 0 || offset >= i_size) {
- ret = -ENXIO;
- goto out;
- }
- break;
- case SEEK_HOLE:
- if (offset < 0 || offset >= i_size) {
- ret = -ENXIO;
- goto out;
- }
- offset = i_size;
- break;
+ return ret;
}
-
- ret = vfs_setpos(file, offset, max(i_size, fsc->max_file_size));
-
-out:
- inode_unlock(inode);
- return ret;
+ return generic_file_llseek(file, offset, whence);
}
static inline void ceph_zero_partial_page(
@@ -2054,12 +2023,10 @@ static int ceph_zero_partial_object(struct inode *inode,
}
req->r_mtime = inode->i_mtime;
- ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
- if (!ret) {
- ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
- if (ret == -ENOENT)
- ret = 0;
- }
+ ceph_osdc_start_request(&fsc->client->osdc, req);
+ ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
+ if (ret == -ENOENT)
+ ret = 0;
ceph_osdc_put_request(req);
out:
@@ -2361,7 +2328,7 @@ static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off
if (IS_ERR(req))
ret = PTR_ERR(req);
else {
- ceph_osdc_start_request(osdc, req, false);
+ ceph_osdc_start_request(osdc, req);
ret = ceph_osdc_wait_request(osdc, req);
ceph_update_copyfrom_metrics(&fsc->mdsc->metric,
req->r_start_latency,
@@ -2554,7 +2521,8 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
/* Let the MDS know about dst file size change */
if (ceph_inode_set_size(dst_inode, dst_off) ||
ceph_quota_is_max_bytes_approaching(dst_inode, dst_off))
- ceph_check_caps(dst_ci, CHECK_CAPS_AUTHONLY, NULL);
+ ceph_check_caps(dst_ci, CHECK_CAPS_AUTHONLY | CHECK_CAPS_FLUSH,
+ NULL);
}
/* Mark Fw dirty */
spin_lock(&dst_ci->i_ceph_lock);
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 56c53ab3618e..42351d7a0dd6 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1049,7 +1049,7 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
iinfo->inline_version >= ci->i_inline_version) {
int cache_caps = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
ci->i_inline_version = iinfo->inline_version;
- if (ci->i_inline_version != CEPH_INLINE_NONE &&
+ if (ceph_has_inline_data(ci) &&
(locked_page || (info_caps & cache_caps)))
fill_inline = true;
}
@@ -2275,9 +2275,15 @@ int ceph_try_to_choose_auth_mds(struct inode *inode, int mask)
*
* This cost much when doing the Locker state transition and
* usually will need to revoke caps from clients.
+ *
+ * And for the 'Xs' caps for getxattr we will also choose the
+ * auth MDS, because the MDS side code is buggy: setxattr
+ * won't notify the replica MDSes when the values change, so
+ * a replica MDS may return stale values. Though this will be
+ * fixed in the MDS code, it still makes sense for old ceph.
*/
if (((mask & CEPH_CAP_ANY_SHARED) && (issued & CEPH_CAP_ANY_EXCL))
- || (mask & CEPH_STAT_RSTAT))
+ || (mask & (CEPH_STAT_RSTAT | CEPH_STAT_CAP_XATTR)))
return USE_AUTH_MDS;
else
return USE_ANY_MDS;
@@ -2321,7 +2327,8 @@ int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
if (inline_version == 0) {
/* the reply is supposed to contain inline data */
err = -EINVAL;
- } else if (inline_version == CEPH_INLINE_NONE) {
+ } else if (inline_version == CEPH_INLINE_NONE ||
+ inline_version == 1) {
err = -ENODATA;
} else {
err = req->r_reply_info.targeti.inline_len;
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 33f517d549ce..80f8b9ec1a31 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -456,7 +456,7 @@ static int ceph_parse_deleg_inos(void **p, void *end,
dout("added delegated inode 0x%llx\n",
start - 1);
} else if (err == -EBUSY) {
- pr_warn("ceph: MDS delegated inode 0x%llx more than once.\n",
+ pr_warn("MDS delegated inode 0x%llx more than once.\n",
start - 1);
} else {
return err;
@@ -655,6 +655,79 @@ static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
free_pages((unsigned long)info->dir_entries, get_order(info->dir_buf_size));
}
+/*
+ * In the async unlink case the kclient won't wait for the first reply
+ * from the MDS; it just drops all the links, unhashes the dentry and
+ * then succeeds immediately.
+ *
+ * For any new create/link/rename, etc. requests that follow using the
+ * same file names, we must wait for the first reply of the inflight
+ * unlink request, or the MDS may fail these following requests with
+ * -EEXIST if the inflight async unlink request was delayed for some
+ * reason.
+ *
+ * And the worst case is that for a non-async openc request it will
+ * successfully open the file if the CDentry hasn't been unlinked yet,
+ * but later the previously delayed async unlink request will remove
+ * the CDentry. That means the just created file may be deleted later
+ * by accident.
+ *
+ * We need to wait for the inflight async unlink requests to finish
+ * when creating new files/directories with the same file names.
+ */
+int ceph_wait_on_conflict_unlink(struct dentry *dentry)
+{
+ struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
+ struct dentry *pdentry = dentry->d_parent;
+ struct dentry *udentry, *found = NULL;
+ struct ceph_dentry_info *di;
+ struct qstr dname;
+ u32 hash = dentry->d_name.hash;
+ int err;
+
+ dname.name = dentry->d_name.name;
+ dname.len = dentry->d_name.len;
+
+ rcu_read_lock();
+ hash_for_each_possible_rcu(fsc->async_unlink_conflict, di,
+ hnode, hash) {
+ udentry = di->dentry;
+
+ spin_lock(&udentry->d_lock);
+ if (udentry->d_name.hash != hash)
+ goto next;
+ if (unlikely(udentry->d_parent != pdentry))
+ goto next;
+ if (!hash_hashed(&di->hnode))
+ goto next;
+
+ if (!test_bit(CEPH_DENTRY_ASYNC_UNLINK_BIT, &di->flags))
+ pr_warn("%s dentry %p:%pd async unlink bit is not set\n",
+ __func__, dentry, dentry);
+
+ if (!d_same_name(udentry, pdentry, &dname))
+ goto next;
+
+ spin_unlock(&udentry->d_lock);
+ found = dget(udentry);
+ break;
+next:
+ spin_unlock(&udentry->d_lock);
+ }
+ rcu_read_unlock();
+
+ if (likely(!found))
+ return 0;
+
+ dout("%s dentry %p:%pd conflict with old %p:%pd\n", __func__,
+ dentry, dentry, found, found);
+
+ err = wait_on_bit(&di->flags, CEPH_DENTRY_ASYNC_UNLINK_BIT,
+ TASK_KILLABLE);
+ dput(found);
+ return err;
+}
+
/*
* sessions
@@ -1220,14 +1293,17 @@ static int encode_supported_features(void **p, void *end)
if (count > 0) {
size_t i;
size_t size = FEATURE_BYTES(count);
+ unsigned long bit;
if (WARN_ON_ONCE(*p + 4 + size > end))
return -ERANGE;
ceph_encode_32(p, size);
memset(*p, 0, size);
- for (i = 0; i < count; i++)
- ((unsigned char*)(*p))[i / 8] |= BIT(feature_bits[i] % 8);
+ for (i = 0; i < count; i++) {
+ bit = feature_bits[i];
+ ((unsigned char *)(*p))[bit / 8] |= BIT(bit % 8);
+ }
*p += size;
} else {
if (WARN_ON_ONCE(*p + 4 > end))
@@ -2884,6 +2960,64 @@ static void __do_request(struct ceph_mds_client *mdsc,
if (req->r_request_started == 0) /* note request start time */
req->r_request_started = jiffies;
+ /*
+ * For async create we will choose the auth MDS of the frag in the
+ * parent directory to send the request, and usually this works fine,
+ * but if the directory is migrated to another MDS before it could
+ * handle it, the request will be forwarded.
+ *
+ * And then the auth cap will be changed.
+ */
+ if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags) && req->r_num_fwd) {
+ struct ceph_dentry_info *di = ceph_dentry(req->r_dentry);
+ struct ceph_inode_info *ci;
+ struct ceph_cap *cap;
+
+ /*
+ * The request may be handled very fast and the new inode
+ * hasn't been linked to the dentry yet. When forwarding the
+ * request we need to wait for ceph_finish_async_create(),
+ * which in theory shouldn't get stuck for long or fail, to
+ * finish.
+ */
+ if (!d_inode(req->r_dentry)) {
+ err = wait_on_bit(&di->flags, CEPH_DENTRY_ASYNC_CREATE_BIT,
+ TASK_KILLABLE);
+ if (err) {
+ mutex_lock(&req->r_fill_mutex);
+ set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
+ mutex_unlock(&req->r_fill_mutex);
+ goto out_session;
+ }
+ }
+
+ ci = ceph_inode(d_inode(req->r_dentry));
+
+ spin_lock(&ci->i_ceph_lock);
+ cap = ci->i_auth_cap;
+ if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE && mds != cap->mds) {
+ dout("do_request session changed for auth cap %d -> %d\n",
+ cap->session->s_mds, session->s_mds);
+
+ /* Remove the auth cap from old session */
+ spin_lock(&cap->session->s_cap_lock);
+ cap->session->s_nr_caps--;
+ list_del_init(&cap->session_caps);
+ spin_unlock(&cap->session->s_cap_lock);
+
+ /* Add the auth cap to the new session */
+ cap->mds = mds;
+ cap->session = session;
+ spin_lock(&session->s_cap_lock);
+ session->s_nr_caps++;
+ list_add_tail(&cap->session_caps, &session->s_caps);
+ spin_unlock(&session->s_cap_lock);
+
+ change_auth_cap_ses(ci, session);
+ }
+ spin_unlock(&ci->i_ceph_lock);
+ }
+
err = __send_request(session, req, false);
out_session:
@@ -3464,11 +3598,26 @@ static void handle_session(struct ceph_mds_session *session,
case CEPH_SESSION_OPEN:
if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
pr_info("mds%d reconnect success\n", session->s_mds);
- session->s_state = CEPH_MDS_SESSION_OPEN;
- session->s_features = features;
- renewed_caps(mdsc, session, 0);
- if (test_bit(CEPHFS_FEATURE_METRIC_COLLECT, &session->s_features))
- metric_schedule_delayed(&mdsc->metric);
+
+ if (session->s_state == CEPH_MDS_SESSION_OPEN) {
+ pr_notice("mds%d is already opened\n", session->s_mds);
+ } else {
+ session->s_state = CEPH_MDS_SESSION_OPEN;
+ session->s_features = features;
+ renewed_caps(mdsc, session, 0);
+ if (test_bit(CEPHFS_FEATURE_METRIC_COLLECT,
+ &session->s_features))
+ metric_schedule_delayed(&mdsc->metric);
+ }
+
+ /*
+ * The connection may be broken and the session on the client
+ * side may have been reinitialized, so update the seq
+ * anyway.
+ */
+ if (!session->s_seq && seq)
+ session->s_seq = seq;
+
wake = 1;
if (mdsc->stopping)
__close_session(mdsc, session);
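
The encode_supported_features() change above makes both the byte index and the bit position derive from the feature bit value itself (previously the byte index used the loop counter, which only works while the bits are dense and ordered). A hedged userspace sketch of the corrected packing; the buffer size and the sample bit numbers are illustrative only:

    #include <stdio.h>
    #include <string.h>

    /* Pack a list of bit numbers into a bitmap: each bit lands in
     * byte bit/8 at position bit%8, regardless of list order. */
    static void encode_bits(unsigned char *buf, size_t buflen,
                            const unsigned long *bits, size_t count)
    {
            size_t i;

            memset(buf, 0, buflen);
            for (i = 0; i < count; i++) {
                    unsigned long bit = bits[i];

                    if (bit / 8 < buflen)
                            buf[bit / 8] |= 1u << (bit % 8);
            }
    }

    int main(void)
    {
            /* Sparse feature bits 1, 9 and 17 spread over three bytes. */
            const unsigned long bits[] = { 1, 9, 17 };
            unsigned char buf[3];

            encode_bits(buf, sizeof(buf), bits, 3);
            printf("%02x %02x %02x\n", buf[0], buf[1], buf[2]);     /* 02 02 02 */
            return 0;
    }

With the old indexing by loop counter, bits 9 and 17 would both have been OR'd into byte 0, corrupting the advertised feature set.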
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 1140aecd82ce..256e3eada6c1 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -29,14 +29,12 @@ enum ceph_feature_type {
CEPHFS_FEATURE_MULTI_RECONNECT,
CEPHFS_FEATURE_DELEG_INO,
CEPHFS_FEATURE_METRIC_COLLECT,
+ CEPHFS_FEATURE_ALTERNATE_NAME,
+ CEPHFS_FEATURE_NOTIFY_SESSION_STATE,
- CEPHFS_FEATURE_MAX = CEPHFS_FEATURE_METRIC_COLLECT,
+ CEPHFS_FEATURE_MAX = CEPHFS_FEATURE_NOTIFY_SESSION_STATE,
};
-/*
- * This will always have the highest feature bit value
- * as the last element of the array.
- */
#define CEPHFS_FEATURES_CLIENT_SUPPORTED { \
0, 1, 2, 3, 4, 5, 6, 7, \
CEPHFS_FEATURE_MIMIC, \
@@ -45,10 +43,8 @@ enum ceph_feature_type {
CEPHFS_FEATURE_MULTI_RECONNECT, \
CEPHFS_FEATURE_DELEG_INO, \
CEPHFS_FEATURE_METRIC_COLLECT, \
- \
- CEPHFS_FEATURE_MAX, \
+ CEPHFS_FEATURE_NOTIFY_SESSION_STATE, \
}
-#define CEPHFS_FEATURES_CLIENT_REQUIRED {}
/*
* Some lock dependencies:
@@ -582,6 +578,7 @@ static inline int ceph_wait_on_async_create(struct inode *inode)
TASK_KILLABLE);
}
+extern int ceph_wait_on_conflict_unlink(struct dentry *dentry);
extern u64 ceph_get_deleg_ino(struct ceph_mds_session *session);
extern int ceph_restore_deleg_ino(struct ceph_mds_session *session, u64 ino);
#endif
diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c
index 30387733765d..8d0a6d2c2da4 100644
--- a/fs/ceph/mdsmap.c
+++ b/fs/ceph/mdsmap.c
@@ -352,12 +352,10 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2)
__decode_and_drop_type(p, end, u8, bad_ext);
}
if (mdsmap_ev >= 8) {
- u32 name_len;
/* enabled */
ceph_decode_8_safe(p, end, m->m_enabled, bad_ext);
- ceph_decode_32_safe(p, end, name_len, bad_ext);
- ceph_decode_need(p, end, name_len, bad_ext);
- *p += name_len;
+ /* fs_name */
+ ceph_decode_skip_string(p, end, bad_ext);
}
/* damaged */
if (mdsmap_ev >= 9) {
@@ -370,6 +368,22 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2)
} else {
m->m_damaged = false;
}
+ if (mdsmap_ev >= 17) {
+ /* balancer */
+ ceph_decode_skip_string(p, end, bad_ext);
+ /* standby_count_wanted */
+ ceph_decode_skip_32(p, end, bad_ext);
+ /* old_max_mds */
+ ceph_decode_skip_32(p, end, bad_ext);
+ /* min_compat_client */
+ ceph_decode_skip_8(p, end, bad_ext);
+ /* required_client_features */
+ ceph_decode_skip_set(p, end, 64, bad_ext);
+ ceph_decode_64_safe(p, end, m->m_max_xattr_size, bad_ext);
+ } else {
+ /* This forces the usage of the (sync) SETXATTR Op */
+ m->m_max_xattr_size = 0;
+ }
bad_ext:
dout("mdsmap_decode m_enabled: %d, m_damaged: %d, m_num_laggy: %d\n",
!!m->m_enabled, !!m->m_damaged, m->m_num_laggy);
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 40140805bdcf..3fc48b43cab0 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -72,15 +72,9 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_type = CEPH_SUPER_MAGIC; /* ?? */
/*
- * express utilization in terms of large blocks to avoid
+ * Express utilization in terms of large blocks to avoid
* overflow on 32-bit machines.
- *
- * NOTE: for the time being, we make bsize == frsize to humor
- * not-yet-ancient versions of glibc that are broken.
- * Someday, we will probably want to report a real block
- * size... whatever that may mean for a network file system!
*/
- buf->f_bsize = 1 << CEPH_BLOCK_SHIFT;
buf->f_frsize = 1 << CEPH_BLOCK_SHIFT;
/*
@@ -95,6 +89,14 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_bavail = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
}
+ /*
+ * NOTE: for the time being, we make bsize == frsize to humor
+ * not-yet-ancient versions of glibc that are broken.
+ * Someday, we will probably want to report a real block
+ * size... whatever that may mean for a network file system!
+ */
+ buf->f_bsize = buf->f_frsize;
+
buf->f_files = le64_to_cpu(st.num_objects);
buf->f_ffree = -1;
buf->f_namelen = NAME_MAX;
@@ -816,6 +818,9 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
if (!fsc->cap_wq)
goto fail_inode_wq;
+ hash_init(fsc->async_unlink_conflict);
+ spin_lock_init(&fsc->async_unlink_conflict_lock);
+
spin_lock(&ceph_fsc_lock);
list_add_tail(&fsc->metric_wakeup, &ceph_fsc_list);
spin_unlock(&ceph_fsc_lock);
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index f59dac66955b..40630e6f691c 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -19,6 +19,7 @@
#include <linux/security.h>
#include <linux/netfs.h>
#include <linux/fscache.h>
+#include <linux/hashtable.h>
#include <linux/ceph/libceph.h>
@@ -99,6 +100,8 @@ struct ceph_mount_options {
char *mon_addr;
};
+#define CEPH_ASYNC_CREATE_CONFLICT_BITS 8
+
struct ceph_fs_client {
struct super_block *sb;
@@ -124,6 +127,9 @@ struct ceph_fs_client {
struct workqueue_struct *inode_wq;
struct workqueue_struct *cap_wq;
+ DECLARE_HASHTABLE(async_unlink_conflict, CEPH_ASYNC_CREATE_CONFLICT_BITS);
+ spinlock_t async_unlink_conflict_lock;
+
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs_dentry_lru, *debugfs_caps;
struct dentry *debugfs_congestion_kb;
@@ -280,7 +286,8 @@ struct ceph_dentry_info {
struct dentry *dentry;
struct ceph_mds_session *lease_session;
struct list_head lease_list;
- unsigned flags;
+ struct hlist_node hnode;
+ unsigned long flags;
int lease_shared_gen;
u32 lease_gen;
u32 lease_seq;
@@ -289,10 +296,14 @@ struct ceph_dentry_info {
u64 offset;
};
-#define CEPH_DENTRY_REFERENCED 1
-#define CEPH_DENTRY_LEASE_LIST 2
-#define CEPH_DENTRY_SHRINK_LIST 4
-#define CEPH_DENTRY_PRIMARY_LINK 8
+#define CEPH_DENTRY_REFERENCED (1 << 0)
+#define CEPH_DENTRY_LEASE_LIST (1 << 1)
+#define CEPH_DENTRY_SHRINK_LIST (1 << 2)
+#define CEPH_DENTRY_PRIMARY_LINK (1 << 3)
+#define CEPH_DENTRY_ASYNC_UNLINK_BIT (4)
+#define CEPH_DENTRY_ASYNC_UNLINK (1 << CEPH_DENTRY_ASYNC_UNLINK_BIT)
+#define CEPH_DENTRY_ASYNC_CREATE_BIT (5)
+#define CEPH_DENTRY_ASYNC_CREATE (1 << CEPH_DENTRY_ASYNC_CREATE_BIT)
struct ceph_inode_xattrs_info {
/*
@@ -758,6 +769,8 @@ extern void ceph_unreserve_caps(struct ceph_mds_client *mdsc,
extern void ceph_reservation_status(struct ceph_fs_client *client,
int *total, int *avail, int *used,
int *reserved, int *min);
+extern void change_auth_cap_ses(struct ceph_inode_info *ci,
+ struct ceph_mds_session *session);
@@ -1218,6 +1231,14 @@ extern int ceph_pool_perm_check(struct inode *inode, int need);
extern void ceph_pool_perm_destroy(struct ceph_mds_client* mdsc);
int ceph_purge_inode_cap(struct inode *inode, struct ceph_cap *cap, bool *invalidate);
+static inline bool ceph_has_inline_data(struct ceph_inode_info *ci)
+{
+ if (ci->i_inline_version == CEPH_INLINE_NONE ||
+ ci->i_inline_version == 1) /* initial version, no data */
+ return false;
+ return true;
+}
+
/* file.c */
extern const struct file_operations ceph_file_fops;
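
The dentry flags field above becomes an unsigned long and the flag macros gain explicit *_BIT numbers because wait_on_bit()/wake_up_bit() take a bit index while ordinary flag tests use the mask; the two forms must stay in sync. A small sketch of just that index/mask relationship, with no kernel wait machinery, using the same hypothetical bit assignments:

    #include <stdio.h>

    #define DENTRY_ASYNC_UNLINK_BIT (4)
    #define DENTRY_ASYNC_UNLINK     (1UL << DENTRY_ASYNC_UNLINK_BIT)
    #define DENTRY_ASYNC_CREATE_BIT (5)
    #define DENTRY_ASYNC_CREATE     (1UL << DENTRY_ASYNC_CREATE_BIT)

    int main(void)
    {
            unsigned long flags = 0;

            /* Set via the mask, as the d_lock-protected paths do ... */
            flags |= DENTRY_ASYNC_UNLINK;

            /* ... while the wait/wake style helpers take the bit number,
             * so both spellings must refer to the same bit. */
            printf("mask set: %d\n", !!(flags & DENTRY_ASYNC_UNLINK));
            printf("bit %d set: %d\n", DENTRY_ASYNC_UNLINK_BIT,
                   !!(flags & (1UL << DENTRY_ASYNC_UNLINK_BIT)));

            flags &= ~DENTRY_ASYNC_UNLINK;
            printf("after clear: %lu\n", flags);    /* 0 */
            return 0;
    }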
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index f141f5246163..f31350cda960 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -1086,7 +1086,7 @@ static int ceph_sync_setxattr(struct inode *inode, const char *name,
flags |= CEPH_XATTR_REMOVE;
}
- dout("setxattr value=%.*s\n", (int)size, value);
+ dout("setxattr value size: %zu\n", size);
/* do request */
req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
@@ -1184,8 +1184,14 @@ int __ceph_setxattr(struct inode *inode, const char *name,
spin_lock(&ci->i_ceph_lock);
retry:
issued = __ceph_caps_issued(ci, NULL);
- if (ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL))
+ required_blob_size = __get_required_blob_size(ci, name_len, val_len);
+ if ((ci->i_xattrs.version == 0) || !(issued & CEPH_CAP_XATTR_EXCL) ||
+ (required_blob_size > mdsc->mdsmap->m_max_xattr_size)) {
+ dout("%s do sync setxattr: version: %llu size: %d max: %llu\n",
+ __func__, ci->i_xattrs.version, required_blob_size,
+ mdsc->mdsmap->m_max_xattr_size);
goto do_sync;
+ }
if (!lock_snap_rwsem && !ci->i_head_snapc) {
lock_snap_rwsem = true;
@@ -1201,8 +1207,6 @@ retry:
ceph_cap_string(issued));
__build_xattrs(inode);
- required_blob_size = __get_required_blob_size(ci, name_len, val_len);
-
if (!ci->i_xattrs.prealloc_blob ||
required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
struct ceph_buffer *blob;
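
The __ceph_setxattr() hunk moves the required_blob_size computation before the decision between the buffered (cap-protected) update and the synchronous MDS request, adding the m_max_xattr_size threshold decoded from the MDS map. A hedged decision sketch of that logic; the function name and the size heuristic are illustrative stand-ins, not the kernel's exact helpers:

    #include <stdio.h>

    /* Decide whether an xattr update can stay local (under the
     * XATTR_EXCL cap) or must go through a synchronous MDS request. */
    static int must_sync_setxattr(unsigned long long xattr_version,
                                  int have_xattr_excl_cap,
                                  unsigned long long required_blob_size,
                                  unsigned long long mds_max_xattr_size)
    {
            if (xattr_version == 0)
                    return 1;       /* no local xattr state yet */
            if (!have_xattr_excl_cap)
                    return 1;       /* not allowed to buffer the change */
            if (required_blob_size > mds_max_xattr_size)
                    return 1;       /* too big for the buffered path */
            return 0;
    }

    int main(void)
    {
            /* 64 KiB limit advertised by the MDS map (mdsmap_ev >= 17). */
            unsigned long long max = 65536;

            printf("%d\n", must_sync_setxattr(3, 1, 1024, max));   /* 0: local */
            printf("%d\n", must_sync_setxattr(3, 1, 131072, max)); /* 1: sync  */
            return 0;
    }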
diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile
index 8c9f2c00be72..7c9785973f49 100644
--- a/fs/cifs/Makefile
+++ b/fs/cifs/Makefile
@@ -5,9 +5,9 @@
ccflags-y += -I$(src) # needed for trace events
obj-$(CONFIG_CIFS) += cifs.o
-cifs-y := trace.o cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o \
+cifs-y := trace.o cifsfs.o cifs_debug.o connect.o dir.o file.o \
inode.o link.o misc.o netmisc.o smbencrypt.o transport.o \
- cifs_unicode.o nterr.o cifsencrypt.o \
+ cached_dir.o cifs_unicode.o nterr.o cifsencrypt.o \
readdir.o ioctl.o sess.o export.o unc.o winucase.o \
smb2ops.o smb2maperror.o smb2transport.o \
smb2misc.o smb2pdu.o smb2inode.o smb2file.o cifsacl.o fs_context.o \
@@ -31,4 +31,4 @@ cifs-$(CONFIG_CIFS_SMB_DIRECT) += smbdirect.o
cifs-$(CONFIG_CIFS_ROOT) += cifsroot.o
-cifs-$(CONFIG_CIFS_ALLOW_INSECURE_LEGACY) += smb1ops.o
+cifs-$(CONFIG_CIFS_ALLOW_INSECURE_LEGACY) += smb1ops.o cifssmb.o
diff --git a/fs/cifs/cached_dir.c b/fs/cifs/cached_dir.c
new file mode 100644
index 000000000000..b401339f6e73
--- /dev/null
+++ b/fs/cifs/cached_dir.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Functions to handle the cached directory entries
+ *
+ * Copyright (c) 2022, Ronnie Sahlberg <lsahlber@redhat.com>
+ */
+
+#include "cifsglob.h"
+#include "cifsproto.h"
+#include "cifs_debug.h"
+#include "smb2proto.h"
+#include "cached_dir.h"
+
+/*
+ * Open the and cache a directory handle.
+ * If error then *cfid is not initialized.
+ */
+int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ const char *path,
+ struct cifs_sb_info *cifs_sb,
+ bool lookup_only, struct cached_fid **ret_cfid)
+{
+ struct cifs_ses *ses;
+ struct TCP_Server_Info *server;
+ struct cifs_open_parms oparms;
+ struct smb2_create_rsp *o_rsp = NULL;
+ struct smb2_query_info_rsp *qi_rsp = NULL;
+ int resp_buftype[2];
+ struct smb_rqst rqst[2];
+ struct kvec rsp_iov[2];
+ struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
+ struct kvec qi_iov[1];
+ int rc, flags = 0;
+ __le16 utf16_path = 0; /* Null - since an open of top of share */
+ u8 oplock = SMB2_OPLOCK_LEVEL_II;
+ struct cifs_fid *pfid;
+ struct dentry *dentry;
+ struct cached_fid *cfid;
+
+ if (tcon == NULL || tcon->nohandlecache ||
+ is_smb1_server(tcon->ses->server))
+ return -EOPNOTSUPP;
+
+ ses = tcon->ses;
+ server = ses->server;
+
+ if (cifs_sb->root == NULL)
+ return -ENOENT;
+
+ if (strlen(path))
+ return -ENOENT;
+
+ dentry = cifs_sb->root;
+
+ cfid = tcon->cfid;
+ mutex_lock(&cfid->fid_mutex);
+ if (cfid->is_valid) {
+ cifs_dbg(FYI, "found a cached root file handle\n");
+ *ret_cfid = cfid;
+ kref_get(&cfid->refcount);
+ mutex_unlock(&cfid->fid_mutex);
+ return 0;
+ }
+
+ /*
+ * We do not hold the lock for the open because in case
+ * SMB2_open needs to reconnect, it will end up calling
+ * cifs_mark_open_files_invalid() which takes the lock again
+ * thus causing a deadlock
+ */
+ mutex_unlock(&cfid->fid_mutex);
+
+ if (lookup_only)
+ return -ENOENT;
+
+ if (smb3_encryption_required(tcon))
+ flags |= CIFS_TRANSFORM_REQ;
+
+ if (!server->ops->new_lease_key)
+ return -EIO;
+
+ pfid = &cfid->fid;
+ server->ops->new_lease_key(pfid);
+
+ memset(rqst, 0, sizeof(rqst));
+ resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
+ memset(rsp_iov, 0, sizeof(rsp_iov));
+
+ /* Open */
+ memset(&open_iov, 0, sizeof(open_iov));
+ rqst[0].rq_iov = open_iov;
+ rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
+
+ oparms.tcon = tcon;
+ oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE);
+ oparms.desired_access = FILE_READ_ATTRIBUTES;
+ oparms.disposition = FILE_OPEN;
+ oparms.fid = pfid;
+ oparms.reconnect = false;
+
+ rc = SMB2_open_init(tcon, server,
+ &rqst[0], &oplock, &oparms, &utf16_path);
+ if (rc)
+ goto oshr_free;
+ smb2_set_next_command(tcon, &rqst[0]);
+
+ memset(&qi_iov, 0, sizeof(qi_iov));
+ rqst[1].rq_iov = qi_iov;
+ rqst[1].rq_nvec = 1;
+
+ rc = SMB2_query_info_init(tcon, server,
+ &rqst[1], COMPOUND_FID,
+ COMPOUND_FID, FILE_ALL_INFORMATION,
+ SMB2_O_INFO_FILE, 0,
+ sizeof(struct smb2_file_all_info) +
+ PATH_MAX * 2, 0, NULL);
+ if (rc)
+ goto oshr_free;
+
+ smb2_set_related(&rqst[1]);
+
+ rc = compound_send_recv(xid, ses, server,
+ flags, 2, rqst,
+ resp_buftype, rsp_iov);
+ mutex_lock(&cfid->fid_mutex);
+
+ /*
+ * Now we need to check again as the cached root might have
+ * been successfully re-opened from a concurrent process
+ */
+
+ if (cfid->is_valid) {
+ /* work was already done */
+
+ /* stash fids for close() later */
+ struct cifs_fid fid = {
+ .persistent_fid = pfid->persistent_fid,
+ .volatile_fid = pfid->volatile_fid,
+ };
+
+ /*
+ * caller expects this func to set the fid in cfid to valid
+ * cached root, so increment the refcount.
+ */
+ kref_get(&cfid->refcount);
+
+ mutex_unlock(&cfid->fid_mutex);
+
+ if (rc == 0) {
+ /* close extra handle outside of crit sec */
+ SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
+ }
+ rc = 0;
+ goto oshr_free;
+ }
+
+ /* Cached root is still invalid, continue normally */
+
+ if (rc) {
+ if (rc == -EREMCHG) {
+ tcon->need_reconnect = true;
+ pr_warn_once("server share %s deleted\n",
+ tcon->treeName);
+ }
+ goto oshr_exit;
+ }
+
+ atomic_inc(&tcon->num_remote_opens);
+
+ o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
+ oparms.fid->persistent_fid = o_rsp->PersistentFileId;
+ oparms.fid->volatile_fid = o_rsp->VolatileFileId;
+#ifdef CONFIG_CIFS_DEBUG2
+ oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
+#endif /* CIFS_DEBUG2 */
+
+ cfid->tcon = tcon;
+ cfid->is_valid = true;
+ cfid->dentry = dentry;
+ dget(dentry);
+ kref_init(&cfid->refcount);
+
+ /* BB TBD check to see if oplock level check can be removed below */
+ if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) {
+ /*
+ * See commit 2f94a3125b87. Increment the refcount when we
+ * get a lease for root, release it if lease break occurs
+ */
+ kref_get(&cfid->refcount);
+ cfid->has_lease = true;
+ smb2_parse_contexts(server, o_rsp,
+ &oparms.fid->epoch,
+ oparms.fid->lease_key, &oplock,
+ NULL, NULL);
+ } else
+ goto oshr_exit;
+
+ qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
+ if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
+ goto oshr_exit;
+ if (!smb2_validate_and_copy_iov(
+ le16_to_cpu(qi_rsp->OutputBufferOffset),
+ sizeof(struct smb2_file_all_info),
+ &rsp_iov[1], sizeof(struct smb2_file_all_info),
+ (char *)&cfid->file_all_info))
+ cfid->file_all_info_is_valid = true;
+
+ cfid->time = jiffies;
+
+oshr_exit:
+ mutex_unlock(&cfid->fid_mutex);
+oshr_free:
+ SMB2_open_free(&rqst[0]);
+ SMB2_query_info_free(&rqst[1]);
+ free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+ free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+ if (rc == 0)
+ *ret_cfid = cfid;
+
+ return rc;
+}
+
+int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
+ struct dentry *dentry,
+ struct cached_fid **ret_cfid)
+{
+ struct cached_fid *cfid;
+
+ cfid = tcon->cfid;
+
+ mutex_lock(&cfid->fid_mutex);
+ if (cfid->dentry == dentry) {
+ cifs_dbg(FYI, "found a cached root file handle by dentry\n");
+ *ret_cfid = cfid;
+ kref_get(&cfid->refcount);
+ mutex_unlock(&cfid->fid_mutex);
+ return 0;
+ }
+ mutex_unlock(&cfid->fid_mutex);
+ return -ENOENT;
+}
+
+static void
+smb2_close_cached_fid(struct kref *ref)
+{
+ struct cached_fid *cfid = container_of(ref, struct cached_fid,
+ refcount);
+ struct cached_dirent *dirent, *q;
+
+ if (cfid->is_valid) {
+ cifs_dbg(FYI, "clear cached root file handle\n");
+ SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
+ cfid->fid.volatile_fid);
+ }
+
+ /*
+ * We only check validity above to send SMB2_close,
+ * but we still need to invalidate these entries
+ * when this function is called
+ */
+ cfid->is_valid = false;
+ cfid->file_all_info_is_valid = false;
+ cfid->has_lease = false;
+ if (cfid->dentry) {
+ dput(cfid->dentry);
+ cfid->dentry = NULL;
+ }
+ /*
+ * Delete all cached dirent names
+ */
+ mutex_lock(&cfid->dirents.de_mutex);
+ list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) {
+ list_del(&dirent->entry);
+ kfree(dirent->name);
+ kfree(dirent);
+ }
+ cfid->dirents.is_valid = 0;
+ cfid->dirents.is_failed = 0;
+ cfid->dirents.ctx = NULL;
+ cfid->dirents.pos = 0;
+ mutex_unlock(&cfid->dirents.de_mutex);
+
+}
+
+void close_cached_dir(struct cached_fid *cfid)
+{
+ mutex_lock(&cfid->fid_mutex);
+ kref_put(&cfid->refcount, smb2_close_cached_fid);
+ mutex_unlock(&cfid->fid_mutex);
+}
+
+void close_cached_dir_lease_locked(struct cached_fid *cfid)
+{
+ if (cfid->has_lease) {
+ cfid->has_lease = false;
+ kref_put(&cfid->refcount, smb2_close_cached_fid);
+ }
+}
+
+void close_cached_dir_lease(struct cached_fid *cfid)
+{
+ mutex_lock(&cfid->fid_mutex);
+ close_cached_dir_lease_locked(cfid);
+ mutex_unlock(&cfid->fid_mutex);
+}
+
+/*
+ * Called from cifs_kill_sb when we unmount a share
+ */
+void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
+{
+ struct rb_root *root = &cifs_sb->tlink_tree;
+ struct rb_node *node;
+ struct cached_fid *cfid;
+ struct cifs_tcon *tcon;
+ struct tcon_link *tlink;
+
+ for (node = rb_first(root); node; node = rb_next(node)) {
+ tlink = rb_entry(node, struct tcon_link, tl_rbnode);
+ tcon = tlink_tcon(tlink);
+ if (IS_ERR(tcon))
+ continue;
+ cfid = tcon->cfid;
+ mutex_lock(&cfid->fid_mutex);
+ if (cfid->dentry) {
+ dput(cfid->dentry);
+ cfid->dentry = NULL;
+ }
+ mutex_unlock(&cfid->fid_mutex);
+ }
+}
+
+/*
+ * Invalidate and close all cached dirs when a TCON has been reset
+ * due to a session loss.
+ */
+void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
+{
+ mutex_lock(&tcon->cfid->fid_mutex);
+ tcon->cfid->is_valid = false;
+ /* cached handle is not valid, so SMB2_CLOSE won't be sent below */
+ close_cached_dir_lease_locked(tcon->cfid);
+ memset(&tcon->cfid->fid, 0, sizeof(struct cifs_fid));
+ mutex_unlock(&tcon->cfid->fid_mutex);
+}
+
+static void
+smb2_cached_lease_break(struct work_struct *work)
+{
+ struct cached_fid *cfid = container_of(work,
+ struct cached_fid, lease_break);
+
+ close_cached_dir_lease(cfid);
+}
+
+int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
+{
+ if (tcon->cfid->is_valid &&
+ !memcmp(lease_key,
+ tcon->cfid->fid.lease_key,
+ SMB2_LEASE_KEY_SIZE)) {
+ tcon->cfid->time = 0;
+ INIT_WORK(&tcon->cfid->lease_break,
+ smb2_cached_lease_break);
+ queue_work(cifsiod_wq,
+ &tcon->cfid->lease_break);
+ return true;
+ }
+ return false;
+}
+
+struct cached_fid *init_cached_dir(void)
+{
+ struct cached_fid *cfid;
+
+ cfid = kzalloc(sizeof(*cfid), GFP_KERNEL);
+ if (!cfid)
+ return NULL;
+ INIT_LIST_HEAD(&cfid->dirents.entries);
+ mutex_init(&cfid->dirents.de_mutex);
+ mutex_init(&cfid->fid_mutex);
+ return cfid;
+}
+
+void free_cached_dir(struct cifs_tcon *tcon)
+{
+ kfree(tcon->cfid);
+}
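
The constructor and destructor above are deliberately small: init_cached_dir() only allocates the structure and initialises its mutexes and list head, and free_cached_dir() frees it again. A hedged sketch of how a tcon setup path might pair them; the real call sites (per the locking table later in this patch, presumably tconInfoAlloc()/tconInfoFree()) are outside this excerpt, and the function name below is hypothetical:

static struct cifs_tcon *example_tcon_alloc(void)
{
        struct cifs_tcon *tcon;

        tcon = kzalloc(sizeof(*tcon), GFP_KERNEL);
        if (!tcon)
                return NULL;

        tcon->cfid = init_cached_dir();   /* allocate the per-tcon cached_fid */
        if (!tcon->cfid) {
                kfree(tcon);
                return NULL;
        }
        return tcon;
}
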
diff --git a/fs/cifs/cached_dir.h b/fs/cifs/cached_dir.h
new file mode 100644
index 000000000000..bd262dc8b179
--- /dev/null
+++ b/fs/cifs/cached_dir.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Functions to handle the cached directory entries
+ *
+ * Copyright (c) 2022, Ronnie Sahlberg <lsahlber@redhat.com>
+ */
+
+#ifndef _CACHED_DIR_H
+#define _CACHED_DIR_H
+
+
+struct cached_dirent {
+ struct list_head entry;
+ char *name;
+ int namelen;
+ loff_t pos;
+
+ struct cifs_fattr fattr;
+};
+
+struct cached_dirents {
+ bool is_valid:1;
+ bool is_failed:1;
+ struct dir_context *ctx; /*
+ * Only used to make sure we only take entries
+ * from a single context. Never dereferenced.
+ */
+ struct mutex de_mutex;
+ int pos; /* Expected ctx->pos */
+ struct list_head entries;
+};
+
+struct cached_fid {
+ bool is_valid:1; /* Do we have a useable root fid */
+ bool file_all_info_is_valid:1;
+ bool has_lease:1;
+ unsigned long time; /* jiffies of when lease was taken */
+ struct kref refcount;
+ struct cifs_fid fid;
+ struct mutex fid_mutex;
+ struct cifs_tcon *tcon;
+ struct dentry *dentry;
+ struct work_struct lease_break;
+ struct smb2_file_all_info file_all_info;
+ struct cached_dirents dirents;
+};
+
+extern struct cached_fid *init_cached_dir(void);
+extern void free_cached_dir(struct cifs_tcon *tcon);
+extern int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ const char *path,
+ struct cifs_sb_info *cifs_sb,
+ bool lookup_only, struct cached_fid **cfid);
+extern int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
+ struct dentry *dentry,
+ struct cached_fid **cfid);
+extern void close_cached_dir(struct cached_fid *cfid);
+extern void close_cached_dir_lease(struct cached_fid *cfid);
+extern void close_cached_dir_lease_locked(struct cached_fid *cfid);
+extern void close_all_cached_dirs(struct cifs_sb_info *cifs_sb);
+extern void invalidate_all_cached_dirs(struct cifs_tcon *tcon);
+extern int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16]);
+
+#endif /* _CACHED_DIR_H */
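
Taken together, the declarations above form a small reference-counted API: the open_cached_dir* helpers return a cached_fid with an extra reference held, and close_cached_dir() drops it again. A hedged usage sketch, not part of the patch; the caller name, variable names and the empty root path are illustrative only:

static int example_query_cached_root(unsigned int xid, struct cifs_tcon *tcon,
                                     struct cifs_sb_info *cifs_sb)
{
        struct cached_fid *cfid;
        int rc;

        rc = open_cached_dir(xid, tcon, "", cifs_sb, false, &cfid);
        if (rc)
                return rc;      /* no cached handle could be opened or reused */

        /* ... issue SMB2 operations against cfid->fid here ... */

        close_cached_dir(cfid); /* drop the reference taken by open_cached_dir() */
        return 0;
}
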
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 2cfbac8bb965..c05477e28cff 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -36,13 +36,13 @@ cifs_dump_mem(char *label, void *data, int length)
void cifs_dump_detail(void *buf, struct TCP_Server_Info *server)
{
#ifdef CONFIG_CIFS_DEBUG2
- struct smb_hdr *smb = (struct smb_hdr *)buf;
+ struct smb_hdr *smb = buf;
cifs_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d\n",
smb->Command, smb->Status.CifsError,
smb->Flags, smb->Flags2, smb->Mid, smb->Pid);
cifs_dbg(VFS, "smb buf %p len %u\n", smb,
- server->ops->calc_smb_size(smb, server));
+ server->ops->calc_smb_size(smb));
#endif /* CONFIG_CIFS_DEBUG2 */
}
@@ -55,7 +55,7 @@ void cifs_dump_mids(struct TCP_Server_Info *server)
return;
cifs_dbg(VFS, "Dump pending requests:\n");
- spin_lock(&GlobalMid_Lock);
+ spin_lock(&server->mid_lock);
list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
cifs_dbg(VFS, "State: %d Cmd: %d Pid: %d Cbdata: %p Mid %llu\n",
mid_entry->mid_state,
@@ -78,7 +78,7 @@ void cifs_dump_mids(struct TCP_Server_Info *server)
mid_entry->resp_buf, 62);
}
}
- spin_unlock(&GlobalMid_Lock);
+ spin_unlock(&server->mid_lock);
#endif /* CONFIG_CIFS_DEBUG2 */
}
@@ -168,7 +168,6 @@ cifs_dump_iface(struct seq_file *m, struct cifs_server_iface *iface)
static int cifs_debug_files_proc_show(struct seq_file *m, void *v)
{
- struct list_head *tmp, *tmp1, *tmp2;
struct TCP_Server_Info *server;
struct cifs_ses *ses;
struct cifs_tcon *tcon;
@@ -184,14 +183,10 @@ static int cifs_debug_files_proc_show(struct seq_file *m, void *v)
#endif /* CIFS_DEBUG2 */
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
- list_for_each(tmp, &server->smb_ses_list) {
- ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
- list_for_each(tmp1, &ses->tcon_list) {
- tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
+ list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+ list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
spin_lock(&tcon->open_file_lock);
- list_for_each(tmp2, &tcon->openFileList) {
- cfile = list_entry(tmp2, struct cifsFileInfo,
- tlist);
+ list_for_each_entry(cfile, &tcon->openFileList, tlist) {
seq_printf(m,
"0x%x 0x%llx 0x%x %d %d %d %pd",
tcon->tid,
@@ -218,7 +213,6 @@ static int cifs_debug_files_proc_show(struct seq_file *m, void *v)
static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
{
- struct list_head *tmp2, *tmp3;
struct mid_q_entry *mid_entry;
struct TCP_Server_Info *server;
struct cifs_ses *ses;
@@ -381,9 +375,7 @@ skip_rdma:
seq_printf(m, "\n\n\tSessions: ");
i = 0;
- list_for_each(tmp2, &server->smb_ses_list) {
- ses = list_entry(tmp2, struct cifs_ses,
- smb_ses_list);
+ list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
i++;
if ((ses->serverDomain == NULL) ||
(ses->serverOS == NULL) ||
@@ -447,9 +439,7 @@ skip_rdma:
else
seq_puts(m, "none\n");
- list_for_each(tmp3, &ses->tcon_list) {
- tcon = list_entry(tmp3, struct cifs_tcon,
- tcon_list);
+ list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
++j;
seq_printf(m, "\n\t%d) ", j);
cifs_debug_tcon(m, tcon);
@@ -473,10 +463,8 @@ skip_rdma:
seq_printf(m, "\n\t\t[NONE]");
seq_puts(m, "\n\n\tMIDs: ");
- spin_lock(&GlobalMid_Lock);
- list_for_each(tmp3, &server->pending_mid_q) {
- mid_entry = list_entry(tmp3, struct mid_q_entry,
- qhead);
+ spin_lock(&server->mid_lock);
+ list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
seq_printf(m, "\n\tState: %d com: %d pid:"
" %d cbdata: %p mid %llu\n",
mid_entry->mid_state,
@@ -485,7 +473,7 @@ skip_rdma:
mid_entry->callback_data,
mid_entry->mid);
}
- spin_unlock(&GlobalMid_Lock);
+ spin_unlock(&server->mid_lock);
seq_printf(m, "\n--\n");
}
if (c == 0)
@@ -504,7 +492,6 @@ static ssize_t cifs_stats_proc_write(struct file *file,
{
bool bv;
int rc;
- struct list_head *tmp1, *tmp2, *tmp3;
struct TCP_Server_Info *server;
struct cifs_ses *ses;
struct cifs_tcon *tcon;
@@ -514,8 +501,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
#ifdef CONFIG_CIFS_STATS2
int i;
- atomic_set(&totBufAllocCount, 0);
- atomic_set(&totSmBufAllocCount, 0);
+ atomic_set(&total_buf_alloc_count, 0);
+ atomic_set(&total_small_buf_alloc_count, 0);
#endif /* CONFIG_CIFS_STATS2 */
atomic_set(&tcpSesReconnectCount, 0);
atomic_set(&tconInfoReconnectCount, 0);
@@ -525,9 +512,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
GlobalCurrentXid = 0;
spin_unlock(&GlobalMid_Lock);
spin_lock(&cifs_tcp_ses_lock);
- list_for_each(tmp1, &cifs_tcp_ses_list) {
- server = list_entry(tmp1, struct TCP_Server_Info,
- tcp_ses_list);
+ list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
server->max_in_flight = 0;
#ifdef CONFIG_CIFS_STATS2
for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
@@ -538,13 +523,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
server->fastest_cmd[0] = 0;
}
#endif /* CONFIG_CIFS_STATS2 */
- list_for_each(tmp2, &server->smb_ses_list) {
- ses = list_entry(tmp2, struct cifs_ses,
- smb_ses_list);
- list_for_each(tmp3, &ses->tcon_list) {
- tcon = list_entry(tmp3,
- struct cifs_tcon,
- tcon_list);
+ list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+ list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
atomic_set(&tcon->num_smbs_sent, 0);
spin_lock(&tcon->stat_lock);
tcon->bytes_read = 0;
@@ -569,7 +549,6 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
#ifdef CONFIG_CIFS_STATS2
int j;
#endif /* STATS2 */
- struct list_head *tmp2, *tmp3;
struct TCP_Server_Info *server;
struct cifs_ses *ses;
struct cifs_tcon *tcon;
@@ -579,17 +558,17 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
seq_printf(m, "Share (unique mount targets): %d\n",
tconInfoAllocCount.counter);
seq_printf(m, "SMB Request/Response Buffer: %d Pool size: %d\n",
- bufAllocCount.counter,
+ buf_alloc_count.counter,
cifs_min_rcv + tcpSesAllocCount.counter);
seq_printf(m, "SMB Small Req/Resp Buffer: %d Pool size: %d\n",
- smBufAllocCount.counter, cifs_min_small);
+ small_buf_alloc_count.counter, cifs_min_small);
#ifdef CONFIG_CIFS_STATS2
seq_printf(m, "Total Large %d Small %d Allocations\n",
- atomic_read(&totBufAllocCount),
- atomic_read(&totSmBufAllocCount));
+ atomic_read(&total_buf_alloc_count),
+ atomic_read(&total_small_buf_alloc_count));
#endif /* CONFIG_CIFS_STATS2 */
- seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
+ seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&mid_count));
seq_printf(m,
"\n%d session %d share reconnects\n",
tcpSesReconnectCount.counter, tconInfoReconnectCount.counter);
@@ -619,13 +598,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
atomic_read(&server->smb2slowcmd[j]),
server->hostname, j);
#endif /* STATS2 */
- list_for_each(tmp2, &server->smb_ses_list) {
- ses = list_entry(tmp2, struct cifs_ses,
- smb_ses_list);
- list_for_each(tmp3, &ses->tcon_list) {
- tcon = list_entry(tmp3,
- struct cifs_tcon,
- tcon_list);
+ list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+ list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
i++;
seq_printf(m, "\n%d) %s", i, tcon->treeName);
if (tcon->need_reconnect)
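
Most of the churn in cifs_debug.c above is a mechanical conversion from the open-coded list_for_each()/list_entry() pair to list_for_each_entry(), which performs the container_of() step itself. Reduced to a sketch, iterating sessions on a server as in the hunks above:

        struct cifs_ses *ses;
        struct list_head *tmp;

        /* before: walk raw list nodes, then convert each one by hand */
        list_for_each(tmp, &server->smb_ses_list) {
                ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
                /* ... use ses ... */
        }

        /* after: the iterator yields the containing structure directly */
        list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
                /* ... use ses ... */
        }
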
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index bf861fef2f0c..fa480d62f313 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -1379,6 +1379,7 @@ chown_chgrp_exit:
return rc;
}
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
const struct cifs_fid *cifsfid, u32 *pacllen,
u32 __maybe_unused unused)
@@ -1512,6 +1513,7 @@ out:
cifs_put_tlink(tlink);
return rc;
}
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
/* Translate the CIFS ACL (similar to NTFS ACL) for a file into mode bits */
int
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 663cb9db4908..46f5718754f9 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -32,10 +32,9 @@ int __cifs_calc_signature(struct smb_rqst *rqst,
int rc;
struct kvec *iov = rqst->rq_iov;
int n_vec = rqst->rq_nvec;
- int is_smb2 = server->vals->header_preamble_size == 0;
/* iov[0] is actual data and not the rfc1002 length for SMB2+ */
- if (is_smb2) {
+ if (!is_smb1(server)) {
if (iov[0].iov_len <= 4)
return -EIO;
i = 0;
@@ -141,13 +140,13 @@ int cifs_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server,
if ((cifs_pdu == NULL) || (server == NULL))
return -EINVAL;
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
if (!(cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) ||
server->tcpStatus == CifsNeedNegotiate) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
return rc;
}
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
if (!server->session_estab) {
memcpy(cifs_pdu->Signature.SecuritySignature, "BSRSPYL", 8);
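
The signing hunk above replaces the open-coded header_preamble_size test with the is_smb1() helper that this patch adds to cifsglob.h (shown further down). Restated as a sketch: only SMB1 still carries the 4-byte RFC1002 preamble, so a non-zero preamble size identifies an SMB1 connection.

static inline bool is_smb1(struct TCP_Server_Info *server)
{
        /* SMB2/SMB3 dialects report a preamble size of 0 */
        return HEADER_PREAMBLE_SIZE(server) != 0;
}

/* typical use, as in __cifs_calc_signature() above */
if (!is_smb1(server)) {
        /* SMB2+: iov[0] holds real data, not the RFC1002 length field */
}
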
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 8f2e003e0590..f54d8bf2732a 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -46,6 +46,7 @@
#include "netlink.h"
#endif
#include "fs_context.h"
+#include "cached_dir.h"
/*
* DOS dates from 1980/1/1 through 2107/12/31
@@ -68,6 +69,34 @@ bool enable_negotiate_signing; /* false by default */
unsigned int global_secflags = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;
+
+/*
+ * Global transaction id (XID) information
+ */
+unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */
+unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
+unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */
+spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */
+
+/*
+ * Global counters, updated atomically
+ */
+atomic_t sesInfoAllocCount;
+atomic_t tconInfoAllocCount;
+atomic_t tcpSesNextId;
+atomic_t tcpSesAllocCount;
+atomic_t tcpSesReconnectCount;
+atomic_t tconInfoReconnectCount;
+
+atomic_t mid_count;
+atomic_t buf_alloc_count;
+atomic_t small_buf_alloc_count;
+#ifdef CONFIG_CIFS_STATS2
+atomic_t total_buf_alloc_count;
+atomic_t total_small_buf_alloc_count;
+#endif /* STATS2 */
+struct list_head cifs_tcp_ses_list;
+spinlock_t cifs_tcp_ses_lock;
static const struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, uint, 0444);
@@ -255,30 +284,13 @@ out_no_root:
static void cifs_kill_sb(struct super_block *sb)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
- struct cifs_tcon *tcon;
- struct cached_fid *cfid;
- struct rb_root *root = &cifs_sb->tlink_tree;
- struct rb_node *node;
- struct tcon_link *tlink;
/*
* We need to release all dentries for the cached directories
* before we kill the sb.
*/
if (cifs_sb->root) {
- for (node = rb_first(root); node; node = rb_next(node)) {
- tlink = rb_entry(node, struct tcon_link, tl_rbnode);
- tcon = tlink_tcon(tlink);
- if (IS_ERR(tcon))
- continue;
- cfid = &tcon->crfid;
- mutex_lock(&cfid->fid_mutex);
- if (cfid->dentry) {
- dput(cfid->dentry);
- cfid->dentry = NULL;
- }
- mutex_unlock(&cfid->fid_mutex);
- }
+ close_all_cached_dirs(cifs_sb);
/* finally release root dentry */
dput(cifs_sb->root);
@@ -681,6 +693,7 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
}
+ seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);
if (tcon->ses->chan_max > 1)
seq_printf(s, ",multichannel,max_channels=%zu",
@@ -703,14 +716,17 @@ static void cifs_umount_begin(struct super_block *sb)
tcon = cifs_sb_master_tcon(cifs_sb);
spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&tcon->tc_lock);
if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
/* we have other mounts to same share or we have
already tried to force umount this and woken up
all waiting network requests, nothing to do */
+ spin_unlock(&tcon->tc_lock);
spin_unlock(&cifs_tcp_ses_lock);
return;
} else if (tcon->tc_count == 1)
tcon->status = TID_EXITING;
+ spin_unlock(&tcon->tc_lock);
spin_unlock(&cifs_tcp_ses_lock);
/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
@@ -1537,8 +1553,7 @@ cifs_destroy_request_bufs(void)
kmem_cache_destroy(cifs_sm_req_cachep);
}
-static int
-cifs_init_mids(void)
+static int init_mids(void)
{
cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
sizeof(struct mid_q_entry), 0,
@@ -1556,8 +1571,7 @@ cifs_init_mids(void)
return 0;
}
-static void
-cifs_destroy_mids(void)
+static void destroy_mids(void)
{
mempool_destroy(cifs_mid_poolp);
kmem_cache_destroy(cifs_mid_cachep);
@@ -1579,11 +1593,11 @@ init_cifs(void)
atomic_set(&tcpSesReconnectCount, 0);
atomic_set(&tconInfoReconnectCount, 0);
- atomic_set(&bufAllocCount, 0);
- atomic_set(&smBufAllocCount, 0);
+ atomic_set(&buf_alloc_count, 0);
+ atomic_set(&small_buf_alloc_count, 0);
#ifdef CONFIG_CIFS_STATS2
- atomic_set(&totBufAllocCount, 0);
- atomic_set(&totSmBufAllocCount, 0);
+ atomic_set(&total_buf_alloc_count, 0);
+ atomic_set(&total_small_buf_alloc_count, 0);
if (slow_rsp_threshold < 1)
cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
else if (slow_rsp_threshold > 32767)
@@ -1591,7 +1605,7 @@ init_cifs(void)
"slow response threshold set higher than recommended (0 to 32767)\n");
#endif /* CONFIG_CIFS_STATS2 */
- atomic_set(&midCount, 0);
+ atomic_set(&mid_count, 0);
GlobalCurrentXid = 0;
GlobalTotalActiveXid = 0;
GlobalMaxActiveXid = 0;
@@ -1654,7 +1668,7 @@ init_cifs(void)
if (rc)
goto out_destroy_deferredclose_wq;
- rc = cifs_init_mids();
+ rc = init_mids();
if (rc)
goto out_destroy_inodecache;
@@ -1711,7 +1725,7 @@ out_destroy_request_bufs:
#endif
cifs_destroy_request_bufs();
out_destroy_mids:
- cifs_destroy_mids();
+ destroy_mids();
out_destroy_inodecache:
cifs_destroy_inodecache();
out_destroy_deferredclose_wq:
@@ -1747,7 +1761,7 @@ exit_cifs(void)
dfs_cache_destroy();
#endif
cifs_destroy_request_bufs();
- cifs_destroy_mids();
+ destroy_mids();
cifs_destroy_inodecache();
destroy_workqueue(deferredclose_wq);
destroy_workqueue(cifsoplockd_wq);
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index b17be47a8e59..81f4c15936d0 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -153,6 +153,6 @@ extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
/* when changing internal version - update following two lines at same time */
-#define SMB3_PRODUCT_BUILD 37
-#define CIFS_VERSION "2.37"
+#define SMB3_PRODUCT_BUILD 38
+#define CIFS_VERSION "2.38"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index a643c84ff1e9..ae7f571a7dba 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -417,7 +417,7 @@ struct smb_version_operations {
int (*close_dir)(const unsigned int, struct cifs_tcon *,
struct cifs_fid *);
/* calculate a size of SMB message */
- unsigned int (*calc_smb_size)(void *buf, struct TCP_Server_Info *ptcpi);
+ unsigned int (*calc_smb_size)(void *buf);
/* check for STATUS_PENDING and process the response if yes */
bool (*is_status_pending)(char *buf, struct TCP_Server_Info *server);
/* check for STATUS_NETWORK_SESSION_EXPIRED */
@@ -557,6 +557,8 @@ struct smb_version_values {
#define HEADER_SIZE(server) (server->vals->header_size)
#define MAX_HEADER_SIZE(server) (server->vals->max_header_size)
+#define HEADER_PREAMBLE_SIZE(server) (server->vals->header_preamble_size)
+#define MID_HEADER_SIZE(server) (HEADER_SIZE(server) - 1 - HEADER_PREAMBLE_SIZE(server))
/**
* CIFS superblock mount flags (mnt_cifs_flags) to consider when
@@ -605,6 +607,7 @@ inc_rfc1001_len(void *buf, int count)
struct TCP_Server_Info {
struct list_head tcp_ses_list;
struct list_head smb_ses_list;
+ spinlock_t srv_lock; /* protect anything here that is not protected by another lock */
__u64 conn_id; /* connection identifier (useful for debugging) */
int srv_count; /* reference counter */
/* 15 character server name + 0x20 16th byte indicating type = srv */
@@ -622,6 +625,7 @@ struct TCP_Server_Info {
#endif
wait_queue_head_t response_q;
wait_queue_head_t request_q; /* if more than maxmpx to srvr must block*/
+ spinlock_t mid_lock; /* protect mid queue and its entries */
struct list_head pending_mid_q;
bool noblocksnd; /* use blocking sendmsg */
bool noautotune; /* do not autotune send buf sizes */
@@ -748,6 +752,11 @@ struct TCP_Server_Info {
#endif
};
+static inline bool is_smb1(struct TCP_Server_Info *server)
+{
+ return HEADER_PREAMBLE_SIZE(server) != 0;
+}
+
static inline void cifs_server_lock(struct TCP_Server_Info *server)
{
unsigned int nofs_flag = memalloc_nofs_save();
@@ -1008,6 +1017,7 @@ struct cifs_ses {
struct list_head rlist; /* reconnect list */
struct list_head tcon_list;
struct cifs_tcon *tcon_ipc;
+ spinlock_t ses_lock; /* protect anything here that is not protected by another lock */
struct mutex session_mutex;
struct TCP_Server_Info *server; /* pointer to server info */
int ses_count; /* reference counter */
@@ -1125,42 +1135,6 @@ struct cifs_fattr {
u32 cf_cifstag;
};
-struct cached_dirent {
- struct list_head entry;
- char *name;
- int namelen;
- loff_t pos;
-
- struct cifs_fattr fattr;
-};
-
-struct cached_dirents {
- bool is_valid:1;
- bool is_failed:1;
- struct dir_context *ctx; /*
- * Only used to make sure we only take entries
- * from a single context. Never dereferenced.
- */
- struct mutex de_mutex;
- int pos; /* Expected ctx->pos */
- struct list_head entries;
-};
-
-struct cached_fid {
- bool is_valid:1; /* Do we have a useable root fid */
- bool file_all_info_is_valid:1;
- bool has_lease:1;
- unsigned long time; /* jiffies of when lease was taken */
- struct kref refcount;
- struct cifs_fid *fid;
- struct mutex fid_mutex;
- struct cifs_tcon *tcon;
- struct dentry *dentry;
- struct work_struct lease_break;
- struct smb2_file_all_info file_all_info;
- struct cached_dirents dirents;
-};
-
/*
* there is one of these for each connection to a resource on a particular
* session
@@ -1169,6 +1143,7 @@ struct cifs_tcon {
struct list_head tcon_list;
int tc_count;
struct list_head rlist; /* reconnect list */
+ spinlock_t tc_lock; /* protect anything here that is not protected by another lock */
atomic_t num_local_opens; /* num of all opens including disconnected */
atomic_t num_remote_opens; /* num of all network opens on server */
struct list_head openFileList;
@@ -1253,7 +1228,7 @@ struct cifs_tcon {
struct fscache_volume *fscache; /* cookie for share */
#endif
struct list_head pending_opens; /* list of incomplete opens */
- struct cached_fid crfid; /* Cached root fid */
+ struct cached_fid *cfid; /* Cached root fid */
/* BB add field for back pointer to sb struct(s)? */
#ifdef CONFIG_CIFS_DFS_UPCALL
struct list_head ulist; /* cache update list */
@@ -1899,33 +1874,78 @@ require use of the stronger protocol */
*/
/****************************************************************************
- * Locking notes. All updates to global variables and lists should be
- * protected by spinlocks or semaphores.
+ * Here are all the locks (spinlocks, mutexes and semaphores) in cifs.ko, arranged in
+ * locking order: if two locks must be held together, the lock that appears higher in
+ * this list must be taken before the other.
+ *
+ * If you hold a lock that is lower in this list, and you need to take a higher lock
+ * (or if you think that one of the functions that you're calling may need to), first
+ * drop the lock you hold, pick up the higher lock, then the lower one. This will
+ * ensure that locks are picked up only in one direction in the below table
+ * (top to bottom).
*
- * Spinlocks
- * ---------
- * GlobalMid_Lock protects:
- * list operations on pending_mid_q and oplockQ
- * updates to XID counters, multiplex id and SMB sequence numbers
- * list operations on global DnotifyReqList
- * updates to ses->status and TCP_Server_Info->tcpStatus
- * updates to server->CurrentMid
- * tcp_ses_lock protects:
- * list operations on tcp and SMB session lists
- * tcon->open_file_lock protects the list of open files hanging off the tcon
- * inode->open_file_lock protects the openFileList hanging off the inode
- * cfile->file_info_lock protects counters and fields in cifs file struct
- * f_owner.lock protects certain per file struct operations
- * mapping->page_lock protects certain per page operations
+ * Also, if you expect a function to be called with a lock held, explicitly document
+ * this in the comments on top of your function definition.
*
- * Note that the cifs_tcon.open_file_lock should be taken before
- * not after the cifsInodeInfo.open_file_lock
+ * Also, keep critical sections (lock hold times) as short as possible. Blocking, or
+ * calling other functions with a lock held, always increases the risk of
+ * deadlock.
*
- * Semaphores
- * ----------
- * cifsInodeInfo->lock_sem protects:
- * the list of locks held by the inode
+ * Following this rule avoids deadlocks, which can be very hard to debug. Also, whenever
+ * you introduce a new lock, please add it to this list in the position that matches
+ * its place in the locking order.
*
+ * If you notice that an existing lock is missing from this list, or that an entry is
+ * out of order, please update the table so that it keeps reflecting the expected
+ * locking order.
+ *
+ * =====================================================================================
+ * Lock                            Protects                        Initialization fn
+ * =====================================================================================
+ * vol_list_lock
+ * vol_info->ctx_lock              vol_info->ctx
+ * cifs_sb_info->tlink_tree_lock   cifs_sb_info->tlink_tree        cifs_setup_cifs_sb
+ * TCP_Server_Info->               TCP_Server_Info                 cifs_get_tcp_session
+ *     reconnect_mutex
+ * TCP_Server_Info->srv_mutex      TCP_Server_Info                 cifs_get_tcp_session
+ * cifs_ses->session_mutex         cifs_ses                        sesInfoAlloc
+ *                                 cifs_tcon
+ * cifs_tcon->open_file_lock       cifs_tcon->openFileList         tconInfoAlloc
+ *                                 cifs_tcon->pending_opens
+ * cifs_tcon->stat_lock            cifs_tcon->bytes_read           tconInfoAlloc
+ *                                 cifs_tcon->bytes_written
+ * cifs_tcp_ses_lock               cifs_tcp_ses_list               sesInfoAlloc
+ * GlobalMid_Lock                  GlobalMaxActiveXid              init_cifs
+ *                                 GlobalCurrentXid
+ *                                 GlobalTotalActiveXid
+ * TCP_Server_Info->srv_lock       (anything in struct not protected by another lock and can change)
+ * TCP_Server_Info->mid_lock       TCP_Server_Info->pending_mid_q  cifs_get_tcp_session
+ *                                 ->CurrentMid
+ *                                 (any changes in mid_q_entry fields)
+ * TCP_Server_Info->req_lock       TCP_Server_Info->in_flight      cifs_get_tcp_session
+ *                                 ->credits
+ *                                 ->echo_credits
+ *                                 ->oplock_credits
+ *                                 ->reconnect_instance
+ * cifs_ses->ses_lock              (anything that is not protected by another lock and can change)
+ * cifs_ses->iface_lock            cifs_ses->iface_list            sesInfoAlloc
+ *                                 ->iface_count
+ *                                 ->iface_last_update
+ * cifs_ses->chan_lock             cifs_ses->chans
+ *                                 ->chans_need_reconnect
+ *                                 ->chans_in_reconnect
+ * cifs_tcon->tc_lock              (anything that is not protected by another lock and can change)
+ * cifsInodeInfo->open_file_lock   cifsInodeInfo->openFileList     cifs_alloc_inode
+ * cifsInodeInfo->writers_lock     cifsInodeInfo->writers          cifsInodeInfo_alloc
+ * cifsInodeInfo->lock_sem         cifsInodeInfo->llist            cifs_init_once
+ *                                 ->can_cache_brlcks
+ * cifsInodeInfo->deferred_lock    cifsInodeInfo->deferred_closes  cifsInodeInfo_alloc
+ * cached_fid->fid_mutex           cifs_tcon->crfid                tconInfoAlloc
+ * cifsFileInfo->fh_mutex          cifsFileInfo                    cifs_new_fileinfo
+ * cifsFileInfo->file_info_lock    cifsFileInfo->count             cifs_new_fileinfo
+ *                                 ->invalidHandle                 initiate_cifs_search
+ *                                 ->oplock_break_cancelled
+ * cifs_aio_ctx->aio_mutex         cifs_aio_ctx                    cifs_aio_ctx_alloc
****************************************************************************/
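
As an illustration of the rule stated above, consider a function that holds cifs_ses->ses_lock and discovers it also needs cifs_tcp_ses_lock, which sits higher in the table. A hedged sketch, not from the patch; the function is hypothetical and error handling is omitted:

static void example_take_both_locks(struct cifs_ses *ses)
{
        spin_lock(&ses->ses_lock);
        /* ... work that needs only ses_lock ... */
        spin_unlock(&ses->ses_lock);    /* drop the lower lock first */

        spin_lock(&cifs_tcp_ses_lock);  /* take the higher lock ... */
        spin_lock(&ses->ses_lock);      /* ... then re-take the lower one */
        /* ... work that needs both locks ... */
        spin_unlock(&ses->ses_lock);
        spin_unlock(&cifs_tcp_ses_lock);
}
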
#ifdef DECLARE_GLOBALS_HERE
@@ -1941,47 +1961,44 @@ require use of the stronger protocol */
* sessions (and from that the tree connections) can be found
* by iterating over cifs_tcp_ses_list
*/
-GLOBAL_EXTERN struct list_head cifs_tcp_ses_list;
+extern struct list_head cifs_tcp_ses_list;
/*
* This lock protects the cifs_tcp_ses_list, the list of smb sessions per
* tcp session, and the list of tcon's per smb session. It also protects
- * the reference counters for the server, smb session, and tcon. It also
- * protects some fields in the TCP_Server_Info struct such as dstaddr. Finally,
- * changes to the tcon->tidStatus should be done while holding this lock.
+ * the reference counters for the server, smb session, and tcon.
* generally the locks should be taken in order tcp_ses_lock before
* tcon->open_file_lock and that before file->file_info_lock since the
* structure order is cifs_socket-->cifs_ses-->cifs_tcon-->cifs_file
*/
-GLOBAL_EXTERN spinlock_t cifs_tcp_ses_lock;
+extern spinlock_t cifs_tcp_ses_lock;
/*
* Global transaction id (XID) information
*/
-GLOBAL_EXTERN unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */
-GLOBAL_EXTERN unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
-GLOBAL_EXTERN unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */
-GLOBAL_EXTERN spinlock_t GlobalMid_Lock; /* protects above & list operations */
- /* on midQ entries */
+extern unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */
+extern unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
+extern unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */
+extern spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */
+
/*
* Global counters, updated atomically
*/
-GLOBAL_EXTERN atomic_t sesInfoAllocCount;
-GLOBAL_EXTERN atomic_t tconInfoAllocCount;
-GLOBAL_EXTERN atomic_t tcpSesNextId;
-GLOBAL_EXTERN atomic_t tcpSesAllocCount;
-GLOBAL_EXTERN atomic_t tcpSesReconnectCount;
-GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
+extern atomic_t sesInfoAllocCount;
+extern atomic_t tconInfoAllocCount;
+extern atomic_t tcpSesNextId;
+extern atomic_t tcpSesAllocCount;
+extern atomic_t tcpSesReconnectCount;
+extern atomic_t tconInfoReconnectCount;
/* Various Debug counters */
-GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
+extern atomic_t buf_alloc_count; /* current number allocated */
+extern atomic_t small_buf_alloc_count;
#ifdef CONFIG_CIFS_STATS2
-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
+extern atomic_t total_buf_alloc_count; /* total allocated over all time */
+extern atomic_t total_small_buf_alloc_count;
extern unsigned int slow_rsp_threshold; /* number of secs before logging */
#endif
-GLOBAL_EXTERN atomic_t smBufAllocCount;
-GLOBAL_EXTERN atomic_t midCount;
/* Misc globals */
extern bool enable_oplocks; /* enable or disable oplocks */
@@ -1998,6 +2015,7 @@ extern unsigned int cifs_min_rcv; /* min size of big ntwrk buf pool */
extern unsigned int cifs_min_small; /* min size of small buf pool */
extern unsigned int cifs_max_pending; /* MAX requests at once to server*/
extern bool disable_legacy_dialects; /* forbid vers=1.0 and vers=2.0 mounts */
+extern atomic_t mid_count;
void cifs_oplock_break(struct work_struct *work);
void cifs_queue_oplock_break(struct cifsFileInfo *cfile);
@@ -2085,9 +2103,9 @@ static inline bool cifs_is_referral_server(struct cifs_tcon *tcon,
return is_tcon_dfs(tcon) || (ref && (ref->flags & DFSREF_REFERRAL_SERVER));
}
-static inline u64 cifs_flock_len(struct file_lock *fl)
+static inline u64 cifs_flock_len(const struct file_lock *fl)
{
- return fl->fl_end == OFFSET_MAX ? 0 : fl->fl_end - fl->fl_start + 1;
+ return (u64)fl->fl_end - fl->fl_start + 1;
}
static inline size_t ntlmssp_workstation_name_size(const struct cifs_ses *ses)
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index d59aebefa71c..3bc94bcc7177 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -78,12 +78,8 @@ extern char *build_wildcard_path_from_dentry(struct dentry *direntry);
extern char *cifs_compose_mount_options(const char *sb_mountdata,
const char *fullpath, const struct dfs_info3_param *ref,
char **devname);
-/* extern void renew_parental_timestamps(struct dentry *direntry);*/
-extern struct mid_q_entry *AllocMidQEntry(const struct smb_hdr *smb_buffer,
- struct TCP_Server_Info *server);
-extern void DeleteMidQEntry(struct mid_q_entry *midEntry);
-extern void cifs_delete_mid(struct mid_q_entry *mid);
-extern void cifs_mid_q_entry_release(struct mid_q_entry *midEntry);
+extern void delete_mid(struct mid_q_entry *mid);
+extern void release_mid(struct mid_q_entry *mid);
extern void cifs_wake_up_task(struct mid_q_entry *mid);
extern int cifs_handle_standard(struct TCP_Server_Info *server,
struct mid_q_entry *mid);
@@ -155,7 +151,7 @@ extern int cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
extern struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *, bool);
extern int cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
struct cifsFileInfo **ret_file);
-extern unsigned int smbCalcSize(void *buf, struct TCP_Server_Info *server);
+extern unsigned int smbCalcSize(void *buf);
extern int decode_negTokenInit(unsigned char *security_blob, int length,
struct TCP_Server_Info *server);
extern int cifs_convert_address(struct sockaddr *dst, const char *src, int len);
@@ -521,6 +517,7 @@ extern int generate_smb30signingkey(struct cifs_ses *ses,
extern int generate_smb311signingkey(struct cifs_ses *ses,
struct TCP_Server_Info *server);
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
extern int CIFSSMBCopy(unsigned int xid,
struct cifs_tcon *source_tcon,
const char *fromName,
@@ -551,6 +548,7 @@ extern int CIFSSMBSetPosixACL(const unsigned int xid, struct cifs_tcon *tcon,
const struct nls_table *nls_codepage, int remap_special_chars);
extern int CIFSGetExtAttr(const unsigned int xid, struct cifs_tcon *tcon,
const int netfid, __u64 *pExtAttrBits, __u64 *pMask);
+#endif /* CIFS_ALLOW_INSECURE_LEGACY */
extern void cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb);
extern bool couldbe_mf_symlink(const struct cifs_fattr *fattr);
extern int check_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
@@ -599,7 +597,6 @@ enum securityEnum cifs_select_sectype(struct TCP_Server_Info *,
struct cifs_aio_ctx *cifs_aio_ctx_alloc(void);
void cifs_aio_ctx_release(struct kref *refcount);
int setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw);
-void smb2_cached_lease_break(struct work_struct *work);
int cifs_alloc_hash(const char *name, struct crypto_shash **shash,
struct sdesc **sdesc);
diff --git a/fs/cifs/cifsroot.c b/fs/cifs/cifsroot.c
index 9e91a5a40aae..56ec1b233f52 100644
--- a/fs/cifs/cifsroot.c
+++ b/fs/cifs/cifsroot.c
@@ -59,7 +59,7 @@ static int __init cifs_root_setup(char *line)
pr_err("Root-CIFS: UNC path too long\n");
return 1;
}
- strlcpy(root_dev, line, len);
+ strscpy(root_dev, line, len);
srvaddr = parse_srvaddr(&line[2], s);
if (*s) {
int n = snprintf(root_opts,
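
The cifsroot hunk above switches from strlcpy() to strscpy(). strscpy() is the preferred bounded copy: it always NUL-terminates, does not have to traverse the whole source string, and reports truncation through its return value instead of returning the source length. A short hedged sketch of the calling convention; the buffer name and size below are illustrative:

        char dest[64];
        ssize_t n;

        n = strscpy(dest, line, sizeof(dest));  /* copies at most sizeof(dest) - 1 bytes */
        if (n == -E2BIG)
                pr_warn("source string was truncated\n");
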
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 6371b9eebdad..7aa91e272027 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -29,7 +29,6 @@
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
-#include "smb2proto.h"
#include "fscache.h"
#include "smbdirect.h"
#ifdef CONFIG_CIFS_DFS_UPCALL
@@ -62,52 +61,6 @@ static struct {
#define CIFS_NUM_PROT 1
#endif /* CIFS_POSIX */
-/*
- * Mark as invalid, all open files on tree connections since they
- * were closed when session to server was lost.
- */
-void
-cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
-{
- struct cifsFileInfo *open_file = NULL;
- struct list_head *tmp;
- struct list_head *tmp1;
-
- /* only send once per connect */
- spin_lock(&cifs_tcp_ses_lock);
- if ((tcon->ses->ses_status != SES_GOOD) || (tcon->status != TID_NEED_RECON)) {
- spin_unlock(&cifs_tcp_ses_lock);
- return;
- }
- tcon->status = TID_IN_FILES_INVALIDATE;
- spin_unlock(&cifs_tcp_ses_lock);
-
- /* list all files open on tree connection and mark them invalid */
- spin_lock(&tcon->open_file_lock);
- list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
- open_file = list_entry(tmp, struct cifsFileInfo, tlist);
- open_file->invalidHandle = true;
- open_file->oplock_break_cancelled = true;
- }
- spin_unlock(&tcon->open_file_lock);
-
- mutex_lock(&tcon->crfid.fid_mutex);
- tcon->crfid.is_valid = false;
- /* cached handle is not valid, so SMB2_CLOSE won't be sent below */
- close_cached_dir_lease_locked(&tcon->crfid);
- memset(tcon->crfid.fid, 0, sizeof(struct cifs_fid));
- mutex_unlock(&tcon->crfid.fid_mutex);
-
- spin_lock(&cifs_tcp_ses_lock);
- if (tcon->status == TID_IN_FILES_INVALIDATE)
- tcon->status = TID_NEED_TCON;
- spin_unlock(&cifs_tcp_ses_lock);
-
- /*
- * BB Add call to invalidate_inodes(sb) for all superblocks mounted
- * to this tcon.
- */
-}
/* reconnect the socket, tcon, and smb session if needed */
static int
@@ -134,18 +87,18 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
* only tree disconnect, open, and write, (and ulogoff which does not
* have tcon) are allowed as we start force umount
*/
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&tcon->tc_lock);
if (tcon->status == TID_EXITING) {
if (smb_command != SMB_COM_WRITE_ANDX &&
smb_command != SMB_COM_OPEN_ANDX &&
smb_command != SMB_COM_TREE_DISCONNECT) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&tcon->tc_lock);
cifs_dbg(FYI, "can not send cmd %d while umounting\n",
smb_command);
return -ENODEV;
}
}
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&tcon->tc_lock);
retries = server->nr_targets;
@@ -165,12 +118,12 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
}
/* are we still trying to reconnect? */
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
if (server->tcpStatus != CifsNeedReconnect) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
break;
}
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
if (retries && --retries)
continue;
@@ -201,13 +154,13 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
* and the server never sends an answer the socket will be closed
* and tcpStatus set to reconnect.
*/
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsNeedReconnect) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
rc = -EHOSTDOWN;
goto out;
}
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
/*
* need to prevent multiple threads trying to simultaneously
@@ -457,52 +410,6 @@ decode_ext_sec_blob(struct cifs_ses *ses, NEGOTIATE_RSP *pSMBr)
return 0;
}
-int
-cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required)
-{
- bool srv_sign_required = server->sec_mode & server->vals->signing_required;
- bool srv_sign_enabled = server->sec_mode & server->vals->signing_enabled;
- bool mnt_sign_enabled = global_secflags & CIFSSEC_MAY_SIGN;
-
- /*
- * Is signing required by mnt options? If not then check
- * global_secflags to see if it is there.
- */
- if (!mnt_sign_required)
- mnt_sign_required = ((global_secflags & CIFSSEC_MUST_SIGN) ==
- CIFSSEC_MUST_SIGN);
-
- /*
- * If signing is required then it's automatically enabled too,
- * otherwise, check to see if the secflags allow it.
- */
- mnt_sign_enabled = mnt_sign_required ? mnt_sign_required :
- (global_secflags & CIFSSEC_MAY_SIGN);
-
- /* If server requires signing, does client allow it? */
- if (srv_sign_required) {
- if (!mnt_sign_enabled) {
- cifs_dbg(VFS, "Server requires signing, but it's disabled in SecurityFlags!\n");
- return -ENOTSUPP;
- }
- server->sign = true;
- }
-
- /* If client requires signing, does server allow it? */
- if (mnt_sign_required) {
- if (!srv_sign_enabled) {
- cifs_dbg(VFS, "Server does not support signing!\n");
- return -ENOTSUPP;
- }
- server->sign = true;
- }
-
- if (cifs_rdma_enabled(server) && server->sign)
- cifs_dbg(VFS, "Signing is enabled, and RDMA read/write will be disabled\n");
-
- return 0;
-}
-
static bool
should_set_ext_sec_flag(enum securityEnum sectype)
{
@@ -684,7 +591,7 @@ cifs_echo_callback(struct mid_q_entry *mid)
struct TCP_Server_Info *server = mid->callback_data;
struct cifs_credits credits = { .value = 1, .instance = 0 };
- DeleteMidQEntry(mid);
+ release_mid(mid);
add_credits(server, &credits, CIFS_ECHO_OP);
}
@@ -1379,184 +1286,6 @@ openRetry:
return rc;
}
-/*
- * Discard any remaining data in the current SMB. To do this, we borrow the
- * current bigbuf.
- */
-int
-cifs_discard_remaining_data(struct TCP_Server_Info *server)
-{
- unsigned int rfclen = server->pdu_size;
- int remaining = rfclen + server->vals->header_preamble_size -
- server->total_read;
-
- while (remaining > 0) {
- int length;
-
- length = cifs_discard_from_socket(server,
- min_t(size_t, remaining,
- CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
- if (length < 0)
- return length;
- server->total_read += length;
- remaining -= length;
- }
-
- return 0;
-}
-
-static int
-__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
- bool malformed)
-{
- int length;
-
- length = cifs_discard_remaining_data(server);
- dequeue_mid(mid, malformed);
- mid->resp_buf = server->smallbuf;
- server->smallbuf = NULL;
- return length;
-}
-
-static int
-cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
-{
- struct cifs_readdata *rdata = mid->callback_data;
-
- return __cifs_readv_discard(server, mid, rdata->result);
-}
-
-int
-cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
-{
- int length, len;
- unsigned int data_offset, data_len;
- struct cifs_readdata *rdata = mid->callback_data;
- char *buf = server->smallbuf;
- unsigned int buflen = server->pdu_size +
- server->vals->header_preamble_size;
- bool use_rdma_mr = false;
-
- cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%u\n",
- __func__, mid->mid, rdata->offset, rdata->bytes);
-
- /*
- * read the rest of READ_RSP header (sans Data array), or whatever we
- * can if there's not enough data. At this point, we've read down to
- * the Mid.
- */
- len = min_t(unsigned int, buflen, server->vals->read_rsp_size) -
- HEADER_SIZE(server) + 1;
-
- length = cifs_read_from_socket(server,
- buf + HEADER_SIZE(server) - 1, len);
- if (length < 0)
- return length;
- server->total_read += length;
-
- if (server->ops->is_session_expired &&
- server->ops->is_session_expired(buf)) {
- cifs_reconnect(server, true);
- return -1;
- }
-
- if (server->ops->is_status_pending &&
- server->ops->is_status_pending(buf, server)) {
- cifs_discard_remaining_data(server);
- return -1;
- }
-
- /* set up first two iov for signature check and to get credits */
- rdata->iov[0].iov_base = buf;
- rdata->iov[0].iov_len = server->vals->header_preamble_size;
- rdata->iov[1].iov_base = buf + server->vals->header_preamble_size;
- rdata->iov[1].iov_len =
- server->total_read - server->vals->header_preamble_size;
- cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
- rdata->iov[0].iov_base, rdata->iov[0].iov_len);
- cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
- rdata->iov[1].iov_base, rdata->iov[1].iov_len);
-
- /* Was the SMB read successful? */
- rdata->result = server->ops->map_error(buf, false);
- if (rdata->result != 0) {
- cifs_dbg(FYI, "%s: server returned error %d\n",
- __func__, rdata->result);
- /* normal error on read response */
- return __cifs_readv_discard(server, mid, false);
- }
-
- /* Is there enough to get to the rest of the READ_RSP header? */
- if (server->total_read < server->vals->read_rsp_size) {
- cifs_dbg(FYI, "%s: server returned short header. got=%u expected=%zu\n",
- __func__, server->total_read,
- server->vals->read_rsp_size);
- rdata->result = -EIO;
- return cifs_readv_discard(server, mid);
- }
-
- data_offset = server->ops->read_data_offset(buf) +
- server->vals->header_preamble_size;
- if (data_offset < server->total_read) {
- /*
- * win2k8 sometimes sends an offset of 0 when the read
- * is beyond the EOF. Treat it as if the data starts just after
- * the header.
- */
- cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
- __func__, data_offset);
- data_offset = server->total_read;
- } else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
- /* data_offset is beyond the end of smallbuf */
- cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
- __func__, data_offset);
- rdata->result = -EIO;
- return cifs_readv_discard(server, mid);
- }
-
- cifs_dbg(FYI, "%s: total_read=%u data_offset=%u\n",
- __func__, server->total_read, data_offset);
-
- len = data_offset - server->total_read;
- if (len > 0) {
- /* read any junk before data into the rest of smallbuf */
- length = cifs_read_from_socket(server,
- buf + server->total_read, len);
- if (length < 0)
- return length;
- server->total_read += length;
- }
-
- /* how much data is in the response? */
-#ifdef CONFIG_CIFS_SMB_DIRECT
- use_rdma_mr = rdata->mr;
-#endif
- data_len = server->ops->read_data_length(buf, use_rdma_mr);
- if (!use_rdma_mr && (data_offset + data_len > buflen)) {
- /* data_len is corrupt -- discard frame */
- rdata->result = -EIO;
- return cifs_readv_discard(server, mid);
- }
-
- length = rdata->read_into_pages(server, rdata, data_len);
- if (length < 0)
- return length;
-
- server->total_read += length;
-
- cifs_dbg(FYI, "total_read=%u buflen=%u remaining=%u\n",
- server->total_read, buflen, data_len);
-
- /* discard anything left over */
- if (server->total_read < buflen)
- return cifs_readv_discard(server, mid);
-
- dequeue_mid(mid, false);
- mid->resp_buf = server->smallbuf;
- server->smallbuf = NULL;
- return length;
-}
-
static void
cifs_readv_callback(struct mid_q_entry *mid)
{
@@ -1607,7 +1336,7 @@ cifs_readv_callback(struct mid_q_entry *mid)
}
queue_work(cifsiod_wq, &rdata->work);
- DeleteMidQEntry(mid);
+ release_mid(mid);
add_credits(server, &credits, 0);
}
@@ -1909,183 +1638,6 @@ CIFSSMBWrite(const unsigned int xid, struct cifs_io_parms *io_parms,
return rc;
}
-void
-cifs_writedata_release(struct kref *refcount)
-{
- struct cifs_writedata *wdata = container_of(refcount,
- struct cifs_writedata, refcount);
-#ifdef CONFIG_CIFS_SMB_DIRECT
- if (wdata->mr) {
- smbd_deregister_mr(wdata->mr);
- wdata->mr = NULL;
- }
-#endif
-
- if (wdata->cfile)
- cifsFileInfo_put(wdata->cfile);
-
- kvfree(wdata->pages);
- kfree(wdata);
-}
-
-/*
- * Write failed with a retryable error. Resend the write request. It's also
- * possible that the page was redirtied so re-clean the page.
- */
-static void
-cifs_writev_requeue(struct cifs_writedata *wdata)
-{
- int i, rc = 0;
- struct inode *inode = d_inode(wdata->cfile->dentry);
- struct TCP_Server_Info *server;
- unsigned int rest_len;
-
- server = tlink_tcon(wdata->cfile->tlink)->ses->server;
- i = 0;
- rest_len = wdata->bytes;
- do {
- struct cifs_writedata *wdata2;
- unsigned int j, nr_pages, wsize, tailsz, cur_len;
-
- wsize = server->ops->wp_retry_size(inode);
- if (wsize < rest_len) {
- nr_pages = wsize / PAGE_SIZE;
- if (!nr_pages) {
- rc = -ENOTSUPP;
- break;
- }
- cur_len = nr_pages * PAGE_SIZE;
- tailsz = PAGE_SIZE;
- } else {
- nr_pages = DIV_ROUND_UP(rest_len, PAGE_SIZE);
- cur_len = rest_len;
- tailsz = rest_len - (nr_pages - 1) * PAGE_SIZE;
- }
-
- wdata2 = cifs_writedata_alloc(nr_pages, cifs_writev_complete);
- if (!wdata2) {
- rc = -ENOMEM;
- break;
- }
-
- for (j = 0; j < nr_pages; j++) {
- wdata2->pages[j] = wdata->pages[i + j];
- lock_page(wdata2->pages[j]);
- clear_page_dirty_for_io(wdata2->pages[j]);
- }
-
- wdata2->sync_mode = wdata->sync_mode;
- wdata2->nr_pages = nr_pages;
- wdata2->offset = page_offset(wdata2->pages[0]);
- wdata2->pagesz = PAGE_SIZE;
- wdata2->tailsz = tailsz;
- wdata2->bytes = cur_len;
-
- rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY,
- &wdata2->cfile);
- if (!wdata2->cfile) {
- cifs_dbg(VFS, "No writable handle to retry writepages rc=%d\n",
- rc);
- if (!is_retryable_error(rc))
- rc = -EBADF;
- } else {
- wdata2->pid = wdata2->cfile->pid;
- rc = server->ops->async_writev(wdata2,
- cifs_writedata_release);
- }
-
- for (j = 0; j < nr_pages; j++) {
- unlock_page(wdata2->pages[j]);
- if (rc != 0 && !is_retryable_error(rc)) {
- SetPageError(wdata2->pages[j]);
- end_page_writeback(wdata2->pages[j]);
- put_page(wdata2->pages[j]);
- }
- }
-
- kref_put(&wdata2->refcount, cifs_writedata_release);
- if (rc) {
- if (is_retryable_error(rc))
- continue;
- i += nr_pages;
- break;
- }
-
- rest_len -= cur_len;
- i += nr_pages;
- } while (i < wdata->nr_pages);
-
- /* cleanup remaining pages from the original wdata */
- for (; i < wdata->nr_pages; i++) {
- SetPageError(wdata->pages[i]);
- end_page_writeback(wdata->pages[i]);
- put_page(wdata->pages[i]);
- }
-
- if (rc != 0 && !is_retryable_error(rc))
- mapping_set_error(inode->i_mapping, rc);
- kref_put(&wdata->refcount, cifs_writedata_release);
-}
-
-void
-cifs_writev_complete(struct work_struct *work)
-{
- struct cifs_writedata *wdata = container_of(work,
- struct cifs_writedata, work);
- struct inode *inode = d_inode(wdata->cfile->dentry);
- int i = 0;
-
- if (wdata->result == 0) {
- spin_lock(&inode->i_lock);
- cifs_update_eof(CIFS_I(inode), wdata->offset, wdata->bytes);
- spin_unlock(&inode->i_lock);
- cifs_stats_bytes_written(tlink_tcon(wdata->cfile->tlink),
- wdata->bytes);
- } else if (wdata->sync_mode == WB_SYNC_ALL && wdata->result == -EAGAIN)
- return cifs_writev_requeue(wdata);
-
- for (i = 0; i < wdata->nr_pages; i++) {
- struct page *page = wdata->pages[i];
- if (wdata->result == -EAGAIN)
- __set_page_dirty_nobuffers(page);
- else if (wdata->result < 0)
- SetPageError(page);
- end_page_writeback(page);
- cifs_readpage_to_fscache(inode, page);
- put_page(page);
- }
- if (wdata->result != -EAGAIN)
- mapping_set_error(inode->i_mapping, wdata->result);
- kref_put(&wdata->refcount, cifs_writedata_release);
-}
-
-struct cifs_writedata *
-cifs_writedata_alloc(unsigned int nr_pages, work_func_t complete)
-{
- struct page **pages =
- kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
- if (pages)
- return cifs_writedata_direct_alloc(pages, complete);
-
- return NULL;
-}
-
-struct cifs_writedata *
-cifs_writedata_direct_alloc(struct page **pages, work_func_t complete)
-{
- struct cifs_writedata *wdata;
-
- wdata = kzalloc(sizeof(*wdata), GFP_NOFS);
- if (wdata != NULL) {
- wdata->pages = pages;
- kref_init(&wdata->refcount);
- INIT_LIST_HEAD(&wdata->list);
- init_completion(&wdata->done);
- INIT_WORK(&wdata->work, complete);
- }
- return wdata;
-}
-
/*
* Check the mid_state and signature on received buffer (if any), and queue the
* workqueue completion task.
@@ -2132,7 +1684,7 @@ cifs_writev_callback(struct mid_q_entry *mid)
}
queue_work(cifsiod_wq, &wdata->work);
- DeleteMidQEntry(mid);
+ release_mid(mid);
add_credits(tcon->ses->server, &credits, 0);
}
@@ -3660,7 +3212,6 @@ setACLerrorExit:
return rc;
}
-/* BB fix tabs in this function FIXME BB */
int
CIFSGetExtAttr(const unsigned int xid, struct cifs_tcon *tcon,
const int netfid, __u64 *pExtAttrBits, __u64 *pMask)
@@ -3677,7 +3228,7 @@ CIFSGetExtAttr(const unsigned int xid, struct cifs_tcon *tcon,
GetExtAttrRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
- (void **) &pSMBr);
+ (void **) &pSMBr);
if (rc)
return rc;
@@ -3723,7 +3274,7 @@ GetExtAttrRetry:
__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
__u16 count = le16_to_cpu(pSMBr->t2.DataCount);
struct file_chattr_info *pfinfo;
- /* BB Do we need a cast or hash here ? */
+
if (count != 16) {
cifs_dbg(FYI, "Invalid size ret in GetExtAttr\n");
rc = -EIO;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 386bb523c69e..a0a06b6f252b 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -119,10 +119,10 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
goto requeue_resolve;
}
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
rc = cifs_convert_address((struct sockaddr *)&server->dstaddr, ipaddr,
strlen(ipaddr));
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
kfree(ipaddr);
/* rc == 1 means success here */
@@ -205,17 +205,22 @@ cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
/* If server is a channel, select the primary channel */
pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&pserver->srv_lock);
if (!all_channels) {
pserver->tcpStatus = CifsNeedReconnect;
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&pserver->srv_lock);
return;
}
+ spin_unlock(&pserver->srv_lock);
+ spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
spin_lock(&ses->chan_lock);
- for (i = 0; i < ses->chan_count; i++)
+ for (i = 0; i < ses->chan_count; i++) {
+ spin_lock(&ses->chans[i].server->srv_lock);
ses->chans[i].server->tcpStatus = CifsNeedReconnect;
+ spin_unlock(&ses->chans[i].server->srv_lock);
+ }
spin_unlock(&ses->chan_lock);
}
spin_unlock(&cifs_tcp_ses_lock);
@@ -252,17 +257,8 @@ cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry_safe(ses, nses, &pserver->smb_ses_list, smb_ses_list) {
/* check if iface is still active */
- if (!cifs_chan_is_iface_active(ses, server)) {
- /*
- * HACK: drop the lock before calling
- * cifs_chan_update_iface to avoid deadlock
- */
- ses->ses_count++;
- spin_unlock(&cifs_tcp_ses_lock);
+ if (!cifs_chan_is_iface_active(ses, server))
cifs_chan_update_iface(ses, server);
- spin_lock(&cifs_tcp_ses_lock);
- ses->ses_count--;
- }
spin_lock(&ses->chan_lock);
if (!mark_smb_session && cifs_chan_needs_reconnect(ses, server))
@@ -323,7 +319,7 @@ cifs_abort_connection(struct TCP_Server_Info *server)
/* mark submitted MIDs for retry and issue callback */
INIT_LIST_HEAD(&retry_list);
cifs_dbg(FYI, "%s: moving mids to private list\n", __func__);
- spin_lock(&GlobalMid_Lock);
+ spin_lock(&server->mid_lock);
list_for_each_entry_safe(mid, nmid, &server->pending_mid_q, qhead) {
kref_get(&mid->refcount);
if (mid->mid_state == MID_REQUEST_SUBMITTED)
@@ -331,14 +327,14 @@ cifs_abort_connection(struct TCP_Server_Info *server)
list_move(&mid->qhead, &retry_list);
mid->mid_flags |= MID_DELETED;
}
- spin_unlock(&GlobalMid_Lock);
+ spin_unlock(&server->mid_lock);
cifs_server_unlock(server);
cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__);
list_for_each_entry_safe(mid, nmid, &retry_list, qhead) {
list_del_init(&mid->qhead);
mid->callback(mid);
- cifs_mid_q_entry_release(mid);
+ release_mid(mid);
}
if (cifs_rdma_enabled(server)) {
@@ -350,11 +346,11 @@ cifs_abort_connection(struct TCP_Server_Info *server)
static bool cifs_tcp_ses_needs_reconnect(struct TCP_Server_Info *server, int num_targets)
{
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
server->nr_targets = num_targets;
if (server->tcpStatus == CifsExiting) {
/* the demux thread will exit normally next time through the loop */
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
wake_up(&server->response_q);
return false;
}
@@ -364,7 +360,7 @@ static bool cifs_tcp_ses_needs_reconnect(struct TCP_Server_Info *server, int num
server->hostname);
server->tcpStatus = CifsNeedReconnect;
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
return true;
}
@@ -414,20 +410,20 @@ static int __cifs_reconnect(struct TCP_Server_Info *server,
} else {
atomic_inc(&tcpSesReconnectCount);
set_credits(server, 1);
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
if (server->tcpStatus != CifsExiting)
server->tcpStatus = CifsNeedNegotiate;
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
cifs_swn_reset_server_dstaddr(server);
cifs_server_unlock(server);
mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
}
} while (server->tcpStatus == CifsNeedReconnect);
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsNeedNegotiate)
mod_delayed_work(cifsiod_wq, &server->echo, 0);
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
wake_up(&server->response_q);
return rc;
@@ -541,10 +537,10 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server)
*/
atomic_inc(&tcpSesReconnectCount);
set_credits(server, 1);
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
if (server->tcpStatus != CifsExiting)
server->tcpStatus = CifsNeedNegotiate;
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
cifs_swn_reset_server_dstaddr(server);
cifs_server_unlock(server);
mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
@@ -556,11 +552,10 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server)
dfs_cache_free_tgts(&tl);
/* Need to set up echo worker again once connection has been established */
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsNeedNegotiate)
mod_delayed_work(cifsiod_wq, &server->echo, 0);
-
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
wake_up(&server->response_q);
return rc;
@@ -569,12 +564,12 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server)
int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
{
/* If tcp session is not an dfs connection, then reconnect to last target server */
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
if (!server->is_dfs_conn) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
return __cifs_reconnect(server, mark_smb_session);
}
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
mutex_lock(&server->refpath_lock);
if (!server->origin_fullpath || !server->leaf_fullpath) {
@@ -670,18 +665,18 @@ server_unresponsive(struct TCP_Server_Info *server)
* 65s kernel_recvmsg times out, and we see that we haven't gotten
* a response in >60s.
*/
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
if ((server->tcpStatus == CifsGood ||
server->tcpStatus == CifsNeedNegotiate) &&
(!server->ops->can_echo || server->ops->can_echo(server)) &&
time_after(jiffies, server->lstrp + 3 * server->echo_interval)) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
cifs_server_dbg(VFS, "has not responded in %lu seconds. Reconnecting...\n",
(3 * server->echo_interval) / HZ);
cifs_reconnect(server, false);
return true;
}
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
return false;
}
@@ -726,18 +721,18 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
else
length = sock_recvmsg(server->ssocket, smb_msg, 0);
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsExiting) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
return -ESHUTDOWN;
}
if (server->tcpStatus == CifsNeedReconnect) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
cifs_reconnect(server, false);
return -ECONNABORTED;
}
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
if (length == -ERESTARTSYS ||
length == -EAGAIN ||
@@ -849,7 +844,7 @@ dequeue_mid(struct mid_q_entry *mid, bool malformed)
#ifdef CONFIG_CIFS_STATS2
mid->when_received = jiffies;
#endif
- spin_lock(&GlobalMid_Lock);
+ spin_lock(&mid->server->mid_lock);
if (!malformed)
mid->mid_state = MID_RESPONSE_RECEIVED;
else
@@ -859,12 +854,12 @@ dequeue_mid(struct mid_q_entry *mid, bool malformed)
* function has finished processing it is a bug.
*/
if (mid->mid_flags & MID_DELETED) {
- spin_unlock(&GlobalMid_Lock);
+ spin_unlock(&mid->server->mid_lock);
pr_warn_once("trying to dequeue a deleted mid\n");
} else {
list_del_init(&mid->qhead);
mid->mid_flags |= MID_DELETED;
- spin_unlock(&GlobalMid_Lock);
+ spin_unlock(&mid->server->mid_lock);
}
}
@@ -876,7 +871,7 @@ smb2_get_credits_from_hdr(char *buffer, struct TCP_Server_Info *server)
/*
* SMB1 does not use credits.
*/
- if (server->vals->header_preamble_size)
+ if (is_smb1(server))
return 0;
return le16_to_cpu(shdr->CreditRequest);
@@ -903,21 +898,68 @@ handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server,
dequeue_mid(mid, malformed);
}
+int
+cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required)
+{
+ bool srv_sign_required = server->sec_mode & server->vals->signing_required;
+ bool srv_sign_enabled = server->sec_mode & server->vals->signing_enabled;
+ bool mnt_sign_enabled;
+
+ /*
+ * Is signing required by mnt options? If not then check
+ * global_secflags to see if it is there.
+ */
+ if (!mnt_sign_required)
+ mnt_sign_required = ((global_secflags & CIFSSEC_MUST_SIGN) ==
+ CIFSSEC_MUST_SIGN);
+
+ /*
+ * If signing is required then it's automatically enabled too,
+ * otherwise, check to see if the secflags allow it.
+ */
+ mnt_sign_enabled = mnt_sign_required ? mnt_sign_required :
+ (global_secflags & CIFSSEC_MAY_SIGN);
+
+ /* If server requires signing, does client allow it? */
+ if (srv_sign_required) {
+ if (!mnt_sign_enabled) {
+ cifs_dbg(VFS, "Server requires signing, but it's disabled in SecurityFlags!\n");
+ return -EOPNOTSUPP;
+ }
+ server->sign = true;
+ }
+
+ /* If client requires signing, does server allow it? */
+ if (mnt_sign_required) {
+ if (!srv_sign_enabled) {
+ cifs_dbg(VFS, "Server does not support signing!\n");
+ return -EOPNOTSUPP;
+ }
+ server->sign = true;
+ }
+
+ if (cifs_rdma_enabled(server) && server->sign)
+ cifs_dbg(VFS, "Signing is enabled, and RDMA read/write will be disabled\n");
+
+ return 0;
+}
+
+
static void clean_demultiplex_info(struct TCP_Server_Info *server)
{
int length;
/* take it off the list, if it's not already */
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
list_del_init(&server->tcp_ses_list);
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
cancel_delayed_work_sync(&server->echo);
cancel_delayed_work_sync(&server->resolve);
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
server->tcpStatus = CifsExiting;
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
wake_up_all(&server->response_q);
/* check if we have blocked requests that need to free */
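
The new cifs_enable_signing() above folds the signing negotiation into one place: signing may be demanded by the server's sec_mode, by the mount options, or by global_secflags (CIFSSEC_MUST_SIGN), and if either side requires it while the other at least allows it, server->sign is set; an irreconcilable pair fails with -EOPNOTSUPP. Below is a pure-function restatement of that decision, treating the four flags as independent inputs for clarity (the real code derives mnt_sign_enabled from the mount flag and global_secflags); signing_decision() is an invented name, not a cifs symbol.

    #include <linux/errno.h>
    #include <linux/types.h>

    /* Returns 1 if signing ends up enabled, 0 if it stays off, and
     * -EOPNOTSUPP if server and mount cannot agree. */
    static int signing_decision(bool srv_required, bool srv_enabled,
                                bool mnt_required, bool mnt_enabled)
    {
            if (srv_required && !mnt_enabled)
                    return -EOPNOTSUPP;   /* server insists, client has signing disabled */
            if (mnt_required && !srv_enabled)
                    return -EOPNOTSUPP;   /* mount insists, server cannot sign */
            return (srv_required || mnt_required) ? 1 : 0;
    }
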
@@ -948,7 +990,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
struct list_head *tmp, *tmp2;
INIT_LIST_HEAD(&dispose_list);
- spin_lock(&GlobalMid_Lock);
+ spin_lock(&server->mid_lock);
list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
cifs_dbg(FYI, "Clearing mid %llu\n", mid_entry->mid);
@@ -957,7 +999,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
list_move(&mid_entry->qhead, &dispose_list);
mid_entry->mid_flags |= MID_DELETED;
}
- spin_unlock(&GlobalMid_Lock);
+ spin_unlock(&server->mid_lock);
/* now walk dispose list and issue callbacks */
list_for_each_safe(tmp, tmp2, &dispose_list) {
@@ -965,7 +1007,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
cifs_dbg(FYI, "Callback mid %llu\n", mid_entry->mid);
list_del_init(&mid_entry->qhead);
mid_entry->callback(mid_entry);
- cifs_mid_q_entry_release(mid_entry);
+ release_mid(mid_entry);
}
/* 1/8th of sec is more than enough time for them to exit */
msleep(125);
@@ -1008,7 +1050,7 @@ standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
/* make sure this will fit in a large buffer */
if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server) -
- server->vals->header_preamble_size) {
+ HEADER_PREAMBLE_SIZE(server)) {
cifs_server_dbg(VFS, "SMB response too long (%u bytes)\n", pdu_length);
cifs_reconnect(server, true);
return -ECONNABORTED;
@@ -1023,8 +1065,7 @@ standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
/* now read the rest */
length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
- pdu_length - HEADER_SIZE(server) + 1
- + server->vals->header_preamble_size);
+ pdu_length - MID_HEADER_SIZE(server));
if (length < 0)
return length;
@@ -1039,19 +1080,18 @@ int
cifs_handle_standard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
- int length;
+ int rc;
/*
* We know that we received enough to get to the MID as we
* checked the pdu_length earlier. Now check to see
- * if the rest of the header is OK. We borrow the length
- * var for the rest of the loop to avoid a new stack var.
+ * if the rest of the header is OK.
*
* 48 bytes is enough to display the header and a little bit
* into the payload for debugging purposes.
*/
- length = server->ops->check_message(buf, server->total_read, server);
- if (length != 0)
+ rc = server->ops->check_message(buf, server->total_read, server);
+ if (rc)
cifs_dump_mem("Bad SMB: ", buf,
min_t(unsigned int, server->total_read, 48));
@@ -1066,9 +1106,9 @@ cifs_handle_standard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
return -1;
if (!mid)
- return length;
+ return rc;
- handle_mid(mid, server, buf, length);
+ handle_mid(mid, server, buf, rc);
return 0;
}
@@ -1081,7 +1121,7 @@ smb2_add_credits_from_hdr(char *buffer, struct TCP_Server_Info *server)
/*
* SMB1 does not use credits.
*/
- if (server->vals->header_preamble_size)
+ if (is_smb1(server))
return;
if (shdr->CreditRequest) {
@@ -1139,10 +1179,10 @@ cifs_demultiplex_thread(void *p)
if (length < 0)
continue;
- if (server->vals->header_preamble_size == 0)
- server->total_read = 0;
- else
+ if (is_smb1(server))
server->total_read = length;
+ else
+ server->total_read = 0;
/*
* The right amount was read from socket - 4 bytes,
@@ -1157,8 +1197,7 @@ next_pdu:
server->pdu_size = pdu_length;
/* make sure we have enough to get to the MID */
- if (server->pdu_size < HEADER_SIZE(server) - 1 -
- server->vals->header_preamble_size) {
+ if (server->pdu_size < MID_HEADER_SIZE(server)) {
cifs_server_dbg(VFS, "SMB response too short (%u bytes)\n",
server->pdu_size);
cifs_reconnect(server, true);
@@ -1167,9 +1206,8 @@ next_pdu:
/* read down to the MID */
length = cifs_read_from_socket(server,
- buf + server->vals->header_preamble_size,
- HEADER_SIZE(server) - 1
- - server->vals->header_preamble_size);
+ buf + HEADER_PREAMBLE_SIZE(server),
+ MID_HEADER_SIZE(server));
if (length < 0)
continue;
server->total_read += length;
@@ -1205,7 +1243,7 @@ next_pdu:
if (length < 0) {
for (i = 0; i < num_mids; i++)
if (mids[i])
- cifs_mid_q_entry_release(mids[i]);
+ release_mid(mids[i]);
continue;
}
@@ -1232,7 +1270,7 @@ next_pdu:
if (!mids[i]->multiRsp || mids[i]->multiEnd)
mids[i]->callback(mids[i]);
- cifs_mid_q_entry_release(mids[i]);
+ release_mid(mids[i]);
} else if (server->ops->is_oplock_break &&
server->ops->is_oplock_break(bufs[i],
server)) {
@@ -1240,7 +1278,7 @@ next_pdu:
cifs_dbg(FYI, "Received oplock break\n");
} else {
cifs_server_dbg(VFS, "No task to wake, unknown frame received! NumMids %d\n",
- atomic_read(&midCount));
+ atomic_read(&mid_count));
cifs_dump_mem("Received Data is: ", bufs[i],
HEADER_SIZE(server));
smb2_add_credits_from_hdr(bufs[i], server);
@@ -1411,6 +1449,7 @@ match_security(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
return true;
}
+/* this function must be called with srv_lock held */
static int match_server(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
{
struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr;
@@ -1471,6 +1510,7 @@ cifs_find_tcp_session(struct smb3_fs_context *ctx)
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
+ spin_lock(&server->srv_lock);
#ifdef CONFIG_CIFS_DFS_UPCALL
/*
* DFS failover implementation in cifs_reconnect() requires unique tcp sessions for
@@ -1478,15 +1518,20 @@ cifs_find_tcp_session(struct smb3_fs_context *ctx)
* shares or even links that may connect to same server but having completely
* different failover targets.
*/
- if (server->is_dfs_conn)
+ if (server->is_dfs_conn) {
+ spin_unlock(&server->srv_lock);
continue;
+ }
#endif
/*
* Skip ses channels since they're only handled in lower layers
* (e.g. cifs_send_recv).
*/
- if (CIFS_SERVER_IS_CHAN(server) || !match_server(server, ctx))
+ if (CIFS_SERVER_IS_CHAN(server) || !match_server(server, ctx)) {
+ spin_unlock(&server->srv_lock);
continue;
+ }
+ spin_unlock(&server->srv_lock);
++server->srv_count;
spin_unlock(&cifs_tcp_ses_lock);
@@ -1534,9 +1579,9 @@ cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
else
cancel_delayed_work_sync(&server->reconnect);
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
server->tcpStatus = CifsExiting;
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
cifs_crypto_secmech_release(server);
@@ -1595,8 +1640,8 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
if (primary_server) {
spin_lock(&cifs_tcp_ses_lock);
++primary_server->srv_count;
- tcp_ses->primary_server = primary_server;
spin_unlock(&cifs_tcp_ses_lock);
+ tcp_ses->primary_server = primary_server;
}
init_waitqueue_head(&tcp_ses->response_q);
init_waitqueue_head(&tcp_ses->request_q);
@@ -1612,6 +1657,8 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
tcp_ses->lstrp = jiffies;
tcp_ses->compress_algorithm = cpu_to_le16(ctx->compression);
spin_lock_init(&tcp_ses->req_lock);
+ spin_lock_init(&tcp_ses->srv_lock);
+ spin_lock_init(&tcp_ses->mid_lock);
INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
@@ -1685,9 +1732,9 @@ smbd_connected:
* to the struct since the kernel thread not created yet
* no need to spinlock this update of tcpStatus
*/
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&tcp_ses->srv_lock);
tcp_ses->tcpStatus = CifsNeedNegotiate;
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&tcp_ses->srv_lock);
if ((ctx->max_credits < 20) || (ctx->max_credits > 60000))
tcp_ses->max_credits = SMB2_MAX_CREDITS_AVAILABLE;
@@ -1729,6 +1776,7 @@ out_err:
return ERR_PTR(rc);
}
+/* this function must be called with ses_lock held */
static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx)
{
if (ctx->sectype != Unspecified &&
@@ -1864,10 +1912,17 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
- if (ses->ses_status == SES_EXITING)
+ spin_lock(&ses->ses_lock);
+ if (ses->ses_status == SES_EXITING) {
+ spin_unlock(&ses->ses_lock);
continue;
- if (!match_session(ses, ctx))
+ }
+ if (!match_session(ses, ctx)) {
+ spin_unlock(&ses->ses_lock);
continue;
+ }
+ spin_unlock(&ses->ses_lock);
+
++ses->ses_count;
spin_unlock(&cifs_tcp_ses_lock);
return ses;
@@ -1882,26 +1937,28 @@ void cifs_put_smb_ses(struct cifs_ses *ses)
unsigned int chan_count;
struct TCP_Server_Info *server = ses->server;
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&ses->ses_lock);
if (ses->ses_status == SES_EXITING) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&ses->ses_lock);
return;
}
+ spin_unlock(&ses->ses_lock);
cifs_dbg(FYI, "%s: ses_count=%d\n", __func__, ses->ses_count);
cifs_dbg(FYI, "%s: ses ipc: %s\n", __func__, ses->tcon_ipc ? ses->tcon_ipc->treeName : "NONE");
+ spin_lock(&cifs_tcp_ses_lock);
if (--ses->ses_count > 0) {
spin_unlock(&cifs_tcp_ses_lock);
return;
}
+ spin_unlock(&cifs_tcp_ses_lock);
/* ses_count can never go negative */
WARN_ON(ses->ses_count < 0);
if (ses->ses_status == SES_GOOD)
ses->ses_status = SES_EXITING;
- spin_unlock(&cifs_tcp_ses_lock);
cifs_free_ipc(ses);
@@ -2236,6 +2293,7 @@ get_ses_fail:
return ERR_PTR(rc);
}
+/* this function must be called with tc_lock held */
static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
{
if (tcon->status == TID_EXITING)
@@ -2258,16 +2316,17 @@ static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
static struct cifs_tcon *
cifs_find_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
{
- struct list_head *tmp;
struct cifs_tcon *tcon;
spin_lock(&cifs_tcp_ses_lock);
- list_for_each(tmp, &ses->tcon_list) {
- tcon = list_entry(tmp, struct cifs_tcon, tcon_list);
-
- if (!match_tcon(tcon, ctx))
+ list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+ spin_lock(&tcon->tc_lock);
+ if (!match_tcon(tcon, ctx)) {
+ spin_unlock(&tcon->tc_lock);
continue;
+ }
++tcon->tc_count;
+ spin_unlock(&tcon->tc_lock);
spin_unlock(&cifs_tcp_ses_lock);
return tcon;
}
@@ -2619,6 +2678,8 @@ compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
return 0;
if (old->ctx->acdirmax != new->ctx->acdirmax)
return 0;
+ if (old->ctx->closetimeo != new->ctx->closetimeo)
+ return 0;
return 1;
}
@@ -2644,7 +2705,7 @@ match_prepath(struct super_block *sb, struct cifs_mnt_data *mnt_data)
int
cifs_match_super(struct super_block *sb, void *data)
{
- struct cifs_mnt_data *mnt_data = (struct cifs_mnt_data *)data;
+ struct cifs_mnt_data *mnt_data = data;
struct smb3_fs_context *ctx;
struct cifs_sb_info *cifs_sb;
struct TCP_Server_Info *tcp_srv;
@@ -2667,6 +2728,9 @@ cifs_match_super(struct super_block *sb, void *data)
ctx = mnt_data->ctx;
+ spin_lock(&tcp_srv->srv_lock);
+ spin_lock(&ses->ses_lock);
+ spin_lock(&tcon->tc_lock);
if (!match_server(tcp_srv, ctx) ||
!match_session(ses, ctx) ||
!match_tcon(tcon, ctx) ||
@@ -2677,6 +2741,10 @@ cifs_match_super(struct super_block *sb, void *data)
rc = compare_mount_options(sb, mnt_data);
out:
+ spin_unlock(&tcon->tc_lock);
+ spin_unlock(&ses->ses_lock);
+ spin_unlock(&tcp_srv->srv_lock);
+
spin_unlock(&cifs_tcp_ses_lock);
cifs_put_tlink(tlink);
return rc;
@@ -2954,6 +3022,7 @@ ip_connect(struct TCP_Server_Info *server)
return generic_ip_connect(server);
}
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
{
@@ -3059,6 +3128,7 @@ void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon,
}
}
}
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
int cifs_setup_cifs_sb(struct cifs_sb_info *cifs_sb)
{
@@ -3175,6 +3245,7 @@ static int mount_get_conns(struct mount_ctx *mnt_ctx)
if (tcon->posix_extensions)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS;
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/* tell server which Unix caps we support */
if (cap_unix(tcon->ses)) {
/*
@@ -3182,16 +3253,17 @@ static int mount_get_conns(struct mount_ctx *mnt_ctx)
* for just this mount.
*/
reset_cifs_unix_caps(xid, tcon, cifs_sb, ctx);
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&tcon->ses->server->srv_lock);
if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) &&
(le64_to_cpu(tcon->fsUnixInfo.Capability) &
CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&tcon->ses->server->srv_lock);
rc = -EACCES;
goto out;
}
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&tcon->ses->server->srv_lock);
} else
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
tcon->unix_ext = 0; /* server does not support them */
/* do not care if a following call succeed - informational */
@@ -3273,9 +3345,9 @@ static int mount_get_dfs_conns(struct mount_ctx *mnt_ctx)
rc = mount_get_conns(mnt_ctx);
if (mnt_ctx->server) {
cifs_dbg(FYI, "%s: marking tcp session as a dfs connection\n", __func__);
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&mnt_ctx->server->srv_lock);
mnt_ctx->server->is_dfs_conn = true;
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&mnt_ctx->server->srv_lock);
}
return rc;
}
@@ -3919,7 +3991,7 @@ CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
}
bcc_ptr += length + 1;
bytes_left -= (length + 1);
- strlcpy(tcon->treeName, tree, sizeof(tcon->treeName));
+ strscpy(tcon->treeName, tree, sizeof(tcon->treeName));
/* mostly informational -- no need to fail on error here */
kfree(tcon->nativeFileSystem);
@@ -3990,28 +4062,28 @@ cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses,
return -ENOSYS;
/* only send once per connect */
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
if (!server->ops->need_neg(server) ||
server->tcpStatus != CifsNeedNegotiate) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
return 0;
}
server->tcpStatus = CifsInNegotiate;
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
rc = server->ops->negotiate(xid, ses, server);
if (rc == 0) {
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsInNegotiate)
server->tcpStatus = CifsGood;
else
rc = -EHOSTDOWN;
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
} else {
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsInNegotiate)
server->tcpStatus = CifsNeedNegotiate;
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
}
return rc;
@@ -4027,7 +4099,7 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
bool is_binding = false;
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&ses->ses_lock);
if (server->dstaddr.ss_family == AF_INET6)
scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI6", &addr6->sin6_addr);
else
@@ -4036,7 +4108,7 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
if (ses->ses_status != SES_GOOD &&
ses->ses_status != SES_NEW &&
ses->ses_status != SES_NEED_RECON) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&ses->ses_lock);
return 0;
}
@@ -4045,7 +4117,7 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
if (CIFS_ALL_CHANS_GOOD(ses) ||
cifs_chan_in_reconnect(ses, server)) {
spin_unlock(&ses->chan_lock);
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&ses->ses_lock);
return 0;
}
is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
@@ -4054,7 +4126,7 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
if (!is_binding)
ses->ses_status = SES_IN_SETUP;
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&ses->ses_lock);
if (!is_binding) {
ses->capabilities = server->capabilities;
@@ -4078,22 +4150,22 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
if (rc) {
cifs_server_dbg(VFS, "Send error in SessSetup = %d\n", rc);
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&ses->ses_lock);
if (ses->ses_status == SES_IN_SETUP)
ses->ses_status = SES_NEED_RECON;
spin_lock(&ses->chan_lock);
cifs_chan_clear_in_reconnect(ses, server);
spin_unlock(&ses->chan_lock);
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&ses->ses_lock);
} else {
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&ses->ses_lock);
if (ses->ses_status == SES_IN_SETUP)
ses->ses_status = SES_GOOD;
spin_lock(&ses->chan_lock);
cifs_chan_clear_in_reconnect(ses, server);
cifs_chan_clear_need_reconnect(ses, server);
spin_unlock(&ses->chan_lock);
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&ses->ses_lock);
}
return rc;
@@ -4167,8 +4239,10 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
goto out;
}
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
if (cap_unix(ses))
reset_cifs_unix_caps(0, tcon, NULL, ctx);
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
out:
kfree(ctx->username);
@@ -4557,15 +4631,15 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
struct dfs_info3_param ref = {0};
/* only send once per connect */
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&tcon->tc_lock);
if (tcon->ses->ses_status != SES_GOOD ||
(tcon->status != TID_NEW &&
tcon->status != TID_NEED_TCON)) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&tcon->tc_lock);
return 0;
}
tcon->status = TID_IN_TCON;
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&tcon->tc_lock);
tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL);
if (!tree) {
@@ -4604,15 +4678,15 @@ out:
cifs_put_tcp_super(sb);
if (rc) {
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&tcon->tc_lock);
if (tcon->status == TID_IN_TCON)
tcon->status = TID_NEED_TCON;
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&tcon->tc_lock);
} else {
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&tcon->tc_lock);
if (tcon->status == TID_IN_TCON)
tcon->status = TID_GOOD;
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&tcon->tc_lock);
tcon->need_reconnect = false;
}
@@ -4625,28 +4699,28 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
const struct smb_version_operations *ops = tcon->ses->server->ops;
/* only send once per connect */
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&tcon->tc_lock);
if (tcon->ses->ses_status != SES_GOOD ||
(tcon->status != TID_NEW &&
tcon->status != TID_NEED_TCON)) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&tcon->tc_lock);
return 0;
}
tcon->status = TID_IN_TCON;
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&tcon->tc_lock);
rc = ops->tree_connect(xid, tcon->ses, tcon->treeName, tcon, nlsc);
if (rc) {
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&tcon->tc_lock);
if (tcon->status == TID_IN_TCON)
tcon->status = TID_NEED_TCON;
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&tcon->tc_lock);
} else {
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&tcon->tc_lock);
if (tcon->status == TID_IN_TCON)
tcon->status = TID_GOOD;
- spin_unlock(&cifs_tcp_ses_lock);
tcon->need_reconnect = false;
+ spin_unlock(&tcon->tc_lock);
}
return rc;
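
The connect.c changes above are dominated by one refactor: the global cifs_tcp_ses_lock (and GlobalMid_Lock) stops protecting per-object state and is left guarding only list membership and reference counts, while new per-object spinlocks take over — server->srv_lock for tcpStatus, ses->ses_lock for ses_status, tcon->tc_lock for tcon->status, and server->mid_lock for the pending mid queue (srv_lock and mid_lock are initialised in cifs_get_tcp_session(), ses_lock in sesInfoAlloc() further down). The match_server()/match_session()/match_tcon() helpers are now documented as requiring the respective lock, and cifs_match_super() nests srv_lock, then ses_lock, then tc_lock inside cifs_tcp_ses_lock, which pins down the lock ordering. A minimal sketch of the resulting two-level pattern, mirroring cifs_find_tcp_session(); the names here (tcp_server, srv_list, find_usable_server) are illustrative, not the real cifsglob.h definitions.

    #include <linux/list.h>
    #include <linux/spinlock.h>

    static LIST_HEAD(srv_list);                /* like cifs_tcp_ses_list */
    static DEFINE_SPINLOCK(srv_list_lock);     /* like cifs_tcp_ses_lock: membership + refcounts */

    struct tcp_server {
            struct list_head node;
            spinlock_t srv_lock;               /* like server->srv_lock: this server's state only */
            int status;                        /* like tcpStatus */
            int srv_count;
    };

    /* Walk the shared list under the list lock, but inspect per-server state
     * under that server's own lock, dropping it before moving on. */
    static struct tcp_server *find_usable_server(int wanted_status)
    {
            struct tcp_server *srv;

            spin_lock(&srv_list_lock);
            list_for_each_entry(srv, &srv_list, node) {
                    spin_lock(&srv->srv_lock);
                    if (srv->status != wanted_status) {
                            spin_unlock(&srv->srv_lock);
                            continue;
                    }
                    spin_unlock(&srv->srv_lock);
                    srv->srv_count++;          /* refcount still guarded by the list lock */
                    spin_unlock(&srv_list_lock);
                    return srv;
            }
            spin_unlock(&srv_list_lock);
            return NULL;
    }
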
diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
index 34a8f3baed5e..a9b6c3eba6de 100644
--- a/fs/cifs/dfs_cache.c
+++ b/fs/cifs/dfs_cache.c
@@ -1526,15 +1526,21 @@ static void refresh_mounts(struct cifs_ses **sessions)
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
- if (!server->is_dfs_conn)
+ spin_lock(&server->srv_lock);
+ if (!server->is_dfs_conn) {
+ spin_unlock(&server->srv_lock);
continue;
+ }
+ spin_unlock(&server->srv_lock);
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+ spin_lock(&tcon->tc_lock);
if (!tcon->ipc && !tcon->need_reconnect) {
tcon->tc_count++;
list_add_tail(&tcon->ulist, &tcons);
}
+ spin_unlock(&tcon->tc_lock);
}
}
}
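
refresh_mounts() in dfs_cache.c follows the same per-object locking rule and, together with clean_demultiplex_info() above, shows a second idiom: while holding the spinlocks, only pin the objects of interest (bump a reference, stash them on a private list), then drop every lock and do the slow work — callbacks, reconnects, network I/O — from that private list. A hedged sketch of that pin-and-batch pattern; item, shared_lock, slow_work() and put_item() are invented names, not cifs symbols.

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct item {
            struct list_head node;       /* membership in the shared list */
            struct list_head ulist;      /* temporary membership in a private batch */
            spinlock_t lock;             /* spin_lock_init() at allocation time */
            int refcount;
            bool wanted;
    };

    static LIST_HEAD(shared);
    static DEFINE_SPINLOCK(shared_lock);

    static void slow_work(struct item *it) { /* e.g. network I/O, callbacks */ }
    static void put_item(struct item *it)  { /* drop the reference taken below */ }

    static void process_wanted(void)
    {
            struct item *it, *n;
            LIST_HEAD(batch);                /* private list, touched only by this thread */

            spin_lock(&shared_lock);
            list_for_each_entry(it, &shared, node) {
                    spin_lock(&it->lock);
                    if (it->wanted) {
                            it->refcount++;              /* pin before dropping the locks */
                            list_add_tail(&it->ulist, &batch);
                    }
                    spin_unlock(&it->lock);
            }
            spin_unlock(&shared_lock);

            /* All blocking work happens outside the spinlocks. */
            list_for_each_entry_safe(it, n, &batch, ulist) {
                    list_del_init(&it->ulist);
                    slow_work(it);
                    put_item(it);
            }
    }
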
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index ce9b22aecfba..08f7392716e2 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -193,6 +193,7 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
return PTR_ERR(full_path);
}
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
if (tcon->unix_ext && cap_unix(tcon->ses) && !tcon->broken_posix_open &&
(CIFS_UNIX_POSIX_PATH_OPS_CAP &
le64_to_cpu(tcon->fsUnixInfo.Capability))) {
@@ -261,6 +262,7 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
* rare for path not covered on files)
*/
}
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
desired_access = 0;
if (OPEN_FMODE(oflags) & FMODE_READ)
@@ -316,6 +318,7 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
goto out;
}
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/*
* If Open reported that we actually created a file then we now have to
* set the mode if possible.
@@ -357,6 +360,9 @@ cifs_create_get_file_info:
rc = cifs_get_inode_info_unix(&newinode, full_path, inode->i_sb,
xid);
else {
+#else
+ {
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
/* TODO: Add support for calling POSIX query info here, but passing in fid */
rc = cifs_get_inode_info(&newinode, full_path, buf, inode->i_sb,
xid, fid);
@@ -377,7 +383,9 @@ cifs_create_get_file_info:
}
}
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
cifs_create_set_dentry:
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
if (rc != 0) {
cifs_dbg(FYI, "Create worked, get_inode_info failed rc = %d\n",
rc);
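
dir.c (like connect.c above and file.c, inode.c, ioctl.c and link.c below) starts wrapping the SMB1/unix-extensions code in CONFIG_CIFS_ALLOW_INSECURE_LEGACY. Where the legacy call sits in the "if" arm of an if/else, the patch uses a paired #else brace so the common branch still compiles as a plain block when the legacy side is configured out. A compile-time sketch of that idiom with invented names (handle_create, try_posix_create, generic_create); only the preprocessor structure mirrors the cifs_do_create() hunk above.

    static int try_posix_create(void);   /* legacy-only helper (illustrative) */
    static int generic_create(void);     /* always-available path (illustrative) */

    static int handle_create(int want_posix)
    {
            int rc;

    #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
            if (want_posix)
                    rc = try_posix_create();
            else {
    #else
            {       /* same block, unconditionally taken when legacy is compiled out */
    #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
                    rc = generic_create();
            }
            return rc;
    }
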
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index e64cda7a7610..fa738adc031f 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -26,6 +26,7 @@
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
+#include "smb2proto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
@@ -33,6 +34,48 @@
#include "smbdirect.h"
#include "fs_context.h"
#include "cifs_ioctl.h"
+#include "cached_dir.h"
+
+/*
+ * Mark all open files on tree connections as invalid, since they
+ * were closed when the session to the server was lost.
+ */
+void
+cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
+{
+ struct cifsFileInfo *open_file = NULL;
+ struct list_head *tmp;
+ struct list_head *tmp1;
+
+ /* only send once per connect */
+ spin_lock(&tcon->ses->ses_lock);
+ if ((tcon->ses->ses_status != SES_GOOD) || (tcon->status != TID_NEED_RECON)) {
+ spin_unlock(&tcon->ses->ses_lock);
+ return;
+ }
+ tcon->status = TID_IN_FILES_INVALIDATE;
+ spin_unlock(&tcon->ses->ses_lock);
+
+ /* list all files open on tree connection and mark them invalid */
+ spin_lock(&tcon->open_file_lock);
+ list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
+ open_file = list_entry(tmp, struct cifsFileInfo, tlist);
+ open_file->invalidHandle = true;
+ open_file->oplock_break_cancelled = true;
+ }
+ spin_unlock(&tcon->open_file_lock);
+
+ invalidate_all_cached_dirs(tcon);
+ spin_lock(&tcon->tc_lock);
+ if (tcon->status == TID_IN_FILES_INVALIDATE)
+ tcon->status = TID_NEED_TCON;
+ spin_unlock(&tcon->tc_lock);
+
+ /*
+ * BB Add call to invalidate_inodes(sb) for all superblocks mounted
+ * to this tcon.
+ */
+}
static inline int cifs_convert_flags(unsigned int flags)
{
@@ -52,6 +95,7 @@ static inline int cifs_convert_flags(unsigned int flags)
FILE_READ_DATA);
}
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static u32 cifs_posix_convert_flags(unsigned int flags)
{
u32 posix_flags = 0;
@@ -85,6 +129,7 @@ static u32 cifs_posix_convert_flags(unsigned int flags)
return posix_flags;
}
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
static inline int cifs_get_disposition(unsigned int flags)
{
@@ -100,6 +145,7 @@ static inline int cifs_get_disposition(unsigned int flags)
return FILE_OPEN;
}
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
int cifs_posix_open(const char *full_path, struct inode **pinode,
struct super_block *sb, int mode, unsigned int f_flags,
__u32 *poplock, __u16 *pnetfid, unsigned int xid)
@@ -161,6 +207,7 @@ posix_open_ret:
kfree(presp_data);
return rc;
}
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
static int
cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
@@ -579,6 +626,7 @@ int cifs_open(struct inode *inode, struct file *file)
else
oplock = 0;
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
if (!tcon->broken_posix_open && tcon->unix_ext &&
cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
le64_to_cpu(tcon->fsUnixInfo.Capability))) {
@@ -603,6 +651,7 @@ int cifs_open(struct inode *inode, struct file *file)
* or DFS errors.
*/
}
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
if (server->ops->get_lease_key)
server->ops->get_lease_key(inode, &fid);
@@ -630,6 +679,7 @@ int cifs_open(struct inode *inode, struct file *file)
goto out;
}
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
/*
* Time to set mode which we can not set earlier due to
@@ -647,6 +697,7 @@ int cifs_open(struct inode *inode, struct file *file)
CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
cfile->pid);
}
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
use_cache:
fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
@@ -664,7 +715,9 @@ out:
return rc;
}
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
/*
* Try to reacquire byte range locks that were released when session
@@ -673,10 +726,12 @@ static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
- struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
int rc = 0;
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+ struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
if (cinode->can_cache_brlcks) {
@@ -685,11 +740,13 @@ cifs_relock_file(struct cifsFileInfo *cfile)
return rc;
}
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
if (cap_unix(tcon->ses) &&
(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
rc = cifs_push_posix_locks(cfile);
else
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
rc = tcon->ses->server->ops->push_mand_locks(cfile);
up_read(&cinode->lock_sem);
@@ -750,6 +807,7 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
else
oplock = 0;
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
if (tcon->unix_ext && cap_unix(tcon->ses) &&
(CIFS_UNIX_POSIX_PATH_OPS_CAP &
le64_to_cpu(tcon->fsUnixInfo.Capability))) {
@@ -773,6 +831,7 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
* in the reconnect path it is important to retry hard
*/
}
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
desired_access = cifs_convert_flags(cfile->f_flags);
@@ -817,7 +876,9 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
goto reopen_error_exit;
}
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
reopen_success:
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
cfile->invalidHandle = false;
mutex_unlock(&cfile->fh_mutex);
cinode = CIFS_I(inode);
@@ -903,12 +964,12 @@ int cifs_close(struct inode *inode, struct file *file)
* So, Increase the ref count to avoid use-after-free.
*/
if (!mod_delayed_work(deferredclose_wq,
- &cfile->deferred, cifs_sb->ctx->acregmax))
+ &cfile->deferred, cifs_sb->ctx->closetimeo))
cifsFileInfo_get(cfile);
} else {
/* Deferred close for files */
queue_delayed_work(deferredclose_wq,
- &cfile->deferred, cifs_sb->ctx->acregmax);
+ &cfile->deferred, cifs_sb->ctx->closetimeo);
cfile->deferred_close_scheduled = true;
spin_unlock(&cinode->deferred_lock);
return 0;
@@ -928,9 +989,7 @@ int cifs_close(struct inode *inode, struct file *file)
void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
- struct cifsFileInfo *open_file;
- struct list_head *tmp;
- struct list_head *tmp1;
+ struct cifsFileInfo *open_file, *tmp;
struct list_head tmp_list;
if (!tcon->use_persistent || !tcon->need_reopen_files)
@@ -943,8 +1002,7 @@ cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
/* list all files open on tree connection, reopen resilient handles */
spin_lock(&tcon->open_file_lock);
- list_for_each(tmp, &tcon->openFileList) {
- open_file = list_entry(tmp, struct cifsFileInfo, tlist);
+ list_for_each_entry(open_file, &tcon->openFileList, tlist) {
if (!open_file->invalidHandle)
continue;
cifsFileInfo_get(open_file);
@@ -952,8 +1010,7 @@ cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
}
spin_unlock(&tcon->open_file_lock);
- list_for_each_safe(tmp, tmp1, &tmp_list) {
- open_file = list_entry(tmp, struct cifsFileInfo, rlist);
+ list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) {
if (cifs_reopen_file(open_file, false /* do not flush */))
tcon->need_reopen_files = true;
list_del_init(&open_file->rlist);
@@ -1196,6 +1253,7 @@ try_again:
return rc;
}
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/*
* Check if there is another lock that prevents us to set the lock (posix
* style). If such a lock exists, update the flock structure with its
@@ -1334,6 +1392,7 @@ hash_lockowner(fl_owner_t owner)
{
return cifs_lock_secret ^ hash32_ptr((const void *)owner);
}
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
struct lock_to_push {
struct list_head llist;
@@ -1344,6 +1403,7 @@ struct lock_to_push {
__u8 type;
};
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
@@ -1431,14 +1491,17 @@ err_out:
}
goto out;
}
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
- struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
int rc = 0;
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+ struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
/* we are going to update can_cache_brlcks here - need a write access */
cifs_down_write(&cinode->lock_sem);
@@ -1447,11 +1510,13 @@ cifs_push_locks(struct cifsFileInfo *cfile)
return rc;
}
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
if (cap_unix(tcon->ses) &&
(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
rc = cifs_push_posix_locks(cfile);
else
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
rc = tcon->ses->server->ops->push_mand_locks(cfile);
cinode->can_cache_brlcks = false;
@@ -1515,6 +1580,7 @@ cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
struct TCP_Server_Info *server = tcon->ses->server;
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
__u16 netfid = cfile->fid.netfid;
if (posix_lck) {
@@ -1534,6 +1600,7 @@ cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
posix_lock_type, wait_flag);
return rc;
}
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
if (!rc)
@@ -1594,6 +1661,7 @@ cifs_free_llist(struct list_head *llist)
}
}
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
unsigned int xid)
@@ -1706,6 +1774,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
kfree(buf);
return rc;
}
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
@@ -1719,6 +1788,7 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
struct TCP_Server_Info *server = tcon->ses->server;
struct inode *inode = d_inode(cfile->dentry);
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
if (posix_lck) {
int posix_lock_type;
@@ -1740,7 +1810,7 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
NULL, posix_lock_type, wait_flag);
goto out;
}
-
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
if (lock) {
struct cifsLockInfo *lock;
@@ -1861,9 +1931,9 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
rc = -EACCES;
xid = get_xid();
- cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
- cmd, flock->fl_flags, flock->fl_type,
- flock->fl_start, flock->fl_end);
+ cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type=0x%x flags=0x%x r=%lld:%lld\n", __func__, file, cmd,
+ flock->fl_flags, flock->fl_type, (long long)flock->fl_start,
+ (long long)flock->fl_end);
cfile = (struct cifsFileInfo *)file->private_data;
tcon = tlink_tcon(cfile->tlink);
@@ -2204,6 +2274,185 @@ cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
return -ENOENT;
}
+void
+cifs_writedata_release(struct kref *refcount)
+{
+ struct cifs_writedata *wdata = container_of(refcount,
+ struct cifs_writedata, refcount);
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ if (wdata->mr) {
+ smbd_deregister_mr(wdata->mr);
+ wdata->mr = NULL;
+ }
+#endif
+
+ if (wdata->cfile)
+ cifsFileInfo_put(wdata->cfile);
+
+ kvfree(wdata->pages);
+ kfree(wdata);
+}
+
+/*
+ * Write failed with a retryable error. Resend the write request. It's also
+ * possible that the page was redirtied so re-clean the page.
+ */
+static void
+cifs_writev_requeue(struct cifs_writedata *wdata)
+{
+ int i, rc = 0;
+ struct inode *inode = d_inode(wdata->cfile->dentry);
+ struct TCP_Server_Info *server;
+ unsigned int rest_len;
+
+ server = tlink_tcon(wdata->cfile->tlink)->ses->server;
+ i = 0;
+ rest_len = wdata->bytes;
+ do {
+ struct cifs_writedata *wdata2;
+ unsigned int j, nr_pages, wsize, tailsz, cur_len;
+
+ wsize = server->ops->wp_retry_size(inode);
+ if (wsize < rest_len) {
+ nr_pages = wsize / PAGE_SIZE;
+ if (!nr_pages) {
+ rc = -EOPNOTSUPP;
+ break;
+ }
+ cur_len = nr_pages * PAGE_SIZE;
+ tailsz = PAGE_SIZE;
+ } else {
+ nr_pages = DIV_ROUND_UP(rest_len, PAGE_SIZE);
+ cur_len = rest_len;
+ tailsz = rest_len - (nr_pages - 1) * PAGE_SIZE;
+ }
+
+ wdata2 = cifs_writedata_alloc(nr_pages, cifs_writev_complete);
+ if (!wdata2) {
+ rc = -ENOMEM;
+ break;
+ }
+
+ for (j = 0; j < nr_pages; j++) {
+ wdata2->pages[j] = wdata->pages[i + j];
+ lock_page(wdata2->pages[j]);
+ clear_page_dirty_for_io(wdata2->pages[j]);
+ }
+
+ wdata2->sync_mode = wdata->sync_mode;
+ wdata2->nr_pages = nr_pages;
+ wdata2->offset = page_offset(wdata2->pages[0]);
+ wdata2->pagesz = PAGE_SIZE;
+ wdata2->tailsz = tailsz;
+ wdata2->bytes = cur_len;
+
+ rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY,
+ &wdata2->cfile);
+ if (!wdata2->cfile) {
+ cifs_dbg(VFS, "No writable handle to retry writepages rc=%d\n",
+ rc);
+ if (!is_retryable_error(rc))
+ rc = -EBADF;
+ } else {
+ wdata2->pid = wdata2->cfile->pid;
+ rc = server->ops->async_writev(wdata2,
+ cifs_writedata_release);
+ }
+
+ for (j = 0; j < nr_pages; j++) {
+ unlock_page(wdata2->pages[j]);
+ if (rc != 0 && !is_retryable_error(rc)) {
+ SetPageError(wdata2->pages[j]);
+ end_page_writeback(wdata2->pages[j]);
+ put_page(wdata2->pages[j]);
+ }
+ }
+
+ kref_put(&wdata2->refcount, cifs_writedata_release);
+ if (rc) {
+ if (is_retryable_error(rc))
+ continue;
+ i += nr_pages;
+ break;
+ }
+
+ rest_len -= cur_len;
+ i += nr_pages;
+ } while (i < wdata->nr_pages);
+
+ /* cleanup remaining pages from the original wdata */
+ for (; i < wdata->nr_pages; i++) {
+ SetPageError(wdata->pages[i]);
+ end_page_writeback(wdata->pages[i]);
+ put_page(wdata->pages[i]);
+ }
+
+ if (rc != 0 && !is_retryable_error(rc))
+ mapping_set_error(inode->i_mapping, rc);
+ kref_put(&wdata->refcount, cifs_writedata_release);
+}
+
+void
+cifs_writev_complete(struct work_struct *work)
+{
+ struct cifs_writedata *wdata = container_of(work,
+ struct cifs_writedata, work);
+ struct inode *inode = d_inode(wdata->cfile->dentry);
+ int i = 0;
+
+ if (wdata->result == 0) {
+ spin_lock(&inode->i_lock);
+ cifs_update_eof(CIFS_I(inode), wdata->offset, wdata->bytes);
+ spin_unlock(&inode->i_lock);
+ cifs_stats_bytes_written(tlink_tcon(wdata->cfile->tlink),
+ wdata->bytes);
+ } else if (wdata->sync_mode == WB_SYNC_ALL && wdata->result == -EAGAIN)
+ return cifs_writev_requeue(wdata);
+
+ for (i = 0; i < wdata->nr_pages; i++) {
+ struct page *page = wdata->pages[i];
+
+ if (wdata->result == -EAGAIN)
+ __set_page_dirty_nobuffers(page);
+ else if (wdata->result < 0)
+ SetPageError(page);
+ end_page_writeback(page);
+ cifs_readpage_to_fscache(inode, page);
+ put_page(page);
+ }
+ if (wdata->result != -EAGAIN)
+ mapping_set_error(inode->i_mapping, wdata->result);
+ kref_put(&wdata->refcount, cifs_writedata_release);
+}
+
+struct cifs_writedata *
+cifs_writedata_alloc(unsigned int nr_pages, work_func_t complete)
+{
+ struct page **pages =
+ kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
+ if (pages)
+ return cifs_writedata_direct_alloc(pages, complete);
+
+ return NULL;
+}
+
+struct cifs_writedata *
+cifs_writedata_direct_alloc(struct page **pages, work_func_t complete)
+{
+ struct cifs_writedata *wdata;
+
+ wdata = kzalloc(sizeof(*wdata), GFP_NOFS);
+ if (wdata != NULL) {
+ wdata->pages = pages;
+ kref_init(&wdata->refcount);
+ INIT_LIST_HEAD(&wdata->list);
+ init_completion(&wdata->done);
+ INIT_WORK(&wdata->work, complete);
+ }
+ return wdata;
+}
+
+
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
struct address_space *mapping = page->mapping;
@@ -3022,7 +3271,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
if (ctx->direct_io) {
ssize_t result;
- result = iov_iter_get_pages_alloc(
+ result = iov_iter_get_pages_alloc2(
from, &pagevec, cur_len, &start);
if (result < 0) {
cifs_dbg(VFS,
@@ -3036,7 +3285,6 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
break;
}
cur_len = (size_t)result;
- iov_iter_advance(from, cur_len);
nr_pages =
(cur_len + start + PAGE_SIZE - 1) / PAGE_SIZE;
@@ -3758,7 +4006,7 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
if (ctx->direct_io) {
ssize_t result;
- result = iov_iter_get_pages_alloc(
+ result = iov_iter_get_pages_alloc2(
&direct_iov, &pagevec,
cur_len, &start);
if (result < 0) {
@@ -3774,7 +4022,6 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
break;
}
cur_len = (size_t)result;
- iov_iter_advance(&direct_iov, cur_len);
rdata = cifs_readdata_direct_alloc(
pagevec, cifs_uncached_readv_complete);
@@ -4004,7 +4251,7 @@ static ssize_t __cifs_readv(
if (!is_sync_kiocb(iocb))
ctx->iocb = iocb;
- if (iter_is_iovec(to))
+ if (user_backed_iter(to))
ctx->should_dirty = true;
if (direct) {
@@ -4459,10 +4706,11 @@ static void cifs_readahead(struct readahead_control *ractl)
* TODO: Send a whole batch of pages to be read
* by the cache.
*/
- page = readahead_page(ractl);
- last_batch_size = 1 << thp_order(page);
+ struct folio *folio = readahead_folio(ractl);
+
+ last_batch_size = folio_nr_pages(folio);
if (cifs_readpage_from_fscache(ractl->mapping->host,
- page) < 0) {
+ &folio->page) < 0) {
/*
* TODO: Deal with cache read failure
* here, but for the moment, delegate
@@ -4470,7 +4718,7 @@ static void cifs_readahead(struct readahead_control *ractl)
*/
caching = false;
}
- unlock_page(page);
+ folio_unlock(folio);
next_cached++;
cache_nr_pages--;
if (cache_nr_pages == 0)
@@ -4811,8 +5059,6 @@ void cifs_oplock_break(struct work_struct *work)
struct TCP_Server_Info *server = tcon->ses->server;
int rc = 0;
bool purge_cache = false;
- bool is_deferred = false;
- struct cifs_deferred_close *dclose;
wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
TASK_UNINTERRUPTIBLE);
@@ -4849,22 +5095,6 @@ void cifs_oplock_break(struct work_struct *work)
oplock_break_ack:
/*
- * When oplock break is received and there are no active
- * file handles but cached, then schedule deferred close immediately.
- * So, new open will not use cached handle.
- */
- spin_lock(&CIFS_I(inode)->deferred_lock);
- is_deferred = cifs_is_deferred_close(cfile, &dclose);
- spin_unlock(&CIFS_I(inode)->deferred_lock);
- if (is_deferred &&
- cfile->deferred_close_scheduled &&
- delayed_work_pending(&cfile->deferred)) {
- if (cancel_delayed_work(&cfile->deferred)) {
- _cifsFileInfo_put(cfile, false, false);
- goto oplock_break_done;
- }
- }
- /*
* releasing stale oplock after recent reconnect of smb session using
* a now incorrect file handle is not a data integrity issue but do
* not bother sending an oplock release if session to server still is
@@ -4875,7 +5105,7 @@ oplock_break_ack:
cinode);
cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
}
-oplock_break_done:
+
_cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
cifs_done_oplock_break(cinode);
}
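
The largest file.c addition is the set of kref-managed write-request helpers (cifs_writedata_alloc(), cifs_writedata_direct_alloc(), cifs_writedata_release(), cifs_writev_complete()/_requeue()): allocation takes the initial reference with kref_init(), every async consumer holds its own reference, and whoever performs the final kref_put() triggers the release callback that frees the page array and the descriptor. The same series also switches deferred close to the new ctx->closetimeo instead of reusing acregmax. A minimal sketch of the kref lifecycle only; wreq, wreq_alloc(), wreq_release() and wreq_submit() are invented names.

    #include <linux/kernel.h>
    #include <linux/kref.h>
    #include <linux/slab.h>

    struct wreq {
            struct kref refcount;      /* embedded reference count */
            void *pages;
    };

    static void wreq_release(struct kref *ref)
    {
            struct wreq *w = container_of(ref, struct wreq, refcount);

            kfree(w->pages);
            kfree(w);
    }

    static struct wreq *wreq_alloc(size_t npages)
    {
            struct wreq *w = kzalloc(sizeof(*w), GFP_NOFS);

            if (!w)
                    return NULL;
            w->pages = kcalloc(npages, sizeof(void *), GFP_NOFS);
            if (!w->pages) {
                    kfree(w);
                    return NULL;
            }
            kref_init(&w->refcount);   /* caller owns the first reference */
            return w;
    }

    static void wreq_submit(struct wreq *w)
    {
            kref_get(&w->refcount);    /* extra reference for the completion path */
            /* ... queue async work; its completion handler ends with: */
            kref_put(&w->refcount, wreq_release);
            /* ... and the submitter drops its own reference when it is done: */
            kref_put(&w->refcount, wreq_release);
    }
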
diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c
index 8dc0d923ef6a..0e13dec86b25 100644
--- a/fs/cifs/fs_context.c
+++ b/fs/cifs/fs_context.c
@@ -147,6 +147,7 @@ const struct fs_parameter_spec smb3_fs_parameters[] = {
fsparam_u32("actimeo", Opt_actimeo),
fsparam_u32("acdirmax", Opt_acdirmax),
fsparam_u32("acregmax", Opt_acregmax),
+ fsparam_u32("closetimeo", Opt_closetimeo),
fsparam_u32("echo_interval", Opt_echo_interval),
fsparam_u32("max_credits", Opt_max_credits),
fsparam_u32("handletimeout", Opt_handletimeout),
@@ -1074,6 +1075,13 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
}
ctx->acdirmax = ctx->acregmax = HZ * result.uint_32;
break;
+ case Opt_closetimeo:
+ ctx->closetimeo = HZ * result.uint_32;
+ if (ctx->closetimeo > SMB3_MAX_DCLOSETIMEO) {
+ cifs_errorf(fc, "closetimeo too large\n");
+ goto cifs_parse_mount_err;
+ }
+ break;
case Opt_echo_interval:
ctx->echo_interval = result.uint_32;
break;
@@ -1521,6 +1529,7 @@ int smb3_init_fs_context(struct fs_context *fc)
ctx->acregmax = CIFS_DEF_ACTIMEO;
ctx->acdirmax = CIFS_DEF_ACTIMEO;
+ ctx->closetimeo = SMB3_DEF_DCLOSETIMEO;
/* Most clients set timeout to 0, allows server to use its default */
ctx->handle_timeout = 0; /* See MS-SMB2 spec section 2.2.14.2.12 */
diff --git a/fs/cifs/fs_context.h b/fs/cifs/fs_context.h
index 5f093cb7e9b9..bbaee4c2281f 100644
--- a/fs/cifs/fs_context.h
+++ b/fs/cifs/fs_context.h
@@ -125,6 +125,7 @@ enum cifs_param {
Opt_actimeo,
Opt_acdirmax,
Opt_acregmax,
+ Opt_closetimeo,
Opt_echo_interval,
Opt_max_credits,
Opt_snapshot,
@@ -247,6 +248,8 @@ struct smb3_fs_context {
/* attribute cache timemout for files and directories in jiffies */
unsigned long acregmax;
unsigned long acdirmax;
+ /* timeout for deferred close of files in jiffies */
+ unsigned long closetimeo;
struct smb_version_operations *ops;
struct smb_version_values *vals;
char *prepath;
@@ -279,4 +282,9 @@ static inline struct smb3_fs_context *smb3_fc2context(const struct fs_context *f
extern int smb3_fs_context_dup(struct smb3_fs_context *new_ctx, struct smb3_fs_context *ctx);
extern void smb3_update_mnt_flags(struct cifs_sb_info *cifs_sb);
+/*
+ * max deferred close timeout (jiffies) - 2^30
+ */
+#define SMB3_MAX_DCLOSETIMEO (1 << 30)
+#define SMB3_DEF_DCLOSETIMEO (5 * HZ) /* Can increase later, other clients use larger */
#endif
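
fs_context.c and fs_context.h add the closetimeo= mount option controlling how long a deferred close may stay cached (file.c above now uses it instead of acregmax). The value is given in seconds, converted to jiffies with HZ, and rejected above SMB3_MAX_DCLOSETIMEO (2^30 jiffies, roughly 12 days at HZ=1000); the default SMB3_DEF_DCLOSETIMEO of 5*HZ is five seconds. A small sketch of that conversion and bound check, assuming a kernel build environment; parse_closetimeo() is an invented name.

    #include <linux/errno.h>
    #include <linux/jiffies.h>                 /* HZ */

    #define SMB3_MAX_DCLOSETIMEO (1 << 30)     /* jiffies, as defined in fs_context.h above */

    static int parse_closetimeo(unsigned int seconds, unsigned long *out_jiffies)
    {
            unsigned long j = (unsigned long)seconds * HZ;

            if (j > SMB3_MAX_DCLOSETIMEO)
                    return -EINVAL;            /* mount fails: "closetimeo too large" */
            *out_jiffies = j;
            return 0;
    }
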
diff --git a/fs/cifs/fscache.h b/fs/cifs/fscache.h
index aa3b941a5555..67b601041f0a 100644
--- a/fs/cifs/fscache.h
+++ b/fs/cifs/fscache.h
@@ -108,17 +108,6 @@ static inline void cifs_readpage_to_fscache(struct inode *inode,
__cifs_readpage_to_fscache(inode, page);
}
-static inline int cifs_fscache_release_page(struct page *page, gfp_t gfp)
-{
- if (PageFsCache(page)) {
- if (current_is_kswapd() || !(gfp & __GFP_FS))
- return false;
- wait_on_page_fscache(page);
- fscache_note_page_release(cifs_inode_cookie(page->mapping->host));
- }
- return true;
-}
-
#else /* CONFIG_CIFS_FSCACHE */
static inline
void cifs_fscache_fill_coherency(struct inode *inode,
@@ -154,11 +143,6 @@ cifs_readpage_from_fscache(struct inode *inode, struct page *page)
static inline
void cifs_readpage_to_fscache(struct inode *inode, struct page *page) {}
-static inline int nfs_fscache_release_page(struct page *page, gfp_t gfp)
-{
- return true; /* May release page */
-}
-
#endif /* CONFIG_CIFS_FSCACHE */
#endif /* _CIFS_FSCACHE_H */
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 81da81e18553..bac08c20f559 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -25,6 +25,7 @@
#include "fscache.h"
#include "fs_context.h"
#include "cifs_ioctl.h"
+#include "cached_dir.h"
static void cifs_set_ops(struct inode *inode)
{
@@ -339,6 +340,7 @@ cifs_create_dfs_fattr(struct cifs_fattr *fattr, struct super_block *sb)
fattr->cf_flags = CIFS_FATTR_DFS_REFERRAL;
}
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static int
cifs_get_file_info_unix(struct file *filp)
{
@@ -432,6 +434,14 @@ int cifs_get_inode_info_unix(struct inode **pinode,
cgiiu_exit:
return rc;
}
+#else
+int cifs_get_inode_info_unix(struct inode **pinode,
+ const unsigned char *full_path,
+ struct super_block *sb, unsigned int xid)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
static int
cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
@@ -795,6 +805,7 @@ static __u64 simple_hashstr(const char *str)
return hash;
}
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/**
* cifs_backup_query_path_info - SMB1 fallback code to get ino
*
@@ -847,6 +858,7 @@ cifs_backup_query_path_info(int xid,
*data = (FILE_ALL_INFO *)info.srch_entries_start;
return 0;
}
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
static void
cifs_set_fattr_ino(int xid,
@@ -991,6 +1003,7 @@ cifs_get_inode_info(struct inode **inode,
rc = 0;
break;
case -EACCES:
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/*
* perm errors, try again with backup flags if possible
*
@@ -1022,6 +1035,9 @@ cifs_get_inode_info(struct inode **inode,
/* nothing we can do, bail out */
goto out;
}
+#else
+ goto out;
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
break;
default:
cifs_dbg(FYI, "%s: unhandled err rc %d\n", __func__, rc);
@@ -1037,8 +1053,9 @@ cifs_get_inode_info(struct inode **inode,
/*
* 4. Tweak fattr based on mount options
*/
-
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
handle_mnt_opt:
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
/* query for SFU type info if supported and needed */
if (fattr.cf_cifsattrs & ATTR_SYSTEM &&
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
@@ -1223,7 +1240,7 @@ static const struct inode_operations cifs_ipc_inode_ops = {
static int
cifs_find_inode(struct inode *inode, void *opaque)
{
- struct cifs_fattr *fattr = (struct cifs_fattr *) opaque;
+ struct cifs_fattr *fattr = opaque;
/* don't match inode with different uniqueid */
if (CIFS_I(inode)->uniqueid != fattr->cf_uniqueid)
@@ -1247,7 +1264,7 @@ cifs_find_inode(struct inode *inode, void *opaque)
static int
cifs_init_inode(struct inode *inode, void *opaque)
{
- struct cifs_fattr *fattr = (struct cifs_fattr *) opaque;
+ struct cifs_fattr *fattr = opaque;
CIFS_I(inode)->uniqueid = fattr->cf_uniqueid;
CIFS_I(inode)->createtime = fattr->cf_createtime;
@@ -1435,6 +1452,7 @@ cifs_set_file_info(struct inode *inode, struct iattr *attrs, unsigned int xid,
return server->ops->set_file_info(inode, full_path, &info_buf, xid);
}
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/*
* Open the given file (if it isn't already), set the DELETE_ON_CLOSE bit
* and rename it to a random name that hopefully won't conflict with
@@ -1565,6 +1583,7 @@ undo_setattr:
goto out_close;
}
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
/* copied from fs/nfs/dir.c with small changes */
static void
@@ -1627,6 +1646,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
}
cifs_close_deferred_file_under_dentry(tcon, full_path);
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
le64_to_cpu(tcon->fsUnixInfo.Capability))) {
rc = CIFSPOSIXDelFile(xid, tcon, full_path,
@@ -1636,6 +1656,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
if ((rc == 0) || (rc == -ENOENT))
goto psx_del_no_retry;
}
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
retry_std_delete:
if (!server->ops->unlink) {
@@ -1714,9 +1735,11 @@ cifs_mkdir_qinfo(struct inode *parent, struct dentry *dentry, umode_t mode,
if (tcon->posix_extensions)
rc = smb311_posix_get_inode_info(&inode, full_path, parent->i_sb, xid);
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
else if (tcon->unix_ext)
rc = cifs_get_inode_info_unix(&inode, full_path, parent->i_sb,
xid);
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
else
rc = cifs_get_inode_info(&inode, full_path, NULL, parent->i_sb,
xid, NULL);
@@ -1746,6 +1769,7 @@ cifs_mkdir_qinfo(struct inode *parent, struct dentry *dentry, umode_t mode,
if (parent->i_mode & S_ISGID)
mode |= S_ISGID;
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
if (tcon->unix_ext) {
struct cifs_unix_set_info_args args = {
.mode = mode,
@@ -1768,6 +1792,9 @@ cifs_mkdir_qinfo(struct inode *parent, struct dentry *dentry, umode_t mode,
cifs_sb->local_nls,
cifs_remap(cifs_sb));
} else {
+#else
+ {
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
struct TCP_Server_Info *server = tcon->ses->server;
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) &&
(mode & S_IWUGO) == 0 && server->ops->mkdir_setinfo)
@@ -1788,6 +1815,7 @@ cifs_mkdir_qinfo(struct inode *parent, struct dentry *dentry, umode_t mode,
return 0;
}
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static int
cifs_posix_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode,
const char *full_path, struct cifs_sb_info *cifs_sb,
@@ -1850,6 +1878,7 @@ posix_mkdir_get_info:
xid);
goto posix_mkdir_out;
}
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
int cifs_mkdir(struct user_namespace *mnt_userns, struct inode *inode,
struct dentry *direntry, umode_t mode)
@@ -1892,6 +1921,7 @@ int cifs_mkdir(struct user_namespace *mnt_userns, struct inode *inode,
goto mkdir_out;
}
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
le64_to_cpu(tcon->fsUnixInfo.Capability))) {
rc = cifs_posix_mkdir(inode, direntry, mode, full_path, cifs_sb,
@@ -1899,6 +1929,7 @@ int cifs_mkdir(struct user_namespace *mnt_userns, struct inode *inode,
if (rc != -EOPNOTSUPP)
goto mkdir_out;
}
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
if (!server->ops->mkdir) {
rc = -ENOSYS;
@@ -2015,9 +2046,12 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
struct tcon_link *tlink;
struct cifs_tcon *tcon;
struct TCP_Server_Info *server;
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
struct cifs_fid fid;
struct cifs_open_parms oparms;
- int oplock, rc;
+ int oplock;
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+ int rc;
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink))
@@ -2043,6 +2077,7 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
if (server->vals->protocol_id != 0)
goto do_rename_exit;
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/* open-file renames don't work across directories */
if (to_dentry->d_parent != from_dentry->d_parent)
goto do_rename_exit;
@@ -2064,6 +2099,7 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
cifs_sb->local_nls, cifs_remap(cifs_sb));
CIFSSMBClose(xid, tcon, fid.netfid);
}
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
do_rename_exit:
if (rc == 0)
d_move(from_dentry, to_dentry);
@@ -2081,11 +2117,13 @@ cifs_rename2(struct user_namespace *mnt_userns, struct inode *source_dir,
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
struct cifs_tcon *tcon;
- FILE_UNIX_BASIC_INFO *info_buf_source = NULL;
- FILE_UNIX_BASIC_INFO *info_buf_target;
unsigned int xid;
int rc, tmprc;
int retry_count = 0;
+ FILE_UNIX_BASIC_INFO *info_buf_source = NULL;
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+ FILE_UNIX_BASIC_INFO *info_buf_target;
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
if (flags & ~RENAME_NOREPLACE)
return -EINVAL;
@@ -2139,6 +2177,7 @@ cifs_rename2(struct user_namespace *mnt_userns, struct inode *source_dir,
if (flags & RENAME_NOREPLACE)
goto cifs_rename_exit;
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
if (rc == -EEXIST && tcon->unix_ext) {
/*
* Are src and dst hardlinks of same inode? We can only tell
@@ -2178,6 +2217,8 @@ cifs_rename2(struct user_namespace *mnt_userns, struct inode *source_dir,
*/
unlink_target:
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+
/* Try unlinking the target dentry if it's not negative */
if (d_really_is_positive(target_dentry) && (rc == -EACCES || rc == -EEXIST)) {
if (d_is_dir(target_dentry))
@@ -2337,14 +2378,18 @@ int cifs_revalidate_file_attr(struct file *filp)
{
int rc = 0;
struct dentry *dentry = file_dentry(filp);
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
struct cifsFileInfo *cfile = (struct cifsFileInfo *) filp->private_data;
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
if (!cifs_dentry_needs_reval(dentry))
return rc;
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
if (tlink_tcon(cfile->tlink)->unix_ext)
rc = cifs_get_file_info_unix(filp);
else
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
rc = cifs_get_file_info(filp);
return rc;
@@ -2653,6 +2698,7 @@ set_size_out:
return rc;
}
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static int
cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
{
@@ -2800,6 +2846,7 @@ out:
free_xid(xid);
return rc;
}
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
static int
cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
@@ -2995,16 +3042,20 @@ cifs_setattr(struct user_namespace *mnt_userns, struct dentry *direntry,
struct iattr *attrs)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
- struct cifs_tcon *pTcon = cifs_sb_master_tcon(cifs_sb);
int rc, retries = 0;
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+ struct cifs_tcon *pTcon = cifs_sb_master_tcon(cifs_sb);
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
if (unlikely(cifs_forced_shutdown(cifs_sb)))
return -EIO;
do {
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
if (pTcon->unix_ext)
rc = cifs_setattr_unix(direntry, attrs);
else
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
rc = cifs_setattr_nounix(direntry, attrs);
retries++;
} while (is_retryable_error(rc) && retries < 2);
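
The cifs_setattr() hunk above shows the shape used throughout this inode.c diff: the SMB1/unix-extensions branch is guarded by CONFIG_CIFS_ALLOW_INSECURE_LEGACY while the modern path stays as the unconditional fallback. Below is a minimal stand-alone C sketch of that conditional-compilation dispatch; the helper names and the way the config macro is toggled are illustrative only, not the real CIFS symbols.

#include <stdio.h>

/* Uncomment to mimic CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y */
/* #define CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1 */

static int setattr_unix(void)   { puts("legacy SMB1 unix-extensions path"); return 0; }
static int setattr_nounix(void) { puts("modern path"); return 0; }

static int do_setattr(int unix_ext)
{
	int rc;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (unix_ext)
		rc = setattr_unix();
	else
#endif
		/* with the legacy code compiled out, this is the only branch */
		rc = setattr_nounix();
	return rc;
}

int main(void)
{
	return do_setattr(1);
}
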
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 0359b604bdbc..b6e6e5d6c8dd 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -333,6 +333,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
tcon = tlink_tcon(pSMBFile->tlink);
caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
#ifdef CONFIG_CIFS_POSIX
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
if (CIFS_UNIX_EXTATTR_CAP & caps) {
__u64 ExtAttrMask = 0;
rc = CIFSGetExtAttr(xid, tcon,
@@ -345,6 +346,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
if (rc != EOPNOTSUPP)
break;
}
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
#endif /* CONFIG_CIFS_POSIX */
rc = 0;
if (CIFS_I(inode)->cifsAttrs & ATTR_COMPRESSED) {
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index bbdf3281559c..6803cb27eecc 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -286,6 +286,7 @@ out:
return rc;
}
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/*
* SMB 1.0 Protocol specific functions
*/
@@ -368,6 +369,7 @@ cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
CIFSSMBClose(xid, tcon, fid.netfid);
return rc;
}
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
/*
* SMB 2.1/SMB3 Protocol specific functions
@@ -532,11 +534,15 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode,
goto cifs_hl_exit;
}
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
if (tcon->unix_ext)
rc = CIFSUnixCreateHardLink(xid, tcon, from_name, to_name,
cifs_sb->local_nls,
cifs_remap(cifs_sb));
else {
+#else
+ {
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
server = tcon->ses->server;
if (!server->ops->create_hardlink) {
rc = -ENOSYS;
@@ -704,10 +710,12 @@ cifs_symlink(struct user_namespace *mnt_userns, struct inode *inode,
/* BB what if DFS and this volume is on different share? BB */
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
rc = create_mf_symlink(xid, pTcon, cifs_sb, full_path, symname);
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
else if (pTcon->unix_ext)
rc = CIFSUnixCreateSymLink(xid, pTcon, full_path, symname,
cifs_sb->local_nls,
cifs_remap(cifs_sb));
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
/* else
rc = CIFSCreateReparseSymLink(xid, pTcon, fromName, toName,
cifs_sb_target->local_nls); */
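
cifs_hardlink() above uses a slightly different form of the same guard: when the legacy branch is compiled out, a bare "{" replaces "else {" so the block that follows stays balanced in both configurations. A small stand-alone sketch of that brace-balancing trick, with stand-in helper names rather than the CIFS functions:

#include <stdio.h>

/* Uncomment to mimic CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y */
/* #define CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1 */

static int legacy_hardlink(void) { puts("SMB1 unix-extensions path"); return 0; }
static int modern_hardlink(void) { puts("protocol-ops path"); return 0; }

static int do_hardlink(int unix_ext)
{
	int rc;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (unix_ext)
		rc = legacy_hardlink();
	else {
#else
	{
#endif
		rc = modern_hardlink();
	}
	return rc;
}

int main(void)
{
	return do_hardlink(0);
}
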
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 0e84e6fcf8ab..87f60f736731 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -23,6 +23,7 @@
#include "dns_resolve.h"
#endif
#include "fs_context.h"
+#include "cached_dir.h"
extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
@@ -69,6 +70,7 @@ sesInfoAlloc(void)
ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
if (ret_buf) {
atomic_inc(&sesInfoAllocCount);
+ spin_lock_init(&ret_buf->ses_lock);
ret_buf->ses_status = SES_NEW;
++ret_buf->ses_count;
INIT_LIST_HEAD(&ret_buf->smb_ses_list);
@@ -115,21 +117,19 @@ tconInfoAlloc(void)
ret_buf = kzalloc(sizeof(*ret_buf), GFP_KERNEL);
if (!ret_buf)
return NULL;
- ret_buf->crfid.fid = kzalloc(sizeof(*ret_buf->crfid.fid), GFP_KERNEL);
- if (!ret_buf->crfid.fid) {
+ ret_buf->cfid = init_cached_dir();
+ if (!ret_buf->cfid) {
kfree(ret_buf);
return NULL;
}
- INIT_LIST_HEAD(&ret_buf->crfid.dirents.entries);
- mutex_init(&ret_buf->crfid.dirents.de_mutex);
atomic_inc(&tconInfoAllocCount);
ret_buf->status = TID_NEW;
++ret_buf->tc_count;
+ spin_lock_init(&ret_buf->tc_lock);
INIT_LIST_HEAD(&ret_buf->openFileList);
INIT_LIST_HEAD(&ret_buf->tcon_list);
spin_lock_init(&ret_buf->open_file_lock);
- mutex_init(&ret_buf->crfid.fid_mutex);
spin_lock_init(&ret_buf->stat_lock);
atomic_set(&ret_buf->num_local_opens, 0);
atomic_set(&ret_buf->num_remote_opens, 0);
@@ -138,17 +138,17 @@ tconInfoAlloc(void)
}
void
-tconInfoFree(struct cifs_tcon *buf_to_free)
+tconInfoFree(struct cifs_tcon *tcon)
{
- if (buf_to_free == NULL) {
+ if (tcon == NULL) {
cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n");
return;
}
+ free_cached_dir(tcon);
atomic_dec(&tconInfoAllocCount);
- kfree(buf_to_free->nativeFileSystem);
- kfree_sensitive(buf_to_free->password);
- kfree(buf_to_free->crfid.fid);
- kfree(buf_to_free);
+ kfree(tcon->nativeFileSystem);
+ kfree_sensitive(tcon->password);
+ kfree(tcon);
}
struct smb_hdr *
@@ -172,9 +172,9 @@ cifs_buf_get(void)
/* clear the first few header bytes */
/* for most paths, more is cleared in header_assemble */
memset(ret_buf, 0, buf_size + 3);
- atomic_inc(&bufAllocCount);
+ atomic_inc(&buf_alloc_count);
#ifdef CONFIG_CIFS_STATS2
- atomic_inc(&totBufAllocCount);
+ atomic_inc(&total_buf_alloc_count);
#endif /* CONFIG_CIFS_STATS2 */
return ret_buf;
@@ -189,7 +189,7 @@ cifs_buf_release(void *buf_to_free)
}
mempool_free(buf_to_free, cifs_req_poolp);
- atomic_dec(&bufAllocCount);
+ atomic_dec(&buf_alloc_count);
return;
}
@@ -205,9 +205,9 @@ cifs_small_buf_get(void)
ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
/* No need to clear memory here, cleared in header assemble */
/* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
- atomic_inc(&smBufAllocCount);
+ atomic_inc(&small_buf_alloc_count);
#ifdef CONFIG_CIFS_STATS2
- atomic_inc(&totSmBufAllocCount);
+ atomic_inc(&total_small_buf_alloc_count);
#endif /* CONFIG_CIFS_STATS2 */
return ret_buf;
@@ -223,7 +223,7 @@ cifs_small_buf_release(void *buf_to_free)
}
mempool_free(buf_to_free, cifs_sm_req_poolp);
- atomic_dec(&smBufAllocCount);
+ atomic_dec(&small_buf_alloc_count);
return;
}
@@ -354,7 +354,7 @@ checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server)
/* otherwise, there is enough to get to the BCC */
if (check_smb_hdr(smb))
return -EIO;
- clc_len = smbCalcSize(smb, server);
+ clc_len = smbCalcSize(smb);
if (4 + rfclen != total_read) {
cifs_dbg(VFS, "Length read does not match RFC1001 length %d\n",
@@ -400,7 +400,6 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
{
struct smb_hdr *buf = (struct smb_hdr *)buffer;
struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
- struct list_head *tmp, *tmp1, *tmp2;
struct cifs_ses *ses;
struct cifs_tcon *tcon;
struct cifsInodeInfo *pCifsInode;
@@ -467,18 +466,14 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
/* look up tcon based on tid & uid */
spin_lock(&cifs_tcp_ses_lock);
- list_for_each(tmp, &srv->smb_ses_list) {
- ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
- list_for_each(tmp1, &ses->tcon_list) {
- tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
+ list_for_each_entry(ses, &srv->smb_ses_list, smb_ses_list) {
+ list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
if (tcon->tid != buf->Tid)
continue;
cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
spin_lock(&tcon->open_file_lock);
- list_for_each(tmp2, &tcon->openFileList) {
- netfile = list_entry(tmp2, struct cifsFileInfo,
- tlist);
+ list_for_each_entry(netfile, &tcon->openFileList, tlist) {
if (pSMB->Fid != netfile->fid.netfid)
continue;
@@ -742,6 +737,8 @@ cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode)
list_for_each_entry(cfile, &cifs_inode->openFileList, flist) {
if (delayed_work_pending(&cfile->deferred)) {
if (cancel_delayed_work(&cfile->deferred)) {
+ cifs_del_deferred_close(cfile);
+
tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
if (tmp_list == NULL)
break;
@@ -763,16 +760,16 @@ void
cifs_close_all_deferred_files(struct cifs_tcon *tcon)
{
struct cifsFileInfo *cfile;
- struct list_head *tmp;
struct file_list *tmp_list, *tmp_next_list;
struct list_head file_head;
INIT_LIST_HEAD(&file_head);
spin_lock(&tcon->open_file_lock);
- list_for_each(tmp, &tcon->openFileList) {
- cfile = list_entry(tmp, struct cifsFileInfo, tlist);
+ list_for_each_entry(cfile, &tcon->openFileList, tlist) {
if (delayed_work_pending(&cfile->deferred)) {
if (cancel_delayed_work(&cfile->deferred)) {
+ cifs_del_deferred_close(cfile);
+
tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
if (tmp_list == NULL)
break;
@@ -793,7 +790,6 @@ void
cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path)
{
struct cifsFileInfo *cfile;
- struct list_head *tmp;
struct file_list *tmp_list, *tmp_next_list;
struct list_head file_head;
void *page;
@@ -802,12 +798,13 @@ cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path)
INIT_LIST_HEAD(&file_head);
page = alloc_dentry_path();
spin_lock(&tcon->open_file_lock);
- list_for_each(tmp, &tcon->openFileList) {
- cfile = list_entry(tmp, struct cifsFileInfo, tlist);
+ list_for_each_entry(cfile, &tcon->openFileList, tlist) {
full_path = build_path_from_dentry(cfile->dentry, page);
if (strstr(full_path, path)) {
if (delayed_work_pending(&cfile->deferred)) {
if (cancel_delayed_work(&cfile->deferred)) {
+ cifs_del_deferred_close(cfile);
+
tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
if (tmp_list == NULL)
break;
@@ -1029,7 +1026,7 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
saved_len = count;
while (count && npages < max_pages) {
- rc = iov_iter_get_pages(iter, pages, count, max_pages, &start);
+ rc = iov_iter_get_pages2(iter, pages, count, max_pages, &start);
if (rc < 0) {
cifs_dbg(VFS, "Couldn't get user pages (rc=%zd)\n", rc);
break;
@@ -1041,7 +1038,6 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
break;
}
- iov_iter_advance(iter, rc);
count -= rc;
rc += start;
cur_npages = DIV_ROUND_UP(rc, PAGE_SIZE);
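
Several loops in misc.c are converted from list_for_each() plus list_entry() to the typed list_for_each_entry() macro, which removes the temporary "struct list_head *" cursor. A self-contained user-space mock of the two styles; the macros below imitate the kernel's <linux/list.h> for demonstration and are not the kernel headers:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)
#define list_for_each(pos, head) \
	for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next)
#define list_for_each_entry(pos, head, member) \
	for ((pos) = list_entry((head)->next, __typeof__(*(pos)), member); \
	     &(pos)->member != (head); \
	     (pos) = list_entry((pos)->member.next, __typeof__(*(pos)), member))

struct session { int id; struct list_head node; };

int main(void)
{
	struct list_head head = { &head, &head };
	struct session a = { .id = 1 }, b = { .id = 2 };
	struct session *ses;
	struct list_head *tmp;

	/* hand-built two-element list: head -> a -> b -> head */
	a.node.prev = &head;    a.node.next = &b.node;
	b.node.prev = &a.node;  b.node.next = &head;
	head.next = &a.node;    head.prev = &b.node;

	/* old style: untyped cursor plus an explicit list_entry() */
	list_for_each(tmp, &head)
		printf("old: %d\n", list_entry(tmp, struct session, node)->id);

	/* new style: typed cursor, no temporary needed */
	list_for_each_entry(ses, &head, node)
		printf("new: %d\n", ses->id);
	return 0;
}
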
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index 235aa1b395eb..1b52e6ac431c 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -909,9 +909,9 @@ map_and_check_smb_error(struct mid_q_entry *mid, bool logErr)
* portion, the number of word parameters and the data portion of the message
*/
unsigned int
-smbCalcSize(void *buf, struct TCP_Server_Info *server)
+smbCalcSize(void *buf)
{
- struct smb_hdr *ptr = (struct smb_hdr *)buf;
+ struct smb_hdr *ptr = buf;
return (sizeof(struct smb_hdr) + (2 * ptr->WordCount) +
2 /* size of the bcc field */ + get_bcc(ptr));
}
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 384cabdf47ca..8e060c00c969 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -21,6 +21,7 @@
#include "cifsfs.h"
#include "smb2proto.h"
#include "fs_context.h"
+#include "cached_dir.h"
/*
* To be safe - for UCS to UTF-8 with strings loaded with the rare long
@@ -805,8 +806,7 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
end_of_smb = cfile->srch_inf.ntwrk_buf_start +
server->ops->calc_smb_size(
- cfile->srch_inf.ntwrk_buf_start,
- server);
+ cfile->srch_inf.ntwrk_buf_start);
cur_ent = cfile->srch_inf.srch_entries_start;
first_entry_in_buffer = cfile->srch_inf.index_of_last_entry
@@ -1071,7 +1071,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
tcon = tlink_tcon(cifsFile->tlink);
}
- rc = open_cached_dir(xid, tcon, full_path, cifs_sb, &cfid);
+ rc = open_cached_dir(xid, tcon, full_path, cifs_sb, false, &cfid);
cifs_put_tlink(tlink);
if (rc)
goto cache_not_found;
@@ -1142,7 +1142,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
tcon = tlink_tcon(cifsFile->tlink);
rc = find_cifs_entry(xid, tcon, ctx->pos, file, full_path,
&current_entry, &num_to_fill);
- open_cached_dir(xid, tcon, full_path, cifs_sb, &cfid);
+ open_cached_dir(xid, tcon, full_path, cifs_sb, false, &cfid);
if (rc) {
cifs_dbg(FYI, "fce error %d\n", rc);
goto rddir2_exit;
@@ -1160,8 +1160,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
cifs_dbg(FYI, "loop through %d times filling dir for net buf %p\n",
num_to_fill, cifsFile->srch_inf.ntwrk_buf_start);
max_len = tcon->ses->server->ops->calc_smb_size(
- cifsFile->srch_inf.ntwrk_buf_start,
- tcon->ses->server);
+ cifsFile->srch_inf.ntwrk_buf_start);
end_of_smb = cifsFile->srch_inf.ntwrk_buf_start + max_len;
tmp_buf = kmalloc(UNICODE_NAME_MAX, GFP_KERNEL);
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 02c8b2906196..3af3b05b6c74 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -499,6 +499,7 @@ out:
return rc;
}
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static __u32 cifs_ssetup_hdr(struct cifs_ses *ses,
struct TCP_Server_Info *server,
SESSION_SETUP_ANDX *pSMB)
@@ -591,7 +592,6 @@ static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses,
*pbcc_area = bcc_ptr;
}
-
static void unicode_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
const struct nls_table *nls_cp)
{
@@ -753,6 +753,7 @@ static void decode_ascii_ssetup(char **pbcc_area, __u16 bleft,
for it later, but it is not very important */
cifs_dbg(FYI, "ascii: bytes left %d\n", bleft);
}
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
struct cifs_ses *ses)
@@ -1170,6 +1171,7 @@ struct sess_data {
struct kvec iov[3];
};
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static int
sess_alloc_buffer(struct sess_data *sess_data, int wct)
{
@@ -1846,3 +1848,4 @@ out:
kfree(sess_data);
return rc;
}
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index 2e20ee4dab7b..f36b2d2d40ca 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -92,17 +92,17 @@ cifs_find_mid(struct TCP_Server_Info *server, char *buffer)
struct smb_hdr *buf = (struct smb_hdr *)buffer;
struct mid_q_entry *mid;
- spin_lock(&GlobalMid_Lock);
+ spin_lock(&server->mid_lock);
list_for_each_entry(mid, &server->pending_mid_q, qhead) {
if (compare_mid(mid->mid, buf) &&
mid->mid_state == MID_REQUEST_SUBMITTED &&
le16_to_cpu(mid->command) == buf->Command) {
kref_get(&mid->refcount);
- spin_unlock(&GlobalMid_Lock);
+ spin_unlock(&server->mid_lock);
return mid;
}
}
- spin_unlock(&GlobalMid_Lock);
+ spin_unlock(&server->mid_lock);
return NULL;
}
@@ -166,7 +166,7 @@ cifs_get_next_mid(struct TCP_Server_Info *server)
__u16 last_mid, cur_mid;
bool collision, reconnect = false;
- spin_lock(&GlobalMid_Lock);
+ spin_lock(&server->mid_lock);
/* mid is 16 bit only for CIFS/SMB */
cur_mid = (__u16)((server->CurrentMid) & 0xffff);
@@ -225,7 +225,7 @@ cifs_get_next_mid(struct TCP_Server_Info *server)
}
cur_mid++;
}
- spin_unlock(&GlobalMid_Lock);
+ spin_unlock(&server->mid_lock);
if (reconnect) {
cifs_signal_cifsd_for_reconnect(server, false);
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
index f5dcc4940b6d..9dfd2dd612c2 100644
--- a/fs/cifs/smb2file.c
+++ b/fs/cifs/smb2file.c
@@ -61,7 +61,6 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
nr_ioctl_req.Reserved = 0;
rc = SMB2_ioctl(xid, oparms->tcon, fid->persistent_fid,
fid->volatile_fid, FSCTL_LMR_REQUEST_RESILIENCY,
- true /* is_fsctl */,
(char *)&nr_ioctl_req, sizeof(nr_ioctl_req),
CIFSMaxBufSize, NULL, NULL /* no return info */);
if (rc == -EOPNOTSUPP) {
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index 8571a459c710..b83f59051b26 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -23,6 +23,7 @@
#include "smb2glob.h"
#include "smb2pdu.h"
#include "smb2proto.h"
+#include "cached_dir.h"
static void
free_set_inf_compound(struct smb_rqst *rqst)
@@ -515,16 +516,16 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
if (strcmp(full_path, ""))
rc = -ENOENT;
else
- rc = open_cached_dir(xid, tcon, full_path, cifs_sb, &cfid);
+ rc = open_cached_dir(xid, tcon, full_path, cifs_sb, false, &cfid);
/* If it is a root and its handle is cached then use it */
if (!rc) {
- if (tcon->crfid.file_all_info_is_valid) {
+ if (cfid->file_all_info_is_valid) {
move_smb2_info_to_cifs(data,
- &tcon->crfid.file_all_info);
+ &cfid->file_all_info);
} else {
rc = SMB2_query_info(xid, tcon,
- cfid->fid->persistent_fid,
- cfid->fid->volatile_fid, smb2_data);
+ cfid->fid.persistent_fid,
+ cfid->fid.volatile_fid, smb2_data);
if (!rc)
move_smb2_info_to_cifs(data, smb2_data);
}
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 17813c3d0c6e..d73e5672aac4 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -16,6 +16,7 @@
#include "smb2status.h"
#include "smb2glob.h"
#include "nterr.h"
+#include "cached_dir.h"
static int
check_smb2_hdr(struct smb2_hdr *shdr, __u64 mid)
@@ -132,15 +133,15 @@ static __u32 get_neg_ctxt_len(struct smb2_hdr *hdr, __u32 len,
}
int
-smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr)
+smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *server)
{
struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
struct smb2_pdu *pdu = (struct smb2_pdu *)shdr;
- __u64 mid;
- __u32 clc_len; /* calculated length */
- int command;
- int pdu_size = sizeof(struct smb2_pdu);
int hdr_size = sizeof(struct smb2_hdr);
+ int pdu_size = sizeof(struct smb2_pdu);
+ int command;
+ __u32 calc_len; /* calculated length */
+ __u64 mid;
/*
* Add function to do table lookup of StructureSize by command
@@ -154,7 +155,7 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr)
/* decrypt frame now that it is completely read in */
spin_lock(&cifs_tcp_ses_lock);
- list_for_each_entry(iter, &srvr->smb_ses_list, smb_ses_list) {
+ list_for_each_entry(iter, &server->smb_ses_list, smb_ses_list) {
if (iter->Suid == le64_to_cpu(thdr->SessionId)) {
ses = iter;
break;
@@ -221,30 +222,33 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr)
}
}
- clc_len = smb2_calc_size(buf, srvr);
+ calc_len = smb2_calc_size(buf);
+
+ /* For SMB2_IOCTL, OutputOffset and OutputLength are optional, so might
+ * be 0, and not a real miscalculation */
+ if (command == SMB2_IOCTL_HE && calc_len == 0)
+ return 0;
- if (shdr->Command == SMB2_NEGOTIATE)
- clc_len += get_neg_ctxt_len(shdr, len, clc_len);
+ if (command == SMB2_NEGOTIATE_HE)
+ calc_len += get_neg_ctxt_len(shdr, len, calc_len);
- if (len != clc_len) {
- cifs_dbg(FYI, "Calculated size %u length %u mismatch mid %llu\n",
- clc_len, len, mid);
+ if (len != calc_len) {
/* create failed on symlink */
if (command == SMB2_CREATE_HE &&
shdr->Status == STATUS_STOPPED_ON_SYMLINK)
return 0;
/* Windows 7 server returns 24 bytes more */
- if (clc_len + 24 == len && command == SMB2_OPLOCK_BREAK_HE)
+ if (calc_len + 24 == len && command == SMB2_OPLOCK_BREAK_HE)
return 0;
/* server can return one byte more due to implied bcc[0] */
- if (clc_len == len + 1)
+ if (calc_len == len + 1)
return 0;
/*
* Some windows servers (win2016) will pad also the final
* PDU in a compound to 8 bytes.
*/
- if (((clc_len + 7) & ~7) == len)
+ if (((calc_len + 7) & ~7) == len)
return 0;
/*
@@ -253,12 +257,18 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr)
* SMB2/SMB3 frame length (header + smb2 response specific data)
* Some windows servers also pad up to 8 bytes when compounding.
*/
- if (clc_len < len)
+ if (calc_len < len)
return 0;
- pr_warn_once(
- "srv rsp too short, len %d not %d. cmd:%d mid:%llu\n",
- len, clc_len, command, mid);
+ /* Only log a message if len was really miscalculated */
+ if (unlikely(cifsFYI))
+ cifs_dbg(FYI, "Server response too short: calculated "
+ "length %u doesn't match read length %u (cmd=%d, mid=%llu)\n",
+ calc_len, len, command, mid);
+ else
+ pr_warn("Server response too short: calculated length "
+ "%u doesn't match read length %u (cmd=%d, mid=%llu)\n",
+ calc_len, len, command, mid);
return 1;
}
@@ -400,9 +410,9 @@ smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *shdr)
* portion, the number of word parameters and the data portion of the message.
*/
unsigned int
-smb2_calc_size(void *buf, struct TCP_Server_Info *srvr)
+smb2_calc_size(void *buf)
{
- struct smb2_pdu *pdu = (struct smb2_pdu *)buf;
+ struct smb2_pdu *pdu = buf;
struct smb2_hdr *shdr = &pdu->hdr;
int offset; /* the offset from the beginning of SMB to data area */
int data_length; /* the length of the variable length data area */
@@ -639,15 +649,7 @@ smb2_is_valid_lease_break(char *buffer)
}
spin_unlock(&tcon->open_file_lock);
- if (tcon->crfid.is_valid &&
- !memcmp(rsp->LeaseKey,
- tcon->crfid.fid->lease_key,
- SMB2_LEASE_KEY_SIZE)) {
- tcon->crfid.time = 0;
- INIT_WORK(&tcon->crfid.lease_break,
- smb2_cached_lease_break);
- queue_work(cifsiod_wq,
- &tcon->crfid.lease_break);
+ if (cached_dir_lease_break(tcon, rsp->LeaseKey)) {
spin_unlock(&cifs_tcp_ses_lock);
return true;
}
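
The length checks in smb2_check_message() above lean on the usual power-of-two round-up idiom: "(calc_len + 7) & ~7" is calc_len rounded up to the next multiple of 8, which is how servers that pad the final PDU of a compound to 8 bytes are tolerated. A tiny stand-alone demonstration with made-up lengths:

#include <stdio.h>

static unsigned int round_up8(unsigned int len)
{
	return (len + 7) & ~7u;	/* 0->0, 1..8->8, 9..16->16, ... */
}

int main(void)
{
	unsigned int calc_len = 93, wire_len = 96;

	printf("round_up8(%u) = %u\n", calc_len, round_up8(calc_len));
	/* the mismatch is forgiven when the wire length is just the padded size */
	printf("padded match: %s\n",
	       round_up8(calc_len) == wire_len ? "yes" : "no");
	return 0;
}
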
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 8802995b2d3d..4810bd62266a 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -27,6 +27,7 @@
#include "smbdirect.h"
#include "fscache.h"
#include "fs_context.h"
+#include "cached_dir.h"
/* Change credits for different ops and return the total number of credits */
static int
@@ -126,13 +127,13 @@ smb2_add_credits(struct TCP_Server_Info *server,
optype, scredits, add);
}
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsNeedReconnect
|| server->tcpStatus == CifsExiting) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
return;
}
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
switch (rc) {
case -1:
@@ -218,12 +219,12 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
spin_lock(&server->req_lock);
} else {
spin_unlock(&server->req_lock);
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsExiting) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
return -ENOENT;
}
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
spin_lock(&server->req_lock);
scredits = server->credits;
@@ -319,19 +320,19 @@ smb2_get_next_mid(struct TCP_Server_Info *server)
{
__u64 mid;
/* for SMB2 we need the current value */
- spin_lock(&GlobalMid_Lock);
+ spin_lock(&server->mid_lock);
mid = server->CurrentMid++;
- spin_unlock(&GlobalMid_Lock);
+ spin_unlock(&server->mid_lock);
return mid;
}
static void
smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
{
- spin_lock(&GlobalMid_Lock);
+ spin_lock(&server->mid_lock);
if (server->CurrentMid >= val)
server->CurrentMid -= val;
- spin_unlock(&GlobalMid_Lock);
+ spin_unlock(&server->mid_lock);
}
static struct mid_q_entry *
@@ -346,7 +347,7 @@ __smb2_find_mid(struct TCP_Server_Info *server, char *buf, bool dequeue)
return NULL;
}
- spin_lock(&GlobalMid_Lock);
+ spin_lock(&server->mid_lock);
list_for_each_entry(mid, &server->pending_mid_q, qhead) {
if ((mid->mid == wire_mid) &&
(mid->mid_state == MID_REQUEST_SUBMITTED) &&
@@ -356,11 +357,11 @@ __smb2_find_mid(struct TCP_Server_Info *server, char *buf, bool dequeue)
list_del_init(&mid->qhead);
mid->mid_flags |= MID_DELETED;
}
- spin_unlock(&GlobalMid_Lock);
+ spin_unlock(&server->mid_lock);
return mid;
}
}
- spin_unlock(&GlobalMid_Lock);
+ spin_unlock(&server->mid_lock);
return NULL;
}
@@ -386,7 +387,7 @@ smb2_dump_detail(void *buf, struct TCP_Server_Info *server)
shdr->Command, shdr->Status, shdr->Flags, shdr->MessageId,
shdr->Id.SyncId.ProcessId);
cifs_server_dbg(VFS, "smb buf %p len %u\n", buf,
- server->ops->calc_smb_size(buf, server));
+ server->ops->calc_smb_size(buf));
#endif
}
@@ -403,9 +404,9 @@ smb2_negotiate(const unsigned int xid,
{
int rc;
- spin_lock(&GlobalMid_Lock);
+ spin_lock(&server->mid_lock);
server->CurrentMid = 0;
- spin_unlock(&GlobalMid_Lock);
+ spin_unlock(&server->mid_lock);
rc = SMB2_negotiate(xid, ses, server);
/* BB we probably don't need to retry with modern servers */
if (rc == -EAGAIN)
@@ -680,7 +681,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
struct cifs_ses *ses = tcon->ses;
rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
- FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
+ FSCTL_QUERY_NETWORK_INTERFACE_INFO,
NULL /* no data input */, 0 /* no data input */,
CIFSMaxBufSize, (char **)&out_buf, &ret_data_len);
if (rc == -EOPNOTSUPP) {
@@ -702,300 +703,6 @@ out:
}
static void
-smb2_close_cached_fid(struct kref *ref)
-{
- struct cached_fid *cfid = container_of(ref, struct cached_fid,
- refcount);
- struct cached_dirent *dirent, *q;
-
- if (cfid->is_valid) {
- cifs_dbg(FYI, "clear cached root file handle\n");
- SMB2_close(0, cfid->tcon, cfid->fid->persistent_fid,
- cfid->fid->volatile_fid);
- }
-
- /*
- * We only check validity above to send SMB2_close,
- * but we still need to invalidate these entries
- * when this function is called
- */
- cfid->is_valid = false;
- cfid->file_all_info_is_valid = false;
- cfid->has_lease = false;
- if (cfid->dentry) {
- dput(cfid->dentry);
- cfid->dentry = NULL;
- }
- /*
- * Delete all cached dirent names
- */
- mutex_lock(&cfid->dirents.de_mutex);
- list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) {
- list_del(&dirent->entry);
- kfree(dirent->name);
- kfree(dirent);
- }
- cfid->dirents.is_valid = 0;
- cfid->dirents.is_failed = 0;
- cfid->dirents.ctx = NULL;
- cfid->dirents.pos = 0;
- mutex_unlock(&cfid->dirents.de_mutex);
-
-}
-
-void close_cached_dir(struct cached_fid *cfid)
-{
- mutex_lock(&cfid->fid_mutex);
- kref_put(&cfid->refcount, smb2_close_cached_fid);
- mutex_unlock(&cfid->fid_mutex);
-}
-
-void close_cached_dir_lease_locked(struct cached_fid *cfid)
-{
- if (cfid->has_lease) {
- cfid->has_lease = false;
- kref_put(&cfid->refcount, smb2_close_cached_fid);
- }
-}
-
-void close_cached_dir_lease(struct cached_fid *cfid)
-{
- mutex_lock(&cfid->fid_mutex);
- close_cached_dir_lease_locked(cfid);
- mutex_unlock(&cfid->fid_mutex);
-}
-
-void
-smb2_cached_lease_break(struct work_struct *work)
-{
- struct cached_fid *cfid = container_of(work,
- struct cached_fid, lease_break);
-
- close_cached_dir_lease(cfid);
-}
-
-/*
- * Open the and cache a directory handle.
- * Only supported for the root handle.
- * If error then *cfid is not initialized.
- */
-int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
- const char *path,
- struct cifs_sb_info *cifs_sb,
- struct cached_fid **cfid)
-{
- struct cifs_ses *ses;
- struct TCP_Server_Info *server;
- struct cifs_open_parms oparms;
- struct smb2_create_rsp *o_rsp = NULL;
- struct smb2_query_info_rsp *qi_rsp = NULL;
- int resp_buftype[2];
- struct smb_rqst rqst[2];
- struct kvec rsp_iov[2];
- struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
- struct kvec qi_iov[1];
- int rc, flags = 0;
- __le16 utf16_path = 0; /* Null - since an open of top of share */
- u8 oplock = SMB2_OPLOCK_LEVEL_II;
- struct cifs_fid *pfid;
- struct dentry *dentry;
-
- if (tcon == NULL || tcon->nohandlecache ||
- is_smb1_server(tcon->ses->server))
- return -ENOTSUPP;
-
- ses = tcon->ses;
- server = ses->server;
-
- if (cifs_sb->root == NULL)
- return -ENOENT;
-
- if (strlen(path))
- return -ENOENT;
-
- dentry = cifs_sb->root;
-
- mutex_lock(&tcon->crfid.fid_mutex);
- if (tcon->crfid.is_valid) {
- cifs_dbg(FYI, "found a cached root file handle\n");
- *cfid = &tcon->crfid;
- kref_get(&tcon->crfid.refcount);
- mutex_unlock(&tcon->crfid.fid_mutex);
- return 0;
- }
-
- /*
- * We do not hold the lock for the open because in case
- * SMB2_open needs to reconnect, it will end up calling
- * cifs_mark_open_files_invalid() which takes the lock again
- * thus causing a deadlock
- */
-
- mutex_unlock(&tcon->crfid.fid_mutex);
-
- if (smb3_encryption_required(tcon))
- flags |= CIFS_TRANSFORM_REQ;
-
- if (!server->ops->new_lease_key)
- return -EIO;
-
- pfid = tcon->crfid.fid;
- server->ops->new_lease_key(pfid);
-
- memset(rqst, 0, sizeof(rqst));
- resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
- memset(rsp_iov, 0, sizeof(rsp_iov));
-
- /* Open */
- memset(&open_iov, 0, sizeof(open_iov));
- rqst[0].rq_iov = open_iov;
- rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
-
- oparms.tcon = tcon;
- oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE);
- oparms.desired_access = FILE_READ_ATTRIBUTES;
- oparms.disposition = FILE_OPEN;
- oparms.fid = pfid;
- oparms.reconnect = false;
-
- rc = SMB2_open_init(tcon, server,
- &rqst[0], &oplock, &oparms, &utf16_path);
- if (rc)
- goto oshr_free;
- smb2_set_next_command(tcon, &rqst[0]);
-
- memset(&qi_iov, 0, sizeof(qi_iov));
- rqst[1].rq_iov = qi_iov;
- rqst[1].rq_nvec = 1;
-
- rc = SMB2_query_info_init(tcon, server,
- &rqst[1], COMPOUND_FID,
- COMPOUND_FID, FILE_ALL_INFORMATION,
- SMB2_O_INFO_FILE, 0,
- sizeof(struct smb2_file_all_info) +
- PATH_MAX * 2, 0, NULL);
- if (rc)
- goto oshr_free;
-
- smb2_set_related(&rqst[1]);
-
- rc = compound_send_recv(xid, ses, server,
- flags, 2, rqst,
- resp_buftype, rsp_iov);
- mutex_lock(&tcon->crfid.fid_mutex);
-
- /*
- * Now we need to check again as the cached root might have
- * been successfully re-opened from a concurrent process
- */
-
- if (tcon->crfid.is_valid) {
- /* work was already done */
-
- /* stash fids for close() later */
- struct cifs_fid fid = {
- .persistent_fid = pfid->persistent_fid,
- .volatile_fid = pfid->volatile_fid,
- };
-
- /*
- * caller expects this func to set the fid in crfid to valid
- * cached root, so increment the refcount.
- */
- kref_get(&tcon->crfid.refcount);
-
- mutex_unlock(&tcon->crfid.fid_mutex);
-
- if (rc == 0) {
- /* close extra handle outside of crit sec */
- SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
- }
- rc = 0;
- goto oshr_free;
- }
-
- /* Cached root is still invalid, continue normaly */
-
- if (rc) {
- if (rc == -EREMCHG) {
- tcon->need_reconnect = true;
- pr_warn_once("server share %s deleted\n",
- tcon->treeName);
- }
- goto oshr_exit;
- }
-
- atomic_inc(&tcon->num_remote_opens);
-
- o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
- oparms.fid->persistent_fid = o_rsp->PersistentFileId;
- oparms.fid->volatile_fid = o_rsp->VolatileFileId;
-#ifdef CONFIG_CIFS_DEBUG2
- oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
-#endif /* CIFS_DEBUG2 */
-
- tcon->crfid.tcon = tcon;
- tcon->crfid.is_valid = true;
- tcon->crfid.dentry = dentry;
- dget(dentry);
- kref_init(&tcon->crfid.refcount);
-
- /* BB TBD check to see if oplock level check can be removed below */
- if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) {
- /*
- * See commit 2f94a3125b87. Increment the refcount when we
- * get a lease for root, release it if lease break occurs
- */
- kref_get(&tcon->crfid.refcount);
- tcon->crfid.has_lease = true;
- smb2_parse_contexts(server, o_rsp,
- &oparms.fid->epoch,
- oparms.fid->lease_key, &oplock,
- NULL, NULL);
- } else
- goto oshr_exit;
-
- qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
- if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
- goto oshr_exit;
- if (!smb2_validate_and_copy_iov(
- le16_to_cpu(qi_rsp->OutputBufferOffset),
- sizeof(struct smb2_file_all_info),
- &rsp_iov[1], sizeof(struct smb2_file_all_info),
- (char *)&tcon->crfid.file_all_info))
- tcon->crfid.file_all_info_is_valid = true;
- tcon->crfid.time = jiffies;
-
-
-oshr_exit:
- mutex_unlock(&tcon->crfid.fid_mutex);
-oshr_free:
- SMB2_open_free(&rqst[0]);
- SMB2_query_info_free(&rqst[1]);
- free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
- free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
- if (rc == 0)
- *cfid = &tcon->crfid;
- return rc;
-}
-
-int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
- struct dentry *dentry,
- struct cached_fid **cfid)
-{
- mutex_lock(&tcon->crfid.fid_mutex);
- if (tcon->crfid.dentry == dentry) {
- cifs_dbg(FYI, "found a cached root file handle by dentry\n");
- *cfid = &tcon->crfid;
- kref_get(&tcon->crfid.refcount);
- mutex_unlock(&tcon->crfid.fid_mutex);
- return 0;
- }
- mutex_unlock(&tcon->crfid.fid_mutex);
- return -ENOENT;
-}
-
-static void
smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb)
{
@@ -1013,9 +720,9 @@ smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
oparms.fid = &fid;
oparms.reconnect = false;
- rc = open_cached_dir(xid, tcon, "", cifs_sb, &cfid);
+ rc = open_cached_dir(xid, tcon, "", cifs_sb, false, &cfid);
if (rc == 0)
- memcpy(&fid, cfid->fid, sizeof(struct cifs_fid));
+ memcpy(&fid, &cfid->fid, sizeof(struct cifs_fid));
else
rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
NULL, NULL);
@@ -1076,9 +783,16 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
struct cifs_open_parms oparms;
struct cifs_fid fid;
+ struct cached_fid *cfid;
- if ((*full_path == 0) && tcon->crfid.is_valid)
- return 0;
+ rc = open_cached_dir(xid, tcon, full_path, cifs_sb, true, &cfid);
+ if (!rc) {
+ if (cfid->is_valid) {
+ close_cached_dir(cfid);
+ return 0;
+ }
+ close_cached_dir(cfid);
+ }
utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
if (!utf16_path)
@@ -1145,9 +859,7 @@ move_smb2_ea_to_cifs(char *dst, size_t dst_size,
size_t name_len, value_len, user_name_len;
while (src_size > 0) {
- name = &src->ea_data[0];
name_len = (size_t)src->ea_name_length;
- value = &src->ea_data[src->ea_name_length + 1];
value_len = (size_t)le16_to_cpu(src->ea_value_length);
if (name_len == 0)
@@ -1159,6 +871,9 @@ move_smb2_ea_to_cifs(char *dst, size_t dst_size,
goto out;
}
+ name = &src->ea_data[0];
+ value = &src->ea_data[src->ea_name_length + 1];
+
if (ea_name) {
if (ea_name_len == name_len &&
memcmp(ea_name, name, name_len) == 0) {
@@ -1608,9 +1323,8 @@ SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
struct resume_key_req *res_key;
rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
- FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */,
- NULL, 0 /* no input */, CIFSMaxBufSize,
- (char **)&res_key, &ret_data_len);
+ FSCTL_SRV_REQUEST_RESUME_KEY, NULL, 0 /* no input */,
+ CIFSMaxBufSize, (char **)&res_key, &ret_data_len);
if (rc == -EOPNOTSUPP) {
pr_warn_once("Server share %s does not support copy range\n", tcon->treeName);
@@ -1752,7 +1466,7 @@ smb2_ioctl_query_info(const unsigned int xid,
rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
rc = SMB2_ioctl_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID,
- qi.info_type, true, buffer, qi.output_buffer_length,
+ qi.info_type, buffer, qi.output_buffer_length,
CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
MAX_SMB2_CLOSE_RESPONSE_SIZE);
free_req1_func = SMB2_ioctl_free;
@@ -1928,9 +1642,8 @@ smb2_copychunk_range(const unsigned int xid,
retbuf = NULL;
rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
- true /* is_fsctl */, (char *)pcchunk,
- sizeof(struct copychunk_ioctl), CIFSMaxBufSize,
- (char **)&retbuf, &ret_data_len);
+ (char *)pcchunk, sizeof(struct copychunk_ioctl),
+ CIFSMaxBufSize, (char **)&retbuf, &ret_data_len);
if (rc == 0) {
if (ret_data_len !=
sizeof(struct copychunk_ioctl_rsp)) {
@@ -2090,7 +1803,6 @@ static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon,
rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid, FSCTL_SET_SPARSE,
- true /* is_fctl */,
&setsparse, 1, CIFSMaxBufSize, NULL, NULL);
if (rc) {
tcon->broken_sparse_sup = true;
@@ -2173,7 +1885,6 @@ smb2_duplicate_extents(const unsigned int xid,
rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
trgtfile->fid.volatile_fid,
FSCTL_DUPLICATE_EXTENTS_TO_FILE,
- true /* is_fsctl */,
(char *)&dup_ext_buf,
sizeof(struct duplicate_extents_to_file),
CIFSMaxBufSize, NULL,
@@ -2208,7 +1919,6 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid,
FSCTL_SET_INTEGRITY_INFORMATION,
- true /* is_fsctl */,
(char *)&integr_info,
sizeof(struct fsctl_set_integrity_information_req),
CIFSMaxBufSize, NULL,
@@ -2261,7 +1971,6 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid,
FSCTL_SRV_ENUMERATE_SNAPSHOTS,
- true /* is_fsctl */,
NULL, 0 /* no input data */, max_response_size,
(char **)&retbuf,
&ret_data_len);
@@ -2574,7 +2283,6 @@ static void
smb2_is_network_name_deleted(char *buf, struct TCP_Server_Info *server)
{
struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
- struct list_head *tmp, *tmp1;
struct cifs_ses *ses;
struct cifs_tcon *tcon;
@@ -2582,12 +2290,12 @@ smb2_is_network_name_deleted(char *buf, struct TCP_Server_Info *server)
return;
spin_lock(&cifs_tcp_ses_lock);
- list_for_each(tmp, &server->smb_ses_list) {
- ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
- list_for_each(tmp1, &ses->tcon_list) {
- tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
+ list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+ list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
if (tcon->tid == le32_to_cpu(shdr->Id.SyncId.TreeId)) {
+ spin_lock(&tcon->tc_lock);
tcon->need_reconnect = true;
+ spin_unlock(&tcon->tc_lock);
spin_unlock(&cifs_tcp_ses_lock);
pr_warn_once("Server share %s deleted.\n",
tcon->treeName);
@@ -2723,8 +2431,12 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
memset(rsp_iov, 0, sizeof(rsp_iov));
+ /*
+ * We can only call this for things we know are directories.
+ */
if (!strcmp(path, ""))
- open_cached_dir(xid, tcon, path, cifs_sb, &cfid); /* cfid null if open dir failed */
+ open_cached_dir(xid, tcon, path, cifs_sb, false,
+ &cfid); /* cfid null if open dir failed */
memset(&open_iov, 0, sizeof(open_iov));
rqst[0].rq_iov = open_iov;
@@ -2750,8 +2462,8 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
if (cfid) {
rc = SMB2_query_info_init(tcon, server,
&rqst[1],
- cfid->fid->persistent_fid,
- cfid->fid->volatile_fid,
+ cfid->fid.persistent_fid,
+ cfid->fid.volatile_fid,
class, type, 0,
output_len, 0,
NULL);
@@ -2981,7 +2693,6 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
do {
rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
FSCTL_DFS_GET_REFERRALS,
- true /* is_fsctl */,
(char *)dfs_req, dfs_req_size, CIFSMaxBufSize,
(char **)&dfs_rsp, &dfs_rsp_size);
if (!is_retryable_error(rc))
@@ -3188,8 +2899,7 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
rc = SMB2_ioctl_init(tcon, server,
&rqst[1], fid.persistent_fid,
- fid.volatile_fid, FSCTL_GET_REPARSE_POINT,
- true /* is_fctl */, NULL, 0,
+ fid.volatile_fid, FSCTL_GET_REPARSE_POINT, NULL, 0,
CIFSMaxBufSize -
MAX_SMB2_CREATE_RESPONSE_SIZE -
MAX_SMB2_CLOSE_RESPONSE_SIZE);
@@ -3369,8 +3079,7 @@ smb2_query_reparse_tag(const unsigned int xid, struct cifs_tcon *tcon,
rc = SMB2_ioctl_init(tcon, server,
&rqst[1], COMPOUND_FID,
- COMPOUND_FID, FSCTL_GET_REPARSE_POINT,
- true /* is_fctl */, NULL, 0,
+ COMPOUND_FID, FSCTL_GET_REPARSE_POINT, NULL, 0,
CIFSMaxBufSize -
MAX_SMB2_CREATE_RESPONSE_SIZE -
MAX_SMB2_CLOSE_RESPONSE_SIZE);
@@ -3598,26 +3307,43 @@ get_smb2_acl(struct cifs_sb_info *cifs_sb,
return pntsd;
}
+static long smb3_zero_data(struct file *file, struct cifs_tcon *tcon,
+ loff_t offset, loff_t len, unsigned int xid)
+{
+ struct cifsFileInfo *cfile = file->private_data;
+ struct file_zero_data_information fsctl_buf;
+
+ cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
+
+ fsctl_buf.FileOffset = cpu_to_le64(offset);
+ fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
+
+ return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
+ cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
+ (char *)&fsctl_buf,
+ sizeof(struct file_zero_data_information),
+ 0, NULL, NULL);
+}
+
static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
loff_t offset, loff_t len, bool keep_size)
{
struct cifs_ses *ses = tcon->ses;
- struct inode *inode;
- struct cifsInodeInfo *cifsi;
+ struct inode *inode = file_inode(file);
+ struct cifsInodeInfo *cifsi = CIFS_I(inode);
struct cifsFileInfo *cfile = file->private_data;
- struct file_zero_data_information fsctl_buf;
long rc;
unsigned int xid;
__le64 eof;
xid = get_xid();
- inode = d_inode(cfile->dentry);
- cifsi = CIFS_I(inode);
-
trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid,
ses->Suid, offset, len);
+ inode_lock(inode);
+ filemap_invalidate_lock(inode->i_mapping);
+
/*
* We zero the range through ioctl, so we need remove the page caches
* first, otherwise the data may be inconsistent with the server.
@@ -3625,26 +3351,12 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
truncate_pagecache_range(inode, offset, offset + len - 1);
/* if file not oplocked can't be sure whether asking to extend size */
- if (!CIFS_CACHE_READ(cifsi))
- if (keep_size == false) {
- rc = -EOPNOTSUPP;
- trace_smb3_zero_err(xid, cfile->fid.persistent_fid,
- tcon->tid, ses->Suid, offset, len, rc);
- free_xid(xid);
- return rc;
- }
-
- cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
-
- fsctl_buf.FileOffset = cpu_to_le64(offset);
- fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
+ rc = -EOPNOTSUPP;
+ if (keep_size == false && !CIFS_CACHE_READ(cifsi))
+ goto zero_range_exit;
- rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
- cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, true,
- (char *)&fsctl_buf,
- sizeof(struct file_zero_data_information),
- 0, NULL, NULL);
- if (rc)
+ rc = smb3_zero_data(file, tcon, offset, len, xid);
+ if (rc < 0)
goto zero_range_exit;
/*
@@ -3657,6 +3369,8 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
}
zero_range_exit:
+ filemap_invalidate_unlock(inode->i_mapping);
+ inode_unlock(inode);
free_xid(xid);
if (rc)
trace_smb3_zero_err(xid, cfile->fid.persistent_fid, tcon->tid,
@@ -3670,7 +3384,7 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
loff_t offset, loff_t len)
{
- struct inode *inode;
+ struct inode *inode = file_inode(file);
struct cifsFileInfo *cfile = file->private_data;
struct file_zero_data_information fsctl_buf;
long rc;
@@ -3679,14 +3393,12 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
xid = get_xid();
- inode = d_inode(cfile->dentry);
-
+ inode_lock(inode);
/* Need to make file sparse, if not already, before freeing range. */
/* Consider adding equivalent for compressed since it could also work */
if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) {
rc = -EOPNOTSUPP;
- free_xid(xid);
- return rc;
+ goto out;
}
filemap_invalidate_lock(inode->i_mapping);
@@ -3703,11 +3415,13 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
- true /* is_fctl */, (char *)&fsctl_buf,
+ (char *)&fsctl_buf,
sizeof(struct file_zero_data_information),
CIFSMaxBufSize, NULL, NULL);
- free_xid(xid);
filemap_invalidate_unlock(inode->i_mapping);
+out:
+ inode_unlock(inode);
+ free_xid(xid);
return rc;
}
@@ -3763,7 +3477,7 @@ static int smb3_simple_fallocate_range(unsigned int xid,
in_data.length = cpu_to_le64(len);
rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid,
- FSCTL_QUERY_ALLOCATED_RANGES, true,
+ FSCTL_QUERY_ALLOCATED_RANGES,
(char *)&in_data, sizeof(in_data),
1024 * sizeof(struct file_allocated_range_buffer),
(char **)&out_data, &out_data_len);
@@ -4084,7 +3798,7 @@ static loff_t smb3_llseek(struct file *file, struct cifs_tcon *tcon, loff_t offs
rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid,
- FSCTL_QUERY_ALLOCATED_RANGES, true,
+ FSCTL_QUERY_ALLOCATED_RANGES,
(char *)&in_data, sizeof(in_data),
sizeof(struct file_allocated_range_buffer),
(char **)&out_data, &out_data_len);
@@ -4144,7 +3858,7 @@ static int smb3_fiemap(struct cifs_tcon *tcon,
rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid,
- FSCTL_QUERY_ALLOCATED_RANGES, true,
+ FSCTL_QUERY_ALLOCATED_RANGES,
(char *)&in_data, sizeof(in_data),
1024 * sizeof(struct file_allocated_range_buffer),
(char **)&out_data, &out_data_len);
@@ -4563,9 +4277,11 @@ smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
if (ses->Suid == ses_id) {
+ spin_lock(&ses->ses_lock);
ses_enc_key = enc ? ses->smb3encryptionkey :
ses->smb3decryptionkey;
memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE);
+ spin_unlock(&ses->ses_lock);
spin_unlock(&cifs_tcp_ses_lock);
return 0;
}
@@ -5080,23 +4796,24 @@ static void smb2_decrypt_offload(struct work_struct *work)
mid->callback(mid);
} else {
- spin_lock(&cifs_tcp_ses_lock);
- spin_lock(&GlobalMid_Lock);
+ spin_lock(&dw->server->srv_lock);
if (dw->server->tcpStatus == CifsNeedReconnect) {
+ spin_lock(&dw->server->mid_lock);
mid->mid_state = MID_RETRY_NEEDED;
- spin_unlock(&GlobalMid_Lock);
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&dw->server->mid_lock);
+ spin_unlock(&dw->server->srv_lock);
mid->callback(mid);
} else {
+ spin_lock(&dw->server->mid_lock);
mid->mid_state = MID_REQUEST_SUBMITTED;
mid->mid_flags &= ~(MID_DELETED);
list_add_tail(&mid->qhead,
&dw->server->pending_mid_q);
- spin_unlock(&GlobalMid_Lock);
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&dw->server->mid_lock);
+ spin_unlock(&dw->server->srv_lock);
}
}
- cifs_mid_q_entry_release(mid);
+ release_mid(mid);
}
free_pages:
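
A recurring change in smb2ops.c is that connection-status checks now take the per-server server->srv_lock (and MID queue walks take server->mid_lock) instead of the global cifs_tcp_ses_lock/GlobalMid_Lock, so unrelated connections no longer contend on one lock. A user-space pthread sketch of that per-object locking shape; the field and status names are stand-ins, not the kernel structures:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum tcp_status { TCP_GOOD, TCP_NEED_RECONNECT, TCP_EXITING };

struct server_info {
	enum tcp_status tcpStatus;
	pthread_mutex_t srv_lock;	/* per-server, instead of one global lock */
};

static bool server_unusable(struct server_info *server)
{
	bool bad;

	pthread_mutex_lock(&server->srv_lock);
	bad = server->tcpStatus == TCP_NEED_RECONNECT ||
	      server->tcpStatus == TCP_EXITING;
	pthread_mutex_unlock(&server->srv_lock);
	return bad;
}

int main(void)
{
	static struct server_info srv = {
		.tcpStatus = TCP_GOOD,
		.srv_lock = PTHREAD_MUTEX_INITIALIZER,
	};

	printf("unusable: %d\n", server_unusable(&srv));
	return 0;
}
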
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index c705de32e225..128e44e57528 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -39,6 +39,7 @@
#ifdef CONFIG_CIFS_DFS_UPCALL
#include "dfs_cache.h"
#endif
+#include "cached_dir.h"
/*
* The following table defines the expected "StructureSize" of SMB2 requests
@@ -162,7 +163,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL)
return 0;
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&tcon->tc_lock);
if (tcon->status == TID_EXITING) {
/*
* only tree disconnect, open, and write,
@@ -172,13 +173,13 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
if ((smb2_command != SMB2_WRITE) &&
(smb2_command != SMB2_CREATE) &&
(smb2_command != SMB2_TREE_DISCONNECT)) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&tcon->tc_lock);
cifs_dbg(FYI, "can not send cmd %d while umounting\n",
smb2_command);
return -ENODEV;
}
}
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&tcon->tc_lock);
if ((!tcon->ses) || (tcon->ses->ses_status == SES_EXITING) ||
(!tcon->ses->server) || !server)
return -EIO;
@@ -217,12 +218,12 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
}
/* are we still trying to reconnect? */
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
if (server->tcpStatus != CifsNeedReconnect) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
break;
}
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
if (retries && --retries)
continue;
@@ -256,13 +257,13 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
* and the server never sends an answer the socket will be closed
* and tcpStatus set to reconnect.
*/
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsNeedReconnect) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
rc = -EHOSTDOWN;
goto out;
}
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
/*
* need to prevent multiple threads trying to simultaneously
@@ -354,7 +355,7 @@ fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon,
void *buf,
unsigned int *total_len)
{
- struct smb2_pdu *spdu = (struct smb2_pdu *)buf;
+ struct smb2_pdu *spdu = buf;
/* lookup word count ie StructureSize from table */
__u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_command)];
@@ -1172,7 +1173,7 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
}
rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
- FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
+ FSCTL_VALIDATE_NEGOTIATE_INFO,
(char *)pneg_inbuf, inbuflen, CIFSMaxBufSize,
(char **)&pneg_rsp, &rsplen);
if (rc == -EOPNOTSUPP) {
@@ -1927,7 +1928,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
tcon->capabilities = rsp->Capabilities; /* we keep caps little endian */
tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess);
tcon->tid = le32_to_cpu(rsp->hdr.Id.SyncId.TreeId);
- strlcpy(tcon->treeName, tree, sizeof(tcon->treeName));
+ strscpy(tcon->treeName, tree, sizeof(tcon->treeName));
if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) &&
((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
@@ -1978,7 +1979,7 @@ SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
}
spin_unlock(&ses->chan_lock);
- close_cached_dir_lease(&tcon->crfid);
+ invalidate_all_cached_dirs(tcon);
rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, ses->server,
(void **) &req,
@@ -2571,19 +2572,15 @@ alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len,
path_len = UniStrnlen((wchar_t *)path, PATH_MAX);
- /*
- * make room for one path separator between the treename and
- * path
- */
- *out_len = treename_len + 1 + path_len;
+ /* make room for one path separator only if @path isn't empty */
+ *out_len = treename_len + (path[0] ? 1 : 0) + path_len;
/*
- * final path needs to be null-terminated UTF16 with a
- * size aligned to 8
+ * final path needs to be 8-byte aligned as specified in
+ * MS-SMB2 2.2.13 SMB2 CREATE Request.
*/
-
- *out_size = roundup((*out_len+1)*2, 8);
- *out_path = kzalloc(*out_size, GFP_KERNEL);
+ *out_size = roundup(*out_len * sizeof(__le16), 8);
+ *out_path = kzalloc(*out_size + sizeof(__le16) /* null */, GFP_KERNEL);
if (!*out_path)
return -ENOMEM;
@@ -3055,7 +3052,7 @@ int
SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
struct smb_rqst *rqst,
u64 persistent_fid, u64 volatile_fid, u32 opcode,
- bool is_fsctl, char *in_data, u32 indatalen,
+ char *in_data, u32 indatalen,
__u32 max_response_size)
{
struct smb2_ioctl_req *req;
@@ -3130,10 +3127,8 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
req->hdr.CreditCharge =
cpu_to_le16(DIV_ROUND_UP(max(indatalen, max_response_size),
SMB2_MAX_BUFFER_SIZE));
- if (is_fsctl)
- req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
- else
- req->Flags = 0;
+ /* always an FSCTL (for now) */
+ req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
/* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */
if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO)
@@ -3160,9 +3155,9 @@ SMB2_ioctl_free(struct smb_rqst *rqst)
*/
int
SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
- u64 volatile_fid, u32 opcode, bool is_fsctl,
- char *in_data, u32 indatalen, u32 max_out_data_len,
- char **out_data, u32 *plen /* returned data len */)
+ u64 volatile_fid, u32 opcode, char *in_data, u32 indatalen,
+ u32 max_out_data_len, char **out_data,
+ u32 *plen /* returned data len */)
{
struct smb_rqst rqst;
struct smb2_ioctl_rsp *rsp = NULL;
@@ -3204,7 +3199,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
rc = SMB2_ioctl_init(tcon, server,
&rqst, persistent_fid, volatile_fid, opcode,
- is_fsctl, in_data, indatalen, max_out_data_len);
+ in_data, indatalen, max_out_data_len);
if (rc)
goto ioctl_exit;
@@ -3296,7 +3291,7 @@ SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
cpu_to_le16(COMPRESSION_FORMAT_DEFAULT);
rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
- FSCTL_SET_COMPRESSION, true /* is_fsctl */,
+ FSCTL_SET_COMPRESSION,
(char *)&fsctl_input /* data input */,
2 /* in data len */, CIFSMaxBufSize /* max out data */,
&ret_data /* out data */, NULL);
@@ -3776,7 +3771,7 @@ smb2_echo_callback(struct mid_q_entry *mid)
credits.instance = server->reconnect_instance;
}
- DeleteMidQEntry(mid);
+ release_mid(mid);
add_credits(server, &credits, CIFS_ECHO_OP);
}
@@ -3911,15 +3906,15 @@ SMB2_echo(struct TCP_Server_Info *server)
cifs_dbg(FYI, "In echo request for conn_id %lld\n", server->conn_id);
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
if (server->ops->need_neg &&
server->ops->need_neg(server)) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
/* No need to send echo on newly established connections */
mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
return rc;
}
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
rc = smb2_plain_req_init(SMB2_ECHO, NULL, server,
(void **)&req, &total_len);
@@ -4201,7 +4196,7 @@ smb2_readv_callback(struct mid_q_entry *mid)
rdata->offset, rdata->got_bytes);
queue_work(cifsiod_wq, &rdata->work);
- DeleteMidQEntry(mid);
+ release_mid(mid);
add_credits(server, &credits, 0);
}
@@ -4440,7 +4435,7 @@ smb2_writev_callback(struct mid_q_entry *mid)
wdata->offset, wdata->bytes);
queue_work(cifsiod_wq, &wdata->work);
- DeleteMidQEntry(mid);
+ release_mid(mid);
add_credits(server, &credits, 0);
}
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index a69f1eed1cfe..3f740f24b96a 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -23,7 +23,7 @@ struct smb_rqst;
extern int map_smb2_to_linux_error(char *buf, bool log_err);
extern int smb2_check_message(char *buf, unsigned int length,
struct TCP_Server_Info *server);
-extern unsigned int smb2_calc_size(void *buf, struct TCP_Server_Info *server);
+extern unsigned int smb2_calc_size(void *buf);
extern char *smb2_get_data_area_len(int *off, int *len,
struct smb2_hdr *shdr);
extern __le16 *cifs_convert_path_to_utf16(const char *from,
@@ -54,16 +54,6 @@ extern bool smb2_is_valid_oplock_break(char *buffer,
extern int smb3_handle_read_data(struct TCP_Server_Info *server,
struct mid_q_entry *mid);
-extern int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
- const char *path,
- struct cifs_sb_info *cifs_sb,
- struct cached_fid **cfid);
-extern int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
- struct dentry *dentry,
- struct cached_fid **cfid);
-extern void close_cached_dir(struct cached_fid *cfid);
-extern void close_cached_dir_lease(struct cached_fid *cfid);
-extern void close_cached_dir_lease_locked(struct cached_fid *cfid);
extern void move_smb2_info_to_cifs(FILE_ALL_INFO *dst,
struct smb2_file_all_info *src);
extern int smb2_query_reparse_tag(const unsigned int xid, struct cifs_tcon *tcon,
@@ -147,13 +137,13 @@ extern int SMB2_open_init(struct cifs_tcon *tcon,
extern void SMB2_open_free(struct smb_rqst *rqst);
extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, u32 opcode,
- bool is_fsctl, char *in_data, u32 indatalen, u32 maxoutlen,
+ char *in_data, u32 indatalen, u32 maxoutlen,
char **out_data, u32 *plen /* returned data len */);
extern int SMB2_ioctl_init(struct cifs_tcon *tcon,
struct TCP_Server_Info *server,
struct smb_rqst *rqst,
u64 persistent_fid, u64 volatile_fid, u32 opcode,
- bool is_fsctl, char *in_data, u32 indatalen,
+ char *in_data, u32 indatalen,
__u32 max_response_size);
extern void SMB2_ioctl_free(struct smb_rqst *rqst);
extern int SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index 55e79f6ee78d..1a5fc3314dbf 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -640,13 +640,13 @@ smb2_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server)
if (!is_signed)
return 0;
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
if (server->ops->need_neg &&
server->ops->need_neg(server)) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
return 0;
}
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
if (!is_binding && !server->session_estab) {
strncpy(shdr->Signature, "BSRSPYL", 8);
return 0;
@@ -750,7 +750,7 @@ smb2_mid_entry_alloc(const struct smb2_hdr *shdr,
temp->callback = cifs_wake_up_task;
temp->callback_data = current;
- atomic_inc(&midCount);
+ atomic_inc(&mid_count);
temp->mid_state = MID_REQUEST_ALLOCATED;
trace_smb3_cmd_enter(le32_to_cpu(shdr->Id.SyncId.TreeId),
le64_to_cpu(shdr->SessionId),
@@ -762,28 +762,30 @@ static int
smb2_get_mid_entry(struct cifs_ses *ses, struct TCP_Server_Info *server,
struct smb2_hdr *shdr, struct mid_q_entry **mid)
{
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsExiting) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
return -ENOENT;
}
if (server->tcpStatus == CifsNeedReconnect) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
return -EAGAIN;
}
if (server->tcpStatus == CifsNeedNegotiate &&
shdr->Command != SMB2_NEGOTIATE) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
return -EAGAIN;
}
+ spin_unlock(&server->srv_lock);
+ spin_lock(&ses->ses_lock);
if (ses->ses_status == SES_NEW) {
if ((shdr->Command != SMB2_SESSION_SETUP) &&
(shdr->Command != SMB2_NEGOTIATE)) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&ses->ses_lock);
return -EAGAIN;
}
/* else ok - we are setting up session */
@@ -791,19 +793,19 @@ smb2_get_mid_entry(struct cifs_ses *ses, struct TCP_Server_Info *server,
if (ses->ses_status == SES_EXITING) {
if (shdr->Command != SMB2_LOGOFF) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&ses->ses_lock);
return -EAGAIN;
}
/* else ok - we are shutting down the session */
}
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&ses->ses_lock);
*mid = smb2_mid_entry_alloc(shdr, server);
if (*mid == NULL)
return -ENOMEM;
- spin_lock(&GlobalMid_Lock);
+ spin_lock(&server->mid_lock);
list_add_tail(&(*mid)->qhead, &server->pending_mid_q);
- spin_unlock(&GlobalMid_Lock);
+ spin_unlock(&server->mid_lock);
return 0;
}
@@ -854,7 +856,7 @@ smb2_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *server,
rc = smb2_sign_rqst(rqst, server);
if (rc) {
revert_current_mid_from_hdr(server, shdr);
- cifs_delete_mid(mid);
+ delete_mid(mid);
return ERR_PTR(rc);
}
@@ -869,13 +871,13 @@ smb2_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
(struct smb2_hdr *)rqst->rq_iov[0].iov_base;
struct mid_q_entry *mid;
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsNeedNegotiate &&
shdr->Command != SMB2_NEGOTIATE) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
return ERR_PTR(-EAGAIN);
}
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
smb2_seq_num_into_buf(server, shdr);
@@ -888,7 +890,7 @@ smb2_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
rc = smb2_sign_rqst(rqst, server);
if (rc) {
revert_current_mid_from_hdr(server, shdr);
- DeleteMidQEntry(mid);
+ release_mid(mid);
return ERR_PTR(rc);
}
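The hunks above replace the global cifs_tcp_ses_lock and GlobalMid_Lock with per-object spinlocks: server->srv_lock for connection state, ses->ses_lock for session state and server->mid_lock for the pending mid queue. A minimal sketch of the new pattern, assuming the srv_lock field introduced by this series:

/*
 * Checking per-server status no longer serializes on the global
 * connection-list lock.
 */
static bool example_server_is_exiting(struct TCP_Server_Info *server)
{
	bool exiting;

	/* old pattern: spin_lock(&cifs_tcp_ses_lock); */
	spin_lock(&server->srv_lock);
	exiting = (server->tcpStatus == CifsExiting);
	spin_unlock(&server->srv_lock);
	/* old pattern: spin_unlock(&cifs_tcp_ses_lock); */

	return exiting;
}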
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index bfc9bd55870a..c2fe035e573b 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -21,6 +21,7 @@
#include <asm/processor.h>
#include <linux/mempool.h>
#include <linux/sched/signal.h>
+#include <linux/task_io_accounting_ops.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
@@ -37,13 +38,13 @@ cifs_wake_up_task(struct mid_q_entry *mid)
wake_up_process(mid->callback_data);
}
-struct mid_q_entry *
-AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
+static struct mid_q_entry *
+alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
struct mid_q_entry *temp;
if (server == NULL) {
- cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
+ cifs_dbg(VFS, "%s: null TCP session\n", __func__);
return NULL;
}
@@ -68,12 +69,12 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
temp->callback = cifs_wake_up_task;
temp->callback_data = current;
- atomic_inc(&midCount);
+ atomic_inc(&mid_count);
temp->mid_state = MID_REQUEST_ALLOCATED;
return temp;
}
-static void _cifs_mid_q_entry_release(struct kref *refcount)
+static void __release_mid(struct kref *refcount)
{
struct mid_q_entry *midEntry =
container_of(refcount, struct mid_q_entry, refcount);
@@ -91,7 +92,7 @@ static void _cifs_mid_q_entry_release(struct kref *refcount)
server->ops->handle_cancelled_mid(midEntry, server);
midEntry->mid_state = MID_FREE;
- atomic_dec(&midCount);
+ atomic_dec(&mid_count);
if (midEntry->large_buf)
cifs_buf_release(midEntry->resp_buf);
else
@@ -152,29 +153,26 @@ static void _cifs_mid_q_entry_release(struct kref *refcount)
mempool_free(midEntry, cifs_mid_poolp);
}
-void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
+void release_mid(struct mid_q_entry *mid)
{
- spin_lock(&GlobalMid_Lock);
- kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
- spin_unlock(&GlobalMid_Lock);
-}
+ struct TCP_Server_Info *server = mid->server;
-void DeleteMidQEntry(struct mid_q_entry *midEntry)
-{
- cifs_mid_q_entry_release(midEntry);
+ spin_lock(&server->mid_lock);
+ kref_put(&mid->refcount, __release_mid);
+ spin_unlock(&server->mid_lock);
}
void
-cifs_delete_mid(struct mid_q_entry *mid)
+delete_mid(struct mid_q_entry *mid)
{
- spin_lock(&GlobalMid_Lock);
+ spin_lock(&mid->server->mid_lock);
if (!(mid->mid_flags & MID_DELETED)) {
list_del_init(&mid->qhead);
mid->mid_flags |= MID_DELETED;
}
- spin_unlock(&GlobalMid_Lock);
+ spin_unlock(&mid->server->mid_lock);
- DeleteMidQEntry(mid);
+ release_mid(mid);
}
/*
@@ -263,8 +261,8 @@ smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
int nvec;
unsigned long buflen = 0;
- if (server->vals->header_preamble_size == 0 &&
- rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
+ if (!is_smb1(server) && rqst->rq_nvec >= 2 &&
+ rqst->rq_iov[0].iov_len == 4) {
iov = &rqst->rq_iov[1];
nvec = rqst->rq_nvec - 1;
} else {
@@ -348,7 +346,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
sigprocmask(SIG_BLOCK, &mask, &oldmask);
/* Generate a rfc1002 marker for SMB2+ */
- if (server->vals->header_preamble_size == 0) {
+ if (!is_smb1(server)) {
struct kvec hiov = {
.iov_base = &rfc1002_marker,
.iov_len = 4
@@ -577,12 +575,12 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
} else {
spin_unlock(&server->req_lock);
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsExiting) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
return -ENOENT;
}
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
/*
* For normal commands, reserve the last MAX_COMPOUND
@@ -725,11 +723,11 @@ cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
struct mid_q_entry **ppmidQ)
{
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&ses->ses_lock);
if (ses->ses_status == SES_NEW) {
if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
(in_buf->Command != SMB_COM_NEGOTIATE)) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&ses->ses_lock);
return -EAGAIN;
}
/* else ok - we are setting up session */
@@ -738,19 +736,19 @@ static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
if (ses->ses_status == SES_EXITING) {
/* check if SMB session is bad because we are setting it up */
if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&ses->ses_lock);
return -EAGAIN;
}
/* else ok - we are shutting down session */
}
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&ses->ses_lock);
- *ppmidQ = AllocMidQEntry(in_buf, ses->server);
+ *ppmidQ = alloc_mid(in_buf, ses->server);
if (*ppmidQ == NULL)
return -ENOMEM;
- spin_lock(&GlobalMid_Lock);
+ spin_lock(&ses->server->mid_lock);
list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
- spin_unlock(&GlobalMid_Lock);
+ spin_unlock(&ses->server->mid_lock);
return 0;
}
@@ -782,13 +780,13 @@ cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
if (server->sign)
hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
- mid = AllocMidQEntry(hdr, server);
+ mid = alloc_mid(hdr, server);
if (mid == NULL)
return ERR_PTR(-ENOMEM);
rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
if (rc) {
- DeleteMidQEntry(mid);
+ release_mid(mid);
return ERR_PTR(rc);
}
@@ -849,9 +847,9 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
mid->mid_state = MID_REQUEST_SUBMITTED;
/* put it on the pending_mid_q */
- spin_lock(&GlobalMid_Lock);
+ spin_lock(&server->mid_lock);
list_add_tail(&mid->qhead, &server->pending_mid_q);
- spin_unlock(&GlobalMid_Lock);
+ spin_unlock(&server->mid_lock);
/*
* Need to store the time in mid before calling I/O. For call_async,
@@ -865,7 +863,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
if (rc < 0) {
revert_current_mid(server, mid->credits);
server->sequence_number -= 2;
- cifs_delete_mid(mid);
+ delete_mid(mid);
}
cifs_server_unlock(server);
@@ -912,10 +910,10 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
__func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);
- spin_lock(&GlobalMid_Lock);
+ spin_lock(&server->mid_lock);
switch (mid->mid_state) {
case MID_RESPONSE_RECEIVED:
- spin_unlock(&GlobalMid_Lock);
+ spin_unlock(&server->mid_lock);
return rc;
case MID_RETRY_NEEDED:
rc = -EAGAIN;
@@ -935,9 +933,9 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
__func__, mid->mid, mid->mid_state);
rc = -EIO;
}
- spin_unlock(&GlobalMid_Lock);
+ spin_unlock(&server->mid_lock);
- DeleteMidQEntry(mid);
+ release_mid(mid);
return rc;
}
@@ -997,7 +995,7 @@ cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
return ERR_PTR(rc);
rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
if (rc) {
- cifs_delete_mid(mid);
+ delete_mid(mid);
return ERR_PTR(rc);
}
return mid;
@@ -1026,7 +1024,7 @@ static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
cifs_compound_callback(mid);
- DeleteMidQEntry(mid);
+ release_mid(mid);
}
/*
@@ -1078,12 +1076,12 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
return -EIO;
}
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsExiting) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
return -ENOENT;
}
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
/*
* Wait for all the requests to become available.
@@ -1130,7 +1128,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
if (IS_ERR(midQ[i])) {
revert_current_mid(server, i);
for (j = 0; j < i; j++)
- cifs_delete_mid(midQ[j]);
+ delete_mid(midQ[j]);
cifs_server_unlock(server);
/* Update # of requests on wire to server */
@@ -1186,17 +1184,17 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
/*
* Compounding is never used during session establish.
*/
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&ses->ses_lock);
if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&ses->ses_lock);
cifs_server_lock(server);
smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
cifs_server_unlock(server);
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&ses->ses_lock);
}
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&ses->ses_lock);
for (i = 0; i < num_rqst; i++) {
rc = wait_for_response(server, midQ[i]);
@@ -1208,14 +1206,14 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
midQ[i]->mid, le16_to_cpu(midQ[i]->command));
send_cancel(server, &rqst[i], midQ[i]);
- spin_lock(&GlobalMid_Lock);
+ spin_lock(&server->mid_lock);
midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
midQ[i]->callback = cifs_cancelled_callback;
cancelled_mid[i] = true;
credits[i].value = 0;
}
- spin_unlock(&GlobalMid_Lock);
+ spin_unlock(&server->mid_lock);
}
}
@@ -1240,7 +1238,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
buf = (char *)midQ[i]->resp_buf;
resp_iov[i].iov_base = buf;
resp_iov[i].iov_len = midQ[i]->resp_buf_size +
- server->vals->header_preamble_size;
+ HEADER_PREAMBLE_SIZE(server);
if (midQ[i]->large_buf)
resp_buf_type[i] = CIFS_LARGE_BUFFER;
@@ -1250,7 +1248,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
rc = server->ops->check_receive(midQ[i], server,
flags & CIFS_LOG_ERROR);
- /* mark it so buf will not be freed by cifs_delete_mid */
+ /* mark it so buf will not be freed by delete_mid */
if ((flags & CIFS_NO_RSP_BUF) == 0)
midQ[i]->resp_buf = NULL;
@@ -1259,19 +1257,19 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
/*
* Compounding is never used during session establish.
*/
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&ses->ses_lock);
if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
struct kvec iov = {
.iov_base = resp_iov[0].iov_base,
.iov_len = resp_iov[0].iov_len
};
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&ses->ses_lock);
cifs_server_lock(server);
smb311_update_preauth_hash(ses, server, &iov, 1);
cifs_server_unlock(server);
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&ses->ses_lock);
}
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&ses->ses_lock);
out:
/*
@@ -1282,7 +1280,7 @@ out:
*/
for (i = 0; i < num_rqst; i++) {
if (!cancelled_mid[i])
- cifs_delete_mid(midQ[i]);
+ delete_mid(midQ[i]);
}
return rc;
@@ -1360,12 +1358,12 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
return -EIO;
}
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsExiting) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
return -ENOENT;
}
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
/* Ensure that we do not send more than 50 overlapping requests
to the same server. We may make this configurable later or
@@ -1419,15 +1417,15 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
rc = wait_for_response(server, midQ);
if (rc != 0) {
send_cancel(server, &rqst, midQ);
- spin_lock(&GlobalMid_Lock);
+ spin_lock(&server->mid_lock);
if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
/* no longer considered to be "in-flight" */
- midQ->callback = DeleteMidQEntry;
- spin_unlock(&GlobalMid_Lock);
+ midQ->callback = release_mid;
+ spin_unlock(&server->mid_lock);
add_credits(server, &credits, 0);
return rc;
}
- spin_unlock(&GlobalMid_Lock);
+ spin_unlock(&server->mid_lock);
}
rc = cifs_sync_mid_result(midQ, server);
@@ -1447,7 +1445,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
rc = cifs_check_receive(midQ, server, 0);
out:
- cifs_delete_mid(midQ);
+ delete_mid(midQ);
add_credits(server, &credits, 0);
return rc;
@@ -1505,12 +1503,12 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
return -EIO;
}
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsExiting) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
return -ENOENT;
}
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
/* Ensure that we do not send more than 50 overlapping requests
to the same server. We may make this configurable later or
@@ -1540,7 +1538,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
if (rc) {
- cifs_delete_mid(midQ);
+ delete_mid(midQ);
cifs_server_unlock(server);
return rc;
}
@@ -1557,7 +1555,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
cifs_server_unlock(server);
if (rc < 0) {
- cifs_delete_mid(midQ);
+ delete_mid(midQ);
return rc;
}
@@ -1568,19 +1566,19 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
(server->tcpStatus != CifsNew)));
/* Were we interrupted by a signal ? */
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
if ((rc == -ERESTARTSYS) &&
(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
((server->tcpStatus == CifsGood) ||
(server->tcpStatus == CifsNew))) {
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
if (in_buf->Command == SMB_COM_TRANSACTION2) {
/* POSIX lock. We send a NT_CANCEL SMB to cause the
blocking lock to return. */
rc = send_cancel(server, &rqst, midQ);
if (rc) {
- cifs_delete_mid(midQ);
+ delete_mid(midQ);
return rc;
}
} else {
@@ -1592,7 +1590,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
/* If we get -ENOLCK back the lock may have
already been removed. Don't exit in this case. */
if (rc && rc != -ENOLCK) {
- cifs_delete_mid(midQ);
+ delete_mid(midQ);
return rc;
}
}
@@ -1600,21 +1598,21 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
rc = wait_for_response(server, midQ);
if (rc) {
send_cancel(server, &rqst, midQ);
- spin_lock(&GlobalMid_Lock);
+ spin_lock(&server->mid_lock);
if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
/* no longer considered to be "in-flight" */
- midQ->callback = DeleteMidQEntry;
- spin_unlock(&GlobalMid_Lock);
+ midQ->callback = release_mid;
+ spin_unlock(&server->mid_lock);
return rc;
}
- spin_unlock(&GlobalMid_Lock);
+ spin_unlock(&server->mid_lock);
}
/* We got the response - restart system call. */
rstart = 1;
- spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&server->srv_lock);
}
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&server->srv_lock);
rc = cifs_sync_mid_result(midQ, server);
if (rc != 0)
@@ -1631,8 +1629,185 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
rc = cifs_check_receive(midQ, server, 0);
out:
- cifs_delete_mid(midQ);
+ delete_mid(midQ);
if (rstart && rc == -EACCES)
return -ERESTARTSYS;
return rc;
}
+
+/*
+ * Discard any remaining data in the current SMB. To do this, we borrow the
+ * current bigbuf.
+ */
+int
+cifs_discard_remaining_data(struct TCP_Server_Info *server)
+{
+ unsigned int rfclen = server->pdu_size;
+ int remaining = rfclen + HEADER_PREAMBLE_SIZE(server) -
+ server->total_read;
+
+ while (remaining > 0) {
+ int length;
+
+ length = cifs_discard_from_socket(server,
+ min_t(size_t, remaining,
+ CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
+ if (length < 0)
+ return length;
+ server->total_read += length;
+ remaining -= length;
+ }
+
+ return 0;
+}
+
+static int
+__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
+ bool malformed)
+{
+ int length;
+
+ length = cifs_discard_remaining_data(server);
+ dequeue_mid(mid, malformed);
+ mid->resp_buf = server->smallbuf;
+ server->smallbuf = NULL;
+ return length;
+}
+
+static int
+cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+{
+ struct cifs_readdata *rdata = mid->callback_data;
+
+ return __cifs_readv_discard(server, mid, rdata->result);
+}
+
+int
+cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+{
+ int length, len;
+ unsigned int data_offset, data_len;
+ struct cifs_readdata *rdata = mid->callback_data;
+ char *buf = server->smallbuf;
+ unsigned int buflen = server->pdu_size + HEADER_PREAMBLE_SIZE(server);
+ bool use_rdma_mr = false;
+
+ cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%u\n",
+ __func__, mid->mid, rdata->offset, rdata->bytes);
+
+ /*
+ * read the rest of READ_RSP header (sans Data array), or whatever we
+ * can if there's not enough data. At this point, we've read down to
+ * the Mid.
+ */
+ len = min_t(unsigned int, buflen, server->vals->read_rsp_size) -
+ HEADER_SIZE(server) + 1;
+
+ length = cifs_read_from_socket(server,
+ buf + HEADER_SIZE(server) - 1, len);
+ if (length < 0)
+ return length;
+ server->total_read += length;
+
+ if (server->ops->is_session_expired &&
+ server->ops->is_session_expired(buf)) {
+ cifs_reconnect(server, true);
+ return -1;
+ }
+
+ if (server->ops->is_status_pending &&
+ server->ops->is_status_pending(buf, server)) {
+ cifs_discard_remaining_data(server);
+ return -1;
+ }
+
+ /* set up first two iov for signature check and to get credits */
+ rdata->iov[0].iov_base = buf;
+ rdata->iov[0].iov_len = HEADER_PREAMBLE_SIZE(server);
+ rdata->iov[1].iov_base = buf + HEADER_PREAMBLE_SIZE(server);
+ rdata->iov[1].iov_len =
+ server->total_read - HEADER_PREAMBLE_SIZE(server);
+ cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
+ rdata->iov[0].iov_base, rdata->iov[0].iov_len);
+ cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
+ rdata->iov[1].iov_base, rdata->iov[1].iov_len);
+
+ /* Was the SMB read successful? */
+ rdata->result = server->ops->map_error(buf, false);
+ if (rdata->result != 0) {
+ cifs_dbg(FYI, "%s: server returned error %d\n",
+ __func__, rdata->result);
+ /* normal error on read response */
+ return __cifs_readv_discard(server, mid, false);
+ }
+
+ /* Is there enough to get to the rest of the READ_RSP header? */
+ if (server->total_read < server->vals->read_rsp_size) {
+ cifs_dbg(FYI, "%s: server returned short header. got=%u expected=%zu\n",
+ __func__, server->total_read,
+ server->vals->read_rsp_size);
+ rdata->result = -EIO;
+ return cifs_readv_discard(server, mid);
+ }
+
+ data_offset = server->ops->read_data_offset(buf) +
+ HEADER_PREAMBLE_SIZE(server);
+ if (data_offset < server->total_read) {
+ /*
+ * win2k8 sometimes sends an offset of 0 when the read
+ * is beyond the EOF. Treat it as if the data starts just after
+ * the header.
+ */
+ cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
+ __func__, data_offset);
+ data_offset = server->total_read;
+ } else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
+ /* data_offset is beyond the end of smallbuf */
+ cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
+ __func__, data_offset);
+ rdata->result = -EIO;
+ return cifs_readv_discard(server, mid);
+ }
+
+ cifs_dbg(FYI, "%s: total_read=%u data_offset=%u\n",
+ __func__, server->total_read, data_offset);
+
+ len = data_offset - server->total_read;
+ if (len > 0) {
+ /* read any junk before data into the rest of smallbuf */
+ length = cifs_read_from_socket(server,
+ buf + server->total_read, len);
+ if (length < 0)
+ return length;
+ server->total_read += length;
+ }
+
+ /* how much data is in the response? */
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ use_rdma_mr = rdata->mr;
+#endif
+ data_len = server->ops->read_data_length(buf, use_rdma_mr);
+ if (!use_rdma_mr && (data_offset + data_len > buflen)) {
+ /* data_len is corrupt -- discard frame */
+ rdata->result = -EIO;
+ return cifs_readv_discard(server, mid);
+ }
+
+ length = rdata->read_into_pages(server, rdata, data_len);
+ if (length < 0)
+ return length;
+
+ server->total_read += length;
+
+ cifs_dbg(FYI, "total_read=%u buflen=%u remaining=%u\n",
+ server->total_read, buflen, data_len);
+
+ /* discard anything left over */
+ if (server->total_read < buflen)
+ return cifs_readv_discard(server, mid);
+
+ dequeue_mid(mid, false);
+ mid->resp_buf = server->smallbuf;
+ server->smallbuf = NULL;
+ return length;
+}
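transport.c now funnels all mid teardown through two helpers: release_mid(), which drops the kref under server->mid_lock, and delete_mid(), which first unlinks the entry from pending_mid_q and then drops the reference. A sketch of how a caller might pick between them, assuming only the names and fields shown above:

/*
 * Sketch only: delete_mid() unlinks the entry and then drops the
 * reference; release_mid() only drops it.
 */
static void example_finish_mid(struct mid_q_entry *mid, bool still_queued)
{
	if (still_queued)
		delete_mid(mid);	/* list_del_init() + kref_put() */
	else
		release_mid(mid);	/* kref_put() under server->mid_lock */
}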
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 9d486fbbfbbd..998fa51f9b68 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -201,6 +201,7 @@ static int cifs_xattr_set(const struct xattr_handler *handler,
break;
}
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
case XATTR_ACL_ACCESS:
#ifdef CONFIG_CIFS_POSIX
if (!value)
@@ -224,6 +225,7 @@ static int cifs_xattr_set(const struct xattr_handler *handler,
cifs_remap(cifs_sb));
#endif /* CONFIG_CIFS_POSIX */
break;
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
}
out:
@@ -364,7 +366,7 @@ static int cifs_xattr_get(const struct xattr_handler *handler,
}
break;
}
-
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
case XATTR_ACL_ACCESS:
#ifdef CONFIG_CIFS_POSIX
if (sb->s_flags & SB_POSIXACL)
@@ -384,6 +386,7 @@ static int cifs_xattr_get(const struct xattr_handler *handler,
cifs_remap(cifs_sb));
#endif /* CONFIG_CIFS_POSIX */
break;
+#endif /* ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
}
/* We could add an additional check for streams ie
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
index 14e0ef5e9a20..12bd61d20f69 100644
--- a/fs/crypto/fname.c
+++ b/fs/crypto/fname.c
@@ -86,7 +86,8 @@ static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
/**
* fscrypt_fname_encrypt() - encrypt a filename
* @inode: inode of the parent directory (for regular filenames)
- * or of the symlink (for symlink targets)
+ * or of the symlink (for symlink targets). Key must already be
+ * set up.
* @iname: the filename to encrypt
* @out: (output) the encrypted filename
* @olen: size of the encrypted filename. It must be at least @iname->len.
@@ -137,6 +138,7 @@ int fscrypt_fname_encrypt(const struct inode *inode, const struct qstr *iname,
return 0;
}
+EXPORT_SYMBOL_GPL(fscrypt_fname_encrypt);
/**
* fname_decrypt() - decrypt a filename
@@ -264,9 +266,9 @@ static int fscrypt_base64url_decode(const char *src, int srclen, u8 *dst)
return bp - dst;
}
-bool fscrypt_fname_encrypted_size(const union fscrypt_policy *policy,
- u32 orig_len, u32 max_len,
- u32 *encrypted_len_ret)
+bool __fscrypt_fname_encrypted_size(const union fscrypt_policy *policy,
+ u32 orig_len, u32 max_len,
+ u32 *encrypted_len_ret)
{
int padding = 4 << (fscrypt_policy_flags(policy) &
FSCRYPT_POLICY_FLAGS_PAD_MASK);
@@ -281,6 +283,29 @@ bool fscrypt_fname_encrypted_size(const union fscrypt_policy *policy,
}
/**
+ * fscrypt_fname_encrypted_size() - calculate length of encrypted filename
+ * @inode: parent inode of dentry name being encrypted. Key must
+ * already be set up.
+ * @orig_len: length of the original filename
+ * @max_len: maximum length to return
+ * @encrypted_len_ret: where calculated length should be returned (on success)
+ *
+ * Filenames that are shorter than the maximum length may have their lengths
+ * increased slightly by encryption, due to padding that is applied.
+ *
+ * Return: false if orig_len is greater than max_len. Otherwise, returns true
+ * and fills in encrypted_len_ret with the encrypted length (at most max_len).
+ */
+bool fscrypt_fname_encrypted_size(const struct inode *inode, u32 orig_len,
+ u32 max_len, u32 *encrypted_len_ret)
+{
+ return __fscrypt_fname_encrypted_size(&inode->i_crypt_info->ci_policy,
+ orig_len, max_len,
+ encrypted_len_ret);
+}
+EXPORT_SYMBOL_GPL(fscrypt_fname_encrypted_size);
+
+/**
* fscrypt_fname_alloc_buffer() - allocate a buffer for presented filenames
* @max_encrypted_len: maximum length of encrypted filenames the buffer will be
* used to present
@@ -435,8 +460,7 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
return ret;
if (fscrypt_has_encryption_key(dir)) {
- if (!fscrypt_fname_encrypted_size(&dir->i_crypt_info->ci_policy,
- iname->len, NAME_MAX,
+ if (!fscrypt_fname_encrypted_size(dir, iname->len, NAME_MAX,
&fname->crypto_buf.len))
return -ENAMETOOLONG;
fname->crypto_buf.name = kmalloc(fname->crypto_buf.len,
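fscrypt_fname_encrypted_size() now takes the parent inode directly (its key must already be set up) and is exported alongside fscrypt_fname_encrypt(). An illustrative helper mirroring the call sequence in fscrypt_setup_filename() above; the buffer handling is hypothetical:

static int example_alloc_encrypted_name(struct inode *dir,
					const struct qstr *iname,
					u8 **out, u32 *outlen)
{
	/* compute the padded ciphertext length first */
	if (!fscrypt_fname_encrypted_size(dir, iname->len, NAME_MAX, outlen))
		return -ENAMETOOLONG;

	*out = kmalloc(*outlen, GFP_NOFS);
	if (!*out)
		return -ENOMEM;

	return fscrypt_fname_encrypt(dir, iname, *out, *outlen);
}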
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index f5be777d8279..3afdaa084773 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -297,14 +297,11 @@ void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,
const struct fscrypt_info *ci);
/* fname.c */
-int fscrypt_fname_encrypt(const struct inode *inode, const struct qstr *iname,
- u8 *out, unsigned int olen);
-bool fscrypt_fname_encrypted_size(const union fscrypt_policy *policy,
- u32 orig_len, u32 max_len,
- u32 *encrypted_len_ret);
+bool __fscrypt_fname_encrypted_size(const union fscrypt_policy *policy,
+ u32 orig_len, u32 max_len,
+ u32 *encrypted_len_ret);
/* hkdf.c */
-
struct fscrypt_hkdf {
struct crypto_shash *hmac_tfm;
};
diff --git a/fs/crypto/hooks.c b/fs/crypto/hooks.c
index af74599ae1cf..7c01025879b3 100644
--- a/fs/crypto/hooks.c
+++ b/fs/crypto/hooks.c
@@ -228,9 +228,9 @@ int fscrypt_prepare_symlink(struct inode *dir, const char *target,
* counting it (even though it is meaningless for ciphertext) is simpler
* for now since filesystems will assume it is there and subtract it.
*/
- if (!fscrypt_fname_encrypted_size(policy, len,
- max_len - sizeof(struct fscrypt_symlink_data),
- &disk_link->len))
+ if (!__fscrypt_fname_encrypted_size(policy, len,
+ max_len - sizeof(struct fscrypt_symlink_data),
+ &disk_link->len))
return -ENAMETOOLONG;
disk_link->len += sizeof(struct fscrypt_symlink_data);
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
index 8a054e6d1e68..80b8ca0f340b 100644
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -694,6 +694,32 @@ const union fscrypt_policy *fscrypt_policy_to_inherit(struct inode *dir)
}
/**
+ * fscrypt_context_for_new_inode() - create an encryption context for a new inode
+ * @ctx: where context should be written
+ * @inode: inode from which to fetch policy and nonce
+ *
+ * Given an in-core "prepared" (via fscrypt_prepare_new_inode) inode,
+ * generate a new context and write it to ctx. ctx _must_ be at least
+ * FSCRYPT_SET_CONTEXT_MAX_SIZE bytes.
+ *
+ * Return: size of the resulting context or a negative error code.
+ */
+int fscrypt_context_for_new_inode(void *ctx, struct inode *inode)
+{
+ struct fscrypt_info *ci = inode->i_crypt_info;
+
+ BUILD_BUG_ON(sizeof(union fscrypt_context) !=
+ FSCRYPT_SET_CONTEXT_MAX_SIZE);
+
+ /* fscrypt_prepare_new_inode() should have set up the key already. */
+ if (WARN_ON_ONCE(!ci))
+ return -ENOKEY;
+
+ return fscrypt_new_context(ctx, &ci->ci_policy, ci->ci_nonce);
+}
+EXPORT_SYMBOL_GPL(fscrypt_context_for_new_inode);
+
+/**
* fscrypt_set_context() - Set the fscrypt context of a new inode
* @inode: a new inode
* @fs_data: private data given by FS and passed to ->set_context()
@@ -709,12 +735,9 @@ int fscrypt_set_context(struct inode *inode, void *fs_data)
union fscrypt_context ctx;
int ctxsize;
- /* fscrypt_prepare_new_inode() should have set up the key already. */
- if (WARN_ON_ONCE(!ci))
- return -ENOKEY;
-
- BUILD_BUG_ON(sizeof(ctx) != FSCRYPT_SET_CONTEXT_MAX_SIZE);
- ctxsize = fscrypt_new_context(&ctx, &ci->ci_policy, ci->ci_nonce);
+ ctxsize = fscrypt_context_for_new_inode(&ctx, inode);
+ if (ctxsize < 0)
+ return ctxsize;
/*
* This may be the first time the inode number is available, so do any
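fscrypt_set_context() is split so that filesystems can obtain the raw context blob via the new fscrypt_context_for_new_inode(). A hypothetical caller, assuming only what the kerneldoc above states (the destination buffer must hold at least FSCRYPT_SET_CONTEXT_MAX_SIZE bytes):

static int example_fill_crypt_context(struct inode *inode, void *buf)
{
	int ctxsize = fscrypt_context_for_new_inode(buf, inode);

	if (ctxsize < 0)
		return ctxsize;	/* e.g. -ENOKEY if no key was set up */

	/* write ctxsize bytes from buf into the on-disk inode here */
	return ctxsize;
}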
diff --git a/fs/dax.c b/fs/dax.c
index 649ff51c9a26..c440dcef4b1b 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -334,13 +334,35 @@ static unsigned long dax_end_pfn(void *entry)
for (pfn = dax_to_pfn(entry); \
pfn < dax_end_pfn(entry); pfn++)
+static inline bool dax_mapping_is_cow(struct address_space *mapping)
+{
+ return (unsigned long)mapping == PAGE_MAPPING_DAX_COW;
+}
+
/*
- * TODO: for reflink+dax we need a way to associate a single page with
- * multiple address_space instances at different linear_page_index()
- * offsets.
+ * Set the page->mapping with FS_DAX_MAPPING_COW flag, increase the refcount.
+ */
+static inline void dax_mapping_set_cow(struct page *page)
+{
+ if ((uintptr_t)page->mapping != PAGE_MAPPING_DAX_COW) {
+ /*
+ * Reset the index if the page was already mapped
+ * regularly before.
+ */
+ if (page->mapping)
+ page->index = 1;
+ page->mapping = (void *)PAGE_MAPPING_DAX_COW;
+ }
+ page->index++;
+}
+
+/*
+ * When called from dax_insert_entry(), the cow flag indicates whether this
+ * entry is shared by multiple files. If so, set page->mapping to
+ * FS_DAX_MAPPING_COW and use page->index as the refcount.
*/
static void dax_associate_entry(void *entry, struct address_space *mapping,
- struct vm_area_struct *vma, unsigned long address)
+ struct vm_area_struct *vma, unsigned long address, bool cow)
{
unsigned long size = dax_entry_size(entry), pfn, index;
int i = 0;
@@ -352,9 +374,13 @@ static void dax_associate_entry(void *entry, struct address_space *mapping,
for_each_mapped_pfn(entry, pfn) {
struct page *page = pfn_to_page(pfn);
- WARN_ON_ONCE(page->mapping);
- page->mapping = mapping;
- page->index = index + i++;
+ if (cow) {
+ dax_mapping_set_cow(page);
+ } else {
+ WARN_ON_ONCE(page->mapping);
+ page->mapping = mapping;
+ page->index = index + i++;
+ }
}
}
@@ -370,7 +396,12 @@ static void dax_disassociate_entry(void *entry, struct address_space *mapping,
struct page *page = pfn_to_page(pfn);
WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
- WARN_ON_ONCE(page->mapping && page->mapping != mapping);
+ if (dax_mapping_is_cow(page->mapping)) {
+ /* keep the CoW flag if this page is still shared */
+ if (page->index-- > 0)
+ continue;
+ } else
+ WARN_ON_ONCE(page->mapping && page->mapping != mapping);
page->mapping = NULL;
page->index = 0;
}
@@ -456,6 +487,69 @@ void dax_unlock_page(struct page *page, dax_entry_t cookie)
}
/*
+ * dax_lock_mapping_entry - Lock the DAX entry corresponding to a mapping
+ * @mapping: the file's mapping whose entry we want to lock
+ * @index: the offset within this file
+ * @page: output the dax page corresponding to this dax entry
+ *
+ * Return: A cookie to pass to dax_unlock_mapping_entry() or 0 if the entry
+ * could not be locked.
+ */
+dax_entry_t dax_lock_mapping_entry(struct address_space *mapping, pgoff_t index,
+ struct page **page)
+{
+ XA_STATE(xas, NULL, 0);
+ void *entry;
+
+ rcu_read_lock();
+ for (;;) {
+ entry = NULL;
+ if (!dax_mapping(mapping))
+ break;
+
+ xas.xa = &mapping->i_pages;
+ xas_lock_irq(&xas);
+ xas_set(&xas, index);
+ entry = xas_load(&xas);
+ if (dax_is_locked(entry)) {
+ rcu_read_unlock();
+ wait_entry_unlocked(&xas, entry);
+ rcu_read_lock();
+ continue;
+ }
+ if (!entry ||
+ dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
+ /*
+ * Because we look the entry up by the file's mapping and
+ * index, it may not have been inserted yet, or it may be a
+ * zero/empty entry. This is not an error case, so return a
+ * special value and do not output @page.
+ */
+ entry = (void *)~0UL;
+ } else {
+ *page = pfn_to_page(dax_to_pfn(entry));
+ dax_lock_entry(&xas, entry);
+ }
+ xas_unlock_irq(&xas);
+ break;
+ }
+ rcu_read_unlock();
+ return (dax_entry_t)entry;
+}
+
+void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index,
+ dax_entry_t cookie)
+{
+ XA_STATE(xas, &mapping->i_pages, index);
+
+ if (cookie == ~0UL)
+ return;
+
+ dax_unlock_entry(&xas, (void *)cookie);
+}
+
+/*
* Find page cache entry at given index. If it is a DAX entry, return it
* with the entry locked. If the page cache doesn't contain an entry at
* that index, add a locked empty entry.
@@ -736,22 +830,42 @@ static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter
}
/*
+ * MAP_SYNC on a dax mapping guarantees dirty metadata is
+ * flushed on write-faults (non-cow), but not read-faults.
+ */
+static bool dax_fault_is_synchronous(const struct iomap_iter *iter,
+ struct vm_area_struct *vma)
+{
+ return (iter->flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC) &&
+ (iter->iomap.flags & IOMAP_F_DIRTY);
+}
+
+static bool dax_fault_is_cow(const struct iomap_iter *iter)
+{
+ return (iter->flags & IOMAP_WRITE) &&
+ (iter->iomap.flags & IOMAP_F_SHARED);
+}
+
+/*
* By this point grab_mapping_entry() has ensured that we have a locked entry
* of the appropriate size so we don't have to worry about downgrading PMDs to
* PTEs. If we happen to be trying to insert a PTE and there is a PMD
* already in the tree, we will skip the insertion and just dirty the PMD as
* appropriate.
*/
-static void *dax_insert_entry(struct xa_state *xas,
- struct address_space *mapping, struct vm_fault *vmf,
- void *entry, pfn_t pfn, unsigned long flags, bool dirty)
+static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
+ const struct iomap_iter *iter, void *entry, pfn_t pfn,
+ unsigned long flags)
{
+ struct address_space *mapping = vmf->vma->vm_file->f_mapping;
void *new_entry = dax_make_entry(pfn, flags);
+ bool dirty = !dax_fault_is_synchronous(iter, vmf->vma);
+ bool cow = dax_fault_is_cow(iter);
if (dirty)
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
- if (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE)) {
+ if (cow || (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE))) {
unsigned long index = xas->xa_index;
/* we are replacing a zero page with block mapping */
if (dax_is_pmd_entry(entry))
@@ -763,11 +877,12 @@ static void *dax_insert_entry(struct xa_state *xas,
xas_reset(xas);
xas_lock_irq(xas);
- if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
+ if (cow || dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
void *old;
dax_disassociate_entry(entry, mapping, false);
- dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
+ dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address,
+ cow);
/*
* Only swap our new entry into the page cache if the current
* entry is a zero page or an empty entry. If a normal PTE or
@@ -787,6 +902,9 @@ static void *dax_insert_entry(struct xa_state *xas,
if (dirty)
xas_set_mark(xas, PAGECACHE_TAG_DIRTY);
+ if (cow)
+ xas_set_mark(xas, PAGECACHE_TAG_TOWRITE);
+
xas_unlock_irq(xas);
return entry;
}
@@ -931,20 +1049,22 @@ int dax_writeback_mapping_range(struct address_space *mapping,
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
-static int dax_iomap_pfn(const struct iomap *iomap, loff_t pos, size_t size,
- pfn_t *pfnp)
+static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos,
+ size_t size, void **kaddr, pfn_t *pfnp)
{
pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
- int id, rc;
+ int id, rc = 0;
long length;
id = dax_read_lock();
length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
- DAX_ACCESS, NULL, pfnp);
+ DAX_ACCESS, kaddr, pfnp);
if (length < 0) {
rc = length;
goto out;
}
+ if (!pfnp)
+ goto out_check_addr;
rc = -EINVAL;
if (PFN_PHYS(length) < size)
goto out;
@@ -954,11 +1074,71 @@ static int dax_iomap_pfn(const struct iomap *iomap, loff_t pos, size_t size,
if (length > 1 && !pfn_t_devmap(*pfnp))
goto out;
rc = 0;
+
+out_check_addr:
+ if (!kaddr)
+ goto out;
+ if (!*kaddr)
+ rc = -EFAULT;
out:
dax_read_unlock(id);
return rc;
}
+/**
+ * dax_iomap_cow_copy - Copy the data from source to destination before write
+ * @pos: address to do copy from.
+ * @length: size of copy operation.
+ * @align_size: aligned w.r.t align_size (either PMD_SIZE or PAGE_SIZE)
+ * @srcmap: iomap srcmap
+ * @daddr: destination address to copy to.
+ *
+ * This is called from two places: during a DAX write fault (page aligned),
+ * to copy @length bytes of data to @daddr, or from dax_iomap_actor() during
+ * a normal DAX write, to copy an unaligned head or tail of the range. In the
+ * latter case the copy of the aligned ranges is handled by dax_iomap_actor()
+ * itself.
+ */
+static int dax_iomap_cow_copy(loff_t pos, uint64_t length, size_t align_size,
+ const struct iomap *srcmap, void *daddr)
+{
+ loff_t head_off = pos & (align_size - 1);
+ size_t size = ALIGN(head_off + length, align_size);
+ loff_t end = pos + length;
+ loff_t pg_end = round_up(end, align_size);
+ bool copy_all = head_off == 0 && end == pg_end;
+ void *saddr = 0;
+ int ret = 0;
+
+ ret = dax_iomap_direct_access(srcmap, pos, size, &saddr, NULL);
+ if (ret)
+ return ret;
+
+ if (copy_all) {
+ ret = copy_mc_to_kernel(daddr, saddr, length);
+ return ret ? -EIO : 0;
+ }
+
+ /* Copy the head part of the range */
+ if (head_off) {
+ ret = copy_mc_to_kernel(daddr, saddr, head_off);
+ if (ret)
+ return -EIO;
+ }
+
+ /* Copy the tail part of the range */
+ if (end < pg_end) {
+ loff_t tail_off = head_off + length;
+ loff_t tail_len = pg_end - end;
+
+ ret = copy_mc_to_kernel(daddr + tail_off, saddr + tail_off,
+ tail_len);
+ if (ret)
+ return -EIO;
+ }
+ return 0;
+}
+
/*
* The user has performed a load from a hole in the file. Allocating a new
* page in the file would cause excessive storage usage for workloads with
@@ -966,17 +1146,15 @@ out:
* If this page is ever written to we will re-fault and change the mapping to
* point to real DAX storage instead.
*/
-static vm_fault_t dax_load_hole(struct xa_state *xas,
- struct address_space *mapping, void **entry,
- struct vm_fault *vmf)
+static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf,
+ const struct iomap_iter *iter, void **entry)
{
- struct inode *inode = mapping->host;
+ struct inode *inode = iter->inode;
unsigned long vaddr = vmf->address;
pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
vm_fault_t ret;
- *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
- DAX_ZERO_PAGE, false);
+ *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE);
ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
trace_dax_load_hole(inode, vmf, ret);
@@ -985,7 +1163,7 @@ static vm_fault_t dax_load_hole(struct xa_state *xas,
#ifdef CONFIG_FS_DAX_PMD
static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
- const struct iomap *iomap, void **entry)
+ const struct iomap_iter *iter, void **entry)
{
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
unsigned long pmd_addr = vmf->address & PMD_MASK;
@@ -1003,8 +1181,8 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
goto fallback;
pfn = page_to_pfn_t(zero_page);
- *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
- DAX_PMD | DAX_ZERO_PAGE, false);
+ *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn,
+ DAX_PMD | DAX_ZERO_PAGE);
if (arch_needs_pgtable_deposit()) {
pgtable = pte_alloc_one(vma->vm_mm);
@@ -1037,23 +1215,34 @@ fallback:
}
#else
static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
- const struct iomap *iomap, void **entry)
+ const struct iomap_iter *iter, void **entry)
{
return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */
-static int dax_memzero(struct dax_device *dax_dev, pgoff_t pgoff,
- unsigned int offset, size_t size)
+static int dax_memzero(struct iomap_iter *iter, loff_t pos, size_t size)
{
+ const struct iomap *iomap = &iter->iomap;
+ const struct iomap *srcmap = iomap_iter_srcmap(iter);
+ unsigned offset = offset_in_page(pos);
+ pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
void *kaddr;
long ret;
- ret = dax_direct_access(dax_dev, pgoff, 1, DAX_ACCESS, &kaddr, NULL);
- if (ret > 0) {
- memset(kaddr + offset, 0, size);
- dax_flush(dax_dev, kaddr + offset, size);
- }
+ ret = dax_direct_access(iomap->dax_dev, pgoff, 1, DAX_ACCESS, &kaddr,
+ NULL);
+ if (ret < 0)
+ return ret;
+ memset(kaddr + offset, 0, size);
+ if (srcmap->addr != iomap->addr) {
+ ret = dax_iomap_cow_copy(pos, size, PAGE_SIZE, srcmap,
+ kaddr);
+ if (ret < 0)
+ return ret;
+ dax_flush(iomap->dax_dev, kaddr, PAGE_SIZE);
+ } else
+ dax_flush(iomap->dax_dev, kaddr + offset, size);
return ret;
}
@@ -1080,7 +1269,7 @@ static s64 dax_zero_iter(struct iomap_iter *iter, bool *did_zero)
if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE)
rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
else
- rc = dax_memzero(iomap->dax_dev, pgoff, offset, size);
+ rc = dax_memzero(iter, pos, size);
dax_read_unlock(id);
if (rc < 0)
@@ -1129,15 +1318,17 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
struct iov_iter *iter)
{
const struct iomap *iomap = &iomi->iomap;
+ const struct iomap *srcmap = &iomi->srcmap;
loff_t length = iomap_length(iomi);
loff_t pos = iomi->pos;
struct dax_device *dax_dev = iomap->dax_dev;
loff_t end = pos + length, done = 0;
+ bool write = iov_iter_rw(iter) == WRITE;
ssize_t ret = 0;
size_t xfer;
int id;
- if (iov_iter_rw(iter) == READ) {
+ if (!write) {
end = min(end, i_size_read(iomi->inode));
if (pos >= end)
return 0;
@@ -1146,7 +1337,12 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
return iov_iter_zero(min(length, end - pos), iter);
}
- if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
+ /*
+ * In DAX mode, enforce either pure overwrites of written extents, or
+ * writes to unwritten extents as part of a copy-on-write operation.
+ */
+ if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED &&
+ !(iomap->flags & IOMAP_F_SHARED)))
return -EIO;
/*
@@ -1188,6 +1384,14 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
break;
}
+ if (write &&
+ srcmap->type != IOMAP_HOLE && srcmap->addr != iomap->addr) {
+ ret = dax_iomap_cow_copy(pos, length, PAGE_SIZE, srcmap,
+ kaddr);
+ if (ret)
+ break;
+ }
+
map_len = PFN_PHYS(map_len);
kaddr += offset;
map_len -= offset;
@@ -1197,7 +1401,7 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
if (recovery)
xfer = dax_recovery_write(dax_dev, pgoff, kaddr,
map_len, iter);
- else if (iov_iter_rw(iter) == WRITE)
+ else if (write)
xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
map_len, iter);
else
@@ -1268,17 +1472,6 @@ static vm_fault_t dax_fault_return(int error)
}
/*
- * MAP_SYNC on a dax mapping guarantees dirty metadata is
- * flushed on write-faults (non-cow), but not read-faults.
- */
-static bool dax_fault_is_synchronous(unsigned long flags,
- struct vm_area_struct *vma, const struct iomap *iomap)
-{
- return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
- && (iomap->flags & IOMAP_F_DIRTY);
-}
-
-/*
* When handling a synchronous page fault and the inode need a fsync, we can
* insert the PTE/PMD into page tables only after that fsync happened. Skip
* insertion for now and return the pfn so that caller can insert it after the
@@ -1335,15 +1528,15 @@ static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
const struct iomap_iter *iter, pfn_t *pfnp,
struct xa_state *xas, void **entry, bool pmd)
{
- struct address_space *mapping = vmf->vma->vm_file->f_mapping;
const struct iomap *iomap = &iter->iomap;
+ const struct iomap *srcmap = &iter->srcmap;
size_t size = pmd ? PMD_SIZE : PAGE_SIZE;
loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT;
- bool write = vmf->flags & FAULT_FLAG_WRITE;
- bool sync = dax_fault_is_synchronous(iter->flags, vmf->vma, iomap);
+ bool write = iter->flags & IOMAP_WRITE;
unsigned long entry_flags = pmd ? DAX_PMD : 0;
int err = 0;
pfn_t pfn;
+ void *kaddr;
if (!pmd && vmf->cow_page)
return dax_fault_cow_page(vmf, iter);
@@ -1352,23 +1545,29 @@ static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
if (!write &&
(iomap->type == IOMAP_UNWRITTEN || iomap->type == IOMAP_HOLE)) {
if (!pmd)
- return dax_load_hole(xas, mapping, entry, vmf);
- return dax_pmd_load_hole(xas, vmf, iomap, entry);
+ return dax_load_hole(xas, vmf, iter, entry);
+ return dax_pmd_load_hole(xas, vmf, iter, entry);
}
- if (iomap->type != IOMAP_MAPPED) {
+ if (iomap->type != IOMAP_MAPPED && !(iomap->flags & IOMAP_F_SHARED)) {
WARN_ON_ONCE(1);
return pmd ? VM_FAULT_FALLBACK : VM_FAULT_SIGBUS;
}
- err = dax_iomap_pfn(&iter->iomap, pos, size, &pfn);
+ err = dax_iomap_direct_access(iomap, pos, size, &kaddr, &pfn);
if (err)
return pmd ? VM_FAULT_FALLBACK : dax_fault_return(err);
- *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, entry_flags,
- write && !sync);
+ *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, entry_flags);
+
+ if (write &&
+ srcmap->type != IOMAP_HOLE && srcmap->addr != iomap->addr) {
+ err = dax_iomap_cow_copy(pos, size, size, srcmap, kaddr);
+ if (err)
+ return dax_fault_return(err);
+ }
- if (sync)
+ if (dax_fault_is_synchronous(iter, vmf->vma))
return dax_fault_synchronous_pfnp(pfnp, pfn);
/* insert PMD pfn */
@@ -1674,3 +1873,85 @@ vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
return dax_insert_pfn_mkwrite(vmf, pfn, order);
}
EXPORT_SYMBOL_GPL(dax_finish_sync_fault);
+
+static loff_t dax_range_compare_iter(struct iomap_iter *it_src,
+ struct iomap_iter *it_dest, u64 len, bool *same)
+{
+ const struct iomap *smap = &it_src->iomap;
+ const struct iomap *dmap = &it_dest->iomap;
+ loff_t pos1 = it_src->pos, pos2 = it_dest->pos;
+ void *saddr, *daddr;
+ int id, ret;
+
+ len = min(len, min(smap->length, dmap->length));
+
+ if (smap->type == IOMAP_HOLE && dmap->type == IOMAP_HOLE) {
+ *same = true;
+ return len;
+ }
+
+ if (smap->type == IOMAP_HOLE || dmap->type == IOMAP_HOLE) {
+ *same = false;
+ return 0;
+ }
+
+ id = dax_read_lock();
+ ret = dax_iomap_direct_access(smap, pos1, ALIGN(pos1 + len, PAGE_SIZE),
+ &saddr, NULL);
+ if (ret < 0)
+ goto out_unlock;
+
+ ret = dax_iomap_direct_access(dmap, pos2, ALIGN(pos2 + len, PAGE_SIZE),
+ &daddr, NULL);
+ if (ret < 0)
+ goto out_unlock;
+
+ *same = !memcmp(saddr, daddr, len);
+ if (!*same)
+ len = 0;
+ dax_read_unlock(id);
+ return len;
+
+out_unlock:
+ dax_read_unlock(id);
+ return -EIO;
+}
+
+int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
+ struct inode *dst, loff_t dstoff, loff_t len, bool *same,
+ const struct iomap_ops *ops)
+{
+ struct iomap_iter src_iter = {
+ .inode = src,
+ .pos = srcoff,
+ .len = len,
+ .flags = IOMAP_DAX,
+ };
+ struct iomap_iter dst_iter = {
+ .inode = dst,
+ .pos = dstoff,
+ .len = len,
+ .flags = IOMAP_DAX,
+ };
+ int ret;
+
+ while ((ret = iomap_iter(&src_iter, ops)) > 0) {
+ while ((ret = iomap_iter(&dst_iter, ops)) > 0) {
+ dst_iter.processed = dax_range_compare_iter(&src_iter,
+ &dst_iter, len, same);
+ }
+ if (ret <= 0)
+ src_iter.processed = ret;
+ }
+ return ret;
+}
+
+int dax_remap_file_range_prep(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t *len, unsigned int remap_flags,
+ const struct iomap_ops *ops)
+{
+ return __generic_remap_file_range_prep(file_in, pos_in, file_out,
+ pos_out, len, remap_flags, ops);
+}
+EXPORT_SYMBOL_GPL(dax_remap_file_range_prep);
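With CoW support in place, dax.c also gains dax_dedupe_file_range_compare() and dax_remap_file_range_prep() for reflink-capable filesystems. A sketch of how a filesystem's ->remap_file_range might use the prep helper; my_iomap_ops and the surrounding flag handling are hypothetical, and inode locking is omitted:

/* hypothetical, filesystem-provided DAX iomap_ops */
extern const struct iomap_ops my_iomap_ops;

static loff_t example_remap_file_range(struct file *file_in, loff_t pos_in,
				       struct file *file_out, loff_t pos_out,
				       loff_t len, unsigned int remap_flags)
{
	int ret;

	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;

	ret = dax_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
					&len, remap_flags, &my_iomap_ops);
	if (ret < 0 || len == 0)
		return ret;

	/* filesystem-specific extent sharing would happen here */
	return len;
}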
diff --git a/fs/dcache.c b/fs/dcache.c
index ea5cdec24ea7..bb0c4d0038db 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2248,10 +2248,16 @@ struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
}
EXPORT_SYMBOL(d_add_ci);
-
-static inline bool d_same_name(const struct dentry *dentry,
- const struct dentry *parent,
- const struct qstr *name)
+/**
+ * d_same_name - compare dentry name with case-exact name
+ * @parent: parent dentry
+ * @dentry: the negative dentry that was passed to the parent's lookup func
+ * @name: the case-exact name to be associated with the returned dentry
+ *
+ * Return: true if the names are the same, false otherwise
+ */
+bool d_same_name(const struct dentry *dentry, const struct dentry *parent,
+ const struct qstr *name)
{
if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) {
if (dentry->d_name.len != name->len)
@@ -2262,6 +2268,49 @@ static inline bool d_same_name(const struct dentry *dentry,
dentry->d_name.len, dentry->d_name.name,
name) == 0;
}
+EXPORT_SYMBOL_GPL(d_same_name);
+
+/*
+ * This is __d_lookup_rcu() when the parent dentry has
+ * DCACHE_OP_COMPARE, which makes things much nastier.
+ */
+static noinline struct dentry *__d_lookup_rcu_op_compare(
+ const struct dentry *parent,
+ const struct qstr *name,
+ unsigned *seqp)
+{
+ u64 hashlen = name->hash_len;
+ struct hlist_bl_head *b = d_hash(hashlen_hash(hashlen));
+ struct hlist_bl_node *node;
+ struct dentry *dentry;
+
+ hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
+ int tlen;
+ const char *tname;
+ unsigned seq;
+
+seqretry:
+ seq = raw_seqcount_begin(&dentry->d_seq);
+ if (dentry->d_parent != parent)
+ continue;
+ if (d_unhashed(dentry))
+ continue;
+ if (dentry->d_name.hash != hashlen_hash(hashlen))
+ continue;
+ tlen = dentry->d_name.len;
+ tname = dentry->d_name.name;
+ /* we want a consistent (name,len) pair */
+ if (read_seqcount_retry(&dentry->d_seq, seq)) {
+ cpu_relax();
+ goto seqretry;
+ }
+ if (parent->d_op->d_compare(dentry, tlen, tname, name) != 0)
+ continue;
+ *seqp = seq;
+ return dentry;
+ }
+ return NULL;
+}
/**
* __d_lookup_rcu - search for a dentry (racy, store-free)
@@ -2309,6 +2358,9 @@ struct dentry *__d_lookup_rcu(const struct dentry *parent,
* Keep the two functions in sync.
*/
+ if (unlikely(parent->d_flags & DCACHE_OP_COMPARE))
+ return __d_lookup_rcu_op_compare(parent, name, seqp);
+
/*
* The hash list is protected using RCU.
*
@@ -2325,7 +2377,6 @@ struct dentry *__d_lookup_rcu(const struct dentry *parent,
hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
unsigned seq;
-seqretry:
/*
* The dentry sequence count protects us from concurrent
* renames, and thus protects parent and name fields.
@@ -2348,28 +2399,10 @@ seqretry:
continue;
if (d_unhashed(dentry))
continue;
-
- if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
- int tlen;
- const char *tname;
- if (dentry->d_name.hash != hashlen_hash(hashlen))
- continue;
- tlen = dentry->d_name.len;
- tname = dentry->d_name.name;
- /* we want a consistent (name,len) pair */
- if (read_seqcount_retry(&dentry->d_seq, seq)) {
- cpu_relax();
- goto seqretry;
- }
- if (parent->d_op->d_compare(dentry,
- tlen, tname, name) != 0)
- continue;
- } else {
- if (dentry->d_name.hash_len != hashlen)
- continue;
- if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
- continue;
- }
+ if (dentry->d_name.hash_len != hashlen)
+ continue;
+ if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
+ continue;
*seqp = seq;
return dentry;
}
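The DCACHE_OP_COMPARE case is moved out of __d_lookup_rcu() into a noinline helper so the common hash/length comparison loop stays compact. The same structural idea in miniature, with made-up comparison functions rather than dcache code:

static noinline int example_slow_match(const char *a, const char *b, size_t len)
{
	/* stand-in for ->d_compare(): some expensive, fs-specific compare */
	return strncasecmp(a, b, len);
}

static bool example_match(const char *a, const char *b, size_t len, bool slow)
{
	if (unlikely(slow))
		return example_slow_match(a, b, len) == 0;
	/* hot path stays small and branch-predictable */
	return memcmp(a, b, len) == 0;
}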
diff --git a/fs/direct-io.c b/fs/direct-io.c
index df5e2d048799..f669163d5860 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -169,7 +169,7 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
const enum req_op dio_op = dio->opf & REQ_OP_MASK;
ssize_t ret;
- ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES,
+ ret = iov_iter_get_pages2(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES,
&sdio->from);
if (ret < 0 && sdio->blocks_available && dio_op == REQ_OP_WRITE) {
@@ -191,7 +191,6 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
}
if (ret >= 0) {
- iov_iter_advance(sdio->iter, ret);
ret += sdio->from;
sdio->head = 0;
sdio->tail = (ret + PAGE_SIZE - 1) / PAGE_SIZE;
@@ -1251,7 +1250,7 @@ ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
spin_lock_init(&dio->bio_lock);
dio->refcount = 1;
- dio->should_dirty = iter_is_iovec(iter) && iov_iter_rw(iter) == READ;
+ dio->should_dirty = user_backed_iter(iter) && iov_iter_rw(iter) == READ;
sdio.iter = iter;
sdio.final_block_in_request = end >> blkbits;
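dio_refill_pages() switches to iov_iter_get_pages2(), which advances the iterator itself (hence the dropped iov_iter_advance() call), and the dirtying decision now uses user_backed_iter(). A small sketch of that pattern, assuming only those two helpers:

static ssize_t example_pin_pages(struct iov_iter *iter, struct page **pages,
				 size_t maxsize, unsigned int maxpages,
				 size_t *offset, bool *should_dirty)
{
	ssize_t ret;

	/* the iterator is advanced by the call itself */
	ret = iov_iter_get_pages2(iter, pages, maxsize, maxpages, offset);
	if (ret < 0)
		return ret;

	/* only pages backed by user memory need dirtying on a READ */
	*should_dirty = user_backed_iter(iter) && iov_iter_rw(iter) == READ;
	return ret;
}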
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index 95addc5c9d34..3173debeaa5a 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -255,7 +255,8 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
if (IS_ERR(bdev))
return PTR_ERR(bdev);
dif->bdev = bdev;
- dif->dax_dev = fs_dax_get_by_bdev(bdev, &dif->dax_part_off);
+ dif->dax_dev = fs_dax_get_by_bdev(bdev, &dif->dax_part_off,
+ NULL, NULL);
}
dif->blocks = le32_to_cpu(dis->blocks);
@@ -720,7 +721,8 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
}
sbi->dax_dev = fs_dax_get_by_bdev(sb->s_bdev,
- &sbi->dax_part_off);
+ &sbi->dax_part_off,
+ NULL, NULL);
}
err = erofs_read_superblock(sb);
@@ -812,7 +814,7 @@ static int erofs_release_device_info(int id, void *ptr, void *data)
{
struct erofs_device_info *dif = ptr;
- fs_put_dax(dif->dax_dev);
+ fs_put_dax(dif->dax_dev, NULL);
if (dif->bdev)
blkdev_put(dif->bdev, FMODE_READ | FMODE_EXCL);
erofs_fscache_unregister_cookie(&dif->fscache);
@@ -886,7 +888,7 @@ static void erofs_kill_sb(struct super_block *sb)
return;
erofs_free_dev_context(sbi->devs);
- fs_put_dax(sbi->dax_dev);
+ fs_put_dax(sbi->dax_dev, NULL);
erofs_fscache_unregister_cookie(&sbi->s_fscache);
erofs_fscache_unregister_fs(sb);
kfree(sbi->opt.fsid);
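fs_dax_get_by_bdev() grew holder and holder-ops parameters; erofs opts out of dax holder notifications by passing NULL for both, and must pass the same holder to fs_put_dax(). A minimal sketch of the paired calls:

static struct dax_device *example_get_dax(struct block_device *bdev,
					  u64 *part_off)
{
	/* no holder, no notification callbacks */
	return fs_dax_get_by_bdev(bdev, part_off, NULL, NULL);
}

static void example_put_dax(struct dax_device *dax_dev)
{
	fs_put_dax(dax_dev, NULL);	/* holder must match the get */
}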
diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c
index ec9a1d780dc1..46627cb69abe 100644
--- a/fs/erofs/utils.c
+++ b/fs/erofs/utils.c
@@ -282,7 +282,7 @@ static struct shrinker erofs_shrinker_info = {
int __init erofs_init_shrinker(void)
{
- return register_shrinker(&erofs_shrinker_info);
+ return register_shrinker(&erofs_shrinker_info, "erofs-shrinker");
}
void erofs_exit_shrinker(void)
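register_shrinker() now takes a name, which is what the new shrinker debugfs reporting is keyed on. A self-contained sketch of a registration using the updated call; the shrinker itself is a do-nothing example:

static unsigned long example_count(struct shrinker *s,
				   struct shrink_control *sc)
{
	return 0;	/* nothing to reclaim in this sketch */
}

static unsigned long example_scan(struct shrinker *s,
				  struct shrink_control *sc)
{
	return SHRINK_STOP;
}

static struct shrinker example_shrinker = {
	.count_objects	= example_count,
	.scan_objects	= example_scan,
	.seeks		= DEFAULT_SEEKS,
};

static int __init example_init(void)
{
	return register_shrinker(&example_shrinker, "example-shrinker");
}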
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index e2daa940ebce..8b56b94e2f56 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1747,6 +1747,21 @@ static struct timespec64 *ep_timeout_to_timespec(struct timespec64 *to, long ms)
return to;
}
+/*
+ * autoremove_wake_function, but remove even on failure to wake up, because we
+ * know that default_wake_function/ttwu will only fail if the thread is already
+ * woken, and in that case the ep_poll loop will remove the entry anyway rather
+ * than try to reuse it.
+ */
+static int ep_autoremove_wake_function(struct wait_queue_entry *wq_entry,
+ unsigned int mode, int sync, void *key)
+{
+ int ret = default_wake_function(wq_entry, mode, sync, key);
+
+ list_del_init(&wq_entry->entry);
+ return ret;
+}
+
/**
* ep_poll - Retrieves ready events, and delivers them to the caller-supplied
* event buffer.
@@ -1828,8 +1843,15 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
* normal wakeup path no need to call __remove_wait_queue()
* explicitly, thus ep->lock is not taken, which halts the
* event delivery.
+ *
+ * In fact, we now use an even more aggressive function that
+ * unconditionally removes, because we don't reuse the wait
+ * entry between loop iterations. This lets us also avoid the
+ * performance issue if a process is killed, causing all of its
+ * threads to wake up without being removed normally.
*/
init_wait(&wait);
+ wait.func = ep_autoremove_wake_function;
write_lock_irq(&ep->lock);
/*
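ep_poll() keeps init_wait() but overrides the wake function with ep_autoremove_wake_function(), which removes the entry even when the wakeup fails. The general shape of that pattern as a standalone sketch (the epoll-specific locking around __add_wait_queue_exclusive() is not reproduced here):

static int always_remove_wake(struct wait_queue_entry *wq_entry,
			      unsigned int mode, int sync, void *key)
{
	int ret = default_wake_function(wq_entry, mode, sync, key);

	list_del_init(&wq_entry->entry);	/* remove even if ret == 0 */
	return ret;
}

static void example_wait_once(wait_queue_head_t *wq)
{
	struct wait_queue_entry wait;

	init_wait(&wait);
	wait.func = always_remove_wake;	/* override the default autoremove */

	prepare_to_wait_exclusive(wq, &wait, TASK_INTERRUPTIBLE);
	schedule();
	finish_wait(wq, &wait);
}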
diff --git a/fs/exec.c b/fs/exec.c
index 5fd73915c62c..9a5ca7b82bfc 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -584,11 +584,11 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
if (kmapped_page) {
flush_dcache_page(kmapped_page);
- kunmap(kmapped_page);
+ kunmap_local(kaddr);
put_arg_page(kmapped_page);
}
kmapped_page = page;
- kaddr = kmap(kmapped_page);
+ kaddr = kmap_local_page(kmapped_page);
kpos = pos & PAGE_MASK;
flush_arg_page(bprm, kpos, kmapped_page);
}
@@ -602,7 +602,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
out:
if (kmapped_page) {
flush_dcache_page(kmapped_page);
- kunmap(kmapped_page);
+ kunmap_local(kaddr);
put_arg_page(kmapped_page);
}
return ret;
@@ -880,11 +880,11 @@ int transfer_args_to_stack(struct linux_binprm *bprm,
for (index = MAX_ARG_PAGES - 1; index >= stop; index--) {
unsigned int offset = index == stop ? bprm->p & ~PAGE_MASK : 0;
- char *src = kmap(bprm->page[index]) + offset;
+ char *src = kmap_local_page(bprm->page[index]) + offset;
sp -= PAGE_SIZE - offset;
if (copy_to_user((void *) sp, src, PAGE_SIZE - offset) != 0)
ret = -EFAULT;
- kunmap(bprm->page[index]);
+ kunmap_local(src);
if (ret)
goto out;
}
@@ -1304,6 +1304,9 @@ int begin_new_exec(struct linux_binprm * bprm)
bprm->mm = NULL;
#ifdef CONFIG_POSIX_TIMERS
+ spin_lock_irq(&me->sighand->siglock);
+ posix_cpu_timers_exit(me);
+ spin_unlock_irq(&me->sighand->siglock);
exit_itimers(me);
flush_itimer_signals();
#endif
@@ -1683,13 +1686,13 @@ int remove_arg_zero(struct linux_binprm *bprm)
ret = -EFAULT;
goto out;
}
- kaddr = kmap_atomic(page);
+ kaddr = kmap_local_page(page);
for (; offset < PAGE_SIZE && kaddr[offset];
offset++, bprm->p++)
;
- kunmap_atomic(kaddr);
+ kunmap_local(kaddr);
put_arg_page(page);
} while (offset == PAGE_SIZE);
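The fs/exec.c hunks above convert kmap()/kmap_atomic() callers to the local mapping API. kmap_local_page() mappings are valid only in the context that created them, do not block other mappers of the page, and must be torn down with kunmap_local() on the returned address, in reverse (stack) order when several are live at once. A minimal sketch of the idiom, with illustrative buffer names:

    #include <linux/highmem.h>

    void *kaddr;

    kaddr = kmap_local_page(page);
    memcpy(kaddr + offset, buf, len);   /* use only in this task/CPU context */
    flush_dcache_page(page);
    kunmap_local(kaddr);                /* pass the address returned by the map */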
diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h
index 4a7a2308eb72..a8f8eee4937c 100644
--- a/fs/exfat/exfat_fs.h
+++ b/fs/exfat/exfat_fs.h
@@ -27,9 +27,9 @@ enum exfat_error_mode {
* exfat nls lossy flag
*/
enum {
- NLS_NAME_NO_LOSSY, /* no lossy */
- NLS_NAME_LOSSY, /* just detected incorrect filename(s) */
- NLS_NAME_OVERLEN, /* the length is over than its limit */
+ NLS_NAME_NO_LOSSY = 0, /* no lossy */
+ NLS_NAME_LOSSY = 1 << 0, /* just detected incorrect filename(s) */
+ NLS_NAME_OVERLEN = 1 << 1, /* the length is over than its limit */
};
#define EXFAT_HASH_BITS 8
@@ -483,6 +483,7 @@ struct inode *exfat_build_inode(struct super_block *sb,
void exfat_hash_inode(struct inode *inode, loff_t i_pos);
void exfat_unhash_inode(struct inode *inode);
struct inode *exfat_iget(struct super_block *sb, loff_t i_pos);
+int __exfat_write_inode(struct inode *inode, int sync);
int exfat_write_inode(struct inode *inode, struct writeback_control *wbc);
void exfat_evict_inode(struct inode *inode);
int exfat_block_truncate_page(struct inode *inode, loff_t from);
@@ -508,14 +509,16 @@ void __exfat_fs_error(struct super_block *sb, int report, const char *fmt, ...)
#define exfat_fs_error_ratelimit(sb, fmt, args...) \
__exfat_fs_error(sb, __ratelimit(&EXFAT_SB(sb)->ratelimit), \
fmt, ## args)
-void exfat_msg(struct super_block *sb, const char *lv, const char *fmt, ...)
- __printf(3, 4) __cold;
+
+/* expand to pr_*() with prefix */
#define exfat_err(sb, fmt, ...) \
- exfat_msg(sb, KERN_ERR, fmt, ##__VA_ARGS__)
+ pr_err("exFAT-fs (%s): " fmt "\n", (sb)->s_id, ##__VA_ARGS__)
#define exfat_warn(sb, fmt, ...) \
- exfat_msg(sb, KERN_WARNING, fmt, ##__VA_ARGS__)
+ pr_warn("exFAT-fs (%s): " fmt "\n", (sb)->s_id, ##__VA_ARGS__)
#define exfat_info(sb, fmt, ...) \
- exfat_msg(sb, KERN_INFO, fmt, ##__VA_ARGS__)
+ pr_info("exFAT-fs (%s): " fmt "\n", (sb)->s_id, ##__VA_ARGS__)
+#define exfat_debug(sb, fmt, ...) \
+ pr_debug("exFAT-fs (%s): " fmt "\n", (sb)->s_id, ##__VA_ARGS__)
void exfat_get_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts,
u8 tz, __le16 time, __le16 date, u8 time_cs);
diff --git a/fs/exfat/fatent.c b/fs/exfat/fatent.c
index 9de6a6b844c9..ee0b7cf51157 100644
--- a/fs/exfat/fatent.c
+++ b/fs/exfat/fatent.c
@@ -331,7 +331,7 @@ int exfat_alloc_cluster(struct inode *inode, unsigned int num_alloc,
/* find new cluster */
if (hint_clu == EXFAT_EOF_CLUSTER) {
if (sbi->clu_srch_ptr < EXFAT_FIRST_CLUSTER) {
- exfat_err(sb, "sbi->clu_srch_ptr is invalid (%u)\n",
+ exfat_err(sb, "sbi->clu_srch_ptr is invalid (%u)",
sbi->clu_srch_ptr);
sbi->clu_srch_ptr = EXFAT_FIRST_CLUSTER;
}
diff --git a/fs/exfat/file.c b/fs/exfat/file.c
index 20d4e47f57ab..4e0793f35e8f 100644
--- a/fs/exfat/file.c
+++ b/fs/exfat/file.c
@@ -101,7 +101,6 @@ int __exfat_truncate(struct inode *inode, loff_t new_size)
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
struct exfat_inode_info *ei = EXFAT_I(inode);
- int evict = (ei->dir.dir == DIR_DELETED) ? 1 : 0;
/* check if the given file ID is opened */
if (ei->type != TYPE_FILE && ei->type != TYPE_DIR)
@@ -149,50 +148,19 @@ int __exfat_truncate(struct inode *inode, loff_t new_size)
if (ei->type == TYPE_FILE)
ei->attr |= ATTR_ARCHIVE;
- /* update the directory entry */
- if (!evict) {
- struct timespec64 ts;
- struct exfat_dentry *ep, *ep2;
- struct exfat_entry_set_cache *es;
- int err;
-
- es = exfat_get_dentry_set(sb, &(ei->dir), ei->entry,
- ES_ALL_ENTRIES);
- if (!es)
- return -EIO;
- ep = exfat_get_dentry_cached(es, 0);
- ep2 = exfat_get_dentry_cached(es, 1);
-
- ts = current_time(inode);
- exfat_set_entry_time(sbi, &ts,
- &ep->dentry.file.modify_tz,
- &ep->dentry.file.modify_time,
- &ep->dentry.file.modify_date,
- &ep->dentry.file.modify_time_cs);
- ep->dentry.file.attr = cpu_to_le16(ei->attr);
-
- /* File size should be zero if there is no cluster allocated */
- if (ei->start_clu == EXFAT_EOF_CLUSTER) {
- ep2->dentry.stream.valid_size = 0;
- ep2->dentry.stream.size = 0;
- } else {
- ep2->dentry.stream.valid_size = cpu_to_le64(new_size);
- ep2->dentry.stream.size = ep2->dentry.stream.valid_size;
- }
-
- if (new_size == 0) {
- /* Any directory can not be truncated to zero */
- WARN_ON(ei->type != TYPE_FILE);
-
- ep2->dentry.stream.flags = ALLOC_FAT_CHAIN;
- ep2->dentry.stream.start_clu = EXFAT_FREE_CLUSTER;
- }
-
- exfat_update_dir_chksum_with_entry_set(es);
- err = exfat_free_dentry_set(es, inode_needs_sync(inode));
- if (err)
- return err;
- }
+ /*
+ * update the directory entry
+ *
+ * If the directory entry is updated by mark_inode_dirty(), the
+ * directory entry will be written after a writeback cycle of
+ * updating the bitmap/FAT, which may result in clusters being
+ * freed but referenced by the directory entry in the event of a
+ * sudden power failure.
+ * __exfat_write_inode() is called for the directory entry, bitmap
+ * and FAT to be written in the same writeback.
+ */
+ if (__exfat_write_inode(inode, inode_needs_sync(inode)))
+ return -EIO;
/* cut off from the FAT chain */
if (ei->flags == ALLOC_FAT_CHAIN && last_clu != EXFAT_FREE_CLUSTER &&
@@ -243,12 +211,6 @@ void exfat_truncate(struct inode *inode, loff_t size)
if (err)
goto write_size;
- inode->i_ctime = inode->i_mtime = current_time(inode);
- if (IS_DIRSYNC(inode))
- exfat_sync_inode(inode);
- else
- mark_inode_dirty(inode);
-
inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >>
inode->i_blkbits;
write_size:
@@ -330,6 +292,12 @@ int exfat_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
attr->ia_valid &= ~ATTR_MODE;
}
+ if (attr->ia_valid & ATTR_SIZE)
+ inode->i_mtime = inode->i_ctime = current_time(inode);
+
+ setattr_copy(&init_user_ns, inode, attr);
+ exfat_truncate_atime(&inode->i_atime);
+
if (attr->ia_valid & ATTR_SIZE) {
error = exfat_block_truncate_page(inode, attr->ia_size);
if (error)
@@ -337,13 +305,15 @@ int exfat_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
down_write(&EXFAT_I(inode)->truncate_lock);
truncate_setsize(inode, attr->ia_size);
+
+ /*
+ * __exfat_write_inode() is called from exfat_truncate(), inode
+ * is already written by it, so mark_inode_dirty() is unneeded.
+ */
exfat_truncate(inode, attr->ia_size);
up_write(&EXFAT_I(inode)->truncate_lock);
- }
-
- setattr_copy(&init_user_ns, inode, attr);
- exfat_truncate_atime(&inode->i_atime);
- mark_inode_dirty(inode);
+ } else
+ mark_inode_dirty(inode);
out:
return error;
diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c
index 0133d385d8e8..a795437b86d0 100644
--- a/fs/exfat/inode.c
+++ b/fs/exfat/inode.c
@@ -17,7 +17,7 @@
#include "exfat_raw.h"
#include "exfat_fs.h"
-static int __exfat_write_inode(struct inode *inode, int sync)
+int __exfat_write_inode(struct inode *inode, int sync)
{
unsigned long long on_disk_size;
struct exfat_dentry *ep, *ep2;
@@ -75,6 +75,13 @@ static int __exfat_write_inode(struct inode *inode, int sync)
ep2->dentry.stream.valid_size = cpu_to_le64(on_disk_size);
ep2->dentry.stream.size = ep2->dentry.stream.valid_size;
+ if (on_disk_size) {
+ ep2->dentry.stream.flags = ei->flags;
+ ep2->dentry.stream.start_clu = cpu_to_le32(ei->start_clu);
+ } else {
+ ep2->dentry.stream.flags = ALLOC_FAT_CHAIN;
+ ep2->dentry.stream.start_clu = EXFAT_FREE_CLUSTER;
+ }
exfat_update_dir_chksum_with_entry_set(es);
return exfat_free_dentry_set(es, sync);
@@ -105,7 +112,7 @@ void exfat_sync_inode(struct inode *inode)
static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset,
unsigned int *clu, int create)
{
- int ret, modified = false;
+ int ret;
unsigned int last_clu;
struct exfat_chain new_clu;
struct super_block *sb = inode->i_sb;
@@ -196,7 +203,6 @@ static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset,
if (new_clu.flags == ALLOC_FAT_CHAIN)
ei->flags = ALLOC_FAT_CHAIN;
ei->start_clu = new_clu.dir;
- modified = true;
} else {
if (new_clu.flags != ei->flags) {
/* no-fat-chain bit is disabled,
@@ -206,7 +212,6 @@ static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset,
exfat_chain_cont_cluster(sb, ei->start_clu,
num_clusters);
ei->flags = ALLOC_FAT_CHAIN;
- modified = true;
}
if (new_clu.flags == ALLOC_FAT_CHAIN)
if (exfat_ent_set(sb, last_clu, new_clu.dir))
@@ -216,33 +221,6 @@ static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset,
num_clusters += num_to_be_allocated;
*clu = new_clu.dir;
- if (ei->dir.dir != DIR_DELETED && modified) {
- struct exfat_dentry *ep;
- struct exfat_entry_set_cache *es;
- int err;
-
- es = exfat_get_dentry_set(sb, &(ei->dir), ei->entry,
- ES_ALL_ENTRIES);
- if (!es)
- return -EIO;
- /* get stream entry */
- ep = exfat_get_dentry_cached(es, 1);
-
- /* update directory entry */
- ep->dentry.stream.flags = ei->flags;
- ep->dentry.stream.start_clu =
- cpu_to_le32(ei->start_clu);
- ep->dentry.stream.valid_size =
- cpu_to_le64(i_size_read(inode));
- ep->dentry.stream.size =
- ep->dentry.stream.valid_size;
-
- exfat_update_dir_chksum_with_entry_set(es);
- err = exfat_free_dentry_set(es, inode_needs_sync(inode));
- if (err)
- return err;
- } /* end of if != DIR_DELETED */
-
inode->i_blocks +=
num_to_be_allocated << sbi->sect_per_clus_bits;
@@ -384,6 +362,7 @@ static void exfat_write_failed(struct address_space *mapping, loff_t to)
if (to > i_size_read(inode)) {
truncate_pagecache(inode, i_size_read(inode));
+ inode->i_mtime = inode->i_ctime = current_time(inode);
exfat_truncate(inode, EXFAT_I(inode)->i_size_aligned);
}
}
diff --git a/fs/exfat/misc.c b/fs/exfat/misc.c
index 9380e0188b55..2e1a1a6b1021 100644
--- a/fs/exfat/misc.c
+++ b/fs/exfat/misc.c
@@ -46,23 +46,6 @@ void __exfat_fs_error(struct super_block *sb, int report, const char *fmt, ...)
}
}
-/*
- * exfat_msg() - print preformated EXFAT specific messages.
- * All logs except what uses exfat_fs_error() should be written by exfat_msg()
- */
-void exfat_msg(struct super_block *sb, const char *level, const char *fmt, ...)
-{
- struct va_format vaf;
- va_list args;
-
- va_start(args, fmt);
- vaf.fmt = fmt;
- vaf.va = &args;
- /* level means KERN_ pacility level */
- printk("%sexFAT-fs (%s): %pV\n", level, sb->s_id, &vaf);
- va_end(args);
-}
-
#define SECS_PER_MIN (60)
#define TIMEZONE_SEC(x) ((x) * 15 * SECS_PER_MIN)
diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
index c6eaf7e9ea74..b617bebc3d0f 100644
--- a/fs/exfat/namei.c
+++ b/fs/exfat/namei.c
@@ -318,7 +318,6 @@ static int exfat_find_empty_entry(struct inode *inode,
unsigned int ret, last_clu;
loff_t size = 0;
struct exfat_chain clu;
- struct exfat_dentry *ep = NULL;
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
struct exfat_inode_info *ei = EXFAT_I(inode);
@@ -383,25 +382,6 @@ static int exfat_find_empty_entry(struct inode *inode,
p_dir->size++;
size = EXFAT_CLU_TO_B(p_dir->size, sbi);
- /* update the directory entry */
- if (p_dir->dir != sbi->root_dir) {
- struct buffer_head *bh;
-
- ep = exfat_get_dentry(sb,
- &(ei->dir), ei->entry + 1, &bh);
- if (!ep)
- return -EIO;
-
- ep->dentry.stream.valid_size = cpu_to_le64(size);
- ep->dentry.stream.size = ep->dentry.stream.valid_size;
- ep->dentry.stream.flags = p_dir->flags;
- exfat_update_bh(bh, IS_DIRSYNC(inode));
- brelse(bh);
- if (exfat_update_dir_chksum(inode, &(ei->dir),
- ei->entry))
- return -EIO;
- }
-
/* directory inode should be updated in here */
i_size_write(inode, size);
ei->i_size_ondisk += sbi->cluster_size;
@@ -462,7 +442,7 @@ static int __exfat_resolve_path(struct inode *inode, const unsigned char *path,
return namelen; /* return error value */
if ((lossy && !lookup) || !namelen)
- return -EINVAL;
+ return (lossy & NLS_NAME_OVERLEN) ? -ENAMETOOLONG : -EINVAL;
exfat_chain_set(p_dir, ei->start_clu,
EXFAT_B_TO_CLU(i_size_read(inode), sbi), ei->flags);
diff --git a/fs/exfat/nls.c b/fs/exfat/nls.c
index ef115e673406..705710f93e2d 100644
--- a/fs/exfat/nls.c
+++ b/fs/exfat/nls.c
@@ -509,7 +509,7 @@ static int exfat_utf8_to_utf16(struct super_block *sb,
}
if (unilen > MAX_NAME_LENGTH) {
- exfat_err(sb, "failed to %s (estr:ENAMETOOLONG) nls len : %d, unilen : %d > %d",
+ exfat_debug(sb, "failed to %s (estr:ENAMETOOLONG) nls len : %d, unilen : %d > %d",
__func__, len, unilen, MAX_NAME_LENGTH);
return -ENAMETOOLONG;
}
@@ -671,7 +671,7 @@ static int exfat_load_upcase_table(struct super_block *sb,
bh = sb_bread(sb, sector);
if (!bh) {
- exfat_err(sb, "failed to read sector(0x%llx)\n",
+ exfat_err(sb, "failed to read sector(0x%llx)",
(unsigned long long)sector);
ret = -EIO;
goto free_table;
diff --git a/fs/exfat/super.c b/fs/exfat/super.c
index 6a4dfe9f31ee..35f0305cd493 100644
--- a/fs/exfat/super.c
+++ b/fs/exfat/super.c
@@ -464,7 +464,7 @@ static int exfat_read_boot_sector(struct super_block *sb)
*/
if (p_boot->sect_size_bits < EXFAT_MIN_SECT_SIZE_BITS ||
p_boot->sect_size_bits > EXFAT_MAX_SECT_SIZE_BITS) {
- exfat_err(sb, "bogus sector size bits : %u\n",
+ exfat_err(sb, "bogus sector size bits : %u",
p_boot->sect_size_bits);
return -EINVAL;
}
@@ -473,7 +473,7 @@ static int exfat_read_boot_sector(struct super_block *sb)
* sect_per_clus_bits could be at least 0 and at most 25 - sect_size_bits.
*/
if (p_boot->sect_per_clus_bits > EXFAT_MAX_SECT_PER_CLUS_BITS(p_boot)) {
- exfat_err(sb, "bogus sectors bits per cluster : %u\n",
+ exfat_err(sb, "bogus sectors bits per cluster : %u",
p_boot->sect_per_clus_bits);
return -EINVAL;
}
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 27a0a8c74f7a..252c742379cf 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -171,7 +171,7 @@ static void ext2_put_super (struct super_block * sb)
brelse (sbi->s_sbh);
sb->s_fs_info = NULL;
kfree(sbi->s_blockgroup_lock);
- fs_put_dax(sbi->s_daxdev);
+ fs_put_dax(sbi->s_daxdev, NULL);
kfree(sbi);
}
@@ -833,7 +833,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
}
sb->s_fs_info = sbi;
sbi->s_sb_block = sb_block;
- sbi->s_daxdev = fs_dax_get_by_bdev(sb->s_bdev, &sbi->s_dax_part_off);
+ sbi->s_daxdev = fs_dax_get_by_bdev(sb->s_bdev, &sbi->s_dax_part_off,
+ NULL, NULL);
spin_lock_init(&sbi->s_lock);
ret = -EINVAL;
@@ -1210,7 +1211,7 @@ failed_mount_group_desc:
failed_mount:
brelse(bh);
failed_sbi:
- fs_put_dax(sbi->s_daxdev);
+ fs_put_dax(sbi->s_daxdev, NULL);
sb->s_fs_info = NULL;
kfree(sbi->s_blockgroup_lock);
kfree(sbi);
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 841fa6d9d744..641abfa4b718 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -517,36 +517,36 @@ bad_block:
/* Here we know that we can set the new attribute. */
if (header) {
- /* assert(header == HDR(bh)); */
+ int offset;
+
lock_buffer(bh);
if (header->h_refcount == cpu_to_le32(1)) {
__u32 hash = le32_to_cpu(header->h_hash);
+ struct mb_cache_entry *oe;
- ea_bdebug(bh, "modifying in-place");
+ oe = mb_cache_entry_delete_or_get(EA_BLOCK_CACHE(inode),
+ hash, bh->b_blocknr);
+ if (!oe) {
+ ea_bdebug(bh, "modifying in-place");
+ goto update_block;
+ }
/*
- * This must happen under buffer lock for
- * ext2_xattr_set2() to reliably detect modified block
+ * Someone is trying to reuse the block, leave it alone
*/
- mb_cache_entry_delete(EA_BLOCK_CACHE(inode), hash,
- bh->b_blocknr);
-
- /* keep the buffer locked while modifying it. */
- } else {
- int offset;
-
- unlock_buffer(bh);
- ea_bdebug(bh, "cloning");
- header = kmemdup(HDR(bh), bh->b_size, GFP_KERNEL);
- error = -ENOMEM;
- if (header == NULL)
- goto cleanup;
- header->h_refcount = cpu_to_le32(1);
-
- offset = (char *)here - bh->b_data;
- here = ENTRY((char *)header + offset);
- offset = (char *)last - bh->b_data;
- last = ENTRY((char *)header + offset);
+ mb_cache_entry_put(EA_BLOCK_CACHE(inode), oe);
}
+ unlock_buffer(bh);
+ ea_bdebug(bh, "cloning");
+ header = kmemdup(HDR(bh), bh->b_size, GFP_KERNEL);
+ error = -ENOMEM;
+ if (header == NULL)
+ goto cleanup;
+ header->h_refcount = cpu_to_le32(1);
+
+ offset = (char *)here - bh->b_data;
+ here = ENTRY((char *)header + offset);
+ offset = (char *)last - bh->b_data;
+ last = ENTRY((char *)header + offset);
} else {
/* Allocate a buffer where we construct the new block. */
header = kzalloc(sb->s_blocksize, GFP_KERNEL);
@@ -559,6 +559,7 @@ bad_block:
last = here = ENTRY(header+1);
}
+update_block:
/* Iff we are modifying the block in-place, bh is locked here. */
if (not_found) {
@@ -651,6 +652,55 @@ cleanup:
return error;
}
+static void ext2_xattr_release_block(struct inode *inode,
+ struct buffer_head *bh)
+{
+ struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);
+
+retry_ref:
+ lock_buffer(bh);
+ if (HDR(bh)->h_refcount == cpu_to_le32(1)) {
+ __u32 hash = le32_to_cpu(HDR(bh)->h_hash);
+ struct mb_cache_entry *oe;
+
+ /*
+ * This must happen under buffer lock to properly
+ * serialize with ext2_xattr_set() reusing the block.
+ */
+ oe = mb_cache_entry_delete_or_get(ea_block_cache, hash,
+ bh->b_blocknr);
+ if (oe) {
+ /*
+ * Someone is trying to reuse the block. Wait
+ * and retry.
+ */
+ unlock_buffer(bh);
+ mb_cache_entry_wait_unused(oe);
+ mb_cache_entry_put(ea_block_cache, oe);
+ goto retry_ref;
+ }
+
+ /* Free the old block. */
+ ea_bdebug(bh, "freeing");
+ ext2_free_blocks(inode, bh->b_blocknr, 1);
+ /* We let our caller release bh, so we
+ * need to duplicate the buffer before. */
+ get_bh(bh);
+ bforget(bh);
+ unlock_buffer(bh);
+ } else {
+ /* Decrement the refcount only. */
+ le32_add_cpu(&HDR(bh)->h_refcount, -1);
+ dquot_free_block(inode, 1);
+ mark_buffer_dirty(bh);
+ unlock_buffer(bh);
+ ea_bdebug(bh, "refcount now=%d",
+ le32_to_cpu(HDR(bh)->h_refcount));
+ if (IS_SYNC(inode))
+ sync_dirty_buffer(bh);
+ }
+}
+
/*
* Second half of ext2_xattr_set(): Update the file system.
*/
@@ -747,34 +797,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
* If there was an old block and we are no longer using it,
* release the old block.
*/
- lock_buffer(old_bh);
- if (HDR(old_bh)->h_refcount == cpu_to_le32(1)) {
- __u32 hash = le32_to_cpu(HDR(old_bh)->h_hash);
-
- /*
- * This must happen under buffer lock for
- * ext2_xattr_set2() to reliably detect freed block
- */
- mb_cache_entry_delete(ea_block_cache, hash,
- old_bh->b_blocknr);
- /* Free the old block. */
- ea_bdebug(old_bh, "freeing");
- ext2_free_blocks(inode, old_bh->b_blocknr, 1);
- mark_inode_dirty(inode);
- /* We let our caller release old_bh, so we
- * need to duplicate the buffer before. */
- get_bh(old_bh);
- bforget(old_bh);
- } else {
- /* Decrement the refcount only. */
- le32_add_cpu(&HDR(old_bh)->h_refcount, -1);
- dquot_free_block_nodirty(inode, 1);
- mark_inode_dirty(inode);
- mark_buffer_dirty(old_bh);
- ea_bdebug(old_bh, "refcount now=%d",
- le32_to_cpu(HDR(old_bh)->h_refcount));
- }
- unlock_buffer(old_bh);
+ ext2_xattr_release_block(inode, old_bh);
}
cleanup:
@@ -828,30 +851,7 @@ ext2_xattr_delete_inode(struct inode *inode)
EXT2_I(inode)->i_file_acl);
goto cleanup;
}
- lock_buffer(bh);
- if (HDR(bh)->h_refcount == cpu_to_le32(1)) {
- __u32 hash = le32_to_cpu(HDR(bh)->h_hash);
-
- /*
- * This must happen under buffer lock for ext2_xattr_set2() to
- * reliably detect freed block
- */
- mb_cache_entry_delete(EA_BLOCK_CACHE(inode), hash,
- bh->b_blocknr);
- ext2_free_blocks(inode, EXT2_I(inode)->i_file_acl, 1);
- get_bh(bh);
- bforget(bh);
- unlock_buffer(bh);
- } else {
- le32_add_cpu(&HDR(bh)->h_refcount, -1);
- ea_bdebug(bh, "refcount now=%d",
- le32_to_cpu(HDR(bh)->h_refcount));
- unlock_buffer(bh);
- mark_buffer_dirty(bh);
- if (IS_SYNC(inode))
- sync_dirty_buffer(bh);
- dquot_free_block_nodirty(inode, 1);
- }
+ ext2_xattr_release_block(inode, bh);
EXT2_I(inode)->i_file_acl = 0;
cleanup:
@@ -943,7 +943,7 @@ ext2_xattr_cache_find(struct inode *inode, struct ext2_xattr_header *header)
if (!header->h_hash)
return NULL; /* never share */
ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
-again:
+
ce = mb_cache_entry_find_first(ea_block_cache, hash);
while (ce) {
struct buffer_head *bh;
@@ -955,22 +955,8 @@ again:
inode->i_ino, (unsigned long) ce->e_value);
} else {
lock_buffer(bh);
- /*
- * We have to be careful about races with freeing or
- * rehashing of xattr block. Once we hold buffer lock
- * xattr block's state is stable so we can check
- * whether the block got freed / rehashed or not.
- * Since we unhash mbcache entry under buffer lock when
- * freeing / rehashing xattr block, checking whether
- * entry is still hashed is reliable.
- */
- if (hlist_bl_unhashed(&ce->e_hash_list)) {
- mb_cache_entry_put(ea_block_cache, ce);
- unlock_buffer(bh);
- brelse(bh);
- goto again;
- } else if (le32_to_cpu(HDR(bh)->h_refcount) >
- EXT2_XATTR_REFCOUNT_MAX) {
+ if (le32_to_cpu(HDR(bh)->h_refcount) >
+ EXT2_XATTR_REFCOUNT_MAX) {
ea_idebug(inode, "block %ld refcount %d>%d",
(unsigned long) ce->e_value,
le32_to_cpu(HDR(bh)->h_refcount),
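The ext2 conversion above (and the matching ext4 xattr changes later in this diff) replaces mb_cache_entry_delete() with mb_cache_entry_delete_or_get(), so an xattr block that another thread is about to reuse can no longer be freed out from under it. The shared pattern, as an illustrative sketch rather than a copy of either call site:

    retry:
            oe = mb_cache_entry_delete_or_get(cache, hash, blocknr);
            if (oe) {
                    /* a reuser holds a reference: wait for it, then try again */
                    mb_cache_entry_wait_unused(oe);
                    mb_cache_entry_put(cache, oe);
                    goto retry;
            }
            /* entry is gone from the cache; freeing the block is now safe */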
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 78ee3ef795ae..8ff4b9192a9f 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -666,7 +666,7 @@ int ext4_should_retry_alloc(struct super_block *sb, int *retries)
* it's possible we've just missed a transaction commit here,
* so ignore the returned status
*/
- jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);
+ ext4_debug("%s: retrying operation after ENOSPC\n", sb->s_id);
(void) jbd2_journal_force_commit_nested(sbi->s_journal);
return 1;
}
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 29fc575a4eb6..9bca5565547b 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -724,6 +724,8 @@ enum {
#define EXT4_IOC_GETSTATE _IOW('f', 41, __u32)
#define EXT4_IOC_GET_ES_CACHE _IOWR('f', 42, struct fiemap)
#define EXT4_IOC_CHECKPOINT _IOW('f', 43, __u32)
+#define EXT4_IOC_GETFSUUID _IOR('f', 44, struct fsuuid)
+#define EXT4_IOC_SETFSUUID _IOW('f', 44, struct fsuuid)
#define EXT4_IOC_SHUTDOWN _IOR ('X', 125, __u32)
@@ -753,6 +755,15 @@ enum {
EXT4_IOC_CHECKPOINT_FLAG_ZEROOUT | \
EXT4_IOC_CHECKPOINT_FLAG_DRY_RUN)
+/*
+ * Structure for EXT4_IOC_GETFSUUID/EXT4_IOC_SETFSUUID
+ */
+struct fsuuid {
+ __u32 fsu_len;
+ __u32 fsu_flags;
+ __u8 fsu_uuid[];
+};
+
#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
/*
* ioctl commands in 32 bit emulation
@@ -3016,7 +3027,7 @@ int ext4_fileattr_set(struct user_namespace *mnt_userns,
struct dentry *dentry, struct fileattr *fa);
int ext4_fileattr_get(struct dentry *dentry, struct fileattr *fa);
extern void ext4_reset_inode_seed(struct inode *inode);
-int ext4_update_overhead(struct super_block *sb);
+int ext4_update_overhead(struct super_block *sb, bool force);
/* migrate.c */
extern int ext4_ext_migrate(struct inode *);
@@ -3583,6 +3594,7 @@ extern struct buffer_head *ext4_get_first_inline_block(struct inode *inode,
extern int ext4_inline_data_fiemap(struct inode *inode,
struct fiemap_extent_info *fieinfo,
int *has_inline, __u64 start, __u64 len);
+extern void *ext4_read_inline_link(struct inode *inode);
struct iomap;
extern int ext4_inline_data_iomap(struct inode *inode, struct iomap *iomap);
@@ -3799,7 +3811,7 @@ static inline void set_bitmap_uptodate(struct buffer_head *bh)
extern wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];
extern int ext4_resize_begin(struct super_block *sb);
-extern void ext4_resize_end(struct super_block *sb);
+extern int ext4_resize_end(struct super_block *sb, bool update_backups);
static inline void ext4_set_io_unwritten_flag(struct inode *inode,
struct ext4_io_end *io_end)
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index 3477a16d08ae..8e1fb18f465e 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -267,8 +267,7 @@ int __ext4_forget(const char *where, unsigned int line, handle_t *handle,
trace_ext4_forget(inode, is_metadata, blocknr);
BUFFER_TRACE(bh, "enter");
- jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
- "data mode %x\n",
+ ext4_debug("forgetting bh %p: is_metadata=%d, mode %o, data mode %x\n",
bh, is_metadata, inode->i_mode,
test_opt(inode->i_sb, DATA_FLAGS));
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index 9a3a8996aacf..23167efda95e 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -1654,7 +1654,8 @@ int ext4_es_register_shrinker(struct ext4_sb_info *sbi)
sbi->s_es_shrinker.scan_objects = ext4_es_scan;
sbi->s_es_shrinker.count_objects = ext4_es_count;
sbi->s_es_shrinker.seeks = DEFAULT_SEEKS;
- err = register_shrinker(&sbi->s_es_shrinker);
+ err = register_shrinker(&sbi->s_es_shrinker, "ext4-es:%s",
+ sbi->s_sb->s_id);
if (err)
goto err4;
diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
index eb4c8ad1bb61..2af962cbb835 100644
--- a/fs/ext4/fast_commit.c
+++ b/fs/ext4/fast_commit.c
@@ -917,8 +917,8 @@ static int ext4_fc_write_inode_data(struct inode *inode, u32 *crc)
mutex_unlock(&ei->i_fc_lock);
cur_lblk_off = old_blk_size;
- jbd_debug(1, "%s: will try writing %d to %d for inode %ld\n",
- __func__, cur_lblk_off, new_blk_size, inode->i_ino);
+ ext4_debug("will try writing %d to %d for inode %ld\n",
+ cur_lblk_off, new_blk_size, inode->i_ino);
while (cur_lblk_off <= new_blk_size) {
map.m_lblk = cur_lblk_off;
@@ -1168,7 +1168,7 @@ static void ext4_fc_update_stats(struct super_block *sb, int status,
{
struct ext4_fc_stats *stats = &EXT4_SB(sb)->s_fc_stats;
- jbd_debug(1, "Fast commit ended with status = %d for tid %u",
+ ext4_debug("Fast commit ended with status = %d for tid %u",
status, commit_tid);
if (status == EXT4_FC_STATUS_OK) {
stats->fc_num_commits++;
@@ -1375,14 +1375,14 @@ static int ext4_fc_replay_unlink(struct super_block *sb, struct ext4_fc_tl *tl,
inode = ext4_iget(sb, darg.ino, EXT4_IGET_NORMAL);
if (IS_ERR(inode)) {
- jbd_debug(1, "Inode %d not found", darg.ino);
+ ext4_debug("Inode %d not found", darg.ino);
return 0;
}
old_parent = ext4_iget(sb, darg.parent_ino,
EXT4_IGET_NORMAL);
if (IS_ERR(old_parent)) {
- jbd_debug(1, "Dir with inode %d not found", darg.parent_ino);
+ ext4_debug("Dir with inode %d not found", darg.parent_ino);
iput(inode);
return 0;
}
@@ -1407,21 +1407,21 @@ static int ext4_fc_replay_link_internal(struct super_block *sb,
dir = ext4_iget(sb, darg->parent_ino, EXT4_IGET_NORMAL);
if (IS_ERR(dir)) {
- jbd_debug(1, "Dir with inode %d not found.", darg->parent_ino);
+ ext4_debug("Dir with inode %d not found.", darg->parent_ino);
dir = NULL;
goto out;
}
dentry_dir = d_obtain_alias(dir);
if (IS_ERR(dentry_dir)) {
- jbd_debug(1, "Failed to obtain dentry");
+ ext4_debug("Failed to obtain dentry");
dentry_dir = NULL;
goto out;
}
dentry_inode = d_alloc(dentry_dir, &qstr_dname);
if (!dentry_inode) {
- jbd_debug(1, "Inode dentry not created.");
+ ext4_debug("Inode dentry not created.");
ret = -ENOMEM;
goto out;
}
@@ -1434,7 +1434,7 @@ static int ext4_fc_replay_link_internal(struct super_block *sb,
* could complete.
*/
if (ret && ret != -EEXIST) {
- jbd_debug(1, "Failed to link\n");
+ ext4_debug("Failed to link\n");
goto out;
}
@@ -1468,7 +1468,7 @@ static int ext4_fc_replay_link(struct super_block *sb, struct ext4_fc_tl *tl,
inode = ext4_iget(sb, darg.ino, EXT4_IGET_NORMAL);
if (IS_ERR(inode)) {
- jbd_debug(1, "Inode not found.");
+ ext4_debug("Inode not found.");
return 0;
}
@@ -1576,7 +1576,7 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl,
/* Given that we just wrote the inode on disk, this SHOULD succeed. */
inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
if (IS_ERR(inode)) {
- jbd_debug(1, "Inode not found.");
+ ext4_debug("Inode not found.");
return -EFSCORRUPTED;
}
@@ -1630,7 +1630,7 @@ static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl,
inode = ext4_iget(sb, darg.ino, EXT4_IGET_NORMAL);
if (IS_ERR(inode)) {
- jbd_debug(1, "inode %d not found.", darg.ino);
+ ext4_debug("inode %d not found.", darg.ino);
inode = NULL;
ret = -EINVAL;
goto out;
@@ -1643,7 +1643,7 @@ static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl,
*/
dir = ext4_iget(sb, darg.parent_ino, EXT4_IGET_NORMAL);
if (IS_ERR(dir)) {
- jbd_debug(1, "Dir %d not found.", darg.ino);
+ ext4_debug("Dir %d not found.", darg.ino);
goto out;
}
ret = ext4_init_new_dir(NULL, dir, inode);
@@ -1727,7 +1727,7 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
inode = ext4_iget(sb, le32_to_cpu(fc_add_ex.fc_ino), EXT4_IGET_NORMAL);
if (IS_ERR(inode)) {
- jbd_debug(1, "Inode not found.");
+ ext4_debug("Inode not found.");
return 0;
}
@@ -1741,7 +1741,7 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
cur = start;
remaining = len;
- jbd_debug(1, "ADD_RANGE, lblk %d, pblk %lld, len %d, unwritten %d, inode %ld\n",
+ ext4_debug("ADD_RANGE, lblk %d, pblk %lld, len %d, unwritten %d, inode %ld\n",
start, start_pblk, len, ext4_ext_is_unwritten(ex),
inode->i_ino);
@@ -1802,7 +1802,7 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
}
/* Range is mapped and needs a state change */
- jbd_debug(1, "Converting from %ld to %d %lld",
+ ext4_debug("Converting from %ld to %d %lld",
map.m_flags & EXT4_MAP_UNWRITTEN,
ext4_ext_is_unwritten(ex), map.m_pblk);
ret = ext4_ext_replay_update_ex(inode, cur, map.m_len,
@@ -1845,7 +1845,7 @@ ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl,
inode = ext4_iget(sb, le32_to_cpu(lrange.fc_ino), EXT4_IGET_NORMAL);
if (IS_ERR(inode)) {
- jbd_debug(1, "Inode %d not found", le32_to_cpu(lrange.fc_ino));
+ ext4_debug("Inode %d not found", le32_to_cpu(lrange.fc_ino));
return 0;
}
@@ -1853,7 +1853,7 @@ ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl,
if (ret)
goto out;
- jbd_debug(1, "DEL_RANGE, inode %ld, lblk %d, len %d\n",
+ ext4_debug("DEL_RANGE, inode %ld, lblk %d, len %d\n",
inode->i_ino, le32_to_cpu(lrange.fc_lblk),
le32_to_cpu(lrange.fc_len));
while (remaining > 0) {
@@ -1902,7 +1902,7 @@ static void ext4_fc_set_bitmaps_and_counters(struct super_block *sb)
inode = ext4_iget(sb, state->fc_modified_inodes[i],
EXT4_IGET_NORMAL);
if (IS_ERR(inode)) {
- jbd_debug(1, "Inode %d not found.",
+ ext4_debug("Inode %d not found.",
state->fc_modified_inodes[i]);
continue;
}
@@ -2031,7 +2031,7 @@ static int ext4_fc_replay_scan(journal_t *journal,
for (cur = start; cur < end; cur = cur + sizeof(tl) + le16_to_cpu(tl.fc_len)) {
memcpy(&tl, cur, sizeof(tl));
val = cur + sizeof(tl);
- jbd_debug(3, "Scan phase, tag:%s, blk %lld\n",
+ ext4_debug("Scan phase, tag:%s, blk %lld\n",
tag2str(le16_to_cpu(tl.fc_tag)), bh->b_blocknr);
switch (le16_to_cpu(tl.fc_tag)) {
case EXT4_FC_TAG_ADD_RANGE:
@@ -2126,7 +2126,7 @@ static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh,
sbi->s_mount_state |= EXT4_FC_REPLAY;
}
if (!sbi->s_fc_replay_state.fc_replay_num_tags) {
- jbd_debug(1, "Replay stops\n");
+ ext4_debug("Replay stops\n");
ext4_fc_set_bitmaps_and_counters(sb);
return 0;
}
@@ -2150,7 +2150,7 @@ static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh,
ext4_fc_set_bitmaps_and_counters(sb);
break;
}
- jbd_debug(3, "Replay phase, tag:%s\n",
+ ext4_debug("Replay phase, tag:%s\n",
tag2str(le16_to_cpu(tl.fc_tag)));
state->fc_replay_num_tags--;
switch (le16_to_cpu(tl.fc_tag)) {
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index 07a8c75b65ed..860fc5119009 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -460,7 +460,7 @@ static int ext4_splice_branch(handle_t *handle,
* the new i_size. But that is not done here - it is done in
* generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
*/
- jbd_debug(5, "splicing indirect only\n");
+ ext4_debug("splicing indirect only\n");
BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
err = ext4_handle_dirty_metadata(handle, ar->inode, where->bh);
if (err)
@@ -472,7 +472,7 @@ static int ext4_splice_branch(handle_t *handle,
err = ext4_mark_inode_dirty(handle, ar->inode);
if (unlikely(err))
goto err_out;
- jbd_debug(5, "splicing direct\n");
+ ext4_debug("splicing direct\n");
}
return err;
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index cff52ff6549d..a4fbe825694b 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -6,6 +6,7 @@
#include <linux/iomap.h>
#include <linux/fiemap.h>
+#include <linux/namei.h>
#include <linux/iversion.h>
#include <linux/sched/mm.h>
@@ -35,6 +36,9 @@ static int get_max_inline_xattr_value_size(struct inode *inode,
struct ext4_inode *raw_inode;
int free, min_offs;
+ if (!EXT4_INODE_HAS_XATTR_SPACE(inode))
+ return 0;
+
min_offs = EXT4_SB(inode->i_sb)->s_inode_size -
EXT4_GOOD_OLD_INODE_SIZE -
EXT4_I(inode)->i_extra_isize -
@@ -1588,6 +1592,35 @@ out:
return ret;
}
+void *ext4_read_inline_link(struct inode *inode)
+{
+ struct ext4_iloc iloc;
+ int ret, inline_size;
+ void *link;
+
+ ret = ext4_get_inode_loc(inode, &iloc);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = -ENOMEM;
+ inline_size = ext4_get_inline_size(inode);
+ link = kmalloc(inline_size + 1, GFP_NOFS);
+ if (!link)
+ goto out;
+
+ ret = ext4_read_inline_data(inode, link, inline_size, &iloc);
+ if (ret < 0) {
+ kfree(link);
+ goto out;
+ }
+ nd_terminate_link(link, inode->i_size, ret);
+out:
+ if (ret < 0)
+ link = ERR_PTR(ret);
+ brelse(iloc.bh);
+ return link;
+}
+
struct buffer_head *ext4_get_first_inline_block(struct inode *inode,
struct ext4_dir_entry_2 **parent_de,
int *retval)
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 9fd60fc8ba4c..601214453c3a 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -177,6 +177,8 @@ void ext4_evict_inode(struct inode *inode)
trace_ext4_evict_inode(inode);
+ if (EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)
+ ext4_evict_ea_inode(inode);
if (inode->i_nlink) {
/*
* When journalling data dirty buffers are tracked only in the
@@ -1571,7 +1573,14 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
ext4_lblk_t start, last;
start = index << (PAGE_SHIFT - inode->i_blkbits);
last = end << (PAGE_SHIFT - inode->i_blkbits);
+
+ /*
+ * avoid racing with extent status tree scans made by
+ * ext4_insert_delayed_block()
+ */
+ down_write(&EXT4_I(inode)->i_data_sem);
ext4_es_remove_extent(inode, start, last - start + 1);
+ up_write(&EXT4_I(inode)->i_data_sem);
}
folio_batch_init(&fbatch);
@@ -3142,13 +3151,15 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
{
struct inode *inode = mapping->host;
journal_t *journal;
+ sector_t ret = 0;
int err;
+ inode_lock_shared(inode);
/*
* We can get here for an inline file via the FIBMAP ioctl
*/
if (ext4_has_inline_data(inode))
- return 0;
+ goto out;
if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
test_opt(inode->i_sb, DELALLOC)) {
@@ -3187,10 +3198,14 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
jbd2_journal_unlock_updates(journal);
if (err)
- return 0;
+ goto out;
}
- return iomap_bmap(mapping, block, &ext4_iomap_ops);
+ ret = iomap_bmap(mapping, block, &ext4_iomap_ops);
+
+out:
+ inode_unlock_shared(inode);
+ return ret;
}
static int ext4_read_folio(struct file *file, struct folio *folio)
@@ -4687,8 +4702,7 @@ static inline int ext4_iget_extra_inode(struct inode *inode,
__le32 *magic = (void *)raw_inode +
EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;
- if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize + sizeof(__le32) <=
- EXT4_INODE_SIZE(inode->i_sb) &&
+ if (EXT4_INODE_HAS_XATTR_SPACE(inode) &&
*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
ext4_set_inode_state(inode, EXT4_STATE_XATTR);
return ext4_find_inline_data_nolock(inode);
@@ -5215,7 +5229,7 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
if (EXT4_SB(inode->i_sb)->s_journal) {
if (ext4_journal_current_handle()) {
- jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
+ ext4_debug("called recursively, non-PF_MEMALLOC!\n");
dump_stack();
return -EIO;
}
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index cb01c1da0f9d..3cf3ec4b1c21 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -20,6 +20,7 @@
#include <linux/delay.h>
#include <linux/iversion.h>
#include <linux/fileattr.h>
+#include <linux/uuid.h>
#include "ext4_jbd2.h"
#include "ext4.h"
#include <linux/fsmap.h>
@@ -41,6 +42,15 @@ static void ext4_sb_setlabel(struct ext4_super_block *es, const void *arg)
memcpy(es->s_volume_name, (char *)arg, EXT4_LABEL_MAX);
}
+/*
+ * Superblock modification callback function for changing file system
+ * UUID.
+ */
+static void ext4_sb_setuuid(struct ext4_super_block *es, const void *arg)
+{
+ memcpy(es->s_uuid, (__u8 *)arg, UUID_SIZE);
+}
+
static
int ext4_update_primary_sb(struct super_block *sb, handle_t *handle,
ext4_update_sb_callback func,
@@ -944,7 +954,9 @@ static long ext4_ioctl_group_add(struct file *file,
test_opt(sb, INIT_INODE_TABLE))
err = ext4_register_li_request(sb, input->group);
group_add_out:
- ext4_resize_end(sb);
+ err2 = ext4_resize_end(sb, false);
+ if (err == 0)
+ err = err2;
return err;
}
@@ -1131,6 +1143,73 @@ static int ext4_ioctl_getlabel(struct ext4_sb_info *sbi, char __user *user_label
return 0;
}
+static int ext4_ioctl_getuuid(struct ext4_sb_info *sbi,
+ struct fsuuid __user *ufsuuid)
+{
+ struct fsuuid fsuuid;
+ __u8 uuid[UUID_SIZE];
+
+ if (copy_from_user(&fsuuid, ufsuuid, sizeof(fsuuid)))
+ return -EFAULT;
+
+ if (fsuuid.fsu_len == 0) {
+ fsuuid.fsu_len = UUID_SIZE;
+ if (copy_to_user(ufsuuid, &fsuuid, sizeof(fsuuid.fsu_len)))
+ return -EFAULT;
+ return -EINVAL;
+ }
+
+ if (fsuuid.fsu_len != UUID_SIZE || fsuuid.fsu_flags != 0)
+ return -EINVAL;
+
+ lock_buffer(sbi->s_sbh);
+ memcpy(uuid, sbi->s_es->s_uuid, UUID_SIZE);
+ unlock_buffer(sbi->s_sbh);
+
+ if (copy_to_user(&ufsuuid->fsu_uuid[0], uuid, UUID_SIZE))
+ return -EFAULT;
+ return 0;
+}
+
+static int ext4_ioctl_setuuid(struct file *filp,
+ const struct fsuuid __user *ufsuuid)
+{
+ int ret = 0;
+ struct super_block *sb = file_inode(filp)->i_sb;
+ struct fsuuid fsuuid;
+ __u8 uuid[UUID_SIZE];
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ /*
+ * If any checksums (group descriptors or metadata) are being used
+ * then the checksum seed feature is required to change the UUID.
+ */
+ if (((ext4_has_feature_gdt_csum(sb) || ext4_has_metadata_csum(sb))
+ && !ext4_has_feature_csum_seed(sb))
+ || ext4_has_feature_stable_inodes(sb))
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&fsuuid, ufsuuid, sizeof(fsuuid)))
+ return -EFAULT;
+
+ if (fsuuid.fsu_len != UUID_SIZE || fsuuid.fsu_flags != 0)
+ return -EINVAL;
+
+ if (copy_from_user(uuid, &ufsuuid->fsu_uuid[0], UUID_SIZE))
+ return -EFAULT;
+
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
+ ret = ext4_update_superblocks_fn(sb, ext4_sb_setuuid, &uuid);
+ mnt_drop_write_file(filp);
+
+ return ret;
+}
+
static long __ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct inode *inode = file_inode(filp);
@@ -1223,7 +1302,9 @@ setversion_out:
err = err2;
mnt_drop_write_file(filp);
group_extend_out:
- ext4_resize_end(sb);
+ err2 = ext4_resize_end(sb, false);
+ if (err == 0)
+ err = err2;
return err;
}
@@ -1371,7 +1452,9 @@ mext_out:
err = ext4_register_li_request(sb, o_group);
resizefs_out:
- ext4_resize_end(sb);
+ err2 = ext4_resize_end(sb, true);
+ if (err == 0)
+ err = err2;
return err;
}
@@ -1509,6 +1592,10 @@ resizefs_out:
return ext4_ioctl_setlabel(filp,
(const void __user *)arg);
+ case EXT4_IOC_GETFSUUID:
+ return ext4_ioctl_getuuid(EXT4_SB(sb), (void __user *)arg);
+ case EXT4_IOC_SETFSUUID:
+ return ext4_ioctl_setuuid(filp, (const void __user *)arg);
default:
return -ENOTTY;
}
@@ -1586,6 +1673,8 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case EXT4_IOC_CHECKPOINT:
case FS_IOC_GETFSLABEL:
case FS_IOC_SETFSLABEL:
+ case EXT4_IOC_GETFSUUID:
+ case EXT4_IOC_SETFSUUID:
break;
default:
return -ENOIOCTLCMD;
@@ -1599,13 +1688,15 @@ static void set_overhead(struct ext4_super_block *es, const void *arg)
es->s_overhead_clusters = cpu_to_le32(*((unsigned long *) arg));
}
-int ext4_update_overhead(struct super_block *sb)
+int ext4_update_overhead(struct super_block *sb, bool force)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
- if (sb_rdonly(sb) || sbi->s_overhead == 0 ||
- sbi->s_overhead == le32_to_cpu(sbi->s_es->s_overhead_clusters))
+ if (sb_rdonly(sb))
+ return 0;
+ if (!force &&
+ (sbi->s_overhead == 0 ||
+ sbi->s_overhead == le32_to_cpu(sbi->s_es->s_overhead_clusters)))
return 0;
-
return ext4_update_superblocks_fn(sb, set_overhead, &sbi->s_overhead);
}
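The new EXT4_IOC_GETFSUUID/EXT4_IOC_SETFSUUID ioctls exchange a struct fsuuid whose flexible fsu_uuid[] payload must be exactly UUID_SIZE (16) bytes, with fsu_flags set to 0; a GETFSUUID call with fsu_len == 0 reports the required length and fails with -EINVAL. A user-space sketch of reading the UUID, based only on the definitions in this patch (header choices and error handling are illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/types.h>

    struct fsuuid {
            __u32 fsu_len;
            __u32 fsu_flags;
            __u8  fsu_uuid[];
    };
    #define EXT4_IOC_GETFSUUID _IOR('f', 44, struct fsuuid)

    int main(int argc, char **argv)
    {
            struct fsuuid *fsu;
            int fd, i;

            if (argc < 2)
                    return 1;
            fd = open(argv[1], O_RDONLY);       /* any file on the ext4 filesystem */
            if (fd < 0)
                    return 1;
            fsu = calloc(1, sizeof(*fsu) + 16); /* fsu_flags stays 0 */
            if (!fsu)
                    return 1;
            fsu->fsu_len = 16;                  /* UUID_SIZE expected by the kernel */
            if (ioctl(fd, EXT4_IOC_GETFSUUID, fsu) == 0) {
                    for (i = 0; i < 16; i++)
                            printf("%02x", fsu->fsu_uuid[i]);
                    printf("\n");
            }
            free(fsu);
            close(fd);
            return 0;
    }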
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 9e06334771a3..bd8f8b5c3d30 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -1933,6 +1933,7 @@ static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
unsigned ret = 0;
int len0 = len;
void *buddy;
+ bool split = false;
BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
BUG_ON(e4b->bd_group != ex->fe_group);
@@ -1957,12 +1958,16 @@ static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
/* let's maintain buddy itself */
while (len) {
- ord = mb_find_order_for_block(e4b, start);
+ if (!split)
+ ord = mb_find_order_for_block(e4b, start);
if (((start >> ord) << ord) == start && len >= (1 << ord)) {
/* the whole chunk may be allocated at once! */
mlen = 1 << ord;
- buddy = mb_find_buddy(e4b, ord, &max);
+ if (!split)
+ buddy = mb_find_buddy(e4b, ord, &max);
+ else
+ split = false;
BUG_ON((start >> ord) >= max);
mb_set_bit(start >> ord, buddy);
e4b->bd_info->bb_counters[ord]--;
@@ -1989,6 +1994,7 @@ static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
mb_clear_bit(cur + 1, buddy);
e4b->bd_info->bb_counters[ord]++;
e4b->bd_info->bb_counters[ord]++;
+ split = true;
}
mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
@@ -5928,6 +5934,15 @@ static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode,
sbi = EXT4_SB(sb);
+ if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
+ !ext4_inode_block_valid(inode, block, count)) {
+ ext4_error(sb, "Freeing blocks in system zone - "
+ "Block = %llu, count = %lu", block, count);
+ /* err = 0. ext4_std_error should be a no op */
+ goto error_return;
+ }
+ flags |= EXT4_FREE_BLOCKS_VALIDATED;
+
do_more:
overflow = 0;
ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
@@ -5944,6 +5959,8 @@ do_more:
overflow = EXT4_C2B(sbi, bit) + count -
EXT4_BLOCKS_PER_GROUP(sb);
count -= overflow;
+ /* The range changed so it's no longer validated */
+ flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
}
count_clusters = EXT4_NUM_B2C(sbi, count);
bitmap_bh = ext4_read_block_bitmap(sb, block_group);
@@ -5958,7 +5975,8 @@ do_more:
goto error_return;
}
- if (!ext4_inode_block_valid(inode, block, count)) {
+ if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
+ !ext4_inode_block_valid(inode, block, count)) {
ext4_error(sb, "Freeing blocks in system zone - "
"Block = %llu, count = %lu", block, count);
/* err = 0. ext4_std_error should be a no op */
@@ -6081,6 +6099,8 @@ do_more:
block += count;
count = overflow;
put_bh(bitmap_bh);
+ /* The range changed so it's no longer validated */
+ flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
goto do_more;
}
error_return:
@@ -6127,6 +6147,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
"block = %llu, count = %lu", block, count);
return;
}
+ flags |= EXT4_FREE_BLOCKS_VALIDATED;
ext4_debug("freeing block %llu\n", block);
trace_ext4_free_blocks(inode, block, count, flags);
@@ -6158,6 +6179,8 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
block -= overflow;
count += overflow;
}
+ /* The range changed so it's no longer validated */
+ flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
}
overflow = EXT4_LBLK_COFF(sbi, count);
if (overflow) {
@@ -6168,6 +6191,8 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
return;
} else
count += sbi->s_cluster_ratio - overflow;
+ /* The range changed so it's no longer validated */
+ flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
}
if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index 42f590518b4c..54e7d3c95fd7 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -417,7 +417,7 @@ int ext4_ext_migrate(struct inode *inode)
struct inode *tmp_inode = NULL;
struct migrate_struct lb;
unsigned long max_entries;
- __u32 goal;
+ __u32 goal, tmp_csum_seed;
uid_t owner[2];
/*
@@ -465,6 +465,7 @@ int ext4_ext_migrate(struct inode *inode)
* the migration.
*/
ei = EXT4_I(inode);
+ tmp_csum_seed = EXT4_I(tmp_inode)->i_csum_seed;
EXT4_I(tmp_inode)->i_csum_seed = ei->i_csum_seed;
i_size_write(tmp_inode, i_size_read(inode));
/*
@@ -575,6 +576,7 @@ err_out:
* the inode is not visible to user space.
*/
tmp_inode->i_blocks = 0;
+ EXT4_I(tmp_inode)->i_csum_seed = tmp_csum_seed;
/* Reset the extent details */
ext4_ext_tree_init(handle, tmp_inode);
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index db4ba99d1ceb..3a31b662f661 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -54,6 +54,7 @@ static struct buffer_head *ext4_append(handle_t *handle,
struct inode *inode,
ext4_lblk_t *block)
{
+ struct ext4_map_blocks map;
struct buffer_head *bh;
int err;
@@ -63,6 +64,21 @@ static struct buffer_head *ext4_append(handle_t *handle,
return ERR_PTR(-ENOSPC);
*block = inode->i_size >> inode->i_sb->s_blocksize_bits;
+ map.m_lblk = *block;
+ map.m_len = 1;
+
+ /*
+ * We're appending a new directory block. Make sure the block is not
+ * allocated yet, otherwise we will end up corrupting the
+ * directory.
+ */
+ err = ext4_map_blocks(NULL, inode, &map, 0);
+ if (err < 0)
+ return ERR_PTR(err);
+ if (err) {
+ EXT4_ERROR_INODE(inode, "Logical block already allocated");
+ return ERR_PTR(-EFSCORRUPTED);
+ }
bh = ext4_bread(handle, inode, *block, EXT4_GET_BLOCKS_CREATE);
if (IS_ERR(bh))
@@ -110,6 +126,13 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
struct ext4_dir_entry *dirent;
int is_dx_block = 0;
+ if (block >= inode->i_size) {
+ ext4_error_inode(inode, func, line, block,
+ "Attempting to read directory block (%u) that is past i_size (%llu)",
+ block, inode->i_size);
+ return ERR_PTR(-EFSCORRUPTED);
+ }
+
if (ext4_simulate_fail(inode->i_sb, EXT4_SIM_DIRBLOCK_EIO))
bh = ERR_PTR(-EIO);
else
@@ -3067,11 +3090,8 @@ bool ext4_empty_dir(struct inode *inode)
de = (struct ext4_dir_entry_2 *) (bh->b_data +
(offset & (sb->s_blocksize - 1)));
if (ext4_check_dir_entry(inode, NULL, de, bh,
- bh->b_data, bh->b_size, offset)) {
- offset = (offset | (sb->s_blocksize - 1)) + 1;
- continue;
- }
- if (le32_to_cpu(de->inode)) {
+ bh->b_data, bh->b_size, offset) ||
+ le32_to_cpu(de->inode)) {
brelse(bh);
return false;
}
diff --git a/fs/ext4/orphan.c b/fs/ext4/orphan.c
index 7de0612eb42d..69a9cf9137a6 100644
--- a/fs/ext4/orphan.c
+++ b/fs/ext4/orphan.c
@@ -181,8 +181,8 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
} else
brelse(iloc.bh);
- jbd_debug(4, "superblock will point to %lu\n", inode->i_ino);
- jbd_debug(4, "orphan inode %lu will point to %d\n",
+ ext4_debug("superblock will point to %lu\n", inode->i_ino);
+ ext4_debug("orphan inode %lu will point to %d\n",
inode->i_ino, NEXT_ORPHAN(inode));
out:
ext4_std_error(sb, err);
@@ -251,7 +251,7 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
}
mutex_lock(&sbi->s_orphan_lock);
- jbd_debug(4, "remove inode %lu from orphan list\n", inode->i_ino);
+ ext4_debug("remove inode %lu from orphan list\n", inode->i_ino);
prev = ei->i_orphan.prev;
list_del_init(&ei->i_orphan);
@@ -267,7 +267,7 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
ino_next = NEXT_ORPHAN(inode);
if (prev == &sbi->s_orphan) {
- jbd_debug(4, "superblock will point to %u\n", ino_next);
+ ext4_debug("superblock will point to %u\n", ino_next);
BUFFER_TRACE(sbi->s_sbh, "get_write_access");
err = ext4_journal_get_write_access(handle, inode->i_sb,
sbi->s_sbh, EXT4_JTR_NONE);
@@ -286,7 +286,7 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
struct inode *i_prev =
&list_entry(prev, struct ext4_inode_info, i_orphan)->vfs_inode;
- jbd_debug(4, "orphan inode %lu will point to %u\n",
+ ext4_debug("orphan inode %lu will point to %u\n",
i_prev->i_ino, ino_next);
err = ext4_reserve_inode_write(handle, i_prev, &iloc2);
if (err) {
@@ -332,8 +332,8 @@ static void ext4_process_orphan(struct inode *inode,
ext4_msg(sb, KERN_DEBUG,
"%s: truncating inode %lu to %lld bytes",
__func__, inode->i_ino, inode->i_size);
- jbd_debug(2, "truncating inode %lu to %lld bytes\n",
- inode->i_ino, inode->i_size);
+ ext4_debug("truncating inode %lu to %lld bytes\n",
+ inode->i_ino, inode->i_size);
inode_lock(inode);
truncate_inode_pages(inode->i_mapping, inode->i_size);
ret = ext4_truncate(inode);
@@ -353,8 +353,8 @@ static void ext4_process_orphan(struct inode *inode,
ext4_msg(sb, KERN_DEBUG,
"%s: deleting unreferenced inode %lu",
__func__, inode->i_ino);
- jbd_debug(2, "deleting unreferenced inode %lu\n",
- inode->i_ino);
+ ext4_debug("deleting unreferenced inode %lu\n",
+ inode->i_ino);
(*nr_orphans)++;
}
iput(inode); /* The delete magic happens here! */
@@ -391,7 +391,7 @@ void ext4_orphan_cleanup(struct super_block *sb, struct ext4_super_block *es)
int inodes_per_ob = ext4_inodes_per_orphan_block(sb);
if (!es->s_last_orphan && !oi->of_blocks) {
- jbd_debug(4, "no orphan inodes to clean up\n");
+ ext4_debug("no orphan inodes to clean up\n");
return;
}
@@ -415,7 +415,7 @@ void ext4_orphan_cleanup(struct super_block *sb, struct ext4_super_block *es)
"clearing orphan list.\n");
es->s_last_orphan = 0;
}
- jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
+ ext4_debug("Skipping orphan recovery on fs with errors.\n");
return;
}
@@ -459,7 +459,7 @@ void ext4_orphan_cleanup(struct super_block *sb, struct ext4_super_block *es)
* so, skip the rest.
*/
if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
- jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
+ ext4_debug("Skipping orphan recovery on fs with errors.\n");
es->s_last_orphan = 0;
break;
}
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 8b70a4701293..fea2a68d067b 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -97,10 +97,13 @@ int ext4_resize_begin(struct super_block *sb)
return ret;
}
-void ext4_resize_end(struct super_block *sb)
+int ext4_resize_end(struct super_block *sb, bool update_backups)
{
clear_bit_unlock(EXT4_FLAGS_RESIZING, &EXT4_SB(sb)->s_ext4_flags);
smp_mb__after_atomic();
+ if (update_backups)
+ return ext4_update_overhead(sb, true);
+ return 0;
}
static ext4_group_t ext4_meta_bg_first_group(struct super_block *sb,
@@ -1380,6 +1383,17 @@ static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
return err;
}
+static void ext4_add_overhead(struct super_block *sb,
+ const ext4_fsblk_t overhead)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_super_block *es = sbi->s_es;
+
+ sbi->s_overhead += overhead;
+ es->s_overhead_clusters = cpu_to_le32(sbi->s_overhead);
+ smp_wmb();
+}
+
/*
* ext4_update_super() updates the super block so that the newly added
* groups can be seen by the filesystem.
@@ -1481,9 +1495,18 @@ static void ext4_update_super(struct super_block *sb,
}
/*
- * Update the fs overhead information
+ * Update the fs overhead information.
+ *
+ * For bigalloc, if the superblock already has a properly calculated
+ * overhead, update it with a value based on numbers already computed
+ * above for the newly allocated capacity.
*/
- ext4_calculate_overhead(sb);
+ if (ext4_has_feature_bigalloc(sb) && (sbi->s_overhead != 0))
+ ext4_add_overhead(sb,
+ EXT4_NUM_B2C(sbi, blocks_count - free_blocks));
+ else
+ ext4_calculate_overhead(sb);
+ es->s_overhead_clusters = cpu_to_le32(sbi->s_overhead);
if (test_opt(sb, DEBUG))
printk(KERN_DEBUG "EXT4-fs: added group %u:"
@@ -1988,6 +2011,16 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
}
brelse(bh);
+ /*
+ * For bigalloc, trim the requested size to the nearest cluster
+ * boundary to avoid creating an unusable filesystem. We do this
+ * silently, instead of returning an error, to avoid breaking
+ * callers that blindly resize the filesystem to the full size of
+ * the underlying block device.
+ */
+ if (ext4_has_feature_bigalloc(sb))
+ n_blocks_count &= ~((1 << EXT4_CLUSTER_BITS(sb)) - 1);
+
retry:
o_blocks_count = ext4_blocks_count(es);
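On the bigalloc trimming above: clearing the low EXT4_CLUSTER_BITS(sb) bits rounds the requested block count down to a whole number of clusters. With illustrative numbers (not from the patch), a 16-blocks-per-cluster filesystem asked to grow to 1000003 blocks is resized to 1000000:

    /* Sketch: round a block count down to a cluster boundary. */
    unsigned int cluster_bits = 4;                    /* 2^4 = 16 blocks per cluster */
    unsigned long long n_blocks_count = 1000003;

    n_blocks_count &= ~((1ULL << cluster_bits) - 1);  /* now 1000000 */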
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 2c68dec63e54..9a66abcca1a8 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1307,7 +1307,7 @@ static void ext4_put_super(struct super_block *sb)
if (sbi->s_chksum_driver)
crypto_free_shash(sbi->s_chksum_driver);
kfree(sbi->s_blockgroup_lock);
- fs_put_dax(sbi->s_daxdev);
+ fs_put_dax(sbi->s_daxdev, NULL);
fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
#if IS_ENABLED(CONFIG_UNICODE)
utf8_unload(sb->s_encoding);
@@ -3011,6 +3011,15 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
} else if (test_opt2(sb, DAX_INODE)) {
SEQ_OPTS_PUTS("dax=inode");
}
+
+ if (sbi->s_groups_count >= MB_DEFAULT_LINEAR_SCAN_THRESHOLD &&
+ !test_opt2(sb, MB_OPTIMIZE_SCAN)) {
+ SEQ_OPTS_PUTS("mb_optimize_scan=0");
+ } else if (sbi->s_groups_count < MB_DEFAULT_LINEAR_SCAN_THRESHOLD &&
+ test_opt2(sb, MB_OPTIMIZE_SCAN)) {
+ SEQ_OPTS_PUTS("mb_optimize_scan=1");
+ }
+
ext4_show_quota_options(seq, sb);
return 0;
}
@@ -4272,7 +4281,7 @@ static void ext4_free_sbi(struct ext4_sb_info *sbi)
return;
kfree(sbi->s_blockgroup_lock);
- fs_put_dax(sbi->s_daxdev);
+ fs_put_dax(sbi->s_daxdev, NULL);
kfree(sbi);
}
@@ -4284,7 +4293,8 @@ static struct ext4_sb_info *ext4_alloc_sbi(struct super_block *sb)
if (!sbi)
return NULL;
- sbi->s_daxdev = fs_dax_get_by_bdev(sb->s_bdev, &sbi->s_dax_part_off);
+ sbi->s_daxdev = fs_dax_get_by_bdev(sb->s_bdev, &sbi->s_dax_part_off,
+ NULL, NULL);
sbi->s_blockgroup_lock =
kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
@@ -4296,7 +4306,7 @@ static struct ext4_sb_info *ext4_alloc_sbi(struct super_block *sb)
sbi->s_sb = sb;
return sbi;
err_out:
- fs_put_dax(sbi->s_daxdev);
+ fs_put_dax(sbi->s_daxdev, NULL);
kfree(sbi);
return NULL;
}
@@ -5523,7 +5533,7 @@ static int ext4_fill_super(struct super_block *sb, struct fs_context *fc)
"Quota mode: %s.", descr, ext4_quota_mode(sb));
/* Update the s_overhead_clusters if necessary */
- ext4_update_overhead(sb);
+ ext4_update_overhead(sb, false);
return 0;
free_sbi:
@@ -5585,7 +5595,7 @@ static struct inode *ext4_get_journal_inode(struct super_block *sb,
return NULL;
}
- jbd_debug(2, "Journal inode found at %p: %lld bytes\n",
+ ext4_debug("Journal inode found at %p: %lld bytes\n",
journal_inode, journal_inode->i_size);
if (!S_ISREG(journal_inode->i_mode)) {
ext4_msg(sb, KERN_ERR, "invalid journal inode");
diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c
index d281f5bcc526..3d3ed3c38f56 100644
--- a/fs/ext4/symlink.c
+++ b/fs/ext4/symlink.c
@@ -74,6 +74,21 @@ static const char *ext4_get_link(struct dentry *dentry, struct inode *inode,
struct delayed_call *callback)
{
struct buffer_head *bh;
+ char *inline_link;
+
+ /*
+ * Creating a new inlined symlink is not supported; just provide a
+ * method to read the leftovers.
+ */
+ if (ext4_has_inline_data(inode)) {
+ if (!dentry)
+ return ERR_PTR(-ECHILD);
+
+ inline_link = ext4_read_inline_link(inode);
+ if (!IS_ERR(inline_link))
+ set_delayed_call(callback, kfree_link, inline_link);
+ return inline_link;
+ }
if (!dentry) {
bh = ext4_getblk(NULL, inode, 0, EXT4_GET_BLOCKS_CACHED_NOWAIT);
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 564e28a1aa94..533216e80fa2 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -436,6 +436,21 @@ error:
return err;
}
+/* Remove entry from mbcache when EA inode is getting evicted */
+void ext4_evict_ea_inode(struct inode *inode)
+{
+ struct mb_cache_entry *oe;
+
+ if (!EA_INODE_CACHE(inode))
+ return;
+ /* Wait for entry to get unused so that we can remove it */
+ while ((oe = mb_cache_entry_delete_or_get(EA_INODE_CACHE(inode),
+ ext4_xattr_inode_get_hash(inode), inode->i_ino))) {
+ mb_cache_entry_wait_unused(oe);
+ mb_cache_entry_put(EA_INODE_CACHE(inode), oe);
+ }
+}
+
static int
ext4_xattr_inode_verify_hashes(struct inode *ea_inode,
struct ext4_xattr_entry *entry, void *buffer,
@@ -976,10 +991,8 @@ int __ext4_xattr_set_credits(struct super_block *sb, struct inode *inode,
static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
int ref_change)
{
- struct mb_cache *ea_inode_cache = EA_INODE_CACHE(ea_inode);
struct ext4_iloc iloc;
s64 ref_count;
- u32 hash;
int ret;
inode_lock(ea_inode);
@@ -1002,14 +1015,6 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
set_nlink(ea_inode, 1);
ext4_orphan_del(handle, ea_inode);
-
- if (ea_inode_cache) {
- hash = ext4_xattr_inode_get_hash(ea_inode);
- mb_cache_entry_create(ea_inode_cache,
- GFP_NOFS, hash,
- ea_inode->i_ino,
- true /* reusable */);
- }
}
} else {
WARN_ONCE(ref_count < 0, "EA inode %lu ref_count=%lld",
@@ -1022,12 +1027,6 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
clear_nlink(ea_inode);
ext4_orphan_add(handle, ea_inode);
-
- if (ea_inode_cache) {
- hash = ext4_xattr_inode_get_hash(ea_inode);
- mb_cache_entry_delete(ea_inode_cache, hash,
- ea_inode->i_ino);
- }
}
}
@@ -1237,6 +1236,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
if (error)
goto out;
+retry_ref:
lock_buffer(bh);
hash = le32_to_cpu(BHDR(bh)->h_hash);
ref = le32_to_cpu(BHDR(bh)->h_refcount);
@@ -1246,9 +1246,18 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
* This must happen under buffer lock for
* ext4_xattr_block_set() to reliably detect freed block
*/
- if (ea_block_cache)
- mb_cache_entry_delete(ea_block_cache, hash,
- bh->b_blocknr);
+ if (ea_block_cache) {
+ struct mb_cache_entry *oe;
+
+ oe = mb_cache_entry_delete_or_get(ea_block_cache, hash,
+ bh->b_blocknr);
+ if (oe) {
+ unlock_buffer(bh);
+ mb_cache_entry_wait_unused(oe);
+ mb_cache_entry_put(ea_block_cache, oe);
+ goto retry_ref;
+ }
+ }
get_bh(bh);
unlock_buffer(bh);
@@ -1858,6 +1867,8 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
#define header(x) ((struct ext4_xattr_header *)(x))
if (s->base) {
+ int offset = (char *)s->here - bs->bh->b_data;
+
BUFFER_TRACE(bs->bh, "get_write_access");
error = ext4_journal_get_write_access(handle, sb, bs->bh,
EXT4_JTR_NONE);
@@ -1873,9 +1884,20 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
* ext4_xattr_block_set() to reliably detect modified
* block
*/
- if (ea_block_cache)
- mb_cache_entry_delete(ea_block_cache, hash,
- bs->bh->b_blocknr);
+ if (ea_block_cache) {
+ struct mb_cache_entry *oe;
+
+ oe = mb_cache_entry_delete_or_get(ea_block_cache,
+ hash, bs->bh->b_blocknr);
+ if (oe) {
+ /*
+ * Xattr block is getting reused. Leave
+ * it alone.
+ */
+ mb_cache_entry_put(ea_block_cache, oe);
+ goto clone_block;
+ }
+ }
ea_bdebug(bs->bh, "modifying in-place");
error = ext4_xattr_set_entry(i, s, handle, inode,
true /* is_block */);
@@ -1890,49 +1912,47 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
if (error)
goto cleanup;
goto inserted;
- } else {
- int offset = (char *)s->here - bs->bh->b_data;
+ }
+clone_block:
+ unlock_buffer(bs->bh);
+ ea_bdebug(bs->bh, "cloning");
+ s->base = kmemdup(BHDR(bs->bh), bs->bh->b_size, GFP_NOFS);
+ error = -ENOMEM;
+ if (s->base == NULL)
+ goto cleanup;
+ s->first = ENTRY(header(s->base)+1);
+ header(s->base)->h_refcount = cpu_to_le32(1);
+ s->here = ENTRY(s->base + offset);
+ s->end = s->base + bs->bh->b_size;
- unlock_buffer(bs->bh);
- ea_bdebug(bs->bh, "cloning");
- s->base = kmemdup(BHDR(bs->bh), bs->bh->b_size, GFP_NOFS);
- error = -ENOMEM;
- if (s->base == NULL)
+ /*
+ * If existing entry points to an xattr inode, we need
+ * to prevent ext4_xattr_set_entry() from decrementing
+ * ref count on it because the reference belongs to the
+ * original block. In this case, make the entry look
+ * like it has an empty value.
+ */
+ if (!s->not_found && s->here->e_value_inum) {
+ ea_ino = le32_to_cpu(s->here->e_value_inum);
+ error = ext4_xattr_inode_iget(inode, ea_ino,
+ le32_to_cpu(s->here->e_hash),
+ &tmp_inode);
+ if (error)
goto cleanup;
- s->first = ENTRY(header(s->base)+1);
- header(s->base)->h_refcount = cpu_to_le32(1);
- s->here = ENTRY(s->base + offset);
- s->end = s->base + bs->bh->b_size;
-
- /*
- * If existing entry points to an xattr inode, we need
- * to prevent ext4_xattr_set_entry() from decrementing
- * ref count on it because the reference belongs to the
- * original block. In this case, make the entry look
- * like it has an empty value.
- */
- if (!s->not_found && s->here->e_value_inum) {
- ea_ino = le32_to_cpu(s->here->e_value_inum);
- error = ext4_xattr_inode_iget(inode, ea_ino,
- le32_to_cpu(s->here->e_hash),
- &tmp_inode);
- if (error)
- goto cleanup;
-
- if (!ext4_test_inode_state(tmp_inode,
- EXT4_STATE_LUSTRE_EA_INODE)) {
- /*
- * Defer quota free call for previous
- * inode until success is guaranteed.
- */
- old_ea_inode_quota = le32_to_cpu(
- s->here->e_value_size);
- }
- iput(tmp_inode);
- s->here->e_value_inum = 0;
- s->here->e_value_size = 0;
+ if (!ext4_test_inode_state(tmp_inode,
+ EXT4_STATE_LUSTRE_EA_INODE)) {
+ /*
+ * Defer quota free call for previous
+ * inode until success is guaranteed.
+ */
+ old_ea_inode_quota = le32_to_cpu(
+ s->here->e_value_size);
}
+ iput(tmp_inode);
+
+ s->here->e_value_inum = 0;
+ s->here->e_value_size = 0;
}
} else {
/* Allocate a buffer where we construct the new block. */
@@ -1999,18 +2019,13 @@ inserted:
lock_buffer(new_bh);
/*
* We have to be careful about races with
- * freeing, rehashing or adding references to
- * xattr block. Once we hold buffer lock xattr
- * block's state is stable so we can check
- * whether the block got freed / rehashed or
- * not. Since we unhash mbcache entry under
- * buffer lock when freeing / rehashing xattr
- * block, checking whether entry is still
- * hashed is reliable. Same rules hold for
- * e_reusable handling.
+ * adding references to xattr block. Once we
+ * hold buffer lock xattr block's state is
+ * stable so we can check whether the
+ * additional reference fits.
*/
- if (hlist_bl_unhashed(&ce->e_hash_list) ||
- !ce->e_reusable) {
+ ref = le32_to_cpu(BHDR(new_bh)->h_refcount) + 1;
+ if (ref > EXT4_XATTR_REFCOUNT_MAX) {
/*
* Undo everything and check mbcache
* again.
@@ -2025,9 +2040,8 @@ inserted:
new_bh = NULL;
goto inserted;
}
- ref = le32_to_cpu(BHDR(new_bh)->h_refcount) + 1;
BHDR(new_bh)->h_refcount = cpu_to_le32(ref);
- if (ref >= EXT4_XATTR_REFCOUNT_MAX)
+ if (ref == EXT4_XATTR_REFCOUNT_MAX)
ce->e_reusable = 0;
ea_bdebug(new_bh, "reusing; refcount now=%d",
ref);
@@ -2175,8 +2189,9 @@ int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
struct ext4_inode *raw_inode;
int error;
- if (EXT4_I(inode)->i_extra_isize == 0)
+ if (!EXT4_INODE_HAS_XATTR_SPACE(inode))
return 0;
+
raw_inode = ext4_raw_inode(&is->iloc);
header = IHDR(inode, raw_inode);
is->s.base = is->s.first = IFIRST(header);
@@ -2204,8 +2219,9 @@ int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
struct ext4_xattr_search *s = &is->s;
int error;
- if (EXT4_I(inode)->i_extra_isize == 0)
+ if (!EXT4_INODE_HAS_XATTR_SPACE(inode))
return -ENOSPC;
+
error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */);
if (error)
return error;
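
The xattr.c hunks above replace unconditional mb_cache_entry_delete() calls with a delete-or-get, wait-for-last-user, retry sequence. The fragment below is a minimal userspace sketch of that shape, using a hypothetical refcounted entry guarded by a pthread mutex and condition variable; it only models the waiting behaviour, not the real mbcache API.

#include <pthread.h>

/* hypothetical refcounted cache entry, stands in for struct mb_cache_entry */
struct entry {
	pthread_mutex_t lock;
	pthread_cond_t  released;
	int refcount;       /* users currently holding the entry */
	int unhashed;       /* removed from the cache index */
};

/* evict: unhash, then wait until every in-flight user has dropped it */
static void evict_entry(struct entry *e)
{
	pthread_mutex_lock(&e->lock);
	e->unhashed = 1;                /* new lookups can no longer find it */
	while (e->refcount > 0)         /* analogue of mb_cache_entry_wait_unused() */
		pthread_cond_wait(&e->released, &e->lock);
	pthread_mutex_unlock(&e->lock);
	/* entry can now be freed safely */
}

/* put: the last user wakes the evictor */
static void put_entry(struct entry *e)
{
	pthread_mutex_lock(&e->lock);
	if (--e->refcount == 0)
		pthread_cond_broadcast(&e->released);
	pthread_mutex_unlock(&e->lock);
}
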
diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
index 77efb9a627ad..824faf0b15a8 100644
--- a/fs/ext4/xattr.h
+++ b/fs/ext4/xattr.h
@@ -84,7 +84,7 @@ struct ext4_xattr_entry {
/*
* The minimum size of EA value when you start storing it in an external inode
* size of block - size of header - size of 1 entry - 4 null bytes
-*/
+ */
#define EXT4_XATTR_MIN_LARGE_EA_SIZE(b) \
((b) - EXT4_XATTR_LEN(3) - sizeof(struct ext4_xattr_header) - 4)
@@ -95,6 +95,19 @@ struct ext4_xattr_entry {
#define EXT4_ZERO_XATTR_VALUE ((void *)-1)
+/*
+ * If we want to add an xattr to the inode, we should make sure that
+ * i_extra_isize is not 0 and that the inode size is not less than
+ * EXT4_GOOD_OLD_INODE_SIZE + extra_isize + pad.
+ * EXT4_GOOD_OLD_INODE_SIZE extra_isize header entry pad data
+ * |--------------------------|------------|------|---------|---|-------|
+ */
+#define EXT4_INODE_HAS_XATTR_SPACE(inode) \
+ ((EXT4_I(inode)->i_extra_isize != 0) && \
+ (EXT4_GOOD_OLD_INODE_SIZE + EXT4_I(inode)->i_extra_isize + \
+ sizeof(struct ext4_xattr_ibody_header) + EXT4_XATTR_PAD <= \
+ EXT4_INODE_SIZE((inode)->i_sb)))
+
struct ext4_xattr_info {
const char *name;
const void *value;
@@ -178,6 +191,7 @@ extern void ext4_xattr_inode_array_free(struct ext4_xattr_inode_array *array);
extern int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
struct ext4_inode *raw_inode, handle_t *handle);
+extern void ext4_evict_ea_inode(struct inode *inode);
extern const struct xattr_handler *ext4_xattr_handlers[];
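
A minimal userspace sketch of the EXT4_INODE_HAS_XATTR_SPACE() arithmetic defined above; the constants mirror ext4's on-disk values (128-byte legacy inode core, 4-byte in-inode xattr header, 4-byte padding) but are redefined locally so the example stands alone.

#include <stdbool.h>
#include <stdio.h>

#define GOOD_OLD_INODE_SIZE   128  /* size of the classic ext2/3 inode core */
#define IBODY_HEADER_SIZE       4  /* h_magic field of the in-inode xattr header */
#define XATTR_PAD               4  /* xattr entries are padded to 4 bytes */

/* true if an inode of 'inode_size' bytes with 'extra_isize' can hold xattrs */
static bool inode_has_xattr_space(unsigned inode_size, unsigned extra_isize)
{
	return extra_isize != 0 &&
	       GOOD_OLD_INODE_SIZE + extra_isize + IBODY_HEADER_SIZE + XATTR_PAD
			<= inode_size;
}

int main(void)
{
	/* a 256-byte inode with the default 32-byte extra_isize has room... */
	printf("%d\n", inode_has_xattr_space(256, 32)); /* prints 1 */
	/* ...but a legacy 128-byte inode never does */
	printf("%d\n", inode_has_xattr_space(128, 0));  /* prints 0 */
	return 0;
}
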
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 009e6c519e98..70e97075e535 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -729,14 +729,19 @@ out:
return ret;
}
-void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
+static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
+ bool pre_alloc);
+static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
+ bool bypass_destroy_callback, bool pre_alloc);
+
+void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
struct f2fs_inode_info *fi = F2FS_I(dic->inode);
const struct f2fs_compress_ops *cops =
f2fs_cops[fi->i_compress_algorithm];
+ bool bypass_callback = false;
int ret;
- int i;
trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
dic->cluster_size, fi->i_compress_algorithm);
@@ -746,41 +751,10 @@ void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
goto out_end_io;
}
- dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
- if (!dic->tpages) {
- ret = -ENOMEM;
- goto out_end_io;
- }
-
- for (i = 0; i < dic->cluster_size; i++) {
- if (dic->rpages[i]) {
- dic->tpages[i] = dic->rpages[i];
- continue;
- }
-
- dic->tpages[i] = f2fs_compress_alloc_page();
- if (!dic->tpages[i]) {
- ret = -ENOMEM;
- goto out_end_io;
- }
- }
-
- if (cops->init_decompress_ctx) {
- ret = cops->init_decompress_ctx(dic);
- if (ret)
- goto out_end_io;
- }
-
- dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
- if (!dic->rbuf) {
- ret = -ENOMEM;
- goto out_destroy_decompress_ctx;
- }
-
- dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
- if (!dic->cbuf) {
- ret = -ENOMEM;
- goto out_vunmap_rbuf;
+ ret = f2fs_prepare_decomp_mem(dic, false);
+ if (ret) {
+ bypass_callback = true;
+ goto out_release;
}
dic->clen = le32_to_cpu(dic->cbuf->clen);
@@ -788,7 +762,7 @@ void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
ret = -EFSCORRUPTED;
- goto out_vunmap_cbuf;
+ goto out_release;
}
ret = cops->decompress_pages(dic);
@@ -809,17 +783,13 @@ void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
}
}
-out_vunmap_cbuf:
- vm_unmap_ram(dic->cbuf, dic->nr_cpages);
-out_vunmap_rbuf:
- vm_unmap_ram(dic->rbuf, dic->cluster_size);
-out_destroy_decompress_ctx:
- if (cops->destroy_decompress_ctx)
- cops->destroy_decompress_ctx(dic);
+out_release:
+ f2fs_release_decomp_mem(dic, bypass_callback, false);
+
out_end_io:
trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
dic->clen, ret);
- f2fs_decompress_end_io(dic, ret);
+ f2fs_decompress_end_io(dic, ret, in_task);
}
/*
@@ -829,7 +799,7 @@ out_end_io:
* (or in the case of a failure, cleans up without actually decompressing).
*/
void f2fs_end_read_compressed_page(struct page *page, bool failed,
- block_t blkaddr)
+ block_t blkaddr, bool in_task)
{
struct decompress_io_ctx *dic =
(struct decompress_io_ctx *)page_private(page);
@@ -839,12 +809,12 @@ void f2fs_end_read_compressed_page(struct page *page, bool failed,
if (failed)
WRITE_ONCE(dic->failed, true);
- else if (blkaddr)
+ else if (blkaddr && in_task)
f2fs_cache_compressed_page(sbi, page,
dic->inode->i_ino, blkaddr);
if (atomic_dec_and_test(&dic->remaining_pages))
- f2fs_decompress_cluster(dic);
+ f2fs_decompress_cluster(dic, in_task);
}
static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
@@ -871,19 +841,26 @@ bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
return is_page_in_cluster(cc, index);
}
-bool f2fs_all_cluster_page_loaded(struct compress_ctx *cc, struct pagevec *pvec,
- int index, int nr_pages)
+bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
+ int index, int nr_pages, bool uptodate)
{
- unsigned long pgidx;
- int i;
+ unsigned long pgidx = pages[index]->index;
+ int i = uptodate ? 0 : 1;
- if (nr_pages - index < cc->cluster_size)
+ /*
+ * When uptodate is true, also check whether every page in the
+ * cluster is uptodate.
+ */
+ if (uptodate && (pgidx % cc->cluster_size))
return false;
- pgidx = pvec->pages[index]->index;
+ if (nr_pages - index < cc->cluster_size)
+ return false;
- for (i = 1; i < cc->cluster_size; i++) {
- if (pvec->pages[index + i]->index != pgidx + i)
+ for (; i < cc->cluster_size; i++) {
+ if (pages[index + i]->index != pgidx + i)
+ return false;
+ if (uptodate && !PageUptodate(pages[index + i]))
return false;
}
@@ -1552,16 +1529,85 @@ destroy_out:
return err;
}
-static void f2fs_free_dic(struct decompress_io_ctx *dic);
+static inline bool allow_memalloc_for_decomp(struct f2fs_sb_info *sbi,
+ bool pre_alloc)
+{
+ return pre_alloc ^ f2fs_low_mem_mode(sbi);
+}
+
+static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
+ bool pre_alloc)
+{
+ const struct f2fs_compress_ops *cops =
+ f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
+ int i;
+
+ if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc))
+ return 0;
+
+ dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
+ if (!dic->tpages)
+ return -ENOMEM;
+
+ for (i = 0; i < dic->cluster_size; i++) {
+ if (dic->rpages[i]) {
+ dic->tpages[i] = dic->rpages[i];
+ continue;
+ }
+
+ dic->tpages[i] = f2fs_compress_alloc_page();
+ if (!dic->tpages[i])
+ return -ENOMEM;
+ }
+
+ dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
+ if (!dic->rbuf)
+ return -ENOMEM;
+
+ dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
+ if (!dic->cbuf)
+ return -ENOMEM;
+
+ if (cops->init_decompress_ctx) {
+ int ret = cops->init_decompress_ctx(dic);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
+ bool bypass_destroy_callback, bool pre_alloc)
+{
+ const struct f2fs_compress_ops *cops =
+ f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
+
+ if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc))
+ return;
+
+ if (!bypass_destroy_callback && cops->destroy_decompress_ctx)
+ cops->destroy_decompress_ctx(dic);
+
+ if (dic->cbuf)
+ vm_unmap_ram(dic->cbuf, dic->nr_cpages);
+
+ if (dic->rbuf)
+ vm_unmap_ram(dic->rbuf, dic->cluster_size);
+}
+
+static void f2fs_free_dic(struct decompress_io_ctx *dic,
+ bool bypass_destroy_callback);
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
{
struct decompress_io_ctx *dic;
pgoff_t start_idx = start_idx_of_cluster(cc);
- int i;
+ struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
+ int i, ret;
- dic = f2fs_kmem_cache_alloc(dic_entry_slab, GFP_F2FS_ZERO,
- false, F2FS_I_SB(cc->inode));
+ dic = f2fs_kmem_cache_alloc(dic_entry_slab, GFP_F2FS_ZERO, false, sbi);
if (!dic)
return ERR_PTR(-ENOMEM);
@@ -1587,32 +1633,43 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
dic->nr_rpages = cc->cluster_size;
dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
- if (!dic->cpages)
+ if (!dic->cpages) {
+ ret = -ENOMEM;
goto out_free;
+ }
for (i = 0; i < dic->nr_cpages; i++) {
struct page *page;
page = f2fs_compress_alloc_page();
- if (!page)
+ if (!page) {
+ ret = -ENOMEM;
goto out_free;
+ }
f2fs_set_compressed_page(page, cc->inode,
start_idx + i + 1, dic);
dic->cpages[i] = page;
}
+ ret = f2fs_prepare_decomp_mem(dic, true);
+ if (ret)
+ goto out_free;
+
return dic;
out_free:
- f2fs_free_dic(dic);
- return ERR_PTR(-ENOMEM);
+ f2fs_free_dic(dic, true);
+ return ERR_PTR(ret);
}
-static void f2fs_free_dic(struct decompress_io_ctx *dic)
+static void f2fs_free_dic(struct decompress_io_ctx *dic,
+ bool bypass_destroy_callback)
{
int i;
+ f2fs_release_decomp_mem(dic, bypass_destroy_callback, true);
+
if (dic->tpages) {
for (i = 0; i < dic->cluster_size; i++) {
if (dic->rpages[i])
@@ -1637,17 +1694,33 @@ static void f2fs_free_dic(struct decompress_io_ctx *dic)
kmem_cache_free(dic_entry_slab, dic);
}
-static void f2fs_put_dic(struct decompress_io_ctx *dic)
+static void f2fs_late_free_dic(struct work_struct *work)
{
- if (refcount_dec_and_test(&dic->refcnt))
- f2fs_free_dic(dic);
+ struct decompress_io_ctx *dic =
+ container_of(work, struct decompress_io_ctx, free_work);
+
+ f2fs_free_dic(dic, false);
+}
+
+static void f2fs_put_dic(struct decompress_io_ctx *dic, bool in_task)
+{
+ if (refcount_dec_and_test(&dic->refcnt)) {
+ if (in_task) {
+ f2fs_free_dic(dic, false);
+ } else {
+ INIT_WORK(&dic->free_work, f2fs_late_free_dic);
+ queue_work(F2FS_I_SB(dic->inode)->post_read_wq,
+ &dic->free_work);
+ }
+ }
}
/*
* Update and unlock the cluster's pagecache pages, and release the reference to
* the decompress_io_ctx that was being held for I/O completion.
*/
-static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
+static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
+ bool in_task)
{
int i;
@@ -1668,7 +1741,7 @@ static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
unlock_page(rpage);
}
- f2fs_put_dic(dic);
+ f2fs_put_dic(dic, in_task);
}
static void f2fs_verify_cluster(struct work_struct *work)
@@ -1685,14 +1758,15 @@ static void f2fs_verify_cluster(struct work_struct *work)
SetPageError(rpage);
}
- __f2fs_decompress_end_io(dic, false);
+ __f2fs_decompress_end_io(dic, false, true);
}
/*
* This is called when a compressed cluster has been decompressed
* (or failed to be read and/or decompressed).
*/
-void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
+void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
+ bool in_task)
{
if (!failed && dic->need_verity) {
/*
@@ -1704,7 +1778,7 @@ void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
fsverity_enqueue_verify_work(&dic->verity_work);
} else {
- __f2fs_decompress_end_io(dic, failed);
+ __f2fs_decompress_end_io(dic, failed, in_task);
}
}
@@ -1713,12 +1787,12 @@ void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
*
* This is called when the page is no longer needed and can be freed.
*/
-void f2fs_put_page_dic(struct page *page)
+void f2fs_put_page_dic(struct page *page, bool in_task)
{
struct decompress_io_ctx *dic =
(struct decompress_io_ctx *)page_private(page);
- f2fs_put_dic(dic);
+ f2fs_put_dic(dic, in_task);
}
/*
@@ -1903,6 +1977,9 @@ int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
dev_t dev = sbi->sb->s_bdev->bd_dev;
char slab_name[32];
+ if (!f2fs_sb_has_compression(sbi))
+ return 0;
+
sprintf(slab_name, "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev));
sbi->page_array_slab_size = sizeof(struct page *) <<
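
The allow_memalloc_for_decomp() helper added above uses pre_alloc ^ f2fs_low_mem_mode() to pick which call site allocates the decompression buffers. Below is a small standalone sketch of that truth table, with illustrative booleans standing in for the real sbi state.

#include <stdbool.h>
#include <stdio.h>

static bool allow_memalloc_for_decomp(bool low_mem_mode, bool pre_alloc)
{
	/* true exactly when this call site is the one that should allocate */
	return pre_alloc ^ low_mem_mode;
}

int main(void)
{
	/* normal mode: allocate during f2fs_alloc_dic() (pre_alloc == true) */
	printf("%d %d\n", allow_memalloc_for_decomp(false, true),
			  allow_memalloc_for_decomp(false, false)); /* 1 0 */
	/* low memory mode: allocate during decompression (pre_alloc == false) */
	printf("%d %d\n", allow_memalloc_for_decomp(true, true),
			  allow_memalloc_for_decomp(true, false));  /* 0 1 */
	return 0;
}
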
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index ed503d50f95f..aa3ccddfa037 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -119,7 +119,7 @@ struct bio_post_read_ctx {
block_t fs_blkaddr;
};
-static void f2fs_finish_read_bio(struct bio *bio)
+static void f2fs_finish_read_bio(struct bio *bio, bool in_task)
{
struct bio_vec *bv;
struct bvec_iter_all iter_all;
@@ -133,8 +133,9 @@ static void f2fs_finish_read_bio(struct bio *bio)
if (f2fs_is_compressed_page(page)) {
if (bio->bi_status)
- f2fs_end_read_compressed_page(page, true, 0);
- f2fs_put_page_dic(page);
+ f2fs_end_read_compressed_page(page, true, 0,
+ in_task);
+ f2fs_put_page_dic(page, in_task);
continue;
}
@@ -191,7 +192,7 @@ static void f2fs_verify_bio(struct work_struct *work)
fsverity_verify_bio(bio);
}
- f2fs_finish_read_bio(bio);
+ f2fs_finish_read_bio(bio, true);
}
/*
@@ -203,7 +204,7 @@ static void f2fs_verify_bio(struct work_struct *work)
* can involve reading verity metadata pages from the file, and these verity
* metadata pages may be encrypted and/or compressed.
*/
-static void f2fs_verify_and_finish_bio(struct bio *bio)
+static void f2fs_verify_and_finish_bio(struct bio *bio, bool in_task)
{
struct bio_post_read_ctx *ctx = bio->bi_private;
@@ -211,7 +212,7 @@ static void f2fs_verify_and_finish_bio(struct bio *bio)
INIT_WORK(&ctx->work, f2fs_verify_bio);
fsverity_enqueue_verify_work(&ctx->work);
} else {
- f2fs_finish_read_bio(bio);
+ f2fs_finish_read_bio(bio, in_task);
}
}
@@ -224,7 +225,8 @@ static void f2fs_verify_and_finish_bio(struct bio *bio)
* that the bio includes at least one compressed page. The actual decompression
* is done on a per-cluster basis, not a per-bio basis.
*/
-static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx)
+static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx,
+ bool in_task)
{
struct bio_vec *bv;
struct bvec_iter_all iter_all;
@@ -237,7 +239,7 @@ static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx)
/* PG_error was set if decryption failed. */
if (f2fs_is_compressed_page(page))
f2fs_end_read_compressed_page(page, PageError(page),
- blkaddr);
+ blkaddr, in_task);
else
all_compressed = false;
@@ -262,15 +264,16 @@ static void f2fs_post_read_work(struct work_struct *work)
fscrypt_decrypt_bio(ctx->bio);
if (ctx->enabled_steps & STEP_DECOMPRESS)
- f2fs_handle_step_decompress(ctx);
+ f2fs_handle_step_decompress(ctx, true);
- f2fs_verify_and_finish_bio(ctx->bio);
+ f2fs_verify_and_finish_bio(ctx->bio, true);
}
static void f2fs_read_end_io(struct bio *bio)
{
struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
struct bio_post_read_ctx *ctx;
+ bool intask = in_task();
iostat_update_and_unbind_ctx(bio, 0);
ctx = bio->bi_private;
@@ -281,16 +284,29 @@ static void f2fs_read_end_io(struct bio *bio)
}
if (bio->bi_status) {
- f2fs_finish_read_bio(bio);
+ f2fs_finish_read_bio(bio, intask);
return;
}
- if (ctx && (ctx->enabled_steps & (STEP_DECRYPT | STEP_DECOMPRESS))) {
- INIT_WORK(&ctx->work, f2fs_post_read_work);
- queue_work(ctx->sbi->post_read_wq, &ctx->work);
- } else {
- f2fs_verify_and_finish_bio(bio);
+ if (ctx) {
+ unsigned int enabled_steps = ctx->enabled_steps &
+ (STEP_DECRYPT | STEP_DECOMPRESS);
+
+ /*
+ * If decompression is the only remaining post-read step (no
+ * decryption is needed), handle it here instead of deferring it.
+ */
+ if (enabled_steps == STEP_DECOMPRESS &&
+ !f2fs_low_mem_mode(sbi)) {
+ f2fs_handle_step_decompress(ctx, intask);
+ } else if (enabled_steps) {
+ INIT_WORK(&ctx->work, f2fs_post_read_work);
+ queue_work(ctx->sbi->post_read_wq, &ctx->work);
+ return;
+ }
}
+
+ f2fs_verify_and_finish_bio(bio, intask);
}
static void f2fs_write_end_io(struct bio *bio)
@@ -1682,8 +1698,6 @@ sync_out:
*/
f2fs_wait_on_block_writeback_range(inode,
map->m_pblk, map->m_len);
- invalidate_mapping_pages(META_MAPPING(sbi),
- map->m_pblk, map->m_pblk);
if (map->m_multidev_dio) {
block_t blk_addr = map->m_pblk;
@@ -2223,7 +2237,7 @@ skip_reading_dnode:
if (f2fs_load_compressed_page(sbi, page, blkaddr)) {
if (atomic_dec_and_test(&dic->remaining_pages))
- f2fs_decompress_cluster(dic);
+ f2fs_decompress_cluster(dic, true);
continue;
}
@@ -2241,7 +2255,7 @@ submit_and_realloc:
page->index, for_write);
if (IS_ERR(bio)) {
ret = PTR_ERR(bio);
- f2fs_decompress_end_io(dic, ret);
+ f2fs_decompress_end_io(dic, ret, true);
f2fs_put_dnode(&dn);
*bio_ret = NULL;
return ret;
@@ -2731,6 +2745,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
.submitted = false,
.compr_blocks = compr_blocks,
.need_lock = LOCK_RETRY,
+ .post_read = f2fs_post_read_required(inode),
.io_type = io_type,
.io_wbc = wbc,
.bio = bio,
@@ -2902,7 +2917,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
{
int ret = 0;
int done = 0, retry = 0;
- struct pagevec pvec;
+ struct page *pages[F2FS_ONSTACK_PAGES];
struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
struct bio *bio = NULL;
sector_t last_block;
@@ -2933,8 +2948,6 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
int submitted = 0;
int i;
- pagevec_init(&pvec);
-
if (get_dirty_pages(mapping->host) <=
SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
set_inode_flag(mapping->host, FI_HOT_DATA);
@@ -2960,13 +2973,13 @@ retry:
tag_pages_for_writeback(mapping, index, end);
done_index = index;
while (!done && !retry && (index <= end)) {
- nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
- tag);
+ nr_pages = find_get_pages_range_tag(mapping, &index, end,
+ tag, F2FS_ONSTACK_PAGES, pages);
if (nr_pages == 0)
break;
for (i = 0; i < nr_pages; i++) {
- struct page *page = pvec.pages[i];
+ struct page *page = pages[i];
bool need_readd;
readd:
need_readd = false;
@@ -2997,6 +3010,10 @@ readd:
if (!f2fs_cluster_is_empty(&cc))
goto lock_page;
+ if (f2fs_all_cluster_page_ready(&cc,
+ pages, i, nr_pages, true))
+ goto lock_page;
+
ret2 = f2fs_prepare_compress_overwrite(
inode, &pagep,
page->index, &fsdata);
@@ -3007,8 +3024,8 @@ readd:
} else if (ret2 &&
(!f2fs_compress_write_end(inode,
fsdata, page->index, 1) ||
- !f2fs_all_cluster_page_loaded(&cc,
- &pvec, i, nr_pages))) {
+ !f2fs_all_cluster_page_ready(&cc,
+ pages, i, nr_pages, false))) {
retry = 1;
break;
}
@@ -3098,7 +3115,7 @@ next:
if (need_readd)
goto readd;
}
- pagevec_release(&pvec);
+ release_pages(pages, nr_pages);
cond_resched();
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
@@ -3408,12 +3425,11 @@ static int prepare_atomic_write_begin(struct f2fs_sb_info *sbi,
struct inode *cow_inode = F2FS_I(inode)->cow_inode;
pgoff_t index = page->index;
int err = 0;
- block_t ori_blk_addr;
+ block_t ori_blk_addr = NULL_ADDR;
/* If pos is beyond the end of file, reserve a new block in COW inode */
if ((pos & PAGE_MASK) >= i_size_read(inode))
- return __reserve_data_block(cow_inode, index, blk_addr,
- node_changed);
+ goto reserve_block;
/* Look for the block in COW inode first */
err = __find_data_block(cow_inode, index, blk_addr);
@@ -3427,10 +3443,12 @@ static int prepare_atomic_write_begin(struct f2fs_sb_info *sbi,
if (err)
return err;
+reserve_block:
/* Finally, we should reserve a new block in COW inode for the update */
err = __reserve_data_block(cow_inode, index, blk_addr, node_changed);
if (err)
return err;
+ inc_atomic_write_cnt(inode);
if (ori_blk_addr != NULL_ADDR)
*blk_addr = ori_blk_addr;
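
The data.c changes above make the read-completion path check in_task(): if decompression is the only remaining step and the filesystem is not in low-memory mode, it is handled in the current context, otherwise the bio is handed to the post-read workqueue. The sketch below models only that dispatch decision; the helper names and printed messages are illustrative, not the kernel API.

#include <stdbool.h>
#include <stdio.h>

struct read_ctx { unsigned enabled_steps; };

#define STEP_DECRYPT    0x1u
#define STEP_DECOMPRESS 0x2u

/* stand-ins for the kernel helpers; bodies are illustrative only */
static void handle_decompress(struct read_ctx *ctx, bool in_task)
{
	(void)ctx;
	printf("decompress inline (in_task=%d)\n", in_task);
}

static void queue_post_read_work(struct read_ctx *ctx)
{
	(void)ctx;
	printf("deferred to post-read workqueue\n");
}

static void end_read(struct read_ctx *ctx, bool in_task, bool low_mem_mode)
{
	unsigned steps = ctx->enabled_steps & (STEP_DECRYPT | STEP_DECOMPRESS);

	/* decompression alone is handled where we are; anything else is deferred */
	if (steps == STEP_DECOMPRESS && !low_mem_mode)
		handle_decompress(ctx, in_task);
	else if (steps)
		queue_post_read_work(ctx);
}

int main(void)
{
	struct read_ctx ctx = { .enabled_steps = STEP_DECOMPRESS };

	end_read(&ctx, true, false);
	return 0;
}
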
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index c92625ef16d0..c01471573977 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -39,7 +39,7 @@ void f2fs_update_sit_info(struct f2fs_sb_info *sbi)
bimodal = 0;
total_vblocks = 0;
- blks_per_sec = BLKS_PER_SEC(sbi);
+ blks_per_sec = CAP_BLKS_PER_SEC(sbi);
hblks_per_sec = blks_per_sec / 2;
for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
vblocks = get_valid_blocks(sbi, segno, true);
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 85921a1f2db8..3c7cdb70fe2e 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -159,6 +159,7 @@ struct f2fs_mount_info {
int fsync_mode; /* fsync policy */
int fs_mode; /* fs mode: LFS or ADAPTIVE */
int bggc_mode; /* bggc mode: off, on or sync */
+ int memory_mode; /* memory mode */
int discard_unit; /*
* discard command's offset/size should
* be aligned to this unit: block,
@@ -229,7 +230,6 @@ enum {
#define CP_PAUSE 0x00000040
#define CP_RESIZE 0x00000080
-#define MAX_DISCARD_BLOCKS(sbi) BLKS_PER_SEC(sbi)
#define DEF_MAX_DISCARD_REQUEST 8 /* issue 8 discards per round */
#define DEF_MIN_DISCARD_ISSUE_TIME 50 /* 50 ms, if exists */
#define DEF_MID_DISCARD_ISSUE_TIME 500 /* 500 ms, if device busy */
@@ -598,6 +598,8 @@ enum {
#define RECOVERY_MAX_RA_BLOCKS BIO_MAX_VECS
#define RECOVERY_MIN_RA_BLOCKS 1
+#define F2FS_ONSTACK_PAGES 16 /* nr of onstack pages */
+
struct rb_entry {
struct rb_node rb_node; /* rb node located in rb-tree */
union {
@@ -757,6 +759,7 @@ enum {
FI_ENABLE_COMPRESS, /* enable compression in "user" compression mode */
FI_COMPRESS_RELEASED, /* compressed blocks were released */
FI_ALIGNED_WRITE, /* enable aligned write */
+ FI_COW_FILE, /* indicate COW file */
FI_MAX, /* max flag, never be used */
};
@@ -812,6 +815,8 @@ struct f2fs_inode_info {
unsigned char i_compress_level; /* compress level (lz4hc,zstd) */
unsigned short i_compress_flag; /* compress flag */
unsigned int i_cluster_size; /* cluster size */
+
+ unsigned int atomic_write_cnt;
};
static inline void get_extent_info(struct extent_info *ext,
@@ -1198,6 +1203,7 @@ struct f2fs_io_info {
bool retry; /* need to reallocate block address */
int compr_blocks; /* # of compressed block addresses */
bool encrypted; /* indicate file is encrypted */
+ bool post_read; /* require post read */
enum iostat_type io_type; /* io type */
struct writeback_control *io_wbc; /* writeback control */
struct bio **bio; /* bio for ipu */
@@ -1234,7 +1240,6 @@ struct f2fs_dev_info {
#ifdef CONFIG_BLK_DEV_ZONED
unsigned int nr_blkz; /* Total number of zones */
unsigned long *blkz_seq; /* Bitmap indicating sequential zones */
- block_t *zone_capacity_blocks; /* Array of zone capacity in blks */
#endif
};
@@ -1360,6 +1365,13 @@ enum {
DISCARD_UNIT_SECTION, /* basic discard unit is section */
};
+enum {
+ MEMORY_MODE_NORMAL, /* memory mode for normal devices */
+ MEMORY_MODE_LOW, /* memory mode for low memory devices */
+};
+
+
+
static inline int f2fs_test_bit(unsigned int nr, char *addr);
static inline void f2fs_set_bit(unsigned int nr, char *addr);
static inline void f2fs_clear_bit(unsigned int nr, char *addr);
@@ -1580,6 +1592,7 @@ struct decompress_io_ctx {
void *private; /* payload buffer for specified decompression algorithm */
void *private2; /* extra payload buffer */
struct work_struct verity_work; /* work to verify the decompressed pages */
+ struct work_struct free_work; /* work to free this structure later */
};
#define NULL_CLUSTER ((unsigned int)(~0))
@@ -1664,6 +1677,7 @@ struct f2fs_sb_info {
unsigned int meta_ino_num; /* meta inode number*/
unsigned int log_blocks_per_seg; /* log2 blocks per segment */
unsigned int blocks_per_seg; /* blocks per segment */
+ unsigned int unusable_blocks_per_sec; /* unusable blocks per section */
unsigned int segs_per_sec; /* segments per section */
unsigned int secs_per_zone; /* sections per zone */
unsigned int total_sections; /* total section count */
@@ -1804,6 +1818,12 @@ struct f2fs_sb_info {
int max_fragment_chunk; /* max chunk size for block fragmentation mode */
int max_fragment_hole; /* max hole size for block fragmentation mode */
+ /* For atomic write statistics */
+ atomic64_t current_atomic_write;
+ s64 peak_atomic_write;
+ u64 committed_atomic_block;
+ u64 revoked_atomic_block;
+
#ifdef CONFIG_F2FS_FS_COMPRESSION
struct kmem_cache *page_array_slab; /* page array entry */
unsigned int page_array_slab_size; /* default page array slab size */
@@ -2418,6 +2438,28 @@ static inline void inode_dec_dirty_pages(struct inode *inode)
dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
}
+static inline void inc_atomic_write_cnt(struct inode *inode)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ u64 current_write;
+
+ fi->atomic_write_cnt++;
+ atomic64_inc(&sbi->current_atomic_write);
+ current_write = atomic64_read(&sbi->current_atomic_write);
+ if (current_write > sbi->peak_atomic_write)
+ sbi->peak_atomic_write = current_write;
+}
+
+static inline void release_atomic_write_cnt(struct inode *inode)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+
+ atomic64_sub(fi->atomic_write_cnt, &sbi->current_atomic_write);
+ fi->atomic_write_cnt = 0;
+}
+
static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type)
{
return atomic_read(&sbi->nr_pages[count_type]);
@@ -2696,16 +2738,6 @@ static inline struct page *f2fs_pagecache_get_page(
return pagecache_get_page(mapping, index, fgp_flags, gfp_mask);
}
-static inline void f2fs_copy_page(struct page *src, struct page *dst)
-{
- char *src_kaddr = kmap(src);
- char *dst_kaddr = kmap(dst);
-
- memcpy(dst_kaddr, src_kaddr, PAGE_SIZE);
- kunmap(dst);
- kunmap(src);
-}
-
static inline void f2fs_put_page(struct page *page, int unlock)
{
if (!page)
@@ -3208,6 +3240,11 @@ static inline bool f2fs_is_atomic_file(struct inode *inode)
return is_inode_flag_set(inode, FI_ATOMIC_FILE);
}
+static inline bool f2fs_is_cow_file(struct inode *inode)
+{
+ return is_inode_flag_set(inode, FI_COW_FILE);
+}
+
static inline bool f2fs_is_first_block_written(struct inode *inode)
{
return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN);
@@ -4154,13 +4191,13 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
bool f2fs_is_compress_backend_ready(struct inode *inode);
int f2fs_init_compress_mempool(void);
void f2fs_destroy_compress_mempool(void);
-void f2fs_decompress_cluster(struct decompress_io_ctx *dic);
+void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task);
void f2fs_end_read_compressed_page(struct page *page, bool failed,
- block_t blkaddr);
+ block_t blkaddr, bool in_task);
bool f2fs_cluster_is_empty(struct compress_ctx *cc);
bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
-bool f2fs_all_cluster_page_loaded(struct compress_ctx *cc, struct pagevec *pvec,
- int index, int nr_pages);
+bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
+ int index, int nr_pages, bool uptodate);
bool f2fs_sanity_check_cluster(struct dnode_of_data *dn);
void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
int f2fs_write_multi_pages(struct compress_ctx *cc,
@@ -4175,8 +4212,9 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
unsigned nr_pages, sector_t *last_block_in_bio,
bool is_readahead, bool for_write);
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
-void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed);
-void f2fs_put_page_dic(struct page *page);
+void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
+ bool in_task);
+void f2fs_put_page_dic(struct page *page, bool in_task);
unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn);
int f2fs_init_compress_ctx(struct compress_ctx *cc);
void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
@@ -4222,13 +4260,14 @@ static inline struct page *f2fs_compress_control_page(struct page *page)
}
static inline int f2fs_init_compress_mempool(void) { return 0; }
static inline void f2fs_destroy_compress_mempool(void) { }
-static inline void f2fs_decompress_cluster(struct decompress_io_ctx *dic) { }
+static inline void f2fs_decompress_cluster(struct decompress_io_ctx *dic,
+ bool in_task) { }
static inline void f2fs_end_read_compressed_page(struct page *page,
- bool failed, block_t blkaddr)
+ bool failed, block_t blkaddr, bool in_task)
{
WARN_ON_ONCE(1);
}
-static inline void f2fs_put_page_dic(struct page *page)
+static inline void f2fs_put_page_dic(struct page *page, bool in_task)
{
WARN_ON_ONCE(1);
}
@@ -4254,8 +4293,9 @@ static inline void f2fs_update_extent_tree_range_compressed(struct inode *inode,
unsigned int c_len) { }
#endif
-static inline void set_compress_context(struct inode *inode)
+static inline int set_compress_context(struct inode *inode)
{
+#ifdef CONFIG_F2FS_FS_COMPRESSION
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
F2FS_I(inode)->i_compress_algorithm =
@@ -4278,6 +4318,10 @@ static inline void set_compress_context(struct inode *inode)
stat_inc_compr_inode(inode);
inc_compr_inode_stat(inode);
f2fs_mark_inode_dirty_sync(inode, true);
+ return 0;
+#else
+ return -EOPNOTSUPP;
+#endif
}
static inline bool f2fs_disable_compressed_file(struct inode *inode)
@@ -4394,10 +4438,15 @@ static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
}
+static inline bool f2fs_low_mem_mode(struct f2fs_sb_info *sbi)
+{
+ return F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW;
+}
+
static inline bool f2fs_may_compress(struct inode *inode)
{
if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) ||
- f2fs_is_atomic_file(inode))
+ f2fs_is_atomic_file(inode) || f2fs_has_inline_data(inode))
return false;
return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
}
@@ -4459,12 +4508,7 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
/* disallow direct IO if any of devices has unaligned blksize */
if (f2fs_is_multi_device(sbi) && !sbi->aligned_blksize)
return true;
- /*
- * for blkzoned device, fallback direct IO to buffered IO, so
- * all IOs can be serialized by log-structured write.
- */
- if (f2fs_sb_has_blkzoned(sbi))
- return true;
+
if (f2fs_lfs_mode(sbi) && (rw == WRITE)) {
if (block_unaligned_IO(inode, iocb, iter))
return true;
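
A userspace sketch of the current/peak atomic-write accounting that inc_atomic_write_cnt() and release_atomic_write_cnt() introduce above: each block written to an atomic file bumps a per-file count and a global counter, and the global high-water mark is updated opportunistically (as in the patch, the peak update itself is not atomic). C11 atomics stand in for the kernel's atomic64_t.

#include <stdatomic.h>
#include <stdint.h>

static _Atomic int64_t current_atomic_write;
static int64_t peak_atomic_write;

struct file_ctx {
	uint64_t atomic_write_cnt;  /* blocks written by this atomic file */
};

static void inc_atomic_write_cnt(struct file_ctx *f)
{
	int64_t cur;

	f->atomic_write_cnt++;
	cur = atomic_fetch_add(&current_atomic_write, 1) + 1;
	if (cur > peak_atomic_write)        /* benign race, as in the patch */
		peak_atomic_write = cur;
}

static void release_atomic_write_cnt(struct file_ctx *f)
{
	atomic_fetch_sub(&current_atomic_write, f->atomic_write_cnt);
	f->atomic_write_cnt = 0;
}
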
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index d66e37d80a2d..ce4905a073b3 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -1272,7 +1272,7 @@ static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
f2fs_put_page(psrc, 1);
return PTR_ERR(pdst);
}
- f2fs_copy_page(psrc, pdst);
+ memcpy_page(pdst, 0, psrc, 0, PAGE_SIZE);
set_page_dirty(pdst);
f2fs_put_page(pdst, 1);
f2fs_put_page(psrc, 1);
@@ -1675,7 +1675,7 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
return 0;
if (f2fs_is_pinned_file(inode)) {
- block_t sec_blks = BLKS_PER_SEC(sbi);
+ block_t sec_blks = CAP_BLKS_PER_SEC(sbi);
block_t sec_len = roundup(map.m_len, sec_blks);
map.m_len = sec_blks;
@@ -1816,8 +1816,7 @@ static int f2fs_release_file(struct inode *inode, struct file *filp)
atomic_read(&inode->i_writecount) != 1)
return 0;
- if (f2fs_is_atomic_file(inode))
- f2fs_abort_atomic_write(inode, true);
+ f2fs_abort_atomic_write(inode, true);
return 0;
}
@@ -1831,8 +1830,7 @@ static int f2fs_file_flush(struct file *file, fl_owner_t id)
* until all the writers close its file. Since this should be done
* before dropping file lock, it needs to do in ->flush.
*/
- if (f2fs_is_atomic_file(inode) &&
- F2FS_I(inode)->atomic_write_task == current)
+ if (F2FS_I(inode)->atomic_write_task == current)
f2fs_abort_atomic_write(inode, true);
return 0;
}
@@ -1867,22 +1865,15 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
if (masked_flags & F2FS_COMPR_FL) {
if (!f2fs_disable_compressed_file(inode))
return -EINVAL;
- }
- if (iflags & F2FS_NOCOMP_FL)
- return -EINVAL;
- if (iflags & F2FS_COMPR_FL) {
+ } else {
if (!f2fs_may_compress(inode))
return -EINVAL;
- if (S_ISREG(inode->i_mode) && inode->i_size)
+ if (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))
return -EINVAL;
-
- set_compress_context(inode);
+ if (set_compress_context(inode))
+ return -EOPNOTSUPP;
}
}
- if ((iflags ^ masked_flags) & F2FS_NOCOMP_FL) {
- if (masked_flags & F2FS_COMPR_FL)
- return -EINVAL;
- }
fi->i_flags = iflags | (fi->i_flags & ~mask);
f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
@@ -2062,13 +2053,14 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
set_inode_flag(inode, FI_ATOMIC_FILE);
- set_inode_flag(fi->cow_inode, FI_ATOMIC_FILE);
+ set_inode_flag(fi->cow_inode, FI_COW_FILE);
clear_inode_flag(fi->cow_inode, FI_INLINE_DATA);
f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
f2fs_update_time(sbi, REQ_TIME);
fi->atomic_write_task = current;
stat_update_max_atomic_write(inode);
+ fi->atomic_write_cnt = 0;
out:
inode_unlock(inode);
mnt_drop_write_file(filp);
@@ -2109,6 +2101,30 @@ unlock_out:
return ret;
}
+static int f2fs_ioc_abort_atomic_write(struct file *filp)
+{
+ struct inode *inode = file_inode(filp);
+ struct user_namespace *mnt_userns = file_mnt_user_ns(filp);
+ int ret;
+
+ if (!inode_owner_or_capable(mnt_userns, inode))
+ return -EACCES;
+
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
+ inode_lock(inode);
+
+ f2fs_abort_atomic_write(inode, true);
+
+ inode_unlock(inode);
+
+ mnt_drop_write_file(filp);
+ f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
+ return ret;
+}
+
static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
@@ -2426,7 +2442,7 @@ do_more:
ret = -EAGAIN;
goto out;
}
- range->start += BLKS_PER_SEC(sbi);
+ range->start += CAP_BLKS_PER_SEC(sbi);
if (range->start <= end)
goto do_more;
out:
@@ -2551,7 +2567,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
goto out;
}
- sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));
+ sec_num = DIV_ROUND_UP(total, CAP_BLKS_PER_SEC(sbi));
/*
* make sure there are enough free section for LFS allocation, this can
@@ -3897,10 +3913,10 @@ static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
for (i = 0; i < page_len; i++, redirty_idx++) {
page = find_lock_page(mapping, redirty_idx);
- if (!page) {
- ret = -ENOMEM;
- break;
- }
+
+ /* It will never fail, since the page was pinned above */
+ f2fs_bug_on(F2FS_I_SB(inode), !page);
+
set_page_dirty(page);
f2fs_put_page(page, 1);
f2fs_put_page(page, 0);
@@ -3939,6 +3955,11 @@ static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
goto out;
}
+ if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
if (ret)
goto out;
@@ -4006,6 +4027,11 @@ static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
goto out;
}
+ if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
if (ret)
goto out;
@@ -4054,9 +4080,10 @@ static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return f2fs_ioc_start_atomic_write(filp);
case F2FS_IOC_COMMIT_ATOMIC_WRITE:
return f2fs_ioc_commit_atomic_write(filp);
+ case F2FS_IOC_ABORT_ATOMIC_WRITE:
+ return f2fs_ioc_abort_atomic_write(filp);
case F2FS_IOC_START_VOLATILE_WRITE:
case F2FS_IOC_RELEASE_VOLATILE_WRITE:
- case F2FS_IOC_ABORT_VOLATILE_WRITE:
return -EOPNOTSUPP;
case F2FS_IOC_SHUTDOWN:
return f2fs_ioc_shutdown(filp, arg);
@@ -4725,7 +4752,7 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case F2FS_IOC_COMMIT_ATOMIC_WRITE:
case F2FS_IOC_START_VOLATILE_WRITE:
case F2FS_IOC_RELEASE_VOLATILE_WRITE:
- case F2FS_IOC_ABORT_VOLATILE_WRITE:
+ case F2FS_IOC_ABORT_ATOMIC_WRITE:
case F2FS_IOC_SHUTDOWN:
case FITRIM:
case FS_IOC_SET_ENCRYPTION_POLICY:
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index d5fb426e0747..6da21d405ce1 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -150,8 +150,11 @@ do_gc:
gc_control.nr_free_secs = foreground ? 1 : 0;
/* if return value is not zero, no victim was selected */
- if (f2fs_gc(sbi, &gc_control))
- wait_ms = gc_th->no_gc_sleep_time;
+ if (f2fs_gc(sbi, &gc_control)) {
+ /* don't let foreground gc adjust wait_ms */
+ if (!foreground)
+ wait_ms = gc_th->no_gc_sleep_time;
+ }
if (foreground)
wake_up_all(&gc_th->fggc_wq);
@@ -487,7 +490,7 @@ static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
unsigned long long age, u, accu;
unsigned long long max_mtime = sit_i->dirty_max_mtime;
unsigned long long min_mtime = sit_i->dirty_min_mtime;
- unsigned int sec_blocks = BLKS_PER_SEC(sbi);
+ unsigned int sec_blocks = CAP_BLKS_PER_SEC(sbi);
unsigned int vblocks;
unsigned int dirty_threshold = max(am->max_candidate_count,
am->candidate_ratio *
@@ -1487,7 +1490,7 @@ next_step:
*/
if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
(!force_migrate && get_valid_blocks(sbi, segno, true) ==
- BLKS_PER_SEC(sbi)))
+ CAP_BLKS_PER_SEC(sbi)))
return submitted;
if (check_valid_map(sbi, segno, off) == 0)
diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h
index 3fe145e8e594..19b956c2d697 100644
--- a/fs/f2fs/gc.h
+++ b/fs/f2fs/gc.h
@@ -120,15 +120,13 @@ static inline block_t free_user_blocks(struct f2fs_sb_info *sbi)
return free_blks - ovp_blks;
}
-static inline block_t limit_invalid_user_blocks(struct f2fs_sb_info *sbi)
+static inline block_t limit_invalid_user_blocks(block_t user_block_count)
{
- return (long)(sbi->user_block_count * LIMIT_INVALID_BLOCK) / 100;
+ return (long)(user_block_count * LIMIT_INVALID_BLOCK) / 100;
}
-static inline block_t limit_free_user_blocks(struct f2fs_sb_info *sbi)
+static inline block_t limit_free_user_blocks(block_t reclaimable_user_blocks)
{
- block_t reclaimable_user_blocks = sbi->user_block_count -
- written_block_count(sbi);
return (long)(reclaimable_user_blocks * LIMIT_FREE_BLOCK) / 100;
}
@@ -163,15 +161,16 @@ static inline void decrease_sleep_time(struct f2fs_gc_kthread *gc_th,
static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi)
{
- block_t invalid_user_blocks = sbi->user_block_count -
- written_block_count(sbi);
+ block_t user_block_count = sbi->user_block_count;
+ block_t invalid_user_blocks = user_block_count -
+ written_block_count(sbi);
/*
* Background GC is triggered with the following conditions.
* 1. There are a number of invalid blocks.
* 2. There is not enough free space.
*/
- if (invalid_user_blocks > limit_invalid_user_blocks(sbi) &&
- free_user_blocks(sbi) < limit_free_user_blocks(sbi))
- return true;
- return false;
+ return (invalid_user_blocks >
+ limit_invalid_user_blocks(user_block_count) &&
+ free_user_blocks(sbi) <
+ limit_free_user_blocks(invalid_user_blocks));
}
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index fc55f5bd1fcc..6d11c365d7b4 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -744,8 +744,7 @@ void f2fs_evict_inode(struct inode *inode)
nid_t xnid = F2FS_I(inode)->i_xattr_nid;
int err = 0;
- if (f2fs_is_atomic_file(inode))
- f2fs_abort_atomic_write(inode, true);
+ f2fs_abort_atomic_write(inode, true);
trace_f2fs_evict_inode(inode);
truncate_inode_pages_final(&inode->i_data);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 740c72d088f3..e06a0c478b39 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1292,7 +1292,11 @@ struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
dec_valid_node_count(sbi, dn->inode, !ofs);
goto fail;
}
- f2fs_bug_on(sbi, new_ni.blk_addr != NULL_ADDR);
+ if (unlikely(new_ni.blk_addr != NULL_ADDR)) {
+ err = -EFSCORRUPTED;
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ goto fail;
+ }
#endif
new_ni.nid = dn->nid;
new_ni.ino = dn->inode->i_ino;
@@ -1945,7 +1949,6 @@ next_step:
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
bool submitted = false;
- bool may_dirty = true;
/* give a priority to WB_SYNC threads */
if (atomic_read(&sbi->wb_sync_req[NODE]) &&
@@ -1998,11 +2001,8 @@ continue_unlock:
}
/* flush dirty inode */
- if (IS_INODE(page) && may_dirty) {
- may_dirty = false;
- if (flush_dirty_inode(page))
- goto lock_node;
- }
+ if (IS_INODE(page) && flush_dirty_inode(page))
+ goto lock_node;
write_node:
f2fs_wait_on_page_writeback(page, NODE, true, true);
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index c7afc588cf26..0de21f82d7bc 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -190,18 +190,20 @@ void f2fs_abort_atomic_write(struct inode *inode, bool clean)
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_inode_info *fi = F2FS_I(inode);
- if (f2fs_is_atomic_file(inode)) {
- if (clean)
- truncate_inode_pages_final(inode->i_mapping);
- clear_inode_flag(fi->cow_inode, FI_ATOMIC_FILE);
- iput(fi->cow_inode);
- fi->cow_inode = NULL;
- clear_inode_flag(inode, FI_ATOMIC_FILE);
+ if (!f2fs_is_atomic_file(inode))
+ return;
- spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
- sbi->atomic_files--;
- spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
- }
+ if (clean)
+ truncate_inode_pages_final(inode->i_mapping);
+ clear_inode_flag(fi->cow_inode, FI_COW_FILE);
+ iput(fi->cow_inode);
+ fi->cow_inode = NULL;
+ release_atomic_write_cnt(inode);
+ clear_inode_flag(inode, FI_ATOMIC_FILE);
+
+ spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
+ sbi->atomic_files--;
+ spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
}
static int __replace_atomic_write_block(struct inode *inode, pgoff_t index,
@@ -335,6 +337,11 @@ next:
}
out:
+ if (ret)
+ sbi->revoked_atomic_block += fi->atomic_write_cnt;
+ else
+ sbi->committed_atomic_block += fi->atomic_write_cnt;
+
__complete_revoke_list(inode, &revoke_list, ret ? true : false);
return ret;
@@ -728,7 +735,7 @@ static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
get_valid_blocks(sbi, segno, true);
f2fs_bug_on(sbi, unlikely(!valid_blocks ||
- valid_blocks == BLKS_PER_SEC(sbi)));
+ valid_blocks == CAP_BLKS_PER_SEC(sbi)));
if (!IS_CURSEC(sbi, secno))
set_bit(secno, dirty_i->dirty_secmap);
@@ -764,7 +771,7 @@ static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
if (!valid_blocks ||
- valid_blocks == BLKS_PER_SEC(sbi)) {
+ valid_blocks == CAP_BLKS_PER_SEC(sbi)) {
clear_bit(secno, dirty_i->dirty_secmap);
return;
}
@@ -3166,7 +3173,7 @@ static int __get_segment_type_6(struct f2fs_io_info *fio)
return CURSEG_COLD_DATA;
if (file_is_hot(inode) ||
is_inode_flag_set(inode, FI_HOT_DATA) ||
- f2fs_is_atomic_file(inode))
+ f2fs_is_cow_file(inode))
return CURSEG_HOT_DATA;
return f2fs_rw_hint_to_seg_type(inode->i_write_hint);
} else {
@@ -3433,7 +3440,8 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio)
goto drop_bio;
}
- invalidate_mapping_pages(META_MAPPING(sbi),
+ if (fio->post_read)
+ invalidate_mapping_pages(META_MAPPING(sbi),
fio->new_blkaddr, fio->new_blkaddr);
stat_inc_inplace_blocks(fio->sbi);
@@ -3616,10 +3624,16 @@ void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr)
void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
block_t len)
{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
block_t i;
+ if (!f2fs_post_read_required(inode))
+ return;
+
for (i = 0; i < len; i++)
f2fs_wait_on_block_writeback(inode, blkaddr + i);
+
+ invalidate_mapping_pages(META_MAPPING(sbi), blkaddr, blkaddr + len - 1);
}
static int read_compacted_summaries(struct f2fs_sb_info *sbi)
@@ -4362,6 +4376,12 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
return err;
seg_info_from_raw_sit(se, &sit);
+ if (se->type >= NR_PERSISTENT_LOG) {
+ f2fs_err(sbi, "Invalid segment type: %u, segno: %u",
+ se->type, start);
+ return -EFSCORRUPTED;
+ }
+
sit_valid_blocks[SE_PAGETYPE(se)] += se->valid_blocks;
if (f2fs_block_unit_discard(sbi)) {
@@ -4410,6 +4430,13 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
break;
seg_info_from_raw_sit(se, &sit);
+ if (se->type >= NR_PERSISTENT_LOG) {
+ f2fs_err(sbi, "Invalid segment type: %u, segno: %u",
+ se->type, start);
+ err = -EFSCORRUPTED;
+ break;
+ }
+
sit_valid_blocks[SE_PAGETYPE(se)] += se->valid_blocks;
if (f2fs_block_unit_discard(sbi)) {
@@ -4483,7 +4510,6 @@ static void init_dirty_segmap(struct f2fs_sb_info *sbi)
struct free_segmap_info *free_i = FREE_I(sbi);
unsigned int segno = 0, offset = 0, secno;
block_t valid_blocks, usable_blks_in_seg;
- block_t blks_per_sec = BLKS_PER_SEC(sbi);
while (1) {
/* find dirty segment based on free segmap */
@@ -4512,7 +4538,7 @@ static void init_dirty_segmap(struct f2fs_sb_info *sbi)
valid_blocks = get_valid_blocks(sbi, segno, true);
secno = GET_SEC_FROM_SEG(sbi, segno);
- if (!valid_blocks || valid_blocks == blks_per_sec)
+ if (!valid_blocks || valid_blocks == CAP_BLKS_PER_SEC(sbi))
continue;
if (IS_CURSEC(sbi, secno))
continue;
@@ -4895,7 +4921,7 @@ static unsigned int get_zone_idx(struct f2fs_sb_info *sbi, unsigned int secno,
static inline unsigned int f2fs_usable_zone_segs_in_sec(
struct f2fs_sb_info *sbi, unsigned int segno)
{
- unsigned int dev_idx, zone_idx, unusable_segs_in_sec;
+ unsigned int dev_idx, zone_idx;
dev_idx = f2fs_target_device_index(sbi, START_BLOCK(sbi, segno));
zone_idx = get_zone_idx(sbi, GET_SEC_FROM_SEG(sbi, segno), dev_idx);
@@ -4904,18 +4930,12 @@ static inline unsigned int f2fs_usable_zone_segs_in_sec(
if (is_conv_zone(sbi, zone_idx, dev_idx))
return sbi->segs_per_sec;
- /*
- * If the zone_capacity_blocks array is NULL, then zone capacity
- * is equal to the zone size for all zones
- */
- if (!FDEV(dev_idx).zone_capacity_blocks)
+ if (!sbi->unusable_blocks_per_sec)
return sbi->segs_per_sec;
/* Get the segment count beyond zone capacity block */
- unusable_segs_in_sec = (sbi->blocks_per_blkz -
- FDEV(dev_idx).zone_capacity_blocks[zone_idx]) >>
- sbi->log_blocks_per_seg;
- return sbi->segs_per_sec - unusable_segs_in_sec;
+ return sbi->segs_per_sec - (sbi->unusable_blocks_per_sec >>
+ sbi->log_blocks_per_seg);
}
/*
@@ -4944,12 +4964,11 @@ static inline unsigned int f2fs_usable_zone_blks_in_seg(
if (is_conv_zone(sbi, zone_idx, dev_idx))
return sbi->blocks_per_seg;
- if (!FDEV(dev_idx).zone_capacity_blocks)
+ if (!sbi->unusable_blocks_per_sec)
return sbi->blocks_per_seg;
sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
- sec_cap_blkaddr = sec_start_blkaddr +
- FDEV(dev_idx).zone_capacity_blocks[zone_idx];
+ sec_cap_blkaddr = sec_start_blkaddr + CAP_BLKS_PER_SEC(sbi);
/*
* If segment starts before zone capacity and spans beyond
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 3f277dfcb131..d1d63766f2c7 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -101,6 +101,9 @@ static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,
GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
#define BLKS_PER_SEC(sbi) \
((sbi)->segs_per_sec * (sbi)->blocks_per_seg)
+#define CAP_BLKS_PER_SEC(sbi) \
+ ((sbi)->segs_per_sec * (sbi)->blocks_per_seg - \
+ (sbi)->unusable_blocks_per_sec)
#define GET_SEC_FROM_SEG(sbi, segno) \
(((segno) == -1) ? -1: (segno) / (sbi)->segs_per_sec)
#define GET_SEG_FROM_SEC(sbi, secno) \
@@ -609,10 +612,10 @@ static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
get_pages(sbi, F2FS_DIRTY_DENTS) +
get_pages(sbi, F2FS_DIRTY_IMETA);
unsigned int total_dent_blocks = get_pages(sbi, F2FS_DIRTY_DENTS);
- unsigned int node_secs = total_node_blocks / BLKS_PER_SEC(sbi);
- unsigned int dent_secs = total_dent_blocks / BLKS_PER_SEC(sbi);
- unsigned int node_blocks = total_node_blocks % BLKS_PER_SEC(sbi);
- unsigned int dent_blocks = total_dent_blocks % BLKS_PER_SEC(sbi);
+ unsigned int node_secs = total_node_blocks / CAP_BLKS_PER_SEC(sbi);
+ unsigned int dent_secs = total_dent_blocks / CAP_BLKS_PER_SEC(sbi);
+ unsigned int node_blocks = total_node_blocks % CAP_BLKS_PER_SEC(sbi);
+ unsigned int dent_blocks = total_dent_blocks % CAP_BLKS_PER_SEC(sbi);
unsigned int free, need_lower, need_upper;
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
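
A worked example of the CAP_BLKS_PER_SEC() macro added above: on zoned devices whose zone capacity is smaller than the zone size, the usable blocks per section are the raw section size minus the unusable tail. The numbers below are illustrative only.

#include <stdio.h>

int main(void)
{
	unsigned segs_per_sec   = 1;    /* one segment per section */
	unsigned blocks_per_seg = 512;  /* 512 x 4 KiB blocks per 2 MiB segment */
	unsigned zone_size_blks = 512;  /* zone size in blocks */
	unsigned zone_cap_blks  = 448;  /* zone capacity reported by the drive */

	unsigned unusable_per_sec = zone_size_blks - zone_cap_blks;
	unsigned blks_per_sec     = segs_per_sec * blocks_per_seg;
	unsigned cap_blks_per_sec = blks_per_sec - unusable_per_sec;

	printf("BLKS_PER_SEC=%u CAP_BLKS_PER_SEC=%u\n",
	       blks_per_sec, cap_blks_per_sec);  /* 512 vs 448 */
	return 0;
}
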
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 37221e94e5ef..2451623c05a7 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
+#include <linux/fs_context.h>
#include <linux/sched/mm.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
@@ -159,6 +160,7 @@ enum {
Opt_gc_merge,
Opt_nogc_merge,
Opt_discard_unit,
+ Opt_memory_mode,
Opt_err,
};
@@ -235,6 +237,7 @@ static match_table_t f2fs_tokens = {
{Opt_gc_merge, "gc_merge"},
{Opt_nogc_merge, "nogc_merge"},
{Opt_discard_unit, "discard_unit=%s"},
+ {Opt_memory_mode, "memory=%s"},
{Opt_err, NULL},
};
@@ -492,9 +495,19 @@ static int f2fs_set_test_dummy_encryption(struct super_block *sb,
bool is_remount)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
-#ifdef CONFIG_FS_ENCRYPTION
+ struct fs_parameter param = {
+ .type = fs_value_is_string,
+ .string = arg->from ? arg->from : "",
+ };
+ struct fscrypt_dummy_policy *policy =
+ &F2FS_OPTION(sbi).dummy_enc_policy;
int err;
+ if (!IS_ENABLED(CONFIG_FS_ENCRYPTION)) {
+ f2fs_warn(sbi, "test_dummy_encryption option not supported");
+ return -EINVAL;
+ }
+
if (!f2fs_sb_has_encrypt(sbi)) {
f2fs_err(sbi, "Encrypt feature is off");
return -EINVAL;
@@ -506,12 +519,12 @@ static int f2fs_set_test_dummy_encryption(struct super_block *sb,
* needed to allow it to be set or changed during remount. We do allow
* it to be specified during remount, but only if there is no change.
*/
- if (is_remount && !F2FS_OPTION(sbi).dummy_enc_policy.policy) {
+ if (is_remount && !fscrypt_is_dummy_policy_set(policy)) {
f2fs_warn(sbi, "Can't set test_dummy_encryption on remount");
return -EINVAL;
}
- err = fscrypt_set_test_dummy_encryption(
- sb, arg->from, &F2FS_OPTION(sbi).dummy_enc_policy);
+
+ err = fscrypt_parse_test_dummy_encryption(&param, policy);
if (err) {
if (err == -EEXIST)
f2fs_warn(sbi,
@@ -524,12 +537,14 @@ static int f2fs_set_test_dummy_encryption(struct super_block *sb,
opt, err);
return -EINVAL;
}
+ err = fscrypt_add_test_dummy_key(sb, policy);
+ if (err) {
+ f2fs_warn(sbi, "Error adding test dummy encryption key [%d]",
+ err);
+ return err;
+ }
f2fs_warn(sbi, "Test dummy encryption mode enabled");
return 0;
-#else
- f2fs_warn(sbi, "test_dummy_encryption option not supported");
- return -EINVAL;
-#endif
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
@@ -1222,6 +1237,22 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
}
kfree(name);
break;
+ case Opt_memory_mode:
+ name = match_strdup(&args[0]);
+ if (!name)
+ return -ENOMEM;
+ if (!strcmp(name, "normal")) {
+ F2FS_OPTION(sbi).memory_mode =
+ MEMORY_MODE_NORMAL;
+ } else if (!strcmp(name, "low")) {
+ F2FS_OPTION(sbi).memory_mode =
+ MEMORY_MODE_LOW;
+ } else {
+ kfree(name);
+ return -EINVAL;
+ }
+ kfree(name);
+ break;
default:
f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value",
p);
@@ -1380,8 +1411,7 @@ static int f2fs_drop_inode(struct inode *inode)
atomic_inc(&inode->i_count);
spin_unlock(&inode->i_lock);
- if (f2fs_is_atomic_file(inode))
- f2fs_abort_atomic_write(inode, true);
+ f2fs_abort_atomic_write(inode, true);
/* should remain fi->extent_tree for writepage */
f2fs_destroy_extent_node(inode);
@@ -1491,7 +1521,6 @@ static void destroy_device_list(struct f2fs_sb_info *sbi)
blkdev_put(FDEV(i).bdev, FMODE_EXCL);
#ifdef CONFIG_BLK_DEV_ZONED
kvfree(FDEV(i).blkz_seq);
- kfree(FDEV(i).zone_capacity_blocks);
#endif
}
kvfree(sbi->devs);
@@ -1993,6 +2022,11 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION)
seq_printf(seq, ",discard_unit=%s", "section");
+ if (F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_NORMAL)
+ seq_printf(seq, ",memory=%s", "normal");
+ else if (F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW)
+ seq_printf(seq, ",memory=%s", "low");
+
return 0;
}
@@ -2014,6 +2048,7 @@ static void default_options(struct f2fs_sb_info *sbi)
F2FS_OPTION(sbi).compress_ext_cnt = 0;
F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS;
F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
+ F2FS_OPTION(sbi).memory_mode = MEMORY_MODE_NORMAL;
sbi->sb->s_flags &= ~SB_INLINECRYPT;
@@ -3579,6 +3614,7 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
sbi->max_fragment_chunk = DEF_FRAGMENT_SIZE;
sbi->max_fragment_hole = DEF_FRAGMENT_SIZE;
spin_lock_init(&sbi->gc_urgent_high_lock);
+ atomic64_set(&sbi->current_atomic_write, 0);
sbi->dir_level = DEF_DIR_LEVEL;
sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
@@ -3636,24 +3672,29 @@ err_valid_block:
#ifdef CONFIG_BLK_DEV_ZONED
struct f2fs_report_zones_args {
+ struct f2fs_sb_info *sbi;
struct f2fs_dev_info *dev;
- bool zone_cap_mismatch;
};
static int f2fs_report_zone_cb(struct blk_zone *zone, unsigned int idx,
void *data)
{
struct f2fs_report_zones_args *rz_args = data;
+ block_t unusable_blocks = (zone->len - zone->capacity) >>
+ F2FS_LOG_SECTORS_PER_BLOCK;
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
return 0;
set_bit(idx, rz_args->dev->blkz_seq);
- rz_args->dev->zone_capacity_blocks[idx] = zone->capacity >>
- F2FS_LOG_SECTORS_PER_BLOCK;
- if (zone->len != zone->capacity && !rz_args->zone_cap_mismatch)
- rz_args->zone_cap_mismatch = true;
-
+ if (!rz_args->sbi->unusable_blocks_per_sec) {
+ rz_args->sbi->unusable_blocks_per_sec = unusable_blocks;
+ return 0;
+ }
+ if (rz_args->sbi->unusable_blocks_per_sec != unusable_blocks) {
+ f2fs_err(rz_args->sbi, "F2FS supports single zone capacity\n");
+ return -EINVAL;
+ }
return 0;
}
@@ -3694,26 +3735,13 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
if (!FDEV(devi).blkz_seq)
return -ENOMEM;
- /* Get block zones type and zone-capacity */
- FDEV(devi).zone_capacity_blocks = f2fs_kzalloc(sbi,
- FDEV(devi).nr_blkz * sizeof(block_t),
- GFP_KERNEL);
- if (!FDEV(devi).zone_capacity_blocks)
- return -ENOMEM;
-
+ rep_zone_arg.sbi = sbi;
rep_zone_arg.dev = &FDEV(devi);
- rep_zone_arg.zone_cap_mismatch = false;
ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES, f2fs_report_zone_cb,
&rep_zone_arg);
if (ret < 0)
return ret;
-
- if (!rep_zone_arg.zone_cap_mismatch) {
- kfree(FDEV(devi).zone_capacity_blocks);
- FDEV(devi).zone_capacity_blocks = NULL;
- }
-
return 0;
}
#endif
@@ -4579,7 +4607,7 @@ static int __init init_f2fs_fs(void)
err = f2fs_init_sysfs();
if (err)
goto free_garbage_collection_cache;
- err = register_shrinker(&f2fs_shrinker_info);
+ err = register_shrinker(&f2fs_shrinker_info, "f2fs-shrinker");
if (err)
goto free_sysfs;
err = register_filesystem(&f2fs_fs_type);
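
[Editor's note on the f2fs/super.c hunks above] The patch adds a "memory=%s" mount option with two values, normal and low, defaulting to MEMORY_MODE_NORMAL in default_options() and echoed back by f2fs_show_options(). A minimal userspace sketch of the same parse/round-trip logic follows; the standalone harness and helper names are illustrative, not f2fs code.

#include <stdio.h>
#include <string.h>

enum memory_mode { MEMORY_MODE_NORMAL, MEMORY_MODE_LOW };

/* Parse the value of a "memory=<mode>" mount option, mirroring the
 * strcmp() chain added to parse_options(): "normal" or "low" are
 * accepted, anything else is rejected. */
static int parse_memory_mode(const char *arg, enum memory_mode *mode)
{
        if (!strcmp(arg, "normal"))
                *mode = MEMORY_MODE_NORMAL;
        else if (!strcmp(arg, "low"))
                *mode = MEMORY_MODE_LOW;
        else
                return -1;      /* stands in for -EINVAL */
        return 0;
}

/* Mirror of the f2fs_show_options() side: print the option back out. */
static void show_memory_mode(enum memory_mode mode)
{
        printf(",memory=%s\n", mode == MEMORY_MODE_LOW ? "low" : "normal");
}

int main(void)
{
        enum memory_mode mode = MEMORY_MODE_NORMAL;     /* default_options() value */

        if (parse_memory_mode("low", &mode) == 0)
                show_memory_mode(mode);                 /* prints ",memory=low" */
        return 0;
}
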
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index 4c50aedd5144..eba5fb1629d7 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -339,6 +339,21 @@ static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
sbi->gc_reclaimed_segs[sbi->gc_segment_mode]);
}
+ if (!strcmp(a->attr.name, "current_atomic_write")) {
+ s64 current_write = atomic64_read(&sbi->current_atomic_write);
+
+ return sysfs_emit(buf, "%lld\n", current_write);
+ }
+
+ if (!strcmp(a->attr.name, "peak_atomic_write"))
+ return sysfs_emit(buf, "%lld\n", sbi->peak_atomic_write);
+
+ if (!strcmp(a->attr.name, "committed_atomic_block"))
+ return sysfs_emit(buf, "%llu\n", sbi->committed_atomic_block);
+
+ if (!strcmp(a->attr.name, "revoked_atomic_block"))
+ return sysfs_emit(buf, "%llu\n", sbi->revoked_atomic_block);
+
ui = (unsigned int *)(ptr + a->offset);
return sprintf(buf, "%u\n", *ui);
@@ -608,6 +623,27 @@ out:
return count;
}
+ if (!strcmp(a->attr.name, "peak_atomic_write")) {
+ if (t != 0)
+ return -EINVAL;
+ sbi->peak_atomic_write = 0;
+ return count;
+ }
+
+ if (!strcmp(a->attr.name, "committed_atomic_block")) {
+ if (t != 0)
+ return -EINVAL;
+ sbi->committed_atomic_block = 0;
+ return count;
+ }
+
+ if (!strcmp(a->attr.name, "revoked_atomic_block")) {
+ if (t != 0)
+ return -EINVAL;
+ sbi->revoked_atomic_block = 0;
+ return count;
+ }
+
*ui = (unsigned int)t;
return count;
@@ -713,6 +749,11 @@ static struct f2fs_attr f2fs_attr_##_name = { \
.offset = _offset \
}
+#define F2FS_RO_ATTR(struct_type, struct_name, name, elname) \
+ F2FS_ATTR_OFFSET(struct_type, name, 0444, \
+ f2fs_sbi_show, NULL, \
+ offsetof(struct struct_name, elname))
+
#define F2FS_RW_ATTR(struct_type, struct_name, name, elname) \
F2FS_ATTR_OFFSET(struct_type, name, 0644, \
f2fs_sbi_show, f2fs_sbi_store, \
@@ -811,6 +852,8 @@ F2FS_FEATURE_RO_ATTR(encrypted_casefold);
#endif /* CONFIG_FS_ENCRYPTION */
#ifdef CONFIG_BLK_DEV_ZONED
F2FS_FEATURE_RO_ATTR(block_zoned);
+F2FS_RO_ATTR(F2FS_SBI, f2fs_sb_info, unusable_blocks_per_sec,
+ unusable_blocks_per_sec);
#endif
F2FS_FEATURE_RO_ATTR(atomic_write);
F2FS_FEATURE_RO_ATTR(extra_attr);
@@ -848,6 +891,12 @@ F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_reclaimed_segments, gc_reclaimed_segs);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_fragment_chunk, max_fragment_chunk);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_fragment_hole, max_fragment_hole);
+/* For atomic write */
+F2FS_RO_ATTR(F2FS_SBI, f2fs_sb_info, current_atomic_write, current_atomic_write);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, peak_atomic_write, peak_atomic_write);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, committed_atomic_block, committed_atomic_block);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, revoked_atomic_block, revoked_atomic_block);
+
#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
static struct attribute *f2fs_attrs[] = {
ATTR_LIST(gc_urgent_sleep_time),
@@ -919,6 +968,9 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(moved_blocks_background),
ATTR_LIST(avg_vblocks),
#endif
+#ifdef CONFIG_BLK_DEV_ZONED
+ ATTR_LIST(unusable_blocks_per_sec),
+#endif
#ifdef CONFIG_F2FS_FS_COMPRESSION
ATTR_LIST(compr_written_block),
ATTR_LIST(compr_saved_block),
@@ -934,6 +986,10 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(gc_reclaimed_segments),
ATTR_LIST(max_fragment_chunk),
ATTR_LIST(max_fragment_hole),
+ ATTR_LIST(current_atomic_write),
+ ATTR_LIST(peak_atomic_write),
+ ATTR_LIST(committed_atomic_block),
+ ATTR_LIST(revoked_atomic_block),
NULL,
};
ATTRIBUTE_GROUPS(f2fs);
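
[Editor's note on the f2fs/sysfs.c hunks above] The new atomic-write statistics are exposed through sysfs, and the store path only accepts 0, which clears the counter; any other value is rejected. A small sketch of that "write zero to reset" convention, with hypothetical standalone names:

#include <errno.h>
#include <stdio.h>

/* Emulates the f2fs_sbi_store() branches added above: a statistics
 * counter exposed through sysfs may only be written with 0, which
 * clears it; any other value fails with -EINVAL. */
static long long peak_atomic_write = 12345;

static int store_counter(long long *counter, long long t)
{
        if (t != 0)
                return -EINVAL;
        *counter = 0;
        return 0;
}

int main(void)
{
        if (store_counter(&peak_atomic_write, 7))
                printf("writing 7: rejected (EINVAL)\n");
        if (!store_counter(&peak_atomic_write, 0))
                printf("writing 0: counter reset to %lld\n", peak_atomic_write);
        return 0;
}
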
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index c573314806cf..21620054e1c4 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -889,22 +889,57 @@ out:
return err;
}
-static int vfat_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
- struct dentry *old_dentry, struct inode *new_dir,
- struct dentry *new_dentry, unsigned int flags)
+static int vfat_get_dotdot_de(struct inode *inode, struct buffer_head **bh,
+ struct msdos_dir_entry **de)
+{
+ if (S_ISDIR(inode->i_mode)) {
+ if (fat_get_dotdot_entry(inode, bh, de))
+ return -EIO;
+ }
+ return 0;
+}
+
+static int vfat_sync_ipos(struct inode *dir, struct inode *inode)
+{
+ if (IS_DIRSYNC(dir))
+ return fat_sync_inode(inode);
+ mark_inode_dirty(inode);
+ return 0;
+}
+
+static int vfat_update_dotdot_de(struct inode *dir, struct inode *inode,
+ struct buffer_head *dotdot_bh,
+ struct msdos_dir_entry *dotdot_de)
+{
+ fat_set_start(dotdot_de, MSDOS_I(dir)->i_logstart);
+ mark_buffer_dirty_inode(dotdot_bh, inode);
+ if (IS_DIRSYNC(dir))
+ return sync_dirty_buffer(dotdot_bh);
+ return 0;
+}
+
+static void vfat_update_dir_metadata(struct inode *dir, struct timespec64 *ts)
+{
+ inode_inc_iversion(dir);
+ fat_truncate_time(dir, ts, S_CTIME | S_MTIME);
+ if (IS_DIRSYNC(dir))
+ (void)fat_sync_inode(dir);
+ else
+ mark_inode_dirty(dir);
+}
+
+static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
{
struct buffer_head *dotdot_bh;
- struct msdos_dir_entry *dotdot_de;
+ struct msdos_dir_entry *dotdot_de = NULL;
struct inode *old_inode, *new_inode;
struct fat_slot_info old_sinfo, sinfo;
struct timespec64 ts;
loff_t new_i_pos;
- int err, is_dir, update_dotdot, corrupt = 0;
+ int err, is_dir, corrupt = 0;
struct super_block *sb = old_dir->i_sb;
- if (flags & ~RENAME_NOREPLACE)
- return -EINVAL;
-
old_sinfo.bh = sinfo.bh = dotdot_bh = NULL;
old_inode = d_inode(old_dentry);
new_inode = d_inode(new_dentry);
@@ -913,15 +948,13 @@ static int vfat_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
if (err)
goto out;
- is_dir = S_ISDIR(old_inode->i_mode);
- update_dotdot = (is_dir && old_dir != new_dir);
- if (update_dotdot) {
- if (fat_get_dotdot_entry(old_inode, &dotdot_bh, &dotdot_de)) {
- err = -EIO;
+ if (old_dir != new_dir) {
+ err = vfat_get_dotdot_de(old_inode, &dotdot_bh, &dotdot_de);
+ if (err)
goto out;
- }
}
+ is_dir = S_ISDIR(old_inode->i_mode);
ts = current_time(old_dir);
if (new_inode) {
if (is_dir) {
@@ -942,21 +975,15 @@ static int vfat_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
fat_detach(old_inode);
fat_attach(old_inode, new_i_pos);
- if (IS_DIRSYNC(new_dir)) {
- err = fat_sync_inode(old_inode);
- if (err)
- goto error_inode;
- } else
- mark_inode_dirty(old_inode);
+ err = vfat_sync_ipos(new_dir, old_inode);
+ if (err)
+ goto error_inode;
- if (update_dotdot) {
- fat_set_start(dotdot_de, MSDOS_I(new_dir)->i_logstart);
- mark_buffer_dirty_inode(dotdot_bh, old_inode);
- if (IS_DIRSYNC(new_dir)) {
- err = sync_dirty_buffer(dotdot_bh);
- if (err)
- goto error_dotdot;
- }
+ if (dotdot_de) {
+ err = vfat_update_dotdot_de(new_dir, old_inode, dotdot_bh,
+ dotdot_de);
+ if (err)
+ goto error_dotdot;
drop_nlink(old_dir);
if (!new_inode)
inc_nlink(new_dir);
@@ -966,12 +993,7 @@ static int vfat_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
old_sinfo.bh = NULL;
if (err)
goto error_dotdot;
- inode_inc_iversion(old_dir);
- fat_truncate_time(old_dir, &ts, S_CTIME|S_MTIME);
- if (IS_DIRSYNC(old_dir))
- (void)fat_sync_inode(old_dir);
- else
- mark_inode_dirty(old_dir);
+ vfat_update_dir_metadata(old_dir, &ts);
if (new_inode) {
drop_nlink(new_inode);
@@ -991,10 +1013,9 @@ error_dotdot:
/* data cluster is shared, serious corruption */
corrupt = 1;
- if (update_dotdot) {
- fat_set_start(dotdot_de, MSDOS_I(old_dir)->i_logstart);
- mark_buffer_dirty_inode(dotdot_bh, old_inode);
- corrupt |= sync_dirty_buffer(dotdot_bh);
+ if (dotdot_de) {
+ corrupt |= vfat_update_dotdot_de(old_dir, old_inode, dotdot_bh,
+ dotdot_de);
}
error_inode:
fat_detach(old_inode);
@@ -1021,13 +1042,145 @@ error_inode:
goto out;
}
+static void vfat_exchange_ipos(struct inode *old_inode, struct inode *new_inode,
+ loff_t old_i_pos, loff_t new_i_pos)
+{
+ fat_detach(old_inode);
+ fat_detach(new_inode);
+ fat_attach(old_inode, new_i_pos);
+ fat_attach(new_inode, old_i_pos);
+}
+
+static void vfat_move_nlink(struct inode *src, struct inode *dst)
+{
+ drop_nlink(src);
+ inc_nlink(dst);
+}
+
+static int vfat_rename_exchange(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
+{
+ struct buffer_head *old_dotdot_bh = NULL, *new_dotdot_bh = NULL;
+ struct msdos_dir_entry *old_dotdot_de = NULL, *new_dotdot_de = NULL;
+ struct inode *old_inode, *new_inode;
+ struct timespec64 ts = current_time(old_dir);
+ loff_t old_i_pos, new_i_pos;
+ int err, corrupt = 0;
+ struct super_block *sb = old_dir->i_sb;
+
+ old_inode = d_inode(old_dentry);
+ new_inode = d_inode(new_dentry);
+
+ /* Acquire super block lock for the operation to be atomic */
+ mutex_lock(&MSDOS_SB(sb)->s_lock);
+
+ /* if directories are not the same, get ".." info to update */
+ if (old_dir != new_dir) {
+ err = vfat_get_dotdot_de(old_inode, &old_dotdot_bh,
+ &old_dotdot_de);
+ if (err)
+ goto out;
+
+ err = vfat_get_dotdot_de(new_inode, &new_dotdot_bh,
+ &new_dotdot_de);
+ if (err)
+ goto out;
+ }
+
+ old_i_pos = MSDOS_I(old_inode)->i_pos;
+ new_i_pos = MSDOS_I(new_inode)->i_pos;
+
+ vfat_exchange_ipos(old_inode, new_inode, old_i_pos, new_i_pos);
+
+ err = vfat_sync_ipos(old_dir, new_inode);
+ if (err)
+ goto error_exchange;
+ err = vfat_sync_ipos(new_dir, old_inode);
+ if (err)
+ goto error_exchange;
+
+ /* update ".." directory entry info */
+ if (old_dotdot_de) {
+ err = vfat_update_dotdot_de(new_dir, old_inode, old_dotdot_bh,
+ old_dotdot_de);
+ if (err)
+ goto error_old_dotdot;
+ }
+ if (new_dotdot_de) {
+ err = vfat_update_dotdot_de(old_dir, new_inode, new_dotdot_bh,
+ new_dotdot_de);
+ if (err)
+ goto error_new_dotdot;
+ }
+
+ /* if cross directory and only one is a directory, adjust nlink */
+ if (!old_dotdot_de != !new_dotdot_de) {
+ if (old_dotdot_de)
+ vfat_move_nlink(old_dir, new_dir);
+ else
+ vfat_move_nlink(new_dir, old_dir);
+ }
+
+ vfat_update_dir_metadata(old_dir, &ts);
+ /* if directories are not the same, update new_dir as well */
+ if (old_dir != new_dir)
+ vfat_update_dir_metadata(new_dir, &ts);
+
+out:
+ brelse(old_dotdot_bh);
+ brelse(new_dotdot_bh);
+ mutex_unlock(&MSDOS_SB(sb)->s_lock);
+
+ return err;
+
+error_new_dotdot:
+ if (new_dotdot_de) {
+ corrupt |= vfat_update_dotdot_de(new_dir, new_inode,
+ new_dotdot_bh, new_dotdot_de);
+ }
+
+error_old_dotdot:
+ if (old_dotdot_de) {
+ corrupt |= vfat_update_dotdot_de(old_dir, old_inode,
+ old_dotdot_bh, old_dotdot_de);
+ }
+
+error_exchange:
+ vfat_exchange_ipos(old_inode, new_inode, new_i_pos, old_i_pos);
+ corrupt |= vfat_sync_ipos(new_dir, new_inode);
+ corrupt |= vfat_sync_ipos(old_dir, old_inode);
+
+ if (corrupt < 0) {
+ fat_fs_error(new_dir->i_sb,
+ "%s: Filesystem corrupted (i_pos %lld, %lld)",
+ __func__, old_i_pos, new_i_pos);
+ }
+ goto out;
+}
+
+static int vfat_rename2(struct user_namespace *mnt_userns, struct inode *old_dir,
+ struct dentry *old_dentry, struct inode *new_dir,
+ struct dentry *new_dentry, unsigned int flags)
+{
+ if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
+ return -EINVAL;
+
+ if (flags & RENAME_EXCHANGE) {
+ return vfat_rename_exchange(old_dir, old_dentry,
+ new_dir, new_dentry);
+ }
+
+ /* VFS already handled RENAME_NOREPLACE, handle it as a normal rename */
+ return vfat_rename(old_dir, old_dentry, new_dir, new_dentry);
+}
+
static const struct inode_operations vfat_dir_inode_operations = {
.create = vfat_create,
.lookup = vfat_lookup,
.unlink = vfat_unlink,
.mkdir = vfat_mkdir,
.rmdir = vfat_rmdir,
- .rename = vfat_rename,
+ .rename = vfat_rename2,
.setattr = fat_setattr,
.getattr = fat_getattr,
.update_time = fat_update_time,
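
[Editor's note on the fat/namei_vfat.c hunks above] vfat_rename_exchange() rebalances the parents' link counts only when the rename crosses directories and exactly one of the two objects is a directory; the "!old_dotdot_de != !new_dotdot_de" test is a boolean XOR on "a '..' entry was found". A tiny illustrative sketch of that decision, not vfat code:

#include <stdbool.h>
#include <stdio.h>

/* old_is_dir/new_is_dir mirror "vfat_get_dotdot_de() found a '..' entry",
 * i.e. the object being exchanged across parents is a directory. */
static void adjust_parent_nlink(bool old_is_dir, bool new_is_dir)
{
        if (old_is_dir != new_is_dir) {         /* exactly one is a directory */
                if (old_is_dir)
                        printf("drop_nlink(old_dir), inc_nlink(new_dir)\n");
                else
                        printf("drop_nlink(new_dir), inc_nlink(old_dir)\n");
        } else {
                printf("link counts already balanced\n");
        }
}

int main(void)
{
        adjust_parent_nlink(true, false);       /* directory swapped with a file */
        adjust_parent_nlink(true, true);        /* two directories: no change */
        return 0;
}
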
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 05221366a16d..08a1993ab7fd 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -134,10 +134,10 @@ static bool inode_io_list_move_locked(struct inode *inode,
static void wb_wakeup(struct bdi_writeback *wb)
{
- spin_lock_bh(&wb->work_lock);
+ spin_lock_irq(&wb->work_lock);
if (test_bit(WB_registered, &wb->state))
mod_delayed_work(bdi_wq, &wb->dwork, 0);
- spin_unlock_bh(&wb->work_lock);
+ spin_unlock_irq(&wb->work_lock);
}
static void finish_writeback_work(struct bdi_writeback *wb,
@@ -164,7 +164,7 @@ static void wb_queue_work(struct bdi_writeback *wb,
if (work->done)
atomic_inc(&work->done->cnt);
- spin_lock_bh(&wb->work_lock);
+ spin_lock_irq(&wb->work_lock);
if (test_bit(WB_registered, &wb->state)) {
list_add_tail(&work->list, &wb->work_list);
@@ -172,7 +172,7 @@ static void wb_queue_work(struct bdi_writeback *wb,
} else
finish_writeback_work(wb, work);
- spin_unlock_bh(&wb->work_lock);
+ spin_unlock_irq(&wb->work_lock);
}
/**
@@ -2082,13 +2082,13 @@ static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb)
{
struct wb_writeback_work *work = NULL;
- spin_lock_bh(&wb->work_lock);
+ spin_lock_irq(&wb->work_lock);
if (!list_empty(&wb->work_list)) {
work = list_entry(wb->work_list.next,
struct wb_writeback_work, list);
list_del_init(&work->list);
}
- spin_unlock_bh(&wb->work_lock);
+ spin_unlock_irq(&wb->work_lock);
return work;
}
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index 74920826d8f6..451d8a077e12 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -263,6 +263,8 @@ void fscache_caching_failed(struct fscache_cookie *cookie)
{
clear_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags);
fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_FAILED);
+ trace_fscache_cookie(cookie->debug_id, refcount_read(&cookie->ref),
+ fscache_cookie_failed);
}
EXPORT_SYMBOL(fscache_caching_failed);
@@ -739,6 +741,9 @@ again_locked:
fallthrough;
case FSCACHE_COOKIE_STATE_FAILED:
+ if (test_and_clear_bit(FSCACHE_COOKIE_DO_INVALIDATE, &cookie->flags))
+ fscache_end_cookie_access(cookie, fscache_access_invalidate_cookie_end);
+
if (atomic_read(&cookie->n_accesses) != 0)
break;
if (test_bit(FSCACHE_COOKIE_DO_RELINQUISH, &cookie->flags)) {
@@ -1063,8 +1068,8 @@ void __fscache_invalidate(struct fscache_cookie *cookie,
return;
case FSCACHE_COOKIE_STATE_LOOKING_UP:
- __fscache_begin_cookie_access(cookie, fscache_access_invalidate_cookie);
- set_bit(FSCACHE_COOKIE_DO_INVALIDATE, &cookie->flags);
+ if (!test_and_set_bit(FSCACHE_COOKIE_DO_INVALIDATE, &cookie->flags))
+ __fscache_begin_cookie_access(cookie, fscache_access_invalidate_cookie);
fallthrough;
case FSCACHE_COOKIE_STATE_CREATING:
spin_unlock(&cookie->lock);
diff --git a/fs/fuse/control.c b/fs/fuse/control.c
index 7cede9a3bc96..247ef4f76761 100644
--- a/fs/fuse/control.c
+++ b/fs/fuse/control.c
@@ -258,7 +258,7 @@ int fuse_ctl_add_conn(struct fuse_conn *fc)
struct dentry *parent;
char name[32];
- if (!fuse_control_sb)
+ if (!fuse_control_sb || fc->no_control)
return 0;
parent = fuse_control_sb->s_root;
@@ -296,7 +296,7 @@ void fuse_ctl_remove_conn(struct fuse_conn *fc)
{
int i;
- if (!fuse_control_sb)
+ if (!fuse_control_sb || fc->no_control)
return;
for (i = fc->ctl_ndents - 1; i >= 0; i--) {
diff --git a/fs/fuse/dax.c b/fs/fuse/dax.c
index 10eb50cbf398..e23e802a8013 100644
--- a/fs/fuse/dax.c
+++ b/fs/fuse/dax.c
@@ -138,9 +138,9 @@ static struct fuse_dax_mapping *alloc_dax_mapping(struct fuse_conn_dax *fcd)
WARN_ON(fcd->nr_free_ranges <= 0);
fcd->nr_free_ranges--;
}
+ __kick_dmap_free_worker(fcd, 0);
spin_unlock(&fcd->lock);
- kick_dmap_free_worker(fcd, 0);
return dmap;
}
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 0e537e580dc1..51897427a534 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -730,14 +730,13 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
}
} else {
size_t off;
- err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
+ err = iov_iter_get_pages2(cs->iter, &page, PAGE_SIZE, 1, &off);
if (err < 0)
return err;
BUG_ON(!err);
cs->len = err;
cs->offset = off;
cs->pg = page;
- iov_iter_advance(cs->iter, err);
}
return lock_request(cs->req);
@@ -1356,7 +1355,7 @@ static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
if (!fud)
return -EPERM;
- if (!iter_is_iovec(to))
+ if (!user_backed_iter(to))
return -EINVAL;
fuse_copy_init(&cs, 1, to);
@@ -1949,7 +1948,7 @@ static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
if (!fud)
return -EPERM;
- if (!iter_is_iovec(from))
+ if (!user_backed_iter(from))
return -EINVAL;
fuse_copy_init(&cs, 0, from);
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 74303d6e987b..b585b04e815e 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -11,6 +11,7 @@
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/fs_context.h>
+#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/namei.h>
#include <linux/slab.h>
@@ -21,6 +22,11 @@
#include <linux/types.h>
#include <linux/kernel.h>
+static bool __read_mostly allow_sys_admin_access;
+module_param(allow_sys_admin_access, bool, 0644);
+MODULE_PARM_DESC(allow_sys_admin_access,
+ "Allow users with CAP_SYS_ADMIN in initial userns to bypass allow_other access check");
+
static void fuse_advise_use_readdirplus(struct inode *dir)
{
struct fuse_inode *fi = get_fuse_inode(dir);
@@ -537,6 +543,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
struct fuse_file *ff;
void *security_ctx = NULL;
u32 security_ctxlen;
+ bool trunc = flags & O_TRUNC;
/* Userspace expects S_IFREG in create mode */
BUG_ON((mode & S_IFMT) != S_IFREG);
@@ -561,7 +568,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
inarg.mode = mode;
inarg.umask = current_umask();
- if (fm->fc->handle_killpriv_v2 && (flags & O_TRUNC) &&
+ if (fm->fc->handle_killpriv_v2 && trunc &&
!(flags & O_EXCL) && !capable(CAP_FSETID)) {
inarg.open_flags |= FUSE_OPEN_KILL_SUIDGID;
}
@@ -623,6 +630,10 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
} else {
file->private_data = ff;
fuse_finish_open(inode, file);
+ if (fm->fc->atomic_o_trunc && trunc)
+ truncate_pagecache(inode, 0);
+ else if (!(ff->open_flags & FOPEN_KEEP_CACHE))
+ invalidate_inode_pages2(inode->i_mapping);
}
return err;
@@ -1224,6 +1235,9 @@ int fuse_allow_current_process(struct fuse_conn *fc)
{
const struct cred *cred;
+ if (allow_sys_admin_access && capable(CAP_SYS_ADMIN))
+ return 1;
+
if (fc->allow_other)
return current_in_userns(fc->user_ns);
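
[Editor's note on the fuse/dir.c hunks above] The new allow_sys_admin_access module parameter lets a CAP_SYS_ADMIN holder in the initial user namespace bypass the allow_other check in fuse_allow_current_process(). Because it is declared with module_param(..., 0644), it should be readable at runtime through the usual /sys/module/<name>/parameters layout; that path is an assumption here, not quoted from the patch.

#include <stdio.h>

/* Assumed path: module_param() with mode 0644 normally exposes the knob
 * under /sys/module/fuse/parameters/. */
#define PARAM_PATH "/sys/module/fuse/parameters/allow_sys_admin_access"

int main(void)
{
        char val[4] = "?";
        FILE *f = fopen(PARAM_PATH, "r");

        if (!f) {
                perror("fopen");
                return 1;
        }
        if (fgets(val, sizeof(val), f))
                printf("allow_sys_admin_access = %s", val);
        fclose(f);
        return 0;
}
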
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 00fa861aeead..1a3afd469e3a 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -210,13 +210,9 @@ void fuse_finish_open(struct inode *inode, struct file *file)
fi->attr_version = atomic64_inc_return(&fc->attr_version);
i_size_write(inode, 0);
spin_unlock(&fi->lock);
- truncate_pagecache(inode, 0);
file_update_time(file);
fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);
- } else if (!(ff->open_flags & FOPEN_KEEP_CACHE)) {
- invalidate_inode_pages2(inode->i_mapping);
}
-
if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache)
fuse_link_write_file(file);
}
@@ -239,30 +235,38 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
if (err)
return err;
- if (is_wb_truncate || dax_truncate) {
+ if (is_wb_truncate || dax_truncate)
inode_lock(inode);
- fuse_set_nowrite(inode);
- }
if (dax_truncate) {
filemap_invalidate_lock(inode->i_mapping);
err = fuse_dax_break_layouts(inode, 0, 0);
if (err)
- goto out;
+ goto out_inode_unlock;
}
+ if (is_wb_truncate || dax_truncate)
+ fuse_set_nowrite(inode);
+
err = fuse_do_open(fm, get_node_id(inode), file, isdir);
if (!err)
fuse_finish_open(inode, file);
-out:
+ if (is_wb_truncate || dax_truncate)
+ fuse_release_nowrite(inode);
+ if (!err) {
+ struct fuse_file *ff = file->private_data;
+
+ if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC))
+ truncate_pagecache(inode, 0);
+ else if (!(ff->open_flags & FOPEN_KEEP_CACHE))
+ invalidate_inode_pages2(inode->i_mapping);
+ }
if (dax_truncate)
filemap_invalidate_unlock(inode->i_mapping);
-
- if (is_wb_truncate | dax_truncate) {
- fuse_release_nowrite(inode);
+out_inode_unlock:
+ if (is_wb_truncate || dax_truncate)
inode_unlock(inode);
- }
return err;
}
@@ -338,6 +342,15 @@ static int fuse_open(struct inode *inode, struct file *file)
static int fuse_release(struct inode *inode, struct file *file)
{
+ struct fuse_conn *fc = get_fuse_conn(inode);
+
+ /*
+ * Dirty pages might remain despite write_inode_now() call from
+ * fuse_flush() due to writes racing with the close.
+ */
+ if (fc->writeback_cache)
+ write_inode_now(inode, 1);
+
fuse_release_common(file, false);
/* return value is ignored by VFS */
@@ -1401,14 +1414,13 @@ static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
while (nbytes < *nbytesp && ap->num_pages < max_pages) {
unsigned npages;
size_t start;
- ret = iov_iter_get_pages(ii, &ap->pages[ap->num_pages],
+ ret = iov_iter_get_pages2(ii, &ap->pages[ap->num_pages],
*nbytesp - nbytes,
max_pages - ap->num_pages,
&start);
if (ret < 0)
break;
- iov_iter_advance(ii, ret);
nbytes += ret;
ret += start;
@@ -1465,7 +1477,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
inode_unlock(inode);
}
- io->should_dirty = !write && iter_is_iovec(iter);
+ io->should_dirty = !write && user_backed_iter(iter);
while (count) {
ssize_t nres;
fl_owner_t owner = current->files;
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 8c0665c5dff8..6b3beda16c1b 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -180,6 +180,12 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
inode->i_uid = make_kuid(fc->user_ns, attr->uid);
inode->i_gid = make_kgid(fc->user_ns, attr->gid);
inode->i_blocks = attr->blocks;
+
+ /* Sanitize nsecs */
+ attr->atimensec = min_t(u32, attr->atimensec, NSEC_PER_SEC - 1);
+ attr->mtimensec = min_t(u32, attr->mtimensec, NSEC_PER_SEC - 1);
+ attr->ctimensec = min_t(u32, attr->ctimensec, NSEC_PER_SEC - 1);
+
inode->i_atime.tv_sec = attr->atime;
inode->i_atime.tv_nsec = attr->atimensec;
/* mtime from server may be stale due to local buffered write */
@@ -476,8 +482,14 @@ static void fuse_umount_begin(struct super_block *sb)
{
struct fuse_conn *fc = get_fuse_conn_super(sb);
- if (!fc->no_force_umount)
- fuse_abort_conn(fc);
+ if (fc->no_force_umount)
+ return;
+
+ fuse_abort_conn(fc);
+
+ // Only retire block-device-based superblocks.
+ if (sb->s_bdev != NULL)
+ retire_super(sb);
}
static void fuse_send_destroy(struct fuse_mount *fm)
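
[Editor's note on the fuse/inode.c hunks above] fuse_change_attributes_common() now clamps the nanosecond fields reported by the server to NSEC_PER_SEC - 1 before they reach the inode, so an out-of-range value from a buggy or malicious server cannot leak into the VFS timestamps. The clamp is just min_t(); a standalone equivalent:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000L

/* Equivalent of min_t(u32, nsec, NSEC_PER_SEC - 1) from the hunk above. */
static uint32_t sanitize_nsec(uint32_t nsec)
{
        return nsec < NSEC_PER_SEC - 1 ? nsec : NSEC_PER_SEC - 1;
}

int main(void)
{
        printf("%u\n", sanitize_nsec(123456789));       /* unchanged */
        printf("%u\n", sanitize_nsec(4000000000u));     /* clamped to 999999999 */
        return 0;
}
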
diff --git a/fs/fuse/ioctl.c b/fs/fuse/ioctl.c
index 33cde4bbccdc..61d8afcb10a3 100644
--- a/fs/fuse/ioctl.c
+++ b/fs/fuse/ioctl.c
@@ -9,6 +9,17 @@
#include <linux/compat.h>
#include <linux/fileattr.h>
+static ssize_t fuse_send_ioctl(struct fuse_mount *fm, struct fuse_args *args)
+{
+ ssize_t ret = fuse_simple_request(fm, args);
+
+ /* Translate ENOSYS, which shouldn't be returned from fs */
+ if (ret == -ENOSYS)
+ ret = -ENOTTY;
+
+ return ret;
+}
+
/*
* CUSE servers compiled on 32bit broke on 64bit kernels because the
* ABI was defined to be 'struct iovec' which is different on 32bit
@@ -259,7 +270,7 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
ap.args.out_pages = true;
ap.args.out_argvar = true;
- transferred = fuse_simple_request(fm, &ap.args);
+ transferred = fuse_send_ioctl(fm, &ap.args);
err = transferred;
if (transferred < 0)
goto out;
@@ -393,7 +404,7 @@ static int fuse_priv_ioctl(struct inode *inode, struct fuse_file *ff,
args.out_args[1].size = inarg.out_size;
args.out_args[1].value = ptr;
- err = fuse_simple_request(fm, &args);
+ err = fuse_send_ioctl(fm, &args);
if (!err) {
if (outarg.result < 0)
err = outarg.result;
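
[Editor's note on the fuse/ioctl.c hunks above] Both ioctl paths now go through fuse_send_ioctl(), whose only job is to translate -ENOSYS from the server into -ENOTTY, the errno userspace expects for an unsupported ioctl. The same pattern as a standalone sketch, with hypothetical names around the real translation:

#include <errno.h>
#include <stdio.h>

/* Stand-in for fuse_simple_request(): pretend the server answered ENOSYS. */
static long send_request(void)
{
        return -ENOSYS;
}

/* Mirrors fuse_send_ioctl(): ENOSYS must never leak out of an ioctl
 * handler, so it is rewritten to ENOTTY. */
static long send_ioctl(void)
{
        long ret = send_request();

        if (ret == -ENOSYS)
                ret = -ENOTTY;
        return ret;
}

int main(void)
{
        printf("ioctl result: %ld (ENOTTY is %d)\n", send_ioctl(), ENOTTY);
        return 0;
}
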
diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
index 8db53fa67359..4d8d4f16c727 100644
--- a/fs/fuse/virtio_fs.c
+++ b/fs/fuse/virtio_fs.c
@@ -741,8 +741,7 @@ out:
}
/* Free virtqueues (device must already be reset) */
-static void virtio_fs_cleanup_vqs(struct virtio_device *vdev,
- struct virtio_fs *fs)
+static void virtio_fs_cleanup_vqs(struct virtio_device *vdev)
{
vdev->config->del_vqs(vdev);
}
@@ -757,7 +756,7 @@ static long virtio_fs_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
{
struct virtio_fs *fs = dax_get_private(dax_dev);
phys_addr_t offset = PFN_PHYS(pgoff);
- size_t max_nr_pages = fs->window_len/PAGE_SIZE - pgoff;
+ size_t max_nr_pages = fs->window_len / PAGE_SIZE - pgoff;
if (kaddr)
*kaddr = fs->window_kaddr + offset;
@@ -895,7 +894,7 @@ static int virtio_fs_probe(struct virtio_device *vdev)
out_vqs:
virtio_reset_device(vdev);
- virtio_fs_cleanup_vqs(vdev, fs);
+ virtio_fs_cleanup_vqs(vdev);
kfree(fs->vqs);
out:
@@ -927,7 +926,7 @@ static void virtio_fs_remove(struct virtio_device *vdev)
virtio_fs_stop_all_queues(fs);
virtio_fs_drain_all_queues_locked(fs);
virtio_reset_device(vdev);
- virtio_fs_cleanup_vqs(vdev, fs);
+ virtio_fs_cleanup_vqs(vdev);
vdev->priv = NULL;
/* Put device reference on virtio_fs object */
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 57ff883d432c..05bee80ac7de 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -82,31 +82,6 @@ static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
}
/**
- * gfs2_writepage - Write page for writeback mappings
- * @page: The page
- * @wbc: The writeback control
- */
-static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
-{
- struct inode *inode = page->mapping->host;
- struct gfs2_inode *ip = GFS2_I(inode);
- struct gfs2_sbd *sdp = GFS2_SB(inode);
- struct iomap_writepage_ctx wpc = { };
-
- if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
- goto out;
- if (current->journal_info)
- goto redirty;
- return iomap_writepage(page, wbc, &wpc, &gfs2_writeback_ops);
-
-redirty:
- redirty_page_for_writepage(wbc, page);
-out:
- unlock_page(page);
- return 0;
-}
-
-/**
* gfs2_write_jdata_page - gfs2 jdata-specific version of block_write_full_page
* @page: The page to write
* @wbc: The writeback control
@@ -765,7 +740,6 @@ cannot_release:
}
static const struct address_space_operations gfs2_aops = {
- .writepage = gfs2_writepage,
.writepages = gfs2_writepages,
.read_folio = gfs2_read_folio,
.readahead = gfs2_readahead,
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index a0562dd1bada..54a6d17b8c25 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -2016,7 +2016,7 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
l_blocks++;
}
- gfs2_rlist_alloc(&rlist);
+ gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE, LM_FLAG_NODE_SCOPE);
for (x = 0; x < rlist.rl_rgrps; x++) {
struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl);
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 2cceb193dcd8..892006fbbb09 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -780,7 +780,7 @@ static inline bool should_fault_in_pages(struct iov_iter *i,
if (!count)
return false;
- if (!iter_is_iovec(i))
+ if (!user_backed_iter(i))
return false;
size = PAGE_SIZE;
@@ -1066,8 +1066,7 @@ out_unlock:
gfs2_glock_dq(gh);
out_uninit:
gfs2_holder_uninit(gh);
- if (statfs_gh)
- kfree(statfs_gh);
+ kfree(statfs_gh);
from->count = orig_count - written;
return written ? written : ret;
}
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index c992d53013d3..41b6c89e4bf7 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -405,10 +405,13 @@ static void do_error(struct gfs2_glock *gl, const int ret)
/**
* demote_incompat_holders - demote incompatible demoteable holders
* @gl: the glock we want to promote
- * @new_gh: the new holder to be promoted
+ * @current_gh: the newly promoted holder
+ *
+ * We're passing the newly promoted holder in @current_gh, but actually, any of
+ * the strong holders would do.
*/
static void demote_incompat_holders(struct gfs2_glock *gl,
- struct gfs2_holder *new_gh)
+ struct gfs2_holder *current_gh)
{
struct gfs2_holder *gh, *tmp;
@@ -424,8 +427,10 @@ static void demote_incompat_holders(struct gfs2_glock *gl,
*/
if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
return;
+ if (gh == current_gh)
+ continue;
if (test_bit(HIF_MAY_DEMOTE, &gh->gh_iflags) &&
- !may_grant(gl, new_gh, gh)) {
+ !may_grant(gl, current_gh, gh)) {
/*
* We should not recurse into do_promote because
* __gfs2_glock_dq only calls handle_callback,
@@ -478,8 +483,7 @@ find_first_strong_holder(struct gfs2_glock *gl)
* gfs2_instantiate - Call the glops instantiate function
* @gh: The glock holder
*
- * Returns: 0 if instantiate was successful, 2 if type specific operation is
- * underway, or error.
+ * Returns: 0 if instantiate was successful, or error.
*/
int gfs2_instantiate(struct gfs2_holder *gh)
{
@@ -489,7 +493,7 @@ int gfs2_instantiate(struct gfs2_holder *gh)
again:
if (!test_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags))
- return 0;
+ goto done;
/*
* Since we unlock the lockref lock, we set a flag to indicate
@@ -508,78 +512,55 @@ again:
goto again;
}
- ret = glops->go_instantiate(gh);
+ ret = glops->go_instantiate(gl);
if (!ret)
clear_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
clear_and_wake_up_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags);
- return ret;
+ if (ret)
+ return ret;
+
+done:
+ if (glops->go_held)
+ return glops->go_held(gh);
+ return 0;
}
/**
* do_promote - promote as many requests as possible on the current queue
* @gl: The glock
*
- * Returns: 1 if there is a blocked holder at the head of the list, or 2
- * if a type specific operation is underway.
+ * Returns: 1 if there is a blocked holder at the head of the list
*/
static int do_promote(struct gfs2_glock *gl)
-__releases(&gl->gl_lockref.lock)
-__acquires(&gl->gl_lockref.lock)
{
- struct gfs2_holder *gh, *tmp, *first_gh;
+ struct gfs2_holder *gh, *current_gh;
bool incompat_holders_demoted = false;
- bool lock_released;
- int ret;
-restart:
- first_gh = find_first_strong_holder(gl);
- list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
- lock_released = false;
+ current_gh = find_first_strong_holder(gl);
+ list_for_each_entry(gh, &gl->gl_holders, gh_list) {
if (test_bit(HIF_HOLDER, &gh->gh_iflags))
continue;
- if (!may_grant(gl, first_gh, gh)) {
+ if (!may_grant(gl, current_gh, gh)) {
/*
- * If we get here, it means we may not grant this holder for
- * some reason. If this holder is the head of the list, it
- * means we have a blocked holder at the head, so return 1.
+ * If we get here, it means we may not grant this
+ * holder for some reason. If this holder is at the
+ * head of the list, it means we have a blocked holder
+ * at the head, so return 1.
*/
if (list_is_first(&gh->gh_list, &gl->gl_holders))
return 1;
do_error(gl, 0);
break;
}
- if (!incompat_holders_demoted) {
- demote_incompat_holders(gl, first_gh);
- incompat_holders_demoted = true;
- first_gh = gh;
- }
- if (test_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags) &&
- !(gh->gh_flags & GL_SKIP) && gl->gl_ops->go_instantiate) {
- lock_released = true;
- spin_unlock(&gl->gl_lockref.lock);
- ret = gfs2_instantiate(gh);
- spin_lock(&gl->gl_lockref.lock);
- if (ret) {
- if (ret == 1)
- return 2;
- gh->gh_error = ret;
- list_del_init(&gh->gh_list);
- trace_gfs2_glock_queue(gh, 0);
- gfs2_holder_wake(gh);
- goto restart;
- }
- }
set_bit(HIF_HOLDER, &gh->gh_iflags);
trace_gfs2_promote(gh);
gfs2_holder_wake(gh);
- /*
- * If we released the gl_lockref.lock the holders list may have
- * changed. For that reason, we start again at the start of
- * the holders queue.
- */
- if (lock_released)
- goto restart;
+ if (!incompat_holders_demoted) {
+ current_gh = gh;
+ demote_incompat_holders(gl, current_gh);
+ incompat_holders_demoted = true;
+ }
}
return 0;
}
@@ -657,7 +638,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
const struct gfs2_glock_operations *glops = gl->gl_ops;
struct gfs2_holder *gh;
unsigned state = ret & LM_OUT_ST_MASK;
- int rv;
spin_lock(&gl->gl_lockref.lock);
trace_gfs2_glock_state_change(gl, state);
@@ -715,6 +695,8 @@ retry:
gfs2_demote_wake(gl);
if (state != LM_ST_UNLOCKED) {
if (glops->go_xmote_bh) {
+ int rv;
+
spin_unlock(&gl->gl_lockref.lock);
rv = glops->go_xmote_bh(gl);
spin_lock(&gl->gl_lockref.lock);
@@ -723,13 +705,10 @@ retry:
goto out;
}
}
- rv = do_promote(gl);
- if (rv == 2)
- goto out_locked;
+ do_promote(gl);
}
out:
clear_bit(GLF_LOCK, &gl->gl_flags);
-out_locked:
spin_unlock(&gl->gl_lockref.lock);
}
@@ -886,7 +865,6 @@ __releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
struct gfs2_holder *gh = NULL;
- int ret;
if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
return;
@@ -905,18 +883,14 @@ __acquires(&gl->gl_lockref.lock)
} else {
if (test_bit(GLF_DEMOTE, &gl->gl_flags))
gfs2_demote_wake(gl);
- ret = do_promote(gl);
- if (ret == 0)
+ if (do_promote(gl) == 0)
goto out_unlock;
- if (ret == 2)
- goto out;
gh = find_first_waiter(gl);
gl->gl_target = gh->gh_state;
if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
do_error(gl, 0); /* Fail queued try locks */
}
do_xmote(gl, gh, gl->gl_target);
-out:
return;
out_sched:
@@ -1314,6 +1288,25 @@ static void gfs2_glock_update_hold_time(struct gfs2_glock *gl,
}
/**
+ * gfs2_glock_holder_ready - holder is ready and its error code can be collected
+ * @gh: the glock holder
+ *
+ * Called when a glock holder no longer needs to be waited for because it is
+ * now either held (HIF_HOLDER set; gh_error == 0), or acquiring the lock has
+ * failed (gh_error != 0).
+ */
+
+int gfs2_glock_holder_ready(struct gfs2_holder *gh)
+{
+ if (gh->gh_error || (gh->gh_flags & GL_SKIP))
+ return gh->gh_error;
+ gh->gh_error = gfs2_instantiate(gh);
+ if (gh->gh_error)
+ gfs2_glock_dq(gh);
+ return gh->gh_error;
+}
+
+/**
* gfs2_glock_wait - wait on a glock acquisition
* @gh: the glock holder
*
@@ -1327,7 +1320,7 @@ int gfs2_glock_wait(struct gfs2_holder *gh)
might_sleep();
wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
gfs2_glock_update_hold_time(gh->gh_gl, start_time);
- return gh->gh_error;
+ return gfs2_glock_holder_ready(gh);
}
static int glocks_pending(unsigned int num_gh, struct gfs2_holder *ghs)
@@ -1355,7 +1348,6 @@ int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs)
struct gfs2_sbd *sdp = ghs[0].gh_gl->gl_name.ln_sbd;
int i, ret = 0, timeout = 0;
unsigned long start_time = jiffies;
- bool keep_waiting;
might_sleep();
/*
@@ -1365,53 +1357,33 @@ int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs)
for (i = 0; i < num_gh; i++)
timeout += ghs[i].gh_gl->gl_hold_time << 1;
-wait_for_dlm:
if (!wait_event_timeout(sdp->sd_async_glock_wait,
- !glocks_pending(num_gh, ghs), timeout))
+ !glocks_pending(num_gh, ghs), timeout)) {
ret = -ESTALE; /* request timed out. */
+ goto out;
+ }
- /*
- * If dlm granted all our requests, we need to adjust the glock
- * minimum hold time values according to how long we waited.
- *
- * If our request timed out, we need to repeatedly release any held
- * glocks we acquired thus far to allow dlm to acquire the remaining
- * glocks without deadlocking. We cannot currently cancel outstanding
- * glock acquisitions.
- *
- * The HIF_WAIT bit tells us which requests still need a response from
- * dlm.
- *
- * If dlm sent us any errors, we return the first error we find.
- */
- keep_waiting = false;
for (i = 0; i < num_gh; i++) {
- /* Skip holders we have already dequeued below. */
- if (!gfs2_holder_queued(&ghs[i]))
- continue;
- /* Skip holders with a pending DLM response. */
- if (test_bit(HIF_WAIT, &ghs[i].gh_iflags)) {
- keep_waiting = true;
- continue;
- }
+ struct gfs2_holder *gh = &ghs[i];
+ int ret2;
- if (test_bit(HIF_HOLDER, &ghs[i].gh_iflags)) {
- if (ret == -ESTALE)
- gfs2_glock_dq(&ghs[i]);
- else
- gfs2_glock_update_hold_time(ghs[i].gh_gl,
- start_time);
+ if (test_bit(HIF_HOLDER, &gh->gh_iflags)) {
+ gfs2_glock_update_hold_time(gh->gh_gl,
+ start_time);
}
+ ret2 = gfs2_glock_holder_ready(gh);
if (!ret)
- ret = ghs[i].gh_error;
+ ret = ret2;
}
- if (keep_waiting)
- goto wait_for_dlm;
+out:
+ if (ret) {
+ for (i = 0; i < num_gh; i++) {
+ struct gfs2_holder *gh = &ghs[i];
- /*
- * At this point, we've either acquired all locks or released them all.
- */
+ gfs2_glock_dq(gh);
+ }
+ }
return ret;
}
@@ -1490,10 +1462,10 @@ __acquires(&gl->gl_lockref.lock)
if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
if (test_bit(GLF_LOCK, &gl->gl_flags)) {
- struct gfs2_holder *first_gh;
+ struct gfs2_holder *current_gh;
- first_gh = find_first_strong_holder(gl);
- try_futile = !may_grant(gl, first_gh, gh);
+ current_gh = find_first_strong_holder(gl);
+ try_futile = !may_grant(gl, current_gh, gh);
}
if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
goto fail;
@@ -1779,7 +1751,7 @@ static int glock_compare(const void *arg_a, const void *arg_b)
}
/**
- * nq_m_sync - synchonously acquire more than one glock in deadlock free order
+ * nq_m_sync - synchronously acquire more than one glock in deadlock free order
* @num_gh: the number of structures
* @ghs: an array of struct gfs2_holder structures
* @p: placeholder for the holder structure to pass back
@@ -1800,8 +1772,6 @@ static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
for (x = 0; x < num_gh; x++) {
- p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
-
error = gfs2_glock_nq(p[x]);
if (error) {
while (x--)
@@ -1818,7 +1788,6 @@ static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
* @num_gh: the number of structures
* @ghs: an array of struct gfs2_holder structures
*
- *
* Returns: 0 on success (all glocks acquired),
* errno on failure (no glocks acquired)
*/
@@ -1833,7 +1802,6 @@ int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
case 0:
return 0;
case 1:
- ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
return gfs2_glock_nq(ghs);
default:
if (num_gh <= 4)
@@ -2245,20 +2213,6 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
glock_hash_walk(dump_glock_func, sdp);
}
-void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
-{
- struct gfs2_glock *gl = ip->i_gl;
- int ret;
-
- ret = gfs2_truncatei_resume(ip);
- gfs2_glock_assert_withdraw(gl, ret == 0);
-
- spin_lock(&gl->gl_lockref.lock);
- clear_bit(GLF_LOCK, &gl->gl_flags);
- run_queue(gl, 1);
- spin_unlock(&gl->gl_lockref.lock);
-}
-
static const char *state2str(unsigned state)
{
switch(state) {
@@ -2533,7 +2487,7 @@ int __init gfs2_glock_init(void)
return -ENOMEM;
}
- ret = register_shrinker(&glock_shrinker);
+ ret = register_shrinker(&glock_shrinker, "gfs2-glock");
if (ret) {
destroy_workqueue(gfs2_delete_workqueue);
destroy_workqueue(glock_workqueue);
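
[Editor's note on the gfs2/glock.c hunks above] The result of a lock request is now collected in one place, gfs2_glock_holder_ready(): once the wait finishes, the holder's error is returned, instantiation runs lazily for non-GL_SKIP holders, and the holder is dequeued again if instantiation fails. A toy userspace model of that flow; the struct, the fake instantiate step, and the names are illustrative only.

#include <stdbool.h>
#include <stdio.h>

struct holder {
        int error;              /* result of the lock request */
        bool skip;              /* GL_SKIP: caller does not want instantiation */
        bool queued;
};

/* Stand-in for gfs2_instantiate(); return non-zero to simulate failure. */
static int instantiate(struct holder *gh)
{
        (void)gh;
        return 0;
}

/* Mirrors gfs2_glock_holder_ready(): report the holder's error, run the
 * deferred instantiate step, and drop the holder again if that fails. */
static int holder_ready(struct holder *gh)
{
        if (gh->error || gh->skip)
                return gh->error;
        gh->error = instantiate(gh);
        if (gh->error)
                gh->queued = false;     /* stands in for gfs2_glock_dq() */
        return gh->error;
}

int main(void)
{
        struct holder gh = { .error = 0, .skip = false, .queued = true };

        printf("holder_ready -> %d, still queued: %d\n",
               holder_ready(&gh), gh.queued);
        return 0;
}
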
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index c0ae9100a0bc..5aed8b500cf5 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -213,6 +213,7 @@ extern void gfs2_holder_uninit(struct gfs2_holder *gh);
extern int gfs2_glock_nq(struct gfs2_holder *gh);
extern int gfs2_glock_poll(struct gfs2_holder *gh);
extern int gfs2_instantiate(struct gfs2_holder *gh);
+extern int gfs2_glock_holder_ready(struct gfs2_holder *gh);
extern int gfs2_glock_wait(struct gfs2_holder *gh);
extern int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs);
extern void gfs2_glock_dq(struct gfs2_holder *gh);
@@ -273,7 +274,6 @@ extern void gfs2_cancel_delete_work(struct gfs2_glock *gl);
extern bool gfs2_delete_work_queued(const struct gfs2_glock *gl);
extern void gfs2_flush_delete_work(struct gfs2_sbd *sdp);
extern void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
-extern void gfs2_glock_finish_truncate(struct gfs2_inode *ip);
extern void gfs2_glock_thaw(struct gfs2_sbd *sdp);
extern void gfs2_glock_add_to_lru(struct gfs2_glock *gl);
extern void gfs2_glock_free(struct gfs2_glock *gl);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 392800f082a6..49210a2e7ce7 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -485,35 +485,33 @@ int gfs2_inode_refresh(struct gfs2_inode *ip)
* Returns: errno
*/
-static int inode_go_instantiate(struct gfs2_holder *gh)
+static int inode_go_instantiate(struct gfs2_glock *gl)
+{
+ struct gfs2_inode *ip = gl->gl_object;
+
+ if (!ip) /* no inode to populate - read it in later */
+ return 0;
+
+ return gfs2_inode_refresh(ip);
+}
+
+static int inode_go_held(struct gfs2_holder *gh)
{
struct gfs2_glock *gl = gh->gh_gl;
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
struct gfs2_inode *ip = gl->gl_object;
int error = 0;
if (!ip) /* no inode to populate - read it in later */
- goto out;
-
- error = gfs2_inode_refresh(ip);
- if (error)
- goto out;
+ return 0;
if (gh->gh_state != LM_ST_DEFERRED)
inode_dio_wait(&ip->i_inode);
if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
(gl->gl_state == LM_ST_EXCLUSIVE) &&
- (gh->gh_state == LM_ST_EXCLUSIVE)) {
- spin_lock(&sdp->sd_trunc_lock);
- if (list_empty(&ip->i_trunc_list))
- list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
- spin_unlock(&sdp->sd_trunc_lock);
- wake_up(&sdp->sd_quota_wait);
- error = 1;
- }
+ (gh->gh_state == LM_ST_EXCLUSIVE))
+ error = gfs2_truncatei_resume(ip);
-out:
return error;
}
@@ -737,6 +735,7 @@ const struct gfs2_glock_operations gfs2_inode_glops = {
.go_inval = inode_go_inval,
.go_demote_ok = inode_go_demote_ok,
.go_instantiate = inode_go_instantiate,
+ .go_held = inode_go_held,
.go_dump = inode_go_dump,
.go_type = LM_TYPE_INODE,
.go_flags = GLOF_ASPACE | GLOF_LRU | GLOF_LVB,
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 8c00fb389ae5..d09d9892cd05 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -219,7 +219,8 @@ struct gfs2_glock_operations {
int (*go_xmote_bh)(struct gfs2_glock *gl);
void (*go_inval) (struct gfs2_glock *gl, int flags);
int (*go_demote_ok) (const struct gfs2_glock *gl);
- int (*go_instantiate) (struct gfs2_holder *gh);
+ int (*go_instantiate) (struct gfs2_glock *gl);
+ int (*go_held)(struct gfs2_holder *gh);
void (*go_dump)(struct seq_file *seq, struct gfs2_glock *gl,
const char *fs_id_buf);
void (*go_callback)(struct gfs2_glock *gl, bool remote);
@@ -396,7 +397,6 @@ struct gfs2_inode {
atomic_t i_sizehint; /* hint of the write size */
struct rw_semaphore i_rw_mutex;
struct list_head i_ordered;
- struct list_head i_trunc_list;
__be64 *i_hash_cache;
u32 i_entries;
u32 i_diskflags;
@@ -784,8 +784,6 @@ struct gfs2_sbd {
struct mutex sd_quota_mutex;
struct mutex sd_quota_sync_mutex;
wait_queue_head_t sd_quota_wait;
- struct list_head sd_trunc_list;
- spinlock_t sd_trunc_lock;
unsigned int sd_quota_slots;
unsigned long *sd_quota_bitmap;
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 2559a79cf14b..6ce369b096d4 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -1058,7 +1058,7 @@ restart:
/*
* Expand static jid arrays if necessary (by increments of RECOVER_SIZE_INC)
- * to accomodate the largest slot number. (NB dlm slot numbers start at 1,
+ * to accommodate the largest slot number. (NB dlm slot numbers start at 1,
* gfs2 jids start at 0, so jid = slot - 1)
*/
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index eec4159b08aa..723639376ae2 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -131,7 +131,7 @@ __acquires(&sdp->sd_ail_lock)
if (!mapping)
continue;
spin_unlock(&sdp->sd_ail_lock);
- ret = generic_writepages(mapping, wbc);
+ ret = filemap_fdatawrite_wbc(mapping, wbc);
if (need_resched()) {
blk_finish_plug(plug);
cond_resched();
@@ -222,8 +222,7 @@ out:
spin_unlock(&sdp->sd_ail_lock);
blk_finish_plug(&plug);
if (ret) {
- gfs2_lm(sdp, "gfs2_ail1_start_one (generic_writepages) "
- "returned: %d\n", ret);
+ gfs2_lm(sdp, "gfs2_ail1_start_one returned: %d\n", ret);
gfs2_withdraw(sdp);
}
trace_gfs2_ail_flush(sdp, wbc, 0);
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 244187e3e70f..14ae9de76277 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -38,7 +38,6 @@ static void gfs2_init_inode_once(void *foo)
inode_init_once(&ip->i_inode);
atomic_set(&ip->i_sizehint, 0);
init_rwsem(&ip->i_rw_mutex);
- INIT_LIST_HEAD(&ip->i_trunc_list);
INIT_LIST_HEAD(&ip->i_ordered);
ip->i_qadata = NULL;
gfs2_holder_mark_uninitialized(&ip->i_rgd_gh);
@@ -148,7 +147,7 @@ static int __init init_gfs2_fs(void)
if (!gfs2_trans_cachep)
goto fail_cachep8;
- error = register_shrinker(&gfs2_qd_shrinker);
+ error = register_shrinker(&gfs2_qd_shrinker, "gfs2-qd");
if (error)
goto fail_shrinker;
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index c9b423c874a3..549879929c84 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -106,8 +106,6 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
mutex_init(&sdp->sd_quota_mutex);
mutex_init(&sdp->sd_quota_sync_mutex);
init_waitqueue_head(&sdp->sd_quota_wait);
- INIT_LIST_HEAD(&sdp->sd_trunc_list);
- spin_lock_init(&sdp->sd_trunc_lock);
spin_lock_init(&sdp->sd_bitmap_lock);
INIT_LIST_HEAD(&sdp->sd_sc_inodes_list);
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index c98a7faa67d3..f201eaf59d0d 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -1517,25 +1517,6 @@ static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
}
}
-static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
-{
- struct gfs2_inode *ip;
-
- while(1) {
- ip = NULL;
- spin_lock(&sdp->sd_trunc_lock);
- if (!list_empty(&sdp->sd_trunc_list)) {
- ip = list_first_entry(&sdp->sd_trunc_list,
- struct gfs2_inode, i_trunc_list);
- list_del_init(&ip->i_trunc_list);
- }
- spin_unlock(&sdp->sd_trunc_lock);
- if (ip == NULL)
- return;
- gfs2_glock_finish_truncate(ip);
- }
-}
-
void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
if (!sdp->sd_statfs_force_sync) {
sdp->sd_statfs_force_sync = 1;
@@ -1558,7 +1539,6 @@ int gfs2_quotad(void *data)
unsigned long quotad_timeo = 0;
unsigned long t = 0;
DEFINE_WAIT(wait);
- int empty;
while (!kthread_should_stop()) {
@@ -1579,19 +1559,13 @@ int gfs2_quotad(void *data)
quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
&quotad_timeo, &tune->gt_quota_quantum);
- /* Check for & recover partially truncated inodes */
- quotad_check_trunc_list(sdp);
-
try_to_freeze();
bypass:
t = min(quotad_timeo, statfs_timeo);
prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
- spin_lock(&sdp->sd_trunc_lock);
- empty = list_empty(&sdp->sd_trunc_list);
- spin_unlock(&sdp->sd_trunc_lock);
- if (empty && !sdp->sd_statfs_force_sync)
+ if (!sdp->sd_statfs_force_sync)
t -= schedule_timeout(t);
else
t = 0;
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 8a63870eef5a..f602fb844951 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1196,9 +1196,8 @@ static void rgrp_set_bitmap_flags(struct gfs2_rgrpd *rgd)
* Returns: errno
*/
-int gfs2_rgrp_go_instantiate(struct gfs2_holder *gh)
+int gfs2_rgrp_go_instantiate(struct gfs2_glock *gl)
{
- struct gfs2_glock *gl = gh->gh_gl;
struct gfs2_rgrpd *rgd = gl->gl_object;
struct gfs2_sbd *sdp = rgd->rd_sbd;
unsigned int length = rgd->rd_length;
@@ -2720,12 +2719,15 @@ void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
* gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate
* and initialize an array of glock holders for them
* @rlist: the list of resource groups
+ * @state: the state we're requesting
+ * @flags: the modifier flags
*
* FIXME: Don't use NOFAIL
*
*/
-void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist)
+void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist,
+ unsigned int state, u16 flags)
{
unsigned int x;
@@ -2733,8 +2735,8 @@ void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist)
sizeof(struct gfs2_holder),
GFP_NOFS | __GFP_NOFAIL);
for (x = 0; x < rlist->rl_rgrps; x++)
- gfs2_holder_init(rlist->rl_rgd[x]->rd_gl, LM_ST_EXCLUSIVE,
- LM_FLAG_NODE_SCOPE, &rlist->rl_ghs[x]);
+ gfs2_holder_init(rlist->rl_rgd[x]->rd_gl, state, flags,
+ &rlist->rl_ghs[x]);
}
/**
diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h
index 46dd94e9e085..00b30cf893af 100644
--- a/fs/gfs2/rgrp.h
+++ b/fs/gfs2/rgrp.h
@@ -31,7 +31,7 @@ extern struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd);
extern void gfs2_clear_rgrpd(struct gfs2_sbd *sdp);
extern int gfs2_rindex_update(struct gfs2_sbd *sdp);
extern void gfs2_free_clones(struct gfs2_rgrpd *rgd);
-extern int gfs2_rgrp_go_instantiate(struct gfs2_holder *gh);
+extern int gfs2_rgrp_go_instantiate(struct gfs2_glock *gl);
extern void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd);
extern struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip);
@@ -64,7 +64,8 @@ struct gfs2_rgrp_list {
extern void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
u64 block);
-extern void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist);
+extern void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist,
+ unsigned int state, u16 flags);
extern void gfs2_rlist_free(struct gfs2_rgrp_list *rlist);
extern u64 gfs2_ri_total(struct gfs2_sbd *sdp);
extern void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_rgrpd *rgd,
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index bdb773e5c88f..b5b0f285b27f 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1196,7 +1196,7 @@ static bool gfs2_upgrade_iopen_glock(struct inode *inode)
gfs2_glock_dq(gh);
return false;
}
- return true;
+ return gfs2_glock_holder_ready(gh) == 0;
}
/**
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index 0c5650fe1fd1..f6a66050380e 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -1313,7 +1313,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
else
goto out;
- gfs2_rlist_alloc(&rlist);
+ gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE, LM_FLAG_NODE_SCOPE);
for (x = 0; x < rlist.rl_rgrps; x++) {
rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl);
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 20336cb3c040..f7a5b5124d8a 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -11,7 +11,6 @@
#include <linux/thread_info.h>
#include <asm/current.h>
-#include <linux/sched/signal.h> /* remove ASAP */
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
@@ -40,7 +39,6 @@
#include <linux/uaccess.h>
#include <linux/sched/mm.h>
-static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
@@ -284,39 +282,9 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
}
#endif
-static size_t
-hugetlbfs_read_actor(struct page *page, unsigned long offset,
- struct iov_iter *to, unsigned long size)
-{
- size_t copied = 0;
- int i, chunksize;
-
- /* Find which 4k chunk and offset with in that chunk */
- i = offset >> PAGE_SHIFT;
- offset = offset & ~PAGE_MASK;
-
- while (size) {
- size_t n;
- chunksize = PAGE_SIZE;
- if (offset)
- chunksize -= offset;
- if (chunksize > size)
- chunksize = size;
- n = copy_page_to_iter(&page[i], offset, chunksize, to);
- copied += n;
- if (n != chunksize)
- return copied;
- offset = 0;
- size -= chunksize;
- i++;
- }
- return copied;
-}
-
/*
* Support for read() - Find the page attached to f_mapping and copy out the
- * data. Its *very* similar to generic_file_buffered_read(), we can't use that
- * since it has PAGE_SIZE assumptions.
+ * data. This provides functionality similar to filemap_read().
*/
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
@@ -363,7 +331,7 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
/*
* We have the page, copy it to user space buffer.
*/
- copied = hugetlbfs_read_actor(page, offset, to, nr);
+ copied = copy_page_to_iter(page, offset, nr, to);
put_page(page);
}
offset += copied;
@@ -1082,7 +1050,7 @@ static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_bsize = huge_page_size(h);
if (sbinfo) {
spin_lock(&sbinfo->stat_lock);
- /* If no limits set, just report 0 for max/free/used
+ /* If no limits set, just report 0 or -1 for max/free/used
* blocks, like simple_statfs() */
if (sbinfo->spool) {
long free_pages;
@@ -1309,7 +1277,7 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par
ps = memparse(param->string, &rest);
ctx->hstate = size_to_hstate(ps);
if (!ctx->hstate) {
- pr_err("Unsupported page size %lu MB\n", ps >> 20);
+ pr_err("Unsupported page size %lu MB\n", ps / SZ_1M);
return -EINVAL;
}
return 0;
@@ -1385,7 +1353,7 @@ hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
/*
* Allocate and initialize subpool if maximum or minimum size is
* specified. Any needed reservations (for minimum size) are taken
- * taken when the subpool is created.
+ * when the subpool is created.
*/
if (ctx->max_hpages != -1 || ctx->min_hpages != -1) {
sbinfo->spool = hugepage_new_subpool(ctx->hstate,
@@ -1555,7 +1523,7 @@ static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h)
}
if (IS_ERR(mnt))
pr_err("Cannot mount internal hugetlbfs for page size %luK",
- huge_page_size(h) >> 10);
+ huge_page_size(h) / SZ_1K);
return mnt;
}
diff --git a/fs/inode.c b/fs/inode.c
index 524ee91f74a6..ba1de23c13c1 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -422,6 +422,7 @@ void inode_init_once(struct inode *inode)
INIT_LIST_HEAD(&inode->i_io_list);
INIT_LIST_HEAD(&inode->i_wb_list);
INIT_LIST_HEAD(&inode->i_lru);
+ INIT_LIST_HEAD(&inode->i_sb_list);
__address_space_init_once(&inode->i_data);
i_size_ordered_init(inode);
}
@@ -1021,7 +1022,6 @@ struct inode *new_inode_pseudo(struct super_block *sb)
spin_lock(&inode->i_lock);
inode->i_state = 0;
spin_unlock(&inode->i_lock);
- INIT_LIST_HEAD(&inode->i_sb_list);
}
return inode;
}
@@ -1165,7 +1165,6 @@ struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
{
struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval);
struct inode *old;
- bool creating = inode->i_state & I_CREATING;
again:
spin_lock(&inode_hash_lock);
@@ -1199,7 +1198,12 @@ again:
inode->i_state |= I_NEW;
hlist_add_head_rcu(&inode->i_hash, head);
spin_unlock(&inode->i_lock);
- if (!creating)
+
+ /*
+ * Add inode to the sb list if it's not already. It has I_NEW at this
+ * point, so it should be safe to test i_sb_list locklessly.
+ */
+ if (list_empty(&inode->i_sb_list))
inode_sb_list_add(inode);
unlock:
spin_unlock(&inode_hash_lock);
@@ -2014,23 +2018,25 @@ static int __file_remove_privs(struct file *file, unsigned int flags)
{
struct dentry *dentry = file_dentry(file);
struct inode *inode = file_inode(file);
- int error;
+ int error = 0;
int kill;
if (IS_NOSEC(inode) || !S_ISREG(inode->i_mode))
return 0;
kill = dentry_needs_remove_privs(dentry);
- if (kill <= 0)
+ if (kill < 0)
return kill;
- if (flags & IOCB_NOWAIT)
- return -EAGAIN;
+ if (kill) {
+ if (flags & IOCB_NOWAIT)
+ return -EAGAIN;
+
+ error = __remove_privs(file_mnt_user_ns(file), dentry, kill);
+ }
- error = __remove_privs(file_mnt_user_ns(file), dentry, kill);
if (!error)
inode_has_no_xattr(inode);
-
return error;
}
@@ -2326,10 +2332,6 @@ void inode_init_owner(struct user_namespace *mnt_userns, struct inode *inode,
/* Directories are special, and always inherit S_ISGID */
if (S_ISDIR(mode))
mode |= S_ISGID;
- else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
- !in_group_p(i_gid_into_mnt(mnt_userns, dir)) &&
- !capable_wrt_inode_uidgid(mnt_userns, dir, CAP_FSETID))
- mode &= ~S_ISGID;
} else
inode_fsgid_set(inode, mnt_userns);
inode->i_mode = mode;
@@ -2485,3 +2487,33 @@ struct timespec64 current_time(struct inode *inode)
return timestamp_truncate(now, inode);
}
EXPORT_SYMBOL(current_time);
+
+/**
+ * mode_strip_sgid - handle the sgid bit for non-directories
+ * @mnt_userns: User namespace of the mount the inode was created from
+ * @dir: parent directory inode
+ * @mode: mode of the file to be created in @dir
+ *
+ * If the @mode of the new file has both the S_ISGID and S_IXGRP bit
+ * raised and @dir has the S_ISGID bit raised ensure that the caller is
+ * either in the group of the parent directory or they have CAP_FSETID
+ * in their user namespace and are privileged over the parent directory.
+ * In all other cases, strip the S_ISGID bit from @mode.
+ *
+ * Return: the new mode to use for the file
+ */
+umode_t mode_strip_sgid(struct user_namespace *mnt_userns,
+ const struct inode *dir, umode_t mode)
+{
+ if ((mode & (S_ISGID | S_IXGRP)) != (S_ISGID | S_IXGRP))
+ return mode;
+ if (S_ISDIR(mode) || !dir || !(dir->i_mode & S_ISGID))
+ return mode;
+ if (in_group_p(i_gid_into_mnt(mnt_userns, dir)))
+ return mode;
+ if (capable_wrt_inode_uidgid(mnt_userns, dir, CAP_FSETID))
+ return mode;
+
+ return mode & ~S_ISGID;
+}
+EXPORT_SYMBOL(mode_strip_sgid);
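
The fs/inode.c changes above defer the sb-list insertion until inode_insert5() (keyed off an empty i_sb_list initialised in inode_init_once()), make __file_remove_privs() only take the IOCB_NOWAIT bail-out when privileges actually need stripping, and move the setgid-stripping policy into the new exported helper mode_strip_sgid(). A hedged sketch of how a filesystem's create path could consume the helper; myfs_apply_mode() is made up, only mode_strip_sgid() comes from the patch:

	#include <linux/fs.h>

	/* Illustration only: sanitise the requested mode before stamping it on
	 * the freshly allocated inode. */
	static void myfs_apply_mode(struct user_namespace *mnt_userns,
				    const struct inode *dir, struct inode *inode,
				    umode_t mode)
	{
		inode->i_mode = mode_strip_sgid(mnt_userns, dir, mode);
	}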
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 2b82c7f1de88..ca5c62901541 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1529,21 +1529,6 @@ unlock:
}
int
-iomap_writepage(struct page *page, struct writeback_control *wbc,
- struct iomap_writepage_ctx *wpc,
- const struct iomap_writeback_ops *ops)
-{
- int ret;
-
- wpc->ops = ops;
- ret = iomap_do_writepage(page, wbc, wpc);
- if (!wpc->ioend)
- return ret;
- return iomap_submit_ioend(wpc, wpc->ioend, ret);
-}
-EXPORT_SYMBOL_GPL(iomap_writepage);
-
-int
iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
struct iomap_writepage_ctx *wpc,
const struct iomap_writeback_ops *ops)
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index c75d33d5c3ce..4eb559a16c9e 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -533,7 +533,7 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
iomi.flags |= IOMAP_NOWAIT;
}
- if (iter_is_iovec(iter))
+ if (user_backed_iter(iter))
dio->flags |= IOMAP_DIO_DIRTY;
} else {
iomi.flags |= IOMAP_WRITE;
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 746132998c57..51bd38da21cd 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -203,7 +203,7 @@ int jbd2_log_do_checkpoint(journal_t *journal)
tid_t this_tid;
int result, batch_count = 0;
- jbd_debug(1, "Start checkpoint\n");
+ jbd2_debug(1, "Start checkpoint\n");
/*
* First thing: if there are any transactions in the log which
@@ -212,7 +212,7 @@ int jbd2_log_do_checkpoint(journal_t *journal)
*/
result = jbd2_cleanup_journal_tail(journal);
trace_jbd2_checkpoint(journal, result);
- jbd_debug(1, "cleanup_journal_tail returned %d\n", result);
+ jbd2_debug(1, "cleanup_journal_tail returned %d\n", result);
if (result <= 0)
return result;
@@ -804,5 +804,5 @@ void __jbd2_journal_drop_transaction(journal_t *journal, transaction_t *transact
trace_jbd2_drop_transaction(journal, transaction);
- jbd_debug(1, "Dropping transaction %d, all done\n", transaction->t_tid);
+ jbd2_debug(1, "Dropping transaction %d, all done\n", transaction->t_tid);
}
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 890b5543a1c5..b2b2bc9b88d9 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -421,7 +421,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
/* Do we need to erase the effects of a prior jbd2_journal_flush? */
if (journal->j_flags & JBD2_FLUSHED) {
- jbd_debug(3, "super block updated\n");
+ jbd2_debug(3, "super block updated\n");
mutex_lock_io(&journal->j_checkpoint_mutex);
/*
* We hold j_checkpoint_mutex so tail cannot change under us.
@@ -435,7 +435,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
REQ_SYNC);
mutex_unlock(&journal->j_checkpoint_mutex);
} else {
- jbd_debug(3, "superblock not updated\n");
+ jbd2_debug(3, "superblock not updated\n");
}
J_ASSERT(journal->j_running_transaction != NULL);
@@ -467,7 +467,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
commit_transaction = journal->j_running_transaction;
trace_jbd2_start_commit(journal, commit_transaction);
- jbd_debug(1, "JBD2: starting commit of transaction %d\n",
+ jbd2_debug(1, "JBD2: starting commit of transaction %d\n",
commit_transaction->t_tid);
write_lock(&journal->j_state_lock);
@@ -540,7 +540,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
__jbd2_journal_clean_checkpoint_list(journal, false);
spin_unlock(&journal->j_list_lock);
- jbd_debug(3, "JBD2: commit phase 1\n");
+ jbd2_debug(3, "JBD2: commit phase 1\n");
/*
* Clear revoked flag to reflect there is no revoked buffers
@@ -553,13 +553,13 @@ void jbd2_journal_commit_transaction(journal_t *journal)
*/
jbd2_journal_switch_revoke_table(journal);
+ write_lock(&journal->j_state_lock);
/*
* Reserved credits cannot be claimed anymore, free them
*/
atomic_sub(atomic_read(&journal->j_reserved_credits),
&commit_transaction->t_outstanding_credits);
- write_lock(&journal->j_state_lock);
trace_jbd2_commit_flushing(journal, commit_transaction);
stats.run.rs_flushing = jiffies;
stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
@@ -573,7 +573,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
wake_up(&journal->j_wait_transaction_locked);
write_unlock(&journal->j_state_lock);
- jbd_debug(3, "JBD2: commit phase 2a\n");
+ jbd2_debug(3, "JBD2: commit phase 2a\n");
/*
* Now start flushing things to disk, in the order they appear
@@ -586,7 +586,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
blk_start_plug(&plug);
jbd2_journal_write_revoke_records(commit_transaction, &log_bufs);
- jbd_debug(3, "JBD2: commit phase 2b\n");
+ jbd2_debug(3, "JBD2: commit phase 2b\n");
/*
* Way to go: we have now written out all of the data for a
@@ -642,7 +642,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
if (!descriptor) {
J_ASSERT (bufs == 0);
- jbd_debug(4, "JBD2: get descriptor\n");
+ jbd2_debug(4, "JBD2: get descriptor\n");
descriptor = jbd2_journal_get_descriptor_buffer(
commit_transaction,
@@ -652,7 +652,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
continue;
}
- jbd_debug(4, "JBD2: got buffer %llu (%p)\n",
+ jbd2_debug(4, "JBD2: got buffer %llu (%p)\n",
(unsigned long long)descriptor->b_blocknr,
descriptor->b_data);
tagp = &descriptor->b_data[sizeof(journal_header_t)];
@@ -737,7 +737,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
commit_transaction->t_buffers == NULL ||
space_left < tag_bytes + 16 + csum_size) {
- jbd_debug(4, "JBD2: Submit %d IOs\n", bufs);
+ jbd2_debug(4, "JBD2: Submit %d IOs\n", bufs);
/* Write an end-of-descriptor marker before
submitting the IOs. "tag" still points to
@@ -839,7 +839,7 @@ start_journal_io:
so we incur less scheduling load.
*/
- jbd_debug(3, "JBD2: commit phase 3\n");
+ jbd2_debug(3, "JBD2: commit phase 3\n");
while (!list_empty(&io_bufs)) {
struct buffer_head *bh = list_entry(io_bufs.prev,
@@ -882,7 +882,7 @@ start_journal_io:
J_ASSERT (commit_transaction->t_shadow_list == NULL);
- jbd_debug(3, "JBD2: commit phase 4\n");
+ jbd2_debug(3, "JBD2: commit phase 4\n");
/* Here we wait for the revoke record and descriptor record buffers */
while (!list_empty(&log_bufs)) {
@@ -906,7 +906,7 @@ start_journal_io:
if (err)
jbd2_journal_abort(journal, err);
- jbd_debug(3, "JBD2: commit phase 5\n");
+ jbd2_debug(3, "JBD2: commit phase 5\n");
write_lock(&journal->j_state_lock);
J_ASSERT(commit_transaction->t_state == T_COMMIT_DFLUSH);
commit_transaction->t_state = T_COMMIT_JFLUSH;
@@ -945,7 +945,7 @@ start_journal_io:
transaction can be removed from any checkpoint list it was on
before. */
- jbd_debug(3, "JBD2: commit phase 6\n");
+ jbd2_debug(3, "JBD2: commit phase 6\n");
J_ASSERT(list_empty(&commit_transaction->t_inode_list));
J_ASSERT(commit_transaction->t_buffers == NULL);
@@ -1122,7 +1122,7 @@ restart_loop:
/* Done with this transaction! */
- jbd_debug(3, "JBD2: commit phase 7\n");
+ jbd2_debug(3, "JBD2: commit phase 7\n");
J_ASSERT(commit_transaction->t_state == T_COMMIT_JFLUSH);
@@ -1164,7 +1164,7 @@ restart_loop:
journal->j_fc_cleanup_callback(journal, 1, commit_transaction->t_tid);
trace_jbd2_end_commit(journal, commit_transaction);
- jbd_debug(1, "JBD2: commit %d complete, head %d\n",
+ jbd2_debug(1, "JBD2: commit %d complete, head %d\n",
journal->j_commit_sequence, journal->j_tail_sequence);
write_lock(&journal->j_state_lock);
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 2a1b9da7c3e3..6350d3857c89 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -49,8 +49,7 @@
#include <asm/page.h>
#ifdef CONFIG_JBD2_DEBUG
-ushort jbd2_journal_enable_debug __read_mostly;
-EXPORT_SYMBOL(jbd2_journal_enable_debug);
+static ushort jbd2_journal_enable_debug __read_mostly;
module_param_named(jbd2_debug, jbd2_journal_enable_debug, ushort, 0644);
MODULE_PARM_DESC(jbd2_debug, "Debugging level for jbd2");
@@ -81,7 +80,6 @@ EXPORT_SYMBOL(jbd2_journal_errno);
EXPORT_SYMBOL(jbd2_journal_ack_err);
EXPORT_SYMBOL(jbd2_journal_clear_err);
EXPORT_SYMBOL(jbd2_log_wait_commit);
-EXPORT_SYMBOL(jbd2_log_start_commit);
EXPORT_SYMBOL(jbd2_journal_start_commit);
EXPORT_SYMBOL(jbd2_journal_force_commit_nested);
EXPORT_SYMBOL(jbd2_journal_wipe);
@@ -115,7 +113,6 @@ void __jbd2_debug(int level, const char *file, const char *func,
printk(KERN_DEBUG "%s: (%s, %u): %pV", file, func, line, &vaf);
va_end(args);
}
-EXPORT_SYMBOL(__jbd2_debug);
#endif
/* Checksumming functions */
@@ -203,11 +200,11 @@ loop:
if (journal->j_flags & JBD2_UNMOUNT)
goto end_loop;
- jbd_debug(1, "commit_sequence=%u, commit_request=%u\n",
+ jbd2_debug(1, "commit_sequence=%u, commit_request=%u\n",
journal->j_commit_sequence, journal->j_commit_request);
if (journal->j_commit_sequence != journal->j_commit_request) {
- jbd_debug(1, "OK, requests differ\n");
+ jbd2_debug(1, "OK, requests differ\n");
write_unlock(&journal->j_state_lock);
del_timer_sync(&journal->j_commit_timer);
jbd2_journal_commit_transaction(journal);
@@ -222,7 +219,7 @@ loop:
* good idea, because that depends on threads that may
* be already stopped.
*/
- jbd_debug(1, "Now suspending kjournald2\n");
+ jbd2_debug(1, "Now suspending kjournald2\n");
write_unlock(&journal->j_state_lock);
try_to_freeze();
write_lock(&journal->j_state_lock);
@@ -252,7 +249,7 @@ loop:
finish_wait(&journal->j_wait_commit, &wait);
}
- jbd_debug(1, "kjournald2 wakes\n");
+ jbd2_debug(1, "kjournald2 wakes\n");
/*
* Were we woken up by a commit wakeup event?
@@ -260,7 +257,7 @@ loop:
transaction = journal->j_running_transaction;
if (transaction && time_after_eq(jiffies, transaction->t_expires)) {
journal->j_commit_request = transaction->t_tid;
- jbd_debug(1, "woke because of timeout\n");
+ jbd2_debug(1, "woke because of timeout\n");
}
goto loop;
@@ -268,7 +265,7 @@ end_loop:
del_timer_sync(&journal->j_commit_timer);
journal->j_task = NULL;
wake_up(&journal->j_wait_done_commit);
- jbd_debug(1, "Journal thread exiting.\n");
+ jbd2_debug(1, "Journal thread exiting.\n");
write_unlock(&journal->j_state_lock);
return 0;
}
@@ -481,7 +478,7 @@ repeat:
* Called with j_state_lock locked for writing.
* Returns true if a transaction commit was started.
*/
-int __jbd2_log_start_commit(journal_t *journal, tid_t target)
+static int __jbd2_log_start_commit(journal_t *journal, tid_t target)
{
/* Return if the txn has already requested to be committed */
if (journal->j_commit_request == target)
@@ -500,7 +497,7 @@ int __jbd2_log_start_commit(journal_t *journal, tid_t target)
*/
journal->j_commit_request = target;
- jbd_debug(1, "JBD2: requesting commit %u/%u\n",
+ jbd2_debug(1, "JBD2: requesting commit %u/%u\n",
journal->j_commit_request,
journal->j_commit_sequence);
journal->j_running_transaction->t_requested = jiffies;
@@ -705,7 +702,7 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid)
}
#endif
while (tid_gt(tid, journal->j_commit_sequence)) {
- jbd_debug(1, "JBD2: want %u, j_commit_sequence=%u\n",
+ jbd2_debug(1, "JBD2: want %u, j_commit_sequence=%u\n",
tid, journal->j_commit_sequence);
read_unlock(&journal->j_state_lock);
wake_up(&journal->j_wait_commit);
@@ -1117,7 +1114,7 @@ int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
freed += journal->j_last - journal->j_first;
trace_jbd2_update_log_tail(journal, tid, block, freed);
- jbd_debug(1,
+ jbd2_debug(1,
"Cleaning journal tail from %u to %u (offset %lu), "
"freeing %lu\n",
journal->j_tail_sequence, tid, block, freed);
@@ -1418,7 +1415,8 @@ static journal_t *journal_init_common(struct block_device *bdev,
if (percpu_counter_init(&journal->j_checkpoint_jh_count, 0, GFP_KERNEL))
goto err_cleanup;
- if (register_shrinker(&journal->j_shrinker)) {
+ if (register_shrinker(&journal->j_shrinker, "jbd2-journal:(%u:%u)",
+ MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev))) {
percpu_counter_destroy(&journal->j_checkpoint_jh_count);
goto err_cleanup;
}
@@ -1497,7 +1495,7 @@ journal_t *jbd2_journal_init_inode(struct inode *inode)
return NULL;
}
- jbd_debug(1, "JBD2: inode %s/%ld, size %lld, bits %d, blksize %ld\n",
+ jbd2_debug(1, "JBD2: inode %s/%ld, size %lld, bits %d, blksize %ld\n",
inode->i_sb->s_id, inode->i_ino, (long long) inode->i_size,
inode->i_sb->s_blocksize_bits, inode->i_sb->s_blocksize);
@@ -1577,7 +1575,7 @@ static int journal_reset(journal_t *journal)
* attempting a write to a potential-readonly device.
*/
if (sb->s_start == 0) {
- jbd_debug(1, "JBD2: Skipping superblock update on recovered sb "
+ jbd2_debug(1, "JBD2: Skipping superblock update on recovered sb "
"(start %ld, seq %u, errno %d)\n",
journal->j_tail, journal->j_tail_sequence,
journal->j_errno);
@@ -1681,7 +1679,7 @@ int jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
}
BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
- jbd_debug(1, "JBD2: updating superblock (start %lu, seq %u)\n",
+ jbd2_debug(1, "JBD2: updating superblock (start %lu, seq %u)\n",
tail_block, tail_tid);
lock_buffer(journal->j_sb_buffer);
@@ -1722,7 +1720,7 @@ static void jbd2_mark_journal_empty(journal_t *journal, blk_opf_t write_flags)
return;
}
- jbd_debug(1, "JBD2: Marking journal as empty (seq %u)\n",
+ jbd2_debug(1, "JBD2: Marking journal as empty (seq %u)\n",
journal->j_tail_sequence);
sb->s_sequence = cpu_to_be32(journal->j_tail_sequence);
@@ -1865,7 +1863,7 @@ void jbd2_journal_update_sb_errno(journal_t *journal)
errcode = journal->j_errno;
if (errcode == -ESHUTDOWN)
errcode = 0;
- jbd_debug(1, "JBD2: updating superblock error (errno %d)\n", errcode);
+ jbd2_debug(1, "JBD2: updating superblock error (errno %d)\n", errcode);
sb->s_errno = cpu_to_be32(errcode);
jbd2_write_superblock(journal, REQ_SYNC | REQ_FUA);
@@ -2337,7 +2335,7 @@ int jbd2_journal_set_features(journal_t *journal, unsigned long compat,
compat & JBD2_FEATURE_COMPAT_CHECKSUM)
compat &= ~JBD2_FEATURE_COMPAT_CHECKSUM;
- jbd_debug(1, "Setting new features 0x%lx/0x%lx/0x%lx\n",
+ jbd2_debug(1, "Setting new features 0x%lx/0x%lx/0x%lx\n",
compat, ro, incompat);
sb = journal->j_superblock;
@@ -2406,7 +2404,7 @@ void jbd2_journal_clear_features(journal_t *journal, unsigned long compat,
{
journal_superblock_t *sb;
- jbd_debug(1, "Clear features 0x%lx/0x%lx/0x%lx\n",
+ jbd2_debug(1, "Clear features 0x%lx/0x%lx/0x%lx\n",
compat, ro, incompat);
sb = journal->j_superblock;
@@ -2863,7 +2861,7 @@ static struct journal_head *journal_alloc_journal_head(void)
#endif
ret = kmem_cache_zalloc(jbd2_journal_head_cache, GFP_NOFS);
if (!ret) {
- jbd_debug(1, "out of memory for journal_head\n");
+ jbd2_debug(1, "out of memory for journal_head\n");
pr_notice_ratelimited("ENOMEM in %s, retrying.\n", __func__);
ret = kmem_cache_zalloc(jbd2_journal_head_cache,
GFP_NOFS | __GFP_NOFAIL);
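
Besides the jbd_debug() to jbd2_debug() rename and the unexporting of the debug helpers, the journal.c hunk adapts to register_shrinker() taking a printf-style name (used to identify the shrinker in debugfs). A minimal, hedged sketch of that API with made-up callbacks and cache name:

	#include <linux/kdev_t.h>
	#include <linux/shrinker.h>

	/* Stub callbacks for illustration only; a real shrinker would report
	 * and reclaim cached objects here. */
	static unsigned long my_count(struct shrinker *shrink,
				      struct shrink_control *sc)
	{
		return 0;
	}

	static unsigned long my_scan(struct shrinker *shrink,
				     struct shrink_control *sc)
	{
		return SHRINK_STOP;
	}

	static struct shrinker my_shrinker = {
		.count_objects	= my_count,
		.scan_objects	= my_scan,
		.seeks		= DEFAULT_SEEKS,
	};

	/* Name the shrinker the same way the hunk above names jbd2's. */
	static int my_register(dev_t dev)
	{
		return register_shrinker(&my_shrinker, "my-cache:(%u:%u)",
					 MAJOR(dev), MINOR(dev));
	}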
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index e699d6ab2c0e..f548479615c6 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -245,11 +245,11 @@ static int fc_do_one_pass(journal_t *journal,
return 0;
while (next_fc_block <= journal->j_fc_last) {
- jbd_debug(3, "Fast commit replay: next block %ld\n",
+ jbd2_debug(3, "Fast commit replay: next block %ld\n",
next_fc_block);
err = jread(&bh, journal, next_fc_block);
if (err) {
- jbd_debug(3, "Fast commit replay: read error\n");
+ jbd2_debug(3, "Fast commit replay: read error\n");
break;
}
@@ -263,7 +263,7 @@ static int fc_do_one_pass(journal_t *journal,
}
if (err)
- jbd_debug(3, "Fast commit replay failed, err = %d\n", err);
+ jbd2_debug(3, "Fast commit replay failed, err = %d\n", err);
return err;
}
@@ -297,7 +297,7 @@ int jbd2_journal_recover(journal_t *journal)
*/
if (!sb->s_start) {
- jbd_debug(1, "No recovery required, last transaction %d\n",
+ jbd2_debug(1, "No recovery required, last transaction %d\n",
be32_to_cpu(sb->s_sequence));
journal->j_transaction_sequence = be32_to_cpu(sb->s_sequence) + 1;
return 0;
@@ -309,10 +309,10 @@ int jbd2_journal_recover(journal_t *journal)
if (!err)
err = do_one_pass(journal, &info, PASS_REPLAY);
- jbd_debug(1, "JBD2: recovery, exit status %d, "
+ jbd2_debug(1, "JBD2: recovery, exit status %d, "
"recovered transactions %u to %u\n",
err, info.start_transaction, info.end_transaction);
- jbd_debug(1, "JBD2: Replayed %d and revoked %d/%d blocks\n",
+ jbd2_debug(1, "JBD2: Replayed %d and revoked %d/%d blocks\n",
info.nr_replays, info.nr_revoke_hits, info.nr_revokes);
/* Restart the log at the next transaction ID, thus invalidating
@@ -362,7 +362,7 @@ int jbd2_journal_skip_recovery(journal_t *journal)
#ifdef CONFIG_JBD2_DEBUG
int dropped = info.end_transaction -
be32_to_cpu(journal->j_superblock->s_sequence);
- jbd_debug(1,
+ jbd2_debug(1,
"JBD2: ignoring %d transaction%s from the journal.\n",
dropped, (dropped == 1) ? "" : "s");
#endif
@@ -484,7 +484,7 @@ static int do_one_pass(journal_t *journal,
if (pass == PASS_SCAN)
info->start_transaction = first_commit_ID;
- jbd_debug(1, "Starting recovery pass %d\n", pass);
+ jbd2_debug(1, "Starting recovery pass %d\n", pass);
/*
* Now we walk through the log, transaction by transaction,
@@ -510,7 +510,7 @@ static int do_one_pass(journal_t *journal,
if (tid_geq(next_commit_ID, info->end_transaction))
break;
- jbd_debug(2, "Scanning for sequence ID %u at %lu/%lu\n",
+ jbd2_debug(2, "Scanning for sequence ID %u at %lu/%lu\n",
next_commit_ID, next_log_block,
jbd2_has_feature_fast_commit(journal) ?
journal->j_fc_last : journal->j_last);
@@ -519,7 +519,7 @@ static int do_one_pass(journal_t *journal,
* either the next descriptor block or the final commit
* record. */
- jbd_debug(3, "JBD2: checking block %ld\n", next_log_block);
+ jbd2_debug(3, "JBD2: checking block %ld\n", next_log_block);
err = jread(&bh, journal, next_log_block);
if (err)
goto failed;
@@ -542,7 +542,7 @@ static int do_one_pass(journal_t *journal,
blocktype = be32_to_cpu(tmp->h_blocktype);
sequence = be32_to_cpu(tmp->h_sequence);
- jbd_debug(3, "Found magic %d, sequence %d\n",
+ jbd2_debug(3, "Found magic %d, sequence %d\n",
blocktype, sequence);
if (sequence != next_commit_ID) {
@@ -575,7 +575,7 @@ static int do_one_pass(journal_t *journal,
goto failed;
}
need_check_commit_time = true;
- jbd_debug(1,
+ jbd2_debug(1,
"invalid descriptor block found in %lu\n",
next_log_block);
}
@@ -758,7 +758,7 @@ static int do_one_pass(journal_t *journal,
* It likely does not belong to same journal,
* just end this recovery with success.
*/
- jbd_debug(1, "JBD2: Invalid checksum ignored in transaction %u, likely stale data\n",
+ jbd2_debug(1, "JBD2: Invalid checksum ignored in transaction %u, likely stale data\n",
next_commit_ID);
brelse(bh);
goto done;
@@ -826,7 +826,7 @@ static int do_one_pass(journal_t *journal,
if (pass == PASS_SCAN &&
!jbd2_descriptor_block_csum_verify(journal,
bh->b_data)) {
- jbd_debug(1, "JBD2: invalid revoke block found in %lu\n",
+ jbd2_debug(1, "JBD2: invalid revoke block found in %lu\n",
next_log_block);
need_check_commit_time = true;
}
@@ -845,7 +845,7 @@ static int do_one_pass(journal_t *journal,
continue;
default:
- jbd_debug(3, "Unrecognised magic %d, end of scan.\n",
+ jbd2_debug(3, "Unrecognised magic %d, end of scan.\n",
blocktype);
brelse(bh);
goto done;
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
index fa608788b93d..4556e4689024 100644
--- a/fs/jbd2/revoke.c
+++ b/fs/jbd2/revoke.c
@@ -398,7 +398,7 @@ int jbd2_journal_revoke(handle_t *handle, unsigned long long blocknr,
}
handle->h_revoke_credits--;
- jbd_debug(2, "insert revoke for block %llu, bh_in=%p\n",blocknr, bh_in);
+ jbd2_debug(2, "insert revoke for block %llu, bh_in=%p\n",blocknr, bh_in);
err = insert_revoke_hash(journal, blocknr,
handle->h_transaction->t_tid);
BUFFER_TRACE(bh_in, "exit");
@@ -428,7 +428,7 @@ int jbd2_journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
int did_revoke = 0; /* akpm: debug */
struct buffer_head *bh = jh2bh(jh);
- jbd_debug(4, "journal_head %p, cancelling revoke\n", jh);
+ jbd2_debug(4, "journal_head %p, cancelling revoke\n", jh);
/* Is the existing Revoke bit valid? If so, we trust it, and
* only perform the full cancel if the revoke bit is set. If
@@ -444,7 +444,7 @@ int jbd2_journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
if (need_cancel) {
record = find_revoke_record(journal, bh->b_blocknr);
if (record) {
- jbd_debug(4, "cancelled existing revoke on "
+ jbd2_debug(4, "cancelled existing revoke on "
"blocknr %llu\n", (unsigned long long)bh->b_blocknr);
spin_lock(&journal->j_revoke_lock);
list_del(&record->hash);
@@ -560,7 +560,7 @@ void jbd2_journal_write_revoke_records(transaction_t *transaction,
}
if (descriptor)
flush_descriptor(journal, descriptor, offset);
- jbd_debug(1, "Wrote %d revoke records\n", count);
+ jbd2_debug(1, "Wrote %d revoke records\n", count);
}
/*
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index e9c308ae475f..e1be93ccd81c 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -373,7 +373,7 @@ alloc_transaction:
return -ENOMEM;
}
- jbd_debug(3, "New handle %p going live.\n", handle);
+ jbd2_debug(3, "New handle %p going live.\n", handle);
/*
* We need to hold j_state_lock until t_updates has been incremented,
@@ -453,7 +453,7 @@ repeat:
handle->h_start_jiffies = jiffies;
atomic_inc(&transaction->t_updates);
atomic_inc(&transaction->t_handle_count);
- jbd_debug(4, "Handle %p given %d credits (total %d, free %lu)\n",
+ jbd2_debug(4, "Handle %p given %d credits (total %d, free %lu)\n",
handle, blocks,
atomic_read(&transaction->t_outstanding_credits),
jbd2_log_space_left(journal));
@@ -674,7 +674,7 @@ int jbd2_journal_extend(handle_t *handle, int nblocks, int revoke_records)
/* Don't extend a locked-down transaction! */
if (transaction->t_state != T_RUNNING) {
- jbd_debug(3, "denied handle %p %d blocks: "
+ jbd2_debug(3, "denied handle %p %d blocks: "
"transaction not running\n", handle, nblocks);
goto error_out;
}
@@ -689,7 +689,7 @@ int jbd2_journal_extend(handle_t *handle, int nblocks, int revoke_records)
&transaction->t_outstanding_credits);
if (wanted > journal->j_max_transaction_buffers) {
- jbd_debug(3, "denied handle %p %d blocks: "
+ jbd2_debug(3, "denied handle %p %d blocks: "
"transaction too large\n", handle, nblocks);
atomic_sub(nblocks, &transaction->t_outstanding_credits);
goto error_out;
@@ -707,7 +707,7 @@ int jbd2_journal_extend(handle_t *handle, int nblocks, int revoke_records)
handle->h_revoke_credits_requested += revoke_records;
result = 0;
- jbd_debug(3, "extended handle %p by %d\n", handle, nblocks);
+ jbd2_debug(3, "extended handle %p by %d\n", handle, nblocks);
error_out:
read_unlock(&journal->j_state_lock);
return result;
@@ -795,7 +795,7 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, int revoke_records,
* First unlink the handle from its current transaction, and start the
* commit on that.
*/
- jbd_debug(2, "restarting handle %p\n", handle);
+ jbd2_debug(2, "restarting handle %p\n", handle);
stop_this_handle(handle);
handle->h_transaction = NULL;
@@ -979,7 +979,7 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
journal = transaction->t_journal;
- jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);
+ jbd2_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);
JBUFFER_TRACE(jh, "entry");
repeat:
@@ -1271,7 +1271,7 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
struct journal_head *jh = jbd2_journal_add_journal_head(bh);
int err;
- jbd_debug(5, "journal_head %p\n", jh);
+ jbd2_debug(5, "journal_head %p\n", jh);
err = -EROFS;
if (is_handle_aborted(handle))
goto out;
@@ -1486,8 +1486,6 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
struct journal_head *jh;
int ret = 0;
- if (is_handle_aborted(handle))
- return -EROFS;
if (!buffer_jbd(bh))
return -EUCLEAN;
@@ -1496,7 +1494,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
* of the running transaction.
*/
jh = bh2jh(bh);
- jbd_debug(5, "journal_head %p\n", jh);
+ jbd2_debug(5, "journal_head %p\n", jh);
JBUFFER_TRACE(jh, "entry");
/*
@@ -1534,6 +1532,18 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
journal = transaction->t_journal;
spin_lock(&jh->b_state_lock);
+ if (is_handle_aborted(handle)) {
+ /*
+ * Check for journal abort with @jh->b_state_lock held: if the
+ * journal aborts while the old transaction is committing,
+ * 'jh->b_transaction' may be replaced by 'jh->b_next_transaction',
+ * which could then trip the assertion that
+ * 'jh->b_frozen_data == NULL'.
+ */
+ ret = -EROFS;
+ goto out_unlock_bh;
+ }
+
if (jh->b_modified == 0) {
/*
* This buffer's got modified and becoming part
@@ -1818,7 +1828,7 @@ int jbd2_journal_stop(handle_t *handle)
pid_t pid;
if (--handle->h_ref > 0) {
- jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
+ jbd2_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
handle->h_ref);
if (is_handle_aborted(handle))
return -EIO;
@@ -1838,7 +1848,7 @@ int jbd2_journal_stop(handle_t *handle)
if (is_handle_aborted(handle))
err = -EIO;
- jbd_debug(4, "Handle %p going down\n", handle);
+ jbd2_debug(4, "Handle %p going down\n", handle);
trace_jbd2_handle_stats(journal->j_fs_dev->bd_dev,
tid, handle->h_type, handle->h_line_no,
jiffies - handle->h_start_jiffies,
@@ -1916,7 +1926,7 @@ int jbd2_journal_stop(handle_t *handle)
* completes the commit thread, it just doesn't write
* anything to disk. */
- jbd_debug(2, "transaction too old, requesting commit for "
+ jbd2_debug(2, "transaction too old, requesting commit for "
"handle %p\n", handle);
/* This is non-blocking */
jbd2_log_start_commit(journal, tid);
@@ -2662,7 +2672,7 @@ static int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode,
return -EROFS;
journal = transaction->t_journal;
- jbd_debug(4, "Adding inode %lu, tid:%d\n", jinode->i_vfs_inode->i_ino,
+ jbd2_debug(4, "Adding inode %lu, tid:%d\n", jinode->i_vfs_inode->i_ino,
transaction->t_tid);
spin_lock(&journal->j_list_lock);
diff --git a/fs/kernel_read_file.c b/fs/kernel_read_file.c
index 1b07550485b9..5d826274570c 100644
--- a/fs/kernel_read_file.c
+++ b/fs/kernel_read_file.c
@@ -29,15 +29,15 @@
* change between calls to kernel_read_file().
*
* Returns number of bytes read (no single read will be bigger
- * than INT_MAX), or negative on error.
+ * than SSIZE_MAX), or negative on error.
*
*/
-int kernel_read_file(struct file *file, loff_t offset, void **buf,
- size_t buf_size, size_t *file_size,
- enum kernel_read_file_id id)
+ssize_t kernel_read_file(struct file *file, loff_t offset, void **buf,
+ size_t buf_size, size_t *file_size,
+ enum kernel_read_file_id id)
{
loff_t i_size, pos;
- size_t copied;
+ ssize_t copied;
void *allocated = NULL;
bool whole_file;
int ret;
@@ -58,7 +58,7 @@ int kernel_read_file(struct file *file, loff_t offset, void **buf,
goto out;
}
/* The file is too big for sane activities. */
- if (i_size > INT_MAX) {
+ if (i_size > SSIZE_MAX) {
ret = -EFBIG;
goto out;
}
@@ -124,12 +124,12 @@ out:
}
EXPORT_SYMBOL_GPL(kernel_read_file);
-int kernel_read_file_from_path(const char *path, loff_t offset, void **buf,
- size_t buf_size, size_t *file_size,
- enum kernel_read_file_id id)
+ssize_t kernel_read_file_from_path(const char *path, loff_t offset, void **buf,
+ size_t buf_size, size_t *file_size,
+ enum kernel_read_file_id id)
{
struct file *file;
- int ret;
+ ssize_t ret;
if (!path || !*path)
return -EINVAL;
@@ -144,14 +144,14 @@ int kernel_read_file_from_path(const char *path, loff_t offset, void **buf,
}
EXPORT_SYMBOL_GPL(kernel_read_file_from_path);
-int kernel_read_file_from_path_initns(const char *path, loff_t offset,
- void **buf, size_t buf_size,
- size_t *file_size,
- enum kernel_read_file_id id)
+ssize_t kernel_read_file_from_path_initns(const char *path, loff_t offset,
+ void **buf, size_t buf_size,
+ size_t *file_size,
+ enum kernel_read_file_id id)
{
struct file *file;
struct path root;
- int ret;
+ ssize_t ret;
if (!path || !*path)
return -EINVAL;
@@ -171,12 +171,12 @@ int kernel_read_file_from_path_initns(const char *path, loff_t offset,
}
EXPORT_SYMBOL_GPL(kernel_read_file_from_path_initns);
-int kernel_read_file_from_fd(int fd, loff_t offset, void **buf,
- size_t buf_size, size_t *file_size,
- enum kernel_read_file_id id)
+ssize_t kernel_read_file_from_fd(int fd, loff_t offset, void **buf,
+ size_t buf_size, size_t *file_size,
+ enum kernel_read_file_id id)
{
struct fd f = fdget(fd);
- int ret = -EBADF;
+ ssize_t ret = -EBADF;
if (!f.file || !(f.file->f_mode & FMODE_READ))
goto out;
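
The kernel_read_file() family above widens its return type from int to ssize_t and lifts the size cap from INT_MAX to SSIZE_MAX, so callers must keep the result in an ssize_t. A hedged caller sketch; the helper name and the READING_FIRMWARE policy id are illustrative, and kernel_read_file() vmallocs the buffer when *buf is passed in as NULL:

	#include <linux/kernel_read_file.h>
	#include <linux/limits.h>
	#include <linux/vmalloc.h>

	static int read_whole_file(const char *path)
	{
		void *buf = NULL;
		size_t file_size = 0;
		ssize_t bytes;

		bytes = kernel_read_file_from_path(path, 0, &buf, SSIZE_MAX,
						   &file_size, READING_FIRMWARE);
		if (bytes < 0)
			return bytes;	/* negative errno */

		/* ... consume 'bytes' bytes at 'buf' ... */
		vfree(buf);
		return 0;
	}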
diff --git a/fs/ksmbd/auth.c b/fs/ksmbd/auth.c
index 911444d21267..c5a5c7b90d72 100644
--- a/fs/ksmbd/auth.c
+++ b/fs/ksmbd/auth.c
@@ -121,8 +121,8 @@ out:
return rc;
}
-static int calc_ntlmv2_hash(struct ksmbd_session *sess, char *ntlmv2_hash,
- char *dname)
+static int calc_ntlmv2_hash(struct ksmbd_conn *conn, struct ksmbd_session *sess,
+ char *ntlmv2_hash, char *dname)
{
int ret, len, conv_len;
wchar_t *domain = NULL;
@@ -158,7 +158,7 @@ static int calc_ntlmv2_hash(struct ksmbd_session *sess, char *ntlmv2_hash,
}
conv_len = smb_strtoUTF16(uniname, user_name(sess->user), len,
- sess->conn->local_nls);
+ conn->local_nls);
if (conv_len < 0 || conv_len > len) {
ret = -EINVAL;
goto out;
@@ -182,7 +182,7 @@ static int calc_ntlmv2_hash(struct ksmbd_session *sess, char *ntlmv2_hash,
}
conv_len = smb_strtoUTF16((__le16 *)domain, dname, len,
- sess->conn->local_nls);
+ conn->local_nls);
if (conv_len < 0 || conv_len > len) {
ret = -EINVAL;
goto out;
@@ -215,8 +215,9 @@ out:
*
* Return: 0 on success, error number on error
*/
-int ksmbd_auth_ntlmv2(struct ksmbd_session *sess, struct ntlmv2_resp *ntlmv2,
- int blen, char *domain_name, char *cryptkey)
+int ksmbd_auth_ntlmv2(struct ksmbd_conn *conn, struct ksmbd_session *sess,
+ struct ntlmv2_resp *ntlmv2, int blen, char *domain_name,
+ char *cryptkey)
{
char ntlmv2_hash[CIFS_ENCPWD_SIZE];
char ntlmv2_rsp[CIFS_HMAC_MD5_HASH_SIZE];
@@ -230,7 +231,7 @@ int ksmbd_auth_ntlmv2(struct ksmbd_session *sess, struct ntlmv2_resp *ntlmv2,
return -ENOMEM;
}
- rc = calc_ntlmv2_hash(sess, ntlmv2_hash, domain_name);
+ rc = calc_ntlmv2_hash(conn, sess, ntlmv2_hash, domain_name);
if (rc) {
ksmbd_debug(AUTH, "could not get v2 hash rc %d\n", rc);
goto out;
@@ -333,7 +334,8 @@ int ksmbd_decode_ntlmssp_auth_blob(struct authenticate_message *authblob,
/* process NTLMv2 authentication */
ksmbd_debug(AUTH, "decode_ntlmssp_authenticate_blob dname%s\n",
domain_name);
- ret = ksmbd_auth_ntlmv2(sess, (struct ntlmv2_resp *)((char *)authblob + nt_off),
+ ret = ksmbd_auth_ntlmv2(conn, sess,
+ (struct ntlmv2_resp *)((char *)authblob + nt_off),
nt_len - CIFS_ENCPWD_SIZE,
domain_name, conn->ntlmssp.cryptkey);
kfree(domain_name);
@@ -659,8 +661,9 @@ struct derivation {
bool binding;
};
-static int generate_key(struct ksmbd_session *sess, struct kvec label,
- struct kvec context, __u8 *key, unsigned int key_size)
+static int generate_key(struct ksmbd_conn *conn, struct ksmbd_session *sess,
+ struct kvec label, struct kvec context, __u8 *key,
+ unsigned int key_size)
{
unsigned char zero = 0x0;
__u8 i[4] = {0, 0, 0, 1};
@@ -720,8 +723,8 @@ static int generate_key(struct ksmbd_session *sess, struct kvec label,
goto smb3signkey_ret;
}
- if (sess->conn->cipher_type == SMB2_ENCRYPTION_AES256_CCM ||
- sess->conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM)
+ if (conn->cipher_type == SMB2_ENCRYPTION_AES256_CCM ||
+ conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM)
rc = crypto_shash_update(CRYPTO_HMACSHA256(ctx), L256, 4);
else
rc = crypto_shash_update(CRYPTO_HMACSHA256(ctx), L128, 4);
@@ -756,17 +759,17 @@ static int generate_smb3signingkey(struct ksmbd_session *sess,
if (!chann)
return 0;
- if (sess->conn->dialect >= SMB30_PROT_ID && signing->binding)
+ if (conn->dialect >= SMB30_PROT_ID && signing->binding)
key = chann->smb3signingkey;
else
key = sess->smb3signingkey;
- rc = generate_key(sess, signing->label, signing->context, key,
+ rc = generate_key(conn, sess, signing->label, signing->context, key,
SMB3_SIGN_KEY_SIZE);
if (rc)
return rc;
- if (!(sess->conn->dialect >= SMB30_PROT_ID && signing->binding))
+ if (!(conn->dialect >= SMB30_PROT_ID && signing->binding))
memcpy(chann->smb3signingkey, key, SMB3_SIGN_KEY_SIZE);
ksmbd_debug(AUTH, "dumping generated AES signing keys\n");
@@ -820,30 +823,31 @@ struct derivation_twin {
struct derivation decryption;
};
-static int generate_smb3encryptionkey(struct ksmbd_session *sess,
+static int generate_smb3encryptionkey(struct ksmbd_conn *conn,
+ struct ksmbd_session *sess,
const struct derivation_twin *ptwin)
{
int rc;
- rc = generate_key(sess, ptwin->encryption.label,
+ rc = generate_key(conn, sess, ptwin->encryption.label,
ptwin->encryption.context, sess->smb3encryptionkey,
SMB3_ENC_DEC_KEY_SIZE);
if (rc)
return rc;
- rc = generate_key(sess, ptwin->decryption.label,
+ rc = generate_key(conn, sess, ptwin->decryption.label,
ptwin->decryption.context,
sess->smb3decryptionkey, SMB3_ENC_DEC_KEY_SIZE);
if (rc)
return rc;
ksmbd_debug(AUTH, "dumping generated AES encryption keys\n");
- ksmbd_debug(AUTH, "Cipher type %d\n", sess->conn->cipher_type);
+ ksmbd_debug(AUTH, "Cipher type %d\n", conn->cipher_type);
ksmbd_debug(AUTH, "Session Id %llu\n", sess->id);
ksmbd_debug(AUTH, "Session Key %*ph\n",
SMB2_NTLMV2_SESSKEY_SIZE, sess->sess_key);
- if (sess->conn->cipher_type == SMB2_ENCRYPTION_AES256_CCM ||
- sess->conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM) {
+ if (conn->cipher_type == SMB2_ENCRYPTION_AES256_CCM ||
+ conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM) {
ksmbd_debug(AUTH, "ServerIn Key %*ph\n",
SMB3_GCM256_CRYPTKEY_SIZE, sess->smb3encryptionkey);
ksmbd_debug(AUTH, "ServerOut Key %*ph\n",
@@ -857,7 +861,8 @@ static int generate_smb3encryptionkey(struct ksmbd_session *sess,
return 0;
}
-int ksmbd_gen_smb30_encryptionkey(struct ksmbd_session *sess)
+int ksmbd_gen_smb30_encryptionkey(struct ksmbd_conn *conn,
+ struct ksmbd_session *sess)
{
struct derivation_twin twin;
struct derivation *d;
@@ -874,10 +879,11 @@ int ksmbd_gen_smb30_encryptionkey(struct ksmbd_session *sess)
d->context.iov_base = "ServerIn ";
d->context.iov_len = 10;
- return generate_smb3encryptionkey(sess, &twin);
+ return generate_smb3encryptionkey(conn, sess, &twin);
}
-int ksmbd_gen_smb311_encryptionkey(struct ksmbd_session *sess)
+int ksmbd_gen_smb311_encryptionkey(struct ksmbd_conn *conn,
+ struct ksmbd_session *sess)
{
struct derivation_twin twin;
struct derivation *d;
@@ -894,7 +900,7 @@ int ksmbd_gen_smb311_encryptionkey(struct ksmbd_session *sess)
d->context.iov_base = sess->Preauth_HashValue;
d->context.iov_len = 64;
- return generate_smb3encryptionkey(sess, &twin);
+ return generate_smb3encryptionkey(conn, sess, &twin);
}
int ksmbd_gen_preauth_integrity_hash(struct ksmbd_conn *conn, char *buf,
diff --git a/fs/ksmbd/auth.h b/fs/ksmbd/auth.h
index 95629651cf26..25b772653de0 100644
--- a/fs/ksmbd/auth.h
+++ b/fs/ksmbd/auth.h
@@ -38,8 +38,9 @@ struct kvec;
int ksmbd_crypt_message(struct ksmbd_conn *conn, struct kvec *iov,
unsigned int nvec, int enc);
void ksmbd_copy_gss_neg_header(void *buf);
-int ksmbd_auth_ntlmv2(struct ksmbd_session *sess, struct ntlmv2_resp *ntlmv2,
- int blen, char *domain_name, char *cryptkey);
+int ksmbd_auth_ntlmv2(struct ksmbd_conn *conn, struct ksmbd_session *sess,
+ struct ntlmv2_resp *ntlmv2, int blen, char *domain_name,
+ char *cryptkey);
int ksmbd_decode_ntlmssp_auth_blob(struct authenticate_message *authblob,
int blob_len, struct ksmbd_conn *conn,
struct ksmbd_session *sess);
@@ -58,8 +59,10 @@ int ksmbd_gen_smb30_signingkey(struct ksmbd_session *sess,
struct ksmbd_conn *conn);
int ksmbd_gen_smb311_signingkey(struct ksmbd_session *sess,
struct ksmbd_conn *conn);
-int ksmbd_gen_smb30_encryptionkey(struct ksmbd_session *sess);
-int ksmbd_gen_smb311_encryptionkey(struct ksmbd_session *sess);
+int ksmbd_gen_smb30_encryptionkey(struct ksmbd_conn *conn,
+ struct ksmbd_session *sess);
+int ksmbd_gen_smb311_encryptionkey(struct ksmbd_conn *conn,
+ struct ksmbd_session *sess);
int ksmbd_gen_preauth_integrity_hash(struct ksmbd_conn *conn, char *buf,
__u8 *pi_hash);
int ksmbd_gen_sd_hash(struct ksmbd_conn *conn, char *sd_buf, int len,
diff --git a/fs/ksmbd/connection.c b/fs/ksmbd/connection.c
index e8f476c5f189..756ad631c019 100644
--- a/fs/ksmbd/connection.c
+++ b/fs/ksmbd/connection.c
@@ -36,6 +36,7 @@ void ksmbd_conn_free(struct ksmbd_conn *conn)
list_del(&conn->conns_list);
write_unlock(&conn_list_lock);
+ xa_destroy(&conn->sessions);
kvfree(conn->request_buf);
kfree(conn->preauth_info);
kfree(conn);
@@ -65,13 +66,14 @@ struct ksmbd_conn *ksmbd_conn_alloc(void)
conn->outstanding_credits = 0;
init_waitqueue_head(&conn->req_running_q);
+ init_waitqueue_head(&conn->r_count_q);
INIT_LIST_HEAD(&conn->conns_list);
- INIT_LIST_HEAD(&conn->sessions);
INIT_LIST_HEAD(&conn->requests);
INIT_LIST_HEAD(&conn->async_requests);
spin_lock_init(&conn->request_lock);
spin_lock_init(&conn->credits_lock);
ida_init(&conn->async_ida);
+ xa_init(&conn->sessions);
spin_lock_init(&conn->llist_lock);
INIT_LIST_HEAD(&conn->lock_list);
@@ -164,7 +166,6 @@ int ksmbd_conn_write(struct ksmbd_work *work)
struct kvec iov[3];
int iov_idx = 0;
- ksmbd_conn_try_dequeue_request(work);
if (!work->response_buf) {
pr_err("NULL response header\n");
return -EINVAL;
@@ -346,8 +347,8 @@ int ksmbd_conn_handler_loop(void *p)
out:
/* Wait till all reference dropped to the Server object*/
- while (atomic_read(&conn->r_count) > 0)
- schedule_timeout(HZ);
+ wait_event(conn->r_count_q, atomic_read(&conn->r_count) == 0);
+
unload_nls(conn->local_nls);
if (default_conn_ops.terminate_fn)
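
The connection.c hunks above stop polling with schedule_timeout() for conn->r_count to drain and instead sleep on the new r_count_q waitqueue, which the oplock, lease-break and work-handler paths later in this series wake when they drop the last reference. Reduced to a hedged, generic sketch with made-up names:

	#include <linux/atomic.h>
	#include <linux/wait.h>

	struct pending {
		atomic_t r_count;
		wait_queue_head_t r_count_q;
	};

	/* The last dropper wakes the waiter; per the patch's own comment, the
	 * lockless waitqueue_active() check is tolerable because the wait
	 * condition is an atomic counter. */
	static void pending_put(struct pending *p)
	{
		if (!atomic_dec_return(&p->r_count) &&
		    waitqueue_active(&p->r_count_q))
			wake_up(&p->r_count_q);
	}

	static void pending_drain(struct pending *p)
	{
		wait_event(p->r_count_q, atomic_read(&p->r_count) == 0);
	}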
diff --git a/fs/ksmbd/connection.h b/fs/ksmbd/connection.h
index 98c1cbe45ec9..e7f7d5707951 100644
--- a/fs/ksmbd/connection.h
+++ b/fs/ksmbd/connection.h
@@ -20,13 +20,6 @@
#define KSMBD_SOCKET_BACKLOG 16
-/*
- * WARNING
- *
- * This is nothing but a HACK. Session status should move to channel
- * or to session. As of now we have 1 tcp_conn : 1 ksmbd_session, but
- * we need to change it to 1 tcp_conn : N ksmbd_sessions.
- */
enum {
KSMBD_SESS_NEW = 0,
KSMBD_SESS_GOOD,
@@ -55,7 +48,7 @@ struct ksmbd_conn {
struct nls_table *local_nls;
struct list_head conns_list;
/* smb session 1 per user */
- struct list_head sessions;
+ struct xarray sessions;
unsigned long last_active;
/* How many request are running currently */
atomic_t req_running;
@@ -65,6 +58,7 @@ struct ksmbd_conn {
unsigned int outstanding_credits;
spinlock_t credits_lock;
wait_queue_head_t req_running_q;
+ wait_queue_head_t r_count_q;
/* Lock to protect requests list*/
spinlock_t request_lock;
struct list_head requests;
diff --git a/fs/ksmbd/ksmbd_netlink.h b/fs/ksmbd/ksmbd_netlink.h
index 52aa0adeb951..e0cbcfa98c7e 100644
--- a/fs/ksmbd/ksmbd_netlink.h
+++ b/fs/ksmbd/ksmbd_netlink.h
@@ -349,6 +349,7 @@ enum KSMBD_TREE_CONN_STATUS {
#define KSMBD_SHARE_FLAG_STREAMS BIT(11)
#define KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS BIT(12)
#define KSMBD_SHARE_FLAG_ACL_XATTR BIT(13)
+#define KSMBD_SHARE_FLAG_UPDATE BIT(14)
/*
* Tree connect request flags.
@@ -364,6 +365,7 @@ enum KSMBD_TREE_CONN_STATUS {
#define KSMBD_TREE_CONN_FLAG_READ_ONLY BIT(1)
#define KSMBD_TREE_CONN_FLAG_WRITABLE BIT(2)
#define KSMBD_TREE_CONN_FLAG_ADMIN_ACCOUNT BIT(3)
+#define KSMBD_TREE_CONN_FLAG_UPDATE BIT(4)
/*
* RPC over IPC.
diff --git a/fs/ksmbd/mgmt/share_config.c b/fs/ksmbd/mgmt/share_config.c
index cb72d30f5b71..c9bca1c2c834 100644
--- a/fs/ksmbd/mgmt/share_config.c
+++ b/fs/ksmbd/mgmt/share_config.c
@@ -51,12 +51,16 @@ static void kill_share(struct ksmbd_share_config *share)
kfree(share);
}
-void __ksmbd_share_config_put(struct ksmbd_share_config *share)
+void ksmbd_share_config_del(struct ksmbd_share_config *share)
{
down_write(&shares_table_lock);
hash_del(&share->hlist);
up_write(&shares_table_lock);
+}
+void __ksmbd_share_config_put(struct ksmbd_share_config *share)
+{
+ ksmbd_share_config_del(share);
kill_share(share);
}
@@ -222,17 +226,3 @@ bool ksmbd_share_veto_filename(struct ksmbd_share_config *share,
}
return false;
}
-
-void ksmbd_share_configs_cleanup(void)
-{
- struct ksmbd_share_config *share;
- struct hlist_node *tmp;
- int i;
-
- down_write(&shares_table_lock);
- hash_for_each_safe(shares_table, i, tmp, share, hlist) {
- hash_del(&share->hlist);
- kill_share(share);
- }
- up_write(&shares_table_lock);
-}
diff --git a/fs/ksmbd/mgmt/share_config.h b/fs/ksmbd/mgmt/share_config.h
index 953befc94e84..902f2cb1963a 100644
--- a/fs/ksmbd/mgmt/share_config.h
+++ b/fs/ksmbd/mgmt/share_config.h
@@ -64,6 +64,7 @@ static inline int test_share_config_flag(struct ksmbd_share_config *share,
return share->flags & flag;
}
+void ksmbd_share_config_del(struct ksmbd_share_config *share);
void __ksmbd_share_config_put(struct ksmbd_share_config *share);
static inline void ksmbd_share_config_put(struct ksmbd_share_config *share)
@@ -76,6 +77,4 @@ static inline void ksmbd_share_config_put(struct ksmbd_share_config *share)
struct ksmbd_share_config *ksmbd_share_config_get(char *name);
bool ksmbd_share_veto_filename(struct ksmbd_share_config *share,
const char *filename);
-void ksmbd_share_configs_cleanup(void);
-
#endif /* __SHARE_CONFIG_MANAGEMENT_H__ */
diff --git a/fs/ksmbd/mgmt/tree_connect.c b/fs/ksmbd/mgmt/tree_connect.c
index 0d28e723a28c..97ab7987df6e 100644
--- a/fs/ksmbd/mgmt/tree_connect.c
+++ b/fs/ksmbd/mgmt/tree_connect.c
@@ -16,9 +16,10 @@
#include "user_session.h"
struct ksmbd_tree_conn_status
-ksmbd_tree_conn_connect(struct ksmbd_session *sess, char *share_name)
+ksmbd_tree_conn_connect(struct ksmbd_conn *conn, struct ksmbd_session *sess,
+ char *share_name)
{
- struct ksmbd_tree_conn_status status = {-EINVAL, NULL};
+ struct ksmbd_tree_conn_status status = {-ENOENT, NULL};
struct ksmbd_tree_connect_response *resp = NULL;
struct ksmbd_share_config *sc;
struct ksmbd_tree_connect *tree_conn = NULL;
@@ -41,7 +42,7 @@ ksmbd_tree_conn_connect(struct ksmbd_session *sess, char *share_name)
goto out_error;
}
- peer_addr = KSMBD_TCP_PEER_SOCKADDR(sess->conn);
+ peer_addr = KSMBD_TCP_PEER_SOCKADDR(conn);
resp = ksmbd_ipc_tree_connect_request(sess,
sc,
tree_conn,
@@ -56,6 +57,20 @@ ksmbd_tree_conn_connect(struct ksmbd_session *sess, char *share_name)
goto out_error;
tree_conn->flags = resp->connection_flags;
+ if (test_tree_conn_flag(tree_conn, KSMBD_TREE_CONN_FLAG_UPDATE)) {
+ struct ksmbd_share_config *new_sc;
+
+ ksmbd_share_config_del(sc);
+ new_sc = ksmbd_share_config_get(share_name);
+ if (!new_sc) {
+ pr_err("Failed to update stale share config\n");
+ status.ret = -ESTALE;
+ goto out_error;
+ }
+ ksmbd_share_config_put(sc);
+ sc = new_sc;
+ }
+
tree_conn->user = sess->user;
tree_conn->share_conf = sc;
status.tree_conn = tree_conn;
diff --git a/fs/ksmbd/mgmt/tree_connect.h b/fs/ksmbd/mgmt/tree_connect.h
index 18e2a996e0aa..71e50271dccf 100644
--- a/fs/ksmbd/mgmt/tree_connect.h
+++ b/fs/ksmbd/mgmt/tree_connect.h
@@ -12,6 +12,7 @@
struct ksmbd_share_config;
struct ksmbd_user;
+struct ksmbd_conn;
struct ksmbd_tree_connect {
int id;
@@ -40,7 +41,8 @@ static inline int test_tree_conn_flag(struct ksmbd_tree_connect *tree_conn,
struct ksmbd_session;
struct ksmbd_tree_conn_status
-ksmbd_tree_conn_connect(struct ksmbd_session *sess, char *share_name);
+ksmbd_tree_conn_connect(struct ksmbd_conn *conn, struct ksmbd_session *sess,
+ char *share_name);
int ksmbd_tree_conn_disconnect(struct ksmbd_session *sess,
struct ksmbd_tree_connect *tree_conn);
diff --git a/fs/ksmbd/mgmt/user_session.c b/fs/ksmbd/mgmt/user_session.c
index 8d8ffd8c6f19..3fa2139a0b30 100644
--- a/fs/ksmbd/mgmt/user_session.c
+++ b/fs/ksmbd/mgmt/user_session.c
@@ -32,11 +32,13 @@ static void free_channel_list(struct ksmbd_session *sess)
{
struct channel *chann, *tmp;
+ write_lock(&sess->chann_lock);
list_for_each_entry_safe(chann, tmp, &sess->ksmbd_chann_list,
chann_list) {
list_del(&chann->chann_list);
kfree(chann);
}
+ write_unlock(&sess->chann_lock);
}
static void __session_rpc_close(struct ksmbd_session *sess,
@@ -149,11 +151,6 @@ void ksmbd_session_destroy(struct ksmbd_session *sess)
if (!sess)
return;
- if (!atomic_dec_and_test(&sess->refcnt))
- return;
-
- list_del(&sess->sessions_entry);
-
down_write(&sessions_table_lock);
hash_del(&sess->hlist);
up_write(&sessions_table_lock);
@@ -181,53 +178,70 @@ static struct ksmbd_session *__session_lookup(unsigned long long id)
return NULL;
}
-void ksmbd_session_register(struct ksmbd_conn *conn,
- struct ksmbd_session *sess)
+int ksmbd_session_register(struct ksmbd_conn *conn,
+ struct ksmbd_session *sess)
{
- sess->conn = conn;
- list_add(&sess->sessions_entry, &conn->sessions);
+ sess->dialect = conn->dialect;
+ memcpy(sess->ClientGUID, conn->ClientGUID, SMB2_CLIENT_GUID_SIZE);
+ return xa_err(xa_store(&conn->sessions, sess->id, sess, GFP_KERNEL));
}
-void ksmbd_sessions_deregister(struct ksmbd_conn *conn)
+static int ksmbd_chann_del(struct ksmbd_conn *conn, struct ksmbd_session *sess)
{
- struct ksmbd_session *sess;
-
- while (!list_empty(&conn->sessions)) {
- sess = list_entry(conn->sessions.next,
- struct ksmbd_session,
- sessions_entry);
+ struct channel *chann, *tmp;
- ksmbd_session_destroy(sess);
+ write_lock(&sess->chann_lock);
+ list_for_each_entry_safe(chann, tmp, &sess->ksmbd_chann_list,
+ chann_list) {
+ if (chann->conn == conn) {
+ list_del(&chann->chann_list);
+ kfree(chann);
+ write_unlock(&sess->chann_lock);
+ return 0;
+ }
}
-}
+ write_unlock(&sess->chann_lock);
-static bool ksmbd_session_id_match(struct ksmbd_session *sess,
- unsigned long long id)
-{
- return sess->id == id;
+ return -ENOENT;
}
-struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn,
- unsigned long long id)
+void ksmbd_sessions_deregister(struct ksmbd_conn *conn)
{
- struct ksmbd_session *sess = NULL;
+ struct ksmbd_session *sess;
- list_for_each_entry(sess, &conn->sessions, sessions_entry) {
- if (ksmbd_session_id_match(sess, id))
- return sess;
+ if (conn->binding) {
+ int bkt;
+
+ down_write(&sessions_table_lock);
+ hash_for_each(sessions_table, bkt, sess, hlist) {
+ if (!ksmbd_chann_del(conn, sess)) {
+ up_write(&sessions_table_lock);
+ goto sess_destroy;
+ }
+ }
+ up_write(&sessions_table_lock);
+ } else {
+ unsigned long id;
+
+ xa_for_each(&conn->sessions, id, sess) {
+ if (!ksmbd_chann_del(conn, sess))
+ goto sess_destroy;
+ }
}
- return NULL;
-}
-int get_session(struct ksmbd_session *sess)
-{
- return atomic_inc_not_zero(&sess->refcnt);
+ return;
+
+sess_destroy:
+ if (list_empty(&sess->ksmbd_chann_list)) {
+ xa_erase(&conn->sessions, sess->id);
+ ksmbd_session_destroy(sess);
+ }
}
-void put_session(struct ksmbd_session *sess)
+struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn,
+ unsigned long long id)
{
- if (atomic_dec_and_test(&sess->refcnt))
- pr_err("get/%s seems to be mismatched.", __func__);
+ return xa_load(&conn->sessions, id);
}
struct ksmbd_session *ksmbd_session_lookup_slowpath(unsigned long long id)
@@ -236,10 +250,6 @@ struct ksmbd_session *ksmbd_session_lookup_slowpath(unsigned long long id)
down_read(&sessions_table_lock);
sess = __session_lookup(id);
- if (sess) {
- if (!get_session(sess))
- sess = NULL;
- }
up_read(&sessions_table_lock);
return sess;
@@ -253,6 +263,8 @@ struct ksmbd_session *ksmbd_session_lookup_all(struct ksmbd_conn *conn,
sess = ksmbd_session_lookup(conn, id);
if (!sess && conn->binding)
sess = ksmbd_session_lookup_slowpath(id);
+ if (sess && sess->state != SMB2_SESSION_VALID)
+ sess = NULL;
return sess;
}
@@ -314,12 +326,11 @@ static struct ksmbd_session *__session_create(int protocol)
goto error;
set_session_flag(sess, protocol);
- INIT_LIST_HEAD(&sess->sessions_entry);
xa_init(&sess->tree_conns);
INIT_LIST_HEAD(&sess->ksmbd_chann_list);
INIT_LIST_HEAD(&sess->rpc_handle_list);
sess->sequence_number = 1;
- atomic_set(&sess->refcnt, 1);
+ rwlock_init(&sess->chann_lock);
switch (protocol) {
case CIFDS_SESSION_FLAG_SMB2:
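
user_session.c above drops the refcounted per-connection session list (and the get_session()/put_session() helpers) in favour of an XArray keyed by session id, and protects the channel list with the new chann_lock. A hedged, self-contained sketch of the XArray pattern; the structure and helper names are made up:

	#include <linux/xarray.h>

	struct my_sess {
		unsigned long long id;
	};

	static DEFINE_XARRAY(my_sessions);

	static int my_sess_register(struct my_sess *sess)
	{
		return xa_err(xa_store(&my_sessions, sess->id, sess, GFP_KERNEL));
	}

	static struct my_sess *my_sess_lookup(unsigned long long id)
	{
		return xa_load(&my_sessions, id);
	}

	static void my_sess_deregister_all(void)
	{
		struct my_sess *sess;
		unsigned long id;

		xa_for_each(&my_sessions, id, sess)
			xa_erase(&my_sessions, id);
	}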
diff --git a/fs/ksmbd/mgmt/user_session.h b/fs/ksmbd/mgmt/user_session.h
index e241f16a3851..8934b8ee275b 100644
--- a/fs/ksmbd/mgmt/user_session.h
+++ b/fs/ksmbd/mgmt/user_session.h
@@ -33,8 +33,10 @@ struct preauth_session {
struct ksmbd_session {
u64 id;
+ __u16 dialect;
+ char ClientGUID[SMB2_CLIENT_GUID_SIZE];
+
struct ksmbd_user *user;
- struct ksmbd_conn *conn;
unsigned int sequence_number;
unsigned int flags;
@@ -48,6 +50,7 @@ struct ksmbd_session {
char sess_key[CIFS_KEY_SIZE];
struct hlist_node hlist;
+ rwlock_t chann_lock;
struct list_head ksmbd_chann_list;
struct xarray tree_conns;
struct ida tree_conn_ida;
@@ -57,9 +60,7 @@ struct ksmbd_session {
__u8 smb3decryptionkey[SMB3_ENC_DEC_KEY_SIZE];
__u8 smb3signingkey[SMB3_SIGN_KEY_SIZE];
- struct list_head sessions_entry;
struct ksmbd_file_table file_table;
- atomic_t refcnt;
};
static inline int test_session_flag(struct ksmbd_session *sess, int bit)
@@ -84,8 +85,8 @@ void ksmbd_session_destroy(struct ksmbd_session *sess);
struct ksmbd_session *ksmbd_session_lookup_slowpath(unsigned long long id);
struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn,
unsigned long long id);
-void ksmbd_session_register(struct ksmbd_conn *conn,
- struct ksmbd_session *sess);
+int ksmbd_session_register(struct ksmbd_conn *conn,
+ struct ksmbd_session *sess);
void ksmbd_sessions_deregister(struct ksmbd_conn *conn);
struct ksmbd_session *ksmbd_session_lookup_all(struct ksmbd_conn *conn,
unsigned long long id);
@@ -100,6 +101,4 @@ void ksmbd_release_tree_conn_id(struct ksmbd_session *sess, int id);
int ksmbd_session_rpc_open(struct ksmbd_session *sess, char *rpc_name);
void ksmbd_session_rpc_close(struct ksmbd_session *sess, int id);
int ksmbd_session_rpc_method(struct ksmbd_session *sess, int id);
-int get_session(struct ksmbd_session *sess);
-void put_session(struct ksmbd_session *sess);
#endif /* __USER_SESSION_MANAGEMENT_H__ */
diff --git a/fs/ksmbd/oplock.c b/fs/ksmbd/oplock.c
index 8b5560574d4c..9046cff4374b 100644
--- a/fs/ksmbd/oplock.c
+++ b/fs/ksmbd/oplock.c
@@ -30,6 +30,7 @@ static DEFINE_RWLOCK(lease_list_lock);
static struct oplock_info *alloc_opinfo(struct ksmbd_work *work,
u64 id, __u16 Tid)
{
+ struct ksmbd_conn *conn = work->conn;
struct ksmbd_session *sess = work->sess;
struct oplock_info *opinfo;
@@ -38,7 +39,7 @@ static struct oplock_info *alloc_opinfo(struct ksmbd_work *work,
return NULL;
opinfo->sess = sess;
- opinfo->conn = sess->conn;
+ opinfo->conn = conn;
opinfo->level = SMB2_OPLOCK_LEVEL_NONE;
opinfo->op_state = OPLOCK_STATE_NONE;
opinfo->pending_break = 0;
@@ -615,18 +616,13 @@ static void __smb2_oplock_break_noti(struct work_struct *wk)
struct ksmbd_file *fp;
fp = ksmbd_lookup_durable_fd(br_info->fid);
- if (!fp) {
- atomic_dec(&conn->r_count);
- ksmbd_free_work_struct(work);
- return;
- }
+ if (!fp)
+ goto out;
if (allocate_oplock_break_buf(work)) {
pr_err("smb2_allocate_rsp_buf failed! ");
- atomic_dec(&conn->r_count);
ksmbd_fd_put(work, fp);
- ksmbd_free_work_struct(work);
- return;
+ goto out;
}
rsp_hdr = smb2_get_msg(work->response_buf);
@@ -667,8 +663,16 @@ static void __smb2_oplock_break_noti(struct work_struct *wk)
ksmbd_fd_put(work, fp);
ksmbd_conn_write(work);
+
+out:
ksmbd_free_work_struct(work);
- atomic_dec(&conn->r_count);
+ /*
+ * Check the waitqueue so that requests still pending at disconnect
+ * time get dropped. The lockless waitqueue_active() check is fine
+ * here because the wait condition is an atomic counter.
+ */
+ if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
+ wake_up(&conn->r_count_q);
}
/**
@@ -731,9 +735,7 @@ static void __smb2_lease_break_noti(struct work_struct *wk)
if (allocate_oplock_break_buf(work)) {
ksmbd_debug(OPLOCK, "smb2_allocate_rsp_buf failed! ");
- ksmbd_free_work_struct(work);
- atomic_dec(&conn->r_count);
- return;
+ goto out;
}
rsp_hdr = smb2_get_msg(work->response_buf);
@@ -771,8 +773,16 @@ static void __smb2_lease_break_noti(struct work_struct *wk)
inc_rfc1001_len(work->response_buf, 44);
ksmbd_conn_write(work);
+
+out:
ksmbd_free_work_struct(work);
- atomic_dec(&conn->r_count);
+ /*
+ * Check the waitqueue so that requests still pending at disconnect
+ * time get dropped. The lockless waitqueue_active() check is fine
+ * here because the wait condition is an atomic counter.
+ */
+ if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
+ wake_up(&conn->r_count_q);
}
/**
@@ -972,7 +982,7 @@ int find_same_lease_key(struct ksmbd_session *sess, struct ksmbd_inode *ci,
}
list_for_each_entry(lb, &lease_table_list, l_entry) {
- if (!memcmp(lb->client_guid, sess->conn->ClientGUID,
+ if (!memcmp(lb->client_guid, sess->ClientGUID,
SMB2_CLIENT_GUID_SIZE))
goto found;
}
@@ -988,7 +998,7 @@ found:
rcu_read_unlock();
if (opinfo->o_fp->f_ci == ci)
goto op_next;
- err = compare_guid_key(opinfo, sess->conn->ClientGUID,
+ err = compare_guid_key(opinfo, sess->ClientGUID,
lctx->lease_key);
if (err) {
err = -EINVAL;
@@ -1122,7 +1132,7 @@ int smb_grant_oplock(struct ksmbd_work *work, int req_op_level, u64 pid,
struct oplock_info *m_opinfo;
/* is lease already granted ? */
- m_opinfo = same_client_has_lease(ci, sess->conn->ClientGUID,
+ m_opinfo = same_client_has_lease(ci, sess->ClientGUID,
lctx);
if (m_opinfo) {
copy_lease(m_opinfo, opinfo);
@@ -1240,7 +1250,7 @@ void smb_break_all_levII_oplock(struct ksmbd_work *work, struct ksmbd_file *fp,
{
struct oplock_info *op, *brk_op;
struct ksmbd_inode *ci;
- struct ksmbd_conn *conn = work->sess->conn;
+ struct ksmbd_conn *conn = work->conn;
if (!test_share_config_flag(work->tcon->share_conf,
KSMBD_SHARE_FLAG_OPLOCKS))
diff --git a/fs/ksmbd/server.c b/fs/ksmbd/server.c
index 4cd03d661df0..ce42bff42ef9 100644
--- a/fs/ksmbd/server.c
+++ b/fs/ksmbd/server.c
@@ -261,7 +261,13 @@ static void handle_ksmbd_work(struct work_struct *wk)
ksmbd_conn_try_dequeue_request(work);
ksmbd_free_work_struct(work);
- atomic_dec(&conn->r_count);
+ /*
+ * Check the waitqueue so that requests still pending at disconnect
+ * time get dropped. The lockless waitqueue_active() check is fine
+ * here because the wait condition is an atomic counter.
+ */
+ if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
+ wake_up(&conn->r_count_q);
}
/**
diff --git a/fs/ksmbd/smb2misc.c b/fs/ksmbd/smb2misc.c
index f8f456377a51..6e25ace36568 100644
--- a/fs/ksmbd/smb2misc.c
+++ b/fs/ksmbd/smb2misc.c
@@ -90,11 +90,6 @@ static int smb2_get_data_area_len(unsigned int *off, unsigned int *len,
*off = 0;
*len = 0;
- /* error reqeusts do not have data area */
- if (hdr->Status && hdr->Status != STATUS_MORE_PROCESSING_REQUIRED &&
- (((struct smb2_err_rsp *)hdr)->StructureSize) == SMB2_ERROR_STRUCTURE_SIZE2_LE)
- return ret;
-
/*
* Following commands have data areas so we have to get the location
* of the data buffer offset and data buffer length for the particular
@@ -136,8 +131,11 @@ static int smb2_get_data_area_len(unsigned int *off, unsigned int *len,
*len = le16_to_cpu(((struct smb2_read_req *)hdr)->ReadChannelInfoLength);
break;
case SMB2_WRITE:
- if (((struct smb2_write_req *)hdr)->DataOffset) {
- *off = le16_to_cpu(((struct smb2_write_req *)hdr)->DataOffset);
+ if (((struct smb2_write_req *)hdr)->DataOffset ||
+ ((struct smb2_write_req *)hdr)->Length) {
+ *off = max_t(unsigned int,
+ le16_to_cpu(((struct smb2_write_req *)hdr)->DataOffset),
+ offsetof(struct smb2_write_req, Buffer));
*len = le32_to_cpu(((struct smb2_write_req *)hdr)->Length);
break;
}
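
The smb2misc.c change above no longer trusts the wire-supplied DataOffset on SMB2_WRITE: whenever the request claims any data, the offset is clamped so it can never point before the fixed part of the PDU. The idiom, as a hedged sketch over a made-up request layout:

	#include <asm/byteorder.h>
	#include <linux/kernel.h>
	#include <linux/stddef.h>
	#include <linux/types.h>

	struct wire_req {
		__le16	data_offset;
		__le32	length;
		__u8	buffer[];
	};

	/* Never let a client-controlled offset land inside the fixed header. */
	static unsigned int sanitize_offset(const struct wire_req *req)
	{
		return max_t(unsigned int, le16_to_cpu(req->data_offset),
			     offsetof(struct wire_req, buffer));
	}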
diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
index 353f047e783c..19412ac701a6 100644
--- a/fs/ksmbd/smb2pdu.c
+++ b/fs/ksmbd/smb2pdu.c
@@ -535,9 +535,10 @@ int smb2_allocate_rsp_buf(struct ksmbd_work *work)
struct smb2_query_info_req *req;
req = smb2_get_msg(work->request_buf);
- if (req->InfoType == SMB2_O_INFO_FILE &&
- (req->FileInfoClass == FILE_FULL_EA_INFORMATION ||
- req->FileInfoClass == FILE_ALL_INFORMATION))
+ if ((req->InfoType == SMB2_O_INFO_FILE &&
+ (req->FileInfoClass == FILE_FULL_EA_INFORMATION ||
+ req->FileInfoClass == FILE_ALL_INFORMATION)) ||
+ req->InfoType == SMB2_O_INFO_SECURITY)
sz = large_sz;
}
@@ -588,10 +589,12 @@ int smb2_check_user_session(struct ksmbd_work *work)
return -EINVAL;
}
-static void destroy_previous_session(struct ksmbd_user *user, u64 id)
+static void destroy_previous_session(struct ksmbd_conn *conn,
+ struct ksmbd_user *user, u64 id)
{
struct ksmbd_session *prev_sess = ksmbd_session_lookup_slowpath(id);
struct ksmbd_user *prev_user;
+ struct channel *chann;
if (!prev_sess)
return;
@@ -601,13 +604,14 @@ static void destroy_previous_session(struct ksmbd_user *user, u64 id)
if (!prev_user ||
strcmp(user->name, prev_user->name) ||
user->passkey_sz != prev_user->passkey_sz ||
- memcmp(user->passkey, prev_user->passkey, user->passkey_sz)) {
- put_session(prev_sess);
+ memcmp(user->passkey, prev_user->passkey, user->passkey_sz))
return;
- }
- put_session(prev_sess);
- ksmbd_session_destroy(prev_sess);
+ prev_sess->state = SMB2_SESSION_EXPIRED;
+ write_lock(&prev_sess->chann_lock);
+ list_for_each_entry(chann, &prev_sess->ksmbd_chann_list, chann_list)
+ chann->conn->status = KSMBD_SESS_EXITING;
+ write_unlock(&prev_sess->chann_lock);
}
/**
@@ -1139,12 +1143,16 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
status);
rsp->hdr.Status = status;
rc = -EINVAL;
+ kfree(conn->preauth_info);
+ conn->preauth_info = NULL;
goto err_out;
}
rc = init_smb3_11_server(conn);
if (rc < 0) {
rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+ kfree(conn->preauth_info);
+ conn->preauth_info = NULL;
goto err_out;
}
@@ -1439,7 +1447,7 @@ static int ntlm_authenticate(struct ksmbd_work *work)
/* Check for previous session */
prev_id = le64_to_cpu(req->PreviousSessionId);
if (prev_id && prev_id != sess->id)
- destroy_previous_session(user, prev_id);
+ destroy_previous_session(conn, user, prev_id);
if (sess->state == SMB2_SESSION_VALID) {
/*
@@ -1493,7 +1501,7 @@ static int ntlm_authenticate(struct ksmbd_work *work)
if (smb3_encryption_negotiated(conn) &&
!(req->Flags & SMB2_SESSION_REQ_FLAG_BINDING)) {
- rc = conn->ops->generate_encryptionkey(sess);
+ rc = conn->ops->generate_encryptionkey(conn, sess);
if (rc) {
ksmbd_debug(SMB,
"SMB3 encryption key generation failed\n");
@@ -1510,7 +1518,9 @@ static int ntlm_authenticate(struct ksmbd_work *work)
binding_session:
if (conn->dialect >= SMB30_PROT_ID) {
+ read_lock(&sess->chann_lock);
chann = lookup_chann_list(sess, conn);
+ read_unlock(&sess->chann_lock);
if (!chann) {
chann = kmalloc(sizeof(struct channel), GFP_KERNEL);
if (!chann)
@@ -1518,7 +1528,9 @@ binding_session:
chann->conn = conn;
INIT_LIST_HEAD(&chann->chann_list);
+ write_lock(&sess->chann_lock);
list_add(&chann->chann_list, &sess->ksmbd_chann_list);
+ write_unlock(&sess->chann_lock);
}
}
@@ -1561,7 +1573,7 @@ static int krb5_authenticate(struct ksmbd_work *work)
/* Check previous session */
prev_sess_id = le64_to_cpu(req->PreviousSessionId);
if (prev_sess_id && prev_sess_id != sess->id)
- destroy_previous_session(sess->user, prev_sess_id);
+ destroy_previous_session(conn, sess->user, prev_sess_id);
if (sess->state == SMB2_SESSION_VALID)
ksmbd_free_user(sess->user);
@@ -1580,7 +1592,7 @@ static int krb5_authenticate(struct ksmbd_work *work)
sess->sign = true;
if (smb3_encryption_negotiated(conn)) {
- retval = conn->ops->generate_encryptionkey(sess);
+ retval = conn->ops->generate_encryptionkey(conn, sess);
if (retval) {
ksmbd_debug(SMB,
"SMB3 encryption key generation failed\n");
@@ -1592,7 +1604,9 @@ static int krb5_authenticate(struct ksmbd_work *work)
}
if (conn->dialect >= SMB30_PROT_ID) {
+ read_lock(&sess->chann_lock);
chann = lookup_chann_list(sess, conn);
+ read_unlock(&sess->chann_lock);
if (!chann) {
chann = kmalloc(sizeof(struct channel), GFP_KERNEL);
if (!chann)
@@ -1600,7 +1614,9 @@ static int krb5_authenticate(struct ksmbd_work *work)
chann->conn = conn;
INIT_LIST_HEAD(&chann->chann_list);
+ write_lock(&sess->chann_lock);
list_add(&chann->chann_list, &sess->ksmbd_chann_list);
+ write_unlock(&sess->chann_lock);
}
}
@@ -1650,7 +1666,9 @@ int smb2_sess_setup(struct ksmbd_work *work)
goto out_err;
}
rsp->hdr.SessionId = cpu_to_le64(sess->id);
- ksmbd_session_register(conn, sess);
+ rc = ksmbd_session_register(conn, sess);
+ if (rc)
+ goto out_err;
} else if (conn->dialect >= SMB30_PROT_ID &&
(server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL) &&
req->Flags & SMB2_SESSION_REQ_FLAG_BINDING) {
@@ -1662,7 +1680,7 @@ int smb2_sess_setup(struct ksmbd_work *work)
goto out_err;
}
- if (conn->dialect != sess->conn->dialect) {
+ if (conn->dialect != sess->dialect) {
rc = -EINVAL;
goto out_err;
}
@@ -1672,7 +1690,7 @@ int smb2_sess_setup(struct ksmbd_work *work)
goto out_err;
}
- if (strncmp(conn->ClientGUID, sess->conn->ClientGUID,
+ if (strncmp(conn->ClientGUID, sess->ClientGUID,
SMB2_CLIENT_GUID_SIZE)) {
rc = -ENOENT;
goto out_err;
@@ -1828,6 +1846,7 @@ out_err:
if (sess->user && sess->user->flags & KSMBD_USER_FLAG_DELAY_SESSION)
try_delay = true;
+ xa_erase(&conn->sessions, sess->id);
ksmbd_session_destroy(sess);
work->sess = NULL;
if (try_delay)
@@ -1873,7 +1892,7 @@ int smb2_tree_connect(struct ksmbd_work *work)
ksmbd_debug(SMB, "tree connect request for tree %s treename %s\n",
name, treename);
- status = ksmbd_tree_conn_connect(sess, name);
+ status = ksmbd_tree_conn_connect(conn, sess, name);
if (status.ret == KSMBD_TREE_CONN_STATUS_OK)
rsp->hdr.Id.SyncId.TreeId = cpu_to_le32(status.tree_conn->id);
else
@@ -1925,8 +1944,10 @@ out_err1:
rsp->hdr.Status = STATUS_SUCCESS;
rc = 0;
break;
+ case -ESTALE:
+ case -ENOENT:
case KSMBD_TREE_CONN_STATUS_NO_SHARE:
- rsp->hdr.Status = STATUS_BAD_NETWORK_PATH;
+ rsp->hdr.Status = STATUS_BAD_NETWORK_NAME;
break;
case -ENOMEM:
case KSMBD_TREE_CONN_STATUS_NOMEM:
@@ -2039,6 +2060,7 @@ int smb2_tree_disconnect(struct ksmbd_work *work)
ksmbd_close_tree_conn_fds(work);
ksmbd_tree_conn_disconnect(sess, tcon);
+ work->tcon = NULL;
return 0;
}
@@ -2308,15 +2330,15 @@ static int smb2_remove_smb_xattrs(struct path *path)
name += strlen(name) + 1) {
ksmbd_debug(SMB, "%s, len %zd\n", name, strlen(name));
- if (strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) &&
- strncmp(&name[XATTR_USER_PREFIX_LEN], DOS_ATTRIBUTE_PREFIX,
- DOS_ATTRIBUTE_PREFIX_LEN) &&
- strncmp(&name[XATTR_USER_PREFIX_LEN], STREAM_PREFIX, STREAM_PREFIX_LEN))
- continue;
-
- err = ksmbd_vfs_remove_xattr(user_ns, path->dentry, name);
- if (err)
- ksmbd_debug(SMB, "remove xattr failed : %s\n", name);
+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) &&
+ !strncmp(&name[XATTR_USER_PREFIX_LEN], STREAM_PREFIX,
+ STREAM_PREFIX_LEN)) {
+ err = ksmbd_vfs_remove_xattr(user_ns, path->dentry,
+ name);
+ if (err)
+ ksmbd_debug(SMB, "remove xattr failed : %s\n",
+ name);
+ }
}
out:
kvfree(xattr_list);
@@ -2969,7 +2991,7 @@ int smb2_open(struct ksmbd_work *work)
goto err_out;
rc = build_sec_desc(user_ns,
- pntsd, NULL,
+ pntsd, NULL, 0,
OWNER_SECINFO |
GROUP_SECINFO |
DACL_SECINFO,
@@ -3022,12 +3044,6 @@ int smb2_open(struct ksmbd_work *work)
list_add(&fp->node, &fp->f_ci->m_fp_list);
write_unlock(&fp->f_ci->m_lock);
- rc = ksmbd_vfs_getattr(&path, &stat);
- if (rc) {
- generic_fillattr(user_ns, d_inode(path.dentry), &stat);
- rc = 0;
- }
-
/* Check delete pending among previous fp before oplock break */
if (ksmbd_inode_pending_delete(fp)) {
rc = -EBUSY;
@@ -3114,6 +3130,10 @@ int smb2_open(struct ksmbd_work *work)
}
}
+ rc = ksmbd_vfs_getattr(&path, &stat);
+ if (rc)
+ goto err_out;
+
if (stat.result_mask & STATX_BTIME)
fp->create_time = ksmbd_UnixTimeToNT(stat.btime);
else
@@ -3129,9 +3149,6 @@ int smb2_open(struct ksmbd_work *work)
memcpy(fp->client_guid, conn->ClientGUID, SMB2_CLIENT_GUID_SIZE);
- generic_fillattr(user_ns, file_inode(fp->filp),
- &stat);
-
rsp->StructureSize = cpu_to_le16(89);
rcu_read_lock();
opinfo = rcu_dereference(fp->f_opinfo);
@@ -3814,6 +3831,15 @@ static int verify_info_level(int info_level)
return 0;
}
+static int smb2_resp_buf_len(struct ksmbd_work *work, unsigned short hdr2_len)
+{
+ int free_len;
+
+ free_len = (int)(work->response_sz -
+ (get_rfc1002_len(work->response_buf) + 4)) - hdr2_len;
+ return free_len;
+}
+
static int smb2_calc_max_out_buf_len(struct ksmbd_work *work,
unsigned short hdr2_len,
unsigned int out_buf_len)
@@ -3823,9 +3849,7 @@ static int smb2_calc_max_out_buf_len(struct ksmbd_work *work,
if (out_buf_len > work->conn->vals->max_trans_size)
return -EINVAL;
- free_len = (int)(work->response_sz -
- (get_rfc1002_len(work->response_buf) + 4)) -
- hdr2_len;
+ free_len = smb2_resp_buf_len(work, hdr2_len);
if (free_len < 0)
return -EINVAL;
@@ -4858,7 +4882,7 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
struct smb2_query_info_rsp *rsp)
{
struct ksmbd_session *sess = work->sess;
- struct ksmbd_conn *conn = sess->conn;
+ struct ksmbd_conn *conn = work->conn;
struct ksmbd_share_config *share = work->tcon->share_conf;
int fsinfoclass = 0;
struct kstatfs stfs;
@@ -5088,10 +5112,10 @@ static int smb2_get_info_sec(struct ksmbd_work *work,
struct smb_ntsd *pntsd = (struct smb_ntsd *)rsp->Buffer, *ppntsd = NULL;
struct smb_fattr fattr = {{0}};
struct inode *inode;
- __u32 secdesclen;
+ __u32 secdesclen = 0;
unsigned int id = KSMBD_NO_FID, pid = KSMBD_NO_FID;
int addition_info = le32_to_cpu(req->AdditionalInformation);
- int rc;
+ int rc = 0, ppntsd_size = 0;
if (addition_info & ~(OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO |
PROTECTED_DACL_SECINFO |
@@ -5137,11 +5161,14 @@ static int smb2_get_info_sec(struct ksmbd_work *work,
if (test_share_config_flag(work->tcon->share_conf,
KSMBD_SHARE_FLAG_ACL_XATTR))
- ksmbd_vfs_get_sd_xattr(work->conn, user_ns,
- fp->filp->f_path.dentry, &ppntsd);
-
- rc = build_sec_desc(user_ns, pntsd, ppntsd, addition_info,
- &secdesclen, &fattr);
+ ppntsd_size = ksmbd_vfs_get_sd_xattr(work->conn, user_ns,
+ fp->filp->f_path.dentry,
+ &ppntsd);
+
+ /* Check if sd buffer size exceeds response buffer size */
+ if (smb2_resp_buf_len(work, 8) > ppntsd_size)
+ rc = build_sec_desc(user_ns, pntsd, ppntsd, ppntsd_size,
+ addition_info, &secdesclen, &fattr);
posix_acl_release(fattr.cf_acls);
posix_acl_release(fattr.cf_dacls);
kfree(ppntsd);
@@ -5776,7 +5803,7 @@ static int set_rename_info(struct ksmbd_work *work, struct ksmbd_file *fp,
}
next:
return smb2_rename(work, fp, user_ns, rename_info,
- work->sess->conn->local_nls);
+ work->conn->local_nls);
}
static int set_file_disposition_info(struct ksmbd_file *fp,
@@ -5908,7 +5935,7 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
return smb2_create_link(work, work->tcon->share_conf,
(struct smb2_file_link_info *)req->Buffer,
buf_len, fp->filp,
- work->sess->conn->local_nls);
+ work->conn->local_nls);
}
case FILE_DISPOSITION_INFORMATION:
{
@@ -6495,14 +6522,12 @@ int smb2_write(struct ksmbd_work *work)
writethrough = true;
if (is_rdma_channel == false) {
- if ((u64)le16_to_cpu(req->DataOffset) + length >
- get_rfc1002_len(work->request_buf)) {
- pr_err("invalid write data offset %u, smb_len %u\n",
- le16_to_cpu(req->DataOffset),
- get_rfc1002_len(work->request_buf));
+ if (le16_to_cpu(req->DataOffset) <
+ offsetof(struct smb2_write_req, Buffer)) {
err = -EINVAL;
goto out;
}
+
data_buf = (char *)(((char *)&req->hdr.ProtocolId) +
le16_to_cpu(req->DataOffset));
@@ -8356,10 +8381,14 @@ int smb3_check_sign_req(struct ksmbd_work *work)
if (le16_to_cpu(hdr->Command) == SMB2_SESSION_SETUP_HE) {
signing_key = work->sess->smb3signingkey;
} else {
+ read_lock(&work->sess->chann_lock);
chann = lookup_chann_list(work->sess, conn);
- if (!chann)
+ if (!chann) {
+ read_unlock(&work->sess->chann_lock);
return 0;
+ }
signing_key = chann->smb3signingkey;
+ read_unlock(&work->sess->chann_lock);
}
if (!signing_key) {
@@ -8419,10 +8448,14 @@ void smb3_set_sign_rsp(struct ksmbd_work *work)
le16_to_cpu(hdr->Command) == SMB2_SESSION_SETUP_HE) {
signing_key = work->sess->smb3signingkey;
} else {
+ read_lock(&work->sess->chann_lock);
chann = lookup_chann_list(work->sess, work->conn);
- if (!chann)
+ if (!chann) {
+ read_unlock(&work->sess->chann_lock);
return;
+ }
signing_key = chann->smb3signingkey;
+ read_unlock(&work->sess->chann_lock);
}
if (!signing_key)
diff --git a/fs/ksmbd/smb_common.h b/fs/ksmbd/smb_common.h
index e1369b4345a9..318c16fa81da 100644
--- a/fs/ksmbd/smb_common.h
+++ b/fs/ksmbd/smb_common.h
@@ -421,7 +421,7 @@ struct smb_version_ops {
int (*check_sign_req)(struct ksmbd_work *work);
void (*set_sign_rsp)(struct ksmbd_work *work);
int (*generate_signingkey)(struct ksmbd_session *sess, struct ksmbd_conn *conn);
- int (*generate_encryptionkey)(struct ksmbd_session *sess);
+ int (*generate_encryptionkey)(struct ksmbd_conn *conn, struct ksmbd_session *sess);
bool (*is_transform_hdr)(void *buf);
int (*decrypt_req)(struct ksmbd_work *work);
int (*encrypt_resp)(struct ksmbd_work *work);
diff --git a/fs/ksmbd/smbacl.c b/fs/ksmbd/smbacl.c
index 38f23bf981ac..3781bca2c8fc 100644
--- a/fs/ksmbd/smbacl.c
+++ b/fs/ksmbd/smbacl.c
@@ -690,6 +690,7 @@ posix_default_acl:
static void set_ntacl_dacl(struct user_namespace *user_ns,
struct smb_acl *pndacl,
struct smb_acl *nt_dacl,
+ unsigned int aces_size,
const struct smb_sid *pownersid,
const struct smb_sid *pgrpsid,
struct smb_fattr *fattr)
@@ -703,9 +704,19 @@ static void set_ntacl_dacl(struct user_namespace *user_ns,
if (nt_num_aces) {
ntace = (struct smb_ace *)((char *)nt_dacl + sizeof(struct smb_acl));
for (i = 0; i < nt_num_aces; i++) {
- memcpy((char *)pndace + size, ntace, le16_to_cpu(ntace->size));
- size += le16_to_cpu(ntace->size);
- ntace = (struct smb_ace *)((char *)ntace + le16_to_cpu(ntace->size));
+ unsigned short nt_ace_size;
+
+ if (offsetof(struct smb_ace, access_req) > aces_size)
+ break;
+
+ nt_ace_size = le16_to_cpu(ntace->size);
+ if (nt_ace_size > aces_size)
+ break;
+
+ memcpy((char *)pndace + size, ntace, nt_ace_size);
+ size += nt_ace_size;
+ aces_size -= nt_ace_size;
+ ntace = (struct smb_ace *)((char *)ntace + nt_ace_size);
num_aces++;
}
}
@@ -878,7 +889,7 @@ int parse_sec_desc(struct user_namespace *user_ns, struct smb_ntsd *pntsd,
/* Convert permission bits from mode to equivalent CIFS ACL */
int build_sec_desc(struct user_namespace *user_ns,
struct smb_ntsd *pntsd, struct smb_ntsd *ppntsd,
- int addition_info, __u32 *secdesclen,
+ int ppntsd_size, int addition_info, __u32 *secdesclen,
struct smb_fattr *fattr)
{
int rc = 0;
@@ -938,15 +949,25 @@ int build_sec_desc(struct user_namespace *user_ns,
if (!ppntsd) {
set_mode_dacl(user_ns, dacl_ptr, fattr);
- } else if (!ppntsd->dacloffset) {
- goto out;
} else {
struct smb_acl *ppdacl_ptr;
+ unsigned int dacl_offset = le32_to_cpu(ppntsd->dacloffset);
+ int ppdacl_size, ntacl_size = ppntsd_size - dacl_offset;
+
+ if (!dacl_offset ||
+ (dacl_offset + sizeof(struct smb_acl) > ppntsd_size))
+ goto out;
+
+ ppdacl_ptr = (struct smb_acl *)((char *)ppntsd + dacl_offset);
+ ppdacl_size = le16_to_cpu(ppdacl_ptr->size);
+ if (ppdacl_size > ntacl_size ||
+ ppdacl_size < sizeof(struct smb_acl))
+ goto out;
- ppdacl_ptr = (struct smb_acl *)((char *)ppntsd +
- le32_to_cpu(ppntsd->dacloffset));
set_ntacl_dacl(user_ns, dacl_ptr, ppdacl_ptr,
- nowner_sid_ptr, ngroup_sid_ptr, fattr);
+ ntacl_size - sizeof(struct smb_acl),
+ nowner_sid_ptr, ngroup_sid_ptr,
+ fattr);
}
pntsd->dacloffset = cpu_to_le32(offset);
offset += le16_to_cpu(dacl_ptr->size);
@@ -980,24 +1001,31 @@ int smb_inherit_dacl(struct ksmbd_conn *conn,
struct smb_sid owner_sid, group_sid;
struct dentry *parent = path->dentry->d_parent;
struct user_namespace *user_ns = mnt_user_ns(path->mnt);
- int inherited_flags = 0, flags = 0, i, ace_cnt = 0, nt_size = 0;
- int rc = 0, num_aces, dacloffset, pntsd_type, acl_len;
+ int inherited_flags = 0, flags = 0, i, ace_cnt = 0, nt_size = 0, pdacl_size;
+ int rc = 0, num_aces, dacloffset, pntsd_type, pntsd_size, acl_len, aces_size;
char *aces_base;
bool is_dir = S_ISDIR(d_inode(path->dentry)->i_mode);
- acl_len = ksmbd_vfs_get_sd_xattr(conn, user_ns,
- parent, &parent_pntsd);
- if (acl_len <= 0)
+ pntsd_size = ksmbd_vfs_get_sd_xattr(conn, user_ns,
+ parent, &parent_pntsd);
+ if (pntsd_size <= 0)
return -ENOENT;
dacloffset = le32_to_cpu(parent_pntsd->dacloffset);
- if (!dacloffset) {
+ if (!dacloffset || (dacloffset + sizeof(struct smb_acl) > pntsd_size)) {
rc = -EINVAL;
goto free_parent_pntsd;
}
parent_pdacl = (struct smb_acl *)((char *)parent_pntsd + dacloffset);
+ acl_len = pntsd_size - dacloffset;
num_aces = le32_to_cpu(parent_pdacl->num_aces);
pntsd_type = le16_to_cpu(parent_pntsd->type);
+ pdacl_size = le16_to_cpu(parent_pdacl->size);
+
+ if (pdacl_size > acl_len || pdacl_size < sizeof(struct smb_acl)) {
+ rc = -EINVAL;
+ goto free_parent_pntsd;
+ }
aces_base = kmalloc(sizeof(struct smb_ace) * num_aces * 2, GFP_KERNEL);
if (!aces_base) {
@@ -1008,11 +1036,23 @@ int smb_inherit_dacl(struct ksmbd_conn *conn,
aces = (struct smb_ace *)aces_base;
parent_aces = (struct smb_ace *)((char *)parent_pdacl +
sizeof(struct smb_acl));
+ aces_size = acl_len - sizeof(struct smb_acl);
if (pntsd_type & DACL_AUTO_INHERITED)
inherited_flags = INHERITED_ACE;
for (i = 0; i < num_aces; i++) {
+ int pace_size;
+
+ if (offsetof(struct smb_ace, access_req) > aces_size)
+ break;
+
+ pace_size = le16_to_cpu(parent_aces->size);
+ if (pace_size > aces_size)
+ break;
+
+ aces_size -= pace_size;
+
flags = parent_aces->flags;
if (!smb_inherit_flags(flags, is_dir))
goto pass;
@@ -1057,8 +1097,7 @@ int smb_inherit_dacl(struct ksmbd_conn *conn,
aces = (struct smb_ace *)((char *)aces + le16_to_cpu(aces->size));
ace_cnt++;
pass:
- parent_aces =
- (struct smb_ace *)((char *)parent_aces + le16_to_cpu(parent_aces->size));
+ parent_aces = (struct smb_ace *)((char *)parent_aces + pace_size);
}
if (nt_size > 0) {
@@ -1153,7 +1192,7 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, struct path *path,
struct smb_ntsd *pntsd = NULL;
struct smb_acl *pdacl;
struct posix_acl *posix_acls;
- int rc = 0, acl_size;
+ int rc = 0, pntsd_size, acl_size, aces_size, pdacl_size, dacl_offset;
struct smb_sid sid;
int granted = le32_to_cpu(*pdaccess & ~FILE_MAXIMAL_ACCESS_LE);
struct smb_ace *ace;
@@ -1162,37 +1201,33 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, struct path *path,
struct smb_ace *others_ace = NULL;
struct posix_acl_entry *pa_entry;
unsigned int sid_type = SIDOWNER;
- char *end_of_acl;
+ unsigned short ace_size;
ksmbd_debug(SMB, "check permission using windows acl\n");
- acl_size = ksmbd_vfs_get_sd_xattr(conn, user_ns,
- path->dentry, &pntsd);
- if (acl_size <= 0 || !pntsd || !pntsd->dacloffset) {
- kfree(pntsd);
- return 0;
- }
+ pntsd_size = ksmbd_vfs_get_sd_xattr(conn, user_ns,
+ path->dentry, &pntsd);
+ if (pntsd_size <= 0 || !pntsd)
+ goto err_out;
+
+ dacl_offset = le32_to_cpu(pntsd->dacloffset);
+ if (!dacl_offset ||
+ (dacl_offset + sizeof(struct smb_acl) > pntsd_size))
+ goto err_out;
pdacl = (struct smb_acl *)((char *)pntsd + le32_to_cpu(pntsd->dacloffset));
- end_of_acl = ((char *)pntsd) + acl_size;
- if (end_of_acl <= (char *)pdacl) {
- kfree(pntsd);
- return 0;
- }
+ acl_size = pntsd_size - dacl_offset;
+ pdacl_size = le16_to_cpu(pdacl->size);
- if (end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size) ||
- le16_to_cpu(pdacl->size) < sizeof(struct smb_acl)) {
- kfree(pntsd);
- return 0;
- }
+ if (pdacl_size > acl_size || pdacl_size < sizeof(struct smb_acl))
+ goto err_out;
if (!pdacl->num_aces) {
- if (!(le16_to_cpu(pdacl->size) - sizeof(struct smb_acl)) &&
+ if (!(pdacl_size - sizeof(struct smb_acl)) &&
*pdaccess & ~(FILE_READ_CONTROL_LE | FILE_WRITE_DAC_LE)) {
rc = -EACCES;
goto err_out;
}
- kfree(pntsd);
- return 0;
+ goto err_out;
}
if (*pdaccess & FILE_MAXIMAL_ACCESS_LE) {
@@ -1200,11 +1235,16 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, struct path *path,
DELETE;
ace = (struct smb_ace *)((char *)pdacl + sizeof(struct smb_acl));
+ aces_size = acl_size - sizeof(struct smb_acl);
for (i = 0; i < le32_to_cpu(pdacl->num_aces); i++) {
+ if (offsetof(struct smb_ace, access_req) > aces_size)
+ break;
+ ace_size = le16_to_cpu(ace->size);
+ if (ace_size > aces_size)
+ break;
+ aces_size -= ace_size;
granted |= le32_to_cpu(ace->access_req);
ace = (struct smb_ace *)((char *)ace + le16_to_cpu(ace->size));
- if (end_of_acl < (char *)ace)
- goto err_out;
}
if (!pdacl->num_aces)
@@ -1216,7 +1256,15 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, struct path *path,
id_to_sid(uid, sid_type, &sid);
ace = (struct smb_ace *)((char *)pdacl + sizeof(struct smb_acl));
+ aces_size = acl_size - sizeof(struct smb_acl);
for (i = 0; i < le32_to_cpu(pdacl->num_aces); i++) {
+ if (offsetof(struct smb_ace, access_req) > aces_size)
+ break;
+ ace_size = le16_to_cpu(ace->size);
+ if (ace_size > aces_size)
+ break;
+ aces_size -= ace_size;
+
if (!compare_sids(&sid, &ace->sid) ||
!compare_sids(&sid_unix_NFS_mode, &ace->sid)) {
found = 1;
@@ -1226,8 +1274,6 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, struct path *path,
others_ace = ace;
ace = (struct smb_ace *)((char *)ace + le16_to_cpu(ace->size));
- if (end_of_acl < (char *)ace)
- goto err_out;
}
if (*pdaccess & FILE_MAXIMAL_ACCESS_LE && found) {
diff --git a/fs/ksmbd/smbacl.h b/fs/ksmbd/smbacl.h
index 811af3309429..fcb2c83f2992 100644
--- a/fs/ksmbd/smbacl.h
+++ b/fs/ksmbd/smbacl.h
@@ -193,7 +193,7 @@ struct posix_acl_state {
int parse_sec_desc(struct user_namespace *user_ns, struct smb_ntsd *pntsd,
int acl_len, struct smb_fattr *fattr);
int build_sec_desc(struct user_namespace *user_ns, struct smb_ntsd *pntsd,
- struct smb_ntsd *ppntsd, int addition_info,
+ struct smb_ntsd *ppntsd, int ppntsd_size, int addition_info,
__u32 *secdesclen, struct smb_fattr *fattr);
int init_acl_state(struct posix_acl_state *state, int cnt);
void free_acl_state(struct posix_acl_state *state);
diff --git a/fs/ksmbd/vfs.c b/fs/ksmbd/vfs.c
index 7c849024999f..78d01033604c 100644
--- a/fs/ksmbd/vfs.c
+++ b/fs/ksmbd/vfs.c
@@ -481,12 +481,11 @@ int ksmbd_vfs_write(struct ksmbd_work *work, struct ksmbd_file *fp,
char *buf, size_t count, loff_t *pos, bool sync,
ssize_t *written)
{
- struct ksmbd_session *sess = work->sess;
struct file *filp;
loff_t offset = *pos;
int err = 0;
- if (sess->conn->connection_type) {
+ if (work->conn->connection_type) {
if (!(fp->daccess & FILE_WRITE_DATA_LE)) {
pr_err("no right to write(%pd)\n",
fp->filp->f_path.dentry);
@@ -1540,6 +1539,11 @@ int ksmbd_vfs_get_sd_xattr(struct ksmbd_conn *conn,
}
*pntsd = acl.sd_buf;
+ if (acl.sd_size < sizeof(struct smb_ntsd)) {
+ pr_err("sd size is invalid\n");
+ goto out_free;
+ }
+
(*pntsd)->osidoffset = cpu_to_le32(le32_to_cpu((*pntsd)->osidoffset) -
NDR_NTSD_OFFSETOF);
(*pntsd)->gsidoffset = cpu_to_le32(le32_to_cpu((*pntsd)->gsidoffset) -
diff --git a/fs/ksmbd/vfs_cache.c b/fs/ksmbd/vfs_cache.c
index c4d59d2735f0..da9163b00350 100644
--- a/fs/ksmbd/vfs_cache.c
+++ b/fs/ksmbd/vfs_cache.c
@@ -569,7 +569,7 @@ struct ksmbd_file *ksmbd_open_fd(struct ksmbd_work *work, struct file *filp)
atomic_set(&fp->refcount, 1);
fp->filp = filp;
- fp->conn = work->sess->conn;
+ fp->conn = work->conn;
fp->tcon = work->tcon;
fp->volatile_id = KSMBD_NO_FID;
fp->persistent_id = KSMBD_NO_FID;
diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c
index 176b468a61c7..bf274f23969b 100644
--- a/fs/lockd/svc4proc.c
+++ b/fs/lockd/svc4proc.c
@@ -32,6 +32,10 @@ nlm4svc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp,
if (!nlmsvc_ops)
return nlm_lck_denied_nolocks;
+ if (lock->lock_start > OFFSET_MAX ||
+ (lock->lock_len && ((lock->lock_len - 1) > (OFFSET_MAX - lock->lock_start))))
+ return nlm4_fbig;
+
/* Obtain host handle */
if (!(host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len))
|| (argp->monitor && nsm_monitor(host) < 0))
@@ -50,6 +54,10 @@ nlm4svc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp,
/* Set up the missing parts of the file_lock structure */
lock->fl.fl_file = file->f_file[mode];
lock->fl.fl_pid = current->tgid;
+ lock->fl.fl_start = (loff_t)lock->lock_start;
+ lock->fl.fl_end = lock->lock_len ?
+ (loff_t)(lock->lock_start + lock->lock_len - 1) :
+ OFFSET_MAX;
lock->fl.fl_lmops = &nlmsvc_lock_operations;
nlmsvc_locks_init_private(&lock->fl, host, (pid_t)lock->svid);
if (!lock->fl.fl_owner) {
@@ -87,6 +95,7 @@ __nlm4svc_proc_test(struct svc_rqst *rqstp, struct nlm_res *resp)
struct nlm_args *argp = rqstp->rq_argp;
struct nlm_host *host;
struct nlm_file *file;
+ struct nlm_lockowner *test_owner;
__be32 rc = rpc_success;
dprintk("lockd: TEST4 called\n");
@@ -96,6 +105,7 @@ __nlm4svc_proc_test(struct svc_rqst *rqstp, struct nlm_res *resp)
if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success;
+ test_owner = argp->lock.fl.fl_owner;
/* Now check for conflicting locks */
resp->status = nlmsvc_testlock(rqstp, file, host, &argp->lock, &resp->lock, &resp->cookie);
if (resp->status == nlm_drop_reply)
@@ -103,7 +113,7 @@ __nlm4svc_proc_test(struct svc_rqst *rqstp, struct nlm_res *resp)
else
dprintk("lockd: TEST4 status %d\n", ntohl(resp->status));
- nlmsvc_release_lockowner(&argp->lock);
+ nlmsvc_put_lockowner(test_owner);
nlmsvc_release_host(host);
nlm_release_file(file);
return rc;
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index cb3658ab9b7a..9c1aa75441e1 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -340,7 +340,7 @@ nlmsvc_get_lockowner(struct nlm_lockowner *lockowner)
return lockowner;
}
-static void nlmsvc_put_lockowner(struct nlm_lockowner *lockowner)
+void nlmsvc_put_lockowner(struct nlm_lockowner *lockowner)
{
if (!refcount_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
return;
@@ -590,7 +590,6 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
int error;
int mode;
__be32 ret;
- struct nlm_lockowner *test_owner;
dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n",
nlmsvc_file_inode(file)->i_sb->s_id,
@@ -604,9 +603,6 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
goto out;
}
- /* If there's a conflicting lock, remember to clean up the test lock */
- test_owner = (struct nlm_lockowner *)lock->fl.fl_owner;
-
mode = lock_to_openmode(&lock->fl);
error = vfs_test_lock(file->f_file[mode], &lock->fl);
if (error) {
@@ -635,10 +631,6 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
conflock->fl.fl_end = lock->fl.fl_end;
locks_release_private(&lock->fl);
- /* Clean up the test lock */
- lock->fl.fl_owner = NULL;
- nlmsvc_put_lockowner(test_owner);
-
ret = nlm_lck_denied;
out:
return ret;
diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
index 4dc1b40a489a..b09ca35b527c 100644
--- a/fs/lockd/svcproc.c
+++ b/fs/lockd/svcproc.c
@@ -116,6 +116,7 @@ __nlmsvc_proc_test(struct svc_rqst *rqstp, struct nlm_res *resp)
struct nlm_args *argp = rqstp->rq_argp;
struct nlm_host *host;
struct nlm_file *file;
+ struct nlm_lockowner *test_owner;
__be32 rc = rpc_success;
dprintk("lockd: TEST called\n");
@@ -125,6 +126,8 @@ __nlmsvc_proc_test(struct svc_rqst *rqstp, struct nlm_res *resp)
if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file)))
return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success;
+ test_owner = argp->lock.fl.fl_owner;
+
/* Now check for conflicting locks */
resp->status = cast_status(nlmsvc_testlock(rqstp, file, host, &argp->lock, &resp->lock, &resp->cookie));
if (resp->status == nlm_drop_reply)
@@ -133,7 +136,7 @@ __nlmsvc_proc_test(struct svc_rqst *rqstp, struct nlm_res *resp)
dprintk("lockd: TEST status %d vers %d\n",
ntohl(resp->status), rqstp->rq_vers);
- nlmsvc_release_lockowner(&argp->lock);
+ nlmsvc_put_lockowner(test_owner);
nlmsvc_release_host(host);
nlm_release_file(file);
return rc;
diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c
index 856267c0864b..712fdfeb8ef0 100644
--- a/fs/lockd/xdr4.c
+++ b/fs/lockd/xdr4.c
@@ -20,13 +20,6 @@
#include "svcxdr.h"
-static inline loff_t
-s64_to_loff_t(__s64 offset)
-{
- return (loff_t)offset;
-}
-
-
static inline s64
loff_t_to_s64(loff_t offset)
{
@@ -70,8 +63,6 @@ static bool
svcxdr_decode_lock(struct xdr_stream *xdr, struct nlm_lock *lock)
{
struct file_lock *fl = &lock->fl;
- u64 len, start;
- s64 end;
if (!svcxdr_decode_string(xdr, &lock->caller, &lock->len))
return false;
@@ -81,20 +72,14 @@ svcxdr_decode_lock(struct xdr_stream *xdr, struct nlm_lock *lock)
return false;
if (xdr_stream_decode_u32(xdr, &lock->svid) < 0)
return false;
- if (xdr_stream_decode_u64(xdr, &start) < 0)
+ if (xdr_stream_decode_u64(xdr, &lock->lock_start) < 0)
return false;
- if (xdr_stream_decode_u64(xdr, &len) < 0)
+ if (xdr_stream_decode_u64(xdr, &lock->lock_len) < 0)
return false;
locks_init_lock(fl);
fl->fl_flags = FL_POSIX;
fl->fl_type = F_RDLCK;
- end = start + len - 1;
- fl->fl_start = s64_to_loff_t(start);
- if (len == 0 || end < 0)
- fl->fl_end = OFFSET_MAX;
- else
- fl->fl_end = s64_to_loff_t(end);
return true;
}
diff --git a/fs/locks.c b/fs/locks.c
index c266cfdc3291..607f94a0e789 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -2129,6 +2129,7 @@ SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
else
error = locks_lock_file_wait(f.file, &fl);
+ locks_release_private(&fl);
out_putf:
fdput(f);
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 97c54d3a2227..47ccfcbe0a22 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -11,7 +11,7 @@
/*
* Mbcache is a simple key-value store. Keys need not be unique, however
* key-value pairs are expected to be unique (we use this fact in
- * mb_cache_entry_delete()).
+ * mb_cache_entry_delete_or_get()).
*
* Ext2 and ext4 use this cache for deduplication of extended attribute blocks.
* Ext4 also uses it for deduplication of xattr values stored in inodes.
@@ -90,7 +90,7 @@ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
return -ENOMEM;
INIT_LIST_HEAD(&entry->e_list);
- /* One ref for hash, one ref returned */
+ /* Initial hash reference */
atomic_set(&entry->e_refcnt, 1);
entry->e_key = key;
entry->e_value = value;
@@ -106,25 +106,45 @@ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
}
}
hlist_bl_add_head(&entry->e_hash_list, head);
- hlist_bl_unlock(head);
-
+ /*
+ * Add entry to LRU list before it can be found by
+ * mb_cache_entry_delete() to avoid races
+ */
spin_lock(&cache->c_list_lock);
list_add_tail(&entry->e_list, &cache->c_list);
- /* Grab ref for LRU list */
- atomic_inc(&entry->e_refcnt);
cache->c_entry_count++;
spin_unlock(&cache->c_list_lock);
+ hlist_bl_unlock(head);
return 0;
}
EXPORT_SYMBOL(mb_cache_entry_create);
-void __mb_cache_entry_free(struct mb_cache_entry *entry)
+void __mb_cache_entry_free(struct mb_cache *cache, struct mb_cache_entry *entry)
{
+ struct hlist_bl_head *head;
+
+ head = mb_cache_entry_head(cache, entry->e_key);
+ hlist_bl_lock(head);
+ hlist_bl_del(&entry->e_hash_list);
+ hlist_bl_unlock(head);
kmem_cache_free(mb_entry_cache, entry);
}
EXPORT_SYMBOL(__mb_cache_entry_free);
+/*
+ * mb_cache_entry_wait_unused - wait to be the last user of the entry
+ *
+ * @entry - entry to work on
+ *
+ * Wait to be the last user of the entry.
+ */
+void mb_cache_entry_wait_unused(struct mb_cache_entry *entry)
+{
+ wait_var_event(&entry->e_refcnt, atomic_read(&entry->e_refcnt) <= 2);
+}
+EXPORT_SYMBOL(mb_cache_entry_wait_unused);
+
static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
struct mb_cache_entry *entry,
u32 key)
@@ -142,10 +162,9 @@ static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
while (node) {
entry = hlist_bl_entry(node, struct mb_cache_entry,
e_hash_list);
- if (entry->e_key == key && entry->e_reusable) {
- atomic_inc(&entry->e_refcnt);
+ if (entry->e_key == key && entry->e_reusable &&
+ atomic_inc_not_zero(&entry->e_refcnt))
goto out;
- }
node = node->next;
}
entry = NULL;
@@ -205,10 +224,9 @@ struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
head = mb_cache_entry_head(cache, key);
hlist_bl_lock(head);
hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
- if (entry->e_key == key && entry->e_value == value) {
- atomic_inc(&entry->e_refcnt);
+ if (entry->e_key == key && entry->e_value == value &&
+ atomic_inc_not_zero(&entry->e_refcnt))
goto out;
- }
}
entry = NULL;
out:
@@ -217,42 +235,42 @@ out:
}
EXPORT_SYMBOL(mb_cache_entry_get);
-/* mb_cache_entry_delete - remove a cache entry
+/* mb_cache_entry_delete_or_get - remove a cache entry if it has no users
* @cache - cache we work with
* @key - key
* @value - value
*
- * Remove entry from cache @cache with key @key and value @value.
+ * Remove entry from cache @cache with key @key and value @value. The removal
+ * happens only if the entry is unused. The function returns NULL if the
+ * entry was successfully removed or there is no such entry in the cache.
+ * Otherwise the function grabs a reference to the entry that we failed to
+ * delete because it still has users, and returns it.
*/
-void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value)
+struct mb_cache_entry *mb_cache_entry_delete_or_get(struct mb_cache *cache,
+ u32 key, u64 value)
{
- struct hlist_bl_node *node;
- struct hlist_bl_head *head;
struct mb_cache_entry *entry;
- head = mb_cache_entry_head(cache, key);
- hlist_bl_lock(head);
- hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
- if (entry->e_key == key && entry->e_value == value) {
- /* We keep hash list reference to keep entry alive */
- hlist_bl_del_init(&entry->e_hash_list);
- hlist_bl_unlock(head);
- spin_lock(&cache->c_list_lock);
- if (!list_empty(&entry->e_list)) {
- list_del_init(&entry->e_list);
- if (!WARN_ONCE(cache->c_entry_count == 0,
- "mbcache: attempt to decrement c_entry_count past zero"))
- cache->c_entry_count--;
- atomic_dec(&entry->e_refcnt);
- }
- spin_unlock(&cache->c_list_lock);
- mb_cache_entry_put(cache, entry);
- return;
- }
- }
- hlist_bl_unlock(head);
+ entry = mb_cache_entry_get(cache, key, value);
+ if (!entry)
+ return NULL;
+
+ /*
+ * Drop the ref we got from mb_cache_entry_get() and the initial hash
+ * ref if we are the last user
+ */
+ if (atomic_cmpxchg(&entry->e_refcnt, 2, 0) != 2)
+ return entry;
+
+ spin_lock(&cache->c_list_lock);
+ if (!list_empty(&entry->e_list))
+ list_del_init(&entry->e_list);
+ cache->c_entry_count--;
+ spin_unlock(&cache->c_list_lock);
+ __mb_cache_entry_free(cache, entry);
+ return NULL;
}
-EXPORT_SYMBOL(mb_cache_entry_delete);
+EXPORT_SYMBOL(mb_cache_entry_delete_or_get);
/* mb_cache_entry_touch - cache entry got used
* @cache - cache the entry belongs to
@@ -281,34 +299,24 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
unsigned long nr_to_scan)
{
struct mb_cache_entry *entry;
- struct hlist_bl_head *head;
unsigned long shrunk = 0;
spin_lock(&cache->c_list_lock);
while (nr_to_scan-- && !list_empty(&cache->c_list)) {
entry = list_first_entry(&cache->c_list,
struct mb_cache_entry, e_list);
- if (entry->e_referenced) {
+ /* Drop initial hash reference if there is no user */
+ if (entry->e_referenced ||
+ atomic_cmpxchg(&entry->e_refcnt, 1, 0) != 1) {
entry->e_referenced = 0;
list_move_tail(&entry->e_list, &cache->c_list);
continue;
}
list_del_init(&entry->e_list);
cache->c_entry_count--;
- /*
- * We keep LRU list reference so that entry doesn't go away
- * from under us.
- */
spin_unlock(&cache->c_list_lock);
- head = mb_cache_entry_head(cache, entry->e_key);
- hlist_bl_lock(head);
- if (!hlist_bl_unhashed(&entry->e_hash_list)) {
- hlist_bl_del_init(&entry->e_hash_list);
- atomic_dec(&entry->e_refcnt);
- }
- hlist_bl_unlock(head);
- if (mb_cache_entry_put(cache, entry))
- shrunk++;
+ __mb_cache_entry_free(cache, entry);
+ shrunk++;
cond_resched();
spin_lock(&cache->c_list_lock);
}
@@ -367,7 +375,7 @@ struct mb_cache *mb_cache_create(int bucket_bits)
cache->c_shrink.count_objects = mb_cache_count;
cache->c_shrink.scan_objects = mb_cache_scan;
cache->c_shrink.seeks = DEFAULT_SEEKS;
- if (register_shrinker(&cache->c_shrink)) {
+ if (register_shrinker(&cache->c_shrink, "mbcache-shrinker")) {
kfree(cache->c_hash);
kfree(cache);
goto err_out;
@@ -400,11 +408,6 @@ void mb_cache_destroy(struct mb_cache *cache)
* point.
*/
list_for_each_entry_safe(entry, next, &cache->c_list, e_list) {
- if (!hlist_bl_unhashed(&entry->e_hash_list)) {
- hlist_bl_del_init(&entry->e_hash_list);
- atomic_dec(&entry->e_refcnt);
- } else
- WARN_ON(1);
list_del(&entry->e_list);
WARN_ON(atomic_read(&entry->e_refcnt) != 1);
mb_cache_entry_put(cache, entry);
diff --git a/fs/namei.c b/fs/namei.c
index ed3ffd9b22a3..53b4bc094db2 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -3024,6 +3024,65 @@ void unlock_rename(struct dentry *p1, struct dentry *p2)
EXPORT_SYMBOL(unlock_rename);
/**
+ * mode_strip_umask - handle vfs umask stripping
+ * @dir: parent directory of the new inode
+ * @mode: mode of the new inode to be created in @dir
+ *
+ * Umask stripping depends on whether or not the filesystem supports POSIX
+ * ACLs. If the filesystem doesn't support it, umask stripping is done
+ * directly here. If the filesystem does support POSIX ACLs, umask stripping is
+ * deferred until the filesystem calls posix_acl_create().
+ *
+ * Returns: mode
+ */
+static inline umode_t mode_strip_umask(const struct inode *dir, umode_t mode)
+{
+ if (!IS_POSIXACL(dir))
+ mode &= ~current_umask();
+ return mode;
+}
+
+/**
+ * vfs_prepare_mode - prepare the mode to be used for a new inode
+ * @mnt_userns: user namespace of the mount the inode was found from
+ * @dir: parent directory of the new inode
+ * @mode: mode of the new inode
+ * @mask_perms: allowed permission by the vfs
+ * @type: type of file to be created
+ *
+ * This helper consolidates and enforces vfs restrictions on the @mode of a new
+ * object to be created.
+ *
+ * Umask stripping depends on whether the filesystem supports POSIX ACLs (see
+ * the kernel documentation for mode_strip_umask()). Moving umask stripping
+ * after setgid stripping allows the same ordering for both non-POSIX ACL and
+ * POSIX ACL supporting filesystems.
+ *
+ * Note that it's currently valid for @type to be 0 if a directory is created.
+ * Filesystems raise that flag individually and we need to check whether each
+ * filesystem can deal with receiving S_IFDIR from the vfs before we enforce a
+ * non-zero type.
+ *
+ * Returns: mode to be passed to the filesystem
+ */
+static inline umode_t vfs_prepare_mode(struct user_namespace *mnt_userns,
+ const struct inode *dir, umode_t mode,
+ umode_t mask_perms, umode_t type)
+{
+ mode = mode_strip_sgid(mnt_userns, dir, mode);
+ mode = mode_strip_umask(dir, mode);
+
+ /*
+ * Apply the vfs mandated allowed permission mask and set the type of
+ * file to be created before we call into the filesystem.
+ */
+ mode &= (mask_perms & ~S_IFMT);
+ mode |= (type & S_IFMT);
+
+ return mode;
+}
+
+/**
* vfs_create - create new file
* @mnt_userns: user namespace of the mount the inode was found from
* @dir: inode of @dentry
@@ -3048,8 +3107,8 @@ int vfs_create(struct user_namespace *mnt_userns, struct inode *dir,
if (!dir->i_op->create)
return -EACCES; /* shouldn't it be ENOSYS? */
- mode &= S_IALLUGO;
- mode |= S_IFREG;
+
+ mode = vfs_prepare_mode(mnt_userns, dir, mode, S_IALLUGO, S_IFREG);
error = security_inode_create(dir, dentry, mode);
if (error)
return error;
@@ -3312,8 +3371,7 @@ static struct dentry *lookup_open(struct nameidata *nd, struct file *file,
if (open_flag & O_CREAT) {
if (open_flag & O_EXCL)
open_flag &= ~O_TRUNC;
- if (!IS_POSIXACL(dir->d_inode))
- mode &= ~current_umask();
+ mode = vfs_prepare_mode(mnt_userns, dir->d_inode, mode, mode, mode);
if (likely(got_write))
create_error = may_o_create(mnt_userns, &nd->path,
dentry, mode);
@@ -3544,6 +3602,7 @@ struct dentry *vfs_tmpfile(struct user_namespace *mnt_userns,
child = d_alloc(dentry, &slash_name);
if (unlikely(!child))
goto out_err;
+ mode = vfs_prepare_mode(mnt_userns, dir, mode, mode, mode);
error = dir->i_op->tmpfile(mnt_userns, dir, child, mode);
if (error)
goto out_err;
@@ -3821,6 +3880,7 @@ int vfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
if (!dir->i_op->mknod)
return -EPERM;
+ mode = vfs_prepare_mode(mnt_userns, dir, mode, mode, mode);
error = devcgroup_inode_mknod(mode, dev);
if (error)
return error;
@@ -3871,9 +3931,8 @@ retry:
if (IS_ERR(dentry))
goto out1;
- if (!IS_POSIXACL(path.dentry->d_inode))
- mode &= ~current_umask();
- error = security_path_mknod(&path, dentry, mode, dev);
+ error = security_path_mknod(&path, dentry,
+ mode_strip_umask(path.dentry->d_inode, mode), dev);
if (error)
goto out2;
@@ -3943,7 +4002,7 @@ int vfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
if (!dir->i_op->mkdir)
return -EPERM;
- mode &= (S_IRWXUGO|S_ISVTX);
+ mode = vfs_prepare_mode(mnt_userns, dir, mode, S_IRWXUGO | S_ISVTX, 0);
error = security_inode_mkdir(dir, dentry, mode);
if (error)
return error;
@@ -3971,9 +4030,8 @@ retry:
if (IS_ERR(dentry))
goto out_putname;
- if (!IS_POSIXACL(path.dentry->d_inode))
- mode &= ~current_umask();
- error = security_path_mkdir(&path, dentry, mode);
+ error = security_path_mkdir(&path, dentry,
+ mode_strip_umask(path.dentry->d_inode, mode));
if (!error) {
struct user_namespace *mnt_userns;
mnt_userns = mnt_user_ns(path.mnt);
diff --git a/fs/namespace.c b/fs/namespace.c
index 68789f896f08..df137ba19d37 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -4238,6 +4238,13 @@ static int build_mount_idmapped(const struct mount_attr *attr, size_t usize,
err = -EPERM;
goto out_fput;
}
+
+ /* We're not controlling the target namespace. */
+ if (!ns_capable(mnt_userns, CAP_SYS_ADMIN)) {
+ err = -EPERM;
+ goto out_fput;
+ }
+
kattr->mnt_userns = get_user_ns(mnt_userns);
out_fput:
diff --git a/fs/nfs/blocklayout/dev.c b/fs/nfs/blocklayout/dev.c
index 5e56da748b2a..fea5f8821da5 100644
--- a/fs/nfs/blocklayout/dev.c
+++ b/fs/nfs/blocklayout/dev.c
@@ -301,18 +301,14 @@ bl_validate_designator(struct pnfs_block_volume *v)
}
}
-/*
- * Try to open the udev path for the WWN. At least on Debian the udev
- * by-id path will always point to the dm-multipath device if one exists.
- */
static struct block_device *
-bl_open_udev_path(struct pnfs_block_volume *v)
+bl_open_path(struct pnfs_block_volume *v, const char *prefix)
{
struct block_device *bdev;
const char *devname;
- devname = kasprintf(GFP_KERNEL, "/dev/disk/by-id/wwn-0x%*phN",
- v->scsi.designator_len, v->scsi.designator);
+ devname = kasprintf(GFP_KERNEL, "/dev/disk/by-id/%s%*phN",
+ prefix, v->scsi.designator_len, v->scsi.designator);
if (!devname)
return ERR_PTR(-ENOMEM);
@@ -326,28 +322,6 @@ bl_open_udev_path(struct pnfs_block_volume *v)
return bdev;
}
-/*
- * Try to open the RH/Fedora specific dm-mpath udev path for this WWN, as the
- * wwn- links will only point to the first discovered SCSI device there.
- */
-static struct block_device *
-bl_open_dm_mpath_udev_path(struct pnfs_block_volume *v)
-{
- struct block_device *bdev;
- const char *devname;
-
- devname = kasprintf(GFP_KERNEL,
- "/dev/disk/by-id/dm-uuid-mpath-%d%*phN",
- v->scsi.designator_type,
- v->scsi.designator_len, v->scsi.designator);
- if (!devname)
- return ERR_PTR(-ENOMEM);
-
- bdev = blkdev_get_by_path(devname, FMODE_READ | FMODE_WRITE, NULL);
- kfree(devname);
- return bdev;
-}
-
static int
bl_parse_scsi(struct nfs_server *server, struct pnfs_block_dev *d,
struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
@@ -360,9 +334,15 @@ bl_parse_scsi(struct nfs_server *server, struct pnfs_block_dev *d,
if (!bl_validate_designator(v))
return -EINVAL;
- bdev = bl_open_dm_mpath_udev_path(v);
+ /*
+ * Try to open the RH/Fedora specific dm-mpath udev path first, as the
+ * wwn- links will only point to the first discovered SCSI device there.
+ * On other distributions like Debian, the default SCSI by-id path will
+ * point to the dm-multipath device if one exists.
+ */
+ bdev = bl_open_path(v, "dm-uuid-mpath-0x");
if (IS_ERR(bdev))
- bdev = bl_open_udev_path(v);
+ bdev = bl_open_path(v, "wwn-0x");
if (IS_ERR(bdev))
return PTR_ERR(bdev);
d->bdev = bdev;
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index e828504cc396..da8da5cdbbc1 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -708,9 +708,9 @@ static int nfs_init_server(struct nfs_server *server,
}
if (ctx->rsize)
- server->rsize = nfs_block_size(ctx->rsize, NULL);
+ server->rsize = nfs_io_size(ctx->rsize, clp->cl_proto);
if (ctx->wsize)
- server->wsize = nfs_block_size(ctx->wsize, NULL);
+ server->wsize = nfs_io_size(ctx->wsize, clp->cl_proto);
server->acregmin = ctx->acregmin * HZ;
server->acregmax = ctx->acregmax * HZ;
@@ -755,18 +755,19 @@ error:
static void nfs_server_set_fsinfo(struct nfs_server *server,
struct nfs_fsinfo *fsinfo)
{
+ struct nfs_client *clp = server->nfs_client;
unsigned long max_rpc_payload, raw_max_rpc_payload;
/* Work out a lot of parameters */
if (server->rsize == 0)
- server->rsize = nfs_block_size(fsinfo->rtpref, NULL);
+ server->rsize = nfs_io_size(fsinfo->rtpref, clp->cl_proto);
if (server->wsize == 0)
- server->wsize = nfs_block_size(fsinfo->wtpref, NULL);
+ server->wsize = nfs_io_size(fsinfo->wtpref, clp->cl_proto);
if (fsinfo->rtmax >= 512 && server->rsize > fsinfo->rtmax)
- server->rsize = nfs_block_size(fsinfo->rtmax, NULL);
+ server->rsize = nfs_io_size(fsinfo->rtmax, clp->cl_proto);
if (fsinfo->wtmax >= 512 && server->wsize > fsinfo->wtmax)
- server->wsize = nfs_block_size(fsinfo->wtmax, NULL);
+ server->wsize = nfs_io_size(fsinfo->wtmax, clp->cl_proto);
raw_max_rpc_payload = rpc_max_payload(server->client);
max_rpc_payload = nfs_block_size(raw_max_rpc_payload, NULL);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 0c4e8dd6aa96..5d6c2ddc7ea6 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1084,7 +1084,7 @@ static void nfs_do_filldir(struct nfs_readdir_descriptor *desc,
struct nfs_cache_array *array;
unsigned int i;
- array = kmap(desc->page);
+ array = kmap_local_page(desc->page);
for (i = desc->cache_entry_index; i < array->size; i++) {
struct nfs_cache_array_entry *ent;
@@ -1110,7 +1110,7 @@ static void nfs_do_filldir(struct nfs_readdir_descriptor *desc,
if (array->page_is_eof)
desc->eof = !desc->eob;
- kunmap(desc->page);
+ kunmap_local(array);
dfprintk(DIRCACHE, "NFS: nfs_do_filldir() filling ended @ cookie %llu\n",
(unsigned long long)desc->dir_cookie);
}
@@ -1739,6 +1739,10 @@ nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
goto out_bad;
}
+ if ((flags & LOOKUP_RENAME_TARGET) && d_count(dentry) < 2 &&
+ nfs_server_capable(dir, NFS_CAP_CASE_INSENSITIVE))
+ goto out_bad;
+
if (nfs_verifier_is_delegated(dentry))
return nfs_lookup_revalidate_delegated(dir, dentry, inode);
@@ -1778,6 +1782,8 @@ __nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags,
int ret;
if (flags & LOOKUP_RCU) {
+ if (dentry->d_fsdata == NFS_FSDATA_BLOCKED)
+ return -ECHILD;
parent = READ_ONCE(dentry->d_parent);
dir = d_inode_rcu(parent);
if (!dir)
@@ -1786,6 +1792,9 @@ __nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags,
if (parent != READ_ONCE(dentry->d_parent))
return -ECHILD;
} else {
+ /* Wait for unlink to complete */
+ wait_var_event(&dentry->d_fsdata,
+ dentry->d_fsdata != NFS_FSDATA_BLOCKED);
parent = dget_parent(dentry);
ret = reval(d_inode(parent), dentry, flags);
dput(parent);
@@ -2373,7 +2382,8 @@ static void nfs_dentry_remove_handle_error(struct inode *dir,
{
switch (error) {
case -ENOENT:
- d_delete(dentry);
+ if (d_really_is_positive(dentry))
+ d_delete(dentry);
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
break;
case 0:
@@ -2454,7 +2464,6 @@ out:
int nfs_unlink(struct inode *dir, struct dentry *dentry)
{
int error;
- int need_rehash = 0;
dfprintk(VFS, "NFS: unlink(%s/%lu, %pd)\n", dir->i_sb->s_id,
dir->i_ino, dentry);
@@ -2469,15 +2478,27 @@ int nfs_unlink(struct inode *dir, struct dentry *dentry)
error = nfs_sillyrename(dir, dentry);
goto out;
}
- if (!d_unhashed(dentry)) {
- __d_drop(dentry);
- need_rehash = 1;
+ /* We must prevent any concurrent open until the unlink
+ * completes. ->d_revalidate will wait for ->d_fsdata
+ * to clear. We set it here to ensure no lookup succeeds until
+ * the unlink is complete on the server.
+ */
+ error = -ETXTBSY;
+ if (WARN_ON(dentry->d_flags & DCACHE_NFSFS_RENAMED) ||
+ WARN_ON(dentry->d_fsdata == NFS_FSDATA_BLOCKED)) {
+ spin_unlock(&dentry->d_lock);
+ goto out;
}
+ if (dentry->d_fsdata)
+ /* old devname */
+ kfree(dentry->d_fsdata);
+ dentry->d_fsdata = NFS_FSDATA_BLOCKED;
+
spin_unlock(&dentry->d_lock);
error = nfs_safe_remove(dentry);
nfs_dentry_remove_handle_error(dir, dentry, error);
- if (need_rehash)
- d_rehash(dentry);
+ dentry->d_fsdata = NULL;
+ wake_up_var(&dentry->d_fsdata);
out:
trace_nfs_unlink_exit(dir, dentry, error);
return error;
@@ -2584,6 +2605,15 @@ nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
}
EXPORT_SYMBOL_GPL(nfs_link);
+static void
+nfs_unblock_rename(struct rpc_task *task, struct nfs_renamedata *data)
+{
+ struct dentry *new_dentry = data->new_dentry;
+
+ new_dentry->d_fsdata = NULL;
+ wake_up_var(&new_dentry->d_fsdata);
+}
+
/*
* RENAME
* FIXME: Some nfsds, like the Linux user space nfsd, may generate a
@@ -2614,8 +2644,9 @@ int nfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
{
struct inode *old_inode = d_inode(old_dentry);
struct inode *new_inode = d_inode(new_dentry);
- struct dentry *dentry = NULL, *rehash = NULL;
+ struct dentry *dentry = NULL;
struct rpc_task *task;
+ bool must_unblock = false;
int error = -EBUSY;
if (flags)
@@ -2633,18 +2664,27 @@ int nfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
* the new target.
*/
if (new_inode && !S_ISDIR(new_inode->i_mode)) {
- /*
- * To prevent any new references to the target during the
- * rename, we unhash the dentry in advance.
+ /* We must prevent any concurrent open until the unlink
+ * completes. ->d_revalidate will wait for ->d_fsdata
+ * to clear. We set it here to ensure no lookup succeeds until
+ * the unlink is complete on the server.
*/
- if (!d_unhashed(new_dentry)) {
- d_drop(new_dentry);
- rehash = new_dentry;
+ error = -ETXTBSY;
+ if (WARN_ON(new_dentry->d_flags & DCACHE_NFSFS_RENAMED) ||
+ WARN_ON(new_dentry->d_fsdata == NFS_FSDATA_BLOCKED))
+ goto out;
+ if (new_dentry->d_fsdata) {
+ /* old devname */
+ kfree(new_dentry->d_fsdata);
+ new_dentry->d_fsdata = NULL;
}
+ spin_lock(&new_dentry->d_lock);
if (d_count(new_dentry) > 2) {
int err;
+ spin_unlock(&new_dentry->d_lock);
+
/* copy the target dentry's name */
dentry = d_alloc(new_dentry->d_parent,
&new_dentry->d_name);
@@ -2657,14 +2697,19 @@ int nfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
goto out;
new_dentry = dentry;
- rehash = NULL;
new_inode = NULL;
+ } else {
+ new_dentry->d_fsdata = NFS_FSDATA_BLOCKED;
+ must_unblock = true;
+ spin_unlock(&new_dentry->d_lock);
}
+
}
if (S_ISREG(old_inode->i_mode))
nfs_sync_inode(old_inode);
- task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry, NULL);
+ task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry,
+ must_unblock ? nfs_unblock_rename : NULL);
if (IS_ERR(task)) {
error = PTR_ERR(task);
goto out;
@@ -2688,8 +2733,6 @@ int nfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
spin_unlock(&old_inode->i_lock);
}
out:
- if (rehash)
- d_rehash(rehash);
trace_nfs_rename_exit(old_dir, old_dentry,
new_dir, new_dentry, error);
if (!error) {
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 4eb2a8380a28..1707f46b1335 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -60,44 +60,12 @@
#include "iostat.h"
#include "pnfs.h"
#include "fscache.h"
+#include "nfstrace.h"
#define NFSDBG_FACILITY NFSDBG_VFS
static struct kmem_cache *nfs_direct_cachep;
-struct nfs_direct_req {
- struct kref kref; /* release manager */
-
- /* I/O parameters */
- struct nfs_open_context *ctx; /* file open context info */
- struct nfs_lock_context *l_ctx; /* Lock context info */
- struct kiocb * iocb; /* controlling i/o request */
- struct inode * inode; /* target file of i/o */
-
- /* completion state */
- atomic_t io_count; /* i/os we're waiting for */
- spinlock_t lock; /* protect completion state */
-
- loff_t io_start; /* Start offset for I/O */
- ssize_t count, /* bytes actually processed */
- max_count, /* max expected count */
- bytes_left, /* bytes left to be sent */
- error; /* any reported error */
- struct completion completion; /* wait for i/o completion */
-
- /* commit state */
- struct nfs_mds_commit_info mds_cinfo; /* Storage for cinfo */
- struct pnfs_ds_commit_info ds_cinfo; /* Storage for cinfo */
- struct work_struct work;
- int flags;
- /* for write */
-#define NFS_ODIRECT_DO_COMMIT (1) /* an unstable reply was received */
-#define NFS_ODIRECT_RESCHED_WRITES (2) /* write verification failed */
- /* for read */
-#define NFS_ODIRECT_SHOULD_DIRTY (3) /* dirty user-space page after read */
-#define NFS_ODIRECT_DONE INT_MAX /* write verification failed */
-};
-
static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
static void nfs_direct_write_complete(struct nfs_direct_req *dreq);
@@ -364,13 +332,12 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
size_t pgbase;
unsigned npages, i;
- result = iov_iter_get_pages_alloc(iter, &pagevec,
+ result = iov_iter_get_pages_alloc2(iter, &pagevec,
rsize, &pgbase);
if (result < 0)
break;
bytes = result;
- iov_iter_advance(iter, bytes);
npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
for (i = 0; i < npages; i++) {
struct nfs_page *req;
@@ -478,7 +445,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,
if (!is_sync_kiocb(iocb))
dreq->iocb = iocb;
- if (iter_is_iovec(iter))
+ if (user_backed_iter(iter))
dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;
if (!swap)
@@ -595,14 +562,17 @@ static void nfs_direct_commit_complete(struct nfs_commit_data *data)
struct nfs_page *req;
int status = data->task.tk_status;
+ trace_nfs_direct_commit_complete(dreq);
+
if (status < 0) {
/* Errors in commit are fatal */
dreq->error = status;
dreq->max_count = 0;
dreq->count = 0;
dreq->flags = NFS_ODIRECT_DONE;
- } else if (dreq->flags == NFS_ODIRECT_DONE)
+ } else {
status = dreq->error;
+ }
nfs_init_cinfo_from_dreq(&cinfo, dreq);
@@ -631,6 +601,8 @@ static void nfs_direct_resched_write(struct nfs_commit_info *cinfo,
{
struct nfs_direct_req *dreq = cinfo->dreq;
+ trace_nfs_direct_resched_write(dreq);
+
spin_lock(&dreq->lock);
if (dreq->flags != NFS_ODIRECT_DONE)
dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
@@ -695,6 +667,7 @@ static void nfs_direct_write_schedule_work(struct work_struct *work)
static void nfs_direct_write_complete(struct nfs_direct_req *dreq)
{
+ trace_nfs_direct_write_complete(dreq);
queue_work(nfsiod_workqueue, &dreq->work); /* Calls nfs_direct_write_schedule_work */
}
@@ -705,6 +678,8 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
struct nfs_page *req = nfs_list_entry(hdr->pages.next);
int flags = NFS_ODIRECT_DONE;
+ trace_nfs_direct_write_completion(dreq);
+
nfs_init_cinfo_from_dreq(&cinfo, dreq);
spin_lock(&dreq->lock);
@@ -714,7 +689,7 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
}
nfs_direct_count_bytes(dreq, hdr);
- if (hdr->good_bytes != 0 && nfs_write_need_commit(hdr)) {
+ if (test_bit(NFS_IOHDR_UNSTABLE_WRITES, &hdr->flags)) {
if (!dreq->flags)
dreq->flags = NFS_ODIRECT_DO_COMMIT;
flags = dreq->flags;
@@ -759,6 +734,8 @@ static void nfs_direct_write_reschedule_io(struct nfs_pgio_header *hdr)
{
struct nfs_direct_req *dreq = hdr->dreq;
+ trace_nfs_direct_write_reschedule_io(dreq);
+
spin_lock(&dreq->lock);
if (dreq->error == 0) {
dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
@@ -799,6 +776,8 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
size_t requested_bytes = 0;
size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);
+ trace_nfs_direct_write_schedule_iovec(dreq);
+
nfs_pageio_init_write(&desc, inode, ioflags, false,
&nfs_direct_write_completion_ops);
desc.pg_dreq = dreq;
@@ -812,13 +791,12 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
size_t pgbase;
unsigned npages, i;
- result = iov_iter_get_pages_alloc(iter, &pagevec,
+ result = iov_iter_get_pages_alloc2(iter, &pagevec,
wsize, &pgbase);
if (result < 0)
break;
bytes = result;
- iov_iter_advance(iter, bytes);
npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
for (i = 0; i < npages; i++) {
struct nfs_page *req;
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 549baed76351..e032fe201a36 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -221,8 +221,10 @@ nfs_file_fsync_commit(struct file *file, int datasync)
int
nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
- struct nfs_open_context *ctx = nfs_file_open_context(file);
struct inode *inode = file_inode(file);
+ struct nfs_inode *nfsi = NFS_I(inode);
+ long save_nredirtied = atomic_long_read(&nfsi->redirtied_pages);
+ long nredirtied;
int ret;
trace_nfs_fsync_enter(inode);
@@ -237,15 +239,10 @@ nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
ret = pnfs_sync_inode(inode, !!datasync);
if (ret != 0)
break;
- if (!test_and_clear_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags))
+ nredirtied = atomic_long_read(&nfsi->redirtied_pages);
+ if (nredirtied == save_nredirtied)
break;
- /*
- * If nfs_file_fsync_commit detected a server reboot, then
- * resend all dirty pages that might have been covered by
- * the NFS_CONTEXT_RESEND_WRITES flag
- */
- start = 0;
- end = LLONG_MAX;
+ save_nredirtied = nredirtied;
}
trace_nfs_fsync_exit(inode, ret);
@@ -661,8 +658,6 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
result = filemap_fdatawait_range(file->f_mapping,
iocb->ki_pos - written,
iocb->ki_pos - 1);
- if (result < 0)
- goto out;
}
result = generic_write_sync(iocb, written);
if (result < 0)
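
As an aside to the nfs_file_fsync() change above: the new loop snapshots redirtied_pages before each pass and only returns once a pass completes without any page being redirtied (pages are redirtied when writes must be resent, for example after a server reboot). A rough userspace sketch of that "retry until the counter stops moving" pattern; do_sync_pass() and the counter are stand-ins, not NFS interfaces:

#include <stdatomic.h>
#include <stdio.h>

static atomic_long redirtied_pages;

static int do_sync_pass(void)
{
	/* In the kernel this is the writeback wait + commit + pNFS sync
	 * sequence; here it pretends a few pages were redirtied on the
	 * first pass and none afterwards. */
	static int first = 1;

	if (first) {
		first = 0;
		atomic_fetch_add(&redirtied_pages, 4);
	}
	return 0;
}

int main(void)
{
	long saved = atomic_load(&redirtied_pages);
	int ret;

	for (;;) {
		long now;

		ret = do_sync_pass();
		if (ret)
			break;
		now = atomic_load(&redirtied_pages);
		if (now == saved)
			break;		/* nothing new was redirtied: done */
		saved = now;		/* something was resent: sync again */
	}
	printf("fsync-style loop finished, ret=%d\n", ret);
	return 0;
}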
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index 2b2661582bbe..ad34a33b0737 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -181,6 +181,8 @@ static int filelayout_async_handle_error(struct rpc_task *task,
case -EIO:
case -ETIMEDOUT:
case -EPIPE:
+ case -EPROTO:
+ case -ENODEV:
dprintk("%s DS connection error %d\n", __func__,
task->tk_status);
nfs4_mark_deviceid_unavailable(devid);
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index 604be402ae13..7d285561e59f 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -1131,6 +1131,8 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
case -EIO:
case -ETIMEDOUT:
case -EPIPE:
+ case -EPROTO:
+ case -ENODEV:
dprintk("%s DS connection error %d\n", __func__,
task->tk_status);
nfs4_delete_deviceid(devid->ld, devid->nfs_client,
@@ -1236,6 +1238,8 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
case -ENOBUFS:
case -EPIPE:
case -EPERM:
+ case -EPROTO:
+ case -ENODEV:
*op_status = status = NFS4ERR_NXIO;
break;
case -EACCES:
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
index bfa7202ca7be..e028f5a0ef5f 100644
--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -113,8 +113,10 @@ nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
goto out_err_drain_dsaddrs;
ds_versions[i].version = be32_to_cpup(p++);
ds_versions[i].minor_version = be32_to_cpup(p++);
- ds_versions[i].rsize = nfs_block_size(be32_to_cpup(p++), NULL);
- ds_versions[i].wsize = nfs_block_size(be32_to_cpup(p++), NULL);
+ ds_versions[i].rsize = nfs_io_size(be32_to_cpup(p++),
+ server->nfs_client->cl_proto);
+ ds_versions[i].wsize = nfs_io_size(be32_to_cpup(p++),
+ server->nfs_client->cl_proto);
ds_versions[i].tightly_coupled = be32_to_cpup(p);
if (ds_versions[i].rsize > NFS_MAX_FILE_IO_SIZE)
diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c
index 9a16897e8dc6..4da701fd1424 100644
--- a/fs/nfs/fs_context.c
+++ b/fs/nfs/fs_context.c
@@ -21,6 +21,8 @@
#include "nfs.h"
#include "internal.h"
+#include "nfstrace.h"
+
#define NFSDBG_FACILITY NFSDBG_MOUNT
#if IS_ENABLED(CONFIG_NFS_V3)
@@ -284,7 +286,6 @@ static int nfs_verify_server_address(struct sockaddr *addr)
}
}
- dfprintk(MOUNT, "NFS: Invalid IP address specified\n");
return 0;
}
@@ -378,7 +379,7 @@ static int nfs_parse_security_flavors(struct fs_context *fc,
char *string = param->string, *p;
int ret;
- dfprintk(MOUNT, "NFS: parsing %s=%s option\n", param->key, param->string);
+ trace_nfs_mount_assign(param->key, string);
while ((p = strsep(&string, ":")) != NULL) {
if (!*p)
@@ -480,11 +481,11 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
unsigned int len;
int ret, opt;
- dfprintk(MOUNT, "NFS: parsing nfs mount option '%s'\n", param->key);
+ trace_nfs_mount_option(param);
opt = fs_parse(fc, nfs_fs_parameters, param, &result);
if (opt < 0)
- return ctx->sloppy ? 1 : opt;
+ return (opt == -ENOPARAM && ctx->sloppy) ? 1 : opt;
if (fc->security)
ctx->has_sec_mnt_opts = 1;
@@ -683,6 +684,7 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
return ret;
break;
case Opt_vers:
+ trace_nfs_mount_assign(param->key, param->string);
ret = nfs_parse_version_string(fc, param->string);
if (ret < 0)
return ret;
@@ -694,6 +696,7 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
break;
case Opt_proto:
+ trace_nfs_mount_assign(param->key, param->string);
protofamily = AF_INET;
switch (lookup_constant(nfs_xprt_protocol_tokens, param->string, -1)) {
case Opt_xprt_udp6:
@@ -729,6 +732,7 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
break;
case Opt_mountproto:
+ trace_nfs_mount_assign(param->key, param->string);
mountfamily = AF_INET;
switch (lookup_constant(nfs_xprt_protocol_tokens, param->string, -1)) {
case Opt_xprt_udp6:
@@ -751,6 +755,7 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
break;
case Opt_addr:
+ trace_nfs_mount_assign(param->key, param->string);
len = rpc_pton(fc->net_ns, param->string, param->size,
&ctx->nfs_server.address,
sizeof(ctx->nfs_server._address));
@@ -759,16 +764,19 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
ctx->nfs_server.addrlen = len;
break;
case Opt_clientaddr:
+ trace_nfs_mount_assign(param->key, param->string);
kfree(ctx->client_address);
ctx->client_address = param->string;
param->string = NULL;
break;
case Opt_mounthost:
+ trace_nfs_mount_assign(param->key, param->string);
kfree(ctx->mount_server.hostname);
ctx->mount_server.hostname = param->string;
param->string = NULL;
break;
case Opt_mountaddr:
+ trace_nfs_mount_assign(param->key, param->string);
len = rpc_pton(fc->net_ns, param->string, param->size,
&ctx->mount_server.address,
sizeof(ctx->mount_server._address));
@@ -846,7 +854,6 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
*/
case Opt_sloppy:
ctx->sloppy = true;
- dfprintk(MOUNT, "NFS: relaxing parsing rules\n");
break;
}
@@ -879,10 +886,8 @@ static int nfs_parse_source(struct fs_context *fc,
size_t len;
const char *end;
- if (unlikely(!dev_name || !*dev_name)) {
- dfprintk(MOUNT, "NFS: device name not specified\n");
+ if (unlikely(!dev_name || !*dev_name))
return -EINVAL;
- }
/* Is the host name protected with square brackets? */
if (*dev_name == '[') {
@@ -922,7 +927,7 @@ static int nfs_parse_source(struct fs_context *fc,
if (!ctx->nfs_server.export_path)
goto out_nomem;
- dfprintk(MOUNT, "NFS: MNTPATH: '%s'\n", ctx->nfs_server.export_path);
+ trace_nfs_mount_path(ctx->nfs_server.export_path);
return 0;
out_bad_devname:
@@ -1116,7 +1121,6 @@ out_no_sec:
return nfs_invalf(fc, "NFS: nfs_mount_data version supports only AUTH_SYS");
out_nomem:
- dfprintk(MOUNT, "NFS: not enough memory to handle mount options");
return -ENOMEM;
out_no_address:
@@ -1248,7 +1252,7 @@ static int nfs4_parse_monolithic(struct fs_context *fc,
if (IS_ERR(c))
return PTR_ERR(c);
ctx->nfs_server.export_path = c;
- dfprintk(MOUNT, "NFS: MNTPATH: '%s'\n", c);
+ trace_nfs_mount_path(c);
c = strndup_user(data->client_addr.data, 16);
if (IS_ERR(c))
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index b4e46b0ffa2d..bea7c005119c 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -426,6 +426,7 @@ nfs_ilookup(struct super_block *sb, struct nfs_fattr *fattr, struct nfs_fh *fh)
static void nfs_inode_init_regular(struct nfs_inode *nfsi)
{
atomic_long_set(&nfsi->nrequests, 0);
+ atomic_long_set(&nfsi->redirtied_pages, 0);
INIT_LIST_HEAD(&nfsi->commit_info.list);
atomic_long_set(&nfsi->commit_info.ncommit, 0);
atomic_set(&nfsi->commit_info.rpcs_out, 0);
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 437ebe544aaf..27c720d71b4e 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -707,6 +707,24 @@ unsigned long nfs_block_size(unsigned long bsize, unsigned char *nrbitsp)
}
/*
+ * Compute and set NFS server rsize / wsize
+ */
+static inline
+unsigned long nfs_io_size(unsigned long iosize, enum xprt_transports proto)
+{
+ if (iosize < NFS_MIN_FILE_IO_SIZE)
+ iosize = NFS_DEF_FILE_IO_SIZE;
+ else if (iosize >= NFS_MAX_FILE_IO_SIZE)
+ iosize = NFS_MAX_FILE_IO_SIZE;
+ else
+ iosize = iosize & PAGE_MASK;
+
+ if (proto == XPRT_TRANSPORT_UDP)
+ return nfs_block_bits(iosize, NULL);
+ return iosize;
+}
+
+/*
* Determine the maximum file size for a superblock
*/
static inline
@@ -861,3 +879,36 @@ static inline void nfs_set_port(struct sockaddr *sap, int *port,
rpc_set_port(sap, *port);
}
+
+struct nfs_direct_req {
+ struct kref kref; /* release manager */
+
+ /* I/O parameters */
+ struct nfs_open_context *ctx; /* file open context info */
+ struct nfs_lock_context *l_ctx; /* Lock context info */
+ struct kiocb * iocb; /* controlling i/o request */
+ struct inode * inode; /* target file of i/o */
+
+ /* completion state */
+ atomic_t io_count; /* i/os we're waiting for */
+ spinlock_t lock; /* protect completion state */
+
+ loff_t io_start; /* Start offset for I/O */
+ ssize_t count, /* bytes actually processed */
+ max_count, /* max expected count */
+ bytes_left, /* bytes left to be sent */
+ error; /* any reported error */
+ struct completion completion; /* wait for i/o completion */
+
+ /* commit state */
+ struct nfs_mds_commit_info mds_cinfo; /* Storage for cinfo */
+ struct pnfs_ds_commit_info ds_cinfo; /* Storage for cinfo */
+ struct work_struct work;
+ int flags;
+ /* for write */
+#define NFS_ODIRECT_DO_COMMIT (1) /* an unstable reply was received */
+#define NFS_ODIRECT_RESCHED_WRITES (2) /* write verification failed */
+ /* for read */
+#define NFS_ODIRECT_SHOULD_DIRTY (3) /* dirty user-space page after read */
+#define NFS_ODIRECT_DONE INT_MAX /* write verification failed */
+};
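
As an aside to the nfs_io_size() helper added above: it clamps a requested rsize/wsize into the supported range, trims in-range values to a whole number of pages, and only applies the old power-of-two rounding (nfs_block_bits) for UDP transports. A standalone sketch of the same clamping for a TCP-style transport, with assumed example constants standing in for the kernel's:

#include <stdio.h>

/* Assumed example values; the real constants live in the NFS headers. */
#define EX_PAGE_SIZE		4096UL
#define EX_PAGE_MASK		(~(EX_PAGE_SIZE - 1))
#define EX_MIN_IO_SIZE		1024UL
#define EX_DEF_IO_SIZE		4096UL
#define EX_MAX_IO_SIZE		(1024UL * 1024UL)

/* Mirrors the clamping in nfs_io_size() when no power-of-two rounding
 * is needed. */
static unsigned long example_io_size(unsigned long iosize)
{
	if (iosize < EX_MIN_IO_SIZE)
		return EX_DEF_IO_SIZE;
	if (iosize >= EX_MAX_IO_SIZE)
		return EX_MAX_IO_SIZE;
	return iosize & EX_PAGE_MASK;	/* trim to a whole page multiple */
}

int main(void)
{
	printf("%lu -> %lu\n", 100UL, example_io_size(100));		/* -> 4096 */
	printf("%lu -> %lu\n", 70000UL, example_io_size(70000));	/* -> 69632 */
	printf("%lu -> %lu\n", 4UL << 20, example_io_size(4UL << 20));	/* -> 1048576 */
	return 0;
}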
diff --git a/fs/nfs/nfs3client.c b/fs/nfs/nfs3client.c
index 5601e47360c2..b49359afac88 100644
--- a/fs/nfs/nfs3client.c
+++ b/fs/nfs/nfs3client.c
@@ -108,7 +108,6 @@ struct nfs_client *nfs3_set_ds_client(struct nfs_server *mds_srv,
if (mds_srv->flags & NFS_MOUNT_NORESVPORT)
__set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
- __set_bit(NFS_CS_NOPING, &cl_init.init_flags);
__set_bit(NFS_CS_DS, &cl_init.init_flags);
/* Use the MDS nfs_client cl_ipaddr. */
diff --git a/fs/nfs/nfs42xattr.c b/fs/nfs/nfs42xattr.c
index e7b34f7e0614..a9bf09fdf2c3 100644
--- a/fs/nfs/nfs42xattr.c
+++ b/fs/nfs/nfs42xattr.c
@@ -1017,15 +1017,16 @@ int __init nfs4_xattr_cache_init(void)
if (ret)
goto out2;
- ret = register_shrinker(&nfs4_xattr_cache_shrinker);
+ ret = register_shrinker(&nfs4_xattr_cache_shrinker, "nfs-xattr_cache");
if (ret)
goto out1;
- ret = register_shrinker(&nfs4_xattr_entry_shrinker);
+ ret = register_shrinker(&nfs4_xattr_entry_shrinker, "nfs-xattr_entry");
if (ret)
goto out;
- ret = register_shrinker(&nfs4_xattr_large_entry_shrinker);
+ ret = register_shrinker(&nfs4_xattr_large_entry_shrinker,
+ "nfs-xattr_large_entry");
if (!ret)
return 0;
diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
index 271e5f92ed01..b56f05113d36 100644
--- a/fs/nfs/nfs42xdr.c
+++ b/fs/nfs/nfs42xdr.c
@@ -1025,82 +1025,95 @@ static int decode_deallocate(struct xdr_stream *xdr, struct nfs42_falloc_res *re
return decode_op_hdr(xdr, OP_DEALLOCATE);
}
-static int decode_read_plus_data(struct xdr_stream *xdr,
- struct nfs_pgio_args *args,
- struct nfs_pgio_res *res)
-{
- uint32_t count, recvd;
+struct read_plus_segment {
+ enum data_content4 type;
uint64_t offset;
- __be32 *p;
-
- p = xdr_inline_decode(xdr, 8 + 4);
- if (!p)
- return 1;
+ union {
+ struct {
+ uint64_t length;
+ } hole;
+
+ struct {
+ uint32_t length;
+ unsigned int from;
+ } data;
+ };
+};
- p = xdr_decode_hyper(p, &offset);
- count = be32_to_cpup(p);
- recvd = xdr_align_data(xdr, res->count, xdr_align_size(count));
- if (recvd > count)
- recvd = count;
- if (res->count + recvd > args->count) {
- if (args->count > res->count)
- res->count += args->count - res->count;
- return 1;
- }
- res->count += recvd;
- if (count > recvd)
- return 1;
- return 0;
+static inline uint64_t read_plus_segment_length(struct read_plus_segment *seg)
+{
+ return seg->type == NFS4_CONTENT_DATA ? seg->data.length : seg->hole.length;
}
-static int decode_read_plus_hole(struct xdr_stream *xdr,
- struct nfs_pgio_args *args,
- struct nfs_pgio_res *res, uint32_t *eof)
+static int decode_read_plus_segment(struct xdr_stream *xdr,
+ struct read_plus_segment *seg)
{
- uint64_t offset, length, recvd;
__be32 *p;
- p = xdr_inline_decode(xdr, 8 + 8);
+ p = xdr_inline_decode(xdr, 4);
if (!p)
- return 1;
-
- p = xdr_decode_hyper(p, &offset);
- p = xdr_decode_hyper(p, &length);
- if (offset != args->offset + res->count) {
- /* Server returned an out-of-sequence extent */
- if (offset > args->offset + res->count ||
- offset + length < args->offset + res->count) {
- dprintk("NFS: server returned out of sequence extent: "
- "offset/size = %llu/%llu != expected %llu\n",
- (unsigned long long)offset,
- (unsigned long long)length,
- (unsigned long long)(args->offset +
- res->count));
- return 1;
- }
- length -= args->offset + res->count - offset;
- }
- if (length + res->count > args->count) {
- *eof = 0;
- if (unlikely(res->count >= args->count))
- return 1;
- length = args->count - res->count;
- }
- recvd = xdr_expand_hole(xdr, res->count, length);
- res->count += recvd;
+ return -EIO;
+ seg->type = be32_to_cpup(p++);
+
+ p = xdr_inline_decode(xdr, seg->type == NFS4_CONTENT_DATA ? 12 : 16);
+ if (!p)
+ return -EIO;
+ p = xdr_decode_hyper(p, &seg->offset);
+
+ if (seg->type == NFS4_CONTENT_DATA) {
+ struct xdr_buf buf;
+ uint32_t len = be32_to_cpup(p);
+
+ seg->data.length = len;
+ seg->data.from = xdr_stream_pos(xdr);
- if (recvd < length)
- return 1;
+ if (!xdr_stream_subsegment(xdr, &buf, xdr_align_size(len)))
+ return -EIO;
+ } else if (seg->type == NFS4_CONTENT_HOLE) {
+ xdr_decode_hyper(p, &seg->hole.length);
+ } else
+ return -EINVAL;
return 0;
}
+static int process_read_plus_segment(struct xdr_stream *xdr,
+ struct nfs_pgio_args *args,
+ struct nfs_pgio_res *res,
+ struct read_plus_segment *seg)
+{
+ unsigned long offset = seg->offset;
+ unsigned long length = read_plus_segment_length(seg);
+ unsigned int bufpos;
+
+ if (offset + length < args->offset)
+ return 0;
+ else if (offset > args->offset + args->count) {
+ res->eof = 0;
+ return 0;
+ } else if (offset < args->offset) {
+ length -= (args->offset - offset);
+ offset = args->offset;
+ } else if (offset + length > args->offset + args->count) {
+ length = (args->offset + args->count) - offset;
+ res->eof = 0;
+ }
+
+ bufpos = xdr->buf->head[0].iov_len + (offset - args->offset);
+ if (seg->type == NFS4_CONTENT_HOLE)
+ return xdr_stream_zero(xdr, bufpos, length);
+ else
+ return xdr_stream_move_subsegment(xdr, seg->data.from, bufpos, length);
+}
+
static int decode_read_plus(struct xdr_stream *xdr, struct nfs_pgio_res *res)
{
struct nfs_pgio_header *hdr =
container_of(res, struct nfs_pgio_header, res);
struct nfs_pgio_args *args = &hdr->args;
- uint32_t eof, segments, type;
+ uint32_t segments;
+ struct read_plus_segment *segs;
int status, i;
+ char scratch_buf[16];
__be32 *p;
status = decode_op_hdr(xdr, OP_READ_PLUS);
@@ -1112,38 +1125,31 @@ static int decode_read_plus(struct xdr_stream *xdr, struct nfs_pgio_res *res)
return -EIO;
res->count = 0;
- eof = be32_to_cpup(p++);
+ res->eof = be32_to_cpup(p++);
segments = be32_to_cpup(p++);
if (segments == 0)
- goto out;
-
- for (i = 0; i < segments; i++) {
- p = xdr_inline_decode(xdr, 4);
- if (!p)
- goto early_out;
+ return status;
- type = be32_to_cpup(p++);
- if (type == NFS4_CONTENT_DATA)
- status = decode_read_plus_data(xdr, args, res);
- else if (type == NFS4_CONTENT_HOLE)
- status = decode_read_plus_hole(xdr, args, res, &eof);
- else
- return -EINVAL;
+ segs = kmalloc_array(segments, sizeof(*segs), GFP_KERNEL);
+ if (!segs)
+ return -ENOMEM;
+ xdr_set_scratch_buffer(xdr, &scratch_buf, 32);
+ status = -EIO;
+ for (i = 0; i < segments; i++) {
+ status = decode_read_plus_segment(xdr, &segs[i]);
if (status < 0)
- return status;
- if (status > 0)
- goto early_out;
+ goto out;
}
+ xdr_set_pagelen(xdr, xdr_align_size(args->count));
+ for (i = segments; i > 0; i--)
+ res->count += process_read_plus_segment(xdr, args, res, &segs[i-1]);
+ status = 0;
+
out:
- res->eof = eof;
- return 0;
-early_out:
- if (unlikely(!i))
- return -EIO;
- res->eof = 0;
- return 0;
+ kfree(segs);
+ return status;
}
static int decode_seek(struct xdr_stream *xdr, struct nfs42_seek_res *res)
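
As an aside to the reworked READ_PLUS decoding above: segments are first decoded into an array, then each one is clamped against the requested byte window and either zero-filled (hole) or moved into place (data). A small standalone sketch of the clamping arithmetic used by process_read_plus_segment(); the eof bookkeeping and the XDR buffer manipulation are omitted, and clamp_segment is an illustrative name only:

#include <stdio.h>
#include <stdint.h>

/*
 * Clamp one READ_PLUS segment [seg_off, seg_off + seg_len) against the
 * requested window [req_off, req_off + req_count). Returns the number
 * of bytes of the segment that fall inside the window.
 */
static uint64_t clamp_segment(uint64_t seg_off, uint64_t seg_len,
			      uint64_t req_off, uint64_t req_count)
{
	if (seg_off + seg_len < req_off || seg_off > req_off + req_count)
		return 0;				/* no overlap */
	if (seg_off < req_off) {			/* starts before the window */
		seg_len -= req_off - seg_off;
		seg_off = req_off;
	}
	if (seg_off + seg_len > req_off + req_count)	/* runs past the end */
		seg_len = req_off + req_count - seg_off;
	return seg_len;
}

int main(void)
{
	/* Request is 16 KiB at offset 4096; segment is 8 KiB at offset 0. */
	printf("%llu bytes usable\n",
	       (unsigned long long)clamp_segment(0, 8192, 4096, 16384));
	return 0;
}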
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 47a6cf892c95..3c5678aec006 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -1161,9 +1161,9 @@ static int nfs4_init_server(struct nfs_server *server, struct fs_context *fc)
return error;
if (ctx->rsize)
- server->rsize = nfs_block_size(ctx->rsize, NULL);
+ server->rsize = nfs_io_size(ctx->rsize, server->nfs_client->cl_proto);
if (ctx->wsize)
- server->wsize = nfs_block_size(ctx->wsize, NULL);
+ server->wsize = nfs_io_size(ctx->wsize, server->nfs_client->cl_proto);
server->acregmin = ctx->acregmin * HZ;
server->acregmax = ctx->acregmax * HZ;
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index e88f6b18445e..9eb181287879 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -340,6 +340,11 @@ static struct file *__nfs42_ssc_open(struct vfsmount *ss_mnt,
goto out;
}
+ if (!S_ISREG(fattr->mode)) {
+ res = ERR_PTR(-EBADF);
+ goto out;
+ }
+
res = ERR_PTR(-ENOMEM);
len = strlen(SSC_READ_NAME_BODY) + 16;
read_name = kzalloc(len, GFP_KERNEL);
@@ -357,6 +362,7 @@ static struct file *__nfs42_ssc_open(struct vfsmount *ss_mnt,
r_ino->i_fop);
if (IS_ERR(filep)) {
res = ERR_CAST(filep);
+ iput(r_ino);
goto out_free_name;
}
diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c
index f331866dd418..ec6afd3c4bca 100644
--- a/fs/nfs/nfs4idmap.c
+++ b/fs/nfs/nfs4idmap.c
@@ -561,22 +561,20 @@ nfs_idmap_prepare_pipe_upcall(struct idmap *idmap,
return true;
}
-static void
-nfs_idmap_complete_pipe_upcall_locked(struct idmap *idmap, int ret)
+static void nfs_idmap_complete_pipe_upcall(struct idmap_legacy_upcalldata *data,
+ int ret)
{
- struct key *authkey = idmap->idmap_upcall_data->authkey;
-
- kfree(idmap->idmap_upcall_data);
- idmap->idmap_upcall_data = NULL;
- complete_request_key(authkey, ret);
- key_put(authkey);
+ complete_request_key(data->authkey, ret);
+ key_put(data->authkey);
+ kfree(data);
}
-static void
-nfs_idmap_abort_pipe_upcall(struct idmap *idmap, int ret)
+static void nfs_idmap_abort_pipe_upcall(struct idmap *idmap,
+ struct idmap_legacy_upcalldata *data,
+ int ret)
{
- if (idmap->idmap_upcall_data != NULL)
- nfs_idmap_complete_pipe_upcall_locked(idmap, ret);
+ if (cmpxchg(&idmap->idmap_upcall_data, data, NULL) == data)
+ nfs_idmap_complete_pipe_upcall(data, ret);
}
static int nfs_idmap_legacy_upcall(struct key *authkey, void *aux)
@@ -613,7 +611,7 @@ static int nfs_idmap_legacy_upcall(struct key *authkey, void *aux)
ret = rpc_queue_upcall(idmap->idmap_pipe, msg);
if (ret < 0)
- nfs_idmap_abort_pipe_upcall(idmap, ret);
+ nfs_idmap_abort_pipe_upcall(idmap, data, ret);
return ret;
out2:
@@ -669,6 +667,7 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
struct request_key_auth *rka;
struct rpc_inode *rpci = RPC_I(file_inode(filp));
struct idmap *idmap = (struct idmap *)rpci->private;
+ struct idmap_legacy_upcalldata *data;
struct key *authkey;
struct idmap_msg im;
size_t namelen_in;
@@ -678,10 +677,11 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
* will have been woken up and someone else may now have used
* idmap_key_cons - so after this point we may no longer touch it.
*/
- if (idmap->idmap_upcall_data == NULL)
+ data = xchg(&idmap->idmap_upcall_data, NULL);
+ if (data == NULL)
goto out_noupcall;
- authkey = idmap->idmap_upcall_data->authkey;
+ authkey = data->authkey;
rka = get_request_key_auth(authkey);
if (mlen != sizeof(im)) {
@@ -703,18 +703,17 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
if (namelen_in == 0 || namelen_in == IDMAP_NAMESZ) {
ret = -EINVAL;
goto out;
-}
+ }
- ret = nfs_idmap_read_and_verify_message(&im,
- &idmap->idmap_upcall_data->idmap_msg,
- rka->target_key, authkey);
+ ret = nfs_idmap_read_and_verify_message(&im, &data->idmap_msg,
+ rka->target_key, authkey);
if (ret >= 0) {
key_set_timeout(rka->target_key, nfs_idmap_cache_timeout);
ret = mlen;
}
out:
- nfs_idmap_complete_pipe_upcall_locked(idmap, ret);
+ nfs_idmap_complete_pipe_upcall(data, ret);
out_noupcall:
return ret;
}
@@ -728,7 +727,7 @@ idmap_pipe_destroy_msg(struct rpc_pipe_msg *msg)
struct idmap *idmap = data->idmap;
if (msg->errno)
- nfs_idmap_abort_pipe_upcall(idmap, msg->errno);
+ nfs_idmap_abort_pipe_upcall(idmap, data, msg->errno);
}
static void
@@ -736,8 +735,11 @@ idmap_release_pipe(struct inode *inode)
{
struct rpc_inode *rpci = RPC_I(inode);
struct idmap *idmap = (struct idmap *)rpci->private;
+ struct idmap_legacy_upcalldata *data;
- nfs_idmap_abort_pipe_upcall(idmap, -EPIPE);
+ data = xchg(&idmap->idmap_upcall_data, NULL);
+ if (data)
+ nfs_idmap_complete_pipe_upcall(data, -EPIPE);
}
int nfs_map_name_to_uid(const struct nfs_server *server, const char *name, size_t namelen, kuid_t *uid)
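
As an aside to the idmap change above: the upcall data pointer is now claimed atomically with xchg()/cmpxchg(), so exactly one of the downcall, abort, or pipe-release paths completes the request and frees the data. A small userspace sketch of that claim-exactly-once idiom using C11 atomics; the payload type and names here are stand-ins, not NFS structures:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct upcall_data {
	int request_id;
};

static _Atomic(struct upcall_data *) pending_upcall;

/* Whichever caller succeeds in swapping the pointer to NULL owns it and
 * is the only one allowed to complete and free it. */
static void complete_upcall(int ret)
{
	struct upcall_data *data = atomic_exchange(&pending_upcall, NULL);

	if (!data)
		return;			/* somebody else already claimed it */
	printf("completing request %d with %d\n", data->request_id, ret);
	free(data);
}

int main(void)
{
	struct upcall_data *data = malloc(sizeof(*data));

	data->request_id = 42;
	atomic_store(&pending_upcall, data);

	complete_upcall(0);	/* claims and frees the data */
	complete_upcall(-32);	/* harmless no-op, like the -EPIPE path */
	return 0;
}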
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index bb0e84a46d61..3ed14a2a84a4 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -784,10 +784,9 @@ static void nfs4_slot_sequence_record_sent(struct nfs4_slot *slot,
if ((s32)(seqnr - slot->seq_nr_highest_sent) > 0)
slot->seq_nr_highest_sent = seqnr;
}
-static void nfs4_slot_sequence_acked(struct nfs4_slot *slot,
- u32 seqnr)
+static void nfs4_slot_sequence_acked(struct nfs4_slot *slot, u32 seqnr)
{
- slot->seq_nr_highest_sent = seqnr;
+ nfs4_slot_sequence_record_sent(slot, seqnr);
slot->seq_nr_last_acked = seqnr;
}
@@ -854,7 +853,6 @@ static int nfs41_sequence_process(struct rpc_task *task,
__func__,
slot->slot_nr,
slot->seq_nr);
- nfs4_slot_sequence_acked(slot, slot->seq_nr);
goto out_retry;
case -NFS4ERR_RETRY_UNCACHED_REP:
case -NFS4ERR_SEQ_FALSE_RETRY:
@@ -3098,12 +3096,13 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
}
out:
- if (opendata->lgp) {
- nfs4_lgopen_release(opendata->lgp);
- opendata->lgp = NULL;
- }
- if (!opendata->cancelled)
+ if (!opendata->cancelled) {
+ if (opendata->lgp) {
+ nfs4_lgopen_release(opendata->lgp);
+ opendata->lgp = NULL;
+ }
nfs4_sequence_free_slot(&opendata->o_res.seq_res);
+ }
return ret;
}
@@ -8924,6 +8923,9 @@ void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
if (status == 0)
rpc_clnt_xprt_switch_add_xprt(clnt, xprt);
+ else if (rpc_clnt_xprt_switch_has_addr(clnt,
+ (struct sockaddr *)&xprt->addr))
+ rpc_clnt_xprt_switch_remove_xprt(clnt, xprt);
rpc_put_task(task);
}
@@ -9248,6 +9250,13 @@ int nfs4_proc_create_session(struct nfs_client *clp, const struct cred *cred)
int status;
unsigned *ptr;
struct nfs4_session *session = clp->cl_session;
+ struct nfs4_add_xprt_data xprtdata = {
+ .clp = clp,
+ };
+ struct rpc_add_xprt_test rpcdata = {
+ .add_xprt_test = clp->cl_mvops->session_trunk,
+ .data = &xprtdata,
+ };
dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
@@ -9264,6 +9273,7 @@ int nfs4_proc_create_session(struct nfs_client *clp, const struct cred *cred)
ptr = (unsigned *)&session->sess_id.data[0];
dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
+ rpc_clnt_probe_trunked_xprts(clp->cl_rpcclient, &rpcdata);
out:
return status;
}
@@ -9293,6 +9303,7 @@ int nfs4_proc_destroy_session(struct nfs4_session *session,
if (status)
dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
"Session has been destroyed regardless...\n", status);
+ rpc_clnt_manage_trunked_xprts(session->clp->cl_rpcclient);
return status;
}
@@ -9477,6 +9488,9 @@ static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nf
rpc_delay(task, NFS4_POLL_RETRY_MAX);
fallthrough;
case -NFS4ERR_RETRY_UNCACHED_REP:
+ case -EACCES:
+ dprintk("%s: failed to reclaim complete error %d for server %s, retrying\n",
+ __func__, task->tk_status, clp->cl_hostname);
return -EAGAIN;
case -NFS4ERR_BADSESSION:
case -NFS4ERR_DEADSESSION:
diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
index 012bd7339862..8c6cc58679ff 100644
--- a/fs/nfs/nfstrace.h
+++ b/fs/nfs/nfstrace.h
@@ -1137,7 +1137,7 @@ TRACE_EVENT(nfs_readpage_done,
__field(u32, arg_count)
__field(u32, res_count)
__field(bool, eof)
- __field(int, status)
+ __field(int, error)
),
TP_fast_assign(
@@ -1146,7 +1146,7 @@ TRACE_EVENT(nfs_readpage_done,
const struct nfs_fh *fh = hdr->args.fh ?
hdr->args.fh : &nfsi->fh;
- __entry->status = task->tk_status;
+ __entry->error = task->tk_status;
__entry->offset = hdr->args.offset;
__entry->arg_count = hdr->args.count;
__entry->res_count = hdr->res.count;
@@ -1157,14 +1157,13 @@ TRACE_EVENT(nfs_readpage_done,
),
TP_printk(
- "fileid=%02x:%02x:%llu fhandle=0x%08x "
- "offset=%lld count=%u res=%u status=%d%s",
+ "error=%d fileid=%02x:%02x:%llu fhandle=0x%08x "
+ "offset=%lld count=%u res=%u%s", __entry->error,
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid,
__entry->fhandle,
(long long)__entry->offset, __entry->arg_count,
- __entry->res_count, __entry->status,
- __entry->eof ? " eof" : ""
+ __entry->res_count, __entry->eof ? " eof" : ""
)
);
@@ -1184,7 +1183,7 @@ TRACE_EVENT(nfs_readpage_short,
__field(u32, arg_count)
__field(u32, res_count)
__field(bool, eof)
- __field(int, status)
+ __field(int, error)
),
TP_fast_assign(
@@ -1193,7 +1192,7 @@ TRACE_EVENT(nfs_readpage_short,
const struct nfs_fh *fh = hdr->args.fh ?
hdr->args.fh : &nfsi->fh;
- __entry->status = task->tk_status;
+ __entry->error = task->tk_status;
__entry->offset = hdr->args.offset;
__entry->arg_count = hdr->args.count;
__entry->res_count = hdr->res.count;
@@ -1204,14 +1203,13 @@ TRACE_EVENT(nfs_readpage_short,
),
TP_printk(
- "fileid=%02x:%02x:%llu fhandle=0x%08x "
- "offset=%lld count=%u res=%u status=%d%s",
+ "error=%d fileid=%02x:%02x:%llu fhandle=0x%08x "
+ "offset=%lld count=%u res=%u%s", __entry->error,
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid,
__entry->fhandle,
(long long)__entry->offset, __entry->arg_count,
- __entry->res_count, __entry->status,
- __entry->eof ? " eof" : ""
+ __entry->res_count, __entry->eof ? " eof" : ""
)
);
@@ -1323,7 +1321,7 @@ TRACE_EVENT(nfs_pgio_error,
__field(u32, arg_count)
__field(u32, res_count)
__field(loff_t, pos)
- __field(int, status)
+ __field(int, error)
),
TP_fast_assign(
@@ -1332,7 +1330,7 @@ TRACE_EVENT(nfs_pgio_error,
const struct nfs_fh *fh = hdr->args.fh ?
hdr->args.fh : &nfsi->fh;
- __entry->status = error;
+ __entry->error = error;
__entry->offset = hdr->args.offset;
__entry->arg_count = hdr->args.count;
__entry->res_count = hdr->res.count;
@@ -1341,12 +1339,12 @@ TRACE_EVENT(nfs_pgio_error,
__entry->fhandle = nfs_fhandle_hash(fh);
),
- TP_printk("fileid=%02x:%02x:%llu fhandle=0x%08x "
- "offset=%lld count=%u res=%u pos=%llu status=%d",
+ TP_printk("error=%d fileid=%02x:%02x:%llu fhandle=0x%08x "
+ "offset=%lld count=%u res=%u pos=%llu", __entry->error,
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid, __entry->fhandle,
(long long)__entry->offset, __entry->arg_count, __entry->res_count,
- __entry->pos, __entry->status
+ __entry->pos
)
);
@@ -1406,7 +1404,7 @@ TRACE_EVENT(nfs_writeback_done,
__field(loff_t, offset)
__field(u32, arg_count)
__field(u32, res_count)
- __field(int, status)
+ __field(int, error)
__field(unsigned long, stable)
__array(char, verifier, NFS4_VERIFIER_SIZE)
),
@@ -1418,7 +1416,7 @@ TRACE_EVENT(nfs_writeback_done,
hdr->args.fh : &nfsi->fh;
const struct nfs_writeverf *verf = hdr->res.verf;
- __entry->status = task->tk_status;
+ __entry->error = task->tk_status;
__entry->offset = hdr->args.offset;
__entry->arg_count = hdr->args.count;
__entry->res_count = hdr->res.count;
@@ -1432,14 +1430,14 @@ TRACE_EVENT(nfs_writeback_done,
),
TP_printk(
- "fileid=%02x:%02x:%llu fhandle=0x%08x "
- "offset=%lld count=%u res=%u status=%d stable=%s "
- "verifier=%s",
+ "error=%d fileid=%02x:%02x:%llu fhandle=0x%08x "
+ "offset=%lld count=%u res=%u stable=%s "
+ "verifier=%s", __entry->error,
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid,
__entry->fhandle,
(long long)__entry->offset, __entry->arg_count,
- __entry->res_count, __entry->status,
+ __entry->res_count,
show_nfs_stable_how(__entry->stable),
show_nfs4_verifier(__entry->verifier)
)
@@ -1447,44 +1445,50 @@ TRACE_EVENT(nfs_writeback_done,
DECLARE_EVENT_CLASS(nfs_page_error_class,
TP_PROTO(
+ const struct inode *inode,
const struct nfs_page *req,
int error
),
- TP_ARGS(req, error),
+ TP_ARGS(inode, req, error),
TP_STRUCT__entry(
- __field(const void *, req)
- __field(pgoff_t, index)
- __field(unsigned int, offset)
- __field(unsigned int, pgbase)
- __field(unsigned int, bytes)
+ __field(dev_t, dev)
+ __field(u32, fhandle)
+ __field(u64, fileid)
+ __field(loff_t, offset)
+ __field(unsigned int, count)
__field(int, error)
),
TP_fast_assign(
- __entry->req = req;
- __entry->index = req->wb_index;
- __entry->offset = req->wb_offset;
- __entry->pgbase = req->wb_pgbase;
- __entry->bytes = req->wb_bytes;
+ const struct nfs_inode *nfsi = NFS_I(inode);
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->fileid = nfsi->fileid;
+ __entry->fhandle = nfs_fhandle_hash(&nfsi->fh);
+ __entry->offset = req_offset(req);
+ __entry->count = req->wb_bytes;
__entry->error = error;
),
TP_printk(
- "req=%p index=%lu offset=%u pgbase=%u bytes=%u error=%d",
- __entry->req, __entry->index, __entry->offset,
- __entry->pgbase, __entry->bytes, __entry->error
+ "error=%d fileid=%02x:%02x:%llu fhandle=0x%08x "
+ "offset=%lld count=%u", __entry->error,
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long long)__entry->fileid,
+ __entry->fhandle, __entry->offset,
+ __entry->count
)
);
#define DEFINE_NFS_PAGEERR_EVENT(name) \
DEFINE_EVENT(nfs_page_error_class, name, \
TP_PROTO( \
+ const struct inode *inode, \
const struct nfs_page *req, \
int error \
), \
- TP_ARGS(req, error))
+ TP_ARGS(inode, req, error))
DEFINE_NFS_PAGEERR_EVENT(nfs_write_error);
DEFINE_NFS_PAGEERR_EVENT(nfs_comp_error);
@@ -1541,7 +1545,7 @@ TRACE_EVENT(nfs_commit_done,
__field(u32, fhandle)
__field(u64, fileid)
__field(loff_t, offset)
- __field(int, status)
+ __field(int, error)
__field(unsigned long, stable)
__array(char, verifier, NFS4_VERIFIER_SIZE)
),
@@ -1553,7 +1557,7 @@ TRACE_EVENT(nfs_commit_done,
data->args.fh : &nfsi->fh;
const struct nfs_writeverf *verf = data->res.verf;
- __entry->status = task->tk_status;
+ __entry->error = task->tk_status;
__entry->offset = data->args.offset;
__entry->stable = verf->committed;
memcpy(__entry->verifier,
@@ -1565,17 +1569,83 @@ TRACE_EVENT(nfs_commit_done,
),
TP_printk(
- "fileid=%02x:%02x:%llu fhandle=0x%08x "
- "offset=%lld status=%d stable=%s verifier=%s",
+ "error=%d fileid=%02x:%02x:%llu fhandle=0x%08x "
+ "offset=%lld stable=%s verifier=%s", __entry->error,
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid,
__entry->fhandle,
- (long long)__entry->offset, __entry->status,
+ (long long)__entry->offset,
show_nfs_stable_how(__entry->stable),
show_nfs4_verifier(__entry->verifier)
)
);
+#define nfs_show_direct_req_flags(v) \
+ __print_flags(v, "|", \
+ { NFS_ODIRECT_DO_COMMIT, "DO_COMMIT" }, \
+ { NFS_ODIRECT_RESCHED_WRITES, "RESCHED_WRITES" }, \
+ { NFS_ODIRECT_SHOULD_DIRTY, "SHOULD DIRTY" }, \
+ { NFS_ODIRECT_DONE, "DONE" } )
+
+DECLARE_EVENT_CLASS(nfs_direct_req_class,
+ TP_PROTO(
+ const struct nfs_direct_req *dreq
+ ),
+
+ TP_ARGS(dreq),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(u64, fileid)
+ __field(u32, fhandle)
+ __field(loff_t, offset)
+ __field(ssize_t, count)
+ __field(ssize_t, bytes_left)
+ __field(ssize_t, error)
+ __field(int, flags)
+ ),
+
+ TP_fast_assign(
+ const struct inode *inode = dreq->inode;
+ const struct nfs_inode *nfsi = NFS_I(inode);
+ const struct nfs_fh *fh = &nfsi->fh;
+
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->fileid = nfsi->fileid;
+ __entry->fhandle = nfs_fhandle_hash(fh);
+ __entry->offset = dreq->io_start;
+ __entry->count = dreq->count;
+ __entry->bytes_left = dreq->bytes_left;
+ __entry->error = dreq->error;
+ __entry->flags = dreq->flags;
+ ),
+
+ TP_printk(
+ "error=%zd fileid=%02x:%02x:%llu fhandle=0x%08x "
+ "offset=%lld count=%zd bytes_left=%zd flags=%s",
+ __entry->error, MAJOR(__entry->dev),
+ MINOR(__entry->dev),
+ (unsigned long long)__entry->fileid,
+ __entry->fhandle, __entry->offset,
+ __entry->count, __entry->bytes_left,
+ nfs_show_direct_req_flags(__entry->flags)
+ )
+);
+
+#define DEFINE_NFS_DIRECT_REQ_EVENT(name) \
+ DEFINE_EVENT(nfs_direct_req_class, name, \
+ TP_PROTO( \
+ const struct nfs_direct_req *dreq \
+ ), \
+ TP_ARGS(dreq))
+
+DEFINE_NFS_DIRECT_REQ_EVENT(nfs_direct_commit_complete);
+DEFINE_NFS_DIRECT_REQ_EVENT(nfs_direct_resched_write);
+DEFINE_NFS_DIRECT_REQ_EVENT(nfs_direct_write_complete);
+DEFINE_NFS_DIRECT_REQ_EVENT(nfs_direct_write_completion);
+DEFINE_NFS_DIRECT_REQ_EVENT(nfs_direct_write_schedule_iovec);
+DEFINE_NFS_DIRECT_REQ_EVENT(nfs_direct_write_reschedule_io);
+
TRACE_EVENT(nfs_fh_to_dentry,
TP_PROTO(
const struct super_block *sb,
@@ -1609,6 +1679,65 @@ TRACE_EVENT(nfs_fh_to_dentry,
)
);
+TRACE_EVENT(nfs_mount_assign,
+ TP_PROTO(
+ const char *option,
+ const char *value
+ ),
+
+ TP_ARGS(option, value),
+
+ TP_STRUCT__entry(
+ __string(option, option)
+ __string(value, value)
+ ),
+
+ TP_fast_assign(
+ __assign_str(option, option);
+ __assign_str(value, value);
+ ),
+
+ TP_printk("option %s=%s",
+ __get_str(option), __get_str(value)
+ )
+);
+
+TRACE_EVENT(nfs_mount_option,
+ TP_PROTO(
+ const struct fs_parameter *param
+ ),
+
+ TP_ARGS(param),
+
+ TP_STRUCT__entry(
+ __string(option, param->key)
+ ),
+
+ TP_fast_assign(
+ __assign_str(option, param->key);
+ ),
+
+ TP_printk("option %s", __get_str(option))
+);
+
+TRACE_EVENT(nfs_mount_path,
+ TP_PROTO(
+ const char *path
+ ),
+
+ TP_ARGS(path),
+
+ TP_STRUCT__entry(
+ __string(path, path)
+ ),
+
+ TP_fast_assign(
+ __assign_str(path, path);
+ ),
+
+ TP_printk("path='%s'", __get_str(path))
+);
+
DECLARE_EVENT_CLASS(nfs_xdr_event,
TP_PROTO(
const struct xdr_stream *xdr,
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 41a9b6b58fb9..2613b7e36eb9 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -2817,7 +2817,6 @@ int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *hdr)
/* Resend all requests through the MDS */
nfs_pageio_init_write(&pgio, hdr->inode, FLUSH_STABLE, true,
hdr->completion_ops);
- set_bit(NFS_CONTEXT_RESEND_WRITES, &hdr->args.context->flags);
return nfs_pageio_resend(&pgio, hdr);
}
EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 6ab5eeb000dc..82944e14fcea 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -149,7 +149,7 @@ int __init register_nfs_fs(void)
ret = nfs_register_sysctl();
if (ret < 0)
goto error_2;
- ret = register_shrinker(&acl_shrinker);
+ ret = register_shrinker(&acl_shrinker, "nfs-acl");
if (ret < 0)
goto error_3;
#ifdef CONFIG_NFS_V4_2
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 69569696dde0..1843fa235d9b 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -592,7 +592,8 @@ nfs_lock_and_join_requests(struct page *page)
static void nfs_write_error(struct nfs_page *req, int error)
{
- trace_nfs_write_error(req, error);
+ trace_nfs_write_error(page_file_mapping(req->wb_page)->host, req,
+ error);
nfs_mapping_set_error(req->wb_page, error);
nfs_inode_remove_request(req);
nfs_end_page_writeback(req);
@@ -1000,7 +1001,7 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr)
nfs_list_remove_request(req);
if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
(hdr->good_bytes < bytes)) {
- trace_nfs_comp_error(req, hdr->error);
+ trace_nfs_comp_error(hdr->inode, req, hdr->error);
nfs_mapping_set_error(req->wb_page, hdr->error);
goto remove_req;
}
@@ -1419,10 +1420,12 @@ static void nfs_initiate_write(struct nfs_pgio_header *hdr,
*/
static void nfs_redirty_request(struct nfs_page *req)
{
+ struct nfs_inode *nfsi = NFS_I(page_file_mapping(req->wb_page)->host);
+
/* Bump the transmission count */
req->wb_nio++;
nfs_mark_request_dirty(req);
- set_bit(NFS_CONTEXT_RESEND_WRITES, &nfs_req_openctx(req)->flags);
+ atomic_long_inc(&nfsi->redirtied_pages);
nfs_end_page_writeback(req);
nfs_release_request(req);
}
@@ -1444,8 +1447,6 @@ static void nfs_async_write_error(struct list_head *head, int error)
static void nfs_async_write_reschedule_io(struct nfs_pgio_header *hdr)
{
nfs_async_write_error(&hdr->pages, 0);
- filemap_fdatawrite_range(hdr->inode->i_mapping, hdr->args.offset,
- hdr->args.offset + hdr->args.count - 1);
}
static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = {
@@ -1576,25 +1577,37 @@ static int nfs_writeback_done(struct rpc_task *task,
nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, hdr->res.count);
trace_nfs_writeback_done(task, hdr);
- if (hdr->res.verf->committed < hdr->args.stable &&
- task->tk_status >= 0) {
- /* We tried a write call, but the server did not
- * commit data to stable storage even though we
- * requested it.
- * Note: There is a known bug in Tru64 < 5.0 in which
- * the server reports NFS_DATA_SYNC, but performs
- * NFS_FILE_SYNC. We therefore implement this checking
- * as a dprintk() in order to avoid filling syslog.
- */
- static unsigned long complain;
+ if (task->tk_status >= 0) {
+ enum nfs3_stable_how committed = hdr->res.verf->committed;
+
+ if (committed == NFS_UNSTABLE) {
+ /*
+ * We have some uncommitted data on the server at
+ * this point, so ensure that we keep track of that
+ * fact irrespective of what later writes do.
+ */
+ set_bit(NFS_IOHDR_UNSTABLE_WRITES, &hdr->flags);
+ }
- /* Note this will print the MDS for a DS write */
- if (time_before(complain, jiffies)) {
- dprintk("NFS: faulty NFS server %s:"
- " (committed = %d) != (stable = %d)\n",
- NFS_SERVER(inode)->nfs_client->cl_hostname,
- hdr->res.verf->committed, hdr->args.stable);
- complain = jiffies + 300 * HZ;
+ if (committed < hdr->args.stable) {
+ /* We tried a write call, but the server did not
+ * commit data to stable storage even though we
+ * requested it.
+ * Note: There is a known bug in Tru64 < 5.0 in which
+ * the server reports NFS_DATA_SYNC, but performs
+ * NFS_FILE_SYNC. We therefore implement this checking
+ * as a dprintk() in order to avoid filling syslog.
+ */
+ static unsigned long complain;
+
+ /* Note this will print the MDS for a DS write */
+ if (time_before(complain, jiffies)) {
+ dprintk("NFS: faulty NFS server %s:"
+ " (committed = %d) != (stable = %d)\n",
+ NFS_SERVER(inode)->nfs_client->cl_hostname,
+ committed, hdr->args.stable);
+ complain = jiffies + 300 * HZ;
+ }
}
}
@@ -1872,7 +1885,8 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
(long long)req_offset(req));
if (status < 0) {
if (req->wb_page) {
- trace_nfs_commit_error(req, status);
+ trace_nfs_commit_error(data->inode, req,
+ status);
nfs_mapping_set_error(req->wb_page, status);
nfs_inode_remove_request(req);
}
@@ -1892,7 +1906,7 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
/* We have a mismatch. Write the page again */
dprintk_cont(" mismatch\n");
nfs_mark_request_dirty(req);
- set_bit(NFS_CONTEXT_RESEND_WRITES, &nfs_req_openctx(req)->flags);
+ atomic_long_inc(&NFS_I(data->inode)->redirtied_pages);
next:
nfs_unlock_and_release_request(req);
/* Latency breaker */
diff --git a/fs/nfsd/acl.h b/fs/nfsd/acl.h
index ba14d2f4b64f..4b7324458a94 100644
--- a/fs/nfsd/acl.h
+++ b/fs/nfsd/acl.h
@@ -38,6 +38,8 @@
struct nfs4_acl;
struct svc_fh;
struct svc_rqst;
+struct nfsd_attrs;
+enum nfs_ftype4;
int nfs4_acl_bytes(int entries);
int nfs4_acl_get_whotype(char *, u32);
@@ -45,7 +47,7 @@ __be32 nfs4_acl_write_who(struct xdr_stream *xdr, int who);
int nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry,
struct nfs4_acl **acl);
-__be32 nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
- struct nfs4_acl *acl);
+__be32 nfsd4_acl_to_attr(enum nfs_ftype4 type, struct nfs4_acl *acl,
+ struct nfsd_attrs *attr);
#endif /* LINUX_NFS4_ACL_H */
diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
index 9cb2d590c036..eeed4ae5b4ad 100644
--- a/fs/nfsd/filecache.c
+++ b/fs/nfsd/filecache.c
@@ -13,6 +13,7 @@
#include <linux/fsnotify_backend.h>
#include <linux/fsnotify.h>
#include <linux/seq_file.h>
+#include <linux/rhashtable.h>
#include "vfs.h"
#include "nfsd.h"
@@ -21,28 +22,19 @@
#include "filecache.h"
#include "trace.h"
-#define NFSDDBG_FACILITY NFSDDBG_FH
-
-/* FIXME: dynamically size this for the machine somehow? */
-#define NFSD_FILE_HASH_BITS 12
-#define NFSD_FILE_HASH_SIZE (1 << NFSD_FILE_HASH_BITS)
#define NFSD_LAUNDRETTE_DELAY (2 * HZ)
-#define NFSD_FILE_SHUTDOWN (1)
-#define NFSD_FILE_LRU_THRESHOLD (4096UL)
-#define NFSD_FILE_LRU_LIMIT (NFSD_FILE_LRU_THRESHOLD << 2)
+#define NFSD_FILE_CACHE_UP (0)
/* We only care about NFSD_MAY_READ/WRITE for this cache */
#define NFSD_FILE_MAY_MASK (NFSD_MAY_READ|NFSD_MAY_WRITE)
-struct nfsd_fcache_bucket {
- struct hlist_head nfb_head;
- spinlock_t nfb_lock;
- unsigned int nfb_count;
- unsigned int nfb_maxcount;
-};
-
static DEFINE_PER_CPU(unsigned long, nfsd_file_cache_hits);
+static DEFINE_PER_CPU(unsigned long, nfsd_file_acquisitions);
+static DEFINE_PER_CPU(unsigned long, nfsd_file_releases);
+static DEFINE_PER_CPU(unsigned long, nfsd_file_total_age);
+static DEFINE_PER_CPU(unsigned long, nfsd_file_pages_flushed);
+static DEFINE_PER_CPU(unsigned long, nfsd_file_evictions);
struct nfsd_fcache_disposal {
struct work_struct work;
@@ -54,21 +46,146 @@ static struct workqueue_struct *nfsd_filecache_wq __read_mostly;
static struct kmem_cache *nfsd_file_slab;
static struct kmem_cache *nfsd_file_mark_slab;
-static struct nfsd_fcache_bucket *nfsd_file_hashtbl;
static struct list_lru nfsd_file_lru;
-static long nfsd_file_lru_flags;
+static unsigned long nfsd_file_flags;
static struct fsnotify_group *nfsd_file_fsnotify_group;
-static atomic_long_t nfsd_filecache_count;
static struct delayed_work nfsd_filecache_laundrette;
+static struct rhashtable nfsd_file_rhash_tbl
+ ____cacheline_aligned_in_smp;
+
+enum nfsd_file_lookup_type {
+ NFSD_FILE_KEY_INODE,
+ NFSD_FILE_KEY_FULL,
+};
+
+struct nfsd_file_lookup_key {
+ struct inode *inode;
+ struct net *net;
+ const struct cred *cred;
+ unsigned char need;
+ enum nfsd_file_lookup_type type;
+};
+
+/*
+ * The returned hash value is based solely on the address of an in-code
+ * inode, a pointer to a slab-allocated object. The entropy in such a
+ * pointer is concentrated in its middle bits.
+ */
+static u32 nfsd_file_inode_hash(const struct inode *inode, u32 seed)
+{
+ unsigned long ptr = (unsigned long)inode;
+ u32 k;
+
+ k = ptr >> L1_CACHE_SHIFT;
+ k &= 0x00ffffff;
+ return jhash2(&k, 1, seed);
+}
+
+/**
+ * nfsd_file_key_hashfn - Compute the hash value of a lookup key
+ * @data: key on which to compute the hash value
+ * @len: rhash table's key_len parameter (unused)
+ * @seed: rhash table's random seed of the day
+ *
+ * Return value:
+ * Computed 32-bit hash value
+ */
+static u32 nfsd_file_key_hashfn(const void *data, u32 len, u32 seed)
+{
+ const struct nfsd_file_lookup_key *key = data;
+
+ return nfsd_file_inode_hash(key->inode, seed);
+}
+
+/**
+ * nfsd_file_obj_hashfn - Compute the hash value of an nfsd_file
+ * @data: object on which to compute the hash value
+ * @len: rhash table's key_len parameter (unused)
+ * @seed: rhash table's random seed of the day
+ *
+ * Return value:
+ * Computed 32-bit hash value
+ */
+static u32 nfsd_file_obj_hashfn(const void *data, u32 len, u32 seed)
+{
+ const struct nfsd_file *nf = data;
+
+ return nfsd_file_inode_hash(nf->nf_inode, seed);
+}
-static void nfsd_file_gc(void);
+static bool
+nfsd_match_cred(const struct cred *c1, const struct cred *c2)
+{
+ int i;
+
+ if (!uid_eq(c1->fsuid, c2->fsuid))
+ return false;
+ if (!gid_eq(c1->fsgid, c2->fsgid))
+ return false;
+ if (c1->group_info == NULL || c2->group_info == NULL)
+ return c1->group_info == c2->group_info;
+ if (c1->group_info->ngroups != c2->group_info->ngroups)
+ return false;
+ for (i = 0; i < c1->group_info->ngroups; i++) {
+ if (!gid_eq(c1->group_info->gid[i], c2->group_info->gid[i]))
+ return false;
+ }
+ return true;
+}
+
+/**
+ * nfsd_file_obj_cmpfn - Match a cache item against search criteria
+ * @arg: search criteria
+ * @ptr: cache item to check
+ *
+ * Return values:
+ * %0 - Item matches search criteria
+ * %1 - Item does not match search criteria
+ */
+static int nfsd_file_obj_cmpfn(struct rhashtable_compare_arg *arg,
+ const void *ptr)
+{
+ const struct nfsd_file_lookup_key *key = arg->key;
+ const struct nfsd_file *nf = ptr;
+
+ switch (key->type) {
+ case NFSD_FILE_KEY_INODE:
+ if (nf->nf_inode != key->inode)
+ return 1;
+ break;
+ case NFSD_FILE_KEY_FULL:
+ if (nf->nf_inode != key->inode)
+ return 1;
+ if (nf->nf_may != key->need)
+ return 1;
+ if (nf->nf_net != key->net)
+ return 1;
+ if (!nfsd_match_cred(nf->nf_cred, key->cred))
+ return 1;
+ if (test_bit(NFSD_FILE_HASHED, &nf->nf_flags) == 0)
+ return 1;
+ break;
+ }
+ return 0;
+}
+
+static const struct rhashtable_params nfsd_file_rhash_params = {
+ .key_len = sizeof_field(struct nfsd_file, nf_inode),
+ .key_offset = offsetof(struct nfsd_file, nf_inode),
+ .head_offset = offsetof(struct nfsd_file, nf_rhash),
+ .hashfn = nfsd_file_key_hashfn,
+ .obj_hashfn = nfsd_file_obj_hashfn,
+ .obj_cmpfn = nfsd_file_obj_cmpfn,
+ /* Reduce resizing churn on light workloads */
+ .min_size = 512, /* buckets */
+ .automatic_shrinking = true,
+};
static void
nfsd_file_schedule_laundrette(void)
{
- long count = atomic_long_read(&nfsd_filecache_count);
-
- if (count == 0 || test_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags))
+ if ((atomic_read(&nfsd_file_rhash_tbl.nelems) == 0) ||
+ test_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags) == 0)
return;
queue_delayed_work(system_wq, &nfsd_filecache_laundrette,
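
As an aside to the nfsd_file_inode_hash() helper added in the hunk above: the new rhashtable keys entries by inode pointer, and because inodes come from a slab the low bits (cache-line alignment) and high bits of that pointer carry little entropy, so only 24 middle bits are kept and mixed with the table's seed via jhash2(). A rough userspace sketch of that extraction, with a trivial mixer standing in for jhash2() and EX_L1_CACHE_SHIFT as an assumed value:

#include <stdio.h>
#include <stdint.h>

#define EX_L1_CACHE_SHIFT 6	/* assumed 64-byte cache lines */

/* Stand-in for jhash2(): any reasonable 32-bit mixer works here. */
static uint32_t example_mix(uint32_t k, uint32_t seed)
{
	uint32_t h = k ^ seed;

	h *= 0x9e3779b1u;
	h ^= h >> 16;
	return h;
}

/* Keep the middle bits of a slab object's address, as
 * nfsd_file_inode_hash() does, then mix them with the per-table seed. */
static uint32_t hash_object_ptr(const void *obj, uint32_t seed)
{
	unsigned long ptr = (unsigned long)obj;
	uint32_t k = (ptr >> EX_L1_CACHE_SHIFT) & 0x00ffffff;

	return example_mix(k, seed);
}

int main(void)
{
	int dummy;

	printf("hash = %08x\n", hash_object_ptr(&dummy, 0x12345678));
	return 0;
}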
@@ -111,12 +228,11 @@ nfsd_file_mark_put(struct nfsd_file_mark *nfm)
}
static struct nfsd_file_mark *
-nfsd_file_mark_find_or_create(struct nfsd_file *nf)
+nfsd_file_mark_find_or_create(struct nfsd_file *nf, struct inode *inode)
{
int err;
struct fsnotify_mark *mark;
struct nfsd_file_mark *nfm = NULL, *new;
- struct inode *inode = nf->nf_inode;
do {
fsnotify_group_lock(nfsd_file_fsnotify_group);
@@ -167,31 +283,25 @@ nfsd_file_mark_find_or_create(struct nfsd_file *nf)
}
static struct nfsd_file *
-nfsd_file_alloc(struct inode *inode, unsigned int may, unsigned int hashval,
- struct net *net)
+nfsd_file_alloc(struct nfsd_file_lookup_key *key, unsigned int may)
{
struct nfsd_file *nf;
nf = kmem_cache_alloc(nfsd_file_slab, GFP_KERNEL);
if (nf) {
- INIT_HLIST_NODE(&nf->nf_node);
INIT_LIST_HEAD(&nf->nf_lru);
+ nf->nf_birthtime = ktime_get();
nf->nf_file = NULL;
nf->nf_cred = get_current_cred();
- nf->nf_net = net;
+ nf->nf_net = key->net;
nf->nf_flags = 0;
- nf->nf_inode = inode;
- nf->nf_hashval = hashval;
- refcount_set(&nf->nf_ref, 1);
- nf->nf_may = may & NFSD_FILE_MAY_MASK;
- if (may & NFSD_MAY_NOT_BREAK_LEASE) {
- if (may & NFSD_MAY_WRITE)
- __set_bit(NFSD_FILE_BREAK_WRITE, &nf->nf_flags);
- if (may & NFSD_MAY_READ)
- __set_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags);
- }
+ __set_bit(NFSD_FILE_HASHED, &nf->nf_flags);
+ __set_bit(NFSD_FILE_PENDING, &nf->nf_flags);
+ nf->nf_inode = key->inode;
+ /* nf_ref is pre-incremented for hash table */
+ refcount_set(&nf->nf_ref, 2);
+ nf->nf_may = key->need;
nf->nf_mark = NULL;
- trace_nfsd_file_alloc(nf);
}
return nf;
}
@@ -199,8 +309,12 @@ nfsd_file_alloc(struct inode *inode, unsigned int may, unsigned int hashval,
static bool
nfsd_file_free(struct nfsd_file *nf)
{
+ s64 age = ktime_to_ms(ktime_sub(ktime_get(), nf->nf_birthtime));
bool flush = false;
+ this_cpu_inc(nfsd_file_releases);
+ this_cpu_add(nfsd_file_total_age, age);
+
trace_nfsd_file_put_final(nf);
if (nf->nf_mark)
nfsd_file_mark_put(nf->nf_mark);
@@ -210,6 +324,14 @@ nfsd_file_free(struct nfsd_file *nf)
fput(nf->nf_file);
flush = true;
}
+
+ /*
+ * If this item is still linked via nf_lru, that's a bug.
+ * WARN and leak it to preserve system stability.
+ */
+ if (WARN_ON_ONCE(!list_empty(&nf->nf_lru)))
+ return flush;
+
call_rcu(&nf->nf_rcu, nfsd_file_slab_free);
return flush;
}
@@ -240,31 +362,44 @@ nfsd_file_check_write_error(struct nfsd_file *nf)
static void
nfsd_file_flush(struct nfsd_file *nf)
{
- if (nf->nf_file && vfs_fsync(nf->nf_file, 1) != 0)
+ struct file *file = nf->nf_file;
+
+ if (!file || !(file->f_mode & FMODE_WRITE))
+ return;
+ this_cpu_add(nfsd_file_pages_flushed, file->f_mapping->nrpages);
+ if (vfs_fsync(file, 1) != 0)
nfsd_reset_write_verifier(net_generic(nf->nf_net, nfsd_net_id));
}
-static void
-nfsd_file_do_unhash(struct nfsd_file *nf)
+static void nfsd_file_lru_add(struct nfsd_file *nf)
{
- lockdep_assert_held(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
+ set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
+ if (list_lru_add(&nfsd_file_lru, &nf->nf_lru))
+ trace_nfsd_file_lru_add(nf);
+}
+static void nfsd_file_lru_remove(struct nfsd_file *nf)
+{
+ if (list_lru_del(&nfsd_file_lru, &nf->nf_lru))
+ trace_nfsd_file_lru_del(nf);
+}
+
+static void
+nfsd_file_hash_remove(struct nfsd_file *nf)
+{
trace_nfsd_file_unhash(nf);
if (nfsd_file_check_write_error(nf))
nfsd_reset_write_verifier(net_generic(nf->nf_net, nfsd_net_id));
- --nfsd_file_hashtbl[nf->nf_hashval].nfb_count;
- hlist_del_rcu(&nf->nf_node);
- atomic_long_dec(&nfsd_filecache_count);
+ rhashtable_remove_fast(&nfsd_file_rhash_tbl, &nf->nf_rhash,
+ nfsd_file_rhash_params);
}
static bool
nfsd_file_unhash(struct nfsd_file *nf)
{
if (test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
- nfsd_file_do_unhash(nf);
- if (!list_empty(&nf->nf_lru))
- list_lru_del(&nfsd_file_lru, &nf->nf_lru);
+ nfsd_file_hash_remove(nf);
return true;
}
return false;
@@ -274,17 +409,16 @@ nfsd_file_unhash(struct nfsd_file *nf)
* Return true if the file was unhashed.
*/
static bool
-nfsd_file_unhash_and_release_locked(struct nfsd_file *nf, struct list_head *dispose)
+nfsd_file_unhash_and_dispose(struct nfsd_file *nf, struct list_head *dispose)
{
- lockdep_assert_held(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
-
- trace_nfsd_file_unhash_and_release_locked(nf);
+ trace_nfsd_file_unhash_and_dispose(nf);
if (!nfsd_file_unhash(nf))
return false;
/* keep final reference for nfsd_file_lru_dispose */
if (refcount_dec_not_one(&nf->nf_ref))
return true;
+ nfsd_file_lru_remove(nf);
list_add(&nf->nf_lru, dispose);
return true;
}
@@ -296,6 +430,7 @@ nfsd_file_put_noref(struct nfsd_file *nf)
if (refcount_dec_and_test(&nf->nf_ref)) {
WARN_ON(test_bit(NFSD_FILE_HASHED, &nf->nf_flags));
+ nfsd_file_lru_remove(nf);
nfsd_file_free(nf);
}
}
@@ -305,7 +440,7 @@ nfsd_file_put(struct nfsd_file *nf)
{
might_sleep();
- set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
+ nfsd_file_lru_add(nf);
if (test_bit(NFSD_FILE_HASHED, &nf->nf_flags) == 0) {
nfsd_file_flush(nf);
nfsd_file_put_noref(nf);
@@ -314,9 +449,24 @@ nfsd_file_put(struct nfsd_file *nf)
nfsd_file_schedule_laundrette();
} else
nfsd_file_put_noref(nf);
+}
- if (atomic_long_read(&nfsd_filecache_count) >= NFSD_FILE_LRU_LIMIT)
- nfsd_file_gc();
+/**
+ * nfsd_file_close - Close an nfsd_file
+ * @nf: nfsd_file to close
+ *
+ * If this is the final reference for @nf, free it immediately.
+ * This reflects an on-the-wire CLOSE or DELEGRETURN into the
+ * VFS and exported filesystem.
+ */
+void nfsd_file_close(struct nfsd_file *nf)
+{
+ nfsd_file_put(nf);
+ if (refcount_dec_if_one(&nf->nf_ref)) {
+ nfsd_file_unhash(nf);
+ nfsd_file_lru_remove(nf);
+ nfsd_file_free(nf);
+ }
}
struct nfsd_file *
@@ -334,7 +484,7 @@ nfsd_file_dispose_list(struct list_head *dispose)
while(!list_empty(dispose)) {
nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
- list_del(&nf->nf_lru);
+ list_del_init(&nf->nf_lru);
nfsd_file_flush(nf);
nfsd_file_put_noref(nf);
}
@@ -348,7 +498,7 @@ nfsd_file_dispose_list_sync(struct list_head *dispose)
while(!list_empty(dispose)) {
nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
- list_del(&nf->nf_lru);
+ list_del_init(&nf->nf_lru);
nfsd_file_flush(nf);
if (!refcount_dec_and_test(&nf->nf_ref))
continue;
@@ -405,8 +555,19 @@ nfsd_file_dispose_list_delayed(struct list_head *dispose)
}
}
-/*
+/**
+ * nfsd_file_lru_cb - Examine an entry on the LRU list
+ * @item: LRU entry to examine
+ * @lru: controlling LRU
+ * @lock: LRU list lock (unused)
+ * @arg: dispose list
+ *
* Note this can deadlock with nfsd_file_cache_purge.
+ *
+ * Return values:
+ * %LRU_REMOVED: @item was removed from the LRU
+ * %LRU_ROTATE: @item is to be moved to the LRU tail
+ * %LRU_SKIP: @item cannot be evicted
*/
static enum lru_status
nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
@@ -427,55 +588,65 @@ nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
* counter. Here we check the counter and then test and clear the flag.
* That order is deliberate to ensure that we can do this locklessly.
*/
- if (refcount_read(&nf->nf_ref) > 1)
- goto out_skip;
+ if (refcount_read(&nf->nf_ref) > 1) {
+ list_lru_isolate(lru, &nf->nf_lru);
+ trace_nfsd_file_gc_in_use(nf);
+ return LRU_REMOVED;
+ }
/*
* Don't throw out files that are still undergoing I/O or
* that have uncleared errors pending.
*/
- if (nfsd_file_check_writeback(nf))
- goto out_skip;
+ if (nfsd_file_check_writeback(nf)) {
+ trace_nfsd_file_gc_writeback(nf);
+ return LRU_SKIP;
+ }
- if (test_and_clear_bit(NFSD_FILE_REFERENCED, &nf->nf_flags))
- goto out_skip;
+ if (test_and_clear_bit(NFSD_FILE_REFERENCED, &nf->nf_flags)) {
+ trace_nfsd_file_gc_referenced(nf);
+ return LRU_ROTATE;
+ }
- if (!test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags))
- goto out_skip;
+ if (!test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
+ trace_nfsd_file_gc_hashed(nf);
+ return LRU_SKIP;
+ }
list_lru_isolate_move(lru, &nf->nf_lru, head);
+ this_cpu_inc(nfsd_file_evictions);
+ trace_nfsd_file_gc_disposed(nf);
return LRU_REMOVED;
-out_skip:
- return LRU_SKIP;
}
-static unsigned long
-nfsd_file_lru_walk_list(struct shrink_control *sc)
+/*
+ * Unhash items on @dispose immediately, then queue them on the
+ * disposal workqueue to finish releasing them in the background.
+ *
+ * cel: Note that between the time list_lru_shrink_walk runs and
+ * now, these items are in the hash table but marked unhashed.
+ * Why release these outside of lru_cb ? There's no lock ordering
+ * problem since lru_cb currently takes no lock.
+ */
+static void nfsd_file_gc_dispose_list(struct list_head *dispose)
{
- LIST_HEAD(head);
struct nfsd_file *nf;
- unsigned long ret;
- if (sc)
- ret = list_lru_shrink_walk(&nfsd_file_lru, sc,
- nfsd_file_lru_cb, &head);
- else
- ret = list_lru_walk(&nfsd_file_lru,
- nfsd_file_lru_cb,
- &head, LONG_MAX);
- list_for_each_entry(nf, &head, nf_lru) {
- spin_lock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
- nfsd_file_do_unhash(nf);
- spin_unlock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
- }
- nfsd_file_dispose_list_delayed(&head);
- return ret;
+ list_for_each_entry(nf, dispose, nf_lru)
+ nfsd_file_hash_remove(nf);
+ nfsd_file_dispose_list_delayed(dispose);
}
static void
nfsd_file_gc(void)
{
- nfsd_file_lru_walk_list(NULL);
+ LIST_HEAD(dispose);
+ unsigned long ret;
+
+ ret = list_lru_walk(&nfsd_file_lru, nfsd_file_lru_cb,
+ &dispose, list_lru_count(&nfsd_file_lru));
+ trace_nfsd_file_gc_removed(ret, list_lru_count(&nfsd_file_lru));
+ nfsd_file_gc_dispose_list(&dispose);
}
static void
@@ -494,7 +665,14 @@ nfsd_file_lru_count(struct shrinker *s, struct shrink_control *sc)
static unsigned long
nfsd_file_lru_scan(struct shrinker *s, struct shrink_control *sc)
{
- return nfsd_file_lru_walk_list(sc);
+ LIST_HEAD(dispose);
+ unsigned long ret;
+
+ ret = list_lru_shrink_walk(&nfsd_file_lru, sc,
+ nfsd_file_lru_cb, &dispose);
+ trace_nfsd_file_shrinker_removed(ret, list_lru_count(&nfsd_file_lru));
+ nfsd_file_gc_dispose_list(&dispose);
+ return ret;
}
static struct shrinker nfsd_file_shrinker = {
@@ -503,39 +681,47 @@ static struct shrinker nfsd_file_shrinker = {
.seeks = 1,
};
-static void
-__nfsd_file_close_inode(struct inode *inode, unsigned int hashval,
- struct list_head *dispose)
+/*
+ * Find all cache items across all net namespaces that match @inode and
+ * move them to @dispose. The lookup is atomic wrt nfsd_file_acquire().
+ */
+static unsigned int
+__nfsd_file_close_inode(struct inode *inode, struct list_head *dispose)
{
- struct nfsd_file *nf;
- struct hlist_node *tmp;
+ struct nfsd_file_lookup_key key = {
+ .type = NFSD_FILE_KEY_INODE,
+ .inode = inode,
+ };
+ unsigned int count = 0;
+ struct nfsd_file *nf;
- spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
- hlist_for_each_entry_safe(nf, tmp, &nfsd_file_hashtbl[hashval].nfb_head, nf_node) {
- if (inode == nf->nf_inode)
- nfsd_file_unhash_and_release_locked(nf, dispose);
- }
- spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
+ rcu_read_lock();
+ do {
+ nf = rhashtable_lookup(&nfsd_file_rhash_tbl, &key,
+ nfsd_file_rhash_params);
+ if (!nf)
+ break;
+ nfsd_file_unhash_and_dispose(nf, dispose);
+ count++;
+ } while (1);
+ rcu_read_unlock();
+ return count;
}
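Because the NFSD_FILE_KEY_INODE comparison matches on the inode alone, several nfsd_file objects (one per net namespace and access mode) can satisfy the same lookup. The loop above therefore re-runs rhashtable_lookup() after unhashing each hit, terminating only when the key no longer resolves. A reduced sketch of that drain pattern (names invented; the unhash step must remove the object from the table or the loop never ends):

        static unsigned int foo_drain_matches(struct rhashtable *tbl, void *key,
                                              struct list_head *dispose)
        {
                unsigned int count = 0;
                struct foo *f;

                rcu_read_lock();
                while ((f = rhashtable_lookup(tbl, key, foo_rhash_params)) != NULL) {
                        foo_unhash(f);                  /* removes f from @tbl */
                        list_add(&f->f_lru, dispose);   /* release after the loop */
                        count++;
                }
                rcu_read_unlock();
                return count;
        }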
/**
* nfsd_file_close_inode_sync - attempt to forcibly close a nfsd_file
* @inode: inode of the file to attempt to remove
*
- * Walk the whole hash bucket, looking for any files that correspond to "inode".
- * If any do, then unhash them and put the hashtable reference to them and
- * destroy any that had their last reference put. Also ensure that any of the
- * fputs also have their final __fput done as well.
+ * Unhash and put, then flush and fput all cache items associated with @inode.
*/
void
nfsd_file_close_inode_sync(struct inode *inode)
{
- unsigned int hashval = (unsigned int)hash_long(inode->i_ino,
- NFSD_FILE_HASH_BITS);
LIST_HEAD(dispose);
+ unsigned int count;
- __nfsd_file_close_inode(inode, hashval, &dispose);
- trace_nfsd_file_close_inode_sync(inode, hashval, !list_empty(&dispose));
+ count = __nfsd_file_close_inode(inode, &dispose);
+ trace_nfsd_file_close_inode_sync(inode, count);
nfsd_file_dispose_list_sync(&dispose);
}
@@ -543,19 +729,16 @@ nfsd_file_close_inode_sync(struct inode *inode)
* nfsd_file_close_inode - attempt a delayed close of a nfsd_file
* @inode: inode of the file to attempt to remove
*
- * Walk the whole hash bucket, looking for any files that correspond to "inode".
- * If any do, then unhash them and put the hashtable reference to them and
- * destroy any that had their last reference put.
+ * Unhash and put all cache items associated with @inode.
*/
static void
nfsd_file_close_inode(struct inode *inode)
{
- unsigned int hashval = (unsigned int)hash_long(inode->i_ino,
- NFSD_FILE_HASH_BITS);
LIST_HEAD(dispose);
+ unsigned int count;
- __nfsd_file_close_inode(inode, hashval, &dispose);
- trace_nfsd_file_close_inode(inode, hashval, !list_empty(&dispose));
+ count = __nfsd_file_close_inode(inode, &dispose);
+ trace_nfsd_file_close_inode(inode, count);
nfsd_file_dispose_list_delayed(&dispose);
}
@@ -630,25 +813,21 @@ static const struct fsnotify_ops nfsd_file_fsnotify_ops = {
int
nfsd_file_cache_init(void)
{
- int ret = -ENOMEM;
- unsigned int i;
+ int ret;
- clear_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags);
-
- if (nfsd_file_hashtbl)
+ lockdep_assert_held(&nfsd_mutex);
+ if (test_and_set_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags) == 1)
return 0;
+ ret = rhashtable_init(&nfsd_file_rhash_tbl, &nfsd_file_rhash_params);
+ if (ret)
+ return ret;
+
+ ret = -ENOMEM;
nfsd_filecache_wq = alloc_workqueue("nfsd_filecache", 0, 0);
if (!nfsd_filecache_wq)
goto out;
- nfsd_file_hashtbl = kvcalloc(NFSD_FILE_HASH_SIZE,
- sizeof(*nfsd_file_hashtbl), GFP_KERNEL);
- if (!nfsd_file_hashtbl) {
- pr_err("nfsd: unable to allocate nfsd_file_hashtbl\n");
- goto out_err;
- }
-
nfsd_file_slab = kmem_cache_create("nfsd_file",
sizeof(struct nfsd_file), 0, 0, NULL);
if (!nfsd_file_slab) {
@@ -670,7 +849,7 @@ nfsd_file_cache_init(void)
goto out_err;
}
- ret = register_shrinker(&nfsd_file_shrinker);
+ ret = register_shrinker(&nfsd_file_shrinker, "nfsd-filecache");
if (ret) {
pr_err("nfsd: failed to register nfsd_file_shrinker: %d\n", ret);
goto out_lru;
@@ -692,11 +871,6 @@ nfsd_file_cache_init(void)
goto out_notifier;
}
- for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
- INIT_HLIST_HEAD(&nfsd_file_hashtbl[i].nfb_head);
- spin_lock_init(&nfsd_file_hashtbl[i].nfb_lock);
- }
-
INIT_DELAYED_WORK(&nfsd_filecache_laundrette, nfsd_file_gc_worker);
out:
return ret;
@@ -711,46 +885,47 @@ out_err:
nfsd_file_slab = NULL;
kmem_cache_destroy(nfsd_file_mark_slab);
nfsd_file_mark_slab = NULL;
- kvfree(nfsd_file_hashtbl);
- nfsd_file_hashtbl = NULL;
destroy_workqueue(nfsd_filecache_wq);
nfsd_filecache_wq = NULL;
+ rhashtable_destroy(&nfsd_file_rhash_tbl);
goto out;
}
/*
* Note this can deadlock with nfsd_file_lru_cb.
*/
-void
-nfsd_file_cache_purge(struct net *net)
+static void
+__nfsd_file_cache_purge(struct net *net)
{
- unsigned int i;
- struct nfsd_file *nf;
- struct hlist_node *next;
+ struct rhashtable_iter iter;
+ struct nfsd_file *nf;
LIST_HEAD(dispose);
bool del;
- if (!nfsd_file_hashtbl)
- return;
-
- for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
- struct nfsd_fcache_bucket *nfb = &nfsd_file_hashtbl[i];
+ rhashtable_walk_enter(&nfsd_file_rhash_tbl, &iter);
+ do {
+ rhashtable_walk_start(&iter);
- spin_lock(&nfb->nfb_lock);
- hlist_for_each_entry_safe(nf, next, &nfb->nfb_head, nf_node) {
+ nf = rhashtable_walk_next(&iter);
+ while (!IS_ERR_OR_NULL(nf)) {
if (net && nf->nf_net != net)
continue;
- del = nfsd_file_unhash_and_release_locked(nf, &dispose);
+ del = nfsd_file_unhash_and_dispose(nf, &dispose);
/*
* Deadlock detected! Something marked this entry as
 * unhashed, but hasn't removed it from the hash list.
*/
WARN_ON_ONCE(!del);
+
+ nf = rhashtable_walk_next(&iter);
}
- spin_unlock(&nfb->nfb_lock);
- nfsd_file_dispose_list(&dispose);
- }
+
+ rhashtable_walk_stop(&iter);
+ } while (nf == ERR_PTR(-EAGAIN));
+ rhashtable_walk_exit(&iter);
+
+ nfsd_file_dispose_list(&dispose);
}
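The purge path uses the rhashtable walk iterator, whose rhashtable_walk_next() returns ERR_PTR(-EAGAIN) if a table resize interleaves with the walk; the outer do/while simply restarts the walk in that case (objects may then be visited more than once, which is harmless for an idempotent unhash). The control flow, reduced to a skeleton (illustrative only):

        struct rhashtable_iter iter;
        struct foo *f;

        rhashtable_walk_enter(&tbl, &iter);
        do {
                rhashtable_walk_start(&iter);
                while (!IS_ERR_OR_NULL(f = rhashtable_walk_next(&iter))) {
                        /* per-object work; no sleeping between start and stop */
                }
                rhashtable_walk_stop(&iter);
        } while (f == ERR_PTR(-EAGAIN));        /* resize raced with us: rewalk */
        rhashtable_walk_exit(&iter);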
static struct nfsd_fcache_disposal *
@@ -793,6 +968,19 @@ nfsd_file_cache_start_net(struct net *net)
return nn->fcache_disposal ? 0 : -ENOMEM;
}
+/**
+ * nfsd_file_cache_purge - Remove all cache items associated with @net
+ * @net: target net namespace
+ *
+ */
+void
+nfsd_file_cache_purge(struct net *net)
+{
+ lockdep_assert_held(&nfsd_mutex);
+ if (test_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags) == 1)
+ __nfsd_file_cache_purge(net);
+}
+
void
nfsd_file_cache_shutdown_net(struct net *net)
{
@@ -803,7 +991,11 @@ nfsd_file_cache_shutdown_net(struct net *net)
void
nfsd_file_cache_shutdown(void)
{
- set_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags);
+ int i;
+
+ lockdep_assert_held(&nfsd_mutex);
+ if (test_and_clear_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags) == 0)
+ return;
lease_unregister_notifier(&nfsd_file_lease_notifier);
unregister_shrinker(&nfsd_file_shrinker);
@@ -812,7 +1004,7 @@ nfsd_file_cache_shutdown(void)
* calling nfsd_file_cache_purge
*/
cancel_delayed_work_sync(&nfsd_filecache_laundrette);
- nfsd_file_cache_purge(NULL);
+ __nfsd_file_cache_purge(NULL);
list_lru_destroy(&nfsd_file_lru);
rcu_barrier();
fsnotify_put_group(nfsd_file_fsnotify_group);
@@ -822,124 +1014,96 @@ nfsd_file_cache_shutdown(void)
fsnotify_wait_marks_destroyed();
kmem_cache_destroy(nfsd_file_mark_slab);
nfsd_file_mark_slab = NULL;
- kvfree(nfsd_file_hashtbl);
- nfsd_file_hashtbl = NULL;
destroy_workqueue(nfsd_filecache_wq);
nfsd_filecache_wq = NULL;
-}
-
-static bool
-nfsd_match_cred(const struct cred *c1, const struct cred *c2)
-{
- int i;
-
- if (!uid_eq(c1->fsuid, c2->fsuid))
- return false;
- if (!gid_eq(c1->fsgid, c2->fsgid))
- return false;
- if (c1->group_info == NULL || c2->group_info == NULL)
- return c1->group_info == c2->group_info;
- if (c1->group_info->ngroups != c2->group_info->ngroups)
- return false;
- for (i = 0; i < c1->group_info->ngroups; i++) {
- if (!gid_eq(c1->group_info->gid[i], c2->group_info->gid[i]))
- return false;
- }
- return true;
-}
-
-static struct nfsd_file *
-nfsd_file_find_locked(struct inode *inode, unsigned int may_flags,
- unsigned int hashval, struct net *net)
-{
- struct nfsd_file *nf;
- unsigned char need = may_flags & NFSD_FILE_MAY_MASK;
-
- hlist_for_each_entry_rcu(nf, &nfsd_file_hashtbl[hashval].nfb_head,
- nf_node, lockdep_is_held(&nfsd_file_hashtbl[hashval].nfb_lock)) {
- if (nf->nf_may != need)
- continue;
- if (nf->nf_inode != inode)
- continue;
- if (nf->nf_net != net)
- continue;
- if (!nfsd_match_cred(nf->nf_cred, current_cred()))
- continue;
- if (!test_bit(NFSD_FILE_HASHED, &nf->nf_flags))
- continue;
- if (nfsd_file_get(nf) != NULL)
- return nf;
+ rhashtable_destroy(&nfsd_file_rhash_tbl);
+
+ for_each_possible_cpu(i) {
+ per_cpu(nfsd_file_cache_hits, i) = 0;
+ per_cpu(nfsd_file_acquisitions, i) = 0;
+ per_cpu(nfsd_file_releases, i) = 0;
+ per_cpu(nfsd_file_total_age, i) = 0;
+ per_cpu(nfsd_file_pages_flushed, i) = 0;
+ per_cpu(nfsd_file_evictions, i) = 0;
}
- return NULL;
}
/**
- * nfsd_file_is_cached - are there any cached open files for this fh?
- * @inode: inode of the file to check
+ * nfsd_file_is_cached - are there any cached open files for this inode?
+ * @inode: inode to check
+ *
+ * The lookup matches inodes in all net namespaces and is atomic wrt
+ * nfsd_file_acquire().
*
- * Scan the hashtable for open files that match this fh. Returns true if there
- * are any, and false if not.
+ * Return values:
+ * %true: filecache contains at least one file matching this inode
+ * %false: filecache contains no files matching this inode
*/
bool
nfsd_file_is_cached(struct inode *inode)
{
- bool ret = false;
- struct nfsd_file *nf;
- unsigned int hashval;
-
- hashval = (unsigned int)hash_long(inode->i_ino, NFSD_FILE_HASH_BITS);
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(nf, &nfsd_file_hashtbl[hashval].nfb_head,
- nf_node) {
- if (inode == nf->nf_inode) {
- ret = true;
- break;
- }
- }
- rcu_read_unlock();
- trace_nfsd_file_is_cached(inode, hashval, (int)ret);
+ struct nfsd_file_lookup_key key = {
+ .type = NFSD_FILE_KEY_INODE,
+ .inode = inode,
+ };
+ bool ret = false;
+
+ if (rhashtable_lookup_fast(&nfsd_file_rhash_tbl, &key,
+ nfsd_file_rhash_params) != NULL)
+ ret = true;
+ trace_nfsd_file_is_cached(inode, (int)ret);
return ret;
}
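nfsd_file_is_cached() and the other fast lookups hand the table a search key whose layout differs from the stored objects, so the rhashtable parameters must supply a key hash, an object hash, and a compare callback that understands both sides. A stripped-down illustration of how such parameters fit together (all names invented; the real nfsd_file_rhash_params has a richer key and more fields):

        #include <linux/rhashtable.h>
        #include <linux/jhash.h>

        struct foo_key {
                struct inode    *inode;
        };

        struct foo {
                struct rhash_head       f_rhash;
                struct inode            *f_inode;
        };

        static u32 foo_key_hashfn(const void *data, u32 len, u32 seed)
        {
                const struct foo_key *key = data;

                return jhash(&key->inode, sizeof(key->inode), seed);
        }

        static u32 foo_obj_hashfn(const void *data, u32 len, u32 seed)
        {
                const struct foo *f = data;

                return jhash(&f->f_inode, sizeof(f->f_inode), seed);
        }

        static int foo_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
        {
                const struct foo_key *key = arg->key;
                const struct foo *f = ptr;

                return f->f_inode != key->inode;        /* 0 means "match" */
        }

        static const struct rhashtable_params foo_rhash_params = {
                .key_len        = sizeof(struct inode *),
                .head_offset    = offsetof(struct foo, f_rhash),
                .hashfn         = foo_key_hashfn,
                .obj_hashfn     = foo_obj_hashfn,
                .obj_cmpfn      = foo_obj_cmpfn,
        };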
static __be32
-nfsd_do_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
+nfsd_file_do_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
unsigned int may_flags, struct nfsd_file **pnf, bool open)
{
- __be32 status;
- struct net *net = SVC_NET(rqstp);
+ struct nfsd_file_lookup_key key = {
+ .type = NFSD_FILE_KEY_FULL,
+ .need = may_flags & NFSD_FILE_MAY_MASK,
+ .net = SVC_NET(rqstp),
+ };
struct nfsd_file *nf, *new;
- struct inode *inode;
- unsigned int hashval;
bool retry = true;
+ __be32 status;
- /* FIXME: skip this if fh_dentry is already set? */
status = fh_verify(rqstp, fhp, S_IFREG,
may_flags|NFSD_MAY_OWNER_OVERRIDE);
if (status != nfs_ok)
return status;
+ key.inode = d_inode(fhp->fh_dentry);
+ key.cred = get_current_cred();
- inode = d_inode(fhp->fh_dentry);
- hashval = (unsigned int)hash_long(inode->i_ino, NFSD_FILE_HASH_BITS);
retry:
- rcu_read_lock();
- nf = nfsd_file_find_locked(inode, may_flags, hashval, net);
- rcu_read_unlock();
+ /* Avoid allocation if the item is already in cache */
+ nf = rhashtable_lookup_fast(&nfsd_file_rhash_tbl, &key,
+ nfsd_file_rhash_params);
+ if (nf)
+ nf = nfsd_file_get(nf);
if (nf)
goto wait_for_construction;
- new = nfsd_file_alloc(inode, may_flags, hashval, net);
+ new = nfsd_file_alloc(&key, may_flags);
if (!new) {
- trace_nfsd_file_acquire(rqstp, hashval, inode, may_flags,
- NULL, nfserr_jukebox);
- return nfserr_jukebox;
+ status = nfserr_jukebox;
+ goto out_status;
}
- spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
- nf = nfsd_file_find_locked(inode, may_flags, hashval, net);
- if (nf == NULL)
+ nf = rhashtable_lookup_get_insert_key(&nfsd_file_rhash_tbl,
+ &key, &new->nf_rhash,
+ nfsd_file_rhash_params);
+ if (!nf) {
+ nf = new;
goto open_file;
- spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
+ }
+ if (IS_ERR(nf))
+ goto insert_err;
+ nf = nfsd_file_get(nf);
+ if (nf == NULL) {
+ nf = new;
+ goto open_file;
+ }
nfsd_file_slab_free(&new->nf_rcu);
wait_for_construction:
@@ -947,6 +1111,7 @@ wait_for_construction:
/* Did construction of this file fail? */
if (!test_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
+ trace_nfsd_file_cons_err(rqstp, key.inode, may_flags, nf);
if (!retry) {
status = nfserr_jukebox;
goto out;
@@ -956,49 +1121,29 @@ wait_for_construction:
goto retry;
}
+ nfsd_file_lru_remove(nf);
this_cpu_inc(nfsd_file_cache_hits);
- if (!(may_flags & NFSD_MAY_NOT_BREAK_LEASE)) {
- bool write = (may_flags & NFSD_MAY_WRITE);
-
- if (test_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags) ||
- (test_bit(NFSD_FILE_BREAK_WRITE, &nf->nf_flags) && write)) {
- status = nfserrno(nfsd_open_break_lease(
- file_inode(nf->nf_file), may_flags));
- if (status == nfs_ok) {
- clear_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags);
- if (write)
- clear_bit(NFSD_FILE_BREAK_WRITE,
- &nf->nf_flags);
- }
- }
- }
+ status = nfserrno(nfsd_open_break_lease(file_inode(nf->nf_file), may_flags));
out:
if (status == nfs_ok) {
+ if (open)
+ this_cpu_inc(nfsd_file_acquisitions);
*pnf = nf;
} else {
nfsd_file_put(nf);
nf = NULL;
}
- trace_nfsd_file_acquire(rqstp, hashval, inode, may_flags, nf, status);
+out_status:
+ put_cred(key.cred);
+ if (open)
+ trace_nfsd_file_acquire(rqstp, key.inode, may_flags, nf, status);
return status;
+
open_file:
- nf = new;
- /* Take reference for the hashtable */
- refcount_inc(&nf->nf_ref);
- __set_bit(NFSD_FILE_HASHED, &nf->nf_flags);
- __set_bit(NFSD_FILE_PENDING, &nf->nf_flags);
- list_lru_add(&nfsd_file_lru, &nf->nf_lru);
- hlist_add_head_rcu(&nf->nf_node, &nfsd_file_hashtbl[hashval].nfb_head);
- ++nfsd_file_hashtbl[hashval].nfb_count;
- nfsd_file_hashtbl[hashval].nfb_maxcount = max(nfsd_file_hashtbl[hashval].nfb_maxcount,
- nfsd_file_hashtbl[hashval].nfb_count);
- spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
- if (atomic_long_inc_return(&nfsd_filecache_count) >= NFSD_FILE_LRU_THRESHOLD)
- nfsd_file_gc();
-
- nf->nf_mark = nfsd_file_mark_find_or_create(nf);
+ trace_nfsd_file_alloc(nf);
+ nf->nf_mark = nfsd_file_mark_find_or_create(nf, key.inode);
if (nf->nf_mark) {
if (open) {
status = nfsd_open_verified(rqstp, fhp, may_flags,
@@ -1012,18 +1157,20 @@ open_file:
* If construction failed, or we raced with a call to unlink()
* then unhash.
*/
- if (status != nfs_ok || inode->i_nlink == 0) {
- bool do_free;
- spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
- do_free = nfsd_file_unhash(nf);
- spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
- if (do_free)
+ if (status != nfs_ok || key.inode->i_nlink == 0)
+ if (nfsd_file_unhash(nf))
nfsd_file_put_noref(nf);
- }
clear_bit_unlock(NFSD_FILE_PENDING, &nf->nf_flags);
smp_mb__after_atomic();
wake_up_bit(&nf->nf_flags, NFSD_FILE_PENDING);
goto out;
+
+insert_err:
+ nfsd_file_slab_free(&new->nf_rcu);
+ trace_nfsd_file_insert_err(rqstp, key.inode, may_flags, PTR_ERR(nf));
+ nf = NULL;
+ status = nfserr_jukebox;
+ goto out_status;
}
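The acquire path above is the usual optimistic lookup-or-insert: allocate a candidate outside any lock, then let rhashtable_lookup_get_insert_key() either insert it or hand back the object that won the race, in which case the loser's allocation is freed. The essential shape (a fragment, not complete code; helper names invented, and the real refcount handling is more involved):

        f = rhashtable_lookup_get_insert_key(&tbl, &key, &new->f_rhash,
                                             foo_rhash_params);
        if (!f)
                f = new;                /* our object went in */
        else if (IS_ERR(f))
                goto insert_failed;     /* e.g. table growth allocation failed */
        else
                foo_free(new);          /* lost the race; use the existing entry */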
/**
@@ -1040,7 +1187,7 @@ __be32
nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
unsigned int may_flags, struct nfsd_file **pnf)
{
- return nfsd_do_file_acquire(rqstp, fhp, may_flags, pnf, true);
+ return nfsd_file_do_acquire(rqstp, fhp, may_flags, pnf, true);
}
/**
@@ -1057,7 +1204,7 @@ __be32
nfsd_file_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
unsigned int may_flags, struct nfsd_file **pnf)
{
- return nfsd_do_file_acquire(rqstp, fhp, may_flags, pnf, false);
+ return nfsd_file_do_acquire(rqstp, fhp, may_flags, pnf, false);
}
/*
@@ -1067,29 +1214,49 @@ nfsd_file_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
*/
static int nfsd_file_cache_stats_show(struct seq_file *m, void *v)
{
- unsigned int i, count = 0, longest = 0;
- unsigned long hits = 0;
+ unsigned long releases = 0, pages_flushed = 0, evictions = 0;
+ unsigned long hits = 0, acquisitions = 0;
+ unsigned int i, count = 0, buckets = 0;
+ unsigned long lru = 0, total_age = 0;
- /*
- * No need for spinlocks here since we're not terribly interested in
- * accuracy. We do take the nfsd_mutex simply to ensure that we
- * don't end up racing with server shutdown
- */
+ /* Serialize with server shutdown */
mutex_lock(&nfsd_mutex);
- if (nfsd_file_hashtbl) {
- for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
- count += nfsd_file_hashtbl[i].nfb_count;
- longest = max(longest, nfsd_file_hashtbl[i].nfb_count);
- }
+ if (test_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags) == 1) {
+ struct bucket_table *tbl;
+ struct rhashtable *ht;
+
+ lru = list_lru_count(&nfsd_file_lru);
+
+ rcu_read_lock();
+ ht = &nfsd_file_rhash_tbl;
+ count = atomic_read(&ht->nelems);
+ tbl = rht_dereference_rcu(ht->tbl, ht);
+ buckets = tbl->size;
+ rcu_read_unlock();
}
mutex_unlock(&nfsd_mutex);
- for_each_possible_cpu(i)
+ for_each_possible_cpu(i) {
hits += per_cpu(nfsd_file_cache_hits, i);
+ acquisitions += per_cpu(nfsd_file_acquisitions, i);
+ releases += per_cpu(nfsd_file_releases, i);
+ total_age += per_cpu(nfsd_file_total_age, i);
+ evictions += per_cpu(nfsd_file_evictions, i);
+ pages_flushed += per_cpu(nfsd_file_pages_flushed, i);
+ }
seq_printf(m, "total entries: %u\n", count);
- seq_printf(m, "longest chain: %u\n", longest);
+ seq_printf(m, "hash buckets: %u\n", buckets);
+ seq_printf(m, "lru entries: %lu\n", lru);
seq_printf(m, "cache hits: %lu\n", hits);
+ seq_printf(m, "acquisitions: %lu\n", acquisitions);
+ seq_printf(m, "releases: %lu\n", releases);
+ seq_printf(m, "evictions: %lu\n", evictions);
+ if (releases)
+ seq_printf(m, "mean age (ms): %ld\n", total_age / releases);
+ else
+ seq_printf(m, "mean age (ms): -\n");
+ seq_printf(m, "pages flushed: %lu\n", pages_flushed);
return 0;
}
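The statistics above are kept as plain per-cpu counters: the fast paths do a lockless this_cpu_inc(), and the seq_file reader sums over all possible CPUs, accepting a slightly stale total in exchange for zero contention. A toy version of the same pattern (counter and function names invented):

        #include <linux/percpu.h>
        #include <linux/seq_file.h>

        static DEFINE_PER_CPU(unsigned long, foo_hits);

        static inline void foo_count_hit(void)
        {
                this_cpu_inc(foo_hits);         /* no lock, per-CPU variable */
        }

        static int foo_stats_show(struct seq_file *m, void *v)
        {
                unsigned long hits = 0;
                int i;

                for_each_possible_cpu(i)
                        hits += per_cpu(foo_hits, i);   /* racy but good enough */
                seq_printf(m, "hits: %lu\n", hits);
                return 0;
        }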
diff --git a/fs/nfsd/filecache.h b/fs/nfsd/filecache.h
index 1da0c79a5580..8e8c0c47d67d 100644
--- a/fs/nfsd/filecache.h
+++ b/fs/nfsd/filecache.h
@@ -29,7 +29,7 @@ struct nfsd_file_mark {
* never be dereferenced, only used for comparison.
*/
struct nfsd_file {
- struct hlist_node nf_node;
+ struct rhash_head nf_rhash;
struct list_head nf_lru;
struct rcu_head nf_rcu;
struct file *nf_file;
@@ -37,15 +37,13 @@ struct nfsd_file {
struct net *nf_net;
#define NFSD_FILE_HASHED (0)
#define NFSD_FILE_PENDING (1)
-#define NFSD_FILE_BREAK_READ (2)
-#define NFSD_FILE_BREAK_WRITE (3)
-#define NFSD_FILE_REFERENCED (4)
+#define NFSD_FILE_REFERENCED (2)
unsigned long nf_flags;
- struct inode *nf_inode;
- unsigned int nf_hashval;
+ struct inode *nf_inode; /* don't deref */
refcount_t nf_ref;
unsigned char nf_may;
struct nfsd_file_mark *nf_mark;
+ ktime_t nf_birthtime;
};
int nfsd_file_cache_init(void);
@@ -54,6 +52,7 @@ void nfsd_file_cache_shutdown(void);
int nfsd_file_cache_start_net(struct net *net);
void nfsd_file_cache_shutdown_net(struct net *net);
void nfsd_file_put(struct nfsd_file *nf);
+void nfsd_file_close(struct nfsd_file *nf);
struct nfsd_file *nfsd_file_get(struct nfsd_file *nf);
void nfsd_file_close_inode_sync(struct inode *inode);
bool nfsd_file_is_cached(struct inode *inode);
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index 1b1a962a1804..ffe17743cc74 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -189,6 +189,9 @@ struct nfsd_net {
struct nfsd_fcache_disposal *fcache_disposal;
siphash_key_t siphash_key;
+
+ atomic_t nfs4_client_count;
+ int nfs4_max_clients;
};
/* Simple check to find out if a given net was properly initialized */
diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c
index b5760801d377..9edd3c1a30fb 100644
--- a/fs/nfsd/nfs2acl.c
+++ b/fs/nfsd/nfs2acl.c
@@ -111,7 +111,7 @@ static __be32 nfsacld_proc_setacl(struct svc_rqst *rqstp)
if (error)
goto out_errno;
- fh_lock(fh);
+ inode_lock(inode);
error = set_posix_acl(&init_user_ns, inode, ACL_TYPE_ACCESS,
argp->acl_access);
@@ -122,7 +122,7 @@ static __be32 nfsacld_proc_setacl(struct svc_rqst *rqstp)
if (error)
goto out_drop_lock;
- fh_unlock(fh);
+ inode_unlock(inode);
fh_drop_write(fh);
@@ -136,7 +136,7 @@ out:
return rpc_success;
out_drop_lock:
- fh_unlock(fh);
+ inode_unlock(inode);
fh_drop_write(fh);
out_errno:
resp->status = nfserrno(error);
diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c
index 35b2ebda14da..9446c6743664 100644
--- a/fs/nfsd/nfs3acl.c
+++ b/fs/nfsd/nfs3acl.c
@@ -101,7 +101,7 @@ static __be32 nfsd3_proc_setacl(struct svc_rqst *rqstp)
if (error)
goto out_errno;
- fh_lock(fh);
+ inode_lock(inode);
error = set_posix_acl(&init_user_ns, inode, ACL_TYPE_ACCESS,
argp->acl_access);
@@ -111,7 +111,7 @@ static __be32 nfsd3_proc_setacl(struct svc_rqst *rqstp)
argp->acl_default);
out_drop_lock:
- fh_unlock(fh);
+ inode_unlock(inode);
fh_drop_write(fh);
out_errno:
resp->status = nfserrno(error);
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index 981a3a7a6e16..a41cca619338 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -67,12 +67,15 @@ nfsd3_proc_setattr(struct svc_rqst *rqstp)
{
struct nfsd3_sattrargs *argp = rqstp->rq_argp;
struct nfsd3_attrstat *resp = rqstp->rq_resp;
+ struct nfsd_attrs attrs = {
+ .na_iattr = &argp->attrs,
+ };
dprintk("nfsd: SETATTR(3) %s\n",
SVCFH_fmt(&argp->fh));
fh_copy(&resp->fh, &argp->fh);
- resp->status = nfsd_setattr(rqstp, &resp->fh, &argp->attrs,
+ resp->status = nfsd_setattr(rqstp, &resp->fh, &attrs,
argp->check_guard, argp->guardtime);
return rpc_success;
}
@@ -233,6 +236,9 @@ nfsd3_create_file(struct svc_rqst *rqstp, struct svc_fh *fhp,
{
struct iattr *iap = &argp->attrs;
struct dentry *parent, *child;
+ struct nfsd_attrs attrs = {
+ .na_iattr = iap,
+ };
__u32 v_mtime, v_atime;
struct inode *inode;
__be32 status;
@@ -254,7 +260,7 @@ nfsd3_create_file(struct svc_rqst *rqstp, struct svc_fh *fhp,
if (host_err)
return nfserrno(host_err);
- fh_lock_nested(fhp, I_MUTEX_PARENT);
+ inode_lock_nested(inode, I_MUTEX_PARENT);
child = lookup_one_len(argp->name, parent, argp->len);
if (IS_ERR(child)) {
@@ -312,11 +318,13 @@ nfsd3_create_file(struct svc_rqst *rqstp, struct svc_fh *fhp,
if (!IS_POSIXACL(inode))
iap->ia_mode &= ~current_umask();
+ fh_fill_pre_attrs(fhp);
host_err = vfs_create(&init_user_ns, inode, child, iap->ia_mode, true);
if (host_err < 0) {
status = nfserrno(host_err);
goto out;
}
+ fh_fill_post_attrs(fhp);
/* A newly created file already has a file size of zero. */
if ((iap->ia_valid & ATTR_SIZE) && (iap->ia_size == 0))
@@ -331,10 +339,10 @@ nfsd3_create_file(struct svc_rqst *rqstp, struct svc_fh *fhp,
}
set_attr:
- status = nfsd_create_setattr(rqstp, fhp, resfhp, iap);
+ status = nfsd_create_setattr(rqstp, fhp, resfhp, &attrs);
out:
- fh_unlock(fhp);
+ inode_unlock(inode);
if (child && !IS_ERR(child))
dput(child);
fh_drop_write(fhp);
@@ -368,6 +376,9 @@ nfsd3_proc_mkdir(struct svc_rqst *rqstp)
{
struct nfsd3_createargs *argp = rqstp->rq_argp;
struct nfsd3_diropres *resp = rqstp->rq_resp;
+ struct nfsd_attrs attrs = {
+ .na_iattr = &argp->attrs,
+ };
dprintk("nfsd: MKDIR(3) %s %.*s\n",
SVCFH_fmt(&argp->fh),
@@ -378,8 +389,7 @@ nfsd3_proc_mkdir(struct svc_rqst *rqstp)
fh_copy(&resp->dirfh, &argp->fh);
fh_init(&resp->fh, NFS3_FHSIZE);
resp->status = nfsd_create(rqstp, &resp->dirfh, argp->name, argp->len,
- &argp->attrs, S_IFDIR, 0, &resp->fh);
- fh_unlock(&resp->dirfh);
+ &attrs, S_IFDIR, 0, &resp->fh);
return rpc_success;
}
@@ -388,6 +398,9 @@ nfsd3_proc_symlink(struct svc_rqst *rqstp)
{
struct nfsd3_symlinkargs *argp = rqstp->rq_argp;
struct nfsd3_diropres *resp = rqstp->rq_resp;
+ struct nfsd_attrs attrs = {
+ .na_iattr = &argp->attrs,
+ };
if (argp->tlen == 0) {
resp->status = nfserr_inval;
@@ -414,7 +427,7 @@ nfsd3_proc_symlink(struct svc_rqst *rqstp)
fh_copy(&resp->dirfh, &argp->ffh);
fh_init(&resp->fh, NFS3_FHSIZE);
resp->status = nfsd_symlink(rqstp, &resp->dirfh, argp->fname,
- argp->flen, argp->tname, &resp->fh);
+ argp->flen, argp->tname, &attrs, &resp->fh);
kfree(argp->tname);
out:
return rpc_success;
@@ -428,6 +441,9 @@ nfsd3_proc_mknod(struct svc_rqst *rqstp)
{
struct nfsd3_mknodargs *argp = rqstp->rq_argp;
struct nfsd3_diropres *resp = rqstp->rq_resp;
+ struct nfsd_attrs attrs = {
+ .na_iattr = &argp->attrs,
+ };
int type;
dev_t rdev = 0;
@@ -453,8 +469,7 @@ nfsd3_proc_mknod(struct svc_rqst *rqstp)
type = nfs3_ftypes[argp->ftype];
resp->status = nfsd_create(rqstp, &resp->dirfh, argp->name, argp->len,
- &argp->attrs, type, rdev, &resp->fh);
- fh_unlock(&resp->dirfh);
+ &attrs, type, rdev, &resp->fh);
out:
return rpc_success;
}
@@ -477,7 +492,6 @@ nfsd3_proc_remove(struct svc_rqst *rqstp)
fh_copy(&resp->fh, &argp->fh);
resp->status = nfsd_unlink(rqstp, &resp->fh, -S_IFDIR,
argp->name, argp->len);
- fh_unlock(&resp->fh);
return rpc_success;
}
@@ -498,7 +512,6 @@ nfsd3_proc_rmdir(struct svc_rqst *rqstp)
fh_copy(&resp->fh, &argp->fh);
resp->status = nfsd_unlink(rqstp, &resp->fh, S_IFDIR,
argp->name, argp->len);
- fh_unlock(&resp->fh);
return rpc_success;
}
diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
index eaa3a0cf38f1..bb8e2f6d7d03 100644
--- a/fs/nfsd/nfs4acl.c
+++ b/fs/nfsd/nfs4acl.c
@@ -751,58 +751,26 @@ out_estate:
return ret;
}
-__be32
-nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
- struct nfs4_acl *acl)
+__be32 nfsd4_acl_to_attr(enum nfs_ftype4 type, struct nfs4_acl *acl,
+ struct nfsd_attrs *attr)
{
- __be32 error;
int host_error;
- struct dentry *dentry;
- struct inode *inode;
- struct posix_acl *pacl = NULL, *dpacl = NULL;
unsigned int flags = 0;
- /* Get inode */
- error = fh_verify(rqstp, fhp, 0, NFSD_MAY_SATTR);
- if (error)
- return error;
-
- dentry = fhp->fh_dentry;
- inode = d_inode(dentry);
+ if (!acl)
+ return nfs_ok;
- if (S_ISDIR(inode->i_mode))
+ if (type == NF4DIR)
flags = NFS4_ACL_DIR;
- host_error = nfs4_acl_nfsv4_to_posix(acl, &pacl, &dpacl, flags);
+ host_error = nfs4_acl_nfsv4_to_posix(acl, &attr->na_pacl,
+ &attr->na_dpacl, flags);
if (host_error == -EINVAL)
return nfserr_attrnotsupp;
- if (host_error < 0)
- goto out_nfserr;
-
- fh_lock(fhp);
-
- host_error = set_posix_acl(&init_user_ns, inode, ACL_TYPE_ACCESS, pacl);
- if (host_error < 0)
- goto out_drop_lock;
-
- if (S_ISDIR(inode->i_mode)) {
- host_error = set_posix_acl(&init_user_ns, inode,
- ACL_TYPE_DEFAULT, dpacl);
- }
-
-out_drop_lock:
- fh_unlock(fhp);
-
- posix_acl_release(pacl);
- posix_acl_release(dpacl);
-out_nfserr:
- if (host_error == -EOPNOTSUPP)
- return nfserr_attrnotsupp;
else
return nfserrno(host_error);
}
-
static short
ace2type(struct nfs4_ace *ace)
{
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 11f8715d92d6..4ce328209f61 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -679,7 +679,7 @@ static int nfs4_xdr_dec_cb_notify_lock(struct rpc_rqst *rqstp,
* case NFS4_OK:
* write_response4 coa_resok4;
* default:
- * length4 coa_bytes_copied;
+ * length4 coa_bytes_copied;
* };
* struct CB_OFFLOAD4args {
* nfs_fh4 coa_fh;
@@ -688,21 +688,22 @@ static int nfs4_xdr_dec_cb_notify_lock(struct rpc_rqst *rqstp,
* };
*/
static void encode_offload_info4(struct xdr_stream *xdr,
- __be32 nfserr,
- const struct nfsd4_copy *cp)
+ const struct nfsd4_cb_offload *cbo)
{
__be32 *p;
p = xdr_reserve_space(xdr, 4);
- *p++ = nfserr;
- if (!nfserr) {
+ *p = cbo->co_nfserr;
+ switch (cbo->co_nfserr) {
+ case nfs_ok:
p = xdr_reserve_space(xdr, 4 + 8 + 4 + NFS4_VERIFIER_SIZE);
p = xdr_encode_empty_array(p);
- p = xdr_encode_hyper(p, cp->cp_res.wr_bytes_written);
- *p++ = cpu_to_be32(cp->cp_res.wr_stable_how);
- p = xdr_encode_opaque_fixed(p, cp->cp_res.wr_verifier.data,
+ p = xdr_encode_hyper(p, cbo->co_res.wr_bytes_written);
+ *p++ = cpu_to_be32(cbo->co_res.wr_stable_how);
+ p = xdr_encode_opaque_fixed(p, cbo->co_res.wr_verifier.data,
NFS4_VERIFIER_SIZE);
- } else {
+ break;
+ default:
p = xdr_reserve_space(xdr, 8);
/* We always return success if bytes were written */
p = xdr_encode_hyper(p, 0);
@@ -710,18 +711,16 @@ static void encode_offload_info4(struct xdr_stream *xdr,
}
static void encode_cb_offload4args(struct xdr_stream *xdr,
- __be32 nfserr,
- const struct knfsd_fh *fh,
- const struct nfsd4_copy *cp,
+ const struct nfsd4_cb_offload *cbo,
struct nfs4_cb_compound_hdr *hdr)
{
__be32 *p;
p = xdr_reserve_space(xdr, 4);
- *p++ = cpu_to_be32(OP_CB_OFFLOAD);
- encode_nfs_fh4(xdr, fh);
- encode_stateid4(xdr, &cp->cp_res.cb_stateid);
- encode_offload_info4(xdr, nfserr, cp);
+ *p = cpu_to_be32(OP_CB_OFFLOAD);
+ encode_nfs_fh4(xdr, &cbo->co_fh);
+ encode_stateid4(xdr, &cbo->co_res.cb_stateid);
+ encode_offload_info4(xdr, cbo);
hdr->nops++;
}
@@ -731,8 +730,8 @@ static void nfs4_xdr_enc_cb_offload(struct rpc_rqst *req,
const void *data)
{
const struct nfsd4_callback *cb = data;
- const struct nfsd4_copy *cp =
- container_of(cb, struct nfsd4_copy, cp_cb);
+ const struct nfsd4_cb_offload *cbo =
+ container_of(cb, struct nfsd4_cb_offload, co_cb);
struct nfs4_cb_compound_hdr hdr = {
.ident = 0,
.minorversion = cb->cb_clp->cl_minorversion,
@@ -740,7 +739,7 @@ static void nfs4_xdr_enc_cb_offload(struct rpc_rqst *req,
encode_cb_compound4args(xdr, &hdr);
encode_cb_sequence4args(xdr, cb, &hdr);
- encode_cb_offload4args(xdr, cp->nfserr, &cp->fh, cp, &hdr);
+ encode_cb_offload4args(xdr, cbo, &hdr);
encode_cb_nops(&hdr);
}
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 3895eb52d2b1..a72ab97f77ef 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -64,36 +64,6 @@ MODULE_PARM_DESC(nfsd4_ssc_umount_timeout,
"idle msecs before unmount export from source server");
#endif
-#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
-#include <linux/security.h>
-
-static inline void
-nfsd4_security_inode_setsecctx(struct svc_fh *resfh, struct xdr_netobj *label, u32 *bmval)
-{
- struct inode *inode = d_inode(resfh->fh_dentry);
- int status;
-
- inode_lock(inode);
- status = security_inode_setsecctx(resfh->fh_dentry,
- label->data, label->len);
- inode_unlock(inode);
-
- if (status)
- /*
- * XXX: We should really fail the whole open, but we may
- * already have created a new file, so it may be too
- * late. For now this seems the least of evils:
- */
- bmval[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
-
- return;
-}
-#else
-static inline void
-nfsd4_security_inode_setsecctx(struct svc_fh *resfh, struct xdr_netobj *label, u32 *bmval)
-{ }
-#endif
-
#define NFSDDBG_FACILITY NFSDDBG_PROC
static u32 nfsd_attrmask[] = {
@@ -158,26 +128,6 @@ is_create_with_attrs(struct nfsd4_open *open)
|| open->op_createmode == NFS4_CREATE_EXCLUSIVE4_1);
}
-/*
- * if error occurs when setting the acl, just clear the acl bit
- * in the returned attr bitmap.
- */
-static void
-do_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
- struct nfs4_acl *acl, u32 *bmval)
-{
- __be32 status;
-
- status = nfsd4_set_nfs4_acl(rqstp, fhp, acl);
- if (status)
- /*
- * We should probably fail the whole open at this point,
- * but we've already created the file, so it's too late;
- * So this seems the least of evils:
- */
- bmval[0] &= ~FATTR4_WORD0_ACL;
-}
-
static inline void
fh_dup2(struct svc_fh *dst, struct svc_fh *src)
{
@@ -286,6 +236,10 @@ nfsd4_create_file(struct svc_rqst *rqstp, struct svc_fh *fhp,
struct svc_fh *resfhp, struct nfsd4_open *open)
{
struct iattr *iap = &open->op_iattr;
+ struct nfsd_attrs attrs = {
+ .na_iattr = iap,
+ .na_seclabel = &open->op_label,
+ };
struct dentry *parent, *child;
__u32 v_mtime, v_atime;
struct inode *inode;
@@ -307,7 +261,10 @@ nfsd4_create_file(struct svc_rqst *rqstp, struct svc_fh *fhp,
if (host_err)
return nfserrno(host_err);
- fh_lock_nested(fhp, I_MUTEX_PARENT);
+ if (is_create_with_attrs(open))
+ nfsd4_acl_to_attr(NF4REG, open->op_acl, &attrs);
+
+ inode_lock_nested(inode, I_MUTEX_PARENT);
child = lookup_one_len(open->op_fname, parent, open->op_fnamelen);
if (IS_ERR(child)) {
@@ -345,6 +302,11 @@ nfsd4_create_file(struct svc_rqst *rqstp, struct svc_fh *fhp,
if (d_really_is_positive(child)) {
status = nfs_ok;
+ /* NFSv4 protocol requires change attributes even though
+ * no change happened.
+ */
+ fh_fill_both_attrs(fhp);
+
switch (open->op_createmode) {
case NFS4_CREATE_UNCHECKED:
if (!d_is_reg(child))
@@ -386,10 +348,12 @@ nfsd4_create_file(struct svc_rqst *rqstp, struct svc_fh *fhp,
if (!IS_POSIXACL(inode))
iap->ia_mode &= ~current_umask();
+ fh_fill_pre_attrs(fhp);
status = nfsd4_vfs_create(fhp, child, open);
if (status != nfs_ok)
goto out;
open->op_created = true;
+ fh_fill_post_attrs(fhp);
/* A newly created file already has a file size of zero. */
if ((iap->ia_valid & ATTR_SIZE) && (iap->ia_size == 0))
@@ -404,10 +368,15 @@ nfsd4_create_file(struct svc_rqst *rqstp, struct svc_fh *fhp,
}
set_attr:
- status = nfsd_create_setattr(rqstp, fhp, resfhp, iap);
+ status = nfsd_create_setattr(rqstp, fhp, resfhp, &attrs);
+ if (attrs.na_labelerr)
+ open->op_bmval[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
+ if (attrs.na_aclerr)
+ open->op_bmval[0] &= ~FATTR4_WORD0_ACL;
out:
- fh_unlock(fhp);
+ inode_unlock(inode);
+ nfsd_attrs_free(&attrs);
if (child && !IS_ERR(child))
dput(child);
fh_drop_write(fhp);
@@ -447,9 +416,6 @@ do_open_lookup(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, stru
status = nfsd4_create_file(rqstp, current_fh, *resfh, open);
current->fs->umask = 0;
- if (!status && open->op_label.len)
- nfsd4_security_inode_setsecctx(*resfh, &open->op_label, open->op_bmval);
-
/*
* Following rfc 3530 14.2.16, and rfc 5661 18.16.4
* use the returned bitmask to indicate which attributes
@@ -458,24 +424,21 @@ do_open_lookup(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, stru
if (nfsd4_create_is_exclusive(open->op_createmode) && status == 0)
open->op_bmval[1] |= (FATTR4_WORD1_TIME_ACCESS |
FATTR4_WORD1_TIME_MODIFY);
- } else
- /*
- * Note this may exit with the parent still locked.
- * We will hold the lock until nfsd4_open's final
- * lookup, to prevent renames or unlinks until we've had
- * a chance to an acquire a delegation if appropriate.
- */
+ } else {
status = nfsd_lookup(rqstp, current_fh,
open->op_fname, open->op_fnamelen, *resfh);
+ if (!status)
+ /* NFSv4 protocol requires change attributes even though
+ * no change happened.
+ */
+ fh_fill_both_attrs(current_fh);
+ }
if (status)
goto out;
status = nfsd_check_obj_isreg(*resfh);
if (status)
goto out;
- if (is_create_with_attrs(open) && open->op_acl != NULL)
- do_set_nfs4_acl(rqstp, *resfh, open->op_acl, open->op_bmval);
-
nfsd4_set_open_owner_reply_cache(cstate, open, *resfh);
accmode = NFSD_MAY_NOP;
if (open->op_created ||
@@ -547,6 +510,7 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
open->op_openowner);
open->op_filp = NULL;
+ open->op_rqstp = rqstp;
/* This check required by spec. */
if (open->op_create && open->op_claim_type != NFS4_OPEN_CLAIM_NULL)
@@ -630,9 +594,9 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
}
status = nfsd4_process_open2(rqstp, resfh, open);
- WARN(status && open->op_created,
- "nfsd4_process_open2 failed to open newly-created file! status=%u\n",
- be32_to_cpu(status));
+ if (status && open->op_created)
+ pr_warn("nfsd4_process_open2 failed to open newly-created file: status=%u\n",
+ be32_to_cpu(status));
if (reclaim && !status)
nn->somebody_reclaimed = true;
out:
@@ -786,6 +750,10 @@ nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_create *create = &u->create;
+ struct nfsd_attrs attrs = {
+ .na_iattr = &create->cr_iattr,
+ .na_seclabel = &create->cr_label,
+ };
struct svc_fh resfh;
__be32 status;
dev_t rdev;
@@ -801,12 +769,13 @@ nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (status)
return status;
+ status = nfsd4_acl_to_attr(create->cr_type, create->cr_acl, &attrs);
current->fs->umask = create->cr_umask;
switch (create->cr_type) {
case NF4LNK:
status = nfsd_symlink(rqstp, &cstate->current_fh,
create->cr_name, create->cr_namelen,
- create->cr_data, &resfh);
+ create->cr_data, &attrs, &resfh);
break;
case NF4BLK:
@@ -817,7 +786,7 @@ nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
goto out_umask;
status = nfsd_create(rqstp, &cstate->current_fh,
create->cr_name, create->cr_namelen,
- &create->cr_iattr, S_IFBLK, rdev, &resfh);
+ &attrs, S_IFBLK, rdev, &resfh);
break;
case NF4CHR:
@@ -828,26 +797,26 @@ nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
goto out_umask;
status = nfsd_create(rqstp, &cstate->current_fh,
create->cr_name, create->cr_namelen,
- &create->cr_iattr,S_IFCHR, rdev, &resfh);
+ &attrs, S_IFCHR, rdev, &resfh);
break;
case NF4SOCK:
status = nfsd_create(rqstp, &cstate->current_fh,
create->cr_name, create->cr_namelen,
- &create->cr_iattr, S_IFSOCK, 0, &resfh);
+ &attrs, S_IFSOCK, 0, &resfh);
break;
case NF4FIFO:
status = nfsd_create(rqstp, &cstate->current_fh,
create->cr_name, create->cr_namelen,
- &create->cr_iattr, S_IFIFO, 0, &resfh);
+ &attrs, S_IFIFO, 0, &resfh);
break;
case NF4DIR:
create->cr_iattr.ia_valid &= ~ATTR_SIZE;
status = nfsd_create(rqstp, &cstate->current_fh,
create->cr_name, create->cr_namelen,
- &create->cr_iattr, S_IFDIR, 0, &resfh);
+ &attrs, S_IFDIR, 0, &resfh);
break;
default:
@@ -857,20 +826,17 @@ nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (status)
goto out;
- if (create->cr_label.len)
- nfsd4_security_inode_setsecctx(&resfh, &create->cr_label, create->cr_bmval);
-
- if (create->cr_acl != NULL)
- do_set_nfs4_acl(rqstp, &resfh, create->cr_acl,
- create->cr_bmval);
-
- fh_unlock(&cstate->current_fh);
+ if (attrs.na_labelerr)
+ create->cr_bmval[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
+ if (attrs.na_aclerr)
+ create->cr_bmval[0] &= ~FATTR4_WORD0_ACL;
set_change_info(&create->cr_cinfo, &cstate->current_fh);
fh_dup2(&cstate->current_fh, &resfh);
out:
fh_put(&resfh);
out_umask:
current->fs->umask = 0;
+ nfsd_attrs_free(&attrs);
return status;
}
@@ -1043,10 +1009,8 @@ nfsd4_remove(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
return nfserr_grace;
status = nfsd_unlink(rqstp, &cstate->current_fh, 0,
remove->rm_name, remove->rm_namelen);
- if (!status) {
- fh_unlock(&cstate->current_fh);
+ if (!status)
set_change_info(&remove->rm_cinfo, &cstate->current_fh);
- }
return status;
}
@@ -1086,7 +1050,6 @@ nfsd4_secinfo(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
&exp, &dentry);
if (err)
return err;
- fh_unlock(&cstate->current_fh);
if (d_really_is_negative(dentry)) {
exp_put(exp);
err = nfserr_noent;
@@ -1141,6 +1104,11 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_setattr *setattr = &u->setattr;
+ struct nfsd_attrs attrs = {
+ .na_iattr = &setattr->sa_iattr,
+ .na_seclabel = &setattr->sa_label,
+ };
+ struct inode *inode;
__be32 status = nfs_ok;
int err;
@@ -1163,19 +1131,18 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (status)
goto out;
- if (setattr->sa_acl != NULL)
- status = nfsd4_set_nfs4_acl(rqstp, &cstate->current_fh,
- setattr->sa_acl);
- if (status)
- goto out;
- if (setattr->sa_label.len)
- status = nfsd4_set_nfs4_label(rqstp, &cstate->current_fh,
- &setattr->sa_label);
+ inode = cstate->current_fh.fh_dentry->d_inode;
+ status = nfsd4_acl_to_attr(S_ISDIR(inode->i_mode) ? NF4DIR : NF4REG,
+ setattr->sa_acl, &attrs);
+
if (status)
goto out;
- status = nfsd_setattr(rqstp, &cstate->current_fh, &setattr->sa_iattr,
+ status = nfsd_setattr(rqstp, &cstate->current_fh, &attrs,
0, (time64_t)0);
+ if (!status)
+ status = nfserrno(attrs.na_labelerr);
out:
+ nfsd_attrs_free(&attrs);
fh_drop_write(&cstate->current_fh);
return status;
}
@@ -1285,30 +1252,17 @@ out:
return status;
}
-void nfs4_put_copy(struct nfsd4_copy *copy)
+static void nfs4_put_copy(struct nfsd4_copy *copy)
{
if (!refcount_dec_and_test(&copy->refcount))
return;
+ kfree(copy->cp_src);
kfree(copy);
}
-static bool
-check_and_set_stop_copy(struct nfsd4_copy *copy)
-{
- bool value;
-
- spin_lock(&copy->cp_clp->async_lock);
- value = copy->stopped;
- if (!copy->stopped)
- copy->stopped = true;
- spin_unlock(&copy->cp_clp->async_lock);
- return value;
-}
-
static void nfsd4_stop_copy(struct nfsd4_copy *copy)
{
- /* only 1 thread should stop the copy */
- if (!check_and_set_stop_copy(copy))
+ if (!test_and_set_bit(NFSD4_COPY_F_STOPPED, &copy->cp_flags))
kthread_stop(copy->copy_task);
nfs4_put_copy(copy);
}
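Replacing the spinlock-protected check_and_set_stop_copy() with test_and_set_bit() preserves the guarantee that only one task tears the copy down: the bit operation is atomic, so exactly one racing caller observes the bit as previously clear. A minimal illustration (structure and flag names invented):

        #include <linux/bitops.h>
        #include <linux/kthread.h>

        #define FOO_F_STOPPED   0       /* invented flag bit */

        struct foo {
                unsigned long           flags;
                struct task_struct      *worker;
        };

        /* May be called concurrently; only the first caller stops the worker. */
        static void foo_stop(struct foo *foo)
        {
                if (!test_and_set_bit(FOO_F_STOPPED, &foo->flags))
                        kthread_stop(foo->worker);
        }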
@@ -1389,7 +1343,7 @@ try_again:
return 0;
}
if (work) {
- strncpy(work->nsui_ipaddr, ipaddr, sizeof(work->nsui_ipaddr));
+ strlcpy(work->nsui_ipaddr, ipaddr, sizeof(work->nsui_ipaddr) - 1);
refcount_set(&work->nsui_refcnt, 2);
work->nsui_busy = true;
list_add_tail(&work->nsui_list, &nn->nfsd_ssc_mount_list);
@@ -1549,7 +1503,7 @@ nfsd4_setup_inter_ssc(struct svc_rqst *rqstp,
if (status)
goto out;
- status = nfsd4_interssc_connect(&copy->cp_src, rqstp, mount);
+ status = nfsd4_interssc_connect(copy->cp_src, rqstp, mount);
if (status)
goto out;
@@ -1567,7 +1521,7 @@ out:
}
static void
-nfsd4_cleanup_inter_ssc(struct vfsmount *ss_mnt, struct nfsd_file *src,
+nfsd4_cleanup_inter_ssc(struct vfsmount *ss_mnt, struct file *filp,
struct nfsd_file *dst)
{
bool found = false;
@@ -1576,9 +1530,9 @@ nfsd4_cleanup_inter_ssc(struct vfsmount *ss_mnt, struct nfsd_file *src,
struct nfsd4_ssc_umount_item *ni = NULL;
struct nfsd_net *nn = net_generic(dst->nf_net, nfsd_net_id);
- nfs42_ssc_close(src->nf_file);
+ nfs42_ssc_close(filp);
nfsd_file_put(dst);
- fput(src->nf_file);
+ fput(filp);
if (!nn) {
mntput(ss_mnt);
@@ -1621,7 +1575,7 @@ nfsd4_setup_inter_ssc(struct svc_rqst *rqstp,
}
static void
-nfsd4_cleanup_inter_ssc(struct vfsmount *ss_mnt, struct nfsd_file *src,
+nfsd4_cleanup_inter_ssc(struct vfsmount *ss_mnt, struct file *filp,
struct nfsd_file *dst)
{
}
@@ -1658,9 +1612,10 @@ nfsd4_cleanup_intra_ssc(struct nfsd_file *src, struct nfsd_file *dst)
static void nfsd4_cb_offload_release(struct nfsd4_callback *cb)
{
- struct nfsd4_copy *copy = container_of(cb, struct nfsd4_copy, cp_cb);
+ struct nfsd4_cb_offload *cbo =
+ container_of(cb, struct nfsd4_cb_offload, co_cb);
- nfs4_put_copy(copy);
+ kfree(cbo);
}
static int nfsd4_cb_offload_done(struct nfsd4_callback *cb,
@@ -1677,15 +1632,16 @@ static const struct nfsd4_callback_ops nfsd4_cb_offload_ops = {
static void nfsd4_init_copy_res(struct nfsd4_copy *copy, bool sync)
{
copy->cp_res.wr_stable_how =
- copy->committed ? NFS_FILE_SYNC : NFS_UNSTABLE;
- copy->cp_synchronous = sync;
+ test_bit(NFSD4_COPY_F_COMMITTED, &copy->cp_flags) ?
+ NFS_FILE_SYNC : NFS_UNSTABLE;
+ nfsd4_copy_set_sync(copy, sync);
gen_boot_verifier(&copy->cp_res.wr_verifier, copy->cp_clp->net);
}
-static ssize_t _nfsd_copy_file_range(struct nfsd4_copy *copy)
+static ssize_t _nfsd_copy_file_range(struct nfsd4_copy *copy,
+ struct file *dst,
+ struct file *src)
{
- struct file *dst = copy->nf_dst->nf_file;
- struct file *src = copy->nf_src->nf_file;
errseq_t since;
ssize_t bytes_copied = 0;
u64 bytes_total = copy->cp_count;
@@ -1707,26 +1663,29 @@ static ssize_t _nfsd_copy_file_range(struct nfsd4_copy *copy)
copy->cp_res.wr_bytes_written += bytes_copied;
src_pos += bytes_copied;
dst_pos += bytes_copied;
- } while (bytes_total > 0 && !copy->cp_synchronous);
+ } while (bytes_total > 0 && nfsd4_copy_is_async(copy));
/* for a non-zero asynchronous copy do a commit of data */
- if (!copy->cp_synchronous && copy->cp_res.wr_bytes_written > 0) {
+ if (nfsd4_copy_is_async(copy) && copy->cp_res.wr_bytes_written > 0) {
since = READ_ONCE(dst->f_wb_err);
status = vfs_fsync_range(dst, copy->cp_dst_pos,
copy->cp_res.wr_bytes_written, 0);
if (!status)
status = filemap_check_wb_err(dst->f_mapping, since);
if (!status)
- copy->committed = true;
+ set_bit(NFSD4_COPY_F_COMMITTED, &copy->cp_flags);
}
return bytes_copied;
}
-static __be32 nfsd4_do_copy(struct nfsd4_copy *copy, bool sync)
+static __be32 nfsd4_do_copy(struct nfsd4_copy *copy,
+ struct file *src, struct file *dst,
+ bool sync)
{
__be32 status;
ssize_t bytes;
- bytes = _nfsd_copy_file_range(copy);
+ bytes = _nfsd_copy_file_range(copy, dst, src);
+
/* for async copy, we ignore the error, client can always retry
* to get the error
*/
@@ -1736,13 +1695,6 @@ static __be32 nfsd4_do_copy(struct nfsd4_copy *copy, bool sync)
nfsd4_init_copy_res(copy, sync);
status = nfs_ok;
}
-
- if (!copy->cp_intra) /* Inter server SSC */
- nfsd4_cleanup_inter_ssc(copy->ss_mnt, copy->nf_src,
- copy->nf_dst);
- else
- nfsd4_cleanup_intra_ssc(copy->nf_src, copy->nf_dst);
-
return status;
}
@@ -1751,17 +1703,17 @@ static void dup_copy_fields(struct nfsd4_copy *src, struct nfsd4_copy *dst)
dst->cp_src_pos = src->cp_src_pos;
dst->cp_dst_pos = src->cp_dst_pos;
dst->cp_count = src->cp_count;
- dst->cp_synchronous = src->cp_synchronous;
+ dst->cp_flags = src->cp_flags;
memcpy(&dst->cp_res, &src->cp_res, sizeof(src->cp_res));
memcpy(&dst->fh, &src->fh, sizeof(src->fh));
dst->cp_clp = src->cp_clp;
dst->nf_dst = nfsd_file_get(src->nf_dst);
- dst->cp_intra = src->cp_intra;
- if (src->cp_intra) /* for inter, file_src doesn't exist yet */
+ /* for inter, nf_src doesn't exist yet */
+ if (!nfsd4_ssc_is_inter(src))
dst->nf_src = nfsd_file_get(src->nf_src);
memcpy(&dst->cp_stateid, &src->cp_stateid, sizeof(src->cp_stateid));
- memcpy(&dst->cp_src, &src->cp_src, sizeof(struct nl4_server));
+ memcpy(dst->cp_src, src->cp_src, sizeof(struct nl4_server));
memcpy(&dst->stateid, &src->stateid, sizeof(src->stateid));
memcpy(&dst->c_fh, &src->c_fh, sizeof(src->c_fh));
dst->ss_mnt = src->ss_mnt;
@@ -1771,7 +1723,7 @@ static void cleanup_async_copy(struct nfsd4_copy *copy)
{
nfs4_free_copy_state(copy);
nfsd_file_put(copy->nf_dst);
- if (copy->cp_intra)
+ if (!nfsd4_ssc_is_inter(copy))
nfsd_file_put(copy->nf_src);
spin_lock(&copy->cp_clp->async_lock);
list_del(&copy->copies);
@@ -1779,45 +1731,58 @@ static void cleanup_async_copy(struct nfsd4_copy *copy)
nfs4_put_copy(copy);
}
+static void nfsd4_send_cb_offload(struct nfsd4_copy *copy, __be32 nfserr)
+{
+ struct nfsd4_cb_offload *cbo;
+
+ cbo = kzalloc(sizeof(*cbo), GFP_KERNEL);
+ if (!cbo)
+ return;
+
+ memcpy(&cbo->co_res, &copy->cp_res, sizeof(copy->cp_res));
+ memcpy(&cbo->co_fh, &copy->fh, sizeof(copy->fh));
+ cbo->co_nfserr = nfserr;
+
+ nfsd4_init_cb(&cbo->co_cb, copy->cp_clp, &nfsd4_cb_offload_ops,
+ NFSPROC4_CLNT_CB_OFFLOAD);
+ trace_nfsd_cb_offload(copy->cp_clp, &cbo->co_res.cb_stateid,
+ &cbo->co_fh, copy->cp_count, nfserr);
+ nfsd4_run_cb(&cbo->co_cb);
+}
+
+/**
+ * nfsd4_do_async_copy - kthread function for background server-side COPY
+ * @data: arguments for COPY operation
+ *
+ * Return values:
+ * %0: Copy operation is done.
+ */
static int nfsd4_do_async_copy(void *data)
{
struct nfsd4_copy *copy = (struct nfsd4_copy *)data;
- struct nfsd4_copy *cb_copy;
+ __be32 nfserr;
- if (!copy->cp_intra) { /* Inter server SSC */
- copy->nf_src = kzalloc(sizeof(struct nfsd_file), GFP_KERNEL);
- if (!copy->nf_src) {
- copy->nfserr = nfserr_serverfault;
- nfsd4_interssc_disconnect(copy->ss_mnt);
- goto do_callback;
- }
- copy->nf_src->nf_file = nfs42_ssc_open(copy->ss_mnt, &copy->c_fh,
- &copy->stateid);
- if (IS_ERR(copy->nf_src->nf_file)) {
- copy->nfserr = nfserr_offload_denied;
+ if (nfsd4_ssc_is_inter(copy)) {
+ struct file *filp;
+
+ filp = nfs42_ssc_open(copy->ss_mnt, &copy->c_fh,
+ &copy->stateid);
+ if (IS_ERR(filp)) {
+ nfserr = nfserr_offload_denied;
nfsd4_interssc_disconnect(copy->ss_mnt);
goto do_callback;
}
+ nfserr = nfsd4_do_copy(copy, filp, copy->nf_dst->nf_file,
+ false);
+ nfsd4_cleanup_inter_ssc(copy->ss_mnt, filp, copy->nf_dst);
+ } else {
+ nfserr = nfsd4_do_copy(copy, copy->nf_src->nf_file,
+ copy->nf_dst->nf_file, false);
+ nfsd4_cleanup_intra_ssc(copy->nf_src, copy->nf_dst);
}
- copy->nfserr = nfsd4_do_copy(copy, 0);
do_callback:
- cb_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL);
- if (!cb_copy)
- goto out;
- refcount_set(&cb_copy->refcount, 1);
- memcpy(&cb_copy->cp_res, &copy->cp_res, sizeof(copy->cp_res));
- cb_copy->cp_clp = copy->cp_clp;
- cb_copy->nfserr = copy->nfserr;
- memcpy(&cb_copy->fh, &copy->fh, sizeof(copy->fh));
- nfsd4_init_cb(&cb_copy->cp_cb, cb_copy->cp_clp,
- &nfsd4_cb_offload_ops, NFSPROC4_CLNT_CB_OFFLOAD);
- trace_nfsd_cb_offload(copy->cp_clp, &copy->cp_res.cb_stateid,
- &copy->fh, copy->cp_count, copy->nfserr);
- nfsd4_run_cb(&cb_copy->cp_cb);
-out:
- if (!copy->cp_intra)
- kfree(copy->nf_src);
+ nfsd4_send_cb_offload(copy, nfserr);
cleanup_async_copy(copy);
return 0;
}
@@ -1830,8 +1795,8 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
__be32 status;
struct nfsd4_copy *async_copy = NULL;
- if (!copy->cp_intra) { /* Inter server SSC */
- if (!inter_copy_offload_enable || copy->cp_synchronous) {
+ if (nfsd4_ssc_is_inter(copy)) {
+ if (!inter_copy_offload_enable || nfsd4_copy_is_sync(copy)) {
status = nfserr_notsupp;
goto out;
}
@@ -1848,13 +1813,16 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
copy->cp_clp = cstate->clp;
memcpy(&copy->fh, &cstate->current_fh.fh_handle,
sizeof(struct knfsd_fh));
- if (!copy->cp_synchronous) {
+ if (nfsd4_copy_is_async(copy)) {
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
status = nfserrno(-ENOMEM);
async_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL);
if (!async_copy)
goto out_err;
+ async_copy->cp_src = kmalloc(sizeof(*async_copy->cp_src), GFP_KERNEL);
+ if (!async_copy->cp_src)
+ goto out_err;
if (!nfs4_init_copy_state(nn, copy))
goto out_err;
refcount_set(&async_copy->refcount, 1);
@@ -1872,7 +1840,9 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
wake_up_process(async_copy->copy_task);
status = nfs_ok;
} else {
- status = nfsd4_do_copy(copy, 1);
+ status = nfsd4_do_copy(copy, copy->nf_src->nf_file,
+ copy->nf_dst->nf_file, true);
+ nfsd4_cleanup_intra_ssc(copy->nf_src, copy->nf_dst);
}
out:
return status;
@@ -1880,7 +1850,7 @@ out_err:
if (async_copy)
cleanup_async_copy(async_copy);
status = nfserrno(-ENOMEM);
- if (!copy->cp_intra)
+ if (nfsd4_ssc_is_inter(copy))
nfsd4_interssc_disconnect(copy->ss_mnt);
goto out;
}
@@ -1953,9 +1923,9 @@ nfsd4_copy_notify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
/* For now, only return one server address in cpn_src, the
* address used by the client to connect to this server.
*/
- cn->cpn_src.nl4_type = NL4_NETADDR;
+ cn->cpn_src->nl4_type = NL4_NETADDR;
status = nfsd4_set_netaddr((struct sockaddr *)&rqstp->rq_daddr,
- &cn->cpn_src.u.nl4_addr);
+ &cn->cpn_src->u.nl4_addr);
WARN_ON_ONCE(status);
if (status) {
nfs4_put_cpntf_state(nn, cps);
@@ -2609,7 +2579,7 @@ check_if_stalefh_allowed(struct nfsd4_compoundargs *args)
return;
}
putfh = (struct nfsd4_putfh *)&saved_op->u;
- if (!copy->cp_intra)
+ if (nfsd4_ssc_is_inter(copy))
putfh->no_verify = true;
}
}
@@ -2711,7 +2681,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
if (op->opdesc->op_flags & OP_MODIFIES_SOMETHING) {
/*
* Don't execute this op if we couldn't encode a
- * succesful reply:
+ * successful reply:
*/
u32 plen = op->opdesc->op_rsize_bop(rqstp, op);
/*
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 9409a0dc1b76..c5d199d7e6b4 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -820,9 +820,9 @@ static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
swap(f2, fp->fi_fds[O_RDWR]);
spin_unlock(&fp->fi_lock);
if (f1)
- nfsd_file_put(f1);
+ nfsd_file_close(f1);
if (f2)
- nfsd_file_put(f2);
+ nfsd_file_close(f2);
}
}
@@ -1131,7 +1131,6 @@ static void block_delegations(struct knfsd_fh *fh)
static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
- struct svc_fh *current_fh,
struct nfs4_clnt_odstate *odstate)
{
struct nfs4_delegation *dp;
@@ -1141,7 +1140,7 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
n = atomic_long_inc_return(&num_delegations);
if (n < 0 || n > max_delegations)
goto out_dec;
- if (delegation_blocked(&current_fh->fh_handle))
+ if (delegation_blocked(&fp->fi_fhandle))
goto out_dec;
dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
if (dp == NULL)
@@ -2053,11 +2052,16 @@ STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
* This type of memory management is somewhat inefficient, but we use it
* anyway since SETCLIENTID is not a common operation.
*/
-static struct nfs4_client *alloc_client(struct xdr_netobj name)
+static struct nfs4_client *alloc_client(struct xdr_netobj name,
+ struct nfsd_net *nn)
{
struct nfs4_client *clp;
int i;
+ if (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients) {
+ mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
+ return NULL;
+ }
clp = kmem_cache_zalloc(client_slab, GFP_KERNEL);
if (clp == NULL)
return NULL;
@@ -2076,6 +2080,7 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
atomic_set(&clp->cl_rpc_users, 0);
clp->cl_cb_state = NFSD4_CB_UNKNOWN;
clp->cl_state = NFSD4_ACTIVE;
+ atomic_inc(&nn->nfs4_client_count);
atomic_set(&clp->cl_delegs_in_recall, 0);
INIT_LIST_HEAD(&clp->cl_idhash);
INIT_LIST_HEAD(&clp->cl_openowners);
@@ -2183,6 +2188,7 @@ static __be32 mark_client_expired_locked(struct nfs4_client *clp)
static void
__destroy_client(struct nfs4_client *clp)
{
+ struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
int i;
struct nfs4_openowner *oo;
struct nfs4_delegation *dp;
@@ -2226,6 +2232,7 @@ __destroy_client(struct nfs4_client *clp)
nfsd4_shutdown_callback(clp);
if (clp->cl_cb_conn.cb_xprt)
svc_xprt_put(clp->cl_cb_conn.cb_xprt);
+ atomic_add_unless(&nn->nfs4_client_count, -1, 0);
free_client(clp);
wake_up_all(&expiry_wq);
}
@@ -2564,7 +2571,7 @@ static void nfs4_show_fname(struct seq_file *s, struct nfsd_file *f)
static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f)
{
- struct inode *inode = f->nf_inode;
+ struct inode *inode = file_inode(f->nf_file);
seq_printf(s, "superblock: \"%02x:%02x:%ld\"",
MAJOR(inode->i_sb->s_dev),
@@ -2848,7 +2855,7 @@ static struct nfs4_client *create_client(struct xdr_netobj name,
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct dentry *dentries[ARRAY_SIZE(client_files)];
- clp = alloc_client(name);
+ clp = alloc_client(name, nn);
if (clp == NULL)
return NULL;
@@ -4330,6 +4337,27 @@ out:
return -ENOMEM;
}
+void nfsd4_init_leases_net(struct nfsd_net *nn)
+{
+ struct sysinfo si;
+ u64 max_clients;
+
+ nn->nfsd4_lease = 90; /* default lease time */
+ nn->nfsd4_grace = 90;
+ nn->somebody_reclaimed = false;
+ nn->track_reclaim_completes = false;
+ nn->clverifier_counter = prandom_u32();
+ nn->clientid_base = prandom_u32();
+ nn->clientid_counter = nn->clientid_base + 1;
+ nn->s2s_cp_cl_id = nn->clientid_counter++;
+
+ atomic_set(&nn->nfs4_client_count, 0);
+ si_meminfo(&si);
+ max_clients = (u64)si.totalram * si.mem_unit / (1024 * 1024 * 1024);
+ max_clients *= NFS4_CLIENTS_PER_GB;
+ nn->nfs4_max_clients = max_t(int, max_clients, NFS4_CLIENTS_PER_GB);
+}
+
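nfsd4_init_leases_net() derives the NFSv4 client limit from installed RAM: the memory size in GiB times NFS4_CLIENTS_PER_GB, floored at one GiB's worth on small machines. A worked example of the arithmetic as ordinary userspace C, assuming NFS4_CLIENTS_PER_GB is 1024 (check the nfsd headers for the actual constant):

        #include <stdio.h>
        #include <stdint.h>

        #define CLIENTS_PER_GB  1024ULL         /* assumed NFS4_CLIENTS_PER_GB */

        int main(void)
        {
                uint64_t totalram_bytes = 16ULL << 30;          /* e.g. a 16 GiB server */
                uint64_t gb = totalram_bytes >> 30;             /* whole GiB of RAM */
                uint64_t max_clients = gb * CLIENTS_PER_GB;

                if (max_clients < CLIENTS_PER_GB)               /* floor on tiny machines */
                        max_clients = CLIENTS_PER_GB;
                printf("max NFSv4 clients: %llu\n",             /* 16384 for 16 GiB */
                       (unsigned long long)max_clients);
                return 0;
        }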
static void init_nfs4_replay(struct nfs4_replay *rp)
{
rp->rp_status = nfserr_serverfault;
@@ -5032,11 +5060,14 @@ nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
.ia_valid = ATTR_SIZE,
.ia_size = 0,
};
+ struct nfsd_attrs attrs = {
+ .na_iattr = &iattr,
+ };
if (!open->op_truncate)
return 0;
if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
return nfserr_inval;
- return nfsd_setattr(rqstp, fh, &iattr, 0, (time64_t)0);
+ return nfsd_setattr(rqstp, fh, &attrs, 0, (time64_t)0);
}
static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
@@ -5104,6 +5135,7 @@ static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
goto out_put_access;
nf->nf_file = open->op_filp;
open->op_filp = NULL;
+ trace_nfsd_file_create(rqstp, access, nf);
}
spin_lock(&fp->fi_lock);
@@ -5259,11 +5291,41 @@ static int nfsd4_check_conflicting_opens(struct nfs4_client *clp,
return 0;
}
+/*
+ * It's possible that, between opening the dentry and setting the delegation,
+ * the file has been renamed or unlinked. Redo the lookup to verify that this
+ * hasn't happened.
+ */
+static int
+nfsd4_verify_deleg_dentry(struct nfsd4_open *open, struct nfs4_file *fp,
+ struct svc_fh *parent)
+{
+ struct svc_export *exp;
+ struct dentry *child;
+ __be32 err;
+
+ err = nfsd_lookup_dentry(open->op_rqstp, parent,
+ open->op_fname, open->op_fnamelen,
+ &exp, &child);
+
+ if (err)
+ return -EAGAIN;
+
+ dput(child);
+ if (child != file_dentry(fp->fi_deleg_file->nf_file))
+ return -EAGAIN;
+
+ return 0;
+}
+
static struct nfs4_delegation *
-nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
- struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)
+nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
+ struct svc_fh *parent)
{
int status = 0;
+ struct nfs4_client *clp = stp->st_stid.sc_client;
+ struct nfs4_file *fp = stp->st_stid.sc_file;
+ struct nfs4_clnt_odstate *odstate = stp->st_clnt_odstate;
struct nfs4_delegation *dp;
struct nfsd_file *nf;
struct file_lock *fl;
@@ -5305,7 +5367,7 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
return ERR_PTR(status);
status = -ENOMEM;
- dp = alloc_init_deleg(clp, fp, fh, odstate);
+ dp = alloc_init_deleg(clp, fp, odstate);
if (!dp)
goto out_delegees;
@@ -5318,6 +5380,13 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
locks_free_lock(fl);
if (status)
goto out_clnt_odstate;
+
+ if (parent) {
+ status = nfsd4_verify_deleg_dentry(open, fp, parent);
+ if (status)
+ goto out_unlock;
+ }
+
status = nfsd4_check_conflicting_opens(clp, fp);
if (status)
goto out_unlock;
@@ -5373,12 +5442,13 @@ static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
* proper support for them.
*/
static void
-nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
- struct nfs4_ol_stateid *stp)
+nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
+ struct svc_fh *currentfh)
{
struct nfs4_delegation *dp;
struct nfs4_openowner *oo = openowner(stp->st_stateowner);
struct nfs4_client *clp = stp->st_stid.sc_client;
+ struct svc_fh *parent = NULL;
int cb_up;
int status = 0;
@@ -5392,6 +5462,8 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
goto out_no_deleg;
break;
case NFS4_OPEN_CLAIM_NULL:
+ parent = currentfh;
+ fallthrough;
case NFS4_OPEN_CLAIM_FH:
/*
* Let's not give out any delegations till everyone's
@@ -5406,7 +5478,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
default:
goto out_no_deleg;
}
- dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate);
+ dp = nfs4_set_delegation(open, stp, parent);
if (IS_ERR(dp))
goto out_no_deleg;
@@ -5538,7 +5610,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
* Attempt to hand out a delegation. No error return, because the
* OPEN succeeds even if we fail.
*/
- nfs4_open_delegation(current_fh, open, stp);
+ nfs4_open_delegation(open, stp, &resp->cstate.current_fh);
nodeleg:
status = nfs_ok;
trace_nfsd_open(&stp->st_stid.sc_stateid);
@@ -5792,9 +5864,12 @@ static void
nfs4_get_client_reaplist(struct nfsd_net *nn, struct list_head *reaplist,
struct laundry_time *lt)
{
+ unsigned int maxreap, reapcnt = 0;
struct list_head *pos, *next;
struct nfs4_client *clp;
+ maxreap = (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients) ?
+ NFSD_CLIENT_MAX_TRIM_PER_RUN : 0;
INIT_LIST_HEAD(reaplist);
spin_lock(&nn->client_lock);
list_for_each_safe(pos, next, &nn->client_lru) {
@@ -5805,14 +5880,15 @@ nfs4_get_client_reaplist(struct nfsd_net *nn, struct list_head *reaplist,
break;
if (!atomic_read(&clp->cl_rpc_users))
clp->cl_state = NFSD4_COURTESY;
- if (!client_has_state(clp) ||
- ktime_get_boottime_seconds() >=
- (clp->cl_time + NFSD_COURTESY_CLIENT_TIMEOUT))
+ if (!client_has_state(clp))
goto exp_client;
- if (nfs4_anylock_blockers(clp)) {
+ if (!nfs4_anylock_blockers(clp))
+ if (reapcnt >= maxreap)
+ continue;
exp_client:
- if (!mark_client_expired_locked(clp))
- list_add(&clp->cl_lru, reaplist);
+ if (!mark_client_expired_locked(clp)) {
+ list_add(&clp->cl_lru, reaplist);
+ reapcnt++;
}
}
spin_unlock(&nn->client_lock);
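The reworked loop only starts trimming courtesy clients once the client count reaches nn->nfs4_max_clients, and then expires at most NFSD_CLIENT_MAX_TRIM_PER_RUN of them per laundromat pass. A condensed sketch of the per-client decision as shown above (timing checks such as state expiry are left out):

        /* Sketch: mirror of the reaplist branch structure above. */
        static bool should_reap(bool has_state, bool has_lock_blockers,
                                unsigned int reapcnt, unsigned int maxreap)
        {
                if (!has_state)
                        return true;            /* stateless clients are always expired */
                if (!has_lock_blockers && reapcnt >= maxreap)
                        return false;           /* over budget: keep this courtesy client */
                return true;
        }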
@@ -7321,21 +7397,22 @@ out:
static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
{
struct nfsd_file *nf;
+ struct inode *inode;
__be32 err;
err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
if (err)
return err;
- fh_lock(fhp); /* to block new leases till after test_lock: */
- err = nfserrno(nfsd_open_break_lease(fhp->fh_dentry->d_inode,
- NFSD_MAY_READ));
+ inode = fhp->fh_dentry->d_inode;
+ inode_lock(inode); /* to block new leases till after test_lock: */
+ err = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
if (err)
goto out;
lock->fl_file = nf->nf_file;
err = nfserrno(vfs_test_lock(nf->nf_file, lock));
lock->fl_file = NULL;
out:
- fh_unlock(fhp);
+ inode_unlock(inode);
nfsd_file_put(nf);
return err;
}
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 2acea7792bb2..1e9690a061ec 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1810,7 +1810,7 @@ nfsd4_decode_test_stateid(struct nfsd4_compoundargs *argp, struct nfsd4_test_sta
for (i = 0; i < test_stateid->ts_num_ids; i++) {
stateid = svcxdr_tmpalloc(argp, sizeof(*stateid));
if (!stateid)
- return nfserrno(-ENOMEM); /* XXX: not jukebox? */
+ return nfserr_jukebox;
INIT_LIST_HEAD(&stateid->ts_id_list);
list_add_tail(&stateid->ts_id_list, &test_stateid->ts_stateid_list);
status = nfsd4_decode_stateid4(argp, &stateid->ts_id_stateid);
@@ -1896,8 +1896,8 @@ static __be32 nfsd4_decode_nl4_server(struct nfsd4_compoundargs *argp,
static __be32
nfsd4_decode_copy(struct nfsd4_compoundargs *argp, struct nfsd4_copy *copy)
{
+ u32 consecutive, i, count, sync;
struct nl4_server *ns_dummy;
- u32 consecutive, i, count;
__be32 status;
status = nfsd4_decode_stateid4(argp, &copy->cp_src_stateid);
@@ -1915,25 +1915,28 @@ nfsd4_decode_copy(struct nfsd4_compoundargs *argp, struct nfsd4_copy *copy)
/* ca_consecutive: we always do consecutive copies */
if (xdr_stream_decode_u32(argp->xdr, &consecutive) < 0)
return nfserr_bad_xdr;
- if (xdr_stream_decode_u32(argp->xdr, &copy->cp_synchronous) < 0)
+ if (xdr_stream_decode_bool(argp->xdr, &sync) < 0)
return nfserr_bad_xdr;
+ nfsd4_copy_set_sync(copy, sync);
if (xdr_stream_decode_u32(argp->xdr, &count) < 0)
return nfserr_bad_xdr;
- copy->cp_intra = false;
+ copy->cp_src = svcxdr_tmpalloc(argp, sizeof(*copy->cp_src));
+ if (copy->cp_src == NULL)
+ return nfserr_jukebox;
if (count == 0) { /* intra-server copy */
- copy->cp_intra = true;
+ __set_bit(NFSD4_COPY_F_INTRA, &copy->cp_flags);
return nfs_ok;
}
/* decode all the supplied server addresses but use only the first */
- status = nfsd4_decode_nl4_server(argp, &copy->cp_src);
+ status = nfsd4_decode_nl4_server(argp, copy->cp_src);
if (status)
return status;
ns_dummy = kmalloc(sizeof(struct nl4_server), GFP_KERNEL);
if (ns_dummy == NULL)
- return nfserrno(-ENOMEM); /* XXX: jukebox? */
+ return nfserr_jukebox;
for (i = 0; i < count - 1; i++) {
status = nfsd4_decode_nl4_server(argp, ns_dummy);
if (status) {
@@ -1952,10 +1955,17 @@ nfsd4_decode_copy_notify(struct nfsd4_compoundargs *argp,
{
__be32 status;
+ cn->cpn_src = svcxdr_tmpalloc(argp, sizeof(*cn->cpn_src));
+ if (cn->cpn_src == NULL)
+ return nfserr_jukebox;
+ cn->cpn_dst = svcxdr_tmpalloc(argp, sizeof(*cn->cpn_dst));
+ if (cn->cpn_dst == NULL)
+ return nfserr_jukebox;
+
status = nfsd4_decode_stateid4(argp, &cn->cpn_src_stateid);
if (status)
return status;
- return nfsd4_decode_nl4_server(argp, &cn->cpn_dst);
+ return nfsd4_decode_nl4_server(argp, cn->cpn_dst);
}
static __be32
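Both COPY and COPY_NOTIFY now allocate their nl4_server arguments with svcxdr_tmpalloc(), so the memory is released automatically with the compound rather than needing an explicit kfree(). A small sketch of the pattern (the wrapper function here is illustrative, not part of the patch):

        /* Sketch: decode-time scratch that lives exactly as long as the compound. */
        static __be32 decode_nl4_server_alloc(struct nfsd4_compoundargs *argp,
                                              struct nl4_server **nsp)
        {
                *nsp = svcxdr_tmpalloc(argp, sizeof(**nsp));
                if (*nsp == NULL)
                        return nfserr_jukebox;  /* short on memory: client retries */
                return nfsd4_decode_nl4_server(argp, *nsp);
        }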
@@ -2828,10 +2838,9 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
struct kstat stat;
struct svc_fh *tempfh = NULL;
struct kstatfs statfs;
- __be32 *p;
+ __be32 *p, *attrlen_p;
int starting_len = xdr->buf->len;
int attrlen_offset;
- __be32 attrlen;
u32 dummy;
u64 dummy64;
u32 rdattr_err = 0;
@@ -2919,10 +2928,9 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
goto out;
attrlen_offset = xdr->buf->len;
- p = xdr_reserve_space(xdr, 4);
- if (!p)
+ attrlen_p = xdr_reserve_space(xdr, XDR_UNIT);
+ if (!attrlen_p)
goto out_resource;
- p++; /* to be backfilled later */
if (bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) {
u32 supp[3];
@@ -3344,8 +3352,7 @@ out_acl:
*p++ = cpu_to_be32(err == 0);
}
- attrlen = htonl(xdr->buf->len - attrlen_offset - 4);
- write_bytes_to_xdr_buf(xdr->buf, attrlen_offset, &attrlen, 4);
+ *attrlen_p = cpu_to_be32(xdr->buf->len - attrlen_offset - XDR_UNIT);
status = nfs_ok;
out:
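The attrlen word is now backfilled through the pointer returned by xdr_reserve_space() instead of recomputing an offset for write_bytes_to_xdr_buf(). The general reserve-then-backfill shape, sketched outside nfsd with illustrative names (this relies on the reserved word staying addressable, which holds for the server-side xdr_stream):

        /* Sketch: reserve a 4-byte length slot, encode the body, then backfill it. */
        static int encode_with_length_prefix(struct xdr_stream *xdr)
        {
                unsigned int start = xdr->buf->len;
                __be32 *lenp;

                lenp = xdr_reserve_space(xdr, XDR_UNIT);
                if (!lenp)
                        return -EMSGSIZE;

                /* ... encode the variable-length body here ... */

                *lenp = cpu_to_be32(xdr->buf->len - start - XDR_UNIT);
                return 0;
        }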
@@ -3882,16 +3889,15 @@ static __be32 nfsd4_encode_splice_read(
struct xdr_stream *xdr = resp->xdr;
struct xdr_buf *buf = xdr->buf;
int status, space_left;
- u32 eof;
__be32 nfserr;
- __be32 *p = xdr->p - 2;
/* Make sure there will be room for padding if needed */
if (xdr->end - xdr->p < 1)
return nfserr_resource;
nfserr = nfsd_splice_read(read->rd_rqstp, read->rd_fhp,
- file, read->rd_offset, &maxcount, &eof);
+ file, read->rd_offset, &maxcount,
+ &read->rd_eof);
read->rd_length = maxcount;
if (nfserr)
goto out_err;
@@ -3902,9 +3908,6 @@ static __be32 nfsd4_encode_splice_read(
goto out_err;
}
- *(p++) = htonl(eof);
- *(p++) = htonl(maxcount);
-
buf->page_len = maxcount;
buf->len += maxcount;
xdr->page_ptr += (buf->page_base + maxcount + PAGE_SIZE - 1)
@@ -3946,11 +3949,9 @@ static __be32 nfsd4_encode_readv(struct nfsd4_compoundres *resp,
struct file *file, unsigned long maxcount)
{
struct xdr_stream *xdr = resp->xdr;
- u32 eof;
- int starting_len = xdr->buf->len - 8;
+ unsigned int starting_len = xdr->buf->len;
+ __be32 zero = xdr_zero;
__be32 nfserr;
- __be32 tmp;
- int pad;
read->rd_vlen = xdr_reserve_space_vec(xdr, resp->rqstp->rq_vec, maxcount);
if (read->rd_vlen < 0)
@@ -3958,31 +3959,24 @@ static __be32 nfsd4_encode_readv(struct nfsd4_compoundres *resp,
nfserr = nfsd_readv(resp->rqstp, read->rd_fhp, file, read->rd_offset,
resp->rqstp->rq_vec, read->rd_vlen, &maxcount,
- &eof);
+ &read->rd_eof);
read->rd_length = maxcount;
if (nfserr)
return nfserr;
- if (svc_encode_result_payload(resp->rqstp, starting_len + 8, maxcount))
+ if (svc_encode_result_payload(resp->rqstp, starting_len, maxcount))
return nfserr_io;
- xdr_truncate_encode(xdr, starting_len + 8 + xdr_align_size(maxcount));
-
- tmp = htonl(eof);
- write_bytes_to_xdr_buf(xdr->buf, starting_len , &tmp, 4);
- tmp = htonl(maxcount);
- write_bytes_to_xdr_buf(xdr->buf, starting_len + 4, &tmp, 4);
-
- tmp = xdr_zero;
- pad = (maxcount&3) ? 4 - (maxcount&3) : 0;
- write_bytes_to_xdr_buf(xdr->buf, starting_len + 8 + maxcount,
- &tmp, pad);
- return 0;
+ xdr_truncate_encode(xdr, starting_len + xdr_align_size(maxcount));
+ write_bytes_to_xdr_buf(xdr->buf, starting_len + maxcount, &zero,
+ xdr_pad_size(maxcount));
+ return nfs_ok;
}
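nfsd4_encode_readv() now pads the data tail with xdr_pad_size() after truncating to xdr_align_size(maxcount), since the eof and length words are encoded by the caller. A hedged sketch of that padding step in isolation (the wrapper is illustrative):

        /* Sketch: XDR opaque data is padded to a 4-byte boundary with zero bytes. */
        static void pad_opaque_tail(struct xdr_buf *buf, unsigned int start,
                                    unsigned int len)
        {
                __be32 zero = xdr_zero;

                /* xdr_pad_size(len) is xdr_align_size(len) - len, i.e. 0..3 bytes */
                write_bytes_to_xdr_buf(buf, start + len, &zero, xdr_pad_size(len));
        }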
static __be32
nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_read *read)
{
+ bool splice_ok = test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags);
unsigned long maxcount;
struct xdr_stream *xdr = resp->xdr;
struct file *file;
@@ -3995,11 +3989,10 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
p = xdr_reserve_space(xdr, 8); /* eof flag and byte count */
if (!p) {
- WARN_ON_ONCE(test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags));
+ WARN_ON_ONCE(splice_ok);
return nfserr_resource;
}
- if (resp->xdr->buf->page_len &&
- test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags)) {
+ if (resp->xdr->buf->page_len && splice_ok) {
WARN_ON_ONCE(1);
return nfserr_resource;
}
@@ -4008,31 +4001,30 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
maxcount = min_t(unsigned long, read->rd_length,
(xdr->buf->buflen - xdr->buf->len));
- if (file->f_op->splice_read &&
- test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags))
+ if (file->f_op->splice_read && splice_ok)
nfserr = nfsd4_encode_splice_read(resp, read, file, maxcount);
else
nfserr = nfsd4_encode_readv(resp, read, file, maxcount);
-
- if (nfserr)
+ if (nfserr) {
xdr_truncate_encode(xdr, starting_len);
+ return nfserr;
+ }
- return nfserr;
+ p = xdr_encode_bool(p, read->rd_eof);
+ *p = cpu_to_be32(read->rd_length);
+ return nfs_ok;
}
static __be32
nfsd4_encode_readlink(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_readlink *readlink)
{
- int maxcount;
- __be32 wire_count;
- int zero = 0;
+ __be32 *p, *maxcount_p, zero = xdr_zero;
struct xdr_stream *xdr = resp->xdr;
int length_offset = xdr->buf->len;
- int status;
- __be32 *p;
+ int maxcount, status;
- p = xdr_reserve_space(xdr, 4);
- if (!p)
+ maxcount_p = xdr_reserve_space(xdr, XDR_UNIT);
+ if (!maxcount_p)
return nfserr_resource;
maxcount = PAGE_SIZE;
@@ -4057,14 +4049,11 @@ nfsd4_encode_readlink(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd
nfserr = nfserrno(status);
goto out_err;
}
-
- wire_count = htonl(maxcount);
- write_bytes_to_xdr_buf(xdr->buf, length_offset, &wire_count, 4);
- xdr_truncate_encode(xdr, length_offset + 4 + ALIGN(maxcount, 4));
- if (maxcount & 3)
- write_bytes_to_xdr_buf(xdr->buf, length_offset + 4 + maxcount,
- &zero, 4 - (maxcount&3));
- return 0;
+ *maxcount_p = cpu_to_be32(maxcount);
+ xdr_truncate_encode(xdr, length_offset + 4 + xdr_align_size(maxcount));
+ write_bytes_to_xdr_buf(xdr->buf, length_offset + 4 + maxcount, &zero,
+ xdr_pad_size(maxcount));
+ return nfs_ok;
out_err:
xdr_truncate_encode(xdr, length_offset);
@@ -4715,13 +4704,13 @@ nfsd4_encode_copy(struct nfsd4_compoundres *resp, __be32 nfserr,
__be32 *p;
nfserr = nfsd42_encode_write_res(resp, &copy->cp_res,
- !!copy->cp_synchronous);
+ nfsd4_copy_is_sync(copy));
if (nfserr)
return nfserr;
p = xdr_reserve_space(resp->xdr, 4 + 4);
*p++ = xdr_one; /* cr_consecutive */
- *p++ = cpu_to_be32(copy->cp_synchronous);
+ *p = nfsd4_copy_is_sync(copy) ? xdr_one : xdr_zero;
return 0;
}
@@ -4919,7 +4908,8 @@ nfsd4_encode_copy_notify(struct nfsd4_compoundres *resp, __be32 nfserr,
*p++ = cpu_to_be32(1);
- return nfsd42_encode_nl4_server(resp, &cn->cpn_src);
+ nfserr = nfsd42_encode_nl4_server(resp, cn->cpn_src);
+ return nfserr;
}
static __be32
@@ -5373,8 +5363,7 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
so->so_replay.rp_buf, len);
}
status:
- /* Note that op->status is already in network byte order: */
- write_bytes_to_xdr_buf(xdr->buf, post_err_offset - 4, &op->status, 4);
+ *p = op->status;
}
/*
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 7da88bdc0d6c..9b31e1103e7b 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -176,7 +176,8 @@ int nfsd_reply_cache_init(struct nfsd_net *nn)
nn->nfsd_reply_cache_shrinker.scan_objects = nfsd_reply_cache_scan;
nn->nfsd_reply_cache_shrinker.count_objects = nfsd_reply_cache_count;
nn->nfsd_reply_cache_shrinker.seeks = 1;
- status = register_shrinker(&nn->nfsd_reply_cache_shrinker);
+ status = register_shrinker(&nn->nfsd_reply_cache_shrinker,
+ "nfsd-reply:%s", nn->nfsd_name);
if (status)
goto out_stats_destroy;
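register_shrinker() now takes a printf-style name, which the reply cache uses to distinguish per-netns instances. A hedged sketch of the updated registration call (the callbacks and cache name here are illustrative, not from the patch):

        /* Sketch: shrinker registration with the newer named API. */
        static int my_cache_register_shrinker(struct shrinker *s, const char *id)
        {
                s->count_objects = my_cache_count;      /* illustrative callback */
                s->scan_objects  = my_cache_scan;       /* illustrative callback */
                s->seeks = 1;
                return register_shrinker(s, "my-cache:%s", id);
        }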
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 0621c2faf242..917fa1892fd2 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -25,6 +25,7 @@
#include "state.h"
#include "netns.h"
#include "pnfs.h"
+#include "filecache.h"
/*
* We have a single directory with several nodes in it.
@@ -45,6 +46,7 @@ enum {
NFSD_Ports,
NFSD_MaxBlkSize,
NFSD_MaxConnections,
+ NFSD_Filecache,
NFSD_SupportedEnctypes,
/*
* The below MUST come last. Otherwise we leave a hole in nfsd_files[]
@@ -229,6 +231,13 @@ static const struct file_operations reply_cache_stats_operations = {
.release = single_release,
};
+static const struct file_operations filecache_ops = {
+ .open = nfsd_file_cache_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
/*----------------------------------------------------------------------------*/
/*
* payload - write methods
@@ -633,7 +642,6 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
}
/* Now write current state into reply buffer */
- len = 0;
sep = "";
remaining = SIMPLE_TRANSACTION_LIMIT;
for (num=2 ; num <= 4 ; num++) {
@@ -1371,6 +1379,7 @@ static int nfsd_fill_super(struct super_block *sb, struct fs_context *fc)
[NFSD_Ports] = {"portlist", &transaction_ops, S_IWUSR|S_IRUGO},
[NFSD_MaxBlkSize] = {"max_block_size", &transaction_ops, S_IWUSR|S_IRUGO},
[NFSD_MaxConnections] = {"max_connections", &transaction_ops, S_IWUSR|S_IRUGO},
+ [NFSD_Filecache] = {"filecache", &filecache_ops, S_IRUGO},
#if defined(CONFIG_SUNRPC_GSS) || defined(CONFIG_SUNRPC_GSS_MODULE)
[NFSD_SupportedEnctypes] = {"supported_krb5_enctypes", &supported_enctypes_ops, S_IRUGO},
#endif /* CONFIG_SUNRPC_GSS or CONFIG_SUNRPC_GSS_MODULE */
@@ -1475,14 +1484,7 @@ static __net_init int nfsd_init_net(struct net *net)
retval = nfsd_reply_cache_init(nn);
if (retval)
goto out_drc_error;
- nn->nfsd4_lease = 90; /* default lease time */
- nn->nfsd4_grace = 90;
- nn->somebody_reclaimed = false;
- nn->track_reclaim_completes = false;
- nn->clverifier_counter = prandom_u32();
- nn->clientid_base = prandom_u32();
- nn->clientid_counter = nn->clientid_base + 1;
- nn->s2s_cp_cl_id = nn->clientid_counter++;
+ nfsd4_init_leases_net(nn);
get_random_bytes(&nn->siphash_key, sizeof(nn->siphash_key));
seqlock_init(&nn->writeverf_lock);
@@ -1517,7 +1519,6 @@ static struct pernet_operations nfsd_net_ops = {
static int __init init_nfsd(void)
{
int retval;
- printk(KERN_INFO "Installing knfsd (copyright (C) 1996 okir@monad.swb.de).\n");
retval = nfsd4_init_slabs();
if (retval)
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index 9a8b09afc173..57a468ed85c3 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -341,6 +341,8 @@ void nfsd_lockd_shutdown(void);
#define NFSD_LAUNDROMAT_MINTIMEOUT 1 /* seconds */
#define NFSD_COURTESY_CLIENT_TIMEOUT (24 * 60 * 60) /* seconds */
+#define NFSD_CLIENT_MAX_TRIM_PER_RUN 128
+#define NFS4_CLIENTS_PER_GB 1024
/*
* The following attributes are currently not supported by the NFSv4 server:
@@ -496,12 +498,16 @@ extern void unregister_cld_notifier(void);
extern void nfsd4_ssc_init_umount_work(struct nfsd_net *nn);
#endif
+extern void nfsd4_init_leases_net(struct nfsd_net *nn);
+
#else /* CONFIG_NFSD_V4 */
static inline int nfsd4_is_junction(struct dentry *dentry)
{
return 0;
}
+static inline void nfsd4_init_leases_net(struct nfsd_net *nn) {};
+
#define register_cld_notifier() 0
#define unregister_cld_notifier() do { } while(0)
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index c29baa03dfaf..a5b71526cee0 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -331,8 +331,6 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
struct dentry *dentry;
__be32 error;
- dprintk("nfsd: fh_verify(%s)\n", SVCFH_fmt(fhp));
-
if (!fhp->fh_dentry) {
error = nfsd_set_fh_dentry(rqstp, fhp);
if (error)
@@ -340,6 +338,9 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
}
dentry = fhp->fh_dentry;
exp = fhp->fh_export;
+
+ trace_nfsd_fh_verify(rqstp, fhp, type, access);
+
/*
* We still have to do all these permission checks, even when
* fh_dentry is already set:
@@ -548,7 +549,7 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
if (ref_fh == fhp)
fh_put(ref_fh);
- if (fhp->fh_locked || fhp->fh_dentry) {
+ if (fhp->fh_dentry) {
printk(KERN_ERR "fh_compose: fh %pd2 not initialized!\n",
dentry);
}
@@ -671,6 +672,25 @@ void fh_fill_post_attrs(struct svc_fh *fhp)
nfsd4_change_attribute(&fhp->fh_post_attr, inode);
}
+/**
+ * fh_fill_both_attrs - Fill pre-op and post-op attributes
+ * @fhp: file handle to be updated
+ *
+ * This is used when the directory wasn't changed, but wcc attributes
+ * are needed anyway.
+ */
+void fh_fill_both_attrs(struct svc_fh *fhp)
+{
+ fh_fill_post_attrs(fhp);
+ if (!fhp->fh_post_saved)
+ return;
+ fhp->fh_pre_change = fhp->fh_post_change;
+ fhp->fh_pre_mtime = fhp->fh_post_attr.mtime;
+ fhp->fh_pre_ctime = fhp->fh_post_attr.ctime;
+ fhp->fh_pre_size = fhp->fh_post_attr.size;
+ fhp->fh_pre_saved = true;
+}
+
/*
* Release a file handle.
*/
@@ -680,7 +700,6 @@ fh_put(struct svc_fh *fhp)
struct dentry * dentry = fhp->fh_dentry;
struct svc_export * exp = fhp->fh_export;
if (dentry) {
- fh_unlock(fhp);
fhp->fh_dentry = NULL;
dput(dentry);
fh_clear_pre_post_attrs(fhp);
diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h
index fb9d358a267e..c3ae6414fc5c 100644
--- a/fs/nfsd/nfsfh.h
+++ b/fs/nfsd/nfsfh.h
@@ -81,7 +81,6 @@ typedef struct svc_fh {
struct dentry * fh_dentry; /* validated dentry */
struct svc_export * fh_export; /* export pointer */
- bool fh_locked; /* inode locked by us */
bool fh_want_write; /* remount protection taken */
bool fh_no_wcc; /* no wcc data needed */
bool fh_no_atomic_attr;
@@ -93,7 +92,7 @@ typedef struct svc_fh {
bool fh_post_saved; /* post-op attrs saved */
bool fh_pre_saved; /* pre-op attrs saved */
- /* Pre-op attributes saved during fh_lock */
+ /* Pre-op attributes saved when inode is locked */
__u64 fh_pre_size; /* size before operation */
struct timespec64 fh_pre_mtime; /* mtime before oper */
struct timespec64 fh_pre_ctime; /* ctime before oper */
@@ -103,7 +102,7 @@ typedef struct svc_fh {
*/
u64 fh_pre_change;
- /* Post-op attributes saved in fh_unlock */
+ /* Post-op attributes saved in fh_fill_post_attrs() */
struct kstat fh_post_attr; /* full attrs after operation */
u64 fh_post_change; /* nfsv4 change; see above */
} svc_fh;
@@ -223,8 +222,8 @@ void fh_put(struct svc_fh *);
static __inline__ struct svc_fh *
fh_copy(struct svc_fh *dst, struct svc_fh *src)
{
- WARN_ON(src->fh_dentry || src->fh_locked);
-
+ WARN_ON(src->fh_dentry);
+
*dst = *src;
return dst;
}
@@ -322,52 +321,5 @@ static inline u64 nfsd4_change_attribute(struct kstat *stat,
extern void fh_fill_pre_attrs(struct svc_fh *fhp);
extern void fh_fill_post_attrs(struct svc_fh *fhp);
-
-
-/*
- * Lock a file handle/inode
- * NOTE: both fh_lock and fh_unlock are done "by hand" in
- * vfs.c:nfsd_rename as it needs to grab 2 i_mutex's at once
- * so, any changes here should be reflected there.
- */
-
-static inline void
-fh_lock_nested(struct svc_fh *fhp, unsigned int subclass)
-{
- struct dentry *dentry = fhp->fh_dentry;
- struct inode *inode;
-
- BUG_ON(!dentry);
-
- if (fhp->fh_locked) {
- printk(KERN_WARNING "fh_lock: %pd2 already locked!\n",
- dentry);
- return;
- }
-
- inode = d_inode(dentry);
- inode_lock_nested(inode, subclass);
- fh_fill_pre_attrs(fhp);
- fhp->fh_locked = true;
-}
-
-static inline void
-fh_lock(struct svc_fh *fhp)
-{
- fh_lock_nested(fhp, I_MUTEX_NORMAL);
-}
-
-/*
- * Unlock a file handle/inode
- */
-static inline void
-fh_unlock(struct svc_fh *fhp)
-{
- if (fhp->fh_locked) {
- fh_fill_post_attrs(fhp);
- inode_unlock(d_inode(fhp->fh_dentry));
- fhp->fh_locked = false;
- }
-}
-
+extern void fh_fill_both_attrs(struct svc_fh *fhp);
#endif /* _LINUX_NFSD_NFSFH_H */
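With the fh_lock()/fh_unlock() helpers removed, each call site takes the inode lock itself and brackets the update with the attribute-fill helpers. A condensed sketch of the pattern used in the converted vfs.c paths (do_child_update() is illustrative):

        /* Sketch: the replacement for the old fh_lock()/fh_unlock() pairing. */
        static __be32 update_dir_child(struct svc_fh *fhp)
        {
                struct inode *dirp = d_inode(fhp->fh_dentry);
                __be32 err;

                inode_lock_nested(dirp, I_MUTEX_PARENT);
                fh_fill_pre_attrs(fhp);
                err = do_child_update(fhp);     /* illustrative: create/link/unlink */
                fh_fill_post_attrs(fhp);
                inode_unlock(dirp);
                /* fh_fill_both_attrs(fhp) covers the directory-unchanged case */
                return err;
        }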
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index fcdab8a8a41f..7381972f1677 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -51,6 +51,9 @@ nfsd_proc_setattr(struct svc_rqst *rqstp)
struct nfsd_sattrargs *argp = rqstp->rq_argp;
struct nfsd_attrstat *resp = rqstp->rq_resp;
struct iattr *iap = &argp->attrs;
+ struct nfsd_attrs attrs = {
+ .na_iattr = iap,
+ };
struct svc_fh *fhp;
dprintk("nfsd: SETATTR %s, valid=%x, size=%ld\n",
@@ -100,7 +103,7 @@ nfsd_proc_setattr(struct svc_rqst *rqstp)
}
}
- resp->status = nfsd_setattr(rqstp, fhp, iap, 0, (time64_t)0);
+ resp->status = nfsd_setattr(rqstp, fhp, &attrs, 0, (time64_t)0);
if (resp->status != nfs_ok)
goto out;
@@ -260,6 +263,9 @@ nfsd_proc_create(struct svc_rqst *rqstp)
svc_fh *dirfhp = &argp->fh;
svc_fh *newfhp = &resp->fh;
struct iattr *attr = &argp->attrs;
+ struct nfsd_attrs attrs = {
+ .na_iattr = attr,
+ };
struct inode *inode;
struct dentry *dchild;
int type, mode;
@@ -285,7 +291,7 @@ nfsd_proc_create(struct svc_rqst *rqstp)
goto done;
}
- fh_lock_nested(dirfhp, I_MUTEX_PARENT);
+ inode_lock_nested(dirfhp->fh_dentry->d_inode, I_MUTEX_PARENT);
dchild = lookup_one_len(argp->name, dirfhp->fh_dentry, argp->len);
if (IS_ERR(dchild)) {
resp->status = nfserrno(PTR_ERR(dchild));
@@ -385,7 +391,7 @@ nfsd_proc_create(struct svc_rqst *rqstp)
if (!inode) {
/* File doesn't exist. Create it and set attrs */
resp->status = nfsd_create_locked(rqstp, dirfhp, argp->name,
- argp->len, attr, type, rdev,
+ argp->len, &attrs, type, rdev,
newfhp);
} else if (type == S_IFREG) {
dprintk("nfsd: existing %s, valid=%x, size=%ld\n",
@@ -396,13 +402,12 @@ nfsd_proc_create(struct svc_rqst *rqstp)
*/
attr->ia_valid &= ATTR_SIZE;
if (attr->ia_valid)
- resp->status = nfsd_setattr(rqstp, newfhp, attr, 0,
+ resp->status = nfsd_setattr(rqstp, newfhp, &attrs, 0,
(time64_t)0);
}
out_unlock:
- /* We don't really need to unlock, as fh_put does it. */
- fh_unlock(dirfhp);
+ inode_unlock(dirfhp->fh_dentry->d_inode);
fh_drop_write(dirfhp);
done:
fh_put(dirfhp);
@@ -472,6 +477,9 @@ nfsd_proc_symlink(struct svc_rqst *rqstp)
{
struct nfsd_symlinkargs *argp = rqstp->rq_argp;
struct nfsd_stat *resp = rqstp->rq_resp;
+ struct nfsd_attrs attrs = {
+ .na_iattr = &argp->attrs,
+ };
struct svc_fh newfh;
if (argp->tlen > NFS_MAXPATHLEN) {
@@ -493,7 +501,7 @@ nfsd_proc_symlink(struct svc_rqst *rqstp)
fh_init(&newfh, NFS_FHSIZE);
resp->status = nfsd_symlink(rqstp, &argp->ffh, argp->fname, argp->flen,
- argp->tname, &newfh);
+ argp->tname, &attrs, &newfh);
kfree(argp->tname);
fh_put(&argp->ffh);
@@ -511,6 +519,9 @@ nfsd_proc_mkdir(struct svc_rqst *rqstp)
{
struct nfsd_createargs *argp = rqstp->rq_argp;
struct nfsd_diropres *resp = rqstp->rq_resp;
+ struct nfsd_attrs attrs = {
+ .na_iattr = &argp->attrs,
+ };
dprintk("nfsd: MKDIR %s %.*s\n", SVCFH_fmt(&argp->fh), argp->len, argp->name);
@@ -522,7 +533,7 @@ nfsd_proc_mkdir(struct svc_rqst *rqstp)
argp->attrs.ia_valid &= ~ATTR_SIZE;
fh_init(&resp->fh, NFS_FHSIZE);
resp->status = nfsd_create(rqstp, &argp->fh, argp->name, argp->len,
- &argp->attrs, S_IFDIR, 0, &resp->fh);
+ &attrs, S_IFDIR, 0, &resp->fh);
fh_put(&argp->fh);
if (resp->status != nfs_ok)
goto out;
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index f3d6313914ed..ae596dbf8667 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -703,7 +703,6 @@ extern struct nfs4_client_reclaim *nfs4_client_to_reclaim(struct xdr_netobj name
extern bool nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn);
void put_nfs4_file(struct nfs4_file *fi);
-extern void nfs4_put_copy(struct nfsd4_copy *copy);
extern struct nfsd4_copy *
find_async_copy(struct nfs4_client *clp, stateid_t *staetid);
extern void nfs4_put_cpntf_state(struct nfsd_net *nn,
diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
index a60ead3b227a..9ebd67d461f9 100644
--- a/fs/nfsd/trace.h
+++ b/fs/nfsd/trace.h
@@ -171,6 +171,52 @@ TRACE_EVENT(nfsd_compound_encode_err,
__entry->opnum, __entry->status)
);
+#define show_fs_file_type(x) \
+ __print_symbolic(x, \
+ { S_IFLNK, "LNK" }, \
+ { S_IFREG, "REG" }, \
+ { S_IFDIR, "DIR" }, \
+ { S_IFCHR, "CHR" }, \
+ { S_IFBLK, "BLK" }, \
+ { S_IFIFO, "FIFO" }, \
+ { S_IFSOCK, "SOCK" })
+
+TRACE_EVENT(nfsd_fh_verify,
+ TP_PROTO(
+ const struct svc_rqst *rqstp,
+ const struct svc_fh *fhp,
+ umode_t type,
+ int access
+ ),
+ TP_ARGS(rqstp, fhp, type, access),
+ TP_STRUCT__entry(
+ __field(unsigned int, netns_ino)
+ __sockaddr(server, rqstp->rq_xprt->xpt_remotelen)
+ __sockaddr(client, rqstp->rq_xprt->xpt_remotelen)
+ __field(u32, xid)
+ __field(u32, fh_hash)
+ __field(void *, inode)
+ __field(unsigned long, type)
+ __field(unsigned long, access)
+ ),
+ TP_fast_assign(
+ __entry->netns_ino = SVC_NET(rqstp)->ns.inum;
+ __assign_sockaddr(server, &rqstp->rq_xprt->xpt_local,
+ rqstp->rq_xprt->xpt_locallen);
+ __assign_sockaddr(client, &rqstp->rq_xprt->xpt_remote,
+ rqstp->rq_xprt->xpt_remotelen);
+ __entry->xid = be32_to_cpu(rqstp->rq_xid);
+ __entry->fh_hash = knfsd_fh_hash(&fhp->fh_handle);
+ __entry->inode = d_inode(fhp->fh_dentry);
+ __entry->type = type;
+ __entry->access = access;
+ ),
+ TP_printk("xid=0x%08x fh_hash=0x%08x inode=%p type=%s access=%s",
+ __entry->xid, __entry->fh_hash, __entry->inode,
+ show_fs_file_type(__entry->type),
+ show_nfsd_may_flags(__entry->access)
+ )
+);
DECLARE_EVENT_CLASS(nfsd_fh_err_class,
TP_PROTO(struct svc_rqst *rqstp,
@@ -696,15 +742,12 @@ DEFINE_CLID_EVENT(confirmed_r);
__print_flags(val, "|", \
{ 1 << NFSD_FILE_HASHED, "HASHED" }, \
{ 1 << NFSD_FILE_PENDING, "PENDING" }, \
- { 1 << NFSD_FILE_BREAK_READ, "BREAK_READ" }, \
- { 1 << NFSD_FILE_BREAK_WRITE, "BREAK_WRITE" }, \
{ 1 << NFSD_FILE_REFERENCED, "REFERENCED"})
DECLARE_EVENT_CLASS(nfsd_file_class,
TP_PROTO(struct nfsd_file *nf),
TP_ARGS(nf),
TP_STRUCT__entry(
- __field(unsigned int, nf_hashval)
__field(void *, nf_inode)
__field(int, nf_ref)
__field(unsigned long, nf_flags)
@@ -712,15 +755,13 @@ DECLARE_EVENT_CLASS(nfsd_file_class,
__field(struct file *, nf_file)
),
TP_fast_assign(
- __entry->nf_hashval = nf->nf_hashval;
__entry->nf_inode = nf->nf_inode;
__entry->nf_ref = refcount_read(&nf->nf_ref);
__entry->nf_flags = nf->nf_flags;
__entry->nf_may = nf->nf_may;
__entry->nf_file = nf->nf_file;
),
- TP_printk("hash=0x%x inode=%p ref=%d flags=%s may=%s file=%p",
- __entry->nf_hashval,
+ TP_printk("inode=%p ref=%d flags=%s may=%s nf_file=%p",
__entry->nf_inode,
__entry->nf_ref,
show_nf_flags(__entry->nf_flags),
@@ -733,34 +774,59 @@ DEFINE_EVENT(nfsd_file_class, name, \
TP_PROTO(struct nfsd_file *nf), \
TP_ARGS(nf))
-DEFINE_NFSD_FILE_EVENT(nfsd_file_alloc);
DEFINE_NFSD_FILE_EVENT(nfsd_file_put_final);
DEFINE_NFSD_FILE_EVENT(nfsd_file_unhash);
DEFINE_NFSD_FILE_EVENT(nfsd_file_put);
-DEFINE_NFSD_FILE_EVENT(nfsd_file_unhash_and_release_locked);
+DEFINE_NFSD_FILE_EVENT(nfsd_file_unhash_and_dispose);
+
+TRACE_EVENT(nfsd_file_alloc,
+ TP_PROTO(
+ const struct nfsd_file *nf
+ ),
+ TP_ARGS(nf),
+ TP_STRUCT__entry(
+ __field(const void *, nf_inode)
+ __field(unsigned long, nf_flags)
+ __field(unsigned long, nf_may)
+ __field(unsigned int, nf_ref)
+ ),
+ TP_fast_assign(
+ __entry->nf_inode = nf->nf_inode;
+ __entry->nf_flags = nf->nf_flags;
+ __entry->nf_ref = refcount_read(&nf->nf_ref);
+ __entry->nf_may = nf->nf_may;
+ ),
+ TP_printk("inode=%p ref=%u flags=%s may=%s",
+ __entry->nf_inode, __entry->nf_ref,
+ show_nf_flags(__entry->nf_flags),
+ show_nfsd_may_flags(__entry->nf_may)
+ )
+);
TRACE_EVENT(nfsd_file_acquire,
- TP_PROTO(struct svc_rqst *rqstp, unsigned int hash,
- struct inode *inode, unsigned int may_flags,
- struct nfsd_file *nf, __be32 status),
+ TP_PROTO(
+ const struct svc_rqst *rqstp,
+ const struct inode *inode,
+ unsigned int may_flags,
+ const struct nfsd_file *nf,
+ __be32 status
+ ),
- TP_ARGS(rqstp, hash, inode, may_flags, nf, status),
+ TP_ARGS(rqstp, inode, may_flags, nf, status),
TP_STRUCT__entry(
__field(u32, xid)
- __field(unsigned int, hash)
- __field(void *, inode)
+ __field(const void *, inode)
__field(unsigned long, may_flags)
- __field(int, nf_ref)
+ __field(unsigned int, nf_ref)
__field(unsigned long, nf_flags)
__field(unsigned long, nf_may)
- __field(struct file *, nf_file)
+ __field(const void *, nf_file)
__field(u32, status)
),
TP_fast_assign(
__entry->xid = be32_to_cpu(rqstp->rq_xid);
- __entry->hash = hash;
__entry->inode = inode;
__entry->may_flags = may_flags;
__entry->nf_ref = nf ? refcount_read(&nf->nf_ref) : 0;
@@ -770,19 +836,117 @@ TRACE_EVENT(nfsd_file_acquire,
__entry->status = be32_to_cpu(status);
),
- TP_printk("xid=0x%x hash=0x%x inode=%p may_flags=%s ref=%d nf_flags=%s nf_may=%s nf_file=%p status=%u",
- __entry->xid, __entry->hash, __entry->inode,
+ TP_printk("xid=0x%x inode=%p may_flags=%s ref=%u nf_flags=%s nf_may=%s nf_file=%p status=%u",
+ __entry->xid, __entry->inode,
show_nfsd_may_flags(__entry->may_flags),
__entry->nf_ref, show_nf_flags(__entry->nf_flags),
show_nfsd_may_flags(__entry->nf_may),
- __entry->nf_file, __entry->status)
+ __entry->nf_file, __entry->status
+ )
+);
+
+TRACE_EVENT(nfsd_file_create,
+ TP_PROTO(
+ const struct svc_rqst *rqstp,
+ unsigned int may_flags,
+ const struct nfsd_file *nf
+ ),
+
+ TP_ARGS(rqstp, may_flags, nf),
+
+ TP_STRUCT__entry(
+ __field(const void *, nf_inode)
+ __field(const void *, nf_file)
+ __field(unsigned long, may_flags)
+ __field(unsigned long, nf_flags)
+ __field(unsigned long, nf_may)
+ __field(unsigned int, nf_ref)
+ __field(u32, xid)
+ ),
+
+ TP_fast_assign(
+ __entry->nf_inode = nf->nf_inode;
+ __entry->nf_file = nf->nf_file;
+ __entry->may_flags = may_flags;
+ __entry->nf_flags = nf->nf_flags;
+ __entry->nf_may = nf->nf_may;
+ __entry->nf_ref = refcount_read(&nf->nf_ref);
+ __entry->xid = be32_to_cpu(rqstp->rq_xid);
+ ),
+
+ TP_printk("xid=0x%x inode=%p may_flags=%s ref=%u nf_flags=%s nf_may=%s nf_file=%p",
+ __entry->xid, __entry->nf_inode,
+ show_nfsd_may_flags(__entry->may_flags),
+ __entry->nf_ref, show_nf_flags(__entry->nf_flags),
+ show_nfsd_may_flags(__entry->nf_may), __entry->nf_file
+ )
+);
+
+TRACE_EVENT(nfsd_file_insert_err,
+ TP_PROTO(
+ const struct svc_rqst *rqstp,
+ const struct inode *inode,
+ unsigned int may_flags,
+ long error
+ ),
+ TP_ARGS(rqstp, inode, may_flags, error),
+ TP_STRUCT__entry(
+ __field(u32, xid)
+ __field(const void *, inode)
+ __field(unsigned long, may_flags)
+ __field(long, error)
+ ),
+ TP_fast_assign(
+ __entry->xid = be32_to_cpu(rqstp->rq_xid);
+ __entry->inode = inode;
+ __entry->may_flags = may_flags;
+ __entry->error = error;
+ ),
+ TP_printk("xid=0x%x inode=%p may_flags=%s error=%ld",
+ __entry->xid, __entry->inode,
+ show_nfsd_may_flags(__entry->may_flags),
+ __entry->error
+ )
+);
+
+TRACE_EVENT(nfsd_file_cons_err,
+ TP_PROTO(
+ const struct svc_rqst *rqstp,
+ const struct inode *inode,
+ unsigned int may_flags,
+ const struct nfsd_file *nf
+ ),
+ TP_ARGS(rqstp, inode, may_flags, nf),
+ TP_STRUCT__entry(
+ __field(u32, xid)
+ __field(const void *, inode)
+ __field(unsigned long, may_flags)
+ __field(unsigned int, nf_ref)
+ __field(unsigned long, nf_flags)
+ __field(unsigned long, nf_may)
+ __field(const void *, nf_file)
+ ),
+ TP_fast_assign(
+ __entry->xid = be32_to_cpu(rqstp->rq_xid);
+ __entry->inode = inode;
+ __entry->may_flags = may_flags;
+ __entry->nf_ref = refcount_read(&nf->nf_ref);
+ __entry->nf_flags = nf->nf_flags;
+ __entry->nf_may = nf->nf_may;
+ __entry->nf_file = nf->nf_file;
+ ),
+ TP_printk("xid=0x%x inode=%p may_flags=%s ref=%u nf_flags=%s nf_may=%s nf_file=%p",
+ __entry->xid, __entry->inode,
+ show_nfsd_may_flags(__entry->may_flags), __entry->nf_ref,
+ show_nf_flags(__entry->nf_flags),
+ show_nfsd_may_flags(__entry->nf_may), __entry->nf_file
+ )
);
TRACE_EVENT(nfsd_file_open,
TP_PROTO(struct nfsd_file *nf, __be32 status),
TP_ARGS(nf, status),
TP_STRUCT__entry(
- __field(unsigned int, nf_hashval)
__field(void *, nf_inode) /* cannot be dereferenced */
__field(int, nf_ref)
__field(unsigned long, nf_flags)
@@ -790,15 +954,13 @@ TRACE_EVENT(nfsd_file_open,
__field(void *, nf_file) /* cannot be dereferenced */
),
TP_fast_assign(
- __entry->nf_hashval = nf->nf_hashval;
__entry->nf_inode = nf->nf_inode;
__entry->nf_ref = refcount_read(&nf->nf_ref);
__entry->nf_flags = nf->nf_flags;
__entry->nf_may = nf->nf_may;
__entry->nf_file = nf->nf_file;
),
- TP_printk("hash=0x%x inode=%p ref=%d flags=%s may=%s file=%p",
- __entry->nf_hashval,
+ TP_printk("inode=%p ref=%d flags=%s may=%s file=%p",
__entry->nf_inode,
__entry->nf_ref,
show_nf_flags(__entry->nf_flags),
@@ -807,30 +969,53 @@ TRACE_EVENT(nfsd_file_open,
)
DECLARE_EVENT_CLASS(nfsd_file_search_class,
- TP_PROTO(struct inode *inode, unsigned int hash, int found),
- TP_ARGS(inode, hash, found),
+ TP_PROTO(
+ const struct inode *inode,
+ unsigned int count
+ ),
+ TP_ARGS(inode, count),
TP_STRUCT__entry(
- __field(struct inode *, inode)
- __field(unsigned int, hash)
- __field(int, found)
+ __field(const struct inode *, inode)
+ __field(unsigned int, count)
),
TP_fast_assign(
__entry->inode = inode;
- __entry->hash = hash;
- __entry->found = found;
+ __entry->count = count;
),
- TP_printk("hash=0x%x inode=%p found=%d", __entry->hash,
- __entry->inode, __entry->found)
+ TP_printk("inode=%p count=%u",
+ __entry->inode, __entry->count)
);
#define DEFINE_NFSD_FILE_SEARCH_EVENT(name) \
DEFINE_EVENT(nfsd_file_search_class, name, \
- TP_PROTO(struct inode *inode, unsigned int hash, int found), \
- TP_ARGS(inode, hash, found))
+ TP_PROTO( \
+ const struct inode *inode, \
+ unsigned int count \
+ ), \
+ TP_ARGS(inode, count))
DEFINE_NFSD_FILE_SEARCH_EVENT(nfsd_file_close_inode_sync);
DEFINE_NFSD_FILE_SEARCH_EVENT(nfsd_file_close_inode);
-DEFINE_NFSD_FILE_SEARCH_EVENT(nfsd_file_is_cached);
+
+TRACE_EVENT(nfsd_file_is_cached,
+ TP_PROTO(
+ const struct inode *inode,
+ int found
+ ),
+ TP_ARGS(inode, found),
+ TP_STRUCT__entry(
+ __field(const struct inode *, inode)
+ __field(int, found)
+ ),
+ TP_fast_assign(
+ __entry->inode = inode;
+ __entry->found = found;
+ ),
+ TP_printk("inode=%p is %scached",
+ __entry->inode,
+ __entry->found ? "" : "not "
+ )
+);
TRACE_EVENT(nfsd_file_fsnotify_handle_event,
TP_PROTO(struct inode *inode, u32 mask),
@@ -851,6 +1036,76 @@ TRACE_EVENT(nfsd_file_fsnotify_handle_event,
__entry->nlink, __entry->mode, __entry->mask)
);
+DECLARE_EVENT_CLASS(nfsd_file_gc_class,
+ TP_PROTO(
+ const struct nfsd_file *nf
+ ),
+ TP_ARGS(nf),
+ TP_STRUCT__entry(
+ __field(void *, nf_inode)
+ __field(void *, nf_file)
+ __field(int, nf_ref)
+ __field(unsigned long, nf_flags)
+ ),
+ TP_fast_assign(
+ __entry->nf_inode = nf->nf_inode;
+ __entry->nf_file = nf->nf_file;
+ __entry->nf_ref = refcount_read(&nf->nf_ref);
+ __entry->nf_flags = nf->nf_flags;
+ ),
+ TP_printk("inode=%p ref=%d nf_flags=%s nf_file=%p",
+ __entry->nf_inode, __entry->nf_ref,
+ show_nf_flags(__entry->nf_flags),
+ __entry->nf_file
+ )
+);
+
+#define DEFINE_NFSD_FILE_GC_EVENT(name) \
+DEFINE_EVENT(nfsd_file_gc_class, name, \
+ TP_PROTO( \
+ const struct nfsd_file *nf \
+ ), \
+ TP_ARGS(nf))
+
+DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_lru_add);
+DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_lru_add_disposed);
+DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_lru_del);
+DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_lru_del_disposed);
+DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_in_use);
+DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_writeback);
+DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_referenced);
+DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_hashed);
+DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_disposed);
+
+DECLARE_EVENT_CLASS(nfsd_file_lruwalk_class,
+ TP_PROTO(
+ unsigned long removed,
+ unsigned long remaining
+ ),
+ TP_ARGS(removed, remaining),
+ TP_STRUCT__entry(
+ __field(unsigned long, removed)
+ __field(unsigned long, remaining)
+ ),
+ TP_fast_assign(
+ __entry->removed = removed;
+ __entry->remaining = remaining;
+ ),
+ TP_printk("%lu entries removed, %lu remaining",
+ __entry->removed, __entry->remaining)
+);
+
+#define DEFINE_NFSD_FILE_LRUWALK_EVENT(name) \
+DEFINE_EVENT(nfsd_file_lruwalk_class, name, \
+ TP_PROTO( \
+ unsigned long removed, \
+ unsigned long remaining \
+ ), \
+ TP_ARGS(removed, remaining))
+
+DEFINE_NFSD_FILE_LRUWALK_EVENT(nfsd_file_gc_removed);
+DEFINE_NFSD_FILE_LRUWALK_EVENT(nfsd_file_shrinker_removed);
+
#include "cache.h"
TRACE_DEFINE_ENUM(RC_DROPIT);
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index d79db56475d4..9f486b788ed0 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -199,27 +199,13 @@ nfsd_lookup_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp,
goto out_nfserr;
}
} else {
- /*
- * In the nfsd4_open() case, this may be held across
- * subsequent open and delegation acquisition which may
- * need to take the child's i_mutex:
- */
- fh_lock_nested(fhp, I_MUTEX_PARENT);
- dentry = lookup_one_len(name, dparent, len);
+ dentry = lookup_one_len_unlocked(name, dparent, len);
host_err = PTR_ERR(dentry);
if (IS_ERR(dentry))
goto out_nfserr;
if (nfsd_mountpoint(dentry, exp)) {
- /*
- * We don't need the i_mutex after all. It's
- * still possible we could open this (regular
- * files can be mountpoints too), but the
- * i_mutex is just there to prevent renames of
- * something that we might be about to delegate,
- * and a mountpoint won't be renamed:
- */
- fh_unlock(fhp);
- if ((host_err = nfsd_cross_mnt(rqstp, &dentry, &exp))) {
+ host_err = nfsd_cross_mnt(rqstp, &dentry, &exp);
+ if (host_err) {
dput(dentry);
goto out_nfserr;
}
@@ -234,7 +220,15 @@ out_nfserr:
return nfserrno(host_err);
}
-/*
+/**
+ * nfsd_lookup - look up a single path component for nfsd
+ *
+ * @rqstp: the request context
+ * @fhp: the file handle of the directory
+ * @name: the component name, or %NULL to look up parent
+ * @len: length of name to examine
+ * @resfh: pointer to pre-initialised filehandle to hold result.
+ *
* Look up one component of a pathname.
* N.B. After this call _both_ fhp and resfh need an fh_put
*
@@ -244,11 +238,11 @@ out_nfserr:
* returned. Otherwise the covered directory is returned.
* NOTE: this mountpoint crossing is not supported properly by all
* clients and is explicitly disallowed for NFSv3
- * NeilBrown <neilb@cse.unsw.edu.au>
+ *
*/
__be32
nfsd_lookup(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name,
- unsigned int len, struct svc_fh *resfh)
+ unsigned int len, struct svc_fh *resfh)
{
struct svc_export *exp;
struct dentry *dentry;
@@ -349,11 +343,13 @@ nfsd_get_write_access(struct svc_rqst *rqstp, struct svc_fh *fhp,
* Set various file attributes. After this call fhp needs an fh_put.
*/
__be32
-nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
+nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ struct nfsd_attrs *attr,
int check_guard, time64_t guardtime)
{
struct dentry *dentry;
struct inode *inode;
+ struct iattr *iap = attr->na_iattr;
int accmode = NFSD_MAY_SATTR;
umode_t ftype = 0;
__be32 err;
@@ -420,7 +416,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
return err;
}
- fh_lock(fhp);
+ inode_lock(inode);
if (size_change) {
/*
* RFC5661, Section 18.30.4:
@@ -456,7 +452,19 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
host_err = notify_change(&init_user_ns, dentry, iap, NULL);
out_unlock:
- fh_unlock(fhp);
+ if (attr->na_seclabel && attr->na_seclabel->len)
+ attr->na_labelerr = security_inode_setsecctx(dentry,
+ attr->na_seclabel->data, attr->na_seclabel->len);
+ if (IS_ENABLED(CONFIG_FS_POSIX_ACL) && attr->na_pacl)
+ attr->na_aclerr = set_posix_acl(&init_user_ns,
+ inode, ACL_TYPE_ACCESS,
+ attr->na_pacl);
+ if (IS_ENABLED(CONFIG_FS_POSIX_ACL) &&
+ !attr->na_aclerr && attr->na_dpacl && S_ISDIR(inode->i_mode))
+ attr->na_aclerr = set_posix_acl(&init_user_ns,
+ inode, ACL_TYPE_DEFAULT,
+ attr->na_dpacl);
+ inode_unlock(inode);
if (size_change)
put_write_access(inode);
out:
@@ -494,32 +502,6 @@ int nfsd4_is_junction(struct dentry *dentry)
return 0;
return 1;
}
-#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
-__be32 nfsd4_set_nfs4_label(struct svc_rqst *rqstp, struct svc_fh *fhp,
- struct xdr_netobj *label)
-{
- __be32 error;
- int host_error;
- struct dentry *dentry;
-
- error = fh_verify(rqstp, fhp, 0 /* S_IFREG */, NFSD_MAY_SATTR);
- if (error)
- return error;
-
- dentry = fhp->fh_dentry;
-
- inode_lock(d_inode(dentry));
- host_error = security_inode_setsecctx(dentry, label->data, label->len);
- inode_unlock(d_inode(dentry));
- return nfserrno(host_error);
-}
-#else
-__be32 nfsd4_set_nfs4_label(struct svc_rqst *rqstp, struct svc_fh *fhp,
- struct xdr_netobj *label)
-{
- return nfserr_notsupp;
-}
-#endif
static struct nfsd4_compound_state *nfsd4_get_cstate(struct svc_rqst *rqstp)
{
@@ -1202,14 +1184,15 @@ out:
* @rqstp: RPC transaction being executed
* @fhp: NFS filehandle of parent directory
* @resfhp: NFS filehandle of new object
- * @iap: requested attributes of new object
+ * @attrs: requested attributes of new object
*
* Returns nfs_ok on success, or an nfsstat in network byte order.
*/
__be32
nfsd_create_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
- struct svc_fh *resfhp, struct iattr *iap)
+ struct svc_fh *resfhp, struct nfsd_attrs *attrs)
{
+ struct iattr *iap = attrs->na_iattr;
__be32 status;
/*
@@ -1230,7 +1213,7 @@ nfsd_create_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
* if the attributes have not changed.
*/
if (iap->ia_valid)
- status = nfsd_setattr(rqstp, resfhp, iap, 0, (time64_t)0);
+ status = nfsd_setattr(rqstp, resfhp, attrs, 0, (time64_t)0);
else
status = nfserrno(commit_metadata(resfhp));
@@ -1269,11 +1252,12 @@ nfsd_check_ignore_resizing(struct iattr *iap)
/* The parent directory should already be locked: */
__be32
nfsd_create_locked(struct svc_rqst *rqstp, struct svc_fh *fhp,
- char *fname, int flen, struct iattr *iap,
- int type, dev_t rdev, struct svc_fh *resfhp)
+ char *fname, int flen, struct nfsd_attrs *attrs,
+ int type, dev_t rdev, struct svc_fh *resfhp)
{
struct dentry *dentry, *dchild;
struct inode *dirp;
+ struct iattr *iap = attrs->na_iattr;
__be32 err;
int host_err;
@@ -1281,13 +1265,6 @@ nfsd_create_locked(struct svc_rqst *rqstp, struct svc_fh *fhp,
dirp = d_inode(dentry);
dchild = dget(resfhp->fh_dentry);
- if (!fhp->fh_locked) {
- WARN_ONCE(1, "nfsd_create: parent %pd2 not locked!\n",
- dentry);
- err = nfserr_io;
- goto out;
- }
-
err = nfsd_permission(rqstp, fhp->fh_export, dentry, NFSD_MAY_CREATE);
if (err)
goto out;
@@ -1347,7 +1324,7 @@ nfsd_create_locked(struct svc_rqst *rqstp, struct svc_fh *fhp,
if (host_err < 0)
goto out_nfserr;
- err = nfsd_create_setattr(rqstp, fhp, resfhp, iap);
+ err = nfsd_create_setattr(rqstp, fhp, resfhp, attrs);
out:
dput(dchild);
@@ -1366,8 +1343,8 @@ out_nfserr:
*/
__be32
nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
- char *fname, int flen, struct iattr *iap,
- int type, dev_t rdev, struct svc_fh *resfhp)
+ char *fname, int flen, struct nfsd_attrs *attrs,
+ int type, dev_t rdev, struct svc_fh *resfhp)
{
struct dentry *dentry, *dchild = NULL;
__be32 err;
@@ -1386,11 +1363,13 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
if (host_err)
return nfserrno(host_err);
- fh_lock_nested(fhp, I_MUTEX_PARENT);
+ inode_lock_nested(dentry->d_inode, I_MUTEX_PARENT);
dchild = lookup_one_len(fname, dentry, flen);
host_err = PTR_ERR(dchild);
- if (IS_ERR(dchild))
- return nfserrno(host_err);
+ if (IS_ERR(dchild)) {
+ err = nfserrno(host_err);
+ goto out_unlock;
+ }
err = fh_compose(resfhp, fhp->fh_export, dchild, fhp);
/*
* We unconditionally drop our ref to dchild as fh_compose will have
@@ -1398,9 +1377,14 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
*/
dput(dchild);
if (err)
- return err;
- return nfsd_create_locked(rqstp, fhp, fname, flen, iap, type,
- rdev, resfhp);
+ goto out_unlock;
+ fh_fill_pre_attrs(fhp);
+ err = nfsd_create_locked(rqstp, fhp, fname, flen, attrs, type,
+ rdev, resfhp);
+ fh_fill_post_attrs(fhp);
+out_unlock:
+ inode_unlock(dentry->d_inode);
+ return err;
}
/*
@@ -1441,15 +1425,25 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
return 0;
}
-/*
- * Create a symlink and look up its inode
+/**
+ * nfsd_symlink - Create a symlink and look up its inode
+ * @rqstp: RPC transaction being executed
+ * @fhp: NFS filehandle of parent directory
+ * @fname: filename of the new symlink
+ * @flen: length of @fname
+ * @path: content of the new symlink (NUL-terminated)
+ * @attrs: requested attributes of new object
+ * @resfhp: NFS filehandle of new object
+ *
* N.B. After this call _both_ fhp and resfhp need an fh_put
+ *
+ * Returns nfs_ok on success, or an nfsstat in network byte order.
*/
__be32
nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
- char *fname, int flen,
- char *path,
- struct svc_fh *resfhp)
+ char *fname, int flen,
+ char *path, struct nfsd_attrs *attrs,
+ struct svc_fh *resfhp)
{
struct dentry *dentry, *dnew;
__be32 err, cerr;
@@ -1467,33 +1461,35 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
goto out;
host_err = fh_want_write(fhp);
- if (host_err)
- goto out_nfserr;
+ if (host_err) {
+ err = nfserrno(host_err);
+ goto out;
+ }
- fh_lock(fhp);
dentry = fhp->fh_dentry;
+ inode_lock_nested(dentry->d_inode, I_MUTEX_PARENT);
dnew = lookup_one_len(fname, dentry, flen);
- host_err = PTR_ERR(dnew);
- if (IS_ERR(dnew))
- goto out_nfserr;
-
+ if (IS_ERR(dnew)) {
+ err = nfserrno(PTR_ERR(dnew));
+ inode_unlock(dentry->d_inode);
+ goto out_drop_write;
+ }
+ fh_fill_pre_attrs(fhp);
host_err = vfs_symlink(&init_user_ns, d_inode(dentry), dnew, path);
err = nfserrno(host_err);
- fh_unlock(fhp);
+ cerr = fh_compose(resfhp, fhp->fh_export, dnew, fhp);
+ if (!err)
+ nfsd_create_setattr(rqstp, fhp, resfhp, attrs);
+ fh_fill_post_attrs(fhp);
+ inode_unlock(dentry->d_inode);
if (!err)
err = nfserrno(commit_metadata(fhp));
-
- fh_drop_write(fhp);
-
- cerr = fh_compose(resfhp, fhp->fh_export, dnew, fhp);
dput(dnew);
if (err==0) err = cerr;
+out_drop_write:
+ fh_drop_write(fhp);
out:
return err;
-
-out_nfserr:
- err = nfserrno(host_err);
- goto out;
}
/*
@@ -1531,22 +1527,25 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
goto out;
}
- fh_lock_nested(ffhp, I_MUTEX_PARENT);
ddir = ffhp->fh_dentry;
dirp = d_inode(ddir);
+ inode_lock_nested(dirp, I_MUTEX_PARENT);
dnew = lookup_one_len(name, ddir, len);
- host_err = PTR_ERR(dnew);
- if (IS_ERR(dnew))
- goto out_nfserr;
+ if (IS_ERR(dnew)) {
+ err = nfserrno(PTR_ERR(dnew));
+ goto out_unlock;
+ }
dold = tfhp->fh_dentry;
err = nfserr_noent;
if (d_really_is_negative(dold))
goto out_dput;
+ fh_fill_pre_attrs(ffhp);
host_err = vfs_link(dold, &init_user_ns, dirp, dnew, NULL);
- fh_unlock(ffhp);
+ fh_fill_post_attrs(ffhp);
+ inode_unlock(dirp);
if (!host_err) {
err = nfserrno(commit_metadata(ffhp));
if (!err)
@@ -1557,17 +1556,17 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
else
err = nfserrno(host_err);
}
-out_dput:
dput(dnew);
-out_unlock:
- fh_unlock(ffhp);
+out_drop_write:
fh_drop_write(tfhp);
out:
return err;
-out_nfserr:
- err = nfserrno(host_err);
- goto out_unlock;
+out_dput:
+ dput(dnew);
+out_unlock:
+ inode_unlock(dirp);
+ goto out_drop_write;
}
static void
@@ -1628,10 +1627,7 @@ retry:
goto out;
}
- /* cannot use fh_lock as we need deadlock protective ordering
- * so do it by hand */
trap = lock_rename(tdentry, fdentry);
- ffhp->fh_locked = tfhp->fh_locked = true;
fh_fill_pre_attrs(ffhp);
fh_fill_pre_attrs(tfhp);
@@ -1687,17 +1683,12 @@ retry:
dput(odentry);
out_nfserr:
err = nfserrno(host_err);
- /*
- * We cannot rely on fh_unlock on the two filehandles,
- * as that would do the wrong thing if the two directories
- * were the same, so again we do it by hand.
- */
+
if (!close_cached) {
fh_fill_post_attrs(ffhp);
fh_fill_post_attrs(tfhp);
}
unlock_rename(tdentry, fdentry);
- ffhp->fh_locked = tfhp->fh_locked = false;
fh_drop_write(ffhp);
/*
@@ -1741,19 +1732,19 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
if (host_err)
goto out_nfserr;
- fh_lock_nested(fhp, I_MUTEX_PARENT);
dentry = fhp->fh_dentry;
dirp = d_inode(dentry);
+ inode_lock_nested(dirp, I_MUTEX_PARENT);
rdentry = lookup_one_len(fname, dentry, flen);
host_err = PTR_ERR(rdentry);
if (IS_ERR(rdentry))
- goto out_drop_write;
+ goto out_unlock;
if (d_really_is_negative(rdentry)) {
dput(rdentry);
host_err = -ENOENT;
- goto out_drop_write;
+ goto out_unlock;
}
rinode = d_inode(rdentry);
ihold(rinode);
@@ -1761,6 +1752,7 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
if (!type)
type = d_inode(rdentry)->i_mode & S_IFMT;
+ fh_fill_pre_attrs(fhp);
if (type != S_IFDIR) {
if (rdentry->d_sb->s_export_op->flags & EXPORT_OP_CLOSE_BEFORE_UNLINK)
nfsd_close_cached_files(rdentry);
@@ -1768,8 +1760,9 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
} else {
host_err = vfs_rmdir(&init_user_ns, dirp, rdentry);
}
+ fh_fill_post_attrs(fhp);
- fh_unlock(fhp);
+ inode_unlock(dirp);
if (!host_err)
host_err = commit_metadata(fhp);
dput(rdentry);
@@ -1791,6 +1784,9 @@ out_nfserr:
}
out:
return err;
+out_unlock:
+ inode_unlock(dirp);
+ goto out_drop_write;
}
/*
@@ -2144,13 +2140,16 @@ out:
return err;
}
-/*
- * Removexattr and setxattr need to call fh_lock to both lock the inode
- * and set the change attribute. Since the top-level vfs_removexattr
- * and vfs_setxattr calls already do their own inode_lock calls, call
- * the _locked variant. Pass in a NULL pointer for delegated_inode,
- * and let the client deal with NFS4ERR_DELAY (same as with e.g.
- * setattr and remove).
+/**
+ * nfsd_removexattr - Remove an extended attribute
+ * @rqstp: RPC transaction being executed
+ * @fhp: NFS filehandle of object with xattr to remove
+ * @name: name of xattr to remove (NUL-terminated)
+ *
+ * Pass in a NULL pointer for delegated_inode, and let the client deal
+ * with NFS4ERR_DELAY (same as with e.g. setattr and remove).
+ *
+ * Returns nfs_ok on success, or an nfsstat in network byte order.
*/
__be32
nfsd_removexattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char *name)
@@ -2166,12 +2165,14 @@ nfsd_removexattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char *name)
if (ret)
return nfserrno(ret);
- fh_lock(fhp);
+ inode_lock(fhp->fh_dentry->d_inode);
+ fh_fill_pre_attrs(fhp);
ret = __vfs_removexattr_locked(&init_user_ns, fhp->fh_dentry,
name, NULL);
- fh_unlock(fhp);
+ fh_fill_post_attrs(fhp);
+ inode_unlock(fhp->fh_dentry->d_inode);
fh_drop_write(fhp);
return nfsd_xattr_errno(ret);
@@ -2191,12 +2192,13 @@ nfsd_setxattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char *name,
ret = fh_want_write(fhp);
if (ret)
return nfserrno(ret);
- fh_lock(fhp);
+ inode_lock(fhp->fh_dentry->d_inode);
+ fh_fill_pre_attrs(fhp);
ret = __vfs_setxattr_locked(&init_user_ns, fhp->fh_dentry, name, buf,
len, flags, NULL);
-
- fh_unlock(fhp);
+ fh_fill_post_attrs(fhp);
+ inode_unlock(fhp->fh_dentry->d_inode);
fh_drop_write(fhp);
return nfsd_xattr_errno(ret);
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
index 26347d76f44a..c95cd414b4bb 100644
--- a/fs/nfsd/vfs.h
+++ b/fs/nfsd/vfs.h
@@ -6,6 +6,8 @@
#ifndef LINUX_NFSD_VFS_H
#define LINUX_NFSD_VFS_H
+#include <linux/fs.h>
+#include <linux/posix_acl.h>
#include "nfsfh.h"
#include "nfsd.h"
@@ -42,6 +44,22 @@ struct nfsd_file;
typedef int (*nfsd_filldir_t)(void *, const char *, int, loff_t, u64, unsigned);
/* nfsd/vfs.c */
+struct nfsd_attrs {
+ struct iattr *na_iattr; /* input */
+ struct xdr_netobj *na_seclabel; /* input */
+ struct posix_acl *na_pacl; /* input */
+ struct posix_acl *na_dpacl; /* input */
+
+ int na_labelerr; /* output */
+ int na_aclerr; /* output */
+};
+
+static inline void nfsd_attrs_free(struct nfsd_attrs *attrs)
+{
+ posix_acl_release(attrs->na_pacl);
+ posix_acl_release(attrs->na_dpacl);
+}
+
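struct nfsd_attrs lets one call carry the iattr plus an optional security label and ACLs, so nfsd_setattr() can apply them under a single inode lock. A minimal construction sketch, mirroring the truncate-on-open caller converted earlier in this patch:

        /* Sketch: truncate-on-open expressed through the new nfsd_attrs wrapper. */
        static __be32 truncate_on_open(struct svc_rqst *rqstp, struct svc_fh *fhp)
        {
                struct iattr iattr = {
                        .ia_valid = ATTR_SIZE,
                        .ia_size  = 0,
                };
                struct nfsd_attrs attrs = {
                        .na_iattr = &iattr,
                        /* na_seclabel/na_pacl/na_dpacl stay NULL when not supplied */
                };

                return nfsd_setattr(rqstp, fhp, &attrs, 0, (time64_t)0);
        }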
int nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp,
struct svc_export **expp);
__be32 nfsd_lookup(struct svc_rqst *, struct svc_fh *,
@@ -50,11 +68,9 @@ __be32 nfsd_lookup_dentry(struct svc_rqst *, struct svc_fh *,
const char *, unsigned int,
struct svc_export **, struct dentry **);
__be32 nfsd_setattr(struct svc_rqst *, struct svc_fh *,
- struct iattr *, int, time64_t);
+ struct nfsd_attrs *, int, time64_t);
int nfsd_mountpoint(struct dentry *, struct svc_export *);
#ifdef CONFIG_NFSD_V4
-__be32 nfsd4_set_nfs4_label(struct svc_rqst *, struct svc_fh *,
- struct xdr_netobj *);
__be32 nfsd4_vfs_fallocate(struct svc_rqst *, struct svc_fh *,
struct file *, loff_t, loff_t, int);
__be32 nfsd4_clone_file_range(struct svc_rqst *rqstp,
@@ -63,14 +79,14 @@ __be32 nfsd4_clone_file_range(struct svc_rqst *rqstp,
u64 count, bool sync);
#endif /* CONFIG_NFSD_V4 */
__be32 nfsd_create_locked(struct svc_rqst *, struct svc_fh *,
- char *name, int len, struct iattr *attrs,
+ char *name, int len, struct nfsd_attrs *attrs,
int type, dev_t rdev, struct svc_fh *res);
__be32 nfsd_create(struct svc_rqst *, struct svc_fh *,
- char *name, int len, struct iattr *attrs,
+ char *name, int len, struct nfsd_attrs *attrs,
int type, dev_t rdev, struct svc_fh *res);
__be32 nfsd_access(struct svc_rqst *, struct svc_fh *, u32 *, u32 *);
__be32 nfsd_create_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
- struct svc_fh *resfhp, struct iattr *iap);
+ struct svc_fh *resfhp, struct nfsd_attrs *iap);
__be32 nfsd_commit(struct svc_rqst *rqst, struct svc_fh *fhp,
u64 offset, u32 count, __be32 *verf);
#ifdef CONFIG_NFSD_V4
@@ -110,8 +126,9 @@ __be32 nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp,
__be32 nfsd_readlink(struct svc_rqst *, struct svc_fh *,
char *, int *);
__be32 nfsd_symlink(struct svc_rqst *, struct svc_fh *,
- char *name, int len, char *path,
- struct svc_fh *res);
+ char *name, int len, char *path,
+ struct nfsd_attrs *attrs,
+ struct svc_fh *res);
__be32 nfsd_link(struct svc_rqst *, struct svc_fh *,
char *, int, struct svc_fh *);
ssize_t nfsd_copy_file_range(struct file *, u64,
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index 7b744011f2d3..96267258e629 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -279,6 +279,7 @@ struct nfsd4_open {
struct nfs4_clnt_odstate *op_odstate; /* used during processing */
struct nfs4_acl *op_acl;
struct xdr_netobj op_label;
+ struct svc_rqst *op_rqstp;
};
struct nfsd4_open_confirm {
@@ -302,9 +303,10 @@ struct nfsd4_read {
u32 rd_length; /* request */
int rd_vlen;
struct nfsd_file *rd_nf;
-
+
struct svc_rqst *rd_rqstp; /* response */
- struct svc_fh *rd_fhp; /* response */
+ struct svc_fh *rd_fhp; /* response */
+ u32 rd_eof; /* response */
};
struct nfsd4_readdir {
@@ -532,6 +534,13 @@ struct nfsd42_write_res {
stateid_t cb_stateid;
};
+struct nfsd4_cb_offload {
+ struct nfsd4_callback co_cb;
+ struct nfsd42_write_res co_res;
+ __be32 co_nfserr;
+ struct knfsd_fh co_fh;
+};
+
struct nfsd4_copy {
/* request */
stateid_t cp_src_stateid;
@@ -539,18 +548,16 @@ struct nfsd4_copy {
u64 cp_src_pos;
u64 cp_dst_pos;
u64 cp_count;
- struct nl4_server cp_src;
- bool cp_intra;
+ struct nl4_server *cp_src;
- /* both */
- u32 cp_synchronous;
+ unsigned long cp_flags;
+#define NFSD4_COPY_F_STOPPED (0)
+#define NFSD4_COPY_F_INTRA (1)
+#define NFSD4_COPY_F_SYNCHRONOUS (2)
+#define NFSD4_COPY_F_COMMITTED (3)
/* response */
struct nfsd42_write_res cp_res;
-
- /* for cb_offload */
- struct nfsd4_callback cp_cb;
- __be32 nfserr;
struct knfsd_fh fh;
struct nfs4_client *cp_clp;
@@ -563,14 +570,35 @@ struct nfsd4_copy {
struct list_head copies;
struct task_struct *copy_task;
refcount_t refcount;
- bool stopped;
struct vfsmount *ss_mnt;
struct nfs_fh c_fh;
nfs4_stateid stateid;
- bool committed;
};
+static inline void nfsd4_copy_set_sync(struct nfsd4_copy *copy, bool sync)
+{
+ if (sync)
+ set_bit(NFSD4_COPY_F_SYNCHRONOUS, &copy->cp_flags);
+ else
+ clear_bit(NFSD4_COPY_F_SYNCHRONOUS, &copy->cp_flags);
+}
+
+static inline bool nfsd4_copy_is_sync(const struct nfsd4_copy *copy)
+{
+ return test_bit(NFSD4_COPY_F_SYNCHRONOUS, &copy->cp_flags);
+}
+
+static inline bool nfsd4_copy_is_async(const struct nfsd4_copy *copy)
+{
+ return !test_bit(NFSD4_COPY_F_SYNCHRONOUS, &copy->cp_flags);
+}
+
+static inline bool nfsd4_ssc_is_inter(const struct nfsd4_copy *copy)
+{
+ return !test_bit(NFSD4_COPY_F_INTRA, &copy->cp_flags);
+}
+
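Folding the separate booleans into cp_flags turns state changes into atomic bit operations. A hedged usage sketch (the kthread_stop() call assumes the async copy runs on a kernel thread, which is an assumption here):

        /* Sketch: the former 'copy->stopped = true' becomes an atomic bitop. */
        static void copy_mark_stopped(struct nfsd4_copy *copy)
        {
                if (test_and_set_bit(NFSD4_COPY_F_STOPPED, &copy->cp_flags))
                        return;                 /* already stopped by another path */
                if (nfsd4_copy_is_async(copy) && copy->copy_task)
                        kthread_stop(copy->copy_task);  /* assumed async mechanism */
        }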
struct nfsd4_seek {
/* request */
stateid_t seek_stateid;
@@ -594,19 +622,20 @@ struct nfsd4_offload_status {
struct nfsd4_copy_notify {
/* request */
stateid_t cpn_src_stateid;
- struct nl4_server cpn_dst;
+ struct nl4_server *cpn_dst;
/* response */
stateid_t cpn_cnr_stateid;
u64 cpn_sec;
u32 cpn_nsec;
- struct nl4_server cpn_src;
+ struct nl4_server *cpn_src;
};
struct nfsd4_op {
u32 opnum;
- const struct nfsd4_operation * opdesc;
__be32 status;
+ const struct nfsd4_operation *opdesc;
+ struct nfs4_replay *replay;
union nfsd4_op_u {
struct nfsd4_access access;
struct nfsd4_close close;
@@ -670,7 +699,6 @@ struct nfsd4_op {
struct nfsd4_listxattrs listxattrs;
struct nfsd4_removexattr removexattr;
} u;
- struct nfs4_replay * replay;
};
bool nfsd4_cache_this_op(struct nfsd4_op *);
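The nfsd4_copy rework above replaces four independent bools with bit numbers in a single cp_flags word, manipulated through set_bit()/test_bit() and the new inline helpers. A minimal user-space sketch of the same pattern, using plain bit operations and stand-in names rather than the kernel's atomic bitops:

#include <stdbool.h>
#include <stdio.h>

/* Bit numbers, mirroring the NFSD4_COPY_F_* values in the patch. */
enum { COPY_F_STOPPED, COPY_F_INTRA, COPY_F_SYNCHRONOUS, COPY_F_COMMITTED };

struct copy_state {
	unsigned long flags;	/* one word replaces four bools */
};

/* Non-atomic stand-ins for the kernel's set_bit()/clear_bit()/test_bit(). */
static void set_flag(struct copy_state *c, int bit)   { c->flags |=  (1UL << bit); }
static void clear_flag(struct copy_state *c, int bit) { c->flags &= ~(1UL << bit); }
static bool test_flag(const struct copy_state *c, int bit) { return c->flags & (1UL << bit); }

static void copy_set_sync(struct copy_state *c, bool sync)
{
	if (sync)
		set_flag(c, COPY_F_SYNCHRONOUS);
	else
		clear_flag(c, COPY_F_SYNCHRONOUS);
}

int main(void)
{
	struct copy_state c = { 0 };

	copy_set_sync(&c, true);
	set_flag(&c, COPY_F_INTRA);
	printf("sync=%d inter=%d\n",
	       test_flag(&c, COPY_F_SYNCHRONOUS), !test_flag(&c, COPY_F_INTRA));
	return 0;
}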
diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c
index e8c00dda42ad..71f870d497ae 100644
--- a/fs/ntfs3/attrib.c
+++ b/fs/ntfs3/attrib.c
@@ -84,8 +84,8 @@ static inline bool attr_must_be_resident(struct ntfs_sb_info *sbi,
/*
* attr_load_runs - Load all runs stored in @attr.
*/
-int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
- struct runs_tree *run, const CLST *vcn)
+static int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
+ struct runs_tree *run, const CLST *vcn)
{
int err;
CLST svcn = le64_to_cpu(attr->nres.svcn);
@@ -140,7 +140,10 @@ failed:
}
if (lcn != SPARSE_LCN) {
- mark_as_free_ex(sbi, lcn, clen, trim);
+ if (sbi) {
+			/* Mark bitmap range [lcn, lcn + clen) as free and trim clusters. */
+ mark_as_free_ex(sbi, lcn, clen, trim);
+ }
dn += clen;
}
@@ -173,7 +176,6 @@ int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
{
int err;
CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
- struct wnd_bitmap *wnd = &sbi->used.bitmap;
size_t cnt = run->count;
for (;;) {
@@ -196,9 +198,7 @@ int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
/* Add new fragment into run storage. */
if (!run_add_entry(run, vcn, lcn, flen, opt == ALLOCATE_MFT)) {
/* Undo last 'ntfs_look_for_free_space' */
- down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
- wnd_set_free(wnd, lcn, flen);
- up_write(&wnd->rw_lock);
+ mark_as_free_ex(sbi, lcn, len, false);
err = -ENOMEM;
goto out;
}
@@ -320,7 +320,7 @@ int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s),
attr_s->name_len, run, 0, alen,
- attr_s->flags, &attr, NULL);
+ attr_s->flags, &attr, NULL, NULL);
if (err)
goto out3;
@@ -419,40 +419,44 @@ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
struct mft_inode *mi, *mi_b;
CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
CLST next_svcn, pre_alloc = -1, done = 0;
- bool is_ext;
+ bool is_ext, is_bad = false;
u32 align;
struct MFT_REC *rec;
again:
+ alen = 0;
le_b = NULL;
attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL,
&mi_b);
if (!attr_b) {
err = -ENOENT;
- goto out;
+ goto bad_inode;
}
if (!attr_b->non_res) {
err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run,
&attr_b);
- if (err || !attr_b->non_res)
- goto out;
+ if (err)
+ return err;
+
+ /* Return if file is still resident. */
+ if (!attr_b->non_res)
+ goto ok1;
/* Layout of records may be changed, so do a full search. */
goto again;
}
is_ext = is_attr_ext(attr_b);
-
-again_1:
align = sbi->cluster_size;
-
if (is_ext)
align <<= attr_b->nres.c_unit;
old_valid = le64_to_cpu(attr_b->nres.valid_size);
old_size = le64_to_cpu(attr_b->nres.data_size);
old_alloc = le64_to_cpu(attr_b->nres.alloc_size);
+
+again_1:
old_alen = old_alloc >> cluster_bits;
new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
@@ -475,24 +479,27 @@ again_1:
mi = mi_b;
} else if (!le_b) {
err = -EINVAL;
- goto out;
+ goto bad_inode;
} else {
le = le_b;
attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn,
&mi);
if (!attr) {
err = -EINVAL;
- goto out;
+ goto bad_inode;
}
next_le_1:
svcn = le64_to_cpu(attr->nres.svcn);
evcn = le64_to_cpu(attr->nres.evcn);
}
-
+ /*
+ * Here we have:
+ * attr,mi,le - last attribute segment (containing 'vcn').
+ * attr_b,mi_b,le_b - base (primary) attribute segment.
+ */
next_le:
rec = mi->mrec;
-
err = attr_load_runs(attr, ni, run, NULL);
if (err)
goto out;
@@ -507,6 +514,13 @@ next_le:
goto ok;
}
+ /*
+	 * Add clusters. In the simple case we have to:
+ * - allocate space (vcn, lcn, len)
+ * - update packed run in 'mi'
+ * - update attr->nres.evcn
+ * - update attr_b->nres.data_size/attr_b->nres.alloc_size
+ */
to_allocate = new_alen - old_alen;
add_alloc_in_same_attr_seg:
lcn = 0;
@@ -520,9 +534,11 @@ add_alloc_in_same_attr_seg:
pre_alloc = 0;
if (type == ATTR_DATA && !name_len &&
sbi->options->prealloc) {
- CLST new_alen2 = bytes_to_cluster(
- sbi, get_pre_allocated(new_size));
- pre_alloc = new_alen2 - new_alen;
+ pre_alloc =
+ bytes_to_cluster(
+ sbi,
+ get_pre_allocated(new_size)) -
+ new_alen;
}
/* Get the last LCN to allocate from. */
@@ -580,7 +596,7 @@ add_alloc_in_same_attr_seg:
pack_runs:
err = mi_pack_runs(mi, attr, run, vcn - svcn);
if (err)
- goto out;
+ goto undo_1;
next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
new_alloc_tmp = (u64)next_svcn << cluster_bits;
@@ -614,7 +630,7 @@ pack_runs:
if (type == ATTR_LIST) {
err = ni_expand_list(ni);
if (err)
- goto out;
+ goto undo_2;
if (next_svcn < vcn)
goto pack_runs;
@@ -624,8 +640,9 @@ pack_runs:
if (!ni->attr_list.size) {
err = ni_create_attr_list(ni);
+		/* In case of error, the layout of records is not changed. */
if (err)
- goto out;
+ goto undo_2;
/* Layout of records is changed. */
}
@@ -637,48 +654,57 @@ pack_runs:
/* Insert new attribute segment. */
err = ni_insert_nonresident(ni, type, name, name_len, run,
next_svcn, vcn - next_svcn,
- attr_b->flags, &attr, &mi);
- if (err)
- goto out;
-
- if (!is_mft)
- run_truncate_head(run, evcn + 1);
-
- svcn = le64_to_cpu(attr->nres.svcn);
- evcn = le64_to_cpu(attr->nres.evcn);
+ attr_b->flags, &attr, &mi, NULL);
- le_b = NULL;
/*
 		 * Layout of records may be changed.
* Find base attribute to update.
*/
+ le_b = NULL;
attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
NULL, &mi_b);
if (!attr_b) {
- err = -ENOENT;
- goto out;
+ err = -EINVAL;
+ goto bad_inode;
}
- attr_b->nres.alloc_size = cpu_to_le64((u64)vcn << cluster_bits);
- attr_b->nres.data_size = attr_b->nres.alloc_size;
- attr_b->nres.valid_size = attr_b->nres.alloc_size;
+ if (err) {
+ /* ni_insert_nonresident failed. */
+ attr = NULL;
+ goto undo_2;
+ }
+
+ if (!is_mft)
+ run_truncate_head(run, evcn + 1);
+
+ svcn = le64_to_cpu(attr->nres.svcn);
+ evcn = le64_to_cpu(attr->nres.evcn);
+
+ /*
+			 * The attribute is in a consistent state.
+			 * Save this point to restore to if the next steps fail.
+ */
+ old_valid = old_size = old_alloc = (u64)vcn << cluster_bits;
+ attr_b->nres.valid_size = attr_b->nres.data_size =
+ attr_b->nres.alloc_size = cpu_to_le64(old_size);
mi_b->dirty = true;
goto again_1;
}
if (new_size != old_size ||
(new_alloc != old_alloc && !keep_prealloc)) {
+ /*
+		 * Truncate clusters. In the simple case we have to:
+ * - update packed run in 'mi'
+ * - update attr->nres.evcn
+ * - update attr_b->nres.data_size/attr_b->nres.alloc_size
+ * - mark and trim clusters as free (vcn, lcn, len)
+ */
+ CLST dlen = 0;
+
vcn = max(svcn, new_alen);
new_alloc_tmp = (u64)vcn << cluster_bits;
- alen = 0;
- err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &alen,
- true);
- if (err)
- goto out;
-
- run_truncate(run, vcn);
-
if (vcn > svcn) {
err = mi_pack_runs(mi, attr, run, vcn - svcn);
if (err)
@@ -697,7 +723,7 @@ pack_runs:
if (!al_remove_le(ni, le)) {
err = -EINVAL;
- goto out;
+ goto bad_inode;
}
le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
@@ -723,12 +749,20 @@ pack_runs:
attr_b->nres.valid_size =
attr_b->nres.alloc_size;
}
+ mi_b->dirty = true;
- if (is_ext)
+ err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &dlen,
+ true);
+ if (err)
+ goto out;
+
+ if (is_ext) {
+ /* dlen - really deallocated clusters. */
le64_sub_cpu(&attr_b->nres.total_size,
- ((u64)alen << cluster_bits));
+ ((u64)dlen << cluster_bits));
+ }
- mi_b->dirty = true;
+ run_truncate(run, vcn);
if (new_alloc_tmp <= new_alloc)
goto ok;
@@ -747,7 +781,7 @@ pack_runs:
if (le->type != type || le->name_len != name_len ||
memcmp(le_name(le), name, name_len * sizeof(short))) {
err = -EINVAL;
- goto out;
+ goto bad_inode;
}
err = ni_load_mi(ni, le, &mi);
@@ -757,7 +791,7 @@ pack_runs:
attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
if (!attr) {
err = -EINVAL;
- goto out;
+ goto bad_inode;
}
goto next_le_1;
}
@@ -772,13 +806,13 @@ ok:
}
}
-out:
- if (!err && attr_b && ret)
+ok1:
+ if (ret)
*ret = attr_b;
/* Update inode_set_bytes. */
- if (!err && ((type == ATTR_DATA && !name_len) ||
- (type == ATTR_ALLOC && name == I30_NAME))) {
+ if (((type == ATTR_DATA && !name_len) ||
+ (type == ATTR_ALLOC && name == I30_NAME))) {
bool dirty = false;
if (ni->vfs_inode.i_size != new_size) {
@@ -786,7 +820,7 @@ out:
dirty = true;
}
- if (attr_b && attr_b->non_res) {
+ if (attr_b->non_res) {
new_alloc = le64_to_cpu(attr_b->nres.alloc_size);
if (inode_get_bytes(&ni->vfs_inode) != new_alloc) {
inode_set_bytes(&ni->vfs_inode, new_alloc);
@@ -800,6 +834,47 @@ out:
}
}
+ return 0;
+
+undo_2:
+ vcn -= alen;
+ attr_b->nres.data_size = cpu_to_le64(old_size);
+ attr_b->nres.valid_size = cpu_to_le64(old_valid);
+ attr_b->nres.alloc_size = cpu_to_le64(old_alloc);
+
+ /* Restore 'attr' and 'mi'. */
+ if (attr)
+ goto restore_run;
+
+ if (le64_to_cpu(attr_b->nres.svcn) <= svcn &&
+ svcn <= le64_to_cpu(attr_b->nres.evcn)) {
+ attr = attr_b;
+ le = le_b;
+ mi = mi_b;
+ } else if (!le_b) {
+ err = -EINVAL;
+ goto bad_inode;
+ } else {
+ le = le_b;
+ attr = ni_find_attr(ni, attr_b, &le, type, name, name_len,
+ &svcn, &mi);
+ if (!attr)
+ goto bad_inode;
+ }
+
+restore_run:
+ if (mi_pack_runs(mi, attr, run, evcn - svcn + 1))
+ is_bad = true;
+
+undo_1:
+ run_deallocate_ex(sbi, run, vcn, alen, NULL, false);
+
+ run_truncate(run, vcn);
+out:
+ if (is_bad) {
+bad_inode:
+ _ntfs_bad_inode(&ni->vfs_inode);
+ }
return err;
}
@@ -855,7 +930,7 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
goto out;
}
- asize = le64_to_cpu(attr_b->nres.alloc_size) >> sbi->cluster_bits;
+ asize = le64_to_cpu(attr_b->nres.alloc_size) >> cluster_bits;
if (vcn >= asize) {
err = -EINVAL;
goto out;
@@ -1047,7 +1122,7 @@ ins_ext:
if (evcn1 > next_svcn) {
err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
next_svcn, evcn1 - next_svcn,
- attr_b->flags, &attr, &mi);
+ attr_b->flags, &attr, &mi, NULL);
if (err)
goto out;
}
@@ -1173,7 +1248,7 @@ int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
{
struct ntfs_sb_info *sbi = ni->mi.sbi;
u8 cluster_bits = sbi->cluster_bits;
- CLST vcn = from >> cluster_bits;
+ CLST vcn;
CLST vcn_last = (to - 1) >> cluster_bits;
CLST lcn, clen;
int err;
@@ -1647,7 +1722,7 @@ ins_ext:
if (evcn1 > next_svcn) {
err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
next_svcn, evcn1 - next_svcn,
- attr_b->flags, &attr, &mi);
+ attr_b->flags, &attr, &mi, NULL);
if (err)
goto out;
}
@@ -1812,18 +1887,12 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
err = ni_insert_nonresident(
ni, ATTR_DATA, NULL, 0, run, next_svcn,
evcn1 - eat - next_svcn, a_flags, &attr,
- &mi);
+ &mi, &le);
if (err)
goto out;
 	/* Layout of records may be changed. */
attr_b = NULL;
- le = al_find_ex(ni, NULL, ATTR_DATA, NULL, 0,
- &next_svcn);
- if (!le) {
- err = -EINVAL;
- goto out;
- }
}
/* Free all allocated memory. */
@@ -1918,7 +1987,7 @@ next_attr:
out:
up_write(&ni->file.run_lock);
if (err)
- make_bad_inode(&ni->vfs_inode);
+ _ntfs_bad_inode(&ni->vfs_inode);
return err;
}
@@ -1936,9 +2005,11 @@ int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
struct ATTRIB *attr = NULL, *attr_b;
struct ATTR_LIST_ENTRY *le, *le_b;
struct mft_inode *mi, *mi_b;
- CLST svcn, evcn1, vcn, len, end, alen, dealloc;
+ CLST svcn, evcn1, vcn, len, end, alen, hole, next_svcn;
u64 total_size, alloc_size;
u32 mask;
+ __le16 a_flags;
+ struct runs_tree run2;
if (!bytes)
return 0;
@@ -1990,6 +2061,9 @@ int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
}
down_write(&ni->file.run_lock);
+ run_init(&run2);
+ run_truncate(run, 0);
+
/*
* Enumerate all attribute segments and punch hole where necessary.
*/
@@ -1997,10 +2071,11 @@ int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
vcn = vbo >> sbi->cluster_bits;
len = bytes >> sbi->cluster_bits;
end = vcn + len;
- dealloc = 0;
+ hole = 0;
svcn = le64_to_cpu(attr_b->nres.svcn);
evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
+ a_flags = attr_b->flags;
if (svcn <= vcn && vcn < evcn1) {
attr = attr_b;
@@ -2008,14 +2083,14 @@ int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
mi = mi_b;
} else if (!le_b) {
err = -EINVAL;
- goto out;
+ goto bad_inode;
} else {
le = le_b;
attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
&mi);
if (!attr) {
err = -EINVAL;
- goto out;
+ goto bad_inode;
}
svcn = le64_to_cpu(attr->nres.svcn);
@@ -2023,49 +2098,91 @@ int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
}
while (svcn < end) {
- CLST vcn1, zero, dealloc2;
+ CLST vcn1, zero, hole2 = hole;
err = attr_load_runs(attr, ni, run, &svcn);
if (err)
- goto out;
+ goto done;
vcn1 = max(vcn, svcn);
zero = min(end, evcn1) - vcn1;
- dealloc2 = dealloc;
- err = run_deallocate_ex(sbi, run, vcn1, zero, &dealloc, true);
+		 * Check range [vcn1, vcn1 + zero).
+ * Check range [vcn1 + zero).
+ * Calculate how many clusters there are.
+ * Don't do any destructive actions.
+ */
+ err = run_deallocate_ex(NULL, run, vcn1, zero, &hole2, false);
if (err)
- goto out;
+ goto done;
- if (dealloc2 == dealloc) {
- /* Looks like the required range is already sparsed. */
- } else {
- if (!run_add_entry(run, vcn1, SPARSE_LCN, zero,
- false)) {
- err = -ENOMEM;
- goto out;
- }
+		/* Check if the required range is already a hole. */
+ if (hole2 == hole)
+ goto next_attr;
+
+ /* Make a clone of run to undo. */
+ err = run_clone(run, &run2);
+ if (err)
+ goto done;
+
+		/* Make a sparse hole range [vcn1, vcn1 + zero). */
+ if (!run_add_entry(run, vcn1, SPARSE_LCN, zero, false)) {
+ err = -ENOMEM;
+ goto done;
+ }
- err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
+ /* Update run in attribute segment. */
+ err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
+ if (err)
+ goto done;
+ next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
+ if (next_svcn < evcn1) {
+ /* Insert new attribute segment. */
+ err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
+ next_svcn,
+ evcn1 - next_svcn, a_flags,
+ &attr, &mi, &le);
if (err)
- goto out;
+ goto undo_punch;
+
+			/* Layout of records may be changed. */
+ attr_b = NULL;
}
+
+ /* Real deallocate. Should not fail. */
+ run_deallocate_ex(sbi, &run2, vcn1, zero, &hole, true);
+
+next_attr:
/* Free all allocated memory. */
run_truncate(run, 0);
if (evcn1 >= alen)
break;
+ /* Get next attribute segment. */
attr = ni_enum_attr_ex(ni, attr, &le, &mi);
if (!attr) {
err = -EINVAL;
- goto out;
+ goto bad_inode;
}
svcn = le64_to_cpu(attr->nres.svcn);
evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
}
- total_size -= (u64)dealloc << sbi->cluster_bits;
+done:
+ if (!hole)
+ goto out;
+
+ if (!attr_b) {
+ attr_b = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL,
+ &mi_b);
+ if (!attr_b) {
+ err = -EINVAL;
+ goto bad_inode;
+ }
+ }
+
+ total_size -= (u64)hole << sbi->cluster_bits;
attr_b->nres.total_size = cpu_to_le64(total_size);
mi_b->dirty = true;
@@ -2075,9 +2192,263 @@ int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
mark_inode_dirty(&ni->vfs_inode);
out:
+ run_close(&run2);
up_write(&ni->file.run_lock);
+ return err;
+
+bad_inode:
+ _ntfs_bad_inode(&ni->vfs_inode);
+ goto out;
+
+undo_punch:
+ /*
+ * Restore packed runs.
+	 * 'mi_pack_runs' should not fail, because we restore the original.
+ */
+ if (mi_pack_runs(mi, attr, &run2, evcn1 - svcn))
+ goto bad_inode;
+
+ goto done;
+}
+
+/*
+ * attr_insert_range - Insert range (hole) in file.
+ * Not for normal files.
+ */
+int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
+{
+ int err = 0;
+ struct runs_tree *run = &ni->file.run;
+ struct ntfs_sb_info *sbi = ni->mi.sbi;
+ struct ATTRIB *attr = NULL, *attr_b;
+ struct ATTR_LIST_ENTRY *le, *le_b;
+ struct mft_inode *mi, *mi_b;
+ CLST vcn, svcn, evcn1, len, next_svcn;
+ u64 data_size, alloc_size;
+ u32 mask;
+ __le16 a_flags;
+
+ if (!bytes)
+ return 0;
+
+ le_b = NULL;
+ attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
+ if (!attr_b)
+ return -ENOENT;
+
+ if (!is_attr_ext(attr_b)) {
+ /* It was checked above. See fallocate. */
+ return -EOPNOTSUPP;
+ }
+
+ if (!attr_b->non_res) {
+ data_size = le32_to_cpu(attr_b->res.data_size);
+ alloc_size = data_size;
+ mask = sbi->cluster_mask; /* cluster_size - 1 */
+ } else {
+ data_size = le64_to_cpu(attr_b->nres.data_size);
+ alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
+ mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
+ }
+
+ if (vbo > data_size) {
+		/* Inserting a range beyond the file size is not allowed. */
+ return -EINVAL;
+ }
+
+ if ((vbo & mask) || (bytes & mask)) {
+		/* Only frame-aligned ranges can be inserted. */
+ return -EINVAL;
+ }
+
+ /*
+ * valid_size <= data_size <= alloc_size
+	 * Check alloc_size against the maximum possible value.
+ */
+ if (bytes > sbi->maxbytes_sparse - alloc_size)
+ return -EFBIG;
+
+ vcn = vbo >> sbi->cluster_bits;
+ len = bytes >> sbi->cluster_bits;
+
+ down_write(&ni->file.run_lock);
+
+ if (!attr_b->non_res) {
+ err = attr_set_size(ni, ATTR_DATA, NULL, 0, run,
+ data_size + bytes, NULL, false, NULL);
+
+ le_b = NULL;
+ attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
+ &mi_b);
+ if (!attr_b) {
+ err = -EINVAL;
+ goto bad_inode;
+ }
+
+ if (err)
+ goto out;
+
+ if (!attr_b->non_res) {
+ /* Still resident. */
+ char *data = Add2Ptr(attr_b, attr_b->res.data_off);
+
+ memmove(data + bytes, data, bytes);
+ memset(data, 0, bytes);
+ goto done;
+ }
+
+		/* The resident file has become nonresident. */
+ data_size = le64_to_cpu(attr_b->nres.data_size);
+ alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
+ }
+
+ /*
+ * Enumerate all attribute segments and shift start vcn.
+ */
+ a_flags = attr_b->flags;
+ svcn = le64_to_cpu(attr_b->nres.svcn);
+ evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
+
+ if (svcn <= vcn && vcn < evcn1) {
+ attr = attr_b;
+ le = le_b;
+ mi = mi_b;
+ } else if (!le_b) {
+ err = -EINVAL;
+ goto bad_inode;
+ } else {
+ le = le_b;
+ attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
+ &mi);
+ if (!attr) {
+ err = -EINVAL;
+ goto bad_inode;
+ }
+
+ svcn = le64_to_cpu(attr->nres.svcn);
+ evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
+ }
+
+ run_truncate(run, 0); /* clear cached values. */
+ err = attr_load_runs(attr, ni, run, NULL);
+ if (err)
+ goto out;
+
+ if (!run_insert_range(run, vcn, len)) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+	/* Try to pack as much as possible into the current record. */
+ err = mi_pack_runs(mi, attr, run, evcn1 + len - svcn);
if (err)
- make_bad_inode(&ni->vfs_inode);
+ goto out;
+
+ next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
+
+ while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
+ attr->type == ATTR_DATA && !attr->name_len) {
+ le64_add_cpu(&attr->nres.svcn, len);
+ le64_add_cpu(&attr->nres.evcn, len);
+ if (le) {
+ le->vcn = attr->nres.svcn;
+ ni->attr_list.dirty = true;
+ }
+ mi->dirty = true;
+ }
+
+ if (next_svcn < evcn1 + len) {
+ err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
+ next_svcn, evcn1 + len - next_svcn,
+ a_flags, NULL, NULL, NULL);
+
+ le_b = NULL;
+ attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
+ &mi_b);
+ if (!attr_b) {
+ err = -EINVAL;
+ goto bad_inode;
+ }
+
+ if (err) {
+ /* ni_insert_nonresident failed. Try to undo. */
+ goto undo_insert_range;
+ }
+ }
+
+ /*
+ * Update primary attribute segment.
+ */
+ if (vbo <= ni->i_valid)
+ ni->i_valid += bytes;
+
+ attr_b->nres.data_size = le64_to_cpu(data_size + bytes);
+ attr_b->nres.alloc_size = le64_to_cpu(alloc_size + bytes);
+
+	/* ni->i_valid may temporarily not equal valid_size. */
+ if (ni->i_valid > data_size + bytes)
+ attr_b->nres.valid_size = attr_b->nres.data_size;
+ else
+ attr_b->nres.valid_size = cpu_to_le64(ni->i_valid);
+ mi_b->dirty = true;
+
+done:
+ ni->vfs_inode.i_size += bytes;
+ ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
+ mark_inode_dirty(&ni->vfs_inode);
+
+out:
+ run_truncate(run, 0); /* clear cached values. */
+
+ up_write(&ni->file.run_lock);
return err;
+
+bad_inode:
+ _ntfs_bad_inode(&ni->vfs_inode);
+ goto out;
+
+undo_insert_range:
+ svcn = le64_to_cpu(attr_b->nres.svcn);
+ evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
+
+ if (svcn <= vcn && vcn < evcn1) {
+ attr = attr_b;
+ le = le_b;
+ mi = mi_b;
+ } else if (!le_b) {
+ goto bad_inode;
+ } else {
+ le = le_b;
+ attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
+ &mi);
+ if (!attr) {
+ goto bad_inode;
+ }
+
+ svcn = le64_to_cpu(attr->nres.svcn);
+ evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
+ }
+
+ if (attr_load_runs(attr, ni, run, NULL))
+ goto bad_inode;
+
+ if (!run_collapse_range(run, vcn, len))
+ goto bad_inode;
+
+ if (mi_pack_runs(mi, attr, run, evcn1 + len - svcn))
+ goto bad_inode;
+
+ while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
+ attr->type == ATTR_DATA && !attr->name_len) {
+ le64_sub_cpu(&attr->nres.svcn, len);
+ le64_sub_cpu(&attr->nres.evcn, len);
+ if (le) {
+ le->vcn = attr->nres.svcn;
+ ni->attr_list.dirty = true;
+ }
+ mi->dirty = true;
+ }
+
+ goto out;
}
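The attr_punch_hole() rework above follows a probe-then-commit shape: run_deallocate_ex() is first called with a NULL sb_info so it only counts clusters, the run is cloned with run_clone() for undo, and the destructive pass runs only after the metadata update succeeds. A rough user-space sketch of that control flow; the types and helpers here are hypothetical placeholders, not the ntfs3 API:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct run { int clusters[16]; int count; };

/* Count (dry_run) or actually release clusters in [from, to). */
static int deallocate(struct run *r, int from, int to, int *freed, bool dry_run)
{
	int n = 0;

	for (int i = from; i < to && i < r->count; i++) {
		if (r->clusters[i]) {
			n++;
			if (!dry_run)
				r->clusters[i] = 0;
		}
	}
	*freed = n;
	return 0;
}

static int punch_hole(struct run *r, int from, int to)
{
	struct run backup;
	int freed = 0;

	/* 1. Probe: nothing destructive yet. */
	deallocate(r, from, to, &freed, true);
	if (!freed)
		return 0;		/* already a hole */

	/* 2. Keep a copy so the metadata update can be undone. */
	memcpy(&backup, r, sizeof(backup));

	/* 3. Update metadata; on failure restore the snapshot and stop. */
	if (0 /* metadata update failed */) {
		memcpy(r, &backup, sizeof(backup));
		return -1;
	}

	/* 4. Commit: the real deallocation is expected not to fail. */
	deallocate(r, from, to, &freed, false);
	return 0;
}

int main(void)
{
	struct run r = { .clusters = { 1, 1, 1, 1 }, .count = 4 };

	punch_hole(&r, 1, 3);
	printf("cluster[1]=%d cluster[2]=%d\n", r.clusters[1], r.clusters[2]);
	return 0;
}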
diff --git a/fs/ntfs3/bitmap.c b/fs/ntfs3/bitmap.c
index aa184407520f..5d44ceac855b 100644
--- a/fs/ntfs3/bitmap.c
+++ b/fs/ntfs3/bitmap.c
@@ -51,11 +51,6 @@ void ntfs3_exit_bitmap(void)
kmem_cache_destroy(ntfs_enode_cachep);
}
-static inline u32 wnd_bits(const struct wnd_bitmap *wnd, size_t i)
-{
- return i + 1 == wnd->nwnd ? wnd->bits_last : wnd->sb->s_blocksize * 8;
-}
-
/*
* wnd_scan
*
@@ -1333,9 +1328,7 @@ int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
if (!new_free)
return -ENOMEM;
- if (new_free != wnd->free_bits)
- memcpy(new_free, wnd->free_bits,
- wnd->nwnd * sizeof(short));
+ memcpy(new_free, wnd->free_bits, wnd->nwnd * sizeof(short));
memset(new_free + wnd->nwnd, 0,
(new_wnd - wnd->nwnd) * sizeof(short));
kfree(wnd->free_bits);
@@ -1395,9 +1388,8 @@ int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
void wnd_zone_set(struct wnd_bitmap *wnd, size_t lcn, size_t len)
{
- size_t zlen;
+ size_t zlen = wnd->zone_end - wnd->zone_bit;
- zlen = wnd->zone_end - wnd->zone_bit;
if (zlen)
wnd_add_free_ext(wnd, wnd->zone_bit, zlen, false);
diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
index 4a21745711fe..4f2ffc7ef296 100644
--- a/fs/ntfs3/file.c
+++ b/fs/ntfs3/file.c
@@ -530,21 +530,35 @@ static int ntfs_truncate(struct inode *inode, loff_t new_size)
static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
{
struct inode *inode = file->f_mapping->host;
+ struct address_space *mapping = inode->i_mapping;
struct super_block *sb = inode->i_sb;
struct ntfs_sb_info *sbi = sb->s_fs_info;
struct ntfs_inode *ni = ntfs_i(inode);
loff_t end = vbo + len;
loff_t vbo_down = round_down(vbo, PAGE_SIZE);
- loff_t i_size;
+ bool is_supported_holes = is_sparsed(ni) || is_compressed(ni);
+ loff_t i_size, new_size;
+ bool map_locked;
int err;
/* No support for dir. */
if (!S_ISREG(inode->i_mode))
return -EOPNOTSUPP;
- /* Return error if mode is not supported. */
- if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
- FALLOC_FL_COLLAPSE_RANGE)) {
+ /*
+ * vfs_fallocate checks all possible combinations of mode.
+ * Do additional checks here before ntfs_set_state(dirty).
+ */
+ if (mode & FALLOC_FL_PUNCH_HOLE) {
+ if (!is_supported_holes)
+ return -EOPNOTSUPP;
+ } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
+ } else if (mode & FALLOC_FL_INSERT_RANGE) {
+ if (!is_supported_holes)
+ return -EOPNOTSUPP;
+ } else if (mode &
+ ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
+ FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)) {
ntfs_inode_warn(inode, "fallocate(0x%x) is not supported",
mode);
return -EOPNOTSUPP;
@@ -554,6 +568,8 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
inode_lock(inode);
i_size = inode->i_size;
+ new_size = max(end, i_size);
+ map_locked = false;
if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
/* Should never be here, see ntfs_file_open. */
@@ -561,38 +577,27 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
goto out;
}
+ if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
+ FALLOC_FL_INSERT_RANGE)) {
+ inode_dio_wait(inode);
+ filemap_invalidate_lock(mapping);
+ map_locked = true;
+ }
+
if (mode & FALLOC_FL_PUNCH_HOLE) {
u32 frame_size;
loff_t mask, vbo_a, end_a, tmp;
- if (!(mode & FALLOC_FL_KEEP_SIZE)) {
- err = -EINVAL;
- goto out;
- }
-
- err = filemap_write_and_wait_range(inode->i_mapping, vbo,
- end - 1);
+ err = filemap_write_and_wait_range(mapping, vbo, end - 1);
if (err)
goto out;
- err = filemap_write_and_wait_range(inode->i_mapping, end,
- LLONG_MAX);
+ err = filemap_write_and_wait_range(mapping, end, LLONG_MAX);
if (err)
goto out;
- inode_dio_wait(inode);
-
truncate_pagecache(inode, vbo_down);
- if (!is_sparsed(ni) && !is_compressed(ni)) {
- /*
- * Normal file, can't make hole.
- * TODO: Try to find way to save info about hole.
- */
- err = -EOPNOTSUPP;
- goto out;
- }
-
ni_lock(ni);
err = attr_punch_hole(ni, vbo, len, &frame_size);
ni_unlock(ni);
@@ -624,17 +629,11 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
ni_unlock(ni);
}
} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
- if (mode & ~FALLOC_FL_COLLAPSE_RANGE) {
- err = -EINVAL;
- goto out;
- }
-
/*
* Write tail of the last page before removed range since
* it will get removed from the page cache below.
*/
- err = filemap_write_and_wait_range(inode->i_mapping, vbo_down,
- vbo);
+ err = filemap_write_and_wait_range(mapping, vbo_down, vbo);
if (err)
goto out;
@@ -642,34 +641,58 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
* Write data that will be shifted to preserve them
* when discarding page cache below.
*/
- err = filemap_write_and_wait_range(inode->i_mapping, end,
- LLONG_MAX);
+ err = filemap_write_and_wait_range(mapping, end, LLONG_MAX);
if (err)
goto out;
- /* Wait for existing dio to complete. */
- inode_dio_wait(inode);
-
truncate_pagecache(inode, vbo_down);
ni_lock(ni);
err = attr_collapse_range(ni, vbo, len);
ni_unlock(ni);
+ } else if (mode & FALLOC_FL_INSERT_RANGE) {
+ /* Check new size. */
+ err = inode_newsize_ok(inode, new_size);
+ if (err)
+ goto out;
+
+ /* Write out all dirty pages. */
+ err = filemap_write_and_wait_range(mapping, vbo_down,
+ LLONG_MAX);
+ if (err)
+ goto out;
+ truncate_pagecache(inode, vbo_down);
+
+ ni_lock(ni);
+ err = attr_insert_range(ni, vbo, len);
+ ni_unlock(ni);
} else {
- /*
- * Normal file: Allocate clusters, do not change 'valid' size.
- */
- loff_t new_size = max(end, i_size);
+ /* Check new size. */
+
+		/* generic/213 expects -ENOSPC instead of -EFBIG. */
+ if (!is_supported_holes) {
+ loff_t to_alloc = new_size - inode_get_bytes(inode);
+
+ if (to_alloc > 0 &&
+ (to_alloc >> sbi->cluster_bits) >
+ wnd_zeroes(&sbi->used.bitmap)) {
+ err = -ENOSPC;
+ goto out;
+ }
+ }
err = inode_newsize_ok(inode, new_size);
if (err)
goto out;
+ /*
+ * Allocate clusters, do not change 'valid' size.
+ */
err = ntfs_set_size(inode, new_size);
if (err)
goto out;
- if (is_sparsed(ni) || is_compressed(ni)) {
+ if (is_supported_holes) {
CLST vcn_v = ni->i_valid >> sbi->cluster_bits;
CLST vcn = vbo >> sbi->cluster_bits;
CLST cend = bytes_to_cluster(sbi, end);
@@ -717,8 +740,8 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
}
out:
- if (err == -EFBIG)
- err = -ENOSPC;
+ if (map_locked)
+ filemap_invalidate_unlock(mapping);
if (!err) {
inode->i_ctime = inode->i_mtime = current_time(inode);
@@ -989,7 +1012,6 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
if (bytes > count)
bytes = count;
- frame = pos >> frame_bits;
frame_vbo = pos & ~(frame_size - 1);
index = frame_vbo >> PAGE_SHIFT;
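attr_insert_range(), reached from the new FALLOC_FL_INSERT_RANGE branch above, only accepts frame-aligned requests: mask is cluster_size - 1 for ordinary sparse files and (cluster_size << c_unit) - 1 for compressed ones. A small sketch of that check with illustrative numbers (4 KiB clusters, c_unit = 4, so the frame is 64 KiB):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cluster_size = 4096;	/* illustrative */
	uint32_t c_unit = 4;		/* 16 clusters per compression frame */
	uint64_t mask = ((uint64_t)cluster_size << c_unit) - 1;	/* 0xffff */

	uint64_t vbo = 128 * 1024, bytes = 64 * 1024;

	if ((vbo & mask) || (bytes & mask))
		printf("rejected: not frame aligned\n");
	else
		printf("accepted: frame aligned\n");
	return 0;
}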
diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
index 18842998c8fa..381a38a06ec2 100644
--- a/fs/ntfs3/frecord.c
+++ b/fs/ntfs3/frecord.c
@@ -7,6 +7,7 @@
#include <linux/fiemap.h>
#include <linux/fs.h>
+#include <linux/minmax.h>
#include <linux/vmalloc.h>
#include "debug.h"
@@ -468,7 +469,7 @@ ni_ins_new_attr(struct ntfs_inode *ni, struct mft_inode *mi,
&ref, &le);
if (err) {
/* No memory or no space. */
- return NULL;
+ return ERR_PTR(err);
}
le_added = true;
@@ -649,6 +650,7 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
struct mft_inode *mi;
u32 asize, free;
struct MFT_REF ref;
+ struct MFT_REC *mrec;
__le16 id;
if (!ni->attr_list.dirty)
@@ -692,11 +694,17 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
free -= asize;
}
+	/* Make a copy of the primary record to restore on error. */
+ mrec = kmemdup(ni->mi.mrec, sbi->record_size, GFP_NOFS);
+ if (!mrec)
+ return 0; /* Not critical. */
+
/* It seems that attribute list can be removed from primary record. */
mi_remove_attr(NULL, &ni->mi, attr_list);
/*
- * Repeat the cycle above and move all attributes to primary record.
+ * Repeat the cycle above and copy all attributes to primary record.
+ * Do not remove original attributes from subrecords!
 	 * It should succeed!
*/
le = NULL;
@@ -707,14 +715,14 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
mi = ni_find_mi(ni, ino_get(&le->ref));
if (!mi) {
 			/* Should never happen, because it was already checked. */
- goto bad;
+ goto out;
}
attr = mi_find_attr(mi, NULL, le->type, le_name(le),
le->name_len, &le->id);
if (!attr) {
 			/* Should never happen, because it was already checked. */
- goto bad;
+ goto out;
}
asize = le32_to_cpu(attr->size);
@@ -724,18 +732,33 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
le16_to_cpu(attr->name_off));
if (!attr_ins) {
/*
- * Internal error.
- * Either no space in primary record (already checked).
- * Either tried to insert another
- * non indexed attribute (logic error).
+ * No space in primary record (already checked).
*/
- goto bad;
+ goto out;
}
/* Copy all except id. */
id = attr_ins->id;
memcpy(attr_ins, attr, asize);
attr_ins->id = id;
+ }
+
+ /*
+ * Repeat the cycle above and remove all attributes from subrecords.
+ */
+ le = NULL;
+ while ((le = al_enumerate(ni, le))) {
+ if (!memcmp(&le->ref, &ref, sizeof(ref)))
+ continue;
+
+ mi = ni_find_mi(ni, ino_get(&le->ref));
+ if (!mi)
+ continue;
+
+ attr = mi_find_attr(mi, NULL, le->type, le_name(le),
+ le->name_len, &le->id);
+ if (!attr)
+ continue;
/* Remove from original record. */
mi_remove_attr(NULL, mi, attr);
@@ -748,11 +771,13 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
ni->attr_list.le = NULL;
ni->attr_list.dirty = false;
+ kfree(mrec);
+ return 0;
+out:
+ /* Restore primary record. */
+ swap(mrec, ni->mi.mrec);
+ kfree(mrec);
return 0;
-bad:
- ntfs_inode_err(&ni->vfs_inode, "Internal error");
- make_bad_inode(&ni->vfs_inode);
- return -EINVAL;
}
/*
@@ -986,6 +1011,8 @@ static int ni_ins_attr_ext(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le,
name_off, svcn, ins_le);
if (!attr)
continue;
+ if (IS_ERR(attr))
+ return PTR_ERR(attr);
if (ins_attr)
*ins_attr = attr;
@@ -1007,8 +1034,15 @@ insert_ext:
attr = ni_ins_new_attr(ni, mi, le, type, name, name_len, asize,
name_off, svcn, ins_le);
- if (!attr)
+ if (!attr) {
+ err = -EINVAL;
goto out2;
+ }
+
+ if (IS_ERR(attr)) {
+ err = PTR_ERR(attr);
+ goto out2;
+ }
if (ins_attr)
*ins_attr = attr;
@@ -1020,10 +1054,9 @@ insert_ext:
out2:
ni_remove_mi(ni, mi);
mi_put(mi);
- err = -EINVAL;
out1:
- ntfs_mark_rec_free(sbi, rno);
+ ntfs_mark_rec_free(sbi, rno, is_mft);
out:
return err;
@@ -1076,6 +1109,11 @@ static int ni_insert_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
if (asize <= free) {
attr = ni_ins_new_attr(ni, &ni->mi, NULL, type, name, name_len,
asize, name_off, svcn, ins_le);
+ if (IS_ERR(attr)) {
+ err = PTR_ERR(attr);
+ goto out;
+ }
+
if (attr) {
if (ins_attr)
*ins_attr = attr;
@@ -1173,6 +1211,11 @@ static int ni_insert_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
goto out;
}
+ if (IS_ERR(attr)) {
+ err = PTR_ERR(attr);
+ goto out;
+ }
+
if (ins_attr)
*ins_attr = attr;
if (ins_mi)
@@ -1218,7 +1261,7 @@ static int ni_expand_mft_list(struct ntfs_inode *ni)
mft_min = mft_new;
mi_min = mi_new;
} else {
- ntfs_mark_rec_free(sbi, mft_new);
+ ntfs_mark_rec_free(sbi, mft_new, true);
mft_new = 0;
ni_remove_mi(ni, mi_new);
}
@@ -1262,7 +1305,7 @@ static int ni_expand_mft_list(struct ntfs_inode *ni)
done = asize - run_size - SIZEOF_NONRESIDENT;
le32_sub_cpu(&ni->mi.mrec->used, done);
- /* Estimate the size of second part: run_buf=NULL. */
+ /* Estimate packed size (run_buf=NULL). */
err = run_pack(run, svcn, evcn + 1 - svcn, NULL, sbi->record_size,
&plen);
if (err < 0)
@@ -1288,10 +1331,16 @@ static int ni_expand_mft_list(struct ntfs_inode *ni)
goto out;
}
+ if (IS_ERR(attr)) {
+ err = PTR_ERR(attr);
+ goto out;
+ }
+
attr->non_res = 1;
attr->name_off = SIZEOF_NONRESIDENT_LE;
attr->flags = 0;
+	/* This call cannot fail because the size was already checked above. */
run_pack(run, svcn, evcn + 1 - svcn, Add2Ptr(attr, SIZEOF_NONRESIDENT),
run_size, &plen);
@@ -1301,7 +1350,7 @@ static int ni_expand_mft_list(struct ntfs_inode *ni)
out:
if (mft_new) {
- ntfs_mark_rec_free(sbi, mft_new);
+ ntfs_mark_rec_free(sbi, mft_new, true);
ni_remove_mi(ni, mi_new);
}
@@ -1367,8 +1416,6 @@ int ni_expand_list(struct ntfs_inode *ni)
/* Split MFT data as much as possible. */
err = ni_expand_mft_list(ni);
- if (err)
- goto out;
out:
return !err && !done ? -EOPNOTSUPP : err;
@@ -1381,7 +1428,7 @@ int ni_insert_nonresident(struct ntfs_inode *ni, enum ATTR_TYPE type,
const __le16 *name, u8 name_len,
const struct runs_tree *run, CLST svcn, CLST len,
__le16 flags, struct ATTRIB **new_attr,
- struct mft_inode **mi)
+ struct mft_inode **mi, struct ATTR_LIST_ENTRY **le)
{
int err;
CLST plen;
@@ -1394,6 +1441,7 @@ int ni_insert_nonresident(struct ntfs_inode *ni, enum ATTR_TYPE type,
u32 run_size, asize;
struct ntfs_sb_info *sbi = ni->mi.sbi;
+ /* Estimate packed size (run_buf=NULL). */
err = run_pack(run, svcn, len, NULL, sbi->max_bytes_per_attr - run_off,
&plen);
if (err < 0)
@@ -1414,7 +1462,7 @@ int ni_insert_nonresident(struct ntfs_inode *ni, enum ATTR_TYPE type,
}
err = ni_insert_attr(ni, type, name, name_len, asize, name_off, svcn,
- &attr, mi, NULL);
+ &attr, mi, le);
if (err)
goto out;
@@ -1423,12 +1471,12 @@ int ni_insert_nonresident(struct ntfs_inode *ni, enum ATTR_TYPE type,
attr->name_off = cpu_to_le16(name_off);
attr->flags = flags;
+	/* This call cannot fail because the size was already checked above. */
run_pack(run, svcn, len, Add2Ptr(attr, run_off), run_size, &plen);
attr->nres.svcn = cpu_to_le64(svcn);
attr->nres.evcn = cpu_to_le64((u64)svcn + len - 1);
- err = 0;
if (new_attr)
*new_attr = attr;
@@ -1560,7 +1608,7 @@ int ni_delete_all(struct ntfs_inode *ni)
mi->dirty = true;
mi_write(mi, 0);
- ntfs_mark_rec_free(sbi, mi->rno);
+ ntfs_mark_rec_free(sbi, mi->rno, false);
ni_remove_mi(ni, mi);
mi_put(mi);
node = next;
@@ -1571,7 +1619,7 @@ int ni_delete_all(struct ntfs_inode *ni)
ni->mi.dirty = true;
err = mi_write(&ni->mi, 0);
- ntfs_mark_rec_free(sbi, ni->mi.rno);
+ ntfs_mark_rec_free(sbi, ni->mi.rno, false);
return err;
}
@@ -1589,7 +1637,8 @@ struct ATTR_FILE_NAME *ni_fname_name(struct ntfs_inode *ni,
struct ATTRIB *attr = NULL;
struct ATTR_FILE_NAME *fname;
- *le = NULL;
+ if (le)
+ *le = NULL;
/* Enumerate all names. */
next:
@@ -1605,7 +1654,7 @@ next:
goto next;
if (!uni)
- goto next;
+ return fname;
if (uni->len != fname->name_len)
goto next;
@@ -2302,10 +2351,8 @@ remove_wof:
out:
kfree(pages);
- if (err) {
- make_bad_inode(inode);
- ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
- }
+ if (err)
+ _ntfs_bad_inode(inode);
return err;
}
@@ -2944,7 +2991,7 @@ bool ni_remove_name_undo(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
}
/*
- * ni_add_name - Add new name in MFT and in directory.
+ * ni_add_name - Add new name into MFT and into directory.
*/
int ni_add_name(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
struct NTFS_DE *de)
@@ -2953,13 +3000,20 @@ int ni_add_name(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
struct ATTRIB *attr;
struct ATTR_LIST_ENTRY *le;
struct mft_inode *mi;
+ struct ATTR_FILE_NAME *fname;
struct ATTR_FILE_NAME *de_name = (struct ATTR_FILE_NAME *)(de + 1);
u16 de_key_size = le16_to_cpu(de->key_size);
mi_get_ref(&ni->mi, &de->ref);
mi_get_ref(&dir_ni->mi, &de_name->home);
- /* Insert new name in MFT. */
+ /* Fill duplicate from any ATTR_NAME. */
+ fname = ni_fname_name(ni, NULL, NULL, NULL, NULL);
+ if (fname)
+ memcpy(&de_name->dup, &fname->dup, sizeof(fname->dup));
+ de_name->dup.fa = ni->std_fa;
+
+ /* Insert new name into MFT. */
err = ni_insert_resident(ni, de_key_size, ATTR_NAME, NULL, 0, &attr,
&mi, &le);
if (err)
@@ -2967,7 +3021,7 @@ int ni_add_name(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), de_name, de_key_size);
- /* Insert new name in directory. */
+ /* Insert new name into directory. */
err = indx_insert_entry(&dir_ni->dir, dir_ni, de, ni->mi.sbi, NULL, 0);
if (err)
ni_remove_attr_le(ni, attr, mi, le);
@@ -2991,7 +3045,7 @@ int ni_rename(struct ntfs_inode *dir_ni, struct ntfs_inode *new_dir_ni,
* 1) Add new name and remove old name.
* 2) Remove old name and add new name.
*
- * In most cases (not all!) adding new name in MFT and in directory can
+ * In most cases (not all!) adding new name into MFT and into directory can
* allocate additional cluster(s).
 	 * The second way may result in a bad inode if we can't add the new name
 	 * and then can't restore (add) the old name.
@@ -3261,7 +3315,7 @@ int ni_write_inode(struct inode *inode, int sync, const char *hint)
err = err2;
if (is_empty) {
- ntfs_mark_rec_free(sbi, mi->rno);
+ ntfs_mark_rec_free(sbi, mi->rno, false);
rb_erase(node, &ni->mi_tree);
mi_put(mi);
}
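ni_try_remove_attr_list() above now snapshots the primary MFT record with kmemdup() and restores it via swap() if moving the attributes back fails, instead of marking the inode bad. A simplified user-space sketch of that snapshot-and-restore pattern; the record type and helpers are made up for illustration:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* Hypothetical record; stands in for the primary MFT record. */
struct record { char data[64]; };

static int risky_rewrite(struct record *rec)
{
	strcpy(rec->data, "partially rewritten");
	return -1;			/* simulate a mid-way failure */
}

static int rewrite_with_backup(struct record *rec)
{
	struct record *backup = malloc(sizeof(*backup));

	if (!backup)
		return 0;		/* not critical, just skip the optimisation */
	memcpy(backup, rec, sizeof(*rec));

	if (risky_rewrite(rec)) {
		/* Restore the snapshot instead of leaving a broken record. */
		memcpy(rec, backup, sizeof(*rec));
	}
	free(backup);
	return 0;
}

int main(void)
{
	struct record rec = { .data = "original" };

	rewrite_with_backup(&rec);
	printf("%s\n", rec.data);	/* prints "original" */
	return 0;
}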
diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c
index 49b7df616778..e7c494005122 100644
--- a/fs/ntfs3/fslog.c
+++ b/fs/ntfs3/fslog.c
@@ -3843,6 +3843,8 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
memset(&rst_info2, 0, sizeof(struct restart_info));
err = log_read_rst(log, l_size, false, &rst_info2);
+ if (err)
+ goto out;
/* Determine which restart area to use. */
if (!rst_info2.restart || rst_info2.last_lsn <= rst_info.last_lsn)
@@ -5057,7 +5059,7 @@ undo_action_next:
goto add_allocated_vcns;
vcn = le64_to_cpu(lrh->target_vcn);
- vcn &= ~(log->clst_per_page - 1);
+ vcn &= ~(u64)(log->clst_per_page - 1);
add_allocated_vcns:
for (i = 0, vcn = le64_to_cpu(lrh->target_vcn),
diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
index 1835e35199c2..4ed15f64b17f 100644
--- a/fs/ntfs3/fsntfs.c
+++ b/fs/ntfs3/fsntfs.c
@@ -703,12 +703,14 @@ out:
/*
* ntfs_mark_rec_free - Mark record as free.
+ * is_mft - true if we are changing the MFT.
*/
-void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno)
+void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno, bool is_mft)
{
struct wnd_bitmap *wnd = &sbi->mft.bitmap;
- down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
+ if (!is_mft)
+ down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
if (rno >= wnd->nbits)
goto out;
@@ -727,7 +729,8 @@ void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno)
sbi->mft.next_free = rno;
out:
- up_write(&wnd->rw_lock);
+ if (!is_mft)
+ up_write(&wnd->rw_lock);
}
/*
@@ -780,7 +783,7 @@ out:
*/
int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
{
- CLST zone_limit, zone_max, lcn, vcn, len;
+ CLST lcn, vcn, len;
size_t lcn_s, zlen;
struct wnd_bitmap *wnd = &sbi->used.bitmap;
struct ntfs_inode *ni = sbi->mft.ni;
@@ -789,16 +792,6 @@ int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
if (wnd_zone_len(wnd))
return 0;
- /*
- * Compute the MFT zone at two steps.
- * It would be nice if we are able to allocate 1/8 of
- * total clusters for MFT but not more then 512 MB.
- */
- zone_limit = (512 * 1024 * 1024) >> sbi->cluster_bits;
- zone_max = wnd->nbits >> 3;
- if (zone_max > zone_limit)
- zone_max = zone_limit;
-
vcn = bytes_to_cluster(sbi,
(u64)sbi->mft.bitmap.nbits << sbi->record_bits);
@@ -812,13 +805,7 @@ int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
lcn_s = lcn + 1;
/* Try to allocate clusters after last MFT run. */
- zlen = wnd_find(wnd, zone_max, lcn_s, 0, &lcn_s);
- if (!zlen) {
- ntfs_notice(sbi->sb, "MftZone: unavailable");
- return 0;
- }
-
- /* Truncate too large zone. */
+ zlen = wnd_find(wnd, sbi->zone_max, lcn_s, 0, &lcn_s);
wnd_zone_set(wnd, lcn_s, zlen);
return 0;
@@ -827,16 +814,21 @@ int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
/*
* ntfs_update_mftmirr - Update $MFTMirr data.
*/
-int ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
+void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
{
int err;
struct super_block *sb = sbi->sb;
- u32 blocksize = sb->s_blocksize;
+ u32 blocksize;
sector_t block1, block2;
u32 bytes;
+ if (!sb)
+ return;
+
+ blocksize = sb->s_blocksize;
+
if (!(sbi->flags & NTFS_FLAGS_MFTMIRR))
- return 0;
+ return;
err = 0;
bytes = sbi->mft.recs_mirr << sbi->record_bits;
@@ -847,16 +839,13 @@ int ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
struct buffer_head *bh1, *bh2;
bh1 = sb_bread(sb, block1++);
- if (!bh1) {
- err = -EIO;
- goto out;
- }
+ if (!bh1)
+ return;
bh2 = sb_getblk(sb, block2++);
if (!bh2) {
put_bh(bh1);
- err = -EIO;
- goto out;
+ return;
}
if (buffer_locked(bh2))
@@ -876,13 +865,24 @@ int ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
put_bh(bh2);
if (err)
- goto out;
+ return;
}
sbi->flags &= ~NTFS_FLAGS_MFTMIRR;
+}
-out:
- return err;
+/*
+ * ntfs_bad_inode
+ *
+ * Marks the inode as bad and the filesystem as 'dirty'.
+ */
+void ntfs_bad_inode(struct inode *inode, const char *hint)
+{
+ struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
+
+ ntfs_inode_err(inode, "%s", hint);
+ make_bad_inode(inode);
+ ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
}
/*
@@ -1395,7 +1395,7 @@ int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
if (buffer_locked(bh))
__wait_on_buffer(bh);
- lock_buffer(nb->bh[idx]);
+ lock_buffer(bh);
bh_data = bh->b_data + off;
end_data = Add2Ptr(bh_data, op);
@@ -2424,7 +2424,7 @@ static inline void ntfs_unmap_and_discard(struct ntfs_sb_info *sbi, CLST lcn,
void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
{
- CLST end, i;
+ CLST end, i, zone_len, zlen;
struct wnd_bitmap *wnd = &sbi->used.bitmap;
down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
@@ -2459,6 +2459,28 @@ void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
ntfs_unmap_and_discard(sbi, lcn, len);
wnd_set_free(wnd, lcn, len);
+	/* Append to the MFT zone, if possible. */
+ zone_len = wnd_zone_len(wnd);
+ zlen = min(zone_len + len, sbi->zone_max);
+
+ if (zlen == zone_len) {
+ /* MFT zone already has maximum size. */
+ } else if (!zone_len) {
+ /* Create MFT zone only if 'zlen' is large enough. */
+ if (zlen == sbi->zone_max)
+ wnd_zone_set(wnd, lcn, zlen);
+ } else {
+ CLST zone_lcn = wnd_zone_bit(wnd);
+
+ if (lcn + len == zone_lcn) {
+			/* Append at the head of the MFT zone. */
+ wnd_zone_set(wnd, lcn, zlen);
+ } else if (zone_lcn + zone_len == lcn) {
+ /* Append into tail MFT zone. */
+			/* Append at the tail of the MFT zone. */
+ }
+ }
+
out:
up_write(&wnd->rw_lock);
}
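The new tail of mark_as_free_ex() above tries to grow the MFT zone from freed clusters: do nothing if the zone is already at zone_max, create a zone only when a full-sized one would fit, otherwise extend it at the head or tail when the freed range is adjacent. A compact sketch of the same decision tree with a made-up zone structure:

#include <stdio.h>

/* Illustrative zone state; the kernel tracks this in the used-space bitmap. */
struct zone { unsigned long start, len, max; };

static void zone_extend(struct zone *z, unsigned long lcn, unsigned long len)
{
	unsigned long zlen = z->len + len;

	if (zlen > z->max)
		zlen = z->max;

	if (zlen == z->len) {
		/* Zone already at maximum size: nothing to do. */
	} else if (!z->len) {
		/* No zone yet: only create one if it would be full-sized. */
		if (zlen == z->max) {
			z->start = lcn;
			z->len = zlen;
		}
	} else if (lcn + len == z->start) {
		/* Freed range ends where the zone starts: grow at the head. */
		z->start = lcn;
		z->len = zlen;
	} else if (z->start + z->len == lcn) {
		/* Freed range starts where the zone ends: grow at the tail. */
		z->len = zlen;
	}
}

int main(void)
{
	struct zone z = { .start = 100, .len = 10, .max = 64 };

	zone_extend(&z, 110, 5);	/* adjacent to the tail */
	printf("zone [%lu, +%lu)\n", z.start, z.len);
	return 0;
}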
diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
index 6f81e3a49abf..440328147e7e 100644
--- a/fs/ntfs3/index.c
+++ b/fs/ntfs3/index.c
@@ -1042,19 +1042,16 @@ int indx_find(struct ntfs_index *indx, struct ntfs_inode *ni,
{
int err;
struct NTFS_DE *e;
- const struct INDEX_HDR *hdr;
struct indx_node *node;
if (!root)
root = indx_get_root(&ni->dir, ni, NULL, NULL);
if (!root) {
- err = -EINVAL;
- goto out;
+ /* Should not happen. */
+ return -EINVAL;
}
- hdr = &root->ihdr;
-
/* Check cache. */
e = fnd->level ? fnd->de[fnd->level - 1] : fnd->root_de;
if (e && !de_is_last(e) &&
@@ -1068,39 +1065,35 @@ int indx_find(struct ntfs_index *indx, struct ntfs_inode *ni,
fnd_clear(fnd);
/* Lookup entry that is <= to the search value. */
- e = hdr_find_e(indx, hdr, key, key_len, ctx, diff);
+ e = hdr_find_e(indx, &root->ihdr, key, key_len, ctx, diff);
if (!e)
return -EINVAL;
fnd->root_de = e;
- err = 0;
for (;;) {
node = NULL;
- if (*diff >= 0 || !de_has_vcn_ex(e)) {
- *entry = e;
- goto out;
- }
+ if (*diff >= 0 || !de_has_vcn_ex(e))
+ break;
/* Read next level. */
err = indx_read(indx, ni, de_get_vbn(e), &node);
if (err)
- goto out;
+ return err;
/* Lookup entry that is <= to the search value. */
e = hdr_find_e(indx, &node->index->ihdr, key, key_len, ctx,
diff);
if (!e) {
- err = -EINVAL;
put_indx_node(node);
- goto out;
+ return -EINVAL;
}
fnd_push(fnd, node, e);
}
-out:
- return err;
+ *entry = e;
+ return 0;
}
int indx_find_sort(struct ntfs_index *indx, struct ntfs_inode *ni,
@@ -1354,7 +1347,7 @@ static int indx_create_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
goto out;
err = ni_insert_nonresident(ni, ATTR_ALLOC, in->name, in->name_len,
- &run, 0, len, 0, &alloc, NULL);
+ &run, 0, len, 0, &alloc, NULL, NULL);
if (err)
goto out1;
@@ -1685,8 +1678,8 @@ indx_insert_into_buffer(struct ntfs_index *indx, struct ntfs_inode *ni,
{
int err;
const struct NTFS_DE *sp;
- struct NTFS_DE *e, *de_t, *up_e = NULL;
- struct indx_node *n2 = NULL;
+ struct NTFS_DE *e, *de_t, *up_e;
+ struct indx_node *n2;
struct indx_node *n1 = fnd->nodes[level];
struct INDEX_HDR *hdr1 = &n1->index->ihdr;
struct INDEX_HDR *hdr2;
@@ -1994,7 +1987,7 @@ static int indx_free_children(struct ntfs_index *indx, struct ntfs_inode *ni,
const struct NTFS_DE *e, bool trim)
{
int err;
- struct indx_node *n;
+ struct indx_node *n = NULL;
struct INDEX_HDR *hdr;
CLST vbn = de_get_vbn(e);
size_t i;
diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
index 80104afeb2cd..51363d4e8636 100644
--- a/fs/ntfs3/inode.c
+++ b/fs/ntfs3/inode.c
@@ -430,6 +430,7 @@ end_enum:
} else if (fname && fname->home.low == cpu_to_le32(MFT_REC_EXTEND) &&
fname->home.seq == cpu_to_le16(MFT_REC_EXTEND)) {
/* Records in $Extend are not a files or general directories. */
+ inode->i_op = &ntfs_file_inode_operations;
} else {
err = -EINVAL;
goto out;
@@ -500,7 +501,7 @@ struct inode *ntfs_iget5(struct super_block *sb, const struct MFT_REF *ref,
inode = ntfs_read_mft(inode, name, ref);
else if (ref->seq != ntfs_i(inode)->mi.mrec->seq) {
/* Inode overlaps? */
- make_bad_inode(inode);
+ _ntfs_bad_inode(inode);
}
return inode;
@@ -1632,7 +1633,7 @@ out4:
ni->mi.dirty = false;
discard_new_inode(inode);
out3:
- ntfs_mark_rec_free(sbi, ino);
+ ntfs_mark_rec_free(sbi, ino, false);
out2:
__putname(new_de);
@@ -1655,7 +1656,6 @@ int ntfs_link_inode(struct inode *inode, struct dentry *dentry)
struct ntfs_inode *ni = ntfs_i(inode);
struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
struct NTFS_DE *de;
- struct ATTR_FILE_NAME *de_name;
/* Allocate PATH_MAX bytes. */
de = __getname();
@@ -1670,15 +1670,6 @@ int ntfs_link_inode(struct inode *inode, struct dentry *dentry)
if (err)
goto out;
- de_name = (struct ATTR_FILE_NAME *)(de + 1);
- /* Fill duplicate info. */
- de_name->dup.cr_time = de_name->dup.m_time = de_name->dup.c_time =
- de_name->dup.a_time = kernel2nt(&inode->i_ctime);
- de_name->dup.alloc_size = de_name->dup.data_size =
- cpu_to_le64(inode->i_size);
- de_name->dup.fa = ni->std_fa;
- de_name->dup.ea_size = de_name->dup.reparse = 0;
-
err = ni_add_name(ntfs_i(d_inode(dentry->d_parent)), ni, de);
out:
__putname(de);
@@ -1731,9 +1722,7 @@ int ntfs_unlink_inode(struct inode *dir, const struct dentry *dentry)
if (inode->i_nlink)
mark_inode_dirty(inode);
} else if (!ni_remove_name_undo(dir_ni, ni, de, de2, undo_remove)) {
- make_bad_inode(inode);
- ntfs_inode_err(inode, "failed to undo unlink");
- ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
+ _ntfs_bad_inode(inode);
} else {
if (ni_is_dirty(dir))
mark_inode_dirty(dir);
diff --git a/fs/ntfs3/namei.c b/fs/ntfs3/namei.c
index bc741213ad84..bc22cc321a74 100644
--- a/fs/ntfs3/namei.c
+++ b/fs/ntfs3/namei.c
@@ -208,7 +208,7 @@ static int ntfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
}
/*
- * ntfs_rmdir - inode_operations::rm_dir
+ * ntfs_rmdir - inode_operations::rmdir
*/
static int ntfs_rmdir(struct inode *dir, struct dentry *dentry)
{
@@ -308,9 +308,7 @@ static int ntfs_rename(struct user_namespace *mnt_userns, struct inode *dir,
err = ni_rename(dir_ni, new_dir_ni, ni, de, new_de, &is_bad);
if (is_bad) {
/* Restore after failed rename failed too. */
- make_bad_inode(inode);
- ntfs_inode_err(inode, "failed to undo rename");
- ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
+ _ntfs_bad_inode(inode);
} else if (!err) {
inode->i_ctime = dir->i_ctime = dir->i_mtime =
current_time(dir);
diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
index 8dbdca03e1af..2c791222c4e2 100644
--- a/fs/ntfs3/ntfs_fs.h
+++ b/fs/ntfs3/ntfs_fs.h
@@ -220,6 +220,7 @@ struct ntfs_sb_info {
u32 flags; // See NTFS_FLAGS_XXX.
+ CLST zone_max; // Maximum MFT zone length in clusters
CLST bad_clusters; // The count of marked bad clusters.
u16 max_bytes_per_attr; // Maximum attribute size in record.
@@ -408,8 +409,6 @@ enum REPARSE_SIGN {
};
/* Functions from attrib.c */
-int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
- struct runs_tree *run, const CLST *vcn);
int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
@@ -440,6 +439,7 @@ int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
u64 new_valid);
int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes);
+int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes);
int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size);
/* Functions from attrlist.c */
@@ -528,7 +528,7 @@ int ni_insert_nonresident(struct ntfs_inode *ni, enum ATTR_TYPE type,
const __le16 *name, u8 name_len,
const struct runs_tree *run, CLST svcn, CLST len,
__le16 flags, struct ATTRIB **new_attr,
- struct mft_inode **mi);
+ struct mft_inode **mi, struct ATTR_LIST_ENTRY **le);
int ni_insert_resident(struct ntfs_inode *ni, u32 data_size,
enum ATTR_TYPE type, const __le16 *name, u8 name_len,
struct ATTRIB **new_attr, struct mft_inode **mi,
@@ -589,10 +589,12 @@ int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
enum ALLOCATE_OPT opt);
int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
struct ntfs_inode *ni, struct mft_inode **mi);
-void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno);
+void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno, bool is_mft);
int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to);
int ntfs_refresh_zone(struct ntfs_sb_info *sbi);
-int ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait);
+void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait);
+void ntfs_bad_inode(struct inode *inode, const char *hint);
+#define _ntfs_bad_inode(i) ntfs_bad_inode(i, __func__)
enum NTFS_DIRTY_FLAGS {
NTFS_DIRTY_CLEAR = 0,
NTFS_DIRTY_DIRTY = 1,
@@ -738,7 +740,6 @@ static inline struct ATTRIB *rec_find_attr_le(struct mft_inode *rec,
int mi_write(struct mft_inode *mi, int wait);
int mi_format_new(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno,
__le16 flags, bool is_mft);
-void mi_mark_free(struct mft_inode *mi);
struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
const __le16 *name, u8 name_len, u32 asize,
u16 name_off);
@@ -780,10 +781,10 @@ bool run_lookup_entry(const struct runs_tree *run, CLST vcn, CLST *lcn,
void run_truncate(struct runs_tree *run, CLST vcn);
void run_truncate_head(struct runs_tree *run, CLST vcn);
void run_truncate_around(struct runs_tree *run, CLST vcn);
-bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *Index);
bool run_add_entry(struct runs_tree *run, CLST vcn, CLST lcn, CLST len,
bool is_mft);
bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len);
+bool run_insert_range(struct runs_tree *run, CLST vcn, CLST len);
bool run_get_entry(const struct runs_tree *run, size_t index, CLST *vcn,
CLST *lcn, CLST *len);
bool run_is_mapped_full(const struct runs_tree *run, CLST svcn, CLST evcn);
@@ -802,6 +803,7 @@ int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
#define run_unpack_ex run_unpack
#endif
int run_get_highest_vcn(CLST vcn, const u8 *run_buf, u64 *highest_vcn);
+int run_clone(const struct runs_tree *run, struct runs_tree *new_run);
/* Globals from super.c */
void *ntfs_set_shared(void *ptr, u32 bytes);
diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c
index 861e35791506..7d2fac5ee215 100644
--- a/fs/ntfs3/record.c
+++ b/fs/ntfs3/record.c
@@ -395,28 +395,6 @@ int mi_format_new(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno,
}
/*
- * mi_mark_free - Mark record as unused and marks it as free in bitmap.
- */
-void mi_mark_free(struct mft_inode *mi)
-{
- CLST rno = mi->rno;
- struct ntfs_sb_info *sbi = mi->sbi;
-
- if (rno >= MFT_REC_RESERVED && rno < MFT_REC_FREE) {
- ntfs_clear_mft_tail(sbi, rno, rno + 1);
- mi->dirty = false;
- return;
- }
-
- if (mi->mrec) {
- clear_rec_inuse(mi->mrec);
- mi->dirty = true;
- mi_write(mi, 0);
- }
- ntfs_mark_rec_free(sbi, rno);
-}
-
-/*
* mi_insert_attr - Reserve space for new attribute.
*
  * Return: Not fully constructed attribute, or NULL if it is not possible to create.
@@ -445,12 +423,11 @@ struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
attr = NULL;
while ((attr = mi_enum_attr(mi, attr))) {
diff = compare_attr(attr, type, name, name_len, upcase);
- if (diff > 0)
- break;
+
if (diff < 0)
continue;
- if (!is_attr_indexed(attr))
+ if (!diff && !is_attr_indexed(attr))
return NULL;
break;
}
diff --git a/fs/ntfs3/run.c b/fs/ntfs3/run.c
index a8fec651f973..aaaa0d3d35a2 100644
--- a/fs/ntfs3/run.c
+++ b/fs/ntfs3/run.c
@@ -31,7 +31,7 @@ struct ntfs_run {
  * If the entry is missing from the list, 'index' will be set to
  * point to the insertion position for the entry in question.
*/
-bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *index)
+static bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *index)
{
size_t min_idx, max_idx, mid_idx;
struct ntfs_run *r;
@@ -547,6 +547,48 @@ bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
return true;
}
+/* run_insert_range
+ *
+ * Helper for attr_insert_range(),
+ * which is a helper for fallocate(insert_range).
+ */
+bool run_insert_range(struct runs_tree *run, CLST vcn, CLST len)
+{
+ size_t index;
+ struct ntfs_run *r, *e;
+
+ if (WARN_ON(!run_lookup(run, vcn, &index)))
+ return false; /* Should never be here. */
+
+ e = run->runs + run->count;
+ r = run->runs + index;
+
+ if (vcn > r->vcn)
+ r += 1;
+
+ for (; r < e; r++)
+ r->vcn += len;
+
+ r = run->runs + index;
+
+ if (vcn > r->vcn) {
+		/* Split the fragment. */
+ CLST len1 = vcn - r->vcn;
+ CLST len2 = r->len - len1;
+ CLST lcn2 = r->lcn == SPARSE_LCN ? SPARSE_LCN : (r->lcn + len1);
+
+ r->len = len1;
+
+ if (!run_add_entry(run, vcn + len, lcn2, len2, false))
+ return false;
+ }
+
+ if (!run_add_entry(run, vcn, SPARSE_LCN, len, false))
+ return false;
+
+ return true;
+}
+
/*
* run_get_entry - Return index-th mapped region.
*/
@@ -778,26 +820,36 @@ int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf,
CLST next_vcn, vcn, lcn;
CLST prev_lcn = 0;
CLST evcn1 = svcn + len;
+ const struct ntfs_run *r, *r_end;
int packed_size = 0;
size_t i;
- bool ok;
s64 dlcn;
int offset_size, size_size, tmp;
- next_vcn = vcn = svcn;
-
*packed_vcns = 0;
if (!len)
goto out;
- ok = run_lookup_entry(run, vcn, &lcn, &len, &i);
+	/* Check that all required entries [svcn, evcn1) are available. */
+ if (!run_lookup(run, svcn, &i))
+ return -ENOENT;
+
+ r_end = run->runs + run->count;
+ r = run->runs + i;
- if (!ok)
- goto error;
+ for (next_vcn = r->vcn + r->len; next_vcn < evcn1;
+ next_vcn = r->vcn + r->len) {
+ if (++r >= r_end || r->vcn != next_vcn)
+ return -ENOENT;
+ }
- if (next_vcn != vcn)
- goto error;
+	/* Repeat the cycle above and pack runs. Assume no errors. */
+ r = run->runs + i;
+ len = svcn - r->vcn;
+ vcn = svcn;
+ lcn = r->lcn == SPARSE_LCN ? SPARSE_LCN : (r->lcn + len);
+ len = r->len - len;
for (;;) {
next_vcn = vcn + len;
@@ -846,12 +898,10 @@ int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf,
if (packed_size + 1 >= run_buf_size || next_vcn >= evcn1)
goto out;
- ok = run_get_entry(run, ++i, &vcn, &lcn, &len);
- if (!ok)
- goto error;
-
- if (next_vcn != vcn)
- goto error;
+ r += 1;
+ vcn = r->vcn;
+ lcn = r->lcn;
+ len = r->len;
}
out:
@@ -860,9 +910,6 @@ out:
run_buf[0] = 0;
return packed_size + 1;
-
-error:
- return -EOPNOTSUPP;
}
/*
@@ -1109,3 +1156,28 @@ int run_get_highest_vcn(CLST vcn, const u8 *run_buf, u64 *highest_vcn)
*highest_vcn = vcn64 - 1;
return 0;
}
+
+/*
+ * run_clone
+ *
+ * Make a copy of a run.
+ */
+int run_clone(const struct runs_tree *run, struct runs_tree *new_run)
+{
+ size_t bytes = run->count * sizeof(struct ntfs_run);
+
+ if (bytes > new_run->allocated) {
+ struct ntfs_run *new_ptr = kvmalloc(bytes, GFP_KERNEL);
+
+ if (!new_ptr)
+ return -ENOMEM;
+
+ kvfree(new_run->runs);
+ new_run->runs = new_ptr;
+ new_run->allocated = bytes;
+ }
+
+ memcpy(new_run->runs, run->runs, bytes);
+ new_run->count = run->count;
+ return 0;
+}
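run_insert_range() above shifts every fragment at or beyond the insertion point by len and, when the point falls inside a mapped fragment, splits it so the tail resumes after the new sparse range. A worked sketch of the split arithmetic with invented numbers (fragment [vcn 10, lcn 200, len 8], 4-cluster hole inserted at vcn 13):

#include <stdio.h>

#define SPARSE_LCN (~0UL)

struct frag { unsigned long vcn, lcn, len; };

/* Split 'f' at 'vcn' and return the tail that must be re-added after the hole. */
static struct frag split_at(struct frag *f, unsigned long vcn, unsigned long hole)
{
	unsigned long len1 = vcn - f->vcn;
	struct frag tail = {
		.vcn = vcn + hole,	/* the tail shifts past the hole */
		.lcn = f->lcn == SPARSE_LCN ? SPARSE_LCN : f->lcn + len1,
		.len = f->len - len1,
	};

	f->len = len1;			/* the head keeps the leading clusters */
	return tail;
}

int main(void)
{
	struct frag f = { .vcn = 10, .lcn = 200, .len = 8 };
	struct frag tail = split_at(&f, 13, 4);

	printf("head: vcn=%lu lcn=%lu len=%lu\n", f.vcn, f.lcn, f.len);
	printf("hole: vcn=13 lcn=SPARSE len=4\n");
	printf("tail: vcn=%lu lcn=%lu len=%lu\n", tail.vcn, tail.lcn, tail.len);
	return 0;
}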
diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
index 0c6de6287737..47012c9bf505 100644
--- a/fs/ntfs3/super.c
+++ b/fs/ntfs3/super.c
@@ -30,6 +30,7 @@
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/log2.h>
+#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/nls.h>
#include <linux/seq_file.h>
@@ -390,7 +391,7 @@ static int ntfs_fs_reconfigure(struct fs_context *fc)
return -EINVAL;
}
- memcpy(sbi->options, new_opts, sizeof(*new_opts));
+ swap(sbi->options, fc->fs_private);
return 0;
}
@@ -870,6 +871,13 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
sb->s_maxbytes = 0xFFFFFFFFull << sbi->cluster_bits;
#endif
+ /*
+ * Compute the MFT zone in two steps.
+ * It would be nice to be able to allocate 1/8 of the
+ * total clusters for the MFT, but not more than 512 MB.
+ */
+ sbi->zone_max = min_t(CLST, 0x20000000 >> sbi->cluster_bits, clusters >> 3);
+
err = 0;
out:
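As a worked example of the zone_max cap added above (illustrative figures, not taken from this patch): with 4 KiB clusters, cluster_bits is 12, so 0x20000000 >> 12 = 131072 clusters, which is exactly 512 MB; on a ~100 GiB volume of 26214400 clusters, clusters >> 3 = 3276800, so min_t() keeps the 512 MB cap. A minimal stand-alone C sketch of the same arithmetic, assuming those hypothetical values:

	unsigned int cluster_bits = 12;                    /* assumed: 4 KiB clusters */
	unsigned long long clusters = 26214400ULL;         /* assumed: ~100 GiB volume */
	unsigned long long cap = 0x20000000ULL >> cluster_bits;    /* 131072 clusters = 512 MB */
	unsigned long long eighth = clusters >> 3;                  /* 3276800 clusters */
	unsigned long long zone_max = cap < eighth ? cap : eighth;  /* 131072: the 512 MB cap wins */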
@@ -900,6 +908,8 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
ref.high = 0;
sbi->sb = sb;
+ sbi->options = fc->fs_private;
+ fc->fs_private = NULL;
sb->s_flags |= SB_NODIRATIME;
sb->s_magic = 0x7366746e; // "ntfs"
sb->s_op = &ntfs_sops;
@@ -1262,8 +1272,6 @@ load_root:
goto put_inode_out;
}
- fc->fs_private = NULL;
-
return 0;
put_inode_out:
@@ -1378,7 +1386,7 @@ static const struct fs_context_operations ntfs_context_ops = {
/*
* ntfs_init_fs_context - Initialize spi and opts
*
- * This will called when mount/remount. We will first initiliaze
+ * This will be called during mount/remount. We will first initialize
* options so that if remount we can use just that.
*/
static int ntfs_init_fs_context(struct fs_context *fc)
@@ -1416,7 +1424,6 @@ static int ntfs_init_fs_context(struct fs_context *fc)
mutex_init(&sbi->compress.mtx_lzx);
#endif
- sbi->options = opts;
fc->s_fs_info = sbi;
ok:
fc->fs_private = opts;
diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c
index 5e0e0280e70d..6ae1f56b7358 100644
--- a/fs/ntfs3/xattr.c
+++ b/fs/ntfs3/xattr.c
@@ -118,7 +118,7 @@ static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea,
run_init(&run);
- err = attr_load_runs(attr_ea, ni, &run, NULL);
+ err = attr_load_runs_range(ni, ATTR_EA, NULL, 0, &run, 0, size);
if (!err)
err = ntfs_read_run_nb(sbi, &run, 0, ea_p, size, NULL);
run_close(&run);
@@ -444,6 +444,11 @@ update_ea:
/* Delete xattr, ATTR_EA */
ni_remove_attr_le(ni, attr, mi, le);
} else if (attr->non_res) {
+ err = attr_load_runs_range(ni, ATTR_EA, NULL, 0, &ea_run, 0,
+ size);
+ if (err)
+ goto out;
+
err = ntfs_sb_write_run(sbi, &ea_run, 0, ea_all, size, 0);
if (err)
goto out;
@@ -478,8 +483,7 @@ out:
}
#ifdef CONFIG_NTFS3_FS_POSIX_ACL
-static struct posix_acl *ntfs_get_acl_ex(struct user_namespace *mnt_userns,
- struct inode *inode, int type,
+static struct posix_acl *ntfs_get_acl_ex(struct inode *inode, int type,
int locked)
{
struct ntfs_inode *ni = ntfs_i(inode);
@@ -514,7 +518,7 @@ static struct posix_acl *ntfs_get_acl_ex(struct user_namespace *mnt_userns,
/* Translate extended attribute to acl. */
if (err >= 0) {
- acl = posix_acl_from_xattr(mnt_userns, buf, err);
+ acl = posix_acl_from_xattr(&init_user_ns, buf, err);
} else if (err == -ENODATA) {
acl = NULL;
} else {
@@ -537,8 +541,7 @@ struct posix_acl *ntfs_get_acl(struct inode *inode, int type, bool rcu)
if (rcu)
return ERR_PTR(-ECHILD);
- /* TODO: init_user_ns? */
- return ntfs_get_acl_ex(&init_user_ns, inode, type, 0);
+ return ntfs_get_acl_ex(inode, type, 0);
}
static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
@@ -547,28 +550,23 @@ static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
{
const char *name;
size_t size, name_len;
- void *value = NULL;
- int err = 0;
+ void *value;
+ int err;
int flags;
+ umode_t mode;
if (S_ISLNK(inode->i_mode))
return -EOPNOTSUPP;
+ mode = inode->i_mode;
switch (type) {
case ACL_TYPE_ACCESS:
/* Do not change i_mode if we are in init_acl */
if (acl && !init_acl) {
- umode_t mode;
-
err = posix_acl_update_mode(mnt_userns, inode, &mode,
&acl);
if (err)
- goto out;
-
- if (inode->i_mode != mode) {
- inode->i_mode = mode;
- mark_inode_dirty(inode);
- }
+ return err;
}
name = XATTR_NAME_POSIX_ACL_ACCESS;
name_len = sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1;
@@ -595,7 +593,7 @@ static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
value = kmalloc(size, GFP_NOFS);
if (!value)
return -ENOMEM;
- err = posix_acl_to_xattr(mnt_userns, acl, value, size);
+ err = posix_acl_to_xattr(&init_user_ns, acl, value, size);
if (err < 0)
goto out;
flags = 0;
@@ -604,8 +602,13 @@ static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
err = ntfs_set_ea(inode, name, name_len, value, size, flags, 0);
if (err == -ENODATA && !size)
err = 0; /* Removing non existed xattr. */
- if (!err)
+ if (!err) {
set_cached_acl(inode, type, acl);
+ if (inode->i_mode != mode) {
+ inode->i_mode = mode;
+ mark_inode_dirty(inode);
+ }
+ }
out:
kfree(value);
@@ -641,7 +644,7 @@ static int ntfs_xattr_get_acl(struct user_namespace *mnt_userns,
if (!acl)
return -ENODATA;
- err = posix_acl_to_xattr(mnt_userns, acl, buffer, size);
+ err = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
posix_acl_release(acl);
return err;
@@ -665,12 +668,12 @@ static int ntfs_xattr_set_acl(struct user_namespace *mnt_userns,
if (!value) {
acl = NULL;
} else {
- acl = posix_acl_from_xattr(mnt_userns, value, size);
+ acl = posix_acl_from_xattr(&init_user_ns, value, size);
if (IS_ERR(acl))
return PTR_ERR(acl);
if (acl) {
- err = posix_acl_valid(mnt_userns, acl);
+ err = posix_acl_valid(&init_user_ns, acl);
if (err)
goto release_and_out;
}
@@ -706,13 +709,13 @@ int ntfs_init_acl(struct user_namespace *mnt_userns, struct inode *inode,
inode->i_default_acl = NULL;
}
- if (!acl)
- inode->i_acl = NULL;
- else {
+ if (acl) {
if (!err)
err = ntfs_set_acl_ex(mnt_userns, inode, acl,
ACL_TYPE_ACCESS, true);
posix_acl_release(acl);
+ } else {
+ inode->i_acl = NULL;
}
return err;
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index e360543ad7e7..8b2020f92b5f 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -296,17 +296,25 @@ static void dlmfs_evict_inode(struct inode *inode)
{
int status;
struct dlmfs_inode_private *ip;
+ struct user_lock_res *lockres;
+ int teardown;
clear_inode(inode);
mlog(0, "inode %lu\n", inode->i_ino);
ip = DLMFS_I(inode);
+ lockres = &ip->ip_lockres;
if (S_ISREG(inode->i_mode)) {
- status = user_dlm_destroy_lock(&ip->ip_lockres);
- if (status < 0)
- mlog_errno(status);
+ spin_lock(&lockres->l_lock);
+ teardown = !!(lockres->l_flags & USER_LOCK_IN_TEARDOWN);
+ spin_unlock(&lockres->l_lock);
+ if (!teardown) {
+ status = user_dlm_destroy_lock(lockres);
+ if (status < 0)
+ mlog_errno(status);
+ }
iput(ip->ip_parent);
goto clear_fields;
}
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 801e60bab955..c28bc983a7b1 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -3403,10 +3403,12 @@ void ocfs2_dlm_shutdown(struct ocfs2_super *osb,
ocfs2_lock_res_free(&osb->osb_nfs_sync_lockres);
ocfs2_lock_res_free(&osb->osb_orphan_scan.os_lockres);
- ocfs2_cluster_disconnect(osb->cconn, hangup_pending);
- osb->cconn = NULL;
+ if (osb->cconn) {
+ ocfs2_cluster_disconnect(osb->cconn, hangup_pending);
+ osb->cconn = NULL;
- ocfs2_dlm_shutdown_debug(osb);
+ ocfs2_dlm_shutdown_debug(osb);
+ }
}
static int ocfs2_drop_lock(struct ocfs2_super *osb,
diff --git a/fs/ocfs2/heartbeat.c b/fs/ocfs2/heartbeat.c
index 9099d8fc7599..22da768e65b7 100644
--- a/fs/ocfs2/heartbeat.c
+++ b/fs/ocfs2/heartbeat.c
@@ -2,12 +2,13 @@
/*
* heartbeat.c
*
- * Register ourselves with the heartbaet service, keep our node maps
+ * Register ourselves with the heartbeat service, keep our node maps
* up to date, and fire off recovery when needed.
*
* Copyright (C) 2002, 2004 Oracle. All rights reserved.
*/
+#include <linux/bitmap.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>
@@ -24,18 +25,12 @@
#include "buffer_head_io.h"
-static inline void __ocfs2_node_map_set_bit(struct ocfs2_node_map *map,
- int bit);
-static inline void __ocfs2_node_map_clear_bit(struct ocfs2_node_map *map,
- int bit);
-
/* special case -1 for now
* TODO: should *really* make sure the calling func never passes -1!! */
static void ocfs2_node_map_init(struct ocfs2_node_map *map)
{
map->num_nodes = OCFS2_NODE_MAP_MAX_NODES;
- memset(map->map, 0, BITS_TO_LONGS(OCFS2_NODE_MAP_MAX_NODES) *
- sizeof(unsigned long));
+ bitmap_zero(map->map, OCFS2_NODE_MAP_MAX_NODES);
}
void ocfs2_init_node_maps(struct ocfs2_super *osb)
@@ -65,12 +60,6 @@ void ocfs2_do_node_down(int node_num, void *data)
ocfs2_recovery_thread(osb, node_num);
}
-static inline void __ocfs2_node_map_set_bit(struct ocfs2_node_map *map,
- int bit)
-{
- set_bit(bit, map->map);
-}
-
void ocfs2_node_map_set_bit(struct ocfs2_super *osb,
struct ocfs2_node_map *map,
int bit)
@@ -79,16 +68,10 @@ void ocfs2_node_map_set_bit(struct ocfs2_super *osb,
return;
BUG_ON(bit >= map->num_nodes);
spin_lock(&osb->node_map_lock);
- __ocfs2_node_map_set_bit(map, bit);
+ set_bit(bit, map->map);
spin_unlock(&osb->node_map_lock);
}
-static inline void __ocfs2_node_map_clear_bit(struct ocfs2_node_map *map,
- int bit)
-{
- clear_bit(bit, map->map);
-}
-
void ocfs2_node_map_clear_bit(struct ocfs2_super *osb,
struct ocfs2_node_map *map,
int bit)
@@ -97,7 +80,7 @@ void ocfs2_node_map_clear_bit(struct ocfs2_super *osb,
return;
BUG_ON(bit >= map->num_nodes);
spin_lock(&osb->node_map_lock);
- __ocfs2_node_map_clear_bit(map, bit);
+ clear_bit(bit, map->map);
spin_unlock(&osb->node_map_lock);
}
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index c75fd54b9185..961d1cf54388 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -197,6 +197,7 @@ static struct inode *ocfs2_get_init_inode(struct inode *dir, umode_t mode)
* callers. */
if (S_ISDIR(mode))
set_nlink(inode, 2);
+ mode = mode_strip_sgid(&init_user_ns, dir, mode);
inode_init_owner(&init_user_ns, inode, dir, mode);
status = dquot_initialize(inode);
if (status)
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index 0b6f551a342a..dc9f76ab7e13 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -412,7 +412,7 @@ out_unlock:
goto out_err;
}
-/* Write information to global quota file. Expects exlusive lock on quota
+/* Write information to global quota file. Expects exclusive lock on quota
* file inode and quota info */
static int __ocfs2_global_write_info(struct super_block *sb, int type)
{
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 013a727bd7c8..e2cc9eec287c 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1914,8 +1914,7 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
!ocfs2_is_hard_readonly(osb))
hangup_needed = 1;
- if (osb->cconn)
- ocfs2_dlm_shutdown(osb, hangup_needed);
+ ocfs2_dlm_shutdown(osb, hangup_needed);
ocfs2_blockcheck_stats_debugfs_remove(&osb->osb_ecc_stats);
debugfs_remove_recursive(osb->osb_debug_root);
diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
index 2eada97bbd23..e065a5b9a442 100644
--- a/fs/overlayfs/export.c
+++ b/fs/overlayfs/export.c
@@ -259,7 +259,7 @@ static int ovl_encode_fh(struct inode *inode, u32 *fid, int *max_len,
return FILEID_INVALID;
dentry = d_find_any_alias(inode);
- if (WARN_ON(!dentry))
+ if (!dentry)
return FILEID_INVALID;
bytes = ovl_dentry_to_fid(ofs, dentry, fid, buflen);
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index 7922b619f6c8..0fbcb590af84 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -454,14 +454,18 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
return res;
}
+#ifdef CONFIG_FS_POSIX_ACL
/*
* Apply the idmapping of the layer to POSIX ACLs. The caller must pass a clone
* of the POSIX ACLs retrieved from the lower layer to this function to not
* alter the POSIX ACLs for the underlying filesystem.
*/
-static void ovl_idmap_posix_acl(struct user_namespace *mnt_userns,
+static void ovl_idmap_posix_acl(struct inode *realinode,
+ struct user_namespace *mnt_userns,
struct posix_acl *acl)
{
+ struct user_namespace *fs_userns = i_user_ns(realinode);
+
for (unsigned int i = 0; i < acl->a_count; i++) {
vfsuid_t vfsuid;
vfsgid_t vfsgid;
@@ -469,11 +473,11 @@ static void ovl_idmap_posix_acl(struct user_namespace *mnt_userns,
struct posix_acl_entry *e = &acl->a_entries[i];
switch (e->e_tag) {
case ACL_USER:
- vfsuid = make_vfsuid(mnt_userns, &init_user_ns, e->e_uid);
+ vfsuid = make_vfsuid(mnt_userns, fs_userns, e->e_uid);
e->e_uid = vfsuid_into_kuid(vfsuid);
break;
case ACL_GROUP:
- vfsgid = make_vfsgid(mnt_userns, &init_user_ns, e->e_gid);
+ vfsgid = make_vfsgid(mnt_userns, fs_userns, e->e_gid);
e->e_gid = vfsgid_into_kgid(vfsgid);
break;
}
@@ -497,7 +501,7 @@ struct posix_acl *ovl_get_acl(struct inode *inode, int type, bool rcu)
struct posix_acl *acl, *clone;
struct path realpath;
- if (!IS_ENABLED(CONFIG_FS_POSIX_ACL) || !IS_POSIXACL(realinode))
+ if (!IS_POSIXACL(realinode))
return NULL;
/* Careful in RCU walk mode */
@@ -535,7 +539,7 @@ struct posix_acl *ovl_get_acl(struct inode *inode, int type, bool rcu)
if (!clone)
clone = ERR_PTR(-ENOMEM);
else
- ovl_idmap_posix_acl(mnt_user_ns(realpath.mnt), clone);
+ ovl_idmap_posix_acl(realinode, mnt_user_ns(realpath.mnt), clone);
/*
* Since we're not in RCU path walk we always need to release the
* original ACLs.
@@ -543,6 +547,7 @@ struct posix_acl *ovl_get_acl(struct inode *inode, int type, bool rcu)
posix_acl_release(acl);
return clone;
}
+#endif
int ovl_update_time(struct inode *inode, struct timespec64 *ts, int flags)
{
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index 65c4346a5b43..69dc577974f8 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -42,7 +42,7 @@ static int ovl_check_redirect(struct path *path, struct ovl_lookup_data *d,
* One of the ancestor path elements in an absolute path
* lookup in ovl_lookup_layer() could have been opaque and
* that will stop further lookup in lower layers (d->stop=true)
- * But we have found an absolute redirect in decendant path
+ * But we have found an absolute redirect in descendant path
* element and that should force continue lookup in lower
* layers (reset d->stop).
*/
@@ -648,7 +648,7 @@ static int ovl_get_index_name_fh(struct ovl_fh *fh, struct qstr *name)
* If the index dentry for a copy up origin inode is positive, but points
* to an inode different than the upper inode, then either the upper inode
* has been copied up and not indexed or it was indexed, but since then
- * index dir was cleared. Either way, that index cannot be used to indentify
+ * index dir was cleared. Either way, that index cannot be used to identify
* the overlay inode.
*/
int ovl_get_index_name(struct ovl_fs *ofs, struct dentry *origin,
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index 6ec815b84d48..87759165d32b 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -590,7 +590,13 @@ int ovl_xattr_set(struct dentry *dentry, struct inode *inode, const char *name,
int ovl_xattr_get(struct dentry *dentry, struct inode *inode, const char *name,
void *value, size_t size);
ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size);
+
+#ifdef CONFIG_FS_POSIX_ACL
struct posix_acl *ovl_get_acl(struct inode *inode, int type, bool rcu);
+#else
+#define ovl_get_acl NULL
+#endif
+
int ovl_update_time(struct inode *inode, struct timespec64 *ts, int flags);
bool ovl_is_private_xattr(struct super_block *sb, const char *name);
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index e0a2e0468ee7..ec746d447f1b 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -301,7 +301,7 @@ static int ovl_sync_fs(struct super_block *sb, int wait)
/**
* ovl_statfs
- * @sb: The overlayfs super block
+ * @dentry: The dentry to query
* @buf: The struct kstatfs to fill in with stats
*
* Get the filesystem statistics. As writes always target the upper layer
@@ -349,6 +349,8 @@ static inline int ovl_xino_def(void)
/**
* ovl_show_options
+ * @m: the seq_file handle
+ * @dentry: The dentry to query
*
* Prints the mount options for a given superblock.
* Returns zero; does not fail.
@@ -1412,11 +1414,12 @@ static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs,
*/
err = ovl_setxattr(ofs, ofs->workdir, OVL_XATTR_OPAQUE, "0", 1);
if (err) {
+ pr_warn("failed to set xattr on upper\n");
ofs->noxattr = true;
if (ofs->config.index || ofs->config.metacopy) {
ofs->config.index = false;
ofs->config.metacopy = false;
- pr_warn("upper fs does not support xattr, falling back to index=off,metacopy=off.\n");
+ pr_warn("...falling back to index=off,metacopy=off.\n");
}
/*
* xattr support is required for persistent st_ino.
@@ -1424,8 +1427,10 @@ static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs,
*/
if (ofs->config.xino == OVL_XINO_AUTO) {
ofs->config.xino = OVL_XINO_OFF;
- pr_warn("upper fs does not support xattr, falling back to xino=off.\n");
+ pr_warn("...falling back to xino=off.\n");
}
+ if (err == -EPERM && !ofs->config.userxattr)
+ pr_info("try mounting with 'userxattr' option\n");
err = 0;
} else {
ovl_removexattr(ofs, ofs->workdir, OVL_XATTR_OPAQUE);
@@ -2032,7 +2037,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
sb->s_stack_depth = 0;
sb->s_maxbytes = MAX_LFS_FILESIZE;
atomic_long_set(&ofs->last_ino, 1);
- /* Assume underlaying fs uses 32bit inodes unless proven otherwise */
+ /* Assume underlying fs uses 32bit inodes unless proven otherwise */
if (ofs->config.xino != OVL_XINO_OFF) {
ofs->xino_mode = BITS_PER_LONG - 32;
if (!ofs->xino_mode) {
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index 1d17d7b13dcd..5af33800743e 100644
--- a/fs/posix_acl.c
+++ b/fs/posix_acl.c
@@ -361,6 +361,7 @@ posix_acl_permission(struct user_namespace *mnt_userns, struct inode *inode,
const struct posix_acl *acl, int want)
{
const struct posix_acl_entry *pa, *pe, *mask_obj;
+ struct user_namespace *fs_userns = i_user_ns(inode);
int found = 0;
vfsuid_t vfsuid;
vfsgid_t vfsgid;
@@ -376,7 +377,7 @@ posix_acl_permission(struct user_namespace *mnt_userns, struct inode *inode,
goto check_perm;
break;
case ACL_USER:
- vfsuid = make_vfsuid(mnt_userns, &init_user_ns,
+ vfsuid = make_vfsuid(mnt_userns, fs_userns,
pa->e_uid);
if (vfsuid_eq_kuid(vfsuid, current_fsuid()))
goto mask;
@@ -390,7 +391,7 @@ posix_acl_permission(struct user_namespace *mnt_userns, struct inode *inode,
}
break;
case ACL_GROUP:
- vfsgid = make_vfsgid(mnt_userns, &init_user_ns,
+ vfsgid = make_vfsgid(mnt_userns, fs_userns,
pa->e_gid);
if (vfsgid_in_group_p(vfsgid)) {
found = 1;
@@ -736,6 +737,7 @@ void posix_acl_getxattr_idmapped_mnt(struct user_namespace *mnt_userns,
{
struct posix_acl_xattr_header *header = value;
struct posix_acl_xattr_entry *entry = (void *)(header + 1), *end;
+ struct user_namespace *fs_userns = i_user_ns(inode);
int count;
vfsuid_t vfsuid;
vfsgid_t vfsgid;
@@ -753,13 +755,13 @@ void posix_acl_getxattr_idmapped_mnt(struct user_namespace *mnt_userns,
switch (le16_to_cpu(entry->e_tag)) {
case ACL_USER:
uid = make_kuid(&init_user_ns, le32_to_cpu(entry->e_id));
- vfsuid = make_vfsuid(mnt_userns, &init_user_ns, uid);
+ vfsuid = make_vfsuid(mnt_userns, fs_userns, uid);
entry->e_id = cpu_to_le32(from_kuid(&init_user_ns,
vfsuid_into_kuid(vfsuid)));
break;
case ACL_GROUP:
gid = make_kgid(&init_user_ns, le32_to_cpu(entry->e_id));
- vfsgid = make_vfsgid(mnt_userns, &init_user_ns, gid);
+ vfsgid = make_vfsgid(mnt_userns, fs_userns, gid);
entry->e_id = cpu_to_le32(from_kgid(&init_user_ns,
vfsgid_into_kgid(vfsgid)));
break;
@@ -775,6 +777,7 @@ void posix_acl_setxattr_idmapped_mnt(struct user_namespace *mnt_userns,
{
struct posix_acl_xattr_header *header = value;
struct posix_acl_xattr_entry *entry = (void *)(header + 1), *end;
+ struct user_namespace *fs_userns = i_user_ns(inode);
int count;
vfsuid_t vfsuid;
vfsgid_t vfsgid;
@@ -793,13 +796,13 @@ void posix_acl_setxattr_idmapped_mnt(struct user_namespace *mnt_userns,
case ACL_USER:
uid = make_kuid(&init_user_ns, le32_to_cpu(entry->e_id));
vfsuid = VFSUIDT_INIT(uid);
- uid = from_vfsuid(mnt_userns, &init_user_ns, vfsuid);
+ uid = from_vfsuid(mnt_userns, fs_userns, vfsuid);
entry->e_id = cpu_to_le32(from_kuid(&init_user_ns, uid));
break;
case ACL_GROUP:
gid = make_kgid(&init_user_ns, le32_to_cpu(entry->e_id));
vfsgid = VFSGIDT_INIT(gid);
- gid = from_vfsgid(mnt_userns, &init_user_ns, vfsgid);
+ gid = from_vfsgid(mnt_userns, fs_userns, vfsgid);
entry->e_id = cpu_to_le32(from_kgid(&init_user_ns, gid));
break;
default:
diff --git a/fs/proc/array.c b/fs/proc/array.c
index eb815759842c..99fcbfda8e25 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -69,7 +69,6 @@
#include <linux/sched/cputime.h>
#include <linux/proc_fs.h>
#include <linux/ioport.h>
-#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
@@ -100,6 +99,10 @@ void proc_task_name(struct seq_file *m, struct task_struct *p, bool escape)
{
char tcomm[64];
+ /*
+ * Test before PF_KTHREAD because all workqueue worker threads are
+ * kernel threads.
+ */
if (p->flags & PF_WQ_WORKER)
wq_worker_comm(tcomm, sizeof(tcomm), p);
else if (p->flags & PF_KTHREAD)
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 8dfa36a99c74..93f7e3d971e4 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1885,7 +1885,7 @@ void proc_pid_evict_inode(struct proc_inode *ei)
put_pid(pid);
}
-struct inode *proc_pid_make_inode(struct super_block * sb,
+struct inode *proc_pid_make_inode(struct super_block *sb,
struct task_struct *task, umode_t mode)
{
struct inode * inode;
@@ -1914,11 +1914,6 @@ struct inode *proc_pid_make_inode(struct super_block * sb,
/* Let the pid remember us for quick removal */
ei->pid = pid;
- if (S_ISDIR(mode)) {
- spin_lock(&pid->lock);
- hlist_add_head_rcu(&ei->sibling_inodes, &pid->inodes);
- spin_unlock(&pid->lock);
- }
task_dump_owner(task, 0, &inode->i_uid, &inode->i_gid);
security_task_to_inode(task, inode);
@@ -1931,6 +1926,39 @@ out_unlock:
return NULL;
}
+/*
+ * Generate an inode and add it to @pid->inodes, so that the task will
+ * invalidate the inode's dentry before it is released.
+ *
+ * This helper is used for creating dir-type entries under '/proc' and
+ * '/proc/<tgid>/task'. Other entries (e.g. fd, stat) under '/proc/<tgid>'
+ * can be released by invalidating the '/proc/<tgid>' dentry.
+ * In theory, dentries under '/proc/<tgid>/task' could also be released by
+ * invalidating the '/proc/<tgid>' dentry, but we keep this path to handle
+ * the single-thread-exit case: each thread should invalidate its
+ * '/proc/<tgid>/task/<pid>' dentry before it is released.
+ */
+static struct inode *proc_pid_make_base_inode(struct super_block *sb,
+ struct task_struct *task, umode_t mode)
+{
+ struct inode *inode;
+ struct proc_inode *ei;
+ struct pid *pid;
+
+ inode = proc_pid_make_inode(sb, task, mode);
+ if (!inode)
+ return NULL;
+
+ /* Let proc_flush_pid find this directory inode */
+ ei = PROC_I(inode);
+ pid = ei->pid;
+ spin_lock(&pid->lock);
+ hlist_add_head_rcu(&ei->sibling_inodes, &pid->inodes);
+ spin_unlock(&pid->lock);
+
+ return inode;
+}
+
int pid_getattr(struct user_namespace *mnt_userns, const struct path *path,
struct kstat *stat, u32 request_mask, unsigned int query_flags)
{
@@ -3369,7 +3397,8 @@ static struct dentry *proc_pid_instantiate(struct dentry * dentry,
{
struct inode *inode;
- inode = proc_pid_make_inode(dentry->d_sb, task, S_IFDIR | S_IRUGO | S_IXUGO);
+ inode = proc_pid_make_base_inode(dentry->d_sb, task,
+ S_IFDIR | S_IRUGO | S_IXUGO);
if (!inode)
return ERR_PTR(-ENOENT);
@@ -3671,7 +3700,8 @@ static struct dentry *proc_task_instantiate(struct dentry *dentry,
struct task_struct *task, const void *ptr)
{
struct inode *inode;
- inode = proc_pid_make_inode(dentry->d_sb, task, S_IFDIR | S_IRUGO | S_IXUGO);
+ inode = proc_pid_make_base_inode(dentry->d_sb, task,
+ S_IFDIR | S_IRUGO | S_IXUGO);
if (!inode)
return ERR_PTR(-ENOENT);
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 73aeb4e6d32e..f495fdb39151 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -26,8 +26,6 @@
#include <linux/mount.h>
#include <linux/bug.h>
-#include <linux/uaccess.h>
-
#include "internal.h"
static void proc_evict_inode(struct inode *inode)
@@ -214,7 +212,15 @@ static void unuse_pde(struct proc_dir_entry *pde)
complete(pde->pde_unload_completion);
}
-/* pde is locked on entry, unlocked on exit */
+/*
+ * At most 2 contexts can enter this function: the one doing the last
+ * close on the descriptor and whoever is deleting the PDE itself.
+ *
+ * The first to enter calls the ->proc_release hook and signals its
+ * completion to the second one, which waits and then does nothing.
+ *
+ * PDE is locked on entry, unlocked on exit.
+ */
static void close_pdeo(struct proc_dir_entry *pde, struct pde_opener *pdeo)
__releases(&pde->pde_unload_lock)
{
@@ -224,9 +230,6 @@ static void close_pdeo(struct proc_dir_entry *pde, struct pde_opener *pdeo)
*
* rmmod (remove_proc_entry() et al) can't delete an entry and proceed:
* "struct file" needs to be available at the right moment.
- *
- * Therefore, first process to enter this function does ->release() and
- * signals its completion to the other process which does nothing.
*/
if (pdeo->closing) {
/* somebody else is doing that, just wait */
@@ -240,10 +243,12 @@ static void close_pdeo(struct proc_dir_entry *pde, struct pde_opener *pdeo)
pdeo->closing = true;
spin_unlock(&pde->pde_unload_lock);
+
file = pdeo->file;
pde->proc_ops->proc_release(file_inode(file), file);
+
spin_lock(&pde->pde_unload_lock);
- /* After ->release. */
+ /* Strictly after ->proc_release, see above. */
list_del(&pdeo->lh);
c = pdeo->c;
spin_unlock(&pde->pde_unload_lock);
@@ -489,6 +494,9 @@ static int proc_reg_open(struct inode *inode, struct file *file)
typeof_member(struct proc_ops, proc_release) release;
struct pde_opener *pdeo;
+ if (!pde->proc_ops->proc_lseek)
+ file->f_mode &= ~FMODE_LSEEK;
+
if (pde_is_permanent(pde)) {
open = pde->proc_ops->proc_open;
if (open)
diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c
index b38ad552887f..592e6dc7c110 100644
--- a/fs/proc/kmsg.c
+++ b/fs/proc/kmsg.c
@@ -15,7 +15,6 @@
#include <linux/fs.h>
#include <linux/syslog.h>
-#include <linux/uaccess.h>
#include <asm/io.h>
extern wait_queue_head_t log_wait;
diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
index 13452b32e2bd..4d3493579458 100644
--- a/fs/proc/nommu.c
+++ b/fs/proc/nommu.c
@@ -21,7 +21,6 @@
#include <linux/seq_file.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
-#include <linux/uaccess.h>
#include <asm/tlb.h>
#include <asm/div64.h>
#include "internal.h"
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
index 913e5acefbb6..856839b8ae8b 100644
--- a/fs/proc/proc_net.c
+++ b/fs/proc/proc_net.c
@@ -8,9 +8,6 @@
*
* proc net directory handling functions
*/
-
-#include <linux/uaccess.h>
-
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/proc_fs.h>
@@ -353,6 +350,12 @@ static __net_init int proc_net_ns_init(struct net *net)
kgid_t gid;
int err;
+ /*
+ * This PDE acts only as an anchor for the /proc/${pid}/net hierarchy.
+ * The corresponding inode (PDE(inode) == net->proc_net) is never
+ * instantiated, therefore blanket zeroing is fine.
+ * The net->proc_net_stat inode is instantiated normally.
+ */
err = -ENOMEM;
netd = kmem_cache_zalloc(proc_dir_entry_cache, GFP_KERNEL);
if (!netd)
diff --git a/fs/proc/proc_tty.c b/fs/proc/proc_tty.c
index c69ff191e5d8..5c6a5ceab2f1 100644
--- a/fs/proc/proc_tty.c
+++ b/fs/proc/proc_tty.c
@@ -4,8 +4,6 @@
*
* Copyright 1997, Theodore Ts'o
*/
-
-#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
diff --git a/fs/proc/root.c b/fs/proc/root.c
index c7e3b1350ef8..3c2ee3eb1138 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -6,9 +6,6 @@
*
* proc root directory handling functions
*/
-
-#include <linux/uaccess.h>
-
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/proc_fs.h>
@@ -305,6 +302,11 @@ void __init proc_root_init(void)
proc_mkdir("bus", NULL);
proc_sys_init();
+ /*
+ * Last things last. It is not as if userspace processes eager
+ * to open /proc files exist at this point, but register last
+ * anyway.
+ */
register_filesystem(&proc_fs_type);
}
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 2d04e3470d4c..4e0023643f8b 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -406,6 +406,7 @@ struct mem_size_stats {
u64 pss_anon;
u64 pss_file;
u64 pss_shmem;
+ u64 pss_dirty;
u64 pss_locked;
u64 swap_pss;
};
@@ -427,6 +428,7 @@ static void smaps_page_accumulate(struct mem_size_stats *mss,
mss->pss_locked += pss;
if (dirty || PageDirty(page)) {
+ mss->pss_dirty += pss;
if (private)
mss->private_dirty += size;
else
@@ -525,10 +527,12 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
struct vm_area_struct *vma = walk->vma;
bool locked = !!(vma->vm_flags & VM_LOCKED);
struct page *page = NULL;
- bool migration = false;
+ bool migration = false, young = false, dirty = false;
if (pte_present(*pte)) {
page = vm_normal_page(vma, addr, *pte);
+ young = pte_young(*pte);
+ dirty = pte_dirty(*pte);
} else if (is_swap_pte(*pte)) {
swp_entry_t swpent = pte_to_swp_entry(*pte);
@@ -558,8 +562,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
if (!page)
return;
- smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte),
- locked, migration);
+ smaps_account(mss, page, false, young, dirty, locked, migration);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -808,6 +811,7 @@ static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss,
{
SEQ_PUT_DEC("Rss: ", mss->resident);
SEQ_PUT_DEC(" kB\nPss: ", mss->pss >> PSS_SHIFT);
+ SEQ_PUT_DEC(" kB\nPss_Dirty: ", mss->pss_dirty >> PSS_SHIFT);
if (rollup_mode) {
/*
* These are meaningful only for smaps_rollup, otherwise two of
@@ -860,7 +864,7 @@ static int show_smap(struct seq_file *m, void *v)
__show_smap(m, &mss, false);
seq_printf(m, "THPeligible: %d\n",
- transparent_hugepage_active(vma));
+ hugepage_vma_check(vma, vma->vm_flags, true, false));
if (arch_pkeys_enabled())
seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma));
@@ -1792,7 +1796,7 @@ static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
return NULL;
page = vm_normal_page(vma, addr, pte);
- if (!page)
+ if (!page || is_zone_device_page(page))
return NULL;
if (PageReserved(page))
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 4eaeb645e759..f2aa86c421f2 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -25,7 +25,6 @@
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
-#include <linux/uaccess.h>
#include <linux/uio.h>
#include <linux/cc_platform.h>
#include <asm/io.h>
diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c
index 49650e54d2f8..846f9455ae22 100644
--- a/fs/proc_namespace.c
+++ b/fs/proc_namespace.c
@@ -86,7 +86,7 @@ static void show_mnt_opts(struct seq_file *m, struct vfsmount *mnt)
static inline void mangle(struct seq_file *m, const char *s)
{
- seq_escape(m, s, " \t\n\\");
+ seq_escape(m, s, " \t\n\\#");
}
static void show_type(struct seq_file *m, struct super_block *sb)
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 28966da7834e..0427b44bfee5 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -3002,7 +3002,7 @@ static int __init dquot_init(void)
pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld,"
" %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order));
- if (register_shrinker(&dqcache_shrinker))
+ if (register_shrinker(&dqcache_shrinker, "dquota-cache"))
panic("Cannot register dquot shrinker");
return 0;
diff --git a/fs/read_write.c b/fs/read_write.c
index ea59dd0095c2..1a261dcf1778 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -378,14 +378,13 @@ EXPORT_SYMBOL(rw_verify_area);
static ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
- struct iovec iov = { .iov_base = buf, .iov_len = len };
struct kiocb kiocb;
struct iov_iter iter;
ssize_t ret;
init_sync_kiocb(&kiocb, filp);
kiocb.ki_pos = (ppos ? *ppos : 0);
- iov_iter_init(&iter, READ, &iov, 1, len);
+ iov_iter_ubuf(&iter, READ, buf, len);
ret = call_read_iter(filp, &kiocb, &iter);
BUG_ON(ret == -EIOCBQUEUED);
@@ -481,14 +480,13 @@ ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
static ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
- struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
struct kiocb kiocb;
struct iov_iter iter;
ssize_t ret;
init_sync_kiocb(&kiocb, filp);
kiocb.ki_pos = (ppos ? *ppos : 0);
- iov_iter_init(&iter, WRITE, &iov, 1, len);
+ iov_iter_ubuf(&iter, WRITE, (void __user *)buf, len);
ret = call_write_iter(filp, &kiocb, &iter);
BUG_ON(ret == -EIOCBQUEUED);
diff --git a/fs/remap_range.c b/fs/remap_range.c
index 046a513dbc3a..654912d06862 100644
--- a/fs/remap_range.c
+++ b/fs/remap_range.c
@@ -14,6 +14,7 @@
#include <linux/compat.h>
#include <linux/mount.h>
#include <linux/fs.h>
+#include <linux/dax.h>
#include "internal.h"
#include <linux/uaccess.h>
@@ -263,9 +264,11 @@ out_error:
* If there's an error, then the usual negative error code is returned.
* Otherwise returns 0 with *len set to the request length.
*/
-int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out,
- loff_t *len, unsigned int remap_flags)
+int
+__generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t *len, unsigned int remap_flags,
+ const struct iomap_ops *dax_read_ops)
{
struct inode *inode_in = file_inode(file_in);
struct inode *inode_out = file_inode(file_out);
@@ -325,8 +328,18 @@ int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
if (remap_flags & REMAP_FILE_DEDUP) {
bool is_same = false;
- ret = vfs_dedupe_file_range_compare(file_in, pos_in,
- file_out, pos_out, *len, &is_same);
+ if (*len == 0)
+ return 0;
+
+ if (!IS_DAX(inode_in))
+ ret = vfs_dedupe_file_range_compare(file_in, pos_in,
+ file_out, pos_out, *len, &is_same);
+ else if (dax_read_ops)
+ ret = dax_dedupe_file_range_compare(inode_in, pos_in,
+ inode_out, pos_out, *len, &is_same,
+ dax_read_ops);
+ else
+ return -EINVAL;
if (ret)
return ret;
if (!is_same)
@@ -344,6 +357,14 @@ int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
return ret;
}
+
+int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t *len, unsigned int remap_flags)
+{
+ return __generic_remap_file_range_prep(file_in, pos_in, file_out,
+ pos_out, len, remap_flags, NULL);
+}
EXPORT_SYMBOL(generic_remap_file_range_prep);
loff_t do_clone_file_range(struct file *file_in, loff_t pos_in,
diff --git a/fs/splice.c b/fs/splice.c
index 93a2c9bf6249..0878b852b355 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -301,11 +301,9 @@ ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
{
struct iov_iter to;
struct kiocb kiocb;
- unsigned int i_head;
int ret;
iov_iter_pipe(&to, READ, pipe, len);
- i_head = to.head;
init_sync_kiocb(&kiocb, in);
kiocb.ki_pos = *ppos;
ret = call_read_iter(in, &kiocb, &to);
@@ -313,9 +311,8 @@ ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
*ppos = kiocb.ki_pos;
file_accessed(in);
} else if (ret < 0) {
- to.head = i_head;
- to.iov_offset = 0;
- iov_iter_advance(&to, 0); /* to free what was emitted */
+ /* free what was emitted */
+ pipe_discard_from(pipe, to.start_head);
/*
* callers of ->splice_read() expect -EAGAIN on
* "can't put anything in there", rather than -EFAULT.
@@ -1161,39 +1158,40 @@ static int iter_to_pipe(struct iov_iter *from,
};
size_t total = 0;
int ret = 0;
- bool failed = false;
- while (iov_iter_count(from) && !failed) {
+ while (iov_iter_count(from)) {
struct page *pages[16];
- ssize_t copied;
+ ssize_t left;
size_t start;
- int n;
+ int i, n;
- copied = iov_iter_get_pages(from, pages, ~0UL, 16, &start);
- if (copied <= 0) {
- ret = copied;
+ left = iov_iter_get_pages2(from, pages, ~0UL, 16, &start);
+ if (left <= 0) {
+ ret = left;
break;
}
- for (n = 0; copied; n++, start = 0) {
- int size = min_t(int, copied, PAGE_SIZE - start);
- if (!failed) {
- buf.page = pages[n];
- buf.offset = start;
- buf.len = size;
- ret = add_to_pipe(pipe, &buf);
- if (unlikely(ret < 0)) {
- failed = true;
- } else {
- iov_iter_advance(from, ret);
- total += ret;
- }
- } else {
- put_page(pages[n]);
+ n = DIV_ROUND_UP(left + start, PAGE_SIZE);
+ for (i = 0; i < n; i++) {
+ int size = min_t(int, left, PAGE_SIZE - start);
+
+ buf.page = pages[i];
+ buf.offset = start;
+ buf.len = size;
+ ret = add_to_pipe(pipe, &buf);
+ if (unlikely(ret < 0)) {
+ iov_iter_revert(from, left);
+ // this one got dropped by add_to_pipe()
+ while (++i < n)
+ put_page(pages[i]);
+ goto out;
}
- copied -= size;
+ total += ret;
+ left -= size;
+ start = 0;
}
}
+out:
return total ? total : ret;
}
diff --git a/fs/squashfs/Makefile b/fs/squashfs/Makefile
index 7bd9b8b856d0..477c89a519ee 100644
--- a/fs/squashfs/Makefile
+++ b/fs/squashfs/Makefile
@@ -5,9 +5,9 @@
obj-$(CONFIG_SQUASHFS) += squashfs.o
squashfs-y += block.o cache.o dir.o export.o file.o fragment.o id.o inode.o
-squashfs-y += namei.o super.o symlink.o decompressor.o
+squashfs-y += namei.o super.o symlink.o decompressor.o page_actor.o
squashfs-$(CONFIG_SQUASHFS_FILE_CACHE) += file_cache.o
-squashfs-$(CONFIG_SQUASHFS_FILE_DIRECT) += file_direct.o page_actor.o
+squashfs-$(CONFIG_SQUASHFS_FILE_DIRECT) += file_direct.o
squashfs-$(CONFIG_SQUASHFS_DECOMP_SINGLE) += decompressor_single.o
squashfs-$(CONFIG_SQUASHFS_DECOMP_MULTI) += decompressor_multi.o
squashfs-$(CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU) += decompressor_multi_percpu.o
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index 8879d052f96c..833aca92301f 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -34,12 +34,15 @@ static int copy_bio_to_actor(struct bio *bio,
struct squashfs_page_actor *actor,
int offset, int req_length)
{
- void *actor_addr = squashfs_first_page(actor);
+ void *actor_addr;
struct bvec_iter_all iter_all = {};
struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
int copied_bytes = 0;
int actor_offset = 0;
+ squashfs_actor_nobuff(actor);
+ actor_addr = squashfs_first_page(actor);
+
if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all)))
return 0;
@@ -49,8 +52,9 @@ static int copy_bio_to_actor(struct bio *bio,
bytes_to_copy = min_t(int, bytes_to_copy,
req_length - copied_bytes);
- memcpy(actor_addr + actor_offset, bvec_virt(bvec) + offset,
- bytes_to_copy);
+ if (!IS_ERR(actor_addr))
+ memcpy(actor_addr + actor_offset, bvec_virt(bvec) +
+ offset, bytes_to_copy);
actor_offset += bytes_to_copy;
copied_bytes += bytes_to_copy;
diff --git a/fs/squashfs/decompressor.h b/fs/squashfs/decompressor.h
index 1b9ccfd0aa51..19ab60834389 100644
--- a/fs/squashfs/decompressor.h
+++ b/fs/squashfs/decompressor.h
@@ -20,6 +20,7 @@ struct squashfs_decompressor {
struct bio *, int, int, struct squashfs_page_actor *);
int id;
char *name;
+ int alloc_buffer;
int supported;
};
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
index 7f0904b20329..e56510964b22 100644
--- a/fs/squashfs/file.c
+++ b/fs/squashfs/file.c
@@ -39,6 +39,7 @@
#include "squashfs_fs_sb.h"
#include "squashfs_fs_i.h"
#include "squashfs.h"
+#include "page_actor.h"
/*
* Locate cache slot in range [offset, index] for specified inode. If
@@ -496,7 +497,137 @@ out:
return res;
}
+static int squashfs_readahead_fragment(struct page **page,
+ unsigned int pages, unsigned int expected)
+{
+ struct inode *inode = page[0]->mapping->host;
+ struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb,
+ squashfs_i(inode)->fragment_block,
+ squashfs_i(inode)->fragment_size);
+ struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+ unsigned int n, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
+
+ if (buffer->error)
+ goto out;
+
+ expected += squashfs_i(inode)->fragment_offset;
+
+ for (n = 0; n < pages; n++) {
+ unsigned int base = (page[n]->index & mask) << PAGE_SHIFT;
+ unsigned int offset = base + squashfs_i(inode)->fragment_offset;
+
+ if (expected > offset) {
+ unsigned int avail = min_t(unsigned int, expected -
+ offset, PAGE_SIZE);
+
+ squashfs_fill_page(page[n], buffer, offset, avail);
+ }
+
+ unlock_page(page[n]);
+ put_page(page[n]);
+ }
+
+out:
+ squashfs_cache_put(buffer);
+ return buffer->error;
+}
+
+static void squashfs_readahead(struct readahead_control *ractl)
+{
+ struct inode *inode = ractl->mapping->host;
+ struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+ size_t mask = (1UL << msblk->block_log) - 1;
+ unsigned short shift = msblk->block_log - PAGE_SHIFT;
+ loff_t start = readahead_pos(ractl) & ~mask;
+ size_t len = readahead_length(ractl) + readahead_pos(ractl) - start;
+ struct squashfs_page_actor *actor;
+ unsigned int nr_pages = 0;
+ struct page **pages;
+ int i, file_end = i_size_read(inode) >> msblk->block_log;
+ unsigned int max_pages = 1UL << shift;
+
+ readahead_expand(ractl, start, (len | mask) + 1);
+
+ pages = kmalloc_array(max_pages, sizeof(void *), GFP_KERNEL);
+ if (!pages)
+ return;
+
+ for (;;) {
+ pgoff_t index;
+ int res, bsize;
+ u64 block = 0;
+ unsigned int expected;
+
+ nr_pages = __readahead_batch(ractl, pages, max_pages);
+ if (!nr_pages)
+ break;
+
+ if (readahead_pos(ractl) >= i_size_read(inode))
+ goto skip_pages;
+
+ index = pages[0]->index >> shift;
+ if ((pages[nr_pages - 1]->index >> shift) != index)
+ goto skip_pages;
+
+ expected = index == file_end ?
+ (i_size_read(inode) & (msblk->block_size - 1)) :
+ msblk->block_size;
+
+ if (index == file_end && squashfs_i(inode)->fragment_block !=
+ SQUASHFS_INVALID_BLK) {
+ res = squashfs_readahead_fragment(pages, nr_pages,
+ expected);
+ if (res)
+ goto skip_pages;
+ continue;
+ }
+
+ bsize = read_blocklist(inode, index, &block);
+ if (bsize == 0)
+ goto skip_pages;
+
+ actor = squashfs_page_actor_init_special(msblk, pages, nr_pages,
+ expected);
+ if (!actor)
+ goto skip_pages;
+
+ res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);
+
+ squashfs_page_actor_free(actor);
+
+ if (res == expected) {
+ int bytes;
+
+ /* Last page (if present) may have trailing bytes not filled */
+ bytes = res % PAGE_SIZE;
+ if (pages[nr_pages - 1]->index == file_end && bytes)
+ memzero_page(pages[nr_pages - 1], bytes,
+ PAGE_SIZE - bytes);
+
+ for (i = 0; i < nr_pages; i++) {
+ flush_dcache_page(pages[i]);
+ SetPageUptodate(pages[i]);
+ }
+ }
+
+ for (i = 0; i < nr_pages; i++) {
+ unlock_page(pages[i]);
+ put_page(pages[i]);
+ }
+ }
+
+ kfree(pages);
+ return;
+
+skip_pages:
+ for (i = 0; i < nr_pages; i++) {
+ unlock_page(pages[i]);
+ put_page(pages[i]);
+ }
+ kfree(pages);
+}
const struct address_space_operations squashfs_aops = {
- .read_folio = squashfs_read_folio
+ .read_folio = squashfs_read_folio,
+ .readahead = squashfs_readahead
};
diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c
index a4894cc59447..f1ccad519e28 100644
--- a/fs/squashfs/file_direct.c
+++ b/fs/squashfs/file_direct.c
@@ -18,9 +18,6 @@
#include "squashfs.h"
#include "page_actor.h"
-static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
- int pages, struct page **page, int bytes);
-
/* Read separately compressed datablock directly into page cache */
int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
int expected)
@@ -33,7 +30,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
int start_index = target_page->index & ~mask;
int end_index = start_index | mask;
- int i, n, pages, missing_pages, bytes, res = -ENOMEM;
+ int i, n, pages, bytes, res = -ENOMEM;
struct page **page;
struct squashfs_page_actor *actor;
void *pageaddr;
@@ -47,50 +44,38 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
if (page == NULL)
return res;
- /*
- * Create a "page actor" which will kmap and kunmap the
- * page cache pages appropriately within the decompressor
- */
- actor = squashfs_page_actor_init_special(page, pages, 0);
- if (actor == NULL)
- goto out;
-
/* Try to grab all the pages covered by the Squashfs block */
- for (missing_pages = 0, i = 0, n = start_index; i < pages; i++, n++) {
+ for (i = 0, n = start_index; n <= end_index; n++) {
page[i] = (n == target_page->index) ? target_page :
grab_cache_page_nowait(target_page->mapping, n);
- if (page[i] == NULL) {
- missing_pages++;
+ if (page[i] == NULL)
continue;
- }
if (PageUptodate(page[i])) {
unlock_page(page[i]);
put_page(page[i]);
- page[i] = NULL;
- missing_pages++;
+ continue;
}
+
+ i++;
}
- if (missing_pages) {
- /*
- * Couldn't get one or more pages, this page has either
- * been VM reclaimed, but others are still in the page cache
- * and uptodate, or we're racing with another thread in
- * squashfs_readpage also trying to grab them. Fall back to
- * using an intermediate buffer.
- */
- res = squashfs_read_cache(target_page, block, bsize, pages,
- page, expected);
- if (res < 0)
- goto mark_errored;
+ pages = i;
+ /*
+ * Create a "page actor" which will kmap and kunmap the
+ * page cache pages appropriately within the decompressor
+ */
+ actor = squashfs_page_actor_init_special(msblk, page, pages, expected);
+ if (actor == NULL)
goto out;
- }
/* Decompress directly into the page cache buffers */
res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);
+
+ squashfs_page_actor_free(actor);
+
if (res < 0)
goto mark_errored;
@@ -99,12 +84,12 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
goto mark_errored;
}
- /* Last page may have trailing bytes not filled */
+ /* Last page (if present) may have trailing bytes not filled */
bytes = res % PAGE_SIZE;
- if (bytes) {
- pageaddr = kmap_atomic(page[pages - 1]);
+ if (page[pages - 1]->index == end_index && bytes) {
+ pageaddr = kmap_local_page(page[pages - 1]);
memset(pageaddr + bytes, 0, PAGE_SIZE - bytes);
- kunmap_atomic(pageaddr);
+ kunmap_local(pageaddr);
}
/* Mark pages as uptodate, unlock and release */
@@ -116,7 +101,6 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
put_page(page[i]);
}
- kfree(actor);
kfree(page);
return 0;
@@ -135,40 +119,6 @@ mark_errored:
}
out:
- kfree(actor);
kfree(page);
return res;
}
-
-
-static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
- int pages, struct page **page, int bytes)
-{
- struct inode *i = target_page->mapping->host;
- struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
- block, bsize);
- int res = buffer->error, n, offset = 0;
-
- if (res) {
- ERROR("Unable to read page, block %llx, size %x\n", block,
- bsize);
- goto out;
- }
-
- for (n = 0; n < pages && bytes > 0; n++,
- bytes -= PAGE_SIZE, offset += PAGE_SIZE) {
- int avail = min_t(int, bytes, PAGE_SIZE);
-
- if (page[n] == NULL)
- continue;
-
- squashfs_fill_page(page[n], buffer, offset, avail);
- unlock_page(page[n]);
- if (page[n] != target_page)
- put_page(page[n]);
- }
-
-out:
- squashfs_cache_put(buffer);
- return res;
-}
diff --git a/fs/squashfs/lz4_wrapper.c b/fs/squashfs/lz4_wrapper.c
index b685b6238316..49797729f143 100644
--- a/fs/squashfs/lz4_wrapper.c
+++ b/fs/squashfs/lz4_wrapper.c
@@ -119,10 +119,12 @@ static int lz4_uncompress(struct squashfs_sb_info *msblk, void *strm,
buff = stream->output;
while (data) {
if (bytes <= PAGE_SIZE) {
- memcpy(data, buff, bytes);
+ if (!IS_ERR(data))
+ memcpy(data, buff, bytes);
break;
}
- memcpy(data, buff, PAGE_SIZE);
+ if (!IS_ERR(data))
+ memcpy(data, buff, PAGE_SIZE);
buff += PAGE_SIZE;
bytes -= PAGE_SIZE;
data = squashfs_next_page(output);
@@ -139,5 +141,6 @@ const struct squashfs_decompressor squashfs_lz4_comp_ops = {
.decompress = lz4_uncompress,
.id = LZ4_COMPRESSION,
.name = "lz4",
+ .alloc_buffer = 0,
.supported = 1
};
diff --git a/fs/squashfs/lzo_wrapper.c b/fs/squashfs/lzo_wrapper.c
index cb510a631968..d216aeefa865 100644
--- a/fs/squashfs/lzo_wrapper.c
+++ b/fs/squashfs/lzo_wrapper.c
@@ -93,10 +93,12 @@ static int lzo_uncompress(struct squashfs_sb_info *msblk, void *strm,
buff = stream->output;
while (data) {
if (bytes <= PAGE_SIZE) {
- memcpy(data, buff, bytes);
+ if (!IS_ERR(data))
+ memcpy(data, buff, bytes);
break;
} else {
- memcpy(data, buff, PAGE_SIZE);
+ if (!IS_ERR(data))
+ memcpy(data, buff, PAGE_SIZE);
buff += PAGE_SIZE;
bytes -= PAGE_SIZE;
data = squashfs_next_page(output);
@@ -116,5 +118,6 @@ const struct squashfs_decompressor squashfs_lzo_comp_ops = {
.decompress = lzo_uncompress,
.id = LZO_COMPRESSION,
.name = "lzo",
+ .alloc_buffer = 0,
.supported = 1
};
diff --git a/fs/squashfs/page_actor.c b/fs/squashfs/page_actor.c
index 520d323a99ce..54b93bf4a25c 100644
--- a/fs/squashfs/page_actor.c
+++ b/fs/squashfs/page_actor.c
@@ -7,6 +7,8 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
+#include "squashfs_fs_sb.h"
+#include "decompressor.h"
#include "page_actor.h"
/*
@@ -50,6 +52,7 @@ struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
actor->buffer = buffer;
actor->pages = pages;
actor->next_page = 0;
+ actor->tmp_buffer = NULL;
actor->squashfs_first_page = cache_first_page;
actor->squashfs_next_page = cache_next_page;
actor->squashfs_finish_page = cache_finish_page;
@@ -57,40 +60,72 @@ struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
}
/* Implementation of page_actor for decompressing directly into page cache. */
+static void *handle_next_page(struct squashfs_page_actor *actor)
+{
+ int max_pages = (actor->length + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ if (actor->returned_pages == max_pages)
+ return NULL;
+
+ if ((actor->next_page == actor->pages) ||
+ (actor->next_index != actor->page[actor->next_page]->index)) {
+ actor->next_index++;
+ actor->returned_pages++;
+ return actor->alloc_buffer ? actor->tmp_buffer : ERR_PTR(-ENOMEM);
+ }
+
+ actor->next_index++;
+ actor->returned_pages++;
+ return actor->pageaddr = kmap_local_page(actor->page[actor->next_page++]);
+}
+
static void *direct_first_page(struct squashfs_page_actor *actor)
{
- actor->next_page = 1;
- return actor->pageaddr = kmap_atomic(actor->page[0]);
+ return handle_next_page(actor);
}
static void *direct_next_page(struct squashfs_page_actor *actor)
{
- if (actor->pageaddr)
- kunmap_atomic(actor->pageaddr);
+ if (actor->pageaddr) {
+ kunmap_local(actor->pageaddr);
+ actor->pageaddr = NULL;
+ }
- return actor->pageaddr = actor->next_page == actor->pages ? NULL :
- kmap_atomic(actor->page[actor->next_page++]);
+ return handle_next_page(actor);
}
static void direct_finish_page(struct squashfs_page_actor *actor)
{
if (actor->pageaddr)
- kunmap_atomic(actor->pageaddr);
+ kunmap_local(actor->pageaddr);
}
-struct squashfs_page_actor *squashfs_page_actor_init_special(struct page **page,
- int pages, int length)
+struct squashfs_page_actor *squashfs_page_actor_init_special(struct squashfs_sb_info *msblk,
+ struct page **page, int pages, int length)
{
struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL);
if (actor == NULL)
return NULL;
+ if (msblk->decompressor->alloc_buffer) {
+ actor->tmp_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
+
+ if (actor->tmp_buffer == NULL) {
+ kfree(actor);
+ return NULL;
+ }
+ } else
+ actor->tmp_buffer = NULL;
+
actor->length = length ? : pages * PAGE_SIZE;
actor->page = page;
actor->pages = pages;
actor->next_page = 0;
+ actor->returned_pages = 0;
+ actor->next_index = page[0]->index & ~((1 << (msblk->block_log - PAGE_SHIFT)) - 1);
actor->pageaddr = NULL;
+ actor->alloc_buffer = msblk->decompressor->alloc_buffer;
actor->squashfs_first_page = direct_first_page;
actor->squashfs_next_page = direct_next_page;
actor->squashfs_finish_page = direct_finish_page;
diff --git a/fs/squashfs/page_actor.h b/fs/squashfs/page_actor.h
index 2e3073ace009..95ffbb543d91 100644
--- a/fs/squashfs/page_actor.h
+++ b/fs/squashfs/page_actor.h
@@ -6,63 +6,34 @@
* Phillip Lougher <phillip@squashfs.org.uk>
*/
-#ifndef CONFIG_SQUASHFS_FILE_DIRECT
-struct squashfs_page_actor {
- void **page;
- int pages;
- int length;
- int next_page;
-};
-
-static inline struct squashfs_page_actor *squashfs_page_actor_init(void **page,
- int pages, int length)
-{
- struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL);
-
- if (actor == NULL)
- return NULL;
-
- actor->length = length ? : pages * PAGE_SIZE;
- actor->page = page;
- actor->pages = pages;
- actor->next_page = 0;
- return actor;
-}
-
-static inline void *squashfs_first_page(struct squashfs_page_actor *actor)
-{
- actor->next_page = 1;
- return actor->page[0];
-}
-
-static inline void *squashfs_next_page(struct squashfs_page_actor *actor)
-{
- return actor->next_page == actor->pages ? NULL :
- actor->page[actor->next_page++];
-}
-
-static inline void squashfs_finish_page(struct squashfs_page_actor *actor)
-{
- /* empty */
-}
-#else
struct squashfs_page_actor {
union {
void **buffer;
struct page **page;
};
void *pageaddr;
+ void *tmp_buffer;
void *(*squashfs_first_page)(struct squashfs_page_actor *);
void *(*squashfs_next_page)(struct squashfs_page_actor *);
void (*squashfs_finish_page)(struct squashfs_page_actor *);
int pages;
int length;
int next_page;
+ int alloc_buffer;
+ int returned_pages;
+ pgoff_t next_index;
};
-extern struct squashfs_page_actor *squashfs_page_actor_init(void **, int, int);
-extern struct squashfs_page_actor *squashfs_page_actor_init_special(struct page
- **, int, int);
+extern struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
+ int pages, int length);
+extern struct squashfs_page_actor *squashfs_page_actor_init_special(
+ struct squashfs_sb_info *msblk,
+ struct page **page, int pages, int length);
+static inline void squashfs_page_actor_free(struct squashfs_page_actor *actor)
+{
+ kfree(actor->tmp_buffer);
+ kfree(actor);
+}
static inline void *squashfs_first_page(struct squashfs_page_actor *actor)
{
return actor->squashfs_first_page(actor);
@@ -75,5 +46,8 @@ static inline void squashfs_finish_page(struct squashfs_page_actor *actor)
{
actor->squashfs_finish_page(actor);
}
-#endif
+static inline void squashfs_actor_nobuff(struct squashfs_page_actor *actor)
+{
+ actor->alloc_buffer = 0;
+}
#endif
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 6d594ba2ed28..32565dafa7f3 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -29,7 +29,6 @@
#include <linux/module.h>
#include <linux/magic.h>
#include <linux/xattr.h>
-#include <linux/backing-dev.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
@@ -113,24 +112,6 @@ static const struct squashfs_decompressor *supported_squashfs_filesystem(
return decompressor;
}
-static int squashfs_bdi_init(struct super_block *sb)
-{
- int err;
- unsigned int major = MAJOR(sb->s_dev);
- unsigned int minor = MINOR(sb->s_dev);
-
- bdi_put(sb->s_bdi);
- sb->s_bdi = &noop_backing_dev_info;
-
- err = super_setup_bdi_name(sb, "squashfs_%u_%u", major, minor);
- if (err)
- return err;
-
- sb->s_bdi->ra_pages = 0;
- sb->s_bdi->io_pages = 0;
-
- return 0;
-}
static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
@@ -146,20 +127,6 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
TRACE("Entered squashfs_fill_superblock\n");
- /*
- * squashfs provides 'backing_dev_info' in order to disable read-ahead. For
- * squashfs, I/O is not deferred, it is done immediately in read_folio,
- * which means the user would always have to wait their own I/O. So the effect
- * of readahead is very weak for squashfs. squashfs_bdi_init will set
- * sb->s_bdi->ra_pages and sb->s_bdi->io_pages to 0 and close readahead for
- * squashfs.
- */
- err = squashfs_bdi_init(sb);
- if (err) {
- errorf(fc, "squashfs init bdi failed");
- return err;
- }
-
sb->s_fs_info = kzalloc(sizeof(*msblk), GFP_KERNEL);
if (sb->s_fs_info == NULL) {
ERROR("Failed to allocate squashfs_sb_info\n");
diff --git a/fs/squashfs/xz_wrapper.c b/fs/squashfs/xz_wrapper.c
index 68f6d09bb3a2..6c49481a2f8c 100644
--- a/fs/squashfs/xz_wrapper.c
+++ b/fs/squashfs/xz_wrapper.c
@@ -131,6 +131,10 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm,
stream->buf.out_pos = 0;
stream->buf.out_size = PAGE_SIZE;
stream->buf.out = squashfs_first_page(output);
+ if (IS_ERR(stream->buf.out)) {
+ error = PTR_ERR(stream->buf.out);
+ goto finish;
+ }
for (;;) {
enum xz_ret xz_err;
@@ -156,7 +160,10 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm,
if (stream->buf.out_pos == stream->buf.out_size) {
stream->buf.out = squashfs_next_page(output);
- if (stream->buf.out != NULL) {
+ if (IS_ERR(stream->buf.out)) {
+ error = PTR_ERR(stream->buf.out);
+ break;
+ } else if (stream->buf.out != NULL) {
stream->buf.out_pos = 0;
total += PAGE_SIZE;
}
@@ -171,6 +178,7 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm,
}
}
+finish:
squashfs_finish_page(output);
return error ? error : total + stream->buf.out_pos;
@@ -183,5 +191,6 @@ const struct squashfs_decompressor squashfs_xz_comp_ops = {
.decompress = squashfs_xz_uncompress,
.id = XZ_COMPRESSION,
.name = "xz",
+ .alloc_buffer = 1,
.supported = 1
};
diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c
index a20e9042146b..cbb7afe7bc46 100644
--- a/fs/squashfs/zlib_wrapper.c
+++ b/fs/squashfs/zlib_wrapper.c
@@ -62,6 +62,11 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm,
stream->next_out = squashfs_first_page(output);
stream->avail_in = 0;
+ if (IS_ERR(stream->next_out)) {
+ error = PTR_ERR(stream->next_out);
+ goto finish;
+ }
+
for (;;) {
int zlib_err;
@@ -85,7 +90,10 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm,
if (stream->avail_out == 0) {
stream->next_out = squashfs_next_page(output);
- if (stream->next_out != NULL)
+ if (IS_ERR(stream->next_out)) {
+ error = PTR_ERR(stream->next_out);
+ break;
+ } else if (stream->next_out != NULL)
stream->avail_out = PAGE_SIZE;
}
@@ -107,6 +115,7 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm,
}
}
+finish:
squashfs_finish_page(output);
if (!error)
@@ -122,6 +131,7 @@ const struct squashfs_decompressor squashfs_zlib_comp_ops = {
.decompress = zlib_uncompress,
.id = ZLIB_COMPRESSION,
.name = "zlib",
+ .alloc_buffer = 1,
.supported = 1
};
diff --git a/fs/squashfs/zstd_wrapper.c b/fs/squashfs/zstd_wrapper.c
index c40445dbf38c..0e407c4d8b3b 100644
--- a/fs/squashfs/zstd_wrapper.c
+++ b/fs/squashfs/zstd_wrapper.c
@@ -80,6 +80,10 @@ static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm,
out_buf.size = PAGE_SIZE;
out_buf.dst = squashfs_first_page(output);
+ if (IS_ERR(out_buf.dst)) {
+ error = PTR_ERR(out_buf.dst);
+ goto finish;
+ }
for (;;) {
size_t zstd_err;
@@ -104,7 +108,10 @@ static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm,
if (out_buf.pos == out_buf.size) {
out_buf.dst = squashfs_next_page(output);
- if (out_buf.dst == NULL) {
+ if (IS_ERR(out_buf.dst)) {
+ error = PTR_ERR(out_buf.dst);
+ break;
+ } else if (out_buf.dst == NULL) {
/* Shouldn't run out of pages
* before stream is done.
*/
@@ -129,6 +136,8 @@ static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm,
}
}
+finish:
+
squashfs_finish_page(output);
return error ? error : total_out;
@@ -140,5 +149,6 @@ const struct squashfs_decompressor squashfs_zstd_comp_ops = {
.decompress = zstd_uncompress,
.id = ZSTD_COMPRESSION,
.name = "zstd",
+ .alloc_buffer = 1,
.supported = 1
};
diff --git a/fs/super.c b/fs/super.c
index 60f57c7bc0a6..734ed584a946 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -265,7 +265,7 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags,
s->s_shrink.count_objects = super_cache_count;
s->s_shrink.batch = 1024;
s->s_shrink.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE;
- if (prealloc_shrinker(&s->s_shrink))
+ if (prealloc_shrinker(&s->s_shrink, "sb-%s", type->name))
goto fail;
if (list_lru_init_memcg(&s->s_dentry_lru, &s->s_shrink))
goto fail;
@@ -423,6 +423,35 @@ bool trylock_super(struct super_block *sb)
}
/**
+ * retire_super - prevents superblock from being reused
+ * @sb: superblock to retire
+ *
+ * The function marks the superblock so that it is ignored by the superblock
+ * test, which prevents it from being reused for any new mounts. If the
+ * superblock has a private bdi, it also unregisters it, but it does not drop
+ * the superblock's refcount, to avoid potential races; the refcount is
+ * dropped by generic_shutdown_super(). The function cannot be called
+ * concurrently with generic_shutdown_super(). It is safe to call it
+ * multiple times; subsequent calls have no effect.
+ *
+ * The marker will affect the re-use only for block-device-based
+ * superblocks. Other superblocks will still get marked if this function
+ * is used, but that will not affect their reusability.
+ */
+void retire_super(struct super_block *sb)
+{
+ WARN_ON(!sb->s_bdev);
+ down_write(&sb->s_umount);
+ if (sb->s_iflags & SB_I_PERSB_BDI) {
+ bdi_unregister(sb->s_bdi);
+ sb->s_iflags &= ~SB_I_PERSB_BDI;
+ }
+ sb->s_iflags |= SB_I_RETIRED;
+ up_write(&sb->s_umount);
+}
+EXPORT_SYMBOL(retire_super);
+
+/**
* generic_shutdown_super - common helper for ->kill_sb()
* @sb: superblock to kill
*
@@ -1216,7 +1245,7 @@ static int set_bdev_super_fc(struct super_block *s, struct fs_context *fc)
static int test_bdev_super_fc(struct super_block *s, struct fs_context *fc)
{
- return s->s_bdev == fc->sget_key;
+ return !(s->s_iflags & SB_I_RETIRED) && s->s_bdev == fc->sget_key;
}
/**
@@ -1288,6 +1317,8 @@ int get_tree_bdev(struct fs_context *fc,
} else {
s->s_mode = mode;
snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
+ shrinker_debugfs_rename(&s->s_shrink, "sb-%s:%s",
+ fc->fs_type->name, s->s_id);
sb_set_blocksize(s, block_size(bdev));
error = fill_super(s, fc);
if (error) {
@@ -1307,7 +1338,7 @@ EXPORT_SYMBOL(get_tree_bdev);
static int test_bdev_super(struct super_block *s, void *data)
{
- return (void *)s->s_bdev == data;
+ return !(s->s_iflags & SB_I_RETIRED) && (void *)s->s_bdev == data;
}
struct dentry *mount_bdev(struct file_system_type *fs_type,
@@ -1363,6 +1394,8 @@ struct dentry *mount_bdev(struct file_system_type *fs_type,
} else {
s->s_mode = mode;
snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
+ shrinker_debugfs_rename(&s->s_shrink, "sb-%s:%s",
+ fs_type->name, s->s_id);
sb_set_blocksize(s, block_size(bdev));
error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
if (error) {
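
retire_super() above is meant for a filesystem that wants to guarantee a block-device superblock is never matched by a later mount of the same device; it has to run before, not concurrently with, generic_shutdown_super(). A hypothetical ->kill_sb() sketch — examplefs_kill_sb() is illustrative only, not part of this patch:

/* Hypothetical ->kill_sb(); marks the sb retired before tearing it down. */
static void examplefs_kill_sb(struct super_block *sb)
{
	retire_super(sb);		/* sget()/mount_bdev() will skip this sb */
	kill_block_super(sb);		/* ends in generic_shutdown_super() */
}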
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 0978d01b0ea4..d0c9a09988bc 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -2430,7 +2430,7 @@ static int __init ubifs_init(void)
if (!ubifs_inode_slab)
return -ENOMEM;
- err = register_shrinker(&ubifs_shrinker_info);
+ err = register_shrinker(&ubifs_shrinker_info, "ubifs-slab");
if (err)
goto out_slab;
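
The register_shrinker() and prealloc_shrinker() calls above now carry a name, which is what shrinker_debugfs_rename() later updates once the backing device is known. A hedged sketch of registering a named shrinker under the new calling convention — the example_* identifiers and callbacks are assumptions, not code from this patch:

static struct shrinker example_shrinker = {
	.count_objects	= example_count,	/* assumed callback */
	.scan_objects	= example_scan,		/* assumed callback */
	.seeks		= DEFAULT_SEEKS,
};

static int __init example_init(void)
{
	/* The name becomes the shrinker's entry under debugfs. */
	return register_shrinker(&example_shrinker, "examplefs-metadata");
}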
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index de86f5b2859f..175de70e3adf 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1601,6 +1601,10 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range);
}
+ /* Reset ptes for the whole vma range if wr-protected */
+ if (userfaultfd_wp(vma))
+ uffd_wp_range(mm, vma, start, vma_end - start, false);
+
new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
prev = vma_merge(mm, prev, start, vma_end, new_flags,
vma->anon_vma, vma->vm_file, vma->vm_pgoff,
@@ -1925,10 +1929,8 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
ret = -EFAULT;
if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
goto out;
- features = uffdio_api.features;
- ret = -EINVAL;
- if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES))
- goto err_out;
+ /* Ignore unsupported features (userspace built against newer kernel) */
+ features = uffdio_api.features & UFFD_API_FEATURES;
ret = -EPERM;
if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE))
goto err_out;
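
With the UFFDIO_API change above, feature bits the running kernel does not recognise are masked out instead of failing the ioctl with -EINVAL, so userspace should always check which requested features actually came back. A minimal user-space sketch with error handling trimmed — uffd_open() is illustrative only:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/userfaultfd.h>

static int uffd_open(void)
{
	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	struct uffdio_api api = {
		.api	  = UFFD_API,
		.features = UFFD_FEATURE_EVENT_FORK,	/* request what is wanted */
	};

	if (uffd < 0)
		return -1;
	if (ioctl(uffd, UFFDIO_API, &api) < 0) {
		close(uffd);
		return -1;
	}
	if (!(api.features & UFFD_FEATURE_EVENT_FORK)) {
		/* Feature unavailable on this kernel; fall back here. */
	}
	return uffd;
}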
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index b056cfc6398e..03135a1c31b6 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -106,6 +106,7 @@ xfs-y += xfs_log.o \
xfs_icreate_item.o \
xfs_inode_item.o \
xfs_inode_item_recover.o \
+ xfs_iunlink_item.o \
xfs_refcount_item.o \
xfs_rmap_item.o \
xfs_log_recover.o \
@@ -129,6 +130,11 @@ xfs-$(CONFIG_SYSCTL) += xfs_sysctl.o
xfs-$(CONFIG_COMPAT) += xfs_ioctl32.o
xfs-$(CONFIG_EXPORTFS_BLOCK_OPS) += xfs_pnfs.o
+# notify failure
+ifeq ($(CONFIG_MEMORY_FAILURE),y)
+xfs-$(CONFIG_FS_DAX) += xfs_notify_failure.o
+endif
+
# online scrub/repair
ifeq ($(CONFIG_XFS_ONLINE_SCRUB),y)
diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c
index 3e920cf1b454..bb0c700afe3c 100644
--- a/fs/xfs/libxfs/xfs_ag.c
+++ b/fs/xfs/libxfs/xfs_ag.c
@@ -120,18 +120,18 @@ xfs_initialize_perag_data(
for (index = 0; index < agcount; index++) {
/*
- * read the agf, then the agi. This gets us
- * all the information we need and populates the
- * per-ag structures for us.
+ * Read the AGF and AGI buffers to populate the per-ag
+ * structures for us.
*/
- error = xfs_alloc_pagf_init(mp, NULL, index, 0);
- if (error)
+ pag = xfs_perag_get(mp, index);
+ error = xfs_alloc_read_agf(pag, NULL, 0, NULL);
+ if (!error)
+ error = xfs_ialloc_read_agi(pag, NULL, NULL);
+ if (error) {
+ xfs_perag_put(pag);
return error;
+ }
- error = xfs_ialloc_pagi_init(mp, NULL, index);
- if (error)
- return error;
- pag = xfs_perag_get(mp, index);
ifree += pag->pagi_freecount;
ialloc += pag->pagi_count;
bfree += pag->pagf_freeblks;
@@ -194,17 +194,76 @@ xfs_free_perag(
XFS_IS_CORRUPT(pag->pag_mount, atomic_read(&pag->pag_ref) != 0);
cancel_delayed_work_sync(&pag->pag_blockgc_work);
- xfs_iunlink_destroy(pag);
xfs_buf_hash_destroy(pag);
call_rcu(&pag->rcu_head, __xfs_free_perag);
}
}
+/* Find the size of the AG, in blocks. */
+static xfs_agblock_t
+__xfs_ag_block_count(
+ struct xfs_mount *mp,
+ xfs_agnumber_t agno,
+ xfs_agnumber_t agcount,
+ xfs_rfsblock_t dblocks)
+{
+ ASSERT(agno < agcount);
+
+ if (agno < agcount - 1)
+ return mp->m_sb.sb_agblocks;
+ return dblocks - (agno * mp->m_sb.sb_agblocks);
+}
+
+xfs_agblock_t
+xfs_ag_block_count(
+ struct xfs_mount *mp,
+ xfs_agnumber_t agno)
+{
+ return __xfs_ag_block_count(mp, agno, mp->m_sb.sb_agcount,
+ mp->m_sb.sb_dblocks);
+}
+
+/* Calculate the first and last possible inode number in an AG. */
+static void
+__xfs_agino_range(
+ struct xfs_mount *mp,
+ xfs_agblock_t eoag,
+ xfs_agino_t *first,
+ xfs_agino_t *last)
+{
+ xfs_agblock_t bno;
+
+ /*
+ * Calculate the first inode, which will be in the first
+ * cluster-aligned block after the AGFL.
+ */
+ bno = round_up(XFS_AGFL_BLOCK(mp) + 1, M_IGEO(mp)->cluster_align);
+ *first = XFS_AGB_TO_AGINO(mp, bno);
+
+ /*
+ * Calculate the last inode, which will be at the end of the
+ * last (aligned) cluster that can be allocated in the AG.
+ */
+ bno = round_down(eoag, M_IGEO(mp)->cluster_align);
+ *last = XFS_AGB_TO_AGINO(mp, bno) - 1;
+}
+
+void
+xfs_agino_range(
+ struct xfs_mount *mp,
+ xfs_agnumber_t agno,
+ xfs_agino_t *first,
+ xfs_agino_t *last)
+{
+ return __xfs_agino_range(mp, xfs_ag_block_count(mp, agno), first, last);
+}
+
int
xfs_initialize_perag(
struct xfs_mount *mp,
xfs_agnumber_t agcount,
+ xfs_rfsblock_t dblocks,
xfs_agnumber_t *maxagi)
{
struct xfs_perag *pag;
@@ -263,13 +322,18 @@ xfs_initialize_perag(
if (error)
goto out_remove_pag;
- error = xfs_iunlink_init(pag);
- if (error)
- goto out_hash_destroy;
-
/* first new pag is fully initialized */
if (first_initialised == NULLAGNUMBER)
first_initialised = index;
+
+ /*
+ * Pre-calculated geometry
+ */
+ pag->block_count = __xfs_ag_block_count(mp, index, agcount,
+ dblocks);
+ pag->min_block = XFS_AGFL_BLOCK(mp);
+ __xfs_agino_range(mp, pag->block_count, &pag->agino_min,
+ &pag->agino_max);
}
index = xfs_set_inode_alloc(mp, agcount);
@@ -280,8 +344,6 @@ xfs_initialize_perag(
mp->m_ag_prealloc_blocks = xfs_prealloc_blocks(mp);
return 0;
-out_hash_destroy:
- xfs_buf_hash_destroy(pag);
out_remove_pag:
radix_tree_delete(&mp->m_perag_tree, index);
out_free_pag:
@@ -293,7 +355,6 @@ out_unwind_new_pags:
if (!pag)
break;
xfs_buf_hash_destroy(pag);
- xfs_iunlink_destroy(pag);
kmem_free(pag);
}
return error;
@@ -321,12 +382,6 @@ xfs_get_aghdr_buf(
return 0;
}
-static inline bool is_log_ag(struct xfs_mount *mp, struct aghdr_init_data *id)
-{
- return mp->m_sb.sb_logstart > 0 &&
- id->agno == XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart);
-}
-
/*
* Generic btree root block init function
*/
@@ -352,7 +407,7 @@ xfs_freesp_init_recs(
arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
arec->ar_startblock = cpu_to_be32(mp->m_ag_prealloc_blocks);
- if (is_log_ag(mp, id)) {
+ if (xfs_ag_contains_log(mp, id->agno)) {
struct xfs_alloc_rec *nrec;
xfs_agblock_t start = XFS_FSB_TO_AGBNO(mp,
mp->m_sb.sb_logstart);
@@ -479,7 +534,7 @@ xfs_rmaproot_init(
}
/* account for the log space */
- if (is_log_ag(mp, id)) {
+ if (xfs_ag_contains_log(mp, id->agno)) {
rrec = XFS_RMAP_REC_ADDR(block,
be16_to_cpu(block->bb_numrecs) + 1);
rrec->rm_startblock = cpu_to_be32(
@@ -550,7 +605,7 @@ xfs_agfblock_init(
agf->agf_refcount_blocks = cpu_to_be32(1);
}
- if (is_log_ag(mp, id)) {
+ if (xfs_ag_contains_log(mp, id->agno)) {
int64_t logblocks = mp->m_sb.sb_logblocks;
be32_add_cpu(&agf->agf_freeblks, -logblocks);
@@ -761,11 +816,11 @@ xfs_ag_init_headers(
int
xfs_ag_shrink_space(
- struct xfs_mount *mp,
+ struct xfs_perag *pag,
struct xfs_trans **tpp,
- xfs_agnumber_t agno,
xfs_extlen_t delta)
{
+ struct xfs_mount *mp = pag->pag_mount;
struct xfs_alloc_arg args = {
.tp = *tpp,
.mp = mp,
@@ -782,14 +837,14 @@ xfs_ag_shrink_space(
xfs_agblock_t aglen;
int error, err2;
- ASSERT(agno == mp->m_sb.sb_agcount - 1);
- error = xfs_ialloc_read_agi(mp, *tpp, agno, &agibp);
+ ASSERT(pag->pag_agno == mp->m_sb.sb_agcount - 1);
+ error = xfs_ialloc_read_agi(pag, *tpp, &agibp);
if (error)
return error;
agi = agibp->b_addr;
- error = xfs_alloc_read_agf(mp, *tpp, agno, 0, &agfbp);
+ error = xfs_alloc_read_agf(pag, *tpp, 0, &agfbp);
if (error)
return error;
@@ -801,13 +856,14 @@ xfs_ag_shrink_space(
if (delta >= aglen)
return -EINVAL;
- args.fsbno = XFS_AGB_TO_FSB(mp, agno, aglen - delta);
+ args.fsbno = XFS_AGB_TO_FSB(mp, pag->pag_agno, aglen - delta);
/*
* Make sure that the last inode cluster cannot overlap with the new
* end of the AG, even if it's sparse.
*/
- error = xfs_ialloc_check_shrink(*tpp, agno, agibp, aglen - delta);
+ error = xfs_ialloc_check_shrink(*tpp, pag->pag_agno, agibp,
+ aglen - delta);
if (error)
return error;
@@ -815,7 +871,7 @@ xfs_ag_shrink_space(
* Disable perag reservations so it doesn't cause the allocation request
* to fail. We'll reestablish reservation before we return.
*/
- error = xfs_ag_resv_free(agibp->b_pag);
+ error = xfs_ag_resv_free(pag);
if (error)
return error;
@@ -844,7 +900,7 @@ xfs_ag_shrink_space(
be32_add_cpu(&agi->agi_length, -delta);
be32_add_cpu(&agf->agf_length, -delta);
- err2 = xfs_ag_resv_init(agibp->b_pag, *tpp);
+ err2 = xfs_ag_resv_init(pag, *tpp);
if (err2) {
be32_add_cpu(&agi->agi_length, delta);
be32_add_cpu(&agf->agf_length, delta);
@@ -868,8 +924,9 @@ xfs_ag_shrink_space(
xfs_ialloc_log_agi(*tpp, agibp, XFS_AGI_LENGTH);
xfs_alloc_log_agf(*tpp, agfbp, XFS_AGF_LENGTH);
return 0;
+
resv_init_out:
- err2 = xfs_ag_resv_init(agibp->b_pag, *tpp);
+ err2 = xfs_ag_resv_init(pag, *tpp);
if (!err2)
return error;
resv_err:
@@ -883,9 +940,8 @@ resv_err:
*/
int
xfs_ag_extend_space(
- struct xfs_mount *mp,
+ struct xfs_perag *pag,
struct xfs_trans *tp,
- struct aghdr_init_data *id,
xfs_extlen_t len)
{
struct xfs_buf *bp;
@@ -893,23 +949,20 @@ xfs_ag_extend_space(
struct xfs_agf *agf;
int error;
- /*
- * Change the agi length.
- */
- error = xfs_ialloc_read_agi(mp, tp, id->agno, &bp);
+ ASSERT(pag->pag_agno == pag->pag_mount->m_sb.sb_agcount - 1);
+
+ error = xfs_ialloc_read_agi(pag, tp, &bp);
if (error)
return error;
agi = bp->b_addr;
be32_add_cpu(&agi->agi_length, len);
- ASSERT(id->agno == mp->m_sb.sb_agcount - 1 ||
- be32_to_cpu(agi->agi_length) == mp->m_sb.sb_agblocks);
xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH);
/*
* Change agf length.
*/
- error = xfs_alloc_read_agf(mp, tp, id->agno, 0, &bp);
+ error = xfs_alloc_read_agf(pag, tp, 0, &bp);
if (error)
return error;
@@ -924,49 +977,49 @@ xfs_ag_extend_space(
* XFS_RMAP_OINFO_SKIP_UPDATE is used here to tell the rmap btree that
* this doesn't actually exist in the rmap btree.
*/
- error = xfs_rmap_free(tp, bp, bp->b_pag,
- be32_to_cpu(agf->agf_length) - len,
+ error = xfs_rmap_free(tp, bp, pag, be32_to_cpu(agf->agf_length) - len,
len, &XFS_RMAP_OINFO_SKIP_UPDATE);
if (error)
return error;
- return xfs_free_extent(tp, XFS_AGB_TO_FSB(mp, id->agno,
+ error = xfs_free_extent(tp, XFS_AGB_TO_FSB(pag->pag_mount, pag->pag_agno,
be32_to_cpu(agf->agf_length) - len),
len, &XFS_RMAP_OINFO_SKIP_UPDATE,
XFS_AG_RESV_NONE);
+ if (error)
+ return error;
+
+ /* Update perag geometry */
+ pag->block_count = be32_to_cpu(agf->agf_length);
+ __xfs_agino_range(pag->pag_mount, pag->block_count, &pag->agino_min,
+ &pag->agino_max);
+ return 0;
}
/* Retrieve AG geometry. */
int
xfs_ag_get_geometry(
- struct xfs_mount *mp,
- xfs_agnumber_t agno,
+ struct xfs_perag *pag,
struct xfs_ag_geometry *ageo)
{
struct xfs_buf *agi_bp;
struct xfs_buf *agf_bp;
struct xfs_agi *agi;
struct xfs_agf *agf;
- struct xfs_perag *pag;
unsigned int freeblks;
int error;
- if (agno >= mp->m_sb.sb_agcount)
- return -EINVAL;
-
/* Lock the AG headers. */
- error = xfs_ialloc_read_agi(mp, NULL, agno, &agi_bp);
+ error = xfs_ialloc_read_agi(pag, NULL, &agi_bp);
if (error)
return error;
- error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agf_bp);
+ error = xfs_alloc_read_agf(pag, NULL, 0, &agf_bp);
if (error)
goto out_agi;
- pag = agi_bp->b_pag;
-
/* Fill out form. */
memset(ageo, 0, sizeof(*ageo));
- ageo->ag_number = agno;
+ ageo->ag_number = pag->pag_agno;
agi = agi_bp->b_addr;
ageo->ag_icount = be32_to_cpu(agi->agi_count);
diff --git a/fs/xfs/libxfs/xfs_ag.h b/fs/xfs/libxfs/xfs_ag.h
index e411d51c2589..517a138faa66 100644
--- a/fs/xfs/libxfs/xfs_ag.h
+++ b/fs/xfs/libxfs/xfs_ag.h
@@ -67,6 +67,12 @@ struct xfs_perag {
/* for rcu-safe freeing */
struct rcu_head rcu_head;
+ /* Precalculated geometry info */
+ xfs_agblock_t block_count;
+ xfs_agblock_t min_block;
+ xfs_agino_t agino_min;
+ xfs_agino_t agino_max;
+
#ifdef __KERNEL__
/* -- kernel only structures below this line -- */
@@ -97,17 +103,11 @@ struct xfs_perag {
/* background prealloc block trimming */
struct delayed_work pag_blockgc_work;
- /*
- * Unlinked inode information. This incore information reflects
- * data stored in the AGI, so callers must hold the AGI buffer lock
- * or have some other means to control concurrency.
- */
- struct rhashtable pagi_unlinked_hash;
#endif /* __KERNEL__ */
};
int xfs_initialize_perag(struct xfs_mount *mp, xfs_agnumber_t agcount,
- xfs_agnumber_t *maxagi);
+ xfs_rfsblock_t dcount, xfs_agnumber_t *maxagi);
int xfs_initialize_perag_data(struct xfs_mount *mp, xfs_agnumber_t agno);
void xfs_free_perag(struct xfs_mount *mp);
@@ -117,6 +117,56 @@ struct xfs_perag *xfs_perag_get_tag(struct xfs_mount *mp, xfs_agnumber_t agno,
void xfs_perag_put(struct xfs_perag *pag);
/*
+ * Per-ag geometry information and validation
+ */
+xfs_agblock_t xfs_ag_block_count(struct xfs_mount *mp, xfs_agnumber_t agno);
+void xfs_agino_range(struct xfs_mount *mp, xfs_agnumber_t agno,
+ xfs_agino_t *first, xfs_agino_t *last);
+
+static inline bool
+xfs_verify_agbno(struct xfs_perag *pag, xfs_agblock_t agbno)
+{
+ if (agbno >= pag->block_count)
+ return false;
+ if (agbno <= pag->min_block)
+ return false;
+ return true;
+}
+
+/*
+ * Verify that an AG inode number pointer neither points outside the AG
+ * nor points at static metadata.
+ */
+static inline bool
+xfs_verify_agino(struct xfs_perag *pag, xfs_agino_t agino)
+{
+ if (agino < pag->agino_min)
+ return false;
+ if (agino > pag->agino_max)
+ return false;
+ return true;
+}
+
+/*
+ * Verify that an AG inode number pointer either is NULLAGINO, or that it
+ * neither points outside the AG nor points at static metadata.
+ */
+static inline bool
+xfs_verify_agino_or_null(struct xfs_perag *pag, xfs_agino_t agino)
+{
+ if (agino == NULLAGINO)
+ return true;
+ return xfs_verify_agino(pag, agino);
+}
+
+static inline bool
+xfs_ag_contains_log(struct xfs_mount *mp, xfs_agnumber_t agno)
+{
+ return mp->m_sb.sb_logstart > 0 &&
+ agno == XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart);
+}
+
+/*
* Perag iteration APIs
*/
static inline struct xfs_perag *
@@ -168,11 +218,10 @@ struct aghdr_init_data {
};
int xfs_ag_init_headers(struct xfs_mount *mp, struct aghdr_init_data *id);
-int xfs_ag_shrink_space(struct xfs_mount *mp, struct xfs_trans **tpp,
- xfs_agnumber_t agno, xfs_extlen_t delta);
-int xfs_ag_extend_space(struct xfs_mount *mp, struct xfs_trans *tp,
- struct aghdr_init_data *id, xfs_extlen_t len);
-int xfs_ag_get_geometry(struct xfs_mount *mp, xfs_agnumber_t agno,
- struct xfs_ag_geometry *ageo);
+int xfs_ag_shrink_space(struct xfs_perag *pag, struct xfs_trans **tpp,
+ xfs_extlen_t delta);
+int xfs_ag_extend_space(struct xfs_perag *pag, struct xfs_trans *tp,
+ xfs_extlen_t len);
+int xfs_ag_get_geometry(struct xfs_perag *pag, struct xfs_ag_geometry *ageo);
#endif /* __LIBXFS_AG_H */
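
The verifiers added to xfs_ag.h work off the geometry that xfs_initialize_perag() now precomputes, so callers only need a perag reference rather than re-deriving AG bounds from the superblock. A hypothetical sketch of validating an on-disk AG block number — example_agbno_ok() is illustrative, not part of this patch:

static bool example_agbno_ok(struct xfs_mount *mp, xfs_agnumber_t agno,
			     xfs_agblock_t agbno)
{
	struct xfs_perag	*pag = xfs_perag_get(mp, agno);
	bool			ok;

	if (!pag)
		return false;
	ok = xfs_verify_agbno(pag, agbno);
	xfs_perag_put(pag);
	return ok;
}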
diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c
index fe94058d4e9e..5af123d13a63 100644
--- a/fs/xfs/libxfs/xfs_ag_resv.c
+++ b/fs/xfs/libxfs/xfs_ag_resv.c
@@ -322,7 +322,7 @@ out:
* address.
*/
if (has_resv) {
- error2 = xfs_alloc_pagf_init(mp, tp, pag->pag_agno, 0);
+ error2 = xfs_alloc_read_agf(pag, tp, 0, NULL);
if (error2)
return error2;
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index d3f2886fdc08..e2bdf089c0a3 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -84,7 +84,7 @@ xfs_prealloc_blocks(
/*
* The number of blocks per AG that we withhold from xfs_mod_fdblocks to
* guarantee that we can refill the AGFL prior to allocating space in a nearly
- * full AG. Although the the space described by the free space btrees, the
+ * full AG. Although the space described by the free space btrees, the
* blocks used by the freesp btrees themselves, and the blocks owned by the
* AGFL are counted in the ondisk fdblocks, it's a mistake to let the ondisk
* free space in the AG drop so low that the free space btrees cannot refill an
@@ -248,7 +248,7 @@ xfs_alloc_get_rec(
int *stat) /* output: success/failure */
{
struct xfs_mount *mp = cur->bc_mp;
- xfs_agnumber_t agno = cur->bc_ag.pag->pag_agno;
+ struct xfs_perag *pag = cur->bc_ag.pag;
union xfs_btree_rec *rec;
int error;
@@ -263,11 +263,11 @@ xfs_alloc_get_rec(
goto out_bad_rec;
/* check for valid extent range, including overflow */
- if (!xfs_verify_agbno(mp, agno, *bno))
+ if (!xfs_verify_agbno(pag, *bno))
goto out_bad_rec;
if (*bno > *bno + *len)
goto out_bad_rec;
- if (!xfs_verify_agbno(mp, agno, *bno + *len - 1))
+ if (!xfs_verify_agbno(pag, *bno + *len - 1))
goto out_bad_rec;
return 0;
@@ -275,7 +275,8 @@ xfs_alloc_get_rec(
out_bad_rec:
xfs_warn(mp,
"%s Freespace BTree record corruption in AG %d detected!",
- cur->bc_btnum == XFS_BTNUM_BNO ? "Block" : "Size", agno);
+ cur->bc_btnum == XFS_BTNUM_BNO ? "Block" : "Size",
+ pag->pag_agno);
xfs_warn(mp,
"start block 0x%x block count 0x%x", *bno, *len);
return -EFSCORRUPTED;
@@ -703,20 +704,19 @@ const struct xfs_buf_ops xfs_agfl_buf_ops = {
/*
* Read in the allocation group free block array.
*/
-int /* error */
+int
xfs_alloc_read_agfl(
- xfs_mount_t *mp, /* mount point structure */
- xfs_trans_t *tp, /* transaction pointer */
- xfs_agnumber_t agno, /* allocation group number */
- struct xfs_buf **bpp) /* buffer for the ag free block array */
+ struct xfs_perag *pag,
+ struct xfs_trans *tp,
+ struct xfs_buf **bpp)
{
- struct xfs_buf *bp; /* return value */
- int error;
+ struct xfs_mount *mp = pag->pag_mount;
+ struct xfs_buf *bp;
+ int error;
- ASSERT(agno != NULLAGNUMBER);
error = xfs_trans_read_buf(
mp, tp, mp->m_ddev_targp,
- XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
+ XFS_AG_DADDR(mp, pag->pag_agno, XFS_AGFL_DADDR(mp)),
XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops);
if (error)
return error;
@@ -1075,7 +1075,8 @@ xfs_alloc_ag_vextent_small(
be32_to_cpu(agf->agf_flcount) <= args->minleft)
goto out;
- error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno, 0);
+ error = xfs_alloc_get_freelist(args->pag, args->tp, args->agbp,
+ &fbno, 0);
if (error)
goto error;
if (fbno == NULLAGBLOCK)
@@ -2609,7 +2610,7 @@ xfs_alloc_fix_freelist(
ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
if (!pag->pagf_init) {
- error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
+ error = xfs_alloc_read_agf(pag, tp, flags, &agbp);
if (error) {
/* Couldn't lock the AGF so skip this AG. */
if (error == -EAGAIN)
@@ -2639,7 +2640,7 @@ xfs_alloc_fix_freelist(
* Can fail if we're not blocking on locks, and it's held.
*/
if (!agbp) {
- error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
+ error = xfs_alloc_read_agf(pag, tp, flags, &agbp);
if (error) {
/* Couldn't lock the AGF so skip this AG. */
if (error == -EAGAIN)
@@ -2697,7 +2698,7 @@ xfs_alloc_fix_freelist(
else
targs.oinfo = XFS_RMAP_OINFO_AG;
while (!(flags & XFS_ALLOC_FLAG_NOSHRINK) && pag->pagf_flcount > need) {
- error = xfs_alloc_get_freelist(tp, agbp, &bno, 0);
+ error = xfs_alloc_get_freelist(pag, tp, agbp, &bno, 0);
if (error)
goto out_agbp_relse;
@@ -2712,7 +2713,7 @@ xfs_alloc_fix_freelist(
targs.alignment = targs.minlen = targs.prod = 1;
targs.type = XFS_ALLOCTYPE_THIS_AG;
targs.pag = pag;
- error = xfs_alloc_read_agfl(mp, tp, targs.agno, &agflbp);
+ error = xfs_alloc_read_agfl(pag, tp, &agflbp);
if (error)
goto out_agbp_relse;
@@ -2741,7 +2742,7 @@ xfs_alloc_fix_freelist(
* Put each allocated block on the list.
*/
for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
- error = xfs_alloc_put_freelist(tp, agbp,
+ error = xfs_alloc_put_freelist(pag, tp, agbp,
agflbp, bno, 0);
if (error)
goto out_agflbp_relse;
@@ -2767,6 +2768,7 @@ out_no_agbp:
*/
int
xfs_alloc_get_freelist(
+ struct xfs_perag *pag,
struct xfs_trans *tp,
struct xfs_buf *agbp,
xfs_agblock_t *bnop,
@@ -2779,7 +2781,6 @@ xfs_alloc_get_freelist(
int error;
uint32_t logflags;
struct xfs_mount *mp = tp->t_mountp;
- struct xfs_perag *pag;
/*
* Freelist is empty, give up.
@@ -2791,8 +2792,7 @@ xfs_alloc_get_freelist(
/*
* Read the array of free blocks.
*/
- error = xfs_alloc_read_agfl(mp, tp, be32_to_cpu(agf->agf_seqno),
- &agflbp);
+ error = xfs_alloc_read_agfl(pag, tp, &agflbp);
if (error)
return error;
@@ -2807,7 +2807,6 @@ xfs_alloc_get_freelist(
if (be32_to_cpu(agf->agf_flfirst) == xfs_agfl_size(mp))
agf->agf_flfirst = 0;
- pag = agbp->b_pag;
ASSERT(!pag->pagf_agflreset);
be32_add_cpu(&agf->agf_flcount, -1);
pag->pagf_flcount--;
@@ -2868,29 +2867,11 @@ xfs_alloc_log_agf(
}
/*
- * Interface for inode allocation to force the pag data to be initialized.
- */
-int /* error */
-xfs_alloc_pagf_init(
- xfs_mount_t *mp, /* file system mount structure */
- xfs_trans_t *tp, /* transaction pointer */
- xfs_agnumber_t agno, /* allocation group number */
- int flags) /* XFS_ALLOC_FLAGS_... */
-{
- struct xfs_buf *bp;
- int error;
-
- error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp);
- if (!error)
- xfs_trans_brelse(tp, bp);
- return error;
-}
-
-/*
* Put the block on the freelist for the allocation group.
*/
int
xfs_alloc_put_freelist(
+ struct xfs_perag *pag,
struct xfs_trans *tp,
struct xfs_buf *agbp,
struct xfs_buf *agflbp,
@@ -2899,21 +2880,22 @@ xfs_alloc_put_freelist(
{
struct xfs_mount *mp = tp->t_mountp;
struct xfs_agf *agf = agbp->b_addr;
- struct xfs_perag *pag;
__be32 *blockp;
int error;
uint32_t logflags;
__be32 *agfl_bno;
int startoff;
- if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp,
- be32_to_cpu(agf->agf_seqno), &agflbp)))
- return error;
+ if (!agflbp) {
+ error = xfs_alloc_read_agfl(pag, tp, &agflbp);
+ if (error)
+ return error;
+ }
+
be32_add_cpu(&agf->agf_fllast, 1);
if (be32_to_cpu(agf->agf_fllast) == xfs_agfl_size(mp))
agf->agf_fllast = 0;
- pag = agbp->b_pag;
ASSERT(!pag->pagf_agflreset);
be32_add_cpu(&agf->agf_flcount, 1);
pag->pagf_flcount++;
@@ -3070,61 +3052,57 @@ const struct xfs_buf_ops xfs_agf_buf_ops = {
/*
* Read in the allocation group header (free/alloc section).
*/
-int /* error */
+int
xfs_read_agf(
- struct xfs_mount *mp, /* mount point structure */
- struct xfs_trans *tp, /* transaction pointer */
- xfs_agnumber_t agno, /* allocation group number */
- int flags, /* XFS_BUF_ */
- struct xfs_buf **bpp) /* buffer for the ag freelist header */
+ struct xfs_perag *pag,
+ struct xfs_trans *tp,
+ int flags,
+ struct xfs_buf **agfbpp)
{
- int error;
+ struct xfs_mount *mp = pag->pag_mount;
+ int error;
- trace_xfs_read_agf(mp, agno);
+ trace_xfs_read_agf(pag->pag_mount, pag->pag_agno);
- ASSERT(agno != NULLAGNUMBER);
error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
- XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
- XFS_FSS_TO_BB(mp, 1), flags, bpp, &xfs_agf_buf_ops);
+ XFS_AG_DADDR(mp, pag->pag_agno, XFS_AGF_DADDR(mp)),
+ XFS_FSS_TO_BB(mp, 1), flags, agfbpp, &xfs_agf_buf_ops);
if (error)
return error;
- ASSERT(!(*bpp)->b_error);
- xfs_buf_set_ref(*bpp, XFS_AGF_REF);
+ xfs_buf_set_ref(*agfbpp, XFS_AGF_REF);
return 0;
}
/*
- * Read in the allocation group header (free/alloc section).
+ * Read in the allocation group header (free/alloc section) and initialise the
+ * perag structure if necessary. If the caller provides @agfbpp, then return the
+ * locked buffer to the caller, otherwise free it.
*/
-int /* error */
+int
xfs_alloc_read_agf(
- struct xfs_mount *mp, /* mount point structure */
- struct xfs_trans *tp, /* transaction pointer */
- xfs_agnumber_t agno, /* allocation group number */
- int flags, /* XFS_ALLOC_FLAG_... */
- struct xfs_buf **bpp) /* buffer for the ag freelist header */
+ struct xfs_perag *pag,
+ struct xfs_trans *tp,
+ int flags,
+ struct xfs_buf **agfbpp)
{
- struct xfs_agf *agf; /* ag freelist header */
- struct xfs_perag *pag; /* per allocation group data */
+ struct xfs_buf *agfbp;
+ struct xfs_agf *agf;
int error;
int allocbt_blks;
- trace_xfs_alloc_read_agf(mp, agno);
+ trace_xfs_alloc_read_agf(pag->pag_mount, pag->pag_agno);
/* We don't support trylock when freeing. */
ASSERT((flags & (XFS_ALLOC_FLAG_FREEING | XFS_ALLOC_FLAG_TRYLOCK)) !=
(XFS_ALLOC_FLAG_FREEING | XFS_ALLOC_FLAG_TRYLOCK));
- ASSERT(agno != NULLAGNUMBER);
- error = xfs_read_agf(mp, tp, agno,
+ error = xfs_read_agf(pag, tp,
(flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
- bpp);
+ &agfbp);
if (error)
return error;
- ASSERT(!(*bpp)->b_error);
- agf = (*bpp)->b_addr;
- pag = (*bpp)->b_pag;
+ agf = agfbp->b_addr;
if (!pag->pagf_init) {
pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
@@ -3138,7 +3116,7 @@ xfs_alloc_read_agf(
be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]);
pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
pag->pagf_init = 1;
- pag->pagf_agflreset = xfs_agfl_needs_reset(mp, agf);
+ pag->pagf_agflreset = xfs_agfl_needs_reset(pag->pag_mount, agf);
/*
* Update the in-core allocbt counter. Filter out the rmapbt
@@ -3148,13 +3126,14 @@ xfs_alloc_read_agf(
* counter only tracks non-root blocks.
*/
allocbt_blks = pag->pagf_btreeblks;
- if (xfs_has_rmapbt(mp))
+ if (xfs_has_rmapbt(pag->pag_mount))
allocbt_blks -= be32_to_cpu(agf->agf_rmap_blocks) - 1;
if (allocbt_blks > 0)
- atomic64_add(allocbt_blks, &mp->m_allocbt_blks);
+ atomic64_add(allocbt_blks,
+ &pag->pag_mount->m_allocbt_blks);
}
#ifdef DEBUG
- else if (!xfs_is_shutdown(mp)) {
+ else if (!xfs_is_shutdown(pag->pag_mount)) {
ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
@@ -3165,6 +3144,10 @@ xfs_alloc_read_agf(
be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
}
#endif
+ if (agfbpp)
+ *agfbpp = agfbp;
+ else
+ xfs_trans_brelse(tp, agfbp);
return 0;
}
diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h
index 84ca09b2223f..2c3f762dfb58 100644
--- a/fs/xfs/libxfs/xfs_alloc.h
+++ b/fs/xfs/libxfs/xfs_alloc.h
@@ -95,6 +95,11 @@ xfs_extlen_t xfs_alloc_longest_free_extent(struct xfs_perag *pag,
xfs_extlen_t need, xfs_extlen_t reserved);
unsigned int xfs_alloc_min_freelist(struct xfs_mount *mp,
struct xfs_perag *pag);
+int xfs_alloc_get_freelist(struct xfs_perag *pag, struct xfs_trans *tp,
+ struct xfs_buf *agfbp, xfs_agblock_t *bnop, int btreeblk);
+int xfs_alloc_put_freelist(struct xfs_perag *pag, struct xfs_trans *tp,
+ struct xfs_buf *agfbp, struct xfs_buf *agflbp,
+ xfs_agblock_t bno, int btreeblk);
/*
* Compute and fill in value of m_alloc_maxlevels.
@@ -104,17 +109,6 @@ xfs_alloc_compute_maxlevels(
struct xfs_mount *mp); /* file system mount structure */
/*
- * Get a block from the freelist.
- * Returns with the buffer for the block gotten.
- */
-int /* error */
-xfs_alloc_get_freelist(
- struct xfs_trans *tp, /* transaction pointer */
- struct xfs_buf *agbp, /* buffer containing the agf structure */
- xfs_agblock_t *bnop, /* block address retrieved from freelist */
- int btreeblk); /* destination is a AGF btree */
-
-/*
* Log the given fields from the agf structure.
*/
void
@@ -124,38 +118,6 @@ xfs_alloc_log_agf(
uint32_t fields);/* mask of fields to be logged (XFS_AGF_...) */
/*
- * Interface for inode allocation to force the pag data to be initialized.
- */
-int /* error */
-xfs_alloc_pagf_init(
- struct xfs_mount *mp, /* file system mount structure */
- struct xfs_trans *tp, /* transaction pointer */
- xfs_agnumber_t agno, /* allocation group number */
- int flags); /* XFS_ALLOC_FLAGS_... */
-
-/*
- * Put the block on the freelist for the allocation group.
- */
-int /* error */
-xfs_alloc_put_freelist(
- struct xfs_trans *tp, /* transaction pointer */
- struct xfs_buf *agbp, /* buffer for a.g. freelist header */
- struct xfs_buf *agflbp,/* buffer for a.g. free block array */
- xfs_agblock_t bno, /* block being freed */
- int btreeblk); /* owner was a AGF btree */
-
-/*
- * Read in the allocation group header (free/alloc section).
- */
-int /* error */
-xfs_alloc_read_agf(
- struct xfs_mount *mp, /* mount point structure */
- struct xfs_trans *tp, /* transaction pointer */
- xfs_agnumber_t agno, /* allocation group number */
- int flags, /* XFS_ALLOC_FLAG_... */
- struct xfs_buf **bpp); /* buffer for the ag freelist header */
-
-/*
* Allocate an extent (variable-size).
*/
int /* error */
@@ -206,10 +168,12 @@ xfs_alloc_get_rec(
xfs_extlen_t *len, /* output: length of extent */
int *stat); /* output: success/failure */
-int xfs_read_agf(struct xfs_mount *mp, struct xfs_trans *tp,
- xfs_agnumber_t agno, int flags, struct xfs_buf **bpp);
-int xfs_alloc_read_agfl(struct xfs_mount *mp, struct xfs_trans *tp,
- xfs_agnumber_t agno, struct xfs_buf **bpp);
+int xfs_read_agf(struct xfs_perag *pag, struct xfs_trans *tp, int flags,
+ struct xfs_buf **agfbpp);
+int xfs_alloc_read_agf(struct xfs_perag *pag, struct xfs_trans *tp, int flags,
+ struct xfs_buf **agfbpp);
+int xfs_alloc_read_agfl(struct xfs_perag *pag, struct xfs_trans *tp,
+ struct xfs_buf **bpp);
int xfs_free_agfl_block(struct xfs_trans *, xfs_agnumber_t, xfs_agblock_t,
struct xfs_buf *, struct xfs_owner_info *);
int xfs_alloc_fix_freelist(struct xfs_alloc_arg *args, int flags);
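
The AGF/AGFL read paths above now take a perag instead of a (mount, agno) pair, and xfs_alloc_read_agf() with a NULL buffer pointer absorbs what xfs_alloc_pagf_init() used to do. A hedged sketch of the resulting call pattern — example_init_pagf() is hypothetical:

/* Force the in-core pagf data to be initialised for one AG. */
static int example_init_pagf(struct xfs_mount *mp, xfs_agnumber_t agno)
{
	struct xfs_perag	*pag = xfs_perag_get(mp, agno);
	int			error;

	if (!pag)
		return -EINVAL;
	/* NULL agfbpp: the helper releases the AGF buffer itself. */
	error = xfs_alloc_read_agf(pag, NULL, 0, NULL);
	xfs_perag_put(pag);
	return error;
}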
diff --git a/fs/xfs/libxfs/xfs_alloc_btree.c b/fs/xfs/libxfs/xfs_alloc_btree.c
index 8c9f73cc0bee..549a3cba0234 100644
--- a/fs/xfs/libxfs/xfs_alloc_btree.c
+++ b/fs/xfs/libxfs/xfs_alloc_btree.c
@@ -60,8 +60,8 @@ xfs_allocbt_alloc_block(
xfs_agblock_t bno;
/* Allocate the new block from the freelist. If we can't, give up. */
- error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_ag.agbp,
- &bno, 1);
+ error = xfs_alloc_get_freelist(cur->bc_ag.pag, cur->bc_tp,
+ cur->bc_ag.agbp, &bno, 1);
if (error)
return error;
@@ -71,7 +71,7 @@ xfs_allocbt_alloc_block(
}
atomic64_inc(&cur->bc_mp->m_allocbt_blks);
- xfs_extent_busy_reuse(cur->bc_mp, cur->bc_ag.agbp->b_pag, bno, 1, false);
+ xfs_extent_busy_reuse(cur->bc_mp, cur->bc_ag.pag, bno, 1, false);
new->s = cpu_to_be32(bno);
@@ -89,7 +89,8 @@ xfs_allocbt_free_block(
int error;
bno = xfs_daddr_to_agbno(cur->bc_mp, xfs_buf_daddr(bp));
- error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
+ error = xfs_alloc_put_freelist(cur->bc_ag.pag, cur->bc_tp, agbp, NULL,
+ bno, 1);
if (error)
return error;
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index 224649a76cbb..e28d93d232de 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -67,12 +67,10 @@ int
xfs_inode_hasattr(
struct xfs_inode *ip)
{
- if (!XFS_IFORK_Q(ip))
+ if (!xfs_inode_has_attr_fork(ip))
return 0;
- if (!ip->i_afp)
- return 0;
- if (ip->i_afp->if_format == XFS_DINODE_FMT_EXTENTS &&
- ip->i_afp->if_nextents == 0)
+ if (ip->i_af.if_format == XFS_DINODE_FMT_EXTENTS &&
+ ip->i_af.if_nextents == 0)
return 0;
return 1;
}
@@ -85,7 +83,7 @@ bool
xfs_attr_is_leaf(
struct xfs_inode *ip)
{
- struct xfs_ifork *ifp = ip->i_afp;
+ struct xfs_ifork *ifp = &ip->i_af;
struct xfs_iext_cursor icur;
struct xfs_bmbt_irec imap;
@@ -231,7 +229,7 @@ xfs_attr_get_ilocked(
if (!xfs_inode_hasattr(args->dp))
return -ENOATTR;
- if (args->dp->i_afp->if_format == XFS_DINODE_FMT_LOCAL)
+ if (args->dp->i_af.if_format == XFS_DINODE_FMT_LOCAL)
return xfs_attr_shortform_getvalue(args);
if (xfs_attr_is_leaf(args->dp))
return xfs_attr_leaf_get(args);
@@ -354,7 +352,7 @@ xfs_attr_try_sf_addname(
/*
* Build initial attribute list (if required).
*/
- if (dp->i_afp->if_format == XFS_DINODE_FMT_EXTENTS)
+ if (dp->i_af.if_format == XFS_DINODE_FMT_EXTENTS)
xfs_attr_shortform_create(args);
error = xfs_attr_shortform_addname(args);
@@ -864,7 +862,7 @@ xfs_attr_lookup(
if (!xfs_inode_hasattr(dp))
return -ENOATTR;
- if (dp->i_afp->if_format == XFS_DINODE_FMT_LOCAL)
+ if (dp->i_af.if_format == XFS_DINODE_FMT_LOCAL)
return xfs_attr_sf_findname(args, NULL, NULL);
if (xfs_attr_is_leaf(dp)) {
@@ -1001,7 +999,7 @@ xfs_attr_set(
* If the inode doesn't have an attribute fork, add one.
* (inode must not be locked when we call this routine)
*/
- if (XFS_IFORK_Q(dp) == 0) {
+ if (xfs_inode_has_attr_fork(dp) == 0) {
int sf_size = sizeof(struct xfs_attr_sf_hdr) +
xfs_attr_sf_entsize_byname(args->namelen,
args->valuelen);
@@ -1101,7 +1099,7 @@ static inline int xfs_attr_sf_totsize(struct xfs_inode *dp)
{
struct xfs_attr_shortform *sf;
- sf = (struct xfs_attr_shortform *)dp->i_afp->if_u1.if_data;
+ sf = (struct xfs_attr_shortform *)dp->i_af.if_u1.if_data;
return be16_to_cpu(sf->hdr.totsize);
}
@@ -1558,7 +1556,7 @@ xfs_attr_node_get(
* If not in a transaction, we have to release all the buffers.
*/
out_release:
- for (i = 0; state != NULL && i < state->path.active; i++) {
+ for (i = 0; i < state->path.active; i++) {
xfs_trans_brelse(args->trans, state->path.blk[i].bp);
state->path.blk[i].bp = NULL;
}
diff --git a/fs/xfs/libxfs/xfs_attr.h b/fs/xfs/libxfs/xfs_attr.h
index dfb47fa63c6d..81be9b3e4004 100644
--- a/fs/xfs/libxfs/xfs_attr.h
+++ b/fs/xfs/libxfs/xfs_attr.h
@@ -560,9 +560,9 @@ static inline bool
xfs_attr_is_shortform(
struct xfs_inode *ip)
{
- return ip->i_afp->if_format == XFS_DINODE_FMT_LOCAL ||
- (ip->i_afp->if_format == XFS_DINODE_FMT_EXTENTS &&
- ip->i_afp->if_nextents == 0);
+ return ip->i_af.if_format == XFS_DINODE_FMT_LOCAL ||
+ (ip->i_af.if_format == XFS_DINODE_FMT_EXTENTS &&
+ ip->i_af.if_nextents == 0);
}
static inline enum xfs_delattr_state
@@ -573,10 +573,10 @@ xfs_attr_init_add_state(struct xfs_da_args *args)
 * next state, the attribute fork may be null. This can only occur
* on a pure remove, but we grab the next state before we check if a
* replace operation is being performed. If we are called from any other
- * context, i_afp is guaranteed to exist. Hence if the attr fork is
+ * context, i_af is guaranteed to exist. Hence if the attr fork is
* null, we were called from a pure remove operation and so we are done.
*/
- if (!args->dp->i_afp)
+ if (!xfs_inode_has_attr_fork(args->dp))
return XFS_DAS_DONE;
args->op_flags |= XFS_DA_OP_ADDNAME;
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index 8f47396f8dd2..beee51ad75ce 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -590,7 +590,7 @@ xfs_attr_shortform_bytesfit(
* to real extents, or the delalloc conversion will take care of the
* literal area rebalancing.
*/
- if (bytes <= XFS_IFORK_ASIZE(dp))
+ if (bytes <= xfs_inode_attr_fork_size(dp))
return dp->i_forkoff;
/*
@@ -682,7 +682,7 @@ xfs_attr_shortform_create(
struct xfs_da_args *args)
{
struct xfs_inode *dp = args->dp;
- struct xfs_ifork *ifp = dp->i_afp;
+ struct xfs_ifork *ifp = &dp->i_af;
struct xfs_attr_sf_hdr *hdr;
trace_xfs_attr_sf_create(args);
@@ -719,7 +719,7 @@ xfs_attr_sf_findname(
int end;
int i;
- sf = (struct xfs_attr_shortform *)args->dp->i_afp->if_u1.if_data;
+ sf = (struct xfs_attr_shortform *)args->dp->i_af.if_u1.if_data;
sfe = &sf->list[0];
end = sf->hdr.count;
for (i = 0; i < end; sfe = xfs_attr_sf_nextentry(sfe),
@@ -764,7 +764,7 @@ xfs_attr_shortform_add(
mp = dp->i_mount;
dp->i_forkoff = forkoff;
- ifp = dp->i_afp;
+ ifp = &dp->i_af;
ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
sf = (struct xfs_attr_shortform *)ifp->if_u1.if_data;
if (xfs_attr_sf_findname(args, &sfe, NULL) == -EEXIST)
@@ -797,11 +797,9 @@ xfs_attr_fork_remove(
struct xfs_inode *ip,
struct xfs_trans *tp)
{
- ASSERT(ip->i_afp->if_nextents == 0);
+ ASSERT(ip->i_af.if_nextents == 0);
- xfs_idestroy_fork(ip->i_afp);
- kmem_cache_free(xfs_ifork_cache, ip->i_afp);
- ip->i_afp = NULL;
+ xfs_ifork_zap_attr(ip);
ip->i_forkoff = 0;
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}
@@ -825,7 +823,7 @@ xfs_attr_sf_removename(
dp = args->dp;
mp = dp->i_mount;
- sf = (struct xfs_attr_shortform *)dp->i_afp->if_u1.if_data;
+ sf = (struct xfs_attr_shortform *)dp->i_af.if_u1.if_data;
error = xfs_attr_sf_findname(args, &sfe, &base);
@@ -889,7 +887,7 @@ xfs_attr_shortform_lookup(xfs_da_args_t *args)
trace_xfs_attr_sf_lookup(args);
- ifp = args->dp->i_afp;
+ ifp = &args->dp->i_af;
ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
sf = (struct xfs_attr_shortform *)ifp->if_u1.if_data;
sfe = &sf->list[0];
@@ -917,8 +915,8 @@ xfs_attr_shortform_getvalue(
struct xfs_attr_sf_entry *sfe;
int i;
- ASSERT(args->dp->i_afp->if_format == XFS_DINODE_FMT_LOCAL);
- sf = (struct xfs_attr_shortform *)args->dp->i_afp->if_u1.if_data;
+ ASSERT(args->dp->i_af.if_format == XFS_DINODE_FMT_LOCAL);
+ sf = (struct xfs_attr_shortform *)args->dp->i_af.if_u1.if_data;
sfe = &sf->list[0];
for (i = 0; i < sf->hdr.count;
sfe = xfs_attr_sf_nextentry(sfe), i++) {
@@ -948,7 +946,7 @@ xfs_attr_shortform_to_leaf(
trace_xfs_attr_sf_to_leaf(args);
dp = args->dp;
- ifp = dp->i_afp;
+ ifp = &dp->i_af;
sf = (struct xfs_attr_shortform *)ifp->if_u1.if_data;
size = be16_to_cpu(sf->hdr.totsize);
tmpbuffer = kmem_alloc(size, 0);
@@ -1055,8 +1053,8 @@ xfs_attr_shortform_verify(
int i;
int64_t size;
- ASSERT(ip->i_afp->if_format == XFS_DINODE_FMT_LOCAL);
- ifp = XFS_IFORK_PTR(ip, XFS_ATTR_FORK);
+ ASSERT(ip->i_af.if_format == XFS_DINODE_FMT_LOCAL);
+ ifp = xfs_ifork_ptr(ip, XFS_ATTR_FORK);
sfp = (struct xfs_attr_shortform *)ifp->if_u1.if_data;
size = ifp->if_bytes;
diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
index 7298c148f848..d440393b40eb 100644
--- a/fs/xfs/libxfs/xfs_attr_remote.c
+++ b/fs/xfs/libxfs/xfs_attr_remote.c
@@ -543,6 +543,7 @@ xfs_attr_rmtval_stale(
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_buf *bp;
+ int error;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
@@ -550,14 +551,18 @@ xfs_attr_rmtval_stale(
XFS_IS_CORRUPT(mp, map->br_startblock == HOLESTARTBLOCK))
return -EFSCORRUPTED;
- bp = xfs_buf_incore(mp->m_ddev_targp,
+ error = xfs_buf_incore(mp->m_ddev_targp,
XFS_FSB_TO_DADDR(mp, map->br_startblock),
- XFS_FSB_TO_BB(mp, map->br_blockcount), incore_flags);
- if (bp) {
- xfs_buf_stale(bp);
- xfs_buf_relse(bp);
+ XFS_FSB_TO_BB(mp, map->br_blockcount),
+ incore_flags, &bp);
+ if (error) {
+ if (error == -ENOENT)
+ return 0;
+ return error;
}
+ xfs_buf_stale(bp);
+ xfs_buf_relse(bp);
return 0;
}
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 6833110d1bd4..e56723dc9cd5 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -128,7 +128,7 @@ xfs_bmbt_lookup_first(
*/
static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
{
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
return whichfork != XFS_COW_FORK &&
ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
@@ -140,7 +140,7 @@ static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
*/
static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
{
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
return whichfork != XFS_COW_FORK &&
ifp->if_format == XFS_DINODE_FMT_BTREE &&
@@ -319,7 +319,7 @@ xfs_bmap_check_leaf_extents(
int whichfork) /* data or attr fork */
{
struct xfs_mount *mp = ip->i_mount;
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
struct xfs_btree_block *block; /* current btree block */
xfs_fsblock_t bno; /* block # of "block" */
struct xfs_buf *bp; /* buffer for "block" */
@@ -538,7 +538,7 @@ xfs_bmap_btree_to_extents(
int *logflagsp, /* inode logging flags */
int whichfork) /* data or attr fork */
{
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
struct xfs_mount *mp = ip->i_mount;
struct xfs_btree_block *rblock = ifp->if_broot;
struct xfs_btree_block *cblock;/* child btree block */
@@ -616,7 +616,7 @@ xfs_bmap_extents_to_btree(
mp = ip->i_mount;
ASSERT(whichfork != XFS_COW_FORK);
- ifp = XFS_IFORK_PTR(ip, whichfork);
+ ifp = xfs_ifork_ptr(ip, whichfork);
ASSERT(ifp->if_format == XFS_DINODE_FMT_EXTENTS);
/*
@@ -745,7 +745,7 @@ xfs_bmap_local_to_extents_empty(
struct xfs_inode *ip,
int whichfork)
{
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
ASSERT(whichfork != XFS_COW_FORK);
ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
@@ -785,7 +785,7 @@ xfs_bmap_local_to_extents(
* So sending the data fork of a regular inode is invalid.
*/
ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
- ifp = XFS_IFORK_PTR(ip, whichfork);
+ ifp = xfs_ifork_ptr(ip, whichfork);
ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
if (!ifp->if_bytes) {
@@ -880,7 +880,7 @@ xfs_bmap_add_attrfork_btree(
mp = ip->i_mount;
- if (XFS_BMAP_BMDR_SPACE(block) <= XFS_IFORK_DSIZE(ip))
+ if (XFS_BMAP_BMDR_SPACE(block) <= xfs_inode_data_fork_size(ip))
*flags |= XFS_ILOG_DBROOT;
else {
cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
@@ -920,7 +920,7 @@ xfs_bmap_add_attrfork_extents(
int error; /* error return value */
if (ip->i_df.if_nextents * sizeof(struct xfs_bmbt_rec) <=
- XFS_IFORK_DSIZE(ip))
+ xfs_inode_data_fork_size(ip))
return 0;
cur = NULL;
error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
@@ -951,7 +951,7 @@ xfs_bmap_add_attrfork_local(
{
struct xfs_da_args dargs; /* args for dir/attr code */
- if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
+ if (ip->i_df.if_bytes <= xfs_inode_data_fork_size(ip))
return 0;
if (S_ISDIR(VFS_I(ip)->i_mode)) {
@@ -1023,7 +1023,7 @@ xfs_bmap_add_attrfork(
int logflags; /* logging flags */
int error; /* error return value */
- ASSERT(XFS_IFORK_Q(ip) == 0);
+ ASSERT(xfs_inode_has_attr_fork(ip) == 0);
mp = ip->i_mount;
ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
@@ -1034,16 +1034,15 @@ xfs_bmap_add_attrfork(
rsvd, &tp);
if (error)
return error;
- if (XFS_IFORK_Q(ip))
+ if (xfs_inode_has_attr_fork(ip))
goto trans_cancel;
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
error = xfs_bmap_set_attrforkoff(ip, size, &version);
if (error)
goto trans_cancel;
- ASSERT(ip->i_afp == NULL);
- ip->i_afp = xfs_ifork_alloc(XFS_DINODE_FMT_EXTENTS, 0);
+ xfs_ifork_init_attr(ip, XFS_DINODE_FMT_EXTENTS, 0);
logflags = 0;
switch (ip->i_df.if_format) {
case XFS_DINODE_FMT_LOCAL:
@@ -1116,7 +1115,7 @@ xfs_iread_bmbt_block(
xfs_extnum_t num_recs;
xfs_extnum_t j;
int whichfork = cur->bc_ino.whichfork;
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
block = xfs_btree_get_block(cur, level, &bp);
@@ -1164,7 +1163,7 @@ xfs_iread_extents(
int whichfork)
{
struct xfs_iread_state ir;
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
struct xfs_mount *mp = ip->i_mount;
struct xfs_btree_cur *cur;
int error;
@@ -1208,7 +1207,7 @@ xfs_bmap_first_unused(
xfs_fileoff_t *first_unused, /* unused block */
int whichfork) /* data or attr fork */
{
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
struct xfs_bmbt_irec got;
struct xfs_iext_cursor icur;
xfs_fileoff_t lastaddr = 0;
@@ -1255,7 +1254,7 @@ xfs_bmap_last_before(
xfs_fileoff_t *last_block, /* last block */
int whichfork) /* data or attr fork */
{
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
struct xfs_bmbt_irec got;
struct xfs_iext_cursor icur;
int error;
@@ -1289,7 +1288,7 @@ xfs_bmap_last_extent(
struct xfs_bmbt_irec *rec,
int *is_empty)
{
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
struct xfs_iext_cursor icur;
int error;
@@ -1355,7 +1354,7 @@ xfs_bmap_last_offset(
xfs_fileoff_t *last_block,
int whichfork)
{
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
struct xfs_bmbt_irec rec;
int is_empty;
int error;
@@ -1389,7 +1388,7 @@ xfs_bmap_add_extent_delay_real(
int whichfork)
{
struct xfs_mount *mp = bma->ip->i_mount;
- struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(bma->ip, whichfork);
struct xfs_bmbt_irec *new = &bma->got;
int error; /* error return value */
int i; /* temp state */
@@ -1955,7 +1954,7 @@ xfs_bmap_add_extent_unwritten_real(
*logflagsp = 0;
cur = *curp;
- ifp = XFS_IFORK_PTR(ip, whichfork);
+ ifp = xfs_ifork_ptr(ip, whichfork);
ASSERT(!isnullstartblock(new->br_startblock));
@@ -2480,7 +2479,7 @@ xfs_bmap_add_extent_hole_delay(
uint32_t state = xfs_bmap_fork_to_state(whichfork);
xfs_filblks_t temp; /* temp for indirect calculations */
- ifp = XFS_IFORK_PTR(ip, whichfork);
+ ifp = xfs_ifork_ptr(ip, whichfork);
ASSERT(isnullstartblock(new->br_startblock));
/*
@@ -2616,7 +2615,7 @@ xfs_bmap_add_extent_hole_real(
int *logflagsp,
uint32_t flags)
{
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
struct xfs_mount *mp = ip->i_mount;
struct xfs_btree_cur *cur = *curp;
int error; /* error return value */
@@ -3185,7 +3184,8 @@ xfs_bmap_longest_free_extent(
pag = xfs_perag_get(mp, ag);
if (!pag->pagf_init) {
- error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK);
+ error = xfs_alloc_read_agf(pag, tp, XFS_ALLOC_FLAG_TRYLOCK,
+ NULL);
if (error) {
/* Couldn't lock the AGF, so skip this AG. */
if (error == -EAGAIN) {
@@ -3866,7 +3866,7 @@ xfs_bmapi_read(
{
struct xfs_mount *mp = ip->i_mount;
int whichfork = xfs_bmapi_whichfork(flags);
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
struct xfs_bmbt_irec got;
xfs_fileoff_t obno;
xfs_fileoff_t end;
@@ -3959,7 +3959,7 @@ xfs_bmapi_reserve_delalloc(
int eof)
{
struct xfs_mount *mp = ip->i_mount;
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
xfs_extlen_t alen;
xfs_extlen_t indlen;
int error;
@@ -4086,7 +4086,7 @@ xfs_bmapi_allocate(
{
struct xfs_mount *mp = bma->ip->i_mount;
int whichfork = xfs_bmapi_whichfork(bma->flags);
- struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(bma->ip, whichfork);
int tmp_logflags = 0;
int error;
@@ -4185,7 +4185,7 @@ xfs_bmapi_convert_unwritten(
uint32_t flags)
{
int whichfork = xfs_bmapi_whichfork(flags);
- struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(bma->ip, whichfork);
int tmp_logflags = 0;
int error;
@@ -4262,7 +4262,7 @@ xfs_bmapi_minleft(
struct xfs_inode *ip,
int fork)
{
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, fork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, fork);
if (tp && tp->t_firstblock != NULLFSBLOCK)
return 0;
@@ -4283,7 +4283,7 @@ xfs_bmapi_finish(
int whichfork,
int error)
{
- struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(bma->ip, whichfork);
if ((bma->logflags & xfs_ilog_fext(whichfork)) &&
ifp->if_format != XFS_DINODE_FMT_EXTENTS)
@@ -4322,7 +4322,7 @@ xfs_bmapi_write(
};
struct xfs_mount *mp = ip->i_mount;
int whichfork = xfs_bmapi_whichfork(flags);
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
xfs_fileoff_t end; /* end of mapped file region */
bool eof = false; /* after the end of extents */
int error; /* error return */
@@ -4503,7 +4503,7 @@ xfs_bmapi_convert_delalloc(
struct iomap *iomap,
unsigned int *seq)
{
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
struct xfs_mount *mp = ip->i_mount;
xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
struct xfs_bmalloca bma = { NULL };
@@ -4640,7 +4640,7 @@ xfs_bmapi_remap(
int whichfork = xfs_bmapi_whichfork(flags);
int logflags = 0, error;
- ifp = XFS_IFORK_PTR(ip, whichfork);
+ ifp = xfs_ifork_ptr(ip, whichfork);
ASSERT(len > 0);
ASSERT(len <= (xfs_filblks_t)XFS_MAX_BMBT_EXTLEN);
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
@@ -4797,7 +4797,7 @@ xfs_bmap_del_extent_delay(
struct xfs_bmbt_irec *del)
{
struct xfs_mount *mp = ip->i_mount;
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
struct xfs_bmbt_irec new;
int64_t da_old, da_new, da_diff = 0;
xfs_fileoff_t del_endoff, got_endoff;
@@ -4924,7 +4924,7 @@ xfs_bmap_del_extent_cow(
struct xfs_bmbt_irec *del)
{
struct xfs_mount *mp = ip->i_mount;
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_COW_FORK);
struct xfs_bmbt_irec new;
xfs_fileoff_t del_endoff, got_endoff;
uint32_t state = BMAP_COWFORK;
@@ -5022,7 +5022,7 @@ xfs_bmap_del_extent_real(
mp = ip->i_mount;
XFS_STATS_INC(mp, xs_del_exlist);
- ifp = XFS_IFORK_PTR(ip, whichfork);
+ ifp = xfs_ifork_ptr(ip, whichfork);
ASSERT(del->br_blockcount > 0);
xfs_iext_get_extent(ifp, icur, &got);
ASSERT(got.br_startoff <= del->br_startoff);
@@ -5288,7 +5288,7 @@ __xfs_bunmapi(
whichfork = xfs_bmapi_whichfork(flags);
ASSERT(whichfork != XFS_COW_FORK);
- ifp = XFS_IFORK_PTR(ip, whichfork);
+ ifp = xfs_ifork_ptr(ip, whichfork);
if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)))
return -EFSCORRUPTED;
if (xfs_is_shutdown(mp))
@@ -5629,7 +5629,7 @@ xfs_bmse_merge(
struct xfs_btree_cur *cur,
int *logflags) /* output */
{
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
struct xfs_bmbt_irec new;
xfs_filblks_t blockcount;
int error, i;
@@ -5750,7 +5750,7 @@ xfs_bmap_collapse_extents(
{
int whichfork = XFS_DATA_FORK;
struct xfs_mount *mp = ip->i_mount;
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
struct xfs_btree_cur *cur = NULL;
struct xfs_bmbt_irec got, prev;
struct xfs_iext_cursor icur;
@@ -5865,7 +5865,7 @@ xfs_bmap_insert_extents(
{
int whichfork = XFS_DATA_FORK;
struct xfs_mount *mp = ip->i_mount;
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
struct xfs_btree_cur *cur = NULL;
struct xfs_bmbt_irec got, next;
struct xfs_iext_cursor icur;
@@ -5965,7 +5965,7 @@ xfs_bmap_split_extent(
xfs_fileoff_t split_fsb)
{
int whichfork = XFS_DATA_FORK;
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
struct xfs_btree_cur *cur = NULL;
struct xfs_bmbt_irec got;
struct xfs_bmbt_irec new; /* split extent */
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
index 2b77d45c215f..cfa052d40105 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.c
+++ b/fs/xfs/libxfs/xfs_bmap_btree.c
@@ -304,7 +304,7 @@ xfs_bmbt_get_minrecs(
if (level == cur->bc_nlevels - 1) {
struct xfs_ifork *ifp;
- ifp = XFS_IFORK_PTR(cur->bc_ino.ip,
+ ifp = xfs_ifork_ptr(cur->bc_ino.ip,
cur->bc_ino.whichfork);
return xfs_bmbt_maxrecs(cur->bc_mp,
@@ -322,7 +322,7 @@ xfs_bmbt_get_maxrecs(
if (level == cur->bc_nlevels - 1) {
struct xfs_ifork *ifp;
- ifp = XFS_IFORK_PTR(cur->bc_ino.ip,
+ ifp = xfs_ifork_ptr(cur->bc_ino.ip,
cur->bc_ino.whichfork);
return xfs_bmbt_maxrecs(cur->bc_mp,
@@ -550,7 +550,7 @@ xfs_bmbt_init_cursor(
struct xfs_inode *ip, /* inode owning the btree */
int whichfork) /* data or attr fork */
{
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
struct xfs_btree_cur *cur;
ASSERT(whichfork != XFS_COW_FORK);
@@ -564,7 +564,7 @@ xfs_bmbt_init_cursor(
if (xfs_has_crc(mp))
cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
- cur->bc_ino.forksize = XFS_IFORK_SIZE(ip, whichfork);
+ cur->bc_ino.forksize = xfs_inode_fork_size(ip, whichfork);
cur->bc_ino.ip = ip;
cur->bc_ino.allocated = 0;
cur->bc_ino.flags = 0;
@@ -664,7 +664,7 @@ xfs_bmbt_change_owner(
ASSERT(tp || buffer_list);
ASSERT(!(tp && buffer_list));
- ASSERT(XFS_IFORK_PTR(ip, whichfork)->if_format == XFS_DINODE_FMT_BTREE);
+ ASSERT(xfs_ifork_ptr(ip, whichfork)->if_format == XFS_DINODE_FMT_BTREE);
cur = xfs_bmbt_init_cursor(ip->i_mount, tp, ip, whichfork);
cur->bc_ino.flags |= XFS_BTCUR_BMBT_INVALID_OWNER;
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index 2eecc49fc1b2..4c16c8c31fcb 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -91,10 +91,9 @@ xfs_btree_check_lblock_siblings(
static inline xfs_failaddr_t
xfs_btree_check_sblock_siblings(
- struct xfs_mount *mp,
+ struct xfs_perag *pag,
struct xfs_btree_cur *cur,
int level,
- xfs_agnumber_t agno,
xfs_agblock_t agbno,
__be32 dsibling)
{
@@ -110,7 +109,7 @@ xfs_btree_check_sblock_siblings(
if (!xfs_btree_check_sptr(cur, sibling, level + 1))
return __this_address;
} else {
- if (!xfs_verify_agbno(mp, agno, sibling))
+ if (!xfs_verify_agbno(pag, sibling))
return __this_address;
}
return NULL;
@@ -195,11 +194,11 @@ __xfs_btree_check_sblock(
struct xfs_buf *bp)
{
struct xfs_mount *mp = cur->bc_mp;
+ struct xfs_perag *pag = cur->bc_ag.pag;
xfs_btnum_t btnum = cur->bc_btnum;
int crc = xfs_has_crc(mp);
xfs_failaddr_t fa;
xfs_agblock_t agbno = NULLAGBLOCK;
- xfs_agnumber_t agno = NULLAGNUMBER;
if (crc) {
if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid))
@@ -217,16 +216,14 @@ __xfs_btree_check_sblock(
cur->bc_ops->get_maxrecs(cur, level))
return __this_address;
- if (bp) {
+ if (bp)
agbno = xfs_daddr_to_agbno(mp, xfs_buf_daddr(bp));
- agno = xfs_daddr_to_agno(mp, xfs_buf_daddr(bp));
- }
- fa = xfs_btree_check_sblock_siblings(mp, cur, level, agno, agbno,
+ fa = xfs_btree_check_sblock_siblings(pag, cur, level, agbno,
block->bb_u.s.bb_leftsib);
if (!fa)
- fa = xfs_btree_check_sblock_siblings(mp, cur, level, agno,
- agbno, block->bb_u.s.bb_rightsib);
+ fa = xfs_btree_check_sblock_siblings(pag, cur, level, agbno,
+ block->bb_u.s.bb_rightsib);
return fa;
}
@@ -288,7 +285,7 @@ xfs_btree_check_sptr(
{
if (level <= 0)
return false;
- return xfs_verify_agbno(cur->bc_mp, cur->bc_ag.pag->pag_agno, agbno);
+ return xfs_verify_agbno(cur->bc_ag.pag, agbno);
}
/*
@@ -725,7 +722,7 @@ xfs_btree_ifork_ptr(
if (cur->bc_flags & XFS_BTREE_STAGING)
return cur->bc_ino.ifake->if_fork;
- return XFS_IFORK_PTR(cur->bc_ino.ip, cur->bc_ino.whichfork);
+ return xfs_ifork_ptr(cur->bc_ino.ip, cur->bc_ino.whichfork);
}
/*
@@ -3559,7 +3556,7 @@ xfs_btree_kill_iroot(
{
int whichfork = cur->bc_ino.whichfork;
struct xfs_inode *ip = cur->bc_ino.ip;
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
struct xfs_btree_block *block;
struct xfs_btree_block *cblock;
union xfs_btree_key *kp;
@@ -4595,7 +4592,6 @@ xfs_btree_sblock_verify(
{
struct xfs_mount *mp = bp->b_mount;
struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
- xfs_agnumber_t agno;
xfs_agblock_t agbno;
xfs_failaddr_t fa;
@@ -4604,12 +4600,11 @@ xfs_btree_sblock_verify(
return __this_address;
/* sibling pointer verification */
- agno = xfs_daddr_to_agno(mp, xfs_buf_daddr(bp));
agbno = xfs_daddr_to_agbno(mp, xfs_buf_daddr(bp));
- fa = xfs_btree_check_sblock_siblings(mp, NULL, -1, agno, agbno,
+ fa = xfs_btree_check_sblock_siblings(bp->b_pag, NULL, -1, agbno,
block->bb_u.s.bb_leftsib);
if (!fa)
- fa = xfs_btree_check_sblock_siblings(mp, NULL, -1, agno, agbno,
+ fa = xfs_btree_check_sblock_siblings(bp->b_pag, NULL, -1, agbno,
block->bb_u.s.bb_rightsib);
return fa;
}
diff --git a/fs/xfs/libxfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c
index 3cd51fa3837b..76eedc2756b3 100644
--- a/fs/xfs/libxfs/xfs_dir2.c
+++ b/fs/xfs/libxfs/xfs_dir2.c
@@ -193,7 +193,7 @@ xfs_dir_isempty(
ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
if (dp->i_disk_size == 0) /* might happen during shutdown. */
return 1;
- if (dp->i_disk_size > XFS_IFORK_DSIZE(dp))
+ if (dp->i_disk_size > xfs_inode_data_fork_size(dp))
return 0;
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
return !sfp->count;
diff --git a/fs/xfs/libxfs/xfs_dir2_block.c b/fs/xfs/libxfs/xfs_dir2_block.c
index df0869bba275..00f960a703b2 100644
--- a/fs/xfs/libxfs/xfs_dir2_block.c
+++ b/fs/xfs/libxfs/xfs_dir2_block.c
@@ -842,7 +842,7 @@ xfs_dir2_block_removename(
* See if the size as a shortform is good enough.
*/
size = xfs_dir2_block_sfsize(dp, hdr, &sfh);
- if (size > XFS_IFORK_DSIZE(dp))
+ if (size > xfs_inode_data_fork_size(dp))
return 0;
/*
@@ -1055,7 +1055,7 @@ xfs_dir2_leaf_to_block(
* Now see if the resulting block can be shrunken to shortform.
*/
size = xfs_dir2_block_sfsize(dp, hdr, &sfh);
- if (size > XFS_IFORK_DSIZE(dp))
+ if (size > xfs_inode_data_fork_size(dp))
return 0;
return xfs_dir2_block_to_sf(args, dbp, size, &sfh);
@@ -1071,7 +1071,7 @@ xfs_dir2_sf_to_block(
struct xfs_trans *tp = args->trans;
struct xfs_inode *dp = args->dp;
struct xfs_mount *mp = dp->i_mount;
- struct xfs_ifork *ifp = XFS_IFORK_PTR(dp, XFS_DATA_FORK);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(dp, XFS_DATA_FORK);
struct xfs_da_geometry *geo = args->geo;
xfs_dir2_db_t blkno; /* dir-relative block # (0) */
xfs_dir2_data_hdr_t *hdr; /* block header */
diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c
index 5a97a87eaa20..003812fd7d35 100644
--- a/fs/xfs/libxfs/xfs_dir2_sf.c
+++ b/fs/xfs/libxfs/xfs_dir2_sf.c
@@ -237,7 +237,7 @@ xfs_dir2_block_sfsize(
(i8count ? /* inumber */
count * XFS_INO64_SIZE :
count * XFS_INO32_SIZE);
- if (size > XFS_IFORK_DSIZE(dp))
+ if (size > xfs_inode_data_fork_size(dp))
return size; /* size value is a failure */
}
/*
@@ -406,7 +406,7 @@ xfs_dir2_sf_addname(
* Won't fit as shortform any more (due to size),
* or the pick routine says it won't (due to offset values).
*/
- if (new_isize > XFS_IFORK_DSIZE(dp) ||
+ if (new_isize > xfs_inode_data_fork_size(dp) ||
(pick =
xfs_dir2_sf_addname_pick(args, objchange, &sfep, &offset)) == 0) {
/*
@@ -710,7 +710,7 @@ xfs_dir2_sf_verify(
struct xfs_inode *ip)
{
struct xfs_mount *mp = ip->i_mount;
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
struct xfs_dir2_sf_hdr *sfp;
struct xfs_dir2_sf_entry *sfep;
struct xfs_dir2_sf_entry *next_sfep;
@@ -1031,7 +1031,7 @@ xfs_dir2_sf_replace_needblock(
newsize = dp->i_df.if_bytes + (sfp->count + 1) * XFS_INO64_DIFF;
return inum > XFS_DIR2_MAX_SHORT_INUM &&
- sfp->i8count == 0 && newsize > XFS_IFORK_DSIZE(dp);
+ sfp->i8count == 0 && newsize > xfs_inode_data_fork_size(dp);
}
/*
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index afdfc8108c5f..b55bdfa9c8a8 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -704,7 +704,7 @@ struct xfs_agfl {
* When the bigtime feature is enabled, ondisk inode timestamps become an
* unsigned 64-bit nanoseconds counter. This means that the bigtime inode
* timestamp epoch is the start of the classic timestamp range, which is
- * Dec 31 20:45:52 UTC 1901. Because the epochs are not the same, callers
+ * Dec 13 20:45:52 UTC 1901. Because the epochs are not the same, callers
* /must/ use the bigtime conversion functions when encoding and decoding raw
* timestamps.
*/
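
Context for the date fix above: a classic XFS timestamp is a signed 32-bit seconds counter, so its minimum value is S32_MIN = -2147483648 seconds relative to the Unix epoch, which lands on 1901-12-13 20:45:52 UTC; hence "Dec 13", not "Dec 31". A minimal sketch of the epoch shift, assuming the helper names used elsewhere in xfs_format.h (XFS_BIGTIME_EPOCH_OFFSET, xfs_bigtime_to_unix), which are not part of the hunk shown here:

	/* bigtime epoch sits at the start of the classic 32-bit range */
	#define XFS_BIGTIME_EPOCH_OFFSET	(-(int64_t)S32_MIN)

	/*
	 * The ondisk bigtime value counts nanoseconds from that epoch; after
	 * dividing out NSEC_PER_SEC, shift the seconds back to Unix time.
	 */
	static inline time64_t xfs_bigtime_to_unix(uint64_t ondisk_seconds)
	{
		return (time64_t)ondisk_seconds - XFS_BIGTIME_EPOCH_OFFSET;
	}
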
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index bf2f4bc89193..6cdfd64bc56b 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -105,7 +105,6 @@ xfs_inobt_get_rec(
int *stat)
{
struct xfs_mount *mp = cur->bc_mp;
- xfs_agnumber_t agno = cur->bc_ag.pag->pag_agno;
union xfs_btree_rec *rec;
int error;
uint64_t realfree;
@@ -116,7 +115,7 @@ xfs_inobt_get_rec(
xfs_inobt_btrec_to_irec(mp, rec, irec);
- if (!xfs_verify_agino(mp, agno, irec->ir_startino))
+ if (!xfs_verify_agino(cur->bc_ag.pag, irec->ir_startino))
goto out_bad_rec;
if (irec->ir_count < XFS_INODES_PER_HOLEMASK_BIT ||
irec->ir_count > XFS_INODES_PER_CHUNK)
@@ -137,7 +136,8 @@ xfs_inobt_get_rec(
out_bad_rec:
xfs_warn(mp,
"%s Inode BTree record corruption in AG %d detected!",
- cur->bc_btnum == XFS_BTNUM_INO ? "Used" : "Free", agno);
+ cur->bc_btnum == XFS_BTNUM_INO ? "Used" : "Free",
+ cur->bc_ag.pag->pag_agno);
xfs_warn(mp,
"start inode 0x%x, count 0x%x, free 0x%x freemask 0x%llx, holemask 0x%x",
irec->ir_startino, irec->ir_count, irec->ir_freecount,
@@ -1610,7 +1610,7 @@ xfs_dialloc_good_ag(
return false;
if (!pag->pagi_init) {
- error = xfs_ialloc_pagi_init(mp, tp, pag->pag_agno);
+ error = xfs_ialloc_read_agi(pag, tp, NULL);
if (error)
return false;
}
@@ -1621,7 +1621,7 @@ xfs_dialloc_good_ag(
return false;
if (!pag->pagf_init) {
- error = xfs_alloc_pagf_init(mp, tp, pag->pag_agno, flags);
+ error = xfs_alloc_read_agf(pag, tp, flags, NULL);
if (error)
return false;
}
@@ -1679,7 +1679,7 @@ xfs_dialloc_try_ag(
* Then read in the AGI buffer and recheck with the AGI buffer
* lock held.
*/
- error = xfs_ialloc_read_agi(pag->pag_mount, *tpp, pag->pag_agno, &agbp);
+ error = xfs_ialloc_read_agi(pag, *tpp, &agbp);
if (error)
return error;
@@ -2169,7 +2169,7 @@ xfs_difree(
/*
* Get the allocation group header.
*/
- error = xfs_ialloc_read_agi(mp, tp, pag->pag_agno, &agbp);
+ error = xfs_ialloc_read_agi(pag, tp, &agbp);
if (error) {
xfs_warn(mp, "%s: xfs_ialloc_read_agi() returned error %d.",
__func__, error);
@@ -2215,7 +2215,7 @@ xfs_imap_lookup(
int error;
int i;
- error = xfs_ialloc_read_agi(mp, tp, pag->pag_agno, &agbp);
+ error = xfs_ialloc_read_agi(pag, tp, &agbp);
if (error) {
xfs_alert(mp,
"%s: xfs_ialloc_read_agi() returned error %d, agno %d",
@@ -2571,47 +2571,48 @@ const struct xfs_buf_ops xfs_agi_buf_ops = {
*/
int
xfs_read_agi(
- struct xfs_mount *mp, /* file system mount structure */
- struct xfs_trans *tp, /* transaction pointer */
- xfs_agnumber_t agno, /* allocation group number */
- struct xfs_buf **bpp) /* allocation group hdr buf */
+ struct xfs_perag *pag,
+ struct xfs_trans *tp,
+ struct xfs_buf **agibpp)
{
+ struct xfs_mount *mp = pag->pag_mount;
int error;
- trace_xfs_read_agi(mp, agno);
+ trace_xfs_read_agi(pag->pag_mount, pag->pag_agno);
- ASSERT(agno != NULLAGNUMBER);
error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
- XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
- XFS_FSS_TO_BB(mp, 1), 0, bpp, &xfs_agi_buf_ops);
+ XFS_AG_DADDR(mp, pag->pag_agno, XFS_AGI_DADDR(mp)),
+ XFS_FSS_TO_BB(mp, 1), 0, agibpp, &xfs_agi_buf_ops);
if (error)
return error;
if (tp)
- xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_AGI_BUF);
+ xfs_trans_buf_set_type(tp, *agibpp, XFS_BLFT_AGI_BUF);
- xfs_buf_set_ref(*bpp, XFS_AGI_REF);
+ xfs_buf_set_ref(*agibpp, XFS_AGI_REF);
return 0;
}
+/*
+ * Read in the agi and initialise the per-ag data. If the caller supplies a
+ * @agibpp, return the locked AGI buffer to them, otherwise release it.
+ */
int
xfs_ialloc_read_agi(
- struct xfs_mount *mp, /* file system mount structure */
- struct xfs_trans *tp, /* transaction pointer */
- xfs_agnumber_t agno, /* allocation group number */
- struct xfs_buf **bpp) /* allocation group hdr buf */
+ struct xfs_perag *pag,
+ struct xfs_trans *tp,
+ struct xfs_buf **agibpp)
{
- struct xfs_agi *agi; /* allocation group header */
- struct xfs_perag *pag; /* per allocation group data */
+ struct xfs_buf *agibp;
+ struct xfs_agi *agi;
int error;
- trace_xfs_ialloc_read_agi(mp, agno);
+ trace_xfs_ialloc_read_agi(pag->pag_mount, pag->pag_agno);
- error = xfs_read_agi(mp, tp, agno, bpp);
+ error = xfs_read_agi(pag, tp, &agibp);
if (error)
return error;
- agi = (*bpp)->b_addr;
- pag = (*bpp)->b_pag;
+ agi = agibp->b_addr;
if (!pag->pagi_init) {
pag->pagi_freecount = be32_to_cpu(agi->agi_freecount);
pag->pagi_count = be32_to_cpu(agi->agi_count);
@@ -2623,27 +2624,11 @@ xfs_ialloc_read_agi(
* we are in the middle of a forced shutdown.
*/
ASSERT(pag->pagi_freecount == be32_to_cpu(agi->agi_freecount) ||
- xfs_is_shutdown(mp));
- return 0;
-}
-
-/*
- * Read in the agi to initialise the per-ag data in the mount structure
- */
-int
-xfs_ialloc_pagi_init(
- xfs_mount_t *mp, /* file system mount structure */
- xfs_trans_t *tp, /* transaction pointer */
- xfs_agnumber_t agno) /* allocation group number */
-{
- struct xfs_buf *bp = NULL;
- int error;
-
- error = xfs_ialloc_read_agi(mp, tp, agno, &bp);
- if (error)
- return error;
- if (bp)
- xfs_trans_brelse(tp, bp);
+ xfs_is_shutdown(pag->pag_mount));
+ if (agibpp)
+ *agibpp = agibp;
+ else
+ xfs_trans_brelse(tp, agibp);
return 0;
}
@@ -2912,8 +2897,7 @@ xfs_ialloc_calc_rootino(
* allocation group, or very odd geometries created by old mkfs
* versions on very small filesystems.
*/
- if (mp->m_sb.sb_logstart &&
- XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == 0)
+ if (xfs_ag_contains_log(mp, 0))
first_bno += mp->m_sb.sb_logblocks;
/*
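
The xfs_read_agi()/xfs_ialloc_read_agi() conversion above replaces the (mp, tp, agno, bpp) signature with a perag-based one, and with xfs_ialloc_pagi_init() gone the AGI buffer pointer becomes optional. A sketch of the two caller patterns, mirroring the call sites visible elsewhere in this diff:

	/* initialise pag->pagi_* counters without keeping the AGI buffer */
	error = xfs_ialloc_read_agi(pag, tp, NULL);
	if (error)
		return error;

	/* or: keep the locked AGI buffer for further updates */
	struct xfs_buf	*agibp;

	error = xfs_ialloc_read_agi(pag, tp, &agibp);
	if (error)
		return error;
	/* ... modify and log the AGI ... */
	xfs_trans_brelse(tp, agibp);
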
diff --git a/fs/xfs/libxfs/xfs_ialloc.h b/fs/xfs/libxfs/xfs_ialloc.h
index a7705b6a1fd3..9bbbca6ac4ed 100644
--- a/fs/xfs/libxfs/xfs_ialloc.h
+++ b/fs/xfs/libxfs/xfs_ialloc.h
@@ -62,25 +62,10 @@ xfs_ialloc_log_agi(
struct xfs_buf *bp, /* allocation group header buffer */
uint32_t fields); /* bitmask of fields to log */
-/*
- * Read in the allocation group header (inode allocation section)
- */
-int /* error */
-xfs_ialloc_read_agi(
- struct xfs_mount *mp, /* file system mount structure */
- struct xfs_trans *tp, /* transaction pointer */
- xfs_agnumber_t agno, /* allocation group number */
- struct xfs_buf **bpp); /* allocation group hdr buf */
-
-/*
- * Read in the allocation group header to initialise the per-ag data
- * in the mount structure
- */
-int
-xfs_ialloc_pagi_init(
- struct xfs_mount *mp, /* file system mount structure */
- struct xfs_trans *tp, /* transaction pointer */
- xfs_agnumber_t agno); /* allocation group number */
+int xfs_read_agi(struct xfs_perag *pag, struct xfs_trans *tp,
+ struct xfs_buf **agibpp);
+int xfs_ialloc_read_agi(struct xfs_perag *pag, struct xfs_trans *tp,
+ struct xfs_buf **agibpp);
/*
* Lookup a record by ino in the btree given by cur.
@@ -102,8 +87,6 @@ int xfs_ialloc_inode_init(struct xfs_mount *mp, struct xfs_trans *tp,
xfs_agnumber_t agno, xfs_agblock_t agbno,
xfs_agblock_t length, unsigned int gen);
-int xfs_read_agi(struct xfs_mount *mp, struct xfs_trans *tp,
- xfs_agnumber_t agno, struct xfs_buf **bpp);
union xfs_btree_rec;
void xfs_inobt_btrec_to_irec(struct xfs_mount *mp,
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
index b2ad2fdc40f5..8c83e265770c 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
@@ -683,10 +683,10 @@ xfs_inobt_rec_check_count(
static xfs_extlen_t
xfs_inobt_max_size(
- struct xfs_mount *mp,
- xfs_agnumber_t agno)
+ struct xfs_perag *pag)
{
- xfs_agblock_t agblocks = xfs_ag_block_count(mp, agno);
+ struct xfs_mount *mp = pag->pag_mount;
+ xfs_agblock_t agblocks = pag->block_count;
/* Bail out if we're uninitialized, which can happen in mkfs. */
if (M_IGEO(mp)->inobt_mxr[0] == 0)
@@ -697,8 +697,7 @@ xfs_inobt_max_size(
* never be available for the kinds of things that would require btree
* expansion. We therefore can pretend the space isn't there.
*/
- if (mp->m_sb.sb_logstart &&
- XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == agno)
+ if (xfs_ag_contains_log(mp, pag->pag_agno))
agblocks -= mp->m_sb.sb_logblocks;
return xfs_btree_calc_size(M_IGEO(mp)->inobt_mnr,
@@ -722,7 +721,7 @@ xfs_inobt_cur(
ASSERT(*agi_bpp == NULL);
ASSERT(*curpp == NULL);
- error = xfs_ialloc_read_agi(mp, tp, pag->pag_agno, agi_bpp);
+ error = xfs_ialloc_read_agi(pag, tp, agi_bpp);
if (error)
return error;
@@ -757,16 +756,15 @@ xfs_inobt_count_blocks(
/* Read finobt block count from AGI header. */
static int
xfs_finobt_read_blocks(
- struct xfs_mount *mp,
- struct xfs_trans *tp,
struct xfs_perag *pag,
+ struct xfs_trans *tp,
xfs_extlen_t *tree_blocks)
{
struct xfs_buf *agbp;
struct xfs_agi *agi;
int error;
- error = xfs_ialloc_read_agi(mp, tp, pag->pag_agno, &agbp);
+ error = xfs_ialloc_read_agi(pag, tp, &agbp);
if (error)
return error;
@@ -794,14 +792,14 @@ xfs_finobt_calc_reserves(
return 0;
if (xfs_has_inobtcounts(mp))
- error = xfs_finobt_read_blocks(mp, tp, pag, &tree_len);
+ error = xfs_finobt_read_blocks(pag, tp, &tree_len);
else
error = xfs_inobt_count_blocks(mp, tp, pag, XFS_BTNUM_FINO,
&tree_len);
if (error)
return error;
- *ask += xfs_inobt_max_size(mp, pag->pag_agno);
+ *ask += xfs_inobt_max_size(pag);
*used += tree_len;
return 0;
}
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index 3b1b63f9d886..758aacd8166b 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -10,6 +10,7 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
+#include "xfs_ag.h"
#include "xfs_inode.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
@@ -41,14 +42,12 @@ xfs_inode_buf_verify(
bool readahead)
{
struct xfs_mount *mp = bp->b_mount;
- xfs_agnumber_t agno;
int i;
int ni;
/*
* Validate the magic number and version of every inode in the buffer
*/
- agno = xfs_daddr_to_agno(mp, xfs_buf_daddr(bp));
ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock;
for (i = 0; i < ni; i++) {
struct xfs_dinode *dip;
@@ -59,7 +58,7 @@ xfs_inode_buf_verify(
unlinked_ino = be32_to_cpu(dip->di_next_unlinked);
di_ok = xfs_verify_magic16(bp, dip->di_magic) &&
xfs_dinode_good_version(mp, dip->di_version) &&
- xfs_verify_agino_or_null(mp, agno, unlinked_ino);
+ xfs_verify_agino_or_null(bp->b_pag, unlinked_ino);
if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
XFS_ERRTAG_ITOBP_INOTOBP))) {
if (readahead) {
@@ -178,7 +177,6 @@ xfs_inode_from_disk(
xfs_failaddr_t fa;
ASSERT(ip->i_cowfp == NULL);
- ASSERT(ip->i_afp == NULL);
fa = xfs_dinode_verify(ip->i_mount, ip->i_ino, from);
if (fa) {
@@ -230,7 +228,8 @@ xfs_inode_from_disk(
ip->i_nblocks = be64_to_cpu(from->di_nblocks);
ip->i_extsize = be32_to_cpu(from->di_extsize);
ip->i_forkoff = from->di_forkoff;
- ip->i_diflags = be16_to_cpu(from->di_flags);
+ ip->i_diflags = be16_to_cpu(from->di_flags);
+ ip->i_next_unlinked = be32_to_cpu(from->di_next_unlinked);
if (from->di_dmevmask || from->di_dmstate)
xfs_iflags_set(ip, XFS_IPRESERVE_DM_FIELDS);
@@ -286,7 +285,7 @@ xfs_inode_to_disk_iext_counters(
{
if (xfs_inode_has_large_extent_counts(ip)) {
to->di_big_nextents = cpu_to_be64(xfs_ifork_nextents(&ip->i_df));
- to->di_big_anextents = cpu_to_be32(xfs_ifork_nextents(ip->i_afp));
+ to->di_big_anextents = cpu_to_be32(xfs_ifork_nextents(&ip->i_af));
/*
* We might be upgrading the inode to use larger extent counters
* than was previously used. Hence zero the unused field.
@@ -294,7 +293,7 @@ xfs_inode_to_disk_iext_counters(
to->di_nrext64_pad = cpu_to_be16(0);
} else {
to->di_nextents = cpu_to_be32(xfs_ifork_nextents(&ip->i_df));
- to->di_anextents = cpu_to_be16(xfs_ifork_nextents(ip->i_afp));
+ to->di_anextents = cpu_to_be16(xfs_ifork_nextents(&ip->i_af));
}
}
@@ -326,7 +325,7 @@ xfs_inode_to_disk(
to->di_nblocks = cpu_to_be64(ip->i_nblocks);
to->di_extsize = cpu_to_be32(ip->i_extsize);
to->di_forkoff = ip->i_forkoff;
- to->di_aformat = xfs_ifork_format(ip->i_afp);
+ to->di_aformat = xfs_ifork_format(&ip->i_af);
to->di_flags = cpu_to_be16(ip->i_diflags);
if (xfs_has_v3inodes(ip->i_mount)) {
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index 1a4cdf550f6d..9327a4f39206 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -35,7 +35,7 @@ xfs_init_local_fork(
const void *data,
int64_t size)
{
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
int mem_size = size;
bool zero_terminate;
@@ -102,7 +102,7 @@ xfs_iformat_extents(
int whichfork)
{
struct xfs_mount *mp = ip->i_mount;
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
int state = xfs_bmap_fork_to_state(whichfork);
xfs_extnum_t nex = xfs_dfork_nextents(dip, whichfork);
int size = nex * sizeof(xfs_bmbt_rec_t);
@@ -173,7 +173,7 @@ xfs_iformat_btree(
int size;
int level;
- ifp = XFS_IFORK_PTR(ip, whichfork);
+ ifp = xfs_ifork_ptr(ip, whichfork);
dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
size = XFS_BMAP_BROOT_SPACE(mp, dfp);
nrecs = be16_to_cpu(dfp->bb_numrecs);
@@ -276,17 +276,23 @@ xfs_dfork_attr_shortform_size(
return be16_to_cpu(atp->hdr.totsize);
}
-struct xfs_ifork *
-xfs_ifork_alloc(
+void
+xfs_ifork_init_attr(
+ struct xfs_inode *ip,
enum xfs_dinode_fmt format,
xfs_extnum_t nextents)
{
- struct xfs_ifork *ifp;
+ ip->i_af.if_format = format;
+ ip->i_af.if_nextents = nextents;
+}
- ifp = kmem_cache_zalloc(xfs_ifork_cache, GFP_NOFS | __GFP_NOFAIL);
- ifp->if_format = format;
- ifp->if_nextents = nextents;
- return ifp;
+void
+xfs_ifork_zap_attr(
+ struct xfs_inode *ip)
+{
+ xfs_idestroy_fork(&ip->i_af);
+ memset(&ip->i_af, 0, sizeof(struct xfs_ifork));
+ ip->i_af.if_format = XFS_DINODE_FMT_EXTENTS;
}
int
@@ -301,9 +307,9 @@ xfs_iformat_attr_fork(
* Initialize the extent count early, as the per-format routines may
* depend on it.
*/
- ip->i_afp = xfs_ifork_alloc(dip->di_aformat, naextents);
+ xfs_ifork_init_attr(ip, dip->di_aformat, naextents);
- switch (ip->i_afp->if_format) {
+ switch (ip->i_af.if_format) {
case XFS_DINODE_FMT_LOCAL:
error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK,
xfs_dfork_attr_shortform_size(dip));
@@ -323,10 +329,8 @@ xfs_iformat_attr_fork(
break;
}
- if (error) {
- kmem_cache_free(xfs_ifork_cache, ip->i_afp);
- ip->i_afp = NULL;
- }
+ if (error)
+ xfs_ifork_zap_attr(ip);
return error;
}
@@ -370,7 +374,7 @@ xfs_iroot_realloc(
return;
}
- ifp = XFS_IFORK_PTR(ip, whichfork);
+ ifp = xfs_ifork_ptr(ip, whichfork);
if (rec_diff > 0) {
/*
* If there wasn't any memory allocated before, just
@@ -400,7 +404,7 @@ xfs_iroot_realloc(
(int)new_size);
ifp->if_broot_bytes = (int)new_size;
ASSERT(XFS_BMAP_BMDR_SPACE(ifp->if_broot) <=
- XFS_IFORK_SIZE(ip, whichfork));
+ xfs_inode_fork_size(ip, whichfork));
memmove(np, op, cur_max * (uint)sizeof(xfs_fsblock_t));
return;
}
@@ -454,7 +458,7 @@ xfs_iroot_realloc(
ifp->if_broot_bytes = (int)new_size;
if (ifp->if_broot)
ASSERT(XFS_BMAP_BMDR_SPACE(ifp->if_broot) <=
- XFS_IFORK_SIZE(ip, whichfork));
+ xfs_inode_fork_size(ip, whichfork));
return;
}
@@ -480,11 +484,11 @@ xfs_idata_realloc(
int64_t byte_diff,
int whichfork)
{
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
int64_t new_size = ifp->if_bytes + byte_diff;
ASSERT(new_size >= 0);
- ASSERT(new_size <= XFS_IFORK_SIZE(ip, whichfork));
+ ASSERT(new_size <= xfs_inode_fork_size(ip, whichfork));
if (byte_diff == 0)
return;
@@ -539,7 +543,7 @@ xfs_iextents_copy(
int whichfork)
{
int state = xfs_bmap_fork_to_state(whichfork);
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
struct xfs_iext_cursor icur;
struct xfs_bmbt_irec rec;
int64_t copied = 0;
@@ -591,7 +595,7 @@ xfs_iflush_fork(
if (!iip)
return;
- ifp = XFS_IFORK_PTR(ip, whichfork);
+ ifp = xfs_ifork_ptr(ip, whichfork);
/*
* This can happen if we gave up in iformat in an error path,
* for the attribute fork.
@@ -607,7 +611,7 @@ xfs_iflush_fork(
if ((iip->ili_fields & dataflag[whichfork]) &&
(ifp->if_bytes > 0)) {
ASSERT(ifp->if_u1.if_data != NULL);
- ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
+ ASSERT(ifp->if_bytes <= xfs_inode_fork_size(ip, whichfork));
memcpy(cp, ifp->if_u1.if_data, ifp->if_bytes);
}
break;
@@ -626,7 +630,7 @@ xfs_iflush_fork(
(ifp->if_broot_bytes > 0)) {
ASSERT(ifp->if_broot != NULL);
ASSERT(XFS_BMAP_BMDR_SPACE(ifp->if_broot) <=
- XFS_IFORK_SIZE(ip, whichfork));
+ xfs_inode_fork_size(ip, whichfork));
xfs_bmbt_to_bmdr(mp, ifp->if_broot, ifp->if_broot_bytes,
(xfs_bmdr_block_t *)cp,
XFS_DFORK_SIZE(dip, mp, whichfork));
@@ -656,7 +660,7 @@ xfs_iext_state_to_fork(
if (state & BMAP_COWFORK)
return ip->i_cowfp;
else if (state & BMAP_ATTRFORK)
- return ip->i_afp;
+ return &ip->i_af;
return &ip->i_df;
}
@@ -707,18 +711,17 @@ int
xfs_ifork_verify_local_attr(
struct xfs_inode *ip)
{
- struct xfs_ifork *ifp = ip->i_afp;
+ struct xfs_ifork *ifp = &ip->i_af;
xfs_failaddr_t fa;
- if (!ifp)
+ if (!xfs_inode_has_attr_fork(ip))
fa = __this_address;
else
fa = xfs_attr_shortform_verify(ip);
if (fa) {
xfs_inode_verifier_error(ip, -EFSCORRUPTED, "attr fork",
- ifp ? ifp->if_u1.if_data : NULL,
- ifp ? ifp->if_bytes : 0, fa);
+ ifp->if_u1.if_data, ifp->if_bytes, fa);
return -EFSCORRUPTED;
}
@@ -731,7 +734,7 @@ xfs_iext_count_may_overflow(
int whichfork,
int nr_to_add)
{
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
uint64_t max_exts;
uint64_t nr_exts;
diff --git a/fs/xfs/libxfs/xfs_inode_fork.h b/fs/xfs/libxfs/xfs_inode_fork.h
index 4f68c1f20beb..d3943d6ad0b9 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.h
+++ b/fs/xfs/libxfs/xfs_inode_fork.h
@@ -77,28 +77,8 @@ struct xfs_ifork {
/*
* Fork handling.
*/
-
-#define XFS_IFORK_Q(ip) ((ip)->i_forkoff != 0)
-#define XFS_IFORK_BOFF(ip) ((int)((ip)->i_forkoff << 3))
-
-#define XFS_IFORK_PTR(ip,w) \
- ((w) == XFS_DATA_FORK ? \
- &(ip)->i_df : \
- ((w) == XFS_ATTR_FORK ? \
- (ip)->i_afp : \
- (ip)->i_cowfp))
-#define XFS_IFORK_DSIZE(ip) \
- (XFS_IFORK_Q(ip) ? XFS_IFORK_BOFF(ip) : XFS_LITINO((ip)->i_mount))
-#define XFS_IFORK_ASIZE(ip) \
- (XFS_IFORK_Q(ip) ? XFS_LITINO((ip)->i_mount) - XFS_IFORK_BOFF(ip) : 0)
-#define XFS_IFORK_SIZE(ip,w) \
- ((w) == XFS_DATA_FORK ? \
- XFS_IFORK_DSIZE(ip) : \
- ((w) == XFS_ATTR_FORK ? \
- XFS_IFORK_ASIZE(ip) : \
- 0))
#define XFS_IFORK_MAXEXT(ip, w) \
- (XFS_IFORK_SIZE(ip, w) / sizeof(xfs_bmbt_rec_t))
+ (xfs_inode_fork_size(ip, w) / sizeof(xfs_bmbt_rec_t))
static inline bool xfs_ifork_has_extents(struct xfs_ifork *ifp)
{
@@ -179,8 +159,9 @@ xfs_dfork_nextents(
return 0;
}
-struct xfs_ifork *xfs_ifork_alloc(enum xfs_dinode_fmt format,
- xfs_extnum_t nextents);
+void xfs_ifork_zap_attr(struct xfs_inode *ip);
+void xfs_ifork_init_attr(struct xfs_inode *ip, enum xfs_dinode_fmt format,
+ xfs_extnum_t nextents);
struct xfs_ifork *xfs_iext_state_to_fork(struct xfs_inode *ip, int state);
int xfs_iformat_data_fork(struct xfs_inode *, struct xfs_dinode *);
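
The macros removed above (XFS_IFORK_Q, XFS_IFORK_PTR, XFS_IFORK_DSIZE, XFS_IFORK_SIZE) are replaced throughout this series by inline helpers on struct xfs_inode; their definitions live outside the hunks shown here, presumably in xfs_inode.h. The following is only a sketch of the expected shape, inferred from the removed macros and the new embedded i_af attribute fork:

	static inline bool xfs_inode_has_attr_fork(struct xfs_inode *ip)
	{
		return ip->i_forkoff > 0;		/* was XFS_IFORK_Q() */
	}

	static inline struct xfs_ifork *
	xfs_ifork_ptr(struct xfs_inode *ip, int whichfork)
	{
		switch (whichfork) {
		case XFS_DATA_FORK:
			return &ip->i_df;
		case XFS_ATTR_FORK:
			if (!xfs_inode_has_attr_fork(ip))
				return NULL;
			return &ip->i_af;		/* embedded, no longer i_afp */
		case XFS_COW_FORK:
			return ip->i_cowfp;
		default:
			return NULL;
		}
	}

	static inline unsigned int xfs_inode_data_fork_size(struct xfs_inode *ip)
	{
		if (xfs_inode_has_attr_fork(ip))
			return ip->i_forkoff << 3;	/* was XFS_IFORK_BOFF() */
		return XFS_LITINO(ip->i_mount);		/* was XFS_IFORK_DSIZE() */
	}
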
diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c
index 97e9e6020596..64b910caafaa 100644
--- a/fs/xfs/libxfs/xfs_refcount.c
+++ b/fs/xfs/libxfs/xfs_refcount.c
@@ -111,7 +111,7 @@ xfs_refcount_get_rec(
int *stat)
{
struct xfs_mount *mp = cur->bc_mp;
- xfs_agnumber_t agno = cur->bc_ag.pag->pag_agno;
+ struct xfs_perag *pag = cur->bc_ag.pag;
union xfs_btree_rec *rec;
int error;
xfs_agblock_t realstart;
@@ -121,8 +121,6 @@ xfs_refcount_get_rec(
return error;
xfs_refcount_btrec_to_irec(rec, irec);
-
- agno = cur->bc_ag.pag->pag_agno;
if (irec->rc_blockcount == 0 || irec->rc_blockcount > MAXREFCEXTLEN)
goto out_bad_rec;
@@ -137,22 +135,23 @@ xfs_refcount_get_rec(
}
/* check for valid extent range, including overflow */
- if (!xfs_verify_agbno(mp, agno, realstart))
+ if (!xfs_verify_agbno(pag, realstart))
goto out_bad_rec;
if (realstart > realstart + irec->rc_blockcount)
goto out_bad_rec;
- if (!xfs_verify_agbno(mp, agno, realstart + irec->rc_blockcount - 1))
+ if (!xfs_verify_agbno(pag, realstart + irec->rc_blockcount - 1))
goto out_bad_rec;
if (irec->rc_refcount == 0 || irec->rc_refcount > MAXREFCOUNT)
goto out_bad_rec;
- trace_xfs_refcount_get(cur->bc_mp, cur->bc_ag.pag->pag_agno, irec);
+ trace_xfs_refcount_get(cur->bc_mp, pag->pag_agno, irec);
return 0;
out_bad_rec:
xfs_warn(mp,
- "Refcount BTree record corruption in AG %d detected!", agno);
+ "Refcount BTree record corruption in AG %d detected!",
+ pag->pag_agno);
xfs_warn(mp,
"Start block 0x%x, block count 0x%x, references 0x%x",
irec->rc_startblock, irec->rc_blockcount, irec->rc_refcount);
@@ -1177,8 +1176,8 @@ xfs_refcount_finish_one(
*pcur = NULL;
}
if (rcur == NULL) {
- error = xfs_alloc_read_agf(tp->t_mountp, tp, pag->pag_agno,
- XFS_ALLOC_FLAG_FREEING, &agbp);
+ error = xfs_alloc_read_agf(pag, tp, XFS_ALLOC_FLAG_FREEING,
+ &agbp);
if (error)
goto out_drop;
@@ -1710,7 +1709,7 @@ xfs_refcount_recover_cow_leftovers(
if (error)
return error;
- error = xfs_alloc_read_agf(mp, tp, pag->pag_agno, 0, &agbp);
+ error = xfs_alloc_read_agf(pag, tp, 0, &agbp);
if (error)
goto out_trans;
cur = xfs_refcountbt_init_cursor(mp, tp, agbp, pag);
diff --git a/fs/xfs/libxfs/xfs_refcount_btree.c b/fs/xfs/libxfs/xfs_refcount_btree.c
index d14c1720b0fb..316c1ec0c3c2 100644
--- a/fs/xfs/libxfs/xfs_refcount_btree.c
+++ b/fs/xfs/libxfs/xfs_refcount_btree.c
@@ -493,7 +493,7 @@ xfs_refcountbt_calc_reserves(
if (!xfs_has_reflink(mp))
return 0;
- error = xfs_alloc_read_agf(mp, tp, pag->pag_agno, 0, &agbp);
+ error = xfs_alloc_read_agf(pag, tp, 0, &agbp);
if (error)
return error;
@@ -507,8 +507,7 @@ xfs_refcountbt_calc_reserves(
* never be available for the kinds of things that would require btree
* expansion. We therefore can pretend the space isn't there.
*/
- if (mp->m_sb.sb_logstart &&
- XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == pag->pag_agno)
+ if (xfs_ag_contains_log(mp, pag->pag_agno))
agblocks -= mp->m_sb.sb_logblocks;
*ask += xfs_refcountbt_max_size(mp, agblocks);
diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c
index 2845019d31da..094dfc897ebc 100644
--- a/fs/xfs/libxfs/xfs_rmap.c
+++ b/fs/xfs/libxfs/xfs_rmap.c
@@ -215,7 +215,7 @@ xfs_rmap_get_rec(
int *stat)
{
struct xfs_mount *mp = cur->bc_mp;
- xfs_agnumber_t agno = cur->bc_ag.pag->pag_agno;
+ struct xfs_perag *pag = cur->bc_ag.pag;
union xfs_btree_rec *rec;
int error;
@@ -235,12 +235,12 @@ xfs_rmap_get_rec(
goto out_bad_rec;
} else {
/* check for valid extent range, including overflow */
- if (!xfs_verify_agbno(mp, agno, irec->rm_startblock))
+ if (!xfs_verify_agbno(pag, irec->rm_startblock))
goto out_bad_rec;
if (irec->rm_startblock >
irec->rm_startblock + irec->rm_blockcount)
goto out_bad_rec;
- if (!xfs_verify_agbno(mp, agno,
+ if (!xfs_verify_agbno(pag,
irec->rm_startblock + irec->rm_blockcount - 1))
goto out_bad_rec;
}
@@ -254,7 +254,7 @@ xfs_rmap_get_rec(
out_bad_rec:
xfs_warn(mp,
"Reverse Mapping BTree record corruption in AG %d detected!",
- agno);
+ pag->pag_agno);
xfs_warn(mp,
"Owner 0x%llx, flags 0x%x, start block 0x%x block count 0x%x",
irec->rm_owner, irec->rm_flags, irec->rm_startblock,
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c
index 69e104d0277f..7f83f62e51e0 100644
--- a/fs/xfs/libxfs/xfs_rmap_btree.c
+++ b/fs/xfs/libxfs/xfs_rmap_btree.c
@@ -90,7 +90,7 @@ xfs_rmapbt_alloc_block(
xfs_agblock_t bno;
/* Allocate the new block from the freelist. If we can't, give up. */
- error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_ag.agbp,
+ error = xfs_alloc_get_freelist(pag, cur->bc_tp, cur->bc_ag.agbp,
&bno, 1);
if (error)
return error;
@@ -129,7 +129,7 @@ xfs_rmapbt_free_block(
bno, 1);
be32_add_cpu(&agf->agf_rmap_blocks, -1);
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);
- error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
+ error = xfs_alloc_put_freelist(pag, cur->bc_tp, agbp, NULL, bno, 1);
if (error)
return error;
@@ -652,7 +652,7 @@ xfs_rmapbt_calc_reserves(
if (!xfs_has_rmapbt(mp))
return 0;
- error = xfs_alloc_read_agf(mp, tp, pag->pag_agno, 0, &agbp);
+ error = xfs_alloc_read_agf(pag, tp, 0, &agbp);
if (error)
return error;
@@ -666,8 +666,7 @@ xfs_rmapbt_calc_reserves(
* never be available for the kinds of things that would require btree
* expansion. We therefore can pretend the space isn't there.
*/
- if (mp->m_sb.sb_logstart &&
- XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == pag->pag_agno)
+ if (xfs_ag_contains_log(mp, pag->pag_agno))
agblocks -= mp->m_sb.sb_logblocks;
/* Reserve 1% of the AG or enough for 1 block per record. */
diff --git a/fs/xfs/libxfs/xfs_symlink_remote.c b/fs/xfs/libxfs/xfs_symlink_remote.c
index 8b9bd178a487..bdc777b9ec4a 100644
--- a/fs/xfs/libxfs/xfs_symlink_remote.c
+++ b/fs/xfs/libxfs/xfs_symlink_remote.c
@@ -204,7 +204,7 @@ xfs_failaddr_t
xfs_symlink_shortform_verify(
struct xfs_inode *ip)
{
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
char *sfp = (char *)ifp->if_u1.if_data;
int size = ifp->if_bytes;
char *endp = sfp + size;
diff --git a/fs/xfs/libxfs/xfs_trans_resv.c b/fs/xfs/libxfs/xfs_trans_resv.c
index e9913c2c5a24..2c4ad6e4bb14 100644
--- a/fs/xfs/libxfs/xfs_trans_resv.c
+++ b/fs/xfs/libxfs/xfs_trans_resv.c
@@ -515,7 +515,7 @@ xfs_calc_remove_reservation(
{
return XFS_DQUOT_LOGRES(mp) +
xfs_calc_iunlink_add_reservation(mp) +
- max((xfs_calc_inode_res(mp, 1) +
+ max((xfs_calc_inode_res(mp, 2) +
xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp),
XFS_FSB_TO_B(mp, 1))),
(xfs_calc_buf_res(4, mp->m_sb.sb_sectsize) +
diff --git a/fs/xfs/libxfs/xfs_types.c b/fs/xfs/libxfs/xfs_types.c
index e810d23f2d97..5c2765934732 100644
--- a/fs/xfs/libxfs/xfs_types.c
+++ b/fs/xfs/libxfs/xfs_types.c
@@ -13,25 +13,13 @@
#include "xfs_mount.h"
#include "xfs_ag.h"
-/* Find the size of the AG, in blocks. */
-inline xfs_agblock_t
-xfs_ag_block_count(
- struct xfs_mount *mp,
- xfs_agnumber_t agno)
-{
- ASSERT(agno < mp->m_sb.sb_agcount);
-
- if (agno < mp->m_sb.sb_agcount - 1)
- return mp->m_sb.sb_agblocks;
- return mp->m_sb.sb_dblocks - (agno * mp->m_sb.sb_agblocks);
-}
/*
* Verify that an AG block number pointer neither points outside the AG
* nor points at static metadata.
*/
-inline bool
-xfs_verify_agbno(
+static inline bool
+xfs_verify_agno_agbno(
struct xfs_mount *mp,
xfs_agnumber_t agno,
xfs_agblock_t agbno)
@@ -59,7 +47,7 @@ xfs_verify_fsbno(
if (agno >= mp->m_sb.sb_agcount)
return false;
- return xfs_verify_agbno(mp, agno, XFS_FSB_TO_AGBNO(mp, fsbno));
+ return xfs_verify_agno_agbno(mp, agno, XFS_FSB_TO_AGBNO(mp, fsbno));
}
/*
@@ -85,40 +73,12 @@ xfs_verify_fsbext(
XFS_FSB_TO_AGNO(mp, fsbno + len - 1);
}
-/* Calculate the first and last possible inode number in an AG. */
-inline void
-xfs_agino_range(
- struct xfs_mount *mp,
- xfs_agnumber_t agno,
- xfs_agino_t *first,
- xfs_agino_t *last)
-{
- xfs_agblock_t bno;
- xfs_agblock_t eoag;
-
- eoag = xfs_ag_block_count(mp, agno);
-
- /*
- * Calculate the first inode, which will be in the first
- * cluster-aligned block after the AGFL.
- */
- bno = round_up(XFS_AGFL_BLOCK(mp) + 1, M_IGEO(mp)->cluster_align);
- *first = XFS_AGB_TO_AGINO(mp, bno);
-
- /*
- * Calculate the last inode, which will be at the end of the
- * last (aligned) cluster that can be allocated in the AG.
- */
- bno = round_down(eoag, M_IGEO(mp)->cluster_align);
- *last = XFS_AGB_TO_AGINO(mp, bno) - 1;
-}
-
/*
* Verify that an AG inode number pointer neither points outside the AG
* nor points at static metadata.
*/
-inline bool
-xfs_verify_agino(
+static inline bool
+xfs_verify_agno_agino(
struct xfs_mount *mp,
xfs_agnumber_t agno,
xfs_agino_t agino)
@@ -131,19 +91,6 @@ xfs_verify_agino(
}
/*
- * Verify that an AG inode number pointer neither points outside the AG
- * nor points at static metadata, or is NULLAGINO.
- */
-bool
-xfs_verify_agino_or_null(
- struct xfs_mount *mp,
- xfs_agnumber_t agno,
- xfs_agino_t agino)
-{
- return agino == NULLAGINO || xfs_verify_agino(mp, agno, agino);
-}
-
-/*
* Verify that an FS inode number pointer neither points outside the
* filesystem nor points at static AG metadata.
*/
@@ -159,7 +106,7 @@ xfs_verify_ino(
return false;
if (XFS_AGINO_TO_INO(mp, agno, agino) != ino)
return false;
- return xfs_verify_agino(mp, agno, agino);
+ return xfs_verify_agno_agino(mp, agno, agino);
}
/* Is this an internal inode number? */
@@ -229,12 +176,8 @@ xfs_icount_range(
/* root, rtbitmap, rtsum all live in the first chunk */
*min = XFS_INODES_PER_CHUNK;
- for_each_perag(mp, agno, pag) {
- xfs_agino_t first, last;
-
- xfs_agino_range(mp, agno, &first, &last);
- nr_inos += last - first + 1;
- }
+ for_each_perag(mp, agno, pag)
+ nr_inos += pag->agino_max - pag->agino_min + 1;
*max = nr_inos;
}
diff --git a/fs/xfs/libxfs/xfs_types.h b/fs/xfs/libxfs/xfs_types.h
index 373f64a492a4..a6b7d98cf68f 100644
--- a/fs/xfs/libxfs/xfs_types.h
+++ b/fs/xfs/libxfs/xfs_types.h
@@ -179,19 +179,10 @@ enum xfs_ag_resv_type {
*/
struct xfs_mount;
-xfs_agblock_t xfs_ag_block_count(struct xfs_mount *mp, xfs_agnumber_t agno);
-bool xfs_verify_agbno(struct xfs_mount *mp, xfs_agnumber_t agno,
- xfs_agblock_t agbno);
bool xfs_verify_fsbno(struct xfs_mount *mp, xfs_fsblock_t fsbno);
bool xfs_verify_fsbext(struct xfs_mount *mp, xfs_fsblock_t fsbno,
xfs_fsblock_t len);
-void xfs_agino_range(struct xfs_mount *mp, xfs_agnumber_t agno,
- xfs_agino_t *first, xfs_agino_t *last);
-bool xfs_verify_agino(struct xfs_mount *mp, xfs_agnumber_t agno,
- xfs_agino_t agino);
-bool xfs_verify_agino_or_null(struct xfs_mount *mp, xfs_agnumber_t agno,
- xfs_agino_t agino);
bool xfs_verify_ino(struct xfs_mount *mp, xfs_ino_t ino);
bool xfs_internal_inum(struct xfs_mount *mp, xfs_ino_t ino);
bool xfs_verify_dir_ino(struct xfs_mount *mp, xfs_ino_t ino);
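
The xfs_types.c changes above demote xfs_verify_agbno()/xfs_verify_agino() to static agno-based helpers and drop xfs_ag_block_count()/xfs_agino_range(); the public replacements take a struct xfs_perag and use the precomputed pag->block_count and pag->agino_min/agino_max fields that the rest of this diff relies on. Their definitions are not part of the hunks shown here (presumably xfs_ag.h), so this is a sketch of the expected shape; the pag->min_block field name is an assumption:

	static inline bool
	xfs_verify_agbno(struct xfs_perag *pag, xfs_agblock_t agbno)
	{
		if (agbno >= pag->block_count)
			return false;
		if (agbno <= pag->min_block)	/* static metadata at AG start */
			return false;
		return true;
	}

	static inline bool
	xfs_verify_agino(struct xfs_perag *pag, xfs_agino_t agino)
	{
		return agino >= pag->agino_min && agino <= pag->agino_max;
	}

	static inline bool
	xfs_verify_agino_or_null(struct xfs_perag *pag, xfs_agino_t agino)
	{
		return agino == NULLAGINO || xfs_verify_agino(pag, agino);
	}
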
diff --git a/fs/xfs/scrub/agheader.c b/fs/xfs/scrub/agheader.c
index 90aebfe9dc5f..b7b838bd4ba4 100644
--- a/fs/xfs/scrub/agheader.c
+++ b/fs/xfs/scrub/agheader.c
@@ -541,16 +541,16 @@ xchk_agf(
/* Check the AG length */
eoag = be32_to_cpu(agf->agf_length);
- if (eoag != xfs_ag_block_count(mp, agno))
+ if (eoag != pag->block_count)
xchk_block_set_corrupt(sc, sc->sa.agf_bp);
/* Check the AGF btree roots and levels */
agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]);
- if (!xfs_verify_agbno(mp, agno, agbno))
+ if (!xfs_verify_agbno(pag, agbno))
xchk_block_set_corrupt(sc, sc->sa.agf_bp);
agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]);
- if (!xfs_verify_agbno(mp, agno, agbno))
+ if (!xfs_verify_agbno(pag, agbno))
xchk_block_set_corrupt(sc, sc->sa.agf_bp);
level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
@@ -563,7 +563,7 @@ xchk_agf(
if (xfs_has_rmapbt(mp)) {
agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_RMAP]);
- if (!xfs_verify_agbno(mp, agno, agbno))
+ if (!xfs_verify_agbno(pag, agbno))
xchk_block_set_corrupt(sc, sc->sa.agf_bp);
level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
@@ -573,7 +573,7 @@ xchk_agf(
if (xfs_has_reflink(mp)) {
agbno = be32_to_cpu(agf->agf_refcount_root);
- if (!xfs_verify_agbno(mp, agno, agbno))
+ if (!xfs_verify_agbno(pag, agbno))
xchk_block_set_corrupt(sc, sc->sa.agf_bp);
level = be32_to_cpu(agf->agf_refcount_level);
@@ -639,9 +639,8 @@ xchk_agfl_block(
{
struct xchk_agfl_info *sai = priv;
struct xfs_scrub *sc = sai->sc;
- xfs_agnumber_t agno = sc->sa.pag->pag_agno;
- if (xfs_verify_agbno(mp, agno, agbno) &&
+ if (xfs_verify_agbno(sc->sa.pag, agbno) &&
sai->nr_entries < sai->sz_entries)
sai->entries[sai->nr_entries++] = agbno;
else
@@ -871,12 +870,12 @@ xchk_agi(
/* Check the AG length */
eoag = be32_to_cpu(agi->agi_length);
- if (eoag != xfs_ag_block_count(mp, agno))
+ if (eoag != pag->block_count)
xchk_block_set_corrupt(sc, sc->sa.agi_bp);
/* Check btree roots and levels */
agbno = be32_to_cpu(agi->agi_root);
- if (!xfs_verify_agbno(mp, agno, agbno))
+ if (!xfs_verify_agbno(pag, agbno))
xchk_block_set_corrupt(sc, sc->sa.agi_bp);
level = be32_to_cpu(agi->agi_level);
@@ -885,7 +884,7 @@ xchk_agi(
if (xfs_has_finobt(mp)) {
agbno = be32_to_cpu(agi->agi_free_root);
- if (!xfs_verify_agbno(mp, agno, agbno))
+ if (!xfs_verify_agbno(pag, agbno))
xchk_block_set_corrupt(sc, sc->sa.agi_bp);
level = be32_to_cpu(agi->agi_free_level);
@@ -902,17 +901,17 @@ xchk_agi(
/* Check inode pointers */
agino = be32_to_cpu(agi->agi_newino);
- if (!xfs_verify_agino_or_null(mp, agno, agino))
+ if (!xfs_verify_agino_or_null(pag, agino))
xchk_block_set_corrupt(sc, sc->sa.agi_bp);
agino = be32_to_cpu(agi->agi_dirino);
- if (!xfs_verify_agino_or_null(mp, agno, agino))
+ if (!xfs_verify_agino_or_null(pag, agino))
xchk_block_set_corrupt(sc, sc->sa.agi_bp);
/* Check unlinked inode buckets */
for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
agino = be32_to_cpu(agi->agi_unlinked[i]);
- if (!xfs_verify_agino_or_null(mp, agno, agino))
+ if (!xfs_verify_agino_or_null(pag, agino))
xchk_block_set_corrupt(sc, sc->sa.agi_bp);
}
diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c
index 6da7f2ca77de..1b0b4e243f77 100644
--- a/fs/xfs/scrub/agheader_repair.c
+++ b/fs/xfs/scrub/agheader_repair.c
@@ -106,7 +106,7 @@ xrep_agf_check_agfl_block(
{
struct xfs_scrub *sc = priv;
- if (!xfs_verify_agbno(mp, sc->sa.pag->pag_agno, agbno))
+ if (!xfs_verify_agbno(sc->sa.pag, agbno))
return -EFSCORRUPTED;
return 0;
}
@@ -130,10 +130,7 @@ xrep_check_btree_root(
struct xfs_scrub *sc,
struct xrep_find_ag_btree *fab)
{
- struct xfs_mount *mp = sc->mp;
- xfs_agnumber_t agno = sc->sm->sm_agno;
-
- return xfs_verify_agbno(mp, agno, fab->root) &&
+ return xfs_verify_agbno(sc->sa.pag, fab->root) &&
fab->height <= fab->maxlevels;
}
@@ -201,8 +198,7 @@ xrep_agf_init_header(
agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
agf->agf_seqno = cpu_to_be32(sc->sa.pag->pag_agno);
- agf->agf_length = cpu_to_be32(xfs_ag_block_count(mp,
- sc->sa.pag->pag_agno));
+ agf->agf_length = cpu_to_be32(sc->sa.pag->block_count);
agf->agf_flfirst = old_agf->agf_flfirst;
agf->agf_fllast = old_agf->agf_fllast;
agf->agf_flcount = old_agf->agf_flcount;
@@ -405,7 +401,7 @@ xrep_agf(
* btrees rooted in the AGF. If the AGFL contents are obviously bad
* then we'll bail out.
*/
- error = xfs_alloc_read_agfl(mp, sc->tp, sc->sa.pag->pag_agno, &agfl_bp);
+ error = xfs_alloc_read_agfl(sc->sa.pag, sc->tp, &agfl_bp);
if (error)
return error;
@@ -666,8 +662,7 @@ xrep_agfl(
* nothing wrong with the AGF, but all the AG header repair functions
* have this chicken-and-egg problem.
*/
- error = xfs_alloc_read_agf(mp, sc->tp, sc->sa.pag->pag_agno, 0,
- &agf_bp);
+ error = xfs_alloc_read_agf(sc->sa.pag, sc->tp, 0, &agf_bp);
if (error)
return error;
@@ -742,8 +737,7 @@ xrep_agi_find_btrees(
int error;
/* Read the AGF. */
- error = xfs_alloc_read_agf(mp, sc->tp, sc->sa.pag->pag_agno, 0,
- &agf_bp);
+ error = xfs_alloc_read_agf(sc->sa.pag, sc->tp, 0, &agf_bp);
if (error)
return error;
@@ -782,8 +776,7 @@ xrep_agi_init_header(
agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
agi->agi_seqno = cpu_to_be32(sc->sa.pag->pag_agno);
- agi->agi_length = cpu_to_be32(xfs_ag_block_count(mp,
- sc->sa.pag->pag_agno));
+ agi->agi_length = cpu_to_be32(sc->sa.pag->block_count);
agi->agi_newino = cpu_to_be32(NULLAGINO);
agi->agi_dirino = cpu_to_be32(NULLAGINO);
if (xfs_has_crc(mp))
diff --git a/fs/xfs/scrub/alloc.c b/fs/xfs/scrub/alloc.c
index 87518e1292f8..ab427b4d7fe0 100644
--- a/fs/xfs/scrub/alloc.c
+++ b/fs/xfs/scrub/alloc.c
@@ -93,8 +93,7 @@ xchk_allocbt_rec(
struct xchk_btree *bs,
const union xfs_btree_rec *rec)
{
- struct xfs_mount *mp = bs->cur->bc_mp;
- xfs_agnumber_t agno = bs->cur->bc_ag.pag->pag_agno;
+ struct xfs_perag *pag = bs->cur->bc_ag.pag;
xfs_agblock_t bno;
xfs_extlen_t len;
@@ -102,8 +101,8 @@ xchk_allocbt_rec(
len = be32_to_cpu(rec->alloc.ar_blockcount);
if (bno + len <= bno ||
- !xfs_verify_agbno(mp, agno, bno) ||
- !xfs_verify_agbno(mp, agno, bno + len - 1))
+ !xfs_verify_agbno(pag, bno) ||
+ !xfs_verify_agbno(pag, bno + len - 1))
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
xchk_allocbt_xref(bs->sc, bno, len);
diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c
index 285995ba3947..f0b9cb6506fd 100644
--- a/fs/xfs/scrub/bmap.c
+++ b/fs/xfs/scrub/bmap.c
@@ -377,7 +377,7 @@ xchk_bmapbt_rec(
struct xfs_inode *ip = bs->cur->bc_ino.ip;
struct xfs_buf *bp = NULL;
struct xfs_btree_block *block;
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, info->whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, info->whichfork);
uint64_t owner;
int i;
@@ -426,7 +426,7 @@ xchk_bmap_btree(
struct xchk_bmap_info *info)
{
struct xfs_owner_info oinfo;
- struct xfs_ifork *ifp = XFS_IFORK_PTR(sc->ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(sc->ip, whichfork);
struct xfs_mount *mp = sc->mp;
struct xfs_inode *ip = sc->ip;
struct xfs_btree_cur *cur;
@@ -478,7 +478,7 @@ xchk_bmap_check_rmap(
return 0;
/* Now look up the bmbt record. */
- ifp = XFS_IFORK_PTR(sc->ip, sbcri->whichfork);
+ ifp = xfs_ifork_ptr(sc->ip, sbcri->whichfork);
if (!ifp) {
xchk_fblock_set_corrupt(sc, sbcri->whichfork,
rec->rm_offset);
@@ -540,7 +540,7 @@ xchk_bmap_check_ag_rmaps(
struct xfs_buf *agf;
int error;
- error = xfs_alloc_read_agf(sc->mp, sc->tp, pag->pag_agno, 0, &agf);
+ error = xfs_alloc_read_agf(pag, sc->tp, 0, &agf);
if (error)
return error;
@@ -563,7 +563,7 @@ xchk_bmap_check_rmaps(
struct xfs_scrub *sc,
int whichfork)
{
- struct xfs_ifork *ifp = XFS_IFORK_PTR(sc->ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(sc->ip, whichfork);
struct xfs_perag *pag;
xfs_agnumber_t agno;
bool zero_size;
@@ -578,7 +578,7 @@ xchk_bmap_check_rmaps(
if (XFS_IS_REALTIME_INODE(sc->ip) && whichfork == XFS_DATA_FORK)
return 0;
- ASSERT(XFS_IFORK_PTR(sc->ip, whichfork) != NULL);
+ ASSERT(xfs_ifork_ptr(sc->ip, whichfork) != NULL);
/*
* Only do this for complex maps that are in btree format, or for
@@ -624,7 +624,7 @@ xchk_bmap(
struct xchk_bmap_info info = { NULL };
struct xfs_mount *mp = sc->mp;
struct xfs_inode *ip = sc->ip;
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
xfs_fileoff_t endoff;
struct xfs_iext_cursor icur;
int error = 0;
@@ -689,7 +689,7 @@ xchk_bmap(
/* Scrub extent records. */
info.lastoff = 0;
- ifp = XFS_IFORK_PTR(ip, whichfork);
+ ifp = xfs_ifork_ptr(ip, whichfork);
for_each_xfs_iext(ifp, &icur, &irec) {
if (xchk_should_terminate(sc, &error) ||
(sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
diff --git a/fs/xfs/scrub/btree.c b/fs/xfs/scrub/btree.c
index 39dd46f038fe..2f4519590dc1 100644
--- a/fs/xfs/scrub/btree.c
+++ b/fs/xfs/scrub/btree.c
@@ -462,7 +462,7 @@ xchk_btree_check_iroot_minrecs(
*/
if (bs->cur->bc_btnum == XFS_BTNUM_BMAP &&
bs->cur->bc_ino.whichfork == XFS_DATA_FORK &&
- XFS_IFORK_Q(bs->sc->ip))
+ xfs_inode_has_attr_fork(bs->sc->ip))
return false;
return true;
diff --git a/fs/xfs/scrub/common.c b/fs/xfs/scrub/common.c
index 97b54ac3075f..9bbbf20f401b 100644
--- a/fs/xfs/scrub/common.c
+++ b/fs/xfs/scrub/common.c
@@ -416,15 +416,15 @@ xchk_ag_read_headers(
if (!sa->pag)
return -ENOENT;
- error = xfs_ialloc_read_agi(mp, sc->tp, agno, &sa->agi_bp);
+ error = xfs_ialloc_read_agi(sa->pag, sc->tp, &sa->agi_bp);
if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGI))
return error;
- error = xfs_alloc_read_agf(mp, sc->tp, agno, 0, &sa->agf_bp);
+ error = xfs_alloc_read_agf(sa->pag, sc->tp, 0, &sa->agf_bp);
if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGF))
return error;
- error = xfs_alloc_read_agfl(mp, sc->tp, agno, &sa->agfl_bp);
+ error = xfs_alloc_read_agfl(sa->pag, sc->tp, &sa->agfl_bp);
if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGFL))
return error;
diff --git a/fs/xfs/scrub/dabtree.c b/fs/xfs/scrub/dabtree.c
index b962cfbbd92b..84fe3d33d699 100644
--- a/fs/xfs/scrub/dabtree.c
+++ b/fs/xfs/scrub/dabtree.c
@@ -482,7 +482,7 @@ xchk_da_btree(
int error;
/* Skip short format data structures; no btree to scan. */
- if (!xfs_ifork_has_extents(XFS_IFORK_PTR(sc->ip, whichfork)))
+ if (!xfs_ifork_has_extents(xfs_ifork_ptr(sc->ip, whichfork)))
return 0;
/* Set up initial da state. */
diff --git a/fs/xfs/scrub/dir.c b/fs/xfs/scrub/dir.c
index 38897adde7b5..5abb5fdb71d9 100644
--- a/fs/xfs/scrub/dir.c
+++ b/fs/xfs/scrub/dir.c
@@ -667,7 +667,7 @@ xchk_directory_blocks(
{
struct xfs_bmbt_irec got;
struct xfs_da_args args;
- struct xfs_ifork *ifp = XFS_IFORK_PTR(sc->ip, XFS_DATA_FORK);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(sc->ip, XFS_DATA_FORK);
struct xfs_mount *mp = sc->mp;
xfs_fileoff_t leaf_lblk;
xfs_fileoff_t free_lblk;
diff --git a/fs/xfs/scrub/fscounters.c b/fs/xfs/scrub/fscounters.c
index 48a6cbdf95d0..6a6f8fe7f87c 100644
--- a/fs/xfs/scrub/fscounters.c
+++ b/fs/xfs/scrub/fscounters.c
@@ -78,10 +78,10 @@ xchk_fscount_warmup(
continue;
/* Lock both AG headers. */
- error = xfs_ialloc_read_agi(mp, sc->tp, agno, &agi_bp);
+ error = xfs_ialloc_read_agi(pag, sc->tp, &agi_bp);
if (error)
break;
- error = xfs_alloc_read_agf(mp, sc->tp, agno, 0, &agf_bp);
+ error = xfs_alloc_read_agf(pag, sc->tp, 0, &agf_bp);
if (error)
break;
diff --git a/fs/xfs/scrub/health.c b/fs/xfs/scrub/health.c
index 2e61df3bca83..aa65ec88a0c0 100644
--- a/fs/xfs/scrub/health.c
+++ b/fs/xfs/scrub/health.c
@@ -8,6 +8,8 @@
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_btree.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
#include "xfs_ag.h"
#include "xfs_health.h"
#include "scrub/scrub.h"
diff --git a/fs/xfs/scrub/ialloc.c b/fs/xfs/scrub/ialloc.c
index 00848ee542fb..e1026e07bf94 100644
--- a/fs/xfs/scrub/ialloc.c
+++ b/fs/xfs/scrub/ialloc.c
@@ -104,13 +104,13 @@ xchk_iallocbt_chunk(
xfs_extlen_t len)
{
struct xfs_mount *mp = bs->cur->bc_mp;
- xfs_agnumber_t agno = bs->cur->bc_ag.pag->pag_agno;
+ struct xfs_perag *pag = bs->cur->bc_ag.pag;
xfs_agblock_t bno;
bno = XFS_AGINO_TO_AGBNO(mp, agino);
if (bno + len <= bno ||
- !xfs_verify_agbno(mp, agno, bno) ||
- !xfs_verify_agbno(mp, agno, bno + len - 1))
+ !xfs_verify_agbno(pag, bno) ||
+ !xfs_verify_agbno(pag, bno + len - 1))
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
xchk_iallocbt_chunk_xref(bs->sc, irec, agino, bno, len);
@@ -421,10 +421,10 @@ xchk_iallocbt_rec(
const union xfs_btree_rec *rec)
{
struct xfs_mount *mp = bs->cur->bc_mp;
+ struct xfs_perag *pag = bs->cur->bc_ag.pag;
struct xchk_iallocbt *iabt = bs->private;
struct xfs_inobt_rec_incore irec;
uint64_t holes;
- xfs_agnumber_t agno = bs->cur->bc_ag.pag->pag_agno;
xfs_agino_t agino;
xfs_extlen_t len;
int holecount;
@@ -446,8 +446,8 @@ xchk_iallocbt_rec(
agino = irec.ir_startino;
/* Record has to be properly aligned within the AG. */
- if (!xfs_verify_agino(mp, agno, agino) ||
- !xfs_verify_agino(mp, agno, agino + XFS_INODES_PER_CHUNK - 1)) {
+ if (!xfs_verify_agino(pag, agino) ||
+ !xfs_verify_agino(pag, agino + XFS_INODES_PER_CHUNK - 1)) {
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
goto out;
}
diff --git a/fs/xfs/scrub/quota.c b/fs/xfs/scrub/quota.c
index 3c7506c7553c..21b4c9006859 100644
--- a/fs/xfs/scrub/quota.c
+++ b/fs/xfs/scrub/quota.c
@@ -185,7 +185,7 @@ xchk_quota_data_fork(
/* Check for data fork problems that apply only to quota files. */
max_dqid_off = ((xfs_dqid_t)-1) / qi->qi_dqperchunk;
- ifp = XFS_IFORK_PTR(sc->ip, XFS_DATA_FORK);
+ ifp = xfs_ifork_ptr(sc->ip, XFS_DATA_FORK);
for_each_xfs_iext(ifp, &icur, &irec) {
if (xchk_should_terminate(sc, &error))
break;
diff --git a/fs/xfs/scrub/refcount.c b/fs/xfs/scrub/refcount.c
index 2744eecdbaf0..c68b767dc08f 100644
--- a/fs/xfs/scrub/refcount.c
+++ b/fs/xfs/scrub/refcount.c
@@ -13,6 +13,8 @@
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
#include "xfs_ag.h"
/*
@@ -332,9 +334,8 @@ xchk_refcountbt_rec(
struct xchk_btree *bs,
const union xfs_btree_rec *rec)
{
- struct xfs_mount *mp = bs->cur->bc_mp;
xfs_agblock_t *cow_blocks = bs->private;
- xfs_agnumber_t agno = bs->cur->bc_ag.pag->pag_agno;
+ struct xfs_perag *pag = bs->cur->bc_ag.pag;
xfs_agblock_t bno;
xfs_extlen_t len;
xfs_nlink_t refcount;
@@ -354,8 +355,8 @@ xchk_refcountbt_rec(
/* Check the extent. */
bno &= ~XFS_REFC_COW_START;
if (bno + len <= bno ||
- !xfs_verify_agbno(mp, agno, bno) ||
- !xfs_verify_agbno(mp, agno, bno + len - 1))
+ !xfs_verify_agbno(pag, bno) ||
+ !xfs_verify_agbno(pag, bno + len - 1))
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
if (refcount == 0)
diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c
index 1e7b6b209ee8..c18bd039fce9 100644
--- a/fs/xfs/scrub/repair.c
+++ b/fs/xfs/scrub/repair.c
@@ -199,7 +199,7 @@ xrep_calc_ag_resblks(
icount = pag->pagi_count;
} else {
/* Try to get the actual counters from disk. */
- error = xfs_ialloc_read_agi(mp, NULL, sm->sm_agno, &bp);
+ error = xfs_ialloc_read_agi(pag, NULL, &bp);
if (!error) {
icount = pag->pagi_count;
xfs_buf_relse(bp);
@@ -207,9 +207,9 @@ xrep_calc_ag_resblks(
}
/* Now grab the block counters from the AGF. */
- error = xfs_alloc_read_agf(mp, NULL, sm->sm_agno, 0, &bp);
+ error = xfs_alloc_read_agf(pag, NULL, 0, &bp);
if (error) {
- aglen = xfs_ag_block_count(mp, sm->sm_agno);
+ aglen = pag->block_count;
freelen = aglen;
usedlen = aglen;
} else {
@@ -220,25 +220,22 @@ xrep_calc_ag_resblks(
usedlen = aglen - freelen;
xfs_buf_relse(bp);
}
- xfs_perag_put(pag);
/* If the icount is impossible, make some worst-case assumptions. */
if (icount == NULLAGINO ||
- !xfs_verify_agino(mp, sm->sm_agno, icount)) {
- xfs_agino_t first, last;
-
- xfs_agino_range(mp, sm->sm_agno, &first, &last);
- icount = last - first + 1;
+ !xfs_verify_agino(pag, icount)) {
+ icount = pag->agino_max - pag->agino_min + 1;
}
/* If the block counts are impossible, make worst-case assumptions. */
if (aglen == NULLAGBLOCK ||
- aglen != xfs_ag_block_count(mp, sm->sm_agno) ||
+ aglen != pag->block_count ||
freelen >= aglen) {
- aglen = xfs_ag_block_count(mp, sm->sm_agno);
+ aglen = pag->block_count;
freelen = aglen;
usedlen = aglen;
}
+ xfs_perag_put(pag);
trace_xrep_calc_ag_resblks(mp, sm->sm_agno, icount, aglen,
freelen, usedlen);
@@ -300,13 +297,13 @@ xrep_alloc_ag_block(
switch (resv) {
case XFS_AG_RESV_AGFL:
case XFS_AG_RESV_RMAPBT:
- error = xfs_alloc_get_freelist(sc->tp, sc->sa.agf_bp, &bno, 1);
+ error = xfs_alloc_get_freelist(sc->sa.pag, sc->tp,
+ sc->sa.agf_bp, &bno, 1);
if (error)
return error;
if (bno == NULLAGBLOCK)
return -ENOSPC;
- xfs_extent_busy_reuse(sc->mp, sc->sa.pag, bno,
- 1, false);
+ xfs_extent_busy_reuse(sc->mp, sc->sa.pag, bno, 1, false);
*fsbno = XFS_AGB_TO_FSB(sc->mp, sc->sa.pag->pag_agno, bno);
if (resv == XFS_AG_RESV_RMAPBT)
xfs_ag_resv_rmapbt_alloc(sc->mp, sc->sa.pag->pag_agno);
@@ -457,16 +454,19 @@ xrep_invalidate_blocks(
* assume it's owned by someone else.
*/
for_each_xbitmap_block(fsbno, bmr, n, bitmap) {
+ int error;
+
/* Skip AG headers and post-EOFS blocks */
if (!xfs_verify_fsbno(sc->mp, fsbno))
continue;
- bp = xfs_buf_incore(sc->mp->m_ddev_targp,
+ error = xfs_buf_incore(sc->mp->m_ddev_targp,
XFS_FSB_TO_DADDR(sc->mp, fsbno),
- XFS_FSB_TO_BB(sc->mp, 1), XBF_TRYLOCK);
- if (bp) {
- xfs_trans_bjoin(sc->tp, bp);
- xfs_trans_binval(sc->tp, bp);
- }
+ XFS_FSB_TO_BB(sc->mp, 1), XBF_TRYLOCK, &bp);
+ if (error)
+ continue;
+
+ xfs_trans_bjoin(sc->tp, bp);
+ xfs_trans_binval(sc->tp, bp);
}
return 0;
@@ -516,8 +516,8 @@ xrep_put_freelist(
return error;
/* Put the block on the AGFL. */
- error = xfs_alloc_put_freelist(sc->tp, sc->sa.agf_bp, sc->sa.agfl_bp,
- agbno, 0);
+ error = xfs_alloc_put_freelist(sc->sa.pag, sc->tp, sc->sa.agf_bp,
+ sc->sa.agfl_bp, agbno, 0);
if (error)
return error;
xfs_extent_busy_insert(sc->tp, sc->sa.pag, agbno, 1,
@@ -536,13 +536,12 @@ xrep_reap_block(
{
struct xfs_btree_cur *cur;
struct xfs_buf *agf_bp = NULL;
- xfs_agnumber_t agno;
xfs_agblock_t agbno;
bool has_other_rmap;
int error;
- agno = XFS_FSB_TO_AGNO(sc->mp, fsbno);
agbno = XFS_FSB_TO_AGBNO(sc->mp, fsbno);
+ ASSERT(XFS_FSB_TO_AGNO(sc->mp, fsbno) == sc->sa.pag->pag_agno);
/*
* If we are repairing per-inode metadata, we need to read in the AGF
@@ -550,7 +549,7 @@ xrep_reap_block(
* the AGF buffer that the setup functions already grabbed.
*/
if (sc->ip) {
- error = xfs_alloc_read_agf(sc->mp, sc->tp, agno, 0, &agf_bp);
+ error = xfs_alloc_read_agf(sc->sa.pag, sc->tp, 0, &agf_bp);
if (error)
return error;
} else {
diff --git a/fs/xfs/scrub/rmap.c b/fs/xfs/scrub/rmap.c
index 8dae0345c7df..229826b2e1c0 100644
--- a/fs/xfs/scrub/rmap.c
+++ b/fs/xfs/scrub/rmap.c
@@ -92,7 +92,7 @@ xchk_rmapbt_rec(
{
struct xfs_mount *mp = bs->cur->bc_mp;
struct xfs_rmap_irec irec;
- xfs_agnumber_t agno = bs->cur->bc_ag.pag->pag_agno;
+ struct xfs_perag *pag = bs->cur->bc_ag.pag;
bool non_inode;
bool is_unwritten;
bool is_bmbt;
@@ -121,8 +121,8 @@ xchk_rmapbt_rec(
* Otherwise we must point somewhere past the static metadata
* but before the end of the FS. Run the regular check.
*/
- if (!xfs_verify_agbno(mp, agno, irec.rm_startblock) ||
- !xfs_verify_agbno(mp, agno, irec.rm_startblock +
+ if (!xfs_verify_agbno(pag, irec.rm_startblock) ||
+ !xfs_verify_agbno(pag, irec.rm_startblock +
irec.rm_blockcount - 1))
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
}
diff --git a/fs/xfs/scrub/symlink.c b/fs/xfs/scrub/symlink.c
index 599ee277bba2..75311f8daeeb 100644
--- a/fs/xfs/scrub/symlink.c
+++ b/fs/xfs/scrub/symlink.c
@@ -41,7 +41,7 @@ xchk_symlink(
if (!S_ISLNK(VFS_I(ip)->i_mode))
return -ENOENT;
- ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+ ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
len = ip->i_disk_size;
/* Plausible size? */
@@ -52,8 +52,8 @@ xchk_symlink(
/* Inline symlink? */
if (ifp->if_format == XFS_DINODE_FMT_LOCAL) {
- if (len > XFS_IFORK_DSIZE(ip) ||
- len > strnlen(ifp->if_u1.if_data, XFS_IFORK_DSIZE(ip)))
+ if (len > xfs_inode_data_fork_size(ip) ||
+ len > strnlen(ifp->if_u1.if_data, xfs_inode_data_fork_size(ip)))
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
goto out;
}
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
index 27265771f247..5db87b34fb6e 100644
--- a/fs/xfs/xfs_attr_inactive.c
+++ b/fs/xfs/xfs_attr_inactive.c
@@ -158,6 +158,7 @@ xfs_attr3_node_inactive(
}
child_fsb = be32_to_cpu(ichdr.btree[0].before);
xfs_trans_brelse(*trans, bp); /* no locks for later trans */
+ bp = NULL;
/*
* If this is the node level just above the leaves, simply loop
@@ -211,12 +212,8 @@ xfs_attr3_node_inactive(
&child_bp);
if (error)
return error;
- error = bp->b_error;
- if (error) {
- xfs_trans_brelse(*trans, child_bp);
- return error;
- }
xfs_trans_binval(*trans, child_bp);
+ child_bp = NULL;
/*
* If we're not done, re-read the parent to get the next
@@ -233,6 +230,7 @@ xfs_attr3_node_inactive(
bp->b_addr);
child_fsb = be32_to_cpu(phdr.btree[i + 1].before);
xfs_trans_brelse(*trans, bp);
+ bp = NULL;
}
/*
* Atomically commit the whole invalidate stuff.
@@ -338,7 +336,7 @@ xfs_attr_inactive(
ASSERT(! XFS_NOT_DQATTACHED(mp, dp));
xfs_ilock(dp, lock_mode);
- if (!XFS_IFORK_Q(dp))
+ if (!xfs_inode_has_attr_fork(dp))
goto out_destroy_fork;
xfs_iunlock(dp, lock_mode);
@@ -351,7 +349,7 @@ xfs_attr_inactive(
lock_mode = XFS_ILOCK_EXCL;
xfs_ilock(dp, lock_mode);
- if (!XFS_IFORK_Q(dp))
+ if (!xfs_inode_has_attr_fork(dp))
goto out_cancel;
/*
@@ -362,12 +360,11 @@ xfs_attr_inactive(
/*
* Invalidate and truncate the attribute fork extents. Make sure the
- * fork actually has attributes as otherwise the invalidation has no
+ * fork actually has xattr blocks as otherwise the invalidation has no
* blocks to read and returns an error. In this case, just do the fork
* removal below.
*/
- if (xfs_inode_hasattr(dp) &&
- dp->i_afp->if_format != XFS_DINODE_FMT_LOCAL) {
+ if (dp->i_af.if_nextents > 0) {
error = xfs_attr3_root_inactive(&trans, dp);
if (error)
goto out_cancel;
@@ -388,11 +385,7 @@ out_cancel:
xfs_trans_cancel(trans);
out_destroy_fork:
/* kill the in-core attr fork before we drop the inode lock */
- if (dp->i_afp) {
- xfs_idestroy_fork(dp->i_afp);
- kmem_cache_free(xfs_ifork_cache, dp->i_afp);
- dp->i_afp = NULL;
- }
+ xfs_ifork_zap_attr(dp);
if (lock_mode)
xfs_iunlock(dp, lock_mode);
return error;
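
The attr-fork teardown above reflects the wider change in this series: the separately allocated ip->i_afp fork is replaced by an xfs_ifork embedded in the inode as ip->i_af, presence of an attr fork is tested with xfs_inode_has_attr_fork() (i_forkoff > 0) rather than a NULL pointer check, and teardown goes through xfs_ifork_zap_attr(). A hedged before/after sketch of the caller pattern consolidated from the hunk:

	/* Old style (removed above): the attr fork lived behind a pointer. */
	if (dp->i_afp) {
		xfs_idestroy_fork(dp->i_afp);
		kmem_cache_free(xfs_ifork_cache, dp->i_afp);
		dp->i_afp = NULL;
	}

	/* New style: the fork is embedded, one helper resets it in place. */
	xfs_ifork_zap_attr(dp);
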
diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
index 90a14e85e76d..99bbbe1a0e44 100644
--- a/fs/xfs/xfs_attr_list.c
+++ b/fs/xfs/xfs_attr_list.c
@@ -61,8 +61,7 @@ xfs_attr_shortform_list(
int sbsize, nsbuf, count, i;
int error = 0;
- ASSERT(dp->i_afp != NULL);
- sf = (struct xfs_attr_shortform *)dp->i_afp->if_u1.if_data;
+ sf = (struct xfs_attr_shortform *)dp->i_af.if_u1.if_data;
ASSERT(sf != NULL);
if (!sf->hdr.count)
return 0;
@@ -80,7 +79,7 @@ xfs_attr_shortform_list(
*/
if (context->bufsize == 0 ||
(XFS_ISRESET_CURSOR(cursor) &&
- (dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize)) {
+ (dp->i_af.if_bytes + sf->hdr.count * 16) < context->bufsize)) {
for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
if (XFS_IS_CORRUPT(context->dp->i_mount,
!xfs_attr_namecheck(sfe->nameval,
@@ -121,7 +120,7 @@ xfs_attr_shortform_list(
for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
if (unlikely(
((char *)sfe < (char *)sf) ||
- ((char *)sfe >= ((char *)sf + dp->i_afp->if_bytes)))) {
+ ((char *)sfe >= ((char *)sf + dp->i_af.if_bytes)))) {
XFS_CORRUPTION_ERROR("xfs_attr_shortform_list",
XFS_ERRLEVEL_LOW,
context->dp->i_mount, sfe,
@@ -513,7 +512,7 @@ xfs_attr_list_ilocked(
*/
if (!xfs_inode_hasattr(dp))
return 0;
- if (dp->i_afp->if_format == XFS_DINODE_FMT_LOCAL)
+ if (dp->i_af.if_format == XFS_DINODE_FMT_LOCAL)
return xfs_attr_shortform_list(context);
if (xfs_attr_is_leaf(dp))
return xfs_attr_leaf_list(context);
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 85e1a26c92e8..04d0c2bff67c 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -256,7 +256,7 @@ xfs_bmap_count_blocks(
xfs_filblks_t *count)
{
struct xfs_mount *mp = ip->i_mount;
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
struct xfs_btree_cur *cur;
xfs_extlen_t btblocks = 0;
int error;
@@ -439,29 +439,28 @@ xfs_getbmap(
whichfork = XFS_COW_FORK;
else
whichfork = XFS_DATA_FORK;
- ifp = XFS_IFORK_PTR(ip, whichfork);
xfs_ilock(ip, XFS_IOLOCK_SHARED);
switch (whichfork) {
case XFS_ATTR_FORK:
- if (!XFS_IFORK_Q(ip))
- goto out_unlock_iolock;
+ lock = xfs_ilock_attr_map_shared(ip);
+ if (!xfs_inode_has_attr_fork(ip))
+ goto out_unlock_ilock;
max_len = 1LL << 32;
- lock = xfs_ilock_attr_map_shared(ip);
break;
case XFS_COW_FORK:
+ lock = XFS_ILOCK_SHARED;
+ xfs_ilock(ip, lock);
+
/* No CoW fork? Just return */
- if (!ifp)
- goto out_unlock_iolock;
+ if (!xfs_ifork_ptr(ip, whichfork))
+ goto out_unlock_ilock;
if (xfs_get_cowextsz_hint(ip))
max_len = mp->m_super->s_maxbytes;
else
max_len = XFS_ISIZE(ip);
-
- lock = XFS_ILOCK_SHARED;
- xfs_ilock(ip, lock);
break;
case XFS_DATA_FORK:
if (!(iflags & BMV_IF_DELALLOC) &&
@@ -491,6 +490,8 @@ xfs_getbmap(
break;
}
+ ifp = xfs_ifork_ptr(ip, whichfork);
+
switch (ifp->if_format) {
case XFS_DINODE_FMT_EXTENTS:
case XFS_DINODE_FMT_BTREE:
@@ -1320,8 +1321,8 @@ xfs_swap_extents_check_format(
* extent format...
*/
if (tifp->if_format == XFS_DINODE_FMT_BTREE) {
- if (XFS_IFORK_Q(ip) &&
- XFS_BMAP_BMDR_SPACE(tifp->if_broot) > XFS_IFORK_BOFF(ip))
+ if (xfs_inode_has_attr_fork(ip) &&
+ XFS_BMAP_BMDR_SPACE(tifp->if_broot) > xfs_inode_fork_boff(ip))
return -EINVAL;
if (tifp->if_nextents <= XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
return -EINVAL;
@@ -1329,8 +1330,8 @@ xfs_swap_extents_check_format(
/* Reciprocal target->temp btree format checks */
if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
- if (XFS_IFORK_Q(tip) &&
- XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
+ if (xfs_inode_has_attr_fork(tip) &&
+ XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > xfs_inode_fork_boff(tip))
return -EINVAL;
if (ifp->if_nextents <= XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
return -EINVAL;
@@ -1506,15 +1507,15 @@ xfs_swap_extent_forks(
/*
* Count the number of extended attribute blocks
*/
- if (XFS_IFORK_Q(ip) && ip->i_afp->if_nextents > 0 &&
- ip->i_afp->if_format != XFS_DINODE_FMT_LOCAL) {
+ if (xfs_inode_has_attr_fork(ip) && ip->i_af.if_nextents > 0 &&
+ ip->i_af.if_format != XFS_DINODE_FMT_LOCAL) {
error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
&aforkblks);
if (error)
return error;
}
- if (XFS_IFORK_Q(tip) && tip->i_afp->if_nextents > 0 &&
- tip->i_afp->if_format != XFS_DINODE_FMT_LOCAL) {
+ if (xfs_inode_has_attr_fork(tip) && tip->i_af.if_nextents > 0 &&
+ tip->i_af.if_format != XFS_DINODE_FMT_LOCAL) {
error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
&taforkblks);
if (error)
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 5e8f40d8c052..dde346450952 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -5,6 +5,7 @@
*/
#include "xfs.h"
#include <linux/backing-dev.h>
+#include <linux/dax.h>
#include "xfs_shared.h"
#include "xfs_format.h"
@@ -21,7 +22,7 @@
#include "xfs_error.h"
#include "xfs_ag.h"
-static struct kmem_cache *xfs_buf_cache;
+struct kmem_cache *xfs_buf_cache;
/*
* Locking orders
@@ -295,6 +296,16 @@ xfs_buf_free_pages(
}
static void
+xfs_buf_free_callback(
+ struct callback_head *cb)
+{
+ struct xfs_buf *bp = container_of(cb, struct xfs_buf, b_rcu);
+
+ xfs_buf_free_maps(bp);
+ kmem_cache_free(xfs_buf_cache, bp);
+}
+
+static void
xfs_buf_free(
struct xfs_buf *bp)
{
@@ -307,8 +318,7 @@ xfs_buf_free(
else if (bp->b_flags & _XBF_KMEM)
kmem_free(bp->b_addr);
- xfs_buf_free_maps(bp);
- kmem_cache_free(xfs_buf_cache, bp);
+ call_rcu(&bp->b_rcu, xfs_buf_free_callback);
}
static int
@@ -503,100 +513,45 @@ xfs_buf_hash_destroy(
rhashtable_destroy(&pag->pag_buf_hash);
}
-/*
- * Look up a buffer in the buffer cache and return it referenced and locked
- * in @found_bp.
- *
- * If @new_bp is supplied and we have a lookup miss, insert @new_bp into the
- * cache.
- *
- * If XBF_TRYLOCK is set in @flags, only try to lock the buffer and return
- * -EAGAIN if we fail to lock it.
- *
- * Return values are:
- * -EFSCORRUPTED if have been supplied with an invalid address
- * -EAGAIN on trylock failure
- * -ENOENT if we fail to find a match and @new_bp was NULL
- * 0, with @found_bp:
- * - @new_bp if we inserted it into the cache
- * - the buffer we found and locked.
- */
static int
-xfs_buf_find(
+xfs_buf_map_verify(
struct xfs_buftarg *btp,
- struct xfs_buf_map *map,
- int nmaps,
- xfs_buf_flags_t flags,
- struct xfs_buf *new_bp,
- struct xfs_buf **found_bp)
+ struct xfs_buf_map *map)
{
- struct xfs_perag *pag;
- struct xfs_buf *bp;
- struct xfs_buf_map cmap = { .bm_bn = map[0].bm_bn };
xfs_daddr_t eofs;
- int i;
-
- *found_bp = NULL;
-
- for (i = 0; i < nmaps; i++)
- cmap.bm_len += map[i].bm_len;
/* Check for IOs smaller than the sector size / not sector aligned */
- ASSERT(!(BBTOB(cmap.bm_len) < btp->bt_meta_sectorsize));
- ASSERT(!(BBTOB(cmap.bm_bn) & (xfs_off_t)btp->bt_meta_sectormask));
+ ASSERT(!(BBTOB(map->bm_len) < btp->bt_meta_sectorsize));
+ ASSERT(!(BBTOB(map->bm_bn) & (xfs_off_t)btp->bt_meta_sectormask));
/*
* Corrupted block numbers can get through to here, unfortunately, so we
* have to check that the buffer falls within the filesystem bounds.
*/
eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
- if (cmap.bm_bn < 0 || cmap.bm_bn >= eofs) {
+ if (map->bm_bn < 0 || map->bm_bn >= eofs) {
xfs_alert(btp->bt_mount,
"%s: daddr 0x%llx out of range, EOFS 0x%llx",
- __func__, cmap.bm_bn, eofs);
+ __func__, map->bm_bn, eofs);
WARN_ON(1);
return -EFSCORRUPTED;
}
-
- pag = xfs_perag_get(btp->bt_mount,
- xfs_daddr_to_agno(btp->bt_mount, cmap.bm_bn));
-
- spin_lock(&pag->pag_buf_lock);
- bp = rhashtable_lookup_fast(&pag->pag_buf_hash, &cmap,
- xfs_buf_hash_params);
- if (bp) {
- atomic_inc(&bp->b_hold);
- goto found;
- }
-
- /* No match found */
- if (!new_bp) {
- XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
- spin_unlock(&pag->pag_buf_lock);
- xfs_perag_put(pag);
- return -ENOENT;
- }
-
- /* the buffer keeps the perag reference until it is freed */
- new_bp->b_pag = pag;
- rhashtable_insert_fast(&pag->pag_buf_hash, &new_bp->b_rhash_head,
- xfs_buf_hash_params);
- spin_unlock(&pag->pag_buf_lock);
- *found_bp = new_bp;
return 0;
+}
-found:
- spin_unlock(&pag->pag_buf_lock);
- xfs_perag_put(pag);
-
- if (!xfs_buf_trylock(bp)) {
- if (flags & XBF_TRYLOCK) {
- xfs_buf_rele(bp);
- XFS_STATS_INC(btp->bt_mount, xb_busy_locked);
+static int
+xfs_buf_find_lock(
+ struct xfs_buf *bp,
+ xfs_buf_flags_t flags)
+{
+ if (flags & XBF_TRYLOCK) {
+ if (!xfs_buf_trylock(bp)) {
+ XFS_STATS_INC(bp->b_mount, xb_busy_locked);
return -EAGAIN;
}
+ } else {
xfs_buf_lock(bp);
- XFS_STATS_INC(btp->bt_mount, xb_get_locked_waited);
+ XFS_STATS_INC(bp->b_mount, xb_get_locked_waited);
}
/*
@@ -609,57 +564,59 @@ found:
bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
bp->b_ops = NULL;
}
-
- trace_xfs_buf_find(bp, flags, _RET_IP_);
- XFS_STATS_INC(btp->bt_mount, xb_get_locked);
- *found_bp = bp;
return 0;
}
-struct xfs_buf *
-xfs_buf_incore(
- struct xfs_buftarg *target,
- xfs_daddr_t blkno,
- size_t numblks,
- xfs_buf_flags_t flags)
+static inline int
+xfs_buf_lookup(
+ struct xfs_perag *pag,
+ struct xfs_buf_map *map,
+ xfs_buf_flags_t flags,
+ struct xfs_buf **bpp)
{
- struct xfs_buf *bp;
+ struct xfs_buf *bp;
int error;
- DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
- error = xfs_buf_find(target, &map, 1, flags, NULL, &bp);
- if (error)
- return NULL;
- return bp;
+ rcu_read_lock();
+ bp = rhashtable_lookup(&pag->pag_buf_hash, map, xfs_buf_hash_params);
+ if (!bp || !atomic_inc_not_zero(&bp->b_hold)) {
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+ rcu_read_unlock();
+
+ error = xfs_buf_find_lock(bp, flags);
+ if (error) {
+ xfs_buf_rele(bp);
+ return error;
+ }
+
+ trace_xfs_buf_find(bp, flags, _RET_IP_);
+ *bpp = bp;
+ return 0;
}
/*
- * Assembles a buffer covering the specified range. The code is optimised for
- * cache hits, as metadata intensive workloads will see 3 orders of magnitude
- * more hits than misses.
+ * Insert the new_bp into the hash table. This consumes the perag reference
+ * taken for the lookup regardless of the result of the insert.
*/
-int
-xfs_buf_get_map(
- struct xfs_buftarg *target,
+static int
+xfs_buf_find_insert(
+ struct xfs_buftarg *btp,
+ struct xfs_perag *pag,
+ struct xfs_buf_map *cmap,
struct xfs_buf_map *map,
int nmaps,
xfs_buf_flags_t flags,
struct xfs_buf **bpp)
{
- struct xfs_buf *bp;
struct xfs_buf *new_bp;
+ struct xfs_buf *bp;
int error;
- *bpp = NULL;
- error = xfs_buf_find(target, map, nmaps, flags, NULL, &bp);
- if (!error)
- goto found;
- if (error != -ENOENT)
- return error;
-
- error = _xfs_buf_alloc(target, map, nmaps, flags, &new_bp);
+ error = _xfs_buf_alloc(btp, map, nmaps, flags, &new_bp);
if (error)
- return error;
+ goto out_drop_pag;
/*
* For buffers that fit entirely within a single page, first attempt to
@@ -674,18 +631,94 @@ xfs_buf_get_map(
goto out_free_buf;
}
- error = xfs_buf_find(target, map, nmaps, flags, new_bp, &bp);
- if (error)
+ spin_lock(&pag->pag_buf_lock);
+ bp = rhashtable_lookup_get_insert_fast(&pag->pag_buf_hash,
+ &new_bp->b_rhash_head, xfs_buf_hash_params);
+ if (IS_ERR(bp)) {
+ error = PTR_ERR(bp);
+ spin_unlock(&pag->pag_buf_lock);
goto out_free_buf;
+ }
+ if (bp) {
+ /* found an existing buffer */
+ atomic_inc(&bp->b_hold);
+ spin_unlock(&pag->pag_buf_lock);
+ error = xfs_buf_find_lock(bp, flags);
+ if (error)
+ xfs_buf_rele(bp);
+ else
+ *bpp = bp;
+ goto out_free_buf;
+ }
+
+ /* The new buffer keeps the perag reference until it is freed. */
+ new_bp->b_pag = pag;
+ spin_unlock(&pag->pag_buf_lock);
+ *bpp = new_bp;
+ return 0;
- if (bp != new_bp)
- xfs_buf_free(new_bp);
+out_free_buf:
+ xfs_buf_free(new_bp);
+out_drop_pag:
+ xfs_perag_put(pag);
+ return error;
+}
-found:
+/*
+ * Assembles a buffer covering the specified range. The code is optimised for
+ * cache hits, as metadata intensive workloads will see 3 orders of magnitude
+ * more hits than misses.
+ */
+int
+xfs_buf_get_map(
+ struct xfs_buftarg *btp,
+ struct xfs_buf_map *map,
+ int nmaps,
+ xfs_buf_flags_t flags,
+ struct xfs_buf **bpp)
+{
+ struct xfs_perag *pag;
+ struct xfs_buf *bp = NULL;
+ struct xfs_buf_map cmap = { .bm_bn = map[0].bm_bn };
+ int error;
+ int i;
+
+ for (i = 0; i < nmaps; i++)
+ cmap.bm_len += map[i].bm_len;
+
+ error = xfs_buf_map_verify(btp, &cmap);
+ if (error)
+ return error;
+
+ pag = xfs_perag_get(btp->bt_mount,
+ xfs_daddr_to_agno(btp->bt_mount, cmap.bm_bn));
+
+ error = xfs_buf_lookup(pag, &cmap, flags, &bp);
+ if (error && error != -ENOENT)
+ goto out_put_perag;
+
+ /* cache hits always outnumber misses by at least 10:1 */
+ if (unlikely(!bp)) {
+ XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
+
+ if (flags & XBF_INCORE)
+ goto out_put_perag;
+
+ /* xfs_buf_find_insert() consumes the perag reference. */
+ error = xfs_buf_find_insert(btp, pag, &cmap, map, nmaps,
+ flags, &bp);
+ if (error)
+ return error;
+ } else {
+ XFS_STATS_INC(btp->bt_mount, xb_get_locked);
+ xfs_perag_put(pag);
+ }
+
+ /* We do not hold a perag reference anymore. */
if (!bp->b_addr) {
error = _xfs_buf_map_pages(bp, flags);
if (unlikely(error)) {
- xfs_warn_ratelimited(target->bt_mount,
+ xfs_warn_ratelimited(btp->bt_mount,
"%s: failed to map %u pages", __func__,
bp->b_page_count);
xfs_buf_relse(bp);
@@ -700,12 +733,13 @@ found:
if (!(flags & XBF_READ))
xfs_buf_ioerror(bp, 0);
- XFS_STATS_INC(target->bt_mount, xb_get);
+ XFS_STATS_INC(btp->bt_mount, xb_get);
trace_xfs_buf_get(bp, flags, _RET_IP_);
*bpp = bp;
return 0;
-out_free_buf:
- xfs_buf_free(new_bp);
+
+out_put_perag:
+ xfs_perag_put(pag);
return error;
}
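
The xfs_buf_get_map() rework above splits the old xfs_buf_find() into a lockless fast path (xfs_buf_lookup) and a slow insert path (xfs_buf_find_insert). The fast path can drop pag_buf_lock because a hit is validated with RCU plus a conditional hold-count increment, and buffers are now freed via call_rcu() (see the xfs_buf_free() hunk above), so a looked-up pointer cannot be reused while a reader still sits inside the RCU read-side critical section. The core of the pattern, as used above:

	/*
	 * Lockless lookup sketch: a hit is only valid if we can take a
	 * hold reference while the hold count is still non-zero. A zero
	 * count means the buffer is being torn down and must be treated
	 * as a miss; the RCU grace period keeps the memory valid for the
	 * duration of this critical section.
	 */
	rcu_read_lock();
	bp = rhashtable_lookup(&pag->pag_buf_hash, map, xfs_buf_hash_params);
	if (!bp || !atomic_inc_not_zero(&bp->b_hold)) {
		rcu_read_unlock();
		return -ENOENT;
	}
	rcu_read_unlock();
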
@@ -1911,7 +1945,7 @@ xfs_free_buftarg(
list_lru_destroy(&btp->bt_lru);
blkdev_issue_flush(btp->bt_bdev);
- fs_put_dax(btp->bt_daxdev);
+ fs_put_dax(btp->bt_daxdev, btp->bt_mount);
kmem_free(btp);
}
@@ -1958,13 +1992,18 @@ xfs_alloc_buftarg(
struct block_device *bdev)
{
xfs_buftarg_t *btp;
+ const struct dax_holder_operations *ops = NULL;
+#if defined(CONFIG_FS_DAX) && defined(CONFIG_MEMORY_FAILURE)
+ ops = &xfs_dax_holder_operations;
+#endif
btp = kmem_zalloc(sizeof(*btp), KM_NOFS);
btp->bt_mount = mp;
btp->bt_dev = bdev->bd_dev;
btp->bt_bdev = bdev;
- btp->bt_daxdev = fs_dax_get_by_bdev(bdev, &btp->bt_dax_part_off);
+ btp->bt_daxdev = fs_dax_get_by_bdev(bdev, &btp->bt_dax_part_off,
+ mp, ops);
/*
* Buffer IO error rate limiting. Limit it to no more than 10 messages
@@ -1986,7 +2025,8 @@ xfs_alloc_buftarg(
btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
btp->bt_shrinker.seeks = DEFAULT_SEEKS;
btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE;
- if (register_shrinker(&btp->bt_shrinker))
+ if (register_shrinker(&btp->bt_shrinker, "xfs-buf:%s",
+ mp->m_super->s_id))
goto error_pcpu;
return btp;
@@ -2275,29 +2315,6 @@ xfs_buf_delwri_pushbuf(
return error;
}
-int __init
-xfs_buf_init(void)
-{
- xfs_buf_cache = kmem_cache_create("xfs_buf", sizeof(struct xfs_buf), 0,
- SLAB_HWCACHE_ALIGN |
- SLAB_RECLAIM_ACCOUNT |
- SLAB_MEM_SPREAD,
- NULL);
- if (!xfs_buf_cache)
- goto out;
-
- return 0;
-
- out:
- return -ENOMEM;
-}
-
-void
-xfs_buf_terminate(void)
-{
- kmem_cache_destroy(xfs_buf_cache);
-}
-
void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)
{
/*
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 1ee3056ff9cf..549c60942208 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -15,6 +15,8 @@
#include <linux/uio.h>
#include <linux/list_lru.h>
+extern struct kmem_cache *xfs_buf_cache;
+
/*
* Base types
*/
@@ -42,9 +44,11 @@ struct xfs_buf;
#define _XBF_DELWRI_Q (1u << 22)/* buffer on a delwri queue */
/* flags used only as arguments to access routines */
+#define XBF_INCORE (1u << 29)/* lookup only, return if found in cache */
#define XBF_TRYLOCK (1u << 30)/* lock requested, but do not wait */
#define XBF_UNMAPPED (1u << 31)/* do not map the buffer */
+
typedef unsigned int xfs_buf_flags_t;
#define XFS_BUF_FLAGS \
@@ -63,6 +67,7 @@ typedef unsigned int xfs_buf_flags_t;
{ _XBF_KMEM, "KMEM" }, \
{ _XBF_DELWRI_Q, "DELWRI_Q" }, \
/* The following interface flags should never be set */ \
+ { XBF_INCORE, "INCORE" }, \
{ XBF_TRYLOCK, "TRYLOCK" }, \
{ XBF_UNMAPPED, "UNMAPPED" }
@@ -193,13 +198,10 @@ struct xfs_buf {
int b_last_error;
const struct xfs_buf_ops *b_ops;
+ struct rcu_head b_rcu;
};
/* Finding and Reading Buffers */
-struct xfs_buf *xfs_buf_incore(struct xfs_buftarg *target,
- xfs_daddr_t blkno, size_t numblks,
- xfs_buf_flags_t flags);
-
int xfs_buf_get_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp);
int xfs_buf_read_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
@@ -210,6 +212,19 @@ void xfs_buf_readahead_map(struct xfs_buftarg *target,
const struct xfs_buf_ops *ops);
static inline int
+xfs_buf_incore(
+ struct xfs_buftarg *target,
+ xfs_daddr_t blkno,
+ size_t numblks,
+ xfs_buf_flags_t flags,
+ struct xfs_buf **bpp)
+{
+ DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
+
+ return xfs_buf_get_map(target, &map, 1, XBF_INCORE | flags, bpp);
+}
+
+static inline int
xfs_buf_get(
struct xfs_buftarg *target,
xfs_daddr_t blkno,
@@ -294,10 +309,6 @@ extern int xfs_buf_delwri_submit(struct list_head *);
extern int xfs_buf_delwri_submit_nowait(struct list_head *);
extern int xfs_buf_delwri_pushbuf(struct xfs_buf *, struct list_head *);
-/* Buffer Daemon Setup Routines */
-extern int xfs_buf_init(void);
-extern void xfs_buf_terminate(void);
-
static inline xfs_daddr_t xfs_buf_daddr(struct xfs_buf *bp)
{
return bp->b_maps[0].bm_bn;
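
With the XBF_INCORE flag added above, xfs_buf_incore() becomes a thin inline wrapper around xfs_buf_get_map() that turns a cache miss into an early return instead of allocating a new buffer. A hypothetical caller (the daddr variable here is an assumed local, not code from the series):

	/*
	 * Usage sketch: look up a single-FSB buffer that may or may not
	 * be cached, without blocking on its lock.
	 */
	struct xfs_buf	*bp;
	int		error;

	error = xfs_buf_incore(mp->m_ddev_targp, daddr,
			XFS_FSB_TO_BB(mp, 1), XBF_TRYLOCK, &bp);
	if (error)
		return 0;	/* not cached (or busy): nothing to do */

	/* ... use bp ... */
	xfs_buf_relse(bp);
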
diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
index a7174a5b3203..e295fc8062d8 100644
--- a/fs/xfs/xfs_dir2_readdir.c
+++ b/fs/xfs/xfs_dir2_readdir.c
@@ -248,7 +248,7 @@ xfs_dir2_leaf_readbuf(
struct xfs_inode *dp = args->dp;
struct xfs_buf *bp = NULL;
struct xfs_da_geometry *geo = args->geo;
- struct xfs_ifork *ifp = XFS_IFORK_PTR(dp, XFS_DATA_FORK);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(dp, XFS_DATA_FORK);
struct xfs_bmbt_irec map;
struct blk_plug plug;
xfs_dir2_off_t new_off;
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index c6fe3f6ebb6b..bfc829c07f03 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -45,7 +45,7 @@ xfs_trim_extents(
*/
xfs_log_force(mp, XFS_LOG_SYNC);
- error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
+ error = xfs_alloc_read_agf(pag, NULL, 0, &agbp);
if (error)
goto out_put_perag;
agf = agbp->b_addr;
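
xfs_trim_extents() above shows the new perag-centric AGF read: xfs_alloc_read_agf() now takes the perag (which already carries the AG number and mount) instead of (mp, agno). Where a caller does not already hold a perag reference, it has to take one around the call, as the xfs_extfree_item.c hunk below does. A reduced sketch of that get/put pattern:

	/*
	 * Sketch of the perag get/put dance around the AGF read, as done
	 * in xfs_agfl_free_finish_item() further down.
	 */
	struct xfs_perag	*pag;
	struct xfs_buf		*agbp;
	int			error;

	pag = xfs_perag_get(mp, agno);
	error = xfs_alloc_read_agf(pag, tp, 0, &agbp);
	if (!error)
		error = xfs_free_agfl_block(tp, agno, agbno, agbp, &oinfo);
	xfs_perag_put(pag);
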
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 5a6c3c3c4de2..8fb90da89787 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -549,7 +549,7 @@ xfs_dquot_check_type(
* at the same time. The non-user quota file can be switched between
* group and project quota uses depending on the mount options, which
* means that we can encounter the other type when we try to load quota
- * defaults. Quotacheck will soon reset the the entire quota file
+ * defaults. Quotacheck will soon reset the entire quota file
* (including the root dquot) anyway, but don't log scary corruption
* reports to dmesg.
*/
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index 765be054dffe..27ccfcd82f04 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -11,6 +11,7 @@
#include "xfs_bit.h"
#include "xfs_shared.h"
#include "xfs_mount.h"
+#include "xfs_ag.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
@@ -187,12 +188,12 @@ xfs_efi_copy_format(xfs_log_iovec_t *buf, xfs_efi_log_format_t *dst_efi_fmt)
{
xfs_efi_log_format_t *src_efi_fmt = buf->i_addr;
uint i;
- uint len = sizeof(xfs_efi_log_format_t) +
- (src_efi_fmt->efi_nextents - 1) * sizeof(xfs_extent_t);
- uint len32 = sizeof(xfs_efi_log_format_32_t) +
- (src_efi_fmt->efi_nextents - 1) * sizeof(xfs_extent_32_t);
- uint len64 = sizeof(xfs_efi_log_format_64_t) +
- (src_efi_fmt->efi_nextents - 1) * sizeof(xfs_extent_64_t);
+ uint len = sizeof(xfs_efi_log_format_t) +
+ (src_efi_fmt->efi_nextents - 1) * sizeof(xfs_extent_t);
+ uint len32 = sizeof(xfs_efi_log_format_32_t) +
+ (src_efi_fmt->efi_nextents - 1) * sizeof(xfs_extent_32_t);
+ uint len64 = sizeof(xfs_efi_log_format_64_t) +
+ (src_efi_fmt->efi_nextents - 1) * sizeof(xfs_extent_64_t);
if (buf->i_len == len) {
memcpy((char *)dst_efi_fmt, (char*)src_efi_fmt, len);
@@ -551,6 +552,7 @@ xfs_agfl_free_finish_item(
xfs_agnumber_t agno;
xfs_agblock_t agbno;
uint next_extent;
+ struct xfs_perag *pag;
free = container_of(item, struct xfs_extent_free_item, xefi_list);
ASSERT(free->xefi_blockcount == 1);
@@ -560,9 +562,11 @@ xfs_agfl_free_finish_item(
trace_xfs_agfl_free_deferred(mp, agno, 0, agbno, free->xefi_blockcount);
- error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
+ pag = xfs_perag_get(mp, agno);
+ error = xfs_alloc_read_agf(pag, tp, 0, &agbp);
if (!error)
error = xfs_free_agfl_block(tp, agno, agbno, agbp, &oinfo);
+ xfs_perag_put(pag);
/*
* Mark the transaction dirty, even on error. This ensures the
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 8d9b14d2b912..c6c80265c0b2 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -25,6 +25,7 @@
#include "xfs_iomap.h"
#include "xfs_reflink.h"
+#include <linux/dax.h>
#include <linux/falloc.h>
#include <linux/backing-dev.h>
#include <linux/mman.h>
@@ -142,7 +143,7 @@ xfs_file_fsync(
{
struct xfs_inode *ip = XFS_I(file->f_mapping->host);
struct xfs_mount *mp = ip->i_mount;
- int error = 0;
+ int error, err2;
int log_flushed = 0;
trace_xfs_file_fsync(ip);
@@ -163,18 +164,21 @@ xfs_file_fsync(
* inode size in case of an extending write.
*/
if (XFS_IS_REALTIME_INODE(ip))
- blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev);
+ error = blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev);
else if (mp->m_logdev_targp != mp->m_ddev_targp)
- blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
+ error = blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
/*
* Any inode that has dirty modifications in the log is pinned. The
- * racy check here for a pinned inode while not catch modifications
+ * racy check here for a pinned inode will not catch modifications
* that happen concurrently to the fsync call, but fsync semantics
* only require to sync previously completed I/O.
*/
- if (xfs_ipincount(ip))
- error = xfs_fsync_flush_log(ip, datasync, &log_flushed);
+ if (xfs_ipincount(ip)) {
+ err2 = xfs_fsync_flush_log(ip, datasync, &log_flushed);
+ if (err2 && !error)
+ error = err2;
+ }
/*
* If we only have a single device, and the log force about was
@@ -184,8 +188,11 @@ xfs_file_fsync(
* commit.
*/
if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
- mp->m_logdev_targp == mp->m_ddev_targp)
- blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
+ mp->m_logdev_targp == mp->m_ddev_targp) {
+ err2 = blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
+ if (err2 && !error)
+ error = err2;
+ }
return error;
}
@@ -669,7 +676,7 @@ xfs_file_dax_write(
pos = iocb->ki_pos;
trace_xfs_file_dax_write(iocb, from);
- ret = dax_iomap_rw(iocb, from, &xfs_direct_write_iomap_ops);
+ ret = dax_iomap_rw(iocb, from, &xfs_dax_write_iomap_ops);
if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
i_size_write(inode, iocb->ki_pos);
error = xfs_setfilesize(ip, pos, ret);
@@ -806,7 +813,7 @@ xfs_wait_dax_page(
xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
}
-static int
+int
xfs_break_dax_layouts(
struct inode *inode,
bool *retry)
@@ -1253,6 +1260,31 @@ xfs_file_llseek(
return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}
+#ifdef CONFIG_FS_DAX
+static int
+xfs_dax_fault(
+ struct vm_fault *vmf,
+ enum page_entry_size pe_size,
+ bool write_fault,
+ pfn_t *pfn)
+{
+ return dax_iomap_fault(vmf, pe_size, pfn, NULL,
+ (write_fault && !vmf->cow_page) ?
+ &xfs_dax_write_iomap_ops :
+ &xfs_read_iomap_ops);
+}
+#else
+static int
+xfs_dax_fault(
+ struct vm_fault *vmf,
+ enum page_entry_size pe_size,
+ bool write_fault,
+ pfn_t *pfn)
+{
+ return 0;
+}
+#endif
+
/*
* Locking for serialisation of IO during page faults. This results in a lock
* ordering of:
@@ -1284,10 +1316,7 @@ __xfs_filemap_fault(
pfn_t pfn;
xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
- ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL,
- (write_fault && !vmf->cow_page) ?
- &xfs_direct_write_iomap_ops :
- &xfs_read_iomap_ops);
+ ret = xfs_dax_fault(vmf, pe_size, write_fault, &pfn);
if (ret & VM_FAULT_NEEDDSYNC)
ret = dax_finish_sync_fault(vmf, pe_size, pfn);
xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
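
The xfs_file_fsync() changes above stop discarding cache-flush errors: the first failure is kept, and later steps (log flush, post-log cache flush) are still run, folding their result in through a secondary err2 variable only if nothing failed earlier. The pattern, reduced to its essentials (flush_log() and data_bdev below are placeholders, not names from the series):

	/*
	 * Error-aggregation sketch: remember the first failure but keep
	 * running the remaining flush steps so the device state is as
	 * clean as possible.
	 */
	int	error, err2;

	error = blkdev_issue_flush(data_bdev);	/* may fail */

	err2 = flush_log();			/* stands in for xfs_fsync_flush_log() */
	if (err2 && !error)
		error = err2;

	return error;
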
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index be9bcf8a1f99..34b21a29c39b 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -126,7 +126,7 @@ xfs_filestream_pick_ag(
pag = xfs_perag_get(mp, ag);
if (!pag->pagf_init) {
- err = xfs_alloc_pagf_init(mp, NULL, ag, trylock);
+ err = xfs_alloc_read_agf(pag, NULL, trylock, NULL);
if (err) {
if (err != -EAGAIN) {
xfs_perag_put(pag);
@@ -181,7 +181,7 @@ next_ag:
if (ag != startag)
continue;
- /* Allow sleeping in xfs_alloc_pagf_init() on the 2nd pass. */
+ /* Allow sleeping in xfs_alloc_read_agf() on the 2nd pass. */
if (trylock != 0) {
trylock = 0;
continue;
diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
index bb23199f65c3..d8337274c74d 100644
--- a/fs/xfs/xfs_fsmap.c
+++ b/fs/xfs/xfs_fsmap.c
@@ -642,8 +642,7 @@ __xfs_getfsmap_datadev(
info->agf_bp = NULL;
}
- error = xfs_alloc_read_agf(mp, tp, pag->pag_agno, 0,
- &info->agf_bp);
+ error = xfs_alloc_read_agf(pag, tp, 0, &info->agf_bp);
if (error)
break;
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index d4a77c53f94b..13851c0d640b 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -41,6 +41,7 @@ xfs_resizefs_init_new_ags(
xfs_agnumber_t oagcount,
xfs_agnumber_t nagcount,
xfs_rfsblock_t delta,
+ struct xfs_perag *last_pag,
bool *lastag_extended)
{
struct xfs_mount *mp = tp->t_mountp;
@@ -73,7 +74,7 @@ xfs_resizefs_init_new_ags(
if (delta) {
*lastag_extended = true;
- error = xfs_ag_extend_space(mp, tp, id, delta);
+ error = xfs_ag_extend_space(last_pag, tp, delta);
}
return error;
}
@@ -96,6 +97,7 @@ xfs_growfs_data_private(
xfs_agnumber_t oagcount;
struct xfs_trans *tp;
struct aghdr_init_data id = {};
+ struct xfs_perag *last_pag;
nb = in->newblocks;
error = xfs_sb_validate_fsb_count(&mp->m_sb, nb);
@@ -128,10 +130,9 @@ xfs_growfs_data_private(
return -EINVAL;
oagcount = mp->m_sb.sb_agcount;
-
/* allocate the new per-ag structures */
if (nagcount > oagcount) {
- error = xfs_initialize_perag(mp, nagcount, &nagimax);
+ error = xfs_initialize_perag(mp, nagcount, nb, &nagimax);
if (error)
return error;
} else if (nagcount < oagcount) {
@@ -145,15 +146,17 @@ xfs_growfs_data_private(
if (error)
return error;
+ last_pag = xfs_perag_get(mp, oagcount - 1);
if (delta > 0) {
error = xfs_resizefs_init_new_ags(tp, &id, oagcount, nagcount,
- delta, &lastag_extended);
+ delta, last_pag, &lastag_extended);
} else {
xfs_warn_mount(mp, XFS_OPSTATE_WARNED_SHRINK,
"EXPERIMENTAL online shrink feature in use. Use at your own risk!");
- error = xfs_ag_shrink_space(mp, &tp, nagcount - 1, -delta);
+ error = xfs_ag_shrink_space(last_pag, &tp, -delta);
}
+ xfs_perag_put(last_pag);
if (error)
goto out_trans_cancel;
@@ -528,6 +531,9 @@ xfs_do_force_shutdown(
} else if (flags & SHUTDOWN_CORRUPT_INCORE) {
tag = XFS_PTAG_SHUTDOWN_CORRUPT;
why = "Corruption of in-memory data";
+ } else if (flags & SHUTDOWN_CORRUPT_ONDISK) {
+ tag = XFS_PTAG_SHUTDOWN_CORRUPT;
+ why = "Corruption of on-disk metadata";
} else {
tag = XFS_PTAG_SHUTDOWN_IOERROR;
why = "Metadata I/O Error";
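
xfs_growfs_data_private() above now pins the last AG's perag once and hands it to both the grow path (xfs_resizefs_init_new_ags -> xfs_ag_extend_space) and the shrink path (xfs_ag_shrink_space), instead of having those helpers look it up by number. A reduced sketch of that reference lifetime, condensed from the hunk:

	/* Sketch: one perag reference covers both the extend and shrink branches. */
	struct xfs_perag	*last_pag;

	last_pag = xfs_perag_get(mp, oagcount - 1);
	if (delta > 0)
		error = xfs_resizefs_init_new_ags(tp, &id, oagcount, nagcount,
				delta, last_pag, &lastag_extended);
	else
		error = xfs_ag_shrink_space(last_pag, &tp, -delta);
	xfs_perag_put(last_pag);
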
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 2609825d53ee..2bbe7916a998 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -98,8 +98,9 @@ xfs_inode_alloc(
ip->i_ino = ino;
ip->i_mount = mp;
memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
- ip->i_afp = NULL;
ip->i_cowfp = NULL;
+ memset(&ip->i_af, 0, sizeof(ip->i_af));
+ ip->i_af.if_format = XFS_DINODE_FMT_EXTENTS;
memset(&ip->i_df, 0, sizeof(ip->i_df));
ip->i_flags = 0;
ip->i_delayed_blks = 0;
@@ -111,6 +112,8 @@ xfs_inode_alloc(
INIT_WORK(&ip->i_ioend_work, xfs_end_io);
INIT_LIST_HEAD(&ip->i_ioend_list);
spin_lock_init(&ip->i_ioend_lock);
+ ip->i_next_unlinked = NULLAGINO;
+ ip->i_prev_unlinked = NULLAGINO;
return ip;
}
@@ -130,10 +133,8 @@ xfs_inode_free_callback(
break;
}
- if (ip->i_afp) {
- xfs_idestroy_fork(ip->i_afp);
- kmem_cache_free(xfs_ifork_cache, ip->i_afp);
- }
+ xfs_ifork_zap_attr(ip);
+
if (ip->i_cowfp) {
xfs_idestroy_fork(ip->i_cowfp);
kmem_cache_free(xfs_ifork_cache, ip->i_cowfp);
@@ -912,6 +913,7 @@ reclaim:
ip->i_checked = 0;
spin_unlock(&ip->i_flags_lock);
+ ASSERT(!ip->i_itemp || ip->i_itemp->ili_item.li_buf == NULL);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
@@ -1774,7 +1776,7 @@ xfs_check_delalloc(
struct xfs_inode *ip,
int whichfork)
{
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
struct xfs_bmbt_irec got;
struct xfs_iext_cursor icur;
@@ -2219,5 +2221,5 @@ xfs_inodegc_register_shrinker(
shrink->flags = SHRINKER_NONSLAB;
shrink->batch = XFS_INODEGC_SHRINKER_BATCH;
- return register_shrinker(shrink);
+ return register_shrinker(shrink, "xfs-inodegc:%s", mp->m_super->s_id);
}
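
register_shrinker() now takes a printf-style name, used by the new shrinker debugfs interface to identify each shrinker, so both the inodegc shrinker above and the buftarg shrinker in the xfs_buf.c hunk pass a per-mount identifier. A minimal sketch of the updated registration call:

	/* Sketch: the name identifies this shrinker per mount in shrinker debugfs. */
	error = register_shrinker(shrink, "xfs-inodegc:%s", mp->m_super->s_id);
	if (error)
		return error;
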
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 3e1c62ffa4f7..28493c8e9bb2 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -20,6 +20,7 @@
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
+#include "xfs_iunlink_item.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
@@ -125,7 +126,7 @@ xfs_ilock_attr_map_shared(
{
uint lock_mode = XFS_ILOCK_SHARED;
- if (ip->i_afp && xfs_need_iread_extents(ip->i_afp))
+ if (xfs_inode_has_attr_fork(ip) && xfs_need_iread_extents(&ip->i_af))
lock_mode = XFS_ILOCK_EXCL;
xfs_ilock(ip, lock_mode);
return lock_mode;
@@ -635,7 +636,7 @@ xfs_ip2xflags(
flags |= FS_XFLAG_COWEXTSIZE;
}
- if (XFS_IFORK_Q(ip))
+ if (xfs_inode_has_attr_fork(ip))
flags |= FS_XFLAG_HASATTR;
return flags;
}
@@ -893,7 +894,7 @@ xfs_init_new_inode(
*/
if (init_xattrs && xfs_has_attr(mp)) {
ip->i_forkoff = xfs_default_attroffset(ip) >> 3;
- ip->i_afp = xfs_ifork_alloc(XFS_DINODE_FMT_EXTENTS, 0);
+ xfs_ifork_init_attr(ip, XFS_DINODE_FMT_EXTENTS, 0);
}
/*
@@ -1293,8 +1294,8 @@ xfs_itruncate_clear_reflink_flags(
if (!xfs_is_reflink_inode(ip))
return;
- dfork = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
- cfork = XFS_IFORK_PTR(ip, XFS_COW_FORK);
+ dfork = xfs_ifork_ptr(ip, XFS_DATA_FORK);
+ cfork = xfs_ifork_ptr(ip, XFS_COW_FORK);
if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
if (cfork->if_bytes == 0)
@@ -1643,7 +1644,7 @@ xfs_inode_needs_inactive(
struct xfs_inode *ip)
{
struct xfs_mount *mp = ip->i_mount;
- struct xfs_ifork *cow_ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
+ struct xfs_ifork *cow_ifp = xfs_ifork_ptr(ip, XFS_COW_FORK);
/*
* If the inode is already free, then there can be nothing
@@ -1762,13 +1763,12 @@ xfs_inactive(
* now. The code calls a routine that recursively deconstructs the
* attribute fork. If also blows away the in-core attribute fork.
*/
- if (XFS_IFORK_Q(ip)) {
+ if (xfs_inode_has_attr_fork(ip)) {
error = xfs_attr_inactive(ip);
if (error)
goto out;
}
- ASSERT(!ip->i_afp);
ASSERT(ip->i_forkoff == 0);
/*
@@ -1801,195 +1801,69 @@ out:
* because we must walk that list to find the inode that points to the inode
* being removed from the unlinked hash bucket list.
*
- * What if we modelled the unlinked list as a collection of records capturing
- * "X.next_unlinked = Y" relations? If we indexed those records on Y, we'd
- * have a fast way to look up unlinked list predecessors, which avoids the
- * slow list walk. That's exactly what we do here (in-core) with a per-AG
- * rhashtable.
+ * Hence we keep an in-memory double linked list to link each inode on an
+ * unlinked list. Because there are 64 unlinked lists per AGI, keeping pointer
+ * based lists would require having 64 list heads in the perag, one for each
+ * list. This is expensive in terms of memory (think millions of AGs) and cache
+ * misses on lookups. Instead, use the fact that inodes on the unlinked list
+ * must be referenced at the VFS level to keep them on the list and hence we
+ * have an existence guarantee for inodes on the unlinked list.
*
- * Because this is a backref cache, we ignore operational failures since the
- * iunlink code can fall back to the slow bucket walk. The only errors that
- * should bubble out are for obviously incorrect situations.
- *
- * All users of the backref cache MUST hold the AGI buffer lock to serialize
- * access or have otherwise provided for concurrency control.
+ * Given we have an existence guarantee, we can use lockless inode cache lookups
+ * to resolve aginos to xfs inodes. This means we only need 8 bytes per inode
+ * for the double linked unlinked list, and we don't need any extra locking to
+ * keep the list safe as all manipulations are done under the AGI buffer lock.
+ * Keeping the list up to date does not require memory allocation, just finding
+ * the XFS inode and updating the next/prev unlinked list aginos.
*/
-/* Capture a "X.next_unlinked = Y" relationship. */
-struct xfs_iunlink {
- struct rhash_head iu_rhash_head;
- xfs_agino_t iu_agino; /* X */
- xfs_agino_t iu_next_unlinked; /* Y */
-};
-
-/* Unlinked list predecessor lookup hashtable construction */
-static int
-xfs_iunlink_obj_cmpfn(
- struct rhashtable_compare_arg *arg,
- const void *obj)
-{
- const xfs_agino_t *key = arg->key;
- const struct xfs_iunlink *iu = obj;
-
- if (iu->iu_next_unlinked != *key)
- return 1;
- return 0;
-}
-
-static const struct rhashtable_params xfs_iunlink_hash_params = {
- .min_size = XFS_AGI_UNLINKED_BUCKETS,
- .key_len = sizeof(xfs_agino_t),
- .key_offset = offsetof(struct xfs_iunlink,
- iu_next_unlinked),
- .head_offset = offsetof(struct xfs_iunlink, iu_rhash_head),
- .automatic_shrinking = true,
- .obj_cmpfn = xfs_iunlink_obj_cmpfn,
-};
-
/*
- * Return X, where X.next_unlinked == @agino. Returns NULLAGINO if no such
- * relation is found.
+ * Find an inode on the unlinked list. This does not take references to the
+ * inode as we have existence guarantees by holding the AGI buffer lock and that
+ * only unlinked, referenced inodes can be on the unlinked inode list. If we
+ * don't find the inode in cache, then let the caller handle the situation.
*/
-static xfs_agino_t
-xfs_iunlink_lookup_backref(
+static struct xfs_inode *
+xfs_iunlink_lookup(
struct xfs_perag *pag,
xfs_agino_t agino)
{
- struct xfs_iunlink *iu;
-
- iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
- xfs_iunlink_hash_params);
- return iu ? iu->iu_agino : NULLAGINO;
-}
+ struct xfs_inode *ip;
-/*
- * Take ownership of an iunlink cache entry and insert it into the hash table.
- * If successful, the entry will be owned by the cache; if not, it is freed.
- * Either way, the caller does not own @iu after this call.
- */
-static int
-xfs_iunlink_insert_backref(
- struct xfs_perag *pag,
- struct xfs_iunlink *iu)
-{
- int error;
+ rcu_read_lock();
+ ip = radix_tree_lookup(&pag->pag_ici_root, agino);
- error = rhashtable_insert_fast(&pag->pagi_unlinked_hash,
- &iu->iu_rhash_head, xfs_iunlink_hash_params);
/*
- * Fail loudly if there already was an entry because that's a sign of
- * corruption of in-memory data. Also fail loudly if we see an error
- * code we didn't anticipate from the rhashtable code. Currently we
- * only anticipate ENOMEM.
+ * Inode not in memory or in RCU freeing limbo should not happen.
+ * Warn about this and let the caller handle the failure.
*/
- if (error) {
- WARN(error != -ENOMEM, "iunlink cache insert error %d", error);
- kmem_free(iu);
+ if (WARN_ON_ONCE(!ip || !ip->i_ino)) {
+ rcu_read_unlock();
+ return NULL;
}
- /*
- * Absorb any runtime errors that aren't a result of corruption because
- * this is a cache and we can always fall back to bucket list scanning.
- */
- if (error != 0 && error != -EEXIST)
- error = 0;
- return error;
+ ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM));
+ rcu_read_unlock();
+ return ip;
}
-/* Remember that @prev_agino.next_unlinked = @this_agino. */
+/* Update the prev pointer of the next agino. */
static int
-xfs_iunlink_add_backref(
+xfs_iunlink_update_backref(
struct xfs_perag *pag,
xfs_agino_t prev_agino,
- xfs_agino_t this_agino)
-{
- struct xfs_iunlink *iu;
-
- if (XFS_TEST_ERROR(false, pag->pag_mount, XFS_ERRTAG_IUNLINK_FALLBACK))
- return 0;
-
- iu = kmem_zalloc(sizeof(*iu), KM_NOFS);
- iu->iu_agino = prev_agino;
- iu->iu_next_unlinked = this_agino;
-
- return xfs_iunlink_insert_backref(pag, iu);
-}
-
-/*
- * Replace X.next_unlinked = @agino with X.next_unlinked = @next_unlinked.
- * If @next_unlinked is NULLAGINO, we drop the backref and exit. If there
- * wasn't any such entry then we don't bother.
- */
-static int
-xfs_iunlink_change_backref(
- struct xfs_perag *pag,
- xfs_agino_t agino,
- xfs_agino_t next_unlinked)
+ xfs_agino_t next_agino)
{
- struct xfs_iunlink *iu;
- int error;
-
- /* Look up the old entry; if there wasn't one then exit. */
- iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
- xfs_iunlink_hash_params);
- if (!iu)
- return 0;
-
- /*
- * Remove the entry. This shouldn't ever return an error, but if we
- * couldn't remove the old entry we don't want to add it again to the
- * hash table, and if the entry disappeared on us then someone's
- * violated the locking rules and we need to fail loudly. Either way
- * we cannot remove the inode because internal state is or would have
- * been corrupt.
- */
- error = rhashtable_remove_fast(&pag->pagi_unlinked_hash,
- &iu->iu_rhash_head, xfs_iunlink_hash_params);
- if (error)
- return error;
+ struct xfs_inode *ip;
- /* If there is no new next entry just free our item and return. */
- if (next_unlinked == NULLAGINO) {
- kmem_free(iu);
+ /* No update necessary if we are at the end of the list. */
+ if (next_agino == NULLAGINO)
return 0;
- }
- /* Update the entry and re-add it to the hash table. */
- iu->iu_next_unlinked = next_unlinked;
- return xfs_iunlink_insert_backref(pag, iu);
-}
-
-/* Set up the in-core predecessor structures. */
-int
-xfs_iunlink_init(
- struct xfs_perag *pag)
-{
- return rhashtable_init(&pag->pagi_unlinked_hash,
- &xfs_iunlink_hash_params);
-}
-
-/* Free the in-core predecessor structures. */
-static void
-xfs_iunlink_free_item(
- void *ptr,
- void *arg)
-{
- struct xfs_iunlink *iu = ptr;
- bool *freed_anything = arg;
-
- *freed_anything = true;
- kmem_free(iu);
-}
-
-void
-xfs_iunlink_destroy(
- struct xfs_perag *pag)
-{
- bool freed_anything = false;
-
- rhashtable_free_and_destroy(&pag->pagi_unlinked_hash,
- xfs_iunlink_free_item, &freed_anything);
-
- ASSERT(freed_anything == false || xfs_is_shutdown(pag->pag_mount));
+ ip = xfs_iunlink_lookup(pag, next_agino);
+ if (!ip)
+ return -EFSCORRUPTED;
+ ip->i_prev_unlinked = prev_agino;
+ return 0;
}
/*
@@ -2008,7 +1882,7 @@ xfs_iunlink_update_bucket(
xfs_agino_t old_value;
int offset;
- ASSERT(xfs_verify_agino_or_null(tp->t_mountp, pag->pag_agno, new_agino));
+ ASSERT(xfs_verify_agino_or_null(pag, new_agino));
old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]);
trace_xfs_iunlink_update_bucket(tp->t_mountp, pag->pag_agno, bucket_index,
@@ -2031,88 +1905,53 @@ xfs_iunlink_update_bucket(
return 0;
}
-/* Set an on-disk inode's next_unlinked pointer. */
-STATIC void
-xfs_iunlink_update_dinode(
- struct xfs_trans *tp,
- struct xfs_perag *pag,
- xfs_agino_t agino,
- struct xfs_buf *ibp,
- struct xfs_dinode *dip,
- struct xfs_imap *imap,
- xfs_agino_t next_agino)
-{
- struct xfs_mount *mp = tp->t_mountp;
- int offset;
-
- ASSERT(xfs_verify_agino_or_null(mp, pag->pag_agno, next_agino));
-
- trace_xfs_iunlink_update_dinode(mp, pag->pag_agno, agino,
- be32_to_cpu(dip->di_next_unlinked), next_agino);
-
- dip->di_next_unlinked = cpu_to_be32(next_agino);
- offset = imap->im_boffset +
- offsetof(struct xfs_dinode, di_next_unlinked);
-
- /* need to recalc the inode CRC if appropriate */
- xfs_dinode_calc_crc(mp, dip);
- xfs_trans_inode_buf(tp, ibp);
- xfs_trans_log_buf(tp, ibp, offset, offset + sizeof(xfs_agino_t) - 1);
-}
-
-/* Set an in-core inode's unlinked pointer and return the old value. */
-STATIC int
-xfs_iunlink_update_inode(
+static int
+xfs_iunlink_insert_inode(
struct xfs_trans *tp,
- struct xfs_inode *ip,
struct xfs_perag *pag,
- xfs_agino_t next_agino,
- xfs_agino_t *old_next_agino)
+ struct xfs_buf *agibp,
+ struct xfs_inode *ip)
{
struct xfs_mount *mp = tp->t_mountp;
- struct xfs_dinode *dip;
- struct xfs_buf *ibp;
- xfs_agino_t old_value;
+ struct xfs_agi *agi = agibp->b_addr;
+ xfs_agino_t next_agino;
+ xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
+ short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
int error;
- ASSERT(xfs_verify_agino_or_null(mp, pag->pag_agno, next_agino));
-
- error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &ibp);
- if (error)
- return error;
- dip = xfs_buf_offset(ibp, ip->i_imap.im_boffset);
-
- /* Make sure the old pointer isn't garbage. */
- old_value = be32_to_cpu(dip->di_next_unlinked);
- if (!xfs_verify_agino_or_null(mp, pag->pag_agno, old_value)) {
- xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip,
- sizeof(*dip), __this_address);
- error = -EFSCORRUPTED;
- goto out;
+ /*
+ * Get the index into the agi hash table for the list this inode will
+ * go on. Make sure the pointer isn't garbage and that this inode
+ * isn't already on the list.
+ */
+ next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
+ if (next_agino == agino ||
+ !xfs_verify_agino_or_null(pag, next_agino)) {
+ xfs_buf_mark_corrupt(agibp);
+ return -EFSCORRUPTED;
}
/*
- * Since we're updating a linked list, we should never find that the
- * current pointer is the same as the new value, unless we're
- * terminating the list.
+ * Update the prev pointer in the next inode to point back to this
+ * inode.
*/
- *old_next_agino = old_value;
- if (old_value == next_agino) {
- if (next_agino != NULLAGINO) {
- xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__,
- dip, sizeof(*dip), __this_address);
- error = -EFSCORRUPTED;
- }
- goto out;
+ error = xfs_iunlink_update_backref(pag, agino, next_agino);
+ if (error)
+ return error;
+
+ if (next_agino != NULLAGINO) {
+ /*
+ * There is already another inode in the bucket, so point this
+ * inode to the current head of the list.
+ */
+ error = xfs_iunlink_log_inode(tp, ip, pag, next_agino);
+ if (error)
+ return error;
+ ip->i_next_unlinked = next_agino;
}
- /* Ok, update the new pointer. */
- xfs_iunlink_update_dinode(tp, pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
- ibp, dip, &ip->i_imap, next_agino);
- return 0;
-out:
- xfs_trans_brelse(tp, ibp);
- return error;
+ /* Point the head of the list to point to this inode. */
+ return xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index, agino);
}
/*
@@ -2129,11 +1968,7 @@ xfs_iunlink(
{
struct xfs_mount *mp = tp->t_mountp;
struct xfs_perag *pag;
- struct xfs_agi *agi;
struct xfs_buf *agibp;
- xfs_agino_t next_agino;
- xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
- short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
int error;
ASSERT(VFS_I(ip)->i_nlink == 0);
@@ -2143,202 +1978,38 @@ xfs_iunlink(
pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
/* Get the agi buffer first. It ensures lock ordering on the list. */
- error = xfs_read_agi(mp, tp, pag->pag_agno, &agibp);
+ error = xfs_read_agi(pag, tp, &agibp);
if (error)
goto out;
- agi = agibp->b_addr;
-
- /*
- * Get the index into the agi hash table for the list this inode will
- * go on. Make sure the pointer isn't garbage and that this inode
- * isn't already on the list.
- */
- next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
- if (next_agino == agino ||
- !xfs_verify_agino_or_null(mp, pag->pag_agno, next_agino)) {
- xfs_buf_mark_corrupt(agibp);
- error = -EFSCORRUPTED;
- goto out;
- }
-
- if (next_agino != NULLAGINO) {
- xfs_agino_t old_agino;
-
- /*
- * There is already another inode in the bucket, so point this
- * inode to the current head of the list.
- */
- error = xfs_iunlink_update_inode(tp, ip, pag, next_agino,
- &old_agino);
- if (error)
- goto out;
- ASSERT(old_agino == NULLAGINO);
- /*
- * agino has been unlinked, add a backref from the next inode
- * back to agino.
- */
- error = xfs_iunlink_add_backref(pag, agino, next_agino);
- if (error)
- goto out;
- }
-
- /* Point the head of the list to point to this inode. */
- error = xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index, agino);
+ error = xfs_iunlink_insert_inode(tp, pag, agibp, ip);
out:
xfs_perag_put(pag);
return error;
}
-/* Return the imap, dinode pointer, and buffer for an inode. */
-STATIC int
-xfs_iunlink_map_ino(
- struct xfs_trans *tp,
- xfs_agnumber_t agno,
- xfs_agino_t agino,
- struct xfs_imap *imap,
- struct xfs_dinode **dipp,
- struct xfs_buf **bpp)
-{
- struct xfs_mount *mp = tp->t_mountp;
- int error;
-
- imap->im_blkno = 0;
- error = xfs_imap(mp, tp, XFS_AGINO_TO_INO(mp, agno, agino), imap, 0);
- if (error) {
- xfs_warn(mp, "%s: xfs_imap returned error %d.",
- __func__, error);
- return error;
- }
-
- error = xfs_imap_to_bp(mp, tp, imap, bpp);
- if (error) {
- xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
- __func__, error);
- return error;
- }
-
- *dipp = xfs_buf_offset(*bpp, imap->im_boffset);
- return 0;
-}
-
-/*
- * Walk the unlinked chain from @head_agino until we find the inode that
- * points to @target_agino. Return the inode number, map, dinode pointer,
- * and inode cluster buffer of that inode as @agino, @imap, @dipp, and @bpp.
- *
- * @tp, @pag, @head_agino, and @target_agino are input parameters.
- * @agino, @imap, @dipp, and @bpp are all output parameters.
- *
- * Do not call this function if @target_agino is the head of the list.
- */
-STATIC int
-xfs_iunlink_map_prev(
- struct xfs_trans *tp,
- struct xfs_perag *pag,
- xfs_agino_t head_agino,
- xfs_agino_t target_agino,
- xfs_agino_t *agino,
- struct xfs_imap *imap,
- struct xfs_dinode **dipp,
- struct xfs_buf **bpp)
-{
- struct xfs_mount *mp = tp->t_mountp;
- xfs_agino_t next_agino;
- int error;
-
- ASSERT(head_agino != target_agino);
- *bpp = NULL;
-
- /* See if our backref cache can find it faster. */
- *agino = xfs_iunlink_lookup_backref(pag, target_agino);
- if (*agino != NULLAGINO) {
- error = xfs_iunlink_map_ino(tp, pag->pag_agno, *agino, imap,
- dipp, bpp);
- if (error)
- return error;
-
- if (be32_to_cpu((*dipp)->di_next_unlinked) == target_agino)
- return 0;
-
- /*
- * If we get here the cache contents were corrupt, so drop the
- * buffer and fall back to walking the bucket list.
- */
- xfs_trans_brelse(tp, *bpp);
- *bpp = NULL;
- WARN_ON_ONCE(1);
- }
-
- trace_xfs_iunlink_map_prev_fallback(mp, pag->pag_agno);
-
- /* Otherwise, walk the entire bucket until we find it. */
- next_agino = head_agino;
- while (next_agino != target_agino) {
- xfs_agino_t unlinked_agino;
-
- if (*bpp)
- xfs_trans_brelse(tp, *bpp);
-
- *agino = next_agino;
- error = xfs_iunlink_map_ino(tp, pag->pag_agno, next_agino, imap,
- dipp, bpp);
- if (error)
- return error;
-
- unlinked_agino = be32_to_cpu((*dipp)->di_next_unlinked);
- /*
- * Make sure this pointer is valid and isn't an obvious
- * infinite loop.
- */
- if (!xfs_verify_agino(mp, pag->pag_agno, unlinked_agino) ||
- next_agino == unlinked_agino) {
- XFS_CORRUPTION_ERROR(__func__,
- XFS_ERRLEVEL_LOW, mp,
- *dipp, sizeof(**dipp));
- error = -EFSCORRUPTED;
- return error;
- }
- next_agino = unlinked_agino;
- }
-
- return 0;
-}
-
-/*
- * Pull the on-disk inode from the AGI unlinked list.
- */
-STATIC int
-xfs_iunlink_remove(
+static int
+xfs_iunlink_remove_inode(
struct xfs_trans *tp,
struct xfs_perag *pag,
+ struct xfs_buf *agibp,
struct xfs_inode *ip)
{
struct xfs_mount *mp = tp->t_mountp;
- struct xfs_agi *agi;
- struct xfs_buf *agibp;
- struct xfs_buf *last_ibp;
- struct xfs_dinode *last_dip = NULL;
+ struct xfs_agi *agi = agibp->b_addr;
xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
- xfs_agino_t next_agino;
xfs_agino_t head_agino;
short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
int error;
trace_xfs_iunlink_remove(ip);
- /* Get the agi buffer first. It ensures lock ordering on the list. */
- error = xfs_read_agi(mp, tp, pag->pag_agno, &agibp);
- if (error)
- return error;
- agi = agibp->b_addr;
-
/*
* Get the index into the agi hash table for the list this inode will
* go on. Make sure the head pointer isn't garbage.
*/
head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
- if (!xfs_verify_agino(mp, pag->pag_agno, head_agino)) {
+ if (!xfs_verify_agino(pag, head_agino)) {
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
agi, sizeof(*agi));
return -EFSCORRUPTED;
@@ -2349,52 +2020,60 @@ xfs_iunlink_remove(
* the old pointer value so that we can update whatever was previous
* to us in the list to point to whatever was next in the list.
*/
- error = xfs_iunlink_update_inode(tp, ip, pag, NULLAGINO, &next_agino);
+ error = xfs_iunlink_log_inode(tp, ip, pag, NULLAGINO);
if (error)
return error;
/*
- * If there was a backref pointing from the next inode back to this
- * one, remove it because we've removed this inode from the list.
- *
- * Later, if this inode was in the middle of the list we'll update
- * this inode's backref to point from the next inode.
+ * Update the prev pointer in the next inode to point back to previous
+ * inode in the chain.
*/
- if (next_agino != NULLAGINO) {
- error = xfs_iunlink_change_backref(pag, next_agino, NULLAGINO);
- if (error)
- return error;
- }
+ error = xfs_iunlink_update_backref(pag, ip->i_prev_unlinked,
+ ip->i_next_unlinked);
+ if (error)
+ return error;
if (head_agino != agino) {
- struct xfs_imap imap;
- xfs_agino_t prev_agino;
+ struct xfs_inode *prev_ip;
- /* We need to search the list for the inode being freed. */
- error = xfs_iunlink_map_prev(tp, pag, head_agino, agino,
- &prev_agino, &imap, &last_dip, &last_ibp);
- if (error)
- return error;
+ prev_ip = xfs_iunlink_lookup(pag, ip->i_prev_unlinked);
+ if (!prev_ip)
+ return -EFSCORRUPTED;
- /* Point the previous inode on the list to the next inode. */
- xfs_iunlink_update_dinode(tp, pag, prev_agino, last_ibp,
- last_dip, &imap, next_agino);
-
- /*
- * Now we deal with the backref for this inode. If this inode
- * pointed at a real inode, change the backref that pointed to
- * us to point to our old next. If this inode was the end of
- * the list, delete the backref that pointed to us. Note that
- * change_backref takes care of deleting the backref if
- * next_agino is NULLAGINO.
- */
- return xfs_iunlink_change_backref(agibp->b_pag, agino,
- next_agino);
+ error = xfs_iunlink_log_inode(tp, prev_ip, pag,
+ ip->i_next_unlinked);
+ prev_ip->i_next_unlinked = ip->i_next_unlinked;
+ } else {
+ /* Point the head of the list to the next unlinked inode. */
+ error = xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index,
+ ip->i_next_unlinked);
}
- /* Point the head of the list to the next unlinked inode. */
- return xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index,
- next_agino);
+ ip->i_next_unlinked = NULLAGINO;
+ ip->i_prev_unlinked = NULLAGINO;
+ return error;
+}
+
+/*
+ * Pull the on-disk inode from the AGI unlinked list.
+ */
+STATIC int
+xfs_iunlink_remove(
+ struct xfs_trans *tp,
+ struct xfs_perag *pag,
+ struct xfs_inode *ip)
+{
+ struct xfs_buf *agibp;
+ int error;
+
+ trace_xfs_iunlink_remove(ip);
+
+ /* Get the agi buffer first. It ensures lock ordering on the list. */
+ error = xfs_read_agi(pag, tp, &agibp);
+ if (error)
+ return error;
+
+ return xfs_iunlink_remove_inode(tp, pag, agibp, ip);
}
/*
@@ -3032,10 +2711,12 @@ out_trans_abort:
static int
xfs_rename_alloc_whiteout(
struct user_namespace *mnt_userns,
+ struct xfs_name *src_name,
struct xfs_inode *dp,
struct xfs_inode **wip)
{
struct xfs_inode *tmpfile;
+ struct qstr name;
int error;
error = xfs_create_tmpfile(mnt_userns, dp, S_IFCHR | WHITEOUT_MODE,
@@ -3043,6 +2724,15 @@ xfs_rename_alloc_whiteout(
if (error)
return error;
+ name.name = src_name->name;
+ name.len = src_name->len;
+ error = xfs_inode_init_security(VFS_I(tmpfile), VFS_I(dp), &name);
+ if (error) {
+ xfs_finish_inode_setup(tmpfile);
+ xfs_irele(tmpfile);
+ return error;
+ }
+
/*
* Prepare the tmpfile inode as if it were created through the VFS.
* Complete the inode setup and flag it as linkable. nlink is already
@@ -3093,7 +2783,8 @@ xfs_rename(
* appropriately.
*/
if (flags & RENAME_WHITEOUT) {
- error = xfs_rename_alloc_whiteout(mnt_userns, target_dp, &wip);
+ error = xfs_rename_alloc_whiteout(mnt_userns, src_name,
+ target_dp, &wip);
if (error)
return error;
@@ -3229,11 +2920,13 @@ retry:
if (inodes[i] == wip ||
(inodes[i] == target_ip &&
(VFS_I(target_ip)->i_nlink == 1 || src_is_directory))) {
- struct xfs_buf *bp;
- xfs_agnumber_t agno;
+ struct xfs_perag *pag;
+ struct xfs_buf *bp;
- agno = XFS_INO_TO_AGNO(mp, inodes[i]->i_ino);
- error = xfs_read_agi(mp, tp, agno, &bp);
+ pag = xfs_perag_get(mp,
+ XFS_INO_TO_AGNO(mp, inodes[i]->i_ino));
+ error = xfs_read_agi(pag, tp, &bp);
+ xfs_perag_put(pag);
if (error)
goto out_trans_cancel;
}
@@ -3452,13 +3145,13 @@ xfs_iflush(
goto flush_out;
}
}
- if (XFS_TEST_ERROR(ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp) >
+ if (XFS_TEST_ERROR(ip->i_df.if_nextents + xfs_ifork_nextents(&ip->i_af) >
ip->i_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
"%s: detected corrupt incore inode %llu, "
"total extents = %llu nblocks = %lld, ptr "PTR_FMT,
__func__, ip->i_ino,
- ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp),
+ ip->i_df.if_nextents + xfs_ifork_nextents(&ip->i_af),
ip->i_nblocks, ip);
goto flush_out;
}
@@ -3488,7 +3181,8 @@ xfs_iflush(
if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL &&
xfs_ifork_verify_local_data(ip))
goto flush_out;
- if (ip->i_afp && ip->i_afp->if_format == XFS_DINODE_FMT_LOCAL &&
+ if (xfs_inode_has_attr_fork(ip) &&
+ ip->i_af.if_format == XFS_DINODE_FMT_LOCAL &&
xfs_ifork_verify_local_attr(ip))
goto flush_out;
@@ -3506,7 +3200,7 @@ xfs_iflush(
}
xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
- if (XFS_IFORK_Q(ip))
+ if (xfs_inode_has_attr_fork(ip))
xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
/*
@@ -3753,6 +3447,50 @@ retry:
return 0;
}
+static int
+xfs_mmaplock_two_inodes_and_break_dax_layout(
+ struct xfs_inode *ip1,
+ struct xfs_inode *ip2)
+{
+ int error;
+ bool retry;
+ struct page *page;
+
+ if (ip1->i_ino > ip2->i_ino)
+ swap(ip1, ip2);
+
+again:
+ retry = false;
+ /* Lock the first inode */
+ xfs_ilock(ip1, XFS_MMAPLOCK_EXCL);
+ error = xfs_break_dax_layouts(VFS_I(ip1), &retry);
+ if (error || retry) {
+ xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
+ if (error == 0 && retry)
+ goto again;
+ return error;
+ }
+
+ if (ip1 == ip2)
+ return 0;
+
+ /* Nested lock the second inode */
+ xfs_ilock(ip2, xfs_lock_inumorder(XFS_MMAPLOCK_EXCL, 1));
+ /*
+ * We cannot use xfs_break_dax_layouts() directly here because it may
+ * need to unlock & lock the XFS_MMAPLOCK_EXCL which is not suitable
+ * for this nested lock case.
+ */
+ page = dax_layout_busy_page(VFS_I(ip2)->i_mapping);
+ if (page && page_ref_count(page) != 1) {
+ xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
+ xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
+ goto again;
+ }
+
+ return 0;
+}
+
/*
* Lock two inodes so that userspace cannot initiate I/O via file syscalls or
* mmap activity.
@@ -3767,8 +3505,19 @@ xfs_ilock2_io_mmap(
ret = xfs_iolock_two_inodes_and_break_layout(VFS_I(ip1), VFS_I(ip2));
if (ret)
return ret;
- filemap_invalidate_lock_two(VFS_I(ip1)->i_mapping,
- VFS_I(ip2)->i_mapping);
+
+ if (IS_DAX(VFS_I(ip1)) && IS_DAX(VFS_I(ip2))) {
+ ret = xfs_mmaplock_two_inodes_and_break_dax_layout(ip1, ip2);
+ if (ret) {
+ inode_unlock(VFS_I(ip2));
+ if (ip1 != ip2)
+ inode_unlock(VFS_I(ip1));
+ return ret;
+ }
+ } else
+ filemap_invalidate_lock_two(VFS_I(ip1)->i_mapping,
+ VFS_I(ip2)->i_mapping);
+
return 0;
}
@@ -3778,8 +3527,14 @@ xfs_iunlock2_io_mmap(
struct xfs_inode *ip1,
struct xfs_inode *ip2)
{
- filemap_invalidate_unlock_two(VFS_I(ip1)->i_mapping,
- VFS_I(ip2)->i_mapping);
+ if (IS_DAX(VFS_I(ip1)) && IS_DAX(VFS_I(ip2))) {
+ xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
+ if (ip1 != ip2)
+ xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
+ } else
+ filemap_invalidate_unlock_two(VFS_I(ip1)->i_mapping,
+ VFS_I(ip2)->i_mapping);
+
inode_unlock(VFS_I(ip2));
if (ip1 != ip2)
inode_unlock(VFS_I(ip1));
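The new xfs_mmaplock_two_inodes_and_break_dax_layout() above relies on two ideas: take the MMAPLOCK of the lower-numbered inode first so concurrent callers locking the same pair cannot deadlock, and drop everything and retry whenever a DAX layout is still busy. A minimal userspace sketch of the ordering rule only, with hypothetical names and types, not kernel code:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct fake_inode {
	uint64_t	ino;
	pthread_mutex_t	mmaplock;
};

/* Always lock the lower-numbered inode first so two tasks locking the same
 * pair agree on the order and cannot deadlock; take the second lock only
 * when the inodes actually differ. */
static void lock_pair(struct fake_inode *a, struct fake_inode *b)
{
	if (a->ino > b->ino) {
		struct fake_inode *tmp = a;
		a = b;
		b = tmp;
	}
	pthread_mutex_lock(&a->mmaplock);
	if (a != b)
		pthread_mutex_lock(&b->mmaplock);
}

int main(void)
{
	struct fake_inode i1 = { .ino = 42 };
	struct fake_inode i2 = { .ino = 7 };

	pthread_mutex_init(&i1.mmaplock, NULL);
	pthread_mutex_init(&i2.mmaplock, NULL);

	lock_pair(&i1, &i2);	/* ino 7 is locked first */
	printf("both inodes locked\n");
	return 0;
}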
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 7be6f8e705ab..fa780f08dc89 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -33,9 +33,9 @@ typedef struct xfs_inode {
struct xfs_imap i_imap; /* location for xfs_imap() */
/* Extent information. */
- struct xfs_ifork *i_afp; /* attribute fork pointer */
struct xfs_ifork *i_cowfp; /* copy on write extents */
struct xfs_ifork i_df; /* data fork */
+ struct xfs_ifork i_af; /* attribute fork */
/* Transaction and locking information. */
struct xfs_inode_log_item *i_itemp; /* logging information */
@@ -68,6 +68,10 @@ typedef struct xfs_inode {
uint64_t i_diflags2; /* XFS_DIFLAG2_... */
struct timespec64 i_crtime; /* time created */
+ /* unlinked list pointers */
+ xfs_agino_t i_next_unlinked;
+ xfs_agino_t i_prev_unlinked;
+
/* VFS inode */
struct inode i_vnode; /* embedded VFS inode */
@@ -77,6 +81,66 @@ typedef struct xfs_inode {
struct list_head i_ioend_list;
} xfs_inode_t;
+static inline bool xfs_inode_has_attr_fork(struct xfs_inode *ip)
+{
+ return ip->i_forkoff > 0;
+}
+
+static inline struct xfs_ifork *
+xfs_ifork_ptr(
+ struct xfs_inode *ip,
+ int whichfork)
+{
+ switch (whichfork) {
+ case XFS_DATA_FORK:
+ return &ip->i_df;
+ case XFS_ATTR_FORK:
+ if (!xfs_inode_has_attr_fork(ip))
+ return NULL;
+ return &ip->i_af;
+ case XFS_COW_FORK:
+ return ip->i_cowfp;
+ default:
+ ASSERT(0);
+ return NULL;
+ }
+}
+
+static inline unsigned int xfs_inode_fork_boff(struct xfs_inode *ip)
+{
+ return ip->i_forkoff << 3;
+}
+
+static inline unsigned int xfs_inode_data_fork_size(struct xfs_inode *ip)
+{
+ if (xfs_inode_has_attr_fork(ip))
+ return xfs_inode_fork_boff(ip);
+
+ return XFS_LITINO(ip->i_mount);
+}
+
+static inline unsigned int xfs_inode_attr_fork_size(struct xfs_inode *ip)
+{
+ if (xfs_inode_has_attr_fork(ip))
+ return XFS_LITINO(ip->i_mount) - xfs_inode_fork_boff(ip);
+ return 0;
+}
+
+static inline unsigned int
+xfs_inode_fork_size(
+ struct xfs_inode *ip,
+ int whichfork)
+{
+ switch (whichfork) {
+ case XFS_DATA_FORK:
+ return xfs_inode_data_fork_size(ip);
+ case XFS_ATTR_FORK:
+ return xfs_inode_attr_fork_size(ip);
+ default:
+ return 0;
+ }
+}
+
/* Convert from vfs inode to xfs inode */
static inline struct xfs_inode *XFS_I(struct inode *inode)
{
@@ -467,6 +531,7 @@ xfs_itruncate_extents(
}
/* from xfs_file.c */
+int xfs_break_dax_layouts(struct inode *inode, bool *retry);
int xfs_break_layouts(struct inode *inode, uint *iolock,
enum layout_break_reason reason);
@@ -505,9 +570,6 @@ extern struct kmem_cache *xfs_inode_cache;
bool xfs_inode_needs_inactive(struct xfs_inode *ip);
-int xfs_iunlink_init(struct xfs_perag *pag);
-void xfs_iunlink_destroy(struct xfs_perag *pag);
-
void xfs_end_io(struct work_struct *work);
int xfs_ilock2_io_mmap(struct xfs_inode *ip1, struct xfs_inode *ip2);
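The inline helpers added to xfs_inode.h above encode the on-disk fork geometry: i_forkoff is stored in 8-byte units, so the attribute fork starts at i_forkoff << 3 bytes into the inode's literal area, the data fork gets everything below that offset, and the attribute fork gets the remainder. A worked example with made-up numbers (the real literal-area size, XFS_LITINO(), depends on inode size and superblock features):

#include <stdio.h>

int main(void)
{
	unsigned int litino  = 336;	/* hypothetical literal area, bytes */
	unsigned int forkoff = 21;	/* i_forkoff, stored in 8-byte units */

	unsigned int boff  = forkoff << 3;			/* 168 bytes */
	unsigned int dsize = forkoff ? boff : litino;		/* data fork space */
	unsigned int asize = forkoff ? litino - boff : 0;	/* attr fork space */

	printf("data fork: %u bytes, attr fork: %u bytes\n", dsize, asize);
	return 0;
}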
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 721def0639fd..6e19ece916bf 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -57,7 +57,7 @@ xfs_inode_item_data_fork_size(
ip->i_df.if_nextents > 0 &&
ip->i_df.if_bytes > 0) {
/* worst case, doesn't subtract delalloc extents */
- *nbytes += XFS_IFORK_DSIZE(ip);
+ *nbytes += xfs_inode_data_fork_size(ip);
*nvecs += 1;
}
break;
@@ -92,27 +92,27 @@ xfs_inode_item_attr_fork_size(
{
struct xfs_inode *ip = iip->ili_inode;
- switch (ip->i_afp->if_format) {
+ switch (ip->i_af.if_format) {
case XFS_DINODE_FMT_EXTENTS:
if ((iip->ili_fields & XFS_ILOG_AEXT) &&
- ip->i_afp->if_nextents > 0 &&
- ip->i_afp->if_bytes > 0) {
+ ip->i_af.if_nextents > 0 &&
+ ip->i_af.if_bytes > 0) {
/* worst case, doesn't subtract unused space */
- *nbytes += XFS_IFORK_ASIZE(ip);
+ *nbytes += xfs_inode_attr_fork_size(ip);
*nvecs += 1;
}
break;
case XFS_DINODE_FMT_BTREE:
if ((iip->ili_fields & XFS_ILOG_ABROOT) &&
- ip->i_afp->if_broot_bytes > 0) {
- *nbytes += ip->i_afp->if_broot_bytes;
+ ip->i_af.if_broot_bytes > 0) {
+ *nbytes += ip->i_af.if_broot_bytes;
*nvecs += 1;
}
break;
case XFS_DINODE_FMT_LOCAL:
if ((iip->ili_fields & XFS_ILOG_ADATA) &&
- ip->i_afp->if_bytes > 0) {
- *nbytes += xlog_calc_iovec_len(ip->i_afp->if_bytes);
+ ip->i_af.if_bytes > 0) {
+ *nbytes += xlog_calc_iovec_len(ip->i_af.if_bytes);
*nvecs += 1;
}
break;
@@ -143,7 +143,7 @@ xfs_inode_item_size(
xfs_log_dinode_size(ip->i_mount);
xfs_inode_item_data_fork_size(iip, nvecs, nbytes);
- if (XFS_IFORK_Q(ip))
+ if (xfs_inode_has_attr_fork(ip))
xfs_inode_item_attr_fork_size(iip, nvecs, nbytes);
}
@@ -237,18 +237,18 @@ xfs_inode_item_format_attr_fork(
struct xfs_inode *ip = iip->ili_inode;
size_t data_bytes;
- switch (ip->i_afp->if_format) {
+ switch (ip->i_af.if_format) {
case XFS_DINODE_FMT_EXTENTS:
iip->ili_fields &=
~(XFS_ILOG_ADATA | XFS_ILOG_ABROOT);
if ((iip->ili_fields & XFS_ILOG_AEXT) &&
- ip->i_afp->if_nextents > 0 &&
- ip->i_afp->if_bytes > 0) {
+ ip->i_af.if_nextents > 0 &&
+ ip->i_af.if_bytes > 0) {
struct xfs_bmbt_rec *p;
- ASSERT(xfs_iext_count(ip->i_afp) ==
- ip->i_afp->if_nextents);
+ ASSERT(xfs_iext_count(&ip->i_af) ==
+ ip->i_af.if_nextents);
p = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_IATTR_EXT);
data_bytes = xfs_iextents_copy(ip, p, XFS_ATTR_FORK);
@@ -265,13 +265,13 @@ xfs_inode_item_format_attr_fork(
~(XFS_ILOG_ADATA | XFS_ILOG_AEXT);
if ((iip->ili_fields & XFS_ILOG_ABROOT) &&
- ip->i_afp->if_broot_bytes > 0) {
- ASSERT(ip->i_afp->if_broot != NULL);
+ ip->i_af.if_broot_bytes > 0) {
+ ASSERT(ip->i_af.if_broot != NULL);
xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_IATTR_BROOT,
- ip->i_afp->if_broot,
- ip->i_afp->if_broot_bytes);
- ilf->ilf_asize = ip->i_afp->if_broot_bytes;
+ ip->i_af.if_broot,
+ ip->i_af.if_broot_bytes);
+ ilf->ilf_asize = ip->i_af.if_broot_bytes;
ilf->ilf_size++;
} else {
iip->ili_fields &= ~XFS_ILOG_ABROOT;
@@ -282,12 +282,12 @@ xfs_inode_item_format_attr_fork(
~(XFS_ILOG_AEXT | XFS_ILOG_ABROOT);
if ((iip->ili_fields & XFS_ILOG_ADATA) &&
- ip->i_afp->if_bytes > 0) {
- ASSERT(ip->i_afp->if_u1.if_data != NULL);
+ ip->i_af.if_bytes > 0) {
+ ASSERT(ip->i_af.if_u1.if_data != NULL);
xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_IATTR_LOCAL,
- ip->i_afp->if_u1.if_data,
- ip->i_afp->if_bytes);
- ilf->ilf_asize = (unsigned)ip->i_afp->if_bytes;
+ ip->i_af.if_u1.if_data,
+ ip->i_af.if_bytes);
+ ilf->ilf_asize = (unsigned)ip->i_af.if_bytes;
ilf->ilf_size++;
} else {
iip->ili_fields &= ~XFS_ILOG_ADATA;
@@ -355,11 +355,11 @@ xfs_inode_to_log_dinode_iext_counters(
{
if (xfs_inode_has_large_extent_counts(ip)) {
to->di_big_nextents = xfs_ifork_nextents(&ip->i_df);
- to->di_big_anextents = xfs_ifork_nextents(ip->i_afp);
+ to->di_big_anextents = xfs_ifork_nextents(&ip->i_af);
to->di_nrext64_pad = 0;
} else {
to->di_nextents = xfs_ifork_nextents(&ip->i_df);
- to->di_anextents = xfs_ifork_nextents(ip->i_afp);
+ to->di_anextents = xfs_ifork_nextents(&ip->i_af);
}
}
@@ -390,7 +390,7 @@ xfs_inode_to_log_dinode(
to->di_nblocks = ip->i_nblocks;
to->di_extsize = ip->i_extsize;
to->di_forkoff = ip->i_forkoff;
- to->di_aformat = xfs_ifork_format(ip->i_afp);
+ to->di_aformat = xfs_ifork_format(&ip->i_af);
to->di_flags = ip->i_diflags;
xfs_copy_dm_fields_to_log_dinode(ip, to);
@@ -480,7 +480,7 @@ xfs_inode_item_format(
xfs_inode_item_format_core(ip, lv, &vecp);
xfs_inode_item_format_data_fork(iip, ilf, lv, &vecp);
- if (XFS_IFORK_Q(ip)) {
+ if (xfs_inode_has_attr_fork(ip)) {
xfs_inode_item_format_attr_fork(iip, ilf, lv, &vecp);
} else {
iip->ili_fields &=
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 0d67ff8a8961..1f783e979629 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -955,6 +955,7 @@ xfs_ioc_ag_geometry(
struct xfs_mount *mp,
void __user *arg)
{
+ struct xfs_perag *pag;
struct xfs_ag_geometry ageo;
int error;
@@ -965,7 +966,12 @@ xfs_ioc_ag_geometry(
if (memchr_inv(&ageo.ag_reserved, 0, sizeof(ageo.ag_reserved)))
return -EINVAL;
- error = xfs_ag_get_geometry(mp, ageo.ag_number, &ageo);
+ pag = xfs_perag_get(mp, ageo.ag_number);
+ if (!pag)
+ return -EINVAL;
+
+ error = xfs_ag_get_geometry(pag, &ageo);
+ xfs_perag_put(pag);
if (error)
return error;
@@ -985,7 +991,7 @@ xfs_fill_fsxattr(
struct fileattr *fa)
{
struct xfs_mount *mp = ip->i_mount;
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
fileattr_fill_xflags(fa, xfs_ip2xflags(ip));
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 5d50fed291b4..07da03976ec1 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -159,7 +159,7 @@ xfs_iomap_eof_align_last_fsb(
struct xfs_inode *ip,
xfs_fileoff_t end_fsb)
{
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
xfs_extlen_t extsz = xfs_get_extsz_hint(ip);
xfs_extlen_t align = xfs_eof_alignment(ip);
struct xfs_bmbt_irec irec;
@@ -370,7 +370,7 @@ xfs_iomap_prealloc_size(
struct xfs_iext_cursor ncur = *icur;
struct xfs_bmbt_irec prev, got;
struct xfs_mount *mp = ip->i_mount;
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
int64_t freesp;
xfs_fsblock_t qblocks;
@@ -773,7 +773,8 @@ xfs_direct_write_iomap_begin(
/* may drop and re-acquire the ilock */
error = xfs_reflink_allocate_cow(ip, &imap, &cmap, &shared,
- &lockmode, flags & IOMAP_DIRECT);
+ &lockmode,
+ (flags & IOMAP_DIRECT) || IS_DAX(inode));
if (error)
goto out_unlock;
if (shared)
@@ -868,6 +869,33 @@ const struct iomap_ops xfs_direct_write_iomap_ops = {
};
static int
+xfs_dax_write_iomap_end(
+ struct inode *inode,
+ loff_t pos,
+ loff_t length,
+ ssize_t written,
+ unsigned flags,
+ struct iomap *iomap)
+{
+ struct xfs_inode *ip = XFS_I(inode);
+
+ if (!xfs_is_cow_inode(ip))
+ return 0;
+
+ if (!written) {
+ xfs_reflink_cancel_cow_range(ip, pos, length, true);
+ return 0;
+ }
+
+ return xfs_reflink_end_cow(ip, pos, written);
+}
+
+const struct iomap_ops xfs_dax_write_iomap_ops = {
+ .iomap_begin = xfs_direct_write_iomap_begin,
+ .iomap_end = xfs_dax_write_iomap_end,
+};
+
+static int
xfs_buffered_write_iomap_begin(
struct inode *inode,
loff_t offset,
@@ -1310,12 +1338,12 @@ xfs_xattr_iomap_begin(
lockmode = xfs_ilock_attr_map_shared(ip);
/* if there are no attribute fork or extents, return ENOENT */
- if (!XFS_IFORK_Q(ip) || !ip->i_afp->if_nextents) {
+ if (!xfs_inode_has_attr_fork(ip) || !ip->i_af.if_nextents) {
error = -ENOENT;
goto out_unlock;
}
- ASSERT(ip->i_afp->if_format != XFS_DINODE_FMT_LOCAL);
+ ASSERT(ip->i_af.if_format != XFS_DINODE_FMT_LOCAL);
error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
&nimaps, XFS_BMAPI_ATTRFORK);
out_unlock:
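The new xfs_dax_write_iomap_end() above makes a single decision at the end of a DAX write to a reflinked file: if nothing was written, the speculative CoW allocation is cancelled; otherwise the written range is remapped from the CoW fork into the data fork. A hedged sketch of just that decision, with stub helpers standing in for the real xfs_reflink_* calls:

#include <stdio.h>

/* Stubs standing in for xfs_reflink_cancel_cow_range()/xfs_reflink_end_cow(). */
static int cancel_cow_range(long long pos, long long length)
{
	printf("cancel CoW over [%lld, %lld)\n", pos, pos + length);
	return 0;
}

static int end_cow(long long pos, long long written)
{
	printf("remap CoW over [%lld, %lld)\n", pos, pos + written);
	return 0;
}

static int dax_write_end(int is_cow_inode, long long pos, long long length,
			 long long written)
{
	if (!is_cow_inode)
		return 0;
	if (!written)
		return cancel_cow_range(pos, length);
	return end_cow(pos, written);
}

int main(void)
{
	dax_write_end(1, 4096, 8192, 0);	/* nothing written: cancel */
	dax_write_end(1, 4096, 8192, 8192);	/* fully written: remap */
	return 0;
}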
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index e88dc162c785..c782e8c0479c 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -51,5 +51,6 @@ extern const struct iomap_ops xfs_direct_write_iomap_ops;
extern const struct iomap_ops xfs_read_iomap_ops;
extern const struct iomap_ops xfs_seek_iomap_ops;
extern const struct iomap_ops xfs_xattr_iomap_ops;
+extern const struct iomap_ops xfs_dax_write_iomap_ops;
#endif /* __XFS_IOMAP_H__*/
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index a7402f6ea510..45518b8c613c 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -75,9 +75,8 @@ xfs_initxattrs(
* these attrs can be journalled at inode creation time (along with the
* inode, of course, such that log replay can't cause these to be lost).
*/
-
-STATIC int
-xfs_init_security(
+int
+xfs_inode_init_security(
struct inode *inode,
struct inode *dir,
const struct qstr *qstr)
@@ -122,7 +121,7 @@ xfs_cleanup_inode(
/* Oh, the horror.
* If we can't add the ACL or we fail in
- * xfs_init_security we must back out.
+ * xfs_inode_init_security we must back out.
* ENOSPC can hit here, among other things.
*/
xfs_dentry_to_name(&teardown, dentry);
@@ -208,7 +207,7 @@ xfs_generic_create(
inode = VFS_I(ip);
- error = xfs_init_security(inode, dir, &dentry->d_name);
+ error = xfs_inode_init_security(inode, dir, &dentry->d_name);
if (unlikely(error))
goto out_cleanup_inode;
@@ -424,7 +423,7 @@ xfs_vn_symlink(
inode = VFS_I(cip);
- error = xfs_init_security(inode, dir, &dentry->d_name);
+ error = xfs_inode_init_security(inode, dir, &dentry->d_name);
if (unlikely(error))
goto out_cleanup_inode;
@@ -1282,7 +1281,7 @@ xfs_setup_inode(
* If there is no attribute fork no ACL can exist on this inode,
* and it can't have any file capabilities attached to it either.
*/
- if (!XFS_IFORK_Q(ip)) {
+ if (!xfs_inode_has_attr_fork(ip)) {
inode_has_no_xattr(inode);
cache_no_acl(inode);
}
diff --git a/fs/xfs/xfs_iops.h b/fs/xfs/xfs_iops.h
index 278949056048..cb5fc68c9ea0 100644
--- a/fs/xfs/xfs_iops.h
+++ b/fs/xfs/xfs_iops.h
@@ -17,4 +17,7 @@ extern void xfs_setattr_time(struct xfs_inode *ip, struct iattr *iattr);
int xfs_vn_setattr_size(struct user_namespace *mnt_userns,
struct dentry *dentry, struct iattr *vap);
+int xfs_inode_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr);
+
#endif /* __XFS_IOPS_H__ */
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index f74c9fff72bb..36312b00b164 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -111,8 +111,8 @@ xfs_bulkstat_one_int(
buf->bs_extents64 = nextents;
xfs_bulkstat_health(ip, buf);
- buf->bs_aextents = xfs_ifork_nextents(ip->i_afp);
- buf->bs_forkoff = XFS_IFORK_BOFF(ip);
+ buf->bs_aextents = xfs_ifork_nextents(&ip->i_af);
+ buf->bs_forkoff = xfs_inode_fork_boff(ip);
buf->bs_version = XFS_BULKSTAT_VERSION_V5;
if (xfs_has_v3inodes(mp)) {
diff --git a/fs/xfs/xfs_iunlink_item.c b/fs/xfs/xfs_iunlink_item.c
new file mode 100644
index 000000000000..43005ce8bd48
--- /dev/null
+++ b/fs/xfs/xfs_iunlink_item.c
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020-2022, Red Hat, Inc.
+ * All Rights Reserved.
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_inode.h"
+#include "xfs_trans.h"
+#include "xfs_trans_priv.h"
+#include "xfs_ag.h"
+#include "xfs_iunlink_item.h"
+#include "xfs_trace.h"
+#include "xfs_error.h"
+
+struct kmem_cache *xfs_iunlink_cache;
+
+static inline struct xfs_iunlink_item *IUL_ITEM(struct xfs_log_item *lip)
+{
+ return container_of(lip, struct xfs_iunlink_item, item);
+}
+
+static void
+xfs_iunlink_item_release(
+ struct xfs_log_item *lip)
+{
+ struct xfs_iunlink_item *iup = IUL_ITEM(lip);
+
+ xfs_perag_put(iup->pag);
+ kmem_cache_free(xfs_iunlink_cache, IUL_ITEM(lip));
+}
+
+
+static uint64_t
+xfs_iunlink_item_sort(
+ struct xfs_log_item *lip)
+{
+ return IUL_ITEM(lip)->ip->i_ino;
+}
+
+/*
+ * Look up the inode cluster buffer and log the on-disk unlinked inode change
+ * we need to make.
+ */
+static int
+xfs_iunlink_log_dinode(
+ struct xfs_trans *tp,
+ struct xfs_iunlink_item *iup)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_inode *ip = iup->ip;
+ struct xfs_dinode *dip;
+ struct xfs_buf *ibp;
+ int offset;
+ int error;
+
+ error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &ibp);
+ if (error)
+ return error;
+ /*
+ * Don't log the unlinked field on stale buffers as this may be the
+ * transaction that frees the inode cluster and relogging the buffer
+ * here will incorrectly remove the stale state.
+ */
+ if (ibp->b_flags & XBF_STALE)
+ goto out;
+
+ dip = xfs_buf_offset(ibp, ip->i_imap.im_boffset);
+
+ /* Make sure the old pointer isn't garbage. */
+ if (be32_to_cpu(dip->di_next_unlinked) != iup->old_agino) {
+ xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip,
+ sizeof(*dip), __this_address);
+ error = -EFSCORRUPTED;
+ goto out;
+ }
+
+ trace_xfs_iunlink_update_dinode(mp, iup->pag->pag_agno,
+ XFS_INO_TO_AGINO(mp, ip->i_ino),
+ be32_to_cpu(dip->di_next_unlinked), iup->next_agino);
+
+ dip->di_next_unlinked = cpu_to_be32(iup->next_agino);
+ offset = ip->i_imap.im_boffset +
+ offsetof(struct xfs_dinode, di_next_unlinked);
+
+ xfs_dinode_calc_crc(mp, dip);
+ xfs_trans_inode_buf(tp, ibp);
+ xfs_trans_log_buf(tp, ibp, offset, offset + sizeof(xfs_agino_t) - 1);
+ return 0;
+out:
+ xfs_trans_brelse(tp, ibp);
+ return error;
+}
+
+/*
+ * On precommit, we grab the inode cluster buffer for the inode number we were
+ * passed, then update the next unlinked field for that inode in the buffer and
+ * log the buffer. This ensures that the inode cluster buffer was logged in the
+ * correct order w.r.t. other inode cluster buffers. We can then remove the
+ * iunlink item from the transaction and release it as it has now served its
+ * purpose.
+ */
+static int
+xfs_iunlink_item_precommit(
+ struct xfs_trans *tp,
+ struct xfs_log_item *lip)
+{
+ struct xfs_iunlink_item *iup = IUL_ITEM(lip);
+ int error;
+
+ error = xfs_iunlink_log_dinode(tp, iup);
+ list_del(&lip->li_trans);
+ xfs_iunlink_item_release(lip);
+ return error;
+}
+
+static const struct xfs_item_ops xfs_iunlink_item_ops = {
+ .iop_release = xfs_iunlink_item_release,
+ .iop_sort = xfs_iunlink_item_sort,
+ .iop_precommit = xfs_iunlink_item_precommit,
+};
+
+
+/*
+ * Record the inode's new unlinked list pointer as an iunlink log item so
+ * that the on-disk di_next_unlinked update can be applied to the inode
+ * cluster buffer at transaction precommit time.
+ *
+ * This joins the item to the transaction and marks it dirty so
+ * that we don't need a separate call to do this, nor does the
+ * caller need to know anything about the iunlink item.
+ */
+int
+xfs_iunlink_log_inode(
+ struct xfs_trans *tp,
+ struct xfs_inode *ip,
+ struct xfs_perag *pag,
+ xfs_agino_t next_agino)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_iunlink_item *iup;
+
+ ASSERT(xfs_verify_agino_or_null(pag, next_agino));
+ ASSERT(xfs_verify_agino_or_null(pag, ip->i_next_unlinked));
+
+ /*
+ * Since we're updating a linked list, we should never find that the
+ * current pointer is the same as the new value, unless we're
+ * terminating the list.
+ */
+ if (ip->i_next_unlinked == next_agino) {
+ if (next_agino != NULLAGINO)
+ return -EFSCORRUPTED;
+ return 0;
+ }
+
+ iup = kmem_cache_zalloc(xfs_iunlink_cache, GFP_KERNEL | __GFP_NOFAIL);
+ xfs_log_item_init(mp, &iup->item, XFS_LI_IUNLINK,
+ &xfs_iunlink_item_ops);
+
+ iup->ip = ip;
+ iup->next_agino = next_agino;
+ iup->old_agino = ip->i_next_unlinked;
+
+ atomic_inc(&pag->pag_ref);
+ iup->pag = pag;
+
+ xfs_trans_add_item(tp, &iup->item);
+ tp->t_flags |= XFS_TRANS_DIRTY;
+ set_bit(XFS_LI_DIRTY, &iup->item.li_flags);
+ return 0;
+}
+
diff --git a/fs/xfs/xfs_iunlink_item.h b/fs/xfs/xfs_iunlink_item.h
new file mode 100644
index 000000000000..c793cdcaccde
--- /dev/null
+++ b/fs/xfs/xfs_iunlink_item.h
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020-2022, Red Hat, Inc.
+ * All Rights Reserved.
+ */
+#ifndef XFS_IUNLINK_ITEM_H
+#define XFS_IUNLINK_ITEM_H 1
+
+struct xfs_trans;
+struct xfs_inode;
+struct xfs_perag;
+
+/* in memory log item structure */
+struct xfs_iunlink_item {
+ struct xfs_log_item item;
+ struct xfs_inode *ip;
+ struct xfs_perag *pag;
+ xfs_agino_t next_agino;
+ xfs_agino_t old_agino;
+};
+
+extern struct kmem_cache *xfs_iunlink_cache;
+
+int xfs_iunlink_log_inode(struct xfs_trans *tp, struct xfs_inode *ip,
+ struct xfs_perag *pag, xfs_agino_t next_agino);
+
+#endif /* XFS_IUNLINK_ITEM_H */
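The iunlink item introduced above defers the on-disk di_next_unlinked update to transaction precommit: the item records which inode changes, the agino value the caller saw, and the new value to write, and the update is rejected if the on-disk pointer no longer matches the recorded old value. A simplified userspace sketch of that record and check, using hypothetical names rather than the kernel structures:

#include <stdint.h>
#include <stdio.h>

#define NULLAGINO ((uint32_t)-1)

/* One deferred update: change an inode's on-disk "next unlinked" pointer
 * from the value the caller saw to a new value. */
struct iunlink_change {
	uint32_t old_agino;	/* value expected on disk */
	uint32_t next_agino;	/* value to store; NULLAGINO terminates the list */
};

static int apply_iunlink_change(uint32_t *di_next_unlinked,
				const struct iunlink_change *c)
{
	if (*di_next_unlinked != c->old_agino)
		return -1;	/* stale pointer: treated as corruption */
	*di_next_unlinked = c->next_agino;
	return 0;
}

int main(void)
{
	uint32_t on_disk = 17;
	struct iunlink_change c = { .old_agino = 17, .next_agino = NULLAGINO };

	if (apply_iunlink_change(&on_disk, &c) == 0)
		printf("bucket now terminates at this inode\n");
	return 0;
}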
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index ae904b21e9cc..386b0307aed8 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -57,7 +57,8 @@ xlog_grant_push_ail(
STATIC void
xlog_sync(
struct xlog *log,
- struct xlog_in_core *iclog);
+ struct xlog_in_core *iclog,
+ struct xlog_ticket *ticket);
#if defined(DEBUG)
STATIC void
xlog_verify_grant_tail(
@@ -567,7 +568,8 @@ xlog_state_shutdown_callbacks(
int
xlog_state_release_iclog(
struct xlog *log,
- struct xlog_in_core *iclog)
+ struct xlog_in_core *iclog,
+ struct xlog_ticket *ticket)
{
xfs_lsn_t tail_lsn;
bool last_ref;
@@ -614,7 +616,7 @@ xlog_state_release_iclog(
trace_xlog_iclog_syncing(iclog, _RET_IP_);
spin_unlock(&log->l_icloglock);
- xlog_sync(log, iclog);
+ xlog_sync(log, iclog, ticket);
spin_lock(&log->l_icloglock);
return 0;
}
@@ -881,7 +883,7 @@ xlog_force_iclog(
iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
if (iclog->ic_state == XLOG_STATE_ACTIVE)
xlog_state_switch_iclogs(iclog->ic_log, iclog, 0);
- return xlog_state_release_iclog(iclog->ic_log, iclog);
+ return xlog_state_release_iclog(iclog->ic_log, iclog, NULL);
}
/*
@@ -944,6 +946,8 @@ xlog_write_unmount_record(
.lv_niovecs = 1,
.lv_iovecp = &reg,
};
+ LIST_HEAD(lv_chain);
+ list_add(&vec.lv_list, &lv_chain);
BUILD_BUG_ON((sizeof(struct xlog_op_header) +
sizeof(struct xfs_unmount_log_format)) !=
@@ -952,7 +956,7 @@ xlog_write_unmount_record(
/* account for space used by record data */
ticket->t_curr_res -= sizeof(unmount_rec);
- return xlog_write(log, NULL, &vec, ticket, reg.i_len);
+ return xlog_write(log, NULL, &lv_chain, ticket, reg.i_len);
}
/*
@@ -1921,9 +1925,17 @@ xlog_write_iclog(
* device cache first to ensure all metadata writeback covered
* by the LSN in this iclog is on stable storage. This is slow,
* but it *must* complete before we issue the external log IO.
+ *
+ * If the flush fails, we cannot conclude that past metadata
+ * writeback from the log succeeded. Repeating the flush is
+ * not possible, hence we must shut down with log IO error to
+ * avoid shutdown re-entering this path and erroring out again.
*/
- if (log->l_targ != log->l_mp->m_ddev_targp)
- blkdev_issue_flush(log->l_mp->m_ddev_targp->bt_bdev);
+ if (log->l_targ != log->l_mp->m_ddev_targp &&
+ blkdev_issue_flush(log->l_mp->m_ddev_targp->bt_bdev)) {
+ xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
+ return;
+ }
}
if (iclog->ic_flags & XLOG_ICL_NEED_FUA)
iclog->ic_bio.bi_opf |= REQ_FUA;
@@ -2000,7 +2012,7 @@ xlog_calc_iclog_size(
}
/*
- * Flush out the in-core log (iclog) to the on-disk log in an asynchronous
+ * Flush out the in-core log (iclog) to the on-disk log in an asynchronous
* fashion. Previously, we should have moved the current iclog
* ptr in the log to point to the next available iclog. This allows further
* write to continue while this code syncs out an iclog ready to go.
@@ -2025,7 +2037,8 @@ xlog_calc_iclog_size(
STATIC void
xlog_sync(
struct xlog *log,
- struct xlog_in_core *iclog)
+ struct xlog_in_core *iclog,
+ struct xlog_ticket *ticket)
{
unsigned int count; /* byte count of bwrite */
unsigned int roundoff; /* roundoff to BB or stripe */
@@ -2037,12 +2050,20 @@ xlog_sync(
count = xlog_calc_iclog_size(log, iclog, &roundoff);
- /* move grant heads by roundoff in sync */
- xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff);
- xlog_grant_add_space(log, &log->l_write_head.grant, roundoff);
+ /*
+ * If we have a ticket, account for the roundoff via the ticket
+ * reservation to avoid touching the hot grant heads needlessly.
+ * Otherwise, we have to move grant heads directly.
+ */
+ if (ticket) {
+ ticket->t_curr_res -= roundoff;
+ } else {
+ xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff);
+ xlog_grant_add_space(log, &log->l_write_head.grant, roundoff);
+ }
/* put cycle number in every block */
- xlog_pack_data(log, iclog, roundoff);
+ xlog_pack_data(log, iclog, roundoff);
/* real byte length */
size = iclog->ic_offset;
@@ -2275,7 +2296,7 @@ xlog_write_get_more_iclog_space(
spin_lock(&log->l_icloglock);
ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC);
xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
- error = xlog_state_release_iclog(log, iclog);
+ error = xlog_state_release_iclog(log, iclog, ticket);
spin_unlock(&log->l_icloglock);
if (error)
return error;
@@ -2471,13 +2492,13 @@ int
xlog_write(
struct xlog *log,
struct xfs_cil_ctx *ctx,
- struct xfs_log_vec *log_vector,
+ struct list_head *lv_chain,
struct xlog_ticket *ticket,
uint32_t len)
{
struct xlog_in_core *iclog = NULL;
- struct xfs_log_vec *lv = log_vector;
+ struct xfs_log_vec *lv;
uint32_t record_cnt = 0;
uint32_t data_cnt = 0;
int error = 0;
@@ -2505,7 +2526,7 @@ xlog_write(
if (ctx)
xlog_cil_set_ctx_write_state(ctx, iclog);
- while (lv) {
+ list_for_each_entry(lv, lv_chain, lv_list) {
/*
* If the entire log vec does not fit in the iclog, punt it to
* the partial copy loop which can handle this case.
@@ -2526,7 +2547,6 @@ xlog_write(
xlog_write_full(lv, ticket, iclog, &log_offset,
&len, &record_cnt, &data_cnt);
}
- lv = lv->lv_next;
}
ASSERT(len == 0);
@@ -2538,7 +2558,7 @@ xlog_write(
*/
spin_lock(&log->l_icloglock);
xlog_state_finish_copy(log, iclog, record_cnt, 0);
- error = xlog_state_release_iclog(log, iclog);
+ error = xlog_state_release_iclog(log, iclog, ticket);
spin_unlock(&log->l_icloglock);
return error;
@@ -2958,7 +2978,7 @@ restart:
* reference to the iclog.
*/
if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1))
- error = xlog_state_release_iclog(log, iclog);
+ error = xlog_state_release_iclog(log, iclog, ticket);
spin_unlock(&log->l_icloglock);
if (error)
return error;
@@ -3406,7 +3426,8 @@ xfs_log_ticket_get(
static int
xlog_calc_unit_res(
struct xlog *log,
- int unit_bytes)
+ int unit_bytes,
+ int *niclogs)
{
int iclog_space;
uint num_headers;
@@ -3486,6 +3507,8 @@ xlog_calc_unit_res(
/* roundoff padding for transaction data and one for commit record */
unit_bytes += 2 * log->l_iclog_roundoff;
+ if (niclogs)
+ *niclogs = num_headers;
return unit_bytes;
}
@@ -3494,7 +3517,7 @@ xfs_log_calc_unit_res(
struct xfs_mount *mp,
int unit_bytes)
{
- return xlog_calc_unit_res(mp->m_log, unit_bytes);
+ return xlog_calc_unit_res(mp->m_log, unit_bytes, NULL);
}
/*
@@ -3512,7 +3535,7 @@ xlog_ticket_alloc(
tic = kmem_cache_zalloc(xfs_log_ticket_cache, GFP_NOFS | __GFP_NOFAIL);
- unit_res = xlog_calc_unit_res(log, unit_bytes);
+ unit_res = xlog_calc_unit_res(log, unit_bytes, &tic->t_iclog_hdrs);
atomic_set(&tic->t_ref, 1);
tic->t_task = current;
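xlog_state_release_iclog() and xlog_sync() now carry an optional ticket so that the roundoff needed to pad the iclog out to a block or stripe boundary can be charged against that ticket's reservation instead of the global grant heads. Illustrative arithmetic only, with made-up sizes and a simplified padding rule (the real calculation lives in xlog_calc_iclog_size()):

#include <stdio.h>

int main(void)
{
	int stripe_unit = 4096;		/* hypothetical padding granularity */
	int iclog_bytes = 13000;	/* data currently in the iclog */
	int t_curr_res  = 20000;	/* reservation left on the ticket */

	/* pad the write out to the next stripe_unit boundary */
	int roundoff = (stripe_unit - (iclog_bytes % stripe_unit)) % stripe_unit;

	t_curr_res -= roundoff;		/* the ticket pays for the padding */
	printf("roundoff=%d, remaining reservation=%d\n", roundoff, t_curr_res);
	return 0;
}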
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index f3ce046a7d45..2728886c2963 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -9,7 +9,8 @@
struct xfs_cil_ctx;
struct xfs_log_vec {
- struct xfs_log_vec *lv_next; /* next lv in build list */
+ struct list_head lv_list; /* CIL lv chain ptrs */
+ uint32_t lv_order_id; /* chain ordering info */
int lv_niovecs; /* number of iovecs in lv */
struct xfs_log_iovec *lv_iovecp; /* iovec array */
struct xfs_log_item *lv_item; /* owner */
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index db6cb7800251..eccbfb99e894 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -44,9 +44,20 @@ xlog_cil_ticket_alloc(
* transaction overhead reservation from the first transaction commit.
*/
tic->t_curr_res = 0;
+ tic->t_iclog_hdrs = 0;
return tic;
}
+static inline void
+xlog_cil_set_iclog_hdr_count(struct xfs_cil *cil)
+{
+ struct xlog *log = cil->xc_log;
+
+ atomic_set(&cil->xc_iclog_hdrs,
+ (XLOG_CIL_BLOCKING_SPACE_LIMIT(log) /
+ (log->l_iclog_size - log->l_iclog_hsize)));
+}
+
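xlog_cil_set_iclog_hdr_count() above sizes the pool of iclog header reservations by dividing the CIL blocking space limit by the usable payload of one iclog (its size minus its header). With made-up values (the real numbers come from the mount's log geometry and the XLOG_CIL_BLOCKING_SPACE_LIMIT() definition):

#include <stdio.h>

int main(void)
{
	int blocking_limit = 8 * 1024 * 1024;	/* hypothetical CIL hard limit */
	int iclog_size     = 32 * 1024;		/* hypothetical iclog size */
	int iclog_hsize    = 512;		/* hypothetical iclog header size */

	int hdrs = blocking_limit / (iclog_size - iclog_hsize);
	printf("xc_iclog_hdrs = %d\n", hdrs);	/* 8 MiB / 32256 bytes -> 260 */
	return 0;
}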
/*
* Check if the current log item was first committed in this sequence.
* We can't rely on just the log item being in the CIL, we have to check
@@ -61,7 +72,7 @@ xlog_item_in_current_chkpt(
struct xfs_cil *cil,
struct xfs_log_item *lip)
{
- if (list_empty(&lip->li_cil))
+ if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
return false;
/*
@@ -93,15 +104,88 @@ xlog_cil_ctx_alloc(void)
ctx = kmem_zalloc(sizeof(*ctx), KM_NOFS);
INIT_LIST_HEAD(&ctx->committing);
INIT_LIST_HEAD(&ctx->busy_extents);
+ INIT_LIST_HEAD(&ctx->log_items);
+ INIT_LIST_HEAD(&ctx->lv_chain);
INIT_WORK(&ctx->push_work, xlog_cil_push_work);
return ctx;
}
+/*
+ * Aggregate the CIL per cpu structures into global counts, lists, etc and
+ * clear the percpu state ready for the next context to use. This is called
+ * from the push code with the context lock held exclusively, hence nothing else
+ * will be accessing or modifying the per-cpu counters.
+ */
+static void
+xlog_cil_push_pcp_aggregate(
+ struct xfs_cil *cil,
+ struct xfs_cil_ctx *ctx)
+{
+ struct xlog_cil_pcp *cilpcp;
+ int cpu;
+
+ for_each_online_cpu(cpu) {
+ cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
+
+ ctx->ticket->t_curr_res += cilpcp->space_reserved;
+ cilpcp->space_reserved = 0;
+
+ if (!list_empty(&cilpcp->busy_extents)) {
+ list_splice_init(&cilpcp->busy_extents,
+ &ctx->busy_extents);
+ }
+ if (!list_empty(&cilpcp->log_items))
+ list_splice_init(&cilpcp->log_items, &ctx->log_items);
+
+ /*
+ * We're in the middle of switching cil contexts. Reset the
+ * counter we use to detect when the current context is nearing
+ * full.
+ */
+ cilpcp->space_used = 0;
+ }
+}
+
+/*
+ * Aggregate the CIL per-cpu space used counters into the global atomic value.
+ * This is called when the per-cpu counter aggregation will first pass the soft
+ * limit threshold so we can switch to atomic counter aggregation for accurate
+ * detection of hard limit traversal.
+ */
+static void
+xlog_cil_insert_pcp_aggregate(
+ struct xfs_cil *cil,
+ struct xfs_cil_ctx *ctx)
+{
+ struct xlog_cil_pcp *cilpcp;
+ int cpu;
+ int count = 0;
+
+ /* Trigger atomic updates then aggregate only for the first caller */
+ if (!test_and_clear_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags))
+ return;
+
+ for_each_online_cpu(cpu) {
+ int old, prev;
+
+ cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
+ do {
+ old = cilpcp->space_used;
+ prev = cmpxchg(&cilpcp->space_used, old, 0);
+ } while (old != prev);
+ count += old;
+ }
+ atomic_add(count, &ctx->space_used);
+}
+
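The cmpxchg loop in xlog_cil_insert_pcp_aggregate() atomically empties each per-cpu space counter while folding its value into the global context counter. In userspace terms the same fold can be written as an atomic exchange; a sketch under that assumption, using C11 atomics and a plain array standing in for real per-cpu data:

#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS 4

int main(void)
{
	_Atomic int pcp_space[NR_CPUS] = { 100, 250, 0, 75 };
	_Atomic int global_space = 0;
	int total = 0;

	/* drain each per-cpu counter to zero and remember what was in it */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		total += atomic_exchange(&pcp_space[cpu], 0);

	atomic_fetch_add(&global_space, total);
	printf("folded %d bytes into the global counter\n", total);
	return 0;
}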
static void
xlog_cil_ctx_switch(
struct xfs_cil *cil,
struct xfs_cil_ctx *ctx)
{
+ xlog_cil_set_iclog_hdr_count(cil);
+ set_bit(XLOG_CIL_EMPTY, &cil->xc_flags);
+ set_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags);
ctx->sequence = ++cil->xc_current_sequence;
ctx->cil = cil;
cil->xc_ctx = ctx;
@@ -123,6 +207,7 @@ xlog_cil_init_post_recovery(
{
log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
log->l_cilp->xc_ctx->sequence = 1;
+ xlog_cil_set_iclog_hdr_count(log->l_cilp);
}
static inline int
@@ -254,6 +339,7 @@ xlog_cil_alloc_shadow_bufs(
memset(lv, 0, xlog_cil_iovec_space(niovecs));
+ INIT_LIST_HEAD(&lv->lv_list);
lv->lv_item = lip;
lv->lv_size = buf_size;
if (ordered)
@@ -269,7 +355,6 @@ xlog_cil_alloc_shadow_bufs(
else
lv->lv_buf_len = 0;
lv->lv_bytes = 0;
- lv->lv_next = NULL;
}
/* Ensure the lv is set up according to ->iop_size */
@@ -396,7 +481,6 @@ xlog_cil_insert_format_items(
if (lip->li_lv && shadow->lv_size <= lip->li_lv->lv_size) {
/* same or smaller, optimise common overwrite case */
lv = lip->li_lv;
- lv->lv_next = NULL;
if (ordered)
goto insert;
@@ -434,6 +518,23 @@ insert:
}
/*
+ * The use of lockless waitqueue_active() requires that the caller has
+ * serialised itself against the wakeup call in xlog_cil_push_work(). That
+ * can be done by either holding the push lock or the context lock.
+ */
+static inline bool
+xlog_cil_over_hard_limit(
+ struct xlog *log,
+ int32_t space_used)
+{
+ if (waitqueue_active(&log->l_cilp->xc_push_wait))
+ return true;
+ if (space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log))
+ return true;
+ return false;
+}
+
+/*
* Insert the log items into the CIL and calculate the difference in space
* consumed by the item. Add the space to the checkpoint ticket and calculate
* if the change requires additional log metadata. If it does, take that space
@@ -450,8 +551,10 @@ xlog_cil_insert_items(
struct xfs_cil_ctx *ctx = cil->xc_ctx;
struct xfs_log_item *lip;
int len = 0;
- int iclog_space;
int iovhdr_res = 0, split_res = 0, ctx_res = 0;
+ int space_used;
+ int order;
+ struct xlog_cil_pcp *cilpcp;
ASSERT(tp);
@@ -461,93 +564,135 @@ xlog_cil_insert_items(
*/
xlog_cil_insert_format_items(log, tp, &len);
- spin_lock(&cil->xc_cil_lock);
+ /*
+ * Subtract the space released by intent cancelation from the space we
+ * consumed so that we remove it from the CIL space and add it back to
+ * the current transaction reservation context.
+ */
+ len -= released_space;
- /* attach the transaction to the CIL if it has any busy extents */
- if (!list_empty(&tp->t_busy))
- list_splice_init(&tp->t_busy, &ctx->busy_extents);
+ /*
+ * Grab the per-cpu pointer for the CIL before we start any accounting.
+ * That ensures that we are running with pre-emption disabled and so we
+ * can't be scheduled away between split sample/update operations that
+ * are done without outside locking to serialise them.
+ */
+ cilpcp = get_cpu_ptr(cil->xc_pcp);
/*
- * Now transfer enough transaction reservation to the context ticket
- * for the checkpoint. The context ticket is special - the unit
- * reservation has to grow as well as the current reservation as we
- * steal from tickets so we can correctly determine the space used
- * during the transaction commit.
+ * We need to take the CIL checkpoint unit reservation on the first
+ * commit into the CIL. Test the XLOG_CIL_EMPTY bit first so we don't
+ * unnecessarily do an atomic op in the fast path here. We can clear the
+ * XLOG_CIL_EMPTY bit as we are under the xc_ctx_lock here and that
+ * needs to be held exclusively to reset the XLOG_CIL_EMPTY bit.
*/
- if (ctx->ticket->t_curr_res == 0) {
+ if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) &&
+ test_and_clear_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
ctx_res = ctx->ticket->t_unit_res;
- ctx->ticket->t_curr_res = ctx_res;
- tp->t_ticket->t_curr_res -= ctx_res;
- }
- /* do we need space for more log record headers? */
- iclog_space = log->l_iclog_size - log->l_iclog_hsize;
- if (len > 0 && (ctx->space_used / iclog_space !=
- (ctx->space_used + len) / iclog_space)) {
- split_res = (len + iclog_space - 1) / iclog_space;
- /* need to take into account split region headers, too */
- split_res *= log->l_iclog_hsize + sizeof(struct xlog_op_header);
- ctx->ticket->t_unit_res += split_res;
- ctx->ticket->t_curr_res += split_res;
- tp->t_ticket->t_curr_res -= split_res;
- ASSERT(tp->t_ticket->t_curr_res >= len);
+ /*
+ * Check if we need to steal iclog headers. atomic_read() is not a
+ * locked atomic operation, so we can check the value before we do any
+ * real atomic ops in the fast path. If we've already taken the CIL unit
+ * reservation from this commit, we've already got one iclog header
+ * space reserved so we have to account for that otherwise we risk
+ * overrunning the reservation on this ticket.
+ *
+ * If the CIL is already at the hard limit, we might need more header
+ * space than originally reserved. So steal more header space from every
+ * commit that occurs once we are over the hard limit to ensure the CIL
+ * push won't run out of reservation space.
+ *
+ * This can steal more than we need, but that's OK.
+ *
+ * The cil->xc_ctx_lock provides the serialisation necessary for safely
+ * calling xlog_cil_over_hard_limit() in this context.
+ */
+ space_used = atomic_read(&ctx->space_used) + cilpcp->space_used + len;
+ if (atomic_read(&cil->xc_iclog_hdrs) > 0 ||
+ xlog_cil_over_hard_limit(log, space_used)) {
+ split_res = log->l_iclog_hsize +
+ sizeof(struct xlog_op_header);
+ if (ctx_res)
+ ctx_res += split_res * (tp->t_ticket->t_iclog_hdrs - 1);
+ else
+ ctx_res = split_res * tp->t_ticket->t_iclog_hdrs;
+ atomic_sub(tp->t_ticket->t_iclog_hdrs, &cil->xc_iclog_hdrs);
}
- tp->t_ticket->t_curr_res -= len;
- tp->t_ticket->t_curr_res += released_space;
- ctx->space_used += len;
- ctx->space_used -= released_space;
+ cilpcp->space_reserved += ctx_res;
/*
- * If we've overrun the reservation, dump the tx details before we move
- * the log items. Shutdown is imminent...
+ * Accurately account when over the soft limit, otherwise fold the
+ * percpu count into the global count if over the per-cpu threshold.
*/
- if (WARN_ON(tp->t_ticket->t_curr_res < 0)) {
- xfs_warn(log->l_mp, "Transaction log reservation overrun:");
- xfs_warn(log->l_mp,
- " log items: %d bytes (iov hdrs: %d bytes)",
- len, iovhdr_res);
- xfs_warn(log->l_mp, " split region headers: %d bytes",
- split_res);
- xfs_warn(log->l_mp, " ctx ticket: %d bytes", ctx_res);
- xlog_print_trans(tp);
+ if (!test_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags)) {
+ atomic_add(len, &ctx->space_used);
+ } else if (cilpcp->space_used + len >
+ (XLOG_CIL_SPACE_LIMIT(log) / num_online_cpus())) {
+ space_used = atomic_add_return(cilpcp->space_used + len,
+ &ctx->space_used);
+ cilpcp->space_used = 0;
+
+ /*
+ * If we just transitioned over the soft limit, we need to
+ * transition to the global atomic counter.
+ */
+ if (space_used >= XLOG_CIL_SPACE_LIMIT(log))
+ xlog_cil_insert_pcp_aggregate(cil, ctx);
+ } else {
+ cilpcp->space_used += len;
}
+ /* attach the transaction to the CIL if it has any busy extents */
+ if (!list_empty(&tp->t_busy))
+ list_splice_init(&tp->t_busy, &cilpcp->busy_extents);
/*
- * Now (re-)position everything modified at the tail of the CIL.
+ * Now update the order of everything modified in the transaction
+ * and insert items into the CIL if they aren't already there.
* We do this here so we only need to take the CIL lock once during
* the transaction commit.
*/
+ order = atomic_inc_return(&ctx->order_id);
list_for_each_entry(lip, &tp->t_items, li_trans) {
-
/* Skip items which aren't dirty in this transaction. */
if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
continue;
- /*
- * Only move the item if it isn't already at the tail. This is
- * to prevent a transient list_empty() state when reinserting
- * an item that is already the only item in the CIL.
- */
- if (!list_is_last(&lip->li_cil, &cil->xc_cil))
- list_move_tail(&lip->li_cil, &cil->xc_cil);
+ lip->li_order_id = order;
+ if (!list_empty(&lip->li_cil))
+ continue;
+ list_add_tail(&lip->li_cil, &cilpcp->log_items);
}
+ put_cpu_ptr(cilpcp);
- spin_unlock(&cil->xc_cil_lock);
-
- if (tp->t_ticket->t_curr_res < 0)
+ /*
+ * If we've overrun the reservation, dump the tx details before we move
+ * the log items. Shutdown is imminent...
+ */
+ tp->t_ticket->t_curr_res -= ctx_res + len;
+ if (WARN_ON(tp->t_ticket->t_curr_res < 0)) {
+ xfs_warn(log->l_mp, "Transaction log reservation overrun:");
+ xfs_warn(log->l_mp,
+ " log items: %d bytes (iov hdrs: %d bytes)",
+ len, iovhdr_res);
+ xfs_warn(log->l_mp, " split region headers: %d bytes",
+ split_res);
+ xfs_warn(log->l_mp, " ctx ticket: %d bytes", ctx_res);
+ xlog_print_trans(tp);
xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
+ }
}
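The insert path above only folds a CPU's running total into the global counter once that CPU has accumulated more than its share of the CIL soft limit, i.e. XLOG_CIL_SPACE_LIMIT(log) / num_online_cpus(). A worked example of that threshold with made-up numbers:

#include <stdio.h>

int main(void)
{
	int space_limit = 4 * 1024 * 1024;	/* hypothetical CIL soft limit */
	int online_cpus = 8;
	int per_cpu_threshold = space_limit / online_cpus;	/* 524288 */

	int cpu_space_used = 500 * 1024;	/* this CPU's local running total */
	int len = 32 * 1024;			/* bytes added by this commit */

	if (cpu_space_used + len > per_cpu_threshold)
		printf("fold %d bytes into ctx->space_used\n",
		       cpu_space_used + len);
	else
		printf("keep accumulating locally (threshold %d)\n",
		       per_cpu_threshold);
	return 0;
}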
static void
xlog_cil_free_logvec(
- struct xfs_log_vec *log_vector)
+ struct list_head *lv_chain)
{
struct xfs_log_vec *lv;
- for (lv = log_vector; lv; ) {
- struct xfs_log_vec *next = lv->lv_next;
+ while (!list_empty(lv_chain)) {
+ lv = list_first_entry(lv_chain, struct xfs_log_vec, lv_list);
+ list_del_init(&lv->lv_list);
kmem_free(lv);
- lv = next;
}
}
@@ -647,7 +792,7 @@ xlog_cil_committed(
spin_unlock(&ctx->cil->xc_push_lock);
}
- xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
+ xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, &ctx->lv_chain,
ctx->start_lsn, abort);
xfs_extent_busy_sort(&ctx->busy_extents);
@@ -658,7 +803,7 @@ xlog_cil_committed(
list_del(&ctx->committing);
spin_unlock(&ctx->cil->xc_push_lock);
- xlog_cil_free_logvec(ctx->lv_chain);
+ xlog_cil_free_logvec(&ctx->lv_chain);
if (!list_empty(&ctx->busy_extents))
xlog_discard_busy_extents(mp, ctx);
@@ -817,7 +962,6 @@ restart:
static int
xlog_cil_write_chain(
struct xfs_cil_ctx *ctx,
- struct xfs_log_vec *chain,
uint32_t chain_len)
{
struct xlog *log = ctx->cil->xc_log;
@@ -826,7 +970,7 @@ xlog_cil_write_chain(
error = xlog_cil_order_write(ctx->cil, ctx->sequence, _START_RECORD);
if (error)
return error;
- return xlog_write(log, ctx, chain, ctx->ticket, chain_len);
+ return xlog_write(log, ctx, &ctx->lv_chain, ctx->ticket, chain_len);
}
/*
@@ -855,6 +999,8 @@ xlog_cil_write_commit_record(
.lv_iovecp = &reg,
};
int error;
+ LIST_HEAD(lv_chain);
+ list_add(&vec.lv_list, &lv_chain);
if (xlog_is_shutdown(log))
return -EIO;
@@ -865,7 +1011,7 @@ xlog_cil_write_commit_record(
/* account for space used by record data */
ctx->ticket->t_curr_res -= reg.i_len;
- error = xlog_write(log, ctx, &vec, ctx->ticket, reg.i_len);
+ error = xlog_write(log, ctx, &lv_chain, ctx->ticket, reg.i_len);
if (error)
xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
return error;
@@ -931,12 +1077,31 @@ xlog_cil_build_trans_hdr(
lvhdr->lv_niovecs = 2;
lvhdr->lv_iovecp = &hdr->lhdr[0];
lvhdr->lv_bytes = hdr->lhdr[0].i_len + hdr->lhdr[1].i_len;
- lvhdr->lv_next = ctx->lv_chain;
tic->t_curr_res -= lvhdr->lv_bytes;
}
/*
+ * CIL item reordering compare function. We want to order in ascending ID order,
+ * but we want to leave items with the same ID in the order they were added to
+ * the list. This is important for operations like reflink where we log 4 order
+ * dependent intents in a single transaction when we overwrite an existing
+ * shared extent with a new shared extent. i.e. BUI(unmap), CUI(drop),
+ * CUI (inc), BUI(remap)...
+ */
+static int
+xlog_cil_order_cmp(
+ void *priv,
+ const struct list_head *a,
+ const struct list_head *b)
+{
+ struct xfs_log_vec *l1 = container_of(a, struct xfs_log_vec, lv_list);
+ struct xfs_log_vec *l2 = container_of(b, struct xfs_log_vec, lv_list);
+
+ return l1->lv_order_id > l2->lv_order_id;
+}
+
+/*
* Pull all the log vectors off the items in the CIL, and remove the items from
* the CIL. We don't need the CIL lock here because it's only needed on the
* transaction commit side which is currently locked out by the flush lock.
@@ -947,18 +1112,16 @@ xlog_cil_build_trans_hdr(
*/
static void
xlog_cil_build_lv_chain(
- struct xfs_cil *cil,
struct xfs_cil_ctx *ctx,
struct list_head *whiteouts,
uint32_t *num_iovecs,
uint32_t *num_bytes)
{
- struct xfs_log_vec *lv = NULL;
-
- while (!list_empty(&cil->xc_cil)) {
+ while (!list_empty(&ctx->log_items)) {
struct xfs_log_item *item;
+ struct xfs_log_vec *lv;
- item = list_first_entry(&cil->xc_cil,
+ item = list_first_entry(&ctx->log_items,
struct xfs_log_item, li_cil);
if (test_bit(XFS_LI_WHITEOUT, &item->li_flags)) {
@@ -967,18 +1130,18 @@ xlog_cil_build_lv_chain(
continue;
}
- list_del_init(&item->li_cil);
- if (!ctx->lv_chain)
- ctx->lv_chain = item->li_lv;
- else
- lv->lv_next = item->li_lv;
lv = item->li_lv;
- item->li_lv = NULL;
- *num_iovecs += lv->lv_niovecs;
+ lv->lv_order_id = item->li_order_id;
/* we don't write ordered log vectors */
if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED)
*num_bytes += lv->lv_bytes;
+ *num_iovecs += lv->lv_niovecs;
+ list_add_tail(&lv->lv_list, &ctx->lv_chain);
+
+ list_del_init(&item->li_cil);
+ item->li_order_id = 0;
+ item->li_lv = NULL;
}
}
@@ -1022,10 +1185,11 @@ xlog_cil_push_work(
int num_bytes = 0;
int error = 0;
struct xlog_cil_trans_hdr thdr;
- struct xfs_log_vec lvhdr = { NULL };
+ struct xfs_log_vec lvhdr = {};
xfs_csn_t push_seq;
bool push_commit_stable;
LIST_HEAD (whiteouts);
+ struct xlog_ticket *ticket;
new_ctx = xlog_cil_ctx_alloc();
new_ctx->ticket = xlog_cil_ticket_alloc(log);
@@ -1049,12 +1213,14 @@ xlog_cil_push_work(
if (waitqueue_active(&cil->xc_push_wait))
wake_up_all(&cil->xc_push_wait);
+ xlog_cil_push_pcp_aggregate(cil, ctx);
+
/*
* Check if we've anything to push. If there is nothing, then we don't
* move on to a new sequence number and so we have to be able to push
* this sequence again later.
*/
- if (list_empty(&cil->xc_cil)) {
+ if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) {
cil->xc_push_seq = 0;
spin_unlock(&cil->xc_push_lock);
goto out_skip;
@@ -1094,7 +1260,7 @@ xlog_cil_push_work(
list_add(&ctx->committing, &cil->xc_committing);
spin_unlock(&cil->xc_push_lock);
- xlog_cil_build_lv_chain(cil, ctx, &whiteouts, &num_iovecs, &num_bytes);
+ xlog_cil_build_lv_chain(ctx, &whiteouts, &num_iovecs, &num_bytes);
/*
* Switch the contexts so we can drop the context lock and move out
@@ -1127,14 +1293,30 @@ xlog_cil_push_work(
up_write(&cil->xc_ctx_lock);
/*
+ * Sort the log vector chain before we add the transaction headers.
+ * This ensures we always have the transaction headers at the start
+ * of the chain.
+ */
+ list_sort(NULL, &ctx->lv_chain, xlog_cil_order_cmp);
+
+ /*
* Build a checkpoint transaction header and write it to the log to
* begin the transaction. We need to account for the space used by the
* transaction header here as it is not accounted for in xlog_write().
+ * Add the lvhdr to the head of the lv chain we pass to xlog_write() so
+ * it gets written into the iclog first.
*/
xlog_cil_build_trans_hdr(ctx, &thdr, &lvhdr, num_iovecs);
num_bytes += lvhdr.lv_bytes;
+ list_add(&lvhdr.lv_list, &ctx->lv_chain);
- error = xlog_cil_write_chain(ctx, &lvhdr, num_bytes);
+ /*
+ * Take the lvhdr back off the lv_chain immediately after calling
+ * xlog_cil_write_chain() as it should not be passed to log IO
+ * completion.
+ */
+ error = xlog_cil_write_chain(ctx, num_bytes);
+ list_del(&lvhdr.lv_list);
if (error)
goto out_abort_free_ticket;
@@ -1142,7 +1324,14 @@ xlog_cil_push_work(
if (error)
goto out_abort_free_ticket;
- xfs_log_ticket_ungrant(log, ctx->ticket);
+ /*
+ * Grab the ticket from the ctx so we can ungrant it after releasing the
+ * commit_iclog. The ctx may be freed by the time we return from
+ * releasing the commit_iclog (i.e. checkpoint has been completed and
+ * callback run) so we can't reference the ctx after the call to
+ * xlog_state_release_iclog().
+ */
+ ticket = ctx->ticket;
/*
* If the checkpoint spans multiple iclogs, wait for all previous iclogs
@@ -1192,12 +1381,14 @@ xlog_cil_push_work(
if (push_commit_stable &&
ctx->commit_iclog->ic_state == XLOG_STATE_ACTIVE)
xlog_state_switch_iclogs(log, ctx->commit_iclog, 0);
- xlog_state_release_iclog(log, ctx->commit_iclog);
+ ticket = ctx->ticket;
+ xlog_state_release_iclog(log, ctx->commit_iclog, ticket);
/* Not safe to reference ctx now! */
spin_unlock(&log->l_icloglock);
xlog_cil_cleanup_whiteouts(&whiteouts);
+ xfs_log_ticket_ungrant(log, ticket);
return;
out_skip:
@@ -1207,17 +1398,19 @@ out_skip:
return;
out_abort_free_ticket:
- xfs_log_ticket_ungrant(log, ctx->ticket);
ASSERT(xlog_is_shutdown(log));
xlog_cil_cleanup_whiteouts(&whiteouts);
if (!ctx->commit_iclog) {
+ xfs_log_ticket_ungrant(log, ctx->ticket);
xlog_cil_committed(ctx);
return;
}
spin_lock(&log->l_icloglock);
- xlog_state_release_iclog(log, ctx->commit_iclog);
+ ticket = ctx->ticket;
+ xlog_state_release_iclog(log, ctx->commit_iclog, ticket);
/* Not safe to reference ctx now! */
spin_unlock(&log->l_icloglock);
+ xfs_log_ticket_ungrant(log, ticket);
}
/*
@@ -1232,18 +1425,27 @@ xlog_cil_push_background(
struct xlog *log) __releases(cil->xc_ctx_lock)
{
struct xfs_cil *cil = log->l_cilp;
+ int space_used = atomic_read(&cil->xc_ctx->space_used);
/*
* The cil won't be empty because we are called while holding the
- * context lock so whatever we added to the CIL will still be there
+ * context lock so whatever we added to the CIL will still be there.
*/
- ASSERT(!list_empty(&cil->xc_cil));
+ ASSERT(!test_bit(XLOG_CIL_EMPTY, &cil->xc_flags));
/*
- * Don't do a background push if we haven't used up all the
- * space available yet.
+ * We are done if:
+ * - we haven't used up all the space available yet; or
+ * - we've already queued up a push; and
+ * - we're not over the hard limit; and
+ * - nothing has been over the hard limit.
+ *
+ * If so, we don't need to take the push lock as there's nothing to do.
*/
- if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log)) {
+ if (space_used < XLOG_CIL_SPACE_LIMIT(log) ||
+ (cil->xc_push_seq == cil->xc_current_sequence &&
+ space_used < XLOG_CIL_BLOCKING_SPACE_LIMIT(log) &&
+ !waitqueue_active(&cil->xc_push_wait))) {
up_read(&cil->xc_ctx_lock);
return;
}
@@ -1270,12 +1472,11 @@ xlog_cil_push_background(
* dipping back down under the hard limit.
*
* The ctx->xc_push_lock provides the serialisation necessary for safely
- * using the lockless waitqueue_active() check in this context.
+ * calling xlog_cil_over_hard_limit() in this context.
*/
- if (cil->xc_ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log) ||
- waitqueue_active(&cil->xc_push_wait)) {
+ if (xlog_cil_over_hard_limit(log, space_used)) {
trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket);
- ASSERT(cil->xc_ctx->space_used < log->l_logsize);
+ ASSERT(space_used < log->l_logsize);
xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock);
return;
}
@@ -1334,7 +1535,8 @@ xlog_cil_push_now(
* If the CIL is empty or we've already pushed the sequence then
* there's no more work that we need to do.
*/
- if (list_empty(&cil->xc_cil) || push_seq <= cil->xc_push_seq) {
+ if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) ||
+ push_seq <= cil->xc_push_seq) {
spin_unlock(&cil->xc_push_lock);
return;
}
@@ -1352,7 +1554,7 @@ xlog_cil_empty(
bool empty = false;
spin_lock(&cil->xc_push_lock);
- if (list_empty(&cil->xc_cil))
+ if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
empty = true;
spin_unlock(&cil->xc_push_lock);
return empty;
@@ -1483,7 +1685,7 @@ xlog_cil_flush(
* If the CIL is empty, make sure that any previous checkpoint that may
* still be in an active iclog is pushed to stable storage.
*/
- if (list_empty(&log->l_cilp->xc_cil))
+ if (test_bit(XLOG_CIL_EMPTY, &log->l_cilp->xc_flags))
xfs_log_force(log->l_mp, 0);
}
@@ -1568,7 +1770,7 @@ restart:
* we would have found the context on the committing list.
*/
if (sequence == cil->xc_current_sequence &&
- !list_empty(&cil->xc_cil)) {
+ !test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) {
spin_unlock(&cil->xc_push_lock);
goto restart;
}
@@ -1589,14 +1791,48 @@ out_shutdown:
}
/*
+ * Move dead percpu state to the relevant CIL context structures.
+ *
+ * We have to lock the CIL context here to ensure that nothing is modifying
+ * the percpu state, either addition or removal. Both of these are done under
+ * the CIL context lock, so grabbing that exclusively here will ensure we can
+ * safely drain the cilpcp for the CPU that is dying.
+ */
+void
+xlog_cil_pcp_dead(
+ struct xlog *log,
+ unsigned int cpu)
+{
+ struct xfs_cil *cil = log->l_cilp;
+ struct xlog_cil_pcp *cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
+ struct xfs_cil_ctx *ctx;
+
+ down_write(&cil->xc_ctx_lock);
+ ctx = cil->xc_ctx;
+ if (ctx->ticket)
+ ctx->ticket->t_curr_res += cilpcp->space_reserved;
+ cilpcp->space_reserved = 0;
+
+ if (!list_empty(&cilpcp->log_items))
+ list_splice_init(&cilpcp->log_items, &ctx->log_items);
+ if (!list_empty(&cilpcp->busy_extents))
+ list_splice_init(&cilpcp->busy_extents, &ctx->busy_extents);
+ atomic_add(cilpcp->space_used, &ctx->space_used);
+ cilpcp->space_used = 0;
+ up_write(&cil->xc_ctx_lock);
+}
+
+/*
* Perform initial CIL structure initialisation.
*/
int
xlog_cil_init(
- struct xlog *log)
+ struct xlog *log)
{
- struct xfs_cil *cil;
- struct xfs_cil_ctx *ctx;
+ struct xfs_cil *cil;
+ struct xfs_cil_ctx *ctx;
+ struct xlog_cil_pcp *cilpcp;
+ int cpu;
cil = kmem_zalloc(sizeof(*cil), KM_MAYFAIL);
if (!cil)
@@ -1611,22 +1847,31 @@ xlog_cil_init(
if (!cil->xc_push_wq)
goto out_destroy_cil;
- INIT_LIST_HEAD(&cil->xc_cil);
+ cil->xc_log = log;
+ cil->xc_pcp = alloc_percpu(struct xlog_cil_pcp);
+ if (!cil->xc_pcp)
+ goto out_destroy_wq;
+
+ for_each_possible_cpu(cpu) {
+ cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
+ INIT_LIST_HEAD(&cilpcp->busy_extents);
+ INIT_LIST_HEAD(&cilpcp->log_items);
+ }
+
INIT_LIST_HEAD(&cil->xc_committing);
- spin_lock_init(&cil->xc_cil_lock);
spin_lock_init(&cil->xc_push_lock);
init_waitqueue_head(&cil->xc_push_wait);
init_rwsem(&cil->xc_ctx_lock);
init_waitqueue_head(&cil->xc_start_wait);
init_waitqueue_head(&cil->xc_commit_wait);
- cil->xc_log = log;
log->l_cilp = cil;
ctx = xlog_cil_ctx_alloc();
xlog_cil_ctx_switch(cil, ctx);
-
return 0;
+out_destroy_wq:
+ destroy_workqueue(cil->xc_push_wq);
out_destroy_cil:
kmem_free(cil);
return -ENOMEM;
@@ -1636,14 +1881,17 @@ void
xlog_cil_destroy(
struct xlog *log)
{
- if (log->l_cilp->xc_ctx) {
- if (log->l_cilp->xc_ctx->ticket)
- xfs_log_ticket_put(log->l_cilp->xc_ctx->ticket);
- kmem_free(log->l_cilp->xc_ctx);
+ struct xfs_cil *cil = log->l_cilp;
+
+ if (cil->xc_ctx) {
+ if (cil->xc_ctx->ticket)
+ xfs_log_ticket_put(cil->xc_ctx->ticket);
+ kmem_free(cil->xc_ctx);
}
- ASSERT(list_empty(&log->l_cilp->xc_cil));
- destroy_workqueue(log->l_cilp->xc_push_wq);
- kmem_free(log->l_cilp);
+ ASSERT(test_bit(XLOG_CIL_EMPTY, &cil->xc_flags));
+ free_percpu(cil->xc_pcp);
+ destroy_workqueue(cil->xc_push_wq);
+ kmem_free(cil);
}
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 686c01eb3661..1bd2963e8fbd 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -143,15 +143,16 @@ enum xlog_iclog_state {
#define XLOG_COVER_OPS 5
typedef struct xlog_ticket {
- struct list_head t_queue; /* reserve/write queue */
- struct task_struct *t_task; /* task that owns this ticket */
- xlog_tid_t t_tid; /* transaction identifier : 4 */
- atomic_t t_ref; /* ticket reference count : 4 */
- int t_curr_res; /* current reservation in bytes : 4 */
- int t_unit_res; /* unit reservation in bytes : 4 */
- char t_ocnt; /* original count : 1 */
- char t_cnt; /* current count : 1 */
- uint8_t t_flags; /* properties of reservation : 1 */
+ struct list_head t_queue; /* reserve/write queue */
+ struct task_struct *t_task; /* task that owns this ticket */
+ xlog_tid_t t_tid; /* transaction identifier */
+ atomic_t t_ref; /* ticket reference count */
+ int t_curr_res; /* current reservation */
+ int t_unit_res; /* unit reservation */
+ char t_ocnt; /* original unit count */
+ char t_cnt; /* current unit count */
+ uint8_t t_flags; /* properties of reservation */
+ int t_iclog_hdrs; /* iclog hdrs in t_curr_res */
} xlog_ticket_t;
/*
@@ -221,13 +222,25 @@ struct xfs_cil_ctx {
xfs_lsn_t commit_lsn; /* chkpt commit record lsn */
struct xlog_in_core *commit_iclog;
struct xlog_ticket *ticket; /* chkpt ticket */
- int space_used; /* aggregate size of regions */
+ atomic_t space_used; /* aggregate size of regions */
struct list_head busy_extents; /* busy extents in chkpt */
- struct xfs_log_vec *lv_chain; /* logvecs being pushed */
+ struct list_head log_items; /* log items in chkpt */
+ struct list_head lv_chain; /* logvecs being pushed */
struct list_head iclog_entry;
struct list_head committing; /* ctx committing list */
struct work_struct discard_endio_work;
struct work_struct push_work;
+ atomic_t order_id;
+};
+
+/*
+ * Per-cpu CIL tracking items
+ */
+struct xlog_cil_pcp {
+ int32_t space_used;
+ uint32_t space_reserved;
+ struct list_head busy_extents;
+ struct list_head log_items;
};
/*
@@ -248,8 +261,8 @@ struct xfs_cil_ctx {
*/
struct xfs_cil {
struct xlog *xc_log;
- struct list_head xc_cil;
- spinlock_t xc_cil_lock;
+ unsigned long xc_flags;
+ atomic_t xc_iclog_hdrs;
struct workqueue_struct *xc_push_wq;
struct rw_semaphore xc_ctx_lock ____cacheline_aligned_in_smp;
@@ -263,8 +276,17 @@ struct xfs_cil {
wait_queue_head_t xc_start_wait;
xfs_csn_t xc_current_sequence;
wait_queue_head_t xc_push_wait; /* background push throttle */
+
+ void __percpu *xc_pcp; /* percpu CIL structures */
+#ifdef CONFIG_HOTPLUG_CPU
+ struct list_head xc_pcp_list;
+#endif
} ____cacheline_aligned_in_smp;
+/* xc_flags bit values */
+#define XLOG_CIL_EMPTY 1
+#define XLOG_CIL_PCP_SPACE 2
+
/*
* The amount of log space we allow the CIL to aggregate is difficult to size.
* Whatever we choose, we have to make sure we can get a reservation for the
@@ -486,14 +508,15 @@ struct xlog_ticket *xlog_ticket_alloc(struct xlog *log, int unit_bytes,
void xlog_print_tic_res(struct xfs_mount *mp, struct xlog_ticket *ticket);
void xlog_print_trans(struct xfs_trans *);
int xlog_write(struct xlog *log, struct xfs_cil_ctx *ctx,
- struct xfs_log_vec *log_vector, struct xlog_ticket *tic,
+ struct list_head *lv_chain, struct xlog_ticket *tic,
uint32_t len);
void xfs_log_ticket_ungrant(struct xlog *log, struct xlog_ticket *ticket);
void xfs_log_ticket_regrant(struct xlog *log, struct xlog_ticket *ticket);
void xlog_state_switch_iclogs(struct xlog *log, struct xlog_in_core *iclog,
int eventual_size);
-int xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog);
+int xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog,
+ struct xlog_ticket *ticket);
/*
* When we crack an atomic LSN, we sample it first so that the value will not
@@ -682,4 +705,9 @@ xlog_kvmalloc(
return p;
}
+/*
+ * CIL CPU dead notifier
+ */
+void xlog_cil_pcp_dead(struct xlog *log, unsigned int cpu);
+
#endif /* __XFS_LOG_PRIV_H__ */
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 940c8107cbd4..17e923b9c5fa 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -2629,21 +2629,21 @@ xlog_recover_cancel_intents(
*/
STATIC void
xlog_recover_clear_agi_bucket(
- xfs_mount_t *mp,
- xfs_agnumber_t agno,
- int bucket)
+ struct xfs_perag *pag,
+ int bucket)
{
- xfs_trans_t *tp;
- xfs_agi_t *agi;
- struct xfs_buf *agibp;
- int offset;
- int error;
+ struct xfs_mount *mp = pag->pag_mount;
+ struct xfs_trans *tp;
+ struct xfs_agi *agi;
+ struct xfs_buf *agibp;
+ int offset;
+ int error;
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_clearagi, 0, 0, 0, &tp);
if (error)
goto out_error;
- error = xfs_read_agi(mp, tp, agno, &agibp);
+ error = xfs_read_agi(pag, tp, &agibp);
if (error)
goto out_abort;
@@ -2662,60 +2662,62 @@ xlog_recover_clear_agi_bucket(
out_abort:
xfs_trans_cancel(tp);
out_error:
- xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
+ xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__,
+ pag->pag_agno);
return;
}
-STATIC xfs_agino_t
-xlog_recover_process_one_iunlink(
- struct xfs_mount *mp,
- xfs_agnumber_t agno,
- xfs_agino_t agino,
- int bucket)
+static int
+xlog_recover_iunlink_bucket(
+ struct xfs_perag *pag,
+ struct xfs_agi *agi,
+ int bucket)
{
- struct xfs_buf *ibp;
- struct xfs_dinode *dip;
- struct xfs_inode *ip;
- xfs_ino_t ino;
- int error;
-
- ino = XFS_AGINO_TO_INO(mp, agno, agino);
- error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
- if (error)
- goto fail;
+ struct xfs_mount *mp = pag->pag_mount;
+ struct xfs_inode *prev_ip = NULL;
+ struct xfs_inode *ip;
+ xfs_agino_t prev_agino, agino;
+ int error = 0;
- /*
- * Get the on disk inode to find the next inode in the bucket.
- */
- error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &ibp);
- if (error)
- goto fail_iput;
- dip = xfs_buf_offset(ibp, ip->i_imap.im_boffset);
+ agino = be32_to_cpu(agi->agi_unlinked[bucket]);
+ while (agino != NULLAGINO) {
+ error = xfs_iget(mp, NULL,
+ XFS_AGINO_TO_INO(mp, pag->pag_agno, agino),
+ 0, 0, &ip);
+ if (error)
+ break;
- xfs_iflags_clear(ip, XFS_IRECOVERY);
- ASSERT(VFS_I(ip)->i_nlink == 0);
- ASSERT(VFS_I(ip)->i_mode != 0);
+ ASSERT(VFS_I(ip)->i_nlink == 0);
+ ASSERT(VFS_I(ip)->i_mode != 0);
+ xfs_iflags_clear(ip, XFS_IRECOVERY);
+ agino = ip->i_next_unlinked;
- /* setup for the next pass */
- agino = be32_to_cpu(dip->di_next_unlinked);
- xfs_buf_relse(ibp);
+ if (prev_ip) {
+ ip->i_prev_unlinked = prev_agino;
+ xfs_irele(prev_ip);
- xfs_irele(ip);
- return agino;
+ /*
+ * Ensure the inode is removed from the unlinked list
+ * before we continue so that it won't race with
+ * building the in-memory list here. This could be
+ * serialised with the agibp lock, but that just
+ * serialises via lockstepping and it's much simpler
+ * just to flush the inodegc queue and wait for it to
+ * complete.
+ */
+ xfs_inodegc_flush(mp);
+ }
- fail_iput:
- xfs_irele(ip);
- fail:
- /*
- * We can't read in the inode this bucket points to, or this inode
- * is messed up. Just ditch this bucket of inodes. We will lose
- * some inodes and space, but at least we won't hang.
- *
- * Call xlog_recover_clear_agi_bucket() to perform a transaction to
- * clear the inode pointer in the bucket.
- */
- xlog_recover_clear_agi_bucket(mp, agno, bucket);
- return NULLAGINO;
+ prev_agino = agino;
+ prev_ip = ip;
+ }
+
+ if (prev_ip) {
+ ip->i_prev_unlinked = prev_agino;
+ xfs_irele(prev_ip);
+ }
+ xfs_inodegc_flush(mp);
+ return error;
}
/*
@@ -2741,59 +2743,70 @@ xlog_recover_process_one_iunlink(
* scheduled on this CPU to ensure other scheduled work can run without undue
* latency.
*/
-STATIC void
-xlog_recover_process_iunlinks(
- struct xlog *log)
+static void
+xlog_recover_iunlink_ag(
+ struct xfs_perag *pag)
{
- struct xfs_mount *mp = log->l_mp;
- struct xfs_perag *pag;
- xfs_agnumber_t agno;
struct xfs_agi *agi;
struct xfs_buf *agibp;
- xfs_agino_t agino;
int bucket;
int error;
- for_each_perag(mp, agno, pag) {
- error = xfs_read_agi(mp, NULL, pag->pag_agno, &agibp);
+ error = xfs_read_agi(pag, NULL, &agibp);
+ if (error) {
+ /*
+ * AGI is b0rked. Don't process it.
+ *
+ * We should probably mark the filesystem as corrupt after we've
+ * recovered all the AGs we can....
+ */
+ return;
+ }
+
+ /*
+ * Unlock the buffer so that it can be acquired in the normal course of
+ * the transaction to truncate and free each inode. Because we are not
+ * racing with anyone else here for the AGI buffer, we don't even need
+ * to hold it locked to read the initial unlinked bucket entries out of
+ * the buffer. We keep a buffer reference, though, so that it stays pinned
+ * in memory for as long as we need it.
+ */
+ agi = agibp->b_addr;
+ xfs_buf_unlock(agibp);
+
+ for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
+ error = xlog_recover_iunlink_bucket(pag, agi, bucket);
if (error) {
/*
- * AGI is b0rked. Don't process it.
- *
- * We should probably mark the filesystem as corrupt
- * after we've recovered all the ag's we can....
+ * Bucket is unrecoverable, so only a repair scan can
+ * free the remaining unlinked inodes. Just empty the
+ * bucket, leaving the remaining inodes on it
+ * unreferenced and unfreeable.
*/
- continue;
+ xfs_inodegc_flush(pag->pag_mount);
+ xlog_recover_clear_agi_bucket(pag, bucket);
}
- /*
- * Unlock the buffer so that it can be acquired in the normal
- * course of the transaction to truncate and free each inode.
- * Because we are not racing with anyone else here for the AGI
- * buffer, we don't even need to hold it locked to read the
- * initial unlinked bucket entries out of the buffer. We keep
- * buffer reference though, so that it stays pinned in memory
- * while we need the buffer.
- */
- agi = agibp->b_addr;
- xfs_buf_unlock(agibp);
-
- for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
- agino = be32_to_cpu(agi->agi_unlinked[bucket]);
- while (agino != NULLAGINO) {
- agino = xlog_recover_process_one_iunlink(mp,
- pag->pag_agno, agino, bucket);
- cond_resched();
- }
- }
- xfs_buf_rele(agibp);
}
+ xfs_buf_rele(agibp);
+}
+
+static void
+xlog_recover_process_iunlinks(
+ struct xlog *log)
+{
+ struct xfs_perag *pag;
+ xfs_agnumber_t agno;
+
+ for_each_perag(log->l_mp, agno, pag)
+ xlog_recover_iunlink_ag(pag);
+
/*
* Flush the pending unlinked inodes to ensure that the inactivations
* are fully completed on disk and the incore inodes can be reclaimed
* before we signal that recovery is complete.
*/
- xfs_inodegc_flush(mp);
+ xfs_inodegc_flush(log->l_mp);
}
STATIC void
@@ -3313,7 +3326,8 @@ xlog_do_recover(
/* re-initialise in-core superblock and geometry structures */
mp->m_features |= xfs_sb_version_to_features(sbp);
xfs_reinit_percpu_counters(mp);
- error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
+ error = xfs_initialize_perag(mp, sbp->sb_agcount, sbp->sb_dblocks,
+ &mp->m_maxagi);
if (error) {
xfs_warn(mp, "Failed post-recovery per-ag init: %d", error);
return error;
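
The reworked xlog_recover_iunlink_bucket() above walks the singly linked on-disk unlinked chain and records, on each inode it visits, the AG inode number of its predecessor (i_prev_unlinked). Below is a minimal standalone sketch of just that list-walk pattern, using a toy inode table; the array, the NIL sentinel and the field names are invented for illustration.

#include <stdio.h>

#define NIL (-1)

/* toy inode table: next mimics the on-disk unlinked chain */
struct toy_inode {
	int next;	/* like ip->i_next_unlinked */
	int prev;	/* like ip->i_prev_unlinked, filled in during the walk */
};

int main(void)
{
	struct toy_inode ino[4] = {
		[0] = { .next = 2,   .prev = NIL },
		[2] = { .next = 3,   .prev = NIL },
		[3] = { .next = NIL, .prev = NIL },
	};
	int head = 0;	/* like agi->agi_unlinked[bucket] */
	int prev = NIL;

	/* same shape as the loop in xlog_recover_iunlink_bucket() */
	for (int cur = head; cur != NIL; prev = cur, cur = ino[cur].next)
		ino[cur].prev = prev;

	for (int cur = head; cur != NIL; cur = ino[cur].next)
		printf("inode %d: prev %d next %d\n",
		       cur, ino[cur].prev, ino[cur].next);
	return 0;
}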
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index daa8d29c46b4..f10c88cee116 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -778,7 +778,8 @@ xfs_mountfs(
/*
* Allocate and initialize the per-ag data.
*/
- error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
+ error = xfs_initialize_perag(mp, sbp->sb_agcount, mp->m_sb.sb_dblocks,
+ &mp->m_maxagi);
if (error) {
xfs_warn(mp, "Failed per-ag init: %d", error);
goto out_free_dir;
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index d2eaebd85abf..8aca2cc173ac 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -454,6 +454,7 @@ void xfs_do_force_shutdown(struct xfs_mount *mp, uint32_t flags, char *fname,
#define SHUTDOWN_LOG_IO_ERROR (1u << 1) /* write attempt to the log failed */
#define SHUTDOWN_FORCE_UMOUNT (1u << 2) /* shutdown from a forced unmount */
#define SHUTDOWN_CORRUPT_INCORE (1u << 3) /* corrupt in-memory structures */
+#define SHUTDOWN_CORRUPT_ONDISK (1u << 4) /* corrupt metadata on device */
#define XFS_SHUTDOWN_STRINGS \
{ SHUTDOWN_META_IO_ERROR, "metadata_io" }, \
diff --git a/fs/xfs/xfs_notify_failure.c b/fs/xfs/xfs_notify_failure.c
new file mode 100644
index 000000000000..69d9c83ea4b2
--- /dev/null
+++ b/fs/xfs/xfs_notify_failure.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 Fujitsu. All Rights Reserved.
+ */
+
+#include "xfs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_alloc.h"
+#include "xfs_bit.h"
+#include "xfs_btree.h"
+#include "xfs_inode.h"
+#include "xfs_icache.h"
+#include "xfs_rmap.h"
+#include "xfs_rmap_btree.h"
+#include "xfs_rtalloc.h"
+#include "xfs_trans.h"
+#include "xfs_ag.h"
+
+#include <linux/mm.h>
+#include <linux/dax.h>
+
+struct failure_info {
+ xfs_agblock_t startblock;
+ xfs_extlen_t blockcount;
+ int mf_flags;
+};
+
+static pgoff_t
+xfs_failure_pgoff(
+ struct xfs_mount *mp,
+ const struct xfs_rmap_irec *rec,
+ const struct failure_info *notify)
+{
+ loff_t pos = XFS_FSB_TO_B(mp, rec->rm_offset);
+
+ if (notify->startblock > rec->rm_startblock)
+ pos += XFS_FSB_TO_B(mp,
+ notify->startblock - rec->rm_startblock);
+ return pos >> PAGE_SHIFT;
+}
+
+static unsigned long
+xfs_failure_pgcnt(
+ struct xfs_mount *mp,
+ const struct xfs_rmap_irec *rec,
+ const struct failure_info *notify)
+{
+ xfs_agblock_t end_rec;
+ xfs_agblock_t end_notify;
+ xfs_agblock_t start_cross;
+ xfs_agblock_t end_cross;
+
+ start_cross = max(rec->rm_startblock, notify->startblock);
+
+ end_rec = rec->rm_startblock + rec->rm_blockcount;
+ end_notify = notify->startblock + notify->blockcount;
+ end_cross = min(end_rec, end_notify);
+
+ return XFS_FSB_TO_B(mp, end_cross - start_cross) >> PAGE_SHIFT;
+}
+
+static int
+xfs_dax_failure_fn(
+ struct xfs_btree_cur *cur,
+ const struct xfs_rmap_irec *rec,
+ void *data)
+{
+ struct xfs_mount *mp = cur->bc_mp;
+ struct xfs_inode *ip;
+ struct failure_info *notify = data;
+ int error = 0;
+
+ if (XFS_RMAP_NON_INODE_OWNER(rec->rm_owner) ||
+ (rec->rm_flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK))) {
+ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_ONDISK);
+ return -EFSCORRUPTED;
+ }
+
+ /* Get files that are incore; filter out others that are not in use. */
+ error = xfs_iget(mp, cur->bc_tp, rec->rm_owner, XFS_IGET_INCORE,
+ 0, &ip);
+ /* Continue the rmap query if the inode isn't incore */
+ if (error == -ENODATA)
+ return 0;
+ if (error)
+ return error;
+
+ error = mf_dax_kill_procs(VFS_I(ip)->i_mapping,
+ xfs_failure_pgoff(mp, rec, notify),
+ xfs_failure_pgcnt(mp, rec, notify),
+ notify->mf_flags);
+ xfs_irele(ip);
+ return error;
+}
+
+static int
+xfs_dax_notify_ddev_failure(
+ struct xfs_mount *mp,
+ xfs_daddr_t daddr,
+ xfs_daddr_t bblen,
+ int mf_flags)
+{
+ struct xfs_trans *tp = NULL;
+ struct xfs_btree_cur *cur = NULL;
+ struct xfs_buf *agf_bp = NULL;
+ int error = 0;
+ xfs_fsblock_t fsbno = XFS_DADDR_TO_FSB(mp, daddr);
+ xfs_agnumber_t agno = XFS_FSB_TO_AGNO(mp, fsbno);
+ xfs_fsblock_t end_fsbno = XFS_DADDR_TO_FSB(mp, daddr + bblen);
+ xfs_agnumber_t end_agno = XFS_FSB_TO_AGNO(mp, end_fsbno);
+
+ error = xfs_trans_alloc_empty(mp, &tp);
+ if (error)
+ return error;
+
+ for (; agno <= end_agno; agno++) {
+ struct xfs_rmap_irec ri_low = { };
+ struct xfs_rmap_irec ri_high;
+ struct failure_info notify;
+ struct xfs_agf *agf;
+ xfs_agblock_t agend;
+ struct xfs_perag *pag;
+
+ pag = xfs_perag_get(mp, agno);
+ error = xfs_alloc_read_agf(pag, tp, 0, &agf_bp);
+ if (error) {
+ xfs_perag_put(pag);
+ break;
+ }
+
+ cur = xfs_rmapbt_init_cursor(mp, tp, agf_bp, pag);
+
+ /*
+ * Set the rmap range from ri_low to ri_high, which represents
+ * a [start, end] range where we are looking for the files or metadata.
+ */
+ memset(&ri_high, 0xFF, sizeof(ri_high));
+ ri_low.rm_startblock = XFS_FSB_TO_AGBNO(mp, fsbno);
+ if (agno == end_agno)
+ ri_high.rm_startblock = XFS_FSB_TO_AGBNO(mp, end_fsbno);
+
+ agf = agf_bp->b_addr;
+ agend = min(be32_to_cpu(agf->agf_length),
+ ri_high.rm_startblock);
+ notify.startblock = ri_low.rm_startblock;
+ notify.blockcount = agend - ri_low.rm_startblock;
+
+ error = xfs_rmap_query_range(cur, &ri_low, &ri_high,
+ xfs_dax_failure_fn, &notify);
+ xfs_btree_del_cursor(cur, error);
+ xfs_trans_brelse(tp, agf_bp);
+ xfs_perag_put(pag);
+ if (error)
+ break;
+
+ fsbno = XFS_AGB_TO_FSB(mp, agno + 1, 0);
+ }
+
+ xfs_trans_cancel(tp);
+ return error;
+}
+
+static int
+xfs_dax_notify_failure(
+ struct dax_device *dax_dev,
+ u64 offset,
+ u64 len,
+ int mf_flags)
+{
+ struct xfs_mount *mp = dax_holder(dax_dev);
+ u64 ddev_start;
+ u64 ddev_end;
+
+ if (!(mp->m_sb.sb_flags & SB_BORN)) {
+ xfs_warn(mp, "filesystem is not ready for notify_failure()!");
+ return -EIO;
+ }
+
+ if (mp->m_rtdev_targp && mp->m_rtdev_targp->bt_daxdev == dax_dev) {
+ xfs_warn(mp,
+ "notify_failure() not supported on realtime device!");
+ return -EOPNOTSUPP;
+ }
+
+ if (mp->m_logdev_targp && mp->m_logdev_targp->bt_daxdev == dax_dev &&
+ mp->m_logdev_targp != mp->m_ddev_targp) {
+ xfs_err(mp, "ondisk log corrupt, shutting down fs!");
+ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_ONDISK);
+ return -EFSCORRUPTED;
+ }
+
+ if (!xfs_has_rmapbt(mp)) {
+ xfs_warn(mp, "notify_failure() needs rmapbt enabled!");
+ return -EOPNOTSUPP;
+ }
+
+ ddev_start = mp->m_ddev_targp->bt_dax_part_off;
+ ddev_end = ddev_start + bdev_nr_bytes(mp->m_ddev_targp->bt_bdev) - 1;
+
+ /* Ignore ranges entirely outside the filesystem area */
+ if (offset + len < ddev_start)
+ return -ENXIO;
+ if (offset > ddev_end)
+ return -ENXIO;
+
+ /* Trim the range where it crosses the filesystem boundary */
+ if (offset > ddev_start)
+ offset -= ddev_start;
+ else {
+ len -= ddev_start - offset;
+ offset = 0;
+ }
+ if (offset + len > ddev_end)
+ len -= ddev_end - offset;
+
+ return xfs_dax_notify_ddev_failure(mp, BTOBB(offset), BTOBB(len),
+ mf_flags);
+}
+
+const struct dax_holder_operations xfs_dax_holder_operations = {
+ .notify_failure = xfs_dax_notify_failure,
+};
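
The offset/len clamping at the end of xfs_dax_notify_failure() is easier to follow with concrete numbers. A standalone sketch of the same arithmetic follows; the helper name is invented and the values in main() are only an example (a failure range that starts before the data device's partition offset).

#include <stdint.h>
#include <stdio.h>

/*
 * Clamp a failed byte range [offset, offset + len) against the data device
 * byte range [ddev_start, ddev_end], mirroring the logic above.
 * Returns 0 if the failure misses the filesystem entirely.
 */
static int clamp_failure_range(uint64_t ddev_start, uint64_t ddev_end,
			       uint64_t *offset, uint64_t *len)
{
	if (*offset + *len < ddev_start || *offset > ddev_end)
		return 0;			/* outside the filesystem */

	if (*offset > ddev_start) {
		*offset -= ddev_start;		/* make the offset fs-relative */
	} else {
		*len -= ddev_start - *offset;	/* drop the part before the fs */
		*offset = 0;
	}
	if (*offset + *len > ddev_end)
		*len -= ddev_end - *offset;	/* trim the tail */
	return 1;
}

int main(void)
{
	/* partition starts 1 MiB into the DAX namespace and is 8 MiB long */
	uint64_t start = 1 << 20, end = start + (8 << 20) - 1;
	/* 2 MiB failure straddling the front of the partition */
	uint64_t off = 512 << 10, len = 2 << 20;

	if (clamp_failure_range(start, end, &off, &len))
		printf("notify offset=%llu len=%llu\n",
		       (unsigned long long)off, (unsigned long long)len);
	return 0;
}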
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index abf08bbf34a9..18bb4ec4d7c9 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -677,7 +677,8 @@ xfs_qm_init_quotainfo(
qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
- error = register_shrinker(&qinf->qi_shrinker);
+ error = register_shrinker(&qinf->qi_shrinker, "xfs-qm:%s",
+ mp->m_super->s_id);
if (error)
goto out_free_inos;
@@ -1154,7 +1155,7 @@ xfs_qm_dqusage_adjust(
ASSERT(ip->i_delayed_blks == 0);
if (XFS_IS_REALTIME_INODE(ip)) {
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
if (error)
@@ -1229,10 +1230,14 @@ xfs_qm_flush_one(
*/
if (!xfs_dqflock_nowait(dqp)) {
/* buf is pinned in-core by delwri list */
- bp = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
- mp->m_quotainfo->qi_dqchunklen, 0);
- if (!bp) {
- error = -EINVAL;
+ error = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
+ mp->m_quotainfo->qi_dqchunklen, 0, &bp);
+ if (error)
+ goto out_unlock;
+
+ if (!(bp->b_flags & _XBF_DELWRI_Q)) {
+ error = -EAGAIN;
+ xfs_buf_relse(bp);
goto out_unlock;
}
xfs_buf_unlock(bp);
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index e7a7c00d93be..251f20ddd368 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -125,11 +125,10 @@
* shared blocks. If there are no shared extents, fbno and flen will
* be set to NULLAGBLOCK and 0, respectively.
*/
-int
+static int
xfs_reflink_find_shared(
- struct xfs_mount *mp,
+ struct xfs_perag *pag,
struct xfs_trans *tp,
- xfs_agnumber_t agno,
xfs_agblock_t agbno,
xfs_extlen_t aglen,
xfs_agblock_t *fbno,
@@ -140,11 +139,11 @@ xfs_reflink_find_shared(
struct xfs_btree_cur *cur;
int error;
- error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
+ error = xfs_alloc_read_agf(pag, tp, 0, &agbp);
if (error)
return error;
- cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agbp->b_pag);
+ cur = xfs_refcountbt_init_cursor(pag->pag_mount, tp, agbp, pag);
error = xfs_refcount_find_shared(cur, agbno, aglen, fbno, flen,
find_end_of_shared);
@@ -171,7 +170,8 @@ xfs_reflink_trim_around_shared(
struct xfs_bmbt_irec *irec,
bool *shared)
{
- xfs_agnumber_t agno;
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_perag *pag;
xfs_agblock_t agbno;
xfs_extlen_t aglen;
xfs_agblock_t fbno;
@@ -186,12 +186,13 @@ xfs_reflink_trim_around_shared(
trace_xfs_reflink_trim_around_shared(ip, irec);
- agno = XFS_FSB_TO_AGNO(ip->i_mount, irec->br_startblock);
- agbno = XFS_FSB_TO_AGBNO(ip->i_mount, irec->br_startblock);
+ pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, irec->br_startblock));
+ agbno = XFS_FSB_TO_AGBNO(mp, irec->br_startblock);
aglen = irec->br_blockcount;
- error = xfs_reflink_find_shared(ip->i_mount, NULL, agno, agbno,
- aglen, &fbno, &flen, true);
+ error = xfs_reflink_find_shared(pag, NULL, agbno, aglen, &fbno, &flen,
+ true);
+ xfs_perag_put(pag);
if (error)
return error;
@@ -340,9 +341,41 @@ xfs_find_trim_cow_extent(
return 0;
}
-/* Allocate all CoW reservations covering a range of blocks in a file. */
-int
-xfs_reflink_allocate_cow(
+static int
+xfs_reflink_convert_unwritten(
+ struct xfs_inode *ip,
+ struct xfs_bmbt_irec *imap,
+ struct xfs_bmbt_irec *cmap,
+ bool convert_now)
+{
+ xfs_fileoff_t offset_fsb = imap->br_startoff;
+ xfs_filblks_t count_fsb = imap->br_blockcount;
+ int error;
+
+ /*
+ * cmap might be larger than imap due to the cowextsize hint.
+ */
+ xfs_trim_extent(cmap, offset_fsb, count_fsb);
+
+ /*
+ * COW fork extents are supposed to remain unwritten until we're ready
+ * to initiate a disk write. For direct I/O we are going to write the
+ * data and need the conversion, but for buffered writes we're done.
+ */
+ if (!convert_now || cmap->br_state == XFS_EXT_NORM)
+ return 0;
+
+ trace_xfs_reflink_convert_cow(ip, cmap);
+
+ error = xfs_reflink_convert_cow_locked(ip, offset_fsb, count_fsb);
+ if (!error)
+ cmap->br_state = XFS_EXT_NORM;
+
+ return error;
+}
+
+static int
+xfs_reflink_fill_cow_hole(
struct xfs_inode *ip,
struct xfs_bmbt_irec *imap,
struct xfs_bmbt_irec *cmap,
@@ -351,25 +384,12 @@ xfs_reflink_allocate_cow(
bool convert_now)
{
struct xfs_mount *mp = ip->i_mount;
- xfs_fileoff_t offset_fsb = imap->br_startoff;
- xfs_filblks_t count_fsb = imap->br_blockcount;
struct xfs_trans *tp;
- int nimaps, error = 0;
- bool found;
xfs_filblks_t resaligned;
- xfs_extlen_t resblks = 0;
-
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
- if (!ip->i_cowfp) {
- ASSERT(!xfs_is_reflink_inode(ip));
- xfs_ifork_init_cow(ip);
- }
-
- error = xfs_find_trim_cow_extent(ip, imap, cmap, shared, &found);
- if (error || !*shared)
- return error;
- if (found)
- goto convert;
+ xfs_extlen_t resblks;
+ int nimaps;
+ int error;
+ bool found;
resaligned = xfs_aligned_fsb_count(imap->br_startoff,
imap->br_blockcount, xfs_get_cowextsz_hint(ip));
@@ -385,17 +405,17 @@ xfs_reflink_allocate_cow(
*lockmode = XFS_ILOCK_EXCL;
- /*
- * Check for an overlapping extent again now that we dropped the ilock.
- */
error = xfs_find_trim_cow_extent(ip, imap, cmap, shared, &found);
if (error || !*shared)
goto out_trans_cancel;
+
if (found) {
xfs_trans_cancel(tp);
goto convert;
}
+ ASSERT(cmap->br_startoff > imap->br_startoff);
+
/* Allocate the entire reservation as unwritten blocks. */
nimaps = 1;
error = xfs_bmapi_write(tp, ip, imap->br_startoff, imap->br_blockcount,
@@ -415,26 +435,135 @@ xfs_reflink_allocate_cow(
*/
if (nimaps == 0)
return -ENOSPC;
+
convert:
- xfs_trim_extent(cmap, offset_fsb, count_fsb);
- /*
- * COW fork extents are supposed to remain unwritten until we're ready
- * to initiate a disk write. For direct I/O we are going to write the
- * data and need the conversion, but for buffered writes we're done.
- */
- if (!convert_now || cmap->br_state == XFS_EXT_NORM)
- return 0;
- trace_xfs_reflink_convert_cow(ip, cmap);
- error = xfs_reflink_convert_cow_locked(ip, offset_fsb, count_fsb);
- if (!error)
- cmap->br_state = XFS_EXT_NORM;
+ return xfs_reflink_convert_unwritten(ip, imap, cmap, convert_now);
+
+out_trans_cancel:
+ xfs_trans_cancel(tp);
return error;
+}
+
+static int
+xfs_reflink_fill_delalloc(
+ struct xfs_inode *ip,
+ struct xfs_bmbt_irec *imap,
+ struct xfs_bmbt_irec *cmap,
+ bool *shared,
+ uint *lockmode,
+ bool convert_now)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_trans *tp;
+ int nimaps;
+ int error;
+ bool found;
+
+ do {
+ xfs_iunlock(ip, *lockmode);
+ *lockmode = 0;
+
+ error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, 0, 0,
+ false, &tp);
+ if (error)
+ return error;
+
+ *lockmode = XFS_ILOCK_EXCL;
+
+ error = xfs_find_trim_cow_extent(ip, imap, cmap, shared,
+ &found);
+ if (error || !*shared)
+ goto out_trans_cancel;
+
+ if (found) {
+ xfs_trans_cancel(tp);
+ break;
+ }
+
+ ASSERT(isnullstartblock(cmap->br_startblock) ||
+ cmap->br_startblock == DELAYSTARTBLOCK);
+
+ /*
+ * Replace delalloc reservation with an unwritten extent.
+ */
+ nimaps = 1;
+ error = xfs_bmapi_write(tp, ip, cmap->br_startoff,
+ cmap->br_blockcount,
+ XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC, 0,
+ cmap, &nimaps);
+ if (error)
+ goto out_trans_cancel;
+
+ xfs_inode_set_cowblocks_tag(ip);
+ error = xfs_trans_commit(tp);
+ if (error)
+ return error;
+
+ /*
+ * Allocation succeeded but the requested range was not even
+ * partially satisfied? Bail out!
+ */
+ if (nimaps == 0)
+ return -ENOSPC;
+ } while (cmap->br_startoff + cmap->br_blockcount <= imap->br_startoff);
+
+ return xfs_reflink_convert_unwritten(ip, imap, cmap, convert_now);
out_trans_cancel:
xfs_trans_cancel(tp);
return error;
}
+/* Allocate all CoW reservations covering a range of blocks in a file. */
+int
+xfs_reflink_allocate_cow(
+ struct xfs_inode *ip,
+ struct xfs_bmbt_irec *imap,
+ struct xfs_bmbt_irec *cmap,
+ bool *shared,
+ uint *lockmode,
+ bool convert_now)
+{
+ int error;
+ bool found;
+
+ ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+ if (!ip->i_cowfp) {
+ ASSERT(!xfs_is_reflink_inode(ip));
+ xfs_ifork_init_cow(ip);
+ }
+
+ error = xfs_find_trim_cow_extent(ip, imap, cmap, shared, &found);
+ if (error || !*shared)
+ return error;
+
+ /* CoW fork has a real extent */
+ if (found)
+ return xfs_reflink_convert_unwritten(ip, imap, cmap,
+ convert_now);
+
+ /*
+ * CoW fork does not have an extent and data extent is shared.
+ * Allocate a real extent in the CoW fork.
+ */
+ if (cmap->br_startoff > imap->br_startoff)
+ return xfs_reflink_fill_cow_hole(ip, imap, cmap, shared,
+ lockmode, convert_now);
+
+ /*
+ * CoW fork has a delalloc reservation. Replace it with a real extent.
+ * There may or may not be a data fork mapping.
+ */
+ if (isnullstartblock(cmap->br_startblock) ||
+ cmap->br_startblock == DELAYSTARTBLOCK)
+ return xfs_reflink_fill_delalloc(ip, imap, cmap, shared,
+ lockmode, convert_now);
+
+ /* Shouldn't get here. */
+ ASSERT(0);
+ return -EFSCORRUPTED;
+}
+
/*
* Cancel CoW reservations for some block range of an inode.
*
@@ -452,7 +581,7 @@ xfs_reflink_cancel_cow_blocks(
xfs_fileoff_t end_fsb,
bool cancel_real)
{
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_COW_FORK);
struct xfs_bmbt_irec got, del;
struct xfs_iext_cursor icur;
int error = 0;
@@ -593,7 +722,7 @@ xfs_reflink_end_cow_extent(
struct xfs_bmbt_irec got, del, data;
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp;
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
+ struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_COW_FORK);
unsigned int resblks;
int nmaps;
int error;
@@ -1363,12 +1492,16 @@ xfs_reflink_remap_prep(
if (XFS_IS_REALTIME_INODE(src) || XFS_IS_REALTIME_INODE(dest))
goto out_unlock;
- /* Don't share DAX file data for now. */
- if (IS_DAX(inode_in) || IS_DAX(inode_out))
+ /* Don't share DAX file data with non-DAX file. */
+ if (IS_DAX(inode_in) != IS_DAX(inode_out))
goto out_unlock;
- ret = generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
- len, remap_flags);
+ if (!IS_DAX(inode_in))
+ ret = generic_remap_file_range_prep(file_in, pos_in, file_out,
+ pos_out, len, remap_flags);
+ else
+ ret = dax_remap_file_range_prep(file_in, pos_in, file_out,
+ pos_out, len, remap_flags, &xfs_read_iomap_ops);
if (ret || *len == 0)
goto out_unlock;
@@ -1420,16 +1553,11 @@ xfs_reflink_inode_has_shared_extents(
struct xfs_bmbt_irec got;
struct xfs_mount *mp = ip->i_mount;
struct xfs_ifork *ifp;
- xfs_agnumber_t agno;
- xfs_agblock_t agbno;
- xfs_extlen_t aglen;
- xfs_agblock_t rbno;
- xfs_extlen_t rlen;
struct xfs_iext_cursor icur;
bool found;
int error;
- ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+ ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
if (error)
return error;
@@ -1437,17 +1565,25 @@ xfs_reflink_inode_has_shared_extents(
*has_shared = false;
found = xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got);
while (found) {
+ struct xfs_perag *pag;
+ xfs_agblock_t agbno;
+ xfs_extlen_t aglen;
+ xfs_agblock_t rbno;
+ xfs_extlen_t rlen;
+
if (isnullstartblock(got.br_startblock) ||
got.br_state != XFS_EXT_NORM)
goto next;
- agno = XFS_FSB_TO_AGNO(mp, got.br_startblock);
+
+ pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, got.br_startblock));
agbno = XFS_FSB_TO_AGBNO(mp, got.br_startblock);
aglen = got.br_blockcount;
-
- error = xfs_reflink_find_shared(mp, tp, agno, agbno, aglen,
+ error = xfs_reflink_find_shared(pag, tp, agbno, aglen,
&rbno, &rlen, false);
+ xfs_perag_put(pag);
if (error)
return error;
+
/* Is there still a shared block here? */
if (rbno != NULLAGBLOCK) {
*has_shared = true;
diff --git a/fs/xfs/xfs_reflink.h b/fs/xfs/xfs_reflink.h
index bea65f2fe657..65c5dfe17ecf 100644
--- a/fs/xfs/xfs_reflink.h
+++ b/fs/xfs/xfs_reflink.h
@@ -16,9 +16,6 @@ static inline bool xfs_is_cow_inode(struct xfs_inode *ip)
return xfs_is_reflink_inode(ip) || xfs_is_always_cow_inode(ip);
}
-extern int xfs_reflink_find_shared(struct xfs_mount *mp, struct xfs_trans *tp,
- xfs_agnumber_t agno, xfs_agblock_t agbno, xfs_extlen_t aglen,
- xfs_agblock_t *fbno, xfs_extlen_t *flen, bool find_maximal);
extern int xfs_reflink_trim_around_shared(struct xfs_inode *ip,
struct xfs_bmbt_irec *irec, bool *shared);
int xfs_bmap_trim_cow(struct xfs_inode *ip, struct xfs_bmbt_irec *imap,
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index aa977c7ea370..9ac59814bbb6 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -40,6 +40,7 @@
#include "xfs_defer.h"
#include "xfs_attr_item.h"
#include "xfs_xattr.h"
+#include "xfs_iunlink_item.h"
#include <linux/magic.h>
#include <linux/fs_context.h>
@@ -350,8 +351,10 @@ xfs_setup_dax_always(
goto disable_dax;
}
- if (xfs_has_reflink(mp)) {
- xfs_alert(mp, "DAX and reflink cannot be used together!");
+ if (xfs_has_reflink(mp) &&
+ bdev_is_partition(mp->m_ddev_targp->bt_bdev)) {
+ xfs_alert(mp,
+ "DAX and reflink cannot work with multi-partitions!");
return -EINVAL;
}
@@ -1966,11 +1969,19 @@ xfs_init_caches(void)
{
int error;
+ xfs_buf_cache = kmem_cache_create("xfs_buf", sizeof(struct xfs_buf), 0,
+ SLAB_HWCACHE_ALIGN |
+ SLAB_RECLAIM_ACCOUNT |
+ SLAB_MEM_SPREAD,
+ NULL);
+ if (!xfs_buf_cache)
+ goto out;
+
xfs_log_ticket_cache = kmem_cache_create("xfs_log_ticket",
sizeof(struct xlog_ticket),
0, 0, NULL);
if (!xfs_log_ticket_cache)
- goto out;
+ goto out_destroy_buf_cache;
error = xfs_btree_init_cur_caches();
if (error)
@@ -2096,8 +2107,16 @@ xfs_init_caches(void)
if (!xfs_attri_cache)
goto out_destroy_attrd_cache;
+ xfs_iunlink_cache = kmem_cache_create("xfs_iul_item",
+ sizeof(struct xfs_iunlink_item),
+ 0, 0, NULL);
+ if (!xfs_iunlink_cache)
+ goto out_destroy_attri_cache;
+
return 0;
+ out_destroy_attri_cache:
+ kmem_cache_destroy(xfs_attri_cache);
out_destroy_attrd_cache:
kmem_cache_destroy(xfs_attrd_cache);
out_destroy_bui_cache:
@@ -2136,6 +2155,8 @@ xfs_init_caches(void)
xfs_btree_destroy_cur_caches();
out_destroy_log_ticket_cache:
kmem_cache_destroy(xfs_log_ticket_cache);
+ out_destroy_buf_cache:
+ kmem_cache_destroy(xfs_buf_cache);
out:
return -ENOMEM;
}
@@ -2148,6 +2169,7 @@ xfs_destroy_caches(void)
* destroy caches.
*/
rcu_barrier();
+ kmem_cache_destroy(xfs_iunlink_cache);
kmem_cache_destroy(xfs_attri_cache);
kmem_cache_destroy(xfs_attrd_cache);
kmem_cache_destroy(xfs_bui_cache);
@@ -2168,6 +2190,7 @@ xfs_destroy_caches(void)
xfs_defer_destroy_item_caches();
xfs_btree_destroy_cur_caches();
kmem_cache_destroy(xfs_log_ticket_cache);
+ kmem_cache_destroy(xfs_buf_cache);
}
STATIC int __init
@@ -2213,6 +2236,7 @@ xfs_cpu_dead(
list_for_each_entry_safe(mp, n, &xfs_mount_list, m_mount_list) {
spin_unlock(&xfs_mount_list_lock);
xfs_inodegc_cpu_dead(mp, cpu);
+ xlog_cil_pcp_dead(mp->m_log, cpu);
spin_lock(&xfs_mount_list_lock);
}
spin_unlock(&xfs_mount_list_lock);
@@ -2272,13 +2296,9 @@ init_xfs_fs(void)
if (error)
goto out_destroy_wq;
- error = xfs_buf_init();
- if (error)
- goto out_mru_cache_uninit;
-
error = xfs_init_procfs();
if (error)
- goto out_buf_terminate;
+ goto out_mru_cache_uninit;
error = xfs_sysctl_register();
if (error)
@@ -2335,8 +2355,6 @@ init_xfs_fs(void)
xfs_sysctl_unregister();
out_cleanup_procfs:
xfs_cleanup_procfs();
- out_buf_terminate:
- xfs_buf_terminate();
out_mru_cache_uninit:
xfs_mru_cache_uninit();
out_destroy_wq:
@@ -2362,7 +2380,6 @@ exit_xfs_fs(void)
kset_unregister(xfs_kset);
xfs_sysctl_unregister();
xfs_cleanup_procfs();
- xfs_buf_terminate();
xfs_mru_cache_uninit();
xfs_destroy_workqueues();
xfs_destroy_caches();
diff --git a/fs/xfs/xfs_super.h b/fs/xfs/xfs_super.h
index 3cd5a51bace1..364e2c2648a8 100644
--- a/fs/xfs/xfs_super.h
+++ b/fs/xfs/xfs_super.h
@@ -92,6 +92,7 @@ extern xfs_agnumber_t xfs_set_inode_alloc(struct xfs_mount *,
extern const struct export_operations xfs_export_operations;
extern const struct quotactl_ops xfs_quotactl_operations;
+extern const struct dax_holder_operations xfs_dax_holder_operations;
extern void xfs_reinit_percpu_counters(struct xfs_mount *mp);
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
index 4145ba872547..8389f3ef88ef 100644
--- a/fs/xfs/xfs_symlink.c
+++ b/fs/xfs/xfs_symlink.c
@@ -256,7 +256,7 @@ xfs_symlink(
/*
* If the symlink will fit into the inode, write it inline.
*/
- if (pathlen <= XFS_IFORK_DSIZE(ip)) {
+ if (pathlen <= xfs_inode_data_fork_size(ip)) {
xfs_init_local_fork(ip, XFS_DATA_FORK, target_path, pathlen);
ip->i_disk_size = pathlen;
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 0fa1b7a2918c..f9057af6e0c8 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -2171,7 +2171,7 @@ DECLARE_EVENT_CLASS(xfs_swap_extent_class,
__entry->format = ip->i_df.if_format;
__entry->nex = ip->i_df.if_nextents;
__entry->broot_size = ip->i_df.if_broot_bytes;
- __entry->fork_off = XFS_IFORK_BOFF(ip);
+ __entry->fork_off = xfs_inode_fork_boff(ip);
),
TP_printk("dev %d:%d ino 0x%llx (%s), %s format, num_extents %llu, "
"broot size %d, forkoff 0x%x",
@@ -3672,7 +3672,6 @@ DEFINE_EVENT(xfs_ag_inode_class, name, \
TP_ARGS(ip))
DEFINE_AGINODE_EVENT(xfs_iunlink);
DEFINE_AGINODE_EVENT(xfs_iunlink_remove);
-DEFINE_AG_EVENT(xfs_iunlink_map_prev_fallback);
DECLARE_EVENT_CLASS(xfs_fs_corrupt_class,
TP_PROTO(struct xfs_mount *mp, unsigned int flags),
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 82cf0189c0db..7bd16fbff534 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -760,7 +760,7 @@ xfs_log_item_batch_insert(
void
xfs_trans_committed_bulk(
struct xfs_ail *ailp,
- struct xfs_log_vec *log_vector,
+ struct list_head *lv_chain,
xfs_lsn_t commit_lsn,
bool aborted)
{
@@ -775,7 +775,7 @@ xfs_trans_committed_bulk(
spin_unlock(&ailp->ail_lock);
/* unpin all the log items */
- for (lv = log_vector; lv; lv = lv->lv_next ) {
+ list_for_each_entry(lv, lv_chain, lv_list) {
struct xfs_log_item *lip = lv->lv_item;
xfs_lsn_t item_lsn;
@@ -845,6 +845,90 @@ xfs_trans_committed_bulk(
}
/*
+ * Sort transaction items prior to running precommit operations. This will
+ * attempt to order the items such that they will always be locked in the same
+ * order. Items that have no sort function are moved to the end of the list
+ * and so are locked last.
+ *
+ * This may need refinement as different types of objects add sort functions.
+ *
+ * The function is more complex than it needs to be because we are comparing
+ * 64-bit values while the comparison callback can only return 32-bit values.
+ */
+static int
+xfs_trans_precommit_sort(
+ void *unused_arg,
+ const struct list_head *a,
+ const struct list_head *b)
+{
+ struct xfs_log_item *lia = container_of(a,
+ struct xfs_log_item, li_trans);
+ struct xfs_log_item *lib = container_of(b,
+ struct xfs_log_item, li_trans);
+ int64_t diff;
+
+ /*
+ * If both items are non-sortable, leave them alone. If only one is
+ * sortable, move the non-sortable item towards the end of the list.
+ */
+ if (!lia->li_ops->iop_sort && !lib->li_ops->iop_sort)
+ return 0;
+ if (!lia->li_ops->iop_sort)
+ return 1;
+ if (!lib->li_ops->iop_sort)
+ return -1;
+
+ diff = lia->li_ops->iop_sort(lia) - lib->li_ops->iop_sort(lib);
+ if (diff < 0)
+ return -1;
+ if (diff > 0)
+ return 1;
+ return 0;
+}
+
+/*
+ * Run transaction precommit functions.
+ *
+ * If there is an error in any of the callouts, then stop immediately and
+ * trigger a shutdown to abort the transaction. There is no recovery possible
+ * from errors at this point as the transaction is dirty....
+ */
+static int
+xfs_trans_run_precommits(
+ struct xfs_trans *tp)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_log_item *lip, *n;
+ int error = 0;
+
+ /*
+ * Sort the item list to avoid ABBA deadlocks with other transactions
+ * running precommit operations that lock multiple shared items such as
+ * inode cluster buffers.
+ */
+ list_sort(NULL, &tp->t_items, xfs_trans_precommit_sort);
+
+ /*
+ * Precommit operations can remove the log item from the transaction
+ * if the log item exists purely to delay modifications until they
+ * can be ordered against other operations. Hence we have to use
+ * list_for_each_entry_safe() here.
+ */
+ list_for_each_entry_safe(lip, n, &tp->t_items, li_trans) {
+ if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
+ continue;
+ if (lip->li_ops->iop_precommit) {
+ error = lip->li_ops->iop_precommit(tp, lip);
+ if (error)
+ break;
+ }
+ }
+ if (error)
+ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+ return error;
+}
+
+/*
* Commit the given transaction to the log.
*
* XFS disk error handling mechanism is not based on a typical
@@ -869,6 +953,13 @@ __xfs_trans_commit(
trace_xfs_trans_commit(tp, _RET_IP_);
+ error = xfs_trans_run_precommits(tp);
+ if (error) {
+ if (tp->t_flags & XFS_TRANS_PERM_LOG_RES)
+ xfs_defer_cancel(tp);
+ goto out_unreserve;
+ }
+
/*
* Finish deferred items on final commit. Only permanent transactions
* should ever have deferred ops.
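
The "more complex than it needs to be" remark in xfs_trans_precommit_sort() above is about integer truncation: a list comparison callback returns an int, so a 64-bit key difference cannot be returned directly without the high bits being silently dropped. Here is a user-space sketch of the same clamping pattern, using qsort() and an invented item struct with invented sort keys.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
	uint64_t sort_key;	/* stand-in for the iop_sort() result */
};

/* clamp the 64-bit difference to {-1, 0, 1} so it survives the int return */
static int item_cmp(const void *a, const void *b)
{
	int64_t diff = (int64_t)(((const struct item *)a)->sort_key -
				 ((const struct item *)b)->sort_key);

	if (diff < 0)
		return -1;
	if (diff > 0)
		return 1;
	return 0;
}

int main(void)
{
	struct item v[] = { { 1ULL << 40 }, { 3 }, { 1ULL << 33 } };

	/*
	 * Returning the raw difference truncated to int would mis-order keys
	 * that differ only in their upper 32 bits, e.g. (1 << 40) - (1 << 33).
	 */
	qsort(v, 3, sizeof(v[0]), item_cmp);
	for (int i = 0; i < 3; i++)
		printf("%llu\n", (unsigned long long)v[i].sort_key);
	return 0;
}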
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 9561f193e7e1..55819785941c 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -45,6 +45,7 @@ struct xfs_log_item {
struct xfs_log_vec *li_lv; /* active log vector */
struct xfs_log_vec *li_lv_shadow; /* standby vector */
xfs_csn_t li_seq; /* CIL commit seq */
+ uint32_t li_order_id; /* CIL commit order */
};
/*
@@ -71,10 +72,12 @@ struct xfs_item_ops {
void (*iop_format)(struct xfs_log_item *, struct xfs_log_vec *);
void (*iop_pin)(struct xfs_log_item *);
void (*iop_unpin)(struct xfs_log_item *, int remove);
- uint (*iop_push)(struct xfs_log_item *, struct list_head *);
+ uint64_t (*iop_sort)(struct xfs_log_item *lip);
+ int (*iop_precommit)(struct xfs_trans *tp, struct xfs_log_item *lip);
void (*iop_committing)(struct xfs_log_item *lip, xfs_csn_t seq);
- void (*iop_release)(struct xfs_log_item *);
xfs_lsn_t (*iop_committed)(struct xfs_log_item *, xfs_lsn_t);
+ uint (*iop_push)(struct xfs_log_item *, struct list_head *);
+ void (*iop_release)(struct xfs_log_item *);
int (*iop_recover)(struct xfs_log_item *lip,
struct list_head *capture_list);
bool (*iop_match)(struct xfs_log_item *item, uint64_t id);
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index f0d79a9050ba..d5400150358e 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -19,7 +19,8 @@ void xfs_trans_add_item(struct xfs_trans *, struct xfs_log_item *);
void xfs_trans_del_item(struct xfs_log_item *);
void xfs_trans_unreserve_and_mod_sb(struct xfs_trans *tp);
-void xfs_trans_committed_bulk(struct xfs_ail *ailp, struct xfs_log_vec *lv,
+void xfs_trans_committed_bulk(struct xfs_ail *ailp,
+ struct list_head *lv_chain,
xfs_lsn_t commit_lsn, bool aborted);
/*
* AIL traversal cursor.
diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
index 511bb9fa3750..860f0b1032c6 100644
--- a/fs/zonefs/super.c
+++ b/fs/zonefs/super.c
@@ -231,13 +231,6 @@ static const struct iomap_writeback_ops zonefs_writeback_ops = {
.map_blocks = zonefs_write_map_blocks,
};
-static int zonefs_writepage(struct page *page, struct writeback_control *wbc)
-{
- struct iomap_writepage_ctx wpc = { };
-
- return iomap_writepage(page, wbc, &wpc, &zonefs_writeback_ops);
-}
-
static int zonefs_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
@@ -265,7 +258,6 @@ static int zonefs_swap_activate(struct swap_info_struct *sis,
static const struct address_space_operations zonefs_file_aops = {
.read_folio = zonefs_read_folio,
.readahead = zonefs_readahead,
- .writepage = zonefs_writepage,
.writepages = zonefs_writepages,
.dirty_folio = filemap_dirty_folio,
.release_folio = iomap_release_folio,
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 48f0fd499274..e7d27373ff71 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -344,8 +344,9 @@ struct acpi_device_physical_node {
struct acpi_device_properties {
const guid_t *guid;
- const union acpi_object *properties;
+ union acpi_object *properties;
struct list_head list;
+ void **bufs;
};
/* ACPI Device Specific Data (_DSD) */
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index 3096f086b5a3..71ab4ba9c25d 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -39,9 +39,6 @@ arch_test_and_set_bit(unsigned int nr, volatile unsigned long *p)
unsigned long mask = BIT_MASK(nr);
p += BIT_WORD(nr);
- if (READ_ONCE(*p) & mask)
- return 1;
-
old = arch_atomic_long_fetch_or(mask, (atomic_long_t *)p);
return !!(old & mask);
}
@@ -53,9 +50,6 @@ arch_test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
unsigned long mask = BIT_MASK(nr);
p += BIT_WORD(nr);
- if (!(READ_ONCE(*p) & mask))
- return 0;
-
old = arch_atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
return !!(old & mask);
}
diff --git a/include/asm-generic/bitops/generic-non-atomic.h b/include/asm-generic/bitops/generic-non-atomic.h
new file mode 100644
index 000000000000..564a8c675d85
--- /dev/null
+++ b/include/asm-generic/bitops/generic-non-atomic.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H
+#define __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H
+
+#include <linux/bits.h>
+#include <asm/barrier.h>
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+/*
+ * Generic definitions for bit operations, should not be used in regular code
+ * directly.
+ */
+
+/**
+ * generic___set_bit - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike set_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static __always_inline void
+generic___set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+ *p |= mask;
+}
+
+static __always_inline void
+generic___clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+ *p &= ~mask;
+}
+
+/**
+ * generic___change_bit - Toggle a bit in memory
+ * @nr: the bit to change
+ * @addr: the address to start counting from
+ *
+ * Unlike change_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static __always_inline void
+generic___change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+ *p ^= mask;
+}
+
+/**
+ * generic___test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two instances of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+static __always_inline bool
+generic___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ unsigned long old = *p;
+
+ *p = old | mask;
+ return (old & mask) != 0;
+}
+
+/**
+ * generic___test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two instances of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+static __always_inline bool
+generic___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ unsigned long old = *p;
+
+ *p = old & ~mask;
+ return (old & mask) != 0;
+}
+
+/* WARNING: non-atomic and can be reordered! */
+static __always_inline bool
+generic___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ unsigned long old = *p;
+
+ *p = old ^ mask;
+ return (old & mask) != 0;
+}
+
+/**
+ * generic_test_bit - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static __always_inline bool
+generic_test_bit(unsigned long nr, const volatile unsigned long *addr)
+{
+ /*
+ * Unlike the bitops with the '__' prefix above, this one *is* atomic,
+ * so `volatile` must always stay here with no cast-aways. See
+ * `Documentation/atomic_bitops.txt` for the details.
+ */
+ return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
+}
+
+/**
+ * generic_test_bit_acquire - Determine, with acquire semantics, whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static __always_inline bool
+generic_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
+/*
+ * const_*() definitions provide good compile-time optimizations when
+ * the passed arguments can be resolved at compile time.
+ */
+#define const___set_bit generic___set_bit
+#define const___clear_bit generic___clear_bit
+#define const___change_bit generic___change_bit
+#define const___test_and_set_bit generic___test_and_set_bit
+#define const___test_and_clear_bit generic___test_and_clear_bit
+#define const___test_and_change_bit generic___test_and_change_bit
+#define const_test_bit_acquire generic_test_bit_acquire
+
+/**
+ * const_test_bit - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ *
+ * A version of generic_test_bit() which discards the `volatile` qualifier to
+ * allow a compiler to optimize code harder. Non-atomic and to be called only
+ * for testing compile-time constants, e.g. by the corresponding macros, not
+ * directly from "regular" code.
+ */
+static __always_inline bool
+const_test_bit(unsigned long nr, const volatile unsigned long *addr)
+{
+ const unsigned long *p = (const unsigned long *)addr + BIT_WORD(nr);
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long val = *p;
+
+ return !!(val & mask);
+}
+
+#endif /* __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H */
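
All of the helpers above come down to the same word/mask arithmetic. The standalone sketch below re-derives BIT_WORD()/BIT_MASK() for a 64-bit long just to make the indexing concrete; in the kernel these macros come from <linux/bits.h>, and the real generic_test_bit() additionally keeps the volatile qualifier discussed above.

#include <stdio.h>

#define BITS_PER_LONG	64
#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)

int main(void)
{
	unsigned long bitmap[2] = { 0, 0 };
	unsigned long nr = 71;			/* lands in word 1, bit 7 */

	/* what generic___set_bit() does */
	bitmap[BIT_WORD(nr)] |= BIT_MASK(nr);

	/* what generic_test_bit() reads back */
	printf("word=%lu mask=%#lx set=%lu\n", BIT_WORD(nr), BIT_MASK(nr),
	       1UL & (bitmap[BIT_WORD(nr)] >> (nr % BITS_PER_LONG)));
	return 0;
}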
diff --git a/include/asm-generic/bitops/instrumented-non-atomic.h b/include/asm-generic/bitops/instrumented-non-atomic.h
index 7ab1ecc37782..2b238b161a62 100644
--- a/include/asm-generic/bitops/instrumented-non-atomic.h
+++ b/include/asm-generic/bitops/instrumented-non-atomic.h
@@ -14,7 +14,7 @@
#include <linux/instrumented.h>
/**
- * __set_bit - Set a bit in memory
+ * ___set_bit - Set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
@@ -22,14 +22,15 @@
* region of memory concurrently, the effect may be that only one operation
* succeeds.
*/
-static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
+static __always_inline void
+___set_bit(unsigned long nr, volatile unsigned long *addr)
{
instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___set_bit(nr, addr);
}
/**
- * __clear_bit - Clears a bit in memory
+ * ___clear_bit - Clears a bit in memory
* @nr: the bit to clear
* @addr: the address to start counting from
*
@@ -37,14 +38,15 @@ static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
* region of memory concurrently, the effect may be that only one operation
* succeeds.
*/
-static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline void
+___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___clear_bit(nr, addr);
}
/**
- * __change_bit - Toggle a bit in memory
+ * ___change_bit - Toggle a bit in memory
* @nr: the bit to change
* @addr: the address to start counting from
*
@@ -52,7 +54,8 @@ static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
* region of memory concurrently, the effect may be that only one operation
* succeeds.
*/
-static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
+static __always_inline void
+___change_bit(unsigned long nr, volatile unsigned long *addr)
{
instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___change_bit(nr, addr);
@@ -83,56 +86,72 @@ static __always_inline void __instrument_read_write_bitop(long nr, volatile unsi
}
/**
- * __test_and_set_bit - Set a bit and return its old value
+ * ___test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is non-atomic. If two instances of this operation race, one
* can appear to succeed but actually fail.
*/
-static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
__instrument_read_write_bitop(nr, addr);
return arch___test_and_set_bit(nr, addr);
}
/**
- * __test_and_clear_bit - Clear a bit and return its old value
+ * ___test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation is non-atomic. If two instances of this operation race, one
* can appear to succeed but actually fail.
*/
-static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
__instrument_read_write_bitop(nr, addr);
return arch___test_and_clear_bit(nr, addr);
}
/**
- * __test_and_change_bit - Change a bit and return its old value
+ * ___test_and_change_bit - Change a bit and return its old value
* @nr: Bit to change
* @addr: Address to count from
*
* This operation is non-atomic. If two instances of this operation race, one
* can appear to succeed but actually fail.
*/
-static __always_inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
__instrument_read_write_bitop(nr, addr);
return arch___test_and_change_bit(nr, addr);
}
/**
- * test_bit - Determine whether a bit is set
+ * _test_bit - Determine whether a bit is set
* @nr: bit number to test
* @addr: Address to start counting from
*/
-static __always_inline bool test_bit(long nr, const volatile unsigned long *addr)
+static __always_inline bool
+_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long));
return arch_test_bit(nr, addr);
}
+/**
+ * _test_bit_acquire - Determine, with acquire semantics, whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static __always_inline bool
+_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+ instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long));
+ return arch_test_bit_acquire(nr, addr);
+}
+
#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */
diff --git a/include/asm-generic/bitops/non-atomic.h b/include/asm-generic/bitops/non-atomic.h
index 078cc68be2f1..71f8d54a5195 100644
--- a/include/asm-generic/bitops/non-atomic.h
+++ b/include/asm-generic/bitops/non-atomic.h
@@ -2,121 +2,19 @@
#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
-#include <asm/types.h>
+#include <asm-generic/bitops/generic-non-atomic.h>
-/**
- * arch___set_bit - Set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * Unlike set_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static __always_inline void
-arch___set_bit(unsigned int nr, volatile unsigned long *addr)
-{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+#define arch___set_bit generic___set_bit
+#define arch___clear_bit generic___clear_bit
+#define arch___change_bit generic___change_bit
- *p |= mask;
-}
-#define __set_bit arch___set_bit
+#define arch___test_and_set_bit generic___test_and_set_bit
+#define arch___test_and_clear_bit generic___test_and_clear_bit
+#define arch___test_and_change_bit generic___test_and_change_bit
-static __always_inline void
-arch___clear_bit(unsigned int nr, volatile unsigned long *addr)
-{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+#define arch_test_bit generic_test_bit
+#define arch_test_bit_acquire generic_test_bit_acquire
- *p &= ~mask;
-}
-#define __clear_bit arch___clear_bit
-
-/**
- * arch___change_bit - Toggle a bit in memory
- * @nr: the bit to change
- * @addr: the address to start counting from
- *
- * Unlike change_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static __always_inline
-void arch___change_bit(unsigned int nr, volatile unsigned long *addr)
-{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
-
- *p ^= mask;
-}
-#define __change_bit arch___change_bit
-
-/**
- * arch___test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static __always_inline int
-arch___test_and_set_bit(unsigned int nr, volatile unsigned long *addr)
-{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long old = *p;
-
- *p = old | mask;
- return (old & mask) != 0;
-}
-#define __test_and_set_bit arch___test_and_set_bit
-
-/**
- * arch___test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static __always_inline int
-arch___test_and_clear_bit(unsigned int nr, volatile unsigned long *addr)
-{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long old = *p;
-
- *p = old & ~mask;
- return (old & mask) != 0;
-}
-#define __test_and_clear_bit arch___test_and_clear_bit
-
-/* WARNING: non atomic and it can be reordered! */
-static __always_inline int
-arch___test_and_change_bit(unsigned int nr, volatile unsigned long *addr)
-{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long old = *p;
-
- *p = old ^ mask;
- return (old & mask) != 0;
-}
-#define __test_and_change_bit arch___test_and_change_bit
-
-/**
- * arch_test_bit - Determine whether a bit is set
- * @nr: bit number to test
- * @addr: Address to start counting from
- */
-static __always_inline int
-arch_test_bit(unsigned int nr, const volatile unsigned long *addr)
-{
- return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
-}
-#define test_bit arch_test_bit
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
diff --git a/include/asm-generic/bitops/non-instrumented-non-atomic.h b/include/asm-generic/bitops/non-instrumented-non-atomic.h
new file mode 100644
index 000000000000..0ddc78dfc358
--- /dev/null
+++ b/include/asm-generic/bitops/non-instrumented-non-atomic.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H
+#define __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H
+
+#define ___set_bit arch___set_bit
+#define ___clear_bit arch___clear_bit
+#define ___change_bit arch___change_bit
+
+#define ___test_and_set_bit arch___test_and_set_bit
+#define ___test_and_clear_bit arch___test_and_clear_bit
+#define ___test_and_change_bit arch___test_and_change_bit
+
+#define _test_bit arch_test_bit
+#define _test_bit_acquire arch_test_bit_acquire
+
+#endif /* __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H */
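
Taken together with the non-atomic.h hunk above, this header is a pure alias layer: on an architecture that overrides nothing, _test_bit() resolves to generic_test_bit() through two #defines, while the instrumented header routes the same _test_bit name through a KASAN/KCSAN wrapper instead. A minimal sketch of that resolution in isolation; the outer test_bit() wrapper lives in <linux/bitops.h> and is not part of this diff, and generic_test_bit() is simplified here (no volatile, int return).

#include <stdio.h>

/* simplified fallback, as in generic-non-atomic.h */
static inline int generic_test_bit(unsigned long nr, const unsigned long *addr)
{
	return 1UL & (addr[nr / 64] >> (nr % 64));
}

/* an "architecture" that overrides nothing just aliases everything through */
#define arch_test_bit	generic_test_bit	/* from non-atomic.h */
#define _test_bit	arch_test_bit		/* from non-instrumented-non-atomic.h */

int main(void)
{
	unsigned long map[1] = { 0x10 };

	/* _test_bit() expands to generic_test_bit() via the two defines above */
	printf("%d\n", _test_bit(4, map));
	return 0;
}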
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 72974cb81343..a68f8fbf423b 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -10,6 +10,7 @@
#include <asm/page.h> /* I/O is all done through memory accesses */
#include <linux/string.h> /* for memset() and memcpy() */
#include <linux/types.h>
+#include <linux/instruction_pointer.h>
#ifdef CONFIG_GENERIC_IOMAP
#include <asm-generic/iomap.h>
@@ -61,6 +62,44 @@
#define __io_par(v) __io_ar(v)
#endif
+/*
+ * "__DISABLE_TRACE_MMIO__" flag can be used to disable MMIO tracing for
+ * specific kernel drivers in case of excessive/unwanted logging.
+ *
+ * Usage: Add a #define flag at the beginning of the driver file.
+ * Ex: #define __DISABLE_TRACE_MMIO__
+ * #include <...>
+ * ...
+ */
+#if IS_ENABLED(CONFIG_TRACE_MMIO_ACCESS) && !(defined(__DISABLE_TRACE_MMIO__))
+#include <linux/tracepoint-defs.h>
+
+DECLARE_TRACEPOINT(rwmmio_write);
+DECLARE_TRACEPOINT(rwmmio_post_write);
+DECLARE_TRACEPOINT(rwmmio_read);
+DECLARE_TRACEPOINT(rwmmio_post_read);
+
+void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+ unsigned long caller_addr);
+void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+ unsigned long caller_addr);
+void log_read_mmio(u8 width, const volatile void __iomem *addr,
+ unsigned long caller_addr);
+void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
+ unsigned long caller_addr);
+
+#else
+
+static inline void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+ unsigned long caller_addr) {}
+static inline void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+ unsigned long caller_addr) {}
+static inline void log_read_mmio(u8 width, const volatile void __iomem *addr,
+ unsigned long caller_addr) {}
+static inline void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
+ unsigned long caller_addr) {}
+
+#endif /* CONFIG_TRACE_MMIO_ACCESS */
/*
* __raw_{read,write}{b,w,l,q}() access memory in native endianness.
@@ -149,9 +188,11 @@ static inline u8 readb(const volatile void __iomem *addr)
{
u8 val;
+ log_read_mmio(8, addr, _THIS_IP_);
__io_br();
val = __raw_readb(addr);
__io_ar(val);
+ log_post_read_mmio(val, 8, addr, _THIS_IP_);
return val;
}
#endif
@@ -162,9 +203,11 @@ static inline u16 readw(const volatile void __iomem *addr)
{
u16 val;
+ log_read_mmio(16, addr, _THIS_IP_);
__io_br();
val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
__io_ar(val);
+ log_post_read_mmio(val, 16, addr, _THIS_IP_);
return val;
}
#endif
@@ -175,9 +218,11 @@ static inline u32 readl(const volatile void __iomem *addr)
{
u32 val;
+ log_read_mmio(32, addr, _THIS_IP_);
__io_br();
val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
__io_ar(val);
+ log_post_read_mmio(val, 32, addr, _THIS_IP_);
return val;
}
#endif
@@ -189,9 +234,11 @@ static inline u64 readq(const volatile void __iomem *addr)
{
u64 val;
+ log_read_mmio(64, addr, _THIS_IP_);
__io_br();
val = __le64_to_cpu(__raw_readq(addr));
__io_ar(val);
+ log_post_read_mmio(val, 64, addr, _THIS_IP_);
return val;
}
#endif
@@ -201,9 +248,11 @@ static inline u64 readq(const volatile void __iomem *addr)
#define writeb writeb
static inline void writeb(u8 value, volatile void __iomem *addr)
{
+ log_write_mmio(value, 8, addr, _THIS_IP_);
__io_bw();
__raw_writeb(value, addr);
__io_aw();
+ log_post_write_mmio(value, 8, addr, _THIS_IP_);
}
#endif
@@ -211,9 +260,11 @@ static inline void writeb(u8 value, volatile void __iomem *addr)
#define writew writew
static inline void writew(u16 value, volatile void __iomem *addr)
{
+ log_write_mmio(value, 16, addr, _THIS_IP_);
__io_bw();
__raw_writew((u16 __force)cpu_to_le16(value), addr);
__io_aw();
+ log_post_write_mmio(value, 16, addr, _THIS_IP_);
}
#endif
@@ -221,9 +272,11 @@ static inline void writew(u16 value, volatile void __iomem *addr)
#define writel writel
static inline void writel(u32 value, volatile void __iomem *addr)
{
+ log_write_mmio(value, 32, addr, _THIS_IP_);
__io_bw();
__raw_writel((u32 __force)__cpu_to_le32(value), addr);
__io_aw();
+ log_post_write_mmio(value, 32, addr, _THIS_IP_);
}
#endif
@@ -232,9 +285,11 @@ static inline void writel(u32 value, volatile void __iomem *addr)
#define writeq writeq
static inline void writeq(u64 value, volatile void __iomem *addr)
{
+ log_write_mmio(value, 64, addr, _THIS_IP_);
__io_bw();
__raw_writeq(__cpu_to_le64(value), addr);
__io_aw();
+ log_post_write_mmio(value, 64, addr, _THIS_IP_);
}
#endif
#endif /* CONFIG_64BIT */
@@ -248,7 +303,12 @@ static inline void writeq(u64 value, volatile void __iomem *addr)
#define readb_relaxed readb_relaxed
static inline u8 readb_relaxed(const volatile void __iomem *addr)
{
- return __raw_readb(addr);
+ u8 val;
+
+ log_read_mmio(8, addr, _THIS_IP_);
+ val = __raw_readb(addr);
+ log_post_read_mmio(val, 8, addr, _THIS_IP_);
+ return val;
}
#endif
@@ -256,7 +316,12 @@ static inline u8 readb_relaxed(const volatile void __iomem *addr)
#define readw_relaxed readw_relaxed
static inline u16 readw_relaxed(const volatile void __iomem *addr)
{
- return __le16_to_cpu(__raw_readw(addr));
+ u16 val;
+
+ log_read_mmio(16, addr, _THIS_IP_);
+ val = __le16_to_cpu(__raw_readw(addr));
+ log_post_read_mmio(val, 16, addr, _THIS_IP_);
+ return val;
}
#endif
@@ -264,7 +329,12 @@ static inline u16 readw_relaxed(const volatile void __iomem *addr)
#define readl_relaxed readl_relaxed
static inline u32 readl_relaxed(const volatile void __iomem *addr)
{
- return __le32_to_cpu(__raw_readl(addr));
+ u32 val;
+
+ log_read_mmio(32, addr, _THIS_IP_);
+ val = __le32_to_cpu(__raw_readl(addr));
+ log_post_read_mmio(val, 32, addr, _THIS_IP_);
+ return val;
}
#endif
@@ -272,7 +342,12 @@ static inline u32 readl_relaxed(const volatile void __iomem *addr)
#define readq_relaxed readq_relaxed
static inline u64 readq_relaxed(const volatile void __iomem *addr)
{
- return __le64_to_cpu(__raw_readq(addr));
+ u64 val;
+
+ log_read_mmio(64, addr, _THIS_IP_);
+ val = __le64_to_cpu(__raw_readq(addr));
+ log_post_read_mmio(val, 64, addr, _THIS_IP_);
+ return val;
}
#endif
@@ -280,7 +355,9 @@ static inline u64 readq_relaxed(const volatile void __iomem *addr)
#define writeb_relaxed writeb_relaxed
static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
{
+ log_write_mmio(value, 8, addr, _THIS_IP_);
__raw_writeb(value, addr);
+ log_post_write_mmio(value, 8, addr, _THIS_IP_);
}
#endif
@@ -288,7 +365,9 @@ static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
#define writew_relaxed writew_relaxed
static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
{
+ log_write_mmio(value, 16, addr, _THIS_IP_);
__raw_writew(cpu_to_le16(value), addr);
+ log_post_write_mmio(value, 16, addr, _THIS_IP_);
}
#endif
@@ -296,7 +375,9 @@ static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
#define writel_relaxed writel_relaxed
static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
{
+ log_write_mmio(value, 32, addr, _THIS_IP_);
__raw_writel(__cpu_to_le32(value), addr);
+ log_post_write_mmio(value, 32, addr, _THIS_IP_);
}
#endif
@@ -304,7 +385,9 @@ static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
#define writeq_relaxed writeq_relaxed
static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
{
+ log_write_mmio(value, 64, addr, _THIS_IP_);
__raw_writeq(__cpu_to_le64(value), addr);
+ log_post_write_mmio(value, 64, addr, _THIS_IP_);
}
#endif
@@ -1086,20 +1169,6 @@ static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
}
#endif
-#ifdef CONFIG_VIRT_TO_BUS
-#ifndef virt_to_bus
-static inline unsigned long virt_to_bus(void *address)
-{
- return (unsigned long)address;
-}
-
-static inline void *bus_to_virt(unsigned long address)
-{
- return (void *)address;
-}
-#endif
-#endif
-
#ifndef memset_io
#define memset_io memset_io
/**
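With CONFIG_TRACE_MMIO_ACCESS enabled, the hunks above make every readX()/writeX() accessor (including the _relaxed variants) emit rwmmio trace events around the raw access, and a noisy driver can opt out by defining the flag before its includes, as the new comment describes. A minimal sketch of that opt-out, with a made-up driver and register offset:

/* hypothetical drivers/misc/foo.c: opt this file out of MMIO tracing */
#define __DISABLE_TRACE_MMIO__

#include <linux/io.h>

#define FOO_CTRL	0x10	/* illustrative register offset */

static void foo_enable(void __iomem *base)
{
	/* these compile against the empty log_*_mmio() stubs above */
	u32 ctrl = readl(base + FOO_CTRL);

	writel(ctrl | 0x1, base + FOO_CTRL);
}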
diff --git a/include/asm-generic/pci.h b/include/asm-generic/pci.h
index 6bb3cd3d695a..6869f1061528 100644
--- a/include/asm-generic/pci.h
+++ b/include/asm-generic/pci.h
@@ -1,17 +1,30 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * linux/include/asm-generic/pci.h
- *
- * Copyright (C) 2003 Russell King
- */
-#ifndef _ASM_GENERIC_PCI_H
-#define _ASM_GENERIC_PCI_H
+/* SPDX-License-Identifier: GPL-2.0-only */
-#ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
-static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
+#ifndef __ASM_GENERIC_PCI_H
+#define __ASM_GENERIC_PCI_H
+
+#ifndef PCIBIOS_MIN_IO
+#define PCIBIOS_MIN_IO 0
+#endif
+
+#ifndef PCIBIOS_MIN_MEM
+#define PCIBIOS_MIN_MEM 0
+#endif
+
+#ifndef pcibios_assign_all_busses
+/* For bootloaders that do not initialize the PCI bus */
+#define pcibios_assign_all_busses() 1
+#endif
+
+/* Enable generic resource mapping code in drivers/pci/ */
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
+
+#ifdef CONFIG_PCI_DOMAINS
+static inline int pci_proc_domain(struct pci_bus *bus)
{
- return channel ? 15 : 14;
+ /* always show the domain in /proc */
+ return 1;
}
-#endif /* HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ */
+#endif /* CONFIG_PCI_DOMAINS */
-#endif /* _ASM_GENERIC_PCI_H */
+#endif /* __ASM_GENERIC_PCI_H */
diff --git a/include/asm-generic/pci_iomap.h b/include/asm-generic/pci_iomap.h
index 5a2f9bf53384..8fbb0a55545d 100644
--- a/include/asm-generic/pci_iomap.h
+++ b/include/asm-generic/pci_iomap.h
@@ -25,6 +25,8 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
#ifdef CONFIG_NO_GENERIC_PCI_IOPORT_MAP
extern void __iomem *__pci_ioport_map(struct pci_dev *dev, unsigned long port,
unsigned int nr);
+#elif !defined(CONFIG_HAS_IOPORT_MAP)
+#define __pci_ioport_map(dev, port, nr) NULL
#else
#define __pci_ioport_map(dev, port, nr) ioport_map((port), (nr))
#endif
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index d0f7bdd2fdf2..db13bb620f52 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -97,7 +97,7 @@ static inline bool memory_contains(void *begin, void *end, void *virt,
/**
* memory_intersects - checks if the region occupied by an object intersects
* with another memory region
- * @begin: virtual address of the beginning of the memory regien
+ * @begin: virtual address of the beginning of the memory region
* @end: virtual address of the end of the memory region
* @virt: virtual address of the memory object
* @size: size of the memory object
@@ -110,7 +110,10 @@ static inline bool memory_intersects(void *begin, void *end, void *virt,
{
void *vend = virt + size;
- return (virt >= begin && virt < end) || (vend >= begin && vend < end);
+ if (virt < end && vend > begin)
+ return true;
+
+ return false;
}
/**
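The memory_intersects() rewrite above fixes two problems in the old check: vend is an exclusive end, so comparing it with ">= begin"/"< end" was off by one, and an object that completely covers the region was never reported as intersecting. A worked case with made-up addresses:

/*
 * Illustration only:
 *   region: begin = 0x1000, end  = 0x2000
 *   object: virt  = 0x0800, size = 0x2000  ->  vend = 0x2800
 *
 * Old: (virt >= begin && virt < end) || (vend >= begin && vend < end)
 *      -> false || (true && false)  -> false   (overlap missed)
 * New: virt < end && vend > begin
 *      -> true  && true             -> true    (overlap reported)
 */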
diff --git a/include/asm-generic/softirq_stack.h b/include/asm-generic/softirq_stack.h
index eceeecf6a5bd..d3e2d81656e0 100644
--- a/include/asm-generic/softirq_stack.h
+++ b/include/asm-generic/softirq_stack.h
@@ -2,7 +2,7 @@
#ifndef __ASM_GENERIC_SOFTIRQ_STACK_H
#define __ASM_GENERIC_SOFTIRQ_STACK_H
-#ifdef CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK
+#if defined(CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK) && !defined(CONFIG_PREEMPT_RT)
void do_softirq_own_stack(void);
#else
static inline void do_softirq_own_stack(void)
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index f140e4643949..f5841992dc9b 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -718,6 +718,8 @@ static inline void ahash_request_set_crypt(struct ahash_request *req,
struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type,
u32 mask);
+int crypto_has_shash(const char *alg_name, u32 type, u32 mask);
+
static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
{
return &tfm->base;
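crypto_has_shash(), declared above, follows the existing crypto_has_*() helpers and lets a caller probe whether a synchronous hash is available before allocating a transform. A small sketch, with the algorithm name chosen only for illustration:

#include <crypto/hash.h>

static bool foo_have_sha256(void)
{
	/* non-zero when a "sha256" shash implementation can be loaded */
	return crypto_has_shash("sha256", 0, 0);
}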
diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h
index cccceadc164b..24d01e9877c1 100644
--- a/include/crypto/kpp.h
+++ b/include/crypto/kpp.h
@@ -104,6 +104,8 @@ struct kpp_alg {
*/
struct crypto_kpp *crypto_alloc_kpp(const char *alg_name, u32 type, u32 mask);
+int crypto_has_kpp(const char *alg_name, u32 type, u32 mask);
+
static inline struct crypto_tfm *crypto_kpp_tfm(struct crypto_kpp *tfm)
{
return &tfm->base;
diff --git a/include/dt-bindings/clock/efm32-cmu.h b/include/dt-bindings/clock/efm32-cmu.h
deleted file mode 100644
index 4b48d15fe194..000000000000
--- a/include/dt-bindings/clock/efm32-cmu.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __DT_BINDINGS_CLOCK_EFM32_CMU_H
-#define __DT_BINDINGS_CLOCK_EFM32_CMU_H
-
-#define clk_HFXO 0
-#define clk_HFRCO 1
-#define clk_LFXO 2
-#define clk_LFRCO 3
-#define clk_ULFRCO 4
-#define clk_AUXHFRCO 5
-#define clk_HFCLKNODIV 6
-#define clk_HFCLK 7
-#define clk_HFPERCLK 8
-#define clk_HFCORECLK 9
-#define clk_LFACLK 10
-#define clk_LFBCLK 11
-#define clk_WDOGCLK 12
-#define clk_HFCORECLKDMA 13
-#define clk_HFCORECLKAES 14
-#define clk_HFCORECLKUSBC 15
-#define clk_HFCORECLKUSB 16
-#define clk_HFCORECLKLE 17
-#define clk_HFCORECLKEBI 18
-#define clk_HFPERCLKUSART0 19
-#define clk_HFPERCLKUSART1 20
-#define clk_HFPERCLKUSART2 21
-#define clk_HFPERCLKUART0 22
-#define clk_HFPERCLKUART1 23
-#define clk_HFPERCLKTIMER0 24
-#define clk_HFPERCLKTIMER1 25
-#define clk_HFPERCLKTIMER2 26
-#define clk_HFPERCLKTIMER3 27
-#define clk_HFPERCLKACMP0 28
-#define clk_HFPERCLKACMP1 29
-#define clk_HFPERCLKI2C0 30
-#define clk_HFPERCLKI2C1 31
-#define clk_HFPERCLKGPIO 32
-#define clk_HFPERCLKVCMP 33
-#define clk_HFPERCLKPRS 34
-#define clk_HFPERCLKADC0 35
-#define clk_HFPERCLKDAC0 36
-
-#endif /* __DT_BINDINGS_CLOCK_EFM32_CMU_H */
diff --git a/include/dt-bindings/clock/qcom,gcc-ipq8074.h b/include/dt-bindings/clock/qcom,gcc-ipq8074.h
index 55f8322a1e50..e4991d303708 100644
--- a/include/dt-bindings/clock/qcom,gcc-ipq8074.h
+++ b/include/dt-bindings/clock/qcom,gcc-ipq8074.h
@@ -233,6 +233,7 @@
#define GCC_PCIE0_AXI_S_BRIDGE_CLK 224
#define GCC_PCIE0_RCHNG_CLK_SRC 225
#define GCC_PCIE0_RCHNG_CLK 226
+#define GCC_CRYPTO_PPE_CLK 227
#define GCC_BLSP1_BCR 0
#define GCC_BLSP1_QUP1_BCR 1
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8939.h b/include/dt-bindings/clock/qcom,gcc-msm8939.h
index 0634467c4ce5..2d545ed0d35a 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8939.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8939.h
@@ -192,6 +192,7 @@
#define GCC_VENUS0_CORE0_VCODEC0_CLK 183
#define GCC_VENUS0_CORE1_VCODEC0_CLK 184
#define GCC_OXILI_TIMER_CLK 185
+#define SYSTEM_MM_NOC_BFDCD_CLK_SRC 186
/* Indexes for GDSCs */
#define BIMC_GDSC 0
diff --git a/include/dt-bindings/clock/r9a07g043-cpg.h b/include/dt-bindings/clock/r9a07g043-cpg.h
index 27e232733096..77cde8effdc7 100644
--- a/include/dt-bindings/clock/r9a07g043-cpg.h
+++ b/include/dt-bindings/clock/r9a07g043-cpg.h
@@ -108,6 +108,15 @@
#define R9A07G043_ADC_ADCLK 76
#define R9A07G043_ADC_PCLK 77
#define R9A07G043_TSU_PCLK 78
+#define R9A07G043_NCEPLDM_DM_CLK 79 /* RZ/Five Only */
+#define R9A07G043_NCEPLDM_ACLK 80 /* RZ/Five Only */
+#define R9A07G043_NCEPLDM_TCK 81 /* RZ/Five Only */
+#define R9A07G043_NCEPLMT_ACLK 82 /* RZ/Five Only */
+#define R9A07G043_NCEPLIC_ACLK 83 /* RZ/Five Only */
+#define R9A07G043_AX45MP_CORE0_CLK 84 /* RZ/Five Only */
+#define R9A07G043_AX45MP_ACLK 85 /* RZ/Five Only */
+#define R9A07G043_IAX45_CLK 86 /* RZ/Five Only */
+#define R9A07G043_IAX45_PCLK 87 /* RZ/Five Only */
/* R9A07G043 Resets */
#define R9A07G043_CA55_RST_1_0 0 /* RZ/G2UL Only */
@@ -180,5 +189,16 @@
#define R9A07G043_ADC_PRESETN 67
#define R9A07G043_ADC_ADRST_N 68
#define R9A07G043_TSU_PRESETN 69
+#define R9A07G043_NCEPLDM_DTM_PWR_RST_N 70 /* RZ/Five Only */
+#define R9A07G043_NCEPLDM_ARESETN 71 /* RZ/Five Only */
+#define R9A07G043_NCEPLMT_POR_RSTN 72 /* RZ/Five Only */
+#define R9A07G043_NCEPLMT_ARESETN 73 /* RZ/Five Only */
+#define R9A07G043_NCEPLIC_ARESETN 74 /* RZ/Five Only */
+#define R9A07G043_AX45MP_ARESETNM 75 /* RZ/Five Only */
+#define R9A07G043_AX45MP_ARESETNS 76 /* RZ/Five Only */
+#define R9A07G043_AX45MP_L2_RESETN 77 /* RZ/Five Only */
+#define R9A07G043_AX45MP_CORE0_RESETN 78 /* RZ/Five Only */
+#define R9A07G043_IAX45_RESETN 79 /* RZ/Five Only */
+
#endif /* __DT_BINDINGS_CLOCK_R9A07G043_CPG_H__ */
diff --git a/include/dt-bindings/clock/sprd,ums512-clk.h b/include/dt-bindings/clock/sprd,ums512-clk.h
new file mode 100644
index 000000000000..4f1d90849944
--- /dev/null
+++ b/include/dt-bindings/clock/sprd,ums512-clk.h
@@ -0,0 +1,397 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Unisoc UMS512 SoC DTS file
+ *
+ * Copyright (C) 2022, Unisoc Inc.
+ */
+
+#ifndef _DT_BINDINGS_CLK_UMS512_H_
+#define _DT_BINDINGS_CLK_UMS512_H_
+
+#define CLK_26M_AUD 0
+#define CLK_13M 1
+#define CLK_6M5 2
+#define CLK_4M3 3
+#define CLK_2M 4
+#define CLK_1M 5
+#define CLK_250K 6
+#define CLK_RCO_25M 7
+#define CLK_RCO_4M 8
+#define CLK_RCO_2M 9
+#define CLK_ISPPLL_GATE 10
+#define CLK_DPLL0_GATE 11
+#define CLK_DPLL1_GATE 12
+#define CLK_LPLL_GATE 13
+#define CLK_TWPLL_GATE 14
+#define CLK_GPLL_GATE 15
+#define CLK_RPLL_GATE 16
+#define CLK_CPPLL_GATE 17
+#define CLK_MPLL0_GATE 18
+#define CLK_MPLL1_GATE 19
+#define CLK_MPLL2_GATE 20
+#define CLK_PMU_GATE_NUM (CLK_MPLL2_GATE + 1)
+
+#define CLK_DPLL0 0
+#define CLK_DPLL0_58M31 1
+#define CLK_ANLG_PHY_G0_NUM (CLK_DPLL0_58M31 + 1)
+
+#define CLK_MPLL1 0
+#define CLK_MPLL1_63M38 1
+#define CLK_ANLG_PHY_G2_NUM (CLK_MPLL1_63M38 + 1)
+
+#define CLK_RPLL 0
+#define CLK_AUDIO_GATE 1
+#define CLK_MPLL0 2
+#define CLK_MPLL0_56M88 3
+#define CLK_MPLL2 4
+#define CLK_MPLL2_47M13 5
+#define CLK_ANLG_PHY_G3_NUM (CLK_MPLL2_47M13 + 1)
+
+#define CLK_TWPLL 0
+#define CLK_TWPLL_768M 1
+#define CLK_TWPLL_384M 2
+#define CLK_TWPLL_192M 3
+#define CLK_TWPLL_96M 4
+#define CLK_TWPLL_48M 5
+#define CLK_TWPLL_24M 6
+#define CLK_TWPLL_12M 7
+#define CLK_TWPLL_512M 8
+#define CLK_TWPLL_256M 9
+#define CLK_TWPLL_128M 10
+#define CLK_TWPLL_64M 11
+#define CLK_TWPLL_307M2 12
+#define CLK_TWPLL_219M4 13
+#define CLK_TWPLL_170M6 14
+#define CLK_TWPLL_153M6 15
+#define CLK_TWPLL_76M8 16
+#define CLK_TWPLL_51M2 17
+#define CLK_TWPLL_38M4 18
+#define CLK_TWPLL_19M2 19
+#define CLK_TWPLL_12M29 20
+#define CLK_LPLL 21
+#define CLK_LPLL_614M4 22
+#define CLK_LPLL_409M6 23
+#define CLK_LPLL_245M76 24
+#define CLK_LPLL_30M72 25
+#define CLK_ISPPLL 26
+#define CLK_ISPPLL_468M 27
+#define CLK_ISPPLL_78M 28
+#define CLK_GPLL 29
+#define CLK_GPLL_40M 30
+#define CLK_CPPLL 31
+#define CLK_CPPLL_39M32 32
+#define CLK_ANLG_PHY_GC_NUM (CLK_CPPLL_39M32 + 1)
+
+#define CLK_AP_APB 0
+#define CLK_IPI 1
+#define CLK_AP_UART0 2
+#define CLK_AP_UART1 3
+#define CLK_AP_UART2 4
+#define CLK_AP_I2C0 5
+#define CLK_AP_I2C1 6
+#define CLK_AP_I2C2 7
+#define CLK_AP_I2C3 8
+#define CLK_AP_I2C4 9
+#define CLK_AP_SPI0 10
+#define CLK_AP_SPI1 11
+#define CLK_AP_SPI2 12
+#define CLK_AP_SPI3 13
+#define CLK_AP_IIS0 14
+#define CLK_AP_IIS1 15
+#define CLK_AP_IIS2 16
+#define CLK_AP_SIM 17
+#define CLK_AP_CE 18
+#define CLK_SDIO0_2X 19
+#define CLK_SDIO1_2X 20
+#define CLK_EMMC_2X 21
+#define CLK_VSP 22
+#define CLK_DISPC0 23
+#define CLK_DISPC0_DPI 24
+#define CLK_DSI_APB 25
+#define CLK_DSI_RXESC 26
+#define CLK_DSI_LANEBYTE 27
+#define CLK_VDSP 28
+#define CLK_VDSP_M 29
+#define CLK_AP_CLK_NUM (CLK_VDSP_M + 1)
+
+#define CLK_DSI_EB 0
+#define CLK_DISPC_EB 1
+#define CLK_VSP_EB 2
+#define CLK_VDMA_EB 3
+#define CLK_DMA_PUB_EB 4
+#define CLK_DMA_SEC_EB 5
+#define CLK_IPI_EB 6
+#define CLK_AHB_CKG_EB 7
+#define CLK_BM_CLK_EB 8
+#define CLK_AP_AHB_GATE_NUM (CLK_BM_CLK_EB + 1)
+
+#define CLK_AON_APB 0
+#define CLK_ADI 1
+#define CLK_AUX0 2
+#define CLK_AUX1 3
+#define CLK_AUX2 4
+#define CLK_PROBE 5
+#define CLK_PWM0 6
+#define CLK_PWM1 7
+#define CLK_PWM2 8
+#define CLK_PWM3 9
+#define CLK_EFUSE 10
+#define CLK_UART0 11
+#define CLK_UART1 12
+#define CLK_THM0 13
+#define CLK_THM1 14
+#define CLK_THM2 15
+#define CLK_THM3 16
+#define CLK_AON_I2C 17
+#define CLK_AON_IIS 18
+#define CLK_SCC 19
+#define CLK_APCPU_DAP 20
+#define CLK_APCPU_DAP_MTCK 21
+#define CLK_APCPU_TS 22
+#define CLK_DEBUG_TS 23
+#define CLK_DSI_TEST_S 24
+#define CLK_DJTAG_TCK 25
+#define CLK_DJTAG_TCK_HW 26
+#define CLK_AON_TMR 27
+#define CLK_AON_PMU 28
+#define CLK_DEBOUNCE 29
+#define CLK_APCPU_PMU 30
+#define CLK_TOP_DVFS 31
+#define CLK_OTG_UTMI 32
+#define CLK_OTG_REF 33
+#define CLK_CSSYS 34
+#define CLK_CSSYS_PUB 35
+#define CLK_CSSYS_APB 36
+#define CLK_AP_AXI 37
+#define CLK_AP_MM 38
+#define CLK_SDIO2_2X 39
+#define CLK_ANALOG_IO_APB 40
+#define CLK_DMC_REF_CLK 41
+#define CLK_EMC 42
+#define CLK_USB 43
+#define CLK_26M_PMU 44
+#define CLK_AON_APB_NUM (CLK_26M_PMU + 1)
+
+#define CLK_MM_AHB 0
+#define CLK_MM_MTX 1
+#define CLK_SENSOR0 2
+#define CLK_SENSOR1 3
+#define CLK_SENSOR2 4
+#define CLK_CPP 5
+#define CLK_JPG 6
+#define CLK_FD 7
+#define CLK_DCAM_IF 8
+#define CLK_DCAM_AXI 9
+#define CLK_ISP 10
+#define CLK_MIPI_CSI0 11
+#define CLK_MIPI_CSI1 12
+#define CLK_MIPI_CSI2 13
+#define CLK_MM_CLK_NUM (CLK_MIPI_CSI2 + 1)
+
+#define CLK_RC100M_CAL_EB 0
+#define CLK_DJTAG_TCK_EB 1
+#define CLK_DJTAG_EB 2
+#define CLK_AUX0_EB 3
+#define CLK_AUX1_EB 4
+#define CLK_AUX2_EB 5
+#define CLK_PROBE_EB 6
+#define CLK_MM_EB 7
+#define CLK_GPU_EB 8
+#define CLK_MSPI_EB 9
+#define CLK_APCPU_DAP_EB 10
+#define CLK_AON_CSSYS_EB 11
+#define CLK_CSSYS_APB_EB 12
+#define CLK_CSSYS_PUB_EB 13
+#define CLK_SDPHY_CFG_EB 14
+#define CLK_SDPHY_REF_EB 15
+#define CLK_EFUSE_EB 16
+#define CLK_GPIO_EB 17
+#define CLK_MBOX_EB 18
+#define CLK_KPD_EB 19
+#define CLK_AON_SYST_EB 20
+#define CLK_AP_SYST_EB 21
+#define CLK_AON_TMR_EB 22
+#define CLK_OTG_UTMI_EB 23
+#define CLK_OTG_PHY_EB 24
+#define CLK_SPLK_EB 25
+#define CLK_PIN_EB 26
+#define CLK_ANA_EB 27
+#define CLK_APCPU_TS0_EB 28
+#define CLK_APB_BUSMON_EB 29
+#define CLK_AON_IIS_EB 30
+#define CLK_SCC_EB 31
+#define CLK_THM0_EB 32
+#define CLK_THM1_EB 33
+#define CLK_THM2_EB 34
+#define CLK_ASIM_TOP_EB 35
+#define CLK_I2C_EB 36
+#define CLK_PMU_EB 37
+#define CLK_ADI_EB 38
+#define CLK_EIC_EB 39
+#define CLK_AP_INTC0_EB 40
+#define CLK_AP_INTC1_EB 41
+#define CLK_AP_INTC2_EB 42
+#define CLK_AP_INTC3_EB 43
+#define CLK_AP_INTC4_EB 44
+#define CLK_AP_INTC5_EB 45
+#define CLK_AUDCP_INTC_EB 46
+#define CLK_AP_TMR0_EB 47
+#define CLK_AP_TMR1_EB 48
+#define CLK_AP_TMR2_EB 49
+#define CLK_PWM0_EB 50
+#define CLK_PWM1_EB 51
+#define CLK_PWM2_EB 52
+#define CLK_PWM3_EB 53
+#define CLK_AP_WDG_EB 54
+#define CLK_APCPU_WDG_EB 55
+#define CLK_SERDES_EB 56
+#define CLK_ARCH_RTC_EB 57
+#define CLK_KPD_RTC_EB 58
+#define CLK_AON_SYST_RTC_EB 59
+#define CLK_AP_SYST_RTC_EB 60
+#define CLK_AON_TMR_RTC_EB 61
+#define CLK_EIC_RTC_EB 62
+#define CLK_EIC_RTCDV5_EB 63
+#define CLK_AP_WDG_RTC_EB 64
+#define CLK_AC_WDG_RTC_EB 65
+#define CLK_AP_TMR0_RTC_EB 66
+#define CLK_AP_TMR1_RTC_EB 67
+#define CLK_AP_TMR2_RTC_EB 68
+#define CLK_DCXO_LC_RTC_EB 69
+#define CLK_BB_CAL_RTC_EB 70
+#define CLK_AP_EMMC_RTC_EB 71
+#define CLK_AP_SDIO0_RTC_EB 72
+#define CLK_AP_SDIO1_RTC_EB 73
+#define CLK_AP_SDIO2_RTC_EB 74
+#define CLK_DSI_CSI_TEST_EB 75
+#define CLK_DJTAG_TCK_EN 76
+#define CLK_DPHY_REF_EB 77
+#define CLK_DMC_REF_EB 78
+#define CLK_OTG_REF_EB 79
+#define CLK_TSEN_EB 80
+#define CLK_TMR_EB 81
+#define CLK_RC100M_REF_EB 82
+#define CLK_RC100M_FDK_EB 83
+#define CLK_DEBOUNCE_EB 84
+#define CLK_DET_32K_EB 85
+#define CLK_TOP_CSSYS_EB 86
+#define CLK_AP_AXI_EN 87
+#define CLK_SDIO0_2X_EN 88
+#define CLK_SDIO0_1X_EN 89
+#define CLK_SDIO1_2X_EN 90
+#define CLK_SDIO1_1X_EN 91
+#define CLK_SDIO2_2X_EN 92
+#define CLK_SDIO2_1X_EN 93
+#define CLK_EMMC_2X_EN 94
+#define CLK_EMMC_1X_EN 95
+#define CLK_PLL_TEST_EN 96
+#define CLK_CPHY_CFG_EN 97
+#define CLK_DEBUG_TS_EN 98
+#define CLK_ACCESS_AUD_EN 99
+#define CLK_AON_APB_GATE_NUM (CLK_ACCESS_AUD_EN + 1)
+
+#define CLK_MM_CPP_EB 0
+#define CLK_MM_JPG_EB 1
+#define CLK_MM_DCAM_EB 2
+#define CLK_MM_ISP_EB 3
+#define CLK_MM_CSI2_EB 4
+#define CLK_MM_CSI1_EB 5
+#define CLK_MM_CSI0_EB 6
+#define CLK_MM_CKG_EB 7
+#define CLK_ISP_AHB_EB 8
+#define CLK_MM_DVFS_EB 9
+#define CLK_MM_FD_EB 10
+#define CLK_MM_SENSOR2_EB 11
+#define CLK_MM_SENSOR1_EB 12
+#define CLK_MM_SENSOR0_EB 13
+#define CLK_MM_MIPI_CSI2_EB 14
+#define CLK_MM_MIPI_CSI1_EB 15
+#define CLK_MM_MIPI_CSI0_EB 16
+#define CLK_DCAM_AXI_EB 17
+#define CLK_ISP_AXI_EB 18
+#define CLK_MM_CPHY_EB 19
+#define CLK_MM_GATE_CLK_NUM (CLK_MM_CPHY_EB + 1)
+
+#define CLK_SIM0_EB 0
+#define CLK_IIS0_EB 1
+#define CLK_IIS1_EB 2
+#define CLK_IIS2_EB 3
+#define CLK_APB_REG_EB 4
+#define CLK_SPI0_EB 5
+#define CLK_SPI1_EB 6
+#define CLK_SPI2_EB 7
+#define CLK_SPI3_EB 8
+#define CLK_I2C0_EB 9
+#define CLK_I2C1_EB 10
+#define CLK_I2C2_EB 11
+#define CLK_I2C3_EB 12
+#define CLK_I2C4_EB 13
+#define CLK_UART0_EB 14
+#define CLK_UART1_EB 15
+#define CLK_UART2_EB 16
+#define CLK_SIM0_32K_EB 17
+#define CLK_SPI0_LFIN_EB 18
+#define CLK_SPI1_LFIN_EB 19
+#define CLK_SPI2_LFIN_EB 20
+#define CLK_SPI3_LFIN_EB 21
+#define CLK_SDIO0_EB 22
+#define CLK_SDIO1_EB 23
+#define CLK_SDIO2_EB 24
+#define CLK_EMMC_EB 25
+#define CLK_SDIO0_32K_EB 26
+#define CLK_SDIO1_32K_EB 27
+#define CLK_SDIO2_32K_EB 28
+#define CLK_EMMC_32K_EB 29
+#define CLK_AP_APB_GATE_NUM (CLK_EMMC_32K_EB + 1)
+
+#define CLK_GPU_CORE_EB 0
+#define CLK_GPU_CORE 1
+#define CLK_GPU_MEM_EB 2
+#define CLK_GPU_MEM 3
+#define CLK_GPU_SYS_EB 4
+#define CLK_GPU_SYS 5
+#define CLK_GPU_CLK_NUM (CLK_GPU_SYS + 1)
+
+#define CLK_AUDCP_IIS0_EB 0
+#define CLK_AUDCP_IIS1_EB 1
+#define CLK_AUDCP_IIS2_EB 2
+#define CLK_AUDCP_UART_EB 3
+#define CLK_AUDCP_DMA_CP_EB 4
+#define CLK_AUDCP_DMA_AP_EB 5
+#define CLK_AUDCP_SRC48K_EB 6
+#define CLK_AUDCP_MCDT_EB 7
+#define CLK_AUDCP_VBCIFD_EB 8
+#define CLK_AUDCP_VBC_EB 9
+#define CLK_AUDCP_SPLK_EB 10
+#define CLK_AUDCP_ICU_EB 11
+#define CLK_AUDCP_DMA_AP_ASHB_EB 12
+#define CLK_AUDCP_DMA_CP_ASHB_EB 13
+#define CLK_AUDCP_AUD_EB 14
+#define CLK_AUDCP_VBC_24M_EB 15
+#define CLK_AUDCP_TMR_26M_EB 16
+#define CLK_AUDCP_DVFS_ASHB_EB 17
+#define CLK_AUDCP_AHB_GATE_NUM (CLK_AUDCP_DVFS_ASHB_EB + 1)
+
+#define CLK_AUDCP_WDG_EB 0
+#define CLK_AUDCP_RTC_WDG_EB 1
+#define CLK_AUDCP_TMR0_EB 2
+#define CLK_AUDCP_TMR1_EB 3
+#define CLK_AUDCP_APB_GATE_NUM (CLK_AUDCP_TMR1_EB + 1)
+
+#define CLK_ACORE0 0
+#define CLK_ACORE1 1
+#define CLK_ACORE2 2
+#define CLK_ACORE3 3
+#define CLK_ACORE4 4
+#define CLK_ACORE5 5
+#define CLK_PCORE0 6
+#define CLK_PCORE1 7
+#define CLK_SCU 8
+#define CLK_ACE 9
+#define CLK_PERIPH 10
+#define CLK_GIC 11
+#define CLK_ATB 12
+#define CLK_DEBUG_APB 13
+#define CLK_APCPU_SEC_NUM (CLK_DEBUG_APB + 1)
+
+#endif /* _DT_BINDINGS_CLK_UMS512_H_ */
diff --git a/include/dt-bindings/gpio/gpio.h b/include/dt-bindings/gpio/gpio.h
index c029467e828b..5566e58196a2 100644
--- a/include/dt-bindings/gpio/gpio.h
+++ b/include/dt-bindings/gpio/gpio.h
@@ -39,4 +39,7 @@
/* Bit 5 express pull down */
#define GPIO_PULL_DOWN 32
+/* Bit 6 express pull disable */
+#define GPIO_PULL_DISABLE 64
+
#endif
diff --git a/include/dt-bindings/pinctrl/r7s9210-pinctrl.h b/include/dt-bindings/pinctrl/r7s9210-pinctrl.h
index 2d0c23e5d3a7..8736ce038eca 100644
--- a/include/dt-bindings/pinctrl/r7s9210-pinctrl.h
+++ b/include/dt-bindings/pinctrl/r7s9210-pinctrl.h
@@ -42,6 +42,6 @@
/*
* Convert a port and pin label to its global pin index
*/
- #define RZA2_PIN(port, pin) ((port) * RZA2_PINS_PER_PORT + (pin))
+#define RZA2_PIN(port, pin) ((port) * RZA2_PINS_PER_PORT + (pin))
#endif /* __DT_BINDINGS_PINCTRL_RENESAS_RZA2_H */
diff --git a/include/dt-bindings/pinctrl/rzg2l-pinctrl.h b/include/dt-bindings/pinctrl/rzg2l-pinctrl.h
index b48f8c7a5556..c78ed5e5efb7 100644
--- a/include/dt-bindings/pinctrl/rzg2l-pinctrl.h
+++ b/include/dt-bindings/pinctrl/rzg2l-pinctrl.h
@@ -18,6 +18,6 @@
#define RZG2L_PORT_PINMUX(b, p, f) ((b) * RZG2L_PINS_PER_PORT + (p) | ((f) << 16))
/* Convert a port and pin label to its global pin index */
- #define RZG2L_GPIO(port, pin) ((port) * RZG2L_PINS_PER_PORT + (pin))
+#define RZG2L_GPIO(port, pin) ((port) * RZG2L_PINS_PER_PORT + (pin))
#endif /* __DT_BINDINGS_RZG2L_PINCTRL_H */
diff --git a/include/dt-bindings/pinctrl/rzv2m-pinctrl.h b/include/dt-bindings/pinctrl/rzv2m-pinctrl.h
new file mode 100644
index 000000000000..525532cd15da
--- /dev/null
+++ b/include/dt-bindings/pinctrl/rzv2m-pinctrl.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * This header provides constants for Renesas RZ/V2M pinctrl bindings.
+ *
+ * Copyright (C) 2022 Renesas Electronics Corp.
+ *
+ */
+
+#ifndef __DT_BINDINGS_RZV2M_PINCTRL_H
+#define __DT_BINDINGS_RZV2M_PINCTRL_H
+
+#define RZV2M_PINS_PER_PORT 16
+
+/*
+ * Create the pin index from its bank and position numbers and store in
+ * the upper 16 bits the alternate function identifier
+ */
+#define RZV2M_PORT_PINMUX(b, p, f) ((b) * RZV2M_PINS_PER_PORT + (p) | ((f) << 16))
+
+/* Convert a port and pin label to its global pin index */
+#define RZV2M_GPIO(port, pin) ((port) * RZV2M_PINS_PER_PORT + (pin))
+
+#endif /* __DT_BINDINGS_RZV2M_PINCTRL_H */
diff --git a/include/dt-bindings/reset/mt8186-resets.h b/include/dt-bindings/reset/mt8186-resets.h
index 5f850370c42c..2e9029c22f38 100644
--- a/include/dt-bindings/reset/mt8186-resets.h
+++ b/include/dt-bindings/reset/mt8186-resets.h
@@ -7,6 +7,7 @@
#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8186
#define _DT_BINDINGS_RESET_CONTROLLER_MT8186
+/* TOPRGU resets */
#define MT8186_TOPRGU_INFRA_SW_RST 0
#define MT8186_TOPRGU_MM_SW_RST 1
#define MT8186_TOPRGU_MFG_SW_RST 2
@@ -33,4 +34,8 @@
/* MMSYS resets */
#define MT8186_MMSYS_SW0_RST_B_DISP_DSI0 19
+/* INFRA resets */
+#define MT8186_INFRA_THERMAL_CTRL_RST 0
+#define MT8186_INFRA_PTP_CTRL_RST 1
+
#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8186 */
diff --git a/include/dt-bindings/reset/mt8192-resets.h b/include/dt-bindings/reset/mt8192-resets.h
index 764ca9910fa9..12e2087c90a3 100644
--- a/include/dt-bindings/reset/mt8192-resets.h
+++ b/include/dt-bindings/reset/mt8192-resets.h
@@ -7,6 +7,7 @@
#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8192
#define _DT_BINDINGS_RESET_CONTROLLER_MT8192
+/* TOPRGU resets */
#define MT8192_TOPRGU_MM_SW_RST 1
#define MT8192_TOPRGU_MFG_SW_RST 2
#define MT8192_TOPRGU_VENC_SW_RST 3
@@ -30,4 +31,11 @@
/* MMSYS resets */
#define MT8192_MMSYS_SW0_RST_B_DISP_DSI0 15
+/* INFRA resets */
+#define MT8192_INFRA_RST0_THERM_CTRL_SWRST 0
+#define MT8192_INFRA_RST2_PEXTP_PHY_SWRST 1
+#define MT8192_INFRA_RST3_THERM_CTRL_PTP_SWRST 2
+#define MT8192_INFRA_RST4_PCIE_TOP_SWRST 3
+#define MT8192_INFRA_RST4_THERM_CTRL_MCU_SWRST 4
+
#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8192 */
diff --git a/include/dt-bindings/reset/mt8195-resets.h b/include/dt-bindings/reset/mt8195-resets.h
index a26bccc8b957..0b1937f14b36 100644
--- a/include/dt-bindings/reset/mt8195-resets.h
+++ b/include/dt-bindings/reset/mt8195-resets.h
@@ -7,6 +7,7 @@
#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8195
#define _DT_BINDINGS_RESET_CONTROLLER_MT8195
+/* TOPRGU resets */
#define MT8195_TOPRGU_CONN_MCU_SW_RST 0
#define MT8195_TOPRGU_INFRA_GRST_SW_RST 1
#define MT8195_TOPRGU_APU_SW_RST 2
@@ -26,4 +27,9 @@
#define MT8195_TOPRGU_SW_RST_NUM 16
+/* INFRA resets */
+#define MT8195_INFRA_RST0_THERM_CTRL_SWRST 0
+#define MT8195_INFRA_RST3_THERM_CTRL_PTP_SWRST 1
+#define MT8195_INFRA_RST4_THERM_CTRL_MCU_SWRST 2
+
#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8195 */
diff --git a/include/dt-bindings/reset/sama7g5-reset.h b/include/dt-bindings/reset/sama7g5-reset.h
new file mode 100644
index 000000000000..2116f41d04e0
--- /dev/null
+++ b/include/dt-bindings/reset/sama7g5-reset.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef __DT_BINDINGS_RESET_SAMA7G5_H
+#define __DT_BINDINGS_RESET_SAMA7G5_H
+
+#define SAMA7G5_RESET_USB_PHY1 4
+#define SAMA7G5_RESET_USB_PHY2 5
+#define SAMA7G5_RESET_USB_PHY3 6
+
+#endif /* __DT_BINDINGS_RESET_SAMA7G5_H */
diff --git a/include/dt-bindings/sound/qcom,wcd9335.h b/include/dt-bindings/sound/qcom,wcd9335.h
new file mode 100644
index 000000000000..f5e9f1db091e
--- /dev/null
+++ b/include/dt-bindings/sound/qcom,wcd9335.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef __DT_SOUND_QCOM_WCD9335_H
+#define __DT_SOUND_QCOM_WCD9335_H
+
+#define AIF1_PB 0
+#define AIF1_CAP 1
+#define AIF2_PB 2
+#define AIF2_CAP 3
+#define AIF3_PB 4
+#define AIF3_CAP 5
+#define AIF4_PB 6
+#define NUM_CODEC_DAIS 7
+
+#endif
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index f6d4539c3895..6f64b2f3dc54 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -764,6 +764,7 @@ static inline u64 acpi_arch_get_root_pointer(void)
#endif
int acpi_get_local_address(acpi_handle handle, u32 *addr);
+const char *acpi_get_subsystem_id(acpi_handle handle);
#else /* !CONFIG_ACPI */
@@ -1025,6 +1026,11 @@ static inline int acpi_get_local_address(acpi_handle handle, u32 *addr)
return -ENODEV;
}
+static inline const char *acpi_get_subsystem_id(acpi_handle handle)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline int acpi_register_wakeup_handler(int wake_irq,
bool (*wakeup)(void *context), void *context)
{
@@ -1245,7 +1251,7 @@ static inline bool acpi_dev_has_props(const struct acpi_device *adev)
struct acpi_device_properties *
acpi_data_add_props(struct acpi_device_data *data, const guid_t *guid,
- const union acpi_object *properties);
+ union acpi_object *properties);
int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname,
void **valptr);
diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h
index f1f0842a2cb2..b43be0987b19 100644
--- a/include/linux/acpi_iort.h
+++ b/include/linux/acpi_iort.h
@@ -33,10 +33,14 @@ struct irq_domain *iort_get_device_domain(struct device *dev, u32 id,
enum irq_domain_bus_token bus_token);
void acpi_configure_pmsi_domain(struct device *dev);
int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id);
+void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode,
+ struct list_head *head);
+void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode,
+ struct list_head *head);
/* IOMMU interface */
int iort_dma_get_ranges(struct device *dev, u64 *size);
int iort_iommu_configure_id(struct device *dev, const u32 *id_in);
-int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head);
+void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head);
phys_addr_t acpi_iort_dma_get_max_cpu_address(void);
#else
static inline void acpi_iort_init(void) { }
@@ -46,14 +50,18 @@ static inline struct irq_domain *iort_get_device_domain(
struct device *dev, u32 id, enum irq_domain_bus_token bus_token)
{ return NULL; }
static inline void acpi_configure_pmsi_domain(struct device *dev) { }
+static inline
+void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode, struct list_head *head) { }
+static inline
+void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode, struct list_head *head) { }
/* IOMMU interface */
static inline int iort_dma_get_ranges(struct device *dev, u64 *size)
{ return -ENODEV; }
static inline int iort_iommu_configure_id(struct device *dev, const u32 *id_in)
{ return -ENODEV; }
static inline
-int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
-{ return 0; }
+void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head)
+{ }
static inline phys_addr_t acpi_iort_dma_get_max_cpu_address(void)
{ return PHYS_ADDR_MAX; }
diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h
index 58e6c3806c09..953e6f12fa1c 100644
--- a/include/linux/amd-iommu.h
+++ b/include/linux/amd-iommu.h
@@ -206,4 +206,8 @@ int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn,
u64 *value);
struct amd_iommu *get_amd_iommu(unsigned int idx);
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+int amd_iommu_snp_enable(void);
+#endif
+
#endif /* _ASM_X86_AMD_IOMMU_H */
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 00f7a80f1a3e..3608992848d3 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -285,7 +285,6 @@ static inline int audit_signal_info(int sig, struct task_struct *t)
/* These are defined in auditsc.c */
/* Public API */
extern int audit_alloc(struct task_struct *task);
-extern int audit_alloc_kernel(struct task_struct *task);
extern void __audit_free(struct task_struct *task);
extern void __audit_uring_entry(u8 op);
extern void __audit_uring_exit(int success, long code);
@@ -578,10 +577,6 @@ static inline int audit_alloc(struct task_struct *task)
{
return 0;
}
-static inline int audit_alloc_kernel(struct task_struct *task)
-{
- return 0;
-}
static inline void audit_free(struct task_struct *task)
{ }
static inline void audit_uring_entry(u8 op)
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index e863c88df95f..ae12696ec492 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -28,11 +28,6 @@ enum wb_state {
WB_start_all, /* nr_pages == 0 (all) work pending */
};
-enum wb_congested_state {
- WB_async_congested, /* The async (write) queue is getting full */
- WB_sync_congested, /* The sync queue is getting full */
-};
-
enum wb_stat_item {
WB_RECLAIMABLE,
WB_WRITEBACK,
@@ -122,8 +117,6 @@ struct bdi_writeback {
atomic_t writeback_inodes; /* number of inodes under writeback */
struct percpu_counter stat[NR_WB_STAT_ITEMS];
- unsigned long congested; /* WB_[a]sync_congested flags */
-
unsigned long bw_time_stamp; /* last time write bw is updated */
unsigned long dirtied_stamp;
unsigned long written_stamp; /* pages written at bw_time_stamp */
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index d452071db572..439815cc1ab9 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -140,12 +140,6 @@ static inline bool mapping_can_writeback(struct address_space *mapping)
return inode_to_bdi(mapping->host)->capabilities & BDI_CAP_WRITEBACK;
}
-static inline int bdi_sched_wait(void *word)
-{
- schedule();
- return 0;
-}
-
#ifdef CONFIG_CGROUP_WRITEBACK
struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
@@ -236,18 +230,6 @@ wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
}
/**
- * inode_to_wb_is_valid - test whether an inode has a wb associated
- * @inode: inode of interest
- *
- * Returns %true if @inode has a wb associated. May be called without any
- * locking.
- */
-static inline bool inode_to_wb_is_valid(struct inode *inode)
-{
- return inode->i_wb;
-}
-
-/**
* inode_to_wb - determine the wb of an inode
* @inode: inode of interest
*
@@ -345,11 +327,6 @@ wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
return &bdi->wb;
}
-static inline bool inode_to_wb_is_valid(struct inode *inode)
-{
- return true;
-}
-
static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
return &inode_to_bdi(inode)->wb;
diff --git a/include/linux/base64.h b/include/linux/base64.h
new file mode 100644
index 000000000000..660d4cb1ef31
--- /dev/null
+++ b/include/linux/base64.h
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * base64 encoding, lifted from fs/crypto/fname.c.
+ */
+
+#ifndef _LINUX_BASE64_H
+#define _LINUX_BASE64_H
+
+#include <linux/types.h>
+
+#define BASE64_CHARS(nbytes) DIV_ROUND_UP((nbytes) * 4, 3)
+
+int base64_encode(const u8 *src, int len, char *dst);
+int base64_decode(const char *src, int len, u8 *dst);
+
+#endif /* _LINUX_BASE64_H */
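The new <linux/base64.h> above only carries the declarations; the encoder and decoder themselves live in lib/. A rough usage sketch, sizing the buffer with BASE64_CHARS(); the assumption that base64_encode() returns the number of characters written and appends no NUL should be checked against the lib/ implementation:

#include <linux/base64.h>
#include <linux/slab.h>

static char *foo_encode_blob(const u8 *data, int len)
{
	char *out = kmalloc(BASE64_CHARS(len) + 1, GFP_KERNEL);
	int n;

	if (!out)
		return NULL;

	n = base64_encode(data, len, out);	/* assumed: returns length */
	out[n] = '\0';
	return out;
}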
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 2e6cd5681040..f65410a49fda 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -71,9 +71,9 @@ struct device;
* bitmap_release_region(bitmap, pos, order) Free specified bit region
* bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
* bitmap_from_arr32(dst, buf, nbits) Copy nbits from u32[] buf to dst
+ * bitmap_from_arr64(dst, buf, nbits) Copy nbits from u64[] buf to dst
* bitmap_to_arr32(buf, src, nbits) Copy nbits from buf to u32[] dst
* bitmap_to_arr64(buf, src, nbits) Copy nbits from buf to u64[] dst
- * bitmap_to_arr64(buf, src, nbits) Copy nbits from buf to u64[] dst
* bitmap_get_value8(map, start) Get 8bit value from map at start
* bitmap_set_value8(map, value, start) Set 8bit value to map at start
*
@@ -148,13 +148,13 @@ void __bitmap_shift_left(unsigned long *dst, const unsigned long *src,
unsigned int shift, unsigned int nbits);
void bitmap_cut(unsigned long *dst, const unsigned long *src,
unsigned int first, unsigned int cut, unsigned int nbits);
-int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
-int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
+bool __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
void __bitmap_replace(unsigned long *dst,
const unsigned long *old, const unsigned long *new,
@@ -163,7 +163,7 @@ bool __bitmap_intersects(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
bool __bitmap_subset(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
-int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
+unsigned int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
void __bitmap_set(unsigned long *map, unsigned int start, int len);
void __bitmap_clear(unsigned long *map, unsigned int start, int len);
@@ -238,20 +238,32 @@ extern int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp,
static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
{
unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
- memset(dst, 0, len);
+
+ if (small_const_nbits(nbits))
+ *dst = 0;
+ else
+ memset(dst, 0, len);
}
static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
{
unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
- memset(dst, 0xff, len);
+
+ if (small_const_nbits(nbits))
+ *dst = ~0UL;
+ else
+ memset(dst, 0xff, len);
}
static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
unsigned int nbits)
{
unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
- memcpy(dst, src, len);
+
+ if (small_const_nbits(nbits))
+ *dst = *src;
+ else
+ memcpy(dst, src, len);
}
/*
@@ -303,7 +315,7 @@ void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits);
bitmap_copy_clear_tail((unsigned long *)(buf), (const unsigned long *)(bitmap), (nbits))
#endif
-static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
+static inline bool bitmap_and(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
@@ -329,7 +341,7 @@ static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
__bitmap_xor(dst, src1, src2, nbits);
}
-static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1,
+static inline bool bitmap_andnot(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
@@ -419,7 +431,8 @@ static inline bool bitmap_full(const unsigned long *src, unsigned int nbits)
return find_first_zero_bit(src, nbits) == nbits;
}
-static __always_inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
+static __always_inline
+unsigned int bitmap_weight(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
@@ -431,6 +444,8 @@ static __always_inline void bitmap_set(unsigned long *map, unsigned int start,
{
if (__builtin_constant_p(nbits) && nbits == 1)
__set_bit(start, map);
+ else if (small_const_nbits(start + nbits))
+ *map |= GENMASK(start + nbits - 1, start);
else if (__builtin_constant_p(start & BITMAP_MEM_MASK) &&
IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) &&
__builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
@@ -445,6 +460,8 @@ static __always_inline void bitmap_clear(unsigned long *map, unsigned int start,
{
if (__builtin_constant_p(nbits) && nbits == 1)
__clear_bit(start, map);
+ else if (small_const_nbits(start + nbits))
+ *map &= ~GENMASK(start + nbits - 1, start);
else if (__builtin_constant_p(start & BITMAP_MEM_MASK) &&
IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) &&
__builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
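The bitmap_zero()/bitmap_fill()/bitmap_copy() and bitmap_set()/bitmap_clear() changes above all follow one pattern: when nbits is a compile-time constant that fits in a single long, the call collapses into a plain word operation instead of a memset()/memcpy() or the generic loop. For example (sizes picked for illustration):

#include <linux/bitmap.h>

static void foo_bitmap_example(void)
{
	DECLARE_BITMAP(flags, 16);	/* 16 <= BITS_PER_LONG: one word */

	bitmap_zero(flags, 16);		/* now *flags = 0 */
	bitmap_fill(flags, 16);		/* now *flags = ~0UL */
	bitmap_set(flags, 2, 4);	/* now *flags |= GENMASK(5, 2) */
}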
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 7aaed501f768..3b89c64bcfd8 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -27,11 +27,62 @@ extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);
/*
+ * Defined here because those may be needed by architecture-specific static
+ * inlines.
+ */
+
+#include <asm-generic/bitops/generic-non-atomic.h>
+
+/*
+ * Many architecture-specific non-atomic bitops contain inline asm code and,
+ * because of that, the compiler can't optimize them to compile-time
+ * expressions or constants. In contrast, the generic_*() helpers are defined
+ * in pure C and compilers optimize them well.
+ * Therefore, to make `unsigned long foo = 0; __set_bit(BAR, &foo)` effectively
+ * equal to `unsigned long foo = BIT(BAR)`, pick the generic C alternative when
+ * the arguments can be resolved at compile time. That expression itself is a
+ * constant and doesn't bring any functional changes to the rest of cases.
+ * The casts to `uintptr_t` are needed to mitigate `-Waddress` warnings when
+ * passing a bitmap from .bss or .data (-> `!!addr` is always true).
+ */
+#define bitop(op, nr, addr) \
+ ((__builtin_constant_p(nr) && \
+ __builtin_constant_p((uintptr_t)(addr) != (uintptr_t)NULL) && \
+ (uintptr_t)(addr) != (uintptr_t)NULL && \
+ __builtin_constant_p(*(const unsigned long *)(addr))) ? \
+ const##op(nr, addr) : op(nr, addr))
+
+#define __set_bit(nr, addr) bitop(___set_bit, nr, addr)
+#define __clear_bit(nr, addr) bitop(___clear_bit, nr, addr)
+#define __change_bit(nr, addr) bitop(___change_bit, nr, addr)
+#define __test_and_set_bit(nr, addr) bitop(___test_and_set_bit, nr, addr)
+#define __test_and_clear_bit(nr, addr) bitop(___test_and_clear_bit, nr, addr)
+#define __test_and_change_bit(nr, addr) bitop(___test_and_change_bit, nr, addr)
+#define test_bit(nr, addr) bitop(_test_bit, nr, addr)
+#define test_bit_acquire(nr, addr) bitop(_test_bit_acquire, nr, addr)
+
+/*
* Include this here because some architectures need generic_ffs/fls in
* scope
*/
#include <asm/bitops.h>
+/* Check that the bitops prototypes are sane */
+#define __check_bitop_pr(name) \
+ static_assert(__same_type(arch_##name, generic_##name) && \
+ __same_type(const_##name, generic_##name) && \
+ __same_type(_##name, generic_##name))
+
+__check_bitop_pr(__set_bit);
+__check_bitop_pr(__clear_bit);
+__check_bitop_pr(__change_bit);
+__check_bitop_pr(__test_and_set_bit);
+__check_bitop_pr(__test_and_clear_bit);
+__check_bitop_pr(__test_and_change_bit);
+__check_bitop_pr(test_bit);
+
+#undef __check_bitop_pr
+
static inline int get_bitmask_order(unsigned int count)
{
int order;
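The bitop() wrapper above is what makes the example in the new comment hold: when the bit number, the address and the pointed-to word are all compile-time constants, the const_*() generic helpers are chosen and the whole expression folds away. A sketch of the effect:

#include <linux/bitops.h>

#define BAR	3	/* made-up bit number */

static unsigned long foo_set_bar(void)
{
	unsigned long foo = 0;

	/* all arguments resolve at compile time, so this picks
	 * const___set_bit() and reduces to "return BIT(BAR);" */
	__set_bit(BAR, &foo);
	return foo;
}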
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index effee1dc715a..92294a5fb083 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -857,7 +857,6 @@ void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_complete_request(struct request *rq);
bool blk_mq_complete_request_remote(struct request *rq);
-bool blk_mq_queue_stopped(struct request_queue *q);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index dccdf1551c62..84b13fdd34a7 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -140,6 +140,8 @@ struct gendisk {
struct request_queue *queue;
void *private_data;
+ struct bio_set bio_split;
+
int flags;
unsigned long state;
#define GD_NEED_PART_SCAN 0
@@ -531,7 +533,6 @@ struct request_queue {
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
- struct bio_set bio_split;
struct dentry *debugfs_dir;
struct dentry *sched_debugfs_dir;
@@ -864,9 +865,9 @@ void blk_request_module(dev_t devt);
extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
void submit_bio_noacct(struct bio *bio);
+struct bio *bio_split_to_limits(struct bio *bio);
extern int blk_lld_busy(struct request_queue *q);
-extern void blk_queue_split(struct bio **);
extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
diff --git a/include/linux/bpfptr.h b/include/linux/bpfptr.h
index 46e1757d06a3..79b2f78eec1a 100644
--- a/include/linux/bpfptr.h
+++ b/include/linux/bpfptr.h
@@ -49,7 +49,9 @@ static inline void bpfptr_add(bpfptr_t *bpfptr, size_t val)
static inline int copy_from_bpfptr_offset(void *dst, bpfptr_t src,
size_t offset, size_t size)
{
- return copy_from_sockptr_offset(dst, (sockptr_t) src, offset, size);
+ if (!bpfptr_is_kernel(src))
+ return copy_from_user(dst, src.user + offset, size);
+ return copy_from_kernel_nofault(dst, src.kernel + offset, size);
}
static inline int copy_from_bpfptr(void *dst, bpfptr_t src, size_t size)
@@ -78,7 +80,9 @@ static inline void *kvmemdup_bpfptr(bpfptr_t src, size_t len)
static inline long strncpy_from_bpfptr(char *dst, bpfptr_t src, size_t count)
{
- return strncpy_from_sockptr(dst, (sockptr_t) src, count);
+ if (bpfptr_is_kernel(src))
+ return strncpy_from_kernel_nofault(dst, src.kernel, count);
+ return strncpy_from_user(dst, src.user, count);
}
#endif /* _LINUX_BPFPTR_H */
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 307445d1c69e..089c9ade4325 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -118,7 +118,6 @@ static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \
* of the form "mark_buffer_foo()". These are higher-level functions which
* do something in addition to setting a b_state bit.
*/
-BUFFER_FNS(Uptodate, uptodate)
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
@@ -136,6 +135,30 @@ BUFFER_FNS(Meta, meta)
BUFFER_FNS(Prio, prio)
BUFFER_FNS(Defer_Completion, defer_completion)
+static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
+{
+ /*
+ * make it consistent with folio_mark_uptodate
+ * pairs with smp_load_acquire in buffer_uptodate
+ */
+ smp_mb__before_atomic();
+ set_bit(BH_Uptodate, &bh->b_state);
+}
+
+static __always_inline void clear_buffer_uptodate(struct buffer_head *bh)
+{
+ clear_bit(BH_Uptodate, &bh->b_state);
+}
+
+static __always_inline int buffer_uptodate(const struct buffer_head *bh)
+{
+ /*
+ * make it consistent with folio_test_uptodate
+ * pairs with smp_mb__before_atomic in set_buffer_uptodate
+ */
+ return test_bit_acquire(BH_Uptodate, &bh->b_state);
+}
+
#define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK)
/* If we *know* page->private refers to buffer_heads */
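The open-coded replacements for the BH_Uptodate helpers above add the ordering that folio_mark_uptodate()/folio_test_uptodate() already provide: the writer publishes the buffer contents before setting the flag, and the reader's acquire on the flag orders its later reads of those contents. A sketch of the pattern the barriers protect (function names are made up):

#include <linux/buffer_head.h>

/* writer side, e.g. I/O completion: data first, flag last */
static void foo_io_done(struct buffer_head *bh)
{
	/* ... bh->b_data has been filled in ... */
	set_buffer_uptodate(bh);	/* smp_mb__before_atomic() + set_bit() */
}

/* reader side: the acquire in buffer_uptodate() orders later data reads */
static bool foo_data_ready(struct buffer_head *bh)
{
	return buffer_uptodate(bh);	/* test_bit_acquire() */
}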
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index 86bf82dbd8b8..49586ff26152 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -433,9 +433,9 @@ union ceph_mds_request_args {
__le32 stripe_unit; /* layout for newly created file */
__le32 stripe_count; /* ... */
__le32 object_size;
- __le32 file_replication;
- __le32 mask; /* CEPH_CAP_* */
- __le32 old_size;
+ __le32 pool;
+ __le32 mask; /* CEPH_CAP_* */
+ __le64 old_size;
} __attribute__ ((packed)) open;
struct {
__le32 flags;
@@ -768,7 +768,7 @@ struct ceph_mds_caps {
__le32 xattr_len;
__le64 xattr_version;
- /* filelock */
+ /* a union of non-export and export bodies. */
__le64 size, max_size, truncate_size;
__le32 truncate_seq;
struct ceph_timespec mtime, atime, ctime;
diff --git a/include/linux/ceph/mdsmap.h b/include/linux/ceph/mdsmap.h
index 523fd0452856..4c3e0648dc27 100644
--- a/include/linux/ceph/mdsmap.h
+++ b/include/linux/ceph/mdsmap.h
@@ -25,6 +25,7 @@ struct ceph_mdsmap {
u32 m_session_timeout; /* seconds */
u32 m_session_autoclose; /* seconds */
u64 m_max_file_size;
+ u64 m_max_xattr_size; /* maximum size for xattrs blob */
u32 m_max_mds; /* expected up:active mds number */
u32 m_num_active_mds; /* actual up:active mds number */
u32 possible_max_rank; /* possible max rank index */
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index cba8a6ffc329..fb6be72104df 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -507,9 +507,8 @@ extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
extern void ceph_osdc_get_request(struct ceph_osd_request *req);
extern void ceph_osdc_put_request(struct ceph_osd_request *req);
-extern int ceph_osdc_start_request(struct ceph_osd_client *osdc,
- struct ceph_osd_request *req,
- bool nofail);
+void ceph_osdc_start_request(struct ceph_osd_client *osdc,
+ struct ceph_osd_request *req);
extern void ceph_osdc_cancel_request(struct ceph_osd_request *req);
extern int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *req);
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index ed53bfe7c46c..ac5d0515680e 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -734,11 +734,6 @@ static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
return NULL;
}
-static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
-{
- return NULL;
-}
-
static inline bool cgroup_psi_enabled(void)
{
return false;
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index c10dc4c659e2..1615010aa0ec 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -832,6 +832,25 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name,
NULL, (flags), (reg), (shift), (width), \
(clk_divider_flags), NULL, (lock))
/**
+ * devm_clk_hw_register_divider_parent_hw - register a divider clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_hw: pointer to parent clk
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define devm_clk_hw_register_divider_parent_hw(dev, name, parent_hw, flags, \
+ reg, shift, width, \
+ clk_divider_flags, lock) \
+ __devm_clk_hw_register_divider((dev), NULL, (name), NULL, \
+ (parent_hw), NULL, (flags), (reg), \
+ (shift), (width), (clk_divider_flags), \
+ NULL, (lock))
+/**
* devm_clk_hw_register_divider_table - register a table based divider clock
* with the clock framework (devres variant)
* @dev: device registering this clock
@@ -961,6 +980,13 @@ struct clk *clk_register_mux_table(struct device *dev, const char *name,
(parent_names), NULL, NULL, (flags), (reg), \
(shift), BIT((width)) - 1, (clk_mux_flags), \
NULL, (lock))
+#define devm_clk_hw_register_mux_parent_hws(dev, name, parent_hws, \
+ num_parents, flags, reg, shift, \
+ width, clk_mux_flags, lock) \
+ __devm_clk_hw_register_mux((dev), NULL, (name), (num_parents), NULL, \
+ (parent_hws), NULL, (flags), (reg), \
+ (shift), BIT((width)) - 1, \
+ (clk_mux_flags), NULL, (lock))
int clk_mux_val_to_index(struct clk_hw *hw, const u32 *table, unsigned int flags,
unsigned int val);
@@ -1006,6 +1032,14 @@ struct clk_hw *devm_clk_hw_register_fixed_factor(struct device *dev,
struct clk_hw *devm_clk_hw_register_fixed_factor_index(struct device *dev,
const char *name, unsigned int index, unsigned long flags,
unsigned int mult, unsigned int div);
+
+struct clk_hw *devm_clk_hw_register_fixed_factor_parent_hw(struct device *dev,
+ const char *name, const struct clk_hw *parent_hw,
+ unsigned long flags, unsigned int mult, unsigned int div);
+
+struct clk_hw *clk_hw_register_fixed_factor_parent_hw(struct device *dev,
+ const char *name, const struct clk_hw *parent_hw,
+ unsigned long flags, unsigned int mult, unsigned int div);
/**
* struct clk_fractional_divider - adjustable fractional divider clock
*
@@ -1176,10 +1210,8 @@ int __must_check devm_clk_hw_register(struct device *dev, struct clk_hw *hw);
int __must_check of_clk_hw_register(struct device_node *node, struct clk_hw *hw);
void clk_unregister(struct clk *clk);
-void devm_clk_unregister(struct device *dev, struct clk *clk);
void clk_hw_unregister(struct clk_hw *hw);
-void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw);
/* helper functions */
const char *__clk_get_name(const struct clk *clk);
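Alongside the kerneldoc above, the new *_parent_hw/_parent_hws registration variants let a provider pass an already-registered struct clk_hw as the parent instead of a name lookup. A hedged sketch of the divider variant; the device, register offset, field width and lock are all illustrative:

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(foo_clk_lock);

static int foo_register_div(struct platform_device *pdev,
			    struct clk_hw *pll_hw, void __iomem *base)
{
	struct clk_hw *div_hw;

	/* divider at base + 0x4, bits [2:0], parent given as a clk_hw */
	div_hw = devm_clk_hw_register_divider_parent_hw(&pdev->dev, "foo_div",
							pll_hw, 0, base + 0x4,
							0, 3, 0, &foo_clk_lock);
	return PTR_ERR_OR_ZERO(div_hw);
}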
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 39faa54efe88..c13061cabdfc 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -443,15 +443,16 @@ int __must_check devm_clk_bulk_get_all(struct device *dev,
* @dev: device for clock "consumer"
* @id: clock consumer ID
*
- * Returns a struct clk corresponding to the clock producer, or
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
* valid IS_ERR() condition containing errno. The implementation
* uses @dev and @id to determine the clock consumer, and thereby
* the clock producer. (IOW, @id may be identical strings, but
* clk_get may return different clock producers depending on @dev.)
*
- * Drivers must assume that the clock source is not enabled.
- *
- * devm_clk_get should not be called from within interrupt context.
+ * Drivers must assume that the clock source is neither prepared nor
+ * enabled.
*
* The clock will automatically be freed when the device is unbound
* from the bus.
@@ -459,17 +460,114 @@ int __must_check devm_clk_bulk_get_all(struct device *dev,
struct clk *devm_clk_get(struct device *dev, const char *id);
/**
+ * devm_clk_get_prepared - devm_clk_get() + clk_prepare()
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ *
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. (IOW, @id may be identical strings, but
+ * clk_get may return different clock producers depending on @dev.)
+ *
+ * The returned clk (if valid) is prepared. Drivers must however assume
+ * that the clock is not enabled.
+ *
+ * The clock will automatically be unprepared and freed when the device
+ * is unbound from the bus.
+ */
+struct clk *devm_clk_get_prepared(struct device *dev, const char *id);
+
+/**
+ * devm_clk_get_enabled - devm_clk_get() + clk_prepare_enable()
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ *
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. (IOW, @id may be identical strings, but
+ * clk_get may return different clock producers depending on @dev.)
+ *
+ * The returned clk (if valid) is prepared and enabled.
+ *
+ * The clock will automatically be disabled, unprepared and freed
+ * when the device is unbound from the bus.
+ */
+struct clk *devm_clk_get_enabled(struct device *dev, const char *id);
+
+/**
* devm_clk_get_optional - lookup and obtain a managed reference to an optional
* clock producer.
* @dev: device for clock "consumer"
* @id: clock consumer ID
*
- * Behaves the same as devm_clk_get() except where there is no clock producer.
- * In this case, instead of returning -ENOENT, the function returns NULL.
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. If no such clk is found, it returns NULL
+ * which serves as a dummy clk. That's the only difference compared
+ * to devm_clk_get().
+ *
+ * Drivers must assume that the clock source is neither prepared nor
+ * enabled.
+ *
+ * The clock will automatically be freed when the device is unbound
+ * from the bus.
*/
struct clk *devm_clk_get_optional(struct device *dev, const char *id);
/**
+ * devm_clk_get_optional_prepared - devm_clk_get_optional() + clk_prepare()
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ *
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. If no such clk is found, it returns NULL
+ * which serves as a dummy clk. That's the only difference compared
+ * to devm_clk_get_prepared().
+ *
+ * The returned clk (if valid) is prepared. Drivers must however
+ * assume that the clock is not enabled.
+ *
+ * The clock will automatically be unprepared and freed when the
+ * device is unbound from the bus.
+ */
+struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id);
+
+/**
+ * devm_clk_get_optional_enabled - devm_clk_get_optional() +
+ * clk_prepare_enable()
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ *
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. If no such clk is found, it returns NULL
+ * which serves as a dummy clk. That's the only difference compared
+ * to devm_clk_get_enabled().
+ *
+ * The returned clk (if valid) is prepared and enabled.
+ *
+ * The clock will automatically be disabled, unprepared and freed
+ * when the device is unbound from the bus.
+ */
+struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id);
+
+/**
* devm_get_clk_from_child - lookup and obtain a managed reference to a
* clock producer from child node.
* @dev: device for clock "consumer"
@@ -813,12 +911,36 @@ static inline struct clk *devm_clk_get(struct device *dev, const char *id)
return NULL;
}
+static inline struct clk *devm_clk_get_prepared(struct device *dev,
+ const char *id)
+{
+ return NULL;
+}
+
+static inline struct clk *devm_clk_get_enabled(struct device *dev,
+ const char *id)
+{
+ return NULL;
+}
+
static inline struct clk *devm_clk_get_optional(struct device *dev,
const char *id)
{
return NULL;
}
+static inline struct clk *devm_clk_get_optional_prepared(struct device *dev,
+ const char *id)
+{
+ return NULL;
+}
+
+static inline struct clk *devm_clk_get_optional_enabled(struct device *dev,
+ const char *id)
+{
+ return NULL;
+}
+
static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
struct clk_bulk_data *clks)
{
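
Taken together, the devm_clk_get_prepared()/devm_clk_get_enabled() variants documented above (and their _optional counterparts) let a consumer drop the explicit clk_prepare_enable()/clk_disable_unprepare() pairing. A minimal, hypothetical probe sketch assuming only the declarations shown in this hunk; the driver and the "bus" clock id are illustrative, not part of the patch:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/*
	 * The clock comes back prepared and enabled, and is disabled,
	 * unprepared and put automatically when the device is unbound,
	 * so no clk_disable_unprepare() is needed on any error or
	 * remove() path.
	 */
	clk = devm_clk_get_enabled(&pdev->dev, "bus");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return 0;
}
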
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index a0c55eeaeaf1..9b157b71036f 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -66,17 +66,6 @@
__builtin_unreachable(); \
} while (0)
-/*
- * GCC 'asm goto' miscompiles certain code sequences:
- *
- * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
- *
- * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
- *
- * (asm goto is automatically volatile - the naming reflects this.)
- */
-#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
-
#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP)
#define __HAVE_BUILTIN_BSWAP32__
#define __HAVE_BUILTIN_BSWAP64__
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 01ce94b58b42..7713d7bcdaea 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -240,6 +240,12 @@ static inline void *offset_to_ptr(const int *off)
#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
/*
+ * Whether 'type' is a signed type or an unsigned type. Supports scalar types,
+ * bool and also pointer types.
+ */
+#define is_signed_type(type) (((type)(-1)) < (__force type)1)
+
+/*
* This is needed in functions which generate the stack canary, see
* arch/x86/kernel/smpboot.c::start_secondary() for an example.
*/
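
A quick illustration of what the new is_signed_type() macro evaluates to for a few types; this is only a sanity sketch (compile-time checks via static_assert from <linux/build_bug.h>), not part of the patch. Pointer types likewise evaluate as unsigned on Linux targets.

#include <linux/build_bug.h>
#include <linux/compiler.h>
#include <linux/types.h>

/* Compile-time checks mirroring the macro's stated behaviour. */
static_assert(is_signed_type(int));
static_assert(is_signed_type(s64));
static_assert(!is_signed_type(unsigned long));
static_assert(!is_signed_type(bool));	/* bool counts as unsigned */
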
diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
index d5b9c8d40c18..1518568aaf0f 100644
--- a/include/linux/console_struct.h
+++ b/include/linux/console_struct.h
@@ -17,7 +17,7 @@
#include <linux/vt.h>
#include <linux/workqueue.h>
-struct uni_pagedir;
+struct uni_pagedict;
struct uni_screen;
#define NPAR 16
@@ -157,8 +157,8 @@ struct vc_data {
unsigned int vc_bell_duration; /* Console bell duration */
unsigned short vc_cur_blink_ms; /* Cursor blink duration */
struct vc_data **vc_display_fg; /* [!] Ptr to var holding fg console for this display */
- struct uni_pagedir *vc_uni_pagedir;
- struct uni_pagedir **vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */
+ struct uni_pagedict *uni_pagedict;
+ struct uni_pagedict **uni_pagedict_loc; /* [!] Location of uni_pagedict variable for this console */
struct uni_screen *vc_uni_screen; /* unicode screen content */
/* additional information is in vt_kern.h */
};
diff --git a/include/linux/consolemap.h b/include/linux/consolemap.h
index bcfce748c9d8..c35db4896c37 100644
--- a/include/linux/consolemap.h
+++ b/include/linux/consolemap.h
@@ -7,30 +7,56 @@
#ifndef __LINUX_CONSOLEMAP_H__
#define __LINUX_CONSOLEMAP_H__
-#define LAT1_MAP 0
-#define GRAF_MAP 1
-#define IBMPC_MAP 2
-#define USER_MAP 3
+enum translation_map {
+ LAT1_MAP,
+ GRAF_MAP,
+ IBMPC_MAP,
+ USER_MAP,
+
+ FIRST_MAP = LAT1_MAP,
+ LAST_MAP = USER_MAP,
+};
#include <linux/types.h>
-#ifdef CONFIG_CONSOLE_TRANSLATIONS
struct vc_data;
-extern u16 inverse_translate(const struct vc_data *conp, int glyph,
- int use_unicode);
-extern unsigned short *set_translate(int m, struct vc_data *vc);
-extern int conv_uni_to_pc(struct vc_data *conp, long ucs);
-extern u32 conv_8bit_to_uni(unsigned char c);
-extern int conv_uni_to_8bit(u32 uni);
+#ifdef CONFIG_CONSOLE_TRANSLATIONS
+u16 inverse_translate(const struct vc_data *conp, u16 glyph, bool use_unicode);
+unsigned short *set_translate(enum translation_map m, struct vc_data *vc);
+int conv_uni_to_pc(struct vc_data *conp, long ucs);
+u32 conv_8bit_to_uni(unsigned char c);
+int conv_uni_to_8bit(u32 uni);
void console_map_init(void);
#else
-#define inverse_translate(conp, glyph, uni) ((uint16_t)glyph)
-#define set_translate(m, vc) ((unsigned short *)NULL)
-#define conv_uni_to_pc(conp, ucs) ((int) (ucs > 0xff ? -1: ucs))
-#define conv_8bit_to_uni(c) ((uint32_t)(c))
-#define conv_uni_to_8bit(c) ((int) ((c) & 0xff))
-#define console_map_init(c) do { ; } while (0)
+static inline u16 inverse_translate(const struct vc_data *conp, u16 glyph,
+ bool use_unicode)
+{
+ return glyph;
+}
+
+static inline unsigned short *set_translate(enum translation_map m,
+ struct vc_data *vc)
+{
+ return NULL;
+}
+
+static inline int conv_uni_to_pc(struct vc_data *conp, long ucs)
+{
+ return ucs > 0xff ? -1 : ucs;
+}
+
+static inline u32 conv_8bit_to_uni(unsigned char c)
+{
+ return c;
+}
+
+static inline int conv_uni_to_8bit(u32 uni)
+{
+ return uni & 0xff;
+}
+
+static inline void console_map_init(void) { }
#endif /* CONFIG_CONSOLE_TRANSLATIONS */
#endif /* __LINUX_CONSOLEMAP_H__ */
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 4592d0845941..bd047864c7ac 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -12,6 +12,8 @@
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/bug.h>
+#include <linux/gfp_types.h>
+#include <linux/numa.h>
/* Don't assign or return these: may not be this big! */
typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
@@ -116,85 +118,6 @@ static __always_inline unsigned int cpumask_check(unsigned int cpu)
return cpu;
}
-#if NR_CPUS == 1
-/* Uniprocessor. Assume all masks are "1". */
-static inline unsigned int cpumask_first(const struct cpumask *srcp)
-{
- return 0;
-}
-
-static inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
-{
- return 0;
-}
-
-static inline unsigned int cpumask_first_and(const struct cpumask *srcp1,
- const struct cpumask *srcp2)
-{
- return 0;
-}
-
-static inline unsigned int cpumask_last(const struct cpumask *srcp)
-{
- return 0;
-}
-
-/* Valid inputs for n are -1 and 0. */
-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
-{
- return n+1;
-}
-
-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
-{
- return n+1;
-}
-
-static inline unsigned int cpumask_next_and(int n,
- const struct cpumask *srcp,
- const struct cpumask *andp)
-{
- return n+1;
-}
-
-static inline unsigned int cpumask_next_wrap(int n, const struct cpumask *mask,
- int start, bool wrap)
-{
- /* cpu0 unless stop condition, wrap and at cpu0, then nr_cpumask_bits */
- return (wrap && n == 0);
-}
-
-/* cpu must be a valid cpu, ie 0, so there's no other choice. */
-static inline unsigned int cpumask_any_but(const struct cpumask *mask,
- unsigned int cpu)
-{
- return 1;
-}
-
-static inline unsigned int cpumask_local_spread(unsigned int i, int node)
-{
- return 0;
-}
-
-static inline int cpumask_any_and_distribute(const struct cpumask *src1p,
- const struct cpumask *src2p) {
- return cpumask_first_and(src1p, src2p);
-}
-
-static inline int cpumask_any_distribute(const struct cpumask *srcp)
-{
- return cpumask_first(srcp);
-}
-
-#define for_each_cpu(cpu, mask) \
- for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
-#define for_each_cpu_not(cpu, mask) \
- for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
-#define for_each_cpu_wrap(cpu, mask, start) \
- for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start))
-#define for_each_cpu_and(cpu, mask1, mask2) \
- for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask1, (void)mask2)
-#else
/**
* cpumask_first - get the first cpu in a cpumask
* @srcp: the cpumask pointer
@@ -241,7 +164,21 @@ static inline unsigned int cpumask_last(const struct cpumask *srcp)
return find_last_bit(cpumask_bits(srcp), nr_cpumask_bits);
}
-unsigned int __pure cpumask_next(int n, const struct cpumask *srcp);
+/**
+ * cpumask_next - get the next cpu in a cpumask
+ * @n: the cpu prior to the place to search (ie. return will be > @n)
+ * @srcp: the cpumask pointer
+ *
+ * Returns >= nr_cpu_ids if no further cpus set.
+ */
+static inline
+unsigned int cpumask_next(int n, const struct cpumask *srcp)
+{
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpumask_check(n);
+ return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
+}
/**
* cpumask_next_zero - get the next unset cpu in a cpumask
@@ -258,12 +195,48 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
}
-int __pure cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
-int __pure cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
+#if NR_CPUS == 1
+/* Uniprocessor: there is only one valid CPU */
+static inline unsigned int cpumask_local_spread(unsigned int i, int node)
+{
+ return 0;
+}
+
+static inline unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ return cpumask_first_and(src1p, src2p);
+}
+
+static inline unsigned int cpumask_any_distribute(const struct cpumask *srcp)
+{
+ return cpumask_first(srcp);
+}
+#else
unsigned int cpumask_local_spread(unsigned int i, int node);
-int cpumask_any_and_distribute(const struct cpumask *src1p,
+unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
const struct cpumask *src2p);
-int cpumask_any_distribute(const struct cpumask *srcp);
+unsigned int cpumask_any_distribute(const struct cpumask *srcp);
+#endif /* NR_CPUS */
+
+/**
+ * cpumask_next_and - get the next cpu in *src1p & *src2p
+ * @n: the cpu prior to the place to search (ie. return will be > @n)
+ * @src1p: the first cpumask pointer
+ * @src2p: the second cpumask pointer
+ *
+ * Returns >= nr_cpu_ids if no further cpus set in both.
+ */
+static inline
+unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpumask_check(n);
+ return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
+ nr_cpumask_bits, n + 1);
+}
/**
* for_each_cpu - iterate over every cpu in a mask
@@ -289,7 +262,26 @@ int cpumask_any_distribute(const struct cpumask *srcp);
(cpu) = cpumask_next_zero((cpu), (mask)), \
(cpu) < nr_cpu_ids;)
-extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
+#if NR_CPUS == 1
+static inline
+unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
+{
+ cpumask_check(start);
+ if (n != -1)
+ cpumask_check(n);
+
+ /*
+ * Return the first available CPU when wrapping, or when starting before cpu0,
+ * since there is only one valid option.
+ */
+ if (wrap && n >= 0)
+ return nr_cpumask_bits;
+
+ return cpumask_first(mask);
+}
+#else
+unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
+#endif
/**
* for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
@@ -324,7 +316,26 @@ extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool
for ((cpu) = -1; \
(cpu) = cpumask_next_and((cpu), (mask1), (mask2)), \
(cpu) < nr_cpu_ids;)
-#endif /* SMP */
+
+/**
+ * cpumask_any_but - return a "random" cpu in a cpumask, but not this one.
+ * @mask: the cpumask to search
+ * @cpu: the cpu to ignore.
+ *
+ * Often used to find any cpu but smp_processor_id() in a mask.
+ * Returns >= nr_cpu_ids if no cpus set.
+ */
+static inline
+unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
+{
+ unsigned int i;
+
+ cpumask_check(cpu);
+ for_each_cpu(i, mask)
+ if (i != cpu)
+ break;
+ return i;
+}
#define CPU_BITS_NONE \
{ \
@@ -372,9 +383,9 @@ static __always_inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
* @cpu: cpu number (< nr_cpu_ids)
* @cpumask: the cpumask pointer
*
- * Returns 1 if @cpu is set in @cpumask, else returns 0
+ * Returns true if @cpu is set in @cpumask, else returns false
*/
-static __always_inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
+static __always_inline bool cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
{
return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
}
@@ -384,11 +395,11 @@ static __always_inline int cpumask_test_cpu(int cpu, const struct cpumask *cpuma
* @cpu: cpu number (< nr_cpu_ids)
* @cpumask: the cpumask pointer
*
- * Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0
+ * Returns true if @cpu is set in old bitmap of @cpumask, else returns false
*
* test_and_set_bit wrapper for cpumasks.
*/
-static __always_inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
+static __always_inline bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
{
return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}
@@ -398,11 +409,11 @@ static __always_inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpu
* @cpu: cpu number (< nr_cpu_ids)
* @cpumask: the cpumask pointer
*
- * Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0
+ * Returns true if @cpu is set in old bitmap of @cpumask, else returns false
*
* test_and_clear_bit wrapper for cpumasks.
*/
-static __always_inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
+static __always_inline bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
{
return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}
@@ -431,9 +442,9 @@ static inline void cpumask_clear(struct cpumask *dstp)
* @src1p: the first input
* @src2p: the second input
*
- * If *@dstp is empty, returns 0, else returns 1
+ * If *@dstp is empty, returns false, else returns true
*/
-static inline int cpumask_and(struct cpumask *dstp,
+static inline bool cpumask_and(struct cpumask *dstp,
const struct cpumask *src1p,
const struct cpumask *src2p)
{
@@ -474,9 +485,9 @@ static inline void cpumask_xor(struct cpumask *dstp,
* @src1p: the first input
* @src2p: the second input
*
- * If *@dstp is empty, returns 0, else returns 1
+ * If *@dstp is empty, returns false, else returns true
*/
-static inline int cpumask_andnot(struct cpumask *dstp,
+static inline bool cpumask_andnot(struct cpumask *dstp,
const struct cpumask *src1p,
const struct cpumask *src2p)
{
@@ -539,9 +550,9 @@ static inline bool cpumask_intersects(const struct cpumask *src1p,
* @src1p: the first input
* @src2p: the second input
*
- * Returns 1 if *@src1p is a subset of *@src2p, else returns 0
+ * Returns true if *@src1p is a subset of *@src2p, else returns false
*/
-static inline int cpumask_subset(const struct cpumask *src1p,
+static inline bool cpumask_subset(const struct cpumask *src1p,
const struct cpumask *src2p)
{
return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p),
@@ -743,9 +754,35 @@ typedef struct cpumask *cpumask_var_t;
#define __cpumask_var_read_mostly __read_mostly
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
-bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
-bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
-bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
+
+static inline
+bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
+{
+ return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
+}
+
+/**
+ * alloc_cpumask_var - allocate a struct cpumask
+ * @mask: pointer to cpumask_var_t where the cpumask is returned
+ * @flags: GFP_ flags
+ *
+ * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
+ * a nop returning a constant 1 (in <linux/cpumask.h>).
+ *
+ * See alloc_cpumask_var_node.
+ */
+static inline
+bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+{
+ return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
+}
+
+static inline
+bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+{
+ return alloc_cpumask_var(mask, flags | __GFP_ZERO);
+}
+
void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
void free_cpumask_var(cpumask_var_t mask);
void free_bootmem_cpumask_var(cpumask_var_t mask);
@@ -811,9 +848,16 @@ extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
/* First bits of cpu_bit_bitmap are in fact unset. */
#define cpu_none_mask to_cpumask(cpu_bit_bitmap[0])
+#if NR_CPUS == 1
+/* Uniprocessor: the possible/online/present masks are always "1" */
+#define for_each_possible_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++)
+#define for_each_online_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++)
+#define for_each_present_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++)
+#else
#define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask)
#define for_each_online_cpu(cpu) for_each_cpu((cpu), cpu_online_mask)
#define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask)
+#endif
/* Wrappers for arch boot code to manipulate normally-constant masks */
void init_cpu_present(const struct cpumask *src);
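
Two small illustrative helpers (not from the patch) showing the helpers reworked above: cpumask_any_but() is now a common inline for UP and SMP builds, and zalloc_cpumask_var()/alloc_cpumask_var() are now thin wrappers around alloc_cpumask_var_node() with __GFP_ZERO / NUMA_NO_NODE:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/smp.h>

/* Pick any online CPU other than the calling one (caller is assumed
 * to have preemption disabled around smp_processor_id()). */
static int foo_pick_other_cpu(void)
{
	unsigned int cpu = cpumask_any_but(cpu_online_mask, smp_processor_id());

	return cpu < nr_cpu_ids ? (int)cpu : -ENODEV;
}

/* Allocate a zeroed cpumask_var_t, copy into it and count the set bits. */
static int foo_count_set_cpus(const struct cpumask *src)
{
	cpumask_var_t tmp;
	unsigned int cpu, n = 0;

	if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(tmp, src);
	for_each_cpu(cpu, tmp)
		n++;

	free_cpumask_var(tmp);
	return n;
}
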
diff --git a/include/linux/damon.h b/include/linux/damon.h
index 7c62da31ce4b..7b1f4a488230 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -86,6 +86,8 @@ struct damon_target {
* @DAMOS_PAGEOUT: Call ``madvise()`` for the region with MADV_PAGEOUT.
* @DAMOS_HUGEPAGE: Call ``madvise()`` for the region with MADV_HUGEPAGE.
* @DAMOS_NOHUGEPAGE: Call ``madvise()`` for the region with MADV_NOHUGEPAGE.
+ * @DAMOS_LRU_PRIO: Prioritize the region on its LRU lists.
+ * @DAMOS_LRU_DEPRIO: Deprioritize the region on its LRU lists.
* @DAMOS_STAT: Do nothing but count the stat.
* @NR_DAMOS_ACTIONS: Total number of DAMOS actions
*/
@@ -95,6 +97,8 @@ enum damos_action {
DAMOS_PAGEOUT,
DAMOS_HUGEPAGE,
DAMOS_NOHUGEPAGE,
+ DAMOS_LRU_PRIO,
+ DAMOS_LRU_DEPRIO,
DAMOS_STAT, /* Do nothing but only record the stat */
NR_DAMOS_ACTIONS,
};
@@ -397,7 +401,6 @@ struct damon_callback {
* detail.
*
* @kdamond: Kernel thread who does the monitoring.
- * @kdamond_stop: Notifies whether kdamond should stop.
* @kdamond_lock: Mutex for the synchronizations with @kdamond.
*
* For each monitoring context, one kernel thread for the monitoring is
@@ -406,14 +409,14 @@ struct damon_callback {
* Once started, the monitoring thread runs until explicitly required to be
* terminated or every monitoring target is invalid. The validity of the
* targets is checked via the &damon_operations.target_valid of @ops. The
- * termination can also be explicitly requested by writing non-zero to
- * @kdamond_stop. The thread sets @kdamond to NULL when it terminates.
- * Therefore, users can know whether the monitoring is ongoing or terminated by
- * reading @kdamond. Reads and writes to @kdamond and @kdamond_stop from
- * outside of the monitoring thread must be protected by @kdamond_lock.
+ * termination can also be explicitly requested by calling damon_stop().
+ * The thread sets @kdamond to NULL when it terminates. Therefore, users can
+ * know whether the monitoring is ongoing or terminated by reading @kdamond.
+ * Reads and writes to @kdamond from outside of the monitoring thread must
+ * be protected by @kdamond_lock.
*
- * Note that the monitoring thread protects only @kdamond and @kdamond_stop via
- * @kdamond_lock. Accesses to other fields must be protected by themselves.
+ * Note that the monitoring thread protects only @kdamond via @kdamond_lock.
+ * Accesses to other fields must be protected by themselves.
*
* @ops: Set of monitoring operations for given use cases.
* @callback: Set of callbacks for monitoring events notifications.
@@ -526,6 +529,12 @@ bool damon_is_registered_ops(enum damon_ops_id id);
int damon_register_ops(struct damon_operations *ops);
int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id);
+static inline bool damon_target_has_pid(const struct damon_ctx *ctx)
+{
+ return ctx->ops.id == DAMON_OPS_VADDR || ctx->ops.id == DAMON_OPS_FVADDR;
+}
+
+
int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive);
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs);
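
The new damon_target_has_pid() helper tells callers whether a context's targets carry a struct pid (virtual-address-space monitoring) or are plain physical regions. A hypothetical callback sketch, assuming the existing damon_for_each_target() iterator and the pid field of struct damon_target:

#include <linux/damon.h>
#include <linux/pid.h>
#include <linux/printk.h>

static int foo_after_aggregation(struct damon_ctx *ctx)
{
	struct damon_target *t;

	/* Only dereference ->pid when the targets are address spaces. */
	if (!damon_target_has_pid(ctx))
		return 0;

	damon_for_each_target(t, ctx)
		pr_debug("monitoring pid %d\n", pid_vnr(t->pid));

	return 0;
}
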
diff --git a/include/linux/dax.h b/include/linux/dax.h
index e7b81634c52a..ba985333e26b 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -43,8 +43,21 @@ struct dax_operations {
void *addr, size_t bytes, struct iov_iter *iter);
};
+struct dax_holder_operations {
+ /*
+ * notify_failure - notify a memory failure to the holder device
+ * @dax_dev: the dax device that contains the holder
+ * @offset: offset on this dax device where the memory failure occurs
+ * @len: length of this memory failure event
+ * @mf_flags: action flags for the memory failure handler
+ */
+ int (*notify_failure)(struct dax_device *dax_dev, u64 offset,
+ u64 len, int mf_flags);
+};
+
#if IS_ENABLED(CONFIG_DAX)
struct dax_device *alloc_dax(void *private, const struct dax_operations *ops);
+void *dax_holder(struct dax_device *dax_dev);
void put_dax(struct dax_device *dax_dev);
void kill_dax(struct dax_device *dax_dev);
void dax_write_cache(struct dax_device *dax_dev, bool wc);
@@ -66,6 +79,10 @@ static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
return dax_synchronous(dax_dev);
}
#else
+static inline void *dax_holder(struct dax_device *dax_dev)
+{
+ return NULL;
+}
static inline struct dax_device *alloc_dax(void *private,
const struct dax_operations *ops)
{
@@ -114,12 +131,9 @@ struct writeback_control;
#if defined(CONFIG_BLOCK) && defined(CONFIG_FS_DAX)
int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk);
void dax_remove_host(struct gendisk *disk);
-struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev,
- u64 *start_off);
-static inline void fs_put_dax(struct dax_device *dax_dev)
-{
- put_dax(dax_dev);
-}
+struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, u64 *start_off,
+ void *holder, const struct dax_holder_operations *ops);
+void fs_put_dax(struct dax_device *dax_dev, void *holder);
#else
static inline int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk)
{
@@ -129,11 +143,12 @@ static inline void dax_remove_host(struct gendisk *disk)
{
}
static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev,
- u64 *start_off)
+ u64 *start_off, void *holder,
+ const struct dax_holder_operations *ops)
{
return NULL;
}
-static inline void fs_put_dax(struct dax_device *dax_dev)
+static inline void fs_put_dax(struct dax_device *dax_dev, void *holder)
{
}
#endif /* CONFIG_BLOCK && CONFIG_FS_DAX */
@@ -146,6 +161,10 @@ struct page *dax_layout_busy_page(struct address_space *mapping);
struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t start, loff_t end);
dax_entry_t dax_lock_page(struct page *page);
void dax_unlock_page(struct page *page, dax_entry_t cookie);
+dax_entry_t dax_lock_mapping_entry(struct address_space *mapping,
+ unsigned long index, struct page **page);
+void dax_unlock_mapping_entry(struct address_space *mapping,
+ unsigned long index, dax_entry_t cookie);
#else
static inline struct page *dax_layout_busy_page(struct address_space *mapping)
{
@@ -173,6 +192,17 @@ static inline dax_entry_t dax_lock_page(struct page *page)
static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
}
+
+static inline dax_entry_t dax_lock_mapping_entry(struct address_space *mapping,
+ unsigned long index, struct page **page)
+{
+ return 0;
+}
+
+static inline void dax_unlock_mapping_entry(struct address_space *mapping,
+ unsigned long index, dax_entry_t cookie)
+{
+}
#endif
int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
@@ -203,6 +233,8 @@ size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t bytes, struct iov_iter *i);
int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
size_t nr_pages);
+int dax_holder_notify_failure(struct dax_device *dax_dev, u64 off, u64 len,
+ int mf_flags);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
@@ -214,6 +246,14 @@ vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
pgoff_t index);
+int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
+ struct inode *dest, loff_t destoff,
+ loff_t len, bool *is_same,
+ const struct iomap_ops *ops);
+int dax_remap_file_range_prep(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t *len, unsigned int remap_flags,
+ const struct iomap_ops *ops);
static inline bool dax_mapping(struct address_space *mapping)
{
return mapping->host && IS_DAX(mapping->host);
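
The holder plumbing above lets a filesystem register itself against the dax device and receive memory-failure notifications. A hypothetical filesystem-side sketch; struct foo_sb and the handler body are illustrative, only the registration shape follows the new declarations (the matching release would be fs_put_dax(dax_dev, sb)):

#include <linux/blkdev.h>
#include <linux/dax.h>
#include <linux/errno.h>

struct foo_sb;	/* hypothetical filesystem state */

static int foo_dax_notify_failure(struct dax_device *dax_dev, u64 offset,
				  u64 len, int mf_flags)
{
	struct foo_sb *sb = dax_holder(dax_dev);

	/* a real filesystem would map [offset, offset + len) back to files */
	return sb ? 0 : -EOPNOTSUPP;
}

static const struct dax_holder_operations foo_dax_holder_ops = {
	.notify_failure	= foo_dax_notify_failure,
};

static struct dax_device *foo_attach_dax(struct block_device *bdev,
					 struct foo_sb *sb, u64 *start_off)
{
	return fs_dax_get_by_bdev(bdev, start_off, sb, &foo_dax_holder_ops);
}
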
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index c73e5e327e76..92c78ed02b54 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -233,6 +233,8 @@ extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *,
wait_queue_head_t *);
extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
+extern bool d_same_name(const struct dentry *dentry, const struct dentry *parent,
+ const struct qstr *name);
extern struct dentry * d_exact_alias(struct dentry *, struct inode *);
extern struct dentry *d_find_any_alias(struct inode *inode);
extern struct dentry * d_obtain_alias(struct inode *);
diff --git a/include/linux/dm-bufio.h b/include/linux/dm-bufio.h
index 90bd558a17f5..15d9e15ca830 100644
--- a/include/linux/dm-bufio.h
+++ b/include/linux/dm-bufio.h
@@ -18,13 +18,19 @@ struct dm_bufio_client;
struct dm_buffer;
/*
+ * Flags for dm_bufio_client_create
+ */
+#define DM_BUFIO_CLIENT_NO_SLEEP 0x1
+
+/*
* Create a buffered IO cache on a given device
*/
struct dm_bufio_client *
dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
unsigned reserved_buffers, unsigned aux_size,
void (*alloc_callback)(struct dm_buffer *),
- void (*write_callback)(struct dm_buffer *));
+ void (*write_callback)(struct dm_buffer *),
+ unsigned int flags);
/*
* Release a buffered IO cache.
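
With the new flags argument, a client that must issue buffer I/O from atomic context can request the no-sleep behaviour at creation time. An illustrative call only; the block size and buffer counts are made up, and callbacks are left NULL as the interface permits:

#include <linux/blkdev.h>
#include <linux/dm-bufio.h>

static struct dm_bufio_client *foo_create_client(struct block_device *bdev)
{
	/* 4 KiB blocks, one reserved buffer, no aux data, no callbacks */
	return dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL,
				      DM_BUFIO_CLIENT_NO_SLEEP);
}
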
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index 0d5b06b3a4a6..d678afeb8a13 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -11,7 +11,17 @@
struct cma;
+/*
+ * Values for struct dma_map_ops.flags:
+ *
+ * DMA_F_PCI_P2PDMA_SUPPORTED: Indicates the dma_map_ops implementation can
+ * handle PCI P2PDMA pages in the map_sg/unmap_sg operation.
+ */
+#define DMA_F_PCI_P2PDMA_SUPPORTED (1 << 0)
+
struct dma_map_ops {
+ unsigned int flags;
+
void *(*alloc)(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
unsigned long attrs);
@@ -69,6 +79,7 @@ struct dma_map_ops {
int (*dma_supported)(struct device *dev, u64 mask);
u64 (*get_required_mask)(struct device *dev);
size_t (*max_mapping_size)(struct device *dev);
+ size_t (*opt_mapping_size)(void);
unsigned long (*get_merge_boundary)(struct device *dev);
};
@@ -166,6 +177,7 @@ static inline void dma_pernuma_cma_reserve(void) { }
#ifdef CONFIG_DMA_DECLARE_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
dma_addr_t device_addr, size_t size);
+void dma_release_coherent_memory(struct device *dev);
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
@@ -177,9 +189,11 @@ static inline int dma_declare_coherent_memory(struct device *dev,
{
return -ENOSYS;
}
+
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
+static inline void dma_release_coherent_memory(struct device *dev) { }
#endif /* CONFIG_DMA_DECLARE_COHERENT */
#ifdef CONFIG_DMA_GLOBAL_POOL
@@ -379,4 +393,57 @@ static inline void debug_dma_dump_mappings(struct device *dev)
extern const struct dma_map_ops dma_dummy_ops;
+enum pci_p2pdma_map_type {
+ /*
+ * PCI_P2PDMA_MAP_UNKNOWN: Used internally for indicating the mapping
+ * type hasn't been calculated yet. Functions that return this enum
+ * never return this value.
+ */
+ PCI_P2PDMA_MAP_UNKNOWN = 0,
+
+ /*
+ * PCI_P2PDMA_MAP_NOT_SUPPORTED: Indicates the transaction will
+ * traverse the host bridge and the host bridge is not in the
+ * allowlist. DMA Mapping routines should return an error when
+ * this is returned.
+ */
+ PCI_P2PDMA_MAP_NOT_SUPPORTED,
+
+ /*
+ * PCI_P2PDMA_BUS_ADDR: Indicates that two devices can talk to
+ * each other directly through a PCI switch and the transaction will
+ * not traverse the host bridge. Such a mapping should program
+ * the DMA engine with PCI bus addresses.
+ */
+ PCI_P2PDMA_MAP_BUS_ADDR,
+
+ /*
+ * PCI_P2PDMA_MAP_THRU_HOST_BRIDGE: Indicates two devices can talk
+ * to each other, but the transaction traverses a host bridge on the
+ * allowlist. In this case, a normal mapping either with CPU physical
+ * addresses (in the case of dma-direct) or IOVA addresses (in the
+ * case of IOMMUs) should be used to program the DMA engine.
+ */
+ PCI_P2PDMA_MAP_THRU_HOST_BRIDGE,
+};
+
+struct pci_p2pdma_map_state {
+ struct dev_pagemap *pgmap;
+ int map;
+ u64 bus_off;
+};
+
+#ifdef CONFIG_PCI_P2PDMA
+enum pci_p2pdma_map_type
+pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
+ struct scatterlist *sg);
+#else /* CONFIG_PCI_P2PDMA */
+static inline enum pci_p2pdma_map_type
+pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
+ struct scatterlist *sg)
+{
+ return PCI_P2PDMA_MAP_NOT_SUPPORTED;
+}
+#endif /* CONFIG_PCI_P2PDMA */
+
#endif /* _LINUX_DMA_MAP_OPS_H */
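
The enum and helper above are meant to be consumed from a dma_map_ops::map_sg implementation. A rough sketch of that pattern, assuming is_pci_p2pdma_page() from <linux/memremap.h>; foo_map_one() is a hypothetical stand-in for the arch's normal per-segment mapping:

#include <linux/dma-map-ops.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/memremap.h>
#include <linux/scatterlist.h>

/* hypothetical stand-in for mapping an ordinary (non-P2PDMA) segment */
static void foo_map_one(struct device *dev, struct scatterlist *sg,
			enum dma_data_direction dir)
{
	sg->dma_address = sg_phys(sg);		/* placeholder only */
	sg_dma_len(sg) = sg->length;
}

static int foo_map_sg(struct device *dev, struct scatterlist *sgl,
		      int nents, enum dma_data_direction dir)
{
	struct pci_p2pdma_map_state p2pdma_state = {};
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (is_pci_p2pdma_page(sg_page(sg))) {
			switch (pci_p2pdma_map_segment(&p2pdma_state, dev, sg)) {
			case PCI_P2PDMA_MAP_BUS_ADDR:
				continue;	/* segment already holds a bus address */
			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
				break;		/* fall through to a normal mapping */
			default:
				return -EREMOTEIO;
			}
		}
		foo_map_one(dev, sg, dir);
	}
	return nents;
}
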
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index dca2b1355bb1..25a30906289d 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -140,10 +140,12 @@ int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
unsigned long attrs);
bool dma_can_mmap(struct device *dev);
int dma_supported(struct device *dev, u64 mask);
+bool dma_pci_p2pdma_supported(struct device *dev);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
+size_t dma_opt_mapping_size(struct device *dev);
bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
unsigned long dma_get_merge_boundary(struct device *dev);
struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
@@ -250,6 +252,10 @@ static inline int dma_supported(struct device *dev, u64 mask)
{
return 0;
}
+static inline bool dma_pci_p2pdma_supported(struct device *dev)
+{
+ return false;
+}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
return -EIO;
@@ -266,6 +272,10 @@ static inline size_t dma_max_mapping_size(struct device *dev)
{
return 0;
}
+static inline size_t dma_opt_mapping_size(struct device *dev)
+{
+ return 0;
+}
static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
return false;
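
dma_opt_mapping_size() gives drivers an upper bound above which per-mapping cost (for instance IOVA allocation and caching) starts to hurt, so a queue or per-command limit can be clamped against it. A small illustrative helper; "max_bytes" is a hypothetical driver parameter:

#include <linux/dma-mapping.h>
#include <linux/minmax.h>

static unsigned int foo_clamp_transfer_bytes(struct device *dev,
					     unsigned int max_bytes)
{
	return min_t(unsigned int, max_bytes, dma_opt_mapping_size(dev));
}
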
diff --git a/include/linux/dma/edma.h b/include/linux/dma/edma.h
index cab6e18773da..7d8062e9c544 100644
--- a/include/linux/dma/edma.h
+++ b/include/linux/dma/edma.h
@@ -12,19 +12,74 @@
#include <linux/device.h>
#include <linux/dmaengine.h>
+#define EDMA_MAX_WR_CH 8
+#define EDMA_MAX_RD_CH 8
+
struct dw_edma;
+struct dw_edma_region {
+ phys_addr_t paddr;
+ void __iomem *vaddr;
+ size_t sz;
+};
+
+struct dw_edma_core_ops {
+ int (*irq_vector)(struct device *dev, unsigned int nr);
+};
+
+enum dw_edma_map_format {
+ EDMA_MF_EDMA_LEGACY = 0x0,
+ EDMA_MF_EDMA_UNROLL = 0x1,
+ EDMA_MF_HDMA_COMPAT = 0x5
+};
+
+/**
+ * enum dw_edma_chip_flags - Flags specific to an eDMA chip
+ * @DW_EDMA_CHIP_LOCAL: eDMA is used locally by an endpoint
+ */
+enum dw_edma_chip_flags {
+ DW_EDMA_CHIP_LOCAL = BIT(0),
+};
+
/**
* struct dw_edma_chip - representation of DesignWare eDMA controller hardware
* @dev: struct device of the eDMA controller
* @id: instance ID
- * @irq: irq line
- * @dw: struct dw_edma that is filed by dw_edma_probe()
+ * @nr_irqs: total number of DMA IRQs
+ * @ops: DMA channel to IRQ number mapping
+ * @flags: dw_edma_chip_flags
+ * @reg_base: DMA register base address
+ * @ll_wr_cnt: DMA write link list count
+ * @ll_rd_cnt: DMA read link list count
+ * @rg_region: DMA register region
+ * @ll_region_wr: DMA descriptor link list memory for write channel
+ * @ll_region_rd: DMA descriptor link list memory for read channel
+ * @dt_region_wr: DMA data memory for write channel
+ * @dt_region_rd: DMA data memory for read channel
+ * @mf: DMA register map format
+ * @dw: struct dw_edma that is filled by dw_edma_probe()
*/
struct dw_edma_chip {
struct device *dev;
int id;
- int irq;
+ int nr_irqs;
+ const struct dw_edma_core_ops *ops;
+ u32 flags;
+
+ void __iomem *reg_base;
+
+ u16 ll_wr_cnt;
+ u16 ll_rd_cnt;
+ /* link list address */
+ struct dw_edma_region ll_region_wr[EDMA_MAX_WR_CH];
+ struct dw_edma_region ll_region_rd[EDMA_MAX_RD_CH];
+
+ /* data region */
+ struct dw_edma_region dt_region_wr[EDMA_MAX_WR_CH];
+ struct dw_edma_region dt_region_rd[EDMA_MAX_RD_CH];
+
+ enum dw_edma_map_format mf;
+
struct dw_edma *dw;
};
diff --git a/include/linux/dma/imx-dma.h b/include/linux/dma/imx-dma.h
index 8887762360d4..f487a4fa103a 100644
--- a/include/linux/dma/imx-dma.h
+++ b/include/linux/dma/imx-dma.h
@@ -70,6 +70,16 @@ static inline int imx_dma_is_general_purpose(struct dma_chan *chan)
* struct sdma_peripheral_config - SDMA config for audio
* @n_fifos_src: Number of FIFOs for recording
* @n_fifos_dst: Number of FIFOs for playback
+ * @stride_fifos_src: FIFO address stride for recording, 0 means all FIFOs are
+ *                    contiguous, 1 means a one-word stride between FIFOs. The
+ *                    stride must be the same between all FIFOs.
+ * @stride_fifos_dst: FIFO address stride for playback
+ * @words_per_fifo: number of words per FIFO fetch/fill, 1 means
+ *                  one channel per FIFO, 2 means two channels per FIFO.
+ *                  If 'n_fifos_src = 4' and 'words_per_fifo = 2', the
+ *                  first two words (channels) are fetched from FIFO0,
+ *                  the next two from FIFO1, and so on; after the last
+ *                  FIFO3 is fetched, fetching rolls back to FIFO0.
* @sw_done: Use software done. Needed for PDM (micfil)
*
* Some i.MX Audio devices (SAI, micfil) have multiple successive FIFO
@@ -82,6 +92,9 @@ static inline int imx_dma_is_general_purpose(struct dma_chan *chan)
struct sdma_peripheral_config {
int n_fifos_src;
int n_fifos_dst;
+ int stride_fifos_src;
+ int stride_fifos_dst;
+ int words_per_fifo;
bool sw_done;
};
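
A hypothetical audio-driver configuration using the new stride/packing fields: four recording FIFOs spaced one word apart, two channels packed per FIFO. It assumes the existing peripheral_config/peripheral_size fields of struct dma_slave_config; names and values are illustrative:

#include <linux/dma/imx-dma.h>
#include <linux/dmaengine.h>

static int foo_setup_sdma(struct dma_chan *chan, struct dma_slave_config *cfg)
{
	static struct sdma_peripheral_config sdmacfg = {
		.n_fifos_src		= 4,
		.stride_fifos_src	= 1,
		.words_per_fifo		= 2,
		.sw_done		= false,
	};

	cfg->peripheral_config = &sdmacfg;
	cfg->peripheral_size = sizeof(sdmacfg);

	return dmaengine_slave_config(chan, cfg);
}
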
diff --git a/include/linux/dma/qcom-gpi-dma.h b/include/linux/dma/qcom-gpi-dma.h
index f46dc3372f11..6680dd1a43c6 100644
--- a/include/linux/dma/qcom-gpi-dma.h
+++ b/include/linux/dma/qcom-gpi-dma.h
@@ -26,7 +26,7 @@ enum spi_transfer_cmd {
* @clk_div: source clock divider
* @clk_src: serial clock
* @cmd: spi cmd
- * @fragmentation: keep CS assserted at end of sequence
+ * @fragmentation: keep CS asserted at end of sequence
* @cs: chip select toggle
* @set_config: set peripheral config
* @rx_len: receive length for buffer
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index b46b88e6aa0d..c923f4e60f24 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -50,7 +50,6 @@ enum dma_status {
*/
enum dma_transaction_type {
DMA_MEMCPY,
- DMA_MEMCPY_SG,
DMA_XOR,
DMA_PQ,
DMA_XOR_VAL,
@@ -887,11 +886,6 @@ struct dma_device {
struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
size_t len, unsigned long flags);
- struct dma_async_tx_descriptor *(*device_prep_dma_memcpy_sg)(
- struct dma_chan *chan,
- struct scatterlist *dst_sg, unsigned int dst_nents,
- struct scatterlist *src_sg, unsigned int src_nents,
- unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
unsigned int src_cnt, size_t len, unsigned long flags);
@@ -1060,20 +1054,6 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy(
len, flags);
}
-static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy_sg(
- struct dma_chan *chan,
- struct scatterlist *dst_sg, unsigned int dst_nents,
- struct scatterlist *src_sg, unsigned int src_nents,
- unsigned long flags)
-{
- if (!chan || !chan->device || !chan->device->device_prep_dma_memcpy_sg)
- return NULL;
-
- return chan->device->device_prep_dma_memcpy_sg(chan, dst_sg, dst_nents,
- src_sg, src_nents,
- flags);
-}
-
static inline bool dmaengine_is_metadata_mode_supported(struct dma_chan *chan,
enum dma_desc_metadata_mode mode)
{
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index cbd714a198a0..d81a51978d01 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -18,11 +18,7 @@
struct acpi_dmar_header;
-#ifdef CONFIG_X86
-# define DMAR_UNITS_SUPPORTED MAX_IO_APICS
-#else
-# define DMAR_UNITS_SUPPORTED 64
-#endif
+#define DMAR_UNITS_SUPPORTED 1024
/* DMAR Flags */
#define DMAR_INTR_REMAP 0x1
diff --git a/include/linux/firmware/cirrus/cs_dsp.h b/include/linux/firmware/cirrus/cs_dsp.h
index 30055706cce2..cad828e21c72 100644
--- a/include/linux/firmware/cirrus/cs_dsp.h
+++ b/include/linux/firmware/cirrus/cs_dsp.h
@@ -11,6 +11,7 @@
#ifndef __CS_DSP_H
#define __CS_DSP_H
+#include <linux/bits.h>
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/list.h>
@@ -34,6 +35,7 @@
#define CS_ADSP2_REGION_ALL (CS_ADSP2_REGION_0 | CS_ADSP2_REGION_1_9)
#define CS_DSP_DATA_WORD_SIZE 3
+#define CS_DSP_DATA_WORD_BITS (3 * BITS_PER_BYTE)
#define CS_DSP_ACKED_CTL_TIMEOUT_MS 100
#define CS_DSP_ACKED_CTL_N_QUICKPOLLS 10
@@ -189,7 +191,8 @@ struct cs_dsp {
* @control_remove: Called under the pwr_lock when a control is destroyed
* @pre_run: Called under the pwr_lock by cs_dsp_run() before the core is started
* @post_run: Called under the pwr_lock by cs_dsp_run() after the core is started
- * @post_stop: Called under the pwr_lock by cs_dsp_stop()
+ * @pre_stop: Called under the pwr_lock by cs_dsp_stop() before the core is stopped
+ * @post_stop: Called under the pwr_lock by cs_dsp_stop() after the core is stopped
* @watchdog_expired: Called when a watchdog expiry is detected
*
* These callbacks give the cs_dsp client an opportunity to respond to events
@@ -200,6 +203,7 @@ struct cs_dsp_client_ops {
void (*control_remove)(struct cs_dsp_coeff_ctl *ctl);
int (*pre_run)(struct cs_dsp *dsp);
int (*post_run)(struct cs_dsp *dsp);
+ void (*pre_stop)(struct cs_dsp *dsp);
void (*post_stop)(struct cs_dsp *dsp);
void (*watchdog_expired)(struct cs_dsp *dsp);
};
@@ -250,4 +254,75 @@ struct cs_dsp_alg_region *cs_dsp_find_alg_region(struct cs_dsp *dsp,
const char *cs_dsp_mem_region_name(unsigned int type);
+/**
+ * struct cs_dsp_chunk - Describes a buffer holding data formatted for the DSP
+ * @data: Pointer to underlying buffer memory
+ * @max: Pointer to end of the buffer memory
+ * @bytes: Number of bytes read/written into the memory chunk
+ * @cache: Temporary holding data as it is formatted
+ * @cachebits: Number of bits of data currently in cache
+ */
+struct cs_dsp_chunk {
+ u8 *data;
+ u8 *max;
+ int bytes;
+
+ u32 cache;
+ int cachebits;
+};
+
+/**
+ * cs_dsp_chunk() - Create a DSP memory chunk
+ * @data: Pointer to the buffer that will be used to store data
+ * @size: Size of the buffer in bytes
+ *
+ * Return: A cs_dsp_chunk structure
+ */
+static inline struct cs_dsp_chunk cs_dsp_chunk(void *data, int size)
+{
+ struct cs_dsp_chunk ch = {
+ .data = data,
+ .max = data + size,
+ };
+
+ return ch;
+}
+
+/**
+ * cs_dsp_chunk_end() - Check if a DSP memory chunk is full
+ * @ch: Pointer to the chunk structure
+ *
+ * Return: True if the whole buffer has been read/written
+ */
+static inline bool cs_dsp_chunk_end(struct cs_dsp_chunk *ch)
+{
+ return ch->data == ch->max;
+}
+
+/**
+ * cs_dsp_chunk_bytes() - Number of bytes written/read from a DSP memory chunk
+ * @ch: Pointer to the chunk structure
+ *
+ * Return: Number of bytes read/written to the buffer
+ */
+static inline int cs_dsp_chunk_bytes(struct cs_dsp_chunk *ch)
+{
+ return ch->bytes;
+}
+
+/**
+ * cs_dsp_chunk_valid_addr() - Check if an address is in a DSP memory chunk
+ * @ch: Pointer to the chunk structure
+ *
+ * Return: True if the given address is within the buffer
+ */
+static inline bool cs_dsp_chunk_valid_addr(struct cs_dsp_chunk *ch, void *addr)
+{
+ return (u8 *)addr >= ch->data && (u8 *)addr < ch->max;
+}
+
+int cs_dsp_chunk_write(struct cs_dsp_chunk *ch, int nbits, u32 val);
+int cs_dsp_chunk_flush(struct cs_dsp_chunk *ch);
+int cs_dsp_chunk_read(struct cs_dsp_chunk *ch, int nbits);
+
#endif
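
The chunk helpers above build DSP-formatted buffers a bit-field at a time. A small illustrative sketch (not from the patch, and assuming the write/flush helpers return 0 on success) that packs two 24-bit words and reports the resulting byte count:

#include <linux/firmware/cirrus/cs_dsp.h>

static int foo_pack_two_words(u32 a, u32 b)
{
	u8 buf[2 * CS_DSP_DATA_WORD_SIZE];
	struct cs_dsp_chunk ch = cs_dsp_chunk(buf, sizeof(buf));
	int ret;

	ret = cs_dsp_chunk_write(&ch, CS_DSP_DATA_WORD_BITS, a);
	if (!ret)
		ret = cs_dsp_chunk_write(&ch, CS_DSP_DATA_WORD_BITS, b);
	if (!ret)
		ret = cs_dsp_chunk_flush(&ch);	/* pad out any partial byte */
	if (ret < 0)
		return ret;

	return cs_dsp_chunk_bytes(&ch);		/* 6 for two 24-bit words */
}
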
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
index cbde3b1fa414..9f50dacbf7d6 100644
--- a/include/linux/firmware/xlnx-zynqmp.h
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -369,6 +369,11 @@ enum pm_pinctrl_drive_strength {
PM_PINCTRL_DRIVE_STRENGTH_12MA = 3,
};
+enum pm_pinctrl_tri_state {
+ PM_PINCTRL_TRI_STATE_DISABLE = 0,
+ PM_PINCTRL_TRI_STATE_ENABLE = 1,
+};
+
enum zynqmp_pm_shutdown_type {
ZYNQMP_PM_SHUTDOWN_TYPE_SHUTDOWN = 0,
ZYNQMP_PM_SHUTDOWN_TYPE_RESET = 1,
diff --git a/include/linux/fs.h b/include/linux/fs.h
index cc64873d76c5..9eced4cc286e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -74,6 +74,7 @@ struct fsverity_operations;
struct fs_context;
struct fs_parameter_spec;
struct fileattr;
+struct iomap_ops;
extern void __init inode_init(void);
extern void __init inode_init_early(void);
@@ -339,17 +340,12 @@ enum rw_hint {
struct kiocb {
struct file *ki_filp;
-
- /* The 'ki_filp' pointer is shared in a union for aio */
- randomized_struct_fields_start
-
loff_t ki_pos;
void (*ki_complete)(struct kiocb *iocb, long ret);
void *private;
int ki_flags;
u16 ki_ioprio; /* See linux/ioprio.h */
struct wait_page_queue *ki_waitq; /* for async buffered IO */
- randomized_struct_fields_end
};
static inline bool is_sync_kiocb(struct kiocb *kiocb)
@@ -1432,6 +1428,7 @@ extern int send_sigurg(struct fown_struct *fown);
#define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */
#define SB_I_PERSB_BDI 0x00000200 /* has a per-sb bdi */
#define SB_I_TS_EXPIRY_WARNED 0x00000400 /* warned about timestamp range expiry */
+#define SB_I_RETIRED 0x00000800 /* superblock shouldn't be reused */
/* Possible states of 'frozen' field */
enum {
@@ -2033,6 +2030,8 @@ extern long compat_ptr_ioctl(struct file *file, unsigned int cmd,
void inode_init_owner(struct user_namespace *mnt_userns, struct inode *inode,
const struct inode *dir, umode_t mode);
extern bool may_open_dev(const struct path *path);
+umode_t mode_strip_sgid(struct user_namespace *mnt_userns,
+ const struct inode *dir, umode_t mode);
/*
* This is the "filldir" function type, used by readdir() to let
@@ -2200,10 +2199,13 @@ extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *,
extern ssize_t generic_copy_file_range(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
size_t len, unsigned int flags);
-extern int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out,
- loff_t *count,
- unsigned int remap_flags);
+int __generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t *len, unsigned int remap_flags,
+ const struct iomap_ops *dax_read_ops);
+int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t *count, unsigned int remap_flags);
extern loff_t do_clone_file_range(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
loff_t len, unsigned int remap_flags);
@@ -2561,6 +2563,7 @@ extern struct dentry *mount_nodev(struct file_system_type *fs_type,
int flags, void *data,
int (*fill_super)(struct super_block *, void *, int));
extern struct dentry *mount_subtree(struct vfsmount *mnt, const char *path);
+void retire_super(struct super_block *sb);
void generic_shutdown_super(struct super_block *sb);
void kill_block_super(struct super_block *sb);
void kill_anon_super(struct super_block *sb);
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index e60d57c99cb6..7d2f1e0f23b1 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -284,6 +284,7 @@ int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg);
int fscrypt_ioctl_get_policy_ex(struct file *filp, void __user *arg);
int fscrypt_ioctl_get_nonce(struct file *filp, void __user *arg);
int fscrypt_has_permitted_context(struct inode *parent, struct inode *child);
+int fscrypt_context_for_new_inode(void *ctx, struct inode *inode);
int fscrypt_set_context(struct inode *inode, void *fs_data);
struct fscrypt_dummy_policy {
@@ -327,6 +328,10 @@ void fscrypt_free_inode(struct inode *inode);
int fscrypt_drop_inode(struct inode *inode);
/* fname.c */
+int fscrypt_fname_encrypt(const struct inode *inode, const struct qstr *iname,
+ u8 *out, unsigned int olen);
+bool fscrypt_fname_encrypted_size(const struct inode *inode, u32 orig_len,
+ u32 max_len, u32 *encrypted_len_ret);
int fscrypt_setup_filename(struct inode *inode, const struct qstr *iname,
int lookup, struct fscrypt_name *fname);
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 0ace7759acd2..f314be58fa77 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -2,357 +2,13 @@
#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H
-#include <linux/mmdebug.h>
+#include <linux/gfp_types.h>
+
#include <linux/mmzone.h>
-#include <linux/stddef.h>
-#include <linux/linkage.h>
#include <linux/topology.h>
-/* The typedef is in types.h but we want the documentation here */
-#if 0
-/**
- * typedef gfp_t - Memory allocation flags.
- *
- * GFP flags are commonly used throughout Linux to indicate how memory
- * should be allocated. The GFP acronym stands for get_free_pages(),
- * the underlying memory allocation function. Not every GFP flag is
- * supported by every function which may allocate memory. Most users
- * will want to use a plain ``GFP_KERNEL``.
- */
-typedef unsigned int __bitwise gfp_t;
-#endif
-
struct vm_area_struct;
-/*
- * In case of changes, please don't forget to update
- * include/trace/events/mmflags.h and tools/perf/builtin-kmem.c
- */
-
-/* Plain integer GFP bitmasks. Do not use this directly. */
-#define ___GFP_DMA 0x01u
-#define ___GFP_HIGHMEM 0x02u
-#define ___GFP_DMA32 0x04u
-#define ___GFP_MOVABLE 0x08u
-#define ___GFP_RECLAIMABLE 0x10u
-#define ___GFP_HIGH 0x20u
-#define ___GFP_IO 0x40u
-#define ___GFP_FS 0x80u
-#define ___GFP_ZERO 0x100u
-#define ___GFP_ATOMIC 0x200u
-#define ___GFP_DIRECT_RECLAIM 0x400u
-#define ___GFP_KSWAPD_RECLAIM 0x800u
-#define ___GFP_WRITE 0x1000u
-#define ___GFP_NOWARN 0x2000u
-#define ___GFP_RETRY_MAYFAIL 0x4000u
-#define ___GFP_NOFAIL 0x8000u
-#define ___GFP_NORETRY 0x10000u
-#define ___GFP_MEMALLOC 0x20000u
-#define ___GFP_COMP 0x40000u
-#define ___GFP_NOMEMALLOC 0x80000u
-#define ___GFP_HARDWALL 0x100000u
-#define ___GFP_THISNODE 0x200000u
-#define ___GFP_ACCOUNT 0x400000u
-#define ___GFP_ZEROTAGS 0x800000u
-#ifdef CONFIG_KASAN_HW_TAGS
-#define ___GFP_SKIP_ZERO 0x1000000u
-#define ___GFP_SKIP_KASAN_UNPOISON 0x2000000u
-#define ___GFP_SKIP_KASAN_POISON 0x4000000u
-#else
-#define ___GFP_SKIP_ZERO 0
-#define ___GFP_SKIP_KASAN_UNPOISON 0
-#define ___GFP_SKIP_KASAN_POISON 0
-#endif
-#ifdef CONFIG_LOCKDEP
-#define ___GFP_NOLOCKDEP 0x8000000u
-#else
-#define ___GFP_NOLOCKDEP 0
-#endif
-/* If the above are modified, __GFP_BITS_SHIFT may need updating */
-
-/*
- * Physical address zone modifiers (see linux/mmzone.h - low four bits)
- *
- * Do not put any conditional on these. If necessary modify the definitions
- * without the underscores and use them consistently. The definitions here may
- * be used in bit comparisons.
- */
-#define __GFP_DMA ((__force gfp_t)___GFP_DMA)
-#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
-#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
-#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
-#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
-
-/**
- * DOC: Page mobility and placement hints
- *
- * Page mobility and placement hints
- * ---------------------------------
- *
- * These flags provide hints about how mobile the page is. Pages with similar
- * mobility are placed within the same pageblocks to minimise problems due
- * to external fragmentation.
- *
- * %__GFP_MOVABLE (also a zone modifier) indicates that the page can be
- * moved by page migration during memory compaction or can be reclaimed.
- *
- * %__GFP_RECLAIMABLE is used for slab allocations that specify
- * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers.
- *
- * %__GFP_WRITE indicates the caller intends to dirty the page. Where possible,
- * these pages will be spread between local zones to avoid all the dirty
- * pages being in one zone (fair zone allocation policy).
- *
- * %__GFP_HARDWALL enforces the cpuset memory allocation policy.
- *
- * %__GFP_THISNODE forces the allocation to be satisfied from the requested
- * node with no fallbacks or placement policy enforcements.
- *
- * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
- */
-#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
-#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
-#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL)
-#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)
-#define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT)
-
-/**
- * DOC: Watermark modifiers
- *
- * Watermark modifiers -- controls access to emergency reserves
- * ------------------------------------------------------------
- *
- * %__GFP_HIGH indicates that the caller is high-priority and that granting
- * the request is necessary before the system can make forward progress.
- * For example, creating an IO context to clean pages.
- *
- * %__GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is
- * high priority. Users are typically interrupt handlers. This may be
- * used in conjunction with %__GFP_HIGH
- *
- * %__GFP_MEMALLOC allows access to all memory. This should only be used when
- * the caller guarantees the allocation will allow more memory to be freed
- * very shortly e.g. process exiting or swapping. Users either should
- * be the MM or co-ordinating closely with the VM (e.g. swap over NFS).
- * Users of this flag have to be extremely careful to not deplete the reserve
- * completely and implement a throttling mechanism which controls the
- * consumption of the reserve based on the amount of freed memory.
- * Usage of a pre-allocated pool (e.g. mempool) should be always considered
- * before using this flag.
- *
- * %__GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
- * This takes precedence over the %__GFP_MEMALLOC flag if both are set.
- */
-#define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC)
-#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH)
-#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)
-#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC)
-
-/**
- * DOC: Reclaim modifiers
- *
- * Reclaim modifiers
- * -----------------
- * Please note that all the following flags are only applicable to sleepable
- * allocations (e.g. %GFP_NOWAIT and %GFP_ATOMIC will ignore them).
- *
- * %__GFP_IO can start physical IO.
- *
- * %__GFP_FS can call down to the low-level FS. Clearing the flag avoids the
- * allocator recursing into the filesystem which might already be holding
- * locks.
- *
- * %__GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim.
- * This flag can be cleared to avoid unnecessary delays when a fallback
- * option is available.
- *
- * %__GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when
- * the low watermark is reached and have it reclaim pages until the high
- * watermark is reached. A caller may wish to clear this flag when fallback
- * options are available and the reclaim is likely to disrupt the system. The
- * canonical example is THP allocation where a fallback is cheap but
- * reclaim/compaction may cause indirect stalls.
- *
- * %__GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
- *
- * The default allocator behavior depends on the request size. We have a concept
- * of so called costly allocations (with order > %PAGE_ALLOC_COSTLY_ORDER).
- * !costly allocations are too essential to fail so they are implicitly
- * non-failing by default (with some exceptions like OOM victims might fail so
- * the caller still has to check for failures) while costly requests try to be
- * not disruptive and back off even without invoking the OOM killer.
- * The following three modifiers might be used to override some of these
- * implicit rules
- *
- * %__GFP_NORETRY: The VM implementation will try only very lightweight
- * memory direct reclaim to get some memory under memory pressure (thus
- * it can sleep). It will avoid disruptive actions like OOM killer. The
- * caller must handle the failure which is quite likely to happen under
- * heavy memory pressure. The flag is suitable when failure can easily be
- * handled at small cost, such as reduced throughput
- *
- * %__GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim
- * procedures that have previously failed if there is some indication
- * that progress has been made else where. It can wait for other
- * tasks to attempt high level approaches to freeing memory such as
- * compaction (which removes fragmentation) and page-out.
- * There is still a definite limit to the number of retries, but it is
- * a larger limit than with %__GFP_NORETRY.
- * Allocations with this flag may fail, but only when there is
- * genuinely little unused memory. While these allocations do not
- * directly trigger the OOM killer, their failure indicates that
- * the system is likely to need to use the OOM killer soon. The
- * caller must handle failure, but can reasonably do so by failing
- * a higher-level request, or completing it only in a much less
- * efficient manner.
- * If the allocation does fail, and the caller is in a position to
- * free some non-essential memory, doing so could benefit the system
- * as a whole.
- *
- * %__GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
- * cannot handle allocation failures. The allocation could block
- * indefinitely but will never return with failure. Testing for
- * failure is pointless.
- * New users should be evaluated carefully (and the flag should be
- * used only when there is no reasonable failure policy) but it is
- * definitely preferable to use the flag rather than open-code an endless
- * loop around the allocator.
- * Using this flag for costly allocations is _highly_ discouraged.
- */
-#define __GFP_IO ((__force gfp_t)___GFP_IO)
-#define __GFP_FS ((__force gfp_t)___GFP_FS)
-#define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
-#define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
-#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
-#define __GFP_RETRY_MAYFAIL ((__force gfp_t)___GFP_RETRY_MAYFAIL)
-#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL)
-#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY)
-
-/**
- * DOC: Action modifiers
- *
- * Action modifiers
- * ----------------
- *
- * %__GFP_NOWARN suppresses allocation failure reports.
- *
- * %__GFP_COMP addresses compound page metadata.
- *
- * %__GFP_ZERO returns a zeroed page on success.
- *
- * %__GFP_ZEROTAGS zeroes memory tags at allocation time if the memory itself
- * is being zeroed (either via __GFP_ZERO or via init_on_alloc, provided that
- * __GFP_SKIP_ZERO is not set). This flag is intended for optimization: setting
- * memory tags at the same time as zeroing memory has minimal additional
- * performance impact.
- *
- * %__GFP_SKIP_KASAN_UNPOISON makes KASAN skip unpoisoning on page allocation.
- * Only effective in HW_TAGS mode.
- *
- * %__GFP_SKIP_KASAN_POISON makes KASAN skip poisoning on page deallocation.
- * Typically used for userspace pages. Only effective in HW_TAGS mode.
- */
-#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN)
-#define __GFP_COMP ((__force gfp_t)___GFP_COMP)
-#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO)
-#define __GFP_ZEROTAGS ((__force gfp_t)___GFP_ZEROTAGS)
-#define __GFP_SKIP_ZERO ((__force gfp_t)___GFP_SKIP_ZERO)
-#define __GFP_SKIP_KASAN_UNPOISON ((__force gfp_t)___GFP_SKIP_KASAN_UNPOISON)
-#define __GFP_SKIP_KASAN_POISON ((__force gfp_t)___GFP_SKIP_KASAN_POISON)
-
-/* Disable lockdep for GFP context tracking */
-#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
-
-/* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT (27 + IS_ENABLED(CONFIG_LOCKDEP))
-#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
-
-/**
- * DOC: Useful GFP flag combinations
- *
- * Useful GFP flag combinations
- * ----------------------------
- *
- * Useful GFP flag combinations that are commonly used. It is recommended
- * that subsystems start with one of these combinations and then set/clear
- * %__GFP_FOO flags as necessary.
- *
- * %GFP_ATOMIC users can not sleep and need the allocation to succeed. A lower
- * watermark is applied to allow access to "atomic reserves".
- * The current implementation doesn't support NMI and a few other strict
- * non-preemptive contexts (e.g. raw_spin_lock). The same applies to %GFP_NOWAIT.
- *
- * %GFP_KERNEL is typical for kernel-internal allocations. The caller requires
- * %ZONE_NORMAL or a lower zone for direct access but can enter direct reclaim.
- *
- * %GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is
- * accounted to kmemcg.
- *
- * %GFP_NOWAIT is for kernel allocations that should not stall for direct
- * reclaim, start physical IO or use any filesystem callback.
- *
- * %GFP_NOIO will use direct reclaim to discard clean pages or slab pages
- * that do not require the starting of any physical IO.
- * Please try to avoid using this flag directly and instead use
- * memalloc_noio_{save,restore} to mark the whole scope which cannot
- * perform any IO with a short explanation why. All allocation requests
- * will inherit GFP_NOIO implicitly.
- *
- * %GFP_NOFS will use direct reclaim but will not use any filesystem interfaces.
- * Please try to avoid using this flag directly and instead use
- * memalloc_nofs_{save,restore} to mark the whole scope which cannot/shouldn't
- * recurse into the FS layer with a short explanation why. All allocation
- * requests will inherit GFP_NOFS implicitly.
- *
- * %GFP_USER is for userspace allocations that also need to be directly
- * accessible by the kernel or hardware. It is typically used for buffers
- * that are mapped to userspace (e.g. graphics) and that hardware still
- * must DMA to. cpuset limits are enforced for these allocations.
- *
- * %GFP_DMA exists for historical reasons and should be avoided where possible.
- * The flag indicates that the caller requires that the lowest zone be
- * used (%ZONE_DMA or 16M on x86-64). Ideally, this would be removed but
- * it would require careful auditing as some users really require it and
- * others use the flag to avoid lowmem reserves in %ZONE_DMA and treat the
- * lowest zone as a type of emergency reserve.
- *
- * %GFP_DMA32 is similar to %GFP_DMA except that the caller requires a 32-bit
- * address. Note that kmalloc(..., GFP_DMA32) does not return DMA32 memory
- * because the DMA32 kmalloc cache array is not implemented.
- * (Reason: there is no such user in the kernel).
- *
- * %GFP_HIGHUSER is for userspace allocations that may be mapped to userspace,
- * do not need to be directly accessible by the kernel but that cannot
- * move once in use. An example may be a hardware allocation that maps
- * data directly into userspace but has no addressing limitations.
- *
- * %GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not
- * need direct access to but can use kmap() when access is required. They
- * are expected to be movable via page reclaim or page migration. Typically,
- * pages on the LRU would also be allocated with %GFP_HIGHUSER_MOVABLE.
- *
- * %GFP_TRANSHUGE and %GFP_TRANSHUGE_LIGHT are used for THP allocations. They
- * are compound allocations that will generally fail quickly if memory is not
- * available and will not wake kswapd/kcompactd on failure. The _LIGHT
- * version does not attempt reclaim/compaction at all and is used by default
- * in the page fault path, while the non-light version is used by khugepaged.
- */
-#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
-#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
-#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT)
-#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM)
-#define GFP_NOIO (__GFP_RECLAIM)
-#define GFP_NOFS (__GFP_RECLAIM | __GFP_IO)
-#define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
-#define GFP_DMA __GFP_DMA
-#define GFP_DMA32 __GFP_DMA32
-#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM)
-#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE | \
- __GFP_SKIP_KASAN_POISON | __GFP_SKIP_KASAN_UNPOISON)
-#define GFP_TRANSHUGE_LIGHT ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
- __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM)
-#define GFP_TRANSHUGE (GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM)
-
/* Convert GFP flags to their corresponding migrate type */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
#define GFP_MOVABLE_SHIFT 3
diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
new file mode 100644
index 000000000000..d88c46ca82e1
--- /dev/null
+++ b/include/linux/gfp_types.h
@@ -0,0 +1,348 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_GFP_TYPES_H
+#define __LINUX_GFP_TYPES_H
+
+/* The typedef is in types.h but we want the documentation here */
+#if 0
+/**
+ * typedef gfp_t - Memory allocation flags.
+ *
+ * GFP flags are commonly used throughout Linux to indicate how memory
+ * should be allocated. The GFP acronym stands for get_free_pages(),
+ * the underlying memory allocation function. Not every GFP flag is
+ * supported by every function which may allocate memory. Most users
+ * will want to use a plain ``GFP_KERNEL``.
+ */
+typedef unsigned int __bitwise gfp_t;
+#endif
+
+/*
+ * In case of changes, please don't forget to update
+ * include/trace/events/mmflags.h and tools/perf/builtin-kmem.c
+ */
+
+/* Plain integer GFP bitmasks. Do not use this directly. */
+#define ___GFP_DMA 0x01u
+#define ___GFP_HIGHMEM 0x02u
+#define ___GFP_DMA32 0x04u
+#define ___GFP_MOVABLE 0x08u
+#define ___GFP_RECLAIMABLE 0x10u
+#define ___GFP_HIGH 0x20u
+#define ___GFP_IO 0x40u
+#define ___GFP_FS 0x80u
+#define ___GFP_ZERO 0x100u
+#define ___GFP_ATOMIC 0x200u
+#define ___GFP_DIRECT_RECLAIM 0x400u
+#define ___GFP_KSWAPD_RECLAIM 0x800u
+#define ___GFP_WRITE 0x1000u
+#define ___GFP_NOWARN 0x2000u
+#define ___GFP_RETRY_MAYFAIL 0x4000u
+#define ___GFP_NOFAIL 0x8000u
+#define ___GFP_NORETRY 0x10000u
+#define ___GFP_MEMALLOC 0x20000u
+#define ___GFP_COMP 0x40000u
+#define ___GFP_NOMEMALLOC 0x80000u
+#define ___GFP_HARDWALL 0x100000u
+#define ___GFP_THISNODE 0x200000u
+#define ___GFP_ACCOUNT 0x400000u
+#define ___GFP_ZEROTAGS 0x800000u
+#ifdef CONFIG_KASAN_HW_TAGS
+#define ___GFP_SKIP_ZERO 0x1000000u
+#define ___GFP_SKIP_KASAN_UNPOISON 0x2000000u
+#define ___GFP_SKIP_KASAN_POISON 0x4000000u
+#else
+#define ___GFP_SKIP_ZERO 0
+#define ___GFP_SKIP_KASAN_UNPOISON 0
+#define ___GFP_SKIP_KASAN_POISON 0
+#endif
+#ifdef CONFIG_LOCKDEP
+#define ___GFP_NOLOCKDEP 0x8000000u
+#else
+#define ___GFP_NOLOCKDEP 0
+#endif
+/* If the above are modified, __GFP_BITS_SHIFT may need updating */
+
+/*
+ * Physical address zone modifiers (see linux/mmzone.h - low four bits)
+ *
+ * Do not put any conditional on these. If necessary modify the definitions
+ * without the underscores and use them consistently. The definitions here may
+ * be used in bit comparisons.
+ */
+#define __GFP_DMA ((__force gfp_t)___GFP_DMA)
+#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
+#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
+#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
+#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
+
+/**
+ * DOC: Page mobility and placement hints
+ *
+ * Page mobility and placement hints
+ * ---------------------------------
+ *
+ * These flags provide hints about how mobile the page is. Pages with similar
+ * mobility are placed within the same pageblocks to minimise problems due
+ * to external fragmentation.
+ *
+ * %__GFP_MOVABLE (also a zone modifier) indicates that the page can be
+ * moved by page migration during memory compaction or can be reclaimed.
+ *
+ * %__GFP_RECLAIMABLE is used for slab allocations that specify
+ * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers.
+ *
+ * %__GFP_WRITE indicates the caller intends to dirty the page. Where possible,
+ * these pages will be spread between local zones to avoid all the dirty
+ * pages being in one zone (fair zone allocation policy).
+ *
+ * %__GFP_HARDWALL enforces the cpuset memory allocation policy.
+ *
+ * %__GFP_THISNODE forces the allocation to be satisfied from the requested
+ * node with no fallbacks or placement policy enforcements.
+ *
+ * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
+ */
+#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
+#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
+#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL)
+#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)
+#define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT)
+
+/**
+ * DOC: Watermark modifiers
+ *
+ * Watermark modifiers -- controls access to emergency reserves
+ * ------------------------------------------------------------
+ *
+ * %__GFP_HIGH indicates that the caller is high-priority and that granting
+ * the request is necessary before the system can make forward progress.
+ * For example, creating an IO context to clean pages.
+ *
+ * %__GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is
+ * high priority. Users are typically interrupt handlers. This may be
+ * used in conjunction with %__GFP_HIGH.
+ *
+ * %__GFP_MEMALLOC allows access to all memory. This should only be used when
+ * the caller guarantees the allocation will allow more memory to be freed
+ * very shortly e.g. process exiting or swapping. Users should either
+ * be the MM or be coordinating closely with the VM (e.g. swap over NFS).
+ * Users of this flag have to be extremely careful to not deplete the reserve
+ * completely and implement a throttling mechanism which controls the
+ * consumption of the reserve based on the amount of freed memory.
+ * Usage of a pre-allocated pool (e.g. mempool) should always be considered
+ * before using this flag.
+ *
+ * %__GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
+ * This takes precedence over the %__GFP_MEMALLOC flag if both are set.
+ */
+#define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC)
+#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH)
+#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)
+#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC)
+
+/**
+ * DOC: Reclaim modifiers
+ *
+ * Reclaim modifiers
+ * -----------------
+ * Please note that all the following flags are only applicable to sleepable
+ * allocations (e.g. %GFP_NOWAIT and %GFP_ATOMIC will ignore them).
+ *
+ * %__GFP_IO can start physical IO.
+ *
+ * %__GFP_FS can call down to the low-level FS. Clearing the flag avoids the
+ * allocator recursing into the filesystem which might already be holding
+ * locks.
+ *
+ * %__GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim.
+ * This flag can be cleared to avoid unnecessary delays when a fallback
+ * option is available.
+ *
+ * %__GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when
+ * the low watermark is reached and have it reclaim pages until the high
+ * watermark is reached. A caller may wish to clear this flag when fallback
+ * options are available and the reclaim is likely to disrupt the system. The
+ * canonical example is THP allocation where a fallback is cheap but
+ * reclaim/compaction may cause indirect stalls.
+ *
+ * %__GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
+ *
+ * The default allocator behavior depends on the request size. We have a concept
+ * of so-called costly allocations (with order > %PAGE_ALLOC_COSTLY_ORDER).
+ * !costly allocations are too essential to fail so they are implicitly
+ * non-failing by default (with some exceptions, e.g. OOM victims may still
+ * fail, so the caller has to check for failures) while costly requests try
+ * not to be disruptive and back off even without invoking the OOM killer.
+ * The following three modifiers might be used to override some of these
+ * implicit rules:
+ *
+ * %__GFP_NORETRY: The VM implementation will try only very lightweight
+ * memory direct reclaim to get some memory under memory pressure (thus
+ * it can sleep). It will avoid disruptive actions like the OOM killer. The
+ * caller must handle the failure which is quite likely to happen under
+ * heavy memory pressure. The flag is suitable when failure can easily be
+ * handled at small cost, such as reduced throughput.
+ *
+ * %__GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim
+ * procedures that have previously failed if there is some indication
+ * that progress has been made elsewhere. It can wait for other
+ * tasks to attempt high-level approaches to freeing memory such as
+ * compaction (which removes fragmentation) and page-out.
+ * There is still a definite limit to the number of retries, but it is
+ * a larger limit than with %__GFP_NORETRY.
+ * Allocations with this flag may fail, but only when there is
+ * genuinely little unused memory. While these allocations do not
+ * directly trigger the OOM killer, their failure indicates that
+ * the system is likely to need to use the OOM killer soon. The
+ * caller must handle failure, but can reasonably do so by failing
+ * a higher-level request, or completing it only in a much less
+ * efficient manner.
+ * If the allocation does fail, and the caller is in a position to
+ * free some non-essential memory, doing so could benefit the system
+ * as a whole.
+ *
+ * %__GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
+ * cannot handle allocation failures. The allocation could block
+ * indefinitely but will never return with failure. Testing for
+ * failure is pointless.
+ * New users should be evaluated carefully (and the flag should be
+ * used only when there is no reasonable failure policy) but it is
+ * definitely preferable to use the flag rather than open-code an endless
+ * loop around the allocator.
+ * Using this flag for costly allocations is _highly_ discouraged.
+ */
+#define __GFP_IO ((__force gfp_t)___GFP_IO)
+#define __GFP_FS ((__force gfp_t)___GFP_FS)
+#define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
+#define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
+#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
+#define __GFP_RETRY_MAYFAIL ((__force gfp_t)___GFP_RETRY_MAYFAIL)
+#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL)
+#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY)
+
+/**
+ * DOC: Action modifiers
+ *
+ * Action modifiers
+ * ----------------
+ *
+ * %__GFP_NOWARN suppresses allocation failure reports.
+ *
+ * %__GFP_COMP addresses compound page metadata.
+ *
+ * %__GFP_ZERO returns a zeroed page on success.
+ *
+ * %__GFP_ZEROTAGS zeroes memory tags at allocation time if the memory itself
+ * is being zeroed (either via __GFP_ZERO or via init_on_alloc, provided that
+ * __GFP_SKIP_ZERO is not set). This flag is intended for optimization: setting
+ * memory tags at the same time as zeroing memory has minimal additional
+ * performance impact.
+ *
+ * %__GFP_SKIP_KASAN_UNPOISON makes KASAN skip unpoisoning on page allocation.
+ * Only effective in HW_TAGS mode.
+ *
+ * %__GFP_SKIP_KASAN_POISON makes KASAN skip poisoning on page deallocation.
+ * Typically used for userspace pages. Only effective in HW_TAGS mode.
+ */
+#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN)
+#define __GFP_COMP ((__force gfp_t)___GFP_COMP)
+#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO)
+#define __GFP_ZEROTAGS ((__force gfp_t)___GFP_ZEROTAGS)
+#define __GFP_SKIP_ZERO ((__force gfp_t)___GFP_SKIP_ZERO)
+#define __GFP_SKIP_KASAN_UNPOISON ((__force gfp_t)___GFP_SKIP_KASAN_UNPOISON)
+#define __GFP_SKIP_KASAN_POISON ((__force gfp_t)___GFP_SKIP_KASAN_POISON)
+
+/* Disable lockdep for GFP context tracking */
+#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
+
+/* Room for N __GFP_FOO bits */
+#define __GFP_BITS_SHIFT (27 + IS_ENABLED(CONFIG_LOCKDEP))
+#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
+
+/**
+ * DOC: Useful GFP flag combinations
+ *
+ * Useful GFP flag combinations
+ * ----------------------------
+ *
+ * Useful GFP flag combinations that are commonly used. It is recommended
+ * that subsystems start with one of these combinations and then set/clear
+ * %__GFP_FOO flags as necessary.
+ *
+ * %GFP_ATOMIC users can not sleep and need the allocation to succeed. A lower
+ * watermark is applied to allow access to "atomic reserves".
+ * The current implementation doesn't support NMI and a few other strict
+ * non-preemptive contexts (e.g. raw_spin_lock). The same applies to %GFP_NOWAIT.
+ *
+ * %GFP_KERNEL is typical for kernel-internal allocations. The caller requires
+ * %ZONE_NORMAL or a lower zone for direct access but can enter direct reclaim.
+ *
+ * %GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is
+ * accounted to kmemcg.
+ *
+ * %GFP_NOWAIT is for kernel allocations that should not stall for direct
+ * reclaim, start physical IO or use any filesystem callback.
+ *
+ * %GFP_NOIO will use direct reclaim to discard clean pages or slab pages
+ * that do not require the starting of any physical IO.
+ * Please try to avoid using this flag directly and instead use
+ * memalloc_noio_{save,restore} to mark the whole scope which cannot
+ * perform any IO with a short explanation why. All allocation requests
+ * will inherit GFP_NOIO implicitly.
+ *
+ * %GFP_NOFS will use direct reclaim but will not use any filesystem interfaces.
+ * Please try to avoid using this flag directly and instead use
+ * memalloc_nofs_{save,restore} to mark the whole scope which cannot/shouldn't
+ * recurse into the FS layer with a short explanation why. All allocation
+ * requests will inherit GFP_NOFS implicitly.
+ *
+ * %GFP_USER is for userspace allocations that also need to be directly
+ * accessible by the kernel or hardware. It is typically used for buffers
+ * that are mapped to userspace (e.g. graphics) and that hardware still
+ * must DMA to. cpuset limits are enforced for these allocations.
+ *
+ * %GFP_DMA exists for historical reasons and should be avoided where possible.
+ * The flag indicates that the caller requires that the lowest zone be
+ * used (%ZONE_DMA or 16M on x86-64). Ideally, this would be removed but
+ * it would require careful auditing as some users really require it and
+ * others use the flag to avoid lowmem reserves in %ZONE_DMA and treat the
+ * lowest zone as a type of emergency reserve.
+ *
+ * %GFP_DMA32 is similar to %GFP_DMA except that the caller requires a 32-bit
+ * address. Note that kmalloc(..., GFP_DMA32) does not return DMA32 memory
+ * because the DMA32 kmalloc cache array is not implemented.
+ * (Reason: there is no such user in the kernel).
+ *
+ * %GFP_HIGHUSER is for userspace allocations that may be mapped to userspace,
+ * do not need to be directly accessible by the kernel but that cannot
+ * move once in use. An example may be a hardware allocation that maps
+ * data directly into userspace but has no addressing limitations.
+ *
+ * %GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not
+ * need direct access to but can use kmap() when access is required. They
+ * are expected to be movable via page reclaim or page migration. Typically,
+ * pages on the LRU would also be allocated with %GFP_HIGHUSER_MOVABLE.
+ *
+ * %GFP_TRANSHUGE and %GFP_TRANSHUGE_LIGHT are used for THP allocations. They
+ * are compound allocations that will generally fail quickly if memory is not
+ * available and will not wake kswapd/kcompactd on failure. The _LIGHT
+ * version does not attempt reclaim/compaction at all and is used by default
+ * in the page fault path, while the non-light version is used by khugepaged.
+ */
+#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
+#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
+#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT)
+#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM)
+#define GFP_NOIO (__GFP_RECLAIM)
+#define GFP_NOFS (__GFP_RECLAIM | __GFP_IO)
+#define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
+#define GFP_DMA __GFP_DMA
+#define GFP_DMA32 __GFP_DMA32
+#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM)
+#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE | \
+ __GFP_SKIP_KASAN_POISON | __GFP_SKIP_KASAN_UNPOISON)
+#define GFP_TRANSHUGE_LIGHT ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
+ __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM)
+#define GFP_TRANSHUGE (GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM)
+
+#endif /* __LINUX_GFP_TYPES_H */
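The flag combinations documented above are easiest to see in context. Below is a minimal illustrative sketch, not part of this patch: struct my_ctx and alloc_my_ctx() are hypothetical names, while kzalloc(), GFP_KERNEL and GFP_ATOMIC are the standard allocator interfaces the kerneldoc describes.

#include <linux/slab.h>
#include <linux/gfp.h>

struct my_ctx {
	int id;
};

/* GFP_KERNEL may sleep and enter direct reclaim; GFP_ATOMIC may not. */
static struct my_ctx *alloc_my_ctx(bool atomic_context)
{
	gfp_t flags = atomic_context ? GFP_ATOMIC : GFP_KERNEL;

	return kzalloc(sizeof(struct my_ctx), flags);
}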
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 008ad3ee56b7..a370387fa406 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -95,7 +95,6 @@ struct device;
int devm_gpio_request(struct device *dev, unsigned gpio, const char *label);
int devm_gpio_request_one(struct device *dev, unsigned gpio,
unsigned long flags, const char *label);
-void devm_gpio_free(struct device *dev, unsigned int gpio);
#else /* ! CONFIG_GPIOLIB */
@@ -240,11 +239,6 @@ static inline int devm_gpio_request_one(struct device *dev, unsigned gpio,
return -EINVAL;
}
-static inline void devm_gpio_free(struct device *dev, unsigned int gpio)
-{
- WARN_ON(1);
-}
-
#endif /* ! CONFIG_GPIOLIB */
#endif /* __LINUX_GPIO_H */
diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h
index 4d55da28e664..0b619eb7ae83 100644
--- a/include/linux/gpio/machine.h
+++ b/include/linux/gpio/machine.h
@@ -14,6 +14,7 @@ enum gpio_lookup_flags {
GPIO_TRANSITORY = (1 << 3),
GPIO_PULL_UP = (1 << 4),
GPIO_PULL_DOWN = (1 << 5),
+ GPIO_PULL_DISABLE = (1 << 6),
GPIO_LOOKUP_FLAGS_DEFAULT = GPIO_ACTIVE_HIGH | GPIO_PERSISTENT,
};
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 56d6a0196534..25679035ca28 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -60,11 +60,11 @@ static inline void kmap_flush_unused(void);
/**
* kmap_local_page - Map a page for temporary usage
- * @page: Pointer to the page to be mapped
+ * @page: Pointer to the page to be mapped
*
* Returns: The virtual address of the mapping
*
- * Can be invoked from any context.
+ * Can be invoked from any context, including interrupts.
*
* Requires careful handling when nesting multiple mappings because the map
* management is stack based. The unmap has to be in the reverse order of
@@ -86,8 +86,7 @@ static inline void kmap_flush_unused(void);
* temporarily mapped.
*
* While it is significantly faster than kmap() for the highmem case it
- * comes with restrictions about the pointer validity. Only use when really
- * necessary.
+ * comes with restrictions about the pointer validity.
*
* On HIGHMEM enabled systems mapping a highmem page has the side effect of
* disabling migration in order to keep the virtual address stable across
@@ -243,6 +242,16 @@ static inline void clear_highpage(struct page *page)
kunmap_local(kaddr);
}
+static inline void clear_highpage_kasan_tagged(struct page *page)
+{
+ u8 tag;
+
+ tag = page_kasan_tag(page);
+ page_kasan_tag_reset(page);
+ clear_highpage(page);
+ page_kasan_tag_set(page, tag);
+}
+
#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE
static inline void tag_clear_highpage(struct page *page)
@@ -336,19 +345,6 @@ static inline void memcpy_page(struct page *dst_page, size_t dst_off,
kunmap_local(dst);
}
-static inline void memmove_page(struct page *dst_page, size_t dst_off,
- struct page *src_page, size_t src_off,
- size_t len)
-{
- char *dst = kmap_local_page(dst_page);
- char *src = kmap_local_page(src_page);
-
- VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
- memmove(dst + dst_off, src + src_off, len);
- kunmap_local(src);
- kunmap_local(dst);
-}
-
static inline void memset_page(struct page *page, size_t offset, int val,
size_t len)
{
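The kerneldoc tightened above stresses that kmap_local_page() mappings are stack based, so nested mappings must be undone in reverse order. A hedged sketch of that rule follows; the copy helper is illustrative only, mirroring the memcpy_page() pattern kept in this header.

#include <linux/highmem.h>
#include <linux/string.h>

static void copy_whole_page(struct page *dst_page, struct page *src_page)
{
	char *dst = kmap_local_page(dst_page);
	char *src = kmap_local_page(src_page);

	memcpy(dst, src, PAGE_SIZE);

	/* unmap in the reverse order of mapping */
	kunmap_local(src);
	kunmap_local(dst);
}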
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index d5a6f101f843..126a36571667 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -4,7 +4,7 @@
*
* Authors: Jérôme Glisse <jglisse@redhat.com>
*
- * See Documentation/vm/hmm.rst for reasons and overview of what HMM is.
+ * See Documentation/mm/hmm.rst for reasons and overview of what HMM is.
*/
#ifndef LINUX_HMM_H
#define LINUX_HMM_H
@@ -100,7 +100,7 @@ struct hmm_range {
};
/*
- * Please see Documentation/vm/hmm.rst for how to use the range API.
+ * Please see Documentation/mm/hmm.rst for how to use the range API.
*/
int hmm_range_fault(struct hmm_range *range);
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 4ddaf6ad73ef..768e5261fdae 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -116,9 +116,30 @@ extern struct kobj_attribute shmem_enabled_attr;
extern unsigned long transparent_hugepage_flags;
+#define hugepage_flags_enabled() \
+ (transparent_hugepage_flags & \
+ ((1<<TRANSPARENT_HUGEPAGE_FLAG) | \
+ (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
+#define hugepage_flags_always() \
+ (transparent_hugepage_flags & \
+ (1<<TRANSPARENT_HUGEPAGE_FLAG))
+
+/*
+ * Do the following checks:
+ * - For file vma, check if the linear page offset of vma is
+ * HPAGE_PMD_NR aligned within the file. The hugepage is
+ * guaranteed to be hugepage-aligned within the file, but we must
+ * check that the PMD-aligned addresses in the VMA map to
+ * PMD-aligned offsets within the file, else the hugepage will
+ * not be PMD-mappable.
+ * - For all vmas, check if the haddr is in an aligned HPAGE_PMD_SIZE
+ * area.
+ */
static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
- unsigned long haddr)
+ unsigned long addr)
{
+ unsigned long haddr;
+
/* Don't have to check pgoff for anonymous vma */
if (!vma_is_anonymous(vma)) {
if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
@@ -126,53 +147,13 @@ static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
return false;
}
- if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
- return false;
- return true;
-}
+ haddr = addr & HPAGE_PMD_MASK;
-static inline bool transhuge_vma_enabled(struct vm_area_struct *vma,
- unsigned long vm_flags)
-{
- /* Explicitly disabled through madvise. */
- if ((vm_flags & VM_NOHUGEPAGE) ||
- test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
+ if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
return false;
return true;
}
-/*
- * to be used on vmas which are known to support THP.
- * Use transparent_hugepage_active otherwise
- */
-static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
-{
-
- /*
- * If the hardware/firmware marked hugepage support disabled.
- */
- if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX))
- return false;
-
- if (!transhuge_vma_enabled(vma, vma->vm_flags))
- return false;
-
- if (vma_is_temporary_stack(vma))
- return false;
-
- if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
- return true;
-
- if (vma_is_dax(vma))
- return true;
-
- if (transparent_hugepage_flags &
- (1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
- return !!(vma->vm_flags & VM_HUGEPAGE);
-
- return false;
-}
-
static inline bool file_thp_enabled(struct vm_area_struct *vma)
{
struct inode *inode;
@@ -187,7 +168,9 @@ static inline bool file_thp_enabled(struct vm_area_struct *vma)
!inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
}
-bool transparent_hugepage_active(struct vm_area_struct *vma);
+bool hugepage_vma_check(struct vm_area_struct *vma,
+ unsigned long vm_flags,
+ bool smaps, bool in_pf);
#define transparent_hugepage_use_zero_page() \
(transparent_hugepage_flags & \
@@ -290,7 +273,7 @@ static inline bool is_huge_zero_page(struct page *page)
static inline bool is_huge_zero_pmd(pmd_t pmd)
{
- return READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd) && pmd_present(pmd);
+ return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd);
}
static inline bool is_huge_zero_pud(pud_t pud)
@@ -311,8 +294,8 @@ static inline bool thp_migration_supported(void)
static inline struct list_head *page_deferred_list(struct page *page)
{
/*
- * Global or memcg deferred list in the second tail pages is
- * occupied by compound_head.
+ * See organization of tail pages of compound page in
+ * "struct page" definition.
*/
return &page[2].deferred_list;
}
@@ -331,24 +314,15 @@ static inline bool folio_test_pmd_mappable(struct folio *folio)
return false;
}
-static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
-{
- return false;
-}
-
-static inline bool transparent_hugepage_active(struct vm_area_struct *vma)
-{
- return false;
-}
-
static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
- unsigned long haddr)
+ unsigned long addr)
{
return false;
}
-static inline bool transhuge_vma_enabled(struct vm_area_struct *vma,
- unsigned long vm_flags)
+static inline bool hugepage_vma_check(struct vm_area_struct *vma,
+ unsigned long vm_flags,
+ bool smaps, bool in_pf)
{
return false;
}
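With transparent_hugepage_active() and __transparent_hugepage_enabled() removed, THP eligibility is centralised in hugepage_vma_check(). A hedged sketch of how a fault-path caller might combine it with the reworked transhuge_vma_suitable(); the wrapper name is hypothetical and the real callers live in mm/.

#include <linux/mm.h>
#include <linux/huge_mm.h>

static bool fault_path_wants_thp(struct vm_area_struct *vma, unsigned long addr)
{
	/* addr no longer has to be pre-aligned; the helper masks it itself */
	if (!transhuge_vma_suitable(vma, addr))
		return false;

	/* smaps=false, in_pf=true: we are in the page fault path */
	return hugepage_vma_check(vma, vma->vm_flags, false, true);
}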
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index e4cff27d1198..3ec981a0d8b3 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -43,6 +43,9 @@ enum {
SUBPAGE_INDEX_CGROUP_RSVD, /* reuse page->private */
__MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD,
#endif
+#ifdef CONFIG_MEMORY_FAILURE
+ SUBPAGE_INDEX_HWPOISON,
+#endif
__NR_USED_SUBPAGE,
};
@@ -152,7 +155,7 @@ void __unmap_hugepage_range_final(struct mmu_gather *tlb,
struct page *ref_page, zap_flags_t zap_flags);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
-void hugetlb_show_meminfo(void);
+void hugetlb_show_meminfo_node(int nid);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, unsigned int flags);
@@ -170,7 +173,7 @@ bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
long freed);
-bool isolate_huge_page(struct page *page, struct list_head *list);
+int isolate_hugetlb(struct page *page, struct list_head *list);
int get_hwpoison_huge_page(struct page *page, bool *hugetlb);
int get_huge_page_for_hwpoison(unsigned long pfn, int flags);
void putback_active_hugepage(struct page *page);
@@ -194,8 +197,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
unsigned long addr, unsigned long sz);
+unsigned long hugetlb_mask_last_page(struct hstate *h);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long *addr, pte_t *ptep);
+ unsigned long addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
@@ -242,7 +246,7 @@ static inline struct address_space *hugetlb_page_mapping_lock_write(
static inline int huge_pmd_unshare(struct mm_struct *mm,
struct vm_area_struct *vma,
- unsigned long *addr, pte_t *ptep)
+ unsigned long addr, pte_t *ptep)
{
return 0;
}
@@ -297,7 +301,7 @@ static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
return 0;
}
-static inline void hugetlb_show_meminfo(void)
+static inline void hugetlb_show_meminfo_node(int nid)
{
}
@@ -376,9 +380,9 @@ static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
return NULL;
}
-static inline bool isolate_huge_page(struct page *page, struct list_head *list)
+static inline int isolate_hugetlb(struct page *page, struct list_head *list)
{
- return false;
+ return -EBUSY;
}
static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
@@ -550,7 +554,7 @@ generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
* Synchronization: Initially set after new page allocation with no
* locking. When examined and modified during migration processing
* (isolate, migrate, putback) the hugetlb_lock is held.
- * HPG_temporary - - Set on a page that is temporarily allocated from the buddy
+ * HPG_temporary - Set on a page that is temporarily allocated from the buddy
* allocator. Typically used for migration target pages when no pages
* are available in the pool. The hugetlb free page path will
* immediately free pages with this flag set to the buddy allocator.
@@ -560,6 +564,8 @@ generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
* HPG_freed - Set when page is on the free lists.
* Synchronization: hugetlb_lock held for examination and modification.
* HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
+ * HPG_raw_hwp_unreliable - Set when the hugetlb page has a hwpoison sub-page
+ * that is not tracked by raw_hwp_page list.
*/
enum hugetlb_page_flags {
HPG_restore_reserve = 0,
@@ -567,6 +573,7 @@ enum hugetlb_page_flags {
HPG_temporary,
HPG_freed,
HPG_vmemmap_optimized,
+ HPG_raw_hwp_unreliable,
__NR_HPAGEFLAGS,
};
@@ -613,6 +620,7 @@ HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
+HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)
#ifdef CONFIG_HUGETLB_PAGE
@@ -637,9 +645,6 @@ struct hstate {
unsigned int nr_huge_pages_node[MAX_NUMNODES];
unsigned int free_huge_pages_node[MAX_NUMNODES];
unsigned int surplus_huge_pages_node[MAX_NUMNODES];
-#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
- unsigned int optimize_vmemmap_pages;
-#endif
#ifdef CONFIG_CGROUP_HUGETLB
/* cgroup control files */
struct cftype cgroup_files_dfl[8];
@@ -715,7 +720,7 @@ static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
return hstate_file(vma->vm_file);
}
-static inline unsigned long huge_page_size(struct hstate *h)
+static inline unsigned long huge_page_size(const struct hstate *h)
{
return (unsigned long)PAGE_SIZE << h->order;
}
@@ -744,7 +749,7 @@ static inline bool hstate_is_gigantic(struct hstate *h)
return huge_page_order(h) >= MAX_ORDER;
}
-static inline unsigned int pages_per_huge_page(struct hstate *h)
+static inline unsigned int pages_per_huge_page(const struct hstate *h)
{
return 1 << h->order;
}
@@ -798,6 +803,14 @@ extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
unsigned long end_pfn);
+#ifdef CONFIG_MEMORY_FAILURE
+extern void hugetlb_clear_page_hwpoison(struct page *hpage);
+#else
+static inline void hugetlb_clear_page_hwpoison(struct page *hpage)
+{
+}
+#endif
+
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
@@ -903,14 +916,6 @@ static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
atomic_long_sub(l, &mm->hugetlb_usage);
}
-#ifndef set_huge_swap_pte_at
-static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte, unsigned long sz)
-{
- set_huge_pte_at(mm, addr, ptep, pte);
-}
-#endif
-
#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
@@ -1094,11 +1099,6 @@ static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}
-static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte, unsigned long sz)
-{
-}
-
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
diff --git a/include/linux/hypervisor.h b/include/linux/hypervisor.h
index fc08b433c856..9efbc54e35e5 100644
--- a/include/linux/hypervisor.h
+++ b/include/linux/hypervisor.h
@@ -32,4 +32,12 @@ static inline bool jailhouse_paravirt(void)
#endif /* !CONFIG_X86 */
+static inline bool hypervisor_isolated_pci_functions(void)
+{
+ if (IS_ENABLED(CONFIG_S390))
+ return true;
+
+ return jailhouse_paravirt();
+}
+
#endif /* __LINUX_HYPEVISOR_H */
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index 86af6f0a00a2..ca98aeadcc80 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -74,17 +74,22 @@ struct io_pgtable_cfg {
* to support up to 35 bits PA where the bit32, bit33 and bit34 are
* encoded in the bit9, bit4 and bit5 of the PTE respectively.
*
+ * IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT: (ARM v7s format) MediaTek IOMMUs
+ * extend the translation table base support up to 35 bits PA; the
+ * encoding format is the same as IO_PGTABLE_QUIRK_ARM_MTK_EXT.
+ *
* IO_PGTABLE_QUIRK_ARM_TTBR1: (ARM LPAE format) Configure the table
* for use in the upper half of a split address space.
*
* IO_PGTABLE_QUIRK_ARM_OUTER_WBWA: Override the outer-cacheability
* attributes set in the TCR for a non-coherent page-table walker.
*/
- #define IO_PGTABLE_QUIRK_ARM_NS BIT(0)
- #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1)
- #define IO_PGTABLE_QUIRK_ARM_MTK_EXT BIT(3)
- #define IO_PGTABLE_QUIRK_ARM_TTBR1 BIT(5)
- #define IO_PGTABLE_QUIRK_ARM_OUTER_WBWA BIT(6)
+ #define IO_PGTABLE_QUIRK_ARM_NS BIT(0)
+ #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1)
+ #define IO_PGTABLE_QUIRK_ARM_MTK_EXT BIT(3)
+ #define IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT BIT(4)
+ #define IO_PGTABLE_QUIRK_ARM_TTBR1 BIT(5)
+ #define IO_PGTABLE_QUIRK_ARM_OUTER_WBWA BIT(6)
unsigned long quirks;
unsigned long pgsize_bitmap;
unsigned int ias;
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index f7fab3758cb9..677a25d44d7f 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -491,7 +491,14 @@ struct io_cmd_data {
__u8 data[56];
};
-#define io_kiocb_to_cmd(req) ((void *) &(req)->cmd)
+static inline void io_kiocb_cmd_sz_check(size_t cmd_sz)
+{
+ BUILD_BUG_ON(cmd_sz > sizeof(struct io_cmd_data));
+}
+#define io_kiocb_to_cmd(req, cmd_type) ( \
+ io_kiocb_cmd_sz_check(sizeof(cmd_type)) , \
+ ((cmd_type *)&(req)->cmd) \
+)
#define cmd_to_io_kiocb(ptr) ((struct io_kiocb *) ptr)
struct io_kiocb {
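The reworked io_kiocb_to_cmd() now takes the command type so that the BUILD_BUG_ON() above can verify, at compile time, that the per-opcode struct fits into io_cmd_data. A hedged usage sketch; struct io_foo and io_foo_prep() are hypothetical and not opcodes added by this patch.

#include <linux/io_uring_types.h>

struct io_foo {
	__u64	addr;
	__u32	len;
};

static int io_foo_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	/* fails to build if sizeof(struct io_foo) exceeds io_cmd_data */
	struct io_foo *foo = io_kiocb_to_cmd(req, struct io_foo);

	foo->addr = READ_ONCE(sqe->addr);
	foo->len = READ_ONCE(sqe->len);
	return 0;
}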
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 25ac28175e4f..238a03087e17 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -297,9 +297,6 @@ void iomap_finish_ioends(struct iomap_ioend *ioend, int error);
void iomap_ioend_try_merge(struct iomap_ioend *ioend,
struct list_head *more_ioends);
void iomap_sort_ioends(struct list_head *ioend_list);
-int iomap_writepage(struct page *page, struct writeback_control *wbc,
- struct iomap_writepage_ctx *wpc,
- const struct iomap_writeback_ops *ops);
int iomap_writepages(struct address_space *mapping,
struct writeback_control *wbc, struct iomap_writepage_ctx *wpc,
const struct iomap_writeback_ops *ops);
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 5e1afe169549..ea30f00dc145 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -135,6 +135,7 @@ enum iommu_resv_type {
* @length: Length of the region in bytes
* @prot: IOMMU Protection flags (READ/WRITE/...)
* @type: Type of the reserved region
+ * @free: Callback to free associated memory allocations
*/
struct iommu_resv_region {
struct list_head list;
@@ -142,6 +143,15 @@ struct iommu_resv_region {
size_t length;
int prot;
enum iommu_resv_type type;
+ void (*free)(struct device *dev, struct iommu_resv_region *region);
+};
+
+struct iommu_iort_rmr_data {
+ struct iommu_resv_region rr;
+
+ /* Stream IDs associated with IORT RMR entry */
+ const u32 *sids;
+ u32 num_sids;
};
/**
@@ -154,8 +164,7 @@ struct iommu_resv_region {
* supported, this feature must be enabled before and
* disabled after %IOMMU_DEV_FEAT_SVA.
*
- * Device drivers query whether a feature is supported using
- * iommu_dev_has_feature(), and enable it using iommu_dev_enable_feature().
+ * Device drivers enable a feature using iommu_dev_enable_feature().
*/
enum iommu_dev_features {
IOMMU_DEV_FEAT_SVA,
@@ -200,13 +209,11 @@ struct iommu_iotlb_gather {
* group and attached to the groups domain
* @device_group: find iommu group for a particular device
* @get_resv_regions: Request list of reserved regions for a device
- * @put_resv_regions: Free list of reserved regions for a device
* @of_xlate: add OF master IDs to iommu grouping
* @is_attach_deferred: Check if domain attach should be deferred from iommu
* driver init to device driver init (default no)
* @dev_has/enable/disable_feat: per device entries to check/enable/disable
* iommu specific features.
- * @dev_feat_enabled: check enabled feature
* @sva_bind: Bind process address space to device
* @sva_unbind: Unbind process address space from device
* @sva_get_pasid: Get PASID associated to a SVA handle
@@ -232,14 +239,11 @@ struct iommu_ops {
/* Request/Free a list of reserved regions for a device */
void (*get_resv_regions)(struct device *dev, struct list_head *list);
- void (*put_resv_regions)(struct device *dev, struct list_head *list);
int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
bool (*is_attach_deferred)(struct device *dev);
/* Per device IOMMU features */
- bool (*dev_has_feat)(struct device *dev, enum iommu_dev_features f);
- bool (*dev_feat_enabled)(struct device *dev, enum iommu_dev_features f);
int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);
@@ -448,8 +452,6 @@ extern void iommu_set_fault_handler(struct iommu_domain *domain,
extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
-extern void generic_iommu_put_resv_regions(struct device *dev,
- struct list_head *list);
extern void iommu_set_default_passthrough(bool cmd_line);
extern void iommu_set_default_translated(bool cmd_line);
extern bool iommu_default_passthrough(void);
@@ -662,7 +664,6 @@ void iommu_release_device(struct device *dev);
int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);
-bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features f);
struct iommu_sva *iommu_sva_bind_device(struct device *dev,
struct mm_struct *mm,
@@ -989,12 +990,6 @@ const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
return NULL;
}
-static inline bool
-iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
-{
- return false;
-}
-
static inline int
iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index ec5f71f7135b..616b683563a9 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -141,6 +141,7 @@ enum {
IORES_DESC_DEVICE_PRIVATE_MEMORY = 6,
IORES_DESC_RESERVED = 7,
IORES_DESC_SOFT_RESERVED = 8,
+ IORES_DESC_CXL = 9,
};
/*
@@ -329,6 +330,8 @@ struct resource *devm_request_free_mem_region(struct device *dev,
struct resource *base, unsigned long size);
struct resource *request_free_mem_region(struct resource *base,
unsigned long size, const char *name);
+struct resource *alloc_free_mem_region(struct resource *base,
+ unsigned long size, unsigned long align, const char *name);
static inline void irqresource_disabled(struct resource *res, u32 irq)
{
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 320a70e40233..c6ba6d95d79c 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -79,6 +79,8 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
int iova_cache_get(void);
void iova_cache_put(void);
+unsigned long iova_rcache_range(void);
+
void free_iova(struct iova_domain *iovad, unsigned long pfn);
void __free_iova(struct iova_domain *iovad, struct iova *iova);
struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
diff --git a/include/linux/isa-dma.h b/include/linux/isa-dma.h
new file mode 100644
index 000000000000..61504a8c1b9e
--- /dev/null
+++ b/include/linux/isa-dma.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_ISA_DMA_H
+#define __LINUX_ISA_DMA_H
+
+#include <asm/dma.h>
+
+#if defined(CONFIG_PCI) && defined(CONFIG_X86_32)
+extern int isa_dma_bridge_buggy;
+#else
+#define isa_dma_bridge_buggy (0)
+#endif
+
+#endif /* __LINUX_ISA_DMA_H */
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index dc1724131300..0b7242370b56 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -54,14 +54,13 @@
* CONFIG_JBD2_DEBUG is on.
*/
#define JBD2_EXPENSIVE_CHECKING
-extern ushort jbd2_journal_enable_debug;
void __jbd2_debug(int level, const char *file, const char *func,
unsigned int line, const char *fmt, ...);
-#define jbd_debug(n, fmt, a...) \
+#define jbd2_debug(n, fmt, a...) \
__jbd2_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a)
#else
-#define jbd_debug(n, fmt, a...) no_printk(fmt, ##a)
+#define jbd2_debug(n, fmt, a...) no_printk(fmt, ##a)
#endif
extern void *jbd2_alloc(size_t size, gfp_t flags);
@@ -1647,7 +1646,6 @@ extern void jbd2_clear_buffer_revoked_flags(journal_t *journal);
*/
int jbd2_log_start_commit(journal_t *journal, tid_t tid);
-int __jbd2_log_start_commit(journal_t *journal, tid_t tid);
int jbd2_journal_start_commit(journal_t *journal, tid_t *tid);
int jbd2_log_wait_commit(journal_t *journal, tid_t tid);
int jbd2_transaction_committed(journal_t *journal, tid_t tid);
diff --git a/include/linux/kernel_read_file.h b/include/linux/kernel_read_file.h
index 575ffa1031d3..90451e2e12bd 100644
--- a/include/linux/kernel_read_file.h
+++ b/include/linux/kernel_read_file.h
@@ -35,21 +35,21 @@ static inline const char *kernel_read_file_id_str(enum kernel_read_file_id id)
return kernel_read_file_str[id];
}
-int kernel_read_file(struct file *file, loff_t offset,
- void **buf, size_t buf_size,
- size_t *file_size,
- enum kernel_read_file_id id);
-int kernel_read_file_from_path(const char *path, loff_t offset,
- void **buf, size_t buf_size,
- size_t *file_size,
- enum kernel_read_file_id id);
-int kernel_read_file_from_path_initns(const char *path, loff_t offset,
- void **buf, size_t buf_size,
- size_t *file_size,
- enum kernel_read_file_id id);
-int kernel_read_file_from_fd(int fd, loff_t offset,
- void **buf, size_t buf_size,
- size_t *file_size,
- enum kernel_read_file_id id);
+ssize_t kernel_read_file(struct file *file, loff_t offset,
+ void **buf, size_t buf_size,
+ size_t *file_size,
+ enum kernel_read_file_id id);
+ssize_t kernel_read_file_from_path(const char *path, loff_t offset,
+ void **buf, size_t buf_size,
+ size_t *file_size,
+ enum kernel_read_file_id id);
+ssize_t kernel_read_file_from_path_initns(const char *path, loff_t offset,
+ void **buf, size_t buf_size,
+ size_t *file_size,
+ enum kernel_read_file_id id);
+ssize_t kernel_read_file_from_fd(int fd, loff_t offset,
+ void **buf, size_t buf_size,
+ size_t *file_size,
+ enum kernel_read_file_id id);
#endif /* _LINUX_KERNEL_READ_FILE_H */
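The switch from int to ssize_t lets these helpers report file sizes above INT_MAX without truncation. A hedged caller sketch; the path and wrapper are hypothetical, while READING_FIRMWARE is one of the existing kernel_read_file_id values.

#include <linux/kernel_read_file.h>
#include <linux/limits.h>

static int load_example_blob(void **buf, size_t *file_size)
{
	ssize_t ret;

	/* SIZE_MAX: no caller-imposed cap on the buffer size */
	ret = kernel_read_file_from_path("/lib/firmware/example.bin", 0,
					 buf, SIZE_MAX, file_size,
					 READING_FIRMWARE);
	return ret < 0 ? ret : 0;
}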
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index 86249476b57f..0b35a41440ff 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -688,7 +688,7 @@ __kfifo_uint_must_check_helper( \
* writer, you don't need extra locking to use these macro.
*/
#define kfifo_to_user(fifo, to, len, copied) \
-__kfifo_uint_must_check_helper( \
+__kfifo_int_must_check_helper( \
({ \
typeof((fifo) + 1) __tmp = (fifo); \
void __user *__to = (to); \
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index 392d34c3c59a..384f034ae947 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -10,8 +10,6 @@ extern struct attribute_group khugepaged_attr_group;
extern int khugepaged_init(void);
extern void khugepaged_destroy(void);
extern int start_stop_khugepaged(void);
-extern bool hugepage_vma_check(struct vm_area_struct *vma,
- unsigned long vm_flags);
extern void __khugepaged_enter(struct mm_struct *mm);
extern void __khugepaged_exit(struct mm_struct *mm);
extern void khugepaged_enter_vma(struct vm_area_struct *vma,
@@ -26,20 +24,6 @@ static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
}
#endif
-#define khugepaged_enabled() \
- (transparent_hugepage_flags & \
- ((1<<TRANSPARENT_HUGEPAGE_FLAG) | \
- (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
-#define khugepaged_always() \
- (transparent_hugepage_flags & \
- (1<<TRANSPARENT_HUGEPAGE_FLAG))
-#define khugepaged_req_madv() \
- (transparent_hugepage_flags & \
- (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
-#define khugepaged_defrag() \
- (transparent_hugepage_flags & \
- (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
-
static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
@@ -51,16 +35,6 @@ static inline void khugepaged_exit(struct mm_struct *mm)
if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
__khugepaged_exit(mm);
}
-
-static inline void khugepaged_enter(struct vm_area_struct *vma,
- unsigned long vm_flags)
-{
- if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
- khugepaged_enabled()) {
- if (hugepage_vma_check(vma, vm_flags))
- __khugepaged_enter(vma->vm_mm);
- }
-}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
@@ -68,10 +42,6 @@ static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm
static inline void khugepaged_exit(struct mm_struct *mm)
{
}
-static inline void khugepaged_enter(struct vm_area_struct *vma,
- unsigned long vm_flags)
-{
-}
static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
unsigned long vm_flags)
{
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index 34684b2026ab..6a3cd1bf4680 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -29,10 +29,9 @@ extern void kmemleak_not_leak(const void *ptr) __ref;
extern void kmemleak_ignore(const void *ptr) __ref;
extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref;
extern void kmemleak_no_scan(const void *ptr) __ref;
-extern void kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
+extern void kmemleak_alloc_phys(phys_addr_t phys, size_t size,
gfp_t gfp) __ref;
extern void kmemleak_free_part_phys(phys_addr_t phys, size_t size) __ref;
-extern void kmemleak_not_leak_phys(phys_addr_t phys) __ref;
extern void kmemleak_ignore_phys(phys_addr_t phys) __ref;
static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
@@ -107,15 +106,12 @@ static inline void kmemleak_no_scan(const void *ptr)
{
}
static inline void kmemleak_alloc_phys(phys_addr_t phys, size_t size,
- int min_count, gfp_t gfp)
+ gfp_t gfp)
{
}
static inline void kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
}
-static inline void kmemleak_not_leak_phys(phys_addr_t phys)
-{
-}
static inline void kmemleak_ignore_phys(phys_addr_t phys)
{
}
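After dropping the unused min_count parameter, kmemleak physical-address tracking takes only the range and GFP flags. A hedged sketch of the kind of call site this simplifies; the wrapper name is hypothetical.

#include <linux/kmemleak.h>
#include <linux/gfp.h>

static void track_reserved_range(phys_addr_t base, size_t size)
{
	/* min_count is gone: the region is simply registered for scanning */
	kmemleak_alloc_phys(base, size, GFP_KERNEL);
}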
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 1c480b1821e1..f4519d3689e1 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -656,12 +656,12 @@ struct kvm_irq_routing_table {
};
#endif
-#ifndef KVM_PRIVATE_MEM_SLOTS
-#define KVM_PRIVATE_MEM_SLOTS 0
+#ifndef KVM_INTERNAL_MEM_SLOTS
+#define KVM_INTERNAL_MEM_SLOTS 0
#endif
#define KVM_MEM_SLOTS_NUM SHRT_MAX
-#define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_PRIVATE_MEM_SLOTS)
+#define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_INTERNAL_MEM_SLOTS)
#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
@@ -765,10 +765,10 @@ struct kvm {
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
struct mmu_notifier mmu_notifier;
- unsigned long mmu_notifier_seq;
- long mmu_notifier_count;
- unsigned long mmu_notifier_range_start;
- unsigned long mmu_notifier_range_end;
+ unsigned long mmu_invalidate_seq;
+ long mmu_invalidate_in_progress;
+ unsigned long mmu_invalidate_range_start;
+ unsigned long mmu_invalidate_range_end;
#endif
struct list_head devices;
u64 manual_dirty_log_protect;
@@ -1357,10 +1357,10 @@ void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
#endif
-void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start,
- unsigned long end);
-void kvm_dec_notifier_count(struct kvm *kvm, unsigned long start,
- unsigned long end);
+void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
+ unsigned long end);
+void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start,
+ unsigned long end);
long kvm_arch_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
@@ -1907,42 +1907,44 @@ extern const struct kvm_stats_header kvm_vcpu_stats_header;
extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[];
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
-static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
+static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq)
{
- if (unlikely(kvm->mmu_notifier_count))
+ if (unlikely(kvm->mmu_invalidate_in_progress))
return 1;
/*
- * Ensure the read of mmu_notifier_count happens before the read
- * of mmu_notifier_seq. This interacts with the smp_wmb() in
- * mmu_notifier_invalidate_range_end to make sure that the caller
- * either sees the old (non-zero) value of mmu_notifier_count or
- * the new (incremented) value of mmu_notifier_seq.
- * PowerPC Book3s HV KVM calls this under a per-page lock
- * rather than under kvm->mmu_lock, for scalability, so
- * can't rely on kvm->mmu_lock to keep things ordered.
+ * Ensure the read of mmu_invalidate_in_progress happens before
+ * the read of mmu_invalidate_seq. This interacts with the
+ * smp_wmb() in mmu_notifier_invalidate_range_end to make sure
+ * that the caller either sees the old (non-zero) value of
+ * mmu_invalidate_in_progress or the new (incremented) value of
+ * mmu_invalidate_seq.
+ *
+ * PowerPC Book3s HV KVM calls this under a per-page lock rather
+ * than under kvm->mmu_lock, for scalability, so can't rely on
+ * kvm->mmu_lock to keep things ordered.
*/
smp_rmb();
- if (kvm->mmu_notifier_seq != mmu_seq)
+ if (kvm->mmu_invalidate_seq != mmu_seq)
return 1;
return 0;
}
-static inline int mmu_notifier_retry_hva(struct kvm *kvm,
- unsigned long mmu_seq,
- unsigned long hva)
+static inline int mmu_invalidate_retry_hva(struct kvm *kvm,
+ unsigned long mmu_seq,
+ unsigned long hva)
{
lockdep_assert_held(&kvm->mmu_lock);
/*
- * If mmu_notifier_count is non-zero, then the range maintained by
- * kvm_mmu_notifier_invalidate_range_start contains all addresses that
- * might be being invalidated. Note that it may include some false
+ * If mmu_invalidate_in_progress is non-zero, then the range maintained
+ * by kvm_mmu_notifier_invalidate_range_start contains all addresses
+ * that might be being invalidated. Note that it may include some false
* positives, due to shortcuts when handling concurrent invalidations.
*/
- if (unlikely(kvm->mmu_notifier_count) &&
- hva >= kvm->mmu_notifier_range_start &&
- hva < kvm->mmu_notifier_range_end)
+ if (unlikely(kvm->mmu_invalidate_in_progress) &&
+ hva >= kvm->mmu_invalidate_range_start &&
+ hva < kvm->mmu_invalidate_range_end)
return 1;
- if (kvm->mmu_notifier_seq != mmu_seq)
+ if (kvm->mmu_invalidate_seq != mmu_seq)
return 1;
return 0;
}
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 0269ff114f5a..698032e5ef2d 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1382,7 +1382,8 @@ extern const struct attribute_group *ata_common_sdev_groups[];
.proc_name = drv_name, \
.slave_destroy = ata_scsi_slave_destroy, \
.bios_param = ata_std_bios_param, \
- .unlock_native_capacity = ata_scsi_unlock_native_capacity
+ .unlock_native_capacity = ata_scsi_unlock_native_capacity,\
+ .max_sectors = ATA_MAX_SECTORS_LBA48
#define ATA_SUBBASE_SHT(drv_name) \
__ATA_BASE_SHT(drv_name), \
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index 0d61e07b6827..c74acfa1a3fe 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -59,6 +59,9 @@ enum {
/* Platform provides asynchronous flush mechanism */
ND_REGION_ASYNC = 3,
+ /* Region was created by CXL subsystem */
+ ND_REGION_CXL = 4,
+
/* mark newly adjusted resources as requiring a label update */
DPA_RESOURCE_ADJUSTED = 1 << 0,
};
@@ -122,6 +125,7 @@ struct nd_region_desc {
int numa_node;
int target_node;
unsigned long flags;
+ int memregion;
struct device_node *of_node;
int (*flush)(struct nd_region *nd_region, struct bio *bio);
};
@@ -259,6 +263,7 @@ static inline struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus,
cmd_mask, num_flush, flush_wpq, NULL, NULL, NULL);
}
void nvdimm_delete(struct nvdimm *nvdimm);
+void nvdimm_region_delete(struct nd_region *nd_region);
const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd);
const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd);
diff --git a/include/linux/limits.h b/include/linux/limits.h
index b568b9c30bbf..f6bcc9369010 100644
--- a/include/linux/limits.h
+++ b/include/linux/limits.h
@@ -7,6 +7,7 @@
#include <vdso/limits.h>
#define SIZE_MAX (~(size_t)0)
+#define SSIZE_MAX ((ssize_t)(SIZE_MAX >> 1))
#define PHYS_ADDR_MAX (~(phys_addr_t)0)
#define U8_MAX ((u8)~0U)
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index fcef192e5e45..70ce419e2709 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -292,6 +292,7 @@ void nlmsvc_locks_init_private(struct file_lock *, struct nlm_host *, pid_t);
__be32 nlm_lookup_file(struct svc_rqst *, struct nlm_file **,
struct nlm_lock *);
void nlm_release_file(struct nlm_file *);
+void nlmsvc_put_lockowner(struct nlm_lockowner *);
void nlmsvc_release_lockowner(struct nlm_lock *);
void nlmsvc_mark_resources(struct net *);
void nlmsvc_free_host_resources(struct nlm_host *);
diff --git a/include/linux/lockd/xdr.h b/include/linux/lockd/xdr.h
index 398f70093cd3..67e4a2c5500b 100644
--- a/include/linux/lockd/xdr.h
+++ b/include/linux/lockd/xdr.h
@@ -41,6 +41,8 @@ struct nlm_lock {
struct nfs_fh fh;
struct xdr_netobj oh;
u32 svid;
+ u64 lock_start;
+ u64 lock_len;
struct file_lock fl;
};
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index 806448173033..60fff133c0b1 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -407,4 +407,5 @@ LSM_HOOK(int, 0, perf_event_write, struct perf_event *event)
#ifdef CONFIG_IO_URING
LSM_HOOK(int, 0, uring_override_creds, const struct cred *new)
LSM_HOOK(int, 0, uring_sqpoll, void)
+LSM_HOOK(int, 0, uring_cmd, struct io_uring_cmd *ioucmd)
#endif /* CONFIG_IO_URING */
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 84a0d7e02176..3aa6030302f5 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -1582,6 +1582,9 @@
* Check whether the current task is allowed to spawn a io_uring polling
* thread (IORING_SETUP_SQPOLL).
*
+ * @uring_cmd:
+ * Check whether the file_operations uring_cmd is allowed to run.
+ *
*/
union security_list_options {
#define LSM_HOOK(RET, DEFAULT, NAME, ...) RET (*NAME)(__VA_ARGS__);
diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h
index 44365aab043c..a8f0070c7aa9 100644
--- a/include/linux/mailbox/mtk-cmdq-mailbox.h
+++ b/include/linux/mailbox/mtk-cmdq-mailbox.h
@@ -67,24 +67,14 @@ enum cmdq_code {
struct cmdq_cb_data {
int sta;
- void *data;
struct cmdq_pkt *pkt;
};
-typedef void (*cmdq_async_flush_cb)(struct cmdq_cb_data data);
-
-struct cmdq_task_cb {
- cmdq_async_flush_cb cb;
- void *data;
-};
-
struct cmdq_pkt {
void *va_base;
dma_addr_t pa_base;
size_t cmd_buf_size; /* command occupied size */
size_t buf_size; /* real buffer size */
- struct cmdq_task_cb cb;
- struct cmdq_task_cb async_cb;
void *cl;
};
diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h
index 20f1e3ff6013..2da63fd7b98f 100644
--- a/include/linux/mbcache.h
+++ b/include/linux/mbcache.h
@@ -13,8 +13,16 @@ struct mb_cache;
struct mb_cache_entry {
/* List of entries in cache - protected by cache->c_list_lock */
struct list_head e_list;
- /* Hash table list - protected by hash chain bitlock */
+ /*
+ * Hash table list - protected by hash chain bitlock. The entry is
+ * guaranteed to be hashed while e_refcnt > 0.
+ */
struct hlist_bl_node e_hash_list;
+ /*
+ * Entry refcount. Once it reaches zero, entry is unhashed and freed.
+ * While refcount > 0, the entry is guaranteed to stay in the hash and
+ * e.g. mb_cache_entry_try_delete() will fail.
+ */
atomic_t e_refcnt;
/* Key in hash - stable during lifetime of the entry */
u32 e_key;
@@ -29,17 +37,24 @@ void mb_cache_destroy(struct mb_cache *cache);
int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
u64 value, bool reusable);
-void __mb_cache_entry_free(struct mb_cache_entry *entry);
-static inline int mb_cache_entry_put(struct mb_cache *cache,
- struct mb_cache_entry *entry)
+void __mb_cache_entry_free(struct mb_cache *cache,
+ struct mb_cache_entry *entry);
+void mb_cache_entry_wait_unused(struct mb_cache_entry *entry);
+static inline void mb_cache_entry_put(struct mb_cache *cache,
+ struct mb_cache_entry *entry)
{
- if (!atomic_dec_and_test(&entry->e_refcnt))
- return 0;
- __mb_cache_entry_free(entry);
- return 1;
+ unsigned int cnt = atomic_dec_return(&entry->e_refcnt);
+
+ if (cnt > 0) {
+ if (cnt <= 2)
+ wake_up_var(&entry->e_refcnt);
+ return;
+ }
+ __mb_cache_entry_free(cache, entry);
}
-void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value);
+struct mb_cache_entry *mb_cache_entry_delete_or_get(struct mb_cache *cache,
+ u32 key, u64 value);
struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
u64 value);
struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
diff --git a/include/linux/mdev.h b/include/linux/mdev.h
index bb539794f54a..47ad3b104d9e 100644
--- a/include/linux/mdev.h
+++ b/include/linux/mdev.h
@@ -65,11 +65,6 @@ struct mdev_driver {
struct device_driver driver;
};
-static inline const guid_t *mdev_uuid(struct mdev_device *mdev)
-{
- return &mdev->uuid;
-}
-
extern struct bus_type mdev_bus_type;
int mdev_register_device(struct device *dev, struct mdev_driver *mdev_driver);
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 9ecead1042b9..6257867fbf95 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -837,6 +837,15 @@ static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
+#ifdef CONFIG_SHRINKER_DEBUG
+static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
+{
+ return memcg ? cgroup_ino(memcg->css.cgroup) : 0;
+}
+
+struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino);
+#endif
+
static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
return mem_cgroup_from_css(seq_css(m));
@@ -978,19 +987,30 @@ static inline void mod_memcg_page_state(struct page *page,
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
- return READ_ONCE(memcg->vmstats.state[idx]);
+ long x = READ_ONCE(memcg->vmstats.state[idx]);
+#ifdef CONFIG_SMP
+ if (x < 0)
+ x = 0;
+#endif
+ return x;
}
static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
enum node_stat_item idx)
{
struct mem_cgroup_per_node *pn;
+ long x;
if (mem_cgroup_disabled())
return node_page_state(lruvec_pgdat(lruvec), idx);
pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
- return READ_ONCE(pn->lruvec_stats.state[idx]);
+ x = READ_ONCE(pn->lruvec_stats.state[idx]);
+#ifdef CONFIG_SMP
+ if (x < 0)
+ x = 0;
+#endif
+ return x;
}
static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
@@ -1343,6 +1363,18 @@ static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
return NULL;
}
+#ifdef CONFIG_SHRINKER_DEBUG
+static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
+{
+ return 0;
+}
+
+static inline struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
+{
+ return NULL;
+}
+#endif
+
static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
return NULL;
@@ -1740,6 +1772,7 @@ static inline int memcg_kmem_id(struct mem_cgroup *memcg)
}
struct mem_cgroup *mem_cgroup_from_obj(void *p);
+struct mem_cgroup *mem_cgroup_from_slab_obj(void *p);
static inline void count_objcg_event(struct obj_cgroup *objcg,
enum vm_event_item idx)
@@ -1755,6 +1788,42 @@ static inline void count_objcg_event(struct obj_cgroup *objcg,
rcu_read_unlock();
}
+/**
+ * get_mem_cgroup_from_obj - get a memcg associated with passed kernel object.
+ * @p: pointer to object from which memcg should be extracted. It can be NULL.
+ *
+ * Retrieves the memory cgroup into which the memory of the pointed kernel
+ * object is accounted. If memcg is found, its reference is taken.
+ * If the passed kernel object is uncharged, a proper memcg cannot be found,
+ * or mem_cgroup is disabled, NULL is returned.
+ *
+ * Return: valid memcg pointer with taken reference or NULL.
+ */
+static inline struct mem_cgroup *get_mem_cgroup_from_obj(void *p)
+{
+ struct mem_cgroup *memcg;
+
+ rcu_read_lock();
+ do {
+ memcg = mem_cgroup_from_obj(p);
+ } while (memcg && !css_tryget(&memcg->css));
+ rcu_read_unlock();
+ return memcg;
+}
+
+/**
+ * mem_cgroup_or_root - always returns a pointer to a valid memory cgroup.
+ * @memcg: pointer to a valid memory cgroup or NULL.
+ *
+ * If passed argument is not NULL, returns it without any additional checks
+ * and changes. Otherwise, root_mem_cgroup is returned.
+ *
+ * NOTE: root_mem_cgroup can be NULL during early boot.
+ */
+static inline struct mem_cgroup *mem_cgroup_or_root(struct mem_cgroup *memcg)
+{
+ return memcg ? memcg : root_mem_cgroup;
+}
#else
static inline bool mem_cgroup_kmem_disabled(void)
{
@@ -1798,7 +1867,12 @@ static inline int memcg_kmem_id(struct mem_cgroup *memcg)
static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
{
- return NULL;
+ return NULL;
+}
+
+static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
+{
+ return NULL;
}
static inline void count_objcg_event(struct obj_cgroup *objcg,
@@ -1806,6 +1880,15 @@ static inline void count_objcg_event(struct obj_cgroup *objcg,
{
}
+static inline struct mem_cgroup *get_mem_cgroup_from_obj(void *p)
+{
+ return NULL;
+}
+
+static inline struct mem_cgroup *mem_cgroup_or_root(struct mem_cgroup *memcg)
+{
+ return NULL;
+}
#endif /* CONFIG_MEMCG_KMEM */
#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 20d7edf62a6a..e0b2209ab71c 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -351,13 +351,4 @@ void arch_remove_linear_mapping(u64 start, u64 size);
extern bool mhp_supports_memmap_on_memory(unsigned long size);
#endif /* CONFIG_MEMORY_HOTPLUG */
-#ifdef CONFIG_MHP_MEMMAP_ON_MEMORY
-bool mhp_memmap_on_memory(void);
-#else
-static inline bool mhp_memmap_on_memory(void)
-{
- return false;
-}
-#endif
-
#endif /* __LINUX_MEMORY_HOTPLUG_H */
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 8af304f6b504..19010491a603 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -2,7 +2,7 @@
#ifndef _LINUX_MEMREMAP_H_
#define _LINUX_MEMREMAP_H_
-#include <linux/mm.h>
+#include <linux/mmzone.h>
#include <linux/range.h>
#include <linux/ioport.h>
#include <linux/percpu-refcount.h>
@@ -39,7 +39,14 @@ struct vmem_altmap {
* must be treated as an opaque object, rather than a "normal" struct page.
*
* A more complete discussion of unaddressable memory may be found in
- * include/linux/hmm.h and Documentation/vm/hmm.rst.
+ * include/linux/hmm.h and Documentation/mm/hmm.rst.
+ *
+ * MEMORY_DEVICE_COHERENT:
+ * Device memory that is cache coherent from device and CPU point of view. This
+ * is used on platforms that have an advanced system bus (like CAPI or CXL). A
+ * driver can hotplug the device memory using ZONE_DEVICE and with that memory
+ * type. Any page of a process can be migrated to such memory. However no one
+ * should be allowed to pin such memory so that it can always be evicted.
*
* MEMORY_DEVICE_FS_DAX:
* Host memory that has similar access semantics as System RAM i.e. DMA
@@ -61,6 +68,7 @@ struct vmem_altmap {
enum memory_type {
/* 0 is reserved to catch uninitialized type fields */
MEMORY_DEVICE_PRIVATE = 1,
+ MEMORY_DEVICE_COHERENT,
MEMORY_DEVICE_FS_DAX,
MEMORY_DEVICE_GENERIC,
MEMORY_DEVICE_PCI_P2PDMA,
@@ -79,6 +87,18 @@ struct dev_pagemap_ops {
* the page back to a CPU accessible page.
*/
vm_fault_t (*migrate_to_ram)(struct vm_fault *vmf);
+
+ /*
+ * Handle the memory failure that happens on a range of pfns. Notify the
+ * processes who are using these pfns, and try to recover the data on
+ * them if necessary. The mf_flags is finally passed to the recover
+ * function through the whole notify routine.
+ *
+ * When this is not implemented, or it returns -EOPNOTSUPP, the caller
+ * will fall back to a common handler called mf_generic_kill_procs().
+ */
+ int (*memory_failure)(struct dev_pagemap *pgmap, unsigned long pfn,
+ unsigned long nr_pages, int mf_flags);
};
#define PGMAP_ALTMAP_VALID (1 << 0)
@@ -150,6 +170,17 @@ static inline bool is_pci_p2pdma_page(const struct page *page)
page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
}
+static inline bool is_device_coherent_page(const struct page *page)
+{
+ return is_zone_device_page(page) &&
+ page->pgmap->type == MEMORY_DEVICE_COHERENT;
+}
+
+static inline bool folio_is_device_coherent(const struct folio *folio)
+{
+ return is_device_coherent_page(&folio->page);
+}
+
#ifdef CONFIG_ZONE_DEVICE
void *memremap_pages(struct dev_pagemap *pgmap, int nid);
void memunmap_pages(struct dev_pagemap *pgmap);
diff --git a/include/linux/mfd/ipaq-micro.h b/include/linux/mfd/ipaq-micro.h
index ee48a4321c57..d5caa4c86ecc 100644
--- a/include/linux/mfd/ipaq-micro.h
+++ b/include/linux/mfd/ipaq-micro.h
@@ -75,8 +75,8 @@ struct ipaq_micro_rxdev {
* @id: 4-bit ID of the message
* @tx_len: length of TX data
* @tx_data: TX data to send
- * @rx_len: length of receieved RX data
- * @rx_data: RX data to recieve
+ * @rx_len: length of received RX data
+ * @rx_data: RX data to receive
* @ack: a completion that will be completed when RX is complete
* @node: list node if message gets queued
*/
diff --git a/include/linux/mfd/max77714.h b/include/linux/mfd/max77714.h
index a970dc455426..7947e0d697a5 100644
--- a/include/linux/mfd/max77714.h
+++ b/include/linux/mfd/max77714.h
@@ -3,7 +3,7 @@
* Maxim MAX77714 Register and data structures definition.
*
* Copyright (C) 2022 Luca Ceresoli
- * Author: Luca Ceresoli <luca@lucaceresoli.net>
+ * Author: Luca Ceresoli <luca.ceresoli@bootlin.com>
*/
#ifndef __LINUX_MFD_MAX77714_H_
diff --git a/include/linux/mfd/mt6331/core.h b/include/linux/mfd/mt6331/core.h
new file mode 100644
index 000000000000..df8e6b1e4bc1
--- /dev/null
+++ b/include/linux/mfd/mt6331/core.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __MFD_MT6331_CORE_H__
+#define __MFD_MT6331_CORE_H__
+
+enum mt6331_irq_status_numbers {
+ MT6331_IRQ_STATUS_PWRKEY = 0,
+ MT6331_IRQ_STATUS_HOMEKEY,
+ MT6331_IRQ_STATUS_CHRDET,
+ MT6331_IRQ_STATUS_THR_H,
+ MT6331_IRQ_STATUS_THR_L,
+ MT6331_IRQ_STATUS_BAT_H,
+ MT6331_IRQ_STATUS_BAT_L,
+ MT6331_IRQ_STATUS_RTC,
+ MT6331_IRQ_STATUS_AUDIO,
+ MT6331_IRQ_STATUS_MAD,
+ MT6331_IRQ_STATUS_ACCDET,
+ MT6331_IRQ_STATUS_ACCDET_EINT,
+ MT6331_IRQ_STATUS_ACCDET_NEGV = 12,
+ MT6331_IRQ_STATUS_VDVFS11_OC = 16,
+ MT6331_IRQ_STATUS_VDVFS12_OC,
+ MT6331_IRQ_STATUS_VDVFS13_OC,
+ MT6331_IRQ_STATUS_VDVFS14_OC,
+ MT6331_IRQ_STATUS_GPU_OC,
+ MT6331_IRQ_STATUS_VCORE1_OC,
+ MT6331_IRQ_STATUS_VCORE2_OC,
+ MT6331_IRQ_STATUS_VIO18_OC,
+ MT6331_IRQ_STATUS_LDO_OC,
+ MT6331_IRQ_STATUS_NR,
+};
+
+#define MT6331_IRQ_CON0_BASE MT6331_IRQ_STATUS_PWRKEY
+#define MT6331_IRQ_CON0_BITS (MT6331_IRQ_STATUS_ACCDET_NEGV + 1)
+#define MT6331_IRQ_CON1_BASE MT6331_IRQ_STATUS_VDVFS11_OC
+#define MT6331_IRQ_CON1_BITS (MT6331_IRQ_STATUS_LDO_OC - MT6331_IRQ_STATUS_VDVFS11_OC + 1)
+
+#endif /* __MFD_MT6331_CORE_H__ */
diff --git a/include/linux/mfd/mt6331/registers.h b/include/linux/mfd/mt6331/registers.h
new file mode 100644
index 000000000000..e2be6bccd1a7
--- /dev/null
+++ b/include/linux/mfd/mt6331/registers.h
@@ -0,0 +1,584 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __MFD_MT6331_REGISTERS_H__
+#define __MFD_MT6331_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6331_STRUP_CON0 0x0
+#define MT6331_STRUP_CON2 0x2
+#define MT6331_STRUP_CON3 0x4
+#define MT6331_STRUP_CON4 0x6
+#define MT6331_STRUP_CON5 0x8
+#define MT6331_STRUP_CON6 0xA
+#define MT6331_STRUP_CON7 0xC
+#define MT6331_STRUP_CON8 0xE
+#define MT6331_STRUP_CON9 0x10
+#define MT6331_STRUP_CON10 0x12
+#define MT6331_STRUP_CON11 0x14
+#define MT6331_STRUP_CON12 0x16
+#define MT6331_STRUP_CON13 0x18
+#define MT6331_STRUP_CON14 0x1A
+#define MT6331_STRUP_CON15 0x1C
+#define MT6331_STRUP_CON16 0x1E
+#define MT6331_STRUP_CON17 0x20
+#define MT6331_STRUP_CON18 0x22
+#define MT6331_HWCID 0x100
+#define MT6331_SWCID 0x102
+#define MT6331_EXT_PMIC_STATUS 0x104
+#define MT6331_TOP_CON 0x106
+#define MT6331_TEST_OUT 0x108
+#define MT6331_TEST_CON0 0x10A
+#define MT6331_TEST_CON1 0x10C
+#define MT6331_TESTMODE_SW 0x10E
+#define MT6331_EN_STATUS0 0x110
+#define MT6331_EN_STATUS1 0x112
+#define MT6331_EN_STATUS2 0x114
+#define MT6331_OCSTATUS0 0x116
+#define MT6331_OCSTATUS1 0x118
+#define MT6331_OCSTATUS2 0x11A
+#define MT6331_PGSTATUS 0x11C
+#define MT6331_TOPSTATUS 0x11E
+#define MT6331_TDSEL_CON 0x120
+#define MT6331_RDSEL_CON 0x122
+#define MT6331_SMT_CON0 0x124
+#define MT6331_SMT_CON1 0x126
+#define MT6331_SMT_CON2 0x128
+#define MT6331_DRV_CON0 0x12A
+#define MT6331_DRV_CON1 0x12C
+#define MT6331_DRV_CON2 0x12E
+#define MT6331_DRV_CON3 0x130
+#define MT6331_TOP_STATUS 0x132
+#define MT6331_TOP_STATUS_SET 0x134
+#define MT6331_TOP_STATUS_CLR 0x136
+#define MT6331_TOP_CKPDN_CON0 0x138
+#define MT6331_TOP_CKPDN_CON0_SET 0x13A
+#define MT6331_TOP_CKPDN_CON0_CLR 0x13C
+#define MT6331_TOP_CKPDN_CON1 0x13E
+#define MT6331_TOP_CKPDN_CON1_SET 0x140
+#define MT6331_TOP_CKPDN_CON1_CLR 0x142
+#define MT6331_TOP_CKPDN_CON2 0x144
+#define MT6331_TOP_CKPDN_CON2_SET 0x146
+#define MT6331_TOP_CKPDN_CON2_CLR 0x148
+#define MT6331_TOP_CKSEL_CON 0x14A
+#define MT6331_TOP_CKSEL_CON_SET 0x14C
+#define MT6331_TOP_CKSEL_CON_CLR 0x14E
+#define MT6331_TOP_CKHWEN_CON 0x150
+#define MT6331_TOP_CKHWEN_CON_SET 0x152
+#define MT6331_TOP_CKHWEN_CON_CLR 0x154
+#define MT6331_TOP_CKTST_CON0 0x156
+#define MT6331_TOP_CKTST_CON1 0x158
+#define MT6331_TOP_CLKSQ 0x15A
+#define MT6331_TOP_CLKSQ_SET 0x15C
+#define MT6331_TOP_CLKSQ_CLR 0x15E
+#define MT6331_TOP_RST_CON 0x160
+#define MT6331_TOP_RST_CON_SET 0x162
+#define MT6331_TOP_RST_CON_CLR 0x164
+#define MT6331_TOP_RST_MISC 0x166
+#define MT6331_TOP_RST_MISC_SET 0x168
+#define MT6331_TOP_RST_MISC_CLR 0x16A
+#define MT6331_INT_CON0 0x16C
+#define MT6331_INT_CON0_SET 0x16E
+#define MT6331_INT_CON0_CLR 0x170
+#define MT6331_INT_CON1 0x172
+#define MT6331_INT_CON1_SET 0x174
+#define MT6331_INT_CON1_CLR 0x176
+#define MT6331_INT_MISC_CON 0x178
+#define MT6331_INT_MISC_CON_SET 0x17A
+#define MT6331_INT_MISC_CON_CLR 0x17C
+#define MT6331_INT_STATUS_CON0 0x17E
+#define MT6331_INT_STATUS_CON1 0x180
+#define MT6331_OC_GEAR_0 0x182
+#define MT6331_FQMTR_CON0 0x184
+#define MT6331_FQMTR_CON1 0x186
+#define MT6331_FQMTR_CON2 0x188
+#define MT6331_RG_SPI_CON 0x18A
+#define MT6331_DEW_DIO_EN 0x18C
+#define MT6331_DEW_READ_TEST 0x18E
+#define MT6331_DEW_WRITE_TEST 0x190
+#define MT6331_DEW_CRC_SWRST 0x192
+#define MT6331_DEW_CRC_EN 0x194
+#define MT6331_DEW_CRC_VAL 0x196
+#define MT6331_DEW_DBG_MON_SEL 0x198
+#define MT6331_DEW_CIPHER_KEY_SEL 0x19A
+#define MT6331_DEW_CIPHER_IV_SEL 0x19C
+#define MT6331_DEW_CIPHER_EN 0x19E
+#define MT6331_DEW_CIPHER_RDY 0x1A0
+#define MT6331_DEW_CIPHER_MODE 0x1A2
+#define MT6331_DEW_CIPHER_SWRST 0x1A4
+#define MT6331_DEW_RDDMY_NO 0x1A6
+#define MT6331_INT_TYPE_CON0 0x1A8
+#define MT6331_INT_TYPE_CON0_SET 0x1AA
+#define MT6331_INT_TYPE_CON0_CLR 0x1AC
+#define MT6331_INT_TYPE_CON1 0x1AE
+#define MT6331_INT_TYPE_CON1_SET 0x1B0
+#define MT6331_INT_TYPE_CON1_CLR 0x1B2
+#define MT6331_INT_STA 0x1B4
+#define MT6331_BUCK_ALL_CON0 0x200
+#define MT6331_BUCK_ALL_CON1 0x202
+#define MT6331_BUCK_ALL_CON2 0x204
+#define MT6331_BUCK_ALL_CON3 0x206
+#define MT6331_BUCK_ALL_CON4 0x208
+#define MT6331_BUCK_ALL_CON5 0x20A
+#define MT6331_BUCK_ALL_CON6 0x20C
+#define MT6331_BUCK_ALL_CON7 0x20E
+#define MT6331_BUCK_ALL_CON8 0x210
+#define MT6331_BUCK_ALL_CON9 0x212
+#define MT6331_BUCK_ALL_CON10 0x214
+#define MT6331_BUCK_ALL_CON11 0x216
+#define MT6331_BUCK_ALL_CON12 0x218
+#define MT6331_BUCK_ALL_CON13 0x21A
+#define MT6331_BUCK_ALL_CON14 0x21C
+#define MT6331_BUCK_ALL_CON15 0x21E
+#define MT6331_BUCK_ALL_CON16 0x220
+#define MT6331_BUCK_ALL_CON17 0x222
+#define MT6331_BUCK_ALL_CON18 0x224
+#define MT6331_BUCK_ALL_CON19 0x226
+#define MT6331_BUCK_ALL_CON20 0x228
+#define MT6331_BUCK_ALL_CON21 0x22A
+#define MT6331_BUCK_ALL_CON22 0x22C
+#define MT6331_BUCK_ALL_CON23 0x22E
+#define MT6331_BUCK_ALL_CON24 0x230
+#define MT6331_BUCK_ALL_CON25 0x232
+#define MT6331_BUCK_ALL_CON26 0x234
+#define MT6331_VDVFS11_CON0 0x236
+#define MT6331_VDVFS11_CON1 0x238
+#define MT6331_VDVFS11_CON2 0x23A
+#define MT6331_VDVFS11_CON3 0x23C
+#define MT6331_VDVFS11_CON4 0x23E
+#define MT6331_VDVFS11_CON5 0x240
+#define MT6331_VDVFS11_CON6 0x242
+#define MT6331_VDVFS11_CON7 0x244
+#define MT6331_VDVFS11_CON8 0x246
+#define MT6331_VDVFS11_CON9 0x248
+#define MT6331_VDVFS11_CON10 0x24A
+#define MT6331_VDVFS11_CON11 0x24C
+#define MT6331_VDVFS11_CON12 0x24E
+#define MT6331_VDVFS11_CON13 0x250
+#define MT6331_VDVFS11_CON14 0x252
+#define MT6331_VDVFS11_CON18 0x25A
+#define MT6331_VDVFS11_CON19 0x25C
+#define MT6331_VDVFS11_CON20 0x25E
+#define MT6331_VDVFS11_CON21 0x260
+#define MT6331_VDVFS11_CON22 0x262
+#define MT6331_VDVFS11_CON23 0x264
+#define MT6331_VDVFS11_CON24 0x266
+#define MT6331_VDVFS11_CON25 0x268
+#define MT6331_VDVFS11_CON26 0x26A
+#define MT6331_VDVFS11_CON27 0x26C
+#define MT6331_VDVFS12_CON0 0x26E
+#define MT6331_VDVFS12_CON1 0x270
+#define MT6331_VDVFS12_CON2 0x272
+#define MT6331_VDVFS12_CON3 0x274
+#define MT6331_VDVFS12_CON4 0x276
+#define MT6331_VDVFS12_CON5 0x278
+#define MT6331_VDVFS12_CON6 0x27A
+#define MT6331_VDVFS12_CON7 0x27C
+#define MT6331_VDVFS12_CON8 0x27E
+#define MT6331_VDVFS12_CON9 0x280
+#define MT6331_VDVFS12_CON10 0x282
+#define MT6331_VDVFS12_CON11 0x284
+#define MT6331_VDVFS12_CON12 0x286
+#define MT6331_VDVFS12_CON13 0x288
+#define MT6331_VDVFS12_CON14 0x28A
+#define MT6331_VDVFS12_CON18 0x292
+#define MT6331_VDVFS12_CON19 0x294
+#define MT6331_VDVFS12_CON20 0x296
+#define MT6331_VDVFS13_CON0 0x298
+#define MT6331_VDVFS13_CON1 0x29A
+#define MT6331_VDVFS13_CON2 0x29C
+#define MT6331_VDVFS13_CON3 0x29E
+#define MT6331_VDVFS13_CON4 0x2A0
+#define MT6331_VDVFS13_CON5 0x2A2
+#define MT6331_VDVFS13_CON6 0x2A4
+#define MT6331_VDVFS13_CON7 0x2A6
+#define MT6331_VDVFS13_CON8 0x2A8
+#define MT6331_VDVFS13_CON9 0x2AA
+#define MT6331_VDVFS13_CON10 0x2AC
+#define MT6331_VDVFS13_CON11 0x2AE
+#define MT6331_VDVFS13_CON12 0x2B0
+#define MT6331_VDVFS13_CON13 0x2B2
+#define MT6331_VDVFS13_CON14 0x2B4
+#define MT6331_VDVFS13_CON18 0x2BC
+#define MT6331_VDVFS13_CON19 0x2BE
+#define MT6331_VDVFS13_CON20 0x2C0
+#define MT6331_VDVFS14_CON0 0x2C2
+#define MT6331_VDVFS14_CON1 0x2C4
+#define MT6331_VDVFS14_CON2 0x2C6
+#define MT6331_VDVFS14_CON3 0x2C8
+#define MT6331_VDVFS14_CON4 0x2CA
+#define MT6331_VDVFS14_CON5 0x2CC
+#define MT6331_VDVFS14_CON6 0x2CE
+#define MT6331_VDVFS14_CON7 0x2D0
+#define MT6331_VDVFS14_CON8 0x2D2
+#define MT6331_VDVFS14_CON9 0x2D4
+#define MT6331_VDVFS14_CON10 0x2D6
+#define MT6331_VDVFS14_CON11 0x2D8
+#define MT6331_VDVFS14_CON12 0x2DA
+#define MT6331_VDVFS14_CON13 0x2DC
+#define MT6331_VDVFS14_CON14 0x2DE
+#define MT6331_VDVFS14_CON18 0x2E6
+#define MT6331_VDVFS14_CON19 0x2E8
+#define MT6331_VDVFS14_CON20 0x2EA
+#define MT6331_VGPU_CON0 0x300
+#define MT6331_VGPU_CON1 0x302
+#define MT6331_VGPU_CON2 0x304
+#define MT6331_VGPU_CON3 0x306
+#define MT6331_VGPU_CON4 0x308
+#define MT6331_VGPU_CON5 0x30A
+#define MT6331_VGPU_CON6 0x30C
+#define MT6331_VGPU_CON7 0x30E
+#define MT6331_VGPU_CON8 0x310
+#define MT6331_VGPU_CON9 0x312
+#define MT6331_VGPU_CON10 0x314
+#define MT6331_VGPU_CON11 0x316
+#define MT6331_VGPU_CON12 0x318
+#define MT6331_VGPU_CON13 0x31A
+#define MT6331_VGPU_CON14 0x31C
+#define MT6331_VGPU_CON15 0x31E
+#define MT6331_VGPU_CON16 0x320
+#define MT6331_VGPU_CON17 0x322
+#define MT6331_VGPU_CON18 0x324
+#define MT6331_VGPU_CON19 0x326
+#define MT6331_VGPU_CON20 0x328
+#define MT6331_VCORE1_CON0 0x32A
+#define MT6331_VCORE1_CON1 0x32C
+#define MT6331_VCORE1_CON2 0x32E
+#define MT6331_VCORE1_CON3 0x330
+#define MT6331_VCORE1_CON4 0x332
+#define MT6331_VCORE1_CON5 0x334
+#define MT6331_VCORE1_CON6 0x336
+#define MT6331_VCORE1_CON7 0x338
+#define MT6331_VCORE1_CON8 0x33A
+#define MT6331_VCORE1_CON9 0x33C
+#define MT6331_VCORE1_CON10 0x33E
+#define MT6331_VCORE1_CON11 0x340
+#define MT6331_VCORE1_CON12 0x342
+#define MT6331_VCORE1_CON13 0x344
+#define MT6331_VCORE1_CON14 0x346
+#define MT6331_VCORE1_CON15 0x348
+#define MT6331_VCORE1_CON16 0x34A
+#define MT6331_VCORE1_CON17 0x34C
+#define MT6331_VCORE1_CON18 0x34E
+#define MT6331_VCORE1_CON19 0x350
+#define MT6331_VCORE1_CON20 0x352
+#define MT6331_VCORE2_CON0 0x354
+#define MT6331_VCORE2_CON1 0x356
+#define MT6331_VCORE2_CON2 0x358
+#define MT6331_VCORE2_CON3 0x35A
+#define MT6331_VCORE2_CON4 0x35C
+#define MT6331_VCORE2_CON5 0x35E
+#define MT6331_VCORE2_CON6 0x360
+#define MT6331_VCORE2_CON7 0x362
+#define MT6331_VCORE2_CON8 0x364
+#define MT6331_VCORE2_CON9 0x366
+#define MT6331_VCORE2_CON10 0x368
+#define MT6331_VCORE2_CON11 0x36A
+#define MT6331_VCORE2_CON12 0x36C
+#define MT6331_VCORE2_CON13 0x36E
+#define MT6331_VCORE2_CON14 0x370
+#define MT6331_VCORE2_CON15 0x372
+#define MT6331_VCORE2_CON16 0x374
+#define MT6331_VCORE2_CON17 0x376
+#define MT6331_VCORE2_CON18 0x378
+#define MT6331_VCORE2_CON19 0x37A
+#define MT6331_VCORE2_CON20 0x37C
+#define MT6331_VCORE2_CON21 0x37E
+#define MT6331_VIO18_CON0 0x380
+#define MT6331_VIO18_CON1 0x382
+#define MT6331_VIO18_CON2 0x384
+#define MT6331_VIO18_CON3 0x386
+#define MT6331_VIO18_CON4 0x388
+#define MT6331_VIO18_CON5 0x38A
+#define MT6331_VIO18_CON6 0x38C
+#define MT6331_VIO18_CON7 0x38E
+#define MT6331_VIO18_CON8 0x390
+#define MT6331_VIO18_CON9 0x392
+#define MT6331_VIO18_CON10 0x394
+#define MT6331_VIO18_CON11 0x396
+#define MT6331_VIO18_CON12 0x398
+#define MT6331_VIO18_CON13 0x39A
+#define MT6331_VIO18_CON14 0x39C
+#define MT6331_VIO18_CON15 0x39E
+#define MT6331_VIO18_CON16 0x3A0
+#define MT6331_VIO18_CON17 0x3A2
+#define MT6331_VIO18_CON18 0x3A4
+#define MT6331_VIO18_CON19 0x3A6
+#define MT6331_VIO18_CON20 0x3A8
+#define MT6331_BUCK_K_CON0 0x3AA
+#define MT6331_BUCK_K_CON1 0x3AC
+#define MT6331_BUCK_K_CON2 0x3AE
+#define MT6331_BUCK_K_CON3 0x3B0
+#define MT6331_ZCD_CON0 0x400
+#define MT6331_ZCD_CON1 0x402
+#define MT6331_ZCD_CON2 0x404
+#define MT6331_ZCD_CON3 0x406
+#define MT6331_ZCD_CON4 0x408
+#define MT6331_ZCD_CON5 0x40A
+#define MT6331_ISINK0_CON0 0x40C
+#define MT6331_ISINK0_CON1 0x40E
+#define MT6331_ISINK0_CON2 0x410
+#define MT6331_ISINK0_CON3 0x412
+#define MT6331_ISINK0_CON4 0x414
+#define MT6331_ISINK1_CON0 0x416
+#define MT6331_ISINK1_CON1 0x418
+#define MT6331_ISINK1_CON2 0x41A
+#define MT6331_ISINK1_CON3 0x41C
+#define MT6331_ISINK1_CON4 0x41E
+#define MT6331_ISINK2_CON0 0x420
+#define MT6331_ISINK2_CON1 0x422
+#define MT6331_ISINK2_CON2 0x424
+#define MT6331_ISINK2_CON3 0x426
+#define MT6331_ISINK2_CON4 0x428
+#define MT6331_ISINK3_CON0 0x42A
+#define MT6331_ISINK3_CON1 0x42C
+#define MT6331_ISINK3_CON2 0x42E
+#define MT6331_ISINK3_CON3 0x430
+#define MT6331_ISINK3_CON4 0x432
+#define MT6331_ISINK_ANA0 0x434
+#define MT6331_ISINK_ANA1 0x436
+#define MT6331_ISINK_PHASE_DLY 0x438
+#define MT6331_ISINK_EN_CTRL 0x43A
+#define MT6331_ANALDO_CON0 0x500
+#define MT6331_ANALDO_CON1 0x502
+#define MT6331_ANALDO_CON2 0x504
+#define MT6331_ANALDO_CON3 0x506
+#define MT6331_ANALDO_CON4 0x508
+#define MT6331_ANALDO_CON5 0x50A
+#define MT6331_ANALDO_CON6 0x50C
+#define MT6331_ANALDO_CON7 0x50E
+#define MT6331_ANALDO_CON8 0x510
+#define MT6331_ANALDO_CON9 0x512
+#define MT6331_ANALDO_CON10 0x514
+#define MT6331_ANALDO_CON11 0x516
+#define MT6331_ANALDO_CON12 0x518
+#define MT6331_ANALDO_CON13 0x51A
+#define MT6331_SYSLDO_CON0 0x51C
+#define MT6331_SYSLDO_CON1 0x51E
+#define MT6331_SYSLDO_CON2 0x520
+#define MT6331_SYSLDO_CON3 0x522
+#define MT6331_SYSLDO_CON4 0x524
+#define MT6331_SYSLDO_CON5 0x526
+#define MT6331_SYSLDO_CON6 0x528
+#define MT6331_SYSLDO_CON7 0x52A
+#define MT6331_SYSLDO_CON8 0x52C
+#define MT6331_SYSLDO_CON9 0x52E
+#define MT6331_SYSLDO_CON10 0x530
+#define MT6331_SYSLDO_CON11 0x532
+#define MT6331_SYSLDO_CON12 0x534
+#define MT6331_SYSLDO_CON13 0x536
+#define MT6331_SYSLDO_CON14 0x538
+#define MT6331_SYSLDO_CON15 0x53A
+#define MT6331_SYSLDO_CON16 0x53C
+#define MT6331_SYSLDO_CON17 0x53E
+#define MT6331_SYSLDO_CON18 0x540
+#define MT6331_SYSLDO_CON19 0x542
+#define MT6331_SYSLDO_CON20 0x544
+#define MT6331_SYSLDO_CON21 0x546
+#define MT6331_DIGLDO_CON0 0x548
+#define MT6331_DIGLDO_CON1 0x54A
+#define MT6331_DIGLDO_CON2 0x54C
+#define MT6331_DIGLDO_CON3 0x54E
+#define MT6331_DIGLDO_CON4 0x550
+#define MT6331_DIGLDO_CON5 0x552
+#define MT6331_DIGLDO_CON6 0x554
+#define MT6331_DIGLDO_CON7 0x556
+#define MT6331_DIGLDO_CON8 0x558
+#define MT6331_DIGLDO_CON9 0x55A
+#define MT6331_DIGLDO_CON10 0x55C
+#define MT6331_DIGLDO_CON11 0x55E
+#define MT6331_DIGLDO_CON12 0x560
+#define MT6331_DIGLDO_CON13 0x562
+#define MT6331_DIGLDO_CON14 0x564
+#define MT6331_DIGLDO_CON15 0x566
+#define MT6331_DIGLDO_CON16 0x568
+#define MT6331_DIGLDO_CON17 0x56A
+#define MT6331_DIGLDO_CON18 0x56C
+#define MT6331_DIGLDO_CON19 0x56E
+#define MT6331_DIGLDO_CON20 0x570
+#define MT6331_DIGLDO_CON21 0x572
+#define MT6331_DIGLDO_CON22 0x574
+#define MT6331_DIGLDO_CON23 0x576
+#define MT6331_DIGLDO_CON24 0x578
+#define MT6331_DIGLDO_CON25 0x57A
+#define MT6331_DIGLDO_CON26 0x57C
+#define MT6331_DIGLDO_CON27 0x57E
+#define MT6331_DIGLDO_CON28 0x580
+#define MT6331_OTP_CON0 0x600
+#define MT6331_OTP_CON1 0x602
+#define MT6331_OTP_CON2 0x604
+#define MT6331_OTP_CON3 0x606
+#define MT6331_OTP_CON4 0x608
+#define MT6331_OTP_CON5 0x60A
+#define MT6331_OTP_CON6 0x60C
+#define MT6331_OTP_CON7 0x60E
+#define MT6331_OTP_CON8 0x610
+#define MT6331_OTP_CON9 0x612
+#define MT6331_OTP_CON10 0x614
+#define MT6331_OTP_CON11 0x616
+#define MT6331_OTP_CON12 0x618
+#define MT6331_OTP_CON13 0x61A
+#define MT6331_OTP_CON14 0x61C
+#define MT6331_OTP_DOUT_0_15 0x61E
+#define MT6331_OTP_DOUT_16_31 0x620
+#define MT6331_OTP_DOUT_32_47 0x622
+#define MT6331_OTP_DOUT_48_63 0x624
+#define MT6331_OTP_DOUT_64_79 0x626
+#define MT6331_OTP_DOUT_80_95 0x628
+#define MT6331_OTP_DOUT_96_111 0x62A
+#define MT6331_OTP_DOUT_112_127 0x62C
+#define MT6331_OTP_DOUT_128_143 0x62E
+#define MT6331_OTP_DOUT_144_159 0x630
+#define MT6331_OTP_DOUT_160_175 0x632
+#define MT6331_OTP_DOUT_176_191 0x634
+#define MT6331_OTP_DOUT_192_207 0x636
+#define MT6331_OTP_DOUT_208_223 0x638
+#define MT6331_OTP_DOUT_224_239 0x63A
+#define MT6331_OTP_DOUT_240_255 0x63C
+#define MT6331_OTP_VAL_0_15 0x63E
+#define MT6331_OTP_VAL_16_31 0x640
+#define MT6331_OTP_VAL_32_47 0x642
+#define MT6331_OTP_VAL_48_63 0x644
+#define MT6331_OTP_VAL_64_79 0x646
+#define MT6331_OTP_VAL_80_95 0x648
+#define MT6331_OTP_VAL_96_111 0x64A
+#define MT6331_OTP_VAL_112_127 0x64C
+#define MT6331_OTP_VAL_128_143 0x64E
+#define MT6331_OTP_VAL_144_159 0x650
+#define MT6331_OTP_VAL_160_175 0x652
+#define MT6331_OTP_VAL_176_191 0x654
+#define MT6331_OTP_VAL_192_207 0x656
+#define MT6331_OTP_VAL_208_223 0x658
+#define MT6331_OTP_VAL_224_239 0x65A
+#define MT6331_OTP_VAL_240_255 0x65C
+#define MT6331_RTC_MIX_CON0 0x65E
+#define MT6331_RTC_MIX_CON1 0x660
+#define MT6331_AUDDAC_CFG0 0x662
+#define MT6331_AUDBUF_CFG0 0x664
+#define MT6331_AUDBUF_CFG1 0x666
+#define MT6331_AUDBUF_CFG2 0x668
+#define MT6331_AUDBUF_CFG3 0x66A
+#define MT6331_AUDBUF_CFG4 0x66C
+#define MT6331_AUDBUF_CFG5 0x66E
+#define MT6331_AUDBUF_CFG6 0x670
+#define MT6331_AUDBUF_CFG7 0x672
+#define MT6331_AUDBUF_CFG8 0x674
+#define MT6331_IBIASDIST_CFG0 0x676
+#define MT6331_AUDCLKGEN_CFG0 0x678
+#define MT6331_AUDLDO_CFG0 0x67A
+#define MT6331_AUDDCDC_CFG0 0x67C
+#define MT6331_AUDDCDC_CFG1 0x67E
+#define MT6331_AUDNVREGGLB_CFG0 0x680
+#define MT6331_AUD_NCP0 0x682
+#define MT6331_AUD_ZCD_CFG0 0x684
+#define MT6331_AUDPREAMP_CFG0 0x686
+#define MT6331_AUDPREAMP_CFG1 0x688
+#define MT6331_AUDPREAMP_CFG2 0x68A
+#define MT6331_AUDADC_CFG0 0x68C
+#define MT6331_AUDADC_CFG1 0x68E
+#define MT6331_AUDADC_CFG2 0x690
+#define MT6331_AUDADC_CFG3 0x692
+#define MT6331_AUDADC_CFG4 0x694
+#define MT6331_AUDADC_CFG5 0x696
+#define MT6331_AUDDIGMI_CFG0 0x698
+#define MT6331_AUDDIGMI_CFG1 0x69A
+#define MT6331_AUDMICBIAS_CFG0 0x69C
+#define MT6331_AUDMICBIAS_CFG1 0x69E
+#define MT6331_AUDENCSPARE_CFG0 0x6A0
+#define MT6331_AUDPREAMPGAIN_CFG0 0x6A2
+#define MT6331_AUDMADPLL_CFG0 0x6A4
+#define MT6331_AUDMADPLL_CFG1 0x6A6
+#define MT6331_AUDMADPLL_CFG2 0x6A8
+#define MT6331_AUDLDO_NVREG_CFG0 0x6AA
+#define MT6331_AUDLDO_NVREG_CFG1 0x6AC
+#define MT6331_AUDLDO_NVREG_CFG2 0x6AE
+#define MT6331_AUXADC_ADC0 0x700
+#define MT6331_AUXADC_ADC1 0x702
+#define MT6331_AUXADC_ADC2 0x704
+#define MT6331_AUXADC_ADC3 0x706
+#define MT6331_AUXADC_ADC4 0x708
+#define MT6331_AUXADC_ADC5 0x70A
+#define MT6331_AUXADC_ADC6 0x70C
+#define MT6331_AUXADC_ADC7 0x70E
+#define MT6331_AUXADC_ADC8 0x710
+#define MT6331_AUXADC_ADC9 0x712
+#define MT6331_AUXADC_ADC10 0x714
+#define MT6331_AUXADC_ADC11 0x716
+#define MT6331_AUXADC_ADC12 0x718
+#define MT6331_AUXADC_ADC13 0x71A
+#define MT6331_AUXADC_ADC14 0x71C
+#define MT6331_AUXADC_ADC15 0x71E
+#define MT6331_AUXADC_ADC16 0x720
+#define MT6331_AUXADC_ADC17 0x722
+#define MT6331_AUXADC_ADC18 0x724
+#define MT6331_AUXADC_ADC19 0x726
+#define MT6331_AUXADC_STA0 0x728
+#define MT6331_AUXADC_STA1 0x72A
+#define MT6331_AUXADC_RQST0 0x72C
+#define MT6331_AUXADC_RQST0_SET 0x72E
+#define MT6331_AUXADC_RQST0_CLR 0x730
+#define MT6331_AUXADC_RQST1 0x732
+#define MT6331_AUXADC_RQST1_SET 0x734
+#define MT6331_AUXADC_RQST1_CLR 0x736
+#define MT6331_AUXADC_CON0 0x738
+#define MT6331_AUXADC_CON1 0x73A
+#define MT6331_AUXADC_CON2 0x73C
+#define MT6331_AUXADC_CON3 0x73E
+#define MT6331_AUXADC_CON4 0x740
+#define MT6331_AUXADC_CON5 0x742
+#define MT6331_AUXADC_CON6 0x744
+#define MT6331_AUXADC_CON7 0x746
+#define MT6331_AUXADC_CON8 0x748
+#define MT6331_AUXADC_CON9 0x74A
+#define MT6331_AUXADC_CON10 0x74C
+#define MT6331_AUXADC_CON11 0x74E
+#define MT6331_AUXADC_CON12 0x750
+#define MT6331_AUXADC_CON13 0x752
+#define MT6331_AUXADC_CON14 0x754
+#define MT6331_AUXADC_CON15 0x756
+#define MT6331_AUXADC_CON16 0x758
+#define MT6331_AUXADC_CON17 0x75A
+#define MT6331_AUXADC_CON18 0x75C
+#define MT6331_AUXADC_CON19 0x75E
+#define MT6331_AUXADC_CON20 0x760
+#define MT6331_AUXADC_CON21 0x762
+#define MT6331_AUXADC_CON22 0x764
+#define MT6331_AUXADC_CON23 0x766
+#define MT6331_AUXADC_CON24 0x768
+#define MT6331_AUXADC_CON25 0x76A
+#define MT6331_AUXADC_CON26 0x76C
+#define MT6331_AUXADC_CON27 0x76E
+#define MT6331_AUXADC_CON28 0x770
+#define MT6331_AUXADC_CON29 0x772
+#define MT6331_AUXADC_CON30 0x774
+#define MT6331_AUXADC_CON31 0x776
+#define MT6331_AUXADC_CON32 0x778
+#define MT6331_ACCDET_CON0 0x77A
+#define MT6331_ACCDET_CON1 0x77C
+#define MT6331_ACCDET_CON2 0x77E
+#define MT6331_ACCDET_CON3 0x780
+#define MT6331_ACCDET_CON4 0x782
+#define MT6331_ACCDET_CON5 0x784
+#define MT6331_ACCDET_CON6 0x786
+#define MT6331_ACCDET_CON7 0x788
+#define MT6331_ACCDET_CON8 0x78A
+#define MT6331_ACCDET_CON9 0x78C
+#define MT6331_ACCDET_CON10 0x78E
+#define MT6331_ACCDET_CON11 0x790
+#define MT6331_ACCDET_CON12 0x792
+#define MT6331_ACCDET_CON13 0x794
+#define MT6331_ACCDET_CON14 0x796
+#define MT6331_ACCDET_CON15 0x798
+#define MT6331_ACCDET_CON16 0x79A
+#define MT6331_ACCDET_CON17 0x79C
+#define MT6331_ACCDET_CON18 0x79E
+#define MT6331_ACCDET_CON19 0x7A0
+#define MT6331_ACCDET_CON20 0x7A2
+#define MT6331_ACCDET_CON21 0x7A4
+#define MT6331_ACCDET_CON22 0x7A6
+#define MT6331_ACCDET_CON23 0x7A8
+#define MT6331_ACCDET_CON24 0x7AA
+
+#endif /* __MFD_MT6331_REGISTERS_H__ */
diff --git a/include/linux/mfd/mt6332/core.h b/include/linux/mfd/mt6332/core.h
new file mode 100644
index 000000000000..cd6013eb82d9
--- /dev/null
+++ b/include/linux/mfd/mt6332/core.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __MFD_MT6332_CORE_H__
+#define __MFD_MT6332_CORE_H__
+
+enum mt6332_irq_status_numbers {
+ MT6332_IRQ_STATUS_CHR_COMPLETE = 0,
+ MT6332_IRQ_STATUS_THERMAL_SD,
+ MT6332_IRQ_STATUS_THERMAL_REG_IN,
+ MT6332_IRQ_STATUS_THERMAL_REG_OUT,
+ MT6332_IRQ_STATUS_OTG_OC,
+ MT6332_IRQ_STATUS_CHR_OC,
+ MT6332_IRQ_STATUS_OTG_THERMAL,
+ MT6332_IRQ_STATUS_CHRIN_SHORT,
+ MT6332_IRQ_STATUS_DRVCDT_SHORT,
+ MT6332_IRQ_STATUS_PLUG_IN_FLASH,
+ MT6332_IRQ_STATUS_CHRWDT_FLAG,
+ MT6332_IRQ_STATUS_FLASH_EN_TIMEOUT,
+ MT6332_IRQ_STATUS_FLASH_VLED1_SHORT,
+ MT6332_IRQ_STATUS_FLASH_VLED1_OPEN = 13,
+ MT6332_IRQ_STATUS_OV = 16,
+ MT6332_IRQ_STATUS_BVALID_DET,
+ MT6332_IRQ_STATUS_VBATON_UNDET,
+ MT6332_IRQ_STATUS_CHR_PLUG_IN,
+ MT6332_IRQ_STATUS_CHR_PLUG_OUT,
+ MT6332_IRQ_STATUS_BC11_TIMEOUT,
+ MT6332_IRQ_STATUS_FLASH_VLED2_SHORT,
+ MT6332_IRQ_STATUS_FLASH_VLED2_OPEN = 23,
+ MT6332_IRQ_STATUS_THR_H = 32,
+ MT6332_IRQ_STATUS_THR_L,
+ MT6332_IRQ_STATUS_BAT_H,
+ MT6332_IRQ_STATUS_BAT_L,
+ MT6332_IRQ_STATUS_M3_H,
+ MT6332_IRQ_STATUS_M3_L,
+ MT6332_IRQ_STATUS_FG_BAT_H,
+ MT6332_IRQ_STATUS_FG_BAT_L,
+ MT6332_IRQ_STATUS_FG_CUR_H,
+ MT6332_IRQ_STATUS_FG_CUR_L,
+ MT6332_IRQ_STATUS_SPKL_D,
+ MT6332_IRQ_STATUS_SPKL_AB,
+ MT6332_IRQ_STATUS_BIF,
+ MT6332_IRQ_STATUS_VWLED_OC = 45,
+ MT6332_IRQ_STATUS_VDRAM_OC = 48,
+ MT6332_IRQ_STATUS_VDVFS2_OC,
+ MT6332_IRQ_STATUS_VRF1_OC,
+ MT6332_IRQ_STATUS_VRF2_OC,
+ MT6332_IRQ_STATUS_VPA_OC,
+ MT6332_IRQ_STATUS_VSBST_OC,
+ MT6332_IRQ_STATUS_LDO_OC,
+ MT6332_IRQ_STATUS_NR,
+};
+
+#define MT6332_IRQ_CON0_BASE MT6332_IRQ_STATUS_CHR_COMPLETE
+#define MT6332_IRQ_CON0_BITS (MT6332_IRQ_STATUS_FLASH_VLED1_OPEN + 1)
+#define MT6332_IRQ_CON1_BASE MT6332_IRQ_STATUS_OV
+#define MT6332_IRQ_CON1_BITS (MT6332_IRQ_STATUS_FLASH_VLED2_OPEN - MT6332_IRQ_STATUS_OV + 1)
+#define MT6332_IRQ_CON2_BASE MT6332_IRQ_STATUS_THR_H
+#define MT6332_IRQ_CON2_BITS (MT6332_IRQ_STATUS_VWLED_OC - MT6332_IRQ_STATUS_THR_H + 1)
+#define MT6332_IRQ_CON3_BASE MT6332_IRQ_STATUS_VDRAM_OC
+#define MT6332_IRQ_CON3_BITS (MT6332_IRQ_STATUS_LDO_OC - MT6332_IRQ_STATUS_VDRAM_OC + 1)
+
+#endif /* __MFD_MT6332_CORE_H__ */
diff --git a/include/linux/mfd/mt6332/registers.h b/include/linux/mfd/mt6332/registers.h
new file mode 100644
index 000000000000..65e0b86fceac
--- /dev/null
+++ b/include/linux/mfd/mt6332/registers.h
@@ -0,0 +1,642 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __MFD_MT6332_REGISTERS_H__
+#define __MFD_MT6332_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6332_HWCID 0x8000
+#define MT6332_SWCID 0x8002
+#define MT6332_TOP_CON 0x8004
+#define MT6332_DDR_VREF_AP_CON 0x8006
+#define MT6332_DDR_VREF_DQ_CON 0x8008
+#define MT6332_DDR_VREF_CA_CON 0x800A
+#define MT6332_TEST_OUT 0x800C
+#define MT6332_TEST_CON0 0x800E
+#define MT6332_TEST_CON1 0x8010
+#define MT6332_TESTMODE_SW 0x8012
+#define MT6332_TESTMODE_ANA 0x8014
+#define MT6332_TDSEL_CON 0x8016
+#define MT6332_RDSEL_CON 0x8018
+#define MT6332_SMT_CON0 0x801A
+#define MT6332_SMT_CON1 0x801C
+#define MT6332_DRV_CON0 0x801E
+#define MT6332_DRV_CON1 0x8020
+#define MT6332_DRV_CON2 0x8022
+#define MT6332_EN_STATUS0 0x8024
+#define MT6332_OCSTATUS0 0x8026
+#define MT6332_TOP_STATUS 0x8028
+#define MT6332_TOP_STATUS_SET 0x802A
+#define MT6332_TOP_STATUS_CLR 0x802C
+#define MT6332_FLASH_CON0 0x802E
+#define MT6332_FLASH_CON1 0x8030
+#define MT6332_FLASH_CON2 0x8032
+#define MT6332_CORE_CON0 0x8034
+#define MT6332_CORE_CON1 0x8036
+#define MT6332_CORE_CON2 0x8038
+#define MT6332_CORE_CON3 0x803A
+#define MT6332_CORE_CON4 0x803C
+#define MT6332_CORE_CON5 0x803E
+#define MT6332_CORE_CON6 0x8040
+#define MT6332_CORE_CON7 0x8042
+#define MT6332_CORE_CON8 0x8044
+#define MT6332_CORE_CON9 0x8046
+#define MT6332_CORE_CON10 0x8048
+#define MT6332_CORE_CON11 0x804A
+#define MT6332_CORE_CON12 0x804C
+#define MT6332_CORE_CON13 0x804E
+#define MT6332_CORE_CON14 0x8050
+#define MT6332_CORE_CON15 0x8052
+#define MT6332_STA_CON0 0x8054
+#define MT6332_STA_CON1 0x8056
+#define MT6332_STA_CON2 0x8058
+#define MT6332_STA_CON3 0x805A
+#define MT6332_STA_CON4 0x805C
+#define MT6332_STA_CON5 0x805E
+#define MT6332_STA_CON6 0x8060
+#define MT6332_STA_CON7 0x8062
+#define MT6332_CHR_CON0 0x8064
+#define MT6332_CHR_CON1 0x8066
+#define MT6332_CHR_CON2 0x8068
+#define MT6332_CHR_CON3 0x806A
+#define MT6332_CHR_CON4 0x806C
+#define MT6332_CHR_CON5 0x806E
+#define MT6332_CHR_CON6 0x8070
+#define MT6332_CHR_CON7 0x8072
+#define MT6332_CHR_CON8 0x8074
+#define MT6332_CHR_CON9 0x8076
+#define MT6332_CHR_CON10 0x8078
+#define MT6332_CHR_CON11 0x807A
+#define MT6332_CHR_CON12 0x807C
+#define MT6332_CHR_CON13 0x807E
+#define MT6332_CHR_CON14 0x8080
+#define MT6332_CHR_CON15 0x8082
+#define MT6332_BOOST_CON0 0x8084
+#define MT6332_BOOST_CON1 0x8086
+#define MT6332_BOOST_CON2 0x8088
+#define MT6332_BOOST_CON3 0x808A
+#define MT6332_BOOST_CON4 0x808C
+#define MT6332_BOOST_CON5 0x808E
+#define MT6332_BOOST_CON6 0x8090
+#define MT6332_BOOST_CON7 0x8092
+#define MT6332_TOP_CKPDN_CON0 0x8094
+#define MT6332_TOP_CKPDN_CON0_SET 0x8096
+#define MT6332_TOP_CKPDN_CON0_CLR 0x8098
+#define MT6332_TOP_CKPDN_CON1 0x809A
+#define MT6332_TOP_CKPDN_CON1_SET 0x809C
+#define MT6332_TOP_CKPDN_CON1_CLR 0x809E
+#define MT6332_TOP_CKPDN_CON2 0x80A0
+#define MT6332_TOP_CKPDN_CON2_SET 0x80A2
+#define MT6332_TOP_CKPDN_CON2_CLR 0x80A4
+#define MT6332_TOP_CKSEL_CON0 0x80A6
+#define MT6332_TOP_CKSEL_CON0_SET 0x80A8
+#define MT6332_TOP_CKSEL_CON0_CLR 0x80AA
+#define MT6332_TOP_CKSEL_CON1 0x80AC
+#define MT6332_TOP_CKSEL_CON1_SET 0x80AE
+#define MT6332_TOP_CKSEL_CON1_CLR 0x80B0
+#define MT6332_TOP_CKHWEN_CON 0x80B2
+#define MT6332_TOP_CKHWEN_CON_SET 0x80B4
+#define MT6332_TOP_CKHWEN_CON_CLR 0x80B6
+#define MT6332_TOP_CKTST_CON0 0x80B8
+#define MT6332_TOP_CKTST_CON1 0x80BA
+#define MT6332_TOP_RST_CON 0x80BC
+#define MT6332_TOP_RST_CON_SET 0x80BE
+#define MT6332_TOP_RST_CON_CLR 0x80C0
+#define MT6332_TOP_RST_MISC 0x80C2
+#define MT6332_TOP_RST_MISC_SET 0x80C4
+#define MT6332_TOP_RST_MISC_CLR 0x80C6
+#define MT6332_INT_CON0 0x80C8
+#define MT6332_INT_CON0_SET 0x80CA
+#define MT6332_INT_CON0_CLR 0x80CC
+#define MT6332_INT_CON1 0x80CE
+#define MT6332_INT_CON1_SET 0x80D0
+#define MT6332_INT_CON1_CLR 0x80D2
+#define MT6332_INT_CON2 0x80D4
+#define MT6332_INT_CON2_SET 0x80D6
+#define MT6332_INT_CON2_CLR 0x80D8
+#define MT6332_INT_CON3 0x80DA
+#define MT6332_INT_CON3_SET 0x80DC
+#define MT6332_INT_CON3_CLR 0x80DE
+#define MT6332_CHRWDT_CON0 0x80E0
+#define MT6332_CHRWDT_STATUS0 0x80E2
+#define MT6332_INT_STATUS0 0x80E4
+#define MT6332_INT_STATUS1 0x80E6
+#define MT6332_INT_STATUS2 0x80E8
+#define MT6332_INT_STATUS3 0x80EA
+#define MT6332_OC_GEAR_0 0x80EC
+#define MT6332_OC_GEAR_1 0x80EE
+#define MT6332_OC_GEAR_2 0x80F0
+#define MT6332_INT_MISC_CON 0x80F2
+#define MT6332_RG_SPI_CON 0x80F4
+#define MT6332_DEW_DIO_EN 0x80F6
+#define MT6332_DEW_READ_TEST 0x80F8
+#define MT6332_DEW_WRITE_TEST 0x80FA
+#define MT6332_DEW_CRC_SWRST 0x80FC
+#define MT6332_DEW_CRC_EN 0x80FE
+#define MT6332_DEW_CRC_VAL 0x8100
+#define MT6332_DEW_DBG_MON_SEL 0x8102
+#define MT6332_DEW_CIPHER_KEY_SEL 0x8104
+#define MT6332_DEW_CIPHER_IV_SEL 0x8106
+#define MT6332_DEW_CIPHER_EN 0x8108
+#define MT6332_DEW_CIPHER_RDY 0x810A
+#define MT6332_DEW_CIPHER_MODE 0x810C
+#define MT6332_DEW_CIPHER_SWRST 0x810E
+#define MT6332_DEW_RDDMY_NO 0x8110
+#define MT6332_INT_STA 0x8112
+#define MT6332_BIF_CON0 0x8114
+#define MT6332_BIF_CON1 0x8116
+#define MT6332_BIF_CON2 0x8118
+#define MT6332_BIF_CON3 0x811A
+#define MT6332_BIF_CON4 0x811C
+#define MT6332_BIF_CON5 0x811E
+#define MT6332_BIF_CON6 0x8120
+#define MT6332_BIF_CON7 0x8122
+#define MT6332_BIF_CON8 0x8124
+#define MT6332_BIF_CON9 0x8126
+#define MT6332_BIF_CON10 0x8128
+#define MT6332_BIF_CON11 0x812A
+#define MT6332_BIF_CON12 0x812C
+#define MT6332_BIF_CON13 0x812E
+#define MT6332_BIF_CON14 0x8130
+#define MT6332_BIF_CON15 0x8132
+#define MT6332_BIF_CON16 0x8134
+#define MT6332_BIF_CON17 0x8136
+#define MT6332_BIF_CON18 0x8138
+#define MT6332_BIF_CON19 0x813A
+#define MT6332_BIF_CON20 0x813C
+#define MT6332_BIF_CON21 0x813E
+#define MT6332_BIF_CON22 0x8140
+#define MT6332_BIF_CON23 0x8142
+#define MT6332_BIF_CON24 0x8144
+#define MT6332_BIF_CON25 0x8146
+#define MT6332_BIF_CON26 0x8148
+#define MT6332_BIF_CON27 0x814A
+#define MT6332_BIF_CON28 0x814C
+#define MT6332_BIF_CON29 0x814E
+#define MT6332_BIF_CON30 0x8150
+#define MT6332_BIF_CON31 0x8152
+#define MT6332_BIF_CON32 0x8154
+#define MT6332_BIF_CON33 0x8156
+#define MT6332_BIF_CON34 0x8158
+#define MT6332_BIF_CON35 0x815A
+#define MT6332_BIF_CON36 0x815C
+#define MT6332_BATON_CON0 0x815E
+#define MT6332_BIF_CON37 0x8160
+#define MT6332_BIF_CON38 0x8162
+#define MT6332_CHR_CON16 0x8164
+#define MT6332_CHR_CON17 0x8166
+#define MT6332_CHR_CON18 0x8168
+#define MT6332_CHR_CON19 0x816A
+#define MT6332_CHR_CON20 0x816C
+#define MT6332_CHR_CON21 0x816E
+#define MT6332_CHR_CON22 0x8170
+#define MT6332_CHR_CON23 0x8172
+#define MT6332_CHR_CON24 0x8174
+#define MT6332_CHR_CON25 0x8176
+#define MT6332_STA_CON8 0x8178
+#define MT6332_BUCK_ALL_CON0 0x8400
+#define MT6332_BUCK_ALL_CON1 0x8402
+#define MT6332_BUCK_ALL_CON2 0x8404
+#define MT6332_BUCK_ALL_CON3 0x8406
+#define MT6332_BUCK_ALL_CON4 0x8408
+#define MT6332_BUCK_ALL_CON5 0x840A
+#define MT6332_BUCK_ALL_CON6 0x840C
+#define MT6332_BUCK_ALL_CON7 0x840E
+#define MT6332_BUCK_ALL_CON8 0x8410
+#define MT6332_BUCK_ALL_CON9 0x8412
+#define MT6332_BUCK_ALL_CON10 0x8414
+#define MT6332_BUCK_ALL_CON11 0x8416
+#define MT6332_BUCK_ALL_CON12 0x8418
+#define MT6332_BUCK_ALL_CON13 0x841A
+#define MT6332_BUCK_ALL_CON14 0x841C
+#define MT6332_BUCK_ALL_CON15 0x841E
+#define MT6332_BUCK_ALL_CON16 0x8420
+#define MT6332_BUCK_ALL_CON17 0x8422
+#define MT6332_BUCK_ALL_CON18 0x8424
+#define MT6332_BUCK_ALL_CON19 0x8426
+#define MT6332_BUCK_ALL_CON20 0x8428
+#define MT6332_BUCK_ALL_CON21 0x842A
+#define MT6332_BUCK_ALL_CON22 0x842C
+#define MT6332_BUCK_ALL_CON23 0x842E
+#define MT6332_BUCK_ALL_CON24 0x8430
+#define MT6332_BUCK_ALL_CON25 0x8432
+#define MT6332_BUCK_ALL_CON26 0x8434
+#define MT6332_BUCK_ALL_CON27 0x8436
+#define MT6332_VDRAM_CON0 0x8438
+#define MT6332_VDRAM_CON1 0x843A
+#define MT6332_VDRAM_CON2 0x843C
+#define MT6332_VDRAM_CON3 0x843E
+#define MT6332_VDRAM_CON4 0x8440
+#define MT6332_VDRAM_CON5 0x8442
+#define MT6332_VDRAM_CON6 0x8444
+#define MT6332_VDRAM_CON7 0x8446
+#define MT6332_VDRAM_CON8 0x8448
+#define MT6332_VDRAM_CON9 0x844A
+#define MT6332_VDRAM_CON10 0x844C
+#define MT6332_VDRAM_CON11 0x844E
+#define MT6332_VDRAM_CON12 0x8450
+#define MT6332_VDRAM_CON13 0x8452
+#define MT6332_VDRAM_CON14 0x8454
+#define MT6332_VDRAM_CON15 0x8456
+#define MT6332_VDRAM_CON16 0x8458
+#define MT6332_VDRAM_CON17 0x845A
+#define MT6332_VDRAM_CON18 0x845C
+#define MT6332_VDRAM_CON19 0x845E
+#define MT6332_VDRAM_CON20 0x8460
+#define MT6332_VDRAM_CON21 0x8462
+#define MT6332_VDVFS2_CON0 0x8464
+#define MT6332_VDVFS2_CON1 0x8466
+#define MT6332_VDVFS2_CON2 0x8468
+#define MT6332_VDVFS2_CON3 0x846A
+#define MT6332_VDVFS2_CON4 0x846C
+#define MT6332_VDVFS2_CON5 0x846E
+#define MT6332_VDVFS2_CON6 0x8470
+#define MT6332_VDVFS2_CON7 0x8472
+#define MT6332_VDVFS2_CON8 0x8474
+#define MT6332_VDVFS2_CON9 0x8476
+#define MT6332_VDVFS2_CON10 0x8478
+#define MT6332_VDVFS2_CON11 0x847A
+#define MT6332_VDVFS2_CON12 0x847C
+#define MT6332_VDVFS2_CON13 0x847E
+#define MT6332_VDVFS2_CON14 0x8480
+#define MT6332_VDVFS2_CON15 0x8482
+#define MT6332_VDVFS2_CON16 0x8484
+#define MT6332_VDVFS2_CON17 0x8486
+#define MT6332_VDVFS2_CON18 0x8488
+#define MT6332_VDVFS2_CON19 0x848A
+#define MT6332_VDVFS2_CON20 0x848C
+#define MT6332_VDVFS2_CON21 0x848E
+#define MT6332_VDVFS2_CON22 0x8490
+#define MT6332_VDVFS2_CON23 0x8492
+#define MT6332_VDVFS2_CON24 0x8494
+#define MT6332_VDVFS2_CON25 0x8496
+#define MT6332_VDVFS2_CON26 0x8498
+#define MT6332_VDVFS2_CON27 0x849A
+#define MT6332_VRF1_CON0 0x849C
+#define MT6332_VRF1_CON1 0x849E
+#define MT6332_VRF1_CON2 0x84A0
+#define MT6332_VRF1_CON3 0x84A2
+#define MT6332_VRF1_CON4 0x84A4
+#define MT6332_VRF1_CON5 0x84A6
+#define MT6332_VRF1_CON6 0x84A8
+#define MT6332_VRF1_CON7 0x84AA
+#define MT6332_VRF1_CON8 0x84AC
+#define MT6332_VRF1_CON9 0x84AE
+#define MT6332_VRF1_CON10 0x84B0
+#define MT6332_VRF1_CON11 0x84B2
+#define MT6332_VRF1_CON12 0x84B4
+#define MT6332_VRF1_CON13 0x84B6
+#define MT6332_VRF1_CON14 0x84B8
+#define MT6332_VRF1_CON15 0x84BA
+#define MT6332_VRF1_CON16 0x84BC
+#define MT6332_VRF1_CON17 0x84BE
+#define MT6332_VRF1_CON18 0x84C0
+#define MT6332_VRF1_CON19 0x84C2
+#define MT6332_VRF1_CON20 0x84C4
+#define MT6332_VRF1_CON21 0x84C6
+#define MT6332_VRF2_CON0 0x84C8
+#define MT6332_VRF2_CON1 0x84CA
+#define MT6332_VRF2_CON2 0x84CC
+#define MT6332_VRF2_CON3 0x84CE
+#define MT6332_VRF2_CON4 0x84D0
+#define MT6332_VRF2_CON5 0x84D2
+#define MT6332_VRF2_CON6 0x84D4
+#define MT6332_VRF2_CON7 0x84D6
+#define MT6332_VRF2_CON8 0x84D8
+#define MT6332_VRF2_CON9 0x84DA
+#define MT6332_VRF2_CON10 0x84DC
+#define MT6332_VRF2_CON11 0x84DE
+#define MT6332_VRF2_CON12 0x84E0
+#define MT6332_VRF2_CON13 0x84E2
+#define MT6332_VRF2_CON14 0x84E4
+#define MT6332_VRF2_CON15 0x84E6
+#define MT6332_VRF2_CON16 0x84E8
+#define MT6332_VRF2_CON17 0x84EA
+#define MT6332_VRF2_CON18 0x84EC
+#define MT6332_VRF2_CON19 0x84EE
+#define MT6332_VRF2_CON20 0x84F0
+#define MT6332_VRF2_CON21 0x84F2
+#define MT6332_VPA_CON0 0x84F4
+#define MT6332_VPA_CON1 0x84F6
+#define MT6332_VPA_CON2 0x84F8
+#define MT6332_VPA_CON3 0x84FC
+#define MT6332_VPA_CON4 0x84FE
+#define MT6332_VPA_CON5 0x8500
+#define MT6332_VPA_CON6 0x8502
+#define MT6332_VPA_CON7 0x8504
+#define MT6332_VPA_CON8 0x8506
+#define MT6332_VPA_CON9 0x8508
+#define MT6332_VPA_CON10 0x850A
+#define MT6332_VPA_CON11 0x850C
+#define MT6332_VPA_CON12 0x850E
+#define MT6332_VPA_CON13 0x8510
+#define MT6332_VPA_CON14 0x8512
+#define MT6332_VPA_CON15 0x8514
+#define MT6332_VPA_CON16 0x8516
+#define MT6332_VPA_CON17 0x8518
+#define MT6332_VPA_CON18 0x851A
+#define MT6332_VPA_CON19 0x851C
+#define MT6332_VPA_CON20 0x851E
+#define MT6332_VPA_CON21 0x8520
+#define MT6332_VPA_CON22 0x8522
+#define MT6332_VPA_CON23 0x8524
+#define MT6332_VPA_CON24 0x8526
+#define MT6332_VPA_CON25 0x8528
+#define MT6332_VSBST_CON0 0x852A
+#define MT6332_VSBST_CON1 0x852C
+#define MT6332_VSBST_CON2 0x852E
+#define MT6332_VSBST_CON3 0x8530
+#define MT6332_VSBST_CON4 0x8532
+#define MT6332_VSBST_CON5 0x8534
+#define MT6332_VSBST_CON6 0x8536
+#define MT6332_VSBST_CON7 0x8538
+#define MT6332_VSBST_CON8 0x853A
+#define MT6332_VSBST_CON9 0x853C
+#define MT6332_VSBST_CON10 0x853E
+#define MT6332_VSBST_CON11 0x8540
+#define MT6332_VSBST_CON12 0x8542
+#define MT6332_VSBST_CON13 0x8544
+#define MT6332_VSBST_CON14 0x8546
+#define MT6332_VSBST_CON15 0x8548
+#define MT6332_VSBST_CON16 0x854A
+#define MT6332_VSBST_CON17 0x854C
+#define MT6332_VSBST_CON18 0x854E
+#define MT6332_VSBST_CON19 0x8550
+#define MT6332_VSBST_CON20 0x8552
+#define MT6332_VSBST_CON21 0x8554
+#define MT6332_BUCK_K_CON0 0x8556
+#define MT6332_BUCK_K_CON1 0x8558
+#define MT6332_BUCK_K_CON2 0x855A
+#define MT6332_BUCK_K_CON3 0x855C
+#define MT6332_BUCK_K_CON4 0x855E
+#define MT6332_BUCK_K_CON5 0x8560
+#define MT6332_AUXADC_ADC0 0x8800
+#define MT6332_AUXADC_ADC1 0x8802
+#define MT6332_AUXADC_ADC2 0x8804
+#define MT6332_AUXADC_ADC3 0x8806
+#define MT6332_AUXADC_ADC4 0x8808
+#define MT6332_AUXADC_ADC5 0x880A
+#define MT6332_AUXADC_ADC6 0x880C
+#define MT6332_AUXADC_ADC7 0x880E
+#define MT6332_AUXADC_ADC8 0x8810
+#define MT6332_AUXADC_ADC9 0x8812
+#define MT6332_AUXADC_ADC10 0x8814
+#define MT6332_AUXADC_ADC11 0x8816
+#define MT6332_AUXADC_ADC12 0x8818
+#define MT6332_AUXADC_ADC13 0x881A
+#define MT6332_AUXADC_ADC14 0x881C
+#define MT6332_AUXADC_ADC15 0x881E
+#define MT6332_AUXADC_ADC16 0x8820
+#define MT6332_AUXADC_ADC17 0x8822
+#define MT6332_AUXADC_ADC18 0x8824
+#define MT6332_AUXADC_ADC19 0x8826
+#define MT6332_AUXADC_ADC20 0x8828
+#define MT6332_AUXADC_ADC21 0x882A
+#define MT6332_AUXADC_ADC22 0x882C
+#define MT6332_AUXADC_ADC23 0x882E
+#define MT6332_AUXADC_ADC24 0x8830
+#define MT6332_AUXADC_ADC25 0x8832
+#define MT6332_AUXADC_ADC26 0x8834
+#define MT6332_AUXADC_ADC27 0x8836
+#define MT6332_AUXADC_ADC28 0x8838
+#define MT6332_AUXADC_ADC29 0x883A
+#define MT6332_AUXADC_ADC30 0x883C
+#define MT6332_AUXADC_ADC31 0x883E
+#define MT6332_AUXADC_ADC32 0x8840
+#define MT6332_AUXADC_ADC33 0x8842
+#define MT6332_AUXADC_ADC34 0x8844
+#define MT6332_AUXADC_ADC35 0x8846
+#define MT6332_AUXADC_ADC36 0x8848
+#define MT6332_AUXADC_ADC37 0x884A
+#define MT6332_AUXADC_ADC38 0x884C
+#define MT6332_AUXADC_ADC39 0x884E
+#define MT6332_AUXADC_ADC40 0x8850
+#define MT6332_AUXADC_ADC41 0x8852
+#define MT6332_AUXADC_ADC42 0x8854
+#define MT6332_AUXADC_ADC43 0x8856
+#define MT6332_AUXADC_STA0 0x8858
+#define MT6332_AUXADC_STA1 0x885A
+#define MT6332_AUXADC_RQST0 0x885C
+#define MT6332_AUXADC_RQST0_SET 0x885E
+#define MT6332_AUXADC_RQST0_CLR 0x8860
+#define MT6332_AUXADC_RQST1 0x8862
+#define MT6332_AUXADC_RQST1_SET 0x8864
+#define MT6332_AUXADC_RQST1_CLR 0x8866
+#define MT6332_AUXADC_CON0 0x8868
+#define MT6332_AUXADC_CON1 0x886A
+#define MT6332_AUXADC_CON2 0x886C
+#define MT6332_AUXADC_CON3 0x886E
+#define MT6332_AUXADC_CON4 0x8870
+#define MT6332_AUXADC_CON5 0x8872
+#define MT6332_AUXADC_CON6 0x8874
+#define MT6332_AUXADC_CON7 0x8876
+#define MT6332_AUXADC_CON8 0x8878
+#define MT6332_AUXADC_CON9 0x887A
+#define MT6332_AUXADC_CON10 0x887C
+#define MT6332_AUXADC_CON11 0x887E
+#define MT6332_AUXADC_CON12 0x8880
+#define MT6332_AUXADC_CON13 0x8882
+#define MT6332_AUXADC_CON14 0x8884
+#define MT6332_AUXADC_CON15 0x8886
+#define MT6332_AUXADC_CON16 0x8888
+#define MT6332_AUXADC_CON17 0x888A
+#define MT6332_AUXADC_CON18 0x888C
+#define MT6332_AUXADC_CON19 0x888E
+#define MT6332_AUXADC_CON20 0x8890
+#define MT6332_AUXADC_CON21 0x8892
+#define MT6332_AUXADC_CON22 0x8894
+#define MT6332_AUXADC_CON23 0x8896
+#define MT6332_AUXADC_CON24 0x8898
+#define MT6332_AUXADC_CON25 0x889A
+#define MT6332_AUXADC_CON26 0x889C
+#define MT6332_AUXADC_CON27 0x889E
+#define MT6332_AUXADC_CON28 0x88A0
+#define MT6332_AUXADC_CON29 0x88A2
+#define MT6332_AUXADC_CON30 0x88A4
+#define MT6332_AUXADC_CON31 0x88A6
+#define MT6332_AUXADC_CON32 0x88A8
+#define MT6332_AUXADC_CON33 0x88AA
+#define MT6332_AUXADC_CON34 0x88AC
+#define MT6332_AUXADC_CON35 0x88AE
+#define MT6332_AUXADC_CON36 0x88B0
+#define MT6332_AUXADC_CON37 0x88B2
+#define MT6332_AUXADC_CON38 0x88B4
+#define MT6332_AUXADC_CON39 0x88B6
+#define MT6332_AUXADC_CON40 0x88B8
+#define MT6332_AUXADC_CON41 0x88BA
+#define MT6332_AUXADC_CON42 0x88BC
+#define MT6332_AUXADC_CON43 0x88BE
+#define MT6332_AUXADC_CON44 0x88C0
+#define MT6332_AUXADC_CON45 0x88C2
+#define MT6332_AUXADC_CON46 0x88C4
+#define MT6332_AUXADC_CON47 0x88C6
+#define MT6332_STRUP_CONA0 0x8C00
+#define MT6332_STRUP_CONA1 0x8C02
+#define MT6332_STRUP_CONA2 0x8C04
+#define MT6332_STRUP_CON0 0x8C06
+#define MT6332_STRUP_CON2 0x8C08
+#define MT6332_STRUP_CON3 0x8C0A
+#define MT6332_STRUP_CON4 0x8C0C
+#define MT6332_STRUP_CON5 0x8C0E
+#define MT6332_STRUP_CON6 0x8C10
+#define MT6332_STRUP_CON7 0x8C12
+#define MT6332_STRUP_CON8 0x8C14
+#define MT6332_STRUP_CON9 0x8C16
+#define MT6332_STRUP_CON10 0x8C18
+#define MT6332_STRUP_CON11 0x8C1A
+#define MT6332_STRUP_CON12 0x8C1C
+#define MT6332_STRUP_CON13 0x8C1E
+#define MT6332_STRUP_CON14 0x8C20
+#define MT6332_STRUP_CON15 0x8C22
+#define MT6332_STRUP_CON16 0x8C24
+#define MT6332_STRUP_CON17 0x8C26
+#define MT6332_FGADC_CON0 0x8C28
+#define MT6332_FGADC_CON1 0x8C2A
+#define MT6332_FGADC_CON2 0x8C2C
+#define MT6332_FGADC_CON3 0x8C2E
+#define MT6332_FGADC_CON4 0x8C30
+#define MT6332_FGADC_CON5 0x8C32
+#define MT6332_FGADC_CON6 0x8C34
+#define MT6332_FGADC_CON7 0x8C36
+#define MT6332_FGADC_CON8 0x8C38
+#define MT6332_FGADC_CON9 0x8C3A
+#define MT6332_FGADC_CON10 0x8C3C
+#define MT6332_FGADC_CON11 0x8C3E
+#define MT6332_FGADC_CON12 0x8C40
+#define MT6332_FGADC_CON13 0x8C42
+#define MT6332_FGADC_CON14 0x8C44
+#define MT6332_FGADC_CON15 0x8C46
+#define MT6332_FGADC_CON16 0x8C48
+#define MT6332_FGADC_CON17 0x8C4A
+#define MT6332_FGADC_CON18 0x8C4C
+#define MT6332_FGADC_CON19 0x8C4E
+#define MT6332_FGADC_CON20 0x8C50
+#define MT6332_FGADC_CON21 0x8C52
+#define MT6332_FGADC_CON22 0x8C54
+#define MT6332_OTP_CON0 0x8C56
+#define MT6332_OTP_CON1 0x8C58
+#define MT6332_OTP_CON2 0x8C5A
+#define MT6332_OTP_CON3 0x8C5C
+#define MT6332_OTP_CON4 0x8C5E
+#define MT6332_OTP_CON5 0x8C60
+#define MT6332_OTP_CON6 0x8C62
+#define MT6332_OTP_CON7 0x8C64
+#define MT6332_OTP_CON8 0x8C66
+#define MT6332_OTP_CON9 0x8C68
+#define MT6332_OTP_CON10 0x8C6A
+#define MT6332_OTP_CON11 0x8C6C
+#define MT6332_OTP_CON12 0x8C6E
+#define MT6332_OTP_CON13 0x8C70
+#define MT6332_OTP_CON14 0x8C72
+#define MT6332_OTP_DOUT_0_15 0x8C74
+#define MT6332_OTP_DOUT_16_31 0x8C76
+#define MT6332_OTP_DOUT_32_47 0x8C78
+#define MT6332_OTP_DOUT_48_63 0x8C7A
+#define MT6332_OTP_DOUT_64_79 0x8C7C
+#define MT6332_OTP_DOUT_80_95 0x8C7E
+#define MT6332_OTP_DOUT_96_111 0x8C80
+#define MT6332_OTP_DOUT_112_127 0x8C82
+#define MT6332_OTP_DOUT_128_143 0x8C84
+#define MT6332_OTP_DOUT_144_159 0x8C86
+#define MT6332_OTP_DOUT_160_175 0x8C88
+#define MT6332_OTP_DOUT_176_191 0x8C8A
+#define MT6332_OTP_DOUT_192_207 0x8C8C
+#define MT6332_OTP_DOUT_208_223 0x8C8E
+#define MT6332_OTP_DOUT_224_239 0x8C90
+#define MT6332_OTP_DOUT_240_255 0x8C92
+#define MT6332_OTP_VAL_0_15 0x8C94
+#define MT6332_OTP_VAL_16_31 0x8C96
+#define MT6332_OTP_VAL_32_47 0x8C98
+#define MT6332_OTP_VAL_48_63 0x8C9A
+#define MT6332_OTP_VAL_64_79 0x8C9C
+#define MT6332_OTP_VAL_80_95 0x8C9E
+#define MT6332_OTP_VAL_96_111 0x8CA0
+#define MT6332_OTP_VAL_112_127 0x8CA2
+#define MT6332_OTP_VAL_128_143 0x8CA4
+#define MT6332_OTP_VAL_144_159 0x8CA6
+#define MT6332_OTP_VAL_160_175 0x8CA8
+#define MT6332_OTP_VAL_176_191 0x8CAA
+#define MT6332_OTP_VAL_192_207 0x8CAC
+#define MT6332_OTP_VAL_208_223 0x8CAE
+#define MT6332_OTP_VAL_224_239 0x8CB0
+#define MT6332_OTP_VAL_240_255 0x8CB2
+#define MT6332_LDO_CON0 0x8CB4
+#define MT6332_LDO_CON1 0x8CB6
+#define MT6332_LDO_CON2 0x8CB8
+#define MT6332_LDO_CON3 0x8CBA
+#define MT6332_LDO_CON5 0x8CBC
+#define MT6332_LDO_CON6 0x8CBE
+#define MT6332_LDO_CON7 0x8CC0
+#define MT6332_LDO_CON8 0x8CC2
+#define MT6332_LDO_CON9 0x8CC4
+#define MT6332_LDO_CON10 0x8CC6
+#define MT6332_LDO_CON11 0x8CC8
+#define MT6332_LDO_CON12 0x8CCA
+#define MT6332_LDO_CON13 0x8CCC
+#define MT6332_FQMTR_CON0 0x8CCE
+#define MT6332_FQMTR_CON1 0x8CD0
+#define MT6332_FQMTR_CON2 0x8CD2
+#define MT6332_IWLED_CON0 0x8CD4
+#define MT6332_IWLED_DEG 0x8CD6
+#define MT6332_IWLED_STATUS 0x8CD8
+#define MT6332_IWLED_EN_CTRL 0x8CDA
+#define MT6332_IWLED_CON1 0x8CDC
+#define MT6332_IWLED_CON2 0x8CDE
+#define MT6332_IWLED_TRIM0 0x8CE0
+#define MT6332_IWLED_TRIM1 0x8CE2
+#define MT6332_IWLED_CON3 0x8CE4
+#define MT6332_IWLED_CON4 0x8CE6
+#define MT6332_IWLED_CON5 0x8CE8
+#define MT6332_IWLED_CON6 0x8CEA
+#define MT6332_IWLED_CON7 0x8CEC
+#define MT6332_IWLED_CON8 0x8CEE
+#define MT6332_IWLED_CON9 0x8CF0
+#define MT6332_SPK_CON0 0x8CF2
+#define MT6332_SPK_CON1 0x8CF4
+#define MT6332_SPK_CON2 0x8CF6
+#define MT6332_SPK_CON3 0x8CF8
+#define MT6332_SPK_CON4 0x8CFA
+#define MT6332_SPK_CON5 0x8CFC
+#define MT6332_SPK_CON6 0x8CFE
+#define MT6332_SPK_CON7 0x8D00
+#define MT6332_SPK_CON8 0x8D02
+#define MT6332_SPK_CON9 0x8D04
+#define MT6332_SPK_CON10 0x8D06
+#define MT6332_SPK_CON11 0x8D08
+#define MT6332_SPK_CON12 0x8D0A
+#define MT6332_SPK_CON13 0x8D0C
+#define MT6332_SPK_CON14 0x8D0E
+#define MT6332_SPK_CON15 0x8D10
+#define MT6332_SPK_CON16 0x8D12
+#define MT6332_TESTI_CON0 0x8D14
+#define MT6332_TESTI_CON1 0x8D16
+#define MT6332_TESTI_CON2 0x8D18
+#define MT6332_TESTI_CON3 0x8D1A
+#define MT6332_TESTI_CON4 0x8D1C
+#define MT6332_TESTI_CON5 0x8D1E
+#define MT6332_TESTI_CON6 0x8D20
+#define MT6332_TESTI_MUX_CON0 0x8D22
+#define MT6332_TESTI_MUX_CON1 0x8D24
+#define MT6332_TESTI_MUX_CON2 0x8D26
+#define MT6332_TESTI_MUX_CON3 0x8D28
+#define MT6332_TESTI_MUX_CON4 0x8D2A
+#define MT6332_TESTI_MUX_CON5 0x8D2C
+#define MT6332_TESTI_MUX_CON6 0x8D2E
+#define MT6332_TESTO_CON0 0x8D30
+#define MT6332_TESTO_CON1 0x8D32
+#define MT6332_TEST_OMUX_CON0 0x8D34
+#define MT6332_TEST_OMUX_CON1 0x8D36
+#define MT6332_DEBUG_CON0 0x8D38
+#define MT6332_DEBUG_CON1 0x8D3A
+#define MT6332_DEBUG_CON2 0x8D3C
+#define MT6332_FGADC_CON23 0x8D3E
+#define MT6332_FGADC_CON24 0x8D40
+#define MT6332_FGADC_CON25 0x8D42
+#define MT6332_TOP_RST_STATUS 0x8D44
+#define MT6332_TOP_RST_STATUS_SET 0x8D46
+#define MT6332_TOP_RST_STATUS_CLR 0x8D48
+#define MT6332_VDVFS2_CON28 0x8D4A
+
+#endif /* __MFD_MT6332_REGISTERS_H__ */
diff --git a/include/linux/mfd/mt6357/core.h b/include/linux/mfd/mt6357/core.h
new file mode 100644
index 000000000000..2441611264fd
--- /dev/null
+++ b/include/linux/mfd/mt6357/core.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 BayLibre, SAS
+ * Author: Fabien Parent <fparent@baylibre.com>
+ */
+
+#ifndef __MFD_MT6357_CORE_H__
+#define __MFD_MT6357_CORE_H__
+
+enum mt6357_irq_top_status_shift {
+ MT6357_BUCK_TOP = 0,
+ MT6357_LDO_TOP,
+ MT6357_PSC_TOP,
+ MT6357_SCK_TOP,
+ MT6357_BM_TOP,
+ MT6357_HK_TOP,
+ MT6357_XPP_TOP,
+ MT6357_AUD_TOP,
+ MT6357_MISC_TOP,
+};
+
+enum mt6357_irq_numbers {
+ MT6357_IRQ_VPROC_OC = 0,
+ MT6357_IRQ_VCORE_OC,
+ MT6357_IRQ_VMODEM_OC,
+ MT6357_IRQ_VS1_OC,
+ MT6357_IRQ_VPA_OC,
+ MT6357_IRQ_VCORE_PREOC,
+ MT6357_IRQ_VFE28_OC = 16,
+ MT6357_IRQ_VXO22_OC,
+ MT6357_IRQ_VRF18_OC,
+ MT6357_IRQ_VRF12_OC,
+ MT6357_IRQ_VEFUSE_OC,
+ MT6357_IRQ_VCN33_OC,
+ MT6357_IRQ_VCN28_OC,
+ MT6357_IRQ_VCN18_OC,
+ MT6357_IRQ_VCAMA_OC,
+ MT6357_IRQ_VCAMD_OC,
+ MT6357_IRQ_VCAMIO_OC,
+ MT6357_IRQ_VLDO28_OC,
+ MT6357_IRQ_VUSB33_OC,
+ MT6357_IRQ_VAUX18_OC,
+ MT6357_IRQ_VAUD28_OC,
+ MT6357_IRQ_VIO28_OC,
+ MT6357_IRQ_VIO18_OC,
+ MT6357_IRQ_VSRAM_PROC_OC,
+ MT6357_IRQ_VSRAM_OTHERS_OC,
+ MT6357_IRQ_VIBR_OC,
+ MT6357_IRQ_VDRAM_OC,
+ MT6357_IRQ_VMC_OC,
+ MT6357_IRQ_VMCH_OC,
+ MT6357_IRQ_VEMC_OC,
+ MT6357_IRQ_VSIM1_OC,
+ MT6357_IRQ_VSIM2_OC,
+ MT6357_IRQ_PWRKEY = 48,
+ MT6357_IRQ_HOMEKEY,
+ MT6357_IRQ_PWRKEY_R,
+ MT6357_IRQ_HOMEKEY_R,
+ MT6357_IRQ_NI_LBAT_INT,
+ MT6357_IRQ_CHRDET,
+ MT6357_IRQ_CHRDET_EDGE,
+ MT6357_IRQ_VCDT_HV_DET,
+ MT6357_IRQ_WATCHDOG,
+ MT6357_IRQ_VBATON_UNDET,
+ MT6357_IRQ_BVALID_DET,
+ MT6357_IRQ_OV,
+ MT6357_IRQ_RTC = 64,
+ MT6357_IRQ_FG_BAT0_H = 80,
+ MT6357_IRQ_FG_BAT0_L,
+ MT6357_IRQ_FG_CUR_H,
+ MT6357_IRQ_FG_CUR_L,
+ MT6357_IRQ_FG_ZCV,
+ MT6357_IRQ_BATON_LV = 96,
+ MT6357_IRQ_BATON_HT,
+ MT6357_IRQ_BAT_H = 112,
+ MT6357_IRQ_BAT_L,
+ MT6357_IRQ_AUXADC_IMP,
+ MT6357_IRQ_NAG_C_DLTV,
+ MT6357_IRQ_AUDIO = 128,
+ MT6357_IRQ_ACCDET = 133,
+ MT6357_IRQ_ACCDET_EINT0,
+ MT6357_IRQ_ACCDET_EINT1,
+ MT6357_IRQ_SPI_CMD_ALERT = 144,
+ MT6357_IRQ_NR,
+};
+
+#define MT6357_IRQ_BUCK_BASE MT6357_IRQ_VPROC_OC
+#define MT6357_IRQ_LDO_BASE MT6357_IRQ_VFE28_OC
+#define MT6357_IRQ_PSC_BASE MT6357_IRQ_PWRKEY
+#define MT6357_IRQ_SCK_BASE MT6357_IRQ_RTC
+#define MT6357_IRQ_BM_BASE MT6357_IRQ_FG_BAT0_H
+#define MT6357_IRQ_HK_BASE MT6357_IRQ_BAT_H
+#define MT6357_IRQ_AUD_BASE MT6357_IRQ_AUDIO
+#define MT6357_IRQ_MISC_BASE MT6357_IRQ_SPI_CMD_ALERT
+
+#define MT6357_IRQ_BUCK_BITS (MT6357_IRQ_VCORE_PREOC - MT6357_IRQ_BUCK_BASE + 1)
+#define MT6357_IRQ_LDO_BITS (MT6357_IRQ_VSIM2_OC - MT6357_IRQ_LDO_BASE + 1)
+#define MT6357_IRQ_PSC_BITS (MT6357_IRQ_VCDT_HV_DET - MT6357_IRQ_PSC_BASE + 1)
+#define MT6357_IRQ_SCK_BITS (MT6357_IRQ_RTC - MT6357_IRQ_SCK_BASE + 1)
+#define MT6357_IRQ_BM_BITS (MT6357_IRQ_BATON_HT - MT6357_IRQ_BM_BASE + 1)
+#define MT6357_IRQ_HK_BITS (MT6357_IRQ_NAG_C_DLTV - MT6357_IRQ_HK_BASE + 1)
+#define MT6357_IRQ_AUD_BITS (MT6357_IRQ_ACCDET_EINT1 - MT6357_IRQ_AUD_BASE + 1)
+#define MT6357_IRQ_MISC_BITS \
+ (MT6357_IRQ_SPI_CMD_ALERT - MT6357_IRQ_MISC_BASE + 1)
+
+#define MT6357_TOP_GEN(sp) \
+{ \
+ .hwirq_base = MT6357_IRQ_##sp##_BASE, \
+ .num_int_regs = \
+ ((MT6357_IRQ_##sp##_BITS - 1) / \
+ MTK_PMIC_REG_WIDTH) + 1, \
+ .en_reg = MT6357_##sp##_TOP_INT_CON0, \
+ .en_reg_shift = 0x6, \
+ .sta_reg = MT6357_##sp##_TOP_INT_STATUS0, \
+ .sta_reg_shift = 0x2, \
+ .top_offset = MT6357_##sp##_TOP, \
+}
+
+#endif /* __MFD_MT6357_CORE_H__ */
diff --git a/include/linux/mfd/mt6357/registers.h b/include/linux/mfd/mt6357/registers.h
new file mode 100644
index 000000000000..e24af83b618d
--- /dev/null
+++ b/include/linux/mfd/mt6357/registers.h
@@ -0,0 +1,1574 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6357_REGISTERS_H__
+#define __MFD_MT6357_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6357_TOP0_ID 0x0
+#define MT6357_TOP0_REV0 0x2
+#define MT6357_TOP0_DSN_DBI 0x4
+#define MT6357_TOP0_DSN_DXI 0x6
+#define MT6357_HWCID 0x8
+#define MT6357_SWCID 0xa
+#define MT6357_PONSTS 0xc
+#define MT6357_POFFSTS 0xe
+#define MT6357_PSTSCTL 0x10
+#define MT6357_PG_DEB_STS0 0x12
+#define MT6357_PG_SDN_STS0 0x14
+#define MT6357_OC_SDN_STS0 0x16
+#define MT6357_THERMALSTATUS 0x18
+#define MT6357_TOP_CON 0x1a
+#define MT6357_TEST_OUT 0x1c
+#define MT6357_TEST_CON0 0x1e
+#define MT6357_TEST_CON1 0x20
+#define MT6357_TESTMODE_SW 0x22
+#define MT6357_TOPSTATUS 0x24
+#define MT6357_TDSEL_CON 0x26
+#define MT6357_RDSEL_CON 0x28
+#define MT6357_SMT_CON0 0x2a
+#define MT6357_SMT_CON1 0x2c
+#define MT6357_TOP_RSV0 0x2e
+#define MT6357_TOP_RSV1 0x30
+#define MT6357_DRV_CON0 0x32
+#define MT6357_DRV_CON1 0x34
+#define MT6357_DRV_CON2 0x36
+#define MT6357_DRV_CON3 0x38
+#define MT6357_FILTER_CON0 0x3a
+#define MT6357_FILTER_CON1 0x3c
+#define MT6357_FILTER_CON2 0x3e
+#define MT6357_FILTER_CON3 0x40
+#define MT6357_TOP_STATUS 0x42
+#define MT6357_TOP_STATUS_SET 0x44
+#define MT6357_TOP_STATUS_CLR 0x46
+#define MT6357_TOP_TRAP 0x48
+#define MT6357_TOP1_ID 0x80
+#define MT6357_TOP1_REV0 0x82
+#define MT6357_TOP1_DSN_DBI 0x84
+#define MT6357_TOP1_DSN_DXI 0x86
+#define MT6357_GPIO_DIR0 0x88
+#define MT6357_GPIO_DIR0_SET 0x8a
+#define MT6357_GPIO_DIR0_CLR 0x8c
+#define MT6357_GPIO_PULLEN0 0x8e
+#define MT6357_GPIO_PULLEN0_SET 0x90
+#define MT6357_GPIO_PULLEN0_CLR 0x92
+#define MT6357_GPIO_PULLSEL0 0x94
+#define MT6357_GPIO_PULLSEL0_SET 0x96
+#define MT6357_GPIO_PULLSEL0_CLR 0x98
+#define MT6357_GPIO_DINV0 0x9a
+#define MT6357_GPIO_DINV0_SET 0x9c
+#define MT6357_GPIO_DINV0_CLR 0x9e
+#define MT6357_GPIO_DOUT0 0xa0
+#define MT6357_GPIO_DOUT0_SET 0xa2
+#define MT6357_GPIO_DOUT0_CLR 0xa4
+#define MT6357_GPIO_PI0 0xa6
+#define MT6357_GPIO_POE0 0xa8
+#define MT6357_GPIO_MODE0 0xaa
+#define MT6357_GPIO_MODE0_SET 0xac
+#define MT6357_GPIO_MODE0_CLR 0xae
+#define MT6357_GPIO_MODE1 0xb0
+#define MT6357_GPIO_MODE1_SET 0xb2
+#define MT6357_GPIO_MODE1_CLR 0xb4
+#define MT6357_GPIO_MODE2 0xb6
+#define MT6357_GPIO_MODE2_SET 0xb8
+#define MT6357_GPIO_MODE2_CLR 0xba
+#define MT6357_GPIO_MODE3 0xbc
+#define MT6357_GPIO_MODE3_SET 0xbe
+#define MT6357_GPIO_MODE3_CLR 0xc0
+#define MT6357_GPIO_RSV 0xc2
+#define MT6357_TOP2_ID 0x100
+#define MT6357_TOP2_REV0 0x102
+#define MT6357_TOP2_DSN_DBI 0x104
+#define MT6357_TOP2_DSN_DXI 0x106
+#define MT6357_TOP_PAM0 0x108
+#define MT6357_TOP_PAM1 0x10a
+#define MT6357_TOP_CKPDN_CON0 0x10c
+#define MT6357_TOP_CKPDN_CON0_SET 0x10e
+#define MT6357_TOP_CKPDN_CON0_CLR 0x110
+#define MT6357_TOP_CKPDN_CON1 0x112
+#define MT6357_TOP_CKPDN_CON1_SET 0x114
+#define MT6357_TOP_CKPDN_CON1_CLR 0x116
+#define MT6357_TOP_CKSEL_CON0 0x118
+#define MT6357_TOP_CKSEL_CON0_SET 0x11a
+#define MT6357_TOP_CKSEL_CON0_CLR 0x11c
+#define MT6357_TOP_CKSEL_CON1 0x11e
+#define MT6357_TOP_CKSEL_CON1_SET 0x120
+#define MT6357_TOP_CKSEL_CON1_CLR 0x122
+#define MT6357_TOP_CKDIVSEL_CON0 0x124
+#define MT6357_TOP_CKDIVSEL_CON0_SET 0x126
+#define MT6357_TOP_CKDIVSEL_CON0_CLR 0x128
+#define MT6357_TOP_CKHWEN_CON0 0x12a
+#define MT6357_TOP_CKHWEN_CON0_SET 0x12c
+#define MT6357_TOP_CKHWEN_CON0_CLR 0x12e
+#define MT6357_TOP_CKTST_CON0 0x130
+#define MT6357_TOP_CKTST_CON1 0x132
+#define MT6357_TOP_CLK_CON0 0x134
+#define MT6357_TOP_CLK_CON0_SET 0x136
+#define MT6357_TOP_CLK_CON0_CLR 0x138
+#define MT6357_TOP_DCM_CON0 0x13a
+#define MT6357_TOP_HANDOVER_DEBUG0 0x13c
+#define MT6357_TOP_RST_CON0 0x13e
+#define MT6357_TOP_RST_CON0_SET 0x140
+#define MT6357_TOP_RST_CON0_CLR 0x142
+#define MT6357_TOP_RST_CON1 0x144
+#define MT6357_TOP_RST_CON1_SET 0x146
+#define MT6357_TOP_RST_CON1_CLR 0x148
+#define MT6357_TOP_RST_CON2 0x14a
+#define MT6357_TOP_RST_MISC 0x14c
+#define MT6357_TOP_RST_MISC_SET 0x14e
+#define MT6357_TOP_RST_MISC_CLR 0x150
+#define MT6357_TOP_RST_STATUS 0x152
+#define MT6357_TOP_RST_STATUS_SET 0x154
+#define MT6357_TOP_RST_STATUS_CLR 0x156
+#define MT6357_TOP2_ELR_NUM 0x158
+#define MT6357_TOP2_ELR0 0x15a
+#define MT6357_TOP2_ELR1 0x15c
+#define MT6357_TOP3_ID 0x180
+#define MT6357_TOP3_REV0 0x182
+#define MT6357_TOP3_DSN_DBI 0x184
+#define MT6357_TOP3_DSN_DXI 0x186
+#define MT6357_MISC_TOP_INT_CON0 0x188
+#define MT6357_MISC_TOP_INT_CON0_SET 0x18a
+#define MT6357_MISC_TOP_INT_CON0_CLR 0x18c
+#define MT6357_MISC_TOP_INT_MASK_CON0 0x18e
+#define MT6357_MISC_TOP_INT_MASK_CON0_SET 0x190
+#define MT6357_MISC_TOP_INT_MASK_CON0_CLR 0x192
+#define MT6357_MISC_TOP_INT_STATUS0 0x194
+#define MT6357_MISC_TOP_INT_RAW_STATUS0 0x196
+#define MT6357_TOP_INT_MASK_CON0 0x198
+#define MT6357_TOP_INT_MASK_CON0_SET 0x19a
+#define MT6357_TOP_INT_MASK_CON0_CLR 0x19c
+#define MT6357_TOP_INT_STATUS0 0x19e
+#define MT6357_TOP_INT_RAW_STATUS0 0x1a0
+#define MT6357_TOP_INT_CON0 0x1a2
+#define MT6357_PLT0_ID 0x380
+#define MT6357_PLT0_REV0 0x382
+#define MT6357_PLT0_REV1 0x384
+#define MT6357_PLT0_DSN_DXI 0x386
+#define MT6357_FQMTR_CON0 0x388
+#define MT6357_FQMTR_CON1 0x38a
+#define MT6357_FQMTR_CON2 0x38c
+#define MT6357_TOP_CLK_TRIM 0x38e
+#define MT6357_OTP_CON0 0x390
+#define MT6357_OTP_CON1 0x392
+#define MT6357_OTP_CON2 0x394
+#define MT6357_OTP_CON3 0x396
+#define MT6357_OTP_CON4 0x398
+#define MT6357_OTP_CON5 0x39a
+#define MT6357_OTP_CON6 0x39c
+#define MT6357_OTP_CON7 0x39e
+#define MT6357_OTP_CON8 0x3a0
+#define MT6357_OTP_CON9 0x3a2
+#define MT6357_OTP_CON10 0x3a4
+#define MT6357_OTP_CON11 0x3a6
+#define MT6357_OTP_CON12 0x3a8
+#define MT6357_OTP_CON13 0x3aa
+#define MT6357_OTP_CON14 0x3ac
+#define MT6357_TOP_TMA_KEY 0x3ae
+#define MT6357_TOP_MDB_CONF0 0x3b0
+#define MT6357_TOP_MDB_CONF1 0x3b2
+#define MT6357_TOP_MDB_CONF2 0x3b4
+#define MT6357_PLT0_ELR_NUM 0x3b6
+#define MT6357_PLT0_ELR0 0x3b8
+#define MT6357_PLT0_ELR1 0x3ba
+#define MT6357_SPISLV_ID 0x400
+#define MT6357_SPISLV_REV0 0x402
+#define MT6357_SPISLV_REV1 0x404
+#define MT6357_SPISLV_DSN_DXI 0x406
+#define MT6357_RG_SPI_CON0 0x408
+#define MT6357_DEW_DIO_EN 0x40a
+#define MT6357_DEW_READ_TEST 0x40c
+#define MT6357_DEW_WRITE_TEST 0x40e
+#define MT6357_DEW_CRC_SWRST 0x410
+#define MT6357_DEW_CRC_EN 0x412
+#define MT6357_DEW_CRC_VAL 0x414
+#define MT6357_DEW_DBG_MON_SEL 0x416
+#define MT6357_DEW_CIPHER_KEY_SEL 0x418
+#define MT6357_DEW_CIPHER_IV_SEL 0x41a
+#define MT6357_DEW_CIPHER_EN 0x41c
+#define MT6357_DEW_CIPHER_RDY 0x41e
+#define MT6357_DEW_CIPHER_MODE 0x420
+#define MT6357_DEW_CIPHER_SWRST 0x422
+#define MT6357_DEW_RDDMY_NO 0x424
+#define MT6357_INT_TYPE_CON0 0x426
+#define MT6357_INT_TYPE_CON0_SET 0x428
+#define MT6357_INT_TYPE_CON0_CLR 0x42a
+#define MT6357_INT_STA 0x42c
+#define MT6357_RG_SPI_CON1 0x42e
+#define MT6357_RG_SPI_CON2 0x430
+#define MT6357_RG_SPI_CON3 0x432
+#define MT6357_RG_SPI_CON4 0x434
+#define MT6357_RG_SPI_CON5 0x436
+#define MT6357_RG_SPI_CON6 0x438
+#define MT6357_RG_SPI_CON7 0x43a
+#define MT6357_RG_SPI_CON8 0x43c
+#define MT6357_RG_SPI_CON9 0x43e
+#define MT6357_RG_SPI_CON10 0x440
+#define MT6357_RG_SPI_CON11 0x442
+#define MT6357_RG_SPI_CON12 0x444
+#define MT6357_RG_SPI_CON13 0x446
+#define MT6357_TOP_SPI_CON0 0x448
+#define MT6357_TOP_SPI_CON1 0x44a
+#define MT6357_SCK_TOP_DSN_ID 0x500
+#define MT6357_SCK_TOP_DSN_REV0 0x502
+#define MT6357_SCK_TOP_DBI 0x504
+#define MT6357_SCK_TOP_DXI 0x506
+#define MT6357_SCK_TOP_TPM0 0x508
+#define MT6357_SCK_TOP_TPM1 0x50a
+#define MT6357_SCK_TOP_CON0 0x50c
+#define MT6357_SCK_TOP_CON1 0x50e
+#define MT6357_SCK_TOP_TEST_OUT 0x510
+#define MT6357_SCK_TOP_TEST_CON0 0x512
+#define MT6357_SCK_TOP_CKPDN_CON0 0x514
+#define MT6357_SCK_TOP_CKPDN_CON0_SET 0x516
+#define MT6357_SCK_TOP_CKPDN_CON0_CLR 0x518
+#define MT6357_SCK_TOP_CKHWEN_CON0 0x51a
+#define MT6357_SCK_TOP_CKHWEN_CON0_SET 0x51c
+#define MT6357_SCK_TOP_CKHWEN_CON0_CLR 0x51e
+#define MT6357_SCK_TOP_CKTST_CON 0x520
+#define MT6357_SCK_TOP_RST_CON0 0x522
+#define MT6357_SCK_TOP_RST_CON0_SET 0x524
+#define MT6357_SCK_TOP_RST_CON0_CLR 0x526
+#define MT6357_SCK_TOP_INT_CON0 0x528
+#define MT6357_SCK_TOP_INT_CON0_SET 0x52a
+#define MT6357_SCK_TOP_INT_CON0_CLR 0x52c
+#define MT6357_SCK_TOP_INT_MASK_CON0 0x52e
+#define MT6357_SCK_TOP_INT_MASK_CON0_SET 0x530
+#define MT6357_SCK_TOP_INT_MASK_CON0_CLR 0x532
+#define MT6357_SCK_TOP_INT_STATUS0 0x534
+#define MT6357_SCK_TOP_INT_RAW_STATUS0 0x536
+#define MT6357_SCK_TOP_INT_MISC_CON 0x538
+#define MT6357_EOSC_CALI_CON0 0x53a
+#define MT6357_EOSC_CALI_CON1 0x53c
+#define MT6357_RTC_MIX_CON0 0x53e
+#define MT6357_RTC_MIX_CON1 0x540
+#define MT6357_RTC_MIX_CON2 0x542
+#define MT6357_RTC_DSN_ID 0x580
+#define MT6357_RTC_DSN_REV0 0x582
+#define MT6357_RTC_DBI 0x584
+#define MT6357_RTC_DXI 0x586
+#define MT6357_RTC_BBPU 0x588
+#define MT6357_RTC_IRQ_STA 0x58a
+#define MT6357_RTC_IRQ_EN 0x58c
+#define MT6357_RTC_CII_EN 0x58e
+#define MT6357_RTC_AL_MASK 0x590
+#define MT6357_RTC_TC_SEC 0x592
+#define MT6357_RTC_TC_MIN 0x594
+#define MT6357_RTC_TC_HOU 0x596
+#define MT6357_RTC_TC_DOM 0x598
+#define MT6357_RTC_TC_DOW 0x59a
+#define MT6357_RTC_TC_MTH 0x59c
+#define MT6357_RTC_TC_YEA 0x59e
+#define MT6357_RTC_AL_SEC 0x5a0
+#define MT6357_RTC_AL_MIN 0x5a2
+#define MT6357_RTC_AL_HOU 0x5a4
+#define MT6357_RTC_AL_DOM 0x5a6
+#define MT6357_RTC_AL_DOW 0x5a8
+#define MT6357_RTC_AL_MTH 0x5aa
+#define MT6357_RTC_AL_YEA 0x5ac
+#define MT6357_RTC_OSC32CON 0x5ae
+#define MT6357_RTC_POWERKEY1 0x5b0
+#define MT6357_RTC_POWERKEY2 0x5b2
+#define MT6357_RTC_PDN1 0x5b4
+#define MT6357_RTC_PDN2 0x5b6
+#define MT6357_RTC_SPAR0 0x5b8
+#define MT6357_RTC_SPAR1 0x5ba
+#define MT6357_RTC_PROT 0x5bc
+#define MT6357_RTC_DIFF 0x5be
+#define MT6357_RTC_CALI 0x5c0
+#define MT6357_RTC_WRTGR 0x5c2
+#define MT6357_RTC_CON 0x5c4
+#define MT6357_RTC_SEC_CTRL 0x5c6
+#define MT6357_RTC_INT_CNT 0x5c8
+#define MT6357_RTC_SEC_DAT0 0x5ca
+#define MT6357_RTC_SEC_DAT1 0x5cc
+#define MT6357_RTC_SEC_DAT2 0x5ce
+#define MT6357_RTC_SEC_DSN_ID 0x600
+#define MT6357_RTC_SEC_DSN_REV0 0x602
+#define MT6357_RTC_SEC_DBI 0x604
+#define MT6357_RTC_SEC_DXI 0x606
+#define MT6357_RTC_TC_SEC_SEC 0x608
+#define MT6357_RTC_TC_MIN_SEC 0x60a
+#define MT6357_RTC_TC_HOU_SEC 0x60c
+#define MT6357_RTC_TC_DOM_SEC 0x60e
+#define MT6357_RTC_TC_DOW_SEC 0x610
+#define MT6357_RTC_TC_MTH_SEC 0x612
+#define MT6357_RTC_TC_YEA_SEC 0x614
+#define MT6357_RTC_SEC_CK_PDN 0x616
+#define MT6357_RTC_SEC_WRTGR 0x618
+#define MT6357_DCXO_DSN_ID 0x780
+#define MT6357_DCXO_DSN_REV0 0x782
+#define MT6357_DCXO_DSN_DBI 0x784
+#define MT6357_DCXO_DSN_DXI 0x786
+#define MT6357_DCXO_CW00 0x788
+#define MT6357_DCXO_CW00_SET 0x78a
+#define MT6357_DCXO_CW00_CLR 0x78c
+#define MT6357_DCXO_CW01 0x78e
+#define MT6357_DCXO_CW02 0x790
+#define MT6357_DCXO_CW03 0x792
+#define MT6357_DCXO_CW04 0x794
+#define MT6357_DCXO_CW05 0x796
+#define MT6357_DCXO_CW06 0x798
+#define MT6357_DCXO_CW07 0x79a
+#define MT6357_DCXO_CW08 0x79c
+#define MT6357_DCXO_CW09 0x79e
+#define MT6357_DCXO_CW10 0x7a0
+#define MT6357_DCXO_CW11 0x7a2
+#define MT6357_DCXO_CW11_SET 0x7a4
+#define MT6357_DCXO_CW11_CLR 0x7a6
+#define MT6357_DCXO_CW12 0x7a8
+#define MT6357_DCXO_CW13 0x7aa
+#define MT6357_DCXO_CW14 0x7ac
+#define MT6357_DCXO_CW15 0x7ae
+#define MT6357_DCXO_CW16 0x7b0
+#define MT6357_DCXO_CW17 0x7b2
+#define MT6357_DCXO_CW18 0x7b4
+#define MT6357_DCXO_CW19 0x7b6
+#define MT6357_DCXO_CW20 0x7b8
+#define MT6357_DCXO_CW21 0x7ba
+#define MT6357_DCXO_CW22 0x7bc
+#define MT6357_DCXO_ELR_NUM 0x7be
+#define MT6357_DCXO_ELR0 0x7c0
+#define MT6357_PSC_TOP_ID 0x900
+#define MT6357_PSC_TOP_REV0 0x902
+#define MT6357_PSC_TOP_DBI 0x904
+#define MT6357_PSC_TOP_DXI 0x906
+#define MT6357_PSC_TPM0 0x908
+#define MT6357_PSC_TPM1 0x90a
+#define MT6357_PSC_TOP_RSTCTL_0 0x90c
+#define MT6357_PSC_TOP_INT_CON0 0x90e
+#define MT6357_PSC_TOP_INT_CON0_SET 0x910
+#define MT6357_PSC_TOP_INT_CON0_CLR 0x912
+#define MT6357_PSC_TOP_INT_MASK_CON0 0x914
+#define MT6357_PSC_TOP_INT_MASK_CON0_SET 0x916
+#define MT6357_PSC_TOP_INT_MASK_CON0_CLR 0x918
+#define MT6357_PSC_TOP_INT_STATUS0 0x91a
+#define MT6357_PSC_TOP_INT_RAW_STATUS0 0x91c
+#define MT6357_PSC_TOP_INT_MISC_CON 0x91e
+#define MT6357_PSC_TOP_INT_MISC_CON_SET 0x920
+#define MT6357_PSC_TOP_INT_MISC_CON_CLR 0x922
+#define MT6357_PSC_TOP_MON_CTL 0x924
+#define MT6357_STRUP_ID 0x980
+#define MT6357_STRUP_REV0 0x982
+#define MT6357_STRUP_DBI 0x984
+#define MT6357_STRUP_DXI 0x986
+#define MT6357_STRUP_ANA_CON0 0x988
+#define MT6357_STRUP_ANA_CON1 0x98a
+#define MT6357_STRUP_ANA_CON2 0x98c
+#define MT6357_STRUP_ELR_NUM 0x98e
+#define MT6357_STRUP_ELR_0 0x990
+#define MT6357_PSEQ_ID 0xa00
+#define MT6357_PSEQ_REV0 0xa02
+#define MT6357_PSEQ_DBI 0xa04
+#define MT6357_PSEQ_DXI 0xa06
+#define MT6357_PPCCTL0 0xa08
+#define MT6357_PPCCTL1 0xa0a
+#define MT6357_PPCCTL2 0xa0c
+#define MT6357_PPCCFG0 0xa0e
+#define MT6357_PPCTST0 0xa10
+#define MT6357_PORFLAG 0xa12
+#define MT6357_STRUP_CON0 0xa14
+#define MT6357_STRUP_CON1 0xa16
+#define MT6357_STRUP_CON2 0xa18
+#define MT6357_STRUP_CON3 0xa1a
+#define MT6357_STRUP_CON4 0xa1c
+#define MT6357_STRUP_CON5 0xa1e
+#define MT6357_STRUP_CON6 0xa20
+#define MT6357_STRUP_CON7 0xa22
+#define MT6357_CPSCFG0 0xa24
+#define MT6357_STRUP_CON9 0xa26
+#define MT6357_STRUP_CON10 0xa28
+#define MT6357_STRUP_CON11 0xa2a
+#define MT6357_STRUP_CON12 0xa2c
+#define MT6357_STRUP_CON13 0xa2e
+#define MT6357_STRUP_CON14 0xa30
+#define MT6357_STRUP_CON15 0xa32
+#define MT6357_STRUP_CON16 0xa34
+#define MT6357_STRUP_CON19 0xa36
+#define MT6357_PSEQ_ELR_NUM 0xa38
+#define MT6357_PSEQ_ELR7 0xa3a
+#define MT6357_PSEQ_ELR8 0xa3c
+#define MT6357_PCHR_DIG_DSN_ID 0xa80
+#define MT6357_PCHR_DIG_DSN_REV0 0xa82
+#define MT6357_PCHR_DIG_DSN_DBI 0xa84
+#define MT6357_PCHR_DIG_DSN_DXI 0xa86
+#define MT6357_CHR_TOP_CON0 0xa88
+#define MT6357_CHR_TOP_CON1 0xa8a
+#define MT6357_CHR_TOP_CON2 0xa8c
+#define MT6357_CHR_TOP_CON3 0xa8e
+#define MT6357_CHR_TOP_CON4 0xa90
+#define MT6357_CHR_TOP_CON5 0xa92
+#define MT6357_CHR_TOP_CON6 0xa94
+#define MT6357_PCHR_DIG_ELR_NUM 0xa96
+#define MT6357_PCHR_ELR0 0xa98
+#define MT6357_PCHR_ELR1 0xa9a
+#define MT6357_PCHR_MACRO_DSN_ID 0xb80
+#define MT6357_PCHR_MACRO_DSN_REV0 0xb82
+#define MT6357_PCHR_MACRO_DSN_DBI 0xb84
+#define MT6357_PCHR_MACRO_DSN_DXI 0xb86
+#define MT6357_CHR_CON0 0xb88
+#define MT6357_CHR_CON1 0xb8a
+#define MT6357_CHR_CON2 0xb8c
+#define MT6357_CHR_CON3 0xb8e
+#define MT6357_CHR_CON4 0xb90
+#define MT6357_CHR_CON5 0xb92
+#define MT6357_CHR_CON6 0xb94
+#define MT6357_CHR_CON7 0xb96
+#define MT6357_CHR_CON8 0xb98
+#define MT6357_CHR_CON9 0xb9a
+#define MT6357_BM_TOP_DSN_ID 0xc00
+#define MT6357_BM_TOP_DSN_REV0 0xc02
+#define MT6357_BM_TOP_DBI 0xc04
+#define MT6357_BM_TOP_DXI 0xc06
+#define MT6357_BM_TPM0 0xc08
+#define MT6357_BM_TPM1 0xc0a
+#define MT6357_BM_TOP_CKPDN_CON0 0xc0c
+#define MT6357_BM_TOP_CKPDN_CON0_SET 0xc0e
+#define MT6357_BM_TOP_CKPDN_CON0_CLR 0xc10
+#define MT6357_BM_TOP_CKSEL_CON0 0xc12
+#define MT6357_BM_TOP_CKSEL_CON0_SET 0xc14
+#define MT6357_BM_TOP_CKSEL_CON0_CLR 0xc16
+#define MT6357_BM_TOP_CKTST_CON0 0xc18
+#define MT6357_BM_TOP_RST_CON0 0xc1a
+#define MT6357_BM_TOP_RST_CON0_SET 0xc1c
+#define MT6357_BM_TOP_RST_CON0_CLR 0xc1e
+#define MT6357_BM_TOP_INT_CON0 0xc20
+#define MT6357_BM_TOP_INT_CON0_SET 0xc22
+#define MT6357_BM_TOP_INT_CON0_CLR 0xc24
+#define MT6357_BM_TOP_INT_CON1 0xc26
+#define MT6357_BM_TOP_INT_CON1_SET 0xc28
+#define MT6357_BM_TOP_INT_CON1_CLR 0xc2a
+#define MT6357_BM_TOP_INT_MASK_CON0 0xc2c
+#define MT6357_BM_TOP_INT_MASK_CON0_SET 0xc2e
+#define MT6357_BM_TOP_INT_MASK_CON0_CLR 0xc30
+#define MT6357_BM_TOP_INT_MASK_CON1 0xc32
+#define MT6357_BM_TOP_INT_MASK_CON1_SET 0xc34
+#define MT6357_BM_TOP_INT_MASK_CON1_CLR 0xc36
+#define MT6357_BM_TOP_INT_STATUS0 0xc38
+#define MT6357_BM_TOP_INT_STATUS1 0xc3a
+#define MT6357_BM_TOP_INT_RAW_STATUS0 0xc3c
+#define MT6357_BM_TOP_INT_RAW_STATUS1 0xc3e
+#define MT6357_BM_TOP_INT_MISC_CON 0xc40
+#define MT6357_BM_TOP_DBG_CON 0xc42
+#define MT6357_BM_TOP_RSV0 0xc44
+#define MT6357_FGADC_ANA_DSN_ID 0xc80
+#define MT6357_FGADC_ANA_DSN_REV0 0xc82
+#define MT6357_FGADC_ANA_DSN_DBI 0xc84
+#define MT6357_FGADC_ANA_DSN_DXI 0xc86
+#define MT6357_FGADC_ANA_CON0 0xc88
+#define MT6357_FGADC_ANA_TEST_CON0 0xc8a
+#define MT6357_FGADC_ANA_ELR_NUM 0xc8c
+#define MT6357_FGADC_ANA_ELR0 0xc8e
+#define MT6357_FGADC_ANA_ELR1 0xc90
+#define MT6357_FGADC0_DSN_ID 0xd00
+#define MT6357_FGADC0_DSN_REV0 0xd02
+#define MT6357_FGADC0_DSN_DBI 0xd04
+#define MT6357_FGADC0_DSN_DXI 0xd06
+#define MT6357_FGADC_CON0 0xd08
+#define MT6357_FGADC_CON1 0xd0a
+#define MT6357_FGADC_CON2 0xd0c
+#define MT6357_FGADC_CON3 0xd0e
+#define MT6357_FGADC_CON4 0xd10
+#define MT6357_FGADC_CAR_CON0 0xd12
+#define MT6357_FGADC_CAR_CON1 0xd14
+#define MT6357_FGADC_CAR_CON2 0xd16
+#define MT6357_FGADC_CARTH_CON0 0xd18
+#define MT6357_FGADC_CARTH_CON1 0xd1a
+#define MT6357_FGADC_CARTH_CON2 0xd1c
+#define MT6357_FGADC_CARTH_CON3 0xd1e
+#define MT6357_FGADC_NTER_CON0 0xd20
+#define MT6357_FGADC_NTER_CON1 0xd22
+#define MT6357_FGADC_NTER_CON2 0xd24
+#define MT6357_FGADC_SON_CON0 0xd26
+#define MT6357_FGADC_SON_CON1 0xd28
+#define MT6357_FGADC_SON_CON2 0xd2a
+#define MT6357_FGADC_SON_CON3 0xd2c
+#define MT6357_FGADC_ZCV_CON0 0xd2e
+#define MT6357_FGADC_ZCV_CON1 0xd30
+#define MT6357_FGADC_ZCV_CON2 0xd32
+#define MT6357_FGADC_ZCV_CON3 0xd34
+#define MT6357_FGADC_ZCV_CON4 0xd36
+#define MT6357_FGADC_ZCVTH_CON0 0xd38
+#define MT6357_FGADC_ZCVTH_CON1 0xd3a
+#define MT6357_FGADC_ZCVTH_CON2 0xd3c
+#define MT6357_FGADC1_DSN_ID 0xd80
+#define MT6357_FGADC1_DSN_REV0 0xd82
+#define MT6357_FGADC1_DSN_DBI 0xd84
+#define MT6357_FGADC1_DSN_DXI 0xd86
+#define MT6357_FGADC_R_CON0 0xd88
+#define MT6357_FGADC_CUR_CON0 0xd8a
+#define MT6357_FGADC_CUR_CON1 0xd8c
+#define MT6357_FGADC_CUR_CON2 0xd8e
+#define MT6357_FGADC_CUR_CON3 0xd90
+#define MT6357_FGADC_OFFSET_CON0 0xd92
+#define MT6357_FGADC_OFFSET_CON1 0xd94
+#define MT6357_FGADC_GAIN_CON0 0xd96
+#define MT6357_FGADC_TEST_CON0 0xd98
+#define MT6357_SYSTEM_INFO_CON0 0xd9a
+#define MT6357_SYSTEM_INFO_CON1 0xd9c
+#define MT6357_SYSTEM_INFO_CON2 0xd9e
+#define MT6357_SYSTEM_INFO_CON3 0xda0
+#define MT6357_SYSTEM_INFO_CON4 0xda2
+#define MT6357_BATON_ANA_DSN_ID 0xe00
+#define MT6357_BATON_ANA_DSN_REV0 0xe02
+#define MT6357_BATON_ANA_DSN_DBI 0xe04
+#define MT6357_BATON_ANA_DSN_DXI 0xe06
+#define MT6357_BATON_ANA_CON0 0xe08
+#define MT6357_BATON_ANA_ELR_NUM 0xe0a
+#define MT6357_BATON_ANA_ELR0 0xe0c
+#define MT6357_HK_TOP_ID 0xf80
+#define MT6357_HK_TOP_REV0 0xf82
+#define MT6357_HK_TOP_DBI 0xf84
+#define MT6357_HK_TOP_DXI 0xf86
+#define MT6357_HK_TPM0 0xf88
+#define MT6357_HK_TPM1 0xf8a
+#define MT6357_HK_TOP_CLK_CON0 0xf8c
+#define MT6357_HK_TOP_CLK_CON1 0xf8e
+#define MT6357_HK_TOP_RST_CON0 0xf90
+#define MT6357_HK_TOP_INT_CON0 0xf92
+#define MT6357_HK_TOP_INT_CON0_SET 0xf94
+#define MT6357_HK_TOP_INT_CON0_CLR 0xf96
+#define MT6357_HK_TOP_INT_MASK_CON0 0xf98
+#define MT6357_HK_TOP_INT_MASK_CON0_SET 0xf9a
+#define MT6357_HK_TOP_INT_MASK_CON0_CLR 0xf9c
+#define MT6357_HK_TOP_INT_STATUS0 0xf9e
+#define MT6357_HK_TOP_INT_RAW_STATUS0 0xfa0
+#define MT6357_HK_TOP_MON_CON0 0xfa2
+#define MT6357_HK_TOP_MON_CON1 0xfa4
+#define MT6357_HK_TOP_MON_CON2 0xfa6
+#define MT6357_AUXADC_DSN_ID 0x1000
+#define MT6357_AUXADC_DSN_REV0 0x1002
+#define MT6357_AUXADC_DSN_DBI 0x1004
+#define MT6357_AUXADC_DSN_DXI 0x1006
+#define MT6357_AUXADC_ANA_CON0 0x1008
+#define MT6357_AUXADC_DIG_1_DSN_ID 0x1080
+#define MT6357_AUXADC_DIG_1_DSN_REV0 0x1082
+#define MT6357_AUXADC_DIG_1_DSN_DBI 0x1084
+#define MT6357_AUXADC_DIG_1_DSN_DXI 0x1086
+#define MT6357_AUXADC_ADC0 0x1088
+#define MT6357_AUXADC_ADC1 0x108a
+#define MT6357_AUXADC_ADC2 0x108c
+#define MT6357_AUXADC_ADC3 0x108e
+#define MT6357_AUXADC_ADC4 0x1090
+#define MT6357_AUXADC_ADC5 0x1092
+#define MT6357_AUXADC_ADC6 0x1094
+#define MT6357_AUXADC_ADC7 0x1096
+#define MT6357_AUXADC_ADC8 0x1098
+#define MT6357_AUXADC_ADC9 0x109a
+#define MT6357_AUXADC_ADC10 0x109c
+#define MT6357_AUXADC_ADC11 0x109e
+#define MT6357_AUXADC_ADC12 0x10a0
+#define MT6357_AUXADC_ADC14 0x10a2
+#define MT6357_AUXADC_ADC16 0x10a4
+#define MT6357_AUXADC_ADC17 0x10a6
+#define MT6357_AUXADC_ADC18 0x10a8
+#define MT6357_AUXADC_ADC19 0x10aa
+#define MT6357_AUXADC_ADC20 0x10ac
+#define MT6357_AUXADC_ADC21 0x10ae
+#define MT6357_AUXADC_ADC22 0x10b0
+#define MT6357_AUXADC_ADC23 0x10b2
+#define MT6357_AUXADC_ADC24 0x10b4
+#define MT6357_AUXADC_ADC25 0x10b6
+#define MT6357_AUXADC_ADC26 0x10b8
+#define MT6357_AUXADC_ADC27 0x10ba
+#define MT6357_AUXADC_ADC29 0x10bc
+#define MT6357_AUXADC_ADC30 0x10be
+#define MT6357_AUXADC_ADC31 0x10c0
+#define MT6357_AUXADC_ADC32 0x10c2
+#define MT6357_AUXADC_ADC33 0x10c4
+#define MT6357_AUXADC_ADC34 0x10c6
+#define MT6357_AUXADC_ADC35 0x10c8
+#define MT6357_AUXADC_ADC36 0x10ca
+#define MT6357_AUXADC_ADC38 0x10cc
+#define MT6357_AUXADC_ADC39 0x10ce
+#define MT6357_AUXADC_ADC40 0x10d0
+#define MT6357_AUXADC_ADC41 0x10d2
+#define MT6357_AUXADC_ADC42 0x10d4
+#define MT6357_AUXADC_ADC43 0x10d6
+#define MT6357_AUXADC_ADC46 0x10d8
+#define MT6357_AUXADC_ADC47 0x10da
+#define MT6357_AUXADC_DIG_1_ELR_NUM 0x10dc
+#define MT6357_AUXADC_DIG_1_ELR0 0x10de
+#define MT6357_AUXADC_DIG_1_ELR1 0x10e0
+#define MT6357_AUXADC_DIG_2_DSN_ID 0x1100
+#define MT6357_AUXADC_DIG_2_DSN_REV0 0x1102
+#define MT6357_AUXADC_DIG_2_DSN_DBI 0x1104
+#define MT6357_AUXADC_DIG_2_DSN_DXI 0x1106
+#define MT6357_AUXADC_STA0 0x1108
+#define MT6357_AUXADC_STA1 0x110a
+#define MT6357_AUXADC_STA2 0x110c
+#define MT6357_AUXADC_RQST0 0x110e
+#define MT6357_AUXADC_RQST0_SET 0x1110
+#define MT6357_AUXADC_RQST0_CLR 0x1112
+#define MT6357_AUXADC_RQST2 0x1114
+#define MT6357_AUXADC_RQST2_SET 0x1116
+#define MT6357_AUXADC_RQST2_CLR 0x1118
+#define MT6357_AUXADC_RQST1 0x111a
+#define MT6357_AUXADC_RQST1_SET 0x111c
+#define MT6357_AUXADC_RQST1_CLR 0x111e
+#define MT6357_AUXADC_CON0 0x1120
+#define MT6357_AUXADC_CON0_SET 0x1122
+#define MT6357_AUXADC_CON0_CLR 0x1124
+#define MT6357_AUXADC_CON1 0x1126
+#define MT6357_AUXADC_CON2 0x1128
+#define MT6357_AUXADC_CON3 0x112a
+#define MT6357_AUXADC_CON4 0x112c
+#define MT6357_AUXADC_CON5 0x112e
+#define MT6357_AUXADC_CON6 0x1130
+#define MT6357_AUXADC_CON7 0x1132
+#define MT6357_AUXADC_CON8 0x1134
+#define MT6357_AUXADC_CON9 0x1136
+#define MT6357_AUXADC_CON10 0x1138
+#define MT6357_AUXADC_CON11 0x113a
+#define MT6357_AUXADC_CON12 0x113c
+#define MT6357_AUXADC_CON13 0x113e
+#define MT6357_AUXADC_CON14 0x1140
+#define MT6357_AUXADC_CON15 0x1142
+#define MT6357_AUXADC_CON16 0x1144
+#define MT6357_AUXADC_CON17 0x1146
+#define MT6357_AUXADC_CON18 0x1148
+#define MT6357_AUXADC_CON19 0x114a
+#define MT6357_AUXADC_CON20 0x114c
+#define MT6357_AUXADC_DIG_3_DSN_ID 0x1180
+#define MT6357_AUXADC_DIG_3_DSN_REV0 0x1182
+#define MT6357_AUXADC_DIG_3_DSN_DBI 0x1184
+#define MT6357_AUXADC_DIG_3_DSN_DXI 0x1186
+#define MT6357_AUXADC_AUTORPT0 0x1188
+#define MT6357_AUXADC_LBAT0 0x118a
+#define MT6357_AUXADC_LBAT1 0x118c
+#define MT6357_AUXADC_LBAT2 0x118e
+#define MT6357_AUXADC_LBAT3 0x1190
+#define MT6357_AUXADC_LBAT4 0x1192
+#define MT6357_AUXADC_LBAT5 0x1194
+#define MT6357_AUXADC_LBAT6 0x1196
+#define MT6357_AUXADC_ACCDET 0x1198
+#define MT6357_AUXADC_DBG0 0x119a
+#define MT6357_AUXADC_IMP0 0x119c
+#define MT6357_AUXADC_IMP1 0x119e
+#define MT6357_AUXADC_DIG_3_ELR_NUM 0x11a0
+#define MT6357_AUXADC_DIG_3_ELR0 0x11a2
+#define MT6357_AUXADC_DIG_3_ELR1 0x11a4
+#define MT6357_AUXADC_DIG_3_ELR2 0x11a6
+#define MT6357_AUXADC_DIG_3_ELR3 0x11a8
+#define MT6357_AUXADC_DIG_3_ELR4 0x11aa
+#define MT6357_AUXADC_DIG_3_ELR5 0x11ac
+#define MT6357_AUXADC_DIG_3_ELR6 0x11ae
+#define MT6357_AUXADC_DIG_3_ELR7 0x11b0
+#define MT6357_AUXADC_DIG_3_ELR8 0x11b2
+#define MT6357_AUXADC_DIG_3_ELR9 0x11b4
+#define MT6357_AUXADC_DIG_3_ELR10 0x11b6
+#define MT6357_AUXADC_DIG_3_ELR11 0x11b8
+#define MT6357_AUXADC_DIG_4_DSN_ID 0x1200
+#define MT6357_AUXADC_DIG_4_DSN_REV0 0x1202
+#define MT6357_AUXADC_DIG_4_DSN_DBI 0x1204
+#define MT6357_AUXADC_DIG_4_DSN_DXI 0x1206
+#define MT6357_AUXADC_MDRT_0 0x1208
+#define MT6357_AUXADC_MDRT_1 0x120a
+#define MT6357_AUXADC_MDRT_2 0x120c
+#define MT6357_AUXADC_MDRT_3 0x120e
+#define MT6357_AUXADC_MDRT_4 0x1210
+#define MT6357_AUXADC_DCXO_MDRT_0 0x1212
+#define MT6357_AUXADC_DCXO_MDRT_1 0x1214
+#define MT6357_AUXADC_DCXO_MDRT_2 0x1216
+#define MT6357_AUXADC_NAG_0 0x1218
+#define MT6357_AUXADC_NAG_1 0x121a
+#define MT6357_AUXADC_NAG_2 0x121c
+#define MT6357_AUXADC_NAG_3 0x121e
+#define MT6357_AUXADC_NAG_4 0x1220
+#define MT6357_AUXADC_NAG_5 0x1222
+#define MT6357_AUXADC_NAG_6 0x1224
+#define MT6357_AUXADC_NAG_7 0x1226
+#define MT6357_AUXADC_NAG_8 0x1228
+#define MT6357_AUXADC_RSV_1 0x122a
+#define MT6357_AUXADC_ANA_0 0x122c
+#define MT6357_AUXADC_IMP_CG0 0x122e
+#define MT6357_AUXADC_LBAT_CG0 0x1230
+#define MT6357_AUXADC_NAG_CG0 0x1232
+#define MT6357_AUXADC_PRI_NEW 0x1234
+#define MT6357_AUXADC_CHR_TOP_CON2 0x1236
+#define MT6357_BUCK_TOP_DSN_ID 0x1400
+#define MT6357_BUCK_TOP_DSN_REV0 0x1402
+#define MT6357_BUCK_TOP_DBI 0x1404
+#define MT6357_BUCK_TOP_DXI 0x1406
+#define MT6357_BUCK_TOP_PAM0 0x1408
+#define MT6357_BUCK_TOP_PAM1 0x140a
+#define MT6357_BUCK_TOP_CLK_CON0 0x140c
+#define MT6357_BUCK_TOP_CLK_CON0_SET 0x140e
+#define MT6357_BUCK_TOP_CLK_CON0_CLR 0x1410
+#define MT6357_BUCK_TOP_CLK_HWEN_CON0 0x1412
+#define MT6357_BUCK_TOP_CLK_HWEN_CON0_SET 0x1414
+#define MT6357_BUCK_TOP_CLK_HWEN_CON0_CLR 0x1416
+#define MT6357_BUCK_TOP_CLK_MISC_CON0 0x1418
+#define MT6357_BUCK_TOP_INT_CON0 0x141a
+#define MT6357_BUCK_TOP_INT_CON0_SET 0x141c
+#define MT6357_BUCK_TOP_INT_CON0_CLR 0x141e
+#define MT6357_BUCK_TOP_INT_MASK_CON0 0x1420
+#define MT6357_BUCK_TOP_INT_MASK_CON0_SET 0x1422
+#define MT6357_BUCK_TOP_INT_MASK_CON0_CLR 0x1424
+#define MT6357_BUCK_TOP_INT_STATUS0 0x1426
+#define MT6357_BUCK_TOP_INT_RAW_STATUS0 0x1428
+#define MT6357_BUCK_TOP_STB_CON 0x142a
+#define MT6357_BUCK_TOP_SLP_CON0 0x142c
+#define MT6357_BUCK_TOP_SLP_CON1 0x142e
+#define MT6357_BUCK_TOP_SLP_CON2 0x1430
+#define MT6357_BUCK_TOP_MINFREQ_CON 0x1432
+#define MT6357_BUCK_TOP_OC_CON0 0x1434
+#define MT6357_BUCK_TOP_K_CON0 0x1436
+#define MT6357_BUCK_TOP_K_CON1 0x1438
+#define MT6357_BUCK_TOP_K_CON2 0x143a
+#define MT6357_BUCK_TOP_WDTDBG0 0x143c
+#define MT6357_BUCK_TOP_WDTDBG1 0x143e
+#define MT6357_BUCK_TOP_WDTDBG2 0x1440
+#define MT6357_BUCK_TOP_ELR_NUM 0x1442
+#define MT6357_BUCK_TOP_ELR0 0x1444
+#define MT6357_BUCK_TOP_ELR1 0x1446
+#define MT6357_BUCK_VPROC_DSN_ID 0x1480
+#define MT6357_BUCK_VPROC_DSN_REV0 0x1482
+#define MT6357_BUCK_VPROC_DSN_DBI 0x1484
+#define MT6357_BUCK_VPROC_DSN_DXI 0x1486
+#define MT6357_BUCK_VPROC_CON0 0x1488
+#define MT6357_BUCK_VPROC_CON1 0x148a
+#define MT6357_BUCK_VPROC_CFG0 0x148c
+#define MT6357_BUCK_VPROC_CFG1 0x148e
+#define MT6357_BUCK_VPROC_OP_EN 0x1490
+#define MT6357_BUCK_VPROC_OP_EN_SET 0x1492
+#define MT6357_BUCK_VPROC_OP_EN_CLR 0x1494
+#define MT6357_BUCK_VPROC_OP_CFG 0x1496
+#define MT6357_BUCK_VPROC_OP_CFG_SET 0x1498
+#define MT6357_BUCK_VPROC_OP_CFG_CLR 0x149a
+#define MT6357_BUCK_VPROC_SP_CON 0x149c
+#define MT6357_BUCK_VPROC_SP_CFG 0x149e
+#define MT6357_BUCK_VPROC_OC_CFG 0x14a0
+#define MT6357_BUCK_VPROC_DBG0 0x14a2
+#define MT6357_BUCK_VPROC_DBG1 0x14a4
+#define MT6357_BUCK_VPROC_DBG2 0x14a6
+#define MT6357_BUCK_VPROC_ELR_NUM 0x14a8
+#define MT6357_BUCK_VPROC_ELR0 0x14aa
+#define MT6357_BUCK_VCORE_DSN_ID 0x1500
+#define MT6357_BUCK_VCORE_DSN_REV0 0x1502
+#define MT6357_BUCK_VCORE_DSN_DBI 0x1504
+#define MT6357_BUCK_VCORE_DSN_DXI 0x1506
+#define MT6357_BUCK_VCORE_CON0 0x1508
+#define MT6357_BUCK_VCORE_CON1 0x150a
+#define MT6357_BUCK_VCORE_CFG0 0x150c
+#define MT6357_BUCK_VCORE_CFG1 0x150e
+#define MT6357_BUCK_VCORE_OP_EN 0x1510
+#define MT6357_BUCK_VCORE_OP_EN_SET 0x1512
+#define MT6357_BUCK_VCORE_OP_EN_CLR 0x1514
+#define MT6357_BUCK_VCORE_OP_CFG 0x1516
+#define MT6357_BUCK_VCORE_OP_CFG_SET 0x1518
+#define MT6357_BUCK_VCORE_OP_CFG_CLR 0x151a
+#define MT6357_BUCK_VCORE_SP_CON 0x151c
+#define MT6357_BUCK_VCORE_SP_CFG 0x151e
+#define MT6357_BUCK_VCORE_OC_CFG 0x1520
+#define MT6357_BUCK_VCORE_DBG0 0x1522
+#define MT6357_BUCK_VCORE_DBG1 0x1524
+#define MT6357_BUCK_VCORE_DBG2 0x1526
+#define MT6357_BUCK_VCORE_ELR_NUM 0x1528
+#define MT6357_BUCK_VCORE_ELR0 0x152a
+#define MT6357_BUCK_VMODEM_DSN_ID 0x1580
+#define MT6357_BUCK_VMODEM_DSN_REV0 0x1582
+#define MT6357_BUCK_VMODEM_DSN_DBI 0x1584
+#define MT6357_BUCK_VMODEM_DSN_DXI 0x1586
+#define MT6357_BUCK_VMODEM_CON0 0x1588
+#define MT6357_BUCK_VMODEM_CON1 0x158a
+#define MT6357_BUCK_VMODEM_CFG0 0x158c
+#define MT6357_BUCK_VMODEM_CFG1 0x158e
+#define MT6357_BUCK_VMODEM_OP_EN 0x1590
+#define MT6357_BUCK_VMODEM_OP_EN_SET 0x1592
+#define MT6357_BUCK_VMODEM_OP_EN_CLR 0x1594
+#define MT6357_BUCK_VMODEM_OP_CFG 0x1596
+#define MT6357_BUCK_VMODEM_OP_CFG_SET 0x1598
+#define MT6357_BUCK_VMODEM_OP_CFG_CLR 0x159a
+#define MT6357_BUCK_VMODEM_SP_CON 0x159c
+#define MT6357_BUCK_VMODEM_SP_CFG 0x159e
+#define MT6357_BUCK_VMODEM_OC_CFG 0x15a0
+#define MT6357_BUCK_VMODEM_DBG0 0x15a2
+#define MT6357_BUCK_VMODEM_DBG1 0x15a4
+#define MT6357_BUCK_VMODEM_DBG2 0x15a6
+#define MT6357_BUCK_VMODEM_ELR_NUM 0x15a8
+#define MT6357_BUCK_VMODEM_ELR0 0x15aa
+#define MT6357_BUCK_VS1_DSN_ID 0x1600
+#define MT6357_BUCK_VS1_DSN_REV0 0x1602
+#define MT6357_BUCK_VS1_DSN_DBI 0x1604
+#define MT6357_BUCK_VS1_DSN_DXI 0x1606
+#define MT6357_BUCK_VS1_CON0 0x1608
+#define MT6357_BUCK_VS1_CON1 0x160a
+#define MT6357_BUCK_VS1_CFG0 0x160c
+#define MT6357_BUCK_VS1_CFG1 0x160e
+#define MT6357_BUCK_VS1_OP_EN 0x1610
+#define MT6357_BUCK_VS1_OP_EN_SET 0x1612
+#define MT6357_BUCK_VS1_OP_EN_CLR 0x1614
+#define MT6357_BUCK_VS1_OP_CFG 0x1616
+#define MT6357_BUCK_VS1_OP_CFG_SET 0x1618
+#define MT6357_BUCK_VS1_OP_CFG_CLR 0x161a
+#define MT6357_BUCK_VS1_SP_CON 0x161c
+#define MT6357_BUCK_VS1_SP_CFG 0x161e
+#define MT6357_BUCK_VS1_OC_CFG 0x1620
+#define MT6357_BUCK_VS1_DBG0 0x1622
+#define MT6357_BUCK_VS1_DBG1 0x1624
+#define MT6357_BUCK_VS1_DBG2 0x1626
+#define MT6357_BUCK_VS1_VOTER 0x1628
+#define MT6357_BUCK_VS1_VOTER_SET 0x162a
+#define MT6357_BUCK_VS1_VOTER_CLR 0x162c
+#define MT6357_BUCK_VS1_VOTER_CFG 0x162e
+#define MT6357_BUCK_VS1_ELR_NUM 0x1630
+#define MT6357_BUCK_VS1_ELR0 0x1632
+#define MT6357_BUCK_VPA_DSN_ID 0x1680
+#define MT6357_BUCK_VPA_DSN_REV0 0x1682
+#define MT6357_BUCK_VPA_DSN_DBI 0x1684
+#define MT6357_BUCK_VPA_DSN_DXI 0x1686
+#define MT6357_BUCK_VPA_CON0 0x1688
+#define MT6357_BUCK_VPA_CON1 0x168a
+#define MT6357_BUCK_VPA_CFG0 0x168c
+#define MT6357_BUCK_VPA_CFG1 0x168e
+#define MT6357_BUCK_VPA_OC_CFG 0x1690
+#define MT6357_BUCK_VPA_DBG0 0x1692
+#define MT6357_BUCK_VPA_DBG1 0x1694
+#define MT6357_BUCK_VPA_DBG2 0x1696
+#define MT6357_BUCK_VPA_DLC_CON0 0x1698
+#define MT6357_BUCK_VPA_DLC_CON1 0x169a
+#define MT6357_BUCK_VPA_DLC_CON2 0x169c
+#define MT6357_BUCK_VPA_MSFG_CON0 0x169e
+#define MT6357_BUCK_VPA_MSFG_CON1 0x16a0
+#define MT6357_BUCK_VPA_MSFG_RRATE0 0x16a2
+#define MT6357_BUCK_VPA_MSFG_RRATE1 0x16a4
+#define MT6357_BUCK_VPA_MSFG_RRATE2 0x16a6
+#define MT6357_BUCK_VPA_MSFG_RTHD0 0x16a8
+#define MT6357_BUCK_VPA_MSFG_RTHD1 0x16aa
+#define MT6357_BUCK_VPA_MSFG_RTHD2 0x16ac
+#define MT6357_BUCK_VPA_MSFG_FRATE0 0x16ae
+#define MT6357_BUCK_VPA_MSFG_FRATE1 0x16b0
+#define MT6357_BUCK_VPA_MSFG_FRATE2 0x16b2
+#define MT6357_BUCK_VPA_MSFG_FTHD0 0x16b4
+#define MT6357_BUCK_VPA_MSFG_FTHD1 0x16b6
+#define MT6357_BUCK_VPA_MSFG_FTHD2 0x16b8
+#define MT6357_BUCK_ANA_DSN_ID 0x1700
+#define MT6357_BUCK_ANA_DSN_REV0 0x1702
+#define MT6357_BUCK_ANA_DSN_DBI 0x1704
+#define MT6357_BUCK_ANA_DSN_FPI 0x1706
+#define MT6357_SMPS_ANA_CON0 0x1708
+#define MT6357_SMPS_ANA_CON1 0x170a
+#define MT6357_SMPS_ANA_CON2 0x170c
+#define MT6357_VCORE_VPROC_ANA_CON0 0x170e
+#define MT6357_VCORE_VPROC_ANA_CON1 0x1710
+#define MT6357_VCORE_VPROC_ANA_CON2 0x1712
+#define MT6357_VCORE_VPROC_ANA_CON3 0x1714
+#define MT6357_VCORE_VPROC_ANA_CON4 0x1716
+#define MT6357_VCORE_VPROC_ANA_CON5 0x1718
+#define MT6357_VCORE_VPROC_ANA_CON6 0x171a
+#define MT6357_VCORE_VPROC_ANA_CON7 0x171c
+#define MT6357_VCORE_VPROC_ANA_CON8 0x171e
+#define MT6357_VCORE_VPROC_ANA_CON9 0x1720
+#define MT6357_VCORE_VPROC_ANA_CON10 0x1722
+#define MT6357_VCORE_VPROC_ANA_CON11 0x1724
+#define MT6357_VMODEM_ANA_CON0 0x1726
+#define MT6357_VMODEM_ANA_CON1 0x1728
+#define MT6357_VMODEM_ANA_CON2 0x172a
+#define MT6357_VMODEM_ANA_CON3 0x172c
+#define MT6357_VMODEM_ANA_CON4 0x172e
+#define MT6357_VMODEM_ANA_CON5 0x1730
+#define MT6357_VS1_ANA_CON0 0x1732
+#define MT6357_VS1_ANA_CON1 0x1734
+#define MT6357_VS1_ANA_CON2 0x1736
+#define MT6357_VS1_ANA_CON3 0x1738
+#define MT6357_VS1_ANA_CON4 0x173a
+#define MT6357_VS1_ANA_CON5 0x173c
+#define MT6357_VPA_ANA_CON0 0x173e
+#define MT6357_VPA_ANA_CON1 0x1740
+#define MT6357_VPA_ANA_CON2 0x1742
+#define MT6357_VPA_ANA_CON3 0x1744
+#define MT6357_VPA_ANA_CON4 0x1746
+#define MT6357_VPA_ANA_CON5 0x1748
+#define MT6357_BUCK_ANA_ELR_NUM 0x174a
+#define MT6357_SMPS_ELR_0 0x174c
+#define MT6357_SMPS_ELR_1 0x174e
+#define MT6357_SMPS_ELR_2 0x1750
+#define MT6357_SMPS_ELR_3 0x1752
+#define MT6357_SMPS_ELR_4 0x1754
+#define MT6357_SMPS_ELR_5 0x1756
+#define MT6357_VCORE_VPROC_ELR_0 0x1758
+#define MT6357_VCORE_VPROC_ELR_1 0x175a
+#define MT6357_VCORE_VPROC_ELR_2 0x175c
+#define MT6357_VCORE_VPROC_ELR_3 0x175e
+#define MT6357_VCORE_VPROC_ELR_4 0x1760
+#define MT6357_VMODEM_ELR_0 0x1762
+#define MT6357_VMODEM_ELR_1 0x1764
+#define MT6357_VMODEM_ELR_2 0x1766
+#define MT6357_VS1_ELR_0 0x1768
+#define MT6357_VS1_ELR_1 0x176a
+#define MT6357_VPA_ELR_0 0x176c
+#define MT6357_LDO_TOP_ID 0x1880
+#define MT6357_LDO_TOP_REV0 0x1882
+#define MT6357_LDO_TOP_DBI 0x1884
+#define MT6357_LDO_TOP_DXI 0x1886
+#define MT6357_LDO_TPM0 0x1888
+#define MT6357_LDO_TPM1 0x188a
+#define MT6357_LDO_TOP_CLK_DCM_CON0 0x188c
+#define MT6357_LDO_TOP_CLK_VIO28_CON0 0x188e
+#define MT6357_LDO_TOP_CLK_VIO18_CON0 0x1890
+#define MT6357_LDO_TOP_CLK_VAUD28_CON0 0x1892
+#define MT6357_LDO_TOP_CLK_VDRAM_CON0 0x1894
+#define MT6357_LDO_TOP_CLK_VSRAM_PROC_CON0 0x1896
+#define MT6357_LDO_TOP_CLK_VSRAM_OTHERS_CON0 0x1898
+#define MT6357_LDO_TOP_CLK_VAUX18_CON0 0x189a
+#define MT6357_LDO_TOP_CLK_VUSB33_CON0 0x189c
+#define MT6357_LDO_TOP_CLK_VEMC_CON0 0x189e
+#define MT6357_LDO_TOP_CLK_VXO22_CON0 0x18a0
+#define MT6357_LDO_TOP_CLK_VSIM1_CON0 0x18a2
+#define MT6357_LDO_TOP_CLK_VSIM2_CON0 0x18a4
+#define MT6357_LDO_TOP_CLK_VCAMD_CON0 0x18a6
+#define MT6357_LDO_TOP_CLK_VCAMIO_CON0 0x18a8
+#define MT6357_LDO_TOP_CLK_VEFUSE_CON0 0x18aa
+#define MT6357_LDO_TOP_CLK_VCN33_CON0 0x18ac
+#define MT6357_LDO_TOP_CLK_VCN18_CON0 0x18ae
+#define MT6357_LDO_TOP_CLK_VCN28_CON0 0x18b0
+#define MT6357_LDO_TOP_CLK_VIBR_CON0 0x18b2
+#define MT6357_LDO_TOP_CLK_VFE28_CON0 0x18b4
+#define MT6357_LDO_TOP_CLK_VMCH_CON0 0x18b6
+#define MT6357_LDO_TOP_CLK_VMC_CON0 0x18b8
+#define MT6357_LDO_TOP_CLK_VRF18_CON0 0x18ba
+#define MT6357_LDO_TOP_CLK_VLDO28_CON0 0x18bc
+#define MT6357_LDO_TOP_CLK_VRF12_CON0 0x18be
+#define MT6357_LDO_TOP_CLK_VCAMA_CON0 0x18c0
+#define MT6357_LDO_TOP_CLK_TREF_CON0 0x18c2
+#define MT6357_LDO_TOP_INT_CON0 0x18c4
+#define MT6357_LDO_TOP_INT_CON0_SET 0x18c6
+#define MT6357_LDO_TOP_INT_CON0_CLR 0x18c8
+#define MT6357_LDO_TOP_INT_CON1 0x18ca
+#define MT6357_LDO_TOP_INT_CON1_SET 0x18cc
+#define MT6357_LDO_TOP_INT_CON1_CLR 0x18ce
+#define MT6357_LDO_TOP_INT_MASK_CON0 0x18d0
+#define MT6357_LDO_TOP_INT_MASK_CON0_SET 0x18d2
+#define MT6357_LDO_TOP_INT_MASK_CON0_CLR 0x18d4
+#define MT6357_LDO_TOP_INT_MASK_CON1 0x18d6
+#define MT6357_LDO_TOP_INT_MASK_CON1_SET 0x18d8
+#define MT6357_LDO_TOP_INT_MASK_CON1_CLR 0x18da
+#define MT6357_LDO_TOP_INT_STATUS0 0x18dc
+#define MT6357_LDO_TOP_INT_STATUS1 0x18de
+#define MT6357_LDO_TOP_INT_RAW_STATUS0 0x18e0
+#define MT6357_LDO_TOP_INT_RAW_STATUS1 0x18e2
+#define MT6357_LDO_TEST_CON0 0x18e4
+#define MT6357_LDO_TOP_WDT_CON0 0x18e6
+#define MT6357_LDO_TOP_RSV_CON0 0x18e8
+#define MT6357_LDO_TOP_RSV_CON1 0x18ea
+#define MT6357_LDO_OCFB0 0x18ec
+#define MT6357_LDO_LP_PROTECTION 0x18ee
+#define MT6357_LDO_DUMMY_LOAD_GATED 0x18f0
+#define MT6357_LDO_GON0_DSN_ID 0x1900
+#define MT6357_LDO_GON0_DSN_REV0 0x1902
+#define MT6357_LDO_GON0_DSN_DBI 0x1904
+#define MT6357_LDO_GON0_DSN_DXI 0x1906
+#define MT6357_LDO_VXO22_CON0 0x1908
+#define MT6357_LDO_VXO22_OP_EN 0x190a
+#define MT6357_LDO_VXO22_OP_EN_SET 0x190c
+#define MT6357_LDO_VXO22_OP_EN_CLR 0x190e
+#define MT6357_LDO_VXO22_OP_CFG 0x1910
+#define MT6357_LDO_VXO22_OP_CFG_SET 0x1912
+#define MT6357_LDO_VXO22_OP_CFG_CLR 0x1914
+#define MT6357_LDO_VXO22_CON1 0x1916
+#define MT6357_LDO_VXO22_CON2 0x1918
+#define MT6357_LDO_VXO22_CON3 0x191a
+#define MT6357_LDO_VAUX18_CON0 0x191c
+#define MT6357_LDO_VAUX18_OP_EN 0x191e
+#define MT6357_LDO_VAUX18_OP_EN_SET 0x1920
+#define MT6357_LDO_VAUX18_OP_EN_CLR 0x1922
+#define MT6357_LDO_VAUX18_OP_CFG 0x1924
+#define MT6357_LDO_VAUX18_OP_CFG_SET 0x1926
+#define MT6357_LDO_VAUX18_OP_CFG_CLR 0x1928
+#define MT6357_LDO_VAUX18_CON1 0x192a
+#define MT6357_LDO_VAUX18_CON2 0x192c
+#define MT6357_LDO_VAUX18_CON3 0x192e
+#define MT6357_LDO_VAUD28_CON0 0x1930
+#define MT6357_LDO_VAUD28_OP_EN 0x1932
+#define MT6357_LDO_VAUD28_OP_EN_SET 0x1934
+#define MT6357_LDO_VAUD28_OP_EN_CLR 0x1936
+#define MT6357_LDO_VAUD28_OP_CFG 0x1938
+#define MT6357_LDO_VAUD28_OP_CFG_SET 0x193a
+#define MT6357_LDO_VAUD28_OP_CFG_CLR 0x193c
+#define MT6357_LDO_VAUD28_CON1 0x193e
+#define MT6357_LDO_VAUD28_CON2 0x1940
+#define MT6357_LDO_VAUD28_CON3 0x1942
+#define MT6357_LDO_VIO28_CON0 0x1944
+#define MT6357_LDO_VIO28_OP_EN 0x1946
+#define MT6357_LDO_VIO28_OP_EN_SET 0x1948
+#define MT6357_LDO_VIO28_OP_EN_CLR 0x194a
+#define MT6357_LDO_VIO28_OP_CFG 0x194c
+#define MT6357_LDO_VIO28_OP_CFG_SET 0x194e
+#define MT6357_LDO_VIO28_OP_CFG_CLR 0x1950
+#define MT6357_LDO_VIO28_CON1 0x1952
+#define MT6357_LDO_VIO28_CON2 0x1954
+#define MT6357_LDO_VIO28_CON3 0x1956
+#define MT6357_LDO_VIO18_CON0 0x1958
+#define MT6357_LDO_VIO18_OP_EN 0x195a
+#define MT6357_LDO_VIO18_OP_EN_SET 0x195c
+#define MT6357_LDO_VIO18_OP_EN_CLR 0x195e
+#define MT6357_LDO_VIO18_OP_CFG 0x1960
+#define MT6357_LDO_VIO18_OP_CFG_SET 0x1962
+#define MT6357_LDO_VIO18_OP_CFG_CLR 0x1964
+#define MT6357_LDO_VIO18_CON1 0x1966
+#define MT6357_LDO_VIO18_CON2 0x1968
+#define MT6357_LDO_VIO18_CON3 0x196a
+#define MT6357_LDO_VDRAM_CON0 0x196c
+#define MT6357_LDO_VDRAM_OP_EN 0x196e
+#define MT6357_LDO_VDRAM_OP_EN_SET 0x1970
+#define MT6357_LDO_VDRAM_OP_EN_CLR 0x1972
+#define MT6357_LDO_VDRAM_OP_CFG 0x1974
+#define MT6357_LDO_VDRAM_OP_CFG_SET 0x1976
+#define MT6357_LDO_VDRAM_OP_CFG_CLR 0x1978
+#define MT6357_LDO_VDRAM_CON1 0x197a
+#define MT6357_LDO_VDRAM_CON2 0x197c
+#define MT6357_LDO_VDRAM_CON3 0x197e
+#define MT6357_LDO_GON1_DSN_ID 0x1980
+#define MT6357_LDO_GON1_DSN_REV0 0x1982
+#define MT6357_LDO_GON1_DSN_DBI 0x1984
+#define MT6357_LDO_GON1_DSN_DXI 0x1986
+#define MT6357_LDO_VEMC_CON0 0x1988
+#define MT6357_LDO_VEMC_OP_EN 0x198a
+#define MT6357_LDO_VEMC_OP_EN_SET 0x198c
+#define MT6357_LDO_VEMC_OP_EN_CLR 0x198e
+#define MT6357_LDO_VEMC_OP_CFG 0x1990
+#define MT6357_LDO_VEMC_OP_CFG_SET 0x1992
+#define MT6357_LDO_VEMC_OP_CFG_CLR 0x1994
+#define MT6357_LDO_VEMC_CON1 0x1996
+#define MT6357_LDO_VEMC_CON2 0x1998
+#define MT6357_LDO_VEMC_CON3 0x199a
+#define MT6357_LDO_VUSB33_CON0_0 0x199c
+#define MT6357_LDO_VUSB33_OP_EN 0x199e
+#define MT6357_LDO_VUSB33_OP_EN_SET 0x19a0
+#define MT6357_LDO_VUSB33_OP_EN_CLR 0x19a2
+#define MT6357_LDO_VUSB33_OP_CFG 0x19a4
+#define MT6357_LDO_VUSB33_OP_CFG_SET 0x19a6
+#define MT6357_LDO_VUSB33_OP_CFG_CLR 0x19a8
+#define MT6357_LDO_VUSB33_CON0_1 0x19aa
+#define MT6357_LDO_VUSB33_CON1 0x19ac
+#define MT6357_LDO_VUSB33_CON2 0x19ae
+#define MT6357_LDO_VUSB33_CON3 0x19b0
+#define MT6357_LDO_VSRAM_PROC_CON0 0x19b2
+#define MT6357_LDO_VSRAM_PROC_CON2 0x19b4
+#define MT6357_LDO_VSRAM_PROC_CFG0 0x19b6
+#define MT6357_LDO_VSRAM_PROC_CFG1 0x19b8
+#define MT6357_LDO_VSRAM_PROC_OP_EN 0x19ba
+#define MT6357_LDO_VSRAM_PROC_OP_EN_SET 0x19bc
+#define MT6357_LDO_VSRAM_PROC_OP_EN_CLR 0x19be
+#define MT6357_LDO_VSRAM_PROC_OP_CFG 0x19c0
+#define MT6357_LDO_VSRAM_PROC_OP_CFG_SET 0x19c2
+#define MT6357_LDO_VSRAM_PROC_OP_CFG_CLR 0x19c4
+#define MT6357_LDO_VSRAM_PROC_CON3 0x19c6
+#define MT6357_LDO_VSRAM_PROC_CON4 0x19c8
+#define MT6357_LDO_VSRAM_PROC_CON5 0x19ca
+#define MT6357_LDO_VSRAM_PROC_DBG0 0x19cc
+#define MT6357_LDO_VSRAM_PROC_DBG1 0x19ce
+#define MT6357_LDO_VSRAM_OTHERS_CON0 0x19d0
+#define MT6357_LDO_VSRAM_OTHERS_CON2 0x19d2
+#define MT6357_LDO_VSRAM_OTHERS_CFG0 0x19d4
+#define MT6357_LDO_VSRAM_OTHERS_CFG1 0x19d6
+#define MT6357_LDO_VSRAM_OTHERS_OP_EN 0x19d8
+#define MT6357_LDO_VSRAM_OTHERS_OP_EN_SET 0x19da
+#define MT6357_LDO_VSRAM_OTHERS_OP_EN_CLR 0x19dc
+#define MT6357_LDO_VSRAM_OTHERS_OP_CFG 0x19de
+#define MT6357_LDO_VSRAM_OTHERS_OP_CFG_SET 0x19e0
+#define MT6357_LDO_VSRAM_OTHERS_OP_CFG_CLR 0x19e2
+#define MT6357_LDO_VSRAM_OTHERS_CON3 0x19e4
+#define MT6357_LDO_VSRAM_OTHERS_CON4 0x19e6
+#define MT6357_LDO_VSRAM_OTHERS_CON5 0x19e8
+#define MT6357_LDO_VSRAM_OTHERS_DBG0 0x19ea
+#define MT6357_LDO_VSRAM_OTHERS_DBG1 0x19ec
+#define MT6357_LDO_VSRAM_PROC_SP 0x19ee
+#define MT6357_LDO_VSRAM_OTHERS_SP 0x19f0
+#define MT6357_LDO_VSRAM_PROC_R2R_PDN_DIS 0x19f2
+#define MT6357_LDO_VSRAM_OTHERS_R2R_PDN_DIS 0x19f4
+#define MT6357_LDO_VSRAM_WDT_DBG0 0x19f6
+#define MT6357_LDO_GON1_ELR_NUM 0x19f8
+#define MT6357_LDO_VSRAM_CON0 0x19fa
+#define MT6357_LDO_VSRAM_CON1 0x19fc
+#define MT6357_LDO_VSRAM_CON2 0x19fe
+#define MT6357_LDO_GOFF0_DSN_ID 0x1a00
+#define MT6357_LDO_GOFF0_DSN_REV0 0x1a02
+#define MT6357_LDO_GOFF0_DSN_DBI 0x1a04
+#define MT6357_LDO_GOFF0_DSN_DXI 0x1a06
+#define MT6357_LDO_VFE28_CON0 0x1a08
+#define MT6357_LDO_VFE28_OP_EN 0x1a0a
+#define MT6357_LDO_VFE28_OP_EN_SET 0x1a0c
+#define MT6357_LDO_VFE28_OP_EN_CLR 0x1a0e
+#define MT6357_LDO_VFE28_OP_CFG 0x1a10
+#define MT6357_LDO_VFE28_OP_CFG_SET 0x1a12
+#define MT6357_LDO_VFE28_OP_CFG_CLR 0x1a14
+#define MT6357_LDO_VFE28_CON1 0x1a16
+#define MT6357_LDO_VFE28_CON2 0x1a18
+#define MT6357_LDO_VFE28_CON3 0x1a1a
+#define MT6357_LDO_VRF18_CON0 0x1a1c
+#define MT6357_LDO_VRF18_OP_EN 0x1a1e
+#define MT6357_LDO_VRF18_OP_EN_SET 0x1a20
+#define MT6357_LDO_VRF18_OP_EN_CLR 0x1a22
+#define MT6357_LDO_VRF18_OP_CFG 0x1a24
+#define MT6357_LDO_VRF18_OP_CFG_SET 0x1a26
+#define MT6357_LDO_VRF18_OP_CFG_CLR 0x1a28
+#define MT6357_LDO_VRF18_CON1 0x1a2a
+#define MT6357_LDO_VRF18_CON2 0x1a2c
+#define MT6357_LDO_VRF18_CON3 0x1a2e
+#define MT6357_LDO_VRF12_CON0 0x1a30
+#define MT6357_LDO_VRF12_OP_EN 0x1a32
+#define MT6357_LDO_VRF12_OP_EN_SET 0x1a34
+#define MT6357_LDO_VRF12_OP_EN_CLR 0x1a36
+#define MT6357_LDO_VRF12_OP_CFG 0x1a38
+#define MT6357_LDO_VRF12_OP_CFG_SET 0x1a3a
+#define MT6357_LDO_VRF12_OP_CFG_CLR 0x1a3c
+#define MT6357_LDO_VRF12_CON1 0x1a3e
+#define MT6357_LDO_VRF12_CON2 0x1a40
+#define MT6357_LDO_VRF12_CON3 0x1a42
+#define MT6357_LDO_VEFUSE_CON0 0x1a44
+#define MT6357_LDO_VEFUSE_OP_EN 0x1a46
+#define MT6357_LDO_VEFUSE_OP_EN_SET 0x1a48
+#define MT6357_LDO_VEFUSE_OP_EN_CLR 0x1a4a
+#define MT6357_LDO_VEFUSE_OP_CFG 0x1a4c
+#define MT6357_LDO_VEFUSE_OP_CFG_SET 0x1a4e
+#define MT6357_LDO_VEFUSE_OP_CFG_CLR 0x1a50
+#define MT6357_LDO_VEFUSE_CON1 0x1a52
+#define MT6357_LDO_VEFUSE_CON2 0x1a54
+#define MT6357_LDO_VEFUSE_CON3 0x1a56
+#define MT6357_LDO_VCN18_CON0 0x1a58
+#define MT6357_LDO_VCN18_OP_EN 0x1a5a
+#define MT6357_LDO_VCN18_OP_EN_SET 0x1a5c
+#define MT6357_LDO_VCN18_OP_EN_CLR 0x1a5e
+#define MT6357_LDO_VCN18_OP_CFG 0x1a60
+#define MT6357_LDO_VCN18_OP_CFG_SET 0x1a62
+#define MT6357_LDO_VCN18_OP_CFG_CLR 0x1a64
+#define MT6357_LDO_VCN18_CON1 0x1a66
+#define MT6357_LDO_VCN18_CON2 0x1a68
+#define MT6357_LDO_VCN18_CON3 0x1a6a
+#define MT6357_LDO_VCAMA_CON0 0x1a6c
+#define MT6357_LDO_VCAMA_OP_EN 0x1a6e
+#define MT6357_LDO_VCAMA_OP_EN_SET 0x1a70
+#define MT6357_LDO_VCAMA_OP_EN_CLR 0x1a72
+#define MT6357_LDO_VCAMA_OP_CFG 0x1a74
+#define MT6357_LDO_VCAMA_OP_CFG_SET 0x1a76
+#define MT6357_LDO_VCAMA_OP_CFG_CLR 0x1a78
+#define MT6357_LDO_VCAMA_CON1 0x1a7a
+#define MT6357_LDO_VCAMA_CON2 0x1a7c
+#define MT6357_LDO_VCAMA_CON3 0x1a7e
+#define MT6357_LDO_GOFF1_DSN_ID 0x1a80
+#define MT6357_LDO_GOFF1_DSN_REV0 0x1a82
+#define MT6357_LDO_GOFF1_DSN_DBI 0x1a84
+#define MT6357_LDO_GOFF1_DSN_DXI 0x1a86
+#define MT6357_LDO_VCAMD_CON0 0x1a88
+#define MT6357_LDO_VCAMD_OP_EN 0x1a8a
+#define MT6357_LDO_VCAMD_OP_EN_SET 0x1a8c
+#define MT6357_LDO_VCAMD_OP_EN_CLR 0x1a8e
+#define MT6357_LDO_VCAMD_OP_CFG 0x1a90
+#define MT6357_LDO_VCAMD_OP_CFG_SET 0x1a92
+#define MT6357_LDO_VCAMD_OP_CFG_CLR 0x1a94
+#define MT6357_LDO_VCAMD_CON1 0x1a96
+#define MT6357_LDO_VCAMD_CON2 0x1a98
+#define MT6357_LDO_VCAMD_CON3 0x1a9a
+#define MT6357_LDO_VCAMIO_CON0 0x1a9c
+#define MT6357_LDO_VCAMIO_OP_EN 0x1a9e
+#define MT6357_LDO_VCAMIO_OP_EN_SET 0x1aa0
+#define MT6357_LDO_VCAMIO_OP_EN_CLR 0x1aa2
+#define MT6357_LDO_VCAMIO_OP_CFG 0x1aa4
+#define MT6357_LDO_VCAMIO_OP_CFG_SET 0x1aa6
+#define MT6357_LDO_VCAMIO_OP_CFG_CLR 0x1aa8
+#define MT6357_LDO_VCAMIO_CON1 0x1aaa
+#define MT6357_LDO_VCAMIO_CON2 0x1aac
+#define MT6357_LDO_VCAMIO_CON3 0x1aae
+#define MT6357_LDO_VMC_CON0 0x1ab0
+#define MT6357_LDO_VMC_OP_EN 0x1ab2
+#define MT6357_LDO_VMC_OP_EN_SET 0x1ab4
+#define MT6357_LDO_VMC_OP_EN_CLR 0x1ab6
+#define MT6357_LDO_VMC_OP_CFG 0x1ab8
+#define MT6357_LDO_VMC_OP_CFG_SET 0x1aba
+#define MT6357_LDO_VMC_OP_CFG_CLR 0x1abc
+#define MT6357_LDO_VMC_CON1 0x1abe
+#define MT6357_LDO_VMC_CON2 0x1ac0
+#define MT6357_LDO_VMC_CON3 0x1ac2
+#define MT6357_LDO_VMCH_CON0 0x1ac4
+#define MT6357_LDO_VMCH_OP_EN 0x1ac6
+#define MT6357_LDO_VMCH_OP_EN_SET 0x1ac8
+#define MT6357_LDO_VMCH_OP_EN_CLR 0x1aca
+#define MT6357_LDO_VMCH_OP_CFG 0x1acc
+#define MT6357_LDO_VMCH_OP_CFG_SET 0x1ace
+#define MT6357_LDO_VMCH_OP_CFG_CLR 0x1ad0
+#define MT6357_LDO_VMCH_CON1 0x1ad2
+#define MT6357_LDO_VMCH_CON2 0x1ad4
+#define MT6357_LDO_VMCH_CON3 0x1ad6
+#define MT6357_LDO_VSIM1_CON0 0x1ad8
+#define MT6357_LDO_VSIM1_OP_EN 0x1ada
+#define MT6357_LDO_VSIM1_OP_EN_SET 0x1adc
+#define MT6357_LDO_VSIM1_OP_EN_CLR 0x1ade
+#define MT6357_LDO_VSIM1_OP_CFG 0x1ae0
+#define MT6357_LDO_VSIM1_OP_CFG_SET 0x1ae2
+#define MT6357_LDO_VSIM1_OP_CFG_CLR 0x1ae4
+#define MT6357_LDO_VSIM1_CON1 0x1ae6
+#define MT6357_LDO_VSIM1_CON2 0x1ae8
+#define MT6357_LDO_VSIM1_CON3 0x1aea
+#define MT6357_LDO_VSIM2_CON0 0x1aec
+#define MT6357_LDO_VSIM2_OP_EN 0x1aee
+#define MT6357_LDO_VSIM2_OP_EN_SET 0x1af0
+#define MT6357_LDO_VSIM2_OP_EN_CLR 0x1af2
+#define MT6357_LDO_VSIM2_OP_CFG 0x1af4
+#define MT6357_LDO_VSIM2_OP_CFG_SET 0x1af6
+#define MT6357_LDO_VSIM2_OP_CFG_CLR 0x1af8
+#define MT6357_LDO_VSIM2_CON1 0x1afa
+#define MT6357_LDO_VSIM2_CON2 0x1afc
+#define MT6357_LDO_VSIM2_CON3 0x1afe
+#define MT6357_LDO_GOFF2_DSN_ID 0x1b00
+#define MT6357_LDO_GOFF2_DSN_REV0 0x1b02
+#define MT6357_LDO_GOFF2_DSN_DBI 0x1b04
+#define MT6357_LDO_GOFF2_DSN_DXI 0x1b06
+#define MT6357_LDO_VIBR_CON0 0x1b08
+#define MT6357_LDO_VIBR_OP_EN 0x1b0a
+#define MT6357_LDO_VIBR_OP_EN_SET 0x1b0c
+#define MT6357_LDO_VIBR_OP_EN_CLR 0x1b0e
+#define MT6357_LDO_VIBR_OP_CFG 0x1b10
+#define MT6357_LDO_VIBR_OP_CFG_SET 0x1b12
+#define MT6357_LDO_VIBR_OP_CFG_CLR 0x1b14
+#define MT6357_LDO_VIBR_CON1 0x1b16
+#define MT6357_LDO_VIBR_CON2 0x1b18
+#define MT6357_LDO_VIBR_CON3 0x1b1a
+#define MT6357_LDO_VCN33_CON0_0 0x1b1c
+#define MT6357_LDO_VCN33_OP_EN 0x1b1e
+#define MT6357_LDO_VCN33_OP_EN_SET 0x1b20
+#define MT6357_LDO_VCN33_OP_EN_CLR 0x1b22
+#define MT6357_LDO_VCN33_OP_CFG 0x1b24
+#define MT6357_LDO_VCN33_OP_CFG_SET 0x1b26
+#define MT6357_LDO_VCN33_OP_CFG_CLR 0x1b28
+#define MT6357_LDO_VCN33_CON0_1 0x1b2a
+#define MT6357_LDO_VCN33_CON1 0x1b2c
+#define MT6357_LDO_VCN33_CON2 0x1b2e
+#define MT6357_LDO_VCN33_CON3 0x1b30
+#define MT6357_LDO_VLDO28_CON0_0 0x1b32
+#define MT6357_LDO_VLDO28_OP_EN 0x1b34
+#define MT6357_LDO_VLDO28_OP_EN_SET 0x1b36
+#define MT6357_LDO_VLDO28_OP_EN_CLR 0x1b38
+#define MT6357_LDO_VLDO28_OP_CFG 0x1b3a
+#define MT6357_LDO_VLDO28_OP_CFG_SET 0x1b3c
+#define MT6357_LDO_VLDO28_OP_CFG_CLR 0x1b3e
+#define MT6357_LDO_VLDO28_CON0_1 0x1b40
+#define MT6357_LDO_VLDO28_CON1 0x1b42
+#define MT6357_LDO_VLDO28_CON2 0x1b44
+#define MT6357_LDO_VLDO28_CON3 0x1b46
+#define MT6357_LDO_GOFF2_RSV_CON0 0x1b48
+#define MT6357_LDO_GOFF2_RSV_CON1 0x1b4a
+#define MT6357_LDO_GOFF3_DSN_ID 0x1b80
+#define MT6357_LDO_GOFF3_DSN_REV0 0x1b82
+#define MT6357_LDO_GOFF3_DSN_DBI 0x1b84
+#define MT6357_LDO_GOFF3_DSN_DXI 0x1b86
+#define MT6357_LDO_VCN28_CON0 0x1b88
+#define MT6357_LDO_VCN28_OP_EN 0x1b8a
+#define MT6357_LDO_VCN28_OP_EN_SET 0x1b8c
+#define MT6357_LDO_VCN28_OP_EN_CLR 0x1b8e
+#define MT6357_LDO_VCN28_OP_CFG 0x1b90
+#define MT6357_LDO_VCN28_OP_CFG_SET 0x1b92
+#define MT6357_LDO_VCN28_OP_CFG_CLR 0x1b94
+#define MT6357_LDO_VCN28_CON1 0x1b96
+#define MT6357_LDO_VCN28_CON2 0x1b98
+#define MT6357_LDO_VCN28_CON3 0x1b9a
+#define MT6357_VRTC_CON0 0x1b9c
+#define MT6357_LDO_TREF_CON0 0x1b9e
+#define MT6357_LDO_TREF_OP_EN 0x1ba0
+#define MT6357_LDO_TREF_OP_EN_SET 0x1ba2
+#define MT6357_LDO_TREF_OP_EN_CLR 0x1ba4
+#define MT6357_LDO_TREF_OP_CFG 0x1ba6
+#define MT6357_LDO_TREF_OP_CFG_SET 0x1ba8
+#define MT6357_LDO_TREF_OP_CFG_CLR 0x1baa
+#define MT6357_LDO_TREF_CON1 0x1bac
+#define MT6357_LDO_GOFF3_RSV_CON0 0x1bae
+#define MT6357_LDO_GOFF3_RSV_CON1 0x1bb0
+#define MT6357_LDO_ANA0_DSN_ID 0x1c00
+#define MT6357_LDO_ANA0_DSN_REV0 0x1c02
+#define MT6357_LDO_ANA0_DSN_DBI 0x1c04
+#define MT6357_LDO_ANA0_DSN_DXI 0x1c06
+#define MT6357_VFE28_ANA_CON0 0x1c08
+#define MT6357_VFE28_ANA_CON1 0x1c0a
+#define MT6357_VCN28_ANA_CON0 0x1c0c
+#define MT6357_VCN28_ANA_CON1 0x1c0e
+#define MT6357_VAUD28_ANA_CON0 0x1c10
+#define MT6357_VAUD28_ANA_CON1 0x1c12
+#define MT6357_VAUX18_ANA_CON0 0x1c14
+#define MT6357_VAUX18_ANA_CON1 0x1c16
+#define MT6357_VXO22_ANA_CON0 0x1c18
+#define MT6357_VXO22_ANA_CON1 0x1c1a
+#define MT6357_VCN33_ANA_CON0 0x1c1c
+#define MT6357_VCN33_ANA_CON1 0x1c1e
+#define MT6357_VEMC_ANA_CON0 0x1c20
+#define MT6357_VEMC_ANA_CON1 0x1c22
+#define MT6357_VLDO28_ANA_CON0 0x1c24
+#define MT6357_VLDO28_ANA_CON1 0x1c26
+#define MT6357_VIO28_ANA_CON0 0x1c28
+#define MT6357_VIO28_ANA_CON1 0x1c2a
+#define MT6357_VIBR_ANA_CON0 0x1c2c
+#define MT6357_VIBR_ANA_CON1 0x1c2e
+#define MT6357_VSIM1_ANA_CON0 0x1c30
+#define MT6357_VSIM1_ANA_CON1 0x1c32
+#define MT6357_VSIM2_ANA_CON0 0x1c34
+#define MT6357_VSIM2_ANA_CON1 0x1c36
+#define MT6357_VMCH_ANA_CON0 0x1c38
+#define MT6357_VMCH_ANA_CON1 0x1c3a
+#define MT6357_VMC_ANA_CON0 0x1c3c
+#define MT6357_VMC_ANA_CON1 0x1c3e
+#define MT6357_VCAMIO_ANA_CON0 0x1c40
+#define MT6357_VCAMIO_ANA_CON1 0x1c42
+#define MT6357_VCN18_ANA_CON0 0x1c44
+#define MT6357_VCN18_ANA_CON1 0x1c46
+#define MT6357_VRF18_ANA_CON0 0x1c48
+#define MT6357_VRF18_ANA_CON1 0x1c4a
+#define MT6357_VIO18_ANA_CON0 0x1c4c
+#define MT6357_VIO18_ANA_CON1 0x1c4e
+#define MT6357_VDRAM_ANA_CON1 0x1c50
+#define MT6357_VRF12_ANA_CON0 0x1c52
+#define MT6357_VRF12_ANA_CON1 0x1c54
+#define MT6357_VSRAM_PROC_ANA_CON0 0x1c56
+#define MT6357_VSRAM_OTHERS_ANA_CON0 0x1c58
+#define MT6357_LDO_ANA0_ELR_NUM 0x1c5a
+#define MT6357_VFE28_ELR_0 0x1c5c
+#define MT6357_VCN28_ELR_0 0x1c5e
+#define MT6357_VAUD28_ELR_0 0x1c60
+#define MT6357_VAUX18_ELR_0 0x1c62
+#define MT6357_VXO22_ELR_0 0x1c64
+#define MT6357_VCN33_ELR_0 0x1c66
+#define MT6357_VEMC_ELR_0 0x1c68
+#define MT6357_VLDO28_ELR_0 0x1c6a
+#define MT6357_VIO28_ELR_0 0x1c6c
+#define MT6357_VIBR_ELR_0 0x1c6e
+#define MT6357_VSIM1_ELR_0 0x1c70
+#define MT6357_VSIM2_ELR_0 0x1c72
+#define MT6357_VMCH_ELR_0 0x1c74
+#define MT6357_VMC_ELR_0 0x1c76
+#define MT6357_VCAMIO_ELR_0 0x1c78
+#define MT6357_VCN18_ELR_0 0x1c7a
+#define MT6357_VRF18_ELR_0 0x1c7c
+#define MT6357_LDO_ANA1_DSN_ID 0x1c80
+#define MT6357_LDO_ANA1_DSN_REV0 0x1c82
+#define MT6357_LDO_ANA1_DSN_DBI 0x1c84
+#define MT6357_LDO_ANA1_DSN_DXI 0x1c86
+#define MT6357_VUSB33_ANA_CON0 0x1c88
+#define MT6357_VUSB33_ANA_CON1 0x1c8a
+#define MT6357_VCAMA_ANA_CON0 0x1c8c
+#define MT6357_VCAMA_ANA_CON1 0x1c8e
+#define MT6357_VEFUSE_ANA_CON0 0x1c90
+#define MT6357_VEFUSE_ANA_CON1 0x1c92
+#define MT6357_VCAMD_ANA_CON0 0x1c94
+#define MT6357_VCAMD_ANA_CON1 0x1c96
+#define MT6357_LDO_ANA1_ELR_NUM 0x1c98
+#define MT6357_VUSB33_ELR_0 0x1c9a
+#define MT6357_VCAMA_ELR_0 0x1c9c
+#define MT6357_VEFUSE_ELR_0 0x1c9e
+#define MT6357_VCAMD_ELR_0 0x1ca0
+#define MT6357_VIO18_ELR_0 0x1ca2
+#define MT6357_VDRAM_ELR_0 0x1ca4
+#define MT6357_VRF12_ELR_0 0x1ca6
+#define MT6357_VRTC_ELR_0 0x1ca8
+#define MT6357_VDRAM_ELR_1 0x1caa
+#define MT6357_VDRAM_ELR_2 0x1cac
+#define MT6357_XPP_TOP_ID 0x1e00
+#define MT6357_XPP_TOP_REV0 0x1e02
+#define MT6357_XPP_TOP_DBI 0x1e04
+#define MT6357_XPP_TOP_DXI 0x1e06
+#define MT6357_XPP_TPM0 0x1e08
+#define MT6357_XPP_TPM1 0x1e0a
+#define MT6357_XPP_TOP_TEST_OUT 0x1e0c
+#define MT6357_XPP_TOP_TEST_CON0 0x1e0e
+#define MT6357_XPP_TOP_CKPDN_CON0 0x1e10
+#define MT6357_XPP_TOP_CKPDN_CON0_SET 0x1e12
+#define MT6357_XPP_TOP_CKPDN_CON0_CLR 0x1e14
+#define MT6357_XPP_TOP_CKSEL_CON0 0x1e16
+#define MT6357_XPP_TOP_CKSEL_CON0_SET 0x1e18
+#define MT6357_XPP_TOP_CKSEL_CON0_CLR 0x1e1a
+#define MT6357_XPP_TOP_RST_CON0 0x1e1c
+#define MT6357_XPP_TOP_RST_CON0_SET 0x1e1e
+#define MT6357_XPP_TOP_RST_CON0_CLR 0x1e20
+#define MT6357_XPP_TOP_RST_BANK_CON0 0x1e22
+#define MT6357_XPP_TOP_RST_BANK_CON0_SET 0x1e24
+#define MT6357_XPP_TOP_RST_BANK_CON0_CLR 0x1e26
+#define MT6357_DRIVER_BL_DSN_ID 0x1e80
+#define MT6357_DRIVER_BL_DSN_REV0 0x1e82
+#define MT6357_DRIVER_BL_DSN_DBI 0x1e84
+#define MT6357_DRIVER_BL_DSN_DXI 0x1e86
+#define MT6357_ISINK1_CON0 0x1e88
+#define MT6357_ISINK1_CON1 0x1e8a
+#define MT6357_ISINK1_CON2 0x1e8c
+#define MT6357_ISINK1_CON3 0x1e8e
+#define MT6357_ISINK_ANA1 0x1e90
+#define MT6357_ISINK_PHASE_DLY 0x1e92
+#define MT6357_ISINK_SFSTR 0x1e94
+#define MT6357_ISINK_EN_CTRL 0x1e96
+#define MT6357_ISINK_MODE_CTRL 0x1e98
+#define MT6357_DRIVER_ANA_CON0 0x1e9a
+#define MT6357_ISINK_ANA_CON0 0x1e9c
+#define MT6357_ISINK_ANA_CON1 0x1e9e
+#define MT6357_DRIVER_BL_ELR_NUM 0x1ea0
+#define MT6357_DRIVER_BL_ELR_0 0x1ea2
+#define MT6357_DRIVER_CI_DSN_ID 0x1f00
+#define MT6357_DRIVER_CI_DSN_REV0 0x1f02
+#define MT6357_DRIVER_CI_DSN_DBI 0x1f04
+#define MT6357_DRIVER_CI_DSN_DXI 0x1f06
+#define MT6357_CHRIND_CON0 0x1f08
+#define MT6357_CHRIND_CON1 0x1f0a
+#define MT6357_CHRIND_CON2 0x1f0c
+#define MT6357_CHRIND_CON3 0x1f0e
+#define MT6357_CHRIND_CON4 0x1f10
+#define MT6357_CHRIND_EN_CTRL 0x1f12
+#define MT6357_CHRIND_ANA_CON0 0x1f14
+#define MT6357_DRIVER_DL_DSN_ID 0x1f80
+#define MT6357_DRIVER_DL_DSN_REV0 0x1f82
+#define MT6357_DRIVER_DL_DSN_DBI 0x1f84
+#define MT6357_DRIVER_DL_DSN_DXI 0x1f86
+#define MT6357_ISINK2_CON0 0x1f88
+#define MT6357_ISINK3_CON0 0x1f8a
+#define MT6357_ISINK_EN_CTRL_SMPL 0x1f8c
+#define MT6357_AUD_TOP_ID 0x2080
+#define MT6357_AUD_TOP_REV0 0x2082
+#define MT6357_AUD_TOP_DBI 0x2084
+#define MT6357_AUD_TOP_DXI 0x2086
+#define MT6357_AUD_TOP_CKPDN_TPM0 0x2088
+#define MT6357_AUD_TOP_CKPDN_TPM1 0x208a
+#define MT6357_AUD_TOP_CKPDN_CON0 0x208c
+#define MT6357_AUD_TOP_CKPDN_CON0_SET 0x208e
+#define MT6357_AUD_TOP_CKPDN_CON0_CLR 0x2090
+#define MT6357_AUD_TOP_CKSEL_CON0 0x2092
+#define MT6357_AUD_TOP_CKSEL_CON0_SET 0x2094
+#define MT6357_AUD_TOP_CKSEL_CON0_CLR 0x2096
+#define MT6357_AUD_TOP_CKTST_CON0 0x2098
+#define MT6357_AUD_TOP_RST_CON0 0x209a
+#define MT6357_AUD_TOP_RST_CON0_SET 0x209c
+#define MT6357_AUD_TOP_RST_CON0_CLR 0x209e
+#define MT6357_AUD_TOP_RST_BANK_CON0 0x20a0
+#define MT6357_AUD_TOP_INT_CON0 0x20a2
+#define MT6357_AUD_TOP_INT_CON0_SET 0x20a4
+#define MT6357_AUD_TOP_INT_CON0_CLR 0x20a6
+#define MT6357_AUD_TOP_INT_MASK_CON0 0x20a8
+#define MT6357_AUD_TOP_INT_MASK_CON0_SET 0x20aa
+#define MT6357_AUD_TOP_INT_MASK_CON0_CLR 0x20ac
+#define MT6357_AUD_TOP_INT_STATUS0 0x20ae
+#define MT6357_AUD_TOP_INT_RAW_STATUS0 0x20b0
+#define MT6357_AUD_TOP_INT_MISC_CON0 0x20b2
+#define MT6357_AUDNCP_CLKDIV_CON0 0x20b4
+#define MT6357_AUDNCP_CLKDIV_CON1 0x20b6
+#define MT6357_AUDNCP_CLKDIV_CON2 0x20b8
+#define MT6357_AUDNCP_CLKDIV_CON3 0x20ba
+#define MT6357_AUDNCP_CLKDIV_CON4 0x20bc
+#define MT6357_AUD_TOP_MON_CON0 0x20be
+#define MT6357_AUDIO_DIG_DSN_ID 0x2100
+#define MT6357_AUDIO_DIG_DSN_REV0 0x2102
+#define MT6357_AUDIO_DIG_DSN_DBI 0x2104
+#define MT6357_AUDIO_DIG_DSN_DXI 0x2106
+#define MT6357_AFE_UL_DL_CON0 0x2108
+#define MT6357_AFE_DL_SRC2_CON0_L 0x210a
+#define MT6357_AFE_UL_SRC_CON0_H 0x210c
+#define MT6357_AFE_UL_SRC_CON0_L 0x210e
+#define MT6357_AFE_TOP_CON0 0x2110
+#define MT6357_AUDIO_TOP_CON0 0x2112
+#define MT6357_AFE_MON_DEBUG0 0x2114
+#define MT6357_AFUNC_AUD_CON0 0x2116
+#define MT6357_AFUNC_AUD_CON1 0x2118
+#define MT6357_AFUNC_AUD_CON2 0x211a
+#define MT6357_AFUNC_AUD_CON3 0x211c
+#define MT6357_AFUNC_AUD_CON4 0x211e
+#define MT6357_AFUNC_AUD_CON5 0x2120
+#define MT6357_AFUNC_AUD_CON6 0x2122
+#define MT6357_AFUNC_AUD_MON0 0x2124
+#define MT6357_AUDRC_TUNE_MON0 0x2126
+#define MT6357_AFE_ADDA_MTKAIF_FIFO_CFG0 0x2128
+#define MT6357_AFE_ADDA_MTKAIF_FIFO_LOG_MON1 0x212a
+#define MT6357_AFE_ADDA_MTKAIF_MON0 0x212c
+#define MT6357_AFE_ADDA_MTKAIF_MON1 0x212e
+#define MT6357_AFE_ADDA_MTKAIF_MON2 0x2130
+#define MT6357_AFE_ADDA_MTKAIF_MON3 0x2132
+#define MT6357_AFE_ADDA_MTKAIF_CFG0 0x2134
+#define MT6357_AFE_ADDA_MTKAIF_RX_CFG0 0x2136
+#define MT6357_AFE_ADDA_MTKAIF_RX_CFG1 0x2138
+#define MT6357_AFE_ADDA_MTKAIF_RX_CFG2 0x213a
+#define MT6357_AFE_ADDA_MTKAIF_RX_CFG3 0x213c
+#define MT6357_AFE_ADDA_MTKAIF_TX_CFG1 0x213e
+#define MT6357_AFE_SGEN_CFG0 0x2140
+#define MT6357_AFE_SGEN_CFG1 0x2142
+#define MT6357_AFE_ADC_ASYNC_FIFO_CFG 0x2144
+#define MT6357_AFE_DCCLK_CFG0 0x2146
+#define MT6357_AFE_DCCLK_CFG1 0x2148
+#define MT6357_AUDIO_DIG_CFG 0x214a
+#define MT6357_AFE_AUD_PAD_TOP 0x214c
+#define MT6357_AFE_AUD_PAD_TOP_MON 0x214e
+#define MT6357_AFE_AUD_PAD_TOP_MON1 0x2150
+#define MT6357_AUDENC_DSN_ID 0x2180
+#define MT6357_AUDENC_DSN_REV0 0x2182
+#define MT6357_AUDENC_DSN_DBI 0x2184
+#define MT6357_AUDENC_DSN_FPI 0x2186
+#define MT6357_AUDENC_ANA_CON0 0x2188
+#define MT6357_AUDENC_ANA_CON1 0x218a
+#define MT6357_AUDENC_ANA_CON2 0x218c
+#define MT6357_AUDENC_ANA_CON3 0x218e
+#define MT6357_AUDENC_ANA_CON4 0x2190
+#define MT6357_AUDENC_ANA_CON5 0x2192
+#define MT6357_AUDENC_ANA_CON6 0x2194
+#define MT6357_AUDENC_ANA_CON7 0x2196
+#define MT6357_AUDENC_ANA_CON8 0x2198
+#define MT6357_AUDENC_ANA_CON9 0x219a
+#define MT6357_AUDENC_ANA_CON10 0x219c
+#define MT6357_AUDENC_ANA_CON11 0x219e
+#define MT6357_AUDDEC_DSN_ID 0x2200
+#define MT6357_AUDDEC_DSN_REV0 0x2202
+#define MT6357_AUDDEC_DSN_DBI 0x2204
+#define MT6357_AUDDEC_DSN_FPI 0x2206
+#define MT6357_AUDDEC_ANA_CON0 0x2208
+#define MT6357_AUDDEC_ANA_CON1 0x220a
+#define MT6357_AUDDEC_ANA_CON2 0x220c
+#define MT6357_AUDDEC_ANA_CON3 0x220e
+#define MT6357_AUDDEC_ANA_CON4 0x2210
+#define MT6357_AUDDEC_ANA_CON5 0x2212
+#define MT6357_AUDDEC_ANA_CON6 0x2214
+#define MT6357_AUDDEC_ANA_CON7 0x2216
+#define MT6357_AUDDEC_ANA_CON8 0x2218
+#define MT6357_AUDDEC_ANA_CON9 0x221a
+#define MT6357_AUDDEC_ANA_CON10 0x221c
+#define MT6357_AUDDEC_ANA_CON11 0x221e
+#define MT6357_AUDDEC_ANA_CON12 0x2220
+#define MT6357_AUDDEC_ANA_CON13 0x2222
+#define MT6357_AUDDEC_ELR_NUM 0x2224
+#define MT6357_AUDDEC_ELR_0 0x2226
+#define MT6357_AUDZCD_DSN_ID 0x2280
+#define MT6357_AUDZCD_DSN_REV0 0x2282
+#define MT6357_AUDZCD_DSN_DBI 0x2284
+#define MT6357_AUDZCD_DSN_FPI 0x2286
+#define MT6357_ZCD_CON0 0x2288
+#define MT6357_ZCD_CON1 0x228a
+#define MT6357_ZCD_CON2 0x228c
+#define MT6357_ZCD_CON3 0x228e
+#define MT6357_ZCD_CON4 0x2290
+#define MT6357_ZCD_CON5 0x2292
+#define MT6357_ACCDET_DSN_DIG_ID 0x2300
+#define MT6357_ACCDET_DSN_DIG_REV0 0x2302
+#define MT6357_ACCDET_DSN_DBI 0x2304
+#define MT6357_ACCDET_DSN_FPI 0x2306
+#define MT6357_ACCDET_CON0 0x2308
+#define MT6357_ACCDET_CON1 0x230a
+#define MT6357_ACCDET_CON2 0x230c
+#define MT6357_ACCDET_CON3 0x230e
+#define MT6357_ACCDET_CON4 0x2310
+#define MT6357_ACCDET_CON5 0x2312
+#define MT6357_ACCDET_CON6 0x2314
+#define MT6357_ACCDET_CON7 0x2316
+#define MT6357_ACCDET_CON8 0x2318
+#define MT6357_ACCDET_CON9 0x231a
+#define MT6357_ACCDET_CON10 0x231c
+#define MT6357_ACCDET_CON11 0x231e
+#define MT6357_ACCDET_CON12 0x2320
+#define MT6357_ACCDET_CON13 0x2322
+#define MT6357_ACCDET_CON14 0x2324
+#define MT6357_ACCDET_CON15 0x2326
+#define MT6357_ACCDET_CON16 0x2328
+#define MT6357_ACCDET_CON17 0x232a
+#define MT6357_ACCDET_CON18 0x232c
+#define MT6357_ACCDET_CON19 0x232e
+#define MT6357_ACCDET_CON20 0x2330
+#define MT6357_ACCDET_CON21 0x2332
+#define MT6357_ACCDET_CON22 0x2334
+#define MT6357_ACCDET_CON23 0x2336
+#define MT6357_ACCDET_CON24 0x2338
+#define MT6357_ACCDET_CON25 0x233a
+#define MT6357_ACCDET_CON26 0x233c
+#define MT6357_ACCDET_CON27 0x233e
+#define MT6357_ACCDET_CON28 0x2340
+
+#endif /* __MFD_MT6357_REGISTERS_H__ */
diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h
index 1cf78726503b..627487e26287 100644
--- a/include/linux/mfd/mt6397/core.h
+++ b/include/linux/mfd/mt6397/core.h
@@ -12,6 +12,9 @@
enum chip_id {
MT6323_CHIP_ID = 0x23,
+ MT6331_CHIP_ID = 0x20,
+ MT6332_CHIP_ID = 0x20,
+ MT6357_CHIP_ID = 0x57,
MT6358_CHIP_ID = 0x58,
MT6359_CHIP_ID = 0x59,
MT6366_CHIP_ID = 0x66,
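
For context, a minimal sketch of how a sub-driver might branch on the newly added chip IDs; the function and its error handling are hypothetical and not part of this patch.

#include <linux/errno.h>
#include <linux/mfd/mt6397/core.h>

/* Hypothetical helper: pick per-chip setup based on the new IDs. */
static int example_setup_by_chip(unsigned int chip_id)
{
	switch (chip_id) {
	case MT6357_CHIP_ID:
		/* MT6357-specific cells would be registered here */
		return 0;
	case MT6331_CHIP_ID:	/* MT6331 and MT6332 share ID 0x20 */
		return 0;
	case MT6358_CHIP_ID:
	case MT6359_CHIP_ID:
		return 0;
	default:
		return -ENODEV;
	}
}
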
diff --git a/include/linux/mfd/t7l66xb.h b/include/linux/mfd/t7l66xb.h
index 69632c1b07bd..ae3e7a5c5219 100644
--- a/include/linux/mfd/t7l66xb.h
+++ b/include/linux/mfd/t7l66xb.h
@@ -12,7 +12,6 @@
struct t7l66xb_platform_data {
int (*enable)(struct platform_device *dev);
- int (*disable)(struct platform_device *dev);
int (*suspend)(struct platform_device *dev);
int (*resume)(struct platform_device *dev);
diff --git a/include/linux/mfd/tc6387xb.h b/include/linux/mfd/tc6387xb.h
index b4888209494a..aacf1dcc86b9 100644
--- a/include/linux/mfd/tc6387xb.h
+++ b/include/linux/mfd/tc6387xb.h
@@ -12,7 +12,6 @@
struct tc6387xb_platform_data {
int (*enable)(struct platform_device *dev);
- int (*disable)(struct platform_device *dev);
int (*suspend)(struct platform_device *dev);
int (*resume)(struct platform_device *dev);
};
diff --git a/include/linux/mfd/tc6393xb.h b/include/linux/mfd/tc6393xb.h
index d336c541b7df..d17807f2d0c9 100644
--- a/include/linux/mfd/tc6393xb.h
+++ b/include/linux/mfd/tc6393xb.h
@@ -22,7 +22,7 @@ struct tc6393xb_platform_data {
u16 scr_gper; /* GP Enable */
int (*enable)(struct platform_device *dev);
- int (*disable)(struct platform_device *dev);
+ void (*disable)(struct platform_device *dev);
int (*suspend)(struct platform_device *dev);
int (*resume)(struct platform_device *dev);
diff --git a/include/linux/mfd/twl.h b/include/linux/mfd/twl.h
index 8871cc5188a0..eaa233038254 100644
--- a/include/linux/mfd/twl.h
+++ b/include/linux/mfd/twl.h
@@ -594,8 +594,6 @@ struct twl4030_gpio_platform_data {
int (*setup)(struct device *dev,
unsigned gpio, unsigned ngpio);
- int (*teardown)(struct device *dev,
- unsigned gpio, unsigned ngpio);
};
struct twl4030_madc_platform_data {
@@ -694,61 +692,6 @@ struct twl4030_audio_data {
unsigned int irq_base;
};
-struct twl4030_platform_data {
- struct twl4030_clock_init_data *clock;
- struct twl4030_bci_platform_data *bci;
- struct twl4030_gpio_platform_data *gpio;
- struct twl4030_madc_platform_data *madc;
- struct twl4030_keypad_data *keypad;
- struct twl4030_usb_data *usb;
- struct twl4030_power_data *power;
- struct twl4030_audio_data *audio;
-
- /* Common LDO regulators for TWL4030/TWL6030 */
- struct regulator_init_data *vdac;
- struct regulator_init_data *vaux1;
- struct regulator_init_data *vaux2;
- struct regulator_init_data *vaux3;
- struct regulator_init_data *vdd1;
- struct regulator_init_data *vdd2;
- struct regulator_init_data *vdd3;
- /* TWL4030 LDO regulators */
- struct regulator_init_data *vpll1;
- struct regulator_init_data *vpll2;
- struct regulator_init_data *vmmc1;
- struct regulator_init_data *vmmc2;
- struct regulator_init_data *vsim;
- struct regulator_init_data *vaux4;
- struct regulator_init_data *vio;
- struct regulator_init_data *vintana1;
- struct regulator_init_data *vintana2;
- struct regulator_init_data *vintdig;
- /* TWL6030 LDO regulators */
- struct regulator_init_data *vmmc;
- struct regulator_init_data *vpp;
- struct regulator_init_data *vusim;
- struct regulator_init_data *vana;
- struct regulator_init_data *vcxio;
- struct regulator_init_data *vusb;
- struct regulator_init_data *clk32kg;
- struct regulator_init_data *v1v8;
- struct regulator_init_data *v2v1;
- /* TWL6032 LDO regulators */
- struct regulator_init_data *ldo1;
- struct regulator_init_data *ldo2;
- struct regulator_init_data *ldo3;
- struct regulator_init_data *ldo4;
- struct regulator_init_data *ldo5;
- struct regulator_init_data *ldo6;
- struct regulator_init_data *ldo7;
- struct regulator_init_data *ldoln;
- struct regulator_init_data *ldousb;
- /* TWL6032 DCDC regulators */
- struct regulator_init_data *smps3;
- struct regulator_init_data *smps4;
- struct regulator_init_data *vio6025;
-};
-
struct twl_regulator_driver_data {
int (*set_voltage)(void *data, int target_uV);
int (*get_voltage)(void *data);
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index ae5bb67a9ba1..22c0a0cf5e0c 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -182,6 +182,7 @@ static inline unsigned long migrate_pfn(unsigned long pfn)
enum migrate_vma_direction {
MIGRATE_VMA_SELECT_SYSTEM = 1 << 0,
MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1,
+ MIGRATE_VMA_SELECT_DEVICE_COHERENT = 1 << 2,
};
struct migrate_vma {
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index ecda6e63d5f2..7b7ce602c808 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -733,10 +733,10 @@ enum {
};
enum {
- MR_CACHE_LAST_STD_ENTRY = 20,
+ MKEY_CACHE_LAST_STD_ENTRY = 20,
MLX5_IMR_MTT_CACHE_ENTRY,
MLX5_IMR_KSM_CACHE_ENTRY,
- MAX_MR_CACHE_ENTRIES
+ MAX_MKEY_CACHE_ENTRIES
};
struct mlx5_profile {
@@ -745,7 +745,7 @@ struct mlx5_profile {
struct {
int size;
int limit;
- } mr_cache[MAX_MR_CACHE_ENTRIES];
+ } mr_cache[MAX_MKEY_CACHE_ENTRIES];
};
struct mlx5_hca_cap {
@@ -779,6 +779,7 @@ struct mlx5_core_dev {
enum mlx5_device_state state;
/* sync interface state */
struct mutex intf_state_mutex;
+ struct lock_class_key lock_key;
unsigned long intf_state;
struct mlx5_priv priv;
struct mlx5_profile profile;
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index ece3e35622d7..8e73c377da2c 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -178,6 +178,7 @@ struct mlx5_flow_table_attr {
int max_fte;
u32 level;
u32 flags;
+ u16 uid;
struct mlx5_flow_table *next_ft;
struct {
@@ -315,4 +316,5 @@ struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
struct mlx5_pkt_reformat *reformat);
+u32 mlx5_flow_table_id(struct mlx5_flow_table *ft);
#endif
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 51b4e71017ee..4acd5610e96b 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1371,7 +1371,9 @@ enum {
};
struct mlx5_ifc_cmd_hca_cap_bits {
- u8 reserved_at_0[0x1f];
+ u8 reserved_at_0[0x10];
+ u8 shared_object_to_user_object_allowed[0x1];
+ u8 reserved_at_13[0xe];
u8 vhca_resource_manager[0x1];
u8 hca_cap_2[0x1];
@@ -8528,7 +8530,7 @@ struct mlx5_ifc_create_flow_table_out_bits {
struct mlx5_ifc_create_flow_table_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
diff --git a/include/linux/mlx5/mlx5_ifc_vdpa.h b/include/linux/mlx5/mlx5_ifc_vdpa.h
index 4414ed5b6ed2..9becdc3fa503 100644
--- a/include/linux/mlx5/mlx5_ifc_vdpa.h
+++ b/include/linux/mlx5/mlx5_ifc_vdpa.h
@@ -150,6 +150,14 @@ enum {
MLX5_VIRTIO_NET_Q_OBJECT_STATE_ERR = 0x3,
};
+/* This indicates that the object was not created or has already
+ * been destroyed. It is safe to assume that this object will never
+ * have so many states.
+ */
+enum {
+ MLX5_VIRTIO_NET_Q_OBJECT_NONE = 0xffffffff
+};
+
enum {
MLX5_RQTC_LIST_Q_TYPE_RQ = 0x0,
MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q = 0x1,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 7898e29bcfb5..21f8b27bd9fd 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -28,6 +28,7 @@
#include <linux/sched.h>
#include <linux/pgtable.h>
#include <linux/kasan.h>
+#include <linux/memremap.h>
struct mempolicy;
struct anon_vma;
@@ -221,6 +222,9 @@ int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *,
/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
+/* to align the pointer to the (prev) page boundary */
+#define PAGE_ALIGN_DOWN(addr) ALIGN_DOWN(addr, PAGE_SIZE)
+
/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
#define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
@@ -424,7 +428,6 @@ extern unsigned int kobjsize(const void *objp);
* mapping from the currently active vm_flags protection bits (the
* low four bits) to a page protection mask..
*/
-extern pgprot_t protection_map[16];
/*
* The default fault flags that should be used by most of the
@@ -855,7 +858,7 @@ static inline struct folio *virt_to_folio(const void *x)
return page_folio(page);
}
-void __put_page(struct page *page);
+void __folio_put(struct folio *folio);
void put_pages_list(struct list_head *pages);
@@ -892,11 +895,7 @@ static inline void set_compound_page_dtor(struct page *page,
page[1].compound_dtor = compound_dtor;
}
-static inline void destroy_compound_page(struct page *page)
-{
- VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
- compound_page_dtors[page[1].compound_dtor](page);
-}
+void destroy_large_folio(struct folio *folio);
static inline int head_compound_pincount(struct page *head)
{
@@ -1049,84 +1048,6 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
* back into memory.
*/
-/*
- * The zone field is never updated after free_area_init_core()
- * sets it, so none of the operations on it need to be atomic.
- */
-
-/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
-#define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
-#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH)
-#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH)
-#define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH)
-#define KASAN_TAG_PGOFF (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)
-
-/*
- * Define the bit shifts to access each section. For non-existent
- * sections we define the shift as 0; that plus a 0 mask ensures
- * the compiler will optimise away reference to them.
- */
-#define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
-#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0))
-#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0))
-#define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
-#define KASAN_TAG_PGSHIFT (KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0))
-
-/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
-#ifdef NODE_NOT_IN_PAGE_FLAGS
-#define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT)
-#define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF)? \
- SECTIONS_PGOFF : ZONES_PGOFF)
-#else
-#define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT)
-#define ZONEID_PGOFF ((NODES_PGOFF < ZONES_PGOFF)? \
- NODES_PGOFF : ZONES_PGOFF)
-#endif
-
-#define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0))
-
-#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
-#define NODES_MASK ((1UL << NODES_WIDTH) - 1)
-#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
-#define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1)
-#define KASAN_TAG_MASK ((1UL << KASAN_TAG_WIDTH) - 1)
-#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)
-
-static inline enum zone_type page_zonenum(const struct page *page)
-{
- ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT);
- return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
-}
-
-static inline enum zone_type folio_zonenum(const struct folio *folio)
-{
- return page_zonenum(&folio->page);
-}
-
-#ifdef CONFIG_ZONE_DEVICE
-static inline bool is_zone_device_page(const struct page *page)
-{
- return page_zonenum(page) == ZONE_DEVICE;
-}
-extern void memmap_init_zone_device(struct zone *, unsigned long,
- unsigned long, struct dev_pagemap *);
-#else
-static inline bool is_zone_device_page(const struct page *page)
-{
- return false;
-}
-#endif
-
-static inline bool folio_is_zone_device(const struct folio *folio)
-{
- return is_zone_device_page(&folio->page);
-}
-
-static inline bool is_zone_movable_page(const struct page *page)
-{
- return page_zonenum(page) == ZONE_MOVABLE;
-}
-
#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_FS_DAX)
DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
@@ -1201,7 +1122,7 @@ static inline __must_check bool try_get_page(struct page *page)
static inline void folio_put(struct folio *folio)
{
if (folio_put_testzero(folio))
- __put_page(&folio->page);
+ __folio_put(folio);
}
/**
@@ -1221,7 +1142,26 @@ static inline void folio_put(struct folio *folio)
static inline void folio_put_refs(struct folio *folio, int refs)
{
if (folio_ref_sub_and_test(folio, refs))
- __put_page(&folio->page);
+ __folio_put(folio);
+}
+
+void release_pages(struct page **pages, int nr);
+
+/**
+ * folios_put - Decrement the reference count on an array of folios.
+ * @folios: The folios.
+ * @nr: How many folios there are.
+ *
+ * Like folio_put(), but for an array of folios. This is more efficient
+ * than writing the loop yourself as it will optimise the locks which
+ * need to be taken if the folios are freed.
+ *
+ * Context: May be called in process or interrupt context, but not in NMI
+ * context. May be called while holding a spinlock.
+ */
+static inline void folios_put(struct folio **folios, unsigned int nr)
+{
+ release_pages((struct page **)folios, nr);
}
static inline void put_page(struct page *page)
@@ -1596,7 +1536,7 @@ static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma,
/* MIGRATE_CMA and ZONE_MOVABLE do not allow pin pages */
#ifdef CONFIG_MIGRATION
-static inline bool is_pinnable_page(struct page *page)
+static inline bool is_longterm_pinnable_page(struct page *page)
{
#ifdef CONFIG_CMA
int mt = get_pageblock_migratetype(page);
@@ -1604,18 +1544,27 @@ static inline bool is_pinnable_page(struct page *page)
if (mt == MIGRATE_CMA || mt == MIGRATE_ISOLATE)
return false;
#endif
- return !is_zone_movable_page(page) || is_zero_pfn(page_to_pfn(page));
+ /* The zero page may always be pinned */
+ if (is_zero_pfn(page_to_pfn(page)))
+ return true;
+
+ /* Coherent device memory must always allow eviction. */
+ if (is_device_coherent_page(page))
+ return false;
+
+ /* Otherwise, non-movable zone pages can be pinned. */
+ return !is_zone_movable_page(page);
}
#else
-static inline bool is_pinnable_page(struct page *page)
+static inline bool is_longterm_pinnable_page(struct page *page)
{
return true;
}
#endif
-static inline bool folio_is_pinnable(struct folio *folio)
+static inline bool folio_is_longterm_pinnable(struct folio *folio)
{
- return is_pinnable_page(&folio->page);
+ return is_longterm_pinnable_page(&folio->page);
}
static inline void set_page_zone(struct page *page, enum zone_type zone)
@@ -1966,8 +1915,12 @@ extern unsigned long move_page_tables(struct vm_area_struct *vma,
* for now all the callers are only use one of the flags at the same
* time.
*/
-/* Whether we should allow dirty bit accounting */
-#define MM_CP_DIRTY_ACCT (1UL << 0)
+/*
+ * Whether we should manually check if we can map individual PTEs writable,
+ * because something (e.g., COW, uffd-wp) blocks that from happening for all
+ * PTEs automatically in a writable mapping.
+ */
+#define MM_CP_TRY_CHANGE_WRITABLE (1UL << 0)
/* Whether this protection change is for NUMA hints */
#define MM_CP_PROT_NUMA (1UL << 1)
/* Whether this change is for write protecting */
@@ -2939,7 +2892,6 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
#define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */
#define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */
#define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */
-#define FOLL_COW 0x4000 /* internal GUP flag */
#define FOLL_ANON 0x8000 /* don't do file mappings */
#define FOLL_LONGTERM 0x10000 /* mapping lifetime is indefinite: see below */
#define FOLL_SPLIT_PMD 0x20000 /* split huge pmd before returning */
@@ -3196,13 +3148,6 @@ static inline void print_vma_addr(char *prefix, unsigned long rip)
}
#endif
-#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
-int vmemmap_remap_free(unsigned long start, unsigned long end,
- unsigned long reuse);
-int vmemmap_remap_alloc(unsigned long start, unsigned long end,
- unsigned long reuse, gfp_t gfp_mask);
-#endif
-
void *sparse_buffer_alloc(unsigned long size);
struct page * __populate_section_memmap(unsigned long pfn,
unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
@@ -3237,7 +3182,10 @@ enum mf_flags {
MF_SOFT_OFFLINE = 1 << 3,
MF_UNPOISON = 1 << 4,
MF_SW_SIMULATED = 1 << 5,
+ MF_NO_RETRY = 1 << 6,
};
+int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
+ unsigned long count, int mf_flags);
extern int memory_failure(unsigned long pfn, int flags);
extern void memory_failure_queue(unsigned long pfn, int flags);
extern void memory_failure_queue_kick(int cpu);
@@ -3287,7 +3235,6 @@ enum mf_action_page_type {
MF_MSG_DIFFERENT_COMPOUND,
MF_MSG_HUGE,
MF_MSG_FREE_HUGE,
- MF_MSG_NON_PMD_HUGE,
MF_MSG_UNMAP_FAILED,
MF_MSG_DIRTY_SWAPCACHE,
MF_MSG_CLEAN_SWAPCACHE,
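
As a usage note for the folios_put()/release_pages() pairing introduced above, here is a minimal sketch of a caller releasing a batch of folios; the surrounding function is hypothetical.

#include <linux/mm.h>

/* Hypothetical batch-release path: drop one reference on each folio. */
static void example_release_batch(struct folio **batch, unsigned int nr)
{
	/*
	 * folios_put() forwards to release_pages(), which batches the
	 * lock work instead of paying it per folio_put() call.
	 */
	folios_put(batch, nr);
}
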
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index c29ab4c0cd5c..cf97f3884fda 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -87,6 +87,7 @@ struct page {
*/
union {
struct list_head lru;
+
/* Or, for the Unevictable "LRU list" slot */
struct {
/* Always even, to negate PageTail */
@@ -94,6 +95,10 @@ struct page {
/* Count page's or folio's mlocks */
unsigned int mlock_count;
};
+
+ /* Or, free page */
+ struct list_head buddy_list;
+ struct list_head pcp_list;
};
/* See page-flags.h for PAGE_MAPPING_FLAGS */
struct address_space *mapping;
@@ -729,6 +734,7 @@ typedef __bitwise unsigned int vm_fault_t;
* @VM_FAULT_NEEDDSYNC: ->fault did not modify page tables and needs
* fsync() to complete (for synchronous page faults
* in DAX)
+ * @VM_FAULT_COMPLETED: ->fault completed, meanwhile mmap lock released
* @VM_FAULT_HINDEX_MASK: mask HINDEX value
*
*/
@@ -746,6 +752,7 @@ enum vm_fault_reason {
VM_FAULT_FALLBACK = (__force vm_fault_t)0x000800,
VM_FAULT_DONE_COW = (__force vm_fault_t)0x001000,
VM_FAULT_NEEDDSYNC = (__force vm_fault_t)0x002000,
+ VM_FAULT_COMPLETED = (__force vm_fault_t)0x004000,
VM_FAULT_HINDEX_MASK = (__force vm_fault_t)0x0f0000,
};
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 37f975875102..8a30de08e913 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -219,7 +219,8 @@ struct sdio_cccr {
wide_bus:1,
high_power:1,
high_speed:1,
- disable_cd:1;
+ disable_cd:1,
+ enable_async_irq:1;
};
struct sdio_cis {
@@ -343,10 +344,16 @@ static inline bool mmc_large_sector(struct mmc_card *card)
return card->ext_csd.data_sector_size == 4096;
}
+static inline int mmc_card_enable_async_irq(struct mmc_card *card)
+{
+ return card->cccr.enable_async_irq;
+}
+
bool mmc_card_is_blockaddr(struct mmc_card *card);
#define mmc_card_mmc(c) ((c)->type == MMC_TYPE_MMC)
#define mmc_card_sd(c) ((c)->type == MMC_TYPE_SD)
#define mmc_card_sdio(c) ((c)->type == MMC_TYPE_SDIO)
+#define mmc_card_sd_combo(c) ((c)->type == MMC_TYPE_SD_COMBO)
#endif /* LINUX_MMC_CARD_H */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index c193c50ccd78..eb8bc5b9b0b7 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -93,6 +93,25 @@ struct mmc_clk_phase_map {
struct mmc_host;
+enum mmc_err_stat {
+ MMC_ERR_CMD_TIMEOUT,
+ MMC_ERR_CMD_CRC,
+ MMC_ERR_DAT_TIMEOUT,
+ MMC_ERR_DAT_CRC,
+ MMC_ERR_AUTO_CMD,
+ MMC_ERR_ADMA,
+ MMC_ERR_TUNING,
+ MMC_ERR_CMDQ_RED,
+ MMC_ERR_CMDQ_GCE,
+ MMC_ERR_CMDQ_ICCE,
+ MMC_ERR_REQ_TIMEOUT,
+ MMC_ERR_CMDQ_REQ_TIMEOUT,
+ MMC_ERR_ICE_CFG,
+ MMC_ERR_CTRL_TIMEOUT,
+ MMC_ERR_UNEXPECTED_IRQ,
+ MMC_ERR_MAX,
+};
+
struct mmc_host_ops {
/*
* It is optional for the host to implement pre_req and post_req in
@@ -501,6 +520,7 @@ struct mmc_host {
/* Host Software Queue support */
bool hsq_enabled;
+ u32 err_stats[MMC_ERR_MAX];
unsigned long private[] ____cacheline_aligned;
};
@@ -635,6 +655,12 @@ static inline enum dma_data_direction mmc_get_dma_dir(struct mmc_data *data)
return data->flags & MMC_DATA_WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}
+static inline void mmc_debugfs_err_stats_inc(struct mmc_host *host,
+ enum mmc_err_stat stat)
+{
+ host->err_stats[stat] += 1;
+}
+
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error);
int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode);
int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
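
A minimal sketch of how a host controller driver might feed the new per-host error statistics; the status bits and the function are hypothetical, only mmc_debugfs_err_stats_inc() and the MMC_ERR_* values come from the patch above.

#include <linux/bits.h>
#include <linux/mmc/host.h>

#define EXAMPLE_STAT_CMD_TIMEOUT	BIT(0)	/* hypothetical controller bit */
#define EXAMPLE_STAT_CMD_CRC		BIT(1)	/* hypothetical controller bit */

/* Hypothetical IRQ error path: bump the per-host error counters. */
static void example_record_cmd_error(struct mmc_host *host, u32 status)
{
	if (status & EXAMPLE_STAT_CMD_TIMEOUT)
		mmc_debugfs_err_stats_inc(host, MMC_ERR_CMD_TIMEOUT);
	if (status & EXAMPLE_STAT_CMD_CRC)
		mmc_debugfs_err_stats_inc(host, MMC_ERR_CMD_CRC);
}
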
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index d9a65c6a8816..9c50bc40f8ff 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -99,6 +99,12 @@ static inline bool mmc_op_multi(u32 opcode)
opcode == MMC_READ_MULTIPLE_BLOCK;
}
+static inline bool mmc_op_tuning(u32 opcode)
+{
+ return opcode == MMC_SEND_TUNING_BLOCK ||
+ opcode == MMC_SEND_TUNING_BLOCK_HS200;
+}
+
/*
* MMC_SWITCH argument format:
*
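
A small sketch of the intended use of the new mmc_op_tuning() helper: suppressing noise for tuning commands that are expected to fail. The reporting wrapper itself is hypothetical.

#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/printk.h>

/* Hypothetical error-reporting helper: tuning commands are allowed to fail. */
static void example_report_cmd_error(struct mmc_host *host, struct mmc_command *cmd)
{
	if (mmc_op_tuning(cmd->opcode))
		return;	/* expected during tuning, don't warn */

	pr_err("%s: opcode %u failed: %d\n",
	       mmc_hostname(host), cmd->opcode, cmd->error);
}
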
diff --git a/include/linux/mmc/sdio.h b/include/linux/mmc/sdio.h
index 2a05d1ac4f0e..1ef400f28642 100644
--- a/include/linux/mmc/sdio.h
+++ b/include/linux/mmc/sdio.h
@@ -159,6 +159,11 @@
#define SDIO_DTSx_SET_TYPE_A (1 << SDIO_DRIVE_DTSx_SHIFT)
#define SDIO_DTSx_SET_TYPE_C (2 << SDIO_DRIVE_DTSx_SHIFT)
#define SDIO_DTSx_SET_TYPE_D (3 << SDIO_DRIVE_DTSx_SHIFT)
+
+#define SDIO_CCCR_INTERRUPT_EXT 0x16
+#define SDIO_INTERRUPT_EXT_SAI (1 << 0)
+#define SDIO_INTERRUPT_EXT_EAI (1 << 1)
+
/*
* Function Basic Registers (FBR)
*/
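
A sketch of how the new CCCR interrupt-extension definitions could be used to probe a card's capabilities from function-0 space; the helper is illustrative, the real card-initialisation logic lives in the MMC core.

#include <linux/types.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>

/* Hypothetical check: does the card advertise async interrupt support (SAI)? */
static bool example_supports_async_irq(struct sdio_func *func)
{
	int err;
	u8 reg = sdio_f0_readb(func, SDIO_CCCR_INTERRUPT_EXT, &err);

	return !err && (reg & SDIO_INTERRUPT_EXT_SAI);
}
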
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index d7285f8148a3..15ae78cd2853 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -54,6 +54,15 @@ void dump_mm(const struct mm_struct *mm);
} \
unlikely(__ret_warn_once); \
})
+#define VM_WARN_ON_FOLIO(cond, folio) ({ \
+ int __ret_warn = !!(cond); \
+ \
+ if (unlikely(__ret_warn)) { \
+ dump_page(&folio->page, "VM_WARN_ON_FOLIO(" __stringify(cond)")");\
+ WARN_ON(1); \
+ } \
+ unlikely(__ret_warn); \
+})
#define VM_WARN_ON_ONCE_FOLIO(cond, folio) ({ \
static bool __section(".data.once") __warned; \
int __ret_warn_once = !!(cond); \
@@ -79,6 +88,7 @@ void dump_mm(const struct mm_struct *mm);
#define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_ONCE_PAGE(cond, page) BUILD_BUG_ON_INVALID(cond)
+#define VM_WARN_ON_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_ONCE_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond)
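
For illustration, the new VM_WARN_ON_FOLIO() is used like its page counterpart, dumping the folio when the condition triggers; the helper below is hypothetical.

#include <linux/mmdebug.h>
#include <linux/page-flags.h>

/* Hypothetical sanity check: warn (and dump the folio) if it is not locked. */
static void example_assert_folio_locked(struct folio *folio)
{
	VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
}
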
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 45fc2c81e370..d6c06e140277 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -198,7 +198,7 @@ struct mmu_notifier_ops {
* invalidate_range_start()/end() notifiers, as
* invalidate_range() already catches the points in time when an
* external TLB range needs to be flushed. For more in depth
- * discussion on this see Documentation/vm/mmu_notifier.rst
+ * discussion on this see Documentation/mm/mmu_notifier.rst
*
* Note that this function might be called with just a sub-range
* of what was passed to invalidate_range_start()/end(), if
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index aab70355d64f..e24b40c52468 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -355,15 +355,18 @@ enum zone_watermarks {
};
/*
- * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER plus one additional
- * for pageblock size for THP if configured.
+ * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER. One additional list
+ * for THP which will usually be GFP_MOVABLE. Even if it is another type,
+ * it should not contribute to serious fragmentation causing THP allocation
+ * failures.
*/
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define NR_PCP_THP 1
#else
#define NR_PCP_THP 0
#endif
-#define NR_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1 + NR_PCP_THP))
+#define NR_LOWORDER_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1))
+#define NR_PCP_LISTS (NR_LOWORDER_PCP_LISTS + NR_PCP_THP)
/*
* Shift to encode migratetype and order in the same integer, with order
@@ -379,6 +382,7 @@ enum zone_watermarks {
/* Fields and list protected by pagesets local_lock in page_alloc.c */
struct per_cpu_pages {
+ spinlock_t lock; /* Protects lists field */
int count; /* number of pages in the list */
int high; /* high watermark, emptying needed */
int batch; /* chunk size for buddy add/remove */
@@ -389,7 +393,7 @@ struct per_cpu_pages {
/* Lists of pages, one per migrate type stored on the pcp-lists */
struct list_head lists[NR_PCP_LISTS];
-};
+} ____cacheline_aligned_in_smp;
struct per_cpu_zonestat {
#ifdef CONFIG_SMP
@@ -591,8 +595,8 @@ struct zone {
* give them a chance of being in the same cacheline.
*
* Write access to present_pages at runtime should be protected by
- * mem_hotplug_begin/end(). Any reader who can't tolerant drift of
- * present_pages should get_online_mems() to get a stable value.
+ * mem_hotplug_begin/done(). Any reader who can't tolerate drift of
+ * present_pages should use get_online_mems() to get a stable value.
*/
atomic_long_t managed_pages;
unsigned long spanned_pages;
@@ -730,6 +734,86 @@ static inline bool zone_is_empty(struct zone *zone)
return zone->spanned_pages == 0;
}
+#ifndef BUILD_VDSO32_64
+/*
+ * The zone field is never updated after free_area_init_core()
+ * sets it, so none of the operations on it need to be atomic.
+ */
+
+/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
+#define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
+#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH)
+#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH)
+#define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH)
+#define KASAN_TAG_PGOFF (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)
+
+/*
+ * Define the bit shifts to access each section. For non-existent
+ * sections we define the shift as 0; that plus a 0 mask ensures
+ * the compiler will optimise away reference to them.
+ */
+#define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
+#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0))
+#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0))
+#define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
+#define KASAN_TAG_PGSHIFT (KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0))
+
+/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
+#ifdef NODE_NOT_IN_PAGE_FLAGS
+#define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT)
+#define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF) ? \
+ SECTIONS_PGOFF : ZONES_PGOFF)
+#else
+#define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT)
+#define ZONEID_PGOFF ((NODES_PGOFF < ZONES_PGOFF) ? \
+ NODES_PGOFF : ZONES_PGOFF)
+#endif
+
+#define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0))
+
+#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
+#define NODES_MASK ((1UL << NODES_WIDTH) - 1)
+#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
+#define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1)
+#define KASAN_TAG_MASK ((1UL << KASAN_TAG_WIDTH) - 1)
+#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)
+
+static inline enum zone_type page_zonenum(const struct page *page)
+{
+ ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT);
+ return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
+}
+
+static inline enum zone_type folio_zonenum(const struct folio *folio)
+{
+ return page_zonenum(&folio->page);
+}
+
+#ifdef CONFIG_ZONE_DEVICE
+static inline bool is_zone_device_page(const struct page *page)
+{
+ return page_zonenum(page) == ZONE_DEVICE;
+}
+extern void memmap_init_zone_device(struct zone *, unsigned long,
+ unsigned long, struct dev_pagemap *);
+#else
+static inline bool is_zone_device_page(const struct page *page)
+{
+ return false;
+}
+#endif
+
+static inline bool folio_is_zone_device(const struct folio *folio)
+{
+ return is_zone_device_page(&folio->page);
+}
+
+static inline bool is_zone_movable_page(const struct page *page)
+{
+ return page_zonenum(page) == ZONE_MOVABLE;
+}
+#endif
+
/*
* Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
* intersection with the given zone
@@ -870,7 +954,7 @@ typedef struct pglist_data {
unsigned long nr_reclaim_start; /* nr pages written while throttled
* when throttling started. */
struct task_struct *kswapd; /* Protected by
- mem_hotplug_begin/end() */
+ mem_hotplug_begin/done() */
int kswapd_order;
enum zone_type kswapd_highest_zoneidx;
@@ -1053,15 +1137,6 @@ static inline int is_highmem_idx(enum zone_type idx)
#endif
}
-#ifdef CONFIG_ZONE_DMA
-bool has_managed_dma(void);
-#else
-static inline bool has_managed_dma(void)
-{
- return false;
-}
-#endif
-
/**
* is_highmem - helper function to quickly check if a struct zone is a
* highmem zone or not. This is an attempt to keep references
@@ -1071,12 +1146,17 @@ static inline bool has_managed_dma(void)
*/
static inline int is_highmem(struct zone *zone)
{
-#ifdef CONFIG_HIGHMEM
return is_highmem_idx(zone_idx(zone));
+}
+
+#ifdef CONFIG_ZONE_DMA
+bool has_managed_dma(void);
#else
- return 0;
-#endif
+static inline bool has_managed_dma(void)
+{
+ return false;
}
+#endif
/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
@@ -1418,16 +1498,32 @@ extern size_t mem_section_usage_size(void);
* (equal SECTION_SIZE_BITS - PAGE_SHIFT), and the
* worst combination is powerpc with 256k pages,
* which results in PFN_SECTION_SHIFT equal 6.
- * To sum it up, at least 6 bits are available.
+ * To sum it up, at least 6 bits are available on all architectures.
+ * However, architectures other than powerpc can go beyond 6 bits
+ * (e.g. 15 bits are available on x86_64, and 13 bits in the worst
+ * case of 64K pages on arm64), as long as the extra bits are never
+ * used on powerpc.
*/
-#define SECTION_MARKED_PRESENT (1UL<<0)
-#define SECTION_HAS_MEM_MAP (1UL<<1)
-#define SECTION_IS_ONLINE (1UL<<2)
-#define SECTION_IS_EARLY (1UL<<3)
-#define SECTION_TAINT_ZONE_DEVICE (1UL<<4)
-#define SECTION_MAP_LAST_BIT (1UL<<5)
-#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1))
-#define SECTION_NID_SHIFT 6
+enum {
+ SECTION_MARKED_PRESENT_BIT,
+ SECTION_HAS_MEM_MAP_BIT,
+ SECTION_IS_ONLINE_BIT,
+ SECTION_IS_EARLY_BIT,
+#ifdef CONFIG_ZONE_DEVICE
+ SECTION_TAINT_ZONE_DEVICE_BIT,
+#endif
+ SECTION_MAP_LAST_BIT,
+};
+
+#define SECTION_MARKED_PRESENT BIT(SECTION_MARKED_PRESENT_BIT)
+#define SECTION_HAS_MEM_MAP BIT(SECTION_HAS_MEM_MAP_BIT)
+#define SECTION_IS_ONLINE BIT(SECTION_IS_ONLINE_BIT)
+#define SECTION_IS_EARLY BIT(SECTION_IS_EARLY_BIT)
+#ifdef CONFIG_ZONE_DEVICE
+#define SECTION_TAINT_ZONE_DEVICE BIT(SECTION_TAINT_ZONE_DEVICE_BIT)
+#endif
+#define SECTION_MAP_MASK (~(BIT(SECTION_MAP_LAST_BIT) - 1))
+#define SECTION_NID_SHIFT SECTION_MAP_LAST_BIT
static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
@@ -1466,12 +1562,19 @@ static inline int online_section(struct mem_section *section)
return (section && (section->section_mem_map & SECTION_IS_ONLINE));
}
+#ifdef CONFIG_ZONE_DEVICE
static inline int online_device_section(struct mem_section *section)
{
unsigned long flags = SECTION_IS_ONLINE | SECTION_TAINT_ZONE_DEVICE;
return section && ((section->section_mem_map & flags) == flags);
}
+#else
+static inline int online_device_section(struct mem_section *section)
+{
+ return 0;
+}
+#endif
static inline int online_section_nr(unsigned long nr)
{
diff --git a/include/linux/mtd/hyperbus.h b/include/linux/mtd/hyperbus.h
index 0ce612428aea..bb6b7121a542 100644
--- a/include/linux/mtd/hyperbus.h
+++ b/include/linux/mtd/hyperbus.h
@@ -89,9 +89,7 @@ int hyperbus_register_device(struct hyperbus_device *hbdev);
/**
* hyperbus_unregister_device - deregister HyperBus slave memory device
* @hbdev: hyperbus_device to be unregistered
- *
- * Return: 0 for success, others for failure.
*/
-int hyperbus_unregister_device(struct hyperbus_device *hbdev);
+void hyperbus_unregister_device(struct hyperbus_device *hbdev);
#endif /* __LINUX_MTD_HYPERBUS_H__ */
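
Since hyperbus_unregister_device() now returns void, a remove path has no status left to propagate; a minimal sketch of a hypothetical platform driver's remove callback:

#include <linux/mtd/hyperbus.h>
#include <linux/platform_device.h>

/* Hypothetical remove callback: unregistration can no longer fail. */
static int example_remove(struct platform_device *pdev)
{
	struct hyperbus_device *hbdev = platform_get_drvdata(pdev);

	hyperbus_unregister_device(hbdev);
	return 0;
}
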
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 1ede4c89805a..42218a1164f6 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -351,7 +351,7 @@ struct spi_nor_flash_parameter;
* @bouncebuf_size: size of the bounce buffer
* @info: SPI NOR part JEDEC MFR ID and other info
* @manufacturer: SPI NOR manufacturer
- * @addr_width: number of address bytes
+ * @addr_nbytes: number of address bytes
* @erase_opcode: the opcode for erasing a sector
* @read_opcode: the read opcode
* @read_dummy: the dummy needed by the read operation
@@ -381,7 +381,7 @@ struct spi_nor {
size_t bouncebuf_size;
const struct flash_info *info;
const struct spi_nor_manufacturer *manufacturer;
- u8 addr_width;
+ u8 addr_nbytes;
u8 erase_opcode;
u8 read_opcode;
u8 read_dummy;
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
index 5584d3bb6556..6d3392a7edc6 100644
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -260,6 +260,7 @@ struct spinand_manufacturer {
};
/* SPI NAND manufacturers */
+extern const struct spinand_manufacturer ato_spinand_manufacturer;
extern const struct spinand_manufacturer gigadevice_spinand_manufacturer;
extern const struct spinand_manufacturer macronix_spinand_manufacturer;
extern const struct spinand_manufacturer micron_spinand_manufacturer;
diff --git a/include/linux/net.h b/include/linux/net.h
index a03485e8cbb2..711c3593c3b8 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -307,8 +307,6 @@ do { \
#define net_get_random_once(buf, nbytes) \
get_random_once((buf), (nbytes))
-#define net_get_random_once_wait(buf, nbytes) \
- get_random_once_wait((buf), (nbytes))
/*
* E.g. XFS meta- & log-data is in slab pages, or bcache meta
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 1a3cb93c3dcc..05d6f3facd5a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -640,9 +640,23 @@ extern int sysctl_devconf_inherit_init_net;
*/
static inline bool net_has_fallback_tunnels(const struct net *net)
{
- return !IS_ENABLED(CONFIG_SYSCTL) ||
- !sysctl_fb_tunnels_only_for_init_net ||
- (net == &init_net && sysctl_fb_tunnels_only_for_init_net == 1);
+#if IS_ENABLED(CONFIG_SYSCTL)
+ int fb_tunnels_only_for_init_net = READ_ONCE(sysctl_fb_tunnels_only_for_init_net);
+
+ return !fb_tunnels_only_for_init_net ||
+ (net_eq(net, &init_net) && fb_tunnels_only_for_init_net == 1);
+#else
+ return true;
+#endif
+}
+
+static inline int net_inherit_devconf(void)
+{
+#if IS_ENABLED(CONFIG_SYSCTL)
+ return READ_ONCE(sysctl_devconf_inherit_init_net);
+#else
+ return 0;
+#endif
}
static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index a13296d6c7ce..fd533552a062 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -94,10 +94,6 @@ struct ebt_table {
struct ebt_replace_kernel *table;
unsigned int valid_hooks;
rwlock_t lock;
- /* e.g. could be the table explicitly only allows certain
- * matches, targets, ... 0 == let it in */
- int (*check)(const struct ebt_table_info *info,
- unsigned int valid_hooks);
/* the data used by the kernel */
struct ebt_table_info *private;
struct nf_hook_ops *ops;
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index a17c337dbdf1..7931fa472561 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -83,7 +83,6 @@ struct nfs_open_context {
fmode_t mode;
unsigned long flags;
-#define NFS_CONTEXT_RESEND_WRITES (1)
#define NFS_CONTEXT_BAD (2)
#define NFS_CONTEXT_UNLOCK (3)
#define NFS_CONTEXT_FILE_OPEN (4)
@@ -182,6 +181,7 @@ struct nfs_inode {
/* Regular file */
struct {
atomic_long_t nrequests;
+ atomic_long_t redirtied_pages;
struct nfs_mds_commit_info commit_info;
struct mutex commit_mutex;
};
@@ -617,6 +617,15 @@ nfs_fileid_to_ino_t(u64 fileid)
#define NFS_JUKEBOX_RETRY_TIME (5 * HZ)
+/* We need to block new opens while a file is being unlinked.
+ * If it is opened *before* we decide to unlink, we will silly-rename
+ * instead. If it is opened *after*, then we need to create or will fail.
+ * If we allow the two to race, we could end up with a file that is open
+ * but deleted on the server resulting in ESTALE.
+ * So use ->d_fsdata to record when the unlink is happening
+ * and block dentry revalidation while it is set.
+ */
+#define NFS_FSDATA_BLOCKED ((void*)1)
# undef ifdebug
# ifdef NFS_DEBUG
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index f0373a6cb5fb..ba7e2e4b0926 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -202,8 +202,7 @@ nfs_list_entry(struct list_head *head)
return list_entry(head, struct nfs_page, wb_list);
}
-static inline
-loff_t req_offset(struct nfs_page *req)
+static inline loff_t req_offset(const struct nfs_page *req)
{
return (((loff_t)req->wb_index) << PAGE_SHIFT) + req->wb_offset;
}
diff --git a/include/linux/nfs_ssc.h b/include/linux/nfs_ssc.h
index 222ae8883e85..75843c00f326 100644
--- a/include/linux/nfs_ssc.h
+++ b/include/linux/nfs_ssc.h
@@ -64,7 +64,7 @@ struct nfsd4_ssc_umount_item {
refcount_t nsui_refcnt;
unsigned long nsui_expire;
struct vfsmount *nsui_vfsmount;
- char nsui_ipaddr[RPC_MAX_ADDRBUFLEN];
+ char nsui_ipaddr[RPC_MAX_ADDRBUFLEN + 1];
};
#endif
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 0e3aa0f5f324..e86cf6642d21 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1600,6 +1600,7 @@ enum {
NFS_IOHDR_STAT,
NFS_IOHDR_RESEND_PNFS,
NFS_IOHDR_RESEND_MDS,
+ NFS_IOHDR_UNSTABLE_WRITES,
};
struct nfs_io_completion;
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 750c7f395ca9..f700ff2df074 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -122,6 +122,8 @@ int watchdog_nmi_probe(void);
int watchdog_nmi_enable(unsigned int cpu);
void watchdog_nmi_disable(unsigned int cpu);
+void lockup_detector_reconfigure(void);
+
/**
* touch_nmi_watchdog - restart NMI watchdog timeout.
*
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index 0f233b76c9ce..4b71a96190a8 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -94,6 +94,7 @@
#include <linux/bitmap.h>
#include <linux/minmax.h>
#include <linux/numa.h>
+#include <linux/random.h>
typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;
extern nodemask_t _unused_nodemask_arg_;
@@ -276,7 +277,14 @@ static inline unsigned int __next_node(int n, const nodemask_t *srcp)
* the first node in src if needed. Returns MAX_NUMNODES if src is empty.
*/
#define next_node_in(n, src) __next_node_in((n), &(src))
-unsigned int __next_node_in(int node, const nodemask_t *srcp);
+static inline unsigned int __next_node_in(int node, const nodemask_t *srcp)
+{
+ unsigned int ret = __next_node(node, srcp);
+
+ if (ret == MAX_NUMNODES)
+ ret = __first_node(srcp);
+ return ret;
+}
static inline void init_nodemask_of_node(nodemask_t *mask, int node)
{
@@ -493,14 +501,20 @@ static inline int num_node_state(enum node_states state)
#endif
+static inline int node_random(const nodemask_t *maskp)
+{
#if defined(CONFIG_NUMA) && (MAX_NUMNODES > 1)
-extern int node_random(const nodemask_t *maskp);
+ int w, bit = NUMA_NO_NODE;
+
+ w = nodes_weight(*maskp);
+ if (w)
+ bit = bitmap_ord_to_pos(maskp->bits,
+ get_random_int() % w, MAX_NUMNODES);
+ return bit;
#else
-static inline int node_random(const nodemask_t *mask)
-{
return 0;
-}
#endif
+}
#define node_online_map node_states[N_ONLINE]
#define node_possible_map node_states[N_POSSIBLE]
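
A sketch of the two helpers touched above: round-robin iteration with next_node_in() and a random pick with node_random(); the wrapper functions are illustrative only.

#include <linux/nodemask.h>

/* Hypothetical example: advance a round-robin cursor over online nodes. */
static int example_next_target_node(int prev)
{
	return next_node_in(prev, node_online_map);
}

/* Hypothetical example: pick a random node that has memory. */
static int example_random_memory_node(void)
{
	return node_random(&node_states[N_MEMORY]);
}
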
diff --git a/include/linux/nvme-auth.h b/include/linux/nvme-auth.h
new file mode 100644
index 000000000000..dcb8030062dd
--- /dev/null
+++ b/include/linux/nvme-auth.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 Hannes Reinecke, SUSE Software Solutions
+ */
+
+#ifndef _NVME_AUTH_H
+#define _NVME_AUTH_H
+
+#include <crypto/kpp.h>
+
+struct nvme_dhchap_key {
+ u8 *key;
+ size_t len;
+ u8 hash;
+};
+
+u32 nvme_auth_get_seqnum(void);
+const char *nvme_auth_dhgroup_name(u8 dhgroup_id);
+const char *nvme_auth_dhgroup_kpp(u8 dhgroup_id);
+u8 nvme_auth_dhgroup_id(const char *dhgroup_name);
+
+const char *nvme_auth_hmac_name(u8 hmac_id);
+const char *nvme_auth_digest_name(u8 hmac_id);
+size_t nvme_auth_hmac_hash_len(u8 hmac_id);
+u8 nvme_auth_hmac_id(const char *hmac_name);
+
+struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret,
+ u8 key_hash);
+void nvme_auth_free_key(struct nvme_dhchap_key *key);
+u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn);
+int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key);
+int nvme_auth_augmented_challenge(u8 hmac_id, u8 *skey, size_t skey_len,
+ u8 *challenge, u8 *aug, size_t hlen);
+int nvme_auth_gen_privkey(struct crypto_kpp *dh_tfm, u8 dh_gid);
+int nvme_auth_gen_pubkey(struct crypto_kpp *dh_tfm,
+ u8 *host_key, size_t host_key_len);
+int nvme_auth_gen_shared_secret(struct crypto_kpp *dh_tfm,
+ u8 *ctrl_key, size_t ctrl_key_len,
+ u8 *sess_key, size_t sess_key_len);
+
+#endif /* _NVME_AUTH_H */
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 07cfc922f8e4..ae53d74f3696 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -19,6 +19,7 @@
#define NVMF_TRSVCID_SIZE 32
#define NVMF_TRADDR_SIZE 256
#define NVMF_TSAS_SIZE 256
+#define NVMF_AUTH_HASH_LEN 64
#define NVME_DISC_SUBSYS_NAME "nqn.2014-08.org.nvmexpress.discovery"
@@ -712,6 +713,10 @@ enum {
};
enum {
+ NVME_AER_ERROR_PERSIST_INT_ERR = 0x03,
+};
+
+enum {
NVME_AER_NOTICE_NS_CHANGED = 0x00,
NVME_AER_NOTICE_FW_ACT_STARTING = 0x01,
NVME_AER_NOTICE_ANA = 0x03,
@@ -1369,6 +1374,8 @@ enum nvmf_capsule_command {
nvme_fabrics_type_property_set = 0x00,
nvme_fabrics_type_connect = 0x01,
nvme_fabrics_type_property_get = 0x04,
+ nvme_fabrics_type_auth_send = 0x05,
+ nvme_fabrics_type_auth_receive = 0x06,
};
#define nvme_fabrics_type_name(type) { type, #type }
@@ -1376,7 +1383,9 @@ enum nvmf_capsule_command {
__print_symbolic(type, \
nvme_fabrics_type_name(nvme_fabrics_type_property_set), \
nvme_fabrics_type_name(nvme_fabrics_type_connect), \
- nvme_fabrics_type_name(nvme_fabrics_type_property_get))
+ nvme_fabrics_type_name(nvme_fabrics_type_property_get), \
+ nvme_fabrics_type_name(nvme_fabrics_type_auth_send), \
+ nvme_fabrics_type_name(nvme_fabrics_type_auth_receive))
/*
* If not fabrics command, fctype will be ignored.
@@ -1472,6 +1481,11 @@ struct nvmf_connect_command {
__u8 resv4[12];
};
+enum {
+ NVME_CONNECT_AUTHREQ_ASCR = (1 << 2),
+ NVME_CONNECT_AUTHREQ_ATR = (1 << 1),
+};
+
struct nvmf_connect_data {
uuid_t hostid;
__le16 cntlid;
@@ -1506,6 +1520,200 @@ struct nvmf_property_get_command {
__u8 resv4[16];
};
+struct nvmf_auth_common_command {
+ __u8 opcode;
+ __u8 resv1;
+ __u16 command_id;
+ __u8 fctype;
+ __u8 resv2[19];
+ union nvme_data_ptr dptr;
+ __u8 resv3;
+ __u8 spsp0;
+ __u8 spsp1;
+ __u8 secp;
+ __le32 al_tl;
+ __u8 resv4[16];
+};
+
+struct nvmf_auth_send_command {
+ __u8 opcode;
+ __u8 resv1;
+ __u16 command_id;
+ __u8 fctype;
+ __u8 resv2[19];
+ union nvme_data_ptr dptr;
+ __u8 resv3;
+ __u8 spsp0;
+ __u8 spsp1;
+ __u8 secp;
+ __le32 tl;
+ __u8 resv4[16];
+};
+
+struct nvmf_auth_receive_command {
+ __u8 opcode;
+ __u8 resv1;
+ __u16 command_id;
+ __u8 fctype;
+ __u8 resv2[19];
+ union nvme_data_ptr dptr;
+ __u8 resv3;
+ __u8 spsp0;
+ __u8 spsp1;
+ __u8 secp;
+ __le32 al;
+ __u8 resv4[16];
+};
+
+/* Value for secp */
+enum {
+ NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER = 0xe9,
+};
+
+/* Defined value for auth_type */
+enum {
+ NVME_AUTH_COMMON_MESSAGES = 0x00,
+ NVME_AUTH_DHCHAP_MESSAGES = 0x01,
+};
+
+/* Defined messages for auth_id */
+enum {
+ NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE = 0x00,
+ NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE = 0x01,
+ NVME_AUTH_DHCHAP_MESSAGE_REPLY = 0x02,
+ NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1 = 0x03,
+ NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 = 0x04,
+ NVME_AUTH_DHCHAP_MESSAGE_FAILURE2 = 0xf0,
+ NVME_AUTH_DHCHAP_MESSAGE_FAILURE1 = 0xf1,
+};
+
+struct nvmf_auth_dhchap_protocol_descriptor {
+ __u8 authid;
+ __u8 rsvd;
+ __u8 halen;
+ __u8 dhlen;
+ __u8 idlist[60];
+};
+
+enum {
+ NVME_AUTH_DHCHAP_AUTH_ID = 0x01,
+};
+
+/* Defined hash functions for DH-HMAC-CHAP authentication */
+enum {
+ NVME_AUTH_HASH_SHA256 = 0x01,
+ NVME_AUTH_HASH_SHA384 = 0x02,
+ NVME_AUTH_HASH_SHA512 = 0x03,
+ NVME_AUTH_HASH_INVALID = 0xff,
+};
+
+/* Defined Diffie-Hellman group identifiers for DH-HMAC-CHAP authentication */
+enum {
+ NVME_AUTH_DHGROUP_NULL = 0x00,
+ NVME_AUTH_DHGROUP_2048 = 0x01,
+ NVME_AUTH_DHGROUP_3072 = 0x02,
+ NVME_AUTH_DHGROUP_4096 = 0x03,
+ NVME_AUTH_DHGROUP_6144 = 0x04,
+ NVME_AUTH_DHGROUP_8192 = 0x05,
+ NVME_AUTH_DHGROUP_INVALID = 0xff,
+};
+
+union nvmf_auth_protocol {
+ struct nvmf_auth_dhchap_protocol_descriptor dhchap;
+};
+
+struct nvmf_auth_dhchap_negotiate_data {
+ __u8 auth_type;
+ __u8 auth_id;
+ __le16 rsvd;
+ __le16 t_id;
+ __u8 sc_c;
+ __u8 napd;
+ union nvmf_auth_protocol auth_protocol[];
+};
+
+struct nvmf_auth_dhchap_challenge_data {
+ __u8 auth_type;
+ __u8 auth_id;
+ __u16 rsvd1;
+ __le16 t_id;
+ __u8 hl;
+ __u8 rsvd2;
+ __u8 hashid;
+ __u8 dhgid;
+ __le16 dhvlen;
+ __le32 seqnum;
+ /* 'hl' bytes of challenge value */
+ __u8 cval[];
+ /* followed by 'dhvlen' bytes of DH value */
+};
+
+struct nvmf_auth_dhchap_reply_data {
+ __u8 auth_type;
+ __u8 auth_id;
+ __le16 rsvd1;
+ __le16 t_id;
+ __u8 hl;
+ __u8 rsvd2;
+ __u8 cvalid;
+ __u8 rsvd3;
+ __le16 dhvlen;
+ __le32 seqnum;
+ /* 'hl' bytes of response data */
+ __u8 rval[];
+ /* followed by 'hl' bytes of Challenge value */
+ /* followed by 'dhvlen' bytes of DH value */
+};
+
+enum {
+ NVME_AUTH_DHCHAP_RESPONSE_VALID = (1 << 0),
+};
+
+struct nvmf_auth_dhchap_success1_data {
+ __u8 auth_type;
+ __u8 auth_id;
+ __le16 rsvd1;
+ __le16 t_id;
+ __u8 hl;
+ __u8 rsvd2;
+ __u8 rvalid;
+ __u8 rsvd3[7];
+ /* 'hl' bytes of response value if 'rvalid' is set */
+ __u8 rval[];
+};
+
+struct nvmf_auth_dhchap_success2_data {
+ __u8 auth_type;
+ __u8 auth_id;
+ __le16 rsvd1;
+ __le16 t_id;
+ __u8 rsvd2[10];
+};
+
+struct nvmf_auth_dhchap_failure_data {
+ __u8 auth_type;
+ __u8 auth_id;
+ __le16 rsvd1;
+ __le16 t_id;
+ __u8 rescode;
+ __u8 rescode_exp;
+};
+
+enum {
+ NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED = 0x01,
+};
+
+enum {
+ NVME_AUTH_DHCHAP_FAILURE_FAILED = 0x01,
+ NVME_AUTH_DHCHAP_FAILURE_NOT_USABLE = 0x02,
+ NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH = 0x03,
+ NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE = 0x04,
+ NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE = 0x05,
+ NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD = 0x06,
+ NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE = 0x07,
+};
+
+
struct nvme_dbbuf {
__u8 opcode;
__u8 flags;
@@ -1549,6 +1757,9 @@ struct nvme_command {
struct nvmf_connect_command connect;
struct nvmf_property_set_command prop_set;
struct nvmf_property_get_command prop_get;
+ struct nvmf_auth_common_command auth_common;
+ struct nvmf_auth_send_command auth_send;
+ struct nvmf_auth_receive_command auth_receive;
struct nvme_dbbuf dbbuf;
struct nvme_directive_cmd directive;
};
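
A hedged sketch (not part of this patch) of how the new DH-HMAC-CHAP structures fit together: filling in the common fields of a negotiate payload. Transport handling, the DH group identifier list and @dhlen are omitted, since their exact layout is governed by the NVMe-oF specification.

#include <linux/nvme.h>

static void example_fill_negotiate(struct nvmf_auth_dhchap_negotiate_data *data,
				   u16 transaction_id)
{
	struct nvmf_auth_dhchap_protocol_descriptor *desc =
		&data->auth_protocol[0].dhchap;

	data->auth_type = NVME_AUTH_COMMON_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
	data->t_id = cpu_to_le16(transaction_id);
	data->sc_c = 0;			/* no secure-channel concatenation */
	data->napd = 1;			/* one protocol descriptor follows */

	desc->authid = NVME_AUTH_DHCHAP_AUTH_ID;
	desc->halen = 3;		/* offer all three defined hashes */
	desc->idlist[0] = NVME_AUTH_HASH_SHA256;
	desc->idlist[1] = NVME_AUTH_HASH_SHA384;
	desc->idlist[2] = NVME_AUTH_HASH_SHA512;
	/* DH group identifiers and desc->dhlen omitted from this sketch */
}
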
diff --git a/include/linux/of.h b/include/linux/of.h
index 20a4e7cb7afe..766d002bddb9 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -207,7 +207,7 @@ static inline void of_node_clear_flag(struct device_node *n, unsigned long flag)
}
#if defined(CONFIG_OF_DYNAMIC) || defined(CONFIG_SPARC)
-static inline int of_property_check_flag(struct property *p, unsigned long flag)
+static inline int of_property_check_flag(const struct property *p, unsigned long flag)
{
return test_bit(flag, &p->_flags);
}
@@ -812,7 +812,8 @@ static inline void of_node_clear_flag(struct device_node *n, unsigned long flag)
{
}
-static inline int of_property_check_flag(struct property *p, unsigned long flag)
+static inline int of_property_check_flag(const struct property *p,
+ unsigned long flag)
{
return 0;
}
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index 8bf2ea859653..a5166eb93437 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -29,6 +29,7 @@ enum of_gpio_flags {
OF_GPIO_TRANSITORY = 0x8,
OF_GPIO_PULL_UP = 0x10,
OF_GPIO_PULL_DOWN = 0x20,
+ OF_GPIO_PULL_DISABLE = 0x40,
};
#ifdef CONFIG_OF_GPIO
diff --git a/include/linux/once.h b/include/linux/once.h
index f54523052bbc..b14d8b309d52 100644
--- a/include/linux/once.h
+++ b/include/linux/once.h
@@ -54,7 +54,5 @@ void __do_once_done(bool *done, struct static_key_true *once_key,
#define get_random_once(buf, nbytes) \
DO_ONCE(get_random_bytes, (buf), (nbytes))
-#define get_random_once_wait(buf, nbytes) \
- DO_ONCE(get_random_bytes_wait, (buf), (nbytes)) \
#endif /* _LINUX_ONCE_H */
diff --git a/include/linux/overflow.h b/include/linux/overflow.h
index f1221d11f8e5..0eb3b192f07a 100644
--- a/include/linux/overflow.h
+++ b/include/linux/overflow.h
@@ -30,7 +30,6 @@
* https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html -
* credit to Christian Biere.
*/
-#define is_signed_type(type) (((type)(-1)) < (type)1)
#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type)))
#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
#define type_min(T) ((T)((T)-type_max(T)-(T)1))
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 3f5490f6f038..465ff35a8c00 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -193,6 +193,11 @@ enum pageflags {
/* Only valid for buddy pages. Used to track pages that are reported */
PG_reported = PG_uptodate,
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+ /* For self-hosted memmap pages */
+ PG_vmemmap_self_hosted = PG_owner_priv_1,
+#endif
};
#define PAGEFLAGS_MASK ((1UL << NR_PAGEFLAGS) - 1)
@@ -200,34 +205,15 @@ enum pageflags {
#ifndef __GENERATING_BOUNDS_H
#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
-DECLARE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON,
- hugetlb_optimize_vmemmap_key);
-
-static __always_inline bool hugetlb_optimize_vmemmap_enabled(void)
-{
- return static_branch_maybe(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON,
- &hugetlb_optimize_vmemmap_key);
-}
+DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);
/*
- * If the feature of optimizing vmemmap pages associated with each HugeTLB
- * page is enabled, the head vmemmap page frame is reused and all of the tail
- * vmemmap addresses map to the head vmemmap page frame (furture details can
- * refer to the figure at the head of the mm/hugetlb_vmemmap.c). In other
- * words, there are more than one page struct with PG_head associated with each
- * HugeTLB page. We __know__ that there is only one head page struct, the tail
- * page structs with PG_head are fake head page structs. We need an approach
- * to distinguish between those two different types of page structs so that
- * compound_head() can return the real head page struct when the parameter is
- * the tail page struct but with PG_head.
- *
- * The page_fixed_fake_head() returns the real head page struct if the @page is
- * fake page head, otherwise, returns @page which can either be a true page
- * head or tail.
+ * Return the real head page struct iff the @page is a fake head page, otherwise
+ * return the @page itself. See Documentation/mm/vmemmap_dedup.rst.
*/
static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
{
- if (!hugetlb_optimize_vmemmap_enabled())
+ if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
return page;
/*
@@ -255,11 +241,6 @@ static inline const struct page *page_fixed_fake_head(const struct page *page)
{
return page;
}
-
-static inline bool hugetlb_optimize_vmemmap_enabled(void)
-{
- return false;
-}
#endif
static __always_inline int page_is_fake_head(struct page *page)
@@ -628,6 +609,12 @@ PAGEFLAG_FALSE(SkipKASanPoison, skip_kasan_poison)
*/
__PAGEFLAG(Reported, reported, PF_NO_COMPOUND)
+#ifdef CONFIG_MEMORY_HOTPLUG
+PAGEFLAG(VmemmapSelfHosted, vmemmap_self_hosted, PF_ANY)
+#else
+PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted)
+#endif
+
/*
* On an anonymous page mapped into a user virtual memory area,
* page->mapping points to its anon_vma, not to a struct address_space;
@@ -650,6 +637,12 @@ __PAGEFLAG(Reported, reported, PF_NO_COMPOUND)
#define PAGE_MAPPING_KSM (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
+/*
+ * Unlike the flags above, this flag is used only for fsdax mode. It
+ * indicates that this page->mapping is currently shared via reflink.
+ */
+#define PAGE_MAPPING_DAX_COW 0x1
+
static __always_inline bool folio_mapping_flags(struct folio *folio)
{
return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) != 0;
@@ -670,6 +663,12 @@ static __always_inline bool PageAnon(struct page *page)
return folio_test_anon(page_folio(page));
}
+static __always_inline bool __folio_test_movable(const struct folio *folio)
+{
+ return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
+ PAGE_MAPPING_MOVABLE;
+}
+
static __always_inline int __PageMovable(struct page *page)
{
return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index cc9adbaddb59..0178b2040ea3 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -345,8 +345,6 @@ static inline void filemap_nr_thps_dec(struct address_space *mapping)
#endif
}
-void release_pages(struct page **pages, int nr);
-
struct address_space *page_mapping(struct page *);
struct address_space *folio_mapping(struct folio *);
struct address_space *swapcache_mapping(struct folio *);
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index 6649154a2115..215eb6c3bdc9 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -26,7 +26,6 @@ struct pagevec {
};
void __pagevec_release(struct pagevec *pvec);
-void __pagevec_lru_add(struct pagevec *pvec);
unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
struct address_space *mapping, pgoff_t *index, pgoff_t end,
xa_mark_t tag);
diff --git a/include/linux/pci-doe.h b/include/linux/pci-doe.h
new file mode 100644
index 000000000000..ed9b4df792b8
--- /dev/null
+++ b/include/linux/pci-doe.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Data Object Exchange
+ * PCIe r6.0, sec 6.30 DOE
+ *
+ * Copyright (C) 2021 Huawei
+ * Jonathan Cameron <Jonathan.Cameron@huawei.com>
+ *
+ * Copyright (C) 2022 Intel Corporation
+ * Ira Weiny <ira.weiny@intel.com>
+ */
+
+#ifndef LINUX_PCI_DOE_H
+#define LINUX_PCI_DOE_H
+
+struct pci_doe_protocol {
+ u16 vid;
+ u8 type;
+};
+
+struct pci_doe_mb;
+
+/**
+ * struct pci_doe_task - represents a single query/response
+ *
+ * @prot: DOE Protocol
+ * @request_pl: The request payload
+ * @request_pl_sz: Size of the request payload (bytes)
+ * @response_pl: The response payload
+ * @response_pl_sz: Size of the response payload (bytes)
+ * @rv: Return value. Length of received response or error (bytes)
+ * @complete: Called when task is complete
+ * @private: Private data for the consumer
+ * @work: Used internally by the mailbox
+ * @doe_mb: Used internally by the mailbox
+ *
+ * The payload sizes and rv are specified in bytes with the following
+ * restrictions concerning the protocol.
+ *
+ * 1) The request_pl_sz must be a multiple of double words (4 bytes)
+ * 2) The response_pl_sz must be >= a single double word (4 bytes)
+ * 3) rv is returned as bytes but it will be a multiple of double words
+ *
+ * NOTE there is no need for the caller to initialize work or doe_mb.
+ */
+struct pci_doe_task {
+ struct pci_doe_protocol prot;
+ u32 *request_pl;
+ size_t request_pl_sz;
+ u32 *response_pl;
+ size_t response_pl_sz;
+ int rv;
+ void (*complete)(struct pci_doe_task *task);
+ void *private;
+
+ /* No need for the user to initialize these fields */
+ struct work_struct work;
+ struct pci_doe_mb *doe_mb;
+};
+
+/**
+ * pci_doe_for_each_off - Iterate each DOE capability
+ * @pdev: struct pci_dev to iterate
+ * @off: u16 of config space offset of each mailbox capability found
+ */
+#define pci_doe_for_each_off(pdev, off) \
+ for (off = pci_find_next_ext_capability(pdev, off, \
+ PCI_EXT_CAP_ID_DOE); \
+ off > 0; \
+ off = pci_find_next_ext_capability(pdev, off, \
+ PCI_EXT_CAP_ID_DOE))
+
+struct pci_doe_mb *pcim_doe_create_mb(struct pci_dev *pdev, u16 cap_offset);
+bool pci_doe_supports_prot(struct pci_doe_mb *doe_mb, u16 vid, u8 type);
+int pci_doe_submit_task(struct pci_doe_mb *doe_mb, struct pci_doe_task *task);
+
+#endif
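
A hedged sketch (not part of this patch) of the intended calling pattern: walk the DOE extended capabilities, create a mailbox, check protocol support and submit one task. The object type (0x01) and the payload are made up; the vendor ID reuses PCI_VENDOR_ID_PCI_SIG added elsewhere in this series, and the failure check on pcim_doe_create_mb() deliberately covers both NULL and ERR_PTR returns since the header does not say which is used.

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/pci-doe.h>

struct example_doe_ctx {
	struct completion done;
};

static void example_doe_done(struct pci_doe_task *task)
{
	struct example_doe_ctx *ctx = task->private;

	complete(&ctx->done);
}

static int example_doe_query(struct pci_dev *pdev)
{
	struct example_doe_ctx ctx;
	struct pci_doe_mb *mb = NULL;
	u32 request[2] = { 0 };			/* hypothetical 2-dword request */
	u32 response[32];
	u16 off = 0;
	int rc;
	struct pci_doe_task task = {
		.prot = {
			.vid = PCI_VENDOR_ID_PCI_SIG,
			.type = 0x01,		/* hypothetical object type */
		},
		.request_pl = request,
		.request_pl_sz = sizeof(request),
		.response_pl = response,
		.response_pl_sz = sizeof(response),
		.complete = example_doe_done,
		.private = &ctx,
	};

	pci_doe_for_each_off(pdev, off) {
		struct pci_doe_mb *cand = pcim_doe_create_mb(pdev, off);

		if (!IS_ERR_OR_NULL(cand) &&
		    pci_doe_supports_prot(cand, PCI_VENDOR_ID_PCI_SIG, 0x01)) {
			mb = cand;
			break;
		}
	}
	if (!mb)
		return -ENODEV;

	init_completion(&ctx.done);
	rc = pci_doe_submit_task(mb, &task);
	if (rc)
		return rc;

	wait_for_completion(&ctx.done);
	return task.rv < 0 ? task.rv : 0;
}
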
diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h
index adea5a4771cf..6b1301e2498e 100644
--- a/include/linux/pci-ecam.h
+++ b/include/linux/pci-ecam.h
@@ -87,6 +87,7 @@ extern const struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 *
extern const struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */
extern const struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */
extern const struct pci_ecam_ops tegra194_pcie_ops; /* Tegra194 PCIe */
+extern const struct pci_ecam_ops loongson_pci_ecam_ops; /* Loongson PCIe */
#endif
#if IS_ENABLED(CONFIG_PCI_HOST_COMMON)
diff --git a/include/linux/pci-p2pdma.h b/include/linux/pci-p2pdma.h
index 8318a97c9c61..2c07aa6b7665 100644
--- a/include/linux/pci-p2pdma.h
+++ b/include/linux/pci-p2pdma.h
@@ -30,10 +30,6 @@ struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
unsigned int *nents, u32 length);
void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl);
void pci_p2pmem_publish(struct pci_dev *pdev, bool publish);
-int pci_p2pdma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, unsigned long attrs);
-void pci_p2pdma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, unsigned long attrs);
int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev,
bool *use_p2pdma);
ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev,
@@ -83,17 +79,6 @@ static inline void pci_p2pmem_free_sgl(struct pci_dev *pdev,
static inline void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
{
}
-static inline int pci_p2pdma_map_sg_attrs(struct device *dev,
- struct scatterlist *sg, int nents, enum dma_data_direction dir,
- unsigned long attrs)
-{
- return 0;
-}
-static inline void pci_p2pdma_unmap_sg_attrs(struct device *dev,
- struct scatterlist *sg, int nents, enum dma_data_direction dir,
- unsigned long attrs)
-{
-}
static inline int pci_p2pdma_enable_store(const char *page,
struct pci_dev **p2p_dev, bool *use_p2pdma)
{
@@ -119,16 +104,4 @@ static inline struct pci_dev *pci_p2pmem_find(struct device *client)
return pci_p2pmem_find_many(&client, 1);
}
-static inline int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir)
-{
- return pci_p2pdma_map_sg_attrs(dev, sg, nents, dir, 0);
-}
-
-static inline void pci_p2pdma_unmap_sg(struct device *dev,
- struct scatterlist *sg, int nents, enum dma_data_direction dir)
-{
- pci_p2pdma_unmap_sg_attrs(dev, sg, nents, dir, 0);
-}
-
#endif /* _LINUX_PCI_P2P_H */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 81a57b498f22..060af91bafcd 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1909,24 +1909,14 @@ pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
#include <asm/pci.h>
-/* These two functions provide almost identical functionality. Depending
- * on the architecture, one will be implemented as a wrapper around the
- * other (in drivers/pci/mmap.c).
- *
+/*
* pci_mmap_resource_range() maps a specific BAR, and vm->vm_pgoff
* is expected to be an offset within that region.
*
- * pci_mmap_page_range() is the legacy architecture-specific interface,
- * which accepts a "user visible" resource address converted by
- * pci_resource_to_user(), as used in the legacy mmap() interface in
- * /proc/bus/pci/.
*/
int pci_mmap_resource_range(struct pci_dev *dev, int bar,
struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine);
-int pci_mmap_page_range(struct pci_dev *pdev, int bar,
- struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
#ifndef arch_can_pci_mmap_wc
#define arch_can_pci_mmap_wc() 0
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 7fa460ccf7fa..6feade66efdb 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -151,6 +151,7 @@
#define PCI_CLASS_OTHERS 0xff
/* Vendors and devices. Sort key: vendor first, device next. */
+#define PCI_VENDOR_ID_PCI_SIG 0x0001
#define PCI_VENDOR_ID_LOONGSON 0x0014
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 3cdc16cfd867..014ee8f0fbaa 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1689,4 +1689,32 @@ typedef unsigned int pgtbl_mod_mask;
#define MAX_PTRS_PER_P4D PTRS_PER_P4D
#endif
+/* description of effects of mapping type and prot in current implementation.
+ * this is due to the limited x86 page protection hardware. The expected
+ * behavior is in parens:
+ *
+ * map_type prot
+ * PROT_NONE PROT_READ PROT_WRITE PROT_EXEC
+ * MAP_SHARED r: (no) no r: (yes) yes r: (no) yes r: (no) yes
+ * w: (no) no w: (no) no w: (yes) yes w: (no) no
+ * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
+ *
+ * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes
+ * w: (no) no w: (no) no w: (copy) copy w: (no) no
+ * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
+ *
+ * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
+ * MAP_PRIVATE (with Enhanced PAN supported):
+ * r: (no) no
+ * w: (no) no
+ * x: (yes) yes
+ */
+#define DECLARE_VM_GET_PAGE_PROT \
+pgprot_t vm_get_page_prot(unsigned long vm_flags) \
+{ \
+ return protection_map[vm_flags & \
+ (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)]; \
+} \
+EXPORT_SYMBOL(vm_get_page_prot);
+
#endif /* _LINUX_PGTABLE_H */
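
A hedged sketch (not part of this patch) of how an architecture is expected to use the new macro: provide its own protection_map[] and let DECLARE_VM_GET_PAGE_PROT generate vm_get_page_prot(). The pgprot values below are placeholders; a real port fills in all sixteen combinations with its own page-protection encodings.

#include <linux/mm.h>
#include <linux/pgtable.h>

static pgprot_t protection_map[16] __ro_after_init = {
	[VM_NONE]			= PAGE_NONE,
	[VM_READ]			= PAGE_READONLY,
	[VM_WRITE]			= PAGE_COPY,
	[VM_WRITE | VM_READ]		= PAGE_COPY,
	[VM_EXEC]			= PAGE_READONLY_EXEC,
	/* ... remaining VM_EXEC / VM_SHARED combinations ... */
};

DECLARE_VM_GET_PAGE_PROT
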
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index 70b45d28e7a9..487117ccb1bc 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -27,6 +27,26 @@ struct gpio_chip;
struct device_node;
/**
+ * struct pingroup - provides information on pingroup
+ * @name: a name for pingroup
+ * @pins: an array of pins in the pingroup
+ * @npins: number of pins in the pingroup
+ */
+struct pingroup {
+ const char *name;
+ const unsigned int *pins;
+ size_t npins;
+};
+
+/* Convenience macro to define a single named or anonymous pingroup */
+#define PINCTRL_PINGROUP(_name, _pins, _npins) \
+(struct pingroup){ \
+ .name = _name, \
+ .pins = _pins, \
+ .npins = _npins, \
+}
+
+/**
* struct pinctrl_pin_desc - boards/machines provide information on their
* pins, pads or other muxable units in this struct
* @number: unique pin number from the global pin number space
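
A small sketch (not part of this patch) using the new pingroup helpers; the group name and pin numbers are made up:

#include <linux/kernel.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/printk.h>

static int example_describe_group(void)
{
	static const unsigned int example_pins[] = { 4, 5 };
	struct pingroup grp = PINCTRL_PINGROUP("i2c0_grp", example_pins,
					       ARRAY_SIZE(example_pins));

	pr_info("%s has %zu pins\n", grp.name, grp.npins);
	return 0;
}
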
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 4ea496924106..6cb65df3e3ba 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -157,26 +157,6 @@ static inline bool pipe_full(unsigned int head, unsigned int tail,
}
/**
- * pipe_space_for_user - Return number of slots available to userspace
- * @head: The pipe ring head pointer
- * @tail: The pipe ring tail pointer
- * @pipe: The pipe info structure
- */
-static inline unsigned int pipe_space_for_user(unsigned int head, unsigned int tail,
- struct pipe_inode_info *pipe)
-{
- unsigned int p_occupancy, p_space;
-
- p_occupancy = pipe_occupancy(head, tail);
- if (p_occupancy >= pipe->max_usage)
- return 0;
- p_space = pipe->ring_size - p_occupancy;
- if (p_space > pipe->max_usage)
- p_space = pipe->max_usage;
- return p_space;
-}
-
-/**
* pipe_buf_get - get a reference to a pipe_buffer
* @pipe: the pipe that the buffer belongs to
* @buf: the buffer to get a reference to
diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h
index 8cfa8cfca77e..8b1b795867a1 100644
--- a/include/linux/platform_data/cros_ec_commands.h
+++ b/include/linux/platform_data/cros_ec_commands.h
@@ -13,8 +13,8 @@
#ifndef __CROS_EC_COMMANDS_H
#define __CROS_EC_COMMANDS_H
-
-
+#include <linux/bits.h>
+#include <linux/types.h>
#define BUILD_ASSERT(_cond)
@@ -787,7 +787,7 @@ struct ec_host_response {
*
* Packets always start with a request or response header. They are followed
* by data_len bytes of data. If the data_crc_present flag is set, the data
- * bytes are followed by a CRC-8 of that data, using using x^8 + x^2 + x + 1
+ * bytes are followed by a CRC-8 of that data, using x^8 + x^2 + x + 1
* polynomial.
*
* Host algorithm when sending a request q:
@@ -1300,6 +1300,8 @@ enum ec_feature_code {
* mux.
*/
EC_FEATURE_TYPEC_MUX_REQUIRE_AP_ACK = 43,
+ /* The MCU is a System Companion Processor (SCP) 2nd Core. */
+ EC_FEATURE_SCP_C1 = 45,
};
#define EC_FEATURE_MASK_0(event_code) BIT(event_code % 32)
diff --git a/include/linux/platform_data/cros_ec_proto.h b/include/linux/platform_data/cros_ec_proto.h
index 138fd912c808..408b29ca4004 100644
--- a/include/linux/platform_data/cros_ec_proto.h
+++ b/include/linux/platform_data/cros_ec_proto.h
@@ -19,8 +19,12 @@
#define CROS_EC_DEV_ISH_NAME "cros_ish"
#define CROS_EC_DEV_PD_NAME "cros_pd"
#define CROS_EC_DEV_SCP_NAME "cros_scp"
+#define CROS_EC_DEV_SCP_C1_NAME "cros_scp_c1"
#define CROS_EC_DEV_TP_NAME "cros_tp"
+#define CROS_EC_DEV_EC_INDEX 0
+#define CROS_EC_DEV_PD_INDEX 1
+
/*
* The EC is unresponsive for a time after a reboot command. Add a
* simple delay to make sure that the bus stays locked.
@@ -231,8 +235,8 @@ bool cros_ec_check_features(struct cros_ec_dev *ec, int feature);
int cros_ec_get_sensor_count(struct cros_ec_dev *ec);
-int cros_ec_command(struct cros_ec_device *ec_dev, unsigned int version, int command, void *outdata,
- int outsize, void *indata, int insize);
+int cros_ec_cmd(struct cros_ec_device *ec_dev, unsigned int version, int command, void *outdata,
+ size_t outsize, void *indata, size_t insize);
/**
* cros_ec_get_time_ns() - Return time in ns.
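
A hedged sketch (not part of this patch) of a call through the renamed helper; EXAMPLE_EC_CMD and the 4-byte response layout are made up for illustration:

#include <linux/platform_data/cros_ec_proto.h>

#define EXAMPLE_EC_CMD	0x0002	/* stands in for a real EC_CMD_* constant */

static int example_ec_query(struct cros_ec_device *ec_dev)
{
	u32 response = 0;
	int ret;

	ret = cros_ec_cmd(ec_dev, 0, EXAMPLE_EC_CMD, NULL, 0,
			  &response, sizeof(response));
	if (ret < 0)
		return ret;

	/* 'response' now holds the EC's reply; interpretation is command specific */
	return 0;
}
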
diff --git a/include/linux/platform_data/video-imxfb.h b/include/linux/platform_data/video-imxfb.h
deleted file mode 100644
index 02812651af7d..000000000000
--- a/include/linux/platform_data/video-imxfb.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This structure describes the machine which we are running on.
- */
-#ifndef __MACH_IMXFB_H__
-#define __MACH_IMXFB_H__
-
-#include <linux/fb.h>
-
-#define PCR_TFT (1 << 31)
-#define PCR_COLOR (1 << 30)
-#define PCR_PBSIZ_1 (0 << 28)
-#define PCR_PBSIZ_2 (1 << 28)
-#define PCR_PBSIZ_4 (2 << 28)
-#define PCR_PBSIZ_8 (3 << 28)
-#define PCR_BPIX_1 (0 << 25)
-#define PCR_BPIX_2 (1 << 25)
-#define PCR_BPIX_4 (2 << 25)
-#define PCR_BPIX_8 (3 << 25)
-#define PCR_BPIX_12 (4 << 25)
-#define PCR_BPIX_16 (5 << 25)
-#define PCR_BPIX_18 (6 << 25)
-#define PCR_PIXPOL (1 << 24)
-#define PCR_FLMPOL (1 << 23)
-#define PCR_LPPOL (1 << 22)
-#define PCR_CLKPOL (1 << 21)
-#define PCR_OEPOL (1 << 20)
-#define PCR_SCLKIDLE (1 << 19)
-#define PCR_END_SEL (1 << 18)
-#define PCR_END_BYTE_SWAP (1 << 17)
-#define PCR_REV_VS (1 << 16)
-#define PCR_ACD_SEL (1 << 15)
-#define PCR_ACD(x) (((x) & 0x7f) << 8)
-#define PCR_SCLK_SEL (1 << 7)
-#define PCR_SHARP (1 << 6)
-#define PCR_PCD(x) ((x) & 0x3f)
-
-#define PWMR_CLS(x) (((x) & 0x1ff) << 16)
-#define PWMR_LDMSK (1 << 15)
-#define PWMR_SCR1 (1 << 10)
-#define PWMR_SCR0 (1 << 9)
-#define PWMR_CC_EN (1 << 8)
-#define PWMR_PW(x) ((x) & 0xff)
-
-#define LSCR1_PS_RISE_DELAY(x) (((x) & 0x7f) << 26)
-#define LSCR1_CLS_RISE_DELAY(x) (((x) & 0x3f) << 16)
-#define LSCR1_REV_TOGGLE_DELAY(x) (((x) & 0xf) << 8)
-#define LSCR1_GRAY2(x) (((x) & 0xf) << 4)
-#define LSCR1_GRAY1(x) (((x) & 0xf))
-
-struct imx_fb_videomode {
- struct fb_videomode mode;
- u32 pcr;
- bool aus_mode;
- unsigned char bpp;
-};
-
-struct imx_fb_platform_data {
- struct imx_fb_videomode *mode;
- int num_modes;
-
- u_int pwmr;
- u_int lscr1;
- u_int dmacr;
-
- int (*init)(struct platform_device *);
- void (*exit)(struct platform_device *);
-};
-
-#endif /* ifndef __MACH_IMXFB_H__ */
diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h
index a571b47ff362..98f2b2f20f3e 100644
--- a/include/linux/platform_data/x86/asus-wmi.h
+++ b/include/linux/platform_data/x86/asus-wmi.h
@@ -49,6 +49,7 @@
#define ASUS_WMI_DEVID_LED4 0x00020014
#define ASUS_WMI_DEVID_LED5 0x00020015
#define ASUS_WMI_DEVID_LED6 0x00020016
+#define ASUS_WMI_DEVID_MICMUTE_LED 0x00040017
/* Backlight and Brightness */
#define ASUS_WMI_DEVID_ALS_ENABLE 0x00050001 /* Ambient Light Sensor */
diff --git a/include/linux/platform_data/x86/p2sb.h b/include/linux/platform_data/x86/p2sb.h
new file mode 100644
index 000000000000..a1d5fddc8f13
--- /dev/null
+++ b/include/linux/platform_data/x86/p2sb.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Primary to Sideband (P2SB) bridge access support
+ */
+
+#ifndef _PLATFORM_DATA_X86_P2SB_H
+#define _PLATFORM_DATA_X86_P2SB_H
+
+#include <linux/errno.h>
+#include <linux/kconfig.h>
+
+struct pci_bus;
+struct resource;
+
+#if IS_BUILTIN(CONFIG_P2SB)
+
+int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem);
+
+#else /* CONFIG_P2SB */
+
+static inline int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_P2SB is not set */
+
+#endif /* _PLATFORM_DATA_X86_P2SB_H */
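
A minimal sketch (not part of this patch); the device/function number passed to p2sb_bar() is a made-up example:

#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/printk.h>
#include <linux/platform_data/x86/p2sb.h>

static int example_get_p2sb_bar(struct pci_bus *bus, struct resource *mem)
{
	int ret;

	ret = p2sb_bar(bus, PCI_DEVFN(31, 1), mem);	/* hypothetical devfn */
	if (ret)
		return ret;

	pr_info("P2SB BAR: %pR\n", mem);
	return 0;
}
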
diff --git a/include/linux/platform_data/x86/pmc_atom.h b/include/linux/platform_data/x86/pmc_atom.h
index 6807839c718b..3edfb6d4e67a 100644
--- a/include/linux/platform_data/x86/pmc_atom.h
+++ b/include/linux/platform_data/x86/pmc_atom.h
@@ -47,7 +47,7 @@
#define PMC_S0I2_TMR 0x88
#define PMC_S0I3_TMR 0x8C
#define PMC_S0_TMR 0x90
-/* Sleep state counter is in units of of 32us */
+/* Sleep state counter is in units of 32us */
#define PMC_TMR_SHIFT 5
/* Power status of power islands */
diff --git a/include/linux/platform_data/x86/simatic-ipc-base.h b/include/linux/platform_data/x86/simatic-ipc-base.h
index 62d2bc774067..39fefd48cf4d 100644
--- a/include/linux/platform_data/x86/simatic-ipc-base.h
+++ b/include/linux/platform_data/x86/simatic-ipc-base.h
@@ -24,6 +24,4 @@ struct simatic_ipc_platform {
u8 devmode;
};
-u32 simatic_ipc_get_membase0(unsigned int p2sb);
-
#endif /* __PLATFORM_DATA_X86_SIMATIC_IPC_BASE_H */
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 6708b4ec244d..dc1fb5890792 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -57,37 +57,39 @@ struct dev_pm_opp_icc_bw {
u32 peak;
};
-/**
- * struct dev_pm_opp_info - OPP freq/voltage/current values
- * @rate: Target clk rate in hz
- * @supplies: Array of voltage/current values for all power supplies
- *
- * This structure stores the freq/voltage/current values for a single OPP.
- */
-struct dev_pm_opp_info {
- unsigned long rate;
- struct dev_pm_opp_supply *supplies;
-};
+typedef int (*config_regulators_t)(struct device *dev,
+ struct dev_pm_opp *old_opp, struct dev_pm_opp *new_opp,
+ struct regulator **regulators, unsigned int count);
+
+typedef int (*config_clks_t)(struct device *dev, struct opp_table *opp_table,
+ struct dev_pm_opp *opp, void *data, bool scaling_down);
/**
- * struct dev_pm_set_opp_data - Set OPP data
- * @old_opp: Old OPP info
- * @new_opp: New OPP info
- * @regulators: Array of regulator pointers
- * @regulator_count: Number of regulators
- * @clk: Pointer to clk
- * @dev: Pointer to the struct device
+ * struct dev_pm_opp_config - Device OPP configuration values
+ * @clk_names: Clk names, NULL terminated array.
+ * @config_clks: Custom set clk helper.
+ * @prop_name: Name to postfix to properties.
+ * @config_regulators: Custom set regulator helper.
+ * @supported_hw: Array of hierarchy of versions to match.
+ * @supported_hw_count: Number of elements in the array.
+ * @regulator_names: Array of pointers to the names of the regulators, NULL terminated.
+ * @genpd_names: NULL terminated array of pointers containing names of genpds to
+ * attach.
+ * @virt_devs: Pointer to return the array of virtual devices.
*
- * This structure contains all information required for setting an OPP.
+ * This structure contains platform specific OPP configurations for the device.
*/
-struct dev_pm_set_opp_data {
- struct dev_pm_opp_info old_opp;
- struct dev_pm_opp_info new_opp;
-
- struct regulator **regulators;
- unsigned int regulator_count;
- struct clk *clk;
- struct device *dev;
+struct dev_pm_opp_config {
+ /* NULL terminated */
+ const char * const *clk_names;
+ config_clks_t config_clks;
+ const char *prop_name;
+ config_regulators_t config_regulators;
+ const unsigned int *supported_hw;
+ unsigned int supported_hw_count;
+ const char * const *regulator_names;
+ const char * const *genpd_names;
+ struct device ***virt_devs;
};
#if defined(CONFIG_PM_OPP)
@@ -97,6 +99,8 @@ void dev_pm_opp_put_opp_table(struct opp_table *opp_table);
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);
+int dev_pm_opp_get_supplies(struct dev_pm_opp *opp, struct dev_pm_opp_supply *supplies);
+
unsigned long dev_pm_opp_get_power(struct dev_pm_opp *opp);
unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp);
@@ -119,8 +123,6 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
bool available);
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
unsigned long *freq);
-struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev,
- unsigned long u_volt);
struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
unsigned int level);
@@ -154,23 +156,13 @@ int dev_pm_opp_disable(struct device *dev, unsigned long freq);
int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb);
int dev_pm_opp_unregister_notifier(struct device *dev, struct notifier_block *nb);
-struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, unsigned int count);
-void dev_pm_opp_put_supported_hw(struct opp_table *opp_table);
-int devm_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, unsigned int count);
-struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name);
-void dev_pm_opp_put_prop_name(struct opp_table *opp_table);
-struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count);
-void dev_pm_opp_put_regulators(struct opp_table *opp_table);
-int devm_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count);
-struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name);
-void dev_pm_opp_put_clkname(struct opp_table *opp_table);
-int devm_pm_opp_set_clkname(struct device *dev, const char *name);
-struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data));
-void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table);
-int devm_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data));
-struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char * const *names, struct device ***virt_devs);
-void dev_pm_opp_detach_genpd(struct opp_table *opp_table);
-int devm_pm_opp_attach_genpd(struct device *dev, const char * const *names, struct device ***virt_devs);
+int dev_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config);
+int devm_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config);
+void dev_pm_opp_clear_config(int token);
+int dev_pm_opp_config_clks_simple(struct device *dev,
+ struct opp_table *opp_table, struct dev_pm_opp *opp, void *data,
+ bool scaling_down);
+
struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table, struct opp_table *dst_table, struct dev_pm_opp *src_opp);
int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate);
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
@@ -198,6 +190,11 @@ static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
return 0;
}
+static inline int dev_pm_opp_get_supplies(struct dev_pm_opp *opp, struct dev_pm_opp_supply *supplies)
+{
+ return -EOPNOTSUPP;
+}
+
static inline unsigned long dev_pm_opp_get_power(struct dev_pm_opp *opp)
{
return 0;
@@ -274,12 +271,6 @@ static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
return ERR_PTR(-EOPNOTSUPP);
}
-static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev,
- unsigned long u_volt)
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
-
static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
unsigned long *freq)
{
@@ -342,79 +333,21 @@ static inline int dev_pm_opp_unregister_notifier(struct device *dev, struct noti
return -EOPNOTSUPP;
}
-static inline struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev,
- const u32 *versions,
- unsigned int count)
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
-
-static inline void dev_pm_opp_put_supported_hw(struct opp_table *opp_table) {}
-
-static inline int devm_pm_opp_set_supported_hw(struct device *dev,
- const u32 *versions,
- unsigned int count)
-{
- return -EOPNOTSUPP;
-}
-
-static inline struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
- int (*set_opp)(struct dev_pm_set_opp_data *data))
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
-
-static inline void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table) {}
-
-static inline int devm_pm_opp_register_set_opp_helper(struct device *dev,
- int (*set_opp)(struct dev_pm_set_opp_data *data))
-{
- return -EOPNOTSUPP;
-}
-
-static inline struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
-
-static inline void dev_pm_opp_put_prop_name(struct opp_table *opp_table) {}
-
-static inline struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count)
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
-
-static inline void dev_pm_opp_put_regulators(struct opp_table *opp_table) {}
-
-static inline int devm_pm_opp_set_regulators(struct device *dev,
- const char * const names[],
- unsigned int count)
+static inline int dev_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config)
{
return -EOPNOTSUPP;
}
-static inline struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name)
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
-
-static inline void dev_pm_opp_put_clkname(struct opp_table *opp_table) {}
-
-static inline int devm_pm_opp_set_clkname(struct device *dev, const char *name)
+static inline int devm_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config)
{
return -EOPNOTSUPP;
}
-static inline struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char * const *names, struct device ***virt_devs)
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
-
-static inline void dev_pm_opp_detach_genpd(struct opp_table *opp_table) {}
+static inline void dev_pm_opp_clear_config(int token) {}
-static inline int devm_pm_opp_attach_genpd(struct device *dev,
- const char * const *names,
- struct device ***virt_devs)
+static inline int dev_pm_opp_config_clks_simple(struct device *dev,
+ struct opp_table *opp_table, struct dev_pm_opp *opp, void *data,
+ bool scaling_down)
{
return -EOPNOTSUPP;
}
@@ -469,8 +402,6 @@ static inline int dev_pm_opp_sync_regulators(struct device *dev)
int dev_pm_opp_of_add_table(struct device *dev);
int dev_pm_opp_of_add_table_indexed(struct device *dev, int index);
int devm_pm_opp_of_add_table_indexed(struct device *dev, int index);
-int dev_pm_opp_of_add_table_noclk(struct device *dev, int index);
-int devm_pm_opp_of_add_table_noclk(struct device *dev, int index);
void dev_pm_opp_of_remove_table(struct device *dev);
int devm_pm_opp_of_add_table(struct device *dev);
int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask);
@@ -501,16 +432,6 @@ static inline int devm_pm_opp_of_add_table_indexed(struct device *dev, int index
return -EOPNOTSUPP;
}
-static inline int dev_pm_opp_of_add_table_noclk(struct device *dev, int index)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int devm_pm_opp_of_add_table_noclk(struct device *dev, int index)
-{
- return -EOPNOTSUPP;
-}
-
static inline void dev_pm_opp_of_remove_table(struct device *dev)
{
}
@@ -565,4 +486,149 @@ static inline int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_ta
}
#endif
+/* OPP Configuration helpers */
+
+/* Regulators helpers */
+static inline int dev_pm_opp_set_regulators(struct device *dev,
+ const char * const names[])
+{
+ struct dev_pm_opp_config config = {
+ .regulator_names = names,
+ };
+
+ return dev_pm_opp_set_config(dev, &config);
+}
+
+static inline void dev_pm_opp_put_regulators(int token)
+{
+ dev_pm_opp_clear_config(token);
+}
+
+static inline int devm_pm_opp_set_regulators(struct device *dev,
+ const char * const names[])
+{
+ struct dev_pm_opp_config config = {
+ .regulator_names = names,
+ };
+
+ return devm_pm_opp_set_config(dev, &config);
+}
+
+/* Supported-hw helpers */
+static inline int dev_pm_opp_set_supported_hw(struct device *dev,
+ const u32 *versions,
+ unsigned int count)
+{
+ struct dev_pm_opp_config config = {
+ .supported_hw = versions,
+ .supported_hw_count = count,
+ };
+
+ return dev_pm_opp_set_config(dev, &config);
+}
+
+static inline void dev_pm_opp_put_supported_hw(int token)
+{
+ dev_pm_opp_clear_config(token);
+}
+
+static inline int devm_pm_opp_set_supported_hw(struct device *dev,
+ const u32 *versions,
+ unsigned int count)
+{
+ struct dev_pm_opp_config config = {
+ .supported_hw = versions,
+ .supported_hw_count = count,
+ };
+
+ return devm_pm_opp_set_config(dev, &config);
+}
+
+/* clkname helpers */
+static inline int dev_pm_opp_set_clkname(struct device *dev, const char *name)
+{
+ const char *names[] = { name, NULL };
+ struct dev_pm_opp_config config = {
+ .clk_names = names,
+ };
+
+ return dev_pm_opp_set_config(dev, &config);
+}
+
+static inline void dev_pm_opp_put_clkname(int token)
+{
+ dev_pm_opp_clear_config(token);
+}
+
+static inline int devm_pm_opp_set_clkname(struct device *dev, const char *name)
+{
+ const char *names[] = { name, NULL };
+ struct dev_pm_opp_config config = {
+ .clk_names = names,
+ };
+
+ return devm_pm_opp_set_config(dev, &config);
+}
+
+/* config-regulators helpers */
+static inline int dev_pm_opp_set_config_regulators(struct device *dev,
+ config_regulators_t helper)
+{
+ struct dev_pm_opp_config config = {
+ .config_regulators = helper,
+ };
+
+ return dev_pm_opp_set_config(dev, &config);
+}
+
+static inline void dev_pm_opp_put_config_regulators(int token)
+{
+ dev_pm_opp_clear_config(token);
+}
+
+/* genpd helpers */
+static inline int dev_pm_opp_attach_genpd(struct device *dev,
+ const char * const *names,
+ struct device ***virt_devs)
+{
+ struct dev_pm_opp_config config = {
+ .genpd_names = names,
+ .virt_devs = virt_devs,
+ };
+
+ return dev_pm_opp_set_config(dev, &config);
+}
+
+static inline void dev_pm_opp_detach_genpd(int token)
+{
+ dev_pm_opp_clear_config(token);
+}
+
+static inline int devm_pm_opp_attach_genpd(struct device *dev,
+ const char * const *names,
+ struct device ***virt_devs)
+{
+ struct dev_pm_opp_config config = {
+ .genpd_names = names,
+ .virt_devs = virt_devs,
+ };
+
+ return devm_pm_opp_set_config(dev, &config);
+}
+
+/* prop-name helpers */
+static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
+{
+ struct dev_pm_opp_config config = {
+ .prop_name = name,
+ };
+
+ return dev_pm_opp_set_config(dev, &config);
+}
+
+static inline void dev_pm_opp_put_prop_name(int token)
+{
+ dev_pm_opp_clear_config(token);
+}
+
#endif /* __LINUX_OPP_H__ */
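
A hedged sketch (not part of this patch) of a driver using the consolidated API directly rather than the per-option helpers above; the clock, regulator and property names are made up, and the non-negative return value of dev_pm_opp_set_config() is treated as the token expected by dev_pm_opp_clear_config(), as the helpers above do:

#include <linux/pm_opp.h>

static int example_opp_init(struct device *dev, int *opp_token)
{
	static const char * const clk_names[] = { "core", NULL };
	static const char * const reg_names[] = { "vdd-core", NULL };
	struct dev_pm_opp_config config = {
		.clk_names = clk_names,
		.regulator_names = reg_names,
		.prop_name = "speedbin0",	/* hypothetical */
	};
	int token;

	token = dev_pm_opp_set_config(dev, &config);
	if (token < 0)
		return token;

	*opp_token = token;
	return 0;
}

static void example_opp_exit(int opp_token)
{
	dev_pm_opp_clear_config(opp_token);
}
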
diff --git a/include/linux/psi.h b/include/linux/psi.h
index 89784763d19e..dd74411ac21d 100644
--- a/include/linux/psi.h
+++ b/include/linux/psi.h
@@ -27,7 +27,7 @@ void psi_memstall_leave(unsigned long *flags);
int psi_show(struct seq_file *s, struct psi_group *group, enum psi_res res);
struct psi_trigger *psi_trigger_create(struct psi_group *group,
- char *buf, size_t nbytes, enum psi_res res);
+ char *buf, enum psi_res res);
void psi_trigger_destroy(struct psi_trigger *t);
__poll_t psi_trigger_poll(void **trigger_ptr, struct file *file,
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index f7c1d21c2f39..eae67015ce51 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -9,7 +9,7 @@
#define _LINUX_RADIX_TREE_H
#include <linux/bitops.h>
-#include <linux/gfp.h>
+#include <linux/gfp_types.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/math.h>
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index 235047d7a1b5..f7edca369eda 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -17,9 +17,9 @@
#ifndef _LINUX_RBTREE_H
#define _LINUX_RBTREE_H
+#include <linux/container_of.h>
#include <linux/rbtree_types.h>
-#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/rcupdate.h>
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index 7c943f0a2fc4..aea79c77db0f 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -597,7 +597,7 @@ struct rproc_subdev {
/**
* struct rproc_vring - remoteproc vring state
* @va: virtual address
- * @len: length, in bytes
+ * @num: vring size
* @da: device address
* @align: vring alignment
* @notifyid: rproc-specific unique vring index
@@ -606,7 +606,7 @@ struct rproc_subdev {
*/
struct rproc_vring {
void *va;
- int len;
+ int num;
u32 da;
u32 align;
int notifyid;
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 9ec23138e410..bf80adca980b 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -325,8 +325,8 @@ struct page_vma_mapped_walk {
#define DEFINE_PAGE_VMA_WALK(name, _page, _vma, _address, _flags) \
struct page_vma_mapped_walk name = { \
.pfn = page_to_pfn(_page), \
- .nr_pages = compound_nr(page), \
- .pgoff = page_to_pgoff(page), \
+ .nr_pages = compound_nr(_page), \
+ .pgoff = page_to_pgoff(_page), \
.vma = _vma, \
.address = _address, \
.flags = _flags, \
diff --git a/include/linux/rv.h b/include/linux/rv.h
new file mode 100644
index 000000000000..8883b41d88ec
--- /dev/null
+++ b/include/linux/rv.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Runtime Verification.
+ *
+ * For further information, see: kernel/trace/rv/rv.c.
+ */
+#ifndef _LINUX_RV_H
+#define _LINUX_RV_H
+
+#define MAX_DA_NAME_LEN 24
+
+#ifdef CONFIG_RV
+/*
+ * Deterministic automaton per-object variables.
+ */
+struct da_monitor {
+ bool monitoring;
+ unsigned int curr_state;
+};
+
+/*
+ * Per-task RV monitors count. Currently fixed at RV_PER_TASK_MONITORS.
+ * If we find justification for more monitors, we can think about
+ * adding more or developing a dynamic method. So far, none of
+ * these are justified.
+ */
+#define RV_PER_TASK_MONITORS 1
+#define RV_PER_TASK_MONITOR_INIT (RV_PER_TASK_MONITORS)
+
+/*
+ * Further monitor types are expected, so make this a union.
+ */
+union rv_task_monitor {
+ struct da_monitor da_mon;
+};
+
+#ifdef CONFIG_RV_REACTORS
+struct rv_reactor {
+ const char *name;
+ const char *description;
+ void (*react)(char *msg);
+};
+#endif
+
+struct rv_monitor {
+ const char *name;
+ const char *description;
+ bool enabled;
+ int (*enable)(void);
+ void (*disable)(void);
+ void (*reset)(void);
+#ifdef CONFIG_RV_REACTORS
+ void (*react)(char *msg);
+#endif
+};
+
+bool rv_monitoring_on(void);
+int rv_unregister_monitor(struct rv_monitor *monitor);
+int rv_register_monitor(struct rv_monitor *monitor);
+int rv_get_task_monitor_slot(void);
+void rv_put_task_monitor_slot(int slot);
+
+#ifdef CONFIG_RV_REACTORS
+bool rv_reacting_on(void);
+int rv_unregister_reactor(struct rv_reactor *reactor);
+int rv_register_reactor(struct rv_reactor *reactor);
+#endif /* CONFIG_RV_REACTORS */
+
+#endif /* CONFIG_RV */
+#endif /* _LINUX_RV_H */
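
A skeletal sketch (not part of this patch) of how a monitor plugs into this interface; the callbacks are empty stubs:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/rv.h>

static int example_enable(void)
{
	return 0;
}

static void example_disable(void) { }
static void example_reset(void) { }

static struct rv_monitor example_monitor = {
	.name		= "example",
	.description	= "does nothing, illustrates the interface",
	.enable		= example_enable,
	.disable	= example_disable,
	.reset		= example_reset,
};

static int __init example_rv_init(void)
{
	return rv_register_monitor(&example_monitor);
}

static void __exit example_rv_exit(void)
{
	rv_unregister_monitor(&example_monitor);
}

module_init(example_rv_init);
module_exit(example_rv_exit);
MODULE_LICENSE("GPL");
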
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 7ff9d6386c12..375a5e90d86a 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -16,6 +16,9 @@ struct scatterlist {
#ifdef CONFIG_NEED_SG_DMA_LENGTH
unsigned int dma_length;
#endif
+#ifdef CONFIG_PCI_P2PDMA
+ unsigned int dma_flags;
+#endif
};
/*
@@ -245,6 +248,72 @@ static inline void sg_unmark_end(struct scatterlist *sg)
sg->page_link &= ~SG_END;
}
+/*
+ * CONFIG_PCI_P2PDMA depends on CONFIG_64BIT which means there are 4 bytes
+ * in struct scatterlist (assuming also CONFIG_NEED_SG_DMA_LENGTH is set).
+ * Use this padding for DMA flags bits to indicate when a specific
+ * dma address is a bus address.
+ */
+#ifdef CONFIG_PCI_P2PDMA
+
+#define SG_DMA_BUS_ADDRESS (1 << 0)
+
+/**
+ * sg_is_dma_bus_address - Return whether a given segment was marked
+ * as a bus address
+ * @sg: SG entry
+ *
+ * Description:
+ * Returns true if sg_dma_mark_bus_address() has been called on
+ * this segment.
+ **/
+static inline bool sg_is_dma_bus_address(struct scatterlist *sg)
+{
+ return sg->dma_flags & SG_DMA_BUS_ADDRESS;
+}
+
+/**
+ * sg_dma_mark_bus_address - Mark the scatterlist entry as a bus address
+ * @sg: SG entry
+ *
+ * Description:
+ * Marks the passed in sg entry to indicate that the dma_address is
+ * a bus address and doesn't need to be unmapped. This should only be
+ * used by dma_map_sg() implementations to mark bus addresses
+ * so they can be properly cleaned up in dma_unmap_sg().
+ **/
+static inline void sg_dma_mark_bus_address(struct scatterlist *sg)
+{
+ sg->dma_flags |= SG_DMA_BUS_ADDRESS;
+}
+
+/**
+ * sg_dma_unmark_bus_address - Unmark the scatterlist entry as a bus address
+ * @sg: SG entry
+ *
+ * Description:
+ * Clears the bus address mark.
+ **/
+static inline void sg_dma_unmark_bus_address(struct scatterlist *sg)
+{
+ sg->dma_flags &= ~SG_DMA_BUS_ADDRESS;
+}
+
+#else
+
+static inline bool sg_is_dma_bus_address(struct scatterlist *sg)
+{
+ return false;
+}
+static inline void sg_dma_mark_bus_address(struct scatterlist *sg)
+{
+}
+static inline void sg_dma_unmark_bus_address(struct scatterlist *sg)
+{
+}
+
+#endif
+
/**
* sg_phys - Return physical address of an sg entry
* @sg: SG entry
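
A simplified sketch (not part of this patch) of the pattern the comments above describe, seen from the unmap side of a DMA mapping implementation: segments marked as bus addresses were never mapped, so they are only unmarked, while ordinary segments are unmapped as usual. A real implementation would use its own per-segment unmap primitive; dma_unmap_page() stands in here for brevity.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static void example_unmap_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (sg_is_dma_bus_address(sg)) {
			/* P2PDMA bus address: nothing to unmap, clear the mark */
			sg_dma_unmark_bus_address(sg);
			continue;
		}
		dma_unmap_page(dev, sg_dma_address(sg), sg_dma_len(sg), dir);
	}
}
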
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d6b0866c71ed..e7b2f8a5c711 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -34,6 +34,7 @@
#include <linux/rseq.h>
#include <linux/seqlock.h>
#include <linux/kcsan.h>
+#include <linux/rv.h>
#include <asm/kmap_size.h>
/* task_struct member predeclarations (sorted alphabetically): */
@@ -1501,6 +1502,16 @@ struct task_struct {
struct callback_head l1d_flush_kill;
#endif
+#ifdef CONFIG_RV
+ /*
+ * Per-task RV monitor. Currently fixed at RV_PER_TASK_MONITORS.
+ * If we find justification for more monitors, we can think
+ * about adding more or developing a dynamic method. So far,
+ * none of these are justified.
+ */
+ union rv_task_monitor rv[RV_PER_TASK_MONITORS];
+#endif
+
/*
* New fields for task_struct should be added above here, so that
* they are included in the randomized portion of task_struct.
@@ -1814,7 +1825,7 @@ current_restore_flags(unsigned long orig_flags, unsigned long flags)
}
extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
-extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
+extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_effective_cpus);
#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 8cd975a8bfeb..2a243616f222 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -29,7 +29,7 @@ extern struct mm_struct *mm_alloc(void);
*
* Use mmdrop() to release the reference acquired by mmgrab().
*
- * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
+ * See also <Documentation/mm/active_mm.rst> for an in-depth explanation
* of &mm_struct.mm_count vs &mm_struct.mm_users.
*/
static inline void mmgrab(struct mm_struct *mm)
@@ -92,7 +92,7 @@ static inline void mmdrop_sched(struct mm_struct *mm)
*
* Use mmput() to release the reference acquired by mmget().
*
- * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
+ * See also <Documentation/mm/active_mm.rst> for an in-depth explanation
* of &mm_struct.mm_count vs &mm_struct.mm_users.
*/
static inline void mmget(struct mm_struct *mm)
diff --git a/include/linux/security.h b/include/linux/security.h
index 1bc362cb413f..7bd0c490703d 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -2060,6 +2060,7 @@ static inline int security_perf_event_write(struct perf_event *event)
#ifdef CONFIG_SECURITY
extern int security_uring_override_creds(const struct cred *new);
extern int security_uring_sqpoll(void);
+extern int security_uring_cmd(struct io_uring_cmd *ioucmd);
#else
static inline int security_uring_override_creds(const struct cred *new)
{
@@ -2069,6 +2070,10 @@ static inline int security_uring_sqpoll(void)
{
return 0;
}
+static inline int security_uring_cmd(struct io_uring_cmd *ioucmd)
+{
+ return 0;
+}
#endif /* CONFIG_SECURITY */
#endif /* CONFIG_IO_URING */
diff --git a/include/linux/serial.h b/include/linux/serial.h
index 0b8b7d7c8f33..3d6fe3ef92cf 100644
--- a/include/linux/serial.h
+++ b/include/linux/serial.h
@@ -9,12 +9,20 @@
#ifndef _LINUX_SERIAL_H
#define _LINUX_SERIAL_H
-#include <asm/page.h>
#include <uapi/linux/serial.h>
+#include <uapi/linux/serial_reg.h>
/* Helper for dealing with UART_LCR_WLEN* defines */
#define UART_LCR_WLEN(x) ((x) - 5)
+/* FIFO and shifting register empty */
+#define UART_LSR_BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+
+static inline bool uart_lsr_tx_empty(u16 lsr)
+{
+ return (lsr & UART_LSR_BOTH_EMPTY) == UART_LSR_BOTH_EMPTY;
+}
+
/*
* Counters of the input lines (CTS, DSR, RI, CD) interrupts
*/
@@ -25,11 +33,6 @@ struct async_icount {
__u32 buf_overrun;
};
-/*
- * The size of the serial xmit buffer is 1 page, or 4096 bytes
- */
-#define SERIAL_XMIT_SIZE PAGE_SIZE
-
#include <linux/compiler.h>
#endif /* _LINUX_SERIAL_H */
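
A one-line sketch (not part of this patch) of how a driver's tx_empty() hook might use the new helper; lsr would normally be read from the hardware's line status register:

#include <linux/serial.h>
#include <asm/ioctls.h>

static unsigned int example_tx_empty(u16 lsr)
{
	return uart_lsr_tx_empty(lsr) ? TIOCSER_TEMT : 0;
}
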
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index ff84a3ed10ea..8c7b793aa4d7 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -119,7 +119,8 @@ struct uart_8250_port {
* be immediately processed.
*/
#define LSR_SAVE_FLAGS UART_LSR_BRK_ERROR_BITS
- unsigned char lsr_saved_flags;
+ u16 lsr_saved_flags;
+ u16 lsr_save_mask;
#define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA
unsigned char msr_saved_flags;
@@ -170,8 +171,8 @@ extern void serial8250_do_set_divisor(struct uart_port *port, unsigned int baud,
unsigned int quot_frac);
extern int fsl8250_handle_irq(struct uart_port *port);
int serial8250_handle_irq(struct uart_port *port, unsigned int iir);
-unsigned char serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr);
-void serial8250_read_char(struct uart_8250_port *up, unsigned char lsr);
+u16 serial8250_rx_chars(struct uart_8250_port *up, u16 lsr);
+void serial8250_read_char(struct uart_8250_port *up, u16 lsr);
void serial8250_tx_chars(struct uart_8250_port *up);
unsigned int serial8250_modem_status(struct uart_8250_port *up);
void serial8250_init_port(struct uart_8250_port *up);
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index fde258b3decd..aef3145f2032 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -31,9 +31,336 @@ struct serial_struct;
struct device;
struct gpio_desc;
-/*
+/**
+ * struct uart_ops -- interface between serial_core and the driver
+ *
* This structure describes all the operations that can be done on the
- * physical hardware. See Documentation/driver-api/serial/driver.rst for details.
+ * physical hardware.
+ *
+ * @tx_empty: ``unsigned int ()(struct uart_port *port)``
+ *
+ * This function tests whether the transmitter fifo and shifter for the
+ * @port is empty. If it is empty, this function should return
+ * %TIOCSER_TEMT, otherwise return 0. If the port does not support this
+ * operation, then it should return %TIOCSER_TEMT.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ * This call must not sleep
+ *
+ * @set_mctrl: ``void ()(struct uart_port *port, unsigned int mctrl)``
+ *
+ * This function sets the modem control lines for @port to the state
+ * described by @mctrl. The relevant bits of @mctrl are:
+ *
+ * - %TIOCM_RTS RTS signal.
+ * - %TIOCM_DTR DTR signal.
+ * - %TIOCM_OUT1 OUT1 signal.
+ * - %TIOCM_OUT2 OUT2 signal.
+ * - %TIOCM_LOOP Set the port into loopback mode.
+ *
+ * If the appropriate bit is set, the signal should be driven
+ * active. If the bit is clear, the signal should be driven
+ * inactive.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @get_mctrl: ``unsigned int ()(struct uart_port *port)``
+ *
+ * Returns the current state of modem control inputs of @port. The state
+ * of the outputs should not be returned, since the core keeps track of
+ * their state. The state information should include:
+ *
+ * - %TIOCM_CAR state of DCD signal
+ * - %TIOCM_CTS state of CTS signal
+ * - %TIOCM_DSR state of DSR signal
+ * - %TIOCM_RI state of RI signal
+ *
+ * The bit is set if the signal is currently driven active. If
+ * the port does not support CTS, DCD or DSR, the driver should
+ * indicate that the signal is permanently active. If RI is
+ * not available, the signal should not be indicated as active.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @stop_tx: ``void ()(struct uart_port *port)``
+ *
+ * Stop transmitting characters. This might be due to the CTS line
+ * becoming inactive or the tty layer indicating we want to stop
+ * transmission due to an %XOFF character.
+ *
+ * The driver should stop transmitting characters as soon as possible.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @start_tx: ``void ()(struct uart_port *port)``
+ *
+ * Start transmitting characters.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @throttle: ``void ()(struct uart_port *port)``
+ *
+ * Notify the serial driver that input buffers for the line discipline are
+ * close to full, and it should somehow signal that no more characters
+ * should be sent to the serial port.
+ * This will be called only if hardware assisted flow control is enabled.
+ *
+ * Locking: serialized with @unthrottle() and termios modification by the
+ * tty layer.
+ *
+ * @unthrottle: ``void ()(struct uart_port *port)``
+ *
+ * Notify the serial driver that characters can now be sent to the serial
+ * port without fear of overrunning the input buffers of the line
+ * disciplines.
+ *
+ * This will be called only if hardware assisted flow control is enabled.
+ *
+ * Locking: serialized with @throttle() and termios modification by the
+ * tty layer.
+ *
+ * @send_xchar: ``void ()(struct uart_port *port, char ch)``
+ *
+ * Transmit a high priority character, even if the port is stopped. This
+ * is used to implement XON/XOFF flow control and tcflow(). If the serial
+ * driver does not implement this function, the tty core will append the
+ * character to the circular buffer and then call start_tx() / stop_tx()
+ * to flush the data out.
+ *
+ * Do not transmit if @ch == '\0' (%__DISABLED_CHAR).
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @stop_rx: ``void ()(struct uart_port *port)``
+ *
+ * Stop receiving characters; the @port is in the process of being closed.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @enable_ms: ``void ()(struct uart_port *port)``
+ *
+ * Enable the modem status interrupts.
+ *
+ * This method may be called multiple times. Modem status interrupts
+ * should be disabled when the @shutdown() method is called.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @break_ctl: ``void ()(struct uart_port *port, int ctl)``
+ *
+ * Control the transmission of a break signal. If @ctl is nonzero, the
+ * break signal should be transmitted. The signal should be terminated
+ * when another call is made with a zero @ctl.
+ *
+ * Locking: caller holds tty_port->mutex
+ *
+ * @startup: ``int ()(struct uart_port *port)``
+ *
+ * Grab any interrupt resources and initialise any low level driver state.
+ * Enable the port for reception. It should not activate RTS nor DTR;
+ * this will be done via a separate call to @set_mctrl().
+ *
+ * This method will only be called when the port is initially opened.
+ *
+ * Locking: port_sem taken.
+ * Interrupts: globally disabled.
+ *
+ * @shutdown: ``void ()(struct uart_port *port)``
+ *
+ * Disable the @port, disable any break condition that may be in effect,
+ * and free any interrupt resources. It should not disable RTS nor DTR;
+ * this will have already been done via a separate call to @set_mctrl().
+ *
+ * Drivers must not access @port->state once this call has completed.
+ *
+ * This method will only be called when there are no more users of this
+ * @port.
+ *
+ * Locking: port_sem taken.
+ * Interrupts: caller dependent.
+ *
+ * @flush_buffer: ``void ()(struct uart_port *port)``
+ *
+ * Flush any write buffers, reset any DMA state and stop any ongoing DMA
+ * transfers.
+ *
+ * This will be called whenever the @port->state->xmit circular buffer is
+ * cleared.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @set_termios: ``void ()(struct uart_port *port, struct ktermios *new,
+ * struct ktermios *old)``
+ *
+ * Change the @port parameters, including word length, parity, stop bits.
+ * Update @port->read_status_mask and @port->ignore_status_mask to
+ * indicate the types of events we are interested in receiving. Relevant
+ * ktermios::c_cflag bits are:
+ *
+ * - %CSIZE - word size
+ * - %CSTOPB - 2 stop bits
+ * - %PARENB - parity enable
+ * - %PARODD - odd parity (when %PARENB is in force)
+ * - %ADDRB - address bit (changed through uart_port::rs485_config()).
+ * - %CREAD - enable reception of characters (if not set, still receive
+ * characters from the port, but throw them away).
+ * - %CRTSCTS - if set, enable CTS status change reporting.
+ * - %CLOCAL - if not set, enable modem status change reporting.
+ *
+ * Relevant ktermios::c_iflag bits are:
+ *
+ * - %INPCK - enable frame and parity error events to be passed to the TTY
+ * layer.
+ * - %BRKINT / %PARMRK - both of these enable break events to be passed to
+ * the TTY layer.
+ * - %IGNPAR - ignore parity and framing errors.
+ * - %IGNBRK - ignore break errors. If %IGNPAR is also set, ignore overrun
+ * errors as well.
+ *
+ * The interaction of the ktermios::c_iflag bits is as follows (parity
+ * error given as an example):
+ *
+ * ============ ======= ======= =========================================
+ * Parity error INPCK IGNPAR
+ * ============ ======= ======= =========================================
+ * n/a 0 n/a character received, marked as %TTY_NORMAL
+ * None 1 n/a character received, marked as %TTY_NORMAL
+ * Yes 1 0 character received, marked as %TTY_PARITY
+ * Yes 1 1 character discarded
+ * ============ ======= ======= =========================================
+ *
+ * Other flags may be used (eg, xon/xoff characters) if your hardware
+ * supports hardware "soft" flow control.
+ *
+ * Locking: caller holds tty_port->mutex
+ * Interrupts: caller dependent.
+ * This call must not sleep
+ *
+ * @set_ldisc: ``void ()(struct uart_port *port, struct ktermios *termios)``
+ *
+ * Notifier for discipline change. See
+ * Documentation/driver-api/tty/tty_ldisc.rst.
+ *
+ * Locking: caller holds tty_port->mutex
+ *
+ * @pm: ``void ()(struct uart_port *port, unsigned int state,
+ * unsigned int oldstate)``
+ *
+ * Perform any power management related activities on the specified @port.
+ * @state indicates the new state (defined by enum uart_pm_state),
+ * @oldstate indicates the previous state.
+ *
+ * This function should not be used to grab any resources.
+ *
+ * This will be called when the @port is initially opened and finally
+ * closed, except when the @port is also the system console. This will
+ * occur even if %CONFIG_PM is not set.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @type: ``const char *()(struct uart_port *port)``
+ *
+ * Return a pointer to a string constant describing the specified @port,
+ * or return %NULL, in which case the string 'unknown' is substituted.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @release_port: ``void ()(struct uart_port *port)``
+ *
+ * Release any memory and IO region resources currently in use by the
+ * @port.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @request_port: ``int ()(struct uart_port *port)``
+ *
+ * Request any memory and IO region resources required by the port. If any
+ * fail, no resources should be registered when this function returns, and
+ * it should return -%EBUSY on failure.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @config_port: ``void ()(struct uart_port *port, int type)``
+ *
+ * Perform any autoconfiguration steps required for the @port. @type
+ * contains a bit mask of the required configuration. %UART_CONFIG_TYPE
+ * indicates that the port requires detection and identification.
+ * @port->type should be set to the type found, or %PORT_UNKNOWN if no
+ * port was detected.
+ *
+ * %UART_CONFIG_IRQ indicates autoconfiguration of the interrupt signal,
+ * which should be probed using standard kernel autoprobing techniques.
+ * This is not necessary on platforms where ports have interrupts
+ * internally hard wired (eg, system on a chip implementations).
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @verify_port: ``int ()(struct uart_port *port,
+ * struct serial_struct *serinfo)``
+ *
+ * Verify the new serial port information contained within @serinfo is
+ * suitable for this port type.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @ioctl: ``int ()(struct uart_port *port, unsigned int cmd,
+ * unsigned long arg)``
+ *
+ * Perform any port specific IOCTLs. IOCTL commands must be defined using
+ * the standard numbering system found in <asm/ioctl.h>.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @poll_init: ``int ()(struct uart_port *port)``
+ *
+ * Called by kgdb to perform the minimal hardware initialization needed to
+ * support @poll_put_char() and @poll_get_char(). Unlike @startup(), this
+ * should not request interrupts.
+ *
+ * Locking: %tty_mutex and tty_port->mutex taken.
+ * Interrupts: n/a.
+ *
+ * @poll_put_char: ``void ()(struct uart_port *port, unsigned char ch)``
+ *
+ * Called by kgdb to write a single character @ch directly to the serial
+ * @port. It can and should block until there is space in the TX FIFO.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ * This call must not sleep
+ *
+ * @poll_get_char: ``int ()(struct uart_port *port)``
+ *
+ * Called by kgdb to read a single character directly from the serial
+ * port. If data is available, it should be returned; otherwise the
+ * function should return %NO_POLL_CHAR immediately.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ * This call must not sleep
*/
struct uart_ops {
unsigned int (*tx_empty)(struct uart_port *);
@@ -56,22 +383,8 @@ struct uart_ops {
void (*set_ldisc)(struct uart_port *, struct ktermios *);
void (*pm)(struct uart_port *, unsigned int state,
unsigned int oldstate);
-
- /*
- * Return a string describing the type of the port
- */
const char *(*type)(struct uart_port *);
-
- /*
- * Release IO and memory resources used by the port.
- * This includes iounmap if necessary.
- */
void (*release_port)(struct uart_port *);
-
- /*
- * Request IO and memory resources used by the port.
- * This includes iomapping the port if necessary.
- */
int (*request_port)(struct uart_port *);
void (*config_port)(struct uart_port *, int);
int (*verify_port)(struct uart_port *, struct serial_struct *);
@@ -133,6 +446,7 @@ struct uart_port {
unsigned int old);
void (*handle_break)(struct uart_port *);
int (*rs485_config)(struct uart_port *,
+ struct ktermios *termios,
struct serial_rs485 *rs485);
int (*iso7816_config)(struct uart_port *,
struct serial_iso7816 *iso7816);
@@ -232,7 +546,6 @@ struct uart_port {
int hw_stopped; /* sw-assisted CTS flow state */
unsigned int mctrl; /* current modem ctrl settings */
- unsigned int timeout; /* character-based timeout */
unsigned int frame_time; /* frame timing in ns */
unsigned int type; /* port type */
const struct uart_ops *ops;
@@ -255,6 +568,7 @@ struct uart_port {
struct attribute_group *attr_group; /* port specific attributes */
const struct attribute_group **tty_groups; /* all attributes (serial core use only) */
struct serial_rs485 rs485;
+ struct serial_rs485 rs485_supported; /* Supported mask for serial_rs485 */
struct gpio_desc *rs485_term_gpio; /* enable RS485 bus termination */
struct serial_iso7816 iso7816;
void *private_data; /* generic platform data pointer */
@@ -334,10 +648,23 @@ unsigned int uart_get_baud_rate(struct uart_port *port, struct ktermios *termios
unsigned int max);
unsigned int uart_get_divisor(struct uart_port *port, unsigned int baud);
+/*
+ * Calculates FIFO drain time.
+ */
+static inline unsigned long uart_fifo_timeout(struct uart_port *port)
+{
+ u64 fifo_timeout = (u64)READ_ONCE(port->frame_time) * port->fifosize;
+
+ /* Add .02 seconds of slop */
+ fifo_timeout += 20 * NSEC_PER_MSEC;
+
+ return max(nsecs_to_jiffies(fifo_timeout), 1UL);
+}
+
/* Base timer interval for polling */
static inline int uart_poll_timeout(struct uart_port *port)
{
- int timeout = port->timeout;
+ int timeout = uart_fifo_timeout(port);
return timeout > 6 ? (timeout / 2 - 2) : 1;
}
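
For illustration, a driver might use the new uart_fifo_timeout() helper above as the upper bound when busy-waiting for the transmitter to drain. This is only a sketch; the register offset, the TX-empty bit and the foo_uart name are hypothetical and not part of this patch.

/* Sketch: bound a TX-drain busy-wait by the FIFO drain time. */
static void foo_uart_wait_tx_done(struct uart_port *port)
{
	unsigned long deadline = jiffies + 2 * uart_fifo_timeout(port);

	/* 0x14 / BIT(6): hypothetical status register and TX-empty flag. */
	while (!(readl(port->membase + 0x14) & BIT(6))) {
		if (time_after(jiffies, deadline))
			break;
		cpu_relax();
	}
}
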
@@ -598,4 +925,5 @@ static inline int uart_handle_break(struct uart_port *port)
!((cflag) & CLOCAL))
int uart_get_rs485_mode(struct uart_port *port);
+int uart_rs485_config(struct uart_port *port);
#endif /* LINUX_SERIAL_CORE_H */
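
The modem-control callbacks documented in the uart_ops kerneldoc above follow a common pattern. The sketch below is illustrative only (hypothetical foo_uart hardware; the register offset and bit positions are invented for the example): outputs are driven from the TIOCM bits, and unsupported inputs are reported as permanently active, as the @get_mctrl documentation requires. Both callbacks run with @port->lock held and interrupts locally disabled, so neither may sleep.

/* Sketch only: hypothetical foo_uart modem-control callbacks. */
static void foo_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	u32 mcr = readl(port->membase + 0x10);	/* 0x10: hypothetical MCR offset */

	if (mctrl & TIOCM_RTS)
		mcr |= BIT(0);			/* hypothetical RTS bit */
	else
		mcr &= ~BIT(0);
	if (mctrl & TIOCM_DTR)
		mcr |= BIT(1);			/* hypothetical DTR bit */
	else
		mcr &= ~BIT(1);

	writel(mcr, port->membase + 0x10);
}

static unsigned int foo_uart_get_mctrl(struct uart_port *port)
{
	/*
	 * Inputs only: this imaginary part has no DCD/DSR/CTS lines, so they
	 * are reported permanently active; a missing RI line stays inactive.
	 */
	return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
}
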
diff --git a/include/linux/serial_s3c.h b/include/linux/serial_s3c.h
index dec15f5b3dec..1672cf0810ef 100644
--- a/include/linux/serial_s3c.h
+++ b/include/linux/serial_s3c.h
@@ -83,7 +83,7 @@
#define S3C2410_UCON_RXIRQMODE (1<<0)
#define S3C2410_UCON_RXFIFO_TOI (1<<7)
#define S3C2443_UCON_RXERR_IRQEN (1<<6)
-#define S3C2443_UCON_LOOPBACK (1<<5)
+#define S3C2410_UCON_LOOPBACK (1<<5)
#define S3C2410_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
S3C2410_UCON_RXILEVEL | \
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index a68f982f22d1..ff0b990de83d 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -25,9 +25,15 @@ struct shmem_inode_info {
struct simple_xattrs xattrs; /* list of xattrs */
atomic_t stop_eviction; /* hold when working on inode */
struct timespec64 i_crtime; /* file creation time */
+ unsigned int fsflags; /* flags for FS_IOC_[SG]ETFLAGS */
struct inode vfs_inode;
};
+#define SHMEM_FL_USER_VISIBLE FS_FL_USER_VISIBLE
+#define SHMEM_FL_USER_MODIFIABLE \
+ (FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL | FS_NOATIME_FL)
+#define SHMEM_FL_INHERITED (FS_NODUMP_FL | FS_NOATIME_FL)
+
struct shmem_sb_info {
unsigned long max_blocks; /* How many blocks are allowed */
struct percpu_counter used_blocks; /* How many are allocated */
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 76fbf92b04d9..08e6054e061f 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -73,6 +73,11 @@ struct shrinker {
/* ID in shrinker_idr */
int id;
#endif
+#ifdef CONFIG_SHRINKER_DEBUG
+ int debugfs_id;
+ const char *name;
+ struct dentry *debugfs_entry;
+#endif
/* objs pending delete, per node */
atomic_long_t *nr_deferred;
};
@@ -88,10 +93,32 @@ struct shrinker {
*/
#define SHRINKER_NONSLAB (1 << 3)
-extern int prealloc_shrinker(struct shrinker *shrinker);
+extern int __printf(2, 3) prealloc_shrinker(struct shrinker *shrinker,
+ const char *fmt, ...);
extern void register_shrinker_prepared(struct shrinker *shrinker);
-extern int register_shrinker(struct shrinker *shrinker);
+extern int __printf(2, 3) register_shrinker(struct shrinker *shrinker,
+ const char *fmt, ...);
extern void unregister_shrinker(struct shrinker *shrinker);
extern void free_prealloced_shrinker(struct shrinker *shrinker);
extern void synchronize_shrinkers(void);
-#endif
+
+#ifdef CONFIG_SHRINKER_DEBUG
+extern int shrinker_debugfs_add(struct shrinker *shrinker);
+extern void shrinker_debugfs_remove(struct shrinker *shrinker);
+extern int __printf(2, 3) shrinker_debugfs_rename(struct shrinker *shrinker,
+ const char *fmt, ...);
+#else /* CONFIG_SHRINKER_DEBUG */
+static inline int shrinker_debugfs_add(struct shrinker *shrinker)
+{
+ return 0;
+}
+static inline void shrinker_debugfs_remove(struct shrinker *shrinker)
+{
+}
+static inline __printf(2, 3)
+int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)
+{
+ return 0;
+}
+#endif /* CONFIG_SHRINKER_DEBUG */
+#endif /* _LINUX_SHRINKER_H */
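
With the printf-style registration added above, every shrinker now carries a name, which is also used for its CONFIG_SHRINKER_DEBUG debugfs entry. A minimal sketch of the new calling convention (hypothetical foo cache, not part of this patch):

static atomic_long_t foo_nr_cached = ATOMIC_LONG_INIT(0);

static unsigned long foo_count_objects(struct shrinker *s, struct shrink_control *sc)
{
	return atomic_long_read(&foo_nr_cached);
}

static unsigned long foo_scan_objects(struct shrinker *s, struct shrink_control *sc)
{
	/* Free up to sc->nr_to_scan objects and return the number freed. */
	return 0;
}

static struct shrinker foo_shrinker = {
	.count_objects	= foo_count_objects,
	.scan_objects	= foo_scan_objects,
	.seeks		= DEFAULT_SEEKS,
};

static int __init foo_init(void)
{
	/* The name is now passed at registration time; format strings work too. */
	return register_shrinker(&foo_shrinker, "foo-cache");
}
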
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index 153b6dec9b6a..48f4b645193b 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -278,7 +278,8 @@ static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start)
static inline struct sk_psock *sk_psock(const struct sock *sk)
{
- return rcu_dereference_sk_user_data(sk);
+ return __rcu_dereference_sk_user_data_with_flags(sk,
+ SK_USER_DATA_PSOCK);
}
static inline void sk_psock_set_state(struct sk_psock *psock,
diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h
index 67e0d3e750b5..ec16ae49e6a4 100644
--- a/include/linux/soundwire/sdw_intel.h
+++ b/include/linux/soundwire/sdw_intel.h
@@ -9,6 +9,8 @@
#define SDW_SHIM_BASE 0x2C000
#define SDW_ALH_BASE 0x2C800
+#define SDW_SHIM_BASE_ACE 0x38000
+#define SDW_ALH_BASE_ACE 0x24000
#define SDW_LINK_BASE 0x30000
#define SDW_LINK_SIZE 0x10000
@@ -119,6 +121,7 @@ struct sdw_intel_ops {
struct sdw_intel_stream_params_data *params_data);
int (*free_stream)(struct device *dev,
struct sdw_intel_stream_free_data *free_data);
+ int (*trigger)(struct snd_soc_dai *dai, int cmd, int stream);
};
/**
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 90501404fa49..75eea5ebb179 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -234,13 +234,18 @@ int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *,
struct rpc_xprt_switch *,
struct rpc_xprt *,
void *);
+void rpc_clnt_manage_trunked_xprts(struct rpc_clnt *);
+void rpc_clnt_probe_trunked_xprts(struct rpc_clnt *,
+ struct rpc_add_xprt_test *);
const char *rpc_proc_name(const struct rpc_task *task);
void rpc_clnt_xprt_switch_put(struct rpc_clnt *);
void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *, struct rpc_xprt *);
+void rpc_clnt_xprt_switch_remove_xprt(struct rpc_clnt *, struct rpc_xprt *);
bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
const struct sockaddr *sap);
+void rpc_clnt_xprt_set_online(struct rpc_clnt *clnt, struct rpc_xprt *xprt);
void rpc_cleanup_clids(void);
static inline int rpc_reply_expected(struct rpc_task *task)
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 1d7a3e51b795..acc62647317c 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -61,8 +61,6 @@ struct rpc_task {
struct rpc_wait tk_wait; /* RPC wait */
} u;
- int tk_rpc_status; /* Result of last RPC operation */
-
/*
* RPC call state
*/
@@ -82,6 +80,8 @@ struct rpc_task {
ktime_t tk_start; /* RPC task init timestamp */
pid_t tk_owner; /* Process id for batching tasks */
+
+ int tk_rpc_status; /* Result of last RPC operation */
unsigned short tk_flags; /* misc flags */
unsigned short tk_timeouts; /* maj timeouts */
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 5860f32e3958..69175029abbb 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -258,10 +258,13 @@ extern __be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes);
extern unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len);
extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len);
extern int xdr_process_buf(const struct xdr_buf *buf, unsigned int offset, unsigned int len, int (*actor)(struct scatterlist *, void *), void *data);
-extern unsigned int xdr_align_data(struct xdr_stream *, unsigned int offset, unsigned int length);
-extern unsigned int xdr_expand_hole(struct xdr_stream *, unsigned int offset, unsigned int length);
+extern void xdr_set_pagelen(struct xdr_stream *, unsigned int len);
extern bool xdr_stream_subsegment(struct xdr_stream *xdr, struct xdr_buf *subbuf,
unsigned int len);
+extern unsigned int xdr_stream_move_subsegment(struct xdr_stream *xdr, unsigned int offset,
+ unsigned int target, unsigned int length);
+extern unsigned int xdr_stream_zero(struct xdr_stream *xdr, unsigned int offset,
+ unsigned int length);
/**
* xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
@@ -419,8 +422,8 @@ static inline int xdr_stream_encode_item_absent(struct xdr_stream *xdr)
*/
static inline __be32 *xdr_encode_bool(__be32 *p, u32 n)
{
- *p = n ? xdr_one : xdr_zero;
- return p++;
+ *p++ = n ? xdr_one : xdr_zero;
+ return p;
}
/**
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 522bbf937957..b9f59aabee53 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -144,7 +144,8 @@ struct rpc_xprt_ops {
unsigned short (*get_srcport)(struct rpc_xprt *xprt);
int (*buf_alloc)(struct rpc_task *task);
void (*buf_free)(struct rpc_task *task);
- int (*prepare_request)(struct rpc_rqst *req);
+ int (*prepare_request)(struct rpc_rqst *req,
+ struct xdr_buf *buf);
int (*send_request)(struct rpc_rqst *req);
void (*wait_for_reply_request)(struct rpc_task *task);
void (*timer)(struct rpc_xprt *xprt, struct rpc_task *task);
@@ -505,4 +506,7 @@ static inline int xprt_test_and_set_binding(struct rpc_xprt *xprt)
return test_and_set_bit(XPRT_BINDING, &xprt->state);
}
+void xprt_set_offline_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps);
+void xprt_set_online_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps);
+void xprt_delete_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps);
#endif /* _LINUX_SUNRPC_XPRT_H */
diff --git a/include/linux/sunrpc/xprtmultipath.h b/include/linux/sunrpc/xprtmultipath.h
index bbb8a5fa0816..c0514c684b2c 100644
--- a/include/linux/sunrpc/xprtmultipath.h
+++ b/include/linux/sunrpc/xprtmultipath.h
@@ -55,7 +55,7 @@ extern void rpc_xprt_switch_set_roundrobin(struct rpc_xprt_switch *xps);
extern void rpc_xprt_switch_add_xprt(struct rpc_xprt_switch *xps,
struct rpc_xprt *xprt);
extern void rpc_xprt_switch_remove_xprt(struct rpc_xprt_switch *xps,
- struct rpc_xprt *xprt);
+ struct rpc_xprt *xprt, bool offline);
extern void xprt_iter_init(struct rpc_xprt_iter *xpi,
struct rpc_xprt_switch *xps);
@@ -63,8 +63,13 @@ extern void xprt_iter_init(struct rpc_xprt_iter *xpi,
extern void xprt_iter_init_listall(struct rpc_xprt_iter *xpi,
struct rpc_xprt_switch *xps);
+extern void xprt_iter_init_listoffline(struct rpc_xprt_iter *xpi,
+ struct rpc_xprt_switch *xps);
+
extern void xprt_iter_destroy(struct rpc_xprt_iter *xpi);
+extern void xprt_iter_rewind(struct rpc_xprt_iter *xpi);
+
extern struct rpc_xprt_switch *xprt_iter_xchg_switch(
struct rpc_xprt_iter *xpi,
struct rpc_xprt_switch *newswitch);
diff --git a/include/linux/surface_aggregator/controller.h b/include/linux/surface_aggregator/controller.h
index 74bfdffaf7b0..d11a1c6e3186 100644
--- a/include/linux/surface_aggregator/controller.h
+++ b/include/linux/surface_aggregator/controller.h
@@ -470,6 +470,67 @@ struct ssam_request_spec_md {
}
/**
+ * SSAM_DEFINE_SYNC_REQUEST_WR() - Define synchronous SAM request function with
+ * both argument and return value.
+ * @name: Name of the generated function.
+ * @atype: Type of the request's argument.
+ * @rtype: Type of the request's return value.
+ * @spec: Specification (&struct ssam_request_spec) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by @spec,
+ * with the request taking an argument of type @atype and having a return value
+ * of type @rtype. The generated function takes care of setting up the request
+ * and response structs, buffer allocation, as well as execution of the request
+ * itself, returning once the request has been fully completed. The required
+ * transport buffer will be allocated on the stack.
+ *
+ * The generated function is defined as ``static int name(struct
+ * ssam_controller *ctrl, const atype *arg, rtype *ret)``, returning the status
+ * of the request, which is zero on success and negative on failure. The
+ * ``ctrl`` parameter is the controller via which the request is sent. The
+ * request argument is specified via the ``arg`` pointer. The request's return
+ * value is written to the memory pointed to by the ``ret`` parameter.
+ *
+ * Refer to ssam_request_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_WR(name, atype, rtype, spec...) \
+ static int name(struct ssam_controller *ctrl, const atype *arg, rtype *ret) \
+ { \
+ struct ssam_request_spec s = (struct ssam_request_spec)spec; \
+ struct ssam_request rqst; \
+ struct ssam_response rsp; \
+ int status; \
+ \
+ rqst.target_category = s.target_category; \
+ rqst.target_id = s.target_id; \
+ rqst.command_id = s.command_id; \
+ rqst.instance_id = s.instance_id; \
+ rqst.flags = s.flags | SSAM_REQUEST_HAS_RESPONSE; \
+ rqst.length = sizeof(atype); \
+ rqst.payload = (u8 *)arg; \
+ \
+ rsp.capacity = sizeof(rtype); \
+ rsp.length = 0; \
+ rsp.pointer = (u8 *)ret; \
+ \
+ status = ssam_request_sync_onstack(ctrl, &rqst, &rsp, sizeof(atype)); \
+ if (status) \
+ return status; \
+ \
+ if (rsp.length != sizeof(rtype)) { \
+ struct device *dev = ssam_controller_device(ctrl); \
+ dev_err(dev, \
+ "rqst: invalid response length, expected %zu, got %zu (tc: %#04x, cid: %#04x)", \
+ sizeof(rtype), rsp.length, rqst.target_category,\
+ rqst.command_id); \
+ return -EIO; \
+ } \
+ \
+ return 0; \
+ }
+
+/**
* SSAM_DEFINE_SYNC_REQUEST_MD_N() - Define synchronous multi-device SAM
* request function with neither argument nor return value.
* @name: Name of the generated function.
@@ -613,6 +674,70 @@ struct ssam_request_spec_md {
return 0; \
}
+/**
+ * SSAM_DEFINE_SYNC_REQUEST_MD_WR() - Define synchronous multi-device SAM
+ * request function with both argument and return value.
+ * @name: Name of the generated function.
+ * @atype: Type of the request's argument.
+ * @rtype: Type of the request's return value.
+ * @spec: Specification (&struct ssam_request_spec_md) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by @spec,
+ * with the request taking an argument of type @atype and having a return value
+ * of type @rtype. Device specifying parameters are not hard-coded, but instead
+ * must be provided to the function. The generated function takes care of
+ * setting up the request and response structs, buffer allocation, as well as
+ * execution of the request itself, returning once the request has been fully
+ * completed. The required transport buffer will be allocated on the stack.
+ *
+ * The generated function is defined as ``static int name(struct
+ * ssam_controller *ctrl, u8 tid, u8 iid, const atype *arg, rtype *ret)``,
+ * returning the status of the request, which is zero on success and negative
+ * on failure. The ``ctrl`` parameter is the controller via which the request
+ * is sent, ``tid`` the target ID for the request, and ``iid`` the instance ID.
+ * The request argument is specified via the ``arg`` pointer. The request's
+ * return value is written to the memory pointed to by the ``ret`` parameter.
+ *
+ * Refer to ssam_request_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_MD_WR(name, atype, rtype, spec...) \
+ static int name(struct ssam_controller *ctrl, u8 tid, u8 iid, \
+ const atype *arg, rtype *ret) \
+ { \
+ struct ssam_request_spec_md s = (struct ssam_request_spec_md)spec; \
+ struct ssam_request rqst; \
+ struct ssam_response rsp; \
+ int status; \
+ \
+ rqst.target_category = s.target_category; \
+ rqst.target_id = tid; \
+ rqst.command_id = s.command_id; \
+ rqst.instance_id = iid; \
+ rqst.flags = s.flags | SSAM_REQUEST_HAS_RESPONSE; \
+ rqst.length = sizeof(atype); \
+ rqst.payload = (u8 *)arg; \
+ \
+ rsp.capacity = sizeof(rtype); \
+ rsp.length = 0; \
+ rsp.pointer = (u8 *)ret; \
+ \
+ status = ssam_request_sync_onstack(ctrl, &rqst, &rsp, sizeof(atype)); \
+ if (status) \
+ return status; \
+ \
+ if (rsp.length != sizeof(rtype)) { \
+ struct device *dev = ssam_controller_device(ctrl); \
+ dev_err(dev, \
+ "rqst: invalid response length, expected %zu, got %zu (tc: %#04x, cid: %#04x)", \
+ sizeof(rtype), rsp.length, rqst.target_category,\
+ rqst.command_id); \
+ return -EIO; \
+ } \
+ \
+ return 0; \
+ }
+
/* -- Event notifier/callbacks. --------------------------------------------- */
@@ -835,8 +960,28 @@ struct ssam_event_notifier {
int ssam_notifier_register(struct ssam_controller *ctrl,
struct ssam_event_notifier *n);
-int ssam_notifier_unregister(struct ssam_controller *ctrl,
- struct ssam_event_notifier *n);
+int __ssam_notifier_unregister(struct ssam_controller *ctrl,
+ struct ssam_event_notifier *n, bool disable);
+
+/**
+ * ssam_notifier_unregister() - Unregister an event notifier.
+ * @ctrl: The controller the notifier has been registered on.
+ * @n: The event notifier to unregister.
+ *
+ * Unregister an event notifier. Decrement the usage counter of the associated
+ * SAM event if the notifier is not marked as an observer. If the usage counter
+ * reaches zero, the event will be disabled.
+ *
+ * Return: Returns zero on success, %-ENOENT if the given notifier block has
+ * not been registered on the controller. If the given notifier block was the
+ * last one associated with its specific event, returns the status of the
+ * event-disable EC-command.
+ */
+static inline int ssam_notifier_unregister(struct ssam_controller *ctrl,
+ struct ssam_event_notifier *n)
+{
+ return __ssam_notifier_unregister(ctrl, n, true);
+}
int ssam_controller_event_enable(struct ssam_controller *ctrl,
struct ssam_event_registry reg,
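
For reference, the new SSAM_DEFINE_SYNC_REQUEST_WR() generator above is used roughly as sketched below; the target category, command IDs and payload types are placeholders for illustration, not real SAM commands:

/* Placeholder IDs; sketch only. */
SSAM_DEFINE_SYNC_REQUEST_WR(__foo_get_mode, u8, __le32, {
	.target_category = SSAM_SSH_TC_SAM,
	.target_id       = 0x01,
	.command_id      = 0x02,
	.instance_id     = 0x00,
});

static int foo_query_mode(struct ssam_controller *ctrl, u8 sel, u32 *mode)
{
	__le32 ret;
	int status;

	/* Generated as: static int __foo_get_mode(ctrl, const u8 *arg, __le32 *ret) */
	status = __foo_get_mode(ctrl, &sel, &ret);
	if (status)
		return status;

	*mode = le32_to_cpu(ret);
	return 0;
}
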
diff --git a/include/linux/surface_aggregator/device.h b/include/linux/surface_aggregator/device.h
index cc257097eb05..46c45d1b6368 100644
--- a/include/linux/surface_aggregator/device.h
+++ b/include/linux/surface_aggregator/device.h
@@ -15,6 +15,7 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
+#include <linux/property.h>
#include <linux/types.h>
#include <linux/surface_aggregator/controller.h>
@@ -148,17 +149,30 @@ struct ssam_device_uid {
#define SSAM_SDEV(cat, tid, iid, fun) \
SSAM_DEVICE(SSAM_DOMAIN_SERIALHUB, SSAM_SSH_TC_##cat, tid, iid, fun)
+/*
+ * enum ssam_device_flags - Flags for SSAM client devices.
+ * @SSAM_DEVICE_HOT_REMOVED_BIT:
+ * The device has been hot-removed. Further communication with it may time
+ * out and should be avoided.
+ */
+enum ssam_device_flags {
+ SSAM_DEVICE_HOT_REMOVED_BIT = 0,
+};
+
/**
* struct ssam_device - SSAM client device.
- * @dev: Driver model representation of the device.
- * @ctrl: SSAM controller managing this device.
- * @uid: UID identifying the device.
+ * @dev: Driver model representation of the device.
+ * @ctrl: SSAM controller managing this device.
+ * @uid: UID identifying the device.
+ * @flags: Device state flags, see &enum ssam_device_flags.
*/
struct ssam_device {
struct device dev;
struct ssam_controller *ctrl;
struct ssam_device_uid uid;
+
+ unsigned long flags;
};
/**
@@ -177,6 +191,8 @@ struct ssam_device_driver {
void (*remove)(struct ssam_device *sdev);
};
+#ifdef CONFIG_SURFACE_AGGREGATOR_BUS
+
extern struct bus_type ssam_bus_type;
extern const struct device_type ssam_device_type;
@@ -193,6 +209,15 @@ static inline bool is_ssam_device(struct device *d)
return d->type == &ssam_device_type;
}
+#else /* CONFIG_SURFACE_AGGREGATOR_BUS */
+
+static inline bool is_ssam_device(struct device *d)
+{
+ return false;
+}
+
+#endif /* CONFIG_SURFACE_AGGREGATOR_BUS */
+
/**
* to_ssam_device() - Casts the given device to a SSAM client device.
* @d: The device to cast.
@@ -241,6 +266,35 @@ int ssam_device_add(struct ssam_device *sdev);
void ssam_device_remove(struct ssam_device *sdev);
/**
+ * ssam_device_mark_hot_removed() - Mark the given device as hot-removed.
+ * @sdev: The device to mark as hot-removed.
+ *
+ * Mark the device as having been hot-removed. This signals drivers using the
+ * device that communication with the device should be avoided and may lead to
+ * timeouts.
+ */
+static inline void ssam_device_mark_hot_removed(struct ssam_device *sdev)
+{
+ dev_dbg(&sdev->dev, "marking device as hot-removed\n");
+ set_bit(SSAM_DEVICE_HOT_REMOVED_BIT, &sdev->flags);
+}
+
+/**
+ * ssam_device_is_hot_removed() - Check if the given device has been
+ * hot-removed.
+ * @sdev: The device to check.
+ *
+ * Checks if the given device has been marked as hot-removed. See
+ * ssam_device_mark_hot_removed() for more details.
+ *
+ * Return: Returns ``true`` if the device has been marked as hot-removed.
+ */
+static inline bool ssam_device_is_hot_removed(struct ssam_device *sdev)
+{
+ return test_bit(SSAM_DEVICE_HOT_REMOVED_BIT, &sdev->flags);
+}
+
+/**
* ssam_device_get() - Increment reference count of SSAM client device.
* @sdev: The device to increment the reference count of.
*
@@ -322,11 +376,62 @@ void ssam_device_driver_unregister(struct ssam_device_driver *d);
/* -- Helpers for controller and hub devices. ------------------------------- */
#ifdef CONFIG_SURFACE_AGGREGATOR_BUS
+
+int __ssam_register_clients(struct device *parent, struct ssam_controller *ctrl,
+ struct fwnode_handle *node);
void ssam_remove_clients(struct device *dev);
+
#else /* CONFIG_SURFACE_AGGREGATOR_BUS */
+
+static inline int __ssam_register_clients(struct device *parent, struct ssam_controller *ctrl,
+ struct fwnode_handle *node)
+{
+ return 0;
+}
+
static inline void ssam_remove_clients(struct device *dev) {}
+
#endif /* CONFIG_SURFACE_AGGREGATOR_BUS */
+/**
+ * ssam_register_clients() - Register all client devices defined under the
+ * given parent device.
+ * @dev: The parent device under which clients should be registered.
+ * @ctrl: The controller with which client should be registered.
+ *
+ * Register all clients that have been defined via firmware nodes as children

+ * of the given (parent) device. The respective child firmware nodes will be
+ * associated with the correspondingly created child devices.
+ *
+ * The given controller will be used to instantiate the new devices. See
+ * ssam_device_add() for details.
+ *
+ * Return: Returns zero on success, nonzero on failure.
+ */
+static inline int ssam_register_clients(struct device *dev, struct ssam_controller *ctrl)
+{
+ return __ssam_register_clients(dev, ctrl, dev_fwnode(dev));
+}
+
+/**
+ * ssam_device_register_clients() - Register all client devices defined under
+ * the given SSAM parent device.
+ * @sdev: The parent device under which clients should be registered.
+ *
+ * Register all clients that have been defined via firmware nodes as children
+ * of the given (parent) device. The respective child firmware nodes will be
+ * associated with the correspondingly created child devices.
+ *
+ * The controller used by the parent device will be used to instantiate the new
+ * devices. See ssam_device_add() for details.
+ *
+ * Return: Returns zero on success, nonzero on failure.
+ */
+static inline int ssam_device_register_clients(struct ssam_device *sdev)
+{
+ return ssam_register_clients(&sdev->dev, sdev->ctrl);
+}
+
/* -- Helpers for client-device requests. ----------------------------------- */
@@ -430,4 +535,106 @@ static inline void ssam_remove_clients(struct device *dev) {}
sdev->uid.instance, ret); \
}
+/**
+ * SSAM_DEFINE_SYNC_REQUEST_CL_WR() - Define synchronous client-device SAM
+ * request function with argument and return value.
+ * @name: Name of the generated function.
+ * @atype: Type of the request's argument.
+ * @rtype: Type of the request's return value.
+ * @spec: Specification (&struct ssam_request_spec_md) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by @spec,
+ * with the request taking an argument of type @atype and having a return value
+ * of type @rtype. Device specifying parameters are not hard-coded, but instead
+ * are provided via the client device, specifically its UID, supplied when
+ * calling this function. The generated function takes care of setting up the
+ * request struct, buffer allocation, as well as execution of the request
+ * itself, returning once the request has been fully completed. The required
+ * transport buffer will be allocated on the stack.
+ *
+ * The generated function is defined as ``static int name(struct ssam_device
+ * *sdev, const atype *arg, rtype *ret)``, returning the status of the request,
+ * which is zero on success and negative on failure. The ``sdev`` parameter
+ * specifies both the target device of the request and by association the
+ * controller via which the request is sent. The request's argument is
+ * specified via the ``arg`` pointer. The request's return value is written to
+ * the memory pointed to by the ``ret`` parameter.
+ *
+ * Refer to ssam_request_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_CL_WR(name, atype, rtype, spec...) \
+ SSAM_DEFINE_SYNC_REQUEST_MD_WR(__raw_##name, atype, rtype, spec) \
+ static int name(struct ssam_device *sdev, const atype *arg, rtype *ret) \
+ { \
+ return __raw_##name(sdev->ctrl, sdev->uid.target, \
+ sdev->uid.instance, arg, ret); \
+ }
+
+
+/* -- Helpers for client-device notifiers. ---------------------------------- */
+
+/**
+ * ssam_device_notifier_register() - Register an event notifier for the
+ * specified client device.
+ * @sdev: The device the notifier should be registered on.
+ * @n: The event notifier to register.
+ *
+ * Register an event notifier. Increment the usage counter of the associated
+ * SAM event if the notifier is not marked as an observer. If the event is not
+ * marked as an observer and is currently not enabled, it will be enabled
+ * during this call. If the notifier is marked as an observer, no attempt will
+ * be made at enabling any event and no reference count will be modified.
+ *
+ * Notifiers marked as observers do not need to be associated with one specific
+ * event, i.e. as long as no event matching is performed, only the event target
+ * category needs to be set.
+ *
+ * Return: Returns zero on success, %-ENOSPC if there have already been
+ * %INT_MAX notifiers for the event ID/type associated with the notifier block
+ * registered, %-ENOMEM if the corresponding event entry could not be
+ * allocated, %-ENODEV if the device is marked as hot-removed. If this is the
+ * first time that a notifier block is registered for the specific associated
+ * event, returns the status of the event-enable EC-command.
+ */
+static inline int ssam_device_notifier_register(struct ssam_device *sdev,
+ struct ssam_event_notifier *n)
+{
+ /*
+ * Note that this check does not provide any guarantees whatsoever as
+ * hot-removal could happen at any point and we can't protect against
+ * it. Nevertheless, if we can detect hot-removal, bail early to avoid
+ * communication timeouts.
+ */
+ if (ssam_device_is_hot_removed(sdev))
+ return -ENODEV;
+
+ return ssam_notifier_register(sdev->ctrl, n);
+}
+
+/**
+ * ssam_device_notifier_unregister() - Unregister an event notifier for the
+ * specified client device.
+ * @sdev: The device the notifier has been registered on.
+ * @n: The event notifier to unregister.
+ *
+ * Unregister an event notifier. Decrement the usage counter of the associated
+ * SAM event if the notifier is not marked as an observer. If the usage counter
+ * reaches zero, the event will be disabled.
+ *
+ * In case the device has been marked as hot-removed, the event will not be
+ * disabled on the EC, as in those cases any attempt at doing so may time out.
+ *
+ * Return: Returns zero on success, %-ENOENT if the given notifier block has
+ * not been registered on the controller. If the given notifier block was the
+ * last one associated with its specific event, returns the status of the
+ * event-disable EC-command.
+ */
+static inline int ssam_device_notifier_unregister(struct ssam_device *sdev,
+ struct ssam_event_notifier *n)
+{
+ return __ssam_notifier_unregister(sdev->ctrl, n,
+ !ssam_device_is_hot_removed(sdev));
+}
+
#endif /* _LINUX_SURFACE_AGGREGATOR_DEVICE_H */
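
A client driver would typically combine the helpers above roughly as sketched below; the foo driver, its data structure and the event details are invented for illustration, and the notifier's event registry/ID setup is elided:

struct foo_data {
	struct ssam_event_notifier notif;
};

static u32 foo_event_fn(struct ssam_event_notifier *nf, const struct ssam_event *event)
{
	/* ... handle the event ... */
	return 0;
}

static int foo_probe(struct ssam_device *sdev)
{
	struct foo_data *d;
	int status;

	d = devm_kzalloc(&sdev->dev, sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->notif.base.fn = foo_event_fn;
	/* ... fill in the event registry, ID and mask for the watched source ... */

	/* Bails out with -ENODEV if the device has already been hot-removed. */
	status = ssam_device_notifier_register(sdev, &d->notif);
	if (status)
		return status;

	dev_set_drvdata(&sdev->dev, d);
	return 0;
}

static void foo_remove(struct ssam_device *sdev)
{
	struct foo_data *d = dev_get_drvdata(&sdev->dev);

	/* Skips the event-disable EC command when the device is hot-removed. */
	ssam_device_notifier_unregister(sdev, &d->notif);
}
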
diff --git a/include/linux/surface_aggregator/serial_hub.h b/include/linux/surface_aggregator/serial_hub.h
index c3de43edcffa..45501b6e54e8 100644
--- a/include/linux/surface_aggregator/serial_hub.h
+++ b/include/linux/surface_aggregator/serial_hub.h
@@ -201,7 +201,7 @@ static inline u16 ssh_crc(const u8 *buf, size_t len)
* exception of zero, which is not an event ID. Thus, this is also the
* absolute maximum number of event handlers that can be registered.
*/
-#define SSH_NUM_EVENTS 34
+#define SSH_NUM_EVENTS 38
/*
* SSH_NUM_TARGETS - The number of communication targets used in the protocol.
@@ -292,40 +292,45 @@ struct ssam_span {
* Windows driver.
*/
enum ssam_ssh_tc {
- /* Category 0x00 is invalid for EC use. */
- SSAM_SSH_TC_SAM = 0x01, /* Generic system functionality, real-time clock. */
- SSAM_SSH_TC_BAT = 0x02, /* Battery/power subsystem. */
- SSAM_SSH_TC_TMP = 0x03, /* Thermal subsystem. */
- SSAM_SSH_TC_PMC = 0x04,
- SSAM_SSH_TC_FAN = 0x05,
- SSAM_SSH_TC_PoM = 0x06,
- SSAM_SSH_TC_DBG = 0x07,
- SSAM_SSH_TC_KBD = 0x08, /* Legacy keyboard (Laptop 1/2). */
- SSAM_SSH_TC_FWU = 0x09,
- SSAM_SSH_TC_UNI = 0x0a,
- SSAM_SSH_TC_LPC = 0x0b,
- SSAM_SSH_TC_TCL = 0x0c,
- SSAM_SSH_TC_SFL = 0x0d,
- SSAM_SSH_TC_KIP = 0x0e,
- SSAM_SSH_TC_EXT = 0x0f,
- SSAM_SSH_TC_BLD = 0x10,
- SSAM_SSH_TC_BAS = 0x11, /* Detachment system (Surface Book 2/3). */
- SSAM_SSH_TC_SEN = 0x12,
- SSAM_SSH_TC_SRQ = 0x13,
- SSAM_SSH_TC_MCU = 0x14,
- SSAM_SSH_TC_HID = 0x15, /* Generic HID input subsystem. */
- SSAM_SSH_TC_TCH = 0x16,
- SSAM_SSH_TC_BKL = 0x17,
- SSAM_SSH_TC_TAM = 0x18,
- SSAM_SSH_TC_ACC = 0x19,
- SSAM_SSH_TC_UFI = 0x1a,
- SSAM_SSH_TC_USC = 0x1b,
- SSAM_SSH_TC_PEN = 0x1c,
- SSAM_SSH_TC_VID = 0x1d,
- SSAM_SSH_TC_AUD = 0x1e,
- SSAM_SSH_TC_SMC = 0x1f,
- SSAM_SSH_TC_KPD = 0x20,
- SSAM_SSH_TC_REG = 0x21, /* Extended event registry. */
+ /* Category 0x00 is invalid for EC use. */
+ SSAM_SSH_TC_SAM = 0x01, /* Generic system functionality, real-time clock. */
+ SSAM_SSH_TC_BAT = 0x02, /* Battery/power subsystem. */
+ SSAM_SSH_TC_TMP = 0x03, /* Thermal subsystem. */
+ SSAM_SSH_TC_PMC = 0x04,
+ SSAM_SSH_TC_FAN = 0x05,
+ SSAM_SSH_TC_PoM = 0x06,
+ SSAM_SSH_TC_DBG = 0x07,
+ SSAM_SSH_TC_KBD = 0x08, /* Legacy keyboard (Laptop 1/2). */
+ SSAM_SSH_TC_FWU = 0x09,
+ SSAM_SSH_TC_UNI = 0x0a,
+ SSAM_SSH_TC_LPC = 0x0b,
+ SSAM_SSH_TC_TCL = 0x0c,
+ SSAM_SSH_TC_SFL = 0x0d,
+ SSAM_SSH_TC_KIP = 0x0e, /* Manages detachable peripherals (Pro X/8 keyboard cover) */
+ SSAM_SSH_TC_EXT = 0x0f,
+ SSAM_SSH_TC_BLD = 0x10,
+ SSAM_SSH_TC_BAS = 0x11, /* Detachment system (Surface Book 2/3). */
+ SSAM_SSH_TC_SEN = 0x12,
+ SSAM_SSH_TC_SRQ = 0x13,
+ SSAM_SSH_TC_MCU = 0x14,
+ SSAM_SSH_TC_HID = 0x15, /* Generic HID input subsystem. */
+ SSAM_SSH_TC_TCH = 0x16,
+ SSAM_SSH_TC_BKL = 0x17,
+ SSAM_SSH_TC_TAM = 0x18,
+ SSAM_SSH_TC_ACC0 = 0x19,
+ SSAM_SSH_TC_UFI = 0x1a,
+ SSAM_SSH_TC_USC = 0x1b,
+ SSAM_SSH_TC_PEN = 0x1c,
+ SSAM_SSH_TC_VID = 0x1d,
+ SSAM_SSH_TC_AUD = 0x1e,
+ SSAM_SSH_TC_SMC = 0x1f,
+ SSAM_SSH_TC_KPD = 0x20,
+ SSAM_SSH_TC_REG = 0x21, /* Extended event registry. */
+ SSAM_SSH_TC_SPT = 0x22,
+ SSAM_SSH_TC_SYS = 0x23,
+ SSAM_SSH_TC_ACC1 = 0x24,
+ SSAM_SSH_TC_SHB = 0x25,
+ SSAM_SSH_TC_POS = 0x26, /* For obtaining Laptop Studio screen position. */
};
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 8672a7123ccd..43150b9bbc5c 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -74,7 +74,7 @@ static inline int current_is_kswapd(void)
/*
* Unaddressable device memory support. See include/linux/hmm.h and
- * Documentation/vm/hmm.rst. Short description is we need struct pages for
+ * Documentation/mm/hmm.rst. Short description is we need struct pages for
* device memory that is unaddressable (inaccessible) by CPU, so that we can
* migrate part of a process memory to device memory.
*
@@ -411,10 +411,13 @@ extern void lru_cache_add_inactive_or_unevictable(struct page *page,
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask, nodemask_t *mask);
+
+#define MEMCG_RECLAIM_MAY_SWAP (1 << 1)
+#define MEMCG_RECLAIM_PROACTIVE (1 << 2)
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
unsigned long nr_pages,
gfp_t gfp_mask,
- bool may_swap);
+ unsigned int reclaim_options);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
gfp_t gfp_mask, bool noswap,
pg_data_t *pgdat,
@@ -456,6 +459,7 @@ static inline unsigned long total_swapcache_pages(void)
return global_node_page_state(NR_SWAPCACHE);
}
+extern void free_swap_cache(struct page *page);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
/* linux/mm/swapfile.c */
@@ -540,6 +544,10 @@ static inline void put_swap_device(struct swap_info_struct *si)
/* used to sanity check ptes in zap_pte_range when CONFIG_SWAP=0 */
#define free_swap_and_cache(e) is_pfn_swap_entry(e)
+static inline void free_swap_cache(struct page *page)
+{
+}
+
static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
return 0;
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index f24775b41880..a3d435bf9f97 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -244,8 +244,10 @@ extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
spinlock_t *ptl);
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
unsigned long address);
-extern void migration_entry_wait_huge(struct vm_area_struct *vma,
- struct mm_struct *mm, pte_t *pte);
+#ifdef CONFIG_HUGETLB_PAGE
+extern void __migration_entry_wait_huge(pte_t *ptep, spinlock_t *ptl);
+extern void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte);
+#endif
#else
static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
@@ -271,8 +273,10 @@ static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
spinlock_t *ptl) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
unsigned long address) { }
-static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
- struct mm_struct *mm, pte_t *pte) { }
+#ifdef CONFIG_HUGETLB_PAGE
+static inline void __migration_entry_wait_huge(pte_t *ptep, spinlock_t *ptl) { }
+static inline void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte) { }
+#endif
static inline int is_writable_migration_entry(swp_entry_t entry)
{
return 0;
@@ -486,6 +490,11 @@ static inline void num_poisoned_pages_dec(void)
atomic_long_dec(&num_poisoned_pages);
}
+static inline void num_poisoned_pages_sub(long i)
+{
+ atomic_long_sub(i, &num_poisoned_pages);
+}
+
#else
static inline swp_entry_t make_hwpoison_entry(struct page *page)
@@ -501,6 +510,10 @@ static inline int is_hwpoison_entry(swp_entry_t swp)
static inline void num_poisoned_pages_inc(void)
{
}
+
+static inline void num_poisoned_pages_sub(long i)
+{
+}
#endif
static inline int non_swap_entry(swp_entry_t entry)
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 7ed35dd3de6e..35bc4e281c21 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -60,7 +60,6 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
size_t size, enum dma_data_direction dir, unsigned long attrs);
#ifdef CONFIG_SWIOTLB
-extern enum swiotlb_force swiotlb_force;
/**
* struct io_tlb_mem - IO TLB Memory Pool Descriptor
@@ -80,15 +79,14 @@ extern enum swiotlb_force swiotlb_force;
 * @used:	The number of used IO TLB blocks.
* @list: The free list describing the number of free entries available
* from each index.
- * @index: The index to start searching in the next round.
* @orig_addr: The original address corresponding to a mapped entry.
* @alloc_size: Size of the allocated buffer.
- * @lock: The lock to protect the above data structures in the map and
- * unmap calls.
* @debugfs: The dentry to debugfs.
* @late_alloc: %true if allocated using the page allocator
* @force_bounce: %true if swiotlb bouncing is forced
* @for_alloc: %true if the pool is used for memory allocation
+ * @nareas:	The number of areas in the pool.
+ * @area_nslabs: The number of slots per area.
*/
struct io_tlb_mem {
phys_addr_t start;
@@ -96,17 +94,14 @@ struct io_tlb_mem {
void *vaddr;
unsigned long nslabs;
unsigned long used;
- unsigned int index;
- spinlock_t lock;
struct dentry *debugfs;
bool late_alloc;
bool force_bounce;
bool for_alloc;
- struct io_tlb_slot {
- phys_addr_t orig_addr;
- size_t alloc_size;
- unsigned int list;
- } *slots;
+ unsigned int nareas;
+ unsigned int area_nslabs;
+ struct io_tlb_area *areas;
+ struct io_tlb_slot *slots;
};
extern struct io_tlb_mem io_tlb_default_mem;
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 17b42ce89d3e..780690dc08cd 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -268,6 +268,10 @@ static inline struct ctl_table_header *register_sysctl_table(struct ctl_table *
return NULL;
}
+static inline void register_sysctl_init(const char *path, struct ctl_table *table)
+{
+}
+
static inline struct ctl_table_header *register_sysctl_mount_point(const char *path)
{
return NULL;
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index e3f1e8ac1f85..fd3fe5c8c17f 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -235,6 +235,22 @@ struct bin_attribute bin_attr_##_name = __BIN_ATTR_WO(_name, _size)
#define BIN_ATTR_RW(_name, _size) \
struct bin_attribute bin_attr_##_name = __BIN_ATTR_RW(_name, _size)
+
+#define __BIN_ATTR_ADMIN_RO(_name, _size) { \
+ .attr = { .name = __stringify(_name), .mode = 0400 }, \
+ .read = _name##_read, \
+ .size = _size, \
+}
+
+#define __BIN_ATTR_ADMIN_RW(_name, _size) \
+ __BIN_ATTR(_name, 0600, _name##_read, _name##_write, _size)
+
+#define BIN_ATTR_ADMIN_RO(_name, _size) \
+struct bin_attribute bin_attr_##_name = __BIN_ATTR_ADMIN_RO(_name, _size)
+
+#define BIN_ATTR_ADMIN_RW(_name, _size) \
+struct bin_attribute bin_attr_##_name = __BIN_ATTR_ADMIN_RW(_name, _size)
+
struct sysfs_ops {
ssize_t (*show)(struct kobject *, struct attribute *, char *);
ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t);
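
The new BIN_ATTR_ADMIN_* helpers above follow the usual sysfs convention of deriving the callback names from the attribute name. A sketch of a root-only readable binary attribute (hypothetical "blob" attribute, not from this patch):

static ssize_t blob_read(struct file *file, struct kobject *kobj,
			 struct bin_attribute *attr, char *buf,
			 loff_t off, size_t count)
{
	/* Copy up to @count bytes starting at @off into @buf. */
	return 0;
}
static BIN_ATTR_ADMIN_RO(blob, 256);	/* mode 0400, size 256 bytes */
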
diff --git a/include/linux/tboot.h b/include/linux/tboot.h
index 5146d2574e85..d2279160ef39 100644
--- a/include/linux/tboot.h
+++ b/include/linux/tboot.h
@@ -126,7 +126,6 @@ extern void tboot_probe(void);
extern void tboot_shutdown(u32 shutdown_type);
extern struct acpi_table_header *tboot_get_dmar_table(
struct acpi_table_header *dmar_tbl);
-extern int tboot_force_iommu(void);
#else
@@ -136,7 +135,6 @@ extern int tboot_force_iommu(void);
#define tboot_sleep(sleep_state, pm1a_control, pm1b_control) \
do { } while (0)
#define tboot_get_dmar_table(dmar_tbl) (dmar_tbl)
-#define tboot_force_iommu() 0
#endif /* !CONFIG_INTEL_TXT */
diff --git a/include/linux/time64.h b/include/linux/time64.h
index 2fb8232cff1d..f1bcea8c124a 100644
--- a/include/linux/time64.h
+++ b/include/linux/time64.h
@@ -145,7 +145,7 @@ static inline s64 timespec64_to_ns(const struct timespec64 *ts)
*
* Returns the timespec64 representation of the nsec parameter.
*/
-extern struct timespec64 ns_to_timespec64(const s64 nsec);
+extern struct timespec64 ns_to_timespec64(s64 nsec);
/**
* timespec64_add_ns - Adds nanoseconds to a timespec64
diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h
index 739ba9a03ec1..20c0ff54b7a0 100644
--- a/include/linux/tpm_eventlog.h
+++ b/include/linux/tpm_eventlog.h
@@ -157,7 +157,7 @@ struct tcg_algorithm_info {
* Return: size of the event on success, 0 on failure
*/
-static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
+static __always_inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
struct tcg_pcr_event *event_header,
bool do_mapping)
{
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index e6e95a9f07a5..8401dec93c15 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -814,8 +814,6 @@ extern int trace_add_event_call(struct trace_event_call *call);
extern int trace_remove_event_call(struct trace_event_call *call);
extern int trace_event_get_offsets(struct trace_event_call *call);
-#define is_signed_type(type) (((type)(-1)) < (type)1)
-
int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
int trace_set_clr_event(const char *system, const char *event, int set);
int trace_array_set_clr_event(struct trace_array *tr, const char *system,
@@ -916,6 +914,24 @@ perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
#endif
+#define TRACE_EVENT_STR_MAX 512
+
+/*
+ * gcc warns that a va_list cannot be used in an inlined
+ * function, so this is made into a macro instead :-/
+ */
+#define __trace_event_vstr_len(fmt, va) \
+({ \
+ va_list __ap; \
+ int __ret; \
+ \
+ va_copy(__ap, *(va)); \
+ __ret = vsnprintf(NULL, 0, fmt, __ap) + 1; \
+ va_end(__ap); \
+ \
+ min(__ret, TRACE_EVENT_STR_MAX); \
+})
+
#endif /* _LINUX_TRACE_EVENT_H */
/*
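
The __trace_event_vstr_len() macro added above sizes a vsnprintf() result without consuming the caller's va_list. A minimal usage sketch (hypothetical helper):

static int foo_fmt_len(const char *fmt, va_list ap)
{
	/* va_copy() inside the macro leaves @ap untouched for the caller. */
	return __trace_event_vstr_len(fmt, &ap);	/* capped at TRACE_EVENT_STR_MAX */
}
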
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 55717a2eda08..4b33b95eb8be 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -151,7 +151,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
/*
* Individual subsystem my have a separate configuration to
* enable their tracepoints. By default, this file will create
- * the tracepoints if CONFIG_TRACEPOINT is defined. If a subsystem
+ * the tracepoints if CONFIG_TRACEPOINTS is defined. If a subsystem
* wants to be able to disable its tracepoints from being created
* it can define NOTRACE before including the tracepoint headers.
*/
diff --git a/include/linux/tty_buffer.h b/include/linux/tty_buffer.h
index 3b9d77604291..1796648c2907 100644
--- a/include/linux/tty_buffer.h
+++ b/include/linux/tty_buffer.h
@@ -15,6 +15,7 @@ struct tty_buffer {
int used;
int size;
int commit;
+ int lookahead; /* Lazy update on recv, can become less than "read" */
int read;
int flags;
/* Data points here */
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index e85002b56752..ede6f2157f32 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -186,6 +186,18 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
* indicate all data received is %TTY_NORMAL. If assigned, prefer this
* function for automatic flow control.
*
+ * @lookahead_buf: [DRV] ``void ()(struct tty_struct *tty,
+ * const unsigned char *cp, const char *fp, int count)``
+ *
+ * This function is called by the low-level tty driver for characters
+ * not eaten by ->receive_buf() or ->receive_buf2(). It is useful for
+ * processing high-priority characters such as software flow-control
+ * characters that could otherwise get stuck in the intermediate
+ * buffer until the tty has room to receive them. The ldisc must be
+ * able to handle a later ->receive_buf() or ->receive_buf2() call for
+ * the same characters (e.g. by skipping the actions for high-priority
+ * characters already handled by ->lookahead_buf()).
+ *
 * @owner: module containing this ldisc (for reference counting)
*
* This structure defines the interface between the tty line discipline
@@ -229,6 +241,8 @@ struct tty_ldisc_ops {
void (*dcd_change)(struct tty_struct *tty, unsigned int status);
int (*receive_buf2)(struct tty_struct *tty, const unsigned char *cp,
const char *fp, int count);
+ void (*lookahead_buf)(struct tty_struct *tty, const unsigned char *cp,
+ const unsigned char *fp, unsigned int count);
struct module *owner;
};
diff --git a/include/linux/tty_port.h b/include/linux/tty_port.h
index 58e9619116b7..fa3c3bdaa234 100644
--- a/include/linux/tty_port.h
+++ b/include/linux/tty_port.h
@@ -40,6 +40,8 @@ struct tty_port_operations {
struct tty_port_client_operations {
int (*receive_buf)(struct tty_port *port, const unsigned char *, const unsigned char *, size_t);
+ void (*lookahead_buf)(struct tty_port *port, const unsigned char *cp,
+ const unsigned char *fp, unsigned int count);
void (*write_wakeup)(struct tty_port *port);
};
diff --git a/include/linux/ucb1400.h b/include/linux/ucb1400.h
index 0968ef458447..22345391350b 100644
--- a/include/linux/ucb1400.h
+++ b/include/linux/ucb1400.h
@@ -84,8 +84,6 @@ struct ucb1400_gpio {
struct gpio_chip gc;
struct snd_ac97 *ac97;
int gpio_offset;
- int (*gpio_setup)(struct device *dev, int ngpio);
- int (*gpio_teardown)(struct device *dev, int ngpio);
};
struct ucb1400_ts {
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 9a2dc496d535..5896af36199c 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -26,6 +26,7 @@ enum iter_type {
ITER_PIPE,
ITER_XARRAY,
ITER_DISCARD,
+ ITER_UBUF,
};
struct iov_iter_state {
@@ -38,7 +39,11 @@ struct iov_iter {
u8 iter_type;
bool nofault;
bool data_source;
- size_t iov_offset;
+ bool user_backed;
+ union {
+ size_t iov_offset;
+ int last_offset;
+ };
size_t count;
union {
const struct iovec *iov;
@@ -46,6 +51,7 @@ struct iov_iter {
const struct bio_vec *bvec;
struct xarray *xarray;
struct pipe_inode_info *pipe;
+ void __user *ubuf;
};
union {
unsigned long nr_segs;
@@ -70,6 +76,11 @@ static inline void iov_iter_save_state(struct iov_iter *iter,
state->nr_segs = iter->nr_segs;
}
+static inline bool iter_is_ubuf(const struct iov_iter *i)
+{
+ return iov_iter_type(i) == ITER_UBUF;
+}
+
static inline bool iter_is_iovec(const struct iov_iter *i)
{
return iov_iter_type(i) == ITER_IOVEC;
@@ -105,6 +116,11 @@ static inline unsigned char iov_iter_rw(const struct iov_iter *i)
return i->data_source ? WRITE : READ;
}
+static inline bool user_backed_iter(const struct iov_iter *i)
+{
+ return i->user_backed;
+}
+
/*
* Total number of bytes covered by an iovec.
*
@@ -231,9 +247,9 @@ void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
loff_t start, size_t count);
-ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
+ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
size_t maxsize, unsigned maxpages, size_t *start);
-ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
+ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages,
size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);
@@ -322,4 +338,17 @@ ssize_t __import_iovec(int type, const struct iovec __user *uvec,
int import_single_range(int type, void __user *buf, size_t len,
struct iovec *iov, struct iov_iter *i);
+static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
+ void __user *buf, size_t count)
+{
+ WARN_ON(direction & ~(READ | WRITE));
+ *i = (struct iov_iter) {
+ .iter_type = ITER_UBUF,
+ .user_backed = true,
+ .data_source = direction,
+ .ubuf = buf,
+ .count = count
+ };
+}
+
#endif
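
The new ITER_UBUF iterator type above covers the common single-user-buffer case without an iovec array. A sketch of setting one up as the destination of a read (foo_do_read_iter is a hypothetical helper):

static ssize_t foo_read(struct file *file, char __user *buf, size_t len, loff_t *ppos)
{
	struct iov_iter iter;

	/* Single-segment, user-backed destination; READ means data flows into it. */
	iov_iter_ubuf(&iter, READ, buf, len);

	/* iter_is_ubuf(&iter) and user_backed_iter(&iter) are now both true. */
	return foo_do_read_iter(file, &iter, ppos);
}
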
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index 732b522bacb7..e1b8a915e9e9 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -73,6 +73,8 @@ extern ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long dst_start,
extern int mwriteprotect_range(struct mm_struct *dst_mm,
unsigned long start, unsigned long len,
bool enable_wp, atomic_t *mmap_changing);
+extern void uffd_wp_range(struct mm_struct *dst_mm, struct vm_area_struct *vma,
+ unsigned long start, unsigned long len, bool enable_wp);
/* mm helpers */
static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
index 7b4a13d3bd91..d282f464d2f1 100644
--- a/include/linux/vdpa.h
+++ b/include/linux/vdpa.h
@@ -218,6 +218,9 @@ struct vdpa_map_file {
* @reset: Reset device
* @vdev: vdpa device
* Returns integer: success (0) or error (< 0)
+ * @suspend: Suspend or resume the device (optional)
+ * @vdev: vdpa device
+ * Returns integer: success (0) or error (< 0)
* @get_config_size: Get the size of the configuration space includes
* fields that are conditional on feature bits.
* @vdev: vdpa device
@@ -319,6 +322,7 @@ struct vdpa_config_ops {
u8 (*get_status)(struct vdpa_device *vdev);
void (*set_status)(struct vdpa_device *vdev, u8 status);
int (*reset)(struct vdpa_device *vdev);
+ int (*suspend)(struct vdpa_device *vdev);
size_t (*get_config_size)(struct vdpa_device *vdev);
void (*get_config)(struct vdpa_device *vdev, unsigned int offset,
void *buf, unsigned int len);
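
A sketch of how a parent driver might wire the new optional @suspend callback; the my_vdpa_* names are hypothetical and the bodies are placeholders, with the mandatory callbacks omitted:

    #include <linux/vdpa.h>

    static int my_vdpa_reset(struct vdpa_device *vdev)
    {
            /* placeholder: return the device to its initial state */
            return 0;
    }

    static int my_vdpa_suspend(struct vdpa_device *vdev)
    {
            /* placeholder: stop the datapath but preserve device state
             * so it can later be resumed or migrated */
            return 0;
    }

    static const struct vdpa_config_ops my_vdpa_ops = {
            /* ...mandatory callbacks omitted in this sketch... */
            .reset   = my_vdpa_reset,
            .suspend = my_vdpa_suspend,
    };
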
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index aa888cc51757..e05ddc6fe6a5 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -32,6 +32,11 @@ struct vfio_device_set {
struct vfio_device {
struct device *dev;
const struct vfio_device_ops *ops;
+ /*
+ * mig_ops is a static property of the vfio_device which must be set
+ * prior to registering the vfio_device.
+ */
+ const struct vfio_migration_ops *mig_ops;
struct vfio_group *group;
struct vfio_device_set *dev_set;
struct list_head dev_set_list;
@@ -44,6 +49,7 @@ struct vfio_device {
unsigned int open_count;
struct completion comp;
struct list_head group_next;
+ struct list_head iommu_entry;
};
/**
@@ -60,17 +66,9 @@ struct vfio_device {
* @match: Optional device name match callback (return: 0 for no-match, >0 for
* match, -errno for abort (ex. match with insufficient or incorrect
* additional args)
+ * @dma_unmap: Called when userspace unmaps IOVA from the container
+ * this device is attached to.
* @device_feature: Optional, fill in the VFIO_DEVICE_FEATURE ioctl
- * @migration_set_state: Optional callback to change the migration state for
- * devices that support migration. It's mandatory for
- * VFIO_DEVICE_FEATURE_MIGRATION migration support.
- * The returned FD is used for data transfer according to the FSM
- * definition. The driver is responsible to ensure that FD reaches end
- * of stream or error whenever the migration FSM leaves a data transfer
- * state or before close_device() returns.
- * @migration_get_state: Optional callback to get the migration state for
- * devices that support migration. It's mandatory for
- * VFIO_DEVICE_FEATURE_MIGRATION migration support.
*/
struct vfio_device_ops {
char *name;
@@ -85,8 +83,24 @@ struct vfio_device_ops {
int (*mmap)(struct vfio_device *vdev, struct vm_area_struct *vma);
void (*request)(struct vfio_device *vdev, unsigned int count);
int (*match)(struct vfio_device *vdev, char *buf);
+ void (*dma_unmap)(struct vfio_device *vdev, u64 iova, u64 length);
int (*device_feature)(struct vfio_device *device, u32 flags,
void __user *arg, size_t argsz);
+};
+
+/**
+ * @migration_set_state: Optional callback to change the migration state for
+ * devices that support migration. It's mandatory for
+ * VFIO_DEVICE_FEATURE_MIGRATION migration support.
+ * The returned FD is used for data transfer according to the FSM
+ * definition. The driver is responsible to ensure that FD reaches end
+ * of stream or error whenever the migration FSM leaves a data transfer
+ * state or before close_device() returns.
+ * @migration_get_state: Optional callback to get the migration state for
+ * devices that support migration. It's mandatory for
+ * VFIO_DEVICE_FEATURE_MIGRATION migration support.
+ */
+struct vfio_migration_ops {
struct file *(*migration_set_state)(
struct vfio_device *device,
enum vfio_device_mig_state new_state);
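
With the migration callbacks split out of vfio_device_ops, a driver now points vfio_device->mig_ops at its own table before registering the device. A hedged sketch; the driver names are hypothetical, the bodies are placeholders, and the migration_get_state signature is assumed from its previous form:

    #include <linux/vfio.h>
    #include <linux/err.h>

    static struct file *
    my_migration_set_state(struct vfio_device *device,
                           enum vfio_device_mig_state new_state)
    {
            return ERR_PTR(-EOPNOTSUPP);    /* placeholder body */
    }

    static int my_migration_get_state(struct vfio_device *device,
                                      enum vfio_device_mig_state *curr_state)
    {
            *curr_state = VFIO_DEVICE_STATE_RUNNING;    /* placeholder */
            return 0;
    }

    static const struct vfio_migration_ops my_mig_ops = {
            .migration_set_state = my_migration_set_state,
            .migration_get_state = my_migration_get_state,
    };

    /* in the driver's init path, before registering the vfio_device: */
    /*      vdev->mig_ops = &my_mig_ops;                              */
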
@@ -140,36 +154,18 @@ int vfio_mig_get_next_state(struct vfio_device *device,
/*
* External user API
*/
-extern struct iommu_group *vfio_file_iommu_group(struct file *file);
-extern bool vfio_file_enforced_coherent(struct file *file);
-extern void vfio_file_set_kvm(struct file *file, struct kvm *kvm);
-extern bool vfio_file_has_dev(struct file *file, struct vfio_device *device);
+struct iommu_group *vfio_file_iommu_group(struct file *file);
+bool vfio_file_enforced_coherent(struct file *file);
+void vfio_file_set_kvm(struct file *file, struct kvm *kvm);
+bool vfio_file_has_dev(struct file *file, struct vfio_device *device);
#define VFIO_PIN_PAGES_MAX_ENTRIES (PAGE_SIZE/sizeof(unsigned long))
-extern int vfio_pin_pages(struct vfio_device *device, unsigned long *user_pfn,
- int npage, int prot, unsigned long *phys_pfn);
-extern int vfio_unpin_pages(struct vfio_device *device, unsigned long *user_pfn,
- int npage);
-extern int vfio_dma_rw(struct vfio_device *device, dma_addr_t user_iova,
- void *data, size_t len, bool write);
-
-/* each type has independent events */
-enum vfio_notify_type {
- VFIO_IOMMU_NOTIFY = 0,
-};
-
-/* events for VFIO_IOMMU_NOTIFY */
-#define VFIO_IOMMU_NOTIFY_DMA_UNMAP BIT(0)
-
-extern int vfio_register_notifier(struct vfio_device *device,
- enum vfio_notify_type type,
- unsigned long *required_events,
- struct notifier_block *nb);
-extern int vfio_unregister_notifier(struct vfio_device *device,
- enum vfio_notify_type type,
- struct notifier_block *nb);
-
+int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova,
+ int npage, int prot, struct page **pages);
+void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage);
+int vfio_dma_rw(struct vfio_device *device, dma_addr_t iova,
+ void *data, size_t len, bool write);
/*
* Sub-module helpers
@@ -178,25 +174,24 @@ struct vfio_info_cap {
struct vfio_info_cap_header *buf;
size_t size;
};
-extern struct vfio_info_cap_header *vfio_info_cap_add(
- struct vfio_info_cap *caps, size_t size, u16 id, u16 version);
-extern void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset);
+struct vfio_info_cap_header *vfio_info_cap_add(struct vfio_info_cap *caps,
+ size_t size, u16 id,
+ u16 version);
+void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset);
-extern int vfio_info_add_capability(struct vfio_info_cap *caps,
- struct vfio_info_cap_header *cap,
- size_t size);
+int vfio_info_add_capability(struct vfio_info_cap *caps,
+ struct vfio_info_cap_header *cap, size_t size);
-extern int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr,
- int num_irqs, int max_irq_type,
- size_t *data_size);
+int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr,
+ int num_irqs, int max_irq_type,
+ size_t *data_size);
struct pci_dev;
#if IS_ENABLED(CONFIG_VFIO_SPAPR_EEH)
-extern void vfio_spapr_pci_eeh_open(struct pci_dev *pdev);
-extern void vfio_spapr_pci_eeh_release(struct pci_dev *pdev);
-extern long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group,
- unsigned int cmd,
- unsigned long arg);
+void vfio_spapr_pci_eeh_open(struct pci_dev *pdev);
+void vfio_spapr_pci_eeh_release(struct pci_dev *pdev);
+long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group, unsigned int cmd,
+ unsigned long arg);
#else
static inline void vfio_spapr_pci_eeh_open(struct pci_dev *pdev)
{
@@ -230,10 +225,9 @@ struct virqfd {
struct virqfd **pvirqfd;
};
-extern int vfio_virqfd_enable(void *opaque,
- int (*handler)(void *, void *),
- void (*thread)(void *, void *),
- void *data, struct virqfd **pvirqfd, int fd);
-extern void vfio_virqfd_disable(struct virqfd **pvirqfd);
+int vfio_virqfd_enable(void *opaque, int (*handler)(void *, void *),
+ void (*thread)(void *, void *), void *data,
+ struct virqfd **pvirqfd, int fd);
+void vfio_virqfd_disable(struct virqfd **pvirqfd);
#endif /* VFIO_H */
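
The pinning helpers above now take an IOVA and a struct page array instead of PFN lists. A rough usage sketch for an emulated-IOMMU (mdev-style) driver, with hypothetical function names; the IOMMU_READ/IOMMU_WRITE protection flags are assumed from existing in-tree callers:

    #include <linux/vfio.h>
    #include <linux/iommu.h>

    /* Hypothetical: pin one page of guest memory at 'iova' for DMA emulation.
     * vfio_pin_pages() returns the number of pages pinned or a negative errno. */
    static struct page *example_pin_one(struct vfio_device *device, dma_addr_t iova)
    {
            struct page *page;
            int ret;

            ret = vfio_pin_pages(device, iova, 1, IOMMU_READ | IOMMU_WRITE, &page);
            if (ret != 1)
                    return NULL;
            return page;
    }

    static void example_unpin_one(struct vfio_device *device, dma_addr_t iova)
    {
            vfio_unpin_pages(device, iova, 1);
    }
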
diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h
index d5d9e17f0156..5579ece4347b 100644
--- a/include/linux/vfio_pci_core.h
+++ b/include/linux/vfio_pci_core.h
@@ -147,23 +147,23 @@ struct vfio_pci_core_device {
#define is_irq_none(vdev) (!(is_intx(vdev) || is_msi(vdev) || is_msix(vdev)))
#define irq_is(vdev, type) (vdev->irq_type == type)
-extern void vfio_pci_intx_mask(struct vfio_pci_core_device *vdev);
-extern void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev);
+void vfio_pci_intx_mask(struct vfio_pci_core_device *vdev);
+void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev);
-extern int vfio_pci_set_irqs_ioctl(struct vfio_pci_core_device *vdev,
- uint32_t flags, unsigned index,
- unsigned start, unsigned count, void *data);
+int vfio_pci_set_irqs_ioctl(struct vfio_pci_core_device *vdev,
+ uint32_t flags, unsigned index,
+ unsigned start, unsigned count, void *data);
-extern ssize_t vfio_pci_config_rw(struct vfio_pci_core_device *vdev,
- char __user *buf, size_t count,
- loff_t *ppos, bool iswrite);
+ssize_t vfio_pci_config_rw(struct vfio_pci_core_device *vdev,
+ char __user *buf, size_t count,
+ loff_t *ppos, bool iswrite);
-extern ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf,
- size_t count, loff_t *ppos, bool iswrite);
+ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite);
#ifdef CONFIG_VFIO_PCI_VGA
-extern ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, char __user *buf,
- size_t count, loff_t *ppos, bool iswrite);
+ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite);
#else
static inline ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev,
char __user *buf, size_t count,
@@ -173,32 +173,31 @@ static inline ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev,
}
#endif
-extern long vfio_pci_ioeventfd(struct vfio_pci_core_device *vdev, loff_t offset,
- uint64_t data, int count, int fd);
+long vfio_pci_ioeventfd(struct vfio_pci_core_device *vdev, loff_t offset,
+ uint64_t data, int count, int fd);
-extern int vfio_pci_init_perm_bits(void);
-extern void vfio_pci_uninit_perm_bits(void);
+int vfio_pci_init_perm_bits(void);
+void vfio_pci_uninit_perm_bits(void);
-extern int vfio_config_init(struct vfio_pci_core_device *vdev);
-extern void vfio_config_free(struct vfio_pci_core_device *vdev);
+int vfio_config_init(struct vfio_pci_core_device *vdev);
+void vfio_config_free(struct vfio_pci_core_device *vdev);
-extern int vfio_pci_register_dev_region(struct vfio_pci_core_device *vdev,
- unsigned int type, unsigned int subtype,
- const struct vfio_pci_regops *ops,
- size_t size, u32 flags, void *data);
+int vfio_pci_register_dev_region(struct vfio_pci_core_device *vdev,
+ unsigned int type, unsigned int subtype,
+ const struct vfio_pci_regops *ops,
+ size_t size, u32 flags, void *data);
-extern int vfio_pci_set_power_state(struct vfio_pci_core_device *vdev,
- pci_power_t state);
+int vfio_pci_set_power_state(struct vfio_pci_core_device *vdev,
+ pci_power_t state);
-extern bool __vfio_pci_memory_enabled(struct vfio_pci_core_device *vdev);
-extern void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_core_device
- *vdev);
-extern u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_core_device *vdev);
-extern void vfio_pci_memory_unlock_and_restore(struct vfio_pci_core_device *vdev,
- u16 cmd);
+bool __vfio_pci_memory_enabled(struct vfio_pci_core_device *vdev);
+void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_core_device *vdev);
+u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_core_device *vdev);
+void vfio_pci_memory_unlock_and_restore(struct vfio_pci_core_device *vdev,
+ u16 cmd);
#ifdef CONFIG_VFIO_PCI_IGD
-extern int vfio_pci_igd_init(struct vfio_pci_core_device *vdev);
+int vfio_pci_igd_init(struct vfio_pci_core_device *vdev);
#else
static inline int vfio_pci_igd_init(struct vfio_pci_core_device *vdev)
{
@@ -207,8 +206,8 @@ static inline int vfio_pci_igd_init(struct vfio_pci_core_device *vdev)
#endif
#ifdef CONFIG_VFIO_PCI_ZDEV_KVM
-extern int vfio_pci_info_zdev_add_caps(struct vfio_pci_core_device *vdev,
- struct vfio_info_cap *caps);
+int vfio_pci_info_zdev_add_caps(struct vfio_pci_core_device *vdev,
+ struct vfio_info_cap *caps);
int vfio_pci_zdev_open_device(struct vfio_pci_core_device *vdev);
void vfio_pci_zdev_close_device(struct vfio_pci_core_device *vdev);
#else
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index d8fdf170637c..dcab9c7e8784 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -11,7 +11,7 @@
#include <linux/gfp.h>
/**
- * virtqueue - a queue to register buffers for sending or receiving.
+ * struct virtqueue - a queue to register buffers for sending or receiving.
* @list: the chain of virtqueues for this device
* @callback: the function to call when buffers are consumed (can be NULL).
* @name: the name of this virtqueue (mainly for debugging)
@@ -19,6 +19,8 @@
* @priv: a pointer for the virtqueue implementation to use.
* @index: the zero-based ordinal number for this queue.
* @num_free: number of elements we expect to be able to fit.
+ * @num_max: the maximum number of elements supported by the device.
+ * @reset: vq is in reset state or not.
*
* A note on @num_free: with indirect buffers, each buffer needs one
* element in the queue, otherwise a buffer will need one element per
@@ -31,7 +33,9 @@ struct virtqueue {
struct virtio_device *vdev;
unsigned int index;
unsigned int num_free;
+ unsigned int num_max;
void *priv;
+ bool reset;
};
int virtqueue_add_outbuf(struct virtqueue *vq,
@@ -89,8 +93,11 @@ dma_addr_t virtqueue_get_desc_addr(struct virtqueue *vq);
dma_addr_t virtqueue_get_avail_addr(struct virtqueue *vq);
dma_addr_t virtqueue_get_used_addr(struct virtqueue *vq);
+int virtqueue_resize(struct virtqueue *vq, u32 num,
+ void (*recycle)(struct virtqueue *vq, void *buf));
+
/**
- * virtio_device - representation of a device using virtio
+ * struct virtio_device - representation of a device using virtio
* @index: unique position on the virtio bus
* @failed: saved value for VIRTIO_CONFIG_S_FAILED bit (for restore)
* @config_enabled: configuration change reporting enabled
@@ -133,6 +140,9 @@ bool is_virtio_device(struct device *dev);
void virtio_break_device(struct virtio_device *dev);
void __virtio_unbreak_device(struct virtio_device *dev);
+void __virtqueue_break(struct virtqueue *_vq);
+void __virtqueue_unbreak(struct virtqueue *_vq);
+
void virtio_config_changed(struct virtio_device *dev);
#ifdef CONFIG_PM_SLEEP
int virtio_device_freeze(struct virtio_device *dev);
@@ -146,7 +156,7 @@ size_t virtio_max_dma_size(struct virtio_device *vdev);
list_for_each_entry(vq, &vdev->vqs, list)
/**
- * virtio_driver - operations for a virtio I/O driver
+ * struct virtio_driver - operations for a virtio I/O driver
* @driver: underlying device driver (populate name and owner).
* @id_table: the ids serviced by this driver.
* @feature_table: an array of feature numbers supported by this driver.
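
The virtqueue_resize() helper added above lets a driver shrink or grow a ring at runtime; the recycle callback receives every buffer still held by the old ring. A hedged sketch with hypothetical buffer handling:

    #include <linux/virtio.h>
    #include <linux/slab.h>

    /* Hypothetical recycle callback: outstanding buffers from the old ring
     * are handed back here so the driver can free or re-queue them. */
    static void example_recycle(struct virtqueue *vq, void *buf)
    {
            kfree(buf);
    }

    static int example_resize_ring(struct virtqueue *vq, u32 new_num)
    {
            int err;

            err = virtqueue_resize(vq, new_num, example_recycle);
            if (err)
                    return err;     /* transport may not support per-queue reset */
            return 0;
    }
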
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index b47c2e7ed0ee..4b517649cfe8 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -78,6 +78,18 @@ struct virtio_shm_region {
* @set_vq_affinity: set the affinity for a virtqueue (optional).
* @get_vq_affinity: get the affinity for a virtqueue (optional).
* @get_shm_region: get a shared memory region based on the index.
+ * @disable_vq_and_reset: reset a queue individually (optional).
+ * vq: the virtqueue
+ * Returns 0 on success or error status
+ * disable_vq_and_reset will guarantee that the callbacks are disabled and
+ * synchronized.
+ * Except for the callback, the caller should guarantee that the vring is
+ * not accessed by any functions of virtqueue.
+ * @enable_vq_after_reset: enable a reset queue
+ * vq: the virtqueue
+ * Returns 0 on success or error status
+ * If disable_vq_and_reset is set, then enable_vq_after_reset must also be
+ * set.
*/
typedef void vq_callback_t(struct virtqueue *);
struct virtio_config_ops {
@@ -104,6 +116,8 @@ struct virtio_config_ops {
int index);
bool (*get_shm_region)(struct virtio_device *vdev,
struct virtio_shm_region *region, u8 id);
+ int (*disable_vq_and_reset)(struct virtqueue *vq);
+ int (*enable_vq_after_reset)(struct virtqueue *vq);
};
/* If driver didn't advertise the feature, it will never appear. */
@@ -225,7 +239,7 @@ int virtio_find_vqs_ctx(struct virtio_device *vdev, unsigned nvqs,
/**
* virtio_synchronize_cbs - synchronize with virtqueue callbacks
- * @vdev: the device
+ * @dev: the virtio device
*/
static inline
void virtio_synchronize_cbs(struct virtio_device *dev)
@@ -244,7 +258,7 @@ void virtio_synchronize_cbs(struct virtio_device *dev)
/**
* virtio_device_ready - enable vq use in probe function
- * @vdev: the device
+ * @dev: the virtio device
*
* Driver must call this to use vqs in the probe function.
*
@@ -292,7 +306,7 @@ const char *virtio_bus_name(struct virtio_device *vdev)
/**
* virtqueue_set_affinity - setting affinity for a virtqueue
* @vq: the virtqueue
- * @cpu: the cpu no.
+ * @cpu_mask: the cpu mask
*
* Pay attention the function are best-effort: the affinity hint may not be set
* due to config support, irq type and sharing.
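
On the transport side, the two per-queue reset callbacks documented above must be provided as a pair. A skeleton of how a transport's ops table might wire them; the names are hypothetical and the bodies are placeholders:

    #include <linux/virtio_config.h>

    static int my_disable_vq_and_reset(struct virtqueue *vq)
    {
            /* placeholder: quiesce and synchronize callbacks, then reset
             * only this queue */
            return 0;
    }

    static int my_enable_vq_after_reset(struct virtqueue *vq)
    {
            /* placeholder: re-enable the queue that was reset above */
            return 0;
    }

    static const struct virtio_config_ops my_config_ops = {
            /* ...get/set/reset/find_vqs and friends omitted in this sketch... */
            .disable_vq_and_reset  = my_disable_vq_and_reset,
            .enable_vq_after_reset = my_enable_vq_after_reset,
    };
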
diff --git a/include/linux/virtio_pci_modern.h b/include/linux/virtio_pci_modern.h
index eb2bd9b4077d..c4eeb79b0139 100644
--- a/include/linux/virtio_pci_modern.h
+++ b/include/linux/virtio_pci_modern.h
@@ -5,6 +5,13 @@
#include <linux/pci.h>
#include <linux/virtio_pci.h>
+struct virtio_pci_modern_common_cfg {
+ struct virtio_pci_common_cfg cfg;
+
+ __le16 queue_notify_data; /* read-write */
+ __le16 queue_reset; /* read-write */
+};
+
struct virtio_pci_modern_device {
struct pci_dev *pci_dev;
@@ -106,4 +113,6 @@ void __iomem * vp_modern_map_vq_notify(struct virtio_pci_modern_device *mdev,
u16 index, resource_size_t *pa);
int vp_modern_probe(struct virtio_pci_modern_device *mdev);
void vp_modern_remove(struct virtio_pci_modern_device *mdev);
+int vp_modern_get_queue_reset(struct virtio_pci_modern_device *mdev, u16 index);
+void vp_modern_set_queue_reset(struct virtio_pci_modern_device *mdev, u16 index);
#endif
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index b485b13fa50b..8b8af1a38991 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -76,16 +76,6 @@ struct virtqueue *vring_create_virtqueue(unsigned int index,
void (*callback)(struct virtqueue *vq),
const char *name);
-/* Creates a virtqueue with a custom layout. */
-struct virtqueue *__vring_new_virtqueue(unsigned int index,
- struct vring vring,
- struct virtio_device *vdev,
- bool weak_barriers,
- bool ctx,
- bool (*notify)(struct virtqueue *),
- void (*callback)(struct virtqueue *),
- const char *name);
-
/*
* Creates a virtqueue with a standard layout but a caller-allocated
* ring.
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 404024486fa5..f3fc36cd2276 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -20,12 +20,19 @@
#define HIGHMEM_ZONE(xx)
#endif
-#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL, HIGHMEM_ZONE(xx) xx##_MOVABLE
+#ifdef CONFIG_ZONE_DEVICE
+#define DEVICE_ZONE(xx) xx##_DEVICE,
+#else
+#define DEVICE_ZONE(xx)
+#endif
+
+#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL, \
+ HIGHMEM_ZONE(xx) xx##_MOVABLE, DEVICE_ZONE(xx)
enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
- FOR_ALL_ZONES(PGALLOC),
- FOR_ALL_ZONES(ALLOCSTALL),
- FOR_ALL_ZONES(PGSCAN_SKIP),
+ FOR_ALL_ZONES(PGALLOC)
+ FOR_ALL_ZONES(ALLOCSTALL)
+ FOR_ALL_ZONES(PGSCAN_SKIP)
PGFREE, PGACTIVATE, PGDEACTIVATE, PGLAZYFREE,
PGFAULT, PGMAJFAULT,
PGLAZYFREED,
diff --git a/include/linux/wait_bit.h b/include/linux/wait_bit.h
index 7dec36aecbd9..7725b7579b78 100644
--- a/include/linux/wait_bit.h
+++ b/include/linux/wait_bit.h
@@ -71,7 +71,7 @@ static inline int
wait_on_bit(unsigned long *word, int bit, unsigned mode)
{
might_sleep();
- if (!test_bit(bit, word))
+ if (!test_bit_acquire(bit, word))
return 0;
return out_of_line_wait_on_bit(word, bit,
bit_wait,
@@ -96,7 +96,7 @@ static inline int
wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
{
might_sleep();
- if (!test_bit(bit, word))
+ if (!test_bit_acquire(bit, word))
return 0;
return out_of_line_wait_on_bit(word, bit,
bit_wait_io,
@@ -123,7 +123,7 @@ wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
unsigned long timeout)
{
might_sleep();
- if (!test_bit(bit, word))
+ if (!test_bit_acquire(bit, word))
return 0;
return out_of_line_wait_on_bit_timeout(word, bit,
bit_wait_timeout,
@@ -151,7 +151,7 @@ wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
unsigned mode)
{
might_sleep();
- if (!test_bit(bit, word))
+ if (!test_bit_acquire(bit, word))
return 0;
return out_of_line_wait_on_bit(word, bit, action, mode);
}
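
The switch from test_bit() to test_bit_acquire() in the fast paths above matters because callers typically read data that the bit-clearing side published first; acquire ordering keeps those reads from being hoisted above the bit test. A minimal illustration, with a hypothetical flag bit and result field:

    #include <linux/wait_bit.h>
    #include <linux/sched.h>

    struct example {
            unsigned long flags;    /* bit 0 cleared by the producer */
            int result;             /* written by the producer before clearing bit 0 */
    };

    static int example_wait_for_result(struct example *e)
    {
            int err;

            /* the fast path now uses test_bit_acquire(), so the load of
             * e->result below cannot be reordered before the bit is seen clear */
            err = wait_on_bit(&e->flags, 0, TASK_INTERRUPTIBLE);
            if (err)
                    return err;     /* interrupted */
            return e->result;
    }
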
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
index ec1d1706f43c..78ebcf782ce5 100644
--- a/include/net/9p/client.h
+++ b/include/net/9p/client.h
@@ -11,6 +11,7 @@
#include <linux/utsname.h>
#include <linux/idr.h>
+#include <linux/tracepoint-defs.h>
/* Number of requests per row */
#define P9_ROW_MAXTAG 255
@@ -76,7 +77,7 @@ enum p9_req_status_t {
struct p9_req_t {
int status;
int t_err;
- struct kref refcount;
+ refcount_t refcount;
wait_queue_head_t wq;
struct p9_fcall tc;
struct p9_fcall rc;
@@ -227,15 +228,55 @@ struct p9_req_t *p9_tag_lookup(struct p9_client *c, u16 tag);
static inline void p9_req_get(struct p9_req_t *r)
{
- kref_get(&r->refcount);
+ refcount_inc(&r->refcount);
}
static inline int p9_req_try_get(struct p9_req_t *r)
{
- return kref_get_unless_zero(&r->refcount);
+ return refcount_inc_not_zero(&r->refcount);
}
-int p9_req_put(struct p9_req_t *r);
+int p9_req_put(struct p9_client *c, struct p9_req_t *r);
+
+/* We cannot have the real tracepoints in header files,
+ * use a wrapper function */
+DECLARE_TRACEPOINT(9p_fid_ref);
+void do_trace_9p_fid_get(struct p9_fid *fid);
+void do_trace_9p_fid_put(struct p9_fid *fid);
+
+/* fid reference counting helpers:
+ * - fids used for any length of time should always be referenced through
+ * p9_fid_get(), and released with p9_fid_put()
+ * - v9fs_fid_lookup() or similar will automatically call get for you
+ * and also require a put
+ * - the *_fid_add() helpers will stash the fid in the inode,
+ * at which point it is the responsibility of evict_inode()
+ * to call the put
+ * - the last put will automatically send a clunk to the server
+ */
+static inline struct p9_fid *p9_fid_get(struct p9_fid *fid)
+{
+ if (tracepoint_enabled(9p_fid_ref))
+ do_trace_9p_fid_get(fid);
+
+ refcount_inc(&fid->count);
+
+ return fid;
+}
+
+static inline int p9_fid_put(struct p9_fid *fid)
+{
+ if (!fid || IS_ERR(fid))
+ return 0;
+
+ if (tracepoint_enabled(9p_fid_ref))
+ do_trace_9p_fid_put(fid);
+
+ if (!refcount_dec_and_test(&fid->count))
+ return 0;
+
+ return p9_client_clunk(fid);
+}
void p9_client_cb(struct p9_client *c, struct p9_req_t *req, int status);
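
The new helpers make fid lifetime explicit: take a reference with p9_fid_get() for as long as the fid is in use, and drop it with p9_fid_put(), whose final put clunks the fid on the server. A small sketch; the caller function is hypothetical:

    #include <net/9p/client.h>

    /* Hypothetical: use a fid for a sequence of requests, then release it. */
    static int example_use_fid(struct p9_fid *fid)
    {
            fid = p9_fid_get(fid);          /* hold a reference while in use */

            /* ... issue 9p requests against 'fid' here ... */

            return p9_fid_put(fid);         /* last put sends the clunk */
    }
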
diff --git a/include/net/ax88796.h b/include/net/ax88796.h
index b658471f97f0..303100f08ab8 100644
--- a/include/net/ax88796.h
+++ b/include/net/ax88796.h
@@ -34,8 +34,8 @@ struct ax_plat_data {
const unsigned char *buf, int star_page);
void (*block_input)(struct net_device *dev, int count,
struct sk_buff *skb, int ring_offset);
- /* returns nonzero if a pending interrupt request might by caused by
- * the ax88786. Handles all interrupts if set to NULL
+ /* returns nonzero if a pending interrupt request might be caused by
+ * the ax88796. Handles all interrupts if set to NULL
*/
int (*check_irq)(struct platform_device *pdev);
};
diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
index 184105d68294..be2992e6de5d 100644
--- a/include/net/bond_3ad.h
+++ b/include/net/bond_3ad.h
@@ -290,7 +290,7 @@ static inline const char *bond_3ad_churn_desc(churn_state_t state)
}
/* ========== AD Exported functions to the main bonding code ========== */
-void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution);
+void bond_3ad_initialize(struct bonding *bond);
void bond_3ad_bind_slave(struct slave *slave);
void bond_3ad_unbind_slave(struct slave *slave);
void bond_3ad_state_machine_handler(struct work_struct *);
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 6e78d657aa05..afd606df149a 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -161,8 +161,9 @@ struct slave {
struct net_device *dev; /* first - useful for panic debug */
struct bonding *bond; /* our master */
int delay;
- /* all three in jiffies */
+ /* all 4 in jiffies */
unsigned long last_link_up;
+ unsigned long last_tx;
unsigned long last_rx;
unsigned long target_last_arp_rx[BOND_MAX_ARP_TARGETS];
s8 link; /* one of BOND_LINK_XXXX */
@@ -540,6 +541,16 @@ static inline unsigned long slave_last_rx(struct bonding *bond,
return slave->last_rx;
}
+static inline void slave_update_last_tx(struct slave *slave)
+{
+ WRITE_ONCE(slave->last_tx, jiffies);
+}
+
+static inline unsigned long slave_last_tx(struct slave *slave)
+{
+ return READ_ONCE(slave->last_tx);
+}
+
#ifdef CONFIG_NET_POLL_CONTROLLER
static inline netdev_tx_t bond_netpoll_send_skb(const struct slave *slave,
struct sk_buff *skb)
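
The new last_tx stamp mirrors last_rx: the transmit path records jiffies with slave_update_last_tx(), and a monitor can read it back with slave_last_tx(). An illustrative, hypothetical check assuming bond->params.arp_interval is in milliseconds:

    #include <net/bonding.h>
    #include <linux/jiffies.h>

    /* Hypothetical: has this slave transmitted within one arp_interval? */
    static bool example_slave_tx_recent(struct bonding *bond, struct slave *slave)
    {
            unsigned long last_tx = slave_last_tx(slave);

            return time_in_range(jiffies, last_tx,
                                 last_tx + msecs_to_jiffies(bond->params.arp_interval));
    }
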
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index c4898fcbf923..f90f0021f5f2 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -33,7 +33,7 @@ extern unsigned int sysctl_net_busy_poll __read_mostly;
static inline bool net_busy_loop_on(void)
{
- return sysctl_net_busy_poll;
+ return READ_ONCE(sysctl_net_busy_poll);
}
static inline bool sk_can_busy_loop(const struct sock *sk)
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 7cb3fa8310ed..56a50e1c51b9 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -11,6 +11,7 @@
/**
* struct genl_multicast_group - generic netlink multicast group
* @name: name of the multicast group, names are per-family
+ * @flags: GENL_* flags (%GENL_ADMIN_PERM or %GENL_UNS_ADMIN_PERM)
*/
struct genl_multicast_group {
char name[GENL_NAMSIZ];
@@ -116,7 +117,7 @@ enum genl_validate_flags {
* struct genl_small_ops - generic netlink operations (small version)
* @cmd: command identifier
* @internal_flags: flags used by the family
- * @flags: flags
+ * @flags: GENL_* flags (%GENL_ADMIN_PERM or %GENL_UNS_ADMIN_PERM)
* @validate: validation flags from enum genl_validate_flags
* @doit: standard command callback
* @dumpit: callback for dumpers
@@ -137,7 +138,7 @@ struct genl_small_ops {
* struct genl_ops - generic netlink operations
* @cmd: command identifier
* @internal_flags: flags used by the family
- * @flags: flags
+ * @flags: GENL_* flags (%GENL_ADMIN_PERM or %GENL_UNS_ADMIN_PERM)
* @maxattr: maximum number of attributes supported
* @policy: netlink policy (takes precedence over family policy)
* @validate: validation flags from enum genl_validate_flags
diff --git a/include/net/gro.h b/include/net/gro.h
index 867656b0739c..24003dea8fa4 100644
--- a/include/net/gro.h
+++ b/include/net/gro.h
@@ -439,7 +439,7 @@ static inline void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb,
{
list_add_tail(&skb->list, &napi->rx_list);
napi->rx_count += segs;
- if (napi->rx_count >= gro_normal_batch)
+ if (napi->rx_count >= READ_ONCE(gro_normal_batch))
gro_normal_list(napi);
}
diff --git a/include/net/mptcp.h b/include/net/mptcp.h
index ac9cf7271d46..412479ebf5ad 100644
--- a/include/net/mptcp.h
+++ b/include/net/mptcp.h
@@ -291,4 +291,8 @@ struct mptcp_sock *bpf_mptcp_sock_from_subflow(struct sock *sk);
static inline struct mptcp_sock *bpf_mptcp_sock_from_subflow(struct sock *sk) { return NULL; }
#endif
+#if !IS_ENABLED(CONFIG_MPTCP)
+struct mptcp_sock { };
+#endif
+
#endif /* __NET_MPTCP_H */
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 9f0bab0589d9..3827a6b395fd 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -83,6 +83,7 @@ struct neigh_parms {
struct rcu_head rcu_head;
int reachable_time;
+ int qlen;
int data[NEIGH_VAR_DATA_MAX];
DECLARE_BITMAP(data_state, NEIGH_VAR_DATA_MAX);
};
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index d5326c44b453..cd982f4a0f50 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -270,6 +270,7 @@ void flow_offload_refresh(struct nf_flowtable *flow_table,
struct flow_offload_tuple_rhash *flow_offload_lookup(struct nf_flowtable *flow_table,
struct flow_offload_tuple *tuple);
+void nf_flow_table_gc_run(struct nf_flowtable *flow_table);
void nf_flow_table_gc_cleanup(struct nf_flowtable *flowtable,
struct net_device *dev);
void nf_flow_table_cleanup(struct net_device *dev);
@@ -306,6 +307,8 @@ void nf_flow_offload_stats(struct nf_flowtable *flowtable,
struct flow_offload *flow);
void nf_flow_table_offload_flush(struct nf_flowtable *flowtable);
+void nf_flow_table_offload_flush_cleanup(struct nf_flowtable *flowtable);
+
int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
struct net_device *dev,
enum flow_block_command cmd);
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 8bfb9c74afbf..cdb7db9b0e25 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -221,13 +221,18 @@ struct nft_ctx {
bool report;
};
+enum nft_data_desc_flags {
+ NFT_DATA_DESC_SETELEM = (1 << 0),
+};
+
struct nft_data_desc {
enum nft_data_types type;
+ unsigned int size;
unsigned int len;
+ unsigned int flags;
};
-int nft_data_init(const struct nft_ctx *ctx,
- struct nft_data *data, unsigned int size,
+int nft_data_init(const struct nft_ctx *ctx, struct nft_data *data,
struct nft_data_desc *desc, const struct nlattr *nla);
void nft_data_hold(const struct nft_data *data, enum nft_data_types type);
void nft_data_release(const struct nft_data *data, enum nft_data_types type);
@@ -651,6 +656,7 @@ extern const struct nft_set_ext_type nft_set_ext_types[];
struct nft_set_ext_tmpl {
u16 len;
u8 offset[NFT_SET_EXT_NUM];
+ u8 ext_len[NFT_SET_EXT_NUM];
};
/**
@@ -680,7 +686,8 @@ static inline int nft_set_ext_add_length(struct nft_set_ext_tmpl *tmpl, u8 id,
return -EINVAL;
tmpl->offset[id] = tmpl->len;
- tmpl->len += nft_set_ext_types[id].len + len;
+ tmpl->ext_len[id] = nft_set_ext_types[id].len + len;
+ tmpl->len += tmpl->ext_len[id];
return 0;
}
@@ -1645,6 +1652,7 @@ struct nftables_pernet {
struct list_head module_list;
struct list_head notify_list;
struct mutex commit_mutex;
+ u64 table_handle;
unsigned int base_seq;
u8 validate_state;
};
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index 0677cd3de034..c396a3862e80 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -95,7 +95,7 @@ struct nf_ip_net {
struct netns_ct {
#ifdef CONFIG_NF_CONNTRACK_EVENTS
- bool ctnetlink_has_listener;
+ u8 ctnetlink_has_listener;
bool ecache_dwork_pending;
#endif
u8 sysctl_log_invalid; /* Log invalid packets */
diff --git a/include/net/sock.h b/include/net/sock.h
index a7273b289188..d08cfe190a78 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -545,14 +545,26 @@ enum sk_pacing {
SK_PACING_FQ = 2,
};
-/* Pointer stored in sk_user_data might not be suitable for copying
- * when cloning the socket. For instance, it can point to a reference
- * counted object. sk_user_data bottom bit is set if pointer must not
- * be copied.
+/* flag bits in sk_user_data
+ *
+ * - SK_USER_DATA_NOCOPY: Pointer stored in sk_user_data might
+ * not be suitable for copying when cloning the socket. For instance,
+ * it can point to a reference counted object. sk_user_data bottom
+ * bit is set if pointer must not be copied.
+ *
+ * - SK_USER_DATA_BPF: Mark whether sk_user_data field is
+ * managed/owned by a BPF reuseport array. This bit should be set
+ * when sk_user_data's sk is added to the bpf's reuseport_array.
+ *
+ * - SK_USER_DATA_PSOCK: Mark whether pointer stored in
+ * sk_user_data points to psock type. This bit should be set
+ * when sk_user_data is assigned to a psock object.
*/
#define SK_USER_DATA_NOCOPY 1UL
-#define SK_USER_DATA_BPF 2UL /* Managed by BPF */
-#define SK_USER_DATA_PTRMASK ~(SK_USER_DATA_NOCOPY | SK_USER_DATA_BPF)
+#define SK_USER_DATA_BPF 2UL
+#define SK_USER_DATA_PSOCK 4UL
+#define SK_USER_DATA_PTRMASK ~(SK_USER_DATA_NOCOPY | SK_USER_DATA_BPF |\
+ SK_USER_DATA_PSOCK)
/**
* sk_user_data_is_nocopy - Test if sk_user_data pointer must not be copied
@@ -565,24 +577,65 @@ static inline bool sk_user_data_is_nocopy(const struct sock *sk)
#define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))
+/**
+ * __locked_read_sk_user_data_with_flags - return the pointer
+ * only if all of the argument flags have been set in sk_user_data;
+ * otherwise return NULL

+ *
+ * @sk: socket
+ * @flags: flag bits
+ *
+ * The caller must be holding sk->sk_callback_lock.
+ */
+static inline void *
+__locked_read_sk_user_data_with_flags(const struct sock *sk,
+ uintptr_t flags)
+{
+ uintptr_t sk_user_data =
+ (uintptr_t)rcu_dereference_check(__sk_user_data(sk),
+ lockdep_is_held(&sk->sk_callback_lock));
+
+ WARN_ON_ONCE(flags & SK_USER_DATA_PTRMASK);
+
+ if ((sk_user_data & flags) == flags)
+ return (void *)(sk_user_data & SK_USER_DATA_PTRMASK);
+ return NULL;
+}
+
+/**
+ * __rcu_dereference_sk_user_data_with_flags - return the pointer
+ * only if all of the argument flags have been set in sk_user_data;
+ * otherwise return NULL
+ *
+ * @sk: socket
+ * @flags: flag bits
+ */
+static inline void *
+__rcu_dereference_sk_user_data_with_flags(const struct sock *sk,
+ uintptr_t flags)
+{
+ uintptr_t sk_user_data = (uintptr_t)rcu_dereference(__sk_user_data(sk));
+
+ WARN_ON_ONCE(flags & SK_USER_DATA_PTRMASK);
+
+ if ((sk_user_data & flags) == flags)
+ return (void *)(sk_user_data & SK_USER_DATA_PTRMASK);
+ return NULL;
+}
+
#define rcu_dereference_sk_user_data(sk) \
+ __rcu_dereference_sk_user_data_with_flags(sk, 0)
+#define __rcu_assign_sk_user_data_with_flags(sk, ptr, flags) \
({ \
- void *__tmp = rcu_dereference(__sk_user_data((sk))); \
- (void *)((uintptr_t)__tmp & SK_USER_DATA_PTRMASK); \
-})
-#define rcu_assign_sk_user_data(sk, ptr) \
-({ \
- uintptr_t __tmp = (uintptr_t)(ptr); \
- WARN_ON_ONCE(__tmp & ~SK_USER_DATA_PTRMASK); \
- rcu_assign_pointer(__sk_user_data((sk)), __tmp); \
-})
-#define rcu_assign_sk_user_data_nocopy(sk, ptr) \
-({ \
- uintptr_t __tmp = (uintptr_t)(ptr); \
- WARN_ON_ONCE(__tmp & ~SK_USER_DATA_PTRMASK); \
+ uintptr_t __tmp1 = (uintptr_t)(ptr), \
+ __tmp2 = (uintptr_t)(flags); \
+ WARN_ON_ONCE(__tmp1 & ~SK_USER_DATA_PTRMASK); \
+ WARN_ON_ONCE(__tmp2 & SK_USER_DATA_PTRMASK); \
rcu_assign_pointer(__sk_user_data((sk)), \
- __tmp | SK_USER_DATA_NOCOPY); \
+ __tmp1 | __tmp2); \
})
+#define rcu_assign_sk_user_data(sk, ptr) \
+ __rcu_assign_sk_user_data_with_flags(sk, ptr, 0)
static inline
struct net *sock_net(const struct sock *sk)
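
With the flag-aware accessors above, a pointer can be stored together with its marker bits and only retrieved when those bits are present. A hedged sketch; the psock object and the surrounding locking context are assumed:

    #include <net/sock.h>

    /* store: tag the pointer as a psock that must not be copied on clone */
    static void example_store_psock(struct sock *sk, void *psock)
    {
            __rcu_assign_sk_user_data_with_flags(sk, psock,
                                                 SK_USER_DATA_NOCOPY |
                                                 SK_USER_DATA_PSOCK);
    }

    /* lookup: caller holds rcu_read_lock(); NULL is returned unless the
     * SK_USER_DATA_PSOCK bit is set on sk_user_data */
    static void *example_lookup_psock(const struct sock *sk)
    {
            return __rcu_dereference_sk_user_data_with_flags(sk, SK_USER_DATA_PSOCK);
    }
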
diff --git a/include/net/tls.h b/include/net/tls.h
index b75b5727abdb..cb205f9d9473 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -237,7 +237,7 @@ struct tls_context {
void *priv_ctx_tx;
void *priv_ctx_rx;
- struct net_device *netdev;
+ struct net_device __rcu *netdev;
/* rw cache line */
struct cipher_context tx;
diff --git a/include/ras/ras_event.h b/include/ras/ras_event.h
index d0337a41141c..cbd3ddd7c33d 100644
--- a/include/ras/ras_event.h
+++ b/include/ras/ras_event.h
@@ -360,7 +360,6 @@ TRACE_EVENT(aer_event,
EM ( MF_MSG_DIFFERENT_COMPOUND, "different compound page after locking" ) \
EM ( MF_MSG_HUGE, "huge page" ) \
EM ( MF_MSG_FREE_HUGE, "free huge page" ) \
- EM ( MF_MSG_NON_PMD_HUGE, "non-pmd-sized huge page" ) \
EM ( MF_MSG_UNMAP_FAILED, "unmapping failed page" ) \
EM ( MF_MSG_DIRTY_SWAPCACHE, "dirty swapcache page" ) \
EM ( MF_MSG_CLEAN_SWAPCACHE, "clean swapcache page" ) \
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 9c6317cf80d5..975d6e9efbcb 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -4013,6 +4013,17 @@ static inline bool ib_uses_virt_dma(struct ib_device *dev)
return IS_ENABLED(CONFIG_INFINIBAND_VIRT_DMA) && !dev->dma_device;
}
+/*
+ * Check if a IB device's underlying DMA mapping supports P2PDMA transfers.
+ */
+static inline bool ib_dma_pci_p2p_dma_supported(struct ib_device *dev)
+{
+ if (ib_uses_virt_dma(dev))
+ return false;
+
+ return dma_pci_p2pdma_supported(dev->dma_device);
+}
+
/**
* ib_dma_mapping_error - check a DMA addr for error
* @dev: The device for which the dma_addr was created
@@ -4603,7 +4614,7 @@ static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
/**
* ib_lid_cpu16 - Return lid in 16bit CPU encoding.
- * In the current implementation the only way to get
+ * In the current implementation the only way to
* get the 32bit lid is from other sources for OPA.
* For IB, lids will always be 16bits so cast the
* value accordingly.
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index d989f030fae0..5b18e2e36ee6 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -108,6 +108,7 @@ struct rdma_cm_id {
enum rdma_ucm_port_space ps;
enum ib_qp_type qp_type;
u32 port_num;
+ struct work_struct net_work;
};
struct rdma_cm_id *
diff --git a/include/rv/automata.h b/include/rv/automata.h
new file mode 100644
index 000000000000..eb9e636809a0
--- /dev/null
+++ b/include/rv/automata.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019-2022 Red Hat, Inc. Daniel Bristot de Oliveira <bristot@kernel.org>
+ *
+ * Deterministic automata helper functions, to be used with the automata
+ * models in C generated by the dot2k tool.
+ */
+
+/*
+ * DECLARE_AUTOMATA_HELPERS - define a set of helper functions for automata
+ *
+ * Define a set of helper functions for automata. The 'name' argument is used
+ * as suffix for the functions and data. These functions will handle automaton
+ * with data type 'type'.
+ */
+#define DECLARE_AUTOMATA_HELPERS(name, type) \
+ \
+/* \
+ * model_get_state_name_##name - return the (string) name of the given state \
+ */ \
+static char *model_get_state_name_##name(enum states_##name state) \
+{ \
+ if ((state < 0) || (state >= state_max_##name)) \
+ return "INVALID"; \
+ \
+ return automaton_##name.state_names[state]; \
+} \
+ \
+/* \
+ * model_get_event_name_##name - return the (string) name of the given event \
+ */ \
+static char *model_get_event_name_##name(enum events_##name event) \
+{ \
+ if ((event < 0) || (event >= event_max_##name)) \
+ return "INVALID"; \
+ \
+ return automaton_##name.event_names[event]; \
+} \
+ \
+/* \
+ * model_get_initial_state_##name - return the automaton's initial state \
+ */ \
+static inline type model_get_initial_state_##name(void) \
+{ \
+ return automaton_##name.initial_state; \
+} \
+ \
+/* \
+ * model_get_next_state_##name - process an automaton event occurrence \
+ * \
+ * Given the current state (curr_state) and the event (event), returns \
+ * the next state, or INVALID_STATE in case of error. \
+ */ \
+static inline type model_get_next_state_##name(enum states_##name curr_state, \
+ enum events_##name event) \
+{ \
+ if ((curr_state < 0) || (curr_state >= state_max_##name)) \
+ return INVALID_STATE; \
+ \
+ if ((event < 0) || (event >= event_max_##name)) \
+ return INVALID_STATE; \
+ \
+ return automaton_##name.function[curr_state][event]; \
+} \
+ \
+/* \
+ * model_is_final_state_##name - check if the given state is a final state \
+ */ \
+static inline bool model_is_final_state_##name(enum states_##name state) \
+{ \
+ if ((state < 0) || (state >= state_max_##name)) \
+ return 0; \
+ \
+ return automaton_##name.final_states[state]; \
+}
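
Instantiating DECLARE_AUTOMATA_HELPERS(name, type) expands a family of model_*_##name() accessors around a dot2k-generated automaton_##name description. A hedged sketch of stepping such an automaton one event at a time; the 'wip' naming and its enums are purely illustrative and assume the generated tables are in scope:

    #include <linux/rv.h>
    #include <rv/automata.h>

    /* Assumes dot2k generated 'enum states_wip', 'enum events_wip',
     * 'state_max_wip', 'event_max_wip' and the 'automaton_wip' tables. */
    DECLARE_AUTOMATA_HELPERS(wip, unsigned char)

    static bool example_step(unsigned char *state, enum events_wip event)
    {
            unsigned char next = model_get_next_state_wip(*state, event);

            if (next == INVALID_STATE)
                    return false;           /* event not allowed in this state */

            *state = next;
            return model_is_final_state_wip(next);
    }
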
diff --git a/include/rv/da_monitor.h b/include/rv/da_monitor.h
new file mode 100644
index 000000000000..9eb75683e012
--- /dev/null
+++ b/include/rv/da_monitor.h
@@ -0,0 +1,544 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019-2022 Red Hat, Inc. Daniel Bristot de Oliveira <bristot@kernel.org>
+ *
+ * Deterministic automata (DA) monitor functions, to be used together
+ * with automata models in C generated by the dot2k tool.
+ *
+ * The dot2k tool is available at tools/verification/dot2k/
+ *
+ * For further information, see:
+ * Documentation/trace/rv/da_monitor_synthesis.rst
+ */
+
+#include <rv/automata.h>
+#include <linux/rv.h>
+#include <linux/bug.h>
+
+#ifdef CONFIG_RV_REACTORS
+
+#define DECLARE_RV_REACTING_HELPERS(name, type) \
+static char REACT_MSG_##name[1024]; \
+ \
+static inline char *format_react_msg_##name(type curr_state, type event) \
+{ \
+ snprintf(REACT_MSG_##name, 1024, \
+ "rv: monitor %s does not allow event %s on state %s\n", \
+ #name, \
+ model_get_event_name_##name(event), \
+ model_get_state_name_##name(curr_state)); \
+ return REACT_MSG_##name; \
+} \
+ \
+static void cond_react_##name(char *msg) \
+{ \
+ if (rv_##name.react) \
+ rv_##name.react(msg); \
+} \
+ \
+static bool rv_reacting_on_##name(void) \
+{ \
+ return rv_reacting_on(); \
+}
+
+#else /* CONFIG_RV_REACTOR */
+
+#define DECLARE_RV_REACTING_HELPERS(name, type) \
+static inline char *format_react_msg_##name(type curr_state, type event) \
+{ \
+ return NULL; \
+} \
+ \
+static void cond_react_##name(char *msg) \
+{ \
+ return; \
+} \
+ \
+static bool rv_reacting_on_##name(void) \
+{ \
+ return 0; \
+}
+#endif
+
+/*
+ * Generic helpers for all types of deterministic automata monitors.
+ */
+#define DECLARE_DA_MON_GENERIC_HELPERS(name, type) \
+ \
+DECLARE_RV_REACTING_HELPERS(name, type) \
+ \
+/* \
+ * da_monitor_reset_##name - reset a monitor and setting it to init state \
+ */ \
+static inline void da_monitor_reset_##name(struct da_monitor *da_mon) \
+{ \
+ da_mon->monitoring = 0; \
+ da_mon->curr_state = model_get_initial_state_##name(); \
+} \
+ \
+/* \
+ * da_monitor_curr_state_##name - return the current state \
+ */ \
+static inline type da_monitor_curr_state_##name(struct da_monitor *da_mon) \
+{ \
+ return da_mon->curr_state; \
+} \
+ \
+/* \
+ * da_monitor_set_state_##name - set the new current state \
+ */ \
+static inline void \
+da_monitor_set_state_##name(struct da_monitor *da_mon, enum states_##name state) \
+{ \
+ da_mon->curr_state = state; \
+} \
+ \
+/* \
+ * da_monitor_start_##name - start monitoring \
+ * \
+ * The monitor will ignore all events until monitoring is set to true. This \
+ * function needs to be called to tell the monitor to start monitoring. \
+ */ \
+static inline void da_monitor_start_##name(struct da_monitor *da_mon) \
+{ \
+ da_mon->curr_state = model_get_initial_state_##name(); \
+ da_mon->monitoring = 1; \
+} \
+ \
+/* \
+ * da_monitoring_##name - returns true if the monitor is processing events \
+ */ \
+static inline bool da_monitoring_##name(struct da_monitor *da_mon) \
+{ \
+ return da_mon->monitoring; \
+} \
+ \
+/* \
+ * da_monitor_enabled_##name - checks if the monitor is enabled \
+ */ \
+static inline bool da_monitor_enabled_##name(void) \
+{ \
+ /* global switch */ \
+ if (unlikely(!rv_monitoring_on())) \
+ return 0; \
+ \
+ /* monitor enabled */ \
+ if (unlikely(!rv_##name.enabled)) \
+ return 0; \
+ \
+ return 1; \
+} \
+ \
+/* \
+ * da_monitor_handling_event_##name - checks if the monitor is ready to handle events \
+ */ \
+static inline bool da_monitor_handling_event_##name(struct da_monitor *da_mon) \
+{ \
+ \
+ if (!da_monitor_enabled_##name()) \
+ return 0; \
+ \
+ /* monitor is actually monitoring */ \
+ if (unlikely(!da_monitoring_##name(da_mon))) \
+ return 0; \
+ \
+ return 1; \
+}
+
+/*
+ * Event handler for implicit monitors. Implicit monitor is the one which the
+ * handler does not need to specify which da_monitor to manipulate. Examples
+ * of implicit monitor are the per_cpu or the global ones.
+ */
+#define DECLARE_DA_MON_MODEL_HANDLER_IMPLICIT(name, type) \
+ \
+static inline bool \
+da_event_##name(struct da_monitor *da_mon, enum events_##name event) \
+{ \
+ type curr_state = da_monitor_curr_state_##name(da_mon); \
+ type next_state = model_get_next_state_##name(curr_state, event); \
+ \
+ if (next_state != INVALID_STATE) { \
+ da_monitor_set_state_##name(da_mon, next_state); \
+ \
+ trace_event_##name(model_get_state_name_##name(curr_state), \
+ model_get_event_name_##name(event), \
+ model_get_state_name_##name(next_state), \
+ model_is_final_state_##name(next_state)); \
+ \
+ return true; \
+ } \
+ \
+ if (rv_reacting_on_##name()) \
+ cond_react_##name(format_react_msg_##name(curr_state, event)); \
+ \
+ trace_error_##name(model_get_state_name_##name(curr_state), \
+ model_get_event_name_##name(event)); \
+ \
+ return false; \
+} \
+
+/*
+ * Event handler for per_task monitors.
+ */
+#define DECLARE_DA_MON_MODEL_HANDLER_PER_TASK(name, type) \
+ \
+static inline bool da_event_##name(struct da_monitor *da_mon, struct task_struct *tsk, \
+ enum events_##name event) \
+{ \
+ type curr_state = da_monitor_curr_state_##name(da_mon); \
+ type next_state = model_get_next_state_##name(curr_state, event); \
+ \
+ if (next_state != INVALID_STATE) { \
+ da_monitor_set_state_##name(da_mon, next_state); \
+ \
+ trace_event_##name(tsk->pid, \
+ model_get_state_name_##name(curr_state), \
+ model_get_event_name_##name(event), \
+ model_get_state_name_##name(next_state), \
+ model_is_final_state_##name(next_state)); \
+ \
+ return true; \
+ } \
+ \
+ if (rv_reacting_on_##name()) \
+ cond_react_##name(format_react_msg_##name(curr_state, event)); \
+ \
+ trace_error_##name(tsk->pid, \
+ model_get_state_name_##name(curr_state), \
+ model_get_event_name_##name(event)); \
+ \
+ return false; \
+}
+
+/*
+ * Functions to define, init and get a global monitor.
+ */
+#define DECLARE_DA_MON_INIT_GLOBAL(name, type) \
+ \
+/* \
+ * global monitor (a single variable) \
+ */ \
+static struct da_monitor da_mon_##name; \
+ \
+/* \
+ * da_get_monitor_##name - return the global monitor address \
+ */ \
+static struct da_monitor *da_get_monitor_##name(void) \
+{ \
+ return &da_mon_##name; \
+} \
+ \
+/* \
+ * da_monitor_reset_all_##name - reset the single monitor \
+ */ \
+static void da_monitor_reset_all_##name(void) \
+{ \
+ da_monitor_reset_##name(da_get_monitor_##name()); \
+} \
+ \
+/* \
+ * da_monitor_init_##name - initialize a monitor \
+ */ \
+static inline int da_monitor_init_##name(void) \
+{ \
+ da_monitor_reset_all_##name(); \
+ return 0; \
+} \
+ \
+/* \
+ * da_monitor_destroy_##name - destroy the monitor \
+ */ \
+static inline void da_monitor_destroy_##name(void) \
+{ \
+ return; \
+}
+
+/*
+ * Functions to define, init and get a per-cpu monitor.
+ */
+#define DECLARE_DA_MON_INIT_PER_CPU(name, type) \
+ \
+/* \
+ * per-cpu monitor variables \
+ */ \
+DEFINE_PER_CPU(struct da_monitor, da_mon_##name); \
+ \
+/* \
+ * da_get_monitor_##name - return current CPU monitor address \
+ */ \
+static struct da_monitor *da_get_monitor_##name(void) \
+{ \
+ return this_cpu_ptr(&da_mon_##name); \
+} \
+ \
+/* \
+ * da_monitor_reset_all_##name - reset all CPUs' monitor \
+ */ \
+static void da_monitor_reset_all_##name(void) \
+{ \
+ struct da_monitor *da_mon; \
+ int cpu; \
+ for_each_cpu(cpu, cpu_online_mask) { \
+ da_mon = per_cpu_ptr(&da_mon_##name, cpu); \
+ da_monitor_reset_##name(da_mon); \
+ } \
+} \
+ \
+/* \
+ * da_monitor_init_##name - initialize all CPUs' monitor \
+ */ \
+static inline int da_monitor_init_##name(void) \
+{ \
+ da_monitor_reset_all_##name(); \
+ return 0; \
+} \
+ \
+/* \
+ * da_monitor_destroy_##name - destroy the monitor \
+ */ \
+static inline void da_monitor_destroy_##name(void) \
+{ \
+ return; \
+}
+
+/*
+ * Functions to define, init and get a per-task monitor.
+ */
+#define DECLARE_DA_MON_INIT_PER_TASK(name, type) \
+ \
+/* \
+ * The per-task monitor is stored in a vector in the task struct. This variable \
+ * stores the position on the vector reserved for this monitor. \
+ */ \
+static int task_mon_slot_##name = RV_PER_TASK_MONITOR_INIT; \
+ \
+/* \
+ * da_get_monitor_##name - return the monitor in the allocated slot for tsk \
+ */ \
+static inline struct da_monitor *da_get_monitor_##name(struct task_struct *tsk) \
+{ \
+ return &tsk->rv[task_mon_slot_##name].da_mon; \
+} \
+ \
+static void da_monitor_reset_all_##name(void) \
+{ \
+ struct task_struct *g, *p; \
+ \
+ read_lock(&tasklist_lock); \
+ for_each_process_thread(g, p) \
+ da_monitor_reset_##name(da_get_monitor_##name(p)); \
+ read_unlock(&tasklist_lock); \
+} \
+ \
+/* \
+ * da_monitor_init_##name - initialize the per-task monitor \
+ * \
+ * Try to allocate a slot in the task's vector of monitors. If there \
+ * is an available slot, use it and reset all task's monitor. \
+ */ \
+static int da_monitor_init_##name(void) \
+{ \
+ int slot; \
+ \
+ slot = rv_get_task_monitor_slot(); \
+ if (slot < 0 || slot >= RV_PER_TASK_MONITOR_INIT) \
+ return slot; \
+ \
+ task_mon_slot_##name = slot; \
+ \
+ da_monitor_reset_all_##name(); \
+ return 0; \
+} \
+ \
+/* \
+ * da_monitor_destroy_##name - return the allocated slot \
+ */ \
+static inline void da_monitor_destroy_##name(void) \
+{ \
+ if (task_mon_slot_##name == RV_PER_TASK_MONITOR_INIT) { \
+ WARN_ONCE(1, "Disabling a disabled monitor: " #name); \
+ return; \
+ } \
+ rv_put_task_monitor_slot(task_mon_slot_##name); \
+ task_mon_slot_##name = RV_PER_TASK_MONITOR_INIT; \
+ return; \
+}
+
+/*
+ * Handle event for implicit monitor: da_get_monitor_##name() will figure out
+ * the monitor.
+ */
+#define DECLARE_DA_MON_MONITOR_HANDLER_IMPLICIT(name, type) \
+ \
+static inline void __da_handle_event_##name(struct da_monitor *da_mon, \
+ enum events_##name event) \
+{ \
+ bool retval; \
+ \
+ retval = da_event_##name(da_mon, event); \
+ if (!retval) \
+ da_monitor_reset_##name(da_mon); \
+} \
+ \
+/* \
+ * da_handle_event_##name - handle an event \
+ */ \
+static inline void da_handle_event_##name(enum events_##name event) \
+{ \
+ struct da_monitor *da_mon = da_get_monitor_##name(); \
+ bool retval; \
+ \
+ retval = da_monitor_handling_event_##name(da_mon); \
+ if (!retval) \
+ return; \
+ \
+ __da_handle_event_##name(da_mon, event); \
+} \
+ \
+/* \
+ * da_handle_start_event_##name - start monitoring or handle event \
+ * \
+ * This function is used to notify the monitor that the system is returning \
+ * to the initial state, so the monitor can start monitoring in the next event. \
+ * Thus: \
+ * \
+ * If the monitor already started, handle the event. \
+ * If the monitor did not start yet, start the monitor but skip the event. \
+ */ \
+static inline bool da_handle_start_event_##name(enum events_##name event) \
+{ \
+ struct da_monitor *da_mon; \
+ \
+ if (!da_monitor_enabled_##name()) \
+ return 0; \
+ \
+ da_mon = da_get_monitor_##name(); \
+ \
+ if (unlikely(!da_monitoring_##name(da_mon))) { \
+ da_monitor_start_##name(da_mon); \
+ return 0; \
+ } \
+ \
+ __da_handle_event_##name(da_mon, event); \
+ \
+ return 1; \
+} \
+ \
+/* \
+ * da_handle_start_run_event_##name - start monitoring and handle event \
+ * \
+ * This function is used to notify the monitor that the system is in the \
+ * initial state, so the monitor can start monitoring and handling event. \
+ */ \
+static inline bool da_handle_start_run_event_##name(enum events_##name event) \
+{ \
+ struct da_monitor *da_mon; \
+ \
+ if (!da_monitor_enabled_##name()) \
+ return 0; \
+ \
+ da_mon = da_get_monitor_##name(); \
+ \
+ if (unlikely(!da_monitoring_##name(da_mon))) \
+ da_monitor_start_##name(da_mon); \
+ \
+ __da_handle_event_##name(da_mon, event); \
+ \
+ return 1; \
+}
+
+/*
+ * Handle event for per task.
+ */
+#define DECLARE_DA_MON_MONITOR_HANDLER_PER_TASK(name, type) \
+ \
+static inline void \
+__da_handle_event_##name(struct da_monitor *da_mon, struct task_struct *tsk, \
+ enum events_##name event) \
+{ \
+ bool retval; \
+ \
+ retval = da_event_##name(da_mon, tsk, event); \
+ if (!retval) \
+ da_monitor_reset_##name(da_mon); \
+} \
+ \
+/* \
+ * da_handle_event_##name - handle an event \
+ */ \
+static inline void \
+da_handle_event_##name(struct task_struct *tsk, enum events_##name event) \
+{ \
+ struct da_monitor *da_mon = da_get_monitor_##name(tsk); \
+ bool retval; \
+ \
+ retval = da_monitor_handling_event_##name(da_mon); \
+ if (!retval) \
+ return; \
+ \
+ __da_handle_event_##name(da_mon, tsk, event); \
+} \
+ \
+/* \
+ * da_handle_start_event_##name - start monitoring or handle event \
+ * \
+ * This function is used to notify the monitor that the system is returning \
+ * to the initial state, so the monitor can start monitoring in the next event. \
+ * Thus: \
+ * \
+ * If the monitor already started, handle the event. \
+ * If the monitor did not start yet, start the monitor but skip the event. \
+ */ \
+static inline bool \
+da_handle_start_event_##name(struct task_struct *tsk, enum events_##name event) \
+{ \
+ struct da_monitor *da_mon; \
+ \
+ if (!da_monitor_enabled_##name()) \
+ return 0; \
+ \
+ da_mon = da_get_monitor_##name(tsk); \
+ \
+ if (unlikely(!da_monitoring_##name(da_mon))) { \
+ da_monitor_start_##name(da_mon); \
+ return 0; \
+ } \
+ \
+ __da_handle_event_##name(da_mon, tsk, event); \
+ \
+ return 1; \
+}
+
+/*
+ * Entry point for the global monitor.
+ */
+#define DECLARE_DA_MON_GLOBAL(name, type) \
+ \
+DECLARE_AUTOMATA_HELPERS(name, type) \
+DECLARE_DA_MON_GENERIC_HELPERS(name, type) \
+DECLARE_DA_MON_MODEL_HANDLER_IMPLICIT(name, type) \
+DECLARE_DA_MON_INIT_GLOBAL(name, type) \
+DECLARE_DA_MON_MONITOR_HANDLER_IMPLICIT(name, type)
+
+/*
+ * Entry point for the per-cpu monitor.
+ */
+#define DECLARE_DA_MON_PER_CPU(name, type) \
+ \
+DECLARE_AUTOMATA_HELPERS(name, type) \
+DECLARE_DA_MON_GENERIC_HELPERS(name, type) \
+DECLARE_DA_MON_MODEL_HANDLER_IMPLICIT(name, type) \
+DECLARE_DA_MON_INIT_PER_CPU(name, type) \
+DECLARE_DA_MON_MONITOR_HANDLER_IMPLICIT(name, type)
+
+/*
+ * Entry point for the per-task monitor.
+ */
+#define DECLARE_DA_MON_PER_TASK(name, type) \
+ \
+DECLARE_AUTOMATA_HELPERS(name, type) \
+DECLARE_DA_MON_GENERIC_HELPERS(name, type) \
+DECLARE_DA_MON_MODEL_HANDLER_PER_TASK(name, type) \
+DECLARE_DA_MON_INIT_PER_TASK(name, type) \
+DECLARE_DA_MON_MONITOR_HANDLER_PER_TASK(name, type)
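
A monitor built with dot2k typically picks one of the three entry-point macros above and then calls the generated da_handle_*_##name() helpers from its tracepoint handlers. A hedged per-cpu sketch, again borrowing the 'wip' naming purely for illustration; it assumes the dot2k-generated model, the trace_event_wip()/trace_error_wip() trace events and a struct rv_monitor rv_wip are already defined:

    #include <rv/da_monitor.h>

    DECLARE_DA_MON_PER_CPU(wip, unsigned char)

    static void handle_preempt_disable(void *data, unsigned long ip,
                                       unsigned long parent_ip)
    {
            da_handle_event_wip(preempt_disable_wip);
    }

    static void handle_sched_waking(void *data, struct task_struct *task)
    {
            da_handle_event_wip(sched_waking_wip);
    }

    static void handle_preempt_enable(void *data, unsigned long ip,
                                      unsigned long parent_ip)
    {
            /* returning to the initial state: start (or keep) monitoring */
            da_handle_start_event_wip(preempt_enable_wip);
    }
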
diff --git a/include/rv/instrumentation.h b/include/rv/instrumentation.h
new file mode 100644
index 000000000000..d4e7a02ede1a
--- /dev/null
+++ b/include/rv/instrumentation.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019-2022 Red Hat, Inc. Daniel Bristot de Oliveira <bristot@kernel.org>
+ *
+ * Helper functions to facilitate the instrumentation of auto-generated
+ * RV monitors created by dot2k.
+ *
+ * The dot2k tool is available at tools/verification/dot2/
+ */
+
+#include <linux/ftrace.h>
+
+/*
+ * rv_attach_trace_probe - check and attach a handler function to a tracepoint
+ */
+#define rv_attach_trace_probe(monitor, tp, rv_handler) \
+ do { \
+ check_trace_callback_type_##tp(rv_handler); \
+ WARN_ONCE(register_trace_##tp(rv_handler, NULL), \
+ "fail attaching " #monitor " " #tp "handler"); \
+ } while (0)
+
+/*
+ * rv_detach_trace_probe - detach a handler function from a tracepoint
+ */
+#define rv_detach_trace_probe(monitor, tp, rv_handler) \
+ do { \
+ unregister_trace_##tp(rv_handler, NULL); \
+ } while (0)
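
The attach/detach wrappers are intended for a monitor's enable/disable paths. A short sketch of such a pair, reusing the illustrative handler names from the per-cpu sketch above and assuming the preemptirq and sched tracepoints are available in the build:

    #include <rv/instrumentation.h>
    #include <trace/events/preemptirq.h>
    #include <trace/events/sched.h>

    static int example_enable_wip(void)
    {
            rv_attach_trace_probe("wip", preempt_disable, handle_preempt_disable);
            rv_attach_trace_probe("wip", preempt_enable, handle_preempt_enable);
            rv_attach_trace_probe("wip", sched_waking, handle_sched_waking);
            return 0;
    }

    static void example_disable_wip(void)
    {
            rv_detach_trace_probe("wip", preempt_disable, handle_preempt_disable);
            rv_detach_trace_probe("wip", preempt_enable, handle_preempt_enable);
            rv_detach_trace_probe("wip", sched_waking, handle_sched_waking);
    }
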
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index c0703cd20a99..654cc3918c94 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -135,9 +135,6 @@ struct iscsi_task {
void *dd_data; /* driver/transport data */
};
-/* invalid scsi_task pointer */
-#define INVALID_SCSI_TASK (struct iscsi_task *)-1l
-
static inline int iscsi_task_has_unsol_data(struct iscsi_task *task)
{
return task->unsol_r2t.data_length > task->unsol_r2t.sent;
@@ -213,6 +210,8 @@ struct iscsi_conn {
struct list_head cmdqueue; /* data-path cmd queue */
struct list_head requeue; /* tasks needing another run */
struct work_struct xmitwork; /* per-conn. xmit workqueue */
+ /* recv */
+ struct work_struct recvwork;
unsigned long flags; /* ISCSI_CONN_FLAGs */
/* negotiated params */
@@ -411,7 +410,7 @@ extern int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev);
extern struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
int dd_data_size,
bool xmit_can_sleep);
-extern void iscsi_host_remove(struct Scsi_Host *shost);
+extern void iscsi_host_remove(struct Scsi_Host *shost, bool is_shutdown);
extern void iscsi_host_free(struct Scsi_Host *shost);
extern int iscsi_target_alloc(struct scsi_target *starget);
extern int iscsi_host_get_max_scsi_cmds(struct Scsi_Host *shost,
@@ -452,8 +451,10 @@ extern int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
extern int iscsi_conn_get_addr_param(struct sockaddr_storage *addr,
enum iscsi_param param, char *buf);
extern void iscsi_suspend_tx(struct iscsi_conn *conn);
+extern void iscsi_suspend_rx(struct iscsi_conn *conn);
extern void iscsi_suspend_queue(struct iscsi_conn *conn);
-extern void iscsi_conn_queue_work(struct iscsi_conn *conn);
+extern void iscsi_conn_queue_xmit(struct iscsi_conn *conn);
+extern void iscsi_conn_queue_recv(struct iscsi_conn *conn);
#define iscsi_conn_printk(prefix, _c, fmt, a...) \
iscsi_cls_conn_printk(prefix, ((struct iscsi_conn *)_c)->cls_conn, \
@@ -478,7 +479,7 @@ extern struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *, itt_t);
extern void iscsi_requeue_task(struct iscsi_task *task);
extern void iscsi_put_task(struct iscsi_task *task);
extern void __iscsi_put_task(struct iscsi_task *task);
-extern void __iscsi_get_task(struct iscsi_task *task);
+extern bool iscsi_get_task(struct iscsi_task *task);
extern void iscsi_complete_scsi_task(struct iscsi_task *task,
uint32_t exp_cmdsn, uint32_t max_cmdsn);
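Hedged sketch of a caller adapting to the bool-returning iscsi_get_task(); the surrounding logic is hypothetical:

	/* only proceed if a reference could still be taken on the task */
	if (!iscsi_get_task(task))
		return;
	/* ... use the task ... */
	iscsi_put_task(task);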
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index ff04eb6d250b..2dbead74a2af 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -145,7 +145,7 @@ struct sata_device {
struct ata_port *ap;
struct ata_host *ata_host;
- struct smp_resp rps_resp ____cacheline_aligned; /* report_phy_sata_resp */
+ struct smp_rps_resp rps_resp ____cacheline_aligned; /* report_phy_sata_resp */
u8 fis[ATA_RESP_FIS_SIZE];
};
diff --git a/include/scsi/sas.h b/include/scsi/sas.h
index acfc69fd72d0..71b749bed3b0 100644
--- a/include/scsi/sas.h
+++ b/include/scsi/sas.h
@@ -471,18 +471,6 @@ struct report_phy_sata_resp {
__be32 crc;
} __attribute__ ((packed));
-struct smp_resp {
- u8 frame_type;
- u8 function;
- u8 result;
- u8 reserved;
- union {
- struct report_general_resp rg;
- struct discover_resp disc;
- struct report_phy_sata_resp rps;
- };
-} __attribute__ ((packed));
-
#elif defined(__BIG_ENDIAN_BITFIELD)
struct sas_identify_frame {
/* Byte 0 */
@@ -704,20 +692,32 @@ struct report_phy_sata_resp {
__be32 crc;
} __attribute__ ((packed));
-struct smp_resp {
+#else
+#error "Bitfield order not defined!"
+#endif
+
+struct smp_rg_resp {
u8 frame_type;
u8 function;
u8 result;
u8 reserved;
- union {
- struct report_general_resp rg;
- struct discover_resp disc;
- struct report_phy_sata_resp rps;
- };
+ struct report_general_resp rg;
} __attribute__ ((packed));
-#else
-#error "Bitfield order not defined!"
-#endif
+struct smp_disc_resp {
+ u8 frame_type;
+ u8 function;
+ u8 result;
+ u8 reserved;
+ struct discover_resp disc;
+} __attribute__ ((packed));
+
+struct smp_rps_resp {
+ u8 frame_type;
+ u8 function;
+ u8 result;
+ u8 reserved;
+ struct report_phy_sata_resp rps;
+} __attribute__ ((packed));
#endif /* _SAS_H_ */
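Illustrative only: with the union-based smp_resp gone, a caller declares the response type that matches the SMP request it issues; the request helper, result check and parser below are assumptions, not part of this patch:

	struct smp_disc_resp disc_resp;
	int res;

	/* hypothetical helper issuing an SMP DISCOVER request */
	res = issue_smp_discover(dev, phy_id, &disc_resp, sizeof(disc_resp));
	if (!res && disc_resp.result == 0 /* function accepted */)
		parse_discover(&disc_resp.disc);	/* hypothetical parser */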
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 2493bd65351a..3113471ca375 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -309,6 +309,8 @@ struct scsi_target {
struct list_head devices;
struct device dev;
struct kref reap_ref; /* last put renders target invisible */
+ atomic_t sdev_count;
+ wait_queue_head_t sdev_wq;
unsigned int channel;
unsigned int id; /* target id ... replace
* scsi_device.id eventually */
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 65082ecdd557..aa7b7496c93a 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -607,6 +607,7 @@ struct Scsi_Host {
short unsigned int sg_tablesize;
short unsigned int sg_prot_tablesize;
unsigned int max_sectors;
+ unsigned int opt_sectors;
unsigned int max_segment_size;
unsigned long dma_boundary;
unsigned long virt_boundary_mask;
@@ -689,6 +690,9 @@ struct Scsi_Host {
/* ldm bits */
struct device shost_gendev, shost_dev;
+ atomic_t target_count;
+ wait_queue_head_t targets_wq;
+
/*
* Points to the transport data (if any) which is allocated
* separately
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 9acb8422f680..cab52b0f11d0 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -162,7 +162,7 @@ struct iscsi_transport {
* transport registration upcalls
*/
extern struct scsi_transport_template *iscsi_register_transport(struct iscsi_transport *tt);
-extern int iscsi_unregister_transport(struct iscsi_transport *tt);
+extern void iscsi_unregister_transport(struct iscsi_transport *tt);
/*
* control plane upcalls
@@ -442,6 +442,7 @@ extern struct iscsi_cls_session *iscsi_create_session(struct Scsi_Host *shost,
struct iscsi_transport *t,
int dd_size,
unsigned int target_id);
+extern void iscsi_force_destroy_session(struct iscsi_cls_session *session);
extern void iscsi_remove_session(struct iscsi_cls_session *session);
extern void iscsi_free_session(struct iscsi_cls_session *session);
extern struct iscsi_cls_conn *iscsi_alloc_conn(struct iscsi_cls_session *sess,
diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h
index ac151ecc7f19..2edea901bbd5 100644
--- a/include/soc/mscc/ocelot.h
+++ b/include/soc/mscc/ocelot.h
@@ -105,11 +105,6 @@
#define REG_RESERVED_ADDR 0xffffffff
#define REG_RESERVED(reg) REG(reg, REG_RESERVED_ADDR)
-#define for_each_stat(ocelot, stat) \
- for ((stat) = (ocelot)->stats_layout; \
- ((stat)->name[0] != '\0'); \
- (stat)++)
-
enum ocelot_target {
ANA = 1,
QS,
@@ -335,13 +330,38 @@ enum ocelot_reg {
SYS_COUNT_RX_64,
SYS_COUNT_RX_65_127,
SYS_COUNT_RX_128_255,
- SYS_COUNT_RX_256_1023,
+ SYS_COUNT_RX_256_511,
+ SYS_COUNT_RX_512_1023,
SYS_COUNT_RX_1024_1526,
SYS_COUNT_RX_1527_MAX,
SYS_COUNT_RX_PAUSE,
SYS_COUNT_RX_CONTROL,
SYS_COUNT_RX_LONGS,
SYS_COUNT_RX_CLASSIFIED_DROPS,
+ SYS_COUNT_RX_RED_PRIO_0,
+ SYS_COUNT_RX_RED_PRIO_1,
+ SYS_COUNT_RX_RED_PRIO_2,
+ SYS_COUNT_RX_RED_PRIO_3,
+ SYS_COUNT_RX_RED_PRIO_4,
+ SYS_COUNT_RX_RED_PRIO_5,
+ SYS_COUNT_RX_RED_PRIO_6,
+ SYS_COUNT_RX_RED_PRIO_7,
+ SYS_COUNT_RX_YELLOW_PRIO_0,
+ SYS_COUNT_RX_YELLOW_PRIO_1,
+ SYS_COUNT_RX_YELLOW_PRIO_2,
+ SYS_COUNT_RX_YELLOW_PRIO_3,
+ SYS_COUNT_RX_YELLOW_PRIO_4,
+ SYS_COUNT_RX_YELLOW_PRIO_5,
+ SYS_COUNT_RX_YELLOW_PRIO_6,
+ SYS_COUNT_RX_YELLOW_PRIO_7,
+ SYS_COUNT_RX_GREEN_PRIO_0,
+ SYS_COUNT_RX_GREEN_PRIO_1,
+ SYS_COUNT_RX_GREEN_PRIO_2,
+ SYS_COUNT_RX_GREEN_PRIO_3,
+ SYS_COUNT_RX_GREEN_PRIO_4,
+ SYS_COUNT_RX_GREEN_PRIO_5,
+ SYS_COUNT_RX_GREEN_PRIO_6,
+ SYS_COUNT_RX_GREEN_PRIO_7,
SYS_COUNT_TX_OCTETS,
SYS_COUNT_TX_UNICAST,
SYS_COUNT_TX_MULTICAST,
@@ -351,11 +371,46 @@ enum ocelot_reg {
SYS_COUNT_TX_PAUSE,
SYS_COUNT_TX_64,
SYS_COUNT_TX_65_127,
- SYS_COUNT_TX_128_511,
+ SYS_COUNT_TX_128_255,
+ SYS_COUNT_TX_256_511,
SYS_COUNT_TX_512_1023,
SYS_COUNT_TX_1024_1526,
SYS_COUNT_TX_1527_MAX,
+ SYS_COUNT_TX_YELLOW_PRIO_0,
+ SYS_COUNT_TX_YELLOW_PRIO_1,
+ SYS_COUNT_TX_YELLOW_PRIO_2,
+ SYS_COUNT_TX_YELLOW_PRIO_3,
+ SYS_COUNT_TX_YELLOW_PRIO_4,
+ SYS_COUNT_TX_YELLOW_PRIO_5,
+ SYS_COUNT_TX_YELLOW_PRIO_6,
+ SYS_COUNT_TX_YELLOW_PRIO_7,
+ SYS_COUNT_TX_GREEN_PRIO_0,
+ SYS_COUNT_TX_GREEN_PRIO_1,
+ SYS_COUNT_TX_GREEN_PRIO_2,
+ SYS_COUNT_TX_GREEN_PRIO_3,
+ SYS_COUNT_TX_GREEN_PRIO_4,
+ SYS_COUNT_TX_GREEN_PRIO_5,
+ SYS_COUNT_TX_GREEN_PRIO_6,
+ SYS_COUNT_TX_GREEN_PRIO_7,
SYS_COUNT_TX_AGING,
+ SYS_COUNT_DROP_LOCAL,
+ SYS_COUNT_DROP_TAIL,
+ SYS_COUNT_DROP_YELLOW_PRIO_0,
+ SYS_COUNT_DROP_YELLOW_PRIO_1,
+ SYS_COUNT_DROP_YELLOW_PRIO_2,
+ SYS_COUNT_DROP_YELLOW_PRIO_3,
+ SYS_COUNT_DROP_YELLOW_PRIO_4,
+ SYS_COUNT_DROP_YELLOW_PRIO_5,
+ SYS_COUNT_DROP_YELLOW_PRIO_6,
+ SYS_COUNT_DROP_YELLOW_PRIO_7,
+ SYS_COUNT_DROP_GREEN_PRIO_0,
+ SYS_COUNT_DROP_GREEN_PRIO_1,
+ SYS_COUNT_DROP_GREEN_PRIO_2,
+ SYS_COUNT_DROP_GREEN_PRIO_3,
+ SYS_COUNT_DROP_GREEN_PRIO_4,
+ SYS_COUNT_DROP_GREEN_PRIO_5,
+ SYS_COUNT_DROP_GREEN_PRIO_6,
+ SYS_COUNT_DROP_GREEN_PRIO_7,
SYS_RESET_CFG,
SYS_SR_ETYPE_CFG,
SYS_VLAN_ETYPE_CFG,
@@ -538,16 +593,111 @@ enum ocelot_ptp_pins {
TOD_ACC_PIN
};
+enum ocelot_stat {
+ OCELOT_STAT_RX_OCTETS,
+ OCELOT_STAT_RX_UNICAST,
+ OCELOT_STAT_RX_MULTICAST,
+ OCELOT_STAT_RX_BROADCAST,
+ OCELOT_STAT_RX_SHORTS,
+ OCELOT_STAT_RX_FRAGMENTS,
+ OCELOT_STAT_RX_JABBERS,
+ OCELOT_STAT_RX_CRC_ALIGN_ERRS,
+ OCELOT_STAT_RX_SYM_ERRS,
+ OCELOT_STAT_RX_64,
+ OCELOT_STAT_RX_65_127,
+ OCELOT_STAT_RX_128_255,
+ OCELOT_STAT_RX_256_511,
+ OCELOT_STAT_RX_512_1023,
+ OCELOT_STAT_RX_1024_1526,
+ OCELOT_STAT_RX_1527_MAX,
+ OCELOT_STAT_RX_PAUSE,
+ OCELOT_STAT_RX_CONTROL,
+ OCELOT_STAT_RX_LONGS,
+ OCELOT_STAT_RX_CLASSIFIED_DROPS,
+ OCELOT_STAT_RX_RED_PRIO_0,
+ OCELOT_STAT_RX_RED_PRIO_1,
+ OCELOT_STAT_RX_RED_PRIO_2,
+ OCELOT_STAT_RX_RED_PRIO_3,
+ OCELOT_STAT_RX_RED_PRIO_4,
+ OCELOT_STAT_RX_RED_PRIO_5,
+ OCELOT_STAT_RX_RED_PRIO_6,
+ OCELOT_STAT_RX_RED_PRIO_7,
+ OCELOT_STAT_RX_YELLOW_PRIO_0,
+ OCELOT_STAT_RX_YELLOW_PRIO_1,
+ OCELOT_STAT_RX_YELLOW_PRIO_2,
+ OCELOT_STAT_RX_YELLOW_PRIO_3,
+ OCELOT_STAT_RX_YELLOW_PRIO_4,
+ OCELOT_STAT_RX_YELLOW_PRIO_5,
+ OCELOT_STAT_RX_YELLOW_PRIO_6,
+ OCELOT_STAT_RX_YELLOW_PRIO_7,
+ OCELOT_STAT_RX_GREEN_PRIO_0,
+ OCELOT_STAT_RX_GREEN_PRIO_1,
+ OCELOT_STAT_RX_GREEN_PRIO_2,
+ OCELOT_STAT_RX_GREEN_PRIO_3,
+ OCELOT_STAT_RX_GREEN_PRIO_4,
+ OCELOT_STAT_RX_GREEN_PRIO_5,
+ OCELOT_STAT_RX_GREEN_PRIO_6,
+ OCELOT_STAT_RX_GREEN_PRIO_7,
+ OCELOT_STAT_TX_OCTETS,
+ OCELOT_STAT_TX_UNICAST,
+ OCELOT_STAT_TX_MULTICAST,
+ OCELOT_STAT_TX_BROADCAST,
+ OCELOT_STAT_TX_COLLISION,
+ OCELOT_STAT_TX_DROPS,
+ OCELOT_STAT_TX_PAUSE,
+ OCELOT_STAT_TX_64,
+ OCELOT_STAT_TX_65_127,
+ OCELOT_STAT_TX_128_255,
+ OCELOT_STAT_TX_256_511,
+ OCELOT_STAT_TX_512_1023,
+ OCELOT_STAT_TX_1024_1526,
+ OCELOT_STAT_TX_1527_MAX,
+ OCELOT_STAT_TX_YELLOW_PRIO_0,
+ OCELOT_STAT_TX_YELLOW_PRIO_1,
+ OCELOT_STAT_TX_YELLOW_PRIO_2,
+ OCELOT_STAT_TX_YELLOW_PRIO_3,
+ OCELOT_STAT_TX_YELLOW_PRIO_4,
+ OCELOT_STAT_TX_YELLOW_PRIO_5,
+ OCELOT_STAT_TX_YELLOW_PRIO_6,
+ OCELOT_STAT_TX_YELLOW_PRIO_7,
+ OCELOT_STAT_TX_GREEN_PRIO_0,
+ OCELOT_STAT_TX_GREEN_PRIO_1,
+ OCELOT_STAT_TX_GREEN_PRIO_2,
+ OCELOT_STAT_TX_GREEN_PRIO_3,
+ OCELOT_STAT_TX_GREEN_PRIO_4,
+ OCELOT_STAT_TX_GREEN_PRIO_5,
+ OCELOT_STAT_TX_GREEN_PRIO_6,
+ OCELOT_STAT_TX_GREEN_PRIO_7,
+ OCELOT_STAT_TX_AGED,
+ OCELOT_STAT_DROP_LOCAL,
+ OCELOT_STAT_DROP_TAIL,
+ OCELOT_STAT_DROP_YELLOW_PRIO_0,
+ OCELOT_STAT_DROP_YELLOW_PRIO_1,
+ OCELOT_STAT_DROP_YELLOW_PRIO_2,
+ OCELOT_STAT_DROP_YELLOW_PRIO_3,
+ OCELOT_STAT_DROP_YELLOW_PRIO_4,
+ OCELOT_STAT_DROP_YELLOW_PRIO_5,
+ OCELOT_STAT_DROP_YELLOW_PRIO_6,
+ OCELOT_STAT_DROP_YELLOW_PRIO_7,
+ OCELOT_STAT_DROP_GREEN_PRIO_0,
+ OCELOT_STAT_DROP_GREEN_PRIO_1,
+ OCELOT_STAT_DROP_GREEN_PRIO_2,
+ OCELOT_STAT_DROP_GREEN_PRIO_3,
+ OCELOT_STAT_DROP_GREEN_PRIO_4,
+ OCELOT_STAT_DROP_GREEN_PRIO_5,
+ OCELOT_STAT_DROP_GREEN_PRIO_6,
+ OCELOT_STAT_DROP_GREEN_PRIO_7,
+ OCELOT_NUM_STATS,
+};
+
struct ocelot_stat_layout {
- u32 offset;
+ u32 reg;
char name[ETH_GSTRING_LEN];
};
-#define OCELOT_STAT_END { .name = "" }
-
struct ocelot_stats_region {
struct list_head node;
- u32 offset;
+ u32 base;
int count;
u32 *buf;
};
@@ -707,7 +857,6 @@ struct ocelot {
const u32 *const *map;
const struct ocelot_stat_layout *stats_layout;
struct list_head stats_regions;
- unsigned int num_stats;
u32 pool_size[OCELOT_SB_NUM][OCELOT_SB_POOL_NUM];
int packet_buffer_size;
@@ -750,7 +899,7 @@ struct ocelot {
struct ocelot_psfp_list psfp;
/* Workqueue to check statistics for overflow with its lock */
- struct mutex stats_lock;
+ spinlock_t stats_lock;
u64 *stats;
struct delayed_work stats_work;
struct workqueue_struct *stats_queue;
@@ -786,8 +935,8 @@ struct ocelot_policer {
u32 burst; /* bytes */
};
-#define ocelot_bulk_read_rix(ocelot, reg, ri, buf, count) \
- __ocelot_bulk_read_ix(ocelot, reg, reg##_RSZ * (ri), buf, count)
+#define ocelot_bulk_read(ocelot, reg, buf, count) \
+ __ocelot_bulk_read_ix(ocelot, reg, 0, buf, count)
#define ocelot_read_ix(ocelot, reg, gi, ri) \
__ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
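Hedged sketch of what a switch driver's counter table looks like after this change: the array is now indexed by enum ocelot_stat and each entry names the counter register directly; the two entries shown are assumptions about one driver's table:

	static const struct ocelot_stat_layout my_stats_layout[OCELOT_NUM_STATS] = {
		[OCELOT_STAT_RX_OCTETS] = {
			.name = "rx_octets",
			.reg = SYS_COUNT_RX_OCTETS,
		},
		[OCELOT_STAT_RX_UNICAST] = {
			.name = "rx_unicast",
			.reg = SYS_COUNT_RX_UNICAST,
		},
	};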
diff --git a/include/sound/control.h b/include/sound/control.h
index 985c51a8fb74..eae443ba79ba 100644
--- a/include/sound/control.h
+++ b/include/sound/control.h
@@ -23,7 +23,7 @@ typedef int (snd_kcontrol_tlv_rw_t)(struct snd_kcontrol *kcontrol,
unsigned int __user *tlv);
/* internal flag for skipping validations */
-#ifdef CONFIG_SND_CTL_VALIDATION
+#ifdef CONFIG_SND_CTL_DEBUG
#define SNDRV_CTL_ELEM_ACCESS_SKIP_CHECK (1 << 24)
#define snd_ctl_skip_validation(info) \
((info)->access & SNDRV_CTL_ELEM_ACCESS_SKIP_CHECK)
@@ -109,7 +109,7 @@ struct snd_ctl_file {
int preferred_subdevice[SND_CTL_SUBDEV_ITEMS];
wait_queue_head_t change_sleep;
spinlock_t read_lock;
- struct fasync_struct *fasync;
+ struct snd_fasync *fasync;
int subscribed; /* read interface is activated */
struct list_head events; /* waiting events for read */
};
diff --git a/include/sound/core.h b/include/sound/core.h
index 6d4cc49584c6..4365c35d038b 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -14,6 +14,7 @@
#include <linux/pm.h> /* pm_message_t */
#include <linux/stringify.h>
#include <linux/printk.h>
+#include <linux/xarray.h>
/* number of supported soundcards */
#ifdef CONFIG_SND_DYNAMIC_MINORS
@@ -103,6 +104,11 @@ struct snd_card {
size_t user_ctl_alloc_size; // current memory allocation by user controls.
struct list_head controls; /* all controls for this card */
struct list_head ctl_files; /* active control files */
+#ifdef CONFIG_SND_CTL_FAST_LOOKUP
+ struct xarray ctl_numids; /* hash table for numids */
+ struct xarray ctl_hash; /* hash table for ctl id matching */
+ bool ctl_hash_collision; /* ctl_hash collision seen? */
+#endif
struct snd_info_entry *proc_root; /* root for soundcard specific files */
struct proc_dir_entry *proc_root_link; /* number link to real id */
@@ -501,4 +507,12 @@ snd_pci_quirk_lookup_id(u16 vendor, u16 device,
}
#endif
+/* async signal helpers */
+struct snd_fasync;
+
+int snd_fasync_helper(int fd, struct file *file, int on,
+ struct snd_fasync **fasyncp);
+void snd_kill_fasync(struct snd_fasync *fasync, int signal, int poll);
+void snd_fasync_free(struct snd_fasync *fasync);
+
#endif /* __SOUND_CORE_H */
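Hedged sketch of a driver moving from a raw struct fasync_struct to the new helpers; the callback names and call sites are hypothetical:

	static struct snd_fasync *my_fasync;

	static int my_fasync_cb(int fd, struct file *file, int on)
	{
		return snd_fasync_helper(fd, file, on, &my_fasync);
	}

	static void my_notify_readers(void)
	{
		/* assumed to be usable where a direct kill_fasync() would be risky */
		snd_kill_fasync(my_fasync, SIGIO, POLL_IN);
	}

	static void my_free(void)
	{
		snd_fasync_free(my_fasync);
	}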
diff --git a/include/sound/cs35l41.h b/include/sound/cs35l41.h
index 8972fa697622..9ac5918269a5 100644
--- a/include/sound/cs35l41.h
+++ b/include/sound/cs35l41.h
@@ -665,6 +665,10 @@
#define CS35L41_BST_EN_DEFAULT 0x2
#define CS35L41_AMP_EN_SHIFT 0
#define CS35L41_AMP_EN_MASK 1
+#define CS35L41_VMON_EN_MASK 0x1000
+#define CS35L41_VMON_EN_SHIFT 12
+#define CS35L41_IMON_EN_MASK 0x2000
+#define CS35L41_IMON_EN_SHIFT 13
#define CS35L41_PDN_DONE_MASK 0x00800000
#define CS35L41_PDN_DONE_SHIFT 23
@@ -881,6 +885,9 @@ void cs35l41_configure_cs_dsp(struct device *dev, struct regmap *reg, struct cs_
int cs35l41_set_cspl_mbox_cmd(struct device *dev, struct regmap *regmap,
enum cs35l41_cspl_mbox_cmd cmd);
int cs35l41_write_fs_errata(struct device *dev, struct regmap *regmap);
+int cs35l41_enter_hibernate(struct device *dev, struct regmap *regmap,
+ enum cs35l41_boost_type b_type);
+int cs35l41_exit_hibernate(struct device *dev, struct regmap *regmap);
int cs35l41_init_boost(struct device *dev, struct regmap *regmap,
struct cs35l41_hw_cfg *hw_cfg);
bool cs35l41_safe_reset(struct regmap *regmap, enum cs35l41_boost_type b_type);
diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
index 38ea046e653c..2df54cf02cb3 100644
--- a/include/sound/dmaengine_pcm.h
+++ b/include/sound/dmaengine_pcm.h
@@ -15,6 +15,8 @@
* snd_pcm_substream_to_dma_direction - Get dma_transfer_direction for a PCM
* substream
* @substream: PCM substream
+ *
+ * Return: DMA transfer direction
*/
static inline enum dma_transfer_direction
snd_pcm_substream_to_dma_direction(const struct snd_pcm_substream *substream)
diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
index b7be300b6b18..6d3c82c4b6ac 100644
--- a/include/sound/hda_codec.h
+++ b/include/sound/hda_codec.h
@@ -231,7 +231,6 @@ struct hda_codec {
/* misc flags */
unsigned int configured:1; /* codec was configured */
unsigned int in_freeing:1; /* being released */
- unsigned int registered:1; /* codec was registered */
unsigned int display_power_control:1; /* needs display power */
unsigned int spdif_status_reset :1; /* needs to toggle SPDIF for each
* status change
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index 15f15075238d..797bf67a164d 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -93,6 +93,7 @@ struct hdac_device {
bool lazy_cache:1; /* don't wake up for writes */
bool caps_overwriting:1; /* caps overwrite being in process */
bool cache_coef:1; /* cache COEF read/write too */
+ unsigned int registered:1; /* codec was registered */
};
/* device/driver type used for matching */
diff --git a/include/sound/hdmi-codec.h b/include/sound/hdmi-codec.h
index 4fc733c8c570..48ad33aba393 100644
--- a/include/sound/hdmi-codec.h
+++ b/include/sound/hdmi-codec.h
@@ -32,8 +32,8 @@ struct hdmi_codec_daifmt {
} fmt;
unsigned int bit_clk_inv:1;
unsigned int frame_clk_inv:1;
- unsigned int bit_clk_master:1;
- unsigned int frame_clk_master:1;
+ unsigned int bit_clk_provider:1;
+ unsigned int frame_clk_provider:1;
/* bit_fmt could be standard PCM format or
* IEC958 encoded format. ALSA IEC958 plugin will pass
* IEC958_SUBFRAME format to the underneath driver.
diff --git a/include/sound/madera-pdata.h b/include/sound/madera-pdata.h
index e3060f48f108..58398d80c3de 100644
--- a/include/sound/madera-pdata.h
+++ b/include/sound/madera-pdata.h
@@ -9,7 +9,7 @@
#ifndef MADERA_CODEC_PDATA_H
#define MADERA_CODEC_PDATA_H
-#include <linux/kernel.h>
+#include <linux/types.h>
#define MADERA_MAX_INPUT 6
#define MADERA_MAX_MUXED_CHANNELS 4
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 6b99310b5b88..8c48a5bce88c 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -399,7 +399,7 @@ struct snd_pcm_runtime {
snd_pcm_uframes_t twake; /* do transfer (!poll) wakeup if non-zero */
wait_queue_head_t sleep; /* poll sleep */
wait_queue_head_t tsleep; /* transfer sleep */
- struct fasync_struct *fasync;
+ struct snd_fasync *fasync;
bool stop_operating; /* sync_stop will be called */
struct mutex buffer_mutex; /* protect for buffer changes */
atomic_t buffer_accessing; /* >0: in r/w operation, <0: blocked */
@@ -607,7 +607,7 @@ snd_pcm_debug_name(struct snd_pcm_substream *substream, char *buf, size_t size)
* snd_pcm_stream_linked - Check whether the substream is linked with others
* @substream: substream to check
*
- * Returns true if the given substream is being linked with others.
+ * Return: true if the given substream is being linked with others
*/
static inline int snd_pcm_stream_linked(struct snd_pcm_substream *substream)
{
@@ -673,7 +673,7 @@ void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
* snd_pcm_running - Check whether the substream is in a running state
* @substream: substream to check
*
- * Returns true if the given substream is in the state RUNNING, or in the
+ * Return: true if the given substream is in the state RUNNING, or in the
* state DRAINING for playback.
*/
static inline int snd_pcm_running(struct snd_pcm_substream *substream)
@@ -687,6 +687,8 @@ static inline int snd_pcm_running(struct snd_pcm_substream *substream)
* bytes_to_samples - Unit conversion of the size from bytes to samples
* @runtime: PCM runtime instance
* @size: size in bytes
+ *
+ * Return: the size in samples
*/
static inline ssize_t bytes_to_samples(struct snd_pcm_runtime *runtime, ssize_t size)
{
@@ -697,6 +699,8 @@ static inline ssize_t bytes_to_samples(struct snd_pcm_runtime *runtime, ssize_t
* bytes_to_frames - Unit conversion of the size from bytes to frames
* @runtime: PCM runtime instance
* @size: size in bytes
+ *
+ * Return: the size in frames
*/
static inline snd_pcm_sframes_t bytes_to_frames(struct snd_pcm_runtime *runtime, ssize_t size)
{
@@ -707,6 +711,8 @@ static inline snd_pcm_sframes_t bytes_to_frames(struct snd_pcm_runtime *runtime,
* samples_to_bytes - Unit conversion of the size from samples to bytes
* @runtime: PCM runtime instance
* @size: size in samples
+ *
+ * Return: the byte size
*/
static inline ssize_t samples_to_bytes(struct snd_pcm_runtime *runtime, ssize_t size)
{
@@ -717,6 +723,8 @@ static inline ssize_t samples_to_bytes(struct snd_pcm_runtime *runtime, ssize_t
* frames_to_bytes - Unit conversion of the size from frames to bytes
* @runtime: PCM runtime instance
* @size: size in frames
+ *
+ * Return: the byte size
*/
static inline ssize_t frames_to_bytes(struct snd_pcm_runtime *runtime, snd_pcm_sframes_t size)
{
@@ -727,6 +735,8 @@ static inline ssize_t frames_to_bytes(struct snd_pcm_runtime *runtime, snd_pcm_s
* frame_aligned - Check whether the byte size is aligned to frames
* @runtime: PCM runtime instance
* @bytes: size in bytes
+ *
+ * Return: true if aligned, or false if not
*/
static inline int frame_aligned(struct snd_pcm_runtime *runtime, ssize_t bytes)
{
@@ -736,6 +746,8 @@ static inline int frame_aligned(struct snd_pcm_runtime *runtime, ssize_t bytes)
/**
* snd_pcm_lib_buffer_bytes - Get the buffer size of the current PCM in bytes
* @substream: PCM substream
+ *
+ * Return: buffer byte size
*/
static inline size_t snd_pcm_lib_buffer_bytes(struct snd_pcm_substream *substream)
{
@@ -746,6 +758,8 @@ static inline size_t snd_pcm_lib_buffer_bytes(struct snd_pcm_substream *substrea
/**
* snd_pcm_lib_period_bytes - Get the period size of the current PCM in bytes
* @substream: PCM substream
+ *
+ * Return: period byte size
*/
static inline size_t snd_pcm_lib_period_bytes(struct snd_pcm_substream *substream)
{
@@ -758,6 +772,8 @@ static inline size_t snd_pcm_lib_period_bytes(struct snd_pcm_substream *substrea
* @runtime: PCM runtime instance
*
* Result is between 0 ... (boundary - 1)
+ *
+ * Return: available frame size
*/
static inline snd_pcm_uframes_t snd_pcm_playback_avail(struct snd_pcm_runtime *runtime)
{
@@ -774,6 +790,8 @@ static inline snd_pcm_uframes_t snd_pcm_playback_avail(struct snd_pcm_runtime *r
* @runtime: PCM runtime instance
*
* Result is between 0 ... (boundary - 1)
+ *
+ * Return: available frame size
*/
static inline snd_pcm_uframes_t snd_pcm_capture_avail(struct snd_pcm_runtime *runtime)
{
@@ -786,6 +804,8 @@ static inline snd_pcm_uframes_t snd_pcm_capture_avail(struct snd_pcm_runtime *ru
/**
* snd_pcm_playback_hw_avail - Get the queued space for playback
* @runtime: PCM runtime instance
+ *
+ * Return: available frame size
*/
static inline snd_pcm_sframes_t snd_pcm_playback_hw_avail(struct snd_pcm_runtime *runtime)
{
@@ -795,6 +815,8 @@ static inline snd_pcm_sframes_t snd_pcm_playback_hw_avail(struct snd_pcm_runtime
/**
* snd_pcm_capture_hw_avail - Get the free space for capture
* @runtime: PCM runtime instance
+ *
+ * Return: available frame size
*/
static inline snd_pcm_sframes_t snd_pcm_capture_hw_avail(struct snd_pcm_runtime *runtime)
{
@@ -934,6 +956,8 @@ static inline const struct snd_interval *hw_param_interval_c(const struct snd_pc
/**
* params_channels - Get the number of channels from the hw params
* @p: hw params
+ *
+ * Return: the number of channels
*/
static inline unsigned int params_channels(const struct snd_pcm_hw_params *p)
{
@@ -943,6 +967,8 @@ static inline unsigned int params_channels(const struct snd_pcm_hw_params *p)
/**
* params_rate - Get the sample rate from the hw params
* @p: hw params
+ *
+ * Return: the sample rate
*/
static inline unsigned int params_rate(const struct snd_pcm_hw_params *p)
{
@@ -952,6 +978,8 @@ static inline unsigned int params_rate(const struct snd_pcm_hw_params *p)
/**
* params_period_size - Get the period size (in frames) from the hw params
* @p: hw params
+ *
+ * Return: the period size in frames
*/
static inline unsigned int params_period_size(const struct snd_pcm_hw_params *p)
{
@@ -961,6 +989,8 @@ static inline unsigned int params_period_size(const struct snd_pcm_hw_params *p)
/**
* params_periods - Get the number of periods from the hw params
* @p: hw params
+ *
+ * Return: the number of periods
*/
static inline unsigned int params_periods(const struct snd_pcm_hw_params *p)
{
@@ -970,6 +1000,8 @@ static inline unsigned int params_periods(const struct snd_pcm_hw_params *p)
/**
* params_buffer_size - Get the buffer size (in frames) from the hw params
* @p: hw params
+ *
+ * Return: the buffer size in frames
*/
static inline unsigned int params_buffer_size(const struct snd_pcm_hw_params *p)
{
@@ -979,6 +1011,8 @@ static inline unsigned int params_buffer_size(const struct snd_pcm_hw_params *p)
/**
* params_buffer_bytes - Get the buffer size (in bytes) from the hw params
* @p: hw params
+ *
+ * Return: the buffer size in bytes
*/
static inline unsigned int params_buffer_bytes(const struct snd_pcm_hw_params *p)
{
@@ -1241,6 +1275,8 @@ int snd_pcm_set_managed_buffer_all(struct snd_pcm *pcm, int type,
* only the given sized buffer and doesn't allow re-allocation nor dynamic
* allocation of a larger buffer unlike the standard one.
* The function may return -ENOMEM error, hence the caller must check it.
+ *
+ * Return: zero if successful, or a negative error code
*/
static inline int __must_check
snd_pcm_set_fixed_buffer(struct snd_pcm_substream *substream, int type,
@@ -1259,6 +1295,8 @@ snd_pcm_set_fixed_buffer(struct snd_pcm_substream *substream, int type,
* Apply the set up of the fixed buffer via snd_pcm_set_fixed_buffer() for
* all substream. If any of allocation fails, it returns -ENOMEM, hence the
* caller must check the return value.
+ *
+ * Return: zero if successful, or a negative error code
*/
static inline int __must_check
snd_pcm_set_fixed_buffer_all(struct snd_pcm *pcm, int type,
@@ -1315,6 +1353,8 @@ static inline int snd_pcm_lib_alloc_vmalloc_32_buffer
* snd_pcm_sgbuf_get_addr - Get the DMA address at the corresponding offset
* @substream: PCM substream
* @ofs: byte offset
+ *
+ * Return: DMA address
*/
static inline dma_addr_t
snd_pcm_sgbuf_get_addr(struct snd_pcm_substream *substream, unsigned int ofs)
@@ -1328,6 +1368,8 @@ snd_pcm_sgbuf_get_addr(struct snd_pcm_substream *substream, unsigned int ofs)
* @substream: PCM substream
* @ofs: byte offset
* @size: byte size to examine
+ *
+ * Return: chunk size
*/
static inline unsigned int
snd_pcm_sgbuf_get_chunk_size(struct snd_pcm_substream *substream,
@@ -1393,6 +1435,20 @@ static inline void snd_pcm_limit_isa_dma_size(int dma, size_t *max)
const char *snd_pcm_format_name(snd_pcm_format_t format);
/**
+ * snd_pcm_direction_name - Get a string naming the direction of a stream
+ * @direction: Stream's direction, one of SNDRV_PCM_STREAM_XXX
+ *
+ * Returns a string naming the direction of the stream.
+ */
+static inline const char *snd_pcm_direction_name(int direction)
+{
+ if (direction == SNDRV_PCM_STREAM_PLAYBACK)
+ return "Playback";
+ else
+ return "Capture";
+}
+
+/**
* snd_pcm_stream_str - Get a string naming the direction of a stream
* @substream: the pcm substream instance
*
@@ -1400,10 +1456,7 @@ const char *snd_pcm_format_name(snd_pcm_format_t format);
*/
static inline const char *snd_pcm_stream_str(struct snd_pcm_substream *substream)
{
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- return "Playback";
- else
- return "Capture";
+ return snd_pcm_direction_name(substream->stream);
}
/*
@@ -1430,6 +1483,8 @@ struct snd_pcm_chmap {
* snd_pcm_chmap_substream - get the PCM substream assigned to the given chmap info
* @info: chmap information
* @idx: the substream number index
+ *
+ * Return: the matched PCM substream, or NULL if not found
*/
static inline struct snd_pcm_substream *
snd_pcm_chmap_substream(struct snd_pcm_chmap *info, unsigned int idx)
@@ -1460,6 +1515,8 @@ int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream,
/**
* pcm_format_to_bits - Strong-typed conversion of pcm_format to bitwise
* @pcm_format: PCM format
+ *
+ * Return: 64bit mask corresponding to the given PCM format
*/
static inline u64 pcm_format_to_bits(snd_pcm_format_t pcm_format)
{
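Minimal usage sketch for the new snd_pcm_direction_name() helper; the caller is hypothetical:

	dev_dbg(card->dev, "configuring %s stream\n",
		snd_pcm_direction_name(SNDRV_PCM_STREAM_PLAYBACK));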
diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
index 7a08ed2acd60..e1f59b2940af 100644
--- a/include/sound/rawmidi.h
+++ b/include/sound/rawmidi.h
@@ -63,7 +63,6 @@ struct snd_rawmidi_runtime {
size_t xruns; /* over/underruns counter */
int buffer_ref; /* buffer reference count */
/* misc */
- spinlock_t lock;
wait_queue_head_t sleep;
/* event handler (new bytes, input only) */
void (*event)(struct snd_rawmidi_substream *substream);
@@ -85,6 +84,7 @@ struct snd_rawmidi_substream {
unsigned int clock_type; /* clock source to use for input framing */
int use_count; /* use counter (for output) */
size_t bytes;
+ spinlock_t lock;
struct snd_rawmidi *rmidi;
struct snd_rawmidi_str *pstr;
char name[32];
@@ -156,10 +156,6 @@ int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count);
int snd_rawmidi_transmit(struct snd_rawmidi_substream *substream,
unsigned char *buffer, int count);
-int __snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
- unsigned char *buffer, int count);
-int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream,
- int count);
int snd_rawmidi_proceed(struct snd_rawmidi_substream *substream);
/* main midi functions */
diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
index 8faa649f712b..ab55f40896e0 100644
--- a/include/sound/simple_card_utils.h
+++ b/include/sound/simple_card_utils.h
@@ -51,7 +51,6 @@ struct prop_nums {
int cpus;
int codecs;
int platforms;
- int c2c;
};
struct asoc_simple_priv {
@@ -64,7 +63,6 @@ struct asoc_simple_priv {
struct snd_soc_dai_link_component *platforms;
struct asoc_simple_data adata;
struct snd_soc_codec_conf *codec_conf;
- struct snd_soc_pcm_stream *c2c_conf;
struct prop_nums num;
unsigned int mclk_fs;
} *dai_props;
@@ -75,7 +73,6 @@ struct asoc_simple_priv {
struct snd_soc_dai_link_component *dlcs;
struct snd_soc_dai_link_component dummy;
struct snd_soc_codec_conf *codec_conf;
- struct snd_soc_pcm_stream *c2c_conf;
struct gpio_desc *pa_gpio;
const struct snd_soc_ops *ops;
unsigned int dpcm_selectable:1;
@@ -173,7 +170,7 @@ void asoc_simple_canonicalize_platform(struct snd_soc_dai_link_component *platfo
void asoc_simple_canonicalize_cpu(struct snd_soc_dai_link_component *cpus,
int is_single_links);
-int asoc_simple_clean_reference(struct snd_soc_card *card);
+void asoc_simple_clean_reference(struct snd_soc_card *card);
void asoc_simple_convert_fixup(struct asoc_simple_data *data,
struct snd_pcm_hw_params *params);
diff --git a/include/sound/soc-acpi-intel-match.h b/include/sound/soc-acpi-intel-match.h
index 59551b1f22f3..bc7fd46ec2bc 100644
--- a/include/sound/soc-acpi-intel-match.h
+++ b/include/sound/soc-acpi-intel-match.h
@@ -30,6 +30,7 @@ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_tgl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_ehl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_jsl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cnl_sdw_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cfl_sdw_machines[];
@@ -37,6 +38,7 @@ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cml_sdw_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_icl_sdw_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_tgl_sdw_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_sdw_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_sdw_machines[];
/*
* generic table used for HDA codec-based platforms, possibly with
diff --git a/include/sound/soc-card.h b/include/sound/soc-card.h
index df08573bd80c..9d31a5c0db33 100644
--- a/include/sound/soc-card.h
+++ b/include/sound/soc-card.h
@@ -29,6 +29,7 @@ int snd_soc_card_resume_post(struct snd_soc_card *card);
int snd_soc_card_probe(struct snd_soc_card *card);
int snd_soc_card_late_probe(struct snd_soc_card *card);
+void snd_soc_card_fixup_controls(struct snd_soc_card *card);
int snd_soc_card_remove(struct snd_soc_card *card);
int snd_soc_card_set_bias_level(struct snd_soc_card *card,
diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h
index 5a764c3099d3..c26ffb033777 100644
--- a/include/sound/soc-component.h
+++ b/include/sound/soc-component.h
@@ -179,7 +179,7 @@ struct snd_soc_component_driver {
* analogue).
*/
unsigned int endianness:1;
- unsigned int non_legacy_dai_naming:1;
+ unsigned int legacy_dai_naming:1;
/* this component uses topology and ignore machine driver FEs */
const char *ignore_machine;
@@ -348,11 +348,6 @@ static inline int snd_soc_component_cache_sync(
return regcache_sync(component->regmap);
}
-static inline int snd_soc_component_is_codec(struct snd_soc_component *component)
-{
- return component->driver->non_legacy_dai_naming;
-}
-
void snd_soc_component_set_aux(struct snd_soc_component *component,
struct snd_soc_aux_dev *aux);
int snd_soc_component_init(struct snd_soc_component *component);
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index bbd821d2df9c..ea7509672086 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -124,6 +124,12 @@ struct snd_compr_stream;
#define SND_SOC_DAIFMT_CBM_CFS SND_SOC_DAIFMT_CBP_CFC
#define SND_SOC_DAIFMT_CBS_CFS SND_SOC_DAIFMT_CBC_CFC
+/* when passed to set_fmt directly, indicates whether the device is provider or consumer */
+#define SND_SOC_DAIFMT_BP_FP SND_SOC_DAIFMT_CBP_CFP
+#define SND_SOC_DAIFMT_BC_FP SND_SOC_DAIFMT_CBC_CFP
+#define SND_SOC_DAIFMT_BP_FC SND_SOC_DAIFMT_CBP_CFC
+#define SND_SOC_DAIFMT_BC_FC SND_SOC_DAIFMT_CBC_CFC
+
/* Describes the possible PCM format */
#define SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_SHIFT 48
#define SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_MASK (0xFFFFULL << SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_SHIFT)
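Hedged example of the new provider/consumer aliases being passed straight to set_fmt; the DAI pointer is hypothetical, the other flags already exist:

	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
					     SND_SOC_DAIFMT_NB_NF |
					     SND_SOC_DAIFMT_BP_FP);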
diff --git a/include/sound/soc.h b/include/sound/soc.h
index b276dcb5d4e8..aad24a1d3276 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -136,6 +136,18 @@
.put = snd_soc_put_volsw, \
.private_value = SOC_DOUBLE_VALUE(reg, shift_left, shift_right, \
max, invert, 0) }
+#define SOC_DOUBLE_SX_TLV(xname, xreg, shift_left, shift_right, xmin, xmax, tlv_array) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
+ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
+ SNDRV_CTL_ELEM_ACCESS_READWRITE, \
+ .tlv.p = (tlv_array), \
+ .info = snd_soc_info_volsw_sx, \
+ .get = snd_soc_get_volsw_sx, \
+ .put = snd_soc_put_volsw_sx, \
+ .private_value = (unsigned long)&(struct soc_mixer_control) \
+ {.reg = xreg, .rreg = xreg, \
+ .shift = shift_left, .rshift = shift_right, \
+ .max = xmax, .min = xmin} }
#define SOC_DOUBLE_R_TLV(xname, reg_left, reg_right, xshift, xmax, xinvert, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\
@@ -414,7 +426,7 @@ enum snd_soc_pcm_subclass {
};
int snd_soc_register_card(struct snd_soc_card *card);
-int snd_soc_unregister_card(struct snd_soc_card *card);
+void snd_soc_unregister_card(struct snd_soc_card *card);
int devm_snd_soc_register_card(struct device *dev, struct snd_soc_card *card);
#ifdef CONFIG_PM_SLEEP
int snd_soc_suspend(struct device *dev);
@@ -914,6 +926,7 @@ struct snd_soc_card {
int (*probe)(struct snd_soc_card *card);
int (*late_probe)(struct snd_soc_card *card);
+ void (*fixup_controls)(struct snd_soc_card *card);
int (*remove)(struct snd_soc_card *card);
/* the pre and post PM functions are used to do any PM work before and
diff --git a/include/sound/sof.h b/include/sound/sof.h
index 1a82a0db5e7f..367dccfea7ad 100644
--- a/include/sound/sof.h
+++ b/include/sound/sof.h
@@ -138,6 +138,7 @@ struct sof_dev_desc {
struct snd_sof_dsp_ops *ops;
int (*ops_init)(struct snd_sof_dev *sdev);
+ void (*ops_free)(struct snd_sof_dev *sdev);
};
int sof_dai_get_mclk(struct snd_soc_pcm_runtime *rtd);
diff --git a/include/sound/sof/dai-amd.h b/include/sound/sof/dai-amd.h
index 90d09dbdd709..92f45c180b7c 100644
--- a/include/sound/sof/dai-amd.h
+++ b/include/sound/sof/dai-amd.h
@@ -18,4 +18,11 @@ struct sof_ipc_dai_acp_params {
uint32_t fsync_rate; /* FSYNC frequency in Hz */
uint32_t tdm_slots;
} __packed;
+
+/* ACPDMIC Configuration Request - SOF_IPC_DAI_AMD_CONFIG */
+struct sof_ipc_dai_acpdmic_params {
+ uint32_t pdm_rate;
+ uint32_t pdm_ch;
+} __packed;
+
#endif
diff --git a/include/sound/sof/dai-intel.h b/include/sound/sof/dai-intel.h
index 7a266f41983c..5b93b7292f5e 100644
--- a/include/sound/sof/dai-intel.h
+++ b/include/sound/sof/dai-intel.h
@@ -52,6 +52,8 @@
#define SOF_DAI_INTEL_SSP_CLKCTRL_MCLK_ES BIT(6)
/* bclk early start */
#define SOF_DAI_INTEL_SSP_CLKCTRL_BCLK_ES BIT(7)
+/* mclk always on */
+#define SOF_DAI_INTEL_SSP_CLKCTRL_MCLK_AON BIT(8)
/* DMIC max. four controllers for eight microphone channels */
#define SOF_DAI_INTEL_DMIC_NUM_CTRL 4
diff --git a/include/sound/sof/dai.h b/include/sound/sof/dai.h
index a818a0f0a226..21d98f31a9ca 100644
--- a/include/sound/sof/dai.h
+++ b/include/sound/sof/dai.h
@@ -111,7 +111,7 @@ struct sof_ipc_dai_config {
struct sof_ipc_dai_sai_params sai;
struct sof_ipc_dai_acp_params acpbt;
struct sof_ipc_dai_acp_params acpsp;
- struct sof_ipc_dai_acp_params acpdmic;
+ struct sof_ipc_dai_acpdmic_params acpdmic;
struct sof_ipc_dai_mtk_afe_params afe;
};
} __packed;
diff --git a/include/sound/sof/ipc4/header.h b/include/sound/sof/ipc4/header.h
index b8b8e5b5e3e1..a795deacc2ea 100644
--- a/include/sound/sof/ipc4/header.h
+++ b/include/sound/sof/ipc4/header.h
@@ -385,6 +385,14 @@ struct sof_ipc4_fw_version {
uint16_t build;
} __packed;
+/* Payload data for SOF_IPC4_MOD_SET_DX */
+struct sof_ipc4_dx_state_info {
+ /* core(s) to apply the change */
+ uint32_t core_mask;
+ /* core state: 0: put core_id to D3; 1: put core_id to D0 */
+ uint32_t dx_mask;
+} __packed __aligned(4);
+
/* Reply messages */
/*
diff --git a/include/sound/sof/stream.h b/include/sound/sof/stream.h
index 1db3bbc3e65d..9377113f13e4 100644
--- a/include/sound/sof/stream.h
+++ b/include/sound/sof/stream.h
@@ -86,9 +86,11 @@ struct sof_ipc_stream_params {
uint32_t host_period_bytes;
uint16_t no_stream_position; /**< 1 means don't send stream position */
uint8_t cont_update_posn; /**< 1 means continuous update stream position */
-
- uint8_t reserved[5];
+ uint8_t reserved0;
+ int16_t ext_data_length; /**< 0, means no extended data */
+ uint8_t reserved[2];
uint16_t chmap[SOF_IPC_MAX_CHANNELS]; /**< channel map - SOF_CHMAP_ */
+ uint8_t ext_data[]; /**< extended data */
} __packed;
/* PCM params info - SOF_IPC_STREAM_PCM_PARAMS */
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 8e68ace428d9..94d06ddfd80a 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -26,6 +26,7 @@ struct sock;
#define ISCSI_RX_THREAD_NAME "iscsi_trx"
#define ISCSI_TX_THREAD_NAME "iscsi_ttx"
#define ISCSI_IQN_LEN 224
+#define NA_AUTHENTICATION_INHERITED -1
/* struct iscsi_node_attrib sanity values */
#define NA_DATAOUT_TIMEOUT 3
@@ -715,6 +716,7 @@ struct iscsi_login {
} ____cacheline_aligned;
struct iscsi_node_attrib {
+ s32 authentication;
u32 dataout_timeout;
u32 dataout_timeout_retries;
u32 default_erl;
@@ -758,6 +760,12 @@ struct iscsi_node_acl {
struct iscsi_node_stat_grps node_stat_grps;
};
+static inline struct iscsi_node_acl *
+to_iscsi_nacl(struct se_node_acl *se_nacl)
+{
+ return container_of(se_nacl, struct iscsi_node_acl, se_node_acl);
+}
+
struct iscsi_tpg_attrib {
u32 authentication;
u32 login_timeout;
@@ -839,6 +847,12 @@ struct iscsi_portal_group {
struct list_head tpg_list;
} ____cacheline_aligned;
+static inline struct iscsi_portal_group *
+to_iscsi_tpg(struct se_portal_group *se_tpg)
+{
+ return container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg);
+}
+
struct iscsi_wwn_stat_grps {
struct config_group iscsi_stat_group;
struct config_group iscsi_instance_group;
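Illustrative use of the new cast helpers in place of open-coded container_of(); the surrounding function is hypothetical:

	static bool my_node_needs_auth(struct se_node_acl *se_nacl)
	{
		struct iscsi_node_acl *nacl = to_iscsi_nacl(se_nacl);

		return nacl->node_attrib.authentication !=
		       NA_AUTHENTICATION_INHERITED;
	}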
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 773963a1e0b5..a3c193df25b3 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -37,6 +37,7 @@ struct target_backend_ops {
struct se_dev_plug *(*plug_device)(struct se_device *se_dev);
void (*unplug_device)(struct se_dev_plug *se_plug);
+ bool (*configure_unmap)(struct se_device *se_dev);
ssize_t (*set_configfs_dev_params)(struct se_device *,
const char *, ssize_t);
ssize_t (*show_configfs_dev_params)(struct se_device *, char *);
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index c2b36f7d917d..8c920456edd9 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -665,9 +665,9 @@ struct se_dev_entry {
/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
struct kref pr_kref;
struct completion pr_comp;
- struct se_lun_acl __rcu *se_lun_acl;
+ struct se_lun_acl *se_lun_acl;
spinlock_t ua_lock;
- struct se_lun __rcu *se_lun;
+ struct se_lun *se_lun;
#define DEF_PR_REG_ACTIVE 1
unsigned long deve_flags;
struct list_head alua_port_list;
diff --git a/include/trace/events/9p.h b/include/trace/events/9p.h
index 78c5608a1648..4dfa6d7f83ba 100644
--- a/include/trace/events/9p.h
+++ b/include/trace/events/9p.h
@@ -77,6 +77,13 @@
EM( P9_TWSTAT, "P9_TWSTAT" ) \
EMe(P9_RWSTAT, "P9_RWSTAT" )
+
+#define P9_FID_REFTYPE \
+ EM( P9_FID_REF_CREATE, "create " ) \
+ EM( P9_FID_REF_GET, "get " ) \
+ EM( P9_FID_REF_PUT, "put " ) \
+ EMe(P9_FID_REF_DESTROY, "destroy" )
+
/* Define EM() to export the enums to userspace via TRACE_DEFINE_ENUM() */
#undef EM
#undef EMe
@@ -84,6 +91,21 @@
#define EMe(a, b) TRACE_DEFINE_ENUM(a);
P9_MSG_T
+P9_FID_REFTYPE
+
+/* And also use EM/EMe to define helper enums -- once */
+#ifndef __9P_DECLARE_TRACE_ENUMS_ONLY_ONCE
+#define __9P_DECLARE_TRACE_ENUMS_ONLY_ONCE
+#undef EM
+#undef EMe
+#define EM(a, b) a,
+#define EMe(a, b) a
+
+enum p9_fid_reftype {
+ P9_FID_REFTYPE
+} __mode(byte);
+
+#endif
/*
* Now redefine the EM() and EMe() macros to map the enums to the strings
@@ -96,6 +118,8 @@ P9_MSG_T
#define show_9p_op(type) \
__print_symbolic(type, P9_MSG_T)
+#define show_9p_fid_reftype(type) \
+ __print_symbolic(type, P9_FID_REFTYPE)
TRACE_EVENT(9p_client_req,
TP_PROTO(struct p9_client *clnt, int8_t type, int tag),
@@ -168,6 +192,30 @@ TRACE_EVENT(9p_protocol_dump,
__entry->tag, 0, __entry->line, 16, __entry->line + 16)
);
+
+TRACE_EVENT(9p_fid_ref,
+ TP_PROTO(struct p9_fid *fid, __u8 type),
+
+ TP_ARGS(fid, type),
+
+ TP_STRUCT__entry(
+ __field( int, fid )
+ __field( int, refcount )
+ __field( __u8, type )
+ ),
+
+ TP_fast_assign(
+ __entry->fid = fid->fid;
+ __entry->refcount = refcount_read(&fid->count);
+ __entry->type = type;
+ ),
+
+ TP_printk("%s fid %d, refcount %d",
+ show_9p_fid_reftype(__entry->type),
+ __entry->fid, __entry->refcount)
+);
+
+
#endif /* _TRACE_9P_H */
/* This part must be outside protection */
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
index 499f5fabd20f..e9d412d19dbb 100644
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -727,31 +727,31 @@ TRACE_EVENT(afs_cb_call,
);
TRACE_EVENT(afs_call,
- TP_PROTO(struct afs_call *call, enum afs_call_trace op,
- int usage, int outstanding, const void *where),
+ TP_PROTO(unsigned int call_debug_id, enum afs_call_trace op,
+ int ref, int outstanding, const void *where),
- TP_ARGS(call, op, usage, outstanding, where),
+ TP_ARGS(call_debug_id, op, ref, outstanding, where),
TP_STRUCT__entry(
__field(unsigned int, call )
__field(int, op )
- __field(int, usage )
+ __field(int, ref )
__field(int, outstanding )
__field(const void *, where )
),
TP_fast_assign(
- __entry->call = call->debug_id;
+ __entry->call = call_debug_id;
__entry->op = op;
- __entry->usage = usage;
+ __entry->ref = ref;
__entry->outstanding = outstanding;
__entry->where = where;
),
- TP_printk("c=%08x %s u=%d o=%d sp=%pSR",
+ TP_printk("c=%08x %s r=%d o=%d sp=%pSR",
__entry->call,
__print_symbolic(__entry->op, afs_call_traces),
- __entry->usage,
+ __entry->ref,
__entry->outstanding,
__entry->where)
);
@@ -1433,10 +1433,10 @@ TRACE_EVENT(afs_cb_miss,
);
TRACE_EVENT(afs_server,
- TP_PROTO(struct afs_server *server, int ref, int active,
+ TP_PROTO(unsigned int server_debug_id, int ref, int active,
enum afs_server_trace reason),
- TP_ARGS(server, ref, active, reason),
+ TP_ARGS(server_debug_id, ref, active, reason),
TP_STRUCT__entry(
__field(unsigned int, server )
@@ -1446,7 +1446,7 @@ TRACE_EVENT(afs_server,
),
TP_fast_assign(
- __entry->server = server->debug_id;
+ __entry->server = server_debug_id;
__entry->ref = ref;
__entry->active = active;
__entry->reason = reason;
@@ -1476,36 +1476,36 @@ TRACE_EVENT(afs_volume,
__entry->reason = reason;
),
- TP_printk("V=%llx %s u=%d",
+ TP_printk("V=%llx %s ur=%d",
__entry->vid,
__print_symbolic(__entry->reason, afs_volume_traces),
__entry->ref)
);
TRACE_EVENT(afs_cell,
- TP_PROTO(unsigned int cell_debug_id, int usage, int active,
+ TP_PROTO(unsigned int cell_debug_id, int ref, int active,
enum afs_cell_trace reason),
- TP_ARGS(cell_debug_id, usage, active, reason),
+ TP_ARGS(cell_debug_id, ref, active, reason),
TP_STRUCT__entry(
__field(unsigned int, cell )
- __field(int, usage )
+ __field(int, ref )
__field(int, active )
__field(int, reason )
),
TP_fast_assign(
__entry->cell = cell_debug_id;
- __entry->usage = usage;
+ __entry->ref = ref;
__entry->active = active;
__entry->reason = reason;
),
- TP_printk("L=%08x %s u=%d a=%d",
+ TP_printk("L=%08x %s r=%d a=%d",
__entry->cell,
__print_symbolic(__entry->reason, afs_cell_traces),
- __entry->usage,
+ __entry->ref,
__entry->active)
);
diff --git a/include/trace/events/devlink.h b/include/trace/events/devlink.h
index 2814f188d98c..24969184c534 100644
--- a/include/trace/events/devlink.h
+++ b/include/trace/events/devlink.h
@@ -186,7 +186,7 @@ TRACE_EVENT(devlink_trap_report,
__string(driver_name, devlink_to_dev(devlink)->driver->name)
__string(trap_name, metadata->trap_name)
__string(trap_group_name, metadata->trap_group_name)
- __dynamic_array(char, input_dev_name, IFNAMSIZ)
+ __array(char, input_dev_name, IFNAMSIZ)
),
TP_fast_assign(
@@ -197,15 +197,14 @@ TRACE_EVENT(devlink_trap_report,
__assign_str(driver_name, devlink_to_dev(devlink)->driver->name);
__assign_str(trap_name, metadata->trap_name);
__assign_str(trap_group_name, metadata->trap_group_name);
- __assign_str(input_dev_name,
- (input_dev ? input_dev->name : "NULL"));
+ strscpy(__entry->input_dev_name, input_dev ? input_dev->name : "NULL", IFNAMSIZ);
),
TP_printk("bus_name=%s dev_name=%s driver_name=%s trap_name=%s "
"trap_group_name=%s input_dev_name=%s", __get_str(bus_name),
__get_str(dev_name), __get_str(driver_name),
__get_str(trap_name), __get_str(trap_group_name),
- __get_str(input_dev_name))
+ __entry->input_dev_name)
);
#endif /* _TRACE_DEVLINK_H */
diff --git a/include/trace/events/fib.h b/include/trace/events/fib.h
index 6f2a4dc35e37..c2300c407f58 100644
--- a/include/trace/events/fib.h
+++ b/include/trace/events/fib.h
@@ -32,7 +32,7 @@ TRACE_EVENT(fib_table_lookup,
__array( __u8, gw6, 16 )
__field( u16, sport )
__field( u16, dport )
- __dynamic_array(char, name, IFNAMSIZ )
+ __array(char, name, IFNAMSIZ )
),
TP_fast_assign(
@@ -66,7 +66,7 @@ TRACE_EVENT(fib_table_lookup,
}
dev = nhc ? nhc->nhc_dev : NULL;
- __assign_str(name, dev ? dev->name : "-");
+ strlcpy(__entry->name, dev ? dev->name : "-", IFNAMSIZ);
if (nhc) {
if (nhc->nhc_gw_family == AF_INET) {
@@ -95,7 +95,7 @@ TRACE_EVENT(fib_table_lookup,
__entry->tb_id, __entry->oif, __entry->iif, __entry->proto,
__entry->src, __entry->sport, __entry->dst, __entry->dport,
__entry->tos, __entry->scope, __entry->flags,
- __get_str(name), __entry->gw4, __entry->gw6, __entry->err)
+ __entry->name, __entry->gw4, __entry->gw6, __entry->err)
);
#endif /* _TRACE_FIB_H */
diff --git a/include/trace/events/fib6.h b/include/trace/events/fib6.h
index c6abdcc77c12..6e821eb79450 100644
--- a/include/trace/events/fib6.h
+++ b/include/trace/events/fib6.h
@@ -31,7 +31,7 @@ TRACE_EVENT(fib6_table_lookup,
__field( u16, dport )
__field( u8, proto )
__field( u8, rt_type )
- __dynamic_array( char, name, IFNAMSIZ )
+ __array( char, name, IFNAMSIZ )
__array( __u8, gw, 16 )
),
@@ -63,9 +63,9 @@ TRACE_EVENT(fib6_table_lookup,
}
if (res->nh && res->nh->fib_nh_dev) {
- __assign_str(name, res->nh->fib_nh_dev);
+ strlcpy(__entry->name, res->nh->fib_nh_dev->name, IFNAMSIZ);
} else {
- __assign_str(name, "-");
+ strcpy(__entry->name, "-");
}
if (res->f6i == net->ipv6.fib6_null_entry) {
struct in6_addr in6_zero = {};
@@ -83,7 +83,7 @@ TRACE_EVENT(fib6_table_lookup,
__entry->tb_id, __entry->oif, __entry->iif, __entry->proto,
__entry->src, __entry->sport, __entry->dst, __entry->dport,
__entry->tos, __entry->scope, __entry->flags,
- __get_str(name), __entry->gw, __entry->err)
+ __entry->name, __entry->gw, __entry->err)
);
#endif /* _TRACE_FIB6_H */
diff --git a/include/trace/events/fscache.h b/include/trace/events/fscache.h
index cb3fb337e880..c078c48a8e6d 100644
--- a/include/trace/events/fscache.h
+++ b/include/trace/events/fscache.h
@@ -49,6 +49,7 @@ enum fscache_volume_trace {
enum fscache_cookie_trace {
fscache_cookie_collision,
fscache_cookie_discard,
+ fscache_cookie_failed,
fscache_cookie_get_attach_object,
fscache_cookie_get_end_access,
fscache_cookie_get_hash_collision,
@@ -131,6 +132,7 @@ enum fscache_access_trace {
#define fscache_cookie_traces \
EM(fscache_cookie_collision, "*COLLIDE*") \
EM(fscache_cookie_discard, "DISCARD ") \
+ EM(fscache_cookie_failed, "FAILED ") \
EM(fscache_cookie_get_attach_object, "GET attch") \
EM(fscache_cookie_get_hash_collision, "GET hcoll") \
EM(fscache_cookie_get_end_access, "GQ endac") \
diff --git a/include/trace/events/iscsi.h b/include/trace/events/iscsi.h
index 87408faf6e4e..8ff2a3ca5d75 100644
--- a/include/trace/events/iscsi.h
+++ b/include/trace/events/iscsi.h
@@ -26,12 +26,12 @@ DECLARE_EVENT_CLASS(iscsi_log_msg,
TP_STRUCT__entry(
__string(dname, dev_name(dev) )
- __dynamic_array(char, msg, ISCSI_MSG_MAX )
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(dname, dev_name(dev));
- vsnprintf(__get_str(msg), ISCSI_MSG_MAX, vaf->fmt, *vaf->va);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s: %s",__get_str(dname), __get_str(msg)
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 37e1e1a2d67d..3bd31ea23fee 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -282,7 +282,7 @@ DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,
TP_ARGS(gva, gfn)
);
-DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault,
+DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_repeated_fault,
TP_PROTO(u64 gva, u64 gfn),
diff --git a/include/trace/events/neigh.h b/include/trace/events/neigh.h
index 62bb17516713..5eaa1fa99171 100644
--- a/include/trace/events/neigh.h
+++ b/include/trace/events/neigh.h
@@ -30,7 +30,7 @@ TRACE_EVENT(neigh_create,
TP_STRUCT__entry(
__field(u32, family)
- __dynamic_array(char, dev, IFNAMSIZ )
+ __string(dev, dev ? dev->name : "NULL")
__field(int, entries)
__field(u8, created)
__field(u8, gc_exempt)
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index c708521e4ed5..77f14f7a11d4 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -40,6 +40,28 @@ DEFINE_EVENT(cpu, cpu_idle,
TP_ARGS(state, cpu_id)
);
+TRACE_EVENT(cpu_idle_miss,
+
+ TP_PROTO(unsigned int cpu_id, unsigned int state, bool below),
+
+ TP_ARGS(cpu_id, state, below),
+
+ TP_STRUCT__entry(
+ __field(u32, cpu_id)
+ __field(u32, state)
+ __field(bool, below)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu_id = cpu_id;
+ __entry->state = state;
+ __entry->below = below;
+ ),
+
+ TP_printk("cpu_id=%lu state=%lu type=%s", (unsigned long)__entry->cpu_id,
+ (unsigned long)__entry->state, (__entry->below)?"below":"above")
+);
+
TRACE_EVENT(powernv_throttle,
TP_PROTO(int chip_id, const char *reason, int pmax),
diff --git a/include/trace/events/qla.h b/include/trace/events/qla.h
index 5857cf682ee7..e7fd55e7dc3d 100644
--- a/include/trace/events/qla.h
+++ b/include/trace/events/qla.h
@@ -22,11 +22,11 @@ DECLARE_EVENT_CLASS(qla_log_event,
TP_STRUCT__entry(
__string(buf, buf)
- __dynamic_array(char, msg, QLA_MSG_MAX)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(buf, buf);
- vsnprintf(__get_str(msg), QLA_MSG_MAX, vaf->fmt, *vaf->va);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s %s", __get_str(buf), __get_str(msg))
diff --git a/include/trace/events/rv.h b/include/trace/events/rv.h
new file mode 100644
index 000000000000..56592da9301c
--- /dev/null
+++ b/include/trace/events/rv.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rv
+
+#if !defined(_TRACE_RV_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RV_H
+
+#include <linux/rv.h>
+#include <linux/tracepoint.h>
+
+#ifdef CONFIG_DA_MON_EVENTS_IMPLICIT
+DECLARE_EVENT_CLASS(event_da_monitor,
+
+ TP_PROTO(char *state, char *event, char *next_state, bool final_state),
+
+ TP_ARGS(state, event, next_state, final_state),
+
+ TP_STRUCT__entry(
+ __array( char, state, MAX_DA_NAME_LEN )
+ __array( char, event, MAX_DA_NAME_LEN )
+ __array( char, next_state, MAX_DA_NAME_LEN )
+ __field( bool, final_state )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->state, state, MAX_DA_NAME_LEN);
+ memcpy(__entry->event, event, MAX_DA_NAME_LEN);
+ memcpy(__entry->next_state, next_state, MAX_DA_NAME_LEN);
+ __entry->final_state = final_state;
+ ),
+
+ TP_printk("%s x %s -> %s %s",
+ __entry->state,
+ __entry->event,
+ __entry->next_state,
+ __entry->final_state ? "(final)" : "")
+);
+
+DECLARE_EVENT_CLASS(error_da_monitor,
+
+ TP_PROTO(char *state, char *event),
+
+ TP_ARGS(state, event),
+
+ TP_STRUCT__entry(
+ __array( char, state, MAX_DA_NAME_LEN )
+ __array( char, event, MAX_DA_NAME_LEN )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->state, state, MAX_DA_NAME_LEN);
+ memcpy(__entry->event, event, MAX_DA_NAME_LEN);
+ ),
+
+ TP_printk("event %s not expected in the state %s",
+ __entry->event,
+ __entry->state)
+);
+
+#ifdef CONFIG_RV_MON_WIP
+DEFINE_EVENT(event_da_monitor, event_wip,
+ TP_PROTO(char *state, char *event, char *next_state, bool final_state),
+ TP_ARGS(state, event, next_state, final_state));
+
+DEFINE_EVENT(error_da_monitor, error_wip,
+ TP_PROTO(char *state, char *event),
+ TP_ARGS(state, event));
+#endif /* CONFIG_RV_MON_WIP */
+#endif /* CONFIG_DA_MON_EVENTS_IMPLICIT */
+
+#ifdef CONFIG_DA_MON_EVENTS_ID
+DECLARE_EVENT_CLASS(event_da_monitor_id,
+
+ TP_PROTO(int id, char *state, char *event, char *next_state, bool final_state),
+
+ TP_ARGS(id, state, event, next_state, final_state),
+
+ TP_STRUCT__entry(
+ __field( int, id )
+ __array( char, state, MAX_DA_NAME_LEN )
+ __array( char, event, MAX_DA_NAME_LEN )
+ __array( char, next_state, MAX_DA_NAME_LEN )
+ __field( bool, final_state )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->state, state, MAX_DA_NAME_LEN);
+ memcpy(__entry->event, event, MAX_DA_NAME_LEN);
+ memcpy(__entry->next_state, next_state, MAX_DA_NAME_LEN);
+ __entry->id = id;
+ __entry->final_state = final_state;
+ ),
+
+ TP_printk("%d: %s x %s -> %s %s",
+ __entry->id,
+ __entry->state,
+ __entry->event,
+ __entry->next_state,
+ __entry->final_state ? "(final)" : "")
+);
+
+DECLARE_EVENT_CLASS(error_da_monitor_id,
+
+ TP_PROTO(int id, char *state, char *event),
+
+ TP_ARGS(id, state, event),
+
+ TP_STRUCT__entry(
+ __field( int, id )
+ __array( char, state, MAX_DA_NAME_LEN )
+ __array( char, event, MAX_DA_NAME_LEN )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->state, state, MAX_DA_NAME_LEN);
+ memcpy(__entry->event, event, MAX_DA_NAME_LEN);
+ __entry->id = id;
+ ),
+
+ TP_printk("%d: event %s not expected in the state %s",
+ __entry->id,
+ __entry->event,
+ __entry->state)
+);
+
+#ifdef CONFIG_RV_MON_WWNR
+/* id is the pid of the task */
+DEFINE_EVENT(event_da_monitor_id, event_wwnr,
+ TP_PROTO(int id, char *state, char *event, char *next_state, bool final_state),
+ TP_ARGS(id, state, event, next_state, final_state));
+
+DEFINE_EVENT(error_da_monitor_id, error_wwnr,
+ TP_PROTO(int id, char *state, char *event),
+ TP_ARGS(id, state, event));
+#endif /* CONFIG_RV_MON_WWNR */
+
+#endif /* CONFIG_DA_MON_EVENTS_ID */
+#endif /* _TRACE_RV_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#include <trace/define_trace.h>
diff --git a/include/trace/events/rwmmio.h b/include/trace/events/rwmmio.h
new file mode 100644
index 000000000000..de41159216c1
--- /dev/null
+++ b/include/trace/events/rwmmio.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rwmmio
+
+#if !defined(_TRACE_RWMMIO_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RWMMIO_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(rwmmio_rw_template,
+
+ TP_PROTO(unsigned long caller, u64 val, u8 width, volatile void __iomem *addr),
+
+ TP_ARGS(caller, val, width, addr),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, caller)
+ __field(unsigned long, addr)
+ __field(u64, val)
+ __field(u8, width)
+ ),
+
+ TP_fast_assign(
+ __entry->caller = caller;
+ __entry->val = val;
+ __entry->addr = (unsigned long)addr;
+ __entry->width = width;
+ ),
+
+ TP_printk("%pS width=%d val=%#llx addr=%#lx",
+ (void *)__entry->caller, __entry->width,
+ __entry->val, __entry->addr)
+);
+
+DEFINE_EVENT(rwmmio_rw_template, rwmmio_write,
+ TP_PROTO(unsigned long caller, u64 val, u8 width, volatile void __iomem *addr),
+ TP_ARGS(caller, val, width, addr)
+);
+
+DEFINE_EVENT(rwmmio_rw_template, rwmmio_post_write,
+ TP_PROTO(unsigned long caller, u64 val, u8 width, volatile void __iomem *addr),
+ TP_ARGS(caller, val, width, addr)
+);
+
+TRACE_EVENT(rwmmio_read,
+
+ TP_PROTO(unsigned long caller, u8 width, const volatile void __iomem *addr),
+
+ TP_ARGS(caller, width, addr),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, caller)
+ __field(unsigned long, addr)
+ __field(u8, width)
+ ),
+
+ TP_fast_assign(
+ __entry->caller = caller;
+ __entry->addr = (unsigned long)addr;
+ __entry->width = width;
+ ),
+
+ TP_printk("%pS width=%d addr=%#lx",
+ (void *)__entry->caller, __entry->width, __entry->addr)
+);
+
+TRACE_EVENT(rwmmio_post_read,
+
+ TP_PROTO(unsigned long caller, u64 val, u8 width, const volatile void __iomem *addr),
+
+ TP_ARGS(caller, val, width, addr),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, caller)
+ __field(unsigned long, addr)
+ __field(u64, val)
+ __field(u8, width)
+ ),
+
+ TP_fast_assign(
+ __entry->caller = caller;
+ __entry->val = val;
+ __entry->addr = (unsigned long)addr;
+ __entry->width = width;
+ ),
+
+ TP_printk("%pS width=%d val=%#llx addr=%#lx",
+ (void *)__entry->caller, __entry->width,
+ __entry->val, __entry->addr)
+);
+
+#endif /* _TRACE_RWMMIO_H */
+
+#include <trace/define_trace.h>
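
These four events bracket MMIO accesses: rwmmio_write/rwmmio_read fire before the access and the *_post_* variants after it, carrying the caller, the access width and the target address. A hedged sketch of an instrumented write helper is below; the helper name is made up, the width is assumed to be in bits, and the in-tree consumer wires the tracepoints up elsewhere:

#include <linux/io.h>
#include <linux/kernel.h>		/* _RET_IP_ */
#include <trace/events/rwmmio.h>

/* Hypothetical traced 32-bit write: emit the pre/post events around the access */
static inline void traced_writel(u32 val, volatile void __iomem *addr)
{
	trace_rwmmio_write(_RET_IP_, val, 32, addr);	/* width assumed in bits */
	writel(val, addr);
	trace_rwmmio_post_write(_RET_IP_, val, 32, addr);
}
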
diff --git a/include/trace/events/scsi.h b/include/trace/events/scsi.h
index 370ade0d4093..a2c7befd451a 100644
--- a/include/trace/events/scsi.h
+++ b/include/trace/events/scsi.h
@@ -166,6 +166,8 @@ TRACE_EVENT(scsi_dispatch_cmd_start,
__field( unsigned int, lun )
__field( unsigned int, opcode )
__field( unsigned int, cmd_len )
+ __field( int, driver_tag)
+ __field( int, scheduler_tag)
__field( unsigned int, data_sglen )
__field( unsigned int, prot_sglen )
__field( unsigned char, prot_op )
@@ -179,6 +181,8 @@ TRACE_EVENT(scsi_dispatch_cmd_start,
__entry->lun = cmd->device->lun;
__entry->opcode = cmd->cmnd[0];
__entry->cmd_len = cmd->cmd_len;
+ __entry->driver_tag = scsi_cmd_to_rq(cmd)->tag;
+ __entry->scheduler_tag = scsi_cmd_to_rq(cmd)->internal_tag;
__entry->data_sglen = scsi_sg_count(cmd);
__entry->prot_sglen = scsi_prot_sg_count(cmd);
__entry->prot_op = scsi_get_prot_op(cmd);
@@ -186,11 +190,11 @@ TRACE_EVENT(scsi_dispatch_cmd_start,
),
TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u" \
- " prot_op=%s cmnd=(%s %s raw=%s)",
+ " prot_op=%s driver_tag=%d scheduler_tag=%d cmnd=(%s %s raw=%s)",
__entry->host_no, __entry->channel, __entry->id,
__entry->lun, __entry->data_sglen, __entry->prot_sglen,
- show_prot_op_name(__entry->prot_op),
- show_opcode_name(__entry->opcode),
+ show_prot_op_name(__entry->prot_op), __entry->driver_tag,
+ __entry->scheduler_tag, show_opcode_name(__entry->opcode),
__parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len),
__print_hex(__get_dynamic_array(cmnd), __entry->cmd_len))
);
@@ -209,6 +213,8 @@ TRACE_EVENT(scsi_dispatch_cmd_error,
__field( int, rtn )
__field( unsigned int, opcode )
__field( unsigned int, cmd_len )
+ __field( int, driver_tag)
+ __field( int, scheduler_tag)
__field( unsigned int, data_sglen )
__field( unsigned int, prot_sglen )
__field( unsigned char, prot_op )
@@ -223,6 +229,8 @@ TRACE_EVENT(scsi_dispatch_cmd_error,
__entry->rtn = rtn;
__entry->opcode = cmd->cmnd[0];
__entry->cmd_len = cmd->cmd_len;
+ __entry->driver_tag = scsi_cmd_to_rq(cmd)->tag;
+ __entry->scheduler_tag = scsi_cmd_to_rq(cmd)->internal_tag;
__entry->data_sglen = scsi_sg_count(cmd);
__entry->prot_sglen = scsi_prot_sg_count(cmd);
__entry->prot_op = scsi_get_prot_op(cmd);
@@ -230,11 +238,12 @@ TRACE_EVENT(scsi_dispatch_cmd_error,
),
TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u" \
- " prot_op=%s cmnd=(%s %s raw=%s) rtn=%d",
+ " prot_op=%s driver_tag=%d scheduler_tag=%d cmnd=(%s %s raw=%s)" \
+ " rtn=%d",
__entry->host_no, __entry->channel, __entry->id,
__entry->lun, __entry->data_sglen, __entry->prot_sglen,
- show_prot_op_name(__entry->prot_op),
- show_opcode_name(__entry->opcode),
+ show_prot_op_name(__entry->prot_op), __entry->driver_tag,
+ __entry->scheduler_tag, show_opcode_name(__entry->opcode),
__parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len),
__print_hex(__get_dynamic_array(cmnd), __entry->cmd_len),
__entry->rtn)
@@ -254,6 +263,8 @@ DECLARE_EVENT_CLASS(scsi_cmd_done_timeout_template,
__field( int, result )
__field( unsigned int, opcode )
__field( unsigned int, cmd_len )
+ __field( int, driver_tag)
+ __field( int, scheduler_tag)
__field( unsigned int, data_sglen )
__field( unsigned int, prot_sglen )
__field( unsigned char, prot_op )
@@ -268,19 +279,21 @@ DECLARE_EVENT_CLASS(scsi_cmd_done_timeout_template,
__entry->result = cmd->result;
__entry->opcode = cmd->cmnd[0];
__entry->cmd_len = cmd->cmd_len;
+ __entry->driver_tag = scsi_cmd_to_rq(cmd)->tag;
+ __entry->scheduler_tag = scsi_cmd_to_rq(cmd)->internal_tag;
__entry->data_sglen = scsi_sg_count(cmd);
__entry->prot_sglen = scsi_prot_sg_count(cmd);
__entry->prot_op = scsi_get_prot_op(cmd);
memcpy(__get_dynamic_array(cmnd), cmd->cmnd, cmd->cmd_len);
),
- TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u " \
- "prot_sgl=%u prot_op=%s cmnd=(%s %s raw=%s) result=(driver=" \
- "%s host=%s message=%s status=%s)",
+ TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u " \
+ "prot_op=%s driver_tag=%d scheduler_tag=%d cmnd=(%s %s raw=%s) " \
+ "result=(driver=%s host=%s message=%s status=%s)",
__entry->host_no, __entry->channel, __entry->id,
__entry->lun, __entry->data_sglen, __entry->prot_sglen,
- show_prot_op_name(__entry->prot_op),
- show_opcode_name(__entry->opcode),
+ show_prot_op_name(__entry->prot_op), __entry->driver_tag,
+ __entry->scheduler_tag, show_opcode_name(__entry->opcode),
__parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len),
__print_hex(__get_dynamic_array(cmnd), __entry->cmd_len),
"DRIVER_OK",
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index b61d9c90fa26..f48f2ab9d238 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -1266,6 +1266,26 @@ TRACE_EVENT(xprt_reserve,
)
);
+TRACE_EVENT(xs_data_ready,
+ TP_PROTO(
+ const struct rpc_xprt *xprt
+ ),
+
+ TP_ARGS(xprt),
+
+ TP_STRUCT__entry(
+ __string(addr, xprt->address_strings[RPC_DISPLAY_ADDR])
+ __string(port, xprt->address_strings[RPC_DISPLAY_PORT])
+ ),
+
+ TP_fast_assign(
+ __assign_str(addr, xprt->address_strings[RPC_DISPLAY_ADDR]);
+ __assign_str(port, xprt->address_strings[RPC_DISPLAY_PORT]);
+ ),
+
+ TP_printk("peer=[%s]:%s", __get_str(addr), __get_str(port))
+);
+
TRACE_EVENT(xs_stream_read_data,
TP_PROTO(struct rpc_xprt *xprt, ssize_t err, size_t total),
@@ -1989,20 +2009,24 @@ TRACE_EVENT(svc_wake_up,
TRACE_EVENT(svc_alloc_arg_err,
TP_PROTO(
- unsigned int pages
+ unsigned int requested,
+ unsigned int allocated
),
- TP_ARGS(pages),
+ TP_ARGS(requested, allocated),
TP_STRUCT__entry(
- __field(unsigned int, pages)
+ __field(unsigned int, requested)
+ __field(unsigned int, allocated)
),
TP_fast_assign(
- __entry->pages = pages;
+ __entry->requested = requested;
+ __entry->allocated = allocated;
),
- TP_printk("pages=%u", __entry->pages)
+ TP_printk("requested=%u allocated=%u",
+ __entry->requested, __entry->allocated)
);
DECLARE_EVENT_CLASS(svc_deferred_event,
diff --git a/include/trace/stages/stage1_struct_define.h b/include/trace/stages/stage1_struct_define.h
index a16783419687..1b7bab60434c 100644
--- a/include/trace/stages/stage1_struct_define.h
+++ b/include/trace/stages/stage1_struct_define.h
@@ -26,6 +26,9 @@
#undef __string_len
#define __string_len(item, src, len) __dynamic_array(char, item, -1)
+#undef __vstring
+#define __vstring(item, fmt, ap) __dynamic_array(char, item, -1)
+
#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(char, item, -1)
diff --git a/include/trace/stages/stage2_data_offsets.h b/include/trace/stages/stage2_data_offsets.h
index 42fd1e8813ec..1b7a8f764fdd 100644
--- a/include/trace/stages/stage2_data_offsets.h
+++ b/include/trace/stages/stage2_data_offsets.h
@@ -32,6 +32,9 @@
#undef __string_len
#define __string_len(item, src, len) __dynamic_array(char, item, -1)
+#undef __vstring
+#define __vstring(item, fmt, ap) __dynamic_array(char, item, -1)
+
#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
diff --git a/include/trace/stages/stage4_event_fields.h b/include/trace/stages/stage4_event_fields.h
index e80cdc397a43..a8fb25f39a99 100644
--- a/include/trace/stages/stage4_event_fields.h
+++ b/include/trace/stages/stage4_event_fields.h
@@ -2,16 +2,18 @@
/* Stage 4 definitions for creating trace events */
+#define ALIGN_STRUCTFIELD(type) ((int)(__alignof__(struct {type b;})))
+
#undef __field_ext
#define __field_ext(_type, _item, _filter_type) { \
.type = #_type, .name = #_item, \
- .size = sizeof(_type), .align = __alignof__(_type), \
+ .size = sizeof(_type), .align = ALIGN_STRUCTFIELD(_type), \
.is_signed = is_signed_type(_type), .filter_type = _filter_type },
#undef __field_struct_ext
#define __field_struct_ext(_type, _item, _filter_type) { \
.type = #_type, .name = #_item, \
- .size = sizeof(_type), .align = __alignof__(_type), \
+ .size = sizeof(_type), .align = ALIGN_STRUCTFIELD(_type), \
0, .filter_type = _filter_type },
#undef __field
@@ -23,7 +25,7 @@
#undef __array
#define __array(_type, _item, _len) { \
.type = #_type"["__stringify(_len)"]", .name = #_item, \
- .size = sizeof(_type[_len]), .align = __alignof__(_type), \
+ .size = sizeof(_type[_len]), .align = ALIGN_STRUCTFIELD(_type), \
.is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER },
#undef __dynamic_array
@@ -38,6 +40,9 @@
#undef __string_len
#define __string_len(item, src, len) __dynamic_array(char, item, -1)
+#undef __vstring
+#define __vstring(item, fmt, ap) __dynamic_array(char, item, -1)
+
#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
diff --git a/include/trace/stages/stage5_get_offsets.h b/include/trace/stages/stage5_get_offsets.h
index 7ee5931300e6..fba4c24ed9e6 100644
--- a/include/trace/stages/stage5_get_offsets.h
+++ b/include/trace/stages/stage5_get_offsets.h
@@ -39,6 +39,10 @@
#undef __string_len
#define __string_len(item, src, len) __dynamic_array(char, item, (len) + 1)
+#undef __vstring
+#define __vstring(item, fmt, ap) __dynamic_array(char, item, \
+ __trace_event_vstr_len(fmt, ap))
+
#undef __rel_dynamic_array
#define __rel_dynamic_array(type, item, len) \
__item_length = (len) * sizeof(type); \
diff --git a/include/trace/stages/stage6_event_callback.h b/include/trace/stages/stage6_event_callback.h
index e1724f73594b..3c554a585320 100644
--- a/include/trace/stages/stage6_event_callback.h
+++ b/include/trace/stages/stage6_event_callback.h
@@ -24,6 +24,9 @@
#undef __string_len
#define __string_len(item, src, len) __dynamic_array(char, item, -1)
+#undef __vstring
+#define __vstring(item, fmt, ap) __dynamic_array(char, item, -1)
+
#undef __assign_str
#define __assign_str(dst, src) \
strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
@@ -35,6 +38,15 @@
__get_str(dst)[len] = '\0'; \
} while(0)
+#undef __assign_vstr
+#define __assign_vstr(dst, fmt, va) \
+ do { \
+ va_list __cp_va; \
+ va_copy(__cp_va, *(va)); \
+ vsnprintf(__get_str(dst), TRACE_EVENT_STR_MAX, fmt, __cp_va); \
+ va_end(__cp_va); \
+ } while (0)
+
#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
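
Taken together, __vstring() reserves a dynamic array sized by __trace_event_vstr_len() at the get-offsets stage, and __assign_vstr() va_copy()s and vsnprintf()s into it at assign time, so an event can record a printf-style message without a fixed-size buffer. A hedged sketch of a hypothetical event using the pair (the event name and its callers are invented; this would live in a trace events header with the usual TRACE_SYSTEM boilerplate):

/* Hypothetical event: record a message passed down as fmt plus a va_list * */
TRACE_EVENT(sample_vstr,

	TP_PROTO(const char *fmt, va_list *args),

	TP_ARGS(fmt, args),

	TP_STRUCT__entry(
		__vstring(msg, fmt, args)	/* sized via __trace_event_vstr_len() */
	),

	TP_fast_assign(
		__assign_vstr(msg, fmt, args);	/* va_copy() + vsnprintf() as above */
	),

	TP_printk("%s", __get_str(msg))
);
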
diff --git a/include/uapi/asm-generic/termbits-common.h b/include/uapi/asm-generic/termbits-common.h
index 4d084fe8def5..4a6a79f28b21 100644
--- a/include/uapi/asm-generic/termbits-common.h
+++ b/include/uapi/asm-generic/termbits-common.h
@@ -46,6 +46,7 @@ typedef unsigned int speed_t;
#define EXTA B19200
#define EXTB B38400
+#define ADDRB 0x20000000 /* address bit */
#define CMSPAR 0x40000000 /* mark or space (stick) parity */
#define CRTSCTS 0x80000000 /* flow control */
diff --git a/include/uapi/linux/atm_zatm.h b/include/uapi/linux/atm_zatm.h
new file mode 100644
index 000000000000..5135027b93c1
--- /dev/null
+++ b/include/uapi/linux/atm_zatm.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* atm_zatm.h - Driver-specific declarations of the ZATM driver (for use by
+ driver-specific utilities) */
+
+/* Written 1995-1999 by Werner Almesberger, EPFL LRC/ICA */
+
+
+#ifndef LINUX_ATM_ZATM_H
+#define LINUX_ATM_ZATM_H
+
+/*
+ * Note: non-kernel programs including this file must also include
+ * sys/types.h for struct timeval
+ */
+
+#include <linux/atmapi.h>
+#include <linux/atmioc.h>
+
+#define ZATM_GETPOOL _IOW('a',ATMIOC_SARPRV+1,struct atmif_sioc)
+ /* get pool statistics */
+#define ZATM_GETPOOLZ _IOW('a',ATMIOC_SARPRV+2,struct atmif_sioc)
+ /* get statistics and zero */
+#define ZATM_SETPOOL _IOW('a',ATMIOC_SARPRV+3,struct atmif_sioc)
+ /* set pool parameters */
+
+struct zatm_pool_info {
+ int ref_count; /* free buffer pool usage counters */
+ int low_water,high_water; /* refill parameters */
+ int rqa_count,rqu_count; /* queue condition counters */
+ int offset,next_off; /* alignment optimizations: offset */
+ int next_cnt,next_thres; /* repetition counter and threshold */
+};
+
+struct zatm_pool_req {
+ int pool_num; /* pool number */
+ struct zatm_pool_info info; /* actual information */
+};
+
+#define ZATM_OAM_POOL 0 /* free buffer pool for OAM cells */
+#define ZATM_AAL0_POOL 1 /* free buffer pool for AAL0 cells */
+#define ZATM_AAL5_POOL_BASE 2 /* first AAL5 free buffer pool */
+#define ZATM_LAST_POOL ZATM_AAL5_POOL_BASE+10 /* max. 64 kB */
+
+#define ZATM_TIMER_HISTORY_SIZE 16 /* number of timer adjustments to
+ record; must be 2^n */
+
+#endif
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index 2b9f5e9985e5..c7b056af9ef0 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -420,6 +420,7 @@ typedef struct elf64_shdr {
#define NT_S390_GS_CB 0x30b /* s390 guarded storage registers */
#define NT_S390_GS_BC 0x30c /* s390 guarded storage broadcast control block */
#define NT_S390_RI_CB 0x30d /* s390 runtime instrumentation */
+#define NT_S390_PV_CPU_DATA 0x30e /* s390 protvirt cpu dump data */
#define NT_ARM_VFP 0x400 /* ARM VFP/NEON registers */
#define NT_ARM_TLS 0x401 /* ARM TLS register */
#define NT_ARM_HW_BREAK 0x402 /* ARM hardware breakpoint registers */
diff --git a/include/uapi/linux/f2fs.h b/include/uapi/linux/f2fs.h
index 352a822d4370..3121d127d5aa 100644
--- a/include/uapi/linux/f2fs.h
+++ b/include/uapi/linux/f2fs.h
@@ -13,7 +13,7 @@
#define F2FS_IOC_COMMIT_ATOMIC_WRITE _IO(F2FS_IOCTL_MAGIC, 2)
#define F2FS_IOC_START_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 3)
#define F2FS_IOC_RELEASE_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 4)
-#define F2FS_IOC_ABORT_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 5)
+#define F2FS_IOC_ABORT_ATOMIC_WRITE _IO(F2FS_IOCTL_MAGIC, 5)
#define F2FS_IOC_GARBAGE_COLLECT _IOW(F2FS_IOCTL_MAGIC, 6, __u32)
#define F2FS_IOC_WRITE_CHECKPOINT _IO(F2FS_IOCTL_MAGIC, 7)
#define F2FS_IOC_DEFRAGMENT _IOWR(F2FS_IOCTL_MAGIC, 8, \
diff --git a/include/uapi/linux/genetlink.h b/include/uapi/linux/genetlink.h
index d83f214b4134..ddba3ca01e39 100644
--- a/include/uapi/linux/genetlink.h
+++ b/include/uapi/linux/genetlink.h
@@ -87,6 +87,8 @@ enum {
__CTRL_ATTR_MCAST_GRP_MAX,
};
+#define CTRL_ATTR_MCAST_GRP_MAX (__CTRL_ATTR_MCAST_GRP_MAX - 1)
+
enum {
CTRL_ATTR_POLICY_UNSPEC,
CTRL_ATTR_POLICY_DO,
@@ -96,7 +98,6 @@ enum {
CTRL_ATTR_POLICY_DUMP_MAX = __CTRL_ATTR_POLICY_DUMP_MAX - 1
};
-#define CTRL_ATTR_MCAST_GRP_MAX (__CTRL_ATTR_MCAST_GRP_MAX - 1)
-
+#define CTRL_ATTR_POLICY_MAX (__CTRL_ATTR_POLICY_DUMP_MAX - 1)
#endif /* _UAPI__LINUX_GENERIC_NETLINK_H */
diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h
index bce7c43657d5..095299c75828 100644
--- a/include/uapi/linux/idxd.h
+++ b/include/uapi/linux/idxd.h
@@ -89,14 +89,14 @@ enum iax_opcode {
IAX_OPCODE_CRC64,
IAX_OPCODE_ZERO_DECOMP_32 = 0x48,
IAX_OPCODE_ZERO_DECOMP_16,
- IAX_OPCODE_DECOMP_32 = 0x4c,
- IAX_OPCODE_DECOMP_16,
+ IAX_OPCODE_ZERO_COMP_32 = 0x4c,
+ IAX_OPCODE_ZERO_COMP_16,
IAX_OPCODE_SCAN = 0x50,
IAX_OPCODE_SET_MEMBER,
IAX_OPCODE_EXTRACT,
IAX_OPCODE_SELECT,
IAX_OPCODE_RLE_BURST,
- IAX_OPCDE_FIND_UNIQUE,
+ IAX_OPCODE_FIND_UNIQUE,
IAX_OPCODE_EXPAND,
};
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 1463cfecb56b..9e0b5c8d92ce 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -12,6 +12,10 @@
#include <linux/types.h>
#include <linux/time_types.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/*
* IO submission data structure (Submission Queue Entry)
*/
@@ -661,4 +665,8 @@ struct io_uring_recvmsg_out {
__u32 flags;
};
+#ifdef __cplusplus
+}
+#endif
+
#endif
diff --git a/include/uapi/linux/netfilter_ipv6/ip6t_LOG.h b/include/uapi/linux/netfilter_ipv6/ip6t_LOG.h
index 23e91a9c2583..0b7b16dbdec2 100644
--- a/include/uapi/linux/netfilter_ipv6/ip6t_LOG.h
+++ b/include/uapi/linux/netfilter_ipv6/ip6t_LOG.h
@@ -17,4 +17,4 @@ struct ip6t_log_info {
char prefix[30];
};
-#endif /*_IPT_LOG_H*/
+#endif /* _IP6T_LOG_H */
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 108f8523fa04..57b8e2ffb1dd 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -737,7 +737,8 @@
#define PCI_EXT_CAP_ID_DVSEC 0x23 /* Designated Vendor-Specific */
#define PCI_EXT_CAP_ID_DLF 0x25 /* Data Link Feature */
#define PCI_EXT_CAP_ID_PL_16GT 0x26 /* Physical Layer 16.0 GT/s */
-#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PL_16GT
+#define PCI_EXT_CAP_ID_DOE 0x2E /* Data Object Exchange */
+#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_DOE
#define PCI_EXT_CAP_DSN_SIZEOF 12
#define PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF 40
@@ -1103,4 +1104,30 @@
#define PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK 0x000000F0
#define PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT 4
+/* Data Object Exchange */
+#define PCI_DOE_CAP 0x04 /* DOE Capabilities Register */
+#define PCI_DOE_CAP_INT_SUP 0x00000001 /* Interrupt Support */
+#define PCI_DOE_CAP_INT_MSG_NUM 0x00000ffe /* Interrupt Message Number */
+#define PCI_DOE_CTRL 0x08 /* DOE Control Register */
+#define PCI_DOE_CTRL_ABORT 0x00000001 /* DOE Abort */
+#define PCI_DOE_CTRL_INT_EN 0x00000002 /* DOE Interrupt Enable */
+#define PCI_DOE_CTRL_GO 0x80000000 /* DOE Go */
+#define PCI_DOE_STATUS 0x0c /* DOE Status Register */
+#define PCI_DOE_STATUS_BUSY 0x00000001 /* DOE Busy */
+#define PCI_DOE_STATUS_INT_STATUS 0x00000002 /* DOE Interrupt Status */
+#define PCI_DOE_STATUS_ERROR 0x00000004 /* DOE Error */
+#define PCI_DOE_STATUS_DATA_OBJECT_READY 0x80000000 /* Data Object Ready */
+#define PCI_DOE_WRITE 0x10 /* DOE Write Data Mailbox Register */
+#define PCI_DOE_READ 0x14 /* DOE Read Data Mailbox Register */
+
+/* DOE Data Object - note not actually registers */
+#define PCI_DOE_DATA_OBJECT_HEADER_1_VID 0x0000ffff
+#define PCI_DOE_DATA_OBJECT_HEADER_1_TYPE 0x00ff0000
+#define PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH 0x0003ffff
+
+#define PCI_DOE_DATA_OBJECT_DISC_REQ_3_INDEX 0x000000ff
+#define PCI_DOE_DATA_OBJECT_DISC_RSP_3_VID 0x0000ffff
+#define PCI_DOE_DATA_OBJECT_DISC_RSP_3_PROTOCOL 0x00ff0000
+#define PCI_DOE_DATA_OBJECT_DISC_RSP_3_NEXT_INDEX 0xff000000
+
#endif /* LINUX_PCI_REGS_H */
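
The DOE mailbox defined above is driven by polling: software writes the data object into PCI_DOE_WRITE, sets PCI_DOE_CTRL_GO, and waits for PCI_DOE_STATUS_DATA_OBJECT_READY (or PCI_DOE_STATUS_ERROR) before reading PCI_DOE_READ. A hedged sketch of the GO/poll step using these masks, with timeouts and the mailbox payload handling omitted:

#include <linux/pci.h>
#include <linux/delay.h>

/*
 * Minimal sketch: start an exchange and wait for the response to be ready.
 * 'doe' is the config-space offset of a DOE extended capability, e.g. found
 * via pci_find_next_ext_capability(pdev, 0, PCI_EXT_CAP_ID_DOE).
 */
static int doe_go_and_wait(struct pci_dev *pdev, int doe)
{
	u32 status;

	pci_write_config_dword(pdev, doe + PCI_DOE_CTRL, PCI_DOE_CTRL_GO);

	do {
		usleep_range(100, 200);
		pci_read_config_dword(pdev, doe + PCI_DOE_STATUS, &status);
		if (status & PCI_DOE_STATUS_ERROR)
			return -EIO;
	} while (!(status & PCI_DOE_STATUS_DATA_OBJECT_READY));

	return 0;
}
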
diff --git a/include/uapi/linux/serial.h b/include/uapi/linux/serial.h
index fa6b16e5fdd8..cea06924b295 100644
--- a/include/uapi/linux/serial.h
+++ b/include/uapi/linux/serial.h
@@ -126,10 +126,26 @@ struct serial_rs485 {
#define SER_RS485_TERMINATE_BUS (1 << 5) /* Enable bus
termination
(if supported) */
+
+/* RS-485 addressing mode */
+#define SER_RS485_ADDRB (1 << 6) /* Enable addressing mode */
+#define SER_RS485_ADDR_RECV (1 << 7) /* Receive address filter */
+#define SER_RS485_ADDR_DEST (1 << 8) /* Destination address */
+
__u32 delay_rts_before_send; /* Delay before send (milliseconds) */
__u32 delay_rts_after_send; /* Delay after send (milliseconds) */
- __u32 padding[5]; /* Memory is cheap, new structs
- are a royal PITA .. */
+
+ /* The fields below are defined by flags */
+ union {
+ __u32 padding[5]; /* Memory is cheap, new structs are a pain */
+
+ struct {
+ __u8 addr_recv;
+ __u8 addr_dest;
+ __u8 padding0[2];
+ __u32 padding1[4];
+ };
+ };
};
/*
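
With SER_RS485_ADDRB set, the new addr_recv/addr_dest bytes in the union above become meaningful, gated by SER_RS485_ADDR_RECV and SER_RS485_ADDR_DEST respectively. A hedged userspace sketch that enables receive-address filtering through the existing TIOCSRS485 ioctl (the address value is arbitrary):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/serial.h>

/* Sketch: enable RS-485 addressing mode and filter on receive address 0x05 */
static int enable_rs485_addr_filter(int fd)
{
	struct serial_rs485 rs485;

	memset(&rs485, 0, sizeof(rs485));
	rs485.flags = SER_RS485_ENABLED | SER_RS485_ADDRB | SER_RS485_ADDR_RECV;
	rs485.addr_recv = 0x05;		/* only meaningful with SER_RS485_ADDR_RECV */

	return ioctl(fd, TIOCSRS485, &rs485);
}
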
diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h
index 6faf502b7860..3ba34d8378bd 100644
--- a/include/uapi/linux/serial_core.h
+++ b/include/uapi/linux/serial_core.h
@@ -124,10 +124,6 @@
/* TXX9 type number */
#define PORT_TXX9 64
-/* NEC VR4100 series SIU/DSIU */
-#define PORT_VR41XX_SIU 65
-#define PORT_VR41XX_DSIU 66
-
/* Samsung S3C2400 SoC */
#define PORT_S3C2400 67
diff --git a/include/uapi/linux/serial_reg.h b/include/uapi/linux/serial_reg.h
index f51bc8f36813..bab3b39266cc 100644
--- a/include/uapi/linux/serial_reg.h
+++ b/include/uapi/linux/serial_reg.h
@@ -139,7 +139,7 @@
#define UART_LSR_PE 0x04 /* Parity error indicator */
#define UART_LSR_OE 0x02 /* Overrun error indicator */
#define UART_LSR_DR 0x01 /* Receiver data ready */
-#define UART_LSR_BRK_ERROR_BITS 0x1E /* BI, FE, PE, OE bits */
+#define UART_LSR_BRK_ERROR_BITS (UART_LSR_BI|UART_LSR_FE|UART_LSR_PE|UART_LSR_OE)
#define UART_MSR 6 /* In: Modem Status Register */
#define UART_MSR_DCD 0x80 /* Data Carrier Detect */
@@ -150,7 +150,7 @@
#define UART_MSR_TERI 0x04 /* Trailing edge ring indicator */
#define UART_MSR_DDSR 0x02 /* Delta DSR */
#define UART_MSR_DCTS 0x01 /* Delta CTS */
-#define UART_MSR_ANY_DELTA 0x0F /* Any of the delta bits! */
+#define UART_MSR_ANY_DELTA (UART_MSR_DDCD|UART_MSR_TERI|UART_MSR_DDSR|UART_MSR_DCTS)
#define UART_SCR 7 /* I/O: Scratch Register */
diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
index 7272f85d6d6a..0723a9cce747 100644
--- a/include/uapi/linux/swab.h
+++ b/include/uapi/linux/swab.h
@@ -102,7 +102,7 @@ static inline __attribute_const__ __u32 __fswahb32(__u32 val)
#define __swab16(x) (__u16)__builtin_bswap16((__u16)(x))
#else
#define __swab16(x) \
- (__builtin_constant_p((__u16)(x)) ? \
+ (__u16)(__builtin_constant_p(x) ? \
___constant_swab16(x) : \
__fswab16(x))
#endif
@@ -115,7 +115,7 @@ static inline __attribute_const__ __u32 __fswahb32(__u32 val)
#define __swab32(x) (__u32)__builtin_bswap32((__u32)(x))
#else
#define __swab32(x) \
- (__builtin_constant_p((__u32)(x)) ? \
+ (__u32)(__builtin_constant_p(x) ? \
___constant_swab32(x) : \
__fswab32(x))
#endif
@@ -128,7 +128,7 @@ static inline __attribute_const__ __u32 __fswahb32(__u32 val)
#define __swab64(x) (__u64)__builtin_bswap64((__u64)(x))
#else
#define __swab64(x) \
- (__builtin_constant_p((__u64)(x)) ? \
+ (__u64)(__builtin_constant_p(x) ? \
___constant_swab64(x) : \
__fswab64(x))
#endif
diff --git a/include/uapi/linux/ublk_cmd.h b/include/uapi/linux/ublk_cmd.h
index ca33092354ab..677edaab2b66 100644
--- a/include/uapi/linux/ublk_cmd.h
+++ b/include/uapi/linux/ublk_cmd.h
@@ -15,6 +15,8 @@
#define UBLK_CMD_DEL_DEV 0x05
#define UBLK_CMD_START_DEV 0x06
#define UBLK_CMD_STOP_DEV 0x07
+#define UBLK_CMD_SET_PARAMS 0x08
+#define UBLK_CMD_GET_PARAMS 0x09
/*
* IO commands, issued by ublk server, and handled by ublk driver.
@@ -28,12 +30,21 @@
* this IO request, request's handling result is committed to ublk
* driver, meantime FETCH_REQ is piggyback, and FETCH_REQ has to be
* handled before completing io request.
+ *
+ * NEED_GET_DATA: only used for write requests, to set the io buffer address
+ * and copy data. When NEED_GET_DATA is set, ublksrv has to issue the
+ * UBLK_IO_NEED_GET_DATA command after the ublk driver returns
+ * UBLK_IO_RES_NEED_GET_DATA.
+ *
+ * It is only used if ublksrv sets the UBLK_F_NEED_GET_DATA flag
+ * while starting a ublk device.
*/
#define UBLK_IO_FETCH_REQ 0x20
#define UBLK_IO_COMMIT_AND_FETCH_REQ 0x21
+#define UBLK_IO_NEED_GET_DATA 0x22
/* only ABORT means that no re-fetch */
#define UBLK_IO_RES_OK 0
+#define UBLK_IO_RES_NEED_GET_DATA 1
#define UBLK_IO_RES_ABORT (-ENODEV)
#define UBLKSRV_CMD_BUF_OFFSET 0
@@ -54,6 +65,15 @@
*/
#define UBLK_F_URING_CMD_COMP_IN_TASK (1ULL << 1)
+/*
+ * User should issue io cmd again for write requests to
+ * set io buffer address and copy data from bio vectors
+ * to the userspace io buffer.
+ *
+ * In this mode, task_work is not used.
+ */
+#define UBLK_F_NEED_GET_DATA (1UL << 2)
+
/* device state */
#define UBLK_S_DEV_DEAD 0
#define UBLK_S_DEV_LIVE 1
@@ -78,22 +98,23 @@ struct ublksrv_ctrl_cmd {
struct ublksrv_ctrl_dev_info {
__u16 nr_hw_queues;
__u16 queue_depth;
- __u16 block_size;
__u16 state;
+ __u16 pad0;
- __u32 rq_max_blocks;
+ __u32 max_io_buf_bytes;
__u32 dev_id;
- __u64 dev_blocks;
-
__s32 ublksrv_pid;
- __s32 reserved0;
+ __u32 pad1;
+
__u64 flags;
- __u64 flags_reserved;
/* For ublksrv internal use, invisible to ublk driver */
__u64 ublksrv_flags;
- __u64 reserved1[9];
+
+ __u64 reserved0;
+ __u64 reserved1;
+ __u64 reserved2;
};
#define UBLK_IO_OP_READ 0
@@ -158,4 +179,49 @@ struct ublksrv_io_cmd {
__u64 addr;
};
+struct ublk_param_basic {
+#define UBLK_ATTR_READ_ONLY (1 << 0)
+#define UBLK_ATTR_ROTATIONAL (1 << 1)
+#define UBLK_ATTR_VOLATILE_CACHE (1 << 2)
+#define UBLK_ATTR_FUA (1 << 3)
+ __u32 attrs;
+ __u8 logical_bs_shift;
+ __u8 physical_bs_shift;
+ __u8 io_opt_shift;
+ __u8 io_min_shift;
+
+ __u32 max_sectors;
+ __u32 chunk_sectors;
+
+ __u64 dev_sectors;
+ __u64 virt_boundary_mask;
+};
+
+struct ublk_param_discard {
+ __u32 discard_alignment;
+
+ __u32 discard_granularity;
+ __u32 max_discard_sectors;
+
+ __u32 max_write_zeroes_sectors;
+ __u16 max_discard_segments;
+ __u16 reserved0;
+};
+
+struct ublk_params {
+ /*
+ * Total length of parameters. Userspace has to set 'len' for both the
+ * SET_PARAMS and GET_PARAMS commands, and the driver may update 'len'
+ * if the two sides use different versions of 'ublk_params'; the same
+ * applies to the 'types' field.
+ */
+ __u32 len;
+#define UBLK_PARAM_TYPE_BASIC (1 << 0)
+#define UBLK_PARAM_TYPE_DISCARD (1 << 1)
+ __u32 types; /* types of parameter included */
+
+ struct ublk_param_basic basic;
+ struct ublk_param_discard discard;
+};
+
#endif
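
UBLK_CMD_SET_PARAMS replaces the old fixed block_size/dev_blocks fields: the server fills struct ublk_params, sets len and types, and hands it to the driver through an io_uring passthrough command on the control device before UBLK_CMD_START_DEV. A hedged sketch of filling the structure for a 1 GiB device with 512-byte logical blocks; the io_uring submission plumbing itself is omitted:

#include <string.h>
#include <linux/ublk_cmd.h>

/* Sketch: describe a 1 GiB device with 512 B logical / 4 KiB physical blocks */
static void fill_params(struct ublk_params *p)
{
	memset(p, 0, sizeof(*p));
	p->len   = sizeof(*p);			/* driver may trim this to its own size */
	p->types = UBLK_PARAM_TYPE_BASIC;	/* no discard parameters in this sketch */

	p->basic.logical_bs_shift  = 9;		/* 512 B */
	p->basic.physical_bs_shift = 12;	/* 4 KiB */
	p->basic.io_opt_shift      = 12;
	p->basic.io_min_shift      = 9;
	p->basic.max_sectors       = 1024;	/* per-IO limit, in 512 B sectors */
	p->basic.dev_sectors       = 1ULL << 21;	/* 1 GiB / 512 B */
}
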
diff --git a/include/uapi/linux/vduse.h b/include/uapi/linux/vduse.h
index 7cfe1c1280c0..11bd48c72c6c 100644
--- a/include/uapi/linux/vduse.h
+++ b/include/uapi/linux/vduse.h
@@ -210,6 +210,53 @@ struct vduse_vq_eventfd {
*/
#define VDUSE_VQ_INJECT_IRQ _IOW(VDUSE_BASE, 0x17, __u32)
+/**
+ * struct vduse_iova_umem - userspace memory configuration for one IOVA region
+ * @uaddr: start address of userspace memory, it must be aligned to page size
+ * @iova: start of the IOVA region
+ * @size: size of the IOVA region
+ * @reserved: for future use, needs to be initialized to zero
+ *
+ * Structure used by VDUSE_IOTLB_REG_UMEM and VDUSE_IOTLB_DEREG_UMEM
+ * ioctls to register/de-register userspace memory for IOVA regions
+ */
+struct vduse_iova_umem {
+ __u64 uaddr;
+ __u64 iova;
+ __u64 size;
+ __u64 reserved[3];
+};
+
+/* Register userspace memory for IOVA regions */
+#define VDUSE_IOTLB_REG_UMEM _IOW(VDUSE_BASE, 0x18, struct vduse_iova_umem)
+
+/* De-register the userspace memory. Caller should set iova and size field. */
+#define VDUSE_IOTLB_DEREG_UMEM _IOW(VDUSE_BASE, 0x19, struct vduse_iova_umem)
+
+/**
+ * struct vduse_iova_info - information of one IOVA region
+ * @start: start of the IOVA region
+ * @last: last of the IOVA region
+ * @capability: capability of the IOVA region
+ * @reserved: for future use, needs to be initialized to zero
+ *
+ * Structure used by VDUSE_IOTLB_GET_INFO ioctl to get information of
+ * one IOVA region.
+ */
+struct vduse_iova_info {
+ __u64 start;
+ __u64 last;
+#define VDUSE_IOVA_CAP_UMEM (1 << 0)
+ __u64 capability;
+ __u64 reserved[3];
+};
+
+/*
+ * Find the first IOVA region that overlaps with the range [start, last]
+ * and return some information on it. Caller should set start and last fields.
+ */
+#define VDUSE_IOTLB_GET_INFO _IOWR(VDUSE_BASE, 0x1a, struct vduse_iova_info)
+
/* The control messages definition for read(2)/write(2) on /dev/vduse/$NAME */
/**
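
The intended flow is to query an IOVA region with VDUSE_IOTLB_GET_INFO first, and only register userspace memory when the region advertises VDUSE_IOVA_CAP_UMEM. A hedged userspace sketch against a /dev/vduse/$NAME fd; allocation and alignment of the backing buffer are elided:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/vduse.h>

/* Sketch: back the first UMEM-capable IOVA region with 'uaddr' (page aligned) */
static int reg_umem(int dev_fd, __u64 uaddr)
{
	struct vduse_iova_info info;
	struct vduse_iova_umem umem;

	memset(&info, 0, sizeof(info));
	info.start = 0;
	info.last  = ~0ULL;		/* search the whole IOVA space */
	if (ioctl(dev_fd, VDUSE_IOTLB_GET_INFO, &info))
		return -1;

	if (!(info.capability & VDUSE_IOVA_CAP_UMEM))
		return -1;		/* region cannot be backed by userspace memory */

	memset(&umem, 0, sizeof(umem));
	umem.uaddr = uaddr;
	umem.iova  = info.start;
	umem.size  = info.last - info.start + 1;
	return ioctl(dev_fd, VDUSE_IOTLB_REG_UMEM, &umem);
}
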
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index cab645d4a645..f9f115a7c75b 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -171,4 +171,13 @@
#define VHOST_VDPA_SET_GROUP_ASID _IOW(VHOST_VIRTIO, 0x7C, \
struct vhost_vring_state)
+/* Suspend a device so it does not process virtqueue requests anymore
+ *
+ * After the ioctl returns, the device must preserve all the necessary state
+ * (the virtqueue vring base plus the possible device specific states) that is
+ * required for restoring in the future. The device must not change its
+ * configuration after that point.
+ */
+#define VHOST_VDPA_SUSPEND _IO(VHOST_VIRTIO, 0x7D)
+
#endif
diff --git a/include/uapi/linux/vhost_types.h b/include/uapi/linux/vhost_types.h
index 391331a10879..53601ce2c20a 100644
--- a/include/uapi/linux/vhost_types.h
+++ b/include/uapi/linux/vhost_types.h
@@ -161,5 +161,7 @@ struct vhost_vdpa_iova_range {
* message
*/
#define VHOST_BACKEND_F_IOTLB_ASID 0x3
+/* Device can be suspended */
+#define VHOST_BACKEND_F_SUSPEND 0x4
#endif
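
VHOST_VDPA_SUSPEND is only meaningful when the backend advertises VHOST_BACKEND_F_SUSPEND, defined in vhost_types.h just above. A hedged userspace sketch that checks the backend feature before suspending a vhost-vdpa fd:

#include <sys/ioctl.h>
#include <linux/vhost.h>

/* Sketch: suspend the device only if the backend reports the capability */
static int vdpa_suspend(int vhost_fd)
{
	__u64 features = 0;

	if (ioctl(vhost_fd, VHOST_GET_BACKEND_FEATURES, &features))
		return -1;

	if (!(features & (1ULL << VHOST_BACKEND_F_SUSPEND)))
		return -1;	/* backend cannot be suspended */

	return ioctl(vhost_fd, VHOST_VDPA_SUSPEND);
}
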
diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h
index f0fb0ae021c0..3c05162bc988 100644
--- a/include/uapi/linux/virtio_config.h
+++ b/include/uapi/linux/virtio_config.h
@@ -52,7 +52,7 @@
* rest are per-device feature bits.
*/
#define VIRTIO_TRANSPORT_F_START 28
-#define VIRTIO_TRANSPORT_F_END 38
+#define VIRTIO_TRANSPORT_F_END 41
#ifndef VIRTIO_CONFIG_NO_LEGACY
/* Do we get callbacks when the ring is completely used, even if we've
@@ -98,4 +98,9 @@
* Does the device support Single Root I/O Virtualization?
*/
#define VIRTIO_F_SR_IOV 37
+
+/*
+ * This feature indicates that the driver can reset a queue individually.
+ */
+#define VIRTIO_F_RING_RESET 40
#endif /* _UAPI_LINUX_VIRTIO_CONFIG_H */
diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h
index 3f55a4215f11..29ced55514d4 100644
--- a/include/uapi/linux/virtio_net.h
+++ b/include/uapi/linux/virtio_net.h
@@ -56,7 +56,7 @@
#define VIRTIO_NET_F_MQ 22 /* Device supports Receive Flow
* Steering */
#define VIRTIO_NET_F_CTRL_MAC_ADDR 23 /* Set MAC address */
-
+#define VIRTIO_NET_F_NOTF_COAL 53 /* Guest can handle notifications coalescing */
#define VIRTIO_NET_F_HASH_REPORT 57 /* Supports hash report */
#define VIRTIO_NET_F_RSS 60 /* Supports RSS RX steering */
#define VIRTIO_NET_F_RSC_EXT 61 /* extended coalescing info */
@@ -355,4 +355,36 @@ struct virtio_net_hash_config {
#define VIRTIO_NET_CTRL_GUEST_OFFLOADS 5
#define VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET 0
+/*
+ * Control notifications coalescing.
+ *
+ * Request the device to change the notifications coalescing parameters.
+ *
+ * Available with the VIRTIO_NET_F_NOTF_COAL feature bit.
+ */
+#define VIRTIO_NET_CTRL_NOTF_COAL 6
+/*
+ * Set the tx-usecs/tx-max-packets parameters.
+ * tx-usecs - Maximum number of usecs to delay a TX notification.
+ * tx-max-packets - Maximum number of packets to send before a TX notification.
+ */
+struct virtio_net_ctrl_coal_tx {
+ __le32 tx_max_packets;
+ __le32 tx_usecs;
+};
+
+#define VIRTIO_NET_CTRL_NOTF_COAL_TX_SET 0
+
+/*
+ * Set the rx-usecs/rx-max-packets parameters.
+ * rx-usecs - Maximum number of usecs to delay a RX notification.
+ * rx-max-frames - Maximum number of packets to receive before a RX notification.
+ */
+struct virtio_net_ctrl_coal_rx {
+ __le32 rx_max_packets;
+ __le32 rx_usecs;
+};
+
+#define VIRTIO_NET_CTRL_NOTF_COAL_RX_SET 1
+
#endif /* _UAPI_LINUX_VIRTIO_NET_H */
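
These structures travel on the control virtqueue with class VIRTIO_NET_CTRL_NOTF_COAL and the *_SET commands, and their fields are little-endian on the wire. A hedged driver-side sketch follows; virtnet_send_command() and struct virtnet_info stand in for whatever control-queue helper and private state the driver provides, and are not part of this header:

#include <linux/scatterlist.h>
#include <linux/virtio_net.h>

/*
 * Sketch: ask the device to coalesce TX notifications. Assumes a helper that
 * sends one sg buffer on the control virtqueue (e.g. a driver-internal
 * virtnet_send_command()); both the helper and 'struct virtnet_info' are
 * assumptions of this example.
 */
static bool set_tx_coalescing(struct virtnet_info *vi)
{
	struct virtio_net_ctrl_coal_tx coal_tx;
	struct scatterlist sg;

	coal_tx.tx_usecs = cpu_to_le32(64);		/* delay up to 64 us */
	coal_tx.tx_max_packets = cpu_to_le32(32);	/* or 32 packets, whichever first */
	sg_init_one(&sg, &coal_tx, sizeof(coal_tx));

	return virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
				    VIRTIO_NET_CTRL_NOTF_COAL_TX_SET, &sg);
}
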
diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h
index 3a86f36d7e3d..f703afc7ad31 100644
--- a/include/uapi/linux/virtio_pci.h
+++ b/include/uapi/linux/virtio_pci.h
@@ -202,6 +202,8 @@ struct virtio_pci_cfg_cap {
#define VIRTIO_PCI_COMMON_Q_AVAILHI 44
#define VIRTIO_PCI_COMMON_Q_USEDLO 48
#define VIRTIO_PCI_COMMON_Q_USEDHI 52
+#define VIRTIO_PCI_COMMON_Q_NDATA 56
+#define VIRTIO_PCI_COMMON_Q_RESET 58
#endif /* VIRTIO_PCI_NO_MODERN */
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h
index 476d3e5c0fe7..f8c20d3de8da 100644
--- a/include/uapi/linux/virtio_ring.h
+++ b/include/uapi/linux/virtio_ring.h
@@ -93,15 +93,21 @@
#define VRING_USED_ALIGN_SIZE 4
#define VRING_DESC_ALIGN_SIZE 16
-/* Virtio ring descriptors: 16 bytes. These can chain together via "next". */
+/**
+ * struct vring_desc - Virtio ring descriptors,
+ * 16 bytes long. These can chain together via @next.
+ *
+ * @addr: buffer address (guest-physical)
+ * @len: buffer length
+ * @flags: descriptor flags
+ * @next: index of the next descriptor in the chain,
+ * if the VRING_DESC_F_NEXT flag is set. We chain unused
+ * descriptors via this, too.
+ */
struct vring_desc {
- /* Address (guest-physical). */
__virtio64 addr;
- /* Length. */
__virtio32 len;
- /* The flags as indicated above. */
__virtio16 flags;
- /* We chain unused descriptors via this, too */
__virtio16 next;
};
diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
index b1f3e6a8f11a..4f84ea7ee14c 100644
--- a/include/uapi/linux/xfrm.h
+++ b/include/uapi/linux/xfrm.h
@@ -296,7 +296,7 @@ enum xfrm_attr_type_t {
XFRMA_ETIMER_THRESH,
XFRMA_SRCADDR, /* xfrm_address_t */
XFRMA_COADDR, /* xfrm_address_t */
- XFRMA_LASTUSED, /* unsigned long */
+ XFRMA_LASTUSED, /* __u64 */
XFRMA_POLICY_TYPE, /* struct xfrm_userpolicy_type */
XFRMA_MIGRATE,
XFRMA_ALG_AEAD, /* struct xfrm_algo_aead */
diff --git a/include/uapi/mtd/mtd-abi.h b/include/uapi/mtd/mtd-abi.h
index b869990c2db2..890d9e5b76d7 100644
--- a/include/uapi/mtd/mtd-abi.h
+++ b/include/uapi/mtd/mtd-abi.h
@@ -69,8 +69,8 @@ enum {
* struct mtd_write_req - data structure for requesting a write operation
*
* @start: start address
- * @len: length of data buffer
- * @ooblen: length of OOB buffer
+ * @len: length of data buffer (only lower 32 bits are used)
+ * @ooblen: length of OOB buffer (only lower 32 bits are used)
* @usr_data: user-provided data buffer
* @usr_oob: user-provided OOB buffer
* @mode: MTD mode (see "MTD operation modes")
diff --git a/include/uapi/rdma/erdma-abi.h b/include/uapi/rdma/erdma-abi.h
new file mode 100644
index 000000000000..b7a0222f978f
--- /dev/null
+++ b/include/uapi/rdma/erdma-abi.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2020-2022, Alibaba Group.
+ */
+
+#ifndef __ERDMA_USER_H__
+#define __ERDMA_USER_H__
+
+#include <linux/types.h>
+
+#define ERDMA_ABI_VERSION 1
+
+struct erdma_ureq_create_cq {
+ __aligned_u64 db_record_va;
+ __aligned_u64 qbuf_va;
+ __u32 qbuf_len;
+ __u32 rsvd0;
+};
+
+struct erdma_uresp_create_cq {
+ __u32 cq_id;
+ __u32 num_cqe;
+};
+
+struct erdma_ureq_create_qp {
+ __aligned_u64 db_record_va;
+ __aligned_u64 qbuf_va;
+ __u32 qbuf_len;
+ __u32 rsvd0;
+};
+
+struct erdma_uresp_create_qp {
+ __u32 qp_id;
+ __u32 num_sqe;
+ __u32 num_rqe;
+ __u32 rq_offset;
+};
+
+struct erdma_uresp_alloc_ctx {
+ __u32 dev_id;
+ __u32 pad;
+ __u32 sdb_type;
+ __u32 sdb_offset;
+ __aligned_u64 sdb;
+ __aligned_u64 rdb;
+ __aligned_u64 cdb;
+};
+
+#endif
diff --git a/include/uapi/rdma/ib_user_ioctl_verbs.h b/include/uapi/rdma/ib_user_ioctl_verbs.h
index 3072e5d6b692..7dd56210226f 100644
--- a/include/uapi/rdma/ib_user_ioctl_verbs.h
+++ b/include/uapi/rdma/ib_user_ioctl_verbs.h
@@ -250,6 +250,7 @@ enum rdma_driver_id {
RDMA_DRIVER_QIB,
RDMA_DRIVER_EFA,
RDMA_DRIVER_SIW,
+ RDMA_DRIVER_ERDMA,
};
enum ib_uverbs_gid_type {
diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
index e539c84d63f1..3bee490eb585 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_cmds.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
@@ -228,6 +228,7 @@ enum mlx5_ib_objects {
MLX5_IB_OBJECT_VAR,
MLX5_IB_OBJECT_PP,
MLX5_IB_OBJECT_UAR,
+ MLX5_IB_OBJECT_STEERING_ANCHOR,
};
enum mlx5_ib_flow_matcher_create_attrs {
@@ -248,6 +249,22 @@ enum mlx5_ib_flow_matcher_methods {
MLX5_IB_METHOD_FLOW_MATCHER_DESTROY,
};
+enum mlx5_ib_flow_steering_anchor_create_attrs {
+ MLX5_IB_ATTR_STEERING_ANCHOR_CREATE_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_STEERING_ANCHOR_FT_TYPE,
+ MLX5_IB_ATTR_STEERING_ANCHOR_PRIORITY,
+ MLX5_IB_ATTR_STEERING_ANCHOR_FT_ID,
+};
+
+enum mlx5_ib_flow_steering_anchor_destroy_attrs {
+ MLX5_IB_ATTR_STEERING_ANCHOR_DESTROY_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+};
+
+enum mlx5_ib_steering_anchor_methods {
+ MLX5_IB_METHOD_STEERING_ANCHOR_CREATE = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_METHOD_STEERING_ANCHOR_DESTROY,
+};
+
enum mlx5_ib_device_query_context_attrs {
MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX = (1U << UVERBS_ID_NS_SHIFT),
};
diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h
index 9555f31c8425..3aef123dbd7f 100644
--- a/include/uapi/sound/compress_offload.h
+++ b/include/uapi/sound/compress_offload.h
@@ -123,7 +123,7 @@ struct snd_compr_codec_caps {
} __attribute__((packed, aligned(4)));
/**
- * enum sndrv_compress_encoder
+ * enum sndrv_compress_encoder - encoder metadata key
* @SNDRV_COMPRESS_ENCODER_PADDING: no of samples appended by the encoder at the
* end of the track
* @SNDRV_COMPRESS_ENCODER_DELAY: no of samples inserted by the encoder at the
diff --git a/include/uapi/sound/compress_params.h b/include/uapi/sound/compress_params.h
index 79b14389ae41..726361716919 100644
--- a/include/uapi/sound/compress_params.h
+++ b/include/uapi/sound/compress_params.h
@@ -250,7 +250,7 @@ struct snd_enc_wma {
/**
- * struct snd_enc_vorbis
+ * struct snd_enc_vorbis - Vorbis encoder parameters
* @quality: Sets encoding quality to n, between -1 (low) and 10 (high).
* In the default mode of operation, the quality level is 3.
* Normal quality range is 0 - 10.
@@ -279,7 +279,7 @@ struct snd_enc_vorbis {
/**
- * struct snd_enc_real
+ * struct snd_enc_real - RealAudio encoder parameters
* @quant_bits: number of coupling quantization bits in the stream
* @start_region: coupling start region in the stream
* @num_regions: number of regions value
@@ -294,7 +294,7 @@ struct snd_enc_real {
} __attribute__((packed, aligned(4)));
/**
- * struct snd_enc_flac
+ * struct snd_enc_flac - FLAC encoder parameters
* @num: serial number, valid only for OGG formats
* needs to be set by application
* @gain: Add replay gain tags
diff --git a/include/uapi/sound/sof/abi.h b/include/uapi/sound/sof/abi.h
index 0e7dccdc25fd..3566630ca965 100644
--- a/include/uapi/sound/sof/abi.h
+++ b/include/uapi/sound/sof/abi.h
@@ -24,9 +24,11 @@
#ifndef __INCLUDE_UAPI_SOUND_SOF_ABI_H__
#define __INCLUDE_UAPI_SOUND_SOF_ABI_H__
+#include <linux/types.h>
+
/* SOF ABI version major, minor and patch numbers */
#define SOF_ABI_MAJOR 3
-#define SOF_ABI_MINOR 21
+#define SOF_ABI_MINOR 23
#define SOF_ABI_PATCH 0
/* SOF ABI version number. Format within 32bit word is MMmmmppp */
diff --git a/include/uapi/sound/sof/header.h b/include/uapi/sound/sof/header.h
index dbf137516522..e9bba93a5399 100644
--- a/include/uapi/sound/sof/header.h
+++ b/include/uapi/sound/sof/header.h
@@ -26,4 +26,34 @@ struct sof_abi_hdr {
__u32 data[]; /**< Component data - opaque to core */
} __packed;
+#define SOF_MANIFEST_DATA_TYPE_NHLT 1
+
+/**
+ * struct sof_manifest_tlv - SOF manifest TLV data
+ * @type: type of data
+ * @size: data size (not including the size of this struct)
+ * @data: payload data
+ */
+struct sof_manifest_tlv {
+ __le32 type;
+ __le32 size;
+ __u8 data[];
+};
+
+/**
+ * struct sof_manifest - SOF topology manifest
+ * @abi_major: Major ABI version
+ * @abi_minor: Minor ABI version
+ * @abi_patch: ABI patch
+ * @count: count of tlv items
+ * @items: consecutive variable size tlv items
+ */
+struct sof_manifest {
+ __le16 abi_major;
+ __le16 abi_minor;
+ __le16 abi_patch;
+ __le16 count;
+ struct sof_manifest_tlv items[];
+};
+
#endif
diff --git a/include/uapi/sound/sof/tokens.h b/include/uapi/sound/sof/tokens.h
index b72fa385bebf..5caf75cadaf8 100644
--- a/include/uapi/sound/sof/tokens.h
+++ b/include/uapi/sound/sof/tokens.h
@@ -52,11 +52,17 @@
#define SOF_TKN_SCHED_FRAMES 204
#define SOF_TKN_SCHED_TIME_DOMAIN 205
#define SOF_TKN_SCHED_DYNAMIC_PIPELINE 206
+#define SOF_TKN_SCHED_LP_MODE 207
+#define SOF_TKN_SCHED_MEM_USAGE 208
/* volume */
#define SOF_TKN_VOLUME_RAMP_STEP_TYPE 250
#define SOF_TKN_VOLUME_RAMP_STEP_MS 251
+#define SOF_TKN_GAIN_RAMP_TYPE 260
+#define SOF_TKN_GAIN_RAMP_DURATION 261
+#define SOF_TKN_GAIN_VAL 262
+
/* SRC */
#define SOF_TKN_SRC_RATE_IN 300
#define SOF_TKN_SRC_RATE_OUT 301
@@ -79,6 +85,9 @@
*/
#define SOF_TKN_COMP_CORE_ID 404
#define SOF_TKN_COMP_UUID 405
+#define SOF_TKN_COMP_CPC 406
+#define SOF_TKN_COMP_IS_PAGES 409
+#define SOF_TKN_COMP_NUM_AUDIO_FORMATS 410
/* SSP */
#define SOF_TKN_INTEL_SSP_CLKS_CONTROL 500
@@ -145,4 +154,39 @@
#define SOF_TKN_MEDIATEK_AFE_CH 1601
#define SOF_TKN_MEDIATEK_AFE_FORMAT 1602
+/* MIXER */
+#define SOF_TKN_MIXER_TYPE 1700
+
+/* ACPDMIC */
+#define SOF_TKN_AMD_ACPDMIC_RATE 1800
+#define SOF_TKN_AMD_ACPDMIC_CH 1801
+
+/* CAVS AUDIO FORMAT */
+#define SOF_TKN_CAVS_AUDIO_FORMAT_IN_RATE 1900
+#define SOF_TKN_CAVS_AUDIO_FORMAT_IN_BIT_DEPTH 1901
+#define SOF_TKN_CAVS_AUDIO_FORMAT_IN_VALID_BIT 1902
+#define SOF_TKN_CAVS_AUDIO_FORMAT_IN_CHANNELS 1903
+#define SOF_TKN_CAVS_AUDIO_FORMAT_IN_CH_MAP 1904
+#define SOF_TKN_CAVS_AUDIO_FORMAT_IN_CH_CFG 1905
+#define SOF_TKN_CAVS_AUDIO_FORMAT_IN_INTERLEAVING_STYLE 1906
+#define SOF_TKN_CAVS_AUDIO_FORMAT_IN_FMT_CFG 1907
+#define SOF_TKN_CAVS_AUDIO_FORMAT_IN_SAMPLE_TYPE 1908
+/* intentional token numbering discontinuity, reserved for future use */
+#define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_RATE 1930
+#define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_BIT_DEPTH 1931
+#define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_VALID_BIT 1932
+#define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_CHANNELS 1933
+#define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_CH_MAP 1934
+#define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_CH_CFG 1935
+#define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_INTERLEAVING_STYLE 1936
+#define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_FMT_CFG 1937
+#define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_SAMPLE_TYPE 1938
+/* intentional token numbering discontinuity, reserved for future use */
+#define SOF_TKN_CAVS_AUDIO_FORMAT_IBS 1970
+#define SOF_TKN_CAVS_AUDIO_FORMAT_OBS 1971
+#define SOF_TKN_CAVS_AUDIO_FORMAT_DMA_BUFFER_SIZE 1972
+
+/* COPIER */
+#define SOF_TKN_INTEL_COPIER_NODE_TYPE 1980
+
#endif
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index a92271421718..7fe1a926cd99 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -577,6 +577,18 @@ enum ufshcd_quirks {
* support physical host configuration.
*/
UFSHCD_QUIRK_SKIP_PH_CONFIGURATION = 1 << 16,
+
+ /*
+ * This quirk needs to be enabled if the host controller advertises the
+ * 64-bit addressing capability but it doesn't actually work.
+ */
+ UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS = 1 << 17,
+
+ /*
+ * This quirk needs to be enabled if the host controller has
+ * auto-hibernate capability but it's FASTAUTO only.
+ */
+ UFSHCD_QUIRK_HIBERN_FASTAUTO = 1 << 18,
};
enum ufshcd_caps {
@@ -1087,6 +1099,7 @@ extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
u32 *mib_val, u8 peer);
extern int ufshcd_config_pwr_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *desired_pwr_mode);
+extern int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode);
/* UIC command interfaces for DME primitives */
#define DME_LOCAL 0
@@ -1186,6 +1199,8 @@ void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
+int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg);
+
int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
@@ -1217,14 +1232,14 @@ static inline int ufshcd_vops_phy_initialization(struct ufs_hba *hba)
return 0;
}
-extern struct ufs_pm_lvl_states ufs_pm_lvl_states[];
+extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[];
int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
const char *prefix);
int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask);
int ufshcd_write_ee_control(struct ufs_hba *hba);
-int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, u16 *other_mask,
- u16 set, u16 clr);
+int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask,
+ const u16 *other_mask, u16 set, u16 clr);
#endif /* End of Header */
diff --git a/include/ufs/ufshci.h b/include/ufs/ufshci.h
index f81aa95ffbc4..f525566a0864 100644
--- a/include/ufs/ufshci.h
+++ b/include/ufs/ufshci.h
@@ -135,11 +135,7 @@ static inline u32 ufshci_version(u32 major, u32 minor)
#define UFSHCD_UIC_MASK (UIC_COMMAND_COMPL | UFSHCD_UIC_PWR_MASK)
-#define UFSHCD_ERROR_MASK (UIC_ERROR |\
- DEVICE_FATAL_ERROR |\
- CONTROLLER_FATAL_ERROR |\
- SYSTEM_BUS_FATAL_ERROR |\
- CRYPTO_ENGINE_FATAL_ERROR)
+#define UFSHCD_ERROR_MASK (UIC_ERROR | INT_FATAL_ERRORS)
#define INT_FATAL_ERRORS (DEVICE_FATAL_ERROR |\
CONTROLLER_FATAL_ERROR |\
diff --git a/include/ufs/unipro.h b/include/ufs/unipro.h
index 0521f887e3ac..6c553f98fe57 100644
--- a/include/ufs/unipro.h
+++ b/include/ufs/unipro.h
@@ -38,6 +38,18 @@
/*
* M-RX Configuration Attributes
*/
+#define RX_HS_G1_SYNC_LENGTH_CAP 0x008B
+#define RX_HS_G1_PREP_LENGTH_CAP 0x008C
+#define RX_MIN_ACTIVATETIME_CAPABILITY 0x008F
+#define RX_HIBERN8TIME_CAPABILITY 0x0092
+#define RX_HS_G2_SYNC_LENGTH_CAP 0x0094
+#define RX_HS_G3_SYNC_LENGTH_CAP 0x0095
+#define RX_HS_G2_PREP_LENGTH_CAP 0x0096
+#define RX_HS_G3_PREP_LENGTH_CAP 0x0097
+#define RX_ADV_GRANULARITY_CAP 0x0098
+#define RX_HIBERN8TIME_CAP 0x0092
+#define RX_ADV_HIBERN8TIME_CAP 0x0099
+#define RX_ADV_MIN_ACTIVATETIME_CAP 0x009A
#define RX_MODE 0x00A1
#define RX_HSRATE_SERIES 0x00A2
#define RX_HSGEAR 0x00A3
@@ -47,32 +59,19 @@
#define RX_ENTER_HIBERN8 0x00A7
#define RX_BYPASS_8B10B_ENABLE 0x00A8
#define RX_TERMINATION_FORCE_ENABLE 0x00A9
-#define RX_MIN_ACTIVATETIME_CAPABILITY 0x008F
-#define RX_HIBERN8TIME_CAPABILITY 0x0092
+#define RXCALCTRL 0x00B4
+#define RXSQCTRL 0x00B5
+#define CFGRXCDR8 0x00BA
+#define CFGRXOVR8 0x00BD
+#define CFGRXOVR6 0x00BF
+#define RXDIRECTCTRL2 0x00C7
+#define CFGRXOVR4 0x00E9
#define RX_REFCLKFREQ 0x00EB
#define RX_CFGCLKFREQVAL 0x00EC
#define CFGWIDEINLN 0x00F0
-#define CFGRXCDR8 0x00BA
#define ENARXDIRECTCFG4 0x00F2
-#define CFGRXOVR8 0x00BD
-#define RXDIRECTCTRL2 0x00C7
#define ENARXDIRECTCFG3 0x00F3
-#define RXCALCTRL 0x00B4
#define ENARXDIRECTCFG2 0x00F4
-#define CFGRXOVR4 0x00E9
-#define RXSQCTRL 0x00B5
-#define CFGRXOVR6 0x00BF
-#define RX_HS_G1_SYNC_LENGTH_CAP 0x008B
-#define RX_HS_G1_PREP_LENGTH_CAP 0x008C
-#define RX_HS_G2_SYNC_LENGTH_CAP 0x0094
-#define RX_HS_G3_SYNC_LENGTH_CAP 0x0095
-#define RX_HS_G2_PREP_LENGTH_CAP 0x0096
-#define RX_HS_G3_PREP_LENGTH_CAP 0x0097
-#define RX_ADV_GRANULARITY_CAP 0x0098
-#define RX_MIN_ACTIVATETIME_CAP 0x008F
-#define RX_HIBERN8TIME_CAP 0x0092
-#define RX_ADV_HIBERN8TIME_CAP 0x0099
-#define RX_ADV_MIN_ACTIVATETIME_CAP 0x009A
#define is_mphy_tx_attr(attr) (attr < RX_MODE)
@@ -103,47 +102,50 @@
/*
* PHY Adapter attributes
*/
-#define PA_ACTIVETXDATALANES 0x1560
-#define PA_ACTIVERXDATALANES 0x1580
-#define PA_TXTRAILINGCLOCKS 0x1564
#define PA_PHY_TYPE 0x1500
#define PA_AVAILTXDATALANES 0x1520
-#define PA_AVAILRXDATALANES 0x1540
-#define PA_MINRXTRAILINGCLOCKS 0x1543
-#define PA_TXPWRSTATUS 0x1567
-#define PA_RXPWRSTATUS 0x1582
-#define PA_TXFORCECLOCK 0x1562
-#define PA_TXPWRMODE 0x1563
-#define PA_LEGACYDPHYESCDL 0x1570
#define PA_MAXTXSPEEDFAST 0x1521
#define PA_MAXTXSPEEDSLOW 0x1522
#define PA_MAXRXSPEEDFAST 0x1541
#define PA_MAXRXSPEEDSLOW 0x1542
#define PA_TXLINKSTARTUPHS 0x1544
+#define PA_AVAILRXDATALANES 0x1540
+#define PA_MINRXTRAILINGCLOCKS 0x1543
#define PA_LOCAL_TX_LCC_ENABLE 0x155E
+#define PA_ACTIVETXDATALANES 0x1560
+#define PA_CONNECTEDTXDATALANES 0x1561
+#define PA_TXFORCECLOCK 0x1562
+#define PA_TXPWRMODE 0x1563
+#define PA_TXTRAILINGCLOCKS 0x1564
#define PA_TXSPEEDFAST 0x1565
#define PA_TXSPEEDSLOW 0x1566
-#define PA_REMOTEVERINFO 0x15A0
+#define PA_TXPWRSTATUS 0x1567
#define PA_TXGEAR 0x1568
#define PA_TXTERMINATION 0x1569
#define PA_HSSERIES 0x156A
+#define PA_LEGACYDPHYESCDL 0x1570
#define PA_PWRMODE 0x1571
+#define PA_ACTIVERXDATALANES 0x1580
+#define PA_CONNECTEDRXDATALANES 0x1581
+#define PA_RXPWRSTATUS 0x1582
#define PA_RXGEAR 0x1583
#define PA_RXTERMINATION 0x1584
#define PA_MAXRXPWMGEAR 0x1586
#define PA_MAXRXHSGEAR 0x1587
+#define PA_PACPREQTIMEOUT 0x1590
+#define PA_PACPREQEOBTIMEOUT 0x1591
+#define PA_REMOTEVERINFO 0x15A0
+#define PA_LOGICALLANEMAP 0x15A1
+#define PA_SLEEPNOCONFIGTIME 0x15A2
+#define PA_STALLNOCONFIGTIME 0x15A3
+#define PA_SAVECONFIGTIME 0x15A4
#define PA_RXHSUNTERMCAP 0x15A5
#define PA_RXLSTERMCAP 0x15A6
#define PA_GRANULARITY 0x15AA
-#define PA_PACPREQTIMEOUT 0x1590
-#define PA_PACPREQEOBTIMEOUT 0x1591
#define PA_HIBERN8TIME 0x15A7
#define PA_LOCALVERINFO 0x15A9
#define PA_GRANULARITY 0x15AA
#define PA_TACTIVATE 0x15A8
-#define PA_PACPFRAMECOUNT 0x15C0
-#define PA_PACPERRORCOUNT 0x15C1
-#define PA_PHYTESTCONTROL 0x15C2
#define PA_PWRMODEUSERDATA0 0x15B0
#define PA_PWRMODEUSERDATA1 0x15B1
#define PA_PWRMODEUSERDATA2 0x15B2
@@ -156,12 +158,9 @@
#define PA_PWRMODEUSERDATA9 0x15B9
#define PA_PWRMODEUSERDATA10 0x15BA
#define PA_PWRMODEUSERDATA11 0x15BB
-#define PA_CONNECTEDTXDATALANES 0x1561
-#define PA_CONNECTEDRXDATALANES 0x1581
-#define PA_LOGICALLANEMAP 0x15A1
-#define PA_SLEEPNOCONFIGTIME 0x15A2
-#define PA_STALLNOCONFIGTIME 0x15A3
-#define PA_SAVECONFIGTIME 0x15A4
+#define PA_PACPFRAMECOUNT 0x15C0
+#define PA_PACPERRORCOUNT 0x15C1
+#define PA_PHYTESTCONTROL 0x15C2
#define PA_TXHSADAPTTYPE 0x15D4
/* Adapt type for PA_TXHSADAPTTYPE attribute */
@@ -173,9 +172,9 @@
#define PA_HIBERN8_TIME_UNIT_US 100
/*Other attributes*/
+#define VS_POWERSTATE 0xD083
#define VS_MPHYCFGUPDT 0xD085
#define VS_DEBUGOMC 0xD09E
-#define VS_POWERSTATE 0xD083
#define PA_GRANULARITY_MIN_VAL 1
#define PA_GRANULARITY_MAX_VAL 6
@@ -229,6 +228,7 @@ enum ufs_hs_gear_tag {
UFS_HS_G2, /* HS Gear 2 */
UFS_HS_G3, /* HS Gear 3 */
UFS_HS_G4, /* HS Gear 4 */
+ UFS_HS_G5 /* HS Gear 5 */
};
enum ufs_unipro_ver {
@@ -246,27 +246,27 @@ enum ufs_unipro_ver {
/*
* Data Link Layer Attributes
*/
+#define DL_TXPREEMPTIONCAP 0x2000
+#define DL_TC0TXMAXSDUSIZE 0x2001
+#define DL_TC0RXINITCREDITVAL 0x2002
+#define DL_TC1TXMAXSDUSIZE 0x2003
+#define DL_TC1RXINITCREDITVAL 0x2004
+#define DL_TC0TXBUFFERSIZE 0x2005
+#define DL_TC1TXBUFFERSIZE 0x2006
#define DL_TC0TXFCTHRESHOLD 0x2040
#define DL_FC0PROTTIMEOUTVAL 0x2041
#define DL_TC0REPLAYTIMEOUTVAL 0x2042
#define DL_AFC0REQTIMEOUTVAL 0x2043
#define DL_AFC0CREDITTHRESHOLD 0x2044
#define DL_TC0OUTACKTHRESHOLD 0x2045
+#define DL_PEERTC0PRESENT 0x2046
+#define DL_PEERTC0RXINITCREVAL 0x2047
#define DL_TC1TXFCTHRESHOLD 0x2060
#define DL_FC1PROTTIMEOUTVAL 0x2061
#define DL_TC1REPLAYTIMEOUTVAL 0x2062
#define DL_AFC1REQTIMEOUTVAL 0x2063
#define DL_AFC1CREDITTHRESHOLD 0x2064
#define DL_TC1OUTACKTHRESHOLD 0x2065
-#define DL_TXPREEMPTIONCAP 0x2000
-#define DL_TC0TXMAXSDUSIZE 0x2001
-#define DL_TC0RXINITCREDITVAL 0x2002
-#define DL_TC0TXBUFFERSIZE 0x2005
-#define DL_PEERTC0PRESENT 0x2046
-#define DL_PEERTC0RXINITCREVAL 0x2047
-#define DL_TC1TXMAXSDUSIZE 0x2003
-#define DL_TC1RXINITCREDITVAL 0x2004
-#define DL_TC1TXBUFFERSIZE 0x2006
#define DL_PEERTC1PRESENT 0x2066
#define DL_PEERTC1RXINITCREVAL 0x2067
diff --git a/include/xen/hvm.h b/include/xen/hvm.h
index b7fd7fc9ad41..8da7a6747058 100644
--- a/include/xen/hvm.h
+++ b/include/xen/hvm.h
@@ -60,4 +60,6 @@ static inline int hvm_get_parameter(int idx, uint64_t *value)
void xen_setup_callback_vector(void);
+int xen_set_upcall_vector(unsigned int cpu);
+
#endif /* XEN_HVM_H__ */
diff --git a/include/xen/interface/hvm/hvm_op.h b/include/xen/interface/hvm/hvm_op.h
index f3097e79bb03..03134bf3cec1 100644
--- a/include/xen/interface/hvm/hvm_op.h
+++ b/include/xen/interface/hvm/hvm_op.h
@@ -46,4 +46,23 @@ struct xen_hvm_get_mem_type {
};
DEFINE_GUEST_HANDLE_STRUCT(xen_hvm_get_mem_type);
+#if defined(__i386__) || defined(__x86_64__)
+
+/*
+ * HVMOP_set_evtchn_upcall_vector: Set a <vector> that should be used for event
+ * channel upcalls on the specified <vcpu>. If set,
+ * this vector will be used in preference to the
+ * domain global callback (see
+ * HVM_PARAM_CALLBACK_IRQ).
+ */
+#define HVMOP_set_evtchn_upcall_vector 23
+struct xen_hvm_evtchn_upcall_vector {
+ uint32_t vcpu;
+ uint8_t vector;
+};
+typedef struct xen_hvm_evtchn_upcall_vector xen_hvm_evtchn_upcall_vector_t;
+DEFINE_GUEST_HANDLE_STRUCT(xen_hvm_evtchn_upcall_vector_t);
+
+#endif /* defined(__i386__) || defined(__x86_64__) */
+
#endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
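
This pairs with the new xen_set_upcall_vector() declaration added to xen/hvm.h above: the guest asks the hypervisor to deliver event-channel upcalls for one vCPU through a specific vector instead of the domain-global callback. A hedged sketch of the hypercall itself (x86-only, as the #if above requires; the wrapper name and vector value are illustrative):

#include <asm/xen/hypercall.h>
#include <xen/interface/hvm/hvm_op.h>

/* Sketch: route a vCPU's event-channel upcalls through 'vector' */
static int set_upcall_vector(unsigned int vcpu, u8 vector)
{
	struct xen_hvm_evtchn_upcall_vector op = {
		.vcpu   = vcpu,
		.vector = vector,
	};

	return HYPERVISOR_hvm_op(HVMOP_set_evtchn_upcall_vector, &op);
}
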
diff --git a/init/Kconfig b/init/Kconfig
index 8c9ad53b45dc..532362fcfe31 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -70,11 +70,7 @@ config CC_CAN_LINK_STATIC
default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(USERCFLAGS) $(USERLDFLAGS) $(m64-flag) -static) if 64BIT
default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(USERCFLAGS) $(USERLDFLAGS) $(m32-flag) -static)
-config CC_HAS_ASM_GOTO
- def_bool $(success,$(srctree)/scripts/gcc-goto.sh $(CC))
-
config CC_HAS_ASM_GOTO_OUTPUT
- depends on CC_HAS_ASM_GOTO
def_bool $(success,echo 'int foo(int x) { asm goto ("": "=r"(x) ::: bar); return x; bar: return 0; }' | $(CC) -x c - -c -o /dev/null)
config CC_HAS_ASM_GOTO_TIED_OUTPUT
@@ -1411,13 +1407,6 @@ config CC_OPTIMIZE_FOR_PERFORMANCE
with the "-O2" compiler flag for best performance and most
helpful compile-time warnings.
-config CC_OPTIMIZE_FOR_PERFORMANCE_O3
- bool "Optimize more for performance (-O3)"
- depends on ARC
- help
- Choosing this option will pass "-O3" to your compiler to optimize
- the kernel yet more for performance.
-
config CC_OPTIMIZE_FOR_SIZE
bool "Optimize for size (-Os)"
help
@@ -1733,16 +1722,17 @@ config KALLSYMS_ALL
help
Normally kallsyms only contains the symbols of functions for nicer
OOPS messages and backtraces (i.e., symbols from the text and inittext
- sections). This is sufficient for most cases. And only in very rare
- cases (e.g., when a debugger is used) all symbols are required (e.g.,
- names of variables from the data sections, etc).
+ sections). This is sufficient for most cases. All symbols are required
+ only if you want to enable kernel live patching, or for other less
+ common use cases (e.g., when a debugger is used), i.e., names of
+ variables from the data sections, etc.
This option makes sure that all symbols are loaded into the kernel
image (i.e., symbols from all sections) in cost of increased kernel
size (depending on the kernel configuration, it may be 300KiB or
something like this).
- Say N unless you really need all symbols.
+ Say N unless you really need all symbols, or kernel live patching.
config KALLSYMS_ABSOLUTE_PERCPU
bool
@@ -1933,298 +1923,7 @@ config MODULE_SIG_FORMAT
def_bool n
select SYSTEM_DATA_VERIFICATION
-menuconfig MODULES
- bool "Enable loadable module support"
- modules
- help
- Kernel modules are small pieces of compiled code which can
- be inserted in the running kernel, rather than being
- permanently built into the kernel. You use the "modprobe"
- tool to add (and sometimes remove) them. If you say Y here,
- many parts of the kernel can be built as modules (by
- answering M instead of Y where indicated): this is most
- useful for infrequently used options which are not required
- for booting. For more information, see the man pages for
- modprobe, lsmod, modinfo, insmod and rmmod.
-
- If you say Y here, you will need to run "make
- modules_install" to put the modules under /lib/modules/
- where modprobe can find them (you may need to be root to do
- this).
-
- If unsure, say Y.
-
-if MODULES
-
-config MODULE_FORCE_LOAD
- bool "Forced module loading"
- default n
- help
- Allow loading of modules without version information (ie. modprobe
- --force). Forced module loading sets the 'F' (forced) taint flag and
- is usually a really bad idea.
-
-config MODULE_UNLOAD
- bool "Module unloading"
- help
- Without this option you will not be able to unload any
- modules (note that some modules may not be unloadable
- anyway), which makes your kernel smaller, faster
- and simpler. If unsure, say Y.
-
-config MODULE_FORCE_UNLOAD
- bool "Forced module unloading"
- depends on MODULE_UNLOAD
- help
- This option allows you to force a module to unload, even if the
- kernel believes it is unsafe: the kernel will remove the module
- without waiting for anyone to stop using it (using the -f option to
- rmmod). This is mainly for kernel developers and desperate users.
- If unsure, say N.
-
-config MODULE_UNLOAD_TAINT_TRACKING
- bool "Tainted module unload tracking"
- depends on MODULE_UNLOAD
- default n
- help
- This option allows you to maintain a record of each unloaded
- module that tainted the kernel. In addition to displaying a
- list of linked (or loaded) modules e.g. on detection of a bad
- page (see bad_page()), the aforementioned details are also
- shown. If unsure, say N.
-
-config MODVERSIONS
- bool "Module versioning support"
- help
- Usually, you have to use modules compiled with your kernel.
- Saying Y here makes it sometimes possible to use modules
- compiled for different kernels, by adding enough information
- to the modules to (hopefully) spot any changes which would
- make them incompatible with the kernel you are running. If
- unsure, say N.
-
-config ASM_MODVERSIONS
- bool
- default HAVE_ASM_MODVERSIONS && MODVERSIONS
- help
- This enables module versioning for exported symbols also from
- assembly. This can be enabled only when the target architecture
- supports it.
-
-config MODULE_SRCVERSION_ALL
- bool "Source checksum for all modules"
- help
- Modules which contain a MODULE_VERSION get an extra "srcversion"
- field inserted into their modinfo section, which contains a
- sum of the source files which made it. This helps maintainers
- see exactly which source was used to build a module (since
- others sometimes change the module source without updating
- the version). With this option, such a "srcversion" field
- will be created for all modules. If unsure, say N.
-
-config MODULE_SIG
- bool "Module signature verification"
- select MODULE_SIG_FORMAT
- help
- Check modules for valid signatures upon load: the signature
- is simply appended to the module. For more information see
- <file:Documentation/admin-guide/module-signing.rst>.
-
- Note that this option adds the OpenSSL development packages as a
- kernel build dependency so that the signing tool can use its crypto
- library.
-
- You should enable this option if you wish to use either
- CONFIG_SECURITY_LOCKDOWN_LSM or lockdown functionality imposed via
- another LSM - otherwise unsigned modules will be loadable regardless
- of the lockdown policy.
-
- !!!WARNING!!! If you enable this option, you MUST make sure that the
- module DOES NOT get stripped after being signed. This includes the
- debuginfo strip done by some packagers (such as rpmbuild) and
- inclusion into an initramfs that wants the module size reduced.
-
-config MODULE_SIG_FORCE
- bool "Require modules to be validly signed"
- depends on MODULE_SIG
- help
- Reject unsigned modules or signed modules for which we don't have a
- key. Without this, such modules will simply taint the kernel.
-
-config MODULE_SIG_ALL
- bool "Automatically sign all modules"
- default y
- depends on MODULE_SIG || IMA_APPRAISE_MODSIG
- help
- Sign all modules during make modules_install. Without this option,
- modules must be signed manually, using the scripts/sign-file tool.
-
-comment "Do not forget to sign required modules with scripts/sign-file"
- depends on MODULE_SIG_FORCE && !MODULE_SIG_ALL
-
-choice
- prompt "Which hash algorithm should modules be signed with?"
- depends on MODULE_SIG || IMA_APPRAISE_MODSIG
- help
- This determines which sort of hashing algorithm will be used during
- signature generation. This algorithm _must_ be built into the kernel
- directly so that signature verification can take place. It is not
- possible to load a signed module containing the algorithm to check
- the signature on that module.
-
-config MODULE_SIG_SHA1
- bool "Sign modules with SHA-1"
- select CRYPTO_SHA1
-
-config MODULE_SIG_SHA224
- bool "Sign modules with SHA-224"
- select CRYPTO_SHA256
-
-config MODULE_SIG_SHA256
- bool "Sign modules with SHA-256"
- select CRYPTO_SHA256
-
-config MODULE_SIG_SHA384
- bool "Sign modules with SHA-384"
- select CRYPTO_SHA512
-
-config MODULE_SIG_SHA512
- bool "Sign modules with SHA-512"
- select CRYPTO_SHA512
-
-endchoice
-
-config MODULE_SIG_HASH
- string
- depends on MODULE_SIG || IMA_APPRAISE_MODSIG
- default "sha1" if MODULE_SIG_SHA1
- default "sha224" if MODULE_SIG_SHA224
- default "sha256" if MODULE_SIG_SHA256
- default "sha384" if MODULE_SIG_SHA384
- default "sha512" if MODULE_SIG_SHA512
-
-choice
- prompt "Module compression mode"
- help
- This option allows you to choose the algorithm which will be used to
- compress modules when 'make modules_install' is run. (or, you can
- choose to not compress modules at all.)
-
- External modules will also be compressed in the same way during the
- installation.
-
- For modules inside an initrd or initramfs, it's more efficient to
- compress the whole initrd or initramfs instead.
-
- This is fully compatible with signed modules.
-
- Please note that the tool used to load modules needs to support the
- corresponding algorithm. module-init-tools MAY support gzip, and kmod
- MAY support gzip, xz and zstd.
-
- Your build system needs to provide the appropriate compression tool
- to compress the modules.
-
- If in doubt, select 'None'.
-
-config MODULE_COMPRESS_NONE
- bool "None"
- help
- Do not compress modules. The installed modules are suffixed
- with .ko.
-
-config MODULE_COMPRESS_GZIP
- bool "GZIP"
- help
- Compress modules with GZIP. The installed modules are suffixed
- with .ko.gz.
-
-config MODULE_COMPRESS_XZ
- bool "XZ"
- help
- Compress modules with XZ. The installed modules are suffixed
- with .ko.xz.
-
-config MODULE_COMPRESS_ZSTD
- bool "ZSTD"
- help
- Compress modules with ZSTD. The installed modules are suffixed
- with .ko.zst.
-
-endchoice
-
-config MODULE_DECOMPRESS
- bool "Support in-kernel module decompression"
- depends on MODULE_COMPRESS_GZIP || MODULE_COMPRESS_XZ
- select ZLIB_INFLATE if MODULE_COMPRESS_GZIP
- select XZ_DEC if MODULE_COMPRESS_XZ
- help
-
- Support for decompressing kernel modules by the kernel itself
- instead of relying on userspace to perform this task. Useful when
- load pinning security policy is enabled.
-
- If unsure, say N.
-
-config MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS
- bool "Allow loading of modules with missing namespace imports"
- help
- Symbols exported with EXPORT_SYMBOL_NS*() are considered exported in
- a namespace. A module that makes use of a symbol exported with such a
- namespace is required to import the namespace via MODULE_IMPORT_NS().
- There is no technical reason to enforce correct namespace imports,
- but it creates consistency between symbols defining namespaces and
- users importing namespaces they make use of. This option relaxes this
- requirement and lifts the enforcement when loading a module.
-
- If unsure, say N.
-
-config MODPROBE_PATH
- string "Path to modprobe binary"
- default "/sbin/modprobe"
- help
- When kernel code requests a module, it does so by calling
- the "modprobe" userspace utility. This option allows you to
- set the path where that binary is found. This can be changed
- at runtime via the sysctl file
- /proc/sys/kernel/modprobe. Setting this to the empty string
- removes the kernel's ability to request modules (but
- userspace can still load modules explicitly).
-
-config TRIM_UNUSED_KSYMS
- bool "Trim unused exported kernel symbols" if EXPERT
- depends on !COMPILE_TEST
- help
- The kernel and some modules make many symbols available for
- other modules to use via EXPORT_SYMBOL() and variants. Depending
- on the set of modules being selected in your kernel configuration,
- many of those exported symbols might never be used.
-
- This option allows for unused exported symbols to be dropped from
- the build. In turn, this provides the compiler more opportunities
- (especially when using LTO) for optimizing the code and reducing
- binary size. This might have some security advantages as well.
-
- If unsure, or if you need to build out-of-tree modules, say N.
-
-config UNUSED_KSYMS_WHITELIST
- string "Whitelist of symbols to keep in ksymtab"
- depends on TRIM_UNUSED_KSYMS
- help
- By default, all unused exported symbols will be un-exported from the
- build when TRIM_UNUSED_KSYMS is selected.
-
- UNUSED_KSYMS_WHITELIST allows to whitelist symbols that must be kept
- exported at all times, even in absence of in-tree users. The value to
- set here is the path to a text file containing the list of symbols,
- one per line. The path can be absolute, or relative to the kernel
- source tree.
-
-endif # MODULES
-
-config MODULES_TREE_LOOKUP
- def_bool y
- depends on PERF_EVENTS || TRACING || CFI_CLANG
+source "kernel/module/Kconfig"
config INIT_ALL_POSSIBLE
bool
diff --git a/init/main.c b/init/main.c
index 91642a4e69be..1fe7942f5d4a 100644
--- a/init/main.c
+++ b/init/main.c
@@ -1446,13 +1446,25 @@ static noinline void __init kernel_init_freeable(void);
#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_STRICT_MODULE_RWX)
bool rodata_enabled __ro_after_init = true;
+
+#ifndef arch_parse_debug_rodata
+static inline bool arch_parse_debug_rodata(char *str) { return false; }
+#endif
+
static int __init set_debug_rodata(char *str)
{
- if (strtobool(str, &rodata_enabled))
+ if (arch_parse_debug_rodata(str))
+ return 0;
+
+ if (str && !strcmp(str, "on"))
+ rodata_enabled = true;
+ else if (str && !strcmp(str, "off"))
+ rodata_enabled = false;
+ else
pr_warn("Invalid option string for rodata: '%s'\n", str);
- return 1;
+ return 0;
}
-__setup("rodata=", set_debug_rodata);
+early_param("rodata", set_debug_rodata);
#endif
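The hunk above switches "rodata=" from __setup() to early_param() and open-codes the on/off parsing. An early_param() handler runs while the kernel scans the command line early in boot and returns 0 to signal success, whereas a __setup() handler signals that it consumed the option by returning 1; that is why the return value changes along with the parsing. A minimal kernel-style sketch of the same idiom, assuming a made-up "widget=" option (not part of the patch, and it would only build inside a kernel tree):

#include <linux/cache.h>
#include <linux/init.h>
#include <linux/printk.h>
#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical example only: an imaginary "widget=" boot option parsed
 * with the same early_param() on/off idiom used for "rodata=" above. */
static bool widget_enabled __ro_after_init = true;

static int __init set_widget(char *str)
{
	if (str && !strcmp(str, "on"))
		widget_enabled = true;
	else if (str && !strcmp(str, "off"))
		widget_enabled = false;
	else
		pr_warn("Invalid option string for widget: '%s'\n", str);
	return 0;
}
early_param("widget", set_widget);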
#ifdef CONFIG_STRICT_KERNEL_RWX
diff --git a/init/version.c b/init/version.c
index 1a356f5493e8..b7f9559d417c 100644
--- a/init/version.c
+++ b/init/version.c
@@ -11,6 +11,8 @@
#include <linux/build-salt.h>
#include <linux/elfnote-lto.h>
#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/printk.h>
#include <linux/uts.h>
#include <linux/utsname.h>
#include <generated/utsrelease.h>
@@ -35,6 +37,21 @@ struct uts_namespace init_uts_ns = {
};
EXPORT_SYMBOL_GPL(init_uts_ns);
+static int __init early_hostname(char *arg)
+{
+ size_t bufsize = sizeof(init_uts_ns.name.nodename);
+ size_t maxlen = bufsize - 1;
+ size_t arglen;
+
+ arglen = strlcpy(init_uts_ns.name.nodename, arg, bufsize);
+ if (arglen > maxlen) {
+ pr_warn("hostname parameter exceeds %zd characters and will be truncated",
+ maxlen);
+ }
+ return 0;
+}
+early_param("hostname", early_hostname);
+
/* FIXED STRINGS! Don't touch! */
const char linux_banner[] =
"Linux version " UTS_RELEASE " (" LINUX_COMPILE_BY "@"
diff --git a/io_uring/advise.c b/io_uring/advise.c
index 581956934c0b..449c6f14649f 100644
--- a/io_uring/advise.c
+++ b/io_uring/advise.c
@@ -31,7 +31,7 @@ struct io_madvise {
int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
- struct io_madvise *ma = io_kiocb_to_cmd(req);
+ struct io_madvise *ma = io_kiocb_to_cmd(req, struct io_madvise);
if (sqe->buf_index || sqe->off || sqe->splice_fd_in)
return -EINVAL;
@@ -48,7 +48,7 @@ int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
{
#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
- struct io_madvise *ma = io_kiocb_to_cmd(req);
+ struct io_madvise *ma = io_kiocb_to_cmd(req, struct io_madvise);
int ret;
if (issue_flags & IO_URING_F_NONBLOCK)
@@ -64,7 +64,7 @@ int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- struct io_fadvise *fa = io_kiocb_to_cmd(req);
+ struct io_fadvise *fa = io_kiocb_to_cmd(req, struct io_fadvise);
if (sqe->buf_index || sqe->addr || sqe->splice_fd_in)
return -EINVAL;
@@ -77,7 +77,7 @@ int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_fadvise *fa = io_kiocb_to_cmd(req);
+ struct io_fadvise *fa = io_kiocb_to_cmd(req, struct io_fadvise);
int ret;
if (issue_flags & IO_URING_F_NONBLOCK) {
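Throughout the io_uring hunks, io_kiocb_to_cmd() gains a second argument naming the concrete per-opcode command struct, so the conversion from the generic request to opcode-specific data is no longer an unchecked cast. The actual macro definition is outside this diff and presumably performs a compile-time size check against the request's command storage; a self-contained userspace sketch of the same checked-cast idea, with toy types assumed purely for illustration, is:

#include <stdio.h>
#include <string.h>

/* Toy request with opaque per-opcode storage, loosely mirroring the
 * pattern above (assumed layout, not the real struct io_kiocb). */
struct toy_req {
	unsigned char cmd[64];
};

/* Checked accessor: the caller names the command type, and a
 * compile-time assertion rejects any type larger than the storage. */
#define toy_req_to_cmd(req, cmd_type) ({				\
	_Static_assert(sizeof(cmd_type) <= sizeof((req)->cmd),		\
		       "command type too large for request storage");	\
	(cmd_type *)(req)->cmd;						\
})

struct toy_madvise {
	int fd;
	unsigned int len;
	int advice;
};

int main(void)
{
	struct toy_req req;
	struct toy_madvise *ma;

	memset(&req, 0, sizeof(req));
	ma = toy_req_to_cmd(&req, struct toy_madvise);
	ma->advice = 4;	/* arbitrary value, purely illustrative */
	printf("advice = %d\n", ma->advice);
	return 0;
}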
diff --git a/io_uring/cancel.c b/io_uring/cancel.c
index 8435a1eba59a..5fc5d3e80fcb 100644
--- a/io_uring/cancel.c
+++ b/io_uring/cancel.c
@@ -107,7 +107,7 @@ int io_try_cancel(struct io_uring_task *tctx, struct io_cancel_data *cd,
int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- struct io_cancel *cancel = io_kiocb_to_cmd(req);
+ struct io_cancel *cancel = io_kiocb_to_cmd(req, struct io_cancel);
if (unlikely(req->flags & REQ_F_BUFFER_SELECT))
return -EINVAL;
@@ -164,7 +164,7 @@ static int __io_async_cancel(struct io_cancel_data *cd,
int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_cancel *cancel = io_kiocb_to_cmd(req);
+ struct io_cancel *cancel = io_kiocb_to_cmd(req, struct io_cancel);
struct io_cancel_data cd = {
.ctx = req->ctx,
.data = cancel->addr,
@@ -218,7 +218,7 @@ static int __io_sync_cancel(struct io_uring_task *tctx,
(cd->flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
unsigned long file_ptr;
- if (unlikely(fd > ctx->nr_user_files))
+ if (unlikely(fd >= ctx->nr_user_files))
return -EBADF;
fd = array_index_nospec(fd, ctx->nr_user_files);
file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
diff --git a/io_uring/epoll.c b/io_uring/epoll.c
index a8b794471d6b..9aa74d2c80bc 100644
--- a/io_uring/epoll.c
+++ b/io_uring/epoll.c
@@ -23,7 +23,7 @@ struct io_epoll {
int io_epoll_ctl_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- struct io_epoll *epoll = io_kiocb_to_cmd(req);
+ struct io_epoll *epoll = io_kiocb_to_cmd(req, struct io_epoll);
pr_warn_once("%s: epoll_ctl support in io_uring is deprecated and will "
"be removed in a future Linux kernel version.\n",
@@ -49,7 +49,7 @@ int io_epoll_ctl_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_epoll *ie = io_kiocb_to_cmd(req);
+ struct io_epoll *ie = io_kiocb_to_cmd(req, struct io_epoll);
int ret;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
diff --git a/io_uring/fs.c b/io_uring/fs.c
index 0de4f549bb7d..7100c293c13a 100644
--- a/io_uring/fs.c
+++ b/io_uring/fs.c
@@ -49,7 +49,7 @@ struct io_link {
int io_renameat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- struct io_rename *ren = io_kiocb_to_cmd(req);
+ struct io_rename *ren = io_kiocb_to_cmd(req, struct io_rename);
const char __user *oldf, *newf;
if (sqe->buf_index || sqe->splice_fd_in)
@@ -79,7 +79,7 @@ int io_renameat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_rename *ren = io_kiocb_to_cmd(req);
+ struct io_rename *ren = io_kiocb_to_cmd(req, struct io_rename);
int ret;
if (issue_flags & IO_URING_F_NONBLOCK)
@@ -95,7 +95,7 @@ int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
void io_renameat_cleanup(struct io_kiocb *req)
{
- struct io_rename *ren = io_kiocb_to_cmd(req);
+ struct io_rename *ren = io_kiocb_to_cmd(req, struct io_rename);
putname(ren->oldpath);
putname(ren->newpath);
@@ -103,7 +103,7 @@ void io_renameat_cleanup(struct io_kiocb *req)
int io_unlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- struct io_unlink *un = io_kiocb_to_cmd(req);
+ struct io_unlink *un = io_kiocb_to_cmd(req, struct io_unlink);
const char __user *fname;
if (sqe->off || sqe->len || sqe->buf_index || sqe->splice_fd_in)
@@ -128,7 +128,7 @@ int io_unlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_unlink *un = io_kiocb_to_cmd(req);
+ struct io_unlink *un = io_kiocb_to_cmd(req, struct io_unlink);
int ret;
if (issue_flags & IO_URING_F_NONBLOCK)
@@ -146,14 +146,14 @@ int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
void io_unlinkat_cleanup(struct io_kiocb *req)
{
- struct io_unlink *ul = io_kiocb_to_cmd(req);
+ struct io_unlink *ul = io_kiocb_to_cmd(req, struct io_unlink);
putname(ul->filename);
}
int io_mkdirat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- struct io_mkdir *mkd = io_kiocb_to_cmd(req);
+ struct io_mkdir *mkd = io_kiocb_to_cmd(req, struct io_mkdir);
const char __user *fname;
if (sqe->off || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
@@ -175,7 +175,7 @@ int io_mkdirat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
int io_mkdirat(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_mkdir *mkd = io_kiocb_to_cmd(req);
+ struct io_mkdir *mkd = io_kiocb_to_cmd(req, struct io_mkdir);
int ret;
if (issue_flags & IO_URING_F_NONBLOCK)
@@ -190,14 +190,14 @@ int io_mkdirat(struct io_kiocb *req, unsigned int issue_flags)
void io_mkdirat_cleanup(struct io_kiocb *req)
{
- struct io_mkdir *md = io_kiocb_to_cmd(req);
+ struct io_mkdir *md = io_kiocb_to_cmd(req, struct io_mkdir);
putname(md->filename);
}
int io_symlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- struct io_link *sl = io_kiocb_to_cmd(req);
+ struct io_link *sl = io_kiocb_to_cmd(req, struct io_link);
const char __user *oldpath, *newpath;
if (sqe->len || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
@@ -225,7 +225,7 @@ int io_symlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
int io_symlinkat(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_link *sl = io_kiocb_to_cmd(req);
+ struct io_link *sl = io_kiocb_to_cmd(req, struct io_link);
int ret;
if (issue_flags & IO_URING_F_NONBLOCK)
@@ -240,7 +240,7 @@ int io_symlinkat(struct io_kiocb *req, unsigned int issue_flags)
int io_linkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- struct io_link *lnk = io_kiocb_to_cmd(req);
+ struct io_link *lnk = io_kiocb_to_cmd(req, struct io_link);
const char __user *oldf, *newf;
if (sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
@@ -270,7 +270,7 @@ int io_linkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
int io_linkat(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_link *lnk = io_kiocb_to_cmd(req);
+ struct io_link *lnk = io_kiocb_to_cmd(req, struct io_link);
int ret;
if (issue_flags & IO_URING_F_NONBLOCK)
@@ -286,7 +286,7 @@ int io_linkat(struct io_kiocb *req, unsigned int issue_flags)
void io_link_cleanup(struct io_kiocb *req)
{
- struct io_link *sl = io_kiocb_to_cmd(req);
+ struct io_link *sl = io_kiocb_to_cmd(req, struct io_link);
putname(sl->oldpath);
putname(sl->newpath);
diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c
index 77df5b43bf52..c6536d4b2da0 100644
--- a/io_uring/io-wq.c
+++ b/io_uring/io-wq.c
@@ -624,8 +624,6 @@ static int io_wqe_worker(void *data)
snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task->pid);
set_task_comm(current, buf);
- audit_alloc_kernel(current);
-
while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
long ret;
@@ -660,7 +658,6 @@ static int io_wqe_worker(void *data)
if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
io_worker_handle_work(worker);
- audit_free(current);
io_worker_exit(worker);
return 0;
}
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index b54218da075c..77616279000b 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1450,9 +1450,10 @@ int io_req_prep_async(struct io_kiocb *req)
return 0;
if (WARN_ON_ONCE(req_has_async_data(req)))
return -EFAULT;
- if (io_alloc_async_data(req))
- return -EAGAIN;
-
+ if (!io_op_defs[req->opcode].manual_alloc) {
+ if (io_alloc_async_data(req))
+ return -EAGAIN;
+ }
return def->prep_async(req);
}
@@ -3885,13 +3886,15 @@ out_fput:
static int __init io_uring_init(void)
{
-#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
+#define __BUILD_BUG_VERIFY_OFFSET_SIZE(stype, eoffset, esize, ename) do { \
BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
- BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
+ BUILD_BUG_ON(sizeof_field(stype, ename) != esize); \
} while (0)
#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
- __BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
+ __BUILD_BUG_VERIFY_OFFSET_SIZE(struct io_uring_sqe, eoffset, sizeof(etype), ename)
+#define BUILD_BUG_SQE_ELEM_SIZE(eoffset, esize, ename) \
+ __BUILD_BUG_VERIFY_OFFSET_SIZE(struct io_uring_sqe, eoffset, esize, ename)
BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
BUILD_BUG_SQE_ELEM(0, __u8, opcode);
BUILD_BUG_SQE_ELEM(1, __u8, flags);
@@ -3899,6 +3902,8 @@ static int __init io_uring_init(void)
BUILD_BUG_SQE_ELEM(4, __s32, fd);
BUILD_BUG_SQE_ELEM(8, __u64, off);
BUILD_BUG_SQE_ELEM(8, __u64, addr2);
+ BUILD_BUG_SQE_ELEM(8, __u32, cmd_op);
+ BUILD_BUG_SQE_ELEM(12, __u32, __pad1);
BUILD_BUG_SQE_ELEM(16, __u64, addr);
BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in);
BUILD_BUG_SQE_ELEM(24, __u32, len);
@@ -3917,13 +3922,22 @@ static int __init io_uring_init(void)
BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
BUILD_BUG_SQE_ELEM(28, __u32, splice_flags);
+ BUILD_BUG_SQE_ELEM(28, __u32, rename_flags);
+ BUILD_BUG_SQE_ELEM(28, __u32, unlink_flags);
+ BUILD_BUG_SQE_ELEM(28, __u32, hardlink_flags);
+ BUILD_BUG_SQE_ELEM(28, __u32, xattr_flags);
+ BUILD_BUG_SQE_ELEM(28, __u32, msg_ring_flags);
BUILD_BUG_SQE_ELEM(32, __u64, user_data);
BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
BUILD_BUG_SQE_ELEM(40, __u16, buf_group);
BUILD_BUG_SQE_ELEM(42, __u16, personality);
BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);
BUILD_BUG_SQE_ELEM(44, __u32, file_index);
+ BUILD_BUG_SQE_ELEM(44, __u16, notification_idx);
+ BUILD_BUG_SQE_ELEM(46, __u16, addr_len);
BUILD_BUG_SQE_ELEM(48, __u64, addr3);
+ BUILD_BUG_SQE_ELEM_SIZE(48, 0, cmd);
+ BUILD_BUG_SQE_ELEM(56, __u64, __pad2);
BUILD_BUG_ON(sizeof(struct io_uring_files_update) !=
sizeof(struct io_uring_rsrc_update));
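The BUILD_BUG_SQE_ELEM() rework above verifies at compile time that each SQE field sits at the documented offset with the documented size, and the new BUILD_BUG_SQE_ELEM_SIZE() variant checks by explicit size rather than by type (useful for the zero-length cmd member). A standalone sketch of that offset/size checking idiom, using a made-up structure and macros rather than the kernel ones:

#include <stddef.h>

/* Made-up fixed-layout structure standing in for the SQE. */
struct toy_sqe {
	unsigned char		opcode;	/* offset 0, 1 byte  */
	unsigned char		flags;	/* offset 1, 1 byte  */
	unsigned short		ioprio;	/* offset 2, 2 bytes */
	int			fd;	/* offset 4, 4 bytes */
	unsigned long long	off;	/* offset 8, 8 bytes */
};

/* Size of a struct member, mirroring the kernel's sizeof_field(). */
#define sizeof_member(type, member)	sizeof(((type *)0)->member)

/* Compile-time layout checks in the spirit of BUILD_BUG_SQE_ELEM(). */
#define CHECK_ELEM(eoffset, esize, ename)				  \
	_Static_assert(offsetof(struct toy_sqe, ename) == (eoffset) &&	  \
		       sizeof_member(struct toy_sqe, ename) == (esize),  \
		       "layout mismatch for " #ename)

CHECK_ELEM(0, 1, opcode);
CHECK_ELEM(1, 1, flags);
CHECK_ELEM(2, 2, ioprio);
CHECK_ELEM(4, 4, fd);
CHECK_ELEM(8, 8, off);

int main(void) { return 0; }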
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index e538fa7cb727..25cd724ade18 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -272,7 +272,7 @@ void io_destroy_buffers(struct io_ring_ctx *ctx)
int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- struct io_provide_buf *p = io_kiocb_to_cmd(req);
+ struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
u64 tmp;
if (sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
@@ -291,7 +291,7 @@ int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_provide_buf *p = io_kiocb_to_cmd(req);
+ struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
struct io_ring_ctx *ctx = req->ctx;
struct io_buffer_list *bl;
int ret = 0;
@@ -319,7 +319,7 @@ int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
unsigned long size, tmp_check;
- struct io_provide_buf *p = io_kiocb_to_cmd(req);
+ struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
u64 tmp;
if (sqe->rw_flags || sqe->splice_fd_in)
@@ -421,7 +421,7 @@ static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_provide_buf *p = io_kiocb_to_cmd(req);
+ struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
struct io_ring_ctx *ctx = req->ctx;
struct io_buffer_list *bl;
int ret = 0;
@@ -436,7 +436,7 @@ int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
bl = io_buffer_get_list(ctx, p->bgid);
if (unlikely(!bl)) {
- bl = kzalloc(sizeof(*bl), GFP_KERNEL);
+ bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
if (!bl) {
ret = -ENOMEM;
goto err;
diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
index 753d16734319..976c4ba68ee7 100644
--- a/io_uring/msg_ring.c
+++ b/io_uring/msg_ring.c
@@ -26,7 +26,7 @@ struct io_msg {
static int io_msg_ring_data(struct io_kiocb *req)
{
struct io_ring_ctx *target_ctx = req->file->private_data;
- struct io_msg *msg = io_kiocb_to_cmd(req);
+ struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
if (msg->src_fd || msg->dst_fd || msg->flags)
return -EINVAL;
@@ -76,7 +76,7 @@ static int io_double_lock_ctx(struct io_ring_ctx *ctx,
static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_ring_ctx *target_ctx = req->file->private_data;
- struct io_msg *msg = io_kiocb_to_cmd(req);
+ struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
struct io_ring_ctx *ctx = req->ctx;
unsigned long file_ptr;
struct file *src_file;
@@ -122,7 +122,7 @@ out_unlock:
int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- struct io_msg *msg = io_kiocb_to_cmd(req);
+ struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
if (unlikely(sqe->buf_index || sqe->personality))
return -EINVAL;
@@ -141,7 +141,7 @@ int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_msg *msg = io_kiocb_to_cmd(req);
+ struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
int ret;
ret = -EBADFD;
diff --git a/io_uring/net.c b/io_uring/net.c
index 32fc3da04e41..0af8a02df580 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -70,13 +70,14 @@ struct io_sendzc {
unsigned flags;
unsigned addr_len;
void __user *addr;
+ size_t done_io;
};
#define IO_APOLL_MULTI_POLLED (REQ_F_APOLL_MULTISHOT | REQ_F_POLLED)
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- struct io_shutdown *shutdown = io_kiocb_to_cmd(req);
+ struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
sqe->buf_index || sqe->splice_fd_in))
@@ -88,7 +89,7 @@ int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_shutdown *shutdown = io_kiocb_to_cmd(req);
+ struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
struct socket *sock;
int ret;
@@ -115,7 +116,7 @@ static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_async_msghdr *hdr = req->async_data;
- if (!hdr || issue_flags & IO_URING_F_UNLOCKED)
+ if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
return;
/* Let normal cleanup path reap it if we fail adding to the cache */
@@ -151,9 +152,9 @@ static int io_setup_async_msg(struct io_kiocb *req,
struct io_async_msghdr *kmsg,
unsigned int issue_flags)
{
- struct io_async_msghdr *async_msg = req->async_data;
+ struct io_async_msghdr *async_msg;
- if (async_msg)
+ if (req_has_async_data(req))
return -EAGAIN;
async_msg = io_recvmsg_alloc_async(req, issue_flags);
if (!async_msg) {
@@ -173,7 +174,7 @@ static int io_setup_async_msg(struct io_kiocb *req,
static int io_sendmsg_copy_hdr(struct io_kiocb *req,
struct io_async_msghdr *iomsg)
{
- struct io_sr_msg *sr = io_kiocb_to_cmd(req);
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
iomsg->msg.msg_name = &iomsg->addr;
iomsg->free_iov = iomsg->fast_iov;
@@ -181,6 +182,37 @@ static int io_sendmsg_copy_hdr(struct io_kiocb *req,
&iomsg->free_iov);
}
+int io_sendzc_prep_async(struct io_kiocb *req)
+{
+ struct io_sendzc *zc = io_kiocb_to_cmd(req, struct io_sendzc);
+ struct io_async_msghdr *io;
+ int ret;
+
+ if (!zc->addr || req_has_async_data(req))
+ return 0;
+ if (io_alloc_async_data(req))
+ return -ENOMEM;
+
+ io = req->async_data;
+ ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
+ return ret;
+}
+
+static int io_setup_async_addr(struct io_kiocb *req,
+ struct sockaddr_storage *addr,
+ unsigned int issue_flags)
+{
+ struct io_async_msghdr *io;
+
+ if (!addr || req_has_async_data(req))
+ return -EAGAIN;
+ if (io_alloc_async_data(req))
+ return -ENOMEM;
+ io = req->async_data;
+ memcpy(&io->addr, addr, sizeof(io->addr));
+ return -EAGAIN;
+}
+
int io_sendmsg_prep_async(struct io_kiocb *req)
{
int ret;
@@ -200,7 +232,7 @@ void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- struct io_sr_msg *sr = io_kiocb_to_cmd(req);
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
if (unlikely(sqe->file_index || sqe->addr2))
return -EINVAL;
@@ -224,7 +256,7 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_sr_msg *sr = io_kiocb_to_cmd(req);
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr iomsg, *kmsg;
struct socket *sock;
unsigned flags;
@@ -283,7 +315,7 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_sr_msg *sr = io_kiocb_to_cmd(req);
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct msghdr msg;
struct iovec iov;
struct socket *sock;
@@ -357,7 +389,7 @@ static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
struct io_async_msghdr *iomsg)
{
- struct io_sr_msg *sr = io_kiocb_to_cmd(req);
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct user_msghdr msg;
int ret;
@@ -404,7 +436,7 @@ static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
struct io_async_msghdr *iomsg)
{
- struct io_sr_msg *sr = io_kiocb_to_cmd(req);
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct compat_msghdr msg;
struct compat_iovec __user *uiov;
int ret;
@@ -482,7 +514,7 @@ int io_recvmsg_prep_async(struct io_kiocb *req)
int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- struct io_sr_msg *sr = io_kiocb_to_cmd(req);
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
if (unlikely(sqe->file_index || sqe->addr2))
return -EINVAL;
@@ -517,7 +549,7 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
static inline void io_recv_prep_retry(struct io_kiocb *req)
{
- struct io_sr_msg *sr = io_kiocb_to_cmd(req);
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
sr->done_io = 0;
sr->len = 0; /* get from the provided buffer */
@@ -575,12 +607,12 @@ static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
if (kmsg->controllen) {
unsigned long control = ubuf + hdr - kmsg->controllen;
- kmsg->msg.msg_control_user = (void *) control;
+ kmsg->msg.msg_control_user = (void __user *) control;
kmsg->msg.msg_controllen = kmsg->controllen;
}
sr->buf = *buf; /* stash for later copy */
- *buf = (void *) (ubuf + hdr);
+ *buf = (void __user *) (ubuf + hdr);
kmsg->payloadlen = *len = *len - hdr;
return 0;
}
@@ -646,7 +678,7 @@ static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_sr_msg *sr = io_kiocb_to_cmd(req);
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr iomsg, *kmsg;
struct socket *sock;
unsigned int cflags;
@@ -758,7 +790,7 @@ retry_multishot:
int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_sr_msg *sr = io_kiocb_to_cmd(req);
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct msghdr msg;
struct socket *sock;
struct iovec iov;
@@ -849,7 +881,7 @@ out_free:
int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- struct io_sendzc *zc = io_kiocb_to_cmd(req);
+ struct io_sendzc *zc = io_kiocb_to_cmd(req, struct io_sendzc);
struct io_ring_ctx *ctx = req->ctx;
if (READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3))
@@ -878,6 +910,7 @@ int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
zc->addr_len = READ_ONCE(sqe->addr_len);
+ zc->done_io = 0;
#ifdef CONFIG_COMPAT
if (req->ctx->compat)
@@ -942,9 +975,9 @@ static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
{
- struct sockaddr_storage address;
+ struct sockaddr_storage __address, *addr = NULL;
struct io_ring_ctx *ctx = req->ctx;
- struct io_sendzc *zc = io_kiocb_to_cmd(req);
+ struct io_sendzc *zc = io_kiocb_to_cmd(req, struct io_sendzc);
struct io_notif_slot *notif_slot;
struct io_kiocb *notif;
struct msghdr msg;
@@ -975,11 +1008,26 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
msg.msg_controllen = 0;
msg.msg_namelen = 0;
+ if (zc->addr) {
+ if (req_has_async_data(req)) {
+ struct io_async_msghdr *io = req->async_data;
+
+ msg.msg_name = addr = &io->addr;
+ } else {
+ ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
+ if (unlikely(ret < 0))
+ return ret;
+ msg.msg_name = (struct sockaddr *)&__address;
+ addr = &__address;
+ }
+ msg.msg_namelen = zc->addr_len;
+ }
+
if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
ret = io_import_fixed(WRITE, &msg.msg_iter, req->imu,
(u64)(uintptr_t)zc->buf, zc->len);
if (unlikely(ret))
- return ret;
+ return ret;
} else {
ret = import_single_range(WRITE, zc->buf, zc->len, &iov,
&msg.msg_iter);
@@ -990,14 +1038,6 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
return ret;
}
- if (zc->addr) {
- ret = move_addr_to_kernel(zc->addr, zc->addr_len, &address);
- if (unlikely(ret < 0))
- return ret;
- msg.msg_name = (struct sockaddr *)&address;
- msg.msg_namelen = zc->addr_len;
- }
-
msg_flags = zc->msg_flags | MSG_ZEROCOPY;
if (issue_flags & IO_URING_F_NONBLOCK)
msg_flags |= MSG_DONTWAIT;
@@ -1011,19 +1051,33 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
if (unlikely(ret < min_ret)) {
if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
- return -EAGAIN;
- return ret == -ERESTARTSYS ? -EINTR : ret;
- }
+ return io_setup_async_addr(req, addr, issue_flags);
- if (zc->flags & IORING_RECVSEND_NOTIF_FLUSH)
+ if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
+ zc->len -= ret;
+ zc->buf += ret;
+ zc->done_io += ret;
+ req->flags |= REQ_F_PARTIAL_IO;
+ return io_setup_async_addr(req, addr, issue_flags);
+ }
+ if (ret == -ERESTARTSYS)
+ ret = -EINTR;
+ req_set_fail(req);
+ } else if (zc->flags & IORING_RECVSEND_NOTIF_FLUSH) {
io_notif_slot_flush_submit(notif_slot, 0);
+ }
+
+ if (ret >= 0)
+ ret += zc->done_io;
+ else if (zc->done_io)
+ ret = zc->done_io;
io_req_set_res(req, ret, 0);
return IOU_OK;
}
int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- struct io_accept *accept = io_kiocb_to_cmd(req);
+ struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
unsigned flags;
if (sqe->len || sqe->buf_index)
@@ -1057,7 +1111,7 @@ int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_ring_ctx *ctx = req->ctx;
- struct io_accept *accept = io_kiocb_to_cmd(req);
+ struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
bool fixed = !!accept->file_slot;
@@ -1115,7 +1169,7 @@ retry:
int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- struct io_socket *sock = io_kiocb_to_cmd(req);
+ struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
if (sqe->addr || sqe->rw_flags || sqe->buf_index)
return -EINVAL;
@@ -1136,7 +1190,7 @@ int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_socket *sock = io_kiocb_to_cmd(req);
+ struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
bool fixed = !!sock->file_slot;
struct file *file;
int ret, fd;
@@ -1170,14 +1224,14 @@ int io_socket(struct io_kiocb *req, unsigned int issue_flags)
int io_connect_prep_async(struct io_kiocb *req)
{
struct io_async_connect *io = req->async_data;
- struct io_connect *conn = io_kiocb_to_cmd(req);
+ struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
}
int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- struct io_connect *conn = io_kiocb_to_cmd(req);
+ struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
return -EINVAL;
@@ -1189,7 +1243,7 @@ int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_connect *connect = io_kiocb_to_cmd(req);
+ struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
struct io_async_connect __io, *io;
unsigned file_flags;
int ret;
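The io_sendzc() changes above make the zero-copy send restartable: a short send accumulates the transferred bytes in zc->done_io, marks the request REQ_F_PARTIAL_IO, and re-queues it, and the eventual completion adds done_io back so the caller sees the total. A minimal self-contained sketch of that accumulate-and-retry pattern, with a toy send function assumed only for illustration:

#include <stdio.h>

/* Toy "send" that transfers at most 3 bytes per call to force
 * partial progress (stands in for a socket short-write). */
static long toy_send(const char *buf, long len)
{
	(void)buf;
	return len > 3 ? 3 : len;
}

/* Accumulate partial progress across retries and report the total,
 * mirroring the done_io bookkeeping in io_sendzc() above. */
static long send_all(const char *buf, long len)
{
	long done_io = 0;

	while (len > 0) {
		long ret = toy_send(buf, len);

		if (ret < 0) {
			/* report progress made so far, if any */
			return done_io ? done_io : ret;
		}
		buf += ret;
		len -= ret;
		done_io += ret;	/* remember what already went out */
	}
	return done_io;
}

int main(void)
{
	const char msg[] = "hello, zero-copy world";

	printf("sent %ld of %zu bytes\n",
	       send_all(msg, sizeof(msg) - 1), sizeof(msg) - 1);
	return 0;
}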
diff --git a/io_uring/net.h b/io_uring/net.h
index 7c438d39c089..f91f56c6eeac 100644
--- a/io_uring/net.h
+++ b/io_uring/net.h
@@ -31,6 +31,7 @@ struct io_async_connect {
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_shutdown(struct io_kiocb *req, unsigned int issue_flags);
+int io_sendzc_prep_async(struct io_kiocb *req);
int io_sendmsg_prep_async(struct io_kiocb *req);
void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req);
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
diff --git a/io_uring/notif.c b/io_uring/notif.c
index b5f989dff9de..96f076b175e0 100644
--- a/io_uring/notif.c
+++ b/io_uring/notif.c
@@ -73,7 +73,7 @@ struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx,
}
void io_notif_slot_flush(struct io_notif_slot *slot)
- __must_hold(&ctx->uring_lock)
+ __must_hold(&slot->notif->ctx->uring_lock)
{
struct io_kiocb *notif = slot->notif;
struct io_notif_data *nd = io_notif_to_data(notif);
@@ -81,8 +81,10 @@ void io_notif_slot_flush(struct io_notif_slot *slot)
slot->notif = NULL;
/* drop slot's master ref */
- if (refcount_dec_and_test(&nd->uarg.refcnt))
- io_notif_complete(notif);
+ if (refcount_dec_and_test(&nd->uarg.refcnt)) {
+ notif->io_task_work.func = __io_notif_complete_tw;
+ io_req_task_work_add(notif);
+ }
}
__cold int io_notif_unregister(struct io_ring_ctx *ctx)
@@ -100,7 +102,7 @@ __cold int io_notif_unregister(struct io_ring_ctx *ctx)
if (!notif)
continue;
- nd = io_kiocb_to_cmd(notif);
+ nd = io_notif_to_data(notif);
slot->notif = NULL;
if (!refcount_dec_and_test(&nd->uarg.refcnt))
continue;
@@ -123,8 +125,6 @@ __cold int io_notif_register(struct io_ring_ctx *ctx,
struct io_uring_notification_register reg;
unsigned i;
- BUILD_BUG_ON(sizeof(struct io_notif_data) > 64);
-
if (ctx->nr_notif_slots)
return -EBUSY;
if (size != sizeof(reg))
diff --git a/io_uring/notif.h b/io_uring/notif.h
index 0819304d7e00..80f6445e0c2b 100644
--- a/io_uring/notif.h
+++ b/io_uring/notif.h
@@ -8,7 +8,7 @@
#include "rsrc.h"
#define IO_NOTIF_SPLICE_BATCH 32
-#define IORING_MAX_NOTIF_SLOTS (1U << 10)
+#define IORING_MAX_NOTIF_SLOTS (1U << 15)
struct io_notif_data {
struct file *file;
@@ -46,7 +46,7 @@ struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx,
static inline struct io_notif_data *io_notif_to_data(struct io_kiocb *notif)
{
- return io_kiocb_to_cmd(notif);
+ return io_kiocb_to_cmd(notif, struct io_notif_data);
}
static inline struct io_kiocb *io_get_notif(struct io_ring_ctx *ctx,
diff --git a/io_uring/opdef.c b/io_uring/opdef.c
index 72dd2b2d8a9d..41410126c1c6 100644
--- a/io_uring/opdef.c
+++ b/io_uring/opdef.c
@@ -478,13 +478,15 @@ const struct io_op_def io_op_defs[] = {
.pollout = 1,
.audit_skip = 1,
.ioprio = 1,
+ .manual_alloc = 1,
#if defined(CONFIG_NET)
+ .async_size = sizeof(struct io_async_msghdr),
.prep = io_sendzc_prep,
.issue = io_sendzc,
+ .prep_async = io_sendzc_prep_async,
#else
.prep = io_eopnotsupp_prep,
#endif
-
},
};
diff --git a/io_uring/opdef.h b/io_uring/opdef.h
index ece8ed4f96c4..763c6e54e2ee 100644
--- a/io_uring/opdef.h
+++ b/io_uring/opdef.h
@@ -25,6 +25,8 @@ struct io_op_def {
unsigned ioprio : 1;
/* supports iopoll */
unsigned iopoll : 1;
+ /* opcode specific path will handle ->async_data allocation if needed */
+ unsigned manual_alloc : 1;
/* size of async data needed, if any */
unsigned short async_size;
diff --git a/io_uring/openclose.c b/io_uring/openclose.c
index d1818ec9169b..67178e4bb282 100644
--- a/io_uring/openclose.c
+++ b/io_uring/openclose.c
@@ -33,7 +33,7 @@ struct io_close {
static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- struct io_open *open = io_kiocb_to_cmd(req);
+ struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
const char __user *fname;
int ret;
@@ -66,7 +66,7 @@ static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- struct io_open *open = io_kiocb_to_cmd(req);
+ struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
u64 mode = READ_ONCE(sqe->len);
u64 flags = READ_ONCE(sqe->open_flags);
@@ -76,7 +76,7 @@ int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- struct io_open *open = io_kiocb_to_cmd(req);
+ struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
struct open_how __user *how;
size_t len;
int ret;
@@ -95,7 +95,7 @@ int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_open *open = io_kiocb_to_cmd(req);
+ struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
struct open_flags op;
struct file *file;
bool resolve_nonblock, nonblock_set;
@@ -167,7 +167,7 @@ int io_openat(struct io_kiocb *req, unsigned int issue_flags)
void io_open_cleanup(struct io_kiocb *req)
{
- struct io_open *open = io_kiocb_to_cmd(req);
+ struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
if (open->filename)
putname(open->filename);
@@ -187,14 +187,14 @@ int __io_close_fixed(struct io_ring_ctx *ctx, unsigned int issue_flags,
static inline int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_close *close = io_kiocb_to_cmd(req);
+ struct io_close *close = io_kiocb_to_cmd(req, struct io_close);
return __io_close_fixed(req->ctx, issue_flags, close->file_slot - 1);
}
int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- struct io_close *close = io_kiocb_to_cmd(req);
+ struct io_close *close = io_kiocb_to_cmd(req, struct io_close);
if (sqe->off || sqe->addr || sqe->len || sqe->rw_flags || sqe->buf_index)
return -EINVAL;
@@ -212,7 +212,7 @@ int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
int io_close(struct io_kiocb *req, unsigned int issue_flags)
{
struct files_struct *files = current->files;
- struct io_close *close = io_kiocb_to_cmd(req);
+ struct io_close *close = io_kiocb_to_cmd(req, struct io_close);
struct fdtable *fdt;
struct file *file;
int ret = -EBADF;
diff --git a/io_uring/poll.c b/io_uring/poll.c
index dadd293749b0..d5bad0bea6e4 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -85,7 +85,7 @@ static struct io_poll *io_poll_get_double(struct io_kiocb *req)
static struct io_poll *io_poll_get_single(struct io_kiocb *req)
{
if (req->opcode == IORING_OP_POLL_ADD)
- return io_kiocb_to_cmd(req);
+ return io_kiocb_to_cmd(req, struct io_poll);
return &req->apoll->poll;
}
@@ -274,7 +274,7 @@ static void io_poll_task_func(struct io_kiocb *req, bool *locked)
return;
if (ret == IOU_POLL_DONE) {
- struct io_poll *poll = io_kiocb_to_cmd(req);
+ struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
req->cqe.res = mangle_poll(req->cqe.res & poll->events);
} else if (ret != IOU_POLL_REMOVE_POLL_USE_RES) {
req->cqe.res = ret;
@@ -475,7 +475,7 @@ static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
struct poll_table_struct *p)
{
struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
- struct io_poll *poll = io_kiocb_to_cmd(pt->req);
+ struct io_poll *poll = io_kiocb_to_cmd(pt->req, struct io_poll);
__io_queue_proc(poll, pt, head,
(struct io_poll **) &pt->req->async_data);
@@ -821,7 +821,7 @@ static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- struct io_poll_update *upd = io_kiocb_to_cmd(req);
+ struct io_poll_update *upd = io_kiocb_to_cmd(req, struct io_poll_update);
u32 flags;
if (sqe->buf_index || sqe->splice_fd_in)
@@ -851,7 +851,7 @@ int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- struct io_poll *poll = io_kiocb_to_cmd(req);
+ struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
u32 flags;
if (sqe->buf_index || sqe->off || sqe->addr)
@@ -868,7 +868,7 @@ int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_poll *poll = io_kiocb_to_cmd(req);
+ struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
struct io_poll_table ipt;
int ret;
@@ -891,7 +891,7 @@ int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_poll_update *poll_update = io_kiocb_to_cmd(req);
+ struct io_poll_update *poll_update = io_kiocb_to_cmd(req, struct io_poll_update);
struct io_cancel_data cd = { .data = poll_update->old_user_data, };
struct io_ring_ctx *ctx = req->ctx;
struct io_hash_bucket *bucket;
@@ -930,7 +930,7 @@ found:
if (poll_update->update_events || poll_update->update_user_data) {
/* only mask one event flags, keep behavior flags */
if (poll_update->update_events) {
- struct io_poll *poll = io_kiocb_to_cmd(preq);
+ struct io_poll *poll = io_kiocb_to_cmd(preq, struct io_poll);
poll->events &= ~0xffff;
poll->events |= poll_update->events & 0xffff;
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 59704b9ac537..71359a4d0bd4 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -657,7 +657,7 @@ __cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
int io_rsrc_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- struct io_rsrc_update *up = io_kiocb_to_cmd(req);
+ struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
return -EINVAL;
@@ -676,7 +676,7 @@ int io_rsrc_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
static int io_files_update_with_index_alloc(struct io_kiocb *req,
unsigned int issue_flags)
{
- struct io_rsrc_update *up = io_kiocb_to_cmd(req);
+ struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
__s32 __user *fds = u64_to_user_ptr(up->arg);
unsigned int done;
struct file *file;
@@ -714,7 +714,7 @@ static int io_files_update_with_index_alloc(struct io_kiocb *req,
static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_rsrc_update *up = io_kiocb_to_cmd(req);
+ struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
struct io_ring_ctx *ctx = req->ctx;
struct io_uring_rsrc_update2 up2;
int ret;
@@ -743,7 +743,7 @@ static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
static int io_notif_update(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_rsrc_update *up = io_kiocb_to_cmd(req);
+ struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
struct io_ring_ctx *ctx = req->ctx;
unsigned len = up->nr_args;
unsigned idx_end, idx = up->offset;
@@ -778,7 +778,7 @@ out:
int io_rsrc_update(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_rsrc_update *up = io_kiocb_to_cmd(req);
+ struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
switch (up->type) {
case IORING_RSRC_UPDATE_FILES:
diff --git a/io_uring/rw.c b/io_uring/rw.c
index b20ba87e4926..1babd77da79c 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -35,7 +35,7 @@ static inline bool io_file_supports_nowait(struct io_kiocb *req)
int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- struct io_rw *rw = io_kiocb_to_cmd(req);
+ struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
unsigned ioprio;
int ret;
@@ -102,7 +102,7 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
{
- struct io_rw *rw = io_kiocb_to_cmd(req);
+ struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
if (rw->kiocb.ki_pos != -1)
return &rw->kiocb.ki_pos;
@@ -186,7 +186,7 @@ static void kiocb_end_write(struct io_kiocb *req)
static bool __io_complete_rw_common(struct io_kiocb *req, long res)
{
- struct io_rw *rw = io_kiocb_to_cmd(req);
+ struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
if (rw->kiocb.ki_flags & IOCB_WRITE) {
kiocb_end_write(req);
@@ -241,7 +241,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
unsigned int issue_flags)
{
struct io_async_rw *io = req->async_data;
- struct io_rw *rw = io_kiocb_to_cmd(req);
+ struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
/* add previously done IO, if any */
if (req_has_async_data(req) && io->bytes_done > 0) {
@@ -277,7 +277,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
unsigned int issue_flags)
{
- struct io_rw *rw = io_kiocb_to_cmd(req);
+ struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
struct compat_iovec __user *uiov;
compat_ssize_t clen;
void __user *buf;
@@ -305,7 +305,7 @@ static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
unsigned int issue_flags)
{
- struct io_rw *rw = io_kiocb_to_cmd(req);
+ struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
struct iovec __user *uiov = u64_to_user_ptr(rw->addr);
void __user *buf;
ssize_t len;
@@ -328,7 +328,7 @@ static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
unsigned int issue_flags)
{
- struct io_rw *rw = io_kiocb_to_cmd(req);
+ struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
iov[0].iov_base = u64_to_user_ptr(rw->addr);
@@ -350,7 +350,7 @@ static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
struct io_rw_state *s,
unsigned int issue_flags)
{
- struct io_rw *rw = io_kiocb_to_cmd(req);
+ struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
struct iov_iter *iter = &s->iter;
u8 opcode = req->opcode;
struct iovec *iovec;
@@ -571,7 +571,7 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
{
struct wait_page_queue *wpq;
struct io_kiocb *req = wait->private;
- struct io_rw *rw = io_kiocb_to_cmd(req);
+ struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
struct wait_page_key *key = arg;
wpq = container_of(wait, struct wait_page_queue, wait);
@@ -601,7 +601,7 @@ static bool io_rw_should_retry(struct io_kiocb *req)
{
struct io_async_rw *io = req->async_data;
struct wait_page_queue *wait = &io->wpq;
- struct io_rw *rw = io_kiocb_to_cmd(req);
+ struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
struct kiocb *kiocb = &rw->kiocb;
/* never retry for NOWAIT, we just complete with -EAGAIN */
@@ -649,7 +649,7 @@ static bool need_complete_io(struct io_kiocb *req)
static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
{
- struct io_rw *rw = io_kiocb_to_cmd(req);
+ struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
struct kiocb *kiocb = &rw->kiocb;
struct io_ring_ctx *ctx = req->ctx;
struct file *file = req->file;
@@ -694,7 +694,7 @@ static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
int io_read(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_rw *rw = io_kiocb_to_cmd(req);
+ struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
struct io_rw_state __s, *s = &__s;
struct iovec *iovec;
struct kiocb *kiocb = &rw->kiocb;
@@ -839,7 +839,7 @@ done:
int io_write(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_rw *rw = io_kiocb_to_cmd(req);
+ struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
struct io_rw_state __s, *s = &__s;
struct iovec *iovec;
struct kiocb *kiocb = &rw->kiocb;
@@ -994,7 +994,7 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
wq_list_for_each(pos, start, &ctx->iopoll_list) {
struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
- struct io_rw *rw = io_kiocb_to_cmd(req);
+ struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
int ret;
/*
diff --git a/io_uring/splice.c b/io_uring/splice.c
index b013ba34bffa..53e4232d0866 100644
--- a/io_uring/splice.c
+++ b/io_uring/splice.c
@@ -26,7 +26,7 @@ struct io_splice {
static int __io_splice_prep(struct io_kiocb *req,
const struct io_uring_sqe *sqe)
{
- struct io_splice *sp = io_kiocb_to_cmd(req);
+ struct io_splice *sp = io_kiocb_to_cmd(req, struct io_splice);
unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
sp->len = READ_ONCE(sqe->len);
@@ -46,7 +46,7 @@ int io_tee_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
int io_tee(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_splice *sp = io_kiocb_to_cmd(req);
+ struct io_splice *sp = io_kiocb_to_cmd(req, struct io_splice);
struct file *out = sp->file_out;
unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
struct file *in;
@@ -78,7 +78,7 @@ done:
int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- struct io_splice *sp = io_kiocb_to_cmd(req);
+ struct io_splice *sp = io_kiocb_to_cmd(req, struct io_splice);
sp->off_in = READ_ONCE(sqe->splice_off_in);
sp->off_out = READ_ONCE(sqe->off);
@@ -87,7 +87,7 @@ int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
int io_splice(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_splice *sp = io_kiocb_to_cmd(req);
+ struct io_splice *sp = io_kiocb_to_cmd(req, struct io_splice);
struct file *out = sp->file_out;
unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
loff_t *poff_in, *poff_out;
diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
index 76d4d70c733a..559652380672 100644
--- a/io_uring/sqpoll.c
+++ b/io_uring/sqpoll.c
@@ -235,8 +235,6 @@ static int io_sq_thread(void *data)
set_cpus_allowed_ptr(current, cpu_online_mask);
current->flags |= PF_NO_SETAFFINITY;
- audit_alloc_kernel(current);
-
mutex_lock(&sqd->lock);
while (1) {
bool cap_entries, sqt_spin = false;
@@ -310,8 +308,6 @@ static int io_sq_thread(void *data)
io_run_task_work();
mutex_unlock(&sqd->lock);
- audit_free(current);
-
complete(&sqd->exited);
do_exit(0);
}
diff --git a/io_uring/statx.c b/io_uring/statx.c
index 6056cd7f4876..d8fc933d3f59 100644
--- a/io_uring/statx.c
+++ b/io_uring/statx.c
@@ -22,7 +22,7 @@ struct io_statx {
int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- struct io_statx *sx = io_kiocb_to_cmd(req);
+ struct io_statx *sx = io_kiocb_to_cmd(req, struct io_statx);
const char __user *path;
if (sqe->buf_index || sqe->splice_fd_in)
@@ -53,7 +53,7 @@ int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
int io_statx(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_statx *sx = io_kiocb_to_cmd(req);
+ struct io_statx *sx = io_kiocb_to_cmd(req, struct io_statx);
int ret;
if (issue_flags & IO_URING_F_NONBLOCK)
@@ -66,7 +66,7 @@ int io_statx(struct io_kiocb *req, unsigned int issue_flags)
void io_statx_cleanup(struct io_kiocb *req)
{
- struct io_statx *sx = io_kiocb_to_cmd(req);
+ struct io_statx *sx = io_kiocb_to_cmd(req, struct io_statx);
if (sx->filename)
putname(sx->filename);
diff --git a/io_uring/sync.c b/io_uring/sync.c
index f2102afa79ca..64e87ea2b8fb 100644
--- a/io_uring/sync.c
+++ b/io_uring/sync.c
@@ -24,7 +24,7 @@ struct io_sync {
int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- struct io_sync *sync = io_kiocb_to_cmd(req);
+ struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);
if (unlikely(sqe->addr || sqe->buf_index || sqe->splice_fd_in))
return -EINVAL;
@@ -37,7 +37,7 @@ int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_sync *sync = io_kiocb_to_cmd(req);
+ struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);
int ret;
/* sync_file_range always requires a blocking context */
@@ -51,7 +51,7 @@ int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- struct io_sync *sync = io_kiocb_to_cmd(req);
+ struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);
if (unlikely(sqe->addr || sqe->buf_index || sqe->splice_fd_in))
return -EINVAL;
@@ -67,7 +67,7 @@ int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_sync *sync = io_kiocb_to_cmd(req);
+ struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);
loff_t end = sync->off + sync->len;
int ret;
@@ -83,7 +83,7 @@ int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
int io_fallocate_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- struct io_sync *sync = io_kiocb_to_cmd(req);
+ struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);
if (sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
return -EINVAL;
@@ -96,7 +96,7 @@ int io_fallocate_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_sync *sync = io_kiocb_to_cmd(req);
+ struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);
int ret;
/* fallocate always requiring blocking context */
diff --git a/io_uring/timeout.c b/io_uring/timeout.c
index 2f9e56935479..78ea2c64b70e 100644
--- a/io_uring/timeout.c
+++ b/io_uring/timeout.c
@@ -36,7 +36,7 @@ struct io_timeout_rem {
static inline bool io_is_timeout_noseq(struct io_kiocb *req)
{
- struct io_timeout *timeout = io_kiocb_to_cmd(req);
+ struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
return !timeout->off;
}
@@ -56,7 +56,7 @@ static bool io_kill_timeout(struct io_kiocb *req, int status)
struct io_timeout_data *io = req->async_data;
if (hrtimer_try_to_cancel(&io->timer) != -1) {
- struct io_timeout *timeout = io_kiocb_to_cmd(req);
+ struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
if (status)
req_set_fail(req);
@@ -188,7 +188,7 @@ struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req,
__must_hold(&req->ctx->timeout_lock)
{
struct io_timeout_data *io = link->async_data;
- struct io_timeout *timeout = io_kiocb_to_cmd(link);
+ struct io_timeout *timeout = io_kiocb_to_cmd(link, struct io_timeout);
io_remove_next_linked(req);
timeout->head = NULL;
@@ -205,7 +205,7 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
struct io_timeout_data *data = container_of(timer,
struct io_timeout_data, timer);
struct io_kiocb *req = data->req;
- struct io_timeout *timeout = io_kiocb_to_cmd(req);
+ struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
struct io_ring_ctx *ctx = req->ctx;
unsigned long flags;
@@ -252,7 +252,7 @@ static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
io = req->async_data;
if (hrtimer_try_to_cancel(&io->timer) == -1)
return ERR_PTR(-EALREADY);
- timeout = io_kiocb_to_cmd(req);
+ timeout = io_kiocb_to_cmd(req, struct io_timeout);
list_del_init(&timeout->list);
return req;
}
@@ -275,7 +275,7 @@ int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
{
unsigned issue_flags = *locked ? 0 : IO_URING_F_UNLOCKED;
- struct io_timeout *timeout = io_kiocb_to_cmd(req);
+ struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
struct io_kiocb *prev = timeout->prev;
int ret = -ENOENT;
@@ -302,7 +302,7 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
struct io_timeout_data *data = container_of(timer,
struct io_timeout_data, timer);
struct io_kiocb *prev, *req = data->req;
- struct io_timeout *timeout = io_kiocb_to_cmd(req);
+ struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
struct io_ring_ctx *ctx = req->ctx;
unsigned long flags;
@@ -378,7 +378,7 @@ static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
{
struct io_cancel_data cd = { .data = user_data, };
struct io_kiocb *req = io_timeout_extract(ctx, &cd);
- struct io_timeout *timeout = io_kiocb_to_cmd(req);
+ struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
struct io_timeout_data *data;
if (IS_ERR(req))
@@ -395,7 +395,7 @@ static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
int io_timeout_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- struct io_timeout_rem *tr = io_kiocb_to_cmd(req);
+ struct io_timeout_rem *tr = io_kiocb_to_cmd(req, struct io_timeout_rem);
if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
return -EINVAL;
@@ -435,7 +435,7 @@ static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
*/
int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_timeout_rem *tr = io_kiocb_to_cmd(req);
+ struct io_timeout_rem *tr = io_kiocb_to_cmd(req, struct io_timeout_rem);
struct io_ring_ctx *ctx = req->ctx;
int ret;
@@ -466,7 +466,7 @@ static int __io_timeout_prep(struct io_kiocb *req,
const struct io_uring_sqe *sqe,
bool is_timeout_link)
{
- struct io_timeout *timeout = io_kiocb_to_cmd(req);
+ struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
struct io_timeout_data *data;
unsigned flags;
u32 off = READ_ONCE(sqe->off);
@@ -532,7 +532,7 @@ int io_link_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_timeout *timeout = io_kiocb_to_cmd(req);
+ struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
struct io_ring_ctx *ctx = req->ctx;
struct io_timeout_data *data = req->async_data;
struct list_head *entry;
@@ -583,7 +583,7 @@ add:
void io_queue_linked_timeout(struct io_kiocb *req)
{
- struct io_timeout *timeout = io_kiocb_to_cmd(req);
+ struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
struct io_ring_ctx *ctx = req->ctx;
spin_lock_irq(&ctx->timeout_lock);
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index 0a421ed51e7e..e78b6f980d77 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -3,6 +3,7 @@
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring.h>
+#include <linux/security.h>
#include <uapi/linux/io_uring.h>
@@ -11,7 +12,7 @@
static void io_uring_cmd_work(struct io_kiocb *req, bool *locked)
{
- struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req);
+ struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
ioucmd->task_work_cb(ioucmd);
}
@@ -46,7 +47,7 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2)
if (ret < 0)
req_set_fail(req);
- io_req_set_res(req, 0, ret);
+ io_req_set_res(req, ret, 0);
if (req->ctx->flags & IORING_SETUP_CQE32)
io_req_set_cqe32_extra(req, res2, 0);
__io_req_complete(req, 0);
@@ -55,9 +56,12 @@ EXPORT_SYMBOL_GPL(io_uring_cmd_done);
int io_uring_cmd_prep_async(struct io_kiocb *req)
{
- struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req);
+ struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
size_t cmd_size;
+ BUILD_BUG_ON(uring_cmd_pdu_size(0) != 16);
+ BUILD_BUG_ON(uring_cmd_pdu_size(1) != 80);
+
cmd_size = uring_cmd_pdu_size(req->ctx->flags & IORING_SETUP_SQE128);
memcpy(req->async_data, ioucmd->cmd, cmd_size);
@@ -66,7 +70,7 @@ int io_uring_cmd_prep_async(struct io_kiocb *req)
int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req);
+ struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
if (sqe->rw_flags || sqe->__pad1)
return -EINVAL;
@@ -77,7 +81,7 @@ int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req);
+ struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
struct io_ring_ctx *ctx = req->ctx;
struct file *file = req->file;
int ret;
@@ -85,6 +89,10 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
if (!req->file->f_op->uring_cmd)
return -EOPNOTSUPP;
+ ret = security_uring_cmd(ioucmd);
+ if (ret)
+ return ret;
+
if (ctx->flags & IORING_SETUP_SQE128)
issue_flags |= IO_URING_F_SQE128;
if (ctx->flags & IORING_SETUP_CQE32)
@@ -106,8 +114,10 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
}
if (ret != -EIOCBQUEUED) {
- io_uring_cmd_done(ioucmd, ret, 0);
- return IOU_OK;
+ if (ret < 0)
+ req_set_fail(req);
+ io_req_set_res(req, ret, 0);
+ return ret;
}
return IOU_ISSUE_SKIP_COMPLETE;
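
The new BUILD_BUG_ON() lines above pin the size of the pass-through command PDU (16 bytes for plain SQEs, 80 bytes with IORING_SETUP_SQE128) so that an accidental layout change breaks the build rather than the user-visible ABI. The same idea can be expressed in plain C11 with static_assert; the structure below is purely illustrative, not the real SQE layout:

/* Hedged sketch: pin an ABI-visible size at compile time (illustrative layout). */
#include <assert.h>
#include <stdint.h>

struct demo_cmd_pdu {
	uint64_t user_data;	/* 8 bytes */
	uint64_t addr;		/* 8 bytes */
};

/* Fails to compile if anyone grows or repacks the structure. */
static_assert(sizeof(struct demo_cmd_pdu) == 16, "demo_cmd_pdu layout changed");

int main(void)
{
	return 0;
}
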
diff --git a/io_uring/xattr.c b/io_uring/xattr.c
index b179f9acd5ac..84180afd090b 100644
--- a/io_uring/xattr.c
+++ b/io_uring/xattr.c
@@ -24,7 +24,7 @@ struct io_xattr {
void io_xattr_cleanup(struct io_kiocb *req)
{
- struct io_xattr *ix = io_kiocb_to_cmd(req);
+ struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
if (ix->filename)
putname(ix->filename);
@@ -44,7 +44,7 @@ static void io_xattr_finish(struct io_kiocb *req, int ret)
static int __io_getxattr_prep(struct io_kiocb *req,
const struct io_uring_sqe *sqe)
{
- struct io_xattr *ix = io_kiocb_to_cmd(req);
+ struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
const char __user *name;
int ret;
@@ -85,7 +85,7 @@ int io_fgetxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
int io_getxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- struct io_xattr *ix = io_kiocb_to_cmd(req);
+ struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
const char __user *path;
int ret;
@@ -106,7 +106,7 @@ int io_getxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
int io_fgetxattr(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_xattr *ix = io_kiocb_to_cmd(req);
+ struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
int ret;
if (issue_flags & IO_URING_F_NONBLOCK)
@@ -122,7 +122,7 @@ int io_fgetxattr(struct io_kiocb *req, unsigned int issue_flags)
int io_getxattr(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_xattr *ix = io_kiocb_to_cmd(req);
+ struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
unsigned int lookup_flags = LOOKUP_FOLLOW;
struct path path;
int ret;
@@ -151,7 +151,7 @@ retry:
static int __io_setxattr_prep(struct io_kiocb *req,
const struct io_uring_sqe *sqe)
{
- struct io_xattr *ix = io_kiocb_to_cmd(req);
+ struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
const char __user *name;
int ret;
@@ -181,7 +181,7 @@ static int __io_setxattr_prep(struct io_kiocb *req,
int io_setxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- struct io_xattr *ix = io_kiocb_to_cmd(req);
+ struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
const char __user *path;
int ret;
@@ -208,7 +208,7 @@ int io_fsetxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
static int __io_setxattr(struct io_kiocb *req, unsigned int issue_flags,
struct path *path)
{
- struct io_xattr *ix = io_kiocb_to_cmd(req);
+ struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
int ret;
ret = mnt_want_write(path->mnt);
@@ -234,7 +234,7 @@ int io_fsetxattr(struct io_kiocb *req, unsigned int issue_flags)
int io_setxattr(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_xattr *ix = io_kiocb_to_cmd(req);
+ struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
unsigned int lookup_flags = LOOKUP_FOLLOW;
struct path path;
int ret;
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 12ad7860bb88..f98de32aeea1 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -489,7 +489,7 @@ static struct vfsmount *mq_create_mount(struct ipc_namespace *ns)
static void init_once(void *foo)
{
- struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;
+ struct mqueue_inode_info *p = foo;
inode_init_once(&p->vfs_inode);
}
diff --git a/kernel/audit_fsnotify.c b/kernel/audit_fsnotify.c
index 6432a37ac1c9..c565fbf66ac8 100644
--- a/kernel/audit_fsnotify.c
+++ b/kernel/audit_fsnotify.c
@@ -102,6 +102,7 @@ struct audit_fsnotify_mark *audit_alloc_mark(struct audit_krule *krule, char *pa
ret = fsnotify_add_inode_mark(&audit_mark->mark, inode, 0);
if (ret < 0) {
+ audit_mark->path = NULL;
fsnotify_put_mark(&audit_mark->mark);
audit_mark = ERR_PTR(ret);
}
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 3a8c9d744800..79a5da1bc5bb 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1073,31 +1073,6 @@ int audit_alloc(struct task_struct *tsk)
return 0;
}
-/**
- * audit_alloc_kernel - allocate an audit_context for a kernel task
- * @tsk: the kernel task
- *
- * Similar to the audit_alloc() function, but intended for kernel private
- * threads. Returns zero on success, negative values on failure.
- */
-int audit_alloc_kernel(struct task_struct *tsk)
-{
- /*
- * At the moment we are just going to call into audit_alloc() to
- * simplify the code, but there two things to keep in mind with this
- * approach:
- *
- * 1. Filtering internal kernel tasks is a bit laughable in almost all
- * cases, but there is at least one case where there is a benefit:
- * the '-a task,never' case allows the admin to effectively disable
- * task auditing at runtime.
- *
- * 2. The {set,clear}_task_syscall_work() ops likely have zero effect
- * on these internal kernel tasks, but they probably don't hurt either.
- */
- return audit_alloc(tsk);
-}
-
static inline void audit_free_context(struct audit_context *context)
{
/* resetting is extra work, but it is likely just noise */
@@ -1965,6 +1940,7 @@ void __audit_uring_exit(int success, long code)
goto out;
}
+ audit_return_fixup(ctx, success, code);
if (ctx->context == AUDIT_CTX_SYSCALL) {
/*
* NOTE: See the note in __audit_uring_entry() about the case
@@ -2006,7 +1982,6 @@ void __audit_uring_exit(int success, long code)
audit_filter_inodes(current, ctx);
if (ctx->current_state != AUDIT_STATE_RECORD)
goto out;
- audit_return_fixup(ctx, success, code);
audit_log_exit();
out:
@@ -2090,13 +2065,13 @@ void __audit_syscall_exit(int success, long return_code)
if (!list_empty(&context->killed_trees))
audit_kill_trees(context);
+ audit_return_fixup(context, success, return_code);
/* run through both filters to ensure we set the filterkey properly */
audit_filter_syscall(current, context);
audit_filter_inodes(current, context);
if (context->current_state < AUDIT_STATE_RECORD)
goto out;
- audit_return_fixup(context, success, return_code);
audit_log_exit();
out:
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index d3e734bf8056..624527401d4d 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -649,6 +649,11 @@ static int bpf_iter_init_array_map(void *priv_data,
seq_info->percpu_value_buf = value_buf;
}
+ /* bpf_iter_attach_map() acquires a map uref, and the uref may be
+ * released before or in the middle of iterating map elements, so
+ * acquire an extra map uref for iterator.
+ */
+ bpf_map_inc_with_uref(map);
seq_info->map = map;
return 0;
}
@@ -657,6 +662,7 @@ static void bpf_iter_fini_array_map(void *priv_data)
{
struct bpf_iter_seq_array_map_info *seq_info = priv_data;
+ bpf_map_put_with_uref(seq_info->map);
kfree(seq_info->percpu_value_buf);
}
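
The arraymap change above and the hashtab change below fix the same lifetime problem: the iterator's seq_info kept a raw map pointer, but the uref taken by bpf_iter_attach_map() may be released while iteration is still in flight, so init now takes its own reference and fini drops it. A minimal user-space analogue of that acquire-in-init / release-in-fini pairing (stand-in names, not the kernel API) is:

/* Hedged sketch of paired reference counting across an iterator's lifetime. */
#include <assert.h>
#include <stdatomic.h>
#include <stddef.h>

struct map { atomic_int refs; };

static void map_get(struct map *m) { atomic_fetch_add(&m->refs, 1); }
static void map_put(struct map *m) { atomic_fetch_sub(&m->refs, 1); }

struct iter { struct map *map; };

static void iter_init(struct iter *it, struct map *m)
{
	map_get(m);		/* own a reference for as long as the iterator lives */
	it->map = m;
}

static void iter_fini(struct iter *it)
{
	map_put(it->map);	/* drop it exactly once, in the teardown path */
	it->map = NULL;
}

int main(void)
{
	struct map m = { .refs = 1 };	/* creator's reference */
	struct iter it;

	iter_init(&it, &m);
	iter_fini(&it);
	assert(atomic_load(&m.refs) == 1);
	return 0;
}
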
diff --git a/kernel/bpf/bpf_iter.c b/kernel/bpf/bpf_iter.c
index 2726a5950cfa..24b755eca0b3 100644
--- a/kernel/bpf/bpf_iter.c
+++ b/kernel/bpf/bpf_iter.c
@@ -68,13 +68,18 @@ static void bpf_iter_done_stop(struct seq_file *seq)
iter_priv->done_stop = true;
}
+static inline bool bpf_iter_target_support_resched(const struct bpf_iter_target_info *tinfo)
+{
+ return tinfo->reg_info->feature & BPF_ITER_RESCHED;
+}
+
static bool bpf_iter_support_resched(struct seq_file *seq)
{
struct bpf_iter_priv_data *iter_priv;
iter_priv = container_of(seq->private, struct bpf_iter_priv_data,
target_private);
- return iter_priv->tinfo->reg_info->feature & BPF_ITER_RESCHED;
+ return bpf_iter_target_support_resched(iter_priv->tinfo);
}
/* maximum visited objects before bailing out */
@@ -537,6 +542,10 @@ int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr,
if (!tinfo)
return -ENOENT;
+ /* Only allow sleepable program for resched-able iterator */
+ if (prog->aux->sleepable && !bpf_iter_target_support_resched(tinfo))
+ return -EINVAL;
+
link = kzalloc(sizeof(*link), GFP_USER | __GFP_NOWARN);
if (!link)
return -ENOMEM;
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index da7578426a46..6c530a5e560a 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -311,12 +311,8 @@ static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
struct htab_elem *l;
if (node) {
- u32 key_size = htab->map.key_size;
-
l = container_of(node, struct htab_elem, lru_node);
- memcpy(l->key, key, key_size);
- check_and_init_map_value(&htab->map,
- l->key + round_up(key_size, 8));
+ memcpy(l->key, key, htab->map.key_size);
return l;
}
@@ -2064,6 +2060,7 @@ static int bpf_iter_init_hash_map(void *priv_data,
seq_info->percpu_value_buf = value_buf;
}
+ bpf_map_inc_with_uref(map);
seq_info->map = map;
seq_info->htab = container_of(map, struct bpf_htab, map);
return 0;
@@ -2073,6 +2070,7 @@ static void bpf_iter_fini_hash_map(void *priv_data)
{
struct bpf_iter_seq_hash_map_info *seq_info = priv_data;
+ bpf_map_put_with_uref(seq_info->map);
kfree(seq_info->percpu_value_buf);
}
diff --git a/kernel/bpf/reuseport_array.c b/kernel/bpf/reuseport_array.c
index e2618fb5870e..82c61612f382 100644
--- a/kernel/bpf/reuseport_array.c
+++ b/kernel/bpf/reuseport_array.c
@@ -21,14 +21,11 @@ static struct reuseport_array *reuseport_array(struct bpf_map *map)
/* The caller must hold the reuseport_lock */
void bpf_sk_reuseport_detach(struct sock *sk)
{
- uintptr_t sk_user_data;
+ struct sock __rcu **socks;
write_lock_bh(&sk->sk_callback_lock);
- sk_user_data = (uintptr_t)sk->sk_user_data;
- if (sk_user_data & SK_USER_DATA_BPF) {
- struct sock __rcu **socks;
-
- socks = (void *)(sk_user_data & SK_USER_DATA_PTRMASK);
+ socks = __locked_read_sk_user_data_with_flags(sk, SK_USER_DATA_BPF);
+ if (socks) {
WRITE_ONCE(sk->sk_user_data, NULL);
/*
* Do not move this NULL assignment outside of
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 83c7136c5788..a4d40d98428a 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -3886,6 +3886,7 @@ static int bpf_prog_get_info_by_fd(struct file *file,
union bpf_attr __user *uattr)
{
struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
+ struct btf *attach_btf = bpf_prog_get_target_btf(prog);
struct bpf_prog_info info;
u32 info_len = attr->info.info_len;
struct bpf_prog_kstats stats;
@@ -4088,10 +4089,8 @@ static int bpf_prog_get_info_by_fd(struct file *file,
if (prog->aux->btf)
info.btf_id = btf_obj_id(prog->aux->btf);
info.attach_btf_id = prog->aux->attach_btf_id;
- if (prog->aux->attach_btf)
- info.attach_btf_obj_id = btf_obj_id(prog->aux->attach_btf);
- else if (prog->aux->dst_prog)
- info.attach_btf_obj_id = btf_obj_id(prog->aux->dst_prog->aux->attach_btf);
+ if (attach_btf)
+ info.attach_btf_obj_id = btf_obj_id(attach_btf);
ulen = info.nr_func_info;
info.nr_func_info = prog->aux->func_info_cnt;
@@ -5072,9 +5071,6 @@ static bool syscall_prog_is_valid_access(int off, int size,
BPF_CALL_3(bpf_sys_bpf, int, cmd, union bpf_attr *, attr, u32, attr_size)
{
- struct bpf_prog * __maybe_unused prog;
- struct bpf_tramp_run_ctx __maybe_unused run_ctx;
-
switch (cmd) {
case BPF_MAP_CREATE:
case BPF_MAP_UPDATE_ELEM:
@@ -5084,6 +5080,26 @@ BPF_CALL_3(bpf_sys_bpf, int, cmd, union bpf_attr *, attr, u32, attr_size)
case BPF_LINK_CREATE:
case BPF_RAW_TRACEPOINT_OPEN:
break;
+ default:
+ return -EINVAL;
+ }
+ return __sys_bpf(cmd, KERNEL_BPFPTR(attr), attr_size);
+}
+
+
+/* To shut up -Wmissing-prototypes.
+ * This function is used by the kernel light skeleton
+ * to load bpf programs when modules are loaded or during kernel boot.
+ * See tools/lib/bpf/skel_internal.h
+ */
+int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size);
+
+int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
+{
+ struct bpf_prog * __maybe_unused prog;
+ struct bpf_tramp_run_ctx __maybe_unused run_ctx;
+
+ switch (cmd) {
#ifdef CONFIG_BPF_JIT /* __bpf_prog_enter_sleepable used by trampoline and JIT */
case BPF_PROG_TEST_RUN:
if (attr->test.data_in || attr->test.data_out ||
@@ -5114,11 +5130,10 @@ BPF_CALL_3(bpf_sys_bpf, int, cmd, union bpf_attr *, attr, u32, attr_size)
return 0;
#endif
default:
- return -EINVAL;
+ return ____bpf_sys_bpf(cmd, attr, size);
}
- return __sys_bpf(cmd, KERNEL_BPFPTR(attr), attr_size);
}
-EXPORT_SYMBOL(bpf_sys_bpf);
+EXPORT_SYMBOL(kern_sys_bpf);
static const struct bpf_func_proto bpf_sys_bpf_proto = {
.func = bpf_sys_bpf,
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 0f532e6a717f..ff87e38af8a7 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -841,7 +841,10 @@ void bpf_trampoline_put(struct bpf_trampoline *tr)
* multiple rcu callbacks.
*/
hlist_del(&tr->hlist);
- kfree(tr->fops);
+ if (tr->fops) {
+ ftrace_free_filter(tr->fops);
+ kfree(tr->fops);
+ }
kfree(tr);
out:
mutex_unlock(&trampoline_mutex);
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 2ade21b54dc4..ff6a8099eb2a 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -59,6 +59,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
int retval = 0;
mutex_lock(&cgroup_mutex);
+ cpus_read_lock();
percpu_down_write(&cgroup_threadgroup_rwsem);
for_each_root(root) {
struct cgroup *from_cgrp;
@@ -72,6 +73,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
break;
}
percpu_up_write(&cgroup_threadgroup_rwsem);
+ cpus_read_unlock();
mutex_unlock(&cgroup_mutex);
return retval;
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index ffaccd6373f1..e4bb5d57f4d1 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -1820,6 +1820,7 @@ int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
if (ss->css_rstat_flush) {
list_del_rcu(&css->rstat_css_node);
+ synchronize_rcu();
list_add_rcu(&css->rstat_css_node,
&dcgrp->rstat_css_list);
}
@@ -2370,6 +2371,47 @@ int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
EXPORT_SYMBOL_GPL(task_cgroup_path);
/**
+ * cgroup_attach_lock - Lock for ->attach()
+ * @lock_threadgroup: whether to down_write cgroup_threadgroup_rwsem
+ *
+ * cgroup migration sometimes needs to stabilize threadgroups against forks and
+ * exits by write-locking cgroup_threadgroup_rwsem. However, some ->attach()
+ * implementations (e.g. cpuset), also need to disable CPU hotplug.
+ * Unfortunately, letting ->attach() operations acquire cpus_read_lock() can
+ * lead to deadlocks.
+ *
+ * Bringing up a CPU may involve creating and destroying tasks which requires
+ * read-locking threadgroup_rwsem, so threadgroup_rwsem nests inside
+ * cpus_read_lock(). If we call an ->attach() which acquires the cpus lock while
+ * write-locking threadgroup_rwsem, the locking order is reversed and we end up
+ * waiting for an on-going CPU hotplug operation which in turn is waiting for
+ * the threadgroup_rwsem to be released to create new tasks. For more details:
+ *
+ * http://lkml.kernel.org/r/20220711174629.uehfmqegcwn2lqzu@wubuntu
+ *
+ * Resolve the situation by always acquiring cpus_read_lock() before optionally
+ * write-locking cgroup_threadgroup_rwsem. This allows ->attach() to assume that
+ * CPU hotplug is disabled on entry.
+ */
+static void cgroup_attach_lock(bool lock_threadgroup)
+{
+ cpus_read_lock();
+ if (lock_threadgroup)
+ percpu_down_write(&cgroup_threadgroup_rwsem);
+}
+
+/**
+ * cgroup_attach_unlock - Undo cgroup_attach_lock()
+ * @lock_threadgroup: whether to up_write cgroup_threadgroup_rwsem
+ */
+static void cgroup_attach_unlock(bool lock_threadgroup)
+{
+ if (lock_threadgroup)
+ percpu_up_write(&cgroup_threadgroup_rwsem);
+ cpus_read_unlock();
+}
+
+/**
* cgroup_migrate_add_task - add a migration target task to a migration context
* @task: target task
* @mgctx: target migration context
@@ -2841,8 +2883,7 @@ int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
}
struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
- bool *locked)
- __acquires(&cgroup_threadgroup_rwsem)
+ bool *threadgroup_locked)
{
struct task_struct *tsk;
pid_t pid;
@@ -2859,12 +2900,8 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
* Therefore, we can skip the global lock.
*/
lockdep_assert_held(&cgroup_mutex);
- if (pid || threadgroup) {
- percpu_down_write(&cgroup_threadgroup_rwsem);
- *locked = true;
- } else {
- *locked = false;
- }
+ *threadgroup_locked = pid || threadgroup;
+ cgroup_attach_lock(*threadgroup_locked);
rcu_read_lock();
if (pid) {
@@ -2895,17 +2932,14 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
goto out_unlock_rcu;
out_unlock_threadgroup:
- if (*locked) {
- percpu_up_write(&cgroup_threadgroup_rwsem);
- *locked = false;
- }
+ cgroup_attach_unlock(*threadgroup_locked);
+ *threadgroup_locked = false;
out_unlock_rcu:
rcu_read_unlock();
return tsk;
}
-void cgroup_procs_write_finish(struct task_struct *task, bool locked)
- __releases(&cgroup_threadgroup_rwsem)
+void cgroup_procs_write_finish(struct task_struct *task, bool threadgroup_locked)
{
struct cgroup_subsys *ss;
int ssid;
@@ -2913,8 +2947,8 @@ void cgroup_procs_write_finish(struct task_struct *task, bool locked)
/* release reference from cgroup_procs_write_start() */
put_task_struct(task);
- if (locked)
- percpu_up_write(&cgroup_threadgroup_rwsem);
+ cgroup_attach_unlock(threadgroup_locked);
+
for_each_subsys(ss, ssid)
if (ss->post_attach)
ss->post_attach();
@@ -3000,8 +3034,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
* write-locking can be skipped safely.
*/
has_tasks = !list_empty(&mgctx.preloaded_src_csets);
- if (has_tasks)
- percpu_down_write(&cgroup_threadgroup_rwsem);
+ cgroup_attach_lock(has_tasks);
/* NULL dst indicates self on default hierarchy */
ret = cgroup_migrate_prepare_dst(&mgctx);
@@ -3022,8 +3055,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
ret = cgroup_migrate_execute(&mgctx);
out_finish:
cgroup_migrate_finish(&mgctx);
- if (has_tasks)
- percpu_up_write(&cgroup_threadgroup_rwsem);
+ cgroup_attach_unlock(has_tasks);
return ret;
}
@@ -3698,7 +3730,7 @@ static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf,
}
psi = cgroup_ino(cgrp) == 1 ? &psi_system : cgrp->psi;
- new = psi_trigger_create(psi, buf, nbytes, res);
+ new = psi_trigger_create(psi, buf, res);
if (IS_ERR(new)) {
cgroup_put(cgrp);
return PTR_ERR(new);
@@ -4971,13 +5003,13 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
struct task_struct *task;
const struct cred *saved_cred;
ssize_t ret;
- bool locked;
+ bool threadgroup_locked;
dst_cgrp = cgroup_kn_lock_live(of->kn, false);
if (!dst_cgrp)
return -ENODEV;
- task = cgroup_procs_write_start(buf, threadgroup, &locked);
+ task = cgroup_procs_write_start(buf, threadgroup, &threadgroup_locked);
ret = PTR_ERR_OR_ZERO(task);
if (ret)
goto out_unlock;
@@ -5003,7 +5035,7 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
ret = cgroup_attach_task(dst_cgrp, task, threadgroup);
out_finish:
- cgroup_procs_write_finish(task, locked);
+ cgroup_procs_write_finish(task, threadgroup_locked);
out_unlock:
cgroup_kn_unlock(of->kn);
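
The comment added with cgroup_attach_lock() is the heart of this change: cpus_read_lock() must always be the outer lock and cgroup_threadgroup_rwsem the optional inner one, otherwise an ->attach() implementation that takes the cpus lock itself can deadlock against CPU hotplug. A stripped-down pthread illustration of that "fixed outer lock, optional inner lock" helper pair (not kernel code) is:

/* Hedged sketch of the fixed lock ordering enforced by cgroup_attach_lock(). */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t cpus_lock = PTHREAD_MUTEX_INITIALIZER;		/* outer, always taken */
static pthread_rwlock_t threadgroup_rwsem = PTHREAD_RWLOCK_INITIALIZER;	/* inner, optional */

static void attach_lock(bool lock_threadgroup)
{
	pthread_mutex_lock(&cpus_lock);			/* analogous to cpus_read_lock() */
	if (lock_threadgroup)
		pthread_rwlock_wrlock(&threadgroup_rwsem);
}

static void attach_unlock(bool lock_threadgroup)
{
	if (lock_threadgroup)
		pthread_rwlock_unlock(&threadgroup_rwsem);
	pthread_mutex_unlock(&cpus_lock);		/* release in strict reverse order */
}

int main(void)
{
	attach_lock(true);	/* ->attach() may now assume hotplug is excluded */
	attach_unlock(true);

	attach_lock(false);	/* single-task case: skip the heavy rwsem */
	attach_unlock(false);
	return 0;
}
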
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 71a418858a5e..1f3a55297f39 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -2239,7 +2239,7 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
goto out_unlock;
cgroup_taskset_for_each(task, css, tset) {
- ret = task_can_attach(task, cs->cpus_allowed);
+ ret = task_can_attach(task, cs->effective_cpus);
if (ret)
goto out_unlock;
ret = security_task_setscheduler(task);
@@ -2289,7 +2289,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
cgroup_taskset_first(tset, &css);
cs = css_cs(css);
- cpus_read_lock();
+ lockdep_assert_cpus_held(); /* see cgroup_attach_lock() */
percpu_down_write(&cpuset_rwsem);
guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
@@ -2343,7 +2343,6 @@ static void cpuset_attach(struct cgroup_taskset *tset)
wake_up(&cpuset_attach_wq);
percpu_up_write(&cpuset_rwsem);
- cpus_read_unlock();
}
/* The various types of files and directories in a cpuset file system */
diff --git a/kernel/configs/xen.config b/kernel/configs/xen.config
index ff756221f112..436f806aa1ed 100644
--- a/kernel/configs/xen.config
+++ b/kernel/configs/xen.config
@@ -34,7 +34,6 @@ CONFIG_INPUT_XEN_KBDDEV_FRONTEND=m
CONFIG_XEN_SCSI_FRONTEND=m
# others
CONFIG_XEN_BALLOON=y
-CONFIG_XEN_SCRUB_PAGES=y
CONFIG_XEN_DEV_EVTCHN=m
CONFIG_XEN_BLKDEV_FRONTEND=m
CONFIG_XEN_NETDEV_FRONTEND=m
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
index 71122e01623c..a0eb4d5cf557 100644
--- a/kernel/crash_core.c
+++ b/kernel/crash_core.c
@@ -9,12 +9,15 @@
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/vmalloc.h>
+#include <linux/sizes.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <crypto/sha1.h>
+#include "kallsyms_internal.h"
+
/* vmcoreinfo stuff */
unsigned char *vmcoreinfo_data;
size_t vmcoreinfo_size;
@@ -43,6 +46,15 @@ static int __init parse_crashkernel_mem(char *cmdline,
unsigned long long *crash_base)
{
char *cur = cmdline, *tmp;
+ unsigned long long total_mem = system_ram;
+
+ /*
+ * Firmware sometimes reserves some memory regions for its own use,
+ * so the system memory size is less than the actual physical memory
+ * size. Work around this by rounding up the total size to 128M,
+ * which is enough for most test cases.
+ */
+ total_mem = roundup(total_mem, SZ_128M);
/* for each entry of the comma-separated list */
do {
@@ -87,13 +99,13 @@ static int __init parse_crashkernel_mem(char *cmdline,
return -EINVAL;
}
cur = tmp;
- if (size >= system_ram) {
+ if (size >= total_mem) {
pr_warn("crashkernel: invalid size\n");
return -EINVAL;
}
/* match ? */
- if (system_ram >= start && system_ram < end) {
+ if (total_mem >= start && total_mem < end) {
*crash_size = size;
break;
}
@@ -480,6 +492,19 @@ static int __init crash_save_vmcoreinfo_init(void)
VMCOREINFO_NUMBER(PAGE_OFFLINE_MAPCOUNT_VALUE);
#endif
+#ifdef CONFIG_KALLSYMS
+ VMCOREINFO_SYMBOL(kallsyms_names);
+ VMCOREINFO_SYMBOL(kallsyms_num_syms);
+ VMCOREINFO_SYMBOL(kallsyms_token_table);
+ VMCOREINFO_SYMBOL(kallsyms_token_index);
+#ifdef CONFIG_KALLSYMS_BASE_RELATIVE
+ VMCOREINFO_SYMBOL(kallsyms_offsets);
+ VMCOREINFO_SYMBOL(kallsyms_relative_base);
+#else
+ VMCOREINFO_SYMBOL(kallsyms_addresses);
+#endif /* CONFIG_KALLSYMS_BASE_RELATIVE */
+#endif /* CONFIG_KALLSYMS */
+
arch_crash_save_vmcoreinfo();
update_vmcoreinfo_note();
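
Two independent things happen in this crash_core.c hunk: parse_crashkernel_mem() now compares the requested range against system RAM rounded up to the next 128M multiple, so firmware-reserved holes do not make a "crashkernel=...:...M" range silently miss, and the kallsyms tables are exported through vmcoreinfo. The rounding itself is a plain upward alignment; a hedged stand-alone version is:

/* Hedged sketch of the 128M round-up applied to the detected system RAM. */
#include <stdio.h>

#define SZ_128M		(128ULL << 20)
#define ROUNDUP(x, y)	((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned long long system_ram = (4ULL << 30) - (64ULL << 20);	/* 4G minus firmware holes */
	unsigned long long total_mem = ROUNDUP(system_ram, SZ_128M);

	printf("detected %lluM, matching ranges against %lluM\n",
	       system_ram >> 20, total_mem >> 20);	/* prints 4032M -> 4096M */
	return 0;
}
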
diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c
index 375fb3c9538d..c21abc77c53e 100644
--- a/kernel/dma/coherent.c
+++ b/kernel/dma/coherent.c
@@ -74,7 +74,7 @@ out_unmap_membase:
return ERR_PTR(-ENOMEM);
}
-static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
+static void _dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
if (!mem)
return;
@@ -126,10 +126,16 @@ int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
ret = dma_assign_coherent_memory(dev, mem);
if (ret)
- dma_release_coherent_memory(mem);
+ _dma_release_coherent_memory(mem);
return ret;
}
+void dma_release_coherent_memory(struct device *dev)
+{
+ if (dev)
+ _dma_release_coherent_memory(dev->dma_mem);
+}
+
static void *__dma_alloc_from_coherent(struct device *dev,
struct dma_coherent_mem *mem,
ssize_t size, dma_addr_t *dma_handle)
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 8d0b68a17042..63859a101ed8 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -453,29 +453,60 @@ void dma_direct_sync_sg_for_cpu(struct device *dev,
arch_sync_dma_for_cpu_all();
}
+/*
+ * Unmaps segments, except for ones marked as pci_p2pdma which do not
+ * require any further action as they contain a bus address.
+ */
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction dir, unsigned long attrs)
{
struct scatterlist *sg;
int i;
- for_each_sg(sgl, sg, nents, i)
- dma_direct_unmap_page(dev, sg->dma_address, sg_dma_len(sg), dir,
- attrs);
+ for_each_sg(sgl, sg, nents, i) {
+ if (sg_is_dma_bus_address(sg))
+ sg_dma_unmark_bus_address(sg);
+ else
+ dma_direct_unmap_page(dev, sg->dma_address,
+ sg_dma_len(sg), dir, attrs);
+ }
}
#endif
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
enum dma_data_direction dir, unsigned long attrs)
{
- int i;
+ struct pci_p2pdma_map_state p2pdma_state = {};
+ enum pci_p2pdma_map_type map;
struct scatterlist *sg;
+ int i, ret;
for_each_sg(sgl, sg, nents, i) {
+ if (is_pci_p2pdma_page(sg_page(sg))) {
+ map = pci_p2pdma_map_segment(&p2pdma_state, dev, sg);
+ switch (map) {
+ case PCI_P2PDMA_MAP_BUS_ADDR:
+ continue;
+ case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
+ /*
+ * Any P2P mapping that traverses the PCI
+ * host bridge must be mapped with CPU physical
+ * address and not PCI bus addresses. This is
+ * done with dma_direct_map_page() below.
+ */
+ break;
+ default:
+ ret = -EREMOTEIO;
+ goto out_unmap;
+ }
+ }
+
sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
sg->offset, sg->length, dir, attrs);
- if (sg->dma_address == DMA_MAPPING_ERROR)
+ if (sg->dma_address == DMA_MAPPING_ERROR) {
+ ret = -EIO;
goto out_unmap;
+ }
sg_dma_len(sg) = sg->length;
}
@@ -483,7 +514,7 @@ int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
out_unmap:
dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
- return -EIO;
+ return ret;
}
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
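
dma_direct_map_sg() now classifies every scatterlist segment before mapping it: peer-to-peer pages that already carry a bus address are skipped, peer-to-peer traffic that has to cross the host bridge is mapped like ordinary memory, and anything the device cannot reach fails the whole mapping with -EREMOTEIO. A compressed sketch of that per-segment switch, using stand-in types and helpers rather than the kernel's, is:

/* Hedged sketch of the per-segment P2PDMA classification in dma_direct_map_sg(). */
#include <errno.h>
#include <stddef.h>

enum p2pdma_map { MAP_NONE, MAP_BUS_ADDR, MAP_THRU_HOST_BRIDGE, MAP_NOT_SUPPORTED };

struct seg { enum p2pdma_map kind; unsigned long dma_address; int mapped; };

static int map_one(struct seg *s) { s->dma_address = 0x1000; s->mapped = 1; return 0; }

static void unmap_upto(struct seg *sgl, int n)
{
	for (int i = 0; i < n; i++)
		if (sgl[i].mapped)
			sgl[i].mapped = 0;
}

static int map_sg(struct seg *sgl, int nents)
{
	for (int i = 0; i < nents; i++) {
		switch (sgl[i].kind) {
		case MAP_BUS_ADDR:
			continue;		/* already a bus address, nothing to map */
		case MAP_THRU_HOST_BRIDGE:
		case MAP_NONE:
			break;			/* map with a CPU physical address below */
		default:
			unmap_upto(sgl, i);
			return -EREMOTEIO;	/* device cannot reach this peer memory */
		}
		if (map_one(&sgl[i])) {
			unmap_upto(sgl, i);
			return -EIO;
		}
	}
	return nents;
}

int main(void)
{
	struct seg sgl[3] = { { MAP_NONE }, { MAP_BUS_ADDR }, { MAP_THRU_HOST_BRIDGE } };

	return map_sg(sgl, 3) == 3 ? 0 : 1;
}
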
diff --git a/kernel/dma/direct.h b/kernel/dma/direct.h
index a78c0ba70645..e38ffc5e6bdd 100644
--- a/kernel/dma/direct.h
+++ b/kernel/dma/direct.h
@@ -8,6 +8,7 @@
#define _KERNEL_DMA_DIRECT_H
#include <linux/dma-direct.h>
+#include <linux/memremap.h>
int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
@@ -87,10 +88,15 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
phys_addr_t phys = page_to_phys(page) + offset;
dma_addr_t dma_addr = phys_to_dma(dev, phys);
- if (is_swiotlb_force_bounce(dev))
+ if (is_swiotlb_force_bounce(dev)) {
+ if (is_pci_p2pdma_page(page))
+ return DMA_MAPPING_ERROR;
return swiotlb_map(dev, phys, size, dir, attrs);
+ }
if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
+ if (is_pci_p2pdma_page(page))
+ return DMA_MAPPING_ERROR;
if (is_swiotlb_active(dev))
return swiotlb_map(dev, phys, size, dir, attrs);
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index db7244291b74..49cbf3e33de7 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -197,7 +197,7 @@ static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
if (ents > 0)
debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
- ents != -EIO))
+ ents != -EIO && ents != -EREMOTEIO))
return -EIO;
return ents;
@@ -249,12 +249,15 @@ EXPORT_SYMBOL(dma_map_sg_attrs);
* Returns 0 on success or a negative error code on error. The following
* error codes are supported with the given meaning:
*
- * -EINVAL An invalid argument, unaligned access or other error
- * in usage. Will not succeed if retried.
- * -ENOMEM Insufficient resources (like memory or IOVA space) to
- * complete the mapping. Should succeed if retried later.
- * -EIO Legacy error code with an unknown meaning. eg. this is
- * returned if a lower level call returned DMA_MAPPING_ERROR.
+ * -EINVAL An invalid argument, unaligned access or other error
+ * in usage. Will not succeed if retried.
+ * -ENOMEM Insufficient resources (like memory or IOVA space) to
+ * complete the mapping. Should succeed if retried later.
+ * -EIO Legacy error code with an unknown meaning. eg. this is
+ * returned if a lower level call returned
+ * DMA_MAPPING_ERROR.
+ * -EREMOTEIO The DMA device cannot access P2PDMA memory specified
+ * in the sg_table. This will not succeed if retried.
*/
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
enum dma_data_direction dir, unsigned long attrs)
@@ -720,6 +723,24 @@ int dma_supported(struct device *dev, u64 mask)
}
EXPORT_SYMBOL(dma_supported);
+bool dma_pci_p2pdma_supported(struct device *dev)
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ /* if ops is not set, dma direct will be used which supports P2PDMA */
+ if (!ops)
+ return true;
+
+ /*
+ * Note: dma_ops_bypass is not checked here because P2PDMA should
+ * not be used with dma mapping ops that do not have support even
+ * if the specific device is bypassing them.
+ */
+
+ return ops->flags & DMA_F_PCI_P2PDMA_SUPPORTED;
+}
+EXPORT_SYMBOL_GPL(dma_pci_p2pdma_supported);
+
#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
void arch_dma_set_mask(struct device *dev, u64 mask);
#else
@@ -773,6 +794,18 @@ size_t dma_max_mapping_size(struct device *dev)
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);
+size_t dma_opt_mapping_size(struct device *dev)
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+ size_t size = SIZE_MAX;
+
+ if (ops && ops->opt_mapping_size)
+ size = ops->opt_mapping_size();
+
+ return min(dma_max_mapping_size(dev), size);
+}
+EXPORT_SYMBOL_GPL(dma_opt_mapping_size);
+
bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
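
dma_opt_mapping_size() added above gives callers an upper bound that is "optimal" rather than merely possible: the smaller of the hard maximum and whatever the mapping ops report as efficient (SIZE_MAX when they do not say). A driver might, for instance, clamp its per-request size with it; a hedged user-space analogue with stub values is:

/* Hedged analogue of capping a per-request size by the optimal DMA mapping size. */
#include <stddef.h>
#include <stdio.h>

static size_t dma_max_mapping_size_stub(void) { return 256UL << 20; }	/* hard ceiling */
static size_t dma_opt_mapping_size_stub(void) { return 128UL << 10; }	/* efficient size */

static size_t min_size(size_t a, size_t b) { return a < b ? a : b; }

int main(void)
{
	size_t wanted = 4UL << 20;	/* the driver would like 4M per request */
	size_t limit = min_size(dma_opt_mapping_size_stub(),
				dma_max_mapping_size_stub());
	size_t chunk = min_size(wanted, limit);

	printf("per-request mapping capped at %zu KiB\n", chunk >> 10);	/* 128 KiB */
	return 0;
}
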
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index cb50f8d38360..c5a9190b218f 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -62,6 +62,12 @@
#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
+struct io_tlb_slot {
+ phys_addr_t orig_addr;
+ size_t alloc_size;
+ unsigned int list;
+};
+
static bool swiotlb_force_bounce;
static bool swiotlb_force_disable;
@@ -70,6 +76,62 @@ struct io_tlb_mem io_tlb_default_mem;
phys_addr_t swiotlb_unencrypted_base;
static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;
+static unsigned long default_nareas;
+
+/**
+ * struct io_tlb_area - IO TLB memory area descriptor
+ *
+ * This is a single area with a single lock.
+ *
+ * @used: The number of used IO TLB block.
+ * @index: The slot index to start searching in this area for next round.
+ * @lock: The lock to protect the above data structures in the map and
+ * unmap calls.
+ */
+struct io_tlb_area {
+ unsigned long used;
+ unsigned int index;
+ spinlock_t lock;
+};
+
+/*
+ * Round up the number of slabs to the next power of 2. The last area will
+ * be smaller than the rest if default_nslabs is not a power of two.
+ * The number of slots in an area should be a multiple of IO_TLB_SEGSIZE,
+ * otherwise a segment may span two or more areas. That conflicts with free
+ * contiguous slot tracking: free slots are treated as contiguous no matter
+ * whether they cross an area boundary.
+ *
+ * Return true if default_nslabs is rounded up.
+ */
+static bool round_up_default_nslabs(void)
+{
+ if (!default_nareas)
+ return false;
+
+ if (default_nslabs < IO_TLB_SEGSIZE * default_nareas)
+ default_nslabs = IO_TLB_SEGSIZE * default_nareas;
+ else if (is_power_of_2(default_nslabs))
+ return false;
+ default_nslabs = roundup_pow_of_two(default_nslabs);
+ return true;
+}
+
+static void swiotlb_adjust_nareas(unsigned int nareas)
+{
+ /* use a single area when none is specified */
+ if (!nareas)
+ nareas = 1;
+ else if (!is_power_of_2(nareas))
+ nareas = roundup_pow_of_two(nareas);
+
+ default_nareas = nareas;
+
+ pr_info("area num %d.\n", nareas);
+ if (round_up_default_nslabs())
+ pr_info("SWIOTLB bounce buffer size roundup to %luMB",
+ (default_nslabs << IO_TLB_SHIFT) >> 20);
+}
static int __init
setup_io_tlb_npages(char *str)
@@ -81,6 +143,10 @@ setup_io_tlb_npages(char *str)
}
if (*str == ',')
++str;
+ if (isdigit(*str))
+ swiotlb_adjust_nareas(simple_strtoul(str, &str, 0));
+ if (*str == ',')
+ ++str;
if (!strcmp(str, "force"))
swiotlb_force_bounce = true;
else if (!strcmp(str, "noforce"))
@@ -112,8 +178,11 @@ void __init swiotlb_adjust_size(unsigned long size)
*/
if (default_nslabs != IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT)
return;
+
size = ALIGN(size, IO_TLB_SIZE);
default_nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
+ if (round_up_default_nslabs())
+ size = default_nslabs << IO_TLB_SHIFT;
pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20);
}
@@ -192,7 +261,8 @@ void __init swiotlb_update_mem_attributes(void)
}
static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
- unsigned long nslabs, unsigned int flags, bool late_alloc)
+ unsigned long nslabs, unsigned int flags,
+ bool late_alloc, unsigned int nareas)
{
void *vaddr = phys_to_virt(start);
unsigned long bytes = nslabs << IO_TLB_SHIFT, i;
@@ -200,12 +270,18 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
mem->nslabs = nslabs;
mem->start = start;
mem->end = mem->start + bytes;
- mem->index = 0;
mem->late_alloc = late_alloc;
+ mem->nareas = nareas;
+ mem->area_nslabs = nslabs / mem->nareas;
mem->force_bounce = swiotlb_force_bounce || (flags & SWIOTLB_FORCE);
- spin_lock_init(&mem->lock);
+ for (i = 0; i < mem->nareas; i++) {
+ spin_lock_init(&mem->areas[i].lock);
+ mem->areas[i].index = 0;
+ mem->areas[i].used = 0;
+ }
+
for (i = 0; i < mem->nslabs; i++) {
mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
@@ -232,7 +308,7 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
int (*remap)(void *tlb, unsigned long nslabs))
{
struct io_tlb_mem *mem = &io_tlb_default_mem;
- unsigned long nslabs = default_nslabs;
+ unsigned long nslabs;
size_t alloc_size;
size_t bytes;
void *tlb;
@@ -243,6 +319,17 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
return;
/*
+ * default_nslabs may be changed when adjusting the area number,
+ * so allocate the bounce buffer after adjusting the area number.
+ */
+ if (!default_nareas)
+ swiotlb_adjust_nareas(num_possible_cpus());
+
+ nslabs = default_nslabs;
+ if (nslabs < IO_TLB_MIN_SLABS)
+ panic("%s: nslabs = %lu too small\n", __func__, nslabs);
+
+ /*
* By default allocate the bounce buffer memory from low memory, but
* allow to pick a location everywhere for hypervisors with guest
* memory encryption.
@@ -254,7 +341,8 @@ retry:
else
tlb = memblock_alloc_low(bytes, PAGE_SIZE);
if (!tlb) {
- pr_warn("%s: failed to allocate tlb structure\n", __func__);
+ pr_warn("%s: Failed to allocate %zu bytes tlb structure\n",
+ __func__, bytes);
return;
}
@@ -274,7 +362,13 @@ retry:
panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
__func__, alloc_size, PAGE_SIZE);
- swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, flags, false);
+ mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area),
+ default_nareas), SMP_CACHE_BYTES);
+ if (!mem->areas)
+ panic("%s: Failed to allocate mem->areas.\n", __func__);
+
+ swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, flags, false,
+ default_nareas);
if (flags & SWIOTLB_VERBOSE)
swiotlb_print_info();
@@ -282,7 +376,7 @@ retry:
void __init swiotlb_init(bool addressing_limit, unsigned int flags)
{
- return swiotlb_init_remap(addressing_limit, flags, NULL);
+ swiotlb_init_remap(addressing_limit, flags, NULL);
}
/*
@@ -296,7 +390,7 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
struct io_tlb_mem *mem = &io_tlb_default_mem;
unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
unsigned char *vstart = NULL;
- unsigned int order;
+ unsigned int order, area_order;
bool retried = false;
int rc = 0;
@@ -337,19 +431,34 @@ retry:
(PAGE_SIZE << order) >> 20);
}
+ if (!default_nareas)
+ swiotlb_adjust_nareas(num_possible_cpus());
+
+ area_order = get_order(array_size(sizeof(*mem->areas),
+ default_nareas));
+ mem->areas = (struct io_tlb_area *)
+ __get_free_pages(GFP_KERNEL | __GFP_ZERO, area_order);
+ if (!mem->areas)
+ goto error_area;
+
mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
get_order(array_size(sizeof(*mem->slots), nslabs)));
- if (!mem->slots) {
- free_pages((unsigned long)vstart, order);
- return -ENOMEM;
- }
+ if (!mem->slots)
+ goto error_slots;
set_memory_decrypted((unsigned long)vstart,
(nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
- swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, 0, true);
+ swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, 0, true,
+ default_nareas);
swiotlb_print_info();
return 0;
+
+error_slots:
+ free_pages((unsigned long)mem->areas, area_order);
+error_area:
+ free_pages((unsigned long)vstart, order);
+ return -ENOMEM;
}
void __init swiotlb_exit(void)
@@ -357,6 +466,7 @@ void __init swiotlb_exit(void)
struct io_tlb_mem *mem = &io_tlb_default_mem;
unsigned long tbl_vaddr;
size_t tbl_size, slots_size;
+ unsigned int area_order;
if (swiotlb_force_bounce)
return;
@@ -371,9 +481,14 @@ void __init swiotlb_exit(void)
set_memory_encrypted(tbl_vaddr, tbl_size >> PAGE_SHIFT);
if (mem->late_alloc) {
+ area_order = get_order(array_size(sizeof(*mem->areas),
+ mem->nareas));
+ free_pages((unsigned long)mem->areas, area_order);
free_pages(tbl_vaddr, get_order(tbl_size));
free_pages((unsigned long)mem->slots, get_order(slots_size));
} else {
+ memblock_free_late(__pa(mem->areas),
+ array_size(sizeof(*mem->areas), mem->nareas));
memblock_free_late(mem->start, tbl_size);
memblock_free_late(__pa(mem->slots), slots_size);
}
@@ -476,9 +591,9 @@ static inline unsigned long get_max_slots(unsigned long boundary_mask)
return nr_slots(boundary_mask + 1);
}
-static unsigned int wrap_index(struct io_tlb_mem *mem, unsigned int index)
+static unsigned int wrap_area_index(struct io_tlb_mem *mem, unsigned int index)
{
- if (index >= mem->nslabs)
+ if (index >= mem->area_nslabs)
return 0;
return index;
}
@@ -487,10 +602,12 @@ static unsigned int wrap_index(struct io_tlb_mem *mem, unsigned int index)
* Find a suitable number of IO TLB entries size that will fit this request and
* allocate a buffer from that IO TLB pool.
*/
-static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
- size_t alloc_size, unsigned int alloc_align_mask)
+static int swiotlb_do_find_slots(struct device *dev, int area_index,
+ phys_addr_t orig_addr, size_t alloc_size,
+ unsigned int alloc_align_mask)
{
struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
+ struct io_tlb_area *area = mem->areas + area_index;
unsigned long boundary_mask = dma_get_seg_boundary(dev);
dma_addr_t tbl_dma_addr =
phys_to_dma_unencrypted(dev, mem->start) & boundary_mask;
@@ -501,8 +618,11 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
unsigned int index, wrap, count = 0, i;
unsigned int offset = swiotlb_align_offset(dev, orig_addr);
unsigned long flags;
+ unsigned int slot_base;
+ unsigned int slot_index;
BUG_ON(!nslots);
+ BUG_ON(area_index >= mem->nareas);
/*
* For mappings with an alignment requirement don't bother looping to
@@ -514,16 +634,20 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
stride = max(stride, stride << (PAGE_SHIFT - IO_TLB_SHIFT));
stride = max(stride, (alloc_align_mask >> IO_TLB_SHIFT) + 1);
- spin_lock_irqsave(&mem->lock, flags);
- if (unlikely(nslots > mem->nslabs - mem->used))
+ spin_lock_irqsave(&area->lock, flags);
+ if (unlikely(nslots > mem->area_nslabs - area->used))
goto not_found;
- index = wrap = wrap_index(mem, ALIGN(mem->index, stride));
+ slot_base = area_index * mem->area_nslabs;
+ index = wrap = wrap_area_index(mem, ALIGN(area->index, stride));
+
do {
+ slot_index = slot_base + index;
+
if (orig_addr &&
- (slot_addr(tbl_dma_addr, index) & iotlb_align_mask) !=
- (orig_addr & iotlb_align_mask)) {
- index = wrap_index(mem, index + 1);
+ (slot_addr(tbl_dma_addr, slot_index) &
+ iotlb_align_mask) != (orig_addr & iotlb_align_mask)) {
+ index = wrap_area_index(mem, index + 1);
continue;
}
@@ -532,26 +656,26 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
* contiguous buffers, we allocate the buffers from that slot
* and mark the entries as '0' indicating unavailable.
*/
- if (!iommu_is_span_boundary(index, nslots,
+ if (!iommu_is_span_boundary(slot_index, nslots,
nr_slots(tbl_dma_addr),
max_slots)) {
- if (mem->slots[index].list >= nslots)
+ if (mem->slots[slot_index].list >= nslots)
goto found;
}
- index = wrap_index(mem, index + stride);
+ index = wrap_area_index(mem, index + stride);
} while (index != wrap);
not_found:
- spin_unlock_irqrestore(&mem->lock, flags);
+ spin_unlock_irqrestore(&area->lock, flags);
return -1;
found:
- for (i = index; i < index + nslots; i++) {
+ for (i = slot_index; i < slot_index + nslots; i++) {
mem->slots[i].list = 0;
- mem->slots[i].alloc_size =
- alloc_size - (offset + ((i - index) << IO_TLB_SHIFT));
+ mem->slots[i].alloc_size = alloc_size - (offset +
+ ((i - slot_index) << IO_TLB_SHIFT));
}
- for (i = index - 1;
+ for (i = slot_index - 1;
io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 &&
mem->slots[i].list; i--)
mem->slots[i].list = ++count;
@@ -559,14 +683,42 @@ found:
/*
* Update the indices to avoid searching in the next round.
*/
- if (index + nslots < mem->nslabs)
- mem->index = index + nslots;
+ if (index + nslots < mem->area_nslabs)
+ area->index = index + nslots;
else
- mem->index = 0;
- mem->used += nslots;
+ area->index = 0;
+ area->used += nslots;
+ spin_unlock_irqrestore(&area->lock, flags);
+ return slot_index;
+}
- spin_unlock_irqrestore(&mem->lock, flags);
- return index;
+static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
+ size_t alloc_size, unsigned int alloc_align_mask)
+{
+ struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
+ int start = raw_smp_processor_id() & (mem->nareas - 1);
+ int i = start, index;
+
+ do {
+ index = swiotlb_do_find_slots(dev, i, orig_addr, alloc_size,
+ alloc_align_mask);
+ if (index >= 0)
+ return index;
+ if (++i >= mem->nareas)
+ i = 0;
+ } while (i != start);
+
+ return -1;
+}
+
+static unsigned long mem_used(struct io_tlb_mem *mem)
+{
+ int i;
+ unsigned long used = 0;
+
+ for (i = 0; i < mem->nareas; i++)
+ used += mem->areas[i].used;
+ return used;
}
phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
@@ -580,7 +732,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
int index;
phys_addr_t tlb_addr;
- if (!mem)
+ if (!mem || !mem->nslabs)
panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
@@ -598,7 +750,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
if (!(attrs & DMA_ATTR_NO_WARN))
dev_warn_ratelimited(dev,
"swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
- alloc_size, mem->nslabs, mem->used);
+ alloc_size, mem->nslabs, mem_used(mem));
return (phys_addr_t)DMA_MAPPING_ERROR;
}
@@ -628,6 +780,8 @@ static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr)
unsigned int offset = swiotlb_align_offset(dev, tlb_addr);
int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT;
int nslots = nr_slots(mem->slots[index].alloc_size + offset);
+ int aindex = index / mem->area_nslabs;
+ struct io_tlb_area *area = &mem->areas[aindex];
int count, i;
/*
@@ -636,7 +790,9 @@ static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr)
* While returning the entries to the free list, we merge the entries
* with slots below and above the pool being returned.
*/
- spin_lock_irqsave(&mem->lock, flags);
+ BUG_ON(aindex >= mem->nareas);
+
+ spin_lock_irqsave(&area->lock, flags);
if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE))
count = mem->slots[index + nslots].list;
else
@@ -660,8 +816,8 @@ static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr)
io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list;
i--)
mem->slots[i].list = ++count;
- mem->used -= nslots;
- spin_unlock_irqrestore(&mem->lock, flags);
+ area->used -= nslots;
+ spin_unlock_irqrestore(&area->lock, flags);
}
/*
@@ -756,6 +912,13 @@ bool is_swiotlb_active(struct device *dev)
}
EXPORT_SYMBOL_GPL(is_swiotlb_active);
+static int io_tlb_used_get(void *data, u64 *val)
+{
+ *val = mem_used(&io_tlb_default_mem);
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_used, io_tlb_used_get, NULL, "%llu\n");
+
static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
const char *dirname)
{
@@ -764,7 +927,8 @@ static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
return;
debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs);
- debugfs_create_ulong("io_tlb_used", 0400, mem->debugfs, &mem->used);
+ debugfs_create_file("io_tlb_used", 0400, mem->debugfs, NULL,
+ &fops_io_tlb_used);
}
static int __init __maybe_unused swiotlb_create_default_debugfs(void)
@@ -815,6 +979,9 @@ static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
struct io_tlb_mem *mem = rmem->priv;
unsigned long nslabs = rmem->size >> IO_TLB_SHIFT;
+ /* Set Per-device io tlb area to one */
+ unsigned int nareas = 1;
+
/*
* Since multiple devices can share the same pool, the private data,
* io_tlb_mem struct, will be initialized by the first device attached
@@ -831,10 +998,18 @@ static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
return -ENOMEM;
}
+ mem->areas = kcalloc(nareas, sizeof(*mem->areas),
+ GFP_KERNEL);
+ if (!mem->areas) {
+ kfree(mem->slots);
+ kfree(mem);
+ return -ENOMEM;
+ }
+
set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
rmem->size >> PAGE_SHIFT);
swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, SWIOTLB_FORCE,
- false);
+ false, nareas);
mem->for_alloc = true;
rmem->priv = mem;
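
The swiotlb rework above splits the single pool lock into per-area locks and starts each allocation in the area indexed by the current CPU; this stays cheap because the number of areas is forced to a power of two, so "cpu & (nareas - 1)" is always a valid index, with a fallback walk through the remaining areas when the first one is full. A hedged user-space analogue of that selection and walk is:

/* Hedged sketch of per-area allocation: try "this CPU's" area first, then walk the rest. */
#include <pthread.h>
#include <stdio.h>

#define NAREAS 4	/* kept a power of two so the index mask below is valid */

struct area {
	pthread_mutex_t lock;
	unsigned int free_slots;
};

static struct area areas[NAREAS];

static int alloc_from(int idx, unsigned int want)
{
	int ok;

	pthread_mutex_lock(&areas[idx].lock);	/* contention is per area, not global */
	ok = areas[idx].free_slots >= want;
	if (ok)
		areas[idx].free_slots -= want;
	pthread_mutex_unlock(&areas[idx].lock);
	return ok ? idx : -1;
}

static int alloc_slots(unsigned int cpu, unsigned int want)
{
	int start = cpu & (NAREAS - 1);	/* like raw_smp_processor_id() & (nareas - 1) */
	int i = start;

	do {
		int got = alloc_from(i, want);

		if (got >= 0)
			return got;
		if (++i >= NAREAS)	/* wrap and keep looking in the other areas */
			i = 0;
	} while (i != start);
	return -1;
}

int main(void)
{
	for (int i = 0; i < NAREAS; i++) {
		pthread_mutex_init(&areas[i].lock, NULL);
		areas[i].free_slots = 8;
	}
	printf("cpu 5 allocated from area %d\n", alloc_slots(5, 4));	/* area 1 */
	return 0;
}
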
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 4e718b93442b..2621fd24ad26 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4457,7 +4457,7 @@ int perf_event_read_local(struct perf_event *event, u64 *value,
*value = local64_read(&event->count);
if (enabled || running) {
- u64 __enabled, __running, __now;;
+ u64 __enabled, __running, __now;
calc_timer_values(event, &__now, &__enabled, &__running);
if (enabled)
diff --git a/kernel/exit.c b/kernel/exit.c
index 64c938ce36fe..84021b24f79e 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1051,7 +1051,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
* p->signal fields because the whole thread group is dead
* and nobody can change them.
*
- * psig->stats_lock also protects us from our sub-theads
+ * psig->stats_lock also protects us from our sub-threads
* which can reap other children at the same time. Until
* we change k_getrusage()-like users to rely on this lock
* we have to take ->siglock as well.
diff --git a/kernel/fork.c b/kernel/fork.c
index 28772142022a..90c85b17bf69 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1965,6 +1965,18 @@ static void copy_oom_score_adj(u64 clone_flags, struct task_struct *tsk)
mutex_unlock(&oom_adj_mutex);
}
+#ifdef CONFIG_RV
+static void rv_task_fork(struct task_struct *p)
+{
+ int i;
+
+ for (i = 0; i < RV_PER_TASK_MONITORS; i++)
+ p->rv[i].da_mon.monitoring = false;
+}
+#else
+#define rv_task_fork(p) do {} while (0)
+#endif
+
/*
* This creates a new process as a copy of the old one,
* but does not actually start it yet.
@@ -2403,6 +2415,8 @@ static __latent_entropy struct task_struct *copy_process(
*/
copy_seccomp(p);
+ rv_task_fork(p);
+
rseq_fork(p, clone_flags);
/* Don't start children in a dying pid namespace */
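
rv_task_fork() above follows the usual pattern for optional subsystems: a real function when the config option is enabled, and a do-nothing macro otherwise, so the call site in copy_process() needs no #ifdef of its own. A stripped-down illustration with a made-up option name:

/* Hedged sketch of the "#ifdef real function / #else no-op macro" pattern (made-up CONFIG name). */
#include <stdbool.h>
#include <stdio.h>

#define CONFIG_DEMO_MONITOR 1	/* flip to 0 and the call below compiles away */

struct task { bool monitoring; };

#if CONFIG_DEMO_MONITOR
static void demo_task_fork(struct task *p)
{
	p->monitoring = false;	/* a child never inherits the parent's monitor state */
}
#else
#define demo_task_fork(p) do { } while (0)
#endif

int main(void)
{
	struct task child = { .monitoring = true };

	demo_task_fork(&child);	/* unconditional call site, no #ifdef needed here */
	printf("child monitoring: %d\n", child.monitoring);
	return 0;
}
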
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index cff3ae8c818f..bb2354f73ded 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -229,7 +229,7 @@ static long hung_timeout_jiffies(unsigned long last_checked,
* Process updating of timeout sysctl
*/
static int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
- void __user *buffer,
+ void *buffer,
size_t *lenp, loff_t *ppos)
{
int ret;
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 79a85834ce9d..3e7e2c2ad2f7 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -32,28 +32,7 @@
#include <linux/bsearch.h>
#include <linux/btf_ids.h>
-/*
- * These will be re-linked against their real values
- * during the second link stage.
- */
-extern const unsigned long kallsyms_addresses[] __weak;
-extern const int kallsyms_offsets[] __weak;
-extern const u8 kallsyms_names[] __weak;
-
-/*
- * Tell the compiler that the count isn't in the small data section if the arch
- * has one (eg: FRV).
- */
-extern const unsigned int kallsyms_num_syms
-__section(".rodata") __attribute__((weak));
-
-extern const unsigned long kallsyms_relative_base
-__section(".rodata") __attribute__((weak));
-
-extern const char kallsyms_token_table[] __weak;
-extern const u16 kallsyms_token_index[] __weak;
-
-extern const unsigned int kallsyms_markers[] __weak;
+#include "kallsyms_internal.h"
/*
* Expand a compressed symbol data into the resulting uncompressed string,
diff --git a/kernel/kallsyms_internal.h b/kernel/kallsyms_internal.h
new file mode 100644
index 000000000000..2d0c6f2f0243
--- /dev/null
+++ b/kernel/kallsyms_internal.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef LINUX_KALLSYMS_INTERNAL_H_
+#define LINUX_KALLSYMS_INTERNAL_H_
+
+#include <linux/types.h>
+
+/*
+ * These will be re-linked against their real values
+ * during the second link stage.
+ */
+extern const unsigned long kallsyms_addresses[] __weak;
+extern const int kallsyms_offsets[] __weak;
+extern const u8 kallsyms_names[] __weak;
+
+/*
+ * Tell the compiler that the count isn't in the small data section if the arch
+ * has one (eg: FRV).
+ */
+extern const unsigned int kallsyms_num_syms
+__section(".rodata") __attribute__((weak));
+
+extern const unsigned long kallsyms_relative_base
+__section(".rodata") __attribute__((weak));
+
+extern const char kallsyms_token_table[] __weak;
+extern const u16 kallsyms_token_index[] __weak;
+
+extern const unsigned int kallsyms_markers[] __weak;
+
+#endif // LINUX_KALLSYMS_INTERNAL_H_
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
index a7b411c22f19..1d546dc97c50 100644
--- a/kernel/kexec_file.c
+++ b/kernel/kexec_file.c
@@ -40,6 +40,9 @@ void set_kexec_sig_enforced(void)
static int kexec_calculate_store_digests(struct kimage *image);
+/* Maximum size in bytes for kernel/initrd files. */
+#define KEXEC_FILE_SIZE_MAX min_t(s64, 4LL << 30, SSIZE_MAX)
+
/*
* Currently this is the only default function that is exported as some
* architectures need it to do additional handlings.
@@ -190,11 +193,12 @@ kimage_file_prepare_segments(struct kimage *image, int kernel_fd, int initrd_fd,
const char __user *cmdline_ptr,
unsigned long cmdline_len, unsigned flags)
{
- int ret;
+ ssize_t ret;
void *ldata;
ret = kernel_read_file_from_fd(kernel_fd, 0, &image->kernel_buf,
- INT_MAX, NULL, READING_KEXEC_IMAGE);
+ KEXEC_FILE_SIZE_MAX, NULL,
+ READING_KEXEC_IMAGE);
if (ret < 0)
return ret;
image->kernel_buf_len = ret;
@@ -214,7 +218,7 @@ kimage_file_prepare_segments(struct kimage *image, int kernel_fd, int initrd_fd,
/* It is possible that no initramfs is being loaded */
if (!(flags & KEXEC_FILE_NO_INITRAMFS)) {
ret = kernel_read_file_from_fd(initrd_fd, 0, &image->initrd_buf,
- INT_MAX, NULL,
+ KEXEC_FILE_SIZE_MAX, NULL,
READING_KEXEC_INITRAMFS);
if (ret < 0)
goto out;
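The hunk above replaces the old INT_MAX limit with KEXEC_FILE_SIZE_MAX and widens the read result to ssize_t. A hedged, userspace-only sketch (not part of the patch) of why the cap is written as min_t(s64, 4LL << 30, SSIZE_MAX) and why an int return value would no longer be enough:

#include <limits.h>
#include <stdio.h>
#include <sys/types.h>

/* Mirrors KEXEC_FILE_SIZE_MAX: 4 GiB, clamped to what a ssize_t can hold. */
#define FILE_SIZE_CAP \
	((4LL << 30) < (long long)SSIZE_MAX ? (4LL << 30) : (long long)SSIZE_MAX)

int main(void)
{
	printf("cap     = %lld bytes\n", (long long)FILE_SIZE_CAP);
	/* A ~3 GiB kernel or initrd length would overflow an int, hence ssize_t ret. */
	printf("INT_MAX = %d bytes\n", INT_MAX);
	return 0;
}

On 64-bit the cap is the full 4 GiB; on 32-bit it clamps to SSIZE_MAX.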
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index f214f8c088ed..08350e35aba2 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1560,7 +1560,8 @@ static int check_kprobe_address_safe(struct kprobe *p,
preempt_disable();
/* Ensure it is not in reserved area nor out of text */
- if (!kernel_text_address((unsigned long) p->addr) ||
+ if (!(core_kernel_text((unsigned long) p->addr) ||
+ is_module_text_address((unsigned long) p->addr)) ||
within_kprobe_blacklist((unsigned long) p->addr) ||
jump_label_text_reserved(p->addr, p->addr) ||
static_call_text_reserved(p->addr, p->addr) ||
@@ -1706,11 +1707,12 @@ static struct kprobe *__disable_kprobe(struct kprobe *p)
/* Try to disarm and disable this/parent probe */
if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
/*
- * If 'kprobes_all_disarmed' is set, 'orig_p'
- * should have already been disarmed, so
- * skip unneed disarming process.
+ * Don't be lazy here. Even if 'kprobes_all_disarmed'
+ * is false, 'orig_p' might not have been armed yet.
+ * Note arm_all_kprobes() __tries__ to arm all kprobes
+ * on the best effort basis.
*/
- if (!kprobes_all_disarmed) {
+ if (!kprobes_all_disarmed && !kprobe_disabled(orig_p)) {
ret = disarm_kprobe(orig_p, true);
if (ret) {
p->flags &= ~KPROBE_FLAG_DISABLED;
diff --git a/kernel/module/Kconfig b/kernel/module/Kconfig
new file mode 100644
index 000000000000..26ea5d04f56c
--- /dev/null
+++ b/kernel/module/Kconfig
@@ -0,0 +1,293 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig MODULES
+ bool "Enable loadable module support"
+ modules
+ help
+ Kernel modules are small pieces of compiled code which can
+ be inserted in the running kernel, rather than being
+ permanently built into the kernel. You use the "modprobe"
+ tool to add (and sometimes remove) them. If you say Y here,
+ many parts of the kernel can be built as modules (by
+ answering M instead of Y where indicated): this is most
+ useful for infrequently used options which are not required
+ for booting. For more information, see the man pages for
+ modprobe, lsmod, modinfo, insmod and rmmod.
+
+ If you say Y here, you will need to run "make
+ modules_install" to put the modules under /lib/modules/
+ where modprobe can find them (you may need to be root to do
+ this).
+
+ If unsure, say Y.
+
+if MODULES
+
+config MODULE_FORCE_LOAD
+ bool "Forced module loading"
+ default n
+ help
+ Allow loading of modules without version information (ie. modprobe
+ --force). Forced module loading sets the 'F' (forced) taint flag and
+ is usually a really bad idea.
+
+config MODULE_UNLOAD
+ bool "Module unloading"
+ help
+ Without this option you will not be able to unload any
+ modules (note that some modules may not be unloadable
+ anyway), which makes your kernel smaller, faster
+ and simpler. If unsure, say Y.
+
+config MODULE_FORCE_UNLOAD
+ bool "Forced module unloading"
+ depends on MODULE_UNLOAD
+ help
+ This option allows you to force a module to unload, even if the
+ kernel believes it is unsafe: the kernel will remove the module
+ without waiting for anyone to stop using it (using the -f option to
+ rmmod). This is mainly for kernel developers and desperate users.
+ If unsure, say N.
+
+config MODULE_UNLOAD_TAINT_TRACKING
+ bool "Tainted module unload tracking"
+ depends on MODULE_UNLOAD
+ default n
+ help
+ This option allows you to maintain a record of each unloaded
+ module that tainted the kernel. In addition to displaying a
+ list of linked (or loaded) modules e.g. on detection of a bad
+ page (see bad_page()), the aforementioned details are also
+ shown. If unsure, say N.
+
+config MODVERSIONS
+ bool "Module versioning support"
+ help
+ Usually, you have to use modules compiled with your kernel.
+ Saying Y here makes it sometimes possible to use modules
+ compiled for different kernels, by adding enough information
+ to the modules to (hopefully) spot any changes which would
+ make them incompatible with the kernel you are running. If
+ unsure, say N.
+
+config ASM_MODVERSIONS
+ bool
+ default HAVE_ASM_MODVERSIONS && MODVERSIONS
+ help
+ This enables module versioning for exported symbols also from
+ assembly. This can be enabled only when the target architecture
+ supports it.
+
+config MODULE_SRCVERSION_ALL
+ bool "Source checksum for all modules"
+ help
+ Modules which contain a MODULE_VERSION get an extra "srcversion"
+ field inserted into their modinfo section, which contains a
+ sum of the source files which made it. This helps maintainers
+ see exactly which source was used to build a module (since
+ others sometimes change the module source without updating
+ the version). With this option, such a "srcversion" field
+ will be created for all modules. If unsure, say N.
+
+config MODULE_SIG
+ bool "Module signature verification"
+ select MODULE_SIG_FORMAT
+ help
+ Check modules for valid signatures upon load: the signature
+ is simply appended to the module. For more information see
+ <file:Documentation/admin-guide/module-signing.rst>.
+
+ Note that this option adds the OpenSSL development packages as a
+ kernel build dependency so that the signing tool can use its crypto
+ library.
+
+ You should enable this option if you wish to use either
+ CONFIG_SECURITY_LOCKDOWN_LSM or lockdown functionality imposed via
+ another LSM - otherwise unsigned modules will be loadable regardless
+ of the lockdown policy.
+
+ !!!WARNING!!! If you enable this option, you MUST make sure that the
+ module DOES NOT get stripped after being signed. This includes the
+ debuginfo strip done by some packagers (such as rpmbuild) and
+ inclusion into an initramfs that wants the module size reduced.
+
+config MODULE_SIG_FORCE
+ bool "Require modules to be validly signed"
+ depends on MODULE_SIG
+ help
+ Reject unsigned modules or signed modules for which we don't have a
+ key. Without this, such modules will simply taint the kernel.
+
+config MODULE_SIG_ALL
+ bool "Automatically sign all modules"
+ default y
+ depends on MODULE_SIG || IMA_APPRAISE_MODSIG
+ help
+ Sign all modules during make modules_install. Without this option,
+ modules must be signed manually, using the scripts/sign-file tool.
+
+comment "Do not forget to sign required modules with scripts/sign-file"
+ depends on MODULE_SIG_FORCE && !MODULE_SIG_ALL
+
+choice
+ prompt "Which hash algorithm should modules be signed with?"
+ depends on MODULE_SIG || IMA_APPRAISE_MODSIG
+ help
+ This determines which sort of hashing algorithm will be used during
+ signature generation. This algorithm _must_ be built into the kernel
+ directly so that signature verification can take place. It is not
+ possible to load a signed module containing the algorithm to check
+ the signature on that module.
+
+config MODULE_SIG_SHA1
+ bool "Sign modules with SHA-1"
+ select CRYPTO_SHA1
+
+config MODULE_SIG_SHA224
+ bool "Sign modules with SHA-224"
+ select CRYPTO_SHA256
+
+config MODULE_SIG_SHA256
+ bool "Sign modules with SHA-256"
+ select CRYPTO_SHA256
+
+config MODULE_SIG_SHA384
+ bool "Sign modules with SHA-384"
+ select CRYPTO_SHA512
+
+config MODULE_SIG_SHA512
+ bool "Sign modules with SHA-512"
+ select CRYPTO_SHA512
+
+endchoice
+
+config MODULE_SIG_HASH
+ string
+ depends on MODULE_SIG || IMA_APPRAISE_MODSIG
+ default "sha1" if MODULE_SIG_SHA1
+ default "sha224" if MODULE_SIG_SHA224
+ default "sha256" if MODULE_SIG_SHA256
+ default "sha384" if MODULE_SIG_SHA384
+ default "sha512" if MODULE_SIG_SHA512
+
+choice
+ prompt "Module compression mode"
+ help
+ This option allows you to choose the algorithm which will be used to
+ compress modules when 'make modules_install' is run. (or, you can
+ choose to not compress modules at all.)
+
+ External modules will also be compressed in the same way during the
+ installation.
+
+ For modules inside an initrd or initramfs, it's more efficient to
+ compress the whole initrd or initramfs instead.
+
+ This is fully compatible with signed modules.
+
+ Please note that the tool used to load modules needs to support the
+ corresponding algorithm. module-init-tools MAY support gzip, and kmod
+ MAY support gzip, xz and zstd.
+
+ Your build system needs to provide the appropriate compression tool
+ to compress the modules.
+
+ If in doubt, select 'None'.
+
+config MODULE_COMPRESS_NONE
+ bool "None"
+ help
+ Do not compress modules. The installed modules are suffixed
+ with .ko.
+
+config MODULE_COMPRESS_GZIP
+ bool "GZIP"
+ help
+ Compress modules with GZIP. The installed modules are suffixed
+ with .ko.gz.
+
+config MODULE_COMPRESS_XZ
+ bool "XZ"
+ help
+ Compress modules with XZ. The installed modules are suffixed
+ with .ko.xz.
+
+config MODULE_COMPRESS_ZSTD
+ bool "ZSTD"
+ help
+ Compress modules with ZSTD. The installed modules are suffixed
+ with .ko.zst.
+
+endchoice
+
+config MODULE_DECOMPRESS
+ bool "Support in-kernel module decompression"
+ depends on MODULE_COMPRESS_GZIP || MODULE_COMPRESS_XZ
+ select ZLIB_INFLATE if MODULE_COMPRESS_GZIP
+ select XZ_DEC if MODULE_COMPRESS_XZ
+ help
+
+ Support for decompressing kernel modules by the kernel itself
+ instead of relying on userspace to perform this task. Useful when
+ load pinning security policy is enabled.
+
+ If unsure, say N.
+
+config MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS
+ bool "Allow loading of modules with missing namespace imports"
+ help
+ Symbols exported with EXPORT_SYMBOL_NS*() are considered exported in
+ a namespace. A module that makes use of a symbol exported with such a
+ namespace is required to import the namespace via MODULE_IMPORT_NS().
+ There is no technical reason to enforce correct namespace imports,
+ but it creates consistency between symbols defining namespaces and
+ users importing namespaces they make use of. This option relaxes this
+ requirement and lifts the enforcement when loading a module.
+
+ If unsure, say N.
+
+config MODPROBE_PATH
+ string "Path to modprobe binary"
+ default "/sbin/modprobe"
+ help
+ When kernel code requests a module, it does so by calling
+ the "modprobe" userspace utility. This option allows you to
+ set the path where that binary is found. This can be changed
+ at runtime via the sysctl file
+ /proc/sys/kernel/modprobe. Setting this to the empty string
+ removes the kernel's ability to request modules (but
+ userspace can still load modules explicitly).
+
+config TRIM_UNUSED_KSYMS
+ bool "Trim unused exported kernel symbols" if EXPERT
+ depends on !COMPILE_TEST
+ help
+ The kernel and some modules make many symbols available for
+ other modules to use via EXPORT_SYMBOL() and variants. Depending
+ on the set of modules being selected in your kernel configuration,
+ many of those exported symbols might never be used.
+
+ This option allows for unused exported symbols to be dropped from
+ the build. In turn, this provides the compiler more opportunities
+ (especially when using LTO) for optimizing the code and reducing
+ binary size. This might have some security advantages as well.
+
+ If unsure, or if you need to build out-of-tree modules, say N.
+
+config UNUSED_KSYMS_WHITELIST
+ string "Whitelist of symbols to keep in ksymtab"
+ depends on TRIM_UNUSED_KSYMS
+ help
+ By default, all unused exported symbols will be un-exported from the
+ build when TRIM_UNUSED_KSYMS is selected.
+
+ UNUSED_KSYMS_WHITELIST allows you to whitelist symbols that must be kept
+ exported at all times, even in the absence of in-tree users. The value to
+ set here is the path to a text file containing the list of symbols,
+ one per line. The path can be absolute, or relative to the kernel
+ source tree.
+
+config MODULES_TREE_LOOKUP
+ def_bool y
+ depends on PERF_EVENTS || TRACING || CFI_CLANG
+
+endif # MODULES
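The MODPROBE_PATH help above describes how the kernel reaches the userspace modprobe helper. A hedged sketch (the init function name and requested module alias are illustrative, not from this patch) of the in-kernel side, which goes through request_module():

#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>

static int __init demand_load_init(void)
{
	/* Asks the binary configured by MODPROBE_PATH (or /proc/sys/kernel/modprobe) to load a module. */
	int ret = request_module("crc-t10dif");

	if (ret)
		pr_warn("demand_load: modprobe helper returned %d\n", ret);
	return 0;
}
module_init(demand_load_init);
MODULE_LICENSE("GPL");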
diff --git a/kernel/module/decompress.c b/kernel/module/decompress.c
index 2fc7081dd7c1..4d0bcb3d9e44 100644
--- a/kernel/module/decompress.c
+++ b/kernel/module/decompress.c
@@ -119,10 +119,10 @@ static ssize_t module_gzip_decompress(struct load_info *info,
goto out_inflate_end;
}
- s.next_out = kmap(page);
+ s.next_out = kmap_local_page(page);
s.avail_out = PAGE_SIZE;
rc = zlib_inflate(&s, 0);
- kunmap(page);
+ kunmap_local(s.next_out);
new_size += PAGE_SIZE - s.avail_out;
} while (rc == Z_OK);
@@ -178,11 +178,11 @@ static ssize_t module_xz_decompress(struct load_info *info,
goto out;
}
- xz_buf.out = kmap(page);
+ xz_buf.out = kmap_local_page(page);
xz_buf.out_pos = 0;
xz_buf.out_size = PAGE_SIZE;
xz_ret = xz_dec_run(xz_dec, &xz_buf);
- kunmap(page);
+ kunmap_local(xz_buf.out);
new_size += xz_buf.out_pos;
} while (xz_buf.out_pos == PAGE_SIZE && xz_ret == XZ_OK);
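Both hunks above switch from kmap()/kunmap() to the local-mapping API. A hedged sketch of the pattern (the helper name is illustrative); note that kunmap_local() takes the mapped address rather than the struct page, which is why the patch passes s.next_out and xz_buf.out:

#include <linux/highmem.h>
#include <linux/string.h>

/* Copy up to one page of data into @page using a short-lived local mapping. */
static size_t fill_page(struct page *page, const void *src, size_t len)
{
	void *dst = kmap_local_page(page);	/* cheap, per-CPU, stack-like mapping */

	len = min_t(size_t, len, PAGE_SIZE);
	memcpy(dst, src, len);
	kunmap_local(dst);			/* unmap by address, in reverse order */
	return len;
}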
diff --git a/kernel/module/internal.h b/kernel/module/internal.h
index ec104c2950c3..680d980a4fb2 100644
--- a/kernel/module/internal.h
+++ b/kernel/module/internal.h
@@ -103,7 +103,7 @@ struct module *find_module_all(const char *name, size_t len, bool even_unformed)
int cmp_name(const void *name, const void *sym);
long module_get_offset(struct module *mod, unsigned int *size, Elf_Shdr *sechdr,
unsigned int section);
-char *module_flags(struct module *mod, char *buf);
+char *module_flags(struct module *mod, char *buf, bool show_state);
size_t module_flags_taint(unsigned long taints, char *buf);
static inline void module_assert_mutex_or_preempt(void)
diff --git a/kernel/module/kallsyms.c b/kernel/module/kallsyms.c
index 77e75bead569..f5c5c9175333 100644
--- a/kernel/module/kallsyms.c
+++ b/kernel/module/kallsyms.c
@@ -457,26 +457,39 @@ unsigned long find_kallsyms_symbol_value(struct module *mod, const char *name)
return 0;
}
-/* Look for this name: can be of form module:name. */
-unsigned long module_kallsyms_lookup_name(const char *name)
+static unsigned long __module_kallsyms_lookup_name(const char *name)
{
struct module *mod;
char *colon;
- unsigned long ret = 0;
+
+ colon = strnchr(name, MODULE_NAME_LEN, ':');
+ if (colon) {
+ mod = find_module_all(name, colon - name, false);
+ if (mod)
+ return find_kallsyms_symbol_value(mod, colon + 1);
+ return 0;
+ }
+
+ list_for_each_entry_rcu(mod, &modules, list) {
+ unsigned long ret;
+
+ if (mod->state == MODULE_STATE_UNFORMED)
+ continue;
+ ret = find_kallsyms_symbol_value(mod, name);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+/* Look for this name: can be of form module:name. */
+unsigned long module_kallsyms_lookup_name(const char *name)
+{
+ unsigned long ret;
/* Don't lock: we're in enough trouble already. */
preempt_disable();
- if ((colon = strnchr(name, MODULE_NAME_LEN, ':')) != NULL) {
- if ((mod = find_module_all(name, colon - name, false)) != NULL)
- ret = find_kallsyms_symbol_value(mod, colon + 1);
- } else {
- list_for_each_entry_rcu(mod, &modules, list) {
- if (mod->state == MODULE_STATE_UNFORMED)
- continue;
- if ((ret = find_kallsyms_symbol_value(mod, name)) != 0)
- break;
- }
- }
+ ret = __module_kallsyms_lookup_name(name);
preempt_enable();
return ret;
}
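The refactor above keeps the module:name lookup convention while moving the walk into __module_kallsyms_lookup_name(). A hedged usage sketch (the module and symbol names are made up for illustration):

#include <linux/module.h>

static void lookup_example(void)
{
	/* Scoped form: search only the named module's symbol table. */
	unsigned long a = module_kallsyms_lookup_name("foo_mod:foo_probe");
	/* Bare form: walk every formed module until a match is found. */
	unsigned long b = module_kallsyms_lookup_name("foo_probe");

	pr_info("scoped=%lx bare=%lx\n", a, b);
}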
diff --git a/kernel/module/main.c b/kernel/module/main.c
index 57fc2821be63..a4e4d84b6f4e 100644
--- a/kernel/module/main.c
+++ b/kernel/module/main.c
@@ -119,7 +119,7 @@ static void mod_update_bounds(struct module *mod)
}
/* Block module loading/unloading? */
-int modules_disabled = 0;
+int modules_disabled;
core_param(nomodule, modules_disabled, bint, 0);
/* Waiting for a module to finish initializing? */
@@ -524,7 +524,10 @@ static struct module_attribute modinfo_##field = { \
MODINFO_ATTR(version);
MODINFO_ATTR(srcversion);
-static char last_unloaded_module[MODULE_NAME_LEN+1];
+static struct {
+ char name[MODULE_NAME_LEN + 1];
+ char taints[MODULE_FLAGS_BUF_SIZE];
+} last_unloaded_module;
#ifdef CONFIG_MODULE_UNLOAD
@@ -694,6 +697,7 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
{
struct module *mod;
char name[MODULE_NAME_LEN];
+ char buf[MODULE_FLAGS_BUF_SIZE];
int ret, forced = 0;
if (!capable(CAP_SYS_MODULE) || modules_disabled)
@@ -753,8 +757,9 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
async_synchronize_full();
- /* Store the name of the last unloaded module for diagnostic purposes */
- strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));
+ /* Store the name and taints of the last unloaded module for diagnostic purposes */
+ strscpy(last_unloaded_module.name, mod->name, sizeof(last_unloaded_module.name));
+ strscpy(last_unloaded_module.taints, module_flags(mod, buf, false), sizeof(last_unloaded_module.taints));
free_module(mod);
/* someone could wait for the module in add_unformed_module() */
@@ -2094,7 +2099,7 @@ static int find_module_sections(struct module *mod, struct load_info *info)
sizeof(*mod->static_call_sites),
&mod->num_static_call_sites);
#endif
-#ifdef CONFIG_KUNIT
+#if IS_ENABLED(CONFIG_KUNIT)
mod->kunit_suites = section_objs(info, ".kunit_test_suites",
sizeof(*mod->kunit_suites),
&mod->num_kunit_suites);
@@ -2151,7 +2156,7 @@ static int move_module(struct module *mod, struct load_info *info)
#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
/* Do the allocs. */
- ptr = vmalloc(mod->data_layout.size);
+ ptr = vzalloc(mod->data_layout.size);
/*
* The pointer to this block is stored in the module structure
* which is inside the block. Just mark it as not being a
@@ -2164,7 +2169,6 @@ static int move_module(struct module *mod, struct load_info *info)
return -ENOMEM;
}
- memset(ptr, 0, mod->data_layout.size);
mod->data_layout.base = ptr;
#endif
/* Transfer each section which specifies SHF_ALLOC */
@@ -2423,6 +2427,12 @@ static void do_free_init(struct work_struct *w)
}
}
+#undef MODULE_PARAM_PREFIX
+#define MODULE_PARAM_PREFIX "module."
+/* Default value for module->async_probe_requested */
+static bool async_probe;
+module_param(async_probe, bool, 0644);
+
/*
* This is where the real work happens.
*
@@ -2643,7 +2653,8 @@ static int unknown_module_param_cb(char *param, char *val, const char *modname,
int ret;
if (strcmp(param, "async_probe") == 0) {
- mod->async_probe_requested = true;
+ if (strtobool(val, &mod->async_probe_requested))
+ mod->async_probe_requested = true;
return 0;
}
@@ -2810,6 +2821,8 @@ static int load_module(struct load_info *info, const char __user *uargs,
if (err)
goto bug_cleanup;
+ mod->async_probe_requested = async_probe;
+
/* Module is ready to execute: parsing args may do that. */
after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
-32768, 32767, mod,
@@ -2984,24 +2997,27 @@ static void cfi_cleanup(struct module *mod)
}
/* Keep in sync with MODULE_FLAGS_BUF_SIZE !!! */
-char *module_flags(struct module *mod, char *buf)
+char *module_flags(struct module *mod, char *buf, bool show_state)
{
int bx = 0;
BUG_ON(mod->state == MODULE_STATE_UNFORMED);
+ if (!mod->taints && !show_state)
+ goto out;
if (mod->taints ||
mod->state == MODULE_STATE_GOING ||
mod->state == MODULE_STATE_COMING) {
buf[bx++] = '(';
bx += module_flags_taint(mod->taints, buf + bx);
/* Show a - for module-is-being-unloaded */
- if (mod->state == MODULE_STATE_GOING)
+ if (mod->state == MODULE_STATE_GOING && show_state)
buf[bx++] = '-';
/* Show a + for module-is-being-loaded */
- if (mod->state == MODULE_STATE_COMING)
+ if (mod->state == MODULE_STATE_COMING && show_state)
buf[bx++] = '+';
buf[bx++] = ')';
}
+out:
buf[bx] = '\0';
return buf;
@@ -3134,12 +3150,13 @@ void print_modules(void)
list_for_each_entry_rcu(mod, &modules, list) {
if (mod->state == MODULE_STATE_UNFORMED)
continue;
- pr_cont(" %s%s", mod->name, module_flags(mod, buf));
+ pr_cont(" %s%s", mod->name, module_flags(mod, buf, true));
}
print_unloaded_tainted_modules();
preempt_enable();
- if (last_unloaded_module[0])
- pr_cont(" [last unloaded: %s]", last_unloaded_module);
+ if (last_unloaded_module.name[0])
+ pr_cont(" [last unloaded: %s%s]", last_unloaded_module.name,
+ last_unloaded_module.taints);
pr_cont("\n");
}
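With the extra show_state argument, callers can drop the transient '+'/'-' load-state markers, which is what the last_unloaded_module.taints snapshot relies on. A hedged sketch of the difference (the wrapper function is illustrative and assumes it lives under kernel/module/ where internal.h is visible):

#include <linux/module.h>
#include "internal.h"

static void show_flags(struct module *mod)
{
	char buf[MODULE_FLAGS_BUF_SIZE];

	/* e.g. "(OE)"  - taint letters only, as stored for the last unloaded module */
	pr_info("taints: %s\n", module_flags(mod, buf, false));
	/* e.g. "(OE-)" - taints plus the going/coming state marker, as in print_modules() */
	pr_info("full:   %s\n", module_flags(mod, buf, true));
}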
diff --git a/kernel/module/procfs.c b/kernel/module/procfs.c
index 9a8f4f0f6329..cf5b9f1e6ec4 100644
--- a/kernel/module/procfs.c
+++ b/kernel/module/procfs.c
@@ -91,7 +91,7 @@ static int m_show(struct seq_file *m, void *p)
/* Taints info */
if (mod->taints)
- seq_printf(m, " %s", module_flags(mod, buf));
+ seq_printf(m, " %s", module_flags(mod, buf, true));
seq_puts(m, "\n");
return 0;
diff --git a/kernel/profile.c b/kernel/profile.c
index 37640a0bd8a3..7ea01ba30e75 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -109,6 +109,13 @@ int __ref profile_init(void)
/* only text is profiled */
prof_len = (_etext - _stext) >> prof_shift;
+
+ if (!prof_len) {
+ pr_warn("profiling shift: %u too large\n", prof_shift);
+ prof_on = 0;
+ return -EINVAL;
+ }
+
buffer_bytes = prof_len*sizeof(atomic_t);
if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
@@ -418,6 +425,12 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
return read;
}
+/* default is to not implement this call */
+int __weak setup_profiling_timer(unsigned mult)
+{
+ return -EINVAL;
+}
+
/*
* Writing to /proc/profile resets the counters
*
@@ -428,8 +441,6 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
#ifdef CONFIG_SMP
- extern int setup_profiling_timer(unsigned int multiplier);
-
if (count == sizeof(int)) {
unsigned int multiplier;
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 3dc968006ad0..79aea7df4345 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -4574,7 +4574,7 @@ static void __init kfree_rcu_batch_init(void)
INIT_DELAYED_WORK(&krcp->page_cache_work, fill_page_cache_func);
krcp->initialized = true;
}
- if (register_shrinker(&kfree_rcu_shrinker))
+ if (register_shrinker(&kfree_rcu_shrinker, "rcu-kfree"))
pr_err("Failed to register kfree_rcu() shrinker!\n");
}
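register_shrinker() now takes a printf-style name, surfaced through the new shrinker debugfs interface, which is why this hunk passes "rcu-kfree". A hedged sketch of a caller updated for the new signature (the callbacks are illustrative stubs):

#include <linux/init.h>
#include <linux/shrinker.h>

static unsigned long demo_count(struct shrinker *s, struct shrink_control *sc)
{
	return 0;		/* nothing to reclaim in this stub */
}

static unsigned long demo_scan(struct shrinker *s, struct shrink_control *sc)
{
	return SHRINK_STOP;
}

static struct shrinker demo_shrinker = {
	.count_objects	= demo_count,
	.scan_objects	= demo_scan,
	.seeks		= DEFAULT_SEEKS,
};

static int __init demo_shrinker_init(void)
{
	/* The second argument names the shrinker, e.g. in shrinker debugfs. */
	return register_shrinker(&demo_shrinker, "demo-cache");
}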
diff --git a/kernel/resource.c b/kernel/resource.c
index 34eaee179689..4c5e80b92f2f 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -489,8 +489,9 @@ int __weak page_is_ram(unsigned long pfn)
}
EXPORT_SYMBOL_GPL(page_is_ram);
-static int __region_intersects(resource_size_t start, size_t size,
- unsigned long flags, unsigned long desc)
+static int __region_intersects(struct resource *parent, resource_size_t start,
+ size_t size, unsigned long flags,
+ unsigned long desc)
{
struct resource res;
int type = 0; int other = 0;
@@ -499,7 +500,7 @@ static int __region_intersects(resource_size_t start, size_t size,
res.start = start;
res.end = start + size - 1;
- for (p = iomem_resource.child; p ; p = p->sibling) {
+ for (p = parent->child; p ; p = p->sibling) {
bool is_type = (((p->flags & flags) == flags) &&
((desc == IORES_DESC_NONE) ||
(desc == p->desc)));
@@ -543,7 +544,7 @@ int region_intersects(resource_size_t start, size_t size, unsigned long flags,
int ret;
read_lock(&resource_lock);
- ret = __region_intersects(start, size, flags, desc);
+ ret = __region_intersects(&iomem_resource, start, size, flags, desc);
read_unlock(&resource_lock);
return ret;
@@ -891,6 +892,13 @@ void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
}
write_unlock(&resource_lock);
}
+/*
+ * Not for general consumption, only early boot memory map parsing, PCI
+ * resource discovery, and late discovery of CXL resources are expected
+ * to use this interface. The former are built-in and only the latter,
+ * CXL, is a module.
+ */
+EXPORT_SYMBOL_NS_GPL(insert_resource_expand_to_fit, CXL);
/**
* remove_resource - Remove a resource in the resource tree
@@ -1773,62 +1781,139 @@ void resource_list_free(struct list_head *head)
}
EXPORT_SYMBOL(resource_list_free);
-#ifdef CONFIG_DEVICE_PRIVATE
-static struct resource *__request_free_mem_region(struct device *dev,
- struct resource *base, unsigned long size, const char *name)
+#ifdef CONFIG_GET_FREE_REGION
+#define GFR_DESCENDING (1UL << 0)
+#define GFR_REQUEST_REGION (1UL << 1)
+#define GFR_DEFAULT_ALIGN (1UL << PA_SECTION_SHIFT)
+
+static resource_size_t gfr_start(struct resource *base, resource_size_t size,
+ resource_size_t align, unsigned long flags)
+{
+ if (flags & GFR_DESCENDING) {
+ resource_size_t end;
+
+ end = min_t(resource_size_t, base->end,
+ (1ULL << MAX_PHYSMEM_BITS) - 1);
+ return end - size + 1;
+ }
+
+ return ALIGN(base->start, align);
+}
+
+static bool gfr_continue(struct resource *base, resource_size_t addr,
+ resource_size_t size, unsigned long flags)
+{
+ if (flags & GFR_DESCENDING)
+ return addr > size && addr >= base->start;
+ /*
+ * In the ascend case be careful that the last increment by
+ * @size did not wrap 0.
+ */
+ return addr > addr - size &&
+ addr <= min_t(resource_size_t, base->end,
+ (1ULL << MAX_PHYSMEM_BITS) - 1);
+}
+
+static resource_size_t gfr_next(resource_size_t addr, resource_size_t size,
+ unsigned long flags)
{
- resource_size_t end, addr;
+ if (flags & GFR_DESCENDING)
+ return addr - size;
+ return addr + size;
+}
+
+static void remove_free_mem_region(void *_res)
+{
+ struct resource *res = _res;
+
+ if (res->parent)
+ remove_resource(res);
+ free_resource(res);
+}
+
+static struct resource *
+get_free_mem_region(struct device *dev, struct resource *base,
+ resource_size_t size, const unsigned long align,
+ const char *name, const unsigned long desc,
+ const unsigned long flags)
+{
+ resource_size_t addr;
struct resource *res;
struct region_devres *dr = NULL;
- size = ALIGN(size, 1UL << PA_SECTION_SHIFT);
- end = min_t(unsigned long, base->end, (1UL << MAX_PHYSMEM_BITS) - 1);
- addr = end - size + 1UL;
+ size = ALIGN(size, align);
res = alloc_resource(GFP_KERNEL);
if (!res)
return ERR_PTR(-ENOMEM);
- if (dev) {
+ if (dev && (flags & GFR_REQUEST_REGION)) {
dr = devres_alloc(devm_region_release,
sizeof(struct region_devres), GFP_KERNEL);
if (!dr) {
free_resource(res);
return ERR_PTR(-ENOMEM);
}
+ } else if (dev) {
+ if (devm_add_action_or_reset(dev, remove_free_mem_region, res))
+ return ERR_PTR(-ENOMEM);
}
write_lock(&resource_lock);
- for (; addr > size && addr >= base->start; addr -= size) {
- if (__region_intersects(addr, size, 0, IORES_DESC_NONE) !=
- REGION_DISJOINT)
+ for (addr = gfr_start(base, size, align, flags);
+ gfr_continue(base, addr, size, flags);
+ addr = gfr_next(addr, size, flags)) {
+ if (__region_intersects(base, addr, size, 0, IORES_DESC_NONE) !=
+ REGION_DISJOINT)
continue;
- if (__request_region_locked(res, &iomem_resource, addr, size,
- name, 0))
- break;
+ if (flags & GFR_REQUEST_REGION) {
+ if (__request_region_locked(res, &iomem_resource, addr,
+ size, name, 0))
+ break;
- if (dev) {
- dr->parent = &iomem_resource;
- dr->start = addr;
- dr->n = size;
- devres_add(dev, dr);
- }
+ if (dev) {
+ dr->parent = &iomem_resource;
+ dr->start = addr;
+ dr->n = size;
+ devres_add(dev, dr);
+ }
- res->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
- write_unlock(&resource_lock);
+ res->desc = desc;
+ write_unlock(&resource_lock);
+
+
+ /*
+ * A driver is claiming this region so revoke any
+ * mappings.
+ */
+ revoke_iomem(res);
+ } else {
+ res->start = addr;
+ res->end = addr + size - 1;
+ res->name = name;
+ res->desc = desc;
+ res->flags = IORESOURCE_MEM;
+
+ /*
+ * Only succeed if the resource hosts an exclusive
+ * range after the insert
+ */
+ if (__insert_resource(base, res) || res->child)
+ break;
+
+ write_unlock(&resource_lock);
+ }
- /*
- * A driver is claiming this region so revoke any mappings.
- */
- revoke_iomem(res);
return res;
}
write_unlock(&resource_lock);
- free_resource(res);
- if (dr)
+ if (flags & GFR_REQUEST_REGION) {
+ free_resource(res);
devres_free(dr);
+ } else if (dev)
+ devm_release_action(dev, remove_free_mem_region, res);
return ERR_PTR(-ERANGE);
}
@@ -1847,18 +1932,48 @@ static struct resource *__request_free_mem_region(struct device *dev,
struct resource *devm_request_free_mem_region(struct device *dev,
struct resource *base, unsigned long size)
{
- return __request_free_mem_region(dev, base, size, dev_name(dev));
+ unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;
+
+ return get_free_mem_region(dev, base, size, GFR_DEFAULT_ALIGN,
+ dev_name(dev),
+ IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
}
EXPORT_SYMBOL_GPL(devm_request_free_mem_region);
struct resource *request_free_mem_region(struct resource *base,
unsigned long size, const char *name)
{
- return __request_free_mem_region(NULL, base, size, name);
+ unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;
+
+ return get_free_mem_region(NULL, base, size, GFR_DEFAULT_ALIGN, name,
+ IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
}
EXPORT_SYMBOL_GPL(request_free_mem_region);
-#endif /* CONFIG_DEVICE_PRIVATE */
+/**
+ * alloc_free_mem_region - find a free region relative to @base
+ * @base: resource that will parent the new resource
+ * @size: size in bytes of memory to allocate from @base
+ * @align: alignment requirements for the allocation
+ * @name: resource name
+ *
+ * Buses like CXL, that can dynamically instantiate new memory regions,
+ * need a method to allocate physical address space for those regions.
+ * Allocate and insert a new resource to cover a free range in the span of
+ * @base that is not claimed by any descendant of @base.
+ */
+struct resource *alloc_free_mem_region(struct resource *base,
+ unsigned long size, unsigned long align,
+ const char *name)
+{
+ /* Default of ascending direction and insert resource */
+ unsigned long flags = 0;
+
+ return get_free_mem_region(NULL, base, size, align, name,
+ IORES_DESC_NONE, flags);
+}
+EXPORT_SYMBOL_NS_GPL(alloc_free_mem_region, CXL);
+#endif /* CONFIG_GET_FREE_REGION */
static int __init strict_iomem(char *str)
{
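The resource.c rework above generalizes __request_free_mem_region() into get_free_mem_region(), and alloc_free_mem_region() is its ascending, insert-only flavor intended for CXL. A hedged usage sketch (the caller, parent resource, and sizes are illustrative):

#include <linux/ioport.h>
#include <linux/sizes.h>

static struct resource *carve_out_region(struct resource *parent)
{
	/*
	 * Ascending search under @parent for a 256 MiB, 256 MiB-aligned hole;
	 * on success the returned resource is already inserted under @parent.
	 */
	return alloc_free_mem_region(parent, SZ_256M, SZ_256M, "region0");
}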
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 189999007f32..ee28253c9ac0 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3802,7 +3802,7 @@ bool cpus_share_cache(int this_cpu, int that_cpu)
return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
}
-static inline bool ttwu_queue_cond(int cpu)
+static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
{
/*
* Do not complicate things with the async wake_list while the CPU is
@@ -3811,6 +3811,10 @@ static inline bool ttwu_queue_cond(int cpu)
if (!cpu_active(cpu))
return false;
+ /* Ensure the task will still be allowed to run on the CPU. */
+ if (!cpumask_test_cpu(cpu, p->cpus_ptr))
+ return false;
+
/*
* If the CPU does not share cache, then queue the task on the
* remote rqs wakelist to avoid accessing remote data.
@@ -3840,7 +3844,7 @@ static inline bool ttwu_queue_cond(int cpu)
static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
{
- if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(cpu)) {
+ if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) {
sched_clock_cpu(cpu); /* Sync clocks across CPUs */
__ttwu_queue_wakelist(p, cpu, wake_flags);
return true;
@@ -9012,7 +9016,7 @@ int cpuset_cpumask_can_shrink(const struct cpumask *cur,
}
int task_can_attach(struct task_struct *p,
- const struct cpumask *cs_cpus_allowed)
+ const struct cpumask *cs_effective_cpus)
{
int ret = 0;
@@ -9031,9 +9035,11 @@ int task_can_attach(struct task_struct *p,
}
if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
- cs_cpus_allowed)) {
- int cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed);
+ cs_effective_cpus)) {
+ int cpu = cpumask_any_and(cpu_active_mask, cs_effective_cpus);
+ if (unlikely(cpu >= nr_cpu_ids))
+ return -EINVAL;
ret = dl_cpu_busy(cpu, p);
}
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index ec66b40bdd40..ecb4b4ff4ce0 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -190,12 +190,8 @@ static void group_init(struct psi_group *group)
/* Init trigger-related members */
mutex_init(&group->trigger_lock);
INIT_LIST_HEAD(&group->triggers);
- memset(group->nr_triggers, 0, sizeof(group->nr_triggers));
- group->poll_states = 0;
group->poll_min_period = U32_MAX;
- memset(group->polling_total, 0, sizeof(group->polling_total));
group->polling_next_update = ULLONG_MAX;
- group->polling_until = 0;
init_waitqueue_head(&group->poll_wait);
timer_setup(&group->poll_timer, poll_timer_fn, 0);
rcu_assign_pointer(group->poll_task, NULL);
@@ -957,7 +953,7 @@ int psi_cgroup_alloc(struct cgroup *cgroup)
if (static_branch_likely(&psi_disabled))
return 0;
- cgroup->psi = kmalloc(sizeof(struct psi_group), GFP_KERNEL);
+ cgroup->psi = kzalloc(sizeof(struct psi_group), GFP_KERNEL);
if (!cgroup->psi)
return -ENOMEM;
@@ -1091,7 +1087,7 @@ int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
}
struct psi_trigger *psi_trigger_create(struct psi_group *group,
- char *buf, size_t nbytes, enum psi_res res)
+ char *buf, enum psi_res res)
{
struct psi_trigger *t;
enum psi_states state;
@@ -1320,7 +1316,7 @@ static ssize_t psi_write(struct file *file, const char __user *user_buf,
return -EBUSY;
}
- new = psi_trigger_create(&psi_system, buf, nbytes, res);
+ new = psi_trigger_create(&psi_system, buf, res);
if (IS_ERR(new)) {
mutex_unlock(&seq->lock);
return PTR_ERR(new);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index a6f071b2acac..e26688d387ae 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -481,9 +481,6 @@ extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
-extern void unregister_rt_sched_group(struct task_group *tg);
-extern void free_rt_sched_group(struct task_group *tg);
-extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
struct sched_rt_entity *rt_se, int cpu,
struct sched_rt_entity *parent);
@@ -521,6 +518,10 @@ struct cfs_bandwidth { };
#endif /* CONFIG_CGROUP_SCHED */
+extern void unregister_rt_sched_group(struct task_group *tg);
+extern void free_rt_sched_group(struct task_group *tg);
+extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
+
/*
* u64_u32_load/u64_u32_store
*
diff --git a/kernel/sched/wait_bit.c b/kernel/sched/wait_bit.c
index d4788f810b55..0b1cd985dc27 100644
--- a/kernel/sched/wait_bit.c
+++ b/kernel/sched/wait_bit.c
@@ -47,7 +47,7 @@ __wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_
prepare_to_wait(wq_head, &wbq_entry->wq_entry, mode);
if (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags))
ret = (*action)(&wbq_entry->key, mode);
- } while (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags) && !ret);
+ } while (test_bit_acquire(wbq_entry->key.bit_nr, wbq_entry->key.flags) && !ret);
finish_wait(wq_head, &wbq_entry->wq_entry);
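The wait_bit change uses the new test_bit_acquire() helper so the final read of the bit is ordered against the store that cleared it. A hedged sketch of the idiom (the flag word and bit number are illustrative):

#include <linux/bitops.h>
#include <linux/processor.h>

#define DEMO_LOCK_BIT	0
static unsigned long demo_flags;

static void demo_wait_for_clear(void)
{
	/*
	 * The acquire read orders the bit test before whatever the caller does
	 * next; it pairs with clear_bit_unlock() on the side releasing the bit.
	 */
	while (test_bit_acquire(DEMO_LOCK_BIT, &demo_flags))
		cpu_relax();
}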
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index a492f159624f..860b2dcf3ac4 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -277,6 +277,7 @@ COND_SYSCALL(landlock_restrict_self);
/* mm/fadvise.c */
COND_SYSCALL(fadvise64_64);
+COND_SYSCALL_COMPAT(fadvise64_64);
/* mm/, CONFIG_MMU only */
COND_SYSCALL(swapon);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index b233714a1c78..205d605cacc5 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -492,12 +492,12 @@ static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table,
int *i, vleft, first = 1, err = 0;
size_t left;
char *p;
-
+
if (!tbl_data || !table->maxlen || !*lenp || (*ppos && !write)) {
*lenp = 0;
return 0;
}
-
+
i = (int *) tbl_data;
vleft = table->maxlen / sizeof(*i);
left = *lenp;
@@ -729,7 +729,7 @@ int proc_dobool(struct ctl_table *table, int write, void *buffer,
* @ppos: file position
*
* Reads/writes up to table->maxlen/sizeof(unsigned int) integer
- * values from/to the user buffer, treated as an ASCII string.
+ * values from/to the user buffer, treated as an ASCII string.
*
* Returns 0 on success.
*/
@@ -1273,7 +1273,7 @@ static int do_proc_dointvec_ms_jiffies_minmax_conv(bool *negp, unsigned long *lv
* @ppos: file position
*
* Reads/writes up to table->maxlen/sizeof(unsigned int) integer
- * values from/to the user buffer, treated as an ASCII string.
+ * values from/to the user buffer, treated as an ASCII string.
* The values read are assumed to be in seconds, and are converted into
* jiffies.
*
@@ -1306,8 +1306,8 @@ int proc_dointvec_ms_jiffies_minmax(struct ctl_table *table, int write,
* @ppos: pointer to the file position
*
* Reads/writes up to table->maxlen/sizeof(unsigned int) integer
- * values from/to the user buffer, treated as an ASCII string.
- * The values read are assumed to be in 1/USER_HZ seconds, and
+ * values from/to the user buffer, treated as an ASCII string.
+ * The values read are assumed to be in 1/USER_HZ seconds, and
* are converted into jiffies.
*
* Returns 0 on success.
@@ -1315,8 +1315,8 @@ int proc_dointvec_ms_jiffies_minmax(struct ctl_table *table, int write,
int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
- return do_proc_dointvec(table,write,buffer,lenp,ppos,
- do_proc_dointvec_userhz_jiffies_conv,NULL);
+ return do_proc_dointvec(table, write, buffer, lenp, ppos,
+ do_proc_dointvec_userhz_jiffies_conv, NULL);
}
/**
@@ -2061,7 +2061,7 @@ static struct ctl_table kern_table[] = {
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
-#if defined(CONFIG_TREE_RCU)
+#ifdef CONFIG_TREE_RCU
{
.procname = "panic_on_rcu_stall",
.data = &sysctl_panic_on_rcu_stall,
@@ -2071,8 +2071,6 @@ static struct ctl_table kern_table[] = {
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
-#endif
-#if defined(CONFIG_TREE_RCU)
{
.procname = "max_rcu_stall_to_panic",
.data = &sysctl_max_rcu_stall_to_panic,
diff --git a/kernel/time/posix-stubs.c b/kernel/time/posix-stubs.c
index fcb3b21d8bdc..90ea5f373e50 100644
--- a/kernel/time/posix-stubs.c
+++ b/kernel/time/posix-stubs.c
@@ -70,7 +70,7 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
return do_sys_settimeofday64(&new_tp, NULL);
}
-int do_clock_gettime(clockid_t which_clock, struct timespec64 *tp)
+static int do_clock_gettime(clockid_t which_clock, struct timespec64 *tp)
{
switch (which_clock) {
case CLOCK_REALTIME:
@@ -90,6 +90,7 @@ int do_clock_gettime(clockid_t which_clock, struct timespec64 *tp)
return 0;
}
+
SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
struct __kernel_timespec __user *, tp)
{
diff --git a/kernel/time/time.c b/kernel/time/time.c
index 29923b20e0e4..526257b3727c 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -449,7 +449,7 @@ time64_t mktime64(const unsigned int year0, const unsigned int mon0,
}
EXPORT_SYMBOL(mktime64);
-struct __kernel_old_timeval ns_to_kernel_old_timeval(const s64 nsec)
+struct __kernel_old_timeval ns_to_kernel_old_timeval(s64 nsec)
{
struct timespec64 ts = ns_to_timespec64(nsec);
struct __kernel_old_timeval tv;
@@ -503,7 +503,7 @@ EXPORT_SYMBOL(set_normalized_timespec64);
*
* Returns the timespec64 representation of the nsec parameter.
*/
-struct timespec64 ns_to_timespec64(const s64 nsec)
+struct timespec64 ns_to_timespec64(s64 nsec)
{
struct timespec64 ts = { 0, 0 };
s32 rem;
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index ccd6a5ade3e9..1052126bdca2 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -1106,4 +1106,6 @@ config HIST_TRIGGERS_DEBUG
If unsure, say N.
+source "kernel/trace/rv/Kconfig"
+
endif # FTRACE
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 0d261774d6f3..c6651e16b557 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -106,5 +106,6 @@ obj-$(CONFIG_FPROBE) += fprobe.o
obj-$(CONFIG_RETHOOK) += rethook.o
obj-$(CONFIG_TRACEPOINT_BENCHMARK) += trace_benchmark.o
+obj-$(CONFIG_RV) += rv/
libftrace-y := ftrace.o
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index bc921a3f7ea8..439e2ab6905e 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1861,8 +1861,6 @@ static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
ftrace_hash_rec_update_modify(ops, filter_hash, 1);
}
-static bool ops_references_ip(struct ftrace_ops *ops, unsigned long ip);
-
/*
* Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
* or no-needed to update, -EBUSY if it detects a conflict of the flag
@@ -2974,6 +2972,16 @@ int ftrace_startup(struct ftrace_ops *ops, int command)
ftrace_startup_enable(command);
+ /*
+ * If ftrace is in an undefined state, we just remove ops from list
+ * to prevent the NULL pointer, instead of totally rolling it back and
+ * free trampoline, because those actions could cause further damage.
+ */
+ if (unlikely(ftrace_disabled)) {
+ __unregister_ftrace_function(ops);
+ return -ENODEV;
+ }
+
ops->flags &= ~FTRACE_OPS_FL_ADDING;
return 0;
@@ -3108,49 +3116,6 @@ static inline int ops_traces_mod(struct ftrace_ops *ops)
ftrace_hash_empty(ops->func_hash->notrace_hash);
}
-/*
- * Check if the current ops references the given ip.
- *
- * If the ops traces all functions, then it was already accounted for.
- * If the ops does not trace the current record function, skip it.
- * If the ops ignores the function via notrace filter, skip it.
- */
-static bool
-ops_references_ip(struct ftrace_ops *ops, unsigned long ip)
-{
- /* If ops isn't enabled, ignore it */
- if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
- return false;
-
- /* If ops traces all then it includes this function */
- if (ops_traces_mod(ops))
- return true;
-
- /* The function must be in the filter */
- if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
- !__ftrace_lookup_ip(ops->func_hash->filter_hash, ip))
- return false;
-
- /* If in notrace hash, we ignore it too */
- if (ftrace_lookup_ip(ops->func_hash->notrace_hash, ip))
- return false;
-
- return true;
-}
-
-/*
- * Check if the current ops references the record.
- *
- * If the ops traces all functions, then it was already accounted for.
- * If the ops does not trace the current record function, skip it.
- * If the ops ignores the function via notrace filter, skip it.
- */
-static bool
-ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
-{
- return ops_references_ip(ops, rec->ip);
-}
-
static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
{
bool init_nop = ftrace_need_init_nop();
@@ -6812,6 +6777,38 @@ static int ftrace_get_trampoline_kallsym(unsigned int symnum,
return -ERANGE;
}
+#if defined(CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS) || defined(CONFIG_MODULES)
+/*
+ * Check if the current ops references the given ip.
+ *
+ * If the ops traces all functions, then it was already accounted for.
+ * If the ops does not trace the current record function, skip it.
+ * If the ops ignores the function via notrace filter, skip it.
+ */
+static bool
+ops_references_ip(struct ftrace_ops *ops, unsigned long ip)
+{
+ /* If ops isn't enabled, ignore it */
+ if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
+ return false;
+
+ /* If ops traces all then it includes this function */
+ if (ops_traces_mod(ops))
+ return true;
+
+ /* The function must be in the filter */
+ if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
+ !__ftrace_lookup_ip(ops->func_hash->filter_hash, ip))
+ return false;
+
+ /* If in notrace hash, we ignore it too */
+ if (ftrace_lookup_ip(ops->func_hash->notrace_hash, ip))
+ return false;
+
+ return true;
+}
+#endif
+
#ifdef CONFIG_MODULES
#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
@@ -6824,7 +6821,7 @@ static int referenced_filters(struct dyn_ftrace *rec)
int cnt = 0;
for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
- if (ops_references_rec(ops, rec)) {
+ if (ops_references_ip(ops, rec->ip)) {
if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_DIRECT))
continue;
if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_IPMODIFY))
diff --git a/kernel/trace/rv/Kconfig b/kernel/trace/rv/Kconfig
new file mode 100644
index 000000000000..831779607e84
--- /dev/null
+++ b/kernel/trace/rv/Kconfig
@@ -0,0 +1,78 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+config DA_MON_EVENTS
+ bool
+
+config DA_MON_EVENTS_IMPLICIT
+ select DA_MON_EVENTS
+ bool
+
+config DA_MON_EVENTS_ID
+ select DA_MON_EVENTS
+ bool
+
+menuconfig RV
+ bool "Runtime Verification"
+ depends on TRACING
+ help
+ Enable the kernel runtime verification infrastructure. RV is a
+ lightweight (yet rigorous) method that complements classical
+ exhaustive verification techniques (such as model checking and
+ theorem proving). RV works by analyzing the trace of the system's
+ actual execution, comparing it against a formal specification of
+ the system behavior.
+
+ For further information, see:
+ Documentation/trace/rv/runtime-verification.rst
+
+config RV_MON_WIP
+ depends on RV
+ depends on PREEMPT_TRACER
+ select DA_MON_EVENTS_IMPLICIT
+ bool "wip monitor"
+ help
+ Enable wip (wakeup in preemptive) sample monitor that illustrates
+ the usage of per-cpu monitors, and one limitation of the
+ preempt_disable/enable events.
+
+ For further information, see:
+ Documentation/trace/rv/monitor_wip.rst
+
+config RV_MON_WWNR
+ depends on RV
+ select DA_MON_EVENTS_ID
+ bool "wwnr monitor"
+ help
+ Enable the wwnr (wakeup while not running) sample monitor; this is a
+ sample monitor that illustrates the usage of a per-task monitor.
+ The model is broken on purpose: it serves to test reactors.
+
+ For further information, see:
+ Documentation/trace/rv/monitor_wwnr.rst
+
+config RV_REACTORS
+ bool "Runtime verification reactors"
+ default y
+ depends on RV
+ help
+ Enables the online runtime verification reactors. A runtime
+ monitor can cause a reaction to the detection of an exception
+ on the model's execution. By default, the monitors have
+ tracing reactions, printing the monitor output via tracepoints,
+ but other reactions can be added (on-demand) via this interface.
+
+config RV_REACT_PRINTK
+ bool "Printk reactor"
+ depends on RV_REACTORS
+ default y
+ help
+ Enables the printk reactor. The printk reactor emits a printk()
+ message if an exception is found.
+
+config RV_REACT_PANIC
+ bool "Panic reactor"
+ depends on RV_REACTORS
+ default y
+ help
+ Enables the panic reactor. The panic reactor emits a printk()
+ message if an exception is found and panic()s the system.
diff --git a/kernel/trace/rv/Makefile b/kernel/trace/rv/Makefile
new file mode 100644
index 000000000000..963d14875b45
--- /dev/null
+++ b/kernel/trace/rv/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_RV) += rv.o
+obj-$(CONFIG_RV_MON_WIP) += monitors/wip/wip.o
+obj-$(CONFIG_RV_MON_WWNR) += monitors/wwnr/wwnr.o
+obj-$(CONFIG_RV_REACTORS) += rv_reactors.o
+obj-$(CONFIG_RV_REACT_PRINTK) += reactor_printk.o
+obj-$(CONFIG_RV_REACT_PANIC) += reactor_panic.o
diff --git a/kernel/trace/rv/monitors/wip/wip.c b/kernel/trace/rv/monitors/wip/wip.c
new file mode 100644
index 000000000000..83cace53b9fa
--- /dev/null
+++ b/kernel/trace/rv/monitors/wip/wip.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/ftrace.h>
+#include <linux/tracepoint.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/rv.h>
+#include <rv/instrumentation.h>
+#include <rv/da_monitor.h>
+
+#define MODULE_NAME "wip"
+
+#include <trace/events/rv.h>
+#include <trace/events/sched.h>
+#include <trace/events/preemptirq.h>
+
+#include "wip.h"
+
+struct rv_monitor rv_wip;
+DECLARE_DA_MON_PER_CPU(wip, unsigned char);
+
+static void handle_preempt_disable(void *data, unsigned long ip, unsigned long parent_ip)
+{
+ da_handle_event_wip(preempt_disable_wip);
+}
+
+static void handle_preempt_enable(void *data, unsigned long ip, unsigned long parent_ip)
+{
+ da_handle_start_event_wip(preempt_enable_wip);
+}
+
+static void handle_sched_waking(void *data, struct task_struct *task)
+{
+ da_handle_event_wip(sched_waking_wip);
+}
+
+static int enable_wip(void)
+{
+ int retval;
+
+ retval = da_monitor_init_wip();
+ if (retval)
+ return retval;
+
+ rv_attach_trace_probe("wip", preempt_enable, handle_preempt_enable);
+ rv_attach_trace_probe("wip", sched_waking, handle_sched_waking);
+ rv_attach_trace_probe("wip", preempt_disable, handle_preempt_disable);
+
+ return 0;
+}
+
+static void disable_wip(void)
+{
+ rv_wip.enabled = 0;
+
+ rv_detach_trace_probe("wip", preempt_disable, handle_preempt_disable);
+ rv_detach_trace_probe("wip", preempt_enable, handle_preempt_enable);
+ rv_detach_trace_probe("wip", sched_waking, handle_sched_waking);
+
+ da_monitor_destroy_wip();
+}
+
+struct rv_monitor rv_wip = {
+ .name = "wip",
+ .description = "wakeup in preemptive per-cpu testing monitor.",
+ .enable = enable_wip,
+ .disable = disable_wip,
+ .reset = da_monitor_reset_all_wip,
+ .enabled = 0,
+};
+
+static int register_wip(void)
+{
+ rv_register_monitor(&rv_wip);
+ return 0;
+}
+
+static void unregister_wip(void)
+{
+ rv_unregister_monitor(&rv_wip);
+}
+
+module_init(register_wip);
+module_exit(unregister_wip);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Daniel Bristot de Oliveira <bristot@kernel.org>");
+MODULE_DESCRIPTION("wip: wakeup in preemptive - per-cpu sample monitor.");
diff --git a/kernel/trace/rv/monitors/wip/wip.h b/kernel/trace/rv/monitors/wip/wip.h
new file mode 100644
index 000000000000..c1c47e2305ef
--- /dev/null
+++ b/kernel/trace/rv/monitors/wip/wip.h
@@ -0,0 +1,46 @@
+/*
+ * Automatically generated C representation of wip automaton
+ * For further information about this format, see kernel documentation:
+ * Documentation/trace/rv/deterministic_automata.rst
+ */
+
+enum states_wip {
+ preemptive_wip = 0,
+ non_preemptive_wip,
+ state_max_wip
+};
+
+#define INVALID_STATE state_max_wip
+
+enum events_wip {
+ preempt_disable_wip = 0,
+ preempt_enable_wip,
+ sched_waking_wip,
+ event_max_wip
+};
+
+struct automaton_wip {
+ char *state_names[state_max_wip];
+ char *event_names[event_max_wip];
+ unsigned char function[state_max_wip][event_max_wip];
+ unsigned char initial_state;
+ bool final_states[state_max_wip];
+};
+
+struct automaton_wip automaton_wip = {
+ .state_names = {
+ "preemptive",
+ "non_preemptive"
+ },
+ .event_names = {
+ "preempt_disable",
+ "preempt_enable",
+ "sched_waking"
+ },
+ .function = {
+ { non_preemptive_wip, INVALID_STATE, INVALID_STATE },
+ { INVALID_STATE, preemptive_wip, non_preemptive_wip },
+ },
+ .initial_state = preemptive_wip,
+ .final_states = { 1, 0 },
+};
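The generated header above encodes the monitor as a plain transition table. A hedged sketch (not part of the patch, relying only on the definitions in wip.h) of how such a table is stepped, which is essentially what the da_monitor helpers do for the monitor:

/*
 * Step the wip automaton: returns the next state, or INVALID_STATE if the
 * event is not allowed in the current state (an exception for the reactors).
 */
static unsigned char wip_next_state(unsigned char curr, enum events_wip event)
{
	return automaton_wip.function[curr][event];
}

/* For example, preempt_disable in the preemptive state moves to non_preemptive: */
/* wip_next_state(preemptive_wip, preempt_disable_wip) == non_preemptive_wip     */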
diff --git a/kernel/trace/rv/monitors/wwnr/wwnr.c b/kernel/trace/rv/monitors/wwnr/wwnr.c
new file mode 100644
index 000000000000..599225d9cf38
--- /dev/null
+++ b/kernel/trace/rv/monitors/wwnr/wwnr.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/ftrace.h>
+#include <linux/tracepoint.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/rv.h>
+#include <rv/instrumentation.h>
+#include <rv/da_monitor.h>
+
+#define MODULE_NAME "wwnr"
+
+#include <trace/events/rv.h>
+#include <trace/events/sched.h>
+
+#include "wwnr.h"
+
+struct rv_monitor rv_wwnr;
+DECLARE_DA_MON_PER_TASK(wwnr, unsigned char);
+
+static void handle_switch(void *data, bool preempt, struct task_struct *p,
+ struct task_struct *n, unsigned int prev_state)
+{
+ /* start monitoring only after the first suspension */
+ if (prev_state == TASK_INTERRUPTIBLE)
+ da_handle_start_event_wwnr(p, switch_out_wwnr);
+ else
+ da_handle_event_wwnr(p, switch_out_wwnr);
+
+ da_handle_event_wwnr(n, switch_in_wwnr);
+}
+
+static void handle_wakeup(void *data, struct task_struct *p)
+{
+ da_handle_event_wwnr(p, wakeup_wwnr);
+}
+
+static int enable_wwnr(void)
+{
+ int retval;
+
+ retval = da_monitor_init_wwnr();
+ if (retval)
+ return retval;
+
+ rv_attach_trace_probe("wwnr", sched_switch, handle_switch);
+ rv_attach_trace_probe("wwnr", sched_wakeup, handle_wakeup);
+
+ return 0;
+}
+
+static void disable_wwnr(void)
+{
+ rv_wwnr.enabled = 0;
+
+ rv_detach_trace_probe("wwnr", sched_switch, handle_switch);
+ rv_detach_trace_probe("wwnr", sched_wakeup, handle_wakeup);
+
+ da_monitor_destroy_wwnr();
+}
+
+struct rv_monitor rv_wwnr = {
+ .name = "wwnr",
+ .description = "wakeup while not running per-task testing model.",
+ .enable = enable_wwnr,
+ .disable = disable_wwnr,
+ .reset = da_monitor_reset_all_wwnr,
+ .enabled = 0,
+};
+
+static int register_wwnr(void)
+{
+ rv_register_monitor(&rv_wwnr);
+ return 0;
+}
+
+static void unregister_wwnr(void)
+{
+ rv_unregister_monitor(&rv_wwnr);
+}
+
+module_init(register_wwnr);
+module_exit(unregister_wwnr);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Daniel Bristot de Oliveira <bristot@kernel.org>");
+MODULE_DESCRIPTION("wwnr: wakeup while not running monitor");
diff --git a/kernel/trace/rv/monitors/wwnr/wwnr.h b/kernel/trace/rv/monitors/wwnr/wwnr.h
new file mode 100644
index 000000000000..d1afe55cdd4c
--- /dev/null
+++ b/kernel/trace/rv/monitors/wwnr/wwnr.h
@@ -0,0 +1,46 @@
+/*
+ * Automatically generated C representation of wwnr automaton
+ * For further information about this format, see kernel documentation:
+ * Documentation/trace/rv/deterministic_automata.rst
+ */
+
+enum states_wwnr {
+ not_running_wwnr = 0,
+ running_wwnr,
+ state_max_wwnr
+};
+
+#define INVALID_STATE state_max_wwnr
+
+enum events_wwnr {
+ switch_in_wwnr = 0,
+ switch_out_wwnr,
+ wakeup_wwnr,
+ event_max_wwnr
+};
+
+struct automaton_wwnr {
+ char *state_names[state_max_wwnr];
+ char *event_names[event_max_wwnr];
+ unsigned char function[state_max_wwnr][event_max_wwnr];
+ unsigned char initial_state;
+ bool final_states[state_max_wwnr];
+};
+
+struct automaton_wwnr automaton_wwnr = {
+ .state_names = {
+ "not_running",
+ "running"
+ },
+ .event_names = {
+ "switch_in",
+ "switch_out",
+ "wakeup"
+ },
+ .function = {
+ { running_wwnr, INVALID_STATE, not_running_wwnr },
+ { INVALID_STATE, not_running_wwnr, INVALID_STATE },
+ },
+ .initial_state = not_running_wwnr,
+ .final_states = { 1, 0 },
+};
diff --git a/kernel/trace/rv/reactor_panic.c b/kernel/trace/rv/reactor_panic.c
new file mode 100644
index 000000000000..b698d05dd069
--- /dev/null
+++ b/kernel/trace/rv/reactor_panic.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019-2022 Red Hat, Inc. Daniel Bristot de Oliveira <bristot@kernel.org>
+ *
+ * Panic RV reactor:
+ * Prints the exception msg to the kernel message log and panic().
+ */
+
+#include <linux/ftrace.h>
+#include <linux/tracepoint.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/rv.h>
+
+static void rv_panic_reaction(char *msg)
+{
+ panic(msg);
+}
+
+static struct rv_reactor rv_panic = {
+ .name = "panic",
+ .description = "panic the system if an exception is found.",
+ .react = rv_panic_reaction
+};
+
+static int register_react_panic(void)
+{
+ rv_register_reactor(&rv_panic);
+ return 0;
+}
+
+static void unregister_react_panic(void)
+{
+ rv_unregister_reactor(&rv_panic);
+}
+
+module_init(register_react_panic);
+module_exit(unregister_react_panic);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Daniel Bristot de Oliveira");
+MODULE_DESCRIPTION("panic rv reactor: panic if an exception is found.");
diff --git a/kernel/trace/rv/reactor_printk.c b/kernel/trace/rv/reactor_printk.c
new file mode 100644
index 000000000000..31899f953af4
--- /dev/null
+++ b/kernel/trace/rv/reactor_printk.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019-2022 Red Hat, Inc. Daniel Bristot de Oliveira <bristot@kernel.org>
+ *
+ * Printk RV reactor:
+ * Prints the exception msg to the kernel message log.
+ */
+#include <linux/ftrace.h>
+#include <linux/tracepoint.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/rv.h>
+
+static void rv_printk_reaction(char *msg)
+{
+ printk_deferred(msg);
+}
+
+static struct rv_reactor rv_printk = {
+ .name = "printk",
+ .description = "prints the exception msg to the kernel message log.",
+ .react = rv_printk_reaction
+};
+
+static int register_react_printk(void)
+{
+ rv_register_reactor(&rv_printk);
+ return 0;
+}
+
+static void unregister_react_printk(void)
+{
+ rv_unregister_reactor(&rv_printk);
+}
+
+module_init(register_react_printk);
+module_exit(unregister_react_printk);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Daniel Bristot de Oliveira");
+MODULE_DESCRIPTION("printk rv reactor: printk if an exception is hit.");
diff --git a/kernel/trace/rv/rv.c b/kernel/trace/rv/rv.c
new file mode 100644
index 000000000000..6c97cc2d754a
--- /dev/null
+++ b/kernel/trace/rv/rv.c
@@ -0,0 +1,799 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019-2022 Red Hat, Inc. Daniel Bristot de Oliveira <bristot@kernel.org>
+ *
+ * This is the online Runtime Verification (RV) interface.
+ *
+ * RV is a lightweight (yet rigorous) method that complements classical
+ * exhaustive verification techniques (such as model checking and
+ * theorem proving) with a more practical approach to complex systems.
+ *
+ * RV works by analyzing the trace of the system's actual execution,
+ * comparing it against a formal specification of the system behavior.
+ * RV can give precise information on the runtime behavior of the
+ * monitored system while enabling a reaction to unexpected
+ * events, avoiding, for example, the propagation of a failure in
+ * safety-critical systems.
+ *
+ * The development of this interface is rooted in the following
+ * paper:
+ *
+ * De Oliveira, Daniel Bristot; Cucinotta, Tommaso; De Oliveira, Romulo
+ * Silva. Efficient formal verification for the Linux kernel. In:
+ * International Conference on Software Engineering and Formal Methods.
+ * Springer, Cham, 2019. p. 315-332.
+ *
+ * And:
+ *
+ * De Oliveira, Daniel Bristot, et al. Automata-based formal analysis
+ * and verification of the real-time Linux kernel. PhD Thesis, 2020.
+ *
+ * == Runtime monitor interface ==
+ *
+ * A monitor is the central part of the runtime verification of a system.
+ *
+ * The monitor stands in between the formal specification of the desired
+ * (or undesired) behavior, and the trace of the actual system.
+ *
+ * In Linux terms, the runtime verification monitors are encapsulated
+ * inside the "RV monitor" abstraction. A RV monitor includes a reference
+ * model of the system, a set of instances of the monitor (per-cpu monitor,
+ * per-task monitor, and so on), and the helper functions that glue the
+ * monitor to the system via trace. Generally, a monitor includes some form
+ * of trace output as a reaction to event parsing and exceptions,
+ * as depicted below:
+ *
+ * Linux +----- RV Monitor ----------------------------------+ Formal
+ * Realm | | Realm
+ * +-------------------+ +----------------+ +-----------------+
+ * | Linux kernel | | Monitor | | Reference |
+ * | Tracing | -> | Instance(s) | <- | Model |
+ * | (instrumentation) | | (verification) | | (specification) |
+ * +-------------------+ +----------------+ +-----------------+
+ * | | |
+ * | V |
+ * | +----------+ |
+ * | | Reaction | |
+ * | +--+--+--+-+ |
+ * | | | | |
+ * | | | +-> trace output ? |
+ * +------------------------|--|----------------------+
+ * | +----> panic ?
+ * +-------> <user-specified>
+ *
+ * This file implements the interface for loading RV monitors and
+ * for controlling the verification session.
+ *
+ * == Registering monitors ==
+ *
+ * The struct rv_monitor defines a set of callback functions to control
+ * a verification session. For instance, when a given monitor is enabled,
+ * the "enable" callback function is called to hook the instrumentation
+ * functions to the kernel trace events. The "disable" function is called
+ * when disabling the verification session.
+ *
+ * An RV monitor is registered via:
+ * int rv_register_monitor(struct rv_monitor *monitor);
+ * And unregistered via:
+ * int rv_unregister_monitor(struct rv_monitor *monitor);
+ *
+ * == User interface ==
+ *
+ * The user interface resembles the kernel tracing interface. It presents
+ * these files:
+ *
+ * "available_monitors"
+ * - List the available monitors, one per line.
+ *
+ * For example:
+ * # cat available_monitors
+ * wip
+ * wwnr
+ *
+ * "enabled_monitors"
+ * - Lists the enabled monitors, one per line;
+ * - Writing to it enables a given monitor;
+ * - Writing a monitor name with a '!' prefix disables it;
+ * - Truncating the file disables all enabled monitors.
+ *
+ * For example:
+ * # cat enabled_monitors
+ * # echo wip > enabled_monitors
+ * # echo wwnr >> enabled_monitors
+ * # cat enabled_monitors
+ * wip
+ * wwnr
+ * # echo '!wip' >> enabled_monitors
+ * # cat enabled_monitors
+ * wwnr
+ * # echo > enabled_monitors
+ * # cat enabled_monitors
+ * #
+ *
+ * Note that more than one monitor can be enabled concurrently.
+ *
+ * "monitoring_on"
+ * - It is a general on/off switch for monitoring. Note
+ * that it does not disable enabled monitors or detach events,
+ * but stops the per-entity monitors from monitoring the events
+ * received from the instrumentation. It resembles the "tracing_on"
+ * switch.
+ *
+ * "monitors/"
+ * Each monitor has its own directory inside "monitors/", where
+ * the monitor-specific files are presented.
+ * The "monitors/" directory resembles the "events" directory on
+ * tracefs.
+ *
+ * For example:
+ * # cd monitors/wip/
+ * # ls
+ * desc enable
+ * # cat desc
+ * auto-generated wakeup in preemptive monitor.
+ * # cat enable
+ * 0
+ *
+ * For further information, see:
+ * Documentation/trace/rv/runtime-verification.rst
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+
+#ifdef CONFIG_DA_MON_EVENTS
+#define CREATE_TRACE_POINTS
+#include <trace/events/rv.h>
+#endif
+
+#include "rv.h"
+
+DEFINE_MUTEX(rv_interface_lock);
+
+static struct rv_interface rv_root;
+
+struct dentry *get_monitors_root(void)
+{
+ return rv_root.monitors_dir;
+}
+
+/*
+ * Interface for monitor registration.
+ */
+static LIST_HEAD(rv_monitors_list);
+
+static int task_monitor_count;
+static bool task_monitor_slots[RV_PER_TASK_MONITORS];
+
+int rv_get_task_monitor_slot(void)
+{
+ int i;
+
+ lockdep_assert_held(&rv_interface_lock);
+
+ if (task_monitor_count == RV_PER_TASK_MONITORS)
+ return -EBUSY;
+
+ task_monitor_count++;
+
+ for (i = 0; i < RV_PER_TASK_MONITORS; i++) {
+ if (task_monitor_slots[i] == false) {
+ task_monitor_slots[i] = true;
+ return i;
+ }
+ }
+
+ WARN_ONCE(1, "RV task_monitor_count and slots are out of sync\n");
+
+ return -EINVAL;
+}
+
+void rv_put_task_monitor_slot(int slot)
+{
+ lockdep_assert_held(&rv_interface_lock);
+
+ if (slot < 0 || slot >= RV_PER_TASK_MONITORS) {
+ WARN_ONCE(1, "RV releasing an invalid slot!: %d\n", slot);
+ return;
+ }
+
+ WARN_ONCE(!task_monitor_slots[slot], "RV releasing unused task_monitor_slots: %d\n",
+ slot);
+
+ task_monitor_count--;
+ task_monitor_slots[slot] = false;
+}
+
+/*
+ * This section collects the per-monitor files and folders.
+ */
+static ssize_t monitor_enable_read_data(struct file *filp, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct rv_monitor_def *mdef = filp->private_data;
+ const char *buff;
+
+ buff = mdef->monitor->enabled ? "1\n" : "0\n";
+
+ return simple_read_from_buffer(user_buf, count, ppos, buff, strlen(buff)+1);
+}
+
+/*
+ * __rv_disable_monitor - disable an enabled monitor
+ */
+static int __rv_disable_monitor(struct rv_monitor_def *mdef, bool sync)
+{
+ lockdep_assert_held(&rv_interface_lock);
+
+ if (mdef->monitor->enabled) {
+ mdef->monitor->enabled = 0;
+ mdef->monitor->disable();
+
+ /*
+ * Wait for the execution of all events to finish.
+ * Otherwise, the data used by the monitor could
+ * be inconsistent, e.g., if the monitor is re-enabled.
+ */
+ if (sync)
+ tracepoint_synchronize_unregister();
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * rv_disable_monitor - disable a given runtime monitor
+ *
+ * Returns 0 on success.
+ */
+int rv_disable_monitor(struct rv_monitor_def *mdef)
+{
+ __rv_disable_monitor(mdef, true);
+ return 0;
+}
+
+/**
+ * rv_enable_monitor - enable a given runtime monitor
+ *
+ * Returns 0 on success, error otherwise.
+ */
+int rv_enable_monitor(struct rv_monitor_def *mdef)
+{
+ int retval;
+
+ lockdep_assert_held(&rv_interface_lock);
+
+ if (mdef->monitor->enabled)
+ return 0;
+
+ retval = mdef->monitor->enable();
+
+ if (!retval)
+ mdef->monitor->enabled = 1;
+
+ return retval;
+}
+
+/*
+ * interface for enabling/disabling a monitor.
+ */
+static ssize_t monitor_enable_write_data(struct file *filp, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct rv_monitor_def *mdef = filp->private_data;
+ int retval;
+ bool val;
+
+ retval = kstrtobool_from_user(user_buf, count, &val);
+ if (retval)
+ return retval;
+
+ retval = count;
+
+ mutex_lock(&rv_interface_lock);
+
+ if (val)
+ retval = rv_enable_monitor(mdef);
+ else
+ retval = rv_disable_monitor(mdef);
+
+ mutex_unlock(&rv_interface_lock);
+
+ return retval ? : count;
+}
+
+static const struct file_operations interface_enable_fops = {
+ .open = simple_open,
+ .llseek = no_llseek,
+ .write = monitor_enable_write_data,
+ .read = monitor_enable_read_data,
+};
+
+/*
+ * Interface to read the monitor's description.
+ */
+static ssize_t monitor_desc_read_data(struct file *filp, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct rv_monitor_def *mdef = filp->private_data;
+ char buff[256];
+
+ memset(buff, 0, sizeof(buff));
+
+ snprintf(buff, sizeof(buff), "%s\n", mdef->monitor->description);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buff, strlen(buff) + 1);
+}
+
+static const struct file_operations interface_desc_fops = {
+ .open = simple_open,
+ .llseek = no_llseek,
+ .read = monitor_desc_read_data,
+};
+
+/*
+ * During the registration of a monitor, this function creates
+ * the monitor dir, where the specific options of the monitor
+ * are exposed.
+ */
+static int create_monitor_dir(struct rv_monitor_def *mdef)
+{
+ struct dentry *root = get_monitors_root();
+ const char *name = mdef->monitor->name;
+ struct dentry *tmp;
+ int retval;
+
+ mdef->root_d = rv_create_dir(name, root);
+ if (!mdef->root_d)
+ return -ENOMEM;
+
+ tmp = rv_create_file("enable", RV_MODE_WRITE, mdef->root_d, mdef, &interface_enable_fops);
+ if (!tmp) {
+ retval = -ENOMEM;
+ goto out_remove_root;
+ }
+
+ tmp = rv_create_file("desc", RV_MODE_READ, mdef->root_d, mdef, &interface_desc_fops);
+ if (!tmp) {
+ retval = -ENOMEM;
+ goto out_remove_root;
+ }
+
+ retval = reactor_populate_monitor(mdef);
+ if (retval)
+ goto out_remove_root;
+
+ return 0;
+
+out_remove_root:
+ rv_remove(mdef->root_d);
+ return retval;
+}
+
+/*
+ * Available/enabled monitors shared seq functions.
+ */
+static int monitors_show(struct seq_file *m, void *p)
+{
+ struct rv_monitor_def *mon_def = p;
+
+ seq_printf(m, "%s\n", mon_def->monitor->name);
+ return 0;
+}
+
+/*
+ * Used by the seq file operations at the end of a read
+ * operation.
+ */
+static void monitors_stop(struct seq_file *m, void *p)
+{
+ mutex_unlock(&rv_interface_lock);
+}
+
+/*
+ * Available monitor seq functions.
+ */
+static void *available_monitors_start(struct seq_file *m, loff_t *pos)
+{
+ mutex_lock(&rv_interface_lock);
+ return seq_list_start(&rv_monitors_list, *pos);
+}
+
+static void *available_monitors_next(struct seq_file *m, void *p, loff_t *pos)
+{
+ return seq_list_next(p, &rv_monitors_list, pos);
+}
+
+/*
+ * Enabled monitors seq functions.
+ */
+static void *enabled_monitors_next(struct seq_file *m, void *p, loff_t *pos)
+{
+ struct rv_monitor_def *m_def = p;
+
+ (*pos)++;
+
+ list_for_each_entry_continue(m_def, &rv_monitors_list, list) {
+ if (m_def->monitor->enabled)
+ return m_def;
+ }
+
+ return NULL;
+}
+
+static void *enabled_monitors_start(struct seq_file *m, loff_t *pos)
+{
+ struct rv_monitor_def *m_def;
+ loff_t l;
+
+ mutex_lock(&rv_interface_lock);
+
+ if (list_empty(&rv_monitors_list))
+ return NULL;
+
+ m_def = list_entry(&rv_monitors_list, struct rv_monitor_def, list);
+
+ for (l = 0; l <= *pos; ) {
+ m_def = enabled_monitors_next(m, m_def, &l);
+ if (!m_def)
+ break;
+ }
+
+ return m_def;
+}
+
+/*
+ * available/enabled monitors seq definition.
+ */
+static const struct seq_operations available_monitors_seq_ops = {
+ .start = available_monitors_start,
+ .next = available_monitors_next,
+ .stop = monitors_stop,
+ .show = monitors_show
+};
+
+static const struct seq_operations enabled_monitors_seq_ops = {
+ .start = enabled_monitors_start,
+ .next = enabled_monitors_next,
+ .stop = monitors_stop,
+ .show = monitors_show
+};
+
+/*
+ * available_monitors interface.
+ */
+static int available_monitors_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &available_monitors_seq_ops);
+}
+
+static const struct file_operations available_monitors_ops = {
+ .open = available_monitors_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release
+};
+
+/*
+ * enabled_monitors interface.
+ */
+static void disable_all_monitors(void)
+{
+ struct rv_monitor_def *mdef;
+ int enabled = 0;
+
+ mutex_lock(&rv_interface_lock);
+
+ list_for_each_entry(mdef, &rv_monitors_list, list)
+ enabled += __rv_disable_monitor(mdef, false);
+
+ if (enabled) {
+ /*
+ * Wait for the execution of all events to finish.
+ * Otherwise, the data used by the monitor could
+ * be inconsistent, e.g., if the monitor is re-enabled.
+ */
+ tracepoint_synchronize_unregister();
+ }
+
+ mutex_unlock(&rv_interface_lock);
+}
+
+static int enabled_monitors_open(struct inode *inode, struct file *file)
+{
+ if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
+ disable_all_monitors();
+
+ return seq_open(file, &enabled_monitors_seq_ops);
+}
+
+static ssize_t enabled_monitors_write(struct file *filp, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char buff[MAX_RV_MONITOR_NAME_SIZE + 2];
+ struct rv_monitor_def *mdef;
+ int retval = -EINVAL;
+ bool enable = true;
+ char *ptr = buff;
+ int len;
+
+ if (count < 1 || count > MAX_RV_MONITOR_NAME_SIZE + 1)
+ return -EINVAL;
+
+ memset(buff, 0, sizeof(buff));
+
+ retval = simple_write_to_buffer(buff, sizeof(buff) - 1, ppos, user_buf, count);
+ if (retval < 0)
+ return -EFAULT;
+
+ ptr = strim(buff);
+
+ if (ptr[0] == '!') {
+ enable = false;
+ ptr++;
+ }
+
+ len = strlen(ptr);
+ if (!len)
+ return count;
+
+ mutex_lock(&rv_interface_lock);
+
+ retval = -EINVAL;
+
+ list_for_each_entry(mdef, &rv_monitors_list, list) {
+ if (strcmp(ptr, mdef->monitor->name) != 0)
+ continue;
+
+ /*
+ * Monitor found!
+ */
+ if (enable)
+ retval = rv_enable_monitor(mdef);
+ else
+ retval = rv_disable_monitor(mdef);
+
+ if (!retval)
+ retval = count;
+
+ break;
+ }
+
+ mutex_unlock(&rv_interface_lock);
+ return retval;
+}
+
+static const struct file_operations enabled_monitors_ops = {
+ .open = enabled_monitors_open,
+ .read = seq_read,
+ .write = enabled_monitors_write,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/*
+ * The monitoring_on global switch.
+ */
+static bool __read_mostly monitoring_on;
+
+/**
+ * rv_monitoring_on - checks if monitoring is on
+ *
+ * Returns true if on, false otherwise.
+ */
+bool rv_monitoring_on(void)
+{
+ /* Ensures that concurrent monitors read consistent monitoring_on */
+ smp_rmb();
+ return READ_ONCE(monitoring_on);
+}
+
+/*
+ * monitoring_on general switch.
+ */
+static ssize_t monitoring_on_read_data(struct file *filp, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buff;
+
+ buff = rv_monitoring_on() ? "1\n" : "0\n";
+
+ return simple_read_from_buffer(user_buf, count, ppos, buff, strlen(buff) + 1);
+}
+
+static void turn_monitoring_off(void)
+{
+ WRITE_ONCE(monitoring_on, false);
+ /* Ensures that concurrent monitors read consistent monitoring_on */
+ smp_wmb();
+}
+
+static void reset_all_monitors(void)
+{
+ struct rv_monitor_def *mdef;
+
+ list_for_each_entry(mdef, &rv_monitors_list, list) {
+ if (mdef->monitor->enabled)
+ mdef->monitor->reset();
+ }
+}
+
+static void turn_monitoring_on(void)
+{
+ WRITE_ONCE(monitoring_on, true);
+ /* Ensures that concurrent monitors read consistent monitoring_on */
+ smp_wmb();
+}
+
+static void turn_monitoring_on_with_reset(void)
+{
+ lockdep_assert_held(&rv_interface_lock);
+
+ if (rv_monitoring_on())
+ return;
+
+ /*
+ * Monitors might be out of sync with the system if events were not
+ * processed because of !rv_monitoring_on().
+ *
+ * Reset all monitors, forcing a re-sync.
+ */
+ reset_all_monitors();
+ turn_monitoring_on();
+}
+
+static ssize_t monitoring_on_write_data(struct file *filp, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ int retval;
+ bool val;
+
+ retval = kstrtobool_from_user(user_buf, count, &val);
+ if (retval)
+ return retval;
+
+ mutex_lock(&rv_interface_lock);
+
+ if (val)
+ turn_monitoring_on_with_reset();
+ else
+ turn_monitoring_off();
+
+ /*
+ * Wait for the execution of all events to finish
+ * before returning to user-space.
+ */
+ tracepoint_synchronize_unregister();
+
+ mutex_unlock(&rv_interface_lock);
+
+ return count;
+}
+
+static const struct file_operations monitoring_on_fops = {
+ .open = simple_open,
+ .llseek = no_llseek,
+ .write = monitoring_on_write_data,
+ .read = monitoring_on_read_data,
+};
+
+static void destroy_monitor_dir(struct rv_monitor_def *mdef)
+{
+ reactor_cleanup_monitor(mdef);
+ rv_remove(mdef->root_d);
+}
+
+/**
+ * rv_register_monitor - register an RV monitor.
+ * @monitor: The rv_monitor to be registered.
+ *
+ * Returns 0 if successful, error otherwise.
+ */
+int rv_register_monitor(struct rv_monitor *monitor)
+{
+ struct rv_monitor_def *r;
+ int retval = 0;
+
+ if (strlen(monitor->name) >= MAX_RV_MONITOR_NAME_SIZE) {
+ pr_info("Monitor %s has a name longer than %d\n", monitor->name,
+ MAX_RV_MONITOR_NAME_SIZE);
+ return -1;
+ }
+
+ mutex_lock(&rv_interface_lock);
+
+ list_for_each_entry(r, &rv_monitors_list, list) {
+ if (strcmp(monitor->name, r->monitor->name) == 0) {
+ pr_info("Monitor %s is already registered\n", monitor->name);
+ retval = -1;
+ goto out_unlock;
+ }
+ }
+
+ r = kzalloc(sizeof(struct rv_monitor_def), GFP_KERNEL);
+ if (!r) {
+ retval = -ENOMEM;
+ goto out_unlock;
+ }
+
+ r->monitor = monitor;
+
+ retval = create_monitor_dir(r);
+ if (retval) {
+ kfree(r);
+ goto out_unlock;
+ }
+
+ list_add_tail(&r->list, &rv_monitors_list);
+
+out_unlock:
+ mutex_unlock(&rv_interface_lock);
+ return retval;
+}
+
+/**
+ * rv_unregister_monitor - unregister an RV monitor.
+ * @monitor: The rv_monitor to be unregistered.
+ *
+ * Returns 0 if successful, error otherwise.
+ */
+int rv_unregister_monitor(struct rv_monitor *monitor)
+{
+ struct rv_monitor_def *ptr, *next;
+
+ mutex_lock(&rv_interface_lock);
+
+ list_for_each_entry_safe(ptr, next, &rv_monitors_list, list) {
+ if (strcmp(monitor->name, ptr->monitor->name) == 0) {
+ rv_disable_monitor(ptr);
+ list_del(&ptr->list);
+ destroy_monitor_dir(ptr);
+ }
+ }
+
+ mutex_unlock(&rv_interface_lock);
+ return 0;
+}
+
+int __init rv_init_interface(void)
+{
+ struct dentry *tmp;
+ int retval;
+
+ rv_root.root_dir = rv_create_dir("rv", NULL);
+ if (!rv_root.root_dir)
+ goto out_err;
+
+ rv_root.monitors_dir = rv_create_dir("monitors", rv_root.root_dir);
+ if (!rv_root.monitors_dir)
+ goto out_err;
+
+ tmp = rv_create_file("available_monitors", RV_MODE_READ, rv_root.root_dir, NULL,
+ &available_monitors_ops);
+ if (!tmp)
+ goto out_err;
+
+ tmp = rv_create_file("enabled_monitors", RV_MODE_WRITE, rv_root.root_dir, NULL,
+ &enabled_monitors_ops);
+ if (!tmp)
+ goto out_err;
+
+ tmp = rv_create_file("monitoring_on", RV_MODE_WRITE, rv_root.root_dir, NULL,
+ &monitoring_on_fops);
+ if (!tmp)
+ goto out_err;
+ retval = init_rv_reactors(rv_root.root_dir);
+ if (retval)
+ goto out_err;
+
+ turn_monitoring_on();
+
+ return 0;
+
+out_err:
+ rv_remove(rv_root.root_dir);
+ printk(KERN_ERR "RV: Error while creating the RV interface\n");
+ return 1;
+}
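As a hypothetical sketch of the registration flow rv.c expects (field names follow the uses above: ->name, ->description, ->enable(), ->disable(), ->reset(), ->enabled; the my_* identifiers are placeholders, not part of the patch):

	static int my_enable(void)	{ /* hook the instrumentation */ return 0; }
	static void my_disable(void)	{ /* unhook the instrumentation */ }
	static void my_reset(void)	{ /* re-sync the monitor state */ }

	static struct rv_monitor rv_my_monitor = {
		.name		= "my_monitor",
		.description	= "skeleton monitor, for illustration only.",
		.enable		= my_enable,
		.disable	= my_disable,
		.reset		= my_reset,
		.enabled	= false,
	};

	static int __init my_monitor_init(void)
	{
		return rv_register_monitor(&rv_my_monitor);
	}

	static void __exit my_monitor_exit(void)
	{
		rv_unregister_monitor(&rv_my_monitor);
	}

Once registered, such a monitor would appear under monitors/my_monitor/ with the enable and desc files created by create_monitor_dir() above.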
diff --git a/kernel/trace/rv/rv.h b/kernel/trace/rv/rv.h
new file mode 100644
index 000000000000..db6cb0913dbd
--- /dev/null
+++ b/kernel/trace/rv/rv.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/mutex.h>
+
+struct rv_interface {
+ struct dentry *root_dir;
+ struct dentry *monitors_dir;
+};
+
+#include "../trace.h"
+#include <linux/tracefs.h>
+#include <linux/rv.h>
+
+#define RV_MODE_WRITE TRACE_MODE_WRITE
+#define RV_MODE_READ TRACE_MODE_READ
+
+#define rv_create_dir tracefs_create_dir
+#define rv_create_file tracefs_create_file
+#define rv_remove tracefs_remove
+
+#define MAX_RV_MONITOR_NAME_SIZE 32
+#define MAX_RV_REACTOR_NAME_SIZE 32
+
+extern struct mutex rv_interface_lock;
+
+#ifdef CONFIG_RV_REACTORS
+struct rv_reactor_def {
+ struct list_head list;
+ struct rv_reactor *reactor;
+ /* protected by the monitor interface lock */
+ int counter;
+};
+#endif
+
+struct rv_monitor_def {
+ struct list_head list;
+ struct rv_monitor *monitor;
+ struct dentry *root_d;
+#ifdef CONFIG_RV_REACTORS
+ struct rv_reactor_def *rdef;
+ bool reacting;
+#endif
+ bool task_monitor;
+};
+
+struct dentry *get_monitors_root(void);
+int rv_disable_monitor(struct rv_monitor_def *mdef);
+int rv_enable_monitor(struct rv_monitor_def *mdef);
+
+#ifdef CONFIG_RV_REACTORS
+int reactor_populate_monitor(struct rv_monitor_def *mdef);
+void reactor_cleanup_monitor(struct rv_monitor_def *mdef);
+int init_rv_reactors(struct dentry *root_dir);
+#else
+static inline int reactor_populate_monitor(struct rv_monitor_def *mdef)
+{
+ return 0;
+}
+
+static inline void reactor_cleanup_monitor(struct rv_monitor_def *mdef)
+{
+ return;
+}
+
+static inline int init_rv_reactors(struct dentry *root_dir)
+{
+ return 0;
+}
+#endif
diff --git a/kernel/trace/rv/rv_reactors.c b/kernel/trace/rv/rv_reactors.c
new file mode 100644
index 000000000000..6aae106695b6
--- /dev/null
+++ b/kernel/trace/rv/rv_reactors.c
@@ -0,0 +1,510 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019-2022 Red Hat, Inc. Daniel Bristot de Oliveira <bristot@kernel.org>
+ *
+ * Runtime reactor interface.
+ *
+ * A runtime monitor can cause a reaction to the detection of an
+ * exception in the model's execution. By default, the monitors have
+ * tracing reactions, printing the monitor output via tracepoints.
+ * But other reactions can be added (on-demand) via this interface.
+ *
+ * == Registering reactors ==
+ *
+ * The struct rv_reactor defines a callback function to be executed
+ * in case of a model exception happens. The callback function
+ * receives a message to be (optionally) printed before executing
+ * the reaction.
+ *
+ * An RV reactor is registered via:
+ * int rv_register_reactor(struct rv_reactor *reactor)
+ * And unregistered via:
+ * int rv_unregister_reactor(struct rv_reactor *reactor)
+ *
+ * These functions are exported to modules, enabling reactors to be
+ * dynamically loaded.
+ *
+ * == User interface ==
+ *
+ * The user interface resembles the kernel tracing interface and
+ * presents these files:
+ *
+ * "available_reactors"
+ * - List the available reactors, one per line.
+ *
+ * For example:
+ * # cat available_reactors
+ * nop
+ * panic
+ * printk
+ *
+ * "reacting_on"
+ * - It is an on/off general switch for reactors, disabling
+ * all reactions.
+ *
+ * "monitors/MONITOR/reactors"
+ * - Lists the available reactors, with the reactor selected for the
+ * given MONITOR inside []. The default one is the nop (no operation)
+ * reactor.
+ * - Writing the name of a reactor selects it for the given
+ * MONITOR.
+ *
+ * For example:
+ * # cat monitors/wip/reactors
+ * [nop]
+ * panic
+ * printk
+ * # echo panic > monitors/wip/reactors
+ * # cat monitors/wip/reactors
+ * nop
+ * [panic]
+ * printk
+ */
+
+#include <linux/slab.h>
+
+#include "rv.h"
+
+/*
+ * Interface for reactor registration.
+ */
+static LIST_HEAD(rv_reactors_list);
+
+static struct rv_reactor_def *get_reactor_rdef_by_name(char *name)
+{
+ struct rv_reactor_def *r;
+
+ list_for_each_entry(r, &rv_reactors_list, list) {
+ if (strcmp(name, r->reactor->name) == 0)
+ return r;
+ }
+ return NULL;
+}
+
+/*
+ * Available reactors seq functions.
+ */
+static int reactors_show(struct seq_file *m, void *p)
+{
+ struct rv_reactor_def *rea_def = p;
+
+ seq_printf(m, "%s\n", rea_def->reactor->name);
+ return 0;
+}
+
+static void reactors_stop(struct seq_file *m, void *p)
+{
+ mutex_unlock(&rv_interface_lock);
+}
+
+static void *reactors_start(struct seq_file *m, loff_t *pos)
+{
+ mutex_lock(&rv_interface_lock);
+ return seq_list_start(&rv_reactors_list, *pos);
+}
+
+static void *reactors_next(struct seq_file *m, void *p, loff_t *pos)
+{
+ return seq_list_next(p, &rv_reactors_list, pos);
+}
+
+/*
+ * available_reactors seq definition.
+ */
+static const struct seq_operations available_reactors_seq_ops = {
+ .start = reactors_start,
+ .next = reactors_next,
+ .stop = reactors_stop,
+ .show = reactors_show
+};
+
+/*
+ * available_reactors interface.
+ */
+static int available_reactors_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &available_reactors_seq_ops);
+}
+
+static const struct file_operations available_reactors_ops = {
+ .open = available_reactors_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release
+};
+
+/*
+ * Monitor's reactor file.
+ */
+static int monitor_reactor_show(struct seq_file *m, void *p)
+{
+ struct rv_monitor_def *mdef = m->private;
+ struct rv_reactor_def *rdef = p;
+
+ if (mdef->rdef == rdef)
+ seq_printf(m, "[%s]\n", rdef->reactor->name);
+ else
+ seq_printf(m, "%s\n", rdef->reactor->name);
+ return 0;
+}
+
+/*
+ * monitors/MONITOR/reactors seq definition.
+ */
+static const struct seq_operations monitor_reactors_seq_ops = {
+ .start = reactors_start,
+ .next = reactors_next,
+ .stop = reactors_stop,
+ .show = monitor_reactor_show
+};
+
+static void monitor_swap_reactors(struct rv_monitor_def *mdef, struct rv_reactor_def *rdef,
+ bool reacting)
+{
+ bool monitor_enabled;
+
+ /* nothing to do */
+ if (mdef->rdef == rdef)
+ return;
+
+ monitor_enabled = mdef->monitor->enabled;
+ if (monitor_enabled)
+ rv_disable_monitor(mdef);
+
+ /* swap reactor's usage */
+ mdef->rdef->counter--;
+ rdef->counter++;
+
+ mdef->rdef = rdef;
+ mdef->reacting = reacting;
+ mdef->monitor->react = rdef->reactor->react;
+
+ if (monitor_enabled)
+ rv_enable_monitor(mdef);
+}
+
+static ssize_t
+monitor_reactors_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char buff[MAX_RV_REACTOR_NAME_SIZE + 2];
+ struct rv_monitor_def *mdef;
+ struct rv_reactor_def *rdef;
+ struct seq_file *seq_f;
+ int retval = -EINVAL;
+ bool enable;
+ char *ptr;
+ int len;
+
+ if (count < 1 || count > MAX_RV_REACTOR_NAME_SIZE + 1)
+ return -EINVAL;
+
+ memset(buff, 0, sizeof(buff));
+
+ retval = simple_write_to_buffer(buff, sizeof(buff) - 1, ppos, user_buf, count);
+ if (retval < 0)
+ return -EFAULT;
+
+ ptr = strim(buff);
+
+ len = strlen(ptr);
+ if (!len)
+ return count;
+
+ /*
+ * See monitor_reactors_open()
+ */
+ seq_f = file->private_data;
+ mdef = seq_f->private;
+
+ mutex_lock(&rv_interface_lock);
+
+ retval = -EINVAL;
+
+ list_for_each_entry(rdef, &rv_reactors_list, list) {
+ if (strcmp(ptr, rdef->reactor->name) != 0)
+ continue;
+
+ if (rdef == get_reactor_rdef_by_name("nop"))
+ enable = false;
+ else
+ enable = true;
+
+ monitor_swap_reactors(mdef, rdef, enable);
+
+ retval = count;
+ break;
+ }
+
+ mutex_unlock(&rv_interface_lock);
+
+ return retval;
+}
+
+/*
+ * monitors/MONITOR/reactors interface.
+ */
+static int monitor_reactors_open(struct inode *inode, struct file *file)
+{
+ struct rv_monitor_def *mdef = inode->i_private;
+ struct seq_file *seq_f;
+ int ret;
+
+ ret = seq_open(file, &monitor_reactors_seq_ops);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * seq_open stores the seq_file on the file->private data.
+ */
+ seq_f = file->private_data;
+
+ /*
+ * Copy the create file "private" data to the seq_file private data.
+ */
+ seq_f->private = mdef;
+
+ return 0;
+}
+
+static const struct file_operations monitor_reactors_ops = {
+ .open = monitor_reactors_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+ .write = monitor_reactors_write
+};
+
+static int __rv_register_reactor(struct rv_reactor *reactor)
+{
+ struct rv_reactor_def *r;
+
+ list_for_each_entry(r, &rv_reactors_list, list) {
+ if (strcmp(reactor->name, r->reactor->name) == 0) {
+ pr_info("Reactor %s is already registered\n", reactor->name);
+ return -EINVAL;
+ }
+ }
+
+ r = kzalloc(sizeof(struct rv_reactor_def), GFP_KERNEL);
+ if (!r)
+ return -ENOMEM;
+
+ r->reactor = reactor;
+ r->counter = 0;
+
+ list_add_tail(&r->list, &rv_reactors_list);
+
+ return 0;
+}
+
+/**
+ * rv_register_reactor - register an RV reactor.
+ * @reactor: The rv_reactor to be registered.
+ *
+ * Returns 0 if successful, error otherwise.
+ */
+int rv_register_reactor(struct rv_reactor *reactor)
+{
+ int retval = 0;
+
+ if (strlen(reactor->name) >= MAX_RV_REACTOR_NAME_SIZE) {
+ pr_info("Reactor %s has a name longer than %d\n",
+ reactor->name, MAX_RV_REACTOR_NAME_SIZE);
+ return -EINVAL;
+ }
+
+ mutex_lock(&rv_interface_lock);
+ retval = __rv_register_reactor(reactor);
+ mutex_unlock(&rv_interface_lock);
+ return retval;
+}
+
+/**
+ * rv_unregister_reactor - unregister an RV reactor.
+ * @reactor: The rv_reactor to be unregistered.
+ *
+ * Returns 0 if successful, error otherwise.
+ */
+int rv_unregister_reactor(struct rv_reactor *reactor)
+{
+ struct rv_reactor_def *ptr, *next;
+ int ret = 0;
+
+ mutex_lock(&rv_interface_lock);
+
+ list_for_each_entry_safe(ptr, next, &rv_reactors_list, list) {
+ if (strcmp(reactor->name, ptr->reactor->name) == 0) {
+
+ if (!ptr->counter) {
+ list_del(&ptr->list);
+ } else {
+ printk(KERN_WARNING
+ "rv: the rv_reactor %s is in use by %d monitor(s)\n",
+ ptr->reactor->name, ptr->counter);
+ printk(KERN_WARNING "rv: the rv_reactor %s cannot be removed\n",
+ ptr->reactor->name);
+ ret = -EBUSY;
+ break;
+ }
+ }
+ }
+
+ mutex_unlock(&rv_interface_lock);
+ return ret;
+}
+
+/*
+ * reacting_on interface.
+ */
+static bool __read_mostly reacting_on;
+
+/**
+ * rv_reacting_on - checks if reacting is on
+ *
+ * Returns true if on, false otherwise.
+ */
+bool rv_reacting_on(void)
+{
+ /* Ensures that concurrent monitors read consistent reacting_on */
+ smp_rmb();
+ return READ_ONCE(reacting_on);
+}
+
+static ssize_t reacting_on_read_data(struct file *filp,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buff;
+
+ buff = rv_reacting_on() ? "1\n" : "0\n";
+
+ return simple_read_from_buffer(user_buf, count, ppos, buff, strlen(buff)+1);
+}
+
+static void turn_reacting_off(void)
+{
+ WRITE_ONCE(reacting_on, false);
+ /* Ensures that concurrent monitors read consistent reacting_on */
+ smp_wmb();
+}
+
+static void turn_reacting_on(void)
+{
+ WRITE_ONCE(reacting_on, true);
+ /* Ensures that concurrent monitors read consistent reacting_on */
+ smp_wmb();
+}
+
+static ssize_t reacting_on_write_data(struct file *filp, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ int retval;
+ bool val;
+
+ retval = kstrtobool_from_user(user_buf, count, &val);
+ if (retval)
+ return retval;
+
+ mutex_lock(&rv_interface_lock);
+
+ if (val)
+ turn_reacting_on();
+ else
+ turn_reacting_off();
+
+ /*
+ * Wait for the execution of all events to finish
+ * before returning to user-space.
+ */
+ tracepoint_synchronize_unregister();
+
+ mutex_unlock(&rv_interface_lock);
+
+ return count;
+}
+
+static const struct file_operations reacting_on_fops = {
+ .open = simple_open,
+ .llseek = no_llseek,
+ .write = reacting_on_write_data,
+ .read = reacting_on_read_data,
+};
+
+/**
+ * reactor_populate_monitor - creates the per-monitor reactors file
+ * @mdef: monitor's definition.
+ *
+ * Returns 0 if successful, error otherwise.
+ */
+int reactor_populate_monitor(struct rv_monitor_def *mdef)
+{
+ struct dentry *tmp;
+
+ tmp = rv_create_file("reactors", RV_MODE_WRITE, mdef->root_d, mdef, &monitor_reactors_ops);
+ if (!tmp)
+ return -ENOMEM;
+
+ /*
+ * Configure the nop reactor as the default.
+ */
+ mdef->rdef = get_reactor_rdef_by_name("nop");
+ mdef->rdef->counter++;
+ mdef->reacting = false;
+
+ return 0;
+}
+
+/**
+ * reactor_cleanup_monitor - cleanup a monitor reference
+ * @mdef: monitor's definition.
+ */
+void reactor_cleanup_monitor(struct rv_monitor_def *mdef)
+{
+ lockdep_assert_held(&rv_interface_lock);
+ mdef->rdef->counter--;
+ WARN_ON_ONCE(mdef->rdef->counter < 0);
+}
+
+/*
+ * Nop reactor registration.
+ */
+static void rv_nop_reaction(char *msg)
+{
+}
+
+static struct rv_reactor rv_nop = {
+ .name = "nop",
+ .description = "no-operation reactor: do nothing.",
+ .react = rv_nop_reaction
+};
+
+int init_rv_reactors(struct dentry *root_dir)
+{
+ struct dentry *available, *reacting;
+ int retval;
+
+ available = rv_create_file("available_reactors", RV_MODE_READ, root_dir, NULL,
+ &available_reactors_ops);
+ if (!available)
+ goto out_err;
+
+ reacting = rv_create_file("reacting_on", RV_MODE_WRITE, root_dir, NULL, &reacting_on_fops);
+ if (!reacting)
+ goto rm_available;
+
+ retval = __rv_register_reactor(&rv_nop);
+ if (retval)
+ goto rm_reacting;
+
+ turn_reacting_on();
+
+ return 0;
+
+rm_reacting:
+ rv_remove(reacting);
+rm_available:
+ rv_remove(available);
+out_err:
+ return -ENOMEM;
+}
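A hypothetical sketch of how the pieces above are expected to fit together when a monitor detects an exception: the monitor consults rv_reacting_on() and then calls the ->react() callback that monitor_swap_reactors() installed. The cond_react() helper and its name are an assumption for illustration, not part of the patch:

	static void cond_react(struct rv_monitor *mon, char *msg)
	{
		/* reacting_on gates all reactions; an unset ->react means no reactor yet */
		if (!rv_reacting_on() || !mon->react)
			return;

		mon->react(msg);	/* e.g. rv_printk_reaction() or rv_panic_reaction() */
	}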
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 0c517c8c8999..d3005279165d 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -5569,13 +5569,13 @@ static const char readme_msg[] =
#endif
#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
"\t accepts: event-definitions (one definition per line)\n"
- "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
- "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
+ "\t Format: p[:[<group>/][<event>]] <place> [<args>]\n"
+ "\t r[maxactive][:[<group>/][<event>]] <place> [<args>]\n"
#ifdef CONFIG_HIST_TRIGGERS
"\t s:[synthetic/]<event> <field> [<field>]\n"
#endif
- "\t e[:[<group>/]<event>] <attached-group>.<attached-event> [<args>]\n"
- "\t -:[<group>/]<event>\n"
+ "\t e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>]\n"
+ "\t -:[<group>/][<event>]\n"
#ifdef CONFIG_KPROBE_EVENTS
"\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
"place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
@@ -9101,6 +9101,16 @@ allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size
return 0;
}
+static void free_trace_buffer(struct array_buffer *buf)
+{
+ if (buf->buffer) {
+ ring_buffer_free(buf->buffer);
+ buf->buffer = NULL;
+ free_percpu(buf->data);
+ buf->data = NULL;
+ }
+}
+
static int allocate_trace_buffers(struct trace_array *tr, int size)
{
int ret;
@@ -9113,10 +9123,7 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
ret = allocate_trace_buffer(tr, &tr->max_buffer,
allocate_snapshot ? size : 1);
if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
- ring_buffer_free(tr->array_buffer.buffer);
- tr->array_buffer.buffer = NULL;
- free_percpu(tr->array_buffer.data);
- tr->array_buffer.data = NULL;
+ free_trace_buffer(&tr->array_buffer);
return -ENOMEM;
}
tr->allocated_snapshot = allocate_snapshot;
@@ -9131,16 +9138,6 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
return 0;
}
-static void free_trace_buffer(struct array_buffer *buf)
-{
- if (buf->buffer) {
- ring_buffer_free(buf->buffer);
- buf->buffer = NULL;
- free_percpu(buf->data);
- buf->data = NULL;
- }
-}
-
static void free_trace_buffers(struct trace_array *tr)
{
if (!tr)
@@ -9772,6 +9769,8 @@ static __init int tracer_init_tracefs(void)
tracer_init_tracefs_work_func(NULL);
}
+ rv_init_interface();
+
return 0;
}
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index ff816fb41e48..900e75d96c84 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -2005,4 +2005,13 @@ struct trace_min_max_param {
extern const struct file_operations trace_min_max_fops;
+#ifdef CONFIG_RV
+extern int rv_init_interface(void);
+#else
+static inline int rv_init_interface(void)
+{
+ return 0;
+}
+#endif
+
#endif /* _LINUX_KERNEL_TRACE_H */
diff --git a/kernel/trace/trace_dynevent.c b/kernel/trace/trace_dynevent.c
index 076b447a1b88..154996684fb5 100644
--- a/kernel/trace/trace_dynevent.c
+++ b/kernel/trace/trace_dynevent.c
@@ -101,7 +101,7 @@ int dyn_event_release(const char *raw_command, struct dyn_event_operations *type
event = p + 1;
*p = '\0';
}
- if (event[0] == '\0') {
+ if (!system && event[0] == '\0') {
ret = -EINVAL;
goto out;
}
diff --git a/kernel/trace/trace_eprobe.c b/kernel/trace/trace_eprobe.c
index 7d4478525c66..1783e3478912 100644
--- a/kernel/trace/trace_eprobe.c
+++ b/kernel/trace/trace_eprobe.c
@@ -125,6 +125,7 @@ static bool eprobe_dyn_event_match(const char *system, const char *event,
* We match the following:
* event only - match all eprobes with event name
* system and event only - match all system/event probes
+ * system only - match all system probes
*
* The below has the above satisfied with more arguments:
*
@@ -143,7 +144,7 @@ static bool eprobe_dyn_event_match(const char *system, const char *event,
return false;
/* Must match the event name */
- if (strcmp(trace_probe_name(&ep->tp), event) != 0)
+ if (event[0] != '\0' && strcmp(trace_probe_name(&ep->tp), event) != 0)
return false;
/* No arguments match all */
@@ -226,6 +227,7 @@ static int trace_eprobe_tp_arg_update(struct trace_eprobe *ep, int i)
struct probe_arg *parg = &ep->tp.args[i];
struct ftrace_event_field *field;
struct list_head *head;
+ int ret = -ENOENT;
head = trace_get_fields(ep->event);
list_for_each_entry(field, head, link) {
@@ -235,9 +237,20 @@ static int trace_eprobe_tp_arg_update(struct trace_eprobe *ep, int i)
return 0;
}
}
+
+ /*
+ * Argument not found on event. But allow for comm and COMM
+ * to be used to get the current->comm.
+ */
+ if (strcmp(parg->code->data, "COMM") == 0 ||
+ strcmp(parg->code->data, "comm") == 0) {
+ parg->code->op = FETCH_OP_COMM;
+ ret = 0;
+ }
+
kfree(parg->code->data);
parg->code->data = NULL;
- return -ENOENT;
+ return ret;
}
static int eprobe_event_define_fields(struct trace_event_call *event_call)
@@ -310,6 +323,27 @@ static unsigned long get_event_field(struct fetch_insn *code, void *rec)
addr = rec + field->offset;
+ if (is_string_field(field)) {
+ switch (field->filter_type) {
+ case FILTER_DYN_STRING:
+ val = (unsigned long)(rec + (*(unsigned int *)addr & 0xffff));
+ break;
+ case FILTER_RDYN_STRING:
+ val = (unsigned long)(addr + (*(unsigned int *)addr & 0xffff));
+ break;
+ case FILTER_STATIC_STRING:
+ val = (unsigned long)addr;
+ break;
+ case FILTER_PTR_STRING:
+ val = (unsigned long)(*(char *)addr);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+ return val;
+ }
+
switch (field->size) {
case 1:
if (field->is_signed)
@@ -341,16 +375,38 @@ static unsigned long get_event_field(struct fetch_insn *code, void *rec)
static int get_eprobe_size(struct trace_probe *tp, void *rec)
{
+ struct fetch_insn *code;
struct probe_arg *arg;
int i, len, ret = 0;
for (i = 0; i < tp->nr_args; i++) {
arg = tp->args + i;
- if (unlikely(arg->dynamic)) {
+ if (arg->dynamic) {
unsigned long val;
- val = get_event_field(arg->code, rec);
- len = process_fetch_insn_bottom(arg->code + 1, val, NULL, NULL);
+ code = arg->code;
+ retry:
+ switch (code->op) {
+ case FETCH_OP_TP_ARG:
+ val = get_event_field(code, rec);
+ break;
+ case FETCH_OP_IMM:
+ val = code->immediate;
+ break;
+ case FETCH_OP_COMM:
+ val = (unsigned long)current->comm;
+ break;
+ case FETCH_OP_DATA:
+ val = (unsigned long)code->data;
+ break;
+ case FETCH_NOP_SYMBOL: /* Ignore a place holder */
+ code++;
+ goto retry;
+ default:
+ continue;
+ }
+ code++;
+ len = process_fetch_insn_bottom(code, val, NULL, NULL);
if (len > 0)
ret += len;
}
@@ -368,8 +424,28 @@ process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
{
unsigned long val;
- val = get_event_field(code, rec);
- return process_fetch_insn_bottom(code + 1, val, dest, base);
+ retry:
+ switch (code->op) {
+ case FETCH_OP_TP_ARG:
+ val = get_event_field(code, rec);
+ break;
+ case FETCH_OP_IMM:
+ val = code->immediate;
+ break;
+ case FETCH_OP_COMM:
+ val = (unsigned long)current->comm;
+ break;
+ case FETCH_OP_DATA:
+ val = (unsigned long)code->data;
+ break;
+ case FETCH_NOP_SYMBOL: /* Ignore a place holder */
+ code++;
+ goto retry;
+ default:
+ return -EILSEQ;
+ }
+ code++;
+ return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn)
@@ -838,8 +914,15 @@ static int trace_eprobe_tp_update_arg(struct trace_eprobe *ep, const char *argv[
if (ret)
return ret;
- if (ep->tp.args[i].code->op == FETCH_OP_TP_ARG)
+ if (ep->tp.args[i].code->op == FETCH_OP_TP_ARG) {
ret = trace_eprobe_tp_arg_update(ep, i);
+ if (ret)
+ trace_probe_log_err(0, BAD_ATTACH_ARG);
+ }
+
+ /* Handle symbols "@" */
+ if (!ret)
+ ret = traceprobe_update_arg(&ep->tp.args[i]);
return ret;
}
@@ -848,7 +931,7 @@ static int __trace_eprobe_create(int argc, const char *argv[])
{
/*
* Argument syntax:
- * e[:[GRP/]ENAME] SYSTEM.EVENT [FETCHARGS]
+ * e[:[GRP/][ENAME]] SYSTEM.EVENT [FETCHARGS]
* Fetch args:
* <name>=$<field>[:TYPE]
*/
@@ -858,6 +941,7 @@ static int __trace_eprobe_create(int argc, const char *argv[])
struct trace_eprobe *ep = NULL;
char buf1[MAX_EVENT_NAME_LEN];
char buf2[MAX_EVENT_NAME_LEN];
+ char gbuf[MAX_EVENT_NAME_LEN];
int ret = 0;
int i;
@@ -869,25 +953,25 @@ static int __trace_eprobe_create(int argc, const char *argv[])
event = strchr(&argv[0][1], ':');
if (event) {
event++;
- ret = traceprobe_parse_event_name(&event, &group, buf1,
+ ret = traceprobe_parse_event_name(&event, &group, gbuf,
event - argv[0]);
if (ret)
goto parse_error;
- } else {
- strscpy(buf1, argv[1], MAX_EVENT_NAME_LEN);
- sanitize_event_name(buf1);
- event = buf1;
}
- if (!is_good_name(event) || !is_good_name(group))
- goto parse_error;
+ trace_probe_log_set_index(1);
sys_event = argv[1];
- ret = traceprobe_parse_event_name(&sys_event, &sys_name, buf2,
- sys_event - argv[1]);
- if (ret || !sys_name)
- goto parse_error;
- if (!is_good_name(sys_event) || !is_good_name(sys_name))
+ ret = traceprobe_parse_event_name(&sys_event, &sys_name, buf2, 0);
+ if (ret || !sys_event || !sys_name) {
+ trace_probe_log_err(0, NO_EVENT_INFO);
goto parse_error;
+ }
+
+ if (!event) {
+ strscpy(buf1, argv[1], MAX_EVENT_NAME_LEN);
+ sanitize_event_name(buf1);
+ event = buf1;
+ }
mutex_lock(&event_mutex);
event_call = find_and_get_event(sys_name, sys_event);
@@ -896,6 +980,8 @@ static int __trace_eprobe_create(int argc, const char *argv[])
if (IS_ERR(ep)) {
ret = PTR_ERR(ep);
+ if (ret == -ENODEV)
+ trace_probe_log_err(0, BAD_ATTACH_EVENT);
/* This must return -ENOMEM or missing event, else there is a bug */
WARN_ON_ONCE(ret != -ENOMEM && ret != -ENODEV);
ep = NULL;
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index a114549720d6..61e3a2620fa3 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -157,7 +157,7 @@ static void perf_trace_event_unreg(struct perf_event *p_event)
int i;
if (--tp_event->perf_refcount > 0)
- goto out;
+ return;
tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);
@@ -176,8 +176,6 @@ static void perf_trace_event_unreg(struct perf_event *p_event)
perf_trace_buf[i] = NULL;
}
}
-out:
- trace_event_put_ref(tp_event);
}
static int perf_trace_event_open(struct perf_event *p_event)
@@ -241,6 +239,7 @@ void perf_trace_destroy(struct perf_event *p_event)
mutex_lock(&event_mutex);
perf_trace_event_close(p_event);
perf_trace_event_unreg(p_event);
+ trace_event_put_ref(p_event->tp_event);
mutex_unlock(&event_mutex);
}
@@ -292,6 +291,7 @@ void perf_kprobe_destroy(struct perf_event *p_event)
mutex_lock(&event_mutex);
perf_trace_event_close(p_event);
perf_trace_event_unreg(p_event);
+ trace_event_put_ref(p_event->tp_event);
mutex_unlock(&event_mutex);
destroy_local_trace_kprobe(p_event->tp_event);
@@ -347,6 +347,7 @@ void perf_uprobe_destroy(struct perf_event *p_event)
mutex_lock(&event_mutex);
perf_trace_event_close(p_event);
perf_trace_event_unreg(p_event);
+ trace_event_put_ref(p_event->tp_event);
mutex_unlock(&event_mutex);
destroy_local_trace_uprobe(p_event->tp_event);
}
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 181f08186d32..0356cae0cf74 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -176,6 +176,7 @@ static int trace_define_generic_fields(void)
__generic_field(int, CPU, FILTER_CPU);
__generic_field(int, cpu, FILTER_CPU);
+ __generic_field(int, common_cpu, FILTER_CPU);
__generic_field(char *, COMM, FILTER_COMM);
__generic_field(char *, comm, FILTER_COMM);
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index e87a46794079..fdf784620c28 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -4455,7 +4455,7 @@ static int create_hist_fields(struct hist_trigger_data *hist_data,
ret = parse_var_defs(hist_data);
if (ret)
- goto out;
+ return ret;
ret = create_val_fields(hist_data, file);
if (ret)
@@ -4466,8 +4466,7 @@ static int create_hist_fields(struct hist_trigger_data *hist_data,
goto out;
ret = create_key_fields(hist_data, file);
- if (ret)
- goto out;
+
out:
free_var_defs(hist_data);
diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c
index 706e1686b5eb..a6621c52ce45 100644
--- a/kernel/trace/trace_events_user.c
+++ b/kernel/trace/trace_events_user.c
@@ -567,7 +567,7 @@ static int user_event_set_call_visible(struct user_event *user, bool visible)
* to allow user_event files to be less locked down. The extreme case
* being "other" has read/write access to user_events_data/status.
*
- * When not locked down, processes may not have have permissions to
+ * When not locked down, processes may not have permissions to
* add/remove calls themselves to tracefs. We need to temporarily
* switch to root file permission to allow for this scenario.
*/
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index a245ea673715..23f7f0ec4f4c 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -163,7 +163,8 @@ static bool trace_kprobe_match(const char *system, const char *event,
{
struct trace_kprobe *tk = to_trace_kprobe(ev);
- return strcmp(trace_probe_name(&tk->tp), event) == 0 &&
+ return (event[0] == '\0' ||
+ strcmp(trace_probe_name(&tk->tp), event) == 0) &&
(!system || strcmp(trace_probe_group_name(&tk->tp), system) == 0) &&
trace_kprobe_match_command_head(tk, argc, argv);
}
@@ -708,11 +709,11 @@ static int __trace_kprobe_create(int argc, const char *argv[])
/*
* Argument syntax:
* - Add kprobe:
- * p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
+ * p[:[GRP/][EVENT]] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
* - Add kretprobe:
- * r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
+ * r[MAXACTIVE][:[GRP/][EVENT]] [MOD:]KSYM[+0] [FETCHARGS]
* Or
- * p:[GRP/]EVENT] [MOD:]KSYM[+0]%return [FETCHARGS]
+ * p[:[GRP/][EVENT]] [MOD:]KSYM[+0]%return [FETCHARGS]
*
* Fetch args:
* $retval : fetch return value
@@ -739,6 +740,7 @@ static int __trace_kprobe_create(int argc, const char *argv[])
long offset = 0;
void *addr = NULL;
char buf[MAX_EVENT_NAME_LEN];
+ char gbuf[MAX_EVENT_NAME_LEN];
unsigned int flags = TPARG_FL_KERNEL;
switch (argv[0][0]) {
@@ -833,11 +835,13 @@ static int __trace_kprobe_create(int argc, const char *argv[])
trace_probe_log_set_index(0);
if (event) {
- ret = traceprobe_parse_event_name(&event, &group, buf,
+ ret = traceprobe_parse_event_name(&event, &group, gbuf,
event - argv[0]);
if (ret)
goto parse_error;
- } else {
+ }
+
+ if (!event) {
/* Make a new event name */
if (symbol)
snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index 80863c6508e5..36dff277de46 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -257,6 +257,10 @@ int traceprobe_parse_event_name(const char **pevent, const char **pgroup,
}
len = strlen(event);
if (len == 0) {
+ if (slash) {
+ *pevent = NULL;
+ return 0;
+ }
trace_probe_log_err(offset, NO_EVENT_NAME);
return -EINVAL;
} else if (len > MAX_EVENT_NAME_LEN) {
@@ -279,7 +283,14 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t,
int ret = 0;
int len;
- if (strcmp(arg, "retval") == 0) {
+ if (flags & TPARG_FL_TPOINT) {
+ if (code->data)
+ return -EFAULT;
+ code->data = kstrdup(arg, GFP_KERNEL);
+ if (!code->data)
+ return -ENOMEM;
+ code->op = FETCH_OP_TP_ARG;
+ } else if (strcmp(arg, "retval") == 0) {
if (flags & TPARG_FL_RETURN) {
code->op = FETCH_OP_RETVAL;
} else {
@@ -303,7 +314,7 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t,
}
} else
goto inval_var;
- } else if (strcmp(arg, "comm") == 0) {
+ } else if (strcmp(arg, "comm") == 0 || strcmp(arg, "COMM") == 0) {
code->op = FETCH_OP_COMM;
#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
} else if (((flags & TPARG_FL_MASK) ==
@@ -319,13 +330,6 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t,
code->op = FETCH_OP_ARG;
code->param = (unsigned int)param - 1;
#endif
- } else if (flags & TPARG_FL_TPOINT) {
- if (code->data)
- return -EFAULT;
- code->data = kstrdup(arg, GFP_KERNEL);
- if (!code->data)
- return -ENOMEM;
- code->op = FETCH_OP_TP_ARG;
} else
goto inval_var;
@@ -380,6 +384,11 @@ parse_probe_arg(char *arg, const struct fetch_type *type,
break;
case '%': /* named register */
+ if (flags & TPARG_FL_TPOINT) {
+ /* eprobes do not handle registers */
+ trace_probe_log_err(offs, BAD_VAR);
+ break;
+ }
ret = regs_query_register_offset(arg + 1);
if (ret >= 0) {
code->op = FETCH_OP_REG;
@@ -613,9 +622,11 @@ static int traceprobe_parse_probe_arg_body(const char *argv, ssize_t *size,
/*
* Since $comm and immediate string can not be dereferenced,
- * we can find those by strcmp.
+ * we can find those by strcmp. But ignore for eprobes.
*/
- if (strcmp(arg, "$comm") == 0 || strncmp(arg, "\\\"", 2) == 0) {
+ if (!(flags & TPARG_FL_TPOINT) &&
+ (strcmp(arg, "$comm") == 0 || strcmp(arg, "$COMM") == 0 ||
+ strncmp(arg, "\\\"", 2) == 0)) {
/* The type of $comm must be "string", and not an array. */
if (parg->count || (t && strcmp(t, "string")))
goto out;
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index 92cc149af0fd..3b3869ae8cfd 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -442,7 +442,10 @@ extern int traceprobe_define_arg_fields(struct trace_event_call *event_call,
C(FAIL_REG_PROBE, "Failed to register probe event"),\
C(DIFF_PROBE_TYPE, "Probe type is different from existing probe"),\
C(DIFF_ARG_TYPE, "Argument type or name is different from existing probe"),\
- C(SAME_PROBE, "There is already the exact same probe event"),
+ C(SAME_PROBE, "There is already the exact same probe event"),\
+ C(NO_EVENT_INFO, "This requires both group and event name to attach"),\
+ C(BAD_ATTACH_EVENT, "Attached event does not exist"),\
+ C(BAD_ATTACH_ARG, "Attached event does not have this field"),
#undef C
#define C(a, b) TP_ERR_##a
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 88ba5b4bd0c5..fb58e86dd117 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -313,7 +313,8 @@ static bool trace_uprobe_match(const char *system, const char *event,
{
struct trace_uprobe *tu = to_trace_uprobe(ev);
- return strcmp(trace_probe_name(&tu->tp), event) == 0 &&
+ return (event[0] == '\0' ||
+ strcmp(trace_probe_name(&tu->tp), event) == 0) &&
(!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0) &&
trace_uprobe_match_command_head(tu, argc, argv);
}
@@ -533,7 +534,7 @@ end:
/*
* Argument syntax:
- * - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET[%return][(REF)] [FETCHARGS]
+ * - Add uprobe: p|r[:[GRP/][EVENT]] PATH:OFFSET[%return][(REF)] [FETCHARGS]
*/
static int __trace_uprobe_create(int argc, const char **argv)
{
@@ -541,6 +542,7 @@ static int __trace_uprobe_create(int argc, const char **argv)
const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
char *arg, *filename, *rctr, *rctr_end, *tmp;
char buf[MAX_EVENT_NAME_LEN];
+ char gbuf[MAX_EVENT_NAME_LEN];
enum probe_print_type ptype;
struct path path;
unsigned long offset, ref_ctr_offset;
@@ -645,11 +647,13 @@ static int __trace_uprobe_create(int argc, const char **argv)
/* setup a probe */
trace_probe_log_set_index(0);
if (event) {
- ret = traceprobe_parse_event_name(&event, &group, buf,
+ ret = traceprobe_parse_event_name(&event, &group, gbuf,
event - argv[0]);
if (ret)
goto fail_address_parse;
- } else {
+ }
+
+ if (!event) {
char *tail;
char *ptr;
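Taken together, the probe-syntax hunks above (eprobe, kprobe and uprobe) make the EVENT part of GRP/EVENT optional, let a bare group be used for removal, and allow comm/$comm as an eprobe fetch argument. A hypothetical usage sketch (group and symbol names invented; tracefs assumed mounted at /sys/kernel/tracing):

	# cd /sys/kernel/tracing
	# echo 'e:myprobes/ sched.sched_switch comm=$comm' >> dynamic_events
	# echo 'p:myprobes/ vfs_read' >> dynamic_events
	# echo '-:myprobes/' >> dynamic_events

The first two lines get auto-generated event names; the last removes every event in the myprobes group, which the dyn_event_release() and *_match() changes above now permit.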
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index ecb0e8346e65..8e61f21e7e33 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -537,7 +537,7 @@ int lockup_detector_offline_cpu(unsigned int cpu)
return 0;
}
-static void lockup_detector_reconfigure(void)
+static void __lockup_detector_reconfigure(void)
{
cpus_read_lock();
watchdog_nmi_stop();
@@ -557,6 +557,13 @@ static void lockup_detector_reconfigure(void)
__lockup_detector_cleanup();
}
+void lockup_detector_reconfigure(void)
+{
+ mutex_lock(&watchdog_mutex);
+ __lockup_detector_reconfigure();
+ mutex_unlock(&watchdog_mutex);
+}
+
/*
* Create the watchdog infrastructure and configure the detector(s).
*/
@@ -573,13 +580,13 @@ static __init void lockup_detector_setup(void)
return;
mutex_lock(&watchdog_mutex);
- lockup_detector_reconfigure();
+ __lockup_detector_reconfigure();
softlockup_initialized = true;
mutex_unlock(&watchdog_mutex);
}
#else /* CONFIG_SOFTLOCKUP_DETECTOR */
-static void lockup_detector_reconfigure(void)
+static void __lockup_detector_reconfigure(void)
{
cpus_read_lock();
watchdog_nmi_stop();
@@ -587,9 +594,13 @@ static void lockup_detector_reconfigure(void)
watchdog_nmi_start();
cpus_read_unlock();
}
+void lockup_detector_reconfigure(void)
+{
+ __lockup_detector_reconfigure();
+}
static inline void lockup_detector_setup(void)
{
- lockup_detector_reconfigure();
+ __lockup_detector_reconfigure();
}
#endif /* !CONFIG_SOFTLOCKUP_DETECTOR */
@@ -629,7 +640,7 @@ static void proc_watchdog_update(void)
{
/* Remove impossible cpus to keep sysctl output clean. */
cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
- lockup_detector_reconfigure();
+ __lockup_detector_reconfigure();
}
/*
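The watchdog.c hunks above rename the unlocked helper to __lockup_detector_reconfigure() and add an exported lockup_detector_reconfigure() wrapper that serializes on watchdog_mutex when the softlockup detector is built in. A hypothetical out-of-file caller, purely for illustration:

	/* e.g. after an arch-specific watchdog parameter changed at runtime */
	static void my_watchdog_param_update(void)
	{
		lockup_detector_reconfigure();	/* locked wrapper added above */
	}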
diff --git a/lib/Kconfig b/lib/Kconfig
index eaaad4d85bf2..dc1ab2ed1dc6 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -118,6 +118,13 @@ config INDIRECT_IOMEM_FALLBACK
mmio accesses when the IO memory address is not a registered
emulated region.
+config TRACE_MMIO_ACCESS
+ bool "Register read/write tracing"
+ depends on TRACING && ARCH_HAVE_TRACE_MMIO_ACCESS
+ help
+ Create tracepoints for MMIO read/write operations. These trace events
+ can be used for logging all MMIO read/write operations.
+
source "lib/crypto/Kconfig"
config LIB_MEMNEQ
@@ -685,15 +692,6 @@ config STACKDEPOT_ALWAYS_INIT
bool
select STACKDEPOT
-config STACK_HASH_ORDER
- int "stack depot hash size (12 => 4KB, 20 => 1024KB)"
- range 12 20
- default 20
- depends on STACKDEPOT
- help
- Select the hash size as a power of 2 for the stackdepot hash table.
- Choose a lower value to reduce the memory impact.
-
config REF_TRACKER
bool
depends on STACKTRACE_SUPPORT
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 35cd8287642a..bcbe60d6c80c 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -699,6 +699,14 @@ config DEBUG_OBJECTS_ENABLE_DEFAULT
help
Debug objects boot parameter default value
+config SHRINKER_DEBUG
+ bool "Enable shrinker debugging support"
+ depends on DEBUG_FS
+ help
+ Say Y to enable the shrinker debugfs interface which provides
+ visibility into the kernel memory shrinkers subsystem.
+ Disable it to avoid an extra memory footprint.
+
config HAVE_DEBUG_KMEMLEAK
bool
@@ -2021,6 +2029,18 @@ config LKDTM
Documentation on how to use the module can be found in
Documentation/fault-injection/provoke-crashes.rst
+config CPUMASK_KUNIT_TEST
+ tristate "KUnit test for cpumask" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Enable to turn on cpumask tests, running at boot or module load time.
+
+ For more information on KUnit and unit tests in general, please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
config TEST_LIST_SORT
tristate "Linked list sorting test" if !KUNIT_ALL_TESTS
depends on KUNIT
diff --git a/lib/Makefile b/lib/Makefile
index 67482f5ec0e8..ffabc30a27d4 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -33,7 +33,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
flex_proportions.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \
earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
- nmi_backtrace.o nodemask.o win_minmax.o memcat_p.o \
+ nmi_backtrace.o win_minmax.o memcat_p.o \
buildid.o
lib-$(CONFIG_PRINTK) += dump_stack.o
@@ -46,7 +46,7 @@ obj-y += bcd.o sort.o parser.o debug_locks.o random32.o \
bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
list_sort.o uuid.o iov_iter.o clz_ctz.o \
bsearch.o find_bit.o llist.o memweight.o kfifo.o \
- percpu-refcount.o rhashtable.o \
+ percpu-refcount.o rhashtable.o base64.o \
once.o refcount.o usercopy.o errseq.o bucket_locks.o \
generic-radix-tree.o
obj-$(CONFIG_STRING_SELFTEST) += test_string.o
@@ -60,6 +60,7 @@ obj-$(CONFIG_TEST_BPF) += test_bpf.o
obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
obj-$(CONFIG_TEST_BITOPS) += test_bitops.o
CFLAGS_test_bitops.o += -Werror
+obj-$(CONFIG_CPUMASK_KUNIT_TEST) += cpumask_kunit.o
obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o
obj-$(CONFIG_TEST_SIPHASH) += test_siphash.o
obj-$(CONFIG_HASH_KUNIT_TEST) += test_hash.o
@@ -151,6 +152,8 @@ lib-y += logic_pio.o
lib-$(CONFIG_INDIRECT_IOMEM) += logic_iomem.o
+obj-$(CONFIG_TRACE_MMIO_ACCESS) += trace_readwrite.o
+
obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
obj-$(CONFIG_BTREE) += btree.o
diff --git a/lib/base64.c b/lib/base64.c
new file mode 100644
index 000000000000..b736a7a431c5
--- /dev/null
+++ b/lib/base64.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * base64.c - RFC4648-compliant base64 encoding
+ *
+ * Copyright (c) 2020 Hannes Reinecke, SUSE
+ *
+ * Based on the base64url routines from fs/crypto/fname.c
+ * (which are using the URL-safe base64 encoding),
+ * modified to use the standard coding table from RFC4648 section 4.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/export.h>
+#include <linux/string.h>
+#include <linux/base64.h>
+
+static const char base64_table[65] =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+
+/**
+ * base64_encode() - base64-encode some binary data
+ * @src: the binary data to encode
+ * @srclen: the length of @src in bytes
+ * @dst: (output) the base64-encoded string. Not NUL-terminated.
+ *
+ * Encodes data using base64 encoding, i.e. the "Base 64 Encoding" specified
+ * by RFC 4648, including the '='-padding.
+ *
+ * Return: the length of the resulting base64-encoded string in bytes.
+ */
+int base64_encode(const u8 *src, int srclen, char *dst)
+{
+ u32 ac = 0;
+ int bits = 0;
+ int i;
+ char *cp = dst;
+
+ for (i = 0; i < srclen; i++) {
+ ac = (ac << 8) | src[i];
+ bits += 8;
+ do {
+ bits -= 6;
+ *cp++ = base64_table[(ac >> bits) & 0x3f];
+ } while (bits >= 6);
+ }
+ if (bits) {
+ *cp++ = base64_table[(ac << (6 - bits)) & 0x3f];
+ bits -= 6;
+ }
+ while (bits < 0) {
+ *cp++ = '=';
+ bits += 2;
+ }
+ return cp - dst;
+}
+EXPORT_SYMBOL_GPL(base64_encode);
+
+/**
+ * base64_decode() - base64-decode a string
+ * @src: the string to decode. Doesn't need to be NUL-terminated.
+ * @srclen: the length of @src in bytes
+ * @dst: (output) the decoded binary data
+ *
+ * Decodes a string using base64 encoding, i.e. the "Base 64 Encoding"
+ * specified by RFC 4648, including the '='-padding.
+ *
+ * This implementation hasn't been optimized for performance.
+ *
+ * Return: the length of the resulting decoded binary data in bytes,
+ * or -1 if the string isn't a valid base64 string.
+ */
+int base64_decode(const char *src, int srclen, u8 *dst)
+{
+ u32 ac = 0;
+ int bits = 0;
+ int i;
+ u8 *bp = dst;
+
+ for (i = 0; i < srclen; i++) {
+ const char *p = strchr(base64_table, src[i]);
+
+ if (src[i] == '=') {
+ ac = (ac << 6);
+ bits += 6;
+ if (bits >= 8)
+ bits -= 8;
+ continue;
+ }
+ if (p == NULL || src[i] == 0)
+ return -1;
+ ac = (ac << 6) | (p - base64_table);
+ bits += 6;
+ if (bits >= 8) {
+ bits -= 8;
+ *bp++ = (u8)(ac >> bits);
+ }
+ }
+ if (ac & ((1 << bits) - 1))
+ return -1;
+ return bp - dst;
+}
+EXPORT_SYMBOL_GPL(base64_decode);
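For orientation, a caller-side sketch of how the two new helpers compose; the function name, allocation strategy and error codes below are illustrative only and not part of the patch. The '='-padded output of base64_encode() is always 4 * DIV_ROUND_UP(srclen, 3) bytes, and base64_decode() returns -1 on malformed input.

#include <linux/base64.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Illustrative only: round-trip a buffer through the new lib/base64 helpers. */
static int example_base64_roundtrip(const u8 *secret, int len)
{
	int enc_len = 4 * DIV_ROUND_UP(len, 3);	/* '='-padded output size */
	char *enc;
	u8 *dec;
	int ret = -ENOMEM;

	enc = kmalloc(enc_len + 1, GFP_KERNEL);	/* +1 for our own NUL */
	dec = kmalloc(len, GFP_KERNEL);
	if (!enc || !dec)
		goto out;

	enc[base64_encode(secret, len, enc)] = '\0';

	ret = -EINVAL;
	if (base64_decode(enc, strlen(enc), dec) != len ||
	    memcmp(secret, dec, len))
		goto out;		/* rejected or corrupted */
	ret = 0;
out:
	kfree(enc);
	kfree(dec);
	return ret;
}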
diff --git a/lib/bitmap.c b/lib/bitmap.c
index b18e31ea6e66..488e6c3e5acc 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -237,7 +237,7 @@ void bitmap_cut(unsigned long *dst, const unsigned long *src,
}
EXPORT_SYMBOL(bitmap_cut);
-int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits)
{
unsigned int k;
@@ -275,7 +275,7 @@ void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
}
EXPORT_SYMBOL(__bitmap_xor);
-int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
+bool __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits)
{
unsigned int k;
@@ -333,10 +333,9 @@ bool __bitmap_subset(const unsigned long *bitmap1,
}
EXPORT_SYMBOL(__bitmap_subset);
-int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
+unsigned int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
{
- unsigned int k, lim = bits/BITS_PER_LONG;
- int w = 0;
+ unsigned int k, lim = bits/BITS_PER_LONG, w = 0;
for (k = 0; k < lim; k++)
w += hweight_long(bitmap[k]);
@@ -1564,7 +1563,7 @@ void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits)
/* Clear tail bits in the last element of array beyond nbits. */
if (nbits % 64)
- buf[-1] &= GENMASK_ULL(nbits % 64, 0);
+ buf[-1] &= GENMASK_ULL((nbits - 1) % 64, 0);
}
EXPORT_SYMBOL(bitmap_to_arr64);
#endif
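A worked instance of the off-by-one fixed in the hunk above (comment only, not part of the patch): the last word must keep exactly nbits % 64 bits whenever that remainder is non-zero.

/*
 * For nbits = 66 the last u64 must keep the low 2 bits:
 *   old: GENMASK_ULL(66 % 64, 0)        == GENMASK_ULL(2, 0) == 0x7  (3 bits, one too many)
 *   new: GENMASK_ULL((66 - 1) % 64, 0)  == GENMASK_ULL(1, 0) == 0x3  (2 bits, as intended)
 */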
diff --git a/lib/btree.c b/lib/btree.c
index b4cf08a5c267..a82100c73b55 100644
--- a/lib/btree.c
+++ b/lib/btree.c
@@ -238,7 +238,7 @@ static int keyzero(struct btree_geo *geo, unsigned long *key)
return 1;
}
-void *btree_lookup(struct btree_head *head, struct btree_geo *geo,
+static void *btree_lookup_node(struct btree_head *head, struct btree_geo *geo,
unsigned long *key)
{
int i, height = head->height;
@@ -257,7 +257,16 @@ void *btree_lookup(struct btree_head *head, struct btree_geo *geo,
if (!node)
return NULL;
}
+ return node;
+}
+void *btree_lookup(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key)
+{
+ int i;
+ unsigned long *node;
+
+ node = btree_lookup_node(head, geo, key);
if (!node)
return NULL;
@@ -271,23 +280,10 @@ EXPORT_SYMBOL_GPL(btree_lookup);
int btree_update(struct btree_head *head, struct btree_geo *geo,
unsigned long *key, void *val)
{
- int i, height = head->height;
- unsigned long *node = head->node;
-
- if (height == 0)
- return -ENOENT;
-
- for ( ; height > 1; height--) {
- for (i = 0; i < geo->no_pairs; i++)
- if (keycmp(geo, node, i, key) <= 0)
- break;
- if (i == geo->no_pairs)
- return -ENOENT;
- node = bval(geo, node, i);
- if (!node)
- return -ENOENT;
- }
+ int i;
+ unsigned long *node;
+ node = btree_lookup_node(head, geo, key);
if (!node)
return -ENOENT;
diff --git a/lib/cpumask.c b/lib/cpumask.c
index a971a82d2f43..f0ae119be8c4 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -8,61 +8,6 @@
#include <linux/numa.h>
/**
- * cpumask_next - get the next cpu in a cpumask
- * @n: the cpu prior to the place to search (ie. return will be > @n)
- * @srcp: the cpumask pointer
- *
- * Returns >= nr_cpu_ids if no further cpus set.
- */
-unsigned int cpumask_next(int n, const struct cpumask *srcp)
-{
- /* -1 is a legal arg here. */
- if (n != -1)
- cpumask_check(n);
- return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
-}
-EXPORT_SYMBOL(cpumask_next);
-
-/**
- * cpumask_next_and - get the next cpu in *src1p & *src2p
- * @n: the cpu prior to the place to search (ie. return will be > @n)
- * @src1p: the first cpumask pointer
- * @src2p: the second cpumask pointer
- *
- * Returns >= nr_cpu_ids if no further cpus set in both.
- */
-int cpumask_next_and(int n, const struct cpumask *src1p,
- const struct cpumask *src2p)
-{
- /* -1 is a legal arg here. */
- if (n != -1)
- cpumask_check(n);
- return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
- nr_cpumask_bits, n + 1);
-}
-EXPORT_SYMBOL(cpumask_next_and);
-
-/**
- * cpumask_any_but - return a "random" in a cpumask, but not this one.
- * @mask: the cpumask to search
- * @cpu: the cpu to ignore.
- *
- * Often used to find any cpu but smp_processor_id() in a mask.
- * Returns >= nr_cpu_ids if no cpus set.
- */
-int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
-{
- unsigned int i;
-
- cpumask_check(cpu);
- for_each_cpu(i, mask)
- if (i != cpu)
- break;
- return i;
-}
-EXPORT_SYMBOL(cpumask_any_but);
-
-/**
* cpumask_next_wrap - helper to implement for_each_cpu_wrap
* @n: the cpu prior to the place to search
* @mask: the cpumask pointer
@@ -74,9 +19,9 @@ EXPORT_SYMBOL(cpumask_any_but);
* Note: the @wrap argument is required for the start condition when
* we cannot assume @start is set in @mask.
*/
-int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
+unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
- int next;
+ unsigned int next;
again:
next = cpumask_next(n, mask);
@@ -125,34 +70,6 @@ bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
}
EXPORT_SYMBOL(alloc_cpumask_var_node);
-bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
-{
- return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
-}
-EXPORT_SYMBOL(zalloc_cpumask_var_node);
-
-/**
- * alloc_cpumask_var - allocate a struct cpumask
- * @mask: pointer to cpumask_var_t where the cpumask is returned
- * @flags: GFP_ flags
- *
- * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
- * a nop returning a constant 1 (in <linux/cpumask.h>).
- *
- * See alloc_cpumask_var_node.
- */
-bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
-{
- return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
-}
-EXPORT_SYMBOL(alloc_cpumask_var);
-
-bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
-{
- return alloc_cpumask_var(mask, flags | __GFP_ZERO);
-}
-EXPORT_SYMBOL(zalloc_cpumask_var);
-
/**
* alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
* @mask: pointer to cpumask_var_t where the cpumask is returned
@@ -205,7 +122,7 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask)
*/
unsigned int cpumask_local_spread(unsigned int i, int node)
{
- int cpu;
+ unsigned int cpu;
/* Wrap: we always want a cpu. */
i %= num_online_cpus();
@@ -243,10 +160,10 @@ static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);
*
* Returns >= nr_cpu_ids if the intersection is empty.
*/
-int cpumask_any_and_distribute(const struct cpumask *src1p,
+unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
const struct cpumask *src2p)
{
- int next, prev;
+ unsigned int next, prev;
/* NOTE: our first selection will skip 0. */
prev = __this_cpu_read(distribute_cpu_mask_prev);
@@ -262,9 +179,9 @@ int cpumask_any_and_distribute(const struct cpumask *src1p,
}
EXPORT_SYMBOL(cpumask_any_and_distribute);
-int cpumask_any_distribute(const struct cpumask *srcp)
+unsigned int cpumask_any_distribute(const struct cpumask *srcp)
{
- int next, prev;
+ unsigned int next, prev;
/* NOTE: our first selection will skip 0. */
prev = __this_cpu_read(distribute_cpu_mask_prev);
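The wrappers removed above are not lost: as I read this series they move into <linux/cpumask.h> as static inlines, so callers are unchanged. A hedged sketch of the header-side cpumask_next() (the exact header form may differ):

static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
}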
diff --git a/lib/cpumask_kunit.c b/lib/cpumask_kunit.c
new file mode 100644
index 000000000000..ecbeec72221e
--- /dev/null
+++ b/lib/cpumask_kunit.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * KUnit tests for cpumask.
+ *
+ * Author: Sander Vanheule <sander@svanheule.net>
+ */
+
+#include <kunit/test.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+
+#define MASK_MSG(m) \
+ "%s contains %sCPUs %*pbl", #m, (cpumask_weight(m) ? "" : "no "), \
+ nr_cpumask_bits, cpumask_bits(m)
+
+#define EXPECT_FOR_EACH_CPU_EQ(test, mask) \
+ do { \
+ const cpumask_t *m = (mask); \
+ int mask_weight = cpumask_weight(m); \
+ int cpu, iter = 0; \
+ for_each_cpu(cpu, m) \
+ iter++; \
+ KUNIT_EXPECT_EQ_MSG((test), mask_weight, iter, MASK_MSG(mask)); \
+ } while (0)
+
+#define EXPECT_FOR_EACH_CPU_NOT_EQ(test, mask) \
+ do { \
+ const cpumask_t *m = (mask); \
+ int mask_weight = cpumask_weight(m); \
+ int cpu, iter = 0; \
+ for_each_cpu_not(cpu, m) \
+ iter++; \
+ KUNIT_EXPECT_EQ_MSG((test), nr_cpu_ids - mask_weight, iter, MASK_MSG(mask)); \
+ } while (0)
+
+#define EXPECT_FOR_EACH_CPU_WRAP_EQ(test, mask) \
+ do { \
+ const cpumask_t *m = (mask); \
+ int mask_weight = cpumask_weight(m); \
+ int cpu, iter = 0; \
+ for_each_cpu_wrap(cpu, m, nr_cpu_ids / 2) \
+ iter++; \
+ KUNIT_EXPECT_EQ_MSG((test), mask_weight, iter, MASK_MSG(mask)); \
+ } while (0)
+
+#define EXPECT_FOR_EACH_CPU_BUILTIN_EQ(test, name) \
+ do { \
+ int mask_weight = num_##name##_cpus(); \
+ int cpu, iter = 0; \
+ for_each_##name##_cpu(cpu) \
+ iter++; \
+ KUNIT_EXPECT_EQ_MSG((test), mask_weight, iter, MASK_MSG(cpu_##name##_mask)); \
+ } while (0)
+
+static cpumask_t mask_empty;
+static cpumask_t mask_all;
+
+static void test_cpumask_weight(struct kunit *test)
+{
+ KUNIT_EXPECT_TRUE_MSG(test, cpumask_empty(&mask_empty), MASK_MSG(&mask_empty));
+ KUNIT_EXPECT_TRUE_MSG(test, cpumask_full(&mask_all), MASK_MSG(&mask_all));
+
+ KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_weight(&mask_empty), MASK_MSG(&mask_empty));
+ KUNIT_EXPECT_EQ_MSG(test, nr_cpu_ids, cpumask_weight(cpu_possible_mask),
+ MASK_MSG(cpu_possible_mask));
+ KUNIT_EXPECT_EQ_MSG(test, nr_cpumask_bits, cpumask_weight(&mask_all), MASK_MSG(&mask_all));
+}
+
+static void test_cpumask_first(struct kunit *test)
+{
+ KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_first(&mask_empty), MASK_MSG(&mask_empty));
+ KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_first(cpu_possible_mask), MASK_MSG(cpu_possible_mask));
+
+ KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_first_zero(&mask_empty), MASK_MSG(&mask_empty));
+ KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_first_zero(cpu_possible_mask),
+ MASK_MSG(cpu_possible_mask));
+}
+
+static void test_cpumask_last(struct kunit *test)
+{
+ KUNIT_EXPECT_LE_MSG(test, nr_cpumask_bits, cpumask_last(&mask_empty),
+ MASK_MSG(&mask_empty));
+ KUNIT_EXPECT_EQ_MSG(test, nr_cpu_ids - 1, cpumask_last(cpu_possible_mask),
+ MASK_MSG(cpu_possible_mask));
+}
+
+static void test_cpumask_next(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_next_zero(-1, &mask_empty), MASK_MSG(&mask_empty));
+ KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_next_zero(-1, cpu_possible_mask),
+ MASK_MSG(cpu_possible_mask));
+
+ KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_next(-1, &mask_empty),
+ MASK_MSG(&mask_empty));
+ KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_next(-1, cpu_possible_mask),
+ MASK_MSG(cpu_possible_mask));
+}
+
+static void test_cpumask_iterators(struct kunit *test)
+{
+ EXPECT_FOR_EACH_CPU_EQ(test, &mask_empty);
+ EXPECT_FOR_EACH_CPU_NOT_EQ(test, &mask_empty);
+ EXPECT_FOR_EACH_CPU_WRAP_EQ(test, &mask_empty);
+
+ EXPECT_FOR_EACH_CPU_EQ(test, cpu_possible_mask);
+ EXPECT_FOR_EACH_CPU_NOT_EQ(test, cpu_possible_mask);
+ EXPECT_FOR_EACH_CPU_WRAP_EQ(test, cpu_possible_mask);
+}
+
+static void test_cpumask_iterators_builtin(struct kunit *test)
+{
+ EXPECT_FOR_EACH_CPU_BUILTIN_EQ(test, possible);
+
+ /* Ensure the dynamic masks are stable while running the tests */
+ cpu_hotplug_disable();
+
+ EXPECT_FOR_EACH_CPU_BUILTIN_EQ(test, online);
+ EXPECT_FOR_EACH_CPU_BUILTIN_EQ(test, present);
+
+ cpu_hotplug_enable();
+}
+
+static int test_cpumask_init(struct kunit *test)
+{
+ cpumask_clear(&mask_empty);
+ cpumask_setall(&mask_all);
+
+ return 0;
+}
+
+static struct kunit_case test_cpumask_cases[] = {
+ KUNIT_CASE(test_cpumask_weight),
+ KUNIT_CASE(test_cpumask_first),
+ KUNIT_CASE(test_cpumask_last),
+ KUNIT_CASE(test_cpumask_next),
+ KUNIT_CASE(test_cpumask_iterators),
+ KUNIT_CASE(test_cpumask_iterators_builtin),
+ {}
+};
+
+static struct kunit_suite test_cpumask_suite = {
+ .name = "cpumask",
+ .init = test_cpumask_init,
+ .test_cases = test_cpumask_cases,
+};
+kunit_test_suite(test_cpumask_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
index 9ff549f63540..47816af9a9d7 100644
--- a/lib/crypto/Kconfig
+++ b/lib/crypto/Kconfig
@@ -33,7 +33,6 @@ config CRYPTO_ARCH_HAVE_LIB_CHACHA
config CRYPTO_LIB_CHACHA_GENERIC
tristate
- select XOR_BLOCKS
help
This symbol can be depended upon by arch implementations of the
ChaCha library interface that require the generic code as a
diff --git a/lib/devres.c b/lib/devres.c
index 14664bbb4875..55eb07e80cbb 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -29,7 +29,8 @@ static void __iomem *__devm_ioremap(struct device *dev, resource_size_t offset,
{
void __iomem **ptr, *addr = NULL;
- ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
+ ptr = devres_alloc_node(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL,
+ dev_to_node(dev));
if (!ptr)
return NULL;
@@ -292,7 +293,8 @@ void __iomem *devm_ioport_map(struct device *dev, unsigned long port,
{
void __iomem **ptr, *addr;
- ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL);
+ ptr = devres_alloc_node(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL,
+ dev_to_node(dev));
if (!ptr)
return NULL;
@@ -366,7 +368,8 @@ void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
if (dr)
return dr->table;
- new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
+ new_dr = devres_alloc_node(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL,
+ dev_to_node(&pdev->dev));
if (!new_dr)
return NULL;
dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
@@ -548,7 +551,8 @@ int devm_arch_phys_wc_add(struct device *dev, unsigned long base, unsigned long
int *mtrr;
int ret;
- mtrr = devres_alloc(devm_arch_phys_ac_add_release, sizeof(*mtrr), GFP_KERNEL);
+ mtrr = devres_alloc_node(devm_arch_phys_ac_add_release, sizeof(*mtrr), GFP_KERNEL,
+ dev_to_node(dev));
if (!mtrr)
return -ENOMEM;
@@ -593,7 +597,8 @@ int devm_arch_io_reserve_memtype_wc(struct device *dev, resource_size_t start,
struct arch_io_reserve_memtype_wc_devres *dr;
int ret;
- dr = devres_alloc(devm_arch_io_free_memtype_wc_release, sizeof(*dr), GFP_KERNEL);
+ dr = devres_alloc_node(devm_arch_io_free_memtype_wc_release, sizeof(*dr), GFP_KERNEL,
+ dev_to_node(dev));
if (!dr)
return -ENOMEM;
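For context on the conversions above: in the !CONFIG_DEBUG_DEVRES case devres_alloc() is, to my understanding, just the node-unaware wrapper sketched below, so switching to devres_alloc_node() only adds NUMA placement for the bookkeeping record (sketch, not part of the patch):

static inline void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
{
	return devres_alloc_node(release, size, gfp, NUMA_NO_NODE);
}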
diff --git a/lib/error-inject.c b/lib/error-inject.c
index 2ff5ef689d72..1afca1b1cdea 100644
--- a/lib/error-inject.c
+++ b/lib/error-inject.c
@@ -40,12 +40,18 @@ bool within_error_injection_list(unsigned long addr)
int get_injectable_error_type(unsigned long addr)
{
struct ei_entry *ent;
+ int ei_type = EI_ETYPE_NONE;
+ mutex_lock(&ei_mutex);
list_for_each_entry(ent, &error_injection_list, list) {
- if (addr >= ent->start_addr && addr < ent->end_addr)
- return ent->etype;
+ if (addr >= ent->start_addr && addr < ent->end_addr) {
+ ei_type = ent->etype;
+ break;
+ }
}
- return EI_ETYPE_NONE;
+ mutex_unlock(&ei_mutex);
+
+ return ei_type;
}
/*
@@ -197,24 +203,14 @@ static int ei_seq_show(struct seq_file *m, void *v)
return 0;
}
-static const struct seq_operations ei_seq_ops = {
+static const struct seq_operations ei_sops = {
.start = ei_seq_start,
.next = ei_seq_next,
.stop = ei_seq_stop,
.show = ei_seq_show,
};
-static int ei_open(struct inode *inode, struct file *filp)
-{
- return seq_open(filp, &ei_seq_ops);
-}
-
-static const struct file_operations debugfs_ei_ops = {
- .open = ei_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
+DEFINE_SEQ_ATTRIBUTE(ei);
static int __init ei_debugfs_init(void)
{
@@ -224,7 +220,7 @@ static int __init ei_debugfs_init(void)
if (!dir)
return -ENOMEM;
- file = debugfs_create_file("list", 0444, dir, NULL, &debugfs_ei_ops);
+ file = debugfs_create_file("list", 0444, dir, NULL, &ei_fops);
if (!file) {
debugfs_remove(dir);
return -ENOMEM;
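The ei_seq_ops -> ei_sops rename above is what lets DEFINE_SEQ_ATTRIBUTE(ei) work, since the macro pastes the name. Roughly, it expands to the ei_open/ei_fops pair that the hand-rolled code used to provide (simplified sketch; the real seq_file.h macro also wires inode->i_private into the seq_file):

static int ei_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &ei_sops);
}

static const struct file_operations ei_fops = {
	.owner   = THIS_MODULE,
	.open    = ei_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};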
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c
index 53e7eb1dd76c..05cccbcf1661 100644
--- a/lib/flex_proportions.c
+++ b/lib/flex_proportions.c
@@ -63,18 +63,13 @@ void fprop_global_destroy(struct fprop_global *p)
*/
bool fprop_new_period(struct fprop_global *p, int periods)
{
- s64 events;
- unsigned long flags;
+ s64 events = percpu_counter_sum(&p->events);
- local_irq_save(flags);
- events = percpu_counter_sum(&p->events);
/*
* Don't do anything if there are no events.
*/
- if (events <= 1) {
- local_irq_restore(flags);
+ if (events <= 1)
return false;
- }
write_seqcount_begin(&p->sequence);
if (periods < 64)
events -= events >> periods;
@@ -82,7 +77,6 @@ bool fprop_new_period(struct fprop_global *p, int periods)
percpu_counter_add(&p->events, -events);
p->period += periods;
write_seqcount_end(&p->sequence);
- local_irq_restore(flags);
return true;
}
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 0e0be334dbee..4b7fce72e3e5 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -16,6 +16,16 @@
#define PIPE_PARANOIA /* for now */
+/* covers ubuf and kbuf alike */
+#define iterate_buf(i, n, base, len, off, __p, STEP) { \
+ size_t __maybe_unused off = 0; \
+ len = n; \
+ base = __p + i->iov_offset; \
+ len -= (STEP); \
+ i->iov_offset += len; \
+ n = len; \
+}
+
/* covers iovec and kvec alike */
#define iterate_iovec(i, n, base, len, off, __p, STEP) { \
size_t off = 0; \
@@ -110,7 +120,12 @@ __out: \
if (unlikely(i->count < n)) \
n = i->count; \
if (likely(n)) { \
- if (likely(iter_is_iovec(i))) { \
+ if (likely(iter_is_ubuf(i))) { \
+ void __user *base; \
+ size_t len; \
+ iterate_buf(i, n, base, len, off, \
+ i->ubuf, (I)) \
+ } else if (likely(iter_is_iovec(i))) { \
const struct iovec *iov = i->iov; \
void __user *base; \
size_t len; \
@@ -168,26 +183,31 @@ static int copyin(void *to, const void __user *from, size_t n)
return n;
}
+static inline struct pipe_buffer *pipe_buf(const struct pipe_inode_info *pipe,
+ unsigned int slot)
+{
+ return &pipe->bufs[slot & (pipe->ring_size - 1)];
+}
+
#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
struct pipe_inode_info *pipe = i->pipe;
unsigned int p_head = pipe->head;
unsigned int p_tail = pipe->tail;
- unsigned int p_mask = pipe->ring_size - 1;
unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
unsigned int i_head = i->head;
unsigned int idx;
- if (i->iov_offset) {
+ if (i->last_offset) {
struct pipe_buffer *p;
if (unlikely(p_occupancy == 0))
goto Bad; // pipe must be non-empty
if (unlikely(i_head != p_head - 1))
goto Bad; // must be at the last buffer...
- p = &pipe->bufs[i_head & p_mask];
- if (unlikely(p->offset + p->len != i->iov_offset))
+ p = pipe_buf(pipe, i_head);
+ if (unlikely(p->offset + p->len != abs(i->last_offset)))
goto Bad; // ... at the end of segment
} else {
if (i_head != p_head)
@@ -195,7 +215,7 @@ static bool sanity(const struct iov_iter *i)
}
return true;
Bad:
- printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset);
+ printk(KERN_ERR "idx = %d, offset = %d\n", i_head, i->last_offset);
printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
p_head, p_tail, pipe->ring_size);
for (idx = 0; idx < pipe->ring_size; idx++)
@@ -211,15 +231,79 @@ Bad:
#define sanity(i) true
#endif
+static struct page *push_anon(struct pipe_inode_info *pipe, unsigned size)
+{
+ struct page *page = alloc_page(GFP_USER);
+ if (page) {
+ struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++);
+ *buf = (struct pipe_buffer) {
+ .ops = &default_pipe_buf_ops,
+ .page = page,
+ .offset = 0,
+ .len = size
+ };
+ }
+ return page;
+}
+
+static void push_page(struct pipe_inode_info *pipe, struct page *page,
+ unsigned int offset, unsigned int size)
+{
+ struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++);
+ *buf = (struct pipe_buffer) {
+ .ops = &page_cache_pipe_buf_ops,
+ .page = page,
+ .offset = offset,
+ .len = size
+ };
+ get_page(page);
+}
+
+static inline int last_offset(const struct pipe_buffer *buf)
+{
+ if (buf->ops == &default_pipe_buf_ops)
+ return buf->len; // buf->offset is 0 for those
+ else
+ return -(buf->offset + buf->len);
+}
+
+static struct page *append_pipe(struct iov_iter *i, size_t size,
+ unsigned int *off)
+{
+ struct pipe_inode_info *pipe = i->pipe;
+ int offset = i->last_offset;
+ struct pipe_buffer *buf;
+ struct page *page;
+
+ if (offset > 0 && offset < PAGE_SIZE) {
+ // some space in the last buffer; add to it
+ buf = pipe_buf(pipe, pipe->head - 1);
+ size = min_t(size_t, size, PAGE_SIZE - offset);
+ buf->len += size;
+ i->last_offset += size;
+ i->count -= size;
+ *off = offset;
+ return buf->page;
+ }
+ // OK, we need a new buffer
+ *off = 0;
+ size = min_t(size_t, size, PAGE_SIZE);
+ if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
+ return NULL;
+ page = push_anon(pipe, size);
+ if (!page)
+ return NULL;
+ i->head = pipe->head - 1;
+ i->last_offset = size;
+ i->count -= size;
+ return page;
+}
+
static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
struct pipe_inode_info *pipe = i->pipe;
- struct pipe_buffer *buf;
- unsigned int p_tail = pipe->tail;
- unsigned int p_mask = pipe->ring_size - 1;
- unsigned int i_head = i->head;
- size_t off;
+ unsigned int head = pipe->head;
if (unlikely(bytes > i->count))
bytes = i->count;
@@ -230,32 +314,21 @@ static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t by
if (!sanity(i))
return 0;
- off = i->iov_offset;
- buf = &pipe->bufs[i_head & p_mask];
- if (off) {
- if (offset == off && buf->page == page) {
- /* merge with the last one */
+ if (offset && i->last_offset == -offset) { // could we merge it?
+ struct pipe_buffer *buf = pipe_buf(pipe, head - 1);
+ if (buf->page == page) {
buf->len += bytes;
- i->iov_offset += bytes;
- goto out;
+ i->last_offset -= bytes;
+ i->count -= bytes;
+ return bytes;
}
- i_head++;
- buf = &pipe->bufs[i_head & p_mask];
}
- if (pipe_full(i_head, p_tail, pipe->max_usage))
+ if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
return 0;
- buf->ops = &page_cache_pipe_buf_ops;
- buf->flags = 0;
- get_page(page);
- buf->page = page;
- buf->offset = offset;
- buf->len = bytes;
-
- pipe->head = i_head + 1;
- i->iov_offset = offset + bytes;
- i->head = i_head;
-out:
+ push_page(pipe, page, offset, bytes);
+ i->last_offset = -(offset + bytes);
+ i->head = head;
i->count -= bytes;
return bytes;
}
@@ -275,7 +348,11 @@ out:
*/
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
{
- if (iter_is_iovec(i)) {
+ if (iter_is_ubuf(i)) {
+ size_t n = min(size, iov_iter_count(i));
+ n -= fault_in_readable(i->ubuf + i->iov_offset, n);
+ return size - n;
+ } else if (iter_is_iovec(i)) {
size_t count = min(size, iov_iter_count(i));
const struct iovec *p;
size_t skip;
@@ -314,7 +391,11 @@ EXPORT_SYMBOL(fault_in_iov_iter_readable);
*/
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
{
- if (iter_is_iovec(i)) {
+ if (iter_is_ubuf(i)) {
+ size_t n = min(size, iov_iter_count(i));
+ n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n);
+ return size - n;
+ } else if (iter_is_iovec(i)) {
size_t count = min(size, iov_iter_count(i));
const struct iovec *p;
size_t skip;
@@ -345,6 +426,7 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction,
*i = (struct iov_iter) {
.iter_type = ITER_IOVEC,
.nofault = false,
+ .user_backed = true,
.data_source = direction,
.iov = iov,
.nr_segs = nr_segs,
@@ -354,101 +436,43 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction,
}
EXPORT_SYMBOL(iov_iter_init);
-static inline bool allocated(struct pipe_buffer *buf)
-{
- return buf->ops == &default_pipe_buf_ops;
-}
-
-static inline void data_start(const struct iov_iter *i,
- unsigned int *iter_headp, size_t *offp)
-{
- unsigned int p_mask = i->pipe->ring_size - 1;
- unsigned int iter_head = i->head;
- size_t off = i->iov_offset;
-
- if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) ||
- off == PAGE_SIZE)) {
- iter_head++;
- off = 0;
- }
- *iter_headp = iter_head;
- *offp = off;
-}
-
-static size_t push_pipe(struct iov_iter *i, size_t size,
- int *iter_headp, size_t *offp)
+// returns the offset in partial buffer (if any)
+static inline unsigned int pipe_npages(const struct iov_iter *i, int *npages)
{
struct pipe_inode_info *pipe = i->pipe;
- unsigned int p_tail = pipe->tail;
- unsigned int p_mask = pipe->ring_size - 1;
- unsigned int iter_head;
- size_t off;
- ssize_t left;
+ int used = pipe->head - pipe->tail;
+ int off = i->last_offset;
- if (unlikely(size > i->count))
- size = i->count;
- if (unlikely(!size))
- return 0;
+ *npages = max((int)pipe->max_usage - used, 0);
- left = size;
- data_start(i, &iter_head, &off);
- *iter_headp = iter_head;
- *offp = off;
- if (off) {
- left -= PAGE_SIZE - off;
- if (left <= 0) {
- pipe->bufs[iter_head & p_mask].len += size;
- return size;
- }
- pipe->bufs[iter_head & p_mask].len = PAGE_SIZE;
- iter_head++;
+ if (off > 0 && off < PAGE_SIZE) { // anon and not full
+ (*npages)++;
+ return off;
}
- while (!pipe_full(iter_head, p_tail, pipe->max_usage)) {
- struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask];
- struct page *page = alloc_page(GFP_USER);
- if (!page)
- break;
-
- buf->ops = &default_pipe_buf_ops;
- buf->flags = 0;
- buf->page = page;
- buf->offset = 0;
- buf->len = min_t(ssize_t, left, PAGE_SIZE);
- left -= buf->len;
- iter_head++;
- pipe->head = iter_head;
-
- if (left == 0)
- return size;
- }
- return size - left;
+ return 0;
}
static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
struct iov_iter *i)
{
- struct pipe_inode_info *pipe = i->pipe;
- unsigned int p_mask = pipe->ring_size - 1;
- unsigned int i_head;
- size_t n, off;
+ unsigned int off, chunk;
- if (!sanity(i))
+ if (unlikely(bytes > i->count))
+ bytes = i->count;
+ if (unlikely(!bytes))
return 0;
- bytes = n = push_pipe(i, bytes, &i_head, &off);
- if (unlikely(!n))
+ if (!sanity(i))
return 0;
- do {
- size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
- memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk);
- i->head = i_head;
- i->iov_offset = off + chunk;
- n -= chunk;
+
+ for (size_t n = bytes; n; n -= chunk) {
+ struct page *page = append_pipe(i, n, &off);
+ chunk = min_t(size_t, n, PAGE_SIZE - off);
+ if (!page)
+ return bytes - n;
+ memcpy_to_page(page, off, addr, chunk);
addr += chunk;
- off = 0;
- i_head++;
- } while (n);
- i->count -= bytes;
+ }
return bytes;
}
@@ -462,31 +486,32 @@ static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
struct iov_iter *i, __wsum *sump)
{
- struct pipe_inode_info *pipe = i->pipe;
- unsigned int p_mask = pipe->ring_size - 1;
__wsum sum = *sump;
size_t off = 0;
- unsigned int i_head;
- size_t r;
+ unsigned int chunk, r;
+
+ if (unlikely(bytes > i->count))
+ bytes = i->count;
+ if (unlikely(!bytes))
+ return 0;
if (!sanity(i))
return 0;
- bytes = push_pipe(i, bytes, &i_head, &r);
while (bytes) {
- size_t chunk = min_t(size_t, bytes, PAGE_SIZE - r);
- char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
+ struct page *page = append_pipe(i, bytes, &r);
+ char *p;
+
+ if (!page)
+ break;
+ chunk = min_t(size_t, bytes, PAGE_SIZE - r);
+ p = kmap_local_page(page);
sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off);
kunmap_local(p);
- i->head = i_head;
- i->iov_offset = r + chunk;
- bytes -= chunk;
off += chunk;
- r = 0;
- i_head++;
+ bytes -= chunk;
}
*sump = sum;
- i->count -= off;
return off;
}
@@ -494,7 +519,7 @@ size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
if (unlikely(iov_iter_is_pipe(i)))
return copy_pipe_to_iter(addr, bytes, i);
- if (iter_is_iovec(i))
+ if (user_backed_iter(i))
might_fault();
iterate_and_advance(i, bytes, base, len, off,
copyout(base, addr + off, len),
@@ -518,39 +543,36 @@ static int copyout_mc(void __user *to, const void *from, size_t n)
static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
struct iov_iter *i)
{
- struct pipe_inode_info *pipe = i->pipe;
- unsigned int p_mask = pipe->ring_size - 1;
- unsigned int i_head;
- unsigned int valid = pipe->head;
- size_t n, off, xfer = 0;
+ size_t xfer = 0;
+ unsigned int off, chunk;
+
+ if (unlikely(bytes > i->count))
+ bytes = i->count;
+ if (unlikely(!bytes))
+ return 0;
if (!sanity(i))
return 0;
- n = push_pipe(i, bytes, &i_head, &off);
- while (n) {
- size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
- char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
+ while (bytes) {
+ struct page *page = append_pipe(i, bytes, &off);
unsigned long rem;
+ char *p;
+
+ if (!page)
+ break;
+ chunk = min_t(size_t, bytes, PAGE_SIZE - off);
+ p = kmap_local_page(page);
rem = copy_mc_to_kernel(p + off, addr + xfer, chunk);
chunk -= rem;
kunmap_local(p);
- if (chunk) {
- i->head = i_head;
- i->iov_offset = off + chunk;
- xfer += chunk;
- valid = i_head + 1;
- }
+ xfer += chunk;
+ bytes -= chunk;
if (rem) {
- pipe->bufs[i_head & p_mask].len -= rem;
- pipe_discard_from(pipe, valid);
+ iov_iter_revert(i, rem);
break;
}
- n -= chunk;
- off = 0;
- i_head++;
}
- i->count -= xfer;
return xfer;
}
@@ -583,7 +605,7 @@ size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
if (unlikely(iov_iter_is_pipe(i)))
return copy_mc_pipe_to_iter(addr, bytes, i);
- if (iter_is_iovec(i))
+ if (user_backed_iter(i))
might_fault();
__iterate_and_advance(i, bytes, base, len, off,
copyout_mc(base, addr + off, len),
@@ -601,7 +623,7 @@ size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
WARN_ON(1);
return 0;
}
- if (iter_is_iovec(i))
+ if (user_backed_iter(i))
might_fault();
iterate_and_advance(i, bytes, base, len, off,
copyin(addr + off, base, len),
@@ -684,30 +706,21 @@ static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
return false;
}
-static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
- struct iov_iter *i)
-{
- if (unlikely(iov_iter_is_pipe(i))) {
- return copy_page_to_iter_pipe(page, offset, bytes, i);
- } else {
- void *kaddr = kmap_local_page(page);
- size_t wanted = _copy_to_iter(kaddr + offset, bytes, i);
- kunmap_local(kaddr);
- return wanted;
- }
-}
-
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
size_t res = 0;
if (unlikely(!page_copy_sane(page, offset, bytes)))
return 0;
+ if (unlikely(iov_iter_is_pipe(i)))
+ return copy_page_to_iter_pipe(page, offset, bytes, i);
page += offset / PAGE_SIZE; // first subpage
offset %= PAGE_SIZE;
while (1) {
- size_t n = __copy_page_to_iter(page, offset,
- min(bytes, (size_t)PAGE_SIZE - offset), i);
+ void *kaddr = kmap_local_page(page);
+ size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
+ n = _copy_to_iter(kaddr + offset, n, i);
+ kunmap_local(kaddr);
res += n;
bytes -= n;
if (!bytes || !n)
@@ -725,42 +738,53 @@ EXPORT_SYMBOL(copy_page_to_iter);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
- if (page_copy_sane(page, offset, bytes)) {
+ size_t res = 0;
+ if (!page_copy_sane(page, offset, bytes))
+ return 0;
+ page += offset / PAGE_SIZE; // first subpage
+ offset %= PAGE_SIZE;
+ while (1) {
void *kaddr = kmap_local_page(page);
- size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
+ size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
+ n = _copy_from_iter(kaddr + offset, n, i);
kunmap_local(kaddr);
- return wanted;
+ res += n;
+ bytes -= n;
+ if (!bytes || !n)
+ break;
+ offset += n;
+ if (offset == PAGE_SIZE) {
+ page++;
+ offset = 0;
+ }
}
- return 0;
+ return res;
}
EXPORT_SYMBOL(copy_page_from_iter);
static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
- struct pipe_inode_info *pipe = i->pipe;
- unsigned int p_mask = pipe->ring_size - 1;
- unsigned int i_head;
- size_t n, off;
+ unsigned int chunk, off;
- if (!sanity(i))
+ if (unlikely(bytes > i->count))
+ bytes = i->count;
+ if (unlikely(!bytes))
return 0;
- bytes = n = push_pipe(i, bytes, &i_head, &off);
- if (unlikely(!n))
+ if (!sanity(i))
return 0;
- do {
- size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
- char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
+ for (size_t n = bytes; n; n -= chunk) {
+ struct page *page = append_pipe(i, n, &off);
+ char *p;
+
+ if (!page)
+ return bytes - n;
+ chunk = min_t(size_t, n, PAGE_SIZE - off);
+ p = kmap_local_page(page);
memset(p + off, 0, chunk);
kunmap_local(p);
- i->head = i_head;
- i->iov_offset = off + chunk;
- n -= chunk;
- off = 0;
- i_head++;
- } while (n);
- i->count -= bytes;
+ }
return bytes;
}
@@ -799,56 +823,30 @@ size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t byt
}
EXPORT_SYMBOL(copy_page_from_iter_atomic);
-static inline void pipe_truncate(struct iov_iter *i)
-{
- struct pipe_inode_info *pipe = i->pipe;
- unsigned int p_tail = pipe->tail;
- unsigned int p_head = pipe->head;
- unsigned int p_mask = pipe->ring_size - 1;
-
- if (!pipe_empty(p_head, p_tail)) {
- struct pipe_buffer *buf;
- unsigned int i_head = i->head;
- size_t off = i->iov_offset;
-
- if (off) {
- buf = &pipe->bufs[i_head & p_mask];
- buf->len = off - buf->offset;
- i_head++;
- }
- while (p_head != i_head) {
- p_head--;
- pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]);
- }
-
- pipe->head = p_head;
- }
-}
-
static void pipe_advance(struct iov_iter *i, size_t size)
{
struct pipe_inode_info *pipe = i->pipe;
- if (size) {
- struct pipe_buffer *buf;
- unsigned int p_mask = pipe->ring_size - 1;
- unsigned int i_head = i->head;
- size_t off = i->iov_offset, left = size;
+ int off = i->last_offset;
+ if (!off && !size) {
+ pipe_discard_from(pipe, i->start_head); // discard everything
+ return;
+ }
+ i->count -= size;
+ while (1) {
+ struct pipe_buffer *buf = pipe_buf(pipe, i->head);
if (off) /* make it relative to the beginning of buffer */
- left += off - pipe->bufs[i_head & p_mask].offset;
- while (1) {
- buf = &pipe->bufs[i_head & p_mask];
- if (left <= buf->len)
- break;
- left -= buf->len;
- i_head++;
+ size += abs(off) - buf->offset;
+ if (size <= buf->len) {
+ buf->len = size;
+ i->last_offset = last_offset(buf);
+ break;
}
- i->head = i_head;
- i->iov_offset = buf->offset + left;
+ size -= buf->len;
+ i->head++;
+ off = 0;
}
- i->count -= size;
- /* ... and discard everything past that point */
- pipe_truncate(i);
+ pipe_discard_from(pipe, i->head + 1); // discard everything past this one
}
static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
@@ -894,16 +892,16 @@ void iov_iter_advance(struct iov_iter *i, size_t size)
{
if (unlikely(i->count < size))
size = i->count;
- if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
+ if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) {
+ i->iov_offset += size;
+ i->count -= size;
+ } else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
/* iovec and kvec have identical layouts */
iov_iter_iovec_advance(i, size);
} else if (iov_iter_is_bvec(i)) {
iov_iter_bvec_advance(i, size);
} else if (iov_iter_is_pipe(i)) {
pipe_advance(i, size);
- } else if (unlikely(iov_iter_is_xarray(i))) {
- i->iov_offset += size;
- i->count -= size;
} else if (iov_iter_is_discard(i)) {
i->count -= size;
}
@@ -919,28 +917,22 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll)
i->count += unroll;
if (unlikely(iov_iter_is_pipe(i))) {
struct pipe_inode_info *pipe = i->pipe;
- unsigned int p_mask = pipe->ring_size - 1;
- unsigned int i_head = i->head;
- size_t off = i->iov_offset;
- while (1) {
- struct pipe_buffer *b = &pipe->bufs[i_head & p_mask];
- size_t n = off - b->offset;
- if (unroll < n) {
- off -= unroll;
- break;
- }
- unroll -= n;
- if (!unroll && i_head == i->start_head) {
- off = 0;
- break;
+ unsigned int head = pipe->head;
+
+ while (head > i->start_head) {
+ struct pipe_buffer *b = pipe_buf(pipe, --head);
+ if (unroll < b->len) {
+ b->len -= unroll;
+ i->last_offset = last_offset(b);
+ i->head = head;
+ return;
}
- i_head--;
- b = &pipe->bufs[i_head & p_mask];
- off = b->offset + b->len;
+ unroll -= b->len;
+ pipe_buf_release(pipe, b);
+ pipe->head--;
}
- i->iov_offset = off;
- i->head = i_head;
- pipe_truncate(i);
+ i->last_offset = 0;
+ i->head = head;
return;
}
if (unlikely(iov_iter_is_discard(i)))
@@ -950,7 +942,7 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll)
return;
}
unroll -= i->iov_offset;
- if (iov_iter_is_xarray(i)) {
+ if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) {
BUG(); /* We should never go beyond the start of the specified
* range since we might then be straying into pages that
* aren't pinned.
@@ -1042,7 +1034,7 @@ void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
.pipe = pipe,
.head = pipe->head,
.start_head = pipe->head,
- .iov_offset = 0,
+ .last_offset = 0,
.count = count
};
}
@@ -1158,6 +1150,14 @@ static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
unsigned len_mask)
{
+ if (likely(iter_is_ubuf(i))) {
+ if (i->count & len_mask)
+ return false;
+ if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask)
+ return false;
+ return true;
+ }
+
if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
return iov_iter_aligned_iovec(i, addr_mask, len_mask);
@@ -1165,13 +1165,12 @@ bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
return iov_iter_aligned_bvec(i, addr_mask, len_mask);
if (iov_iter_is_pipe(i)) {
- unsigned int p_mask = i->pipe->ring_size - 1;
size_t size = i->count;
if (size & len_mask)
return false;
- if (size && allocated(&i->pipe->bufs[i->head & p_mask])) {
- if (i->iov_offset & addr_mask)
+ if (size && i->last_offset > 0) {
+ if (i->last_offset & addr_mask)
return false;
}
@@ -1233,6 +1232,13 @@ static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
+ if (likely(iter_is_ubuf(i))) {
+ size_t size = i->count;
+ if (size)
+ return ((unsigned long)i->ubuf + i->iov_offset) | size;
+ return 0;
+ }
+
/* iovec and kvec have identical layouts */
if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
return iov_iter_alignment_iovec(i);
@@ -1241,11 +1247,10 @@ unsigned long iov_iter_alignment(const struct iov_iter *i)
return iov_iter_alignment_bvec(i);
if (iov_iter_is_pipe(i)) {
- unsigned int p_mask = i->pipe->ring_size - 1;
size_t size = i->count;
- if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
- return size | i->iov_offset;
+ if (size && i->last_offset > 0)
+ return size | i->last_offset;
return size;
}
@@ -1263,6 +1268,9 @@ unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
size_t size = i->count;
unsigned k;
+ if (iter_is_ubuf(i))
+ return 0;
+
if (WARN_ON(!iter_is_iovec(i)))
return ~0U;
@@ -1281,45 +1289,50 @@ unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
}
EXPORT_SYMBOL(iov_iter_gap_alignment);
-static inline ssize_t __pipe_get_pages(struct iov_iter *i,
- size_t maxsize,
- struct page **pages,
- int iter_head,
- size_t *start)
+static int want_pages_array(struct page ***res, size_t size,
+ size_t start, unsigned int maxpages)
{
- struct pipe_inode_info *pipe = i->pipe;
- unsigned int p_mask = pipe->ring_size - 1;
- ssize_t n = push_pipe(i, maxsize, &iter_head, start);
- if (!n)
- return -EFAULT;
+ unsigned int count = DIV_ROUND_UP(size + start, PAGE_SIZE);
- maxsize = n;
- n += *start;
- while (n > 0) {
- get_page(*pages++ = pipe->bufs[iter_head & p_mask].page);
- iter_head++;
- n -= PAGE_SIZE;
+ if (count > maxpages)
+ count = maxpages;
+ WARN_ON(!count); // caller should've prevented that
+ if (!*res) {
+ *res = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
+ if (!*res)
+ return 0;
}
-
- return maxsize;
+ return count;
}
static ssize_t pipe_get_pages(struct iov_iter *i,
- struct page **pages, size_t maxsize, unsigned maxpages,
+ struct page ***pages, size_t maxsize, unsigned maxpages,
size_t *start)
{
- unsigned int iter_head, npages;
- size_t capacity;
+ unsigned int npages, count, off, chunk;
+ struct page **p;
+ size_t left;
if (!sanity(i))
return -EFAULT;
- data_start(i, &iter_head, start);
- /* Amount of free space: some of this one + all after this one */
- npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
- capacity = min(npages, maxpages) * PAGE_SIZE - *start;
-
- return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start);
+ *start = off = pipe_npages(i, &npages);
+ if (!npages)
+ return -EFAULT;
+ count = want_pages_array(pages, maxsize, off, min(npages, maxpages));
+ if (!count)
+ return -ENOMEM;
+ p = *pages;
+ for (npages = 0, left = maxsize ; npages < count; npages++, left -= chunk) {
+ struct page *page = append_pipe(i, left, &off);
+ if (!page)
+ break;
+ chunk = min_t(size_t, left, PAGE_SIZE - off);
+ get_page(*p++ = page);
+ }
+ if (!npages)
+ return -EFAULT;
+ return maxsize - left;
}
static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
@@ -1350,47 +1363,40 @@ static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa
}
static ssize_t iter_xarray_get_pages(struct iov_iter *i,
- struct page **pages, size_t maxsize,
+ struct page ***pages, size_t maxsize,
unsigned maxpages, size_t *_start_offset)
{
- unsigned nr, offset;
- pgoff_t index, count;
- size_t size = maxsize;
+ unsigned nr, offset, count;
+ pgoff_t index;
loff_t pos;
- if (!size || !maxpages)
- return 0;
-
pos = i->xarray_start + i->iov_offset;
index = pos >> PAGE_SHIFT;
offset = pos & ~PAGE_MASK;
*_start_offset = offset;
- count = 1;
- if (size > PAGE_SIZE - offset) {
- size -= PAGE_SIZE - offset;
- count += size >> PAGE_SHIFT;
- size &= ~PAGE_MASK;
- if (size)
- count++;
- }
-
- if (count > maxpages)
- count = maxpages;
-
- nr = iter_xarray_populate_pages(pages, i->xarray, index, count);
+ count = want_pages_array(pages, maxsize, offset, maxpages);
+ if (!count)
+ return -ENOMEM;
+ nr = iter_xarray_populate_pages(*pages, i->xarray, index, count);
if (nr == 0)
return 0;
- return min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
+ maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
+ i->iov_offset += maxsize;
+ i->count -= maxsize;
+ return maxsize;
}
-/* must be done on non-empty ITER_IOVEC one */
+/* must be done on non-empty ITER_UBUF or ITER_IOVEC one */
static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
{
size_t skip;
long k;
+ if (iter_is_ubuf(i))
+ return (unsigned long)i->ubuf + i->iov_offset;
+
for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
size_t len = i->iov[k].iov_len - skip;
@@ -1419,11 +1425,11 @@ static struct page *first_bvec_segment(const struct iov_iter *i,
return page;
}
-ssize_t iov_iter_get_pages(struct iov_iter *i,
- struct page **pages, size_t maxsize, unsigned maxpages,
- size_t *start)
+static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
+ struct page ***pages, size_t maxsize,
+ unsigned int maxpages, size_t *start)
{
- int n, res;
+ unsigned int n;
if (maxsize > i->count)
maxsize = i->count;
@@ -1432,9 +1438,10 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
if (maxsize > MAX_RW_COUNT)
maxsize = MAX_RW_COUNT;
- if (likely(iter_is_iovec(i))) {
+ if (likely(user_backed_iter(i))) {
unsigned int gup_flags = 0;
unsigned long addr;
+ int res;
if (iov_iter_rw(i) != WRITE)
gup_flags |= FOLL_WRITE;
@@ -1444,24 +1451,36 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
addr = first_iovec_segment(i, &maxsize);
*start = addr % PAGE_SIZE;
addr &= PAGE_MASK;
- n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
- if (n > maxpages)
- n = maxpages;
- res = get_user_pages_fast(addr, n, gup_flags, pages);
+ n = want_pages_array(pages, maxsize, *start, maxpages);
+ if (!n)
+ return -ENOMEM;
+ res = get_user_pages_fast(addr, n, gup_flags, *pages);
if (unlikely(res <= 0))
return res;
- return min_t(size_t, maxsize, res * PAGE_SIZE - *start);
+ maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - *start);
+ iov_iter_advance(i, maxsize);
+ return maxsize;
}
if (iov_iter_is_bvec(i)) {
+ struct page **p;
struct page *page;
page = first_bvec_segment(i, &maxsize, start);
- n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
- if (n > maxpages)
- n = maxpages;
+ n = want_pages_array(pages, maxsize, *start, maxpages);
+ if (!n)
+ return -ENOMEM;
+ p = *pages;
for (int k = 0; k < n; k++)
- get_page(*pages++ = page++);
- return min_t(size_t, maxsize, n * PAGE_SIZE - *start);
+ get_page(p[k] = page + k);
+ maxsize = min_t(size_t, maxsize, n * PAGE_SIZE - *start);
+ i->count -= maxsize;
+ i->iov_offset += maxsize;
+ if (i->iov_offset == i->bvec->bv_len) {
+ i->iov_offset = 0;
+ i->bvec++;
+ i->nr_segs--;
+ }
+ return maxsize;
}
if (iov_iter_is_pipe(i))
return pipe_get_pages(i, pages, maxsize, maxpages, start);
@@ -1469,140 +1488,35 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
return -EFAULT;
}
-EXPORT_SYMBOL(iov_iter_get_pages);
-static struct page **get_pages_array(size_t n)
-{
- return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
-}
-
-static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
- struct page ***pages, size_t maxsize,
+ssize_t iov_iter_get_pages2(struct iov_iter *i,
+ struct page **pages, size_t maxsize, unsigned maxpages,
size_t *start)
{
- struct page **p;
- unsigned int iter_head, npages;
- ssize_t n;
-
- if (!sanity(i))
- return -EFAULT;
-
- data_start(i, &iter_head, start);
- /* Amount of free space: some of this one + all after this one */
- npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
- n = npages * PAGE_SIZE - *start;
- if (maxsize > n)
- maxsize = n;
- else
- npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
- p = get_pages_array(npages);
- if (!p)
- return -ENOMEM;
- n = __pipe_get_pages(i, maxsize, p, iter_head, start);
- if (n > 0)
- *pages = p;
- else
- kvfree(p);
- return n;
-}
-
-static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i,
- struct page ***pages, size_t maxsize,
- size_t *_start_offset)
-{
- struct page **p;
- unsigned nr, offset;
- pgoff_t index, count;
- size_t size = maxsize;
- loff_t pos;
-
- if (!size)
+ if (!maxpages)
return 0;
+ BUG_ON(!pages);
- pos = i->xarray_start + i->iov_offset;
- index = pos >> PAGE_SHIFT;
- offset = pos & ~PAGE_MASK;
- *_start_offset = offset;
-
- count = 1;
- if (size > PAGE_SIZE - offset) {
- size -= PAGE_SIZE - offset;
- count += size >> PAGE_SHIFT;
- size &= ~PAGE_MASK;
- if (size)
- count++;
- }
-
- p = get_pages_array(count);
- if (!p)
- return -ENOMEM;
- *pages = p;
-
- nr = iter_xarray_populate_pages(p, i->xarray, index, count);
- if (nr == 0)
- return 0;
-
- return min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
+ return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages, start);
}
+EXPORT_SYMBOL(iov_iter_get_pages2);
-ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
+ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i,
struct page ***pages, size_t maxsize,
size_t *start)
{
- struct page **p;
- int n, res;
+ ssize_t len;
- if (maxsize > i->count)
- maxsize = i->count;
- if (!maxsize)
- return 0;
- if (maxsize > MAX_RW_COUNT)
- maxsize = MAX_RW_COUNT;
-
- if (likely(iter_is_iovec(i))) {
- unsigned int gup_flags = 0;
- unsigned long addr;
-
- if (iov_iter_rw(i) != WRITE)
- gup_flags |= FOLL_WRITE;
- if (i->nofault)
- gup_flags |= FOLL_NOFAULT;
-
- addr = first_iovec_segment(i, &maxsize);
- *start = addr % PAGE_SIZE;
- addr &= PAGE_MASK;
- n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
- p = get_pages_array(n);
- if (!p)
- return -ENOMEM;
- res = get_user_pages_fast(addr, n, gup_flags, p);
- if (unlikely(res <= 0)) {
- kvfree(p);
- *pages = NULL;
- return res;
- }
- *pages = p;
- return min_t(size_t, maxsize, res * PAGE_SIZE - *start);
- }
- if (iov_iter_is_bvec(i)) {
- struct page *page;
+ *pages = NULL;
- page = first_bvec_segment(i, &maxsize, start);
- n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
- *pages = p = get_pages_array(n);
- if (!p)
- return -ENOMEM;
- for (int k = 0; k < n; k++)
- get_page(*p++ = page++);
- return min_t(size_t, maxsize, n * PAGE_SIZE - *start);
+ len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start);
+ if (len <= 0) {
+ kvfree(*pages);
+ *pages = NULL;
}
- if (iov_iter_is_pipe(i))
- return pipe_get_pages_alloc(i, pages, maxsize, start);
- if (iov_iter_is_xarray(i))
- return iter_xarray_get_pages_alloc(i, pages, maxsize, start);
- return -EFAULT;
+ return len;
}
-EXPORT_SYMBOL(iov_iter_get_pages_alloc);
+EXPORT_SYMBOL(iov_iter_get_pages_alloc2);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
struct iov_iter *i)
@@ -1715,22 +1629,23 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
if (unlikely(!i->count))
return 0;
+ if (likely(iter_is_ubuf(i))) {
+ unsigned offs = offset_in_page(i->ubuf + i->iov_offset);
+ int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE);
+ return min(npages, maxpages);
+ }
/* iovec and kvec have identical layouts */
if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
return iov_npages(i, maxpages);
if (iov_iter_is_bvec(i))
return bvec_npages(i, maxpages);
if (iov_iter_is_pipe(i)) {
- unsigned int iter_head;
int npages;
- size_t off;
if (!sanity(i))
return 0;
- data_start(i, &iter_head, &off);
- /* some of this one + all after this one */
- npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
+ pipe_npages(i, &npages);
return min(npages, maxpages);
}
if (iov_iter_is_xarray(i)) {
@@ -1749,17 +1664,16 @@ const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
WARN_ON(1);
return NULL;
}
- if (unlikely(iov_iter_is_discard(new) || iov_iter_is_xarray(new)))
- return NULL;
if (iov_iter_is_bvec(new))
return new->bvec = kmemdup(new->bvec,
new->nr_segs * sizeof(struct bio_vec),
flags);
- else
+ else if (iov_iter_is_kvec(new) || iter_is_iovec(new))
/* iovec and kvec have identical layout */
return new->iov = kmemdup(new->iov,
new->nr_segs * sizeof(struct iovec),
flags);
+ return NULL;
}
EXPORT_SYMBOL(dup_iter);
@@ -1953,10 +1867,12 @@ EXPORT_SYMBOL(import_single_range);
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
{
if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i)) &&
- !iov_iter_is_kvec(i))
+ !iov_iter_is_kvec(i) && !iter_is_ubuf(i))
return;
i->iov_offset = state->iov_offset;
i->count = state->count;
+ if (iter_is_ubuf(i))
+ return;
/*
* For the *vec iters, nr_segs + iov is constant - if we increment
* the vec, then we also decrement the nr_segs count. Hence we don't
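Much of the churn above special-cases the new ITER_UBUF flavour (iter_is_ubuf()/user_backed_iter()), a single contiguous user buffer that skips the iovec array walk. For orientation, a hedged sketch of the <linux/uio.h> constructor that builds such an iterator, mirroring the field names used in this diff (the exact header form may differ):

static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
				 void __user *buf, size_t count)
{
	*i = (struct iov_iter) {
		.iter_type	= ITER_UBUF,
		.user_backed	= true,
		.data_source	= direction,
		.ubuf		= buf,
		.count		= count
	};
}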
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 9daa3fb9d1cd..d98d43f80958 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -20,7 +20,11 @@
bool __list_add_valid(struct list_head *new, struct list_head *prev,
struct list_head *next)
{
- if (CHECK_DATA_CORRUPTION(next->prev != prev,
+ if (CHECK_DATA_CORRUPTION(prev == NULL,
+ "list_add corruption. prev is NULL.\n") ||
+ CHECK_DATA_CORRUPTION(next == NULL,
+ "list_add corruption. next is NULL.\n") ||
+ CHECK_DATA_CORRUPTION(next->prev != prev,
"list_add corruption. next->prev should be prev (%px), but was %px. (next=%px).\n",
prev, next->prev, next) ||
CHECK_DATA_CORRUPTION(prev->next != next,
@@ -42,7 +46,11 @@ bool __list_del_entry_valid(struct list_head *entry)
prev = entry->prev;
next = entry->next;
- if (CHECK_DATA_CORRUPTION(next == LIST_POISON1,
+ if (CHECK_DATA_CORRUPTION(next == NULL,
+ "list_del corruption, %px->next is NULL\n", entry) ||
+ CHECK_DATA_CORRUPTION(prev == NULL,
+ "list_del corruption, %px->prev is NULL\n", entry) ||
+ CHECK_DATA_CORRUPTION(next == LIST_POISON1,
"list_del corruption, %px->next is LIST_POISON1 (%px)\n",
entry, LIST_POISON1) ||
CHECK_DATA_CORRUPTION(prev == LIST_POISON2,
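A hypothetical example of the bug class the new NULL checks target (not from the patch): adding to a zeroed list_head that was never run through INIT_LIST_HEAD().

#include <linux/list.h>
#include <linux/slab.h>

struct my_item { struct list_head node; };

static void buggy_add(struct my_item *it)
{
	struct list_head *head = kzalloc(sizeof(*head), GFP_KERNEL);

	if (!head)
		return;
	/*
	 * head->next and head->prev are still NULL here. Previously
	 * __list_add_valid() dereferenced next->prev and oopsed before it
	 * could say anything useful; with this patch CONFIG_DEBUG_LIST
	 * reports "list_add corruption. next is NULL." instead.
	 */
	list_add(&it->node, head);
}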
diff --git a/lib/livepatch/test_klp_callbacks_busy.c b/lib/livepatch/test_klp_callbacks_busy.c
index 7ac845f65be5..133929e0ce8f 100644
--- a/lib/livepatch/test_klp_callbacks_busy.c
+++ b/lib/livepatch/test_klp_callbacks_busy.c
@@ -16,10 +16,12 @@ MODULE_PARM_DESC(block_transition, "block_transition (default=false)");
static void busymod_work_func(struct work_struct *work);
static DECLARE_WORK(work, busymod_work_func);
+static DECLARE_COMPLETION(busymod_work_started);
static void busymod_work_func(struct work_struct *work)
{
pr_info("%s enter\n", __func__);
+ complete(&busymod_work_started);
while (READ_ONCE(block_transition)) {
/*
@@ -37,6 +39,12 @@ static int test_klp_callbacks_busy_init(void)
pr_info("%s\n", __func__);
schedule_work(&work);
+ /*
+ * To synchronize kernel messages, keep the init function from
+ * exiting until the work function's entry message has printed.
+ */
+ wait_for_completion(&busymod_work_started);
+
if (!block_transition) {
/*
* Serialize output: print all messages from the work
diff --git a/lib/lru_cache.c b/lib/lru_cache.c
index 52313acbfa62..dc35464216d3 100644
--- a/lib/lru_cache.c
+++ b/lib/lru_cache.c
@@ -147,8 +147,8 @@ struct lru_cache *lc_create(const char *name, struct kmem_cache *cache,
return lc;
/* else: could not allocate all elements, give up */
- for (i--; i; i--) {
- void *p = element[i];
+ while (i) {
+ void *p = element[--i];
kmem_cache_free(cache, p - e_off);
}
kfree(lc);
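The rollback loop above also fixes an off-by-one; a worked illustration (comment only, not part of the patch):

/*
 * With i == 3 elements allocated before the failure:
 *   old: for (i--; i; i--)          frees element[2], element[1]; element[0] leaks
 *   new: while (i) { element[--i] } frees element[2], element[1], element[0]
 */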
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index fd1728d94bab..59fe69a63800 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -507,9 +507,9 @@ static int LZ4_decompress_safe_withSmallPrefix(const char *source, char *dest,
(BYTE *)dest - prefixSize, NULL, 0);
}
-int LZ4_decompress_safe_forceExtDict(const char *source, char *dest,
- int compressedSize, int maxOutputSize,
- const void *dictStart, size_t dictSize)
+static int LZ4_decompress_safe_forceExtDict(const char *source, char *dest,
+ int compressedSize, int maxOutputSize,
+ const void *dictStart, size_t dictSize)
{
return LZ4_decompress_generic(source, dest,
compressedSize, maxOutputSize,
diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c
index 76758e9296ba..9d31e7126606 100644
--- a/lib/lzo/lzo1x_compress.c
+++ b/lib/lzo/lzo1x_compress.c
@@ -50,9 +50,7 @@ next:
if (dv == 0 && bitstream_version) {
const unsigned char *ir = ip + 4;
- const unsigned char *limit = ip_end
- < (ip + MAX_ZERO_RUN_LENGTH + 1)
- ? ip_end : ip + MAX_ZERO_RUN_LENGTH + 1;
+ const unsigned char *limit = min(ip_end, ip + MAX_ZERO_RUN_LENGTH + 1);
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && \
defined(LZO_FAST_64BIT_MEMORY_ACCESS)
u64 dv64;
@@ -326,7 +324,7 @@ static int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
data_start = op;
while (l > 20) {
- size_t ll = l <= (m4_max_offset + 1) ? l : (m4_max_offset + 1);
+ size_t ll = min_t(size_t, l, m4_max_offset + 1);
uintptr_t ll_end = (uintptr_t) ip + ll;
if ((ll_end + ((t + ll) >> 5)) <= ll_end)
break;
diff --git a/lib/mpi/mpiutil.c b/lib/mpi/mpiutil.c
index bc81419f400c..aa8c46544af8 100644
--- a/lib/mpi/mpiutil.c
+++ b/lib/mpi/mpiutil.c
@@ -272,7 +272,7 @@ MPI mpi_set_ui(MPI w, unsigned long u)
if (!w)
w = mpi_alloc(1);
/* FIXME: If U is 0 we have no need to resize and thus possible
- * allocating the the limbs.
+ * allocating the limbs.
*/
RESIZE_IF_NEEDED(w, 1);
w->d[0] = u;
diff --git a/lib/nodemask.c b/lib/nodemask.c
deleted file mode 100644
index e22647f5181b..000000000000
--- a/lib/nodemask.c
+++ /dev/null
@@ -1,31 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/nodemask.h>
-#include <linux/module.h>
-#include <linux/random.h>
-
-unsigned int __next_node_in(int node, const nodemask_t *srcp)
-{
- unsigned int ret = __next_node(node, srcp);
-
- if (ret == MAX_NUMNODES)
- ret = __first_node(srcp);
- return ret;
-}
-EXPORT_SYMBOL(__next_node_in);
-
-#ifdef CONFIG_NUMA
-/*
- * Return the bit number of a random bit set in the nodemask.
- * (returns NUMA_NO_NODE if nodemask is empty)
- */
-int node_random(const nodemask_t *maskp)
-{
- int w, bit = NUMA_NO_NODE;
-
- w = nodes_weight(*maskp);
- if (w)
- bit = bitmap_ord_to_pos(maskp->bits,
- get_random_int() % w, MAX_NUMNODES);
- return bit;
-}
-#endif
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index b3afafe46fff..3c78e1e8b2ad 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -677,7 +677,7 @@ static void radix_tree_free_nodes(struct radix_tree_node *node)
}
static inline int insert_entries(struct radix_tree_node *node,
- void __rcu **slot, void *item, bool replace)
+ void __rcu **slot, void *item)
{
if (*slot)
return -EEXIST;
@@ -711,7 +711,7 @@ int radix_tree_insert(struct radix_tree_root *root, unsigned long index,
if (error)
return error;
- error = insert_entries(node, slot, item, false);
+ error = insert_entries(node, slot, item);
if (error < 0)
return error;
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index e01a93f46f83..ce945c17980b 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -26,10 +26,16 @@
*/
int ___ratelimit(struct ratelimit_state *rs, const char *func)
{
+ /* Paired with WRITE_ONCE() in .proc_handler().
+ * Changing two values separately could be inconsistent
+ * and some messages could be lost. (See: net_ratelimit_state).
+ */
+ int interval = READ_ONCE(rs->interval);
+ int burst = READ_ONCE(rs->burst);
unsigned long flags;
int ret;
- if (!rs->interval)
+ if (!interval)
return 1;
/*
@@ -44,7 +50,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
if (!rs->begin)
rs->begin = jiffies;
- if (time_is_before_jiffies(rs->begin + rs->interval)) {
+ if (time_is_before_jiffies(rs->begin + interval)) {
if (rs->missed) {
if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
printk_deferred(KERN_WARNING
@@ -56,7 +62,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
rs->begin = jiffies;
rs->printed = 0;
}
- if (rs->burst && rs->burst > rs->printed) {
+ if (burst && burst > rs->printed) {
rs->printed++;
ret = 1;
} else {
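
The ___ratelimit() change snapshots interval and burst once per call with READ_ONCE(), so each field is read exactly once and a concurrent sysctl update (done with WRITE_ONCE(), as for net_ratelimit_state) cannot change a value between the checks within one invocation. A minimal sketch of the pairing, assuming a hypothetical rs owned by some sysctl handler:

	/* writer side, e.g. a .proc_handler() */
	WRITE_ONCE(rs->interval, new_interval);
	WRITE_ONCE(rs->burst, new_burst);

	/* reader side (___ratelimit): one snapshot, then all decisions use it */
	int interval = READ_ONCE(rs->interval);
	int burst = READ_ONCE(rs->burst);

	if (!interval)
		return 1;	/* rate limiting disabled */
	if (burst && burst > rs->printed)
		rs->printed++;	/* allow this message */
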
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index d5e82e4a57ad..c8c3d675845c 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -240,7 +240,7 @@ EXPORT_SYMBOL(__sg_free_table);
**/
void sg_free_append_table(struct sg_append_table *table)
{
- __sg_free_table(&table->sgt, SG_MAX_SINGLE_ALLOC, false, sg_kfree,
+ __sg_free_table(&table->sgt, SG_MAX_SINGLE_ALLOC, 0, sg_kfree,
table->total_nents);
}
EXPORT_SYMBOL(sg_free_append_table);
@@ -253,7 +253,7 @@ EXPORT_SYMBOL(sg_free_append_table);
**/
void sg_free_table(struct sg_table *table)
{
- __sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree,
+ __sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree,
table->orig_nents);
}
EXPORT_SYMBOL(sg_free_table);
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 046ac6297c78..a2bb7738c373 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -47,9 +47,9 @@ unsigned int check_preemption_disabled(const char *what1, const char *what2)
printk("caller is %pS\n", __builtin_return_address(0));
dump_stack();
- instrumentation_end();
out_enable:
+ instrumentation_end();
preempt_enable_no_resched_notrace();
out:
return this_cpu;
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 5ca0d086ef4a..e73fda23388d 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -32,6 +32,7 @@
#include <linux/string.h>
#include <linux/types.h>
#include <linux/memblock.h>
+#include <linux/kasan-enabled.h>
#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)
@@ -145,10 +146,16 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
return stack;
}
-#define STACK_HASH_SIZE (1L << CONFIG_STACK_HASH_ORDER)
-#define STACK_HASH_MASK (STACK_HASH_SIZE - 1)
+/* one hash table bucket entry per 16kB of memory */
+#define STACK_HASH_SCALE 14
+/* limited between 4k and 1M buckets */
+#define STACK_HASH_ORDER_MIN 12
+#define STACK_HASH_ORDER_MAX 20
#define STACK_HASH_SEED 0x9747b28c
+static unsigned int stack_hash_order;
+static unsigned int stack_hash_mask;
+
static bool stack_depot_disable;
static struct stack_record **stack_table;
@@ -175,7 +182,7 @@ void __init stack_depot_want_early_init(void)
int __init stack_depot_early_init(void)
{
- size_t size;
+ unsigned long entries = 0;
/* This is supposed to be called only once, from mm_init() */
if (WARN_ON(__stack_depot_early_init_passed))
@@ -183,13 +190,23 @@ int __init stack_depot_early_init(void)
__stack_depot_early_init_passed = true;
+ if (kasan_enabled() && !stack_hash_order)
+ stack_hash_order = STACK_HASH_ORDER_MAX;
+
if (!__stack_depot_want_early_init || stack_depot_disable)
return 0;
- size = (STACK_HASH_SIZE * sizeof(struct stack_record *));
- pr_info("Stack Depot early init allocating hash table with memblock_alloc, %zu bytes\n",
- size);
- stack_table = memblock_alloc(size, SMP_CACHE_BYTES);
+ if (stack_hash_order)
+ entries = 1UL << stack_hash_order;
+ stack_table = alloc_large_system_hash("stackdepot",
+ sizeof(struct stack_record *),
+ entries,
+ STACK_HASH_SCALE,
+ HASH_EARLY | HASH_ZERO,
+ NULL,
+ &stack_hash_mask,
+ 1UL << STACK_HASH_ORDER_MIN,
+ 1UL << STACK_HASH_ORDER_MAX);
if (!stack_table) {
pr_err("Stack Depot hash table allocation failed, disabling\n");
@@ -207,13 +224,35 @@ int stack_depot_init(void)
mutex_lock(&stack_depot_init_mutex);
if (!stack_depot_disable && !stack_table) {
- pr_info("Stack Depot allocating hash table with kvcalloc\n");
- stack_table = kvcalloc(STACK_HASH_SIZE, sizeof(struct stack_record *), GFP_KERNEL);
+ unsigned long entries;
+ int scale = STACK_HASH_SCALE;
+
+ if (stack_hash_order) {
+ entries = 1UL << stack_hash_order;
+ } else {
+ entries = nr_free_buffer_pages();
+ entries = roundup_pow_of_two(entries);
+
+ if (scale > PAGE_SHIFT)
+ entries >>= (scale - PAGE_SHIFT);
+ else
+ entries <<= (PAGE_SHIFT - scale);
+ }
+
+ if (entries < 1UL << STACK_HASH_ORDER_MIN)
+ entries = 1UL << STACK_HASH_ORDER_MIN;
+ if (entries > 1UL << STACK_HASH_ORDER_MAX)
+ entries = 1UL << STACK_HASH_ORDER_MAX;
+
+ pr_info("Stack Depot allocating hash table of %lu entries with kvcalloc\n",
+ entries);
+ stack_table = kvcalloc(entries, sizeof(struct stack_record *), GFP_KERNEL);
if (!stack_table) {
pr_err("Stack Depot hash table allocation failed, disabling\n");
stack_depot_disable = true;
ret = -ENOMEM;
}
+ stack_hash_mask = entries - 1;
}
mutex_unlock(&stack_depot_init_mutex);
return ret;
@@ -386,7 +425,7 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
goto fast_exit;
hash = hash_stack(entries, nr_entries);
- bucket = &stack_table[hash & STACK_HASH_MASK];
+ bucket = &stack_table[hash & stack_hash_mask];
/*
* Fast path: look the stack trace up without locking.
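
The stack depot hash table is now sized from available memory rather than from a fixed CONFIG_STACK_HASH_ORDER: roughly one bucket per 2^14 bytes (16 KiB), clamped to between 2^12 and 2^20 buckets, with KASAN builds defaulting straight to the maximum. As a rough worked example, assuming 4 KiB pages and about 8 GiB of free buffer pages: nr_free_buffer_pages() is about 2^21, roundup_pow_of_two() keeps it at 2^21, and since the scale (14) exceeds PAGE_SHIFT (12) the entry count is shifted right by 2, giving 2^19 = 524288 buckets; that lies within the [2^12, 2^20] clamp, so stack_hash_mask becomes 0x7ffff.
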
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index d5923a640457..98754ff9fe68 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -604,6 +604,12 @@ static void __init test_bitmap_arr64(void)
pr_err("bitmap_copy_arr64(nbits == %d:"
" tail is not safely cleared: %d\n", nbits, next_bit);
+ if ((nbits % 64) &&
+ (arr[(nbits - 1) / 64] & ~GENMASK_ULL((nbits - 1) % 64, 0)))
+ pr_err("bitmap_to_arr64(nbits == %d): tail is not safely cleared: 0x%016llx (must be 0x%016llx)\n",
+ nbits, arr[(nbits - 1) / 64],
+ GENMASK_ULL((nbits - 1) % 64, 0));
+
if (nbits < EXP1_IN_BITS - 64)
expect_eq_uint(arr[DIV_ROUND_UP(nbits, 64)], 0xa5a5a5a5);
}
@@ -869,6 +875,67 @@ static void __init test_bitmap_print_buf(void)
}
}
+static void __init test_bitmap_const_eval(void)
+{
+ DECLARE_BITMAP(bitmap, BITS_PER_LONG);
+ unsigned long initvar = BIT(2);
+ unsigned long bitopvar = 0;
+ unsigned long var = 0;
+ int res;
+
+ /*
+ * Compilers must be able to optimize all of those to compile-time
+ * constants on any supported optimization level (-O2, -Os) and any
+ * architecture. Otherwise, trigger a build bug.
+ * The whole function gets optimized out then, there's nothing to do
+ * in runtime.
+ */
+
+ /*
+ * Equals to `unsigned long bitmap[1] = { GENMASK(6, 5), }`.
+ * Clang on s390 optimizes bitops at compile-time as intended, but at
+ * the same time stops treating @bitmap and @bitopvar as compile-time
+ * constants after regular test_bit() is executed, thus triggering the
+ * build bugs below. So, call const_test_bit() there directly until
+ * the compiler is fixed.
+ */
+ bitmap_clear(bitmap, 0, BITS_PER_LONG);
+#if defined(__s390__) && defined(__clang__)
+ if (!const_test_bit(7, bitmap))
+#else
+ if (!test_bit(7, bitmap))
+#endif
+ bitmap_set(bitmap, 5, 2);
+
+ /* Equals to `unsigned long bitopvar = BIT(20)` */
+ __change_bit(31, &bitopvar);
+ bitmap_shift_right(&bitopvar, &bitopvar, 11, BITS_PER_LONG);
+
+ /* Equals to `unsigned long var = BIT(25)` */
+ var |= BIT(25);
+ if (var & BIT(0))
+ var ^= GENMASK(9, 6);
+
+ /* __const_hweight<32|64>(GENMASK(6, 5)) == 2 */
+ res = bitmap_weight(bitmap, 20);
+ BUILD_BUG_ON(!__builtin_constant_p(res));
+ BUILD_BUG_ON(res != 2);
+
+ /* !(BIT(31) & BIT(18)) == 1 */
+ res = !test_bit(18, &bitopvar);
+ BUILD_BUG_ON(!__builtin_constant_p(res));
+ BUILD_BUG_ON(!res);
+
+ /* BIT(2) & GENMASK(14, 8) == 0 */
+ res = initvar & GENMASK(14, 8);
+ BUILD_BUG_ON(!__builtin_constant_p(res));
+ BUILD_BUG_ON(res);
+
+ /* ~BIT(25) */
+ BUILD_BUG_ON(!__builtin_constant_p(~var));
+ BUILD_BUG_ON(~var != ~BIT(25));
+}
+
static void __init selftest(void)
{
test_zero_clear();
@@ -884,6 +951,7 @@ static void __init selftest(void)
test_for_each_set_clump8();
test_bitmap_cut();
test_bitmap_print_buf();
+ test_bitmap_const_eval();
}
KSTM_MODULE_LOADERS(test_bitmap);
diff --git a/lib/test_free_pages.c b/lib/test_free_pages.c
index 25ae1ac2624a..9ebf6f5549f3 100644
--- a/lib/test_free_pages.c
+++ b/lib/test_free_pages.c
@@ -17,7 +17,7 @@ static void test_free_pages(gfp_t gfp)
for (i = 0; i < 1000 * 1000; i++) {
unsigned long addr = __get_free_pages(gfp, 3);
- struct page *page = virt_to_page(addr);
+ struct page *page = virt_to_page((void *)addr);
/* Simulate page cache getting a speculative reference */
get_page(page);
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index cfe632047839..e3965cafd27c 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -32,11 +32,32 @@
#include "test_hmm_uapi.h"
-#define DMIRROR_NDEVICES 2
+#define DMIRROR_NDEVICES 4
#define DMIRROR_RANGE_FAULT_TIMEOUT 1000
#define DEVMEM_CHUNK_SIZE (256 * 1024 * 1024U)
#define DEVMEM_CHUNKS_RESERVE 16
+/*
+ * For device_private pages, dpage is just a dummy struct page
+ * representing a piece of device memory. dmirror_devmem_alloc_page
+ * allocates a real system memory page as backing storage to fake a
+ * real device. zone_device_data points to that backing page. But
+ * for device_coherent memory, the struct page represents real
+ * physical CPU-accessible memory that we can use directly.
+ */
+#define BACKING_PAGE(page) (is_device_private_page((page)) ? \
+ (page)->zone_device_data : (page))
+
+static unsigned long spm_addr_dev0;
+module_param(spm_addr_dev0, long, 0644);
+MODULE_PARM_DESC(spm_addr_dev0,
+ "Specify start address for SPM (special purpose memory) used for device 0. By setting this Coherent device type will be used. Make sure spm_addr_dev1 is set too. Minimum SPM size should be DEVMEM_CHUNK_SIZE.");
+
+static unsigned long spm_addr_dev1;
+module_param(spm_addr_dev1, long, 0644);
+MODULE_PARM_DESC(spm_addr_dev1,
+ "Specify start address for SPM (special purpose memory) used for device 1. By setting this Coherent device type will be used. Make sure spm_addr_dev0 is set too. Minimum SPM size should be DEVMEM_CHUNK_SIZE.");
+
static const struct dev_pagemap_ops dmirror_devmem_ops;
static const struct mmu_interval_notifier_ops dmirror_min_ops;
static dev_t dmirror_dev;
@@ -87,6 +108,7 @@ struct dmirror_chunk {
struct dmirror_device {
struct cdev cdevice;
struct hmm_devmem *devmem;
+ unsigned int zone_device_type;
unsigned int devmem_capacity;
unsigned int devmem_count;
@@ -114,6 +136,21 @@ static int dmirror_bounce_init(struct dmirror_bounce *bounce,
return 0;
}
+static bool dmirror_is_private_zone(struct dmirror_device *mdevice)
+{
+ return (mdevice->zone_device_type ==
+ HMM_DMIRROR_MEMORY_DEVICE_PRIVATE) ? true : false;
+}
+
+static enum migrate_vma_direction
+dmirror_select_device(struct dmirror *dmirror)
+{
+ return (dmirror->mdevice->zone_device_type ==
+ HMM_DMIRROR_MEMORY_DEVICE_PRIVATE) ?
+ MIGRATE_VMA_SELECT_DEVICE_PRIVATE :
+ MIGRATE_VMA_SELECT_DEVICE_COHERENT;
+}
+
static void dmirror_bounce_fini(struct dmirror_bounce *bounce)
{
vfree(bounce->ptr);
@@ -454,28 +491,44 @@ fini:
return ret;
}
-static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
+static int dmirror_allocate_chunk(struct dmirror_device *mdevice,
struct page **ppage)
{
struct dmirror_chunk *devmem;
- struct resource *res;
+ struct resource *res = NULL;
unsigned long pfn;
unsigned long pfn_first;
unsigned long pfn_last;
void *ptr;
+ int ret = -ENOMEM;
devmem = kzalloc(sizeof(*devmem), GFP_KERNEL);
if (!devmem)
- return false;
+ return ret;
- res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE,
- "hmm_dmirror");
- if (IS_ERR(res))
+ switch (mdevice->zone_device_type) {
+ case HMM_DMIRROR_MEMORY_DEVICE_PRIVATE:
+ res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE,
+ "hmm_dmirror");
+ if (IS_ERR_OR_NULL(res))
+ goto err_devmem;
+ devmem->pagemap.range.start = res->start;
+ devmem->pagemap.range.end = res->end;
+ devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
+ break;
+ case HMM_DMIRROR_MEMORY_DEVICE_COHERENT:
+ devmem->pagemap.range.start = (MINOR(mdevice->cdevice.dev) - 2) ?
+ spm_addr_dev0 :
+ spm_addr_dev1;
+ devmem->pagemap.range.end = devmem->pagemap.range.start +
+ DEVMEM_CHUNK_SIZE - 1;
+ devmem->pagemap.type = MEMORY_DEVICE_COHERENT;
+ break;
+ default:
+ ret = -EINVAL;
goto err_devmem;
+ }
- devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
- devmem->pagemap.range.start = res->start;
- devmem->pagemap.range.end = res->end;
devmem->pagemap.nr_range = 1;
devmem->pagemap.ops = &dmirror_devmem_ops;
devmem->pagemap.owner = mdevice;
@@ -496,10 +549,14 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
mdevice->devmem_capacity = new_capacity;
mdevice->devmem_chunks = new_chunks;
}
-
ptr = memremap_pages(&devmem->pagemap, numa_node_id());
- if (IS_ERR(ptr))
+ if (IS_ERR_OR_NULL(ptr)) {
+ if (ptr)
+ ret = PTR_ERR(ptr);
+ else
+ ret = -EFAULT;
goto err_release;
+ }
devmem->mdevice = mdevice;
pfn_first = devmem->pagemap.range.start >> PAGE_SHIFT;
@@ -528,30 +585,35 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
}
spin_unlock(&mdevice->lock);
- return true;
+ return 0;
err_release:
mutex_unlock(&mdevice->devmem_lock);
- release_mem_region(devmem->pagemap.range.start, range_len(&devmem->pagemap.range));
+ if (res && devmem->pagemap.type == MEMORY_DEVICE_PRIVATE)
+ release_mem_region(devmem->pagemap.range.start,
+ range_len(&devmem->pagemap.range));
err_devmem:
kfree(devmem);
- return false;
+ return ret;
}
static struct page *dmirror_devmem_alloc_page(struct dmirror_device *mdevice)
{
struct page *dpage = NULL;
- struct page *rpage;
+ struct page *rpage = NULL;
/*
- * This is a fake device so we alloc real system memory to store
- * our device memory.
+ * For ZONE_DEVICE private type, this is a fake device so we allocate
+ * real system memory to store our device memory.
+ * For ZONE_DEVICE coherent type we use the actual dpage to store the
+ * data and ignore rpage.
*/
- rpage = alloc_page(GFP_HIGHUSER);
- if (!rpage)
- return NULL;
-
+ if (dmirror_is_private_zone(mdevice)) {
+ rpage = alloc_page(GFP_HIGHUSER);
+ if (!rpage)
+ return NULL;
+ }
spin_lock(&mdevice->lock);
if (mdevice->free_pages) {
@@ -561,7 +623,7 @@ static struct page *dmirror_devmem_alloc_page(struct dmirror_device *mdevice)
spin_unlock(&mdevice->lock);
} else {
spin_unlock(&mdevice->lock);
- if (!dmirror_allocate_chunk(mdevice, &dpage))
+ if (dmirror_allocate_chunk(mdevice, &dpage))
goto error;
}
@@ -570,7 +632,8 @@ static struct page *dmirror_devmem_alloc_page(struct dmirror_device *mdevice)
return dpage;
error:
- __free_page(rpage);
+ if (rpage)
+ __free_page(rpage);
return NULL;
}
@@ -596,12 +659,16 @@ static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args,
* unallocated pte_none() or read-only zero page.
*/
spage = migrate_pfn_to_page(*src);
+ if (WARN(spage && is_zone_device_page(spage),
+ "page already in device spage pfn: 0x%lx\n",
+ page_to_pfn(spage)))
+ continue;
dpage = dmirror_devmem_alloc_page(mdevice);
if (!dpage)
continue;
- rpage = dpage->zone_device_data;
+ rpage = BACKING_PAGE(dpage);
if (spage)
copy_highpage(rpage, spage);
else
@@ -615,6 +682,8 @@ static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args,
*/
rpage->zone_device_data = dmirror;
+ pr_debug("migrating from sys to dev pfn src: 0x%lx pfn dst: 0x%lx\n",
+ page_to_pfn(spage), page_to_pfn(dpage));
*dst = migrate_pfn(page_to_pfn(dpage));
if ((*src & MIGRATE_PFN_WRITE) ||
(!spage && args->vma->vm_flags & VM_WRITE))
@@ -692,11 +761,7 @@ static int dmirror_migrate_finalize_and_map(struct migrate_vma *args,
if (!dpage)
continue;
- /*
- * Store the page that holds the data so the page table
- * doesn't have to deal with ZONE_DEVICE private pages.
- */
- entry = dpage->zone_device_data;
+ entry = BACKING_PAGE(dpage);
if (*dst & MIGRATE_PFN_WRITE)
entry = xa_tag_pointer(entry, DPT_XA_TAG_WRITE);
entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC);
@@ -732,7 +797,7 @@ static int dmirror_exclusive(struct dmirror *dmirror,
mmap_read_lock(mm);
for (addr = start; addr < end; addr = next) {
- unsigned long mapped;
+ unsigned long mapped = 0;
int i;
if (end < addr + (ARRAY_SIZE(pages) << PAGE_SHIFT))
@@ -741,7 +806,13 @@ static int dmirror_exclusive(struct dmirror *dmirror,
next = addr + (ARRAY_SIZE(pages) << PAGE_SHIFT);
ret = make_device_exclusive_range(mm, addr, next, pages, NULL);
- mapped = dmirror_atomic_map(addr, next, pages, dmirror);
+ /*
+ * Do dmirror_atomic_map() iff all pages are marked for
+ * exclusive access to avoid accessing uninitialized
+ * fields of pages.
+ */
+ if (ret == (next - addr) >> PAGE_SHIFT)
+ mapped = dmirror_atomic_map(addr, next, pages, dmirror);
for (i = 0; i < ret; i++) {
if (pages[i]) {
unlock_page(pages[i]);
@@ -776,15 +847,126 @@ static int dmirror_exclusive(struct dmirror *dmirror,
return ret;
}
-static int dmirror_migrate(struct dmirror *dmirror,
- struct hmm_dmirror_cmd *cmd)
+static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
+ struct dmirror *dmirror)
+{
+ const unsigned long *src = args->src;
+ unsigned long *dst = args->dst;
+ unsigned long start = args->start;
+ unsigned long end = args->end;
+ unsigned long addr;
+
+ for (addr = start; addr < end; addr += PAGE_SIZE,
+ src++, dst++) {
+ struct page *dpage, *spage;
+
+ spage = migrate_pfn_to_page(*src);
+ if (!spage || !(*src & MIGRATE_PFN_MIGRATE))
+ continue;
+
+ if (WARN_ON(!is_device_private_page(spage) &&
+ !is_device_coherent_page(spage)))
+ continue;
+ spage = BACKING_PAGE(spage);
+ dpage = alloc_page_vma(GFP_HIGHUSER_MOVABLE, args->vma, addr);
+ if (!dpage)
+ continue;
+ pr_debug("migrating from dev to sys pfn src: 0x%lx pfn dst: 0x%lx\n",
+ page_to_pfn(spage), page_to_pfn(dpage));
+
+ lock_page(dpage);
+ xa_erase(&dmirror->pt, addr >> PAGE_SHIFT);
+ copy_highpage(dpage, spage);
+ *dst = migrate_pfn(page_to_pfn(dpage));
+ if (*src & MIGRATE_PFN_WRITE)
+ *dst |= MIGRATE_PFN_WRITE;
+ }
+ return 0;
+}
+
+static unsigned long
+dmirror_successful_migrated_pages(struct migrate_vma *migrate)
+{
+ unsigned long cpages = 0;
+ unsigned long i;
+
+ for (i = 0; i < migrate->npages; i++) {
+ if (migrate->src[i] & MIGRATE_PFN_VALID &&
+ migrate->src[i] & MIGRATE_PFN_MIGRATE)
+ cpages++;
+ }
+ return cpages;
+}
+
+static int dmirror_migrate_to_system(struct dmirror *dmirror,
+ struct hmm_dmirror_cmd *cmd)
{
unsigned long start, end, addr;
unsigned long size = cmd->npages << PAGE_SHIFT;
struct mm_struct *mm = dmirror->notifier.mm;
struct vm_area_struct *vma;
- unsigned long src_pfns[64];
- unsigned long dst_pfns[64];
+ unsigned long src_pfns[64] = { 0 };
+ unsigned long dst_pfns[64] = { 0 };
+ struct migrate_vma args;
+ unsigned long next;
+ int ret;
+
+ start = cmd->addr;
+ end = start + size;
+ if (end < start)
+ return -EINVAL;
+
+ /* Since the mm is for the mirrored process, get a reference first. */
+ if (!mmget_not_zero(mm))
+ return -EINVAL;
+
+ cmd->cpages = 0;
+ mmap_read_lock(mm);
+ for (addr = start; addr < end; addr = next) {
+ vma = vma_lookup(mm, addr);
+ if (!vma || !(vma->vm_flags & VM_READ)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ next = min(end, addr + (ARRAY_SIZE(src_pfns) << PAGE_SHIFT));
+ if (next > vma->vm_end)
+ next = vma->vm_end;
+
+ args.vma = vma;
+ args.src = src_pfns;
+ args.dst = dst_pfns;
+ args.start = addr;
+ args.end = next;
+ args.pgmap_owner = dmirror->mdevice;
+ args.flags = dmirror_select_device(dmirror);
+
+ ret = migrate_vma_setup(&args);
+ if (ret)
+ goto out;
+
+ pr_debug("Migrating from device mem to sys mem\n");
+ dmirror_devmem_fault_alloc_and_copy(&args, dmirror);
+
+ migrate_vma_pages(&args);
+ cmd->cpages += dmirror_successful_migrated_pages(&args);
+ migrate_vma_finalize(&args);
+ }
+out:
+ mmap_read_unlock(mm);
+ mmput(mm);
+
+ return ret;
+}
+
+static int dmirror_migrate_to_device(struct dmirror *dmirror,
+ struct hmm_dmirror_cmd *cmd)
+{
+ unsigned long start, end, addr;
+ unsigned long size = cmd->npages << PAGE_SHIFT;
+ struct mm_struct *mm = dmirror->notifier.mm;
+ struct vm_area_struct *vma;
+ unsigned long src_pfns[64] = { 0 };
+ unsigned long dst_pfns[64] = { 0 };
struct dmirror_bounce bounce;
struct migrate_vma args;
unsigned long next;
@@ -821,6 +1003,7 @@ static int dmirror_migrate(struct dmirror *dmirror,
if (ret)
goto out;
+ pr_debug("Migrating from sys mem to device mem\n");
dmirror_migrate_alloc_and_copy(&args, dmirror);
migrate_vma_pages(&args);
dmirror_migrate_finalize_and_map(&args, dmirror);
@@ -829,7 +1012,10 @@ static int dmirror_migrate(struct dmirror *dmirror,
mmap_read_unlock(mm);
mmput(mm);
- /* Return the migrated data for verification. */
+ /*
+ * Return the migrated data for verification.
+ * Only for pages in device zone
+ */
ret = dmirror_bounce_init(&bounce, start, size);
if (ret)
return ret;
@@ -872,6 +1058,12 @@ static void dmirror_mkentry(struct dmirror *dmirror, struct hmm_range *range,
*perm = HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL;
else
*perm = HMM_DMIRROR_PROT_DEV_PRIVATE_REMOTE;
+ } else if (is_device_coherent_page(page)) {
+ /* Is the page migrated to this device or some other? */
+ if (dmirror->mdevice == dmirror_page_to_device(page))
+ *perm = HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL;
+ else
+ *perm = HMM_DMIRROR_PROT_DEV_COHERENT_REMOTE;
} else if (is_zero_pfn(page_to_pfn(page)))
*perm = HMM_DMIRROR_PROT_ZERO;
else
@@ -1059,8 +1251,12 @@ static long dmirror_fops_unlocked_ioctl(struct file *filp,
ret = dmirror_write(dmirror, &cmd);
break;
- case HMM_DMIRROR_MIGRATE:
- ret = dmirror_migrate(dmirror, &cmd);
+ case HMM_DMIRROR_MIGRATE_TO_DEV:
+ ret = dmirror_migrate_to_device(dmirror, &cmd);
+ break;
+
+ case HMM_DMIRROR_MIGRATE_TO_SYS:
+ ret = dmirror_migrate_to_system(dmirror, &cmd);
break;
case HMM_DMIRROR_EXCLUSIVE:
@@ -1122,14 +1318,13 @@ static const struct file_operations dmirror_fops = {
static void dmirror_devmem_free(struct page *page)
{
- struct page *rpage = page->zone_device_data;
+ struct page *rpage = BACKING_PAGE(page);
struct dmirror_device *mdevice;
- if (rpage)
+ if (rpage != page)
__free_page(rpage);
mdevice = dmirror_page_to_device(page);
-
spin_lock(&mdevice->lock);
mdevice->cfree++;
page->zone_device_data = mdevice->free_pages;
@@ -1137,43 +1332,11 @@ static void dmirror_devmem_free(struct page *page)
spin_unlock(&mdevice->lock);
}
-static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
- struct dmirror *dmirror)
-{
- const unsigned long *src = args->src;
- unsigned long *dst = args->dst;
- unsigned long start = args->start;
- unsigned long end = args->end;
- unsigned long addr;
-
- for (addr = start; addr < end; addr += PAGE_SIZE,
- src++, dst++) {
- struct page *dpage, *spage;
-
- spage = migrate_pfn_to_page(*src);
- if (!spage || !(*src & MIGRATE_PFN_MIGRATE))
- continue;
- spage = spage->zone_device_data;
-
- dpage = alloc_page_vma(GFP_HIGHUSER_MOVABLE, args->vma, addr);
- if (!dpage)
- continue;
-
- lock_page(dpage);
- xa_erase(&dmirror->pt, addr >> PAGE_SHIFT);
- copy_highpage(dpage, spage);
- *dst = migrate_pfn(page_to_pfn(dpage));
- if (*src & MIGRATE_PFN_WRITE)
- *dst |= MIGRATE_PFN_WRITE;
- }
- return 0;
-}
-
static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf)
{
struct migrate_vma args;
- unsigned long src_pfns;
- unsigned long dst_pfns;
+ unsigned long src_pfns = 0;
+ unsigned long dst_pfns = 0;
struct page *rpage;
struct dmirror *dmirror;
vm_fault_t ret;
@@ -1193,7 +1356,7 @@ static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf)
args.src = &src_pfns;
args.dst = &dst_pfns;
args.pgmap_owner = dmirror->mdevice;
- args.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
+ args.flags = dmirror_select_device(dmirror);
if (migrate_vma_setup(&args))
return VM_FAULT_SIGBUS;
@@ -1231,10 +1394,8 @@ static int dmirror_device_init(struct dmirror_device *mdevice, int id)
if (ret)
return ret;
- /* Build a list of free ZONE_DEVICE private struct pages */
- dmirror_allocate_chunk(mdevice, NULL);
-
- return 0;
+ /* Build a list of free ZONE_DEVICE struct pages */
+ return dmirror_allocate_chunk(mdevice, NULL);
}
static void dmirror_device_remove(struct dmirror_device *mdevice)
@@ -1247,8 +1408,9 @@ static void dmirror_device_remove(struct dmirror_device *mdevice)
mdevice->devmem_chunks[i];
memunmap_pages(&devmem->pagemap);
- release_mem_region(devmem->pagemap.range.start,
- range_len(&devmem->pagemap.range));
+ if (devmem->pagemap.type == MEMORY_DEVICE_PRIVATE)
+ release_mem_region(devmem->pagemap.range.start,
+ range_len(&devmem->pagemap.range));
kfree(devmem);
}
kfree(mdevice->devmem_chunks);
@@ -1260,14 +1422,26 @@ static void dmirror_device_remove(struct dmirror_device *mdevice)
static int __init hmm_dmirror_init(void)
{
int ret;
- int id;
+ int id = 0;
+ int ndevices = 0;
ret = alloc_chrdev_region(&dmirror_dev, 0, DMIRROR_NDEVICES,
"HMM_DMIRROR");
if (ret)
goto err_unreg;
- for (id = 0; id < DMIRROR_NDEVICES; id++) {
+ memset(dmirror_devices, 0, DMIRROR_NDEVICES * sizeof(dmirror_devices[0]));
+ dmirror_devices[ndevices++].zone_device_type =
+ HMM_DMIRROR_MEMORY_DEVICE_PRIVATE;
+ dmirror_devices[ndevices++].zone_device_type =
+ HMM_DMIRROR_MEMORY_DEVICE_PRIVATE;
+ if (spm_addr_dev0 && spm_addr_dev1) {
+ dmirror_devices[ndevices++].zone_device_type =
+ HMM_DMIRROR_MEMORY_DEVICE_COHERENT;
+ dmirror_devices[ndevices++].zone_device_type =
+ HMM_DMIRROR_MEMORY_DEVICE_COHERENT;
+ }
+ for (id = 0; id < ndevices; id++) {
ret = dmirror_device_init(dmirror_devices + id, id);
if (ret)
goto err_chrdev;
@@ -1289,7 +1463,8 @@ static void __exit hmm_dmirror_exit(void)
int id;
for (id = 0; id < DMIRROR_NDEVICES; id++)
- dmirror_device_remove(dmirror_devices + id);
+ if (dmirror_devices[id].zone_device_type)
+ dmirror_device_remove(dmirror_devices + id);
unregister_chrdev_region(dmirror_dev, DMIRROR_NDEVICES);
}
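
With the coherent-device support above, a dmirror device page is either a stand-in whose data lives in a separate system page (device private) or a directly usable page (device coherent), and BACKING_PAGE() hides the difference. A minimal sketch of how callers in this test use it (illustrative variable names):

	struct page *data_page = BACKING_PAGE(dpage);

	/*
	 * device private: data_page is the system page stored in
	 * dpage->zone_device_data; device coherent: data_page == dpage
	 * and the device page itself holds the data.
	 */
	copy_highpage(data_page, spage);
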
diff --git a/lib/test_hmm_uapi.h b/lib/test_hmm_uapi.h
index f14dea5dcd06..e31d58c9034a 100644
--- a/lib/test_hmm_uapi.h
+++ b/lib/test_hmm_uapi.h
@@ -31,10 +31,11 @@ struct hmm_dmirror_cmd {
/* Expose the address space of the calling process through hmm device file */
#define HMM_DMIRROR_READ _IOWR('H', 0x00, struct hmm_dmirror_cmd)
#define HMM_DMIRROR_WRITE _IOWR('H', 0x01, struct hmm_dmirror_cmd)
-#define HMM_DMIRROR_MIGRATE _IOWR('H', 0x02, struct hmm_dmirror_cmd)
-#define HMM_DMIRROR_SNAPSHOT _IOWR('H', 0x03, struct hmm_dmirror_cmd)
-#define HMM_DMIRROR_EXCLUSIVE _IOWR('H', 0x04, struct hmm_dmirror_cmd)
-#define HMM_DMIRROR_CHECK_EXCLUSIVE _IOWR('H', 0x05, struct hmm_dmirror_cmd)
+#define HMM_DMIRROR_MIGRATE_TO_DEV _IOWR('H', 0x02, struct hmm_dmirror_cmd)
+#define HMM_DMIRROR_MIGRATE_TO_SYS _IOWR('H', 0x03, struct hmm_dmirror_cmd)
+#define HMM_DMIRROR_SNAPSHOT _IOWR('H', 0x04, struct hmm_dmirror_cmd)
+#define HMM_DMIRROR_EXCLUSIVE _IOWR('H', 0x05, struct hmm_dmirror_cmd)
+#define HMM_DMIRROR_CHECK_EXCLUSIVE _IOWR('H', 0x06, struct hmm_dmirror_cmd)
/*
* Values returned in hmm_dmirror_cmd.ptr for HMM_DMIRROR_SNAPSHOT.
@@ -49,6 +50,8 @@ struct hmm_dmirror_cmd {
* device the ioctl() is made
* HMM_DMIRROR_PROT_DEV_PRIVATE_REMOTE: Migrated device private page on some
* other device
+ * HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL: Migrated device coherent page on the device
+ * the ioctl() is made
*/
enum {
HMM_DMIRROR_PROT_ERROR = 0xFF,
@@ -60,6 +63,14 @@ enum {
HMM_DMIRROR_PROT_ZERO = 0x10,
HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL = 0x20,
HMM_DMIRROR_PROT_DEV_PRIVATE_REMOTE = 0x30,
+ HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL = 0x40,
+ HMM_DMIRROR_PROT_DEV_COHERENT_REMOTE = 0x50,
+};
+
+enum {
+ /* 0 is reserved to catch uninitialized type fields */
+ HMM_DMIRROR_MEMORY_DEVICE_PRIVATE = 1,
+ HMM_DMIRROR_MEMORY_DEVICE_COHERENT,
};
#endif /* _LIB_TEST_HMM_UAPI_H */
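
With the ioctl split, user space now picks the migration direction explicitly. A hedged sketch of a selftest-style caller (field names follow struct hmm_dmirror_cmd in this header; the fd, buf and npages variables are assumed to be set up elsewhere, and error handling is omitted):

	struct hmm_dmirror_cmd cmd = {
		.addr = (uint64_t)(uintptr_t)buf,	/* start of the mapped range */
		.npages = npages,
	};

	/* Push the range to device memory, then pull it back. */
	ioctl(fd, HMM_DMIRROR_MIGRATE_TO_DEV, &cmd);
	ioctl(fd, HMM_DMIRROR_MIGRATE_TO_SYS, &cmd);
	printf("%llu pages migrated back\n", (unsigned long long)cmd.cpages);
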
diff --git a/lib/test_printf.c b/lib/test_printf.c
index 07309c45f327..4bd15a593fbd 100644
--- a/lib/test_printf.c
+++ b/lib/test_printf.c
@@ -30,6 +30,12 @@
#define PAD_SIZE 16
#define FILL_CHAR '$'
+#define NOWARN(option, comment, block) \
+ __diag_push(); \
+ __diag_ignore_all(#option, comment); \
+ block \
+ __diag_pop();
+
KSTM_MODULE_GLOBALS();
static char *test_buffer __initdata;
@@ -78,12 +84,17 @@ do_test(int bufsize, const char *expect, int elen,
return 1;
}
- if (memchr_inv(test_buffer + written + 1, FILL_CHAR, BUF_SIZE + PAD_SIZE - (written + 1))) {
+ if (memchr_inv(test_buffer + written + 1, FILL_CHAR, bufsize - (written + 1))) {
pr_warn("vsnprintf(buf, %d, \"%s\", ...) wrote beyond the nul-terminator\n",
bufsize, fmt);
return 1;
}
+ if (memchr_inv(test_buffer + bufsize, FILL_CHAR, BUF_SIZE + PAD_SIZE - bufsize)) {
+ pr_warn("vsnprintf(buf, %d, \"%s\", ...) wrote beyond buffer\n", bufsize, fmt);
+ return 1;
+ }
+
if (memcmp(test_buffer, expect, written)) {
pr_warn("vsnprintf(buf, %d, \"%s\", ...) wrote '%s', expected '%.*s'\n",
bufsize, fmt, test_buffer, written, expect);
@@ -154,9 +165,11 @@ test_number(void)
test("0x1234abcd ", "%#-12x", 0x1234abcd);
test(" 0x1234abcd", "%#12x", 0x1234abcd);
test("0|001| 12|+123| 1234|-123|-1234", "%d|%03d|%3d|%+d|% d|%+d|% d", 0, 1, 12, 123, 1234, -123, -1234);
- test("0|1|1|128|255", "%hhu|%hhu|%hhu|%hhu|%hhu", 0, 1, 257, 128, -1);
- test("0|1|1|-128|-1", "%hhd|%hhd|%hhd|%hhd|%hhd", 0, 1, 257, 128, -1);
- test("2015122420151225", "%ho%ho%#ho", 1037, 5282, -11627);
+ NOWARN(-Wformat, "Intentionally test narrowing conversion specifiers.", {
+ test("0|1|1|128|255", "%hhu|%hhu|%hhu|%hhu|%hhu", 0, 1, 257, 128, -1);
+ test("0|1|1|-128|-1", "%hhd|%hhd|%hhd|%hhd|%hhd", 0, 1, 257, 128, -1);
+ test("2015122420151225", "%ho%ho%#ho", 1037, 5282, -11627);
+ })
/*
* POSIX/C99: »The result of converting zero with an explicit
* precision of zero shall be no characters.« Hence the output
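
For reference, the NOWARN() wrapper above stringizes its first argument, so the block expands to a plain compiler-diagnostic push/ignore/pop sequence and only the intentionally narrowing test() calls are exempted from -Wformat; a sketch of the expansion:

	__diag_push();
	__diag_ignore_all("-Wformat", "Intentionally test narrowing conversion specifiers.");
	test("0|1|1|128|255", "%hhu|%hhu|%hhu|%hhu|%hhu", 0, 1, 257, 128, -1);
	/* ... remaining narrowing tests ... */
	__diag_pop();
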
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
index cf41fd6df42a..4f2f2d1bac56 100644
--- a/lib/test_vmalloc.c
+++ b/lib/test_vmalloc.c
@@ -74,12 +74,13 @@ test_report_one_done(void)
static int random_size_align_alloc_test(void)
{
- unsigned long size, align, rnd;
+ unsigned long size, align;
+ unsigned int rnd;
void *ptr;
int i;
for (i = 0; i < test_loop_count; i++) {
- get_random_bytes(&rnd, sizeof(rnd));
+ rnd = prandom_u32();
/*
* Maximum 1024 pages, if PAGE_SIZE is 4096.
@@ -150,7 +151,7 @@ static int random_size_alloc_test(void)
int i;
for (i = 0; i < test_loop_count; i++) {
- get_random_bytes(&n, sizeof(i));
+ n = prandom_u32();
n = (n % 100) + 1;
p = vmalloc(n * PAGE_SIZE);
@@ -294,14 +295,14 @@ pcpu_alloc_test(void)
for (i = 0; i < 35000; i++) {
unsigned int r;
- get_random_bytes(&r, sizeof(i));
+ r = prandom_u32();
size = (r % (PAGE_SIZE / 4)) + 1;
/*
* Maximum PAGE_SIZE
*/
- get_random_bytes(&r, sizeof(i));
- align = 1 << ((i % 11) + 1);
+ r = prandom_u32();
+ align = 1 << ((r % 11) + 1);
pcpu[i] = __alloc_percpu(size, align);
if (!pcpu[i])
@@ -396,7 +397,7 @@ static void shuffle_array(int *arr, int n)
int i, j;
for (i = n - 1; i > 0; i--) {
- get_random_bytes(&rnd, sizeof(rnd));
+ rnd = prandom_u32();
/* Cut the range. */
j = rnd % i;
diff --git a/lib/trace_readwrite.c b/lib/trace_readwrite.c
new file mode 100644
index 000000000000..88637038b30c
--- /dev/null
+++ b/lib/trace_readwrite.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Register read and write tracepoints
+ *
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/ftrace.h>
+#include <linux/module.h>
+#include <asm-generic/io.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/rwmmio.h>
+
+#ifdef CONFIG_TRACE_MMIO_ACCESS
+void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+ unsigned long caller_addr)
+{
+ trace_rwmmio_write(caller_addr, val, width, addr);
+}
+EXPORT_SYMBOL_GPL(log_write_mmio);
+EXPORT_TRACEPOINT_SYMBOL_GPL(rwmmio_write);
+
+void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+ unsigned long caller_addr)
+{
+ trace_rwmmio_post_write(caller_addr, val, width, addr);
+}
+EXPORT_SYMBOL_GPL(log_post_write_mmio);
+EXPORT_TRACEPOINT_SYMBOL_GPL(rwmmio_post_write);
+
+void log_read_mmio(u8 width, const volatile void __iomem *addr,
+ unsigned long caller_addr)
+{
+ trace_rwmmio_read(caller_addr, width, addr);
+}
+EXPORT_SYMBOL_GPL(log_read_mmio);
+EXPORT_TRACEPOINT_SYMBOL_GPL(rwmmio_read);
+
+void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
+ unsigned long caller_addr)
+{
+ trace_rwmmio_post_read(caller_addr, val, width, addr);
+}
+EXPORT_SYMBOL_GPL(log_post_read_mmio);
+EXPORT_TRACEPOINT_SYMBOL_GPL(rwmmio_post_read);
+#endif /* CONFIG_TRACE_MMIO_ACCESS */
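
The new lib/trace_readwrite.c only provides the out-of-line tracepoint helpers; with CONFIG_TRACE_MMIO_ACCESS the MMIO accessors are expected to call them around each access. A minimal sketch of that calling pattern (not the actual asm-generic/io.h implementation):

	static inline void demo_writel(u32 value, volatile void __iomem *addr)
	{
		log_write_mmio(value, 32, addr, _THIS_IP_);	/* before the store */
		__raw_writel((u32 __force)cpu_to_le32(value), addr);
		log_post_write_mmio(value, 32, addr, _THIS_IP_);	/* after the store */
	}
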
diff --git a/lib/ts_bm.c b/lib/ts_bm.c
index 4cf250031f0f..1f2234221dd1 100644
--- a/lib/ts_bm.c
+++ b/lib/ts_bm.c
@@ -80,7 +80,7 @@ static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
/* London calling... */
DEBUGP("found!\n");
- return consumed += (shift-(bm->patlen-1));
+ return consumed + (shift-(bm->patlen-1));
next: bs = bm->bad_shift[text[shift-i]];
diff --git a/mm/Kconfig b/mm/Kconfig
index 169e64192e48..0331f1461f81 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -33,7 +33,7 @@ config ZSWAP
pages that are in the process of being swapped out and attempts to
compress them into a dynamically allocated RAM-based memory pool.
This can result in a significant I/O reduction on swap device and,
- in the case where decompressing from RAM is faster that swap device
+ in the case where decompressing from RAM is faster than swap device
reads, can also improve workload performance.
This is marked experimental because it is a new feature (as of
@@ -639,14 +639,6 @@ config BOUNCE
memory available to the CPU. Enabled by default when HIGHMEM is
selected, but you may say n to override this.
-config VIRT_TO_BUS
- bool
- help
- An architecture should select this if it implements the
- deprecated interface virt_to_bus(). All new architectures
- should probably not select this.
-
-
config MMU_NOTIFIER
bool
select SRCU
@@ -663,7 +655,7 @@ config KSM
the many instances by a single page with that content, so
saving memory until one or another app needs to modify the content.
Recommended for use with KVM, or with other duplicative applications.
- See Documentation/vm/ksm.rst for more information: KSM is inactive
+ See Documentation/mm/ksm.rst for more information: KSM is inactive
until a program has madvised that an area is MADV_MERGEABLE, and
root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
@@ -951,9 +943,6 @@ config ARCH_HAS_CURRENT_STACK_POINTER
register alias named "current_stack_pointer", this config can be
selected.
-config ARCH_HAS_VM_GET_PAGE_PROT
- bool
-
config ARCH_HAS_PTE_DEVMAP
bool
@@ -994,9 +983,14 @@ config HMM_MIRROR
bool
depends on MMU
+config GET_FREE_REGION
+ depends on SPARSEMEM
+ bool
+
config DEVICE_PRIVATE
bool "Unaddressable device memory (GPU memory, ...)"
depends on ZONE_DEVICE
+ select GET_FREE_REGION
help
Allows creation of struct pages to represent unaddressable device
diff --git a/mm/Makefile b/mm/Makefile
index 6f9ffa968a1a..9a564f836403 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -133,3 +133,4 @@ obj-$(CONFIG_PAGE_REPORTING) += page_reporting.o
obj-$(CONFIG_IO_MAPPING) += io-mapping.o
obj-$(CONFIG_HAVE_BOOTMEM_INFO_NODE) += bootmem_info.o
obj-$(CONFIG_GENERIC_IOREMAP) += ioremap.o
+obj-$(CONFIG_SHRINKER_DEBUG) += shrinker_debug.o
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 95550b8fa7fe..de65cb1e5f76 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -260,10 +260,10 @@ void wb_wakeup_delayed(struct bdi_writeback *wb)
unsigned long timeout;
timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
- spin_lock_bh(&wb->work_lock);
+ spin_lock_irq(&wb->work_lock);
if (test_bit(WB_registered, &wb->state))
queue_delayed_work(bdi_wq, &wb->dwork, timeout);
- spin_unlock_bh(&wb->work_lock);
+ spin_unlock_irq(&wb->work_lock);
}
static void wb_update_bandwidth_workfn(struct work_struct *work)
@@ -334,12 +334,12 @@ static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);
static void wb_shutdown(struct bdi_writeback *wb)
{
/* Make sure nobody queues further work */
- spin_lock_bh(&wb->work_lock);
+ spin_lock_irq(&wb->work_lock);
if (!test_and_clear_bit(WB_registered, &wb->state)) {
- spin_unlock_bh(&wb->work_lock);
+ spin_unlock_irq(&wb->work_lock);
return;
}
- spin_unlock_bh(&wb->work_lock);
+ spin_unlock_irq(&wb->work_lock);
cgwb_remove_from_bdi_list(wb);
/*
diff --git a/mm/bootmem_info.c b/mm/bootmem_info.c
index f18a631e7479..b1efebfcf94b 100644
--- a/mm/bootmem_info.c
+++ b/mm/bootmem_info.c
@@ -12,6 +12,7 @@
#include <linux/memblock.h>
#include <linux/bootmem_info.h>
#include <linux/memory_hotplug.h>
+#include <linux/kmemleak.h>
void get_page_bootmem(unsigned long info, struct page *page, unsigned long type)
{
@@ -33,6 +34,7 @@ void put_page_bootmem(struct page *page)
ClearPagePrivate(page);
set_page_private(page, 0);
INIT_LIST_HEAD(&page->lru);
+ kmemleak_free_part(page_to_virt(page), PAGE_SIZE);
free_reserved_page(page);
}
}
diff --git a/mm/cma_debug.c b/mm/cma_debug.c
index 2e7704955f4f..c3ffe253e055 100644
--- a/mm/cma_debug.c
+++ b/mm/cma_debug.c
@@ -163,7 +163,7 @@ DEFINE_DEBUGFS_ATTRIBUTE(cma_alloc_fops, NULL, cma_alloc_write, "%llu\n");
static void cma_debugfs_add_one(struct cma *cma, struct dentry *root_dentry)
{
struct dentry *tmp;
- char name[16];
+ char name[CMA_MAX_NAME];
scnprintf(name, sizeof(name), "cma-%s", cma->name);
diff --git a/mm/compaction.c b/mm/compaction.c
index a2c53fcf933e..640fa76228dd 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -613,6 +613,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
break;
set_page_private(page, order);
+ nr_scanned += isolated - 1;
total_isolated += isolated;
cc->nr_freepages += isolated;
list_add_tail(&page->lru, freelist);
@@ -1099,6 +1100,7 @@ isolate_success:
isolate_success_no_list:
cc->nr_migratepages += compound_nr(page);
nr_isolated += compound_nr(page);
+ nr_scanned += compound_nr(page) - 1;
/*
* Avoid isolating too much unless this block is being
@@ -1502,6 +1504,7 @@ fast_isolate_freepages(struct compact_control *cc)
if (__isolate_free_page(page, order)) {
set_page_private(page, order);
nr_isolated = 1 << order;
+ nr_scanned += nr_isolated - 1;
cc->nr_freepages += nr_isolated;
list_add_tail(&page->lru, &cc->freepages);
count_compact_events(COMPACTISOLATED, nr_isolated);
@@ -3009,7 +3012,7 @@ void kcompactd_run(int nid)
/*
* Called by memory hotplug when all memory in a node is offlined. Caller must
- * hold mem_hotplug_begin/end().
+ * be holding mem_hotplug_begin/done().
*/
void kcompactd_stop(int nid)
{
diff --git a/mm/damon/Kconfig b/mm/damon/Kconfig
index 9b559c76d6dd..66265e3a9c65 100644
--- a/mm/damon/Kconfig
+++ b/mm/damon/Kconfig
@@ -92,4 +92,12 @@ config DAMON_RECLAIM
reclamation under light memory pressure, while the traditional page
scanning-based reclamation is used for heavy pressure.
+config DAMON_LRU_SORT
+ bool "Build DAMON-based LRU-lists sorting (DAMON_LRU_SORT)"
+ depends on DAMON_PADDR
+ help
+ This builds the DAMON-based LRU-lists sorting subsystem. It tries to
+ protect frequently accessed (hot) pages while letting rarely accessed
+ (cold) pages be reclaimed first under memory pressure.
+
endmenu
diff --git a/mm/damon/Makefile b/mm/damon/Makefile
index dbf7190b4144..3e6b8ad73858 100644
--- a/mm/damon/Makefile
+++ b/mm/damon/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_DAMON_PADDR) += ops-common.o paddr.o
obj-$(CONFIG_DAMON_SYSFS) += sysfs.o
obj-$(CONFIG_DAMON_DBGFS) += dbgfs.o
obj-$(CONFIG_DAMON_RECLAIM) += reclaim.o
+obj-$(CONFIG_DAMON_LRU_SORT) += lru_sort.o
diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c
index a0dab8b5e45f..cfdf63132d5a 100644
--- a/mm/damon/dbgfs.c
+++ b/mm/damon/dbgfs.c
@@ -97,6 +97,31 @@ out:
return ret;
}
+/*
+ * Return corresponding dbgfs' scheme action value (int) for the given
+ * damos_action if the given damos_action value is valid and supported by
+ * dbgfs, negative error code otherwise.
+ */
+static int damos_action_to_dbgfs_scheme_action(enum damos_action action)
+{
+ switch (action) {
+ case DAMOS_WILLNEED:
+ return 0;
+ case DAMOS_COLD:
+ return 1;
+ case DAMOS_PAGEOUT:
+ return 2;
+ case DAMOS_HUGEPAGE:
+ return 3;
+ case DAMOS_NOHUGEPAGE:
+ return 4;
+ case DAMOS_STAT:
+ return 5;
+ default:
+ return -EINVAL;
+ }
+}
+
static ssize_t sprint_schemes(struct damon_ctx *c, char *buf, ssize_t len)
{
struct damos *s;
@@ -109,7 +134,7 @@ static ssize_t sprint_schemes(struct damon_ctx *c, char *buf, ssize_t len)
s->min_sz_region, s->max_sz_region,
s->min_nr_accesses, s->max_nr_accesses,
s->min_age_region, s->max_age_region,
- s->action,
+ damos_action_to_dbgfs_scheme_action(s->action),
s->quota.ms, s->quota.sz,
s->quota.reset_interval,
s->quota.weight_sz,
@@ -160,18 +185,27 @@ static void free_schemes_arr(struct damos **schemes, ssize_t nr_schemes)
kfree(schemes);
}
-static bool damos_action_valid(int action)
+/*
+ * Return corresponding damos_action for the given dbgfs input for a scheme
+ * action if the input is valid, negative error code otherwise.
+ */
+static enum damos_action dbgfs_scheme_action_to_damos_action(int dbgfs_action)
{
- switch (action) {
- case DAMOS_WILLNEED:
- case DAMOS_COLD:
- case DAMOS_PAGEOUT:
- case DAMOS_HUGEPAGE:
- case DAMOS_NOHUGEPAGE:
- case DAMOS_STAT:
- return true;
+ switch (dbgfs_action) {
+ case 0:
+ return DAMOS_WILLNEED;
+ case 1:
+ return DAMOS_COLD;
+ case 2:
+ return DAMOS_PAGEOUT;
+ case 3:
+ return DAMOS_HUGEPAGE;
+ case 4:
+ return DAMOS_NOHUGEPAGE;
+ case 5:
+ return DAMOS_STAT;
default:
- return false;
+ return -EINVAL;
}
}
@@ -189,7 +223,8 @@ static struct damos **str_to_schemes(const char *str, ssize_t len,
int pos = 0, parsed, ret;
unsigned long min_sz, max_sz;
unsigned int min_nr_a, max_nr_a, min_age, max_age;
- unsigned int action;
+ unsigned int action_input;
+ enum damos_action action;
schemes = kmalloc_array(max_nr_schemes, sizeof(scheme),
GFP_KERNEL);
@@ -204,7 +239,7 @@ static struct damos **str_to_schemes(const char *str, ssize_t len,
ret = sscanf(&str[pos],
"%lu %lu %u %u %u %u %u %lu %lu %lu %u %u %u %u %lu %lu %lu %lu%n",
&min_sz, &max_sz, &min_nr_a, &max_nr_a,
- &min_age, &max_age, &action, &quota.ms,
+ &min_age, &max_age, &action_input, &quota.ms,
&quota.sz, &quota.reset_interval,
&quota.weight_sz, &quota.weight_nr_accesses,
&quota.weight_age, &wmarks.metric,
@@ -212,7 +247,8 @@ static struct damos **str_to_schemes(const char *str, ssize_t len,
&wmarks.low, &parsed);
if (ret != 18)
break;
- if (!damos_action_valid(action))
+ action = dbgfs_scheme_action_to_damos_action(action_input);
+ if ((int)action < 0)
goto fail;
if (min_sz > max_sz || min_nr_a > max_nr_a || min_age > max_age)
@@ -275,11 +311,6 @@ out:
return ret;
}
-static inline bool target_has_pid(const struct damon_ctx *ctx)
-{
- return ctx->ops.id == DAMON_OPS_VADDR;
-}
-
static ssize_t sprint_target_ids(struct damon_ctx *ctx, char *buf, ssize_t len)
{
struct damon_target *t;
@@ -288,7 +319,7 @@ static ssize_t sprint_target_ids(struct damon_ctx *ctx, char *buf, ssize_t len)
int rc;
damon_for_each_target(t, ctx) {
- if (target_has_pid(ctx))
+ if (damon_target_has_pid(ctx))
/* Show pid numbers to debugfs users */
id = pid_vnr(t->pid);
else
@@ -415,7 +446,7 @@ static int dbgfs_set_targets(struct damon_ctx *ctx, ssize_t nr_targets,
struct damon_target *t, *next;
damon_for_each_target_safe(t, next, ctx) {
- if (target_has_pid(ctx))
+ if (damon_target_has_pid(ctx))
put_pid(t->pid);
damon_destroy_target(t);
}
@@ -425,11 +456,11 @@ static int dbgfs_set_targets(struct damon_ctx *ctx, ssize_t nr_targets,
if (!t) {
damon_for_each_target_safe(t, next, ctx)
damon_destroy_target(t);
- if (target_has_pid(ctx))
+ if (damon_target_has_pid(ctx))
dbgfs_put_pids(pids, nr_targets);
return -ENOMEM;
}
- if (target_has_pid(ctx))
+ if (damon_target_has_pid(ctx))
t->pid = pids[i];
damon_add_target(ctx, t);
}
@@ -722,7 +753,7 @@ static void dbgfs_before_terminate(struct damon_ctx *ctx)
{
struct damon_target *t, *next;
- if (!target_has_pid(ctx))
+ if (!damon_target_has_pid(ctx))
return;
mutex_lock(&ctx->kdamond_lock);
@@ -787,6 +818,9 @@ static int dbgfs_mk_context(char *name)
return -ENOENT;
new_dir = debugfs_create_dir(name, root);
+ /* Below check is required for a potential duplicated name case */
+ if (IS_ERR(new_dir))
+ return PTR_ERR(new_dir);
dbgfs_dirs[dbgfs_nr_ctxs] = new_dir;
new_ctx = dbgfs_new_ctx();
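
The two conversion helpers above make the debugfs scheme-action numbers a stable ABI that is decoupled from enum damos_action, so a future reordering of the kernel enum cannot silently change what user-supplied numbers mean. A small illustrative round trip (hypothetical caller):

	enum damos_action action = dbgfs_scheme_action_to_damos_action(2);

	if ((int)action < 0)
		return -EINVAL;		/* unknown debugfs value */
	/* 2 always means DAMOS_PAGEOUT to debugfs users, and maps back to 2. */
	WARN_ON(damos_action_to_dbgfs_scheme_action(action) != 2);
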
diff --git a/mm/damon/lru_sort.c b/mm/damon/lru_sort.c
new file mode 100644
index 000000000000..9de6f00a71c5
--- /dev/null
+++ b/mm/damon/lru_sort.c
@@ -0,0 +1,548 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DAMON-based LRU-lists Sorting
+ *
+ * Author: SeongJae Park <sj@kernel.org>
+ */
+
+#define pr_fmt(fmt) "damon-lru-sort: " fmt
+
+#include <linux/damon.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+
+#ifdef MODULE_PARAM_PREFIX
+#undef MODULE_PARAM_PREFIX
+#endif
+#define MODULE_PARAM_PREFIX "damon_lru_sort."
+
+/*
+ * Enable or disable DAMON_LRU_SORT.
+ *
+ * You can enable DAMON_LRU_SORT by setting the value of this parameter as
+ * ``Y``. Setting it as ``N`` disables DAMON_LRU_SORT. Note that
+ * DAMON_LRU_SORT may do no real monitoring or LRU-lists sorting due to the
+ * watermarks-based activation condition. Refer to the descriptions of the
+ * watermarks parameters below for details.
+ */
+static bool enabled __read_mostly;
+
+/*
+ * Make DAMON_LRU_SORT read the input parameters again, except ``enabled``.
+ *
+ * Input parameters that are updated while DAMON_LRU_SORT is running are not
+ * applied by default. Once this parameter is set as ``Y``, DAMON_LRU_SORT
+ * reads the values of the parameters except ``enabled`` again. Once the
+ * re-reading is done, this parameter is set as ``N``. If invalid parameters
+ * are found during the re-reading, DAMON_LRU_SORT will be disabled.
+ */
+static bool commit_inputs __read_mostly;
+module_param(commit_inputs, bool, 0600);
+
+/*
+ * Access frequency threshold for hot memory regions identification in permil.
+ *
+ * If a memory region is accessed at this frequency or higher, DAMON_LRU_SORT
+ * identifies the region as hot and marks it as accessed on the LRU list, so
+ * that it will not be reclaimed under memory pressure. 50% by default.
+ */
+static unsigned long hot_thres_access_freq = 500;
+module_param(hot_thres_access_freq, ulong, 0600);
+
+/*
+ * Time threshold for cold memory regions identification in microseconds.
+ *
+ * If a memory region is not accessed for this amount of time or longer,
+ * DAMON_LRU_SORT identifies the region as cold and marks it as unaccessed on
+ * the LRU list, so that it can be reclaimed first under memory pressure.
+ * 120 seconds by default.
+ */
+static unsigned long cold_min_age __read_mostly = 120000000;
+module_param(cold_min_age, ulong, 0600);
+
+/*
+ * Limit of time for trying the LRU lists sorting in milliseconds.
+ *
+ * DAMON_LRU_SORT tries to use only up to this time within a time window
+ * (quota_reset_interval_ms) for trying LRU lists sorting. This can be used
+ * for limiting CPU consumption of DAMON_LRU_SORT. If the value is zero, the
+ * limit is disabled.
+ *
+ * 10 ms by default.
+ */
+static unsigned long quota_ms __read_mostly = 10;
+module_param(quota_ms, ulong, 0600);
+
+/*
+ * The time quota charge reset interval in milliseconds.
+ *
+ * The charge reset interval for the quota of time (quota_ms). That is,
+ * DAMON_LRU_SORT does not try LRU-lists sorting for more than quota_ms
+ * milliseconds or quota_sz bytes within quota_reset_interval_ms milliseconds.
+ *
+ * 1 second by default.
+ */
+static unsigned long quota_reset_interval_ms __read_mostly = 1000;
+module_param(quota_reset_interval_ms, ulong, 0600);
+
+/*
+ * The watermarks check time interval in microseconds.
+ *
+ * Minimal time to wait before checking the watermarks, when DAMON_LRU_SORT is
+ * enabled but inactive due to its watermarks rule. 5 seconds by default.
+ */
+static unsigned long wmarks_interval __read_mostly = 5000000;
+module_param(wmarks_interval, ulong, 0600);
+
+/*
+ * Free memory rate (per thousand) for the high watermark.
+ *
+ * If free memory of the system in bytes per thousand bytes is higher than
+ * this, DAMON_LRU_SORT becomes inactive, so it does nothing but periodically
+ * checks the watermarks. 200 (20%) by default.
+ */
+static unsigned long wmarks_high __read_mostly = 200;
+module_param(wmarks_high, ulong, 0600);
+
+/*
+ * Free memory rate (per thousand) for the middle watermark.
+ *
+ * If free memory of the system in bytes per thousand bytes is between this and
+ * the low watermark, DAMON_LRU_SORT becomes active, so it starts the monitoring
+ * and the LRU-lists sorting. 150 (15%) by default.
+ */
+static unsigned long wmarks_mid __read_mostly = 150;
+module_param(wmarks_mid, ulong, 0600);
+
+/*
+ * Free memory rate (per thousand) for the low watermark.
+ *
+ * If free memory of the system in bytes per thousand bytes is lower than this,
+ * DAMON_LRU_SORT becomes inactive, so it does nothing but periodically checks
+ * the watermarks. 50 (5%) by default.
+ */
+static unsigned long wmarks_low __read_mostly = 50;
+module_param(wmarks_low, ulong, 0600);
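
Taken together, and per the parameter descriptions above, the three watermarks gate the whole scheme on the system's free-memory ratio. With the defaults as a worked example: while free memory stays above 200/1000 (20%) DAMON_LRU_SORT remains inactive; once it falls to the mid watermark of 150/1000 (15%) or below, monitoring and LRU-lists sorting start; and if free memory drops below 50/1000 (5%), the scheme deactivates again so that regular reclaim is not disturbed.
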
+
+/*
+ * Sampling interval for the monitoring in microseconds.
+ *
+ * The sampling interval of DAMON for the hot/cold memory monitoring. Please
+ * refer to the DAMON documentation for more detail. 5 ms by default.
+ */
+static unsigned long sample_interval __read_mostly = 5000;
+module_param(sample_interval, ulong, 0600);
+
+/*
+ * Aggregation interval for the monitoring in microseconds.
+ *
+ * The aggregation interval of DAMON for the hot/cold memory monitoring.
+ * Please refer to the DAMON documentation for more detail. 100 ms by default.
+ */
+static unsigned long aggr_interval __read_mostly = 100000;
+module_param(aggr_interval, ulong, 0600);
+
+/*
+ * Minimum number of monitoring regions.
+ *
+ * The minimal number of monitoring regions of DAMON for the hot/cold memory
+ * monitoring. This can be used to set a lower bound on the monitoring quality.
+ * But, setting this too high could result in increased monitoring overhead.
+ * Please refer to the DAMON documentation for more detail. 10 by default.
+ */
+static unsigned long min_nr_regions __read_mostly = 10;
+module_param(min_nr_regions, ulong, 0600);
+
+/*
+ * Maximum number of monitoring regions.
+ *
+ * The maximum number of monitoring regions of DAMON for the hot/cold memory
+ * monitoring. This can be used to set upper-bound of the monitoring overhead.
+ * However, setting this too low could result in bad monitoring quality.
+ * Please refer to the DAMON documentation for more detail. 1000 by default.
+ */
+static unsigned long max_nr_regions __read_mostly = 1000;
+module_param(max_nr_regions, ulong, 0600);
+
+/*
+ * Start of the target memory region in physical address.
+ *
+ * The start physical address of memory region that DAMON_LRU_SORT will do work
+ * against. By default, biggest System RAM is used as the region.
+ */
+static unsigned long monitor_region_start __read_mostly;
+module_param(monitor_region_start, ulong, 0600);
+
+/*
+ * End of the target memory region in physical address.
+ *
+ * The end physical address of memory region that DAMON_LRU_SORT will do work
+ * against. By default, biggest System RAM is used as the region.
+ */
+static unsigned long monitor_region_end __read_mostly;
+module_param(monitor_region_end, ulong, 0600);
+
+/*
+ * PID of the DAMON thread
+ *
+ * If DAMON_LRU_SORT is enabled, this becomes the PID of the worker thread.
+ * Else, -1.
+ */
+static int kdamond_pid __read_mostly = -1;
+module_param(kdamond_pid, int, 0400);
+
+/*
+ * Number of hot memory regions that tried to be LRU-sorted.
+ */
+static unsigned long nr_lru_sort_tried_hot_regions __read_mostly;
+module_param(nr_lru_sort_tried_hot_regions, ulong, 0400);
+
+/*
+ * Total bytes of hot memory regions that tried to be LRU-sorted.
+ */
+static unsigned long bytes_lru_sort_tried_hot_regions __read_mostly;
+module_param(bytes_lru_sort_tried_hot_regions, ulong, 0400);
+
+/*
+ * Number of hot memory regions that were successfully LRU-sorted.
+ */
+static unsigned long nr_lru_sorted_hot_regions __read_mostly;
+module_param(nr_lru_sorted_hot_regions, ulong, 0400);
+
+/*
+ * Total bytes of hot memory regions that were successfully LRU-sorted.
+ */
+static unsigned long bytes_lru_sorted_hot_regions __read_mostly;
+module_param(bytes_lru_sorted_hot_regions, ulong, 0400);
+
+/*
+ * Number of times that the time quota limit for hot regions has been exceeded
+ */
+static unsigned long nr_hot_quota_exceeds __read_mostly;
+module_param(nr_hot_quota_exceeds, ulong, 0400);
+
+/*
+ * Number of cold memory regions that tried to be LRU-sorted.
+ */
+static unsigned long nr_lru_sort_tried_cold_regions __read_mostly;
+module_param(nr_lru_sort_tried_cold_regions, ulong, 0400);
+
+/*
+ * Total bytes of cold memory regions that tried to be LRU-sorted.
+ */
+static unsigned long bytes_lru_sort_tried_cold_regions __read_mostly;
+module_param(bytes_lru_sort_tried_cold_regions, ulong, 0400);
+
+/*
+ * Number of cold memory regions that were successfully LRU-sorted.
+ */
+static unsigned long nr_lru_sorted_cold_regions __read_mostly;
+module_param(nr_lru_sorted_cold_regions, ulong, 0400);
+
+/*
+ * Total bytes of cold memory regions that were successfully LRU-sorted.
+ */
+static unsigned long bytes_lru_sorted_cold_regions __read_mostly;
+module_param(bytes_lru_sorted_cold_regions, ulong, 0400);
+
+/*
+ * Number of times that the time quota limit for cold regions has been exceeded
+ */
+static unsigned long nr_cold_quota_exceeds __read_mostly;
+module_param(nr_cold_quota_exceeds, ulong, 0400);
+
+static struct damon_ctx *ctx;
+static struct damon_target *target;
+
+struct damon_lru_sort_ram_walk_arg {
+ unsigned long start;
+ unsigned long end;
+};
+
+static int walk_system_ram(struct resource *res, void *arg)
+{
+ struct damon_lru_sort_ram_walk_arg *a = arg;
+
+ if (a->end - a->start < resource_size(res)) {
+ a->start = res->start;
+ a->end = res->end;
+ }
+ return 0;
+}
+
+/*
+ * Find biggest 'System RAM' resource and store its start and end address in
+ * @start and @end, respectively. If no System RAM is found, returns false.
+ */
+static bool get_monitoring_region(unsigned long *start, unsigned long *end)
+{
+ struct damon_lru_sort_ram_walk_arg arg = {};
+
+ walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
+ if (arg.end <= arg.start)
+ return false;
+
+ *start = arg.start;
+ *end = arg.end;
+ return true;
+}
+
+/* Create a DAMON-based operation scheme for hot memory regions */
+static struct damos *damon_lru_sort_new_hot_scheme(unsigned int hot_thres)
+{
+ struct damos_watermarks wmarks = {
+ .metric = DAMOS_WMARK_FREE_MEM_RATE,
+ .interval = wmarks_interval,
+ .high = wmarks_high,
+ .mid = wmarks_mid,
+ .low = wmarks_low,
+ };
+ struct damos_quota quota = {
+ /*
+ * Do not try LRU-lists sorting of hot pages for more than half
+ * of quota_ms milliseconds within quota_reset_interval_ms.
+ */
+ .ms = quota_ms / 2,
+ .sz = 0,
+ .reset_interval = quota_reset_interval_ms,
+ /* Within the quota, mark hotter regions accessed first. */
+ .weight_sz = 0,
+ .weight_nr_accesses = 1,
+ .weight_age = 0,
+ };
+ struct damos *scheme = damon_new_scheme(
+ /* Find regions having PAGE_SIZE or larger size */
+ PAGE_SIZE, ULONG_MAX,
+ /* and accessed for more than the threshold */
+ hot_thres, UINT_MAX,
+ /* no matter its age */
+ 0, UINT_MAX,
+ /* prioritize those on LRU lists, as soon as found */
+ DAMOS_LRU_PRIO,
+ /* under the quota. */
+ &quota,
+ /* (De)activate this according to the watermarks. */
+ &wmarks);
+
+ return scheme;
+}
+
+/* Create a DAMON-based operation scheme for cold memory regions */
+static struct damos *damon_lru_sort_new_cold_scheme(unsigned int cold_thres)
+{
+ struct damos_watermarks wmarks = {
+ .metric = DAMOS_WMARK_FREE_MEM_RATE,
+ .interval = wmarks_interval,
+ .high = wmarks_high,
+ .mid = wmarks_mid,
+ .low = wmarks_low,
+ };
+ struct damos_quota quota = {
+ /*
+ * Do not try LRU-lists sorting of cold pages for more than
+ * half of quota_ms milliseconds within
+ * quota_reset_interval_ms.
+ */
+ .ms = quota_ms / 2,
+ .sz = 0,
+ .reset_interval = quota_reset_interval_ms,
+ /* Within the quota, mark colder regions as not accessed first. */
+ .weight_sz = 0,
+ .weight_nr_accesses = 0,
+ .weight_age = 1,
+ };
+ struct damos *scheme = damon_new_scheme(
+ /* Find regions having PAGE_SIZE or larger size */
+ PAGE_SIZE, ULONG_MAX,
+ /* and not accessed at all */
+ 0, 0,
+ /* for cold_thres or more micro-seconds, and */
+ cold_thres, UINT_MAX,
+ /* mark those as not accessed, as soon as found */
+ DAMOS_LRU_DEPRIO,
+ /* under the quota. */
+ &quota,
+ /* (De)activate this according to the watermarks. */
+ &wmarks);
+
+ return scheme;
+}
+
+static int damon_lru_sort_apply_parameters(void)
+{
+ struct damos *scheme, *next_scheme;
+ struct damon_addr_range addr_range;
+ unsigned int hot_thres, cold_thres;
+ int err = 0;
+
+ err = damon_set_attrs(ctx, sample_interval, aggr_interval, 0,
+ min_nr_regions, max_nr_regions);
+ if (err)
+ return err;
+
+ /* free previously set schemes */
+ damon_for_each_scheme_safe(scheme, next_scheme, ctx)
+ damon_destroy_scheme(scheme);
+
+ /* aggr_interval / sample_interval is the maximum nr_accesses */
+ hot_thres = aggr_interval / sample_interval * hot_thres_access_freq /
+ 1000;
+ scheme = damon_lru_sort_new_hot_scheme(hot_thres);
+ if (!scheme)
+ return -ENOMEM;
+ damon_add_scheme(ctx, scheme);
+
+ cold_thres = cold_min_age / aggr_interval;
+ scheme = damon_lru_sort_new_cold_scheme(cold_thres);
+ if (!scheme)
+ return -ENOMEM;
+ damon_add_scheme(ctx, scheme);
+
+ if (monitor_region_start > monitor_region_end)
+ return -EINVAL;
+ if (!monitor_region_start && !monitor_region_end &&
+ !get_monitoring_region(&monitor_region_start,
+ &monitor_region_end))
+ return -EINVAL;
+ addr_range.start = monitor_region_start;
+ addr_range.end = monitor_region_end;
+ return damon_set_regions(target, &addr_range, 1);
+}
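The two conversions above pack a lot into two expressions, so here is a minimal illustrative sketch of the arithmetic. Every value in it is an assumption chosen for the example; the real defaults live earlier in lru_sort.c and are not part of this hunk.

#include <stdio.h>

/*
 * Minimal illustrative sketch of the hot_thres / cold_thres arithmetic in
 * damon_lru_sort_apply_parameters(). All numbers are assumed example values.
 */
int main(void)
{
	unsigned long sample_interval = 5000;		/* us, assumed */
	unsigned long aggr_interval = 100000;		/* us, assumed */
	unsigned long hot_thres_access_freq = 500;	/* per-thousand, assumed */
	unsigned long cold_min_age = 120000000;		/* us (120 s), assumed */

	/* aggr_interval / sample_interval = 20 samples per aggregation */
	unsigned int hot_thres = aggr_interval / sample_interval *
				 hot_thres_access_freq / 1000;	/* 20 * 500 / 1000 = 10 */
	/* the cold age threshold is expressed in aggregation intervals */
	unsigned int cold_thres = cold_min_age / aggr_interval;	/* 1200 */

	printf("hot_thres=%u cold_thres=%u\n", hot_thres, cold_thres);
	return 0;
}

With these assumed values, a region counts as hot once it was accessed in at least half of the samples within an aggregation interval, and as cold once it has gone 1200 aggregation intervals without access.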
+
+static int damon_lru_sort_turn(bool on)
+{
+ int err;
+
+ if (!on) {
+ err = damon_stop(&ctx, 1);
+ if (!err)
+ kdamond_pid = -1;
+ return err;
+ }
+
+ err = damon_lru_sort_apply_parameters();
+ if (err)
+ return err;
+
+ err = damon_start(&ctx, 1, true);
+ if (err)
+ return err;
+ kdamond_pid = ctx->kdamond->pid;
+ return 0;
+}
+
+static struct delayed_work damon_lru_sort_timer;
+static void damon_lru_sort_timer_fn(struct work_struct *work)
+{
+ static bool last_enabled;
+ bool now_enabled;
+
+ now_enabled = enabled;
+ if (last_enabled != now_enabled) {
+ if (!damon_lru_sort_turn(now_enabled))
+ last_enabled = now_enabled;
+ else
+ enabled = last_enabled;
+ }
+}
+static DECLARE_DELAYED_WORK(damon_lru_sort_timer, damon_lru_sort_timer_fn);
+
+static bool damon_lru_sort_initialized;
+
+static int damon_lru_sort_enabled_store(const char *val,
+ const struct kernel_param *kp)
+{
+ int rc = param_set_bool(val, kp);
+
+ if (rc < 0)
+ return rc;
+
+ if (!damon_lru_sort_initialized)
+ return rc;
+
+ schedule_delayed_work(&damon_lru_sort_timer, 0);
+
+ return 0;
+}
+
+static const struct kernel_param_ops enabled_param_ops = {
+ .set = damon_lru_sort_enabled_store,
+ .get = param_get_bool,
+};
+
+module_param_cb(enabled, &enabled_param_ops, &enabled, 0600);
+MODULE_PARM_DESC(enabled,
+ "Enable or disable DAMON_LRU_SORT (default: disabled)");
+
+static int damon_lru_sort_handle_commit_inputs(void)
+{
+ int err;
+
+ if (!commit_inputs)
+ return 0;
+
+ err = damon_lru_sort_apply_parameters();
+ commit_inputs = false;
+ return err;
+}
+
+static int damon_lru_sort_after_aggregation(struct damon_ctx *c)
+{
+ struct damos *s;
+
+ /* update the stat parameters */
+ damon_for_each_scheme(s, c) {
+ if (s->action == DAMOS_LRU_PRIO) {
+ nr_lru_sort_tried_hot_regions = s->stat.nr_tried;
+ bytes_lru_sort_tried_hot_regions = s->stat.sz_tried;
+ nr_lru_sorted_hot_regions = s->stat.nr_applied;
+ bytes_lru_sorted_hot_regions = s->stat.sz_applied;
+ nr_hot_quota_exceeds = s->stat.qt_exceeds;
+ } else if (s->action == DAMOS_LRU_DEPRIO) {
+ nr_lru_sort_tried_cold_regions = s->stat.nr_tried;
+ bytes_lru_sort_tried_cold_regions = s->stat.sz_tried;
+ nr_lru_sorted_cold_regions = s->stat.nr_applied;
+ bytes_lru_sorted_cold_regions = s->stat.sz_applied;
+ nr_cold_quota_exceeds = s->stat.qt_exceeds;
+ }
+ }
+
+ return damon_lru_sort_handle_commit_inputs();
+}
+
+static int damon_lru_sort_after_wmarks_check(struct damon_ctx *c)
+{
+ return damon_lru_sort_handle_commit_inputs();
+}
+
+static int __init damon_lru_sort_init(void)
+{
+ ctx = damon_new_ctx();
+ if (!ctx)
+ return -ENOMEM;
+
+ if (damon_select_ops(ctx, DAMON_OPS_PADDR)) {
+ damon_destroy_ctx(ctx);
+ return -EINVAL;
+ }
+
+ ctx->callback.after_wmarks_check = damon_lru_sort_after_wmarks_check;
+ ctx->callback.after_aggregation = damon_lru_sort_after_aggregation;
+
+ target = damon_new_target();
+ if (!target) {
+ damon_destroy_ctx(ctx);
+ return -ENOMEM;
+ }
+ damon_add_target(ctx, target);
+
+ schedule_delayed_work(&damon_lru_sort_timer, 0);
+
+ damon_lru_sort_initialized = true;
+ return 0;
+}
+
+module_init(damon_lru_sort_init);
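Once loaded, the scheme is controlled entirely through the "enabled" parameter handled by damon_lru_sort_enabled_store() above, and the 0400 stat parameters can be read back the same way. A minimal userspace sketch follows; the /sys/module/damon_lru_sort/parameters/ path is assumed from the usual module-parameter layout, not spelled out anywhere in this diff.

#include <stdio.h>

/*
 * Illustrative sketch only: turn DAMON_LRU_SORT on from userspace by writing
 * to its "enabled" module parameter. Assumes the module is loaded and the
 * caller has permission to write the sysfs file.
 */
int main(void)
{
	FILE *f = fopen("/sys/module/damon_lru_sort/parameters/enabled", "w");

	if (!f)
		return 1;		/* module not present, or no permission */
	fputs("Y\n", f);		/* param_set_bool() accepts Y/N or 1/0 */
	return fclose(f) ? 1 : 0;
}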
diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c
index 10ef20b2003f..b1335de200e7 100644
--- a/mm/damon/ops-common.c
+++ b/mm/damon/ops-common.c
@@ -130,3 +130,45 @@ int damon_pageout_score(struct damon_ctx *c, struct damon_region *r,
/* Return coldness of the region */
return DAMOS_MAX_SCORE - hotness;
}
+
+int damon_hot_score(struct damon_ctx *c, struct damon_region *r,
+ struct damos *s)
+{
+ unsigned int max_nr_accesses;
+ int freq_subscore;
+ unsigned int age_in_sec;
+ int age_in_log, age_subscore;
+ unsigned int freq_weight = s->quota.weight_nr_accesses;
+ unsigned int age_weight = s->quota.weight_age;
+ int hotness;
+
+ max_nr_accesses = c->aggr_interval / c->sample_interval;
+ freq_subscore = r->nr_accesses * DAMON_MAX_SUBSCORE / max_nr_accesses;
+
+ age_in_sec = (unsigned long)r->age * c->aggr_interval / 1000000;
+ for (age_in_log = 0; age_in_log < DAMON_MAX_AGE_IN_LOG && age_in_sec;
+ age_in_log++, age_in_sec >>= 1)
+ ;
+
+ /* If frequency is 0, higher age means it's colder */
+ if (freq_subscore == 0)
+ age_in_log *= -1;
+
+ /*
+ * Now age_in_log is in [-DAMON_MAX_AGE_IN_LOG, DAMON_MAX_AGE_IN_LOG].
+ * Scale it to be in [0, 100] and set it as age subscore.
+ */
+ age_in_log += DAMON_MAX_AGE_IN_LOG;
+ age_subscore = age_in_log * DAMON_MAX_SUBSCORE /
+ DAMON_MAX_AGE_IN_LOG / 2;
+
+ hotness = (freq_weight * freq_subscore + age_weight * age_subscore);
+ if (freq_weight + age_weight)
+ hotness /= freq_weight + age_weight;
+ /*
+ * Transform it to fit in [0, DAMOS_MAX_SCORE]
+ */
+ hotness = hotness * DAMOS_MAX_SCORE / DAMON_MAX_SUBSCORE;
+
+ return hotness;
+}
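The scoring above is easier to follow with concrete numbers. The sketch below is purely illustrative; the values it plugs in for DAMON_MAX_SUBSCORE, DAMON_MAX_AGE_IN_LOG and DAMOS_MAX_SCORE are assumptions, as are the region state and monitoring intervals.

#include <stdio.h>

/*
 * Illustrative walk-through of the damon_hot_score() arithmetic above, using
 * assumed constants (DAMON_MAX_SUBSCORE = 100, DAMON_MAX_AGE_IN_LOG = 32,
 * DAMOS_MAX_SCORE = 99) and an example region that was accessed in 10 of 20
 * samples for 20 aggregation intervals.
 */
int main(void)
{
	const int max_subscore = 100, max_age_in_log = 32, max_score = 99;
	const unsigned int sample_interval = 5000, aggr_interval = 100000; /* us */
	const unsigned int nr_accesses = 10, age = 20;
	unsigned int max_nr_accesses, age_in_sec;
	int freq_subscore, age_in_log = 0, age_subscore, hotness;

	max_nr_accesses = aggr_interval / sample_interval;		/* 20 */
	freq_subscore = nr_accesses * max_subscore / max_nr_accesses;	/* 50 */

	age_in_sec = (unsigned long)age * aggr_interval / 1000000;	/* 2 */
	while (age_in_log < max_age_in_log && age_in_sec) {
		age_in_log++;
		age_in_sec >>= 1;
	}
	/* freq_subscore != 0, so the age keeps its positive sign */
	age_in_log += max_age_in_log;					/* 34 */
	age_subscore = age_in_log * max_subscore / max_age_in_log / 2;	/* 53 */

	/* the hot scheme's quota weights: nr_accesses = 1, age = 0 */
	hotness = (1 * freq_subscore + 0 * age_subscore) / (1 + 0);	/* 50 */
	hotness = hotness * max_score / max_subscore;			/* 49 */

	printf("hotness = %d of %d\n", hotness, max_score);
	return 0;
}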
diff --git a/mm/damon/ops-common.h b/mm/damon/ops-common.h
index e790cb5f8fe0..52329ff361cd 100644
--- a/mm/damon/ops-common.h
+++ b/mm/damon/ops-common.h
@@ -14,3 +14,5 @@ void damon_pmdp_mkold(pmd_t *pmd, struct mm_struct *mm, unsigned long addr);
int damon_pageout_score(struct damon_ctx *c, struct damon_region *r,
struct damos *s);
+int damon_hot_score(struct damon_ctx *c, struct damon_region *r,
+ struct damos *s);
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index b40ff5811bb2..dc131c6a5403 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -204,16 +204,11 @@ static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
return max_nr_accesses;
}
-static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
- struct damon_target *t, struct damon_region *r,
- struct damos *scheme)
+static unsigned long damon_pa_pageout(struct damon_region *r)
{
unsigned long addr, applied;
LIST_HEAD(page_list);
- if (scheme->action != DAMOS_PAGEOUT)
- return 0;
-
for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
struct page *page = damon_get_page(PHYS_PFN(addr));
@@ -238,6 +233,55 @@ static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
return applied * PAGE_SIZE;
}
+static unsigned long damon_pa_mark_accessed(struct damon_region *r)
+{
+ unsigned long addr, applied = 0;
+
+ for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
+ struct page *page = damon_get_page(PHYS_PFN(addr));
+
+ if (!page)
+ continue;
+ mark_page_accessed(page);
+ put_page(page);
+ applied++;
+ }
+ return applied * PAGE_SIZE;
+}
+
+static unsigned long damon_pa_deactivate_pages(struct damon_region *r)
+{
+ unsigned long addr, applied = 0;
+
+ for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
+ struct page *page = damon_get_page(PHYS_PFN(addr));
+
+ if (!page)
+ continue;
+ deactivate_page(page);
+ put_page(page);
+ applied++;
+ }
+ return applied * PAGE_SIZE;
+}
+
+static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
+ struct damon_target *t, struct damon_region *r,
+ struct damos *scheme)
+{
+ switch (scheme->action) {
+ case DAMOS_PAGEOUT:
+ return damon_pa_pageout(r);
+ case DAMOS_LRU_PRIO:
+ return damon_pa_mark_accessed(r);
+ case DAMOS_LRU_DEPRIO:
+ return damon_pa_deactivate_pages(r);
+ default:
+ break;
+ }
+ return 0;
+}
+
static int damon_pa_scheme_score(struct damon_ctx *context,
struct damon_target *t, struct damon_region *r,
struct damos *scheme)
@@ -245,6 +289,10 @@ static int damon_pa_scheme_score(struct damon_ctx *context,
switch (scheme->action) {
case DAMOS_PAGEOUT:
return damon_pageout_score(context, r, scheme);
+ case DAMOS_LRU_PRIO:
+ return damon_hot_score(context, r, scheme);
+ case DAMOS_LRU_DEPRIO:
+ return damon_pageout_score(context, r, scheme);
default:
break;
}
diff --git a/mm/damon/reclaim.c b/mm/damon/reclaim.c
index 4b07c29effe9..a7faf51b4bd4 100644
--- a/mm/damon/reclaim.c
+++ b/mm/damon/reclaim.c
@@ -353,7 +353,6 @@ static int damon_reclaim_turn(bool on)
return 0;
}
-#define ENABLE_CHECK_INTERVAL_MS 1000
static struct delayed_work damon_reclaim_timer;
static void damon_reclaim_timer_fn(struct work_struct *work)
{
@@ -367,16 +366,12 @@ static void damon_reclaim_timer_fn(struct work_struct *work)
else
enabled = last_enabled;
}
-
- if (enabled)
- schedule_delayed_work(&damon_reclaim_timer,
- msecs_to_jiffies(ENABLE_CHECK_INTERVAL_MS));
}
static DECLARE_DELAYED_WORK(damon_reclaim_timer, damon_reclaim_timer_fn);
static bool damon_reclaim_initialized;
-static int enabled_store(const char *val,
+static int damon_reclaim_enabled_store(const char *val,
const struct kernel_param *kp)
{
int rc = param_set_bool(val, kp);
@@ -388,14 +383,12 @@ static int enabled_store(const char *val,
if (!damon_reclaim_initialized)
return rc;
- if (enabled)
- schedule_delayed_work(&damon_reclaim_timer, 0);
-
+ schedule_delayed_work(&damon_reclaim_timer, 0);
return 0;
}
static const struct kernel_param_ops enabled_param_ops = {
- .set = enabled_store,
+ .set = damon_reclaim_enabled_store,
.get = param_get_bool,
};
@@ -403,10 +396,21 @@ module_param_cb(enabled, &enabled_param_ops, &enabled, 0600);
MODULE_PARM_DESC(enabled,
"Enable or disable DAMON_RECLAIM (default: disabled)");
+static int damon_reclaim_handle_commit_inputs(void)
+{
+ int err;
+
+ if (!commit_inputs)
+ return 0;
+
+ err = damon_reclaim_apply_parameters();
+ commit_inputs = false;
+ return err;
+}
+
static int damon_reclaim_after_aggregation(struct damon_ctx *c)
{
struct damos *s;
- int err = 0;
/* update the stats parameter */
damon_for_each_scheme(s, c) {
@@ -417,22 +421,12 @@ static int damon_reclaim_after_aggregation(struct damon_ctx *c)
nr_quota_exceeds = s->stat.qt_exceeds;
}
- if (commit_inputs) {
- err = damon_reclaim_apply_parameters();
- commit_inputs = false;
- }
- return err;
+ return damon_reclaim_handle_commit_inputs();
}
static int damon_reclaim_after_wmarks_check(struct damon_ctx *c)
{
- int err = 0;
-
- if (commit_inputs) {
- err = damon_reclaim_apply_parameters();
- commit_inputs = false;
- }
- return err;
+ return damon_reclaim_handle_commit_inputs();
}
static int __init damon_reclaim_init(void)
@@ -441,8 +435,10 @@ static int __init damon_reclaim_init(void)
if (!ctx)
return -ENOMEM;
- if (damon_select_ops(ctx, DAMON_OPS_PADDR))
+ if (damon_select_ops(ctx, DAMON_OPS_PADDR)) {
+ damon_destroy_ctx(ctx);
return -EINVAL;
+ }
ctx->callback.after_wmarks_check = damon_reclaim_after_wmarks_check;
ctx->callback.after_aggregation = damon_reclaim_after_aggregation;
diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index 09f9e8ca3d1f..7488e27c87c3 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -762,6 +762,8 @@ static const char * const damon_sysfs_damos_action_strs[] = {
"pageout",
"hugepage",
"nohugepage",
+ "lru_prio",
+ "lru_deprio",
"stat",
};
@@ -2136,8 +2138,7 @@ static void damon_sysfs_destroy_targets(struct damon_ctx *ctx)
struct damon_target *t, *next;
damon_for_each_target_safe(t, next, ctx) {
- if (ctx->ops.id == DAMON_OPS_VADDR ||
- ctx->ops.id == DAMON_OPS_FVADDR)
+ if (damon_target_has_pid(ctx))
put_pid(t->pid);
damon_destroy_target(t);
}
@@ -2181,8 +2182,7 @@ static int damon_sysfs_add_target(struct damon_sysfs_target *sys_target,
if (!t)
return -ENOMEM;
- if (ctx->ops.id == DAMON_OPS_VADDR ||
- ctx->ops.id == DAMON_OPS_FVADDR) {
+ if (damon_target_has_pid(ctx)) {
t->pid = find_get_pid(sys_target->pid);
if (!t->pid)
goto destroy_targets_out;
@@ -2210,7 +2210,7 @@ static struct damon_target *damon_sysfs_existing_target(
struct pid *pid;
struct damon_target *t;
- if (ctx->ops.id == DAMON_OPS_PADDR) {
+ if (!damon_target_has_pid(ctx)) {
/* Up to only one target for paddr could exist */
damon_for_each_target(t, ctx)
return t;
@@ -2359,6 +2359,23 @@ static inline bool damon_sysfs_kdamond_running(
damon_sysfs_ctx_running(kdamond->damon_ctx);
}
+static int damon_sysfs_apply_inputs(struct damon_ctx *ctx,
+ struct damon_sysfs_context *sys_ctx)
+{
+ int err;
+
+ err = damon_select_ops(ctx, sys_ctx->ops_id);
+ if (err)
+ return err;
+ err = damon_sysfs_set_attrs(ctx, sys_ctx->attrs);
+ if (err)
+ return err;
+ err = damon_sysfs_set_targets(ctx, sys_ctx->targets);
+ if (err)
+ return err;
+ return damon_sysfs_set_schemes(ctx, sys_ctx->schemes);
+}
+
/*
* damon_sysfs_commit_input() - Commit user inputs to a running kdamond.
* @kdamond: The kobject wrapper for the associated kdamond.
@@ -2367,31 +2384,14 @@ static inline bool damon_sysfs_kdamond_running(
*/
static int damon_sysfs_commit_input(struct damon_sysfs_kdamond *kdamond)
{
- struct damon_ctx *ctx = kdamond->damon_ctx;
- struct damon_sysfs_context *sys_ctx;
- int err = 0;
-
if (!damon_sysfs_kdamond_running(kdamond))
return -EINVAL;
/* TODO: Support multiple contexts per kdamond */
if (kdamond->contexts->nr != 1)
return -EINVAL;
- sys_ctx = kdamond->contexts->contexts_arr[0];
-
- err = damon_select_ops(ctx, sys_ctx->ops_id);
- if (err)
- return err;
- err = damon_sysfs_set_attrs(ctx, sys_ctx->attrs);
- if (err)
- return err;
- err = damon_sysfs_set_targets(ctx, sys_ctx->targets);
- if (err)
- return err;
- err = damon_sysfs_set_schemes(ctx, sys_ctx->schemes);
- if (err)
- return err;
- return err;
+ return damon_sysfs_apply_inputs(kdamond->damon_ctx,
+ kdamond->contexts->contexts_arr[0]);
}
/*
@@ -2438,27 +2438,16 @@ static struct damon_ctx *damon_sysfs_build_ctx(
if (!ctx)
return ERR_PTR(-ENOMEM);
- err = damon_select_ops(ctx, sys_ctx->ops_id);
- if (err)
- goto out;
- err = damon_sysfs_set_attrs(ctx, sys_ctx->attrs);
- if (err)
- goto out;
- err = damon_sysfs_set_targets(ctx, sys_ctx->targets);
- if (err)
- goto out;
- err = damon_sysfs_set_schemes(ctx, sys_ctx->schemes);
- if (err)
- goto out;
+ err = damon_sysfs_apply_inputs(ctx, sys_ctx);
+ if (err) {
+ damon_destroy_ctx(ctx);
+ return ERR_PTR(err);
+ }
ctx->callback.after_wmarks_check = damon_sysfs_cmd_request_callback;
ctx->callback.after_aggregation = damon_sysfs_cmd_request_callback;
ctx->callback.before_terminate = damon_sysfs_before_terminate;
return ctx;
-
-out:
- damon_destroy_ctx(ctx);
- return ERR_PTR(err);
}
static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond *kdamond)
diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
index 1ab091f49fc0..dc7df1254f0a 100644
--- a/mm/debug_vm_pgtable.c
+++ b/mm/debug_vm_pgtable.c
@@ -35,7 +35,7 @@
#include <asm/tlbflush.h>
/*
- * Please refer Documentation/vm/arch_pgtable_helpers.rst for the semantics
+ * Please refer Documentation/mm/arch_pgtable_helpers.rst for the semantics
* expectations that are being validated here. All future changes in here
* or the documentation need to be in sync.
*/
diff --git a/mm/filemap.c b/mm/filemap.c
index 0dec96ea6688..15800334147b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -667,7 +667,7 @@ EXPORT_SYMBOL_GPL(filemap_range_has_writeback);
int filemap_write_and_wait_range(struct address_space *mapping,
loff_t lstart, loff_t lend)
{
- int err = 0;
+ int err = 0, err2;
if (mapping_needs_writeback(mapping)) {
err = __filemap_fdatawrite_range(mapping, lstart, lend,
@@ -678,18 +678,12 @@ int filemap_write_and_wait_range(struct address_space *mapping,
* But the -EIO is special case, it may indicate the worst
* thing (e.g. bug) happened, so we avoid waiting for it.
*/
- if (err != -EIO) {
- int err2 = filemap_fdatawait_range(mapping,
- lstart, lend);
- if (!err)
- err = err2;
- } else {
- /* Clear any previously stored errors */
- filemap_check_errors(mapping);
- }
- } else {
- err = filemap_check_errors(mapping);
+ if (err != -EIO)
+ __filemap_fdatawait_range(mapping, lstart, lend);
}
+ err2 = filemap_check_errors(mapping);
+ if (!err)
+ err = err2;
return err;
}
EXPORT_SYMBOL(filemap_write_and_wait_range);
diff --git a/mm/frontswap.c b/mm/frontswap.c
index 6f69b044a8cc..1a97610308cb 100644
--- a/mm/frontswap.c
+++ b/mm/frontswap.c
@@ -4,7 +4,7 @@
*
* This code provides the generic "frontend" layer to call a matching
* "backend" driver implementation of frontswap. See
- * Documentation/vm/frontswap.rst for more information.
+ * Documentation/mm/frontswap.rst for more information.
*
* Copyright (C) 2009-2012 Oracle Corp. All rights reserved.
* Author: Dan Magenheimer
diff --git a/mm/gup.c b/mm/gup.c
index e2a39e30756d..5abdaf487460 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -134,7 +134,7 @@ struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags)
* path.
*/
if (unlikely((flags & FOLL_LONGTERM) &&
- !is_pinnable_page(page)))
+ !is_longterm_pinnable_page(page)))
return NULL;
/*
@@ -478,14 +478,42 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
return -EEXIST;
}
-/*
- * FOLL_FORCE can write to even unwritable pte's, but only
- * after we've gone through a COW cycle and they are dirty.
- */
-static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
+/* FOLL_FORCE can write to even unwritable PTEs in COW mappings. */
+static inline bool can_follow_write_pte(pte_t pte, struct page *page,
+ struct vm_area_struct *vma,
+ unsigned int flags)
{
- return pte_write(pte) ||
- ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
+ /* If the pte is writable, we can write to the page. */
+ if (pte_write(pte))
+ return true;
+
+ /* Maybe FOLL_FORCE is set to override it? */
+ if (!(flags & FOLL_FORCE))
+ return false;
+
+ /* But FOLL_FORCE has no effect on shared mappings */
+ if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
+ return false;
+
+ /* ... or read-only private ones */
+ if (!(vma->vm_flags & VM_MAYWRITE))
+ return false;
+
+ /* ... or already writable ones that just need to take a write fault */
+ if (vma->vm_flags & VM_WRITE)
+ return false;
+
+ /*
+ * See can_change_pte_writable(): we broke COW and could map the page
+ * writable if we have an exclusive anonymous page ...
+ */
+ if (!page || !PageAnon(page) || !PageAnonExclusive(page))
+ return false;
+
+ /* ... and a write-fault isn't required for other reasons. */
+ if (vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte))
+ return false;
+ return !userfaultfd_pte_wp(vma, pte);
}
static struct page *follow_page_pte(struct vm_area_struct *vma,
@@ -528,12 +556,19 @@ retry:
}
if ((flags & FOLL_NUMA) && pte_protnone(pte))
goto no_page;
- if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
- pte_unmap_unlock(ptep, ptl);
- return NULL;
- }
page = vm_normal_page(vma, address, pte);
+
+ /*
+ * We only care about anon pages in can_follow_write_pte() and don't
+ * have to worry about pte_devmap() because they are never anon.
+ */
+ if ((flags & FOLL_WRITE) &&
+ !can_follow_write_pte(pte, page, vma, flags)) {
+ page = NULL;
+ goto out;
+ }
+
if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) {
/*
* Only return device mapping pages in the FOLL_GET or FOLL_PIN
@@ -953,6 +988,25 @@ static int faultin_page(struct vm_area_struct *vma,
}
ret = handle_mm_fault(vma, address, fault_flags, NULL);
+
+ if (ret & VM_FAULT_COMPLETED) {
+ /*
+ * With FAULT_FLAG_RETRY_NOWAIT we'll never release the
+ * mmap lock in the page fault handler. Sanity check this.
+ */
+ WARN_ON_ONCE(fault_flags & FAULT_FLAG_RETRY_NOWAIT);
+ if (locked)
+ *locked = 0;
+ /*
+ * We should do the same as VM_FAULT_RETRY, but let's not
+ * return -EBUSY since that's not reflecting the reality of
+ * what has happened - we've just fully completed a page
+ * fault, with the mmap lock released. Use -EAGAIN to show
+ * that we want to take the mmap lock _again_.
+ */
+ return -EAGAIN;
+ }
+
if (ret & VM_FAULT_ERROR) {
int err = vm_fault_to_errno(ret, *flags);
@@ -967,17 +1021,6 @@ static int faultin_page(struct vm_area_struct *vma,
return -EBUSY;
}
- /*
- * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
- * necessary, even if maybe_mkwrite decided not to set pte_write. We
- * can thus safely do subsequent page lookups as if they were reads.
- * But only do so when looping for pte_write is futile: in some cases
- * userspace may also be wanting to write to the gotten user page,
- * which a read fault here might prevent (a readonly page might get
- * reCOWed by userspace write).
- */
- if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
- *flags |= FOLL_COW;
return 0;
}
@@ -1179,6 +1222,7 @@ retry:
case 0:
goto retry;
case -EBUSY:
+ case -EAGAIN:
ret = 0;
fallthrough;
case -EFAULT:
@@ -1305,6 +1349,18 @@ retry:
return -EINTR;
ret = handle_mm_fault(vma, address, fault_flags, NULL);
+
+ if (ret & VM_FAULT_COMPLETED) {
+ /*
+ * NOTE: it's a pity that we need to retake the lock here
+ * to pair with the unlock() in the callers. Ideally we
+ * could tell the callers so they do not need to unlock.
+ */
+ mmap_read_lock(mm);
+ *unlocked = true;
+ return 0;
+ }
+
if (ret & VM_FAULT_ERROR) {
int err = vm_fault_to_errno(ret, 0);
@@ -1370,7 +1426,7 @@ static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
/* VM_FAULT_RETRY couldn't trigger, bypass */
return ret;
- /* VM_FAULT_RETRY cannot return errors */
+ /* VM_FAULT_RETRY or VM_FAULT_COMPLETED cannot return errors */
if (!*locked) {
BUG_ON(ret < 0);
BUG_ON(ret >= nr_pages);
@@ -1674,7 +1730,7 @@ static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
goto finish_or_fault;
if (pages) {
- pages[i] = virt_to_page(start);
+ pages[i] = virt_to_page((void *)start);
if (pages[i])
get_page(pages[i]);
}
@@ -1883,7 +1939,7 @@ static long check_and_migrate_movable_pages(unsigned long nr_pages,
unsigned long isolation_error_count = 0, i;
struct folio *prev_folio = NULL;
LIST_HEAD(movable_page_list);
- bool drain_allow = true;
+ bool drain_allow = true, coherent_pages = false;
int ret = 0;
for (i = 0; i < nr_pages; i++) {
@@ -1893,14 +1949,43 @@ static long check_and_migrate_movable_pages(unsigned long nr_pages,
continue;
prev_folio = folio;
- if (folio_is_pinnable(folio))
+ /*
+ * Device coherent pages are managed by a driver and should not
+ * be pinned indefinitely as it prevents the driver moving the
+ * page. So when trying to pin with FOLL_LONGTERM instead try
+ * to migrate the page out of device memory.
+ */
+ if (folio_is_device_coherent(folio)) {
+ /*
+ * We always want a new GUP lookup with device coherent
+ * pages.
+ */
+ pages[i] = NULL;
+ coherent_pages = true;
+
+ /*
+ * Migration will fail if the page is pinned, so convert
+ * the pin on the source page to a normal reference.
+ */
+ if (gup_flags & FOLL_PIN) {
+ get_page(&folio->page);
+ unpin_user_page(&folio->page);
+ }
+
+ ret = migrate_device_coherent_page(&folio->page);
+ if (ret)
+ goto unpin_pages;
+
continue;
+ }
+ if (folio_is_longterm_pinnable(folio))
+ continue;
/*
* Try to move out any movable page before pinning the range.
*/
if (folio_test_hugetlb(folio)) {
- if (!isolate_huge_page(&folio->page,
+ if (isolate_hugetlb(&folio->page,
&movable_page_list))
isolation_error_count++;
continue;
@@ -1921,7 +2006,8 @@ static long check_and_migrate_movable_pages(unsigned long nr_pages,
folio_nr_pages(folio));
}
- if (!list_empty(&movable_page_list) || isolation_error_count)
+ if (!list_empty(&movable_page_list) || isolation_error_count ||
+ coherent_pages)
goto unpin_pages;
/*
@@ -1931,10 +2017,16 @@ static long check_and_migrate_movable_pages(unsigned long nr_pages,
return nr_pages;
unpin_pages:
- if (gup_flags & FOLL_PIN) {
- unpin_user_pages(pages, nr_pages);
- } else {
- for (i = 0; i < nr_pages; i++)
+ /*
+ * pages[i] might be NULL if any device coherent pages were found.
+ */
+ for (i = 0; i < nr_pages; i++) {
+ if (!pages[i])
+ continue;
+
+ if (gup_flags & FOLL_PIN)
+ unpin_user_page(pages[i]);
+ else
put_page(pages[i]);
}
diff --git a/mm/gup_test.c b/mm/gup_test.c
index d974dec19e1c..12b0a91767d3 100644
--- a/mm/gup_test.c
+++ b/mm/gup_test.c
@@ -53,7 +53,7 @@ static void verify_dma_pinned(unsigned int cmd, struct page **pages,
dump_page(page, "gup_test failure");
break;
} else if (cmd == PIN_LONGTERM_BENCHMARK &&
- WARN(!is_pinnable_page(page),
+ WARN(!is_longterm_pinnable_page(page),
"pages[%lu] is NOT pinnable but pinned\n",
i)) {
dump_page(page, "gup_test failure");
diff --git a/mm/highmem.c b/mm/highmem.c
index e32083e4ce0d..c707d7202d5f 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -150,7 +150,7 @@ struct page *__kmap_to_page(void *vaddr)
return pte_page(pkmap_page_table[i]);
}
- return virt_to_page(addr);
+ return virt_to_page(vaddr);
}
EXPORT_SYMBOL(__kmap_to_page);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 15965084816d..e9414ee57c5b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -70,21 +70,85 @@ static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;
unsigned long huge_zero_pfn __read_mostly = ~0UL;
-bool transparent_hugepage_active(struct vm_area_struct *vma)
+bool hugepage_vma_check(struct vm_area_struct *vma,
+ unsigned long vm_flags,
+ bool smaps, bool in_pf)
{
- /* The addr is used to check if the vma size fits */
- unsigned long addr = (vma->vm_end & HPAGE_PMD_MASK) - HPAGE_PMD_SIZE;
+ if (!vma->vm_mm) /* vdso */
+ return false;
+
+ /*
+ * THP may be explicitly disabled through madvise or prctl, or some
+ * architectures may disable it for some mappings, for example,
+ * s390 KVM.
+ */
+ if ((vm_flags & VM_NOHUGEPAGE) ||
+ test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
+ return false;
+ /*
+ * Bail out if the hardware/firmware has marked hugepage support disabled.
+ */
+ if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX))
+ return false;
+
+ /* khugepaged doesn't collapse DAX vma, but page fault is fine. */
+ if (vma_is_dax(vma))
+ return in_pf;
+
+ /*
+ * Special VMA and hugetlb VMA.
+ * Must be checked after dax since some dax mappings may have
+ * VM_MIXEDMAP set.
+ */
+ if (vm_flags & VM_NO_KHUGEPAGED)
+ return false;
- if (!transhuge_vma_suitable(vma, addr))
+ /*
+ * Check alignment for file vma and size for both file and anon vma.
+ *
+ * Skip the check for page fault. Huge fault does the check in fault
+ * handlers. And this check is not suitable for huge PUD fault.
+ */
+ if (!in_pf &&
+ !transhuge_vma_suitable(vma, (vma->vm_end - HPAGE_PMD_SIZE)))
return false;
- if (vma_is_anonymous(vma))
- return __transparent_hugepage_enabled(vma);
- if (vma_is_shmem(vma))
+
+ /*
+ * Enabled via shmem mount options or sysfs settings.
+ * Must be done before hugepage flags check since shmem has its
+ * own flags.
+ */
+ if (!in_pf && shmem_file(vma->vm_file))
return shmem_huge_enabled(vma);
- if (transhuge_vma_enabled(vma, vma->vm_flags) && file_thp_enabled(vma))
+
+ if (!hugepage_flags_enabled())
+ return false;
+
+ /* THP settings require madvise. */
+ if (!(vm_flags & VM_HUGEPAGE) && !hugepage_flags_always())
+ return false;
+
+ /* Only regular file is valid */
+ if (!in_pf && file_thp_enabled(vma))
return true;
- return false;
+ if (!vma_is_anonymous(vma))
+ return false;
+
+ if (vma_is_temporary_stack(vma))
+ return false;
+
+ /*
+ * THPeligible bit of smaps should show 1 for proper VMAs even
+ * though anon_vma is not initialized yet.
+ *
+ * Allow page fault since anon_vma may not be initialized until
+ * the first page fault.
+ */
+ if (!vma->anon_vma)
+ return (smaps || in_pf);
+
+ return true;
}
static bool get_huge_zero_page(void)
@@ -213,8 +277,8 @@ static ssize_t enabled_store(struct kobject *kobj,
}
return ret;
}
-static struct kobj_attribute enabled_attr =
- __ATTR(enabled, 0644, enabled_show, enabled_store);
+
+static struct kobj_attribute enabled_attr = __ATTR_RW(enabled);
ssize_t single_hugepage_flag_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf,
@@ -303,8 +367,7 @@ static ssize_t defrag_store(struct kobject *kobj,
return count;
}
-static struct kobj_attribute defrag_attr =
- __ATTR(defrag, 0644, defrag_show, defrag_store);
+static struct kobj_attribute defrag_attr = __ATTR_RW(defrag);
static ssize_t use_zero_page_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
@@ -318,8 +381,7 @@ static ssize_t use_zero_page_store(struct kobject *kobj,
return single_hugepage_flag_store(kobj, attr, buf, count,
TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
-static struct kobj_attribute use_zero_page_attr =
- __ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);
+static struct kobj_attribute use_zero_page_attr = __ATTR_RW(use_zero_page);
static ssize_t hpage_pmd_size_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
@@ -424,10 +486,10 @@ static int __init hugepage_init(void)
if (err)
goto err_slab;
- err = register_shrinker(&huge_zero_page_shrinker);
+ err = register_shrinker(&huge_zero_page_shrinker, "thp-zero");
if (err)
goto err_hzp_shrinker;
- err = register_shrinker(&deferred_split_shrinker);
+ err = register_shrinker(&deferred_split_shrinker, "thp-deferred_split");
if (err)
goto err_split_shrinker;
@@ -520,7 +582,7 @@ static inline struct deferred_split *get_deferred_split_queue(struct page *page)
void prep_transhuge_page(struct page *page)
{
/*
- * we use page->mapping and page->indexlru in second tail page
+ * we use page->mapping and page->index in second tail page
* as list_head: assuming THP order >= 2
*/
@@ -727,7 +789,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
return VM_FAULT_FALLBACK;
if (unlikely(anon_vma_prepare(vma)))
return VM_FAULT_OOM;
- khugepaged_enter(vma, vma->vm_flags);
+ khugepaged_enter_vma(vma, vma->vm_flags);
if (!(vmf->flags & FAULT_FLAG_WRITE) &&
!mm_forbids_zeropage(vma->vm_mm) &&
@@ -957,15 +1019,15 @@ EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud_prot);
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
- pmd_t *pmd, int flags)
+ pmd_t *pmd, bool write)
{
pmd_t _pmd;
_pmd = pmd_mkyoung(*pmd);
- if (flags & FOLL_WRITE)
+ if (write)
_pmd = pmd_mkdirty(_pmd);
if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
- pmd, _pmd, flags & FOLL_WRITE))
+ pmd, _pmd, write))
update_mmu_cache_pmd(vma, addr, pmd);
}
@@ -978,12 +1040,6 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
assert_spin_locked(pmd_lockptr(mm, pmd));
- /*
- * When we COW a devmap PMD entry, we split it into PTEs, so we should
- * not be in this function with `flags & FOLL_COW` set.
- */
- WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");
-
/* FOLL_GET and FOLL_PIN are mutually exclusive. */
if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
(FOLL_PIN | FOLL_GET)))
@@ -998,7 +1054,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
return NULL;
if (flags & FOLL_TOUCH)
- touch_pmd(vma, addr, pmd, flags);
+ touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
/*
* device mapped pages can only be returned if the
@@ -1121,15 +1177,15 @@ out:
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
- pud_t *pud, int flags)
+ pud_t *pud, bool write)
{
pud_t _pud;
_pud = pud_mkyoung(*pud);
- if (flags & FOLL_WRITE)
+ if (write)
_pud = pud_mkdirty(_pud);
if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
- pud, _pud, flags & FOLL_WRITE))
+ pud, _pud, write))
update_mmu_cache_pud(vma, addr, pud);
}
@@ -1156,7 +1212,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
return NULL;
if (flags & FOLL_TOUCH)
- touch_pud(vma, addr, pud, flags);
+ touch_pud(vma, addr, pud, flags & FOLL_WRITE);
/*
* device mapped pages can only be returned if the
@@ -1221,21 +1277,13 @@ out_unlock:
void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
- pud_t entry;
- unsigned long haddr;
bool write = vmf->flags & FAULT_FLAG_WRITE;
vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
if (unlikely(!pud_same(*vmf->pud, orig_pud)))
goto unlock;
- entry = pud_mkyoung(orig_pud);
- if (write)
- entry = pud_mkdirty(entry);
- haddr = vmf->address & HPAGE_PUD_MASK;
- if (pudp_set_access_flags(vmf->vma, haddr, vmf->pud, entry, write))
- update_mmu_cache_pud(vmf->vma, vmf->address, vmf->pud);
-
+ touch_pud(vmf->vma, vmf->address, vmf->pud, write);
unlock:
spin_unlock(vmf->ptl);
}
@@ -1243,21 +1291,13 @@ unlock:
void huge_pmd_set_accessed(struct vm_fault *vmf)
{
- pmd_t entry;
- unsigned long haddr;
bool write = vmf->flags & FAULT_FLAG_WRITE;
- pmd_t orig_pmd = vmf->orig_pmd;
vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
- if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
+ if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd)))
goto unlock;
- entry = pmd_mkyoung(orig_pmd);
- if (write)
- entry = pmd_mkdirty(entry);
- haddr = vmf->address & HPAGE_PMD_MASK;
- if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write))
- update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd);
+ touch_pmd(vmf->vma, vmf->address, vmf->pmd, write);
unlock:
spin_unlock(vmf->ptl);
@@ -1349,14 +1389,42 @@ fallback:
return VM_FAULT_FALLBACK;
}
-/*
- * FOLL_FORCE can write to even unwritable pmd's, but only
- * after we've gone through a COW cycle and they are dirty.
- */
-static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
+/* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */
+static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
+ struct vm_area_struct *vma,
+ unsigned int flags)
{
- return pmd_write(pmd) ||
- ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
+ /* If the pmd is writable, we can write to the page. */
+ if (pmd_write(pmd))
+ return true;
+
+ /* Maybe FOLL_FORCE is set to override it? */
+ if (!(flags & FOLL_FORCE))
+ return false;
+
+ /* But FOLL_FORCE has no effect on shared mappings */
+ if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
+ return false;
+
+ /* ... or read-only private ones */
+ if (!(vma->vm_flags & VM_MAYWRITE))
+ return false;
+
+ /* ... or already writable ones that just need to take a write fault */
+ if (vma->vm_flags & VM_WRITE)
+ return false;
+
+ /*
+ * See can_change_pte_writable(): we broke COW and could map the page
+ * writable if we have an exclusive anonymous page ...
+ */
+ if (!page || !PageAnon(page) || !PageAnonExclusive(page))
+ return false;
+
+ /* ... and a write-fault isn't required for other reasons. */
+ if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
+ return false;
+ return !userfaultfd_huge_pmd_wp(vma, pmd);
}
struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
@@ -1365,12 +1433,16 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
unsigned int flags)
{
struct mm_struct *mm = vma->vm_mm;
- struct page *page = NULL;
+ struct page *page;
assert_spin_locked(pmd_lockptr(mm, pmd));
- if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
- goto out;
+ page = pmd_page(*pmd);
+ VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
+
+ if ((flags & FOLL_WRITE) &&
+ !can_follow_write_pmd(*pmd, page, vma, flags))
+ return NULL;
/* Avoid dumping huge zero page */
if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
@@ -1378,10 +1450,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
/* Full NUMA hinting faults to serialise migration in fault paths */
if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
- goto out;
-
- page = pmd_page(*pmd);
- VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
+ return NULL;
if (!pmd_write(*pmd) && gup_must_unshare(flags, page))
return ERR_PTR(-EMLINK);
@@ -1393,12 +1462,11 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
return ERR_PTR(-ENOMEM);
if (flags & FOLL_TOUCH)
- touch_pmd(vma, addr, pmd, flags);
+ touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
-out:
return page;
}
@@ -1686,7 +1754,7 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
pmd = move_soft_dirty_pmd(pmd);
set_pmd_at(mm, new_addr, new_pmd, pmd);
if (force_flush)
- flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
+ flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
if (new_ptl != old_ptl)
spin_unlock(new_ptl);
spin_unlock(old_ptl);
@@ -1843,10 +1911,10 @@ spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
}
/*
- * Returns true if a given pud maps a thp, false otherwise.
+ * Returns a page table lock pointer if a given pud maps a thp, NULL otherwise.
*
- * Note that if it returns true, this routine returns without unlocking page
- * table lock. So callers must unlock it.
+ * Note that if it returns a page table lock pointer, this routine returns
+ * without unlocking the page table lock. So callers must unlock it.
*/
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
{
@@ -1868,12 +1936,7 @@ int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
ptl = __pud_trans_huge_lock(pud, vma);
if (!ptl)
return 0;
- /*
- * For architectures like ppc64 we look at deposited pgtable
- * when calling pudp_huge_get_and_clear. So do the
- * pgtable_trans_huge_withdraw after finishing pudp related
- * operations.
- */
+
pudp_huge_get_and_clear_full(tlb->mm, addr, pud, tlb->fullmm);
tlb_remove_pud_tlb_entry(tlb, pud, addr);
if (vma_is_special_huge(vma)) {
@@ -1938,7 +2001,7 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
* replacing a zero pmd write protected page with a zero pte write
* protected page.
*
- * See Documentation/vm/mmu_notifier.rst
+ * See Documentation/mm/mmu_notifier.rst
*/
pmdp_huge_clear_flush(vma, haddr, pmd);
@@ -2195,6 +2258,10 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
is_pmd_migration_entry(*pmd)) {
+ /*
+ * It's safe to call pmd_page when folio is set because it's
+ * guaranteed that pmd is present.
+ */
if (folio && folio != page_folio(pmd_page(*pmd)))
goto out;
__split_huge_pmd_locked(vma, pmd, range.start, freeze);
@@ -2502,7 +2569,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
* requires taking the lru_lock so we do the put_page
* of the tail pages after the split is complete.
*/
- put_page(subpage);
+ free_page_and_swap_cache(subpage);
}
}
@@ -2821,9 +2888,12 @@ static void split_huge_pages_all(void)
unsigned long total = 0, split = 0;
pr_debug("Split all THPs\n");
- for_each_populated_zone(zone) {
+ for_each_zone(zone) {
+ if (!managed_zone(zone))
+ continue;
max_zone_pfn = zone_end_pfn(zone);
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
+ int nr_pages;
if (!pfn_valid(pfn))
continue;
@@ -2839,8 +2909,10 @@ static void split_huge_pages_all(void)
total++;
lock_page(page);
+ nr_pages = thp_nr_pages(page);
if (!split_huge_page(page))
split++;
+ pfn += nr_pages - 1;
unlock_page(page);
next:
put_page(page);
@@ -2898,10 +2970,10 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
* table filled with PTE-mapped THPs, each of which is distinct.
*/
for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) {
- struct vm_area_struct *vma = find_vma(mm, addr);
+ struct vm_area_struct *vma = vma_lookup(mm, addr);
struct page *page;
- if (!vma || addr < vma->vm_start)
+ if (!vma)
break;
/* skip special VMA and hugetlb VMA */
@@ -2913,9 +2985,7 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
/* FOLL_DUMP to ignore special (like zero) pages */
page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
- if (IS_ERR(page))
- continue;
- if (!page)
+ if (IS_ERR_OR_NULL(page) || is_zone_device_page(page))
continue;
if (!is_transparent_hugepage(page))
@@ -3137,7 +3207,7 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
struct vm_area_struct *vma = pvmw->vma;
struct mm_struct *mm = vma->vm_mm;
unsigned long address = pvmw->address;
- unsigned long mmun_start = address & HPAGE_PMD_MASK;
+ unsigned long haddr = address & HPAGE_PMD_MASK;
pmd_t pmde;
swp_entry_t entry;
@@ -3146,7 +3216,7 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
entry = pmd_to_swp_entry(*pvmw->pmd);
get_page(new);
- pmde = pmd_mkold(mk_huge_pmd(new, vma->vm_page_prot));
+ pmde = pmd_mkold(mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot)));
if (pmd_swp_soft_dirty(*pvmw->pmd))
pmde = pmd_mksoft_dirty(pmde);
if (is_writable_migration_entry(entry))
@@ -3160,12 +3230,12 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
if (!is_readable_migration_entry(entry))
rmap_flags |= RMAP_EXCLUSIVE;
- page_add_anon_rmap(new, vma, mmun_start, rmap_flags);
+ page_add_anon_rmap(new, vma, haddr, rmap_flags);
} else {
page_add_file_rmap(new, vma, true);
}
VM_BUG_ON(pmd_write(pmde) && PageAnon(new) && !PageAnonExclusive(new));
- set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
+ set_pmd_at(mm, haddr, pvmw->pmd, pmde);
/* No need to invalidate - it was non-present before */
update_mmu_cache_pmd(vma, address, pvmw->pmd);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index aa39534898e0..e070b8593b37 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -66,12 +66,6 @@ static bool hugetlb_cma_page(struct page *page, unsigned int order)
#endif
static unsigned long hugetlb_cma_size __initdata;
-/*
- * Minimum page order among possible hugepage sizes, set to a proper value
- * at boot time.
- */
-static unsigned int minimum_order __read_mostly = UINT_MAX;
-
__initdata LIST_HEAD(huge_boot_pages);
/* for command line parsing */
@@ -1135,7 +1129,7 @@ static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
lockdep_assert_held(&hugetlb_lock);
list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
- if (pin && !is_pinnable_page(page))
+ if (pin && !is_longterm_pinnable_page(page))
continue;
if (PageHWPoison(page))
@@ -1541,7 +1535,14 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
return;
- if (hugetlb_vmemmap_alloc(h, page)) {
+ /*
+ * If we don't know which subpages are hwpoisoned, we can't free
+ * the hugepage, so it's leaked intentionally.
+ */
+ if (HPageRawHwpUnreliable(page))
+ return;
+
+ if (hugetlb_vmemmap_restore(h, page)) {
spin_lock_irq(&hugetlb_lock);
/*
* If we cannot allocate vmemmap pages, just refuse to free the
@@ -1553,6 +1554,13 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
return;
}
+ /*
+ * Move PageHWPoison flag from head page to the raw error pages,
+ * which makes any healthy subpages reusable.
+ */
+ if (unlikely(PageHWPoison(page)))
+ hugetlb_clear_page_hwpoison(page);
+
for (i = 0; i < pages_per_huge_page(h);
i++, subpage = mem_map_next(subpage, page, i)) {
subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
@@ -1618,7 +1626,7 @@ static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
static inline void flush_free_hpage_work(struct hstate *h)
{
- if (hugetlb_optimize_vmemmap_pages(h))
+ if (hugetlb_vmemmap_optimizable(h))
flush_work(&free_hpage_work);
}
@@ -1740,7 +1748,7 @@ static void __prep_account_new_huge_page(struct hstate *h, int nid)
static void __prep_new_huge_page(struct hstate *h, struct page *page)
{
- hugetlb_vmemmap_free(h, page);
+ hugetlb_vmemmap_optimize(h, page);
INIT_LIST_HEAD(&page->lru);
set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
hugetlb_set_page_subpool(page, NULL);
@@ -2113,17 +2121,8 @@ retry:
* Attempt to allocate vmemmmap here so that we can take
* appropriate action on failure.
*/
- rc = hugetlb_vmemmap_alloc(h, head);
+ rc = hugetlb_vmemmap_restore(h, head);
if (!rc) {
- /*
- * Move PageHWPoison flag from head page to the raw
- * error page, which makes any subpages rather than
- * the error page reusable.
- */
- if (PageHWPoison(head) && page != head) {
- SetPageHWPoison(page);
- ClearPageHWPoison(head);
- }
update_and_free_page(h, head, false);
} else {
spin_lock_irq(&hugetlb_lock);
@@ -2152,11 +2151,17 @@ int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
unsigned long pfn;
struct page *page;
int rc = 0;
+ unsigned int order;
+ struct hstate *h;
if (!hugepages_supported())
return rc;
- for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
+ order = huge_page_order(&default_hstate);
+ for_each_hstate(h)
+ order = min(order, huge_page_order(h));
+
+ for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
page = pfn_to_page(pfn);
rc = dissolve_free_huge_page(page);
if (rc)
@@ -2432,8 +2437,7 @@ static void return_unused_surplus_pages(struct hstate *h,
/* Uncommit the reservation */
h->resv_huge_pages -= unused_resv_pages;
- /* Cannot return gigantic pages currently */
- if (hstate_is_gigantic(h))
+ if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
goto out;
/*
@@ -2766,8 +2770,7 @@ retry:
* Fail with -EBUSY if not possible.
*/
spin_unlock_irq(&hugetlb_lock);
- if (!isolate_huge_page(old_page, list))
- ret = -EBUSY;
+ ret = isolate_hugetlb(old_page, list);
spin_lock_irq(&hugetlb_lock);
goto free_new;
} else if (!HPageFreed(old_page)) {
@@ -2843,7 +2846,7 @@ int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
if (hstate_is_gigantic(h))
return -ENOMEM;
- if (page_count(head) && isolate_huge_page(head, list))
+ if (page_count(head) && !isolate_hugetlb(head, list))
ret = 0;
else if (!page_count(head))
ret = alloc_and_dissolve_huge_page(h, head, list);
@@ -3149,9 +3152,6 @@ static void __init hugetlb_init_hstates(void)
struct hstate *h, *h2;
for_each_hstate(h) {
- if (minimum_order > huge_page_order(h))
- minimum_order = huge_page_order(h);
-
/* oversize hugepages were init'ed in early boot */
if (!hstate_is_gigantic(h))
hugetlb_hstate_alloc_pages(h);
@@ -3176,7 +3176,6 @@ static void __init hugetlb_init_hstates(void)
h->demote_order = h2->order;
}
}
- VM_BUG_ON(minimum_order == UINT_MAX);
}
static void __init report_hugepages(void)
@@ -3187,8 +3186,10 @@ static void __init report_hugepages(void)
char buf[32];
string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
- pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
+ pr_info("HugeTLB: registered %s page size, pre-allocated %ld pages\n",
buf, h->free_huge_pages);
+ pr_info("HugeTLB: %d KiB vmemmap can be freed for a %s page\n",
+ hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf);
}
}
@@ -3426,7 +3427,7 @@ static int demote_free_huge_page(struct hstate *h, struct page *page)
remove_hugetlb_page_for_demote(h, page, false);
spin_unlock_irq(&hugetlb_lock);
- rc = hugetlb_vmemmap_alloc(h, page);
+ rc = hugetlb_vmemmap_restore(h, page);
if (rc) {
/* Allocation of vmemmmap failed, we can not demote page */
spin_lock_irq(&hugetlb_lock);
@@ -4116,7 +4117,6 @@ void __init hugetlb_add_hstate(unsigned int order)
h->next_nid_to_free = first_memory_node;
snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
huge_page_size(h)/1024);
- hugetlb_vmemmap_init(h);
parsed_hstate = h;
}
@@ -4482,22 +4482,20 @@ int hugetlb_report_node_meminfo(char *buf, int len, int nid)
nid, h->surplus_huge_pages_node[nid]);
}
-void hugetlb_show_meminfo(void)
+void hugetlb_show_meminfo_node(int nid)
{
struct hstate *h;
- int nid;
if (!hugepages_supported())
return;
- for_each_node_state(nid, N_MEMORY)
- for_each_hstate(h)
- pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
- nid,
- h->nr_huge_pages_node[nid],
- h->free_huge_pages_node[nid],
- h->surplus_huge_pages_node[nid],
- huge_page_size(h) / SZ_1K);
+ for_each_hstate(h)
+ printk("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
+ nid,
+ h->nr_huge_pages_node[nid],
+ h->free_huge_pages_node[nid],
+ h->surplus_huge_pages_node[nid],
+ huge_page_size(h) / SZ_1K);
}
void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
@@ -4732,6 +4730,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
unsigned long npages = pages_per_huge_page(h);
struct address_space *mapping = src_vma->vm_file->f_mapping;
struct mmu_notifier_range range;
+ unsigned long last_addr_mask;
int ret = 0;
if (cow) {
@@ -4751,11 +4750,14 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
i_mmap_lock_read(mapping);
}
+ last_addr_mask = hugetlb_mask_last_page(h);
for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) {
spinlock_t *src_ptl, *dst_ptl;
src_pte = huge_pte_offset(src, addr, sz);
- if (!src_pte)
+ if (!src_pte) {
+ addr |= last_addr_mask;
continue;
+ }
dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz);
if (!dst_pte) {
ret = -ENOMEM;
@@ -4772,8 +4774,10 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
* after taking the lock below.
*/
dst_entry = huge_ptep_get(dst_pte);
- if ((dst_pte == src_pte) || !huge_pte_none(dst_entry))
+ if ((dst_pte == src_pte) || !huge_pte_none(dst_entry)) {
+ addr |= last_addr_mask;
continue;
+ }
dst_ptl = huge_pte_lock(h, dst, dst_pte);
src_ptl = huge_pte_lockptr(h, src, src_pte);
@@ -4808,12 +4812,11 @@ again:
entry = swp_entry_to_pte(swp_entry);
if (userfaultfd_wp(src_vma) && uffd_wp)
entry = huge_pte_mkuffd_wp(entry);
- set_huge_swap_pte_at(src, addr, src_pte,
- entry, sz);
+ set_huge_pte_at(src, addr, src_pte, entry);
}
if (!userfaultfd_wp(dst_vma) && uffd_wp)
entry = huge_pte_clear_uffd_wp(entry);
- set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
+ set_huge_pte_at(dst, addr, dst_pte, entry);
} else if (unlikely(is_pte_marker(entry))) {
/*
* We copy the pte marker only if the dst vma has
@@ -4880,7 +4883,7 @@ again:
* table protection not changing it to point
* to a new page.
*
- * See Documentation/vm/mmu_notifier.rst
+ * See Documentation/mm/mmu_notifier.rst
*/
huge_ptep_set_wrprotect(src, addr, src_pte);
entry = huge_pte_wrprotect(entry);
@@ -4939,7 +4942,7 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
unsigned long sz = huge_page_size(h);
struct mm_struct *mm = vma->vm_mm;
unsigned long old_end = old_addr + len;
- unsigned long old_addr_copy;
+ unsigned long last_addr_mask;
pte_t *src_pte, *dst_pte;
struct mmu_notifier_range range;
bool shared_pmd = false;
@@ -4954,23 +4957,23 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
flush_cache_range(vma, range.start, range.end);
mmu_notifier_invalidate_range_start(&range);
+ last_addr_mask = hugetlb_mask_last_page(h);
/* Prevent race with file truncation */
i_mmap_lock_write(mapping);
for (; old_addr < old_end; old_addr += sz, new_addr += sz) {
src_pte = huge_pte_offset(mm, old_addr, sz);
- if (!src_pte)
+ if (!src_pte) {
+ old_addr |= last_addr_mask;
+ new_addr |= last_addr_mask;
continue;
+ }
if (huge_pte_none(huge_ptep_get(src_pte)))
continue;
- /* old_addr arg to huge_pmd_unshare() is a pointer and so the
- * arg may be modified. Pass a copy instead to preserve the
- * value in old_addr.
- */
- old_addr_copy = old_addr;
-
- if (huge_pmd_unshare(mm, vma, &old_addr_copy, src_pte)) {
+ if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) {
shared_pmd = true;
+ old_addr |= last_addr_mask;
+ new_addr |= last_addr_mask;
continue;
}
@@ -5004,6 +5007,7 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
struct hstate *h = hstate_vma(vma);
unsigned long sz = huge_page_size(h);
struct mmu_notifier_range range;
+ unsigned long last_addr_mask;
bool force_flush = false;
WARN_ON(!is_vm_hugetlb_page(vma));
@@ -5024,17 +5028,21 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
end);
adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
mmu_notifier_invalidate_range_start(&range);
+ last_addr_mask = hugetlb_mask_last_page(h);
address = start;
for (; address < end; address += sz) {
ptep = huge_pte_offset(mm, address, sz);
- if (!ptep)
+ if (!ptep) {
+ address |= last_addr_mask;
continue;
+ }
ptl = huge_pte_lock(h, mm, ptep);
- if (huge_pmd_unshare(mm, vma, &address, ptep)) {
+ if (huge_pmd_unshare(mm, vma, address, ptep)) {
spin_unlock(ptl);
tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
force_flush = true;
+ address |= last_addr_mask;
continue;
}
@@ -5233,6 +5241,21 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
VM_BUG_ON(unshare && (flags & FOLL_WRITE));
VM_BUG_ON(!unshare && !(flags & FOLL_WRITE));
+ /*
+ * hugetlb does not support FOLL_FORCE-style write faults that keep the
+ * PTE mapped R/O such as maybe_mkwrite() would do.
+ */
+ if (WARN_ON_ONCE(!unshare && !(vma->vm_flags & VM_WRITE)))
+ return VM_FAULT_SIGSEGV;
+
+ /* Let's take out MAP_SHARED mappings first. */
+ if (vma->vm_flags & VM_MAYSHARE) {
+ if (unlikely(unshare))
+ return 0;
+ set_huge_ptep_writable(vma, haddr, ptep);
+ return 0;
+ }
+
pte = huge_ptep_get(ptep);
old_page = pte_page(pte);
@@ -5714,7 +5737,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
*/
entry = huge_ptep_get(ptep);
if (unlikely(is_hugetlb_entry_migration(entry))) {
- migration_entry_wait_huge(vma, mm, ptep);
+ migration_entry_wait_huge(vma, ptep);
return 0;
} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
return VM_FAULT_HWPOISON_LARGE |
@@ -5773,12 +5796,11 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
* If we are going to COW/unshare the mapping later, we examine the
* pending reservations for this page now. This will ensure that any
* allocations necessary to record that reservation occur outside the
- * spinlock. For private mappings, we also lookup the pagecache
- * page now as it is used to determine if a reservation has been
- * consumed.
+ * spinlock. Also lookup the pagecache page now as it is used to
+ * determine if a reservation has been consumed.
*/
if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
- !huge_pte_write(entry)) {
+ !(vma->vm_flags & VM_MAYSHARE) && !huge_pte_write(entry)) {
if (vma_needs_reservation(h, vma, haddr) < 0) {
ret = VM_FAULT_OOM;
goto out_mutex;
@@ -5786,9 +5808,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
/* Just decrements count, does not deallocate */
vma_end_reservation(h, vma, haddr);
- if (!(vma->vm_flags & VM_MAYSHARE))
- pagecache_page = hugetlbfs_pagecache_page(h,
- vma, haddr);
+ pagecache_page = hugetlbfs_pagecache_page(h, vma, haddr);
}
ptl = huge_pte_lock(h, mm, ptep);
@@ -6021,7 +6041,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
if (!huge_pte_none_mostly(huge_ptep_get(dst_pte)))
goto out_release_unlock;
- if (vm_shared) {
+ if (page_in_pagecache) {
page_dup_file_rmap(page, true);
} else {
ClearHPageRestoreReserve(page);
@@ -6052,8 +6072,6 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
- (void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte,
- dst_vma->vm_flags & VM_WRITE);
hugetlb_count_add(pages_per_huge_page(h), dst_mm);
/* No need to invalidate - it was non-present before */
@@ -6305,6 +6323,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long pages = 0, psize = huge_page_size(h);
bool shared_pmd = false;
struct mmu_notifier_range range;
+ unsigned long last_addr_mask;
bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
@@ -6321,14 +6340,17 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
flush_cache_range(vma, range.start, range.end);
mmu_notifier_invalidate_range_start(&range);
+ last_addr_mask = hugetlb_mask_last_page(h);
i_mmap_lock_write(vma->vm_file->f_mapping);
for (; address < end; address += psize) {
spinlock_t *ptl;
ptep = huge_pte_offset(mm, address, psize);
- if (!ptep)
+ if (!ptep) {
+ address |= last_addr_mask;
continue;
+ }
ptl = huge_pte_lock(h, mm, ptep);
- if (huge_pmd_unshare(mm, vma, &address, ptep)) {
+ if (huge_pmd_unshare(mm, vma, address, ptep)) {
/*
* When uffd-wp is enabled on the vma, unshare
* shouldn't happen at all. Warn about it if it
@@ -6338,6 +6360,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
pages++;
spin_unlock(ptl);
shared_pmd = true;
+ address |= last_addr_mask;
continue;
}
pte = huge_ptep_get(ptep);
@@ -6363,8 +6386,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
newpte = pte_swp_mkuffd_wp(newpte);
else if (uffd_wp_resolve)
newpte = pte_swp_clear_uffd_wp(newpte);
- set_huge_swap_pte_at(mm, address, ptep,
- newpte, psize);
+ set_huge_pte_at(mm, address, ptep, newpte);
pages++;
}
spin_unlock(ptl);
@@ -6415,7 +6437,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
* No need to call mmu_notifier_invalidate_range() we are downgrading
* page table protection not changing it to point to a new page.
*
- * See Documentation/vm/mmu_notifier.rst
+ * See Documentation/mm/mmu_notifier.rst
*/
i_mmap_unlock_write(vma->vm_file->f_mapping);
mmu_notifier_invalidate_range_end(&range);
@@ -6761,11 +6783,11 @@ out:
* 0 the underlying pte page is not shared, or it is the last user
*/
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long *addr, pte_t *ptep)
+ unsigned long addr, pte_t *ptep)
{
- pgd_t *pgd = pgd_offset(mm, *addr);
- p4d_t *p4d = p4d_offset(pgd, *addr);
- pud_t *pud = pud_offset(p4d, *addr);
+ pgd_t *pgd = pgd_offset(mm, addr);
+ p4d_t *p4d = p4d_offset(pgd, addr);
+ pud_t *pud = pud_offset(p4d, addr);
i_mmap_assert_write_locked(vma->vm_file->f_mapping);
BUG_ON(page_count(virt_to_page(ptep)) == 0);
@@ -6775,14 +6797,6 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
pud_clear(pud);
put_page(virt_to_page(ptep));
mm_dec_nr_pmds(mm);
- /*
- * This update of passed address optimizes loops sequentially
- * processing addresses in increments of huge page size (PMD_SIZE
- * in this case). By clearing the pud, a PUD_SIZE area is unmapped.
- * Update address to the 'last page' in the cleared area so that
- * calling loop can move to first page past this area.
- */
- *addr |= PUD_SIZE - PMD_SIZE;
return 1;
}
@@ -6794,7 +6808,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
}
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long *addr, pte_t *ptep)
+ unsigned long addr, pte_t *ptep)
{
return 0;
}
@@ -6877,6 +6891,37 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
return (pte_t *)pmd;
}
+/*
+ * Return a mask that can be used to update an address to the last huge
+ * page in the range mapped by a single page table page. Used to skip non-present
+ * page table entries when linearly scanning address ranges. Architectures
+ * with unique huge page to page table relationships can define their own
+ * version of this routine.
+ */
+unsigned long hugetlb_mask_last_page(struct hstate *h)
+{
+ unsigned long hp_size = huge_page_size(h);
+
+ if (hp_size == PUD_SIZE)
+ return P4D_SIZE - PUD_SIZE;
+ else if (hp_size == PMD_SIZE)
+ return PUD_SIZE - PMD_SIZE;
+ else
+ return 0UL;
+}
+
+#else
+
+/* See description above. Architectures can provide their own version. */
+__weak unsigned long hugetlb_mask_last_page(struct hstate *h)
+{
+#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+ if (huge_page_size(h) == PMD_SIZE)
+ return PUD_SIZE - PMD_SIZE;
+#endif
+ return 0UL;
+}
+
#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
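The callers above consume this mask by OR-ing it into the scan address so that the loop's next "address += sz" step lands just past a region that has no page table (or whose shared PMD page was just dropped). A rough, self-contained userspace sketch of that arithmetic follows; it is illustrative only and simply assumes x86-64-style 2 MiB PMD and 1 GiB PUD sizes, it is not kernel code.

#include <stdio.h>

#define PMD_SIZE (1UL << 21)	/* 2 MiB, assumed */
#define PUD_SIZE (1UL << 30)	/* 1 GiB, assumed */

int main(void)
{
	unsigned long sz = PMD_SIZE;			/* huge page size */
	unsigned long last_addr_mask = PUD_SIZE - PMD_SIZE;
	unsigned long start = 0x40000000UL;		/* PUD aligned */
	unsigned long end = start + 2 * PUD_SIZE;
	unsigned long addr, scanned = 0;

	for (addr = start; addr < end; addr += sz) {
		scanned++;
		/* Pretend the first PUD region has no page table (ptep == NULL):
		 * jump to its last huge page so the loop's "addr += sz" lands
		 * on the start of the next PUD region. */
		if (addr < start + PUD_SIZE)
			addr |= last_addr_mask;
	}
	printf("iterations: %lu (instead of %lu)\n",
	       scanned, (end - start) / sz);
	return 0;
}

With a 2 MiB hstate this turns a 512-iteration scan of an empty 1 GiB region into a single iteration, which is the optimization the removed "*addr |= PUD_SIZE - PMD_SIZE" trick in huge_pmd_unshare() used to provide.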
/*
@@ -6940,7 +6985,7 @@ retry:
} else {
if (is_hugetlb_entry_migration(pte)) {
spin_unlock(ptl);
- __migration_entry_wait(mm, (pte_t *)pmd, ptl);
+ __migration_entry_wait_huge((pte_t *)pmd, ptl);
goto retry;
}
/*
@@ -6957,10 +7002,38 @@ struct page * __weak
follow_huge_pud(struct mm_struct *mm, unsigned long address,
pud_t *pud, int flags)
{
- if (flags & (FOLL_GET | FOLL_PIN))
+ struct page *page = NULL;
+ spinlock_t *ptl;
+ pte_t pte;
+
+ if (WARN_ON_ONCE(flags & FOLL_PIN))
return NULL;
- return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
+retry:
+ ptl = huge_pte_lock(hstate_sizelog(PUD_SHIFT), mm, (pte_t *)pud);
+ if (!pud_huge(*pud))
+ goto out;
+ pte = huge_ptep_get((pte_t *)pud);
+ if (pte_present(pte)) {
+ page = pud_page(*pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
+ if (WARN_ON_ONCE(!try_grab_page(page, flags))) {
+ page = NULL;
+ goto out;
+ }
+ } else {
+ if (is_hugetlb_entry_migration(pte)) {
+ spin_unlock(ptl);
+ __migration_entry_wait(mm, (pte_t *)pud, ptl);
+ goto retry;
+ }
+ /*
+ * hwpoisoned entry is treated as no_page_table in
+ * follow_page_mask().
+ */
+ }
+out:
+ spin_unlock(ptl);
+ return page;
}
struct page * __weak
@@ -6972,15 +7045,15 @@ follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int fla
return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
}
-bool isolate_huge_page(struct page *page, struct list_head *list)
+int isolate_hugetlb(struct page *page, struct list_head *list)
{
- bool ret = true;
+ int ret = 0;
spin_lock_irq(&hugetlb_lock);
if (!PageHeadHuge(page) ||
!HPageMigratable(page) ||
!get_page_unless_zero(page)) {
- ret = false;
+ ret = -EBUSY;
goto unlock;
}
ClearHPageMigratable(page);
@@ -7100,21 +7173,18 @@ void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
mmu_notifier_invalidate_range_start(&range);
i_mmap_lock_write(vma->vm_file->f_mapping);
for (address = start; address < end; address += PUD_SIZE) {
- unsigned long tmp = address;
-
ptep = huge_pte_offset(mm, address, sz);
if (!ptep)
continue;
ptl = huge_pte_lock(h, mm, ptep);
- /* We don't want 'address' to be changed */
- huge_pmd_unshare(mm, vma, &tmp, ptep);
+ huge_pmd_unshare(mm, vma, address, ptep);
spin_unlock(ptl);
}
flush_hugetlb_tlb_range(vma, start, end);
i_mmap_unlock_write(vma->vm_file->f_mapping);
/*
* No need to call mmu_notifier_invalidate_range(), see
- * Documentation/vm/mmu_notifier.rst.
+ * Documentation/mm/mmu_notifier.rst.
*/
mmu_notifier_invalidate_range_end(&range);
}
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index f9942841df18..c86691c431fd 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -772,6 +772,7 @@ static void __init __hugetlb_cgroup_file_dfl_init(int idx)
/* Add the numa stat file */
cft = &h->cgroup_files_dfl[6];
snprintf(cft->name, MAX_CFTYPE_NAME, "%s.numa_stat", buf);
+ cft->private = MEMFILE_PRIVATE(idx, 0);
cft->seq_show = hugetlb_cgroup_read_numa_stat;
cft->flags = CFTYPE_NOT_ON_ROOT;
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index 1089ea8a9c98..20f414c0379f 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -1,93 +1,452 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Optimize vmemmap pages associated with HugeTLB
+ * HugeTLB Vmemmap Optimization (HVO)
*
- * Copyright (c) 2020, Bytedance. All rights reserved.
+ * Copyright (c) 2020, ByteDance. All rights reserved.
*
* Author: Muchun Song <songmuchun@bytedance.com>
*
- * See Documentation/vm/vmemmap_dedup.rst
+ * See Documentation/mm/vmemmap_dedup.rst
*/
#define pr_fmt(fmt) "HugeTLB: " fmt
-#include <linux/memory_hotplug.h>
+#include <linux/pgtable.h>
+#include <linux/bootmem_info.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
#include "hugetlb_vmemmap.h"
-/*
- * There are a lot of struct page structures associated with each HugeTLB page.
- * For tail pages, the value of compound_head is the same. So we can reuse first
- * page of head page structures. We map the virtual addresses of all the pages
- * of tail page structures to the head page struct, and then free these page
- * frames. Therefore, we need to reserve one pages as vmemmap areas.
+/**
+ * struct vmemmap_remap_walk - walk vmemmap page table
+ *
+ * @remap_pte: called for each lowest-level entry (PTE).
+ * @nr_walked:		the number of PTEs walked so far.
+ * @reuse_page: the page which is reused for the tail vmemmap pages.
+ * @reuse_addr: the virtual address of the @reuse_page page.
+ * @vmemmap_pages:	the list head of the vmemmap pages that can be freed
+ *			(when remapping) or that new mappings are taken from (when restoring).
*/
-#define RESERVE_VMEMMAP_NR 1U
-#define RESERVE_VMEMMAP_SIZE (RESERVE_VMEMMAP_NR << PAGE_SHIFT)
-
-enum vmemmap_optimize_mode {
- VMEMMAP_OPTIMIZE_OFF,
- VMEMMAP_OPTIMIZE_ON,
+struct vmemmap_remap_walk {
+ void (*remap_pte)(pte_t *pte, unsigned long addr,
+ struct vmemmap_remap_walk *walk);
+ unsigned long nr_walked;
+ struct page *reuse_page;
+ unsigned long reuse_addr;
+ struct list_head *vmemmap_pages;
};
-DEFINE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON,
- hugetlb_optimize_vmemmap_key);
-EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);
+static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
+{
+ pmd_t __pmd;
+ int i;
+ unsigned long addr = start;
+ struct page *page = pmd_page(*pmd);
+ pte_t *pgtable = pte_alloc_one_kernel(&init_mm);
-static enum vmemmap_optimize_mode vmemmap_optimize_mode =
- IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON);
+ if (!pgtable)
+ return -ENOMEM;
-static void vmemmap_optimize_mode_switch(enum vmemmap_optimize_mode to)
+ pmd_populate_kernel(&init_mm, &__pmd, pgtable);
+
+ for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
+ pte_t entry, *pte;
+ pgprot_t pgprot = PAGE_KERNEL;
+
+ entry = mk_pte(page + i, pgprot);
+ pte = pte_offset_kernel(&__pmd, addr);
+ set_pte_at(&init_mm, addr, pte, entry);
+ }
+
+ spin_lock(&init_mm.page_table_lock);
+ if (likely(pmd_leaf(*pmd))) {
+ /*
+ * Higher order allocations from buddy allocator must be able to
+ * be treated as independent small pages (as they can be freed
+ * individually).
+ */
+ if (!PageReserved(page))
+ split_page(page, get_order(PMD_SIZE));
+
+ /* Make pte visible before pmd. See comment in pmd_install(). */
+ smp_wmb();
+ pmd_populate_kernel(&init_mm, pmd, pgtable);
+ flush_tlb_kernel_range(start, start + PMD_SIZE);
+ } else {
+ pte_free_kernel(&init_mm, pgtable);
+ }
+ spin_unlock(&init_mm.page_table_lock);
+
+ return 0;
+}
+
+static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
{
- if (vmemmap_optimize_mode == to)
- return;
+ int leaf;
- if (to == VMEMMAP_OPTIMIZE_OFF)
- static_branch_dec(&hugetlb_optimize_vmemmap_key);
- else
- static_branch_inc(&hugetlb_optimize_vmemmap_key);
- WRITE_ONCE(vmemmap_optimize_mode, to);
+ spin_lock(&init_mm.page_table_lock);
+ leaf = pmd_leaf(*pmd);
+ spin_unlock(&init_mm.page_table_lock);
+
+ if (!leaf)
+ return 0;
+
+ return __split_vmemmap_huge_pmd(pmd, start);
+}
+
+static void vmemmap_pte_range(pmd_t *pmd, unsigned long addr,
+ unsigned long end,
+ struct vmemmap_remap_walk *walk)
+{
+ pte_t *pte = pte_offset_kernel(pmd, addr);
+
+ /*
+ * The reuse_page is found 'first' in the page table walk, before we
+ * start remapping (i.e. before calling @walk->remap_pte).
+ */
+ if (!walk->reuse_page) {
+ walk->reuse_page = pte_page(*pte);
+ /*
+ * Because the reuse address is part of the range that we are
+ * walking, skip the reuse address range.
+ */
+ addr += PAGE_SIZE;
+ pte++;
+ walk->nr_walked++;
+ }
+
+ for (; addr != end; addr += PAGE_SIZE, pte++) {
+ walk->remap_pte(pte, addr, walk);
+ walk->nr_walked++;
+ }
+}
+
+static int vmemmap_pmd_range(pud_t *pud, unsigned long addr,
+ unsigned long end,
+ struct vmemmap_remap_walk *walk)
+{
+ pmd_t *pmd;
+ unsigned long next;
+
+ pmd = pmd_offset(pud, addr);
+ do {
+ int ret;
+
+ ret = split_vmemmap_huge_pmd(pmd, addr & PMD_MASK);
+ if (ret)
+ return ret;
+
+ next = pmd_addr_end(addr, end);
+ vmemmap_pte_range(pmd, addr, next, walk);
+ } while (pmd++, addr = next, addr != end);
+
+ return 0;
+}
+
+static int vmemmap_pud_range(p4d_t *p4d, unsigned long addr,
+ unsigned long end,
+ struct vmemmap_remap_walk *walk)
+{
+ pud_t *pud;
+ unsigned long next;
+
+ pud = pud_offset(p4d, addr);
+ do {
+ int ret;
+
+ next = pud_addr_end(addr, end);
+ ret = vmemmap_pmd_range(pud, addr, next, walk);
+ if (ret)
+ return ret;
+ } while (pud++, addr = next, addr != end);
+
+ return 0;
}
-static int __init hugetlb_vmemmap_early_param(char *buf)
+static int vmemmap_p4d_range(pgd_t *pgd, unsigned long addr,
+ unsigned long end,
+ struct vmemmap_remap_walk *walk)
{
- bool enable;
- enum vmemmap_optimize_mode mode;
+ p4d_t *p4d;
+ unsigned long next;
- if (kstrtobool(buf, &enable))
- return -EINVAL;
+ p4d = p4d_offset(pgd, addr);
+ do {
+ int ret;
- mode = enable ? VMEMMAP_OPTIMIZE_ON : VMEMMAP_OPTIMIZE_OFF;
- vmemmap_optimize_mode_switch(mode);
+ next = p4d_addr_end(addr, end);
+ ret = vmemmap_pud_range(p4d, addr, next, walk);
+ if (ret)
+ return ret;
+ } while (p4d++, addr = next, addr != end);
return 0;
}
-early_param("hugetlb_free_vmemmap", hugetlb_vmemmap_early_param);
+
+static int vmemmap_remap_range(unsigned long start, unsigned long end,
+ struct vmemmap_remap_walk *walk)
+{
+ unsigned long addr = start;
+ unsigned long next;
+ pgd_t *pgd;
+
+ VM_BUG_ON(!PAGE_ALIGNED(start));
+ VM_BUG_ON(!PAGE_ALIGNED(end));
+
+ pgd = pgd_offset_k(addr);
+ do {
+ int ret;
+
+ next = pgd_addr_end(addr, end);
+ ret = vmemmap_p4d_range(pgd, addr, next, walk);
+ if (ret)
+ return ret;
+ } while (pgd++, addr = next, addr != end);
+
+ /*
+ * We only change the mapping of the vmemmap virtual address range
+ * [@start + PAGE_SIZE, end), so we only need to flush the TLB which
+ * belongs to the range.
+ */
+ flush_tlb_kernel_range(start + PAGE_SIZE, end);
+
+ return 0;
+}
+
+/*
+ * Free a vmemmap page. A vmemmap page can be allocated from the memblock
+ * allocator or the buddy allocator. If the PG_reserved flag is set, it
+ * means it was allocated from the memblock allocator and should be freed
+ * via free_bootmem_page(). Otherwise, use __free_page().
+ */
+static inline void free_vmemmap_page(struct page *page)
+{
+ if (PageReserved(page))
+ free_bootmem_page(page);
+ else
+ __free_page(page);
+}
+
+/* Free a list of the vmemmap pages */
+static void free_vmemmap_page_list(struct list_head *list)
+{
+ struct page *page, *next;
+
+ list_for_each_entry_safe(page, next, list, lru) {
+ list_del(&page->lru);
+ free_vmemmap_page(page);
+ }
+}
+
+static void vmemmap_remap_pte(pte_t *pte, unsigned long addr,
+ struct vmemmap_remap_walk *walk)
+{
+ /*
+ * Remap the tail pages as read-only to catch illegal write operations
+ * to the tail pages.
+ */
+ pgprot_t pgprot = PAGE_KERNEL_RO;
+ pte_t entry = mk_pte(walk->reuse_page, pgprot);
+ struct page *page = pte_page(*pte);
+
+ list_add_tail(&page->lru, walk->vmemmap_pages);
+ set_pte_at(&init_mm, addr, pte, entry);
+}
/*
- * Previously discarded vmemmap pages will be allocated and remapping
- * after this function returns zero.
+ * How many struct page structs need to be reset. When we reuse the head
+ * struct page, the special metadata (e.g. page->flags or page->mapping)
+ * cannot be copied to the tail struct page structs. The invalid value will
+ * be checked in free_tail_pages_check(). To avoid the "corrupted mapping
+ * in tail page" message, we need to reset at least 3 struct page structs
+ * (one head struct page and two tail struct pages).
+ */
+#define NR_RESET_STRUCT_PAGE 3
+
+static inline void reset_struct_pages(struct page *start)
+{
+ int i;
+ struct page *from = start + NR_RESET_STRUCT_PAGE;
+
+ for (i = 0; i < NR_RESET_STRUCT_PAGE; i++)
+ memcpy(start + i, from, sizeof(*from));
+}
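The reset is easier to see with a toy example. The sketch below is purely illustrative (a made-up struct instead of struct page, arbitrary marker values); it only mirrors the idea that the first NR_RESET_STRUCT_PAGE entries of the copied vmemmap page are overwritten with a known-good tail entry taken from just past them.

#include <stdio.h>
#include <string.h>

#define NR_RESET 3		/* mirrors NR_RESET_STRUCT_PAGE */
#define NR_ENTRIES 8		/* struct pages per vmemmap page, made up */

struct fake_page {		/* stand-in for struct page */
	unsigned long flags;
	void *mapping;
};

int main(void)
{
	struct fake_page vmemmap[NR_ENTRIES];
	int i;

	/* Pretend the first NR_RESET entries still carry head-page metadata
	 * while the rest already look like clean tail pages. */
	for (i = 0; i < NR_ENTRIES; i++) {
		vmemmap[i].flags = i < NR_RESET ? 0xbadUL : 0;
		vmemmap[i].mapping = i < NR_RESET ? (void *)0x1 : NULL;
	}

	/* Same idea as reset_struct_pages(): copy a known-good tail entry
	 * over each of the first NR_RESET entries. */
	struct fake_page *from = &vmemmap[NR_RESET];

	for (i = 0; i < NR_RESET; i++)
		memcpy(&vmemmap[i], from, sizeof(*from));

	printf("entry 0: flags=%lx mapping=%p\n",
	       vmemmap[0].flags, vmemmap[0].mapping);
	return 0;
}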
+
+static void vmemmap_restore_pte(pte_t *pte, unsigned long addr,
+ struct vmemmap_remap_walk *walk)
+{
+ pgprot_t pgprot = PAGE_KERNEL;
+ struct page *page;
+ void *to;
+
+ BUG_ON(pte_page(*pte) != walk->reuse_page);
+
+ page = list_first_entry(walk->vmemmap_pages, struct page, lru);
+ list_del(&page->lru);
+ to = page_to_virt(page);
+ copy_page(to, (void *)walk->reuse_addr);
+ reset_struct_pages(to);
+
+ set_pte_at(&init_mm, addr, pte, mk_pte(page, pgprot));
+}
+
+/**
+ * vmemmap_remap_free - remap the vmemmap virtual address range [@start, @end)
+ *			to the page which @reuse is mapped to, then free the
+ *			vmemmap pages which the range was mapped to.
+ * @start: start address of the vmemmap virtual address range that we want
+ * to remap.
+ * @end: end address of the vmemmap virtual address range that we want to
+ * remap.
+ * @reuse: reuse address.
+ *
+ * Return: %0 on success, negative error code otherwise.
+ */
+static int vmemmap_remap_free(unsigned long start, unsigned long end,
+ unsigned long reuse)
+{
+ int ret;
+ LIST_HEAD(vmemmap_pages);
+ struct vmemmap_remap_walk walk = {
+ .remap_pte = vmemmap_remap_pte,
+ .reuse_addr = reuse,
+ .vmemmap_pages = &vmemmap_pages,
+ };
+
+ /*
+	 * To make the remapping routine most efficient for huge pages, the
+	 * vmemmap page table walk follows these rules (see vmemmap_pte_range()
+	 * for details):
+	 *
+	 * - The range [@start, @end) and the range [@reuse, @reuse + PAGE_SIZE)
+	 *   must be contiguous.
+	 * - The @reuse address is part of the range [@reuse, @end) that we are
+	 *   walking, which is passed to vmemmap_remap_range().
+	 * - The @reuse address is the first address in the complete range.
+	 *
+	 * So we need to make sure that @start and @reuse meet the above rules.
+ */
+ BUG_ON(start - reuse != PAGE_SIZE);
+
+ mmap_read_lock(&init_mm);
+ ret = vmemmap_remap_range(reuse, end, &walk);
+ if (ret && walk.nr_walked) {
+ end = reuse + walk.nr_walked * PAGE_SIZE;
+ /*
+ * vmemmap_pages contains pages from the previous
+ * vmemmap_remap_range call which failed. These
+ * are pages which were removed from the vmemmap.
+ * They will be restored in the following call.
+ */
+ walk = (struct vmemmap_remap_walk) {
+ .remap_pte = vmemmap_restore_pte,
+ .reuse_addr = reuse,
+ .vmemmap_pages = &vmemmap_pages,
+ };
+
+ vmemmap_remap_range(reuse, end, &walk);
+ }
+ mmap_read_unlock(&init_mm);
+
+ free_vmemmap_page_list(&vmemmap_pages);
+
+ return ret;
+}
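The BUG_ON() above pins down the expected address layout: @reuse is the single page kept immediately below @start, the walk covers [@reuse, @end), and every page in [@start, @end) ends up remapped to @reuse and freed. A small standalone sketch of that layout follows; the 4 KiB base page, 2 MiB huge page and 64-byte struct page are assumptions for illustration, not values taken from this file.

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define STRUCT_PAGE_SIZE 64UL		/* typical, config dependent */
#define PAGES_PER_HUGE_PAGE 512UL	/* 2 MiB / 4 KiB */

int main(void)
{
	unsigned long vmemmap_size = PAGES_PER_HUGE_PAGE * STRUCT_PAGE_SIZE;
	unsigned long head = 0xffffea0000000000UL;	/* made-up vmemmap base */
	unsigned long reuse = head;			/* kept/reused page */
	unsigned long start = head + PAGE_SIZE;		/* first remapped page */
	unsigned long end = head + vmemmap_size;

	printf("walk range:  [%#lx, %#lx)\n", reuse, end);
	printf("start-reuse: %lu (must equal PAGE_SIZE)\n", start - reuse);
	printf("pages freed: %lu of %lu vmemmap pages\n",
	       (end - start) / PAGE_SIZE, vmemmap_size / PAGE_SIZE);
	return 0;
}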
+
+static int alloc_vmemmap_page_list(unsigned long start, unsigned long end,
+ gfp_t gfp_mask, struct list_head *list)
+{
+ unsigned long nr_pages = (end - start) >> PAGE_SHIFT;
+ int nid = page_to_nid((struct page *)start);
+ struct page *page, *next;
+
+ while (nr_pages--) {
+ page = alloc_pages_node(nid, gfp_mask, 0);
+ if (!page)
+ goto out;
+ list_add_tail(&page->lru, list);
+ }
+
+ return 0;
+out:
+ list_for_each_entry_safe(page, next, list, lru)
+ __free_pages(page, 0);
+ return -ENOMEM;
+}
+
+/**
+ * vmemmap_remap_alloc - remap each page of the vmemmap virtual address range
+ *			 [@start, @end) to a page allocated from
+ *			 @vmemmap_pages.
+ * @start: start address of the vmemmap virtual address range that we want
+ * to remap.
+ * @end: end address of the vmemmap virtual address range that we want to
+ * remap.
+ * @reuse: reuse address.
+ * @gfp_mask: GFP flag for allocating vmemmap pages.
+ *
+ * Return: %0 on success, negative error code otherwise.
+ */
+static int vmemmap_remap_alloc(unsigned long start, unsigned long end,
+ unsigned long reuse, gfp_t gfp_mask)
+{
+ LIST_HEAD(vmemmap_pages);
+ struct vmemmap_remap_walk walk = {
+ .remap_pte = vmemmap_restore_pte,
+ .reuse_addr = reuse,
+ .vmemmap_pages = &vmemmap_pages,
+ };
+
+ /* See the comment in the vmemmap_remap_free(). */
+ BUG_ON(start - reuse != PAGE_SIZE);
+
+ if (alloc_vmemmap_page_list(start, end, gfp_mask, &vmemmap_pages))
+ return -ENOMEM;
+
+ mmap_read_lock(&init_mm);
+ vmemmap_remap_range(reuse, end, &walk);
+ mmap_read_unlock(&init_mm);
+
+ return 0;
+}
+
+DEFINE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);
+EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);
+
+static bool vmemmap_optimize_enabled = IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON);
+core_param(hugetlb_free_vmemmap, vmemmap_optimize_enabled, bool, 0);
+
+/**
+ * hugetlb_vmemmap_restore - restore previously optimized (by
+ * hugetlb_vmemmap_optimize()) vmemmap pages which
+ * will be reallocated and remapped.
+ * @h: struct hstate.
+ * @head: the head page whose vmemmap pages will be restored.
+ *
+ * Return: %0 if @head's vmemmap pages have been reallocated and remapped,
+ * negative error code otherwise.
*/
-int hugetlb_vmemmap_alloc(struct hstate *h, struct page *head)
+int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
{
int ret;
- unsigned long vmemmap_addr = (unsigned long)head;
- unsigned long vmemmap_end, vmemmap_reuse, vmemmap_pages;
+ unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
+ unsigned long vmemmap_reuse;
if (!HPageVmemmapOptimized(head))
return 0;
- vmemmap_addr += RESERVE_VMEMMAP_SIZE;
- vmemmap_pages = hugetlb_optimize_vmemmap_pages(h);
- vmemmap_end = vmemmap_addr + (vmemmap_pages << PAGE_SHIFT);
- vmemmap_reuse = vmemmap_addr - PAGE_SIZE;
+ vmemmap_end = vmemmap_start + hugetlb_vmemmap_size(h);
+ vmemmap_reuse = vmemmap_start;
+ vmemmap_start += HUGETLB_VMEMMAP_RESERVE_SIZE;
/*
- * The pages which the vmemmap virtual address range [@vmemmap_addr,
+ * The pages which the vmemmap virtual address range [@vmemmap_start,
* @vmemmap_end) are mapped to are freed to the buddy allocator, and
* the range is mapped to the page which @vmemmap_reuse is mapped to.
* When a HugeTLB page is freed to the buddy allocator, previously
* discarded vmemmap pages must be allocated and remapped.
*/
- ret = vmemmap_remap_alloc(vmemmap_addr, vmemmap_end, vmemmap_reuse,
+ ret = vmemmap_remap_alloc(vmemmap_start, vmemmap_end, vmemmap_reuse,
GFP_KERNEL | __GFP_NORETRY | __GFP_THISNODE);
if (!ret) {
ClearHPageVmemmapOptimized(head);
@@ -97,115 +456,123 @@ int hugetlb_vmemmap_alloc(struct hstate *h, struct page *head)
return ret;
}
-void hugetlb_vmemmap_free(struct hstate *h, struct page *head)
+/* Return true if this HugeTLB page's vmemmap should and can be optimized. */
+static bool vmemmap_should_optimize(const struct hstate *h, const struct page *head)
{
- unsigned long vmemmap_addr = (unsigned long)head;
- unsigned long vmemmap_end, vmemmap_reuse, vmemmap_pages;
+ if (!READ_ONCE(vmemmap_optimize_enabled))
+ return false;
+
+ if (!hugetlb_vmemmap_optimizable(h))
+ return false;
+
+ if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG)) {
+ pmd_t *pmdp, pmd;
+ struct page *vmemmap_page;
+ unsigned long vaddr = (unsigned long)head;
+
+ /*
+ * Only the vmemmap page's vmemmap page can be self-hosted.
+ * Walk the page tables to find the backing page of the
+ * vmemmap page.
+ */
+ pmdp = pmd_off_k(vaddr);
+ /*
+ * The READ_ONCE() is used to stabilize *pmdp in a register or
+ * on the stack so that it will stop changing under the code.
+ * The only concurrent operation where it can be changed is
+ * split_vmemmap_huge_pmd() (*pmdp will be stable after this
+ * operation).
+ */
+ pmd = READ_ONCE(*pmdp);
+ if (pmd_leaf(pmd))
+ vmemmap_page = pmd_page(pmd) + pte_index(vaddr);
+ else
+ vmemmap_page = pte_page(*pte_offset_kernel(pmdp, vaddr));
+ /*
+ * Due to HugeTLB alignment requirements and the vmemmap pages
+ * being at the start of the hotplugged memory region in the
+ * memory_hotplug.memmap_on_memory case, checking whether any
+ * vmemmap page's vmemmap page is marked as VmemmapSelfHosted
+ * is sufficient.
+ *
+ * [ hotplugged memory ]
+ * [ section ][...][ section ]
+ * [ vmemmap ][ usable memory ]
+ * ^ | | |
+ * +---+ | |
+ * ^ | |
+ * +-------+ |
+ * ^ |
+ * +-------------------------------------------+
+ */
+ if (PageVmemmapSelfHosted(vmemmap_page))
+ return false;
+ }
- vmemmap_pages = hugetlb_optimize_vmemmap_pages(h);
- if (!vmemmap_pages)
- return;
+ return true;
+}
- if (READ_ONCE(vmemmap_optimize_mode) == VMEMMAP_OPTIMIZE_OFF)
+/**
+ * hugetlb_vmemmap_optimize - optimize @head page's vmemmap pages.
+ * @h: struct hstate.
+ * @head: the head page whose vmemmap pages will be optimized.
+ *
+ * This function only tries to optimize @head's vmemmap pages and does not
+ * guarantee that the optimization will succeed after it returns. The caller
+ * can use HPageVmemmapOptimized(@head) to detect if @head's vmemmap pages
+ * have been optimized.
+ */
+void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
+{
+ unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
+ unsigned long vmemmap_reuse;
+
+ if (!vmemmap_should_optimize(h, head))
return;
static_branch_inc(&hugetlb_optimize_vmemmap_key);
- vmemmap_addr += RESERVE_VMEMMAP_SIZE;
- vmemmap_end = vmemmap_addr + (vmemmap_pages << PAGE_SHIFT);
- vmemmap_reuse = vmemmap_addr - PAGE_SIZE;
+ vmemmap_end = vmemmap_start + hugetlb_vmemmap_size(h);
+ vmemmap_reuse = vmemmap_start;
+ vmemmap_start += HUGETLB_VMEMMAP_RESERVE_SIZE;
/*
- * Remap the vmemmap virtual address range [@vmemmap_addr, @vmemmap_end)
+ * Remap the vmemmap virtual address range [@vmemmap_start, @vmemmap_end)
* to the page which @vmemmap_reuse is mapped to, then free the pages
- * which the range [@vmemmap_addr, @vmemmap_end] is mapped to.
+ * which the range [@vmemmap_start, @vmemmap_end] is mapped to.
*/
- if (vmemmap_remap_free(vmemmap_addr, vmemmap_end, vmemmap_reuse))
+ if (vmemmap_remap_free(vmemmap_start, vmemmap_end, vmemmap_reuse))
static_branch_dec(&hugetlb_optimize_vmemmap_key);
else
SetHPageVmemmapOptimized(head);
}
-void __init hugetlb_vmemmap_init(struct hstate *h)
-{
- unsigned int nr_pages = pages_per_huge_page(h);
- unsigned int vmemmap_pages;
-
- /*
- * There are only (RESERVE_VMEMMAP_SIZE / sizeof(struct page)) struct
- * page structs that can be used when CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP,
- * so add a BUILD_BUG_ON to catch invalid usage of the tail struct page.
- */
- BUILD_BUG_ON(__NR_USED_SUBPAGE >=
- RESERVE_VMEMMAP_SIZE / sizeof(struct page));
-
- if (!is_power_of_2(sizeof(struct page))) {
- pr_warn_once("cannot optimize vmemmap pages because \"struct page\" crosses page boundaries\n");
- static_branch_disable(&hugetlb_optimize_vmemmap_key);
- return;
- }
-
- vmemmap_pages = (nr_pages * sizeof(struct page)) >> PAGE_SHIFT;
- /*
- * The head page is not to be freed to buddy allocator, the other tail
- * pages will map to the head page, so they can be freed.
- *
- * Could RESERVE_VMEMMAP_NR be greater than @vmemmap_pages? It is true
- * on some architectures (e.g. aarch64). See Documentation/arm64/
- * hugetlbpage.rst for more details.
- */
- if (likely(vmemmap_pages > RESERVE_VMEMMAP_NR))
- h->optimize_vmemmap_pages = vmemmap_pages - RESERVE_VMEMMAP_NR;
-
- pr_info("can optimize %d vmemmap pages for %s\n",
- h->optimize_vmemmap_pages, h->name);
-}
-
-#ifdef CONFIG_PROC_SYSCTL
-static int hugetlb_optimize_vmemmap_handler(struct ctl_table *table, int write,
- void *buffer, size_t *length,
- loff_t *ppos)
-{
- int ret;
- enum vmemmap_optimize_mode mode;
- static DEFINE_MUTEX(sysctl_mutex);
-
- if (write && !capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- mutex_lock(&sysctl_mutex);
- mode = vmemmap_optimize_mode;
- table->data = &mode;
- ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
- if (write && !ret)
- vmemmap_optimize_mode_switch(mode);
- mutex_unlock(&sysctl_mutex);
-
- return ret;
-}
-
static struct ctl_table hugetlb_vmemmap_sysctls[] = {
{
.procname = "hugetlb_optimize_vmemmap",
- .maxlen = sizeof(enum vmemmap_optimize_mode),
+ .data = &vmemmap_optimize_enabled,
+ .maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = hugetlb_optimize_vmemmap_handler,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_ONE,
+ .proc_handler = proc_dobool,
},
{ }
};
-static __init int hugetlb_vmemmap_sysctls_init(void)
+static int __init hugetlb_vmemmap_init(void)
{
- /*
- * If "memory_hotplug.memmap_on_memory" is enabled or "struct page"
- * crosses page boundaries, the vmemmap pages cannot be optimized.
- */
- if (!mhp_memmap_on_memory() && is_power_of_2(sizeof(struct page)))
- register_sysctl_init("vm", hugetlb_vmemmap_sysctls);
-
+ /* HUGETLB_VMEMMAP_RESERVE_SIZE should cover all used struct pages */
+ BUILD_BUG_ON(__NR_USED_SUBPAGE * sizeof(struct page) > HUGETLB_VMEMMAP_RESERVE_SIZE);
+
+ if (IS_ENABLED(CONFIG_PROC_SYSCTL)) {
+ const struct hstate *h;
+
+ for_each_hstate(h) {
+ if (hugetlb_vmemmap_optimizable(h)) {
+ register_sysctl_init("vm", hugetlb_vmemmap_sysctls);
+ break;
+ }
+ }
+ }
return 0;
}
-late_initcall(hugetlb_vmemmap_sysctls_init);
-#endif /* CONFIG_PROC_SYSCTL */
+late_initcall(hugetlb_vmemmap_init);
diff --git a/mm/hugetlb_vmemmap.h b/mm/hugetlb_vmemmap.h
index 109b0a53b6fe..25bd0e002431 100644
--- a/mm/hugetlb_vmemmap.h
+++ b/mm/hugetlb_vmemmap.h
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Optimize vmemmap pages associated with HugeTLB
+ * HugeTLB Vmemmap Optimization (HVO)
*
- * Copyright (c) 2020, Bytedance. All rights reserved.
+ * Copyright (c) 2020, ByteDance. All rights reserved.
*
* Author: Muchun Song <songmuchun@bytedance.com>
*/
@@ -11,35 +11,50 @@
#include <linux/hugetlb.h>
#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
-int hugetlb_vmemmap_alloc(struct hstate *h, struct page *head);
-void hugetlb_vmemmap_free(struct hstate *h, struct page *head);
-void hugetlb_vmemmap_init(struct hstate *h);
+int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head);
+void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head);
/*
- * How many vmemmap pages associated with a HugeTLB page that can be
- * optimized and freed to the buddy allocator.
+ * Reserve one vmemmap page; all vmemmap addresses are mapped to it. See
+ * Documentation/mm/vmemmap_dedup.rst.
*/
-static inline unsigned int hugetlb_optimize_vmemmap_pages(struct hstate *h)
+#define HUGETLB_VMEMMAP_RESERVE_SIZE PAGE_SIZE
+
+static inline unsigned int hugetlb_vmemmap_size(const struct hstate *h)
{
- return h->optimize_vmemmap_pages;
+ return pages_per_huge_page(h) * sizeof(struct page);
+}
+
+/*
+ * Return the size, in bytes, of the vmemmap associated with a HugeTLB page
+ * that can be optimized and freed to the buddy allocator.
+ */
+static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)
+{
+ int size = hugetlb_vmemmap_size(h) - HUGETLB_VMEMMAP_RESERVE_SIZE;
+
+ if (!is_power_of_2(sizeof(struct page)))
+ return 0;
+ return size > 0 ? size : 0;
}
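For a feel of the numbers this helper produces, here is a hedged standalone sketch using typical x86-64 values (4 KiB pages and a 64-byte struct page; both are assumptions, and the real helper additionally returns 0 when sizeof(struct page) is not a power of two).

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define STRUCT_PAGE_SIZE 64UL		/* assumption, config dependent */
#define RESERVE_SIZE PAGE_SIZE		/* HUGETLB_VMEMMAP_RESERVE_SIZE */

static unsigned long optimizable_size(unsigned long huge_page_size)
{
	unsigned long pages = huge_page_size / PAGE_SIZE;
	long size = pages * STRUCT_PAGE_SIZE - RESERVE_SIZE;

	return size > 0 ? (unsigned long)size : 0;
}

int main(void)
{
	printf("2 MiB hstate: %lu bytes freeable per huge page\n",
	       optimizable_size(2UL << 20));	/* 28672 bytes, i.e. 7 pages */
	printf("1 GiB hstate: %lu bytes freeable per huge page\n",
	       optimizable_size(1UL << 30));	/* ~16 MiB, i.e. 4095 pages */
	return 0;
}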
#else
-static inline int hugetlb_vmemmap_alloc(struct hstate *h, struct page *head)
+static inline int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
{
return 0;
}
-static inline void hugetlb_vmemmap_free(struct hstate *h, struct page *head)
+static inline void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
{
}
-static inline void hugetlb_vmemmap_init(struct hstate *h)
+static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)
{
+ return 0;
}
+#endif /* CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP */
-static inline unsigned int hugetlb_optimize_vmemmap_pages(struct hstate *h)
+static inline bool hugetlb_vmemmap_optimizable(const struct hstate *h)
{
- return 0;
+ return hugetlb_vmemmap_optimizable_size(h) != 0;
}
-#endif /* CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP */
#endif /* _LINUX_HUGETLB_VMEMMAP_H */
diff --git a/mm/internal.h b/mm/internal.h
index ddd2d6a46f1b..785409805ed7 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -853,6 +853,7 @@ int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
unsigned long addr, int page_nid, int *flags);
void free_zone_device_page(struct page *page);
+int migrate_device_coherent_page(struct page *page);
/*
* mm/gup.c
@@ -863,4 +864,22 @@ DECLARE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
extern bool mirrored_kernelcore;
+static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
+{
+ /*
+ * NOTE: we must check this before testing VM_SOFTDIRTY, because
+ * when soft-dirty is not compiled in, VM_SOFTDIRTY is defined as
+ * 0x0 and !(vm_flags & VM_SOFTDIRTY) would then always be true.
+ */
+ if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
+ return false;
+
+ /*
+ * Soft-dirty is kind of special: tracking is enabled while the
+ * VM_SOFTDIRTY vma flag is not set.
+ */
+ return !(vma->vm_flags & VM_SOFTDIRTY);
+}
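The ordering requirement described in the comments above can be shown with a tiny standalone program. The flag value below is made up; the point is only that a zero VM_SOFTDIRTY makes the naive flag test vacuously true, which is why the IS_ENABLED() check has to come first.

#include <stdio.h>
#include <stdbool.h>

static bool soft_dirty_enabled(bool compiled_in, unsigned long vm_flags,
			       unsigned long vm_softdirty)
{
	if (!compiled_in)
		return false;
	/* Tracking is on while the per-vma "already soft-dirty" flag is clear. */
	return !(vm_flags & vm_softdirty);
}

int main(void)
{
	const unsigned long VM_SOFTDIRTY_ON = 0x08000000UL;	/* made-up value */
	const unsigned long VM_SOFTDIRTY_OFF = 0x0UL;		/* compiled out */

	/* Naive test without the compile-time check: always reports "enabled". */
	printf("naive, compiled out:   %d\n", !(0UL & VM_SOFTDIRTY_OFF));
	/* Guarded helper gets all three cases right. */
	printf("guarded, compiled out: %d\n",
	       soft_dirty_enabled(false, 0UL, VM_SOFTDIRTY_OFF));
	printf("guarded, flag clear:   %d\n",
	       soft_dirty_enabled(true, 0UL, VM_SOFTDIRTY_ON));
	printf("guarded, flag set:     %d\n",
	       soft_dirty_enabled(true, VM_SOFTDIRTY_ON, VM_SOFTDIRTY_ON));
	return 0;
}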
+
#endif /* __MM_INTERNAL_H */
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 78be2beb7453..69f583855c8b 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -344,7 +344,7 @@ static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
if (unlikely(nearest_obj(cache, virt_to_slab(object), object) !=
object)) {
- kasan_report_invalid_free(tagged_object, ip);
+ kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_INVALID_FREE);
return true;
}
@@ -353,7 +353,7 @@ static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
return false;
if (!kasan_byte_accessible(tagged_object)) {
- kasan_report_invalid_free(tagged_object, ip);
+ kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_DOUBLE_FREE);
return true;
}
@@ -378,12 +378,12 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object,
static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
{
if (ptr != page_address(virt_to_head_page(ptr))) {
- kasan_report_invalid_free(ptr, ip);
+ kasan_report_invalid_free(ptr, ip, KASAN_REPORT_INVALID_FREE);
return true;
}
if (!kasan_byte_accessible(ptr)) {
- kasan_report_invalid_free(ptr, ip);
+ kasan_report_invalid_free(ptr, ip, KASAN_REPORT_DOUBLE_FREE);
return true;
}
diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
index 9e1b6544bfa8..9ad8eff71b28 100644
--- a/mm/kasan/hw_tags.c
+++ b/mm/kasan/hw_tags.c
@@ -257,27 +257,37 @@ static void unpoison_vmalloc_pages(const void *addr, u8 tag)
}
}
+static void init_vmalloc_pages(const void *start, unsigned long size)
+{
+ const void *addr;
+
+ for (addr = start; addr < start + size; addr += PAGE_SIZE) {
+ struct page *page = virt_to_page(addr);
+
+ clear_highpage_kasan_tagged(page);
+ }
+}
+
void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
kasan_vmalloc_flags_t flags)
{
u8 tag;
unsigned long redzone_start, redzone_size;
- if (!kasan_vmalloc_enabled())
- return (void *)start;
-
- if (!is_vmalloc_or_module_addr(start))
+ if (!kasan_vmalloc_enabled() || !is_vmalloc_or_module_addr(start)) {
+ if (flags & KASAN_VMALLOC_INIT)
+ init_vmalloc_pages(start, size);
return (void *)start;
+ }
/*
- * Skip unpoisoning and assigning a pointer tag for non-VM_ALLOC
- * mappings as:
+ * Don't tag non-VM_ALLOC mappings, as:
*
* 1. Unlike the software KASAN modes, hardware tag-based KASAN only
* supports tagging physical memory. Therefore, it can only tag a
* single mapping of normal physical pages.
* 2. Hardware tag-based KASAN can only tag memory mapped with special
- * mapping protection bits, see arch_vmalloc_pgprot_modify().
+ * mapping protection bits, see arch_vmap_pgprot_tagged().
* As non-VM_ALLOC mappings can be mapped outside of vmalloc code,
* providing these bits would require tracking all non-VM_ALLOC
* mappers.
@@ -289,15 +299,19 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
*
* For non-VM_ALLOC allocations, page_alloc memory is tagged as usual.
*/
- if (!(flags & KASAN_VMALLOC_VM_ALLOC))
+ if (!(flags & KASAN_VMALLOC_VM_ALLOC)) {
+ WARN_ON(flags & KASAN_VMALLOC_INIT);
return (void *)start;
+ }
/*
* Don't tag executable memory.
* The kernel doesn't tolerate having the PC register tagged.
*/
- if (!(flags & KASAN_VMALLOC_PROT_NORMAL))
+ if (!(flags & KASAN_VMALLOC_PROT_NORMAL)) {
+ WARN_ON(flags & KASAN_VMALLOC_INIT);
return (void *)start;
+ }
tag = kasan_random_tag();
start = set_tag(start, tag);
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 610d60d6e5b8..01c03e45acd4 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -125,6 +125,7 @@ static inline bool kasan_sync_fault_possible(void)
enum kasan_report_type {
KASAN_REPORT_ACCESS,
KASAN_REPORT_INVALID_FREE,
+ KASAN_REPORT_DOUBLE_FREE,
};
struct kasan_report_info {
@@ -277,7 +278,7 @@ static inline void kasan_print_address_stack_frame(const void *addr) { }
bool kasan_report(unsigned long addr, size_t size,
bool is_write, unsigned long ip);
-void kasan_report_invalid_free(void *object, unsigned long ip);
+void kasan_report_invalid_free(void *object, unsigned long ip, enum kasan_report_type type);
struct page *kasan_addr_to_page(const void *addr);
struct slab *kasan_addr_to_slab(const void *addr);
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index b341a191651d..fe3f606b3a98 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -176,8 +176,12 @@ static void end_report(unsigned long *flags, void *addr)
static void print_error_description(struct kasan_report_info *info)
{
if (info->type == KASAN_REPORT_INVALID_FREE) {
- pr_err("BUG: KASAN: double-free or invalid-free in %pS\n",
- (void *)info->ip);
+ pr_err("BUG: KASAN: invalid-free in %pS\n", (void *)info->ip);
+ return;
+ }
+
+ if (info->type == KASAN_REPORT_DOUBLE_FREE) {
+ pr_err("BUG: KASAN: double-free in %pS\n", (void *)info->ip);
return;
}
@@ -433,7 +437,7 @@ static void print_report(struct kasan_report_info *info)
}
}
-void kasan_report_invalid_free(void *ptr, unsigned long ip)
+void kasan_report_invalid_free(void *ptr, unsigned long ip, enum kasan_report_type type)
{
unsigned long flags;
struct kasan_report_info info;
@@ -448,7 +452,7 @@ void kasan_report_invalid_free(void *ptr, unsigned long ip)
start_report(&flags, true);
- info.type = KASAN_REPORT_INVALID_FREE;
+ info.type = type;
info.access_addr = ptr;
info.first_bad_addr = kasan_reset_tag(ptr);
info.access_size = 0;
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index a4f07de21771..0e3648b603a6 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -295,9 +295,22 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
return 0;
shadow_start = (unsigned long)kasan_mem_to_shadow((void *)addr);
- shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE);
shadow_end = (unsigned long)kasan_mem_to_shadow((void *)addr + size);
- shadow_end = ALIGN(shadow_end, PAGE_SIZE);
+
+ /*
+ * User Mode Linux maps enough shadow memory for all of virtual memory
+ * at boot, so it doesn't need to allocate more for vmalloc; just clear it.
+ *
+ * The remaining CONFIG_UML checks in this file exist for the same
+ * reason.
+ */
+ if (IS_ENABLED(CONFIG_UML)) {
+ __memset((void *)shadow_start, KASAN_VMALLOC_INVALID, shadow_end - shadow_start);
+ return 0;
+ }
+
+ shadow_start = PAGE_ALIGN_DOWN(shadow_start);
+ shadow_end = PAGE_ALIGN(shadow_end);
ret = apply_to_page_range(&init_mm, shadow_start,
shadow_end - shadow_start,
@@ -466,6 +479,10 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
if (shadow_end > shadow_start) {
size = shadow_end - shadow_start;
+ if (IS_ENABLED(CONFIG_UML)) {
+ __memset(shadow_start, KASAN_SHADOW_INIT, shadow_end - shadow_start);
+ return;
+ }
apply_to_existing_page_range(&init_mm,
(unsigned long)shadow_start,
size, kasan_depopulate_vmalloc_pte,
@@ -531,6 +548,11 @@ int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask)
if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
return -EINVAL;
+ if (IS_ENABLED(CONFIG_UML)) {
+ __memset((void *)shadow_start, KASAN_SHADOW_INIT, shadow_size);
+ return 0;
+ }
+
ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
shadow_start + shadow_size,
GFP_KERNEL,
@@ -554,6 +576,9 @@ int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask)
void kasan_free_module_shadow(const struct vm_struct *vm)
{
+ if (IS_ENABLED(CONFIG_UML))
+ return;
+
if (vm->flags & VM_KASAN)
vfree(kasan_mem_to_shadow(vm->addr));
}
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 6aff49f6b79e..c252081b11df 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -546,7 +546,7 @@ static unsigned long kfence_init_pool(void)
if (!arch_kfence_init_pool())
return addr;
- pages = virt_to_page(addr);
+ pages = virt_to_page(__kfence_pool);
/*
* Set up object pages: they must have PG_slab set, to avoid freeing
@@ -660,7 +660,7 @@ static bool kfence_init_pool_late(void)
/* Same as above. */
free_size = KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool);
#ifdef CONFIG_CONTIG_ALLOC
- free_contig_range(page_to_pfn(virt_to_page(addr)), free_size / PAGE_SIZE);
+ free_contig_range(page_to_pfn(virt_to_page((void *)addr)), free_size / PAGE_SIZE);
#else
free_pages_exact((void *)addr, free_size);
#endif
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 16be62d493cd..01f71786d530 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -147,8 +147,7 @@ static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
- __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
- scan_sleep_millisecs_store);
+ __ATTR_RW(scan_sleep_millisecs);
static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
struct kobj_attribute *attr,
@@ -175,8 +174,7 @@ static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
- __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
- alloc_sleep_millisecs_store);
+ __ATTR_RW(alloc_sleep_millisecs);
static ssize_t pages_to_scan_show(struct kobject *kobj,
struct kobj_attribute *attr,
@@ -200,8 +198,7 @@ static ssize_t pages_to_scan_store(struct kobject *kobj,
return count;
}
static struct kobj_attribute pages_to_scan_attr =
- __ATTR(pages_to_scan, 0644, pages_to_scan_show,
- pages_to_scan_store);
+ __ATTR_RW(pages_to_scan);
static ssize_t pages_collapsed_show(struct kobject *kobj,
struct kobj_attribute *attr,
@@ -221,22 +218,21 @@ static ssize_t full_scans_show(struct kobject *kobj,
static struct kobj_attribute full_scans_attr =
__ATTR_RO(full_scans);
-static ssize_t khugepaged_defrag_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
+static ssize_t defrag_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
return single_hugepage_flag_show(kobj, attr, buf,
TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
-static ssize_t khugepaged_defrag_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count)
+static ssize_t defrag_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
{
return single_hugepage_flag_store(kobj, attr, buf, count,
TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
- __ATTR(defrag, 0644, khugepaged_defrag_show,
- khugepaged_defrag_store);
+ __ATTR_RW(defrag);
/*
* max_ptes_none controls if khugepaged should collapse hugepages over
@@ -246,21 +242,21 @@ static struct kobj_attribute khugepaged_defrag_attr =
* runs. Increasing max_ptes_none will instead potentially reduce the
* free memory in the system during the khugepaged scan.
*/
-static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
+static ssize_t max_ptes_none_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
{
return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
}
-static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count)
+static ssize_t max_ptes_none_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
{
int err;
unsigned long max_ptes_none;
err = kstrtoul(buf, 10, &max_ptes_none);
- if (err || max_ptes_none > HPAGE_PMD_NR-1)
+ if (err || max_ptes_none > HPAGE_PMD_NR - 1)
return -EINVAL;
khugepaged_max_ptes_none = max_ptes_none;
@@ -268,25 +264,24 @@ static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
- __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
- khugepaged_max_ptes_none_store);
+ __ATTR_RW(max_ptes_none);
-static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
+static ssize_t max_ptes_swap_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
{
return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
}
-static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count)
+static ssize_t max_ptes_swap_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
{
int err;
unsigned long max_ptes_swap;
err = kstrtoul(buf, 10, &max_ptes_swap);
- if (err || max_ptes_swap > HPAGE_PMD_NR-1)
+ if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
return -EINVAL;
khugepaged_max_ptes_swap = max_ptes_swap;
@@ -295,25 +290,24 @@ static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
}
static struct kobj_attribute khugepaged_max_ptes_swap_attr =
- __ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
- khugepaged_max_ptes_swap_store);
+ __ATTR_RW(max_ptes_swap);
-static ssize_t khugepaged_max_ptes_shared_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
+static ssize_t max_ptes_shared_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
{
return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
}
-static ssize_t khugepaged_max_ptes_shared_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count)
+static ssize_t max_ptes_shared_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
{
int err;
unsigned long max_ptes_shared;
err = kstrtoul(buf, 10, &max_ptes_shared);
- if (err || max_ptes_shared > HPAGE_PMD_NR-1)
+ if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
return -EINVAL;
khugepaged_max_ptes_shared = max_ptes_shared;
@@ -322,8 +316,7 @@ static ssize_t khugepaged_max_ptes_shared_store(struct kobject *kobj,
}
static struct kobj_attribute khugepaged_max_ptes_shared_attr =
- __ATTR(max_ptes_shared, 0644, khugepaged_max_ptes_shared_show,
- khugepaged_max_ptes_shared_store);
+ __ATTR_RW(max_ptes_shared);
static struct attribute *khugepaged_attr[] = {
&khugepaged_defrag_attr.attr,
@@ -437,43 +430,6 @@ static inline int khugepaged_test_exit(struct mm_struct *mm)
return atomic_read(&mm->mm_users) == 0;
}
-bool hugepage_vma_check(struct vm_area_struct *vma,
- unsigned long vm_flags)
-{
- if (!transhuge_vma_enabled(vma, vm_flags))
- return false;
-
- if (vm_flags & VM_NO_KHUGEPAGED)
- return false;
-
- /* Don't run khugepaged against DAX vma */
- if (vma_is_dax(vma))
- return false;
-
- if (vma->vm_file && !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) -
- vma->vm_pgoff, HPAGE_PMD_NR))
- return false;
-
- /* Enabled via shmem mount options or sysfs settings. */
- if (shmem_file(vma->vm_file))
- return shmem_huge_enabled(vma);
-
- /* THP settings require madvise. */
- if (!(vm_flags & VM_HUGEPAGE) && !khugepaged_always())
- return false;
-
- /* Only regular file is valid */
- if (file_thp_enabled(vma))
- return true;
-
- if (!vma->anon_vma || !vma_is_anonymous(vma))
- return false;
- if (vma_is_temporary_stack(vma))
- return false;
-
- return true;
-}
-
void __khugepaged_enter(struct mm_struct *mm)
{
struct mm_slot *mm_slot;
@@ -509,10 +465,8 @@ void khugepaged_enter_vma(struct vm_area_struct *vma,
unsigned long vm_flags)
{
if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
- khugepaged_enabled() &&
- (((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
- (vma->vm_end & HPAGE_PMD_MASK))) {
- if (hugepage_vma_check(vma, vm_flags))
+ hugepage_flags_enabled()) {
+ if (hugepage_vma_check(vma, vm_flags, false, false))
__khugepaged_enter(vma->vm_mm);
}
}
@@ -599,7 +553,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
int none_or_zero = 0, shared = 0, result = 0, referenced = 0;
bool writable = false;
- for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
+ for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
_pte++, address += PAGE_SIZE) {
pte_t pteval = *_pte;
if (pte_none(pteval) || (pte_present(pteval) &&
@@ -618,7 +572,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
goto out;
}
page = vm_normal_page(vma, address, pteval);
- if (unlikely(!page)) {
+ if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
result = SCAN_PAGE_NULL;
goto out;
}
@@ -762,7 +716,12 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
list_del(&src_page->lru);
- release_pte_page(src_page);
+ mod_node_page_state(page_pgdat(src_page),
+ NR_ISOLATED_ANON + page_is_file_lru(src_page),
+ -compound_nr(src_page));
+ unlock_page(src_page);
+ free_swap_cache(src_page);
+ putback_lru_page(src_page);
}
}
@@ -802,6 +761,10 @@ static bool khugepaged_scan_abort(int nid)
return false;
}
+#define khugepaged_defrag() \
+ (transparent_hugepage_flags & \
+ (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
+
/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
@@ -899,7 +862,7 @@ static struct page *khugepaged_alloc_hugepage(bool *wait)
khugepaged_alloc_sleep();
} else
count_vm_event(THP_COLLAPSE_ALLOC);
- } while (unlikely(!hpage) && likely(khugepaged_enabled()));
+ } while (unlikely(!hpage) && likely(hugepage_flags_enabled()));
return hpage;
}
@@ -947,7 +910,6 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
struct vm_area_struct **vmap)
{
struct vm_area_struct *vma;
- unsigned long hstart, hend;
if (unlikely(khugepaged_test_exit(mm)))
return SCAN_ANY_PROCESS;
@@ -956,13 +918,17 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
if (!vma)
return SCAN_VMA_NULL;
- hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
- hend = vma->vm_end & HPAGE_PMD_MASK;
- if (address < hstart || address + HPAGE_PMD_SIZE > hend)
+ if (!transhuge_vma_suitable(vma, address))
return SCAN_ADDRESS_RANGE;
- if (!hugepage_vma_check(vma, vma->vm_flags))
+ if (!hugepage_vma_check(vma, vma->vm_flags, false, false))
return SCAN_VMA_CHECK;
- /* Anon VMA expected */
+ /*
+ * Anon VMA expected; the address may be unmapped and then
+ * remapped to a file after khugepaged reacquired the mmap_lock.
+ *
+ * hugepage_vma_check may return true for qualified file
+ * vmas.
+ */
if (!vma->anon_vma || !vma_is_anonymous(vma))
return SCAN_VMA_CHECK;
return 0;
@@ -972,8 +938,8 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
* Bring missing pages in from swap, to complete THP collapse.
* Only done if khugepaged_scan_pmd believes it is worthwhile.
*
- * Called and returns without pte mapped or spinlocks held,
- * but with mmap_lock held to protect against vma changes.
+ * Called and returns without pte mapped or spinlocks held.
+ * Note that if false is returned, mmap_lock will be released.
*/
static bool __collapse_huge_page_swapin(struct mm_struct *mm,
@@ -1000,27 +966,24 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
pte_unmap(vmf.pte);
continue;
}
- swapped_in++;
ret = do_swap_page(&vmf);
- /* do_swap_page returns VM_FAULT_RETRY with released mmap_lock */
+ /*
+ * do_swap_page returns VM_FAULT_RETRY with released mmap_lock.
+ * Note we treat VM_FAULT_RETRY as VM_FAULT_ERROR here because
+ * we do not retry here and the swap entry will remain in the
+ * page table, resulting in a later failure.
+ */
if (ret & VM_FAULT_RETRY) {
- mmap_read_lock(mm);
- if (hugepage_vma_revalidate(mm, haddr, &vma)) {
- /* vma is no longer available, don't continue to swapin */
- trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
- return false;
- }
- /* check if the pmd is still valid */
- if (mm_find_pmd(mm, haddr) != pmd) {
- trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
- return false;
- }
+ trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
+ return false;
}
if (ret & VM_FAULT_ERROR) {
+ mmap_read_unlock(mm);
trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
return false;
}
+ swapped_in++;
}
/* Drain LRU add pagevec to remove extra pin on the swapped in pages */
@@ -1086,13 +1049,12 @@ static void collapse_huge_page(struct mm_struct *mm,
}
/*
- * __collapse_huge_page_swapin always returns with mmap_lock locked.
- * If it fails, we release mmap_lock and jump out_nolock.
+ * __collapse_huge_page_swapin will return with mmap_lock released
+ * when it fails, so we jump to out_nolock directly in that case.
* Continuing to collapse causes inconsistency.
*/
if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
pmd, referenced)) {
- mmap_read_unlock(mm);
goto out_nolock;
}
@@ -1219,7 +1181,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
pte = pte_offset_map_lock(mm, pmd, address, &ptl);
- for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
+ for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
_pte++, _address += PAGE_SIZE) {
pte_t pteval = *_pte;
if (is_swap_pte(pteval)) {
@@ -1267,7 +1229,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
writable = true;
page = vm_normal_page(vma, _address, pteval);
- if (unlikely(!page)) {
+ if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
result = SCAN_PAGE_NULL;
goto out_unmap;
}
@@ -1309,7 +1271,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
/*
* Check if the page has any GUP (or other external) pins.
*
- * Here the check is racy it may see totmal_mapcount > refcount
+ * Here the check is racy; it may see total_mapcount > refcount
* in some cases.
* For example, one process with one forked child process.
* The parent has the PMD split due to MADV_DONTNEED, then
@@ -1382,8 +1344,8 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
* Notify khugepaged that given addr of the mm is pte-mapped THP. Then
* khugepaged should try to collapse the page table.
*/
-static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
- unsigned long addr)
+static void khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
+ unsigned long addr)
{
struct mm_slot *mm_slot;
@@ -1394,7 +1356,6 @@ static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
spin_unlock(&khugepaged_mm_lock);
- return 0;
}
static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -1444,7 +1405,7 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
* the valid THP. Add extra VM_HUGEPAGE so hugepage_vma_check()
* will not fail the vma for missing VM_HUGEPAGE
*/
- if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
+ if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE, false, false))
return;
/* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
@@ -1479,7 +1440,8 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
goto abort;
page = vm_normal_page(vma, addr, *pte);
-
+ if (WARN_ON_ONCE(page && is_zone_device_page(page)))
+ page = NULL;
/*
* Note that uprobe, debugger, or MAP_PRIVATE may change the
* page table, but the new page will not be a subpage of hpage.
@@ -1497,6 +1459,8 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
if (pte_none(*pte))
continue;
page = vm_normal_page(vma, addr, *pte);
+ if (WARN_ON_ONCE(page && is_zone_device_page(page)))
+ goto abort;
page_remove_rmap(page, vma, false);
}
@@ -1557,7 +1521,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
* mmap_write_lock(mm) as PMD-mapping is likely to be split
* later.
*
- * Not that vma->anon_vma check is racy: it can be set up after
+ * Note that vma->anon_vma check is racy: it can be set up after
* the check but before we took mmap_lock by the fault path.
* But page lock would prevent establishing any new ptes of the
* page, so we are safe.
@@ -1885,8 +1849,8 @@ out_unlock:
if (nr_none) {
__mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none);
- if (is_shmem)
- __mod_lruvec_page_state(new_page, NR_SHMEM, nr_none);
+ /* nr_none is always 0 for non-shmem. */
+ __mod_lruvec_page_state(new_page, NR_SHMEM, nr_none);
}
/* Join all the small entries into a single multi-index entry */
@@ -1950,10 +1914,10 @@ xa_unlocked:
/* Something went wrong: roll back page cache changes */
xas_lock_irq(&xas);
- mapping->nrpages -= nr_none;
-
- if (is_shmem)
+ if (nr_none) {
+ mapping->nrpages -= nr_none;
shmem_uncharge(mapping->host, nr_none);
+ }
xas_set(&xas, start);
xas_for_each(&xas, page, end - 1) {
@@ -2131,22 +2095,18 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
progress++;
break;
}
- if (!hugepage_vma_check(vma, vma->vm_flags)) {
+ if (!hugepage_vma_check(vma, vma->vm_flags, false, false)) {
skip:
progress++;
continue;
}
- hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
- hend = vma->vm_end & HPAGE_PMD_MASK;
- if (hstart >= hend)
- goto skip;
+ hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE);
+ hend = round_down(vma->vm_end, HPAGE_PMD_SIZE);
if (khugepaged_scan.address > hend)
goto skip;
if (khugepaged_scan.address < hstart)
khugepaged_scan.address = hstart;
VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
- if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma))
- goto skip;
while (khugepaged_scan.address < hend) {
int ret;
@@ -2216,7 +2176,7 @@ breakouterloop_mmap_lock:
static int khugepaged_has_work(void)
{
return !list_empty(&khugepaged_scan.mm_head) &&
- khugepaged_enabled();
+ hugepage_flags_enabled();
}
static int khugepaged_wait_event(void)
@@ -2281,7 +2241,7 @@ static void khugepaged_wait_work(void)
return;
}
- if (khugepaged_enabled())
+ if (hugepage_flags_enabled())
wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
}
@@ -2312,7 +2272,7 @@ static void set_recommended_min_free_kbytes(void)
int nr_zones = 0;
unsigned long recommended_min;
- if (!khugepaged_enabled()) {
+ if (!hugepage_flags_enabled()) {
calculate_min_free_kbytes();
goto update_wmarks;
}
@@ -2362,7 +2322,7 @@ int start_stop_khugepaged(void)
int err = 0;
mutex_lock(&khugepaged_mutex);
- if (khugepaged_enabled()) {
+ if (hugepage_flags_enabled()) {
if (!khugepaged_thread)
khugepaged_thread = kthread_run(khugepaged, NULL,
"khugepaged");
@@ -2388,7 +2348,7 @@ fail:
void khugepaged_min_free_kbytes_update(void)
{
mutex_lock(&khugepaged_mutex);
- if (khugepaged_enabled() && khugepaged_thread)
+ if (hugepage_flags_enabled() && khugepaged_thread)
set_recommended_min_free_kbytes();
mutex_unlock(&khugepaged_mutex);
}
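
The khugepaged hunk above replaces the open-coded mask arithmetic for the scan window with round_up()/round_down(). A minimal userspace sketch, assuming HPAGE_PMD_SIZE of 2 MiB and locally re-defined round helpers (illustration only, not kernel code), showing both forms yield the same PMD-aligned window:

#include <assert.h>
#include <stdio.h>

#define HPAGE_PMD_SIZE (2UL << 20)
#define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1))

/* local stand-ins for the kernel helpers, for demonstration only */
#define round_up(x, y)   ((((x) + (y) - 1) / (y)) * (y))
#define round_down(x, y) (((x) / (y)) * (y))

int main(void)
{
	unsigned long vm_start = 0x7f0000123000UL;
	unsigned long vm_end   = 0x7f0000923000UL;

	/* old form: open-coded mask arithmetic */
	unsigned long hstart_old = (vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	unsigned long hend_old   = vm_end & HPAGE_PMD_MASK;

	/* new form: round_up()/round_down() */
	unsigned long hstart_new = round_up(vm_start, HPAGE_PMD_SIZE);
	unsigned long hend_new   = round_down(vm_end, HPAGE_PMD_SIZE);

	assert(hstart_old == hstart_new && hend_old == hend_new);
	printf("scan window: [%#lx, %#lx)\n", hstart_new, hend_new);
	return 0;
}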
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index a182f5ddaf68..1eddc0132f7f 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -14,14 +14,16 @@
* The following locks and mutexes are used by kmemleak:
*
* - kmemleak_lock (raw_spinlock_t): protects the object_list modifications and
- * accesses to the object_tree_root. The object_list is the main list
- * holding the metadata (struct kmemleak_object) for the allocated memory
- * blocks. The object_tree_root is a red black tree used to look-up
- * metadata based on a pointer to the corresponding memory block. The
- * kmemleak_object structures are added to the object_list and
- * object_tree_root in the create_object() function called from the
- * kmemleak_alloc() callback and removed in delete_object() called from the
- * kmemleak_free() callback
+ * accesses to the object_tree_root (or object_phys_tree_root). The
+ * object_list is the main list holding the metadata (struct kmemleak_object)
+ * for the allocated memory blocks. The object_tree_root and object_phys_tree_root
+ * are red black trees used to look-up metadata based on a pointer to the
+ * corresponding memory block. The object_phys_tree_root is for objects
+ * allocated with a physical address. The kmemleak_object structures are
+ * added to the object_list and object_tree_root (or object_phys_tree_root)
+ * in the create_object() function called from the kmemleak_alloc() (or
+ * kmemleak_alloc_phys()) callback and removed in delete_object() called from
+ * the kmemleak_free() callback
* - kmemleak_object.lock (raw_spinlock_t): protects a kmemleak_object.
* Accesses to the metadata (e.g. count) are protected by this lock. Note
* that some members of this structure may be protected by other means
@@ -172,6 +174,8 @@ struct kmemleak_object {
#define OBJECT_NO_SCAN (1 << 2)
/* flag set to fully scan the object when scan_area allocation failed */
#define OBJECT_FULL_SCAN (1 << 3)
+/* flag set for object allocated with physical address */
+#define OBJECT_PHYS (1 << 4)
#define HEX_PREFIX " "
/* number of bytes to print per line; must be 16 or 32 */
@@ -193,7 +197,9 @@ static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
static LIST_HEAD(mem_pool_free_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
-/* protecting the access to object_list and object_tree_root */
+/* search tree for object (with OBJECT_PHYS flag) boundaries */
+static struct rb_root object_phys_tree_root = RB_ROOT;
+/* protecting the access to object_list, object_tree_root (or object_phys_tree_root) */
static DEFINE_RAW_SPINLOCK(kmemleak_lock);
/* allocation caches for kmemleak internal data */
@@ -285,6 +291,9 @@ static void hex_dump_object(struct seq_file *seq,
const u8 *ptr = (const u8 *)object->pointer;
size_t len;
+ if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
+ return;
+
/* limit the number of lines to HEX_MAX_LINES */
len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);
@@ -378,9 +387,11 @@ static void dump_object_info(struct kmemleak_object *object)
* beginning of the memory block are allowed. The kmemleak_lock must be held
* when calling this function.
*/
-static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
+static struct kmemleak_object *__lookup_object(unsigned long ptr, int alias,
+ bool is_phys)
{
- struct rb_node *rb = object_tree_root.rb_node;
+ struct rb_node *rb = is_phys ? object_phys_tree_root.rb_node :
+ object_tree_root.rb_node;
unsigned long untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
while (rb) {
@@ -406,6 +417,12 @@ static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
return NULL;
}
+/* Look up a kmemleak object allocated with a virtual address. */
+static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
+{
+ return __lookup_object(ptr, alias, false);
+}
+
/*
* Increment the object use_count. Return 1 if successful or 0 otherwise. Note
* that once an object's use_count reached 0, the RCU freeing was already
@@ -515,14 +532,15 @@ static void put_object(struct kmemleak_object *object)
/*
* Look up an object in the object search tree and increase its use_count.
*/
-static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
+static struct kmemleak_object *__find_and_get_object(unsigned long ptr, int alias,
+ bool is_phys)
{
unsigned long flags;
struct kmemleak_object *object;
rcu_read_lock();
raw_spin_lock_irqsave(&kmemleak_lock, flags);
- object = lookup_object(ptr, alias);
+ object = __lookup_object(ptr, alias, is_phys);
raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
/* check whether the object is still available */
@@ -533,28 +551,39 @@ static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
return object;
}
+/* Look up and get an object allocated with a virtual address. */
+static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
+{
+ return __find_and_get_object(ptr, alias, false);
+}
+
/*
- * Remove an object from the object_tree_root and object_list. Must be called
- * with the kmemleak_lock held _if_ kmemleak is still enabled.
+ * Remove an object from the object_tree_root (or object_phys_tree_root)
+ * and object_list. Must be called with the kmemleak_lock held _if_ kmemleak
+ * is still enabled.
*/
static void __remove_object(struct kmemleak_object *object)
{
- rb_erase(&object->rb_node, &object_tree_root);
+ rb_erase(&object->rb_node, object->flags & OBJECT_PHYS ?
+ &object_phys_tree_root :
+ &object_tree_root);
list_del_rcu(&object->object_list);
}
/*
* Look up an object in the object search tree and remove it from both
- * object_tree_root and object_list. The returned object's use_count should be
- * at least 1, as initially set by create_object().
+ * object_tree_root (or object_phys_tree_root) and object_list. The
+ * returned object's use_count should be at least 1, as initially set
+ * by create_object().
*/
-static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
+static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias,
+ bool is_phys)
{
unsigned long flags;
struct kmemleak_object *object;
raw_spin_lock_irqsave(&kmemleak_lock, flags);
- object = lookup_object(ptr, alias);
+ object = __lookup_object(ptr, alias, is_phys);
if (object)
__remove_object(object);
raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
@@ -572,10 +601,12 @@ static int __save_stack_trace(unsigned long *trace)
/*
* Create the metadata (struct kmemleak_object) corresponding to an allocated
- * memory block and add it to the object_list and object_tree_root.
+ * memory block and add it to the object_list and object_tree_root (or
+ * object_phys_tree_root).
*/
-static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
- int min_count, gfp_t gfp)
+static struct kmemleak_object *__create_object(unsigned long ptr, size_t size,
+ int min_count, gfp_t gfp,
+ bool is_phys)
{
unsigned long flags;
struct kmemleak_object *object, *parent;
@@ -595,7 +626,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
INIT_HLIST_HEAD(&object->area_list);
raw_spin_lock_init(&object->lock);
atomic_set(&object->use_count, 1);
- object->flags = OBJECT_ALLOCATED;
+ object->flags = OBJECT_ALLOCATED | (is_phys ? OBJECT_PHYS : 0);
object->pointer = ptr;
object->size = kfence_ksize((void *)ptr) ?: size;
object->excess_ref = 0;
@@ -628,9 +659,16 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
raw_spin_lock_irqsave(&kmemleak_lock, flags);
untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
- min_addr = min(min_addr, untagged_ptr);
- max_addr = max(max_addr, untagged_ptr + size);
- link = &object_tree_root.rb_node;
+ /*
+ * Only update min_addr and max_addr for objects
+ * storing a virtual address.
+ */
+ if (!is_phys) {
+ min_addr = min(min_addr, untagged_ptr);
+ max_addr = max(max_addr, untagged_ptr + size);
+ }
+ link = is_phys ? &object_phys_tree_root.rb_node :
+ &object_tree_root.rb_node;
rb_parent = NULL;
while (*link) {
rb_parent = *link;
@@ -654,7 +692,8 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
}
}
rb_link_node(&object->rb_node, rb_parent, link);
- rb_insert_color(&object->rb_node, &object_tree_root);
+ rb_insert_color(&object->rb_node, is_phys ? &object_phys_tree_root :
+ &object_tree_root);
list_add_tail_rcu(&object->object_list, &object_list);
out:
@@ -662,6 +701,20 @@ out:
return object;
}
+/* Create a kmemleak object allocated with a virtual address. */
+static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
+ int min_count, gfp_t gfp)
+{
+ return __create_object(ptr, size, min_count, gfp, false);
+}
+
+/* Create a kmemleak object allocated with a physical address. */
+static struct kmemleak_object *create_object_phys(unsigned long ptr, size_t size,
+ int min_count, gfp_t gfp)
+{
+ return __create_object(ptr, size, min_count, gfp, true);
+}
+
/*
* Mark the object as not allocated and schedule RCU freeing via put_object().
*/
@@ -690,7 +743,7 @@ static void delete_object_full(unsigned long ptr)
{
struct kmemleak_object *object;
- object = find_and_remove_object(ptr, 0);
+ object = find_and_remove_object(ptr, 0, false);
if (!object) {
#ifdef DEBUG
kmemleak_warn("Freeing unknown object at 0x%08lx\n",
@@ -706,12 +759,12 @@ static void delete_object_full(unsigned long ptr)
* delete it. If the memory block is partially freed, the function may create
* additional metadata for the remaining parts of the block.
*/
-static void delete_object_part(unsigned long ptr, size_t size)
+static void delete_object_part(unsigned long ptr, size_t size, bool is_phys)
{
struct kmemleak_object *object;
unsigned long start, end;
- object = find_and_remove_object(ptr, 1);
+ object = find_and_remove_object(ptr, 1, is_phys);
if (!object) {
#ifdef DEBUG
kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
@@ -728,11 +781,11 @@ static void delete_object_part(unsigned long ptr, size_t size)
start = object->pointer;
end = object->pointer + object->size;
if (ptr > start)
- create_object(start, ptr - start, object->min_count,
- GFP_KERNEL);
+ __create_object(start, ptr - start, object->min_count,
+ GFP_KERNEL, is_phys);
if (ptr + size < end)
- create_object(ptr + size, end - ptr - size, object->min_count,
- GFP_KERNEL);
+ __create_object(ptr + size, end - ptr - size, object->min_count,
+ GFP_KERNEL, is_phys);
__delete_object(object);
}
@@ -753,11 +806,11 @@ static void paint_it(struct kmemleak_object *object, int color)
raw_spin_unlock_irqrestore(&object->lock, flags);
}
-static void paint_ptr(unsigned long ptr, int color)
+static void paint_ptr(unsigned long ptr, int color, bool is_phys)
{
struct kmemleak_object *object;
- object = find_and_get_object(ptr, 0);
+ object = __find_and_get_object(ptr, 0, is_phys);
if (!object) {
kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
ptr,
@@ -775,16 +828,16 @@ static void paint_ptr(unsigned long ptr, int color)
*/
static void make_gray_object(unsigned long ptr)
{
- paint_ptr(ptr, KMEMLEAK_GREY);
+ paint_ptr(ptr, KMEMLEAK_GREY, false);
}
/*
* Mark the object as black-colored so that it is ignored from scans and
* reporting.
*/
-static void make_black_object(unsigned long ptr)
+static void make_black_object(unsigned long ptr, bool is_phys)
{
- paint_ptr(ptr, KMEMLEAK_BLACK);
+ paint_ptr(ptr, KMEMLEAK_BLACK, is_phys);
}
/*
@@ -990,7 +1043,7 @@ void __ref kmemleak_free_part(const void *ptr, size_t size)
pr_debug("%s(0x%p)\n", __func__, ptr);
if (kmemleak_enabled && ptr && !IS_ERR(ptr))
- delete_object_part((unsigned long)ptr, size);
+ delete_object_part((unsigned long)ptr, size, false);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);
@@ -1078,7 +1131,7 @@ void __ref kmemleak_ignore(const void *ptr)
pr_debug("%s(0x%p)\n", __func__, ptr);
if (kmemleak_enabled && ptr && !IS_ERR(ptr))
- make_black_object((unsigned long)ptr);
+ make_black_object((unsigned long)ptr, false);
}
EXPORT_SYMBOL(kmemleak_ignore);
@@ -1125,15 +1178,18 @@ EXPORT_SYMBOL(kmemleak_no_scan);
* address argument
* @phys: physical address of the object
* @size: size of the object
- * @min_count: minimum number of references to this object.
- * See kmemleak_alloc()
* @gfp: kmalloc() flags used for kmemleak internal memory allocations
*/
-void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
- gfp_t gfp)
+void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, gfp_t gfp)
{
- if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
- kmemleak_alloc(__va(phys), size, min_count, gfp);
+ pr_debug("%s(0x%pa, %zu)\n", __func__, &phys, size);
+
+ if (kmemleak_enabled)
+ /*
+ * Create object with OBJECT_PHYS flag and
+ * assume min_count 0.
+ */
+ create_object_phys((unsigned long)phys, size, 0, gfp);
}
EXPORT_SYMBOL(kmemleak_alloc_phys);
@@ -1146,22 +1202,12 @@ EXPORT_SYMBOL(kmemleak_alloc_phys);
*/
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
- if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
- kmemleak_free_part(__va(phys), size);
-}
-EXPORT_SYMBOL(kmemleak_free_part_phys);
+ pr_debug("%s(0x%pa)\n", __func__, &phys);
-/**
- * kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical
- * address argument
- * @phys: physical address of the object
- */
-void __ref kmemleak_not_leak_phys(phys_addr_t phys)
-{
- if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
- kmemleak_not_leak(__va(phys));
+ if (kmemleak_enabled)
+ delete_object_part((unsigned long)phys, size, true);
}
-EXPORT_SYMBOL(kmemleak_not_leak_phys);
+EXPORT_SYMBOL(kmemleak_free_part_phys);
/**
* kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
@@ -1170,8 +1216,10 @@ EXPORT_SYMBOL(kmemleak_not_leak_phys);
*/
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
- if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
- kmemleak_ignore(__va(phys));
+ pr_debug("%s(0x%pa)\n", __func__, &phys);
+
+ if (kmemleak_enabled)
+ make_black_object((unsigned long)phys, true);
}
EXPORT_SYMBOL(kmemleak_ignore_phys);
@@ -1182,6 +1230,9 @@ static bool update_checksum(struct kmemleak_object *object)
{
u32 old_csum = object->checksum;
+ if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
+ return false;
+
kasan_disable_current();
kcsan_disable_current();
object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
@@ -1335,6 +1386,7 @@ static void scan_object(struct kmemleak_object *object)
{
struct kmemleak_scan_area *area;
unsigned long flags;
+ void *obj_ptr;
/*
* Once the object->lock is acquired, the corresponding memory block
@@ -1346,10 +1398,15 @@ static void scan_object(struct kmemleak_object *object)
if (!(object->flags & OBJECT_ALLOCATED))
/* already freed object */
goto out;
+
+ obj_ptr = object->flags & OBJECT_PHYS ?
+ __va((phys_addr_t)object->pointer) :
+ (void *)object->pointer;
+
if (hlist_empty(&object->area_list) ||
object->flags & OBJECT_FULL_SCAN) {
- void *start = (void *)object->pointer;
- void *end = (void *)(object->pointer + object->size);
+ void *start = obj_ptr;
+ void *end = obj_ptr + object->size;
void *next;
do {
@@ -1413,18 +1470,21 @@ static void scan_gray_list(void)
*/
static void kmemleak_scan(void)
{
- unsigned long flags;
struct kmemleak_object *object;
struct zone *zone;
int __maybe_unused i;
int new_leaks = 0;
+ int loop1_cnt = 0;
jiffies_last_scan = jiffies;
/* prepare the kmemleak_object's */
rcu_read_lock();
list_for_each_entry_rcu(object, &object_list, object_list) {
- raw_spin_lock_irqsave(&object->lock, flags);
+ bool obj_pinned = false;
+
+ loop1_cnt++;
+ raw_spin_lock_irq(&object->lock);
#ifdef DEBUG
/*
* With a few exceptions there should be a maximum of
@@ -1436,12 +1496,45 @@ static void kmemleak_scan(void)
dump_object_info(object);
}
#endif
+
+ /* ignore objects outside lowmem (paint them black) */
+ if ((object->flags & OBJECT_PHYS) &&
+ !(object->flags & OBJECT_NO_SCAN)) {
+ unsigned long phys = object->pointer;
+
+ if (PHYS_PFN(phys) < min_low_pfn ||
+ PHYS_PFN(phys + object->size) >= max_low_pfn)
+ __paint_it(object, KMEMLEAK_BLACK);
+ }
+
/* reset the reference count (whiten the object) */
object->count = 0;
- if (color_gray(object) && get_object(object))
+ if (color_gray(object) && get_object(object)) {
list_add_tail(&object->gray_list, &gray_list);
+ obj_pinned = true;
+ }
- raw_spin_unlock_irqrestore(&object->lock, flags);
+ raw_spin_unlock_irq(&object->lock);
+
+ /*
+ * Do a cond_resched() every 64k objects to avoid soft lockups.
+ * Make sure a reference has been taken so that the object
+ * won't go away without RCU read lock.
+ */
+ if (!(loop1_cnt & 0xffff)) {
+ if (!obj_pinned && !get_object(object)) {
+ /* Try the next object instead */
+ loop1_cnt--;
+ continue;
+ }
+
+ rcu_read_unlock();
+ cond_resched();
+ rcu_read_lock();
+
+ if (!obj_pinned)
+ put_object(object);
+ }
}
rcu_read_unlock();
@@ -1509,14 +1602,21 @@ static void kmemleak_scan(void)
*/
rcu_read_lock();
list_for_each_entry_rcu(object, &object_list, object_list) {
- raw_spin_lock_irqsave(&object->lock, flags);
+ /*
+ * This is racy but we can save the overhead of lock/unlock
+ * calls. The missed objects, if any, should be caught in
+ * the next scan.
+ */
+ if (!color_white(object))
+ continue;
+ raw_spin_lock_irq(&object->lock);
if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
&& update_checksum(object) && get_object(object)) {
/* color it gray temporarily */
object->count = object->min_count;
list_add_tail(&object->gray_list, &gray_list);
}
- raw_spin_unlock_irqrestore(&object->lock, flags);
+ raw_spin_unlock_irq(&object->lock);
}
rcu_read_unlock();
@@ -1536,7 +1636,14 @@ static void kmemleak_scan(void)
*/
rcu_read_lock();
list_for_each_entry_rcu(object, &object_list, object_list) {
- raw_spin_lock_irqsave(&object->lock, flags);
+ /*
+ * This is racy but we can save the overhead of lock/unlock
+ * calls. The missed objects, if any, should be caught in
+ * the next scan.
+ */
+ if (!color_white(object))
+ continue;
+ raw_spin_lock_irq(&object->lock);
if (unreferenced_object(object) &&
!(object->flags & OBJECT_REPORTED)) {
object->flags |= OBJECT_REPORTED;
@@ -1546,7 +1653,7 @@ static void kmemleak_scan(void)
new_leaks++;
}
- raw_spin_unlock_irqrestore(&object->lock, flags);
+ raw_spin_unlock_irq(&object->lock);
}
rcu_read_unlock();
@@ -1748,15 +1855,14 @@ static int dump_str_object_info(const char *str)
static void kmemleak_clear(void)
{
struct kmemleak_object *object;
- unsigned long flags;
rcu_read_lock();
list_for_each_entry_rcu(object, &object_list, object_list) {
- raw_spin_lock_irqsave(&object->lock, flags);
+ raw_spin_lock_irq(&object->lock);
if ((object->flags & OBJECT_REPORTED) &&
unreferenced_object(object))
__paint_it(object, KMEMLEAK_GREY);
- raw_spin_unlock_irqrestore(&object->lock, flags);
+ raw_spin_unlock_irq(&object->lock);
}
rcu_read_unlock();
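
The kmemleak portion above keeps two search trees and picks one via is_phys in __lookup_object(). A rough userspace sketch of that selection pattern, using a toy unbalanced binary search tree in place of the kernel rbtree; all names and values here are illustrative assumptions:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	unsigned long start, size;
	struct obj *left, *right;
};

static struct obj *tree_virt, *tree_phys;

static struct obj **root_for(bool is_phys)
{
	return is_phys ? &tree_phys : &tree_virt;
}

static void insert(bool is_phys, unsigned long start, unsigned long size)
{
	struct obj **link = root_for(is_phys);

	while (*link)
		link = (start < (*link)->start) ? &(*link)->left : &(*link)->right;
	*link = calloc(1, sizeof(**link));
	(*link)->start = start;
	(*link)->size = size;
}

/* mirrors the __lookup_object() idea: walk the tree picked by is_phys */
static struct obj *lookup(unsigned long ptr, bool is_phys)
{
	struct obj *o = *root_for(is_phys);

	while (o) {
		if (ptr < o->start)
			o = o->left;
		else if (ptr >= o->start + o->size)
			o = o->right;
		else
			return o;
	}
	return NULL;
}

int main(void)
{
	insert(false, 0xffff888001000000UL, 0x1000); /* "virtual" object */
	insert(true,  0x0000000001000000UL, 0x1000); /* "physical" object */

	printf("virt hit: %d, phys hit: %d\n",
	       lookup(0xffff888001000800UL, false) != NULL,
	       lookup(0x0000000001000800UL, true) != NULL);
	return 0;
}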
diff --git a/mm/ksm.c b/mm/ksm.c
index e8f8c1a2bb39..42ab153335a2 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -475,7 +475,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
cond_resched();
page = follow_page(vma, addr,
FOLL_GET | FOLL_MIGRATION | FOLL_REMOTE);
- if (IS_ERR_OR_NULL(page))
+ if (IS_ERR_OR_NULL(page) || is_zone_device_page(page))
break;
if (PageKsm(page))
ret = handle_mm_fault(vma, addr,
@@ -560,7 +560,7 @@ static struct page *get_mergeable_page(struct rmap_item *rmap_item)
goto out;
page = follow_page(vma, addr, FOLL_GET);
- if (IS_ERR_OR_NULL(page))
+ if (IS_ERR_OR_NULL(page) || is_zone_device_page(page))
goto out;
if (PageAnon(page)) {
flush_anon_page(vma, page, addr);
@@ -1083,7 +1083,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
* No need to notify as we are downgrading page table to read
* only not changing it to point to a new page.
*
- * See Documentation/vm/mmu_notifier.rst
+ * See Documentation/mm/mmu_notifier.rst
*/
entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
/*
@@ -1186,7 +1186,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
* No need to notify as we are replacing a read only page with another
* read only page with the same content.
*
- * See Documentation/vm/mmu_notifier.rst
+ * See Documentation/mm/mmu_notifier.rst
*/
ptep_clear_flush(vma, addr, ptep);
set_pte_at_notify(mm, addr, ptep, newpte);
@@ -2308,7 +2308,7 @@ next_mm:
if (ksm_test_exit(mm))
break;
*page = follow_page(vma, ksm_scan.address, FOLL_GET);
- if (IS_ERR_OR_NULL(*page)) {
+ if (IS_ERR_OR_NULL(*page) || is_zone_device_page(*page)) {
ksm_scan.address += PAGE_SIZE;
cond_resched();
continue;
diff --git a/mm/list_lru.c b/mm/list_lru.c
index ba76428ceece..a05e5bef3b40 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -71,7 +71,7 @@ list_lru_from_kmem(struct list_lru *lru, int nid, void *ptr,
if (!list_lru_memcg_aware(lru))
goto out;
- memcg = mem_cgroup_from_obj(ptr);
+ memcg = mem_cgroup_from_slab_obj(ptr);
if (!memcg)
goto out;
diff --git a/mm/madvise.c b/mm/madvise.c
index 0316bbc6441b..5f0f0948a50e 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -195,7 +195,6 @@ success:
static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
unsigned long end, struct mm_walk *walk)
{
- pte_t *orig_pte;
struct vm_area_struct *vma = walk->private;
unsigned long index;
struct swap_iocb *splug = NULL;
@@ -208,12 +207,13 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
swp_entry_t entry;
struct page *page;
spinlock_t *ptl;
+ pte_t *ptep;
- orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
- pte = *(orig_pte + ((index - start) / PAGE_SIZE));
- pte_unmap_unlock(orig_pte, ptl);
+ ptep = pte_offset_map_lock(vma->vm_mm, pmd, index, &ptl);
+ pte = *ptep;
+ pte_unmap_unlock(ptep, ptl);
- if (pte_present(pte) || pte_none(pte))
+ if (!is_swap_pte(pte))
continue;
entry = pte_to_swp_entry(pte);
if (unlikely(non_swap_entry(entry)))
@@ -421,7 +421,7 @@ regular_page:
continue;
page = vm_normal_page(vma, addr, ptent);
- if (!page)
+ if (!page || is_zone_device_page(page))
continue;
/*
@@ -639,7 +639,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
}
page = vm_normal_page(vma, addr, ptent);
- if (!page)
+ if (!page || is_zone_device_page(page))
continue;
/*
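
The swapin_walk_pmd_entry() change above tests !is_swap_pte(pte) where the old code tested pte_present(pte) || pte_none(pte). Assuming is_swap_pte() keeps its usual meaning of "neither none nor present", the two predicates are equivalent; a tiny self-contained check of the identity:

#include <assert.h>
#include <stdbool.h>

/* stand-in for the kernel predicate: a swap pte is neither none nor present */
static bool is_swap_pte(bool present, bool none)
{
	return !present && !none;
}

int main(void)
{
	for (int present = 0; present <= 1; present++)
		for (int none = 0; none <= 1; none++)
			assert(!is_swap_pte(present, none) == (present || none));
	return 0;
}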
diff --git a/mm/memblock.c b/mm/memblock.c
index a9f18b988b7f..b5d3026979fc 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -29,6 +29,10 @@
# define INIT_MEMBLOCK_RESERVED_REGIONS INIT_MEMBLOCK_REGIONS
#endif
+#ifndef INIT_MEMBLOCK_MEMORY_REGIONS
+#define INIT_MEMBLOCK_MEMORY_REGIONS INIT_MEMBLOCK_REGIONS
+#endif
+
/**
* DOC: memblock overview
*
@@ -55,9 +59,9 @@
* the allocator metadata. The "memory" and "reserved" types are nicely
* wrapped with struct memblock. This structure is statically
* initialized at build time. The region arrays are initially sized to
- * %INIT_MEMBLOCK_REGIONS for "memory" and %INIT_MEMBLOCK_RESERVED_REGIONS
- * for "reserved". The region array for "physmem" is initially sized to
- * %INIT_PHYSMEM_REGIONS.
+ * %INIT_MEMBLOCK_MEMORY_REGIONS for "memory" and
+ * %INIT_MEMBLOCK_RESERVED_REGIONS for "reserved". The region array
+ * for "physmem" is initially sized to %INIT_PHYSMEM_REGIONS.
* The memblock_allow_resize() enables automatic resizing of the region
* arrays during addition of new regions. This feature should be used
* with care so that memory allocated for the region array will not
@@ -102,7 +106,7 @@ unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;
-static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
+static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_MEMORY_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS];
@@ -111,7 +115,7 @@ static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS
struct memblock memblock __initdata_memblock = {
.memory.regions = memblock_memory_init_regions,
.memory.cnt = 1, /* empty dummy entry */
- .memory.max = INIT_MEMBLOCK_REGIONS,
+ .memory.max = INIT_MEMBLOCK_MEMORY_REGIONS,
.memory.name = "memory",
.reserved.regions = memblock_reserved_init_regions,
@@ -593,6 +597,17 @@ static int __init_memblock memblock_add_range(struct memblock_type *type,
type->total_size = size;
return 0;
}
+
+ /*
+ * The worst case is when new range overlaps all existing regions,
+ * then we'll need type->cnt + 1 empty regions in @type. So if
+ * type->cnt * 2 + 1 is less than type->max, we know
+ * that there are enough empty regions in @type, and we can insert
+ * regions directly.
+ */
+ if (type->cnt * 2 + 1 < type->max)
+ insert = true;
+
repeat:
/*
* The following is executed twice. Once with %false @insert and
@@ -1348,8 +1363,8 @@ __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
* from the regions with mirroring enabled and then retried from any
* memory region.
*
- * In addition, function sets the min_count to 0 using kmemleak_alloc_phys for
- * allocated boot memory block, so that it is never reported as leaks.
+ * In addition, the function uses kmemleak_alloc_phys() for the allocated
+ * boot memory block, so it is never reported as a leak.
*
* Return:
* Physical address of allocated memory block on success, %0 on failure.
@@ -1401,12 +1416,12 @@ done:
*/
if (end != MEMBLOCK_ALLOC_NOLEAKTRACE)
/*
- * The min_count is set to 0 so that memblock allocated
- * blocks are never reported as leaks. This is because many
- * of these blocks are only referred via the physical
- * address which is not looked up by kmemleak.
+ * Memblock allocated blocks are never reported as
+ * leaks. This is because many of these blocks are
+ * only referred via the physical address which is
+ * not looked up by kmemleak.
*/
- kmemleak_alloc_phys(found, size, 0, 0);
+ kmemleak_alloc_phys(found, size, 0);
return found;
}
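
The new memblock_add_range() fast path rests on a counting bound: a new range overlapping all type->cnt existing regions breaks into at most cnt + 1 uncovered pieces, so at most 2 * cnt + 1 regions exist after insertion. A small sketch with made-up numbers to illustrate the bound:

#include <stdio.h>

int main(void)
{
	unsigned long cnt = 4, max = 16;          /* sample values only */
	unsigned long new_pieces = cnt + 1;       /* worst-case splits of the new range */
	unsigned long worst_total = cnt + new_pieces; /* == 2 * cnt + 1 */

	printf("worst case after insert: %lu regions (max %lu) -> %s\n",
	       worst_total, max,
	       (cnt * 2 + 1 < max) ? "single-pass insert is safe"
				   : "two-pass path / resize needed");
	return 0;
}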
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 618c366a2f07..b69979c9ced5 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -626,7 +626,14 @@ static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
x = __this_cpu_add_return(stats_updates, abs(val));
if (x > MEMCG_CHARGE_BATCH) {
- atomic_add(x / MEMCG_CHARGE_BATCH, &stats_flush_threshold);
+ /*
+ * If stats_flush_threshold exceeds the threshold
+ * (>num_online_cpus()), cgroup stats update will be triggered
+ * in __mem_cgroup_flush_stats(). Increasing this var further
+ * is redundant and simply adds overhead in atomic update.
+ */
+ if (atomic_read(&stats_flush_threshold) <= num_online_cpus())
+ atomic_add(x / MEMCG_CHARGE_BATCH, &stats_flush_threshold);
__this_cpu_write(stats_updates, 0);
}
}
@@ -783,7 +790,7 @@ void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
struct lruvec *lruvec;
rcu_read_lock();
- memcg = mem_cgroup_from_obj(p);
+ memcg = mem_cgroup_from_slab_obj(p);
/*
* Untracked pages have no memcg, no lruvec. Update only the
@@ -1460,14 +1467,35 @@ static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg,
return memcg_page_state(memcg, item) * memcg_page_state_unit(item);
}
-static char *memory_stat_format(struct mem_cgroup *memcg)
+/* Subset of vm_event_item to report for memcg event stats */
+static const unsigned int memcg_vm_event_stat[] = {
+ PGSCAN_KSWAPD,
+ PGSCAN_DIRECT,
+ PGSTEAL_KSWAPD,
+ PGSTEAL_DIRECT,
+ PGFAULT,
+ PGMAJFAULT,
+ PGREFILL,
+ PGACTIVATE,
+ PGDEACTIVATE,
+ PGLAZYFREE,
+ PGLAZYFREED,
+#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
+ ZSWPIN,
+ ZSWPOUT,
+#endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ THP_FAULT_ALLOC,
+ THP_COLLAPSE_ALLOC,
+#endif
+};
+
+static void memory_stat_format(struct mem_cgroup *memcg, char *buf, int bufsize)
{
struct seq_buf s;
int i;
- seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
- if (!s.buffer)
- return NULL;
+ seq_buf_init(&s, buf, bufsize);
/*
* Provide statistics on the state of the memory subsystem as
@@ -1495,46 +1523,20 @@ static char *memory_stat_format(struct mem_cgroup *memcg)
}
/* Accumulated memory events */
-
- seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGFAULT),
- memcg_events(memcg, PGFAULT));
- seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGMAJFAULT),
- memcg_events(memcg, PGMAJFAULT));
- seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGREFILL),
- memcg_events(memcg, PGREFILL));
seq_buf_printf(&s, "pgscan %lu\n",
memcg_events(memcg, PGSCAN_KSWAPD) +
memcg_events(memcg, PGSCAN_DIRECT));
seq_buf_printf(&s, "pgsteal %lu\n",
memcg_events(memcg, PGSTEAL_KSWAPD) +
memcg_events(memcg, PGSTEAL_DIRECT));
- seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGACTIVATE),
- memcg_events(memcg, PGACTIVATE));
- seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGDEACTIVATE),
- memcg_events(memcg, PGDEACTIVATE));
- seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREE),
- memcg_events(memcg, PGLAZYFREE));
- seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREED),
- memcg_events(memcg, PGLAZYFREED));
-
-#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
- seq_buf_printf(&s, "%s %lu\n", vm_event_name(ZSWPIN),
- memcg_events(memcg, ZSWPIN));
- seq_buf_printf(&s, "%s %lu\n", vm_event_name(ZSWPOUT),
- memcg_events(memcg, ZSWPOUT));
-#endif
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_FAULT_ALLOC),
- memcg_events(memcg, THP_FAULT_ALLOC));
- seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_COLLAPSE_ALLOC),
- memcg_events(memcg, THP_COLLAPSE_ALLOC));
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+ for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++)
+ seq_buf_printf(&s, "%s %lu\n",
+ vm_event_name(memcg_vm_event_stat[i]),
+ memcg_events(memcg, memcg_vm_event_stat[i]));
/* The above should easily fit into one page */
WARN_ON_ONCE(seq_buf_has_overflowed(&s));
-
- return s.buffer;
}
#define K(x) ((x) << (PAGE_SHIFT-10))
@@ -1570,7 +1572,10 @@ void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *
*/
void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
- char *buf;
+ /* Use static buffer, for the caller is holding oom_lock. */
+ static char buf[PAGE_SIZE];
+
+ lockdep_assert_held(&oom_lock);
pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
K((u64)page_counter_read(&memcg->memory)),
@@ -1591,11 +1596,8 @@ void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
pr_info("Memory cgroup stats for ");
pr_cont_cgroup_path(memcg->css.cgroup);
pr_cont(":");
- buf = memory_stat_format(memcg);
- if (!buf)
- return;
+ memory_stat_format(memcg, buf, sizeof(buf));
pr_info("%s", buf);
- kfree(buf);
}
/*
@@ -2331,7 +2333,8 @@ static unsigned long reclaim_high(struct mem_cgroup *memcg,
psi_memstall_enter(&pflags);
nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
- gfp_mask, true);
+ gfp_mask,
+ MEMCG_RECLAIM_MAY_SWAP);
psi_memstall_leave(&pflags);
} while ((memcg = parent_mem_cgroup(memcg)) &&
!mem_cgroup_is_root(memcg));
@@ -2576,8 +2579,9 @@ static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
struct page_counter *counter;
unsigned long nr_reclaimed;
bool passed_oom = false;
- bool may_swap = true;
+ unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP;
bool drained = false;
+ bool raised_max_event = false;
unsigned long pflags;
retry:
@@ -2593,7 +2597,7 @@ retry:
mem_over_limit = mem_cgroup_from_counter(counter, memory);
} else {
mem_over_limit = mem_cgroup_from_counter(counter, memsw);
- may_swap = false;
+ reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP;
}
if (batch > nr_pages) {
@@ -2617,10 +2621,11 @@ retry:
goto nomem;
memcg_memory_event(mem_over_limit, MEMCG_MAX);
+ raised_max_event = true;
psi_memstall_enter(&pflags);
nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
- gfp_mask, may_swap);
+ gfp_mask, reclaim_options);
psi_memstall_leave(&pflags);
if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
@@ -2684,6 +2689,13 @@ nomem:
return -ENOMEM;
force:
/*
+ * If the allocation has to be enforced, don't forget to raise
+ * a MEMCG_MAX event.
+ */
+ if (!raised_max_event)
+ memcg_memory_event(mem_over_limit, MEMCG_MAX);
+
+ /*
* The allocation either can't fail or will lead to more memory
* being freed very soon. Allow memory usage go over the limit
* temporarily by force charging it.
@@ -2842,27 +2854,9 @@ int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
return 0;
}
-/*
- * Returns a pointer to the memory cgroup to which the kernel object is charged.
- *
- * A passed kernel object can be a slab object or a generic kernel page, so
- * different mechanisms for getting the memory cgroup pointer should be used.
- * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller
- * can not know for sure how the kernel object is implemented.
- * mem_cgroup_from_obj() can be safely used in such cases.
- *
- * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
- * cgroup_mutex, etc.
- */
-struct mem_cgroup *mem_cgroup_from_obj(void *p)
+static __always_inline
+struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
{
- struct folio *folio;
-
- if (mem_cgroup_disabled())
- return NULL;
-
- folio = virt_to_folio(p);
-
/*
* Slab objects are accounted individually, not per-page.
* Memcg membership data for each individual object is saved in
@@ -2895,6 +2889,53 @@ struct mem_cgroup *mem_cgroup_from_obj(void *p)
return page_memcg_check(folio_page(folio, 0));
}
+/*
+ * Returns a pointer to the memory cgroup to which the kernel object is charged.
+ *
+ * A passed kernel object can be a slab object, vmalloc object or a generic
+ * kernel page, so different mechanisms for getting the memory cgroup pointer
+ * should be used.
+ *
+ * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller
+ * can not know for sure how the kernel object is implemented.
+ * mem_cgroup_from_obj() can be safely used in such cases.
+ *
+ * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
+ * cgroup_mutex, etc.
+ */
+struct mem_cgroup *mem_cgroup_from_obj(void *p)
+{
+ struct folio *folio;
+
+ if (mem_cgroup_disabled())
+ return NULL;
+
+ if (unlikely(is_vmalloc_addr(p)))
+ folio = page_folio(vmalloc_to_page(p));
+ else
+ folio = virt_to_folio(p);
+
+ return mem_cgroup_from_obj_folio(folio, p);
+}
+
+/*
+ * Returns a pointer to the memory cgroup to which the kernel object is charged.
+ * Similar to mem_cgroup_from_obj(), but faster and not suitable for objects,
+ * allocated using vmalloc().
+ *
+ * A passed kernel object must be a slab object or a generic kernel page.
+ *
+ * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
+ * cgroup_mutex, etc.
+ */
+struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
+{
+ if (mem_cgroup_disabled())
+ return NULL;
+
+ return mem_cgroup_from_obj_folio(virt_to_folio(p), p);
+}
+
static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
{
struct obj_cgroup *objcg = NULL;
@@ -3402,8 +3443,8 @@ static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
continue;
}
- if (!try_to_free_mem_cgroup_pages(memcg, 1,
- GFP_KERNEL, !memsw)) {
+ if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
+ memsw ? 0 : MEMCG_RECLAIM_MAY_SWAP)) {
ret = -EBUSY;
break;
}
@@ -3513,7 +3554,8 @@ static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
if (signal_pending(current))
return -EINTR;
- if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true))
+ if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
+ MEMCG_RECLAIM_MAY_SWAP))
nr_retries--;
}
@@ -3625,7 +3667,7 @@ static int memcg_online_kmem(struct mem_cgroup *memcg)
{
struct obj_cgroup *objcg;
- if (cgroup_memory_nokmem)
+ if (mem_cgroup_kmem_disabled())
return 0;
if (unlikely(mem_cgroup_is_root(memcg)))
@@ -3649,7 +3691,7 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg)
{
struct mem_cgroup *parent;
- if (cgroup_memory_nokmem)
+ if (mem_cgroup_kmem_disabled())
return;
if (unlikely(mem_cgroup_is_root(memcg)))
@@ -5060,6 +5102,29 @@ struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
return idr_find(&mem_cgroup_idr, id);
}
+#ifdef CONFIG_SHRINKER_DEBUG
+struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
+{
+ struct cgroup *cgrp;
+ struct cgroup_subsys_state *css;
+ struct mem_cgroup *memcg;
+
+ cgrp = cgroup_get_from_id(ino);
+ if (!cgrp)
+ return ERR_PTR(-ENOENT);
+
+ css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
+ if (css)
+ memcg = container_of(css, struct mem_cgroup, css);
+ else
+ memcg = ERR_PTR(-ENOENT);
+
+ cgroup_put(cgrp);
+
+ return memcg;
+}
+#endif
+
static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
{
struct mem_cgroup_per_node *pn;
@@ -5665,8 +5730,8 @@ out:
* 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
* target for charge migration. if @target is not NULL, the entry is stored
* in target->ent.
- * 3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but page is MEMORY_DEVICE_PRIVATE
- * (so ZONE_DEVICE page and thus not on the lru).
+ * 3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but page is device memory and
+ * thus not on the lru.
* For now we such page is charge like a regular page would be as for all
* intent and purposes it is just special memory taking the place of a
* regular page.
@@ -5704,7 +5769,8 @@ static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
*/
if (page_memcg(page) == mc.from) {
ret = MC_TARGET_PAGE;
- if (is_device_private_page(page))
+ if (is_device_private_page(page) ||
+ is_device_coherent_page(page))
ret = MC_TARGET_DEVICE;
if (target)
target->page = page;
@@ -6241,7 +6307,7 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
}
reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
- GFP_KERNEL, true);
+ GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP);
if (!reclaimed && !nr_retries--)
break;
@@ -6290,7 +6356,7 @@ static ssize_t memory_max_write(struct kernfs_open_file *of,
if (nr_reclaims) {
if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
- GFP_KERNEL, true))
+ GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP))
nr_reclaims--;
continue;
}
@@ -6335,11 +6401,11 @@ static int memory_events_local_show(struct seq_file *m, void *v)
static int memory_stat_show(struct seq_file *m, void *v)
{
struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
- char *buf;
+ char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
- buf = memory_stat_format(memcg);
if (!buf)
return -ENOMEM;
+ memory_stat_format(memcg, buf, PAGE_SIZE);
seq_puts(m, buf);
kfree(buf);
return 0;
@@ -6419,6 +6485,7 @@ static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
unsigned int nr_retries = MAX_RECLAIM_RETRIES;
unsigned long nr_to_reclaim, nr_reclaimed = 0;
+ unsigned int reclaim_options;
int err;
buf = strstrip(buf);
@@ -6426,6 +6493,7 @@ static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
if (err)
return err;
+ reclaim_options = MEMCG_RECLAIM_MAY_SWAP | MEMCG_RECLAIM_PROACTIVE;
while (nr_reclaimed < nr_to_reclaim) {
unsigned long reclaimed;
@@ -6442,7 +6510,7 @@ static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
reclaimed = try_to_free_mem_cgroup_pages(memcg,
nr_to_reclaim - nr_reclaimed,
- GFP_KERNEL, true);
+ GFP_KERNEL, reclaim_options);
if (!reclaimed && !nr_retries--)
return -EAGAIN;
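
The memcontrol.c changes above make memory_stat_format() write into a caller-provided buffer and emit per-event lines from the memcg_vm_event_stat[] table. A minimal userspace sketch of the table-driven pattern; event ids, names and values are invented for illustration:

#include <stdio.h>
#include <string.h>

enum vm_event { PGFAULT, PGMAJFAULT, PGREFILL, NR_EVENTS };

static const char * const event_name[NR_EVENTS] = {
	[PGFAULT] = "pgfault", [PGMAJFAULT] = "pgmajfault", [PGREFILL] = "pgrefill",
};

/* subset of events to report, analogous to memcg_vm_event_stat[] */
static const enum vm_event stat_table[] = { PGFAULT, PGMAJFAULT, PGREFILL };

static unsigned long read_event(enum vm_event e)
{
	return 100 * (e + 1); /* fake counter values */
}

/* caller supplies the buffer; one loop replaces a printf per event */
static void stat_format(char *buf, size_t bufsize)
{
	size_t off = 0;

	for (size_t i = 0; i < sizeof(stat_table) / sizeof(stat_table[0]); i++)
		off += snprintf(buf + off, bufsize > off ? bufsize - off : 0,
				"%s %lu\n", event_name[stat_table[i]],
				read_event(stat_table[i]));
}

int main(void)
{
	char buf[256];

	stat_format(buf, sizeof(buf));
	fputs(buf, stdout);
	return 0;
}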
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index b864c2eff641..14439806b5ef 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -33,6 +33,9 @@
* are rare we hope to get away with this. This avoids impacting the core
* VM.
*/
+
+#define pr_fmt(fmt) "Memory failure: " fmt
+
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
@@ -71,7 +74,13 @@ atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
static bool hw_memory_failure __read_mostly = false;
-static bool __page_handle_poison(struct page *page)
+/*
+ * Return values:
+ * 1: the page is dissolved (if needed) and taken off from buddy,
+ * 0: the page is dissolved (if needed) and not taken off from buddy,
+ * < 0: failed to dissolve.
+ */
+static int __page_handle_poison(struct page *page)
{
int ret;
@@ -81,7 +90,7 @@ static bool __page_handle_poison(struct page *page)
ret = take_page_off_buddy(page);
zone_pcp_enable(page_zone(page));
- return ret > 0;
+ return ret;
}
static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, bool release)
@@ -91,7 +100,7 @@ static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, boo
* Doing this check for free pages is also fine since dissolve_free_huge_page
* returns 0 for non-hugetlb pages as well.
*/
- if (!__page_handle_poison(page))
+ if (__page_handle_poison(page) <= 0)
/*
* We could fail to take off the target page from buddy
* for example due to racy page allocation, but that's
@@ -252,7 +261,7 @@ static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags)
short addr_lsb = tk->size_shift;
int ret = 0;
- pr_err("Memory failure: %#lx: Sending SIGBUS to %s:%d due to hardware memory corruption\n",
+ pr_err("%#lx: Sending SIGBUS to %s:%d due to hardware memory corruption\n",
pfn, t->comm, t->pid);
if ((flags & MF_ACTION_REQUIRED) && (t == current))
@@ -270,7 +279,7 @@ static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags)
ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)tk->addr,
addr_lsb, t); /* synchronous? */
if (ret < 0)
- pr_info("Memory failure: Error sending signal to %s:%d: %d\n",
+ pr_info("Error sending signal to %s:%d: %d\n",
t->comm, t->pid, ret);
return ret;
}
@@ -297,10 +306,9 @@ void shake_page(struct page *p)
}
EXPORT_SYMBOL_GPL(shake_page);
-static unsigned long dev_pagemap_mapping_shift(struct page *page,
- struct vm_area_struct *vma)
+static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma,
+ unsigned long address)
{
- unsigned long address = vma_address(page, vma);
unsigned long ret = 0;
pgd_t *pgd;
p4d_t *p4d;
@@ -340,23 +348,33 @@ static unsigned long dev_pagemap_mapping_shift(struct page *page,
/*
* Schedule a process for later kill.
* Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
+ *
+ * Notice: @fsdax_pgoff is used only when @p is a fsdax page.
+ * In other cases, such as anonymous and file-backed pages, the address to be
+ * killed can be calculated from @p itself.
*/
static void add_to_kill(struct task_struct *tsk, struct page *p,
- struct vm_area_struct *vma,
- struct list_head *to_kill)
+ pgoff_t fsdax_pgoff, struct vm_area_struct *vma,
+ struct list_head *to_kill)
{
struct to_kill *tk;
tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
if (!tk) {
- pr_err("Memory failure: Out of memory while machine check handling\n");
+ pr_err("Out of memory while machine check handling\n");
return;
}
tk->addr = page_address_in_vma(p, vma);
- if (is_zone_device_page(p))
- tk->size_shift = dev_pagemap_mapping_shift(p, vma);
- else
+ if (is_zone_device_page(p)) {
+ /*
+ * Since page->mapping is not used for fsdax, we need to
+ * calculate the address based on the vma.
+ */
+ if (p->pgmap->type == MEMORY_DEVICE_FS_DAX)
+ tk->addr = vma_pgoff_address(fsdax_pgoff, 1, vma);
+ tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr);
+ } else
tk->size_shift = page_shift(compound_head(p));
/*
@@ -370,7 +388,7 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
* has a mapping for the page.
*/
if (tk->addr == -EFAULT) {
- pr_info("Memory failure: Unable to find user space address %lx in %s\n",
+ pr_info("Unable to find user space address %lx in %s\n",
page_to_pfn(p), tsk->comm);
} else if (tk->size_shift == 0) {
kfree(tk);
@@ -403,7 +421,7 @@ static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
* signal and then access the memory. Just kill it.
*/
if (fail || tk->addr == -EFAULT) {
- pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
+ pr_err("%#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
pfn, tk->tsk->comm, tk->tsk->pid);
do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
tk->tsk, PIDTYPE_PID);
@@ -416,7 +434,7 @@ static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
* process anyways.
*/
else if (kill_proc(tk, pfn, flags) < 0)
- pr_err("Memory failure: %#lx: Cannot send advisory machine check signal to %s:%d\n",
+ pr_err("%#lx: Cannot send advisory machine check signal to %s:%d\n",
pfn, tk->tsk->comm, tk->tsk->pid);
}
put_task_struct(tk->tsk);
@@ -505,7 +523,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
if (!page_mapped_in_vma(page, vma))
continue;
if (vma->vm_mm == t->mm)
- add_to_kill(t, page, vma, to_kill);
+ add_to_kill(t, page, 0, vma, to_kill);
}
}
read_unlock(&tasklist_lock);
@@ -541,13 +559,41 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
* to be informed of all such data corruptions.
*/
if (vma->vm_mm == t->mm)
- add_to_kill(t, page, vma, to_kill);
+ add_to_kill(t, page, 0, vma, to_kill);
}
}
read_unlock(&tasklist_lock);
i_mmap_unlock_read(mapping);
}
+#ifdef CONFIG_FS_DAX
+/*
+ * Collect processes when the error hit a fsdax page.
+ */
+static void collect_procs_fsdax(struct page *page,
+ struct address_space *mapping, pgoff_t pgoff,
+ struct list_head *to_kill)
+{
+ struct vm_area_struct *vma;
+ struct task_struct *tsk;
+
+ i_mmap_lock_read(mapping);
+ read_lock(&tasklist_lock);
+ for_each_process(tsk) {
+ struct task_struct *t = task_early_kill(tsk, true);
+
+ if (!t)
+ continue;
+ vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
+ if (vma->vm_mm == t->mm)
+ add_to_kill(t, page, pgoff, vma, to_kill);
+ }
+ }
+ read_unlock(&tasklist_lock);
+ i_mmap_unlock_read(mapping);
+}
+#endif /* CONFIG_FS_DAX */
+
/*
* Collect the processes who have the corrupted page mapped to kill.
*/
@@ -722,7 +768,6 @@ static const char * const action_page_types[] = {
[MF_MSG_DIFFERENT_COMPOUND] = "different compound page after locking",
[MF_MSG_HUGE] = "huge page",
[MF_MSG_FREE_HUGE] = "free huge page",
- [MF_MSG_NON_PMD_HUGE] = "non-pmd-sized huge page",
[MF_MSG_UNMAP_FAILED] = "unmapping failed page",
[MF_MSG_DIRTY_SWAPCACHE] = "dirty swapcache page",
[MF_MSG_CLEAN_SWAPCACHE] = "clean swapcache page",
@@ -779,12 +824,10 @@ static int truncate_error_page(struct page *p, unsigned long pfn,
int err = mapping->a_ops->error_remove_page(mapping, p);
if (err != 0) {
- pr_info("Memory failure: %#lx: Failed to punch page: %d\n",
- pfn, err);
+ pr_info("%#lx: Failed to punch page: %d\n", pfn, err);
} else if (page_has_private(p) &&
!try_to_release_page(p, GFP_NOIO)) {
- pr_info("Memory failure: %#lx: failed to release buffers\n",
- pfn);
+ pr_info("%#lx: failed to release buffers\n", pfn);
} else {
ret = MF_RECOVERED;
}
@@ -796,8 +839,7 @@ static int truncate_error_page(struct page *p, unsigned long pfn,
if (invalidate_inode_page(p))
ret = MF_RECOVERED;
else
- pr_info("Memory failure: %#lx: Failed to invalidate\n",
- pfn);
+ pr_info("%#lx: Failed to invalidate\n", pfn);
}
return ret;
@@ -827,7 +869,7 @@ static bool has_extra_refcount(struct page_state *ps, struct page *p,
count -= 1;
if (count > 0) {
- pr_err("Memory failure: %#lx: %s still referenced by %d users\n",
+ pr_err("%#lx: %s still referenced by %d users\n",
page_to_pfn(p), action_page_types[ps->type], count);
return true;
}
@@ -851,7 +893,7 @@ static int me_kernel(struct page_state *ps, struct page *p)
*/
static int me_unknown(struct page_state *ps, struct page *p)
{
- pr_err("Memory failure: %#lx: Unknown page state\n", page_to_pfn(p));
+ pr_err("%#lx: Unknown page state\n", page_to_pfn(p));
unlock_page(p);
return MF_FAILED;
}
@@ -1007,12 +1049,13 @@ static int me_swapcache_dirty(struct page_state *ps, struct page *p)
static int me_swapcache_clean(struct page_state *ps, struct page *p)
{
+ struct folio *folio = page_folio(p);
int ret;
- delete_from_swap_cache(p);
+ delete_from_swap_cache(folio);
ret = delete_from_lru_cache(p) ? MF_FAILED : MF_RECOVERED;
- unlock_page(p);
+ folio_unlock(folio);
if (has_extra_refcount(ps, p, false))
ret = MF_FAILED;
@@ -1040,7 +1083,6 @@ static int me_huge_page(struct page_state *ps, struct page *p)
res = truncate_error_page(hpage, page_to_pfn(p), mapping);
unlock_page(hpage);
} else {
- res = MF_FAILED;
unlock_page(hpage);
/*
* migration entry prevents later access on error hugepage,
@@ -1048,9 +1090,11 @@ static int me_huge_page(struct page_state *ps, struct page *p)
* subpages.
*/
put_page(hpage);
- if (__page_handle_poison(p)) {
+ if (__page_handle_poison(p) >= 0) {
page_ref_inc(p);
res = MF_RECOVERED;
+ } else {
+ res = MF_FAILED;
}
}
@@ -1135,7 +1179,7 @@ static void action_result(unsigned long pfn, enum mf_action_page_type type,
trace_memory_failure_event(pfn, type, result);
num_poisoned_pages_inc();
- pr_err("Memory failure: %#lx: recovery action for %s: %s\n",
+ pr_err("%#lx: recovery action for %s: %s\n",
pfn, action_page_types[type], action_name[result]);
}
@@ -1210,8 +1254,7 @@ static int __get_hwpoison_page(struct page *page, unsigned long flags)
if (head == compound_head(page))
return 1;
- pr_info("Memory failure: %#lx cannot catch tail\n",
- page_to_pfn(page));
+ pr_info("%#lx cannot catch tail\n", page_to_pfn(page));
put_page(head);
}
@@ -1274,7 +1317,7 @@ try_again:
}
out:
if (ret == -EIO)
- pr_err("Memory failure: %#lx: unhandlable page.\n", page_to_pfn(p));
+ pr_err("%#lx: unhandlable page.\n", page_to_pfn(p));
return ret;
}
@@ -1373,13 +1416,12 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
return true;
if (PageKsm(p)) {
- pr_err("Memory failure: %#lx: can't handle KSM pages.\n", pfn);
+ pr_err("%#lx: can't handle KSM pages.\n", pfn);
return false;
}
if (PageSwapCache(p)) {
- pr_err("Memory failure: %#lx: keeping poisoned page in swap cache\n",
- pfn);
+ pr_err("%#lx: keeping poisoned page in swap cache\n", pfn);
ttu |= TTU_IGNORE_HWPOISON;
}
@@ -1397,7 +1439,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
} else {
kill = 0;
ttu |= TTU_IGNORE_HWPOISON;
- pr_info("Memory failure: %#lx: corrupted page was clean: dropped without side effects\n",
+ pr_info("%#lx: corrupted page was clean: dropped without side effects\n",
pfn);
}
}
@@ -1426,14 +1468,14 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
try_to_unmap(folio, ttu|TTU_RMAP_LOCKED);
i_mmap_unlock_write(mapping);
} else
- pr_info("Memory failure: %#lx: could not lock mapping for mapped huge page\n", pfn);
+ pr_info("%#lx: could not lock mapping for mapped huge page\n", pfn);
} else {
try_to_unmap(folio, ttu);
}
unmap_success = !page_mapped(hpage);
if (!unmap_success)
- pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
+ pr_err("%#lx: failed to unmap page (mapcount=%d)\n",
pfn, page_mapcount(hpage));
/*
@@ -1498,6 +1540,241 @@ static int try_to_split_thp_page(struct page *page, const char *msg)
return 0;
}
+static void unmap_and_kill(struct list_head *to_kill, unsigned long pfn,
+ struct address_space *mapping, pgoff_t index, int flags)
+{
+ struct to_kill *tk;
+ unsigned long size = 0;
+
+ list_for_each_entry(tk, to_kill, nd)
+ if (tk->size_shift)
+ size = max(size, 1UL << tk->size_shift);
+
+ if (size) {
+ /*
+ * Unmap the largest mapping to avoid breaking up device-dax
+ * mappings which are constant size. The actual size of the
+ * mapping being torn down is communicated in siginfo, see
+ * kill_proc()
+ */
+ loff_t start = (index << PAGE_SHIFT) & ~(size - 1);
+
+ unmap_mapping_range(mapping, start, size, 0);
+ }
+
+ kill_procs(to_kill, flags & MF_MUST_KILL, false, pfn, flags);
+}
+
+static int mf_generic_kill_procs(unsigned long long pfn, int flags,
+ struct dev_pagemap *pgmap)
+{
+ struct page *page = pfn_to_page(pfn);
+ LIST_HEAD(to_kill);
+ dax_entry_t cookie;
+ int rc = 0;
+
+ /*
+ * Pages instantiated by device-dax (not filesystem-dax)
+ * may be compound pages.
+ */
+ page = compound_head(page);
+
+ /*
+ * Prevent the inode from being freed while we are interrogating
+ * the address_space, typically this would be handled by
+ * lock_page(), but dax pages do not use the page lock. This
+ * also prevents changes to the mapping of this pfn until
+ * poison signaling is complete.
+ */
+ cookie = dax_lock_page(page);
+ if (!cookie)
+ return -EBUSY;
+
+ if (hwpoison_filter(page)) {
+ rc = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ switch (pgmap->type) {
+ case MEMORY_DEVICE_PRIVATE:
+ case MEMORY_DEVICE_COHERENT:
+ /*
+ * TODO: Handle device pages which may need coordination
+ * with device-side memory.
+ */
+ rc = -ENXIO;
+ goto unlock;
+ default:
+ break;
+ }
+
+ /*
+ * Use this flag as an indication that the dax page has been
+ * remapped UC to prevent speculative consumption of poison.
+ */
+ SetPageHWPoison(page);
+
+ /*
+ * Unlike System-RAM there is no possibility to swap in a
+ * different physical page at a given virtual address, so all
+ * userspace consumption of ZONE_DEVICE memory necessitates
+ * SIGBUS (i.e. MF_MUST_KILL)
+ */
+ flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
+ collect_procs(page, &to_kill, true);
+
+ unmap_and_kill(&to_kill, pfn, page->mapping, page->index, flags);
+unlock:
+ dax_unlock_page(page, cookie);
+ return rc;
+}
+
+#ifdef CONFIG_FS_DAX
+/**
+ * mf_dax_kill_procs - Collect and kill processes who are using this file range
+ * @mapping: address_space of the file in use
+ * @index: start pgoff of the range within the file
+ * @count: length of the range, in unit of PAGE_SIZE
+ * @mf_flags: memory failure flags
+ */
+int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
+ unsigned long count, int mf_flags)
+{
+ LIST_HEAD(to_kill);
+ dax_entry_t cookie;
+ struct page *page;
+ size_t end = index + count;
+
+ mf_flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
+
+ for (; index < end; index++) {
+ page = NULL;
+ cookie = dax_lock_mapping_entry(mapping, index, &page);
+ if (!cookie)
+ return -EBUSY;
+ if (!page)
+ goto unlock;
+
+ SetPageHWPoison(page);
+
+ collect_procs_fsdax(page, mapping, index, &to_kill);
+ unmap_and_kill(&to_kill, page_to_pfn(page), mapping,
+ index, mf_flags);
+unlock:
+ dax_unlock_mapping_entry(mapping, index, cookie);
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mf_dax_kill_procs);
+#endif /* CONFIG_FS_DAX */
+
+#ifdef CONFIG_HUGETLB_PAGE
+/*
+ * Struct raw_hwp_page represents information about "raw error page",
+ * constructing a singly linked list originating from the ->private field of
+ * SUBPAGE_INDEX_HWPOISON-th tail page.
+ */
+struct raw_hwp_page {
+ struct llist_node node;
+ struct page *page;
+};
+
+static inline struct llist_head *raw_hwp_list_head(struct page *hpage)
+{
+ return (struct llist_head *)&page_private(hpage + SUBPAGE_INDEX_HWPOISON);
+}
+
+static unsigned long __free_raw_hwp_pages(struct page *hpage, bool move_flag)
+{
+ struct llist_head *head;
+ struct llist_node *t, *tnode;
+ unsigned long count = 0;
+
+ head = raw_hwp_list_head(hpage);
+ llist_for_each_safe(tnode, t, head->first) {
+ struct raw_hwp_page *p = container_of(tnode, struct raw_hwp_page, node);
+
+ if (move_flag)
+ SetPageHWPoison(p->page);
+ kfree(p);
+ count++;
+ }
+ llist_del_all(head);
+ return count;
+}
+
+static int hugetlb_set_page_hwpoison(struct page *hpage, struct page *page)
+{
+ struct llist_head *head;
+ struct raw_hwp_page *raw_hwp;
+ struct llist_node *t, *tnode;
+ int ret = TestSetPageHWPoison(hpage) ? -EHWPOISON : 0;
+
+ /*
+ * Once the hwpoisoned hugepage has lost reliable raw error info,
+ * there is little point in keeping additional error info precisely,
+ * so skip adding more raw error info.
+ */
+ if (HPageRawHwpUnreliable(hpage))
+ return -EHWPOISON;
+ head = raw_hwp_list_head(hpage);
+ llist_for_each_safe(tnode, t, head->first) {
+ struct raw_hwp_page *p = container_of(tnode, struct raw_hwp_page, node);
+
+ if (p->page == page)
+ return -EHWPOISON;
+ }
+
+ raw_hwp = kmalloc(sizeof(struct raw_hwp_page), GFP_ATOMIC);
+ if (raw_hwp) {
+ raw_hwp->page = page;
+ llist_add(&raw_hwp->node, head);
+ /* the first error event will be counted in action_result(). */
+ if (ret)
+ num_poisoned_pages_inc();
+ } else {
+ /*
+ * Failed to save raw error info. We no longer trace all
+ * hwpoisoned subpages, and we need to refuse to free/dissolve
+ * this hwpoisoned hugepage.
+ */
+ SetHPageRawHwpUnreliable(hpage);
+ /*
+ * Once HPageRawHwpUnreliable is set, raw_hwp_page is not
+ * used any more, so free it.
+ */
+ __free_raw_hwp_pages(hpage, false);
+ }
+ return ret;
+}
+
+static unsigned long free_raw_hwp_pages(struct page *hpage, bool move_flag)
+{
+ /*
+ * HPageVmemmapOptimized hugepages can't be freed because struct
+ * pages for tail pages are required but they don't exist.
+ */
+ if (move_flag && HPageVmemmapOptimized(hpage))
+ return 0;
+
+ /*
+ * HPageRawHwpUnreliable hugepages shouldn't be unpoisoned by
+ * definition.
+ */
+ if (HPageRawHwpUnreliable(hpage))
+ return 0;
+
+ return __free_raw_hwp_pages(hpage, move_flag);
+}
+
+void hugetlb_clear_page_hwpoison(struct page *hpage)
+{
+ if (HPageRawHwpUnreliable(hpage))
+ return;
+ ClearPageHWPoison(hpage);
+ free_raw_hwp_pages(hpage, true);
+}
+
/*
* Called from hugetlb code with hugetlb_lock held.
*
@@ -1529,10 +1806,11 @@ int __get_huge_page_for_hwpoison(unsigned long pfn, int flags)
count_increased = true;
} else {
ret = -EBUSY;
- goto out;
+ if (!(flags & MF_NO_RETRY))
+ goto out;
}
- if (TestSetPageHWPoison(head)) {
+ if (hugetlb_set_page_hwpoison(head, page)) {
ret = -EHWPOISON;
goto out;
}
@@ -1544,7 +1822,6 @@ out:
return ret;
}
-#ifdef CONFIG_HUGETLB_PAGE
/*
* Taking refcount of hugetlb pages needs extra care about race conditions
* with basic operations like hugepage allocation/free/demotion.
@@ -1557,7 +1834,6 @@ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb
struct page *p = pfn_to_page(pfn);
struct page *head;
unsigned long page_flags;
- bool retry = true;
*hugetlb = 1;
retry:
@@ -1566,15 +1842,15 @@ retry:
*hugetlb = 0;
return 0;
} else if (res == -EHWPOISON) {
- pr_err("Memory failure: %#lx: already hardware poisoned\n", pfn);
+ pr_err("%#lx: already hardware poisoned\n", pfn);
if (flags & MF_ACTION_REQUIRED) {
head = compound_head(p);
res = kill_accessing_process(current, page_to_pfn(head), flags);
}
return res;
} else if (res == -EBUSY) {
- if (retry) {
- retry = false;
+ if (!(flags & MF_NO_RETRY)) {
+ flags |= MF_NO_RETRY;
goto retry;
}
action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED);
@@ -1585,7 +1861,7 @@ retry:
lock_page(head);
if (hwpoison_filter(p)) {
- ClearPageHWPoison(head);
+ hugetlb_clear_page_hwpoison(head);
res = -EOPNOTSUPP;
goto out;
}
@@ -1596,10 +1872,11 @@ retry:
*/
if (res == 0) {
unlock_page(head);
- res = MF_FAILED;
- if (__page_handle_poison(p)) {
+ if (__page_handle_poison(p) >= 0) {
page_ref_inc(p);
res = MF_RECOVERED;
+ } else {
+ res = MF_FAILED;
}
action_result(pfn, MF_MSG_FREE_HUGE, res);
return res == MF_RECOVERED ? 0 : -EBUSY;
@@ -1607,21 +1884,6 @@ retry:
page_flags = head->flags;
- /*
- * TODO: hwpoison for pud-sized hugetlb doesn't work right now, so
- * simply disable it. In order to make it work properly, we need
- * make sure that:
- * - conversion of a pud that maps an error hugetlb into hwpoison
- * entry properly works, and
- * - other mm code walking over page table is aware of pud-aligned
- * hwpoison entries.
- */
- if (huge_page_size(page_hstate(head)) > PMD_SIZE) {
- action_result(pfn, MF_MSG_NON_PMD_HUGE, MF_IGNORED);
- res = -EBUSY;
- goto out;
- }
-
if (!hwpoison_user_mappings(p, pfn, flags, head)) {
action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
res = -EBUSY;
@@ -1633,23 +1895,24 @@ out:
unlock_page(head);
return res;
}
+
#else
static inline int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
{
return 0;
}
-#endif
+
+static inline unsigned long free_raw_hwp_pages(struct page *hpage, bool flag)
+{
+ return 0;
+}
+#endif /* CONFIG_HUGETLB_PAGE */
static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
struct dev_pagemap *pgmap)
{
struct page *page = pfn_to_page(pfn);
- unsigned long size = 0;
- struct to_kill *tk;
- LIST_HEAD(tokill);
- int rc = -EBUSY;
- loff_t start;
- dax_entry_t cookie;
+ int rc = -ENXIO;
if (flags & MF_COUNT_INCREASED)
/*
@@ -1658,73 +1921,24 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
put_page(page);
/* device metadata space is not recoverable */
- if (!pgmap_pfn_valid(pgmap, pfn)) {
- rc = -ENXIO;
+ if (!pgmap_pfn_valid(pgmap, pfn))
goto out;
- }
/*
- * Pages instantiated by device-dax (not filesystem-dax)
- * may be compound pages.
+ * Call the driver's implementation to handle the memory failure, otherwise
+ * fall back to the generic handler.
*/
- page = compound_head(page);
-
- /*
- * Prevent the inode from being freed while we are interrogating
- * the address_space, typically this would be handled by
- * lock_page(), but dax pages do not use the page lock. This
- * also prevents changes to the mapping of this pfn until
- * poison signaling is complete.
- */
- cookie = dax_lock_page(page);
- if (!cookie)
- goto out;
-
- if (hwpoison_filter(page)) {
- rc = -EOPNOTSUPP;
- goto unlock;
- }
-
- if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
+ if (pgmap->ops->memory_failure) {
+ rc = pgmap->ops->memory_failure(pgmap, pfn, 1, flags);
/*
- * TODO: Handle HMM pages which may need coordination
- * with device-side memory.
+ * Fall back to the generic handler too if the operation is not
+ * supported inside the driver/device/filesystem.
*/
- goto unlock;
+ if (rc != -EOPNOTSUPP)
+ goto out;
}
- /*
- * Use this flag as an indication that the dax page has been
- * remapped UC to prevent speculative consumption of poison.
- */
- SetPageHWPoison(page);
-
- /*
- * Unlike System-RAM there is no possibility to swap in a
- * different physical page at a given virtual address, so all
- * userspace consumption of ZONE_DEVICE memory necessitates
- * SIGBUS (i.e. MF_MUST_KILL)
- */
- flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
- collect_procs(page, &tokill, true);
-
- list_for_each_entry(tk, &tokill, nd)
- if (tk->size_shift)
- size = max(size, 1UL << tk->size_shift);
- if (size) {
- /*
- * Unmap the largest mapping to avoid breaking up
- * device-dax mappings which are constant size. The
- * actual size of the mapping being torn down is
- * communicated in siginfo, see kill_proc()
- */
- start = (page->index << PAGE_SHIFT) & ~(size - 1);
- unmap_mapping_range(page->mapping, start, size, 0);
- }
- kill_procs(&tokill, true, false, pfn, flags);
- rc = 0;
-unlock:
- dax_unlock_page(page, cookie);
+ rc = mf_generic_kill_procs(pfn, flags, pgmap);
out:
/* drop pgmap ref acquired in caller */
put_dev_pagemap(pgmap);
@@ -1787,8 +2001,7 @@ int memory_failure(unsigned long pfn, int flags)
goto unlock_mutex;
}
}
- pr_err("Memory failure: %#lx: memory outside kernel control\n",
- pfn);
+ pr_err("%#lx: memory outside kernel control\n", pfn);
res = -ENXIO;
goto unlock_mutex;
}
@@ -1799,8 +2012,7 @@ try_again:
goto unlock_mutex;
if (TestSetPageHWPoison(p)) {
- pr_err("Memory failure: %#lx: already hardware poisoned\n",
- pfn);
+ pr_err("%#lx: already hardware poisoned\n", pfn);
res = -EHWPOISON;
if (flags & MF_ACTION_REQUIRED)
res = kill_accessing_process(current, pfn, flags);
@@ -2016,7 +2228,7 @@ void memory_failure_queue(unsigned long pfn, int flags)
if (kfifo_put(&mf_cpu->fifo, entry))
schedule_work_on(smp_processor_id(), &mf_cpu->work);
else
- pr_err("Memory failure: buffer overflow when queuing memory failure at %#lx\n",
+ pr_err("buffer overflow when queuing memory failure at %#lx\n",
pfn);
spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
put_cpu_var(memory_failure_cpu);
@@ -2073,6 +2285,8 @@ static int __init memory_failure_init(void)
}
core_initcall(memory_failure_init);
+#undef pr_fmt
+#define pr_fmt(fmt) "" fmt
#define unpoison_pr_info(fmt, pfn, rs) \
({ \
if (__ratelimit(rs)) \
@@ -2097,6 +2311,7 @@ int unpoison_memory(unsigned long pfn)
struct page *p;
int ret = -EBUSY;
int freeit = 0;
+ unsigned long count = 1;
static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
@@ -2144,6 +2359,13 @@ int unpoison_memory(unsigned long pfn)
ret = get_hwpoison_page(p, MF_UNPOISON);
if (!ret) {
+ if (PageHuge(p)) {
+ count = free_raw_hwp_pages(page, false);
+ if (count == 0) {
+ ret = -EBUSY;
+ goto unlock_mutex;
+ }
+ }
ret = TestClearPageHWPoison(page) ? 0 : -EBUSY;
} else if (ret < 0) {
if (ret == -EHWPOISON) {
@@ -2152,6 +2374,13 @@ int unpoison_memory(unsigned long pfn)
unpoison_pr_info("Unpoison: failed to grab page %#lx\n",
pfn, &unpoison_rs);
} else {
+ if (PageHuge(p)) {
+ count = free_raw_hwp_pages(page, false);
+ if (count == 0) {
+ ret = -EBUSY;
+ goto unlock_mutex;
+ }
+ }
freeit = !!TestClearPageHWPoison(p);
put_page(page);
@@ -2164,7 +2393,7 @@ int unpoison_memory(unsigned long pfn)
unlock_mutex:
mutex_unlock(&mf_mutex);
if (!ret || freeit) {
- num_poisoned_pages_dec();
+ num_poisoned_pages_sub(count);
unpoison_pr_info("Unpoison: Software-unpoisoned page %#lx\n",
page_to_pfn(p), &unpoison_rs);
}
@@ -2178,7 +2407,7 @@ static bool isolate_page(struct page *page, struct list_head *pagelist)
bool lru = PageLRU(page);
if (PageHuge(page)) {
- isolated = isolate_huge_page(page, pagelist);
+ isolated = !isolate_hugetlb(page, pagelist);
} else {
if (lru)
isolated = !isolate_lru_page(page);
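The reworked memory_failure_dev_pagemap() above first defers to pgmap->ops->memory_failure() and only runs mf_generic_kill_procs() when the driver returns -EOPNOTSUPP. A driver opting in would wire the callback roughly as below; apart from the ops hook and its argument order (taken from the call site in this hunk), the helper names are placeholders:

    static int my_devmem_memory_failure(struct dev_pagemap *pgmap,
                                        unsigned long pfn,
                                        unsigned long nr_pages, int mf_flags)
    {
        /* Hypothetical: only handle pfns this device tracks itself,
         * let the core fall back to the generic path otherwise. */
        if (!my_devmem_tracks_pfn(pgmap, pfn))
            return -EOPNOTSUPP;
        return my_devmem_kill_users(pgmap, pfn, nr_pages, mf_flags);
    }

    static const struct dev_pagemap_ops my_devmem_ops = {
        .memory_failure = my_devmem_memory_failure,
    };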
diff --git a/mm/memory.c b/mm/memory.c
index 1c6027adc542..4ba73f5aa8bb 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -624,6 +624,14 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
if (is_zero_pfn(pfn))
return NULL;
if (pte_devmap(pte))
+ /*
+ * NOTE: New users of ZONE_DEVICE will not set pte_devmap()
+ * and will have refcounts incremented on their struct pages
+ * when they are inserted into PTEs, thus they are safe to
+ * return here. Legacy ZONE_DEVICE pages that set pte_devmap()
+ * do not have refcounts. Example of legacy ZONE_DEVICE is
+ * MEMORY_DEVICE_FS_DAX type in pmem or virtio_fs drivers.
+ */
return NULL;
print_bad_pte(vma, addr, pte, NULL);
@@ -736,7 +744,7 @@ static void restore_exclusive_pte(struct vm_area_struct *vma,
* Currently device exclusive access only supports anonymous
* memory so the entry shouldn't point to a filebacked page.
*/
- WARN_ON_ONCE(!PageAnon(page));
+ WARN_ON_ONCE(1);
set_pte_at(vma->vm_mm, address, ptep, pte);
@@ -1245,7 +1253,7 @@ vma_needs_copy(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
if (userfaultfd_wp(dst_vma))
return true;
- if (src_vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP))
+ if (src_vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
return true;
if (src_vma->anon_vma)
@@ -3020,7 +3028,7 @@ static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
balance_dirty_pages_ratelimited(mapping);
if (fpin) {
fput(fpin);
- return VM_FAULT_RETRY;
+ return VM_FAULT_COMPLETED;
}
}
@@ -4434,10 +4442,6 @@ late_initcall(fault_around_debugfs);
* It uses vm_ops->map_pages() to map the pages, which skips the page if it's
* not ready to be mapped: not up-to-date, locked, etc.
*
- * This function is called with the page table lock taken. In the split ptlock
- * case the page table lock only protects only those entries which belong to
- * the page table corresponding to the fault address.
- *
* This function doesn't cross the VMA boundaries, in order to call map_pages()
* only once.
*
@@ -4696,7 +4700,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
pte = pte_modify(old_pte, vma->vm_page_prot);
page = vm_normal_page(vma, vmf->address, pte);
- if (!page)
+ if (!page || is_zone_device_page(page))
goto out_map;
/* TODO: handle PTE-mapped THP */
@@ -4966,6 +4970,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
.gfp_mask = __get_fault_gfp_mask(vma),
};
struct mm_struct *mm = vma->vm_mm;
+ unsigned long vm_flags = vma->vm_flags;
pgd_t *pgd;
p4d_t *p4d;
vm_fault_t ret;
@@ -4979,7 +4984,8 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
if (!vmf.pud)
return VM_FAULT_OOM;
retry_pud:
- if (pud_none(*vmf.pud) && __transparent_hugepage_enabled(vma)) {
+ if (pud_none(*vmf.pud) &&
+ hugepage_vma_check(vma, vm_flags, false, true)) {
ret = create_huge_pud(&vmf);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
@@ -5012,7 +5018,8 @@ retry_pud:
if (pud_trans_unstable(vmf.pud))
goto retry_pud;
- if (pmd_none(*vmf.pmd) && __transparent_hugepage_enabled(vma)) {
+ if (pmd_none(*vmf.pmd) &&
+ hugepage_vma_check(vma, vm_flags, false, true)) {
ret = create_huge_pmd(&vmf);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
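The vm_normal_page() hunks here and in the mempolicy, mlock, mprotect and migrate changes below all apply the same filter: generic page-table walkers now skip ZONE_DEVICE pages. The recurring shape, using only calls that appear in this patch, is:

    page = vm_normal_page(vma, addr, *pte);
    if (!page || is_zone_device_page(page))
        continue;   /* no struct page, or device memory: not for this walker */
    /* ... operate on ordinary system RAM pages only ... */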
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 1213d0c67a53..fad6d1f2262a 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -43,30 +43,22 @@
#include "shuffle.h"
#ifdef CONFIG_MHP_MEMMAP_ON_MEMORY
-static int memmap_on_memory_set(const char *val, const struct kernel_param *kp)
-{
- if (hugetlb_optimize_vmemmap_enabled())
- return 0;
- return param_set_bool(val, kp);
-}
-
-static const struct kernel_param_ops memmap_on_memory_ops = {
- .flags = KERNEL_PARAM_OPS_FL_NOARG,
- .set = memmap_on_memory_set,
- .get = param_get_bool,
-};
-
/*
* memory_hotplug.memmap_on_memory parameter
*/
static bool memmap_on_memory __ro_after_init;
-module_param_cb(memmap_on_memory, &memmap_on_memory_ops, &memmap_on_memory, 0444);
+module_param(memmap_on_memory, bool, 0444);
MODULE_PARM_DESC(memmap_on_memory, "Enable memmap on memory for memory hotplug");
-bool mhp_memmap_on_memory(void)
+static inline bool mhp_memmap_on_memory(void)
{
return memmap_on_memory;
}
+#else
+static inline bool mhp_memmap_on_memory(void)
+{
+ return false;
+}
#endif
enum {
@@ -237,8 +229,7 @@ static void release_memory_resource(struct resource *res)
kfree(res);
}
-static int check_pfn_span(unsigned long pfn, unsigned long nr_pages,
- const char *reason)
+static int check_pfn_span(unsigned long pfn, unsigned long nr_pages)
{
/*
* Disallow all operations smaller than a sub-section and only
@@ -255,12 +246,8 @@ static int check_pfn_span(unsigned long pfn, unsigned long nr_pages,
min_align = PAGES_PER_SUBSECTION;
else
min_align = PAGES_PER_SECTION;
- if (!IS_ALIGNED(pfn, min_align)
- || !IS_ALIGNED(nr_pages, min_align)) {
- WARN(1, "Misaligned __%s_pages start: %#lx end: #%lx\n",
- reason, pfn, pfn + nr_pages - 1);
+ if (!IS_ALIGNED(pfn | nr_pages, min_align))
return -EINVAL;
- }
return 0;
}
@@ -337,9 +324,10 @@ int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
altmap->alloc = 0;
}
- err = check_pfn_span(pfn, nr_pages, "add");
- if (err)
- return err;
+ if (check_pfn_span(pfn, nr_pages)) {
+ WARN(1, "Misaligned %s start: %#lx end: #%lx\n", __func__, pfn, pfn + nr_pages - 1);
+ return -EINVAL;
+ }
for (; pfn < end_pfn; pfn += cur_nr_pages) {
/* Select all remaining pages up to the next section boundary */
@@ -536,8 +524,10 @@ void __remove_pages(unsigned long pfn, unsigned long nr_pages,
map_offset = vmem_altmap_offset(altmap);
- if (check_pfn_span(pfn, nr_pages, "remove"))
+ if (check_pfn_span(pfn, nr_pages)) {
+ WARN(1, "Misaligned %s start: %#lx end: #%lx\n", __func__, pfn, pfn + nr_pages - 1);
return;
+ }
for (; pfn < end_pfn; pfn += cur_nr_pages) {
cond_resched();
@@ -672,12 +662,18 @@ static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned lon
}
+#ifdef CONFIG_ZONE_DEVICE
static void section_taint_zone_device(unsigned long pfn)
{
struct mem_section *ms = __pfn_to_section(pfn);
ms->section_mem_map |= SECTION_TAINT_ZONE_DEVICE;
}
+#else
+static inline void section_taint_zone_device(unsigned long pfn)
+{
+}
+#endif
/*
* Associate the pfn range with the given zone, initializing the memmaps
@@ -936,7 +932,7 @@ static struct zone *auto_movable_zone_for_pfn(int nid,
if (!page)
continue;
/* If anything is !MOVABLE online the rest !MOVABLE. */
- if (page_zonenum(page) != ZONE_MOVABLE)
+ if (!is_zone_movable_page(page))
goto kernel_zone;
online_pages += PAGES_PER_SECTION;
}
@@ -1031,7 +1027,7 @@ int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
struct zone *zone)
{
unsigned long end_pfn = pfn + nr_pages;
- int ret;
+ int ret, i;
ret = kasan_add_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
if (ret)
@@ -1039,6 +1035,9 @@ int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_UNMOVABLE);
+ for (i = 0; i < nr_pages; i++)
+ SetPageVmemmapSelfHosted(pfn_to_page(pfn + i));
+
/*
* It might be that the vmemmap_pages fully span sections. If that is
* the case, mark those sections online here as otherwise they will be
@@ -1643,7 +1642,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
if (PageHuge(page)) {
pfn = page_to_pfn(head) + compound_nr(head) - 1;
- isolate_huge_page(head, &source);
+ isolate_hugetlb(head, &source);
continue;
} else if (PageTransHuge(page))
pfn = page_to_pfn(head) + thp_nr_pages(page) - 1;
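check_pfn_span() now folds its two alignment checks into one test, IS_ALIGNED(pfn | nr_pages, min_align); for a power-of-two min_align this is equivalent, since any low bit set in either value survives the OR. A stand-alone illustration (plain userspace C, values are illustrative):

    #include <assert.h>
    #include <stdio.h>

    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

    int main(void)
    {
        unsigned long min_align = 512;   /* e.g. PAGES_PER_SUBSECTION */
        unsigned long pfn = 1536, nr = 1024;

        assert(IS_ALIGNED(pfn | nr, min_align));    /* both multiples of 512 */

        nr = 1030;                                  /* misaligned length */
        printf("aligned: %d\n", IS_ALIGNED(pfn | nr, min_align));  /* prints 0 */
        return 0;
    }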
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index d39b01fd52fe..b73d3248d976 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -465,9 +465,8 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
}
page = pmd_page(*pmd);
if (is_huge_zero_page(page)) {
- spin_unlock(ptl);
walk->action = ACTION_CONTINUE;
- goto out;
+ goto unlock;
}
if (!queue_pages_required(page, qp))
goto unlock;
@@ -484,7 +483,6 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
ret = -EIO;
unlock:
spin_unlock(ptl);
-out:
return ret;
}
@@ -523,7 +521,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
if (!pte_present(*pte))
continue;
page = vm_normal_page(vma, addr, *pte);
- if (!page)
+ if (!page || is_zone_device_page(page))
continue;
/*
* vm_normal_page() filters out zero pages, but there might
@@ -602,7 +600,7 @@ static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
if (flags & (MPOL_MF_MOVE_ALL) ||
(flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) {
- if (!isolate_huge_page(page, qp->pagelist) &&
+ if (isolate_hugetlb(page, qp->pagelist) &&
(flags & MPOL_MF_STRICT))
/*
* Failed to isolate page but allow migrating pages
@@ -1388,7 +1386,7 @@ static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
unsigned long bits = min_t(unsigned long, maxnode, BITS_PER_LONG);
unsigned long t;
- if (get_bitmap(&t, &nmask[maxnode / BITS_PER_LONG], bits))
+ if (get_bitmap(&t, &nmask[(maxnode - 1) / BITS_PER_LONG], bits))
return -EFAULT;
if (maxnode - bits >= MAX_NUMNODES) {
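The get_nodes() change indexes the last user-supplied nodemask word as (maxnode - 1) / BITS_PER_LONG instead of maxnode / BITS_PER_LONG; the old expression reads one word past the buffer exactly when maxnode is a multiple of BITS_PER_LONG. A quick arithmetic check (userspace C, not kernel code):

    #include <stdio.h>

    int main(void)
    {
        unsigned long bits_per_long = 64;
        unsigned long maxnodes[] = { 65, 127, 128, 129 };

        for (int i = 0; i < 4; i++)
            printf("maxnode=%3lu  old index=%lu  fixed index=%lu\n",
                   maxnodes[i], maxnodes[i] / bits_per_long,
                   (maxnodes[i] - 1) / bits_per_long);
        /* maxnode=128: the old index 2 is past the end of a 2-word
         * (128-bit) mask, the fixed index 1 is its last valid word. */
        return 0;
    }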
diff --git a/mm/mempool.c b/mm/mempool.c
index b933d0fc21b8..96488b13a1ef 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -379,7 +379,7 @@ void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
gfp_t gfp_temp;
VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
- might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
+ might_alloc(gfp_mask);
gfp_mask |= __GFP_NOMEMALLOC; /* don't allocate emergency reserves */
gfp_mask |= __GFP_NORETRY; /* don't loop in __alloc_pages */
diff --git a/mm/memremap.c b/mm/memremap.c
index 745eea0f99c3..58b20c3c300b 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -141,10 +141,10 @@ void memunmap_pages(struct dev_pagemap *pgmap)
for (i = 0; i < pgmap->nr_range; i++)
percpu_ref_put_many(&pgmap->ref, pfn_len(pgmap, i));
wait_for_completion(&pgmap->done);
- percpu_ref_exit(&pgmap->ref);
for (i = 0; i < pgmap->nr_range; i++)
pageunmap_range(pgmap, i);
+ percpu_ref_exit(&pgmap->ref);
WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
devmap_managed_enable_put(pgmap);
@@ -279,8 +279,8 @@ err_pfn_remap:
/*
- * Not device managed version of dev_memremap_pages, undone by
- * memunmap_pages(). Please use dev_memremap_pages if you have a struct
+ * Not device managed version of devm_memremap_pages, undone by
+ * memunmap_pages(). Please use devm_memremap_pages if you have a struct
* device available.
*/
void *memremap_pages(struct dev_pagemap *pgmap, int nid)
@@ -315,6 +315,16 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
return ERR_PTR(-EINVAL);
}
break;
+ case MEMORY_DEVICE_COHERENT:
+ if (!pgmap->ops->page_free) {
+ WARN(1, "Missing page_free method\n");
+ return ERR_PTR(-EINVAL);
+ }
+ if (!pgmap->owner) {
+ WARN(1, "Missing owner\n");
+ return ERR_PTR(-EINVAL);
+ }
+ break;
case MEMORY_DEVICE_FS_DAX:
if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) {
WARN(1, "File system DAX not supported\n");
diff --git a/mm/migrate.c b/mm/migrate.c
index 1b4b977809a1..6a1597c92261 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -132,7 +132,7 @@ static void putback_movable_page(struct page *page)
*
* This function shall be used whenever the isolated pageset has been
* built from lru, balloon, hugetlbfs page. See isolate_migratepages_range()
- * and isolate_huge_page().
+ * and isolate_hugetlb().
*/
void putback_movable_pages(struct list_head *l)
{
@@ -314,13 +314,28 @@ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
__migration_entry_wait(mm, ptep, ptl);
}
-void migration_entry_wait_huge(struct vm_area_struct *vma,
- struct mm_struct *mm, pte_t *pte)
+#ifdef CONFIG_HUGETLB_PAGE
+void __migration_entry_wait_huge(pte_t *ptep, spinlock_t *ptl)
{
- spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
- __migration_entry_wait(mm, pte, ptl);
+ pte_t pte;
+
+ spin_lock(ptl);
+ pte = huge_ptep_get(ptep);
+
+ if (unlikely(!is_hugetlb_entry_migration(pte)))
+ spin_unlock(ptl);
+ else
+ migration_entry_wait_on_locked(pte_to_swp_entry(pte), NULL, ptl);
}
+void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte)
+{
+ spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, pte);
+
+ __migration_entry_wait_huge(pte, ptl);
+}
+#endif
+
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
{
@@ -1132,15 +1147,10 @@ static int unmap_and_move(new_page_t get_new_page,
return -ENOSYS;
if (page_count(page) == 1) {
- /* page was freed from under us. So we are done. */
+ /* Page was freed from under us. So we are done. */
ClearPageActive(page);
ClearPageUnevictable(page);
- if (unlikely(__PageMovable(page))) {
- lock_page(page);
- if (!PageMovable(page))
- ClearPageIsolated(page);
- unlock_page(page);
- }
+ /* free_pages_prepare() will clear PG_isolated. */
goto out;
}
@@ -1662,7 +1672,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
goto out;
err = -ENOENT;
- if (!page)
+ if (!page || is_zone_device_page(page))
goto out;
err = 0;
@@ -1675,8 +1685,9 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
if (PageHuge(page)) {
if (PageHead(page)) {
- isolate_huge_page(page, pagelist);
- err = 1;
+ err = isolate_hugetlb(page, pagelist);
+ if (!err)
+ err = 1;
}
} else {
struct page *head;
@@ -1852,7 +1863,7 @@ static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
if (IS_ERR(page))
goto set_status;
- if (page) {
+ if (page && !is_zone_device_page(page)) {
err = page_to_nid(page);
put_page(page);
} else {
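A return-convention change runs through the hunks above and below: isolate_huge_page() returned a bool (true when the page was isolated), while its replacement isolate_hugetlb() returns 0 on success and a non-zero value on failure, so call sites negate or propagate the result. In sketch form:

    /* before: bool, true == isolated */
    isolated = isolate_huge_page(page, &pagelist);

    /* after: int, 0 == isolated, non-zero == failure */
    isolated = !isolate_hugetlb(page, &pagelist);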
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index 5dd97c39ca6a..27fb37d65476 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -148,15 +148,21 @@ again:
if (is_writable_device_private_entry(entry))
mpfn |= MIGRATE_PFN_WRITE;
} else {
- if (!(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM))
- goto next;
pfn = pte_pfn(pte);
- if (is_zero_pfn(pfn)) {
+ if (is_zero_pfn(pfn) &&
+ (migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) {
mpfn = MIGRATE_PFN_MIGRATE;
migrate->cpages++;
goto next;
}
page = vm_normal_page(migrate->vma, addr, pte);
+ if (page && !is_zone_device_page(page) &&
+ !(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM))
+ goto next;
+ else if (page && is_device_coherent_page(page) &&
+ (!(migrate->flags & MIGRATE_VMA_SELECT_DEVICE_COHERENT) ||
+ page->pgmap->owner != migrate->pgmap_owner))
+ goto next;
mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
}
@@ -518,7 +524,7 @@ EXPORT_SYMBOL(migrate_vma_setup);
* handle_pte_fault()
* do_anonymous_page()
* to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
- * private page.
+ * private or coherent page.
*/
static void migrate_vma_insert_page(struct migrate_vma *migrate,
unsigned long addr,
@@ -594,11 +600,8 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
page_to_pfn(page));
entry = swp_entry_to_pte(swp_entry);
} else {
- /*
- * For now we only support migrating to un-addressable device
- * memory.
- */
- if (is_zone_device_page(page)) {
+ if (is_zone_device_page(page) &&
+ !is_device_coherent_page(page)) {
pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
goto abort;
}
@@ -683,6 +686,12 @@ void migrate_vma_pages(struct migrate_vma *migrate)
}
if (!page) {
+ /*
+ * The only time there is no vma is when called from
+ * migrate_device_coherent_page(). However this isn't
+ * called if the page could not be unmapped.
+ */
+ VM_BUG_ON(!migrate->vma);
if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE))
continue;
if (!notified) {
@@ -701,10 +710,11 @@ void migrate_vma_pages(struct migrate_vma *migrate)
mapping = page_mapping(page);
- if (is_device_private_page(newpage)) {
+ if (is_device_private_page(newpage) ||
+ is_device_coherent_page(newpage)) {
/*
- * For now only support private anonymous when migrating
- * to un-addressable device memory.
+ * For now only support anonymous memory migrating to
+ * device private or coherent memory.
*/
if (mapping) {
migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
@@ -791,3 +801,49 @@ void migrate_vma_finalize(struct migrate_vma *migrate)
}
}
EXPORT_SYMBOL(migrate_vma_finalize);
+
+/*
+ * Migrate a device coherent page back to normal memory. The caller should have
+ * a reference on page which will be copied to the new page if migration is
+ * successful or dropped on failure.
+ */
+int migrate_device_coherent_page(struct page *page)
+{
+ unsigned long src_pfn, dst_pfn = 0;
+ struct migrate_vma args;
+ struct page *dpage;
+
+ WARN_ON_ONCE(PageCompound(page));
+
+ lock_page(page);
+ src_pfn = migrate_pfn(page_to_pfn(page)) | MIGRATE_PFN_MIGRATE;
+ args.src = &src_pfn;
+ args.dst = &dst_pfn;
+ args.cpages = 1;
+ args.npages = 1;
+ args.vma = NULL;
+
+ /*
+ * We don't have a VMA and don't need to walk the page tables to find
+ * the source page. So call migrate_vma_unmap() directly to unmap the
+ * page as migrate_vma_setup() will fail if args.vma == NULL.
+ */
+ migrate_vma_unmap(&args);
+ if (!(src_pfn & MIGRATE_PFN_MIGRATE))
+ return -EBUSY;
+
+ dpage = alloc_page(GFP_USER | __GFP_NOWARN);
+ if (dpage) {
+ lock_page(dpage);
+ dst_pfn = migrate_pfn(page_to_pfn(dpage));
+ }
+
+ migrate_vma_pages(&args);
+ if (src_pfn & MIGRATE_PFN_MIGRATE)
+ copy_highpage(dpage, page);
+ migrate_vma_finalize(&args);
+
+ if (src_pfn & MIGRATE_PFN_MIGRATE)
+ return 0;
+ return -EBUSY;
+}
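migrate_device_coherent_page() above drives the migrate_vma machinery without a VMA. A conventional caller that wants to select device-coherent pages out of a user mapping goes through migrate_vma_setup() with the new flag instead; a sketch for a single page (owner cookie and range are illustrative, field names per struct migrate_vma):

    unsigned long src_pfn = 0, dst_pfn = 0;
    struct migrate_vma args = {
        .vma         = vma,
        .start       = addr,
        .end         = addr + PAGE_SIZE,
        .src         = &src_pfn,
        .dst         = &dst_pfn,
        .pgmap_owner = my_dev,   /* must match page->pgmap->owner */
        .flags       = MIGRATE_VMA_SELECT_DEVICE_COHERENT,
    };
    int ret = migrate_vma_setup(&args);

    if (ret)
        return ret;
    /* allocate and lock a destination page, fill dst_pfn ... */
    migrate_vma_pages(&args);
    /* copy data for entries still flagged MIGRATE_PFN_MIGRATE ... */
    migrate_vma_finalize(&args);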
diff --git a/mm/mlock.c b/mm/mlock.c
index 716caf851043..b14e929084cc 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -333,7 +333,7 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
if (!pte_present(*pte))
continue;
page = vm_normal_page(vma, addr, *pte);
- if (!page)
+ if (!page || is_zone_device_page(page))
continue;
if (PageTransCompound(page))
continue;
diff --git a/mm/mmap.c b/mm/mmap.c
index 61e6135c54ef..9d780f415be3 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -81,53 +81,6 @@ static void unmap_region(struct mm_struct *mm,
struct vm_area_struct *vma, struct vm_area_struct *prev,
unsigned long start, unsigned long end);
-/* description of effects of mapping type and prot in current implementation.
- * this is due to the limited x86 page protection hardware. The expected
- * behavior is in parens:
- *
- * map_type prot
- * PROT_NONE PROT_READ PROT_WRITE PROT_EXEC
- * MAP_SHARED r: (no) no r: (yes) yes r: (no) yes r: (no) yes
- * w: (no) no w: (no) no w: (yes) yes w: (no) no
- * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
- *
- * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes
- * w: (no) no w: (no) no w: (copy) copy w: (no) no
- * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
- *
- * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
- * MAP_PRIVATE (with Enhanced PAN supported):
- * r: (no) no
- * w: (no) no
- * x: (yes) yes
- */
-pgprot_t protection_map[16] __ro_after_init = {
- [VM_NONE] = __P000,
- [VM_READ] = __P001,
- [VM_WRITE] = __P010,
- [VM_WRITE | VM_READ] = __P011,
- [VM_EXEC] = __P100,
- [VM_EXEC | VM_READ] = __P101,
- [VM_EXEC | VM_WRITE] = __P110,
- [VM_EXEC | VM_WRITE | VM_READ] = __P111,
- [VM_SHARED] = __S000,
- [VM_SHARED | VM_READ] = __S001,
- [VM_SHARED | VM_WRITE] = __S010,
- [VM_SHARED | VM_WRITE | VM_READ] = __S011,
- [VM_SHARED | VM_EXEC] = __S100,
- [VM_SHARED | VM_EXEC | VM_READ] = __S101,
- [VM_SHARED | VM_EXEC | VM_WRITE] = __S110,
- [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __S111
-};
-
-#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT
-pgprot_t vm_get_page_prot(unsigned long vm_flags)
-{
- return protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
-}
-EXPORT_SYMBOL(vm_get_page_prot);
-#endif /* CONFIG_ARCH_HAS_VM_GET_PAGE_PROT */
-
static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
{
return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
@@ -1693,8 +1646,11 @@ int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
pgprot_val(vm_pgprot_modify(vm_page_prot, vm_flags)))
return 0;
- /* Do we need to track softdirty? */
- if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY))
+ /*
+ * Do we need to track softdirty? hugetlb does not support softdirty
+ * tracking yet.
+ */
+ if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
return 1;
/* Specialty mapping? */
@@ -1894,7 +1850,6 @@ unmap_and_free_vma:
/* Undo any partial mapping done by a device driver. */
unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
- charged = 0;
if (vm_flags & VM_SHARED)
mapping_unmap_writable(file->f_mapping);
free_vma:
@@ -2588,7 +2543,6 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
vma = find_vma_prev(mm, addr, &prev);
if (vma && (vma->vm_start <= addr))
return vma;
- /* don't alter vm_end if the coredump is running */
if (!prev || expand_stack(prev, addr))
return NULL;
if (prev->vm_flags & VM_LOCKED)
@@ -2944,7 +2898,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
unsigned long ret = -EINVAL;
struct file *file;
- pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/vm/remap_file_pages.rst.\n",
+ pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/mm/remap_file_pages.rst.\n",
current->comm, current->pid);
if (prot)
diff --git a/mm/mprotect.c b/mm/mprotect.c
index ba5592655ee3..bc6bddd156ca 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -38,6 +38,39 @@
#include "internal.h"
+static inline bool can_change_pte_writable(struct vm_area_struct *vma,
+ unsigned long addr, pte_t pte)
+{
+ struct page *page;
+
+ VM_BUG_ON(!(vma->vm_flags & VM_WRITE) || pte_write(pte));
+
+ if (pte_protnone(pte) || !pte_dirty(pte))
+ return false;
+
+ /* Do we need write faults for softdirty tracking? */
+ if (vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte))
+ return false;
+
+ /* Do we need write faults for uffd-wp tracking? */
+ if (userfaultfd_pte_wp(vma, pte))
+ return false;
+
+ if (!(vma->vm_flags & VM_SHARED)) {
+ /*
+ * We can only special-case on exclusive anonymous pages,
+ * because we know that our write-fault handler similarly would
+ * map them writable without any additional checks while holding
+ * the PT lock.
+ */
+ page = vm_normal_page(vma, addr, pte);
+ if (!page || !PageAnon(page) || !PageAnonExclusive(page))
+ return false;
+ }
+
+ return true;
+}
+
static unsigned long change_pte_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
unsigned long end, pgprot_t newprot, unsigned long cp_flags)
@@ -46,7 +79,6 @@ static unsigned long change_pte_range(struct mmu_gather *tlb,
spinlock_t *ptl;
unsigned long pages = 0;
int target_node = NUMA_NO_NODE;
- bool dirty_accountable = cp_flags & MM_CP_DIRTY_ACCT;
bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
@@ -95,7 +127,7 @@ static unsigned long change_pte_range(struct mmu_gather *tlb,
continue;
page = vm_normal_page(vma, addr, oldpte);
- if (!page || PageKsm(page))
+ if (!page || is_zone_device_page(page) || PageKsm(page))
continue;
/* Also skip shared copy-on-write pages */
@@ -137,31 +169,38 @@ static unsigned long change_pte_range(struct mmu_gather *tlb,
ptent = pte_wrprotect(ptent);
ptent = pte_mkuffd_wp(ptent);
} else if (uffd_wp_resolve) {
- /*
- * Leave the write bit to be handled
- * by PF interrupt handler, then
- * things like COW could be properly
- * handled.
- */
ptent = pte_clear_uffd_wp(ptent);
}
- /* Avoid taking write faults for known dirty pages */
- if (dirty_accountable && pte_dirty(ptent) &&
- (pte_soft_dirty(ptent) ||
- !(vma->vm_flags & VM_SOFTDIRTY))) {
+ /*
+ * In some writable, shared mappings, we might want
+ * to catch actual write access -- see
+ * vma_wants_writenotify().
+ *
+ * In all writable, private mappings, we have to
+ * properly handle COW.
+ *
+ * In both cases, we can sometimes still change PTEs
+ * writable and avoid the write-fault handler, for
+ * example, if a PTE is already dirty and no other
+ * COW or special handling is required.
+ */
+ if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) &&
+ !pte_write(ptent) &&
+ can_change_pte_writable(vma, addr, ptent))
ptent = pte_mkwrite(ptent);
- }
+
ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
if (pte_needs_flush(oldpte, ptent))
tlb_flush_pte_range(tlb, addr, PAGE_SIZE);
pages++;
} else if (is_swap_pte(oldpte)) {
swp_entry_t entry = pte_to_swp_entry(oldpte);
- struct page *page = pfn_swap_entry_to_page(entry);
pte_t newpte;
if (is_writable_migration_entry(entry)) {
+ struct page *page = pfn_swap_entry_to_page(entry);
+
/*
* A protection check is difficult so
* just be safe and disable write
@@ -505,9 +544,9 @@ mprotect_fixup(struct mmu_gather *tlb, struct vm_area_struct *vma,
unsigned long oldflags = vma->vm_flags;
long nrpages = (end - start) >> PAGE_SHIFT;
unsigned long charged = 0;
+ bool try_change_writable;
pgoff_t pgoff;
int error;
- int dirty_accountable = 0;
if (newflags == oldflags) {
*pprev = vma;
@@ -583,11 +622,20 @@ success:
* held in write mode.
*/
vma->vm_flags = newflags;
- dirty_accountable = vma_wants_writenotify(vma, vma->vm_page_prot);
+ /*
+ * We want to check manually if we can change individual PTEs writable
+ * if we can't do that automatically for all PTEs in a mapping. For
+ * private mappings, that's always the case when we have write
+ * permissions as we properly have to handle COW.
+ */
+ if (vma->vm_flags & VM_SHARED)
+ try_change_writable = vma_wants_writenotify(vma, vma->vm_page_prot);
+ else
+ try_change_writable = !!(vma->vm_flags & VM_WRITE);
vma_set_page_prot(vma);
change_protection(tlb, vma, start, end, vma->vm_page_prot,
- dirty_accountable ? MM_CP_DIRTY_ACCT : 0);
+ try_change_writable ? MM_CP_TRY_CHANGE_WRITABLE : 0);
/*
* Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
@@ -616,7 +664,7 @@ static int do_mprotect_pkey(unsigned long start, size_t len,
{
unsigned long nstart, end, tmp, reqprot;
struct vm_area_struct *vma, *prev;
- int error = -EINVAL;
+ int error;
const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
const bool rier = (current->personality & READ_IMPLIES_EXEC) &&
(prot & PROT_READ);
diff --git a/mm/nommu.c b/mm/nommu.c
index 9d7afc2d959e..e819cbc21b39 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -500,7 +500,7 @@ static void delete_nommu_region(struct vm_region *region)
static void free_page_series(unsigned long from, unsigned long to)
{
for (; from < to; from += PAGE_SIZE) {
- struct page *page = virt_to_page(from);
+ struct page *page = virt_to_page((void *)from);
atomic_long_dec(&mmap_pages_allocated);
put_page(page);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index d0d466a5c804..032a7bf8d259 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2892,6 +2892,7 @@ static void wb_inode_writeback_start(struct bdi_writeback *wb)
static void wb_inode_writeback_end(struct bdi_writeback *wb)
{
+ unsigned long flags;
atomic_dec(&wb->writeback_inodes);
/*
* Make sure estimate of writeback throughput gets updated after
@@ -2900,7 +2901,10 @@ static void wb_inode_writeback_end(struct bdi_writeback *wb)
* that if multiple inodes end writeback at a similar time, they get
* batched into one bandwidth update.
*/
- queue_delayed_work(bdi_wq, &wb->bw_dwork, BANDWIDTH_INTERVAL);
+ spin_lock_irqsave(&wb->work_lock, flags);
+ if (test_bit(WB_registered, &wb->state))
+ queue_delayed_work(bdi_wq, &wb->bw_dwork, BANDWIDTH_INTERVAL);
+ spin_unlock_irqrestore(&wb->work_lock, flags);
}
bool __folio_end_writeback(struct folio *folio)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 170bbf144cfa..e5486d47406e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -126,13 +126,97 @@ typedef int __bitwise fpi_t;
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8)
-struct pagesets {
- local_lock_t lock;
-};
-static DEFINE_PER_CPU(struct pagesets, pagesets) = {
- .lock = INIT_LOCAL_LOCK(lock),
-};
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
+/*
+ * On SMP, spin_trylock is sufficient protection.
+ * On PREEMPT_RT, spin_trylock is equivalent on both SMP and UP.
+ */
+#define pcp_trylock_prepare(flags) do { } while (0)
+#define pcp_trylock_finish(flag) do { } while (0)
+#else
+
+/* UP spin_trylock always succeeds so disable IRQs to prevent re-entrancy. */
+#define pcp_trylock_prepare(flags) local_irq_save(flags)
+#define pcp_trylock_finish(flags) local_irq_restore(flags)
+#endif
+
+/*
+ * Locking a pcp requires a PCP lookup followed by a spinlock. To avoid
+ * a migration causing the wrong PCP to be locked and remote memory being
+ * potentially allocated, pin the task to the CPU for the lookup+lock.
+ * preempt_disable is used on !RT because it is faster than migrate_disable.
+ * migrate_disable is used on RT because otherwise RT spinlock usage is
+ * interfered with and a high priority task cannot preempt the allocator.
+ */
+#ifndef CONFIG_PREEMPT_RT
+#define pcpu_task_pin() preempt_disable()
+#define pcpu_task_unpin() preempt_enable()
+#else
+#define pcpu_task_pin() migrate_disable()
+#define pcpu_task_unpin() migrate_enable()
+#endif
+/*
+ * Generic helper to look up a per-cpu variable with an embedded spinlock.
+ * Return value should be used with equivalent unlock helper.
+ */
+#define pcpu_spin_lock(type, member, ptr) \
+({ \
+ type *_ret; \
+ pcpu_task_pin(); \
+ _ret = this_cpu_ptr(ptr); \
+ spin_lock(&_ret->member); \
+ _ret; \
+})
+
+#define pcpu_spin_lock_irqsave(type, member, ptr, flags) \
+({ \
+ type *_ret; \
+ pcpu_task_pin(); \
+ _ret = this_cpu_ptr(ptr); \
+ spin_lock_irqsave(&_ret->member, flags); \
+ _ret; \
+})
+
+#define pcpu_spin_trylock_irqsave(type, member, ptr, flags) \
+({ \
+ type *_ret; \
+ pcpu_task_pin(); \
+ _ret = this_cpu_ptr(ptr); \
+ if (!spin_trylock_irqsave(&_ret->member, flags)) { \
+ pcpu_task_unpin(); \
+ _ret = NULL; \
+ } \
+ _ret; \
+})
+
+#define pcpu_spin_unlock(member, ptr) \
+({ \
+ spin_unlock(&ptr->member); \
+ pcpu_task_unpin(); \
+})
+
+#define pcpu_spin_unlock_irqrestore(member, ptr, flags) \
+({ \
+ spin_unlock_irqrestore(&ptr->member, flags); \
+ pcpu_task_unpin(); \
+})
+
+/* struct per_cpu_pages specific helpers. */
+#define pcp_spin_lock(ptr) \
+ pcpu_spin_lock(struct per_cpu_pages, lock, ptr)
+
+#define pcp_spin_lock_irqsave(ptr, flags) \
+ pcpu_spin_lock_irqsave(struct per_cpu_pages, lock, ptr, flags)
+
+#define pcp_spin_trylock_irqsave(ptr, flags) \
+ pcpu_spin_trylock_irqsave(struct per_cpu_pages, lock, ptr, flags)
+
+#define pcp_spin_unlock(ptr) \
+ pcpu_spin_unlock(lock, ptr)
+
+#define pcp_spin_unlock_irqrestore(ptr, flags) \
+ pcpu_spin_unlock_irqrestore(lock, ptr, flags)
#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
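The macros above replace the old pagesets local_lock with a spinlock embedded in each per-cpu pageset, lockable via trylock so that a contended path can fall back to the zone lock. The intended pattern, as adopted by free_unref_page() and rmqueue_pcplist() later in this patch, is:

    unsigned long flags;
    unsigned long __maybe_unused UP_flags;
    struct per_cpu_pages *pcp;

    pcp_trylock_prepare(UP_flags);   /* disables IRQs only on !SMP */
    pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags);
    if (pcp) {
        /* fast path: this CPU's pcp lists, IRQs disabled */
        pcp_spin_unlock_irqrestore(pcp, flags);
    } else {
        /* contended (e.g. remote drain): use the zone->lock path instead */
    }
    pcp_trylock_finish(UP_flags);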
@@ -151,13 +235,7 @@ DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif
-/* work_structs for global per-cpu drains */
-struct pcpu_drain {
- struct zone *zone;
- struct work_struct work;
-};
static DEFINE_MUTEX(pcpu_drain_mutex);
-static DEFINE_PER_CPU(struct pcpu_drain, pcpu_drain);
#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
volatile unsigned long latent_entropy __latent_entropy;
@@ -524,7 +602,7 @@ void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
{
unsigned long *bitmap;
unsigned long bitidx, word_bitidx;
- unsigned long old_word, word;
+ unsigned long word;
BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));
@@ -540,12 +618,8 @@ void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
flags <<= bitidx;
word = READ_ONCE(bitmap[word_bitidx]);
- for (;;) {
- old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
- if (word == old_word)
- break;
- word = old_word;
- }
+ do {
+ } while (!try_cmpxchg(&bitmap[word_bitidx], &word, (word & ~mask) | flags));
}
void set_pageblock_migratetype(struct page *page, int migratetype)
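set_pfnblock_flags_mask() now uses the try_cmpxchg() idiom: on failure the helper refreshes the expected value in place, so the loop body can stay empty. The same shape in portable C11 atomics, as a stand-alone analogue (userspace, not kernel code):

    #include <stdatomic.h>
    #include <stdio.h>

    int main(void)
    {
        _Atomic unsigned long bitmap = 0xf0;
        unsigned long mask = 0x0f, flags = 0x05;
        unsigned long word = atomic_load(&bitmap);

        /* retry until the masked bits are swapped in; 'word' is updated
         * with the current value on every failed attempt */
        do {
        } while (!atomic_compare_exchange_weak(&bitmap, &word,
                                               (word & ~mask) | flags));

        printf("0x%lx\n", (unsigned long)atomic_load(&bitmap));   /* 0xf5 */
        return 0;
    }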
@@ -653,7 +727,7 @@ static inline unsigned int order_to_pindex(int migratetype, int order)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
if (order > PAGE_ALLOC_COSTLY_ORDER) {
VM_BUG_ON(order != pageblock_order);
- base = PAGE_ALLOC_COSTLY_ORDER + 1;
+ return NR_LOWORDER_PCP_LISTS;
}
#else
VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
@@ -667,7 +741,7 @@ static inline int pindex_to_order(unsigned int pindex)
int order = pindex / MIGRATE_PCPTYPES;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- if (order > PAGE_ALLOC_COSTLY_ORDER)
+ if (pindex == NR_LOWORDER_PCP_LISTS)
order = pageblock_order;
#else
VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
@@ -744,6 +818,14 @@ void prep_compound_page(struct page *page, unsigned int order)
prep_compound_head(page, order);
}
+void destroy_large_folio(struct folio *folio)
+{
+ enum compound_dtor_id dtor = folio_page(folio, 1)->compound_dtor;
+
+ VM_BUG_ON_FOLIO(dtor >= NR_COMPOUND_DTORS, folio);
+ compound_page_dtors[dtor](&folio->page);
+}
+
#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;
@@ -785,7 +867,7 @@ static inline bool set_page_guard(struct zone *zone, struct page *page,
return false;
__SetPageGuard(page);
- INIT_LIST_HEAD(&page->lru);
+ INIT_LIST_HEAD(&page->buddy_list);
set_page_private(page, order);
/* Guard pages are not available for any usage */
__mod_zone_freepage_state(zone, -(1 << order), migratetype);
@@ -928,7 +1010,7 @@ static inline void add_to_free_list(struct page *page, struct zone *zone,
{
struct free_area *area = &zone->free_area[order];
- list_add(&page->lru, &area->free_list[migratetype]);
+ list_add(&page->buddy_list, &area->free_list[migratetype]);
area->nr_free++;
}
@@ -938,7 +1020,7 @@ static inline void add_to_free_list_tail(struct page *page, struct zone *zone,
{
struct free_area *area = &zone->free_area[order];
- list_add_tail(&page->lru, &area->free_list[migratetype]);
+ list_add_tail(&page->buddy_list, &area->free_list[migratetype]);
area->nr_free++;
}
@@ -952,7 +1034,7 @@ static inline void move_to_free_list(struct page *page, struct zone *zone,
{
struct free_area *area = &zone->free_area[order];
- list_move_tail(&page->lru, &area->free_list[migratetype]);
+ list_move_tail(&page->buddy_list, &area->free_list[migratetype]);
}
static inline void del_page_from_free_list(struct page *page, struct zone *zone,
@@ -962,7 +1044,7 @@ static inline void del_page_from_free_list(struct page *page, struct zone *zone,
if (page_reported(page))
__ClearPageReported(page);
- list_del(&page->lru);
+ list_del(&page->buddy_list);
__ClearPageBuddy(page);
set_page_private(page, 0);
zone->free_area[order].nr_free--;
@@ -1296,18 +1378,14 @@ static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
PageSkipKASanPoison(page);
}
-static void kernel_init_free_pages(struct page *page, int numpages)
+static void kernel_init_pages(struct page *page, int numpages)
{
int i;
/* s390's use of memset() could override KASAN redzones. */
kasan_disable_current();
- for (i = 0; i < numpages; i++) {
- u8 tag = page_kasan_tag(page + i);
- page_kasan_tag_reset(page + i);
- clear_highpage(page + i);
- page_kasan_tag_set(page + i, tag);
- }
+ for (i = 0; i < numpages; i++)
+ clear_highpage_kasan_tagged(page + i);
kasan_enable_current();
}
@@ -1396,7 +1474,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
init = false;
}
if (init)
- kernel_init_free_pages(page, 1 << order);
+ kernel_init_pages(page, 1 << order);
/*
* arch_free_page() can make the page's contents inaccessible. s390
@@ -1473,10 +1551,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
/* Ensure requested pindex is drained first. */
pindex = pindex - 1;
- /*
- * local_lock_irq held so equivalent to spin_lock_irqsave for
- * both PREEMPT_RT and non-PREEMPT_RT configurations.
- */
+ /* Caller must hold IRQ-safe pcp->lock so IRQs are disabled. */
spin_lock(&zone->lock);
isolated_pageblocks = has_isolate_pageblock(zone);
@@ -1504,11 +1579,11 @@ static void free_pcppages_bulk(struct zone *zone, int count,
do {
int mt;
- page = list_last_entry(list, struct page, lru);
+ page = list_last_entry(list, struct page, pcp_list);
mt = get_pcppage_migratetype(page);
/* must delete to avoid corrupting pcp list */
- list_del(&page->lru);
+ list_del(&page->pcp_list);
count -= nr_pages;
pcp->count -= nr_pages;
@@ -2442,7 +2517,7 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
}
/* If memory is still not initialized, do it now. */
if (init)
- kernel_init_free_pages(page, 1 << order);
+ kernel_init_pages(page, 1 << order);
/* Propagate __GFP_SKIP_KASAN_POISON to page flags. */
if (kasan_hw_tags_enabled() && (gfp_flags & __GFP_SKIP_KASAN_POISON))
SetPageSkipKASanPoison(page);
@@ -3045,10 +3120,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
{
int i, allocated = 0;
- /*
- * local_lock_irq held so equivalent to spin_lock_irqsave for
- * both PREEMPT_RT and non-PREEMPT_RT configurations.
- */
+ /* Caller must hold IRQ-safe pcp->lock so IRQs are disabled. */
spin_lock(&zone->lock);
for (i = 0; i < count; ++i) {
struct page *page = __rmqueue(zone, order, migratetype,
@@ -3069,7 +3141,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
* for IO devices that can merge IO requests if the physical
* pages are ordered properly.
*/
- list_add_tail(&page->lru, list);
+ list_add_tail(&page->pcp_list, list);
allocated++;
if (is_migrate_cma(get_pcppage_migratetype(page)))
__mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
@@ -3092,51 +3164,48 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
* Called from the vmstat counter updater to drain pagesets of this
* currently executing processor on remote nodes after they have
* expired.
- *
- * Note that this function must be called with the thread pinned to
- * a single processor.
*/
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
- unsigned long flags;
int to_drain, batch;
- local_lock_irqsave(&pagesets.lock, flags);
batch = READ_ONCE(pcp->batch);
to_drain = min(pcp->count, batch);
- if (to_drain > 0)
+ if (to_drain > 0) {
+ unsigned long flags;
+
+ /*
+ * free_pcppages_bulk expects IRQs disabled for zone->lock
+ * so even though pcp->lock is not intended to be IRQ-safe,
+ * it's needed in this context.
+ */
+ spin_lock_irqsave(&pcp->lock, flags);
free_pcppages_bulk(zone, to_drain, pcp, 0);
- local_unlock_irqrestore(&pagesets.lock, flags);
+ spin_unlock_irqrestore(&pcp->lock, flags);
+ }
}
#endif
/*
* Drain pcplists of the indicated processor and zone.
- *
- * The processor must either be the current processor and the
- * thread pinned to the current processor or a processor that
- * is not online.
*/
static void drain_pages_zone(unsigned int cpu, struct zone *zone)
{
- unsigned long flags;
struct per_cpu_pages *pcp;
- local_lock_irqsave(&pagesets.lock, flags);
-
pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
- if (pcp->count)
- free_pcppages_bulk(zone, pcp->count, pcp, 0);
+ if (pcp->count) {
+ unsigned long flags;
- local_unlock_irqrestore(&pagesets.lock, flags);
+ /* See drain_zone_pages on why this is disabling IRQs */
+ spin_lock_irqsave(&pcp->lock, flags);
+ free_pcppages_bulk(zone, pcp->count, pcp, 0);
+ spin_unlock_irqrestore(&pcp->lock, flags);
+ }
}
/*
* Drain pcplists of all zones on the indicated processor.
- *
- * The processor must either be the current processor and the
- * thread pinned to the current processor or a processor that
- * is not online.
*/
static void drain_pages(unsigned int cpu)
{
@@ -3149,9 +3218,6 @@ static void drain_pages(unsigned int cpu)
/*
* Spill all of this CPU's per-cpu pages back into the buddy allocator.
- *
- * The CPU has to be pinned. When zone parameter is non-NULL, spill just
- * the single zone's pages.
*/
void drain_local_pages(struct zone *zone)
{
@@ -3163,24 +3229,6 @@ void drain_local_pages(struct zone *zone)
drain_pages(cpu);
}
-static void drain_local_pages_wq(struct work_struct *work)
-{
- struct pcpu_drain *drain;
-
- drain = container_of(work, struct pcpu_drain, work);
-
- /*
- * drain_all_pages doesn't use proper cpu hotplug protection so
- * we can race with cpu offline when the WQ can move this from
- * a cpu pinned worker to an unbound one. We can operate on a different
- * cpu which is alright but we also have to make sure to not move to
- * a different one.
- */
- migrate_disable();
- drain_local_pages(drain->zone);
- migrate_enable();
-}
-
/*
* The implementation of drain_all_pages(), exposing an extra parameter to
* drain on all cpus.
@@ -3202,13 +3250,6 @@ static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
static cpumask_t cpus_with_pcps;
/*
- * Make sure nobody triggers this path before mm_percpu_wq is fully
- * initialized.
- */
- if (WARN_ON_ONCE(!mm_percpu_wq))
- return;
-
- /*
* Do not drain if one is already in progress unless it's specific to
* a zone. Such callers are primarily CMA and memory hotplug and need
* the drain to be complete when the call returns.
@@ -3257,14 +3298,11 @@ static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
}
for_each_cpu(cpu, &cpus_with_pcps) {
- struct pcpu_drain *drain = per_cpu_ptr(&pcpu_drain, cpu);
-
- drain->zone = zone;
- INIT_WORK(&drain->work, drain_local_pages_wq);
- queue_work_on(cpu, mm_percpu_wq, &drain->work);
+ if (zone)
+ drain_pages_zone(cpu, zone);
+ else
+ drain_pages(cpu);
}
- for_each_cpu(cpu, &cpus_with_pcps)
- flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work);
mutex_unlock(&pcpu_drain_mutex);
}
@@ -3273,8 +3311,6 @@ static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
* Spill all the per-cpu pages from all CPUs back into the buddy allocator.
*
* When zone parameter is non-NULL, spill just the single zone's pages.
- *
- * Note that this can be extremely slow as the draining happens in a workqueue.
*/
void drain_all_pages(struct zone *zone)
{
@@ -3319,7 +3355,7 @@ void mark_free_pages(struct zone *zone)
for_each_migratetype_order(order, t) {
list_for_each_entry(page,
- &zone->free_area[order].free_list[t], lru) {
+ &zone->free_area[order].free_list[t], buddy_list) {
unsigned long i;
pfn = page_to_pfn(page);
@@ -3396,19 +3432,17 @@ static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone,
return min(READ_ONCE(pcp->batch) << 2, high);
}
-static void free_unref_page_commit(struct page *page, int migratetype,
+static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp,
+ struct page *page, int migratetype,
unsigned int order)
{
- struct zone *zone = page_zone(page);
- struct per_cpu_pages *pcp;
int high;
int pindex;
bool free_high;
__count_vm_event(PGFREE);
- pcp = this_cpu_ptr(zone->per_cpu_pageset);
pindex = order_to_pindex(migratetype, order);
- list_add(&page->lru, &pcp->lists[pindex]);
+ list_add(&page->pcp_list, &pcp->lists[pindex]);
pcp->count += 1 << order;
/*
@@ -3433,6 +3467,9 @@ static void free_unref_page_commit(struct page *page, int migratetype,
void free_unref_page(struct page *page, unsigned int order)
{
unsigned long flags;
+ unsigned long __maybe_unused UP_flags;
+ struct per_cpu_pages *pcp;
+ struct zone *zone;
unsigned long pfn = page_to_pfn(page);
int migratetype;
@@ -3455,9 +3492,16 @@ void free_unref_page(struct page *page, unsigned int order)
migratetype = MIGRATE_MOVABLE;
}
- local_lock_irqsave(&pagesets.lock, flags);
- free_unref_page_commit(page, migratetype, order);
- local_unlock_irqrestore(&pagesets.lock, flags);
+ zone = page_zone(page);
+ pcp_trylock_prepare(UP_flags);
+ pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags);
+ if (pcp) {
+ free_unref_page_commit(zone, pcp, page, migratetype, order);
+ pcp_spin_unlock_irqrestore(pcp, flags);
+ } else {
+ free_one_page(zone, page, pfn, order, migratetype, FPI_NONE);
+ }
+ pcp_trylock_finish(UP_flags);
}
/*
@@ -3466,6 +3510,8 @@ void free_unref_page(struct page *page, unsigned int order)
void free_unref_page_list(struct list_head *list)
{
struct page *page, *next;
+ struct per_cpu_pages *pcp = NULL;
+ struct zone *locked_zone = NULL;
unsigned long flags;
int batch_count = 0;
int migratetype;
@@ -3490,8 +3536,18 @@ void free_unref_page_list(struct list_head *list)
}
}
- local_lock_irqsave(&pagesets.lock, flags);
list_for_each_entry_safe(page, next, list, lru) {
+ struct zone *zone = page_zone(page);
+
+ /* Different zone, different pcp lock. */
+ if (zone != locked_zone) {
+ if (pcp)
+ pcp_spin_unlock_irqrestore(pcp, flags);
+
+ locked_zone = zone;
+ pcp = pcp_spin_lock_irqsave(locked_zone->per_cpu_pageset, flags);
+ }
+
/*
* Non-isolated types over MIGRATE_PCPTYPES get added
* to the MIGRATE_MOVABLE pcp list.
@@ -3501,19 +3557,21 @@ void free_unref_page_list(struct list_head *list)
migratetype = MIGRATE_MOVABLE;
trace_mm_page_free_batched(page);
- free_unref_page_commit(page, migratetype, 0);
+ free_unref_page_commit(zone, pcp, page, migratetype, 0);
/*
* Guard against excessive IRQ disabled times when we get
* a large list of pages to free.
*/
if (++batch_count == SWAP_CLUSTER_MAX) {
- local_unlock_irqrestore(&pagesets.lock, flags);
+ pcp_spin_unlock_irqrestore(pcp, flags);
batch_count = 0;
- local_lock_irqsave(&pagesets.lock, flags);
+ pcp = pcp_spin_lock_irqsave(locked_zone->per_cpu_pageset, flags);
}
}
- local_unlock_irqrestore(&pagesets.lock, flags);
+
+ if (pcp)
+ pcp_spin_unlock_irqrestore(pcp, flags);
}
/*
@@ -3638,6 +3696,43 @@ static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
#endif
}
+static __always_inline
+struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
+ unsigned int order, unsigned int alloc_flags,
+ int migratetype)
+{
+ struct page *page;
+ unsigned long flags;
+
+ do {
+ page = NULL;
+ spin_lock_irqsave(&zone->lock, flags);
+ /*
+ * order-0 request can reach here when the pcplist is skipped
+ * due to non-CMA allocation context. HIGHATOMIC area is
+ * reserved for high-order atomic allocation, so order-0
+ * request should skip it.
+ */
+ if (order > 0 && alloc_flags & ALLOC_HARDER)
+ page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
+ if (!page) {
+ page = __rmqueue(zone, order, migratetype, alloc_flags);
+ if (!page) {
+ spin_unlock_irqrestore(&zone->lock, flags);
+ return NULL;
+ }
+ }
+ __mod_zone_freepage_state(zone, -(1 << order),
+ get_pcppage_migratetype(page));
+ spin_unlock_irqrestore(&zone->lock, flags);
+ } while (check_new_pages(page, order));
+
+ __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
+ zone_statistics(preferred_zone, zone, 1);
+
+ return page;
+}
+
/* Remove page from the per-cpu list, caller must protect the list */
static inline
struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
@@ -3671,8 +3766,8 @@ struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
return NULL;
}
- page = list_first_entry(list, struct page, lru);
- list_del(&page->lru);
+ page = list_first_entry(list, struct page, pcp_list);
+ list_del(&page->pcp_list);
pcp->count -= 1 << order;
} while (check_new_pcp(page, order));
@@ -3689,19 +3784,29 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
struct list_head *list;
struct page *page;
unsigned long flags;
+ unsigned long __maybe_unused UP_flags;
- local_lock_irqsave(&pagesets.lock, flags);
+ /*
+ * spin_trylock may fail due to a parallel drain. In the future, the
+ * trylock will also protect against IRQ reentrancy.
+ */
+ pcp_trylock_prepare(UP_flags);
+ pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags);
+ if (!pcp) {
+ pcp_trylock_finish(UP_flags);
+ return NULL;
+ }
/*
* On allocation, reduce the number of pages that are batch freed.
* See nr_pcp_free() where free_factor is increased for subsequent
* frees.
*/
- pcp = this_cpu_ptr(zone->per_cpu_pageset);
pcp->free_factor >>= 1;
list = &pcp->lists[order_to_pindex(migratetype, order)];
page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list);
- local_unlock_irqrestore(&pagesets.lock, flags);
+ pcp_spin_unlock_irqrestore(pcp, flags);
+ pcp_trylock_finish(UP_flags);
if (page) {
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
zone_statistics(preferred_zone, zone, 1);
@@ -3718,9 +3823,14 @@ struct page *rmqueue(struct zone *preferred_zone,
gfp_t gfp_flags, unsigned int alloc_flags,
int migratetype)
{
- unsigned long flags;
struct page *page;
+ /*
+ * We most definitely don't want callers attempting to
+ * allocate greater than order-1 page units with __GFP_NOFAIL.
+ */
+ WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
+
if (likely(pcp_allowed_order(order))) {
/*
* MIGRATE_MOVABLE pcplist could have the pages on CMA area and
@@ -3730,53 +3840,23 @@ struct page *rmqueue(struct zone *preferred_zone,
migratetype != MIGRATE_MOVABLE) {
page = rmqueue_pcplist(preferred_zone, zone, order,
gfp_flags, migratetype, alloc_flags);
- goto out;
+ if (likely(page))
+ goto out;
}
}
- /*
- * We most definitely don't want callers attempting to
- * allocate greater than order-1 page units with __GFP_NOFAIL.
- */
- WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
-
- do {
- page = NULL;
- spin_lock_irqsave(&zone->lock, flags);
- /*
- * order-0 request can reach here when the pcplist is skipped
- * due to non-CMA allocation context. HIGHATOMIC area is
- * reserved for high-order atomic allocation, so order-0
- * request should skip it.
- */
- if (order > 0 && alloc_flags & ALLOC_HARDER)
- page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
- if (!page) {
- page = __rmqueue(zone, order, migratetype, alloc_flags);
- if (!page)
- goto failed;
- }
- __mod_zone_freepage_state(zone, -(1 << order),
- get_pcppage_migratetype(page));
- spin_unlock_irqrestore(&zone->lock, flags);
- } while (check_new_pages(page, order));
-
- __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
- zone_statistics(preferred_zone, zone, 1);
+ page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags,
+ migratetype);
out:
/* Separate test+clear to avoid unnecessary atomics */
- if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) {
+ if (unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) {
clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
wakeup_kswapd(zone, 0, 0, zone_idx(zone));
}
VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
return page;
-
-failed:
- spin_unlock_irqrestore(&zone->lock, flags);
- return NULL;
}
#ifdef CONFIG_FAIL_PAGE_ALLOC
@@ -4095,7 +4175,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
retry:
/*
* Scan zonelist, looking for a zone with enough free.
- * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
+ * See also __cpuset_node_allowed() comment in kernel/cgroup/cpuset.c.
*/
no_fallback = alloc_flags & ALLOC_NOFRAGMENT;
z = ac->preferred_zoneref;
@@ -5202,10 +5282,7 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
*alloc_flags |= ALLOC_CPUSET;
}
- fs_reclaim_acquire(gfp_mask);
- fs_reclaim_release(gfp_mask);
-
- might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
+ might_alloc(gfp_mask);
if (should_fail_alloc_page(gfp_mask, order))
return false;
@@ -5253,6 +5330,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
{
struct page *page;
unsigned long flags;
+ unsigned long __maybe_unused UP_flags;
struct zone *zone;
struct zoneref *z;
struct per_cpu_pages *pcp;
@@ -5333,11 +5411,14 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
if (unlikely(!zone))
goto failed;
+ /* Is a parallel drain in progress? */
+ pcp_trylock_prepare(UP_flags);
+ pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags);
+ if (!pcp)
+ goto failed_irq;
+
/* Attempt the batch allocation */
- local_lock_irqsave(&pagesets.lock, flags);
- pcp = this_cpu_ptr(zone->per_cpu_pageset);
pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)];
-
while (nr_populated < nr_pages) {
/* Skip existing pages */
@@ -5350,8 +5431,10 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
pcp, pcp_list);
if (unlikely(!page)) {
/* Try and allocate at least one page */
- if (!nr_account)
+ if (!nr_account) {
+ pcp_spin_unlock_irqrestore(pcp, flags);
goto failed_irq;
+ }
break;
}
nr_account++;
@@ -5364,7 +5447,8 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
nr_populated++;
}
- local_unlock_irqrestore(&pagesets.lock, flags);
+ pcp_spin_unlock_irqrestore(pcp, flags);
+ pcp_trylock_finish(UP_flags);
__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
@@ -5373,7 +5457,7 @@ out:
return nr_populated;
failed_irq:
- local_unlock_irqrestore(&pagesets.lock, flags);
+ pcp_trylock_finish(UP_flags);
failed:
page = __alloc_pages(gfp, 0, preferred_nid, nodemask);
@@ -5804,14 +5888,14 @@ long si_mem_available(void)
/*
* Estimate the amount of memory available for userspace allocations,
- * without causing swapping.
+ * without causing swapping or OOM.
*/
available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;
/*
* Not all the page cache can be freed, otherwise the system will
- * start swapping. Assume at least half of the page cache, or the
- * low watermark worth of cache, needs to stay.
+ * start swapping or thrashing. Assume at least half of the page
+ * cache, or the low watermark worth of cache, needs to stay.
*/
pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
pagecache -= min(pagecache / 2, wmark_low);
@@ -5939,7 +6023,7 @@ static void show_migration_types(unsigned char type)
void show_free_areas(unsigned int filter, nodemask_t *nodemask)
{
unsigned long free_pcp = 0;
- int cpu;
+ int cpu, nid;
struct zone *zone;
pg_data_t *pgdat;
@@ -6127,7 +6211,11 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
printk(KERN_CONT "= %lukB\n", K(total));
}
- hugetlb_show_meminfo();
+ for_each_online_node(nid) {
+ if (show_mem_node_skip(filter, nid, nodemask))
+ continue;
+ hugetlb_show_meminfo_node(nid);
+ }
printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));
@@ -7013,6 +7101,7 @@ static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonesta
memset(pcp, 0, sizeof(*pcp));
memset(pzstats, 0, sizeof(*pzstats));
+ spin_lock_init(&pcp->lock);
for (pindex = 0; pindex < NR_PCP_LISTS; pindex++)
INIT_LIST_HEAD(&pcp->lists[pindex]);
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index c10f839fc410..8e9e574d535a 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -174,8 +174,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
if (!pvmw->pte)
return false;
- pvmw->ptl = huge_pte_lockptr(hstate, mm, pvmw->pte);
- spin_lock(pvmw->ptl);
+ pvmw->ptl = huge_pte_lock(hstate, mm, pvmw->pte);
if (!check_pte(pvmw))
return not_found(pvmw);
return true;
@@ -243,7 +242,7 @@ restart:
* cleared *pmd but not decremented compound_mapcount().
*/
if ((pvmw->flags & PVMW_SYNC) &&
- transparent_hugepage_active(vma) &&
+ transhuge_vma_suitable(vma, pvmw->address) &&
(pvmw->nr_pages >= HPAGE_PMD_NR)) {
spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
diff --git a/mm/percpu.c b/mm/percpu.c
index 3633eeefaa0d..27697b2429c2 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -3104,7 +3104,7 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
goto out_free_areas;
}
/* kmemleak tracks the percpu allocations separately */
- kmemleak_free(ptr);
+ kmemleak_ignore_phys(__pa(ptr));
areas[group] = ptr;
base = min(ptr, base);
@@ -3304,7 +3304,7 @@ int __init pcpu_page_first_chunk(size_t reserved_size, pcpu_fc_cpu_to_node_fn_t
goto enomem;
}
/* kmemleak tracks the percpu allocations separately */
- kmemleak_free(ptr);
+ kmemleak_ignore_phys(__pa(ptr));
pages[j++] = virt_to_page(ptr);
}
}
@@ -3417,7 +3417,7 @@ void __init setup_per_cpu_areas(void)
if (!ai || !fc)
panic("Failed to allocate memory for percpu areas.");
/* kmemleak tracks the percpu allocations separately */
- kmemleak_free(fc);
+ kmemleak_ignore_phys(__pa(fc));
ai->dyn_size = unit_size;
ai->unit_size = unit_size;
diff --git a/mm/rmap.c b/mm/rmap.c
index 746c05acad27..edc06c52bc82 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -999,7 +999,7 @@ static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw)
* downgrading page table protection not changing it to point
* to a new page.
*
- * See Documentation/vm/mmu_notifier.rst
+ * See Documentation/mm/mmu_notifier.rst
*/
if (ret)
cleaned++;
@@ -1537,6 +1537,8 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
PageAnonExclusive(subpage);
if (folio_test_hugetlb(folio)) {
+ bool anon = folio_test_anon(folio);
+
/*
* The try_to_unmap() is only passed a hugetlb page
* in the case where the hugetlb page is poisoned.
@@ -1551,31 +1553,28 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
*/
flush_cache_range(vma, range.start, range.end);
- if (!folio_test_anon(folio)) {
+ /*
+ * To call huge_pmd_unshare, i_mmap_rwsem must be
+ * held in write mode. Caller needs to explicitly
+ * do this outside rmap routines.
+ */
+ VM_BUG_ON(!anon && !(flags & TTU_RMAP_LOCKED));
+ if (!anon && huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
+ flush_tlb_range(vma, range.start, range.end);
+ mmu_notifier_invalidate_range(mm, range.start,
+ range.end);
+
/*
- * To call huge_pmd_unshare, i_mmap_rwsem must be
- * held in write mode. Caller needs to explicitly
- * do this outside rmap routines.
+ * The ref count of the PMD page was dropped
+ * which is part of the way map counting
+ * is done for shared PMDs. Return 'true'
+ * here. When there is no other sharing,
+ * huge_pmd_unshare returns false and we will
+ * unmap the actual page and drop map count
+ * to zero.
*/
- VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
-
- if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
- flush_tlb_range(vma, range.start, range.end);
- mmu_notifier_invalidate_range(mm, range.start,
- range.end);
-
- /*
- * The ref count of the PMD page was dropped
- * which is part of the way map counting
- * is done for shared PMDs. Return 'true'
- * here. When there is no other sharing,
- * huge_pmd_unshare returns false and we will
- * unmap the actual page and drop map count
- * to zero.
- */
- page_vma_mapped_walk_done(&pvmw);
- break;
- }
+ page_vma_mapped_walk_done(&pvmw);
+ break;
}
pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
} else {
@@ -1619,9 +1618,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
if (folio_test_hugetlb(folio)) {
hugetlb_count_sub(folio_nr_pages(folio), mm);
- set_huge_swap_pte_at(mm, address,
- pvmw.pte, pteval,
- vma_mmu_pagesize(vma));
+ set_huge_pte_at(mm, address, pvmw.pte, pteval);
} else {
dec_mm_counter(mm, mm_counter(&folio->page));
set_pte_at(mm, address, pvmw.pte, pteval);
@@ -1765,7 +1762,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
* to point at a new folio while a device is
* still using this folio.
*
- * See Documentation/vm/mmu_notifier.rst
+ * See Documentation/mm/mmu_notifier.rst
*/
dec_mm_counter(mm, mm_counter_file(&folio->page));
}
@@ -1775,7 +1772,7 @@ discard:
* done above for all cases requiring it to happen under page
* table lock before mmu_notifier_invalidate_range_end()
*
- * See Documentation/vm/mmu_notifier.rst
+ * See Documentation/mm/mmu_notifier.rst
*/
page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
if (vma->vm_flags & VM_LOCKED)
@@ -1921,6 +1918,8 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
PageAnonExclusive(subpage);
if (folio_test_hugetlb(folio)) {
+ bool anon = folio_test_anon(folio);
+
/*
* huge_pmd_unshare may unmap an entire PMD page.
* There is no way of knowing exactly which PMDs may
@@ -1930,31 +1929,28 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
*/
flush_cache_range(vma, range.start, range.end);
- if (!folio_test_anon(folio)) {
+ /*
+ * To call huge_pmd_unshare, i_mmap_rwsem must be
+ * held in write mode. Caller needs to explicitly
+ * do this outside rmap routines.
+ */
+ VM_BUG_ON(!anon && !(flags & TTU_RMAP_LOCKED));
+ if (!anon && huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
+ flush_tlb_range(vma, range.start, range.end);
+ mmu_notifier_invalidate_range(mm, range.start,
+ range.end);
+
/*
- * To call huge_pmd_unshare, i_mmap_rwsem must be
- * held in write mode. Caller needs to explicitly
- * do this outside rmap routines.
+ * The ref count of the PMD page was dropped
+ * which is part of the way map counting
+ * is done for shared PMDs. Return 'true'
+ * here. When there is no other sharing,
+ * huge_pmd_unshare returns false and we will
+ * unmap the actual page and drop map count
+ * to zero.
*/
- VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
-
- if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
- flush_tlb_range(vma, range.start, range.end);
- mmu_notifier_invalidate_range(mm, range.start,
- range.end);
-
- /*
- * The ref count of the PMD page was dropped
- * which is part of the way map counting
- * is done for shared PMDs. Return 'true'
- * here. When there is no other sharing,
- * huge_pmd_unshare returns false and we will
- * unmap the actual page and drop map count
- * to zero.
- */
- page_vma_mapped_walk_done(&pvmw);
- break;
- }
+ page_vma_mapped_walk_done(&pvmw);
+ break;
}
/* Nuke the hugetlb page table entry */
@@ -1972,7 +1968,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
/* Update high watermark before we lower rss */
update_hiwater_rss(mm);
- if (folio_is_zone_device(folio)) {
+ if (folio_is_device_private(folio)) {
unsigned long pfn = folio_pfn(folio);
swp_entry_t entry;
pte_t swp_pte;
@@ -2013,9 +2009,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
if (folio_test_hugetlb(folio)) {
hugetlb_count_sub(folio_nr_pages(folio), mm);
- set_huge_swap_pte_at(mm, address,
- pvmw.pte, pteval,
- vma_mmu_pagesize(vma));
+ set_huge_pte_at(mm, address, pvmw.pte, pteval);
} else {
dec_mm_counter(mm, mm_counter(&folio->page));
set_pte_at(mm, address, pvmw.pte, pteval);
@@ -2083,8 +2077,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
if (pte_uffd_wp(pteval))
swp_pte = pte_swp_mkuffd_wp(swp_pte);
if (folio_test_hugetlb(folio))
- set_huge_swap_pte_at(mm, address, pvmw.pte,
- swp_pte, vma_mmu_pagesize(vma));
+ set_huge_pte_at(mm, address, pvmw.pte, swp_pte);
else
set_pte_at(mm, address, pvmw.pte, swp_pte);
trace_set_migration_pte(address, pte_val(swp_pte),
@@ -2100,7 +2093,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
* done above for all cases requiring it to happen under page
* table lock before mmu_notifier_invalidate_range_end()
*
- * See Documentation/vm/mmu_notifier.rst
+ * See Documentation/mm/mmu_notifier.rst
*/
page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
if (vma->vm_flags & VM_LOCKED)
@@ -2138,7 +2131,8 @@ void try_to_migrate(struct folio *folio, enum ttu_flags flags)
TTU_SYNC)))
return;
- if (folio_is_zone_device(folio) && !folio_is_device_private(folio))
+ if (folio_is_zone_device(folio) &&
+ (!folio_is_device_private(folio) && !folio_is_device_coherent(folio)))
return;
/*
diff --git a/mm/shmem.c b/mm/shmem.c
index e5e43b990fdc..42e5888bf84d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -28,6 +28,7 @@
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
+#include <linux/fileattr.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
@@ -1057,6 +1058,15 @@ static int shmem_getattr(struct user_namespace *mnt_userns,
shmem_recalc_inode(inode);
spin_unlock_irq(&info->lock);
}
+ if (info->fsflags & FS_APPEND_FL)
+ stat->attributes |= STATX_ATTR_APPEND;
+ if (info->fsflags & FS_IMMUTABLE_FL)
+ stat->attributes |= STATX_ATTR_IMMUTABLE;
+ if (info->fsflags & FS_NODUMP_FL)
+ stat->attributes |= STATX_ATTR_NODUMP;
+ stat->attributes_mask |= (STATX_ATTR_APPEND |
+ STATX_ATTR_IMMUTABLE |
+ STATX_ATTR_NODUMP);
generic_fillattr(&init_user_ns, inode, stat);
if (shmem_is_huge(NULL, inode, 0))
@@ -1649,7 +1659,9 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
new = page_folio(newpage);
mem_cgroup_migrate(old, new);
__inc_lruvec_page_state(newpage, NR_FILE_PAGES);
+ __inc_lruvec_page_state(newpage, NR_SHMEM);
__dec_lruvec_page_state(oldpage, NR_FILE_PAGES);
+ __dec_lruvec_page_state(oldpage, NR_SHMEM);
}
xa_unlock_irq(&swap_mapping->i_pages);
@@ -1690,7 +1702,7 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
return;
folio_wait_writeback(folio);
- delete_from_swap_cache(&folio->page);
+ delete_from_swap_cache(folio);
spin_lock_irq(&info->lock);
/*
* Don't treat swapin error folio as alloced. Otherwise inode->i_blocks won't
@@ -1705,10 +1717,10 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
}
/*
- * Swap in the page pointed to by *pagep.
- * Caller has to make sure that *pagep contains a valid swapped page.
- * Returns 0 and the page in pagep if success. On failure, returns the
- * error code and NULL in *pagep.
+ * Swap in the folio pointed to by *foliop.
+ * Caller has to make sure that *foliop contains a valid swapped folio.
+ * Returns 0 and the folio in foliop if success. On failure, returns the
+ * error code and NULL in *foliop.
*/
static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
struct folio **foliop, enum sgp_type sgp,
@@ -1748,7 +1760,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
}
folio = page_folio(page);
- /* We have to do this with page locked to prevent races */
+ /* We have to do this with folio locked to prevent races */
folio_lock(folio);
if (!folio_test_swapcache(folio) ||
folio_swap_entry(folio).val != swap.val ||
@@ -1770,6 +1782,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
if (shmem_should_replace_folio(folio, gfp)) {
error = shmem_replace_page(&page, gfp, info, index);
+ folio = page_folio(page);
if (error)
goto failed;
}
@@ -1788,7 +1801,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
if (sgp == SGP_WRITE)
folio_mark_accessed(folio);
- delete_from_swap_cache(&folio->page);
+ delete_from_swap_cache(folio);
folio_mark_dirty(folio);
swap_free(swap);
@@ -2271,7 +2284,36 @@ static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
return 0;
}
-static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
+#ifdef CONFIG_TMPFS_XATTR
+static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
+
+/*
+ * chattr's fsflags are unrelated to extended attributes,
+ * but tmpfs has chosen to enable them under the same config option.
+ */
+static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)
+{
+ unsigned int i_flags = 0;
+
+ if (fsflags & FS_NOATIME_FL)
+ i_flags |= S_NOATIME;
+ if (fsflags & FS_APPEND_FL)
+ i_flags |= S_APPEND;
+ if (fsflags & FS_IMMUTABLE_FL)
+ i_flags |= S_IMMUTABLE;
+ /*
+ * But FS_NODUMP_FL does not require any action in i_flags.
+ */
+ inode_set_flags(inode, i_flags, S_NOATIME | S_APPEND | S_IMMUTABLE);
+}
+#else
+static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)
+{
+}
+#define shmem_initxattrs NULL
+#endif
+
+static struct inode *shmem_get_inode(struct super_block *sb, struct inode *dir,
umode_t mode, dev_t dev, unsigned long flags)
{
struct inode *inode;
@@ -2296,6 +2338,10 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
info->seals = F_SEAL_SEAL;
info->flags = flags & VM_NORESERVE;
info->i_crtime = inode->i_mtime;
+ info->fsflags = (dir == NULL) ? 0 :
+ SHMEM_I(dir)->fsflags & SHMEM_FL_INHERITED;
+ if (info->fsflags)
+ shmem_set_inode_flags(inode, info->fsflags);
INIT_LIST_HEAD(&info->shrinklist);
INIT_LIST_HEAD(&info->swaplist);
simple_xattrs_init(&info->xattrs);
@@ -2444,12 +2490,6 @@ out_unacct_blocks:
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_short_symlink_operations;
-#ifdef CONFIG_TMPFS_XATTR
-static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
-#else
-#define shmem_initxattrs NULL
-#endif
-
static int
shmem_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
@@ -2602,7 +2642,7 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
ret = copy_page_to_iter(page, offset, nr, to);
put_page(page);
- } else if (iter_is_iovec(to)) {
+ } else if (user_backed_iter(to)) {
/*
* Copy to user tends to be so well optimized, but
* clear_user() not so much, that it is noticeably
@@ -2802,12 +2842,13 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
i_size_write(inode, offset + len);
- inode->i_ctime = current_time(inode);
undone:
spin_lock(&inode->i_lock);
inode->i_private = NULL;
spin_unlock(&inode->i_lock);
out:
+ if (!error)
+ file_modified(file);
inode_unlock(inode);
return error;
}
@@ -3137,6 +3178,35 @@ static const char *shmem_get_link(struct dentry *dentry,
}
#ifdef CONFIG_TMPFS_XATTR
+
+static int shmem_fileattr_get(struct dentry *dentry, struct fileattr *fa)
+{
+ struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
+
+ fileattr_fill_flags(fa, info->fsflags & SHMEM_FL_USER_VISIBLE);
+
+ return 0;
+}
+
+static int shmem_fileattr_set(struct user_namespace *mnt_userns,
+ struct dentry *dentry, struct fileattr *fa)
+{
+ struct inode *inode = d_inode(dentry);
+ struct shmem_inode_info *info = SHMEM_I(inode);
+
+ if (fileattr_has_fsx(fa))
+ return -EOPNOTSUPP;
+ if (fa->flags & ~SHMEM_FL_USER_MODIFIABLE)
+ return -EOPNOTSUPP;
+
+ info->fsflags = (info->fsflags & ~SHMEM_FL_USER_MODIFIABLE) |
+ (fa->flags & SHMEM_FL_USER_MODIFIABLE);
+
+ shmem_set_inode_flags(inode, info->fsflags);
+ inode->i_ctime = current_time(inode);
+ return 0;
+}
+
/*
* Superblocks without xattr inode operations may get some security.* xattr
* support from the LSM "for free". As soon as we have any other xattrs
@@ -3824,6 +3894,8 @@ static const struct inode_operations shmem_inode_operations = {
#ifdef CONFIG_TMPFS_XATTR
.listxattr = shmem_listxattr,
.set_acl = simple_set_acl,
+ .fileattr_get = shmem_fileattr_get,
+ .fileattr_set = shmem_fileattr_set,
#endif
};
@@ -3843,6 +3915,8 @@ static const struct inode_operations shmem_dir_inode_operations = {
#endif
#ifdef CONFIG_TMPFS_XATTR
.listxattr = shmem_listxattr,
+ .fileattr_get = shmem_fileattr_get,
+ .fileattr_set = shmem_fileattr_set,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
.setattr = shmem_setattr,
diff --git a/mm/shrinker_debug.c b/mm/shrinker_debug.c
new file mode 100644
index 000000000000..b05295bab322
--- /dev/null
+++ b/mm/shrinker_debug.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/idr.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/shrinker.h>
+#include <linux/memcontrol.h>
+
+/* defined in vmscan.c */
+extern struct rw_semaphore shrinker_rwsem;
+extern struct list_head shrinker_list;
+
+static DEFINE_IDA(shrinker_debugfs_ida);
+static struct dentry *shrinker_debugfs_root;
+
+static unsigned long shrinker_count_objects(struct shrinker *shrinker,
+ struct mem_cgroup *memcg,
+ unsigned long *count_per_node)
+{
+ unsigned long nr, total = 0;
+ int nid;
+
+ for_each_node(nid) {
+ if (nid == 0 || (shrinker->flags & SHRINKER_NUMA_AWARE)) {
+ struct shrink_control sc = {
+ .gfp_mask = GFP_KERNEL,
+ .nid = nid,
+ .memcg = memcg,
+ };
+
+ nr = shrinker->count_objects(shrinker, &sc);
+ if (nr == SHRINK_EMPTY)
+ nr = 0;
+ } else {
+ nr = 0;
+ }
+
+ count_per_node[nid] = nr;
+ total += nr;
+ }
+
+ return total;
+}
+
+static int shrinker_debugfs_count_show(struct seq_file *m, void *v)
+{
+ struct shrinker *shrinker = m->private;
+ unsigned long *count_per_node;
+ struct mem_cgroup *memcg;
+ unsigned long total;
+ bool memcg_aware;
+ int ret, nid;
+
+ count_per_node = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
+ if (!count_per_node)
+ return -ENOMEM;
+
+ ret = down_read_killable(&shrinker_rwsem);
+ if (ret) {
+ kfree(count_per_node);
+ return ret;
+ }
+ rcu_read_lock();
+
+ memcg_aware = shrinker->flags & SHRINKER_MEMCG_AWARE;
+
+ memcg = mem_cgroup_iter(NULL, NULL, NULL);
+ do {
+ if (memcg && !mem_cgroup_online(memcg))
+ continue;
+
+ total = shrinker_count_objects(shrinker,
+ memcg_aware ? memcg : NULL,
+ count_per_node);
+ if (total) {
+ seq_printf(m, "%lu", mem_cgroup_ino(memcg));
+ for_each_node(nid)
+ seq_printf(m, " %lu", count_per_node[nid]);
+ seq_putc(m, '\n');
+ }
+
+ if (!memcg_aware) {
+ mem_cgroup_iter_break(NULL, memcg);
+ break;
+ }
+
+ if (signal_pending(current)) {
+ mem_cgroup_iter_break(NULL, memcg);
+ ret = -EINTR;
+ break;
+ }
+ } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
+
+ rcu_read_unlock();
+ up_read(&shrinker_rwsem);
+
+ kfree(count_per_node);
+ return ret;
+}
+DEFINE_SHOW_ATTRIBUTE(shrinker_debugfs_count);
+
+static int shrinker_debugfs_scan_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return nonseekable_open(inode, file);
+}
+
+static ssize_t shrinker_debugfs_scan_write(struct file *file,
+ const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct shrinker *shrinker = file->private_data;
+ unsigned long nr_to_scan = 0, ino, read_len;
+ struct shrink_control sc = {
+ .gfp_mask = GFP_KERNEL,
+ };
+ struct mem_cgroup *memcg = NULL;
+ int nid;
+ char kbuf[72];
+ ssize_t ret;
+
+ read_len = size < (sizeof(kbuf) - 1) ? size : (sizeof(kbuf) - 1);
+ if (copy_from_user(kbuf, buf, read_len))
+ return -EFAULT;
+ kbuf[read_len] = '\0';
+
+ if (sscanf(kbuf, "%lu %d %lu", &ino, &nid, &nr_to_scan) != 3)
+ return -EINVAL;
+
+ if (nid < 0 || nid >= nr_node_ids)
+ return -EINVAL;
+
+ if (nr_to_scan == 0)
+ return size;
+
+ if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
+ memcg = mem_cgroup_get_from_ino(ino);
+ if (!memcg || IS_ERR(memcg))
+ return -ENOENT;
+
+ if (!mem_cgroup_online(memcg)) {
+ mem_cgroup_put(memcg);
+ return -ENOENT;
+ }
+ } else if (ino != 0) {
+ return -EINVAL;
+ }
+
+ ret = down_read_killable(&shrinker_rwsem);
+ if (ret) {
+ mem_cgroup_put(memcg);
+ return ret;
+ }
+
+ sc.nid = nid;
+ sc.memcg = memcg;
+ sc.nr_to_scan = nr_to_scan;
+ sc.nr_scanned = nr_to_scan;
+
+ shrinker->scan_objects(shrinker, &sc);
+
+ up_read(&shrinker_rwsem);
+ mem_cgroup_put(memcg);
+
+ return size;
+}
+
+static const struct file_operations shrinker_debugfs_scan_fops = {
+ .owner = THIS_MODULE,
+ .open = shrinker_debugfs_scan_open,
+ .write = shrinker_debugfs_scan_write,
+};
+
+int shrinker_debugfs_add(struct shrinker *shrinker)
+{
+ struct dentry *entry;
+ char buf[128];
+ int id;
+
+ lockdep_assert_held(&shrinker_rwsem);
+
+ /* debugfs isn't initialized yet, add debugfs entries later. */
+ if (!shrinker_debugfs_root)
+ return 0;
+
+ id = ida_alloc(&shrinker_debugfs_ida, GFP_KERNEL);
+ if (id < 0)
+ return id;
+ shrinker->debugfs_id = id;
+
+ snprintf(buf, sizeof(buf), "%s-%d", shrinker->name, id);
+
+ /* create debugfs entry */
+ entry = debugfs_create_dir(buf, shrinker_debugfs_root);
+ if (IS_ERR(entry)) {
+ ida_free(&shrinker_debugfs_ida, id);
+ return PTR_ERR(entry);
+ }
+ shrinker->debugfs_entry = entry;
+
+ debugfs_create_file("count", 0220, entry, shrinker,
+ &shrinker_debugfs_count_fops);
+ debugfs_create_file("scan", 0440, entry, shrinker,
+ &shrinker_debugfs_scan_fops);
+ return 0;
+}
+
+int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)
+{
+ struct dentry *entry;
+ char buf[128];
+ const char *new, *old;
+ va_list ap;
+ int ret = 0;
+
+ va_start(ap, fmt);
+ new = kvasprintf_const(GFP_KERNEL, fmt, ap);
+ va_end(ap);
+
+ if (!new)
+ return -ENOMEM;
+
+ down_write(&shrinker_rwsem);
+
+ old = shrinker->name;
+ shrinker->name = new;
+
+ if (shrinker->debugfs_entry) {
+ snprintf(buf, sizeof(buf), "%s-%d", shrinker->name,
+ shrinker->debugfs_id);
+
+ entry = debugfs_rename(shrinker_debugfs_root,
+ shrinker->debugfs_entry,
+ shrinker_debugfs_root, buf);
+ if (IS_ERR(entry))
+ ret = PTR_ERR(entry);
+ else
+ shrinker->debugfs_entry = entry;
+ }
+
+ up_write(&shrinker_rwsem);
+
+ kfree_const(old);
+
+ return ret;
+}
+EXPORT_SYMBOL(shrinker_debugfs_rename);
+
+void shrinker_debugfs_remove(struct shrinker *shrinker)
+{
+ lockdep_assert_held(&shrinker_rwsem);
+
+ kfree_const(shrinker->name);
+ shrinker->name = NULL;
+
+ if (!shrinker->debugfs_entry)
+ return;
+
+ debugfs_remove_recursive(shrinker->debugfs_entry);
+ ida_free(&shrinker_debugfs_ida, shrinker->debugfs_id);
+}
+
+static int __init shrinker_debugfs_init(void)
+{
+ struct shrinker *shrinker;
+ struct dentry *dentry;
+ int ret = 0;
+
+ dentry = debugfs_create_dir("shrinker", NULL);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+ shrinker_debugfs_root = dentry;
+
+ /* Create debugfs entries for shrinkers registered at boot */
+ down_write(&shrinker_rwsem);
+ list_for_each_entry(shrinker, &shrinker_list, list)
+ if (!shrinker->debugfs_entry) {
+ ret = shrinker_debugfs_add(shrinker);
+ if (ret)
+ break;
+ }
+ up_write(&shrinker_rwsem);
+
+ return ret;
+}
+late_initcall(shrinker_debugfs_init);
diff --git a/mm/slab.c b/mm/slab.c
index 5e73e2d80222..10e96137b44f 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2958,12 +2958,6 @@ direct_grow:
return ac->entry[--ac->avail];
}
-static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
- gfp_t flags)
-{
- might_sleep_if(gfpflags_allow_blocking(flags));
-}
-
#if DEBUG
static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
gfp_t flags, void *objp, unsigned long caller)
@@ -3205,7 +3199,6 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_
if (unlikely(ptr))
goto out_hooks;
- cache_alloc_debugcheck_before(cachep, flags);
local_irq_save(save_flags);
if (nodeid == NUMA_NO_NODE)
@@ -3290,7 +3283,6 @@ slab_alloc(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags,
if (unlikely(objp))
goto out;
- cache_alloc_debugcheck_before(cachep, flags);
local_irq_save(save_flags);
objp = __do_cache_alloc(cachep, flags);
local_irq_restore(save_flags);
@@ -3527,8 +3519,6 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
if (!s)
return 0;
- cache_alloc_debugcheck_before(s, flags);
-
local_irq_disable();
for (i = 0; i < size; i++) {
void *objp = kfence_alloc(s, s->object_size, flags) ?: __do_cache_alloc(s, flags);
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 3f7e4bd34a5b..46ae542118c0 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -27,408 +27,9 @@
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
-#include <linux/pgtable.h>
-#include <linux/bootmem_info.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
-#include <asm/tlbflush.h>
-
-#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
-/**
- * struct vmemmap_remap_walk - walk vmemmap page table
- *
- * @remap_pte: called for each lowest-level entry (PTE).
- * @nr_walked: the number of walked pte.
- * @reuse_page: the page which is reused for the tail vmemmap pages.
- * @reuse_addr: the virtual address of the @reuse_page page.
- * @vmemmap_pages: the list head of the vmemmap pages that can be freed
- * or is mapped from.
- */
-struct vmemmap_remap_walk {
- void (*remap_pte)(pte_t *pte, unsigned long addr,
- struct vmemmap_remap_walk *walk);
- unsigned long nr_walked;
- struct page *reuse_page;
- unsigned long reuse_addr;
- struct list_head *vmemmap_pages;
-};
-
-static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
-{
- pmd_t __pmd;
- int i;
- unsigned long addr = start;
- struct page *page = pmd_page(*pmd);
- pte_t *pgtable = pte_alloc_one_kernel(&init_mm);
-
- if (!pgtable)
- return -ENOMEM;
-
- pmd_populate_kernel(&init_mm, &__pmd, pgtable);
-
- for (i = 0; i < PMD_SIZE / PAGE_SIZE; i++, addr += PAGE_SIZE) {
- pte_t entry, *pte;
- pgprot_t pgprot = PAGE_KERNEL;
-
- entry = mk_pte(page + i, pgprot);
- pte = pte_offset_kernel(&__pmd, addr);
- set_pte_at(&init_mm, addr, pte, entry);
- }
-
- spin_lock(&init_mm.page_table_lock);
- if (likely(pmd_leaf(*pmd))) {
- /*
- * Higher order allocations from buddy allocator must be able to
- * be treated as indepdenent small pages (as they can be freed
- * individually).
- */
- if (!PageReserved(page))
- split_page(page, get_order(PMD_SIZE));
-
- /* Make pte visible before pmd. See comment in pmd_install(). */
- smp_wmb();
- pmd_populate_kernel(&init_mm, pmd, pgtable);
- flush_tlb_kernel_range(start, start + PMD_SIZE);
- } else {
- pte_free_kernel(&init_mm, pgtable);
- }
- spin_unlock(&init_mm.page_table_lock);
-
- return 0;
-}
-
-static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
-{
- int leaf;
-
- spin_lock(&init_mm.page_table_lock);
- leaf = pmd_leaf(*pmd);
- spin_unlock(&init_mm.page_table_lock);
-
- if (!leaf)
- return 0;
-
- return __split_vmemmap_huge_pmd(pmd, start);
-}
-
-static void vmemmap_pte_range(pmd_t *pmd, unsigned long addr,
- unsigned long end,
- struct vmemmap_remap_walk *walk)
-{
- pte_t *pte = pte_offset_kernel(pmd, addr);
-
- /*
- * The reuse_page is found 'first' in table walk before we start
- * remapping (which is calling @walk->remap_pte).
- */
- if (!walk->reuse_page) {
- walk->reuse_page = pte_page(*pte);
- /*
- * Because the reuse address is part of the range that we are
- * walking, skip the reuse address range.
- */
- addr += PAGE_SIZE;
- pte++;
- walk->nr_walked++;
- }
-
- for (; addr != end; addr += PAGE_SIZE, pte++) {
- walk->remap_pte(pte, addr, walk);
- walk->nr_walked++;
- }
-}
-
-static int vmemmap_pmd_range(pud_t *pud, unsigned long addr,
- unsigned long end,
- struct vmemmap_remap_walk *walk)
-{
- pmd_t *pmd;
- unsigned long next;
-
- pmd = pmd_offset(pud, addr);
- do {
- int ret;
-
- ret = split_vmemmap_huge_pmd(pmd, addr & PMD_MASK);
- if (ret)
- return ret;
-
- next = pmd_addr_end(addr, end);
- vmemmap_pte_range(pmd, addr, next, walk);
- } while (pmd++, addr = next, addr != end);
-
- return 0;
-}
-
-static int vmemmap_pud_range(p4d_t *p4d, unsigned long addr,
- unsigned long end,
- struct vmemmap_remap_walk *walk)
-{
- pud_t *pud;
- unsigned long next;
-
- pud = pud_offset(p4d, addr);
- do {
- int ret;
-
- next = pud_addr_end(addr, end);
- ret = vmemmap_pmd_range(pud, addr, next, walk);
- if (ret)
- return ret;
- } while (pud++, addr = next, addr != end);
-
- return 0;
-}
-
-static int vmemmap_p4d_range(pgd_t *pgd, unsigned long addr,
- unsigned long end,
- struct vmemmap_remap_walk *walk)
-{
- p4d_t *p4d;
- unsigned long next;
-
- p4d = p4d_offset(pgd, addr);
- do {
- int ret;
-
- next = p4d_addr_end(addr, end);
- ret = vmemmap_pud_range(p4d, addr, next, walk);
- if (ret)
- return ret;
- } while (p4d++, addr = next, addr != end);
-
- return 0;
-}
-
-static int vmemmap_remap_range(unsigned long start, unsigned long end,
- struct vmemmap_remap_walk *walk)
-{
- unsigned long addr = start;
- unsigned long next;
- pgd_t *pgd;
-
- VM_BUG_ON(!IS_ALIGNED(start, PAGE_SIZE));
- VM_BUG_ON(!IS_ALIGNED(end, PAGE_SIZE));
-
- pgd = pgd_offset_k(addr);
- do {
- int ret;
-
- next = pgd_addr_end(addr, end);
- ret = vmemmap_p4d_range(pgd, addr, next, walk);
- if (ret)
- return ret;
- } while (pgd++, addr = next, addr != end);
-
- /*
- * We only change the mapping of the vmemmap virtual address range
- * [@start + PAGE_SIZE, end), so we only need to flush the TLB which
- * belongs to the range.
- */
- flush_tlb_kernel_range(start + PAGE_SIZE, end);
-
- return 0;
-}
-
-/*
- * Free a vmemmap page. A vmemmap page can be allocated from the memblock
- * allocator or buddy allocator. If the PG_reserved flag is set, it means
- * that it allocated from the memblock allocator, just free it via the
- * free_bootmem_page(). Otherwise, use __free_page().
- */
-static inline void free_vmemmap_page(struct page *page)
-{
- if (PageReserved(page))
- free_bootmem_page(page);
- else
- __free_page(page);
-}
-
-/* Free a list of the vmemmap pages */
-static void free_vmemmap_page_list(struct list_head *list)
-{
- struct page *page, *next;
-
- list_for_each_entry_safe(page, next, list, lru) {
- list_del(&page->lru);
- free_vmemmap_page(page);
- }
-}
-
-static void vmemmap_remap_pte(pte_t *pte, unsigned long addr,
- struct vmemmap_remap_walk *walk)
-{
- /*
- * Remap the tail pages as read-only to catch illegal write operation
- * to the tail pages.
- */
- pgprot_t pgprot = PAGE_KERNEL_RO;
- pte_t entry = mk_pte(walk->reuse_page, pgprot);
- struct page *page = pte_page(*pte);
-
- list_add_tail(&page->lru, walk->vmemmap_pages);
- set_pte_at(&init_mm, addr, pte, entry);
-}
-
-/*
- * How many struct page structs need to be reset. When we reuse the head
- * struct page, the special metadata (e.g. page->flags or page->mapping)
- * cannot copy to the tail struct page structs. The invalid value will be
- * checked in the free_tail_pages_check(). In order to avoid the message
- * of "corrupted mapping in tail page". We need to reset at least 3 (one
- * head struct page struct and two tail struct page structs) struct page
- * structs.
- */
-#define NR_RESET_STRUCT_PAGE 3
-
-static inline void reset_struct_pages(struct page *start)
-{
- int i;
- struct page *from = start + NR_RESET_STRUCT_PAGE;
-
- for (i = 0; i < NR_RESET_STRUCT_PAGE; i++)
- memcpy(start + i, from, sizeof(*from));
-}
-
-static void vmemmap_restore_pte(pte_t *pte, unsigned long addr,
- struct vmemmap_remap_walk *walk)
-{
- pgprot_t pgprot = PAGE_KERNEL;
- struct page *page;
- void *to;
-
- BUG_ON(pte_page(*pte) != walk->reuse_page);
-
- page = list_first_entry(walk->vmemmap_pages, struct page, lru);
- list_del(&page->lru);
- to = page_to_virt(page);
- copy_page(to, (void *)walk->reuse_addr);
- reset_struct_pages(to);
-
- set_pte_at(&init_mm, addr, pte, mk_pte(page, pgprot));
-}
-
-/**
- * vmemmap_remap_free - remap the vmemmap virtual address range [@start, @end)
- * to the page which @reuse is mapped to, then free vmemmap
- * which the range are mapped to.
- * @start: start address of the vmemmap virtual address range that we want
- * to remap.
- * @end: end address of the vmemmap virtual address range that we want to
- * remap.
- * @reuse: reuse address.
- *
- * Return: %0 on success, negative error code otherwise.
- */
-int vmemmap_remap_free(unsigned long start, unsigned long end,
- unsigned long reuse)
-{
- int ret;
- LIST_HEAD(vmemmap_pages);
- struct vmemmap_remap_walk walk = {
- .remap_pte = vmemmap_remap_pte,
- .reuse_addr = reuse,
- .vmemmap_pages = &vmemmap_pages,
- };
-
- /*
- * In order to make remapping routine most efficient for the huge pages,
- * the routine of vmemmap page table walking has the following rules
- * (see more details from the vmemmap_pte_range()):
- *
- * - The range [@start, @end) and the range [@reuse, @reuse + PAGE_SIZE)
- * should be continuous.
- * - The @reuse address is part of the range [@reuse, @end) that we are
- * walking which is passed to vmemmap_remap_range().
- * - The @reuse address is the first in the complete range.
- *
- * So we need to make sure that @start and @reuse meet the above rules.
- */
- BUG_ON(start - reuse != PAGE_SIZE);
-
- mmap_read_lock(&init_mm);
- ret = vmemmap_remap_range(reuse, end, &walk);
- if (ret && walk.nr_walked) {
- end = reuse + walk.nr_walked * PAGE_SIZE;
- /*
- * vmemmap_pages contains pages from the previous
- * vmemmap_remap_range call which failed. These
- * are pages which were removed from the vmemmap.
- * They will be restored in the following call.
- */
- walk = (struct vmemmap_remap_walk) {
- .remap_pte = vmemmap_restore_pte,
- .reuse_addr = reuse,
- .vmemmap_pages = &vmemmap_pages,
- };
-
- vmemmap_remap_range(reuse, end, &walk);
- }
- mmap_read_unlock(&init_mm);
-
- free_vmemmap_page_list(&vmemmap_pages);
-
- return ret;
-}
-
-static int alloc_vmemmap_page_list(unsigned long start, unsigned long end,
- gfp_t gfp_mask, struct list_head *list)
-{
- unsigned long nr_pages = (end - start) >> PAGE_SHIFT;
- int nid = page_to_nid((struct page *)start);
- struct page *page, *next;
-
- while (nr_pages--) {
- page = alloc_pages_node(nid, gfp_mask, 0);
- if (!page)
- goto out;
- list_add_tail(&page->lru, list);
- }
-
- return 0;
-out:
- list_for_each_entry_safe(page, next, list, lru)
- __free_pages(page, 0);
- return -ENOMEM;
-}
-
-/**
- * vmemmap_remap_alloc - remap the vmemmap virtual address range [@start, end)
- * to the page which is from the @vmemmap_pages
- * respectively.
- * @start: start address of the vmemmap virtual address range that we want
- * to remap.
- * @end: end address of the vmemmap virtual address range that we want to
- * remap.
- * @reuse: reuse address.
- * @gfp_mask: GFP flag for allocating vmemmap pages.
- *
- * Return: %0 on success, negative error code otherwise.
- */
-int vmemmap_remap_alloc(unsigned long start, unsigned long end,
- unsigned long reuse, gfp_t gfp_mask)
-{
- LIST_HEAD(vmemmap_pages);
- struct vmemmap_remap_walk walk = {
- .remap_pte = vmemmap_restore_pte,
- .reuse_addr = reuse,
- .vmemmap_pages = &vmemmap_pages,
- };
-
- /* See the comment in the vmemmap_remap_free(). */
- BUG_ON(start - reuse != PAGE_SIZE);
-
- if (alloc_vmemmap_page_list(start, end, gfp_mask, &vmemmap_pages))
- return -ENOMEM;
-
- mmap_read_lock(&init_mm);
- vmemmap_remap_range(reuse, end, &walk);
- mmap_read_unlock(&init_mm);
-
- return 0;
-}
-#endif /* CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP */
/*
* Allocate a block of memory to be used to back the virtual memory map
@@ -556,7 +157,7 @@ pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
} else {
/*
* When a PTE/PMD entry is freed from the init_mm
- * there's a a free_pages() call to this page allocated
+ * there's a free_pages() call to this page allocated
* above. Thus this get_page() is paired with the
* put_page_testzero() on the freeing path.
* This can only called by certain ZONE_DEVICE path,
@@ -745,7 +346,7 @@ static int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn,
size = min(end - start, pgmap_vmemmap_nr(pgmap) * sizeof(struct page));
for (addr = start; addr < end; addr += size) {
- unsigned long next = addr, last = addr + size;
+ unsigned long next, last = addr + size;
/* Populate the head page vmemmap page */
pte = vmemmap_populate_address(addr, node, NULL, NULL);
@@ -760,7 +361,7 @@ static int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn,
/*
* Reuse the previous page for the rest of tail pages
- * See layout diagram in Documentation/vm/vmemmap_dedup.rst
+ * See layout diagram in Documentation/mm/vmemmap_dedup.rst
*/
next += PAGE_SIZE;
rc = vmemmap_populate_range(next, last, node, NULL,
diff --git a/mm/sparse.c b/mm/sparse.c
index cb3bfae64036..e5a8a3a0edd7 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -281,7 +281,7 @@ static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long p
{
unsigned long coded_mem_map =
(unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
- BUILD_BUG_ON(SECTION_MAP_LAST_BIT > (1UL<<PFN_SECTION_SHIFT));
+ BUILD_BUG_ON(SECTION_MAP_LAST_BIT > PFN_SECTION_SHIFT);
BUG_ON(coded_mem_map & ~SECTION_MAP_MASK);
return coded_mem_map;
}
diff --git a/mm/swap.c b/mm/swap.c
index 275a4ea1bc66..9cee7f6a3809 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -46,30 +46,30 @@
/* How many pages do we try to swap or page in/out together? */
int page_cluster;
-/* Protecting only lru_rotate.pvec which requires disabling interrupts */
+/* Protecting only lru_rotate.fbatch which requires disabling interrupts */
struct lru_rotate {
local_lock_t lock;
- struct pagevec pvec;
+ struct folio_batch fbatch;
};
static DEFINE_PER_CPU(struct lru_rotate, lru_rotate) = {
.lock = INIT_LOCAL_LOCK(lock),
};
/*
- * The following struct pagevec are grouped together because they are protected
+ * The following folio batches are grouped together because they are protected
* by disabling preemption (and interrupts remain enabled).
*/
-struct lru_pvecs {
+struct cpu_fbatches {
local_lock_t lock;
- struct pagevec lru_add;
- struct pagevec lru_deactivate_file;
- struct pagevec lru_deactivate;
- struct pagevec lru_lazyfree;
+ struct folio_batch lru_add;
+ struct folio_batch lru_deactivate_file;
+ struct folio_batch lru_deactivate;
+ struct folio_batch lru_lazyfree;
#ifdef CONFIG_SMP
- struct pagevec activate_page;
+ struct folio_batch activate;
#endif
};
-static DEFINE_PER_CPU(struct lru_pvecs, lru_pvecs) = {
+static DEFINE_PER_CPU(struct cpu_fbatches, cpu_fbatches) = {
.lock = INIT_LOCAL_LOCK(lock),
};
@@ -77,36 +77,35 @@ static DEFINE_PER_CPU(struct lru_pvecs, lru_pvecs) = {
* This path almost never happens for VM activity - pages are normally freed
* via pagevecs. But it gets used by networking - and for compound pages.
*/
-static void __page_cache_release(struct page *page)
+static void __page_cache_release(struct folio *folio)
{
- if (PageLRU(page)) {
- struct folio *folio = page_folio(page);
+ if (folio_test_lru(folio)) {
struct lruvec *lruvec;
unsigned long flags;
lruvec = folio_lruvec_lock_irqsave(folio, &flags);
- del_page_from_lru_list(page, lruvec);
- __clear_page_lru_flags(page);
+ lruvec_del_folio(lruvec, folio);
+ __folio_clear_lru_flags(folio);
unlock_page_lruvec_irqrestore(lruvec, flags);
}
- /* See comment on PageMlocked in release_pages() */
- if (unlikely(PageMlocked(page))) {
- int nr_pages = thp_nr_pages(page);
+ /* See comment on folio_test_mlocked in release_pages() */
+ if (unlikely(folio_test_mlocked(folio))) {
+ long nr_pages = folio_nr_pages(folio);
- __ClearPageMlocked(page);
- mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
+ __folio_clear_mlocked(folio);
+ zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages);
count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);
}
}
-static void __put_single_page(struct page *page)
+static void __folio_put_small(struct folio *folio)
{
- __page_cache_release(page);
- mem_cgroup_uncharge(page_folio(page));
- free_unref_page(page, 0);
+ __page_cache_release(folio);
+ mem_cgroup_uncharge(folio);
+ free_unref_page(&folio->page, 0);
}
-static void __put_compound_page(struct page *page)
+static void __folio_put_large(struct folio *folio)
{
/*
* __page_cache_release() is supposed to be called for thp, not for
@@ -114,21 +113,21 @@ static void __put_compound_page(struct page *page)
* (it's never listed to any LRU lists) and no memcg routines should
* be called for hugetlb (it has a separate hugetlb_cgroup.)
*/
- if (!PageHuge(page))
- __page_cache_release(page);
- destroy_compound_page(page);
+ if (!folio_test_hugetlb(folio))
+ __page_cache_release(folio);
+ destroy_large_folio(folio);
}
-void __put_page(struct page *page)
+void __folio_put(struct folio *folio)
{
- if (unlikely(is_zone_device_page(page)))
- free_zone_device_page(page);
- else if (unlikely(PageCompound(page)))
- __put_compound_page(page);
+ if (unlikely(folio_is_zone_device(folio)))
+ free_zone_device_page(&folio->page);
+ else if (unlikely(folio_test_large(folio)))
+ __folio_put_large(folio);
else
- __put_single_page(page);
+ __folio_put_small(folio);
}
-EXPORT_SYMBOL(__put_page);
+EXPORT_SYMBOL(__folio_put);
/**
* put_pages_list() - release a list of pages
@@ -138,19 +137,19 @@ EXPORT_SYMBOL(__put_page);
*/
void put_pages_list(struct list_head *pages)
{
- struct page *page, *next;
+ struct folio *folio, *next;
- list_for_each_entry_safe(page, next, pages, lru) {
- if (!put_page_testzero(page)) {
- list_del(&page->lru);
+ list_for_each_entry_safe(folio, next, pages, lru) {
+ if (!folio_put_testzero(folio)) {
+ list_del(&folio->lru);
continue;
}
- if (PageHead(page)) {
- list_del(&page->lru);
- __put_compound_page(page);
+ if (folio_test_large(folio)) {
+ list_del(&folio->lru);
+ __folio_put_large(folio);
continue;
}
- /* Cannot be PageLRU because it's passed to us using the lru */
+ /* LRU flag must be clear because it's passed using the lru */
}
free_unref_page_list(pages);
@@ -188,36 +187,84 @@ int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
}
EXPORT_SYMBOL_GPL(get_kernel_pages);
-static void pagevec_lru_move_fn(struct pagevec *pvec,
- void (*move_fn)(struct page *page, struct lruvec *lruvec))
+typedef void (*move_fn_t)(struct lruvec *lruvec, struct folio *folio);
+
+static void lru_add_fn(struct lruvec *lruvec, struct folio *folio)
+{
+ int was_unevictable = folio_test_clear_unevictable(folio);
+ long nr_pages = folio_nr_pages(folio);
+
+ VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
+
+ /*
+ * Is an smp_mb__after_atomic() still required here, before
+ * folio_evictable() tests the mlocked flag, to rule out the possibility
+ * of stranding an evictable folio on an unevictable LRU? I think
+ * not, because __munlock_page() only clears the mlocked flag
+ * while the LRU lock is held.
+ *
+ * (That is not true of __page_cache_release(), and not necessarily
+ * true of release_pages(): but those only clear the mlocked flag after
+ * folio_put_testzero() has excluded any other users of the folio.)
+ */
+ if (folio_evictable(folio)) {
+ if (was_unevictable)
+ __count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
+ } else {
+ folio_clear_active(folio);
+ folio_set_unevictable(folio);
+ /*
+ * folio->mlock_count = !!folio_test_mlocked(folio)?
+ * But that leaves __mlock_page() in doubt whether another
+ * actor has already counted the mlock or not. Err on the
+ * safe side, underestimate, let page reclaim fix it, rather
+ * than leaving a page on the unevictable LRU indefinitely.
+ */
+ folio->mlock_count = 0;
+ if (!was_unevictable)
+ __count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
+ }
+
+ lruvec_add_folio(lruvec, folio);
+ trace_mm_lru_insertion(folio);
+}
+
+static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
{
int i;
struct lruvec *lruvec = NULL;
unsigned long flags = 0;
- for (i = 0; i < pagevec_count(pvec); i++) {
- struct page *page = pvec->pages[i];
- struct folio *folio = page_folio(page);
+ for (i = 0; i < folio_batch_count(fbatch); i++) {
+ struct folio *folio = fbatch->folios[i];
- /* block memcg migration during page moving between lru */
- if (!TestClearPageLRU(page))
+ /* block memcg migration while the folio moves between lru */
+ if (move_fn != lru_add_fn && !folio_test_clear_lru(folio))
continue;
lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
- (*move_fn)(page, lruvec);
+ move_fn(lruvec, folio);
- SetPageLRU(page);
+ folio_set_lru(folio);
}
+
if (lruvec)
unlock_page_lruvec_irqrestore(lruvec, flags);
- release_pages(pvec->pages, pvec->nr);
- pagevec_reinit(pvec);
+ folios_put(fbatch->folios, folio_batch_count(fbatch));
+ folio_batch_init(fbatch);
}
-static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec)
+static void folio_batch_add_and_move(struct folio_batch *fbatch,
+ struct folio *folio, move_fn_t move_fn)
{
- struct folio *folio = page_folio(page);
+ if (folio_batch_add(fbatch, folio) && !folio_test_large(folio) &&
+ !lru_cache_disabled())
+ return;
+ folio_batch_move_lru(fbatch, move_fn);
+}
+static void lru_move_tail_fn(struct lruvec *lruvec, struct folio *folio)
+{
if (!folio_test_unevictable(folio)) {
lruvec_del_folio(lruvec, folio);
folio_clear_active(folio);
@@ -226,18 +273,6 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec)
}
}
-/* return true if pagevec needs to drain */
-static bool pagevec_add_and_need_flush(struct pagevec *pvec, struct page *page)
-{
- bool ret = false;
-
- if (!pagevec_add(pvec, page) || PageCompound(page) ||
- lru_cache_disabled())
- ret = true;
-
- return ret;
-}
-
/*
* Writeback is about to end against a folio which has been marked for
* immediate reclaim. If it still appears to be reclaimable, move it
@@ -249,14 +284,13 @@ void folio_rotate_reclaimable(struct folio *folio)
{
if (!folio_test_locked(folio) && !folio_test_dirty(folio) &&
!folio_test_unevictable(folio) && folio_test_lru(folio)) {
- struct pagevec *pvec;
+ struct folio_batch *fbatch;
unsigned long flags;
folio_get(folio);
local_lock_irqsave(&lru_rotate.lock, flags);
- pvec = this_cpu_ptr(&lru_rotate.pvec);
- if (pagevec_add_and_need_flush(pvec, &folio->page))
- pagevec_lru_move_fn(pvec, pagevec_move_tail_fn);
+ fbatch = this_cpu_ptr(&lru_rotate.fbatch);
+ folio_batch_add_and_move(fbatch, folio, lru_move_tail_fn);
local_unlock_irqrestore(&lru_rotate.lock, flags);
}
}
@@ -307,7 +341,7 @@ void lru_note_cost_folio(struct folio *folio)
folio_nr_pages(folio));
}
-static void __folio_activate(struct folio *folio, struct lruvec *lruvec)
+static void folio_activate_fn(struct lruvec *lruvec, struct folio *folio)
{
if (!folio_test_active(folio) && !folio_test_unevictable(folio)) {
long nr_pages = folio_nr_pages(folio);
@@ -324,41 +358,30 @@ static void __folio_activate(struct folio *folio, struct lruvec *lruvec)
}
#ifdef CONFIG_SMP
-static void __activate_page(struct page *page, struct lruvec *lruvec)
-{
- return __folio_activate(page_folio(page), lruvec);
-}
-
-static void activate_page_drain(int cpu)
+static void folio_activate_drain(int cpu)
{
- struct pagevec *pvec = &per_cpu(lru_pvecs.activate_page, cpu);
+ struct folio_batch *fbatch = &per_cpu(cpu_fbatches.activate, cpu);
- if (pagevec_count(pvec))
- pagevec_lru_move_fn(pvec, __activate_page);
-}
-
-static bool need_activate_page_drain(int cpu)
-{
- return pagevec_count(&per_cpu(lru_pvecs.activate_page, cpu)) != 0;
+ if (folio_batch_count(fbatch))
+ folio_batch_move_lru(fbatch, folio_activate_fn);
}
static void folio_activate(struct folio *folio)
{
if (folio_test_lru(folio) && !folio_test_active(folio) &&
!folio_test_unevictable(folio)) {
- struct pagevec *pvec;
+ struct folio_batch *fbatch;
folio_get(folio);
- local_lock(&lru_pvecs.lock);
- pvec = this_cpu_ptr(&lru_pvecs.activate_page);
- if (pagevec_add_and_need_flush(pvec, &folio->page))
- pagevec_lru_move_fn(pvec, __activate_page);
- local_unlock(&lru_pvecs.lock);
+ local_lock(&cpu_fbatches.lock);
+ fbatch = this_cpu_ptr(&cpu_fbatches.activate);
+ folio_batch_add_and_move(fbatch, folio, folio_activate_fn);
+ local_unlock(&cpu_fbatches.lock);
}
}
#else
-static inline void activate_page_drain(int cpu)
+static inline void folio_activate_drain(int cpu)
{
}
@@ -368,7 +391,7 @@ static void folio_activate(struct folio *folio)
if (folio_test_clear_lru(folio)) {
lruvec = folio_lruvec_lock_irq(folio);
- __folio_activate(folio, lruvec);
+ folio_activate_fn(lruvec, folio);
unlock_page_lruvec_irq(lruvec);
folio_set_lru(folio);
}
@@ -377,32 +400,32 @@ static void folio_activate(struct folio *folio)
static void __lru_cache_activate_folio(struct folio *folio)
{
- struct pagevec *pvec;
+ struct folio_batch *fbatch;
int i;
- local_lock(&lru_pvecs.lock);
- pvec = this_cpu_ptr(&lru_pvecs.lru_add);
+ local_lock(&cpu_fbatches.lock);
+ fbatch = this_cpu_ptr(&cpu_fbatches.lru_add);
/*
- * Search backwards on the optimistic assumption that the page being
- * activated has just been added to this pagevec. Note that only
- * the local pagevec is examined as a !PageLRU page could be in the
+ * Search backwards on the optimistic assumption that the folio being
+ * activated has just been added to this batch. Note that only
+ * the local batch is examined as a !LRU folio could be in the
* process of being released, reclaimed, migrated or on a remote
- * pagevec that is currently being drained. Furthermore, marking
- * a remote pagevec's page PageActive potentially hits a race where
- * a page is marked PageActive just after it is added to the inactive
+ * batch that is currently being drained. Furthermore, marking
+ * a remote batch's folio active potentially hits a race where
+ * a folio is marked active just after it is added to the inactive
* list causing accounting errors and BUG_ON checks to trigger.
*/
- for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
- struct page *pagevec_page = pvec->pages[i];
+ for (i = folio_batch_count(fbatch) - 1; i >= 0; i--) {
+ struct folio *batch_folio = fbatch->folios[i];
- if (pagevec_page == &folio->page) {
+ if (batch_folio == folio) {
folio_set_active(folio);
break;
}
}
- local_unlock(&lru_pvecs.lock);
+ local_unlock(&cpu_fbatches.lock);
}
/*
@@ -427,9 +450,9 @@ void folio_mark_accessed(struct folio *folio)
*/
} else if (!folio_test_active(folio)) {
/*
- * If the page is on the LRU, queue it for activation via
- * lru_pvecs.activate_page. Otherwise, assume the page is on a
- * pagevec, mark it active and it'll be moved to the active
+ * If the folio is on the LRU, queue it for activation via
+ * cpu_fbatches.activate. Otherwise, assume the folio is in a
+ * folio_batch, mark it active and it'll be moved to the active
* LRU on the next drain.
*/
if (folio_test_lru(folio))
@@ -450,22 +473,22 @@ EXPORT_SYMBOL(folio_mark_accessed);
*
* Queue the folio for addition to the LRU. The decision on whether
* to add the page to the [in]active [file|anon] list is deferred until the
- * pagevec is drained. This gives a chance for the caller of folio_add_lru()
+ * folio_batch is drained. This gives a chance for the caller of folio_add_lru()
* have the folio added to the active list using folio_mark_accessed().
*/
void folio_add_lru(struct folio *folio)
{
- struct pagevec *pvec;
+ struct folio_batch *fbatch;
- VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);
+ VM_BUG_ON_FOLIO(folio_test_active(folio) &&
+ folio_test_unevictable(folio), folio);
VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
folio_get(folio);
- local_lock(&lru_pvecs.lock);
- pvec = this_cpu_ptr(&lru_pvecs.lru_add);
- if (pagevec_add_and_need_flush(pvec, &folio->page))
- __pagevec_lru_add(pvec);
- local_unlock(&lru_pvecs.lock);
+ local_lock(&cpu_fbatches.lock);
+ fbatch = this_cpu_ptr(&cpu_fbatches.lru_add);
+ folio_batch_add_and_move(fbatch, folio, lru_add_fn);
+ local_unlock(&cpu_fbatches.lock);
}
EXPORT_SYMBOL(folio_add_lru);
@@ -489,56 +512,57 @@ void lru_cache_add_inactive_or_unevictable(struct page *page,
}
/*
- * If the page can not be invalidated, it is moved to the
+ * If the folio cannot be invalidated, it is moved to the
* inactive list to speed up its reclaim. It is moved to the
* head of the list, rather than the tail, to give the flusher
* threads some time to write it out, as this is much more
* effective than the single-page writeout from reclaim.
*
- * If the page isn't page_mapped and dirty/writeback, the page
- * could reclaim asap using PG_reclaim.
+ * If the folio isn't mapped and dirty/writeback, the folio
+ * could be reclaimed asap using the reclaim flag.
*
- * 1. active, mapped page -> none
- * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
- * 3. inactive, mapped page -> none
- * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
+ * 1. active, mapped folio -> none
+ * 2. active, dirty/writeback folio -> inactive, head, reclaim
+ * 3. inactive, mapped folio -> none
+ * 4. inactive, dirty/writeback folio -> inactive, head, reclaim
* 5. inactive, clean -> inactive, tail
* 6. Others -> none
*
- * In 4, why it moves inactive's head, the VM expects the page would
- * be write it out by flusher threads as this is much more effective
+ * In 4, it moves to the head of the inactive list so the folio is
+ * written out by flusher threads as this is much more efficient
* than the single-page writeout from reclaim.
*/
-static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
+static void lru_deactivate_file_fn(struct lruvec *lruvec, struct folio *folio)
{
- bool active = PageActive(page);
- int nr_pages = thp_nr_pages(page);
+ bool active = folio_test_active(folio);
+ long nr_pages = folio_nr_pages(folio);
- if (PageUnevictable(page))
+ if (folio_test_unevictable(folio))
return;
- /* Some processes are using the page */
- if (page_mapped(page))
+ /* Some processes are using the folio */
+ if (folio_mapped(folio))
return;
- del_page_from_lru_list(page, lruvec);
- ClearPageActive(page);
- ClearPageReferenced(page);
+ lruvec_del_folio(lruvec, folio);
+ folio_clear_active(folio);
+ folio_clear_referenced(folio);
- if (PageWriteback(page) || PageDirty(page)) {
+ if (folio_test_writeback(folio) || folio_test_dirty(folio)) {
/*
- * PG_reclaim could be raced with end_page_writeback
- * It can make readahead confusing. But race window
- * is _really_ small and it's non-critical problem.
+ * Setting the reclaim flag could race with
+ * folio_end_writeback() and confuse readahead. But the
+ * race window is _really_ small and it's not a critical
+ * problem.
*/
- add_page_to_lru_list(page, lruvec);
- SetPageReclaim(page);
+ lruvec_add_folio(lruvec, folio);
+ folio_set_reclaim(folio);
} else {
/*
- * The page's writeback ends up during pagevec
- * We move that page into tail of inactive.
+ * The folio's writeback ended while it was in the batch.
+ * We move that folio to the tail of the inactive list.
*/
- add_page_to_lru_list_tail(page, lruvec);
+ lruvec_add_folio_tail(lruvec, folio);
__count_vm_events(PGROTATED, nr_pages);
}
@@ -549,15 +573,15 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
}
}
-static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec)
+static void lru_deactivate_fn(struct lruvec *lruvec, struct folio *folio)
{
- if (PageActive(page) && !PageUnevictable(page)) {
- int nr_pages = thp_nr_pages(page);
+ if (folio_test_active(folio) && !folio_test_unevictable(folio)) {
+ long nr_pages = folio_nr_pages(folio);
- del_page_from_lru_list(page, lruvec);
- ClearPageActive(page);
- ClearPageReferenced(page);
- add_page_to_lru_list(page, lruvec);
+ lruvec_del_folio(lruvec, folio);
+ folio_clear_active(folio);
+ folio_clear_referenced(folio);
+ lruvec_add_folio(lruvec, folio);
__count_vm_events(PGDEACTIVATE, nr_pages);
__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
@@ -565,22 +589,22 @@ static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec)
}
}
-static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec)
+static void lru_lazyfree_fn(struct lruvec *lruvec, struct folio *folio)
{
- if (PageAnon(page) && PageSwapBacked(page) &&
- !PageSwapCache(page) && !PageUnevictable(page)) {
- int nr_pages = thp_nr_pages(page);
+ if (folio_test_anon(folio) && folio_test_swapbacked(folio) &&
+ !folio_test_swapcache(folio) && !folio_test_unevictable(folio)) {
+ long nr_pages = folio_nr_pages(folio);
- del_page_from_lru_list(page, lruvec);
- ClearPageActive(page);
- ClearPageReferenced(page);
+ lruvec_del_folio(lruvec, folio);
+ folio_clear_active(folio);
+ folio_clear_referenced(folio);
/*
- * Lazyfree pages are clean anonymous pages. They have
- * PG_swapbacked flag cleared, to distinguish them from normal
- * anonymous pages
+ * Lazyfree folios are clean anonymous folios. They have
+ * the swapbacked flag cleared, to distinguish them from normal
+ * anonymous folios
*/
- ClearPageSwapBacked(page);
- add_page_to_lru_list(page, lruvec);
+ folio_clear_swapbacked(folio);
+ lruvec_add_folio(lruvec, folio);
__count_vm_events(PGLAZYFREE, nr_pages);
__count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE,
@@ -589,71 +613,67 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec)
}
/*
- * Drain pages out of the cpu's pagevecs.
+ * Drain pages out of the cpu's folio_batch.
* Either "cpu" is the current CPU, and preemption has already been
* disabled; or "cpu" is being hot-unplugged, and is already dead.
*/
void lru_add_drain_cpu(int cpu)
{
- struct pagevec *pvec = &per_cpu(lru_pvecs.lru_add, cpu);
+ struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu);
+ struct folio_batch *fbatch = &fbatches->lru_add;
- if (pagevec_count(pvec))
- __pagevec_lru_add(pvec);
+ if (folio_batch_count(fbatch))
+ folio_batch_move_lru(fbatch, lru_add_fn);
- pvec = &per_cpu(lru_rotate.pvec, cpu);
+ fbatch = &per_cpu(lru_rotate.fbatch, cpu);
/* Disabling interrupts below acts as a compiler barrier. */
- if (data_race(pagevec_count(pvec))) {
+ if (data_race(folio_batch_count(fbatch))) {
unsigned long flags;
/* No harm done if a racing interrupt already did this */
local_lock_irqsave(&lru_rotate.lock, flags);
- pagevec_lru_move_fn(pvec, pagevec_move_tail_fn);
+ folio_batch_move_lru(fbatch, lru_move_tail_fn);
local_unlock_irqrestore(&lru_rotate.lock, flags);
}
- pvec = &per_cpu(lru_pvecs.lru_deactivate_file, cpu);
- if (pagevec_count(pvec))
- pagevec_lru_move_fn(pvec, lru_deactivate_file_fn);
+ fbatch = &fbatches->lru_deactivate_file;
+ if (folio_batch_count(fbatch))
+ folio_batch_move_lru(fbatch, lru_deactivate_file_fn);
- pvec = &per_cpu(lru_pvecs.lru_deactivate, cpu);
- if (pagevec_count(pvec))
- pagevec_lru_move_fn(pvec, lru_deactivate_fn);
+ fbatch = &fbatches->lru_deactivate;
+ if (folio_batch_count(fbatch))
+ folio_batch_move_lru(fbatch, lru_deactivate_fn);
- pvec = &per_cpu(lru_pvecs.lru_lazyfree, cpu);
- if (pagevec_count(pvec))
- pagevec_lru_move_fn(pvec, lru_lazyfree_fn);
+ fbatch = &fbatches->lru_lazyfree;
+ if (folio_batch_count(fbatch))
+ folio_batch_move_lru(fbatch, lru_lazyfree_fn);
- activate_page_drain(cpu);
+ folio_activate_drain(cpu);
}
/**
- * deactivate_file_folio() - Forcefully deactivate a file folio.
+ * deactivate_file_folio() - Deactivate a file folio.
* @folio: Folio to deactivate.
*
* This function hints to the VM that @folio is a good reclaim candidate,
* for example if its invalidation fails due to the folio being dirty
* or under writeback.
*
- * Context: Caller holds a reference on the page.
+ * Context: Caller holds a reference on the folio.
*/
void deactivate_file_folio(struct folio *folio)
{
- struct pagevec *pvec;
+ struct folio_batch *fbatch;
- /*
- * In a workload with many unevictable pages such as mprotect,
- * unevictable folio deactivation for accelerating reclaim is pointless.
- */
+ /* Deactivating an unevictable folio will not accelerate reclaim */
if (folio_test_unevictable(folio))
return;
folio_get(folio);
- local_lock(&lru_pvecs.lock);
- pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate_file);
-
- if (pagevec_add_and_need_flush(pvec, &folio->page))
- pagevec_lru_move_fn(pvec, lru_deactivate_file_fn);
- local_unlock(&lru_pvecs.lock);
+ local_lock(&cpu_fbatches.lock);
+ fbatch = this_cpu_ptr(&cpu_fbatches.lru_deactivate_file);
+ folio_batch_add_and_move(fbatch, folio, lru_deactivate_file_fn);
+ local_unlock(&cpu_fbatches.lock);
}
/*
@@ -666,15 +686,17 @@ void deactivate_file_folio(struct folio *folio)
*/
void deactivate_page(struct page *page)
{
- if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
- struct pagevec *pvec;
-
- local_lock(&lru_pvecs.lock);
- pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate);
- get_page(page);
- if (pagevec_add_and_need_flush(pvec, page))
- pagevec_lru_move_fn(pvec, lru_deactivate_fn);
- local_unlock(&lru_pvecs.lock);
+ struct folio *folio = page_folio(page);
+
+ if (folio_test_lru(folio) && folio_test_active(folio) &&
+ !folio_test_unevictable(folio)) {
+ struct folio_batch *fbatch;
+
+ folio_get(folio);
+ local_lock(&cpu_fbatches.lock);
+ fbatch = this_cpu_ptr(&cpu_fbatches.lru_deactivate);
+ folio_batch_add_and_move(fbatch, folio, lru_deactivate_fn);
+ local_unlock(&cpu_fbatches.lock);
}
}
@@ -687,24 +709,26 @@ void deactivate_page(struct page *page)
*/
void mark_page_lazyfree(struct page *page)
{
- if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
- !PageSwapCache(page) && !PageUnevictable(page)) {
- struct pagevec *pvec;
-
- local_lock(&lru_pvecs.lock);
- pvec = this_cpu_ptr(&lru_pvecs.lru_lazyfree);
- get_page(page);
- if (pagevec_add_and_need_flush(pvec, page))
- pagevec_lru_move_fn(pvec, lru_lazyfree_fn);
- local_unlock(&lru_pvecs.lock);
+ struct folio *folio = page_folio(page);
+
+ if (folio_test_lru(folio) && folio_test_anon(folio) &&
+ folio_test_swapbacked(folio) && !folio_test_swapcache(folio) &&
+ !folio_test_unevictable(folio)) {
+ struct folio_batch *fbatch;
+
+ folio_get(folio);
+ local_lock(&cpu_fbatches.lock);
+ fbatch = this_cpu_ptr(&cpu_fbatches.lru_lazyfree);
+ folio_batch_add_and_move(fbatch, folio, lru_lazyfree_fn);
+ local_unlock(&cpu_fbatches.lock);
}
}
void lru_add_drain(void)
{
- local_lock(&lru_pvecs.lock);
+ local_lock(&cpu_fbatches.lock);
lru_add_drain_cpu(smp_processor_id());
- local_unlock(&lru_pvecs.lock);
+ local_unlock(&cpu_fbatches.lock);
mlock_page_drain_local();
}
@@ -716,19 +740,19 @@ void lru_add_drain(void)
*/
static void lru_add_and_bh_lrus_drain(void)
{
- local_lock(&lru_pvecs.lock);
+ local_lock(&cpu_fbatches.lock);
lru_add_drain_cpu(smp_processor_id());
- local_unlock(&lru_pvecs.lock);
+ local_unlock(&cpu_fbatches.lock);
invalidate_bh_lrus_cpu();
mlock_page_drain_local();
}
void lru_add_drain_cpu_zone(struct zone *zone)
{
- local_lock(&lru_pvecs.lock);
+ local_lock(&cpu_fbatches.lock);
lru_add_drain_cpu(smp_processor_id());
drain_local_pages(zone);
- local_unlock(&lru_pvecs.lock);
+ local_unlock(&cpu_fbatches.lock);
mlock_page_drain_local();
}
@@ -741,6 +765,21 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy)
lru_add_and_bh_lrus_drain();
}
+static bool cpu_needs_drain(unsigned int cpu)
+{
+ struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu);
+
+ /* Check these in order of likelihood that they're not zero */
+ return folio_batch_count(&fbatches->lru_add) ||
+ data_race(folio_batch_count(&per_cpu(lru_rotate.fbatch, cpu))) ||
+ folio_batch_count(&fbatches->lru_deactivate_file) ||
+ folio_batch_count(&fbatches->lru_deactivate) ||
+ folio_batch_count(&fbatches->lru_lazyfree) ||
+ folio_batch_count(&fbatches->activate) ||
+ need_mlock_page_drain(cpu) ||
+ has_bh_in_lru(cpu, NULL);
+}
+
/*
* Doesn't need any cpu hotplug locking because we do rely on per-cpu
* kworkers being shut down before our page_alloc_cpu_dead callback is
@@ -773,8 +812,9 @@ static inline void __lru_add_drain_all(bool force_all_cpus)
return;
/*
- * Guarantee pagevec counter stores visible by this CPU are visible to
- * other CPUs before loading the current drain generation.
+ * Guarantee folio_batch counter stores visible by this CPU
+ * are visible to other CPUs before loading the current drain
+ * generation.
*/
smp_mb();
@@ -800,8 +840,9 @@ static inline void __lru_add_drain_all(bool force_all_cpus)
* (D) Increment global generation number
*
* Pairs with smp_load_acquire() at (B), outside of the critical
- * section. Use a full memory barrier to guarantee that the new global
- * drain generation number is stored before loading pagevec counters.
+ * section. Use a full memory barrier to guarantee that the
+ * new global drain generation number is stored before loading
+ * folio_batch counters.
*
* This pairing must be done here, before the for_each_online_cpu loop
* below which drains the page vectors.
@@ -823,14 +864,7 @@ static inline void __lru_add_drain_all(bool force_all_cpus)
for_each_online_cpu(cpu) {
struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
- if (pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) ||
- data_race(pagevec_count(&per_cpu(lru_rotate.pvec, cpu))) ||
- pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
- pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
- pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) ||
- need_activate_page_drain(cpu) ||
- need_mlock_page_drain(cpu) ||
- has_bh_in_lru(cpu, NULL)) {
+ if (cpu_needs_drain(cpu)) {
INIT_WORK(work, lru_add_drain_per_cpu);
queue_work_on(cpu, mm_percpu_wq, work);
__cpumask_set_cpu(cpu, &has_work);
@@ -906,8 +940,7 @@ void release_pages(struct page **pages, int nr)
unsigned int lock_batch;
for (i = 0; i < nr; i++) {
- struct page *page = pages[i];
- struct folio *folio = page_folio(page);
+ struct folio *folio = page_folio(pages[i]);
/*
* Make sure the IRQ-safe lock-holding time does not get
@@ -919,35 +952,34 @@ void release_pages(struct page **pages, int nr)
lruvec = NULL;
}
- page = &folio->page;
- if (is_huge_zero_page(page))
+ if (is_huge_zero_page(&folio->page))
continue;
- if (is_zone_device_page(page)) {
+ if (folio_is_zone_device(folio)) {
if (lruvec) {
unlock_page_lruvec_irqrestore(lruvec, flags);
lruvec = NULL;
}
- if (put_devmap_managed_page(page))
+ if (put_devmap_managed_page(&folio->page))
continue;
- if (put_page_testzero(page))
- free_zone_device_page(page);
+ if (folio_put_testzero(folio))
+ free_zone_device_page(&folio->page);
continue;
}
- if (!put_page_testzero(page))
+ if (!folio_put_testzero(folio))
continue;
- if (PageCompound(page)) {
+ if (folio_test_large(folio)) {
if (lruvec) {
unlock_page_lruvec_irqrestore(lruvec, flags);
lruvec = NULL;
}
- __put_compound_page(page);
+ __folio_put_large(folio);
continue;
}
- if (PageLRU(page)) {
+ if (folio_test_lru(folio)) {
struct lruvec *prev_lruvec = lruvec;
lruvec = folio_lruvec_relock_irqsave(folio, lruvec,
@@ -955,8 +987,8 @@ void release_pages(struct page **pages, int nr)
if (prev_lruvec != lruvec)
lock_batch = 0;
- del_page_from_lru_list(page, lruvec);
- __clear_page_lru_flags(page);
+ lruvec_del_folio(lruvec, folio);
+ __folio_clear_lru_flags(folio);
}
/*
@@ -965,13 +997,13 @@ void release_pages(struct page **pages, int nr)
* found set here. This does not indicate a problem, unless
* "unevictable_pgs_cleared" appears worryingly large.
*/
- if (unlikely(PageMlocked(page))) {
- __ClearPageMlocked(page);
- dec_zone_page_state(page, NR_MLOCK);
+ if (unlikely(folio_test_mlocked(folio))) {
+ __folio_clear_mlocked(folio);
+ zone_stat_sub_folio(folio, NR_MLOCK);
count_vm_event(UNEVICTABLE_PGCLEARED);
}
- list_add(&page->lru, &pages_to_free);
+ list_add(&folio->lru, &pages_to_free);
}
if (lruvec)
unlock_page_lruvec_irqrestore(lruvec, flags);
@@ -987,8 +1019,8 @@ EXPORT_SYMBOL(release_pages);
* OK from a correctness point of view but is inefficient - those pages may be
* cache-warm and we want to give them back to the page allocator ASAP.
*
- * So __pagevec_release() will drain those queues here. __pagevec_lru_add()
- * and __pagevec_lru_add_active() call release_pages() directly to avoid
+ * So __pagevec_release() will drain those queues here.
+ * folio_batch_move_lru() calls folios_put() directly to avoid
* mutual recursion.
*/
void __pagevec_release(struct pagevec *pvec)
@@ -1002,69 +1034,6 @@ void __pagevec_release(struct pagevec *pvec)
}
EXPORT_SYMBOL(__pagevec_release);
-static void __pagevec_lru_add_fn(struct folio *folio, struct lruvec *lruvec)
-{
- int was_unevictable = folio_test_clear_unevictable(folio);
- long nr_pages = folio_nr_pages(folio);
-
- VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
-
- folio_set_lru(folio);
- /*
- * Is an smp_mb__after_atomic() still required here, before
- * folio_evictable() tests PageMlocked, to rule out the possibility
- * of stranding an evictable folio on an unevictable LRU? I think
- * not, because __munlock_page() only clears PageMlocked while the LRU
- * lock is held.
- *
- * (That is not true of __page_cache_release(), and not necessarily
- * true of release_pages(): but those only clear PageMlocked after
- * put_page_testzero() has excluded any other users of the page.)
- */
- if (folio_evictable(folio)) {
- if (was_unevictable)
- __count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
- } else {
- folio_clear_active(folio);
- folio_set_unevictable(folio);
- /*
- * folio->mlock_count = !!folio_test_mlocked(folio)?
- * But that leaves __mlock_page() in doubt whether another
- * actor has already counted the mlock or not. Err on the
- * safe side, underestimate, let page reclaim fix it, rather
- * than leaving a page on the unevictable LRU indefinitely.
- */
- folio->mlock_count = 0;
- if (!was_unevictable)
- __count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
- }
-
- lruvec_add_folio(lruvec, folio);
- trace_mm_lru_insertion(folio);
-}
-
-/*
- * Add the passed pages to the LRU, then drop the caller's refcount
- * on them. Reinitialises the caller's pagevec.
- */
-void __pagevec_lru_add(struct pagevec *pvec)
-{
- int i;
- struct lruvec *lruvec = NULL;
- unsigned long flags = 0;
-
- for (i = 0; i < pagevec_count(pvec); i++) {
- struct folio *folio = page_folio(pvec->pages[i]);
-
- lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
- __pagevec_lru_add_fn(folio, lruvec);
- }
- if (lruvec)
- unlock_page_lruvec_irqrestore(lruvec, flags);
- release_pages(pvec->pages, pvec->nr);
- pagevec_reinit(pvec);
-}
-
/**
* folio_batch_remove_exceptionals() - Prune non-folios from a batch.
* @fbatch: The batch to prune
diff --git a/mm/swap.h b/mm/swap.h
index 0193797b0c92..17936e068c1c 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -36,12 +36,11 @@ bool add_to_swap(struct folio *folio);
void *get_shadow_from_swap_cache(swp_entry_t entry);
int add_to_swap_cache(struct page *page, swp_entry_t entry,
gfp_t gfp, void **shadowp);
-void __delete_from_swap_cache(struct page *page,
+void __delete_from_swap_cache(struct folio *folio,
swp_entry_t entry, void *shadow);
-void delete_from_swap_cache(struct page *page);
+void delete_from_swap_cache(struct folio *folio);
void clear_shadow_from_swap_cache(int type, unsigned long begin,
unsigned long end);
-void free_swap_cache(struct page *page);
struct page *lookup_swap_cache(swp_entry_t entry,
struct vm_area_struct *vma,
unsigned long addr);
@@ -61,9 +60,9 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
struct vm_fault *vmf);
-static inline unsigned int page_swap_flags(struct page *page)
+static inline unsigned int folio_swap_flags(struct folio *folio)
{
- return page_swap_info(page)->flags;
+ return page_swap_info(&folio->page)->flags;
}
#else /* CONFIG_SWAP */
struct swap_iocb;
@@ -81,10 +80,6 @@ static inline struct address_space *swap_address_space(swp_entry_t entry)
return NULL;
}
-static inline void free_swap_cache(struct page *page)
-{
-}
-
static inline void show_swap_cache_info(void)
{
}
@@ -135,12 +130,12 @@ static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
return -1;
}
-static inline void __delete_from_swap_cache(struct page *page,
+static inline void __delete_from_swap_cache(struct folio *folio,
swp_entry_t entry, void *shadow)
{
}
-static inline void delete_from_swap_cache(struct page *page)
+static inline void delete_from_swap_cache(struct folio *folio)
{
}
@@ -149,7 +144,7 @@ static inline void clear_shadow_from_swap_cache(int type, unsigned long begin,
{
}
-static inline unsigned int page_swap_flags(struct page *page)
+static inline unsigned int folio_swap_flags(struct folio *folio)
{
return 0;
}
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 0a2021fc55ad..e166051566f4 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -59,24 +59,11 @@ static bool enable_vma_readahead __read_mostly = true;
#define GET_SWAP_RA_VAL(vma) \
(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
-#define INC_CACHE_INFO(x) data_race(swap_cache_info.x++)
-#define ADD_CACHE_INFO(x, nr) data_race(swap_cache_info.x += (nr))
-
-static struct {
- unsigned long add_total;
- unsigned long del_total;
- unsigned long find_success;
- unsigned long find_total;
-} swap_cache_info;
-
static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);
void show_swap_cache_info(void)
{
printk("%lu pages in swap cache\n", total_swapcache_pages());
- printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
- swap_cache_info.add_total, swap_cache_info.del_total,
- swap_cache_info.find_success, swap_cache_info.find_total);
printk("Free swap = %ldkB\n",
get_nr_swap_pages() << (PAGE_SHIFT - 10));
printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
@@ -133,7 +120,6 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry,
address_space->nrpages += nr;
__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
__mod_lruvec_page_state(page, NR_SWAPCACHE, nr);
- ADD_CACHE_INFO(add_total, nr);
unlock:
xas_unlock_irq(&xas);
} while (xas_nomem(&xas, gfp));
@@ -147,32 +133,32 @@ unlock:
}
/*
- * This must be called only on pages that have
+ * This must be called only on folios that have
* been verified to be in the swap cache.
*/
-void __delete_from_swap_cache(struct page *page,
+void __delete_from_swap_cache(struct folio *folio,
swp_entry_t entry, void *shadow)
{
struct address_space *address_space = swap_address_space(entry);
- int i, nr = thp_nr_pages(page);
+ int i;
+ long nr = folio_nr_pages(folio);
pgoff_t idx = swp_offset(entry);
XA_STATE(xas, &address_space->i_pages, idx);
- VM_BUG_ON_PAGE(!PageLocked(page), page);
- VM_BUG_ON_PAGE(!PageSwapCache(page), page);
- VM_BUG_ON_PAGE(PageWriteback(page), page);
+ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+ VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
+ VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
for (i = 0; i < nr; i++) {
void *entry = xas_store(&xas, shadow);
- VM_BUG_ON_PAGE(entry != page, entry);
- set_page_private(page + i, 0);
+ VM_BUG_ON_FOLIO(entry != folio, folio);
+ set_page_private(folio_page(folio, i), 0);
xas_next(&xas);
}
- ClearPageSwapCache(page);
+ folio_clear_swapcache(folio);
address_space->nrpages -= nr;
- __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
- __mod_lruvec_page_state(page, NR_SWAPCACHE, -nr);
- ADD_CACHE_INFO(del_total, nr);
+ __node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
+ __lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
}
/**
@@ -237,22 +223,22 @@ fail:
}
/*
- * This must be called only on pages that have
+ * This must be called only on folios that have
* been verified to be in the swap cache and locked.
- * It will never put the page into the free list,
- * the caller has a reference on the page.
+ * It will never put the folio into the free list,
+ * the caller has a reference on the folio.
*/
-void delete_from_swap_cache(struct page *page)
+void delete_from_swap_cache(struct folio *folio)
{
- swp_entry_t entry = { .val = page_private(page) };
+ swp_entry_t entry = folio_swap_entry(folio);
struct address_space *address_space = swap_address_space(entry);
xa_lock_irq(&address_space->i_pages);
- __delete_from_swap_cache(page, entry, NULL);
+ __delete_from_swap_cache(folio, entry, NULL);
xa_unlock_irq(&address_space->i_pages);
- put_swap_page(page, entry);
- page_ref_sub(page, thp_nr_pages(page));
+ put_swap_page(&folio->page, entry);
+ folio_ref_sub(folio, folio_nr_pages(folio));
}
void clear_shadow_from_swap_cache(int type, unsigned long begin,
@@ -348,12 +334,10 @@ struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
page = find_get_page(swap_address_space(entry), swp_offset(entry));
put_swap_device(si);
- INC_CACHE_INFO(find_total);
if (page) {
bool vma_ra = swap_use_vma_readahead();
bool readahead;
- INC_CACHE_INFO(find_success);
/*
* At the moment, we don't support PG_readahead for anon THP
* so let's bail out rather than confusing the readahead stat.
diff --git a/mm/swapfile.c b/mm/swapfile.c
index a2e66d855b19..1fdccd2f1422 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -695,7 +695,7 @@ static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset,
si->lowest_bit += nr_entries;
if (end == si->highest_bit)
WRITE_ONCE(si->highest_bit, si->highest_bit - nr_entries);
- si->inuse_pages += nr_entries;
+ WRITE_ONCE(si->inuse_pages, si->inuse_pages + nr_entries);
if (si->inuse_pages == si->pages) {
si->lowest_bit = si->max;
si->highest_bit = 0;
@@ -732,7 +732,7 @@ static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
add_to_avail_list(si);
}
atomic_long_add(nr_entries, &nr_swap_pages);
- si->inuse_pages -= nr_entries;
+ WRITE_ONCE(si->inuse_pages, si->inuse_pages - nr_entries);
if (si->flags & SWP_BLKDEV)
swap_slot_free_notify =
si->bdev->bd_disk->fops->swap_slot_free_notify;
@@ -1568,16 +1568,15 @@ unlock_out:
return ret;
}
-static bool page_swapped(struct page *page)
+static bool folio_swapped(struct folio *folio)
{
swp_entry_t entry;
struct swap_info_struct *si;
- if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!PageTransCompound(page)))
- return page_swapcount(page) != 0;
+ if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!folio_test_large(folio)))
+ return page_swapcount(&folio->page) != 0;
- page = compound_head(page);
- entry.val = page_private(page);
+ entry = folio_swap_entry(folio);
si = _swap_info_get(entry);
if (si)
return swap_page_trans_huge_swapped(si, entry);
@@ -1590,13 +1589,14 @@ static bool page_swapped(struct page *page)
*/
int try_to_free_swap(struct page *page)
{
- VM_BUG_ON_PAGE(!PageLocked(page), page);
+ struct folio *folio = page_folio(page);
+ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
- if (!PageSwapCache(page))
+ if (!folio_test_swapcache(folio))
return 0;
- if (PageWriteback(page))
+ if (folio_test_writeback(folio))
return 0;
- if (page_swapped(page))
+ if (folio_swapped(folio))
return 0;
/*
@@ -1617,9 +1617,8 @@ int try_to_free_swap(struct page *page)
if (pm_suspended_storage())
return 0;
- page = compound_head(page);
- delete_from_swap_cache(page);
- SetPageDirty(page);
+ delete_from_swap_cache(folio);
+ folio_set_dirty(folio);
return 1;
}
@@ -2640,7 +2639,7 @@ static int swap_show(struct seq_file *swap, void *v)
}
bytes = si->pages << (PAGE_SHIFT - 10);
- inuse = si->inuse_pages << (PAGE_SHIFT - 10);
+ inuse = READ_ONCE(si->inuse_pages) << (PAGE_SHIFT - 10);
file = si->swap_file;
len = seq_file_path(swap, file, " \t\n\\");
@@ -3259,7 +3258,7 @@ void si_swapinfo(struct sysinfo *val)
struct swap_info_struct *si = swap_info[type];
if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
- nr_to_be_unused += si->inuse_pages;
+ nr_to_be_unused += READ_ONCE(si->inuse_pages);
}
val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused;
val->totalswap = total_swap_pages + nr_to_be_unused;
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 07d3befc80e4..7327b2573f7c 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -703,14 +703,29 @@ ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long start,
mmap_changing, 0);
}
+void uffd_wp_range(struct mm_struct *dst_mm, struct vm_area_struct *dst_vma,
+ unsigned long start, unsigned long len, bool enable_wp)
+{
+ struct mmu_gather tlb;
+ pgprot_t newprot;
+
+ if (enable_wp)
+ newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE));
+ else
+ newprot = vm_get_page_prot(dst_vma->vm_flags);
+
+ tlb_gather_mmu(&tlb, dst_mm);
+ change_protection(&tlb, dst_vma, start, start + len, newprot,
+ enable_wp ? MM_CP_UFFD_WP : MM_CP_UFFD_WP_RESOLVE);
+ tlb_finish_mmu(&tlb);
+}
+
int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
unsigned long len, bool enable_wp,
atomic_t *mmap_changing)
{
struct vm_area_struct *dst_vma;
unsigned long page_mask;
- struct mmu_gather tlb;
- pgprot_t newprot;
int err;
/*
@@ -750,15 +765,7 @@ int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
goto out_unlock;
}
- if (enable_wp)
- newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE));
- else
- newprot = vm_get_page_prot(dst_vma->vm_flags);
-
- tlb_gather_mmu(&tlb, dst_mm);
- change_protection(&tlb, dst_vma, start, start + len, newprot,
- enable_wp ? MM_CP_UFFD_WP : MM_CP_UFFD_WP_RESOLVE);
- tlb_finish_mmu(&tlb);
+ uffd_wp_range(dst_mm, dst_vma, start, len, enable_wp);
err = 0;
out_unlock:
diff --git a/mm/util.c b/mm/util.c
index 53af0e79d3e4..c9439c66d8cf 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -1005,7 +1005,7 @@ EXPORT_SYMBOL_GPL(vm_memory_committed);
* succeed and -ENOMEM implies there is not.
*
* We currently support three overcommit policies, which are set via the
- * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting.rst
+ * vm.overcommit_memory sysctl. See Documentation/mm/overcommit-accounting.rst
*
* Strict overcommit modes added 2002 Feb 26 by Alan Cox.
* Additional code 2002 Jul 20 by Robert Love.
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index effd1ff6a4b4..dd6cdb201195 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -790,6 +790,7 @@ unsigned long vmalloc_nr_pages(void)
return atomic_long_read(&nr_vmalloc_pages);
}
+/* Look up the first VA which satisfies addr < va_end, NULL if none. */
static struct vmap_area *find_vmap_area_exceed_addr(unsigned long addr)
{
struct vmap_area *va = NULL;
@@ -814,9 +815,9 @@ static struct vmap_area *find_vmap_area_exceed_addr(unsigned long addr)
return va;
}
-static struct vmap_area *__find_vmap_area(unsigned long addr)
+static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root)
{
- struct rb_node *n = vmap_area_root.rb_node;
+ struct rb_node *n = root->rb_node;
addr = (unsigned long)kasan_reset_tag((void *)addr);
@@ -874,11 +875,9 @@ find_va_links(struct vmap_area *va,
* Trigger the BUG() if there are sides(left/right)
* or full overlaps.
*/
- if (va->va_start < tmp_va->va_end &&
- va->va_end <= tmp_va->va_start)
+ if (va->va_end <= tmp_va->va_start)
link = &(*link)->rb_left;
- else if (va->va_end > tmp_va->va_start &&
- va->va_start >= tmp_va->va_end)
+ else if (va->va_start >= tmp_va->va_end)
link = &(*link)->rb_right;
else {
WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
@@ -911,8 +910,9 @@ get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
}
static __always_inline void
-link_va(struct vmap_area *va, struct rb_root *root,
- struct rb_node *parent, struct rb_node **link, struct list_head *head)
+__link_va(struct vmap_area *va, struct rb_root *root,
+ struct rb_node *parent, struct rb_node **link,
+ struct list_head *head, bool augment)
{
/*
* VA is still not in the list, but we can
@@ -926,12 +926,12 @@ link_va(struct vmap_area *va, struct rb_root *root,
/* Insert to the rb-tree */
rb_link_node(&va->rb_node, parent, link);
- if (root == &free_vmap_area_root) {
+ if (augment) {
/*
* Some explanation here. Just perform simple insertion
* to the tree. We do not set va->subtree_max_size to
* its current size before calling rb_insert_augmented().
- * It is because of we populate the tree from the bottom
+ * It is because we populate the tree from the bottom
* to parent levels when the node _is_ in the tree.
*
* Therefore we set subtree_max_size to zero after insertion,
@@ -950,21 +950,49 @@ link_va(struct vmap_area *va, struct rb_root *root,
}
static __always_inline void
-unlink_va(struct vmap_area *va, struct rb_root *root)
+link_va(struct vmap_area *va, struct rb_root *root,
+ struct rb_node *parent, struct rb_node **link,
+ struct list_head *head)
+{
+ __link_va(va, root, parent, link, head, false);
+}
+
+static __always_inline void
+link_va_augment(struct vmap_area *va, struct rb_root *root,
+ struct rb_node *parent, struct rb_node **link,
+ struct list_head *head)
+{
+ __link_va(va, root, parent, link, head, true);
+}
+
+static __always_inline void
+__unlink_va(struct vmap_area *va, struct rb_root *root, bool augment)
{
if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
return;
- if (root == &free_vmap_area_root)
+ if (augment)
rb_erase_augmented(&va->rb_node,
root, &free_vmap_area_rb_augment_cb);
else
rb_erase(&va->rb_node, root);
- list_del(&va->list);
+ list_del_init(&va->list);
RB_CLEAR_NODE(&va->rb_node);
}
+static __always_inline void
+unlink_va(struct vmap_area *va, struct rb_root *root)
+{
+ __unlink_va(va, root, false);
+}
+
+static __always_inline void
+unlink_va_augment(struct vmap_area *va, struct rb_root *root)
+{
+ __unlink_va(va, root, true);
+}
+
#if DEBUG_AUGMENT_PROPAGATE_CHECK
/*
* Gets called when remove the node and rotate.
@@ -1060,7 +1088,7 @@ insert_vmap_area_augment(struct vmap_area *va,
link = find_va_links(va, root, NULL, &parent);
if (link) {
- link_va(va, root, parent, link, head);
+ link_va_augment(va, root, parent, link, head);
augment_tree_propagate_from(va);
}
}
@@ -1077,8 +1105,8 @@ insert_vmap_area_augment(struct vmap_area *va,
* ongoing.
*/
static __always_inline struct vmap_area *
-merge_or_add_vmap_area(struct vmap_area *va,
- struct rb_root *root, struct list_head *head)
+__merge_or_add_vmap_area(struct vmap_area *va,
+ struct rb_root *root, struct list_head *head, bool augment)
{
struct vmap_area *sibling;
struct list_head *next;
@@ -1140,7 +1168,7 @@ merge_or_add_vmap_area(struct vmap_area *va,
* "normalized" because of rotation operations.
*/
if (merged)
- unlink_va(va, root);
+ __unlink_va(va, root, augment);
sibling->va_end = va->va_end;
@@ -1155,16 +1183,23 @@ merge_or_add_vmap_area(struct vmap_area *va,
insert:
if (!merged)
- link_va(va, root, parent, link, head);
+ __link_va(va, root, parent, link, head, augment);
return va;
}
static __always_inline struct vmap_area *
+merge_or_add_vmap_area(struct vmap_area *va,
+ struct rb_root *root, struct list_head *head)
+{
+ return __merge_or_add_vmap_area(va, root, head, false);
+}
+
+static __always_inline struct vmap_area *
merge_or_add_vmap_area_augment(struct vmap_area *va,
struct rb_root *root, struct list_head *head)
{
- va = merge_or_add_vmap_area(va, root, head);
+ va = __merge_or_add_vmap_area(va, root, head, true);
if (va)
augment_tree_propagate_from(va);
@@ -1198,15 +1233,15 @@ is_within_this_va(struct vmap_area *va, unsigned long size,
* overhead.
*/
static __always_inline struct vmap_area *
-find_vmap_lowest_match(unsigned long size, unsigned long align,
- unsigned long vstart, bool adjust_search_size)
+find_vmap_lowest_match(struct rb_root *root, unsigned long size,
+ unsigned long align, unsigned long vstart, bool adjust_search_size)
{
struct vmap_area *va;
struct rb_node *node;
unsigned long length;
/* Start from the root. */
- node = free_vmap_area_root.rb_node;
+ node = root->rb_node;
/* Adjust the search size for alignment overhead. */
length = adjust_search_size ? size + align - 1 : size;
@@ -1334,11 +1369,12 @@ classify_va_fit_type(struct vmap_area *va,
}
static __always_inline int
-adjust_va_to_fit_type(struct vmap_area *va,
- unsigned long nva_start_addr, unsigned long size,
- enum fit_type type)
+adjust_va_to_fit_type(struct rb_root *root, struct list_head *head,
+ struct vmap_area *va, unsigned long nva_start_addr,
+ unsigned long size)
{
struct vmap_area *lva = NULL;
+ enum fit_type type = classify_va_fit_type(va, nva_start_addr, size);
if (type == FL_FIT_TYPE) {
/*
@@ -1348,7 +1384,7 @@ adjust_va_to_fit_type(struct vmap_area *va,
* V NVA V
* |---------------|
*/
- unlink_va(va, &free_vmap_area_root);
+ unlink_va_augment(va, root);
kmem_cache_free(vmap_area_cachep, va);
} else if (type == LE_FIT_TYPE) {
/*
@@ -1426,8 +1462,7 @@ adjust_va_to_fit_type(struct vmap_area *va,
augment_tree_propagate_from(va);
if (lva) /* type == NE_FIT_TYPE */
- insert_vmap_area_augment(lva, &va->rb_node,
- &free_vmap_area_root, &free_vmap_area_list);
+ insert_vmap_area_augment(lva, &va->rb_node, root, head);
}
return 0;
@@ -1438,13 +1473,13 @@ adjust_va_to_fit_type(struct vmap_area *va,
* Otherwise a vend is returned that indicates failure.
*/
static __always_inline unsigned long
-__alloc_vmap_area(unsigned long size, unsigned long align,
+__alloc_vmap_area(struct rb_root *root, struct list_head *head,
+ unsigned long size, unsigned long align,
unsigned long vstart, unsigned long vend)
{
bool adjust_search_size = true;
unsigned long nva_start_addr;
struct vmap_area *va;
- enum fit_type type;
int ret;
/*
@@ -1459,7 +1494,7 @@ __alloc_vmap_area(unsigned long size, unsigned long align,
if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size))
adjust_search_size = false;
- va = find_vmap_lowest_match(size, align, vstart, adjust_search_size);
+ va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
if (unlikely(!va))
return vend;
@@ -1472,14 +1507,9 @@ __alloc_vmap_area(unsigned long size, unsigned long align,
if (nva_start_addr + size > vend)
return vend;
- /* Classify what we have found. */
- type = classify_va_fit_type(va, nva_start_addr, size);
- if (WARN_ON_ONCE(type == NOTHING_FIT))
- return vend;
-
/* Update the free vmap_area. */
- ret = adjust_va_to_fit_type(va, nva_start_addr, size, type);
- if (ret)
+ ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size);
+ if (WARN_ON_ONCE(ret))
return vend;
#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
@@ -1569,7 +1599,8 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
retry:
preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
- addr = __alloc_vmap_area(size, align, vstart, vend);
+ addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
+ size, align, vstart, vend);
spin_unlock(&free_vmap_area_lock);
/*
@@ -1663,7 +1694,7 @@ static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);
/*
* Serialize vmap purging. There is no actual critical section protected
- * by this look, but we want to avoid concurrent calls for performance
+ * by this lock, but we want to avoid concurrent calls for performance
* reasons and to make the pcpu_get_vm_areas more deterministic.
*/
static DEFINE_MUTEX(vmap_purge_lock);
@@ -1677,32 +1708,32 @@ static void purge_fragmented_blocks_allcpus(void);
static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
{
unsigned long resched_threshold;
- struct list_head local_pure_list;
+ struct list_head local_purge_list;
struct vmap_area *va, *n_va;
lockdep_assert_held(&vmap_purge_lock);
spin_lock(&purge_vmap_area_lock);
purge_vmap_area_root = RB_ROOT;
- list_replace_init(&purge_vmap_area_list, &local_pure_list);
+ list_replace_init(&purge_vmap_area_list, &local_purge_list);
spin_unlock(&purge_vmap_area_lock);
- if (unlikely(list_empty(&local_pure_list)))
+ if (unlikely(list_empty(&local_purge_list)))
return false;
start = min(start,
- list_first_entry(&local_pure_list,
+ list_first_entry(&local_purge_list,
struct vmap_area, list)->va_start);
end = max(end,
- list_last_entry(&local_pure_list,
+ list_last_entry(&local_purge_list,
struct vmap_area, list)->va_end);
flush_tlb_kernel_range(start, end);
resched_threshold = lazy_max_pages() << 1;
spin_lock(&free_vmap_area_lock);
- list_for_each_entry_safe(va, n_va, &local_pure_list, list) {
+ list_for_each_entry_safe(va, n_va, &local_purge_list, list) {
unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
unsigned long orig_start = va->va_start;
unsigned long orig_end = va->va_end;
@@ -1803,7 +1834,7 @@ struct vmap_area *find_vmap_area(unsigned long addr)
struct vmap_area *va;
spin_lock(&vmap_area_lock);
- va = __find_vmap_area(addr);
+ va = __find_vmap_area(addr, &vmap_area_root);
spin_unlock(&vmap_area_lock);
return va;
@@ -2546,7 +2577,7 @@ struct vm_struct *remove_vm_area(const void *addr)
might_sleep();
spin_lock(&vmap_area_lock);
- va = __find_vmap_area((unsigned long)addr);
+ va = __find_vmap_area((unsigned long)addr, &vmap_area_root);
if (va && va->vm) {
struct vm_struct *vm = va->vm;
@@ -3168,15 +3199,15 @@ again:
/*
* Mark the pages as accessible, now that they are mapped.
- * The init condition should match the one in post_alloc_hook()
- * (except for the should_skip_init() check) to make sure that memory
- * is initialized under the same conditions regardless of the enabled
- * KASAN mode.
+ * The condition for setting KASAN_VMALLOC_INIT should complement the
+ * one in post_alloc_hook() with regards to the __GFP_SKIP_ZERO check
+ * to make sure that memory is initialized under the same conditions.
* Tag-based KASAN modes only assign tags to normal non-executable
* allocations, see __kasan_unpoison_vmalloc().
*/
kasan_flags |= KASAN_VMALLOC_VM_ALLOC;
- if (!want_init_on_free() && want_init_on_alloc(gfp_mask))
+ if (!want_init_on_free() && want_init_on_alloc(gfp_mask) &&
+ !(gfp_mask & __GFP_SKIP_ZERO))
kasan_flags |= KASAN_VMALLOC_INIT;
/* KASAN_VMALLOC_PROT_NORMAL already set if required. */
area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags);
@@ -3735,7 +3766,6 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
int area, area2, last_area, term_area;
unsigned long base, start, size, end, last_end, orig_start, orig_end;
bool purged = false;
- enum fit_type type;
/* verify parameters and allocate data structures */
BUG_ON(offset_in_page(align) || !is_power_of_2(align));
@@ -3846,15 +3876,13 @@ retry:
/* It is a BUG(), but trigger recovery instead. */
goto recovery;
- type = classify_va_fit_type(va, start, size);
- if (WARN_ON_ONCE(type == NOTHING_FIT))
+ ret = adjust_va_to_fit_type(&free_vmap_area_root,
+ &free_vmap_area_list,
+ va, start, size);
+ if (WARN_ON_ONCE(unlikely(ret)))
/* It is a BUG(), but trigger recovery instead. */
goto recovery;
- ret = adjust_va_to_fit_type(va, start, size, type);
- if (unlikely(ret))
- goto recovery;
-
/* Allocated area. */
va = vas[area];
va->va_start = start;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 04f8671caad9..b2b1431352dc 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -26,8 +26,7 @@
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
-#include <linux/buffer_head.h> /* for try_to_release_page(),
- buffer_heads_over_limit */
+#include <linux/buffer_head.h> /* for buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
@@ -102,6 +101,9 @@ struct scan_control {
/* Can pages be swapped as part of reclaim? */
unsigned int may_swap:1;
+ /* Proactive reclaim invoked by userspace through memory.reclaim */
+ unsigned int proactive:1;
+
/*
* Cgroup memory below memory.low is protected as long as we
* don't threaten to OOM. If any cgroup is reclaimed at
@@ -160,17 +162,17 @@ struct scan_control {
};
#ifdef ARCH_HAS_PREFETCHW
-#define prefetchw_prev_lru_page(_page, _base, _field) \
+#define prefetchw_prev_lru_folio(_folio, _base, _field) \
do { \
- if ((_page)->lru.prev != _base) { \
- struct page *prev; \
+ if ((_folio)->lru.prev != _base) { \
+ struct folio *prev; \
\
- prev = lru_to_page(&(_page->lru)); \
+ prev = lru_to_folio(&(_folio->lru)); \
prefetchw(&prev->_field); \
} \
} while (0)
#else
-#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
+#define prefetchw_prev_lru_folio(_folio, _base, _field) do { } while (0)
#endif
/*
@@ -190,8 +192,8 @@ static void set_task_reclaim_state(struct task_struct *task,
task->reclaim_state = rs;
}
-static LIST_HEAD(shrinker_list);
-static DECLARE_RWSEM(shrinker_rwsem);
+LIST_HEAD(shrinker_list);
+DECLARE_RWSEM(shrinker_rwsem);
#ifdef CONFIG_MEMCG
static int shrinker_nr_max;
@@ -608,7 +610,7 @@ static unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru,
/*
* Add a shrinker callback to be called from the vm.
*/
-int prealloc_shrinker(struct shrinker *shrinker)
+static int __prealloc_shrinker(struct shrinker *shrinker)
{
unsigned int size;
int err;
@@ -632,8 +634,39 @@ int prealloc_shrinker(struct shrinker *shrinker)
return 0;
}
+#ifdef CONFIG_SHRINKER_DEBUG
+int prealloc_shrinker(struct shrinker *shrinker, const char *fmt, ...)
+{
+ va_list ap;
+ int err;
+
+ va_start(ap, fmt);
+ shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap);
+ va_end(ap);
+ if (!shrinker->name)
+ return -ENOMEM;
+
+ err = __prealloc_shrinker(shrinker);
+ if (err) {
+ kfree_const(shrinker->name);
+ shrinker->name = NULL;
+ }
+
+ return err;
+}
+#else
+int prealloc_shrinker(struct shrinker *shrinker, const char *fmt, ...)
+{
+ return __prealloc_shrinker(shrinker);
+}
+#endif
+
void free_prealloced_shrinker(struct shrinker *shrinker)
{
+#ifdef CONFIG_SHRINKER_DEBUG
+ kfree_const(shrinker->name);
+ shrinker->name = NULL;
+#endif
if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
down_write(&shrinker_rwsem);
unregister_memcg_shrinker(shrinker);
@@ -650,18 +683,45 @@ void register_shrinker_prepared(struct shrinker *shrinker)
down_write(&shrinker_rwsem);
list_add_tail(&shrinker->list, &shrinker_list);
shrinker->flags |= SHRINKER_REGISTERED;
+ shrinker_debugfs_add(shrinker);
up_write(&shrinker_rwsem);
}
-int register_shrinker(struct shrinker *shrinker)
+static int __register_shrinker(struct shrinker *shrinker)
{
- int err = prealloc_shrinker(shrinker);
+ int err = __prealloc_shrinker(shrinker);
if (err)
return err;
register_shrinker_prepared(shrinker);
return 0;
}
+
+#ifdef CONFIG_SHRINKER_DEBUG
+int register_shrinker(struct shrinker *shrinker, const char *fmt, ...)
+{
+ va_list ap;
+ int err;
+
+ va_start(ap, fmt);
+ shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap);
+ va_end(ap);
+ if (!shrinker->name)
+ return -ENOMEM;
+
+ err = __register_shrinker(shrinker);
+ if (err) {
+ kfree_const(shrinker->name);
+ shrinker->name = NULL;
+ }
+ return err;
+}
+#else
+int register_shrinker(struct shrinker *shrinker, const char *fmt, ...)
+{
+ return __register_shrinker(shrinker);
+}
+#endif
EXPORT_SYMBOL(register_shrinker);
/*
@@ -677,6 +737,7 @@ void unregister_shrinker(struct shrinker *shrinker)
shrinker->flags &= ~SHRINKER_REGISTERED;
if (shrinker->flags & SHRINKER_MEMCG_AWARE)
unregister_memcg_shrinker(shrinker);
+ shrinker_debugfs_remove(shrinker);
up_write(&shrinker_rwsem);
kfree(shrinker->nr_deferred);
@@ -1276,7 +1337,7 @@ static int __remove_mapping(struct address_space *mapping, struct folio *folio,
mem_cgroup_swapout(folio, swap);
if (reclaimed && !mapping_exiting(mapping))
shadow = workingset_eviction(folio, target_memcg);
- __delete_from_swap_cache(&folio->page, swap, shadow);
+ __delete_from_swap_cache(folio, swap, shadow);
xa_unlock_irq(&mapping->i_pages);
put_swap_page(&folio->page, swap);
} else {
@@ -1519,7 +1580,7 @@ static bool may_enter_fs(struct folio *folio, gfp_t gfp_mask)
* but that will never affect SWP_FS_OPS, so the data_race
* is safe.
*/
- return !data_race(page_swap_flags(&folio->page) & SWP_FS_OPS);
+ return !data_race(folio_swap_flags(folio) & SWP_FS_OPS);
}
/*
@@ -1926,7 +1987,7 @@ free_it:
* appear not as the counts should be low
*/
if (unlikely(folio_test_large(folio)))
- destroy_compound_page(&folio->page);
+ destroy_large_folio(folio);
else
list_add(&folio->lru, &free_pages);
continue;
@@ -1987,7 +2048,7 @@ keep:
}
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
- struct list_head *page_list)
+ struct list_head *folio_list)
{
struct scan_control sc = {
.gfp_mask = GFP_KERNEL,
@@ -1995,16 +2056,16 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
};
struct reclaim_stat stat;
unsigned int nr_reclaimed;
- struct page *page, *next;
- LIST_HEAD(clean_pages);
+ struct folio *folio, *next;
+ LIST_HEAD(clean_folios);
unsigned int noreclaim_flag;
- list_for_each_entry_safe(page, next, page_list, lru) {
- if (!PageHuge(page) && page_is_file_lru(page) &&
- !PageDirty(page) && !__PageMovable(page) &&
- !PageUnevictable(page)) {
- ClearPageActive(page);
- list_move(&page->lru, &clean_pages);
+ list_for_each_entry_safe(folio, next, folio_list, lru) {
+ if (!folio_test_hugetlb(folio) && folio_is_file_lru(folio) &&
+ !folio_test_dirty(folio) && !__folio_test_movable(folio) &&
+ !folio_test_unevictable(folio)) {
+ folio_clear_active(folio);
+ list_move(&folio->lru, &clean_folios);
}
}
@@ -2015,11 +2076,11 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
* change in the future.
*/
noreclaim_flag = memalloc_noreclaim_save();
- nr_reclaimed = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
+ nr_reclaimed = shrink_page_list(&clean_folios, zone->zone_pgdat, &sc,
&stat, true);
memalloc_noreclaim_restore(noreclaim_flag);
- list_splice(&clean_pages, page_list);
+ list_splice(&clean_folios, folio_list);
mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
-(long)nr_reclaimed);
/*
@@ -2085,72 +2146,72 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
unsigned long skipped = 0;
unsigned long scan, total_scan, nr_pages;
- LIST_HEAD(pages_skipped);
+ LIST_HEAD(folios_skipped);
total_scan = 0;
scan = 0;
while (scan < nr_to_scan && !list_empty(src)) {
struct list_head *move_to = src;
- struct page *page;
+ struct folio *folio;
- page = lru_to_page(src);
- prefetchw_prev_lru_page(page, src, flags);
+ folio = lru_to_folio(src);
+ prefetchw_prev_lru_folio(folio, src, flags);
- nr_pages = compound_nr(page);
+ nr_pages = folio_nr_pages(folio);
total_scan += nr_pages;
- if (page_zonenum(page) > sc->reclaim_idx) {
- nr_skipped[page_zonenum(page)] += nr_pages;
- move_to = &pages_skipped;
+ if (folio_zonenum(folio) > sc->reclaim_idx) {
+ nr_skipped[folio_zonenum(folio)] += nr_pages;
+ move_to = &folios_skipped;
goto move;
}
/*
- * Do not count skipped pages because that makes the function
- * return with no isolated pages if the LRU mostly contains
- * ineligible pages. This causes the VM to not reclaim any
- * pages, triggering a premature OOM.
- * Account all tail pages of THP.
+ * Do not count skipped folios because that makes the function
+ * return with no isolated folios if the LRU mostly contains
+ * ineligible folios. This causes the VM to not reclaim any
+ * folios, triggering a premature OOM.
+ * Account all pages in a folio.
*/
scan += nr_pages;
- if (!PageLRU(page))
+ if (!folio_test_lru(folio))
goto move;
- if (!sc->may_unmap && page_mapped(page))
+ if (!sc->may_unmap && folio_mapped(folio))
goto move;
/*
- * Be careful not to clear PageLRU until after we're
- * sure the page is not being freed elsewhere -- the
- * page release code relies on it.
+ * Be careful not to clear the lru flag until after we're
+ * sure the folio is not being freed elsewhere -- the
+ * folio release code relies on it.
*/
- if (unlikely(!get_page_unless_zero(page)))
+ if (unlikely(!folio_try_get(folio)))
goto move;
- if (!TestClearPageLRU(page)) {
- /* Another thread is already isolating this page */
- put_page(page);
+ if (!folio_test_clear_lru(folio)) {
+ /* Another thread is already isolating this folio */
+ folio_put(folio);
goto move;
}
nr_taken += nr_pages;
- nr_zone_taken[page_zonenum(page)] += nr_pages;
+ nr_zone_taken[folio_zonenum(folio)] += nr_pages;
move_to = dst;
move:
- list_move(&page->lru, move_to);
+ list_move(&folio->lru, move_to);
}
/*
- * Splice any skipped pages to the start of the LRU list. Note that
+ * Splice any skipped folios to the start of the LRU list. Note that
* this disrupts the LRU order when reclaiming for lower zones but
* we cannot splice to the tail. If we did then the SWAP_CLUSTER_MAX
- * scanning would soon rescan the same pages to skip and waste lots
+ * scanning would soon rescan the same folios to skip and waste lots
* of cpu cycles.
*/
- if (!list_empty(&pages_skipped)) {
+ if (!list_empty(&folios_skipped)) {
int zid;
- list_splice(&pages_skipped, src);
+ list_splice(&folios_skipped, src);
for (zid = 0; zid < MAX_NR_ZONES; zid++) {
if (!nr_skipped[zid])
continue;
@@ -2254,8 +2315,8 @@ static int too_many_isolated(struct pglist_data *pgdat, int file,
}
/*
- * move_pages_to_lru() moves pages from private @list to appropriate LRU list.
- * On return, @list is reused as a list of pages to be freed by the caller.
+ * move_pages_to_lru() moves folios from private @list to appropriate LRU list.
+ * On return, @list is reused as a list of folios to be freed by the caller.
*
* Returns the number of pages moved to the given lruvec.
*/
@@ -2263,42 +2324,42 @@ static unsigned int move_pages_to_lru(struct lruvec *lruvec,
struct list_head *list)
{
int nr_pages, nr_moved = 0;
- LIST_HEAD(pages_to_free);
- struct page *page;
+ LIST_HEAD(folios_to_free);
while (!list_empty(list)) {
- page = lru_to_page(list);
- VM_BUG_ON_PAGE(PageLRU(page), page);
- list_del(&page->lru);
- if (unlikely(!page_evictable(page))) {
+ struct folio *folio = lru_to_folio(list);
+
+ VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
+ list_del(&folio->lru);
+ if (unlikely(!folio_evictable(folio))) {
spin_unlock_irq(&lruvec->lru_lock);
- putback_lru_page(page);
+ folio_putback_lru(folio);
spin_lock_irq(&lruvec->lru_lock);
continue;
}
/*
- * The SetPageLRU needs to be kept here for list integrity.
+ * The folio_set_lru needs to be kept here for list integrity.
* Otherwise:
* #0 move_pages_to_lru #1 release_pages
- * if !put_page_testzero
- * if (put_page_testzero())
- * !PageLRU //skip lru_lock
- * SetPageLRU()
- * list_add(&page->lru,)
- * list_add(&page->lru,)
+ * if (!folio_put_testzero())
+ * if (folio_put_testzero())
+ * !lru //skip lru_lock
+ * folio_set_lru()
+ * list_add(&folio->lru,)
+ * list_add(&folio->lru,)
*/
- SetPageLRU(page);
+ folio_set_lru(folio);
- if (unlikely(put_page_testzero(page))) {
- __clear_page_lru_flags(page);
+ if (unlikely(folio_put_testzero(folio))) {
+ __folio_clear_lru_flags(folio);
- if (unlikely(PageCompound(page))) {
+ if (unlikely(folio_test_large(folio))) {
spin_unlock_irq(&lruvec->lru_lock);
- destroy_compound_page(page);
+ destroy_large_folio(folio);
spin_lock_irq(&lruvec->lru_lock);
} else
- list_add(&page->lru, &pages_to_free);
+ list_add(&folio->lru, &folios_to_free);
continue;
}
@@ -2307,18 +2368,18 @@ static unsigned int move_pages_to_lru(struct lruvec *lruvec,
* All pages were isolated from the same lruvec (and isolation
* inhibits memcg migration).
*/
- VM_BUG_ON_PAGE(!folio_matches_lruvec(page_folio(page), lruvec), page);
- add_page_to_lru_list(page, lruvec);
- nr_pages = thp_nr_pages(page);
+ VM_BUG_ON_FOLIO(!folio_matches_lruvec(folio, lruvec), folio);
+ lruvec_add_folio(lruvec, folio);
+ nr_pages = folio_nr_pages(folio);
nr_moved += nr_pages;
- if (PageActive(page))
+ if (folio_test_active(folio))
workingset_age_nonresident(lruvec, nr_pages);
}
/*
* To save our caller's stack, now use input list for pages to free.
*/
- list_splice(&pages_to_free, list);
+ list_splice(&folios_to_free, list);
return nr_moved;
}
@@ -2429,21 +2490,21 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
}
/*
- * shrink_active_list() moves pages from the active LRU to the inactive LRU.
+ * shrink_active_list() moves folios from the active LRU to the inactive LRU.
*
- * We move them the other way if the page is referenced by one or more
+ * We move them the other way if the folio is referenced by one or more
* processes.
*
- * If the pages are mostly unmapped, the processing is fast and it is
+ * If the folios are mostly unmapped, the processing is fast and it is
* appropriate to hold lru_lock across the whole operation. But if
- * the pages are mapped, the processing is slow (folio_referenced()), so
- * we should drop lru_lock around each page. It's impossible to balance
- * this, so instead we remove the pages from the LRU while processing them.
- * It is safe to rely on PG_active against the non-LRU pages in here because
- * nobody will play with that bit on a non-LRU page.
+ * the folios are mapped, the processing is slow (folio_referenced()), so
+ * we should drop lru_lock around each folio. It's impossible to balance
+ * this, so instead we remove the folios from the LRU while processing them.
+ * It is safe to rely on the active flag against the non-LRU folios in here
+ * because nobody will play with that bit on a non-LRU folio.
*
- * The downside is that we have to touch page->_refcount against each page.
- * But we had to alter page->flags anyway.
+ * The downside is that we have to touch folio->_refcount against each folio.
+ * But we had to alter folio->flags anyway.
*/
static void shrink_active_list(unsigned long nr_to_scan,
struct lruvec *lruvec,
@@ -2453,7 +2514,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
unsigned long nr_taken;
unsigned long nr_scanned;
unsigned long vm_flags;
- LIST_HEAD(l_hold); /* The pages which were snipped off */
+ LIST_HEAD(l_hold); /* The folios which were snipped off */
LIST_HEAD(l_active);
LIST_HEAD(l_inactive);
unsigned nr_deactivate, nr_activate;
@@ -2478,23 +2539,21 @@ static void shrink_active_list(unsigned long nr_to_scan,
while (!list_empty(&l_hold)) {
struct folio *folio;
- struct page *page;
cond_resched();
folio = lru_to_folio(&l_hold);
list_del(&folio->lru);
- page = &folio->page;
- if (unlikely(!page_evictable(page))) {
- putback_lru_page(page);
+ if (unlikely(!folio_evictable(folio))) {
+ folio_putback_lru(folio);
continue;
}
if (unlikely(buffer_heads_over_limit)) {
- if (page_has_private(page) && trylock_page(page)) {
- if (page_has_private(page))
- try_to_release_page(page, 0);
- unlock_page(page);
+ if (folio_get_private(folio) && folio_trylock(folio)) {
+ if (folio_get_private(folio))
+ filemap_release_folio(folio, 0);
+ folio_unlock(folio);
}
}
@@ -2502,34 +2561,34 @@ static void shrink_active_list(unsigned long nr_to_scan,
if (folio_referenced(folio, 0, sc->target_mem_cgroup,
&vm_flags) != 0) {
/*
- * Identify referenced, file-backed active pages and
+ * Identify referenced, file-backed active folios and
* give them one more trip around the active list. So
* that executable code get better chances to stay in
- * memory under moderate memory pressure. Anon pages
+ * memory under moderate memory pressure. Anon folios
* are not likely to be evicted by use-once streaming
- * IO, plus JVM can create lots of anon VM_EXEC pages,
+ * IO, plus JVM can create lots of anon VM_EXEC folios,
* so we ignore them here.
*/
- if ((vm_flags & VM_EXEC) && page_is_file_lru(page)) {
- nr_rotated += thp_nr_pages(page);
- list_add(&page->lru, &l_active);
+ if ((vm_flags & VM_EXEC) && folio_is_file_lru(folio)) {
+ nr_rotated += folio_nr_pages(folio);
+ list_add(&folio->lru, &l_active);
continue;
}
}
- ClearPageActive(page); /* we are de-activating */
- SetPageWorkingset(page);
- list_add(&page->lru, &l_inactive);
+ folio_clear_active(folio); /* we are de-activating */
+ folio_set_workingset(folio);
+ list_add(&folio->lru, &l_inactive);
}
/*
- * Move pages back to the lru list.
+ * Move folios back to the lru list.
*/
spin_lock_irq(&lruvec->lru_lock);
nr_activate = move_pages_to_lru(lruvec, &l_active);
nr_deactivate = move_pages_to_lru(lruvec, &l_inactive);
- /* Keep all free pages in l_active list */
+ /* Keep all free folios in l_active list */
list_splice(&l_inactive, &l_active);
__count_vm_events(PGDEACTIVATE, nr_deactivate);
@@ -2568,34 +2627,33 @@ static unsigned int reclaim_page_list(struct list_head *page_list,
return nr_reclaimed;
}
-unsigned long reclaim_pages(struct list_head *page_list)
+unsigned long reclaim_pages(struct list_head *folio_list)
{
int nid;
unsigned int nr_reclaimed = 0;
- LIST_HEAD(node_page_list);
- struct page *page;
+ LIST_HEAD(node_folio_list);
unsigned int noreclaim_flag;
- if (list_empty(page_list))
+ if (list_empty(folio_list))
return nr_reclaimed;
noreclaim_flag = memalloc_noreclaim_save();
- nid = page_to_nid(lru_to_page(page_list));
+ nid = folio_nid(lru_to_folio(folio_list));
do {
- page = lru_to_page(page_list);
+ struct folio *folio = lru_to_folio(folio_list);
- if (nid == page_to_nid(page)) {
- ClearPageActive(page);
- list_move(&page->lru, &node_page_list);
+ if (nid == folio_nid(folio)) {
+ folio_clear_active(folio);
+ list_move(&folio->lru, &node_folio_list);
continue;
}
- nr_reclaimed += reclaim_page_list(&node_page_list, NODE_DATA(nid));
- nid = page_to_nid(lru_to_page(page_list));
- } while (!list_empty(page_list));
+ nr_reclaimed += reclaim_page_list(&node_folio_list, NODE_DATA(nid));
+ nid = folio_nid(lru_to_folio(folio_list));
+ } while (!list_empty(folio_list));
- nr_reclaimed += reclaim_page_list(&node_page_list, NODE_DATA(nid));
+ nr_reclaimed += reclaim_page_list(&node_folio_list, NODE_DATA(nid));
memalloc_noreclaim_restore(noreclaim_flag);
@@ -3125,9 +3183,10 @@ static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
sc->priority);
/* Record the group's reclaim efficiency */
- vmpressure(sc->gfp_mask, memcg, false,
- sc->nr_scanned - scanned,
- sc->nr_reclaimed - reclaimed);
+ if (!sc->proactive)
+ vmpressure(sc->gfp_mask, memcg, false,
+ sc->nr_scanned - scanned,
+ sc->nr_reclaimed - reclaimed);
} while ((memcg = mem_cgroup_iter(target_memcg, memcg, NULL)));
}
@@ -3250,9 +3309,10 @@ again:
}
/* Record the subtree's reclaim efficiency */
- vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
- sc->nr_scanned - nr_scanned,
- sc->nr_reclaimed - nr_reclaimed);
+ if (!sc->proactive)
+ vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
+ sc->nr_scanned - nr_scanned,
+ sc->nr_reclaimed - nr_reclaimed);
if (sc->nr_reclaimed - nr_reclaimed)
reclaimable = true;
@@ -3534,8 +3594,9 @@ retry:
__count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1);
do {
- vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
- sc->priority);
+ if (!sc->proactive)
+ vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
+ sc->priority);
sc->nr_scanned = 0;
shrink_zones(zonelist, sc);
@@ -3825,7 +3886,7 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
unsigned long nr_pages,
gfp_t gfp_mask,
- bool may_swap)
+ unsigned int reclaim_options)
{
unsigned long nr_reclaimed;
unsigned int noreclaim_flag;
@@ -3838,7 +3899,8 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
.priority = DEF_PRIORITY,
.may_writepage = !laptop_mode,
.may_unmap = 1,
- .may_swap = may_swap,
+ .may_swap = !!(reclaim_options & MEMCG_RECLAIM_MAY_SWAP),
+ .proactive = !!(reclaim_options & MEMCG_RECLAIM_PROACTIVE),
};
/*
* Traverse the ZONELIST_FALLBACK zonelist of the current node to put
@@ -4595,7 +4657,7 @@ void kswapd_run(int nid)
/*
* Called by memory hotplug when all memory in a node is offlined. Caller must
- * hold mem_hotplug_begin/end().
+ * be holding mem_hotplug_begin/done().
*/
void kswapd_stop(int nid)
{
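
The mm/vmscan.c hunks above do two related things: they convert the LRU shrinking paths from struct page to struct folio helpers (folio_put_testzero(), folio_nr_pages(), lruvec_add_folio() and friends), and they thread a new sc->proactive flag through reclaim so that proactive, userspace-triggered reclaim skips the vmpressure notifications. As part of that, try_to_free_mem_cgroup_pages() trades its bool may_swap argument for a reclaim_options bitmask. The stand-alone C sketch below mirrors only that flag-decoding pattern; the two flag names follow the kernel's, while the mock scan_control and the values are illustrative.

/*
 * Illustrative userspace mock of the reclaim_options bitmask that
 * try_to_free_mem_cgroup_pages() adopts above.  Flag names mirror the
 * kernel's; everything else is a stand-in.
 */
#include <stdbool.h>
#include <stdio.h>

#define MEMCG_RECLAIM_MAY_SWAP   (1U << 0)
#define MEMCG_RECLAIM_PROACTIVE  (1U << 1)

struct scan_control {
    bool may_swap;
    bool proactive;
};

static void setup_scan_control(struct scan_control *sc, unsigned int reclaim_options)
{
    /* !! collapses the masked bit into 0 or 1, as in the hunk above */
    sc->may_swap  = !!(reclaim_options & MEMCG_RECLAIM_MAY_SWAP);
    sc->proactive = !!(reclaim_options & MEMCG_RECLAIM_PROACTIVE);
}

int main(void)
{
    struct scan_control sc;

    /* a proactive reclaim request that is also allowed to swap */
    setup_scan_control(&sc, MEMCG_RECLAIM_MAY_SWAP | MEMCG_RECLAIM_PROACTIVE);
    printf("may_swap=%d proactive=%d\n", sc.may_swap, sc.proactive);
    return 0;
}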
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 373d2730fcf2..90af9a8572f5 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1168,8 +1168,15 @@ int fragmentation_index(struct zone *zone, unsigned int order)
#define TEXT_FOR_HIGHMEM(xx)
#endif
+#ifdef CONFIG_ZONE_DEVICE
+#define TEXT_FOR_DEVICE(xx) xx "_device",
+#else
+#define TEXT_FOR_DEVICE(xx)
+#endif
+
#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
- TEXT_FOR_HIGHMEM(xx) xx "_movable",
+ TEXT_FOR_HIGHMEM(xx) xx "_movable", \
+ TEXT_FOR_DEVICE(xx)
const char * const vmstat_text[] = {
/* enum zone_stat_item counters */
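
The mm/vmstat.c change adds a TEXT_FOR_DEVICE() helper so the per-zone counter names gain a "_device" variant when CONFIG_ZONE_DEVICE is enabled. A minimal userspace sketch of the same string-pasting expansion follows; the DMA32 case is omitted for brevity and the config #ifdefs are replaced by plain defines, so it only shows the macro mechanics, not the real name list.

/* Illustrative only: how the zone-name suffixes are pasted together. */
#include <stdio.h>

#define TEXT_FOR_DMA(xx)     xx "_dma",
#define TEXT_FOR_HIGHMEM(xx) xx "_highmem",
#define TEXT_FOR_DEVICE(xx)  xx "_device",

#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) xx "_normal", \
                            TEXT_FOR_HIGHMEM(xx) xx "_movable", \
                            TEXT_FOR_DEVICE(xx)

static const char *const names[] = { TEXTS_FOR_ZONES("nr_free") };

int main(void)
{
    for (unsigned int i = 0; i < sizeof(names) / sizeof(names[0]); i++)
        puts(names[i]);    /* nr_free_dma ... nr_free_device */
    return 0;
}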
diff --git a/mm/workingset.c b/mm/workingset.c
index 592569a8974c..a5e84862fc86 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -625,7 +625,7 @@ static int __init workingset_init(void)
pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
timestamp_bits, max_order, bucket_order);
- ret = prealloc_shrinker(&workingset_shadow_shrinker);
+ ret = prealloc_shrinker(&workingset_shadow_shrinker, "mm-shadow");
if (ret)
goto err;
ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key,
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 71d6edcbea48..907c9b1e1e61 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -386,7 +386,10 @@ static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
unsigned long *handle)
{
*handle = zs_malloc(pool, size, gfp);
- return *handle ? 0 : -1;
+
+ if (IS_ERR((void *)(*handle)))
+ return PTR_ERR((void *)*handle);
+ return 0;
}
static void zs_zpool_free(void *pool, unsigned long handle)
{
@@ -1388,7 +1391,7 @@ static unsigned long obj_malloc(struct zs_pool *pool,
* @gfp: gfp flags when allocating object
*
* On success, handle to the allocated object is returned,
- * otherwise 0.
+ * otherwise an ERR_PTR().
* Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
*/
unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
@@ -1399,11 +1402,11 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
struct zspage *zspage;
if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
- return 0;
+ return (unsigned long)ERR_PTR(-EINVAL);
handle = cache_alloc_handle(pool, gfp);
if (!handle)
- return 0;
+ return (unsigned long)ERR_PTR(-ENOMEM);
/* extra space in chunk to keep the handle */
size += ZS_HANDLE_SIZE;
@@ -1428,7 +1431,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
zspage = alloc_zspage(pool, class, gfp);
if (!zspage) {
cache_free_handle(pool, handle);
- return 0;
+ return (unsigned long)ERR_PTR(-ENOMEM);
}
spin_lock(&class->lock);
@@ -1484,7 +1487,7 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
struct size_class *class;
enum fullness_group fullness;
- if (unlikely(!handle))
+ if (IS_ERR_OR_NULL((void *)handle))
return;
/*
@@ -2169,7 +2172,8 @@ static int zs_register_shrinker(struct zs_pool *pool)
pool->shrinker.batch = 0;
pool->shrinker.seeks = DEFAULT_SEEKS;
- return register_shrinker(&pool->shrinker);
+ return register_shrinker(&pool->shrinker, "mm-zspool:%s",
+ pool->name);
}
/**
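
In mm/zsmalloc.c, zs_malloc() now reports failure through ERR_PTR()-encoded values carried in the unsigned long handle instead of returning 0, and zs_zpool_malloc()/zs_free() decode them with PTR_ERR()/IS_ERR_OR_NULL(). The convention packs a small negative errno into the topmost MAX_ERRNO values of the address space. Below is a hedged userspace re-implementation of the idea; the helpers are modelled on include/linux/err.h and the returned handle value is made up.

/* Illustrative only: the ERR_PTR()-in-a-handle convention. */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO       4095
#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error)     { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)   { return IS_ERR_VALUE((unsigned long)ptr); }

/* stand-in for zs_malloc(): returns a handle, or an ERR_PTR-encoded errno */
static unsigned long fake_zs_malloc(size_t size)
{
    if (!size)
        return (unsigned long)ERR_PTR(-EINVAL);
    return 0x1000;    /* pretend this is a valid handle */
}

int main(void)
{
    unsigned long handle = fake_zs_malloc(0);

    if (IS_ERR((void *)handle))
        printf("zs_malloc failed: %ld\n", PTR_ERR((void *)handle));
    else
        printf("handle = %#lx\n", handle);
    return 0;
}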
diff --git a/net/9p/client.c b/net/9p/client.c
index d403085b9ef5..0a6110e15d0f 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -298,14 +298,14 @@ p9_tag_alloc(struct p9_client *c, int8_t type, unsigned int max_size)
/* Init ref to two because in the general case there is one ref
* that is put asynchronously by a writer thread, one ref
* temporarily given by p9_tag_lookup and put by p9_client_cb
- * in the recv thread, and one ref put by p9_tag_remove in the
+ * in the recv thread, and one ref put by p9_req_put in the
* main thread. The only exception is virtio that does not use
* p9_tag_lookup but does not have a writer thread either
* (the write happens synchronously in the request/zc_request
* callback), so p9_client_cb eats the second ref there
* as the pointer is duplicated directly by virtqueue_add_sgs()
*/
- refcount_set(&req->refcount.refcount, 2);
+ refcount_set(&req->refcount, 2);
return req;
@@ -341,7 +341,7 @@ again:
if (!p9_req_try_get(req))
goto again;
if (req->tc.tag != tag) {
- p9_req_put(req);
+ p9_req_put(c, req);
goto again;
}
}
@@ -358,30 +358,28 @@ EXPORT_SYMBOL(p9_tag_lookup);
*
* Context: Any context.
*/
-static int p9_tag_remove(struct p9_client *c, struct p9_req_t *r)
+static void p9_tag_remove(struct p9_client *c, struct p9_req_t *r)
{
unsigned long flags;
u16 tag = r->tc.tag;
- p9_debug(P9_DEBUG_MUX, "clnt %p req %p tag: %d\n", c, r, tag);
+ p9_debug(P9_DEBUG_MUX, "freeing clnt %p req %p tag: %d\n", c, r, tag);
spin_lock_irqsave(&c->lock, flags);
idr_remove(&c->reqs, tag);
spin_unlock_irqrestore(&c->lock, flags);
- return p9_req_put(r);
}
-static void p9_req_free(struct kref *ref)
+int p9_req_put(struct p9_client *c, struct p9_req_t *r)
{
- struct p9_req_t *r = container_of(ref, struct p9_req_t, refcount);
+ if (refcount_dec_and_test(&r->refcount)) {
+ p9_tag_remove(c, r);
- p9_fcall_fini(&r->tc);
- p9_fcall_fini(&r->rc);
- kmem_cache_free(p9_req_cache, r);
-}
-
-int p9_req_put(struct p9_req_t *r)
-{
- return kref_put(&r->refcount, p9_req_free);
+ p9_fcall_fini(&r->tc);
+ p9_fcall_fini(&r->rc);
+ kmem_cache_free(p9_req_cache, r);
+ return 1;
+ }
+ return 0;
}
EXPORT_SYMBOL(p9_req_put);
@@ -400,7 +398,7 @@ static void p9_tag_cleanup(struct p9_client *c)
rcu_read_lock();
idr_for_each_entry(&c->reqs, req, id) {
pr_info("Tag %d still in use\n", id);
- if (p9_tag_remove(c, req) == 0)
+ if (p9_req_put(c, req) == 0)
pr_warn("Packet with tag %d has still references",
req->tc.tag);
}
@@ -426,7 +424,7 @@ void p9_client_cb(struct p9_client *c, struct p9_req_t *req, int status)
wake_up(&req->wq);
p9_debug(P9_DEBUG_MUX, "wakeup: %d\n", req->tc.tag);
- p9_req_put(req);
+ p9_req_put(c, req);
}
EXPORT_SYMBOL(p9_client_cb);
@@ -589,7 +587,7 @@ static int p9_client_flush(struct p9_client *c, struct p9_req_t *oldreq)
c->trans_mod->cancelled(c, oldreq);
}
- p9_tag_remove(c, req);
+ p9_req_put(c, req);
return 0;
}
@@ -623,9 +621,9 @@ static struct p9_req_t *p9_client_prepare_req(struct p9_client *c,
trace_9p_client_req(c, type, req->tc.tag);
return req;
reterr:
- p9_tag_remove(c, req);
+ p9_req_put(c, req);
/* We have to put also the 2nd reference as it won't be used */
- p9_req_put(req);
+ p9_req_put(c, req);
return ERR_PTR(err);
}
@@ -635,7 +633,7 @@ reterr:
* @type: type of request
* @fmt: protocol format string (see protocol.c)
*
- * Returns request structure (which client must free using p9_tag_remove)
+ * Returns request structure (which client must free using p9_req_put)
*/
static struct p9_req_t *
@@ -662,7 +660,7 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
err = c->trans_mod->request(c, req);
if (err < 0) {
/* write won't happen */
- p9_req_put(req);
+ p9_req_put(c, req);
if (err != -ERESTARTSYS && err != -EFAULT)
c->status = Disconnected;
goto recalc_sigpending;
@@ -713,7 +711,7 @@ recalc_sigpending:
if (!err)
return req;
reterr:
- p9_tag_remove(c, req);
+ p9_req_put(c, req);
return ERR_PTR(safe_errno(err));
}
@@ -728,7 +726,7 @@ reterr:
* @in_hdrlen: reader header size, This is the size of response protocol data
* @fmt: protocol format string (see protocol.c)
*
- * Returns request structure (which client must free using p9_tag_remove)
+ * Returns request structure (which client must free using p9_req_put)
*/
static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type,
struct iov_iter *uidata,
@@ -795,7 +793,7 @@ recalc_sigpending:
if (!err)
return req;
reterr:
- p9_tag_remove(c, req);
+ p9_req_put(c, req);
return ERR_PTR(safe_errno(err));
}
@@ -805,16 +803,13 @@ static struct p9_fid *p9_fid_create(struct p9_client *clnt)
struct p9_fid *fid;
p9_debug(P9_DEBUG_FID, "clnt %p\n", clnt);
- fid = kmalloc(sizeof(*fid), GFP_KERNEL);
+ fid = kzalloc(sizeof(*fid), GFP_KERNEL);
if (!fid)
return NULL;
- memset(&fid->qid, 0, sizeof(fid->qid));
fid->mode = -1;
fid->uid = current_fsuid();
fid->clnt = clnt;
- fid->rdir = NULL;
- fid->fid = 0;
refcount_set(&fid->count, 1);
idr_preload(GFP_KERNEL);
@@ -823,8 +818,10 @@ static struct p9_fid *p9_fid_create(struct p9_client *clnt)
GFP_NOWAIT);
spin_unlock_irq(&clnt->lock);
idr_preload_end();
- if (!ret)
+ if (!ret) {
+ trace_9p_fid_ref(fid, P9_FID_REF_CREATE);
return fid;
+ }
kfree(fid);
return NULL;
@@ -836,6 +833,7 @@ static void p9_fid_destroy(struct p9_fid *fid)
unsigned long flags;
p9_debug(P9_DEBUG_FID, "fid %d\n", fid->fid);
+ trace_9p_fid_ref(fid, P9_FID_REF_DESTROY);
clnt = fid->clnt;
spin_lock_irqsave(&clnt->lock, flags);
idr_remove(&clnt->fids, fid->fid);
@@ -844,6 +842,21 @@ static void p9_fid_destroy(struct p9_fid *fid)
kfree(fid);
}
+/* We also need to export tracepoint symbols for tracepoint_enabled() */
+EXPORT_TRACEPOINT_SYMBOL(9p_fid_ref);
+
+void do_trace_9p_fid_get(struct p9_fid *fid)
+{
+ trace_9p_fid_ref(fid, P9_FID_REF_GET);
+}
+EXPORT_SYMBOL(do_trace_9p_fid_get);
+
+void do_trace_9p_fid_put(struct p9_fid *fid)
+{
+ trace_9p_fid_ref(fid, P9_FID_REF_PUT);
+}
+EXPORT_SYMBOL(do_trace_9p_fid_put);
+
static int p9_client_version(struct p9_client *c)
{
int err = 0;
@@ -906,7 +919,7 @@ static int p9_client_version(struct p9_client *c)
error:
kfree(version);
- p9_tag_remove(c, req);
+ p9_req_put(c, req);
return err;
}
@@ -1060,7 +1073,7 @@ struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
err = p9pdu_readf(&req->rc, clnt->proto_version, "Q", &qid);
if (err) {
trace_9p_protocol_dump(clnt, &req->rc);
- p9_tag_remove(clnt, req);
+ p9_req_put(clnt, req);
goto error;
}
@@ -1069,7 +1082,7 @@ struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
memmove(&fid->qid, &qid, sizeof(struct p9_qid));
- p9_tag_remove(clnt, req);
+ p9_req_put(clnt, req);
return fid;
error:
@@ -1116,10 +1129,10 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname,
err = p9pdu_readf(&req->rc, clnt->proto_version, "R", &nwqids, &wqids);
if (err) {
trace_9p_protocol_dump(clnt, &req->rc);
- p9_tag_remove(clnt, req);
+ p9_req_put(clnt, req);
goto clunk_fid;
}
- p9_tag_remove(clnt, req);
+ p9_req_put(clnt, req);
p9_debug(P9_DEBUG_9P, "<<< RWALK nwqid %d:\n", nwqids);
@@ -1144,7 +1157,7 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname,
clunk_fid:
kfree(wqids);
- p9_client_clunk(fid);
+ p9_fid_put(fid);
fid = NULL;
error:
@@ -1195,7 +1208,7 @@ int p9_client_open(struct p9_fid *fid, int mode)
fid->iounit = iounit;
free_and_error:
- p9_tag_remove(clnt, req);
+ p9_req_put(clnt, req);
error:
return err;
}
@@ -1239,7 +1252,7 @@ int p9_client_create_dotl(struct p9_fid *ofid, const char *name, u32 flags,
ofid->iounit = iounit;
free_and_error:
- p9_tag_remove(clnt, req);
+ p9_req_put(clnt, req);
error:
return err;
}
@@ -1283,7 +1296,7 @@ int p9_client_fcreate(struct p9_fid *fid, const char *name, u32 perm, int mode,
fid->iounit = iounit;
free_and_error:
- p9_tag_remove(clnt, req);
+ p9_req_put(clnt, req);
error:
return err;
}
@@ -1317,7 +1330,7 @@ int p9_client_symlink(struct p9_fid *dfid, const char *name,
qid->type, qid->path, qid->version);
free_and_error:
- p9_tag_remove(clnt, req);
+ p9_req_put(clnt, req);
error:
return err;
}
@@ -1337,7 +1350,7 @@ int p9_client_link(struct p9_fid *dfid, struct p9_fid *oldfid, const char *newna
return PTR_ERR(req);
p9_debug(P9_DEBUG_9P, "<<< RLINK\n");
- p9_tag_remove(clnt, req);
+ p9_req_put(clnt, req);
return 0;
}
EXPORT_SYMBOL(p9_client_link);
@@ -1361,7 +1374,7 @@ int p9_client_fsync(struct p9_fid *fid, int datasync)
p9_debug(P9_DEBUG_9P, "<<< RFSYNC fid %d\n", fid->fid);
- p9_tag_remove(clnt, req);
+ p9_req_put(clnt, req);
error:
return err;
@@ -1375,15 +1388,6 @@ int p9_client_clunk(struct p9_fid *fid)
struct p9_req_t *req;
int retries = 0;
- if (!fid || IS_ERR(fid)) {
- pr_warn("%s (%d): Trying to clunk with invalid fid\n",
- __func__, task_pid_nr(current));
- dump_stack();
- return 0;
- }
- if (!refcount_dec_and_test(&fid->count))
- return 0;
-
again:
p9_debug(P9_DEBUG_9P, ">>> TCLUNK fid %d (try %d)\n",
fid->fid, retries);
@@ -1398,7 +1402,7 @@ again:
p9_debug(P9_DEBUG_9P, "<<< RCLUNK fid %d\n", fid->fid);
- p9_tag_remove(clnt, req);
+ p9_req_put(clnt, req);
error:
/* Fid is not valid even after a failed clunk
* If interrupted, retry once then give up and
@@ -1432,10 +1436,10 @@ int p9_client_remove(struct p9_fid *fid)
p9_debug(P9_DEBUG_9P, "<<< RREMOVE fid %d\n", fid->fid);
- p9_tag_remove(clnt, req);
+ p9_req_put(clnt, req);
error:
if (err == -ERESTARTSYS)
- p9_client_clunk(fid);
+ p9_fid_put(fid);
else
p9_fid_destroy(fid);
return err;
@@ -1459,7 +1463,7 @@ int p9_client_unlinkat(struct p9_fid *dfid, const char *name, int flags)
}
p9_debug(P9_DEBUG_9P, "<<< RUNLINKAT fid %d %s\n", dfid->fid, name);
- p9_tag_remove(clnt, req);
+ p9_req_put(clnt, req);
error:
return err;
}
@@ -1491,7 +1495,7 @@ p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to,
struct p9_client *clnt = fid->clnt;
struct p9_req_t *req;
int count = iov_iter_count(to);
- int rsize, non_zc = 0;
+ int rsize, received, non_zc = 0;
char *dataptr;
*err = 0;
@@ -1520,36 +1524,40 @@ p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to,
}
if (IS_ERR(req)) {
*err = PTR_ERR(req);
+ if (!non_zc)
+ iov_iter_revert(to, count - iov_iter_count(to));
return 0;
}
*err = p9pdu_readf(&req->rc, clnt->proto_version,
- "D", &count, &dataptr);
+ "D", &received, &dataptr);
if (*err) {
+ if (!non_zc)
+ iov_iter_revert(to, count - iov_iter_count(to));
trace_9p_protocol_dump(clnt, &req->rc);
- p9_tag_remove(clnt, req);
+ p9_req_put(clnt, req);
return 0;
}
- if (rsize < count) {
- pr_err("bogus RREAD count (%d > %d)\n", count, rsize);
- count = rsize;
+ if (rsize < received) {
+ pr_err("bogus RREAD count (%d > %d)\n", received, rsize);
+ received = rsize;
}
p9_debug(P9_DEBUG_9P, "<<< RREAD count %d\n", count);
if (non_zc) {
- int n = copy_to_iter(dataptr, count, to);
+ int n = copy_to_iter(dataptr, received, to);
- if (n != count) {
+ if (n != received) {
*err = -EFAULT;
- p9_tag_remove(clnt, req);
+ p9_req_put(clnt, req);
return n;
}
} else {
- iov_iter_advance(to, count);
+ iov_iter_revert(to, count - received - iov_iter_count(to));
}
- p9_tag_remove(clnt, req);
- return count;
+ p9_req_put(clnt, req);
+ return received;
}
EXPORT_SYMBOL(p9_client_read_once);
@@ -1567,6 +1575,7 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
while (iov_iter_count(from)) {
int count = iov_iter_count(from);
int rsize = fid->iounit;
+ int written;
if (!rsize || rsize > clnt->msize - P9_IOHDRSZ)
rsize = clnt->msize - P9_IOHDRSZ;
@@ -1584,27 +1593,29 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
offset, rsize, from);
}
if (IS_ERR(req)) {
+ iov_iter_revert(from, count - iov_iter_count(from));
*err = PTR_ERR(req);
break;
}
- *err = p9pdu_readf(&req->rc, clnt->proto_version, "d", &count);
+ *err = p9pdu_readf(&req->rc, clnt->proto_version, "d", &written);
if (*err) {
+ iov_iter_revert(from, count - iov_iter_count(from));
trace_9p_protocol_dump(clnt, &req->rc);
- p9_tag_remove(clnt, req);
+ p9_req_put(clnt, req);
break;
}
- if (rsize < count) {
- pr_err("bogus RWRITE count (%d > %d)\n", count, rsize);
- count = rsize;
+ if (rsize < written) {
+ pr_err("bogus RWRITE count (%d > %d)\n", written, rsize);
+ written = rsize;
}
p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", count);
- p9_tag_remove(clnt, req);
- iov_iter_advance(from, count);
- total += count;
- offset += count;
+ p9_req_put(clnt, req);
+ iov_iter_revert(from, count - written - iov_iter_count(from));
+ total += written;
+ offset += written;
}
return total;
}
@@ -1636,7 +1647,7 @@ struct p9_wstat *p9_client_stat(struct p9_fid *fid)
err = p9pdu_readf(&req->rc, clnt->proto_version, "wS", &ignored, ret);
if (err) {
trace_9p_protocol_dump(clnt, &req->rc);
- p9_tag_remove(clnt, req);
+ p9_req_put(clnt, req);
goto error;
}
@@ -1653,7 +1664,7 @@ struct p9_wstat *p9_client_stat(struct p9_fid *fid)
from_kgid(&init_user_ns, ret->n_gid),
from_kuid(&init_user_ns, ret->n_muid));
- p9_tag_remove(clnt, req);
+ p9_req_put(clnt, req);
return ret;
error:
@@ -1689,7 +1700,7 @@ struct p9_stat_dotl *p9_client_getattr_dotl(struct p9_fid *fid,
err = p9pdu_readf(&req->rc, clnt->proto_version, "A", ret);
if (err) {
trace_9p_protocol_dump(clnt, &req->rc);
- p9_tag_remove(clnt, req);
+ p9_req_put(clnt, req);
goto error;
}
@@ -1715,7 +1726,7 @@ struct p9_stat_dotl *p9_client_getattr_dotl(struct p9_fid *fid,
ret->st_btime_sec, ret->st_btime_nsec,
ret->st_gen, ret->st_data_version);
- p9_tag_remove(clnt, req);
+ p9_req_put(clnt, req);
return ret;
error:
@@ -1787,7 +1798,7 @@ int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst)
p9_debug(P9_DEBUG_9P, "<<< RWSTAT fid %d\n", fid->fid);
- p9_tag_remove(clnt, req);
+ p9_req_put(clnt, req);
error:
return err;
}
@@ -1819,7 +1830,7 @@ int p9_client_setattr(struct p9_fid *fid, struct p9_iattr_dotl *p9attr)
goto error;
}
p9_debug(P9_DEBUG_9P, "<<< RSETATTR fid %d\n", fid->fid);
- p9_tag_remove(clnt, req);
+ p9_req_put(clnt, req);
error:
return err;
}
@@ -1847,7 +1858,7 @@ int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb)
&sb->files, &sb->ffree, &sb->fsid, &sb->namelen);
if (err) {
trace_9p_protocol_dump(clnt, &req->rc);
- p9_tag_remove(clnt, req);
+ p9_req_put(clnt, req);
goto error;
}
@@ -1856,7 +1867,7 @@ int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb)
fid->fid, sb->type, sb->bsize, sb->blocks, sb->bfree,
sb->bavail, sb->files, sb->ffree, sb->fsid, sb->namelen);
- p9_tag_remove(clnt, req);
+ p9_req_put(clnt, req);
error:
return err;
}
@@ -1884,7 +1895,7 @@ int p9_client_rename(struct p9_fid *fid,
p9_debug(P9_DEBUG_9P, "<<< RRENAME fid %d\n", fid->fid);
- p9_tag_remove(clnt, req);
+ p9_req_put(clnt, req);
error:
return err;
}
@@ -1914,7 +1925,7 @@ int p9_client_renameat(struct p9_fid *olddirfid, const char *old_name,
p9_debug(P9_DEBUG_9P, "<<< RRENAMEAT newdirfid %d new name %s\n",
newdirfid->fid, new_name);
- p9_tag_remove(clnt, req);
+ p9_req_put(clnt, req);
error:
return err;
}
@@ -1950,15 +1961,15 @@ struct p9_fid *p9_client_xattrwalk(struct p9_fid *file_fid,
err = p9pdu_readf(&req->rc, clnt->proto_version, "q", attr_size);
if (err) {
trace_9p_protocol_dump(clnt, &req->rc);
- p9_tag_remove(clnt, req);
+ p9_req_put(clnt, req);
goto clunk_fid;
}
- p9_tag_remove(clnt, req);
+ p9_req_put(clnt, req);
p9_debug(P9_DEBUG_9P, "<<< RXATTRWALK fid %d size %llu\n",
attr_fid->fid, *attr_size);
return attr_fid;
clunk_fid:
- p9_client_clunk(attr_fid);
+ p9_fid_put(attr_fid);
attr_fid = NULL;
error:
if (attr_fid && attr_fid != file_fid)
@@ -1987,7 +1998,7 @@ int p9_client_xattrcreate(struct p9_fid *fid, const char *name,
goto error;
}
p9_debug(P9_DEBUG_9P, "<<< RXATTRCREATE fid %d\n", fid->fid);
- p9_tag_remove(clnt, req);
+ p9_req_put(clnt, req);
error:
return err;
}
@@ -2049,11 +2060,11 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
if (non_zc)
memmove(data, dataptr, count);
- p9_tag_remove(clnt, req);
+ p9_req_put(clnt, req);
return count;
free_and_error:
- p9_tag_remove(clnt, req);
+ p9_req_put(clnt, req);
error:
return err;
}
@@ -2085,7 +2096,7 @@ int p9_client_mknod_dotl(struct p9_fid *fid, const char *name, int mode,
qid->type, qid->path, qid->version);
error:
- p9_tag_remove(clnt, req);
+ p9_req_put(clnt, req);
return err;
}
EXPORT_SYMBOL(p9_client_mknod_dotl);
@@ -2115,7 +2126,7 @@ int p9_client_mkdir_dotl(struct p9_fid *fid, const char *name, int mode,
qid->path, qid->version);
error:
- p9_tag_remove(clnt, req);
+ p9_req_put(clnt, req);
return err;
}
EXPORT_SYMBOL(p9_client_mkdir_dotl);
@@ -2147,7 +2158,7 @@ int p9_client_lock_dotl(struct p9_fid *fid, struct p9_flock *flock, u8 *status)
}
p9_debug(P9_DEBUG_9P, "<<< RLOCK status %i\n", *status);
error:
- p9_tag_remove(clnt, req);
+ p9_req_put(clnt, req);
return err;
}
EXPORT_SYMBOL(p9_client_lock_dotl);
@@ -2184,7 +2195,7 @@ int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *glock)
glock->type, glock->start, glock->length,
glock->proc_id, glock->client_id);
error:
- p9_tag_remove(clnt, req);
+ p9_req_put(clnt, req);
return err;
}
EXPORT_SYMBOL(p9_client_getlock_dotl);
@@ -2210,7 +2221,7 @@ int p9_client_readlink(struct p9_fid *fid, char **target)
}
p9_debug(P9_DEBUG_9P, "<<< RREADLINK target %s\n", *target);
error:
- p9_tag_remove(clnt, req);
+ p9_req_put(clnt, req);
return err;
}
EXPORT_SYMBOL(p9_client_readlink);
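
Two threads run through the net/9p/client.c changes above: request release is now p9_req_put(c, req), which removes the tag and frees the fcalls itself once refcount_dec_and_test() fires (the old kref plus p9_tag_remove() pairing is gone), and p9_client_read_once()/p9_client_write() revert the iov_iter so it ends up advanced by exactly the byte count the server acknowledged rather than by whatever the transport consumed. The expression iov_iter_revert(to, count - received - iov_iter_count(to)) is easiest to check with a toy model; the sketch below uses invented numbers and a plain variable in place of the iterator.

/* Illustrative only: the revert arithmetic added to the 9p read path. */
#include <assert.h>
#include <stdio.h>

int main(void)
{
    long count = 8192;      /* iov_iter_count() before the RPC */
    long advanced = 8192;   /* the zero-copy transport consumed the whole buffer ... */
    long received = 4096;   /* ... but the server only returned this much */

    long remaining = count - advanced;              /* what iov_iter_count() reports now */
    long revert = count - received - remaining;     /* the expression used in the diff */

    long pos_after = advanced - revert;
    assert(pos_after == received);                  /* the iterator reflects the real I/O */
    printf("revert %ld bytes, iterator ends up advanced by %ld\n", revert, pos_after);
    return 0;
}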
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 3754c33e2974..83694c631989 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -63,9 +63,8 @@ static size_t
pdu_write_u(struct p9_fcall *pdu, struct iov_iter *from, size_t size)
{
size_t len = min(pdu->capacity - pdu->size, size);
- struct iov_iter i = *from;
- if (!copy_from_iter_full(&pdu->sdata[pdu->size], len, &i))
+ if (!copy_from_iter_full(&pdu->sdata[pdu->size], len, from))
len = 0;
pdu->size += len;
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 8f8f95e39b03..e758978b44be 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -343,6 +343,7 @@ static void p9_read_work(struct work_struct *work)
p9_debug(P9_DEBUG_ERROR,
"No recv fcall for tag %d (req %p), disconnecting!\n",
m->rc.tag, m->rreq);
+ p9_req_put(m->client, m->rreq);
m->rreq = NULL;
err = -EIO;
goto error;
@@ -378,7 +379,7 @@ static void p9_read_work(struct work_struct *work)
m->rc.sdata = NULL;
m->rc.offset = 0;
m->rc.capacity = 0;
- p9_req_put(m->rreq);
+ p9_req_put(m->client, m->rreq);
m->rreq = NULL;
}
@@ -492,7 +493,7 @@ static void p9_write_work(struct work_struct *work)
m->wpos += err;
if (m->wpos == m->wsize) {
m->wpos = m->wsize = 0;
- p9_req_put(m->wreq);
+ p9_req_put(m->client, m->wreq);
m->wreq = NULL;
}
@@ -695,7 +696,7 @@ static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req)
if (req->status == REQ_STATUS_UNSENT) {
list_del(&req->req_list);
req->status = REQ_STATUS_FLSHD;
- p9_req_put(req);
+ p9_req_put(client, req);
ret = 0;
}
spin_unlock(&client->lock);
@@ -722,7 +723,7 @@ static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req)
list_del(&req->req_list);
req->status = REQ_STATUS_FLSHD;
spin_unlock(&client->lock);
- p9_req_put(req);
+ p9_req_put(client, req);
return 0;
}
@@ -883,12 +884,12 @@ static void p9_conn_destroy(struct p9_conn *m)
p9_mux_poll_stop(m);
cancel_work_sync(&m->rq);
if (m->rreq) {
- p9_req_put(m->rreq);
+ p9_req_put(m->client, m->rreq);
m->rreq = NULL;
}
cancel_work_sync(&m->wq);
if (m->wreq) {
- p9_req_put(m->wreq);
+ p9_req_put(m->client, m->wreq);
m->wreq = NULL;
}
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 88e563826674..d817d3745238 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -350,7 +350,7 @@ send_done(struct ib_cq *cq, struct ib_wc *wc)
c->busa, c->req->tc.size,
DMA_TO_DEVICE);
up(&rdma->sq_sem);
- p9_req_put(c->req);
+ p9_req_put(client, c->req);
kfree(c);
}
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 2a210c2f8e40..b84d35cf6899 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -199,7 +199,7 @@ static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
/* Reply won't come, so drop req ref */
static int p9_virtio_cancelled(struct p9_client *client, struct p9_req_t *req)
{
- p9_req_put(req);
+ p9_req_put(client, req);
return 0;
}
@@ -331,7 +331,7 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
if (err == -ERESTARTSYS)
return err;
}
- n = iov_iter_get_pages_alloc(data, pages, count, offs);
+ n = iov_iter_get_pages_alloc2(data, pages, count, offs);
if (n < 0)
return n;
*need_drop = 1;
@@ -373,6 +373,7 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
(*pages)[index] = kmap_to_page(p);
p += PAGE_SIZE;
}
+ iov_iter_advance(data, len);
return len;
}
}
@@ -557,7 +558,7 @@ err_out:
kvfree(out_pages);
if (!kicked) {
/* reply won't come */
- p9_req_put(req);
+ p9_req_put(client, req);
}
return err;
}
diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
index 833cd3792c51..227f89cc7237 100644
--- a/net/9p/trans_xen.c
+++ b/net/9p/trans_xen.c
@@ -163,7 +163,7 @@ again:
ring->intf->out_prod = prod;
spin_unlock_irqrestore(&ring->lock, flags);
notify_remote_via_irq(ring->irq);
- p9_req_put(p9_req);
+ p9_req_put(client, p9_req);
return 0;
}
diff --git a/net/ax25/ax25_timer.c b/net/ax25/ax25_timer.c
index 85865ebfdfa2..9f7cb0a7c73f 100644
--- a/net/ax25/ax25_timer.c
+++ b/net/ax25/ax25_timer.c
@@ -108,10 +108,12 @@ int ax25_t1timer_running(ax25_cb *ax25)
unsigned long ax25_display_timer(struct timer_list *timer)
{
+ long delta = timer->expires - jiffies;
+
if (!timer_pending(timer))
return 0;
- return timer->expires - jiffies;
+ return max(0L, delta);
}
EXPORT_SYMBOL(ax25_display_timer);
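
ax25_display_timer() now computes the remaining time as a signed delta and clamps it at zero, because an already-expired timer would otherwise wrap around to an enormous unsigned value. A tiny illustration with arbitrary numbers:

/* Illustrative only: why the delta is clamped at zero. */
#include <stdio.h>

int main(void)
{
    unsigned long jiffies = 1000, expires = 900;    /* the timer already fired */

    unsigned long raw = expires - jiffies;          /* wraps to a huge value */
    long delta = expires - jiffies;                 /* -100 once treated as signed */
    long shown = delta > 0 ? delta : 0;             /* what max(0L, delta) yields */

    printf("raw=%lu clamped=%ld\n", raw, shown);
    return 0;
}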
diff --git a/net/batman-adv/trace.h b/net/batman-adv/trace.h
index d673ebdd0426..31c8f922651d 100644
--- a/net/batman-adv/trace.h
+++ b/net/batman-adv/trace.h
@@ -28,8 +28,6 @@
#endif /* CONFIG_BATMAN_ADV_TRACING */
-#define BATADV_MAX_MSG_LEN 256
-
TRACE_EVENT(batadv_dbg,
TP_PROTO(struct batadv_priv *bat_priv,
@@ -40,16 +38,13 @@ TRACE_EVENT(batadv_dbg,
TP_STRUCT__entry(
__string(device, bat_priv->soft_iface->name)
__string(driver, KBUILD_MODNAME)
- __dynamic_array(char, msg, BATADV_MAX_MSG_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
__assign_str(device, bat_priv->soft_iface->name);
__assign_str(driver, KBUILD_MODNAME);
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- BATADV_MAX_MSG_LEN,
- vaf->fmt,
- *vaf->va) >= BATADV_MAX_MSG_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk(
diff --git a/net/bluetooth/aosp.c b/net/bluetooth/aosp.c
index 432ae3aac9e3..1d67836e95e1 100644
--- a/net/bluetooth/aosp.c
+++ b/net/bluetooth/aosp.c
@@ -54,7 +54,10 @@ void aosp_do_open(struct hci_dev *hdev)
/* LE Get Vendor Capabilities Command */
skb = __hci_cmd_sync(hdev, hci_opcode_pack(0x3f, 0x153), 0, NULL,
HCI_CMD_TIMEOUT);
- if (IS_ERR(skb)) {
+ if (IS_ERR_OR_NULL(skb)) {
+ if (!skb)
+ skb = ERR_PTR(-EIO);
+
bt_dev_err(hdev, "AOSP get vendor capabilities (%ld)",
PTR_ERR(skb));
return;
@@ -152,7 +155,10 @@ static int enable_quality_report(struct hci_dev *hdev)
skb = __hci_cmd_sync(hdev, BQR_OPCODE, sizeof(cp), &cp,
HCI_CMD_TIMEOUT);
- if (IS_ERR(skb)) {
+ if (IS_ERR_OR_NULL(skb)) {
+ if (!skb)
+ skb = ERR_PTR(-EIO);
+
bt_dev_err(hdev, "Enabling Android BQR failed (%ld)",
PTR_ERR(skb));
return PTR_ERR(skb);
@@ -171,7 +177,10 @@ static int disable_quality_report(struct hci_dev *hdev)
skb = __hci_cmd_sync(hdev, BQR_OPCODE, sizeof(cp), &cp,
HCI_CMD_TIMEOUT);
- if (IS_ERR(skb)) {
+ if (IS_ERR_OR_NULL(skb)) {
+ if (!skb)
+ skb = ERR_PTR(-EIO);
+
bt_dev_err(hdev, "Disabling Android BQR failed (%ld)",
PTR_ERR(skb));
return PTR_ERR(skb);
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index f54864e19866..9777e7b109ee 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -1551,8 +1551,8 @@ static void cis_add(struct iso_list_data *d, struct bt_iso_qos *qos)
cis->cis_id = qos->cis;
cis->c_sdu = cpu_to_le16(qos->out.sdu);
cis->p_sdu = cpu_to_le16(qos->in.sdu);
- cis->c_phy = qos->out.phy;
- cis->p_phy = qos->in.phy;
+ cis->c_phy = qos->out.phy ? qos->out.phy : qos->in.phy;
+ cis->p_phy = qos->in.phy ? qos->in.phy : qos->out.phy;
cis->c_rtn = qos->out.rtn;
cis->p_rtn = qos->in.rtn;
@@ -1735,13 +1735,6 @@ struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
if (!qos->in.latency)
qos->in.latency = qos->out.latency;
- /* Mirror PHYs that are disabled as SDU will be set to 0 */
- if (!qos->in.phy)
- qos->in.phy = qos->out.phy;
-
- if (!qos->out.phy)
- qos->out.phy = qos->in.phy;
-
if (!hci_le_set_cig_params(cis, qos)) {
hci_conn_drop(cis);
return ERR_PTR(-EINVAL);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index ea33dd0cd478..485c814cf44a 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -328,14 +328,17 @@ static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
struct hci_rp_delete_stored_link_key *rp = data;
+ u16 num_keys;
bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
return rp->status;
- if (rp->num_keys <= hdev->stored_num_keys)
- hdev->stored_num_keys -= le16_to_cpu(rp->num_keys);
+ num_keys = le16_to_cpu(rp->num_keys);
+
+ if (num_keys <= hdev->stored_num_keys)
+ hdev->stored_num_keys -= num_keys;
else
hdev->stored_num_keys = 0;
diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
index ff09c353e64e..ced8ad4fed4f 100644
--- a/net/bluetooth/iso.c
+++ b/net/bluetooth/iso.c
@@ -44,6 +44,9 @@ static void iso_sock_kill(struct sock *sk);
/* ----- ISO socket info ----- */
#define iso_pi(sk) ((struct iso_pinfo *)sk)
+#define EIR_SERVICE_DATA_LENGTH 4
+#define BASE_MAX_LENGTH (HCI_MAX_PER_AD_LENGTH - EIR_SERVICE_DATA_LENGTH)
+
struct iso_pinfo {
struct bt_sock bt;
bdaddr_t src;
@@ -57,7 +60,7 @@ struct iso_pinfo {
__u32 flags;
struct bt_iso_qos qos;
__u8 base_len;
- __u8 base[HCI_MAX_PER_AD_LENGTH];
+ __u8 base[BASE_MAX_LENGTH];
struct iso_conn *conn;
};
@@ -370,15 +373,24 @@ done:
return err;
}
+static struct bt_iso_qos *iso_sock_get_qos(struct sock *sk)
+{
+ if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONNECT2)
+ return &iso_pi(sk)->conn->hcon->iso_qos;
+
+ return &iso_pi(sk)->qos;
+}
+
static int iso_send_frame(struct sock *sk, struct sk_buff *skb)
{
struct iso_conn *conn = iso_pi(sk)->conn;
+ struct bt_iso_qos *qos = iso_sock_get_qos(sk);
struct hci_iso_data_hdr *hdr;
int len = 0;
BT_DBG("sk %p len %d", sk, skb->len);
- if (skb->len > iso_pi(sk)->qos.out.sdu)
+ if (skb->len > qos->out.sdu)
return -EMSGSIZE;
len = skb->len;
@@ -1177,8 +1189,10 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname,
}
len = min_t(unsigned int, sizeof(qos), optlen);
- if (len != sizeof(qos))
- return -EINVAL;
+ if (len != sizeof(qos)) {
+ err = -EINVAL;
+ break;
+ }
memset(&qos, 0, sizeof(qos));
@@ -1233,7 +1247,7 @@ static int iso_sock_getsockopt(struct socket *sock, int level, int optname,
{
struct sock *sk = sock->sk;
int len, err = 0;
- struct bt_iso_qos qos;
+ struct bt_iso_qos *qos;
u8 base_len;
u8 *base;
@@ -1246,7 +1260,7 @@ static int iso_sock_getsockopt(struct socket *sock, int level, int optname,
switch (optname) {
case BT_DEFER_SETUP:
- if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
+ if (sk->sk_state == BT_CONNECTED) {
err = -EINVAL;
break;
}
@@ -1258,13 +1272,10 @@ static int iso_sock_getsockopt(struct socket *sock, int level, int optname,
break;
case BT_ISO_QOS:
- if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONNECT2)
- qos = iso_pi(sk)->conn->hcon->iso_qos;
- else
- qos = iso_pi(sk)->qos;
+ qos = iso_sock_get_qos(sk);
- len = min_t(unsigned int, len, sizeof(qos));
- if (copy_to_user(optval, (char *)&qos, len))
+ len = min_t(unsigned int, len, sizeof(*qos));
+ if (copy_to_user(optval, qos, len))
err = -EFAULT;
break;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 77c0aac14539..cbe0cae73434 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1970,11 +1970,11 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
bdaddr_t *dst,
u8 link_type)
{
- struct l2cap_chan *c, *c1 = NULL;
+ struct l2cap_chan *c, *tmp, *c1 = NULL;
read_lock(&chan_list_lock);
- list_for_each_entry(c, &chan_list, global_l) {
+ list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
if (state && c->state != state)
continue;
@@ -1993,11 +1993,10 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
dst_match = !bacmp(&c->dst, dst);
if (src_match && dst_match) {
c = l2cap_chan_hold_unless_zero(c);
- if (!c)
- continue;
-
- read_unlock(&chan_list_lock);
- return c;
+ if (c) {
+ read_unlock(&chan_list_lock);
+ return c;
+ }
}
/* Closest match */
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 646d10401b80..6e31023b84f5 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -3819,7 +3819,7 @@ static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
hci_blocked_keys_clear(hdev);
- for (i = 0; i < keys->key_count; ++i) {
+ for (i = 0; i < key_count; ++i) {
struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
if (!b) {
@@ -4624,8 +4624,7 @@ static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
u32 current_flags = __le32_to_cpu(cp->current_flags);
bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
- &cp->addr.bdaddr, cp->addr.type,
- __le32_to_cpu(current_flags));
+ &cp->addr.bdaddr, cp->addr.type, current_flags);
// We should take hci_dev_lock() early, I think.. conn_flags can change
supported_flags = hdev->conn_flags;
@@ -8936,6 +8935,8 @@ void mgmt_index_removed(struct hci_dev *hdev)
HCI_MGMT_EXT_INDEX_EVENTS);
/* Cancel any remaining timed work */
+ if (!hci_dev_test_flag(hdev, HCI_MGMT))
+ return;
cancel_delayed_work_sync(&hdev->discov_off);
cancel_delayed_work_sync(&hdev->service_cache);
cancel_delayed_work_sync(&hdev->rpa_expired);
diff --git a/net/bluetooth/msft.c b/net/bluetooth/msft.c
index 14975769f678..bee6a4c656be 100644
--- a/net/bluetooth/msft.c
+++ b/net/bluetooth/msft.c
@@ -120,7 +120,10 @@ static bool read_supported_features(struct hci_dev *hdev,
skb = __hci_cmd_sync(hdev, hdev->msft_opcode, sizeof(cp), &cp,
HCI_CMD_TIMEOUT);
- if (IS_ERR(skb)) {
+ if (IS_ERR_OR_NULL(skb)) {
+ if (!skb)
+ skb = ERR_PTR(-EIO);
+
bt_dev_err(hdev, "Failed to read MSFT supported features (%ld)",
PTR_ERR(skb));
return false;
@@ -319,8 +322,11 @@ static int msft_remove_monitor_sync(struct hci_dev *hdev,
skb = __hci_cmd_sync(hdev, hdev->msft_opcode, sizeof(cp), &cp,
HCI_CMD_TIMEOUT);
- if (IS_ERR(skb))
+ if (IS_ERR_OR_NULL(skb)) {
+ if (!skb)
+ return -EIO;
return PTR_ERR(skb);
+ }
return msft_le_cancel_monitor_advertisement_cb(hdev, hdev->msft_opcode,
monitor, skb);
@@ -432,8 +438,11 @@ static int msft_add_monitor_sync(struct hci_dev *hdev,
HCI_CMD_TIMEOUT);
kfree(cp);
- if (IS_ERR(skb))
+ if (IS_ERR_OR_NULL(skb)) {
+ if (!skb)
+ return -EIO;
return PTR_ERR(skb);
+ }
return msft_le_monitor_advertisement_cb(hdev, hdev->msft_opcode,
monitor, skb);
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index cbc9cd5058cb..d11209367dd0 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -1628,6 +1628,7 @@ static int __init bpf_prog_test_run_init(void)
int ret;
ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_prog_test_kfunc_set);
+ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_prog_test_kfunc_set);
return ret ?: register_btf_id_dtor_kfuncs(bpf_prog_test_dtor_kfunc,
ARRAY_SIZE(bpf_prog_test_dtor_kfunc),
THIS_MODULE);
diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
index 1a11064f9990..8f19253024b0 100644
--- a/net/bridge/netfilter/ebtable_broute.c
+++ b/net/bridge/netfilter/ebtable_broute.c
@@ -36,18 +36,10 @@ static struct ebt_replace_kernel initial_table = {
.entries = (char *)&initial_chain,
};
-static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
-{
- if (valid_hooks & ~(1 << NF_BR_BROUTING))
- return -EINVAL;
- return 0;
-}
-
static const struct ebt_table broute_table = {
.name = "broute",
.table = &initial_table,
.valid_hooks = 1 << NF_BR_BROUTING,
- .check = check,
.me = THIS_MODULE,
};
diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
index cb949436bc0e..278f324e6752 100644
--- a/net/bridge/netfilter/ebtable_filter.c
+++ b/net/bridge/netfilter/ebtable_filter.c
@@ -43,18 +43,10 @@ static struct ebt_replace_kernel initial_table = {
.entries = (char *)initial_chains,
};
-static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
-{
- if (valid_hooks & ~FILTER_VALID_HOOKS)
- return -EINVAL;
- return 0;
-}
-
static const struct ebt_table frame_filter = {
.name = "filter",
.table = &initial_table,
.valid_hooks = FILTER_VALID_HOOKS,
- .check = check,
.me = THIS_MODULE,
};
diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c
index 5ee0531ae506..9066f7f376d5 100644
--- a/net/bridge/netfilter/ebtable_nat.c
+++ b/net/bridge/netfilter/ebtable_nat.c
@@ -43,18 +43,10 @@ static struct ebt_replace_kernel initial_table = {
.entries = (char *)initial_chains,
};
-static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
-{
- if (valid_hooks & ~NAT_VALID_HOOKS)
- return -EINVAL;
- return 0;
-}
-
static const struct ebt_table frame_nat = {
.name = "nat",
.table = &initial_table,
.valid_hooks = NAT_VALID_HOOKS,
- .check = check,
.me = THIS_MODULE,
};
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index f2dbefb61ce8..9a0ae59cdc50 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1040,8 +1040,7 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl,
goto free_iterate;
}
- /* the table doesn't like it */
- if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
+ if (repl->valid_hooks != t->valid_hooks)
goto free_unlock;
if (repl->num_counters && repl->num_counters != t->private->nentries) {
@@ -1231,11 +1230,6 @@ int ebt_register_table(struct net *net, const struct ebt_table *input_table,
if (ret != 0)
goto free_chainstack;
- if (table->check && table->check(newinfo, table->valid_hooks)) {
- ret = -EINVAL;
- goto free_chainstack;
- }
-
table->private = newinfo;
rwlock_init(&table->lock);
mutex_lock(&ebt_mutex);
diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
index f5ecfdcf57b2..b670ba03a675 100644
--- a/net/can/j1939/socket.c
+++ b/net/can/j1939/socket.c
@@ -178,7 +178,10 @@ activate_next:
if (!first)
return;
- if (WARN_ON_ONCE(j1939_session_activate(first))) {
+ if (j1939_session_activate(first)) {
+ netdev_warn_once(first->priv->ndev,
+ "%s: 0x%p: Identical session is already activated.\n",
+ __func__, first);
first->err = -EBUSY;
goto activate_next;
} else {
diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
index 307ee1174a6e..d7d86c944d76 100644
--- a/net/can/j1939/transport.c
+++ b/net/can/j1939/transport.c
@@ -260,6 +260,8 @@ static void __j1939_session_drop(struct j1939_session *session)
static void j1939_session_destroy(struct j1939_session *session)
{
+ struct sk_buff *skb;
+
if (session->transmission) {
if (session->err)
j1939_sk_errqueue(session, J1939_ERRQUEUE_TX_ABORT);
@@ -274,7 +276,11 @@ static void j1939_session_destroy(struct j1939_session *session)
WARN_ON_ONCE(!list_empty(&session->sk_session_queue_entry));
WARN_ON_ONCE(!list_empty(&session->active_session_list_entry));
- skb_queue_purge(&session->skb_queue);
+ while ((skb = skb_dequeue(&session->skb_queue)) != NULL) {
+ /* drop ref taken in j1939_session_skb_queue() */
+ skb_unref(skb);
+ kfree_skb(skb);
+ }
__j1939_session_drop(session);
j1939_priv_put(session->priv);
kfree(session);
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 9d82bb42e958..87b883c7bfd6 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -4578,15 +4578,12 @@ bad:
/*
* Register request, send initial attempt.
*/
-int ceph_osdc_start_request(struct ceph_osd_client *osdc,
- struct ceph_osd_request *req,
- bool nofail)
+void ceph_osdc_start_request(struct ceph_osd_client *osdc,
+ struct ceph_osd_request *req)
{
down_read(&osdc->lock);
submit_request(req, false);
up_read(&osdc->lock);
-
- return 0;
}
EXPORT_SYMBOL(ceph_osdc_start_request);
@@ -4756,7 +4753,7 @@ int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
if (ret)
goto out_put_req;
- ceph_osdc_start_request(osdc, req, false);
+ ceph_osdc_start_request(osdc, req);
linger_cancel(lreq);
linger_put(lreq);
ret = wait_request_timeout(req, opts->mount_timeout);
@@ -4827,7 +4824,7 @@ int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
if (ret)
goto out_put_req;
- ceph_osdc_start_request(osdc, req, false);
+ ceph_osdc_start_request(osdc, req);
ret = ceph_osdc_wait_request(osdc, req);
out_put_req:
@@ -5043,7 +5040,7 @@ int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
if (ret)
goto out_put_req;
- ceph_osdc_start_request(osdc, req, false);
+ ceph_osdc_start_request(osdc, req);
ret = ceph_osdc_wait_request(osdc, req);
if (ret >= 0) {
void *p = page_address(pages[0]);
@@ -5120,7 +5117,7 @@ int ceph_osdc_call(struct ceph_osd_client *osdc,
if (ret)
goto out_put_req;
- ceph_osdc_start_request(osdc, req, false);
+ ceph_osdc_start_request(osdc, req);
ret = ceph_osdc_wait_request(osdc, req);
if (ret >= 0) {
ret = req->r_ops[0].rval;
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 2823bb3cff55..295098873861 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -11,6 +11,22 @@
#include <linux/crush/hash.h>
#include <linux/crush/mapper.h>
+static __printf(2, 3)
+void osdmap_info(const struct ceph_osdmap *map, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ printk(KERN_INFO "%s (%pU e%u): %pV", KBUILD_MODNAME, &map->fsid,
+ map->epoch, &vaf);
+
+ va_end(args);
+}
+
char *ceph_osdmap_state_str(char *str, int len, u32 state)
{
if (!len)
@@ -571,10 +587,10 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
goto bad;
#endif
r = kmalloc(struct_size(r, steps, yes), GFP_NOFS);
- c->rules[i] = r;
if (r == NULL)
goto badmem;
dout(" rule %d is at %p\n", i, r);
+ c->rules[i] = r;
r->len = yes;
ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
@@ -1566,7 +1582,7 @@ static int decode_new_primary_affinity(void **p, void *end,
if (ret)
return ret;
- pr_info("osd%d primary-affinity 0x%x\n", osd, aff);
+ osdmap_info(map, "osd%d primary-affinity 0x%x\n", osd, aff);
}
return 0;
@@ -1864,9 +1880,9 @@ static int decode_new_up_state_weight(void **p, void *end, u8 struct_v,
osd = ceph_decode_32(p);
w = ceph_decode_32(p);
BUG_ON(osd >= map->max_osd);
- pr_info("osd%d weight 0x%x %s\n", osd, w,
- w == CEPH_OSD_IN ? "(in)" :
- (w == CEPH_OSD_OUT ? "(out)" : ""));
+ osdmap_info(map, "osd%d weight 0x%x %s\n", osd, w,
+ w == CEPH_OSD_IN ? "(in)" :
+ (w == CEPH_OSD_OUT ? "(out)" : ""));
map->osd_weight[osd] = w;
/*
@@ -1898,10 +1914,10 @@ static int decode_new_up_state_weight(void **p, void *end, u8 struct_v,
BUG_ON(osd >= map->max_osd);
if ((map->osd_state[osd] & CEPH_OSD_UP) &&
(xorstate & CEPH_OSD_UP))
- pr_info("osd%d down\n", osd);
+ osdmap_info(map, "osd%d down\n", osd);
if ((map->osd_state[osd] & CEPH_OSD_EXISTS) &&
(xorstate & CEPH_OSD_EXISTS)) {
- pr_info("osd%d does not exist\n", osd);
+ osdmap_info(map, "osd%d does not exist\n", osd);
ret = set_primary_affinity(map, osd,
CEPH_OSD_DEFAULT_PRIMARY_AFFINITY);
if (ret)
@@ -1931,7 +1947,7 @@ static int decode_new_up_state_weight(void **p, void *end, u8 struct_v,
dout("%s osd%d addr %s\n", __func__, osd, ceph_pr_addr(&addr));
- pr_info("osd%d up\n", osd);
+ osdmap_info(map, "osd%d up\n", osd);
map->osd_state[osd] |= CEPH_OSD_EXISTS | CEPH_OSD_UP;
map->osd_addr[osd] = addr;
}
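
The new osdmap_info() helper in net/ceph/osdmap.c prefixes OSD state messages with the module name, cluster fsid and map epoch, forwarding the arguments through struct va_format and the kernel-only %pV specifier. A loose userspace analog simply prints the prefix and hands the va_list to vprintf(); the epoch-only prefix below is a simplification, not the kernel format.

/* Illustrative only: a userspace analog of the printf-forwarding helper. */
#include <stdarg.h>
#include <stdio.h>

static void osdmap_info(unsigned int epoch, const char *fmt, ...)
    __attribute__((format(printf, 2, 3)));

static void osdmap_info(unsigned int epoch, const char *fmt, ...)
{
    va_list args;

    va_start(args, fmt);
    printf("osdmap (e%u): ", epoch);    /* the kernel also prints the fsid */
    vprintf(fmt, args);
    va_end(args);
}

int main(void)
{
    osdmap_info(42, "osd%d down\n", 3);
    return 0;
}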
diff --git a/net/ceph/pagelist.c b/net/ceph/pagelist.c
index 65e34f78b05d..74622b278d57 100644
--- a/net/ceph/pagelist.c
+++ b/net/ceph/pagelist.c
@@ -96,7 +96,7 @@ int ceph_pagelist_append(struct ceph_pagelist *pl, const void *buf, size_t len)
EXPORT_SYMBOL(ceph_pagelist_append);
/* Allocate enough pages for a pagelist to append the given amount
- * of data without without allocating.
+ * of data without allocating.
* Returns: 0 on success, -ENOMEM on error.
*/
int ceph_pagelist_reserve(struct ceph_pagelist *pl, size_t space)
diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
index a25ec93729b9..94374d529ea4 100644
--- a/net/core/bpf_sk_storage.c
+++ b/net/core/bpf_sk_storage.c
@@ -310,11 +310,12 @@ BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
static int bpf_sk_storage_charge(struct bpf_local_storage_map *smap,
void *owner, u32 size)
{
+ int optmem_max = READ_ONCE(sysctl_optmem_max);
struct sock *sk = (struct sock *)owner;
/* same check as in sock_kmalloc() */
- if (size <= sysctl_optmem_max &&
- atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
+ if (size <= optmem_max &&
+ atomic_read(&sk->sk_omem_alloc) + size < optmem_max) {
atomic_add(size, &sk->sk_omem_alloc);
return 0;
}
@@ -875,10 +876,18 @@ static int bpf_iter_init_sk_storage_map(void *priv_data,
{
struct bpf_iter_seq_sk_storage_map_info *seq_info = priv_data;
+ bpf_map_inc_with_uref(aux->map);
seq_info->map = aux->map;
return 0;
}
+static void bpf_iter_fini_sk_storage_map(void *priv_data)
+{
+ struct bpf_iter_seq_sk_storage_map_info *seq_info = priv_data;
+
+ bpf_map_put_with_uref(seq_info->map);
+}
+
static int bpf_iter_attach_map(struct bpf_prog *prog,
union bpf_iter_link_info *linfo,
struct bpf_iter_aux_info *aux)
@@ -896,7 +905,7 @@ static int bpf_iter_attach_map(struct bpf_prog *prog,
if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
goto put_map;
- if (prog->aux->max_rdonly_access > map->value_size) {
+ if (prog->aux->max_rdwr_access > map->value_size) {
err = -EACCES;
goto put_map;
}
@@ -924,7 +933,7 @@ static const struct seq_operations bpf_sk_storage_map_seq_ops = {
static const struct bpf_iter_seq_info iter_seq_info = {
.seq_ops = &bpf_sk_storage_map_seq_ops,
.init_seq_private = bpf_iter_init_sk_storage_map,
- .fini_seq_private = NULL,
+ .fini_seq_private = bpf_iter_fini_sk_storage_map,
.seq_priv_size = sizeof(struct bpf_iter_seq_sk_storage_map_info),
};
diff --git a/net/core/datagram.c b/net/core/datagram.c
index f3988ef8e9af..7255531f63ae 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -632,12 +632,11 @@ int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk,
if (frag == MAX_SKB_FRAGS)
return -EMSGSIZE;
- copied = iov_iter_get_pages(from, pages, length,
+ copied = iov_iter_get_pages2(from, pages, length,
MAX_SKB_FRAGS - frag, &start);
if (copied < 0)
return -EFAULT;
- iov_iter_advance(from, copied);
length -= copied;
truesize = PAGE_ALIGN(copied + start);
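
__zerocopy_sg_from_iter() switches to iov_iter_get_pages2(), which advances the iterator on behalf of the caller, so the explicit iov_iter_advance() disappears; the iov_iter_get_pages_alloc2() change in the 9p virtio transport earlier in this diff follows the same contract. The toy stand-in below uses a fake iterator to show the old versus new behaviour, nothing more.

/* Illustrative only: old get_pages() vs. the new advancing "2" variant. */
#include <stddef.h>
#include <stdio.h>

struct fake_iter { size_t pos, len; };

/* old contract: report how much could be mapped, leave the iterator alone */
static size_t get_pages(struct fake_iter *it, size_t want)
{
    size_t left = it->len - it->pos;
    return want < left ? want : left;
}

/* new "2" contract: same, but also advance the iterator for the caller */
static size_t get_pages2(struct fake_iter *it, size_t want)
{
    size_t got = get_pages(it, want);
    it->pos += got;
    return got;
}

int main(void)
{
    struct fake_iter it = { .pos = 0, .len = 8192 };
    size_t copied = get_pages2(&it, 4096);

    printf("copied %zu, iterator now at %zu\n", copied, it.pos);
    return 0;
}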
diff --git a/net/core/dev.c b/net/core/dev.c
index 716df64fcfa5..56c8b0921c9f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4624,7 +4624,7 @@ static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
struct softnet_data *sd;
unsigned int old_flow, new_flow;
- if (qlen < (netdev_max_backlog >> 1))
+ if (qlen < (READ_ONCE(netdev_max_backlog) >> 1))
return false;
sd = this_cpu_ptr(&softnet_data);
@@ -4672,7 +4672,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
if (!netif_running(skb->dev))
goto drop;
qlen = skb_queue_len(&sd->input_pkt_queue);
- if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
+ if (qlen <= READ_ONCE(netdev_max_backlog) && !skb_flow_limit(skb, qlen)) {
if (qlen) {
enqueue:
__skb_queue_tail(&sd->input_pkt_queue, skb);
@@ -4928,7 +4928,7 @@ static int netif_rx_internal(struct sk_buff *skb)
{
int ret;
- net_timestamp_check(netdev_tstamp_prequeue, skb);
+ net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
trace_netif_rx(skb);
@@ -5281,7 +5281,7 @@ static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
int ret = NET_RX_DROP;
__be16 type;
- net_timestamp_check(!netdev_tstamp_prequeue, skb);
+ net_timestamp_check(!READ_ONCE(netdev_tstamp_prequeue), skb);
trace_netif_receive_skb(skb);
@@ -5664,7 +5664,7 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
{
int ret;
- net_timestamp_check(netdev_tstamp_prequeue, skb);
+ net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
if (skb_defer_rx_timestamp(skb))
return NET_RX_SUCCESS;
@@ -5694,7 +5694,7 @@ void netif_receive_skb_list_internal(struct list_head *head)
INIT_LIST_HEAD(&sublist);
list_for_each_entry_safe(skb, next, head, list) {
- net_timestamp_check(netdev_tstamp_prequeue, skb);
+ net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
skb_list_del_init(skb);
if (!skb_defer_rx_timestamp(skb))
list_add_tail(&skb->list, &sublist);
@@ -5918,7 +5918,7 @@ static int process_backlog(struct napi_struct *napi, int quota)
net_rps_action_and_irq_enable(sd);
}
- napi->weight = dev_rx_weight;
+ napi->weight = READ_ONCE(dev_rx_weight);
while (again) {
struct sk_buff *skb;
@@ -6665,8 +6665,8 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
unsigned long time_limit = jiffies +
- usecs_to_jiffies(netdev_budget_usecs);
- int budget = netdev_budget;
+ usecs_to_jiffies(READ_ONCE(netdev_budget_usecs));
+ int budget = READ_ONCE(netdev_budget);
LIST_HEAD(list);
LIST_HEAD(repoll);
@@ -10284,7 +10284,7 @@ static struct net_device *netdev_wait_allrefs_any(struct list_head *list)
return dev;
if (time_after(jiffies, warning_time +
- netdev_unregister_timeout_secs * HZ)) {
+ READ_ONCE(netdev_unregister_timeout_secs) * HZ)) {
list_for_each_entry(dev, list, todo_list) {
pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
dev->name, netdev_refcnt_read(dev));
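
The net/core/dev.c hunks, like the sock, filter and gro_cells ones around them, wrap lockless reads of tunables such as netdev_max_backlog, netdev_budget and sysctl_optmem_max in READ_ONCE() so the compiler can neither re-load nor tear the value while a writer updates it. For scalar types the kernel macro is essentially a volatile load; the sketch below assumes the GNU typeof extension and is single-threaded, so it only shows the shape of the pattern, not the race it guards against.

/* Illustrative only: a minimal READ_ONCE() for scalar types. */
#include <stdio.h>

#define READ_ONCE(x)    (*(const volatile typeof(x) *)&(x))

static int netdev_max_backlog = 1000;   /* stand-in for the real sysctl */

static int backlog_has_room(int qlen)
{
    /* one volatile load: no re-reading, no tearing of the sysctl value */
    return qlen <= READ_ONCE(netdev_max_backlog);
}

int main(void)
{
    printf("%d\n", backlog_has_room(500));
    return 0;
}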
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 5da5c7cca98a..b50bcc18b8d9 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -5147,7 +5147,7 @@ static int devlink_param_get(struct devlink *devlink,
const struct devlink_param *param,
struct devlink_param_gset_ctx *ctx)
{
- if (!param->get)
+ if (!param->get || devlink->reload_failed)
return -EOPNOTSUPP;
return param->get(devlink, param->id, ctx);
}
@@ -5156,7 +5156,7 @@ static int devlink_param_set(struct devlink *devlink,
const struct devlink_param *param,
struct devlink_param_gset_ctx *ctx)
{
- if (!param->set)
+ if (!param->set || devlink->reload_failed)
return -EOPNOTSUPP;
return param->set(devlink, param->id, ctx);
}
diff --git a/net/core/filter.c b/net/core/filter.c
index 5669248aff25..c191db80ce93 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1214,10 +1214,11 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
u32 filter_size = bpf_prog_size(fp->prog->len);
+ int optmem_max = READ_ONCE(sysctl_optmem_max);
/* same check as in sock_kmalloc() */
- if (filter_size <= sysctl_optmem_max &&
- atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
+ if (filter_size <= optmem_max &&
+ atomic_read(&sk->sk_omem_alloc) + filter_size < optmem_max) {
atomic_add(filter_size, &sk->sk_omem_alloc);
return true;
}
@@ -1548,7 +1549,7 @@ int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
if (IS_ERR(prog))
return PTR_ERR(prog);
- if (bpf_prog_size(prog->len) > sysctl_optmem_max)
+ if (bpf_prog_size(prog->len) > READ_ONCE(sysctl_optmem_max))
err = -ENOMEM;
else
err = reuseport_attach_prog(sk, prog);
@@ -1615,7 +1616,7 @@ int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
}
} else {
/* BPF_PROG_TYPE_SOCKET_FILTER */
- if (bpf_prog_size(prog->len) > sysctl_optmem_max) {
+ if (bpf_prog_size(prog->len) > READ_ONCE(sysctl_optmem_max)) {
err = -ENOMEM;
goto err_prog_put;
}
@@ -5034,14 +5035,14 @@ static int __bpf_setsockopt(struct sock *sk, int level, int optname,
/* Only some socketops are supported */
switch (optname) {
case SO_RCVBUF:
- val = min_t(u32, val, sysctl_rmem_max);
+ val = min_t(u32, val, READ_ONCE(sysctl_rmem_max));
val = min_t(int, val, INT_MAX / 2);
sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
WRITE_ONCE(sk->sk_rcvbuf,
max_t(int, val * 2, SOCK_MIN_RCVBUF));
break;
case SO_SNDBUF:
- val = min_t(u32, val, sysctl_wmem_max);
+ val = min_t(u32, val, READ_ONCE(sysctl_wmem_max));
val = min_t(int, val, INT_MAX / 2);
sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
WRITE_ONCE(sk->sk_sndbuf,
@@ -5063,7 +5064,10 @@ static int __bpf_setsockopt(struct sock *sk, int level, int optname,
case SO_RCVLOWAT:
if (val < 0)
val = INT_MAX;
- WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
+ if (sk->sk_socket && sk->sk_socket->ops->set_rcvlowat)
+ ret = sk->sk_socket->ops->set_rcvlowat(sk, val);
+ else
+ WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
break;
case SO_MARK:
if (sk->sk_mark != val) {
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index a10335b4ba2d..c8d137ef5980 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -345,7 +345,7 @@ static void gnet_stats_add_queue_cpu(struct gnet_stats_queue *qstats,
for_each_possible_cpu(i) {
const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i);
- qstats->qlen += qcpu->backlog;
+ qstats->qlen += qcpu->qlen;
qstats->backlog += qcpu->backlog;
qstats->drops += qcpu->drops;
qstats->requeues += qcpu->requeues;
diff --git a/net/core/gro_cells.c b/net/core/gro_cells.c
index 541c7a72a28a..21619c70a82b 100644
--- a/net/core/gro_cells.c
+++ b/net/core/gro_cells.c
@@ -26,7 +26,7 @@ int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
cell = this_cpu_ptr(gcells->cells);
- if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
+ if (skb_queue_len(&cell->napi_skbs) > READ_ONCE(netdev_max_backlog)) {
drop:
dev_core_stats_rx_dropped_inc(dev);
kfree_skb(skb);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 6a8c2596ebab..78cc8fb68814 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -307,11 +307,35 @@ static int neigh_del_timer(struct neighbour *n)
return 0;
}
-static void pneigh_queue_purge(struct sk_buff_head *list)
+static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net)
{
+ struct sk_buff_head tmp;
+ unsigned long flags;
struct sk_buff *skb;
- while ((skb = skb_dequeue(list)) != NULL) {
+ skb_queue_head_init(&tmp);
+ spin_lock_irqsave(&list->lock, flags);
+ skb = skb_peek(list);
+ while (skb != NULL) {
+ struct sk_buff *skb_next = skb_peek_next(skb, list);
+ struct net_device *dev = skb->dev;
+
+ if (net == NULL || net_eq(dev_net(dev), net)) {
+ struct in_device *in_dev;
+
+ rcu_read_lock();
+ in_dev = __in_dev_get_rcu(dev);
+ if (in_dev)
+ in_dev->arp_parms->qlen--;
+ rcu_read_unlock();
+ __skb_unlink(skb, list);
+ __skb_queue_tail(&tmp, skb);
+ }
+ skb = skb_next;
+ }
+ spin_unlock_irqrestore(&list->lock, flags);
+
+ while ((skb = __skb_dequeue(&tmp))) {
dev_put(skb->dev);
kfree_skb(skb);
}
@@ -385,9 +409,9 @@ static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
write_lock_bh(&tbl->lock);
neigh_flush_dev(tbl, dev, skip_perm);
pneigh_ifdown_and_unlock(tbl, dev);
-
- del_timer_sync(&tbl->proxy_timer);
- pneigh_queue_purge(&tbl->proxy_queue);
+ pneigh_queue_purge(&tbl->proxy_queue, dev_net(dev));
+ if (skb_queue_empty_lockless(&tbl->proxy_queue))
+ del_timer_sync(&tbl->proxy_timer);
return 0;
}
@@ -1597,8 +1621,15 @@ static void neigh_proxy_process(struct timer_list *t)
if (tdif <= 0) {
struct net_device *dev = skb->dev;
+ struct in_device *in_dev;
+ rcu_read_lock();
+ in_dev = __in_dev_get_rcu(dev);
+ if (in_dev)
+ in_dev->arp_parms->qlen--;
+ rcu_read_unlock();
__skb_unlink(skb, &tbl->proxy_queue);
+
if (tbl->proxy_redo && netif_running(dev)) {
rcu_read_lock();
tbl->proxy_redo(skb);
@@ -1623,7 +1654,7 @@ void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
unsigned long sched_next = jiffies +
prandom_u32_max(NEIGH_VAR(p, PROXY_DELAY));
- if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
+ if (p->qlen > NEIGH_VAR(p, PROXY_QLEN)) {
kfree_skb(skb);
return;
}
@@ -1639,6 +1670,7 @@ void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
skb_dst_drop(skb);
dev_hold(skb->dev);
__skb_queue_tail(&tbl->proxy_queue, skb);
+ p->qlen++;
mod_timer(&tbl->proxy_timer, sched_next);
spin_unlock(&tbl->proxy_queue.lock);
}
@@ -1671,6 +1703,7 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
refcount_set(&p->refcnt, 1);
p->reachable_time =
neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
+ p->qlen = 0;
netdev_hold(dev, &p->dev_tracker, GFP_KERNEL);
p->dev = dev;
write_pnet(&p->net, net);
@@ -1736,6 +1769,7 @@ void neigh_table_init(int index, struct neigh_table *tbl)
refcount_set(&tbl->parms.refcnt, 1);
tbl->parms.reachable_time =
neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
+ tbl->parms.qlen = 0;
tbl->stats = alloc_percpu(struct neigh_statistics);
if (!tbl->stats)
@@ -1787,7 +1821,7 @@ int neigh_table_clear(int index, struct neigh_table *tbl)
cancel_delayed_work_sync(&tbl->managed_work);
cancel_delayed_work_sync(&tbl->gc_work);
del_timer_sync(&tbl->proxy_timer);
- pneigh_queue_purge(&tbl->proxy_queue);
+ pneigh_queue_purge(&tbl->proxy_queue, NULL);
neigh_ifdown(tbl, NULL);
if (atomic_read(&tbl->entries))
pr_crit("neighbour leakage\n");
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 0ec2f5906a27..6b9f19122ec1 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -18,6 +18,7 @@
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <linux/sched/task.h>
+#include <linux/sched/mm.h>
#include <linux/uidgid.h>
#include <linux/cookie.h>
@@ -1143,7 +1144,13 @@ static int __register_pernet_operations(struct list_head *list,
* setup_net() and cleanup_net() are not possible.
*/
for_each_net(net) {
+ struct mem_cgroup *old, *memcg;
+
+ memcg = mem_cgroup_or_root(get_mem_cgroup_from_obj(net));
+ old = set_active_memcg(memcg);
error = ops_init(ops, net);
+ set_active_memcg(old);
+ mem_cgroup_put(memcg);
if (error)
goto out_undo;
list_add_tail(&net->exit_list, &net_exit_list);
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index b74905fcc3a1..9b203d8660e4 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -16,7 +16,7 @@
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/page-flags.h>
-#include <linux/mm.h> /* for __put_page() */
+#include <linux/mm.h> /* for put_page() */
#include <linux/poison.h>
#include <linux/ethtool.h>
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index ac45328607f7..4b5b15c684ed 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -6070,6 +6070,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
if (kind == RTNL_KIND_DEL && (nlh->nlmsg_flags & NLM_F_BULK) &&
!(flags & RTNL_FLAG_BULK_DEL_SUPPORTED)) {
NL_SET_ERR_MSG(extack, "Bulk delete is not supported");
+ module_put(owner);
goto err_unlock;
}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 974bbbbe7138..84bb5e188d0d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4205,9 +4205,8 @@ normal:
SKB_GSO_CB(nskb)->csum_start =
skb_headroom(nskb) + doffset;
} else {
- skb_copy_bits(head_skb, offset,
- skb_put(nskb, len),
- len);
+ if (skb_copy_bits(head_skb, offset, skb_put(nskb, len), len))
+ goto err;
}
continue;
}
@@ -4798,7 +4797,7 @@ static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
{
bool ret;
- if (likely(sysctl_tstamp_allow_data || tsonly))
+ if (likely(READ_ONCE(sysctl_tstamp_allow_data) || tsonly))
return true;
read_lock_bh(&sk->sk_callback_lock);
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 81627892bdd4..59e75ffcc1f4 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -324,14 +324,13 @@ int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
goto out;
}
- copied = iov_iter_get_pages(from, pages, bytes, maxpages,
+ copied = iov_iter_get_pages2(from, pages, bytes, maxpages,
&offset);
if (copied <= 0) {
ret = -EFAULT;
goto out;
}
- iov_iter_advance(from, copied);
bytes -= copied;
msg->sg.size += copied;
@@ -739,7 +738,9 @@ struct sk_psock *sk_psock_init(struct sock *sk, int node)
sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
refcount_set(&psock->refcnt, 1);
- rcu_assign_sk_user_data_nocopy(sk, psock);
+ __rcu_assign_sk_user_data_with_flags(sk, psock,
+ SK_USER_DATA_NOCOPY |
+ SK_USER_DATA_PSOCK);
sock_hold(sk);
out:
@@ -1193,8 +1194,9 @@ static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb)
ret = bpf_prog_run_pin_on_cpu(prog, skb);
ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
}
- if (sk_psock_verdict_apply(psock, skb, ret) < 0)
- len = 0;
+ ret = sk_psock_verdict_apply(psock, skb, ret);
+ if (ret < 0)
+ len = ret;
out:
rcu_read_unlock();
return len;
diff --git a/net/core/sock.c b/net/core/sock.c
index 4cb957d934a2..788c1372663c 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1101,7 +1101,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
* play 'guess the biggest size' games. RCVBUF/SNDBUF
* are treated in BSD as hints
*/
- val = min_t(u32, val, sysctl_wmem_max);
+ val = min_t(u32, val, READ_ONCE(sysctl_wmem_max));
set_sndbuf:
/* Ensure val * 2 fits into an int, to prevent max_t()
* from treating it as a negative value.
@@ -1133,7 +1133,7 @@ set_sndbuf:
* play 'guess the biggest size' games. RCVBUF/SNDBUF
* are treated in BSD as hints
*/
- __sock_set_rcvbuf(sk, min_t(u32, val, sysctl_rmem_max));
+ __sock_set_rcvbuf(sk, min_t(u32, val, READ_ONCE(sysctl_rmem_max)));
break;
case SO_RCVBUFFORCE:
@@ -2536,7 +2536,7 @@ struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
/* small safe race: SKB_TRUESIZE may differ from final skb->truesize */
if (atomic_read(&sk->sk_omem_alloc) + SKB_TRUESIZE(size) >
- sysctl_optmem_max)
+ READ_ONCE(sysctl_optmem_max))
return NULL;
skb = alloc_skb(size, priority);
@@ -2554,8 +2554,10 @@ struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
*/
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
{
- if ((unsigned int)size <= sysctl_optmem_max &&
- atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
+ int optmem_max = READ_ONCE(sysctl_optmem_max);
+
+ if ((unsigned int)size <= optmem_max &&
+ atomic_read(&sk->sk_omem_alloc) + size < optmem_max) {
void *mem;
/* First do the add, to avoid the race if kmalloc
* might sleep.
@@ -3309,8 +3311,8 @@ void sock_init_data(struct socket *sock, struct sock *sk)
timer_setup(&sk->sk_timer, NULL, 0);
sk->sk_allocation = GFP_KERNEL;
- sk->sk_rcvbuf = sysctl_rmem_default;
- sk->sk_sndbuf = sysctl_wmem_default;
+ sk->sk_rcvbuf = READ_ONCE(sysctl_rmem_default);
+ sk->sk_sndbuf = READ_ONCE(sysctl_wmem_default);
sk->sk_state = TCP_CLOSE;
sk_set_socket(sk, sock);
@@ -3365,7 +3367,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
#ifdef CONFIG_NET_RX_BUSY_POLL
sk->sk_napi_id = 0;
- sk->sk_ll_usec = sysctl_net_busy_read;
+ sk->sk_ll_usec = READ_ONCE(sysctl_net_busy_read);
#endif
sk->sk_max_pacing_rate = ~0UL;
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 028813dfecb0..9a9fb9487d63 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -783,13 +783,22 @@ static int sock_map_init_seq_private(void *priv_data,
{
struct sock_map_seq_info *info = priv_data;
+ bpf_map_inc_with_uref(aux->map);
info->map = aux->map;
return 0;
}
+static void sock_map_fini_seq_private(void *priv_data)
+{
+ struct sock_map_seq_info *info = priv_data;
+
+ bpf_map_put_with_uref(info->map);
+}
+
static const struct bpf_iter_seq_info sock_map_iter_seq_info = {
.seq_ops = &sock_map_seq_ops,
.init_seq_private = sock_map_init_seq_private,
+ .fini_seq_private = sock_map_fini_seq_private,
.seq_priv_size = sizeof(struct sock_map_seq_info),
};
@@ -1369,18 +1378,27 @@ static const struct seq_operations sock_hash_seq_ops = {
};
static int sock_hash_init_seq_private(void *priv_data,
- struct bpf_iter_aux_info *aux)
+ struct bpf_iter_aux_info *aux)
{
struct sock_hash_seq_info *info = priv_data;
+ bpf_map_inc_with_uref(aux->map);
info->map = aux->map;
info->htab = container_of(aux->map, struct bpf_shtab, map);
return 0;
}
+static void sock_hash_fini_seq_private(void *priv_data)
+{
+ struct sock_hash_seq_info *info = priv_data;
+
+ bpf_map_put_with_uref(info->map);
+}
+
static const struct bpf_iter_seq_info sock_hash_iter_seq_info = {
.seq_ops = &sock_hash_seq_ops,
.init_seq_private = sock_hash_init_seq_private,
+ .fini_seq_private = sock_hash_fini_seq_private,
.seq_priv_size = sizeof(struct sock_hash_seq_info),
};
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 71a13596ea2b..725891527814 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -234,14 +234,17 @@ static int set_default_qdisc(struct ctl_table *table, int write,
static int proc_do_dev_weight(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
- int ret;
+ static DEFINE_MUTEX(dev_weight_mutex);
+ int ret, weight;
+ mutex_lock(&dev_weight_mutex);
ret = proc_dointvec(table, write, buffer, lenp, ppos);
- if (ret != 0)
- return ret;
-
- dev_rx_weight = weight_p * dev_weight_rx_bias;
- dev_tx_weight = weight_p * dev_weight_tx_bias;
+ if (!ret && write) {
+ weight = READ_ONCE(weight_p);
+ WRITE_ONCE(dev_rx_weight, weight * dev_weight_rx_bias);
+ WRITE_ONCE(dev_tx_weight, weight * dev_weight_tx_bias);
+ }
+ mutex_unlock(&dev_weight_mutex);
return ret;
}
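The proc_do_dev_weight() change above serializes concurrent writers with a local mutex and republishes the derived weights via WRITE_ONCE(), pairing with the READ_ONCE() readers added in net/core/dev.c. A condensed sketch of that handler shape, with illustrative names only:

	static DEFINE_MUTEX(example_mutex);

	static int proc_do_example(struct ctl_table *table, int write,
				   void *buffer, size_t *lenp, loff_t *ppos)
	{
		int ret;

		mutex_lock(&example_mutex);
		ret = proc_dointvec(table, write, buffer, lenp, ppos);
		if (!ret && write)
			WRITE_ONCE(derived_weight,
				   READ_ONCE(base_weight) * weight_bias);
		mutex_unlock(&example_mutex);
		return ret;
	}

Only successful writes recompute the derived values, so a plain read of the table leaves them untouched.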
diff --git a/net/dsa/port.c b/net/dsa/port.c
index 2dd76eb1621c..a8895ee3cd60 100644
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@ -145,11 +145,14 @@ int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age)
static void dsa_port_set_state_now(struct dsa_port *dp, u8 state,
bool do_fast_age)
{
+ struct dsa_switch *ds = dp->ds;
int err;
err = dsa_port_set_state(dp, state, do_fast_age);
- if (err)
- pr_err("DSA: failed to set STP state %u (%d)\n", state, err);
+ if (err && err != -EOPNOTSUPP) {
+ dev_err(ds->dev, "port %d failed to set STP state %u: %pe\n",
+ dp->index, state, ERR_PTR(err));
+ }
}
int dsa_port_set_mst_state(struct dsa_port *dp,
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index ad6a6663feeb..1291c2431d44 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -2484,7 +2484,7 @@ static int dsa_slave_changeupper(struct net_device *dev,
if (!err)
dsa_bridge_mtu_normalization(dp);
if (err == -EOPNOTSUPP) {
- if (!extack->_msg)
+ if (extack && !extack->_msg)
NL_SET_ERR_MSG_MOD(extack,
"Offloading not supported");
err = 0;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 92b778e423df..e8b9a9202fec 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -2682,23 +2682,27 @@ static __net_init int devinet_init_net(struct net *net)
#endif
if (!net_eq(net, &init_net)) {
- if (IS_ENABLED(CONFIG_SYSCTL) &&
- sysctl_devconf_inherit_init_net == 3) {
+ switch (net_inherit_devconf()) {
+ case 3:
/* copy from the current netns */
memcpy(all, current->nsproxy->net_ns->ipv4.devconf_all,
sizeof(ipv4_devconf));
memcpy(dflt,
current->nsproxy->net_ns->ipv4.devconf_dflt,
sizeof(ipv4_devconf_dflt));
- } else if (!IS_ENABLED(CONFIG_SYSCTL) ||
- sysctl_devconf_inherit_init_net != 2) {
- /* inherit == 0 or 1: copy from init_net */
+ break;
+ case 0:
+ case 1:
+ /* copy from init_net */
memcpy(all, init_net.ipv4.devconf_all,
sizeof(ipv4_devconf));
memcpy(dflt, init_net.ipv4.devconf_dflt,
sizeof(ipv4_devconf_dflt));
+ break;
+ case 2:
+ /* use compiled values */
+ break;
}
- /* else inherit == 2: use compiled values */
}
#ifdef CONFIG_SYSCTL
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index d7bd1daf022b..04e2034f2f8e 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1730,7 +1730,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
sk->sk_protocol = ip_hdr(skb)->protocol;
sk->sk_bound_dev_if = arg->bound_dev_if;
- sk->sk_sndbuf = sysctl_wmem_default;
+ sk->sk_sndbuf = READ_ONCE(sysctl_wmem_default);
ipc.sockc.mark = fl4.flowi4_mark;
err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
len, 0, &ipc, &rt, MSG_DONTWAIT);
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index a8a323ecbb54..e49a61a053a6 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -772,7 +772,7 @@ static int ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval, int optlen)
if (optlen < GROUP_FILTER_SIZE(0))
return -EINVAL;
- if (optlen > sysctl_optmem_max)
+ if (optlen > READ_ONCE(sysctl_optmem_max))
return -ENOBUFS;
gsf = memdup_sockptr(optval, optlen);
@@ -808,7 +808,7 @@ static int compat_ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval,
if (optlen < size0)
return -EINVAL;
- if (optlen > sysctl_optmem_max - 4)
+ if (optlen > READ_ONCE(sysctl_optmem_max) - 4)
return -ENOBUFS;
p = kmalloc(optlen + 4, GFP_KERNEL);
@@ -1233,7 +1233,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, int optname,
if (optlen < IP_MSFILTER_SIZE(0))
goto e_inval;
- if (optlen > sysctl_optmem_max) {
+ if (optlen > READ_ONCE(sysctl_optmem_max)) {
err = -ENOBUFS;
break;
}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 970e9a2cca4a..e5011c136fdb 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1000,7 +1000,7 @@ new_segment:
i = skb_shinfo(skb)->nr_frags;
can_coalesce = skb_can_coalesce(skb, i, page, offset);
- if (!can_coalesce && i >= sysctl_max_skb_frags) {
+ if (!can_coalesce && i >= READ_ONCE(sysctl_max_skb_frags)) {
tcp_mark_push(tp, skb);
goto new_segment;
}
@@ -1354,7 +1354,7 @@ new_segment:
if (!skb_can_coalesce(skb, i, pfrag->page,
pfrag->offset)) {
- if (i >= sysctl_max_skb_frags) {
+ if (i >= READ_ONCE(sysctl_max_skb_frags)) {
tcp_mark_push(tp, skb);
goto new_segment;
}
@@ -1567,17 +1567,11 @@ static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
* calculation of whether or not we must ACK for the sake of
* a window update.
*/
-void tcp_cleanup_rbuf(struct sock *sk, int copied)
+static void __tcp_cleanup_rbuf(struct sock *sk, int copied)
{
struct tcp_sock *tp = tcp_sk(sk);
bool time_to_ack = false;
- struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
-
- WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
- "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
- tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
-
if (inet_csk_ack_scheduled(sk)) {
const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1623,6 +1617,17 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
tcp_send_ack(sk);
}
+void tcp_cleanup_rbuf(struct sock *sk, int copied)
+{
+ struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
+ "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
+ tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
+ __tcp_cleanup_rbuf(sk, copied);
+}
+
static void tcp_eat_recv_skb(struct sock *sk, struct sk_buff *skb)
{
__skb_unlink(skb, &sk->sk_receive_queue);
@@ -1756,34 +1761,26 @@ int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
if (sk->sk_state == TCP_LISTEN)
return -ENOTCONN;
- while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
- int used;
-
- __skb_unlink(skb, &sk->sk_receive_queue);
- used = recv_actor(sk, skb);
- if (used <= 0) {
- if (!copied)
- copied = used;
- break;
- }
- seq += used;
- copied += used;
+ skb = tcp_recv_skb(sk, seq, &offset);
+ if (!skb)
+ return 0;
- if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
- consume_skb(skb);
+ __skb_unlink(skb, &sk->sk_receive_queue);
+ WARN_ON(!skb_set_owner_sk_safe(skb, sk));
+ copied = recv_actor(sk, skb);
+ if (copied >= 0) {
+ seq += copied;
+ if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
++seq;
- break;
- }
- consume_skb(skb);
- break;
}
+ consume_skb(skb);
WRITE_ONCE(tp->copied_seq, seq);
tcp_rcv_space_adjust(sk);
/* Clean up data we have read: This will do ACK frames. */
if (copied > 0)
- tcp_cleanup_rbuf(sk, copied);
+ __tcp_cleanup_rbuf(sk, copied);
return copied;
}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 78b654ff421b..290019de766d 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -239,7 +239,7 @@ void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
if (wscale_ok) {
/* Set window scaling on max possible window */
space = max_t(u32, space, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
- space = max_t(u32, space, sysctl_rmem_max);
+ space = max_t(u32, space, READ_ONCE(sysctl_rmem_max));
space = min_t(u32, space, *window_clamp);
*rcv_wscale = clamp_t(int, ilog2(space) - 15,
0, TCP_MAX_WSCALE);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index b624e3d8c5f0..e15f64f22fa8 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -7162,9 +7162,8 @@ static int __net_init addrconf_init_net(struct net *net)
if (!dflt)
goto err_alloc_dflt;
- if (IS_ENABLED(CONFIG_SYSCTL) &&
- !net_eq(net, &init_net)) {
- switch (sysctl_devconf_inherit_init_net) {
+ if (!net_eq(net, &init_net)) {
+ switch (net_inherit_devconf()) {
case 1: /* copy from init_net */
memcpy(all, init_net.ipv6.devconf_all,
sizeof(ipv6_devconf));
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 897ca4f9b791..f152e51242cb 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1311,8 +1311,7 @@ struct dst_entry *ip6_dst_lookup_tunnel(struct sk_buff *skb,
fl6.daddr = info->key.u.ipv6.dst;
fl6.saddr = info->key.u.ipv6.src;
prio = info->key.tos;
- fl6.flowlabel = ip6_make_flowinfo(RT_TOS(prio),
- info->key.label);
+ fl6.flowlabel = ip6_make_flowinfo(prio, info->key.label);
dst = ipv6_stub->ipv6_dst_lookup_flow(net, sock->sk, &fl6,
NULL);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 3fda5634578c..79c6a827dea9 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1517,7 +1517,7 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
* ip6_tnl_change() updates the tunnel parameters
**/
-static int
+static void
ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
{
t->parms.laddr = p->laddr;
@@ -1531,29 +1531,25 @@ ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
t->parms.fwmark = p->fwmark;
dst_cache_reset(&t->dst_cache);
ip6_tnl_link_config(t);
- return 0;
}
-static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
+static void ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
{
struct net *net = t->net;
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
- int err;
ip6_tnl_unlink(ip6n, t);
synchronize_net();
- err = ip6_tnl_change(t, p);
+ ip6_tnl_change(t, p);
ip6_tnl_link(ip6n, t);
netdev_state_change(t->dev);
- return err;
}
-static int ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
+static void ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
{
/* for default tnl0 device allow to change only the proto */
t->parms.proto = p->proto;
netdev_state_change(t->dev);
- return 0;
}
static void
@@ -1667,9 +1663,9 @@ ip6_tnl_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
} else
t = netdev_priv(dev);
if (dev == ip6n->fb_tnl_dev)
- err = ip6_tnl0_update(t, &p1);
+ ip6_tnl0_update(t, &p1);
else
- err = ip6_tnl_update(t, &p1);
+ ip6_tnl_update(t, &p1);
}
if (!IS_ERR(t)) {
err = 0;
@@ -2091,7 +2087,8 @@ static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
} else
t = netdev_priv(dev);
- return ip6_tnl_update(t, &p);
+ ip6_tnl_update(t, &p);
+ return 0;
}
static void ip6_tnl_dellink(struct net_device *dev, struct list_head *head)
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 222f6bf220ba..e0dcc7a193df 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -210,7 +210,7 @@ static int ipv6_set_mcast_msfilter(struct sock *sk, sockptr_t optval,
if (optlen < GROUP_FILTER_SIZE(0))
return -EINVAL;
- if (optlen > sysctl_optmem_max)
+ if (optlen > READ_ONCE(sysctl_optmem_max))
return -ENOBUFS;
gsf = memdup_sockptr(optval, optlen);
@@ -244,7 +244,7 @@ static int compat_ipv6_set_mcast_msfilter(struct sock *sk, sockptr_t optval,
if (optlen < size0)
return -EINVAL;
- if (optlen > sysctl_optmem_max - 4)
+ if (optlen > READ_ONCE(sysctl_optmem_max) - 4)
return -ENOBUFS;
p = kmalloc(optlen + 4, GFP_KERNEL);
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 98453693e400..3a553494ff16 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1378,6 +1378,9 @@ static void ndisc_router_discovery(struct sk_buff *skb)
if (!rt && lifetime) {
ND_PRINTK(3, info, "RA: adding default router\n");
+ if (neigh)
+ neigh_release(neigh);
+
rt = rt6_add_dflt_router(net, &ipv6_hdr(skb)->saddr,
skb->dev, pref, defrtr_usr_metric);
if (!rt) {
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 7dd3629dd19e..38db0064d661 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -86,7 +86,6 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
table[1].extra2 = &nf_frag->fqdir->high_thresh;
table[2].data = &nf_frag->fqdir->high_thresh;
table[2].extra1 = &nf_frag->fqdir->low_thresh;
- table[2].extra2 = &nf_frag->fqdir->high_thresh;
hdr = register_net_sysctl(net, "net/netfilter", table);
if (hdr == NULL)
diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c
index 2cd4a8d3b30a..b7de5e46fdd8 100644
--- a/net/ipv6/seg6_local.c
+++ b/net/ipv6/seg6_local.c
@@ -1614,7 +1614,7 @@ static void __destroy_attrs(unsigned long parsed_attrs, int max_parsed,
* callback. If the callback is not available, then we skip to the next
* attribute; otherwise, we call the destroy() callback.
*/
- for (i = 0; i < max_parsed; ++i) {
+ for (i = SEG6_LOCAL_SRH; i < max_parsed; ++i) {
if (!(parsed_attrs & SEG6_F_ATTR(i)))
continue;
@@ -1643,7 +1643,7 @@ static int parse_nla_optional_attrs(struct nlattr **attrs,
struct seg6_action_param *param;
int err, i;
- for (i = 0; i < SEG6_LOCAL_MAX + 1; ++i) {
+ for (i = SEG6_LOCAL_SRH; i < SEG6_LOCAL_MAX + 1; ++i) {
if (!(desc->optattrs & SEG6_F_ATTR(i)) || !attrs[i])
continue;
@@ -1742,7 +1742,7 @@ static int parse_nla_action(struct nlattr **attrs, struct seg6_local_lwt *slwt)
}
/* parse the required attributes */
- for (i = 0; i < SEG6_LOCAL_MAX + 1; i++) {
+ for (i = SEG6_LOCAL_SRH; i < SEG6_LOCAL_MAX + 1; i++) {
if (desc->attrs & SEG6_F_ATTR(i)) {
if (!attrs[i])
return -EINVAL;
@@ -1847,7 +1847,7 @@ static int seg6_local_fill_encap(struct sk_buff *skb,
attrs = slwt->desc->attrs | slwt->parsed_optattrs;
- for (i = 0; i < SEG6_LOCAL_MAX + 1; i++) {
+ for (i = SEG6_LOCAL_SRH; i < SEG6_LOCAL_MAX + 1; i++) {
if (attrs & SEG6_F_ATTR(i)) {
param = &seg6_action_params[i];
err = param->put(skb, slwt);
@@ -1927,7 +1927,7 @@ static int seg6_local_cmp_encap(struct lwtunnel_state *a,
if (attrs_a != attrs_b)
return 1;
- for (i = 0; i < SEG6_LOCAL_MAX + 1; i++) {
+ for (i = SEG6_LOCAL_SRH; i < SEG6_LOCAL_MAX + 1; i++) {
if (attrs_a & SEG6_F_ATTR(i)) {
param = &seg6_action_params[i];
if (param->cmp(slwt_a, slwt_b))
diff --git a/net/key/af_key.c b/net/key/af_key.c
index fda2dcc8a383..c85df5b958d2 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1697,9 +1697,12 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sad
pfk->registered |= (1<<hdr->sadb_msg_satype);
}
+ mutex_lock(&pfkey_mutex);
xfrm_probe_algs();
supp_skb = compose_sadb_supported(hdr, GFP_KERNEL | __GFP_ZERO);
+ mutex_unlock(&pfkey_mutex);
+
if (!supp_skb) {
if (hdr->sadb_msg_satype != SADB_SATYPE_UNSPEC)
pfk->registered &= ~(1<<hdr->sadb_msg_satype);
diff --git a/net/mac80211/trace_msg.h b/net/mac80211/trace_msg.h
index 40141df09f25..c9dbe9aab7bd 100644
--- a/net/mac80211/trace_msg.h
+++ b/net/mac80211/trace_msg.h
@@ -24,13 +24,11 @@ DECLARE_EVENT_CLASS(mac80211_msg_event,
TP_ARGS(vaf),
TP_STRUCT__entry(
- __dynamic_array(char, msg, MAX_MSG_LEN)
+ __vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >= MAX_MSG_LEN);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s", __get_str(msg))
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index a3f1c1461874..d398f3810662 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1240,6 +1240,9 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
info->limit > dfrag->data_len))
return 0;
+ if (unlikely(!__tcp_can_send(ssk)))
+ return -EAGAIN;
+
/* compute send limit */
info->mss_now = tcp_send_mss(ssk, &info->size_goal, info->flags);
copy = info->size_goal;
@@ -1260,7 +1263,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
i = skb_shinfo(skb)->nr_frags;
can_coalesce = skb_can_coalesce(skb, i, dfrag->page, offset);
- if (!can_coalesce && i >= sysctl_max_skb_frags) {
+ if (!can_coalesce && i >= READ_ONCE(sysctl_max_skb_frags)) {
tcp_mark_push(tcp_sk(ssk), skb);
goto alloc_skb;
}
@@ -1413,7 +1416,8 @@ static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
if (__mptcp_check_fallback(msk)) {
if (!msk->first)
return NULL;
- return sk_stream_memory_free(msk->first) ? msk->first : NULL;
+ return __tcp_can_send(msk->first) &&
+ sk_stream_memory_free(msk->first) ? msk->first : NULL;
}
/* re-use last subflow, if the burst allow that */
@@ -1564,6 +1568,8 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags)
ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
if (ret <= 0) {
+ if (ret == -EAGAIN)
+ continue;
mptcp_push_release(ssk, &info);
goto out;
}
@@ -2769,30 +2775,16 @@ static void __mptcp_wr_shutdown(struct sock *sk)
static void __mptcp_destroy_sock(struct sock *sk)
{
- struct mptcp_subflow_context *subflow, *tmp;
struct mptcp_sock *msk = mptcp_sk(sk);
- LIST_HEAD(conn_list);
pr_debug("msk=%p", msk);
might_sleep();
- /* join list will be eventually flushed (with rst) at sock lock release time*/
- list_splice_init(&msk->conn_list, &conn_list);
-
mptcp_stop_timer(sk);
sk_stop_timer(sk, &sk->sk_timer);
msk->pm.status = 0;
- /* clears msk->subflow, allowing the following loop to close
- * even the initial subflow
- */
- mptcp_dispose_initial_subflow(msk);
- list_for_each_entry_safe(subflow, tmp, &conn_list, node) {
- struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
- __mptcp_close_ssk(sk, ssk, subflow, 0);
- }
-
sk->sk_prot->destroy(sk);
WARN_ON_ONCE(msk->rmem_fwd_alloc);
@@ -2884,24 +2876,20 @@ static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
static int mptcp_disconnect(struct sock *sk, int flags)
{
- struct mptcp_subflow_context *subflow, *tmp;
struct mptcp_sock *msk = mptcp_sk(sk);
inet_sk_state_store(sk, TCP_CLOSE);
- list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
- struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
-
- __mptcp_close_ssk(sk, ssk, subflow, MPTCP_CF_FASTCLOSE);
- }
-
mptcp_stop_timer(sk);
sk_stop_timer(sk, &sk->sk_timer);
if (mptcp_sk(sk)->token)
mptcp_event(MPTCP_EVENT_CLOSED, mptcp_sk(sk), NULL, GFP_KERNEL);
- mptcp_destroy_common(msk);
+ /* msk->subflow is still intact, the following will not free the first
+ * subflow
+ */
+ mptcp_destroy_common(msk, MPTCP_CF_FASTCLOSE);
msk->last_snd = NULL;
WRITE_ONCE(msk->flags, 0);
msk->cb_flags = 0;
@@ -3051,12 +3039,17 @@ out:
return newsk;
}
-void mptcp_destroy_common(struct mptcp_sock *msk)
+void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags)
{
+ struct mptcp_subflow_context *subflow, *tmp;
struct sock *sk = (struct sock *)msk;
__mptcp_clear_xmit(sk);
+ /* join list will be eventually flushed (with rst) at sock lock release time */
+ list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node)
+ __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, flags);
+
/* move to sk_receive_queue, sk_stream_kill_queues will purge it */
mptcp_data_lock(sk);
skb_queue_splice_tail_init(&msk->receive_queue, &sk->sk_receive_queue);
@@ -3078,7 +3071,11 @@ static void mptcp_destroy(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
- mptcp_destroy_common(msk);
+ /* clears msk->subflow, allowing the following to close
+ * even the initial subflow
+ */
+ mptcp_dispose_initial_subflow(msk);
+ mptcp_destroy_common(msk, 0);
sk_sockets_allocated_dec(sk);
}
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 5d6043c16b09..132d50833df1 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -624,16 +624,19 @@ void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
struct sockaddr_storage *addr,
unsigned short family);
-static inline bool __mptcp_subflow_active(struct mptcp_subflow_context *subflow)
+static inline bool __tcp_can_send(const struct sock *ssk)
{
- struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ /* only send if our side has not closed yet */
+ return ((1 << inet_sk_state_load(ssk)) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT));
+}
+static inline bool __mptcp_subflow_active(struct mptcp_subflow_context *subflow)
+{
/* can't send if JOIN hasn't completed yet (i.e. is usable for mptcp) */
if (subflow->request_join && !subflow->fully_established)
return false;
- /* only send if our side has not closed yet */
- return ((1 << ssk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT));
+ return __tcp_can_send(mptcp_subflow_tcp_sock(subflow));
}
void mptcp_subflow_set_active(struct mptcp_subflow_context *subflow);
@@ -717,7 +720,7 @@ static inline void mptcp_write_space(struct sock *sk)
}
}
-void mptcp_destroy_common(struct mptcp_sock *msk);
+void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags);
#define MPTCP_TOKEN_MAX_RETRIES 4
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 901c763dcdbb..c7d49fb6e7bd 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -621,7 +621,8 @@ static void mptcp_sock_destruct(struct sock *sk)
sock_orphan(sk);
}
- mptcp_destroy_common(mptcp_sk(sk));
+ /* We don't need to clear msk->subflow, as it's still NULL at this point */
+ mptcp_destroy_common(mptcp_sk(sk), 0);
inet_sock_destruct(sk);
}
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index df6abbfe0079..4b8d04640ff3 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -144,7 +144,6 @@ config NF_CONNTRACK_ZONES
config NF_CONNTRACK_PROCFS
bool "Supply CT list in procfs (OBSOLETE)"
- default y
depends on PROC_FS
help
This option enables for the list of known conntrack entries
@@ -736,9 +735,8 @@ config NF_FLOW_TABLE
config NF_FLOW_TABLE_PROCFS
bool "Supply flow table statistics in procfs"
- default y
+ depends on NF_FLOW_TABLE
depends on PROC_FS
- depends on SYSCTL
help
This option enables for the flow table offload statistics
to be shown in procfs under net/netfilter/nf_flowtable.
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 9d43277b8b4f..a56fd0b5a430 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -1280,12 +1280,12 @@ static void set_sock_size(struct sock *sk, int mode, int val)
lock_sock(sk);
if (mode) {
val = clamp_t(int, val, (SOCK_MIN_SNDBUF + 1) / 2,
- sysctl_wmem_max);
+ READ_ONCE(sysctl_wmem_max));
sk->sk_sndbuf = val * 2;
sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
} else {
val = clamp_t(int, val, (SOCK_MIN_RCVBUF + 1) / 2,
- sysctl_rmem_max);
+ READ_ONCE(sysctl_rmem_max));
sk->sk_rcvbuf = val * 2;
sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
}
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index a414274338cf..0d9332e9cf71 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -34,11 +34,6 @@ MODULE_DESCRIPTION("ftp connection tracking helper");
MODULE_ALIAS("ip_conntrack_ftp");
MODULE_ALIAS_NFCT_HELPER(HELPER_NAME);
-/* This is slow, but it's simple. --RR */
-static char *ftp_buffer;
-
-static DEFINE_SPINLOCK(nf_ftp_lock);
-
#define MAX_PORTS 8
static u_int16_t ports[MAX_PORTS];
static unsigned int ports_c;
@@ -398,6 +393,9 @@ static int help(struct sk_buff *skb,
return NF_ACCEPT;
}
+ if (unlikely(skb_linearize(skb)))
+ return NF_DROP;
+
th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
if (th == NULL)
return NF_ACCEPT;
@@ -411,12 +409,8 @@ static int help(struct sk_buff *skb,
}
datalen = skb->len - dataoff;
- spin_lock_bh(&nf_ftp_lock);
- fb_ptr = skb_header_pointer(skb, dataoff, datalen, ftp_buffer);
- if (!fb_ptr) {
- spin_unlock_bh(&nf_ftp_lock);
- return NF_ACCEPT;
- }
+ spin_lock_bh(&ct->lock);
+ fb_ptr = skb->data + dataoff;
ends_in_nl = (fb_ptr[datalen - 1] == '\n');
seq = ntohl(th->seq) + datalen;
@@ -544,7 +538,7 @@ out_update_nl:
if (ends_in_nl)
update_nl_seq(ct, seq, ct_ftp_info, dir, skb);
out:
- spin_unlock_bh(&nf_ftp_lock);
+ spin_unlock_bh(&ct->lock);
return ret;
}
@@ -571,7 +565,6 @@ static const struct nf_conntrack_expect_policy ftp_exp_policy = {
static void __exit nf_conntrack_ftp_fini(void)
{
nf_conntrack_helpers_unregister(ftp, ports_c * 2);
- kfree(ftp_buffer);
}
static int __init nf_conntrack_ftp_init(void)
@@ -580,10 +573,6 @@ static int __init nf_conntrack_ftp_init(void)
NF_CT_HELPER_BUILD_BUG_ON(sizeof(struct nf_ct_ftp_master));
- ftp_buffer = kmalloc(65536, GFP_KERNEL);
- if (!ftp_buffer)
- return -ENOMEM;
-
if (ports_c == 0)
ports[ports_c++] = FTP_PORT;
@@ -603,7 +592,6 @@ static int __init nf_conntrack_ftp_init(void)
ret = nf_conntrack_helpers_register(ftp, ports_c * 2);
if (ret < 0) {
pr_err("failed to register helpers\n");
- kfree(ftp_buffer);
return ret;
}
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index bb76305bb7ff..5a9bce24f3c3 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -34,6 +34,8 @@
#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_conntrack_h323.h>
+#define H323_MAX_SIZE 65535
+
/* Parameters */
static unsigned int default_rrq_ttl __read_mostly = 300;
module_param(default_rrq_ttl, uint, 0600);
@@ -86,6 +88,9 @@ static int get_tpkt_data(struct sk_buff *skb, unsigned int protoff,
if (tcpdatalen <= 0) /* No TCP data */
goto clear_out;
+ if (tcpdatalen > H323_MAX_SIZE)
+ tcpdatalen = H323_MAX_SIZE;
+
if (*data == NULL) { /* first TPKT */
/* Get first TPKT pointer */
tpkt = skb_header_pointer(skb, tcpdataoff, tcpdatalen,
@@ -1169,6 +1174,9 @@ static unsigned char *get_udp_data(struct sk_buff *skb, unsigned int protoff,
if (dataoff >= skb->len)
return NULL;
*datalen = skb->len - dataoff;
+ if (*datalen > H323_MAX_SIZE)
+ *datalen = H323_MAX_SIZE;
+
return skb_header_pointer(skb, dataoff, *datalen, h323_buffer);
}
@@ -1770,7 +1778,7 @@ static int __init nf_conntrack_h323_init(void)
NF_CT_HELPER_BUILD_BUG_ON(sizeof(struct nf_ct_h323_master));
- h323_buffer = kmalloc(65536, GFP_KERNEL);
+ h323_buffer = kmalloc(H323_MAX_SIZE + 1, GFP_KERNEL);
if (!h323_buffer)
return -ENOMEM;
ret = h323_helper_init();
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
index 08ee4e760a3d..1796c456ac98 100644
--- a/net/netfilter/nf_conntrack_irc.c
+++ b/net/netfilter/nf_conntrack_irc.c
@@ -39,6 +39,7 @@ unsigned int (*nf_nat_irc_hook)(struct sk_buff *skb,
EXPORT_SYMBOL_GPL(nf_nat_irc_hook);
#define HELPER_NAME "irc"
+#define MAX_SEARCH_SIZE 4095
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("IRC (DCC) connection tracking helper");
@@ -121,6 +122,7 @@ static int help(struct sk_buff *skb, unsigned int protoff,
int i, ret = NF_ACCEPT;
char *addr_beg_p, *addr_end_p;
typeof(nf_nat_irc_hook) nf_nat_irc;
+ unsigned int datalen;
/* If packet is coming from IRC server */
if (dir == IP_CT_DIR_REPLY)
@@ -140,8 +142,12 @@ static int help(struct sk_buff *skb, unsigned int protoff,
if (dataoff >= skb->len)
return NF_ACCEPT;
+ datalen = skb->len - dataoff;
+ if (datalen > MAX_SEARCH_SIZE)
+ datalen = MAX_SEARCH_SIZE;
+
spin_lock_bh(&irc_buffer_lock);
- ib_ptr = skb_header_pointer(skb, dataoff, skb->len - dataoff,
+ ib_ptr = skb_header_pointer(skb, dataoff, datalen,
irc_buffer);
if (!ib_ptr) {
spin_unlock_bh(&irc_buffer_lock);
@@ -149,7 +155,7 @@ static int help(struct sk_buff *skb, unsigned int protoff,
}
data = ib_ptr;
- data_limit = ib_ptr + skb->len - dataoff;
+ data_limit = ib_ptr + datalen;
/* strlen("\1DCC SENT t AAAAAAAA P\1\n")=24
* 5+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=14 */
@@ -251,7 +257,7 @@ static int __init nf_conntrack_irc_init(void)
irc_exp_policy.max_expected = max_dcc_channels;
irc_exp_policy.timeout = dcc_timeout;
- irc_buffer = kmalloc(65536, GFP_KERNEL);
+ irc_buffer = kmalloc(MAX_SEARCH_SIZE + 1, GFP_KERNEL);
if (!irc_buffer)
return -ENOMEM;
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index a63b51dceaf2..a634c72b1ffc 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -655,6 +655,37 @@ static bool tcp_in_window(struct nf_conn *ct,
tn->tcp_be_liberal)
res = true;
if (!res) {
+ bool seq_ok = before(seq, sender->td_maxend + 1);
+
+ if (!seq_ok) {
+ u32 overshot = end - sender->td_maxend + 1;
+ bool ack_ok;
+
+ ack_ok = after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1);
+
+ if (in_recv_win &&
+ ack_ok &&
+ overshot <= receiver->td_maxwin &&
+ before(sack, receiver->td_end + 1)) {
+ /* Work around TCPs that send more bytes than allowed by
+ * the receive window.
+ *
+ * If the (marked as invalid) packet is allowed to pass by
+ * the ruleset and the peer acks this data, then it's possible
+ * all future packets will trigger 'ACK is over upper bound' check.
+ *
+ * Thus if only the sequence check fails then do update td_end so
+ * possible ACK for this data can update internal state.
+ */
+ sender->td_end = end;
+ sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;
+
+ nf_ct_l4proto_log_invalid(skb, ct, hook_state,
+ "%u bytes more than expected", overshot);
+ return res;
+ }
+ }
+
nf_ct_l4proto_log_invalid(skb, ct, hook_state,
"%s",
before(seq, sender->td_maxend + 1) ?
diff --git a/net/netfilter/nf_conntrack_sane.c b/net/netfilter/nf_conntrack_sane.c
index fcb33b1d5456..13dc421fc4f5 100644
--- a/net/netfilter/nf_conntrack_sane.c
+++ b/net/netfilter/nf_conntrack_sane.c
@@ -34,10 +34,6 @@ MODULE_AUTHOR("Michal Schmidt <mschmidt@redhat.com>");
MODULE_DESCRIPTION("SANE connection tracking helper");
MODULE_ALIAS_NFCT_HELPER(HELPER_NAME);
-static char *sane_buffer;
-
-static DEFINE_SPINLOCK(nf_sane_lock);
-
#define MAX_PORTS 8
static u_int16_t ports[MAX_PORTS];
static unsigned int ports_c;
@@ -67,14 +63,16 @@ static int help(struct sk_buff *skb,
unsigned int dataoff, datalen;
const struct tcphdr *th;
struct tcphdr _tcph;
- void *sb_ptr;
int ret = NF_ACCEPT;
int dir = CTINFO2DIR(ctinfo);
struct nf_ct_sane_master *ct_sane_info = nfct_help_data(ct);
struct nf_conntrack_expect *exp;
struct nf_conntrack_tuple *tuple;
- struct sane_request *req;
struct sane_reply_net_start *reply;
+ union {
+ struct sane_request req;
+ struct sane_reply_net_start repl;
+ } buf;
/* Until there's been traffic both ways, don't look in packets. */
if (ctinfo != IP_CT_ESTABLISHED &&
@@ -92,59 +90,62 @@ static int help(struct sk_buff *skb,
return NF_ACCEPT;
datalen = skb->len - dataoff;
-
- spin_lock_bh(&nf_sane_lock);
- sb_ptr = skb_header_pointer(skb, dataoff, datalen, sane_buffer);
- if (!sb_ptr) {
- spin_unlock_bh(&nf_sane_lock);
- return NF_ACCEPT;
- }
-
if (dir == IP_CT_DIR_ORIGINAL) {
+ const struct sane_request *req;
+
if (datalen != sizeof(struct sane_request))
- goto out;
+ return NF_ACCEPT;
+
+ req = skb_header_pointer(skb, dataoff, datalen, &buf.req);
+ if (!req)
+ return NF_ACCEPT;
- req = sb_ptr;
if (req->RPC_code != htonl(SANE_NET_START)) {
/* Not an interesting command */
- ct_sane_info->state = SANE_STATE_NORMAL;
- goto out;
+ WRITE_ONCE(ct_sane_info->state, SANE_STATE_NORMAL);
+ return NF_ACCEPT;
}
/* We're interested in the next reply */
- ct_sane_info->state = SANE_STATE_START_REQUESTED;
- goto out;
+ WRITE_ONCE(ct_sane_info->state, SANE_STATE_START_REQUESTED);
+ return NF_ACCEPT;
}
+ /* IP_CT_DIR_REPLY */
+
/* Is it a reply to an uninteresting command? */
- if (ct_sane_info->state != SANE_STATE_START_REQUESTED)
- goto out;
+ if (READ_ONCE(ct_sane_info->state) != SANE_STATE_START_REQUESTED)
+ return NF_ACCEPT;
/* It's a reply to SANE_NET_START. */
- ct_sane_info->state = SANE_STATE_NORMAL;
+ WRITE_ONCE(ct_sane_info->state, SANE_STATE_NORMAL);
if (datalen < sizeof(struct sane_reply_net_start)) {
pr_debug("NET_START reply too short\n");
- goto out;
+ return NF_ACCEPT;
}
- reply = sb_ptr;
+ datalen = sizeof(struct sane_reply_net_start);
+
+ reply = skb_header_pointer(skb, dataoff, datalen, &buf.repl);
+ if (!reply)
+ return NF_ACCEPT;
+
if (reply->status != htonl(SANE_STATUS_SUCCESS)) {
/* saned refused the command */
pr_debug("unsuccessful SANE_STATUS = %u\n",
ntohl(reply->status));
- goto out;
+ return NF_ACCEPT;
}
/* Invalid saned reply? Ignore it. */
if (reply->zero != 0)
- goto out;
+ return NF_ACCEPT;
exp = nf_ct_expect_alloc(ct);
if (exp == NULL) {
nf_ct_helper_log(skb, ct, "cannot alloc expectation");
- ret = NF_DROP;
- goto out;
+ return NF_DROP;
}
tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
@@ -162,9 +163,6 @@ static int help(struct sk_buff *skb,
}
nf_ct_expect_put(exp);
-
-out:
- spin_unlock_bh(&nf_sane_lock);
return ret;
}
@@ -178,7 +176,6 @@ static const struct nf_conntrack_expect_policy sane_exp_policy = {
static void __exit nf_conntrack_sane_fini(void)
{
nf_conntrack_helpers_unregister(sane, ports_c * 2);
- kfree(sane_buffer);
}
static int __init nf_conntrack_sane_init(void)
@@ -187,10 +184,6 @@ static int __init nf_conntrack_sane_init(void)
NF_CT_HELPER_BUILD_BUG_ON(sizeof(struct nf_ct_sane_master));
- sane_buffer = kmalloc(65536, GFP_KERNEL);
- if (!sane_buffer)
- return -ENOMEM;
-
if (ports_c == 0)
ports[ports_c++] = SANE_PORT;
@@ -210,7 +203,6 @@ static int __init nf_conntrack_sane_init(void)
ret = nf_conntrack_helpers_register(sane, ports_c * 2);
if (ret < 0) {
pr_err("failed to register helpers\n");
- kfree(sane_buffer);
return ret;
}
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 765ac779bfc8..81c26a96c30b 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -437,12 +437,17 @@ static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table,
}
}
+void nf_flow_table_gc_run(struct nf_flowtable *flow_table)
+{
+ nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
+}
+
static void nf_flow_offload_work_gc(struct work_struct *work)
{
struct nf_flowtable *flow_table;
flow_table = container_of(work, struct nf_flowtable, gc_work.work);
- nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
+ nf_flow_table_gc_run(flow_table);
queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
}
@@ -600,11 +605,11 @@ void nf_flow_table_free(struct nf_flowtable *flow_table)
mutex_unlock(&flowtable_lock);
cancel_delayed_work_sync(&flow_table->gc_work);
- nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
- nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
nf_flow_table_offload_flush(flow_table);
- if (nf_flowtable_hw_offload(flow_table))
- nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
+ /* ... no more pending work after this stage ... */
+ nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
+ nf_flow_table_gc_run(flow_table);
+ nf_flow_table_offload_flush_cleanup(flow_table);
rhashtable_destroy(&flow_table->rhashtable);
}
EXPORT_SYMBOL_GPL(nf_flow_table_free);
diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
index 103b6cbf257f..b04645ced89b 100644
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -1074,6 +1074,14 @@ void nf_flow_offload_stats(struct nf_flowtable *flowtable,
flow_offload_queue_work(offload);
}
+void nf_flow_table_offload_flush_cleanup(struct nf_flowtable *flowtable)
+{
+ if (nf_flowtable_hw_offload(flowtable)) {
+ flush_workqueue(nf_flow_offload_del_wq);
+ nf_flow_table_gc_run(flowtable);
+ }
+}
+
void nf_flow_table_offload_flush(struct nf_flowtable *flowtable)
{
if (nf_flowtable_hw_offload(flowtable)) {
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 9f976b11d896..2ee50e23c9b7 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -32,7 +32,6 @@ static LIST_HEAD(nf_tables_objects);
static LIST_HEAD(nf_tables_flowtables);
static LIST_HEAD(nf_tables_destroy_list);
static DEFINE_SPINLOCK(nf_tables_destroy_list_lock);
-static u64 table_handle;
enum {
NFT_VALIDATE_SKIP = 0,
@@ -153,6 +152,7 @@ static struct nft_trans *nft_trans_alloc_gfp(const struct nft_ctx *ctx,
if (trans == NULL)
return NULL;
+ INIT_LIST_HEAD(&trans->list);
trans->msg_type = msg_type;
trans->ctx = *ctx;
@@ -888,7 +888,7 @@ static int nf_tables_dump_tables(struct sk_buff *skb,
rcu_read_lock();
nft_net = nft_pernet(net);
- cb->seq = nft_net->base_seq;
+ cb->seq = READ_ONCE(nft_net->base_seq);
list_for_each_entry_rcu(table, &nft_net->tables, list) {
if (family != NFPROTO_UNSPEC && family != table->family)
@@ -1234,7 +1234,7 @@ static int nf_tables_newtable(struct sk_buff *skb, const struct nfnl_info *info,
INIT_LIST_HEAD(&table->flowtables);
table->family = family;
table->flags = flags;
- table->handle = ++table_handle;
+ table->handle = ++nft_net->table_handle;
if (table->flags & NFT_TABLE_F_OWNER)
table->nlpid = NETLINK_CB(skb).portid;
@@ -1704,7 +1704,7 @@ static int nf_tables_dump_chains(struct sk_buff *skb,
rcu_read_lock();
nft_net = nft_pernet(net);
- cb->seq = nft_net->base_seq;
+ cb->seq = READ_ONCE(nft_net->base_seq);
list_for_each_entry_rcu(table, &nft_net->tables, list) {
if (family != NFPROTO_UNSPEC && family != table->family)
@@ -2195,9 +2195,9 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
struct netlink_ext_ack *extack)
{
const struct nlattr * const *nla = ctx->nla;
+ struct nft_stats __percpu *stats = NULL;
struct nft_table *table = ctx->table;
struct nft_base_chain *basechain;
- struct nft_stats __percpu *stats;
struct net *net = ctx->net;
char name[NFT_NAME_MAXLEN];
struct nft_rule_blob *blob;
@@ -2235,7 +2235,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
return PTR_ERR(stats);
}
rcu_assign_pointer(basechain->stats, stats);
- static_branch_inc(&nft_counters_enabled);
}
err = nft_basechain_init(basechain, family, &hook, flags);
@@ -2318,6 +2317,9 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
goto err_unregister_hook;
}
+ if (stats)
+ static_branch_inc(&nft_counters_enabled);
+
table->use++;
return 0;
@@ -2472,6 +2474,7 @@ err:
}
static struct nft_chain *nft_chain_lookup_byid(const struct net *net,
+ const struct nft_table *table,
const struct nlattr *nla)
{
struct nftables_pernet *nft_net = nft_pernet(net);
@@ -2482,6 +2485,7 @@ static struct nft_chain *nft_chain_lookup_byid(const struct net *net,
struct nft_chain *chain = trans->ctx.chain;
if (trans->msg_type == NFT_MSG_NEWCHAIN &&
+ chain->table == table &&
id == nft_trans_chain_id(trans))
return chain;
}
@@ -2571,6 +2575,9 @@ static int nf_tables_newchain(struct sk_buff *skb, const struct nfnl_info *info,
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, chain, nla);
if (chain != NULL) {
+ if (chain->flags & NFT_CHAIN_BINDING)
+ return -EINVAL;
+
if (info->nlh->nlmsg_flags & NLM_F_EXCL) {
NL_SET_BAD_ATTR(extack, attr);
return -EEXIST;
@@ -3146,7 +3153,7 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
rcu_read_lock();
nft_net = nft_pernet(net);
- cb->seq = nft_net->base_seq;
+ cb->seq = READ_ONCE(nft_net->base_seq);
list_for_each_entry_rcu(table, &nft_net->tables, list) {
if (family != NFPROTO_UNSPEC && family != table->family)
@@ -3371,6 +3378,7 @@ static int nft_table_validate(struct net *net, const struct nft_table *table)
}
static struct nft_rule *nft_rule_lookup_byid(const struct net *net,
+ const struct nft_chain *chain,
const struct nlattr *nla);
#define NFT_RULE_MAXEXPRS 128
@@ -3417,7 +3425,7 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
return -EOPNOTSUPP;
} else if (nla[NFTA_RULE_CHAIN_ID]) {
- chain = nft_chain_lookup_byid(net, nla[NFTA_RULE_CHAIN_ID]);
+ chain = nft_chain_lookup_byid(net, table, nla[NFTA_RULE_CHAIN_ID]);
if (IS_ERR(chain)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN_ID]);
return PTR_ERR(chain);
@@ -3459,7 +3467,7 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
return PTR_ERR(old_rule);
}
} else if (nla[NFTA_RULE_POSITION_ID]) {
- old_rule = nft_rule_lookup_byid(net, nla[NFTA_RULE_POSITION_ID]);
+ old_rule = nft_rule_lookup_byid(net, chain, nla[NFTA_RULE_POSITION_ID]);
if (IS_ERR(old_rule)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_POSITION_ID]);
return PTR_ERR(old_rule);
@@ -3604,6 +3612,7 @@ err_release_expr:
}
static struct nft_rule *nft_rule_lookup_byid(const struct net *net,
+ const struct nft_chain *chain,
const struct nlattr *nla)
{
struct nftables_pernet *nft_net = nft_pernet(net);
@@ -3614,6 +3623,7 @@ static struct nft_rule *nft_rule_lookup_byid(const struct net *net,
struct nft_rule *rule = nft_trans_rule(trans);
if (trans->msg_type == NFT_MSG_NEWRULE &&
+ trans->ctx.chain == chain &&
id == nft_trans_rule_id(trans))
return rule;
}
@@ -3663,7 +3673,7 @@ static int nf_tables_delrule(struct sk_buff *skb, const struct nfnl_info *info,
err = nft_delrule(&ctx, rule);
} else if (nla[NFTA_RULE_ID]) {
- rule = nft_rule_lookup_byid(net, nla[NFTA_RULE_ID]);
+ rule = nft_rule_lookup_byid(net, chain, nla[NFTA_RULE_ID]);
if (IS_ERR(rule)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_ID]);
return PTR_ERR(rule);
@@ -3842,6 +3852,7 @@ static struct nft_set *nft_set_lookup_byhandle(const struct nft_table *table,
}
static struct nft_set *nft_set_lookup_byid(const struct net *net,
+ const struct nft_table *table,
const struct nlattr *nla, u8 genmask)
{
struct nftables_pernet *nft_net = nft_pernet(net);
@@ -3853,6 +3864,7 @@ static struct nft_set *nft_set_lookup_byid(const struct net *net,
struct nft_set *set = nft_trans_set(trans);
if (id == nft_trans_set_id(trans) &&
+ set->table == table &&
nft_active_genmask(set, genmask))
return set;
}
@@ -3873,7 +3885,7 @@ struct nft_set *nft_set_lookup_global(const struct net *net,
if (!nla_set_id)
return set;
- set = nft_set_lookup_byid(net, nla_set_id, genmask);
+ set = nft_set_lookup_byid(net, table, nla_set_id, genmask);
}
return set;
}
@@ -3899,7 +3911,7 @@ cont:
list_for_each_entry(i, &ctx->table->sets, list) {
int tmp;
- if (!nft_is_active_next(ctx->net, set))
+ if (!nft_is_active_next(ctx->net, i))
continue;
if (!sscanf(i->name, name, &tmp))
continue;
@@ -4125,7 +4137,7 @@ static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_lock();
nft_net = nft_pernet(net);
- cb->seq = nft_net->base_seq;
+ cb->seq = READ_ONCE(nft_net->base_seq);
list_for_each_entry_rcu(table, &nft_net->tables, list) {
if (ctx->family != NFPROTO_UNSPEC &&
@@ -4443,6 +4455,11 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
err = nf_tables_set_desc_parse(&desc, nla[NFTA_SET_DESC]);
if (err < 0)
return err;
+
+ if (desc.field_count > 1 && !(flags & NFT_SET_CONCAT))
+ return -EINVAL;
+ } else if (flags & NFT_SET_CONCAT) {
+ return -EINVAL;
}
if (nla[NFTA_SET_EXPR] || nla[NFTA_SET_EXPRESSIONS])
@@ -5053,6 +5070,8 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_lock();
nft_net = nft_pernet(net);
+ cb->seq = READ_ONCE(nft_net->base_seq);
+
list_for_each_entry_rcu(table, &nft_net->tables, list) {
if (dump_ctx->ctx.family != NFPROTO_UNSPEC &&
dump_ctx->ctx.family != table->family)
@@ -5188,6 +5207,9 @@ static int nft_setelem_parse_flags(const struct nft_set *set,
if (!(set->flags & NFT_SET_INTERVAL) &&
*flags & NFT_SET_ELEM_INTERVAL_END)
return -EINVAL;
+ if ((*flags & (NFT_SET_ELEM_INTERVAL_END | NFT_SET_ELEM_CATCHALL)) ==
+ (NFT_SET_ELEM_INTERVAL_END | NFT_SET_ELEM_CATCHALL))
+ return -EINVAL;
return 0;
}
@@ -5195,19 +5217,13 @@ static int nft_setelem_parse_flags(const struct nft_set *set,
static int nft_setelem_parse_key(struct nft_ctx *ctx, struct nft_set *set,
struct nft_data *key, struct nlattr *attr)
{
- struct nft_data_desc desc;
- int err;
-
- err = nft_data_init(ctx, key, NFT_DATA_VALUE_MAXLEN, &desc, attr);
- if (err < 0)
- return err;
-
- if (desc.type != NFT_DATA_VALUE || desc.len != set->klen) {
- nft_data_release(key, desc.type);
- return -EINVAL;
- }
+ struct nft_data_desc desc = {
+ .type = NFT_DATA_VALUE,
+ .size = NFT_DATA_VALUE_MAXLEN,
+ .len = set->klen,
+ };
- return 0;
+ return nft_data_init(ctx, key, &desc, attr);
}
static int nft_setelem_parse_data(struct nft_ctx *ctx, struct nft_set *set,
@@ -5216,24 +5232,18 @@ static int nft_setelem_parse_data(struct nft_ctx *ctx, struct nft_set *set,
struct nlattr *attr)
{
u32 dtype;
- int err;
-
- err = nft_data_init(ctx, data, NFT_DATA_VALUE_MAXLEN, desc, attr);
- if (err < 0)
- return err;
if (set->dtype == NFT_DATA_VERDICT)
dtype = NFT_DATA_VERDICT;
else
dtype = NFT_DATA_VALUE;
- if (dtype != desc->type ||
- set->dlen != desc->len) {
- nft_data_release(data, desc->type);
- return -EINVAL;
- }
+ desc->type = dtype;
+ desc->size = NFT_DATA_VALUE_MAXLEN;
+ desc->len = set->dlen;
+ desc->flags = NFT_DATA_DESC_SETELEM;
- return 0;
+ return nft_data_init(ctx, data, desc, attr);
}
static void *nft_setelem_catchall_get(const struct net *net,
@@ -5467,6 +5477,27 @@ err_set_elem_expr:
return ERR_PTR(err);
}
+static int nft_set_ext_check(const struct nft_set_ext_tmpl *tmpl, u8 id, u32 len)
+{
+ len += nft_set_ext_types[id].len;
+ if (len > tmpl->ext_len[id] ||
+ len > U8_MAX)
+ return -1;
+
+ return 0;
+}
+
+static int nft_set_ext_memcpy(const struct nft_set_ext_tmpl *tmpl, u8 id,
+ void *to, const void *from, u32 len)
+{
+ if (nft_set_ext_check(tmpl, id, len) < 0)
+ return -1;
+
+ memcpy(to, from, len);
+
+ return 0;
+}
+
void *nft_set_elem_init(const struct nft_set *set,
const struct nft_set_ext_tmpl *tmpl,
const u32 *key, const u32 *key_end,
@@ -5477,17 +5508,26 @@ void *nft_set_elem_init(const struct nft_set *set,
elem = kzalloc(set->ops->elemsize + tmpl->len, gfp);
if (elem == NULL)
- return NULL;
+ return ERR_PTR(-ENOMEM);
ext = nft_set_elem_ext(set, elem);
nft_set_ext_init(ext, tmpl);
- if (nft_set_ext_exists(ext, NFT_SET_EXT_KEY))
- memcpy(nft_set_ext_key(ext), key, set->klen);
- if (nft_set_ext_exists(ext, NFT_SET_EXT_KEY_END))
- memcpy(nft_set_ext_key_end(ext), key_end, set->klen);
- if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
- memcpy(nft_set_ext_data(ext), data, set->dlen);
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_KEY) &&
+ nft_set_ext_memcpy(tmpl, NFT_SET_EXT_KEY,
+ nft_set_ext_key(ext), key, set->klen) < 0)
+ goto err_ext_check;
+
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_KEY_END) &&
+ nft_set_ext_memcpy(tmpl, NFT_SET_EXT_KEY_END,
+ nft_set_ext_key_end(ext), key_end, set->klen) < 0)
+ goto err_ext_check;
+
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA) &&
+ nft_set_ext_memcpy(tmpl, NFT_SET_EXT_DATA,
+ nft_set_ext_data(ext), data, set->dlen) < 0)
+ goto err_ext_check;
+
if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION)) {
*nft_set_ext_expiration(ext) = get_jiffies_64() + expiration;
if (expiration == 0)
@@ -5497,6 +5537,11 @@ void *nft_set_elem_init(const struct nft_set *set,
*nft_set_ext_timeout(ext) = timeout;
return elem;
+
+err_ext_check:
+ kfree(elem);
+
+ return ERR_PTR(-EINVAL);
}
static void __nft_set_elem_expr_destroy(const struct nft_ctx *ctx,
@@ -5568,7 +5613,7 @@ int nft_set_elem_expr_clone(const struct nft_ctx *ctx, struct nft_set *set,
err = nft_expr_clone(expr, set->exprs[i]);
if (err < 0) {
- nft_expr_destroy(ctx, expr);
+ kfree(expr);
goto err_expr;
}
expr_array[i] = expr;
@@ -5584,14 +5629,25 @@ err_expr:
}
static int nft_set_elem_expr_setup(struct nft_ctx *ctx,
+ const struct nft_set_ext_tmpl *tmpl,
const struct nft_set_ext *ext,
struct nft_expr *expr_array[],
u32 num_exprs)
{
struct nft_set_elem_expr *elem_expr = nft_set_ext_expr(ext);
+ u32 len = sizeof(struct nft_set_elem_expr);
struct nft_expr *expr;
int i, err;
+ if (num_exprs == 0)
+ return 0;
+
+ for (i = 0; i < num_exprs; i++)
+ len += expr_array[i]->ops->size;
+
+ if (nft_set_ext_check(tmpl, NFT_SET_EXT_EXPRESSIONS, len) < 0)
+ return -EINVAL;
+
for (i = 0; i < num_exprs; i++) {
expr = nft_setelem_expr_at(elem_expr, elem_expr->size);
err = nft_expr_clone(expr, expr_array[i]);
@@ -5800,6 +5856,24 @@ static void nft_setelem_remove(const struct net *net,
set->ops->remove(net, set, elem);
}
+static bool nft_setelem_valid_key_end(const struct nft_set *set,
+ struct nlattr **nla, u32 flags)
+{
+ if ((set->flags & (NFT_SET_CONCAT | NFT_SET_INTERVAL)) ==
+ (NFT_SET_CONCAT | NFT_SET_INTERVAL)) {
+ if (flags & NFT_SET_ELEM_INTERVAL_END)
+ return false;
+ if (!nla[NFTA_SET_ELEM_KEY_END] &&
+ !(flags & NFT_SET_ELEM_CATCHALL))
+ return false;
+ } else {
+ if (nla[NFTA_SET_ELEM_KEY_END])
+ return false;
+ }
+
+ return true;
+}
+
static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
const struct nlattr *attr, u32 nlmsg_flags)
{
@@ -5850,6 +5924,18 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
return -EINVAL;
}
+ if (set->flags & NFT_SET_OBJECT) {
+ if (!nla[NFTA_SET_ELEM_OBJREF] &&
+ !(flags & NFT_SET_ELEM_INTERVAL_END))
+ return -EINVAL;
+ } else {
+ if (nla[NFTA_SET_ELEM_OBJREF])
+ return -EINVAL;
+ }
+
+ if (!nft_setelem_valid_key_end(set, nla, flags))
+ return -EINVAL;
+
if ((flags & NFT_SET_ELEM_INTERVAL_END) &&
(nla[NFTA_SET_ELEM_DATA] ||
nla[NFTA_SET_ELEM_OBJREF] ||
@@ -5857,6 +5943,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
nla[NFTA_SET_ELEM_EXPIRATION] ||
nla[NFTA_SET_ELEM_USERDATA] ||
nla[NFTA_SET_ELEM_EXPR] ||
+ nla[NFTA_SET_ELEM_KEY_END] ||
nla[NFTA_SET_ELEM_EXPRESSIONS]))
return -EINVAL;
@@ -5987,10 +6074,6 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
}
if (nla[NFTA_SET_ELEM_OBJREF] != NULL) {
- if (!(set->flags & NFT_SET_OBJECT)) {
- err = -EINVAL;
- goto err_parse_key_end;
- }
obj = nft_obj_lookup(ctx->net, ctx->table,
nla[NFTA_SET_ELEM_OBJREF],
set->objtype, genmask);
@@ -6054,17 +6137,23 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
}
}
- err = -ENOMEM;
elem.priv = nft_set_elem_init(set, &tmpl, elem.key.val.data,
elem.key_end.val.data, elem.data.val.data,
timeout, expiration, GFP_KERNEL_ACCOUNT);
- if (elem.priv == NULL)
+ if (IS_ERR(elem.priv)) {
+ err = PTR_ERR(elem.priv);
goto err_parse_data;
+ }
ext = nft_set_elem_ext(set, elem.priv);
if (flags)
*nft_set_ext_flags(ext) = flags;
+
if (ulen > 0) {
+ if (nft_set_ext_check(&tmpl, NFT_SET_EXT_USERDATA, ulen) < 0) {
+ err = -EINVAL;
+ goto err_elem_userdata;
+ }
udata = nft_set_ext_userdata(ext);
udata->len = ulen - 1;
nla_memcpy(&udata->data, nla[NFTA_SET_ELEM_USERDATA], ulen);
@@ -6073,14 +6162,14 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
*nft_set_ext_obj(ext) = obj;
obj->use++;
}
- err = nft_set_elem_expr_setup(ctx, ext, expr_array, num_exprs);
+ err = nft_set_elem_expr_setup(ctx, &tmpl, ext, expr_array, num_exprs);
if (err < 0)
- goto err_elem_expr;
+ goto err_elem_free;
trans = nft_trans_elem_alloc(ctx, NFT_MSG_NEWSETELEM, set);
if (trans == NULL) {
err = -ENOMEM;
- goto err_elem_expr;
+ goto err_elem_free;
}
ext->genmask = nft_genmask_cur(ctx->net) | NFT_SET_ELEM_BUSY_MASK;
@@ -6126,10 +6215,10 @@ err_set_full:
nft_setelem_remove(ctx->net, set, &elem);
err_element_clash:
kfree(trans);
-err_elem_expr:
+err_elem_free:
if (obj)
obj->use--;
-
+err_elem_userdata:
nf_tables_set_elem_destroy(ctx, set, elem.priv);
err_parse_data:
if (nla[NFTA_SET_ELEM_DATA] != NULL)
@@ -6277,6 +6366,9 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
if (!nla[NFTA_SET_ELEM_KEY] && !(flags & NFT_SET_ELEM_CATCHALL))
return -EINVAL;
+ if (!nft_setelem_valid_key_end(set, nla, flags))
+ return -EINVAL;
+
nft_set_ext_prepare(&tmpl);
if (flags != 0) {
@@ -6311,8 +6403,10 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
elem.priv = nft_set_elem_init(set, &tmpl, elem.key.val.data,
elem.key_end.val.data, NULL, 0, 0,
GFP_KERNEL_ACCOUNT);
- if (elem.priv == NULL)
+ if (IS_ERR(elem.priv)) {
+ err = PTR_ERR(elem.priv);
goto fail_elem_key_end;
+ }
ext = nft_set_elem_ext(set, elem.priv);
if (flags)
@@ -6891,7 +6985,7 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_lock();
nft_net = nft_pernet(net);
- cb->seq = nft_net->base_seq;
+ cb->seq = READ_ONCE(nft_net->base_seq);
list_for_each_entry_rcu(table, &nft_net->tables, list) {
if (family != NFPROTO_UNSPEC && family != table->family)
@@ -7823,7 +7917,7 @@ static int nf_tables_dump_flowtable(struct sk_buff *skb,
rcu_read_lock();
nft_net = nft_pernet(net);
- cb->seq = nft_net->base_seq;
+ cb->seq = READ_ONCE(nft_net->base_seq);
list_for_each_entry_rcu(table, &nft_net->tables, list) {
if (family != NFPROTO_UNSPEC && family != table->family)
@@ -8756,6 +8850,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
struct nft_trans_elem *te;
struct nft_chain *chain;
struct nft_table *table;
+ unsigned int base_seq;
LIST_HEAD(adl);
int err;
@@ -8805,9 +8900,12 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
* Bump generation counter, invalidate any dump in progress.
* Cannot fail after this point.
*/
- while (++nft_net->base_seq == 0)
+ base_seq = READ_ONCE(nft_net->base_seq);
+ while (++base_seq == 0)
;
+ WRITE_ONCE(nft_net->base_seq, base_seq);
+
/* step 3. Start new generation, rules_gen_X now in use. */
net->nft.gencursor = nft_gencursor_next(net);
@@ -9369,13 +9467,9 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
break;
}
}
-
- cond_resched();
}
list_for_each_entry(set, &ctx->table->sets, list) {
- cond_resched();
-
if (!nft_is_active_next(ctx->net, set))
continue;
if (!(set->flags & NFT_SET_MAP) ||
@@ -9605,7 +9699,7 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
tb[NFTA_VERDICT_CHAIN],
genmask);
} else if (tb[NFTA_VERDICT_CHAIN_ID]) {
- chain = nft_chain_lookup_byid(ctx->net,
+ chain = nft_chain_lookup_byid(ctx->net, ctx->table,
tb[NFTA_VERDICT_CHAIN_ID]);
if (IS_ERR(chain))
return PTR_ERR(chain);
@@ -9617,6 +9711,11 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
return PTR_ERR(chain);
if (nft_is_base_chain(chain))
return -EOPNOTSUPP;
+ if (nft_chain_is_bound(chain))
+ return -EINVAL;
+ if (desc->flags & NFT_DATA_DESC_SETELEM &&
+ chain->flags & NFT_CHAIN_BINDING)
+ return -EINVAL;
chain->use++;
data->verdict.chain = chain;
@@ -9624,7 +9723,7 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
}
desc->len = sizeof(data->verdict);
- desc->type = NFT_DATA_VERDICT;
+
return 0;
}
@@ -9677,20 +9776,25 @@ nla_put_failure:
}
static int nft_value_init(const struct nft_ctx *ctx,
- struct nft_data *data, unsigned int size,
- struct nft_data_desc *desc, const struct nlattr *nla)
+ struct nft_data *data, struct nft_data_desc *desc,
+ const struct nlattr *nla)
{
unsigned int len;
len = nla_len(nla);
if (len == 0)
return -EINVAL;
- if (len > size)
+ if (len > desc->size)
return -EOVERFLOW;
+ if (desc->len) {
+ if (len != desc->len)
+ return -EINVAL;
+ } else {
+ desc->len = len;
+ }
nla_memcpy(data->data, nla, len);
- desc->type = NFT_DATA_VALUE;
- desc->len = len;
+
return 0;
}
@@ -9710,7 +9814,6 @@ static const struct nla_policy nft_data_policy[NFTA_DATA_MAX + 1] = {
*
* @ctx: context of the expression using the data
* @data: destination struct nft_data
- * @size: maximum data length
* @desc: data description
* @nla: netlink attribute containing data
*
@@ -9720,24 +9823,35 @@ static const struct nla_policy nft_data_policy[NFTA_DATA_MAX + 1] = {
* The caller can indicate that it only wants to accept data of type
* NFT_DATA_VALUE by passing NULL for the ctx argument.
*/
-int nft_data_init(const struct nft_ctx *ctx,
- struct nft_data *data, unsigned int size,
+int nft_data_init(const struct nft_ctx *ctx, struct nft_data *data,
struct nft_data_desc *desc, const struct nlattr *nla)
{
struct nlattr *tb[NFTA_DATA_MAX + 1];
int err;
+ if (WARN_ON_ONCE(!desc->size))
+ return -EINVAL;
+
err = nla_parse_nested_deprecated(tb, NFTA_DATA_MAX, nla,
nft_data_policy, NULL);
if (err < 0)
return err;
- if (tb[NFTA_DATA_VALUE])
- return nft_value_init(ctx, data, size, desc,
- tb[NFTA_DATA_VALUE]);
- if (tb[NFTA_DATA_VERDICT] && ctx != NULL)
- return nft_verdict_init(ctx, data, desc, tb[NFTA_DATA_VERDICT]);
- return -EINVAL;
+ if (tb[NFTA_DATA_VALUE]) {
+ if (desc->type != NFT_DATA_VALUE)
+ return -EINVAL;
+
+ err = nft_value_init(ctx, data, desc, tb[NFTA_DATA_VALUE]);
+ } else if (tb[NFTA_DATA_VERDICT] && ctx != NULL) {
+ if (desc->type != NFT_DATA_VERDICT)
+ return -EINVAL;
+
+ err = nft_verdict_init(ctx, data, desc, tb[NFTA_DATA_VERDICT]);
+ } else {
+ err = -EINVAL;
+ }
+
+ return err;
}
EXPORT_SYMBOL_GPL(nft_data_init);
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index 3ddce24ac76d..cee3e4e905ec 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -34,25 +34,23 @@ static noinline void __nft_trace_packet(struct nft_traceinfo *info,
nft_trace_notify(info);
}
-static inline void nft_trace_packet(struct nft_traceinfo *info,
+static inline void nft_trace_packet(const struct nft_pktinfo *pkt,
+ struct nft_traceinfo *info,
const struct nft_chain *chain,
const struct nft_rule_dp *rule,
enum nft_trace_types type)
{
if (static_branch_unlikely(&nft_trace_enabled)) {
- const struct nft_pktinfo *pkt = info->pkt;
-
info->nf_trace = pkt->skb->nf_trace;
info->rule = rule;
__nft_trace_packet(info, chain, type);
}
}
-static inline void nft_trace_copy_nftrace(struct nft_traceinfo *info)
+static inline void nft_trace_copy_nftrace(const struct nft_pktinfo *pkt,
+ struct nft_traceinfo *info)
{
if (static_branch_unlikely(&nft_trace_enabled)) {
- const struct nft_pktinfo *pkt = info->pkt;
-
if (info->trace)
info->nf_trace = pkt->skb->nf_trace;
}
@@ -96,7 +94,6 @@ static noinline void __nft_trace_verdict(struct nft_traceinfo *info,
const struct nft_chain *chain,
const struct nft_regs *regs)
{
- const struct nft_pktinfo *pkt = info->pkt;
enum nft_trace_types type;
switch (regs->verdict.code) {
@@ -110,7 +107,9 @@ static noinline void __nft_trace_verdict(struct nft_traceinfo *info,
break;
default:
type = NFT_TRACETYPE_RULE;
- info->nf_trace = pkt->skb->nf_trace;
+
+ if (info->trace)
+ info->nf_trace = info->pkt->skb->nf_trace;
break;
}
@@ -271,10 +270,10 @@ next_rule:
switch (regs.verdict.code) {
case NFT_BREAK:
regs.verdict.code = NFT_CONTINUE;
- nft_trace_copy_nftrace(&info);
+ nft_trace_copy_nftrace(pkt, &info);
continue;
case NFT_CONTINUE:
- nft_trace_packet(&info, chain, rule,
+ nft_trace_packet(pkt, &info, chain, rule,
NFT_TRACETYPE_RULE);
continue;
}
@@ -318,7 +317,7 @@ next_rule:
goto next_rule;
}
- nft_trace_packet(&info, basechain, NULL, NFT_TRACETYPE_POLICY);
+ nft_trace_packet(pkt, &info, basechain, NULL, NFT_TRACETYPE_POLICY);
if (static_branch_unlikely(&nft_counters_enabled))
nft_update_chain_stats(basechain, pkt);
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index c24b1240908f..9c44518cb70f 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -44,6 +44,10 @@ MODULE_DESCRIPTION("Netfilter messages via netlink socket");
static unsigned int nfnetlink_pernet_id __read_mostly;
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+static DEFINE_SPINLOCK(nfnl_grp_active_lock);
+#endif
+
struct nfnl_net {
struct sock *nfnl;
};
@@ -654,6 +658,44 @@ static void nfnetlink_rcv(struct sk_buff *skb)
netlink_rcv_skb(skb, nfnetlink_rcv_msg);
}
+static void nfnetlink_bind_event(struct net *net, unsigned int group)
+{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+ int type, group_bit;
+ u8 v;
+
+ /* All NFNLGRP_CONNTRACK_* group bits fit into u8.
+ * The other groups are not relevant and can be ignored.
+ */
+ if (group >= 8)
+ return;
+
+ type = nfnl_group2type[group];
+
+ switch (type) {
+ case NFNL_SUBSYS_CTNETLINK:
+ break;
+ case NFNL_SUBSYS_CTNETLINK_EXP:
+ break;
+ default:
+ return;
+ }
+
+ group_bit = (1 << group);
+
+ spin_lock(&nfnl_grp_active_lock);
+ v = READ_ONCE(net->ct.ctnetlink_has_listener);
+ if ((v & group_bit) == 0) {
+ v |= group_bit;
+
+ /* read concurrently without nfnl_grp_active_lock held. */
+ WRITE_ONCE(net->ct.ctnetlink_has_listener, v);
+ }
+
+ spin_unlock(&nfnl_grp_active_lock);
+#endif
+}
+
static int nfnetlink_bind(struct net *net, int group)
{
const struct nfnetlink_subsystem *ss;
@@ -670,28 +712,45 @@ static int nfnetlink_bind(struct net *net, int group)
if (!ss)
request_module_nowait("nfnetlink-subsys-%d", type);
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
- if (type == NFNL_SUBSYS_CTNETLINK) {
- nfnl_lock(NFNL_SUBSYS_CTNETLINK);
- WRITE_ONCE(net->ct.ctnetlink_has_listener, true);
- nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
- }
-#endif
+ nfnetlink_bind_event(net, group);
return 0;
}
static void nfnetlink_unbind(struct net *net, int group)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
+ int type, group_bit;
+
if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX)
return;
- if (nfnl_group2type[group] == NFNL_SUBSYS_CTNETLINK) {
- nfnl_lock(NFNL_SUBSYS_CTNETLINK);
- if (!nfnetlink_has_listeners(net, group))
- WRITE_ONCE(net->ct.ctnetlink_has_listener, false);
- nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
+ type = nfnl_group2type[group];
+
+ switch (type) {
+ case NFNL_SUBSYS_CTNETLINK:
+ break;
+ case NFNL_SUBSYS_CTNETLINK_EXP:
+ break;
+ default:
+ return;
+ }
+
+ /* ctnetlink_has_listener is u8 */
+ if (group >= 8)
+ return;
+
+ group_bit = (1 << group);
+
+ spin_lock(&nfnl_grp_active_lock);
+ if (!nfnetlink_has_listeners(net, group)) {
+ u8 v = READ_ONCE(net->ct.ctnetlink_has_listener);
+
+ v &= ~group_bit;
+
+ /* read concurrently without nfnl_grp_active_lock held. */
+ WRITE_ONCE(net->ct.ctnetlink_has_listener, v);
}
+ spin_unlock(&nfnl_grp_active_lock);
#endif
}
diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c
index 83590afe3768..e6e402b247d0 100644
--- a/net/netfilter/nft_bitwise.c
+++ b/net/netfilter/nft_bitwise.c
@@ -93,7 +93,16 @@ static const struct nla_policy nft_bitwise_policy[NFTA_BITWISE_MAX + 1] = {
static int nft_bitwise_init_bool(struct nft_bitwise *priv,
const struct nlattr *const tb[])
{
- struct nft_data_desc mask, xor;
+ struct nft_data_desc mask = {
+ .type = NFT_DATA_VALUE,
+ .size = sizeof(priv->mask),
+ .len = priv->len,
+ };
+ struct nft_data_desc xor = {
+ .type = NFT_DATA_VALUE,
+ .size = sizeof(priv->xor),
+ .len = priv->len,
+ };
int err;
if (tb[NFTA_BITWISE_DATA])
@@ -103,37 +112,30 @@ static int nft_bitwise_init_bool(struct nft_bitwise *priv,
!tb[NFTA_BITWISE_XOR])
return -EINVAL;
- err = nft_data_init(NULL, &priv->mask, sizeof(priv->mask), &mask,
- tb[NFTA_BITWISE_MASK]);
+ err = nft_data_init(NULL, &priv->mask, &mask, tb[NFTA_BITWISE_MASK]);
if (err < 0)
return err;
- if (mask.type != NFT_DATA_VALUE || mask.len != priv->len) {
- err = -EINVAL;
- goto err_mask_release;
- }
- err = nft_data_init(NULL, &priv->xor, sizeof(priv->xor), &xor,
- tb[NFTA_BITWISE_XOR]);
+ err = nft_data_init(NULL, &priv->xor, &xor, tb[NFTA_BITWISE_XOR]);
if (err < 0)
- goto err_mask_release;
- if (xor.type != NFT_DATA_VALUE || xor.len != priv->len) {
- err = -EINVAL;
- goto err_xor_release;
- }
+ goto err_xor_err;
return 0;
-err_xor_release:
- nft_data_release(&priv->xor, xor.type);
-err_mask_release:
+err_xor_err:
nft_data_release(&priv->mask, mask.type);
+
return err;
}
static int nft_bitwise_init_shift(struct nft_bitwise *priv,
const struct nlattr *const tb[])
{
- struct nft_data_desc d;
+ struct nft_data_desc desc = {
+ .type = NFT_DATA_VALUE,
+ .size = sizeof(priv->data),
+ .len = sizeof(u32),
+ };
int err;
if (tb[NFTA_BITWISE_MASK] ||
@@ -143,13 +145,12 @@ static int nft_bitwise_init_shift(struct nft_bitwise *priv,
if (!tb[NFTA_BITWISE_DATA])
return -EINVAL;
- err = nft_data_init(NULL, &priv->data, sizeof(priv->data), &d,
- tb[NFTA_BITWISE_DATA]);
+ err = nft_data_init(NULL, &priv->data, &desc, tb[NFTA_BITWISE_DATA]);
if (err < 0)
return err;
- if (d.type != NFT_DATA_VALUE || d.len != sizeof(u32) ||
- priv->data.data[0] >= BITS_PER_TYPE(u32)) {
- nft_data_release(&priv->data, d.type);
+
+ if (priv->data.data[0] >= BITS_PER_TYPE(u32)) {
+ nft_data_release(&priv->data, desc.type);
return -EINVAL;
}
@@ -339,22 +340,21 @@ static const struct nft_expr_ops nft_bitwise_ops = {
static int
nft_bitwise_extract_u32_data(const struct nlattr * const tb, u32 *out)
{
- struct nft_data_desc desc;
struct nft_data data;
- int err = 0;
+ struct nft_data_desc desc = {
+ .type = NFT_DATA_VALUE,
+ .size = sizeof(data),
+ .len = sizeof(u32),
+ };
+ int err;
- err = nft_data_init(NULL, &data, sizeof(data), &desc, tb);
+ err = nft_data_init(NULL, &data, &desc, tb);
if (err < 0)
return err;
- if (desc.type != NFT_DATA_VALUE || desc.len != sizeof(u32)) {
- err = -EINVAL;
- goto err;
- }
*out = data.data[0];
-err:
- nft_data_release(&data, desc.type);
- return err;
+
+ return 0;
}
static int nft_bitwise_fast_init(const struct nft_ctx *ctx,
diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
index 777f09e4dc60..963cf831799c 100644
--- a/net/netfilter/nft_cmp.c
+++ b/net/netfilter/nft_cmp.c
@@ -73,20 +73,16 @@ static int nft_cmp_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_cmp_expr *priv = nft_expr_priv(expr);
- struct nft_data_desc desc;
+ struct nft_data_desc desc = {
+ .type = NFT_DATA_VALUE,
+ .size = sizeof(priv->data),
+ };
int err;
- err = nft_data_init(NULL, &priv->data, sizeof(priv->data), &desc,
- tb[NFTA_CMP_DATA]);
+ err = nft_data_init(NULL, &priv->data, &desc, tb[NFTA_CMP_DATA]);
if (err < 0)
return err;
- if (desc.type != NFT_DATA_VALUE) {
- err = -EINVAL;
- nft_data_release(&priv->data, desc.type);
- return err;
- }
-
err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
if (err < 0)
return err;
@@ -214,12 +210,14 @@ static int nft_cmp_fast_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
- struct nft_data_desc desc;
struct nft_data data;
+ struct nft_data_desc desc = {
+ .type = NFT_DATA_VALUE,
+ .size = sizeof(data),
+ };
int err;
- err = nft_data_init(NULL, &data, sizeof(data), &desc,
- tb[NFTA_CMP_DATA]);
+ err = nft_data_init(NULL, &data, &desc, tb[NFTA_CMP_DATA]);
if (err < 0)
return err;
@@ -313,11 +311,13 @@ static int nft_cmp16_fast_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr);
- struct nft_data_desc desc;
+ struct nft_data_desc desc = {
+ .type = NFT_DATA_VALUE,
+ .size = sizeof(priv->data),
+ };
int err;
- err = nft_data_init(NULL, &priv->data, sizeof(priv->data), &desc,
- tb[NFTA_CMP_DATA]);
+ err = nft_data_init(NULL, &priv->data, &desc, tb[NFTA_CMP_DATA]);
if (err < 0)
return err;
@@ -380,8 +380,11 @@ const struct nft_expr_ops nft_cmp16_fast_ops = {
static const struct nft_expr_ops *
nft_cmp_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
{
- struct nft_data_desc desc;
struct nft_data data;
+ struct nft_data_desc desc = {
+ .type = NFT_DATA_VALUE,
+ .size = sizeof(data),
+ };
enum nft_cmp_ops op;
u8 sreg;
int err;
@@ -404,14 +407,10 @@ nft_cmp_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
return ERR_PTR(-EINVAL);
}
- err = nft_data_init(NULL, &data, sizeof(data), &desc,
- tb[NFTA_CMP_DATA]);
+ err = nft_data_init(NULL, &data, &desc, tb[NFTA_CMP_DATA]);
if (err < 0)
return ERR_PTR(err);
- if (desc.type != NFT_DATA_VALUE)
- goto err1;
-
sreg = ntohl(nla_get_be32(tb[NFTA_CMP_SREG]));
if (op == NFT_CMP_EQ || op == NFT_CMP_NEQ) {
@@ -423,9 +422,6 @@ nft_cmp_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
return &nft_cmp16_fast_ops;
}
return &nft_cmp_ops;
-err1:
- nft_data_release(&data, desc.type);
- return ERR_PTR(-EINVAL);
}
struct nft_expr_type nft_cmp_type __read_mostly = {
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 22f70b543fa2..6983e6ddeef9 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -60,7 +60,7 @@ static void *nft_dynset_new(struct nft_set *set, const struct nft_expr *expr,
&regs->data[priv->sreg_key], NULL,
&regs->data[priv->sreg_data],
timeout, 0, GFP_ATOMIC);
- if (elem == NULL)
+ if (IS_ERR(elem))
goto err1;
ext = nft_set_elem_ext(set, elem);
diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
index b80f7b507349..5f28b21abc7d 100644
--- a/net/netfilter/nft_immediate.c
+++ b/net/netfilter/nft_immediate.c
@@ -29,20 +29,36 @@ static const struct nla_policy nft_immediate_policy[NFTA_IMMEDIATE_MAX + 1] = {
[NFTA_IMMEDIATE_DATA] = { .type = NLA_NESTED },
};
+static enum nft_data_types nft_reg_to_type(const struct nlattr *nla)
+{
+ enum nft_data_types type;
+ u8 reg;
+
+ reg = ntohl(nla_get_be32(nla));
+ if (reg == NFT_REG_VERDICT)
+ type = NFT_DATA_VERDICT;
+ else
+ type = NFT_DATA_VALUE;
+
+ return type;
+}
+
static int nft_immediate_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_immediate_expr *priv = nft_expr_priv(expr);
- struct nft_data_desc desc;
+ struct nft_data_desc desc = {
+ .size = sizeof(priv->data),
+ };
int err;
if (tb[NFTA_IMMEDIATE_DREG] == NULL ||
tb[NFTA_IMMEDIATE_DATA] == NULL)
return -EINVAL;
- err = nft_data_init(ctx, &priv->data, sizeof(priv->data), &desc,
- tb[NFTA_IMMEDIATE_DATA]);
+ desc.type = nft_reg_to_type(tb[NFTA_IMMEDIATE_DREG]);
+ err = nft_data_init(ctx, &priv->data, &desc, tb[NFTA_IMMEDIATE_DATA]);
if (err < 0)
return err;
diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c
index 0053a697c931..89342ccccdcc 100644
--- a/net/netfilter/nft_osf.c
+++ b/net/netfilter/nft_osf.c
@@ -115,9 +115,21 @@ static int nft_osf_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data)
{
- return nft_chain_validate_hooks(ctx->chain, (1 << NF_INET_LOCAL_IN) |
- (1 << NF_INET_PRE_ROUTING) |
- (1 << NF_INET_FORWARD));
+ unsigned int hooks;
+
+ switch (ctx->family) {
+ case NFPROTO_IPV4:
+ case NFPROTO_IPV6:
+ case NFPROTO_INET:
+ hooks = (1 << NF_INET_LOCAL_IN) |
+ (1 << NF_INET_PRE_ROUTING) |
+ (1 << NF_INET_FORWARD);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return nft_chain_validate_hooks(ctx->chain, hooks);
}
static bool nft_osf_reduce(struct nft_regs_track *track,
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index 2e7ac007cb30..eb0e40c29712 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -740,17 +740,23 @@ static int nft_payload_set_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
struct nft_payload_set *priv = nft_expr_priv(expr);
+ u32 csum_offset, csum_type = NFT_PAYLOAD_CSUM_NONE;
+ int err;
priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
if (tb[NFTA_PAYLOAD_CSUM_TYPE])
- priv->csum_type =
- ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
- if (tb[NFTA_PAYLOAD_CSUM_OFFSET])
- priv->csum_offset =
- ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET]));
+ csum_type = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
+ if (tb[NFTA_PAYLOAD_CSUM_OFFSET]) {
+ err = nft_parse_u32_check(tb[NFTA_PAYLOAD_CSUM_OFFSET], U8_MAX,
+ &csum_offset);
+ if (err < 0)
+ return err;
+
+ priv->csum_offset = csum_offset;
+ }
if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
u32 flags;
@@ -761,7 +767,7 @@ static int nft_payload_set_init(const struct nft_ctx *ctx,
priv->csum_flags = flags;
}
- switch (priv->csum_type) {
+ switch (csum_type) {
case NFT_PAYLOAD_CSUM_NONE:
case NFT_PAYLOAD_CSUM_INET:
break;
@@ -775,6 +781,7 @@ static int nft_payload_set_init(const struct nft_ctx *ctx,
default:
return -EOPNOTSUPP;
}
+ priv->csum_type = csum_type;
return nft_parse_register_load(tb[NFTA_PAYLOAD_SREG], &priv->sreg,
priv->len);
@@ -833,6 +840,7 @@ nft_payload_select_ops(const struct nft_ctx *ctx,
{
enum nft_payload_bases base;
unsigned int offset, len;
+ int err;
if (tb[NFTA_PAYLOAD_BASE] == NULL ||
tb[NFTA_PAYLOAD_OFFSET] == NULL ||
@@ -859,8 +867,13 @@ nft_payload_select_ops(const struct nft_ctx *ctx,
if (tb[NFTA_PAYLOAD_DREG] == NULL)
return ERR_PTR(-EINVAL);
- offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
- len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
+ err = nft_parse_u32_check(tb[NFTA_PAYLOAD_OFFSET], U8_MAX, &offset);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ err = nft_parse_u32_check(tb[NFTA_PAYLOAD_LEN], U8_MAX, &len);
+ if (err < 0)
+ return ERR_PTR(err);
if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
base != NFT_PAYLOAD_LL_HEADER && base != NFT_PAYLOAD_INNER_HEADER)
diff --git a/net/netfilter/nft_range.c b/net/netfilter/nft_range.c
index 66f77484c227..832f0d725a9e 100644
--- a/net/netfilter/nft_range.c
+++ b/net/netfilter/nft_range.c
@@ -51,7 +51,14 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr
const struct nlattr * const tb[])
{
struct nft_range_expr *priv = nft_expr_priv(expr);
- struct nft_data_desc desc_from, desc_to;
+ struct nft_data_desc desc_from = {
+ .type = NFT_DATA_VALUE,
+ .size = sizeof(priv->data_from),
+ };
+ struct nft_data_desc desc_to = {
+ .type = NFT_DATA_VALUE,
+ .size = sizeof(priv->data_to),
+ };
int err;
u32 op;
@@ -61,26 +68,16 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr
!tb[NFTA_RANGE_TO_DATA])
return -EINVAL;
- err = nft_data_init(NULL, &priv->data_from, sizeof(priv->data_from),
- &desc_from, tb[NFTA_RANGE_FROM_DATA]);
+ err = nft_data_init(NULL, &priv->data_from, &desc_from,
+ tb[NFTA_RANGE_FROM_DATA]);
if (err < 0)
return err;
- if (desc_from.type != NFT_DATA_VALUE) {
- err = -EINVAL;
- goto err1;
- }
-
- err = nft_data_init(NULL, &priv->data_to, sizeof(priv->data_to),
- &desc_to, tb[NFTA_RANGE_TO_DATA]);
+ err = nft_data_init(NULL, &priv->data_to, &desc_to,
+ tb[NFTA_RANGE_TO_DATA]);
if (err < 0)
goto err1;
- if (desc_to.type != NFT_DATA_VALUE) {
- err = -EINVAL;
- goto err2;
- }
-
if (desc_from.len != desc_to.len) {
err = -EINVAL;
goto err2;
diff --git a/net/netfilter/nft_tproxy.c b/net/netfilter/nft_tproxy.c
index 68b2eed742df..62da25ad264b 100644
--- a/net/netfilter/nft_tproxy.c
+++ b/net/netfilter/nft_tproxy.c
@@ -312,6 +312,13 @@ static int nft_tproxy_dump(struct sk_buff *skb,
return 0;
}
+static int nft_tproxy_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data)
+{
+ return nft_chain_validate_hooks(ctx->chain, 1 << NF_INET_PRE_ROUTING);
+}
+
static struct nft_expr_type nft_tproxy_type;
static const struct nft_expr_ops nft_tproxy_ops = {
.type = &nft_tproxy_type,
@@ -321,6 +328,7 @@ static const struct nft_expr_ops nft_tproxy_ops = {
.destroy = nft_tproxy_destroy,
.dump = nft_tproxy_dump,
.reduce = NFT_REDUCE_READONLY,
+ .validate = nft_tproxy_validate,
};
static struct nft_expr_type nft_tproxy_type __read_mostly = {
diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c
index 5edaaded706d..983ade4be3b3 100644
--- a/net/netfilter/nft_tunnel.c
+++ b/net/netfilter/nft_tunnel.c
@@ -161,6 +161,7 @@ static const struct nft_expr_ops nft_tunnel_get_ops = {
static struct nft_expr_type nft_tunnel_type __read_mostly = {
.name = "tunnel",
+ .family = NFPROTO_NETDEV,
.ops = &nft_tunnel_get_ops,
.policy = nft_tunnel_policy,
.maxattr = NFTA_TUNNEL_MAX,
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index 8490e46359ae..0555dffd80e0 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -885,7 +885,7 @@ static int netlbl_unlabel_staticadd(struct sk_buff *skb,
/* Don't allow users to add both IPv4 and IPv6 addresses for a
* single entry. However, allow users to create two entries, one each
- * for IPv4 and IPv4, with the same LSM security context which should
+ * for IPv4 and IPv6, with the same LSM security context which should
* achieve the same result. */
if (!info->attrs[NLBL_UNLABEL_A_SECCTX] ||
!info->attrs[NLBL_UNLABEL_A_IFACE] ||
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 1afca2a6c2ac..57010927e20a 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -1174,13 +1174,17 @@ static int ctrl_dumppolicy_start(struct netlink_callback *cb)
op.policy,
op.maxattr);
if (err)
- return err;
+ goto err_free_state;
}
}
if (!ctx->state)
return -ENODATA;
return 0;
+
+err_free_state:
+ netlink_policy_dump_free(ctx->state);
+ return err;
}
static void *ctrl_dumppolicy_prep(struct sk_buff *skb,
diff --git a/net/netlink/policy.c b/net/netlink/policy.c
index 8d7c900e27f4..87e3de0fde89 100644
--- a/net/netlink/policy.c
+++ b/net/netlink/policy.c
@@ -144,7 +144,7 @@ int netlink_policy_dump_add_policy(struct netlink_policy_dump_state **pstate,
err = add_policy(&state, policy, maxtype);
if (err)
- return err;
+ goto err_try_undo;
for (policy_idx = 0;
policy_idx < state->n_alloc && state->policies[policy_idx].policy;
@@ -164,7 +164,7 @@ int netlink_policy_dump_add_policy(struct netlink_policy_dump_state **pstate,
policy[type].nested_policy,
policy[type].len);
if (err)
- return err;
+ goto err_try_undo;
break;
default:
break;
@@ -174,6 +174,16 @@ int netlink_policy_dump_add_policy(struct netlink_policy_dump_state **pstate,
*pstate = state;
return 0;
+
+err_try_undo:
+ /* Try to preserve reasonable unwind semantics - if we're starting from
+ * scratch clean up fully, otherwise record what we got and the caller will clean up.
+ */
+ if (!*pstate)
+ netlink_policy_dump_free(state);
+ else
+ *pstate = state;
+ return err;
}
static bool
diff --git a/net/qrtr/mhi.c b/net/qrtr/mhi.c
index 18196e1c8c2f..9ced13c0627a 100644
--- a/net/qrtr/mhi.c
+++ b/net/qrtr/mhi.c
@@ -78,11 +78,6 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev,
struct qrtr_mhi_dev *qdev;
int rc;
- /* start channels */
- rc = mhi_prepare_for_transfer_autoqueue(mhi_dev);
- if (rc)
- return rc;
-
qdev = devm_kzalloc(&mhi_dev->dev, sizeof(*qdev), GFP_KERNEL);
if (!qdev)
return -ENOMEM;
@@ -96,6 +91,13 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev,
if (rc)
return rc;
+ /* start channels */
+ rc = mhi_prepare_for_transfer_autoqueue(mhi_dev);
+ if (rc) {
+ qrtr_endpoint_unregister(&qdev->ep);
+ return rc;
+ }
+
dev_dbg(qdev->dev, "Qualcomm MHI QRTR driver probed\n");
return 0;
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 6fdedd9dbbc2..cfbf0e129cba 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -363,6 +363,7 @@ static int acquire_refill(struct rds_connection *conn)
static void release_refill(struct rds_connection *conn)
{
clear_bit(RDS_RECV_REFILL, &conn->c_flags);
+ smp_mb__after_atomic();
/* We don't use wait_on_bit()/wake_up_bit() because our waking is in a
* hot path and finding waiters is very rare. We don't want to walk
diff --git a/net/rds/message.c b/net/rds/message.c
index 799034e0f513..d74be4e3f3fa 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -391,7 +391,7 @@ static int rds_message_zcopy_from_user(struct rds_message *rm, struct iov_iter *
size_t start;
ssize_t copied;
- copied = iov_iter_get_pages(from, &pages, PAGE_SIZE,
+ copied = iov_iter_get_pages2(from, &pages, PAGE_SIZE,
1, &start);
if (copied < 0) {
struct mmpin *mmp;
@@ -405,7 +405,6 @@ static int rds_message_zcopy_from_user(struct rds_message *rm, struct iov_iter *
goto err;
}
total_copied += copied;
- iov_iter_advance(from, copied);
length -= copied;
sg_set_page(sg, pages, copied, start);
rm->data.op_nents++;
diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c
index 11c45c8c6c16..036d92c0ad79 100644
--- a/net/rose/rose_loopback.c
+++ b/net/rose/rose_loopback.c
@@ -96,7 +96,8 @@ static void rose_loopback_timer(struct timer_list *unused)
}
if (frametype == ROSE_CALL_REQUEST) {
- if (!rose_loopback_neigh->dev) {
+ if (!rose_loopback_neigh->dev &&
+ !rose_loopback_neigh->loopback) {
kfree_skb(skb);
continue;
}
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 84d0a4109645..6401cdf7a624 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -285,8 +285,10 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
_enter("%p,%lx", rx, p->user_call_ID);
limiter = rxrpc_get_call_slot(p, gfp);
- if (!limiter)
+ if (!limiter) {
+ release_sock(&rx->sk);
return ERR_PTR(-ERESTARTSYS);
+ }
call = rxrpc_alloc_client_call(rx, srx, gfp, debug_id);
if (IS_ERR(call)) {
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index 1d38e279e2ef..3c3a626459de 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -51,10 +51,7 @@ static int rxrpc_wait_for_tx_window_intr(struct rxrpc_sock *rx,
return sock_intr_errno(*timeo);
trace_rxrpc_transmit(call, rxrpc_transmit_wait);
- mutex_unlock(&call->user_mutex);
*timeo = schedule_timeout(*timeo);
- if (mutex_lock_interruptible(&call->user_mutex) < 0)
- return sock_intr_errno(*timeo);
}
}
@@ -290,37 +287,48 @@ out:
static int rxrpc_send_data(struct rxrpc_sock *rx,
struct rxrpc_call *call,
struct msghdr *msg, size_t len,
- rxrpc_notify_end_tx_t notify_end_tx)
+ rxrpc_notify_end_tx_t notify_end_tx,
+ bool *_dropped_lock)
{
struct rxrpc_skb_priv *sp;
struct sk_buff *skb;
struct sock *sk = &rx->sk;
+ enum rxrpc_call_state state;
long timeo;
- bool more;
- int ret, copied;
+ bool more = msg->msg_flags & MSG_MORE;
+ int ret, copied = 0;
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
/* this should be in poll */
sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+reload:
+ ret = -EPIPE;
if (sk->sk_shutdown & SEND_SHUTDOWN)
- return -EPIPE;
-
- more = msg->msg_flags & MSG_MORE;
-
+ goto maybe_error;
+ state = READ_ONCE(call->state);
+ ret = -ESHUTDOWN;
+ if (state >= RXRPC_CALL_COMPLETE)
+ goto maybe_error;
+ ret = -EPROTO;
+ if (state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
+ state != RXRPC_CALL_SERVER_ACK_REQUEST &&
+ state != RXRPC_CALL_SERVER_SEND_REPLY)
+ goto maybe_error;
+
+ ret = -EMSGSIZE;
if (call->tx_total_len != -1) {
- if (len > call->tx_total_len)
- return -EMSGSIZE;
- if (!more && len != call->tx_total_len)
- return -EMSGSIZE;
+ if (len - copied > call->tx_total_len)
+ goto maybe_error;
+ if (!more && len - copied != call->tx_total_len)
+ goto maybe_error;
}
skb = call->tx_pending;
call->tx_pending = NULL;
rxrpc_see_skb(skb, rxrpc_skb_seen);
- copied = 0;
do {
/* Check to see if there's a ping ACK to reply to. */
if (call->ackr_reason == RXRPC_ACK_PING_RESPONSE)
@@ -331,16 +339,8 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
_debug("alloc");
- if (!rxrpc_check_tx_space(call, NULL)) {
- ret = -EAGAIN;
- if (msg->msg_flags & MSG_DONTWAIT)
- goto maybe_error;
- ret = rxrpc_wait_for_tx_window(rx, call,
- &timeo,
- msg->msg_flags & MSG_WAITALL);
- if (ret < 0)
- goto maybe_error;
- }
+ if (!rxrpc_check_tx_space(call, NULL))
+ goto wait_for_space;
/* Work out the maximum size of a packet. Assume that
* the security header is going to be in the padded
@@ -468,6 +468,27 @@ maybe_error:
efault:
ret = -EFAULT;
goto out;
+
+wait_for_space:
+ ret = -EAGAIN;
+ if (msg->msg_flags & MSG_DONTWAIT)
+ goto maybe_error;
+ mutex_unlock(&call->user_mutex);
+ *_dropped_lock = true;
+ ret = rxrpc_wait_for_tx_window(rx, call, &timeo,
+ msg->msg_flags & MSG_WAITALL);
+ if (ret < 0)
+ goto maybe_error;
+ if (call->interruptibility == RXRPC_INTERRUPTIBLE) {
+ if (mutex_lock_interruptible(&call->user_mutex) < 0) {
+ ret = sock_intr_errno(timeo);
+ goto maybe_error;
+ }
+ } else {
+ mutex_lock(&call->user_mutex);
+ }
+ *_dropped_lock = false;
+ goto reload;
}
/*
@@ -629,6 +650,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
enum rxrpc_call_state state;
struct rxrpc_call *call;
unsigned long now, j;
+ bool dropped_lock = false;
int ret;
struct rxrpc_send_params p = {
@@ -737,21 +759,13 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
ret = rxrpc_send_abort_packet(call);
} else if (p.command != RXRPC_CMD_SEND_DATA) {
ret = -EINVAL;
- } else if (rxrpc_is_client_call(call) &&
- state != RXRPC_CALL_CLIENT_SEND_REQUEST) {
- /* request phase complete for this client call */
- ret = -EPROTO;
- } else if (rxrpc_is_service_call(call) &&
- state != RXRPC_CALL_SERVER_ACK_REQUEST &&
- state != RXRPC_CALL_SERVER_SEND_REPLY) {
- /* Reply phase not begun or not complete for service call. */
- ret = -EPROTO;
} else {
- ret = rxrpc_send_data(rx, call, msg, len, NULL);
+ ret = rxrpc_send_data(rx, call, msg, len, NULL, &dropped_lock);
}
out_put_unlock:
- mutex_unlock(&call->user_mutex);
+ if (!dropped_lock)
+ mutex_unlock(&call->user_mutex);
error_put:
rxrpc_put_call(call, rxrpc_call_put);
_leave(" = %d", ret);
@@ -779,6 +793,7 @@ int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
struct msghdr *msg, size_t len,
rxrpc_notify_end_tx_t notify_end_tx)
{
+ bool dropped_lock = false;
int ret;
_enter("{%d,%s},", call->debug_id, rxrpc_call_states[call->state]);
@@ -796,7 +811,7 @@ int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
case RXRPC_CALL_SERVER_ACK_REQUEST:
case RXRPC_CALL_SERVER_SEND_REPLY:
ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len,
- notify_end_tx);
+ notify_end_tx, &dropped_lock);
break;
case RXRPC_CALL_COMPLETE:
read_lock_bh(&call->state_lock);
@@ -810,7 +825,8 @@ int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
break;
}
- mutex_unlock(&call->user_mutex);
+ if (!dropped_lock)
+ mutex_unlock(&call->user_mutex);
_leave(" = %d", ret);
return ret;
}
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index a35ab8c27866..48712bc51bda 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -424,6 +424,11 @@ static int route4_set_parms(struct net *net, struct tcf_proto *tp,
return -EINVAL;
}
+ if (!nhandle) {
+ NL_SET_ERR_MSG(extack, "Replacing with handle of 0 is invalid");
+ return -EINVAL;
+ }
+
h1 = to_hash(nhandle);
b = rtnl_dereference(head->table[h1]);
if (!b) {
@@ -477,6 +482,11 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
int err;
bool new = true;
+ if (!handle) {
+ NL_SET_ERR_MSG(extack, "Creating with handle of 0 is invalid");
+ return -EINVAL;
+ }
+
if (opt == NULL)
return handle ? -EINVAL : 0;
@@ -526,7 +536,7 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
rcu_assign_pointer(f->next, f1);
rcu_assign_pointer(*fp, f);
- if (fold && fold->handle && f->handle != fold->handle) {
+ if (fold) {
th = to_hash(fold->handle);
h = from_hash(fold->handle >> 16);
b = rtnl_dereference(head->table[th]);
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index cc6eabee2830..99b697ad2b98 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -409,7 +409,7 @@ static inline bool qdisc_restart(struct Qdisc *q, int *packets)
void __qdisc_run(struct Qdisc *q)
{
- int quota = dev_tx_weight;
+ int quota = READ_ONCE(dev_tx_weight);
int packets;
while (qdisc_restart(q, &packets)) {
@@ -427,14 +427,10 @@ void __qdisc_run(struct Qdisc *q)
unsigned long dev_trans_start(struct net_device *dev)
{
- unsigned long val, res;
+ unsigned long res = READ_ONCE(netdev_get_tx_queue(dev, 0)->trans_start);
+ unsigned long val;
unsigned int i;
- if (is_vlan_dev(dev))
- dev = vlan_dev_real_dev(dev);
- else if (netif_is_macvlan(dev))
- dev = macvlan_dev_real_dev(dev);
- res = READ_ONCE(netdev_get_tx_queue(dev, 0)->trans_start);
for (i = 1; i < dev->num_tx_queues; i++) {
val = READ_ONCE(netdev_get_tx_queue(dev, i)->trans_start);
if (val && time_after(val, res))
diff --git a/net/socket.c b/net/socket.c
index 9b27c5e4e5ba..7378375d3a5b 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1801,7 +1801,7 @@ int __sys_listen(int fd, int backlog)
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (sock) {
- somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn;
+ somaxconn = READ_ONCE(sock_net(sock->sk)->core.sysctl_somaxconn);
if ((unsigned int)backlog > somaxconn)
backlog = somaxconn;
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 682fcd24bf43..fb75a883503f 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -445,7 +445,7 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan)
* Enforce a 60 second garbage collection moratorium
* Note that the cred_unused list must be time-ordered.
*/
- if (!time_in_range(cred->cr_expire, expired, jiffies))
+ if (time_in_range(cred->cr_expire, expired, jiffies))
continue;
if (!rpcauth_unhash_cred(cred))
continue;
@@ -874,7 +874,7 @@ int __init rpcauth_init_module(void)
err = rpc_init_authunix();
if (err < 0)
goto out1;
- err = register_shrinker(&rpc_cred_shrinker);
+ err = register_shrinker(&rpc_cred_shrinker, "sunrpc_cred");
if (err < 0)
goto out2;
return 0;
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index de7e5b41ab8f..a31a27816cc0 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1340,14 +1340,11 @@ gss_hash_cred(struct auth_cred *acred, unsigned int hashbits)
/*
* Lookup RPCSEC_GSS cred for the current process
*/
-static struct rpc_cred *
-gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
+static struct rpc_cred *gss_lookup_cred(struct rpc_auth *auth,
+ struct auth_cred *acred, int flags)
{
- gfp_t gfp = GFP_KERNEL;
-
- if (flags & RPCAUTH_LOOKUP_ASYNC)
- gfp = GFP_NOWAIT | __GFP_NOWARN;
- return rpcauth_lookup_credcache(auth, acred, flags, gfp);
+ return rpcauth_lookup_credcache(auth, acred, flags,
+ rpc_task_gfp_mask());
}
static struct rpc_cred *
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 253a54c2fcfe..65a6c6429a53 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -50,6 +50,17 @@ static void xprt_free_allocation(struct rpc_rqst *req)
kfree(req);
}
+static void xprt_bc_reinit_xdr_buf(struct xdr_buf *buf)
+{
+ buf->head[0].iov_len = PAGE_SIZE;
+ buf->tail[0].iov_len = 0;
+ buf->pages = NULL;
+ buf->page_len = 0;
+ buf->flags = 0;
+ buf->len = 0;
+ buf->buflen = PAGE_SIZE;
+}
+
static int xprt_alloc_xdr_buf(struct xdr_buf *buf, gfp_t gfp_flags)
{
struct page *page;
@@ -278,6 +289,9 @@ void xprt_free_bc_rqst(struct rpc_rqst *req)
*/
spin_lock_bh(&xprt->bc_pa_lock);
if (xprt_need_to_requeue(xprt)) {
+ xprt_bc_reinit_xdr_buf(&req->rq_snd_buf);
+ xprt_bc_reinit_xdr_buf(&req->rq_rcv_buf);
+ req->rq_rcv_buf.len = PAGE_SIZE;
list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
xprt->bc_alloc_count++;
atomic_inc(&xprt->bc_slot_count);
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index b6781ada3aa8..7d268a291486 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -786,7 +786,8 @@ out_revert:
EXPORT_SYMBOL_GPL(rpc_switch_client_transport);
static
-int rpc_clnt_xprt_iter_init(struct rpc_clnt *clnt, struct rpc_xprt_iter *xpi)
+int _rpc_clnt_xprt_iter_init(struct rpc_clnt *clnt, struct rpc_xprt_iter *xpi,
+ void func(struct rpc_xprt_iter *xpi, struct rpc_xprt_switch *xps))
{
struct rpc_xprt_switch *xps;
@@ -795,11 +796,24 @@ int rpc_clnt_xprt_iter_init(struct rpc_clnt *clnt, struct rpc_xprt_iter *xpi)
rcu_read_unlock();
if (xps == NULL)
return -EAGAIN;
- xprt_iter_init_listall(xpi, xps);
+ func(xpi, xps);
xprt_switch_put(xps);
return 0;
}
+static
+int rpc_clnt_xprt_iter_init(struct rpc_clnt *clnt, struct rpc_xprt_iter *xpi)
+{
+ return _rpc_clnt_xprt_iter_init(clnt, xpi, xprt_iter_init_listall);
+}
+
+static
+int rpc_clnt_xprt_iter_offline_init(struct rpc_clnt *clnt,
+ struct rpc_xprt_iter *xpi)
+{
+ return _rpc_clnt_xprt_iter_init(clnt, xpi, xprt_iter_init_listoffline);
+}
+
/**
* rpc_clnt_iterate_for_each_xprt - Apply a function to all transports
* @clnt: pointer to client
@@ -1856,7 +1870,6 @@ rpc_xdr_encode(struct rpc_task *task)
req->rq_snd_buf.head[0].iov_len = 0;
xdr_init_encode(&xdr, &req->rq_snd_buf,
req->rq_snd_buf.head[0].iov_base, req);
- xdr_free_bvec(&req->rq_snd_buf);
if (rpc_encode_header(task, &xdr))
return;
@@ -1889,7 +1902,7 @@ call_encode(struct rpc_task *task)
break;
case -EKEYEXPIRED:
if (!task->tk_cred_retry) {
- rpc_exit(task, task->tk_status);
+ rpc_call_rpcerror(task, task->tk_status);
} else {
task->tk_action = call_refresh;
task->tk_cred_retry--;
@@ -2137,7 +2150,8 @@ call_connect_status(struct rpc_task *task)
xprt_release(task);
value = atomic_long_dec_return(&xprt->queuelen);
if (value == 0)
- rpc_xprt_switch_remove_xprt(xps, saved);
+ rpc_xprt_switch_remove_xprt(xps, saved,
+ true);
xprt_put(saved);
task->tk_xprt = NULL;
task->tk_action = call_start;
@@ -2650,7 +2664,7 @@ out_unparsable:
out_verifier:
trace_rpc_bad_verifier(task);
- goto out_garbage;
+ goto out_err;
out_msg_denied:
error = -EACCES;
@@ -2866,6 +2880,30 @@ success:
}
EXPORT_SYMBOL_GPL(rpc_clnt_test_and_add_xprt);
+static int rpc_clnt_add_xprt_helper(struct rpc_clnt *clnt,
+ struct rpc_xprt *xprt,
+ struct rpc_add_xprt_test *data)
+{
+ struct rpc_task *task;
+ int status = -EADDRINUSE;
+
+ /* Test the connection */
+ task = rpc_call_null_helper(clnt, xprt, NULL, 0, NULL, NULL);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+
+ status = task->tk_status;
+ rpc_put_task(task);
+
+ if (status < 0)
+ return status;
+
+ /* rpc_xprt_switch and rpc_xprt are deferrenced by add_xprt_test() */
+ data->add_xprt_test(clnt, xprt, data->data);
+
+ return 0;
+}
+
/**
* rpc_clnt_setup_test_and_add_xprt()
*
@@ -2889,8 +2927,6 @@ int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *clnt,
struct rpc_xprt *xprt,
void *data)
{
- struct rpc_task *task;
- struct rpc_add_xprt_test *xtest = (struct rpc_add_xprt_test *)data;
int status = -EADDRINUSE;
xprt = xprt_get(xprt);
@@ -2899,31 +2935,19 @@ int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *clnt,
if (rpc_xprt_switch_has_addr(xps, (struct sockaddr *)&xprt->addr))
goto out_err;
- /* Test the connection */
- task = rpc_call_null_helper(clnt, xprt, NULL, 0, NULL, NULL);
- if (IS_ERR(task)) {
- status = PTR_ERR(task);
- goto out_err;
- }
- status = task->tk_status;
- rpc_put_task(task);
-
+ status = rpc_clnt_add_xprt_helper(clnt, xprt, data);
if (status < 0)
goto out_err;
- /* rpc_xprt_switch and rpc_xprt are deferrenced by add_xprt_test() */
- xtest->add_xprt_test(clnt, xprt, xtest->data);
-
- xprt_put(xprt);
- xprt_switch_put(xps);
-
- /* so that rpc_clnt_add_xprt does not call rpc_xprt_switch_add_xprt */
- return 1;
+ status = 1;
out_err:
xprt_put(xprt);
xprt_switch_put(xps);
- pr_info("RPC: rpc_clnt_test_xprt failed: %d addr %s not added\n",
- status, xprt->address_strings[RPC_DISPLAY_ADDR]);
+ if (status < 0)
+ pr_info("RPC: rpc_clnt_test_xprt failed: %d addr %s not "
+ "added\n", status,
+ xprt->address_strings[RPC_DISPLAY_ADDR]);
+ /* so that rpc_clnt_add_xprt does not call rpc_xprt_switch_add_xprt */
return status;
}
EXPORT_SYMBOL_GPL(rpc_clnt_setup_test_and_add_xprt);
@@ -3000,6 +3024,110 @@ out_put_switch:
}
EXPORT_SYMBOL_GPL(rpc_clnt_add_xprt);
+static int rpc_xprt_probe_trunked(struct rpc_clnt *clnt,
+ struct rpc_xprt *xprt,
+ struct rpc_add_xprt_test *data)
+{
+ struct rpc_xprt_switch *xps;
+ struct rpc_xprt *main_xprt;
+ int status = 0;
+
+ xprt_get(xprt);
+
+ rcu_read_lock();
+ main_xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
+ xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
+ status = rpc_cmp_addr_port((struct sockaddr *)&xprt->addr,
+ (struct sockaddr *)&main_xprt->addr);
+ rcu_read_unlock();
+ xprt_put(main_xprt);
+ if (status || !test_bit(XPRT_OFFLINE, &xprt->state))
+ goto out;
+
+ status = rpc_clnt_add_xprt_helper(clnt, xprt, data);
+out:
+ xprt_put(xprt);
+ xprt_switch_put(xps);
+ return status;
+}
+
+/* rpc_clnt_probe_trunked_xprt -- probe offlined transport for session trunking
+ * @clnt rpc_clnt structure
+ *
+ * For each offlined transport found in the rpc_clnt structure call
+ * the function rpc_xprt_probe_trunked() which will determine if this
+ * transport still belongs to the trunking group.
+ */
+void rpc_clnt_probe_trunked_xprts(struct rpc_clnt *clnt,
+ struct rpc_add_xprt_test *data)
+{
+ struct rpc_xprt_iter xpi;
+ int ret;
+
+ ret = rpc_clnt_xprt_iter_offline_init(clnt, &xpi);
+ if (ret)
+ return;
+ for (;;) {
+ struct rpc_xprt *xprt = xprt_iter_get_next(&xpi);
+
+ if (!xprt)
+ break;
+ ret = rpc_xprt_probe_trunked(clnt, xprt, data);
+ xprt_put(xprt);
+ if (ret < 0)
+ break;
+ xprt_iter_rewind(&xpi);
+ }
+ xprt_iter_destroy(&xpi);
+}
+EXPORT_SYMBOL_GPL(rpc_clnt_probe_trunked_xprts);
+
+static int rpc_xprt_offline(struct rpc_clnt *clnt,
+ struct rpc_xprt *xprt,
+ void *data)
+{
+ struct rpc_xprt *main_xprt;
+ struct rpc_xprt_switch *xps;
+ int err = 0;
+
+ xprt_get(xprt);
+
+ rcu_read_lock();
+ main_xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
+ xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
+ err = rpc_cmp_addr_port((struct sockaddr *)&xprt->addr,
+ (struct sockaddr *)&main_xprt->addr);
+ rcu_read_unlock();
+ xprt_put(main_xprt);
+ if (err)
+ goto out;
+
+ if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE)) {
+ err = -EINTR;
+ goto out;
+ }
+ xprt_set_offline_locked(xprt, xps);
+
+ xprt_release_write(xprt, NULL);
+out:
+ xprt_put(xprt);
+ xprt_switch_put(xps);
+ return err;
+}
+
+/* rpc_clnt_manage_trunked_xprts -- offline trunked transports
+ * @clnt rpc_clnt structure
+ *
+ * For each active transport found in the rpc_clnt structure call
+ * the function rpc_xprt_offline() which will identify trunked transports
+ * and will mark them offline.
+ */
+void rpc_clnt_manage_trunked_xprts(struct rpc_clnt *clnt)
+{
+ rpc_clnt_iterate_for_each_xprt(clnt, rpc_xprt_offline, NULL);
+}
+EXPORT_SYMBOL_GPL(rpc_clnt_manage_trunked_xprts);
+
struct connect_timeout_data {
unsigned long connect_timeout;
unsigned long reconnect_timeout;
@@ -3042,8 +3170,22 @@ void rpc_clnt_xprt_switch_put(struct rpc_clnt *clnt)
}
EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_put);
+void rpc_clnt_xprt_set_online(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
+{
+ struct rpc_xprt_switch *xps;
+
+ rcu_read_lock();
+ xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
+ rcu_read_unlock();
+ xprt_set_online_locked(xprt, xps);
+}
+
void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
{
+ if (rpc_clnt_xprt_switch_has_addr(clnt,
+ (const struct sockaddr *)&xprt->addr)) {
+ return rpc_clnt_xprt_set_online(clnt, xprt);
+ }
rcu_read_lock();
rpc_xprt_switch_add_xprt(rcu_dereference(clnt->cl_xpi.xpi_xpswitch),
xprt);
@@ -3051,6 +3193,19 @@ void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
}
EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_add_xprt);
+void rpc_clnt_xprt_switch_remove_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
+{
+ struct rpc_xprt_switch *xps;
+
+ rcu_read_lock();
+ xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
+ rpc_xprt_switch_remove_xprt(rcu_dereference(clnt->cl_xpi.xpi_xpswitch),
+ xprt, 0);
+ xps->xps_nunique_destaddr_xprts--;
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_remove_xprt);
+
bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
const struct sockaddr *sap)
{
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 7f70c1e608b7..25b9221950ff 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -63,6 +63,7 @@ gfp_t rpc_task_gfp_mask(void)
return GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
return GFP_KERNEL;
}
+EXPORT_SYMBOL_GPL(rpc_task_gfp_mask);
unsigned long
rpc_task_timeout(const struct rpc_task *task)
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 2c4dd7ca95b0..2106003645a7 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -691,7 +691,7 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
set_current_state(TASK_RUNNING);
return -EINTR;
}
- trace_svc_alloc_arg_err(pages);
+ trace_svc_alloc_arg_err(pages, ret);
memalloc_retry_wait(GFP_KERNEL);
}
rqstp->rq_page_end = &rqstp->rq_pages[pages];
diff --git a/net/sunrpc/sysfs.c b/net/sunrpc/sysfs.c
index a3a2f8aeb80e..c65c90ad626a 100644
--- a/net/sunrpc/sysfs.c
+++ b/net/sunrpc/sysfs.c
@@ -291,8 +291,10 @@ static ssize_t rpc_sysfs_xprt_state_change(struct kobject *kobj,
int offline = 0, online = 0, remove = 0;
struct rpc_xprt_switch *xps = rpc_sysfs_xprt_kobj_get_xprt_switch(kobj);
- if (!xprt)
- return 0;
+ if (!xprt || !xps) {
+ count = 0;
+ goto out_put;
+ }
if (!strncmp(buf, "offline", 7))
offline = 1;
@@ -314,32 +316,14 @@ static ssize_t rpc_sysfs_xprt_state_change(struct kobject *kobj,
goto release_tasks;
}
if (offline) {
- if (!test_and_set_bit(XPRT_OFFLINE, &xprt->state)) {
- spin_lock(&xps->xps_lock);
- xps->xps_nactive--;
- spin_unlock(&xps->xps_lock);
- }
+ xprt_set_offline_locked(xprt, xps);
} else if (online) {
- if (test_and_clear_bit(XPRT_OFFLINE, &xprt->state)) {
- spin_lock(&xps->xps_lock);
- xps->xps_nactive++;
- spin_unlock(&xps->xps_lock);
- }
+ xprt_set_online_locked(xprt, xps);
} else if (remove) {
- if (test_bit(XPRT_OFFLINE, &xprt->state)) {
- if (!test_and_set_bit(XPRT_REMOVE, &xprt->state)) {
- xprt_force_disconnect(xprt);
- if (test_bit(XPRT_CONNECTED, &xprt->state)) {
- if (!xprt->sending.qlen &&
- !xprt->pending.qlen &&
- !xprt->backlog.qlen &&
- !atomic_long_read(&xprt->queuelen))
- rpc_xprt_switch_remove_xprt(xps, xprt);
- }
- }
- } else {
+ if (test_bit(XPRT_OFFLINE, &xprt->state))
+ xprt_delete_locked(xprt, xps);
+ else
count = -EINVAL;
- }
}
release_tasks:
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 5d2b3e6979fb..482586c23fdd 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -775,6 +775,34 @@ static void xdr_buf_pages_shift_left(const struct xdr_buf *buf,
xdr_buf_tail_copy_left(buf, 0, len - buf->page_len, shift);
}
+static void xdr_buf_head_shift_left(const struct xdr_buf *buf,
+ unsigned int base, unsigned int len,
+ unsigned int shift)
+{
+ const struct kvec *head = buf->head;
+ unsigned int bytes;
+
+ if (!shift || !len)
+ return;
+
+ if (shift > base) {
+ bytes = (shift - base);
+ if (bytes >= len)
+ return;
+ base += bytes;
+ len -= bytes;
+ }
+
+ if (base < head->iov_len) {
+ bytes = min_t(unsigned int, len, head->iov_len - base);
+ memmove(head->iov_base + (base - shift),
+ head->iov_base + base, bytes);
+ base += bytes;
+ len -= bytes;
+ }
+ xdr_buf_pages_shift_left(buf, base - head->iov_len, len, shift);
+}
+
/**
* xdr_shrink_bufhead
* @buf: xdr_buf
@@ -1472,71 +1500,35 @@ unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
}
EXPORT_SYMBOL_GPL(xdr_read_pages);
-unsigned int xdr_align_data(struct xdr_stream *xdr, unsigned int offset,
- unsigned int length)
-{
- struct xdr_buf *buf = xdr->buf;
- unsigned int from, bytes, len;
- unsigned int shift;
-
- xdr_realign_pages(xdr);
- from = xdr_page_pos(xdr);
-
- if (from >= buf->page_len + buf->tail->iov_len)
- return 0;
- if (from + buf->head->iov_len >= buf->len)
- return 0;
-
- len = buf->len - buf->head->iov_len;
-
- /* We only shift data left! */
- if (WARN_ONCE(from < offset, "SUNRPC: misaligned data src=%u dst=%u\n",
- from, offset))
- return 0;
- if (WARN_ONCE(offset > buf->page_len,
- "SUNRPC: buffer overflow. offset=%u, page_len=%u\n",
- offset, buf->page_len))
- return 0;
-
- /* Move page data to the left */
- shift = from - offset;
- xdr_buf_pages_shift_left(buf, from, len, shift);
-
- bytes = xdr_stream_remaining(xdr);
- if (length > bytes)
- length = bytes;
- bytes -= length;
-
- xdr->buf->len -= shift;
- xdr_set_page(xdr, offset + length, bytes);
- return length;
-}
-EXPORT_SYMBOL_GPL(xdr_align_data);
-
-unsigned int xdr_expand_hole(struct xdr_stream *xdr, unsigned int offset,
- unsigned int length)
+/**
+ * xdr_set_pagelen - Sets the length of the XDR pages
+ * @xdr: pointer to xdr_stream struct
+ * @len: new length of the XDR page data
+ *
+ * Either grows or shrinks the length of the xdr pages by setting pagelen to
+ * @len bytes. When shrinking, any extra data is moved into buf->tail, whereas
+ * when growing, any data beyond the current pointer is moved into the tail.
+ */
+void xdr_set_pagelen(struct xdr_stream *xdr, unsigned int len)
{
struct xdr_buf *buf = xdr->buf;
- unsigned int from, to, shift;
-
- xdr_realign_pages(xdr);
- from = xdr_page_pos(xdr);
- to = xdr_align_size(offset + length);
-
- /* Could the hole be behind us? */
- if (to > from) {
- unsigned int buflen = buf->len - buf->head->iov_len;
- shift = to - from;
- xdr_buf_try_expand(buf, shift);
- xdr_buf_pages_shift_right(buf, from, buflen, shift);
- xdr_set_page(xdr, to, xdr_stream_remaining(xdr));
- } else if (to != from)
- xdr_align_data(xdr, to, 0);
- xdr_buf_pages_zero(buf, offset, length);
+ size_t remaining = xdr_stream_remaining(xdr);
+ size_t base = 0;
- return length;
+ if (len < buf->page_len) {
+ base = buf->page_len - len;
+ xdr_shrink_pagelen(buf, len);
+ } else {
+ xdr_buf_head_shift_right(buf, xdr_stream_pos(xdr),
+ buf->page_len, remaining);
+ if (len > buf->page_len)
+ xdr_buf_try_expand(buf, len - buf->page_len);
+ }
+ xdr_set_tail_base(xdr, base, remaining);
}
-EXPORT_SYMBOL_GPL(xdr_expand_hole);
+EXPORT_SYMBOL_GPL(xdr_set_pagelen);
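
[Editor's note: a hedged usage sketch, not part of this patch. A reply decoder that has just read an on-the-wire byte count could clamp the page data with the new helper; "count" and "xdr" are assumed to come from the surrounding decode routine.]

    /* Hypothetical decode-path sketch: trim the reply's page data down
     * to the number of bytes the server actually returned; any excess
     * is shifted into the tail by xdr_set_pagelen().
     */
    if (count < xdr->buf->page_len)
            xdr_set_pagelen(xdr, count);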
/**
* xdr_enter_page - decode data from the XDR page
@@ -1681,6 +1673,60 @@ bool xdr_stream_subsegment(struct xdr_stream *xdr, struct xdr_buf *subbuf,
EXPORT_SYMBOL_GPL(xdr_stream_subsegment);
/**
+ * xdr_stream_move_subsegment - Move part of a stream to another position
+ * @xdr: the source xdr_stream
+ * @offset: the source offset of the segment
+ * @target: the target offset of the segment
+ * @length: the number of bytes to move
+ *
+ * Moves @length bytes from @offset to @target in the xdr_stream, overwriting
+ * whatever is already at the target position. Returns the number of bytes moved.
+ */
+unsigned int xdr_stream_move_subsegment(struct xdr_stream *xdr, unsigned int offset,
+ unsigned int target, unsigned int length)
+{
+ struct xdr_buf buf;
+ unsigned int shift;
+
+ if (offset < target) {
+ shift = target - offset;
+ if (xdr_buf_subsegment(xdr->buf, &buf, offset, shift + length) < 0)
+ return 0;
+ xdr_buf_head_shift_right(&buf, 0, length, shift);
+ } else if (offset > target) {
+ shift = offset - target;
+ if (xdr_buf_subsegment(xdr->buf, &buf, target, shift + length) < 0)
+ return 0;
+ xdr_buf_head_shift_left(&buf, shift, length, shift);
+ }
+ return length;
+}
+EXPORT_SYMBOL_GPL(xdr_stream_move_subsegment);
+
+/**
+ * xdr_stream_zero - zero out a portion of an xdr_stream
+ * @xdr: an xdr_stream to zero out
+ * @offset: the starting point in the stream
+ * @length: the number of bytes to zero
+ *
+ * Returns the number of bytes zeroed, or 0 if the requested range does
+ * not fit within the stream's buffer.
+ */
+unsigned int xdr_stream_zero(struct xdr_stream *xdr, unsigned int offset,
+ unsigned int length)
+{
+ struct xdr_buf buf;
+
+ if (xdr_buf_subsegment(xdr->buf, &buf, offset, length) < 0)
+ return 0;
+ if (buf.head[0].iov_len)
+ xdr_buf_iov_zero(buf.head, 0, buf.head[0].iov_len);
+ if (buf.page_len > 0)
+ xdr_buf_pages_zero(&buf, 0, buf.page_len);
+ if (buf.tail[0].iov_len)
+ xdr_buf_iov_zero(buf.tail, 0, buf.tail[0].iov_len);
+ return length;
+}
+EXPORT_SYMBOL_GPL(xdr_stream_zero);
+
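
[Editor's note: the two helpers above are designed to be used together; a rough, hypothetical sketch of materialising a hole inside already-received data follows. "offset", "hole_len" and "data_len" are assumed locals of the caller, and this is not the actual NFS decode code.]

    /* Shift the data that follows the hole to the right to make room,
     * then zero-fill the gap so the stream reads as if the hole had
     * been transferred explicitly.
     */
    xdr_stream_move_subsegment(xdr, offset, offset + hole_len,
                               data_len - offset);
    xdr_stream_zero(xdr, offset, hole_len);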
+/**
* xdr_buf_trim - lop at most "len" bytes off the end of "buf"
* @buf: buf to be trimmed
* @len: number of bytes to reduce "buf" by
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 86d62cffba0d..d71eec494826 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -73,7 +73,7 @@ static void xprt_init(struct rpc_xprt *xprt, struct net *net);
static __be32 xprt_alloc_xid(struct rpc_xprt *xprt);
static void xprt_destroy(struct rpc_xprt *xprt);
static void xprt_request_init(struct rpc_task *task);
-static int xprt_request_prepare(struct rpc_rqst *req);
+static int xprt_request_prepare(struct rpc_rqst *req, struct xdr_buf *buf);
static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);
@@ -1149,7 +1149,7 @@ xprt_request_enqueue_receive(struct rpc_task *task)
if (!xprt_request_need_enqueue_receive(task, req))
return 0;
- ret = xprt_request_prepare(task->tk_rqstp);
+ ret = xprt_request_prepare(task->tk_rqstp, &req->rq_rcv_buf);
if (ret)
return ret;
spin_lock(&xprt->queue_lock);
@@ -1179,8 +1179,11 @@ xprt_request_dequeue_receive_locked(struct rpc_task *task)
{
struct rpc_rqst *req = task->tk_rqstp;
- if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
+ if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
xprt_request_rb_remove(req->rq_xprt, req);
+ xdr_free_bvec(&req->rq_rcv_buf);
+ req->rq_private_buf.bvec = NULL;
+ }
}
/**
@@ -1336,8 +1339,14 @@ xprt_request_enqueue_transmit(struct rpc_task *task)
{
struct rpc_rqst *pos, *req = task->tk_rqstp;
struct rpc_xprt *xprt = req->rq_xprt;
+ int ret;
if (xprt_request_need_enqueue_transmit(task, req)) {
+ ret = xprt_request_prepare(task->tk_rqstp, &req->rq_snd_buf);
+ if (ret) {
+ task->tk_status = ret;
+ return;
+ }
req->rq_bytes_sent = 0;
spin_lock(&xprt->queue_lock);
/*
@@ -1397,6 +1406,7 @@ xprt_request_dequeue_transmit_locked(struct rpc_task *task)
} else
list_del(&req->rq_xmit2);
atomic_long_dec(&req->rq_xprt->xmit_queuelen);
+ xdr_free_bvec(&req->rq_snd_buf);
}
/**
@@ -1433,8 +1443,6 @@ xprt_request_dequeue_xprt(struct rpc_task *task)
test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
xprt_is_pinned_rqst(req)) {
spin_lock(&xprt->queue_lock);
- xprt_request_dequeue_transmit_locked(task);
- xprt_request_dequeue_receive_locked(task);
while (xprt_is_pinned_rqst(req)) {
set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
spin_unlock(&xprt->queue_lock);
@@ -1442,6 +1450,8 @@ xprt_request_dequeue_xprt(struct rpc_task *task)
spin_lock(&xprt->queue_lock);
clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
}
+ xprt_request_dequeue_transmit_locked(task);
+ xprt_request_dequeue_receive_locked(task);
spin_unlock(&xprt->queue_lock);
}
}
@@ -1449,18 +1459,19 @@ xprt_request_dequeue_xprt(struct rpc_task *task)
/**
* xprt_request_prepare - prepare an encoded request for transport
* @req: pointer to rpc_rqst
+ * @buf: pointer to send/rcv xdr_buf
*
* Calls into the transport layer to do whatever is needed to prepare
* the request for transmission or receive.
* Returns error, or zero.
*/
static int
-xprt_request_prepare(struct rpc_rqst *req)
+xprt_request_prepare(struct rpc_rqst *req, struct xdr_buf *buf)
{
struct rpc_xprt *xprt = req->rq_xprt;
if (xprt->ops->prepare_request)
- return xprt->ops->prepare_request(req);
+ return xprt->ops->prepare_request(req, buf);
return 0;
}
@@ -1961,8 +1972,6 @@ void xprt_release(struct rpc_task *task)
spin_unlock(&xprt->transport_lock);
if (req->rq_buffer)
xprt->ops->buf_free(task);
- xdr_free_bvec(&req->rq_rcv_buf);
- xdr_free_bvec(&req->rq_snd_buf);
if (req->rq_cred != NULL)
put_rpccred(req->rq_cred);
if (req->rq_release_snd_buf)
@@ -2152,3 +2161,35 @@ void xprt_put(struct rpc_xprt *xprt)
kref_put(&xprt->kref, xprt_destroy_kref);
}
EXPORT_SYMBOL_GPL(xprt_put);
+
+void xprt_set_offline_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps)
+{
+ if (!test_and_set_bit(XPRT_OFFLINE, &xprt->state)) {
+ spin_lock(&xps->xps_lock);
+ xps->xps_nactive--;
+ spin_unlock(&xps->xps_lock);
+ }
+}
+
+void xprt_set_online_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps)
+{
+ if (test_and_clear_bit(XPRT_OFFLINE, &xprt->state)) {
+ spin_lock(&xps->xps_lock);
+ xps->xps_nactive++;
+ spin_unlock(&xps->xps_lock);
+ }
+}
+
+void xprt_delete_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps)
+{
+ if (test_and_set_bit(XPRT_REMOVE, &xprt->state))
+ return;
+
+ xprt_force_disconnect(xprt);
+ if (!test_bit(XPRT_CONNECTED, &xprt->state))
+ return;
+
+ if (!xprt->sending.qlen && !xprt->pending.qlen &&
+ !xprt->backlog.qlen && !atomic_long_read(&xprt->queuelen))
+ rpc_xprt_switch_remove_xprt(xps, xprt, true);
+}
diff --git a/net/sunrpc/xprtmultipath.c b/net/sunrpc/xprtmultipath.c
index 1693f81aae37..685db598acbe 100644
--- a/net/sunrpc/xprtmultipath.c
+++ b/net/sunrpc/xprtmultipath.c
@@ -27,6 +27,7 @@ typedef struct rpc_xprt *(*xprt_switch_find_xprt_t)(struct rpc_xprt_switch *xps,
static const struct rpc_xprt_iter_ops rpc_xprt_iter_singular;
static const struct rpc_xprt_iter_ops rpc_xprt_iter_roundrobin;
static const struct rpc_xprt_iter_ops rpc_xprt_iter_listall;
+static const struct rpc_xprt_iter_ops rpc_xprt_iter_listoffline;
static void xprt_switch_add_xprt_locked(struct rpc_xprt_switch *xps,
struct rpc_xprt *xprt)
@@ -61,11 +62,11 @@ void rpc_xprt_switch_add_xprt(struct rpc_xprt_switch *xps,
}
static void xprt_switch_remove_xprt_locked(struct rpc_xprt_switch *xps,
- struct rpc_xprt *xprt)
+ struct rpc_xprt *xprt, bool offline)
{
if (unlikely(xprt == NULL))
return;
- if (!test_bit(XPRT_OFFLINE, &xprt->state))
+ if (!test_bit(XPRT_OFFLINE, &xprt->state) && offline)
xps->xps_nactive--;
xps->xps_nxprts--;
if (xps->xps_nxprts == 0)
@@ -78,14 +79,15 @@ static void xprt_switch_remove_xprt_locked(struct rpc_xprt_switch *xps,
* rpc_xprt_switch_remove_xprt - Removes an rpc_xprt from a rpc_xprt_switch
* @xps: pointer to struct rpc_xprt_switch
* @xprt: pointer to struct rpc_xprt
+ * @offline: indicates if the xprt that's being removed is in an offline state
*
* Removes xprt from the list of struct rpc_xprt in xps.
*/
void rpc_xprt_switch_remove_xprt(struct rpc_xprt_switch *xps,
- struct rpc_xprt *xprt)
+ struct rpc_xprt *xprt, bool offline)
{
spin_lock(&xps->xps_lock);
- xprt_switch_remove_xprt_locked(xps, xprt);
+ xprt_switch_remove_xprt_locked(xps, xprt, offline);
spin_unlock(&xps->xps_lock);
xprt_put(xprt);
}
@@ -154,7 +156,7 @@ static void xprt_switch_free_entries(struct rpc_xprt_switch *xps)
xprt = list_first_entry(&xps->xps_xprt_list,
struct rpc_xprt, xprt_switch);
- xprt_switch_remove_xprt_locked(xps, xprt);
+ xprt_switch_remove_xprt_locked(xps, xprt, true);
spin_unlock(&xps->xps_lock);
xprt_put(xprt);
spin_lock(&xps->xps_lock);
@@ -249,6 +251,18 @@ struct rpc_xprt *xprt_switch_find_first_entry(struct list_head *head)
}
static
+struct rpc_xprt *xprt_switch_find_first_entry_offline(struct list_head *head)
+{
+ struct rpc_xprt *pos;
+
+ list_for_each_entry_rcu(pos, head, xprt_switch) {
+ if (!xprt_is_active(pos))
+ return pos;
+ }
+ return NULL;
+}
+
+static
struct rpc_xprt *xprt_iter_first_entry(struct rpc_xprt_iter *xpi)
{
struct rpc_xprt_switch *xps = rcu_dereference(xpi->xpi_xpswitch);
@@ -259,8 +273,9 @@ struct rpc_xprt *xprt_iter_first_entry(struct rpc_xprt_iter *xpi)
}
static
-struct rpc_xprt *xprt_switch_find_current_entry(struct list_head *head,
- const struct rpc_xprt *cur)
+struct rpc_xprt *_xprt_switch_find_current_entry(struct list_head *head,
+ const struct rpc_xprt *cur,
+ bool find_active)
{
struct rpc_xprt *pos;
bool found = false;
@@ -268,14 +283,25 @@ struct rpc_xprt *xprt_switch_find_current_entry(struct list_head *head,
list_for_each_entry_rcu(pos, head, xprt_switch) {
if (cur == pos)
found = true;
- if (found && xprt_is_active(pos))
+ if (found && ((find_active && xprt_is_active(pos)) ||
+ (!find_active && !xprt_is_active(pos))))
return pos;
}
return NULL;
}
static
-struct rpc_xprt *xprt_iter_current_entry(struct rpc_xprt_iter *xpi)
+struct rpc_xprt *xprt_switch_find_current_entry(struct list_head *head,
+ const struct rpc_xprt *cur)
+{
+ return _xprt_switch_find_current_entry(head, cur, true);
+}
+
+static
+struct rpc_xprt *_xprt_iter_current_entry(struct rpc_xprt_iter *xpi,
+ struct rpc_xprt *first_entry(struct list_head *head),
+ struct rpc_xprt *current_entry(struct list_head *head,
+ const struct rpc_xprt *cur))
{
struct rpc_xprt_switch *xps = rcu_dereference(xpi->xpi_xpswitch);
struct list_head *head;
@@ -284,8 +310,30 @@ struct rpc_xprt *xprt_iter_current_entry(struct rpc_xprt_iter *xpi)
return NULL;
head = &xps->xps_xprt_list;
if (xpi->xpi_cursor == NULL || xps->xps_nxprts < 2)
- return xprt_switch_find_first_entry(head);
- return xprt_switch_find_current_entry(head, xpi->xpi_cursor);
+ return first_entry(head);
+ return current_entry(head, xpi->xpi_cursor);
+}
+
+static
+struct rpc_xprt *xprt_iter_current_entry(struct rpc_xprt_iter *xpi)
+{
+ return _xprt_iter_current_entry(xpi, xprt_switch_find_first_entry,
+ xprt_switch_find_current_entry);
+}
+
+static
+struct rpc_xprt *xprt_switch_find_current_entry_offline(struct list_head *head,
+ const struct rpc_xprt *cur)
+{
+ return _xprt_switch_find_current_entry(head, cur, false);
+}
+
+static
+struct rpc_xprt *xprt_iter_current_entry_offline(struct rpc_xprt_iter *xpi)
+{
+ return _xprt_iter_current_entry(xpi,
+ xprt_switch_find_first_entry_offline,
+ xprt_switch_find_current_entry_offline);
}
bool rpc_xprt_switch_has_addr(struct rpc_xprt_switch *xps,
@@ -310,7 +358,7 @@ bool rpc_xprt_switch_has_addr(struct rpc_xprt_switch *xps,
static
struct rpc_xprt *xprt_switch_find_next_entry(struct list_head *head,
- const struct rpc_xprt *cur)
+ const struct rpc_xprt *cur, bool check_active)
{
struct rpc_xprt *pos, *prev = NULL;
bool found = false;
@@ -318,7 +366,12 @@ struct rpc_xprt *xprt_switch_find_next_entry(struct list_head *head,
list_for_each_entry_rcu(pos, head, xprt_switch) {
if (cur == prev)
found = true;
- if (found && xprt_is_active(pos))
+ /* When asked for active transports, return only active ones;
+ * when asked for offline transports, return only offline ones.
+ */
+ if (found && ((check_active && xprt_is_active(pos)) ||
+ (!check_active && !xprt_is_active(pos))))
return pos;
prev = pos;
}
@@ -355,7 +408,7 @@ struct rpc_xprt *__xprt_switch_find_next_entry_roundrobin(struct list_head *head
{
struct rpc_xprt *ret;
- ret = xprt_switch_find_next_entry(head, cur);
+ ret = xprt_switch_find_next_entry(head, cur, true);
if (ret != NULL)
return ret;
return xprt_switch_find_first_entry(head);
@@ -397,7 +450,14 @@ static
struct rpc_xprt *xprt_switch_find_next_entry_all(struct rpc_xprt_switch *xps,
const struct rpc_xprt *cur)
{
- return xprt_switch_find_next_entry(&xps->xps_xprt_list, cur);
+ return xprt_switch_find_next_entry(&xps->xps_xprt_list, cur, true);
+}
+
+static
+struct rpc_xprt *xprt_switch_find_next_entry_offline(struct rpc_xprt_switch *xps,
+ const struct rpc_xprt *cur)
+{
+ return xprt_switch_find_next_entry(&xps->xps_xprt_list, cur, false);
}
static
@@ -407,6 +467,13 @@ struct rpc_xprt *xprt_iter_next_entry_all(struct rpc_xprt_iter *xpi)
xprt_switch_find_next_entry_all);
}
+static
+struct rpc_xprt *xprt_iter_next_entry_offline(struct rpc_xprt_iter *xpi)
+{
+ return xprt_iter_next_entry_multiple(xpi,
+ xprt_switch_find_next_entry_offline);
+}
+
/*
* xprt_iter_rewind - Resets the xprt iterator
* @xpi: pointer to rpc_xprt_iter
@@ -414,7 +481,6 @@ struct rpc_xprt *xprt_iter_next_entry_all(struct rpc_xprt_iter *xpi)
* Resets xpi to ensure that it points to the first entry in the list
* of transports.
*/
-static
void xprt_iter_rewind(struct rpc_xprt_iter *xpi)
{
rcu_read_lock();
@@ -460,6 +526,12 @@ void xprt_iter_init_listall(struct rpc_xprt_iter *xpi,
__xprt_iter_init(xpi, xps, &rpc_xprt_iter_listall);
}
+void xprt_iter_init_listoffline(struct rpc_xprt_iter *xpi,
+ struct rpc_xprt_switch *xps)
+{
+ __xprt_iter_init(xpi, xps, &rpc_xprt_iter_listoffline);
+}
+
/**
* xprt_iter_xchg_switch - Atomically swap out the rpc_xprt_switch
* @xpi: pointer to rpc_xprt_iter
@@ -574,3 +646,10 @@ const struct rpc_xprt_iter_ops rpc_xprt_iter_listall = {
.xpi_xprt = xprt_iter_current_entry,
.xpi_next = xprt_iter_next_entry_all,
};
+
+static
+const struct rpc_xprt_iter_ops rpc_xprt_iter_listoffline = {
+ .xpi_rewind = xprt_iter_default_rewind,
+ .xpi_xprt = xprt_iter_current_entry_offline,
+ .xpi_next = xprt_iter_next_entry_offline,
+};
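
[Editor's note: a hedged sketch of driving the new list-offline iterator; the loop body is an assumption, and only the iterator calls themselves come from this patch and the existing xprtmultipath API.]

    struct rpc_xprt_iter xpi;
    struct rpc_xprt *xprt;

    xprt_iter_init_listoffline(&xpi, xps);
    while ((xprt = xprt_iter_get_next(&xpi)) != NULL) {
            /* inspect or re-activate the offline transport here */
            xprt_put(xprt);
    }
    xprt_iter_destroy(&xpi);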
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 6b7e10e5a141..bcb37b51adf6 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -571,11 +571,7 @@ xprt_rdma_allocate(struct rpc_task *task)
struct rpc_rqst *rqst = task->tk_rqstp;
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
- gfp_t flags;
-
- flags = RPCRDMA_DEF_GFP;
- if (RPC_IS_ASYNC(task))
- flags = GFP_NOWAIT | __GFP_NOWARN;
+ gfp_t flags = rpc_task_gfp_mask();
if (!rpcrdma_check_regbuf(r_xprt, req->rl_sendbuf, rqst->rq_callsize,
flags))
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index fcdd0fca408e..e976007f4fd0 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -822,17 +822,9 @@ static int xs_stream_nospace(struct rpc_rqst *req, bool vm_wait)
return ret;
}
-static int
-xs_stream_prepare_request(struct rpc_rqst *req)
+static int xs_stream_prepare_request(struct rpc_rqst *req, struct xdr_buf *buf)
{
- gfp_t gfp = rpc_task_gfp_mask();
- int ret;
-
- ret = xdr_alloc_bvec(&req->rq_snd_buf, gfp);
- if (ret < 0)
- return ret;
- xdr_free_bvec(&req->rq_rcv_buf);
- return xdr_alloc_bvec(&req->rq_rcv_buf, gfp);
+ return xdr_alloc_bvec(buf, rpc_task_gfp_mask());
}
/*
@@ -1378,7 +1370,7 @@ static void xs_udp_data_receive_workfn(struct work_struct *work)
}
/**
- * xs_data_ready - "data ready" callback for UDP sockets
+ * xs_data_ready - "data ready" callback for sockets
* @sk: socket with data to read
*
*/
@@ -1386,11 +1378,13 @@ static void xs_data_ready(struct sock *sk)
{
struct rpc_xprt *xprt;
- dprintk("RPC: xs_data_ready...\n");
xprt = xprt_from_sock(sk);
if (xprt != NULL) {
struct sock_xprt *transport = container_of(xprt,
struct sock_xprt, xprt);
+
+ trace_xs_data_ready(xprt);
+
transport->old_data_ready(sk);
/* Any data means we had a useful conversation, so
* then we don't need to delay the next reconnect
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index e3e6cf75aa03..0f983e5f7dde 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -71,7 +71,13 @@ static void tls_device_tx_del_task(struct work_struct *work)
struct tls_offload_context_tx *offload_ctx =
container_of(work, struct tls_offload_context_tx, destruct_work);
struct tls_context *ctx = offload_ctx->ctx;
- struct net_device *netdev = ctx->netdev;
+ struct net_device *netdev;
+
+ /* Safe, because this is the destroy flow, refcount is 0, so
+ * tls_device_down can't store this field in parallel.
+ */
+ netdev = rcu_dereference_protected(ctx->netdev,
+ !refcount_read(&ctx->refcount));
netdev->tlsdev_ops->tls_dev_del(netdev, ctx, TLS_OFFLOAD_CTX_DIR_TX);
dev_put(netdev);
@@ -81,6 +87,7 @@ static void tls_device_tx_del_task(struct work_struct *work)
static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
{
+ struct net_device *netdev;
unsigned long flags;
bool async_cleanup;
@@ -91,7 +98,14 @@ static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
}
list_del(&ctx->list); /* Remove from tls_device_list / tls_device_down_list */
- async_cleanup = ctx->netdev && ctx->tx_conf == TLS_HW;
+
+ /* Safe, because this is the destroy flow, refcount is 0, so
+ * tls_device_down can't store this field in parallel.
+ */
+ netdev = rcu_dereference_protected(ctx->netdev,
+ !refcount_read(&ctx->refcount));
+
+ async_cleanup = netdev && ctx->tx_conf == TLS_HW;
if (async_cleanup) {
struct tls_offload_context_tx *offload_ctx = tls_offload_ctx_tx(ctx);
@@ -229,7 +243,8 @@ static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
trace_tls_device_tx_resync_send(sk, seq, rcd_sn);
down_read(&device_offload_lock);
- netdev = tls_ctx->netdev;
+ netdev = rcu_dereference_protected(tls_ctx->netdev,
+ lockdep_is_held(&device_offload_lock));
if (netdev)
err = netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq,
rcd_sn,
@@ -710,7 +725,7 @@ static void tls_device_resync_rx(struct tls_context *tls_ctx,
trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type);
rcu_read_lock();
- netdev = READ_ONCE(tls_ctx->netdev);
+ netdev = rcu_dereference(tls_ctx->netdev);
if (netdev)
netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
TLS_OFFLOAD_CTX_DIR_RX);
@@ -984,11 +999,17 @@ int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx)
int is_decrypted = skb->decrypted;
int is_encrypted = !is_decrypted;
struct sk_buff *skb_iter;
+ int left;
+ left = rxm->full_len - skb->len;
/* Check if all the data is decrypted already */
- skb_walk_frags(skb, skb_iter) {
+ skb_iter = skb_shinfo(skb)->frag_list;
+ while (skb_iter && left > 0) {
is_decrypted &= skb_iter->decrypted;
is_encrypted &= !skb_iter->decrypted;
+
+ left -= skb_iter->len;
+ skb_iter = skb_iter->next;
}
trace_tls_device_decrypted(sk, tcp_sk(sk)->copied_seq - rxm->full_len,
@@ -1029,7 +1050,7 @@ static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
if (sk->sk_destruct != tls_device_sk_destruct) {
refcount_set(&ctx->refcount, 1);
dev_hold(netdev);
- ctx->netdev = netdev;
+ RCU_INIT_POINTER(ctx->netdev, netdev);
spin_lock_irq(&tls_device_lock);
list_add_tail(&ctx->list, &tls_device_list);
spin_unlock_irq(&tls_device_lock);
@@ -1300,7 +1321,8 @@ void tls_device_offload_cleanup_rx(struct sock *sk)
struct net_device *netdev;
down_read(&device_offload_lock);
- netdev = tls_ctx->netdev;
+ netdev = rcu_dereference_protected(tls_ctx->netdev,
+ lockdep_is_held(&device_offload_lock));
if (!netdev)
goto out;
@@ -1309,7 +1331,7 @@ void tls_device_offload_cleanup_rx(struct sock *sk)
if (tls_ctx->tx_conf != TLS_HW) {
dev_put(netdev);
- tls_ctx->netdev = NULL;
+ rcu_assign_pointer(tls_ctx->netdev, NULL);
} else {
set_bit(TLS_RX_DEV_CLOSED, &tls_ctx->flags);
}
@@ -1329,7 +1351,11 @@ static int tls_device_down(struct net_device *netdev)
spin_lock_irqsave(&tls_device_lock, flags);
list_for_each_entry_safe(ctx, tmp, &tls_device_list, list) {
- if (ctx->netdev != netdev ||
+ struct net_device *ctx_netdev =
+ rcu_dereference_protected(ctx->netdev,
+ lockdep_is_held(&device_offload_lock));
+
+ if (ctx_netdev != netdev ||
!refcount_inc_not_zero(&ctx->refcount))
continue;
@@ -1346,7 +1372,7 @@ static int tls_device_down(struct net_device *netdev)
/* Stop the RX and TX resync.
* tls_dev_resync must not be called after tls_dev_del.
*/
- WRITE_ONCE(ctx->netdev, NULL);
+ rcu_assign_pointer(ctx->netdev, NULL);
/* Start skipping the RX resync logic completely. */
set_bit(TLS_RX_DEV_DEGRADED, &ctx->flags);
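
[Editor's note: the tls_device changes above follow the standard three-role RCU pointer pattern; a minimal generic sketch is shown below. The lock name matches the one used in this file, everything else is schematic.]

    /* Reader path, no lock held: */
    rcu_read_lock();
    netdev = rcu_dereference(ctx->netdev);
    if (netdev)
            ; /* ... use netdev while inside the RCU read section ... */
    rcu_read_unlock();

    /* Updater path, with the update-side lock held: */
    rcu_assign_pointer(ctx->netdev, NULL);

    /* Teardown path, where concurrent updates are impossible: */
    netdev = rcu_dereference_protected(ctx->netdev,
                                       lockdep_is_held(&device_offload_lock));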
diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
index 618cee704217..7dfc8023e0f1 100644
--- a/net/tls/tls_device_fallback.c
+++ b/net/tls/tls_device_fallback.c
@@ -426,7 +426,8 @@ struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
struct net_device *dev,
struct sk_buff *skb)
{
- if (dev == tls_get_ctx(sk)->netdev || netif_is_bond_master(dev))
+ if (dev == rcu_dereference_bh(tls_get_ctx(sk)->netdev) ||
+ netif_is_bond_master(dev))
return skb;
return tls_sw_fallback(sk, skb);
diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c
index f0b7c9122fba..9b79e334dbd9 100644
--- a/net/tls/tls_strp.c
+++ b/net/tls/tls_strp.c
@@ -41,7 +41,7 @@ static struct sk_buff *tls_strp_msg_make_copy(struct tls_strparser *strp)
struct sk_buff *skb;
int i, err, offset;
- skb = alloc_skb_with_frags(0, strp->anchor->len, TLS_PAGE_ORDER,
+ skb = alloc_skb_with_frags(0, strp->stm.full_len, TLS_PAGE_ORDER,
&err, strp->sk->sk_allocation);
if (!skb)
return NULL;
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 17db8c8811fa..fe27241cd13f 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -1352,7 +1352,7 @@ static int tls_setup_from_iter(struct iov_iter *from,
rc = -EFAULT;
goto out;
}
- copied = iov_iter_get_pages(from, pages,
+ copied = iov_iter_get_pages2(from, pages,
length,
maxpages, &offset);
if (copied <= 0) {
@@ -1360,8 +1360,6 @@ static int tls_setup_from_iter(struct iov_iter *from,
goto out;
}
- iov_iter_advance(from, copied);
-
length -= copied;
size += copied;
while (copied) {
@@ -2704,7 +2702,9 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
crypto_info->version != TLS_1_3_VERSION &&
!!(tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC);
- tls_strp_init(&sw_ctx_rx->strp, sk);
+ rc = tls_strp_init(&sw_ctx_rx->strp, sk);
+ if (rc)
+ goto free_aead;
}
goto out;
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index f04abf662ec6..b4ee163154a6 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1286,6 +1286,7 @@ static void vsock_connect_timeout(struct work_struct *work)
if (sk->sk_state == TCP_SYN_SENT &&
(sk->sk_shutdown != SHUTDOWN_MASK)) {
sk->sk_state = TCP_CLOSE;
+ sk->sk_socket->state = SS_UNCONNECTED;
sk->sk_err = ETIMEDOUT;
sk_error_report(sk);
vsock_transport_cancel_pkt(vsk);
@@ -1391,7 +1392,14 @@ static int vsock_connect(struct socket *sock, struct sockaddr *addr,
* timeout fires.
*/
sock_hold(sk);
- schedule_delayed_work(&vsk->connect_work, timeout);
+
+ /* If the timeout handler is already pending, reschedule it
+ * and drop the extra socket reference taken above so the
+ * refcount stays balanced.
+ */
+ if (mod_delayed_work(system_wq, &vsk->connect_work,
+ timeout))
+ sock_put(sk);
/* Skip ahead to preserve error code set above. */
goto out_wait;
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 62c773cf1b8d..27fb2a0c4052 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -782,9 +782,11 @@ void __cfg80211_connect_result(struct net_device *dev,
#endif
if (cr->status == WLAN_STATUS_SUCCESS) {
- for_each_valid_link(cr, link) {
- if (WARN_ON_ONCE(!cr->links[link].bss))
- break;
+ if (!wiphy_to_rdev(wdev->wiphy)->ops->connect) {
+ for_each_valid_link(cr, link) {
+ if (WARN_ON_ONCE(!cr->links[link].bss))
+ break;
+ }
}
for_each_valid_link(cr, link) {
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 6bc2ac8d8146..3b55502b2965 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -719,6 +719,11 @@ static int x25_wait_for_connection_establishment(struct sock *sk)
sk->sk_socket->state = SS_UNCONNECTED;
break;
}
+ rc = -ENOTCONN;
+ if (sk->sk_state == TCP_CLOSE) {
+ sk->sk_socket->state = SS_UNCONNECTED;
+ break;
+ }
rc = 0;
if (sk->sk_state != TCP_ESTABLISHED) {
release_sock(sk);
diff --git a/net/xfrm/espintcp.c b/net/xfrm/espintcp.c
index 82d14eea1b5a..974eb97b77d2 100644
--- a/net/xfrm/espintcp.c
+++ b/net/xfrm/espintcp.c
@@ -168,7 +168,7 @@ int espintcp_queue_out(struct sock *sk, struct sk_buff *skb)
{
struct espintcp_ctx *ctx = espintcp_getctx(sk);
- if (skb_queue_len(&ctx->out_queue) >= netdev_max_backlog)
+ if (skb_queue_len(&ctx->out_queue) >= READ_ONCE(netdev_max_backlog))
return -ENOBUFS;
__skb_queue_tail(&ctx->out_queue, skb);
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 144238a50f3d..b2f4ec9c537f 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -669,7 +669,6 @@ resume:
x->curlft.bytes += skb->len;
x->curlft.packets++;
- x->curlft.use_time = ktime_get_real_seconds();
spin_unlock(&x->lock);
@@ -783,7 +782,7 @@ int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
trans = this_cpu_ptr(&xfrm_trans_tasklet);
- if (skb_queue_len(&trans->queue) >= netdev_max_backlog)
+ if (skb_queue_len(&trans->queue) >= READ_ONCE(netdev_max_backlog))
return -ENOBUFS;
BUILD_BUG_ON(sizeof(struct xfrm_trans_cb) > sizeof(skb->cb));
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 555ab35cd119..9a5e79a38c67 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -534,7 +534,6 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
x->curlft.bytes += skb->len;
x->curlft.packets++;
- x->curlft.use_time = ktime_get_real_seconds();
spin_unlock_bh(&x->lock);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index f1a0bab920a5..cc6ab79609e2 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -3162,7 +3162,7 @@ ok:
return dst;
nopol:
- if (!(dst_orig->dev->flags & IFF_LOOPBACK) &&
+ if ((!dst_orig->dev || !(dst_orig->dev->flags & IFF_LOOPBACK)) &&
net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
err = -EPERM;
goto error;
@@ -3599,6 +3599,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
if (pols[1]) {
if (IS_ERR(pols[1])) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
+ xfrm_pol_put(pols[0]);
return 0;
}
pols[1]->curlft.use_time = ktime_get_real_seconds();
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 52e60e607f8a..91c32a3b6924 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1592,6 +1592,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig,
x->replay = orig->replay;
x->preplay = orig->preplay;
x->mapping_maxage = orig->mapping_maxage;
+ x->lastused = orig->lastused;
x->new_mapping = 0;
x->new_mapping_sport = 0;
diff --git a/samples/trace_events/trace-events-sample.c b/samples/trace_events/trace-events-sample.c
index 4d34dc0b0fee..608c4ae3b08a 100644
--- a/samples/trace_events/trace-events-sample.c
+++ b/samples/trace_events/trace-events-sample.c
@@ -19,9 +19,10 @@ static const char *random_strings[] = {
"One ring to rule them all"
};
-static void simple_thread_func(int cnt)
+static void do_simple_thread_func(int cnt, const char *fmt, ...)
{
unsigned long bitmask[1] = {0xdeadbeefUL};
+ va_list va;
int array[6];
int len = cnt % 5;
int i;
@@ -33,9 +34,13 @@ static void simple_thread_func(int cnt)
array[i] = i + 1;
array[i] = 0;
+ va_start(va, fmt);
+
/* Silly tracepoints */
trace_foo_bar("hello", cnt, array, random_strings[len],
- current->cpus_ptr);
+ current->cpus_ptr, fmt, &va);
+
+ va_end(va);
trace_foo_with_template_simple("HELLO", cnt);
@@ -48,6 +53,11 @@ static void simple_thread_func(int cnt)
trace_foo_rel_loc("Hello __rel_loc", cnt, bitmask);
}
+static void simple_thread_func(int cnt)
+{
+ do_simple_thread_func(cnt, "iter=%d", cnt);
+}
+
static int simple_thread(void *arg)
{
int cnt = 0;
diff --git a/samples/trace_events/trace-events-sample.h b/samples/trace_events/trace-events-sample.h
index cbbbb83beced..1a92226202fc 100644
--- a/samples/trace_events/trace-events-sample.h
+++ b/samples/trace_events/trace-events-sample.h
@@ -141,6 +141,27 @@
* In most cases, the __assign_str() macro will take the same
* parameters as the __string() macro had to declare the string.
*
+ * __vstring: This is similar to __string() but instead of taking a
+ * dynamic length, it takes a format string and a va_list 'va' variable.
+ * Some event callers already have a message built from parameters saved
+ * in a va_list. Passing in the format and the va_list variable
+ * reserves just enough space on the ring buffer for that string.
+ * Note, the va variable used is a pointer to a va_list, not
+ * to the va_list directly.
+ *
+ * (va_list *va)
+ *
+ * __vstring(foo, fmt, va) is similar to: vsnprintf(foo, fmt, va)
+ *
+ * To assign the string, use the helper macro __assign_vstr().
+ *
+ * __assign_vstr(foo, fmt, va);
+ *
+ * In most cases, the __assign_vstr() macro will take the same
+ * parameters as the __vstring() macro had to declare the string.
+ * Use __get_str() to retrieve a __vstring() field, just as you would
+ * for __string().
+ *
* __string_len: This is a helper to a __dynamic_array, but it understands
* that the array has characters in it, and with the combined
* use of __assign_str_len(), it will allocate 'len' + 1 bytes
@@ -256,9 +277,10 @@ TRACE_DEFINE_ENUM(TRACE_SAMPLE_ZOO);
TRACE_EVENT(foo_bar,
TP_PROTO(const char *foo, int bar, const int *lst,
- const char *string, const struct cpumask *mask),
+ const char *string, const struct cpumask *mask,
+ const char *fmt, va_list *va),
- TP_ARGS(foo, bar, lst, string, mask),
+ TP_ARGS(foo, bar, lst, string, mask, fmt, va),
TP_STRUCT__entry(
__array( char, foo, 10 )
@@ -266,6 +288,7 @@ TRACE_EVENT(foo_bar,
__dynamic_array(int, list, __length_of(lst))
__string( str, string )
__bitmask( cpus, num_possible_cpus() )
+ __vstring( vstr, fmt, va )
),
TP_fast_assign(
@@ -274,10 +297,11 @@ TRACE_EVENT(foo_bar,
memcpy(__get_dynamic_array(list), lst,
__length_of(lst) * sizeof(int));
__assign_str(str, string);
+ __assign_vstr(vstr, fmt, va);
__assign_bitmask(cpus, cpumask_bits(mask), num_possible_cpus());
),
- TP_printk("foo %s %d %s %s %s %s (%s)", __entry->foo, __entry->bar,
+ TP_printk("foo %s %d %s %s %s %s (%s) %s", __entry->foo, __entry->bar,
/*
* Notice here the use of some helper functions. This includes:
@@ -321,7 +345,7 @@ TRACE_EVENT(foo_bar,
__print_array(__get_dynamic_array(list),
__get_dynamic_array_len(list) / sizeof(int),
sizeof(int)),
- __get_str(str), __get_bitmask(cpus))
+ __get_str(str), __get_bitmask(cpus), __get_str(vstr))
);
/*
diff --git a/scripts/Kconfig.include b/scripts/Kconfig.include
index 0496efd6e117..a0ccceb22cf8 100644
--- a/scripts/Kconfig.include
+++ b/scripts/Kconfig.include
@@ -25,7 +25,7 @@ failure = $(if-success,$(1),n,y)
# $(cc-option,<flag>)
# Return y if the compiler supports <flag>, n otherwise
-cc-option = $(success,mkdir .tmp_$$$$; trap "rm -rf .tmp_$$$$" EXIT; $(CC) -Werror $(CLANG_FLAGS) $(1) -c -x c /dev/null -o .tmp_$$$$/tmp.o)
+cc-option = $(success,trap "rm -rf .tmp_$$" EXIT; mkdir .tmp_$$; $(CC) -Werror $(CLANG_FLAGS) $(1) -c -x c /dev/null -o .tmp_$$/tmp.o)
# $(ld-option,<flag>)
# Return y if the linker supports <flag>, n otherwise
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index cac070aee791..784f46d41959 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -358,9 +358,8 @@ $(subdir-modorder): $(obj)/%/modules.order: $(obj)/% ;
quiet_cmd_ar_builtin = AR $@
cmd_ar_builtin = rm -f $@; \
- echo $(patsubst $(obj)/%,%,$(real-prereqs)) | \
- sed -E 's:([^ ]+):$(obj)/\1:g' | \
- xargs $(AR) cDPrST $@
+ $(if $(real-prereqs), printf "$(obj)/%s " $(patsubst $(obj)/%,%,$(real-prereqs)) | xargs) \
+ $(AR) cDPrST $@
$(obj)/built-in.a: $(real-obj-y) FORCE
$(call if_changed,ar_builtin)
diff --git a/scripts/Makefile.compiler b/scripts/Makefile.compiler
index 86ecd2ac874c..94d0d40cddb3 100644
--- a/scripts/Makefile.compiler
+++ b/scripts/Makefile.compiler
@@ -21,8 +21,8 @@ TMPOUT = $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/).tmp_$$$$
# automatically cleaned up.
try-run = $(shell set -e; \
TMP=$(TMPOUT)/tmp; \
- mkdir -p $(TMPOUT); \
trap "rm -rf $(TMPOUT)" EXIT; \
+ mkdir -p $(TMPOUT); \
if ($(1)) >/dev/null 2>&1; \
then echo "$(2)"; \
else echo "$(3)"; \
diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
index f5f0d6f09053..0621c39a3955 100644
--- a/scripts/Makefile.extrawarn
+++ b/scripts/Makefile.extrawarn
@@ -49,7 +49,6 @@ ifdef CONFIG_CC_IS_CLANG
KBUILD_CFLAGS += -Wno-initializer-overrides
KBUILD_CFLAGS += -Wno-format
KBUILD_CFLAGS += -Wno-sign-compare
-KBUILD_CFLAGS += -Wno-format-zero-length
KBUILD_CFLAGS += $(call cc-disable-warning, pointer-to-enum-cast)
KBUILD_CFLAGS += -Wno-tautological-constant-out-of-range-compare
KBUILD_CFLAGS += $(call cc-disable-warning, unaligned-access)
diff --git a/scripts/Makefile.gcc-plugins b/scripts/Makefile.gcc-plugins
index 692d64a70542..e4deaf5fa571 100644
--- a/scripts/Makefile.gcc-plugins
+++ b/scripts/Makefile.gcc-plugins
@@ -4,7 +4,7 @@ gcc-plugin-$(CONFIG_GCC_PLUGIN_LATENT_ENTROPY) += latent_entropy_plugin.so
gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_LATENT_ENTROPY) \
+= -DLATENT_ENTROPY_PLUGIN
ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
- DISABLE_LATENT_ENTROPY_PLUGIN += -fplugin-arg-latent_entropy_plugin-disable
+ DISABLE_LATENT_ENTROPY_PLUGIN += -fplugin-arg-latent_entropy_plugin-disable -ULATENT_ENTROPY_PLUGIN
endif
export DISABLE_LATENT_ENTROPY_PLUGIN
diff --git a/scripts/Makefile.modinst b/scripts/Makefile.modinst
index 16a02e9237d3..a4c987c23750 100644
--- a/scripts/Makefile.modinst
+++ b/scripts/Makefile.modinst
@@ -18,6 +18,9 @@ INSTALL_MOD_DIR ?= extra
dst := $(MODLIB)/$(INSTALL_MOD_DIR)
endif
+$(foreach x, % :, $(if $(findstring $x, $(dst)), \
+ $(error module installation path cannot contain '$x')))
+
suffix-y :=
suffix-$(CONFIG_MODULE_COMPRESS_GZIP) := .gz
suffix-$(CONFIG_MODULE_COMPRESS_XZ) := .xz
diff --git a/scripts/Makefile.package b/scripts/Makefile.package
index 77b612183c08..5017f6b2da80 100644
--- a/scripts/Makefile.package
+++ b/scripts/Makefile.package
@@ -56,7 +56,7 @@ rpm-pkg:
$(MAKE) clean
$(CONFIG_SHELL) $(MKSPEC) >$(objtree)/kernel.spec
$(call cmd,src_tar,$(KERNELPATH),kernel.spec)
- +rpmbuild $(RPMOPTS) --target $(UTS_MACHINE) -ta $(KERNELPATH).tar.gz \
+ +rpmbuild $(RPMOPTS) --target $(UTS_MACHINE)-linux -ta $(KERNELPATH).tar.gz \
--define='_smp_mflags %{nil}'
# binrpm-pkg
@@ -66,7 +66,7 @@ binrpm-pkg:
$(MAKE) -f $(srctree)/Makefile
$(CONFIG_SHELL) $(MKSPEC) prebuilt > $(objtree)/binkernel.spec
+rpmbuild $(RPMOPTS) --define "_builddir $(objtree)" --target \
- $(UTS_MACHINE) -bb $(objtree)/binkernel.spec
+ $(UTS_MACHINE)-linux -bb $(objtree)/binkernel.spec
PHONY += deb-pkg
deb-pkg:
diff --git a/scripts/bloat-o-meter b/scripts/bloat-o-meter
index 4dd6a804ce41..f9553f60a14a 100755
--- a/scripts/bloat-o-meter
+++ b/scripts/bloat-o-meter
@@ -7,24 +7,31 @@
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.
-import sys, os, re
+import sys, os, re, argparse
from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE, SIG_DFL)
-if len(sys.argv) < 3:
- sys.stderr.write("usage: %s [option] file1 file2\n" % sys.argv[0])
- sys.stderr.write("The options are:\n")
- sys.stderr.write("-c categorize output based on symbol type\n")
- sys.stderr.write("-d Show delta of Data Section\n")
- sys.stderr.write("-t Show delta of text Section\n")
- sys.exit(-1)
+parser = argparse.ArgumentParser(description="Simple script used to compare the symbol sizes of 2 object files")
+group = parser.add_mutually_exclusive_group()
+group.add_argument('-c', help='categorize output based on symbol type', action='store_true')
+group.add_argument('-d', help='Show delta of Data Section', action='store_true')
+group.add_argument('-t', help='Show delta of text Section', action='store_true')
+parser.add_argument('-p', dest='prefix', help='Arch prefix for the tool being used. Useful in cross build scenarios')
+parser.add_argument('file1', help='First file to compare')
+parser.add_argument('file2', help='Second file to compare')
+
+args = parser.parse_args()
re_NUMBER = re.compile(r'\.[0-9]+')
def getsizes(file, format):
sym = {}
- with os.popen("nm --size-sort " + file) as f:
+ nm = "nm"
+ if args.prefix:
+ nm = "{}nm".format(args.prefix)
+
+ with os.popen("{} --size-sort {}".format(nm, file)) as f:
for line in f:
if line.startswith("\n") or ":" in line:
continue
@@ -77,9 +84,9 @@ def calc(oldfile, newfile, format):
delta.reverse()
return grow, shrink, add, remove, up, down, delta, old, new, otot, ntot
-def print_result(symboltype, symbolformat, argc):
+def print_result(symboltype, symbolformat):
grow, shrink, add, remove, up, down, delta, old, new, otot, ntot = \
- calc(sys.argv[argc - 1], sys.argv[argc], symbolformat)
+ calc(args.file1, args.file2, symbolformat)
print("add/remove: %s/%s grow/shrink: %s/%s up/down: %s/%s (%s)" % \
(add, remove, grow, shrink, up, -down, up-down))
@@ -93,13 +100,13 @@ def print_result(symboltype, symbolformat, argc):
percent = 0
print("Total: Before=%d, After=%d, chg %+.2f%%" % (otot, ntot, percent))
-if sys.argv[1] == "-c":
- print_result("Function", "tT", 3)
- print_result("Data", "dDbB", 3)
- print_result("RO Data", "rR", 3)
-elif sys.argv[1] == "-d":
- print_result("Data", "dDbBrR", 3)
-elif sys.argv[1] == "-t":
- print_result("Function", "tT", 3)
+if args.c:
+ print_result("Function", "tT")
+ print_result("Data", "dDbB")
+ print_result("RO Data", "rR")
+elif args.d:
+ print_result("Data", "dDbBrR")
+elif args.t:
+ print_result("Function", "tT")
else:
- print_result("Function", "tTdDbBrR", 2)
+ print_result("Function", "tTdDbBrR")
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 503e8abbb2c1..79e759aac543 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -1042,7 +1042,8 @@ our $FuncArg = qr{$Typecast{0,1}($LvalOrFunc|$Constant|$String)};
our $declaration_macros = qr{(?x:
(?:$Storage\s+)?(?:[A-Z_][A-Z0-9]*_){0,2}(?:DEFINE|DECLARE)(?:_[A-Z0-9]+){1,6}\s*\(|
(?:$Storage\s+)?[HLP]?LIST_HEAD\s*\(|
- (?:SKCIPHER_REQUEST|SHASH_DESC|AHASH_REQUEST)_ON_STACK\s*\(
+ (?:SKCIPHER_REQUEST|SHASH_DESC|AHASH_REQUEST)_ON_STACK\s*\(|
+ (?:$Storage\s+)?(?:XA_STATE|XA_STATE_ORDER)\s*\(
)};
our %allow_repeated_words = (
@@ -5720,7 +5721,7 @@ sub process {
$var !~ /^(?:[a-z0-9_]*|[A-Z0-9_]*)?_?[a-z][A-Z](?:_[a-z0-9_]+|_[A-Z0-9_]+)?$/ &&
#Ignore some three character SI units explicitly, like MiB and KHz
$var !~ /^(?:[a-z_]*?)_?(?:[KMGT]iB|[KMGT]?Hz)(?:_[a-z_]+)?$/) {
- while ($var =~ m{($Ident)}g) {
+ while ($var =~ m{\b($Ident)}g) {
my $word = $1;
next if ($word !~ /[A-Z][a-z]|[a-z][A-Z]/);
if ($check) {
diff --git a/scripts/checkstack.pl b/scripts/checkstack.pl
index d2c38584ece6..d48dfed6d3db 100755
--- a/scripts/checkstack.pl
+++ b/scripts/checkstack.pl
@@ -16,6 +16,7 @@
# AArch64, PARISC ports by Kyle McMartin
# sparc port by Martin Habets <errandir_news@mph.eclipse.co.uk>
# ppc64le port by Breno Leitao <leitao@debian.org>
+# riscv port by Wadim Mueller <wafgo01@gmail.com>
#
# Usage:
# objdump -d vmlinux | scripts/checkstack.pl [arch]
@@ -108,6 +109,9 @@ my (@stack, $re, $dre, $sub, $x, $xs, $funcre, $min_stack);
} elsif ($arch eq 'sparc' || $arch eq 'sparc64') {
# f0019d10: 9d e3 bf 90 save %sp, -112, %sp
$re = qr/.*save.*%sp, -(([0-9]{2}|[3-9])[0-9]{2}), %sp/o;
+ } elsif ($arch =~ /^riscv(64)?$/) {
+ #ffffffff8036e868: c2010113 addi sp,sp,-992
+ $re = qr/.*addi.*sp,sp,-(([0-9]{2}|[3-9])[0-9]{2})/o;
} else {
print("wrong or unknown architecture \"$arch\"\n");
exit
diff --git a/scripts/clang-tools/run-clang-tools.py b/scripts/clang-tools/run-clang-tools.py
index f754415af398..1337cedca096 100755
--- a/scripts/clang-tools/run-clang-tools.py
+++ b/scripts/clang-tools/run-clang-tools.py
@@ -51,6 +51,7 @@ def run_analysis(entry):
checks += "linuxkernel-*"
else:
checks += "clang-analyzer-*"
+ checks += ",-clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling"
p = subprocess.run(["clang-tidy", "-p", args.path, checks, entry["file"]],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
diff --git a/scripts/coccinelle/api/alloc/zalloc-simple.cocci b/scripts/coccinelle/api/alloc/zalloc-simple.cocci
index b3d0c3c230c1..d66c45356691 100644
--- a/scripts/coccinelle/api/alloc/zalloc-simple.cocci
+++ b/scripts/coccinelle/api/alloc/zalloc-simple.cocci
@@ -10,7 +10,7 @@
// Copyright: (C) 2009-2010 Julia Lawall, Nicolas Palix, DIKU.
// Copyright: (C) 2009-2010 Gilles Muller, INRIA/LiP6.
// Copyright: (C) 2017 Himanshu Jha
-// URL: http://coccinelle.lip6.fr/rules/kzalloc.html
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Options: --no-includes --include-headers
//
// Keywords: kmalloc, kzalloc
diff --git a/scripts/coccinelle/api/atomic_as_refcounter.cocci b/scripts/coccinelle/api/atomic_as_refcounter.cocci
index 0f78d94abc35..e63d52408b86 100644
--- a/scripts/coccinelle/api/atomic_as_refcounter.cocci
+++ b/scripts/coccinelle/api/atomic_as_refcounter.cocci
@@ -5,7 +5,7 @@
// Copyright (c) 2016-2017, Elena Reshetova, Intel Corporation
//
// Confidence: Moderate
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Options: --include-headers --very-quiet
virtual report
diff --git a/scripts/coccinelle/api/check_bq27xxx_data.cocci b/scripts/coccinelle/api/check_bq27xxx_data.cocci
index fae539ef0ce5..27366c6169ec 100644
--- a/scripts/coccinelle/api/check_bq27xxx_data.cocci
+++ b/scripts/coccinelle/api/check_bq27xxx_data.cocci
@@ -6,7 +6,7 @@
///
// Confidence: High
// Copyright: (C) 2017 Julia Lawall, Inria/LIP6,
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Requires: 1.0.7
// Keywords: BQ27XXX_DATA
diff --git a/scripts/coccinelle/api/d_find_alias.cocci b/scripts/coccinelle/api/d_find_alias.cocci
index 47e050166f20..3489001378ab 100644
--- a/scripts/coccinelle/api/d_find_alias.cocci
+++ b/scripts/coccinelle/api/d_find_alias.cocci
@@ -4,7 +4,7 @@
// Keywords: d_find_alias, dput
//
// Confidence: Moderate
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Options: --include-headers
virtual context
diff --git a/scripts/coccinelle/api/err_cast.cocci b/scripts/coccinelle/api/err_cast.cocci
index 0e661c8d8d6f..7f9dc1212c74 100644
--- a/scripts/coccinelle/api/err_cast.cocci
+++ b/scripts/coccinelle/api/err_cast.cocci
@@ -6,7 +6,7 @@
// Copyright: (C) 2009, 2010 Nicolas Palix, DIKU.
// Copyright: (C) 2009, 2010 Julia Lawall, DIKU.
// Copyright: (C) 2009, 2010 Gilles Muller, INRIA/LiP6.
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Options:
//
// Keywords: ERR_PTR, PTR_ERR, ERR_CAST
diff --git a/scripts/coccinelle/api/kstrdup.cocci b/scripts/coccinelle/api/kstrdup.cocci
index 3c6dc5469ee4..8a61534676da 100644
--- a/scripts/coccinelle/api/kstrdup.cocci
+++ b/scripts/coccinelle/api/kstrdup.cocci
@@ -5,7 +5,7 @@
// Copyright: (C) 2010-2012 Nicolas Palix.
// Copyright: (C) 2010-2012 Julia Lawall, INRIA/LIP6.
// Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6.
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Comments:
// Options: --no-includes --include-headers
diff --git a/scripts/coccinelle/api/memdup.cocci b/scripts/coccinelle/api/memdup.cocci
index 30b15df734e5..d28741c69873 100644
--- a/scripts/coccinelle/api/memdup.cocci
+++ b/scripts/coccinelle/api/memdup.cocci
@@ -5,7 +5,7 @@
// Copyright: (C) 2010-2012 Nicolas Palix.
// Copyright: (C) 2010-2012 Julia Lawall, INRIA/LIP6.
// Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6.
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Comments:
// Options: --no-includes --include-headers
diff --git a/scripts/coccinelle/api/memdup_user.cocci b/scripts/coccinelle/api/memdup_user.cocci
index e01e95108405..03e7afa09eee 100644
--- a/scripts/coccinelle/api/memdup_user.cocci
+++ b/scripts/coccinelle/api/memdup_user.cocci
@@ -6,7 +6,7 @@
// Copyright: (C) 2010-2012 Nicolas Palix.
// Copyright: (C) 2010-2012 Julia Lawall, INRIA/LIP6.
// Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6.
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Comments:
// Options: --no-includes --include-headers
diff --git a/scripts/coccinelle/api/pm_runtime.cocci b/scripts/coccinelle/api/pm_runtime.cocci
index 1ccce3fd00b8..4b9778874453 100644
--- a/scripts/coccinelle/api/pm_runtime.cocci
+++ b/scripts/coccinelle/api/pm_runtime.cocci
@@ -4,7 +4,7 @@
// Keywords: pm_runtime
// Confidence: Medium
// Copyright (C) 2013 Texas Instruments Incorporated -
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Options: --include-headers
virtual patch
diff --git a/scripts/coccinelle/api/resource_size.cocci b/scripts/coccinelle/api/resource_size.cocci
index a9a571ac04ce..16857072d162 100644
--- a/scripts/coccinelle/api/resource_size.cocci
+++ b/scripts/coccinelle/api/resource_size.cocci
@@ -7,7 +7,7 @@
// Copyright: (C) 2009, 2010 Nicolas Palix, DIKU.
// Copyright: (C) 2009, 2010 Julia Lawall, DIKU.
// Copyright: (C) 2009, 2010 Gilles Muller, INRIA/LiP6.
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Options:
//
// Keywords: resource_size
diff --git a/scripts/coccinelle/free/clk_put.cocci b/scripts/coccinelle/free/clk_put.cocci
index 7237b49496f6..3c732cb7210b 100644
--- a/scripts/coccinelle/free/clk_put.cocci
+++ b/scripts/coccinelle/free/clk_put.cocci
@@ -8,7 +8,7 @@
// Confidence: Moderate
// Copyright: (C) 2012 Julia Lawall, INRIA/LIP6.
// Copyright: (C) 2012 Gilles Muller, INRIA/LiP6.
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Comments:
// Options:
diff --git a/scripts/coccinelle/free/devm_free.cocci b/scripts/coccinelle/free/devm_free.cocci
index da80050b91ff..0880729badbc 100644
--- a/scripts/coccinelle/free/devm_free.cocci
+++ b/scripts/coccinelle/free/devm_free.cocci
@@ -17,7 +17,7 @@
// Confidence: Moderate
// Copyright: (C) 2011 Julia Lawall, INRIA/LIP6.
// Copyright: (C) 2011 Gilles Muller, INRIA/LiP6.
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Comments:
// Options: --no-includes --include-headers
diff --git a/scripts/coccinelle/free/ifnulldev_put.cocci b/scripts/coccinelle/free/ifnulldev_put.cocci
new file mode 100644
index 000000000000..2bd2e8fae485
--- /dev/null
+++ b/scripts/coccinelle/free/ifnulldev_put.cocci
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/// Since commit b37a46683739 ("netdevice: add the case if dev is NULL"),
+/// NULL check before dev_{put, hold} functions is not needed.
+///
+/// Based on ifnullfree.cocci by Fabian Frederick.
+///
+// Copyright: (C) 2022 Ziyang Xuan.
+// Comments: -
+// Options: --no-includes --include-headers
+// Version min: 5.15
+
+virtual patch
+virtual org
+virtual report
+virtual context
+
+@r2 depends on patch@
+expression E;
+@@
+- if (E != NULL)
+(
+ __dev_put(E);
+|
+ dev_put(E);
+|
+ dev_put_track(E, ...);
+|
+ __dev_hold(E);
+|
+ dev_hold(E);
+|
+ dev_hold_track(E, ...);
+)
+
+@r depends on context || report || org @
+expression E;
+position p;
+@@
+
+* if (E != NULL)
+* \(__dev_put@p\|dev_put@p\|dev_put_track@p\|__dev_hold@p\|dev_hold@p\|
+* dev_hold_track@p\)(E, ...);
+
+@script:python depends on org@
+p << r.p;
+@@
+
+cocci.print_main("NULL check before dev_{put, hold} functions is not needed", p)
+
+@script:python depends on report@
+p << r.p;
+@@
+
+msg = "WARNING: NULL check before dev_{put, hold} functions is not needed."
+coccilib.report.print_report(p[0], msg)
diff --git a/scripts/coccinelle/free/iounmap.cocci b/scripts/coccinelle/free/iounmap.cocci
index 63b81d0c97b6..90d85fa7bfdf 100644
--- a/scripts/coccinelle/free/iounmap.cocci
+++ b/scripts/coccinelle/free/iounmap.cocci
@@ -8,7 +8,7 @@
// Confidence: Moderate
// Copyright: (C) 2012 Julia Lawall, INRIA/LIP6.
// Copyright: (C) 2012 Gilles Muller, INRIA/LiP6.
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Comments:
// Options:
diff --git a/scripts/coccinelle/free/kfree.cocci b/scripts/coccinelle/free/kfree.cocci
index 9b6e2037c2a9..6338bbac2f7f 100644
--- a/scripts/coccinelle/free/kfree.cocci
+++ b/scripts/coccinelle/free/kfree.cocci
@@ -9,7 +9,7 @@
// Copyright: (C) 2010-2012 Nicolas Palix.
// Copyright: (C) 2010-2012 Julia Lawall, INRIA/LIP6.
// Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6.
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Comments:
// Options: --no-includes --include-headers
diff --git a/scripts/coccinelle/free/kfreeaddr.cocci b/scripts/coccinelle/free/kfreeaddr.cocci
index 142af6337a04..85635a36f0ac 100644
--- a/scripts/coccinelle/free/kfreeaddr.cocci
+++ b/scripts/coccinelle/free/kfreeaddr.cocci
@@ -3,7 +3,7 @@
///
// Confidence: High
// Copyright: (C) 2013 Julia Lawall, INRIA/LIP6.
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Comments:
// Options: --no-includes --include-headers
diff --git a/scripts/coccinelle/free/pci_free_consistent.cocci b/scripts/coccinelle/free/pci_free_consistent.cocci
index d51e92556b42..e062b9ba09ff 100644
--- a/scripts/coccinelle/free/pci_free_consistent.cocci
+++ b/scripts/coccinelle/free/pci_free_consistent.cocci
@@ -3,7 +3,7 @@
///
// Confidence: Moderate
// Copyright: (C) 2013 Petr Strnad.
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Keywords: pci_free_consistent, pci_alloc_consistent
// Options: --no-includes --include-headers
diff --git a/scripts/coccinelle/iterators/device_node_continue.cocci b/scripts/coccinelle/iterators/device_node_continue.cocci
index f8cd14dfa604..5713c9cf59ff 100644
--- a/scripts/coccinelle/iterators/device_node_continue.cocci
+++ b/scripts/coccinelle/iterators/device_node_continue.cocci
@@ -4,7 +4,7 @@
///
// Confidence: High
// Copyright: (C) 2015 Julia Lawall, Inria.
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Options: --no-includes --include-headers
// Requires: 1.0.4
// Keywords: for_each_child_of_node, etc.
diff --git a/scripts/coccinelle/iterators/for_each_child.cocci b/scripts/coccinelle/iterators/for_each_child.cocci
index bc394615948e..2ea98a61a1f2 100644
--- a/scripts/coccinelle/iterators/for_each_child.cocci
+++ b/scripts/coccinelle/iterators/for_each_child.cocci
@@ -5,7 +5,7 @@
///
// Confidence: High
// Copyright: (C) 2020 Sumera Priyadarsini
-// URL: http://coccinelle.lip6.fr
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Options: --no-includes --include-headers
virtual patch
diff --git a/scripts/coccinelle/iterators/itnull.cocci b/scripts/coccinelle/iterators/itnull.cocci
index 9b362b98d7a1..7d34802f2724 100644
--- a/scripts/coccinelle/iterators/itnull.cocci
+++ b/scripts/coccinelle/iterators/itnull.cocci
@@ -10,7 +10,7 @@
// Copyright: (C) 2010-2012 Nicolas Palix.
// Copyright: (C) 2010-2012 Julia Lawall, INRIA/LIP6.
// Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6.
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Comments:
// Options: --no-includes --include-headers
diff --git a/scripts/coccinelle/iterators/list_entry_update.cocci b/scripts/coccinelle/iterators/list_entry_update.cocci
index d62e8a16085f..9a7593667da6 100644
--- a/scripts/coccinelle/iterators/list_entry_update.cocci
+++ b/scripts/coccinelle/iterators/list_entry_update.cocci
@@ -8,7 +8,7 @@
// Copyright: (C) 2010 Nicolas Palix, DIKU.
// Copyright: (C) 2010 Julia Lawall, DIKU.
// Copyright: (C) 2010 Gilles Muller, INRIA/LiP6.
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Comments:
// Options: --no-includes --include-headers
diff --git a/scripts/coccinelle/iterators/use_after_iter.cocci b/scripts/coccinelle/iterators/use_after_iter.cocci
index 676edd562eef..f67110ba7290 100644
--- a/scripts/coccinelle/iterators/use_after_iter.cocci
+++ b/scripts/coccinelle/iterators/use_after_iter.cocci
@@ -10,7 +10,7 @@
// Confidence: Moderate
// Copyright: (C) 2012 Julia Lawall, INRIA/LIP6.
// Copyright: (C) 2012 Gilles Muller, INRIA/LIP6.
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Comments:
// Options: --no-includes --include-headers
diff --git a/scripts/coccinelle/locks/call_kern.cocci b/scripts/coccinelle/locks/call_kern.cocci
index 5ca0d81b0015..3720f86d67c5 100644
--- a/scripts/coccinelle/locks/call_kern.cocci
+++ b/scripts/coccinelle/locks/call_kern.cocci
@@ -8,7 +8,7 @@
// Copyright: (C) 2012 Nicolas Palix.
// Copyright: (C) 2012 Julia Lawall, INRIA/LIP6.
// Copyright: (C) 2012 Gilles Muller, INRIA/LiP6.
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Comments:
// Options: --no-includes --include-headers
diff --git a/scripts/coccinelle/locks/double_lock.cocci b/scripts/coccinelle/locks/double_lock.cocci
index 9e88a578957c..619cfc714409 100644
--- a/scripts/coccinelle/locks/double_lock.cocci
+++ b/scripts/coccinelle/locks/double_lock.cocci
@@ -7,7 +7,7 @@
// Copyright: (C) 2010 Nicolas Palix, DIKU.
// Copyright: (C) 2010 Julia Lawall, DIKU.
// Copyright: (C) 2010 Gilles Muller, INRIA/LiP6.
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Comments:
// Options: --no-includes --include-headers
diff --git a/scripts/coccinelle/locks/flags.cocci b/scripts/coccinelle/locks/flags.cocci
index 7f990cd55f5a..44acf20a98dd 100644
--- a/scripts/coccinelle/locks/flags.cocci
+++ b/scripts/coccinelle/locks/flags.cocci
@@ -5,7 +5,7 @@
// Copyright: (C) 2010-2012 Nicolas Palix.
// Copyright: (C) 2010-2012 Julia Lawall, INRIA/LIP6.
// Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6.
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Comments:
// Options: --no-includes --include-headers
diff --git a/scripts/coccinelle/locks/mini_lock.cocci b/scripts/coccinelle/locks/mini_lock.cocci
index c3ad098f4a5b..71065d8a5d54 100644
--- a/scripts/coccinelle/locks/mini_lock.cocci
+++ b/scripts/coccinelle/locks/mini_lock.cocci
@@ -10,7 +10,7 @@
// Copyright: (C) 2010-2012 Nicolas Palix.
// Copyright: (C) 2010-2012 Julia Lawall, INRIA/LIP6.
// Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6.
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Comments:
// Options: --no-includes --include-headers
diff --git a/scripts/coccinelle/misc/boolreturn.cocci b/scripts/coccinelle/misc/boolreturn.cocci
deleted file mode 100644
index 29d2bf41e95d..000000000000
--- a/scripts/coccinelle/misc/boolreturn.cocci
+++ /dev/null
@@ -1,59 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/// Return statements in functions returning bool should use
-/// true/false instead of 1/0.
-//
-// Confidence: High
-// Options: --no-includes --include-headers
-
-virtual patch
-virtual report
-virtual context
-
-@r1 depends on patch@
-identifier fn;
-typedef bool;
-symbol false;
-symbol true;
-@@
-
-bool fn ( ... )
-{
-<...
-return
-(
-- 0
-+ false
-|
-- 1
-+ true
-)
- ;
-...>
-}
-
-@r2 depends on report || context@
-identifier fn;
-position p;
-@@
-
-bool fn ( ... )
-{
-<...
-return
-(
-* 0@p
-|
-* 1@p
-)
- ;
-...>
-}
-
-
-@script:python depends on report@
-p << r2.p;
-fn << r2.fn;
-@@
-
-msg = "WARNING: return of 0/1 in function '%s' with return type bool" % fn
-coccilib.report.print_report(p[0], msg)
diff --git a/scripts/coccinelle/misc/cstptr.cocci b/scripts/coccinelle/misc/cstptr.cocci
index c52e3c8ca9b3..74acf34bee03 100644
--- a/scripts/coccinelle/misc/cstptr.cocci
+++ b/scripts/coccinelle/misc/cstptr.cocci
@@ -5,7 +5,7 @@
// Confidence: High
// Copyright: (C) 2012 Julia Lawall, INRIA/LIP6.
// Copyright: (C) 2012 Gilles Muller, INRIA/LiP6.
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Comments:
// Options: --no-includes --include-headers
diff --git a/scripts/coccinelle/misc/doubleinit.cocci b/scripts/coccinelle/misc/doubleinit.cocci
index 2f80d3ab38dd..7dbfde3f44e1 100644
--- a/scripts/coccinelle/misc/doubleinit.cocci
+++ b/scripts/coccinelle/misc/doubleinit.cocci
@@ -7,7 +7,7 @@
// Copyright: (C) 2010-2012 Nicolas Palix.
// Copyright: (C) 2010-2012 Julia Lawall, INRIA/LIP6.
// Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6.
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Comments: requires at least Coccinelle 0.2.4, lex or parse error otherwise
// Options: --no-includes --include-headers
diff --git a/scripts/coccinelle/misc/ifcol.cocci b/scripts/coccinelle/misc/ifcol.cocci
index da0351ed5740..442742467c18 100644
--- a/scripts/coccinelle/misc/ifcol.cocci
+++ b/scripts/coccinelle/misc/ifcol.cocci
@@ -12,7 +12,7 @@
// Copyright: (C) 2010 Nicolas Palix, DIKU.
// Copyright: (C) 2010 Julia Lawall, DIKU.
// Copyright: (C) 2010 Gilles Muller, INRIA/LiP6.
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Comments:
// Options: --no-includes --include-headers
diff --git a/scripts/coccinelle/misc/newline_in_nl_msg.cocci b/scripts/coccinelle/misc/newline_in_nl_msg.cocci
index c175886e4015..9baffe55d917 100644
--- a/scripts/coccinelle/misc/newline_in_nl_msg.cocci
+++ b/scripts/coccinelle/misc/newline_in_nl_msg.cocci
@@ -5,7 +5,7 @@
///
// Confidence: Very High
// Copyright: (C) 2020 Intel Corporation
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Options: --no-includes --include-headers
virtual context
diff --git a/scripts/coccinelle/misc/noderef.cocci b/scripts/coccinelle/misc/noderef.cocci
index 72de62a77a44..37ee7358d742 100644
--- a/scripts/coccinelle/misc/noderef.cocci
+++ b/scripts/coccinelle/misc/noderef.cocci
@@ -5,7 +5,7 @@
// Confidence: High
// Copyright: (C) 2012 Julia Lawall, INRIA/LIP6.
// Copyright: (C) 2012 Gilles Muller, INRIA/LiP6.
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Comments:
// Options: --no-includes --include-headers
diff --git a/scripts/coccinelle/misc/orplus.cocci b/scripts/coccinelle/misc/orplus.cocci
index 52203dc2ca4b..3a1566f1aec9 100644
--- a/scripts/coccinelle/misc/orplus.cocci
+++ b/scripts/coccinelle/misc/orplus.cocci
@@ -6,7 +6,7 @@
// Confidence: Moderate
// Copyright: (C) 2013 Julia Lawall, INRIA/LIP6.
// Copyright: (C) 2013 Gilles Muller, INRIA/LIP6.
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Comments:
// Options: --no-includes --include-headers
diff --git a/scripts/coccinelle/misc/returnvar.cocci b/scripts/coccinelle/misc/returnvar.cocci
index ce0d9eebc7e1..7ffc55bf51f9 100644
--- a/scripts/coccinelle/misc/returnvar.cocci
+++ b/scripts/coccinelle/misc/returnvar.cocci
@@ -4,7 +4,7 @@
///
// Confidence: Moderate
// Copyright: (C) 2012 Peter Senna Tschudin, INRIA/LIP6.
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Comments: Comments on code can be deleted if near code that is removed.
// "when strict" can be removed to get more hits, but adds false
// positives
diff --git a/scripts/coccinelle/misc/semicolon.cocci b/scripts/coccinelle/misc/semicolon.cocci
index a53edb026dad..4476bf873deb 100644
--- a/scripts/coccinelle/misc/semicolon.cocci
+++ b/scripts/coccinelle/misc/semicolon.cocci
@@ -4,7 +4,7 @@
///
// Confidence: Moderate
// Copyright: (C) 2012 Peter Senna Tschudin, INRIA/LIP6.
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Comments: Some false positives on empty default cases in switch statements.
// Options: --no-includes --include-headers
diff --git a/scripts/coccinelle/misc/ifaddr.cocci b/scripts/coccinelle/misc/test_addr.cocci
index fc92e8fcbfcb..d559b297476c 100644
--- a/scripts/coccinelle/misc/ifaddr.cocci
+++ b/scripts/coccinelle/misc/test_addr.cocci
@@ -4,7 +4,7 @@
// Confidence: High
// Copyright: (C) 2012 Julia Lawall, INRIA/LIP6.
// Copyright: (C) 2012 Gilles Muller, INRIA/LiP6.
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Comments:
// Options: --no-includes --include-headers
@@ -14,12 +14,10 @@ virtual context
@r@
expression x;
-statement S1,S2;
position p;
@@
-*if@p (&x)
- S1 else S2
+*\(&x@p == NULL \| &x@p != NULL\)
@script:python depends on org@
p << r.p;
diff --git a/scripts/coccinelle/misc/warn.cocci b/scripts/coccinelle/misc/warn.cocci
index e379661e240d..b5f428035d03 100644
--- a/scripts/coccinelle/misc/warn.cocci
+++ b/scripts/coccinelle/misc/warn.cocci
@@ -4,7 +4,7 @@
// Confidence: High
// Copyright: (C) 2012 Julia Lawall, INRIA/LIP6.
// Copyright: (C) 2012 Gilles Muller, INRIA/LiP6.
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Comments:
// Options: --no-includes --include-headers
diff --git a/scripts/coccinelle/null/badzero.cocci b/scripts/coccinelle/null/badzero.cocci
index 882dd65313ab..35d443825c2a 100644
--- a/scripts/coccinelle/null/badzero.cocci
+++ b/scripts/coccinelle/null/badzero.cocci
@@ -10,7 +10,7 @@
// Confidence: High
// Copyright: (C) 2012 Julia Lawall, INRIA/LIP6.
// Copyright: (C) 2012 Gilles Muller, INRIA/LiP6.
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Requires: 1.0.0
// Options:
diff --git a/scripts/coccinelle/null/deref_null.cocci b/scripts/coccinelle/null/deref_null.cocci
index 98f1e7faf503..fdf098d4f522 100644
--- a/scripts/coccinelle/null/deref_null.cocci
+++ b/scripts/coccinelle/null/deref_null.cocci
@@ -7,7 +7,7 @@
// Copyright: (C) 2010 Nicolas Palix, DIKU.
// Copyright: (C) 2010 Julia Lawall, DIKU.
// Copyright: (C) 2010 Gilles Muller, INRIA/LiP6.
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Comments: -I ... -all_includes can give more complete results
// Options:
diff --git a/scripts/coccinelle/null/eno.cocci b/scripts/coccinelle/null/eno.cocci
index 81584ff87956..7107d6c8db9e 100644
--- a/scripts/coccinelle/null/eno.cocci
+++ b/scripts/coccinelle/null/eno.cocci
@@ -5,7 +5,7 @@
// Copyright: (C) 2010-2012 Nicolas Palix.
// Copyright: (C) 2010-2012 Julia Lawall, INRIA/LIP6.
// Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6.
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Comments:
// Options: --no-includes --include-headers
diff --git a/scripts/coccinelle/null/kmerr.cocci b/scripts/coccinelle/null/kmerr.cocci
index d0e004d4e130..68db20de62eb 100644
--- a/scripts/coccinelle/null/kmerr.cocci
+++ b/scripts/coccinelle/null/kmerr.cocci
@@ -9,7 +9,7 @@
// Copyright: (C) 2010 Nicolas Palix, DIKU.
// Copyright: (C) 2010 Julia Lawall, DIKU.
// Copyright: (C) 2010 Gilles Muller, INRIA/LiP6.
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Comments:
// Options: --no-includes --include-headers
diff --git a/scripts/coccinelle/tests/doublebitand.cocci b/scripts/coccinelle/tests/doublebitand.cocci
index 0f0b94e7debd..025436a15040 100644
--- a/scripts/coccinelle/tests/doublebitand.cocci
+++ b/scripts/coccinelle/tests/doublebitand.cocci
@@ -9,7 +9,7 @@
// Copyright: (C) 2010 Nicolas Palix, DIKU.
// Copyright: (C) 2010 Julia Lawall, DIKU.
// Copyright: (C) 2010 Gilles Muller, INRIA/LiP6.
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Comments:
// Options: --no-includes --include-headers
diff --git a/scripts/coccinelle/tests/doubletest.cocci b/scripts/coccinelle/tests/doubletest.cocci
index b35519cddb13..c3d94c31959f 100644
--- a/scripts/coccinelle/tests/doubletest.cocci
+++ b/scripts/coccinelle/tests/doubletest.cocci
@@ -8,7 +8,7 @@
// Copyright: (C) 2010 Nicolas Palix, DIKU.
// Copyright: (C) 2010 Julia Lawall, DIKU.
// Copyright: (C) 2010 Gilles Muller, INRIA/LiP6.
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Comments:
// Options: --no-includes --include-headers
diff --git a/scripts/coccinelle/tests/odd_ptr_err.cocci b/scripts/coccinelle/tests/odd_ptr_err.cocci
index 11d4e2b6deb8..377436abe2d0 100644
--- a/scripts/coccinelle/tests/odd_ptr_err.cocci
+++ b/scripts/coccinelle/tests/odd_ptr_err.cocci
@@ -6,7 +6,7 @@
// Confidence: High
// Copyright: (C) 2012, 2015 Julia Lawall, INRIA.
// Copyright: (C) 2012, 2015 Gilles Muller, INRIA.
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Options: --no-includes --include-headers
virtual patch
diff --git a/scripts/coccinelle/tests/unsigned_lesser_than_zero.cocci b/scripts/coccinelle/tests/unsigned_lesser_than_zero.cocci
index 91e286ace54c..5e188c62d8a9 100644
--- a/scripts/coccinelle/tests/unsigned_lesser_than_zero.cocci
+++ b/scripts/coccinelle/tests/unsigned_lesser_than_zero.cocci
@@ -13,7 +13,7 @@
///
// Confidence: Average
// Copyright: (C) 2015 Andrzej Hajda, Samsung Electronics Co., Ltd.
-// URL: http://coccinelle.lip6.fr/
+// URL: https://coccinelle.gitlabpages.inria.fr/website
// Options: --all-includes
virtual context
diff --git a/scripts/dummy-tools/dummy-plugin-dir/include/plugin-version.h b/scripts/dummy-tools/dummy-plugin-dir/include/plugin-version.h
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/scripts/dummy-tools/dummy-plugin-dir/include/plugin-version.h
diff --git a/scripts/dummy-tools/gcc b/scripts/dummy-tools/gcc
index b2483149bbe5..1db1889f6d81 100755
--- a/scripts/dummy-tools/gcc
+++ b/scripts/dummy-tools/gcc
@@ -59,7 +59,7 @@ fi
if arg_contain -E "$@"; then
# For scripts/cc-version.sh; This emulates GCC 20.0.0
if arg_contain - "$@"; then
- sed -n '/^GCC/{s/__GNUC__/20/; s/__GNUC_MINOR__/0/; s/__GNUC_PATCHLEVEL__/0/; p;}'
+ sed -n '/^GCC/{s/__GNUC__/20/; s/__GNUC_MINOR__/0/; s/__GNUC_PATCHLEVEL__/0/; p;}; s/__LONG_DOUBLE_128__/1/ p'
exit 0
else
echo "no input files" >&2
@@ -96,12 +96,8 @@ fi
# To set GCC_PLUGINS
if arg_contain -print-file-name=plugin "$@"; then
- plugin_dir=$(mktemp -d)
-
- mkdir -p $plugin_dir/include
- touch $plugin_dir/include/plugin-version.h
-
- echo $plugin_dir
+ # Use $0 to find the in-tree dummy directory
+ echo "$(dirname "$(readlink -f "$0")")/dummy-plugin-dir"
exit 0
fi
diff --git a/scripts/faddr2line b/scripts/faddr2line
index 94ed98dd899f..5514c23f45c2 100755
--- a/scripts/faddr2line
+++ b/scripts/faddr2line
@@ -61,6 +61,7 @@ die() {
READELF="${CROSS_COMPILE:-}readelf"
ADDR2LINE="${CROSS_COMPILE:-}addr2line"
AWK="awk"
+GREP="grep"
command -v ${AWK} >/dev/null 2>&1 || die "${AWK} isn't installed"
command -v ${READELF} >/dev/null 2>&1 || die "${READELF} isn't installed"
@@ -112,7 +113,9 @@ __faddr2line() {
# section offsets.
local file_type=$(${READELF} --file-header $objfile |
${AWK} '$1 == "Type:" { print $2; exit }')
- [[ $file_type = "EXEC" ]] && is_vmlinux=1
+ if [[ $file_type = "EXEC" ]] || [[ $file_type == "DYN" ]]; then
+ is_vmlinux=1
+ fi
# Go through each of the object's symbols which match the func name.
# In rare cases there might be duplicates, in which case we print all
@@ -269,6 +272,8 @@ LIST=0
[[ ! -f $objfile ]] && die "can't find objfile $objfile"
shift
+${READELF} --section-headers --wide $objfile | ${GREP} -q '\.debug_info' || die "CONFIG_DEBUG_INFO not enabled"
+
DIR_PREFIX=supercalifragilisticexpialidocious
find_dir_prefix $objfile
diff --git a/scripts/gcc-goto.sh b/scripts/gcc-goto.sh
deleted file mode 100755
index 8b980fb2270a..000000000000
--- a/scripts/gcc-goto.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-# Test for gcc 'asm goto' support
-# Copyright (C) 2010, Jason Baron <jbaron@redhat.com>
-
-cat << "END" | $@ -x c - -fno-PIE -c -o /dev/null
-int main(void)
-{
-#if defined(__arm__) || defined(__aarch64__)
- /*
- * Not related to asm goto, but used by jump label
- * and broken on some ARM GCC versions (see GCC Bug 48637).
- */
- static struct { int dummy; int state; } tp;
- asm (".long %c0" :: "i" (&tp.state));
-#endif
-
-entry:
- asm goto ("" :::: entry);
- return 0;
-}
-END
diff --git a/scripts/gdb/linux/dmesg.py b/scripts/gdb/linux/dmesg.py
index d5983cf3db7d..c771831eb077 100644
--- a/scripts/gdb/linux/dmesg.py
+++ b/scripts/gdb/linux/dmesg.py
@@ -22,7 +22,6 @@ prb_desc_type = utils.CachedType("struct prb_desc")
prb_desc_ring_type = utils.CachedType("struct prb_desc_ring")
prb_data_ring_type = utils.CachedType("struct prb_data_ring")
printk_ringbuffer_type = utils.CachedType("struct printk_ringbuffer")
-atomic_long_type = utils.CachedType("atomic_long_t")
class LxDmesg(gdb.Command):
"""Print Linux kernel log buffer."""
@@ -68,8 +67,6 @@ class LxDmesg(gdb.Command):
off = prb_data_ring_type.get_type()['data'].bitpos // 8
text_data_addr = utils.read_ulong(text_data_ring, off)
- counter_off = atomic_long_type.get_type()['counter'].bitpos // 8
-
sv_off = prb_desc_type.get_type()['state_var'].bitpos // 8
off = prb_desc_type.get_type()['text_blk_lpos'].bitpos // 8
@@ -89,9 +86,9 @@ class LxDmesg(gdb.Command):
# read in tail and head descriptor ids
off = prb_desc_ring_type.get_type()['tail_id'].bitpos // 8
- tail_id = utils.read_u64(desc_ring, off + counter_off)
+ tail_id = utils.read_atomic_long(desc_ring, off)
off = prb_desc_ring_type.get_type()['head_id'].bitpos // 8
- head_id = utils.read_u64(desc_ring, off + counter_off)
+ head_id = utils.read_atomic_long(desc_ring, off)
did = tail_id
while True:
@@ -102,7 +99,7 @@ class LxDmesg(gdb.Command):
desc = utils.read_memoryview(inf, desc_addr + desc_off, desc_sz).tobytes()
# skip non-committed record
- state = 3 & (utils.read_u64(desc, sv_off + counter_off) >> desc_flags_shift)
+ state = 3 & (utils.read_atomic_long(desc, sv_off) >> desc_flags_shift)
if state != desc_committed and state != desc_finalized:
if did == head_id:
break
diff --git a/scripts/gdb/linux/utils.py b/scripts/gdb/linux/utils.py
index ff7c1799d588..1553f68716cc 100644
--- a/scripts/gdb/linux/utils.py
+++ b/scripts/gdb/linux/utils.py
@@ -35,13 +35,12 @@ class CachedType:
long_type = CachedType("long")
-
+atomic_long_type = CachedType("atomic_long_t")
def get_long_type():
global long_type
return long_type.get_type()
-
def offset_of(typeobj, field):
element = gdb.Value(0).cast(typeobj)
return int(str(element[field].address).split()[0], 16)
@@ -129,6 +128,17 @@ def read_ulong(buffer, offset):
else:
return read_u32(buffer, offset)
+atomic_long_counter_offset = atomic_long_type.get_type()['counter'].bitpos
+atomic_long_counter_sizeof = atomic_long_type.get_type()['counter'].type.sizeof
+
+def read_atomic_long(buffer, offset):
+ global atomic_long_counter_offset
+ global atomic_long_counter_sizeof
+
+ if atomic_long_counter_sizeof == 8:
+ return read_u64(buffer, offset + atomic_long_counter_offset)
+ else:
+ return read_u32(buffer, offset + atomic_long_counter_offset)
target_arch = None
diff --git a/scripts/gdb/vmlinux-gdb.py b/scripts/gdb/vmlinux-gdb.py
index 4136dc2c59df..3e8d3669f0ce 100644
--- a/scripts/gdb/vmlinux-gdb.py
+++ b/scripts/gdb/vmlinux-gdb.py
@@ -13,7 +13,7 @@
import os
-sys.path.insert(0, os.path.dirname(__file__) + "/scripts/gdb")
+sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)) + "/scripts/gdb")
try:
gdb.parse_and_eval("0")
diff --git a/scripts/headers_install.sh b/scripts/headers_install.sh
index dd554bd436cc..4041881746ad 100755
--- a/scripts/headers_install.sh
+++ b/scripts/headers_install.sh
@@ -70,7 +70,6 @@ configs=$(sed -e '
#
# The format is <file-name>:<CONFIG-option> in each line.
config_leak_ignores="
-arch/alpha/include/uapi/asm/setup.h:CONFIG_ALPHA_LEGACY_START_ADDRESS
arch/arc/include/uapi/asm/page.h:CONFIG_ARC_PAGE_SIZE_16K
arch/arc/include/uapi/asm/page.h:CONFIG_ARC_PAGE_SIZE_4K
arch/arc/include/uapi/asm/swab.h:CONFIG_ARC_HAS_SWAPE
@@ -84,7 +83,6 @@ arch/nios2/include/uapi/asm/swab.h:CONFIG_NIOS2_CI_SWAB_SUPPORT
arch/x86/include/uapi/asm/auxvec.h:CONFIG_IA32_EMULATION
arch/x86/include/uapi/asm/auxvec.h:CONFIG_X86_64
arch/x86/include/uapi/asm/mman.h:CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
-include/uapi/asm-generic/fcntl.h:CONFIG_64BIT
include/uapi/linux/atmdev.h:CONFIG_COMPAT
include/uapi/linux/eventpoll.h:CONFIG_PM_SLEEP
include/uapi/linux/hw_breakpoint.h:CONFIG_HAVE_MIXED_BREAKPOINTS_REGS
diff --git a/scripts/kconfig/qconf-cfg.sh b/scripts/kconfig/qconf-cfg.sh
index 9b695e5cd9b3..ad652cb53947 100755
--- a/scripts/kconfig/qconf-cfg.sh
+++ b/scripts/kconfig/qconf-cfg.sh
@@ -20,5 +20,6 @@ fi
echo >&2 "*"
echo >&2 "* Could not find Qt5 via ${HOSTPKG_CONFIG}."
echo >&2 "* Please install Qt5 and make sure it's in PKG_CONFIG_PATH"
+echo >&2 "* You need $PKG"
echo >&2 "*"
exit 1
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index cbd6b0f48b4e..80d973144fde 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -1571,9 +1571,7 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
zeros = calloc(1, sym->st_size);
symval = zeros;
} else {
- symval = (void *)info->hdr
- + info->sechdrs[get_secindex(info, sym)].sh_offset
- + sym->st_value;
+ symval = sym_get_data(info, sym);
}
/* First handle the "special" cases */
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 29474cee10b1..2c80da0220c3 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -321,13 +321,10 @@ static void *sym_get_data_by_offset(const struct elf_info *info,
{
Elf_Shdr *sechdr = &info->sechdrs[secindex];
- if (info->hdr->e_type != ET_REL)
- offset -= sechdr->sh_addr;
-
return (void *)info->hdr + sechdr->sh_offset + offset;
}
-static void *sym_get_data(const struct elf_info *info, const Elf_Sym *sym)
+void *sym_get_data(const struct elf_info *info, const Elf_Sym *sym)
{
return sym_get_data_by_offset(info, get_secindex(info, sym),
sym->st_value);
@@ -339,8 +336,16 @@ static const char *sech_name(const struct elf_info *info, Elf_Shdr *sechdr)
sechdr->sh_name);
}
-static const char *sec_name(const struct elf_info *info, int secindex)
+static const char *sec_name(const struct elf_info *info, unsigned int secindex)
{
+ /*
+ * If sym->st_shndx is a special section index, there is no
+ * corresponding section header.
+ * Return "" if the index is out of range of info->sechdrs[] array.
+ */
+ if (secindex >= info->num_sections)
+ return "";
+
return sech_name(info, &info->sechdrs[secindex]);
}
@@ -466,6 +471,10 @@ static int parse_elf(struct elf_info *info, const char *filename)
sechdrs = (void *)hdr + hdr->e_shoff;
info->sechdrs = sechdrs;
+ /* modpost only works for relocatable objects */
+ if (hdr->e_type != ET_REL)
+ fatal("%s: not relocatable object.", filename);
+
/* Check if file offset is correct */
if (hdr->e_shoff > info->size) {
fatal("section header offset=%lu in file '%s' is bigger than filesize=%zu\n",
@@ -737,12 +746,18 @@ static bool match(const char *string, const char *const patterns[])
return false;
}
+/* useful to pass patterns to match() directly */
+#define PATTERNS(...) \
+ ({ \
+ static const char *const patterns[] = {__VA_ARGS__, NULL}; \
+ patterns; \
+ })
+
/* sections that we do not want to do full section mismatch check on */
static const char *const section_white_list[] =
{
".comment*",
".debug*",
- ".cranges", /* sh64 */
".zdebug*", /* Compressed debug sections. */
".GCC.command.line", /* record-gcc-switches */
".mdebug*", /* alpha, score, mips etc. */
@@ -830,28 +845,12 @@ static const char *const init_data_sections[] =
/* all init sections */
static const char *const init_sections[] = { ALL_INIT_SECTIONS, NULL };
-/* All init and exit sections (code + data) */
-static const char *const init_exit_sections[] =
- {ALL_INIT_SECTIONS, ALL_EXIT_SECTIONS, NULL };
-
/* all text sections */
static const char *const text_sections[] = { ALL_TEXT_SECTIONS, NULL };
/* data section */
static const char *const data_sections[] = { DATA_SECTIONS, NULL };
-
-/* symbols in .data that may refer to init/exit sections */
-#define DEFAULT_SYMBOL_WHITE_LIST \
- "*driver", \
- "*_template", /* scsi uses *_template a lot */ \
- "*_timer", /* arm uses ops structures named _timer a lot */ \
- "*_sht", /* scsi also used *_sht to some extent */ \
- "*_ops", \
- "*_probe", \
- "*_probe_one", \
- "*_console"
-
static const char *const head_sections[] = { ".head.text*", NULL };
static const char *const linker_symbols[] =
{ "__init_begin", "_sinittext", "_einittext", NULL };
@@ -883,9 +882,6 @@ enum mismatch {
*
* @mismatch: Type of mismatch.
*
- * @symbol_white_list: Do not match a relocation to a symbol in this list
- * even if it is targeting a section in @bad_to_sec.
- *
* @handler: Specific handler to call when a match is found. If NULL,
* default_mismatch_handler() will be called.
*
@@ -895,7 +891,6 @@ struct sectioncheck {
const char *bad_tosec[20];
const char *good_tosec[20];
enum mismatch mismatch;
- const char *symbol_white_list[20];
void (*handler)(const char *modname, struct elf_info *elf,
const struct sectioncheck* const mismatch,
Elf_Rela *r, Elf_Sym *sym, const char *fromsec);
@@ -915,75 +910,61 @@ static const struct sectioncheck sectioncheck[] = {
.fromsec = { TEXT_SECTIONS, NULL },
.bad_tosec = { ALL_INIT_SECTIONS, NULL },
.mismatch = TEXT_TO_ANY_INIT,
- .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
},
{
.fromsec = { DATA_SECTIONS, NULL },
.bad_tosec = { ALL_XXXINIT_SECTIONS, NULL },
.mismatch = DATA_TO_ANY_INIT,
- .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
},
{
.fromsec = { DATA_SECTIONS, NULL },
.bad_tosec = { INIT_SECTIONS, NULL },
.mismatch = DATA_TO_ANY_INIT,
- .symbol_white_list = {
- "*_template", "*_timer", "*_sht", "*_ops",
- "*_probe", "*_probe_one", "*_console", NULL
- },
},
{
.fromsec = { TEXT_SECTIONS, NULL },
.bad_tosec = { ALL_EXIT_SECTIONS, NULL },
.mismatch = TEXT_TO_ANY_EXIT,
- .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
},
{
.fromsec = { DATA_SECTIONS, NULL },
.bad_tosec = { ALL_EXIT_SECTIONS, NULL },
.mismatch = DATA_TO_ANY_EXIT,
- .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
},
/* Do not reference init code/data from meminit code/data */
{
.fromsec = { ALL_XXXINIT_SECTIONS, NULL },
.bad_tosec = { INIT_SECTIONS, NULL },
.mismatch = XXXINIT_TO_SOME_INIT,
- .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
},
/* Do not reference exit code/data from memexit code/data */
{
.fromsec = { ALL_XXXEXIT_SECTIONS, NULL },
.bad_tosec = { EXIT_SECTIONS, NULL },
.mismatch = XXXEXIT_TO_SOME_EXIT,
- .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
},
/* Do not use exit code/data from init code */
{
.fromsec = { ALL_INIT_SECTIONS, NULL },
.bad_tosec = { ALL_EXIT_SECTIONS, NULL },
.mismatch = ANY_INIT_TO_ANY_EXIT,
- .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
},
/* Do not use init code/data from exit code */
{
.fromsec = { ALL_EXIT_SECTIONS, NULL },
.bad_tosec = { ALL_INIT_SECTIONS, NULL },
.mismatch = ANY_EXIT_TO_ANY_INIT,
- .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
},
{
.fromsec = { ALL_PCI_INIT_SECTIONS, NULL },
.bad_tosec = { INIT_SECTIONS, NULL },
.mismatch = ANY_INIT_TO_ANY_EXIT,
- .symbol_white_list = { NULL },
},
/* Do not export init/exit functions or data */
{
.fromsec = { "___ksymtab*", NULL },
.bad_tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
.mismatch = EXPORT_TO_INIT_EXIT,
- .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
},
{
.fromsec = { "__ex_table", NULL },
@@ -1044,15 +1025,6 @@ static const struct sectioncheck *section_mismatch(
* fromsec = .data*
* atsym = __param_ops_*
*
- * Pattern 2:
- * Many drivers utilise a *driver container with references to
- * add, remove, probe functions etc.
- * the pattern is identified by:
- * tosec = init or exit section
- * fromsec = data section
- * atsym = *driver, *_template, *_sht, *_ops, *_probe,
- * *probe_one, *_console, *_timer
- *
* Pattern 3:
* Whitelist all references from .head.text to any init section
*
@@ -1101,10 +1073,22 @@ static int secref_whitelist(const struct sectioncheck *mismatch,
strstarts(fromsym, "__param_ops_"))
return 0;
- /* Check for pattern 2 */
- if (match(tosec, init_exit_sections) &&
- match(fromsec, data_sections) &&
- match(fromsym, mismatch->symbol_white_list))
+ /* symbols in data sections that may refer to any init/exit sections */
+ if (match(fromsec, PATTERNS(DATA_SECTIONS)) &&
+ match(tosec, PATTERNS(ALL_INIT_SECTIONS, ALL_EXIT_SECTIONS)) &&
+ match(fromsym, PATTERNS("*_template", // scsi uses *_template a lot
+ "*_timer", // arm uses ops structures named _timer a lot
+ "*_sht", // scsi also used *_sht to some extent
+ "*_ops",
+ "*_probe",
+ "*_probe_one",
+ "*_console")))
+ return 0;
+
+ /* symbols in data sections that may refer to meminit/exit sections */
+ if (match(fromsec, PATTERNS(DATA_SECTIONS)) &&
+ match(tosec, PATTERNS(ALL_XXXINIT_SECTIONS, ALL_EXIT_SECTIONS)) &&
+ match(fromsym, PATTERNS("*driver")))
return 0;
/* Check for pattern 3 */
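
A minimal sketch (not part of the patch) of what a PATTERNS() call in the hunk above expands to: the GCC statement expression yields a static, NULL-terminated array of pattern strings that can be passed straight to match(). The helper name below is purely illustrative.

	/* hypothetical helper: rough expansion of
	 *   match(fromsym, PATTERNS("*_ops", "*_probe"))
	 */
	static bool fromsym_is_whitelisted(const char *fromsym)
	{
		static const char *const patterns[] = { "*_ops", "*_probe", NULL };

		return match(fromsym, patterns);
	}
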
@@ -1230,42 +1214,6 @@ static Elf_Sym *find_elf_symbol2(struct elf_info *elf, Elf_Addr addr,
return near;
}
-/*
- * Convert a section name to the function/data attribute
- * .init.text => __init
- * .memexitconst => __memconst
- * etc.
- *
- * The memory of returned value has been allocated on a heap. The user of this
- * method should free it after usage.
-*/
-static char *sec2annotation(const char *s)
-{
- if (match(s, init_exit_sections)) {
- char *p = NOFAIL(malloc(20));
- char *r = p;
-
- *p++ = '_';
- *p++ = '_';
- if (*s == '.')
- s++;
- while (*s && *s != '.')
- *p++ = *s++;
- *p = '\0';
- if (*s == '.')
- s++;
- if (strstr(s, "rodata") != NULL)
- strcat(p, "const ");
- else if (strstr(s, "data") != NULL)
- strcat(p, "data ");
- else
- strcat(p, " ");
- return r;
- } else {
- return NOFAIL(strdup(""));
- }
-}
-
static int is_function(Elf_Sym *sym)
{
if (sym)
@@ -1274,19 +1222,6 @@ static int is_function(Elf_Sym *sym)
return -1;
}
-static void print_section_list(const char * const list[20])
-{
- const char *const *s = list;
-
- while (*s) {
- fprintf(stderr, "%s", *s);
- s++;
- if (*s)
- fprintf(stderr, ", ");
- }
- fprintf(stderr, "\n");
-}
-
static inline void get_pretty_name(int is_func, const char** name, const char** name_p)
{
switch (is_func) {
@@ -1304,141 +1239,31 @@ static inline void get_pretty_name(int is_func, const char** name, const char**
static void report_sec_mismatch(const char *modname,
const struct sectioncheck *mismatch,
const char *fromsec,
- unsigned long long fromaddr,
const char *fromsym,
- int from_is_func,
- const char *tosec, const char *tosym,
- int to_is_func)
+ const char *tosec, const char *tosym)
{
- const char *from, *from_p;
- const char *to, *to_p;
- char *prl_from;
- char *prl_to;
-
sec_mismatch_count++;
- get_pretty_name(from_is_func, &from, &from_p);
- get_pretty_name(to_is_func, &to, &to_p);
-
- warn("%s(%s+0x%llx): Section mismatch in reference from the %s %s%s "
- "to the %s %s:%s%s\n",
- modname, fromsec, fromaddr, from, fromsym, from_p, to, tosec,
- tosym, to_p);
-
switch (mismatch->mismatch) {
case TEXT_TO_ANY_INIT:
- prl_from = sec2annotation(fromsec);
- prl_to = sec2annotation(tosec);
- fprintf(stderr,
- "The function %s%s() references\n"
- "the %s %s%s%s.\n"
- "This is often because %s lacks a %s\n"
- "annotation or the annotation of %s is wrong.\n",
- prl_from, fromsym,
- to, prl_to, tosym, to_p,
- fromsym, prl_to, tosym);
- free(prl_from);
- free(prl_to);
- break;
- case DATA_TO_ANY_INIT: {
- prl_to = sec2annotation(tosec);
- fprintf(stderr,
- "The variable %s references\n"
- "the %s %s%s%s\n"
- "If the reference is valid then annotate the\n"
- "variable with __init* or __refdata (see linux/init.h) "
- "or name the variable:\n",
- fromsym, to, prl_to, tosym, to_p);
- print_section_list(mismatch->symbol_white_list);
- free(prl_to);
- break;
- }
+ case DATA_TO_ANY_INIT:
case TEXT_TO_ANY_EXIT:
- prl_to = sec2annotation(tosec);
- fprintf(stderr,
- "The function %s() references a %s in an exit section.\n"
- "Often the %s %s%s has valid usage outside the exit section\n"
- "and the fix is to remove the %sannotation of %s.\n",
- fromsym, to, to, tosym, to_p, prl_to, tosym);
- free(prl_to);
- break;
- case DATA_TO_ANY_EXIT: {
- prl_to = sec2annotation(tosec);
- fprintf(stderr,
- "The variable %s references\n"
- "the %s %s%s%s\n"
- "If the reference is valid then annotate the\n"
- "variable with __exit* (see linux/init.h) or "
- "name the variable:\n",
- fromsym, to, prl_to, tosym, to_p);
- print_section_list(mismatch->symbol_white_list);
- free(prl_to);
- break;
- }
+ case DATA_TO_ANY_EXIT:
case XXXINIT_TO_SOME_INIT:
case XXXEXIT_TO_SOME_EXIT:
- prl_from = sec2annotation(fromsec);
- prl_to = sec2annotation(tosec);
- fprintf(stderr,
- "The %s %s%s%s references\n"
- "a %s %s%s%s.\n"
- "If %s is only used by %s then\n"
- "annotate %s with a matching annotation.\n",
- from, prl_from, fromsym, from_p,
- to, prl_to, tosym, to_p,
- tosym, fromsym, tosym);
- free(prl_from);
- free(prl_to);
- break;
case ANY_INIT_TO_ANY_EXIT:
- prl_from = sec2annotation(fromsec);
- prl_to = sec2annotation(tosec);
- fprintf(stderr,
- "The %s %s%s%s references\n"
- "a %s %s%s%s.\n"
- "This is often seen when error handling "
- "in the init function\n"
- "uses functionality in the exit path.\n"
- "The fix is often to remove the %sannotation of\n"
- "%s%s so it may be used outside an exit section.\n",
- from, prl_from, fromsym, from_p,
- to, prl_to, tosym, to_p,
- prl_to, tosym, to_p);
- free(prl_from);
- free(prl_to);
- break;
case ANY_EXIT_TO_ANY_INIT:
- prl_from = sec2annotation(fromsec);
- prl_to = sec2annotation(tosec);
- fprintf(stderr,
- "The %s %s%s%s references\n"
- "a %s %s%s%s.\n"
- "This is often seen when error handling "
- "in the exit function\n"
- "uses functionality in the init path.\n"
- "The fix is often to remove the %sannotation of\n"
- "%s%s so it may be used outside an init section.\n",
- from, prl_from, fromsym, from_p,
- to, prl_to, tosym, to_p,
- prl_to, tosym, to_p);
- free(prl_from);
- free(prl_to);
+ warn("%s: section mismatch in reference: %s (section: %s) -> %s (section: %s)\n",
+ modname, fromsym, fromsec, tosym, tosec);
break;
case EXPORT_TO_INIT_EXIT:
- prl_to = sec2annotation(tosec);
- fprintf(stderr,
- "The symbol %s is exported and annotated %s\n"
- "Fix this by removing the %sannotation of %s "
- "or drop the export.\n",
- tosym, prl_to, prl_to, tosym);
- free(prl_to);
+ warn("%s: EXPORT_SYMBOL used for init/exit symbol: %s (section: %s)\n",
+ modname, tosym, tosec);
break;
case EXTABLE_TO_NON_TEXT:
- fatal("There's a special handler for this mismatch type, "
- "we should never get here.");
+ fatal("There's a special handler for this mismatch type, we should never get here.\n");
break;
}
- fprintf(stderr, "\n");
}
static void default_mismatch_handler(const char *modname, struct elf_info *elf,
@@ -1454,9 +1279,6 @@ static void default_mismatch_handler(const char *modname, struct elf_info *elf,
from = find_elf_symbol2(elf, r->r_offset, fromsec);
fromsym = sym_name(elf, from);
- if (strstarts(fromsym, "reference___initcall"))
- return;
-
tosec = sec_name(elf, get_secindex(elf, sym));
to = find_elf_symbol(elf, r->r_addend, sym);
tosym = sym_name(elf, to);
@@ -1465,9 +1287,7 @@ static void default_mismatch_handler(const char *modname, struct elf_info *elf,
if (secref_whitelist(mismatch,
fromsec, fromsym, tosec, tosym)) {
report_sec_mismatch(modname, mismatch,
- fromsec, r->r_offset, fromsym,
- is_function(from), tosec, tosym,
- is_function(to));
+ fromsec, fromsym, tosec, tosym);
}
}
@@ -1623,9 +1443,6 @@ static int addend_386_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r)
break;
case R_386_PC32:
r->r_addend = TO_NATIVE(*location) + 4;
- /* For CONFIG_RELOCATABLE=y */
- if (elf->hdr->e_type == ET_EXEC)
- r->r_addend += r->r_offset;
break;
}
return 0;
@@ -1718,8 +1535,7 @@ static void section_rela(const char *modname, struct elf_info *elf,
Elf_Rela *start = (void *)elf->hdr + sechdr->sh_offset;
Elf_Rela *stop = (void *)start + sechdr->sh_size;
- fromsec = sech_name(elf, sechdr);
- fromsec += strlen(".rela");
+ fromsec = sec_name(elf, sechdr->sh_info);
/* if from section (name) is know good then skip it */
if (match(fromsec, section_white_list))
return;
@@ -1771,8 +1587,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
Elf_Rel *start = (void *)elf->hdr + sechdr->sh_offset;
Elf_Rel *stop = (void *)start + sechdr->sh_size;
- fromsec = sech_name(elf, sechdr);
- fromsec += strlen(".rel");
+ fromsec = sec_name(elf, sechdr->sh_info);
/* if from section (name) is know good then skip it */
if (match(fromsec, section_white_list))
return;
@@ -2206,13 +2021,11 @@ static void add_exported_symbols(struct buffer *buf, struct module *mod)
/* record CRCs for exported symbols */
buf_printf(buf, "\n");
list_for_each_entry(sym, &mod->exported_symbols, list) {
- if (!sym->crc_valid) {
+ if (!sym->crc_valid)
warn("EXPORT symbol \"%s\" [%s%s] version generation failed, symbol will not be versioned.\n"
"Is \"%s\" prototyped in <asm/asm-prototypes.h>?\n",
sym->name, mod->name, mod->is_vmlinux ? "" : ".ko",
sym->name);
- continue;
- }
buf_printf(buf, "SYMBOL_CRC(%s, 0x%08x, \"%s\");\n",
sym->name, sym->crc, sym->is_gpl_only ? "_gpl" : "");
diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
index 044bdfb894b7..1178f40a73f3 100644
--- a/scripts/mod/modpost.h
+++ b/scripts/mod/modpost.h
@@ -26,7 +26,6 @@
#define Elf_Shdr Elf32_Shdr
#define Elf_Sym Elf32_Sym
#define Elf_Addr Elf32_Addr
-#define Elf_Sword Elf64_Sword
#define Elf_Section Elf32_Half
#define ELF_ST_BIND ELF32_ST_BIND
#define ELF_ST_TYPE ELF32_ST_TYPE
@@ -41,7 +40,6 @@
#define Elf_Shdr Elf64_Shdr
#define Elf_Sym Elf64_Sym
#define Elf_Addr Elf64_Addr
-#define Elf_Sword Elf64_Sxword
#define Elf_Section Elf64_Half
#define ELF_ST_BIND ELF64_ST_BIND
#define ELF_ST_TYPE ELF64_ST_TYPE
@@ -158,22 +156,28 @@ static inline int is_shndx_special(unsigned int i)
return i != SHN_XINDEX && i >= SHN_LORESERVE && i <= SHN_HIRESERVE;
}
-/*
- * Move reserved section indices SHN_LORESERVE..SHN_HIRESERVE out of
- * the way to -256..-1, to avoid conflicting with real section
- * indices.
- */
-#define SPECIAL(i) ((i) - (SHN_HIRESERVE + 1))
-
/* Accessor for sym->st_shndx, hides ugliness of "64k sections" */
static inline unsigned int get_secindex(const struct elf_info *info,
const Elf_Sym *sym)
{
- if (is_shndx_special(sym->st_shndx))
- return SPECIAL(sym->st_shndx);
- if (sym->st_shndx != SHN_XINDEX)
- return sym->st_shndx;
- return info->symtab_shndx_start[sym - info->symtab_start];
+ unsigned int index = sym->st_shndx;
+
+ /*
+ * Elf{32,64}_Sym::st_shndx is 2 byte. Big section numbers are available
+ * in the .symtab_shndx section.
+ */
+ if (index == SHN_XINDEX)
+ return info->symtab_shndx_start[sym - info->symtab_start];
+
+ /*
+ * Move reserved section indices SHN_LORESERVE..SHN_HIRESERVE out of
+ * the way to UINT_MAX-255..UINT_MAX, to avoid conflicting with real
+ * section indices.
+ */
+ if (index >= SHN_LORESERVE && index <= SHN_HIRESERVE)
+ return index - SHN_HIRESERVE - 1;
+
+ return index;
}
/* file2alias.c */
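
A short worked example of the reserved-index remapping above, assuming the standard ELF constants SHN_ABS (0xfff1) and SHN_HIRESERVE (0xffff). The unsigned wrap-around lands in UINT_MAX-255..UINT_MAX, which is always >= info->num_sections, so the new bounds check in sec_name() returns "" instead of indexing past info->sechdrs[]. This is an illustrative standalone program, not kernel code.

	#include <stdio.h>

	int main(void)
	{
		unsigned int index = 0xfff1;		/* SHN_ABS */

		index = index - 0xffff - 1;		/* wraps to UINT_MAX - 14 */
		printf("remapped index = %u\n", index);
		return 0;
	}
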
@@ -187,6 +191,7 @@ void get_src_version(const char *modname, char sum[], unsigned sumlen);
/* from modpost.c */
char *read_text_file(const char *filename);
char *get_line(char **stringp);
+void *sym_get_data(const struct elf_info *info, const Elf_Sym *sym);
enum loglevel {
LOG_WARN,
diff --git a/scripts/module.lds.S b/scripts/module.lds.S
index 1d0e1e4dc3d2..3a3aa2354ed8 100644
--- a/scripts/module.lds.S
+++ b/scripts/module.lds.S
@@ -27,6 +27,8 @@ SECTIONS {
.ctors 0 : ALIGN(8) { *(SORT(.ctors.*)) *(.ctors) }
.init_array 0 : ALIGN(8) { *(SORT(.init_array.*)) *(.init_array) }
+ .altinstructions 0 : ALIGN(8) { KEEP(*(.altinstructions)) }
+ __bug_table 0 : ALIGN(8) { KEEP(*(__bug_table)) }
__jump_table 0 : ALIGN(8) { KEEP(*(__jump_table)) }
__patchable_function_entries : { *(__patchable_function_entries) }
diff --git a/scripts/package/mkspec b/scripts/package/mkspec
index 7c477ca7dc98..8fa7c5b8a1a1 100755
--- a/scripts/package/mkspec
+++ b/scripts/package/mkspec
@@ -49,6 +49,9 @@ sed -e '/^DEL/d' -e 's/^\t*//' <<EOF
URL: https://www.kernel.org
$S Source: kernel-$__KERNELRELEASE.tar.gz
Provides: $PROVIDES
+ # $UTS_MACHINE as a fallback of _arch in case
+ # /usr/lib/rpm/platform/*/macros was not included.
+ %define _arch %{?_arch:$UTS_MACHINE}
%define __spec_install_post /usr/lib/rpm/brp-compress || :
%define debug_package %{nil}
diff --git a/scripts/remove-stale-files b/scripts/remove-stale-files
index 379e86c71bed..ccadfa3afb2b 100755
--- a/scripts/remove-stale-files
+++ b/scripts/remove-stale-files
@@ -20,6 +20,8 @@ set -e
# yard. Stale files stay in this file for a while (for some release cycles?),
# then will be really dead and removed from the code base entirely.
+rm -f arch/powerpc/purgatory/kexec-purgatory.c
+
# These were previously generated source files. When you are building the kernel
# with O=, make sure to remove the stale files in the output tree. Otherwise,
# the build system wrongly compiles the stale ones.
@@ -40,6 +42,8 @@ if [ -n "${building_out_of_srctree}" ]; then
done
fi
+rm -f arch/riscv/purgatory/kexec-purgatory.c
+
rm -f scripts/extract-cert
rm -f arch/x86/purgatory/kexec-purgatory.c
diff --git a/scripts/sign-file.c b/scripts/sign-file.c
index 7434e9ea926e..598ef5465f82 100644
--- a/scripts/sign-file.c
+++ b/scripts/sign-file.c
@@ -114,7 +114,7 @@ static void drain_openssl_errors(void)
bool __cond = (cond); \
display_openssl_errors(__LINE__); \
if (__cond) { \
- err(1, fmt, ## __VA_ARGS__); \
+ errx(1, fmt, ## __VA_ARGS__); \
} \
} while(0)
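
Context for the err() to errx() switch above: err(3) appends strerror(errno) to the message, errx(3) does not, and OpenSSL failures do not set errno, so errx() avoids printing an unrelated system-error string. A minimal sketch (the message text is made up):

	#include <err.h>

	int main(void)
	{
		/* errx prints only the format string; err would also
		 * append strerror(errno), misleading here.
		 */
		errx(1, "hypothetical signing failure");
	}
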
diff --git a/scripts/tracing/draw_functrace.py b/scripts/tracing/draw_functrace.py
index 7011fbe003ff..438516bdfb3c 100755
--- a/scripts/tracing/draw_functrace.py
+++ b/scripts/tracing/draw_functrace.py
@@ -8,7 +8,7 @@ This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulted trace is processed into a tree to produce a more human
view of the call stack by drawing textual but hierarchical tree of
-calls. Only the functions's names and the the call time are provided.
+calls. Only the functions's names and the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
diff --git a/security/apparmor/Kconfig b/security/apparmor/Kconfig
index 348ed6cfa08a..cb3496e00d8a 100644
--- a/security/apparmor/Kconfig
+++ b/security/apparmor/Kconfig
@@ -6,8 +6,6 @@ config SECURITY_APPARMOR
select SECURITY_PATH
select SECURITYFS
select SECURITY_NETWORK
- select ZLIB_INFLATE
- select ZLIB_DEFLATE
default n
help
This enables the AppArmor security module.
@@ -17,29 +15,6 @@ config SECURITY_APPARMOR
If you are unsure how to answer this question, answer N.
-config SECURITY_APPARMOR_HASH
- bool "Enable introspection of sha1 hashes for loaded profiles"
- depends on SECURITY_APPARMOR
- select CRYPTO
- select CRYPTO_SHA1
- default y
- help
- This option selects whether introspection of loaded policy
- is available to userspace via the apparmor filesystem.
-
-config SECURITY_APPARMOR_HASH_DEFAULT
- bool "Enable policy hash introspection by default"
- depends on SECURITY_APPARMOR_HASH
- default y
- help
- This option selects whether sha1 hashing of loaded policy
- is enabled by default. The generation of sha1 hashes for
- loaded policy provide system administrators a quick way
- to verify that policy in the kernel matches what is expected,
- however it can slow down policy load on some devices. In
- these cases policy hashing can be disabled by default and
- enabled only if needed.
-
config SECURITY_APPARMOR_DEBUG
bool "Build AppArmor with debug code"
depends on SECURITY_APPARMOR
@@ -69,6 +44,67 @@ config SECURITY_APPARMOR_DEBUG_MESSAGES
When enabled, various debug messages will be logged to
the kernel message buffer.
+config SECURITY_APPARMOR_INTROSPECT_POLICY
+ bool "Allow loaded policy to be introspected"
+ depends on SECURITY_APPARMOR
+ default y
+ help
+ This option selects whether introspection of loaded policy
+ is available to userspace via the apparmor filesystem. This
+ adds to kernel memory usage. It is required for introspection
+ of loaded policy, and check point and restore support. It
+ can be disabled for embedded systems where reducing memory and
+ cpu is paramount.
+
+config SECURITY_APPARMOR_HASH
+ bool "Enable introspection of sha1 hashes for loaded profiles"
+ depends on SECURITY_APPARMOR_INTROSPECT_POLICY
+ select CRYPTO
+ select CRYPTO_SHA1
+ default y
+ help
+ This option selects whether introspection of loaded policy
+ hashes is available to userspace via the apparmor
+ filesystem. This option provides a light weight means of
+ checking loaded policy. This option adds to policy load
+ time and can be disabled for small embedded systems.
+
+config SECURITY_APPARMOR_HASH_DEFAULT
+ bool "Enable policy hash introspection by default"
+ depends on SECURITY_APPARMOR_HASH
+ default y
+ help
+ This option selects whether sha1 hashing of loaded policy
+ is enabled by default. The generation of sha1 hashes for
+ loaded policy provide system administrators a quick way
+ to verify that policy in the kernel matches what is expected,
+ however it can slow down policy load on some devices. In
+ these cases policy hashing can be disabled by default and
+ enabled only if needed.
+
+config SECURITY_APPARMOR_EXPORT_BINARY
+ bool "Allow exporting the raw binary policy"
+ depends on SECURITY_APPARMOR_INTROSPECT_POLICY
+ select ZLIB_INFLATE
+ select ZLIB_DEFLATE
+ default y
+ help
+ This option allows reading back binary policy as it was loaded.
+ It increases the amount of kernel memory needed by policy and
+ also increases policy load time. This option is required for
+ checkpoint and restore support, and debugging of loaded policy.
+
+config SECURITY_APPARMOR_PARANOID_LOAD
+ bool "Perform full verification of loaded policy"
+ depends on SECURITY_APPARMOR
+ default y
+ help
+ This options allows controlling whether apparmor does a full
+ verification of loaded policy. This should not be disabled
+ except for embedded systems where the image is read only,
+ includes policy, and has some form of integrity check.
+ Disabling the check will speed up policy loads.
+
config SECURITY_APPARMOR_KUNIT_TEST
bool "Build KUnit tests for policy_unpack.c" if !KUNIT_ALL_TESTS
depends on KUNIT=y && SECURITY_APPARMOR
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index 0797edb2fb3d..d066ccc219e2 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -36,6 +36,7 @@
#include "include/policy_ns.h"
#include "include/resource.h"
#include "include/policy_unpack.h"
+#include "include/task.h"
/*
* The apparmor filesystem interface used for policy load and introspection
@@ -70,6 +71,7 @@ struct rawdata_f_data {
struct aa_loaddata *loaddata;
};
+#ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
#define RAWDATA_F_DATA_BUF(p) (char *)(p + 1)
static void rawdata_f_data_free(struct rawdata_f_data *private)
@@ -94,9 +96,10 @@ static struct rawdata_f_data *rawdata_f_data_alloc(size_t size)
return ret;
}
+#endif
/**
- * aa_mangle_name - mangle a profile name to std profile layout form
+ * mangle_name - mangle a profile name to std profile layout form
* @name: profile name to mangle (NOT NULL)
* @target: buffer to store mangled name, same length as @name (MAYBE NULL)
*
@@ -401,7 +404,7 @@ static struct aa_loaddata *aa_simple_write_to_buffer(const char __user *userbuf,
data->size = copy_size;
if (copy_from_user(data->data, userbuf, copy_size)) {
- kvfree(data);
+ aa_put_loaddata(data);
return ERR_PTR(-EFAULT);
}
@@ -1201,7 +1204,7 @@ SEQ_NS_FOPS(name);
/* policy/raw_data/ * file ops */
-
+#ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
#define SEQ_RAWDATA_FOPS(NAME) \
static int seq_rawdata_ ##NAME ##_open(struct inode *inode, struct file *file)\
{ \
@@ -1294,44 +1297,47 @@ SEQ_RAWDATA_FOPS(compressed_size);
static int deflate_decompress(char *src, size_t slen, char *dst, size_t dlen)
{
- int error;
- struct z_stream_s strm;
+#ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
+ if (aa_g_rawdata_compression_level != 0) {
+ int error = 0;
+ struct z_stream_s strm;
- if (aa_g_rawdata_compression_level == 0) {
- if (dlen < slen)
- return -EINVAL;
- memcpy(dst, src, slen);
- return 0;
- }
+ memset(&strm, 0, sizeof(strm));
- memset(&strm, 0, sizeof(strm));
+ strm.workspace = kvzalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
+ if (!strm.workspace)
+ return -ENOMEM;
- strm.workspace = kvzalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
- if (!strm.workspace)
- return -ENOMEM;
-
- strm.next_in = src;
- strm.avail_in = slen;
+ strm.next_in = src;
+ strm.avail_in = slen;
- error = zlib_inflateInit(&strm);
- if (error != Z_OK) {
- error = -ENOMEM;
- goto fail_inflate_init;
- }
+ error = zlib_inflateInit(&strm);
+ if (error != Z_OK) {
+ error = -ENOMEM;
+ goto fail_inflate_init;
+ }
- strm.next_out = dst;
- strm.avail_out = dlen;
+ strm.next_out = dst;
+ strm.avail_out = dlen;
- error = zlib_inflate(&strm, Z_FINISH);
- if (error != Z_STREAM_END)
- error = -EINVAL;
- else
- error = 0;
+ error = zlib_inflate(&strm, Z_FINISH);
+ if (error != Z_STREAM_END)
+ error = -EINVAL;
+ else
+ error = 0;
- zlib_inflateEnd(&strm);
+ zlib_inflateEnd(&strm);
fail_inflate_init:
- kvfree(strm.workspace);
- return error;
+ kvfree(strm.workspace);
+
+ return error;
+ }
+#endif
+
+ if (dlen < slen)
+ return -EINVAL;
+ memcpy(dst, src, slen);
+ return 0;
}
static ssize_t rawdata_read(struct file *file, char __user *buf, size_t size,
@@ -1492,10 +1498,12 @@ fail:
return PTR_ERR(dent);
}
+#endif /* CONFIG_SECURITY_APPARMOR_EXPORT_BINARY */
+
/** fns to setup dynamic per profile/namespace files **/
-/**
+/*
*
* Requires: @profile->ns->lock held
*/
@@ -1522,7 +1530,7 @@ void __aafs_profile_rmdir(struct aa_profile *profile)
}
}
-/**
+/*
*
* Requires: @old->ns->lock held
*/
@@ -1557,6 +1565,7 @@ static struct dentry *create_profile_file(struct dentry *dir, const char *name,
return dent;
}
+#ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
static int profile_depth(struct aa_profile *profile)
{
int depth = 0;
@@ -1658,7 +1667,7 @@ static const struct inode_operations rawdata_link_abi_iops = {
static const struct inode_operations rawdata_link_data_iops = {
.get_link = rawdata_get_link_data,
};
-
+#endif /* CONFIG_SECURITY_APPARMOR_EXPORT_BINARY */
/*
* Requires: @profile->ns->lock held
@@ -1729,15 +1738,17 @@ int __aafs_profile_mkdir(struct aa_profile *profile, struct dentry *parent)
profile->dents[AAFS_PROF_HASH] = dent;
}
+#ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
if (profile->rawdata) {
- dent = aafs_create("raw_sha1", S_IFLNK | 0444, dir,
- profile->label.proxy, NULL, NULL,
- &rawdata_link_sha1_iops);
- if (IS_ERR(dent))
- goto fail;
- aa_get_proxy(profile->label.proxy);
- profile->dents[AAFS_PROF_RAW_HASH] = dent;
-
+ if (aa_g_hash_policy) {
+ dent = aafs_create("raw_sha1", S_IFLNK | 0444, dir,
+ profile->label.proxy, NULL, NULL,
+ &rawdata_link_sha1_iops);
+ if (IS_ERR(dent))
+ goto fail;
+ aa_get_proxy(profile->label.proxy);
+ profile->dents[AAFS_PROF_RAW_HASH] = dent;
+ }
dent = aafs_create("raw_abi", S_IFLNK | 0444, dir,
profile->label.proxy, NULL, NULL,
&rawdata_link_abi_iops);
@@ -1754,6 +1765,7 @@ int __aafs_profile_mkdir(struct aa_profile *profile, struct dentry *parent)
aa_get_proxy(profile->label.proxy);
profile->dents[AAFS_PROF_RAW_DATA] = dent;
}
+#endif /*CONFIG_SECURITY_APPARMOR_EXPORT_BINARY */
list_for_each_entry(child, &profile->base.profiles, base.list) {
error = __aafs_profile_mkdir(child, prof_child_dir(profile));
@@ -1880,7 +1892,7 @@ static void __aa_fs_list_remove_rawdata(struct aa_ns *ns)
__aa_fs_remove_rawdata(ent);
}
-/**
+/*
*
* Requires: @ns->lock held
*/
@@ -2323,6 +2335,7 @@ static struct aa_sfs_entry aa_sfs_entry_versions[] = {
AA_SFS_FILE_BOOLEAN("v6", 1),
AA_SFS_FILE_BOOLEAN("v7", 1),
AA_SFS_FILE_BOOLEAN("v8", 1),
+ AA_SFS_FILE_BOOLEAN("v9", 1),
{ }
};
diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c
index f7e97c7e80f3..704b0c895605 100644
--- a/security/apparmor/audit.c
+++ b/security/apparmor/audit.c
@@ -137,7 +137,7 @@ int aa_audit(int type, struct aa_profile *profile, struct common_audit_data *sa,
}
if (AUDIT_MODE(profile) == AUDIT_QUIET ||
(type == AUDIT_APPARMOR_DENIED &&
- AUDIT_MODE(profile) == AUDIT_QUIET))
+ AUDIT_MODE(profile) == AUDIT_QUIET_DENIED))
return aad(sa)->error;
if (KILL_MODE(profile) && type == AUDIT_APPARMOR_DENIED)
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index a29e69d2c300..91689d34d281 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -119,7 +119,7 @@ static inline unsigned int match_component(struct aa_profile *profile,
* @profile: profile to find perms for
* @label: label to check access permissions for
* @stack: whether this is a stacking request
- * @start: state to start match in
+ * @state: state to start match in
* @subns: whether to do permission checks on components in a subns
* @request: permissions to request
* @perms: perms struct to set
@@ -466,7 +466,7 @@ restart:
* xattrs, or a longer match
*/
candidate = profile;
- candidate_len = profile->xmatch_len;
+ candidate_len = max(count, profile->xmatch_len);
candidate_xattrs = ret;
conflict = false;
}
@@ -1279,7 +1279,6 @@ static int change_profile_perms_wrapper(const char *op, const char *name,
/**
* aa_change_profile - perform a one-way profile transition
* @fqname: name of profile may include namespace (NOT NULL)
- * @onexec: whether this transition is to take place immediately or at exec
* @flags: flags affecting change behavior
*
* Change to new profile @name. Unlike with hats, there is no way
diff --git a/security/apparmor/include/apparmor.h b/security/apparmor/include/apparmor.h
index 1fbabdb565a8..9c3fc36a0702 100644
--- a/security/apparmor/include/apparmor.h
+++ b/security/apparmor/include/apparmor.h
@@ -36,6 +36,7 @@ extern enum audit_mode aa_g_audit;
extern bool aa_g_audit_header;
extern bool aa_g_debug;
extern bool aa_g_hash_policy;
+extern bool aa_g_export_binary;
extern int aa_g_rawdata_compression_level;
extern bool aa_g_lock_policy;
extern bool aa_g_logsyscall;
diff --git a/security/apparmor/include/apparmorfs.h b/security/apparmor/include/apparmorfs.h
index 6e14f6cecdb9..1e94904f68d9 100644
--- a/security/apparmor/include/apparmorfs.h
+++ b/security/apparmor/include/apparmorfs.h
@@ -114,7 +114,21 @@ int __aafs_ns_mkdir(struct aa_ns *ns, struct dentry *parent, const char *name,
struct dentry *dent);
struct aa_loaddata;
+
+#ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
void __aa_fs_remove_rawdata(struct aa_loaddata *rawdata);
int __aa_fs_create_rawdata(struct aa_ns *ns, struct aa_loaddata *rawdata);
+#else
+static inline void __aa_fs_remove_rawdata(struct aa_loaddata *rawdata)
+{
+ /* empty stub */
+}
+
+static inline int __aa_fs_create_rawdata(struct aa_ns *ns,
+ struct aa_loaddata *rawdata)
+{
+ return 0;
+}
+#endif /* CONFIG_SECURITY_APPARMOR_EXPORT_BINARY */
#endif /* __AA_APPARMORFS_H */
diff --git a/security/apparmor/include/file.h b/security/apparmor/include/file.h
index 7517605a183d..029cb20e322d 100644
--- a/security/apparmor/include/file.h
+++ b/security/apparmor/include/file.h
@@ -142,6 +142,7 @@ static inline u16 dfa_map_xindex(u16 mask)
*/
#define dfa_user_allow(dfa, state) (((ACCEPT_TABLE(dfa)[state]) & 0x7f) | \
((ACCEPT_TABLE(dfa)[state]) & 0x80000000))
+#define dfa_user_xbits(dfa, state) (((ACCEPT_TABLE(dfa)[state]) >> 7) & 0x7f)
#define dfa_user_audit(dfa, state) ((ACCEPT_TABLE2(dfa)[state]) & 0x7f)
#define dfa_user_quiet(dfa, state) (((ACCEPT_TABLE2(dfa)[state]) >> 7) & 0x7f)
#define dfa_user_xindex(dfa, state) \
@@ -150,6 +151,8 @@ static inline u16 dfa_map_xindex(u16 mask)
#define dfa_other_allow(dfa, state) ((((ACCEPT_TABLE(dfa)[state]) >> 14) & \
0x7f) | \
((ACCEPT_TABLE(dfa)[state]) & 0x80000000))
+#define dfa_other_xbits(dfa, state) \
+ ((((ACCEPT_TABLE(dfa)[state]) >> 7) >> 14) & 0x7f)
#define dfa_other_audit(dfa, state) (((ACCEPT_TABLE2(dfa)[state]) >> 14) & 0x7f)
#define dfa_other_quiet(dfa, state) \
((((ACCEPT_TABLE2(dfa)[state]) >> 7) >> 14) & 0x7f)
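
The two new *_xbits accessors slot into the packed accept-entry layout implied by the neighbouring macros: user allow in bits 0-6 plus bit 31, user xbits in bits 7-13, other allow in bits 14-20 plus bit 31, other xbits in bits 21-27. A standalone decode sketch (the entry value and variable names are illustrative, read off the shifts above rather than taken from AppArmor headers):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t entry = 0x12345678u;	/* one packed ACCEPT_TABLE entry */

		uint32_t user_allow  = (entry & 0x7f) | (entry & 0x80000000u);		/* bits 0-6, 31 */
		uint32_t user_xbits  = (entry >> 7) & 0x7f;				/* bits 7-13 */
		uint32_t other_allow = ((entry >> 14) & 0x7f) | (entry & 0x80000000u);	/* bits 14-20, 31 */
		uint32_t other_xbits = ((entry >> 7) >> 14) & 0x7f;			/* bits 21-27 */

		printf("%#x %#x %#x %#x\n", user_allow, user_xbits, other_allow, other_xbits);
		return 0;
	}
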
diff --git a/security/apparmor/include/ipc.h b/security/apparmor/include/ipc.h
index 9cafd80f7731..a1ac6ffb95e9 100644
--- a/security/apparmor/include/ipc.h
+++ b/security/apparmor/include/ipc.h
@@ -13,24 +13,6 @@
#include <linux/sched.h>
-struct aa_profile;
-
-#define AA_PTRACE_TRACE MAY_WRITE
-#define AA_PTRACE_READ MAY_READ
-#define AA_MAY_BE_TRACED AA_MAY_APPEND
-#define AA_MAY_BE_READ AA_MAY_CREATE
-#define PTRACE_PERM_SHIFT 2
-
-#define AA_PTRACE_PERM_MASK (AA_PTRACE_READ | AA_PTRACE_TRACE | \
- AA_MAY_BE_READ | AA_MAY_BE_TRACED)
-#define AA_SIGNAL_PERM_MASK (MAY_READ | MAY_WRITE)
-
-#define AA_SFS_SIG_MASK "hup int quit ill trap abrt bus fpe kill usr1 " \
- "segv usr2 pipe alrm term stkflt chld cont stop stp ttin ttou urg " \
- "xcpu xfsz vtalrm prof winch io pwr sys emt lost"
-
-int aa_may_ptrace(struct aa_label *tracer, struct aa_label *tracee,
- u32 request);
int aa_may_signal(struct aa_label *sender, struct aa_label *target, int sig);
#endif /* __AA_IPC_H */
diff --git a/security/apparmor/include/label.h b/security/apparmor/include/label.h
index 9101c2c76d9e..860484c6f99a 100644
--- a/security/apparmor/include/label.h
+++ b/security/apparmor/include/label.h
@@ -92,6 +92,8 @@ enum label_flags {
FLAG_STALE = 0x800, /* replaced/removed */
FLAG_RENAMED = 0x1000, /* label has renaming in it */
FLAG_REVOKED = 0x2000, /* label has revocation in it */
+ FLAG_DEBUG1 = 0x4000,
+ FLAG_DEBUG2 = 0x8000,
/* These flags must correspond with PATH_flags */
/* TODO: add new path flags */
diff --git a/security/apparmor/include/lib.h b/security/apparmor/include/lib.h
index e2e8df0c6f1c..f42359f58eb5 100644
--- a/security/apparmor/include/lib.h
+++ b/security/apparmor/include/lib.h
@@ -22,6 +22,11 @@
*/
#define DEBUG_ON (aa_g_debug)
+/*
+ * split individual debug cases out in preparation for finer grained
+ * debug controls in the future.
+ */
+#define AA_DEBUG_LABEL DEBUG_ON
#define dbg_printk(__fmt, __args...) pr_debug(__fmt, ##__args)
#define AA_DEBUG(fmt, args...) \
do { \
diff --git a/security/apparmor/include/path.h b/security/apparmor/include/path.h
index 44a7945fbe3c..343189903dba 100644
--- a/security/apparmor/include/path.h
+++ b/security/apparmor/include/path.h
@@ -17,8 +17,8 @@ enum path_flags {
PATH_CHROOT_REL = 0x8, /* do path lookup relative to chroot */
PATH_CHROOT_NSCONNECT = 0x10, /* connect paths that are at ns root */
- PATH_DELEGATE_DELETED = 0x08000, /* delegate deleted files */
- PATH_MEDIATE_DELETED = 0x10000, /* mediate deleted paths */
+ PATH_DELEGATE_DELETED = 0x10000, /* delegate deleted files */
+ PATH_MEDIATE_DELETED = 0x20000, /* mediate deleted paths */
};
int aa_path_name(const struct path *path, int flags, char *buffer,
diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h
index cb5ef21991b7..639b5b248e63 100644
--- a/security/apparmor/include/policy.h
+++ b/security/apparmor/include/policy.h
@@ -48,6 +48,10 @@ extern const char *const aa_profile_mode_names[];
#define PROFILE_IS_HAT(_profile) ((_profile)->label.flags & FLAG_HAT)
+#define CHECK_DEBUG1(_profile) ((_profile)->label.flags & FLAG_DEBUG1)
+
+#define CHECK_DEBUG2(_profile) ((_profile)->label.flags & FLAG_DEBUG2)
+
#define profile_is_stale(_profile) (label_is_stale(&(_profile)->label))
#define on_list_rcu(X) (!list_empty(X) && (X)->prev != LIST_POISON2)
@@ -135,7 +139,7 @@ struct aa_profile {
const char *attach;
struct aa_dfa *xmatch;
- int xmatch_len;
+ unsigned int xmatch_len;
enum audit_mode audit;
long mode;
u32 path_flags;
diff --git a/security/apparmor/include/policy_ns.h b/security/apparmor/include/policy_ns.h
index 3df6f804922d..33d665516fc1 100644
--- a/security/apparmor/include/policy_ns.h
+++ b/security/apparmor/include/policy_ns.h
@@ -74,6 +74,7 @@ struct aa_ns {
struct dentry *dents[AAFS_NS_SIZEOF];
};
+extern struct aa_label *kernel_t;
extern struct aa_ns *root_ns;
extern const char *aa_hidden_ns_name;
diff --git a/security/apparmor/include/policy_unpack.h b/security/apparmor/include/policy_unpack.h
index e0e1ca7ebc38..eb5f7d7f132b 100644
--- a/security/apparmor/include/policy_unpack.h
+++ b/security/apparmor/include/policy_unpack.h
@@ -28,6 +28,8 @@ void aa_load_ent_free(struct aa_load_ent *ent);
struct aa_load_ent *aa_load_ent_alloc(void);
#define PACKED_FLAG_HAT 1
+#define PACKED_FLAG_DEBUG1 2
+#define PACKED_FLAG_DEBUG2 4
#define PACKED_MODE_ENFORCE 0
#define PACKED_MODE_COMPLAIN 1
diff --git a/security/apparmor/include/secid.h b/security/apparmor/include/secid.h
index 48ff1ddecad5..a912a5d5d04f 100644
--- a/security/apparmor/include/secid.h
+++ b/security/apparmor/include/secid.h
@@ -21,6 +21,9 @@ struct aa_label;
/* secid value that matches any other secid */
#define AA_SECID_WILDCARD 1
+/* sysctl to enable displaying mode when converting secid to secctx */
+extern int apparmor_display_secid_mode;
+
struct aa_label *aa_secid_to_label(u32 secid);
int apparmor_secid_to_secctx(u32 secid, char **secdata, u32 *seclen);
int apparmor_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid);
@@ -31,6 +34,4 @@ int aa_alloc_secid(struct aa_label *label, gfp_t gfp);
void aa_free_secid(u32 secid);
void aa_secid_update(u32 secid, struct aa_label *label);
-void aa_secids_init(void);
-
#endif /* __AA_SECID_H */
diff --git a/security/apparmor/include/task.h b/security/apparmor/include/task.h
index f13d12373b25..13437d62c70f 100644
--- a/security/apparmor/include/task.h
+++ b/security/apparmor/include/task.h
@@ -77,4 +77,22 @@ static inline void aa_clear_task_ctx_trans(struct aa_task_ctx *ctx)
ctx->token = 0;
}
+#define AA_PTRACE_TRACE MAY_WRITE
+#define AA_PTRACE_READ MAY_READ
+#define AA_MAY_BE_TRACED AA_MAY_APPEND
+#define AA_MAY_BE_READ AA_MAY_CREATE
+#define PTRACE_PERM_SHIFT 2
+
+#define AA_PTRACE_PERM_MASK (AA_PTRACE_READ | AA_PTRACE_TRACE | \
+ AA_MAY_BE_READ | AA_MAY_BE_TRACED)
+#define AA_SIGNAL_PERM_MASK (MAY_READ | MAY_WRITE)
+
+#define AA_SFS_SIG_MASK "hup int quit ill trap abrt bus fpe kill usr1 " \
+ "segv usr2 pipe alrm term stkflt chld cont stop stp ttin ttou urg " \
+ "xcpu xfsz vtalrm prof winch io pwr sys emt lost"
+
+int aa_may_ptrace(struct aa_label *tracer, struct aa_label *tracee,
+ u32 request);
+
+
#endif /* __AA_TASK_H */
diff --git a/security/apparmor/ipc.c b/security/apparmor/ipc.c
index fe36d112aad9..3dbbc59d440d 100644
--- a/security/apparmor/ipc.c
+++ b/security/apparmor/ipc.c
@@ -9,7 +9,6 @@
*/
#include <linux/gfp.h>
-#include <linux/ptrace.h>
#include "include/audit.h"
#include "include/capability.h"
@@ -18,115 +17,6 @@
#include "include/ipc.h"
#include "include/sig_names.h"
-/**
- * audit_ptrace_mask - convert mask to permission string
- * @mask: permission mask to convert
- *
- * Returns: pointer to static string
- */
-static const char *audit_ptrace_mask(u32 mask)
-{
- switch (mask) {
- case MAY_READ:
- return "read";
- case MAY_WRITE:
- return "trace";
- case AA_MAY_BE_READ:
- return "readby";
- case AA_MAY_BE_TRACED:
- return "tracedby";
- }
- return "";
-}
-
-/* call back to audit ptrace fields */
-static void audit_ptrace_cb(struct audit_buffer *ab, void *va)
-{
- struct common_audit_data *sa = va;
-
- if (aad(sa)->request & AA_PTRACE_PERM_MASK) {
- audit_log_format(ab, " requested_mask=\"%s\"",
- audit_ptrace_mask(aad(sa)->request));
-
- if (aad(sa)->denied & AA_PTRACE_PERM_MASK) {
- audit_log_format(ab, " denied_mask=\"%s\"",
- audit_ptrace_mask(aad(sa)->denied));
- }
- }
- audit_log_format(ab, " peer=");
- aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
- FLAGS_NONE, GFP_ATOMIC);
-}
-
-/* assumes check for PROFILE_MEDIATES is already done */
-/* TODO: conditionals */
-static int profile_ptrace_perm(struct aa_profile *profile,
- struct aa_label *peer, u32 request,
- struct common_audit_data *sa)
-{
- struct aa_perms perms = { };
-
- aad(sa)->peer = peer;
- aa_profile_match_label(profile, peer, AA_CLASS_PTRACE, request,
- &perms);
- aa_apply_modes_to_perms(profile, &perms);
- return aa_check_perms(profile, &perms, request, sa, audit_ptrace_cb);
-}
-
-static int profile_tracee_perm(struct aa_profile *tracee,
- struct aa_label *tracer, u32 request,
- struct common_audit_data *sa)
-{
- if (profile_unconfined(tracee) || unconfined(tracer) ||
- !PROFILE_MEDIATES(tracee, AA_CLASS_PTRACE))
- return 0;
-
- return profile_ptrace_perm(tracee, tracer, request, sa);
-}
-
-static int profile_tracer_perm(struct aa_profile *tracer,
- struct aa_label *tracee, u32 request,
- struct common_audit_data *sa)
-{
- if (profile_unconfined(tracer))
- return 0;
-
- if (PROFILE_MEDIATES(tracer, AA_CLASS_PTRACE))
- return profile_ptrace_perm(tracer, tracee, request, sa);
-
- /* profile uses the old style capability check for ptrace */
- if (&tracer->label == tracee)
- return 0;
-
- aad(sa)->label = &tracer->label;
- aad(sa)->peer = tracee;
- aad(sa)->request = 0;
- aad(sa)->error = aa_capable(&tracer->label, CAP_SYS_PTRACE,
- CAP_OPT_NONE);
-
- return aa_audit(AUDIT_APPARMOR_AUTO, tracer, sa, audit_ptrace_cb);
-}
-
-/**
- * aa_may_ptrace - test if tracer task can trace the tracee
- * @tracer: label of the task doing the tracing (NOT NULL)
- * @tracee: task label to be traced
- * @request: permission request
- *
- * Returns: %0 else error code if permission denied or error
- */
-int aa_may_ptrace(struct aa_label *tracer, struct aa_label *tracee,
- u32 request)
-{
- struct aa_profile *profile;
- u32 xrequest = request << PTRACE_PERM_SHIFT;
- DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, OP_PTRACE);
-
- return xcheck_labels(tracer, tracee, profile,
- profile_tracer_perm(profile, tracee, request, &sa),
- profile_tracee_perm(profile, tracer, xrequest, &sa));
-}
-
static inline int map_signal_num(int sig)
{
diff --git a/security/apparmor/label.c b/security/apparmor/label.c
index 0b0265da1926..0f36ee907438 100644
--- a/security/apparmor/label.c
+++ b/security/apparmor/label.c
@@ -197,18 +197,18 @@ static bool vec_is_stale(struct aa_profile **vec, int n)
return false;
}
-static bool vec_unconfined(struct aa_profile **vec, int n)
+static long union_vec_flags(struct aa_profile **vec, int n, long mask)
{
+ long u = 0;
int i;
AA_BUG(!vec);
for (i = 0; i < n; i++) {
- if (!profile_unconfined(vec[i]))
- return false;
+ u |= vec[i]->label.flags & mask;
}
- return true;
+ return u;
}
static int sort_cmp(const void *a, const void *b)
@@ -485,7 +485,7 @@ int aa_label_next_confined(struct aa_label *label, int i)
}
/**
- * aa_label_next_not_in_set - return the next profile of @sub not in @set
+ * __aa_label_next_not_in_set - return the next profile of @sub not in @set
* @I: label iterator
* @set: label to test against
* @sub: label to if is subset of @set
@@ -1097,8 +1097,8 @@ static struct aa_label *label_merge_insert(struct aa_label *new,
else if (k == b->size)
return aa_get_label(b);
}
- if (vec_unconfined(new->vec, new->size))
- new->flags |= FLAG_UNCONFINED;
+ new->flags |= union_vec_flags(new->vec, new->size, FLAG_UNCONFINED |
+ FLAG_DEBUG1 | FLAG_DEBUG2);
ls = labels_set(new);
write_lock_irqsave(&ls->lock, flags);
label = __label_insert(labels_set(new), new, false);
@@ -1631,9 +1631,9 @@ int aa_label_snxprint(char *str, size_t size, struct aa_ns *ns,
AA_BUG(!str && size != 0);
AA_BUG(!label);
- if (flags & FLAG_ABS_ROOT) {
+ if (AA_DEBUG_LABEL && (flags & FLAG_ABS_ROOT)) {
ns = root_ns;
- len = snprintf(str, size, "=");
+ len = snprintf(str, size, "_");
update_for_len(total, len, size, str);
} else if (!ns) {
ns = labels_ns(label);
@@ -1744,7 +1744,7 @@ void aa_label_xaudit(struct audit_buffer *ab, struct aa_ns *ns,
if (!use_label_hname(ns, label, flags) ||
display_mode(ns, label, flags)) {
len = aa_label_asxprint(&name, ns, label, flags, gfp);
- if (len == -1) {
+ if (len < 0) {
AA_DEBUG("label print error");
return;
}
@@ -1772,7 +1772,7 @@ void aa_label_seq_xprint(struct seq_file *f, struct aa_ns *ns,
int len;
len = aa_label_asxprint(&str, ns, label, flags, gfp);
- if (len == -1) {
+ if (len < 0) {
AA_DEBUG("label print error");
return;
}
@@ -1795,7 +1795,7 @@ void aa_label_xprintk(struct aa_ns *ns, struct aa_label *label, int flags,
int len;
len = aa_label_asxprint(&str, ns, label, flags, gfp);
- if (len == -1) {
+ if (len < 0) {
AA_DEBUG("label print error");
return;
}
@@ -1895,7 +1895,8 @@ struct aa_label *aa_label_strn_parse(struct aa_label *base, const char *str,
AA_BUG(!str);
str = skipn_spaces(str, n);
- if (str == NULL || (*str == '=' && base != &root_ns->unconfined->label))
+ if (str == NULL || (AA_DEBUG_LABEL && *str == '_' &&
+ base != &root_ns->unconfined->label))
return ERR_PTR(-EINVAL);
len = label_count_strn_entries(str, end - str);
@@ -2136,7 +2137,7 @@ static void __labelset_update(struct aa_ns *ns)
}
/**
- * __aa_labelset_udate_subtree - update all labels with a stale component
+ * __aa_labelset_update_subtree - update all labels with a stale component
* @ns: ns to start update at (NOT NULL)
*
* Requires: @ns lock be held
diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
index fa49b81eb54c..1c72a61108d3 100644
--- a/security/apparmor/lib.c
+++ b/security/apparmor/lib.c
@@ -136,7 +136,7 @@ __counted char *aa_str_alloc(int size, gfp_t gfp)
{
struct counted_str *str;
- str = kmalloc(sizeof(struct counted_str) + size, gfp);
+ str = kmalloc(struct_size(str, name, size), gfp);
if (!str)
return NULL;
@@ -322,22 +322,39 @@ static u32 map_other(u32 x)
((x & 0x60) << 19); /* SETOPT/GETOPT */
}
+static u32 map_xbits(u32 x)
+{
+ return ((x & 0x1) << 7) |
+ ((x & 0x7e) << 9);
+}
+
void aa_compute_perms(struct aa_dfa *dfa, unsigned int state,
struct aa_perms *perms)
{
+ /* This mapping is convoluted due to history.
+ * v1-v4: only file perms
+ * v5: added policydb which dropped in perm user conditional to
+ * gain new perm bits, but had to map around the xbits because
+ * the userspace compiler was still munging them.
+ * v9: adds using the xbits in policydb because the compiler now
+ * supports treating policydb permission bits differently.
+ * Unfortunately there is no way to force auditing on the
+ * perms represented by the xbits.
+ */
*perms = (struct aa_perms) {
- .allow = dfa_user_allow(dfa, state),
+ .allow = dfa_user_allow(dfa, state) |
+ map_xbits(dfa_user_xbits(dfa, state)),
.audit = dfa_user_audit(dfa, state),
- .quiet = dfa_user_quiet(dfa, state),
+ .quiet = dfa_user_quiet(dfa, state) |
+ map_xbits(dfa_other_xbits(dfa, state)),
};
- /* for v5 perm mapping in the policydb, the other set is used
+ /* for v5-v9 perm mapping in the policydb, the other set is used
* to extend the general perm set
*/
perms->allow |= map_other(dfa_other_allow(dfa, state));
perms->audit |= map_other(dfa_other_audit(dfa, state));
perms->quiet |= map_other(dfa_other_quiet(dfa, state));
-// perms->xindex = dfa_user_xindex(dfa, state);
}
/**
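
As a rough illustration of the xbits remapping in the lib.c hunk above: a minimal
userspace sketch, not kernel code. The accept-table value is made up, and the two
helpers simply repeat the shift/mask arithmetic of dfa_user_xbits() and map_xbits().

/* Userspace illustration only: mirrors dfa_user_xbits() and map_xbits()
 * from the hunk above on a made-up accept-table word.
 */
#include <stdio.h>

/* same arithmetic as dfa_user_xbits(): extract bits 7..13 of the entry */
static unsigned int user_xbits(unsigned int accept)
{
	return (accept >> 7) & 0x7f;
}

/* same arithmetic as map_xbits(): bit 0 -> bit 7, bits 1..6 -> bits 10..15 */
static unsigned int map_xbits_demo(unsigned int x)
{
	return ((x & 0x1) << 7) | ((x & 0x7e) << 9);
}

int main(void)
{
	unsigned int accept = 0x3 << 7;	/* made-up entry: xbits 0 and 1 set */
	unsigned int x = user_xbits(accept);

	printf("xbits=0x%02x mapped=0x%04x\n", x, map_xbits_demo(x));
	/* prints: xbits=0x03 mapped=0x0480 */
	return 0;
}
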
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 900bc540656a..e29cade7b662 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -832,7 +832,7 @@ static void apparmor_sk_free_security(struct sock *sk)
}
/**
- * apparmor_clone_security - clone the sk_security field
+ * apparmor_sk_clone_security - clone the sk_security field
*/
static void apparmor_sk_clone_security(const struct sock *sk,
struct sock *newsk)
@@ -886,10 +886,7 @@ static int apparmor_socket_post_create(struct socket *sock, int family,
struct aa_label *label;
if (kern) {
- struct aa_ns *ns = aa_get_current_ns();
-
- label = aa_get_label(ns_unconfined(ns));
- aa_put_ns(ns);
+ label = aa_get_label(kernel_t);
} else
label = aa_get_current_label();
@@ -937,7 +934,7 @@ static int apparmor_socket_connect(struct socket *sock,
}
/**
- * apparmor_socket_list - check perms before allowing listen
+ * apparmor_socket_listen - check perms before allowing listen
*/
static int apparmor_socket_listen(struct socket *sock, int backlog)
{
@@ -1041,7 +1038,7 @@ static int aa_sock_opt_perm(const char *op, u32 request, struct socket *sock,
}
/**
- * apparmor_getsockopt - check perms before getting socket options
+ * apparmor_socket_getsockopt - check perms before getting socket options
*/
static int apparmor_socket_getsockopt(struct socket *sock, int level,
int optname)
@@ -1051,7 +1048,7 @@ static int apparmor_socket_getsockopt(struct socket *sock, int level,
}
/**
- * apparmor_setsockopt - check perms before setting socket options
+ * apparmor_socket_setsockopt - check perms before setting socket options
*/
static int apparmor_socket_setsockopt(struct socket *sock, int level,
int optname)
@@ -1070,7 +1067,7 @@ static int apparmor_socket_shutdown(struct socket *sock, int how)
#ifdef CONFIG_NETWORK_SECMARK
/**
- * apparmor_socket_sock_recv_skb - check perms before associating skb to sk
+ * apparmor_socket_sock_rcv_skb - check perms before associating skb to sk
*
* Note: can not sleep may be called with locks held
*
@@ -1357,6 +1354,12 @@ bool aa_g_hash_policy = IS_ENABLED(CONFIG_SECURITY_APPARMOR_HASH_DEFAULT);
module_param_named(hash_policy, aa_g_hash_policy, aabool, S_IRUSR | S_IWUSR);
#endif
+/* whether policy exactly as loaded is retained for debug and checkpointing */
+bool aa_g_export_binary = IS_ENABLED(CONFIG_SECURITY_APPARMOR_EXPORT_BINARY);
+#ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
+module_param_named(export_binary, aa_g_export_binary, aabool, 0600);
+#endif
+
/* policy loaddata compression level */
int aa_g_rawdata_compression_level = Z_DEFAULT_COMPRESSION;
module_param_named(rawdata_compression_level, aa_g_rawdata_compression_level,
@@ -1399,7 +1402,7 @@ module_param_named(path_max, aa_g_path_max, aauint, S_IRUSR);
* DEPRECATED: read only as strict checking of load is always done now
* that none root users (user namespaces) can load policy.
*/
-bool aa_g_paranoid_load = true;
+bool aa_g_paranoid_load = IS_ENABLED(CONFIG_SECURITY_APPARMOR_PARANOID_LOAD);
module_param_named(paranoid_load, aa_g_paranoid_load, aabool, S_IRUGO);
static int param_get_aaintbool(char *buffer, const struct kernel_param *kp);
@@ -1761,6 +1764,14 @@ static struct ctl_table apparmor_sysctl_table[] = {
.mode = 0600,
.proc_handler = apparmor_dointvec,
},
+ {
+ .procname = "apparmor_display_secid_mode",
+ .data = &apparmor_display_secid_mode,
+ .maxlen = sizeof(int),
+ .mode = 0600,
+ .proc_handler = apparmor_dointvec,
+ },
+
{ }
};
@@ -1819,11 +1830,8 @@ static const struct nf_hook_ops apparmor_nf_ops[] = {
static int __net_init apparmor_nf_register(struct net *net)
{
- int ret;
-
- ret = nf_register_net_hooks(net, apparmor_nf_ops,
+ return nf_register_net_hooks(net, apparmor_nf_ops,
ARRAY_SIZE(apparmor_nf_ops));
- return ret;
}
static void __net_exit apparmor_nf_unregister(struct net *net)
@@ -1857,8 +1865,6 @@ static int __init apparmor_init(void)
{
int error;
- aa_secids_init();
-
error = aa_setup_dfa_engine();
if (error) {
AA_ERROR("Unable to setup dfa engine\n");
diff --git a/security/apparmor/mount.c b/security/apparmor/mount.c
index aa6fcfde3051..f61247241803 100644
--- a/security/apparmor/mount.c
+++ b/security/apparmor/mount.c
@@ -217,7 +217,6 @@ static struct aa_perms compute_mnt_perms(struct aa_dfa *dfa,
.allow = dfa_user_allow(dfa, state),
.audit = dfa_user_audit(dfa, state),
.quiet = dfa_user_quiet(dfa, state),
- .xindex = dfa_user_xindex(dfa, state),
};
return perms;
@@ -229,7 +228,8 @@ static const char * const mnt_info_table[] = {
"failed srcname match",
"failed type match",
"failed flags match",
- "failed data match"
+ "failed data match",
+ "failed perms check"
};
/*
@@ -284,8 +284,8 @@ static int do_match_mnt(struct aa_dfa *dfa, unsigned int start,
return 0;
}
- /* failed at end of flags match */
- return 4;
+ /* failed at perms check, don't confuse with flags match */
+ return 6;
}
@@ -303,7 +303,7 @@ static int path_flags(struct aa_profile *profile, const struct path *path)
* @profile: the confining profile
* @mntpath: for the mntpnt (NOT NULL)
* @buffer: buffer to be used to lookup mntpath
- * @devnme: string for the devname/src_name (MAY BE NULL OR ERRPTR)
+ * @devname: string for the devname/src_name (MAY BE NULL OR ERRPTR)
* @type: string for the dev type (MAYBE NULL)
* @flags: mount flags to match
* @data: fs mount data (MAYBE NULL)
@@ -358,7 +358,7 @@ audit:
/**
* match_mnt - handle path matching for mount
* @profile: the confining profile
- * @mntpath: for the mntpnt (NOT NULL)
+ * @path: for the mntpnt (NOT NULL)
* @buffer: buffer to be used to lookup mntpath
* @devpath: path devname/src_name (MAYBE NULL)
* @devbuffer: buffer to be used to lookup devname/src_name
@@ -718,6 +718,7 @@ int aa_pivotroot(struct aa_label *label, const struct path *old_path,
aa_put_label(target);
goto out;
}
+ aa_put_label(target);
} else
/* already audited error */
error = PTR_ERR(target);
diff --git a/security/apparmor/net.c b/security/apparmor/net.c
index e0c1b50d6edd..7efe4d17273d 100644
--- a/security/apparmor/net.c
+++ b/security/apparmor/net.c
@@ -145,12 +145,13 @@ int aa_af_perm(struct aa_label *label, const char *op, u32 request, u16 family,
static int aa_label_sk_perm(struct aa_label *label, const char *op, u32 request,
struct sock *sk)
{
+ struct aa_sk_ctx *ctx = SK_CTX(sk);
int error = 0;
AA_BUG(!label);
AA_BUG(!sk);
- if (!unconfined(label)) {
+ if (ctx->label != kernel_t && !unconfined(label)) {
struct aa_profile *profile;
DEFINE_AUDIT_SK(sa, op, sk);
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
index b0cbc4906cb3..499c0209b6a4 100644
--- a/security/apparmor/policy.c
+++ b/security/apparmor/policy.c
@@ -422,7 +422,7 @@ static struct aa_profile *__lookup_profile(struct aa_policy *base,
}
/**
- * aa_lookup_profile - find a profile by its full or partial name
+ * aa_lookupn_profile - find a profile by its full or partial name
* @ns: the namespace to start from (NOT NULL)
* @hname: name to do lookup on. Does not contain namespace prefix (NOT NULL)
* @n: size of @hname
@@ -952,16 +952,18 @@ ssize_t aa_replace_profiles(struct aa_ns *policy_ns, struct aa_label *label,
mutex_lock_nested(&ns->lock, ns->level);
/* check for duplicate rawdata blobs: space and file dedup */
- list_for_each_entry(rawdata_ent, &ns->rawdata_list, list) {
- if (aa_rawdata_eq(rawdata_ent, udata)) {
- struct aa_loaddata *tmp;
-
- tmp = __aa_get_loaddata(rawdata_ent);
- /* check we didn't fail the race */
- if (tmp) {
- aa_put_loaddata(udata);
- udata = tmp;
- break;
+ if (!list_empty(&ns->rawdata_list)) {
+ list_for_each_entry(rawdata_ent, &ns->rawdata_list, list) {
+ if (aa_rawdata_eq(rawdata_ent, udata)) {
+ struct aa_loaddata *tmp;
+
+ tmp = __aa_get_loaddata(rawdata_ent);
+ /* check we didn't fail the race */
+ if (tmp) {
+ aa_put_loaddata(udata);
+ udata = tmp;
+ break;
+ }
}
}
}
@@ -969,7 +971,8 @@ ssize_t aa_replace_profiles(struct aa_ns *policy_ns, struct aa_label *label,
list_for_each_entry(ent, &lh, list) {
struct aa_policy *policy;
- ent->new->rawdata = aa_get_loaddata(udata);
+ if (aa_g_export_binary)
+ ent->new->rawdata = aa_get_loaddata(udata);
error = __lookup_replace(ns, ent->new->base.hname,
!(mask & AA_MAY_REPLACE_POLICY),
&ent->old, &info);
@@ -1009,7 +1012,7 @@ ssize_t aa_replace_profiles(struct aa_ns *policy_ns, struct aa_label *label,
}
/* create new fs entries for introspection if needed */
- if (!udata->dents[AAFS_LOADDATA_DIR]) {
+ if (!udata->dents[AAFS_LOADDATA_DIR] && aa_g_export_binary) {
error = __aa_fs_create_rawdata(ns, udata);
if (error) {
info = "failed to create raw_data dir and files";
@@ -1037,12 +1040,14 @@ ssize_t aa_replace_profiles(struct aa_ns *policy_ns, struct aa_label *label,
/* Done with checks that may fail - do actual replacement */
__aa_bump_ns_revision(ns);
- __aa_loaddata_update(udata, ns->revision);
+ if (aa_g_export_binary)
+ __aa_loaddata_update(udata, ns->revision);
list_for_each_entry_safe(ent, tmp, &lh, list) {
list_del_init(&ent->list);
op = (!ent->old && !ent->rename) ? OP_PROF_LOAD : OP_PROF_REPL;
- if (ent->old && ent->old->rawdata == ent->new->rawdata) {
+ if (ent->old && ent->old->rawdata == ent->new->rawdata &&
+ ent->new->rawdata) {
/* dedup actual profile replacement */
audit_policy(label, op, ns_name, ent->new->base.hname,
"same as current profile, skipping",
diff --git a/security/apparmor/policy_ns.c b/security/apparmor/policy_ns.c
index 70921d95fb40..43beaad083fe 100644
--- a/security/apparmor/policy_ns.c
+++ b/security/apparmor/policy_ns.c
@@ -22,6 +22,9 @@
#include "include/label.h"
#include "include/policy.h"
+/* kernel label */
+struct aa_label *kernel_t;
+
/* root profile namespace */
struct aa_ns *root_ns;
const char *aa_hidden_ns_name = "---";
@@ -51,10 +54,10 @@ bool aa_ns_visible(struct aa_ns *curr, struct aa_ns *view, bool subns)
}
/**
- * aa_na_name - Find the ns name to display for @view from @curr
- * @curr - current namespace (NOT NULL)
- * @view - namespace attempting to view (NOT NULL)
- * @subns - are subns visible
+ * aa_ns_name - Find the ns name to display for @view from @curr
+ * @curr: current namespace (NOT NULL)
+ * @view: namespace attempting to view (NOT NULL)
+ * @subns: are subns visible
*
* Returns: name of @view visible from @curr
*/
@@ -77,6 +80,23 @@ const char *aa_ns_name(struct aa_ns *curr, struct aa_ns *view, bool subns)
return aa_hidden_ns_name;
}
+static struct aa_profile *alloc_unconfined(const char *name)
+{
+ struct aa_profile *profile;
+
+ profile = aa_alloc_profile(name, NULL, GFP_KERNEL);
+ if (!profile)
+ return NULL;
+
+ profile->label.flags |= FLAG_IX_ON_NAME_ERROR |
+ FLAG_IMMUTIBLE | FLAG_NS_COUNT | FLAG_UNCONFINED;
+ profile->mode = APPARMOR_UNCONFINED;
+ profile->file.dfa = aa_get_dfa(nulldfa);
+ profile->policy.dfa = aa_get_dfa(nulldfa);
+
+ return profile;
+}
+
/**
* alloc_ns - allocate, initialize and return a new namespace
* @prefix: parent namespace name (MAYBE NULL)
@@ -101,16 +121,9 @@ static struct aa_ns *alloc_ns(const char *prefix, const char *name)
init_waitqueue_head(&ns->wait);
/* released by aa_free_ns() */
- ns->unconfined = aa_alloc_profile("unconfined", NULL, GFP_KERNEL);
+ ns->unconfined = alloc_unconfined("unconfined");
if (!ns->unconfined)
goto fail_unconfined;
-
- ns->unconfined->label.flags |= FLAG_IX_ON_NAME_ERROR |
- FLAG_IMMUTIBLE | FLAG_NS_COUNT | FLAG_UNCONFINED;
- ns->unconfined->mode = APPARMOR_UNCONFINED;
- ns->unconfined->file.dfa = aa_get_dfa(nulldfa);
- ns->unconfined->policy.dfa = aa_get_dfa(nulldfa);
-
/* ns and ns->unconfined share ns->unconfined refcount */
ns->unconfined->ns = ns;
@@ -187,7 +200,7 @@ struct aa_ns *aa_find_ns(struct aa_ns *root, const char *name)
/**
* __aa_lookupn_ns - lookup the namespace matching @hname
- * @base: base list to start looking up profile name from (NOT NULL)
+ * @view: namespace to search in (NOT NULL)
* @hname: hierarchical ns name (NOT NULL)
* @n: length of @hname
*
@@ -272,7 +285,7 @@ static struct aa_ns *__aa_create_ns(struct aa_ns *parent, const char *name,
}
/**
- * aa_create_ns - create an ns, fail if it already exists
+ * __aa_find_or_create_ns - create an ns, fail if it already exists
* @parent: the parent of the namespace being created
* @name: the name of the namespace
* @dir: if not null the dir to put the ns entries in
@@ -388,11 +401,22 @@ static void __ns_list_release(struct list_head *head)
*/
int __init aa_alloc_root_ns(void)
{
+ struct aa_profile *kernel_p;
+
/* released by aa_free_root_ns - used as list ref*/
root_ns = alloc_ns(NULL, "root");
if (!root_ns)
return -ENOMEM;
+ kernel_p = alloc_unconfined("kernel_t");
+ if (!kernel_p) {
+ destroy_ns(root_ns);
+ aa_free_ns(root_ns);
+ return -ENOMEM;
+ }
+ kernel_t = &kernel_p->label;
+ root_ns->unconfined->ns = aa_get_ns(root_ns);
+
return 0;
}
@@ -405,6 +429,7 @@ void __init aa_free_root_ns(void)
root_ns = NULL;
+ aa_label_free(kernel_t);
destroy_ns(ns);
aa_put_ns(ns);
}
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index 0acca6f2a93f..55d31bac4f35 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -125,15 +125,16 @@ void __aa_loaddata_update(struct aa_loaddata *data, long revision)
{
AA_BUG(!data);
AA_BUG(!data->ns);
- AA_BUG(!data->dents[AAFS_LOADDATA_REVISION]);
AA_BUG(!mutex_is_locked(&data->ns->lock));
AA_BUG(data->revision > revision);
data->revision = revision;
- d_inode(data->dents[AAFS_LOADDATA_DIR])->i_mtime =
- current_time(d_inode(data->dents[AAFS_LOADDATA_DIR]));
- d_inode(data->dents[AAFS_LOADDATA_REVISION])->i_mtime =
- current_time(d_inode(data->dents[AAFS_LOADDATA_REVISION]));
+ if ((data->dents[AAFS_LOADDATA_REVISION])) {
+ d_inode(data->dents[AAFS_LOADDATA_DIR])->i_mtime =
+ current_time(d_inode(data->dents[AAFS_LOADDATA_DIR]));
+ d_inode(data->dents[AAFS_LOADDATA_REVISION])->i_mtime =
+ current_time(d_inode(data->dents[AAFS_LOADDATA_REVISION]));
+ }
}
bool aa_rawdata_eq(struct aa_loaddata *l, struct aa_loaddata *r)
@@ -213,7 +214,7 @@ static void *kvmemdup(const void *src, size_t len)
}
/**
- * aa_u16_chunck - test and do bounds checking for a u16 size based chunk
+ * unpack_u16_chunk - test and do bounds checking for a u16 size based chunk
* @e: serialized data read head (NOT NULL)
* @chunk: start address for chunk of data (NOT NULL)
*
@@ -456,7 +457,9 @@ static struct aa_dfa *unpack_dfa(struct aa_ext *e)
((e->pos - e->start) & 7);
size_t pad = ALIGN(sz, 8) - sz;
int flags = TO_ACCEPT1_FLAG(YYTD_DATA32) |
- TO_ACCEPT2_FLAG(YYTD_DATA32) | DFA_FLAG_VERIFY_STATES;
+ TO_ACCEPT2_FLAG(YYTD_DATA32);
+ if (aa_g_paranoid_load)
+ flags |= DFA_FLAG_VERIFY_STATES;
dfa = aa_dfa_unpack(blob + pad, size - pad, flags);
if (IS_ERR(dfa))
@@ -668,6 +671,7 @@ static int datacmp(struct rhashtable_compare_arg *arg, const void *obj)
/**
* unpack_profile - unpack a serialized profile
* @e: serialized data extent information (NOT NULL)
+ * @ns_name: pointer set to a newly allocated copy of the ns name, or %NULL in case of error
*
* NOTE: unpack profile sets audit struct if there is a failure
*/
@@ -744,18 +748,24 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
goto fail;
if (tmp & PACKED_FLAG_HAT)
profile->label.flags |= FLAG_HAT;
+ if (tmp & PACKED_FLAG_DEBUG1)
+ profile->label.flags |= FLAG_DEBUG1;
+ if (tmp & PACKED_FLAG_DEBUG2)
+ profile->label.flags |= FLAG_DEBUG2;
if (!unpack_u32(e, &tmp, NULL))
goto fail;
- if (tmp == PACKED_MODE_COMPLAIN || (e->version & FORCE_COMPLAIN_FLAG))
+ if (tmp == PACKED_MODE_COMPLAIN || (e->version & FORCE_COMPLAIN_FLAG)) {
profile->mode = APPARMOR_COMPLAIN;
- else if (tmp == PACKED_MODE_ENFORCE)
+ } else if (tmp == PACKED_MODE_ENFORCE) {
profile->mode = APPARMOR_ENFORCE;
- else if (tmp == PACKED_MODE_KILL)
+ } else if (tmp == PACKED_MODE_KILL) {
profile->mode = APPARMOR_KILL;
- else if (tmp == PACKED_MODE_UNCONFINED)
+ } else if (tmp == PACKED_MODE_UNCONFINED) {
profile->mode = APPARMOR_UNCONFINED;
- else
+ profile->label.flags |= FLAG_UNCONFINED;
+ } else {
goto fail;
+ }
if (!unpack_u32(e, &tmp, NULL))
goto fail;
if (tmp)
@@ -936,7 +946,7 @@ fail:
}
/**
- * verify_head - unpack serialized stream header
+ * verify_header - unpack serialized stream header
* @e: serialized data read head (NOT NULL)
* @required: whether the header is required or optional
* @ns: Returns - namespace if one is specified else NULL (NOT NULL)
@@ -1052,6 +1062,7 @@ struct aa_load_ent *aa_load_ent_alloc(void)
static int deflate_compress(const char *src, size_t slen, char **dst,
size_t *dlen)
{
+#ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
int error;
struct z_stream_s strm;
void *stgbuf, *dstbuf;
@@ -1123,6 +1134,10 @@ fail_deflate_init:
fail_deflate:
kvfree(stgbuf);
goto fail_stg_alloc;
+#else
+ *dlen = slen;
+ return 0;
+#endif
}
static int compress_loaddata(struct aa_loaddata *data)
@@ -1141,7 +1156,8 @@ static int compress_loaddata(struct aa_loaddata *data)
if (error)
return error;
- kvfree(udata);
+ if (udata != data->data)
+ kvfree(udata);
} else
data->compressed_size = data->size;
@@ -1216,9 +1232,12 @@ int aa_unpack(struct aa_loaddata *udata, struct list_head *lh,
goto fail;
}
}
- error = compress_loaddata(udata);
- if (error)
- goto fail;
+
+ if (aa_g_export_binary) {
+ error = compress_loaddata(udata);
+ if (error)
+ goto fail;
+ }
return 0;
fail_profile:
diff --git a/security/apparmor/policy_unpack_test.c b/security/apparmor/policy_unpack_test.c
index 7954cb23d5f2..0a969b2e03db 100644
--- a/security/apparmor/policy_unpack_test.c
+++ b/security/apparmor/policy_unpack_test.c
@@ -48,8 +48,8 @@ struct policy_unpack_fixture {
size_t e_size;
};
-struct aa_ext *build_aa_ext_struct(struct policy_unpack_fixture *puf,
- struct kunit *test, size_t buf_size)
+static struct aa_ext *build_aa_ext_struct(struct policy_unpack_fixture *puf,
+ struct kunit *test, size_t buf_size)
{
char *buf;
struct aa_ext *e;
@@ -439,7 +439,7 @@ static void policy_unpack_test_unpack_u32_with_null_name(struct kunit *test)
{
struct policy_unpack_fixture *puf = test->priv;
bool success;
- u32 data;
+ u32 data = 0;
puf->e->pos += TEST_U32_BUF_OFFSET;
@@ -456,7 +456,7 @@ static void policy_unpack_test_unpack_u32_with_name(struct kunit *test)
struct policy_unpack_fixture *puf = test->priv;
const char name[] = TEST_U32_NAME;
bool success;
- u32 data;
+ u32 data = 0;
puf->e->pos += TEST_NAMED_U32_BUF_OFFSET;
@@ -473,7 +473,7 @@ static void policy_unpack_test_unpack_u32_out_of_bounds(struct kunit *test)
struct policy_unpack_fixture *puf = test->priv;
const char name[] = TEST_U32_NAME;
bool success;
- u32 data;
+ u32 data = 0;
puf->e->pos += TEST_NAMED_U32_BUF_OFFSET;
puf->e->end = puf->e->start + TEST_U32_BUF_OFFSET + sizeof(u32);
@@ -489,7 +489,7 @@ static void policy_unpack_test_unpack_u64_with_null_name(struct kunit *test)
{
struct policy_unpack_fixture *puf = test->priv;
bool success;
- u64 data;
+ u64 data = 0;
puf->e->pos += TEST_U64_BUF_OFFSET;
@@ -506,7 +506,7 @@ static void policy_unpack_test_unpack_u64_with_name(struct kunit *test)
struct policy_unpack_fixture *puf = test->priv;
const char name[] = TEST_U64_NAME;
bool success;
- u64 data;
+ u64 data = 0;
puf->e->pos += TEST_NAMED_U64_BUF_OFFSET;
@@ -523,7 +523,7 @@ static void policy_unpack_test_unpack_u64_out_of_bounds(struct kunit *test)
struct policy_unpack_fixture *puf = test->priv;
const char name[] = TEST_U64_NAME;
bool success;
- u64 data;
+ u64 data = 0;
puf->e->pos += TEST_NAMED_U64_BUF_OFFSET;
puf->e->end = puf->e->start + TEST_U64_BUF_OFFSET + sizeof(u64);
diff --git a/security/apparmor/procattr.c b/security/apparmor/procattr.c
index fde332e0ea7d..86ad26ef72ed 100644
--- a/security/apparmor/procattr.c
+++ b/security/apparmor/procattr.c
@@ -90,7 +90,7 @@ static char *split_token_from_name(const char *op, char *args, u64 *token)
}
/**
- * aa_setprocattr_chagnehat - handle procattr interface to change_hat
+ * aa_setprocattr_changehat - handle procattr interface to change_hat
* @args: args received from writing to /proc/<pid>/attr/current (NOT NULL)
* @size: size of the args
* @flags: set of flags governing behavior
diff --git a/security/apparmor/secid.c b/security/apparmor/secid.c
index ce545f99259e..24a0e23f1b2b 100644
--- a/security/apparmor/secid.c
+++ b/security/apparmor/secid.c
@@ -13,9 +13,9 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/gfp.h>
-#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/xarray.h>
#include "include/cred.h"
#include "include/lib.h"
@@ -29,8 +29,9 @@
*/
#define AA_FIRST_SECID 2
-static DEFINE_IDR(aa_secids);
-static DEFINE_SPINLOCK(secid_lock);
+static DEFINE_XARRAY_FLAGS(aa_secids, XA_FLAGS_LOCK_IRQ | XA_FLAGS_TRACK_FREE);
+
+int apparmor_display_secid_mode;
/*
* TODO: allow policy to reserve a secid range?
@@ -47,9 +48,9 @@ void aa_secid_update(u32 secid, struct aa_label *label)
{
unsigned long flags;
- spin_lock_irqsave(&secid_lock, flags);
- idr_replace(&aa_secids, label, secid);
- spin_unlock_irqrestore(&secid_lock, flags);
+ xa_lock_irqsave(&aa_secids, flags);
+ __xa_store(&aa_secids, secid, label, 0);
+ xa_unlock_irqrestore(&aa_secids, flags);
}
/**
@@ -58,19 +59,14 @@ void aa_secid_update(u32 secid, struct aa_label *label)
*/
struct aa_label *aa_secid_to_label(u32 secid)
{
- struct aa_label *label;
-
- rcu_read_lock();
- label = idr_find(&aa_secids, secid);
- rcu_read_unlock();
-
- return label;
+ return xa_load(&aa_secids, secid);
}
int apparmor_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
{
/* TODO: cache secctx and ref count so we don't have to recreate */
struct aa_label *label = aa_secid_to_label(secid);
+ int flags = FLAG_VIEW_SUBNS | FLAG_HIDDEN_UNCONFINED | FLAG_ABS_ROOT;
int len;
AA_BUG(!seclen);
@@ -78,15 +74,15 @@ int apparmor_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
if (!label)
return -EINVAL;
+ if (apparmor_display_secid_mode)
+ flags |= FLAG_SHOW_MODE;
+
if (secdata)
len = aa_label_asxprint(secdata, root_ns, label,
- FLAG_SHOW_MODE | FLAG_VIEW_SUBNS |
- FLAG_HIDDEN_UNCONFINED | FLAG_ABS_ROOT,
- GFP_ATOMIC);
+ flags, GFP_ATOMIC);
else
- len = aa_label_snxprint(NULL, 0, root_ns, label,
- FLAG_SHOW_MODE | FLAG_VIEW_SUBNS |
- FLAG_HIDDEN_UNCONFINED | FLAG_ABS_ROOT);
+ len = aa_label_snxprint(NULL, 0, root_ns, label, flags);
+
if (len < 0)
return -ENOMEM;
@@ -126,19 +122,16 @@ int aa_alloc_secid(struct aa_label *label, gfp_t gfp)
unsigned long flags;
int ret;
- idr_preload(gfp);
- spin_lock_irqsave(&secid_lock, flags);
- ret = idr_alloc(&aa_secids, label, AA_FIRST_SECID, 0, GFP_ATOMIC);
- spin_unlock_irqrestore(&secid_lock, flags);
- idr_preload_end();
+ xa_lock_irqsave(&aa_secids, flags);
+ ret = __xa_alloc(&aa_secids, &label->secid, label,
+ XA_LIMIT(AA_FIRST_SECID, INT_MAX), gfp);
+ xa_unlock_irqrestore(&aa_secids, flags);
if (ret < 0) {
label->secid = AA_SECID_INVALID;
return ret;
}
- AA_BUG(ret == AA_SECID_INVALID);
- label->secid = ret;
return 0;
}
@@ -150,12 +143,7 @@ void aa_free_secid(u32 secid)
{
unsigned long flags;
- spin_lock_irqsave(&secid_lock, flags);
- idr_remove(&aa_secids, secid);
- spin_unlock_irqrestore(&secid_lock, flags);
-}
-
-void aa_secids_init(void)
-{
- idr_init_base(&aa_secids, AA_FIRST_SECID);
+ xa_lock_irqsave(&aa_secids, flags);
+ __xa_erase(&aa_secids, secid);
+ xa_unlock_irqrestore(&aa_secids, flags);
}
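
A minimal userspace model of the secid lifecycle that the XArray conversion above
backs: aa_alloc_secid() finds a free id at or above AA_FIRST_SECID and stores the
label pointer, aa_secid_to_label() looks it up, and aa_free_secid() releases the
slot. The fixed-size table below is only a stand-in for the XArray, used to
illustrate the semantics rather than the kernel API.

#include <stdio.h>

#define AA_FIRST_SECID	2	/* from the hunk above */
#define MAX_SECID	32	/* small bound, for this illustration only */

static void *table[MAX_SECID];

/* stands in for aa_alloc_secid(): pick a free id within the limit */
static int demo_alloc_secid(void *label)
{
	int id;

	for (id = AA_FIRST_SECID; id < MAX_SECID; id++) {
		if (!table[id]) {
			table[id] = label;
			return id;
		}
	}
	return -1;	/* __xa_alloc() would return a negative errno here */
}

/* stands in for aa_secid_to_label(): plain lookup */
static void *demo_secid_to_label(int secid)
{
	return table[secid];
}

/* stands in for aa_free_secid(): release the slot */
static void demo_free_secid(int secid)
{
	table[secid] = NULL;
}

int main(void)
{
	char label[] = "unconfined";
	int secid = demo_alloc_secid(label);

	if (secid < 0)
		return 1;
	printf("secid=%d label=%s\n", secid, (char *)demo_secid_to_label(secid));
	demo_free_secid(secid);
	return 0;
}
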
diff --git a/security/apparmor/task.c b/security/apparmor/task.c
index d17130ee6795..503dc0877fb1 100644
--- a/security/apparmor/task.c
+++ b/security/apparmor/task.c
@@ -12,7 +12,12 @@
* should return to the previous cred if it has not been modified.
*/
+#include <linux/gfp.h>
+#include <linux/ptrace.h>
+
+#include "include/audit.h"
#include "include/cred.h"
+#include "include/policy.h"
#include "include/task.h"
/**
@@ -177,3 +182,112 @@ int aa_restore_previous_label(u64 token)
return 0;
}
+
+/**
+ * audit_ptrace_mask - convert mask to permission string
+ * @mask: permission mask to convert
+ *
+ * Returns: pointer to static string
+ */
+static const char *audit_ptrace_mask(u32 mask)
+{
+ switch (mask) {
+ case MAY_READ:
+ return "read";
+ case MAY_WRITE:
+ return "trace";
+ case AA_MAY_BE_READ:
+ return "readby";
+ case AA_MAY_BE_TRACED:
+ return "tracedby";
+ }
+ return "";
+}
+
+/* call back to audit ptrace fields */
+static void audit_ptrace_cb(struct audit_buffer *ab, void *va)
+{
+ struct common_audit_data *sa = va;
+
+ if (aad(sa)->request & AA_PTRACE_PERM_MASK) {
+ audit_log_format(ab, " requested_mask=\"%s\"",
+ audit_ptrace_mask(aad(sa)->request));
+
+ if (aad(sa)->denied & AA_PTRACE_PERM_MASK) {
+ audit_log_format(ab, " denied_mask=\"%s\"",
+ audit_ptrace_mask(aad(sa)->denied));
+ }
+ }
+ audit_log_format(ab, " peer=");
+ aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
+ FLAGS_NONE, GFP_ATOMIC);
+}
+
+/* assumes check for PROFILE_MEDIATES is already done */
+/* TODO: conditionals */
+static int profile_ptrace_perm(struct aa_profile *profile,
+ struct aa_label *peer, u32 request,
+ struct common_audit_data *sa)
+{
+ struct aa_perms perms = { };
+
+ aad(sa)->peer = peer;
+ aa_profile_match_label(profile, peer, AA_CLASS_PTRACE, request,
+ &perms);
+ aa_apply_modes_to_perms(profile, &perms);
+ return aa_check_perms(profile, &perms, request, sa, audit_ptrace_cb);
+}
+
+static int profile_tracee_perm(struct aa_profile *tracee,
+ struct aa_label *tracer, u32 request,
+ struct common_audit_data *sa)
+{
+ if (profile_unconfined(tracee) || unconfined(tracer) ||
+ !PROFILE_MEDIATES(tracee, AA_CLASS_PTRACE))
+ return 0;
+
+ return profile_ptrace_perm(tracee, tracer, request, sa);
+}
+
+static int profile_tracer_perm(struct aa_profile *tracer,
+ struct aa_label *tracee, u32 request,
+ struct common_audit_data *sa)
+{
+ if (profile_unconfined(tracer))
+ return 0;
+
+ if (PROFILE_MEDIATES(tracer, AA_CLASS_PTRACE))
+ return profile_ptrace_perm(tracer, tracee, request, sa);
+
+ /* profile uses the old style capability check for ptrace */
+ if (&tracer->label == tracee)
+ return 0;
+
+ aad(sa)->label = &tracer->label;
+ aad(sa)->peer = tracee;
+ aad(sa)->request = 0;
+ aad(sa)->error = aa_capable(&tracer->label, CAP_SYS_PTRACE,
+ CAP_OPT_NONE);
+
+ return aa_audit(AUDIT_APPARMOR_AUTO, tracer, sa, audit_ptrace_cb);
+}
+
+/**
+ * aa_may_ptrace - test if tracer task can trace the tracee
+ * @tracer: label of the task doing the tracing (NOT NULL)
+ * @tracee: task label to be traced
+ * @request: permission request
+ *
+ * Returns: %0 else error code if permission denied or error
+ */
+int aa_may_ptrace(struct aa_label *tracer, struct aa_label *tracee,
+ u32 request)
+{
+ struct aa_profile *profile;
+ u32 xrequest = request << PTRACE_PERM_SHIFT;
+ DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, OP_PTRACE);
+
+ return xcheck_labels(tracer, tracee, profile,
+ profile_tracer_perm(profile, tracee, request, &sa),
+ profile_tracee_perm(profile, tracer, xrequest, &sa));
+}
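
A small userspace sketch of the PTRACE_PERM_SHIFT cross-check used by
aa_may_ptrace() above: the tracer-side request shifted left by two lands on the
tracee-side AA_MAY_BE_READ/AA_MAY_BE_TRACED bits. The numeric values of MAY_READ,
MAY_WRITE, AA_MAY_APPEND and AA_MAY_CREATE are assumptions here (they match the
usual kernel definitions); only the bit relationship is being demonstrated.

#include <assert.h>
#include <stdio.h>

#define MAY_WRITE		0x2	/* assumed value */
#define MAY_READ		0x4	/* assumed value */
#define AA_MAY_APPEND		0x8	/* assumed value */
#define AA_MAY_CREATE		0x10	/* assumed value */

/* aliases as defined in the task.h hunk above */
#define AA_PTRACE_TRACE		MAY_WRITE
#define AA_PTRACE_READ		MAY_READ
#define AA_MAY_BE_TRACED	AA_MAY_APPEND
#define AA_MAY_BE_READ		AA_MAY_CREATE
#define PTRACE_PERM_SHIFT	2

int main(void)
{
	unsigned int request = AA_PTRACE_TRACE | AA_PTRACE_READ;
	unsigned int xrequest = request << PTRACE_PERM_SHIFT;

	/* the shifted tracer request is exactly the tracee-side mask */
	assert(xrequest == (AA_MAY_BE_TRACED | AA_MAY_BE_READ));
	printf("request=0x%x xrequest=0x%x\n", request, xrequest);
	return 0;
}
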
diff --git a/security/loadpin/loadpin.c b/security/loadpin/loadpin.c
index 6ab5f2bbf41f..44521582dcba 100644
--- a/security/loadpin/loadpin.c
+++ b/security/loadpin/loadpin.c
@@ -356,13 +356,11 @@ static long dm_verity_ioctl(struct file *filp, unsigned int cmd, unsigned long a
{
void __user *uarg = (void __user *)arg;
unsigned int fd;
- int rc;
switch (cmd) {
case LOADPIN_IOC_SET_TRUSTED_VERITY_DIGESTS:
- rc = copy_from_user(&fd, uarg, sizeof(fd));
- if (rc)
- return rc;
+ if (copy_from_user(&fd, uarg, sizeof(fd)))
+ return -EFAULT;
return read_trusted_verity_root_digests(fd);
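
A userspace sketch of why the loadpin hunk above returns -EFAULT instead of the
copy_from_user() result: copy_from_user() reports the number of bytes it could not
copy, not a negative errno, so returning that value directly hands back a positive
number that callers cannot distinguish from success. fake_copy_from_user() below
is a stand-in used only for this illustration.

#include <errno.h>
#include <stdio.h>

/* pretend 4 bytes of the request could not be copied */
static unsigned long fake_copy_from_user(void *dst, const void *src,
					 unsigned long n)
{
	(void)dst; (void)src; (void)n;
	return 4;
}

/* old pattern: propagates a positive byte count as if it were an errno */
static long ioctl_old(void)
{
	unsigned int fd;
	long rc = fake_copy_from_user(&fd, NULL, sizeof(fd));

	if (rc)
		return rc;	/* bug: returns +4, not a negative error */
	return 0;
}

/* new pattern: report a proper error code on partial copy */
static long ioctl_new(void)
{
	unsigned int fd;

	if (fake_copy_from_user(&fd, NULL, sizeof(fd)))
		return -EFAULT;
	return 0;
}

int main(void)
{
	printf("old=%ld new=%ld\n", ioctl_old(), ioctl_new());
	return 0;
}
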
diff --git a/security/security.c b/security/security.c
index 14d30fec8a00..4b95de24bc8d 100644
--- a/security/security.c
+++ b/security/security.c
@@ -2660,4 +2660,8 @@ int security_uring_sqpoll(void)
{
return call_int_hook(uring_sqpoll, 0);
}
+int security_uring_cmd(struct io_uring_cmd *ioucmd)
+{
+ return call_int_hook(uring_cmd, 0, ioucmd);
+}
#endif /* CONFIG_IO_URING */
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 79573504783b..03bca97c8b29 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -91,6 +91,7 @@
#include <uapi/linux/mount.h>
#include <linux/fsnotify.h>
#include <linux/fanotify.h>
+#include <linux/io_uring.h>
#include "avc.h"
#include "objsec.h"
@@ -6987,6 +6988,28 @@ static int selinux_uring_sqpoll(void)
return avc_has_perm(&selinux_state, sid, sid,
SECCLASS_IO_URING, IO_URING__SQPOLL, NULL);
}
+
+/**
+ * selinux_uring_cmd - check if IORING_OP_URING_CMD is allowed
+ * @ioucmd: the io_uring command structure
+ *
+ * Check to see if the current domain is allowed to execute an
+ * IORING_OP_URING_CMD against the device/file specified in @ioucmd.
+ *
+ */
+static int selinux_uring_cmd(struct io_uring_cmd *ioucmd)
+{
+ struct file *file = ioucmd->file;
+ struct inode *inode = file_inode(file);
+ struct inode_security_struct *isec = selinux_inode(inode);
+ struct common_audit_data ad;
+
+ ad.type = LSM_AUDIT_DATA_FILE;
+ ad.u.file = file;
+
+ return avc_has_perm(&selinux_state, current_sid(), isec->sid,
+ SECCLASS_IO_URING, IO_URING__CMD, &ad);
+}
#endif /* CONFIG_IO_URING */
/*
@@ -7231,6 +7254,7 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
#ifdef CONFIG_IO_URING
LSM_HOOK_INIT(uring_override_creds, selinux_uring_override_creds),
LSM_HOOK_INIT(uring_sqpoll, selinux_uring_sqpoll),
+ LSM_HOOK_INIT(uring_cmd, selinux_uring_cmd),
#endif
/*
diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
index ff757ae5f253..1c2f41ff4e55 100644
--- a/security/selinux/include/classmap.h
+++ b/security/selinux/include/classmap.h
@@ -253,7 +253,7 @@ const struct security_class_mapping secclass_map[] = {
{ "anon_inode",
{ COMMON_FILE_PERMS, NULL } },
{ "io_uring",
- { "override_creds", "sqpoll", NULL } },
+ { "override_creds", "sqpoll", "cmd", NULL } },
{ NULL }
};
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 001831458fa2..bffccdc494cb 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -42,6 +42,7 @@
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/watch_queue.h>
+#include <linux/io_uring.h>
#include "smack.h"
#define TRANS_TRUE "TRUE"
@@ -4732,6 +4733,36 @@ static int smack_uring_sqpoll(void)
return -EPERM;
}
+/**
+ * smack_uring_cmd - check on file operations for io_uring
+ * @ioucmd: the command in question
+ *
+ * Make a best guess about whether an io_uring "command" should
+ * be allowed. Use the same logic used for determining if the
+ * file could be opened for read in the absence of better criteria.
+ */
+static int smack_uring_cmd(struct io_uring_cmd *ioucmd)
+{
+ struct file *file = ioucmd->file;
+ struct smk_audit_info ad;
+ struct task_smack *tsp;
+ struct inode *inode;
+ int rc;
+
+ if (!file)
+ return -EINVAL;
+
+ tsp = smack_cred(file->f_cred);
+ inode = file_inode(file);
+
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
+ smk_ad_setfield_u_fs_path(&ad, file->f_path);
+ rc = smk_tskacc(tsp, smk_of_inode(inode), MAY_READ, &ad);
+ rc = smk_bu_credfile(file->f_cred, file, MAY_READ, rc);
+
+ return rc;
+}
+
#endif /* CONFIG_IO_URING */
struct lsm_blob_sizes smack_blob_sizes __lsm_ro_after_init = {
@@ -4889,6 +4920,7 @@ static struct security_hook_list smack_hooks[] __lsm_ro_after_init = {
#ifdef CONFIG_IO_URING
LSM_HOOK_INIT(uring_override_creds, smack_uring_override_creds),
LSM_HOOK_INIT(uring_sqpoll, smack_uring_sqpoll),
+ LSM_HOOK_INIT(uring_cmd, smack_uring_cmd),
#endif
};
diff --git a/sound/ac97/bus.c b/sound/ac97/bus.c
index 0d31a6d71468..045330883a96 100644
--- a/sound/ac97/bus.c
+++ b/sound/ac97/bus.c
@@ -460,7 +460,7 @@ static ssize_t vendor_id_show(struct device *dev,
{
struct ac97_codec_device *codec = to_ac97_device(dev);
- return sprintf(buf, "%08x", codec->vendor_id);
+ return sysfs_emit(buf, "%08x", codec->vendor_id);
}
DEVICE_ATTR_RO(vendor_id);
diff --git a/sound/aoa/soundbus/sysfs.c b/sound/aoa/soundbus/sysfs.c
index dead3105689b..e87b28428b99 100644
--- a/sound/aoa/soundbus/sysfs.c
+++ b/sound/aoa/soundbus/sysfs.c
@@ -10,19 +10,13 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
{
struct soundbus_dev *sdev = to_soundbus_device(dev);
struct platform_device *of = &sdev->ofdev;
- int length;
- if (*sdev->modalias) {
- strscpy(buf, sdev->modalias, sizeof(sdev->modalias) + 1);
- strcat(buf, "\n");
- length = strlen(buf);
- } else {
- length = sprintf(buf, "of:N%pOFn%c%s\n",
- of->dev.of_node, 'T',
- of_node_get_device_type(of->dev.of_node));
- }
-
- return length;
+ if (*sdev->modalias)
+ return sysfs_emit(buf, "%s\n", sdev->modalias);
+ else
+ return sysfs_emit(buf, "of:N%pOFn%c%s\n",
+ of->dev.of_node, 'T',
+ of_node_get_device_type(of->dev.of_node));
}
static DEVICE_ATTR_RO(modalias);
@@ -32,7 +26,7 @@ static ssize_t name_show(struct device *dev,
struct soundbus_dev *sdev = to_soundbus_device(dev);
struct platform_device *of = &sdev->ofdev;
- return sprintf(buf, "%pOFn\n", of->dev.of_node);
+ return sysfs_emit(buf, "%pOFn\n", of->dev.of_node);
}
static DEVICE_ATTR_RO(name);
@@ -42,7 +36,7 @@ static ssize_t type_show(struct device *dev,
struct soundbus_dev *sdev = to_soundbus_device(dev);
struct platform_device *of = &sdev->ofdev;
- return sprintf(buf, "%s\n", of_node_get_device_type(of->dev.of_node));
+ return sysfs_emit(buf, "%s\n", of_node_get_device_type(of->dev.of_node));
}
static DEVICE_ATTR_RO(type);
diff --git a/sound/core/Kconfig b/sound/core/Kconfig
index dd7b40734723..12990d9a4dff 100644
--- a/sound/core/Kconfig
+++ b/sound/core/Kconfig
@@ -154,6 +154,16 @@ config SND_VERBOSE_PRINTK
You don't need this unless you're debugging ALSA.
+config SND_CTL_FAST_LOOKUP
+ bool "Fast lookup of control elements" if EXPERT
+ default y
+ select XARRAY_MULTI
+ help
+ This option enables faster lookup of control elements.
+ It will consume more memory because of the additional XArray.
+ If you need to favor a smaller memory footprint over lookup
+ performance, turn this off.
+
config SND_DEBUG
bool "Debug"
help
@@ -178,14 +188,29 @@ config SND_PCM_XRUN_DEBUG
sound clicking when system is loaded, it may help to determine
the process or driver which causes the scheduling gaps.
-config SND_CTL_VALIDATION
- bool "Perform sanity-checks for each control element access"
+config SND_CTL_INPUT_VALIDATION
+ bool "Validate input data to control API"
+ help
+ Say Y to enable additional validation of the input data for
+ each control element, including value range checks.
+ An error is returned from the ALSA core for invalid inputs without
+ passing them to the driver. This is a kind of hardening for drivers
+ that have no proper error checks, at the cost of a slight
+ performance overhead.
+
+config SND_CTL_DEBUG
+ bool "Enable debugging feature for control API"
depends on SND_DEBUG
help
- Say Y to enable the additional validation of each control element
- access, including sanity-checks like whether the values returned
- from the driver are in the proper ranges or the check of the invalid
- access at out-of-array areas.
+ Say Y to enable the debugging feature for the ALSA control API.
+ It performs additional sanity checks on each control element
+ read access, such as whether the values returned from the driver
+ are within the proper ranges, and checks for invalid access to
+ out-of-array areas. An error is printed when the driver returns
+ such unexpected values.
+ When you develop a driver that deals with control elements, it's
+ strongly recommended to enable this option once and verify that no
+ relevant errors appear.
config SND_JACK_INJECTION_DEBUG
bool "Sound jack injection interface via debugfs"
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index de514ec8c83d..243acad89fd3 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -810,7 +810,7 @@ static void error_delayed_work(struct work_struct *work)
mutex_unlock(&stream->device->lock);
}
-/*
+/**
* snd_compr_stop_error: Report a fatal error on a stream
* @stream: pointer to stream
* @state: state to transition the stream to
@@ -818,6 +818,8 @@ static void error_delayed_work(struct work_struct *work)
* Stop the stream and set its state.
*
* Should be called with compressed device lock held.
+ *
+ * Return: zero if successful, or a negative error code
*/
int snd_compr_stop_error(struct snd_compr_stream *stream,
snd_pcm_state_t state)
@@ -1157,12 +1159,15 @@ static int snd_compress_dev_free(struct snd_device *device)
return 0;
}
-/*
+/**
* snd_compress_new: create new compress device
* @card: sound card pointer
* @device: device number
* @dirn: device direction, should be of type enum snd_compr_direction
+ * @id: ID string
* @compr: compress device pointer
+ *
+ * Return: zero if successful, or a negative error code
*/
int snd_compress_new(struct snd_card *card, int device,
int dirn, const char *id, struct snd_compr *compr)
diff --git a/sound/core/control.c b/sound/core/control.c
index a25c0d64d104..f3e893715369 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -127,6 +127,7 @@ static int snd_ctl_release(struct inode *inode, struct file *file)
if (control->vd[idx].owner == ctl)
control->vd[idx].owner = NULL;
up_write(&card->controls_rwsem);
+ snd_fasync_free(ctl->fasync);
snd_ctl_empty_read_queue(ctl);
put_pid(ctl->pid);
kfree(ctl);
@@ -181,7 +182,7 @@ void snd_ctl_notify(struct snd_card *card, unsigned int mask,
_found:
wake_up(&ctl->change_sleep);
spin_unlock(&ctl->read_lock);
- kill_fasync(&ctl->fasync, SIGIO, POLL_IN);
+ snd_kill_fasync(ctl->fasync, SIGIO, POLL_IN);
}
read_unlock_irqrestore(&card->ctl_files_rwlock, flags);
}
@@ -364,6 +365,93 @@ static int snd_ctl_find_hole(struct snd_card *card, unsigned int count)
return 0;
}
+/* check whether the given id is contained in the given kctl */
+static bool elem_id_matches(const struct snd_kcontrol *kctl,
+ const struct snd_ctl_elem_id *id)
+{
+ return kctl->id.iface == id->iface &&
+ kctl->id.device == id->device &&
+ kctl->id.subdevice == id->subdevice &&
+ !strncmp(kctl->id.name, id->name, sizeof(kctl->id.name)) &&
+ kctl->id.index <= id->index &&
+ kctl->id.index + kctl->count > id->index;
+}
+
+#ifdef CONFIG_SND_CTL_FAST_LOOKUP
+/* Compute a hash key for the corresponding ctl id
+ * It's for the name lookup, hence the numid is excluded.
+ * The hash key is bounded by LONG_MAX so that it can be used as an XArray key.
+ */
+#define MULTIPLIER 37
+static unsigned long get_ctl_id_hash(const struct snd_ctl_elem_id *id)
+{
+ unsigned long h;
+ const unsigned char *p;
+
+ h = id->iface;
+ h = MULTIPLIER * h + id->device;
+ h = MULTIPLIER * h + id->subdevice;
+ for (p = id->name; *p; p++)
+ h = MULTIPLIER * h + *p;
+ h = MULTIPLIER * h + id->index;
+ h &= LONG_MAX;
+ return h;
+}
+
+/* add hash entries to numid and ctl xarray tables */
+static void add_hash_entries(struct snd_card *card,
+ struct snd_kcontrol *kcontrol)
+{
+ struct snd_ctl_elem_id id = kcontrol->id;
+ int i;
+
+ xa_store_range(&card->ctl_numids, kcontrol->id.numid,
+ kcontrol->id.numid + kcontrol->count - 1,
+ kcontrol, GFP_KERNEL);
+
+ for (i = 0; i < kcontrol->count; i++) {
+ id.index = kcontrol->id.index + i;
+ if (xa_insert(&card->ctl_hash, get_ctl_id_hash(&id),
+ kcontrol, GFP_KERNEL)) {
+ /* skip hash for this entry, noting we had a collision */
+ card->ctl_hash_collision = true;
+ dev_dbg(card->dev, "ctl_hash collision %d:%s:%d\n",
+ id.iface, id.name, id.index);
+ }
+ }
+}
+
+/* remove hash entries that have been added */
+static void remove_hash_entries(struct snd_card *card,
+ struct snd_kcontrol *kcontrol)
+{
+ struct snd_ctl_elem_id id = kcontrol->id;
+ struct snd_kcontrol *matched;
+ unsigned long h;
+ int i;
+
+ for (i = 0; i < kcontrol->count; i++) {
+ xa_erase(&card->ctl_numids, id.numid);
+ h = get_ctl_id_hash(&id);
+ matched = xa_load(&card->ctl_hash, h);
+ if (matched && (matched == kcontrol ||
+ elem_id_matches(matched, &id)))
+ xa_erase(&card->ctl_hash, h);
+ id.index++;
+ id.numid++;
+ }
+}
+#else /* CONFIG_SND_CTL_FAST_LOOKUP */
+static inline void add_hash_entries(struct snd_card *card,
+ struct snd_kcontrol *kcontrol)
+{
+}
+static inline void remove_hash_entries(struct snd_card *card,
+ struct snd_kcontrol *kcontrol)
+{
+}
+#endif /* CONFIG_SND_CTL_FAST_LOOKUP */
+
enum snd_ctl_add_mode {
CTL_ADD_EXCLUSIVE, CTL_REPLACE, CTL_ADD_ON_REPLACE,
};
@@ -408,6 +496,8 @@ static int __snd_ctl_add_replace(struct snd_card *card,
kcontrol->id.numid = card->last_numid + 1;
card->last_numid += kcontrol->count;
+ add_hash_entries(card, kcontrol);
+
for (idx = 0; idx < kcontrol->count; idx++)
snd_ctl_notify_one(card, SNDRV_CTL_EVENT_MASK_ADD, kcontrol, idx);
@@ -479,6 +569,26 @@ int snd_ctl_replace(struct snd_card *card, struct snd_kcontrol *kcontrol,
}
EXPORT_SYMBOL(snd_ctl_replace);
+static int __snd_ctl_remove(struct snd_card *card,
+ struct snd_kcontrol *kcontrol,
+ bool remove_hash)
+{
+ unsigned int idx;
+
+ if (snd_BUG_ON(!card || !kcontrol))
+ return -EINVAL;
+ list_del(&kcontrol->list);
+
+ if (remove_hash)
+ remove_hash_entries(card, kcontrol);
+
+ card->controls_count -= kcontrol->count;
+ for (idx = 0; idx < kcontrol->count; idx++)
+ snd_ctl_notify_one(card, SNDRV_CTL_EVENT_MASK_REMOVE, kcontrol, idx);
+ snd_ctl_free_one(kcontrol);
+ return 0;
+}
+
/**
* snd_ctl_remove - remove the control from the card and release it
* @card: the card instance
@@ -492,16 +602,7 @@ EXPORT_SYMBOL(snd_ctl_replace);
*/
int snd_ctl_remove(struct snd_card *card, struct snd_kcontrol *kcontrol)
{
- unsigned int idx;
-
- if (snd_BUG_ON(!card || !kcontrol))
- return -EINVAL;
- list_del(&kcontrol->list);
- card->controls_count -= kcontrol->count;
- for (idx = 0; idx < kcontrol->count; idx++)
- snd_ctl_notify_one(card, SNDRV_CTL_EVENT_MASK_REMOVE, kcontrol, idx);
- snd_ctl_free_one(kcontrol);
- return 0;
+ return __snd_ctl_remove(card, kcontrol, true);
}
EXPORT_SYMBOL(snd_ctl_remove);
@@ -642,14 +743,30 @@ int snd_ctl_rename_id(struct snd_card *card, struct snd_ctl_elem_id *src_id,
up_write(&card->controls_rwsem);
return -ENOENT;
}
+ remove_hash_entries(card, kctl);
kctl->id = *dst_id;
kctl->id.numid = card->last_numid + 1;
card->last_numid += kctl->count;
+ add_hash_entries(card, kctl);
up_write(&card->controls_rwsem);
return 0;
}
EXPORT_SYMBOL(snd_ctl_rename_id);
+#ifndef CONFIG_SND_CTL_FAST_LOOKUP
+static struct snd_kcontrol *
+snd_ctl_find_numid_slow(struct snd_card *card, unsigned int numid)
+{
+ struct snd_kcontrol *kctl;
+
+ list_for_each_entry(kctl, &card->controls, list) {
+ if (kctl->id.numid <= numid && kctl->id.numid + kctl->count > numid)
+ return kctl;
+ }
+ return NULL;
+}
+#endif /* !CONFIG_SND_CTL_FAST_LOOKUP */
+
/**
* snd_ctl_find_numid - find the control instance with the given number-id
* @card: the card instance
@@ -665,15 +782,13 @@ EXPORT_SYMBOL(snd_ctl_rename_id);
*/
struct snd_kcontrol *snd_ctl_find_numid(struct snd_card *card, unsigned int numid)
{
- struct snd_kcontrol *kctl;
-
if (snd_BUG_ON(!card || !numid))
return NULL;
- list_for_each_entry(kctl, &card->controls, list) {
- if (kctl->id.numid <= numid && kctl->id.numid + kctl->count > numid)
- return kctl;
- }
- return NULL;
+#ifdef CONFIG_SND_CTL_FAST_LOOKUP
+ return xa_load(&card->ctl_numids, numid);
+#else
+ return snd_ctl_find_numid_slow(card, numid);
+#endif
}
EXPORT_SYMBOL(snd_ctl_find_numid);
@@ -699,21 +814,18 @@ struct snd_kcontrol *snd_ctl_find_id(struct snd_card *card,
return NULL;
if (id->numid != 0)
return snd_ctl_find_numid(card, id->numid);
- list_for_each_entry(kctl, &card->controls, list) {
- if (kctl->id.iface != id->iface)
- continue;
- if (kctl->id.device != id->device)
- continue;
- if (kctl->id.subdevice != id->subdevice)
- continue;
- if (strncmp(kctl->id.name, id->name, sizeof(kctl->id.name)))
- continue;
- if (kctl->id.index > id->index)
- continue;
- if (kctl->id.index + kctl->count <= id->index)
- continue;
+#ifdef CONFIG_SND_CTL_FAST_LOOKUP
+ kctl = xa_load(&card->ctl_hash, get_ctl_id_hash(id));
+ if (kctl && elem_id_matches(kctl, id))
return kctl;
- }
+ if (!card->ctl_hash_collision)
+ return NULL; /* we can rely on the hash table only */
+#endif
+ /* no match in the hash table - try all entries as the last resort */
+ list_for_each_entry(kctl, &card->controls, list)
+ if (elem_id_matches(kctl, id))
+ return kctl;
+
return NULL;
}
EXPORT_SYMBOL(snd_ctl_find_id);
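The fast-lookup path above keeps every control in an xarray keyed by a hash of its element ID and falls back to the full linked-list scan only once a collision has been recorded. Below is a minimal userspace C sketch of that strategy, with hypothetical names (ctl_table, ctl_entry, hash_id, find_ctl) and a plain fixed-size bucket array standing in for the xarray; it is an illustration of the idea, not kernel code.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define TABLE_SIZE 64

struct ctl_entry {
	char name[44];
	unsigned int index;
	struct ctl_entry *next;	/* full list, used as the fallback path */
};

struct ctl_table {
	struct ctl_entry *hash[TABLE_SIZE];	/* first entry per bucket only */
	struct ctl_entry *all;			/* complete linked list */
	bool hash_collision;			/* set once two IDs share a bucket */
};

static unsigned int hash_id(const char *name, unsigned int index)
{
	unsigned int h = 2166136261u;	/* FNV-1a over the name, then mix in the index */

	while (*name)
		h = (h ^ (unsigned char)*name++) * 16777619u;
	return (h ^ index) % TABLE_SIZE;
}

static void add_ctl(struct ctl_table *t, struct ctl_entry *e)
{
	unsigned int key = hash_id(e->name, e->index);

	e->next = t->all;
	t->all = e;
	if (t->hash[key])
		t->hash_collision = true;	/* keep the first entry, remember the clash */
	else
		t->hash[key] = e;
}

static struct ctl_entry *find_ctl(struct ctl_table *t,
				  const char *name, unsigned int index)
{
	unsigned int key = hash_id(name, index);
	struct ctl_entry *e = t->hash[key];

	if (e && !strcmp(e->name, name) && e->index == index)
		return e;			/* fast path: direct hit */
	if (!t->hash_collision)
		return NULL;			/* table is authoritative, no scan needed */
	for (e = t->all; e; e = e->next)	/* last resort: full scan */
		if (!strcmp(e->name, name) && e->index == index)
			return e;
	return NULL;
}

int main(void)
{
	static struct ctl_entry a = { .name = "Master Playback Volume", .index = 0 };
	static struct ctl_entry b = { .name = "PCM Playback Switch", .index = 0 };
	struct ctl_table t = { 0 };

	add_ctl(&t, &a);
	add_ctl(&t, &b);
	printf("found: %s\n", find_ctl(&t, "PCM Playback Switch", 0)->name);
	return 0;
}

The point of the collision flag is visible here: as long as no two IDs ever landed in the same bucket, a miss in the table is authoritative and the linear scan can be skipped entirely.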
@@ -855,7 +967,6 @@ static const unsigned int value_sizes[] = {
[SNDRV_CTL_ELEM_TYPE_INTEGER64] = sizeof(long long),
};
-#ifdef CONFIG_SND_CTL_VALIDATION
/* fill the remaining snd_ctl_elem_value data with the given pattern */
static void fill_remaining_elem_value(struct snd_ctl_elem_value *control,
struct snd_ctl_elem_info *info,
@@ -872,7 +983,7 @@ static void fill_remaining_elem_value(struct snd_ctl_elem_value *control,
static int sanity_check_int_value(struct snd_card *card,
const struct snd_ctl_elem_value *control,
const struct snd_ctl_elem_info *info,
- int i)
+ int i, bool print_error)
{
long long lval, lmin, lmax, lstep;
u64 rem;
@@ -906,21 +1017,23 @@ static int sanity_check_int_value(struct snd_card *card,
}
if (lval < lmin || lval > lmax) {
- dev_err(card->dev,
- "control %i:%i:%i:%s:%i: value out of range %lld (%lld/%lld) at count %i\n",
- control->id.iface, control->id.device,
- control->id.subdevice, control->id.name,
- control->id.index, lval, lmin, lmax, i);
+ if (print_error)
+ dev_err(card->dev,
+ "control %i:%i:%i:%s:%i: value out of range %lld (%lld/%lld) at count %i\n",
+ control->id.iface, control->id.device,
+ control->id.subdevice, control->id.name,
+ control->id.index, lval, lmin, lmax, i);
return -EINVAL;
}
if (lstep) {
div64_u64_rem(lval, lstep, &rem);
if (rem) {
- dev_err(card->dev,
- "control %i:%i:%i:%s:%i: unaligned value %lld (step %lld) at count %i\n",
- control->id.iface, control->id.device,
- control->id.subdevice, control->id.name,
- control->id.index, lval, lstep, i);
+ if (print_error)
+ dev_err(card->dev,
+ "control %i:%i:%i:%s:%i: unaligned value %lld (step %lld) at count %i\n",
+ control->id.iface, control->id.device,
+ control->id.subdevice, control->id.name,
+ control->id.index, lval, lstep, i);
return -EINVAL;
}
}
@@ -928,15 +1041,13 @@ static int sanity_check_int_value(struct snd_card *card,
return 0;
}
-/* perform sanity checks to the given snd_ctl_elem_value object */
-static int sanity_check_elem_value(struct snd_card *card,
- const struct snd_ctl_elem_value *control,
- const struct snd_ctl_elem_info *info,
- u32 pattern)
+/* check whether all the input values are valid for the given elem value */
+static int sanity_check_input_values(struct snd_card *card,
+ const struct snd_ctl_elem_value *control,
+ const struct snd_ctl_elem_info *info,
+ bool print_error)
{
- size_t offset;
- int i, ret = 0;
- u32 *p;
+ int i, ret;
switch (info->type) {
case SNDRV_CTL_ELEM_TYPE_BOOLEAN:
@@ -944,7 +1055,8 @@ static int sanity_check_elem_value(struct snd_card *card,
case SNDRV_CTL_ELEM_TYPE_INTEGER64:
case SNDRV_CTL_ELEM_TYPE_ENUMERATED:
for (i = 0; i < info->count; i++) {
- ret = sanity_check_int_value(card, control, info, i);
+ ret = sanity_check_int_value(card, control, info, i,
+ print_error);
if (ret < 0)
return ret;
}
@@ -953,6 +1065,23 @@ static int sanity_check_elem_value(struct snd_card *card,
break;
}
+ return 0;
+}
+
+/* perform sanity checks to the given snd_ctl_elem_value object */
+static int sanity_check_elem_value(struct snd_card *card,
+ const struct snd_ctl_elem_value *control,
+ const struct snd_ctl_elem_info *info,
+ u32 pattern)
+{
+ size_t offset;
+ int ret;
+ u32 *p;
+
+ ret = sanity_check_input_values(card, control, info, true);
+ if (ret < 0)
+ return ret;
+
+ /* check whether the remaining area is kept untouched */
offset = value_sizes[info->type] * info->count;
offset = DIV_ROUND_UP(offset, sizeof(u32));
@@ -967,21 +1096,6 @@ static int sanity_check_elem_value(struct snd_card *card,
return ret;
}
-#else
-static inline void fill_remaining_elem_value(struct snd_ctl_elem_value *control,
- struct snd_ctl_elem_info *info,
- u32 pattern)
-{
-}
-
-static inline int sanity_check_elem_value(struct snd_card *card,
- struct snd_ctl_elem_value *control,
- struct snd_ctl_elem_info *info,
- u32 pattern)
-{
- return 0;
-}
-#endif
static int __snd_ctl_elem_info(struct snd_card *card,
struct snd_kcontrol *kctl,
@@ -1077,7 +1191,7 @@ static int snd_ctl_elem_read(struct snd_card *card,
snd_ctl_build_ioff(&control->id, kctl, index_offset);
-#ifdef CONFIG_SND_CTL_VALIDATION
+#ifdef CONFIG_SND_CTL_DEBUG
/* info is needed only for validation */
memset(&info, 0, sizeof(info));
info.id = control->id;
@@ -1154,6 +1268,17 @@ static int snd_ctl_elem_write(struct snd_card *card, struct snd_ctl_file *file,
snd_ctl_build_ioff(&control->id, kctl, index_offset);
result = snd_power_ref_and_wait(card);
+ /* validate input values */
+ if (IS_ENABLED(CONFIG_SND_CTL_INPUT_VALIDATION) && !result) {
+ struct snd_ctl_elem_info info;
+
+ memset(&info, 0, sizeof(info));
+ info.id = control->id;
+ result = __snd_ctl_elem_info(card, kctl, &info, NULL);
+ if (!result)
+ result = sanity_check_input_values(card, control, &info,
+ false);
+ }
if (!result)
result = kctl->put(kctl, control);
snd_power_unref(card);
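With this split, the same integer checks serve two callers: the verbose CONFIG_SND_CTL_DEBUG validation on the read/write paths, which logs offending values, and the new CONFIG_SND_CTL_INPUT_VALIDATION write path, which rejects them silently. The following tiny standalone C sketch shows such a range-and-step check with a print_error switch; check_int_value is a made-up name and only mirrors the shape of the kernel routine.

#include <stdbool.h>
#include <stdio.h>

static int check_int_value(long long val, long long min, long long max,
			   long long step, bool print_error)
{
	if (val < min || val > max) {
		if (print_error)
			fprintf(stderr, "value %lld out of range (%lld..%lld)\n",
				val, min, max);
		return -1;
	}
	if (step && val % step) {
		if (print_error)
			fprintf(stderr, "value %lld not aligned to step %lld\n",
				val, step);
		return -1;
	}
	return 0;
}

int main(void)
{
	/* silent check, as on the write path; verbose check, as on the debug path */
	printf("in range : %d\n", check_int_value(30, 0, 100, 5, false));
	printf("unaligned: %d\n", check_int_value(33, 0, 100, 5, true));
	return 0;
}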
@@ -1930,6 +2055,8 @@ static int _snd_ctl_register_ioctl(snd_kctl_ioctl_func_t fcn, struct list_head *
* @fcn: ioctl callback function
*
* called from each device manager like pcm.c, hwdep.c, etc.
+ *
+ * Return: zero if successful, or a negative error code
*/
int snd_ctl_register_ioctl(snd_kctl_ioctl_func_t fcn)
{
@@ -1942,6 +2069,8 @@ EXPORT_SYMBOL(snd_ctl_register_ioctl);
* snd_ctl_register_ioctl_compat - register the device-specific 32bit compat
* control-ioctls
* @fcn: ioctl callback function
+ *
+ * Return: zero if successful, or a negative error code
*/
int snd_ctl_register_ioctl_compat(snd_kctl_ioctl_func_t fcn)
{
@@ -1977,6 +2106,8 @@ static int _snd_ctl_unregister_ioctl(snd_kctl_ioctl_func_t fcn,
/**
* snd_ctl_unregister_ioctl - de-register the device-specific control-ioctls
* @fcn: ioctl callback function to unregister
+ *
+ * Return: zero if successful, or a negative error code
*/
int snd_ctl_unregister_ioctl(snd_kctl_ioctl_func_t fcn)
{
@@ -1989,6 +2120,8 @@ EXPORT_SYMBOL(snd_ctl_unregister_ioctl);
* snd_ctl_unregister_ioctl_compat - de-register the device-specific compat
* 32bit control-ioctls
* @fcn: ioctl callback function to unregister
+ *
+ * Return: zero if successful, or a negative error code
*/
int snd_ctl_unregister_ioctl_compat(snd_kctl_ioctl_func_t fcn)
{
@@ -2002,7 +2135,7 @@ static int snd_ctl_fasync(int fd, struct file * file, int on)
struct snd_ctl_file *ctl;
ctl = file->private_data;
- return fasync_helper(fd, file, on, &ctl->fasync);
+ return snd_fasync_helper(fd, file, on, &ctl->fasync);
}
/* return the preferred subdevice number if already assigned;
@@ -2044,7 +2177,7 @@ EXPORT_SYMBOL_GPL(snd_ctl_get_preferred_subdevice);
* snd_ctl_request_layer - request to use the layer
* @module_name: Name of the kernel module (NULL == build-in)
*
- * Return an error code when the module cannot be loaded.
+ * Return: zero if successful, or an error code when the module cannot be loaded
*/
int snd_ctl_request_layer(const char *module_name)
{
@@ -2170,7 +2303,7 @@ static int snd_ctl_dev_disconnect(struct snd_device *device)
read_lock_irqsave(&card->ctl_files_rwlock, flags);
list_for_each_entry(ctl, &card->ctl_files, list) {
wake_up(&ctl->change_sleep);
- kill_fasync(&ctl->fasync, SIGIO, POLL_ERR);
+ snd_kill_fasync(ctl->fasync, SIGIO, POLL_ERR);
}
read_unlock_irqrestore(&card->ctl_files_rwlock, flags);
@@ -2195,8 +2328,13 @@ static int snd_ctl_dev_free(struct snd_device *device)
down_write(&card->controls_rwsem);
while (!list_empty(&card->controls)) {
control = snd_kcontrol(card->controls.next);
- snd_ctl_remove(card, control);
+ __snd_ctl_remove(card, control, false);
}
+
+#ifdef CONFIG_SND_CTL_FAST_LOOKUP
+ xa_destroy(&card->ctl_numids);
+ xa_destroy(&card->ctl_hash);
+#endif
up_write(&card->controls_rwsem);
put_device(&card->ctl_dev);
return 0;
@@ -2241,6 +2379,8 @@ int snd_ctl_create(struct snd_card *card)
*
* This is a function that can be used as info callback for a standard
* boolean control with a single mono channel.
+ *
+ * Return: Zero (always successful)
*/
int snd_ctl_boolean_mono_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
@@ -2261,6 +2401,8 @@ EXPORT_SYMBOL(snd_ctl_boolean_mono_info);
*
* This is a function that can be used as info callback for a standard
* boolean control with stereo two channels.
+ *
+ * Return: Zero (always successful)
*/
int snd_ctl_boolean_stereo_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
@@ -2284,7 +2426,7 @@ EXPORT_SYMBOL(snd_ctl_boolean_stereo_info);
* If the control's accessibility is not the default (readable and writable),
* the caller has to fill @info->access.
*
- * Return: Zero.
+ * Return: Zero (always successful)
*/
int snd_ctl_enum_info(struct snd_ctl_elem_info *info, unsigned int channels,
unsigned int items, const char *const names[])
diff --git a/sound/core/control_led.c b/sound/core/control_led.c
index 207828f30983..f975cc85772b 100644
--- a/sound/core/control_led.c
+++ b/sound/core/control_led.c
@@ -405,7 +405,7 @@ static ssize_t mode_show(struct device *dev,
case MODE_ON: str = "on"; break;
case MODE_OFF: str = "off"; break;
}
- return sprintf(buf, "%s\n", str);
+ return sysfs_emit(buf, "%s\n", str);
}
static ssize_t mode_store(struct device *dev,
@@ -443,7 +443,7 @@ static ssize_t brightness_show(struct device *dev,
{
struct snd_ctl_led *led = container_of(dev, struct snd_ctl_led, dev);
- return sprintf(buf, "%u\n", ledtrig_audio_get(led->trigger_type));
+ return sysfs_emit(buf, "%u\n", ledtrig_audio_get(led->trigger_type));
}
static DEVICE_ATTR_RW(mode);
@@ -618,8 +618,7 @@ static ssize_t list_show(struct device *dev,
struct snd_ctl_led_card *led_card = container_of(dev, struct snd_ctl_led_card, dev);
struct snd_card *card;
struct snd_ctl_led_ctl *lctl;
- char *buf2 = buf;
- size_t l;
+ size_t l = 0;
card = snd_card_ref(led_card->number);
if (!card)
@@ -627,23 +626,19 @@ static ssize_t list_show(struct device *dev,
down_read(&card->controls_rwsem);
mutex_lock(&snd_ctl_led_mutex);
if (snd_ctl_led_card_valid[led_card->number]) {
- list_for_each_entry(lctl, &led_card->led->controls, list)
- if (lctl->card == card) {
- if (buf2 - buf > PAGE_SIZE - 16)
- break;
- if (buf2 != buf)
- *buf2++ = ' ';
- l = scnprintf(buf2, 15, "%u",
- lctl->kctl->id.numid +
- lctl->index_offset);
- buf2[l] = '\0';
- buf2 += l + 1;
- }
+ list_for_each_entry(lctl, &led_card->led->controls, list) {
+ if (lctl->card != card)
+ continue;
+ if (l)
+ l += sysfs_emit_at(buf, l, " ");
+ l += sysfs_emit_at(buf, l, "%u",
+ lctl->kctl->id.numid + lctl->index_offset);
+ }
}
mutex_unlock(&snd_ctl_led_mutex);
up_read(&card->controls_rwsem);
snd_card_unref(card);
- return buf2 - buf;
+ return l;
}
static DEVICE_ATTR_WO(attach);
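The list_show() rewrite above drops the manual pointer arithmetic and instead tracks a running length while appending with sysfs_emit_at(), which never writes past the sysfs page. Here is a small userspace sketch of the same append-at-offset pattern; emit_at() is a hypothetical stand-in for sysfs_emit_at() built on vsnprintf(), shown only to illustrate the bounded-append idea.

#include <stdarg.h>
#include <stdio.h>

#define PAGE_SIZE 4096

/* append a formatted string at offset 'at', return the number of bytes added */
static int emit_at(char *buf, int at, const char *fmt, ...)
{
	va_list ap;
	int len;

	if (at < 0 || at >= PAGE_SIZE)
		return 0;
	va_start(ap, fmt);
	len = vsnprintf(buf + at, PAGE_SIZE - at, fmt, ap);
	va_end(ap);
	if (len >= PAGE_SIZE - at)	/* truncated: report what actually fits */
		len = PAGE_SIZE - at - 1;
	return len;
}

int main(void)
{
	static char buf[PAGE_SIZE];
	unsigned int numids[] = { 3, 17, 42 };
	size_t l = 0;

	for (size_t i = 0; i < sizeof(numids) / sizeof(numids[0]); i++) {
		if (l)
			l += emit_at(buf, l, " ");
		l += emit_at(buf, l, "%u", numids[i]);
	}
	l += emit_at(buf, l, "\n");
	fputs(buf, stdout);		/* prints "3 17 42" */
	return 0;
}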
diff --git a/sound/core/device.c b/sound/core/device.c
index bf0b04a7ee79..b57d80a17052 100644
--- a/sound/core/device.c
+++ b/sound/core/device.c
@@ -247,6 +247,8 @@ void snd_device_free_all(struct snd_card *card)
* device, either @SNDRV_DEV_BUILD, @SNDRV_DEV_REGISTERED or
* @SNDRV_DEV_DISCONNECTED is returned.
* Or for a non-existing device, -1 is returned as an error.
+ *
+ * Return: the current state, or -1 if not found
*/
int snd_device_get_state(struct snd_card *card, void *device_data)
{
diff --git a/sound/core/info.c b/sound/core/info.c
index 782fba87cc04..0b2f04dcb589 100644
--- a/sound/core/info.c
+++ b/sound/core/info.c
@@ -111,9 +111,9 @@ static loff_t snd_info_entry_llseek(struct file *file, loff_t offset, int orig)
entry = data->entry;
mutex_lock(&entry->access);
if (entry->c.ops->llseek) {
- offset = entry->c.ops->llseek(entry,
- data->file_private_data,
- file, offset, orig);
+ ret = entry->c.ops->llseek(entry,
+ data->file_private_data,
+ file, offset, orig);
goto out;
}
@@ -868,6 +868,8 @@ EXPORT_SYMBOL(snd_info_register);
*
* This proc file entry will be registered via snd_card_register() call, and
* it will be removed automatically at the card removal, too.
+ *
+ * Return: zero if successful, or a negative error code
*/
int snd_card_rw_proc_new(struct snd_card *card, const char *name,
void *private_data,
diff --git a/sound/core/init.c b/sound/core/init.c
index 726a8353201f..193dae361fac 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -215,6 +215,8 @@ static void __snd_card_release(struct device *dev, void *data)
* via snd_card_free() call in the error; otherwise it may lead to UAF due to
* devres call orders. You can use snd_card_free_on_error() helper for
* handling it more easily.
+ *
+ * Return: zero if successful, or a negative error code
*/
int snd_devm_card_new(struct device *parent, int idx, const char *xid,
struct module *module, size_t extra_size,
@@ -249,6 +251,8 @@ EXPORT_SYMBOL_GPL(snd_devm_card_new);
* This function handles the explicit snd_card_free() call at the error from
* the probe callback. It's just a small helper for simplifying the error
* handling with the managed devices.
+ *
+ * Return: zero if successful, or a negative error code
*/
int snd_card_free_on_error(struct device *dev, int ret)
{
@@ -310,6 +314,10 @@ static int snd_card_init(struct snd_card *card, struct device *parent,
rwlock_init(&card->ctl_files_rwlock);
INIT_LIST_HEAD(&card->controls);
INIT_LIST_HEAD(&card->ctl_files);
+#ifdef CONFIG_SND_CTL_FAST_LOOKUP
+ xa_init(&card->ctl_numids);
+ xa_init(&card->ctl_hash);
+#endif
spin_lock_init(&card->files_lock);
INIT_LIST_HEAD(&card->files_list);
mutex_init(&card->memory_mutex);
@@ -366,6 +374,8 @@ static int snd_card_init(struct snd_card *card, struct device *parent,
*
* Returns a card object corresponding to the given index or NULL if not found.
* Release the object via snd_card_unref().
+ *
+ * Return: a card object or NULL
*/
struct snd_card *snd_card_ref(int idx)
{
@@ -604,6 +614,8 @@ static int snd_card_do_free(struct snd_card *card)
* resource immediately, but tries to disconnect at first. When the card
* is still in use, the function returns before freeing the resources.
* The card resources will be freed when the refcount gets to zero.
+ *
+ * Return: zero if successful, or a negative error code
*/
int snd_card_free_when_closed(struct snd_card *card)
{
@@ -772,7 +784,7 @@ static ssize_t id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct snd_card *card = container_of(dev, struct snd_card, card_dev);
- return scnprintf(buf, PAGE_SIZE, "%s\n", card->id);
+ return sysfs_emit(buf, "%s\n", card->id);
}
static ssize_t id_store(struct device *dev, struct device_attribute *attr,
@@ -810,7 +822,7 @@ static ssize_t number_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct snd_card *card = container_of(dev, struct snd_card, card_dev);
- return scnprintf(buf, PAGE_SIZE, "%i\n", card->number);
+ return sysfs_emit(buf, "%i\n", card->number);
}
static DEVICE_ATTR_RO(number);
@@ -829,6 +841,8 @@ static const struct attribute_group card_dev_attr_group = {
* snd_card_add_dev_attr - Append a new sysfs attribute group to card
* @card: card instance
* @group: attribute group to append
+ *
+ * Return: zero if successful, or a negative error code
*/
int snd_card_add_dev_attr(struct snd_card *card,
const struct attribute_group *group)
diff --git a/sound/core/isadma.c b/sound/core/isadma.c
index 1f45ede023b4..28768061d769 100644
--- a/sound/core/isadma.c
+++ b/sound/core/isadma.c
@@ -12,8 +12,8 @@
#undef HAVE_REALLY_SLOW_DMA_CONTROLLER
#include <linux/export.h>
+#include <linux/isa-dma.h>
#include <sound/core.h>
-#include <asm/dma.h>
/**
* snd_dma_program - program an ISA DMA transfer
@@ -116,8 +116,9 @@ static void __snd_release_dma(struct device *dev, void *data)
* @dma: the dma number
* @name: the name string of the requester
*
- * Returns zero on success, or a negative error code.
* The requested DMA will be automatically released at unbinding via devres.
+ *
+ * Return: zero on success, or a negative error code
*/
int snd_devm_request_dma(struct device *dev, int dma, const char *name)
{
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index 8cfdaee77905..d3885cb02270 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -147,7 +147,7 @@ static void __snd_release_pages(struct device *dev, void *res)
* hence it can't work with SNDRV_DMA_TYPE_CONTINUOUS or
* SNDRV_DMA_TYPE_VMALLOC type.
*
- * The function returns the snd_dma_buffer object at success, or NULL if failed.
+ * Return: the snd_dma_buffer object at success, or NULL if failed
*/
struct snd_dma_buffer *
snd_devm_alloc_dir_pages(struct device *dev, int type,
@@ -179,6 +179,8 @@ EXPORT_SYMBOL_GPL(snd_devm_alloc_dir_pages);
* snd_dma_buffer_mmap - perform mmap of the given DMA buffer
* @dmab: buffer allocation information
* @area: VM area information
+ *
+ * Return: zero if successful, or a negative error code
*/
int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
struct vm_area_struct *area)
@@ -219,6 +221,8 @@ EXPORT_SYMBOL_GPL(snd_dma_buffer_sync);
* snd_sgbuf_get_addr - return the physical address at the corresponding offset
* @dmab: buffer allocation information
* @offset: offset in the ring buffer
+ *
+ * Return: the physical address
*/
dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset)
{
@@ -235,6 +239,8 @@ EXPORT_SYMBOL(snd_sgbuf_get_addr);
* snd_sgbuf_get_page - return the physical page at the corresponding offset
* @dmab: buffer allocation information
* @offset: offset in the ring buffer
+ *
+ * Return: the page pointer
*/
struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset)
{
@@ -253,6 +259,8 @@ EXPORT_SYMBOL(snd_sgbuf_get_page);
* @dmab: buffer allocation information
* @ofs: offset in the ring buffer
* @size: the requested size
+ *
+ * Return: the chunk size
*/
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
unsigned int ofs, unsigned int size)
diff --git a/sound/core/misc.c b/sound/core/misc.c
index 50e4aaa6270d..d32a19976a2b 100644
--- a/sound/core/misc.c
+++ b/sound/core/misc.c
@@ -10,6 +10,7 @@
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/ioport.h>
+#include <linux/fs.h>
#include <sound/core.h>
#ifdef CONFIG_SND_DEBUG
@@ -145,3 +146,96 @@ snd_pci_quirk_lookup(struct pci_dev *pci, const struct snd_pci_quirk *list)
}
EXPORT_SYMBOL(snd_pci_quirk_lookup);
#endif
+
+/*
+ * Deferred async signal helpers
+ *
+ * Below are a few helper functions to wrap the async signal handling
+ * in the deferred work. The main purpose is to avoid the messy deadlock
+ * around tasklist_lock and co at the kill_fasync() invocation.
+ * fasync_helper() and kill_fasync() are replaced with snd_fasync_helper()
+ * and snd_kill_fasync(), respectively. In addition, snd_fasync_free() has
+ * to be called when releasing the relevant file object.
+ */
+struct snd_fasync {
+ struct fasync_struct *fasync;
+ int signal;
+ int poll;
+ int on;
+ struct list_head list;
+};
+
+static DEFINE_SPINLOCK(snd_fasync_lock);
+static LIST_HEAD(snd_fasync_list);
+
+static void snd_fasync_work_fn(struct work_struct *work)
+{
+ struct snd_fasync *fasync;
+
+ spin_lock_irq(&snd_fasync_lock);
+ while (!list_empty(&snd_fasync_list)) {
+ fasync = list_first_entry(&snd_fasync_list, struct snd_fasync, list);
+ list_del_init(&fasync->list);
+ spin_unlock_irq(&snd_fasync_lock);
+ if (fasync->on)
+ kill_fasync(&fasync->fasync, fasync->signal, fasync->poll);
+ spin_lock_irq(&snd_fasync_lock);
+ }
+ spin_unlock_irq(&snd_fasync_lock);
+}
+
+static DECLARE_WORK(snd_fasync_work, snd_fasync_work_fn);
+
+int snd_fasync_helper(int fd, struct file *file, int on,
+ struct snd_fasync **fasyncp)
+{
+ struct snd_fasync *fasync = NULL;
+
+ if (on) {
+ fasync = kzalloc(sizeof(*fasync), GFP_KERNEL);
+ if (!fasync)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&fasync->list);
+ }
+
+ spin_lock_irq(&snd_fasync_lock);
+ if (*fasyncp) {
+ kfree(fasync);
+ fasync = *fasyncp;
+ } else {
+ if (!fasync) {
+ spin_unlock_irq(&snd_fasync_lock);
+ return 0;
+ }
+ *fasyncp = fasync;
+ }
+ fasync->on = on;
+ spin_unlock_irq(&snd_fasync_lock);
+ return fasync_helper(fd, file, on, &fasync->fasync);
+}
+EXPORT_SYMBOL_GPL(snd_fasync_helper);
+
+void snd_kill_fasync(struct snd_fasync *fasync, int signal, int poll)
+{
+ unsigned long flags;
+
+ if (!fasync || !fasync->on)
+ return;
+ spin_lock_irqsave(&snd_fasync_lock, flags);
+ fasync->signal = signal;
+ fasync->poll = poll;
+ list_move(&fasync->list, &snd_fasync_list);
+ schedule_work(&snd_fasync_work);
+ spin_unlock_irqrestore(&snd_fasync_lock, flags);
+}
+EXPORT_SYMBOL_GPL(snd_kill_fasync);
+
+void snd_fasync_free(struct snd_fasync *fasync)
+{
+ if (!fasync)
+ return;
+ fasync->on = 0;
+ flush_work(&snd_fasync_work);
+ kfree(fasync);
+}
+EXPORT_SYMBOL_GPL(snd_fasync_free);
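The comment block above describes the scheme: callers never invoke kill_fasync() directly under their spinlocks; they only record the request and kick a work item, which performs the delivery later from a safe context. Below is a rough userspace analogue of that hand-off, using a pthread worker in place of the kernel work item; deferred_sig, deliver_fn and kick are made-up names and the sketch only demonstrates the deferral pattern.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct deferred_sig {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool pending;		/* a delivery has been requested */
	bool stop;
	int signo;		/* what to deliver, recorded under the lock */
};

/* worker: performs the delivery outside of any caller-held lock */
static void *deliver_fn(void *arg)
{
	struct deferred_sig *d = arg;

	pthread_mutex_lock(&d->lock);
	while (!d->stop) {
		while (!d->pending && !d->stop)
			pthread_cond_wait(&d->cond, &d->lock);
		if (d->pending) {
			int signo = d->signo;

			d->pending = false;
			pthread_mutex_unlock(&d->lock);
			/* the "dangerous" call happens here, lock dropped */
			printf("delivering signal %d\n", signo);
			pthread_mutex_lock(&d->lock);
		}
	}
	pthread_mutex_unlock(&d->lock);
	return NULL;
}

/* fast-path caller: only records the request, never delivers directly */
static void kick(struct deferred_sig *d, int signo)
{
	pthread_mutex_lock(&d->lock);
	d->signo = signo;
	d->pending = true;
	pthread_cond_signal(&d->cond);
	pthread_mutex_unlock(&d->lock);
}

int main(void)
{
	struct deferred_sig d = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cond = PTHREAD_COND_INITIALIZER,
	};
	pthread_t worker;

	pthread_create(&worker, NULL, deliver_fn, &d);
	kick(&d, 29);		/* SIGIO on most Linux systems */
	sleep(1);		/* give the worker time to run */

	pthread_mutex_lock(&d.lock);
	d.stop = true;
	pthread_cond_signal(&d.cond);
	pthread_mutex_unlock(&d.lock);
	pthread_join(worker, NULL);
	return 0;
}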
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index 977d54320a5c..82925709fa12 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -216,6 +216,8 @@ static const char * const snd_pcm_format_names[] = {
/**
* snd_pcm_format_name - Return a name string for the given PCM format
* @format: PCM format
+ *
+ * Return: the format name string
*/
const char *snd_pcm_format_name(snd_pcm_format_t format)
{
@@ -1005,6 +1007,7 @@ void snd_pcm_detach_substream(struct snd_pcm_substream *substream)
substream->runtime = NULL;
}
mutex_destroy(&runtime->buffer_mutex);
+ snd_fasync_free(runtime->fasync);
kfree(runtime);
put_pid(substream->pid);
substream->pid = NULL;
@@ -1028,7 +1031,7 @@ static ssize_t pcm_class_show(struct device *dev,
str = "none";
else
str = strs[pcm->dev_class];
- return sprintf(buf, "%s\n", str);
+ return sysfs_emit(buf, "%s\n", str);
}
static DEVICE_ATTR_RO(pcm_class);
@@ -1138,6 +1141,8 @@ static int snd_pcm_dev_disconnect(struct snd_device *device)
* This adds the given notifier to the global list so that the callback is
* called for each registered PCM devices. This exists only for PCM OSS
* emulation, so far.
+ *
+ * Return: zero if successful, or a negative error code
*/
int snd_pcm_notify(struct snd_pcm_notify *notify, int nfree)
{
diff --git a/sound/core/pcm_dmaengine.c b/sound/core/pcm_dmaengine.c
index af6f717e1e7e..5b2ca028f5aa 100644
--- a/sound/core/pcm_dmaengine.c
+++ b/sound/core/pcm_dmaengine.c
@@ -48,6 +48,8 @@ EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_get_chan);
*
* This function can be used to initialize a dma_slave_config from a substream
* and hw_params in a dmaengine based PCM driver implementation.
+ *
+ * Return: zero if successful, or a negative error code
*/
int snd_hwparams_to_dma_slave_config(const struct snd_pcm_substream *substream,
const struct snd_pcm_hw_params *params,
@@ -175,10 +177,10 @@ static int dmaengine_pcm_prepare_and_submit(struct snd_pcm_substream *substream)
* @substream: PCM substream
* @cmd: Trigger command
*
- * Returns 0 on success, a negative error code otherwise.
- *
* This function can be used as the PCM trigger callback for dmaengine based PCM
* driver implementations.
+ *
+ * Return: 0 on success, a negative error code otherwise
*/
int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
@@ -223,6 +225,8 @@ EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_trigger);
*
* This function is deprecated and should not be used by new drivers, as its
* results may be unreliable.
+ *
+ * Return: PCM position in frames
*/
snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream *substream)
{
@@ -237,6 +241,8 @@ EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer_no_residue);
*
* This function can be used as the PCM pointer callback for dmaengine based PCM
* driver implementations.
+ *
+ * Return: PCM position in frames
*/
snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream)
{
@@ -266,9 +272,9 @@ EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer);
* @filter_fn: Filter function used to request the DMA channel
* @filter_data: Data passed to the DMA filter function
*
- * Returns NULL or the requested DMA channel.
- *
 * This function requests a DMA channel for use with dmaengine PCM.
+ *
+ * Return: NULL or the requested DMA channel
*/
struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn,
void *filter_data)
@@ -288,11 +294,11 @@ EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_request_channel);
* @substream: PCM substream
* @chan: DMA channel to use for data transfers
*
- * Returns 0 on success, a negative error code otherwise.
- *
* The function should usually be called from the pcm open callback. Note that
* this function will use private_data field of the substream's runtime. So it
* is not available to your pcm driver implementation.
+ *
+ * Return: 0 on success, a negative error code otherwise
*/
int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
struct dma_chan *chan)
@@ -326,12 +332,12 @@ EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open);
* @filter_fn: Filter function used to request the DMA channel
* @filter_data: Data passed to the DMA filter function
*
- * Returns 0 on success, a negative error code otherwise.
- *
* This function will request a DMA channel using the passed filter function and
* data. The function should usually be called from the pcm open callback. Note
* that this function will use private_data field of the substream's runtime. So
* it is not available to your pcm driver implementation.
+ *
+ * Return: 0 on success, a negative error code otherwise
*/
int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
dma_filter_fn filter_fn, void *filter_data)
@@ -344,6 +350,8 @@ EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open_request_chan);
/**
* snd_dmaengine_pcm_close - Close a dmaengine based PCM substream
* @substream: PCM substream
+ *
+ * Return: 0 on success, a negative error code otherwise
*/
int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream)
{
@@ -362,6 +370,8 @@ EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close);
* @substream: PCM substream
*
* Releases the DMA channel associated with the PCM substream.
+ *
+ * Return: zero if successful, or a negative error code
*/
int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream)
{
@@ -382,10 +392,10 @@ EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close_release_chan);
* @hw: PCM hw params
* @chan: DMA channel to use for data transfers
*
- * Returns 0 on success, a negative error code otherwise.
- *
* This function will query DMA capability, then refine the pcm hardware
* parameters.
+ *
+ * Return: 0 on success, a negative error code otherwise
*/
int snd_dmaengine_pcm_refine_runtime_hwparams(
struct snd_pcm_substream *substream,
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 1fc7c50ffa62..40751e5aff09 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -1822,7 +1822,7 @@ void snd_pcm_period_elapsed_under_stream_lock(struct snd_pcm_substream *substrea
snd_timer_interrupt(substream->timer, 1);
#endif
_end:
- kill_fasync(&runtime->fasync, SIGIO, POLL_IN);
+ snd_kill_fasync(runtime->fasync, SIGIO, POLL_IN);
}
EXPORT_SYMBOL(snd_pcm_period_elapsed_under_stream_lock);
diff --git a/sound/core/pcm_memory.c b/sound/core/pcm_memory.c
index b8296b6eb2c1..7bde7fb64011 100644
--- a/sound/core/pcm_memory.c
+++ b/sound/core/pcm_memory.c
@@ -350,6 +350,8 @@ EXPORT_SYMBOL(snd_pcm_lib_preallocate_pages_for_all);
* SNDRV_DMA_TYPE_VMALLOC type.
*
* Upon successful buffer allocation and setup, the function returns 0.
+ *
+ * Return: zero if successful, or a negative error code
*/
int snd_pcm_set_managed_buffer(struct snd_pcm_substream *substream, int type,
struct device *data, size_t size, size_t max)
@@ -369,6 +371,8 @@ EXPORT_SYMBOL(snd_pcm_set_managed_buffer);
*
* Do pre-allocation to all substreams of the given pcm for the specified DMA
* type and size, and set the managed_buffer_alloc flag to each substream.
+ *
+ * Return: zero if successful, or a negative error code
*/
int snd_pcm_set_managed_buffer_all(struct snd_pcm *pcm, int type,
struct device *data,
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 4adaee62ef33..ad0541e9e888 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -3412,6 +3412,8 @@ static long snd_pcm_ioctl(struct file *file, unsigned int cmd,
* The function is provided primarily for OSS layer and USB gadget drivers,
* and it allows only the limited set of ioctls (hw_params, sw_params,
* prepare, start, drain, drop, forward).
+ *
+ * Return: zero if successful, or a negative error code
*/
int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
unsigned int cmd, void *arg)
@@ -3810,6 +3812,8 @@ static const struct vm_operations_struct snd_pcm_vm_ops_data_fault = {
*
* This is the default mmap handler for PCM data. When mmap pcm_ops is NULL,
* this function is invoked implicitly.
+ *
+ * Return: zero if successful, or a negative error code
*/
int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
struct vm_area_struct *area)
@@ -3836,6 +3840,8 @@ EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap);
* When your hardware uses the iomapped pages as the hardware buffer and
* wants to mmap it, pass this function as mmap pcm_ops. Note that this
* is supposed to work only on limited architectures.
+ *
+ * Return: zero if successful, or a negative error code
*/
int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream,
struct vm_area_struct *area)
@@ -3945,7 +3951,7 @@ static int snd_pcm_fasync(int fd, struct file * file, int on)
runtime = substream->runtime;
if (runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
return -EBADFD;
- return fasync_helper(fd, file, on, &runtime->fasync);
+ return snd_fasync_helper(fd, file, on, &runtime->fasync);
}
/*
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index befa9809ff00..6963d5a487b3 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -102,13 +102,12 @@ static inline bool __snd_rawmidi_ready(struct snd_rawmidi_runtime *runtime)
static bool snd_rawmidi_ready(struct snd_rawmidi_substream *substream)
{
- struct snd_rawmidi_runtime *runtime = substream->runtime;
unsigned long flags;
bool ready;
- spin_lock_irqsave(&runtime->lock, flags);
- ready = __snd_rawmidi_ready(runtime);
- spin_unlock_irqrestore(&runtime->lock, flags);
+ spin_lock_irqsave(&substream->lock, flags);
+ ready = __snd_rawmidi_ready(substream->runtime);
+ spin_unlock_irqrestore(&substream->lock, flags);
return ready;
}
@@ -130,7 +129,7 @@ static void snd_rawmidi_input_event_work(struct work_struct *work)
runtime->event(runtime->substream);
}
-/* buffer refcount management: call with runtime->lock held */
+/* buffer refcount management: call with substream->lock held */
static inline void snd_rawmidi_buffer_ref(struct snd_rawmidi_runtime *runtime)
{
runtime->buffer_ref++;
@@ -141,6 +140,23 @@ static inline void snd_rawmidi_buffer_unref(struct snd_rawmidi_runtime *runtime)
runtime->buffer_ref--;
}
+static void snd_rawmidi_buffer_ref_sync(struct snd_rawmidi_substream *substream)
+{
+ int loop = HZ;
+
+ spin_lock_irq(&substream->lock);
+ while (substream->runtime->buffer_ref) {
+ spin_unlock_irq(&substream->lock);
+ if (!--loop) {
+ rmidi_err(substream->rmidi, "Buffer ref sync timeout\n");
+ return;
+ }
+ schedule_timeout_uninterruptible(1);
+ spin_lock_irq(&substream->lock);
+ }
+ spin_unlock_irq(&substream->lock);
+}
+
static int snd_rawmidi_runtime_create(struct snd_rawmidi_substream *substream)
{
struct snd_rawmidi_runtime *runtime;
@@ -149,7 +165,6 @@ static int snd_rawmidi_runtime_create(struct snd_rawmidi_substream *substream)
if (!runtime)
return -ENOMEM;
runtime->substream = substream;
- spin_lock_init(&runtime->lock);
init_waitqueue_head(&runtime->sleep);
INIT_WORK(&runtime->event_work, snd_rawmidi_input_event_work);
runtime->event = NULL;
@@ -203,35 +218,48 @@ static void __reset_runtime_ptrs(struct snd_rawmidi_runtime *runtime,
runtime->avail = is_input ? 0 : runtime->buffer_size;
}
-static void reset_runtime_ptrs(struct snd_rawmidi_runtime *runtime,
+static void reset_runtime_ptrs(struct snd_rawmidi_substream *substream,
bool is_input)
{
unsigned long flags;
- spin_lock_irqsave(&runtime->lock, flags);
- __reset_runtime_ptrs(runtime, is_input);
- spin_unlock_irqrestore(&runtime->lock, flags);
+ spin_lock_irqsave(&substream->lock, flags);
+ if (substream->opened && substream->runtime)
+ __reset_runtime_ptrs(substream->runtime, is_input);
+ spin_unlock_irqrestore(&substream->lock, flags);
}
int snd_rawmidi_drop_output(struct snd_rawmidi_substream *substream)
{
snd_rawmidi_output_trigger(substream, 0);
- reset_runtime_ptrs(substream->runtime, false);
+ reset_runtime_ptrs(substream, false);
return 0;
}
EXPORT_SYMBOL(snd_rawmidi_drop_output);
int snd_rawmidi_drain_output(struct snd_rawmidi_substream *substream)
{
- int err;
+ int err = 0;
long timeout;
- struct snd_rawmidi_runtime *runtime = substream->runtime;
+ struct snd_rawmidi_runtime *runtime;
+
+ spin_lock_irq(&substream->lock);
+ runtime = substream->runtime;
+ if (!substream->opened || !runtime || !runtime->buffer) {
+ err = -EINVAL;
+ } else {
+ snd_rawmidi_buffer_ref(runtime);
+ runtime->drain = 1;
+ }
+ spin_unlock_irq(&substream->lock);
+ if (err < 0)
+ return err;
- err = 0;
- runtime->drain = 1;
timeout = wait_event_interruptible_timeout(runtime->sleep,
(runtime->avail >= runtime->buffer_size),
10*HZ);
+
+ spin_lock_irq(&substream->lock);
if (signal_pending(current))
err = -ERESTARTSYS;
if (runtime->avail < runtime->buffer_size && !timeout) {
@@ -241,6 +269,8 @@ int snd_rawmidi_drain_output(struct snd_rawmidi_substream *substream)
err = -EIO;
}
runtime->drain = 0;
+ spin_unlock_irq(&substream->lock);
+
if (err != -ERESTARTSYS) {
/* we need to wait a while to make sure that Tx FIFOs are empty */
if (substream->ops->drain)
@@ -249,6 +279,11 @@ int snd_rawmidi_drain_output(struct snd_rawmidi_substream *substream)
msleep(50);
snd_rawmidi_drop_output(substream);
}
+
+ spin_lock_irq(&substream->lock);
+ snd_rawmidi_buffer_unref(runtime);
+ spin_unlock_irq(&substream->lock);
+
return err;
}
EXPORT_SYMBOL(snd_rawmidi_drain_output);
@@ -256,7 +291,7 @@ EXPORT_SYMBOL(snd_rawmidi_drain_output);
int snd_rawmidi_drain_input(struct snd_rawmidi_substream *substream)
{
snd_rawmidi_input_trigger(substream, 0);
- reset_runtime_ptrs(substream->runtime, true);
+ reset_runtime_ptrs(substream, true);
return 0;
}
EXPORT_SYMBOL(snd_rawmidi_drain_input);
@@ -311,12 +346,14 @@ static int open_substream(struct snd_rawmidi *rmidi,
snd_rawmidi_runtime_free(substream);
return err;
}
+ spin_lock_irq(&substream->lock);
substream->opened = 1;
substream->active_sensing = 0;
if (mode & SNDRV_RAWMIDI_LFLG_APPEND)
substream->append = 1;
substream->pid = get_pid(task_pid(current));
rmidi->streams[substream->stream].substream_opened++;
+ spin_unlock_irq(&substream->lock);
}
substream->use_count++;
return 0;
@@ -521,13 +558,16 @@ static void close_substream(struct snd_rawmidi *rmidi,
if (snd_rawmidi_drain_output(substream) == -ERESTARTSYS)
snd_rawmidi_output_trigger(substream, 0);
}
+ snd_rawmidi_buffer_ref_sync(substream);
}
+ spin_lock_irq(&substream->lock);
+ substream->opened = 0;
+ substream->append = 0;
+ spin_unlock_irq(&substream->lock);
substream->ops->close(substream);
if (substream->runtime->private_free)
substream->runtime->private_free(substream);
snd_rawmidi_runtime_free(substream);
- substream->opened = 0;
- substream->append = 0;
put_pid(substream->pid);
substream->pid = NULL;
rmidi->streams[substream->stream].substream_opened--;
@@ -676,10 +716,11 @@ static int snd_rawmidi_info_select_user(struct snd_card *card,
return 0;
}
-static int resize_runtime_buffer(struct snd_rawmidi_runtime *runtime,
+static int resize_runtime_buffer(struct snd_rawmidi_substream *substream,
struct snd_rawmidi_params *params,
bool is_input)
{
+ struct snd_rawmidi_runtime *runtime = substream->runtime;
char *newbuf, *oldbuf;
unsigned int framing = params->mode & SNDRV_RAWMIDI_MODE_FRAMING_MASK;
@@ -693,9 +734,9 @@ static int resize_runtime_buffer(struct snd_rawmidi_runtime *runtime,
newbuf = kvzalloc(params->buffer_size, GFP_KERNEL);
if (!newbuf)
return -ENOMEM;
- spin_lock_irq(&runtime->lock);
+ spin_lock_irq(&substream->lock);
if (runtime->buffer_ref) {
- spin_unlock_irq(&runtime->lock);
+ spin_unlock_irq(&substream->lock);
kvfree(newbuf);
return -EBUSY;
}
@@ -703,7 +744,7 @@ static int resize_runtime_buffer(struct snd_rawmidi_runtime *runtime,
runtime->buffer = newbuf;
runtime->buffer_size = params->buffer_size;
__reset_runtime_ptrs(runtime, is_input);
- spin_unlock_irq(&runtime->lock);
+ spin_unlock_irq(&substream->lock);
kvfree(oldbuf);
}
runtime->avail_min = params->avail_min;
@@ -713,11 +754,19 @@ static int resize_runtime_buffer(struct snd_rawmidi_runtime *runtime,
int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream,
struct snd_rawmidi_params *params)
{
- if (substream->append && substream->use_count > 1)
- return -EBUSY;
+ int err;
+
snd_rawmidi_drain_output(substream);
- substream->active_sensing = !params->no_active_sensing;
- return resize_runtime_buffer(substream->runtime, params, false);
+ mutex_lock(&substream->rmidi->open_mutex);
+ if (substream->append && substream->use_count > 1)
+ err = -EBUSY;
+ else
+ err = resize_runtime_buffer(substream, params, false);
+
+ if (!err)
+ substream->active_sensing = !params->no_active_sensing;
+ mutex_unlock(&substream->rmidi->open_mutex);
+ return err;
}
EXPORT_SYMBOL(snd_rawmidi_output_params);
@@ -728,19 +777,22 @@ int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream,
unsigned int clock_type = params->mode & SNDRV_RAWMIDI_MODE_CLOCK_MASK;
int err;
+ snd_rawmidi_drain_input(substream);
+ mutex_lock(&substream->rmidi->open_mutex);
if (framing == SNDRV_RAWMIDI_MODE_FRAMING_NONE && clock_type != SNDRV_RAWMIDI_MODE_CLOCK_NONE)
- return -EINVAL;
+ err = -EINVAL;
else if (clock_type > SNDRV_RAWMIDI_MODE_CLOCK_MONOTONIC_RAW)
- return -EINVAL;
- if (framing > SNDRV_RAWMIDI_MODE_FRAMING_TSTAMP)
- return -EINVAL;
- snd_rawmidi_drain_input(substream);
- err = resize_runtime_buffer(substream->runtime, params, true);
- if (err < 0)
- return err;
+ err = -EINVAL;
+ else if (framing > SNDRV_RAWMIDI_MODE_FRAMING_TSTAMP)
+ err = -EINVAL;
+ else
+ err = resize_runtime_buffer(substream, params, true);
- substream->framing = framing;
- substream->clock_type = clock_type;
+ if (!err) {
+ substream->framing = framing;
+ substream->clock_type = clock_type;
+ }
+ mutex_unlock(&substream->rmidi->open_mutex);
return 0;
}
EXPORT_SYMBOL(snd_rawmidi_input_params);
@@ -752,9 +804,9 @@ static int snd_rawmidi_output_status(struct snd_rawmidi_substream *substream,
memset(status, 0, sizeof(*status));
status->stream = SNDRV_RAWMIDI_STREAM_OUTPUT;
- spin_lock_irq(&runtime->lock);
+ spin_lock_irq(&substream->lock);
status->avail = runtime->avail;
- spin_unlock_irq(&runtime->lock);
+ spin_unlock_irq(&substream->lock);
return 0;
}
@@ -765,11 +817,11 @@ static int snd_rawmidi_input_status(struct snd_rawmidi_substream *substream,
memset(status, 0, sizeof(*status));
status->stream = SNDRV_RAWMIDI_STREAM_INPUT;
- spin_lock_irq(&runtime->lock);
+ spin_lock_irq(&substream->lock);
status->avail = runtime->avail;
status->xruns = runtime->xruns;
runtime->xruns = 0;
- spin_unlock_irq(&runtime->lock);
+ spin_unlock_irq(&substream->lock);
return 0;
}
@@ -1064,17 +1116,21 @@ int snd_rawmidi_receive(struct snd_rawmidi_substream *substream,
unsigned long flags;
struct timespec64 ts64 = get_framing_tstamp(substream);
int result = 0, count1;
- struct snd_rawmidi_runtime *runtime = substream->runtime;
+ struct snd_rawmidi_runtime *runtime;
- if (!substream->opened)
- return -EBADFD;
- if (runtime->buffer == NULL) {
+ spin_lock_irqsave(&substream->lock, flags);
+ if (!substream->opened) {
+ result = -EBADFD;
+ goto unlock;
+ }
+ runtime = substream->runtime;
+ if (!runtime || !runtime->buffer) {
rmidi_dbg(substream->rmidi,
"snd_rawmidi_receive: input is not active!!!\n");
- return -EINVAL;
+ result = -EINVAL;
+ goto unlock;
}
- spin_lock_irqsave(&runtime->lock, flags);
if (substream->framing == SNDRV_RAWMIDI_MODE_FRAMING_TSTAMP) {
result = receive_with_tstamp_framing(substream, buffer, count, &ts64);
} else if (count == 1) { /* special case, faster code */
@@ -1121,7 +1177,8 @@ int snd_rawmidi_receive(struct snd_rawmidi_substream *substream,
else if (__snd_rawmidi_ready(runtime))
wake_up(&runtime->sleep);
}
- spin_unlock_irqrestore(&runtime->lock, flags);
+ unlock:
+ spin_unlock_irqrestore(&substream->lock, flags);
return result;
}
EXPORT_SYMBOL(snd_rawmidi_receive);
@@ -1136,7 +1193,7 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream,
unsigned long appl_ptr;
int err = 0;
- spin_lock_irqsave(&runtime->lock, flags);
+ spin_lock_irqsave(&substream->lock, flags);
snd_rawmidi_buffer_ref(runtime);
while (count > 0 && runtime->avail) {
count1 = runtime->buffer_size - runtime->appl_ptr;
@@ -1154,11 +1211,11 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream,
if (kernelbuf)
memcpy(kernelbuf + result, runtime->buffer + appl_ptr, count1);
if (userbuf) {
- spin_unlock_irqrestore(&runtime->lock, flags);
+ spin_unlock_irqrestore(&substream->lock, flags);
if (copy_to_user(userbuf + result,
runtime->buffer + appl_ptr, count1))
err = -EFAULT;
- spin_lock_irqsave(&runtime->lock, flags);
+ spin_lock_irqsave(&substream->lock, flags);
if (err)
goto out;
}
@@ -1167,7 +1224,7 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream,
}
out:
snd_rawmidi_buffer_unref(runtime);
- spin_unlock_irqrestore(&runtime->lock, flags);
+ spin_unlock_irqrestore(&substream->lock, flags);
return result > 0 ? result : err;
}
@@ -1196,31 +1253,31 @@ static ssize_t snd_rawmidi_read(struct file *file, char __user *buf, size_t coun
snd_rawmidi_input_trigger(substream, 1);
result = 0;
while (count > 0) {
- spin_lock_irq(&runtime->lock);
+ spin_lock_irq(&substream->lock);
while (!__snd_rawmidi_ready(runtime)) {
wait_queue_entry_t wait;
if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
- spin_unlock_irq(&runtime->lock);
+ spin_unlock_irq(&substream->lock);
return result > 0 ? result : -EAGAIN;
}
init_waitqueue_entry(&wait, current);
add_wait_queue(&runtime->sleep, &wait);
set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irq(&runtime->lock);
+ spin_unlock_irq(&substream->lock);
schedule();
remove_wait_queue(&runtime->sleep, &wait);
if (rfile->rmidi->card->shutdown)
return -ENODEV;
if (signal_pending(current))
return result > 0 ? result : -ERESTARTSYS;
- spin_lock_irq(&runtime->lock);
+ spin_lock_irq(&substream->lock);
if (!runtime->avail) {
- spin_unlock_irq(&runtime->lock);
+ spin_unlock_irq(&substream->lock);
return result > 0 ? result : -EIO;
}
}
- spin_unlock_irq(&runtime->lock);
+ spin_unlock_irq(&substream->lock);
count1 = snd_rawmidi_kernel_read1(substream,
(unsigned char __user *)buf,
NULL/*kernelbuf*/,
@@ -1242,23 +1299,25 @@ static ssize_t snd_rawmidi_read(struct file *file, char __user *buf, size_t coun
*/
int snd_rawmidi_transmit_empty(struct snd_rawmidi_substream *substream)
{
- struct snd_rawmidi_runtime *runtime = substream->runtime;
+ struct snd_rawmidi_runtime *runtime;
int result;
unsigned long flags;
- if (runtime->buffer == NULL) {
+ spin_lock_irqsave(&substream->lock, flags);
+ runtime = substream->runtime;
+ if (!substream->opened || !runtime || !runtime->buffer) {
rmidi_dbg(substream->rmidi,
"snd_rawmidi_transmit_empty: output is not active!!!\n");
- return 1;
+ result = 1;
+ } else {
+ result = runtime->avail >= runtime->buffer_size;
}
- spin_lock_irqsave(&runtime->lock, flags);
- result = runtime->avail >= runtime->buffer_size;
- spin_unlock_irqrestore(&runtime->lock, flags);
+ spin_unlock_irqrestore(&substream->lock, flags);
return result;
}
EXPORT_SYMBOL(snd_rawmidi_transmit_empty);
-/**
+/*
* __snd_rawmidi_transmit_peek - copy data from the internal buffer
* @substream: the rawmidi substream
* @buffer: the buffer pointer
@@ -1266,8 +1325,8 @@ EXPORT_SYMBOL(snd_rawmidi_transmit_empty);
*
* This is a variant of snd_rawmidi_transmit_peek() without spinlock.
*/
-int __snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
- unsigned char *buffer, int count)
+static int __snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
+ unsigned char *buffer, int count)
{
int result, count1;
struct snd_rawmidi_runtime *runtime = substream->runtime;
@@ -1304,7 +1363,6 @@ int __snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
__skip:
return result;
}
-EXPORT_SYMBOL(__snd_rawmidi_transmit_peek);
/**
* snd_rawmidi_transmit_peek - copy data from the internal buffer
@@ -1323,25 +1381,28 @@ EXPORT_SYMBOL(__snd_rawmidi_transmit_peek);
int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
unsigned char *buffer, int count)
{
- struct snd_rawmidi_runtime *runtime = substream->runtime;
int result;
unsigned long flags;
- spin_lock_irqsave(&runtime->lock, flags);
- result = __snd_rawmidi_transmit_peek(substream, buffer, count);
- spin_unlock_irqrestore(&runtime->lock, flags);
+ spin_lock_irqsave(&substream->lock, flags);
+ if (!substream->opened || !substream->runtime)
+ result = -EBADFD;
+ else
+ result = __snd_rawmidi_transmit_peek(substream, buffer, count);
+ spin_unlock_irqrestore(&substream->lock, flags);
return result;
}
EXPORT_SYMBOL(snd_rawmidi_transmit_peek);
-/**
+/*
* __snd_rawmidi_transmit_ack - acknowledge the transmission
* @substream: the rawmidi substream
* @count: the transferred count
*
 * This is a variant of snd_rawmidi_transmit_ack() without spinlock.
*/
-int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
+static int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream,
+ int count)
{
struct snd_rawmidi_runtime *runtime = substream->runtime;
@@ -1361,7 +1422,6 @@ int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int coun
}
return count;
}
-EXPORT_SYMBOL(__snd_rawmidi_transmit_ack);
/**
* snd_rawmidi_transmit_ack - acknowledge the transmission
@@ -1376,13 +1436,15 @@ EXPORT_SYMBOL(__snd_rawmidi_transmit_ack);
*/
int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
{
- struct snd_rawmidi_runtime *runtime = substream->runtime;
int result;
unsigned long flags;
- spin_lock_irqsave(&runtime->lock, flags);
- result = __snd_rawmidi_transmit_ack(substream, count);
- spin_unlock_irqrestore(&runtime->lock, flags);
+ spin_lock_irqsave(&substream->lock, flags);
+ if (!substream->opened || !substream->runtime)
+ result = -EBADFD;
+ else
+ result = __snd_rawmidi_transmit_ack(substream, count);
+ spin_unlock_irqrestore(&substream->lock, flags);
return result;
}
EXPORT_SYMBOL(snd_rawmidi_transmit_ack);
@@ -1400,11 +1462,10 @@ EXPORT_SYMBOL(snd_rawmidi_transmit_ack);
int snd_rawmidi_transmit(struct snd_rawmidi_substream *substream,
unsigned char *buffer, int count)
{
- struct snd_rawmidi_runtime *runtime = substream->runtime;
int result;
unsigned long flags;
- spin_lock_irqsave(&runtime->lock, flags);
+ spin_lock_irqsave(&substream->lock, flags);
if (!substream->opened)
result = -EBADFD;
else {
@@ -1414,7 +1475,7 @@ int snd_rawmidi_transmit(struct snd_rawmidi_substream *substream,
else
result = __snd_rawmidi_transmit_ack(substream, count);
}
- spin_unlock_irqrestore(&runtime->lock, flags);
+ spin_unlock_irqrestore(&substream->lock, flags);
return result;
}
EXPORT_SYMBOL(snd_rawmidi_transmit);
@@ -1427,16 +1488,18 @@ EXPORT_SYMBOL(snd_rawmidi_transmit);
*/
int snd_rawmidi_proceed(struct snd_rawmidi_substream *substream)
{
- struct snd_rawmidi_runtime *runtime = substream->runtime;
+ struct snd_rawmidi_runtime *runtime;
unsigned long flags;
int count = 0;
- spin_lock_irqsave(&runtime->lock, flags);
- if (runtime->avail < runtime->buffer_size) {
+ spin_lock_irqsave(&substream->lock, flags);
+ runtime = substream->runtime;
+ if (substream->opened && runtime &&
+ runtime->avail < runtime->buffer_size) {
count = runtime->buffer_size - runtime->avail;
__snd_rawmidi_transmit_ack(substream, count);
}
- spin_unlock_irqrestore(&runtime->lock, flags);
+ spin_unlock_irqrestore(&substream->lock, flags);
return count;
}
EXPORT_SYMBOL(snd_rawmidi_proceed);
@@ -1457,10 +1520,10 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
return -EINVAL;
result = 0;
- spin_lock_irqsave(&runtime->lock, flags);
+ spin_lock_irqsave(&substream->lock, flags);
if (substream->append) {
if ((long)runtime->avail < count) {
- spin_unlock_irqrestore(&runtime->lock, flags);
+ spin_unlock_irqrestore(&substream->lock, flags);
return -EAGAIN;
}
}
@@ -1482,14 +1545,14 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
memcpy(runtime->buffer + appl_ptr,
kernelbuf + result, count1);
else if (userbuf) {
- spin_unlock_irqrestore(&runtime->lock, flags);
+ spin_unlock_irqrestore(&substream->lock, flags);
if (copy_from_user(runtime->buffer + appl_ptr,
userbuf + result, count1)) {
- spin_lock_irqsave(&runtime->lock, flags);
+ spin_lock_irqsave(&substream->lock, flags);
result = result > 0 ? result : -EFAULT;
goto __end;
}
- spin_lock_irqsave(&runtime->lock, flags);
+ spin_lock_irqsave(&substream->lock, flags);
}
result += count1;
count -= count1;
@@ -1497,7 +1560,7 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
__end:
count1 = runtime->avail < runtime->buffer_size;
snd_rawmidi_buffer_unref(runtime);
- spin_unlock_irqrestore(&runtime->lock, flags);
+ spin_unlock_irqrestore(&substream->lock, flags);
if (count1)
snd_rawmidi_output_trigger(substream, 1);
return result;
@@ -1527,31 +1590,31 @@ static ssize_t snd_rawmidi_write(struct file *file, const char __user *buf,
return -EIO;
result = 0;
while (count > 0) {
- spin_lock_irq(&runtime->lock);
+ spin_lock_irq(&substream->lock);
while (!snd_rawmidi_ready_append(substream, count)) {
wait_queue_entry_t wait;
if (file->f_flags & O_NONBLOCK) {
- spin_unlock_irq(&runtime->lock);
+ spin_unlock_irq(&substream->lock);
return result > 0 ? result : -EAGAIN;
}
init_waitqueue_entry(&wait, current);
add_wait_queue(&runtime->sleep, &wait);
set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irq(&runtime->lock);
+ spin_unlock_irq(&substream->lock);
timeout = schedule_timeout(30 * HZ);
remove_wait_queue(&runtime->sleep, &wait);
if (rfile->rmidi->card->shutdown)
return -ENODEV;
if (signal_pending(current))
return result > 0 ? result : -ERESTARTSYS;
- spin_lock_irq(&runtime->lock);
+ spin_lock_irq(&substream->lock);
if (!runtime->avail && !timeout) {
- spin_unlock_irq(&runtime->lock);
+ spin_unlock_irq(&substream->lock);
return result > 0 ? result : -EIO;
}
}
- spin_unlock_irq(&runtime->lock);
+ spin_unlock_irq(&substream->lock);
count1 = snd_rawmidi_kernel_write1(substream, buf, NULL, count);
if (count1 < 0)
return result > 0 ? result : count1;
@@ -1562,7 +1625,7 @@ static ssize_t snd_rawmidi_write(struct file *file, const char __user *buf,
count -= count1;
}
if (file->f_flags & O_DSYNC) {
- spin_lock_irq(&runtime->lock);
+ spin_lock_irq(&substream->lock);
while (runtime->avail != runtime->buffer_size) {
wait_queue_entry_t wait;
unsigned int last_avail = runtime->avail;
@@ -1570,16 +1633,16 @@ static ssize_t snd_rawmidi_write(struct file *file, const char __user *buf,
init_waitqueue_entry(&wait, current);
add_wait_queue(&runtime->sleep, &wait);
set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irq(&runtime->lock);
+ spin_unlock_irq(&substream->lock);
timeout = schedule_timeout(30 * HZ);
remove_wait_queue(&runtime->sleep, &wait);
if (signal_pending(current))
return result > 0 ? result : -ERESTARTSYS;
if (runtime->avail == last_avail && !timeout)
return result > 0 ? result : -EIO;
- spin_lock_irq(&runtime->lock);
+ spin_lock_irq(&substream->lock);
}
- spin_unlock_irq(&runtime->lock);
+ spin_unlock_irq(&substream->lock);
}
return result;
}
@@ -1650,10 +1713,10 @@ static void snd_rawmidi_proc_info_read(struct snd_info_entry *entry,
" Owner PID : %d\n",
pid_vnr(substream->pid));
runtime = substream->runtime;
- spin_lock_irq(&runtime->lock);
+ spin_lock_irq(&substream->lock);
buffer_size = runtime->buffer_size;
avail = runtime->avail;
- spin_unlock_irq(&runtime->lock);
+ spin_unlock_irq(&substream->lock);
snd_iprintf(buffer,
" Mode : %s\n"
" Buffer size : %lu\n"
@@ -1677,11 +1740,11 @@ static void snd_rawmidi_proc_info_read(struct snd_info_entry *entry,
" Owner PID : %d\n",
pid_vnr(substream->pid));
runtime = substream->runtime;
- spin_lock_irq(&runtime->lock);
+ spin_lock_irq(&substream->lock);
buffer_size = runtime->buffer_size;
avail = runtime->avail;
xruns = runtime->xruns;
- spin_unlock_irq(&runtime->lock);
+ spin_unlock_irq(&substream->lock);
snd_iprintf(buffer,
" Buffer size : %lu\n"
" Avail : %lu\n"
@@ -1733,6 +1796,7 @@ static int snd_rawmidi_alloc_substreams(struct snd_rawmidi *rmidi,
substream->number = idx;
substream->rmidi = rmidi;
substream->pstr = stream;
+ spin_lock_init(&substream->lock);
list_add_tail(&substream->list, &stream->substreams);
stream->substream_count++;
}
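Among the rawmidi changes, snd_rawmidi_buffer_ref_sync() waits for the runtime buffer reference count to drop before the buffer is torn down, dropping and re-taking the lock between bounded retries instead of spinning forever. The following loose userspace sketch shows that wait-with-timeout pattern, with a pthread mutex in place of the substream spinlock; all names are hypothetical and it is not the kernel implementation.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int buffer_ref;		/* readers still using the buffer */

static void *reader(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	buffer_ref++;			/* take a reference */
	pthread_mutex_unlock(&lock);

	usleep(200 * 1000);		/* pretend to copy data out */

	pthread_mutex_lock(&lock);
	buffer_ref--;			/* drop it again */
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* returns 0 once no reference is held, -1 if we gave up */
static int buffer_ref_sync(void)
{
	int loop = 100;			/* bounded: never spin forever */

	pthread_mutex_lock(&lock);
	while (buffer_ref) {
		pthread_mutex_unlock(&lock);
		if (!--loop) {
			fprintf(stderr, "ref sync timeout\n");
			return -1;
		}
		usleep(10 * 1000);	/* back off, then re-check */
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, reader, NULL);
	usleep(50 * 1000);		/* let the reader grab its reference */
	printf("sync result: %d\n", buffer_ref_sync());
	pthread_join(t, NULL);
	return 0;
}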
diff --git a/sound/core/timer.c b/sound/core/timer.c
index b3214baa8919..e08a37c23add 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -83,7 +83,7 @@ struct snd_timer_user {
unsigned int filter;
struct timespec64 tstamp; /* trigger tstamp */
wait_queue_head_t qchange_sleep;
- struct fasync_struct *fasync;
+ struct snd_fasync *fasync;
struct mutex ioctl_lock;
};
@@ -1345,7 +1345,7 @@ static void snd_timer_user_interrupt(struct snd_timer_instance *timeri,
}
__wake:
spin_unlock(&tu->qlock);
- kill_fasync(&tu->fasync, SIGIO, POLL_IN);
+ snd_kill_fasync(tu->fasync, SIGIO, POLL_IN);
wake_up(&tu->qchange_sleep);
}
@@ -1383,7 +1383,7 @@ static void snd_timer_user_ccallback(struct snd_timer_instance *timeri,
spin_lock_irqsave(&tu->qlock, flags);
snd_timer_user_append_to_tqueue(tu, &r1);
spin_unlock_irqrestore(&tu->qlock, flags);
- kill_fasync(&tu->fasync, SIGIO, POLL_IN);
+ snd_kill_fasync(tu->fasync, SIGIO, POLL_IN);
wake_up(&tu->qchange_sleep);
}
@@ -1453,7 +1453,7 @@ static void snd_timer_user_tinterrupt(struct snd_timer_instance *timeri,
spin_unlock(&tu->qlock);
if (append == 0)
return;
- kill_fasync(&tu->fasync, SIGIO, POLL_IN);
+ snd_kill_fasync(tu->fasync, SIGIO, POLL_IN);
wake_up(&tu->qchange_sleep);
}
@@ -1521,6 +1521,7 @@ static int snd_timer_user_release(struct inode *inode, struct file *file)
snd_timer_instance_free(tu->timeri);
}
mutex_unlock(&tu->ioctl_lock);
+ snd_fasync_free(tu->fasync);
kfree(tu->queue);
kfree(tu->tqueue);
kfree(tu);
@@ -2135,7 +2136,7 @@ static int snd_timer_user_fasync(int fd, struct file * file, int on)
struct snd_timer_user *tu;
tu = file->private_data;
- return fasync_helper(fd, file, on, &tu->fasync);
+ return snd_fasync_helper(fd, file, on, &tu->fasync);
}
static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
diff --git a/sound/core/vmaster.c b/sound/core/vmaster.c
index ab36f9898711..d0f11f37889b 100644
--- a/sound/core/vmaster.c
+++ b/sound/core/vmaster.c
@@ -494,7 +494,8 @@ EXPORT_SYMBOL_GPL(snd_ctl_sync_vmaster);
* @arg: optional function argument
*
* Apply the function @func to each follower kctl of the given vmaster kctl.
- * Returns 0 if successful, or a negative error code.
+ *
+ * Return: 0 if successful, or a negative error code
*/
int snd_ctl_apply_vmaster_followers(struct snd_kcontrol *kctl,
int (*func)(struct snd_kcontrol *vfollower,
diff --git a/sound/hda/ext/hdac_ext_controller.c b/sound/hda/ext/hdac_ext_controller.c
index b072392725c7..a42f66f561f5 100644
--- a/sound/hda/ext/hdac_ext_controller.c
+++ b/sound/hda/ext/hdac_ext_controller.c
@@ -15,13 +15,6 @@
#include <sound/hdaudio_ext.h>
/*
- * maximum HDAC capablities we should parse to avoid endless looping:
- * currently we have 4 extended caps, so this is future proof for now.
- * extend when this limit is seen meeting in real HW
- */
-#define HDAC_MAX_CAPS 10
-
-/*
* processing pipe helpers - these helpers are useful for dealing with HDA
* new capability of processing pipelines
*/
diff --git a/sound/hda/hdac_bus.c b/sound/hda/hdac_bus.c
index 71db8592b33d..d497414a5538 100644
--- a/sound/hda/hdac_bus.c
+++ b/sound/hda/hdac_bus.c
@@ -183,7 +183,7 @@ static void snd_hdac_bus_process_unsol_events(struct work_struct *work)
if (!(caddr & (1 << 4))) /* no unsolicited event? */
continue;
codec = bus->caddr_tbl[caddr & 0x0f];
- if (!codec || !codec->dev.driver)
+ if (!codec || !codec->registered)
continue;
spin_unlock_irq(&bus->reg_lock);
drv = drv_to_hdac_driver(codec->dev.driver);
diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
index f7bd6e2db085..9a60bfdb39ba 100644
--- a/sound/hda/hdac_controller.c
+++ b/sound/hda/hdac_controller.c
@@ -474,11 +474,8 @@ static void azx_int_disable(struct hdac_bus *bus)
list_for_each_entry(azx_dev, &bus->stream_list, list)
snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_INT_MASK, 0);
- /* disable SIE for all streams */
- snd_hdac_chip_writeb(bus, INTCTL, 0);
-
- /* disable controller CIE and GIE */
- snd_hdac_chip_updatel(bus, INTCTL, AZX_INT_CTRL_EN | AZX_INT_GLOBAL_EN, 0);
+ /* disable SIE for all streams & disable controller CIE and GIE */
+ snd_hdac_chip_writel(bus, INTCTL, 0);
}
/* clear interrupts */
diff --git a/sound/hda/hdac_sysfs.c b/sound/hda/hdac_sysfs.c
index 0d7771fca9f0..e47de49a32e3 100644
--- a/sound/hda/hdac_sysfs.c
+++ b/sound/hda/hdac_sysfs.c
@@ -22,7 +22,7 @@ static ssize_t type##_show(struct device *dev, \
char *buf) \
{ \
struct hdac_device *codec = dev_to_hdac_dev(dev); \
- return sprintf(buf, "0x%x\n", codec->type); \
+ return sysfs_emit(buf, "0x%x\n", codec->type); \
} \
static DEVICE_ATTR_RO(type)
@@ -32,8 +32,8 @@ static ssize_t type##_show(struct device *dev, \
char *buf) \
{ \
struct hdac_device *codec = dev_to_hdac_dev(dev); \
- return sprintf(buf, "%s\n", \
- codec->type ? codec->type : ""); \
+ return sysfs_emit(buf, "%s\n", \
+ codec->type ? codec->type : ""); \
} \
static DEVICE_ATTR_RO(type)
@@ -161,7 +161,7 @@ static struct kobj_type widget_ktype = {
static ssize_t caps_show(struct hdac_device *codec, hda_nid_t nid,
struct widget_attribute *attr, char *buf)
{
- return sprintf(buf, "0x%08x\n", get_wcaps(codec, nid));
+ return sysfs_emit(buf, "0x%08x\n", get_wcaps(codec, nid));
}
static ssize_t pin_caps_show(struct hdac_device *codec, hda_nid_t nid,
@@ -169,8 +169,8 @@ static ssize_t pin_caps_show(struct hdac_device *codec, hda_nid_t nid,
{
if (get_wcaps_type(get_wcaps(codec, nid)) != AC_WID_PIN)
return 0;
- return sprintf(buf, "0x%08x\n",
- snd_hdac_read_parm(codec, nid, AC_PAR_PIN_CAP));
+ return sysfs_emit(buf, "0x%08x\n",
+ snd_hdac_read_parm(codec, nid, AC_PAR_PIN_CAP));
}
static ssize_t pin_cfg_show(struct hdac_device *codec, hda_nid_t nid,
@@ -182,7 +182,7 @@ static ssize_t pin_cfg_show(struct hdac_device *codec, hda_nid_t nid,
return 0;
if (snd_hdac_read(codec, nid, AC_VERB_GET_CONFIG_DEFAULT, 0, &val))
return 0;
- return sprintf(buf, "0x%08x\n", val);
+ return sysfs_emit(buf, "0x%08x\n", val);
}
static bool has_pcm_cap(struct hdac_device *codec, hda_nid_t nid)
@@ -203,8 +203,8 @@ static ssize_t pcm_caps_show(struct hdac_device *codec, hda_nid_t nid,
{
if (!has_pcm_cap(codec, nid))
return 0;
- return sprintf(buf, "0x%08x\n",
- snd_hdac_read_parm(codec, nid, AC_PAR_PCM));
+ return sysfs_emit(buf, "0x%08x\n",
+ snd_hdac_read_parm(codec, nid, AC_PAR_PCM));
}
static ssize_t pcm_formats_show(struct hdac_device *codec, hda_nid_t nid,
@@ -212,8 +212,8 @@ static ssize_t pcm_formats_show(struct hdac_device *codec, hda_nid_t nid,
{
if (!has_pcm_cap(codec, nid))
return 0;
- return sprintf(buf, "0x%08x\n",
- snd_hdac_read_parm(codec, nid, AC_PAR_STREAM));
+ return sysfs_emit(buf, "0x%08x\n",
+ snd_hdac_read_parm(codec, nid, AC_PAR_STREAM));
}
static ssize_t amp_in_caps_show(struct hdac_device *codec, hda_nid_t nid,
@@ -221,8 +221,8 @@ static ssize_t amp_in_caps_show(struct hdac_device *codec, hda_nid_t nid,
{
if (nid != codec->afg && !(get_wcaps(codec, nid) & AC_WCAP_IN_AMP))
return 0;
- return sprintf(buf, "0x%08x\n",
- snd_hdac_read_parm(codec, nid, AC_PAR_AMP_IN_CAP));
+ return sysfs_emit(buf, "0x%08x\n",
+ snd_hdac_read_parm(codec, nid, AC_PAR_AMP_IN_CAP));
}
static ssize_t amp_out_caps_show(struct hdac_device *codec, hda_nid_t nid,
@@ -230,8 +230,8 @@ static ssize_t amp_out_caps_show(struct hdac_device *codec, hda_nid_t nid,
{
if (nid != codec->afg && !(get_wcaps(codec, nid) & AC_WCAP_OUT_AMP))
return 0;
- return sprintf(buf, "0x%08x\n",
- snd_hdac_read_parm(codec, nid, AC_PAR_AMP_OUT_CAP));
+ return sysfs_emit(buf, "0x%08x\n",
+ snd_hdac_read_parm(codec, nid, AC_PAR_AMP_OUT_CAP));
}
static ssize_t power_caps_show(struct hdac_device *codec, hda_nid_t nid,
@@ -239,15 +239,15 @@ static ssize_t power_caps_show(struct hdac_device *codec, hda_nid_t nid,
{
if (nid != codec->afg && !(get_wcaps(codec, nid) & AC_WCAP_POWER))
return 0;
- return sprintf(buf, "0x%08x\n",
- snd_hdac_read_parm(codec, nid, AC_PAR_POWER_STATE));
+ return sysfs_emit(buf, "0x%08x\n",
+ snd_hdac_read_parm(codec, nid, AC_PAR_POWER_STATE));
}
static ssize_t gpio_caps_show(struct hdac_device *codec, hda_nid_t nid,
struct widget_attribute *attr, char *buf)
{
- return sprintf(buf, "0x%08x\n",
- snd_hdac_read_parm(codec, nid, AC_PAR_GPIO_CAP));
+ return sysfs_emit(buf, "0x%08x\n",
+ snd_hdac_read_parm(codec, nid, AC_PAR_GPIO_CAP));
}
static ssize_t connections_show(struct hdac_device *codec, hda_nid_t nid,
@@ -261,8 +261,8 @@ static ssize_t connections_show(struct hdac_device *codec, hda_nid_t nid,
if (nconns <= 0)
return nconns;
for (i = 0; i < nconns; i++)
- ret += sprintf(buf + ret, "%s0x%02x", i ? " " : "", list[i]);
- ret += sprintf(buf + ret, "\n");
+ ret += sysfs_emit_at(buf, ret, "%s0x%02x", i ? " " : "", list[i]);
+ ret += sysfs_emit_at(buf, ret, "\n");
return ret;
}
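The hdac_sysfs.c hunks above mechanically convert sprintf() callers in show() callbacks to sysfs_emit()/sysfs_emit_at(), which bound the output to the PAGE_SIZE sysfs buffer. A minimal sketch of the pattern, using a hypothetical attribute that is not part of this patch:

#include <linux/device.h>
#include <linux/sysfs.h>

/* Hypothetical show() callbacks demonstrating the sysfs_emit() pattern */
static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	/* single value: length-checked against PAGE_SIZE internally */
	return sysfs_emit(buf, "0x%08x\n", 0x1234abcd);
}

static ssize_t example_list_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	int i, len = 0;

	/* multiple values: sysfs_emit_at() appends at the given offset */
	for (i = 0; i < 4; i++)
		len += sysfs_emit_at(buf, len, "%s0x%02x", i ? " " : "", i);
	len += sysfs_emit_at(buf, len, "\n");

	return len;
}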
diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
index ec9cbb219bc1..d84ffdf47210 100644
--- a/sound/hda/intel-dsp-config.c
+++ b/sound/hda/intel-dsp-config.c
@@ -414,6 +414,11 @@ static const struct config_entry config_table[] = {
},
/* Alderlake-P */
{
+ .flags = FLAG_SOF,
+ .device = 0x51c8,
+ .codec_hid = &essx_83x6,
+ },
+ {
.flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
.device = 0x51c8,
},
diff --git a/sound/hda/trace.h b/sound/hda/trace.h
index 70af6c815089..2cc493434a8f 100644
--- a/sound/hda/trace.h
+++ b/sound/hda/trace.h
@@ -19,37 +19,48 @@ struct hdac_codec;
TRACE_EVENT(hda_send_cmd,
TP_PROTO(struct hdac_bus *bus, unsigned int cmd),
TP_ARGS(bus, cmd),
- TP_STRUCT__entry(__dynamic_array(char, msg, HDAC_MSG_MAX)),
+ TP_STRUCT__entry(
+ __string(name, dev_name((bus)->dev))
+ __field(u32, cmd)
+ ),
TP_fast_assign(
- snprintf(__get_str(msg), HDAC_MSG_MAX,
- "[%s:%d] val=0x%08x",
- dev_name((bus)->dev), (cmd) >> 28, cmd);
+ __assign_str(name, dev_name((bus)->dev));
+ __entry->cmd = cmd;
),
- TP_printk("%s", __get_str(msg))
+ TP_printk("[%s:%d] val=0x%08x", __get_str(name), __entry->cmd >> 28, __entry->cmd)
);
TRACE_EVENT(hda_get_response,
TP_PROTO(struct hdac_bus *bus, unsigned int addr, unsigned int res),
TP_ARGS(bus, addr, res),
- TP_STRUCT__entry(__dynamic_array(char, msg, HDAC_MSG_MAX)),
+ TP_STRUCT__entry(
+ __string(name, dev_name((bus)->dev))
+ __field(u32, addr)
+ __field(u32, res)
+ ),
TP_fast_assign(
- snprintf(__get_str(msg), HDAC_MSG_MAX,
- "[%s:%d] val=0x%08x",
- dev_name((bus)->dev), addr, res);
+ __assign_str(name, dev_name((bus)->dev));
+ __entry->addr = addr;
+ __entry->res = res;
),
- TP_printk("%s", __get_str(msg))
+ TP_printk("[%s:%d] val=0x%08x", __get_str(name), __entry->addr, __entry->res)
);
TRACE_EVENT(hda_unsol_event,
TP_PROTO(struct hdac_bus *bus, u32 res, u32 res_ex),
TP_ARGS(bus, res, res_ex),
- TP_STRUCT__entry(__dynamic_array(char, msg, HDAC_MSG_MAX)),
+ TP_STRUCT__entry(
+ __string(name, dev_name((bus)->dev))
+ __field(u32, res)
+ __field(u32, res_ex)
+ ),
TP_fast_assign(
- snprintf(__get_str(msg), HDAC_MSG_MAX,
- "[%s:%d] res=0x%08x, res_ex=0x%08x",
- dev_name((bus)->dev), res_ex & 0x0f, res, res_ex);
+ __assign_str(name, dev_name((bus)->dev));
+ __entry->res = res;
+ __entry->res_ex = res_ex;
),
- TP_printk("%s", __get_str(msg))
+ TP_printk("[%s:%d] res=0x%08x, res_ex=0x%08x", __get_str(name),
+ __entry->res_ex & 0x0f, __entry->res, __entry->res_ex)
);
DECLARE_EVENT_CLASS(hdac_stream,
diff --git a/sound/isa/wavefront/wavefront_synth.c b/sound/isa/wavefront/wavefront_synth.c
index 2aaaa6807174..13ce96148fa3 100644
--- a/sound/isa/wavefront/wavefront_synth.c
+++ b/sound/isa/wavefront/wavefront_synth.c
@@ -581,8 +581,6 @@ demunge_buf (unsigned char *src, unsigned char *dst, unsigned int src_bytes)
int i;
unsigned char *end = src + src_bytes;
- end = src + src_bytes;
-
/* NOTE: src and dst *CAN* point to the same address */
for (i = 0; src != end; i++) {
diff --git a/sound/pci/asihpi/hpi6000.c b/sound/pci/asihpi/hpi6000.c
index aa4d06353126..88d902997b74 100644
--- a/sound/pci/asihpi/hpi6000.c
+++ b/sound/pci/asihpi/hpi6000.c
@@ -388,7 +388,7 @@ void HPI_6000(struct hpi_message *phm, struct hpi_response *phr)
/* SUBSYSTEM */
/* create an adapter object and initialise it based on resource information
- * passed in in the message
+ * passed in the message
* NOTE - you cannot use this function AND the FindAdapters function at the
* same time, the application must use only one of them to get the adapters
*/
diff --git a/sound/pci/asihpi/hpi6205.c b/sound/pci/asihpi/hpi6205.c
index 3d6914c64c4a..27e11b5f70b9 100644
--- a/sound/pci/asihpi/hpi6205.c
+++ b/sound/pci/asihpi/hpi6205.c
@@ -445,7 +445,7 @@ void HPI_6205(struct hpi_message *phm, struct hpi_response *phr)
/* SUBSYSTEM */
/** Create an adapter object and initialise it based on resource information
- * passed in in the message
+ * passed in the message
* *** NOTE - you cannot use this function AND the FindAdapters function at the
* same time, the application must use only one of them to get the adapters ***
*/
diff --git a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c
index 9d26535f3fa3..edb3f1763719 100644
--- a/sound/pci/emu10k1/memory.c
+++ b/sound/pci/emu10k1/memory.c
@@ -324,7 +324,7 @@ snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *subst
return NULL;
}
/* fill buffer addresses but pointers are not stored so that
- * snd_free_pci_page() is not called in in synth_free()
+ * snd_free_pci_page() is not called in synth_free()
*/
idx = 0;
for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
diff --git a/sound/pci/ens1370.c b/sound/pci/ens1370.c
index 94efe347a97a..89210b2c7342 100644
--- a/sound/pci/ens1370.c
+++ b/sound/pci/ens1370.c
@@ -8,7 +8,7 @@
/* Power-Management-Code ( CONFIG_PM )
* for ens1371 only ( FIXME )
* derived from cs4281.c, atiixp.c and via82xx.c
- * using http://www.alsa-project.org/~tiwai/writing-an-alsa-driver/
+ * using https://www.kernel.org/doc/html/latest/sound/kernel-api/writing-an-alsa-driver.html
* by Kurt J. Bosch
*/
diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
index 79ade4787d95..a8e8cf98befa 100644
--- a/sound/pci/hda/Kconfig
+++ b/sound/pci/hda/Kconfig
@@ -93,16 +93,21 @@ config SND_HDA_PATCH_LOADER
config SND_HDA_SCODEC_CS35L41
tristate
+ select SND_HDA_GENERIC
+ select REGMAP_IRQ
+
+config SND_HDA_CS_DSP_CONTROLS
+ tristate
+ select CS_DSP
config SND_HDA_SCODEC_CS35L41_I2C
tristate "Build CS35L41 HD-audio side codec support for I2C Bus"
depends on I2C
depends on ACPI
depends on SND_SOC
- select SND_HDA_GENERIC
select SND_SOC_CS35L41_LIB
select SND_HDA_SCODEC_CS35L41
- select REGMAP_IRQ
+ select SND_HDA_CS_DSP_CONTROLS
help
Say Y or M here to include CS35L41 I2C HD-audio side codec support
in snd-hda-intel driver, such as ALC287.
@@ -115,10 +120,9 @@ config SND_HDA_SCODEC_CS35L41_SPI
depends on SPI_MASTER
depends on ACPI
depends on SND_SOC
- select SND_HDA_GENERIC
select SND_SOC_CS35L41_LIB
select SND_HDA_SCODEC_CS35L41
- select REGMAP_IRQ
+ select SND_HDA_CS_DSP_CONTROLS
help
Say Y or M here to include CS35L41 SPI HD-audio side codec support
in snd-hda-intel driver, such as ALC287.
diff --git a/sound/pci/hda/Makefile b/sound/pci/hda/Makefile
index 3e7bc608d45f..00d306104484 100644
--- a/sound/pci/hda/Makefile
+++ b/sound/pci/hda/Makefile
@@ -31,6 +31,7 @@ snd-hda-codec-hdmi-objs := patch_hdmi.o hda_eld.o
snd-hda-scodec-cs35l41-objs := cs35l41_hda.o
snd-hda-scodec-cs35l41-i2c-objs := cs35l41_hda_i2c.o
snd-hda-scodec-cs35l41-spi-objs := cs35l41_hda_spi.o
+snd-hda-cs-dsp-ctls-objs := hda_cs_dsp_ctl.o
# common driver
obj-$(CONFIG_SND_HDA) := snd-hda-codec.o
@@ -54,6 +55,7 @@ obj-$(CONFIG_SND_HDA_CODEC_HDMI) += snd-hda-codec-hdmi.o
obj-$(CONFIG_SND_HDA_SCODEC_CS35L41) += snd-hda-scodec-cs35l41.o
obj-$(CONFIG_SND_HDA_SCODEC_CS35L41_I2C) += snd-hda-scodec-cs35l41-i2c.o
obj-$(CONFIG_SND_HDA_SCODEC_CS35L41_SPI) += snd-hda-scodec-cs35l41-spi.o
+obj-$(CONFIG_SND_HDA_CS_DSP_CONTROLS) += snd-hda-cs-dsp-ctls.o
# this must be the last entry after codec drivers;
# otherwise the codec patches won't be hooked before the PCI probe
diff --git a/sound/pci/hda/cs35l41_hda.c b/sound/pci/hda/cs35l41_hda.c
index cce27a86267f..15e2a0009080 100644
--- a/sound/pci/hda/cs35l41_hda.c
+++ b/sound/pci/hda/cs35l41_hda.c
@@ -8,30 +8,464 @@
#include <linux/acpi.h>
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <sound/hda_codec.h>
+#include <sound/soc.h>
+#include <linux/pm_runtime.h>
#include "hda_local.h"
#include "hda_auto_parser.h"
#include "hda_jack.h"
#include "hda_generic.h"
#include "hda_component.h"
#include "cs35l41_hda.h"
+#include "hda_cs_dsp_ctl.h"
+
+#define CS35L41_FIRMWARE_ROOT "cirrus/"
+#define CS35L41_PART "cs35l41"
+
+#define HALO_STATE_DSP_CTL_NAME "HALO_STATE"
+#define HALO_STATE_DSP_CTL_TYPE 5
+#define HALO_STATE_DSP_CTL_ALG 262308
+#define CAL_R_DSP_CTL_NAME "CAL_R"
+#define CAL_STATUS_DSP_CTL_NAME "CAL_STATUS"
+#define CAL_CHECKSUM_DSP_CTL_NAME "CAL_CHECKSUM"
+#define CAL_AMBIENT_DSP_CTL_NAME "CAL_AMBIENT"
+#define CAL_DSP_CTL_TYPE 5
+#define CAL_DSP_CTL_ALG 205
+
+static bool firmware_autostart = 1;
+module_param(firmware_autostart, bool, 0444);
+MODULE_PARM_DESC(firmware_autostart, "Allow automatic firmware download on boot "
+ "(0=Disable, 1=Enable) (default=1)");
static const struct reg_sequence cs35l41_hda_config[] = {
{ CS35L41_PLL_CLK_CTRL, 0x00000430 }, // 3072000Hz, BCLK Input, PLL_REFCLK_EN = 1
+ { CS35L41_DSP_CLK_CTRL, 0x00000003 }, // DSP CLK EN
{ CS35L41_GLOBAL_CLK_CTRL, 0x00000003 }, // GLOBAL_FS = 48 kHz
{ CS35L41_SP_ENABLES, 0x00010000 }, // ASP_RX1_EN = 1
{ CS35L41_SP_RATE_CTRL, 0x00000021 }, // ASP_BCLK_FREQ = 3.072 MHz
{ CS35L41_SP_FORMAT, 0x20200200 }, // 32 bits RX/TX slots, I2S, clk consumer
+ { CS35L41_SP_HIZ_CTRL, 0x00000002 }, // Hi-Z unused
+ { CS35L41_SP_TX_WL, 0x00000018 }, // 24 cycles/slot
+ { CS35L41_SP_RX_WL, 0x00000018 }, // 24 cycles/slot
{ CS35L41_DAC_PCM1_SRC, 0x00000008 }, // DACPCM1_SRC = ASPRX1
+ { CS35L41_ASP_TX1_SRC, 0x00000018 }, // ASPTX1 SRC = VMON
+ { CS35L41_ASP_TX2_SRC, 0x00000019 }, // ASPTX2 SRC = IMON
+ { CS35L41_ASP_TX3_SRC, 0x00000032 }, // ASPTX3 SRC = ERRVOL
+ { CS35L41_ASP_TX4_SRC, 0x00000033 }, // ASPTX4 SRC = CLASSH_TGT
+ { CS35L41_DSP1_RX1_SRC, 0x00000008 }, // DSP1RX1 SRC = ASPRX1
+ { CS35L41_DSP1_RX2_SRC, 0x00000009 }, // DSP1RX2 SRC = ASPRX2
+ { CS35L41_DSP1_RX3_SRC, 0x00000018 }, // DSP1RX3 SRC = VMON
+ { CS35L41_DSP1_RX4_SRC, 0x00000019 }, // DSP1RX4 SRC = IMON
+ { CS35L41_DSP1_RX5_SRC, 0x00000020 }, // DSP1RX5 SRC = ERRVOL
{ CS35L41_AMP_DIG_VOL_CTRL, 0x00000000 }, // AMP_VOL_PCM 0.0 dB
{ CS35L41_AMP_GAIN_CTRL, 0x00000084 }, // AMP_GAIN_PCM 4.5 dB
};
+static const struct reg_sequence cs35l41_hda_config_dsp[] = {
+ { CS35L41_PLL_CLK_CTRL, 0x00000430 }, // 3072000Hz, BCLK Input, PLL_REFCLK_EN = 1
+ { CS35L41_DSP_CLK_CTRL, 0x00000003 }, // DSP CLK EN
+ { CS35L41_GLOBAL_CLK_CTRL, 0x00000003 }, // GLOBAL_FS = 48 kHz
+ { CS35L41_SP_ENABLES, 0x00010001 }, // ASP_RX1_EN = 1, ASP_TX1_EN = 1
+ { CS35L41_SP_RATE_CTRL, 0x00000021 }, // ASP_BCLK_FREQ = 3.072 MHz
+ { CS35L41_SP_FORMAT, 0x20200200 }, // 32 bits RX/TX slots, I2S, clk consumer
+ { CS35L41_SP_HIZ_CTRL, 0x00000003 }, // Hi-Z unused/disabled
+ { CS35L41_SP_TX_WL, 0x00000018 }, // 24 cycles/slot
+ { CS35L41_SP_RX_WL, 0x00000018 }, // 24 cycles/slot
+ { CS35L41_DAC_PCM1_SRC, 0x00000032 }, // DACPCM1_SRC = ERR_VOL
+ { CS35L41_ASP_TX1_SRC, 0x00000018 }, // ASPTX1 SRC = VMON
+ { CS35L41_ASP_TX2_SRC, 0x00000019 }, // ASPTX2 SRC = IMON
+ { CS35L41_ASP_TX3_SRC, 0x00000028 }, // ASPTX3 SRC = VPMON
+ { CS35L41_ASP_TX4_SRC, 0x00000029 }, // ASPTX4 SRC = VBSTMON
+ { CS35L41_DSP1_RX1_SRC, 0x00000008 }, // DSP1RX1 SRC = ASPRX1
+ { CS35L41_DSP1_RX2_SRC, 0x00000008 }, // DSP1RX2 SRC = ASPRX1
+ { CS35L41_DSP1_RX3_SRC, 0x00000018 }, // DSP1RX3 SRC = VMON
+ { CS35L41_DSP1_RX4_SRC, 0x00000019 }, // DSP1RX4 SRC = IMON
+ { CS35L41_DSP1_RX5_SRC, 0x00000029 }, // DSP1RX5 SRC = VBSTMON
+ { CS35L41_AMP_DIG_VOL_CTRL, 0x00000000 }, // AMP_VOL_PCM 0.0 dB
+ { CS35L41_AMP_GAIN_CTRL, 0x00000233 }, // AMP_GAIN_PCM = 17.5dB AMP_GAIN_PDM = 19.5dB
+};
+
static const struct reg_sequence cs35l41_hda_mute[] = {
{ CS35L41_AMP_GAIN_CTRL, 0x00000000 }, // AMP_GAIN_PCM 0.5 dB
{ CS35L41_AMP_DIG_VOL_CTRL, 0x0000A678 }, // AMP_VOL_PCM Mute
};
+static int cs35l41_control_add(struct cs_dsp_coeff_ctl *cs_ctl)
+{
+ struct cs35l41_hda *cs35l41 = container_of(cs_ctl->dsp, struct cs35l41_hda, cs_dsp);
+ struct hda_cs_dsp_ctl_info info;
+
+ info.device_name = cs35l41->amp_name;
+ info.fw_type = cs35l41->firmware_type;
+ info.card = cs35l41->codec->card;
+
+ return hda_cs_dsp_control_add(cs_ctl, &info);
+}
+
+static const struct cs_dsp_client_ops client_ops = {
+ .control_add = cs35l41_control_add,
+ .control_remove = hda_cs_dsp_control_remove,
+};
+
+static int cs35l41_request_firmware_file(struct cs35l41_hda *cs35l41,
+ const struct firmware **firmware, char **filename,
+ const char *dir, const char *ssid, const char *amp_name,
+ int spkid, const char *filetype)
+{
+ const char * const dsp_name = cs35l41->cs_dsp.name;
+ char *s, c;
+ int ret = 0;
+
+ if (spkid > -1 && ssid && amp_name)
+ *filename = kasprintf(GFP_KERNEL, "%s%s-%s-%s-%s-spkid%d-%s.%s", dir, CS35L41_PART,
+ dsp_name, hda_cs_dsp_fw_ids[cs35l41->firmware_type],
+ ssid, spkid, amp_name, filetype);
+ else if (spkid > -1 && ssid)
+ *filename = kasprintf(GFP_KERNEL, "%s%s-%s-%s-%s-spkid%d.%s", dir, CS35L41_PART,
+ dsp_name, hda_cs_dsp_fw_ids[cs35l41->firmware_type],
+ ssid, spkid, filetype);
+ else if (ssid && amp_name)
+ *filename = kasprintf(GFP_KERNEL, "%s%s-%s-%s-%s-%s.%s", dir, CS35L41_PART,
+ dsp_name, hda_cs_dsp_fw_ids[cs35l41->firmware_type],
+ ssid, amp_name, filetype);
+ else if (ssid)
+ *filename = kasprintf(GFP_KERNEL, "%s%s-%s-%s-%s.%s", dir, CS35L41_PART,
+ dsp_name, hda_cs_dsp_fw_ids[cs35l41->firmware_type],
+ ssid, filetype);
+ else
+ *filename = kasprintf(GFP_KERNEL, "%s%s-%s-%s.%s", dir, CS35L41_PART,
+ dsp_name, hda_cs_dsp_fw_ids[cs35l41->firmware_type],
+ filetype);
+
+ if (*filename == NULL)
+ return -ENOMEM;
+
+ /*
+ * Make sure that filename is lower-case and any non alpha-numeric
+ * characters except full stop and '/' are replaced with hyphens.
+ */
+ s = *filename;
+ while (*s) {
+ c = *s;
+ if (isalnum(c))
+ *s = tolower(c);
+ else if (c != '.' && c != '/')
+ *s = '-';
+ s++;
+ }
+
+ ret = firmware_request_nowarn(firmware, *filename, cs35l41->dev);
+ if (ret != 0) {
+ dev_dbg(cs35l41->dev, "Failed to request '%s'\n", *filename);
+ kfree(*filename);
+ *filename = NULL;
+ }
+
+ return ret;
+}
+
+static int cs35l41_request_firmware_files_spkid(struct cs35l41_hda *cs35l41,
+ const struct firmware **wmfw_firmware,
+ char **wmfw_filename,
+ const struct firmware **coeff_firmware,
+ char **coeff_filename)
+{
+ int ret;
+
+ /* try cirrus/part-dspN-fwtype-sub<-spkidN><-ampname>.wmfw */
+ ret = cs35l41_request_firmware_file(cs35l41, wmfw_firmware, wmfw_filename,
+ CS35L41_FIRMWARE_ROOT,
+ cs35l41->acpi_subsystem_id, cs35l41->amp_name,
+ cs35l41->speaker_id, "wmfw");
+ if (!ret) {
+ /* try cirrus/part-dspN-fwtype-sub<-spkidN><-ampname>.bin */
+ cs35l41_request_firmware_file(cs35l41, coeff_firmware, coeff_filename,
+ CS35L41_FIRMWARE_ROOT,
+ cs35l41->acpi_subsystem_id, cs35l41->amp_name,
+ cs35l41->speaker_id, "bin");
+ return 0;
+ }
+
+ /* try cirrus/part-dspN-fwtype-sub<-ampname>.wmfw */
+ ret = cs35l41_request_firmware_file(cs35l41, wmfw_firmware, wmfw_filename,
+ CS35L41_FIRMWARE_ROOT, cs35l41->acpi_subsystem_id,
+ cs35l41->amp_name, -1, "wmfw");
+ if (!ret) {
+ /* try cirrus/part-dspN-fwtype-sub<-spkidN><-ampname>.bin */
+ cs35l41_request_firmware_file(cs35l41, coeff_firmware, coeff_filename,
+ CS35L41_FIRMWARE_ROOT, cs35l41->acpi_subsystem_id,
+ cs35l41->amp_name, cs35l41->speaker_id, "bin");
+ return 0;
+ }
+
+ /* try cirrus/part-dspN-fwtype-sub<-spkidN>.wmfw */
+ ret = cs35l41_request_firmware_file(cs35l41, wmfw_firmware, wmfw_filename,
+ CS35L41_FIRMWARE_ROOT, cs35l41->acpi_subsystem_id,
+ NULL, cs35l41->speaker_id, "wmfw");
+ if (!ret) {
+ /* try cirrus/part-dspN-fwtype-sub<-spkidN><-ampname>.bin */
+ ret = cs35l41_request_firmware_file(cs35l41, coeff_firmware, coeff_filename,
+ CS35L41_FIRMWARE_ROOT,
+ cs35l41->acpi_subsystem_id,
+ cs35l41->amp_name, cs35l41->speaker_id, "bin");
+ if (ret)
+ /* try cirrus/part-dspN-fwtype-sub<-spkidN>.bin */
+ cs35l41_request_firmware_file(cs35l41, coeff_firmware, coeff_filename,
+ CS35L41_FIRMWARE_ROOT,
+ cs35l41->acpi_subsystem_id,
+ NULL, cs35l41->speaker_id, "bin");
+ return 0;
+ }
+
+ /* try cirrus/part-dspN-fwtype-sub.wmfw */
+ ret = cs35l41_request_firmware_file(cs35l41, wmfw_firmware, wmfw_filename,
+ CS35L41_FIRMWARE_ROOT, cs35l41->acpi_subsystem_id,
+ NULL, -1, "wmfw");
+ if (!ret) {
+ /* try cirrus/part-dspN-fwtype-sub<-spkidN><-ampname>.bin */
+ ret = cs35l41_request_firmware_file(cs35l41, coeff_firmware, coeff_filename,
+ CS35L41_FIRMWARE_ROOT,
+ cs35l41->acpi_subsystem_id,
+ cs35l41->amp_name, cs35l41->speaker_id, "bin");
+ if (ret)
+ /* try cirrus/part-dspN-fwtype-sub<-spkidN>.bin */
+ cs35l41_request_firmware_file(cs35l41, coeff_firmware, coeff_filename,
+ CS35L41_FIRMWARE_ROOT,
+ cs35l41->acpi_subsystem_id,
+ NULL, cs35l41->speaker_id, "bin");
+ return 0;
+ }
+
+ /* fallback try cirrus/part-dspN-fwtype.wmfw */
+ ret = cs35l41_request_firmware_file(cs35l41, wmfw_firmware, wmfw_filename,
+ CS35L41_FIRMWARE_ROOT, NULL, NULL, -1, "wmfw");
+ if (!ret) {
+ /* fallback try cirrus/part-dspN-fwtype.bin */
+ cs35l41_request_firmware_file(cs35l41, coeff_firmware, coeff_filename,
+ CS35L41_FIRMWARE_ROOT, NULL, NULL, -1, "bin");
+ return 0;
+ }
+
+ dev_warn(cs35l41->dev, "Failed to request firmware\n");
+
+ return ret;
+}
+
+static int cs35l41_request_firmware_files(struct cs35l41_hda *cs35l41,
+ const struct firmware **wmfw_firmware,
+ char **wmfw_filename,
+ const struct firmware **coeff_firmware,
+ char **coeff_filename)
+{
+ int ret;
+
+ if (cs35l41->speaker_id > -1)
+ return cs35l41_request_firmware_files_spkid(cs35l41, wmfw_firmware, wmfw_filename,
+ coeff_firmware, coeff_filename);
+
+ /* try cirrus/part-dspN-fwtype-sub<-ampname>.wmfw */
+ ret = cs35l41_request_firmware_file(cs35l41, wmfw_firmware, wmfw_filename,
+ CS35L41_FIRMWARE_ROOT, cs35l41->acpi_subsystem_id,
+ cs35l41->amp_name, -1, "wmfw");
+ if (!ret) {
+ /* try cirrus/part-dspN-fwtype-sub<-ampname>.bin */
+ cs35l41_request_firmware_file(cs35l41, coeff_firmware, coeff_filename,
+ CS35L41_FIRMWARE_ROOT, cs35l41->acpi_subsystem_id,
+ cs35l41->amp_name, -1, "bin");
+ return 0;
+ }
+
+ /* try cirrus/part-dspN-fwtype-sub.wmfw */
+ ret = cs35l41_request_firmware_file(cs35l41, wmfw_firmware, wmfw_filename,
+ CS35L41_FIRMWARE_ROOT, cs35l41->acpi_subsystem_id,
+ NULL, -1, "wmfw");
+ if (!ret) {
+ /* try cirrus/part-dspN-fwtype-sub<-ampname>.bin */
+ ret = cs35l41_request_firmware_file(cs35l41, coeff_firmware, coeff_filename,
+ CS35L41_FIRMWARE_ROOT,
+ cs35l41->acpi_subsystem_id,
+ cs35l41->amp_name, -1, "bin");
+ if (ret)
+ /* try cirrus/part-dspN-fwtype-sub.bin */
+ cs35l41_request_firmware_file(cs35l41, coeff_firmware, coeff_filename,
+ CS35L41_FIRMWARE_ROOT,
+ cs35l41->acpi_subsystem_id,
+ NULL, -1, "bin");
+ return 0;
+ }
+
+ /* fallback try cirrus/part-dspN-fwtype.wmfw */
+ ret = cs35l41_request_firmware_file(cs35l41, wmfw_firmware, wmfw_filename,
+ CS35L41_FIRMWARE_ROOT, NULL, NULL, -1, "wmfw");
+ if (!ret) {
+ /* fallback try cirrus/part-dspN-fwtype.bin */
+ cs35l41_request_firmware_file(cs35l41, coeff_firmware, coeff_filename,
+ CS35L41_FIRMWARE_ROOT, NULL, NULL, -1, "bin");
+ return 0;
+ }
+
+ dev_warn(cs35l41->dev, "Failed to request firmware\n");
+
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_EFI)
+static int cs35l41_apply_calibration(struct cs35l41_hda *cs35l41, unsigned int ambient,
+ unsigned int r0, unsigned int status, unsigned int checksum)
+{
+ int ret;
+
+ ret = hda_cs_dsp_write_ctl(&cs35l41->cs_dsp, CAL_AMBIENT_DSP_CTL_NAME, CAL_DSP_CTL_TYPE,
+ CAL_DSP_CTL_ALG, &ambient, 4);
+ if (ret) {
+ dev_err(cs35l41->dev, "Cannot Write Control: %s - %d\n", CAL_AMBIENT_DSP_CTL_NAME,
+ ret);
+ return ret;
+ }
+ ret = hda_cs_dsp_write_ctl(&cs35l41->cs_dsp, CAL_R_DSP_CTL_NAME, CAL_DSP_CTL_TYPE,
+ CAL_DSP_CTL_ALG, &r0, 4);
+ if (ret) {
+ dev_err(cs35l41->dev, "Cannot Write Control: %s - %d\n", CAL_R_DSP_CTL_NAME, ret);
+ return ret;
+ }
+ ret = hda_cs_dsp_write_ctl(&cs35l41->cs_dsp, CAL_STATUS_DSP_CTL_NAME, CAL_DSP_CTL_TYPE,
+ CAL_DSP_CTL_ALG, &status, 4);
+ if (ret) {
+ dev_err(cs35l41->dev, "Cannot Write Control: %s - %d\n", CAL_STATUS_DSP_CTL_NAME,
+ ret);
+ return ret;
+ }
+ ret = hda_cs_dsp_write_ctl(&cs35l41->cs_dsp, CAL_CHECKSUM_DSP_CTL_NAME, CAL_DSP_CTL_TYPE,
+ CAL_DSP_CTL_ALG, &checksum, 4);
+ if (ret) {
+ dev_err(cs35l41->dev, "Cannot Write Control: %s - %d\n", CAL_CHECKSUM_DSP_CTL_NAME,
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cs35l41_save_calibration(struct cs35l41_hda *cs35l41)
+{
+ static efi_guid_t efi_guid = EFI_GUID(0x02f9af02, 0x7734, 0x4233, 0xb4, 0x3d, 0x93, 0xfe,
+ 0x5a, 0xa3, 0x5d, 0xb3);
+ static efi_char16_t efi_name[] = L"CirrusSmartAmpCalibrationData";
+ const struct cs35l41_amp_efi_data *efi_data;
+ const struct cs35l41_amp_cal_data *cl;
+ unsigned long data_size = 0;
+ efi_status_t status;
+ int ret = 0;
+ u8 *data = NULL;
+ u32 attr;
+
+ /* Get real size of UEFI variable */
+ status = efi.get_variable(efi_name, &efi_guid, &attr, &data_size, data);
+ if (status == EFI_BUFFER_TOO_SMALL) {
+ ret = -ENODEV;
+ /* Allocate data buffer of data_size bytes */
+ data = vmalloc(data_size);
+ if (!data)
+ return -ENOMEM;
+ /* Get variable contents into buffer */
+ status = efi.get_variable(efi_name, &efi_guid, &attr, &data_size, data);
+ if (status == EFI_SUCCESS) {
+ efi_data = (struct cs35l41_amp_efi_data *)data;
+ dev_dbg(cs35l41->dev, "Calibration: Size=%d, Amp Count=%d\n",
+ efi_data->size, efi_data->count);
+ if (efi_data->count > cs35l41->index) {
+ cl = &efi_data->data[cs35l41->index];
+ dev_dbg(cs35l41->dev,
+ "Calibration: Ambient=%02x, Status=%02x, R0=%d\n",
+ cl->calAmbient, cl->calStatus, cl->calR);
+
+ /* Calibration can only be applied whilst the DSP is not running */
+ ret = cs35l41_apply_calibration(cs35l41,
+ cpu_to_be32(cl->calAmbient),
+ cpu_to_be32(cl->calR),
+ cpu_to_be32(cl->calStatus),
+ cpu_to_be32(cl->calR + 1));
+ }
+ }
+ vfree(data);
+ }
+ return ret;
+}
+#else
+static int cs35l41_save_calibration(struct cs35l41_hda *cs35l41)
+{
+ dev_warn(cs35l41->dev, "Calibration not supported without EFI support.\n");
+ return 0;
+}
+#endif
+
+static int cs35l41_init_dsp(struct cs35l41_hda *cs35l41)
+{
+ const struct firmware *coeff_firmware = NULL;
+ const struct firmware *wmfw_firmware = NULL;
+ struct cs_dsp *dsp = &cs35l41->cs_dsp;
+ char *coeff_filename = NULL;
+ char *wmfw_filename = NULL;
+ int ret;
+
+ if (!cs35l41->halo_initialized) {
+ cs35l41_configure_cs_dsp(cs35l41->dev, cs35l41->regmap, dsp);
+ dsp->client_ops = &client_ops;
+
+ ret = cs_dsp_halo_init(&cs35l41->cs_dsp);
+ if (ret)
+ return ret;
+ cs35l41->halo_initialized = true;
+ }
+
+ ret = cs35l41_request_firmware_files(cs35l41, &wmfw_firmware, &wmfw_filename,
+ &coeff_firmware, &coeff_filename);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(cs35l41->dev, "Loading WMFW Firmware: %s\n", wmfw_filename);
+ if (coeff_filename)
+ dev_dbg(cs35l41->dev, "Loading Coefficient File: %s\n", coeff_filename);
+ else
+ dev_warn(cs35l41->dev, "No Coefficient File available.\n");
+
+ ret = cs_dsp_power_up(dsp, wmfw_firmware, wmfw_filename, coeff_firmware, coeff_filename,
+ hda_cs_dsp_fw_ids[cs35l41->firmware_type]);
+ if (ret)
+ goto err_release;
+
+ ret = cs35l41_save_calibration(cs35l41);
+
+err_release:
+ release_firmware(wmfw_firmware);
+ release_firmware(coeff_firmware);
+ kfree(wmfw_filename);
+ kfree(coeff_filename);
+
+ return ret;
+}
+
+static void cs35l41_shutdown_dsp(struct cs35l41_hda *cs35l41)
+{
+ struct cs_dsp *dsp = &cs35l41->cs_dsp;
+
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+ cs35l41->firmware_running = false;
+ dev_dbg(cs35l41->dev, "Unloaded Firmware\n");
+}
+
+static void cs35l41_remove_dsp(struct cs35l41_hda *cs35l41)
+{
+ struct cs_dsp *dsp = &cs35l41->cs_dsp;
+
+ cancel_work_sync(&cs35l41->fw_load_work);
+ cs35l41_shutdown_dsp(cs35l41);
+ cs_dsp_remove(dsp);
+ cs35l41->halo_initialized = false;
+}
+
/* Protection release cycle to get the speaker out of Safe-Mode */
static void cs35l41_error_release(struct device *dev, struct regmap *regmap, unsigned int mask)
{
@@ -53,9 +487,23 @@ static void cs35l41_hda_playback_hook(struct device *dev, int action)
struct regmap *reg = cs35l41->regmap;
int ret = 0;
+ mutex_lock(&cs35l41->fw_mutex);
+
switch (action) {
case HDA_GEN_PCM_ACT_OPEN:
- regmap_multi_reg_write(reg, cs35l41_hda_config, ARRAY_SIZE(cs35l41_hda_config));
+ cs35l41->playback_started = true;
+ if (cs35l41->firmware_running) {
+ regmap_multi_reg_write(reg, cs35l41_hda_config_dsp,
+ ARRAY_SIZE(cs35l41_hda_config_dsp));
+ regmap_update_bits(cs35l41->regmap, CS35L41_PWR_CTRL2,
+ CS35L41_VMON_EN_MASK | CS35L41_IMON_EN_MASK,
+ 1 << CS35L41_VMON_EN_SHIFT | 1 << CS35L41_IMON_EN_SHIFT);
+ cs35l41_set_cspl_mbox_cmd(cs35l41->dev, cs35l41->regmap,
+ CSPL_MBOX_CMD_RESUME);
+ } else {
+ regmap_multi_reg_write(reg, cs35l41_hda_config,
+ ARRAY_SIZE(cs35l41_hda_config));
+ }
ret = regmap_update_bits(reg, CS35L41_PWR_CTRL2,
CS35L41_AMP_EN_MASK, 1 << CS35L41_AMP_EN_SHIFT);
if (cs35l41->hw_cfg.bst_type == CS35L41_EXT_BOOST)
@@ -73,13 +521,23 @@ static void cs35l41_hda_playback_hook(struct device *dev, int action)
CS35L41_AMP_EN_MASK, 0 << CS35L41_AMP_EN_SHIFT);
if (cs35l41->hw_cfg.bst_type == CS35L41_EXT_BOOST)
regmap_write(reg, CS35L41_GPIO1_CTRL1, 0x00000001);
+ if (cs35l41->firmware_running) {
+ cs35l41_set_cspl_mbox_cmd(cs35l41->dev, cs35l41->regmap,
+ CSPL_MBOX_CMD_PAUSE);
+ regmap_update_bits(cs35l41->regmap, CS35L41_PWR_CTRL2,
+ CS35L41_VMON_EN_MASK | CS35L41_IMON_EN_MASK,
+ 0 << CS35L41_VMON_EN_SHIFT | 0 << CS35L41_IMON_EN_SHIFT);
+ }
cs35l41_irq_release(cs35l41);
+ cs35l41->playback_started = false;
break;
default:
dev_warn(cs35l41->dev, "Playback action not supported: %d\n", action);
break;
}
+ mutex_unlock(&cs35l41->fw_mutex);
+
if (ret)
dev_err(cs35l41->dev, "Regmap access fail: %d\n", ret);
}
@@ -104,10 +562,274 @@ static int cs35l41_hda_channel_map(struct device *dev, unsigned int tx_num, unsi
rx_slot);
}
+static int cs35l41_runtime_suspend(struct device *dev)
+{
+ struct cs35l41_hda *cs35l41 = dev_get_drvdata(dev);
+
+ dev_dbg(cs35l41->dev, "Suspend\n");
+
+ if (!cs35l41->firmware_running)
+ return 0;
+
+ if (cs35l41_enter_hibernate(cs35l41->dev, cs35l41->regmap, cs35l41->hw_cfg.bst_type) < 0)
+ return 0;
+
+ regcache_cache_only(cs35l41->regmap, true);
+ regcache_mark_dirty(cs35l41->regmap);
+
+ return 0;
+}
+
+static int cs35l41_runtime_resume(struct device *dev)
+{
+ struct cs35l41_hda *cs35l41 = dev_get_drvdata(dev);
+ int ret;
+
+ dev_dbg(cs35l41->dev, "Resume.\n");
+
+ if (cs35l41->hw_cfg.bst_type == CS35L41_EXT_BOOST_NO_VSPK_SWITCH) {
+ dev_dbg(cs35l41->dev, "System does not support Resume\n");
+ return 0;
+ }
+
+ if (!cs35l41->firmware_running)
+ return 0;
+
+ regcache_cache_only(cs35l41->regmap, false);
+
+ ret = cs35l41_exit_hibernate(cs35l41->dev, cs35l41->regmap);
+ if (ret) {
+ regcache_cache_only(cs35l41->regmap, true);
+ return ret;
+ }
+
+ /* Test key needs to be unlocked to allow the OTP settings to re-apply */
+ cs35l41_test_key_unlock(cs35l41->dev, cs35l41->regmap);
+ ret = regcache_sync(cs35l41->regmap);
+ cs35l41_test_key_lock(cs35l41->dev, cs35l41->regmap);
+ if (ret) {
+ dev_err(cs35l41->dev, "Failed to restore register cache: %d\n", ret);
+ return ret;
+ }
+
+ if (cs35l41->hw_cfg.bst_type == CS35L41_EXT_BOOST)
+ cs35l41_init_boost(cs35l41->dev, cs35l41->regmap, &cs35l41->hw_cfg);
+
+ return 0;
+}
+
+static int cs35l41_hda_suspend_hook(struct device *dev)
+{
+ dev_dbg(dev, "Request Suspend\n");
+ pm_runtime_mark_last_busy(dev);
+ return pm_runtime_put_autosuspend(dev);
+}
+
+static int cs35l41_hda_resume_hook(struct device *dev)
+{
+ dev_dbg(dev, "Request Resume\n");
+ return pm_runtime_get_sync(dev);
+}
+
+static int cs35l41_smart_amp(struct cs35l41_hda *cs35l41)
+{
+ int halo_sts;
+ int ret;
+
+ ret = cs35l41_init_dsp(cs35l41);
+ if (ret) {
+ dev_warn(cs35l41->dev, "Cannot Initialize Firmware. Error: %d\n", ret);
+ goto clean_dsp;
+ }
+
+ ret = cs35l41_write_fs_errata(cs35l41->dev, cs35l41->regmap);
+ if (ret) {
+ dev_err(cs35l41->dev, "Cannot Write FS Errata: %d\n", ret);
+ goto clean_dsp;
+ }
+
+ ret = cs_dsp_run(&cs35l41->cs_dsp);
+ if (ret) {
+ dev_err(cs35l41->dev, "Fail to start dsp: %d\n", ret);
+ goto clean_dsp;
+ }
+
+ ret = read_poll_timeout(hda_cs_dsp_read_ctl, ret,
+ be32_to_cpu(halo_sts) == HALO_STATE_CODE_RUN,
+ 1000, 15000, false, &cs35l41->cs_dsp, HALO_STATE_DSP_CTL_NAME,
+ HALO_STATE_DSP_CTL_TYPE, HALO_STATE_DSP_CTL_ALG,
+ &halo_sts, sizeof(halo_sts));
+
+ if (ret) {
+ dev_err(cs35l41->dev, "Timeout waiting for HALO Core to start. State: %d\n",
+ halo_sts);
+ goto clean_dsp;
+ }
+
+ cs35l41_set_cspl_mbox_cmd(cs35l41->dev, cs35l41->regmap, CSPL_MBOX_CMD_PAUSE);
+ cs35l41->firmware_running = true;
+
+ return 0;
+
+clean_dsp:
+ cs35l41_shutdown_dsp(cs35l41);
+ return ret;
+}
+
+static void cs35l41_load_firmware(struct cs35l41_hda *cs35l41, bool load)
+{
+ pm_runtime_get_sync(cs35l41->dev);
+
+ if (cs35l41->firmware_running && !load) {
+ dev_dbg(cs35l41->dev, "Unloading Firmware\n");
+ cs35l41_shutdown_dsp(cs35l41);
+ } else if (!cs35l41->firmware_running && load) {
+ dev_dbg(cs35l41->dev, "Loading Firmware\n");
+ cs35l41_smart_amp(cs35l41);
+ } else {
+ dev_dbg(cs35l41->dev, "Unable to Load firmware.\n");
+ }
+
+ pm_runtime_mark_last_busy(cs35l41->dev);
+ pm_runtime_put_autosuspend(cs35l41->dev);
+}
+
+static int cs35l41_fw_load_ctl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct cs35l41_hda *cs35l41 = snd_kcontrol_chip(kcontrol);
+
+ ucontrol->value.integer.value[0] = cs35l41->request_fw_load;
+ return 0;
+}
+
+static void cs35l41_fw_load_work(struct work_struct *work)
+{
+ struct cs35l41_hda *cs35l41 = container_of(work, struct cs35l41_hda, fw_load_work);
+
+ mutex_lock(&cs35l41->fw_mutex);
+
+ /* Recheck if playback is ongoing, mutex will block playback during firmware loading */
+ if (cs35l41->playback_started)
+ dev_err(cs35l41->dev, "Cannot Load/Unload firmware during Playback\n");
+ else
+ cs35l41_load_firmware(cs35l41, cs35l41->request_fw_load);
+
+ cs35l41->fw_request_ongoing = false;
+ mutex_unlock(&cs35l41->fw_mutex);
+}
+
+static int cs35l41_fw_load_ctl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct cs35l41_hda *cs35l41 = snd_kcontrol_chip(kcontrol);
+ unsigned int ret = 0;
+
+ mutex_lock(&cs35l41->fw_mutex);
+
+ if (cs35l41->request_fw_load == ucontrol->value.integer.value[0])
+ goto err;
+
+ if (cs35l41->fw_request_ongoing) {
+ dev_dbg(cs35l41->dev, "Existing request not complete\n");
+ ret = -EBUSY;
+ goto err;
+ }
+
+ /* Check if playback is ongoing when initial request is made */
+ if (cs35l41->playback_started) {
+ dev_err(cs35l41->dev, "Cannot Load/Unload firmware during Playback\n");
+ ret = -EBUSY;
+ goto err;
+ }
+
+ cs35l41->fw_request_ongoing = true;
+ cs35l41->request_fw_load = ucontrol->value.integer.value[0];
+ schedule_work(&cs35l41->fw_load_work);
+
+err:
+ mutex_unlock(&cs35l41->fw_mutex);
+
+ return ret;
+}
+
+static int cs35l41_fw_type_ctl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct cs35l41_hda *cs35l41 = snd_kcontrol_chip(kcontrol);
+
+ ucontrol->value.enumerated.item[0] = cs35l41->firmware_type;
+
+ return 0;
+}
+
+static int cs35l41_fw_type_ctl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct cs35l41_hda *cs35l41 = snd_kcontrol_chip(kcontrol);
+
+ if (ucontrol->value.enumerated.item[0] < HDA_CS_DSP_NUM_FW) {
+ cs35l41->firmware_type = ucontrol->value.enumerated.item[0];
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int cs35l41_fw_type_ctl_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
+{
+ return snd_ctl_enum_info(uinfo, 1, ARRAY_SIZE(hda_cs_dsp_fw_ids), hda_cs_dsp_fw_ids);
+}
+
+static int cs35l41_create_controls(struct cs35l41_hda *cs35l41)
+{
+ char fw_type_ctl_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+ char fw_load_ctl_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+ struct snd_kcontrol_new fw_type_ctl = {
+ .name = fw_type_ctl_name,
+ .iface = SNDRV_CTL_ELEM_IFACE_CARD,
+ .info = cs35l41_fw_type_ctl_info,
+ .get = cs35l41_fw_type_ctl_get,
+ .put = cs35l41_fw_type_ctl_put,
+ };
+ struct snd_kcontrol_new fw_load_ctl = {
+ .name = fw_load_ctl_name,
+ .iface = SNDRV_CTL_ELEM_IFACE_CARD,
+ .info = snd_ctl_boolean_mono_info,
+ .get = cs35l41_fw_load_ctl_get,
+ .put = cs35l41_fw_load_ctl_put,
+ };
+ int ret;
+
+ scnprintf(fw_type_ctl_name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN, "%s DSP1 Firmware Type",
+ cs35l41->amp_name);
+ scnprintf(fw_load_ctl_name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN, "%s DSP1 Firmware Load",
+ cs35l41->amp_name);
+
+ ret = snd_ctl_add(cs35l41->codec->card, snd_ctl_new1(&fw_type_ctl, cs35l41));
+ if (ret) {
+ dev_err(cs35l41->dev, "Failed to add KControl %s = %d\n", fw_type_ctl.name, ret);
+ return ret;
+ }
+
+ dev_dbg(cs35l41->dev, "Added Control %s\n", fw_type_ctl.name);
+
+ ret = snd_ctl_add(cs35l41->codec->card, snd_ctl_new1(&fw_load_ctl, cs35l41));
+ if (ret) {
+ dev_err(cs35l41->dev, "Failed to add KControl %s = %d\n", fw_load_ctl.name, ret);
+ return ret;
+ }
+
+ dev_dbg(cs35l41->dev, "Added Control %s\n", fw_load_ctl.name);
+
+ return 0;
+}
+
static int cs35l41_hda_bind(struct device *dev, struct device *master, void *master_data)
{
struct cs35l41_hda *cs35l41 = dev_get_drvdata(dev);
struct hda_component *comps = master_data;
+ int ret = 0;
if (!comps || cs35l41->index < 0 || cs35l41->index >= HDA_MAX_COMPONENTS)
return -EINVAL;
@@ -116,11 +838,38 @@ static int cs35l41_hda_bind(struct device *dev, struct device *master, void *mas
if (comps->dev)
return -EBUSY;
+ pm_runtime_get_sync(dev);
+
comps->dev = dev;
+ if (!cs35l41->acpi_subsystem_id)
+ cs35l41->acpi_subsystem_id = devm_kasprintf(dev, GFP_KERNEL, "%.8x",
+ comps->codec->core.subsystem_id);
+ cs35l41->codec = comps->codec;
strscpy(comps->name, dev_name(dev), sizeof(comps->name));
+
+ cs35l41->firmware_type = HDA_CS_DSP_FW_SPK_PROT;
+
+ if (firmware_autostart) {
+ dev_dbg(cs35l41->dev, "Firmware Autostart.\n");
+ cs35l41->request_fw_load = true;
+ mutex_lock(&cs35l41->fw_mutex);
+ if (cs35l41_smart_amp(cs35l41) < 0)
+ dev_warn(cs35l41->dev, "Cannot Run Firmware, reverting to dsp bypass...\n");
+ mutex_unlock(&cs35l41->fw_mutex);
+ } else {
+ dev_dbg(cs35l41->dev, "Firmware Autostart is disabled.\n");
+ }
+
+ ret = cs35l41_create_controls(cs35l41);
+
comps->playback_hook = cs35l41_hda_playback_hook;
+ comps->suspend_hook = cs35l41_hda_suspend_hook;
+ comps->resume_hook = cs35l41_hda_resume_hook;
- return 0;
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
}
static void cs35l41_hda_unbind(struct device *dev, struct device *master, void *master_data)
@@ -215,7 +964,7 @@ static const struct regmap_irq cs35l41_reg_irqs[] = {
CS35L41_REG_IRQ(IRQ1_STATUS1, AMP_SHORT_ERR),
};
-static const struct regmap_irq_chip cs35l41_regmap_irq_chip = {
+static struct regmap_irq_chip cs35l41_regmap_irq_chip = {
.name = "cs35l41 IRQ1 Controller",
.status_base = CS35L41_IRQ1_STATUS1,
.mask_base = CS35L41_IRQ1_MASK1,
@@ -223,6 +972,7 @@ static const struct regmap_irq_chip cs35l41_regmap_irq_chip = {
.num_regs = 4,
.irqs = cs35l41_reg_irqs,
.num_irqs = ARRAY_SIZE(cs35l41_reg_irqs),
+ .runtime_pm = true,
};
static int cs35l41_hda_apply_properties(struct cs35l41_hda *cs35l41)
@@ -264,6 +1014,7 @@ static int cs35l41_hda_apply_properties(struct cs35l41_hda *cs35l41)
break;
case CS35L41_INTERRUPT:
using_irq = true;
+ hw_cfg->gpio2.func = CS35L41_GPIO2_INT_OPEN_DRAIN;
break;
default:
dev_err(cs35l41->dev, "Invalid GPIO2 function %d\n", hw_cfg->gpio2.func);
@@ -297,6 +1048,135 @@ static int cs35l41_hda_apply_properties(struct cs35l41_hda *cs35l41)
return cs35l41_hda_channel_map(cs35l41->dev, 0, NULL, 1, &hw_cfg->spk_pos);
}
+static int cs35l41_get_acpi_sub_string(struct device *dev, struct acpi_device *adev,
+ const char **subsysid)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ acpi_status status;
+ int ret = 0;
+
+ status = acpi_evaluate_object(adev->handle, "_SUB", NULL, &buffer);
+ if (ACPI_SUCCESS(status)) {
+ obj = buffer.pointer;
+ if (obj->type == ACPI_TYPE_STRING) {
+ *subsysid = devm_kstrdup(dev, obj->string.pointer, GFP_KERNEL);
+ if (*subsysid == NULL) {
+ dev_err(dev, "Cannot allocate Subsystem ID");
+ ret = -ENOMEM;
+ }
+ } else {
+ dev_warn(dev, "Warning ACPI _SUB did not return a string\n");
+ ret = -ENODEV;
+ }
+ acpi_os_free(buffer.pointer);
+ } else {
+ dev_dbg(dev, "Warning ACPI _SUB failed: %#x\n", status);
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
+static int cs35l41_get_speaker_id(struct device *dev, int amp_index,
+ int num_amps, int fixed_gpio_id)
+{
+ struct gpio_desc *speaker_id_desc;
+ int speaker_id = -ENODEV;
+
+ if (fixed_gpio_id >= 0) {
+ dev_dbg(dev, "Found Fixed Speaker ID GPIO (index = %d)\n", fixed_gpio_id);
+ speaker_id_desc = gpiod_get_index(dev, NULL, fixed_gpio_id, GPIOD_IN);
+ if (IS_ERR(speaker_id_desc)) {
+ speaker_id = PTR_ERR(speaker_id_desc);
+ return speaker_id;
+ }
+ speaker_id = gpiod_get_value_cansleep(speaker_id_desc);
+ gpiod_put(speaker_id_desc);
+ dev_dbg(dev, "Speaker ID = %d\n", speaker_id);
+ } else {
+ int base_index;
+ int gpios_per_amp;
+ int count;
+ int tmp;
+ int i;
+
+ count = gpiod_count(dev, "spk-id");
+ if (count > 0) {
+ speaker_id = 0;
+ gpios_per_amp = count / num_amps;
+ base_index = gpios_per_amp * amp_index;
+
+ if (count % num_amps)
+ return -EINVAL;
+
+ dev_dbg(dev, "Found %d Speaker ID GPIOs per Amp\n", gpios_per_amp);
+
+ for (i = 0; i < gpios_per_amp; i++) {
+ speaker_id_desc = gpiod_get_index(dev, "spk-id", i + base_index,
+ GPIOD_IN);
+ if (IS_ERR(speaker_id_desc)) {
+ speaker_id = PTR_ERR(speaker_id_desc);
+ break;
+ }
+ tmp = gpiod_get_value_cansleep(speaker_id_desc);
+ gpiod_put(speaker_id_desc);
+ if (tmp < 0) {
+ speaker_id = tmp;
+ break;
+ }
+ speaker_id |= tmp << i;
+ }
+ dev_dbg(dev, "Speaker ID = %d\n", speaker_id);
+ }
+ }
+ return speaker_id;
+}
+
+/*
+ * Device CLSA010(0/1) doesn't have _DSD so a gpiod_get by the label reset won't work.
+ * And devices created by serial-multi-instantiate don't have their device struct
+ * pointing to the correct fwnode, so acpi_dev must be used here.
+ * And devm functions expect that the device requesting the resource has the correct
+ * fwnode.
+ */
+static int cs35l41_no_acpi_dsd(struct cs35l41_hda *cs35l41, struct device *physdev, int id,
+ const char *hid)
+{
+ struct cs35l41_hw_cfg *hw_cfg = &cs35l41->hw_cfg;
+
+ /* check I2C address to assign the index */
+ cs35l41->index = id == 0x40 ? 0 : 1;
+ cs35l41->channel_index = 0;
+ cs35l41->reset_gpio = gpiod_get_index(physdev, NULL, 0, GPIOD_OUT_HIGH);
+ cs35l41->speaker_id = cs35l41_get_speaker_id(physdev, 0, 0, 2);
+ hw_cfg->spk_pos = cs35l41->index;
+ hw_cfg->gpio2.func = CS35L41_INTERRUPT;
+ hw_cfg->gpio2.valid = true;
+ hw_cfg->valid = true;
+ put_device(physdev);
+
+ if (strncmp(hid, "CLSA0100", 8) == 0) {
+ hw_cfg->bst_type = CS35L41_EXT_BOOST_NO_VSPK_SWITCH;
+ } else if (strncmp(hid, "CLSA0101", 8) == 0) {
+ hw_cfg->bst_type = CS35L41_EXT_BOOST;
+ hw_cfg->gpio1.func = CS35l41_VSPK_SWITCH;
+ hw_cfg->gpio1.valid = true;
+ } else {
+ /*
+ * Note: CLSA010(0/1) are special cases which use a slightly different design.
+ * All other HIDs e.g. CSC3551 require valid ACPI _DSD properties to be supported.
+ */
+ dev_err(cs35l41->dev, "Error: ACPI _DSD Properties are missing for HID %s.\n", hid);
+ hw_cfg->valid = false;
+ hw_cfg->gpio1.valid = false;
+ hw_cfg->gpio2.valid = false;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int cs35l41_hda_read_acpi(struct cs35l41_hda *cs35l41, const char *hid, int id)
{
struct cs35l41_hw_cfg *hw_cfg = &cs35l41->hw_cfg;
@@ -316,10 +1196,16 @@ static int cs35l41_hda_read_acpi(struct cs35l41_hda *cs35l41, const char *hid, i
physdev = get_device(acpi_get_first_physical_node(adev));
acpi_dev_put(adev);
+ ret = cs35l41_get_acpi_sub_string(cs35l41->dev, adev, &cs35l41->acpi_subsystem_id);
+ if (ret)
+ dev_info(cs35l41->dev, "No Subsystem ID found in ACPI: %d", ret);
+ else
+ dev_dbg(cs35l41->dev, "Subsystem ID %s found", cs35l41->acpi_subsystem_id);
+
property = "cirrus,dev-index";
ret = device_property_count_u32(physdev, property);
if (ret <= 0)
- goto no_acpi_dsd;
+ return cs35l41_no_acpi_dsd(cs35l41, physdev, id, hid);
if (ret > ARRAY_SIZE(values)) {
ret = -EINVAL;
@@ -347,7 +1233,7 @@ static int cs35l41_hda_read_acpi(struct cs35l41_hda *cs35l41, const char *hid, i
/* To use the same release code for all laptop variants we can't use devm_ version of
* gpiod_get here, as CLSA010* don't have a fully functional bios with an _DSD node
*/
- cs35l41->reset_gpio = fwnode_gpiod_get_index(&adev->fwnode, "reset", cs35l41->index,
+ cs35l41->reset_gpio = fwnode_gpiod_get_index(acpi_fwnode_handle(adev), "reset", cs35l41->index,
GPIOD_OUT_LOW, "cs35l41-reset");
property = "cirrus,speaker-position";
@@ -396,6 +1282,8 @@ static int cs35l41_hda_read_acpi(struct cs35l41_hda *cs35l41, const char *hid, i
else
hw_cfg->bst_cap = -1;
+ cs35l41->speaker_id = cs35l41_get_speaker_id(physdev, cs35l41->index, nval, -1);
+
if (hw_cfg->bst_ind > 0 || hw_cfg->bst_cap > 0 || hw_cfg->bst_ipk > 0)
hw_cfg->bst_type = CS35L41_INT_BOOST;
else
@@ -411,30 +1299,6 @@ err:
dev_err(cs35l41->dev, "Failed property %s: %d\n", property, ret);
return ret;
-
-no_acpi_dsd:
- /*
- * Device CLSA0100 doesn't have _DSD so a gpiod_get by the label reset won't work.
- * And devices created by i2c-multi-instantiate don't have their device struct pointing to
- * the correct fwnode, so acpi_dev must be used here.
- * And devm functions expect that the device requesting the resource has the correct
- * fwnode.
- */
- if (strncmp(hid, "CLSA0100", 8) != 0)
- return -EINVAL;
-
- /* check I2C address to assign the index */
- cs35l41->index = id == 0x40 ? 0 : 1;
- cs35l41->hw_cfg.spk_pos = cs35l41->index;
- cs35l41->channel_index = 0;
- cs35l41->reset_gpio = gpiod_get_index(physdev, NULL, 0, GPIOD_OUT_HIGH);
- cs35l41->hw_cfg.bst_type = CS35L41_EXT_BOOST_NO_VSPK_SWITCH;
- hw_cfg->gpio2.func = CS35L41_GPIO2_INT_OPEN_DRAIN;
- hw_cfg->gpio2.valid = true;
- cs35l41->hw_cfg.valid = true;
- put_device(physdev);
-
- return 0;
}
int cs35l41_hda_probe(struct device *dev, const char *device_name, int id, int irq,
@@ -460,10 +1324,8 @@ int cs35l41_hda_probe(struct device *dev, const char *device_name, int id, int i
dev_set_drvdata(dev, cs35l41);
ret = cs35l41_hda_read_acpi(cs35l41, device_name, id);
- if (ret) {
- dev_err_probe(cs35l41->dev, ret, "Platform not supported %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(cs35l41->dev, ret, "Platform not supported\n");
if (IS_ERR(cs35l41->reset_gpio)) {
ret = PTR_ERR(cs35l41->reset_gpio);
@@ -471,7 +1333,7 @@ int cs35l41_hda_probe(struct device *dev, const char *device_name, int id, int i
if (ret == -EBUSY) {
dev_info(cs35l41->dev, "Reset line busy, assuming shared reset\n");
} else {
- dev_err_probe(cs35l41->dev, ret, "Failed to get reset GPIO: %d\n", ret);
+ dev_err_probe(cs35l41->dev, ret, "Failed to get reset GPIO\n");
goto err;
}
}
@@ -536,13 +1398,26 @@ int cs35l41_hda_probe(struct device *dev, const char *device_name, int id, int i
if (ret)
goto err;
+ INIT_WORK(&cs35l41->fw_load_work, cs35l41_fw_load_work);
+ mutex_init(&cs35l41->fw_mutex);
+
+ pm_runtime_set_autosuspend_delay(cs35l41->dev, 3000);
+ pm_runtime_use_autosuspend(cs35l41->dev);
+ pm_runtime_mark_last_busy(cs35l41->dev);
+ pm_runtime_set_active(cs35l41->dev);
+ pm_runtime_get_noresume(cs35l41->dev);
+ pm_runtime_enable(cs35l41->dev);
+
ret = cs35l41_hda_apply_properties(cs35l41);
if (ret)
- goto err;
+ goto err_pm;
+
+ pm_runtime_put_autosuspend(cs35l41->dev);
ret = component_add(cs35l41->dev, &cs35l41_hda_comp_ops);
if (ret) {
dev_err(cs35l41->dev, "Register component failed: %d\n", ret);
+ pm_runtime_disable(cs35l41->dev);
goto err;
}
@@ -550,6 +1425,10 @@ int cs35l41_hda_probe(struct device *dev, const char *device_name, int id, int i
return 0;
+err_pm:
+ pm_runtime_disable(cs35l41->dev);
+ pm_runtime_put_noidle(cs35l41->dev);
+
err:
if (cs35l41_safe_reset(cs35l41->regmap, cs35l41->hw_cfg.bst_type))
gpiod_set_value_cansleep(cs35l41->reset_gpio, 0);
@@ -563,14 +1442,28 @@ void cs35l41_hda_remove(struct device *dev)
{
struct cs35l41_hda *cs35l41 = dev_get_drvdata(dev);
+ pm_runtime_get_sync(cs35l41->dev);
+ pm_runtime_disable(cs35l41->dev);
+
+ if (cs35l41->halo_initialized)
+ cs35l41_remove_dsp(cs35l41);
+
component_del(cs35l41->dev, &cs35l41_hda_comp_ops);
+ pm_runtime_put_noidle(cs35l41->dev);
+
if (cs35l41_safe_reset(cs35l41->regmap, cs35l41->hw_cfg.bst_type))
gpiod_set_value_cansleep(cs35l41->reset_gpio, 0);
gpiod_put(cs35l41->reset_gpio);
}
EXPORT_SYMBOL_NS_GPL(cs35l41_hda_remove, SND_HDA_SCODEC_CS35L41);
+const struct dev_pm_ops cs35l41_hda_pm_ops = {
+ RUNTIME_PM_OPS(cs35l41_runtime_suspend, cs35l41_runtime_resume, NULL)
+};
+EXPORT_SYMBOL_NS_GPL(cs35l41_hda_pm_ops, SND_HDA_SCODEC_CS35L41);
+
MODULE_DESCRIPTION("CS35L41 HDA Driver");
+MODULE_IMPORT_NS(SND_HDA_CS_DSP_CONTROLS);
MODULE_AUTHOR("Lucas Tanure, Cirrus Logic Inc, <tanureal@opensource.cirrus.com>");
MODULE_LICENSE("GPL");
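The probe/remove changes to cs35l41_hda.c above follow the usual runtime-PM bring-up ordering (autosuspend delay, set_active, get_noresume, enable, then a final autosuspend put once setup is complete). A condensed sketch of that ordering is shown below with error handling omitted; it illustrates the pattern only and is not a substitute for the full hunk.

#include <linux/pm_runtime.h>

/* Condensed illustration of the runtime-PM ordering used in
 * cs35l41_hda_probe()/cs35l41_hda_remove() above (error paths omitted).
 */
static int example_probe(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 3000);	/* 3 s idle timeout */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_set_active(dev);	/* device is already powered */
	pm_runtime_get_noresume(dev);	/* hold a reference while configuring */
	pm_runtime_enable(dev);

	/* ... hardware setup that must not race with runtime suspend ... */

	pm_runtime_put_autosuspend(dev);	/* drop the probe-time reference */
	return 0;
}

static void example_remove(struct device *dev)
{
	pm_runtime_get_sync(dev);	/* resume before tearing down */
	pm_runtime_disable(dev);
	/* ... teardown ... */
	pm_runtime_put_noidle(dev);
}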
diff --git a/sound/pci/hda/cs35l41_hda.h b/sound/pci/hda/cs35l41_hda.h
index a52ffd1f7999..bdb35f3be68a 100644
--- a/sound/pci/hda/cs35l41_hda.h
+++ b/sound/pci/hda/cs35l41_hda.h
@@ -10,11 +10,29 @@
#ifndef __CS35L41_HDA_H__
#define __CS35L41_HDA_H__
+#include <linux/efi.h>
#include <linux/regulator/consumer.h>
#include <linux/gpio/consumer.h>
#include <linux/device.h>
#include <sound/cs35l41.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/wmfw.h>
+
+struct cs35l41_amp_cal_data {
+ u32 calTarget[2];
+ u32 calTime[2];
+ s8 calAmbient;
+ u8 calStatus;
+ u16 calR;
+} __packed;
+
+struct cs35l41_amp_efi_data {
+ u32 size;
+ u32 count;
+ struct cs35l41_amp_cal_data data[];
+} __packed;
+
enum cs35l41_hda_spk_pos {
CS35l41_LEFT,
CS35l41_RIGHT,
@@ -32,15 +50,36 @@ struct cs35l41_hda {
struct regmap *regmap;
struct gpio_desc *reset_gpio;
struct cs35l41_hw_cfg hw_cfg;
+ struct hda_codec *codec;
int irq;
int index;
int channel_index;
unsigned volatile long irq_errors;
const char *amp_name;
+ const char *acpi_subsystem_id;
+ int firmware_type;
+ int speaker_id;
+ struct mutex fw_mutex;
+ struct work_struct fw_load_work;
+
struct regmap_irq_chip_data *irq_data;
+ bool firmware_running;
+ bool request_fw_load;
+ bool fw_request_ongoing;
+ bool halo_initialized;
+ bool playback_started;
+ struct cs_dsp cs_dsp;
};
+enum halo_state {
+ HALO_STATE_CODE_INIT_DOWNLOAD = 0,
+ HALO_STATE_CODE_START,
+ HALO_STATE_CODE_RUN
+};
+
+extern const struct dev_pm_ops cs35l41_hda_pm_ops;
+
int cs35l41_hda_probe(struct device *dev, const char *device_name, int id, int irq,
struct regmap *regmap);
void cs35l41_hda_remove(struct device *dev);
diff --git a/sound/pci/hda/cs35l41_hda_i2c.c b/sound/pci/hda/cs35l41_hda_i2c.c
index e810b278fb91..5baacfde4f16 100644
--- a/sound/pci/hda/cs35l41_hda_i2c.c
+++ b/sound/pci/hda/cs35l41_hda_i2c.c
@@ -1,14 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
//
-// cs35l41.c -- CS35l41 HDA I2C driver
+// CS35l41 HDA I2C driver
//
// Copyright 2021 Cirrus Logic, Inc.
//
// Author: Lucas Tanure <tanureal@opensource.cirrus.com>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/i2c.h>
-#include <linux/acpi.h>
#include "cs35l41_hda.h"
@@ -16,11 +16,14 @@ static int cs35l41_hda_i2c_probe(struct i2c_client *clt, const struct i2c_device
{
const char *device_name;
- /* Compare against the device name so it works for I2C, normal ACPI
- * and for ACPI by i2c-multi-instantiate matching cases
+ /*
+ * Compare against the device name so it works for I2C, normal ACPI
+ * and for ACPI by serial-multi-instantiate matching cases.
*/
if (strstr(dev_name(&clt->dev), "CLSA0100"))
device_name = "CLSA0100";
+ else if (strstr(dev_name(&clt->dev), "CLSA0101"))
+ device_name = "CLSA0101";
else if (strstr(dev_name(&clt->dev), "CSC3551"))
device_name = "CSC3551";
else
@@ -42,19 +45,19 @@ static const struct i2c_device_id cs35l41_hda_i2c_id[] = {
{}
};
-#ifdef CONFIG_ACPI
static const struct acpi_device_id cs35l41_acpi_hda_match[] = {
{"CLSA0100", 0 },
+ {"CLSA0101", 0 },
{"CSC3551", 0 },
- { },
+ {}
};
MODULE_DEVICE_TABLE(acpi, cs35l41_acpi_hda_match);
-#endif
static struct i2c_driver cs35l41_i2c_driver = {
.driver = {
.name = "cs35l41-hda",
- .acpi_match_table = ACPI_PTR(cs35l41_acpi_hda_match),
+ .acpi_match_table = cs35l41_acpi_hda_match,
+ .pm = &cs35l41_hda_pm_ops,
},
.id_table = cs35l41_hda_i2c_id,
.probe = cs35l41_hda_i2c_probe,
diff --git a/sound/pci/hda/cs35l41_hda_spi.c b/sound/pci/hda/cs35l41_hda_spi.c
index 22e088f28438..71979cfb4d7e 100644
--- a/sound/pci/hda/cs35l41_hda_spi.c
+++ b/sound/pci/hda/cs35l41_hda_spi.c
@@ -1,12 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
//
-// cs35l41.c -- CS35l41 HDA SPI driver
+// CS35l41 HDA SPI driver
//
// Copyright 2021 Cirrus Logic, Inc.
//
// Author: Lucas Tanure <tanureal@opensource.cirrus.com>
-#include <linux/acpi.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/spi/spi.h>
@@ -16,8 +16,9 @@ static int cs35l41_hda_spi_probe(struct spi_device *spi)
{
const char *device_name;
- /* Compare against the device name so it works for SPI, normal ACPI
- * and for ACPI by spi-multi-instantiate matching cases
+ /*
+ * Compare against the device name so it works for SPI, normal ACPI
+ * and for ACPI by serial-multi-instantiate matching cases.
*/
if (strstr(dev_name(&spi->dev), "CSC3551"))
device_name = "CSC3551";
@@ -38,18 +39,17 @@ static const struct spi_device_id cs35l41_hda_spi_id[] = {
{}
};
-#ifdef CONFIG_ACPI
static const struct acpi_device_id cs35l41_acpi_hda_match[] = {
{ "CSC3551", 0 },
- {},
+ {}
};
MODULE_DEVICE_TABLE(acpi, cs35l41_acpi_hda_match);
-#endif
static struct spi_driver cs35l41_spi_driver = {
.driver = {
.name = "cs35l41-hda",
- .acpi_match_table = ACPI_PTR(cs35l41_acpi_hda_match),
+ .acpi_match_table = cs35l41_acpi_hda_match,
+ .pm = &cs35l41_hda_pm_ops,
},
.id_table = cs35l41_hda_spi_id,
.probe = cs35l41_hda_spi_probe,
diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c
index c572fb5886d5..cae9a975cbcc 100644
--- a/sound/pci/hda/hda_bind.c
+++ b/sound/pci/hda/hda_bind.c
@@ -248,6 +248,13 @@ static bool is_likely_hdmi_codec(struct hda_codec *codec)
{
hda_nid_t nid;
+ /*
+ * For ASoC users, if snd_hda_hdmi_codec module is denylisted and any
+ * event causes i915 enumeration to fail, ->wcaps remains uninitialized.
+ */
+ if (!codec->wcaps)
+ return true;
+
for_each_hda_codec_node(nid, codec) {
unsigned int wcaps = get_wcaps(codec, nid);
switch (get_wcaps_type(wcaps)) {
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 7579a6982f47..384426d7e9dd 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -772,11 +772,11 @@ static void codec_release_pcms(struct hda_codec *codec)
*/
void snd_hda_codec_cleanup_for_unbind(struct hda_codec *codec)
{
- if (codec->registered) {
+ if (codec->core.registered) {
/* pm_runtime_put() is called in snd_hdac_device_exit() */
pm_runtime_get_noresume(hda_codec_dev(codec));
pm_runtime_disable(hda_codec_dev(codec));
- codec->registered = 0;
+ codec->core.registered = 0;
}
snd_hda_codec_disconnect_pcms(codec);
@@ -825,14 +825,14 @@ void snd_hda_codec_display_power(struct hda_codec *codec, bool enable)
*/
void snd_hda_codec_register(struct hda_codec *codec)
{
- if (codec->registered)
+ if (codec->core.registered)
return;
if (device_is_registered(hda_codec_dev(codec))) {
snd_hda_codec_display_power(codec, true);
pm_runtime_enable(hda_codec_dev(codec));
/* it was powered up in snd_hda_codec_new(), now all done */
snd_hda_power_down(codec);
- codec->registered = 1;
+ codec->core.registered = 1;
}
}
EXPORT_SYMBOL_GPL(snd_hda_codec_register);
@@ -950,6 +950,7 @@ int snd_hda_codec_new(struct hda_bus *bus, struct snd_card *card,
unsigned int codec_addr, struct hda_codec **codecp)
{
struct hda_codec *codec;
+ int ret;
codec = snd_hda_codec_device_init(bus, codec_addr, "hdaudioC%dD%d",
card->number, codec_addr);
@@ -957,7 +958,11 @@ int snd_hda_codec_new(struct hda_bus *bus, struct snd_card *card,
return PTR_ERR(codec);
*codecp = codec;
- return snd_hda_codec_device_new(bus, card, codec_addr, *codecp, true);
+ ret = snd_hda_codec_device_new(bus, card, codec_addr, *codecp, true);
+ if (ret)
+ put_device(hda_codec_dev(*codecp));
+
+ return ret;
}
EXPORT_SYMBOL_GPL(snd_hda_codec_new);
@@ -1012,19 +1017,17 @@ int snd_hda_codec_device_new(struct hda_bus *bus, struct snd_card *card,
if (codec->bus->modelname) {
codec->modelname = kstrdup(codec->bus->modelname, GFP_KERNEL);
- if (!codec->modelname) {
- err = -ENOMEM;
- goto error;
- }
+ if (!codec->modelname)
+ return -ENOMEM;
}
fg = codec->core.afg ? codec->core.afg : codec->core.mfg;
err = read_widget_caps(codec, fg);
if (err < 0)
- goto error;
+ return err;
err = read_pin_defaults(codec);
if (err < 0)
- goto error;
+ return err;
/* power-up all before initialization */
hda_set_power_state(codec, AC_PWRST_D0);
@@ -1042,17 +1045,19 @@ int snd_hda_codec_device_new(struct hda_bus *bus, struct snd_card *card,
/* ASoC features component management instead */
err = snd_device_new(card, SNDRV_DEV_CODEC, codec, &dev_ops);
if (err < 0)
- goto error;
+ return err;
}
+#ifdef CONFIG_PM
/* PM runtime needs to be enabled later after binding codec */
- pm_runtime_forbid(&codec->core.dev);
+ if (codec->core.dev.power.runtime_auto)
+ pm_runtime_forbid(&codec->core.dev);
+ else
+ /* Keep the usage_count consistent across subsequent probing */
+ pm_runtime_get_noresume(&codec->core.dev);
+#endif
return 0;
-
- error:
- put_device(hda_codec_dev(codec));
- return err;
}
EXPORT_SYMBOL_GPL(snd_hda_codec_device_new);
@@ -2935,8 +2940,7 @@ static int hda_codec_runtime_suspend(struct device *dev)
if (!codec->card)
return 0;
- if (!codec->bus->jackpoll_in_suspend)
- cancel_delayed_work_sync(&codec->jackpoll_work);
+ cancel_delayed_work_sync(&codec->jackpoll_work);
state = hda_call_codec_suspend(codec);
if (codec->link_down_at_suspend ||
@@ -2944,6 +2948,11 @@ static int hda_codec_runtime_suspend(struct device *dev)
(state & AC_PWRST_CLK_STOP_OK)))
snd_hdac_codec_link_down(&codec->core);
snd_hda_codec_display_power(codec, false);
+
+ if (codec->bus->jackpoll_in_suspend &&
+ (dev->power.power_state.event != PM_EVENT_SUSPEND))
+ schedule_delayed_work(&codec->jackpoll_work,
+ codec->jackpoll_interval);
return 0;
}
@@ -2967,6 +2976,9 @@ static int hda_codec_runtime_resume(struct device *dev)
#ifdef CONFIG_PM_SLEEP
static int hda_codec_pm_prepare(struct device *dev)
{
+ struct hda_codec *codec = dev_to_hda_codec(dev);
+
+ cancel_delayed_work_sync(&codec->jackpoll_work);
dev->power.power_state = PMSG_SUSPEND;
return pm_runtime_suspended(dev);
}
@@ -2986,9 +2998,6 @@ static void hda_codec_pm_complete(struct device *dev)
static int hda_codec_pm_suspend(struct device *dev)
{
- struct hda_codec *codec = dev_to_hda_codec(dev);
-
- cancel_delayed_work_sync(&codec->jackpoll_work);
dev->power.power_state = PMSG_SUSPEND;
return pm_runtime_force_suspend(dev);
}
@@ -3043,7 +3052,7 @@ void snd_hda_codec_shutdown(struct hda_codec *codec)
struct hda_pcm *cpcm;
/* Skip the shutdown if codec is not registered */
- if (!codec->registered)
+ if (!codec->core.registered)
return;
cancel_delayed_work_sync(&codec->jackpoll_work);
diff --git a/sound/pci/hda/hda_component.h b/sound/pci/hda/hda_component.h
index e26c896a13f3..1223621bd62c 100644
--- a/sound/pci/hda/hda_component.h
+++ b/sound/pci/hda/hda_component.h
@@ -14,5 +14,8 @@
struct hda_component {
struct device *dev;
char name[HDA_MAX_NAME_SIZE];
+ struct hda_codec *codec;
void (*playback_hook)(struct device *dev, int action);
+ int (*suspend_hook)(struct device *dev);
+ int (*resume_hook)(struct device *dev);
};
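
The codec back-pointer and the two PM hooks added to struct hda_component let an externally bound amplifier driver take part in the codec's suspend/resume flow: the patch_realtek.c hunks below walk spec->comps[] and invoke the hooks from alc269_suspend()/alc269_resume() and again right after component binding. A minimal sketch of how a component (amp) driver might populate the structure from its bind callback; the my_amp driver and its helpers are hypothetical, not taken from this series:

/* Illustrative sketch only; my_amp and the my_amp_* helpers are made up */
static int my_amp_bind(struct device *dev, struct device *master, void *master_data)
{
	struct hda_component *comps = master_data;	/* comps[] array owned by the codec driver */
	struct my_amp *amp = dev_get_drvdata(dev);

	comps[amp->index].dev = dev;
	strscpy(comps[amp->index].name, dev_name(dev), sizeof(comps[amp->index].name));
	comps[amp->index].playback_hook = my_amp_playback_hook;
	/* new in this series: let the codec drive the amp's PM transitions */
	comps[amp->index].suspend_hook = my_amp_suspend;
	comps[amp->index].resume_hook = my_amp_resume;
	return 0;
}

The new .codec member is filled in from the other side, by the codec driver, in the cs35l41_generic_fixup() hunk further down.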
diff --git a/sound/pci/hda/hda_cs_dsp_ctl.c b/sound/pci/hda/hda_cs_dsp_ctl.c
new file mode 100644
index 000000000000..89ee549cb7d5
--- /dev/null
+++ b/sound/pci/hda/hda_cs_dsp_ctl.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// HDA DSP ALSA Control Driver
+//
+// Copyright 2022 Cirrus Logic, Inc.
+//
+// Author: Stefan Binding <sbinding@opensource.cirrus.com>
+
+#include <linux/module.h>
+#include <sound/soc.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include "hda_cs_dsp_ctl.h"
+
+#define ADSP_MAX_STD_CTRL_SIZE 512
+
+struct hda_cs_dsp_coeff_ctl {
+ struct cs_dsp_coeff_ctl *cs_ctl;
+ struct snd_card *card;
+ struct snd_kcontrol *kctl;
+};
+
+static const char * const hda_cs_dsp_fw_text[HDA_CS_DSP_NUM_FW] = {
+ [HDA_CS_DSP_FW_SPK_PROT] = "Prot",
+ [HDA_CS_DSP_FW_SPK_CALI] = "Cali",
+ [HDA_CS_DSP_FW_SPK_DIAG] = "Diag",
+ [HDA_CS_DSP_FW_MISC] = "Misc",
+};
+
+const char * const hda_cs_dsp_fw_ids[HDA_CS_DSP_NUM_FW] = {
+ [HDA_CS_DSP_FW_SPK_PROT] = "spk-prot",
+ [HDA_CS_DSP_FW_SPK_CALI] = "spk-cali",
+ [HDA_CS_DSP_FW_SPK_DIAG] = "spk-diag",
+ [HDA_CS_DSP_FW_MISC] = "misc",
+};
+EXPORT_SYMBOL_NS_GPL(hda_cs_dsp_fw_ids, SND_HDA_CS_DSP_CONTROLS);
+
+static int hda_cs_dsp_coeff_info(struct snd_kcontrol *kctl, struct snd_ctl_elem_info *uinfo)
+{
+ struct hda_cs_dsp_coeff_ctl *ctl = (struct hda_cs_dsp_coeff_ctl *)snd_kcontrol_chip(kctl);
+ struct cs_dsp_coeff_ctl *cs_ctl = ctl->cs_ctl;
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+ uinfo->count = cs_ctl->len;
+
+ return 0;
+}
+
+static int hda_cs_dsp_coeff_put(struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
+{
+ struct hda_cs_dsp_coeff_ctl *ctl = (struct hda_cs_dsp_coeff_ctl *)snd_kcontrol_chip(kctl);
+ struct cs_dsp_coeff_ctl *cs_ctl = ctl->cs_ctl;
+ char *p = ucontrol->value.bytes.data;
+ int ret = 0;
+
+ mutex_lock(&cs_ctl->dsp->pwr_lock);
+ ret = cs_dsp_coeff_write_ctrl(cs_ctl, 0, p, cs_ctl->len);
+ mutex_unlock(&cs_ctl->dsp->pwr_lock);
+
+ return ret;
+}
+
+static int hda_cs_dsp_coeff_get(struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
+{
+ struct hda_cs_dsp_coeff_ctl *ctl = (struct hda_cs_dsp_coeff_ctl *)snd_kcontrol_chip(kctl);
+ struct cs_dsp_coeff_ctl *cs_ctl = ctl->cs_ctl;
+ char *p = ucontrol->value.bytes.data;
+ int ret;
+
+ mutex_lock(&cs_ctl->dsp->pwr_lock);
+ ret = cs_dsp_coeff_read_ctrl(cs_ctl, 0, p, cs_ctl->len);
+ mutex_unlock(&cs_ctl->dsp->pwr_lock);
+
+ return ret;
+}
+
+static unsigned int wmfw_convert_flags(unsigned int in)
+{
+ unsigned int out, rd, wr, vol;
+
+ rd = SNDRV_CTL_ELEM_ACCESS_READ;
+ wr = SNDRV_CTL_ELEM_ACCESS_WRITE;
+ vol = SNDRV_CTL_ELEM_ACCESS_VOLATILE;
+
+ out = 0;
+
+ if (in) {
+ out |= rd;
+ if (in & WMFW_CTL_FLAG_WRITEABLE)
+ out |= wr;
+ if (in & WMFW_CTL_FLAG_VOLATILE)
+ out |= vol;
+ } else {
+ out |= rd | wr | vol;
+ }
+
+ return out;
+}
+
+static int hda_cs_dsp_add_kcontrol(struct hda_cs_dsp_coeff_ctl *ctl, const char *name)
+{
+ struct cs_dsp_coeff_ctl *cs_ctl = ctl->cs_ctl;
+ struct snd_kcontrol_new kcontrol = {0};
+ struct snd_kcontrol *kctl;
+ int ret = 0;
+
+ if (cs_ctl->len > ADSP_MAX_STD_CTRL_SIZE) {
+ dev_err(cs_ctl->dsp->dev, "KControl %s: length %zu exceeds maximum %d\n", name,
+ cs_ctl->len, ADSP_MAX_STD_CTRL_SIZE);
+ return -EINVAL;
+ }
+
+ kcontrol.name = name;
+ kcontrol.info = hda_cs_dsp_coeff_info;
+ kcontrol.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+ kcontrol.access = wmfw_convert_flags(cs_ctl->flags);
+ kcontrol.get = hda_cs_dsp_coeff_get;
+ kcontrol.put = hda_cs_dsp_coeff_put;
+
+	/*
+	 * Save ctl inside private_data; ctl is owned by cs_dsp and will be
+	 * freed when cs_dsp removes the control.
+	 */
+ kctl = snd_ctl_new1(&kcontrol, (void *)ctl);
+ if (!kctl) {
+ ret = -ENOMEM;
+ return ret;
+ }
+
+ ret = snd_ctl_add(ctl->card, kctl);
+ if (ret) {
+ dev_err(cs_ctl->dsp->dev, "Failed to add KControl %s = %d\n", kcontrol.name, ret);
+ return ret;
+ }
+
+ dev_dbg(cs_ctl->dsp->dev, "Added KControl: %s\n", kcontrol.name);
+ ctl->kctl = kctl;
+
+ return 0;
+}
+
+int hda_cs_dsp_control_add(struct cs_dsp_coeff_ctl *cs_ctl, struct hda_cs_dsp_ctl_info *info)
+{
+ struct cs_dsp *cs_dsp = cs_ctl->dsp;
+ char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+ struct hda_cs_dsp_coeff_ctl *ctl;
+ const char *region_name;
+ int ret;
+
+ if (cs_ctl->flags & WMFW_CTL_FLAG_SYS)
+ return 0;
+
+ region_name = cs_dsp_mem_region_name(cs_ctl->alg_region.type);
+ if (!region_name) {
+ dev_err(cs_dsp->dev, "Unknown region type: %d\n", cs_ctl->alg_region.type);
+ return -EINVAL;
+ }
+
+ ret = scnprintf(name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN, "%s %s %.12s %x", info->device_name,
+ cs_dsp->name, hda_cs_dsp_fw_text[info->fw_type], cs_ctl->alg_region.alg);
+
+ if (cs_ctl->subname) {
+ int avail = SNDRV_CTL_ELEM_ID_NAME_MAXLEN - ret - 2;
+ int skip = 0;
+
+ /* Truncate the subname from the start if it is too long */
+ if (cs_ctl->subname_len > avail)
+ skip = cs_ctl->subname_len - avail;
+
+ snprintf(name + ret, SNDRV_CTL_ELEM_ID_NAME_MAXLEN - ret,
+ " %.*s", cs_ctl->subname_len - skip, cs_ctl->subname + skip);
+ }
+
+ ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
+ if (!ctl)
+ return -ENOMEM;
+
+ ctl->cs_ctl = cs_ctl;
+ ctl->card = info->card;
+ cs_ctl->priv = ctl;
+
+ ret = hda_cs_dsp_add_kcontrol(ctl, name);
+ if (ret) {
+ dev_err(cs_dsp->dev, "Error (%d) adding control %s\n", ret, name);
+ kfree(ctl);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(hda_cs_dsp_control_add, SND_HDA_CS_DSP_CONTROLS);
+
+void hda_cs_dsp_control_remove(struct cs_dsp_coeff_ctl *cs_ctl)
+{
+ struct hda_cs_dsp_coeff_ctl *ctl = cs_ctl->priv;
+
+ kfree(ctl);
+}
+EXPORT_SYMBOL_NS_GPL(hda_cs_dsp_control_remove, SND_HDA_CS_DSP_CONTROLS);
+
+int hda_cs_dsp_write_ctl(struct cs_dsp *dsp, const char *name, int type,
+ unsigned int alg, const void *buf, size_t len)
+{
+ struct cs_dsp_coeff_ctl *cs_ctl;
+ struct hda_cs_dsp_coeff_ctl *ctl;
+ int ret;
+
+ cs_ctl = cs_dsp_get_ctl(dsp, name, type, alg);
+ if (!cs_ctl)
+ return -EINVAL;
+
+ ctl = cs_ctl->priv;
+
+ ret = cs_dsp_coeff_write_ctrl(cs_ctl, 0, buf, len);
+ if (ret)
+ return ret;
+
+ if (cs_ctl->flags & WMFW_CTL_FLAG_SYS)
+ return 0;
+
+ snd_ctl_notify(ctl->card, SNDRV_CTL_EVENT_MASK_VALUE, &ctl->kctl->id);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(hda_cs_dsp_write_ctl, SND_HDA_CS_DSP_CONTROLS);
+
+int hda_cs_dsp_read_ctl(struct cs_dsp *dsp, const char *name, int type,
+ unsigned int alg, void *buf, size_t len)
+{
+ struct cs_dsp_coeff_ctl *cs_ctl;
+
+ cs_ctl = cs_dsp_get_ctl(dsp, name, type, alg);
+ if (!cs_ctl)
+ return -EINVAL;
+
+ return cs_dsp_coeff_read_ctrl(cs_ctl, 0, buf, len);
+}
+EXPORT_SYMBOL_NS_GPL(hda_cs_dsp_read_ctl, SND_HDA_CS_DSP_CONTROLS);
+
+MODULE_DESCRIPTION("CS_DSP ALSA Control HDA Library");
+MODULE_AUTHOR("Stefan Binding, <sbinding@opensource.cirrus.com>");
+MODULE_LICENSE("GPL");
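
The helpers above are meant to be driven by a codec or amplifier driver that already owns a struct cs_dsp instance: register a byte control from the cs_dsp control_add callback, then read or write firmware coefficients by name against the same card. A rough usage sketch under those assumptions; my_drv, the other my_* names and the "CAL_R" control name are illustrative and not taken from this diff:

#include <linux/firmware/cirrus/cs_dsp.h>
#include <linux/firmware/cirrus/wmfw.h>
#include "hda_cs_dsp_ctl.h"

/* wired up through struct cs_dsp_client_ops .control_add */
static int my_control_add(struct cs_dsp_coeff_ctl *cs_ctl)
{
	struct my_drv *drv = container_of(cs_ctl->dsp, struct my_drv, cs_dsp);
	struct hda_cs_dsp_ctl_info info = {
		.card = drv->card,
		.fw_type = HDA_CS_DSP_FW_SPK_PROT,
		.device_name = drv->amp_name,
	};

	return hda_cs_dsp_control_add(cs_ctl, &info);
}

static int my_write_calibration(struct my_drv *drv, const void *data, size_t len)
{
	/* looks the control up by name, memory region and algorithm ID,
	 * writes it and notifies userspace via the bound kcontrol */
	return hda_cs_dsp_write_ctl(&drv->cs_dsp, "CAL_R", WMFW_ADSP2_XM,
				    drv->cal_alg_id, data, len);
}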
diff --git a/sound/pci/hda/hda_cs_dsp_ctl.h b/sound/pci/hda/hda_cs_dsp_ctl.h
new file mode 100644
index 000000000000..4babc69cf2f0
--- /dev/null
+++ b/sound/pci/hda/hda_cs_dsp_ctl.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * HDA DSP ALSA Control Driver
+ *
+ * Copyright 2022 Cirrus Logic, Inc.
+ *
+ * Author: Stefan Binding <sbinding@opensource.cirrus.com>
+ */
+
+#ifndef __HDA_CS_DSP_CTL_H__
+#define __HDA_CS_DSP_CTL_H__
+
+#include <sound/soc.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+
+enum hda_cs_dsp_fw_id {
+ HDA_CS_DSP_FW_SPK_PROT,
+ HDA_CS_DSP_FW_SPK_CALI,
+ HDA_CS_DSP_FW_SPK_DIAG,
+ HDA_CS_DSP_FW_MISC,
+ HDA_CS_DSP_NUM_FW
+};
+
+struct hda_cs_dsp_ctl_info {
+ struct snd_card *card;
+ enum hda_cs_dsp_fw_id fw_type;
+ const char *device_name;
+};
+
+extern const char * const hda_cs_dsp_fw_ids[HDA_CS_DSP_NUM_FW];
+
+int hda_cs_dsp_control_add(struct cs_dsp_coeff_ctl *cs_ctl, struct hda_cs_dsp_ctl_info *info);
+void hda_cs_dsp_control_remove(struct cs_dsp_coeff_ctl *cs_ctl);
+int hda_cs_dsp_write_ctl(struct cs_dsp *dsp, const char *name, int type,
+ unsigned int alg, const void *buf, size_t len);
+int hda_cs_dsp_read_ctl(struct cs_dsp *dsp, const char *name, int type,
+ unsigned int alg, void *buf, size_t len);
+
+#endif /*__HDA_CS_DSP_CTL_H__*/
diff --git a/sound/pci/hda/hda_sysfs.c b/sound/pci/hda/hda_sysfs.c
index d5ffcba794e5..bf951c10ae61 100644
--- a/sound/pci/hda/hda_sysfs.c
+++ b/sound/pci/hda/hda_sysfs.c
@@ -33,7 +33,7 @@ static ssize_t power_on_acct_show(struct device *dev,
{
struct hda_codec *codec = dev_get_drvdata(dev);
snd_hda_update_power_acct(codec);
- return sprintf(buf, "%u\n", jiffies_to_msecs(codec->power_on_acct));
+ return sysfs_emit(buf, "%u\n", jiffies_to_msecs(codec->power_on_acct));
}
static ssize_t power_off_acct_show(struct device *dev,
@@ -42,7 +42,7 @@ static ssize_t power_off_acct_show(struct device *dev,
{
struct hda_codec *codec = dev_get_drvdata(dev);
snd_hda_update_power_acct(codec);
- return sprintf(buf, "%u\n", jiffies_to_msecs(codec->power_off_acct));
+ return sysfs_emit(buf, "%u\n", jiffies_to_msecs(codec->power_off_acct));
}
static DEVICE_ATTR_RO(power_on_acct);
@@ -55,7 +55,7 @@ static ssize_t type##_show(struct device *dev, \
char *buf) \
{ \
struct hda_codec *codec = dev_get_drvdata(dev); \
- return sprintf(buf, "0x%x\n", codec->field); \
+ return sysfs_emit(buf, "0x%x\n", codec->field); \
}
#define CODEC_INFO_STR_SHOW(type, field) \
@@ -64,8 +64,8 @@ static ssize_t type##_show(struct device *dev, \
char *buf) \
{ \
struct hda_codec *codec = dev_get_drvdata(dev); \
- return sprintf(buf, "%s\n", \
- codec->field ? codec->field : ""); \
+ return sysfs_emit(buf, "%s\n", \
+ codec->field ? codec->field : ""); \
}
CODEC_INFO_SHOW(vendor_id, core.vendor_id);
@@ -85,8 +85,8 @@ static ssize_t pin_configs_show(struct hda_codec *codec,
int i, len = 0;
mutex_lock(&codec->user_mutex);
snd_array_for_each(list, i, pin) {
- len += sprintf(buf + len, "0x%02x 0x%08x\n",
- pin->nid, pin->cfg);
+ len += sysfs_emit_at(buf, len, "0x%02x 0x%08x\n",
+ pin->nid, pin->cfg);
}
mutex_unlock(&codec->user_mutex);
return len;
@@ -222,9 +222,8 @@ static ssize_t init_verbs_show(struct device *dev,
int i, len = 0;
mutex_lock(&codec->user_mutex);
snd_array_for_each(&codec->init_verbs, i, v) {
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "0x%02x 0x%03x 0x%04x\n",
- v->nid, v->verb, v->param);
+ len += sysfs_emit_at(buf, len, "0x%02x 0x%03x 0x%04x\n",
+ v->nid, v->verb, v->param);
}
mutex_unlock(&codec->user_mutex);
return len;
@@ -272,8 +271,8 @@ static ssize_t hints_show(struct device *dev,
int i, len = 0;
mutex_lock(&codec->user_mutex);
snd_array_for_each(&codec->hints, i, hint) {
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "%s = %s\n", hint->key, hint->val);
+ len += sysfs_emit_at(buf, len, "%s = %s\n",
+ hint->key, hint->val);
}
mutex_unlock(&codec->user_mutex);
return len;
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 678fbcaf2a3b..6807b4708a17 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -395,6 +395,7 @@ static const struct snd_pci_quirk cs420x_fixup_tbl[] = {
/* codec SSID */
SND_PCI_QUIRK(0x106b, 0x0600, "iMac 14,1", CS420X_IMAC27_122),
+ SND_PCI_QUIRK(0x106b, 0x0900, "iMac 12,1", CS420X_IMAC27_122),
SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81),
SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122),
SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101),
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 83ae21a01bbf..7b1a30a551f6 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -222,6 +222,7 @@ enum {
CXT_PINCFG_LEMOTE_A1205,
CXT_PINCFG_COMPAQ_CQ60,
CXT_FIXUP_STEREO_DMIC,
+ CXT_PINCFG_LENOVO_NOTEBOOK,
CXT_FIXUP_INC_MIC_BOOST,
CXT_FIXUP_HEADPHONE_MIC_PIN,
CXT_FIXUP_HEADPHONE_MIC,
@@ -772,6 +773,14 @@ static const struct hda_fixup cxt_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = cxt_fixup_stereo_dmic,
},
+ [CXT_PINCFG_LENOVO_NOTEBOOK] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x1a, 0x05d71030 },
+ { }
+ },
+ .chain_id = CXT_FIXUP_STEREO_DMIC,
+ },
[CXT_FIXUP_INC_MIC_BOOST] = {
.type = HDA_FIXUP_FUNC,
.v.func = cxt5066_increase_mic_boost,
@@ -971,7 +980,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK(0x17aa, 0x3905, "Lenovo G50-30", CXT_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
- SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
+ SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_PINCFG_LENOVO_NOTEBOOK),
SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo G50-70", CXT_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI),
diff --git a/sound/pci/hda/patch_cs8409-tables.c b/sound/pci/hda/patch_cs8409-tables.c
index 4f4cc8215917..b288874e401e 100644
--- a/sound/pci/hda/patch_cs8409-tables.c
+++ b/sound/pci/hda/patch_cs8409-tables.c
@@ -68,7 +68,7 @@ const struct hda_verb cs8409_cs42l42_init_verbs[] = {
{} /* terminator */
};
-const struct hda_pintbl cs8409_cs42l42_pincfgs[] = {
+static const struct hda_pintbl cs8409_cs42l42_pincfgs[] = {
{ CS8409_PIN_ASP1_TRANSMITTER_A, 0x042120f0 }, /* ASP-1-TX */
{ CS8409_PIN_ASP1_RECEIVER_A, 0x04a12050 }, /* ASP-1-RX */
{ CS8409_PIN_ASP2_TRANSMITTER_A, 0x901000f0 }, /* ASP-2-TX */
@@ -76,7 +76,7 @@ const struct hda_pintbl cs8409_cs42l42_pincfgs[] = {
{} /* terminator */
};
-const struct hda_pintbl cs8409_cs42l42_pincfgs_no_dmic[] = {
+static const struct hda_pintbl cs8409_cs42l42_pincfgs_no_dmic[] = {
{ CS8409_PIN_ASP1_TRANSMITTER_A, 0x042120f0 }, /* ASP-1-TX */
{ CS8409_PIN_ASP1_RECEIVER_A, 0x04a12050 }, /* ASP-1-RX */
{ CS8409_PIN_ASP2_TRANSMITTER_A, 0x901000f0 }, /* ASP-2-TX */
@@ -279,7 +279,7 @@ const struct hda_verb dolphin_init_verbs[] = {
{} /* terminator */
};
-const struct hda_pintbl dolphin_pincfgs[] = {
+static const struct hda_pintbl dolphin_pincfgs[] = {
{ 0x24, 0x022210f0 }, /* ASP-1-TX-A */
{ 0x25, 0x010240f0 }, /* ASP-1-TX-B */
{ 0x34, 0x02a21050 }, /* ASP-1-RX */
@@ -546,6 +546,10 @@ const struct snd_pci_quirk cs8409_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0BD6, "Dolphin", CS8409_DOLPHIN),
SND_PCI_QUIRK(0x1028, 0x0BD7, "Dolphin", CS8409_DOLPHIN),
SND_PCI_QUIRK(0x1028, 0x0BD8, "Dolphin", CS8409_DOLPHIN),
+ SND_PCI_QUIRK(0x1028, 0x0C43, "Dolphin", CS8409_DOLPHIN),
+ SND_PCI_QUIRK(0x1028, 0x0C50, "Dolphin", CS8409_DOLPHIN),
+ SND_PCI_QUIRK(0x1028, 0x0C51, "Dolphin", CS8409_DOLPHIN),
+ SND_PCI_QUIRK(0x1028, 0x0C52, "Dolphin", CS8409_DOLPHIN),
{} /* terminator */
};
diff --git a/sound/pci/hda/patch_cs8409.h b/sound/pci/hda/patch_cs8409.h
index 260388a6256c..2a8dfb4ff046 100644
--- a/sound/pci/hda/patch_cs8409.h
+++ b/sound/pci/hda/patch_cs8409.h
@@ -358,13 +358,11 @@ extern const struct snd_pci_quirk cs8409_fixup_tbl[];
extern const struct hda_model_fixup cs8409_models[];
extern const struct hda_fixup cs8409_fixups[];
extern const struct hda_verb cs8409_cs42l42_init_verbs[];
-extern const struct hda_pintbl cs8409_cs42l42_pincfgs[];
extern const struct cs8409_cir_param cs8409_cs42l42_hw_cfg[];
extern const struct cs8409_cir_param cs8409_cs42l42_bullseye_atn[];
extern struct sub_codec cs8409_cs42l42_codec;
extern const struct hda_verb dolphin_init_verbs[];
-extern const struct hda_pintbl dolphin_pincfgs[];
extern const struct cs8409_cir_param dolphin_hw_cfg[];
extern struct sub_codec dolphin_cs42l42_0;
extern struct sub_codec dolphin_cs42l42_1;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 2f55bc43bfa9..47e72cf76608 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4021,15 +4021,22 @@ static void alc5505_dsp_init(struct hda_codec *codec)
static int alc269_suspend(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
+ int i;
if (spec->has_alc5505_dsp)
alc5505_dsp_suspend(codec);
+
+ for (i = 0; i < HDA_MAX_COMPONENTS; i++)
+ if (spec->comps[i].suspend_hook)
+ spec->comps[i].suspend_hook(spec->comps[i].dev);
+
return alc_suspend(codec);
}
static int alc269_resume(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
+ int i;
if (spec->codec_variant == ALC269_TYPE_ALC269VB)
alc269vb_toggle_power_output(codec, 0);
@@ -4060,6 +4067,10 @@ static int alc269_resume(struct hda_codec *codec)
if (spec->has_alc5505_dsp)
alc5505_dsp_resume(codec);
+ for (i = 0; i < HDA_MAX_COMPONENTS; i++)
+ if (spec->comps[i].resume_hook)
+ spec->comps[i].resume_hook(spec->comps[i].dev);
+
return 0;
}
#endif /* CONFIG_PM */
@@ -6610,8 +6621,20 @@ static int comp_bind(struct device *dev)
{
struct hda_codec *cdc = dev_to_hda_codec(dev);
struct alc_spec *spec = cdc->spec;
+ int ret, i;
+
+ ret = component_bind_all(dev, spec->comps);
+ if (ret)
+ return ret;
- return component_bind_all(dev, spec->comps);
+ if (snd_hdac_is_power_on(&cdc->core)) {
+ codec_dbg(cdc, "Resuming after bind.\n");
+ for (i = 0; i < HDA_MAX_COMPONENTS; i++)
+ if (spec->comps[i].resume_hook)
+ spec->comps[i].resume_hook(spec->comps[i].dev);
+ }
+
+ return 0;
}
static void comp_unbind(struct device *dev)
@@ -6654,6 +6677,7 @@ static void cs35l41_generic_fixup(struct hda_codec *cdc, int action, const char
"%s-%s:00-cs35l41-hda.%d", bus, hid, i);
if (!name)
return;
+ spec->comps[i].codec = cdc;
component_match_add(dev, &spec->match, component_compare_dev_name, name);
}
ret = component_master_add_with_match(dev, &comp_master_ops, spec->match);
@@ -6686,6 +6710,12 @@ static void alc287_fixup_legion_16achg6_speakers(struct hda_codec *cdc, const st
cs35l41_generic_fixup(cdc, action, "i2c", "CLSA0100", 2);
}
+static void alc287_fixup_legion_16ithg6_speakers(struct hda_codec *cdc, const struct hda_fixup *fix,
+ int action)
+{
+ cs35l41_generic_fixup(cdc, action, "i2c", "CLSA0101", 2);
+}
+
/* for alc295_fixup_hp_top_speakers */
#include "hp_x360_helper.c"
@@ -6787,6 +6817,43 @@ static void alc_fixup_dell4_mic_no_presence_quiet(struct hda_codec *codec,
}
}
+static void alc287_fixup_yoga9_14iap7_bass_spk_pin(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ /*
+ * The Pin Complex 0x17 for the bass speakers is wrongly reported as
+ * unconnected.
+ */
+ static const struct hda_pintbl pincfgs[] = {
+ { 0x17, 0x90170121 },
+ { }
+ };
+ /*
+ * Avoid DAC 0x06 and 0x08, as they have no volume controls.
+ * DAC 0x02 and 0x03 would be fine.
+ */
+ static const hda_nid_t conn[] = { 0x02, 0x03 };
+ /*
+ * Prefer both speakerbar (0x14) and bass speakers (0x17) connected to DAC 0x02.
+ * Headphones (0x21) are connected to DAC 0x03.
+ */
+ static const hda_nid_t preferred_pairs[] = {
+ 0x14, 0x02,
+ 0x17, 0x02,
+ 0x21, 0x03,
+ 0
+ };
+ struct alc_spec *spec = codec->spec;
+
+ switch (action) {
+ case HDA_FIXUP_ACT_PRE_PROBE:
+ snd_hda_apply_pincfgs(codec, pincfgs);
+ snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn);
+ spec->gen.preferred_dacs = preferred_pairs;
+ break;
+ }
+}
+
enum {
ALC269_FIXUP_GPIO2,
ALC269_FIXUP_SONY_VAIO,
@@ -6842,6 +6909,7 @@ enum {
ALC269_FIXUP_LIMIT_INT_MIC_BOOST,
ALC269VB_FIXUP_ASUS_ZENBOOK,
ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A,
+ ALC269VB_FIXUP_ASUS_MIC_NO_PRESENCE,
ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED,
ALC269VB_FIXUP_ORDISSIMO_EVE2,
ALC283_FIXUP_CHROME_BOOK,
@@ -7023,6 +7091,9 @@ enum {
ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED,
ALC285_FIXUP_HP_SPEAKERS_MICMUTE_LED,
ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE,
+ ALC287_FIXUP_LEGION_16ITHG6,
+ ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK,
+ ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN,
};
/* A special fixup for Lenovo C940 and Yoga Duet 7;
@@ -7427,6 +7498,15 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC269VB_FIXUP_ASUS_ZENBOOK,
},
+ [ALC269VB_FIXUP_ASUS_MIC_NO_PRESENCE] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x18, 0x01a110f0 }, /* use as headset mic */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MIC
+ },
[ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc269_fixup_limit_int_mic_boost,
@@ -8865,6 +8945,78 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
},
+ [ALC287_FIXUP_LEGION_16ITHG6] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc287_fixup_legion_16ithg6_speakers,
+ },
+ [ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ // enable left speaker
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x41 },
+
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xc },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x1a },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xf },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x42 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x10 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x40 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+
+ // enable right speaker
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x46 },
+
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xc },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x2a },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xf },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x46 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x10 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x44 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+
+ { },
+ },
+ },
+ [ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc287_fixup_yoga9_14iap7_bass_spk_pin,
+ .chained = true,
+ .chain_id = ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK,
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -9044,6 +9196,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x861f, "HP Elite Dragonfly G1", ALC285_FIXUP_HP_GPIO_AMP_INIT),
SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO),
+ SND_PCI_QUIRK(0x103c, 0x86e7, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
+ SND_PCI_QUIRK(0x103c, 0x86e8, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
SND_PCI_QUIRK(0x103c, 0x8716, "HP Elite Dragonfly G2 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
SND_PCI_QUIRK(0x103c, 0x8720, "HP EliteBook x360 1040 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
SND_PCI_QUIRK(0x103c, 0x8724, "HP EliteBook 850 G7", ALC285_FIXUP_HP_GPIO_LED),
@@ -9059,6 +9213,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
ALC285_FIXUP_HP_GPIO_AMP_INIT),
SND_PCI_QUIRK(0x103c, 0x8783, "HP ZBook Fury 15 G7 Mobile Workstation",
ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ SND_PCI_QUIRK(0x103c, 0x8786, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x8787, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x8788, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED),
@@ -9114,6 +9269,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8aa3, "HP ProBook 450 G9 (MB 8AA1)", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8aa8, "HP EliteBook 640 G9 (MB 8AA6)", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8aab, "HP EliteBook 650 G9 (MB 8AA9)", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8ad1, "HP EliteBook 840 14 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8ad2, "HP EliteBook 860 16 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -9126,8 +9283,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1271, "ASUS X430UN", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1043, 0x12af, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1313, "Asus K42JZ", ALC269VB_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
@@ -9145,6 +9304,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x19e1, "ASUS UX581LV", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1a8f, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x1b11, "ASUS UX431DA", ALC294_FIXUP_ASUS_COEF_1B),
SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
@@ -9203,6 +9363,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1558, 0x4018, "Clevo NV40M[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x4019, "Clevo NV40MZ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x4020, "Clevo NV40MB", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x4041, "Clevo NV4[15]PZ", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x40a1, "Clevo NL40GU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x40c1, "Clevo NL40[CZ]U", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x40d1, "Clevo NL41DU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
@@ -9230,6 +9391,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1558, 0x70f4, "Clevo NH77EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x70f6, "Clevo NH77DPQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x7716, "Clevo NS50PU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x7717, "Clevo NS70PU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x7718, "Clevo L140PU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x8228, "Clevo NR40BU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x8520, "Clevo NH50D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
@@ -9315,6 +9477,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x17aa, 0x31af, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340),
+ SND_PCI_QUIRK(0x17aa, 0x3801, "Lenovo Yoga9 14IAP7", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
SND_PCI_QUIRK(0x17aa, 0x3802, "Lenovo Yoga DuetITL 2021", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS),
SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940 / Yoga Duet 7", ALC298_FIXUP_LENOVO_C940_DUET7),
@@ -9329,6 +9492,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x384a, "Lenovo Yoga 7 15ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
SND_PCI_QUIRK(0x17aa, 0x3852, "Lenovo Yoga 7 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
SND_PCI_QUIRK(0x17aa, 0x3853, "Lenovo Yoga 7 15ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+ SND_PCI_QUIRK(0x17aa, 0x3855, "Legion 7 16ITHG6", ALC287_FIXUP_LEGION_16ITHG6),
+ SND_PCI_QUIRK(0x17aa, 0x3869, "Lenovo Yoga7 14IAL7", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
@@ -9560,6 +9725,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{.id = ALC285_FIXUP_HP_SPECTRE_X360, .name = "alc285-hp-spectre-x360"},
{.id = ALC285_FIXUP_HP_SPECTRE_X360_EB1, .name = "alc285-hp-spectre-x360-eb1"},
{.id = ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP, .name = "alc287-ideapad-bass-spk-amp"},
+ {.id = ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN, .name = "alc287-yoga9-bass-spk-pin"},
{.id = ALC623_FIXUP_LENOVO_THINKSTATION_P340, .name = "alc623-lenovo-thinkstation-p340"},
{.id = ALC255_FIXUP_ACER_HEADPHONE_AND_MIC, .name = "alc255-acer-headphone-and-mic"},
{.id = ALC285_FIXUP_HP_GPIO_AMP_INIT, .name = "alc285-hp-amp-init"},
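
Because the new pin fixup is also exported through alc269_fixup_models as "alc287-yoga9-bass-spk-pin" (added above), it can be forced on machines missing from the quirk table with the usual HD-audio model override; an illustrative modprobe configuration:

# /etc/modprobe.d/alsa.conf (illustrative path; any modprobe.d file works)
options snd-hda-intel model=alc287-yoga9-bass-spk-pin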
diff --git a/sound/pci/ice1712/quartet.c b/sound/pci/ice1712/quartet.c
index 0dfa093f7dca..20b3e8f94719 100644
--- a/sound/pci/ice1712/quartet.c
+++ b/sound/pci/ice1712/quartet.c
@@ -566,7 +566,7 @@ static int qtet_ain12_sw_put(struct snd_kcontrol *kcontrol,
{
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
unsigned int old, new, tmp, masked_old;
- old = new = get_scr(ice);
+ old = get_scr(ice);
masked_old = old & (SCR_AIN12_SEL1 | SCR_AIN12_SEL0);
tmp = ucontrol->value.integer.value[0];
if (tmp == 2)
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index d4528962ac34..453181ef6c94 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -9,12 +9,12 @@ endif
ifneq ($(CONFIG_SND_SOC_TOPOLOGY_KUNIT_TEST),)
# snd-soc-test-objs := soc-topology-test.o
-obj-$(CONFIG_SND_SOC_TOPOLOGY_KUNIT_TEST) := soc-topology-test.o
+obj-$(CONFIG_SND_SOC_TOPOLOGY_KUNIT_TEST) += soc-topology-test.o
endif
ifneq ($(CONFIG_SND_SOC_UTILS_KUNIT_TEST),)
# snd-soc-test-objs := soc-utils-test.o
-obj-$(CONFIG_SND_SOC_UTILS_KUNIT_TEST) := soc-utils-test.o
+obj-$(CONFIG_SND_SOC_UTILS_KUNIT_TEST) += soc-utils-test.o
endif
ifneq ($(CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM),)
diff --git a/sound/soc/adi/axi-i2s.c b/sound/soc/adi/axi-i2s.c
index 1289cb4e2988..b1342351bff4 100644
--- a/sound/soc/adi/axi-i2s.c
+++ b/sound/soc/adi/axi-i2s.c
@@ -161,6 +161,7 @@ static struct snd_soc_dai_driver axi_i2s_dai = {
static const struct snd_soc_component_driver axi_i2s_component = {
.name = "axi-i2s",
+ .legacy_dai_naming = 1,
};
static const struct regmap_config axi_i2s_regmap_config = {
diff --git a/sound/soc/adi/axi-spdif.c b/sound/soc/adi/axi-spdif.c
index 8d4a6cb4e5c5..51b968ea21da 100644
--- a/sound/soc/adi/axi-spdif.c
+++ b/sound/soc/adi/axi-spdif.c
@@ -167,6 +167,7 @@ static struct snd_soc_dai_driver axi_spdif_dai = {
static const struct snd_soc_component_driver axi_spdif_component = {
.name = "axi-spdif",
+ .legacy_dai_naming = 1,
};
static const struct regmap_config axi_spdif_regmap_config = {
diff --git a/sound/soc/amd/Kconfig b/sound/soc/amd/Kconfig
index 1381aec23048..08f5289dac54 100644
--- a/sound/soc/amd/Kconfig
+++ b/sound/soc/amd/Kconfig
@@ -23,6 +23,18 @@ config SND_SOC_AMD_CZ_RT5645_MACH
help
This option enables machine driver for rt5645.
+config SND_SOC_AMD_ST_ES8336_MACH
+ tristate "AMD ST support for ES8336"
+ select SND_SOC_ACPI if ACPI
+ select SND_SOC_ES8316
+ depends on SND_SOC_AMD_ACP && ACPI
+ depends on I2C
+ help
+	  This option enables the machine driver for the Jadeite platform
+	  using the ES8336 codec.
+	  Say M if you have such a device.
+	  If unsure, select "N".
+
config SND_SOC_AMD_ACP3x
tristate "AMD Audio Coprocessor-v3.x support"
depends on X86 && PCI
@@ -105,3 +117,13 @@ config SND_AMD_ACP_CONFIG
driver modules to use
source "sound/soc/amd/acp/Kconfig"
+
+config SND_SOC_AMD_RPL_ACP6x
+ tristate "AMD Audio Coprocessor-v6.2 RPL support"
+ depends on X86 && PCI
+ help
+	  This option enables Audio Coprocessor (ACP) v6.2 support on the
+	  AMD RPL platform. Enabling this flag builds the ACP PCI driver.
+	  Say M if you have such a device.
+	  If unsure, select "N".
diff --git a/sound/soc/amd/Makefile b/sound/soc/amd/Makefile
index 4b1f77930a4a..0592e7c5c407 100644
--- a/sound/soc/amd/Makefile
+++ b/sound/soc/amd/Makefile
@@ -2,12 +2,14 @@
acp_audio_dma-objs := acp-pcm-dma.o
snd-soc-acp-da7219mx98357-mach-objs := acp-da7219-max98357a.o
snd-soc-acp-rt5645-mach-objs := acp-rt5645.o
+snd-soc-acp-es8336-mach-objs := acp-es8336.o
snd-soc-acp-rt5682-mach-objs := acp3x-rt5682-max9836.o
snd-acp-config-objs := acp-config.o
obj-$(CONFIG_SND_SOC_AMD_ACP) += acp_audio_dma.o
obj-$(CONFIG_SND_SOC_AMD_CZ_DA7219MX98357_MACH) += snd-soc-acp-da7219mx98357-mach.o
obj-$(CONFIG_SND_SOC_AMD_CZ_RT5645_MACH) += snd-soc-acp-rt5645-mach.o
+obj-$(CONFIG_SND_SOC_AMD_ST_ES8336_MACH) += snd-soc-acp-es8336-mach.o
obj-$(CONFIG_SND_SOC_AMD_ACP3x) += raven/
obj-$(CONFIG_SND_SOC_AMD_RV_RT5682_MACH) += snd-soc-acp-rt5682-mach.o
obj-$(CONFIG_SND_SOC_AMD_RENOIR) += renoir/
@@ -15,3 +17,4 @@ obj-$(CONFIG_SND_SOC_AMD_ACP5x) += vangogh/
obj-$(CONFIG_SND_SOC_AMD_ACP6x) += yc/
obj-$(CONFIG_SND_SOC_AMD_ACP_COMMON) += acp/
obj-$(CONFIG_SND_AMD_ACP_CONFIG) += snd-acp-config.o
+obj-$(CONFIG_SND_SOC_AMD_RPL_ACP6x) += rpl/
diff --git a/sound/soc/amd/acp-config.c b/sound/soc/amd/acp-config.c
index 5cbc82eca4c9..0932473b6394 100644
--- a/sound/soc/amd/acp-config.c
+++ b/sound/soc/amd/acp-config.c
@@ -130,4 +130,34 @@ struct snd_soc_acpi_mach snd_soc_acpi_amd_sof_machines[] = {
};
EXPORT_SYMBOL(snd_soc_acpi_amd_sof_machines);
+struct snd_soc_acpi_mach snd_soc_acpi_amd_rmb_sof_machines[] = {
+ {
+ .id = "AMDI1019",
+ .drv_name = "rmb-dsp",
+ .pdata = &acp_quirk_data,
+ .fw_filename = "sof-rmb.ri",
+ .sof_tplg_filename = "sof-acp-rmb.tplg",
+ },
+ {
+ .id = "10508825",
+ .drv_name = "nau8825-max",
+ .pdata = &acp_quirk_data,
+ .machine_quirk = snd_soc_acpi_codec_list,
+ .quirk_data = &amp_max,
+ .fw_filename = "sof-rmb.ri",
+ .sof_tplg_filename = "sof-rmb-nau8825-max98360.tplg",
+ },
+ {
+ .id = "RTL5682",
+ .drv_name = "rt5682s-hs-rt1019",
+ .pdata = &acp_quirk_data,
+ .machine_quirk = snd_soc_acpi_codec_list,
+ .quirk_data = &amp_rt1019,
+ .fw_filename = "sof-rmb.ri",
+ .sof_tplg_filename = "sof-rmb-rt5682s-rt1019.tplg",
+ },
+ {},
+};
+EXPORT_SYMBOL(snd_soc_acpi_amd_rmb_sof_machines);
+
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/amd/acp-es8336.c b/sound/soc/amd/acp-es8336.c
new file mode 100644
index 000000000000..2fe8df86053a
--- /dev/null
+++ b/sound/soc/amd/acp-es8336.c
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Machine driver for AMD Stoney platform using ES8336 Codec
+ *
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ */
+
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc-dapm.h>
+#include <sound/jack.h>
+#include <linux/gpio.h>
+#include <linux/device.h>
+#include <linux/dmi.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+
+#include "acp.h"
+
+#define DUAL_CHANNEL 2
+#define DRV_NAME "acp2x_mach"
+#define ST_JADEITE 1
+#define ES8336_PLL_FREQ (48000 * 256)
+
+static unsigned long acp2x_machine_id;
+static struct snd_soc_jack st_jack;
+static struct device *codec_dev;
+static struct gpio_desc *gpio_pa;
+
+static int sof_es8316_speaker_power_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ if (SND_SOC_DAPM_EVENT_ON(event))
+ gpiod_set_value_cansleep(gpio_pa, true);
+ else
+ gpiod_set_value_cansleep(gpio_pa, false);
+
+ return 0;
+}
+
+static struct snd_soc_jack_pin st_es8316_jack_pins[] = {
+ {
+ .pin = "Headphone",
+ .mask = SND_JACK_HEADPHONE,
+ },
+ {
+ .pin = "Headset Mic",
+ .mask = SND_JACK_MICROPHONE,
+ },
+};
+
+static int st_es8336_init(struct snd_soc_pcm_runtime *rtd)
+{
+ int ret;
+ struct snd_soc_card *card;
+ struct snd_soc_component *codec;
+
+ codec = asoc_rtd_to_codec(rtd, 0)->component;
+ card = rtd->card;
+
+ ret = snd_soc_card_jack_new_pins(card, "Headset", SND_JACK_HEADSET | SND_JACK_BTN_0,
+ &st_jack, st_es8316_jack_pins,
+ ARRAY_SIZE(st_es8316_jack_pins));
+ if (ret) {
+ dev_err(card->dev, "HP jack creation failed %d\n", ret);
+ return ret;
+ }
+ snd_jack_set_key(st_jack.jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
+ ret = snd_soc_component_set_jack(codec, &st_jack, NULL);
+ if (ret) {
+ dev_err(rtd->dev, "Headset Jack call-back failed: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static const unsigned int st_channels[] = {
+ DUAL_CHANNEL,
+};
+
+static const unsigned int st_rates[] = {
+ 48000,
+};
+
+static const struct snd_pcm_hw_constraint_list st_constraints_rates = {
+ .count = ARRAY_SIZE(st_rates),
+ .list = st_rates,
+ .mask = 0,
+};
+
+static const struct snd_pcm_hw_constraint_list st_constraints_channels = {
+ .count = ARRAY_SIZE(st_channels),
+ .list = st_channels,
+ .mask = 0,
+};
+
+static int st_es8336_codec_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime;
+ struct snd_soc_pcm_runtime *rtd;
+ struct snd_soc_card *card;
+ struct acp_platform_info *machine;
+ struct snd_soc_dai *codec_dai;
+ int ret;
+
+ runtime = substream->runtime;
+ rtd = asoc_substream_to_rtd(substream);
+ card = rtd->card;
+ machine = snd_soc_card_get_drvdata(card);
+ codec_dai = asoc_rtd_to_codec(rtd, 0);
+ ret = snd_soc_dai_set_sysclk(codec_dai, 0, ES8336_PLL_FREQ, SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ dev_err(rtd->dev, "can't set codec sysclk: %d\n", ret);
+ return ret;
+ }
+ runtime->hw.channels_max = DUAL_CHANNEL;
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ &st_constraints_channels);
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
+ &st_constraints_rates);
+
+ machine->play_i2s_instance = I2S_MICSP_INSTANCE;
+ machine->cap_i2s_instance = I2S_MICSP_INSTANCE;
+ machine->capture_channel = CAP_CHANNEL0;
+ return 0;
+}
+
+static const struct snd_soc_ops st_es8336_ops = {
+ .startup = st_es8336_codec_startup,
+};
+
+SND_SOC_DAILINK_DEF(designware1,
+ DAILINK_COMP_ARRAY(COMP_CPU("designware-i2s.2.auto")));
+SND_SOC_DAILINK_DEF(codec,
+ DAILINK_COMP_ARRAY(COMP_CODEC("i2c-ESSX8336:00", "ES8316 HiFi")));
+SND_SOC_DAILINK_DEF(platform,
+ DAILINK_COMP_ARRAY(COMP_PLATFORM("acp_audio_dma.1.auto")));
+
+static struct snd_soc_dai_link st_dai_es8336[] = {
+ {
+ .name = "amdes8336",
+ .stream_name = "ES8336 HiFi Play",
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
+ | SND_SOC_DAIFMT_CBP_CFP,
+ .stop_dma_first = 1,
+ .dpcm_capture = 1,
+ .dpcm_playback = 1,
+ .init = st_es8336_init,
+ .ops = &st_es8336_ops,
+ SND_SOC_DAILINK_REG(designware1, codec, platform),
+ },
+};
+
+static const struct snd_soc_dapm_widget st_widgets[] = {
+ SND_SOC_DAPM_SPK("Speaker", NULL),
+ SND_SOC_DAPM_HP("Headphone", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ SND_SOC_DAPM_MIC("Internal Mic", NULL),
+
+ SND_SOC_DAPM_SUPPLY("Speaker Power", SND_SOC_NOPM, 0, 0,
+ sof_es8316_speaker_power_event,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+};
+
+static const struct snd_soc_dapm_route st_audio_route[] = {
+ {"Speaker", NULL, "HPOL"},
+ {"Speaker", NULL, "HPOR"},
+ {"Headphone", NULL, "HPOL"},
+ {"Headphone", NULL, "HPOR"},
+ {"MIC1", NULL, "Headset Mic"},
+ {"MIC2", NULL, "Internal Mic"},
+ {"Speaker", NULL, "Speaker Power"},
+};
+
+static const struct snd_kcontrol_new st_mc_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Speaker"),
+ SOC_DAPM_PIN_SWITCH("Headphone"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+ SOC_DAPM_PIN_SWITCH("Internal Mic"),
+};
+
+static const struct acpi_gpio_params pa_enable_gpio = { 0, 0, false };
+static const struct acpi_gpio_mapping acpi_es8336_gpios[] = {
+ { "pa-enable-gpios", &pa_enable_gpio, 1 },
+ { }
+};
+
+static int st_es8336_late_probe(struct snd_soc_card *card)
+{
+ struct acpi_device *adev;
+ int ret;
+
+ adev = acpi_dev_get_first_match_dev("ESSX8336", NULL, -1);
+ if (adev)
+ put_device(&adev->dev);
+ codec_dev = acpi_get_first_physical_node(adev);
+ if (!codec_dev)
+ dev_err(card->dev, "can not find codec dev\n");
+
+ ret = devm_acpi_dev_add_driver_gpios(codec_dev, acpi_es8336_gpios);
+ if (ret)
+ dev_warn(card->dev, "Failed to add driver gpios\n");
+
+ gpio_pa = gpiod_get_optional(codec_dev, "pa-enable", GPIOD_OUT_LOW);
+ if (IS_ERR(gpio_pa)) {
+ ret = dev_err_probe(card->dev, PTR_ERR(gpio_pa),
+ "could not get pa-enable GPIO\n");
+ put_device(codec_dev);
+ return ret;
+ }
+ return 0;
+}
+
+static struct snd_soc_card st_card = {
+ .name = "acpes8336",
+ .owner = THIS_MODULE,
+ .dai_link = st_dai_es8336,
+ .num_links = ARRAY_SIZE(st_dai_es8336),
+ .dapm_widgets = st_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(st_widgets),
+ .dapm_routes = st_audio_route,
+ .num_dapm_routes = ARRAY_SIZE(st_audio_route),
+ .controls = st_mc_controls,
+ .num_controls = ARRAY_SIZE(st_mc_controls),
+ .late_probe = st_es8336_late_probe,
+};
+
+static int st_es8336_quirk_cb(const struct dmi_system_id *id)
+{
+ acp2x_machine_id = ST_JADEITE;
+ return 1;
+}
+
+static const struct dmi_system_id st_es8336_quirk_table[] = {
+ {
+ .callback = st_es8336_quirk_cb,
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMD"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Jadeite"),
+ },
+ },
+ {
+ .callback = st_es8336_quirk_cb,
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "IP3 Technology CO.,Ltd."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ASN1D"),
+ },
+ },
+ {
+ .callback = st_es8336_quirk_cb,
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Standard"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ASN10"),
+ },
+ },
+ {}
+};
+
+static int st_es8336_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct snd_soc_card *card;
+ struct acp_platform_info *machine;
+
+ machine = devm_kzalloc(&pdev->dev, sizeof(struct acp_platform_info), GFP_KERNEL);
+ if (!machine)
+ return -ENOMEM;
+
+ dmi_check_system(st_es8336_quirk_table);
+ switch (acp2x_machine_id) {
+ case ST_JADEITE:
+ card = &st_card;
+ st_card.dev = &pdev->dev;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ platform_set_drvdata(pdev, card);
+ snd_soc_card_set_drvdata(card, machine);
+ ret = devm_snd_soc_register_card(&pdev->dev, &st_card);
+ if (ret) {
+ return dev_err_probe(&pdev->dev, ret,
+ "devm_snd_soc_register_card(%s) failed\n",
+ card->name);
+ }
+ return 0;
+}
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id st_audio_acpi_match[] = {
+ {"AMDI8336", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, st_audio_acpi_match);
+#endif
+
+static struct platform_driver st_mach_driver = {
+ .driver = {
+ .name = "st-es8316",
+ .acpi_match_table = ACPI_PTR(st_audio_acpi_match),
+ .pm = &snd_soc_pm_ops,
+ },
+ .probe = st_es8336_probe,
+};
+
+module_platform_driver(st_mach_driver);
+
+MODULE_AUTHOR("Vijendar.Mukunda@amd.com");
+MODULE_DESCRIPTION("st-es8316 audio support");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c
index 1cd2e70a57df..198358d28ea9 100644
--- a/sound/soc/amd/acp-pcm-dma.c
+++ b/sound/soc/amd/acp-pcm-dma.c
@@ -433,6 +433,7 @@ static void acp_dma_start(void __iomem *acp_mmio, u16 ch_num, bool is_circular)
case I2S_TO_ACP_DMA_CH_NUM:
case ACP_TO_I2S_DMA_BT_INSTANCE_CH_NUM:
case I2S_TO_ACP_DMA_BT_INSTANCE_CH_NUM:
+ case ACP_TO_I2S_DMA_MICSP_INSTANCE_CH_NUM:
dma_ctrl |= ACP_DMA_CNTL_0__DMAChIOCEn_MASK;
break;
default:
@@ -710,6 +711,13 @@ static irqreturn_t dma_irq_handler(int irq, void *arg)
acp_mmio, mmACP_EXTERNAL_INTR_STAT);
}
+ if ((intr_flag & BIT(ACP_TO_I2S_DMA_MICSP_INSTANCE_CH_NUM)) != 0) {
+ valid_irq = true;
+ snd_pcm_period_elapsed(irq_data->play_i2s_micsp_stream);
+ acp_reg_write((intr_flag & BIT(ACP_TO_I2S_DMA_MICSP_INSTANCE_CH_NUM)) << 16,
+ acp_mmio, mmACP_EXTERNAL_INTR_STAT);
+ }
+
if ((intr_flag & BIT(ACP_TO_I2S_DMA_BT_INSTANCE_CH_NUM)) != 0) {
valid_irq = true;
snd_pcm_period_elapsed(irq_data->play_i2sbt_stream);
@@ -807,7 +815,8 @@ static int acp_dma_open(struct snd_soc_component *component,
* stream is not closed
*/
if (!intr_data->play_i2ssp_stream && !intr_data->capture_i2ssp_stream &&
- !intr_data->play_i2sbt_stream && !intr_data->capture_i2sbt_stream)
+ !intr_data->play_i2sbt_stream && !intr_data->capture_i2sbt_stream &&
+ !intr_data->play_i2s_micsp_stream)
acp_reg_write(1, adata->acp_mmio, mmACP_EXTERNAL_INTR_ENB);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
@@ -867,6 +876,9 @@ static int acp_dma_hw_params(struct snd_soc_component *component,
case I2S_BT_INSTANCE:
val |= ACP_I2S_BT_16BIT_RESOLUTION_EN;
break;
+ case I2S_MICSP_INSTANCE:
+ val |= ACP_I2S_MICSP_16BIT_RESOLUTION_EN;
+ break;
case I2S_SP_INSTANCE:
default:
val |= ACP_I2S_SP_16BIT_RESOLUTION_EN;
@@ -876,6 +888,7 @@ static int acp_dma_hw_params(struct snd_soc_component *component,
case I2S_BT_INSTANCE:
val |= ACP_I2S_BT_16BIT_RESOLUTION_EN;
break;
+ case I2S_MICSP_INSTANCE:
case I2S_SP_INSTANCE:
default:
val |= ACP_I2S_MIC_16BIT_RESOLUTION_EN;
@@ -901,6 +914,27 @@ static int acp_dma_hw_params(struct snd_soc_component *component,
mmACP_I2S_BT_TRANSMIT_BYTE_CNT_LOW;
adata->play_i2sbt_stream = substream;
break;
+ case I2S_MICSP_INSTANCE:
+ switch (adata->asic_type) {
+ case CHIP_STONEY:
+ rtd->pte_offset = ACP_ST_PLAYBACK_PTE_OFFSET;
+ break;
+ default:
+ rtd->pte_offset = ACP_PLAYBACK_PTE_OFFSET;
+ }
+ rtd->ch1 = SYSRAM_TO_ACP_MICSP_INSTANCE_CH_NUM;
+ rtd->ch2 = ACP_TO_I2S_DMA_MICSP_INSTANCE_CH_NUM;
+ rtd->sram_bank = ACP_SRAM_BANK_1_ADDRESS;
+ rtd->destination = TO_ACP_I2S_2;
+ rtd->dma_dscr_idx_1 = PLAYBACK_START_DMA_DESCR_CH4;
+ rtd->dma_dscr_idx_2 = PLAYBACK_START_DMA_DESCR_CH5;
+ rtd->byte_cnt_high_reg_offset =
+ mmACP_I2S_MICSP_TRANSMIT_BYTE_CNT_HIGH;
+ rtd->byte_cnt_low_reg_offset =
+ mmACP_I2S_MICSP_TRANSMIT_BYTE_CNT_LOW;
+
+ adata->play_i2s_micsp_stream = substream;
+ break;
case I2S_SP_INSTANCE:
default:
switch (adata->asic_type) {
@@ -939,6 +973,7 @@ static int acp_dma_hw_params(struct snd_soc_component *component,
rtd->dma_curr_dscr = mmACP_DMA_CUR_DSCR_11;
adata->capture_i2sbt_stream = substream;
break;
+ case I2S_MICSP_INSTANCE:
case I2S_SP_INSTANCE:
default:
rtd->pte_offset = ACP_CAPTURE_PTE_OFFSET;
@@ -1160,6 +1195,9 @@ static int acp_dma_close(struct snd_soc_component *component,
case I2S_BT_INSTANCE:
adata->play_i2sbt_stream = NULL;
break;
+ case I2S_MICSP_INSTANCE:
+ adata->play_i2s_micsp_stream = NULL;
+ break;
case I2S_SP_INSTANCE:
default:
adata->play_i2ssp_stream = NULL;
@@ -1181,6 +1219,7 @@ static int acp_dma_close(struct snd_soc_component *component,
case I2S_BT_INSTANCE:
adata->capture_i2sbt_stream = NULL;
break;
+ case I2S_MICSP_INSTANCE:
case I2S_SP_INSTANCE:
default:
adata->capture_i2ssp_stream = NULL;
@@ -1197,7 +1236,8 @@ static int acp_dma_close(struct snd_soc_component *component,
* another stream is also not active.
*/
if (!adata->play_i2ssp_stream && !adata->capture_i2ssp_stream &&
- !adata->play_i2sbt_stream && !adata->capture_i2sbt_stream)
+ !adata->play_i2sbt_stream && !adata->capture_i2sbt_stream &&
+ !adata->play_i2s_micsp_stream)
acp_reg_write(0, adata->acp_mmio, mmACP_EXTERNAL_INTR_ENB);
kfree(rtd);
return 0;
@@ -1245,6 +1285,7 @@ static int acp_audio_probe(struct platform_device *pdev)
audio_drv_data->capture_i2ssp_stream = NULL;
audio_drv_data->play_i2sbt_stream = NULL;
audio_drv_data->capture_i2sbt_stream = NULL;
+ audio_drv_data->play_i2s_micsp_stream = NULL;
audio_drv_data->asic_type = *pdata;
@@ -1333,6 +1374,11 @@ static int acp_pcm_resume(struct device *dev)
config_acp_dma(adata->acp_mmio, rtd, adata->asic_type);
}
if (adata->asic_type != CHIP_CARRIZO) {
+ if (adata->play_i2s_micsp_stream &&
+ adata->play_i2s_micsp_stream->runtime) {
+ rtd = adata->play_i2s_micsp_stream->runtime->private_data;
+ config_acp_dma(adata->acp_mmio, rtd, adata->asic_type);
+ }
if (adata->play_i2sbt_stream &&
adata->play_i2sbt_stream->runtime) {
rtd = adata->play_i2sbt_stream->runtime->private_data;
diff --git a/sound/soc/amd/acp.h b/sound/soc/amd/acp.h
index db80a73aa593..b29bef90f886 100644
--- a/sound/soc/amd/acp.h
+++ b/sound/soc/amd/acp.h
@@ -55,6 +55,7 @@
#define I2S_SP_INSTANCE 0x01
#define I2S_BT_INSTANCE 0x02
+#define I2S_MICSP_INSTANCE 0x03
#define CAP_CHANNEL0 0x00
#define CAP_CHANNEL1 0x01
@@ -85,6 +86,10 @@
#define I2S_TO_ACP_DMA_BT_INSTANCE_CH_NUM 10
#define ACP_TO_SYSRAM_BT_INSTANCE_CH_NUM 11
+/* Playback DMA channels for I2S MICSP instance */
+#define SYSRAM_TO_ACP_MICSP_INSTANCE_CH_NUM 4
+#define ACP_TO_I2S_DMA_MICSP_INSTANCE_CH_NUM 5
+
#define NUM_DSCRS_PER_CHANNEL 2
#define PLAYBACK_START_DMA_DESCR_CH12 0
@@ -108,8 +113,15 @@
#define CAPTURE_START_DMA_DESCR_CH11 14
#define CAPTURE_END_DMA_DESCR_CH11 15
+/* I2S MICSP Instance DMA Descriptors */
+#define PLAYBACK_START_DMA_DESCR_CH4 0
+#define PLAYBACK_END_DMA_DESCR_CH4 1
+#define PLAYBACK_START_DMA_DESCR_CH5 2
+#define PLAYBACK_END_DMA_DESCR_CH5 3
+
#define mmACP_I2S_16BIT_RESOLUTION_EN 0x5209
#define ACP_I2S_MIC_16BIT_RESOLUTION_EN 0x01
+#define ACP_I2S_MICSP_16BIT_RESOLUTION_EN 0x01
#define ACP_I2S_SP_16BIT_RESOLUTION_EN 0x02
#define ACP_I2S_BT_16BIT_RESOLUTION_EN 0x04
#define ACP_BT_UART_PAD_SELECT_MASK 0x1
@@ -149,6 +161,7 @@ struct audio_drv_data {
struct snd_pcm_substream *capture_i2ssp_stream;
struct snd_pcm_substream *play_i2sbt_stream;
struct snd_pcm_substream *capture_i2sbt_stream;
+ struct snd_pcm_substream *play_i2s_micsp_stream;
void __iomem *acp_mmio;
u32 asic_type;
snd_pcm_sframes_t delay;
diff --git a/sound/soc/amd/acp/Kconfig b/sound/soc/amd/acp/Kconfig
index 9dae2719084c..ce0037810743 100644
--- a/sound/soc/amd/acp/Kconfig
+++ b/sound/soc/amd/acp/Kconfig
@@ -40,6 +40,17 @@ config SND_AMD_ASOC_RENOIR
help
This option enables Renoir I2S support on AMD platform.
+config SND_AMD_ASOC_REMBRANDT
+ tristate "AMD ACP ASOC Rembrandt Support"
+ select SND_SOC_AMD_ACP_PCM
+ select SND_SOC_AMD_ACP_I2S
+ select SND_SOC_AMD_ACP_PDM
+ depends on X86 && PCI
+ help
+	  This option enables Rembrandt I2S support on AMD platforms.
+	  Say Y if you want to enable audio on Rembrandt.
+	  If unsure, select "N".
+
config SND_SOC_AMD_MACH_COMMON
tristate
depends on X86 && PCI && I2C
@@ -49,6 +60,7 @@ config SND_SOC_AMD_MACH_COMMON
select SND_SOC_RT1019
select SND_SOC_MAX98357A
select SND_SOC_RT5682S
+ select SND_SOC_NAU8825
help
This option enables common Machine driver module for ACP.
diff --git a/sound/soc/amd/acp/Makefile b/sound/soc/amd/acp/Makefile
index 657ddfadf0bb..d9abb0ee5218 100644
--- a/sound/soc/amd/acp/Makefile
+++ b/sound/soc/amd/acp/Makefile
@@ -12,6 +12,7 @@ snd-acp-pci-objs := acp-pci.o
#platform specific driver
snd-acp-renoir-objs := acp-renoir.o
+snd-acp-rembrandt-objs := acp-rembrandt.o
#machine specific driver
snd-acp-mach-objs := acp-mach-common.o
@@ -24,6 +25,7 @@ obj-$(CONFIG_SND_SOC_AMD_ACP_PDM) += snd-acp-pdm.o
obj-$(CONFIG_SND_SOC_AMD_ACP_PCI) += snd-acp-pci.o
obj-$(CONFIG_SND_AMD_ASOC_RENOIR) += snd-acp-renoir.o
+obj-$(CONFIG_SND_AMD_ASOC_REMBRANDT) += snd-acp-rembrandt.o
obj-$(CONFIG_SND_SOC_AMD_MACH_COMMON) += snd-acp-mach.o
obj-$(CONFIG_SND_SOC_AMD_LEGACY_MACH) += snd-acp-legacy-mach.o
diff --git a/sound/soc/amd/acp/acp-i2s.c b/sound/soc/amd/acp/acp-i2s.c
index ce9aca8dd6f5..393f729ef561 100644
--- a/sound/soc/amd/acp/acp-i2s.c
+++ b/sound/soc/amd/acp/acp-i2s.c
@@ -30,11 +30,14 @@ static int acp_i2s_hwparams(struct snd_pcm_substream *substream, struct snd_pcm_
{
struct device *dev = dai->component->dev;
struct acp_dev_data *adata;
+ struct acp_resource *rsrc;
u32 val;
u32 xfer_resolution;
u32 reg_val;
+ u32 lrclk_div_val, bclk_div_val;
adata = snd_soc_dai_get_drvdata(dai);
+ rsrc = adata->rsrc;
/* These values are as per Hardware Spec */
switch (params_format(params)) {
@@ -63,6 +66,9 @@ static int acp_i2s_hwparams(struct snd_pcm_substream *substream, struct snd_pcm_
case I2S_SP_INSTANCE:
reg_val = ACP_I2STDM_ITER;
break;
+ case I2S_HS_INSTANCE:
+ reg_val = ACP_HSTDM_ITER;
+ break;
default:
dev_err(dev, "Invalid dai id %x\n", dai->driver->id);
return -EINVAL;
@@ -75,6 +81,9 @@ static int acp_i2s_hwparams(struct snd_pcm_substream *substream, struct snd_pcm_
case I2S_SP_INSTANCE:
reg_val = ACP_I2STDM_IRER;
break;
+ case I2S_HS_INSTANCE:
+ reg_val = ACP_HSTDM_IRER;
+ break;
default:
dev_err(dev, "Invalid dai id %x\n", dai->driver->id);
return -EINVAL;
@@ -86,6 +95,74 @@ static int acp_i2s_hwparams(struct snd_pcm_substream *substream, struct snd_pcm_
val = val | (xfer_resolution << 3);
writel(val, adata->acp_base + reg_val);
+ if (rsrc->soc_mclk) {
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ switch (params_rate(params)) {
+ case 8000:
+ bclk_div_val = 768;
+ break;
+ case 16000:
+ bclk_div_val = 384;
+ break;
+ case 24000:
+ bclk_div_val = 256;
+ break;
+ case 32000:
+ bclk_div_val = 192;
+ break;
+ case 44100:
+ case 48000:
+ bclk_div_val = 128;
+ break;
+ case 88200:
+ case 96000:
+ bclk_div_val = 64;
+ break;
+ case 192000:
+ bclk_div_val = 32;
+ break;
+ default:
+ return -EINVAL;
+ }
+ lrclk_div_val = 32;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ switch (params_rate(params)) {
+ case 8000:
+ bclk_div_val = 384;
+ break;
+ case 16000:
+ bclk_div_val = 192;
+ break;
+ case 24000:
+ bclk_div_val = 128;
+ break;
+ case 32000:
+ bclk_div_val = 96;
+ break;
+ case 44100:
+ case 48000:
+ bclk_div_val = 64;
+ break;
+ case 88200:
+ case 96000:
+ bclk_div_val = 32;
+ break;
+ case 192000:
+ bclk_div_val = 16;
+ break;
+ default:
+ return -EINVAL;
+ }
+ lrclk_div_val = 64;
+ break;
+ default:
+ return -EINVAL;
+ }
+ adata->lrclk_div = lrclk_div_val;
+ adata->bclk_div = bclk_div_val;
+ }
return 0;
}
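Note on the divider tables in acp_i2s_hwparams() above: for the 48 kHz-family rates (8, 16, 24, 32, 48, 96 and 192 kHz) every row satisfies rate * lrclk_div * bclk_div = 196,608,000, i.e. the values are consistent with the I2STDM master clock generator being fed from a fixed clock of roughly 196.608 MHz (an inference from the table, not something this patch states); the 44.1 kHz and 88.2 kHz cases simply reuse the 48 kHz and 96 kHz dividers. A minimal sketch of that relationship, with the assumed constant and helper name purely illustrative:

/* Sketch only: derive the bclk divider used by acp_i2s_hwparams() from an
 * assumed ~196.608 MHz ACP source clock; lrclk_div is the number of bit
 * clocks per frame (32 for S16_LE, 64 for S32_LE).
 */
#define ACP_ASSUMED_SCLK_HZ	196608000U

static unsigned int acp_bclk_div(unsigned int rate, unsigned int lrclk_div)
{
	return ACP_ASSUMED_SCLK_HZ / (rate * lrclk_div);
}

/* acp_bclk_div(48000, 32) == 128 and acp_bclk_div(192000, 64) == 16,
 * matching the switch statements above.
 */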
@@ -94,6 +171,7 @@ static int acp_i2s_trigger(struct snd_pcm_substream *substream, int cmd, struct
struct acp_stream *stream = substream->runtime->private_data;
struct device *dev = dai->component->dev;
struct acp_dev_data *adata = dev_get_drvdata(dev);
+ struct acp_resource *rsrc = adata->rsrc;
u32 val, period_bytes, reg_val, ier_val, water_val, buf_size, buf_reg;
period_bytes = frames_to_bytes(substream->runtime, substream->runtime->period_size);
@@ -118,6 +196,12 @@ static int acp_i2s_trigger(struct snd_pcm_substream *substream, int cmd, struct
ier_val = ACP_I2STDM_IER;
buf_reg = ACP_I2S_TX_RINGBUFSIZE;
break;
+ case I2S_HS_INSTANCE:
+ water_val = ACP_HS_TX_INTR_WATERMARK_SIZE;
+ reg_val = ACP_HSTDM_ITER;
+ ier_val = ACP_HSTDM_IER;
+ buf_reg = ACP_HS_TX_RINGBUFSIZE;
+ break;
default:
dev_err(dev, "Invalid dai id %x\n", dai->driver->id);
return -EINVAL;
@@ -136,6 +220,12 @@ static int acp_i2s_trigger(struct snd_pcm_substream *substream, int cmd, struct
ier_val = ACP_I2STDM_IER;
buf_reg = ACP_I2S_RX_RINGBUFSIZE;
break;
+ case I2S_HS_INSTANCE:
+ water_val = ACP_HS_RX_INTR_WATERMARK_SIZE;
+ reg_val = ACP_HSTDM_IRER;
+ ier_val = ACP_HSTDM_IER;
+ buf_reg = ACP_HS_RX_RINGBUFSIZE;
+ break;
default:
dev_err(dev, "Invalid dai id %x\n", dai->driver->id);
return -EINVAL;
@@ -147,6 +237,8 @@ static int acp_i2s_trigger(struct snd_pcm_substream *substream, int cmd, struct
val = val | BIT(0);
writel(val, adata->acp_base + reg_val);
writel(1, adata->acp_base + ier_val);
+ if (rsrc->soc_mclk)
+ acp_set_i2s_clk(adata, dai->driver->id);
return 0;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
@@ -159,6 +251,9 @@ static int acp_i2s_trigger(struct snd_pcm_substream *substream, int cmd, struct
case I2S_SP_INSTANCE:
reg_val = ACP_I2STDM_ITER;
break;
+ case I2S_HS_INSTANCE:
+ reg_val = ACP_HSTDM_ITER;
+ break;
default:
dev_err(dev, "Invalid dai id %x\n", dai->driver->id);
return -EINVAL;
@@ -172,6 +267,9 @@ static int acp_i2s_trigger(struct snd_pcm_substream *substream, int cmd, struct
case I2S_SP_INSTANCE:
reg_val = ACP_I2STDM_IRER;
break;
+ case I2S_HS_INSTANCE:
+ reg_val = ACP_HSTDM_IRER;
+ break;
default:
dev_err(dev, "Invalid dai id %x\n", dai->driver->id);
return -EINVAL;
@@ -187,6 +285,9 @@ static int acp_i2s_trigger(struct snd_pcm_substream *substream, int cmd, struct
if (!(readl(adata->acp_base + ACP_I2STDM_ITER) & BIT(0)) &&
!(readl(adata->acp_base + ACP_I2STDM_IRER) & BIT(0)))
writel(0, adata->acp_base + ACP_I2STDM_IER);
+ if (!(readl(adata->acp_base + ACP_HSTDM_ITER) & BIT(0)) &&
+ !(readl(adata->acp_base + ACP_HSTDM_IRER) & BIT(0)))
+ writel(0, adata->acp_base + ACP_HSTDM_IER);
return 0;
default:
return -EINVAL;
@@ -199,6 +300,7 @@ static int acp_i2s_prepare(struct snd_pcm_substream *substream, struct snd_soc_d
{
struct device *dev = dai->component->dev;
struct acp_dev_data *adata = dev_get_drvdata(dev);
+ struct acp_resource *rsrc = adata->rsrc;
struct acp_stream *stream = substream->runtime->private_data;
u32 reg_dma_size = 0, reg_fifo_size = 0, reg_fifo_addr = 0;
u32 phy_addr = 0, acp_fifo_addr = 0, ext_int_ctrl;
@@ -208,7 +310,7 @@ static int acp_i2s_prepare(struct snd_pcm_substream *substream, struct snd_soc_d
case I2S_SP_INSTANCE:
if (dir == SNDRV_PCM_STREAM_PLAYBACK) {
reg_dma_size = ACP_I2S_TX_DMA_SIZE;
- acp_fifo_addr = ACP_SRAM_PTE_OFFSET +
+ acp_fifo_addr = rsrc->sram_pte_offset +
SP_PB_FIFO_ADDR_OFFSET;
reg_fifo_addr = ACP_I2S_TX_FIFOADDR;
reg_fifo_size = ACP_I2S_TX_FIFOSIZE;
@@ -217,7 +319,7 @@ static int acp_i2s_prepare(struct snd_pcm_substream *substream, struct snd_soc_d
writel(phy_addr, adata->acp_base + ACP_I2S_TX_RINGBUFADDR);
} else {
reg_dma_size = ACP_I2S_RX_DMA_SIZE;
- acp_fifo_addr = ACP_SRAM_PTE_OFFSET +
+ acp_fifo_addr = rsrc->sram_pte_offset +
SP_CAPT_FIFO_ADDR_OFFSET;
reg_fifo_addr = ACP_I2S_RX_FIFOADDR;
reg_fifo_size = ACP_I2S_RX_FIFOSIZE;
@@ -228,7 +330,7 @@ static int acp_i2s_prepare(struct snd_pcm_substream *substream, struct snd_soc_d
case I2S_BT_INSTANCE:
if (dir == SNDRV_PCM_STREAM_PLAYBACK) {
reg_dma_size = ACP_BT_TX_DMA_SIZE;
- acp_fifo_addr = ACP_SRAM_PTE_OFFSET +
+ acp_fifo_addr = rsrc->sram_pte_offset +
BT_PB_FIFO_ADDR_OFFSET;
reg_fifo_addr = ACP_BT_TX_FIFOADDR;
reg_fifo_size = ACP_BT_TX_FIFOSIZE;
@@ -237,7 +339,7 @@ static int acp_i2s_prepare(struct snd_pcm_substream *substream, struct snd_soc_d
writel(phy_addr, adata->acp_base + ACP_BT_TX_RINGBUFADDR);
} else {
reg_dma_size = ACP_BT_RX_DMA_SIZE;
- acp_fifo_addr = ACP_SRAM_PTE_OFFSET +
+ acp_fifo_addr = rsrc->sram_pte_offset +
BT_CAPT_FIFO_ADDR_OFFSET;
reg_fifo_addr = ACP_BT_RX_FIFOADDR;
reg_fifo_size = ACP_BT_RX_FIFOSIZE;
@@ -246,6 +348,27 @@ static int acp_i2s_prepare(struct snd_pcm_substream *substream, struct snd_soc_d
writel(phy_addr, adata->acp_base + ACP_BT_RX_RINGBUFADDR);
}
break;
+ case I2S_HS_INSTANCE:
+ if (dir == SNDRV_PCM_STREAM_PLAYBACK) {
+ reg_dma_size = ACP_HS_TX_DMA_SIZE;
+ acp_fifo_addr = rsrc->sram_pte_offset +
+ HS_PB_FIFO_ADDR_OFFSET;
+ reg_fifo_addr = ACP_HS_TX_FIFOADDR;
+ reg_fifo_size = ACP_HS_TX_FIFOSIZE;
+
+ phy_addr = I2S_HS_TX_MEM_WINDOW_START + stream->reg_offset;
+ writel(phy_addr, adata->acp_base + ACP_HS_TX_RINGBUFADDR);
+ } else {
+ reg_dma_size = ACP_HS_RX_DMA_SIZE;
+ acp_fifo_addr = rsrc->sram_pte_offset +
+ HS_CAPT_FIFO_ADDR_OFFSET;
+ reg_fifo_addr = ACP_HS_RX_FIFOADDR;
+ reg_fifo_size = ACP_HS_RX_FIFOSIZE;
+
+ phy_addr = I2S_HS_RX_MEM_WINDOW_START + stream->reg_offset;
+ writel(phy_addr, adata->acp_base + ACP_HS_RX_RINGBUFADDR);
+ }
+ break;
default:
dev_err(dev, "Invalid dai id %x\n", dai->driver->id);
return -EINVAL;
@@ -255,11 +378,15 @@ static int acp_i2s_prepare(struct snd_pcm_substream *substream, struct snd_soc_d
writel(acp_fifo_addr, adata->acp_base + reg_fifo_addr);
writel(FIFO_SIZE, adata->acp_base + reg_fifo_size);
- ext_int_ctrl = readl(adata->acp_base + ACP_EXTERNAL_INTR_CNTL);
- ext_int_ctrl |= BIT(I2S_RX_THRESHOLD) | BIT(BT_RX_THRESHOLD)
- | BIT(I2S_TX_THRESHOLD) | BIT(BT_TX_THRESHOLD);
+ ext_int_ctrl = readl(ACP_EXTERNAL_INTR_CNTL(adata, rsrc->irqp_used));
+ ext_int_ctrl |= BIT(I2S_RX_THRESHOLD(rsrc->offset)) |
+ BIT(BT_RX_THRESHOLD(rsrc->offset)) |
+ BIT(I2S_TX_THRESHOLD(rsrc->offset)) |
+ BIT(BT_TX_THRESHOLD(rsrc->offset)) |
+ BIT(HS_RX_THRESHOLD(rsrc->offset)) |
+ BIT(HS_TX_THRESHOLD(rsrc->offset));
- writel(ext_int_ctrl, adata->acp_base + ACP_EXTERNAL_INTR_CNTL);
+ writel(ext_int_ctrl, ACP_EXTERNAL_INTR_CNTL(adata, rsrc->irqp_used));
return 0;
}
@@ -268,32 +395,45 @@ static int acp_i2s_startup(struct snd_pcm_substream *substream, struct snd_soc_d
{
struct acp_stream *stream = substream->runtime->private_data;
struct device *dev = dai->component->dev;
+ struct acp_dev_data *adata = dev_get_drvdata(dev);
+ struct acp_resource *rsrc = adata->rsrc;
unsigned int dir = substream->stream;
unsigned int irq_bit = 0;
switch (dai->driver->id) {
case I2S_SP_INSTANCE:
if (dir == SNDRV_PCM_STREAM_PLAYBACK) {
- irq_bit = BIT(I2S_TX_THRESHOLD);
+ irq_bit = BIT(I2S_TX_THRESHOLD(rsrc->offset));
stream->pte_offset = ACP_SRAM_SP_PB_PTE_OFFSET;
stream->fifo_offset = SP_PB_FIFO_ADDR_OFFSET;
} else {
- irq_bit = BIT(I2S_RX_THRESHOLD);
+ irq_bit = BIT(I2S_RX_THRESHOLD(rsrc->offset));
stream->pte_offset = ACP_SRAM_SP_CP_PTE_OFFSET;
stream->fifo_offset = SP_CAPT_FIFO_ADDR_OFFSET;
}
break;
case I2S_BT_INSTANCE:
if (dir == SNDRV_PCM_STREAM_PLAYBACK) {
- irq_bit = BIT(BT_TX_THRESHOLD);
+ irq_bit = BIT(BT_TX_THRESHOLD(rsrc->offset));
stream->pte_offset = ACP_SRAM_BT_PB_PTE_OFFSET;
stream->fifo_offset = BT_PB_FIFO_ADDR_OFFSET;
} else {
- irq_bit = BIT(BT_RX_THRESHOLD);
+ irq_bit = BIT(BT_RX_THRESHOLD(rsrc->offset));
stream->pte_offset = ACP_SRAM_BT_CP_PTE_OFFSET;
stream->fifo_offset = BT_CAPT_FIFO_ADDR_OFFSET;
}
break;
+ case I2S_HS_INSTANCE:
+ if (dir == SNDRV_PCM_STREAM_PLAYBACK) {
+ irq_bit = BIT(HS_TX_THRESHOLD(rsrc->offset));
+ stream->pte_offset = ACP_SRAM_HS_PB_PTE_OFFSET;
+ stream->fifo_offset = HS_PB_FIFO_ADDR_OFFSET;
+ } else {
+ irq_bit = BIT(HS_RX_THRESHOLD(rsrc->offset));
+ stream->pte_offset = ACP_SRAM_HS_CP_PTE_OFFSET;
+ stream->fifo_offset = HS_CAPT_FIFO_ADDR_OFFSET;
+ }
+ break;
default:
dev_err(dev, "Invalid dai id %x\n", dai->driver->id);
return -EINVAL;
@@ -319,6 +459,7 @@ int asoc_acp_i2s_probe(struct snd_soc_dai *dai)
{
struct device *dev = dai->component->dev;
struct acp_dev_data *adata = dev_get_drvdata(dev);
+ struct acp_resource *rsrc = adata->rsrc;
unsigned int val;
if (!adata->acp_base) {
@@ -326,8 +467,8 @@ int asoc_acp_i2s_probe(struct snd_soc_dai *dai)
return -EINVAL;
}
- val = readl(adata->acp_base + ACP_I2S_PIN_CONFIG);
- if (val != I2S_MODE) {
+ val = readl(adata->acp_base + rsrc->i2s_pin_cfg_offset);
+ if (val != rsrc->i2s_mode) {
dev_err(dev, "I2S Mode not supported val %x\n", val);
return -EINVAL;
}
diff --git a/sound/soc/amd/acp/acp-legacy-mach.c b/sound/soc/amd/acp/acp-legacy-mach.c
index 7f04a048ca3a..1f4878ff7d37 100644
--- a/sound/soc/amd/acp/acp-legacy-mach.c
+++ b/sound/soc/amd/acp/acp-legacy-mach.c
@@ -47,6 +47,28 @@ static struct acp_card_drvdata rt5682s_rt1019_data = {
.dmic_codec_id = DMIC,
};
+static struct acp_card_drvdata max_nau8825_data = {
+ .hs_cpu_id = I2S_HS,
+ .amp_cpu_id = I2S_HS,
+ .dmic_cpu_id = DMIC,
+ .hs_codec_id = NAU8825,
+ .amp_codec_id = MAX98360A,
+ .dmic_codec_id = DMIC,
+ .soc_mclk = true,
+ .platform = REMBRANDT,
+};
+
+static struct acp_card_drvdata rt5682s_rt1019_rmb_data = {
+ .hs_cpu_id = I2S_HS,
+ .amp_cpu_id = I2S_HS,
+ .dmic_cpu_id = DMIC,
+ .hs_codec_id = RT5682S,
+ .amp_codec_id = RT1019,
+ .dmic_codec_id = DMIC,
+ .soc_mclk = true,
+ .platform = REMBRANDT,
+};
+
static const struct snd_kcontrol_new acp_controls[] = {
SOC_DAPM_PIN_SWITCH("Headphone Jack"),
SOC_DAPM_PIN_SWITCH("Headset Mic"),
@@ -112,6 +134,14 @@ static const struct platform_device_id board_ids[] = {
.name = "acp3xalc5682s1019",
.driver_data = (kernel_ulong_t)&rt5682s_rt1019_data,
},
+ {
+ .name = "rmb-nau8825-max",
+ .driver_data = (kernel_ulong_t)&max_nau8825_data,
+ },
+ {
+ .name = "rmb-rt5682s-rt1019",
+ .driver_data = (kernel_ulong_t)&rt5682s_rt1019_rmb_data,
+ },
{ }
};
static struct platform_driver acp_asoc_audio = {
@@ -130,4 +160,6 @@ MODULE_DESCRIPTION("ACP chrome audio support");
MODULE_ALIAS("platform:acp3xalc56821019");
MODULE_ALIAS("platform:acp3xalc5682sm98360");
MODULE_ALIAS("platform:acp3xalc5682s1019");
+MODULE_ALIAS("platform:rmb-nau8825-max");
+MODULE_ALIAS("platform:rmb-rt5682s-rt1019");
MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/amd/acp/acp-mach-common.c b/sound/soc/amd/acp/acp-mach-common.c
index 6ae454bf60af..f0c49127aad1 100644
--- a/sound/soc/amd/acp/acp-mach-common.c
+++ b/sound/soc/amd/acp/acp-mach-common.c
@@ -24,6 +24,7 @@
#include "../../codecs/rt5682.h"
#include "../../codecs/rt1019.h"
#include "../../codecs/rt5682s.h"
+#include "../../codecs/nau8825.h"
#include "acp-mach.h"
#define PCO_PLAT_CLK 48000000
@@ -148,9 +149,14 @@ static int acp_card_hs_startup(struct snd_pcm_substream *substream)
struct acp_card_drvdata *drvdata = card->drvdata;
struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int ret;
+ unsigned int fmt;
- ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
- | SND_SOC_DAIFMT_CBP_CFP);
+ if (drvdata->soc_mclk)
+ fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBC_CFC;
+ else
+ fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBP_CFP;
+
+ ret = snd_soc_dai_set_fmt(codec_dai, fmt);
if (ret < 0) {
dev_err(rtd->card->dev, "Failed to set dai fmt: %d\n", ret);
return ret;
@@ -161,10 +167,13 @@ static int acp_card_hs_startup(struct snd_pcm_substream *substream)
&constraints_channels);
snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
&constraints_rates);
-
- ret = acp_clk_enable(drvdata);
- if (ret < 0)
- dev_err(rtd->card->dev, "Failed to enable HS clk: %d\n", ret);
+ if (!drvdata->soc_mclk) {
+ ret = acp_clk_enable(drvdata);
+ if (ret < 0) {
+ dev_err(rtd->card->dev, "Failed to enable HS clk: %d\n", ret);
+ return ret;
+ }
+ }
return ret;
}
@@ -175,7 +184,8 @@ static void acp_card_shutdown(struct snd_pcm_substream *substream)
struct snd_soc_card *card = rtd->card;
struct acp_card_drvdata *drvdata = card->drvdata;
- clk_disable_unprepare(drvdata->wclk);
+ if (!drvdata->soc_mclk)
+ clk_disable_unprepare(drvdata->wclk);
}
static const struct snd_soc_ops acp_card_rt5682_ops = {
@@ -199,6 +209,7 @@ static int acp_card_rt5682s_init(struct snd_soc_pcm_runtime *rtd)
struct acp_card_drvdata *drvdata = card->drvdata;
struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
struct snd_soc_component *component = codec_dai->component;
+ unsigned int fmt;
int ret;
dev_info(rtd->dev, "codec dai name = %s\n", codec_dai->name);
@@ -206,8 +217,12 @@ static int acp_card_rt5682s_init(struct snd_soc_pcm_runtime *rtd)
if (drvdata->hs_codec_id != RT5682S)
return -EINVAL;
- ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
- | SND_SOC_DAIFMT_CBP_CFP);
+ if (drvdata->soc_mclk)
+ fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBC_CFC;
+ else
+ fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBP_CFP;
+
+ ret = snd_soc_dai_set_fmt(codec_dai, fmt);
if (ret < 0) {
dev_err(rtd->card->dev, "Failed to set dai fmt: %d\n", ret);
return ret;
@@ -234,8 +249,10 @@ static int acp_card_rt5682s_init(struct snd_soc_pcm_runtime *rtd)
return ret;
}
- drvdata->wclk = clk_get(component->dev, "rt5682-dai-wclk");
- drvdata->bclk = clk_get(component->dev, "rt5682-dai-bclk");
+ if (!drvdata->soc_mclk) {
+ drvdata->wclk = clk_get(component->dev, "rt5682-dai-wclk");
+ drvdata->bclk = clk_get(component->dev, "rt5682-dai-bclk");
+ }
ret = snd_soc_card_jack_new(card, "Headset Jack",
SND_JACK_HEADSET | SND_JACK_LINEOUT |
@@ -363,7 +380,7 @@ static int acp_card_amp_startup(struct snd_pcm_substream *substream)
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct snd_soc_card *card = rtd->card;
struct acp_card_drvdata *drvdata = card->drvdata;
- int ret;
+ int ret = 0;
runtime->hw.channels_max = DUAL_CHANNEL;
snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
@@ -371,10 +388,13 @@ static int acp_card_amp_startup(struct snd_pcm_substream *substream)
snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
&constraints_rates);
- ret = acp_clk_enable(drvdata);
- if (ret < 0)
- dev_err(rtd->card->dev, "Failed to enable AMP clk: %d\n", ret);
-
+ if (!drvdata->soc_mclk) {
+ ret = acp_clk_enable(drvdata);
+ if (ret < 0) {
+ dev_err(rtd->card->dev, "Failed to enable AMP clk: %d\n", ret);
+ return ret;
+ }
+ }
return ret;
}
@@ -409,6 +429,104 @@ static const struct snd_soc_ops acp_card_maxim_ops = {
.shutdown = acp_card_shutdown,
};
+/* Declare nau8825 codec components */
+SND_SOC_DAILINK_DEF(nau8825,
+ DAILINK_COMP_ARRAY(COMP_CODEC("i2c-10508825:00", "nau8825-hifi")));
+
+static const struct snd_soc_dapm_route nau8825_map[] = {
+ { "Headphone Jack", NULL, "HPOL" },
+ { "Headphone Jack", NULL, "HPOR" },
+};
+
+static int acp_card_nau8825_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_card *card = rtd->card;
+ struct acp_card_drvdata *drvdata = card->drvdata;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_component *component = codec_dai->component;
+ unsigned int fmt;
+ int ret;
+
+ dev_info(rtd->dev, "codec dai name = %s\n", codec_dai->name);
+
+ if (drvdata->hs_codec_id != NAU8825)
+ return -EINVAL;
+
+ if (drvdata->soc_mclk)
+ fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBC_CFC;
+ else
+ fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBP_CFP;
+
+ ret = snd_soc_dai_set_fmt(codec_dai, fmt);
+ if (ret < 0) {
+ dev_err(rtd->card->dev, "Failed to set dai fmt: %d\n", ret);
+ return ret;
+ }
+ ret = snd_soc_card_jack_new(card, "Headset Jack",
+ SND_JACK_HEADSET | SND_JACK_LINEOUT |
+ SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3,
+ &pco_jack);
+ if (ret) {
+ dev_err(card->dev, "HP jack creation failed %d\n", ret);
+ return ret;
+ }
+
+ snd_jack_set_key(pco_jack.jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
+ snd_jack_set_key(pco_jack.jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
+ snd_jack_set_key(pco_jack.jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
+ snd_jack_set_key(pco_jack.jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);
+
+ ret = snd_soc_component_set_jack(component, &pco_jack, NULL);
+ if (ret) {
+ dev_err(rtd->dev, "Headset Jack call-back failed: %d\n", ret);
+ return ret;
+ }
+
+ return snd_soc_dapm_add_routes(&rtd->card->dapm, nau8825_map, ARRAY_SIZE(nau8825_map));
+}
+
+static int acp_nau8825_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ int ret;
+
+ ret = snd_soc_dai_set_sysclk(codec_dai, NAU8825_CLK_FLL_FS,
+ (48000 * 256), SND_SOC_CLOCK_IN);
+ if (ret < 0)
+ dev_err(rtd->dev, "snd_soc_dai_set_sysclk err = %d\n", ret);
+
+ ret = snd_soc_dai_set_pll(codec_dai, 0, 0, params_rate(params),
+ params_rate(params) * 256);
+ if (ret < 0) {
+ dev_err(rtd->dev, "can't set FLL: %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int acp_nau8825_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ runtime->hw.channels_max = 2;
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_channels);
+
+ runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
+ snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
+ return 0;
+}
+
+static const struct snd_soc_ops acp_card_nau8825_ops = {
+ .startup = acp_nau8825_startup,
+ .hw_params = acp_nau8825_hw_params,
+};
+
/* Declare DMIC codec components */
SND_SOC_DAILINK_DEF(dmic_codec,
DAILINK_COMP_ARRAY(COMP_CODEC("dmic-codec", "dmic-hifi")));
@@ -427,6 +545,12 @@ static struct snd_soc_dai_link_component platform_component[] = {
}
};
+static struct snd_soc_dai_link_component platform_rmb_component[] = {
+ {
+ .name = "acp_asoc_rembrandt.0",
+ }
+};
+
static struct snd_soc_dai_link_component sof_component[] = {
{
.name = "0000:04:00.5",
@@ -435,8 +559,12 @@ static struct snd_soc_dai_link_component sof_component[] = {
SND_SOC_DAILINK_DEF(i2s_sp,
DAILINK_COMP_ARRAY(COMP_CPU("acp-i2s-sp")));
+SND_SOC_DAILINK_DEF(i2s_hs,
+ DAILINK_COMP_ARRAY(COMP_CPU("acp-i2s-hs")));
SND_SOC_DAILINK_DEF(sof_sp,
DAILINK_COMP_ARRAY(COMP_CPU("acp-sof-sp")));
+SND_SOC_DAILINK_DEF(sof_hs,
+ DAILINK_COMP_ARRAY(COMP_CPU("acp-sof-hs")));
SND_SOC_DAILINK_DEF(sof_dmic,
DAILINK_COMP_ARRAY(COMP_CPU("acp-sof-dmic")));
SND_SOC_DAILINK_DEF(pdm_dmic,
@@ -491,6 +619,37 @@ int acp_sofdsp_dai_links_create(struct snd_soc_card *card)
i++;
}
+ if (drv_data->hs_cpu_id == I2S_HS) {
+ links[i].name = "acp-headset-codec";
+ links[i].id = HEADSET_BE_ID;
+ links[i].cpus = sof_hs;
+ links[i].num_cpus = ARRAY_SIZE(sof_hs);
+ links[i].platforms = sof_component;
+ links[i].num_platforms = ARRAY_SIZE(sof_component);
+ links[i].dpcm_playback = 1;
+ links[i].dpcm_capture = 1;
+ links[i].nonatomic = true;
+ links[i].no_pcm = 1;
+ if (!drv_data->hs_codec_id) {
+ /* Use dummy codec if codec id not specified */
+ links[i].codecs = dummy_codec;
+ links[i].num_codecs = ARRAY_SIZE(dummy_codec);
+ }
+ if (drv_data->hs_codec_id == NAU8825) {
+ links[i].codecs = nau8825;
+ links[i].num_codecs = ARRAY_SIZE(nau8825);
+ links[i].init = acp_card_nau8825_init;
+ links[i].ops = &acp_card_nau8825_ops;
+ }
+ if (drv_data->hs_codec_id == RT5682S) {
+ links[i].codecs = rt5682s;
+ links[i].num_codecs = ARRAY_SIZE(rt5682s);
+ links[i].init = acp_card_rt5682s_init;
+ links[i].ops = &acp_card_rt5682s_ops;
+ }
+ i++;
+ }
+
if (drv_data->amp_cpu_id == I2S_SP) {
links[i].name = "acp-amp-codec";
links[i].id = AMP_BE_ID;
@@ -523,6 +682,38 @@ int acp_sofdsp_dai_links_create(struct snd_soc_card *card)
i++;
}
+ if (drv_data->amp_cpu_id == I2S_HS) {
+ links[i].name = "acp-amp-codec";
+ links[i].id = AMP_BE_ID;
+ links[i].cpus = sof_hs;
+ links[i].num_cpus = ARRAY_SIZE(sof_hs);
+ links[i].platforms = sof_component;
+ links[i].num_platforms = ARRAY_SIZE(sof_component);
+ links[i].dpcm_playback = 1;
+ links[i].nonatomic = true;
+ links[i].no_pcm = 1;
+ if (!drv_data->amp_codec_id) {
+ /* Use dummy codec if codec id not specified */
+ links[i].codecs = dummy_codec;
+ links[i].num_codecs = ARRAY_SIZE(dummy_codec);
+ }
+ if (drv_data->amp_codec_id == MAX98360A) {
+ links[i].codecs = max98360a;
+ links[i].num_codecs = ARRAY_SIZE(max98360a);
+ links[i].ops = &acp_card_maxim_ops;
+ links[i].init = acp_card_maxim_init;
+ }
+ if (drv_data->amp_codec_id == RT1019) {
+ links[i].codecs = rt1019;
+ links[i].num_codecs = ARRAY_SIZE(rt1019);
+ links[i].ops = &acp_card_rt1019_ops;
+ links[i].init = acp_card_rt1019_init;
+ card->codec_conf = rt1019_conf;
+ card->num_configs = ARRAY_SIZE(rt1019_conf);
+ }
+ i++;
+ }
+
if (drv_data->dmic_cpu_id == DMIC) {
links[i].name = "acp-dmic-codec";
links[i].id = DMIC_BE_ID;
@@ -591,6 +782,40 @@ int acp_legacy_dai_links_create(struct snd_soc_card *card)
i++;
}
+ if (drv_data->hs_cpu_id == I2S_HS) {
+ links[i].name = "acp-headset-codec";
+ links[i].id = HEADSET_BE_ID;
+ links[i].cpus = i2s_hs;
+ links[i].num_cpus = ARRAY_SIZE(i2s_hs);
+ if (drv_data->platform == REMBRANDT) {
+ links[i].platforms = platform_rmb_component;
+ links[i].num_platforms = ARRAY_SIZE(platform_rmb_component);
+ } else {
+ links[i].platforms = platform_component;
+ links[i].num_platforms = ARRAY_SIZE(platform_component);
+ }
+ links[i].dpcm_playback = 1;
+ links[i].dpcm_capture = 1;
+ if (!drv_data->hs_codec_id) {
+ /* Use dummy codec if codec id not specified */
+ links[i].codecs = dummy_codec;
+ links[i].num_codecs = ARRAY_SIZE(dummy_codec);
+ }
+ if (drv_data->hs_codec_id == NAU8825) {
+ links[i].codecs = nau8825;
+ links[i].num_codecs = ARRAY_SIZE(nau8825);
+ links[i].init = acp_card_nau8825_init;
+ links[i].ops = &acp_card_nau8825_ops;
+ }
+ if (drv_data->hs_codec_id == RT5682S) {
+ links[i].codecs = rt5682s;
+ links[i].num_codecs = ARRAY_SIZE(rt5682s);
+ links[i].init = acp_card_rt5682s_init;
+ links[i].ops = &acp_card_rt5682s_ops;
+ }
+ i++;
+ }
+
if (drv_data->amp_cpu_id == I2S_SP) {
links[i].name = "acp-amp-codec";
links[i].id = AMP_BE_ID;
@@ -621,6 +846,41 @@ int acp_legacy_dai_links_create(struct snd_soc_card *card)
i++;
}
+ if (drv_data->amp_cpu_id == I2S_HS) {
+ links[i].name = "acp-amp-codec";
+ links[i].id = AMP_BE_ID;
+ links[i].cpus = i2s_hs;
+ links[i].num_cpus = ARRAY_SIZE(i2s_hs);
+ if (drv_data->platform == REMBRANDT) {
+ links[i].platforms = platform_rmb_component;
+ links[i].num_platforms = ARRAY_SIZE(platform_rmb_component);
+ } else {
+ links[i].platforms = platform_component;
+ links[i].num_platforms = ARRAY_SIZE(platform_component);
+ }
+ links[i].dpcm_playback = 1;
+ if (!drv_data->amp_codec_id) {
+ /* Use dummy codec if codec id not specified */
+ links[i].codecs = dummy_codec;
+ links[i].num_codecs = ARRAY_SIZE(dummy_codec);
+ }
+ if (drv_data->amp_codec_id == MAX98360A) {
+ links[i].codecs = max98360a;
+ links[i].num_codecs = ARRAY_SIZE(max98360a);
+ links[i].ops = &acp_card_maxim_ops;
+ links[i].init = acp_card_maxim_init;
+ }
+ if (drv_data->amp_codec_id == RT1019) {
+ links[i].codecs = rt1019;
+ links[i].num_codecs = ARRAY_SIZE(rt1019);
+ links[i].ops = &acp_card_rt1019_ops;
+ links[i].init = acp_card_rt1019_init;
+ card->codec_conf = rt1019_conf;
+ card->num_configs = ARRAY_SIZE(rt1019_conf);
+ }
+ i++;
+ }
+
if (drv_data->dmic_cpu_id == DMIC) {
links[i].name = "acp-dmic-codec";
links[i].id = DMIC_BE_ID;
@@ -634,8 +894,13 @@ int acp_legacy_dai_links_create(struct snd_soc_card *card)
}
links[i].cpus = pdm_dmic;
links[i].num_cpus = ARRAY_SIZE(pdm_dmic);
- links[i].platforms = platform_component;
- links[i].num_platforms = ARRAY_SIZE(platform_component);
+ if (drv_data->platform == REMBRANDT) {
+ links[i].platforms = platform_rmb_component;
+ links[i].num_platforms = ARRAY_SIZE(platform_rmb_component);
+ } else {
+ links[i].platforms = platform_component;
+ links[i].num_platforms = ARRAY_SIZE(platform_component);
+ }
links[i].ops = &acp_card_dmic_ops;
links[i].dpcm_capture = 1;
}
diff --git a/sound/soc/amd/acp/acp-mach.h b/sound/soc/amd/acp/acp-mach.h
index 5dc47cfbff10..20583ef902df 100644
--- a/sound/soc/amd/acp/acp-mach.h
+++ b/sound/soc/amd/acp/acp-mach.h
@@ -26,6 +26,7 @@ enum be_id {
enum cpu_endpoints {
NONE = 0,
+ I2S_HS,
I2S_SP,
I2S_BT,
DMIC,
@@ -37,6 +38,12 @@ enum codec_endpoints {
RT1019,
MAX98360A,
RT5682S,
+ NAU8825,
+};
+
+enum platform_end_point {
+ RENOIR = 0,
+ REMBRANDT,
};
struct acp_card_drvdata {
@@ -47,8 +54,10 @@ struct acp_card_drvdata {
unsigned int amp_codec_id;
unsigned int dmic_codec_id;
unsigned int dai_fmt;
+ unsigned int platform;
struct clk *wclk;
struct clk *bclk;
+ bool soc_mclk;
};
int acp_sofdsp_dai_links_create(struct snd_soc_card *card);
diff --git a/sound/soc/amd/acp/acp-pci.c b/sound/soc/amd/acp/acp-pci.c
index c893963ee2d0..2c8e960cc9a6 100644
--- a/sound/soc/amd/acp/acp-pci.c
+++ b/sound/soc/amd/acp/acp-pci.c
@@ -29,7 +29,7 @@
static struct platform_device *dmic_dev;
static struct platform_device *pdev;
-static const struct resource acp3x_res[] = {
+static const struct resource acp_res[] = {
{
.start = 0,
.end = ACP3x_REG_END - ACP3x_REG_START,
@@ -70,36 +70,49 @@ static int acp_pci_probe(struct pci_dev *pci, const struct pci_device_id *pci_id
ret = pci_request_regions(pci, "AMD ACP3x audio");
if (ret < 0) {
dev_err(&pci->dev, "pci_request_regions failed\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto disable_pci;
}
pci_set_master(pci);
+ res_acp = acp_res;
+ num_res = ARRAY_SIZE(acp_res);
+
switch (pci->revision) {
case 0x01:
- res_acp = acp3x_res;
- num_res = ARRAY_SIZE(acp3x_res);
chip->name = "acp_asoc_renoir";
chip->acp_rev = ACP3X_DEV;
break;
+ case 0x6f:
+ chip->name = "acp_asoc_rembrandt";
+ chip->acp_rev = ACP6X_DEV;
+ break;
default:
dev_err(dev, "Unsupported device revision:0x%x\n", pci->revision);
- return -EINVAL;
+ ret = -EINVAL;
+ goto release_regions;
}
dmic_dev = platform_device_register_data(dev, "dmic-codec", PLATFORM_DEVID_NONE, NULL, 0);
if (IS_ERR(dmic_dev)) {
dev_err(dev, "failed to create DMIC device\n");
- return PTR_ERR(dmic_dev);
+ ret = PTR_ERR(dmic_dev);
+ goto release_regions;
}
addr = pci_resource_start(pci, 0);
chip->base = devm_ioremap(&pci->dev, addr, pci_resource_len(pci, 0));
+ if (!chip->base) {
+ ret = -ENOMEM;
+ goto release_regions;
+ }
res = devm_kzalloc(&pci->dev, sizeof(struct resource) * num_res, GFP_KERNEL);
if (!res) {
platform_device_unregister(dmic_dev);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto release_regions;
}
for (i = 0; i < num_res; i++, res_acp++) {
@@ -128,9 +141,17 @@ static int acp_pci_probe(struct pci_dev *pci, const struct pci_device_id *pci_id
dev_err(&pci->dev, "cannot register %s device\n", pdevinfo.name);
platform_device_unregister(dmic_dev);
ret = PTR_ERR(pdev);
+ goto release_regions;
}
return ret;
+
+release_regions:
+ pci_release_regions(pci);
+disable_pci:
+ pci_disable_device(pci);
+
+ return ret;
};
static void acp_pci_remove(struct pci_dev *pci)
diff --git a/sound/soc/amd/acp/acp-pdm.c b/sound/soc/amd/acp/acp-pdm.c
index 424c6e0bb9d6..66ec6b6a5972 100644
--- a/sound/soc/amd/acp/acp-pdm.c
+++ b/sound/soc/amd/acp/acp-pdm.c
@@ -160,9 +160,9 @@ static int acp_dmic_dai_startup(struct snd_pcm_substream *substream,
stream->reg_offset = ACP_REGION2_OFFSET;
/* Enable DMIC Interrupts */
- ext_int_ctrl = readl(adata->acp_base + ACP_EXTERNAL_INTR_CNTL);
+ ext_int_ctrl = readl(ACP_EXTERNAL_INTR_CNTL(adata, 0));
ext_int_ctrl |= PDM_DMA_INTR_MASK;
- writel(ext_int_ctrl, adata->acp_base + ACP_EXTERNAL_INTR_CNTL);
+ writel(ext_int_ctrl, ACP_EXTERNAL_INTR_CNTL(adata, 0));
return 0;
}
@@ -174,10 +174,10 @@ static void acp_dmic_dai_shutdown(struct snd_pcm_substream *substream,
struct acp_dev_data *adata = dev_get_drvdata(dev);
u32 ext_int_ctrl;
- /* Disable DMIC interrrupts */
- ext_int_ctrl = readl(adata->acp_base + ACP_EXTERNAL_INTR_CNTL);
+ /* Disable DMIC interrupts */
+ ext_int_ctrl = readl(ACP_EXTERNAL_INTR_CNTL(adata, 0));
ext_int_ctrl |= ~PDM_DMA_INTR_MASK;
- writel(ext_int_ctrl, adata->acp_base + ACP_EXTERNAL_INTR_CNTL);
+ writel(ext_int_ctrl, ACP_EXTERNAL_INTR_CNTL(adata, 0));
}
const struct snd_soc_dai_ops acp_dmic_dai_ops = {
diff --git a/sound/soc/amd/acp/acp-platform.c b/sound/soc/amd/acp/acp-platform.c
index 65a809e2c29f..f561d39b33e2 100644
--- a/sound/soc/amd/acp/acp-platform.c
+++ b/sound/soc/amd/acp/acp-platform.c
@@ -91,25 +91,38 @@ EXPORT_SYMBOL_NS_GPL(acp_machine_select, SND_SOC_ACP_COMMON);
static irqreturn_t i2s_irq_handler(int irq, void *data)
{
struct acp_dev_data *adata = data;
+ struct acp_resource *rsrc = adata->rsrc;
struct acp_stream *stream;
u16 i2s_flag = 0;
- u32 val, i;
+ u32 ext_intr_stat, ext_intr_stat1, i;
if (!adata)
return IRQ_NONE;
- val = readl(adata->acp_base + ACP_EXTERNAL_INTR_STAT);
+ if (adata->rsrc->no_of_ctrls == 2)
+ ext_intr_stat1 = readl(ACP_EXTERNAL_INTR_STAT(adata, (rsrc->irqp_used - 1)));
+
+ ext_intr_stat = readl(ACP_EXTERNAL_INTR_STAT(adata, rsrc->irqp_used));
for (i = 0; i < ACP_MAX_STREAM; i++) {
stream = adata->stream[i];
- if (stream && (val & stream->irq_bit)) {
- writel(stream->irq_bit, adata->acp_base + ACP_EXTERNAL_INTR_STAT);
+ if (stream && (ext_intr_stat & stream->irq_bit)) {
+ writel(stream->irq_bit,
+ ACP_EXTERNAL_INTR_STAT(adata, rsrc->irqp_used));
snd_pcm_period_elapsed(stream->substream);
i2s_flag = 1;
break;
}
+ if (adata->rsrc->no_of_ctrls == 2) {
+ if (stream && (ext_intr_stat1 & stream->irq_bit)) {
+ writel(stream->irq_bit, ACP_EXTERNAL_INTR_STAT(adata,
+ (rsrc->irqp_used - 1)));
+ snd_pcm_period_elapsed(stream->substream);
+ i2s_flag = 1;
+ break;
+ }
+ }
}
-
if (i2s_flag)
return IRQ_HANDLED;
@@ -118,6 +131,7 @@ static irqreturn_t i2s_irq_handler(int irq, void *data)
static void config_pte_for_stream(struct acp_dev_data *adata, struct acp_stream *stream)
{
+ struct acp_resource *rsrc = adata->rsrc;
u32 pte_reg, pte_size, reg_val;
/* Use ATU base Group5 */
@@ -126,15 +140,17 @@ static void config_pte_for_stream(struct acp_dev_data *adata, struct acp_stream
stream->reg_offset = 0x02000000;
/* Group Enable */
- reg_val = ACP_SRAM_PTE_OFFSET;
+ reg_val = rsrc->sram_pte_offset;
writel(reg_val | BIT(31), adata->acp_base + pte_reg);
writel(PAGE_SIZE_4K_ENABLE, adata->acp_base + pte_size);
+ writel(0x01, adata->acp_base + ACPAXI2AXI_ATU_CTRL);
}
static void config_acp_dma(struct acp_dev_data *adata, int cpu_id, int size)
{
struct acp_stream *stream = adata->stream[cpu_id];
struct snd_pcm_substream *substream = stream->substream;
+ struct acp_resource *rsrc = adata->rsrc;
dma_addr_t addr = substream->dma_buffer.addr;
int num_pages = (PAGE_ALIGN(size) >> PAGE_SHIFT);
u32 low, high, val;
@@ -146,9 +162,9 @@ static void config_acp_dma(struct acp_dev_data *adata, int cpu_id, int size)
/* Load the low address of page int ACP SRAM through SRBM */
low = lower_32_bits(addr);
high = upper_32_bits(addr);
- writel(low, adata->acp_base + ACP_SCRATCH_REG_0 + val);
+ writel(low, adata->acp_base + rsrc->scratch_reg_offset + val);
high |= BIT(31);
- writel(high, adata->acp_base + ACP_SCRATCH_REG_0 + val + 4);
+ writel(high, adata->acp_base + rsrc->scratch_reg_offset + val + 4);
/* Move to next physically contiguous page */
val += 8;
@@ -187,7 +203,7 @@ static int acp_dma_open(struct snd_soc_component *component, struct snd_pcm_subs
}
runtime->private_data = stream;
- writel(1, adata->acp_base + ACP_EXTERNAL_INTR_ENB);
+ writel(1, ACP_EXTERNAL_INTR_ENB(adata));
return ret;
}
@@ -242,13 +258,6 @@ static int acp_dma_new(struct snd_soc_component *component,
return 0;
}
-static int acp_dma_mmap(struct snd_soc_component *component,
- struct snd_pcm_substream *substream,
- struct vm_area_struct *vma)
-{
- return snd_pcm_lib_default_mmap(substream, vma);
-}
-
static int acp_dma_close(struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
@@ -267,13 +276,13 @@ static int acp_dma_close(struct snd_soc_component *component,
}
static const struct snd_soc_component_driver acp_pcm_component = {
- .name = DRV_NAME,
- .open = acp_dma_open,
- .close = acp_dma_close,
- .hw_params = acp_dma_hw_params,
- .pointer = acp_dma_pointer,
- .mmap = acp_dma_mmap,
- .pcm_construct = acp_dma_new,
+ .name = DRV_NAME,
+ .open = acp_dma_open,
+ .close = acp_dma_close,
+ .hw_params = acp_dma_hw_params,
+ .pointer = acp_dma_pointer,
+ .pcm_construct = acp_dma_new,
+ .legacy_dai_naming = 1,
};
int acp_platform_register(struct device *dev)
diff --git a/sound/soc/amd/acp/acp-rembrandt.c b/sound/soc/amd/acp/acp-rembrandt.c
new file mode 100644
index 000000000000..2b57c0ca4e99
--- /dev/null
+++ b/sound/soc/amd/acp/acp-rembrandt.c
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2022 Advanced Micro Devices, Inc.
+//
+// Authors: Ajit Kumar Pandey <AjitKumar.Pandey@amd.com>
+// V sujith kumar Reddy <Vsujithkumar.Reddy@amd.com>
+/*
+ * Hardware interface for Rembrandt ACP block
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+#include <linux/dma-mapping.h>
+
+#include "amd.h"
+
+#define DRV_NAME "acp_asoc_rembrandt"
+
+#define ACP6X_PGFSM_CONTROL 0x1024
+#define ACP6X_PGFSM_STATUS 0x1028
+
+#define ACP_SOFT_RESET_SOFTRESET_AUDDONE_MASK 0x00010001
+
+#define ACP_PGFSM_CNTL_POWER_ON_MASK 0x01
+#define ACP_PGFSM_CNTL_POWER_OFF_MASK 0x00
+#define ACP_PGFSM_STATUS_MASK 0x03
+#define ACP_POWERED_ON 0x00
+#define ACP_POWER_ON_IN_PROGRESS 0x01
+#define ACP_POWERED_OFF 0x02
+#define ACP_POWER_OFF_IN_PROGRESS 0x03
+
+#define ACP_ERROR_MASK 0x20000000
+#define ACP_EXT_INTR_STAT_CLEAR_MASK 0xFFFFFFFF
+
+
+static int rmb_acp_init(void __iomem *base);
+static int rmb_acp_deinit(void __iomem *base);
+
+static struct acp_resource rsrc = {
+ .offset = 0,
+ .no_of_ctrls = 2,
+ .irqp_used = 1,
+ .soc_mclk = true,
+ .irq_reg_offset = 0x1a00,
+ .i2s_pin_cfg_offset = 0x1440,
+ .i2s_mode = 0x0a,
+ .scratch_reg_offset = 0x12800,
+ .sram_pte_offset = 0x03802800,
+};
+
+static struct snd_soc_acpi_codecs amp_rt1019 = {
+ .num_codecs = 1,
+ .codecs = {"10EC1019"}
+};
+
+static struct snd_soc_acpi_codecs amp_max = {
+ .num_codecs = 1,
+ .codecs = {"MX98360A"}
+};
+
+static struct snd_soc_acpi_mach snd_soc_acpi_amd_rmb_acp_machines[] = {
+ {
+ .id = "10508825",
+ .drv_name = "rmb-nau8825-max",
+ .machine_quirk = snd_soc_acpi_codec_list,
+ .quirk_data = &amp_max,
+ },
+ {
+ .id = "AMDI0007",
+ .drv_name = "rembrandt-acp",
+ },
+ {
+ .id = "RTL5682",
+ .drv_name = "rmb-rt5682s-rt1019",
+ .machine_quirk = snd_soc_acpi_codec_list,
+ .quirk_data = &amp_rt1019,
+ },
+ {},
+};
+
+static struct snd_soc_dai_driver acp_rmb_dai[] = {
+{
+ .name = "acp-i2s-sp",
+ .id = I2S_SP_INSTANCE,
+ .playback = {
+ .stream_name = "I2S SP Playback",
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
+ .channels_min = 2,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 96000,
+ },
+ .capture = {
+ .stream_name = "I2S SP Capture",
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
+ .channels_min = 2,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .ops = &asoc_acp_cpu_dai_ops,
+ .probe = &asoc_acp_i2s_probe,
+},
+{
+ .name = "acp-i2s-bt",
+ .id = I2S_BT_INSTANCE,
+ .playback = {
+ .stream_name = "I2S BT Playback",
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
+ .channels_min = 2,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 96000,
+ },
+ .capture = {
+ .stream_name = "I2S BT Capture",
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
+ .channels_min = 2,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .ops = &asoc_acp_cpu_dai_ops,
+ .probe = &asoc_acp_i2s_probe,
+},
+{
+ .name = "acp-i2s-hs",
+ .id = I2S_HS_INSTANCE,
+ .playback = {
+ .stream_name = "I2S HS Playback",
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
+ .channels_min = 2,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 96000,
+ },
+ .capture = {
+ .stream_name = "I2S HS Capture",
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
+ .channels_min = 2,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .ops = &asoc_acp_cpu_dai_ops,
+ .probe = &asoc_acp_i2s_probe,
+},
+{
+ .name = "acp-pdm-dmic",
+ .id = DMIC_INSTANCE,
+ .capture = {
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,
+ .channels_min = 2,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .ops = &acp_dmic_dai_ops,
+},
+};
+
+static int acp6x_power_on(void __iomem *base)
+{
+ u32 val;
+ int timeout;
+
+ val = readl(base + ACP6X_PGFSM_STATUS);
+
+ if (val == ACP_POWERED_ON)
+ return 0;
+
+ if ((val & ACP_PGFSM_STATUS_MASK) !=
+ ACP_POWER_ON_IN_PROGRESS)
+ writel(ACP_PGFSM_CNTL_POWER_ON_MASK,
+ base + ACP6X_PGFSM_CONTROL);
+ timeout = 0;
+ while (++timeout < 500) {
+ val = readl(base + ACP6X_PGFSM_STATUS);
+ if (!val)
+ return 0;
+ udelay(1);
+ }
+ return -ETIMEDOUT;
+}
+
+static int acp6x_power_off(void __iomem *base)
+{
+ u32 val;
+ int timeout;
+
+ writel(ACP_PGFSM_CNTL_POWER_OFF_MASK,
+ base + ACP6X_PGFSM_CONTROL);
+ timeout = 0;
+ while (++timeout < 500) {
+ val = readl(base + ACP6X_PGFSM_STATUS);
+ if ((val & ACP_PGFSM_STATUS_MASK) == ACP_POWERED_OFF)
+ return 0;
+ udelay(1);
+ }
+ return -ETIMEDOUT;
+}
+
+static int acp6x_reset(void __iomem *base)
+{
+ u32 val;
+ int timeout;
+
+ writel(1, base + ACP_SOFT_RESET);
+ timeout = 0;
+ while (++timeout < 500) {
+ val = readl(base + ACP_SOFT_RESET);
+ if (val & ACP_SOFT_RESET_SOFTRESET_AUDDONE_MASK)
+ break;
+ cpu_relax();
+ }
+ writel(0, base + ACP_SOFT_RESET);
+ timeout = 0;
+ while (++timeout < 500) {
+ val = readl(base + ACP_SOFT_RESET);
+ if (!val)
+ return 0;
+ cpu_relax();
+ }
+ return -ETIMEDOUT;
+}
+
+static void acp6x_enable_interrupts(struct acp_dev_data *adata)
+{
+ struct acp_resource *rsrc = adata->rsrc;
+ u32 ext_intr_ctrl;
+
+ writel(0x01, ACP_EXTERNAL_INTR_ENB(adata));
+ ext_intr_ctrl = readl(ACP_EXTERNAL_INTR_CNTL(adata, rsrc->irqp_used));
+ ext_intr_ctrl |= ACP_ERROR_MASK;
+ writel(ext_intr_ctrl, ACP_EXTERNAL_INTR_CNTL(adata, rsrc->irqp_used));
+}
+
+static void acp6x_disable_interrupts(struct acp_dev_data *adata)
+{
+ struct acp_resource *rsrc = adata->rsrc;
+
+ writel(ACP_EXT_INTR_STAT_CLEAR_MASK,
+ ACP_EXTERNAL_INTR_STAT(adata, rsrc->irqp_used));
+ writel(0x00, ACP_EXTERNAL_INTR_ENB(adata));
+}
+
+static int rmb_acp_init(void __iomem *base)
+{
+ int ret;
+
+ /* power on */
+ ret = acp6x_power_on(base);
+ if (ret) {
+ pr_err("ACP power on failed\n");
+ return ret;
+ }
+ writel(0x01, base + ACP_CONTROL);
+
+ /* Reset */
+ ret = acp6x_reset(base);
+ if (ret) {
+ pr_err("ACP reset failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rmb_acp_deinit(void __iomem *base)
+{
+ int ret = 0;
+
+ /* Reset */
+ ret = acp6x_reset(base);
+ if (ret) {
+ pr_err("ACP reset failed\n");
+ return ret;
+ }
+
+ writel(0x00, base + ACP_CONTROL);
+
+ /* power off */
+ ret = acp6x_power_off(base);
+ if (ret) {
+ pr_err("ACP power off failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rembrandt_audio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct acp_chip_info *chip;
+ struct acp_dev_data *adata;
+ struct resource *res;
+
+ chip = dev_get_platdata(&pdev->dev);
+ if (!chip || !chip->base) {
+ dev_err(&pdev->dev, "ACP chip data is NULL\n");
+ return -ENODEV;
+ }
+
+ if (chip->acp_rev != ACP6X_DEV) {
+ dev_err(&pdev->dev, "Unsupported ACP Revision %d\n", chip->acp_rev);
+ return -ENODEV;
+ }
+
+ rmb_acp_init(chip->base);
+
+ adata = devm_kzalloc(dev, sizeof(struct acp_dev_data), GFP_KERNEL);
+ if (!adata)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "acp_mem");
+ if (!res) {
+ dev_err(&pdev->dev, "IORESOURCE_MEM FAILED\n");
+ return -ENODEV;
+ }
+
+ adata->acp_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!adata->acp_base)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "acp_dai_irq");
+ if (!res) {
+ dev_err(&pdev->dev, "IORESOURCE_IRQ FAILED\n");
+ return -ENODEV;
+ }
+
+ adata->i2s_irq = res->start;
+ adata->dev = dev;
+ adata->dai_driver = acp_rmb_dai;
+ adata->num_dai = ARRAY_SIZE(acp_rmb_dai);
+ adata->rsrc = &rsrc;
+
+ adata->machines = snd_soc_acpi_amd_rmb_acp_machines;
+ acp_machine_select(adata);
+
+ dev_set_drvdata(dev, adata);
+ acp6x_enable_interrupts(adata);
+ acp_platform_register(dev);
+
+ return 0;
+}
+
+static int rembrandt_audio_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct acp_dev_data *adata = dev_get_drvdata(dev);
+ struct acp_chip_info *chip;
+
+ chip = dev_get_platdata(&pdev->dev);
+ if (!chip || !chip->base) {
+ dev_err(&pdev->dev, "ACP chip data is NULL\n");
+ return -ENODEV;
+ }
+
+ rmb_acp_deinit(chip->base);
+
+ acp6x_disable_interrupts(adata);
+ acp_platform_unregister(dev);
+ return 0;
+}
+
+static struct platform_driver rembrandt_driver = {
+ .probe = rembrandt_audio_probe,
+ .remove = rembrandt_audio_remove,
+ .driver = {
+ .name = "acp_asoc_rembrandt",
+ },
+};
+
+module_platform_driver(rembrandt_driver);
+
+MODULE_DESCRIPTION("AMD ACP Rembrandt Driver");
+MODULE_IMPORT_NS(SND_SOC_ACP_COMMON);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/amd/acp/acp-renoir.c b/sound/soc/amd/acp/acp-renoir.c
index 75c9229ece97..2a89a0d2e601 100644
--- a/sound/soc/amd/acp/acp-renoir.c
+++ b/sound/soc/amd/acp/acp-renoir.c
@@ -39,6 +39,17 @@
#define ACP_ERROR_MASK 0x20000000
#define ACP_EXT_INTR_STAT_CLEAR_MASK 0xFFFFFFFF
+static struct acp_resource rsrc = {
+ .offset = 20,
+ .no_of_ctrls = 1,
+ .irqp_used = 0,
+ .irq_reg_offset = 0x1800,
+ .i2s_pin_cfg_offset = 0x1400,
+ .i2s_mode = 0x04,
+ .scratch_reg_offset = 0x12800,
+ .sram_pte_offset = 0x02052800,
+};
+
static struct snd_soc_acpi_codecs amp_rt1019 = {
.num_codecs = 1,
.codecs = {"10EC1019"}
@@ -186,20 +197,24 @@ static int acp3x_reset(void __iomem *base)
return readl_poll_timeout(base + ACP_SOFT_RESET, val, !val, DELAY_US, ACP_TIMEOUT);
}
-static void acp3x_enable_interrupts(void __iomem *base)
+static void acp3x_enable_interrupts(struct acp_dev_data *adata)
{
+ struct acp_resource *rsrc = adata->rsrc;
u32 ext_intr_ctrl;
- writel(0x01, base + ACP_EXTERNAL_INTR_ENB);
- ext_intr_ctrl = readl(base + ACP_EXTERNAL_INTR_CNTL);
+ writel(0x01, ACP_EXTERNAL_INTR_ENB(adata));
+ ext_intr_ctrl = readl(ACP_EXTERNAL_INTR_CNTL(adata, rsrc->irqp_used));
ext_intr_ctrl |= ACP_ERROR_MASK;
- writel(ext_intr_ctrl, base + ACP_EXTERNAL_INTR_CNTL);
+ writel(ext_intr_ctrl, ACP_EXTERNAL_INTR_CNTL(adata, rsrc->irqp_used));
}
-static void acp3x_disable_interrupts(void __iomem *base)
+static void acp3x_disable_interrupts(struct acp_dev_data *adata)
{
- writel(ACP_EXT_INTR_STAT_CLEAR_MASK, base + ACP_EXTERNAL_INTR_STAT);
- writel(0x00, base + ACP_EXTERNAL_INTR_ENB);
+ struct acp_resource *rsrc = adata->rsrc;
+
+ writel(ACP_EXT_INTR_STAT_CLEAR_MASK,
+ ACP_EXTERNAL_INTR_STAT(adata, rsrc->irqp_used));
+ writel(0x00, ACP_EXTERNAL_INTR_ENB(adata));
}
static int rn_acp_init(void __iomem *base)
@@ -218,8 +233,6 @@ static int rn_acp_init(void __iomem *base)
if (ret)
return ret;
- acp3x_enable_interrupts(base);
-
return 0;
}
@@ -227,8 +240,6 @@ static int rn_acp_deinit(void __iomem *base)
{
int ret = 0;
- acp3x_disable_interrupts(base);
-
/* Reset */
ret = acp3x_reset(base);
if (ret)
@@ -290,11 +301,13 @@ static int renoir_audio_probe(struct platform_device *pdev)
adata->dev = dev;
adata->dai_driver = acp_renoir_dai;
adata->num_dai = ARRAY_SIZE(acp_renoir_dai);
+ adata->rsrc = &rsrc;
adata->machines = snd_soc_acpi_amd_acp_machines;
acp_machine_select(adata);
dev_set_drvdata(dev, adata);
+ acp3x_enable_interrupts(adata);
acp_platform_register(dev);
return 0;
@@ -303,20 +316,17 @@ static int renoir_audio_probe(struct platform_device *pdev)
static int renoir_audio_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct acp_dev_data *adata = dev_get_drvdata(dev);
struct acp_chip_info *chip;
int ret;
chip = dev_get_platdata(&pdev->dev);
- if (!chip || !chip->base) {
- dev_err(&pdev->dev, "ACP chip data is NULL\n");
- return -ENODEV;
- }
+
+ acp3x_disable_interrupts(adata);
ret = rn_acp_deinit(chip->base);
- if (ret) {
- dev_err(&pdev->dev, "ACP de-init Failed\n");
- return -EINVAL;
- }
+ if (ret)
+ dev_err(&pdev->dev, "ACP de-init Failed (%pe)\n", ERR_PTR(ret));
acp_platform_unregister(dev);
return 0;
diff --git a/sound/soc/amd/acp/acp-sof-mach.c b/sound/soc/amd/acp/acp-sof-mach.c
index d1531cdab110..f19f064a7527 100644
--- a/sound/soc/amd/acp/acp-sof-mach.c
+++ b/sound/soc/amd/acp/acp-sof-mach.c
@@ -56,6 +56,26 @@ static struct acp_card_drvdata sof_rt5682s_max_data = {
.dmic_codec_id = DMIC,
};
+static struct acp_card_drvdata sof_nau8825_data = {
+ .hs_cpu_id = I2S_HS,
+ .amp_cpu_id = I2S_HS,
+ .dmic_cpu_id = DMIC,
+ .hs_codec_id = NAU8825,
+ .amp_codec_id = MAX98360A,
+ .dmic_codec_id = DMIC,
+ .soc_mclk = true,
+};
+
+static struct acp_card_drvdata sof_rt5682s_hs_rt1019_data = {
+ .hs_cpu_id = I2S_HS,
+ .amp_cpu_id = I2S_HS,
+ .dmic_cpu_id = DMIC,
+ .hs_codec_id = RT5682S,
+ .amp_codec_id = RT1019,
+ .dmic_codec_id = DMIC,
+ .soc_mclk = true,
+};
+
static const struct snd_kcontrol_new acp_controls[] = {
SOC_DAPM_PIN_SWITCH("Headphone Jack"),
SOC_DAPM_PIN_SWITCH("Headset Mic"),
@@ -124,6 +144,14 @@ static const struct platform_device_id board_ids[] = {
.name = "rt5682s-rt1019",
.driver_data = (kernel_ulong_t)&sof_rt5682s_rt1019_data
},
+ {
+ .name = "nau8825-max",
+ .driver_data = (kernel_ulong_t)&sof_nau8825_data
+ },
+ {
+ .name = "rt5682s-hs-rt1019",
+ .driver_data = (kernel_ulong_t)&sof_rt5682s_hs_rt1019_data
+ },
{ }
};
static struct platform_driver acp_asoc_audio = {
@@ -143,4 +171,6 @@ MODULE_ALIAS("platform:rt5682-rt1019");
MODULE_ALIAS("platform:rt5682-max");
MODULE_ALIAS("platform:rt5682s-max");
MODULE_ALIAS("platform:rt5682s-rt1019");
+MODULE_ALIAS("platform:nau8825-max");
+MODULE_ALIAS("platform:rt5682s-hs-rt1019");
MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/amd/acp/amd.h b/sound/soc/amd/acp/amd.h
index 8fd38bf4d3bd..af9603724a68 100644
--- a/sound/soc/amd/acp/amd.h
+++ b/sound/soc/amd/acp/amd.h
@@ -19,10 +19,12 @@
#include "chip_offset_byte.h"
#define ACP3X_DEV 3
+#define ACP6X_DEV 6
#define I2S_SP_INSTANCE 0x00
#define I2S_BT_INSTANCE 0x01
#define DMIC_INSTANCE 0x02
+#define I2S_HS_INSTANCE 0x03
#define MEM_WINDOW_START 0x4080000
@@ -32,30 +34,37 @@
#define ACP3x_I2STDM_REG_END 0x1242410
#define ACP3x_BT_TDM_REG_START 0x1242800
#define ACP3x_BT_TDM_REG_END 0x1242810
-#define I2S_MODE 0x04
-#define I2S_RX_THRESHOLD 27
-#define I2S_TX_THRESHOLD 28
-#define BT_TX_THRESHOLD 26
-#define BT_RX_THRESHOLD 25
-#define ACP_SRAM_PTE_OFFSET 0x02052800
+#define THRESHOLD(bit, base) ((bit) + (base))
+#define I2S_RX_THRESHOLD(base) THRESHOLD(7, base)
+#define I2S_TX_THRESHOLD(base) THRESHOLD(8, base)
+#define BT_TX_THRESHOLD(base) THRESHOLD(6, base)
+#define BT_RX_THRESHOLD(base) THRESHOLD(5, base)
+#define HS_TX_THRESHOLD(base) THRESHOLD(4, base)
+#define HS_RX_THRESHOLD(base) THRESHOLD(3, base)
#define ACP_SRAM_SP_PB_PTE_OFFSET 0x0
#define ACP_SRAM_SP_CP_PTE_OFFSET 0x100
#define ACP_SRAM_BT_PB_PTE_OFFSET 0x200
#define ACP_SRAM_BT_CP_PTE_OFFSET 0x300
#define ACP_SRAM_PDM_PTE_OFFSET 0x400
+#define ACP_SRAM_HS_PB_PTE_OFFSET 0x500
+#define ACP_SRAM_HS_CP_PTE_OFFSET 0x600
#define PAGE_SIZE_4K_ENABLE 0x2
#define I2S_SP_TX_MEM_WINDOW_START 0x4000000
#define I2S_SP_RX_MEM_WINDOW_START 0x4020000
#define I2S_BT_TX_MEM_WINDOW_START 0x4040000
#define I2S_BT_RX_MEM_WINDOW_START 0x4060000
+#define I2S_HS_TX_MEM_WINDOW_START 0x40A0000
+#define I2S_HS_RX_MEM_WINDOW_START 0x40C0000
#define SP_PB_FIFO_ADDR_OFFSET 0x500
#define SP_CAPT_FIFO_ADDR_OFFSET 0x700
#define BT_PB_FIFO_ADDR_OFFSET 0x900
#define BT_CAPT_FIFO_ADDR_OFFSET 0xB00
+#define HS_PB_FIFO_ADDR_OFFSET 0xD00
+#define HS_CAPT_FIFO_ADDR_OFFSET 0xF00
#define PLAYBACK_MIN_NUM_PERIODS 2
#define PLAYBACK_MAX_NUM_PERIODS 8
#define PLAYBACK_MAX_PERIOD_SIZE 8192
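The THRESHOLD() rework in the hunk above keeps the old interrupt bit numbers intact: with the Renoir resource offset of 20 (see the rsrc tables in acp-renoir.c and acp-rembrandt.c above), I2S_RX/I2S_TX/BT_TX/BT_RX_THRESHOLD(20) evaluate to 27, 28, 26 and 25, exactly the fixed constants this hunk removes, while Rembrandt passes offset 0 so its HS/BT/SP threshold bits land at bits 3 through 8 of its own interrupt control register.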
@@ -73,7 +82,7 @@
#define ACP3x_ITER_IRER_SAMP_LEN_MASK 0x38
-#define ACP_MAX_STREAM 6
+#define ACP_MAX_STREAM 8
struct acp_chip_info {
char *name; /* Platform name */
@@ -92,6 +101,18 @@ struct acp_stream {
u32 fifo_offset;
};
+struct acp_resource {
+ int offset;
+ int no_of_ctrls;
+ int irqp_used;
+ bool soc_mclk;
+ u32 irq_reg_offset;
+ u32 i2s_pin_cfg_offset;
+ int i2s_mode;
+ u64 scratch_reg_offset;
+ u64 sram_pte_offset;
+};
+
struct acp_dev_data {
char *name;
struct device *dev;
@@ -106,6 +127,22 @@ struct acp_dev_data {
struct snd_soc_acpi_mach *machines;
struct platform_device *mach_dev;
+
+ u32 bclk_div;
+ u32 lrclk_div;
+
+ struct acp_resource *rsrc;
+};
+
+union acp_i2stdm_mstrclkgen {
+ struct {
+ u32 i2stdm_master_mode : 1;
+ u32 i2stdm_format_mode : 1;
+ u32 i2stdm_lrclk_div_val : 9;
+ u32 i2stdm_bclk_div_val : 11;
+ u32:10;
+ } bitfields, bits;
+ u32 u32_all;
};
extern const struct snd_soc_dai_ops asoc_acp_cpu_dai_ops;
@@ -134,6 +171,10 @@ static inline u64 acp_get_byte_count(struct acp_dev_data *adata, int dai_id, int
high = readl(adata->acp_base + ACP_I2S_TX_LINEARPOSITIONCNTR_HIGH);
low = readl(adata->acp_base + ACP_I2S_TX_LINEARPOSITIONCNTR_LOW);
break;
+ case I2S_HS_INSTANCE:
+ high = readl(adata->acp_base + ACP_HS_TX_LINEARPOSITIONCNTR_HIGH);
+ low = readl(adata->acp_base + ACP_HS_TX_LINEARPOSITIONCNTR_LOW);
+ break;
default:
dev_err(adata->dev, "Invalid dai id %x\n", dai_id);
return -EINVAL;
@@ -148,6 +189,10 @@ static inline u64 acp_get_byte_count(struct acp_dev_data *adata, int dai_id, int
high = readl(adata->acp_base + ACP_I2S_RX_LINEARPOSITIONCNTR_HIGH);
low = readl(adata->acp_base + ACP_I2S_RX_LINEARPOSITIONCNTR_LOW);
break;
+ case I2S_HS_INSTANCE:
+ high = readl(adata->acp_base + ACP_HS_RX_LINEARPOSITIONCNTR_HIGH);
+ low = readl(adata->acp_base + ACP_HS_RX_LINEARPOSITIONCNTR_LOW);
+ break;
case DMIC_INSTANCE:
high = readl(adata->acp_base + ACP_WOV_RX_LINEARPOSITIONCNTR_HIGH);
low = readl(adata->acp_base + ACP_WOV_RX_LINEARPOSITIONCNTR_LOW);
@@ -163,4 +208,31 @@ static inline u64 acp_get_byte_count(struct acp_dev_data *adata, int dai_id, int
return byte_count;
}
+static inline void acp_set_i2s_clk(struct acp_dev_data *adata, int dai_id)
+{
+ union acp_i2stdm_mstrclkgen mclkgen;
+ u32 master_reg;
+
+ switch (dai_id) {
+ case I2S_SP_INSTANCE:
+ master_reg = ACP_I2STDM0_MSTRCLKGEN;
+ break;
+ case I2S_BT_INSTANCE:
+ master_reg = ACP_I2STDM1_MSTRCLKGEN;
+ break;
+ case I2S_HS_INSTANCE:
+ master_reg = ACP_I2STDM2_MSTRCLKGEN;
+ break;
+ default:
+ master_reg = ACP_I2STDM0_MSTRCLKGEN;
+ break;
+ }
+
+ mclkgen.bits.i2stdm_master_mode = 0x1;
+ mclkgen.bits.i2stdm_format_mode = 0x00;
+
+ mclkgen.bits.i2stdm_bclk_div_val = adata->bclk_div;
+ mclkgen.bits.i2stdm_lrclk_div_val = adata->lrclk_div;
+ writel(mclkgen.u32_all, adata->acp_base + master_reg);
+}
#endif
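acp_set_i2s_clk() above packs the dividers saved by acp_i2s_hwparams() into the per-instance I2STDMx_MSTRCLKGEN register through the acp_i2stdm_mstrclkgen bitfield union. A worked example, assuming the LSB-first bit-field layout the driver itself relies on, with values from the 48 kHz / S32_LE case (lrclk_div = 64, bclk_div = 64):

/* Illustrative only: the value acp_set_i2s_clk() would compute for the
 * HS instance at 48 kHz, S32_LE.
 */
union acp_i2stdm_mstrclkgen m = {
	.bits = {
		.i2stdm_master_mode   = 0x1,	/* bit 0       */
		.i2stdm_format_mode   = 0x0,	/* bit 1       */
		.i2stdm_lrclk_div_val = 64,	/* bits 2..10  */
		.i2stdm_bclk_div_val  = 64,	/* bits 11..21 */
	},
};
/* m.u32_all == 0x00020101, which would be written to
 * ACP_I2STDM2_MSTRCLKGEN (0x241C) for the HS instance.
 */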
diff --git a/sound/soc/amd/acp/chip_offset_byte.h b/sound/soc/amd/acp/chip_offset_byte.h
index 88f6fa597cd6..ce3948e0679c 100644
--- a/sound/soc/amd/acp/chip_offset_byte.h
+++ b/sound/soc/amd/acp/chip_offset_byte.h
@@ -20,11 +20,13 @@
#define ACP_SOFT_RESET 0x1000
#define ACP_CONTROL 0x1004
-#define ACP_EXTERNAL_INTR_ENB 0x1800
-#define ACP_EXTERNAL_INTR_CNTL 0x1804
-#define ACP_EXTERNAL_INTR_STAT 0x1808
-#define ACP_I2S_PIN_CONFIG 0x1400
-#define ACP_SCRATCH_REG_0 0x12800
+#define ACP_EXTERNAL_INTR_REG_ADDR(adata, offset, ctrl) \
+ (adata->acp_base + adata->rsrc->irq_reg_offset + offset + (ctrl * 0x04))
+
+#define ACP_EXTERNAL_INTR_ENB(adata) ACP_EXTERNAL_INTR_REG_ADDR(adata, 0x0, 0x0)
+#define ACP_EXTERNAL_INTR_CNTL(adata, ctrl) ACP_EXTERNAL_INTR_REG_ADDR(adata, 0x4, ctrl)
+#define ACP_EXTERNAL_INTR_STAT(adata, ctrl) ACP_EXTERNAL_INTR_REG_ADDR(adata, \
+ (0x4 + (adata->rsrc->no_of_ctrls * 0x04)), ctrl)
/* Registers from ACP_AUDIO_BUFFERS block */
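With the per-platform resource tables in this series, the new accessors reduce to the old fixed offsets on Renoir and address the second interrupt controller on Rembrandt: Renoir (irq_reg_offset 0x1800, no_of_ctrls 1, irqp_used 0) yields ACP_EXTERNAL_INTR_ENB = acp_base + 0x1800, CNTL(0) = acp_base + 0x1804 and STAT(0) = acp_base + 0x1808, exactly the three constants removed above, while Rembrandt (irq_reg_offset 0x1a00, no_of_ctrls 2, irqp_used 1) yields CNTL(1) = acp_base + 0x1a08 and STAT(1) = acp_base + 0x1a10.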
@@ -64,6 +66,24 @@
#define ACP_BT_TX_LINEARPOSITIONCNTR_HIGH 0x2084
#define ACP_BT_TX_LINEARPOSITIONCNTR_LOW 0x2088
#define ACP_BT_TX_INTR_WATERMARK_SIZE 0x208C
+#define ACP_HS_RX_RINGBUFADDR 0x3A90
+#define ACP_HS_RX_RINGBUFSIZE 0x3A94
+#define ACP_HS_RX_LINKPOSITIONCNTR 0x3A98
+#define ACP_HS_RX_FIFOADDR 0x3A9C
+#define ACP_HS_RX_FIFOSIZE 0x3AA0
+#define ACP_HS_RX_DMA_SIZE 0x3AA4
+#define ACP_HS_RX_LINEARPOSITIONCNTR_HIGH 0x3AA8
+#define ACP_HS_RX_LINEARPOSITIONCNTR_LOW 0x3AAC
+#define ACP_HS_RX_INTR_WATERMARK_SIZE 0x3AB0
+#define ACP_HS_TX_RINGBUFADDR 0x3AB4
+#define ACP_HS_TX_RINGBUFSIZE 0x3AB8
+#define ACP_HS_TX_LINKPOSITIONCNTR 0x3ABC
+#define ACP_HS_TX_FIFOADDR 0x3AC0
+#define ACP_HS_TX_FIFOSIZE 0x3AC4
+#define ACP_HS_TX_DMA_SIZE 0x3AC8
+#define ACP_HS_TX_LINEARPOSITIONCNTR_HIGH 0x3ACC
+#define ACP_HS_TX_LINEARPOSITIONCNTR_LOW 0x3AD0
+#define ACP_HS_TX_INTR_WATERMARK_SIZE 0x3AD4
#define ACP_I2STDM_IER 0x2400
#define ACP_I2STDM_IRER 0x2404
@@ -79,6 +99,13 @@
#define ACP_BTTDM_ITER 0x280C
#define ACP_BTTDM_TXFRMT 0x2810
+/* Registers from ACP_HS_TDM block */
+#define ACP_HSTDM_IER 0x2814
+#define ACP_HSTDM_IRER 0x2818
+#define ACP_HSTDM_RXFRMT 0x281C
+#define ACP_HSTDM_ITER 0x2820
+#define ACP_HSTDM_TXFRMT 0x2824
+
/* Registers from ACP_WOV_PDM block */
#define ACP_WOV_PDM_ENABLE 0x2C04
@@ -99,4 +126,7 @@
#define ACP_PDM_VAD_DYNAMIC_CLK_GATING_EN 0x2C64
#define ACP_WOV_ERROR_STATUS_REGISTER 0x2C68
+#define ACP_I2STDM0_MSTRCLKGEN 0x2414
+#define ACP_I2STDM1_MSTRCLKGEN 0x2418
+#define ACP_I2STDM2_MSTRCLKGEN 0x241C
#endif
diff --git a/sound/soc/amd/mach-config.h b/sound/soc/amd/mach-config.h
index 0a54567a2841..7b4c625da40d 100644
--- a/sound/soc/amd/mach-config.h
+++ b/sound/soc/amd/mach-config.h
@@ -19,6 +19,7 @@
#define ACP_PCI_DEV_ID 0x15E2
extern struct snd_soc_acpi_mach snd_soc_acpi_amd_sof_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_amd_rmb_sof_machines[];
struct config_entry {
u32 flags;
diff --git a/sound/soc/amd/raven/acp3x-i2s.c b/sound/soc/amd/raven/acp3x-i2s.c
index de6f70d7ef36..aa38cef1776d 100644
--- a/sound/soc/amd/raven/acp3x-i2s.c
+++ b/sound/soc/amd/raven/acp3x-i2s.c
@@ -257,7 +257,8 @@ static const struct snd_soc_dai_ops acp3x_i2s_dai_ops = {
};
static const struct snd_soc_component_driver acp3x_dai_component = {
- .name = DRV_NAME,
+ .name = DRV_NAME,
+ .legacy_dai_naming = 1,
};
static struct snd_soc_dai_driver acp3x_i2s_dai = {
diff --git a/sound/soc/amd/renoir/acp3x-pdm-dma.c b/sound/soc/amd/renoir/acp3x-pdm-dma.c
index 8c42345ee41e..7203c6488df0 100644
--- a/sound/soc/amd/renoir/acp3x-pdm-dma.c
+++ b/sound/soc/amd/renoir/acp3x-pdm-dma.c
@@ -363,12 +363,13 @@ static struct snd_soc_dai_driver acp_pdm_dai_driver = {
};
static const struct snd_soc_component_driver acp_pdm_component = {
- .name = DRV_NAME,
- .open = acp_pdm_dma_open,
- .close = acp_pdm_dma_close,
- .hw_params = acp_pdm_dma_hw_params,
- .pointer = acp_pdm_dma_pointer,
- .pcm_construct = acp_pdm_dma_new,
+ .name = DRV_NAME,
+ .open = acp_pdm_dma_open,
+ .close = acp_pdm_dma_close,
+ .hw_params = acp_pdm_dma_hw_params,
+ .pointer = acp_pdm_dma_pointer,
+ .pcm_construct = acp_pdm_dma_new,
+ .legacy_dai_naming = 1,
};
static int acp_pdm_audio_probe(struct platform_device *pdev)
diff --git a/sound/soc/amd/rpl/Makefile b/sound/soc/amd/rpl/Makefile
new file mode 100644
index 000000000000..11a33a05e94b
--- /dev/null
+++ b/sound/soc/amd/rpl/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0+
+# RPL platform Support
+snd-rpl-pci-acp6x-objs := rpl-pci-acp6x.o
+
+obj-$(CONFIG_SND_SOC_AMD_RPL_ACP6x) += snd-rpl-pci-acp6x.o
diff --git a/sound/soc/amd/rpl/rpl-pci-acp6x.c b/sound/soc/amd/rpl/rpl-pci-acp6x.c
new file mode 100644
index 000000000000..a8e548ed991b
--- /dev/null
+++ b/sound/soc/amd/rpl/rpl-pci-acp6x.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * AMD RPL ACP PCI Driver
+ *
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "rpl_acp6x.h"
+
+struct rpl_dev_data {
+ void __iomem *acp6x_base;
+};
+
+static int rpl_power_on(void __iomem *acp_base)
+{
+ u32 val;
+ int timeout;
+
+ val = rpl_acp_readl(acp_base + ACP_PGFSM_STATUS);
+
+ if (!val)
+ return val;
+
+ if ((val & ACP_PGFSM_STATUS_MASK) != ACP_POWER_ON_IN_PROGRESS)
+ rpl_acp_writel(ACP_PGFSM_CNTL_POWER_ON_MASK, acp_base + ACP_PGFSM_CONTROL);
+ timeout = 0;
+ while (++timeout < 500) {
+ val = rpl_acp_readl(acp_base + ACP_PGFSM_STATUS);
+ if (!val)
+ return 0;
+ udelay(1);
+ }
+ return -ETIMEDOUT;
+}
+
+static int rpl_reset(void __iomem *acp_base)
+{
+ u32 val;
+ int timeout;
+
+ rpl_acp_writel(1, acp_base + ACP_SOFT_RESET);
+ timeout = 0;
+ while (++timeout < 500) {
+ val = rpl_acp_readl(acp_base + ACP_SOFT_RESET);
+ if (val & ACP_SOFT_RESET_SOFTRESET_AUDDONE_MASK)
+ break;
+ cpu_relax();
+ }
+ rpl_acp_writel(0, acp_base + ACP_SOFT_RESET);
+ timeout = 0;
+ while (++timeout < 500) {
+ val = rpl_acp_readl(acp_base + ACP_SOFT_RESET);
+ if (!val)
+ return 0;
+ cpu_relax();
+ }
+ return -ETIMEDOUT;
+}
+
+static int rpl_init(void __iomem *acp_base)
+{
+ int ret;
+
+ /* power on */
+ ret = rpl_power_on(acp_base);
+ if (ret) {
+ pr_err("ACP power on failed\n");
+ return ret;
+ }
+ rpl_acp_writel(0x01, acp_base + ACP_CONTROL);
+ /* Reset */
+ ret = rpl_reset(acp_base);
+ if (ret) {
+ pr_err("ACP reset failed\n");
+ return ret;
+ }
+ rpl_acp_writel(0x03, acp_base + ACP_CLKMUX_SEL);
+ return 0;
+}
+
+static int rpl_deinit(void __iomem *acp_base)
+{
+ int ret;
+
+ /* Reset */
+ ret = rpl_reset(acp_base);
+ if (ret) {
+ pr_err("ACP reset failed\n");
+ return ret;
+ }
+ rpl_acp_writel(0x00, acp_base + ACP_CLKMUX_SEL);
+ rpl_acp_writel(0x00, acp_base + ACP_CONTROL);
+ return 0;
+}
+
+static int snd_rpl_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ struct rpl_dev_data *adata;
+ u32 addr;
+ int ret;
+
+ /* RPL device check */
+ switch (pci->revision) {
+ case 0x62:
+ break;
+ default:
+ dev_dbg(&pci->dev, "acp6x pci device not found\n");
+ return -ENODEV;
+ }
+ if (pci_enable_device(pci)) {
+ dev_err(&pci->dev, "pci_enable_device failed\n");
+ return -ENODEV;
+ }
+
+ ret = pci_request_regions(pci, "AMD ACP6x audio");
+ if (ret < 0) {
+ dev_err(&pci->dev, "pci_request_regions failed\n");
+ goto disable_pci;
+ }
+
+ adata = devm_kzalloc(&pci->dev, sizeof(struct rpl_dev_data),
+ GFP_KERNEL);
+ if (!adata) {
+ ret = -ENOMEM;
+ goto release_regions;
+ }
+
+ addr = pci_resource_start(pci, 0);
+ adata->acp6x_base = devm_ioremap(&pci->dev, addr,
+ pci_resource_len(pci, 0));
+ if (!adata->acp6x_base) {
+ ret = -ENOMEM;
+ goto release_regions;
+ }
+ pci_set_master(pci);
+ pci_set_drvdata(pci, adata);
+ ret = rpl_init(adata->acp6x_base);
+ if (ret)
+ goto release_regions;
+ pm_runtime_set_autosuspend_delay(&pci->dev, ACP_SUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(&pci->dev);
+ pm_runtime_put_noidle(&pci->dev);
+ pm_runtime_allow(&pci->dev);
+
+ return 0;
+release_regions:
+ pci_release_regions(pci);
+disable_pci:
+ pci_disable_device(pci);
+
+ return ret;
+}
+
+static int __maybe_unused snd_rpl_suspend(struct device *dev)
+{
+ struct rpl_dev_data *adata;
+ int ret;
+
+ adata = dev_get_drvdata(dev);
+ ret = rpl_deinit(adata->acp6x_base);
+ if (ret)
+ dev_err(dev, "ACP de-init failed\n");
+ return ret;
+}
+
+static int __maybe_unused snd_rpl_resume(struct device *dev)
+{
+ struct rpl_dev_data *adata;
+ int ret;
+
+ adata = dev_get_drvdata(dev);
+ ret = rpl_init(adata->acp6x_base);
+ if (ret)
+ dev_err(dev, "ACP init failed\n");
+ return ret;
+}
+
+static const struct dev_pm_ops rpl_pm = {
+ SET_RUNTIME_PM_OPS(snd_rpl_suspend, snd_rpl_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(snd_rpl_suspend, snd_rpl_resume)
+};
+
+static void snd_rpl_remove(struct pci_dev *pci)
+{
+ struct rpl_dev_data *adata;
+ int ret;
+
+ adata = pci_get_drvdata(pci);
+ ret = rpl_deinit(adata->acp6x_base);
+ if (ret)
+ dev_err(&pci->dev, "ACP de-init failed\n");
+ pm_runtime_forbid(&pci->dev);
+ pm_runtime_get_noresume(&pci->dev);
+ pci_release_regions(pci);
+ pci_disable_device(pci);
+}
+
+static const struct pci_device_id snd_rpl_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, ACP_DEVICE_ID),
+ .class = PCI_CLASS_MULTIMEDIA_OTHER << 8,
+ .class_mask = 0xffffff },
+ { 0, },
+};
+MODULE_DEVICE_TABLE(pci, snd_rpl_ids);
+
+static struct pci_driver rpl_acp6x_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = snd_rpl_ids,
+ .probe = snd_rpl_probe,
+ .remove = snd_rpl_remove,
+ .driver = {
+ .pm = &rpl_pm,
+ }
+};
+
+module_pci_driver(rpl_acp6x_driver);
+
+MODULE_DESCRIPTION("AMD ACP RPL PCI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/amd/rpl/rpl_acp6x.h b/sound/soc/amd/rpl/rpl_acp6x.h
new file mode 100644
index 000000000000..f5816a33632e
--- /dev/null
+++ b/sound/soc/amd/rpl/rpl_acp6x.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * AMD ACP Driver
+ *
+ * Copyright (C) 2021 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include "rpl_acp6x_chip_offset_byte.h"
+
+#define ACP_DEVICE_ID 0x15E2
+#define ACP6x_PHY_BASE_ADDRESS 0x1240000
+
+#define ACP_SOFT_RESET_SOFTRESET_AUDDONE_MASK 0x00010001
+#define ACP_PGFSM_CNTL_POWER_ON_MASK 1
+#define ACP_PGFSM_CNTL_POWER_OFF_MASK 0
+#define ACP_PGFSM_STATUS_MASK 3
+#define ACP_POWERED_ON 0
+#define ACP_POWER_ON_IN_PROGRESS 1
+#define ACP_POWERED_OFF 2
+#define ACP_POWER_OFF_IN_PROGRESS 3
+
+#define DELAY_US 5
+#define ACP_COUNTER 20000
+
+/* time in ms for runtime suspend delay */
+#define ACP_SUSPEND_DELAY_MS 2000
+
+static inline u32 rpl_acp_readl(void __iomem *base_addr)
+{
+ return readl(base_addr - ACP6x_PHY_BASE_ADDRESS);
+}
+
+static inline void rpl_acp_writel(u32 val, void __iomem *base_addr)
+{
+ writel(val, base_addr - ACP6x_PHY_BASE_ADDRESS);
+}
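The accessors above receive pointers that already have a register macro added to them: the offsets in rpl_acp6x_chip_offset_byte.h are absolute MMIO addresses that include the 0x1240000 ACP PHY base, so rpl_acp_readl()/rpl_acp_writel() subtract ACP6x_PHY_BASE_ADDRESS again before touching the ioremapped BAR. A small sketch of the resulting address math with a hypothetical helper (acp_base is the devm_ioremap() return value from snd_rpl_probe()):

/* Illustration only: reading ACP_PGFSM_STATUS (0x1241020) through the wrapper. */
static inline u32 rpl_read_pgfsm_status(void __iomem *acp_base)
{
        /* rpl_acp_readl(acp_base + ACP_PGFSM_STATUS) ends up as ... */
        return readl(acp_base + (ACP_PGFSM_STATUS - ACP6x_PHY_BASE_ADDRESS));
        /* ... i.e. readl(acp_base + 0x1020) */
}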
diff --git a/sound/soc/amd/rpl/rpl_acp6x_chip_offset_byte.h b/sound/soc/amd/rpl/rpl_acp6x_chip_offset_byte.h
new file mode 100644
index 000000000000..456498f5396d
--- /dev/null
+++ b/sound/soc/amd/rpl/rpl_acp6x_chip_offset_byte.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * AMD ACP 6.2 Register Documentation
+ *
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ */
+
+#ifndef _rpl_acp6x_OFFSET_HEADER
+#define _rpl_acp6x_OFFSET_HEADER
+
+/* Registers from ACP_CLKRST block */
+#define ACP_SOFT_RESET 0x1241000
+#define ACP_CONTROL 0x1241004
+#define ACP_STATUS 0x1241008
+#define ACP_DYNAMIC_CG_MASTER_CONTROL 0x1241010
+#define ACP_PGFSM_CONTROL 0x124101C
+#define ACP_PGFSM_STATUS 0x1241020
+#define ACP_CLKMUX_SEL 0x1241024
+
+/* Registers from ACP_AON block */
+#define ACP_PME_EN 0x1241400
+#define ACP_DEVICE_STATE 0x1241404
+#define AZ_DEVICE_STATE 0x1241408
+#define ACP_PIN_CONFIG 0x1241440
+#define ACP_PAD_PULLUP_CTRL 0x1241444
+#define ACP_PAD_PULLDOWN_CTRL 0x1241448
+#define ACP_PAD_DRIVE_STRENGTH_CTRL 0x124144C
+#define ACP_PAD_SCHMEN_CTRL 0x1241450
+
+#endif
diff --git a/sound/soc/amd/vangogh/acp5x-i2s.c b/sound/soc/amd/vangogh/acp5x-i2s.c
index 59a98f89a669..773e96f1b4dd 100644
--- a/sound/soc/amd/vangogh/acp5x-i2s.c
+++ b/sound/soc/amd/vangogh/acp5x-i2s.c
@@ -37,10 +37,10 @@ static int acp5x_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
}
mode = fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK;
switch (mode) {
- case SND_SOC_DAIFMT_CBC_CFC:
+ case SND_SOC_DAIFMT_BP_FP:
adata->master_mode = I2S_MASTER_MODE_ENABLE;
break;
- case SND_SOC_DAIFMT_CBP_CFP:
+ case SND_SOC_DAIFMT_BC_FC:
adata->master_mode = I2S_MASTER_MODE_DISABLE;
break;
}
@@ -345,6 +345,7 @@ static const struct snd_soc_dai_ops acp5x_i2s_dai_ops = {
static const struct snd_soc_component_driver acp5x_dai_component = {
.name = "acp5x-i2s",
+ .legacy_dai_naming = 1,
};
static struct snd_soc_dai_driver acp5x_i2s_dai = {
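The SND_SOC_DAIFMT_CB*_CF* to SND_SOC_DAIFMT_B*_F* switches in this and the following drivers are a pure rename: the old codec-centric names (codec as bit-clock/frame master or slave) remain in the ASoC headers as aliases of the new names, which instead describe the DAI being configured as provider or consumer of the bit clock and frame sync. The mapping, paraphrased from the alias macros in the ASoC headers:

/* Legacy codec-centric names are aliases of the new DAI-centric ones: */
#define SND_SOC_DAIFMT_CBP_CFP  SND_SOC_DAIFMT_BC_FC  /* codec provides BCLK+FSYNC -> this DAI consumes both */
#define SND_SOC_DAIFMT_CBP_CFC  SND_SOC_DAIFMT_BC_FP  /* codec provides BCLK, consumes FSYNC */
#define SND_SOC_DAIFMT_CBC_CFP  SND_SOC_DAIFMT_BP_FC  /* codec consumes BCLK, provides FSYNC */
#define SND_SOC_DAIFMT_CBC_CFC  SND_SOC_DAIFMT_BP_FP  /* codec consumes BCLK+FSYNC -> this DAI provides both */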
diff --git a/sound/soc/amd/vangogh/acp5x-mach.c b/sound/soc/amd/vangogh/acp5x-mach.c
index 727de46860b1..af3737ef9707 100644
--- a/sound/soc/amd/vangogh/acp5x-mach.c
+++ b/sound/soc/amd/vangogh/acp5x-mach.c
@@ -178,8 +178,7 @@ static int acp5x_cs35l41_hw_params(struct snd_pcm_substream *substream,
ret = 0;
for (i = 0; i < num_codecs; i++) {
codec_dai = asoc_rtd_to_codec(rtd, i);
- if ((strcmp(codec_dai->name, "spi-VLV1776:00") == 0) ||
- (strcmp(codec_dai->name, "spi-VLV1776:01") == 0)) {
+ if (strcmp(codec_dai->name, "cs35l41-pcm") == 0) {
switch (params_rate(params)) {
case 48000:
bclk_val = 1536000;
diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
index f06e6c1a7799..e0b24e1daef3 100644
--- a/sound/soc/amd/yc/acp6x-mach.c
+++ b/sound/soc/amd/yc/acp6x-mach.c
@@ -105,70 +105,70 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
.driver_data = &acp6x_card,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "21AW"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21CM"),
}
},
{
.driver_data = &acp6x_card,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "21AX"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21CN"),
}
},
{
.driver_data = &acp6x_card,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "21BN"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21CH"),
}
},
{
.driver_data = &acp6x_card,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "21BQ"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21CJ"),
}
},
{
.driver_data = &acp6x_card,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "21CH"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21CK"),
}
},
{
.driver_data = &acp6x_card,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "21CJ"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21CL"),
}
},
{
.driver_data = &acp6x_card,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "21CK"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21EM"),
}
},
{
.driver_data = &acp6x_card,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "21CL"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21EN"),
}
},
{
.driver_data = &acp6x_card,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "21D8"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21J5"),
}
},
{
.driver_data = &acp6x_card,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "21D9"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21J6"),
}
},
{}
diff --git a/sound/soc/amd/yc/acp6x-pdm-dma.c b/sound/soc/amd/yc/acp6x-pdm-dma.c
index 7e66393e4153..acecd6a4ec4b 100644
--- a/sound/soc/amd/yc/acp6x-pdm-dma.c
+++ b/sound/soc/amd/yc/acp6x-pdm-dma.c
@@ -335,12 +335,13 @@ static struct snd_soc_dai_driver acp6x_pdm_dai_driver = {
};
static const struct snd_soc_component_driver acp6x_pdm_component = {
- .name = DRV_NAME,
- .open = acp6x_pdm_dma_open,
- .close = acp6x_pdm_dma_close,
- .hw_params = acp6x_pdm_dma_hw_params,
- .pointer = acp6x_pdm_dma_pointer,
- .pcm_construct = acp6x_pdm_dma_new,
+ .name = DRV_NAME,
+ .open = acp6x_pdm_dma_open,
+ .close = acp6x_pdm_dma_close,
+ .hw_params = acp6x_pdm_dma_hw_params,
+ .pointer = acp6x_pdm_dma_pointer,
+ .pcm_construct = acp6x_pdm_dma_new,
+ .legacy_dai_naming = 1,
};
static int acp6x_pdm_audio_probe(struct platform_device *pdev)
diff --git a/sound/soc/amd/yc/pci-acp6x.c b/sound/soc/amd/yc/pci-acp6x.c
index 20f7a99783f2..77c5fa1f7af1 100644
--- a/sound/soc/amd/yc/pci-acp6x.c
+++ b/sound/soc/amd/yc/pci-acp6x.c
@@ -159,7 +159,7 @@ static int snd_acp6x_probe(struct pci_dev *pci,
case 0x6f:
break;
default:
- dev_err(&pci->dev, "acp6x pci device not found\n");
+ dev_dbg(&pci->dev, "acp6x pci device not found\n");
return -ENODEV;
}
if (pci_enable_device(pci)) {
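The demotion from dev_err() to dev_dbg() above reflects that several ACP generations share PCI device ID 0x15E2 and are told apart only by the PCI revision (0x6f here, 0x62 in the new RPL driver earlier in this diff), so a non-matching revision is the expected outcome for all but one of the platform drivers rather than an error. A sketch of that gate with a hypothetical helper:

/* Hypothetical helper: quietly decline revisions this driver does not handle. */
static bool acp6x_revision_matches(struct pci_dev *pci, u8 wanted)
{
        if (pci->revision == wanted)
                return true;

        /* Another ACP platform driver is expected to claim this revision. */
        dev_dbg(&pci->dev, "acp6x pci device not found\n");
        return false;
}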
diff --git a/sound/soc/atmel/atmel-classd.c b/sound/soc/atmel/atmel-classd.c
index 74b7b2611aa7..87d6d6ed026b 100644
--- a/sound/soc/atmel/atmel-classd.c
+++ b/sound/soc/atmel/atmel-classd.c
@@ -458,6 +458,7 @@ static const struct snd_soc_component_driver atmel_classd_cpu_dai_component = {
.num_controls = ARRAY_SIZE(atmel_classd_snd_controls),
.idle_bias_on = 1,
.use_pmdown_time = 1,
+ .legacy_dai_naming = 1,
};
/* ASoC sound card */
diff --git a/sound/soc/atmel/atmel-i2s.c b/sound/soc/atmel/atmel-i2s.c
index 1934767690b5..425d66edbf86 100644
--- a/sound/soc/atmel/atmel-i2s.c
+++ b/sound/soc/atmel/atmel-i2s.c
@@ -343,7 +343,7 @@ static int atmel_i2s_hw_params(struct snd_pcm_substream *substream,
}
switch (dev->fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
- case SND_SOC_DAIFMT_CBC_CFC:
+ case SND_SOC_DAIFMT_BP_FP:
/* codec is slave, so cpu is master */
mr |= ATMEL_I2SC_MR_MODE_MASTER;
ret = atmel_i2s_get_gck_param(dev, params_rate(params));
@@ -351,7 +351,7 @@ static int atmel_i2s_hw_params(struct snd_pcm_substream *substream,
return ret;
break;
- case SND_SOC_DAIFMT_CBP_CFP:
+ case SND_SOC_DAIFMT_BC_FC:
/* codec is master, so cpu is slave */
mr |= ATMEL_I2SC_MR_MODE_SLAVE;
dev->gck_param = NULL;
@@ -564,7 +564,8 @@ static struct snd_soc_dai_driver atmel_i2s_dai = {
};
static const struct snd_soc_component_driver atmel_i2s_component = {
- .name = "atmel-i2s",
+ .name = "atmel-i2s",
+ .legacy_dai_naming = 1,
};
static int atmel_i2s_sama5d2_mck_init(struct atmel_i2s_dev *dev,
diff --git a/sound/soc/atmel/atmel-pdmic.c b/sound/soc/atmel/atmel-pdmic.c
index ea34efac2fff..77ff12baead5 100644
--- a/sound/soc/atmel/atmel-pdmic.c
+++ b/sound/soc/atmel/atmel-pdmic.c
@@ -481,6 +481,7 @@ static const struct snd_soc_component_driver atmel_pdmic_cpu_dai_component = {
.num_controls = ARRAY_SIZE(atmel_pdmic_snd_controls),
.idle_bias_on = 1,
.use_pmdown_time = 1,
+ .legacy_dai_naming = 1,
};
/* ASoC sound card */
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
index c1dea8d62416..e868b7e028d6 100644
--- a/sound/soc/atmel/atmel_ssc_dai.c
+++ b/sound/soc/atmel/atmel_ssc_dai.c
@@ -210,7 +210,7 @@ static int atmel_ssc_hw_rule_rate(struct snd_pcm_hw_params *params,
return frame_size;
switch (ssc_p->daifmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
- case SND_SOC_DAIFMT_CBP_CFC:
+ case SND_SOC_DAIFMT_BC_FP:
if ((ssc_p->dir_mask & SSC_DIR_MASK_CAPTURE)
&& ssc->clk_from_rk_pin)
/* Receiver Frame Synchro (i.e. capture)
@@ -220,7 +220,7 @@ static int atmel_ssc_hw_rule_rate(struct snd_pcm_hw_params *params,
mck_div = 3;
break;
- case SND_SOC_DAIFMT_CBP_CFP:
+ case SND_SOC_DAIFMT_BC_FC:
if ((ssc_p->dir_mask & SSC_DIR_MASK_PLAYBACK)
&& !ssc->clk_from_rk_pin)
/* Transmit Frame Synchro (i.e. playback)
@@ -233,7 +233,7 @@ static int atmel_ssc_hw_rule_rate(struct snd_pcm_hw_params *params,
}
switch (ssc_p->daifmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
- case SND_SOC_DAIFMT_CBC_CFC:
+ case SND_SOC_DAIFMT_BP_FP:
r.num = ssc_p->mck_rate / mck_div / frame_size;
ret = snd_interval_ratnum(i, 1, &r, &num, &den);
@@ -243,8 +243,8 @@ static int atmel_ssc_hw_rule_rate(struct snd_pcm_hw_params *params,
}
break;
- case SND_SOC_DAIFMT_CBP_CFC:
- case SND_SOC_DAIFMT_CBP_CFP:
+ case SND_SOC_DAIFMT_BC_FP:
+ case SND_SOC_DAIFMT_BC_FC:
t.min = 8000;
t.max = ssc_p->mck_rate / mck_div / frame_size;
t.openmin = t.openmax = 0;
@@ -433,8 +433,8 @@ static int atmel_ssc_set_dai_clkdiv(struct snd_soc_dai *cpu_dai,
static int atmel_ssc_cfs(struct atmel_ssc_info *ssc_p)
{
switch (ssc_p->daifmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
- case SND_SOC_DAIFMT_CBP_CFC:
- case SND_SOC_DAIFMT_CBC_CFC:
+ case SND_SOC_DAIFMT_BC_FP:
+ case SND_SOC_DAIFMT_BP_FP:
return 1;
}
return 0;
@@ -444,8 +444,8 @@ static int atmel_ssc_cfs(struct atmel_ssc_info *ssc_p)
static int atmel_ssc_cbs(struct atmel_ssc_info *ssc_p)
{
switch (ssc_p->daifmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
- case SND_SOC_DAIFMT_CBC_CFP:
- case SND_SOC_DAIFMT_CBC_CFC:
+ case SND_SOC_DAIFMT_BP_FC:
+ case SND_SOC_DAIFMT_BP_FP:
return 1;
}
return 0;
@@ -762,7 +762,6 @@ static int atmel_ssc_trigger(struct snd_pcm_substream *substream,
return 0;
}
-#ifdef CONFIG_PM
static int atmel_ssc_suspend(struct snd_soc_component *component)
{
struct atmel_ssc_info *ssc_p;
@@ -821,10 +820,6 @@ static int atmel_ssc_resume(struct snd_soc_component *component)
return 0;
}
-#else /* CONFIG_PM */
-# define atmel_ssc_suspend NULL
-# define atmel_ssc_resume NULL
-#endif /* CONFIG_PM */
#define ATMEL_SSC_FORMATS (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE |\
SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
@@ -858,9 +853,10 @@ static struct snd_soc_dai_driver atmel_ssc_dai = {
};
static const struct snd_soc_component_driver atmel_ssc_component = {
- .name = "atmel-ssc",
- .suspend = atmel_ssc_suspend,
- .resume = atmel_ssc_resume,
+ .name = "atmel-ssc",
+ .suspend = pm_ptr(atmel_ssc_suspend),
+ .resume = pm_ptr(atmel_ssc_resume),
+ .legacy_dai_naming = 1,
};
static int asoc_ssc_init(struct device *dev)
diff --git a/sound/soc/atmel/mchp-i2s-mcc.c b/sound/soc/atmel/mchp-i2s-mcc.c
index 6d1227a1d67b..6dfb96c576ff 100644
--- a/sound/soc/atmel/mchp-i2s-mcc.c
+++ b/sound/soc/atmel/mchp-i2s-mcc.c
@@ -350,7 +350,7 @@ static int mchp_i2s_mcc_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
return -EINVAL;
/* We can't generate only FSYNC */
- if ((fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) == SND_SOC_DAIFMT_CBP_CFC)
+ if ((fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) == SND_SOC_DAIFMT_BC_FP)
return -EINVAL;
/* We can only reconfigure the IP when it's stopped */
@@ -547,19 +547,19 @@ static int mchp_i2s_mcc_hw_params(struct snd_pcm_substream *substream,
}
switch (dev->fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
- case SND_SOC_DAIFMT_CBC_CFC:
+ case SND_SOC_DAIFMT_BP_FP:
/* cpu is BCLK and LRC master */
mra |= MCHP_I2SMCC_MRA_MODE_MASTER;
if (dev->sysclk)
mra |= MCHP_I2SMCC_MRA_IMCKMODE_GEN;
set_divs = 1;
break;
- case SND_SOC_DAIFMT_CBC_CFP:
+ case SND_SOC_DAIFMT_BP_FC:
/* cpu is BCLK master */
mrb |= MCHP_I2SMCC_MRB_CLKSEL_INT;
set_divs = 1;
fallthrough;
- case SND_SOC_DAIFMT_CBP_CFP:
+ case SND_SOC_DAIFMT_BC_FC:
/* cpu is slave */
mra |= MCHP_I2SMCC_MRA_MODE_SLAVE;
if (dev->sysclk)
@@ -928,7 +928,8 @@ static struct snd_soc_dai_driver mchp_i2s_mcc_dai = {
};
static const struct snd_soc_component_driver mchp_i2s_mcc_component = {
- .name = "mchp-i2s-mcc",
+ .name = "mchp-i2s-mcc",
+ .legacy_dai_naming = 1,
};
#ifdef CONFIG_OF
diff --git a/sound/soc/atmel/mchp-pdmc.c b/sound/soc/atmel/mchp-pdmc.c
index a3856c73e221..44aefbd5b62c 100644
--- a/sound/soc/atmel/mchp-pdmc.c
+++ b/sound/soc/atmel/mchp-pdmc.c
@@ -423,6 +423,7 @@ static const struct snd_soc_component_driver mchp_pdmc_dai_component = {
.num_controls = ARRAY_SIZE(mchp_pdmc_snd_controls),
.open = &mchp_pdmc_open,
.close = &mchp_pdmc_close,
+ .legacy_dai_naming = 1,
};
static const unsigned int mchp_pdmc_1mic[] = {1};
@@ -492,8 +493,8 @@ static int mchp_pdmc_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
unsigned int fmt_format = fmt & SND_SOC_DAIFMT_FORMAT_MASK;
/* IP needs to be bitclock master */
- if (fmt_master != SND_SOC_DAIFMT_CBS_CFS &&
- fmt_master != SND_SOC_DAIFMT_CBS_CFM)
+ if (fmt_master != SND_SOC_DAIFMT_BP_FP &&
+ fmt_master != SND_SOC_DAIFMT_BP_FC)
return -EINVAL;
/* IP supports only PDM interface */
@@ -984,7 +985,7 @@ static int mchp_pdmc_probe(struct platform_device *pdev)
return -ENOMEM;
dd->dev = &pdev->dev;
- ret = mchp_pdmc_dt_init(dd);
+ ret = mchp_pdmc_dt_init(dd);
if (ret < 0)
return ret;
diff --git a/sound/soc/atmel/mchp-spdifrx.c b/sound/soc/atmel/mchp-spdifrx.c
index 5fc968483f2c..ec0705cc40fa 100644
--- a/sound/soc/atmel/mchp-spdifrx.c
+++ b/sound/soc/atmel/mchp-spdifrx.c
@@ -221,11 +221,11 @@ struct mchp_spdifrx_user_data {
};
struct mchp_spdifrx_mixer_control {
- struct mchp_spdifrx_ch_stat ch_stat[SPDIFRX_CHANNELS];
- struct mchp_spdifrx_user_data user_data[SPDIFRX_CHANNELS];
- bool ulock;
- bool badf;
- bool signal;
+ struct mchp_spdifrx_ch_stat ch_stat[SPDIFRX_CHANNELS];
+ struct mchp_spdifrx_user_data user_data[SPDIFRX_CHANNELS];
+ bool ulock;
+ bool badf;
+ bool signal;
};
struct mchp_spdifrx_dev {
@@ -288,15 +288,17 @@ static void mchp_spdifrx_isr_blockend_en(struct mchp_spdifrx_dev *dev)
spin_unlock_irqrestore(&dev->blockend_lock, flags);
}
-/* called from atomic context only */
+/* called from atomic/non-atomic context */
static void mchp_spdifrx_isr_blockend_dis(struct mchp_spdifrx_dev *dev)
{
- spin_lock(&dev->blockend_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->blockend_lock, flags);
dev->blockend_refcount--;
/* don't enable BLOCKEND interrupt if it's already enabled */
if (dev->blockend_refcount == 0)
regmap_write(dev->regmap, SPDIFRX_IDR, SPDIFRX_IR_BLOCKEND);
- spin_unlock(&dev->blockend_lock);
+ spin_unlock_irqrestore(&dev->blockend_lock, flags);
}
static irqreturn_t mchp_spdif_interrupt(int irq, void *dev_id)
@@ -575,6 +577,7 @@ static int mchp_spdifrx_subcode_ch_get(struct mchp_spdifrx_dev *dev,
if (ret <= 0) {
dev_dbg(dev->dev, "user data for channel %d timeout\n",
channel);
+ mchp_spdifrx_isr_blockend_dis(dev);
return ret;
}
@@ -846,7 +849,8 @@ static struct snd_soc_dai_driver mchp_spdifrx_dai = {
};
static const struct snd_soc_component_driver mchp_spdifrx_component = {
- .name = "mchp-spdifrx",
+ .name = "mchp-spdifrx",
+ .legacy_dai_naming = 1,
};
static const struct of_device_id mchp_spdifrx_dt_ids[] = {
diff --git a/sound/soc/atmel/mchp-spdiftx.c b/sound/soc/atmel/mchp-spdiftx.c
index d24380046435..4850a177803d 100644
--- a/sound/soc/atmel/mchp-spdiftx.c
+++ b/sound/soc/atmel/mchp-spdiftx.c
@@ -196,7 +196,6 @@ struct mchp_spdiftx_dev {
struct clk *pclk;
struct clk *gclk;
unsigned int fmt;
- const struct mchp_i2s_caps *caps;
int gclk_enabled:1;
};
@@ -341,12 +340,10 @@ static int mchp_spdiftx_trigger(struct snd_pcm_substream *substream, int cmd,
ret = regmap_write(dev->regmap, SPDIFTX_MR, mr);
spin_unlock(&ctrl->lock);
- if (ret) {
+ if (ret)
dev_err(dev->dev, "unable to disable TX: %d\n", ret);
- return ret;
- }
- return 0;
+ return ret;
}
static int mchp_spdiftx_hw_params(struct snd_pcm_substream *substream,
@@ -753,7 +750,8 @@ static struct snd_soc_dai_driver mchp_spdiftx_dai = {
};
static const struct snd_soc_component_driver mchp_spdiftx_component = {
- .name = "mchp-spdiftx",
+ .name = "mchp-spdiftx",
+ .legacy_dai_naming = 1,
};
static const struct of_device_id mchp_spdiftx_dt_ids[] = {
@@ -762,12 +760,10 @@ static const struct of_device_id mchp_spdiftx_dt_ids[] = {
},
{ /* sentinel */ }
};
-
MODULE_DEVICE_TABLE(of, mchp_spdiftx_dt_ids);
+
static int mchp_spdiftx_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
- const struct of_device_id *match;
struct mchp_spdiftx_dev *dev;
struct resource *mem;
struct regmap *regmap;
@@ -781,11 +777,6 @@ static int mchp_spdiftx_probe(struct platform_device *pdev)
if (!dev)
return -ENOMEM;
- /* Get hardware capabilities. */
- match = of_match_node(mchp_spdiftx_dt_ids, np);
- if (match)
- dev->caps = match->data;
-
/* Map I/O registers. */
base = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
if (IS_ERR(base))
@@ -847,12 +838,10 @@ static int mchp_spdiftx_probe(struct platform_device *pdev)
err = devm_snd_soc_register_component(&pdev->dev,
&mchp_spdiftx_component,
&mchp_spdiftx_dai, 1);
- if (err) {
+ if (err)
dev_err(&pdev->dev, "failed to register component: %d\n", err);
- return err;
- }
- return 0;
+ return err;
}
static struct platform_driver mchp_spdiftx_driver = {
diff --git a/sound/soc/atmel/mikroe-proto.c b/sound/soc/atmel/mikroe-proto.c
index ce46d8a0b7e4..954460719aa3 100644
--- a/sound/soc/atmel/mikroe-proto.c
+++ b/sound/soc/atmel/mikroe-proto.c
@@ -157,7 +157,9 @@ put_codec_node:
static int snd_proto_remove(struct platform_device *pdev)
{
- return snd_soc_unregister_card(&snd_proto);
+ snd_soc_unregister_card(&snd_proto);
+
+ return 0;
}
static const struct of_device_id snd_proto_of_match[] = {
diff --git a/sound/soc/au1x/ac97c.c b/sound/soc/au1x/ac97c.c
index 3b1700e665f5..b18512ca2578 100644
--- a/sound/soc/au1x/ac97c.c
+++ b/sound/soc/au1x/ac97c.c
@@ -223,7 +223,8 @@ static struct snd_soc_dai_driver au1xac97c_dai_driver = {
};
static const struct snd_soc_component_driver au1xac97c_component = {
- .name = "au1xac97c",
+ .name = "au1xac97c",
+ .legacy_dai_naming = 1,
};
static int au1xac97c_drvprobe(struct platform_device *pdev)
diff --git a/sound/soc/au1x/i2sc.c b/sound/soc/au1x/i2sc.c
index 740d4e052e4d..b15c8baa9ee4 100644
--- a/sound/soc/au1x/i2sc.c
+++ b/sound/soc/au1x/i2sc.c
@@ -121,7 +121,7 @@ static int au1xi2s_set_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
/* I2S controller only supports provider */
switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
- case SND_SOC_DAIFMT_CBC_CFC: /* CODEC consumer */
+ case SND_SOC_DAIFMT_BP_FP: /* CODEC consumer */
break;
default:
goto out;
@@ -227,7 +227,8 @@ static struct snd_soc_dai_driver au1xi2s_dai_driver = {
};
static const struct snd_soc_component_driver au1xi2s_component = {
- .name = "au1xi2s",
+ .name = "au1xi2s",
+ .legacy_dai_naming = 1,
};
static int au1xi2s_drvprobe(struct platform_device *pdev)
diff --git a/sound/soc/au1x/psc-ac97.c b/sound/soc/au1x/psc-ac97.c
index 05eb36991f14..b536394b9ca0 100644
--- a/sound/soc/au1x/psc-ac97.c
+++ b/sound/soc/au1x/psc-ac97.c
@@ -356,7 +356,8 @@ static const struct snd_soc_dai_driver au1xpsc_ac97_dai_template = {
};
static const struct snd_soc_component_driver au1xpsc_ac97_component = {
- .name = "au1xpsc-ac97",
+ .name = "au1xpsc-ac97",
+ .legacy_dai_naming = 1,
};
static int au1xpsc_ac97_drvprobe(struct platform_device *pdev)
diff --git a/sound/soc/au1x/psc-i2s.c b/sound/soc/au1x/psc-i2s.c
index b2b8896bb593..79b5ae4e494c 100644
--- a/sound/soc/au1x/psc-i2s.c
+++ b/sound/soc/au1x/psc-i2s.c
@@ -91,10 +91,10 @@ static int au1xpsc_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
}
switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
- case SND_SOC_DAIFMT_CBP_CFP: /* CODEC provider */
+ case SND_SOC_DAIFMT_BC_FC: /* CODEC provider */
ct |= PSC_I2SCFG_MS; /* PSC I2S consumer mode */
break;
- case SND_SOC_DAIFMT_CBC_CFC: /* CODEC consumer */
+ case SND_SOC_DAIFMT_BP_FP: /* CODEC consumer */
ct &= ~PSC_I2SCFG_MS; /* PSC I2S provider mode */
break;
default:
@@ -286,7 +286,8 @@ static const struct snd_soc_dai_driver au1xpsc_i2s_dai_template = {
};
static const struct snd_soc_component_driver au1xpsc_i2s_component = {
- .name = "au1xpsc-i2s",
+ .name = "au1xpsc-i2s",
+ .legacy_dai_naming = 1,
};
static int au1xpsc_i2s_drvprobe(struct platform_device *pdev)
diff --git a/sound/soc/bcm/bcm2835-i2s.c b/sound/soc/bcm/bcm2835-i2s.c
index e3fc4bee8cfd..f4d84774dac7 100644
--- a/sound/soc/bcm/bcm2835-i2s.c
+++ b/sound/soc/bcm/bcm2835-i2s.c
@@ -133,8 +133,8 @@ static void bcm2835_i2s_start_clock(struct bcm2835_i2s_dev *dev)
return;
switch (provider) {
- case SND_SOC_DAIFMT_CBC_CFC:
- case SND_SOC_DAIFMT_CBC_CFP:
+ case SND_SOC_DAIFMT_BP_FP:
+ case SND_SOC_DAIFMT_BP_FC:
clk_prepare_enable(dev->clk);
dev->clk_prepared = true;
break;
@@ -385,12 +385,12 @@ static int bcm2835_i2s_hw_params(struct snd_pcm_substream *substream,
/* Check if CPU is bit clock provider */
switch (dev->fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
- case SND_SOC_DAIFMT_CBC_CFC:
- case SND_SOC_DAIFMT_CBC_CFP:
+ case SND_SOC_DAIFMT_BP_FP:
+ case SND_SOC_DAIFMT_BP_FC:
bit_clock_provider = true;
break;
- case SND_SOC_DAIFMT_CBP_CFC:
- case SND_SOC_DAIFMT_CBP_CFP:
+ case SND_SOC_DAIFMT_BC_FP:
+ case SND_SOC_DAIFMT_BC_FC:
bit_clock_provider = false;
break;
default:
@@ -399,12 +399,12 @@ static int bcm2835_i2s_hw_params(struct snd_pcm_substream *substream,
/* Check if CPU is frame sync provider */
switch (dev->fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
- case SND_SOC_DAIFMT_CBC_CFC:
- case SND_SOC_DAIFMT_CBP_CFC:
+ case SND_SOC_DAIFMT_BP_FP:
+ case SND_SOC_DAIFMT_BC_FP:
frame_sync_provider = true;
break;
- case SND_SOC_DAIFMT_CBC_CFP:
- case SND_SOC_DAIFMT_CBP_CFP:
+ case SND_SOC_DAIFMT_BP_FC:
+ case SND_SOC_DAIFMT_BC_FC:
frame_sync_provider = false;
break;
default:
@@ -821,7 +821,8 @@ static const struct regmap_config bcm2835_regmap_config = {
};
static const struct snd_soc_component_driver bcm2835_i2s_component = {
- .name = "bcm2835-i2s-comp",
+ .name = "bcm2835-i2s-comp",
+ .legacy_dai_naming = 1,
};
static int bcm2835_i2s_probe(struct platform_device *pdev)
diff --git a/sound/soc/bcm/bcm63xx-i2s-whistler.c b/sound/soc/bcm/bcm63xx-i2s-whistler.c
index 527caf430715..2da1384ffe91 100644
--- a/sound/soc/bcm/bcm63xx-i2s-whistler.c
+++ b/sound/soc/bcm/bcm63xx-i2s-whistler.c
@@ -218,6 +218,7 @@ static struct snd_soc_dai_driver bcm63xx_i2s_dai = {
static const struct snd_soc_component_driver bcm63xx_i2s_component = {
.name = "bcm63xx",
+ .legacy_dai_naming = 1,
};
static int bcm63xx_i2s_dev_probe(struct platform_device *pdev)
diff --git a/sound/soc/bcm/cygnus-ssp.c b/sound/soc/bcm/cygnus-ssp.c
index b0254e724ec9..2a92e33e1fbf 100644
--- a/sound/soc/bcm/cygnus-ssp.c
+++ b/sound/soc/bcm/cygnus-ssp.c
@@ -839,11 +839,11 @@ static int cygnus_ssp_set_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
ssp_newcfg = 0;
switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
- case SND_SOC_DAIFMT_CBP_CFP:
+ case SND_SOC_DAIFMT_BC_FC:
ssp_newcfg |= BIT(I2S_OUT_CFGX_SLAVE_MODE);
aio->is_slave = 1;
break;
- case SND_SOC_DAIFMT_CBC_CFC:
+ case SND_SOC_DAIFMT_BP_FP:
ssp_newcfg &= ~BIT(I2S_OUT_CFGX_SLAVE_MODE);
aio->is_slave = 0;
break;
@@ -1191,9 +1191,10 @@ static const struct snd_soc_dai_driver cygnus_spdif_dai_info = {
static struct snd_soc_dai_driver cygnus_ssp_dai[CYGNUS_MAX_PORTS];
static const struct snd_soc_component_driver cygnus_ssp_component = {
- .name = "cygnus-audio",
- .suspend = cygnus_ssp_suspend,
- .resume = cygnus_ssp_resume,
+ .name = "cygnus-audio",
+ .suspend = cygnus_ssp_suspend,
+ .resume = cygnus_ssp_resume,
+ .legacy_dai_naming = 1,
};
/*
diff --git a/sound/soc/cirrus/ep93xx-ac97.c b/sound/soc/cirrus/ep93xx-ac97.c
index 16f9bb283b5c..37593abe6053 100644
--- a/sound/soc/cirrus/ep93xx-ac97.c
+++ b/sound/soc/cirrus/ep93xx-ac97.c
@@ -355,7 +355,8 @@ static struct snd_soc_dai_driver ep93xx_ac97_dai = {
};
static const struct snd_soc_component_driver ep93xx_ac97_component = {
- .name = "ep93xx-ac97",
+ .name = "ep93xx-ac97",
+ .legacy_dai_naming = 1,
};
static int ep93xx_ac97_probe(struct platform_device *pdev)
diff --git a/sound/soc/cirrus/ep93xx-i2s.c b/sound/soc/cirrus/ep93xx-i2s.c
index 2c8cd843d049..982151330c89 100644
--- a/sound/soc/cirrus/ep93xx-i2s.c
+++ b/sound/soc/cirrus/ep93xx-i2s.c
@@ -246,12 +246,12 @@ static int ep93xx_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
}
switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
- case SND_SOC_DAIFMT_CBC_CFC:
+ case SND_SOC_DAIFMT_BP_FP:
/* CPU is provider */
clk_cfg |= EP93XX_I2S_CLKCFG_MASTER;
break;
- case SND_SOC_DAIFMT_CBP_CFP:
+ case SND_SOC_DAIFMT_BC_FC:
/* Codec is provider */
clk_cfg &= ~EP93XX_I2S_CLKCFG_MASTER;
break;
@@ -422,9 +422,10 @@ static struct snd_soc_dai_driver ep93xx_i2s_dai = {
};
static const struct snd_soc_component_driver ep93xx_i2s_component = {
- .name = "ep93xx-i2s",
- .suspend = ep93xx_i2s_suspend,
- .resume = ep93xx_i2s_resume,
+ .name = "ep93xx-i2s",
+ .suspend = ep93xx_i2s_suspend,
+ .resume = ep93xx_i2s_resume,
+ .legacy_dai_naming = 1,
};
static int ep93xx_i2s_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/88pm860x-codec.c b/sound/soc/codecs/88pm860x-codec.c
index c6043fa58c74..fc65283031cd 100644
--- a/sound/soc/codecs/88pm860x-codec.c
+++ b/sound/soc/codecs/88pm860x-codec.c
@@ -1345,7 +1345,6 @@ static const struct snd_soc_component_driver soc_component_dev_pm860x = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int pm860x_codec_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 6165db92a629..d16b4efb88a7 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -219,6 +219,7 @@ config SND_SOC_ALL_CODECS
imply SND_SOC_TAS2562
imply SND_SOC_TAS2764
imply SND_SOC_TAS2770
+ imply SND_SOC_TAS2780
imply SND_SOC_TAS5086
imply SND_SOC_TAS571X
imply SND_SOC_TAS5720
@@ -308,6 +309,7 @@ config SND_SOC_ALL_CODECS
imply SND_SOC_WM9712
imply SND_SOC_WM9713
imply SND_SOC_WSA881X
+ imply SND_SOC_WSA883X
imply SND_SOC_ZL38060
help
Normally ASoC codec drivers are only built if a machine driver which
@@ -937,6 +939,16 @@ config SND_SOC_HDAC_HDA
tristate
select SND_HDA
+config SND_SOC_HDA
+ tristate "HD-Audio codec driver"
+ select SND_HDA_EXT_CORE
+ select SND_HDA
+ help
+ This enables HD-Audio codec support in the ASoC subsystem. Compared
+ to SND_SOC_HDAC_HDA, the driver's behavior is identical to the legacy
+ HD-Audio solution, including the dynamic resource allocation based on
+ actual codec capabilities.
+
config SND_SOC_ICS43432
tristate "ICS43423 and compatible i2s microphones"
@@ -1524,6 +1536,13 @@ config SND_SOC_TAS2770
tristate "Texas Instruments TAS2770 speaker amplifier"
depends on I2C
+config SND_SOC_TAS2780
+ tristate "Texas Instruments TAS2780 Mono Audio amplifier"
+ depends on I2C
+ help
+ Enable support for Texas Instruments TAS2780 high-efficiency
+ digital input mono Class-D audio power amplifiers.
+
config SND_SOC_TAS5086
tristate "Texas Instruments TAS5086 speaker amplifier"
depends on I2C
@@ -1975,6 +1994,15 @@ config SND_SOC_WSA881X
This enables support for Qualcomm WSA8810/WSA8815 Class-D
Smart Speaker Amplifier.
+config SND_SOC_WSA883X
+ tristate "WSA883X Codec"
+ depends on SOUNDWIRE
+ select REGMAP_SOUNDWIRE
+ help
+ This enables support for Qualcomm WSA8830/WSA8835 Class-D
+ Smart Speaker Amplifier.
+
config SND_SOC_ZL38060
tristate "Microsemi ZL38060 Connected Home Audio Processor"
depends on SPI_MASTER
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 28dc4edfd01f..92fd441d426a 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -106,6 +106,7 @@ snd-soc-es8328-spi-objs := es8328-spi.o
snd-soc-gtm601-objs := gtm601.o
snd-soc-hdac-hdmi-objs := hdac_hdmi.o
snd-soc-hdac-hda-objs := hdac_hda.o
+snd-soc-hda-codec-objs := hda.o hda-dai.o
snd-soc-ics43432-objs := ics43432.o
snd-soc-inno-rk3036-objs := inno_rk3036.o
snd-soc-isabelle-objs := isabelle.o
@@ -337,6 +338,7 @@ snd-soc-wm9712-objs := wm9712.o
snd-soc-wm9713-objs := wm9713.o
snd-soc-wm-hubs-objs := wm_hubs.o
snd-soc-wsa881x-objs := wsa881x.o
+snd-soc-wsa883x-objs := wsa883x.o
snd-soc-zl38060-objs := zl38060.o
# Amp
snd-soc-max9877-objs := max9877.o
@@ -346,6 +348,7 @@ snd-soc-tpa6130a2-objs := tpa6130a2.o
snd-soc-tas2552-objs := tas2552.o
snd-soc-tas2562-objs := tas2562.o
snd-soc-tas2764-objs := tas2764.o
+snd-soc-tas2780-objs := tas2780.o
# Mux
snd-soc-simple-mux-objs := simple-mux.o
@@ -458,6 +461,7 @@ obj-$(CONFIG_SND_SOC_ES8328_SPI)+= snd-soc-es8328-spi.o
obj-$(CONFIG_SND_SOC_GTM601) += snd-soc-gtm601.o
obj-$(CONFIG_SND_SOC_HDAC_HDMI) += snd-soc-hdac-hdmi.o
obj-$(CONFIG_SND_SOC_HDAC_HDA) += snd-soc-hdac-hda.o
+obj-$(CONFIG_SND_SOC_HDA) += snd-soc-hda-codec.o
obj-$(CONFIG_SND_SOC_ICS43432) += snd-soc-ics43432.o
obj-$(CONFIG_SND_SOC_INNO_RK3036) += snd-soc-inno-rk3036.o
obj-$(CONFIG_SND_SOC_ISABELLE) += snd-soc-isabelle.o
@@ -589,6 +593,7 @@ obj-$(CONFIG_SND_SOC_STI_SAS) += snd-soc-sti-sas.o
obj-$(CONFIG_SND_SOC_TAS2552) += snd-soc-tas2552.o
obj-$(CONFIG_SND_SOC_TAS2562) += snd-soc-tas2562.o
obj-$(CONFIG_SND_SOC_TAS2764) += snd-soc-tas2764.o
+obj-$(CONFIG_SND_SOC_TAS2780) += snd-soc-tas2780.o
obj-$(CONFIG_SND_SOC_TAS5086) += snd-soc-tas5086.o
obj-$(CONFIG_SND_SOC_TAS571X) += snd-soc-tas571x.o
obj-$(CONFIG_SND_SOC_TAS5720) += snd-soc-tas5720.o
@@ -688,6 +693,7 @@ obj-$(CONFIG_SND_SOC_WM9713) += snd-soc-wm9713.o
obj-$(CONFIG_SND_SOC_WM_ADSP) += snd-soc-wm-adsp.o
obj-$(CONFIG_SND_SOC_WM_HUBS) += snd-soc-wm-hubs.o
obj-$(CONFIG_SND_SOC_WSA881X) += snd-soc-wsa881x.o
+obj-$(CONFIG_SND_SOC_WSA883X) += snd-soc-wsa883x.o
obj-$(CONFIG_SND_SOC_ZL38060) += snd-soc-zl38060.o
# Amp
diff --git a/sound/soc/codecs/ab8500-codec.c b/sound/soc/codecs/ab8500-codec.c
index aefafb0b7b97..68342917419e 100644
--- a/sound/soc/codecs/ab8500-codec.c
+++ b/sound/soc/codecs/ab8500-codec.c
@@ -12,8 +12,6 @@
* Mikko Sarmanne <mikko.sarmanne@symbio.com>,
* Jarmo K. Kuronen <jarmo.kuronen@symbio.com>,
* for ST-Ericsson.
- *
- * License terms:
*/
#include <linux/kernel.h>
@@ -2525,7 +2523,6 @@ static const struct snd_soc_component_driver ab8500_component_driver = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int ab8500_codec_driver_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/ab8500-codec.h b/sound/soc/codecs/ab8500-codec.h
index 0ac87d0446c2..2a6f6409f1f8 100644
--- a/sound/soc/codecs/ab8500-codec.h
+++ b/sound/soc/codecs/ab8500-codec.h
@@ -11,8 +11,6 @@
* Mikko J. Lehto <mikko.lehto@symbio.com>,
* Mikko Sarmanne <mikko.sarmanne@symbio.com>,
* for ST-Ericsson.
- *
- * License terms:
*/
#ifndef AB8500_CODEC_REGISTERS_H
diff --git a/sound/soc/codecs/ac97.c b/sound/soc/codecs/ac97.c
index 6ad9c9443b5d..cc12052e1920 100644
--- a/sound/soc/codecs/ac97.c
+++ b/sound/soc/codecs/ac97.c
@@ -119,7 +119,6 @@ static const struct snd_soc_component_driver soc_component_dev_ac97 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int ac97_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/ad1836.c b/sound/soc/codecs/ad1836.c
index 29e1689da67f..2c64df96b5ce 100644
--- a/sound/soc/codecs/ad1836.c
+++ b/sound/soc/codecs/ad1836.c
@@ -332,7 +332,6 @@ static const struct snd_soc_component_driver soc_component_dev_ad1836 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct reg_default ad1836_reg_defaults[] = {
diff --git a/sound/soc/codecs/ad193x.c b/sound/soc/codecs/ad193x.c
index 30b98b4267e1..1d3c4d94b4ae 100644
--- a/sound/soc/codecs/ad193x.c
+++ b/sound/soc/codecs/ad193x.c
@@ -523,7 +523,6 @@ static const struct snd_soc_component_driver soc_component_dev_ad193x = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
const struct regmap_config ad193x_regmap_config = {
diff --git a/sound/soc/codecs/ad1980.c b/sound/soc/codecs/ad1980.c
index 9fd2023da218..5e777d7fd5d9 100644
--- a/sound/soc/codecs/ad1980.c
+++ b/sound/soc/codecs/ad1980.c
@@ -302,7 +302,6 @@ static const struct snd_soc_component_driver soc_component_dev_ad1980 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int ad1980_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/ad73311.c b/sound/soc/codecs/ad73311.c
index b98bf19f594e..f6090ac57e93 100644
--- a/sound/soc/codecs/ad73311.c
+++ b/sound/soc/codecs/ad73311.c
@@ -58,7 +58,6 @@ static const struct snd_soc_component_driver soc_component_dev_ad73311 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int ad73311_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/adau1373.c b/sound/soc/codecs/adau1373.c
index a9032b5c8d78..7f832d00ab17 100644
--- a/sound/soc/codecs/adau1373.c
+++ b/sound/soc/codecs/adau1373.c
@@ -1470,7 +1470,6 @@ static const struct snd_soc_component_driver adau1373_component_driver = {
.num_dapm_routes = ARRAY_SIZE(adau1373_dapm_routes),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int adau1373_i2c_probe(struct i2c_client *client)
diff --git a/sound/soc/codecs/adau1701.c b/sound/soc/codecs/adau1701.c
index 98768e5300f0..135a7db7fcf9 100644
--- a/sound/soc/codecs/adau1701.c
+++ b/sound/soc/codecs/adau1701.c
@@ -772,7 +772,6 @@ static const struct snd_soc_component_driver adau1701_component_drv = {
.set_sysclk = adau1701_set_sysclk,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config adau1701_regmap = {
diff --git a/sound/soc/codecs/adau1761.c b/sound/soc/codecs/adau1761.c
index 8f887227981f..3ccc7acac205 100644
--- a/sound/soc/codecs/adau1761.c
+++ b/sound/soc/codecs/adau1761.c
@@ -930,7 +930,6 @@ static const struct snd_soc_component_driver adau1761_component_driver = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
#define ADAU1761_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | \
diff --git a/sound/soc/codecs/adau1781.c b/sound/soc/codecs/adau1781.c
index 74dc3344b259..ff6be24863bf 100644
--- a/sound/soc/codecs/adau1781.c
+++ b/sound/soc/codecs/adau1781.c
@@ -439,7 +439,6 @@ static const struct snd_soc_component_driver adau1781_component_driver = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
#define ADAU1781_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | \
diff --git a/sound/soc/codecs/adau1977.c b/sound/soc/codecs/adau1977.c
index 5fcbdf2ec313..7a9672f94fc6 100644
--- a/sound/soc/codecs/adau1977.c
+++ b/sound/soc/codecs/adau1977.c
@@ -876,7 +876,6 @@ static const struct snd_soc_component_driver adau1977_component_driver = {
.num_dapm_routes = ARRAY_SIZE(adau1977_dapm_routes),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int adau1977_setup_micbias(struct adau1977 *adau1977)
diff --git a/sound/soc/codecs/adau7002.c b/sound/soc/codecs/adau7002.c
index 0e00de6ce3fb..401bafabc8eb 100644
--- a/sound/soc/codecs/adau7002.c
+++ b/sound/soc/codecs/adau7002.c
@@ -91,7 +91,6 @@ static const struct snd_soc_component_driver adau7002_component_driver = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int adau7002_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/adau7118.c b/sound/soc/codecs/adau7118.c
index 841229dcbca1..bbb097249887 100644
--- a/sound/soc/codecs/adau7118.c
+++ b/sound/soc/codecs/adau7118.c
@@ -442,7 +442,6 @@ static const struct snd_soc_component_driver adau7118_component_driver = {
.num_dapm_widgets = ARRAY_SIZE(adau7118_widgets),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static void adau7118_regulator_disable(void *data)
diff --git a/sound/soc/codecs/adav80x.c b/sound/soc/codecs/adav80x.c
index 90f3a5e9e31f..fcff35f26cec 100644
--- a/sound/soc/codecs/adav80x.c
+++ b/sound/soc/codecs/adav80x.c
@@ -842,7 +842,6 @@ static const struct snd_soc_component_driver adav80x_component_driver = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
int adav80x_bus_probe(struct device *dev, struct regmap *regmap)
diff --git a/sound/soc/codecs/ads117x.c b/sound/soc/codecs/ads117x.c
index 1d07e2699f04..44aa06e03486 100644
--- a/sound/soc/codecs/ads117x.c
+++ b/sound/soc/codecs/ads117x.c
@@ -62,7 +62,6 @@ static const struct snd_soc_component_driver soc_component_dev_ads117x = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int ads117x_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/ak4104.c b/sound/soc/codecs/ak4104.c
index dc4747c77a7a..ce99f30b4613 100644
--- a/sound/soc/codecs/ak4104.c
+++ b/sound/soc/codecs/ak4104.c
@@ -248,7 +248,6 @@ static const struct snd_soc_component_driver soc_component_device_ak4104 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config ak4104_regmap = {
diff --git a/sound/soc/codecs/ak4118.c b/sound/soc/codecs/ak4118.c
index 5c4a78c16733..b6d9a10bdccd 100644
--- a/sound/soc/codecs/ak4118.c
+++ b/sound/soc/codecs/ak4118.c
@@ -342,7 +342,6 @@ static const struct snd_soc_component_driver soc_component_drv_ak4118 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config ak4118_regmap = {
diff --git a/sound/soc/codecs/ak4375.c b/sound/soc/codecs/ak4375.c
index 9a7b662016b9..1ed004ba7cd2 100644
--- a/sound/soc/codecs/ak4375.c
+++ b/sound/soc/codecs/ak4375.c
@@ -473,7 +473,6 @@ static const struct snd_soc_component_driver soc_codec_dev_ak4375 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config ak4375_regmap = {
diff --git a/sound/soc/codecs/ak4458.c b/sound/soc/codecs/ak4458.c
index baa9ff5d0ce5..ea33cc83c86c 100644
--- a/sound/soc/codecs/ak4458.c
+++ b/sound/soc/codecs/ak4458.c
@@ -725,7 +725,6 @@ static const struct snd_soc_component_driver soc_codec_dev_ak4458 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct snd_soc_component_driver soc_codec_dev_ak4497 = {
@@ -740,7 +739,6 @@ static const struct snd_soc_component_driver soc_codec_dev_ak4497 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config ak4458_regmap = {
diff --git a/sound/soc/codecs/ak4535.c b/sound/soc/codecs/ak4535.c
index cc803e730c6e..8c8c93eea704 100644
--- a/sound/soc/codecs/ak4535.c
+++ b/sound/soc/codecs/ak4535.c
@@ -402,7 +402,6 @@ static const struct snd_soc_component_driver soc_component_dev_ak4535 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int ak4535_i2c_probe(struct i2c_client *i2c)
diff --git a/sound/soc/codecs/ak4554.c b/sound/soc/codecs/ak4554.c
index 8e60e2b56ad6..b9607de5a191 100644
--- a/sound/soc/codecs/ak4554.c
+++ b/sound/soc/codecs/ak4554.c
@@ -67,7 +67,6 @@ static const struct snd_soc_component_driver soc_component_dev_ak4554 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int ak4554_soc_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/ak4613.c b/sound/soc/codecs/ak4613.c
index 93606e5afd8f..f75c19ef3551 100644
--- a/sound/soc/codecs/ak4613.c
+++ b/sound/soc/codecs/ak4613.c
@@ -827,7 +827,6 @@ static const struct snd_soc_component_driver soc_component_dev_ak4613 = {
.num_dapm_routes = ARRAY_SIZE(ak4613_intercon),
.idle_bias_on = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static void ak4613_parse_of(struct ak4613_priv *priv,
@@ -921,18 +920,12 @@ static int ak4613_i2c_probe(struct i2c_client *i2c)
&ak4613_dai, 1);
}
-static int ak4613_i2c_remove(struct i2c_client *client)
-{
- return 0;
-}
-
static struct i2c_driver ak4613_i2c_driver = {
.driver = {
.name = "ak4613-codec",
.of_match_table = ak4613_of_match,
},
.probe_new = ak4613_i2c_probe,
- .remove = ak4613_i2c_remove,
.id_table = ak4613_i2c_id,
};
diff --git a/sound/soc/codecs/ak4641.c b/sound/soc/codecs/ak4641.c
index d8d9cc712d67..88851e94b045 100644
--- a/sound/soc/codecs/ak4641.c
+++ b/sound/soc/codecs/ak4641.c
@@ -535,7 +535,6 @@ static const struct snd_soc_component_driver soc_component_dev_ak4641 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config ak4641_regmap = {
diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c
index 3c20ff5595eb..914d5b1969f8 100644
--- a/sound/soc/codecs/ak4642.c
+++ b/sound/soc/codecs/ak4642.c
@@ -559,7 +559,6 @@ static const struct snd_soc_component_driver soc_component_dev_ak4642 = {
.num_dapm_routes = ARRAY_SIZE(ak4642_intercon),
.idle_bias_on = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config ak4642_regmap = {
diff --git a/sound/soc/codecs/ak4671.c b/sound/soc/codecs/ak4671.c
index 60edcbe56014..cd76765f8cc0 100644
--- a/sound/soc/codecs/ak4671.c
+++ b/sound/soc/codecs/ak4671.c
@@ -616,7 +616,6 @@ static const struct snd_soc_component_driver soc_component_dev_ak4671 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config ak4671_regmap = {
diff --git a/sound/soc/codecs/ak5386.c b/sound/soc/codecs/ak5386.c
index c76bfff24602..0c5e00679c7d 100644
--- a/sound/soc/codecs/ak5386.c
+++ b/sound/soc/codecs/ak5386.c
@@ -77,7 +77,6 @@ static const struct snd_soc_component_driver soc_component_ak5386 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int ak5386_set_dai_fmt(struct snd_soc_dai *codec_dai,
diff --git a/sound/soc/codecs/ak5558.c b/sound/soc/codecs/ak5558.c
index c94cfde3e4a8..887d2c04d647 100644
--- a/sound/soc/codecs/ak5558.c
+++ b/sound/soc/codecs/ak5558.c
@@ -393,7 +393,6 @@ static const struct snd_soc_component_driver soc_codec_dev_ak5558 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct snd_soc_component_driver soc_codec_dev_ak5552 = {
@@ -408,7 +407,6 @@ static const struct snd_soc_component_driver soc_codec_dev_ak5552 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config ak5558_regmap = {
diff --git a/sound/soc/codecs/alc5623.c b/sound/soc/codecs/alc5623.c
index 8e6235d2c544..9ef01b1dd294 100644
--- a/sound/soc/codecs/alc5623.c
+++ b/sound/soc/codecs/alc5623.c
@@ -956,7 +956,6 @@ static const struct snd_soc_component_driver soc_component_device_alc5623 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config alc5623_regmap = {
diff --git a/sound/soc/codecs/alc5632.c b/sound/soc/codecs/alc5632.c
index 641bdfddae16..a770704a4e17 100644
--- a/sound/soc/codecs/alc5632.c
+++ b/sound/soc/codecs/alc5632.c
@@ -1078,7 +1078,6 @@ static const struct snd_soc_component_driver soc_component_device_alc5632 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config alc5632_regmap = {
diff --git a/sound/soc/codecs/bd28623.c b/sound/soc/codecs/bd28623.c
index a6267cb86d86..82a94211d012 100644
--- a/sound/soc/codecs/bd28623.c
+++ b/sound/soc/codecs/bd28623.c
@@ -161,7 +161,6 @@ static const struct snd_soc_component_driver soc_codec_bd = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static struct snd_soc_dai_driver soc_dai_bd = {
diff --git a/sound/soc/codecs/bt-sco.c b/sound/soc/codecs/bt-sco.c
index cf17b9741bd8..4086b6a53de8 100644
--- a/sound/soc/codecs/bt-sco.c
+++ b/sound/soc/codecs/bt-sco.c
@@ -69,7 +69,6 @@ static const struct snd_soc_component_driver soc_component_dev_bt_sco = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int bt_sco_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/cpcap.c b/sound/soc/codecs/cpcap.c
index ffdf8b615efa..4f9dabd9d78a 100644
--- a/sound/soc/codecs/cpcap.c
+++ b/sound/soc/codecs/cpcap.c
@@ -1660,7 +1660,6 @@ static struct snd_soc_component_driver soc_codec_dev_cpcap = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int cpcap_codec_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/cq93vc.c b/sound/soc/codecs/cq93vc.c
index 0aae5790222a..14403b76c724 100644
--- a/sound/soc/codecs/cq93vc.c
+++ b/sound/soc/codecs/cq93vc.c
@@ -126,7 +126,6 @@ static const struct snd_soc_component_driver soc_component_dev_cq93vc = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int cq93vc_platform_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/cros_ec_codec.c b/sound/soc/codecs/cros_ec_codec.c
index 8b0a9c788a26..11e7b3f6d410 100644
--- a/sound/soc/codecs/cros_ec_codec.c
+++ b/sound/soc/codecs/cros_ec_codec.c
@@ -995,6 +995,7 @@ static int cros_ec_codec_platform_probe(struct platform_device *pdev)
dev_dbg(dev, "ap_shm_phys_addr=%#llx len=%#x\n",
priv->ap_shm_phys_addr, priv->ap_shm_len);
}
+ of_node_put(node);
}
#endif
diff --git a/sound/soc/codecs/cs35l32.c b/sound/soc/codecs/cs35l32.c
index badfc55bc5fa..8ff6f66be86f 100644
--- a/sound/soc/codecs/cs35l32.c
+++ b/sound/soc/codecs/cs35l32.c
@@ -236,7 +236,6 @@ static const struct snd_soc_component_driver soc_component_dev_cs35l32 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
/* Current and threshold powerup sequence Pg37 in datasheet */
diff --git a/sound/soc/codecs/cs35l33.c b/sound/soc/codecs/cs35l33.c
index 47dc0f6d90a2..082025fa0370 100644
--- a/sound/soc/codecs/cs35l33.c
+++ b/sound/soc/codecs/cs35l33.c
@@ -840,7 +840,6 @@ static const struct snd_soc_component_driver soc_component_dev_cs35l33 = {
.num_dapm_routes = ARRAY_SIZE(cs35l33_audio_map),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config cs35l33_regmap = {
diff --git a/sound/soc/codecs/cs35l34.c b/sound/soc/codecs/cs35l34.c
index 50d509a06071..472ac982779b 100644
--- a/sound/soc/codecs/cs35l34.c
+++ b/sound/soc/codecs/cs35l34.c
@@ -787,7 +787,6 @@ static const struct snd_soc_component_driver soc_component_dev_cs35l34 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static struct regmap_config cs35l34_regmap = {
diff --git a/sound/soc/codecs/cs35l35.c b/sound/soc/codecs/cs35l35.c
index 6b70afb70a67..714a759dca21 100644
--- a/sound/soc/codecs/cs35l35.c
+++ b/sound/soc/codecs/cs35l35.c
@@ -1087,7 +1087,6 @@ static const struct snd_soc_component_driver soc_component_dev_cs35l35 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static struct regmap_config cs35l35_regmap = {
diff --git a/sound/soc/codecs/cs35l36.c b/sound/soc/codecs/cs35l36.c
index dfe85dc2cd20..4dc13e6f4874 100644
--- a/sound/soc/codecs/cs35l36.c
+++ b/sound/soc/codecs/cs35l36.c
@@ -1300,7 +1300,6 @@ static const struct snd_soc_component_driver soc_component_dev_cs35l36 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static struct regmap_config cs35l36_regmap = {
diff --git a/sound/soc/codecs/cs35l41-lib.c b/sound/soc/codecs/cs35l41-lib.c
index 198cfe54a46f..04be71435491 100644
--- a/sound/soc/codecs/cs35l41-lib.c
+++ b/sound/soc/codecs/cs35l41-lib.c
@@ -1308,7 +1308,8 @@ int cs35l41_set_cspl_mbox_cmd(struct device *dev, struct regmap *regmap,
return 0;
}
- dev_err(dev, "Failed to set mailbox cmd %u (status %u)\n", cmd, sts);
+ if (cmd != CSPL_MBOX_CMD_OUT_OF_HIBERNATE)
+ dev_err(dev, "Failed to set mailbox cmd %u (status %u)\n", cmd, sts);
return -ENOMSG;
}
@@ -1327,6 +1328,85 @@ int cs35l41_write_fs_errata(struct device *dev, struct regmap *regmap)
}
EXPORT_SYMBOL_GPL(cs35l41_write_fs_errata);
+int cs35l41_enter_hibernate(struct device *dev, struct regmap *regmap,
+ enum cs35l41_boost_type b_type)
+{
+ if (!cs35l41_safe_reset(regmap, b_type)) {
+ dev_dbg(dev, "System does not support Suspend\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "Enter hibernate\n");
+ regmap_write(regmap, CS35L41_WAKESRC_CTL, 0x0088);
+ regmap_write(regmap, CS35L41_WAKESRC_CTL, 0x0188);
+
+ // Don't wait for ACK since bus activity would wake the device
+ regmap_write(regmap, CS35L41_DSP_VIRT1_MBOX_1, CSPL_MBOX_CMD_HIBERNATE);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cs35l41_enter_hibernate);
+
+static void cs35l41_wait_for_pwrmgt_sts(struct device *dev, struct regmap *regmap)
+{
+ const int pwrmgt_retries = 10;
+ unsigned int sts;
+ int i, ret;
+
+ for (i = 0; i < pwrmgt_retries; i++) {
+ ret = regmap_read(regmap, CS35L41_PWRMGT_STS, &sts);
+ if (ret)
+ dev_err(dev, "Failed to read PWRMGT_STS: %d\n", ret);
+ else if (!(sts & CS35L41_WR_PEND_STS_MASK))
+ return;
+
+ udelay(20);
+ }
+
+ dev_err(dev, "Timed out reading PWRMGT_STS\n");
+}
+
+int cs35l41_exit_hibernate(struct device *dev, struct regmap *regmap)
+{
+ const int wake_retries = 20;
+ const int sleep_retries = 5;
+ int ret, i, j;
+
+ for (i = 0; i < sleep_retries; i++) {
+ dev_dbg(dev, "Exit hibernate\n");
+
+ for (j = 0; j < wake_retries; j++) {
+ ret = cs35l41_set_cspl_mbox_cmd(dev, regmap,
+ CSPL_MBOX_CMD_OUT_OF_HIBERNATE);
+ if (!ret)
+ break;
+
+ usleep_range(100, 200);
+ }
+
+ if (j < wake_retries) {
+ dev_dbg(dev, "Wake success at cycle: %d\n", j);
+ return 0;
+ }
+
+ dev_err(dev, "Wake failed, re-enter hibernate: %d\n", ret);
+
+ cs35l41_wait_for_pwrmgt_sts(dev, regmap);
+ regmap_write(regmap, CS35L41_WAKESRC_CTL, 0x0088);
+
+ cs35l41_wait_for_pwrmgt_sts(dev, regmap);
+ regmap_write(regmap, CS35L41_WAKESRC_CTL, 0x0188);
+
+ cs35l41_wait_for_pwrmgt_sts(dev, regmap);
+ regmap_write(regmap, CS35L41_PWRMGT_CTL, 0x3);
+ }
+
+ dev_err(dev, "Timed out waking device\n");
+
+ return -ETIMEDOUT;
+}
+EXPORT_SYMBOL_GPL(cs35l41_exit_hibernate);
+
MODULE_DESCRIPTION("CS35L41 library");
MODULE_AUTHOR("David Rhodes, Cirrus Logic Inc, <david.rhodes@cirrus.com>");
MODULE_AUTHOR("Lucas Tanure, Cirrus Logic Inc, <tanureal@opensource.cirrus.com>");
diff --git a/sound/soc/codecs/cs35l41-spi.c b/sound/soc/codecs/cs35l41-spi.c
index 9e19c946a66b..5c8bb24909eb 100644
--- a/sound/soc/codecs/cs35l41-spi.c
+++ b/sound/soc/codecs/cs35l41-spi.c
@@ -74,6 +74,7 @@ MODULE_DEVICE_TABLE(of, cs35l41_of_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id cs35l41_acpi_match[] = {
{ "CSC3541", 0 }, /* Cirrus Logic PnP ID + part ID */
+ { "CLSA3541", 0 }, /* Cirrus Logic PnP ID + part ID */
{},
};
MODULE_DEVICE_TABLE(acpi, cs35l41_acpi_match);
diff --git a/sound/soc/codecs/cs35l41.c b/sound/soc/codecs/cs35l41.c
index 71ab2a5d1c55..c223d83e02cf 100644
--- a/sound/soc/codecs/cs35l41.c
+++ b/sound/soc/codecs/cs35l41.c
@@ -6,6 +6,7 @@
//
// Author: David Rhodes <david.rhodes@cirrus.com>
+#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -1142,6 +1143,30 @@ err_dsp:
return ret;
}
+static int cs35l41_acpi_get_name(struct cs35l41_private *cs35l41)
+{
+ acpi_handle handle = ACPI_HANDLE(cs35l41->dev);
+ const char *sub;
+
+ /* If there is no ACPI_HANDLE, there is no ACPI for this system, return 0 */
+ if (!handle)
+ return 0;
+
+ sub = acpi_get_subsystem_id(handle);
+ if (IS_ERR(sub)) {
+ /* If bad ACPI, return 0 and fall back to the legacy firmware path, otherwise fail */
+ if (PTR_ERR(sub) == -ENODATA)
+ return 0;
+ else
+ return PTR_ERR(sub);
+ }
+
+ cs35l41->dsp.system_name = sub;
+ dev_dbg(cs35l41->dev, "Subsystem ID: %s\n", cs35l41->dsp.system_name);
+
+ return 0;
+}
+
int cs35l41_probe(struct cs35l41_private *cs35l41, const struct cs35l41_hw_cfg *hw_cfg)
{
u32 regid, reg_revid, i, mtl_revid, int_status, chipid_match;
@@ -1270,6 +1295,10 @@ int cs35l41_probe(struct cs35l41_private *cs35l41, const struct cs35l41_hw_cfg *
goto err;
}
+ ret = cs35l41_acpi_get_name(cs35l41);
+ if (ret < 0)
+ goto err;
+
ret = cs35l41_dsp_init(cs35l41);
if (ret < 0)
goto err;
@@ -1316,6 +1345,7 @@ void cs35l41_remove(struct cs35l41_private *cs35l41)
pm_runtime_disable(cs35l41->dev);
regmap_write(cs35l41->regmap, CS35L41_IRQ1_MASK1, 0xFFFFFFFF);
+ kfree(cs35l41->dsp.system_name);
wm_adsp2_remove(&cs35l41->dsp);
cs35l41_safe_reset(cs35l41->regmap, cs35l41->hw_cfg.bst_type);
@@ -1335,15 +1365,7 @@ static int __maybe_unused cs35l41_runtime_suspend(struct device *dev)
if (!cs35l41->dsp.preloaded || !cs35l41->dsp.cs_dsp.running)
return 0;
- dev_dbg(cs35l41->dev, "Enter hibernate\n");
-
- cs35l41_safe_reset(cs35l41->regmap, cs35l41->hw_cfg.bst_type);
- regmap_write(cs35l41->regmap, CS35L41_WAKESRC_CTL, 0x0088);
- regmap_write(cs35l41->regmap, CS35L41_WAKESRC_CTL, 0x0188);
-
- // Don't wait for ACK since bus activity would wake the device
- regmap_write(cs35l41->regmap, CS35L41_DSP_VIRT1_MBOX_1,
- CSPL_MBOX_CMD_HIBERNATE);
+ cs35l41_enter_hibernate(dev, cs35l41->regmap, cs35l41->hw_cfg.bst_type);
regcache_cache_only(cs35l41->regmap, true);
regcache_mark_dirty(cs35l41->regmap);
@@ -1351,65 +1373,6 @@ static int __maybe_unused cs35l41_runtime_suspend(struct device *dev)
return 0;
}
-static void cs35l41_wait_for_pwrmgt_sts(struct cs35l41_private *cs35l41)
-{
- const int pwrmgt_retries = 10;
- unsigned int sts;
- int i, ret;
-
- for (i = 0; i < pwrmgt_retries; i++) {
- ret = regmap_read(cs35l41->regmap, CS35L41_PWRMGT_STS, &sts);
- if (ret)
- dev_err(cs35l41->dev, "Failed to read PWRMGT_STS: %d\n", ret);
- else if (!(sts & CS35L41_WR_PEND_STS_MASK))
- return;
-
- udelay(20);
- }
-
- dev_err(cs35l41->dev, "Timed out reading PWRMGT_STS\n");
-}
-
-static int cs35l41_exit_hibernate(struct cs35l41_private *cs35l41)
-{
- const int wake_retries = 20;
- const int sleep_retries = 5;
- int ret, i, j;
-
- for (i = 0; i < sleep_retries; i++) {
- dev_dbg(cs35l41->dev, "Exit hibernate\n");
-
- for (j = 0; j < wake_retries; j++) {
- ret = cs35l41_set_cspl_mbox_cmd(cs35l41->dev, cs35l41->regmap,
- CSPL_MBOX_CMD_OUT_OF_HIBERNATE);
- if (!ret)
- break;
-
- usleep_range(100, 200);
- }
-
- if (j < wake_retries) {
- dev_dbg(cs35l41->dev, "Wake success at cycle: %d\n", j);
- return 0;
- }
-
- dev_err(cs35l41->dev, "Wake failed, re-enter hibernate: %d\n", ret);
-
- cs35l41_wait_for_pwrmgt_sts(cs35l41);
- regmap_write(cs35l41->regmap, CS35L41_WAKESRC_CTL, 0x0088);
-
- cs35l41_wait_for_pwrmgt_sts(cs35l41);
- regmap_write(cs35l41->regmap, CS35L41_WAKESRC_CTL, 0x0188);
-
- cs35l41_wait_for_pwrmgt_sts(cs35l41);
- regmap_write(cs35l41->regmap, CS35L41_PWRMGT_CTL, 0x3);
- }
-
- dev_err(cs35l41->dev, "Timed out waking device\n");
-
- return -ETIMEDOUT;
-}
-
static int __maybe_unused cs35l41_runtime_resume(struct device *dev)
{
struct cs35l41_private *cs35l41 = dev_get_drvdata(dev);
@@ -1422,7 +1385,7 @@ static int __maybe_unused cs35l41_runtime_resume(struct device *dev)
regcache_cache_only(cs35l41->regmap, false);
- ret = cs35l41_exit_hibernate(cs35l41);
+ ret = cs35l41_exit_hibernate(cs35l41->dev, cs35l41->regmap);
if (ret)
return ret;
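/*
 * Editorial sketch: cs35l41_acpi_get_name() above resolves the ACPI _SUB
 * (subsystem) ID and stores it as dsp.system_name so system-specific firmware
 * can be selected; cs35l41_remove() frees it. A minimal reuse of the same
 * pattern might look like this -- my_get_system_name() is hypothetical, while
 * acpi_get_subsystem_id() and the -ENODATA fallback come from the hunk above.
 */
#include <linux/acpi.h>

static const char *my_get_system_name(struct device *dev)
{
	acpi_handle handle = ACPI_HANDLE(dev);
	const char *sub;

	if (!handle)
		return NULL;	/* no ACPI: use the generic firmware path */

	sub = acpi_get_subsystem_id(handle);
	if (IS_ERR(sub))
		return NULL;	/* no _SUB object or lookup failed */

	return sub;		/* caller must kfree(), as cs35l41_remove() does */
}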
diff --git a/sound/soc/codecs/cs35l45-i2c.c b/sound/soc/codecs/cs35l45-i2c.c
index 38a4dbc9e9fe..06c2ddffb9c5 100644
--- a/sound/soc/codecs/cs35l45-i2c.c
+++ b/sound/soc/codecs/cs35l45-i2c.c
@@ -40,7 +40,9 @@ static int cs35l45_i2c_remove(struct i2c_client *client)
{
struct cs35l45_private *cs35l45 = i2c_get_clientdata(client);
- return cs35l45_remove(cs35l45);
+ cs35l45_remove(cs35l45);
+
+ return 0;
}
static const struct of_device_id cs35l45_of_match[] = {
diff --git a/sound/soc/codecs/cs35l45.c b/sound/soc/codecs/cs35l45.c
index 2367c1a4c10e..d15b3b77c7eb 100644
--- a/sound/soc/codecs/cs35l45.c
+++ b/sound/soc/codecs/cs35l45.c
@@ -500,6 +500,8 @@ static const struct snd_soc_component_driver cs35l45_component = {
.num_controls = ARRAY_SIZE(cs35l45_controls),
.name = "cs35l45",
+
+ .endianness = 1,
};
static int __maybe_unused cs35l45_runtime_suspend(struct device *dev)
@@ -665,7 +667,7 @@ err:
}
EXPORT_SYMBOL_NS_GPL(cs35l45_probe, SND_SOC_CS35L45);
-int cs35l45_remove(struct cs35l45_private *cs35l45)
+void cs35l45_remove(struct cs35l45_private *cs35l45)
{
pm_runtime_disable(cs35l45->dev);
@@ -673,8 +675,6 @@ int cs35l45_remove(struct cs35l45_private *cs35l45)
regulator_disable(cs35l45->vdd_a);
/* VDD_BATT must be the last to power-off */
regulator_disable(cs35l45->vdd_batt);
-
- return 0;
}
EXPORT_SYMBOL_NS_GPL(cs35l45_remove, SND_SOC_CS35L45);
diff --git a/sound/soc/codecs/cs35l45.h b/sound/soc/codecs/cs35l45.h
index 4e266d19cd1c..53fe9d2b7b15 100644
--- a/sound/soc/codecs/cs35l45.h
+++ b/sound/soc/codecs/cs35l45.h
@@ -209,9 +209,9 @@ struct cs35l45_private {
extern const struct dev_pm_ops cs35l45_pm_ops;
extern const struct regmap_config cs35l45_i2c_regmap;
extern const struct regmap_config cs35l45_spi_regmap;
-int cs35l45_apply_patch(struct cs35l45_private *cs43l45);
+int cs35l45_apply_patch(struct cs35l45_private *cs35l45);
unsigned int cs35l45_get_clk_freq_id(unsigned int freq);
int cs35l45_probe(struct cs35l45_private *cs35l45);
-int cs35l45_remove(struct cs35l45_private *cs35l45);
+void cs35l45_remove(struct cs35l45_private *cs35l45);
#endif /* CS35L45_H */
diff --git a/sound/soc/codecs/cs4234.c b/sound/soc/codecs/cs4234.c
index 881c5ba70c0e..b49a3cf21ebe 100644
--- a/sound/soc/codecs/cs4234.c
+++ b/sound/soc/codecs/cs4234.c
@@ -660,7 +660,6 @@ static const struct snd_soc_component_driver soc_component_cs4234 = {
.controls = cs4234_snd_controls,
.num_controls = ARRAY_SIZE(cs4234_snd_controls),
.set_bias_level = cs4234_set_bias_level,
- .non_legacy_dai_naming = 1,
.idle_bias_on = 1,
.suspend_bias_off = 1,
.endianness = 1,
diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c
index 86bfa8d5ec78..76c19802d5fe 100644
--- a/sound/soc/codecs/cs4265.c
+++ b/sound/soc/codecs/cs4265.c
@@ -553,7 +553,6 @@ static const struct snd_soc_component_driver soc_component_cs4265 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config cs4265_regmap = {
diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c
index 531f63b01554..ba67e43edf35 100644
--- a/sound/soc/codecs/cs4270.c
+++ b/sound/soc/codecs/cs4270.c
@@ -619,7 +619,6 @@ static const struct snd_soc_component_driver soc_component_device_cs4270 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
/*
@@ -663,7 +662,6 @@ static int cs4270_i2c_remove(struct i2c_client *i2c_client)
/**
* cs4270_i2c_probe - initialize the I2C interface of the CS4270
* @i2c_client: the I2C client object
- * @id: the I2C device ID (ignored)
*
* This function is called whenever the I2C subsystem finds a device that
* matches the device ID given via a prior call to i2c_add_driver().
diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
index 7663f89ac6a2..2021cf442606 100644
--- a/sound/soc/codecs/cs4271.c
+++ b/sound/soc/codecs/cs4271.c
@@ -642,7 +642,6 @@ static const struct snd_soc_component_driver soc_component_dev_cs4271 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int cs4271_common_probe(struct device *dev,
diff --git a/sound/soc/codecs/cs42l42.c b/sound/soc/codecs/cs42l42.c
index 4fade2388797..d545a593a251 100644
--- a/sound/soc/codecs/cs42l42.c
+++ b/sound/soc/codecs/cs42l42.c
@@ -581,7 +581,6 @@ static const struct snd_soc_component_driver soc_component_dev_cs42l42 = {
.num_controls = ARRAY_SIZE(cs42l42_snd_controls),
.idle_bias_on = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
/* Switch to SCLK. Atomic delay after the write to allow the switch to complete. */
@@ -1701,8 +1700,7 @@ static irqreturn_t cs42l42_irq_thread(int irq, void *data)
break;
default:
- if (cs42l42->plug_state != CS42L42_TS_TRANS)
- cs42l42->plug_state = CS42L42_TS_TRANS;
+ cs42l42->plug_state = CS42L42_TS_TRANS;
}
}
diff --git a/sound/soc/codecs/cs42l51.c b/sound/soc/codecs/cs42l51.c
index 0e933181b5db..51721edd8f53 100644
--- a/sound/soc/codecs/cs42l51.c
+++ b/sound/soc/codecs/cs42l51.c
@@ -600,7 +600,6 @@ static const struct snd_soc_component_driver soc_component_device_cs42l51 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static bool cs42l51_writeable_reg(struct device *dev, unsigned int reg)
diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c
index 10e696406a71..90bf535fc5a5 100644
--- a/sound/soc/codecs/cs42l52.c
+++ b/sound/soc/codecs/cs42l52.c
@@ -1061,7 +1061,6 @@ static const struct snd_soc_component_driver soc_component_dev_cs42l52 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
/* Current and threshold powerup sequence Pg37 */
diff --git a/sound/soc/codecs/cs42l56.c b/sound/soc/codecs/cs42l56.c
index 510c94265b1f..03e2540a0ba1 100644
--- a/sound/soc/codecs/cs42l56.c
+++ b/sound/soc/codecs/cs42l56.c
@@ -1114,7 +1114,6 @@ static const struct snd_soc_component_driver soc_component_dev_cs42l56 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config cs42l56_regmap = {
diff --git a/sound/soc/codecs/cs42l73.c b/sound/soc/codecs/cs42l73.c
index 5a9166289f36..0a146319755a 100644
--- a/sound/soc/codecs/cs42l73.c
+++ b/sound/soc/codecs/cs42l73.c
@@ -1256,7 +1256,6 @@ static const struct snd_soc_component_driver soc_component_dev_cs42l73 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config cs42l73_regmap = {
diff --git a/sound/soc/codecs/cs42xx8.c b/sound/soc/codecs/cs42xx8.c
index 5d6ef660f851..d14eb2f6e1dd 100644
--- a/sound/soc/codecs/cs42xx8.c
+++ b/sound/soc/codecs/cs42xx8.c
@@ -497,7 +497,6 @@ static const struct snd_soc_component_driver cs42xx8_driver = {
.num_dapm_routes = ARRAY_SIZE(cs42xx8_dapm_routes),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
const struct cs42xx8_driver_data cs42448_data = {
diff --git a/sound/soc/codecs/cs43130.c b/sound/soc/codecs/cs43130.c
index a2bce0f9f247..ca4d47cc9c91 100644
--- a/sound/soc/codecs/cs43130.c
+++ b/sound/soc/codecs/cs43130.c
@@ -2345,7 +2345,6 @@ static struct snd_soc_component_driver soc_component_dev_cs43130 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config cs43130_regmap = {
diff --git a/sound/soc/codecs/cs4341.c b/sound/soc/codecs/cs4341.c
index 8ac043f1aae0..ac1696034846 100644
--- a/sound/soc/codecs/cs4341.c
+++ b/sound/soc/codecs/cs4341.c
@@ -202,7 +202,6 @@ static const struct snd_soc_component_driver soc_component_cs4341 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct of_device_id __maybe_unused cs4341_dt_ids[] = {
diff --git a/sound/soc/codecs/cs4349.c b/sound/soc/codecs/cs4349.c
index 7069e9b54857..f7c5c2fd4304 100644
--- a/sound/soc/codecs/cs4349.c
+++ b/sound/soc/codecs/cs4349.c
@@ -260,7 +260,6 @@ static const struct snd_soc_component_driver soc_component_dev_cs4349 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config cs4349_regmap = {
diff --git a/sound/soc/codecs/cs47l15.c b/sound/soc/codecs/cs47l15.c
index 1c7d52bef893..06c4214382e3 100644
--- a/sound/soc/codecs/cs47l15.c
+++ b/sound/soc/codecs/cs47l15.c
@@ -1356,7 +1356,6 @@ static const struct snd_soc_component_driver soc_component_dev_cs47l15 = {
.num_dapm_routes = ARRAY_SIZE(cs47l15_dapm_routes),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int cs47l15_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/cs47l24.c b/sound/soc/codecs/cs47l24.c
index 6356f81aafc5..f9a2b865d717 100644
--- a/sound/soc/codecs/cs47l24.c
+++ b/sound/soc/codecs/cs47l24.c
@@ -1203,7 +1203,6 @@ static const struct snd_soc_component_driver soc_component_dev_cs47l24 = {
.num_dapm_routes = ARRAY_SIZE(cs47l24_dapm_routes),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int cs47l24_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/cs47l35.c b/sound/soc/codecs/cs47l35.c
index db2f844b8b17..c1032d6c9143 100644
--- a/sound/soc/codecs/cs47l35.c
+++ b/sound/soc/codecs/cs47l35.c
@@ -1638,7 +1638,6 @@ static const struct snd_soc_component_driver soc_component_dev_cs47l35 = {
.num_dapm_routes = ARRAY_SIZE(cs47l35_dapm_routes),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int cs47l35_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/cs47l85.c b/sound/soc/codecs/cs47l85.c
index d4fedc5ad516..215d8211aa59 100644
--- a/sound/soc/codecs/cs47l85.c
+++ b/sound/soc/codecs/cs47l85.c
@@ -2582,7 +2582,6 @@ static const struct snd_soc_component_driver soc_component_dev_cs47l85 = {
.num_dapm_routes = ARRAY_SIZE(cs47l85_dapm_routes),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int cs47l85_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/cs47l90.c b/sound/soc/codecs/cs47l90.c
index 5aec937a2462..1ad6526c7871 100644
--- a/sound/soc/codecs/cs47l90.c
+++ b/sound/soc/codecs/cs47l90.c
@@ -2497,7 +2497,6 @@ static const struct snd_soc_component_driver soc_component_dev_cs47l90 = {
.num_dapm_routes = ARRAY_SIZE(cs47l90_dapm_routes),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int cs47l90_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/cs47l92.c b/sound/soc/codecs/cs47l92.c
index 444026b7d54b..fe576d64e089 100644
--- a/sound/soc/codecs/cs47l92.c
+++ b/sound/soc/codecs/cs47l92.c
@@ -1964,7 +1964,6 @@ static const struct snd_soc_component_driver soc_component_dev_cs47l92 = {
.num_dapm_routes = ARRAY_SIZE(cs47l92_dapm_routes),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int cs47l92_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/cs53l30.c b/sound/soc/codecs/cs53l30.c
index 360ca2ffd506..8796d8e84b7a 100644
--- a/sound/soc/codecs/cs53l30.c
+++ b/sound/soc/codecs/cs53l30.c
@@ -899,7 +899,6 @@ static const struct snd_soc_component_driver cs53l30_driver = {
.num_dapm_routes = ARRAY_SIZE(cs53l30_dapm_routes),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static struct regmap_config cs53l30_regmap = {
diff --git a/sound/soc/codecs/cx20442.c b/sound/soc/codecs/cx20442.c
index 1af0bf5f1e2f..43c0cac0ec9e 100644
--- a/sound/soc/codecs/cx20442.c
+++ b/sound/soc/codecs/cx20442.c
@@ -411,7 +411,6 @@ static const struct snd_soc_component_driver cx20442_component_dev = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int cx20442_platform_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/cx2072x.c b/sound/soc/codecs/cx2072x.c
index b35debb5818d..b6667e8a6099 100644
--- a/sound/soc/codecs/cx2072x.c
+++ b/sound/soc/codecs/cx2072x.c
@@ -710,22 +710,19 @@ static int cx2072x_config_i2spcm(struct cx2072x_priv *cx2072x)
regdbt2.ulval = 0xac;
- /* set master/slave */
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBM_CFM:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_CBP_CFP:
reg2.r.tx_master = 1;
reg3.r.rx_master = 1;
- dev_dbg(dev, "Sets Master mode\n");
break;
- case SND_SOC_DAIFMT_CBS_CFS:
+ case SND_SOC_DAIFMT_CBC_CFC:
reg2.r.tx_master = 0;
reg3.r.rx_master = 0;
- dev_dbg(dev, "Sets Slave mode\n");
break;
default:
- dev_err(dev, "Unsupported DAI master mode\n");
+ dev_err(dev, "Unsupported DAI clocking mode\n");
return -EINVAL;
}
@@ -1009,9 +1006,9 @@ static int cx2072x_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
dev_dbg(dev, "set_dai_fmt- %08x\n", fmt);
/* set master/slave */
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBM_CFM:
- case SND_SOC_DAIFMT_CBS_CFS:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_CBP_CFP:
+ case SND_SOC_DAIFMT_CBC_CFC:
break;
default:
diff --git a/sound/soc/codecs/da7210.c b/sound/soc/codecs/da7210.c
index 3fa3042e4424..f838466bfebf 100644
--- a/sound/soc/codecs/da7210.c
+++ b/sound/soc/codecs/da7210.c
@@ -1173,7 +1173,6 @@ static const struct snd_soc_component_driver soc_component_dev_da7210 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
#if IS_ENABLED(CONFIG_I2C)
@@ -1335,6 +1334,8 @@ static int __init da7210_modinit(void)
int ret = 0;
#if IS_ENABLED(CONFIG_I2C)
ret = i2c_add_driver(&da7210_i2c_driver);
+ if (ret)
+ return ret;
#endif
#if defined(CONFIG_SPI_MASTER)
ret = spi_register_driver(&da7210_spi_driver);
diff --git a/sound/soc/codecs/da7213.c b/sound/soc/codecs/da7213.c
index 2e645dc60eda..544ccbcfc884 100644
--- a/sound/soc/codecs/da7213.c
+++ b/sound/soc/codecs/da7213.c
@@ -1922,7 +1922,6 @@ static const struct snd_soc_component_driver soc_component_dev_da7213 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config da7213_regmap_config = {
diff --git a/sound/soc/codecs/da7218.c b/sound/soc/codecs/da7218.c
index a5d7c350a3de..91372909d184 100644
--- a/sound/soc/codecs/da7218.c
+++ b/sound/soc/codecs/da7218.c
@@ -3040,7 +3040,6 @@ static const struct snd_soc_component_driver soc_component_dev_da7218 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
diff --git a/sound/soc/codecs/da7219.c b/sound/soc/codecs/da7219.c
index 7fdef38ed8cd..50ecf30e6136 100644
--- a/sound/soc/codecs/da7219.c
+++ b/sound/soc/codecs/da7219.c
@@ -2647,7 +2647,6 @@ static const struct snd_soc_component_driver soc_component_dev_da7219 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
@@ -2693,11 +2692,6 @@ static int da7219_i2c_probe(struct i2c_client *i2c)
return ret;
}
-static int da7219_i2c_remove(struct i2c_client *client)
-{
- return 0;
-}
-
static const struct i2c_device_id da7219_i2c_id[] = {
{ "da7219", },
{ }
@@ -2711,7 +2705,6 @@ static struct i2c_driver da7219_i2c_driver = {
.acpi_match_table = ACPI_PTR(da7219_acpi_match),
},
.probe_new = da7219_i2c_probe,
- .remove = da7219_i2c_remove,
.id_table = da7219_i2c_id,
};
diff --git a/sound/soc/codecs/da732x.c b/sound/soc/codecs/da732x.c
index f14cddf23f42..2c5b0b74201c 100644
--- a/sound/soc/codecs/da732x.c
+++ b/sound/soc/codecs/da732x.c
@@ -1503,7 +1503,6 @@ static const struct snd_soc_component_driver soc_component_dev_da732x = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int da732x_i2c_probe(struct i2c_client *i2c)
@@ -1546,11 +1545,6 @@ err:
return ret;
}
-static int da732x_i2c_remove(struct i2c_client *client)
-{
- return 0;
-}
-
static const struct i2c_device_id da732x_i2c_id[] = {
{ "da7320", 0},
{ }
@@ -1562,7 +1556,6 @@ static struct i2c_driver da732x_i2c_driver = {
.name = "da7320",
},
.probe_new = da732x_i2c_probe,
- .remove = da732x_i2c_remove,
.id_table = da732x_i2c_id,
};
diff --git a/sound/soc/codecs/da9055.c b/sound/soc/codecs/da9055.c
index 9d8c8adc5d76..28043b4530df 100644
--- a/sound/soc/codecs/da9055.c
+++ b/sound/soc/codecs/da9055.c
@@ -1460,7 +1460,6 @@ static const struct snd_soc_component_driver soc_component_dev_da9055 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config da9055_regmap_config = {
diff --git a/sound/soc/codecs/dmic.c b/sound/soc/codecs/dmic.c
index d1a30ca4571a..4fd6f97e5a49 100644
--- a/sound/soc/codecs/dmic.c
+++ b/sound/soc/codecs/dmic.c
@@ -140,7 +140,6 @@ static const struct snd_soc_component_driver soc_dmic = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int dmic_dev_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/es7134.c b/sound/soc/codecs/es7134.c
index f443351677df..f5150d2f95da 100644
--- a/sound/soc/codecs/es7134.c
+++ b/sound/soc/codecs/es7134.c
@@ -213,7 +213,6 @@ static const struct snd_soc_component_driver es7134_component_driver = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static struct snd_soc_dai_driver es7154_dai = {
diff --git a/sound/soc/codecs/es7241.c b/sound/soc/codecs/es7241.c
index 0baa86241cf9..339553cfbb48 100644
--- a/sound/soc/codecs/es7241.c
+++ b/sound/soc/codecs/es7241.c
@@ -232,7 +232,6 @@ static const struct snd_soc_component_driver es7241_component_driver = {
.num_dapm_routes = ARRAY_SIZE(es7241_dapm_routes),
.idle_bias_on = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static void es7241_parse_fmt(struct device *dev, struct es7241_data *priv)
diff --git a/sound/soc/codecs/es8316.c b/sound/soc/codecs/es8316.c
index 4407166bb338..de7185f73e1e 100644
--- a/sound/soc/codecs/es8316.c
+++ b/sound/soc/codecs/es8316.c
@@ -401,10 +401,8 @@ static int es8316_set_dai_fmt(struct snd_soc_dai *codec_dai,
u8 clksw;
u8 mask;
- if ((fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) != SND_SOC_DAIFMT_CBC_CFC) {
- dev_err(component->dev, "Codec driver only supports consumer mode\n");
- return -EINVAL;
- }
+ if ((fmt & SND_SOC_DAIFMT_MASTER_MASK) == SND_SOC_DAIFMT_CBP_CFP)
+ serdata1 |= ES8316_SERDATA1_MASTER;
if ((fmt & SND_SOC_DAIFMT_FORMAT_MASK) != SND_SOC_DAIFMT_I2S) {
dev_err(component->dev, "Codec driver only supports I2S format\n");
@@ -464,6 +462,8 @@ static int es8316_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_component *component = dai->component;
struct es8316_priv *es8316 = snd_soc_component_get_drvdata(component);
u8 wordlen = 0;
+ u8 bclk_divider;
+ u16 lrck_divider;
int i;
/* Validate supported sample rates that are autodetected from MCLK */
@@ -477,19 +477,24 @@ static int es8316_pcm_hw_params(struct snd_pcm_substream *substream,
}
if (i == NR_SUPPORTED_MCLK_LRCK_RATIOS)
return -EINVAL;
-
+ lrck_divider = es8316->sysclk / params_rate(params);
+ bclk_divider = lrck_divider / 4;
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
wordlen = ES8316_SERDATA2_LEN_16;
+ bclk_divider /= 16;
break;
case SNDRV_PCM_FORMAT_S20_3LE:
wordlen = ES8316_SERDATA2_LEN_20;
+ bclk_divider /= 20;
break;
case SNDRV_PCM_FORMAT_S24_LE:
wordlen = ES8316_SERDATA2_LEN_24;
+ bclk_divider /= 24;
break;
case SNDRV_PCM_FORMAT_S32_LE:
wordlen = ES8316_SERDATA2_LEN_32;
+ bclk_divider /= 32;
break;
default:
return -EINVAL;
@@ -499,6 +504,11 @@ static int es8316_pcm_hw_params(struct snd_pcm_substream *substream,
ES8316_SERDATA2_LEN_MASK, wordlen);
snd_soc_component_update_bits(component, ES8316_SERDATA_ADC,
ES8316_SERDATA2_LEN_MASK, wordlen);
+ snd_soc_component_update_bits(component, ES8316_SERDATA1, 0x1f, bclk_divider);
+ snd_soc_component_update_bits(component, ES8316_CLKMGR_ADCDIV1, 0x0f, lrck_divider >> 8);
+ snd_soc_component_update_bits(component, ES8316_CLKMGR_ADCDIV2, 0xff, lrck_divider & 0xff);
+ snd_soc_component_update_bits(component, ES8316_CLKMGR_DACDIV1, 0x0f, lrck_divider >> 8);
+ snd_soc_component_update_bits(component, ES8316_CLKMGR_DACDIV2, 0xff, lrck_divider & 0xff);
return 0;
}
@@ -769,7 +779,6 @@ static const struct snd_soc_component_driver soc_component_dev_es8316 = {
.num_dapm_routes = ARRAY_SIZE(es8316_dapm_routes),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_range es8316_volatile_ranges[] = {
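/*
 * Editorial sketch: a worked example of the divider arithmetic added to
 * es8316_pcm_hw_params() above. The MCLK value is an assumption for
 * illustration only; the formulas and register fields are exactly those in
 * the hunk.
 */
static void es8316_divider_example(void)
{
	unsigned int sysclk = 12288000;		/* assumed es8316->sysclk (MCLK) */
	unsigned int rate = 48000;		/* params_rate(params) */
	u16 lrck_divider = sysclk / rate;	/* 256 */
	u8 bclk_divider = lrck_divider / 4;	/* 64 */

	/* SNDRV_PCM_FORMAT_S16_LE */
	bclk_divider /= 16;			/* 4 */

	/*
	 * The resulting register updates are then:
	 *   ES8316_SERDATA1        bits [4:0] = 4    (bclk_divider)
	 *   ES8316_CLKMGR_ADCDIV1  bits [3:0] = 0x1  (lrck_divider >> 8)
	 *   ES8316_CLKMGR_ADCDIV2             = 0x00 (lrck_divider & 0xff)
	 * with the same pair written to the DAC dividers.
	 */
}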
diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c
index dd53dfd87b04..160adc706cc6 100644
--- a/sound/soc/codecs/es8328.c
+++ b/sound/soc/codecs/es8328.c
@@ -844,7 +844,6 @@ static const struct snd_soc_component_driver es8328_component_driver = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
int es8328_probe(struct device *dev, struct regmap *regmap)
diff --git a/sound/soc/codecs/gtm601.c b/sound/soc/codecs/gtm601.c
index e1235e695b0f..c6b1e77ffccd 100644
--- a/sound/soc/codecs/gtm601.c
+++ b/sound/soc/codecs/gtm601.c
@@ -73,7 +73,6 @@ static const struct snd_soc_component_driver soc_component_dev_gtm601 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int gtm601_platform_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/hda-dai.c b/sound/soc/codecs/hda-dai.c
new file mode 100644
index 000000000000..5371ff086261
--- /dev/null
+++ b/sound/soc/codecs/hda-dai.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
+//
+// Author: Cezary Rojewski <cezary.rojewski@intel.com>
+//
+
+#include <sound/soc.h>
+#include <sound/hda_codec.h>
+#include "hda.h"
+
+static int hda_codec_dai_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
+{
+ struct hda_pcm_stream *stream_info;
+ struct hda_codec *codec;
+ struct hda_pcm *pcm;
+ int ret;
+
+ codec = dev_to_hda_codec(dai->dev);
+ stream_info = snd_soc_dai_get_dma_data(dai, substream);
+ pcm = container_of(stream_info, struct hda_pcm, stream[substream->stream]);
+
+ dev_dbg(dai->dev, "open stream codec: %08x, info: %p, pcm: %p %s substream: %p\n",
+ codec->core.vendor_id, stream_info, pcm, pcm->name, substream);
+
+ snd_hda_codec_pcm_get(pcm);
+
+ ret = stream_info->ops.open(stream_info, codec, substream);
+ if (ret < 0) {
+ dev_err(dai->dev, "codec open failed: %d\n", ret);
+ snd_hda_codec_pcm_put(pcm);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hda_codec_dai_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
+{
+ struct hda_pcm_stream *stream_info;
+ struct hda_codec *codec;
+ struct hda_pcm *pcm;
+ int ret;
+
+ codec = dev_to_hda_codec(dai->dev);
+ stream_info = snd_soc_dai_get_dma_data(dai, substream);
+ pcm = container_of(stream_info, struct hda_pcm, stream[substream->stream]);
+
+ dev_dbg(dai->dev, "close stream codec: %08x, info: %p, pcm: %p %s substream: %p\n",
+ codec->core.vendor_id, stream_info, pcm, pcm->name, substream);
+
+ ret = stream_info->ops.close(stream_info, codec, substream);
+ if (ret < 0)
+ dev_err(dai->dev, "codec close failed: %d\n", ret);
+
+ snd_hda_codec_pcm_put(pcm);
+}
+
+static int hda_codec_dai_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
+{
+ struct hda_pcm_stream *stream_info;
+ struct hda_codec *codec;
+
+ codec = dev_to_hda_codec(dai->dev);
+ stream_info = snd_soc_dai_get_dma_data(dai, substream);
+
+ snd_hda_codec_cleanup(codec, stream_info, substream);
+
+ return 0;
+}
+
+static int hda_codec_dai_prepare(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct hda_pcm_stream *stream_info;
+ struct hdac_stream *stream;
+ struct hda_codec *codec;
+ unsigned int format;
+ int ret;
+
+ codec = dev_to_hda_codec(dai->dev);
+ stream = substream->runtime->private_data;
+ stream_info = snd_soc_dai_get_dma_data(dai, substream);
+ format = snd_hdac_calc_stream_format(runtime->rate, runtime->channels, runtime->format,
+ runtime->sample_bits, 0);
+
+ ret = snd_hda_codec_prepare(codec, stream_info, stream->stream_tag, format, substream);
+ if (ret < 0) {
+ dev_err(dai->dev, "codec prepare failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+const struct snd_soc_dai_ops snd_soc_hda_codec_dai_ops = {
+ .startup = hda_codec_dai_startup,
+ .shutdown = hda_codec_dai_shutdown,
+ .hw_free = hda_codec_dai_hw_free,
+ .prepare = hda_codec_dai_prepare,
+};
+EXPORT_SYMBOL_GPL(snd_soc_hda_codec_dai_ops);
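/*
 * Editorial sketch: the ops above rely on the DAI's dma_data pointing into the
 * owning hda_pcm's stream[] array; hda.c (the next file in this patch) sets
 * that up with snd_soc_dai_init_dma_data(dai, &pcm->stream[0], &pcm->stream[1]).
 * The repeated lookup could be factored into a helper like this hypothetical
 * one, which simply restates what each callback does:
 */
static struct hda_pcm *my_pcm_from_dai(struct snd_soc_dai *dai,
				       struct snd_pcm_substream *substream)
{
	struct hda_pcm_stream *stream_info =
		snd_soc_dai_get_dma_data(dai, substream);

	/* stream_info points at pcm->stream[dir], so container_of() walks back */
	return container_of(stream_info, struct hda_pcm,
			    stream[substream->stream]);
}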
diff --git a/sound/soc/codecs/hda.c b/sound/soc/codecs/hda.c
new file mode 100644
index 000000000000..ad20a3dff9b7
--- /dev/null
+++ b/sound/soc/codecs/hda.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
+//
+// Author: Cezary Rojewski <cezary.rojewski@intel.com>
+//
+
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <sound/soc.h>
+#include <sound/hdaudio_ext.h>
+#include <sound/hda_i915.h>
+#include <sound/hda_codec.h>
+#include "hda.h"
+
+static int hda_codec_create_dais(struct hda_codec *codec, int pcm_count,
+ struct snd_soc_dai_driver **drivers)
+{
+ struct device *dev = &codec->core.dev;
+ struct snd_soc_dai_driver *drvs;
+ struct hda_pcm *pcm;
+ int i;
+
+ drvs = devm_kcalloc(dev, pcm_count, sizeof(*drvs), GFP_KERNEL);
+ if (!drvs)
+ return -ENOMEM;
+
+ pcm = list_first_entry(&codec->pcm_list_head, struct hda_pcm, list);
+
+ for (i = 0; i < pcm_count; i++, pcm = list_next_entry(pcm, list)) {
+ struct snd_soc_pcm_stream *stream;
+ int dir;
+
+ dev_info(dev, "creating for %s %d\n", pcm->name, i);
+ drvs[i].id = i;
+ drvs[i].name = pcm->name;
+ drvs[i].ops = &snd_soc_hda_codec_dai_ops;
+
+ dir = SNDRV_PCM_STREAM_PLAYBACK;
+ stream = &drvs[i].playback;
+ if (!pcm->stream[dir].substreams) {
+ dev_info(dev, "skipping playback dai for %s\n", pcm->name);
+ goto capture_dais;
+ }
+
+ stream->stream_name =
+ devm_kasprintf(dev, GFP_KERNEL, "%s %s", pcm->name,
+ snd_pcm_direction_name(dir));
+ if (!stream->stream_name)
+ return -ENOMEM;
+ stream->channels_min = pcm->stream[dir].channels_min;
+ stream->channels_max = pcm->stream[dir].channels_max;
+ stream->rates = pcm->stream[dir].rates;
+ stream->formats = pcm->stream[dir].formats;
+ stream->sig_bits = pcm->stream[dir].maxbps;
+
+capture_dais:
+ dir = SNDRV_PCM_STREAM_CAPTURE;
+ stream = &drvs[i].capture;
+ if (!pcm->stream[dir].substreams) {
+ dev_info(dev, "skipping capture dai for %s\n", pcm->name);
+ continue;
+ }
+
+ stream->stream_name =
+ devm_kasprintf(dev, GFP_KERNEL, "%s %s", pcm->name,
+ snd_pcm_direction_name(dir));
+ if (!stream->stream_name)
+ return -ENOMEM;
+ stream->channels_min = pcm->stream[dir].channels_min;
+ stream->channels_max = pcm->stream[dir].channels_max;
+ stream->rates = pcm->stream[dir].rates;
+ stream->formats = pcm->stream[dir].formats;
+ stream->sig_bits = pcm->stream[dir].maxbps;
+ }
+
+ *drivers = drvs;
+ return 0;
+}
+
+static int hda_codec_register_dais(struct hda_codec *codec, struct snd_soc_component *component)
+{
+ struct snd_soc_dai_driver *drvs = NULL;
+ struct snd_soc_dapm_context *dapm;
+ struct hda_pcm *pcm;
+ int ret, pcm_count = 0;
+
+ if (list_empty(&codec->pcm_list_head))
+ return -EINVAL;
+ list_for_each_entry(pcm, &codec->pcm_list_head, list)
+ pcm_count++;
+
+ ret = hda_codec_create_dais(codec, pcm_count, &drvs);
+ if (ret < 0)
+ return ret;
+
+ dapm = snd_soc_component_get_dapm(component);
+
+ list_for_each_entry(pcm, &codec->pcm_list_head, list) {
+ struct snd_soc_dai *dai;
+
+ dai = snd_soc_register_dai(component, drvs, false);
+ if (!dai) {
+ dev_err(component->dev, "register dai for %s failed\n", pcm->name);
+ return -EINVAL;
+ }
+
+ ret = snd_soc_dapm_new_dai_widgets(dapm, dai);
+ if (ret < 0) {
+ dev_err(component->dev, "create widgets failed: %d\n", ret);
+ snd_soc_unregister_dai(dai);
+ return ret;
+ }
+
+ snd_soc_dai_init_dma_data(dai, &pcm->stream[0], &pcm->stream[1]);
+ drvs++;
+ }
+
+ return 0;
+}
+
+static void hda_codec_unregister_dais(struct hda_codec *codec,
+ struct snd_soc_component *component)
+{
+ struct snd_soc_dai *dai, *save;
+ struct hda_pcm *pcm;
+
+ for_each_component_dais_safe(component, dai, save) {
+ list_for_each_entry(pcm, &codec->pcm_list_head, list) {
+ if (strcmp(dai->driver->name, pcm->name))
+ continue;
+
+ if (dai->playback_widget)
+ snd_soc_dapm_free_widget(dai->playback_widget);
+ if (dai->capture_widget)
+ snd_soc_dapm_free_widget(dai->capture_widget);
+ snd_soc_unregister_dai(dai);
+ break;
+ }
+ }
+}
+
+int hda_codec_probe_complete(struct hda_codec *codec)
+{
+ struct hdac_device *hdev = &codec->core;
+ struct hdac_bus *bus = hdev->bus;
+ int ret;
+
+ ret = snd_hda_codec_build_controls(codec);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "unable to create controls %d\n", ret);
+ goto out;
+ }
+
+ /* The bus leaves codecs suspended as it does not manage their PM */
+ pm_runtime_set_active(&hdev->dev);
+ /* rpm was forbidden in snd_hda_codec_device_new() */
+ snd_hda_codec_set_power_save(codec, 2000);
+ snd_hda_codec_register(codec);
+out:
+ /* Complement pm_runtime_get_sync(bus) in probe */
+ pm_runtime_mark_last_busy(bus->dev);
+ pm_runtime_put_autosuspend(bus->dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hda_codec_probe_complete);
+
+/* Expects codec with usage_count=1 and status=suspended */
+static int hda_codec_probe(struct snd_soc_component *component)
+{
+ struct hda_codec *codec = dev_to_hda_codec(component->dev);
+ struct hdac_device *hdev = &codec->core;
+ struct hdac_bus *bus = hdev->bus;
+ struct hdac_ext_link *hlink;
+ hda_codec_patch_t patch;
+ int ret;
+
+#ifdef CONFIG_PM
+ WARN_ON(atomic_read(&hdev->dev.power.usage_count) != 1 ||
+ !pm_runtime_status_suspended(&hdev->dev));
+#endif
+
+ hlink = snd_hdac_ext_bus_link_at(bus, hdev->addr);
+ if (!hlink) {
+ dev_err(&hdev->dev, "hdac link not found\n");
+ return -EIO;
+ }
+
+ pm_runtime_get_sync(bus->dev);
+ if (hda_codec_is_display(codec))
+ snd_hdac_display_power(bus, hdev->addr, true);
+ snd_hdac_ext_bus_link_get(bus, hlink);
+
+ ret = snd_hda_codec_device_new(codec->bus, component->card->snd_card, hdev->addr, codec,
+ false);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "create hda codec failed: %d\n", ret);
+ goto device_new_err;
+ }
+
+ ret = snd_hda_codec_set_name(codec, codec->preset->name);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "name failed %s\n", codec->preset->name);
+ goto err;
+ }
+
+ ret = snd_hdac_regmap_init(&codec->core);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "regmap init failed\n");
+ goto err;
+ }
+
+ patch = (hda_codec_patch_t)codec->preset->driver_data;
+ if (!patch) {
+ dev_err(&hdev->dev, "no patch specified?\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = patch(codec);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "patch failed %d\n", ret);
+ goto err;
+ }
+
+ /* configure codec for 1:1 PCM:DAI mapping */
+ codec->mst_no_extra_pcms = 1;
+
+ ret = snd_hda_codec_parse_pcms(codec);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "unable to map pcms to dai %d\n", ret);
+ goto parse_pcms_err;
+ }
+
+ ret = hda_codec_register_dais(codec, component);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "update dais failed: %d\n", ret);
+ goto parse_pcms_err;
+ }
+
+ if (!hda_codec_is_display(codec)) {
+ ret = hda_codec_probe_complete(codec);
+ if (ret < 0)
+ goto complete_err;
+ }
+
+ codec->core.lazy_cache = true;
+
+ return 0;
+
+complete_err:
+ hda_codec_unregister_dais(codec, component);
+parse_pcms_err:
+ if (codec->patch_ops.free)
+ codec->patch_ops.free(codec);
+err:
+ snd_hda_codec_cleanup_for_unbind(codec);
+device_new_err:
+ if (hda_codec_is_display(codec))
+ snd_hdac_display_power(bus, hdev->addr, false);
+
+ snd_hdac_ext_bus_link_put(bus, hlink);
+
+ pm_runtime_mark_last_busy(bus->dev);
+ pm_runtime_put_autosuspend(bus->dev);
+ return ret;
+}
+
+/* Leaves codec with usage_count=1 and status=suspended */
+static void hda_codec_remove(struct snd_soc_component *component)
+{
+ struct hda_codec *codec = dev_to_hda_codec(component->dev);
+ struct hdac_device *hdev = &codec->core;
+ struct hdac_bus *bus = hdev->bus;
+ struct hdac_ext_link *hlink;
+ bool was_registered = codec->core.registered;
+
+ /* Don't allow any more runtime suspends */
+ pm_runtime_forbid(&hdev->dev);
+
+ hda_codec_unregister_dais(codec, component);
+
+ if (codec->patch_ops.free)
+ codec->patch_ops.free(codec);
+
+ snd_hda_codec_cleanup_for_unbind(codec);
+ pm_runtime_put_noidle(&hdev->dev);
+ /* snd_hdac_device_exit() is only called on bus remove */
+ pm_runtime_set_suspended(&hdev->dev);
+
+ if (hda_codec_is_display(codec))
+ snd_hdac_display_power(bus, hdev->addr, false);
+
+ hlink = snd_hdac_ext_bus_link_at(bus, hdev->addr);
+ if (hlink)
+ snd_hdac_ext_bus_link_put(bus, hlink);
+ /*
+ * HDMI card's hda_codec_probe_complete() (see late_probe()) may
+ * not be called due to an early error, leaving the bus usage count unbalanced
+ */
+ if (!was_registered) {
+ pm_runtime_mark_last_busy(bus->dev);
+ pm_runtime_put_autosuspend(bus->dev);
+ }
+
+#ifdef CONFIG_PM
+ WARN_ON(atomic_read(&hdev->dev.power.usage_count) != 1 ||
+ !pm_runtime_status_suspended(&hdev->dev));
+#endif
+}
+
+static const struct snd_soc_dapm_route hda_dapm_routes[] = {
+ {"AIF1TX", NULL, "Codec Input Pin1"},
+ {"AIF2TX", NULL, "Codec Input Pin2"},
+ {"AIF3TX", NULL, "Codec Input Pin3"},
+
+ {"Codec Output Pin1", NULL, "AIF1RX"},
+ {"Codec Output Pin2", NULL, "AIF2RX"},
+ {"Codec Output Pin3", NULL, "AIF3RX"},
+};
+
+static const struct snd_soc_dapm_widget hda_dapm_widgets[] = {
+ /* Audio Interface */
+ SND_SOC_DAPM_AIF_IN("AIF1RX", "Analog Codec Playback", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("AIF2RX", "Digital Codec Playback", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("AIF3RX", "Alt Analog Codec Playback", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF1TX", "Analog Codec Capture", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF2TX", "Digital Codec Capture", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF3TX", "Alt Analog Codec Capture", 0, SND_SOC_NOPM, 0, 0),
+
+ /* Input Pins */
+ SND_SOC_DAPM_INPUT("Codec Input Pin1"),
+ SND_SOC_DAPM_INPUT("Codec Input Pin2"),
+ SND_SOC_DAPM_INPUT("Codec Input Pin3"),
+
+ /* Output Pins */
+ SND_SOC_DAPM_OUTPUT("Codec Output Pin1"),
+ SND_SOC_DAPM_OUTPUT("Codec Output Pin2"),
+ SND_SOC_DAPM_OUTPUT("Codec Output Pin3"),
+};
+
+static struct snd_soc_dai_driver card_binder_dai = {
+ .id = -1,
+ .name = "codec-probing-DAI",
+};
+
+static int hda_hdev_attach(struct hdac_device *hdev)
+{
+ struct hda_codec *codec = dev_to_hda_codec(&hdev->dev);
+ struct snd_soc_component_driver *comp_drv;
+
+ comp_drv = devm_kzalloc(&hdev->dev, sizeof(*comp_drv), GFP_KERNEL);
+ if (!comp_drv)
+ return -ENOMEM;
+
+ /*
+ * It's safe to rely on dev_name() rather than a copy, as the component
+ * driver's lifetime is directly tied to that of the hda codec
+ */
+ comp_drv->name = dev_name(&hdev->dev);
+ comp_drv->probe = hda_codec_probe;
+ comp_drv->remove = hda_codec_remove;
+ comp_drv->idle_bias_on = false;
+ if (!hda_codec_is_display(codec)) {
+ comp_drv->dapm_widgets = hda_dapm_widgets;
+ comp_drv->num_dapm_widgets = ARRAY_SIZE(hda_dapm_widgets);
+ comp_drv->dapm_routes = hda_dapm_routes;
+ comp_drv->num_dapm_routes = ARRAY_SIZE(hda_dapm_routes);
+ }
+
+ return snd_soc_register_component(&hdev->dev, comp_drv, &card_binder_dai, 1);
+}
+
+static int hda_hdev_detach(struct hdac_device *hdev)
+{
+ struct hda_codec *codec = dev_to_hda_codec(&hdev->dev);
+
+ if (codec->core.registered)
+ cancel_delayed_work_sync(&codec->jackpoll_work);
+
+ snd_soc_unregister_component(&hdev->dev);
+
+ return 0;
+}
+
+const struct hdac_ext_bus_ops soc_hda_ext_bus_ops = {
+ .hdev_attach = hda_hdev_attach,
+ .hdev_detach = hda_hdev_detach,
+};
+EXPORT_SYMBOL_GPL(soc_hda_ext_bus_ops);
+
+MODULE_DESCRIPTION("HD-Audio codec driver");
+MODULE_AUTHOR("Cezary Rojewski <cezary.rojewski@intel.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/hda.h b/sound/soc/codecs/hda.h
new file mode 100644
index 000000000000..78a2be4945b1
--- /dev/null
+++ b/sound/soc/codecs/hda.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
+ *
+ * Author: Cezary Rojewski <cezary.rojewski@intel.com>
+ */
+
+#ifndef SND_SOC_CODECS_HDA_H
+#define SND_SOC_CODECS_HDA_H
+
+#define hda_codec_is_display(codec) \
+ ((((codec)->core.vendor_id >> 16) & 0xFFFF) == 0x8086)
+
+extern const struct snd_soc_dai_ops snd_soc_hda_codec_dai_ops;
+
+extern const struct hdac_ext_bus_ops soc_hda_ext_bus_ops;
+int hda_codec_probe_complete(struct hda_codec *codec);
+
+#endif
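/*
 * Editorial sketch: hda_codec_is_display() above checks whether the upper
 * 16 bits of the codec vendor ID equal Intel's vendor ID (0x8086), i.e. an
 * HDMI/DP display codec. The example IDs below are illustrative assumptions:
 */
static bool my_is_display_example(unsigned int vendor_id)
{
	/* 0x8086xxxx (Intel display codec) -> true, 0x10ecxxxx (Realtek) -> false */
	return ((vendor_id >> 16) & 0xFFFF) == 0x8086;
}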
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index 66408a98298b..cb23650ad522 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -2058,7 +2058,6 @@ static const struct snd_soc_component_driver hdmi_hda_codec = {
.remove = hdmi_codec_remove,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static void hdac_hdmi_get_chmap(struct hdac_device *hdev, int pcm_idx,
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
index b773466619b2..5679102de91f 100644
--- a/sound/soc/codecs/hdmi-codec.c
+++ b/sound/soc/codecs/hdmi-codec.c
@@ -606,18 +606,18 @@ static int hdmi_codec_i2s_set_fmt(struct snd_soc_dai *dai,
/* Reset daifmt */
memset(cf, 0, sizeof(*cf));
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBM_CFM:
- cf->bit_clk_master = 1;
- cf->frame_clk_master = 1;
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_CBP_CFP:
+ cf->bit_clk_provider = 1;
+ cf->frame_clk_provider = 1;
break;
- case SND_SOC_DAIFMT_CBS_CFM:
- cf->frame_clk_master = 1;
+ case SND_SOC_DAIFMT_CBC_CFP:
+ cf->frame_clk_provider = 1;
break;
- case SND_SOC_DAIFMT_CBM_CFS:
- cf->bit_clk_master = 1;
+ case SND_SOC_DAIFMT_CBP_CFC:
+ cf->bit_clk_provider = 1;
break;
- case SND_SOC_DAIFMT_CBS_CFS:
+ case SND_SOC_DAIFMT_CBC_CFC:
break;
default:
return -EINVAL;
@@ -977,7 +977,6 @@ static const struct snd_soc_component_driver hdmi_driver = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
.set_jack = hdmi_codec_set_jack,
};
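/*
 * Editorial sketch: the hdmi-codec and cx2072x hunks above (and the max98090
 * one below) move from the deprecated master/slave DAIFMT names to the
 * clock-provider terminology, using the mapping shown in the hdmi-codec hunk:
 *
 *   CBM_CFM -> CBP_CFP  (codec provides bit clock and frame clock)
 *   CBS_CFM -> CBC_CFP  (codec provides frame clock only)
 *   CBM_CFS -> CBP_CFC  (codec provides bit clock only)
 *   CBS_CFS -> CBC_CFC  (codec consumes both clocks)
 *
 * and SND_SOC_DAIFMT_MASTER_MASK becomes SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK.
 * A minimal set_fmt handler in the new style:
 */
static int my_set_fmt_example(struct snd_soc_dai *dai, unsigned int fmt)
{
	switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
	case SND_SOC_DAIFMT_CBP_CFP:	/* formerly CBM_CFM */
		/* codec drives both clocks */
		break;
	case SND_SOC_DAIFMT_CBC_CFC:	/* formerly CBS_CFS */
		/* codec consumes both clocks */
		break;
	default:
		return -EINVAL;
	}
	return 0;
}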
diff --git a/sound/soc/codecs/ics43432.c b/sound/soc/codecs/ics43432.c
index de4c8460ab3d..58a382254718 100644
--- a/sound/soc/codecs/ics43432.c
+++ b/sound/soc/codecs/ics43432.c
@@ -41,7 +41,6 @@ static const struct snd_soc_component_driver ics43432_component_driver = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int ics43432_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/inno_rk3036.c b/sound/soc/codecs/inno_rk3036.c
index ca0f4c1911e4..8222cde6e3b9 100644
--- a/sound/soc/codecs/inno_rk3036.c
+++ b/sound/soc/codecs/inno_rk3036.c
@@ -387,7 +387,6 @@ static const struct snd_soc_component_driver rk3036_codec_driver = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config rk3036_codec_regmap_config = {
diff --git a/sound/soc/codecs/isabelle.c b/sound/soc/codecs/isabelle.c
index 39be31e1282e..50105d72b2b7 100644
--- a/sound/soc/codecs/isabelle.c
+++ b/sound/soc/codecs/isabelle.c
@@ -1095,7 +1095,6 @@ static const struct snd_soc_component_driver soc_component_dev_isabelle = {
.num_dapm_routes = ARRAY_SIZE(isabelle_intercon),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config isabelle_regmap_config = {
diff --git a/sound/soc/codecs/jz4740.c b/sound/soc/codecs/jz4740.c
index 081485f784e9..7c25acf6ff0d 100644
--- a/sound/soc/codecs/jz4740.c
+++ b/sound/soc/codecs/jz4740.c
@@ -291,8 +291,6 @@ static const struct snd_soc_component_driver soc_codec_dev_jz4740_codec = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
-
};
static const struct regmap_config jz4740_codec_regmap_config = {
diff --git a/sound/soc/codecs/lm49453.c b/sound/soc/codecs/lm49453.c
index bd0078e4499b..a2e782cc4276 100644
--- a/sound/soc/codecs/lm49453.c
+++ b/sound/soc/codecs/lm49453.c
@@ -1399,7 +1399,6 @@ static const struct snd_soc_component_driver soc_component_dev_lm49453 = {
.num_dapm_routes = ARRAY_SIZE(lm49453_audio_map),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config lm49453_regmap_config = {
@@ -1442,11 +1441,6 @@ static int lm49453_i2c_probe(struct i2c_client *i2c)
return ret;
}
-static int lm49453_i2c_remove(struct i2c_client *client)
-{
- return 0;
-}
-
static const struct i2c_device_id lm49453_i2c_id[] = {
{ "lm49453", 0 },
{ }
@@ -1458,7 +1452,6 @@ static struct i2c_driver lm49453_i2c_driver = {
.name = "lm49453",
},
.probe_new = lm49453_i2c_probe,
- .remove = lm49453_i2c_remove,
.id_table = lm49453_i2c_id,
};
diff --git a/sound/soc/codecs/lochnagar-sc.c b/sound/soc/codecs/lochnagar-sc.c
index 54a8ba7ed3c2..13fbd8830b09 100644
--- a/sound/soc/codecs/lochnagar-sc.c
+++ b/sound/soc/codecs/lochnagar-sc.c
@@ -217,7 +217,6 @@ static const struct snd_soc_component_driver lochnagar_sc_driver = {
.dapm_routes = lochnagar_sc_routes,
.num_dapm_routes = ARRAY_SIZE(lochnagar_sc_routes),
- .non_legacy_dai_naming = 1,
.endianness = 1,
};
diff --git a/sound/soc/codecs/lpass-va-macro.c b/sound/soc/codecs/lpass-va-macro.c
index d18b56e60433..1ea10dc70748 100644
--- a/sound/soc/codecs/lpass-va-macro.c
+++ b/sound/soc/codecs/lpass-va-macro.c
@@ -199,6 +199,7 @@ struct va_macro {
struct clk *mclk;
struct clk *macro;
struct clk *dcodec;
+ struct clk *fsgen;
struct clk_hw hw;
struct lpass_macro *pds;
@@ -467,9 +468,9 @@ static int va_macro_mclk_event(struct snd_soc_dapm_widget *w,
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
- return va_macro_mclk_enable(va, true);
+ return clk_prepare_enable(va->fsgen);
case SND_SOC_DAPM_POST_PMD:
- return va_macro_mclk_enable(va, false);
+ clk_disable_unprepare(va->fsgen);
}
return 0;
@@ -1473,6 +1474,12 @@ static int va_macro_probe(struct platform_device *pdev)
if (ret)
goto err_clkout;
+ va->fsgen = clk_hw_get_clk(&va->hw, "fsgen");
+ if (IS_ERR(va->fsgen)) {
+ ret = PTR_ERR(va->fsgen);
+ goto err_clkout;
+ }
+
ret = devm_snd_soc_register_component(dev, &va_macro_component_drv,
va_macro_dais,
ARRAY_SIZE(va_macro_dais));
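/*
 * Editorial sketch: the va-macro hunks above obtain a struct clk for the
 * locally registered "fsgen" clk_hw at probe time (clk_hw_get_clk(&va->hw,
 * "fsgen")) and toggle it from the MCLK DAPM event instead of calling
 * va_macro_mclk_enable() directly. Reduced to its core (everything except the
 * clk_* and DAPM APIs is taken from the hunks above):
 */
static int my_mclk_event_example(struct va_macro *va, int event)
{
	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		return clk_prepare_enable(va->fsgen);	/* enable on power-up */
	case SND_SOC_DAPM_POST_PMD:
		clk_disable_unprepare(va->fsgen);	/* disable on power-down */
		break;
	}
	return 0;
}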
diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
index 5ef2e1279ee7..5435a49604cf 100644
--- a/sound/soc/codecs/max98088.c
+++ b/sound/soc/codecs/max98088.c
@@ -1734,7 +1734,6 @@ static const struct snd_soc_component_driver soc_component_dev_max98088 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct i2c_device_id max98088_i2c_id[] = {
@@ -1746,18 +1745,18 @@ MODULE_DEVICE_TABLE(i2c, max98088_i2c_id);
static int max98088_i2c_probe(struct i2c_client *i2c)
{
- struct max98088_priv *max98088;
- int ret;
- const struct i2c_device_id *id;
+ struct max98088_priv *max98088;
+ int ret;
+ const struct i2c_device_id *id;
- max98088 = devm_kzalloc(&i2c->dev, sizeof(struct max98088_priv),
- GFP_KERNEL);
- if (max98088 == NULL)
- return -ENOMEM;
+ max98088 = devm_kzalloc(&i2c->dev, sizeof(struct max98088_priv),
+ GFP_KERNEL);
+ if (max98088 == NULL)
+ return -ENOMEM;
- max98088->regmap = devm_regmap_init_i2c(i2c, &max98088_regmap);
- if (IS_ERR(max98088->regmap))
- return PTR_ERR(max98088->regmap);
+ max98088->regmap = devm_regmap_init_i2c(i2c, &max98088_regmap);
+ if (IS_ERR(max98088->regmap))
+ return PTR_ERR(max98088->regmap);
max98088->mclk = devm_clk_get(&i2c->dev, "mclk");
if (IS_ERR(max98088->mclk))
@@ -1765,14 +1764,14 @@ static int max98088_i2c_probe(struct i2c_client *i2c)
return PTR_ERR(max98088->mclk);
id = i2c_match_id(max98088_i2c_id, i2c);
- max98088->devtype = id->driver_data;
+ max98088->devtype = id->driver_data;
- i2c_set_clientdata(i2c, max98088);
- max98088->pdata = i2c->dev.platform_data;
+ i2c_set_clientdata(i2c, max98088);
+ max98088->pdata = i2c->dev.platform_data;
- ret = devm_snd_soc_register_component(&i2c->dev,
- &soc_component_dev_max98088, &max98088_dai[0], 2);
- return ret;
+ ret = devm_snd_soc_register_component(&i2c->dev, &soc_component_dev_max98088,
+ &max98088_dai[0], 2);
+ return ret;
}
#if defined(CONFIG_OF)
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
index 576277a82d41..142083b13ac3 100644
--- a/sound/soc/codecs/max98090.c
+++ b/sound/soc/codecs/max98090.c
@@ -1591,9 +1591,9 @@ static int max98090_dai_set_fmt(struct snd_soc_dai *codec_dai,
cdata->fmt = fmt;
regval = 0;
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBS_CFS:
- /* Set to slave mode PLL - MAS mode off */
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_CBC_CFC:
+ /* Set to consumer mode PLL - MAS mode off */
snd_soc_component_write(component,
M98090_REG_CLOCK_RATIO_NI_MSB, 0x00);
snd_soc_component_write(component,
@@ -1602,8 +1602,8 @@ static int max98090_dai_set_fmt(struct snd_soc_dai *codec_dai,
M98090_USE_M1_MASK, 0);
max98090->master = false;
break;
- case SND_SOC_DAIFMT_CBM_CFM:
- /* Set to master mode */
+ case SND_SOC_DAIFMT_CBP_CFP:
+ /* Set to provider mode */
if (max98090->tdm_slots == 4) {
/* TDM */
regval |= M98090_MAS_MASK |
@@ -1619,8 +1619,6 @@ static int max98090_dai_set_fmt(struct snd_soc_dai *codec_dai,
}
max98090->master = true;
break;
- case SND_SOC_DAIFMT_CBS_CFM:
- case SND_SOC_DAIFMT_CBM_CFS:
default:
dev_err(component->dev, "DAI clock mode unsupported");
return -EINVAL;
@@ -2521,7 +2519,6 @@ static const struct snd_soc_component_driver soc_component_dev_max98090 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config max98090_regmap = {
diff --git a/sound/soc/codecs/max98095.c b/sound/soc/codecs/max98095.c
index 7bca99fa61b5..44aa58fcc23f 100644
--- a/sound/soc/codecs/max98095.c
+++ b/sound/soc/codecs/max98095.c
@@ -2103,7 +2103,6 @@ static const struct snd_soc_component_driver soc_component_dev_max98095 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct i2c_device_id max98095_i2c_id[] = {
diff --git a/sound/soc/codecs/max98357a.c b/sound/soc/codecs/max98357a.c
index 918812763884..2a2b286f1747 100644
--- a/sound/soc/codecs/max98357a.c
+++ b/sound/soc/codecs/max98357a.c
@@ -93,7 +93,6 @@ static const struct snd_soc_component_driver max98357a_component_driver = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct snd_soc_dai_ops max98357a_dai_ops = {
diff --git a/sound/soc/codecs/max98371.c b/sound/soc/codecs/max98371.c
index 800f2bca6a0f..bac9d1bcf60a 100644
--- a/sound/soc/codecs/max98371.c
+++ b/sound/soc/codecs/max98371.c
@@ -351,7 +351,6 @@ static const struct snd_soc_component_driver max98371_component = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config max98371_regmap = {
diff --git a/sound/soc/codecs/max98373-i2c.c b/sound/soc/codecs/max98373-i2c.c
index 4fe065ece17c..3e04c7f0cce4 100644
--- a/sound/soc/codecs/max98373-i2c.c
+++ b/sound/soc/codecs/max98373-i2c.c
@@ -442,7 +442,6 @@ static bool max98373_volatile_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case MAX98373_R2000_SW_RESET ... MAX98373_R2009_INT_FLAG3:
- case MAX98373_R203E_AMP_PATH_GAIN:
case MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK:
case MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK:
case MAX98373_R20B6_BDE_CUR_STATE_READBACK:
diff --git a/sound/soc/codecs/max98373.c b/sound/soc/codecs/max98373.c
index e14fe98349a5..f90a6a7ba83b 100644
--- a/sound/soc/codecs/max98373.c
+++ b/sound/soc/codecs/max98373.c
@@ -5,6 +5,7 @@
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/module.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/cdev.h>
@@ -436,12 +437,22 @@ const struct snd_soc_component_driver soc_codec_dev_max98373 = {
.num_dapm_routes = ARRAY_SIZE(max98373_audio_map),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
EXPORT_SYMBOL_GPL(soc_codec_dev_max98373);
+static int max98373_sdw_probe(struct snd_soc_component *component)
+{
+ int ret;
+
+ ret = pm_runtime_resume(component->dev);
+ if (ret < 0 && ret != -EACCES)
+ return ret;
+
+ return 0;
+}
+
const struct snd_soc_component_driver soc_codec_dev_max98373_sdw = {
- .probe = NULL,
+ .probe = max98373_sdw_probe,
.controls = max98373_snd_controls,
.num_controls = ARRAY_SIZE(max98373_snd_controls),
.dapm_widgets = max98373_dapm_widgets,
@@ -450,7 +461,6 @@ const struct snd_soc_component_driver soc_codec_dev_max98373_sdw = {
.num_dapm_routes = ARRAY_SIZE(max98373_audio_map),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
EXPORT_SYMBOL_GPL(soc_codec_dev_max98373_sdw);
diff --git a/sound/soc/codecs/max98390.c b/sound/soc/codecs/max98390.c
index 2a6b1648c884..5c08166a8dc6 100644
--- a/sound/soc/codecs/max98390.c
+++ b/sound/soc/codecs/max98390.c
@@ -10,7 +10,7 @@
#include <linux/cdev.h>
#include <linux/dmi.h>
#include <linux/firmware.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
@@ -983,7 +983,6 @@ static const struct snd_soc_component_driver soc_codec_dev_max98390 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config max98390_regmap = {
diff --git a/sound/soc/codecs/max98396.c b/sound/soc/codecs/max98396.c
index 34db38812807..364b4b7ee033 100644
--- a/sound/soc/codecs/max98396.c
+++ b/sound/soc/codecs/max98396.c
@@ -5,11 +5,18 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <sound/pcm_params.h>
+#include <linux/regulator/consumer.h>
#include <sound/soc.h>
#include <linux/gpio.h>
#include <sound/tlv.h>
#include "max98396.h"
+static const char * const max98396_core_supplies[MAX98396_NUM_CORE_SUPPLIES] = {
+ "avdd",
+ "dvdd",
+ "dvddio",
+};
+
static struct reg_default max98396_reg[] = {
{MAX98396_R2000_SW_RESET, 0x00},
{MAX98396_R2001_INT_RAW1, 0x00},
@@ -368,7 +375,8 @@ static int max98396_dai_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
break;
default:
- dev_err(component->dev, "DAI invert mode unsupported\n");
+ dev_err(component->dev, "DAI invert mode %d unsupported\n",
+ fmt & SND_SOC_DAIFMT_INV_MASK);
return -EINVAL;
}
@@ -387,6 +395,8 @@ static int max98396_dai_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
format |= MAX98396_PCM_FORMAT_TDM_MODE0;
break;
default:
+ dev_err(component->dev, "DAI format %d unsupported\n",
+ fmt & SND_SOC_DAIFMT_FORMAT_MASK);
return -EINVAL;
}
@@ -428,46 +438,68 @@ static int max98396_dai_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
return 0;
}
-/* BCLKs per LRCLK */
-static const int bclk_sel_table[] = {
- 32, 48, 64, 96, 128, 192, 256, 384, 512, 320,
+#define MAX98396_BSEL_32 0x2
+#define MAX98396_BSEL_48 0x3
+#define MAX98396_BSEL_64 0x4
+#define MAX98396_BSEL_96 0x5
+#define MAX98396_BSEL_128 0x6
+#define MAX98396_BSEL_192 0x7
+#define MAX98396_BSEL_256 0x8
+#define MAX98396_BSEL_384 0x9
+#define MAX98396_BSEL_512 0xa
+#define MAX98396_BSEL_320 0xb
+#define MAX98396_BSEL_250 0xc
+#define MAX98396_BSEL_125 0xd
+
+/* Refer to table 5 in the datasheet */
+static const struct max98396_pcm_config {
+ int in, out, width, bsel, max_sr;
+} max98396_pcm_configs[] = {
+ { .in = 2, .out = 4, .width = 16, .bsel = MAX98396_BSEL_32, .max_sr = 192000 },
+ { .in = 2, .out = 6, .width = 24, .bsel = MAX98396_BSEL_48, .max_sr = 192000 },
+ { .in = 2, .out = 8, .width = 32, .bsel = MAX98396_BSEL_64, .max_sr = 192000 },
+ { .in = 3, .out = 15, .width = 32, .bsel = MAX98396_BSEL_125, .max_sr = 192000 },
+ { .in = 4, .out = 8, .width = 16, .bsel = MAX98396_BSEL_64, .max_sr = 192000 },
+ { .in = 4, .out = 12, .width = 24, .bsel = MAX98396_BSEL_96, .max_sr = 192000 },
+ { .in = 4, .out = 16, .width = 32, .bsel = MAX98396_BSEL_128, .max_sr = 192000 },
+ { .in = 5, .out = 15, .width = 24, .bsel = MAX98396_BSEL_125, .max_sr = 192000 },
+ { .in = 7, .out = 15, .width = 16, .bsel = MAX98396_BSEL_125, .max_sr = 192000 },
+ { .in = 2, .out = 4, .width = 16, .bsel = MAX98396_BSEL_32, .max_sr = 96000 },
+ { .in = 2, .out = 6, .width = 24, .bsel = MAX98396_BSEL_48, .max_sr = 96000 },
+ { .in = 2, .out = 8, .width = 32, .bsel = MAX98396_BSEL_64, .max_sr = 96000 },
+ { .in = 3, .out = 15, .width = 32, .bsel = MAX98396_BSEL_125, .max_sr = 96000 },
+ { .in = 4, .out = 8, .width = 16, .bsel = MAX98396_BSEL_64, .max_sr = 96000 },
+ { .in = 4, .out = 12, .width = 24, .bsel = MAX98396_BSEL_96, .max_sr = 96000 },
+ { .in = 4, .out = 16, .width = 32, .bsel = MAX98396_BSEL_128, .max_sr = 96000 },
+ { .in = 5, .out = 15, .width = 24, .bsel = MAX98396_BSEL_125, .max_sr = 96000 },
+ { .in = 7, .out = 15, .width = 16, .bsel = MAX98396_BSEL_125, .max_sr = 96000 },
+ { .in = 7, .out = 31, .width = 32, .bsel = MAX98396_BSEL_250, .max_sr = 96000 },
+ { .in = 8, .out = 16, .width = 16, .bsel = MAX98396_BSEL_128, .max_sr = 96000 },
+ { .in = 8, .out = 24, .width = 24, .bsel = MAX98396_BSEL_192, .max_sr = 96000 },
+ { .in = 8, .out = 32, .width = 32, .bsel = MAX98396_BSEL_256, .max_sr = 96000 },
+ { .in = 10, .out = 31, .width = 24, .bsel = MAX98396_BSEL_250, .max_sr = 96000 },
+ { .in = 15, .out = 31, .width = 16, .bsel = MAX98396_BSEL_250, .max_sr = 96000 },
+ { .in = 16, .out = 32, .width = 16, .bsel = MAX98396_BSEL_256, .max_sr = 96000 },
+ { .in = 7, .out = 31, .width = 32, .bsel = MAX98396_BSEL_250, .max_sr = 48000 },
+ { .in = 10, .out = 31, .width = 24, .bsel = MAX98396_BSEL_250, .max_sr = 48000 },
+ { .in = 10, .out = 40, .width = 32, .bsel = MAX98396_BSEL_320, .max_sr = 48000 },
+ { .in = 15, .out = 31, .width = 16, .bsel = MAX98396_BSEL_250, .max_sr = 48000 },
+ { .in = 16, .out = 48, .width = 24, .bsel = MAX98396_BSEL_384, .max_sr = 48000 },
+ { .in = 16, .out = 64, .width = 32, .bsel = MAX98396_BSEL_512, .max_sr = 48000 },
};
-static int max98396_get_bclk_sel(int bclk)
+static int max98396_pcm_config_index(int in_slots, int out_slots, int width)
{
int i;
- /* match BCLKs per LRCLK */
- for (i = 0; i < ARRAY_SIZE(bclk_sel_table); i++) {
- if (bclk_sel_table[i] == bclk)
- return i + 2;
- }
- return 0;
-}
-static int max98396_set_clock(struct snd_soc_component *component,
- struct snd_pcm_hw_params *params)
-{
- struct max98396_priv *max98396 = snd_soc_component_get_drvdata(component);
- /* BCLK/LRCLK ratio calculation */
- int blr_clk_ratio = params_channels(params) * max98396->ch_size;
- int value;
+ for (i = 0; i < ARRAY_SIZE(max98396_pcm_configs); i++) {
+ const struct max98396_pcm_config *c = &max98396_pcm_configs[i];
- if (!max98396->tdm_mode) {
- /* BCLK configuration */
- value = max98396_get_bclk_sel(blr_clk_ratio);
- if (!value) {
- dev_err(component->dev, "format unsupported %d\n",
- params_format(params));
- return -EINVAL;
- }
-
- regmap_update_bits(max98396->regmap,
- MAX98396_R2042_PCM_CLK_SETUP,
- MAX98396_PCM_CLK_SETUP_BSEL_MASK,
- value);
+ if (in_slots == c->in && out_slots <= c->out && width == c->width)
+ return i;
}
- return 0;
+ return -1;
}
static int max98396_dai_hw_params(struct snd_pcm_substream *substream,
@@ -478,8 +510,7 @@ static int max98396_dai_hw_params(struct snd_pcm_substream *substream,
struct max98396_priv *max98396 = snd_soc_component_get_drvdata(component);
unsigned int sampling_rate = 0;
unsigned int chan_sz = 0;
- int ret, reg;
- int status;
+ int ret, reg, status, bsel = 0;
bool update = false;
/* pcm mode configuration */
@@ -499,8 +530,6 @@ static int max98396_dai_hw_params(struct snd_pcm_substream *substream,
goto err;
}
- max98396->ch_size = snd_pcm_format_width(params_format(params));
-
dev_dbg(component->dev, "format supported %d",
params_format(params));
@@ -548,6 +577,33 @@ static int max98396_dai_hw_params(struct snd_pcm_substream *substream,
goto err;
}
+ if (max98396->tdm_mode) {
+ if (params_rate(params) > max98396->tdm_max_samplerate) {
+ dev_err(component->dev, "TDM sample rate %d too high\n",
+ params_rate(params));
+ goto err;
+ }
+ } else {
+ /* BCLK configuration */
+ ret = max98396_pcm_config_index(params_channels(params),
+ params_channels(params),
+ snd_pcm_format_width(params_format(params)));
+ if (ret < 0) {
+ dev_err(component->dev,
+ "no PCM config for %d channels, format %d\n",
+ params_channels(params), params_format(params));
+ goto err;
+ }
+
+ bsel = max98396_pcm_configs[ret].bsel;
+
+ if (params_rate(params) > max98396_pcm_configs[ret].max_sr) {
+ dev_err(component->dev, "sample rate %d too high\n",
+ params_rate(params));
+ goto err;
+ }
+ }
+
ret = regmap_read(max98396->regmap, MAX98396_R210F_GLOBAL_EN, &status);
if (ret < 0)
goto err;
@@ -593,12 +649,16 @@ static int max98396_dai_hw_params(struct snd_pcm_substream *substream,
MAX98396_IVADC_SR_MASK,
sampling_rate << MAX98396_IVADC_SR_SHIFT);
- ret = max98396_set_clock(component, params);
+ if (bsel)
+ regmap_update_bits(max98396->regmap,
+ MAX98396_R2042_PCM_CLK_SETUP,
+ MAX98396_PCM_CLK_SETUP_BSEL_MASK,
+ bsel);
if (status && update)
max98396_global_enable_onoff(max98396->regmap, true);
- return ret;
+ return 0;
err:
return -EINVAL;
@@ -623,13 +683,16 @@ static int max98396_dai_tdm_slot(struct snd_soc_dai *dai,
max98396->tdm_mode = true;
/* BCLK configuration */
- bsel = max98396_get_bclk_sel(slots * slot_width);
- if (bsel == 0) {
- dev_err(component->dev, "BCLK %d not supported\n",
- slots * slot_width);
+ ret = max98396_pcm_config_index(slots, slots, slot_width);
+ if (ret < 0) {
+ dev_err(component->dev, "no TDM config for %d slots %d bits\n",
+ slots, slot_width);
return -EINVAL;
}
+ bsel = max98396_pcm_configs[ret].bsel;
+ max98396->tdm_max_samplerate = max98396_pcm_configs[ret].max_sr;
+
/* Channel size configuration */
switch (slot_width) {
case 16:
@@ -642,7 +705,7 @@ static int max98396_dai_tdm_slot(struct snd_soc_dai *dai,
chan_sz = MAX98396_PCM_MODE_CFG_CHANSZ_32;
break;
default:
- dev_err(component->dev, "format unsupported %d\n",
+ dev_err(component->dev, "slot width %d unsupported\n",
slot_width);
return -EINVAL;
}
@@ -1331,6 +1394,12 @@ static int max98396_probe(struct snd_soc_component *component)
regmap_write(max98396->regmap,
MAX98397_R2057_PCM_RX_SRC2, 0x10);
}
+ /* Supply control */
+ regmap_update_bits(max98396->regmap,
+ MAX98396_R20A0_AMP_SUPPLY_CTL,
+ MAX98396_AMP_SUPPLY_NOVBAT,
+ (max98396->vbat == NULL) ?
+ MAX98396_AMP_SUPPLY_NOVBAT : 0);
/* Enable DC blocker */
regmap_update_bits(max98396->regmap,
MAX98396_R2092_AMP_DSP_CFG, 1, 1);
@@ -1360,6 +1429,9 @@ static int max98396_probe(struct snd_soc_component *component)
regmap_write(max98396->regmap,
MAX98396_R2045_PCM_TX_CTRL_2,
max98396->i_slot);
+ regmap_write(max98396->regmap,
+ MAX98396_R204A_PCM_TX_CTRL_7,
+ max98396->spkfb_slot);
if (max98396->v_slot < 8)
if (max98396->device_id == CODEC_TYPE_MAX98396)
@@ -1426,12 +1498,38 @@ static int max98396_suspend(struct device *dev)
regcache_cache_only(max98396->regmap, true);
regcache_mark_dirty(max98396->regmap);
+ regulator_bulk_disable(MAX98396_NUM_CORE_SUPPLIES,
+ max98396->core_supplies);
+ if (max98396->pvdd)
+ regulator_disable(max98396->pvdd);
+
+ if (max98396->vbat)
+ regulator_disable(max98396->vbat);
+
return 0;
}
static int max98396_resume(struct device *dev)
{
struct max98396_priv *max98396 = dev_get_drvdata(dev);
+ int ret;
+
+ ret = regulator_bulk_enable(MAX98396_NUM_CORE_SUPPLIES,
+ max98396->core_supplies);
+ if (ret < 0)
+ return ret;
+
+ if (max98396->pvdd) {
+ ret = regulator_enable(max98396->pvdd);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (max98396->vbat) {
+ ret = regulator_enable(max98396->vbat);
+ if (ret < 0)
+ return ret;
+ }
regcache_cache_only(max98396->regmap, false);
max98396_reset(max98396, dev);
@@ -1455,7 +1553,6 @@ static const struct snd_soc_component_driver soc_codec_dev_max98396 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct snd_soc_component_driver soc_codec_dev_max98397 = {
@@ -1469,7 +1566,6 @@ static const struct snd_soc_component_driver soc_codec_dev_max98397 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config max98396_regmap = {
@@ -1509,17 +1605,35 @@ static void max98396_read_device_property(struct device *dev,
else
max98396->i_slot = 1;
+ if (!device_property_read_u32(dev, "adi,spkfb-slot-no", &value))
+ max98396->spkfb_slot = value & 0xF;
+ else
+ max98396->spkfb_slot = 2;
+
if (!device_property_read_u32(dev, "adi,bypass-slot-no", &value))
max98396->bypass_slot = value & 0xF;
else
max98396->bypass_slot = 0;
}
+static void max98396_core_supplies_disable(void *priv)
+{
+ struct max98396_priv *max98396 = priv;
+
+ regulator_bulk_disable(MAX98396_NUM_CORE_SUPPLIES,
+ max98396->core_supplies);
+}
+
+static void max98396_supply_disable(void *r)
+{
+ regulator_disable((struct regulator *) r);
+}
+
static int max98396_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct max98396_priv *max98396 = NULL;
- int ret, reg;
+ int i, ret, reg;
max98396 = devm_kzalloc(&i2c->dev, sizeof(*max98396), GFP_KERNEL);
@@ -1545,6 +1659,69 @@ static int max98396_i2c_probe(struct i2c_client *i2c,
return ret;
}
+ /* Obtain regulator supplies */
+ for (i = 0; i < MAX98396_NUM_CORE_SUPPLIES; i++)
+ max98396->core_supplies[i].supply = max98396_core_supplies[i];
+
+ ret = devm_regulator_bulk_get(&i2c->dev, MAX98396_NUM_CORE_SUPPLIES,
+ max98396->core_supplies);
+ if (ret < 0) {
+ dev_err(&i2c->dev, "Failed to request core supplies: %d\n", ret);
+ return ret;
+ }
+
+ max98396->vbat = devm_regulator_get_optional(&i2c->dev, "vbat");
+ if (IS_ERR(max98396->vbat)) {
+ if (PTR_ERR(max98396->vbat) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ max98396->vbat = NULL;
+ }
+
+ max98396->pvdd = devm_regulator_get_optional(&i2c->dev, "pvdd");
+ if (IS_ERR(max98396->pvdd)) {
+ if (PTR_ERR(max98396->pvdd) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ max98396->pvdd = NULL;
+ }
+
+ ret = regulator_bulk_enable(MAX98396_NUM_CORE_SUPPLIES,
+ max98396->core_supplies);
+ if (ret < 0) {
+ dev_err(&i2c->dev, "Unable to enable core supplies: %d", ret);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(&i2c->dev, max98396_core_supplies_disable,
+ max98396);
+ if (ret < 0)
+ return ret;
+
+ if (max98396->pvdd) {
+ ret = regulator_enable(max98396->pvdd);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_add_action_or_reset(&i2c->dev,
+ max98396_supply_disable,
+ max98396->pvdd);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (max98396->vbat) {
+ ret = regulator_enable(max98396->vbat);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_add_action_or_reset(&i2c->dev,
+ max98396_supply_disable,
+ max98396->vbat);
+ if (ret < 0)
+ return ret;
+ }
+
/* update interleave mode info */
if (device_property_read_bool(&i2c->dev, "adi,interleave_mode"))
max98396->interleave_mode = true;
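
A quick worked reading of the new table-driven BCLK selection in max98396.c above (my interpretation of the lookup, not from the patch description): max98396_pcm_config_index() returns the first entry whose .in slot count matches exactly, whose .out capacity is not exceeded and whose .width matches. For a plain 2-channel, 32-bit stream, hw_params calls it as max98396_pcm_config_index(2, 2, 32), which lands on the { .in = 2, .out = 8, .width = 32 } entry, i.e. MAX98396_BSEL_64 (64 BCLKs per LRCLK, matching 2 slots of 32 bits) with a 192 kHz ceiling. The entries are ordered so the 192 kHz-capable configurations are tried before the 96 kHz and 48 kHz ones, and a stream whose rate exceeds the matched entry's max_sr is rejected in hw_params rather than being misclocked.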
diff --git a/sound/soc/codecs/max98396.h b/sound/soc/codecs/max98396.h
index 694411038597..7278c779989a 100644
--- a/sound/soc/codecs/max98396.h
+++ b/sound/soc/codecs/max98396.h
@@ -274,6 +274,9 @@
#define MAX98396_DSP_SPK_SAFE_EN_SHIFT (5)
#define MAX98396_DSP_SPK_WB_FLT_EN_SHIFT (6)
+/* MAX98396_R20A0_AMP_SUPPLY_CTL */
+#define MAX98396_AMP_SUPPLY_NOVBAT (0x1 << 0)
+
/* MAX98396_R20E0_IV_SENSE_PATH_CFG */
#define MAX98396_IV_SENSE_DCBLK_EN_MASK (0x3 << 0)
#define MAX98396_IV_SENSE_DCBLK_EN_SHIFT (0)
@@ -291,15 +294,20 @@ enum {
CODEC_TYPE_MAX98397,
};
+#define MAX98396_NUM_CORE_SUPPLIES 3
+
struct max98396_priv {
struct regmap *regmap;
struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data core_supplies[MAX98396_NUM_CORE_SUPPLIES];
+ struct regulator *pvdd, *vbat;
unsigned int v_slot;
unsigned int i_slot;
+ unsigned int spkfb_slot;
unsigned int bypass_slot;
bool interleave_mode;
- unsigned int ch_size;
bool tdm_mode;
+ int tdm_max_samplerate;
int device_id;
};
#endif
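
For context on the supply handling introduced for max98396 above: probe acquires the three core supplies (avdd, dvdd, dvddio) as a bulk set, treats vbat and pvdd as optional, and uses devm_add_action_or_reset() so every enable is paired with an automatic disable on probe failure or driver unbind. A minimal stand-alone sketch of that pattern, with hypothetical names (not the driver code itself):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static void example_supply_disable(void *data)
{
	regulator_disable(data);
}

/* Enable one optional supply and register its cleanup action. */
static int example_enable_optional_supply(struct device *dev, const char *name)
{
	struct regulator *reg;
	int ret;

	reg = devm_regulator_get_optional(dev, name);
	if (IS_ERR(reg))
		/* An absent supply is fine; deferral must be propagated. */
		return PTR_ERR(reg) == -EPROBE_DEFER ? -EPROBE_DEFER : 0;

	ret = regulator_enable(reg);
	if (ret < 0)
		return ret;

	return devm_add_action_or_reset(dev, example_supply_disable, reg);
}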
diff --git a/sound/soc/codecs/max9850.c b/sound/soc/codecs/max9850.c
index 9ca6fc254883..a6733396b0ca 100644
--- a/sound/soc/codecs/max9850.c
+++ b/sound/soc/codecs/max9850.c
@@ -296,7 +296,6 @@ static const struct snd_soc_component_driver soc_component_dev_max9850 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int max9850_i2c_probe(struct i2c_client *i2c)
diff --git a/sound/soc/codecs/max98520.c b/sound/soc/codecs/max98520.c
index f0f085ecab55..5edd6f90f8a7 100644
--- a/sound/soc/codecs/max98520.c
+++ b/sound/soc/codecs/max98520.c
@@ -657,7 +657,6 @@ static const struct snd_soc_component_driver soc_codec_dev_max98520 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config max98520_regmap = {
diff --git a/sound/soc/codecs/max9860.c b/sound/soc/codecs/max9860.c
index 82f20a8e27ad..771b3dcd6cc3 100644
--- a/sound/soc/codecs/max9860.c
+++ b/sound/soc/codecs/max9860.c
@@ -448,9 +448,9 @@ static int max9860_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
struct snd_soc_component *component = dai->component;
struct max9860_priv *max9860 = snd_soc_component_get_drvdata(component);
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBM_CFM:
- case SND_SOC_DAIFMT_CBS_CFS:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_CBP_CFP:
+ case SND_SOC_DAIFMT_CBC_CFC:
max9860->fmt = fmt;
return 0;
@@ -537,7 +537,6 @@ static const struct snd_soc_component_driver max9860_component_driver = {
.num_dapm_routes = ARRAY_SIZE(max9860_dapm_routes),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
#ifdef CONFIG_PM
diff --git a/sound/soc/codecs/max9867.c b/sound/soc/codecs/max9867.c
index eb628b7e84f5..6d2941a9dbd6 100644
--- a/sound/soc/codecs/max9867.c
+++ b/sound/soc/codecs/max9867.c
@@ -589,7 +589,6 @@ static const struct snd_soc_component_driver max9867_component = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static bool max9867_volatile_register(struct device *dev, unsigned int reg)
diff --git a/sound/soc/codecs/max98925.c b/sound/soc/codecs/max98925.c
index 63849ebcfd35..c24d9f2c8874 100644
--- a/sound/soc/codecs/max98925.c
+++ b/sound/soc/codecs/max98925.c
@@ -544,7 +544,6 @@ static const struct snd_soc_component_driver soc_component_dev_max98925 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config max98925_regmap = {
diff --git a/sound/soc/codecs/max98926.c b/sound/soc/codecs/max98926.c
index 56e0a87c7112..bffd56e240e9 100644
--- a/sound/soc/codecs/max98926.c
+++ b/sound/soc/codecs/max98926.c
@@ -496,7 +496,6 @@ static const struct snd_soc_component_driver soc_component_dev_max98926 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config max98926_regmap = {
diff --git a/sound/soc/codecs/max98927.c b/sound/soc/codecs/max98927.c
index b7cff76d7b5b..9cce7c0f0142 100644
--- a/sound/soc/codecs/max98927.c
+++ b/sound/soc/codecs/max98927.c
@@ -832,7 +832,6 @@ static const struct snd_soc_component_driver soc_component_dev_max98927 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config max98927_regmap = {
diff --git a/sound/soc/codecs/mc13783.c b/sound/soc/codecs/mc13783.c
index 08517547e66c..71490f11d96a 100644
--- a/sound/soc/codecs/mc13783.c
+++ b/sound/soc/codecs/mc13783.c
@@ -727,7 +727,6 @@ static const struct snd_soc_component_driver soc_component_dev_mc13783 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int __init mc13783_codec_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/ml26124.c b/sound/soc/codecs/ml26124.c
index de8fcbdd85be..3c6ac77379cb 100644
--- a/sound/soc/codecs/ml26124.c
+++ b/sound/soc/codecs/ml26124.c
@@ -537,7 +537,6 @@ static const struct snd_soc_component_driver soc_component_dev_ml26124 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config ml26124_i2c_regmap = {
diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c
index e52a559c52d6..78e543eb3c83 100644
--- a/sound/soc/codecs/msm8916-wcd-analog.c
+++ b/sound/soc/codecs/msm8916-wcd-analog.c
@@ -1128,7 +1128,6 @@ static const struct snd_soc_component_driver pm8916_wcd_analog = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int pm8916_wcd_analog_parse_dt(struct device *dev,
diff --git a/sound/soc/codecs/msm8916-wcd-digital.c b/sound/soc/codecs/msm8916-wcd-digital.c
index 20a07c92b2fc..d490a0f18675 100644
--- a/sound/soc/codecs/msm8916-wcd-digital.c
+++ b/sound/soc/codecs/msm8916-wcd-digital.c
@@ -328,8 +328,8 @@ static const struct snd_kcontrol_new rx1_mix2_inp1_mux = SOC_DAPM_ENUM(
static const struct snd_kcontrol_new rx2_mix2_inp1_mux = SOC_DAPM_ENUM(
"RX2 MIX2 INP1 Mux", rx2_mix2_inp1_chain_enum);
-/* Digital Gain control -38.4 dB to +38.4 dB in 0.3 dB steps */
-static const DECLARE_TLV_DB_SCALE(digital_gain, -3840, 30, 0);
+/* Digital Gain control -84 dB to +40 dB in 1 dB steps */
+static const DECLARE_TLV_DB_SCALE(digital_gain, -8400, 100, -8400);
/* Cutoff Freq for High Pass Filter at -3dB */
static const char * const hpf_cutoff_text[] = {
@@ -510,15 +510,15 @@ static int wcd_iir_filter_info(struct snd_kcontrol *kcontrol,
static const struct snd_kcontrol_new msm8916_wcd_digital_snd_controls[] = {
SOC_SINGLE_S8_TLV("RX1 Digital Volume", LPASS_CDC_RX1_VOL_CTL_B2_CTL,
- -128, 127, digital_gain),
+ -84, 40, digital_gain),
SOC_SINGLE_S8_TLV("RX2 Digital Volume", LPASS_CDC_RX2_VOL_CTL_B2_CTL,
- -128, 127, digital_gain),
+ -84, 40, digital_gain),
SOC_SINGLE_S8_TLV("RX3 Digital Volume", LPASS_CDC_RX3_VOL_CTL_B2_CTL,
- -128, 127, digital_gain),
+ -84, 40, digital_gain),
SOC_SINGLE_S8_TLV("TX1 Digital Volume", LPASS_CDC_TX1_VOL_CTL_GAIN,
- -128, 127, digital_gain),
+ -84, 40, digital_gain),
SOC_SINGLE_S8_TLV("TX2 Digital Volume", LPASS_CDC_TX2_VOL_CTL_GAIN,
- -128, 127, digital_gain),
+ -84, 40, digital_gain),
SOC_ENUM("TX1 HPF Cutoff", tx1_hpf_cutoff_enum),
SOC_ENUM("TX2 HPF Cutoff", tx2_hpf_cutoff_enum),
SOC_SINGLE("TX1 HPF Switch", LPASS_CDC_TX1_MUX_CTL, 3, 1, 0),
@@ -553,22 +553,22 @@ static const struct snd_kcontrol_new msm8916_wcd_digital_snd_controls[] = {
WCD_IIR_FILTER_CTL("IIR2 Band3", IIR2, BAND3),
WCD_IIR_FILTER_CTL("IIR2 Band4", IIR2, BAND4),
WCD_IIR_FILTER_CTL("IIR2 Band5", IIR2, BAND5),
- SOC_SINGLE_SX_TLV("IIR1 INP1 Volume", LPASS_CDC_IIR1_GAIN_B1_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("IIR1 INP2 Volume", LPASS_CDC_IIR1_GAIN_B2_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("IIR1 INP3 Volume", LPASS_CDC_IIR1_GAIN_B3_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("IIR1 INP4 Volume", LPASS_CDC_IIR1_GAIN_B4_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("IIR2 INP1 Volume", LPASS_CDC_IIR2_GAIN_B1_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("IIR2 INP2 Volume", LPASS_CDC_IIR2_GAIN_B2_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("IIR2 INP3 Volume", LPASS_CDC_IIR2_GAIN_B3_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("IIR2 INP4 Volume", LPASS_CDC_IIR2_GAIN_B4_CTL,
- 0, -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("IIR1 INP1 Volume", LPASS_CDC_IIR1_GAIN_B1_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("IIR1 INP2 Volume", LPASS_CDC_IIR1_GAIN_B2_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("IIR1 INP3 Volume", LPASS_CDC_IIR1_GAIN_B3_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("IIR1 INP4 Volume", LPASS_CDC_IIR1_GAIN_B4_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("IIR2 INP1 Volume", LPASS_CDC_IIR2_GAIN_B1_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("IIR2 INP2 Volume", LPASS_CDC_IIR2_GAIN_B2_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("IIR2 INP3 Volume", LPASS_CDC_IIR2_GAIN_B3_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("IIR2 INP4 Volume", LPASS_CDC_IIR2_GAIN_B4_CTL,
+ -84, 40, digital_gain),
};
@@ -1155,7 +1155,6 @@ static const struct snd_soc_component_driver msm8916_wcd_digital = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config msm8916_codec_regmap_config = {
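
A quick sanity check on the msm8916-wcd-digital gain scale above (my reading of the TLV macros, not taken from the patch): DECLARE_TLV_DB_SCALE(digital_gain, -8400, 100, ...) declares a scale that starts at -84.00 dB and moves in 1.00 dB steps, and SOC_SINGLE_S8_TLV(..., -84, 40, digital_gain) exposes the signed register range -84..+40, so a raw value of -84 maps to -84 dB, 0 to 0 dB and +40 to +40 dB. That agrees with the updated "-84 dB to +40 dB in 1 dB steps" comment, and the IIR input volume controls are switched to the same S8 helper so all of these controls share one consistent range.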
diff --git a/sound/soc/codecs/mt6358.c b/sound/soc/codecs/mt6358.c
index 60b209efe52d..93f35e8d26fc 100644
--- a/sound/soc/codecs/mt6358.c
+++ b/sound/soc/codecs/mt6358.c
@@ -2479,6 +2479,7 @@ static int mt6358_platform_driver_probe(struct platform_device *pdev)
static const struct of_device_id mt6358_of_match[] = {
{.compatible = "mediatek,mt6358-sound",},
+ {.compatible = "mediatek,mt6366-sound",},
{}
};
MODULE_DEVICE_TABLE(of, mt6358_of_match);
diff --git a/sound/soc/codecs/mt6359-accdet.c b/sound/soc/codecs/mt6359-accdet.c
index 6d3d170144a0..c190628e2905 100644
--- a/sound/soc/codecs/mt6359-accdet.c
+++ b/sound/soc/codecs/mt6359-accdet.c
@@ -675,6 +675,7 @@ static int mt6359_accdet_parse_dt(struct mt6359_accdet *priv)
sizeof(struct three_key_threshold));
}
+ of_node_put(node);
dev_warn(priv->dev, "accdet caps=%x\n", priv->caps);
return 0;
diff --git a/sound/soc/codecs/mt6359.c b/sound/soc/codecs/mt6359.c
index 23709b180409..c9a453ce8a2a 100644
--- a/sound/soc/codecs/mt6359.c
+++ b/sound/soc/codecs/mt6359.c
@@ -2778,6 +2778,7 @@ static int mt6359_parse_dt(struct mt6359_priv *priv)
ret = of_property_read_u32(np, "mediatek,mic-type-2",
&priv->mux_select[MUX_MIC_TYPE_2]);
+ of_node_put(np);
if (ret) {
dev_info(priv->dev,
"%s() failed to read mic-type-2, use default (%d)\n",
diff --git a/sound/soc/codecs/nau8315.c b/sound/soc/codecs/nau8315.c
index 2b66e3f7a8b7..ad4dce9e5080 100644
--- a/sound/soc/codecs/nau8315.c
+++ b/sound/soc/codecs/nau8315.c
@@ -93,7 +93,6 @@ static const struct snd_soc_component_driver nau8315_component_driver = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct snd_soc_dai_ops nau8315_dai_ops = {
diff --git a/sound/soc/codecs/nau8540.c b/sound/soc/codecs/nau8540.c
index 347c715e22a4..58f70a02f18a 100644
--- a/sound/soc/codecs/nau8540.c
+++ b/sound/soc/codecs/nau8540.c
@@ -806,7 +806,6 @@ static const struct snd_soc_component_driver nau8540_component_driver = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config nau8540_regmap_config = {
diff --git a/sound/soc/codecs/nau8810.c b/sound/soc/codecs/nau8810.c
index 7b3b1e4ac246..ccb512c21d74 100644
--- a/sound/soc/codecs/nau8810.c
+++ b/sound/soc/codecs/nau8810.c
@@ -866,7 +866,6 @@ static const struct snd_soc_component_driver nau8810_component_driver = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int nau8810_i2c_probe(struct i2c_client *i2c)
diff --git a/sound/soc/codecs/nau8821.c b/sound/soc/codecs/nau8821.c
index ce4e7f46bb06..2d21339932e6 100644
--- a/sound/soc/codecs/nau8821.c
+++ b/sound/soc/codecs/nau8821.c
@@ -29,11 +29,14 @@
#define NAU_FVCO_MAX 100000000
#define NAU_FVCO_MIN 90000000
+#define NAU8821_BUTTON SND_JACK_BTN_0
+
/* the maximum frequency of CLK_ADC and CLK_DAC */
#define CLK_DA_AD_MAX 6144000
static int nau8821_configure_sysclk(struct nau8821 *nau8821,
int clk_id, unsigned int freq);
+static bool nau8821_is_jack_inserted(struct regmap *regmap);
struct nau8821_fll {
int mclk_src;
@@ -493,7 +496,33 @@ static int nau8821_output_dac_event(struct snd_soc_dapm_widget *w,
return 0;
}
+static int system_clock_control(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_soc_component *component =
+ snd_soc_dapm_to_component(w->dapm);
+ struct nau8821 *nau8821 = snd_soc_component_get_drvdata(component);
+
+ if (SND_SOC_DAPM_EVENT_OFF(event)) {
+ dev_dbg(nau8821->dev, "system clock control: POWER OFF\n");
+ /* Before playback or capture ends, switch the clock source to
+ * the internal clock or disable it entirely. The codec still
+ * needs a clock for jack detection and button presses while a
+ * jack is inserted; otherwise the clock can be gated off.
+ */
+ if (nau8821_is_jack_inserted(nau8821->regmap)) {
+ nau8821_configure_sysclk(nau8821,
+ NAU8821_CLK_INTERNAL, 0);
+ } else {
+ nau8821_configure_sysclk(nau8821, NAU8821_CLK_DIS, 0);
+ }
+ }
+ return 0;
+}
+
static const struct snd_soc_dapm_widget nau8821_dapm_widgets[] = {
+ SND_SOC_DAPM_SUPPLY("System Clock", SND_SOC_NOPM, 0, 0,
+ system_clock_control, SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_SUPPLY("MICBIAS", NAU8821_R74_MIC_BIAS,
NAU8821_MICBIAS_POWERUP_SFT, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("DMIC Clock", SND_SOC_NOPM, 0, 0,
@@ -605,6 +634,9 @@ static const struct snd_soc_dapm_route nau8821_dapm_routes[] = {
{"AIFTX", NULL, "ADCL Digital path"},
{"AIFTX", NULL, "ADCR Digital path"},
+ {"AIFTX", NULL, "System Clock"},
+ {"AIFRX", NULL, "System Clock"},
+
{"DDACL", NULL, "AIFRX"},
{"DDACR", NULL, "AIFRX"},
@@ -911,6 +943,20 @@ static void nau8821_eject_jack(struct nau8821 *nau8821)
/* Recover to normal channel input */
regmap_update_bits(regmap, NAU8821_R2B_ADC_RATE,
NAU8821_ADC_R_SRC_EN, 0);
+ if (nau8821->key_enable) {
+ regmap_update_bits(regmap, NAU8821_R0F_INTERRUPT_MASK,
+ NAU8821_IRQ_KEY_RELEASE_EN |
+ NAU8821_IRQ_KEY_PRESS_EN,
+ NAU8821_IRQ_KEY_RELEASE_EN |
+ NAU8821_IRQ_KEY_PRESS_EN);
+ regmap_update_bits(regmap,
+ NAU8821_R12_INTERRUPT_DIS_CTRL,
+ NAU8821_IRQ_KEY_RELEASE_DIS |
+ NAU8821_IRQ_KEY_PRESS_DIS,
+ NAU8821_IRQ_KEY_RELEASE_DIS |
+ NAU8821_IRQ_KEY_PRESS_DIS);
+ }
+
}
static void nau8821_jdet_work(struct work_struct *work)
@@ -940,6 +986,15 @@ static void nau8821_jdet_work(struct work_struct *work)
*/
regmap_update_bits(regmap, NAU8821_R2B_ADC_RATE,
NAU8821_ADC_R_SRC_EN, NAU8821_ADC_R_SRC_EN);
+ if (nau8821->key_enable) {
+ regmap_update_bits(regmap, NAU8821_R0F_INTERRUPT_MASK,
+ NAU8821_IRQ_KEY_RELEASE_EN |
+ NAU8821_IRQ_KEY_PRESS_EN, 0);
+ regmap_update_bits(regmap,
+ NAU8821_R12_INTERRUPT_DIS_CTRL,
+ NAU8821_IRQ_KEY_RELEASE_DIS |
+ NAU8821_IRQ_KEY_PRESS_DIS, 0);
+ }
} else {
dev_dbg(nau8821->dev, "Headphone connected\n");
event |= SND_JACK_HEADPHONE;
@@ -999,6 +1054,13 @@ static irqreturn_t nau8821_interrupt(int irq, void *data)
nau8821_eject_jack(nau8821);
event_mask |= SND_JACK_HEADSET;
clear_irq = NAU8821_JACK_EJECT_IRQ_MASK;
+ } else if (active_irq & NAU8821_KEY_SHORT_PRESS_IRQ) {
+ event |= NAU8821_BUTTON;
+ event_mask |= NAU8821_BUTTON;
+ clear_irq = NAU8821_KEY_SHORT_PRESS_IRQ;
+ } else if (active_irq & NAU8821_KEY_RELEASE_IRQ) {
+ event_mask = NAU8821_BUTTON;
+ clear_irq = NAU8821_KEY_RELEASE_IRQ;
} else if ((active_irq & NAU8821_JACK_INSERT_IRQ_MASK) ==
NAU8821_JACK_INSERT_DETECTED) {
regmap_update_bits(regmap, NAU8821_R71_ANALOG_ADC_1,
@@ -1430,7 +1492,6 @@ static const struct snd_soc_component_driver nau8821_component_driver = {
.dapm_routes = nau8821_dapm_routes,
.num_dapm_routes = ARRAY_SIZE(nau8821_dapm_routes),
.suspend_bias_off = 1,
- .non_legacy_dai_naming = 1,
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
@@ -1490,6 +1551,7 @@ static void nau8821_print_device_properties(struct nau8821 *nau8821)
nau8821->jack_eject_debounce);
dev_dbg(dev, "dmic-clk-threshold: %d\n",
nau8821->dmic_clk_threshold);
+ dev_dbg(dev, "key_enable: %d\n", nau8821->key_enable);
}
static int nau8821_read_device_properties(struct device *dev,
@@ -1503,6 +1565,8 @@ static int nau8821_read_device_properties(struct device *dev,
"nuvoton,jkdet-pull-enable");
nau8821->jkdet_pull_up = device_property_read_bool(dev,
"nuvoton,jkdet-pull-up");
+ nau8821->key_enable = device_property_read_bool(dev,
+ "nuvoton,key-enable");
ret = device_property_read_u32(dev, "nuvoton,jkdet-polarity",
&nau8821->jkdet_polarity);
if (ret)
@@ -1665,15 +1729,6 @@ static int nau8821_i2c_probe(struct i2c_client *i2c)
return ret;
}
-static int nau8821_i2c_remove(struct i2c_client *i2c_client)
-{
- struct nau8821 *nau8821 = i2c_get_clientdata(i2c_client);
-
- devm_free_irq(nau8821->dev, nau8821->irq, nau8821);
-
- return 0;
-}
-
static const struct i2c_device_id nau8821_i2c_ids[] = {
{ "nau8821", 0 },
{ }
@@ -1703,7 +1758,6 @@ static struct i2c_driver nau8821_driver = {
.acpi_match_table = ACPI_PTR(nau8821_acpi_match),
},
.probe_new = nau8821_i2c_probe,
- .remove = nau8821_i2c_remove,
.id_table = nau8821_i2c_ids,
};
module_i2c_driver(nau8821_driver);
diff --git a/sound/soc/codecs/nau8821.h b/sound/soc/codecs/nau8821.h
index a92edfeb9d3a..c44251f54d48 100644
--- a/sound/soc/codecs/nau8821.h
+++ b/sound/soc/codecs/nau8821.h
@@ -525,6 +525,7 @@ struct nau8821 {
int jack_eject_debounce;
int fs;
int dmic_clk_threshold;
+ int key_enable;
};
int nau8821_enable_jack_detect(struct snd_soc_component *component,
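
The nau8821 changes above only report SND_JACK_BTN_0 from the interrupt handler; mapping that button to an input key is left to the machine driver. A hedged sketch of how a card might wire this up, using the exported nau8821_enable_jack_detect() helper declared in the header above (hypothetical card code and key choice):

#include <sound/jack.h>
#include <sound/soc.h>
#include <linux/input-event-codes.h>
#include "../codecs/nau8821.h"

static struct snd_soc_jack example_headset;

static int example_enable_headset(struct snd_soc_card *card,
				  struct snd_soc_component *component)
{
	int ret;

	ret = snd_soc_card_jack_new(card, "Headset Jack",
				    SND_JACK_HEADSET | SND_JACK_BTN_0,
				    &example_headset);
	if (ret)
		return ret;

	/* Report the single short-press button as a play/pause key. */
	snd_jack_set_key(example_headset.jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);

	return nau8821_enable_jack_detect(component, &example_headset);
}

Note that, per the insert/eject handlers above, the key interrupts are only unmasked while a jack is inserted and the "nuvoton,key-enable" property is set.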
diff --git a/sound/soc/codecs/nau8822.c b/sound/soc/codecs/nau8822.c
index 08f6c56dc387..1aef281a9972 100644
--- a/sound/soc/codecs/nau8822.c
+++ b/sound/soc/codecs/nau8822.c
@@ -726,6 +726,17 @@ static int nau8822_set_pll(struct snd_soc_dai *dai, int pll_id, int source,
struct nau8822_pll *pll_param = &nau8822->pll;
int ret, fs;
+ if (freq_in == pll_param->freq_in &&
+ freq_out == pll_param->freq_out)
+ return 0;
+
+ if (freq_out == 0) {
+ dev_dbg(component->dev, "PLL disabled\n");
+ snd_soc_component_update_bits(component,
+ NAU8822_REG_POWER_MANAGEMENT_1, NAU8822_PLL_EN_MASK, NAU8822_PLL_OFF);
+ return 0;
+ }
+
fs = freq_out / 256;
ret = nau8822_calc_pll(freq_in, fs, pll_param);
@@ -762,6 +773,9 @@ static int nau8822_set_pll(struct snd_soc_dai *dai, int pll_id, int source,
snd_soc_component_update_bits(component,
NAU8822_REG_POWER_MANAGEMENT_1, NAU8822_PLL_EN_MASK, NAU8822_PLL_ON);
+ pll_param->freq_in = freq_in;
+ pll_param->freq_out = freq_out;
+
return 0;
}
@@ -1069,7 +1083,6 @@ static const struct snd_soc_component_driver soc_component_dev_nau8822 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config nau8822_regmap_config = {
diff --git a/sound/soc/codecs/nau8822.h b/sound/soc/codecs/nau8822.h
index b45d42c15de6..547ec057f853 100644
--- a/sound/soc/codecs/nau8822.h
+++ b/sound/soc/codecs/nau8822.h
@@ -198,6 +198,8 @@ struct nau8822_pll {
int mclk_scaler;
int pll_frac;
int pll_int;
+ int freq_in;
+ int freq_out;
};
/* Codec Private Data */
diff --git a/sound/soc/codecs/nau8824.c b/sound/soc/codecs/nau8824.c
index 2a7c93508535..ad54d70f7d8e 100644
--- a/sound/soc/codecs/nau8824.c
+++ b/sound/soc/codecs/nau8824.c
@@ -1544,7 +1544,6 @@ static const struct snd_soc_component_driver nau8824_component_driver = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct snd_soc_dai_ops nau8824_dai_ops = {
diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c
index 20e45a337b8f..54ef7b0fa878 100644
--- a/sound/soc/codecs/nau8825.c
+++ b/sound/soc/codecs/nau8825.c
@@ -1440,7 +1440,7 @@ static struct snd_soc_dai_driver nau8825_dai = {
.capture = {
.stream_name = "Capture",
.channels_min = 1,
- .channels_max = 1,
+ .channels_max = 2, /* Only 1 channel of data */
.rates = NAU8825_RATES,
.formats = NAU8825_FORMATS,
},
@@ -2478,7 +2478,6 @@ static const struct snd_soc_component_driver nau8825_component_driver = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static void nau8825_reset_chip(struct regmap *regmap)
diff --git a/sound/soc/codecs/pcm1681.c b/sound/soc/codecs/pcm1681.c
index 20eb04c8a41a..3591f6f53901 100644
--- a/sound/soc/codecs/pcm1681.c
+++ b/sound/soc/codecs/pcm1681.c
@@ -290,7 +290,6 @@ static const struct snd_soc_component_driver soc_component_dev_pcm1681 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct i2c_device_id pcm1681_i2c_id[] = {
diff --git a/sound/soc/codecs/pcm1789.c b/sound/soc/codecs/pcm1789.c
index 35788b57e11f..3ab381e9a856 100644
--- a/sound/soc/codecs/pcm1789.c
+++ b/sound/soc/codecs/pcm1789.c
@@ -229,7 +229,6 @@ static const struct snd_soc_component_driver soc_component_dev_pcm1789 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
int pcm1789_common_init(struct device *dev, struct regmap *regmap)
diff --git a/sound/soc/codecs/pcm179x.c b/sound/soc/codecs/pcm179x.c
index ee60373d7d25..f52ff66b6e64 100644
--- a/sound/soc/codecs/pcm179x.c
+++ b/sound/soc/codecs/pcm179x.c
@@ -207,7 +207,6 @@ static const struct snd_soc_component_driver soc_component_dev_pcm179x = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
int pcm179x_common_init(struct device *dev, struct regmap *regmap)
diff --git a/sound/soc/codecs/pcm186x.c b/sound/soc/codecs/pcm186x.c
index fda9d7ee3fe6..dd21803ba13c 100644
--- a/sound/soc/codecs/pcm186x.c
+++ b/sound/soc/codecs/pcm186x.c
@@ -578,7 +578,6 @@ static struct snd_soc_component_driver soc_codec_dev_pcm1863 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static struct snd_soc_component_driver soc_codec_dev_pcm1865 = {
@@ -593,7 +592,6 @@ static struct snd_soc_component_driver soc_codec_dev_pcm1865 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static bool pcm186x_volatile(struct device *dev, unsigned int reg)
diff --git a/sound/soc/codecs/pcm3008.c b/sound/soc/codecs/pcm3008.c
index aef40ec40aa1..09c6c1326833 100644
--- a/sound/soc/codecs/pcm3008.c
+++ b/sound/soc/codecs/pcm3008.c
@@ -102,7 +102,6 @@ static const struct snd_soc_component_driver soc_component_dev_pcm3008 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int pcm3008_codec_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/pcm3168a.c b/sound/soc/codecs/pcm3168a.c
index cf27f05dc46a..9d6431338fb7 100644
--- a/sound/soc/codecs/pcm3168a.c
+++ b/sound/soc/codecs/pcm3168a.c
@@ -716,7 +716,6 @@ static const struct snd_soc_component_driver pcm3168a_driver = {
.num_dapm_routes = ARRAY_SIZE(pcm3168a_dapm_routes),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
int pcm3168a_probe(struct device *dev, struct regmap *regmap)
diff --git a/sound/soc/codecs/pcm5102a.c b/sound/soc/codecs/pcm5102a.c
index f39f98bbc97f..3401a25341e6 100644
--- a/sound/soc/codecs/pcm5102a.c
+++ b/sound/soc/codecs/pcm5102a.c
@@ -28,7 +28,6 @@ static struct snd_soc_component_driver soc_component_dev_pcm5102a = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int pcm5102a_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
index a3ff4a07aff7..767463e82665 100644
--- a/sound/soc/codecs/pcm512x.c
+++ b/sound/soc/codecs/pcm512x.c
@@ -1512,7 +1512,6 @@ static const struct snd_soc_component_driver pcm512x_component_driver = {
.num_dapm_routes = ARRAY_SIZE(pcm512x_dapm_routes),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_range_cfg pcm512x_range = {
diff --git a/sound/soc/codecs/rk3328_codec.c b/sound/soc/codecs/rk3328_codec.c
index 86b679cf7aef..1d523bfd9d84 100644
--- a/sound/soc/codecs/rk3328_codec.c
+++ b/sound/soc/codecs/rk3328_codec.c
@@ -69,11 +69,11 @@ static int rk3328_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
snd_soc_component_get_drvdata(dai->component);
unsigned int val;
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBS_CFS:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_CBC_CFC:
val = PIN_DIRECTION_IN | DAC_I2S_MODE_SLAVE;
break;
- case SND_SOC_DAIFMT_CBM_CFM:
+ case SND_SOC_DAIFMT_CBP_CFP:
val = PIN_DIRECTION_OUT | DAC_I2S_MODE_MASTER;
break;
default:
diff --git a/sound/soc/codecs/rk817_codec.c b/sound/soc/codecs/rk817_codec.c
index cce6f4e7992f..2a5b274bfc0f 100644
--- a/sound/soc/codecs/rk817_codec.c
+++ b/sound/soc/codecs/rk817_codec.c
@@ -444,7 +444,6 @@ static const struct snd_soc_component_driver soc_codec_dev_rk817 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
.controls = rk817_volume_controls,
.num_controls = ARRAY_SIZE(rk817_volume_controls),
.dapm_routes = rk817_dapm_routes,
diff --git a/sound/soc/codecs/rt1011.c b/sound/soc/codecs/rt1011.c
index 08dbaef84d4e..c1568216126e 100644
--- a/sound/soc/codecs/rt1011.c
+++ b/sound/soc/codecs/rt1011.c
@@ -2176,7 +2176,6 @@ static const struct snd_soc_component_driver soc_component_dev_rt1011 = {
.set_pll = rt1011_set_component_pll,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config rt1011_regmap = {
diff --git a/sound/soc/codecs/rt1015.c b/sound/soc/codecs/rt1015.c
index 7a06f2654afb..57d0f1c69e46 100644
--- a/sound/soc/codecs/rt1015.c
+++ b/sound/soc/codecs/rt1015.c
@@ -1071,7 +1071,6 @@ static const struct snd_soc_component_driver soc_component_dev_rt1015 = {
.set_pll = rt1015_set_component_pll,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config rt1015_regmap = {
diff --git a/sound/soc/codecs/rt1015p.c b/sound/soc/codecs/rt1015p.c
index 415cfb3b2f0d..06800dad8798 100644
--- a/sound/soc/codecs/rt1015p.c
+++ b/sound/soc/codecs/rt1015p.c
@@ -89,7 +89,6 @@ static const struct snd_soc_component_driver rt1015p_component_driver = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static struct snd_soc_dai_driver rt1015p_dai_driver = {
diff --git a/sound/soc/codecs/rt1016.c b/sound/soc/codecs/rt1016.c
index e31c4736627f..37eeec650f03 100644
--- a/sound/soc/codecs/rt1016.c
+++ b/sound/soc/codecs/rt1016.c
@@ -595,7 +595,6 @@ static const struct snd_soc_component_driver soc_component_dev_rt1016 = {
.set_pll = rt1016_set_component_pll,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config rt1016_regmap = {
diff --git a/sound/soc/codecs/rt1019.c b/sound/soc/codecs/rt1019.c
index f3f15fbe85d0..b66bfecbb879 100644
--- a/sound/soc/codecs/rt1019.c
+++ b/sound/soc/codecs/rt1019.c
@@ -522,7 +522,6 @@ static const struct snd_soc_component_driver soc_component_dev_rt1019 = {
.num_dapm_widgets = ARRAY_SIZE(rt1019_dapm_widgets),
.dapm_routes = rt1019_dapm_routes,
.num_dapm_routes = ARRAY_SIZE(rt1019_dapm_routes),
- .non_legacy_dai_naming = 1,
.endianness = 1,
};
diff --git a/sound/soc/codecs/rt1305.c b/sound/soc/codecs/rt1305.c
index 58d97e3c5087..5b39a440b6dc 100644
--- a/sound/soc/codecs/rt1305.c
+++ b/sound/soc/codecs/rt1305.c
@@ -946,7 +946,6 @@ static const struct snd_soc_component_driver soc_component_dev_rt1305 = {
.set_pll = rt1305_set_component_pll,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config rt1305_regmap = {
diff --git a/sound/soc/codecs/rt1308-sdw.c b/sound/soc/codecs/rt1308-sdw.c
index 72f673f278ee..0be6e72ff5a9 100644
--- a/sound/soc/codecs/rt1308-sdw.c
+++ b/sound/soc/codecs/rt1308-sdw.c
@@ -608,7 +608,19 @@ static const struct sdw_slave_ops rt1308_slave_ops = {
.bus_config = rt1308_bus_config,
};
+static int rt1308_sdw_component_probe(struct snd_soc_component *component)
+{
+ int ret;
+
+ ret = pm_runtime_resume(component->dev);
+ if (ret < 0 && ret != -EACCES)
+ return ret;
+
+ return 0;
+}
+
static const struct snd_soc_component_driver soc_component_sdw_rt1308 = {
+ .probe = rt1308_sdw_component_probe,
.controls = rt1308_snd_controls,
.num_controls = ARRAY_SIZE(rt1308_snd_controls),
.dapm_widgets = rt1308_dapm_widgets,
diff --git a/sound/soc/codecs/rt1308.c b/sound/soc/codecs/rt1308.c
index eec2b1760408..d2a8e9fe3e23 100644
--- a/sound/soc/codecs/rt1308.c
+++ b/sound/soc/codecs/rt1308.c
@@ -765,7 +765,6 @@ static const struct snd_soc_component_driver soc_component_dev_rt1308 = {
.set_pll = rt1308_set_component_pll,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config rt1308_regmap = {
diff --git a/sound/soc/codecs/rt1316-sdw.c b/sound/soc/codecs/rt1316-sdw.c
index 2d6b5f9d4d77..e53396606a1c 100644
--- a/sound/soc/codecs/rt1316-sdw.c
+++ b/sound/soc/codecs/rt1316-sdw.c
@@ -590,7 +590,19 @@ static struct sdw_slave_ops rt1316_slave_ops = {
.update_status = rt1316_update_status,
};
+static int rt1316_sdw_component_probe(struct snd_soc_component *component)
+{
+ int ret;
+
+ ret = pm_runtime_resume(component->dev);
+ if (ret < 0 && ret != -EACCES)
+ return ret;
+
+ return 0;
+}
+
static const struct snd_soc_component_driver soc_component_sdw_rt1316 = {
+ .probe = rt1316_sdw_component_probe,
.controls = rt1316_snd_controls,
.num_controls = ARRAY_SIZE(rt1316_snd_controls),
.dapm_widgets = rt1316_dapm_widgets,
diff --git a/sound/soc/codecs/rt274.c b/sound/soc/codecs/rt274.c
index ab093bdb5552..f2c50b11e4d0 100644
--- a/sound/soc/codecs/rt274.c
+++ b/sound/soc/codecs/rt274.c
@@ -980,14 +980,11 @@ static int rt274_probe(struct snd_soc_component *component)
struct rt274_priv *rt274 = snd_soc_component_get_drvdata(component);
rt274->component = component;
+ INIT_DELAYED_WORK(&rt274->jack_detect_work, rt274_jack_detect_work);
- if (rt274->i2c->irq) {
- INIT_DELAYED_WORK(&rt274->jack_detect_work,
- rt274_jack_detect_work);
+ if (rt274->i2c->irq)
schedule_delayed_work(&rt274->jack_detect_work,
- msecs_to_jiffies(1250));
- }
-
+ msecs_to_jiffies(1250));
return 0;
}
@@ -996,6 +993,7 @@ static void rt274_remove(struct snd_soc_component *component)
struct rt274_priv *rt274 = snd_soc_component_get_drvdata(component);
cancel_delayed_work_sync(&rt274->jack_detect_work);
+ rt274->component = NULL;
}
#ifdef CONFIG_PM
@@ -1075,7 +1073,6 @@ static const struct snd_soc_component_driver soc_component_dev_rt274 = {
.num_dapm_routes = ARRAY_SIZE(rt274_dapm_routes),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config rt274_regmap = {
diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c
index ad8ea1fa7c23..c4f7c4c2d793 100644
--- a/sound/soc/codecs/rt286.c
+++ b/sound/soc/codecs/rt286.c
@@ -311,7 +311,8 @@ static void rt286_jack_detect_work(struct work_struct *work)
SND_JACK_MICROPHONE | SND_JACK_HEADPHONE);
}
-int rt286_mic_detect(struct snd_soc_component *component, struct snd_soc_jack *jack)
+static int rt286_mic_detect(struct snd_soc_component *component,
+ struct snd_soc_jack *jack, void *data)
{
struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);
struct rt286_priv *rt286 = snd_soc_component_get_drvdata(component);
@@ -335,7 +336,6 @@ int rt286_mic_detect(struct snd_soc_component *component, struct snd_soc_jack *j
return 0;
}
-EXPORT_SYMBOL_GPL(rt286_mic_detect);
static int is_mclk_mode(struct snd_soc_dapm_widget *source,
struct snd_soc_dapm_widget *sink)
@@ -947,17 +947,11 @@ static int rt286_probe(struct snd_soc_component *component)
struct rt286_priv *rt286 = snd_soc_component_get_drvdata(component);
rt286->component = component;
+ INIT_DELAYED_WORK(&rt286->jack_detect_work, rt286_jack_detect_work);
- if (rt286->i2c->irq) {
- regmap_update_bits(rt286->regmap,
- RT286_IRQ_CTRL, 0x2, 0x2);
-
- INIT_DELAYED_WORK(&rt286->jack_detect_work,
- rt286_jack_detect_work);
+ if (rt286->i2c->irq)
schedule_delayed_work(&rt286->jack_detect_work,
- msecs_to_jiffies(1250));
- }
-
+ msecs_to_jiffies(50));
return 0;
}
@@ -966,6 +960,7 @@ static void rt286_remove(struct snd_soc_component *component)
struct rt286_priv *rt286 = snd_soc_component_get_drvdata(component);
cancel_delayed_work_sync(&rt286->jack_detect_work);
+ rt286->component = NULL;
}
#ifdef CONFIG_PM
@@ -1055,6 +1050,7 @@ static const struct snd_soc_component_driver soc_component_dev_rt286 = {
.suspend = rt286_suspend,
.resume = rt286_resume,
.set_bias_level = rt286_set_bias_level,
+ .set_jack = rt286_mic_detect,
.controls = rt286_snd_controls,
.num_controls = ARRAY_SIZE(rt286_snd_controls),
.dapm_widgets = rt286_dapm_widgets,
@@ -1063,7 +1059,6 @@ static const struct snd_soc_component_driver soc_component_dev_rt286 = {
.num_dapm_routes = ARRAY_SIZE(rt286_dapm_routes),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config rt286_regmap = {
diff --git a/sound/soc/codecs/rt286.h b/sound/soc/codecs/rt286.h
index f27a4e71d5b6..4b7a3bd6043d 100644
--- a/sound/soc/codecs/rt286.h
+++ b/sound/soc/codecs/rt286.h
@@ -196,7 +196,5 @@ enum {
RT286_AIFS,
};
-int rt286_mic_detect(struct snd_soc_component *component, struct snd_soc_jack *jack);
-
#endif /* __RT286_H__ */
diff --git a/sound/soc/codecs/rt298.c b/sound/soc/codecs/rt298.c
index c291786dc82d..b0b53d4f07df 100644
--- a/sound/soc/codecs/rt298.c
+++ b/sound/soc/codecs/rt298.c
@@ -326,39 +326,37 @@ static void rt298_jack_detect_work(struct work_struct *work)
SND_JACK_MICROPHONE | SND_JACK_HEADPHONE);
}
-int rt298_mic_detect(struct snd_soc_component *component, struct snd_soc_jack *jack)
+static int rt298_mic_detect(struct snd_soc_component *component,
+ struct snd_soc_jack *jack, void *data)
{
+ struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);
struct rt298_priv *rt298 = snd_soc_component_get_drvdata(component);
- struct snd_soc_dapm_context *dapm;
- bool hp = false;
- bool mic = false;
- int status = 0;
- /* If jack in NULL, disable HS jack */
- if (!jack) {
+ rt298->jack = jack;
+
+ if (jack) {
+ /* Enable IRQ */
+ if (rt298->jack->status & SND_JACK_HEADPHONE)
+ snd_soc_dapm_force_enable_pin(dapm, "LDO1");
+ if (rt298->jack->status & SND_JACK_MICROPHONE) {
+ snd_soc_dapm_force_enable_pin(dapm, "HV");
+ snd_soc_dapm_force_enable_pin(dapm, "VREF");
+ }
+ regmap_update_bits(rt298->regmap, RT298_IRQ_CTRL, 0x2, 0x2);
+ /* Send an initial empty report */
+ snd_soc_jack_report(rt298->jack, rt298->jack->status,
+ SND_JACK_MICROPHONE | SND_JACK_HEADPHONE);
+ } else {
+ /* Disable IRQ */
regmap_update_bits(rt298->regmap, RT298_IRQ_CTRL, 0x2, 0x0);
- dapm = snd_soc_component_get_dapm(component);
+ snd_soc_dapm_disable_pin(dapm, "HV");
+ snd_soc_dapm_disable_pin(dapm, "VREF");
snd_soc_dapm_disable_pin(dapm, "LDO1");
- snd_soc_dapm_sync(dapm);
- return 0;
}
-
- rt298->jack = jack;
- regmap_update_bits(rt298->regmap, RT298_IRQ_CTRL, 0x2, 0x2);
-
- rt298_jack_detect(rt298, &hp, &mic);
- if (hp)
- status |= SND_JACK_HEADPHONE;
-
- if (mic)
- status |= SND_JACK_MICROPHONE;
-
- snd_soc_jack_report(rt298->jack, status,
- SND_JACK_MICROPHONE | SND_JACK_HEADPHONE);
+ snd_soc_dapm_sync(dapm);
return 0;
}
-EXPORT_SYMBOL_GPL(rt298_mic_detect);
static int is_mclk_mode(struct snd_soc_dapm_widget *source,
struct snd_soc_dapm_widget *sink)
@@ -1011,17 +1009,11 @@ static int rt298_probe(struct snd_soc_component *component)
struct rt298_priv *rt298 = snd_soc_component_get_drvdata(component);
rt298->component = component;
+ INIT_DELAYED_WORK(&rt298->jack_detect_work, rt298_jack_detect_work);
- if (rt298->i2c->irq) {
- regmap_update_bits(rt298->regmap,
- RT298_IRQ_CTRL, 0x2, 0x2);
-
- INIT_DELAYED_WORK(&rt298->jack_detect_work,
- rt298_jack_detect_work);
+ if (rt298->i2c->irq)
schedule_delayed_work(&rt298->jack_detect_work,
- msecs_to_jiffies(1250));
- }
-
+ msecs_to_jiffies(1250));
return 0;
}
@@ -1030,6 +1022,7 @@ static void rt298_remove(struct snd_soc_component *component)
struct rt298_priv *rt298 = snd_soc_component_get_drvdata(component);
cancel_delayed_work_sync(&rt298->jack_detect_work);
+ rt298->component = NULL;
}
#ifdef CONFIG_PM
@@ -1120,6 +1113,7 @@ static const struct snd_soc_component_driver soc_component_dev_rt298 = {
.suspend = rt298_suspend,
.resume = rt298_resume,
.set_bias_level = rt298_set_bias_level,
+ .set_jack = rt298_mic_detect,
.controls = rt298_snd_controls,
.num_controls = ARRAY_SIZE(rt298_snd_controls),
.dapm_widgets = rt298_dapm_widgets,
@@ -1128,7 +1122,6 @@ static const struct snd_soc_component_driver soc_component_dev_rt298 = {
.num_dapm_routes = ARRAY_SIZE(rt298_dapm_routes),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config rt298_regmap = {
diff --git a/sound/soc/codecs/rt298.h b/sound/soc/codecs/rt298.h
index ed2b8fd87f4c..f1be9c135401 100644
--- a/sound/soc/codecs/rt298.h
+++ b/sound/soc/codecs/rt298.h
@@ -207,7 +207,5 @@ enum {
RT298_AIFS,
};
-int rt298_mic_detect(struct snd_soc_component *component, struct snd_soc_jack *jack);
-
#endif /* __RT298_H__ */
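
With rt286_mic_detect() and rt298_mic_detect() no longer exported above, machine drivers are expected to reach the same code through the generic component set_jack hook instead. A minimal sketch of the caller side (hypothetical machine-driver helper, assuming the jack was created elsewhere):

#include <sound/soc.h>

static int example_set_headset(struct snd_soc_component *component,
			       struct snd_soc_jack *headset_jack)
{
	/* Previously: rt298_mic_detect(component, headset_jack); the
	 * exported symbol is gone, so go through the generic hook,
	 * which now lands in the codec's .set_jack callback. */
	return snd_soc_component_set_jack(component, headset_jack, NULL);
}

Passing a NULL jack disables detection again, as the reworked rt298 callback above shows.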
diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c
index be8ece4630df..b9bcf04d4dc9 100644
--- a/sound/soc/codecs/rt5514.c
+++ b/sound/soc/codecs/rt5514.c
@@ -1173,7 +1173,6 @@ static const struct snd_soc_component_driver soc_component_dev_rt5514 = {
.num_dapm_routes = ARRAY_SIZE(rt5514_dapm_routes),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config rt5514_i2c_regmap = {
diff --git a/sound/soc/codecs/rt5616.c b/sound/soc/codecs/rt5616.c
index 37f1bf552eff..970d6c4a358e 100644
--- a/sound/soc/codecs/rt5616.c
+++ b/sound/soc/codecs/rt5616.c
@@ -1304,7 +1304,6 @@ static const struct snd_soc_component_driver soc_component_dev_rt5616 = {
.num_dapm_routes = ARRAY_SIZE(rt5616_dapm_routes),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config rt5616_regmap = {
diff --git a/sound/soc/codecs/rt5631.c b/sound/soc/codecs/rt5631.c
index c941e878471c..957f6b19beec 100644
--- a/sound/soc/codecs/rt5631.c
+++ b/sound/soc/codecs/rt5631.c
@@ -1666,7 +1666,6 @@ static const struct snd_soc_component_driver soc_component_dev_rt5631 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct i2c_device_id rt5631_i2c_id[] = {
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
index 18b3da9211e3..5a844329800f 100644
--- a/sound/soc/codecs/rt5640.c
+++ b/sound/soc/codecs/rt5640.c
@@ -1986,7 +1986,7 @@ static int rt5640_set_bias_level(struct snd_soc_component *component,
snd_soc_component_write(component, RT5640_PWR_MIXER, 0x0000);
if (rt5640->jd_src == RT5640_JD_SRC_HDA_HEADER)
snd_soc_component_write(component, RT5640_PWR_ANLG1,
- 0x0018);
+ 0x2818);
else
snd_soc_component_write(component, RT5640_PWR_ANLG1,
0x0000);
@@ -2567,10 +2567,18 @@ static void rt5640_enable_jack_detect(struct snd_soc_component *component,
queue_delayed_work(system_long_wq, &rt5640->jack_work, 0);
}
+static const struct snd_soc_dapm_route rt5640_hda_jack_dapm_routes[] = {
+ {"IN1P", NULL, "MICBIAS1"},
+ {"IN2P", NULL, "MICBIAS1"},
+ {"IN3P", NULL, "MICBIAS1"},
+};
+
static void rt5640_enable_hda_jack_detect(
struct snd_soc_component *component, struct snd_soc_jack *jack)
{
struct rt5640_priv *rt5640 = snd_soc_component_get_drvdata(component);
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(component);
int ret;
/* Select JD1 for Mic */
@@ -2592,7 +2600,8 @@ static void rt5640_enable_hda_jack_detect(
snd_soc_component_update_bits(component, RT5640_DUMMY1, 0x400, 0x0);
snd_soc_component_update_bits(component, RT5640_PWR_ANLG1,
- RT5640_PWR_VREF2, RT5640_PWR_VREF2);
+ RT5640_PWR_VREF2 | RT5640_PWR_MB | RT5640_PWR_BG,
+ RT5640_PWR_VREF2 | RT5640_PWR_MB | RT5640_PWR_BG);
usleep_range(10000, 15000);
snd_soc_component_update_bits(component, RT5640_PWR_ANLG1,
RT5640_PWR_FV2, RT5640_PWR_FV2);
@@ -2609,6 +2618,9 @@ static void rt5640_enable_hda_jack_detect(
/* sync initial jack state */
queue_delayed_work(system_long_wq, &rt5640->jack_work, 0);
+
+ snd_soc_dapm_add_routes(dapm, rt5640_hda_jack_dapm_routes,
+ ARRAY_SIZE(rt5640_hda_jack_dapm_routes));
}
static int rt5640_set_jack(struct snd_soc_component *component,
@@ -2881,8 +2893,6 @@ static const struct snd_soc_component_driver soc_component_dev_rt5640 = {
.num_dapm_routes = ARRAY_SIZE(rt5640_dapm_routes),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
-
};
static const struct regmap_config rt5640_regmap = {
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 507aba8de3cc..8635bc6567dc 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -3534,7 +3534,6 @@ static const struct snd_soc_component_driver soc_component_dev_rt5645 = {
.num_dapm_routes = ARRAY_SIZE(rt5645_dapm_routes),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config rt5645_regmap = {
diff --git a/sound/soc/codecs/rt5651.c b/sound/soc/codecs/rt5651.c
index d11d201b1d03..df90af906563 100644
--- a/sound/soc/codecs/rt5651.c
+++ b/sound/soc/codecs/rt5651.c
@@ -2161,7 +2161,6 @@ static const struct snd_soc_component_driver soc_component_dev_rt5651 = {
.num_dapm_routes = ARRAY_SIZE(rt5651_dapm_routes),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config rt5651_regmap = {
diff --git a/sound/soc/codecs/rt5659.c b/sound/soc/codecs/rt5659.c
index 6efa90f46362..5e21e3c37ab5 100644
--- a/sound/soc/codecs/rt5659.c
+++ b/sound/soc/codecs/rt5659.c
@@ -3801,7 +3801,6 @@ static const struct snd_soc_component_driver soc_component_dev_rt5659 = {
.set_pll = rt5659_set_component_pll,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
diff --git a/sound/soc/codecs/rt5660.c b/sound/soc/codecs/rt5660.c
index d5f9926625d2..341baa29fdb1 100644
--- a/sound/soc/codecs/rt5660.c
+++ b/sound/soc/codecs/rt5660.c
@@ -1208,7 +1208,6 @@ static const struct snd_soc_component_driver soc_component_dev_rt5660 = {
.num_dapm_routes = ARRAY_SIZE(rt5660_dapm_routes),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config rt5660_regmap = {
diff --git a/sound/soc/codecs/rt5663.c b/sound/soc/codecs/rt5663.c
index e51eed8a79ab..ca981b374b0c 100644
--- a/sound/soc/codecs/rt5663.c
+++ b/sound/soc/codecs/rt5663.c
@@ -3258,7 +3258,6 @@ static const struct snd_soc_component_driver soc_component_dev_rt5663 = {
.set_jack = rt5663_set_jack_detect,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config rt5663_v2_regmap = {
diff --git a/sound/soc/codecs/rt5665.c b/sound/soc/codecs/rt5665.c
index 4a8d62e1dd2b..6e66cc218fa8 100644
--- a/sound/soc/codecs/rt5665.c
+++ b/sound/soc/codecs/rt5665.c
@@ -4617,7 +4617,6 @@ static const struct snd_soc_component_driver soc_component_dev_rt5665 = {
.set_jack = rt5665_set_jack_detect,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
diff --git a/sound/soc/codecs/rt5668.c b/sound/soc/codecs/rt5668.c
index 01566f036ca1..beb0951ff680 100644
--- a/sound/soc/codecs/rt5668.c
+++ b/sound/soc/codecs/rt5668.c
@@ -2362,7 +2362,6 @@ static const struct snd_soc_component_driver soc_component_dev_rt5668 = {
.set_jack = rt5668_set_jack_detect,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config rt5668_regmap = {
diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
index 8a97f6db04d5..60dbfa2a54f1 100644
--- a/sound/soc/codecs/rt5670.c
+++ b/sound/soc/codecs/rt5670.c
@@ -2852,7 +2852,6 @@ static const struct snd_soc_component_driver soc_component_dev_rt5670 = {
.num_dapm_routes = ARRAY_SIZE(rt5670_dapm_routes),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config rt5670_regmap = {
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
index 4a8c267d4fbc..31a2dd0aafb6 100644
--- a/sound/soc/codecs/rt5677.c
+++ b/sound/soc/codecs/rt5677.c
@@ -5189,7 +5189,6 @@ static const struct snd_soc_component_driver soc_component_dev_rt5677 = {
.num_dapm_routes = ARRAY_SIZE(rt5677_dapm_routes),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config rt5677_regmap_physical = {
diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c
index 2b6c6d6b9771..2df95e792900 100644
--- a/sound/soc/codecs/rt5682.c
+++ b/sound/soc/codecs/rt5682.c
@@ -3064,7 +3064,6 @@ const struct snd_soc_component_driver rt5682_soc_component_dev = {
.set_jack = rt5682_set_jack_detect,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
EXPORT_SYMBOL_GPL(rt5682_soc_component_dev);
diff --git a/sound/soc/codecs/rt5682s.c b/sound/soc/codecs/rt5682s.c
index 4d44eddee901..eb47e7cd485a 100644
--- a/sound/soc/codecs/rt5682s.c
+++ b/sound/soc/codecs/rt5682s.c
@@ -2893,7 +2893,6 @@ static const struct snd_soc_component_driver rt5682s_soc_component_dev = {
.set_jack = rt5682s_set_jack_detect,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int rt5682s_parse_dt(struct rt5682s_priv *rt5682s, struct device *dev)
diff --git a/sound/soc/codecs/rt700.c b/sound/soc/codecs/rt700.c
index 9bceeeb830b1..055c3ae974d8 100644
--- a/sound/soc/codecs/rt700.c
+++ b/sound/soc/codecs/rt700.c
@@ -818,9 +818,14 @@ static const struct snd_soc_dapm_route rt700_audio_map[] = {
static int rt700_probe(struct snd_soc_component *component)
{
struct rt700_priv *rt700 = snd_soc_component_get_drvdata(component);
+ int ret;
rt700->component = component;
+ ret = pm_runtime_resume(component->dev);
+ if (ret < 0 && ret != -EACCES)
+ return ret;
+
return 0;
}
diff --git a/sound/soc/codecs/rt711-sdca.c b/sound/soc/codecs/rt711-sdca.c
index 5ad53bbc8528..925268121901 100644
--- a/sound/soc/codecs/rt711-sdca.c
+++ b/sound/soc/codecs/rt711-sdca.c
@@ -1194,10 +1194,15 @@ static int rt711_sdca_parse_dt(struct rt711_sdca_priv *rt711, struct device *dev
static int rt711_sdca_probe(struct snd_soc_component *component)
{
struct rt711_sdca_priv *rt711 = snd_soc_component_get_drvdata(component);
+ int ret;
rt711_sdca_parse_dt(rt711, &rt711->slave->dev);
rt711->component = component;
+ ret = pm_runtime_resume(component->dev);
+ if (ret < 0 && ret != -EACCES)
+ return ret;
+
return 0;
}
diff --git a/sound/soc/codecs/rt711.c b/sound/soc/codecs/rt711.c
index 9df800abfc2d..1bf618089194 100644
--- a/sound/soc/codecs/rt711.c
+++ b/sound/soc/codecs/rt711.c
@@ -935,10 +935,15 @@ static int rt711_parse_dt(struct rt711_priv *rt711, struct device *dev)
static int rt711_probe(struct snd_soc_component *component)
{
struct rt711_priv *rt711 = snd_soc_component_get_drvdata(component);
+ int ret;
rt711_parse_dt(rt711, &rt711->slave->dev);
rt711->component = component;
+ ret = pm_runtime_resume(component->dev);
+ if (ret < 0 && ret != -EACCES)
+ return ret;
+
return 0;
}
diff --git a/sound/soc/codecs/rt715-sdca.c b/sound/soc/codecs/rt715-sdca.c
index 5857d0866307..ce8bbc76199a 100644
--- a/sound/soc/codecs/rt715-sdca.c
+++ b/sound/soc/codecs/rt715-sdca.c
@@ -758,7 +758,19 @@ static const struct snd_soc_dapm_route rt715_sdca_audio_map[] = {
{"ADC 25 Mux", "DMIC4", "DMIC4"},
};
+static int rt715_sdca_probe(struct snd_soc_component *component)
+{
+ int ret;
+
+ ret = pm_runtime_resume(component->dev);
+ if (ret < 0 && ret != -EACCES)
+ return ret;
+
+ return 0;
+}
+
static const struct snd_soc_component_driver soc_codec_dev_rt715_sdca = {
+ .probe = rt715_sdca_probe,
.controls = rt715_sdca_snd_controls,
.num_controls = ARRAY_SIZE(rt715_sdca_snd_controls),
.dapm_widgets = rt715_sdca_dapm_widgets,
diff --git a/sound/soc/codecs/rt715.c b/sound/soc/codecs/rt715.c
index 418e006b19ef..e93240521c74 100644
--- a/sound/soc/codecs/rt715.c
+++ b/sound/soc/codecs/rt715.c
@@ -737,7 +737,19 @@ static int rt715_set_bias_level(struct snd_soc_component *component,
return 0;
}
+static int rt715_probe(struct snd_soc_component *component)
+{
+ int ret;
+
+ ret = pm_runtime_resume(component->dev);
+ if (ret < 0 && ret != -EACCES)
+ return ret;
+
+ return 0;
+}
+
static const struct snd_soc_component_driver soc_codec_dev_rt715 = {
+ .probe = rt715_probe,
.set_bias_level = rt715_set_bias_level,
.controls = rt715_snd_controls,
.num_controls = ARRAY_SIZE(rt715_snd_controls),
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index 3363d1696ad7..3fafd9fc5cfd 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -1536,7 +1536,6 @@ static const struct snd_soc_component_driver sgtl5000_driver = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config sgtl5000_regmap = {
diff --git a/sound/soc/codecs/si476x.c b/sound/soc/codecs/si476x.c
index 8bd2edf70f13..d87141ba8438 100644
--- a/sound/soc/codecs/si476x.c
+++ b/sound/soc/codecs/si476x.c
@@ -239,7 +239,6 @@ static const struct snd_soc_component_driver soc_component_dev_si476x = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int si476x_platform_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/spdif_receiver.c b/sound/soc/codecs/spdif_receiver.c
index 276db978e587..862e0b654a1c 100644
--- a/sound/soc/codecs/spdif_receiver.c
+++ b/sound/soc/codecs/spdif_receiver.c
@@ -43,7 +43,6 @@ static struct snd_soc_component_driver soc_codec_spdif_dir = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static struct snd_soc_dai_driver dir_stub_dai = {
diff --git a/sound/soc/codecs/spdif_transmitter.c b/sound/soc/codecs/spdif_transmitter.c
index 2c8cebfc6603..736518921555 100644
--- a/sound/soc/codecs/spdif_transmitter.c
+++ b/sound/soc/codecs/spdif_transmitter.c
@@ -43,7 +43,6 @@ static struct snd_soc_component_driver soc_codec_spdif_dit = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static struct snd_soc_dai_driver dit_stub_dai = {
diff --git a/sound/soc/codecs/ssm2518.c b/sound/soc/codecs/ssm2518.c
index 83acbdbb8e0d..6d8847848299 100644
--- a/sound/soc/codecs/ssm2518.c
+++ b/sound/soc/codecs/ssm2518.c
@@ -409,8 +409,8 @@ static int ssm2518_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
bool invert_fclk;
int ret;
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBS_CFS:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_CBC_CFC:
break;
default:
return -EINVAL;
@@ -721,7 +721,6 @@ static const struct snd_soc_component_driver ssm2518_component_driver = {
.num_dapm_routes = ARRAY_SIZE(ssm2518_routes),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config ssm2518_regmap_config = {
diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c
index 7964e922b07f..cbbe83b85ada 100644
--- a/sound/soc/codecs/ssm2602.c
+++ b/sound/soc/codecs/ssm2602.c
@@ -411,11 +411,11 @@ static int ssm2602_set_dai_fmt(struct snd_soc_dai *codec_dai,
unsigned int iface = 0;
/* set master/slave audio interface */
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBM_CFM:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_CBP_CFP:
iface |= 0x0040;
break;
- case SND_SOC_DAIFMT_CBS_CFS:
+ case SND_SOC_DAIFMT_CBC_CFC:
break;
default:
return -EINVAL;
@@ -624,7 +624,6 @@ static const struct snd_soc_component_driver soc_component_dev_ssm2602 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static bool ssm2602_register_volatile(struct device *dev, unsigned int reg)
diff --git a/sound/soc/codecs/ssm4567.c b/sound/soc/codecs/ssm4567.c
index 08ced09ef001..4b0265617c7b 100644
--- a/sound/soc/codecs/ssm4567.c
+++ b/sound/soc/codecs/ssm4567.c
@@ -278,8 +278,8 @@ static int ssm4567_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
unsigned int ctrl1 = 0;
bool invert_fclk;
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBS_CFS:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_CBC_CFC:
break;
default:
return -EINVAL;
@@ -427,7 +427,6 @@ static const struct snd_soc_component_driver ssm4567_component_driver = {
.num_dapm_routes = ARRAY_SIZE(ssm4567_routes),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config ssm4567_regmap_config = {
diff --git a/sound/soc/codecs/sta32x.c b/sound/soc/codecs/sta32x.c
index 8585cbef4c9b..8c86b578eba8 100644
--- a/sound/soc/codecs/sta32x.c
+++ b/sound/soc/codecs/sta32x.c
@@ -601,8 +601,8 @@ static int sta32x_set_dai_fmt(struct snd_soc_dai *codec_dai,
struct sta32x_priv *sta32x = snd_soc_component_get_drvdata(component);
u8 confb = 0;
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBS_CFS:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_CBC_CFC:
break;
default:
return -EINVAL;
@@ -1014,7 +1014,6 @@ static const struct snd_soc_component_driver sta32x_component = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config sta32x_regmap = {
diff --git a/sound/soc/codecs/sta350.c b/sound/soc/codecs/sta350.c
index 9189fb3648f7..7b2c5b57d5d4 100644
--- a/sound/soc/codecs/sta350.c
+++ b/sound/soc/codecs/sta350.c
@@ -630,8 +630,8 @@ static int sta350_set_dai_fmt(struct snd_soc_dai *codec_dai,
struct sta350_priv *sta350 = snd_soc_component_get_drvdata(component);
unsigned int confb = 0;
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBS_CFS:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_CBC_CFC:
break;
default:
return -EINVAL;
@@ -1057,7 +1057,6 @@ static const struct snd_soc_component_driver sta350_component = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config sta350_regmap = {
diff --git a/sound/soc/codecs/sta529.c b/sound/soc/codecs/sta529.c
index d90e5512a731..313957099145 100644
--- a/sound/soc/codecs/sta529.c
+++ b/sound/soc/codecs/sta529.c
@@ -322,7 +322,6 @@ static const struct snd_soc_component_driver sta529_component_driver = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config sta529_regmap = {
diff --git a/sound/soc/codecs/stac9766.c b/sound/soc/codecs/stac9766.c
index d99f6e466d0a..1824a71fe053 100644
--- a/sound/soc/codecs/stac9766.c
+++ b/sound/soc/codecs/stac9766.c
@@ -313,8 +313,6 @@ static const struct snd_soc_component_driver soc_component_dev_stac9766 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
-
};
static int stac9766_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/sti-sas.c b/sound/soc/codecs/sti-sas.c
index 3be4940e3c77..f076878908ee 100644
--- a/sound/soc/codecs/sti-sas.c
+++ b/sound/soc/codecs/sti-sas.c
@@ -199,10 +199,10 @@ static int stih407_sas_dac_mute(struct snd_soc_dai *dai, int mute, int stream)
static int sti_sas_spdif_set_fmt(struct snd_soc_dai *dai,
unsigned int fmt)
{
- if ((fmt & SND_SOC_DAIFMT_MASTER_MASK) != SND_SOC_DAIFMT_CBS_CFS) {
+ if ((fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) != SND_SOC_DAIFMT_CBC_CFC) {
dev_err(dai->component->dev,
- "%s: ERROR: Unsupporter master mask 0x%x\n",
- __func__, fmt & SND_SOC_DAIFMT_MASTER_MASK);
+ "%s: ERROR: Unsupported clocking mask 0x%x\n",
+ __func__, fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK);
return -EINVAL;
}
@@ -398,7 +398,6 @@ static struct snd_soc_component_driver sti_sas_driver = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct of_device_id sti_sas_dev_match[] = {
diff --git a/sound/soc/codecs/tas2552.c b/sound/soc/codecs/tas2552.c
index b5c9c61ff5a8..8bd667da8767 100644
--- a/sound/soc/codecs/tas2552.c
+++ b/sound/soc/codecs/tas2552.c
@@ -347,17 +347,17 @@ static int tas2552_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
struct tas2552_data *tas2552 = dev_get_drvdata(component->dev);
u8 serial_format;
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBS_CFS:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_CBC_CFC:
serial_format = 0x00;
break;
- case SND_SOC_DAIFMT_CBS_CFM:
+ case SND_SOC_DAIFMT_CBC_CFP:
serial_format = TAS2552_WCLKDIR;
break;
- case SND_SOC_DAIFMT_CBM_CFS:
+ case SND_SOC_DAIFMT_CBP_CFC:
serial_format = TAS2552_BCLKDIR;
break;
- case SND_SOC_DAIFMT_CBM_CFM:
+ case SND_SOC_DAIFMT_CBP_CFP:
serial_format = (TAS2552_BCLKDIR | TAS2552_WCLKDIR);
break;
default:
@@ -581,7 +581,7 @@ static int tas2552_component_probe(struct snd_soc_component *component)
gpiod_set_value(tas2552->enable_gpio, 1);
- ret = pm_runtime_get_sync(component->dev);
+ ret = pm_runtime_resume_and_get(component->dev);
if (ret < 0) {
dev_err(component->dev, "Enabling device failed: %d\n",
ret);
@@ -668,7 +668,6 @@ static const struct snd_soc_component_driver soc_component_dev_tas2552 = {
.num_dapm_routes = ARRAY_SIZE(tas2552_audio_map),
.idle_bias_on = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config tas2552_regmap_config = {
diff --git a/sound/soc/codecs/tas2562.c b/sound/soc/codecs/tas2562.c
index e62a3da16aed..dc088a1c6721 100644
--- a/sound/soc/codecs/tas2562.c
+++ b/sound/soc/codecs/tas2562.c
@@ -589,7 +589,6 @@ static const struct snd_soc_component_driver soc_component_dev_tas2110 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct snd_soc_dapm_widget tas2562_dapm_widgets[] = {
@@ -629,7 +628,6 @@ static const struct snd_soc_component_driver soc_component_dev_tas2562 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct snd_soc_dai_ops tas2562_speaker_dai_ops = {
diff --git a/sound/soc/codecs/tas2764.c b/sound/soc/codecs/tas2764.c
index 4cb788f3e5f7..846d9d3ecc9d 100644
--- a/sound/soc/codecs/tas2764.c
+++ b/sound/soc/codecs/tas2764.c
@@ -558,7 +558,6 @@ static const struct snd_soc_component_driver soc_component_driver_tas2764 = {
.num_dapm_routes = ARRAY_SIZE(tas2764_audio_map),
.idle_bias_on = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct reg_default tas2764_reg_defaults[] = {
diff --git a/sound/soc/codecs/tas2770.c b/sound/soc/codecs/tas2770.c
index c1dbd978d550..bb653b664146 100644
--- a/sound/soc/codecs/tas2770.c
+++ b/sound/soc/codecs/tas2770.c
@@ -46,34 +46,22 @@ static void tas2770_reset(struct tas2770_priv *tas2770)
usleep_range(1000, 2000);
}
-static int tas2770_set_bias_level(struct snd_soc_component *component,
- enum snd_soc_bias_level level)
+static int tas2770_update_pwr_ctrl(struct tas2770_priv *tas2770)
{
- struct tas2770_priv *tas2770 =
- snd_soc_component_get_drvdata(component);
+ struct snd_soc_component *component = tas2770->component;
+ unsigned int val;
+ int ret;
- switch (level) {
- case SND_SOC_BIAS_ON:
- snd_soc_component_update_bits(component, TAS2770_PWR_CTRL,
- TAS2770_PWR_CTRL_MASK,
- TAS2770_PWR_CTRL_ACTIVE);
- break;
- case SND_SOC_BIAS_STANDBY:
- case SND_SOC_BIAS_PREPARE:
- snd_soc_component_update_bits(component, TAS2770_PWR_CTRL,
- TAS2770_PWR_CTRL_MASK,
- TAS2770_PWR_CTRL_MUTE);
- break;
- case SND_SOC_BIAS_OFF:
- snd_soc_component_update_bits(component, TAS2770_PWR_CTRL,
- TAS2770_PWR_CTRL_MASK,
- TAS2770_PWR_CTRL_SHUTDOWN);
- break;
+ if (tas2770->dac_powered)
+ val = tas2770->unmuted ?
+ TAS2770_PWR_CTRL_ACTIVE : TAS2770_PWR_CTRL_MUTE;
+ else
+ val = TAS2770_PWR_CTRL_SHUTDOWN;
- default:
- dev_err(tas2770->dev, "wrong power level setting %d\n", level);
- return -EINVAL;
- }
+ ret = snd_soc_component_update_bits(component, TAS2770_PWR_CTRL,
+ TAS2770_PWR_CTRL_MASK, val);
+ if (ret < 0)
+ return ret;
return 0;
}
@@ -114,9 +102,7 @@ static int tas2770_codec_resume(struct snd_soc_component *component)
gpiod_set_value_cansleep(tas2770->sdz_gpio, 1);
usleep_range(1000, 2000);
} else {
- ret = snd_soc_component_update_bits(component, TAS2770_PWR_CTRL,
- TAS2770_PWR_CTRL_MASK,
- TAS2770_PWR_CTRL_ACTIVE);
+ ret = tas2770_update_pwr_ctrl(tas2770);
if (ret < 0)
return ret;
}
@@ -152,24 +138,19 @@ static int tas2770_dac_event(struct snd_soc_dapm_widget *w,
switch (event) {
case SND_SOC_DAPM_POST_PMU:
- ret = snd_soc_component_update_bits(component, TAS2770_PWR_CTRL,
- TAS2770_PWR_CTRL_MASK,
- TAS2770_PWR_CTRL_MUTE);
+ tas2770->dac_powered = 1;
+ ret = tas2770_update_pwr_ctrl(tas2770);
break;
case SND_SOC_DAPM_PRE_PMD:
- ret = snd_soc_component_update_bits(component, TAS2770_PWR_CTRL,
- TAS2770_PWR_CTRL_MASK,
- TAS2770_PWR_CTRL_SHUTDOWN);
+ tas2770->dac_powered = 0;
+ ret = tas2770_update_pwr_ctrl(tas2770);
break;
default:
dev_err(tas2770->dev, "Not supported evevt\n");
return -EINVAL;
}
- if (ret < 0)
- return ret;
-
- return 0;
+ return ret;
}
static const struct snd_kcontrol_new isense_switch =
@@ -203,21 +184,11 @@ static const struct snd_soc_dapm_route tas2770_audio_map[] = {
static int tas2770_mute(struct snd_soc_dai *dai, int mute, int direction)
{
struct snd_soc_component *component = dai->component;
- int ret;
-
- if (mute)
- ret = snd_soc_component_update_bits(component, TAS2770_PWR_CTRL,
- TAS2770_PWR_CTRL_MASK,
- TAS2770_PWR_CTRL_MUTE);
- else
- ret = snd_soc_component_update_bits(component, TAS2770_PWR_CTRL,
- TAS2770_PWR_CTRL_MASK,
- TAS2770_PWR_CTRL_ACTIVE);
-
- if (ret < 0)
- return ret;
+ struct tas2770_priv *tas2770 =
+ snd_soc_component_get_drvdata(component);
- return 0;
+ tas2770->unmuted = !mute;
+ return tas2770_update_pwr_ctrl(tas2770);
}
static int tas2770_set_bitwidth(struct tas2770_priv *tas2770, int bitwidth)
@@ -337,21 +308,27 @@ static int tas2770_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
struct snd_soc_component *component = dai->component;
struct tas2770_priv *tas2770 =
snd_soc_component_get_drvdata(component);
- u8 tdm_rx_start_slot = 0, asi_cfg_1 = 0;
+ u8 tdm_rx_start_slot = 0, invert_fpol = 0, fpol_preinv = 0, asi_cfg_1 = 0;
int ret;
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBS_CFS:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_CBC_CFC:
break;
default:
- dev_err(tas2770->dev, "ASI format master is not found\n");
+ dev_err(tas2770->dev, "ASI invalid DAI clocking\n");
return -EINVAL;
}
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_IF:
+ invert_fpol = 1;
+ fallthrough;
case SND_SOC_DAIFMT_NB_NF:
asi_cfg_1 |= TAS2770_TDM_CFG_REG1_RX_RSING;
break;
+ case SND_SOC_DAIFMT_IB_IF:
+ invert_fpol = 1;
+ fallthrough;
case SND_SOC_DAIFMT_IB_NF:
asi_cfg_1 |= TAS2770_TDM_CFG_REG1_RX_FALING;
break;
@@ -369,15 +346,19 @@ static int tas2770_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
tdm_rx_start_slot = 1;
+ fpol_preinv = 0;
break;
case SND_SOC_DAIFMT_DSP_A:
tdm_rx_start_slot = 0;
+ fpol_preinv = 1;
break;
case SND_SOC_DAIFMT_DSP_B:
tdm_rx_start_slot = 1;
+ fpol_preinv = 1;
break;
case SND_SOC_DAIFMT_LEFT_J:
tdm_rx_start_slot = 0;
+ fpol_preinv = 1;
break;
default:
dev_err(tas2770->dev,
@@ -391,6 +372,14 @@ static int tas2770_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
if (ret < 0)
return ret;
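+ /*
+  * Each DAI format implies a base frame-sync polarity (fpol_preinv);
+  * an *_IF inversion flag flips it, so the programmed polarity is the
+  * XOR of the two.
+  */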
+ ret = snd_soc_component_update_bits(component, TAS2770_TDM_CFG_REG0,
+ TAS2770_TDM_CFG_REG0_FPOL_MASK,
+ (fpol_preinv ^ invert_fpol)
+ ? TAS2770_TDM_CFG_REG0_FPOL_RSING
+ : TAS2770_TDM_CFG_REG0_FPOL_FALING);
+ if (ret < 0)
+ return ret;
+
return 0;
}
@@ -489,7 +478,7 @@ static struct snd_soc_dai_driver tas2770_dai_driver[] = {
.id = 0,
.playback = {
.stream_name = "ASI1 Playback",
- .channels_min = 2,
+ .channels_min = 1,
.channels_max = 2,
.rates = TAS2770_RATES,
.formats = TAS2770_FORMATS,
@@ -537,7 +526,6 @@ static const struct snd_soc_component_driver soc_component_driver_tas2770 = {
.probe = tas2770_codec_probe,
.suspend = tas2770_codec_suspend,
.resume = tas2770_codec_resume,
- .set_bias_level = tas2770_set_bias_level,
.controls = tas2770_snd_controls,
.num_controls = ARRAY_SIZE(tas2770_snd_controls),
.dapm_widgets = tas2770_dapm_widgets,
@@ -546,7 +534,6 @@ static const struct snd_soc_component_driver soc_component_driver_tas2770 = {
.num_dapm_routes = ARRAY_SIZE(tas2770_audio_map),
.idle_bias_on = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int tas2770_register_codec(struct tas2770_priv *tas2770)
diff --git a/sound/soc/codecs/tas2770.h b/sound/soc/codecs/tas2770.h
index d156666bcc55..f75f40781ab1 100644
--- a/sound/soc/codecs/tas2770.h
+++ b/sound/soc/codecs/tas2770.h
@@ -41,6 +41,9 @@
#define TAS2770_TDM_CFG_REG0_31_44_1_48KHZ 0x6
#define TAS2770_TDM_CFG_REG0_31_88_2_96KHZ 0x8
#define TAS2770_TDM_CFG_REG0_31_176_4_192KHZ 0xa
+#define TAS2770_TDM_CFG_REG0_FPOL_MASK BIT(0)
+#define TAS2770_TDM_CFG_REG0_FPOL_RSING 0
+#define TAS2770_TDM_CFG_REG0_FPOL_FALING 1
/* TDM Configuration Reg1 */
#define TAS2770_TDM_CFG_REG1 TAS2770_REG(0X0, 0x0B)
#define TAS2770_TDM_CFG_REG1_MASK GENMASK(5, 1)
@@ -135,6 +138,8 @@ struct tas2770_priv {
struct device *dev;
int v_sense_slot;
int i_sense_slot;
+ bool dac_powered;
+ bool unmuted;
};
#endif /* __TAS2770__ */
diff --git a/sound/soc/codecs/tas2780.c b/sound/soc/codecs/tas2780.c
new file mode 100644
index 000000000000..a6db6f0e5431
--- /dev/null
+++ b/sound/soc/codecs/tas2780.c
@@ -0,0 +1,663 @@
+// SPDX-License-Identifier: GPL-2.0
+// Driver for the Texas Instruments TAS2780 Mono
+// Audio amplifier
+// Copyright (C) 2022 Texas Instruments Inc.
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regmap.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <sound/soc.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/tlv.h>
+
+#include "tas2780.h"
+
+struct tas2780_priv {
+ struct snd_soc_component *component;
+ struct gpio_desc *reset_gpio;
+ struct regmap *regmap;
+ struct device *dev;
+ int v_sense_slot;
+ int i_sense_slot;
+};
+
+static void tas2780_reset(struct tas2780_priv *tas2780)
+{
+ int ret = 0;
+
+ if (tas2780->reset_gpio) {
+ gpiod_set_value_cansleep(tas2780->reset_gpio, 0);
+ usleep_range(2000, 2050);
+ gpiod_set_value_cansleep(tas2780->reset_gpio, 1);
+ usleep_range(2000, 2050);
+ }
+
+ ret = snd_soc_component_write(tas2780->component, TAS2780_SW_RST,
+ TAS2780_RST);
+ if (ret)
+ dev_err(tas2780->dev, "%s:errCode:0x%x Reset error!\n",
+ __func__, ret);
+}
+
+#ifdef CONFIG_PM
+static int tas2780_codec_suspend(struct snd_soc_component *component)
+{
+ struct tas2780_priv *tas2780 =
+ snd_soc_component_get_drvdata(component);
+ int ret = 0;
+
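+ /*
+  * Shut the amplifier down, then keep further register writes in the
+  * cache so regcache_sync() can restore the device state on resume.
+  */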
+ ret = snd_soc_component_update_bits(component, TAS2780_PWR_CTRL,
+ TAS2780_PWR_CTRL_MASK, TAS2780_PWR_CTRL_SHUTDOWN);
+ if (ret < 0) {
+ dev_err(tas2780->dev, "%s:errCode:0x%0x:power down error\n",
+ __func__, ret);
+ goto err;
+ }
+ ret = 0;
+ regcache_cache_only(tas2780->regmap, true);
+ regcache_mark_dirty(tas2780->regmap);
+err:
+ return ret;
+}
+
+static int tas2780_codec_resume(struct snd_soc_component *component)
+{
+ struct tas2780_priv *tas2780 =
+ snd_soc_component_get_drvdata(component);
+ int ret = 0;
+
+ ret = snd_soc_component_update_bits(component, TAS2780_PWR_CTRL,
+ TAS2780_PWR_CTRL_MASK, TAS2780_PWR_CTRL_ACTIVE);
+
+ if (ret < 0) {
+ dev_err(tas2780->dev, "%s:errCode:0x%0x:power up error\n",
+ __func__, ret);
+ goto err;
+ }
+ ret = 0;
+ regcache_cache_only(tas2780->regmap, false);
+ ret = regcache_sync(tas2780->regmap);
+err:
+ return ret;
+}
+#endif
+
+static const char * const tas2780_ASI1_src[] = {
+ "I2C offset", "Left", "Right", "LeftRightDiv2",
+};
+
+static SOC_ENUM_SINGLE_DECL(
+ tas2780_ASI1_src_enum, TAS2780_TDM_CFG2, 4, tas2780_ASI1_src);
+
+static const struct snd_kcontrol_new tas2780_asi1_mux =
+ SOC_DAPM_ENUM("ASI1 Source", tas2780_ASI1_src_enum);
+
+static const struct snd_kcontrol_new isense_switch =
+ SOC_DAPM_SINGLE("Switch", TAS2780_PWR_CTRL,
+ TAS2780_ISENSE_POWER_EN, 1, 1);
+static const struct snd_kcontrol_new vsense_switch =
+ SOC_DAPM_SINGLE("Switch", TAS2780_PWR_CTRL,
+ TAS2780_VSENSE_POWER_EN, 1, 1);
+
+static const struct snd_soc_dapm_widget tas2780_dapm_widgets[] = {
+ SND_SOC_DAPM_AIF_IN("ASI1", "ASI1 Playback", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_MUX("ASI1 Sel", SND_SOC_NOPM, 0, 0, &tas2780_asi1_mux),
+ SND_SOC_DAPM_SWITCH("ISENSE", TAS2780_PWR_CTRL,
+ TAS2780_ISENSE_POWER_EN, 1, &isense_switch),
+ SND_SOC_DAPM_SWITCH("VSENSE", TAS2780_PWR_CTRL,
+ TAS2780_VSENSE_POWER_EN, 1, &vsense_switch),
+ SND_SOC_DAPM_OUTPUT("OUT"),
+ SND_SOC_DAPM_SIGGEN("VMON"),
+ SND_SOC_DAPM_SIGGEN("IMON")
+};
+
+static const struct snd_soc_dapm_route tas2780_audio_map[] = {
+ {"ASI1 Sel", "I2C offset", "ASI1"},
+ {"ASI1 Sel", "Left", "ASI1"},
+ {"ASI1 Sel", "Right", "ASI1"},
+ {"ASI1 Sel", "LeftRightDiv2", "ASI1"},
+ {"OUT", NULL, "ASI1 Sel"},
+ {"ISENSE", "Switch", "IMON"},
+ {"VSENSE", "Switch", "VMON"},
+};
+
+static int tas2780_mute(struct snd_soc_dai *dai, int mute, int direction)
+{
+ struct snd_soc_component *component = dai->component;
+ struct tas2780_priv *tas2780 =
+ snd_soc_component_get_drvdata(component);
+ int ret = 0;
+
+ ret = snd_soc_component_update_bits(component, TAS2780_PWR_CTRL,
+ TAS2780_PWR_CTRL_MASK,
+ mute ? TAS2780_PWR_CTRL_MUTE : 0);
+ if (ret < 0) {
+ dev_err(tas2780->dev, "%s: Failed to set power control\n",
+ __func__);
+ goto err;
+ }
+ ret = 0;
+err:
+ return ret;
+}
+
+static int tas2780_set_bitwidth(struct tas2780_priv *tas2780, int bitwidth)
+{
+ struct snd_soc_component *component = tas2780->component;
+ int sense_en;
+ int val;
+ int ret;
+ int slot_size;
+
+ switch (bitwidth) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ ret = snd_soc_component_update_bits(component,
+ TAS2780_TDM_CFG2,
+ TAS2780_TDM_CFG2_RXW_MASK,
+ TAS2780_TDM_CFG2_RXW_16BITS);
+ slot_size = TAS2780_TDM_CFG2_RXS_16BITS;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ret = snd_soc_component_update_bits(component,
+ TAS2780_TDM_CFG2,
+ TAS2780_TDM_CFG2_RXW_MASK,
+ TAS2780_TDM_CFG2_RXW_24BITS);
+ slot_size = TAS2780_TDM_CFG2_RXS_24BITS;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ ret = snd_soc_component_update_bits(component,
+ TAS2780_TDM_CFG2,
+ TAS2780_TDM_CFG2_RXW_MASK,
+ TAS2780_TDM_CFG2_RXW_32BITS);
+ slot_size = TAS2780_TDM_CFG2_RXS_32BITS;
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ if (ret < 0) {
+ dev_err(tas2780->dev, "%s:errCode:0x%x set bitwidth error\n",
+ __func__, ret);
+ goto err;
+ }
+
+ ret = snd_soc_component_update_bits(component, TAS2780_TDM_CFG2,
+ TAS2780_TDM_CFG2_RXS_MASK, slot_size);
+ if (ret < 0) {
+ dev_err(tas2780->dev,
+ "%s:errCode:0x%x set RX slot size error\n",
+ __func__, ret);
+ goto err;
+ }
+
+ val = snd_soc_component_read(tas2780->component, TAS2780_PWR_CTRL);
+ if (val < 0) {
+ dev_err(tas2780->dev, "%s:errCode:0x%x read PWR_CTRL error\n",
+ __func__, val);
+ ret = val;
+ goto err;
+ }
+
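+ /*
+  * VSNS/ISNS data is only sent on the TDM bus when the corresponding
+  * sense path is enabled, i.e. its PWR_CTRL bit is cleared.
+  */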
+ if (val & (1 << TAS2780_VSENSE_POWER_EN))
+ sense_en = 0;
+ else
+ sense_en = TAS2780_TDM_CFG5_VSNS_ENABLE;
+
+ ret = snd_soc_component_update_bits(tas2780->component,
+ TAS2780_TDM_CFG5, TAS2780_TDM_CFG5_VSNS_ENABLE, sense_en);
+ if (ret < 0) {
+ dev_err(tas2780->dev, "%s:errCode:0x%x enable vSNS error\n",
+ __func__, ret);
+ goto err;
+ }
+
+ if (val & (1 << TAS2780_ISENSE_POWER_EN))
+ sense_en = 0;
+ else
+ sense_en = TAS2780_TDM_CFG6_ISNS_ENABLE;
+
+ ret = snd_soc_component_update_bits(tas2780->component,
+ TAS2780_TDM_CFG6, TAS2780_TDM_CFG6_ISNS_ENABLE, sense_en);
+ if (ret < 0) {
+ dev_err(tas2780->dev, "%s:errCode:0x%x enable iSNS error\n",
+ __func__, ret);
+ goto err;
+ }
+ ret = 0;
+err:
+ return ret;
+}
+
+static int tas2780_set_samplerate(
+ struct tas2780_priv *tas2780, int samplerate)
+{
+ struct snd_soc_component *component = tas2780->component;
+ int ramp_rate_val;
+ int ret;
+
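+ /*
+  * TDM_CFG0 combines the 44.1/48 kHz base-rate select with the
+  * single/double rate field, so both are programmed together.
+  */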
+ switch (samplerate) {
+ case 48000:
+ ramp_rate_val = TAS2780_TDM_CFG0_SMP_48KHZ |
+ TAS2780_TDM_CFG0_44_1_48KHZ;
+ break;
+ case 44100:
+ ramp_rate_val = TAS2780_TDM_CFG0_SMP_44_1KHZ |
+ TAS2780_TDM_CFG0_44_1_48KHZ;
+ break;
+ case 96000:
+ ramp_rate_val = TAS2780_TDM_CFG0_SMP_48KHZ |
+ TAS2780_TDM_CFG0_88_2_96KHZ;
+ break;
+ case 88200:
+ ramp_rate_val = TAS2780_TDM_CFG0_SMP_44_1KHZ |
+ TAS2780_TDM_CFG0_88_2_96KHZ;
+ break;
+ default:
+ return -EINVAL;
+ }
+ ret = snd_soc_component_update_bits(component, TAS2780_TDM_CFG0,
+ TAS2780_TDM_CFG0_SMP_MASK | TAS2780_TDM_CFG0_MASK,
+ ramp_rate_val);
+ if (ret < 0) {
+ dev_err(tas2780->dev,
+ "%s:errCode:0x%x Failed to set ramp_rate_val\n",
+ __func__, ret);
+ goto err;
+ }
+ ret = 0;
+err:
+ return ret;
+}
+
+static int tas2780_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct tas2780_priv *tas2780 =
+ snd_soc_component_get_drvdata(component);
+ int ret;
+
+ ret = tas2780_set_bitwidth(tas2780, params_format(params));
+ if (ret < 0)
+ return ret;
+
+ return tas2780_set_samplerate(tas2780, params_rate(params));
+}
+
+static int tas2780_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct snd_soc_component *component = dai->component;
+ struct tas2780_priv *tas2780 =
+ snd_soc_component_get_drvdata(component);
+ u8 tdm_rx_start_slot = 0, asi_cfg_1 = 0;
+ int iface;
+ int ret = 0;
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ asi_cfg_1 = TAS2780_TDM_CFG1_RX_RISING;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ asi_cfg_1 = TAS2780_TDM_CFG1_RX_FALLING;
+ break;
+ default:
+ dev_err(tas2780->dev, "Unsupported ASI format inversion\n");
+ return -EINVAL;
+ }
+
+ ret = snd_soc_component_update_bits(component, TAS2780_TDM_CFG1,
+ TAS2780_TDM_CFG1_RX_MASK, asi_cfg_1);
+ if (ret < 0) {
+ dev_err(tas2780->dev,
+ "%s:errCode:0x%x Failed to set asi_cfg_1\n",
+ __func__, ret);
+ goto err;
+ }
+
+ if (((fmt & SND_SOC_DAIFMT_FORMAT_MASK) == SND_SOC_DAIFMT_I2S)
+ || ((fmt & SND_SOC_DAIFMT_FORMAT_MASK)
+ == SND_SOC_DAIFMT_DSP_A)){
+ iface = TAS2780_TDM_CFG2_SCFG_I2S;
+ tdm_rx_start_slot = 1;
+ } else {
+ if (((fmt & SND_SOC_DAIFMT_FORMAT_MASK)
+ == SND_SOC_DAIFMT_DSP_B)
+ || ((fmt & SND_SOC_DAIFMT_FORMAT_MASK)
+ == SND_SOC_DAIFMT_LEFT_J)) {
+ iface = TAS2780_TDM_CFG2_SCFG_LEFT_J;
+ tdm_rx_start_slot = 0;
+ } else {
+ dev_err(tas2780->dev,
+ "%s: unsupported DAI format, fmt=0x%x\n",
+ __func__, fmt);
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+ ret = snd_soc_component_update_bits(component, TAS2780_TDM_CFG1,
+ TAS2780_TDM_CFG1_MASK,
+ (tdm_rx_start_slot << TAS2780_TDM_CFG1_51_SHIFT));
+ if (ret < 0) {
+ dev_err(tas2780->dev,
+ "%s:errCode:0x%x Failed to set tdm_rx_start_slot\n",
+ __func__, ret);
+ goto err;
+ }
+
+ ret = snd_soc_component_update_bits(component, TAS2780_TDM_CFG2,
+ TAS2780_TDM_CFG2_SCFG_MASK, iface);
+ if (ret < 0) {
+ dev_err(tas2780->dev, "%s:errCode:0x%x Failed to set iface\n",
+ __func__, ret);
+ goto err;
+ }
+ ret = 0;
+err:
+ return ret;
+}
+
+static int tas2780_set_dai_tdm_slot(struct snd_soc_dai *dai,
+ unsigned int tx_mask,
+ unsigned int rx_mask,
+ int slots, int slot_width)
+{
+ struct snd_soc_component *component = dai->component;
+ struct tas2780_priv *tas2780 =
+ snd_soc_component_get_drvdata(component);
+ int left_slot, right_slot;
+ int slots_cfg;
+ int slot_size;
+ int ret = 0;
+
+ if (tx_mask == 0 || rx_mask != 0)
+ return -EINVAL;
+
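+ /*
+  * The lowest one or two set bits of tx_mask select the left and right
+  * TX slots; a single set bit uses the same slot for both channels.
+  */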
+ if (slots == 1) {
+ if (tx_mask != 1)
+ return -EINVAL;
+ left_slot = 0;
+ right_slot = 0;
+ } else {
+ left_slot = __ffs(tx_mask);
+ tx_mask &= ~(1 << left_slot);
+ if (tx_mask == 0) {
+ right_slot = left_slot;
+ } else {
+ right_slot = __ffs(tx_mask);
+ tx_mask &= ~(1 << right_slot);
+ }
+ }
+
+ if (tx_mask != 0 || left_slot >= slots || right_slot >= slots)
+ return -EINVAL;
+
+ slots_cfg = (right_slot << TAS2780_TDM_CFG3_RXS_SHIFT) | left_slot;
+ ret = snd_soc_component_write(component, TAS2780_TDM_CFG3, slots_cfg);
+ if (ret) {
+ dev_err(tas2780->dev,
+ "%s:errCode:0x%x Failed to set slots_cfg\n",
+ __func__, ret);
+ goto err;
+ }
+
+ switch (slot_width) {
+ case 16:
+ slot_size = TAS2780_TDM_CFG2_RXS_16BITS;
+ break;
+ case 24:
+ slot_size = TAS2780_TDM_CFG2_RXS_24BITS;
+ break;
+ case 32:
+ slot_size = TAS2780_TDM_CFG2_RXS_32BITS;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = snd_soc_component_update_bits(component, TAS2780_TDM_CFG2,
+ TAS2780_TDM_CFG2_RXS_MASK, slot_size);
+ if (ret < 0) {
+ dev_err(tas2780->dev,
+ "%s:errCode:0x%x Failed to set slot_size\n",
+ __func__, ret);
+ goto err;
+ }
+
+ ret = snd_soc_component_update_bits(component, TAS2780_TDM_CFG5,
+ TAS2780_TDM_CFG5_50_MASK, tas2780->v_sense_slot);
+ if (ret < 0) {
+ dev_err(tas2780->dev,
+ "%s:errCode:0x%x Failed to set v_sense_slot\n",
+ __func__, ret);
+ goto err;
+ }
+
+ ret = snd_soc_component_update_bits(component, TAS2780_TDM_CFG6,
+ TAS2780_TDM_CFG6_50_MASK, tas2780->i_sense_slot);
+ if (ret < 0) {
+ dev_err(tas2780->dev,
+ "%s:errCode:0x%x Failed to set i_sense_slot\n",
+ __func__, ret);
+ goto err;
+ }
+ ret = 0;
+err:
+ return ret;
+}
+
+static const struct snd_soc_dai_ops tas2780_dai_ops = {
+ .mute_stream = tas2780_mute,
+ .hw_params = tas2780_hw_params,
+ .set_fmt = tas2780_set_fmt,
+ .set_tdm_slot = tas2780_set_dai_tdm_slot,
+ .no_capture_mute = 1,
+};
+
+#define TAS2780_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+#define TAS2780_RATES (SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |\
+ SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_88200)
+
+static struct snd_soc_dai_driver tas2780_dai_driver[] = {
+ {
+ .name = "tas2780 ASI1",
+ .id = 0,
+ .playback = {
+ .stream_name = "ASI1 Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = TAS2780_RATES,
+ .formats = TAS2780_FORMATS,
+ },
+ .capture = {
+ .stream_name = "ASI1 Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = TAS2780_RATES,
+ .formats = TAS2780_FORMATS,
+ },
+ .ops = &tas2780_dai_ops,
+ .symmetric_rate = 1,
+ },
+};
+
+static int tas2780_codec_probe(struct snd_soc_component *component)
+{
+ struct tas2780_priv *tas2780 =
+ snd_soc_component_get_drvdata(component);
+ int ret = 0;
+
+ tas2780->component = component;
+
+ tas2780_reset(tas2780);
+ ret = snd_soc_component_update_bits(component,
+ TAS2780_IC_CFG, TAS2780_IC_CFG_MASK,
+ TAS2780_IC_CFG_ENABLE);
+ if (ret < 0)
+ dev_err(tas2780->dev, "%s:errCode:0x%0x\n",
+ __func__, ret);
+
+ return ret;
+}
+
+static DECLARE_TLV_DB_SCALE(tas2780_digital_tlv, 1100, 50, 0);
+static DECLARE_TLV_DB_SCALE(tas2780_playback_volume, -10000, 50, 0);
+
+static const struct snd_kcontrol_new tas2780_snd_controls[] = {
+ SOC_SINGLE_TLV("Speaker Volume", TAS2780_DVC, 0,
+ TAS2780_DVC_MAX, 1, tas2780_playback_volume),
+ SOC_SINGLE_TLV("Amp Gain Volume", TAS2780_CHNL_0, 0, 0x14, 0,
+ tas2780_digital_tlv),
+};
+
+static const struct snd_soc_component_driver soc_component_driver_tas2780 = {
+ .probe = tas2780_codec_probe,
+#ifdef CONFIG_PM
+ .suspend = tas2780_codec_suspend,
+ .resume = tas2780_codec_resume,
+#endif
+ .controls = tas2780_snd_controls,
+ .num_controls = ARRAY_SIZE(tas2780_snd_controls),
+ .dapm_widgets = tas2780_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(tas2780_dapm_widgets),
+ .dapm_routes = tas2780_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(tas2780_audio_map),
+ .idle_bias_on = 1,
+ .endianness = 1,
+};
+
+static const struct reg_default tas2780_reg_defaults[] = {
+ { TAS2780_PAGE, 0x00 },
+ { TAS2780_SW_RST, 0x00 },
+ { TAS2780_PWR_CTRL, 0x1a },
+ { TAS2780_DVC, 0x00 },
+ { TAS2780_CHNL_0, 0x00 },
+ { TAS2780_TDM_CFG0, 0x09 },
+ { TAS2780_TDM_CFG1, 0x02 },
+ { TAS2780_TDM_CFG2, 0x0a },
+ { TAS2780_TDM_CFG3, 0x10 },
+ { TAS2780_TDM_CFG5, 0x42 },
+};
+
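+ /* The register map is paged: TAS2780_PAGE selects a 128-register window. */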
+static const struct regmap_range_cfg tas2780_regmap_ranges[] = {
+ {
+ .range_min = 0,
+ .range_max = 1 * 128,
+ .selector_reg = TAS2780_PAGE,
+ .selector_mask = 0xff,
+ .selector_shift = 0,
+ .window_start = 0,
+ .window_len = 128,
+ },
+};
+
+static const struct regmap_config tas2780_i2c_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .reg_defaults = tas2780_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(tas2780_reg_defaults),
+ .cache_type = REGCACHE_RBTREE,
+ .ranges = tas2780_regmap_ranges,
+ .num_ranges = ARRAY_SIZE(tas2780_regmap_ranges),
+ .max_register = 1 * 128,
+};
+
+static int tas2780_parse_dt(struct device *dev, struct tas2780_priv *tas2780)
+{
+ int ret = 0;
+
+ tas2780->reset_gpio = devm_gpiod_get_optional(tas2780->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(tas2780->reset_gpio)) {
+ if (PTR_ERR(tas2780->reset_gpio) == -EPROBE_DEFER) {
+ tas2780->reset_gpio = NULL;
+ return -EPROBE_DEFER;
+ }
+ }
+
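+ /*
+  * Default to I-sense on TDM slot 0 and V-sense on slot 2 when the
+  * optional properties are absent.
+  */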
+ ret = fwnode_property_read_u32(dev->fwnode, "ti,imon-slot-no",
+ &tas2780->i_sense_slot);
+ if (ret)
+ tas2780->i_sense_slot = 0;
+
+ ret = fwnode_property_read_u32(dev->fwnode, "ti,vmon-slot-no",
+ &tas2780->v_sense_slot);
+ if (ret)
+ tas2780->v_sense_slot = 2;
+
+ return 0;
+}
+
+static int tas2780_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tas2780_priv *tas2780;
+ int result;
+
+ tas2780 = devm_kzalloc(&client->dev, sizeof(struct tas2780_priv),
+ GFP_KERNEL);
+ if (!tas2780)
+ return -ENOMEM;
+ tas2780->dev = &client->dev;
+ i2c_set_clientdata(client, tas2780);
+ dev_set_drvdata(&client->dev, tas2780);
+
+ tas2780->regmap = devm_regmap_init_i2c(client, &tas2780_i2c_regmap);
+ if (IS_ERR(tas2780->regmap)) {
+ result = PTR_ERR(tas2780->regmap);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n",
+ result);
+ return result;
+ }
+
+ if (client->dev.of_node) {
+ result = tas2780_parse_dt(&client->dev, tas2780);
+ if (result) {
+ dev_err(tas2780->dev,
+ "%s: Failed to parse devicetree\n", __func__);
+ return result;
+ }
+ }
+
+ return devm_snd_soc_register_component(tas2780->dev,
+ &soc_component_driver_tas2780, tas2780_dai_driver,
+ ARRAY_SIZE(tas2780_dai_driver));
+}
+
+static const struct i2c_device_id tas2780_i2c_id[] = {
+ { "tas2780", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tas2780_i2c_id);
+
+#if defined(CONFIG_OF)
+static const struct of_device_id tas2780_of_match[] = {
+ { .compatible = "ti,tas2780" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, tas2780_of_match);
+#endif
+
+static struct i2c_driver tas2780_i2c_driver = {
+ .driver = {
+ .name = "tas2780",
+ .of_match_table = of_match_ptr(tas2780_of_match),
+ },
+ .probe = tas2780_i2c_probe,
+ .id_table = tas2780_i2c_id,
+};
+module_i2c_driver(tas2780_i2c_driver);
+
+MODULE_AUTHOR("Raphael Xu <raphael-xu@ti.com>");
+MODULE_DESCRIPTION("TAS2780 I2C Smart Amplifier driver");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/tas2780.h b/sound/soc/codecs/tas2780.h
new file mode 100644
index 000000000000..661c25df4e29
--- /dev/null
+++ b/sound/soc/codecs/tas2780.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * TAS2780.h - ALSA SoC Texas Instruments TAS2780 Mono Audio Amplifier
+ *
+ * Copyright (C) 2020-2022 Texas Instruments Incorporated - https://www.ti.com
+ *
+ * Author: Raphael Xu <raphael-xu@ti.com>
+ */
+
+#ifndef __TAS2780_H__
+#define __TAS2780_H__
+
+/* Book Control Register */
+#define TAS2780_BOOKCTL_PAGE 0
+#define TAS2780_BOOKCTL_REG 127
+#define TAS2780_REG(page, reg) ((page * 128) + reg)
+
+/* Page */
+#define TAS2780_PAGE TAS2780_REG(0X0, 0x00)
+#define TAS2780_PAGE_PAGE_MASK 255
+
+/* Software Reset */
+#define TAS2780_SW_RST TAS2780_REG(0X0, 0x01)
+#define TAS2780_RST BIT(0)
+
+/* Power Control */
+#define TAS2780_PWR_CTRL TAS2780_REG(0X0, 0x02)
+#define TAS2780_PWR_CTRL_MASK GENMASK(1, 0)
+#define TAS2780_PWR_CTRL_ACTIVE 0x0
+#define TAS2780_PWR_CTRL_MUTE BIT(0)
+#define TAS2780_PWR_CTRL_SHUTDOWN BIT(1)
+
+#define TAS2780_VSENSE_POWER_EN 3
+#define TAS2780_ISENSE_POWER_EN 4
+
+/* Digital Volume Control */
+#define TAS2780_DVC TAS2780_REG(0X0, 0x1a)
+#define TAS2780_DVC_MAX 0xc9
+
+#define TAS2780_CHNL_0 TAS2780_REG(0X0, 0x03)
+
+/* TDM Configuration Reg0 */
+#define TAS2780_TDM_CFG0 TAS2780_REG(0X0, 0x08)
+#define TAS2780_TDM_CFG0_SMP_MASK BIT(5)
+#define TAS2780_TDM_CFG0_SMP_48KHZ 0x0
+#define TAS2780_TDM_CFG0_SMP_44_1KHZ BIT(5)
+#define TAS2780_TDM_CFG0_MASK GENMASK(3, 1)
+#define TAS2780_TDM_CFG0_44_1_48KHZ BIT(3)
+#define TAS2780_TDM_CFG0_88_2_96KHZ (BIT(3) | BIT(1))
+
+/* TDM Configuration Reg1 */
+#define TAS2780_TDM_CFG1 TAS2780_REG(0X0, 0x09)
+#define TAS2780_TDM_CFG1_MASK GENMASK(5, 1)
+#define TAS2780_TDM_CFG1_51_SHIFT 1
+#define TAS2780_TDM_CFG1_RX_MASK BIT(0)
+#define TAS2780_TDM_CFG1_RX_RISING 0x0
+#define TAS2780_TDM_CFG1_RX_FALLING BIT(0)
+
+/* TDM Configuration Reg2 */
+#define TAS2780_TDM_CFG2 TAS2780_REG(0X0, 0x0a)
+#define TAS2780_TDM_CFG2_RXW_MASK GENMASK(3, 2)
+#define TAS2780_TDM_CFG2_RXW_16BITS 0x0
+#define TAS2780_TDM_CFG2_RXW_24BITS BIT(3)
+#define TAS2780_TDM_CFG2_RXW_32BITS (BIT(3) | BIT(2))
+#define TAS2780_TDM_CFG2_RXS_MASK GENMASK(1, 0)
+#define TAS2780_TDM_CFG2_RXS_16BITS 0x0
+#define TAS2780_TDM_CFG2_RXS_24BITS BIT(0)
+#define TAS2780_TDM_CFG2_RXS_32BITS BIT(1)
+#define TAS2780_TDM_CFG2_SCFG_MASK GENMASK(5, 4)
+#define TAS2780_TDM_CFG2_SCFG_I2S 0x0
+#define TAS2780_TDM_CFG2_SCFG_LEFT_J BIT(4)
+#define TAS2780_TDM_CFG2_SCFG_RIGHT_J BIT(5)
+
+/* TDM Configuration Reg3 */
+#define TAS2780_TDM_CFG3 TAS2780_REG(0X0, 0x0c)
+#define TAS2780_TDM_CFG3_RXS_MASK GENMASK(7, 4)
+#define TAS2780_TDM_CFG3_RXS_SHIFT 0x4
+#define TAS2780_TDM_CFG3_MASK GENMASK(3, 0)
+
+/* TDM Configuration Reg4 */
+#define TAS2780_TDM_CFG4 TAS2780_REG(0X0, 0x0d)
+#define TAS2780_TDM_CFG4_TX_OFFSET_MASK GENMASK(3, 1)
+
+/* TDM Configuration Reg5 */
+#define TAS2780_TDM_CFG5 TAS2780_REG(0X0, 0x0e)
+#define TAS2780_TDM_CFG5_VSNS_MASK BIT(6)
+#define TAS2780_TDM_CFG5_VSNS_ENABLE BIT(6)
+#define TAS2780_TDM_CFG5_50_MASK GENMASK(5, 0)
+
+/* TDM Configuration Reg6 */
+#define TAS2780_TDM_CFG6 TAS2780_REG(0X0, 0x0f)
+#define TAS2780_TDM_CFG6_ISNS_MASK BIT(6)
+#define TAS2780_TDM_CFG6_ISNS_ENABLE BIT(6)
+#define TAS2780_TDM_CFG6_50_MASK GENMASK(5, 0)
+
+/* IC CFG */
+#define TAS2780_IC_CFG TAS2780_REG(0X0, 0x5c)
+#define TAS2780_IC_CFG_MASK GENMASK(7, 6)
+#define TAS2780_IC_CFG_ENABLE (BIT(7) | BIT(6))
+
+#endif /* __TAS2780_H__ */
diff --git a/sound/soc/codecs/tas5086.c b/sound/soc/codecs/tas5086.c
index 5c0df3cd4832..a864984225bc 100644
--- a/sound/soc/codecs/tas5086.c
+++ b/sound/soc/codecs/tas5086.c
@@ -318,7 +318,7 @@ static int tas5086_set_dai_fmt(struct snd_soc_dai *codec_dai,
struct tas5086_private *priv = snd_soc_component_get_drvdata(component);
/* The TAS5086 can only be slave to all clocks */
- if ((format & SND_SOC_DAIFMT_MASTER_MASK) != SND_SOC_DAIFMT_CBS_CFS) {
+ if ((format & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) != SND_SOC_DAIFMT_CBC_CFC) {
dev_err(component->dev, "Invalid clocking mode\n");
return -EINVAL;
}
@@ -888,7 +888,6 @@ static const struct snd_soc_component_driver soc_component_dev_tas5086 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct i2c_device_id tas5086_i2c_id[] = {
diff --git a/sound/soc/codecs/tas571x.c b/sound/soc/codecs/tas571x.c
index 7b599664db20..4e7f20db57c4 100644
--- a/sound/soc/codecs/tas571x.c
+++ b/sound/soc/codecs/tas571x.c
@@ -756,7 +756,6 @@ static const struct snd_soc_component_driver tas571x_component = {
.num_dapm_routes = ARRAY_SIZE(tas571x_dapm_routes),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static struct snd_soc_dai_driver tas571x_dai = {
diff --git a/sound/soc/codecs/tas5720.c b/sound/soc/codecs/tas5720.c
index 17034abef568..3885c0bf0b01 100644
--- a/sound/soc/codecs/tas5720.c
+++ b/sound/soc/codecs/tas5720.c
@@ -89,8 +89,8 @@ static int tas5720_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
u8 serial_format;
int ret;
- if ((fmt & SND_SOC_DAIFMT_MASTER_MASK) != SND_SOC_DAIFMT_CBS_CFS) {
- dev_vdbg(component->dev, "DAI Format master is not found\n");
+ if ((fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) != SND_SOC_DAIFMT_CBC_CFC) {
+ dev_vdbg(component->dev, "DAI clocking invalid\n");
return -EINVAL;
}
@@ -572,7 +572,6 @@ static const struct snd_soc_component_driver soc_component_dev_tas5720 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct snd_soc_component_driver soc_component_dev_tas5722 = {
@@ -589,7 +588,6 @@ static const struct snd_soc_component_driver soc_component_dev_tas5722 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
/* PCM rates supported by the TAS5720 driver */
diff --git a/sound/soc/codecs/tas5805m.c b/sound/soc/codecs/tas5805m.c
index fa0e81ec875a..b1bb614534f7 100644
--- a/sound/soc/codecs/tas5805m.c
+++ b/sound/soc/codecs/tas5805m.c
@@ -367,7 +367,6 @@ static const struct snd_soc_component_driver soc_codec_dev_tas5805m = {
.num_dapm_routes = ARRAY_SIZE(tas5805m_audio_map),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int tas5805m_mute(struct snd_soc_dai *dai, int mute, int direction)
diff --git a/sound/soc/codecs/tas6424.c b/sound/soc/codecs/tas6424.c
index 22b53856e691..63d2983c3fcf 100644
--- a/sound/soc/codecs/tas6424.c
+++ b/sound/soc/codecs/tas6424.c
@@ -160,11 +160,11 @@ static int tas6424_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
dev_dbg(component->dev, "%s() fmt=0x%0x\n", __func__, fmt);
/* clock masters */
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBS_CFS:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_CBC_CFC:
break;
default:
- dev_err(component->dev, "Invalid DAI master/slave interface\n");
+ dev_err(component->dev, "Invalid DAI clocking\n");
return -EINVAL;
}
@@ -375,7 +375,6 @@ static struct snd_soc_component_driver soc_codec_dev_tas6424 = {
.num_dapm_routes = ARRAY_SIZE(tas6424_audio_map),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct snd_soc_dai_ops tas6424_speaker_dai_ops = {
diff --git a/sound/soc/codecs/tfa9879.c b/sound/soc/codecs/tfa9879.c
index 3d8e8c2276f0..9f7902ec40db 100644
--- a/sound/soc/codecs/tfa9879.c
+++ b/sound/soc/codecs/tfa9879.c
@@ -111,8 +111,8 @@ static int tfa9879_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
int i2s_set;
int sck_pol;
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBS_CFS:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_CBC_CFC:
break;
default:
return -EINVAL;
@@ -235,7 +235,6 @@ static const struct snd_soc_component_driver tfa9879_component = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config tfa9879_regmap = {
diff --git a/sound/soc/codecs/tfa989x.c b/sound/soc/codecs/tfa989x.c
index dc86852752c5..1c27429b9af6 100644
--- a/sound/soc/codecs/tfa989x.c
+++ b/sound/soc/codecs/tfa989x.c
@@ -40,12 +40,14 @@
#define TFA989X_I2S_SEL_REG 0x0a
#define TFA989X_I2S_SEL_REG_SPKR_MSK GENMASK(10, 9) /* speaker impedance */
#define TFA989X_I2S_SEL_REG_DCFG_MSK GENMASK(14, 11) /* DCDC compensation */
+#define TFA989X_HIDE_UNHIDE_KEY 0x40
#define TFA989X_PWM_CONTROL 0x41
#define TFA989X_CURRENTSENSE1 0x46
#define TFA989X_CURRENTSENSE2 0x47
#define TFA989X_CURRENTSENSE3 0x48
#define TFA989X_CURRENTSENSE4 0x49
+#define TFA9890_REVISION 0x80
#define TFA9895_REVISION 0x12
#define TFA9897_REVISION 0x97
@@ -136,7 +138,6 @@ static const struct snd_soc_component_driver tfa989x_component = {
.num_dapm_routes = ARRAY_SIZE(tfa989x_dapm_routes),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const unsigned int tfa989x_rates[] = {
@@ -188,6 +189,33 @@ static struct snd_soc_dai_driver tfa989x_dai = {
.ops = &tfa989x_dai_ops,
};
+static int tfa9890_init(struct regmap *regmap)
+{
+ int ret;
+
+ /* unhide keys to allow updating them */
+ ret = regmap_write(regmap, TFA989X_HIDE_UNHIDE_KEY, 0x5a6b);
+ if (ret)
+ return ret;
+
+ /* update PLL registers */
+ ret = regmap_set_bits(regmap, 0x59, 0x3);
+ if (ret)
+ return ret;
+
+ /* hide keys again */
+ ret = regmap_write(regmap, TFA989X_HIDE_UNHIDE_KEY, 0x0000);
+ if (ret)
+ return ret;
+
+ return regmap_write(regmap, TFA989X_CURRENTSENSE2, 0x7BE1);
+}
+
+static const struct tfa989x_rev tfa9890_rev = {
+ .rev = TFA9890_REVISION,
+ .init = tfa9890_init,
+};
+
static const struct reg_sequence tfa9895_reg_init[] = {
/* some other registers must be set for optimal amplifier behaviour */
{ TFA989X_BAT_PROT, 0x13ab },
@@ -376,6 +404,7 @@ static int tfa989x_i2c_probe(struct i2c_client *i2c)
}
static const struct of_device_id tfa989x_of_match[] = {
+ { .compatible = "nxp,tfa9890", .data = &tfa9890_rev },
{ .compatible = "nxp,tfa9895", .data = &tfa9895_rev },
{ .compatible = "nxp,tfa9897", .data = &tfa9897_rev },
{ }
diff --git a/sound/soc/codecs/tlv320adc3xxx.c b/sound/soc/codecs/tlv320adc3xxx.c
index 82532ad00c3c..748998e48af9 100644
--- a/sound/soc/codecs/tlv320adc3xxx.c
+++ b/sound/soc/codecs/tlv320adc3xxx.c
@@ -1252,8 +1252,7 @@ static int adc3xxx_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
int master = 0;
int ret;
- /* set master/slave audio interface */
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
case SND_SOC_DAIFMT_CBP_CFP:
master = 1;
clkdir = ADC3XXX_BCLK_MASTER | ADC3XXX_WCLK_MASTER;
diff --git a/sound/soc/codecs/tlv320adcx140.c b/sound/soc/codecs/tlv320adcx140.c
index 0b729658fde8..2844a9d2bc4a 100644
--- a/sound/soc/codecs/tlv320adcx140.c
+++ b/sound/soc/codecs/tlv320adcx140.c
@@ -712,16 +712,14 @@ static int adcx140_set_dai_fmt(struct snd_soc_dai *codec_dai,
bool inverted_bclk = false;
/* set master/slave audio interface */
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBM_CFM:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_CBP_CFP:
iface_reg2 |= ADCX140_BCLK_FSYNC_MASTER;
break;
- case SND_SOC_DAIFMT_CBS_CFS:
+ case SND_SOC_DAIFMT_CBC_CFC:
break;
- case SND_SOC_DAIFMT_CBS_CFM:
- case SND_SOC_DAIFMT_CBM_CFS:
default:
- dev_err(component->dev, "Invalid DAI master/slave interface\n");
+ dev_err(component->dev, "Invalid DAI clock provider\n");
return -EINVAL;
}
@@ -1054,7 +1052,6 @@ static const struct snd_soc_component_driver soc_codec_driver_adcx140 = {
.idle_bias_on = 0,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static struct snd_soc_dai_driver adcx140_dai_driver[] = {
diff --git a/sound/soc/codecs/tlv320aic23.c b/sound/soc/codecs/tlv320aic23.c
index 2400093e2c99..c47aa4d4162d 100644
--- a/sound/soc/codecs/tlv320aic23.c
+++ b/sound/soc/codecs/tlv320aic23.c
@@ -429,12 +429,11 @@ static int tlv320aic23_set_dai_fmt(struct snd_soc_dai *codec_dai,
iface_reg = snd_soc_component_read(component, TLV320AIC23_DIGT_FMT) & (~0x03);
- /* set master/slave audio interface */
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBM_CFM:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_CBP_CFP:
iface_reg |= TLV320AIC23_MS_MASTER;
break;
- case SND_SOC_DAIFMT_CBS_CFS:
+ case SND_SOC_DAIFMT_CBC_CFC:
iface_reg &= ~TLV320AIC23_MS_MASTER;
break;
default:
@@ -587,7 +586,6 @@ static const struct snd_soc_component_driver soc_component_dev_tlv320aic23 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
int tlv320aic23_probe(struct device *dev, struct regmap *regmap)
diff --git a/sound/soc/codecs/tlv320aic26.c b/sound/soc/codecs/tlv320aic26.c
index 077415a57225..8bae4b475068 100644
--- a/sound/soc/codecs/tlv320aic26.c
+++ b/sound/soc/codecs/tlv320aic26.c
@@ -32,7 +32,7 @@ struct aic26 {
struct spi_device *spi;
struct regmap *regmap;
struct snd_soc_component *component;
- int master;
+ int clock_provider;
int datfm;
int mclk;
@@ -117,8 +117,8 @@ static int aic26_hw_params(struct snd_pcm_substream *substream,
reg = dval << 2;
snd_soc_component_write(component, AIC26_REG_PLL_PROG2, reg);
- /* Audio Control 3 (master mode, fsref rate) */
- if (aic26->master)
+ /* Audio Control 3 (clock provider mode, fsref rate) */
+ if (aic26->clock_provider)
reg = 0x0800;
if (fsref == 48000)
reg = 0x2000;
@@ -178,10 +178,9 @@ static int aic26_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
dev_dbg(&aic26->spi->dev, "aic26_set_fmt(dai=%p, fmt==%i)\n",
codec_dai, fmt);
- /* set master/slave audio interface */
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBM_CFM: aic26->master = 1; break;
- case SND_SOC_DAIFMT_CBS_CFS: aic26->master = 0; break;
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_CBP_CFP: aic26->clock_provider = 1; break;
+ case SND_SOC_DAIFMT_CBC_CFC: aic26->clock_provider = 0; break;
default:
dev_dbg(&aic26->spi->dev, "bad master\n"); return -EINVAL;
}
@@ -332,7 +331,6 @@ static const struct snd_soc_component_driver aic26_soc_component_dev = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config aic26_regmap = {
@@ -363,7 +361,7 @@ static int aic26_spi_probe(struct spi_device *spi)
/* Initialize the driver data */
aic26->spi = spi;
dev_set_drvdata(&spi->dev, aic26);
- aic26->master = 1;
+ aic26->clock_provider = 1;
ret = devm_snd_soc_register_component(&spi->dev,
&aic26_soc_component_dev, &aic26_dai, 1);
diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c
index b2e59581c17a..0847302121f6 100644
--- a/sound/soc/codecs/tlv320aic31xx.c
+++ b/sound/soc/codecs/tlv320aic31xx.c
@@ -1033,8 +1033,8 @@ static int aic31xx_clock_master_routes(struct snd_soc_component *component,
struct aic31xx_priv *aic31xx = snd_soc_component_get_drvdata(component);
int ret;
- fmt &= SND_SOC_DAIFMT_MASTER_MASK;
- if (fmt == SND_SOC_DAIFMT_CBS_CFS &&
+ fmt &= SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK;
+ if (fmt == SND_SOC_DAIFMT_CBC_CFC &&
aic31xx->master_dapm_route_applied) {
/*
* Remove the DAPM route(s) for codec clock master modes,
@@ -1051,7 +1051,7 @@ static int aic31xx_clock_master_routes(struct snd_soc_component *component,
return ret;
aic31xx->master_dapm_route_applied = false;
- } else if (fmt != SND_SOC_DAIFMT_CBS_CFS &&
+ } else if (fmt != SND_SOC_DAIFMT_CBC_CFC &&
!aic31xx->master_dapm_route_applied) {
/*
* Add the needed DAPM route(s) for codec clock master modes,
@@ -1083,21 +1083,20 @@ static int aic31xx_set_dai_fmt(struct snd_soc_dai *codec_dai,
dev_dbg(component->dev, "## %s: fmt = 0x%x\n", __func__, fmt);
- /* set master/slave audio interface */
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBM_CFM:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_CBP_CFP:
iface_reg1 |= AIC31XX_BCLK_MASTER | AIC31XX_WCLK_MASTER;
break;
- case SND_SOC_DAIFMT_CBS_CFM:
+ case SND_SOC_DAIFMT_CBC_CFP:
iface_reg1 |= AIC31XX_WCLK_MASTER;
break;
- case SND_SOC_DAIFMT_CBM_CFS:
+ case SND_SOC_DAIFMT_CBP_CFC:
iface_reg1 |= AIC31XX_BCLK_MASTER;
break;
- case SND_SOC_DAIFMT_CBS_CFS:
+ case SND_SOC_DAIFMT_CBC_CFC:
break;
default:
- dev_err(component->dev, "Invalid DAI master/slave interface\n");
+ dev_err(component->dev, "Invalid DAI clock provider\n");
return -EINVAL;
}
@@ -1418,7 +1417,6 @@ static const struct snd_soc_component_driver soc_codec_driver_aic31xx = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct snd_soc_dai_ops aic31xx_dai_ops = {
diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
index 8f42fd7bc053..ffe1828a4b7e 100644
--- a/sound/soc/codecs/tlv320aic32x4.c
+++ b/sound/soc/codecs/tlv320aic32x4.c
@@ -49,6 +49,8 @@ struct aic32x4_priv {
struct aic32x4_setup_data *setup;
struct device *dev;
enum aic32x4_type type;
+
+ unsigned int fmt;
};
static int aic32x4_reset_adc(struct snd_soc_dapm_widget *w,
@@ -611,19 +613,19 @@ static int aic32x4_set_dai_sysclk(struct snd_soc_dai *codec_dai,
static int aic32x4_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
{
struct snd_soc_component *component = codec_dai->component;
+ struct aic32x4_priv *aic32x4 = snd_soc_component_get_drvdata(component);
u8 iface_reg_1 = 0;
u8 iface_reg_2 = 0;
u8 iface_reg_3 = 0;
- /* set master/slave audio interface */
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBM_CFM:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_CBP_CFP:
iface_reg_1 |= AIC32X4_BCLKMASTER | AIC32X4_WCLKMASTER;
break;
- case SND_SOC_DAIFMT_CBS_CFS:
+ case SND_SOC_DAIFMT_CBC_CFC:
break;
default:
- printk(KERN_ERR "aic32x4: invalid DAI master/slave interface\n");
+ printk(KERN_ERR "aic32x4: invalid clock provider\n");
return -EINVAL;
}
@@ -654,6 +656,8 @@ static int aic32x4_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
return -EINVAL;
}
+ aic32x4->fmt = fmt;
+
snd_soc_component_update_bits(component, AIC32X4_IFACE1,
AIC32X4_IFACE1_DATATYPE_MASK |
AIC32X4_IFACE1_MASTER_MASK, iface_reg_1);
@@ -758,6 +762,10 @@ static int aic32x4_setup_clocks(struct snd_soc_component *component,
return -EINVAL;
}
+ /* PCM over I2S is always 2-channel */
+ if ((aic32x4->fmt & SND_SOC_DAIFMT_FORMAT_MASK) == SND_SOC_DAIFMT_I2S)
+ channels = 2;
+
madc = DIV_ROUND_UP((32 * adc_resource_class), aosr);
max_dosr = (AIC32X4_MAX_DOSR_FREQ / sample_rate / dosr_increment) *
dosr_increment;
@@ -1078,7 +1086,6 @@ static const struct snd_soc_component_driver soc_component_dev_aic32x4 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct snd_kcontrol_new aic32x4_tas2505_snd_controls[] = {
@@ -1200,7 +1207,6 @@ static const struct snd_soc_component_driver soc_component_dev_aic32x4_tas2505 =
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int aic32x4_parse_dt(struct aic32x4_priv *aic32x4,
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index d53037b1509d..08938801daec 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -1253,22 +1253,21 @@ static int aic3x_set_dai_fmt(struct snd_soc_dai *codec_dai,
iface_areg = snd_soc_component_read(component, AIC3X_ASD_INTF_CTRLA) & 0x3f;
iface_breg = snd_soc_component_read(component, AIC3X_ASD_INTF_CTRLB) & 0x3f;
- /* set master/slave audio interface */
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBM_CFM:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_CBP_CFP:
aic3x->master = 1;
iface_areg |= BIT_CLK_MASTER | WORD_CLK_MASTER;
break;
- case SND_SOC_DAIFMT_CBS_CFS:
+ case SND_SOC_DAIFMT_CBC_CFC:
aic3x->master = 0;
iface_areg &= ~(BIT_CLK_MASTER | WORD_CLK_MASTER);
break;
- case SND_SOC_DAIFMT_CBM_CFS:
+ case SND_SOC_DAIFMT_CBP_CFC:
aic3x->master = 1;
iface_areg |= BIT_CLK_MASTER;
iface_areg &= ~WORD_CLK_MASTER;
break;
- case SND_SOC_DAIFMT_CBS_CFM:
+ case SND_SOC_DAIFMT_CBC_CFP:
aic3x->master = 1;
iface_areg |= WORD_CLK_MASTER;
iface_areg &= ~BIT_CLK_MASTER;
@@ -1698,7 +1697,6 @@ static const struct snd_soc_component_driver soc_component_dev_aic3x = {
.num_dapm_routes = ARRAY_SIZE(intercon),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static void aic3x_configure_ocmv(struct device *dev, struct aic3x_priv *aic3x)
diff --git a/sound/soc/codecs/tlv320dac33.c b/sound/soc/codecs/tlv320dac33.c
index 66f1d1cd6cf0..17ae3b1d96fb 100644
--- a/sound/soc/codecs/tlv320dac33.c
+++ b/sound/soc/codecs/tlv320dac33.c
@@ -1317,16 +1317,14 @@ static int dac33_set_dai_fmt(struct snd_soc_dai *codec_dai,
aictrl_a = dac33_read_reg_cache(component, DAC33_SER_AUDIOIF_CTRL_A);
aictrl_b = dac33_read_reg_cache(component, DAC33_SER_AUDIOIF_CTRL_B);
- /* set master/slave audio interface */
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBM_CFM:
- /* Codec Master */
+
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_CBP_CFP:
aictrl_a |= (DAC33_MSBCLK | DAC33_MSWCLK);
break;
- case SND_SOC_DAIFMT_CBS_CFS:
- /* Codec Slave */
+ case SND_SOC_DAIFMT_CBC_CFC:
if (dac33->fifo_mode) {
- dev_err(component->dev, "FIFO mode requires master mode\n");
+ dev_err(component->dev, "FIFO mode requires provider mode\n");
return -EINVAL;
} else
aictrl_a &= ~(DAC33_MSBCLK | DAC33_MSWCLK);
@@ -1433,7 +1431,6 @@ static const struct snd_soc_component_driver soc_component_dev_tlv320dac33 = {
.num_dapm_routes = ARRAY_SIZE(audio_map),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
#define DAC33_RATES (SNDRV_PCM_RATE_44100 | \
diff --git a/sound/soc/codecs/tscs42xx.c b/sound/soc/codecs/tscs42xx.c
index 4fb0bb01bcdc..fa0c525189c2 100644
--- a/sound/soc/codecs/tscs42xx.c
+++ b/sound/soc/codecs/tscs42xx.c
@@ -1358,7 +1358,6 @@ static const struct snd_soc_component_driver soc_codec_dev_tscs42xx = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static inline void init_coeff_ram_cache(struct tscs42xx *tscs42xx)
diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c
index 0ba3546ef870..e48768233e20 100644
--- a/sound/soc/codecs/twl4030.c
+++ b/sound/soc/codecs/twl4030.c
@@ -34,6 +34,14 @@
#define TWL4030_CACHEREGNUM (TWL4030_REG_MISC_SET_2 + 1)
+struct twl4030_board_params {
+ unsigned int digimic_delay; /* in ms */
+ unsigned int ramp_delay_value;
+ unsigned int offset_cncl_path;
+ unsigned int hs_extmute:1;
+ int hs_extmute_gpio;
+};
+
/* codec private data */
struct twl4030_priv {
unsigned int codec_powered;
@@ -58,7 +66,7 @@ struct twl4030_priv {
u8 carkitl_enabled, carkitr_enabled;
u8 ctl_cache[TWL4030_REG_PRECKR_CTL - TWL4030_REG_EAR_CTL + 1];
- struct twl4030_codec_data *pdata;
+ struct twl4030_board_params *board_params;
};
static void tw4030_init_ctl_cache(struct twl4030_priv *twl4030)
@@ -193,73 +201,71 @@ static void twl4030_codec_enable(struct snd_soc_component *component, int enable
udelay(10);
}
-static void twl4030_setup_pdata_of(struct twl4030_codec_data *pdata,
- struct device_node *node)
+static void
+twl4030_get_board_param_values(struct twl4030_board_params *board_params,
+ struct device_node *node)
{
int value;
- of_property_read_u32(node, "ti,digimic_delay",
- &pdata->digimic_delay);
- of_property_read_u32(node, "ti,ramp_delay_value",
- &pdata->ramp_delay_value);
- of_property_read_u32(node, "ti,offset_cncl_path",
- &pdata->offset_cncl_path);
+ of_property_read_u32(node, "ti,digimic_delay", &board_params->digimic_delay);
+ of_property_read_u32(node, "ti,ramp_delay_value", &board_params->ramp_delay_value);
+ of_property_read_u32(node, "ti,offset_cncl_path", &board_params->offset_cncl_path);
if (!of_property_read_u32(node, "ti,hs_extmute", &value))
- pdata->hs_extmute = value;
+ board_params->hs_extmute = value;
- pdata->hs_extmute_gpio = of_get_named_gpio(node,
- "ti,hs_extmute_gpio", 0);
- if (gpio_is_valid(pdata->hs_extmute_gpio))
- pdata->hs_extmute = 1;
+ board_params->hs_extmute_gpio = of_get_named_gpio(node, "ti,hs_extmute_gpio", 0);
+ if (gpio_is_valid(board_params->hs_extmute_gpio))
+ board_params->hs_extmute = 1;
}
-static struct twl4030_codec_data *twl4030_get_pdata(struct snd_soc_component *component)
+static struct twl4030_board_params*
+twl4030_get_board_params(struct snd_soc_component *component)
{
- struct twl4030_codec_data *pdata = dev_get_platdata(component->dev);
+ struct twl4030_board_params *board_params = NULL;
struct device_node *twl4030_codec_node = NULL;
twl4030_codec_node = of_get_child_by_name(component->dev->parent->of_node,
"codec");
- if (!pdata && twl4030_codec_node) {
- pdata = devm_kzalloc(component->dev,
- sizeof(struct twl4030_codec_data),
- GFP_KERNEL);
- if (!pdata) {
+ if (twl4030_codec_node) {
+ board_params = devm_kzalloc(component->dev,
+ sizeof(struct twl4030_board_params),
+ GFP_KERNEL);
+ if (!board_params) {
of_node_put(twl4030_codec_node);
return NULL;
}
- twl4030_setup_pdata_of(pdata, twl4030_codec_node);
+ twl4030_get_board_param_values(board_params, twl4030_codec_node);
of_node_put(twl4030_codec_node);
}
- return pdata;
+ return board_params;
}
static void twl4030_init_chip(struct snd_soc_component *component)
{
- struct twl4030_codec_data *pdata;
+ struct twl4030_board_params *board_params;
struct twl4030_priv *twl4030 = snd_soc_component_get_drvdata(component);
u8 reg, byte;
int i = 0;
- pdata = twl4030_get_pdata(component);
+ board_params = twl4030_get_board_params(component);
- if (pdata && pdata->hs_extmute) {
- if (gpio_is_valid(pdata->hs_extmute_gpio)) {
+ if (board_params && board_params->hs_extmute) {
+ if (gpio_is_valid(board_params->hs_extmute_gpio)) {
int ret;
- if (!pdata->hs_extmute_gpio)
+ if (!board_params->hs_extmute_gpio)
dev_warn(component->dev,
"Extmute GPIO is 0 is this correct?\n");
- ret = gpio_request_one(pdata->hs_extmute_gpio,
+ ret = gpio_request_one(board_params->hs_extmute_gpio,
GPIOF_OUT_INIT_LOW,
"hs_extmute");
if (ret) {
dev_err(component->dev,
"Failed to get hs_extmute GPIO\n");
- pdata->hs_extmute_gpio = -1;
+ board_params->hs_extmute_gpio = -1;
}
} else {
u8 pin_mux;
@@ -290,14 +296,14 @@ static void twl4030_init_chip(struct snd_soc_component *component)
twl4030_write(component, TWL4030_REG_ARXR2_APGA_CTL, 0x32);
/* Machine dependent setup */
- if (!pdata)
+ if (!board_params)
return;
- twl4030->pdata = pdata;
+ twl4030->board_params = board_params;
reg = twl4030_read(component, TWL4030_REG_HS_POPN_SET);
reg &= ~TWL4030_RAMP_DELAY;
- reg |= (pdata->ramp_delay_value << 2);
+ reg |= (board_params->ramp_delay_value << 2);
twl4030_write(component, TWL4030_REG_HS_POPN_SET, reg);
/* initiate offset cancellation */
@@ -305,7 +311,7 @@ static void twl4030_init_chip(struct snd_soc_component *component)
reg = twl4030_read(component, TWL4030_REG_ANAMICL);
reg &= ~TWL4030_OFFSET_CNCL_SEL;
- reg |= pdata->offset_cncl_path;
+ reg |= board_params->offset_cncl_path;
twl4030_write(component, TWL4030_REG_ANAMICL,
reg | TWL4030_CNCL_OFFSET_START);
@@ -692,7 +698,7 @@ static void headset_ramp(struct snd_soc_component *component, int ramp)
{
unsigned char hs_gain, hs_pop;
struct twl4030_priv *twl4030 = snd_soc_component_get_drvdata(component);
- struct twl4030_codec_data *pdata = twl4030->pdata;
+ struct twl4030_board_params *board_params = twl4030->board_params;
/* Base values for ramp delay calculation: 2^19 - 2^26 */
unsigned int ramp_base[] = {524288, 1048576, 2097152, 4194304,
8388608, 16777216, 33554432, 67108864};
@@ -705,9 +711,9 @@ static void headset_ramp(struct snd_soc_component *component, int ramp)
/* Enable external mute control, this dramatically reduces
* the pop-noise */
- if (pdata && pdata->hs_extmute) {
- if (gpio_is_valid(pdata->hs_extmute_gpio)) {
- gpio_set_value(pdata->hs_extmute_gpio, 1);
+ if (board_params && board_params->hs_extmute) {
+ if (gpio_is_valid(board_params->hs_extmute_gpio)) {
+ gpio_set_value(board_params->hs_extmute_gpio, 1);
} else {
hs_pop |= TWL4030_EXTMUTE;
twl4030_write(component, TWL4030_REG_HS_POPN_SET, hs_pop);
@@ -741,9 +747,9 @@ static void headset_ramp(struct snd_soc_component *component, int ramp)
}
/* Disable external mute */
- if (pdata && pdata->hs_extmute) {
- if (gpio_is_valid(pdata->hs_extmute_gpio)) {
- gpio_set_value(pdata->hs_extmute_gpio, 0);
+ if (board_params && board_params->hs_extmute) {
+ if (gpio_is_valid(board_params->hs_extmute_gpio)) {
+ gpio_set_value(board_params->hs_extmute_gpio, 0);
} else {
hs_pop &= ~TWL4030_EXTMUTE;
twl4030_write(component, TWL4030_REG_HS_POPN_SET, hs_pop);
@@ -806,10 +812,10 @@ static int digimic_event(struct snd_soc_dapm_widget *w,
{
struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
struct twl4030_priv *twl4030 = snd_soc_component_get_drvdata(component);
- struct twl4030_codec_data *pdata = twl4030->pdata;
+ struct twl4030_board_params *board_params = twl4030->board_params;
- if (pdata && pdata->digimic_delay)
- twl4030_wait_ms(pdata->digimic_delay);
+ if (board_params && board_params->digimic_delay)
+ twl4030_wait_ms(board_params->digimic_delay);
return 0;
}
@@ -2168,10 +2174,11 @@ static int twl4030_soc_probe(struct snd_soc_component *component)
static void twl4030_soc_remove(struct snd_soc_component *component)
{
struct twl4030_priv *twl4030 = snd_soc_component_get_drvdata(component);
- struct twl4030_codec_data *pdata = twl4030->pdata;
+ struct twl4030_board_params *board_params = twl4030->board_params;
- if (pdata && pdata->hs_extmute && gpio_is_valid(pdata->hs_extmute_gpio))
- gpio_free(pdata->hs_extmute_gpio);
+ if (board_params && board_params->hs_extmute &&
+ gpio_is_valid(board_params->hs_extmute_gpio))
+ gpio_free(board_params->hs_extmute_gpio);
}
static const struct snd_soc_component_driver soc_component_dev_twl4030 = {
@@ -2188,7 +2195,6 @@ static const struct snd_soc_component_driver soc_component_dev_twl4030 = {
.num_dapm_routes = ARRAY_SIZE(intercon),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int twl4030_codec_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/twl6040.c b/sound/soc/codecs/twl6040.c
index b37203336c4e..dd5ee5dc0cd7 100644
--- a/sound/soc/codecs/twl6040.c
+++ b/sound/soc/codecs/twl6040.c
@@ -1153,7 +1153,6 @@ static const struct snd_soc_component_driver soc_component_dev_twl6040 = {
.suspend_bias_off = 1,
.idle_bias_on = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int twl6040_codec_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/uda1334.c b/sound/soc/codecs/uda1334.c
index 8670a2a05a56..eace96533600 100644
--- a/sound/soc/codecs/uda1334.c
+++ b/sound/soc/codecs/uda1334.c
@@ -169,7 +169,7 @@ static int uda1334_set_dai_sysclk(struct snd_soc_dai *codec_dai,
static int uda1334_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
{
fmt &= (SND_SOC_DAIFMT_FORMAT_MASK | SND_SOC_DAIFMT_INV_MASK |
- SND_SOC_DAIFMT_MASTER_MASK);
+ SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK);
if (fmt != (SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBC_CFC)) {
@@ -236,7 +236,6 @@ static const struct snd_soc_component_driver soc_component_dev_uda1334 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct of_device_id uda1334_of_match[] = {
diff --git a/sound/soc/codecs/uda134x.c b/sound/soc/codecs/uda134x.c
index 037833c509f7..2db3d8a60c7a 100644
--- a/sound/soc/codecs/uda134x.c
+++ b/sound/soc/codecs/uda134x.c
@@ -527,7 +527,6 @@ static const struct snd_soc_component_driver soc_component_dev_uda134x = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config uda134x_regmap_config = {
diff --git a/sound/soc/codecs/uda1380.c b/sound/soc/codecs/uda1380.c
index b5004842520b..fdaaee845176 100644
--- a/sound/soc/codecs/uda1380.c
+++ b/sound/soc/codecs/uda1380.c
@@ -736,7 +736,6 @@ static const struct snd_soc_component_driver soc_component_dev_uda1380 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int uda1380_i2c_probe(struct i2c_client *i2c)
diff --git a/sound/soc/codecs/wcd-mbhc-v2.c b/sound/soc/codecs/wcd-mbhc-v2.c
index c53c2ef33e1a..98baef594bf3 100644
--- a/sound/soc/codecs/wcd-mbhc-v2.c
+++ b/sound/soc/codecs/wcd-mbhc-v2.c
@@ -714,12 +714,11 @@ static int wcd_mbhc_initialise(struct wcd_mbhc *mbhc)
struct snd_soc_component *component = mbhc->component;
int ret;
- ret = pm_runtime_get_sync(component->dev);
+ ret = pm_runtime_resume_and_get(component->dev);
if (ret < 0 && ret != -EACCES) {
dev_err_ratelimited(component->dev,
- "pm_runtime_get_sync failed in %s, ret %d\n",
+ "pm_runtime_resume_and_get failed in %s, ret %d\n",
__func__, ret);
- pm_runtime_put_noidle(component->dev);
return ret;
}
@@ -1097,12 +1096,11 @@ static void wcd_correct_swch_plug(struct work_struct *work)
mbhc = container_of(work, struct wcd_mbhc, correct_plug_swch);
component = mbhc->component;
- ret = pm_runtime_get_sync(component->dev);
+ ret = pm_runtime_resume_and_get(component->dev);
if (ret < 0 && ret != -EACCES) {
dev_err_ratelimited(component->dev,
- "pm_runtime_get_sync failed in %s, ret %d\n",
+ "pm_runtime_resume_and_get failed in %s, ret %d\n",
__func__, ret);
- pm_runtime_put_noidle(component->dev);
return;
}
micbias_mv = wcd_mbhc_get_micbias(mbhc);
@@ -1306,7 +1304,7 @@ exit:
static irqreturn_t wcd_mbhc_adc_hs_ins_irq(int irq, void *data)
{
struct wcd_mbhc *mbhc = data;
- u8 clamp_state = 0;
+ u8 clamp_state;
u8 clamp_retry = WCD_MBHC_FAKE_INS_RETRY;
/*
diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
index 3cb7a3eab8c7..beeeb35e8032 100644
--- a/sound/soc/codecs/wcd9335.c
+++ b/sound/soc/codecs/wcd9335.c
@@ -24,6 +24,8 @@
#include "wcd9335.h"
#include "wcd-clsh-v2.h"
+#include <dt-bindings/sound/qcom,wcd9335.h>
+
#define WCD9335_RATES_MASK (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |\
SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_192000)
@@ -204,17 +206,6 @@ enum wcd9335_sido_voltage {
};
enum {
- AIF1_PB = 0,
- AIF1_CAP,
- AIF2_PB,
- AIF2_CAP,
- AIF3_PB,
- AIF3_CAP,
- AIF4_PB,
- NUM_CODEC_DAIS,
-};
-
-enum {
COMPANDER_1, /* HPH_L */
COMPANDER_2, /* HPH_R */
COMPANDER_3, /* LO1_DIFF */
@@ -1818,11 +1809,11 @@ static int wcd9335_set_decimator_rate(struct snd_soc_dai *dai,
tx_port_reg = WCD9335_CDC_IF_ROUTER_TX_MUX_CFG0;
shift = (tx_port << 1);
shift_val = 0x03;
- } else if ((tx_port >= 4) && (tx_port < 8)) {
+ } else if (tx_port < 8) {
tx_port_reg = WCD9335_CDC_IF_ROUTER_TX_MUX_CFG1;
shift = ((tx_port - 4) << 1);
shift_val = 0x03;
- } else if ((tx_port >= 8) && (tx_port < 11)) {
+ } else if (tx_port < 11) {
tx_port_reg = WCD9335_CDC_IF_ROUTER_TX_MUX_CFG2;
shift = ((tx_port - 8) << 1);
shift_val = 0x03;
@@ -2264,51 +2255,42 @@ static int wcd9335_rx_hph_mode_put(struct snd_kcontrol *kc,
static const struct snd_kcontrol_new wcd9335_snd_controls[] = {
/* -84dB min - 40dB max */
- SOC_SINGLE_SX_TLV("RX0 Digital Volume", WCD9335_CDC_RX0_RX_VOL_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX1 Digital Volume", WCD9335_CDC_RX1_RX_VOL_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX2 Digital Volume", WCD9335_CDC_RX2_RX_VOL_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX3 Digital Volume", WCD9335_CDC_RX3_RX_VOL_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX4 Digital Volume", WCD9335_CDC_RX4_RX_VOL_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX5 Digital Volume", WCD9335_CDC_RX5_RX_VOL_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX6 Digital Volume", WCD9335_CDC_RX6_RX_VOL_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX7 Digital Volume", WCD9335_CDC_RX7_RX_VOL_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX8 Digital Volume", WCD9335_CDC_RX8_RX_VOL_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX0 Mix Digital Volume",
- WCD9335_CDC_RX0_RX_VOL_MIX_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX1 Mix Digital Volume",
- WCD9335_CDC_RX1_RX_VOL_MIX_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX2 Mix Digital Volume",
- WCD9335_CDC_RX2_RX_VOL_MIX_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX3 Mix Digital Volume",
- WCD9335_CDC_RX3_RX_VOL_MIX_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX4 Mix Digital Volume",
- WCD9335_CDC_RX4_RX_VOL_MIX_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX5 Mix Digital Volume",
- WCD9335_CDC_RX5_RX_VOL_MIX_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX6 Mix Digital Volume",
- WCD9335_CDC_RX6_RX_VOL_MIX_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX7 Mix Digital Volume",
- WCD9335_CDC_RX7_RX_VOL_MIX_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX8 Mix Digital Volume",
- WCD9335_CDC_RX8_RX_VOL_MIX_CTL,
- 0, -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("RX0 Digital Volume", WCD9335_CDC_RX0_RX_VOL_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("RX1 Digital Volume", WCD9335_CDC_RX1_RX_VOL_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("RX2 Digital Volume", WCD9335_CDC_RX2_RX_VOL_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("RX3 Digital Volume", WCD9335_CDC_RX3_RX_VOL_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("RX4 Digital Volume", WCD9335_CDC_RX4_RX_VOL_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("RX5 Digital Volume", WCD9335_CDC_RX5_RX_VOL_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("RX6 Digital Volume", WCD9335_CDC_RX6_RX_VOL_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("RX7 Digital Volume", WCD9335_CDC_RX7_RX_VOL_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("RX8 Digital Volume", WCD9335_CDC_RX8_RX_VOL_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("RX0 Mix Digital Volume", WCD9335_CDC_RX0_RX_VOL_MIX_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("RX1 Mix Digital Volume", WCD9335_CDC_RX1_RX_VOL_MIX_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("RX2 Mix Digital Volume", WCD9335_CDC_RX2_RX_VOL_MIX_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("RX3 Mix Digital Volume", WCD9335_CDC_RX3_RX_VOL_MIX_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("RX4 Mix Digital Volume", WCD9335_CDC_RX4_RX_VOL_MIX_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("RX5 Mix Digital Volume", WCD9335_CDC_RX5_RX_VOL_MIX_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("RX6 Mix Digital Volume", WCD9335_CDC_RX6_RX_VOL_MIX_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("RX7 Mix Digital Volume", WCD9335_CDC_RX7_RX_VOL_MIX_CTL,
+ -84, 40, digital_gain),
+ SOC_SINGLE_S8_TLV("RX8 Mix Digital Volume", WCD9335_CDC_RX8_RX_VOL_MIX_CTL,
+ -84, 40, digital_gain),
SOC_ENUM("RX INT0_1 HPF cut off", cf_int0_1_enum),
SOC_ENUM("RX INT0_2 HPF cut off", cf_int0_2_enum),
SOC_ENUM("RX INT1_1 HPF cut off", cf_int1_1_enum),
diff --git a/sound/soc/codecs/wl1273.c b/sound/soc/codecs/wl1273.c
index 02232f64110e..626278e4c923 100644
--- a/sound/soc/codecs/wl1273.c
+++ b/sound/soc/codecs/wl1273.c
@@ -475,7 +475,6 @@ static const struct snd_soc_component_driver soc_component_dev_wl1273 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int wl1273_platform_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/wm0010.c b/sound/soc/codecs/wm0010.c
index 1bef1c500c8e..034a4e858c7e 100644
--- a/sound/soc/codecs/wm0010.c
+++ b/sound/soc/codecs/wm0010.c
@@ -789,7 +789,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm0010 = {
.num_dapm_routes = ARRAY_SIZE(wm0010_dapm_routes),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
#define WM0010_RATES (SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000)
diff --git a/sound/soc/codecs/wm1250-ev1.c b/sound/soc/codecs/wm1250-ev1.c
index b6366dea15a6..98343626078b 100644
--- a/sound/soc/codecs/wm1250-ev1.c
+++ b/sound/soc/codecs/wm1250-ev1.c
@@ -144,7 +144,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm1250_ev1 = {
.set_bias_level = wm1250_ev1_set_bias_level,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int wm1250_ev1_pdata(struct i2c_client *i2c)
diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
index ede5f2a982a6..14b4fd97488c 100644
--- a/sound/soc/codecs/wm2000.c
+++ b/sound/soc/codecs/wm2000.c
@@ -803,7 +803,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm2000 = {
.num_dapm_routes = ARRAY_SIZE(wm2000_audio_map),
.idle_bias_on = 1,
.use_pmdown_time = 1,
- .non_legacy_dai_naming = 1,
};
static int wm2000_i2c_probe(struct i2c_client *i2c)
diff --git a/sound/soc/codecs/wm2200.c b/sound/soc/codecs/wm2200.c
index 1cd544580c83..7b4e162a298c 100644
--- a/sound/soc/codecs/wm2200.c
+++ b/sound/soc/codecs/wm2200.c
@@ -2104,7 +2104,6 @@ static const struct snd_soc_component_driver soc_component_wm2200 = {
.dapm_routes = wm2200_dapm_routes,
.num_dapm_routes = ARRAY_SIZE(wm2200_dapm_routes),
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static irqreturn_t wm2200_irq(int irq, void *data)
diff --git a/sound/soc/codecs/wm5100.c b/sound/soc/codecs/wm5100.c
index a89870918174..35a85ce6b464 100644
--- a/sound/soc/codecs/wm5100.c
+++ b/sound/soc/codecs/wm5100.c
@@ -2389,7 +2389,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm5100 = {
.num_dapm_routes = ARRAY_SIZE(wm5100_dapm_routes),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config wm5100_regmap = {
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index b034df47a5ef..af7d324e3352 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -2028,7 +2028,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm5102 = {
.num_dapm_routes = ARRAY_SIZE(wm5102_dapm_routes),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int wm5102_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index 4ab7a672f8de..f3f4a10bf0f7 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -2385,7 +2385,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm5110 = {
.num_dapm_routes = ARRAY_SIZE(wm5110_dapm_routes),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int wm5110_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
index 41504ce2a682..66bd281095e1 100644
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -1613,7 +1613,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8350 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int wm8350_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/wm8400.c b/sound/soc/codecs/wm8400.c
index bf5e77c86aed..19ce839f6ef7 100644
--- a/sound/soc/codecs/wm8400.c
+++ b/sound/soc/codecs/wm8400.c
@@ -1322,7 +1322,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8400 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int wm8400_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/wm8510.c b/sound/soc/codecs/wm8510.c
index c6439d25006b..e13f9780a111 100644
--- a/sound/soc/codecs/wm8510.c
+++ b/sound/soc/codecs/wm8510.c
@@ -592,7 +592,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8510 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct of_device_id wm8510_of_match[] = {
diff --git a/sound/soc/codecs/wm8523.c b/sound/soc/codecs/wm8523.c
index ba35a0221dc8..66f6371d8acf 100644
--- a/sound/soc/codecs/wm8523.c
+++ b/sound/soc/codecs/wm8523.c
@@ -422,7 +422,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8523 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct of_device_id wm8523_of_match[] = {
diff --git a/sound/soc/codecs/wm8524.c b/sound/soc/codecs/wm8524.c
index 81f858f6bd67..b56dcac60244 100644
--- a/sound/soc/codecs/wm8524.c
+++ b/sound/soc/codecs/wm8524.c
@@ -203,7 +203,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8524 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct of_device_id wm8524_of_match[] = {
diff --git a/sound/soc/codecs/wm8580.c b/sound/soc/codecs/wm8580.c
index 84020195314d..ca796aa0aeb7 100644
--- a/sound/soc/codecs/wm8580.c
+++ b/sound/soc/codecs/wm8580.c
@@ -966,7 +966,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8580 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config wm8580_regmap = {
diff --git a/sound/soc/codecs/wm8711.c b/sound/soc/codecs/wm8711.c
index b68a1ebcd061..383c6796e8a3 100644
--- a/sound/soc/codecs/wm8711.c
+++ b/sound/soc/codecs/wm8711.c
@@ -378,7 +378,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8711 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct of_device_id wm8711_of_match[] = {
diff --git a/sound/soc/codecs/wm8727.c b/sound/soc/codecs/wm8727.c
index 1a118b75b539..d6b0a570dd87 100644
--- a/sound/soc/codecs/wm8727.c
+++ b/sound/soc/codecs/wm8727.c
@@ -55,7 +55,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8727 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int wm8727_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/wm8728.c b/sound/soc/codecs/wm8728.c
index 119ff0a1bb35..a3dbdbf40723 100644
--- a/sound/soc/codecs/wm8728.c
+++ b/sound/soc/codecs/wm8728.c
@@ -221,7 +221,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8728 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct of_device_id wm8728_of_match[] = {
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index 2408c4a591d5..d5ab3ba126a6 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -561,7 +561,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8731 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
int wm8731_init(struct device *dev, struct wm8731_priv *wm8731)
diff --git a/sound/soc/codecs/wm8737.c b/sound/soc/codecs/wm8737.c
index 5778091d1c09..90b54343370c 100644
--- a/sound/soc/codecs/wm8737.c
+++ b/sound/soc/codecs/wm8737.c
@@ -583,7 +583,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8737 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct of_device_id wm8737_of_match[] = {
diff --git a/sound/soc/codecs/wm8741.c b/sound/soc/codecs/wm8741.c
index 871e2c5421b8..c7afa4f2795d 100644
--- a/sound/soc/codecs/wm8741.c
+++ b/sound/soc/codecs/wm8741.c
@@ -528,7 +528,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8741 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct of_device_id wm8741_of_match[] = {
diff --git a/sound/soc/codecs/wm8750.c b/sound/soc/codecs/wm8750.c
index 1426fc1f7c5a..2f6ee8d6639f 100644
--- a/sound/soc/codecs/wm8750.c
+++ b/sound/soc/codecs/wm8750.c
@@ -719,7 +719,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8750 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct of_device_id wm8750_of_match[] = {
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c
index 931134d334ec..bb18f58dc670 100644
--- a/sound/soc/codecs/wm8753.c
+++ b/sound/soc/codecs/wm8753.c
@@ -1492,7 +1492,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8753 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct of_device_id wm8753_of_match[] = {
diff --git a/sound/soc/codecs/wm8770.c b/sound/soc/codecs/wm8770.c
index 5f394065030d..e03fee8869c3 100644
--- a/sound/soc/codecs/wm8770.c
+++ b/sound/soc/codecs/wm8770.c
@@ -617,7 +617,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8770 = {
.num_dapm_routes = ARRAY_SIZE(wm8770_intercon),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct of_device_id wm8770_of_match[] = {
diff --git a/sound/soc/codecs/wm8776.c b/sound/soc/codecs/wm8776.c
index f164cb6744c4..936ea24621b0 100644
--- a/sound/soc/codecs/wm8776.c
+++ b/sound/soc/codecs/wm8776.c
@@ -436,7 +436,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8776 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct of_device_id wm8776_of_match[] = {
diff --git a/sound/soc/codecs/wm8782.c b/sound/soc/codecs/wm8782.c
index f89855c616eb..95ff4339d103 100644
--- a/sound/soc/codecs/wm8782.c
+++ b/sound/soc/codecs/wm8782.c
@@ -99,7 +99,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8782 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int wm8782_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/wm8804.c b/sound/soc/codecs/wm8804.c
index 21bf0cfa1e7e..0b234bae480e 100644
--- a/sound/soc/codecs/wm8804.c
+++ b/sound/soc/codecs/wm8804.c
@@ -546,7 +546,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8804 = {
.num_dapm_routes = ARRAY_SIZE(wm8804_dapm_routes),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
const struct regmap_config wm8804_regmap_config = {
diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
index 84a3daf0c11e..d6420df3505d 100644
--- a/sound/soc/codecs/wm8900.c
+++ b/sound/soc/codecs/wm8900.c
@@ -1214,7 +1214,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8900 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config wm8900_regmap = {
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index 3c95c2aea515..54e0a7628cd5 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -1893,7 +1893,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8903 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config wm8903_regmap = {
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index 04bb8e392497..ca6a01a230af 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -2131,7 +2131,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8904 = {
.set_bias_level = wm8904_set_bias_level,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config wm8904_regmap = {
diff --git a/sound/soc/codecs/wm8940.c b/sound/soc/codecs/wm8940.c
index 589394d420ce..8dac9fd88547 100644
--- a/sound/soc/codecs/wm8940.c
+++ b/sound/soc/codecs/wm8940.c
@@ -734,7 +734,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8940 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config wm8940_regmap = {
diff --git a/sound/soc/codecs/wm8955.c b/sound/soc/codecs/wm8955.c
index 80e3cbd704ee..05ef45672ebc 100644
--- a/sound/soc/codecs/wm8955.c
+++ b/sound/soc/codecs/wm8955.c
@@ -952,7 +952,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8955 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config wm8955_regmap = {
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index 8c8f32b23083..37956516d997 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -1378,7 +1378,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8960 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config wm8960_regmap = {
diff --git a/sound/soc/codecs/wm8961.c b/sound/soc/codecs/wm8961.c
index 69eb731dbf4b..7dc6aaf65576 100644
--- a/sound/soc/codecs/wm8961.c
+++ b/sound/soc/codecs/wm8961.c
@@ -895,7 +895,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8961 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config wm8961_regmap = {
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 5cca89364280..398c448ea854 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -3502,7 +3502,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8962 = {
.set_pll = wm8962_set_fll,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
/* Improve power consumption for IN4 DC measurement mode */
diff --git a/sound/soc/codecs/wm8971.c b/sound/soc/codecs/wm8971.c
index 8a289b048e66..4db9248de54b 100644
--- a/sound/soc/codecs/wm8971.c
+++ b/sound/soc/codecs/wm8971.c
@@ -659,7 +659,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8971 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config wm8971_regmap = {
diff --git a/sound/soc/codecs/wm8974.c b/sound/soc/codecs/wm8974.c
index a8d7809f3f64..010a394c705c 100644
--- a/sound/soc/codecs/wm8974.c
+++ b/sound/soc/codecs/wm8974.c
@@ -682,7 +682,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8974 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int wm8974_i2c_probe(struct i2c_client *i2c)
diff --git a/sound/soc/codecs/wm8978.c b/sound/soc/codecs/wm8978.c
index 141f50bfec68..a682f8020eb6 100644
--- a/sound/soc/codecs/wm8978.c
+++ b/sound/soc/codecs/wm8978.c
@@ -1005,7 +1005,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8978 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config wm8978_regmap_config = {
diff --git a/sound/soc/codecs/wm8983.c b/sound/soc/codecs/wm8983.c
index ae89554d47bc..50e6ac6ccbe0 100644
--- a/sound/soc/codecs/wm8983.c
+++ b/sound/soc/codecs/wm8983.c
@@ -987,7 +987,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8983 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config wm8983_regmap = {
diff --git a/sound/soc/codecs/wm8985.c b/sound/soc/codecs/wm8985.c
index cf2c32eac773..751aa6730833 100644
--- a/sound/soc/codecs/wm8985.c
+++ b/sound/soc/codecs/wm8985.c
@@ -1116,7 +1116,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8985 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config wm8985_regmap = {
diff --git a/sound/soc/codecs/wm8988.c b/sound/soc/codecs/wm8988.c
index 27538d6598cf..5dbdf647cd97 100644
--- a/sound/soc/codecs/wm8988.c
+++ b/sound/soc/codecs/wm8988.c
@@ -823,7 +823,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8988 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config wm8988_regmap = {
diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c
index c9448a59c872..589af286f133 100644
--- a/sound/soc/codecs/wm8990.c
+++ b/sound/soc/codecs/wm8990.c
@@ -1217,7 +1217,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8990 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int wm8990_i2c_probe(struct i2c_client *i2c)
diff --git a/sound/soc/codecs/wm8991.c b/sound/soc/codecs/wm8991.c
index 998bc89bb7e1..30121993b7b4 100644
--- a/sound/soc/codecs/wm8991.c
+++ b/sound/soc/codecs/wm8991.c
@@ -1243,7 +1243,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8991 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config wm8991_regmap = {
diff --git a/sound/soc/codecs/wm8993.c b/sound/soc/codecs/wm8993.c
index f4da77ec9d6c..8db98b5a06bf 100644
--- a/sound/soc/codecs/wm8993.c
+++ b/sound/soc/codecs/wm8993.c
@@ -1621,7 +1621,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8993 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int wm8993_i2c_probe(struct i2c_client *i2c)
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index f117ec0c489f..d3cfd3788f2a 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -4614,7 +4614,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8994 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int wm8994_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/wm8995.c b/sound/soc/codecs/wm8995.c
index ea9727446707..eed48bf339f2 100644
--- a/sound/soc/codecs/wm8995.c
+++ b/sound/soc/codecs/wm8995.c
@@ -2182,7 +2182,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8995 = {
.num_dapm_routes = ARRAY_SIZE(wm8995_intercon),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config wm8995_regmap = {
diff --git a/sound/soc/codecs/wm8996.c b/sound/soc/codecs/wm8996.c
index f7bb27d1c76d..17f307a31046 100644
--- a/sound/soc/codecs/wm8996.c
+++ b/sound/soc/codecs/wm8996.c
@@ -2695,8 +2695,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8996 = {
.set_pll = wm8996_set_fll,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
-
};
#define WM8996_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
diff --git a/sound/soc/codecs/wm8997.c b/sound/soc/codecs/wm8997.c
index 38ef631d1a1f..210ad662fc26 100644
--- a/sound/soc/codecs/wm8997.c
+++ b/sound/soc/codecs/wm8997.c
@@ -1105,7 +1105,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8997 = {
.num_dapm_routes = ARRAY_SIZE(wm8997_dapm_routes),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int wm8997_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/wm8998.c b/sound/soc/codecs/wm8998.c
index ab5481187c71..79fc6bbaa3aa 100644
--- a/sound/soc/codecs/wm8998.c
+++ b/sound/soc/codecs/wm8998.c
@@ -1332,7 +1332,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8998 = {
.num_dapm_routes = ARRAY_SIZE(wm8998_dapm_routes),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int wm8998_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/wm9081.c b/sound/soc/codecs/wm9081.c
index 87b58448cea7..d5151877d0fa 100644
--- a/sound/soc/codecs/wm9081.c
+++ b/sound/soc/codecs/wm9081.c
@@ -1284,7 +1284,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm9081 = {
.num_dapm_routes = ARRAY_SIZE(wm9081_audio_paths),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config wm9081_regmap = {
diff --git a/sound/soc/codecs/wm9090.c b/sound/soc/codecs/wm9090.c
index f7d80f1e37a8..ef3524c3f07f 100644
--- a/sound/soc/codecs/wm9090.c
+++ b/sound/soc/codecs/wm9090.c
@@ -543,7 +543,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm9090 = {
.suspend_bias_off = 1,
.idle_bias_on = 1,
.use_pmdown_time = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config wm9090_regmap = {
diff --git a/sound/soc/codecs/wm9705.c b/sound/soc/codecs/wm9705.c
index 99fe8f316624..d04902ef1d5f 100644
--- a/sound/soc/codecs/wm9705.c
+++ b/sound/soc/codecs/wm9705.c
@@ -368,7 +368,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm9705 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int wm9705_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
index 7515c9d4006e..df9b7980706b 100644
--- a/sound/soc/codecs/wm9712.c
+++ b/sound/soc/codecs/wm9712.c
@@ -692,7 +692,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm9712 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int wm9712_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
index e0ce32dd4a81..5d2e54e06e30 100644
--- a/sound/soc/codecs/wm9713.c
+++ b/sound/soc/codecs/wm9713.c
@@ -1257,7 +1257,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm9713 = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int wm9713_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index a7784ac15dde..cfaa45ede916 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -675,21 +675,12 @@ static void wm_adsp_control_remove(struct cs_dsp_coeff_ctl *cs_ctl)
int wm_adsp_write_ctl(struct wm_adsp *dsp, const char *name, int type,
unsigned int alg, void *buf, size_t len)
{
- struct cs_dsp_coeff_ctl *cs_ctl;
+ struct cs_dsp_coeff_ctl *cs_ctl = cs_dsp_get_ctl(&dsp->cs_dsp, name, type, alg);
struct wm_coeff_ctl *ctl;
struct snd_kcontrol *kcontrol;
char ctl_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
int ret;
- cs_ctl = cs_dsp_get_ctl(&dsp->cs_dsp, name, type, alg);
- if (!cs_ctl)
- return -EINVAL;
-
- ctl = cs_ctl->priv;
-
- if (len > cs_ctl->len)
- return -EINVAL;
-
ret = cs_dsp_coeff_write_ctrl(cs_ctl, 0, buf, len);
if (ret)
return ret;
@@ -697,6 +688,8 @@ int wm_adsp_write_ctl(struct wm_adsp *dsp, const char *name, int type,
if (cs_ctl->flags & WMFW_CTL_FLAG_SYS)
return 0;
+ ctl = cs_ctl->priv;
+
if (dsp->component->name_prefix)
snprintf(ctl_name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN, "%s %s",
dsp->component->name_prefix, ctl->name);
@@ -720,16 +713,8 @@ EXPORT_SYMBOL_GPL(wm_adsp_write_ctl);
int wm_adsp_read_ctl(struct wm_adsp *dsp, const char *name, int type,
unsigned int alg, void *buf, size_t len)
{
- struct cs_dsp_coeff_ctl *cs_ctl;
-
- cs_ctl = cs_dsp_get_ctl(&dsp->cs_dsp, name, type, alg);
- if (!cs_ctl)
- return -EINVAL;
-
- if (len > cs_ctl->len)
- return -EINVAL;
-
- return cs_dsp_coeff_read_ctrl(cs_ctl, 0, buf, len);
+ return cs_dsp_coeff_read_ctrl(cs_dsp_get_ctl(&dsp->cs_dsp, name, type, alg),
+ 0, buf, len);
}
EXPORT_SYMBOL_GPL(wm_adsp_read_ctl);
diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c
index f3a56f3ce487..6c8b1db649b8 100644
--- a/sound/soc/codecs/wsa881x.c
+++ b/sound/soc/codecs/wsa881x.c
@@ -749,11 +749,9 @@ static int wsa881x_put_pa_gain(struct snd_kcontrol *kc,
unsigned int mask = (1 << fls(max)) - 1;
int val, ret, min_gain, max_gain;
- ret = pm_runtime_get_sync(comp->dev);
- if (ret < 0 && ret != -EACCES) {
- pm_runtime_put_noidle(comp->dev);
+ ret = pm_runtime_resume_and_get(comp->dev);
+ if (ret < 0 && ret != -EACCES)
return ret;
- }
max_gain = (max - ucontrol->value.integer.value[0]) & mask;
/*
@@ -1175,11 +1173,17 @@ static int __maybe_unused wsa881x_runtime_resume(struct device *dev)
struct sdw_slave *slave = dev_to_sdw_dev(dev);
struct regmap *regmap = dev_get_regmap(dev, NULL);
struct wsa881x_priv *wsa881x = dev_get_drvdata(dev);
+ unsigned long time;
gpiod_direction_output(wsa881x->sd_n, 1);
- wait_for_completion_timeout(&slave->initialization_complete,
- msecs_to_jiffies(WSA881X_PROBE_TIMEOUT));
+ time = wait_for_completion_timeout(&slave->initialization_complete,
+ msecs_to_jiffies(WSA881X_PROBE_TIMEOUT));
+ if (!time) {
+ dev_err(dev, "Initialization not complete, timed out\n");
+ gpiod_direction_output(wsa881x->sd_n, 0);
+ return -ETIMEDOUT;
+ }
regcache_cache_only(regmap, false);
regcache_sync(regmap);
diff --git a/sound/soc/codecs/wsa883x.c b/sound/soc/codecs/wsa883x.c
new file mode 100644
index 000000000000..63e1d7aa6137
--- /dev/null
+++ b/sound/soc/codecs/wsa883x.c
@@ -0,0 +1,1511 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/printk.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/soundwire/sdw.h>
+#include <linux/soundwire/sdw_registers.h>
+#include <linux/soundwire/sdw_type.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc-dapm.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+
+#define WSA883X_BASE 0x3000
+#define WSA883X_ANA_BG_TSADC_BASE (WSA883X_BASE + 0x00000001)
+#define WSA883X_REF_CTRL (WSA883X_ANA_BG_TSADC_BASE + 0x0000)
+#define WSA883X_TEST_CTL_0 (WSA883X_ANA_BG_TSADC_BASE + 0x0001)
+#define WSA883X_BIAS_0 (WSA883X_ANA_BG_TSADC_BASE + 0x0002)
+#define WSA883X_OP_CTL (WSA883X_ANA_BG_TSADC_BASE + 0x0003)
+#define WSA883X_IREF_CTL (WSA883X_ANA_BG_TSADC_BASE + 0x0004)
+#define WSA883X_ISENS_CTL (WSA883X_ANA_BG_TSADC_BASE + 0x0005)
+#define WSA883X_CLK_CTL (WSA883X_ANA_BG_TSADC_BASE + 0x0006)
+#define WSA883X_TEST_CTL_1 (WSA883X_ANA_BG_TSADC_BASE + 0x0007)
+#define WSA883X_BIAS_1 (WSA883X_ANA_BG_TSADC_BASE + 0x0008)
+#define WSA883X_ADC_CTL (WSA883X_ANA_BG_TSADC_BASE + 0x0009)
+#define WSA883X_DOUT_MSB (WSA883X_ANA_BG_TSADC_BASE + 0x000A)
+#define WSA883X_DOUT_LSB (WSA883X_ANA_BG_TSADC_BASE + 0x000B)
+#define WSA883X_VBAT_SNS (WSA883X_ANA_BG_TSADC_BASE + 0x000C)
+#define WSA883X_ITRIM_CODE (WSA883X_ANA_BG_TSADC_BASE + 0x000D)
+
+#define WSA883X_ANA_IVSENSE_BASE (WSA883X_BASE + 0x0000000F)
+#define WSA883X_EN (WSA883X_ANA_IVSENSE_BASE + 0x0000)
+#define WSA883X_OVERRIDE1 (WSA883X_ANA_IVSENSE_BASE + 0x0001)
+#define WSA883X_OVERRIDE2 (WSA883X_ANA_IVSENSE_BASE + 0x0002)
+#define WSA883X_VSENSE1 (WSA883X_ANA_IVSENSE_BASE + 0x0003)
+#define WSA883X_ISENSE1 (WSA883X_ANA_IVSENSE_BASE + 0x0004)
+#define WSA883X_ISENSE2 (WSA883X_ANA_IVSENSE_BASE + 0x0005)
+#define WSA883X_ISENSE_CAL (WSA883X_ANA_IVSENSE_BASE + 0x0006)
+#define WSA883X_MISC (WSA883X_ANA_IVSENSE_BASE + 0x0007)
+#define WSA883X_ADC_0 (WSA883X_ANA_IVSENSE_BASE + 0x0008)
+#define WSA883X_ADC_1 (WSA883X_ANA_IVSENSE_BASE + 0x0009)
+#define WSA883X_ADC_2 (WSA883X_ANA_IVSENSE_BASE + 0x000A)
+#define WSA883X_ADC_3 (WSA883X_ANA_IVSENSE_BASE + 0x000B)
+#define WSA883X_ADC_4 (WSA883X_ANA_IVSENSE_BASE + 0x000C)
+#define WSA883X_ADC_5 (WSA883X_ANA_IVSENSE_BASE + 0x000D)
+#define WSA883X_ADC_6 (WSA883X_ANA_IVSENSE_BASE + 0x000E)
+#define WSA883X_ADC_7 (WSA883X_ANA_IVSENSE_BASE + 0x000F)
+#define WSA883X_STATUS (WSA883X_ANA_IVSENSE_BASE + 0x0010)
+
+#define WSA883X_ANA_SPK_TOP_BASE (WSA883X_BASE + 0x00000025)
+#define WSA883X_DAC_CTRL_REG (WSA883X_ANA_SPK_TOP_BASE + 0x0000)
+#define WSA883X_DAC_EN_DEBUG_REG (WSA883X_ANA_SPK_TOP_BASE + 0x0001)
+#define WSA883X_DAC_OPAMP_BIAS1_REG (WSA883X_ANA_SPK_TOP_BASE + 0x0002)
+#define WSA883X_DAC_OPAMP_BIAS2_REG (WSA883X_ANA_SPK_TOP_BASE + 0x0003)
+#define WSA883X_DAC_VCM_CTRL_REG (WSA883X_ANA_SPK_TOP_BASE + 0x0004)
+#define WSA883X_DAC_VOLTAGE_CTRL_REG (WSA883X_ANA_SPK_TOP_BASE + 0x0005)
+#define WSA883X_ATEST1_REG (WSA883X_ANA_SPK_TOP_BASE + 0x0006)
+#define WSA883X_ATEST2_REG (WSA883X_ANA_SPK_TOP_BASE + 0x0007)
+#define WSA883X_SPKR_TOP_BIAS_REG1 (WSA883X_ANA_SPK_TOP_BASE + 0x0008)
+#define WSA883X_SPKR_TOP_BIAS_REG2 (WSA883X_ANA_SPK_TOP_BASE + 0x0009)
+#define WSA883X_SPKR_TOP_BIAS_REG3 (WSA883X_ANA_SPK_TOP_BASE + 0x000A)
+#define WSA883X_SPKR_TOP_BIAS_REG4 (WSA883X_ANA_SPK_TOP_BASE + 0x000B)
+#define WSA883X_SPKR_CLIP_DET_REG (WSA883X_ANA_SPK_TOP_BASE + 0x000C)
+#define WSA883X_SPKR_DRV_LF_BLK_EN (WSA883X_ANA_SPK_TOP_BASE + 0x000D)
+#define WSA883X_SPKR_DRV_LF_EN (WSA883X_ANA_SPK_TOP_BASE + 0x000E)
+#define WSA883X_SPKR_DRV_LF_MASK_DCC_CTL (WSA883X_ANA_SPK_TOP_BASE + 0x000F)
+#define WSA883X_SPKR_DRV_LF_MISC_CTL (WSA883X_ANA_SPK_TOP_BASE + 0x0010)
+#define WSA883X_SPKR_DRV_LF_REG_GAIN (WSA883X_ANA_SPK_TOP_BASE + 0x0011)
+#define WSA883X_SPKR_DRV_OS_CAL_CTL (WSA883X_ANA_SPK_TOP_BASE + 0x0012)
+#define WSA883X_SPKR_DRV_OS_CAL_CTL1 (WSA883X_ANA_SPK_TOP_BASE + 0x0013)
+#define WSA883X_SPKR_PWM_CLK_CTL (WSA883X_ANA_SPK_TOP_BASE + 0x0014)
+#define WSA883X_SPKR_PWM_FREQ_SEL_MASK BIT(3)
+#define WSA883X_SPKR_PWM_FREQ_F300KHZ 0
+#define WSA883X_SPKR_PWM_FREQ_F600KHZ 1
+#define WSA883X_SPKR_PDRV_HS_CTL (WSA883X_ANA_SPK_TOP_BASE + 0x0015)
+#define WSA883X_SPKR_PDRV_LS_CTL (WSA883X_ANA_SPK_TOP_BASE + 0x0016)
+#define WSA883X_SPKR_PWRSTG_DBG (WSA883X_ANA_SPK_TOP_BASE + 0x0017)
+#define WSA883X_SPKR_OCP_CTL (WSA883X_ANA_SPK_TOP_BASE + 0x0018)
+#define WSA883X_SPKR_BBM_CTL (WSA883X_ANA_SPK_TOP_BASE + 0x0019)
+#define WSA883X_PA_STATUS0 (WSA883X_ANA_SPK_TOP_BASE + 0x001A)
+#define WSA883X_PA_STATUS1 (WSA883X_ANA_SPK_TOP_BASE + 0x001B)
+#define WSA883X_PA_STATUS2 (WSA883X_ANA_SPK_TOP_BASE + 0x001C)
+
+#define WSA883X_ANA_BOOST_BASE (WSA883X_BASE + 0x00000043)
+#define WSA883X_EN_CTRL (WSA883X_ANA_BOOST_BASE + 0x0000)
+#define WSA883X_CURRENT_LIMIT (WSA883X_ANA_BOOST_BASE + 0x0001)
+#define WSA883X_IBIAS1 (WSA883X_ANA_BOOST_BASE + 0x0002)
+#define WSA883X_IBIAS2 (WSA883X_ANA_BOOST_BASE + 0x0003)
+#define WSA883X_IBIAS3 (WSA883X_ANA_BOOST_BASE + 0x0004)
+#define WSA883X_LDO_PROG (WSA883X_ANA_BOOST_BASE + 0x0005)
+#define WSA883X_STABILITY_CTRL1 (WSA883X_ANA_BOOST_BASE + 0x0006)
+#define WSA883X_STABILITY_CTRL2 (WSA883X_ANA_BOOST_BASE + 0x0007)
+#define WSA883X_PWRSTAGE_CTRL1 (WSA883X_ANA_BOOST_BASE + 0x0008)
+#define WSA883X_PWRSTAGE_CTRL2 (WSA883X_ANA_BOOST_BASE + 0x0009)
+#define WSA883X_BYPASS_1 (WSA883X_ANA_BOOST_BASE + 0x000A)
+#define WSA883X_BYPASS_2 (WSA883X_ANA_BOOST_BASE + 0x000B)
+#define WSA883X_ZX_CTRL_1 (WSA883X_ANA_BOOST_BASE + 0x000C)
+#define WSA883X_ZX_CTRL_2 (WSA883X_ANA_BOOST_BASE + 0x000D)
+#define WSA883X_MISC1 (WSA883X_ANA_BOOST_BASE + 0x000E)
+#define WSA883X_MISC2 (WSA883X_ANA_BOOST_BASE + 0x000F)
+#define WSA883X_GMAMP_SUP1 (WSA883X_ANA_BOOST_BASE + 0x0010)
+#define WSA883X_PWRSTAGE_CTRL3 (WSA883X_ANA_BOOST_BASE + 0x0011)
+#define WSA883X_PWRSTAGE_CTRL4 (WSA883X_ANA_BOOST_BASE + 0x0012)
+#define WSA883X_TEST1 (WSA883X_ANA_BOOST_BASE + 0x0013)
+#define WSA883X_SPARE1 (WSA883X_ANA_BOOST_BASE + 0x0014)
+#define WSA883X_SPARE2 (WSA883X_ANA_BOOST_BASE + 0x0015)
+
+#define WSA883X_ANA_PON_LDOL_BASE (WSA883X_BASE + 0x00000059)
+#define WSA883X_PON_CTL_0 (WSA883X_ANA_PON_LDOL_BASE + 0x0000)
+#define WSA883X_PON_CLT_1 (WSA883X_ANA_PON_LDOL_BASE + 0x0001)
+#define WSA883X_PON_CTL_2 (WSA883X_ANA_PON_LDOL_BASE + 0x0002)
+#define WSA883X_PON_CTL_3 (WSA883X_ANA_PON_LDOL_BASE + 0x0003)
+#define WSA883X_CKWD_CTL_0 (WSA883X_ANA_PON_LDOL_BASE + 0x0004)
+#define WSA883X_CKWD_CTL_1 (WSA883X_ANA_PON_LDOL_BASE + 0x0005)
+#define WSA883X_CKWD_CTL_2 (WSA883X_ANA_PON_LDOL_BASE + 0x0006)
+#define WSA883X_CKSK_CTL_0 (WSA883X_ANA_PON_LDOL_BASE + 0x0007)
+#define WSA883X_PADSW_CTL_0 (WSA883X_ANA_PON_LDOL_BASE + 0x0008)
+#define WSA883X_TEST_0 (WSA883X_ANA_PON_LDOL_BASE + 0x0009)
+#define WSA883X_TEST_1 (WSA883X_ANA_PON_LDOL_BASE + 0x000A)
+#define WSA883X_STATUS_0 (WSA883X_ANA_PON_LDOL_BASE + 0x000B)
+#define WSA883X_STATUS_1 (WSA883X_ANA_PON_LDOL_BASE + 0x000C)
+
+#define WSA883X_DIG_CTRL_BASE (WSA883X_BASE + 0x00000400)
+#define WSA883X_CHIP_ID0 (WSA883X_DIG_CTRL_BASE + 0x0001)
+#define WSA883X_CHIP_ID1 (WSA883X_DIG_CTRL_BASE + 0x0002)
+#define WSA883X_CHIP_ID2 (WSA883X_DIG_CTRL_BASE + 0x0003)
+#define WSA883X_CHIP_ID3 (WSA883X_DIG_CTRL_BASE + 0x0004)
+#define WSA883X_BUS_ID (WSA883X_DIG_CTRL_BASE + 0x0005)
+#define WSA883X_CDC_RST_CTL (WSA883X_DIG_CTRL_BASE + 0x0006)
+#define WSA883X_TOP_CLK_CFG (WSA883X_DIG_CTRL_BASE + 0x0007)
+#define WSA883X_CDC_PATH_MODE (WSA883X_DIG_CTRL_BASE + 0x0008)
+#define WSA883X_RXD_MODE_MASK BIT(1)
+#define WSA883X_RXD_MODE_NORMAL 0
+#define WSA883X_RXD_MODE_HIFI 1
+#define WSA883X_CDC_CLK_CTL (WSA883X_DIG_CTRL_BASE + 0x0009)
+#define WSA883X_SWR_RESET_EN (WSA883X_DIG_CTRL_BASE + 0x000A)
+#define WSA883X_RESET_CTL (WSA883X_DIG_CTRL_BASE + 0x000B)
+#define WSA883X_PA_FSM_CTL (WSA883X_DIG_CTRL_BASE + 0x0010)
+#define WSA883X_GLOBAL_PA_EN_MASK BIT(0)
+#define WSA883X_GLOBAL_PA_ENABLE 1
+#define WSA883X_PA_FSM_TIMER0 (WSA883X_DIG_CTRL_BASE + 0x0011)
+#define WSA883X_PA_FSM_TIMER1 (WSA883X_DIG_CTRL_BASE + 0x0012)
+#define WSA883X_PA_FSM_STA (WSA883X_DIG_CTRL_BASE + 0x0013)
+#define WSA883X_PA_FSM_ERR_COND (WSA883X_DIG_CTRL_BASE + 0x0014)
+#define WSA883X_PA_FSM_MSK (WSA883X_DIG_CTRL_BASE + 0x0015)
+#define WSA883X_PA_FSM_BYP (WSA883X_DIG_CTRL_BASE + 0x0016)
+#define WSA883X_PA_FSM_DBG (WSA883X_DIG_CTRL_BASE + 0x0017)
+#define WSA883X_TADC_VALUE_CTL (WSA883X_DIG_CTRL_BASE + 0x0020)
+#define WSA883X_TEMP_DETECT_CTL (WSA883X_DIG_CTRL_BASE + 0x0021)
+#define WSA883X_TEMP_MSB (WSA883X_DIG_CTRL_BASE + 0x0022)
+#define WSA883X_TEMP_LSB (WSA883X_DIG_CTRL_BASE + 0x0023)
+#define WSA883X_TEMP_CONFIG0 (WSA883X_DIG_CTRL_BASE + 0x0024)
+#define WSA883X_TEMP_CONFIG1 (WSA883X_DIG_CTRL_BASE + 0x0025)
+#define WSA883X_VBAT_ADC_FLT_CTL (WSA883X_DIG_CTRL_BASE + 0x0026)
+#define WSA883X_VBAT_ADC_FLT_EN_MASK BIT(0)
+#define WSA883X_VBAT_ADC_COEF_SEL_MASK GENMASK(3, 1)
+#define WSA883X_VBAT_ADC_COEF_F_1DIV2 0x0
+#define WSA883X_VBAT_ADC_COEF_F_1DIV16 0x3
+#define WSA883X_VBAT_DIN_MSB (WSA883X_DIG_CTRL_BASE + 0x0027)
+#define WSA883X_VBAT_DIN_LSB (WSA883X_DIG_CTRL_BASE + 0x0028)
+#define WSA883X_VBAT_DOUT (WSA883X_DIG_CTRL_BASE + 0x0029)
+#define WSA883X_SDM_PDM9_LSB (WSA883X_DIG_CTRL_BASE + 0x002A)
+#define WSA883X_SDM_PDM9_MSB (WSA883X_DIG_CTRL_BASE + 0x002B)
+#define WSA883X_CDC_RX_CTL (WSA883X_DIG_CTRL_BASE + 0x0030)
+#define WSA883X_CDC_SPK_DSM_A1_0 (WSA883X_DIG_CTRL_BASE + 0x0031)
+#define WSA883X_CDC_SPK_DSM_A1_1 (WSA883X_DIG_CTRL_BASE + 0x0032)
+#define WSA883X_CDC_SPK_DSM_A2_0 (WSA883X_DIG_CTRL_BASE + 0x0033)
+#define WSA883X_CDC_SPK_DSM_A2_1 (WSA883X_DIG_CTRL_BASE + 0x0034)
+#define WSA883X_CDC_SPK_DSM_A3_0 (WSA883X_DIG_CTRL_BASE + 0x0035)
+#define WSA883X_CDC_SPK_DSM_A3_1 (WSA883X_DIG_CTRL_BASE + 0x0036)
+#define WSA883X_CDC_SPK_DSM_A4_0 (WSA883X_DIG_CTRL_BASE + 0x0037)
+#define WSA883X_CDC_SPK_DSM_A4_1 (WSA883X_DIG_CTRL_BASE + 0x0038)
+#define WSA883X_CDC_SPK_DSM_A5_0 (WSA883X_DIG_CTRL_BASE + 0x0039)
+#define WSA883X_CDC_SPK_DSM_A5_1 (WSA883X_DIG_CTRL_BASE + 0x003A)
+#define WSA883X_CDC_SPK_DSM_A6_0 (WSA883X_DIG_CTRL_BASE + 0x003B)
+#define WSA883X_CDC_SPK_DSM_A7_0 (WSA883X_DIG_CTRL_BASE + 0x003C)
+#define WSA883X_CDC_SPK_DSM_C_0 (WSA883X_DIG_CTRL_BASE + 0x003D)
+#define WSA883X_CDC_SPK_DSM_C_1 (WSA883X_DIG_CTRL_BASE + 0x003E)
+#define WSA883X_CDC_SPK_DSM_C_2 (WSA883X_DIG_CTRL_BASE + 0x003F)
+#define WSA883X_CDC_SPK_DSM_C_3 (WSA883X_DIG_CTRL_BASE + 0x0040)
+#define WSA883X_CDC_SPK_DSM_R1 (WSA883X_DIG_CTRL_BASE + 0x0041)
+#define WSA883X_CDC_SPK_DSM_R2 (WSA883X_DIG_CTRL_BASE + 0x0042)
+#define WSA883X_CDC_SPK_DSM_R3 (WSA883X_DIG_CTRL_BASE + 0x0043)
+#define WSA883X_CDC_SPK_DSM_R4 (WSA883X_DIG_CTRL_BASE + 0x0044)
+#define WSA883X_CDC_SPK_DSM_R5 (WSA883X_DIG_CTRL_BASE + 0x0045)
+#define WSA883X_CDC_SPK_DSM_R6 (WSA883X_DIG_CTRL_BASE + 0x0046)
+#define WSA883X_CDC_SPK_DSM_R7 (WSA883X_DIG_CTRL_BASE + 0x0047)
+#define WSA883X_CDC_SPK_GAIN_PDM_0 (WSA883X_DIG_CTRL_BASE + 0x0048)
+#define WSA883X_CDC_SPK_GAIN_PDM_1 (WSA883X_DIG_CTRL_BASE + 0x0049)
+#define WSA883X_CDC_SPK_GAIN_PDM_2 (WSA883X_DIG_CTRL_BASE + 0x004A)
+#define WSA883X_PDM_WD_CTL (WSA883X_DIG_CTRL_BASE + 0x004B)
+#define WSA883X_PDM_EN_MASK BIT(0)
+#define WSA883X_PDM_ENABLE BIT(0)
+#define WSA883X_DEM_BYPASS_DATA0 (WSA883X_DIG_CTRL_BASE + 0x004C)
+#define WSA883X_DEM_BYPASS_DATA1 (WSA883X_DIG_CTRL_BASE + 0x004D)
+#define WSA883X_DEM_BYPASS_DATA2 (WSA883X_DIG_CTRL_BASE + 0x004E)
+#define WSA883X_DEM_BYPASS_DATA3 (WSA883X_DIG_CTRL_BASE + 0x004F)
+#define WSA883X_WAVG_CTL (WSA883X_DIG_CTRL_BASE + 0x0050)
+#define WSA883X_WAVG_LRA_PER_0 (WSA883X_DIG_CTRL_BASE + 0x0051)
+#define WSA883X_WAVG_LRA_PER_1 (WSA883X_DIG_CTRL_BASE + 0x0052)
+#define WSA883X_WAVG_DELTA_THETA_0 (WSA883X_DIG_CTRL_BASE + 0x0053)
+#define WSA883X_WAVG_DELTA_THETA_1 (WSA883X_DIG_CTRL_BASE + 0x0054)
+#define WSA883X_WAVG_DIRECT_AMP_0 (WSA883X_DIG_CTRL_BASE + 0x0055)
+#define WSA883X_WAVG_DIRECT_AMP_1 (WSA883X_DIG_CTRL_BASE + 0x0056)
+#define WSA883X_WAVG_PTRN_AMP0_0 (WSA883X_DIG_CTRL_BASE + 0x0057)
+#define WSA883X_WAVG_PTRN_AMP0_1 (WSA883X_DIG_CTRL_BASE + 0x0058)
+#define WSA883X_WAVG_PTRN_AMP1_0 (WSA883X_DIG_CTRL_BASE + 0x0059)
+#define WSA883X_WAVG_PTRN_AMP1_1 (WSA883X_DIG_CTRL_BASE + 0x005A)
+#define WSA883X_WAVG_PTRN_AMP2_0 (WSA883X_DIG_CTRL_BASE + 0x005B)
+#define WSA883X_WAVG_PTRN_AMP2_1 (WSA883X_DIG_CTRL_BASE + 0x005C)
+#define WSA883X_WAVG_PTRN_AMP3_0 (WSA883X_DIG_CTRL_BASE + 0x005D)
+#define WSA883X_WAVG_PTRN_AMP3_1 (WSA883X_DIG_CTRL_BASE + 0x005E)
+#define WSA883X_WAVG_PTRN_AMP4_0 (WSA883X_DIG_CTRL_BASE + 0x005F)
+#define WSA883X_WAVG_PTRN_AMP4_1 (WSA883X_DIG_CTRL_BASE + 0x0060)
+#define WSA883X_WAVG_PTRN_AMP5_0 (WSA883X_DIG_CTRL_BASE + 0x0061)
+#define WSA883X_WAVG_PTRN_AMP5_1 (WSA883X_DIG_CTRL_BASE + 0x0062)
+#define WSA883X_WAVG_PTRN_AMP6_0 (WSA883X_DIG_CTRL_BASE + 0x0063)
+#define WSA883X_WAVG_PTRN_AMP6_1 (WSA883X_DIG_CTRL_BASE + 0x0064)
+#define WSA883X_WAVG_PTRN_AMP7_0 (WSA883X_DIG_CTRL_BASE + 0x0065)
+#define WSA883X_WAVG_PTRN_AMP7_1 (WSA883X_DIG_CTRL_BASE + 0x0066)
+#define WSA883X_WAVG_PER_0_1 (WSA883X_DIG_CTRL_BASE + 0x0067)
+#define WSA883X_WAVG_PER_2_3 (WSA883X_DIG_CTRL_BASE + 0x0068)
+#define WSA883X_WAVG_PER_4_5 (WSA883X_DIG_CTRL_BASE + 0x0069)
+#define WSA883X_WAVG_PER_6_7 (WSA883X_DIG_CTRL_BASE + 0x006A)
+#define WSA883X_WAVG_STA (WSA883X_DIG_CTRL_BASE + 0x006B)
+#define WSA883X_DRE_CTL_0 (WSA883X_DIG_CTRL_BASE + 0x006C)
+#define WSA883X_DRE_OFFSET_MASK GENMASK(2, 0)
+#define WSA883X_DRE_PROG_DELAY_MASK GENMASK(7, 4)
+#define WSA883X_DRE_CTL_1 (WSA883X_DIG_CTRL_BASE + 0x006D)
+#define WSA883X_DRE_GAIN_EN_MASK BIT(0)
+#define WSA883X_DRE_GAIN_FROM_CSR 1
+#define WSA883X_DRE_IDLE_DET_CTL (WSA883X_DIG_CTRL_BASE + 0x006E)
+#define WSA883X_CLSH_CTL_0 (WSA883X_DIG_CTRL_BASE + 0x0070)
+#define WSA883X_CLSH_CTL_1 (WSA883X_DIG_CTRL_BASE + 0x0071)
+#define WSA883X_CLSH_V_HD_PA (WSA883X_DIG_CTRL_BASE + 0x0072)
+#define WSA883X_CLSH_V_PA_MIN (WSA883X_DIG_CTRL_BASE + 0x0073)
+#define WSA883X_CLSH_OVRD_VAL (WSA883X_DIG_CTRL_BASE + 0x0074)
+#define WSA883X_CLSH_HARD_MAX (WSA883X_DIG_CTRL_BASE + 0x0075)
+#define WSA883X_CLSH_SOFT_MAX (WSA883X_DIG_CTRL_BASE + 0x0076)
+#define WSA883X_CLSH_SIG_DP (WSA883X_DIG_CTRL_BASE + 0x0077)
+#define WSA883X_TAGC_CTL (WSA883X_DIG_CTRL_BASE + 0x0078)
+#define WSA883X_TAGC_TIME (WSA883X_DIG_CTRL_BASE + 0x0079)
+#define WSA883X_TAGC_E2E_GAIN (WSA883X_DIG_CTRL_BASE + 0x007A)
+#define WSA883X_TAGC_FORCE_VAL (WSA883X_DIG_CTRL_BASE + 0x007B)
+#define WSA883X_VAGC_CTL (WSA883X_DIG_CTRL_BASE + 0x007C)
+#define WSA883X_VAGC_TIME (WSA883X_DIG_CTRL_BASE + 0x007D)
+#define WSA883X_VAGC_ATTN_LVL_1_2 (WSA883X_DIG_CTRL_BASE + 0x007E)
+#define WSA883X_VAGC_ATTN_LVL_3 (WSA883X_DIG_CTRL_BASE + 0x007F)
+#define WSA883X_INTR_MODE (WSA883X_DIG_CTRL_BASE + 0x0080)
+#define WSA883X_INTR_MASK0 (WSA883X_DIG_CTRL_BASE + 0x0081)
+#define WSA883X_INTR_MASK1 (WSA883X_DIG_CTRL_BASE + 0x0082)
+#define WSA883X_INTR_STATUS0 (WSA883X_DIG_CTRL_BASE + 0x0083)
+#define WSA883X_INTR_STATUS1 (WSA883X_DIG_CTRL_BASE + 0x0084)
+#define WSA883X_INTR_CLEAR0 (WSA883X_DIG_CTRL_BASE + 0x0085)
+#define WSA883X_INTR_CLEAR1 (WSA883X_DIG_CTRL_BASE + 0x0086)
+#define WSA883X_INTR_LEVEL0 (WSA883X_DIG_CTRL_BASE + 0x0087)
+#define WSA883X_INTR_LEVEL1 (WSA883X_DIG_CTRL_BASE + 0x0088)
+#define WSA883X_INTR_SET0 (WSA883X_DIG_CTRL_BASE + 0x0089)
+#define WSA883X_INTR_SET1 (WSA883X_DIG_CTRL_BASE + 0x008A)
+#define WSA883X_INTR_TEST0 (WSA883X_DIG_CTRL_BASE + 0x008B)
+#define WSA883X_INTR_TEST1 (WSA883X_DIG_CTRL_BASE + 0x008C)
+#define WSA883X_OTP_CTRL0 (WSA883X_DIG_CTRL_BASE + 0x0090)
+#define WSA883X_OTP_CTRL1 (WSA883X_DIG_CTRL_BASE + 0x0091)
+#define WSA883X_HDRIVE_CTL_GROUP1 (WSA883X_DIG_CTRL_BASE + 0x0092)
+#define WSA883X_PIN_CTL (WSA883X_DIG_CTRL_BASE + 0x0093)
+#define WSA883X_PIN_CTL_OE (WSA883X_DIG_CTRL_BASE + 0x0094)
+#define WSA883X_PIN_WDATA_IOPAD (WSA883X_DIG_CTRL_BASE + 0x0095)
+#define WSA883X_PIN_STATUS (WSA883X_DIG_CTRL_BASE + 0x0096)
+#define WSA883X_I2C_SLAVE_CTL (WSA883X_DIG_CTRL_BASE + 0x0097)
+#define WSA883X_PDM_TEST_MODE (WSA883X_DIG_CTRL_BASE + 0x00A0)
+#define WSA883X_ATE_TEST_MODE (WSA883X_DIG_CTRL_BASE + 0x00A1)
+#define WSA883X_DIG_DEBUG_MODE (WSA883X_DIG_CTRL_BASE + 0x00A3)
+#define WSA883X_DIG_DEBUG_SEL (WSA883X_DIG_CTRL_BASE + 0x00A4)
+#define WSA883X_DIG_DEBUG_EN (WSA883X_DIG_CTRL_BASE + 0x00A5)
+#define WSA883X_SWR_HM_TEST0 (WSA883X_DIG_CTRL_BASE + 0x00A6)
+#define WSA883X_SWR_HM_TEST1 (WSA883X_DIG_CTRL_BASE + 0x00A7)
+#define WSA883X_SWR_PAD_CTL (WSA883X_DIG_CTRL_BASE + 0x00A8)
+#define WSA883X_TADC_DETECT_DBG_CTL (WSA883X_DIG_CTRL_BASE + 0x00A9)
+#define WSA883X_TADC_DEBUG_MSB (WSA883X_DIG_CTRL_BASE + 0x00AA)
+#define WSA883X_TADC_DEBUG_LSB (WSA883X_DIG_CTRL_BASE + 0x00AB)
+#define WSA883X_SAMPLE_EDGE_SEL (WSA883X_DIG_CTRL_BASE + 0x00AC)
+#define WSA883X_SWR_EDGE_SEL (WSA883X_DIG_CTRL_BASE + 0x00AD)
+#define WSA883X_TEST_MODE_CTL (WSA883X_DIG_CTRL_BASE + 0x00AE)
+#define WSA883X_IOPAD_CTL (WSA883X_DIG_CTRL_BASE + 0x00AF)
+#define WSA883X_ANA_CSR_DBG_ADD (WSA883X_DIG_CTRL_BASE + 0x00B0)
+#define WSA883X_ANA_CSR_DBG_CTL (WSA883X_DIG_CTRL_BASE + 0x00B1)
+#define WSA883X_SPARE_R (WSA883X_DIG_CTRL_BASE + 0x00BC)
+#define WSA883X_SPARE_0 (WSA883X_DIG_CTRL_BASE + 0x00BD)
+#define WSA883X_SPARE_1 (WSA883X_DIG_CTRL_BASE + 0x00BE)
+#define WSA883X_SPARE_2 (WSA883X_DIG_CTRL_BASE + 0x00BF)
+#define WSA883X_SCODE (WSA883X_DIG_CTRL_BASE + 0x00C0)
+
+#define WSA883X_DIG_TRIM_BASE (WSA883X_BASE + 0x00000500)
+#define WSA883X_OTP_REG_0 (WSA883X_DIG_TRIM_BASE + 0x0080)
+#define WSA883X_ID_MASK GENMASK(3, 0)
+#define WSA883X_OTP_REG_1 (WSA883X_DIG_TRIM_BASE + 0x0081)
+#define WSA883X_OTP_REG_2 (WSA883X_DIG_TRIM_BASE + 0x0082)
+#define WSA883X_OTP_REG_3 (WSA883X_DIG_TRIM_BASE + 0x0083)
+#define WSA883X_OTP_REG_4 (WSA883X_DIG_TRIM_BASE + 0x0084)
+#define WSA883X_OTP_REG_5 (WSA883X_DIG_TRIM_BASE + 0x0085)
+#define WSA883X_OTP_REG_6 (WSA883X_DIG_TRIM_BASE + 0x0086)
+#define WSA883X_OTP_REG_7 (WSA883X_DIG_TRIM_BASE + 0x0087)
+#define WSA883X_OTP_REG_8 (WSA883X_DIG_TRIM_BASE + 0x0088)
+#define WSA883X_OTP_REG_9 (WSA883X_DIG_TRIM_BASE + 0x0089)
+#define WSA883X_OTP_REG_10 (WSA883X_DIG_TRIM_BASE + 0x008A)
+#define WSA883X_OTP_REG_11 (WSA883X_DIG_TRIM_BASE + 0x008B)
+#define WSA883X_OTP_REG_12 (WSA883X_DIG_TRIM_BASE + 0x008C)
+#define WSA883X_OTP_REG_13 (WSA883X_DIG_TRIM_BASE + 0x008D)
+#define WSA883X_OTP_REG_14 (WSA883X_DIG_TRIM_BASE + 0x008E)
+#define WSA883X_OTP_REG_15 (WSA883X_DIG_TRIM_BASE + 0x008F)
+#define WSA883X_OTP_REG_16 (WSA883X_DIG_TRIM_BASE + 0x0090)
+#define WSA883X_OTP_REG_17 (WSA883X_DIG_TRIM_BASE + 0x0091)
+#define WSA883X_OTP_REG_18 (WSA883X_DIG_TRIM_BASE + 0x0092)
+#define WSA883X_OTP_REG_19 (WSA883X_DIG_TRIM_BASE + 0x0093)
+#define WSA883X_OTP_REG_20 (WSA883X_DIG_TRIM_BASE + 0x0094)
+#define WSA883X_OTP_REG_21 (WSA883X_DIG_TRIM_BASE + 0x0095)
+#define WSA883X_OTP_REG_22 (WSA883X_DIG_TRIM_BASE + 0x0096)
+#define WSA883X_OTP_REG_23 (WSA883X_DIG_TRIM_BASE + 0x0097)
+#define WSA883X_OTP_REG_24 (WSA883X_DIG_TRIM_BASE + 0x0098)
+#define WSA883X_OTP_REG_25 (WSA883X_DIG_TRIM_BASE + 0x0099)
+#define WSA883X_OTP_REG_26 (WSA883X_DIG_TRIM_BASE + 0x009A)
+#define WSA883X_OTP_REG_27 (WSA883X_DIG_TRIM_BASE + 0x009B)
+#define WSA883X_OTP_REG_28 (WSA883X_DIG_TRIM_BASE + 0x009C)
+#define WSA883X_OTP_REG_29 (WSA883X_DIG_TRIM_BASE + 0x009D)
+#define WSA883X_OTP_REG_30 (WSA883X_DIG_TRIM_BASE + 0x009E)
+#define WSA883X_OTP_REG_31 (WSA883X_DIG_TRIM_BASE + 0x009F)
+#define WSA883X_OTP_REG_32 (WSA883X_DIG_TRIM_BASE + 0x00A0)
+#define WSA883X_OTP_REG_33 (WSA883X_DIG_TRIM_BASE + 0x00A1)
+#define WSA883X_OTP_REG_34 (WSA883X_DIG_TRIM_BASE + 0x00A2)
+#define WSA883X_OTP_REG_35 (WSA883X_DIG_TRIM_BASE + 0x00A3)
+#define WSA883X_OTP_REG_63 (WSA883X_DIG_TRIM_BASE + 0x00BF)
+
+#define WSA883X_DIG_EMEM_BASE (WSA883X_BASE + 0x000005C0)
+#define WSA883X_EMEM_0 (WSA883X_DIG_EMEM_BASE + 0x0000)
+#define WSA883X_EMEM_1 (WSA883X_DIG_EMEM_BASE + 0x0001)
+#define WSA883X_EMEM_2 (WSA883X_DIG_EMEM_BASE + 0x0002)
+#define WSA883X_EMEM_3 (WSA883X_DIG_EMEM_BASE + 0x0003)
+#define WSA883X_EMEM_4 (WSA883X_DIG_EMEM_BASE + 0x0004)
+#define WSA883X_EMEM_5 (WSA883X_DIG_EMEM_BASE + 0x0005)
+#define WSA883X_EMEM_6 (WSA883X_DIG_EMEM_BASE + 0x0006)
+#define WSA883X_EMEM_7 (WSA883X_DIG_EMEM_BASE + 0x0007)
+#define WSA883X_EMEM_8 (WSA883X_DIG_EMEM_BASE + 0x0008)
+#define WSA883X_EMEM_9 (WSA883X_DIG_EMEM_BASE + 0x0009)
+#define WSA883X_EMEM_10 (WSA883X_DIG_EMEM_BASE + 0x000A)
+#define WSA883X_EMEM_11 (WSA883X_DIG_EMEM_BASE + 0x000B)
+#define WSA883X_EMEM_12 (WSA883X_DIG_EMEM_BASE + 0x000C)
+#define WSA883X_EMEM_13 (WSA883X_DIG_EMEM_BASE + 0x000D)
+#define WSA883X_EMEM_14 (WSA883X_DIG_EMEM_BASE + 0x000E)
+#define WSA883X_EMEM_15 (WSA883X_DIG_EMEM_BASE + 0x000F)
+#define WSA883X_EMEM_16 (WSA883X_DIG_EMEM_BASE + 0x0010)
+#define WSA883X_EMEM_17 (WSA883X_DIG_EMEM_BASE + 0x0011)
+#define WSA883X_EMEM_18 (WSA883X_DIG_EMEM_BASE + 0x0012)
+#define WSA883X_EMEM_19 (WSA883X_DIG_EMEM_BASE + 0x0013)
+#define WSA883X_EMEM_20 (WSA883X_DIG_EMEM_BASE + 0x0014)
+#define WSA883X_EMEM_21 (WSA883X_DIG_EMEM_BASE + 0x0015)
+#define WSA883X_EMEM_22 (WSA883X_DIG_EMEM_BASE + 0x0016)
+#define WSA883X_EMEM_23 (WSA883X_DIG_EMEM_BASE + 0x0017)
+#define WSA883X_EMEM_24 (WSA883X_DIG_EMEM_BASE + 0x0018)
+#define WSA883X_EMEM_25 (WSA883X_DIG_EMEM_BASE + 0x0019)
+#define WSA883X_EMEM_26 (WSA883X_DIG_EMEM_BASE + 0x001A)
+#define WSA883X_EMEM_27 (WSA883X_DIG_EMEM_BASE + 0x001B)
+#define WSA883X_EMEM_28 (WSA883X_DIG_EMEM_BASE + 0x001C)
+#define WSA883X_EMEM_29 (WSA883X_DIG_EMEM_BASE + 0x001D)
+#define WSA883X_EMEM_30 (WSA883X_DIG_EMEM_BASE + 0x001E)
+#define WSA883X_EMEM_31 (WSA883X_DIG_EMEM_BASE + 0x001F)
+#define WSA883X_EMEM_32 (WSA883X_DIG_EMEM_BASE + 0x0020)
+#define WSA883X_EMEM_33 (WSA883X_DIG_EMEM_BASE + 0x0021)
+#define WSA883X_EMEM_34 (WSA883X_DIG_EMEM_BASE + 0x0022)
+#define WSA883X_EMEM_35 (WSA883X_DIG_EMEM_BASE + 0x0023)
+#define WSA883X_EMEM_36 (WSA883X_DIG_EMEM_BASE + 0x0024)
+#define WSA883X_EMEM_37 (WSA883X_DIG_EMEM_BASE + 0x0025)
+#define WSA883X_EMEM_38 (WSA883X_DIG_EMEM_BASE + 0x0026)
+#define WSA883X_EMEM_39 (WSA883X_DIG_EMEM_BASE + 0x0027)
+#define WSA883X_EMEM_40 (WSA883X_DIG_EMEM_BASE + 0x0028)
+#define WSA883X_EMEM_41 (WSA883X_DIG_EMEM_BASE + 0x0029)
+#define WSA883X_EMEM_42 (WSA883X_DIG_EMEM_BASE + 0x002A)
+#define WSA883X_EMEM_43 (WSA883X_DIG_EMEM_BASE + 0x002B)
+#define WSA883X_EMEM_44 (WSA883X_DIG_EMEM_BASE + 0x002C)
+#define WSA883X_EMEM_45 (WSA883X_DIG_EMEM_BASE + 0x002D)
+#define WSA883X_EMEM_46 (WSA883X_DIG_EMEM_BASE + 0x002E)
+#define WSA883X_EMEM_47 (WSA883X_DIG_EMEM_BASE + 0x002F)
+#define WSA883X_EMEM_48 (WSA883X_DIG_EMEM_BASE + 0x0030)
+#define WSA883X_EMEM_49 (WSA883X_DIG_EMEM_BASE + 0x0031)
+#define WSA883X_EMEM_50 (WSA883X_DIG_EMEM_BASE + 0x0032)
+#define WSA883X_EMEM_51 (WSA883X_DIG_EMEM_BASE + 0x0033)
+#define WSA883X_EMEM_52 (WSA883X_DIG_EMEM_BASE + 0x0034)
+#define WSA883X_EMEM_53 (WSA883X_DIG_EMEM_BASE + 0x0035)
+#define WSA883X_EMEM_54 (WSA883X_DIG_EMEM_BASE + 0x0036)
+#define WSA883X_EMEM_55 (WSA883X_DIG_EMEM_BASE + 0x0037)
+#define WSA883X_EMEM_56 (WSA883X_DIG_EMEM_BASE + 0x0038)
+#define WSA883X_EMEM_57 (WSA883X_DIG_EMEM_BASE + 0x0039)
+#define WSA883X_EMEM_58 (WSA883X_DIG_EMEM_BASE + 0x003A)
+#define WSA883X_EMEM_59 (WSA883X_DIG_EMEM_BASE + 0x003B)
+#define WSA883X_EMEM_60 (WSA883X_DIG_EMEM_BASE + 0x003C)
+#define WSA883X_EMEM_61 (WSA883X_DIG_EMEM_BASE + 0x003D)
+#define WSA883X_EMEM_62 (WSA883X_DIG_EMEM_BASE + 0x003E)
+#define WSA883X_EMEM_63 (WSA883X_DIG_EMEM_BASE + 0x003F)
+
+#define WSA883X_NUM_REGISTERS (WSA883X_EMEM_63 + 1)
+#define WSA883X_MAX_REGISTER (WSA883X_NUM_REGISTERS - 1)
+#define WSA883X_PROBE_TIMEOUT 1000
+
+#define WSA883X_VERSION_1_0 0
+#define WSA883X_VERSION_1_1 1
+
+#define WSA883X_MAX_SWR_PORTS 4
+#define WSA883X_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |\
+ SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_192000 |\
+ SNDRV_PCM_RATE_384000)
+/* Fractional Rates */
+#define WSA883X_FRAC_RATES (SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_88200 |\
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800)
+
+#define WSA883X_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+ SNDRV_PCM_FMTBIT_S24_LE |\
+ SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+struct wsa883x_priv {
+ struct regmap *regmap;
+ struct device *dev;
+ struct regulator *vdd;
+ struct sdw_slave *slave;
+ struct sdw_stream_config sconfig;
+ struct sdw_stream_runtime *sruntime;
+ struct sdw_port_config port_config[WSA883X_MAX_SWR_PORTS];
+ struct gpio_desc *sd_n;
+ bool port_prepared[WSA883X_MAX_SWR_PORTS];
+ bool port_enable[WSA883X_MAX_SWR_PORTS];
+ int version;
+ int variant;
+ int active_ports;
+ int dev_mode;
+ int comp_offset;
+};
+
+enum {
+ WSA8830 = 0,
+ WSA8835,
+ WSA8832,
+ WSA8835_V2 = 5,
+};
+
+enum {
+ COMP_OFFSET0,
+ COMP_OFFSET1,
+ COMP_OFFSET2,
+ COMP_OFFSET3,
+ COMP_OFFSET4,
+};
+
+enum wsa_port_ids {
+ WSA883X_PORT_DAC,
+ WSA883X_PORT_COMP,
+ WSA883X_PORT_BOOST,
+ WSA883X_PORT_VISENSE,
+};
+
+static const char * const wsa_dev_mode_text[] = {
+ "Speaker", "Receiver", "Ultrasound"
+};
+
+enum {
+ SPEAKER,
+ RECEIVER,
+ ULTRASOUND,
+};
+
+static const struct soc_enum wsa_dev_mode_enum =
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(wsa_dev_mode_text), wsa_dev_mode_text);
+
+/* 4 ports */
+static struct sdw_dpn_prop wsa_sink_dpn_prop[WSA883X_MAX_SWR_PORTS] = {
+ {
+ /* DAC */
+ .num = 1,
+ .type = SDW_DPN_SIMPLE,
+ .min_ch = 1,
+ .max_ch = 1,
+ .simple_ch_prep_sm = true,
+ .read_only_wordlength = true,
+ }, {
+ /* COMP */
+ .num = 2,
+ .type = SDW_DPN_SIMPLE,
+ .min_ch = 1,
+ .max_ch = 1,
+ .simple_ch_prep_sm = true,
+ .read_only_wordlength = true,
+ }, {
+ /* BOOST */
+ .num = 3,
+ .type = SDW_DPN_SIMPLE,
+ .min_ch = 1,
+ .max_ch = 1,
+ .simple_ch_prep_sm = true,
+ .read_only_wordlength = true,
+ }, {
+ /* VISENSE */
+ .num = 4,
+ .type = SDW_DPN_SIMPLE,
+ .min_ch = 1,
+ .max_ch = 1,
+ .simple_ch_prep_sm = true,
+ .read_only_wordlength = true,
+ }
+};
+
+static struct sdw_port_config wsa883x_pconfig[WSA883X_MAX_SWR_PORTS] = {
+ {
+ .num = 1,
+ .ch_mask = 0x1,
+ }, {
+ .num = 2,
+ .ch_mask = 0xf,
+ }, {
+ .num = 3,
+ .ch_mask = 0x3,
+ }, { /* IV feedback */
+ .num = 4,
+ .ch_mask = 0x3,
+ },
+};
+
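+/* Power-on reset values, used to seed the regmap cache */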
+static struct reg_default wsa883x_defaults[] = {
+ { WSA883X_REF_CTRL, 0xD5 },
+ { WSA883X_TEST_CTL_0, 0x06 },
+ { WSA883X_BIAS_0, 0xD2 },
+ { WSA883X_OP_CTL, 0xE0 },
+ { WSA883X_IREF_CTL, 0x57 },
+ { WSA883X_ISENS_CTL, 0x47 },
+ { WSA883X_CLK_CTL, 0x87 },
+ { WSA883X_TEST_CTL_1, 0x00 },
+ { WSA883X_BIAS_1, 0x51 },
+ { WSA883X_ADC_CTL, 0x01 },
+ { WSA883X_DOUT_MSB, 0x00 },
+ { WSA883X_DOUT_LSB, 0x00 },
+ { WSA883X_VBAT_SNS, 0x40 },
+ { WSA883X_ITRIM_CODE, 0x9F },
+ { WSA883X_EN, 0x20 },
+ { WSA883X_OVERRIDE1, 0x00 },
+ { WSA883X_OVERRIDE2, 0x08 },
+ { WSA883X_VSENSE1, 0xD3 },
+ { WSA883X_ISENSE1, 0xD4 },
+ { WSA883X_ISENSE2, 0x20 },
+ { WSA883X_ISENSE_CAL, 0x00 },
+ { WSA883X_MISC, 0x08 },
+ { WSA883X_ADC_0, 0x00 },
+ { WSA883X_ADC_1, 0x00 },
+ { WSA883X_ADC_2, 0x40 },
+ { WSA883X_ADC_3, 0x80 },
+ { WSA883X_ADC_4, 0x25 },
+ { WSA883X_ADC_5, 0x25 },
+ { WSA883X_ADC_6, 0x08 },
+ { WSA883X_ADC_7, 0x81 },
+ { WSA883X_STATUS, 0x00 },
+ { WSA883X_DAC_CTRL_REG, 0x53 },
+ { WSA883X_DAC_EN_DEBUG_REG, 0x00 },
+ { WSA883X_DAC_OPAMP_BIAS1_REG, 0x48 },
+ { WSA883X_DAC_OPAMP_BIAS2_REG, 0x48 },
+ { WSA883X_DAC_VCM_CTRL_REG, 0x88 },
+ { WSA883X_DAC_VOLTAGE_CTRL_REG, 0xA5 },
+ { WSA883X_ATEST1_REG, 0x00 },
+ { WSA883X_ATEST2_REG, 0x00 },
+ { WSA883X_SPKR_TOP_BIAS_REG1, 0x6A },
+ { WSA883X_SPKR_TOP_BIAS_REG2, 0x65 },
+ { WSA883X_SPKR_TOP_BIAS_REG3, 0x55 },
+ { WSA883X_SPKR_TOP_BIAS_REG4, 0xA9 },
+ { WSA883X_SPKR_CLIP_DET_REG, 0x9C },
+ { WSA883X_SPKR_DRV_LF_BLK_EN, 0x0F },
+ { WSA883X_SPKR_DRV_LF_EN, 0x0A },
+ { WSA883X_SPKR_DRV_LF_MASK_DCC_CTL, 0x00 },
+ { WSA883X_SPKR_DRV_LF_MISC_CTL, 0x3A },
+ { WSA883X_SPKR_DRV_LF_REG_GAIN, 0x00 },
+ { WSA883X_SPKR_DRV_OS_CAL_CTL, 0x00 },
+ { WSA883X_SPKR_DRV_OS_CAL_CTL1, 0x90 },
+ { WSA883X_SPKR_PWM_CLK_CTL, 0x00 },
+ { WSA883X_SPKR_PDRV_HS_CTL, 0x52 },
+ { WSA883X_SPKR_PDRV_LS_CTL, 0x48 },
+ { WSA883X_SPKR_PWRSTG_DBG, 0x08 },
+ { WSA883X_SPKR_OCP_CTL, 0xE2 },
+ { WSA883X_SPKR_BBM_CTL, 0x92 },
+ { WSA883X_PA_STATUS0, 0x00 },
+ { WSA883X_PA_STATUS1, 0x00 },
+ { WSA883X_PA_STATUS2, 0x80 },
+ { WSA883X_EN_CTRL, 0x44 },
+ { WSA883X_CURRENT_LIMIT, 0xCC },
+ { WSA883X_IBIAS1, 0x00 },
+ { WSA883X_IBIAS2, 0x00 },
+ { WSA883X_IBIAS3, 0x00 },
+ { WSA883X_LDO_PROG, 0x02 },
+ { WSA883X_STABILITY_CTRL1, 0x8E },
+ { WSA883X_STABILITY_CTRL2, 0x10 },
+ { WSA883X_PWRSTAGE_CTRL1, 0x06 },
+ { WSA883X_PWRSTAGE_CTRL2, 0x00 },
+ { WSA883X_BYPASS_1, 0x19 },
+ { WSA883X_BYPASS_2, 0x13 },
+ { WSA883X_ZX_CTRL_1, 0xF0 },
+ { WSA883X_ZX_CTRL_2, 0x04 },
+ { WSA883X_MISC1, 0x06 },
+ { WSA883X_MISC2, 0xA0 },
+ { WSA883X_GMAMP_SUP1, 0x82 },
+ { WSA883X_PWRSTAGE_CTRL3, 0x39 },
+ { WSA883X_PWRSTAGE_CTRL4, 0x5F },
+ { WSA883X_TEST1, 0x00 },
+ { WSA883X_SPARE1, 0x00 },
+ { WSA883X_SPARE2, 0x00 },
+ { WSA883X_PON_CTL_0, 0x10 },
+ { WSA883X_PON_CLT_1, 0xE0 },
+ { WSA883X_PON_CTL_2, 0x90 },
+ { WSA883X_PON_CTL_3, 0x70 },
+ { WSA883X_CKWD_CTL_0, 0x34 },
+ { WSA883X_CKWD_CTL_1, 0x0F },
+ { WSA883X_CKWD_CTL_2, 0x00 },
+ { WSA883X_CKSK_CTL_0, 0x00 },
+ { WSA883X_PADSW_CTL_0, 0x00 },
+ { WSA883X_TEST_0, 0x00 },
+ { WSA883X_TEST_1, 0x00 },
+ { WSA883X_STATUS_0, 0x00 },
+ { WSA883X_STATUS_1, 0x00 },
+ { WSA883X_CHIP_ID0, 0x00 },
+ { WSA883X_CHIP_ID1, 0x00 },
+ { WSA883X_CHIP_ID2, 0x02 },
+ { WSA883X_CHIP_ID3, 0x02 },
+ { WSA883X_BUS_ID, 0x00 },
+ { WSA883X_CDC_RST_CTL, 0x01 },
+ { WSA883X_TOP_CLK_CFG, 0x00 },
+ { WSA883X_CDC_PATH_MODE, 0x00 },
+ { WSA883X_CDC_CLK_CTL, 0xFF },
+ { WSA883X_SWR_RESET_EN, 0x00 },
+ { WSA883X_RESET_CTL, 0x00 },
+ { WSA883X_PA_FSM_CTL, 0x00 },
+ { WSA883X_PA_FSM_TIMER0, 0x80 },
+ { WSA883X_PA_FSM_TIMER1, 0x80 },
+ { WSA883X_PA_FSM_STA, 0x00 },
+ { WSA883X_PA_FSM_ERR_COND, 0x00 },
+ { WSA883X_PA_FSM_MSK, 0x00 },
+ { WSA883X_PA_FSM_BYP, 0x01 },
+ { WSA883X_PA_FSM_DBG, 0x00 },
+ { WSA883X_TADC_VALUE_CTL, 0x03 },
+ { WSA883X_TEMP_DETECT_CTL, 0x01 },
+ { WSA883X_TEMP_MSB, 0x00 },
+ { WSA883X_TEMP_LSB, 0x00 },
+ { WSA883X_TEMP_CONFIG0, 0x00 },
+ { WSA883X_TEMP_CONFIG1, 0x00 },
+ { WSA883X_VBAT_ADC_FLT_CTL, 0x00 },
+ { WSA883X_VBAT_DIN_MSB, 0x00 },
+ { WSA883X_VBAT_DIN_LSB, 0x00 },
+ { WSA883X_VBAT_DOUT, 0x00 },
+ { WSA883X_SDM_PDM9_LSB, 0x00 },
+ { WSA883X_SDM_PDM9_MSB, 0x00 },
+ { WSA883X_CDC_RX_CTL, 0xFE },
+ { WSA883X_CDC_SPK_DSM_A1_0, 0x00 },
+ { WSA883X_CDC_SPK_DSM_A1_1, 0x01 },
+ { WSA883X_CDC_SPK_DSM_A2_0, 0x96 },
+ { WSA883X_CDC_SPK_DSM_A2_1, 0x09 },
+ { WSA883X_CDC_SPK_DSM_A3_0, 0xAB },
+ { WSA883X_CDC_SPK_DSM_A3_1, 0x05 },
+ { WSA883X_CDC_SPK_DSM_A4_0, 0x1C },
+ { WSA883X_CDC_SPK_DSM_A4_1, 0x02 },
+ { WSA883X_CDC_SPK_DSM_A5_0, 0x17 },
+ { WSA883X_CDC_SPK_DSM_A5_1, 0x02 },
+ { WSA883X_CDC_SPK_DSM_A6_0, 0xAA },
+ { WSA883X_CDC_SPK_DSM_A7_0, 0xE3 },
+ { WSA883X_CDC_SPK_DSM_C_0, 0x69 },
+ { WSA883X_CDC_SPK_DSM_C_1, 0x54 },
+ { WSA883X_CDC_SPK_DSM_C_2, 0x02 },
+ { WSA883X_CDC_SPK_DSM_C_3, 0x15 },
+ { WSA883X_CDC_SPK_DSM_R1, 0xA4 },
+ { WSA883X_CDC_SPK_DSM_R2, 0xB5 },
+ { WSA883X_CDC_SPK_DSM_R3, 0x86 },
+ { WSA883X_CDC_SPK_DSM_R4, 0x85 },
+ { WSA883X_CDC_SPK_DSM_R5, 0xAA },
+ { WSA883X_CDC_SPK_DSM_R6, 0xE2 },
+ { WSA883X_CDC_SPK_DSM_R7, 0x62 },
+ { WSA883X_CDC_SPK_GAIN_PDM_0, 0x00 },
+ { WSA883X_CDC_SPK_GAIN_PDM_1, 0xFC },
+ { WSA883X_CDC_SPK_GAIN_PDM_2, 0x05 },
+ { WSA883X_PDM_WD_CTL, 0x00 },
+ { WSA883X_DEM_BYPASS_DATA0, 0x00 },
+ { WSA883X_DEM_BYPASS_DATA1, 0x00 },
+ { WSA883X_DEM_BYPASS_DATA2, 0x00 },
+ { WSA883X_DEM_BYPASS_DATA3, 0x00 },
+ { WSA883X_WAVG_CTL, 0x06 },
+ { WSA883X_WAVG_LRA_PER_0, 0xD1 },
+ { WSA883X_WAVG_LRA_PER_1, 0x00 },
+ { WSA883X_WAVG_DELTA_THETA_0, 0xE6 },
+ { WSA883X_WAVG_DELTA_THETA_1, 0x04 },
+ { WSA883X_WAVG_DIRECT_AMP_0, 0x50 },
+ { WSA883X_WAVG_DIRECT_AMP_1, 0x00 },
+ { WSA883X_WAVG_PTRN_AMP0_0, 0x50 },
+ { WSA883X_WAVG_PTRN_AMP0_1, 0x00 },
+ { WSA883X_WAVG_PTRN_AMP1_0, 0x50 },
+ { WSA883X_WAVG_PTRN_AMP1_1, 0x00 },
+ { WSA883X_WAVG_PTRN_AMP2_0, 0x50 },
+ { WSA883X_WAVG_PTRN_AMP2_1, 0x00 },
+ { WSA883X_WAVG_PTRN_AMP3_0, 0x50 },
+ { WSA883X_WAVG_PTRN_AMP3_1, 0x00 },
+ { WSA883X_WAVG_PTRN_AMP4_0, 0x50 },
+ { WSA883X_WAVG_PTRN_AMP4_1, 0x00 },
+ { WSA883X_WAVG_PTRN_AMP5_0, 0x50 },
+ { WSA883X_WAVG_PTRN_AMP5_1, 0x00 },
+ { WSA883X_WAVG_PTRN_AMP6_0, 0x50 },
+ { WSA883X_WAVG_PTRN_AMP6_1, 0x00 },
+ { WSA883X_WAVG_PTRN_AMP7_0, 0x50 },
+ { WSA883X_WAVG_PTRN_AMP7_1, 0x00 },
+ { WSA883X_WAVG_PER_0_1, 0x88 },
+ { WSA883X_WAVG_PER_2_3, 0x88 },
+ { WSA883X_WAVG_PER_4_5, 0x88 },
+ { WSA883X_WAVG_PER_6_7, 0x88 },
+ { WSA883X_WAVG_STA, 0x00 },
+ { WSA883X_DRE_CTL_0, 0x70 },
+ { WSA883X_DRE_CTL_1, 0x08 },
+ { WSA883X_DRE_IDLE_DET_CTL, 0x1F },
+ { WSA883X_CLSH_CTL_0, 0x37 },
+ { WSA883X_CLSH_CTL_1, 0x81 },
+ { WSA883X_CLSH_V_HD_PA, 0x0F },
+ { WSA883X_CLSH_V_PA_MIN, 0x00 },
+ { WSA883X_CLSH_OVRD_VAL, 0x00 },
+ { WSA883X_CLSH_HARD_MAX, 0xFF },
+ { WSA883X_CLSH_SOFT_MAX, 0xF5 },
+ { WSA883X_CLSH_SIG_DP, 0x00 },
+ { WSA883X_TAGC_CTL, 0x10 },
+ { WSA883X_TAGC_TIME, 0x20 },
+ { WSA883X_TAGC_E2E_GAIN, 0x02 },
+ { WSA883X_TAGC_FORCE_VAL, 0x00 },
+ { WSA883X_VAGC_CTL, 0x00 },
+ { WSA883X_VAGC_TIME, 0x08 },
+ { WSA883X_VAGC_ATTN_LVL_1_2, 0x21 },
+ { WSA883X_VAGC_ATTN_LVL_3, 0x03 },
+ { WSA883X_INTR_MODE, 0x00 },
+ { WSA883X_INTR_MASK0, 0x90 },
+ { WSA883X_INTR_MASK1, 0x00 },
+ { WSA883X_INTR_STATUS0, 0x00 },
+ { WSA883X_INTR_STATUS1, 0x00 },
+ { WSA883X_INTR_CLEAR0, 0x00 },
+ { WSA883X_INTR_CLEAR1, 0x00 },
+ { WSA883X_INTR_LEVEL0, 0x00 },
+ { WSA883X_INTR_LEVEL1, 0x00 },
+ { WSA883X_INTR_SET0, 0x00 },
+ { WSA883X_INTR_SET1, 0x00 },
+ { WSA883X_INTR_TEST0, 0x00 },
+ { WSA883X_INTR_TEST1, 0x00 },
+ { WSA883X_OTP_CTRL0, 0x00 },
+ { WSA883X_OTP_CTRL1, 0x00 },
+ { WSA883X_HDRIVE_CTL_GROUP1, 0x00 },
+ { WSA883X_PIN_CTL, 0x04 },
+ { WSA883X_PIN_CTL_OE, 0x00 },
+ { WSA883X_PIN_WDATA_IOPAD, 0x00 },
+ { WSA883X_PIN_STATUS, 0x00 },
+ { WSA883X_I2C_SLAVE_CTL, 0x00 },
+ { WSA883X_PDM_TEST_MODE, 0x00 },
+ { WSA883X_ATE_TEST_MODE, 0x00 },
+ { WSA883X_DIG_DEBUG_MODE, 0x00 },
+ { WSA883X_DIG_DEBUG_SEL, 0x00 },
+ { WSA883X_DIG_DEBUG_EN, 0x00 },
+ { WSA883X_SWR_HM_TEST0, 0x08 },
+ { WSA883X_SWR_HM_TEST1, 0x00 },
+ { WSA883X_SWR_PAD_CTL, 0x37 },
+ { WSA883X_TADC_DETECT_DBG_CTL, 0x00 },
+ { WSA883X_TADC_DEBUG_MSB, 0x00 },
+ { WSA883X_TADC_DEBUG_LSB, 0x00 },
+ { WSA883X_SAMPLE_EDGE_SEL, 0x7F },
+ { WSA883X_SWR_EDGE_SEL, 0x00 },
+ { WSA883X_TEST_MODE_CTL, 0x04 },
+ { WSA883X_IOPAD_CTL, 0x00 },
+ { WSA883X_ANA_CSR_DBG_ADD, 0x00 },
+ { WSA883X_ANA_CSR_DBG_CTL, 0x12 },
+ { WSA883X_SPARE_R, 0x00 },
+ { WSA883X_SPARE_0, 0x00 },
+ { WSA883X_SPARE_1, 0x00 },
+ { WSA883X_SPARE_2, 0x00 },
+ { WSA883X_SCODE, 0x00 },
+ { WSA883X_OTP_REG_0, 0x05 },
+ { WSA883X_OTP_REG_1, 0xFF },
+ { WSA883X_OTP_REG_2, 0xC0 },
+ { WSA883X_OTP_REG_3, 0xFF },
+ { WSA883X_OTP_REG_4, 0xC0 },
+ { WSA883X_OTP_REG_5, 0xFF },
+ { WSA883X_OTP_REG_6, 0xFF },
+ { WSA883X_OTP_REG_7, 0xFF },
+ { WSA883X_OTP_REG_8, 0xFF },
+ { WSA883X_OTP_REG_9, 0xFF },
+ { WSA883X_OTP_REG_10, 0xFF },
+ { WSA883X_OTP_REG_11, 0xFF },
+ { WSA883X_OTP_REG_12, 0xFF },
+ { WSA883X_OTP_REG_13, 0xFF },
+ { WSA883X_OTP_REG_14, 0xFF },
+ { WSA883X_OTP_REG_15, 0xFF },
+ { WSA883X_OTP_REG_16, 0xFF },
+ { WSA883X_OTP_REG_17, 0xFF },
+ { WSA883X_OTP_REG_18, 0xFF },
+ { WSA883X_OTP_REG_19, 0xFF },
+ { WSA883X_OTP_REG_20, 0xFF },
+ { WSA883X_OTP_REG_21, 0xFF },
+ { WSA883X_OTP_REG_22, 0xFF },
+ { WSA883X_OTP_REG_23, 0xFF },
+ { WSA883X_OTP_REG_24, 0x37 },
+ { WSA883X_OTP_REG_25, 0x3F },
+ { WSA883X_OTP_REG_26, 0x03 },
+ { WSA883X_OTP_REG_27, 0x00 },
+ { WSA883X_OTP_REG_28, 0x00 },
+ { WSA883X_OTP_REG_29, 0x00 },
+ { WSA883X_OTP_REG_30, 0x00 },
+ { WSA883X_OTP_REG_31, 0x03 },
+ { WSA883X_OTP_REG_32, 0x00 },
+ { WSA883X_OTP_REG_33, 0xFF },
+ { WSA883X_OTP_REG_34, 0x00 },
+ { WSA883X_OTP_REG_35, 0x00 },
+ { WSA883X_OTP_REG_63, 0x40 },
+ { WSA883X_EMEM_0, 0x00 },
+ { WSA883X_EMEM_1, 0x00 },
+ { WSA883X_EMEM_2, 0x00 },
+ { WSA883X_EMEM_3, 0x00 },
+ { WSA883X_EMEM_4, 0x00 },
+ { WSA883X_EMEM_5, 0x00 },
+ { WSA883X_EMEM_6, 0x00 },
+ { WSA883X_EMEM_7, 0x00 },
+ { WSA883X_EMEM_8, 0x00 },
+ { WSA883X_EMEM_9, 0x00 },
+ { WSA883X_EMEM_10, 0x00 },
+ { WSA883X_EMEM_11, 0x00 },
+ { WSA883X_EMEM_12, 0x00 },
+ { WSA883X_EMEM_13, 0x00 },
+ { WSA883X_EMEM_14, 0x00 },
+ { WSA883X_EMEM_15, 0x00 },
+ { WSA883X_EMEM_16, 0x00 },
+ { WSA883X_EMEM_17, 0x00 },
+ { WSA883X_EMEM_18, 0x00 },
+ { WSA883X_EMEM_19, 0x00 },
+ { WSA883X_EMEM_20, 0x00 },
+ { WSA883X_EMEM_21, 0x00 },
+ { WSA883X_EMEM_22, 0x00 },
+ { WSA883X_EMEM_23, 0x00 },
+ { WSA883X_EMEM_24, 0x00 },
+ { WSA883X_EMEM_25, 0x00 },
+ { WSA883X_EMEM_26, 0x00 },
+ { WSA883X_EMEM_27, 0x00 },
+ { WSA883X_EMEM_28, 0x00 },
+ { WSA883X_EMEM_29, 0x00 },
+ { WSA883X_EMEM_30, 0x00 },
+ { WSA883X_EMEM_31, 0x00 },
+ { WSA883X_EMEM_32, 0x00 },
+ { WSA883X_EMEM_33, 0x00 },
+ { WSA883X_EMEM_34, 0x00 },
+ { WSA883X_EMEM_35, 0x00 },
+ { WSA883X_EMEM_36, 0x00 },
+ { WSA883X_EMEM_37, 0x00 },
+ { WSA883X_EMEM_38, 0x00 },
+ { WSA883X_EMEM_39, 0x00 },
+ { WSA883X_EMEM_40, 0x00 },
+ { WSA883X_EMEM_41, 0x00 },
+ { WSA883X_EMEM_42, 0x00 },
+ { WSA883X_EMEM_43, 0x00 },
+ { WSA883X_EMEM_44, 0x00 },
+ { WSA883X_EMEM_45, 0x00 },
+ { WSA883X_EMEM_46, 0x00 },
+ { WSA883X_EMEM_47, 0x00 },
+ { WSA883X_EMEM_48, 0x00 },
+ { WSA883X_EMEM_49, 0x00 },
+ { WSA883X_EMEM_50, 0x00 },
+ { WSA883X_EMEM_51, 0x00 },
+ { WSA883X_EMEM_52, 0x00 },
+ { WSA883X_EMEM_53, 0x00 },
+ { WSA883X_EMEM_54, 0x00 },
+ { WSA883X_EMEM_55, 0x00 },
+ { WSA883X_EMEM_56, 0x00 },
+ { WSA883X_EMEM_57, 0x00 },
+ { WSA883X_EMEM_58, 0x00 },
+ { WSA883X_EMEM_59, 0x00 },
+ { WSA883X_EMEM_60, 0x00 },
+ { WSA883X_EMEM_61, 0x00 },
+ { WSA883X_EMEM_62, 0x00 },
+ { WSA883X_EMEM_63, 0x00 },
+};
+
+static bool wsa883x_readonly_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case WSA883X_DOUT_MSB:
+ case WSA883X_DOUT_LSB:
+ case WSA883X_STATUS:
+ case WSA883X_PA_STATUS0:
+ case WSA883X_PA_STATUS1:
+ case WSA883X_PA_STATUS2:
+ case WSA883X_STATUS_0:
+ case WSA883X_STATUS_1:
+ case WSA883X_CHIP_ID0:
+ case WSA883X_CHIP_ID1:
+ case WSA883X_CHIP_ID2:
+ case WSA883X_CHIP_ID3:
+ case WSA883X_BUS_ID:
+ case WSA883X_PA_FSM_STA:
+ case WSA883X_PA_FSM_ERR_COND:
+ case WSA883X_TEMP_MSB:
+ case WSA883X_TEMP_LSB:
+ case WSA883X_VBAT_DIN_MSB:
+ case WSA883X_VBAT_DIN_LSB:
+ case WSA883X_VBAT_DOUT:
+ case WSA883X_SDM_PDM9_LSB:
+ case WSA883X_SDM_PDM9_MSB:
+ case WSA883X_WAVG_STA:
+ case WSA883X_INTR_STATUS0:
+ case WSA883X_INTR_STATUS1:
+ case WSA883X_OTP_CTRL1:
+ case WSA883X_PIN_STATUS:
+ case WSA883X_ATE_TEST_MODE:
+ case WSA883X_SWR_HM_TEST1:
+ case WSA883X_SPARE_R:
+ case WSA883X_OTP_REG_0:
+ return true;
+ }
+ return false;
+}
+
+static bool wsa883x_writeable_register(struct device *dev, unsigned int reg)
+{
+ return !wsa883x_readonly_register(dev, reg);
+}
+
+static bool wsa883x_volatile_register(struct device *dev, unsigned int reg)
+{
+ return wsa883x_readonly_register(dev, reg);
+}
+
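+/*
+ * Registers use 32-bit SoundWire addresses with 8-bit values; the read-only
+ * status registers are also treated as volatile so they bypass the cache.
+ */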
+static struct regmap_config wsa883x_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults = wsa883x_defaults,
+ .max_register = WSA883X_MAX_REGISTER,
+ .num_reg_defaults = ARRAY_SIZE(wsa883x_defaults),
+ .volatile_reg = wsa883x_volatile_register,
+ .writeable_reg = wsa883x_writeable_register,
+ .reg_format_endian = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian = REGMAP_ENDIAN_NATIVE,
+ .can_multi_write = true,
+ .use_single_read = true,
+};
+
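+/* One-time overrides applied on top of the defaults once the device attaches */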
+static const struct reg_sequence reg_init[] = {
+ {WSA883X_PA_FSM_BYP, 0x00},
+ {WSA883X_ADC_6, 0x02},
+ {WSA883X_CDC_SPK_DSM_A2_0, 0x0A},
+ {WSA883X_CDC_SPK_DSM_A2_1, 0x08},
+ {WSA883X_CDC_SPK_DSM_A3_0, 0xF3},
+ {WSA883X_CDC_SPK_DSM_A3_1, 0x07},
+ {WSA883X_CDC_SPK_DSM_A4_0, 0x79},
+ {WSA883X_CDC_SPK_DSM_A4_1, 0x02},
+ {WSA883X_CDC_SPK_DSM_A5_0, 0x0B},
+ {WSA883X_CDC_SPK_DSM_A5_1, 0x02},
+ {WSA883X_CDC_SPK_DSM_A6_0, 0x8A},
+ {WSA883X_CDC_SPK_DSM_A7_0, 0x9B},
+ {WSA883X_CDC_SPK_DSM_C_0, 0x68},
+ {WSA883X_CDC_SPK_DSM_C_1, 0x54},
+ {WSA883X_CDC_SPK_DSM_C_2, 0xF2},
+ {WSA883X_CDC_SPK_DSM_C_3, 0x20},
+ {WSA883X_CDC_SPK_DSM_R1, 0x83},
+ {WSA883X_CDC_SPK_DSM_R2, 0x7F},
+ {WSA883X_CDC_SPK_DSM_R3, 0x9D},
+ {WSA883X_CDC_SPK_DSM_R4, 0x82},
+ {WSA883X_CDC_SPK_DSM_R5, 0x8B},
+ {WSA883X_CDC_SPK_DSM_R6, 0x9B},
+ {WSA883X_CDC_SPK_DSM_R7, 0x3F},
+ {WSA883X_VBAT_SNS, 0x20},
+ {WSA883X_DRE_CTL_0, 0x92},
+ {WSA883X_DRE_IDLE_DET_CTL, 0x0F},
+ {WSA883X_CURRENT_LIMIT, 0xC4},
+ {WSA883X_VAGC_TIME, 0x0F},
+ {WSA883X_VAGC_ATTN_LVL_1_2, 0x00},
+ {WSA883X_VAGC_ATTN_LVL_3, 0x01},
+ {WSA883X_VAGC_CTL, 0x01},
+ {WSA883X_TAGC_CTL, 0x1A},
+ {WSA883X_TAGC_TIME, 0x2C},
+ {WSA883X_TEMP_CONFIG0, 0x02},
+ {WSA883X_TEMP_CONFIG1, 0x02},
+ {WSA883X_OTP_REG_1, 0x49},
+ {WSA883X_OTP_REG_2, 0x80},
+ {WSA883X_OTP_REG_3, 0xC9},
+ {WSA883X_OTP_REG_4, 0x40},
+ {WSA883X_TAGC_CTL, 0x1B},
+ {WSA883X_ADC_2, 0x00},
+ {WSA883X_ADC_7, 0x85},
+ {WSA883X_ADC_7, 0x87},
+ {WSA883X_CKWD_CTL_0, 0x14},
+ {WSA883X_CKWD_CTL_1, 0x1B},
+ {WSA883X_GMAMP_SUP1, 0xE2},
+};
+
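+/* Read variant (OTP) and chip version, apply init sequence and DRE offset */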
+static void wsa883x_init(struct wsa883x_priv *wsa883x)
+{
+ struct regmap *regmap = wsa883x->regmap;
+ int variant, version;
+
+ regmap_read(regmap, WSA883X_OTP_REG_0, &variant);
+ wsa883x->variant = variant & WSA883X_ID_MASK;
+
+ regmap_read(regmap, WSA883X_CHIP_ID0, &version);
+ wsa883x->version = version;
+
+ switch (wsa883x->variant) {
+ case WSA8830:
+ dev_info(wsa883x->dev, "WSA883X Version 1_%d, Variant: WSA8830\n",
+ wsa883x->version);
+ break;
+ case WSA8835:
+ dev_info(wsa883x->dev, "WSA883X Version 1_%d, Variant: WSA8835\n",
+ wsa883x->version);
+ break;
+ case WSA8832:
+ dev_info(wsa883x->dev, "WSA883X Version 1_%d, Variant: WSA8832\n",
+ wsa883x->version);
+ break;
+ case WSA8835_V2:
+ dev_info(wsa883x->dev, "WSA883X Version 1_%d, Variant: WSA8835_V2\n",
+ wsa883x->version);
+ break;
+ default:
+ break;
+ }
+
+ wsa883x->comp_offset = COMP_OFFSET2;
+
+ /* Initial settings */
+ regmap_multi_reg_write(regmap, reg_init, ARRAY_SIZE(reg_init));
+
+ if (wsa883x->variant == WSA8830 || wsa883x->variant == WSA8832) {
+ wsa883x->comp_offset = COMP_OFFSET3;
+ regmap_update_bits(regmap, WSA883X_DRE_CTL_0,
+ WSA883X_DRE_OFFSET_MASK,
+ wsa883x->comp_offset);
+ }
+}
+
+static int wsa883x_update_status(struct sdw_slave *slave,
+ enum sdw_slave_status status)
+{
+ struct wsa883x_priv *wsa883x = dev_get_drvdata(&slave->dev);
+
+ if (status == SDW_SLAVE_ATTACHED && slave->dev_num > 0)
+ wsa883x_init(wsa883x);
+
+ return 0;
+}
+
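+/* Record which data ports have completed SoundWire channel prepare */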
+static int wsa883x_port_prep(struct sdw_slave *slave,
+ struct sdw_prepare_ch *prepare_ch,
+ enum sdw_port_prep_ops state)
+{
+ struct wsa883x_priv *wsa883x = dev_get_drvdata(&slave->dev);
+
+ if (state == SDW_OPS_PORT_POST_PREP)
+ wsa883x->port_prepared[prepare_ch->num - 1] = true;
+ else
+ wsa883x->port_prepared[prepare_ch->num - 1] = false;
+
+ return 0;
+}
+
+static struct sdw_slave_ops wsa883x_slave_ops = {
+ .update_status = wsa883x_update_status,
+ .port_prep = wsa883x_port_prep,
+};
+
+static int wsa_dev_mode_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
+ struct wsa883x_priv *wsa883x = snd_soc_component_get_drvdata(component);
+
+ ucontrol->value.enumerated.item[0] = wsa883x->dev_mode;
+
+ return 0;
+}
+
+static int wsa_dev_mode_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
+ struct wsa883x_priv *wsa883x = snd_soc_component_get_drvdata(component);
+
+ if (wsa883x->dev_mode == ucontrol->value.enumerated.item[0])
+ return 0;
+
+ wsa883x->dev_mode = ucontrol->value.enumerated.item[0];
+
+ return 1;
+}
+
+static const DECLARE_TLV_DB_SCALE(pa_gain, -300, 150, -300);
+
+static int wsa883x_get_swr_port(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+ struct wsa883x_priv *data = snd_soc_component_get_drvdata(comp);
+ struct soc_mixer_control *mixer = (struct soc_mixer_control *)kcontrol->private_value;
+ int portidx = mixer->reg;
+
+ ucontrol->value.integer.value[0] = data->port_enable[portidx];
+
+ return 0;
+}
+
+static int wsa883x_set_swr_port(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+ struct wsa883x_priv *data = snd_soc_component_get_drvdata(comp);
+ struct soc_mixer_control *mixer = (struct soc_mixer_control *)kcontrol->private_value;
+ int portidx = mixer->reg;
+
+ if (ucontrol->value.integer.value[0]) {
+ if (data->port_enable[portidx])
+ return 0;
+
+ data->port_enable[portidx] = true;
+ } else {
+ if (!data->port_enable[portidx])
+ return 0;
+
+ data->port_enable[portidx] = false;
+ }
+
+ return 1;
+}
+
+static int wsa883x_get_comp_offset(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
+ struct wsa883x_priv *wsa883x = snd_soc_component_get_drvdata(component);
+
+ ucontrol->value.integer.value[0] = wsa883x->comp_offset;
+
+ return 0;
+}
+
+static int wsa883x_set_comp_offset(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
+ struct wsa883x_priv *wsa883x = snd_soc_component_get_drvdata(component);
+
+ if (wsa883x->comp_offset == ucontrol->value.integer.value[0])
+ return 0;
+
+ wsa883x->comp_offset = ucontrol->value.integer.value[0];
+
+ return 1;
+}
+
+static int wsa883x_codec_probe(struct snd_soc_component *comp)
+{
+ struct wsa883x_priv *wsa883x = snd_soc_component_get_drvdata(comp);
+
+ snd_soc_component_init_regmap(comp, wsa883x->regmap);
+
+ return 0;
+}
+
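+/*
+ * Speaker PA DAPM handler: on power-up configure the RX path, PWM clock and
+ * DRE for the selected device mode and start the PA FSM; undo on power-down.
+ */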
+static int wsa883x_spkr_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+ struct wsa883x_priv *wsa883x = snd_soc_component_get_drvdata(component);
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ switch (wsa883x->dev_mode) {
+ case RECEIVER:
+ snd_soc_component_write_field(component, WSA883X_CDC_PATH_MODE,
+ WSA883X_RXD_MODE_MASK,
+ WSA883X_RXD_MODE_HIFI);
+ snd_soc_component_write_field(component, WSA883X_SPKR_PWM_CLK_CTL,
+ WSA883X_SPKR_PWM_FREQ_SEL_MASK,
+ WSA883X_SPKR_PWM_FREQ_F600KHZ);
+ snd_soc_component_write_field(component, WSA883X_DRE_CTL_0,
+ WSA883X_DRE_PROG_DELAY_MASK, 0x0);
+ break;
+ case SPEAKER:
+ snd_soc_component_write_field(component, WSA883X_CDC_PATH_MODE,
+ WSA883X_RXD_MODE_MASK,
+ WSA883X_RXD_MODE_NORMAL);
+ snd_soc_component_write_field(component, WSA883X_SPKR_PWM_CLK_CTL,
+ WSA883X_SPKR_PWM_FREQ_SEL_MASK,
+ WSA883X_SPKR_PWM_FREQ_F300KHZ);
+ snd_soc_component_write_field(component, WSA883X_DRE_CTL_0,
+ WSA883X_DRE_PROG_DELAY_MASK, 0x9);
+ break;
+ default:
+ break;
+ }
+
+ snd_soc_component_write_field(component, WSA883X_DRE_CTL_1,
+ WSA883X_DRE_GAIN_EN_MASK,
+ WSA883X_DRE_GAIN_FROM_CSR);
+ if (wsa883x->port_enable[WSA883X_PORT_COMP])
+ snd_soc_component_write_field(component, WSA883X_DRE_CTL_0,
+ WSA883X_DRE_OFFSET_MASK,
+ wsa883x->comp_offset);
+ snd_soc_component_write_field(component, WSA883X_VBAT_ADC_FLT_CTL,
+ WSA883X_VBAT_ADC_COEF_SEL_MASK,
+ WSA883X_VBAT_ADC_COEF_F_1DIV16);
+ snd_soc_component_write_field(component, WSA883X_VBAT_ADC_FLT_CTL,
+ WSA883X_VBAT_ADC_FLT_EN_MASK, 0x1);
+ snd_soc_component_write_field(component, WSA883X_PDM_WD_CTL,
+ WSA883X_PDM_EN_MASK,
+ WSA883X_PDM_ENABLE);
+ snd_soc_component_write_field(component, WSA883X_PA_FSM_CTL,
+ WSA883X_GLOBAL_PA_EN_MASK,
+ WSA883X_GLOBAL_PA_ENABLE);
+
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ snd_soc_component_write_field(component, WSA883X_VBAT_ADC_FLT_CTL,
+ WSA883X_VBAT_ADC_FLT_EN_MASK, 0x0);
+ snd_soc_component_write_field(component, WSA883X_VBAT_ADC_FLT_CTL,
+ WSA883X_VBAT_ADC_COEF_SEL_MASK,
+ WSA883X_VBAT_ADC_COEF_F_1DIV2);
+ snd_soc_component_write_field(component, WSA883X_PA_FSM_CTL,
+ WSA883X_GLOBAL_PA_EN_MASK, 0);
+ snd_soc_component_write_field(component, WSA883X_PDM_WD_CTL,
+ WSA883X_PDM_EN_MASK, 0);
+ break;
+ }
+ return 0;
+}
+
+static const struct snd_soc_dapm_widget wsa883x_dapm_widgets[] = {
+ SND_SOC_DAPM_INPUT("IN"),
+ SND_SOC_DAPM_SPK("SPKR", wsa883x_spkr_event),
+};
+
+static const struct snd_kcontrol_new wsa883x_snd_controls[] = {
+ SOC_SINGLE_RANGE_TLV("PA Volume", WSA883X_DRE_CTL_1, 1,
+ 0x0, 0x1f, 1, pa_gain),
+ SOC_ENUM_EXT("WSA MODE", wsa_dev_mode_enum,
+ wsa_dev_mode_get, wsa_dev_mode_put),
+ SOC_SINGLE_EXT("COMP Offset", SND_SOC_NOPM, 0, 4, 0,
+ wsa883x_get_comp_offset, wsa883x_set_comp_offset),
+ SOC_SINGLE_EXT("DAC Switch", WSA883X_PORT_DAC, 0, 1, 0,
+ wsa883x_get_swr_port, wsa883x_set_swr_port),
+ SOC_SINGLE_EXT("COMP Switch", WSA883X_PORT_COMP, 0, 1, 0,
+ wsa883x_get_swr_port, wsa883x_set_swr_port),
+ SOC_SINGLE_EXT("BOOST Switch", WSA883X_PORT_BOOST, 0, 1, 0,
+ wsa883x_get_swr_port, wsa883x_set_swr_port),
+ SOC_SINGLE_EXT("VISENSE Switch", WSA883X_PORT_VISENSE, 0, 1, 0,
+ wsa883x_get_swr_port, wsa883x_set_swr_port),
+};
+
+static const struct snd_soc_dapm_route wsa883x_audio_map[] = {
+ {"SPKR", NULL, "IN"},
+};
+
+static const struct snd_soc_component_driver wsa883x_component_drv = {
+ .name = "WSA883x",
+ .probe = wsa883x_codec_probe,
+ .controls = wsa883x_snd_controls,
+ .num_controls = ARRAY_SIZE(wsa883x_snd_controls),
+ .dapm_widgets = wsa883x_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wsa883x_dapm_widgets),
+ .dapm_routes = wsa883x_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(wsa883x_audio_map),
+};
+
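+/* Collect configs for the ports enabled via kcontrols and join the stream */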
+static int wsa883x_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct wsa883x_priv *wsa883x = dev_get_drvdata(dai->dev);
+ int i;
+
+ wsa883x->active_ports = 0;
+ for (i = 0; i < WSA883X_MAX_SWR_PORTS; i++) {
+ if (!wsa883x->port_enable[i])
+ continue;
+
+ wsa883x->port_config[wsa883x->active_ports] = wsa883x_pconfig[i];
+ wsa883x->active_ports++;
+ }
+
+ wsa883x->sconfig.frame_rate = params_rate(params);
+
+ return sdw_stream_add_slave(wsa883x->slave, &wsa883x->sconfig,
+ wsa883x->port_config, wsa883x->active_ports,
+ wsa883x->sruntime);
+}
+
+static int wsa883x_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct wsa883x_priv *wsa883x = dev_get_drvdata(dai->dev);
+
+ sdw_stream_remove_slave(wsa883x->slave, wsa883x->sruntime);
+
+ return 0;
+}
+
+static int wsa883x_set_sdw_stream(struct snd_soc_dai *dai,
+ void *stream, int direction)
+{
+ struct wsa883x_priv *wsa883x = dev_get_drvdata(dai->dev);
+
+ wsa883x->sruntime = stream;
+
+ return 0;
+}
+
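+/* Mute by disabling DRE gain and the global PA; unmute restores both */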
+static int wsa883x_digital_mute(struct snd_soc_dai *dai, int mute, int stream)
+{
+ struct snd_soc_component *component = dai->component;
+
+ if (mute) {
+ snd_soc_component_write_field(component, WSA883X_DRE_CTL_1,
+ WSA883X_DRE_GAIN_EN_MASK, 0);
+ snd_soc_component_write_field(component, WSA883X_PA_FSM_CTL,
+ WSA883X_GLOBAL_PA_EN_MASK, 0);
+
+ } else {
+ snd_soc_component_write_field(component, WSA883X_DRE_CTL_1,
+ WSA883X_DRE_GAIN_EN_MASK,
+ WSA883X_DRE_GAIN_FROM_CSR);
+ snd_soc_component_write_field(component, WSA883X_PA_FSM_CTL,
+ WSA883X_GLOBAL_PA_EN_MASK, 1);
+
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops wsa883x_dai_ops = {
+ .hw_params = wsa883x_hw_params,
+ .hw_free = wsa883x_hw_free,
+ .mute_stream = wsa883x_digital_mute,
+ .set_stream = wsa883x_set_sdw_stream,
+};
+
+static struct snd_soc_dai_driver wsa883x_dais[] = {
+ {
+ .name = "SPKR",
+ .playback = {
+ .stream_name = "SPKR Playback",
+ .rates = WSA883X_RATES | WSA883X_FRAC_RATES,
+ .formats = WSA883X_FORMATS,
+ .rate_min = 8000,
+ .rate_max = 352800,
+ .channels_min = 1,
+ .channels_max = 1,
+ },
+ .ops = &wsa883x_dai_ops,
+ },
+};
+
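+/* Enable VDD, release shutdown, describe sink ports, register the component */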
+static int wsa883x_probe(struct sdw_slave *pdev,
+ const struct sdw_device_id *id)
+{
+ struct wsa883x_priv *wsa883x;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ wsa883x = devm_kzalloc(&pdev->dev, sizeof(*wsa883x), GFP_KERNEL);
+ if (!wsa883x)
+ return -ENOMEM;
+
+ wsa883x->vdd = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(wsa883x->vdd)) {
+ dev_err(dev, "No vdd regulator found\n");
+ return PTR_ERR(wsa883x->vdd);
+ }
+
+ ret = regulator_enable(wsa883x->vdd);
+ if (ret) {
+ dev_err(dev, "Failed to enable vdd regulator (%d)\n", ret);
+ return ret;
+ }
+
+ wsa883x->sd_n = devm_gpiod_get_optional(&pdev->dev, "powerdown",
+ GPIOD_FLAGS_BIT_NONEXCLUSIVE);
+ if (IS_ERR(wsa883x->sd_n)) {
+ dev_err(&pdev->dev, "Shutdown Control GPIO not found\n");
+ ret = PTR_ERR(wsa883x->sd_n);
+ goto err;
+ }
+
+ dev_set_drvdata(&pdev->dev, wsa883x);
+ wsa883x->slave = pdev;
+ wsa883x->dev = &pdev->dev;
+ wsa883x->sconfig.ch_count = 1;
+ wsa883x->sconfig.bps = 1;
+ wsa883x->sconfig.direction = SDW_DATA_DIR_RX;
+ wsa883x->sconfig.type = SDW_STREAM_PDM;
+
+ pdev->prop.sink_ports = GENMASK(WSA883X_MAX_SWR_PORTS, 0);
+ pdev->prop.sink_dpn_prop = wsa_sink_dpn_prop;
+ pdev->prop.scp_int1_mask = SDW_SCP_INT1_BUS_CLASH | SDW_SCP_INT1_PARITY;
+ gpiod_direction_output(wsa883x->sd_n, 1);
+
+ wsa883x->regmap = devm_regmap_init_sdw(pdev, &wsa883x_regmap_config);
+ if (IS_ERR(wsa883x->regmap)) {
+ dev_err(&pdev->dev, "regmap_init failed\n");
+ ret = PTR_ERR(wsa883x->regmap);
+ goto err;
+ }
+ pm_runtime_set_autosuspend_delay(dev, 3000);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ ret = devm_snd_soc_register_component(&pdev->dev,
+ &wsa883x_component_drv,
+ wsa883x_dais,
+ ARRAY_SIZE(wsa883x_dais));
+err:
+ if (ret)
+ regulator_disable(wsa883x->vdd);
+
+ return ret;
+
+}
+
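+/*
+ * Runtime PM: suspend drives the shutdown GPIO, puts the regmap in cache-only
+ * mode and disables VDD; resume re-enables VDD, waits for re-enumeration and
+ * syncs the register cache.
+ */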
+static int __maybe_unused wsa883x_runtime_suspend(struct device *dev)
+{
+ struct regmap *regmap = dev_get_regmap(dev, NULL);
+ struct wsa883x_priv *wsa883x = dev_get_drvdata(dev);
+
+ gpiod_direction_output(wsa883x->sd_n, 0);
+
+ regcache_cache_only(regmap, true);
+ regcache_mark_dirty(regmap);
+
+ regulator_disable(wsa883x->vdd);
+ return 0;
+}
+
+static int __maybe_unused wsa883x_runtime_resume(struct device *dev)
+{
+ struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ struct regmap *regmap = dev_get_regmap(dev, NULL);
+ struct wsa883x_priv *wsa883x = dev_get_drvdata(dev);
+ unsigned long time;
+ int ret;
+
+ ret = regulator_enable(wsa883x->vdd);
+ if (ret) {
+ dev_err(dev, "Failed to enable vdd regulator (%d)\n", ret);
+ return ret;
+ }
+
+ gpiod_direction_output(wsa883x->sd_n, 1);
+
+ time = wait_for_completion_timeout(&slave->initialization_complete,
+ msecs_to_jiffies(WSA883X_PROBE_TIMEOUT));
+ if (!time) {
+ dev_err(dev, "Initialization not complete, timed out\n");
+ gpiod_direction_output(wsa883x->sd_n, 0);
+ regulator_disable(wsa883x->vdd);
+ return -ETIMEDOUT;
+ }
+
+ usleep_range(20000, 20010);
+ regcache_cache_only(regmap, false);
+ regcache_sync(regmap);
+
+ return 0;
+}
+
+static const struct dev_pm_ops wsa883x_pm_ops = {
+ SET_RUNTIME_PM_OPS(wsa883x_runtime_suspend, wsa883x_runtime_resume, NULL)
+};
+
+static const struct sdw_device_id wsa883x_swr_id[] = {
+ SDW_SLAVE_ENTRY(0x0217, 0x0202, 0),
+ {},
+};
+
+MODULE_DEVICE_TABLE(sdw, wsa883x_swr_id);
+
+static struct sdw_driver wsa883x_codec_driver = {
+ .driver = {
+ .name = "wsa883x-codec",
+ .pm = &wsa883x_pm_ops,
+ .suppress_bind_attrs = true,
+ },
+ .probe = wsa883x_probe,
+ .ops = &wsa883x_slave_ops,
+ .id_table = wsa883x_swr_id,
+};
+
+module_sdw_driver(wsa883x_codec_driver);
+
+MODULE_DESCRIPTION("WSA883x codec driver");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/zl38060.c b/sound/soc/codecs/zl38060.c
index 6cae0fb08093..c3d0a2a7c36f 100644
--- a/sound/soc/codecs/zl38060.c
+++ b/sound/soc/codecs/zl38060.c
@@ -385,7 +385,6 @@ static const struct snd_soc_component_driver zl38_component_dev = {
.dapm_routes = zl38_dapm_routes,
.num_dapm_routes = ARRAY_SIZE(zl38_dapm_routes),
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static void chip_gpio_set(struct gpio_chip *c, unsigned int offset, int val)
diff --git a/sound/soc/dwc/dwc-i2s.c b/sound/soc/dwc/dwc-i2s.c
index 1edac3e10f34..7f7dd07c63b2 100644
--- a/sound/soc/dwc/dwc-i2s.c
+++ b/sound/soc/dwc/dwc-i2s.c
@@ -357,20 +357,20 @@ static int dw_i2s_set_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
int ret = 0;
switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
- case SND_SOC_DAIFMT_CBP_CFP:
+ case SND_SOC_DAIFMT_BC_FC:
if (dev->capability & DW_I2S_SLAVE)
ret = 0;
else
ret = -EINVAL;
break;
- case SND_SOC_DAIFMT_CBC_CFC:
+ case SND_SOC_DAIFMT_BP_FP:
if (dev->capability & DW_I2S_MASTER)
ret = 0;
else
ret = -EINVAL;
break;
- case SND_SOC_DAIFMT_CBP_CFC:
- case SND_SOC_DAIFMT_CBC_CFP:
+ case SND_SOC_DAIFMT_BC_FP:
+ case SND_SOC_DAIFMT_BP_FC:
ret = -EINVAL;
break;
default:
@@ -449,9 +449,10 @@ static int dw_i2s_resume(struct snd_soc_component *component)
#endif
static const struct snd_soc_component_driver dw_i2s_component = {
- .name = "dw-i2s",
- .suspend = dw_i2s_suspend,
- .resume = dw_i2s_resume,
+ .name = "dw-i2s",
+ .suspend = dw_i2s_suspend,
+ .resume = dw_i2s_resume,
+ .legacy_dai_naming = 1,
};
/*
diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig
index 10fa38753453..614eceda6b9e 100644
--- a/sound/soc/fsl/Kconfig
+++ b/sound/soc/fsl/Kconfig
@@ -19,6 +19,7 @@ config SND_SOC_FSL_SAI
select REGMAP_MMIO
select SND_SOC_IMX_PCM_DMA if SND_IMX_SOC != n
select SND_SOC_GENERIC_DMAENGINE_PCM
+ select SND_SOC_FSL_UTILS
help
Say Y if you want to add Synchronous Audio Interface (SAI)
support for the Freescale CPUs.
@@ -59,6 +60,7 @@ config SND_SOC_FSL_SPDIF
select SND_SOC_IMX_PCM_DMA if SND_IMX_SOC != n
select SND_SOC_IMX_PCM_FIQ if SND_IMX_SOC != n && (MXC_TZIC || MXC_AVIC)
select BITREVERSE
+ select SND_SOC_FSL_UTILS
help
Say Y if you want to add Sony/Philips Digital Interface (SPDIF)
support for the Freescale CPUs.
@@ -80,6 +82,7 @@ config SND_SOC_FSL_MICFIL
select REGMAP_MMIO
select SND_SOC_IMX_PCM_DMA if SND_IMX_SOC != n
select SND_SOC_GENERIC_DMAENGINE_PCM
+ select SND_SOC_FSL_UTILS
help
Say Y if you want to add Pulse Density Modulation microphone
interface (MICFIL) support for NXP.
diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
index d9a0d4768c4d..c836848ef0a6 100644
--- a/sound/soc/fsl/fsl-asoc-card.c
+++ b/sound/soc/fsl/fsl-asoc-card.c
@@ -537,6 +537,7 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
struct device *codec_dev = NULL;
const char *codec_dai_name;
const char *codec_dev_name;
+ u32 asrc_fmt = 0;
u32 width;
int ret;
@@ -829,8 +830,8 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
goto asrc_fail;
}
- ret = of_property_read_u32(asrc_np, "fsl,asrc-format",
- &priv->asrc_format);
+ ret = of_property_read_u32(asrc_np, "fsl,asrc-format", &asrc_fmt);
+ priv->asrc_format = (__force snd_pcm_format_t)asrc_fmt;
if (ret) {
/* Fallback to old binding; translate to asrc_format */
ret = of_property_read_u32(asrc_np, "fsl,asrc-width",
diff --git a/sound/soc/fsl/fsl_asrc.c b/sound/soc/fsl/fsl_asrc.c
index 20a9f8e924b3..aa5edf32d988 100644
--- a/sound/soc/fsl/fsl_asrc.c
+++ b/sound/soc/fsl/fsl_asrc.c
@@ -1066,6 +1066,7 @@ static int fsl_asrc_probe(struct platform_device *pdev)
struct resource *res;
void __iomem *regs;
int irq, ret, i;
+ u32 asrc_fmt = 0;
u32 map_idx;
char tmp[16];
u32 width;
@@ -1174,7 +1175,8 @@ static int fsl_asrc_probe(struct platform_device *pdev)
return ret;
}
- ret = of_property_read_u32(np, "fsl,asrc-format", &asrc->asrc_format);
+ ret = of_property_read_u32(np, "fsl,asrc-format", &asrc_fmt);
+ asrc->asrc_format = (__force snd_pcm_format_t)asrc_fmt;
if (ret) {
ret = of_property_read_u32(np, "fsl,asrc-width", &width);
if (ret) {
@@ -1197,7 +1199,7 @@ static int fsl_asrc_probe(struct platform_device *pdev)
}
}
- if (!(FSL_ASRC_FORMATS & (1ULL << asrc->asrc_format))) {
+ if (!(FSL_ASRC_FORMATS & pcm_format_to_bits(asrc->asrc_format))) {
dev_warn(&pdev->dev, "unsupported width, use default S24_LE\n");
asrc->asrc_format = SNDRV_PCM_FORMAT_S24_LE;
}
diff --git a/sound/soc/fsl/fsl_asrc_dma.c b/sound/soc/fsl/fsl_asrc_dma.c
index 5038faf035cb..12ddf2320f2d 100644
--- a/sound/soc/fsl/fsl_asrc_dma.c
+++ b/sound/soc/fsl/fsl_asrc_dma.c
@@ -114,8 +114,8 @@ static int fsl_asrc_dma_trigger(struct snd_soc_component *component,
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- dmaengine_terminate_all(pair->dma_chan[OUT]);
- dmaengine_terminate_all(pair->dma_chan[IN]);
+ dmaengine_terminate_async(pair->dma_chan[OUT]);
+ dmaengine_terminate_async(pair->dma_chan[IN]);
break;
default:
return -EINVAL;
@@ -129,6 +129,7 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
struct snd_pcm_hw_params *params)
{
enum dma_slave_buswidth buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ enum sdma_peripheral_type be_peripheral_type = IMX_DMATYPE_SSI;
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
struct snd_dmaengine_dai_dma_data *dma_params_fe = NULL;
@@ -139,6 +140,7 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
struct snd_soc_component *component_be = NULL;
struct fsl_asrc *asrc = pair->asrc;
struct dma_slave_config config_fe, config_be;
+ struct sdma_peripheral_config audio_config;
enum asrc_pair_index index = pair->index;
struct device *dev = component->dev;
struct device_node *of_dma_node;
@@ -221,6 +223,7 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
/* Get DMA request of Back-End */
tmp_data = tmp_chan->private;
pair->dma_data.dma_request = tmp_data->dma_request;
+ be_peripheral_type = tmp_data->peripheral_type;
if (!be_chan)
dma_release_channel(tmp_chan);
@@ -268,6 +271,17 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
config_be.dst_addr_width = buswidth;
config_be.dst_maxburst = dma_params_be->maxburst;
+ memset(&audio_config, 0, sizeof(audio_config));
+ config_be.peripheral_config = &audio_config;
+ config_be.peripheral_size = sizeof(audio_config);
+
+ if (tx && (be_peripheral_type == IMX_DMATYPE_SSI_DUAL ||
+ be_peripheral_type == IMX_DMATYPE_SPDIF))
+ audio_config.n_fifos_dst = 2;
+ if (!tx && (be_peripheral_type == IMX_DMATYPE_SSI_DUAL ||
+ be_peripheral_type == IMX_DMATYPE_SPDIF))
+ audio_config.n_fifos_src = 2;
+
if (tx) {
config_be.src_addr = asrc->paddr + asrc->get_fifo_addr(OUT, index);
config_be.dst_addr = dma_params_be->addr;
@@ -441,5 +455,6 @@ struct snd_soc_component_driver fsl_asrc_component = {
.close = fsl_asrc_dma_shutdown,
.pointer = fsl_asrc_dma_pcm_pointer,
.pcm_construct = fsl_asrc_dma_pcm_new,
+ .legacy_dai_naming = 1,
};
EXPORT_SYMBOL_GPL(fsl_asrc_component);
diff --git a/sound/soc/fsl/fsl_aud2htx.c b/sound/soc/fsl/fsl_aud2htx.c
index 422922146f2a..873295f59ad7 100644
--- a/sound/soc/fsl/fsl_aud2htx.c
+++ b/sound/soc/fsl/fsl_aud2htx.c
@@ -103,7 +103,8 @@ static struct snd_soc_dai_driver fsl_aud2htx_dai = {
};
static const struct snd_soc_component_driver fsl_aud2htx_component = {
- .name = "fsl-aud2htx",
+ .name = "fsl-aud2htx",
+ .legacy_dai_naming = 1,
};
static const struct reg_default fsl_aud2htx_reg_defaults[] = {
diff --git a/sound/soc/fsl/fsl_audmix.c b/sound/soc/fsl/fsl_audmix.c
index 6dbb8c99f626..43857b7a81c9 100644
--- a/sound/soc/fsl/fsl_audmix.c
+++ b/sound/soc/fsl/fsl_audmix.c
@@ -259,8 +259,8 @@ static int fsl_audmix_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
/* For playback the AUDMIX is consumer, and for record is provider */
switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
- case SND_SOC_DAIFMT_CBP_CFP:
- case SND_SOC_DAIFMT_CBC_CFC:
+ case SND_SOC_DAIFMT_BC_FC:
+ case SND_SOC_DAIFMT_BP_FP:
break;
default:
return -EINVAL;
@@ -317,7 +317,7 @@ static int fsl_audmix_dai_trigger(struct snd_pcm_substream *substream, int cmd,
}
static const struct snd_soc_dai_ops fsl_audmix_dai_ops = {
- .set_fmt = fsl_audmix_dai_set_fmt,
+ .set_fmt = fsl_audmix_dai_set_fmt,
.trigger = fsl_audmix_dai_trigger,
};
diff --git a/sound/soc/fsl/fsl_easrc.c b/sound/soc/fsl/fsl_easrc.c
index be14f84796cb..3153d19136b2 100644
--- a/sound/soc/fsl/fsl_easrc.c
+++ b/sound/soc/fsl/fsl_easrc.c
@@ -476,7 +476,8 @@ static int fsl_easrc_prefilter_config(struct fsl_asrc *easrc,
struct fsl_asrc_pair *ctx;
struct device *dev;
u32 inrate, outrate, offset = 0;
- u32 in_s_rate, out_s_rate, in_s_fmt, out_s_fmt;
+ u32 in_s_rate, out_s_rate;
+ snd_pcm_format_t in_s_fmt, out_s_fmt;
int ret, i;
if (!easrc)
@@ -1572,9 +1573,10 @@ static struct snd_soc_dai_driver fsl_easrc_dai = {
};
static const struct snd_soc_component_driver fsl_easrc_component = {
- .name = "fsl-easrc-dai",
- .controls = fsl_easrc_snd_controls,
- .num_controls = ARRAY_SIZE(fsl_easrc_snd_controls),
+ .name = "fsl-easrc-dai",
+ .controls = fsl_easrc_snd_controls,
+ .num_controls = ARRAY_SIZE(fsl_easrc_snd_controls),
+ .legacy_dai_naming = 1,
};
static const struct reg_default fsl_easrc_reg_defaults[] = {
@@ -1873,6 +1875,7 @@ static int fsl_easrc_probe(struct platform_device *pdev)
struct resource *res;
struct device_node *np;
void __iomem *regs;
+ u32 asrc_fmt = 0;
int ret, irq;
easrc = devm_kzalloc(dev, sizeof(*easrc), GFP_KERNEL);
@@ -1933,13 +1936,14 @@ static int fsl_easrc_probe(struct platform_device *pdev)
return ret;
}
- ret = of_property_read_u32(np, "fsl,asrc-format", &easrc->asrc_format);
+ ret = of_property_read_u32(np, "fsl,asrc-format", &asrc_fmt);
+ easrc->asrc_format = (__force snd_pcm_format_t)asrc_fmt;
if (ret) {
dev_err(dev, "failed to asrc format\n");
return ret;
}
- if (!(FSL_EASRC_FORMATS & (1ULL << easrc->asrc_format))) {
+ if (!(FSL_EASRC_FORMATS & (pcm_format_to_bits(easrc->asrc_format)))) {
dev_warn(dev, "unsupported format, switching to S24_LE\n");
easrc->asrc_format = SNDRV_PCM_FORMAT_S24_LE;
}
diff --git a/sound/soc/fsl/fsl_easrc.h b/sound/soc/fsl/fsl_easrc.h
index 86d5c360d4f5..7c70dac52713 100644
--- a/sound/soc/fsl/fsl_easrc.h
+++ b/sound/soc/fsl/fsl_easrc.h
@@ -569,7 +569,7 @@ struct fsl_easrc_io_params {
unsigned int access_len;
unsigned int fifo_wtmk;
unsigned int sample_rate;
- unsigned int sample_format;
+ snd_pcm_format_t sample_format;
unsigned int norm_rate;
};
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index 1a2bdf8e76f0..5c21fc490fce 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -480,16 +480,16 @@ static int fsl_esai_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
/* DAI clock provider masks */
switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
- case SND_SOC_DAIFMT_CBP_CFP:
+ case SND_SOC_DAIFMT_BC_FC:
esai_priv->consumer_mode = true;
break;
- case SND_SOC_DAIFMT_CBC_CFP:
+ case SND_SOC_DAIFMT_BP_FC:
xccr |= ESAI_xCCR_xCKD;
break;
- case SND_SOC_DAIFMT_CBP_CFC:
+ case SND_SOC_DAIFMT_BC_FP:
xccr |= ESAI_xCCR_xFSD;
break;
- case SND_SOC_DAIFMT_CBC_CFC:
+ case SND_SOC_DAIFMT_BP_FP:
xccr |= ESAI_xCCR_xFSD | ESAI_xCCR_xCKD;
break;
default:
@@ -824,7 +824,8 @@ static struct snd_soc_dai_driver fsl_esai_dai = {
};
static const struct snd_soc_component_driver fsl_esai_component = {
- .name = "fsl-esai",
+ .name = "fsl-esai",
+ .legacy_dai_naming = 1,
};
static const struct reg_default fsl_esai_reg_defaults[] = {
diff --git a/sound/soc/fsl/fsl_micfil.c b/sound/soc/fsl/fsl_micfil.c
index 7b88d52f27de..79ef4e269bc9 100644
--- a/sound/soc/fsl/fsl_micfil.c
+++ b/sound/soc/fsl/fsl_micfil.c
@@ -24,6 +24,7 @@
#include <sound/core.h>
#include "fsl_micfil.h"
+#include "fsl_utils.h"
#define MICFIL_OSR_DEFAULT 16
@@ -42,12 +43,15 @@ struct fsl_micfil {
const struct fsl_micfil_soc_data *soc;
struct clk *busclk;
struct clk *mclk;
+ struct clk *pll8k_clk;
+ struct clk *pll11k_clk;
struct snd_dmaengine_dai_dma_data dma_params_rx;
struct sdma_peripheral_config sdmacfg;
unsigned int dataline;
char name[32];
int irq[MICFIL_IRQ_LINES];
enum quality quality;
+ int dc_remover;
};
struct fsl_micfil_soc_data {
@@ -263,6 +267,27 @@ static int fsl_micfil_trigger(struct snd_pcm_substream *substream, int cmd,
return 0;
}
+static int fsl_micfil_reparent_rootclk(struct fsl_micfil *micfil, unsigned int sample_rate)
+{
+ struct device *dev = &micfil->pdev->dev;
+ u64 ratio = sample_rate;
+ struct clk *clk;
+ int ret;
+
+ /* Get root clock */
+ clk = micfil->mclk;
+
+ /* Disable clock first, for it was enabled by pm_runtime */
+ clk_disable_unprepare(clk);
+ fsl_asoc_reparent_pll_clocks(dev, clk, micfil->pll8k_clk,
+ micfil->pll11k_clk, ratio);
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static int fsl_micfil_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
@@ -286,6 +311,10 @@ static int fsl_micfil_hw_params(struct snd_pcm_substream *substream,
if (ret)
return ret;
+ ret = fsl_micfil_reparent_rootclk(micfil, rate);
+ if (ret)
+ return ret;
+
ret = clk_set_rate(micfil->mclk, rate * clk_div * osr * 8);
if (ret)
return ret;
@@ -317,12 +346,25 @@ static const struct snd_soc_dai_ops fsl_micfil_dai_ops = {
static int fsl_micfil_dai_probe(struct snd_soc_dai *cpu_dai)
{
struct fsl_micfil *micfil = dev_get_drvdata(cpu_dai->dev);
- int ret;
+ struct device *dev = cpu_dai->dev;
+ unsigned int val = 0;
+ int ret, i;
- micfil->quality = QUALITY_MEDIUM;
+ micfil->quality = QUALITY_VLOW0;
- /* set default gain to max_gain */
- regmap_write(micfil->regmap, REG_MICFIL_OUT_CTRL, 0x77777777);
+ /* set default gain to 2 */
+ regmap_write(micfil->regmap, REG_MICFIL_OUT_CTRL, 0x22222222);
+
+ /* set DC Remover in bypass mode */
+ for (i = 0; i < MICFIL_OUTPUT_CHANNELS; i++)
+ val |= MICFIL_DC_BYPASS << MICFIL_DC_CHX_SHIFT(i);
+ ret = regmap_update_bits(micfil->regmap, REG_MICFIL_DC_CTRL,
+ MICFIL_DC_CTRL_CONFIG, val);
+ if (ret) {
+ dev_err(dev, "failed to set DC Remover mode bits\n");
+ return ret;
+ }
+ micfil->dc_remover = MICFIL_DC_BYPASS;
snd_soc_dai_init_dma_data(cpu_dai, NULL,
&micfil->dma_params_rx);
@@ -353,7 +395,7 @@ static const struct snd_soc_component_driver fsl_micfil_component = {
.name = "fsl-micfil-dai",
.controls = fsl_micfil_snd_controls,
.num_controls = ARRAY_SIZE(fsl_micfil_snd_controls),
-
+ .legacy_dai_naming = 1,
};
/* REGMAP */
@@ -577,6 +619,9 @@ static int fsl_micfil_probe(struct platform_device *pdev)
return PTR_ERR(micfil->busclk);
}
+ fsl_asoc_get_pll_clocks(&pdev->dev, &micfil->pll8k_clk,
+ &micfil->pll11k_clk);
+
/* init regmap */
regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(regs))
diff --git a/sound/soc/fsl/fsl_micfil.h b/sound/soc/fsl/fsl_micfil.h
index 053caba3caf3..d60285dd07bc 100644
--- a/sound/soc/fsl/fsl_micfil.h
+++ b/sound/soc/fsl/fsl_micfil.h
@@ -73,6 +73,15 @@
#define MICFIL_FIFO_STAT_FIFOX_OVER(ch) BIT(ch)
#define MICFIL_FIFO_STAT_FIFOX_UNDER(ch) BIT((ch) + 8)
+/* MICFIL DC Remover Control Register -- REG_MICFIL_DC_CTRL */
+#define MICFIL_DC_CTRL_CONFIG GENMASK(15, 0)
+#define MICFIL_DC_CHX_SHIFT(ch) ((ch) << 1)
+#define MICFIL_DC_CHX(ch) GENMASK((((ch) << 1) + 1), ((ch) << 1))
+#define MICFIL_DC_CUTOFF_21HZ 0
+#define MICFIL_DC_CUTOFF_83HZ 1
+#define MICFIL_DC_CUTOFF_152Hz 2
+#define MICFIL_DC_BYPASS 3
+
/* MICFIL HWVAD0 Control 1 Register -- REG_MICFIL_VAD0_CTRL1*/
#define MICFIL_VAD0_CTRL1_CHSEL GENMASK(26, 24)
#define MICFIL_VAD0_CTRL1_CICOSR GENMASK(19, 16)
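For reference, here is a minimal stand-alone C sketch (user space, not part of the patch) of how the new DC-remover defines combine into the value written to REG_MICFIL_DC_CTRL in fsl_micfil_dai_probe() above; the channel count of 8 is an assumption for illustration, the macro values mirror the header.

#include <stdio.h>
#include <stdint.h>

#define MICFIL_DC_CHX_SHIFT(ch)	((ch) << 1)	/* 2 control bits per channel */
#define MICFIL_DC_BYPASS	3		/* bypass the DC remover */
#define NUM_CHANNELS		8		/* assumed output channel count */

int main(void)
{
	uint32_t val = 0;
	int i;

	/* put every channel's DC remover in bypass mode */
	for (i = 0; i < NUM_CHANNELS; i++)
		val |= MICFIL_DC_BYPASS << MICFIL_DC_CHX_SHIFT(i);

	printf("DC_CTRL config = 0x%04x\n", (unsigned int)val);	/* prints 0xffff */
	return 0;
}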
diff --git a/sound/soc/fsl/fsl_mqs.c b/sound/soc/fsl/fsl_mqs.c
index ceaecbe3a25e..c1e2f671191b 100644
--- a/sound/soc/fsl/fsl_mqs.c
+++ b/sound/soc/fsl/fsl_mqs.c
@@ -10,6 +10,7 @@
#include <linux/moduleparam.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/pm.h>
@@ -29,15 +30,41 @@
#define MQS_CLK_DIV_MASK (0xFF << 0)
#define MQS_CLK_DIV_SHIFT (0)
+/**
+ * struct fsl_mqs_soc_data - soc specific data
+ *
+ * @use_gpr: control register is in General Purpose Register group
+ * @ctrl_off: control register offset
+ * @en_mask: enable bit mask
+ * @en_shift: enable bit shift
+ * @rst_mask: reset bit mask
+ * @rst_shift: reset bit shift
+ * @osr_mask: oversample bit mask
+ * @osr_shift: oversample bit shift
+ * @div_mask: clock divider mask
+ * @div_shift: clock divider bit shift
+ */
+struct fsl_mqs_soc_data {
+ bool use_gpr;
+ int ctrl_off;
+ int en_mask;
+ int en_shift;
+ int rst_mask;
+ int rst_shift;
+ int osr_mask;
+ int osr_shift;
+ int div_mask;
+ int div_shift;
+};
+
/* codec private data */
struct fsl_mqs {
struct regmap *regmap;
struct clk *mclk;
struct clk *ipg;
+ const struct fsl_mqs_soc_data *soc;
- unsigned int reg_iomuxc_gpr2;
unsigned int reg_mqs_ctrl;
- bool use_gpr;
};
#define FSL_MQS_RATES (SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000)
@@ -65,19 +92,11 @@ static int fsl_mqs_hw_params(struct snd_pcm_substream *substream,
res = mclk_rate % (32 * lrclk * 2 * 8);
if (res == 0 && div > 0 && div <= 256) {
- if (mqs_priv->use_gpr) {
- regmap_update_bits(mqs_priv->regmap, IOMUXC_GPR2,
- IMX6SX_GPR2_MQS_CLK_DIV_MASK,
- (div - 1) << IMX6SX_GPR2_MQS_CLK_DIV_SHIFT);
- regmap_update_bits(mqs_priv->regmap, IOMUXC_GPR2,
- IMX6SX_GPR2_MQS_OVERSAMPLE_MASK, 0);
- } else {
- regmap_update_bits(mqs_priv->regmap, REG_MQS_CTRL,
- MQS_CLK_DIV_MASK,
- (div - 1) << MQS_CLK_DIV_SHIFT);
- regmap_update_bits(mqs_priv->regmap, REG_MQS_CTRL,
- MQS_OVERSAMPLE_MASK, 0);
- }
+ regmap_update_bits(mqs_priv->regmap, mqs_priv->soc->ctrl_off,
+ mqs_priv->soc->div_mask,
+ (div - 1) << mqs_priv->soc->div_shift);
+ regmap_update_bits(mqs_priv->regmap, mqs_priv->soc->ctrl_off,
+ mqs_priv->soc->osr_mask, 0);
} else {
dev_err(component->dev, "can't get proper divider\n");
}
@@ -103,7 +122,7 @@ static int fsl_mqs_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
}
switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
- case SND_SOC_DAIFMT_CBC_CFC:
+ case SND_SOC_DAIFMT_BP_FP:
break;
default:
return -EINVAL;
@@ -118,14 +137,9 @@ static int fsl_mqs_startup(struct snd_pcm_substream *substream,
struct snd_soc_component *component = dai->component;
struct fsl_mqs *mqs_priv = snd_soc_component_get_drvdata(component);
- if (mqs_priv->use_gpr)
- regmap_update_bits(mqs_priv->regmap, IOMUXC_GPR2,
- IMX6SX_GPR2_MQS_EN_MASK,
- 1 << IMX6SX_GPR2_MQS_EN_SHIFT);
- else
- regmap_update_bits(mqs_priv->regmap, REG_MQS_CTRL,
- MQS_EN_MASK,
- 1 << MQS_EN_SHIFT);
+ regmap_update_bits(mqs_priv->regmap, mqs_priv->soc->ctrl_off,
+ mqs_priv->soc->en_mask,
+ 1 << mqs_priv->soc->en_shift);
return 0;
}
@@ -135,17 +149,12 @@ static void fsl_mqs_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_component *component = dai->component;
struct fsl_mqs *mqs_priv = snd_soc_component_get_drvdata(component);
- if (mqs_priv->use_gpr)
- regmap_update_bits(mqs_priv->regmap, IOMUXC_GPR2,
- IMX6SX_GPR2_MQS_EN_MASK, 0);
- else
- regmap_update_bits(mqs_priv->regmap, REG_MQS_CTRL,
- MQS_EN_MASK, 0);
+ regmap_update_bits(mqs_priv->regmap, mqs_priv->soc->ctrl_off,
+ mqs_priv->soc->en_mask, 0);
}
static const struct snd_soc_component_driver soc_codec_fsl_mqs = {
.idle_bias_on = 1,
- .non_legacy_dai_naming = 1,
};
static const struct snd_soc_dai_ops fsl_mqs_dai_ops = {
@@ -191,12 +200,9 @@ static int fsl_mqs_probe(struct platform_device *pdev)
* But in i.MX8QM/i.MX8QXP the control register is moved
* to its own domain.
*/
- if (of_device_is_compatible(np, "fsl,imx8qm-mqs"))
- mqs_priv->use_gpr = false;
- else
- mqs_priv->use_gpr = true;
+ mqs_priv->soc = of_device_get_match_data(&pdev->dev);
- if (mqs_priv->use_gpr) {
+ if (mqs_priv->soc->use_gpr) {
gpr_np = of_parse_phandle(np, "gpr", 0);
if (!gpr_np) {
dev_err(&pdev->dev, "failed to get gpr node by phandle\n");
@@ -280,12 +286,7 @@ static int fsl_mqs_runtime_resume(struct device *dev)
return ret;
}
- if (mqs_priv->use_gpr)
- regmap_write(mqs_priv->regmap, IOMUXC_GPR2,
- mqs_priv->reg_iomuxc_gpr2);
- else
- regmap_write(mqs_priv->regmap, REG_MQS_CTRL,
- mqs_priv->reg_mqs_ctrl);
+ regmap_write(mqs_priv->regmap, mqs_priv->soc->ctrl_off, mqs_priv->reg_mqs_ctrl);
return 0;
}
@@ -293,12 +294,7 @@ static int fsl_mqs_runtime_suspend(struct device *dev)
{
struct fsl_mqs *mqs_priv = dev_get_drvdata(dev);
- if (mqs_priv->use_gpr)
- regmap_read(mqs_priv->regmap, IOMUXC_GPR2,
- &mqs_priv->reg_iomuxc_gpr2);
- else
- regmap_read(mqs_priv->regmap, REG_MQS_CTRL,
- &mqs_priv->reg_mqs_ctrl);
+ regmap_read(mqs_priv->regmap, mqs_priv->soc->ctrl_off, &mqs_priv->reg_mqs_ctrl);
clk_disable_unprepare(mqs_priv->mclk);
clk_disable_unprepare(mqs_priv->ipg);
@@ -315,9 +311,49 @@ static const struct dev_pm_ops fsl_mqs_pm_ops = {
pm_runtime_force_resume)
};
+static const struct fsl_mqs_soc_data fsl_mqs_imx8qm_data = {
+ .use_gpr = false,
+ .ctrl_off = REG_MQS_CTRL,
+ .en_mask = MQS_EN_MASK,
+ .en_shift = MQS_EN_SHIFT,
+ .rst_mask = MQS_SW_RST_MASK,
+ .rst_shift = MQS_SW_RST_SHIFT,
+ .osr_mask = MQS_OVERSAMPLE_MASK,
+ .osr_shift = MQS_OVERSAMPLE_SHIFT,
+ .div_mask = MQS_CLK_DIV_MASK,
+ .div_shift = MQS_CLK_DIV_SHIFT,
+};
+
+static const struct fsl_mqs_soc_data fsl_mqs_imx6sx_data = {
+ .use_gpr = true,
+ .ctrl_off = IOMUXC_GPR2,
+ .en_mask = IMX6SX_GPR2_MQS_EN_MASK,
+ .en_shift = IMX6SX_GPR2_MQS_EN_SHIFT,
+ .rst_mask = IMX6SX_GPR2_MQS_SW_RST_MASK,
+ .rst_shift = IMX6SX_GPR2_MQS_SW_RST_SHIFT,
+ .osr_mask = IMX6SX_GPR2_MQS_OVERSAMPLE_MASK,
+ .osr_shift = IMX6SX_GPR2_MQS_OVERSAMPLE_SHIFT,
+ .div_mask = IMX6SX_GPR2_MQS_CLK_DIV_MASK,
+ .div_shift = IMX6SX_GPR2_MQS_CLK_DIV_SHIFT,
+};
+
+static const struct fsl_mqs_soc_data fsl_mqs_imx93_data = {
+ .use_gpr = true,
+ .ctrl_off = 0x20,
+ .en_mask = BIT(1),
+ .en_shift = 1,
+ .rst_mask = BIT(2),
+ .rst_shift = 2,
+ .osr_mask = BIT(3),
+ .osr_shift = 3,
+ .div_mask = GENMASK(15, 8),
+ .div_shift = 8,
+};
+
static const struct of_device_id fsl_mqs_dt_ids[] = {
- { .compatible = "fsl,imx8qm-mqs", },
- { .compatible = "fsl,imx6sx-mqs", },
+ { .compatible = "fsl,imx8qm-mqs", .data = &fsl_mqs_imx8qm_data },
+ { .compatible = "fsl,imx6sx-mqs", .data = &fsl_mqs_imx6sx_data },
+ { .compatible = "fsl,imx93-mqs", .data = &fsl_mqs_imx93_data },
{}
};
MODULE_DEVICE_TABLE(of, fsl_mqs_dt_ids);
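The fsl_mqs rework above replaces scattered use_gpr branches with a per-SoC data table resolved through of_device_get_match_data(), so each register access is written once against the table. A small stand-alone sketch of the same pattern (register offsets and bit positions here are illustrative, not the real MQS layout):

#include <stdio.h>

struct soc_data {
	unsigned int ctrl_off;	/* control register offset (bytes) */
	unsigned int en_mask;	/* enable bit mask */
	unsigned int en_shift;	/* enable bit shift */
};

/* illustrative per-SoC layouts */
static const struct soc_data soc_a = { .ctrl_off = 0x00, .en_mask = 1u << 28, .en_shift = 28 };
static const struct soc_data soc_b = { .ctrl_off = 0x08, .en_mask = 1u << 25, .en_shift = 25 };

/* single enable path, parameterized by the table instead of if/else chains */
static void mqs_enable(unsigned int *regs, const struct soc_data *soc, int on)
{
	regs[soc->ctrl_off / 4] &= ~soc->en_mask;
	if (on)
		regs[soc->ctrl_off / 4] |= 1u << soc->en_shift;
}

int main(void)
{
	unsigned int regs[16] = { 0 };

	mqs_enable(regs, &soc_a, 1);
	mqs_enable(regs, &soc_b, 1);
	printf("reg[0x00]=0x%08x reg[0x08]=0x%08x\n", regs[0], regs[2]);
	return 0;
}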
diff --git a/sound/soc/fsl/fsl_rpmsg.c b/sound/soc/fsl/fsl_rpmsg.c
index 19fd31250883..bf94838bdbef 100644
--- a/sound/soc/fsl/fsl_rpmsg.c
+++ b/sound/soc/fsl/fsl_rpmsg.c
@@ -135,7 +135,8 @@ static struct snd_soc_dai_driver fsl_rpmsg_dai = {
};
static const struct snd_soc_component_driver fsl_component = {
- .name = "fsl-rpmsg",
+ .name = "fsl-rpmsg",
+ .legacy_dai_naming = 1,
};
static const struct fsl_rpmsg_soc_data imx7ulp_data = {
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
index e765da9a19e7..7523bb944b21 100644
--- a/sound/soc/fsl/fsl_sai.c
+++ b/sound/soc/fsl/fsl_sai.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
@@ -22,6 +23,7 @@
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include "fsl_sai.h"
+#include "fsl_utils.h"
#include "imx-pcm.h"
#define FSL_SAI_FLAGS (FSL_SAI_CSR_SEIE |\
@@ -30,7 +32,8 @@
static const unsigned int fsl_sai_rates[] = {
8000, 11025, 12000, 16000, 22050,
24000, 32000, 44100, 48000, 64000,
- 88200, 96000, 176400, 192000
+ 88200, 96000, 176400, 192000, 352800,
+ 384000, 705600, 768000, 1411200, 2822400,
};
static const struct snd_pcm_hw_constraint_list fsl_sai_rate_constraints = {
@@ -56,6 +59,31 @@ static inline bool fsl_sai_dir_is_synced(struct fsl_sai *sai, int dir)
return !sai->synchronous[dir] && sai->synchronous[adir];
}
+static struct pinctrl_state *fsl_sai_get_pins_state(struct fsl_sai *sai, u32 bclk)
+{
+ struct pinctrl_state *state = NULL;
+
+ if (sai->is_pdm_mode) {
+ /* DSD512@44.1kHz, DSD512@48kHz */
+ if (bclk >= 22579200)
+ state = pinctrl_lookup_state(sai->pinctrl, "dsd512");
+
+ /* Get default DSD state */
+ if (IS_ERR_OR_NULL(state))
+ state = pinctrl_lookup_state(sai->pinctrl, "dsd");
+ } else {
+ /* 706k32b2c, 768k32b2c, etc */
+ if (bclk >= 45158400)
+ state = pinctrl_lookup_state(sai->pinctrl, "pcm_b2m");
+ }
+
+ /* Get default state */
+ if (IS_ERR_OR_NULL(state))
+ state = pinctrl_lookup_state(sai->pinctrl, "default");
+
+ return state;
+}
+
static irqreturn_t fsl_sai_isr(int irq, void *devid)
{
struct fsl_sai *sai = (struct fsl_sai *)devid;
@@ -193,14 +221,48 @@ static int fsl_sai_set_dai_sysclk_tr(struct snd_soc_dai *cpu_dai,
return 0;
}
+static int fsl_sai_set_mclk_rate(struct snd_soc_dai *dai, int clk_id, unsigned int freq)
+{
+ struct fsl_sai *sai = snd_soc_dai_get_drvdata(dai);
+ int ret;
+
+ fsl_asoc_reparent_pll_clocks(dai->dev, sai->mclk_clk[clk_id],
+ sai->pll8k_clk, sai->pll11k_clk, freq);
+
+ ret = clk_set_rate(sai->mclk_clk[clk_id], freq);
+ if (ret < 0)
+ dev_err(dai->dev, "failed to set clock rate (%u): %d\n", freq, ret);
+
+ return ret;
+}
+
static int fsl_sai_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
int clk_id, unsigned int freq, int dir)
{
+ struct fsl_sai *sai = snd_soc_dai_get_drvdata(cpu_dai);
int ret;
if (dir == SND_SOC_CLOCK_IN)
return 0;
+ if (freq > 0 && clk_id != FSL_SAI_CLK_BUS) {
+ if (clk_id < 0 || clk_id >= FSL_SAI_MCLK_MAX) {
+ dev_err(cpu_dai->dev, "Unknown clock id: %d\n", clk_id);
+ return -EINVAL;
+ }
+
+ if (IS_ERR_OR_NULL(sai->mclk_clk[clk_id])) {
+ dev_err(cpu_dai->dev, "Unassigned clock: %d\n", clk_id);
+ return -EINVAL;
+ }
+
+ if (sai->mclk_streams == 0) {
+ ret = fsl_sai_set_mclk_rate(cpu_dai, clk_id, freq);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
ret = fsl_sai_set_dai_sysclk_tr(cpu_dai, clk_id, freq, true);
if (ret) {
dev_err(cpu_dai->dev, "Cannot set tx sysclk: %d\n", ret);
@@ -224,6 +286,7 @@ static int fsl_sai_set_dai_fmt_tr(struct snd_soc_dai *cpu_dai,
if (!sai->is_lsb_first)
val_cr4 |= FSL_SAI_CR4_MF;
+ sai->is_pdm_mode = false;
/* DAI mode */
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
@@ -262,6 +325,11 @@ static int fsl_sai_set_dai_fmt_tr(struct snd_soc_dai *cpu_dai,
val_cr2 |= FSL_SAI_CR2_BCP;
sai->is_dsp_mode = true;
break;
+ case SND_SOC_DAIFMT_PDM:
+ val_cr2 |= FSL_SAI_CR2_BCP;
+ val_cr4 &= ~FSL_SAI_CR4_MF;
+ sai->is_pdm_mode = true;
+ break;
case SND_SOC_DAIFMT_RIGHT_J:
/* To be done */
default:
@@ -292,19 +360,19 @@ static int fsl_sai_set_dai_fmt_tr(struct snd_soc_dai *cpu_dai,
/* DAI clock provider masks */
switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
- case SND_SOC_DAIFMT_CBC_CFC:
+ case SND_SOC_DAIFMT_BP_FP:
val_cr2 |= FSL_SAI_CR2_BCD_MSTR;
val_cr4 |= FSL_SAI_CR4_FSD_MSTR;
sai->is_consumer_mode = false;
break;
- case SND_SOC_DAIFMT_CBP_CFP:
+ case SND_SOC_DAIFMT_BC_FC:
sai->is_consumer_mode = true;
break;
- case SND_SOC_DAIFMT_CBC_CFP:
+ case SND_SOC_DAIFMT_BP_FC:
val_cr2 |= FSL_SAI_CR2_BCD_MSTR;
sai->is_consumer_mode = false;
break;
- case SND_SOC_DAIFMT_CBP_CFC:
+ case SND_SOC_DAIFMT_BC_FP:
val_cr4 |= FSL_SAI_CR4_FSD_MSTR;
sai->is_consumer_mode = true;
break;
@@ -437,6 +505,12 @@ static int fsl_sai_set_bclk(struct snd_soc_dai *dai, bool tx, u32 freq)
FSL_SAI_CR2_DIV_MASK | FSL_SAI_CR2_BYP,
savediv / 2 - 1);
+ if (sai->soc_data->max_register >= FSL_SAI_MCTL) {
+ /* SAI is in master mode at this point, so enable MCLK */
+ regmap_update_bits(sai->regmap, FSL_SAI_MCTL,
+ FSL_SAI_MCTL_MCLK_EN, FSL_SAI_MCTL_MCLK_EN);
+ }
+
return 0;
}
@@ -448,13 +522,18 @@ static int fsl_sai_hw_params(struct snd_pcm_substream *substream,
unsigned int ofs = sai->soc_data->reg_offset;
bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
unsigned int channels = params_channels(params);
+ struct snd_dmaengine_dai_dma_data *dma_params;
+ struct fsl_sai_dl_cfg *dl_cfg = sai->dl_cfg;
u32 word_width = params_width(params);
+ int trce_mask = 0, dl_cfg_idx = 0;
+ int dl_cfg_cnt = sai->dl_cfg_cnt;
+ u32 dl_type = FSL_SAI_DL_I2S;
u32 val_cr4 = 0, val_cr5 = 0;
u32 slots = (channels == 1) ? 2 : channels;
u32 slot_width = word_width;
int adir = tx ? RX : TX;
- u32 pins;
- int ret;
+ u32 pins, bclk;
+ int ret, i;
if (sai->slots)
slots = sai->slots;
@@ -464,15 +543,42 @@ static int fsl_sai_hw_params(struct snd_pcm_substream *substream,
pins = DIV_ROUND_UP(channels, slots);
+ /*
+ * In PDM mode the channels are independent:
+ * each channel is on its own dataline/FIFO.
+ */
+ if (sai->is_pdm_mode) {
+ pins = channels;
+ dl_type = FSL_SAI_DL_PDM;
+ }
+
+ for (i = 0; i < dl_cfg_cnt; i++) {
+ if (dl_cfg[i].type == dl_type && dl_cfg[i].pins[tx] == pins) {
+ dl_cfg_idx = i;
+ break;
+ }
+ }
+
+ if (hweight8(dl_cfg[dl_cfg_idx].mask[tx]) < pins) {
+ dev_err(cpu_dai->dev, "channel not supported\n");
+ return -EINVAL;
+ }
+
+ bclk = params_rate(params) * (sai->bclk_ratio ? sai->bclk_ratio : slots * slot_width);
+
+ if (!IS_ERR_OR_NULL(sai->pinctrl)) {
+ sai->pins_state = fsl_sai_get_pins_state(sai, bclk);
+ if (!IS_ERR_OR_NULL(sai->pins_state)) {
+ ret = pinctrl_select_state(sai->pinctrl, sai->pins_state);
+ if (ret) {
+ dev_err(cpu_dai->dev, "failed to set proper pins state: %d\n", ret);
+ return ret;
+ }
+ }
+ }
+
if (!sai->is_consumer_mode) {
- if (sai->bclk_ratio)
- ret = fsl_sai_set_bclk(cpu_dai, tx,
- sai->bclk_ratio *
- params_rate(params));
- else
- ret = fsl_sai_set_bclk(cpu_dai, tx,
- slots * slot_width *
- params_rate(params));
+ ret = fsl_sai_set_bclk(cpu_dai, tx, bclk);
if (ret)
return ret;
@@ -486,13 +592,13 @@ static int fsl_sai_hw_params(struct snd_pcm_substream *substream,
}
}
- if (!sai->is_dsp_mode)
+ if (!sai->is_dsp_mode && !sai->is_pdm_mode)
val_cr4 |= FSL_SAI_CR4_SYWD(slot_width);
val_cr5 |= FSL_SAI_CR5_WNW(slot_width);
val_cr5 |= FSL_SAI_CR5_W0W(slot_width);
- if (sai->is_lsb_first)
+ if (sai->is_lsb_first || sai->is_pdm_mode)
val_cr5 |= FSL_SAI_CR5_FBT(0);
else
val_cr5 |= FSL_SAI_CR5_FBT(word_width - 1);
@@ -519,13 +625,28 @@ static int fsl_sai_hw_params(struct snd_pcm_substream *substream,
FSL_SAI_CR5_FBT_MASK, val_cr5);
}
- if (sai->soc_data->pins > 1)
+ if (hweight8(dl_cfg[dl_cfg_idx].mask[tx]) <= 1)
+ regmap_update_bits(sai->regmap, FSL_SAI_xCR4(tx, ofs),
+ FSL_SAI_CR4_FCOMB_MASK, 0);
+ else
regmap_update_bits(sai->regmap, FSL_SAI_xCR4(tx, ofs),
FSL_SAI_CR4_FCOMB_MASK, FSL_SAI_CR4_FCOMB_SOFT);
+ dma_params = tx ? &sai->dma_params_tx : &sai->dma_params_rx;
+ dma_params->addr = sai->res->start + FSL_SAI_xDR0(tx) +
+ dl_cfg[dl_cfg_idx].start_off[tx] * 0x4;
+
+ /* Find a proper tcre setting */
+ for (i = 0; i < sai->soc_data->pins; i++) {
+ trce_mask = (1 << (i + 1)) - 1;
+ if (hweight8(dl_cfg[dl_cfg_idx].mask[tx] & trce_mask) == pins)
+ break;
+ }
+
regmap_update_bits(sai->regmap, FSL_SAI_xCR3(tx, ofs),
FSL_SAI_CR3_TRCE_MASK,
- FSL_SAI_CR3_TRCE((1 << pins) - 1));
+ FSL_SAI_CR3_TRCE((dl_cfg[dl_cfg_idx].mask[tx] & trce_mask)));
+
regmap_update_bits(sai->regmap, FSL_SAI_xCR4(tx, ofs),
FSL_SAI_CR4_SYWD_MASK | FSL_SAI_CR4_FRSZ_MASK |
FSL_SAI_CR4_CHMOD_MASK,
@@ -737,6 +858,23 @@ static int fsl_sai_dai_probe(struct snd_soc_dai *cpu_dai)
return 0;
}
+static int fsl_sai_dai_resume(struct snd_soc_component *component)
+{
+ struct fsl_sai *sai = snd_soc_component_get_drvdata(component);
+ struct device *dev = &sai->pdev->dev;
+ int ret;
+
+ if (!IS_ERR_OR_NULL(sai->pinctrl) && !IS_ERR_OR_NULL(sai->pins_state)) {
+ ret = pinctrl_select_state(sai->pinctrl, sai->pins_state);
+ if (ret) {
+ dev_err(dev, "failed to set proper pins state: %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static struct snd_soc_dai_driver fsl_sai_dai_template = {
.probe = fsl_sai_dai_probe,
.playback = {
@@ -744,7 +882,7 @@ static struct snd_soc_dai_driver fsl_sai_dai_template = {
.channels_min = 1,
.channels_max = 32,
.rate_min = 8000,
- .rate_max = 192000,
+ .rate_max = 2822400,
.rates = SNDRV_PCM_RATE_KNOT,
.formats = FSL_SAI_FORMATS,
},
@@ -753,7 +891,7 @@ static struct snd_soc_dai_driver fsl_sai_dai_template = {
.channels_min = 1,
.channels_max = 32,
.rate_min = 8000,
- .rate_max = 192000,
+ .rate_max = 2822400,
.rates = SNDRV_PCM_RATE_KNOT,
.formats = FSL_SAI_FORMATS,
},
@@ -761,7 +899,9 @@ static struct snd_soc_dai_driver fsl_sai_dai_template = {
};
static const struct snd_soc_component_driver fsl_component = {
- .name = "fsl-sai",
+ .name = "fsl-sai",
+ .resume = fsl_sai_dai_resume,
+ .legacy_dai_naming = 1,
};
static struct reg_default fsl_sai_reg_defaults_ofs0[] = {
@@ -998,30 +1138,142 @@ static int fsl_sai_check_version(struct device *dev)
return 0;
}
+/*
+ * Calculate the offset between the first two datalines; different
+ * offsets within one configuration are not supported.
+ */
+static unsigned int fsl_sai_calc_dl_off(unsigned long dl_mask)
+{
+ int fbidx, nbidx, offset;
+
+ fbidx = find_first_bit(&dl_mask, FSL_SAI_DL_NUM);
+ nbidx = find_next_bit(&dl_mask, FSL_SAI_DL_NUM, fbidx + 1);
+ offset = nbidx - fbidx - 1;
+
+ return (offset < 0 || offset >= (FSL_SAI_DL_NUM - 1) ? 0 : offset);
+}
+
+/*
+ * Read the fsl,dataline property from the DT. Each configuration has
+ * three values: the first is the type, I2S (1) or PDM (2); the second
+ * is the dataline mask for 'rx'; the third is the dataline mask for
+ * 'tx'. For example:
+ *
+ * fsl,dataline = <1 0xff 0xff 2 0xff 0x11>,
+ *
+ * means the I2S-type rx mask is 0xff and tx mask is 0xff, while the
+ * PDM-type rx mask is 0xff and tx mask is 0x11 (datalines 1 and 4 enabled).
+ *
+ */
+static int fsl_sai_read_dlcfg(struct fsl_sai *sai)
+{
+ struct platform_device *pdev = sai->pdev;
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ int ret, elems, i, index, num_cfg;
+ char *propname = "fsl,dataline";
+ struct fsl_sai_dl_cfg *cfg;
+ unsigned long dl_mask;
+ unsigned int soc_dl;
+ u32 rx, tx, type;
+
+ elems = of_property_count_u32_elems(np, propname);
+
+ if (elems <= 0) {
+ elems = 0;
+ } else if (elems % 3) {
+ dev_err(dev, "Number of elements must be divisible to 3.\n");
+ return -EINVAL;
+ }
+
+ num_cfg = elems / 3;
+ /* Add one more for default value */
+ cfg = devm_kzalloc(&pdev->dev, (num_cfg + 1) * sizeof(*cfg), GFP_KERNEL);
+ if (!cfg)
+ return -ENOMEM;
+
+ /* Consider default value "0 0xFF 0xFF" if property is missing */
+ soc_dl = BIT(sai->soc_data->pins) - 1;
+ cfg[0].type = FSL_SAI_DL_DEFAULT;
+ cfg[0].pins[0] = sai->soc_data->pins;
+ cfg[0].mask[0] = soc_dl;
+ cfg[0].start_off[0] = 0;
+ cfg[0].next_off[0] = 0;
+
+ cfg[0].pins[1] = sai->soc_data->pins;
+ cfg[0].mask[1] = soc_dl;
+ cfg[0].start_off[1] = 0;
+ cfg[0].next_off[1] = 0;
+ for (i = 1, index = 0; i < num_cfg + 1; i++) {
+ /*
+ * type of dataline
+ * 0 means default mode
+ * 1 means I2S mode
+ * 2 means PDM mode
+ */
+ ret = of_property_read_u32_index(np, propname, index++, &type);
+ if (ret)
+ return -EINVAL;
+
+ ret = of_property_read_u32_index(np, propname, index++, &rx);
+ if (ret)
+ return -EINVAL;
+
+ ret = of_property_read_u32_index(np, propname, index++, &tx);
+ if (ret)
+ return -EINVAL;
+
+ if ((rx & ~soc_dl) || (tx & ~soc_dl)) {
+ dev_err(dev, "dataline cfg[%d] setting error, mask is 0x%x\n", i, soc_dl);
+ return -EINVAL;
+ }
+
+ rx = rx & soc_dl;
+ tx = tx & soc_dl;
+
+ cfg[i].type = type;
+ cfg[i].pins[0] = hweight8(rx);
+ cfg[i].mask[0] = rx;
+ dl_mask = rx;
+ cfg[i].start_off[0] = find_first_bit(&dl_mask, FSL_SAI_DL_NUM);
+ cfg[i].next_off[0] = fsl_sai_calc_dl_off(rx);
+
+ cfg[i].pins[1] = hweight8(tx);
+ cfg[i].mask[1] = tx;
+ dl_mask = tx;
+ cfg[i].start_off[1] = find_first_bit(&dl_mask, FSL_SAI_DL_NUM);
+ cfg[i].next_off[1] = fsl_sai_calc_dl_off(tx);
+ }
+
+ sai->dl_cfg = cfg;
+ sai->dl_cfg_cnt = num_cfg + 1;
+ return 0;
+}
+
static int fsl_sai_runtime_suspend(struct device *dev);
static int fsl_sai_runtime_resume(struct device *dev);
static int fsl_sai_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
struct fsl_sai *sai;
struct regmap *gpr;
- struct resource *res;
void __iomem *base;
char tmp[8];
int irq, ret, i;
int index;
- sai = devm_kzalloc(&pdev->dev, sizeof(*sai), GFP_KERNEL);
+ sai = devm_kzalloc(dev, sizeof(*sai), GFP_KERNEL);
if (!sai)
return -ENOMEM;
sai->pdev = pdev;
- sai->soc_data = of_device_get_match_data(&pdev->dev);
+ sai->soc_data = of_device_get_match_data(dev);
sai->is_lsb_first = of_property_read_bool(np, "lsb-first");
- base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &sai->res);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -1032,18 +1284,18 @@ static int fsl_sai_probe(struct platform_device *pdev)
ARRAY_SIZE(fsl_sai_reg_defaults_ofs8);
}
- sai->regmap = devm_regmap_init_mmio(&pdev->dev, base, &fsl_sai_regmap_config);
+ sai->regmap = devm_regmap_init_mmio(dev, base, &fsl_sai_regmap_config);
if (IS_ERR(sai->regmap)) {
- dev_err(&pdev->dev, "regmap init failed\n");
+ dev_err(dev, "regmap init failed\n");
return PTR_ERR(sai->regmap);
}
- sai->bus_clk = devm_clk_get(&pdev->dev, "bus");
+ sai->bus_clk = devm_clk_get(dev, "bus");
/* Compatible with old DTB cases */
if (IS_ERR(sai->bus_clk) && PTR_ERR(sai->bus_clk) != -EPROBE_DEFER)
- sai->bus_clk = devm_clk_get(&pdev->dev, "sai");
+ sai->bus_clk = devm_clk_get(dev, "sai");
if (IS_ERR(sai->bus_clk)) {
- dev_err(&pdev->dev, "failed to get bus clock: %ld\n",
+ dev_err(dev, "failed to get bus clock: %ld\n",
PTR_ERR(sai->bus_clk));
/* -EPROBE_DEFER */
return PTR_ERR(sai->bus_clk);
@@ -1051,9 +1303,9 @@ static int fsl_sai_probe(struct platform_device *pdev)
for (i = 1; i < FSL_SAI_MCLK_MAX; i++) {
sprintf(tmp, "mclk%d", i);
- sai->mclk_clk[i] = devm_clk_get(&pdev->dev, tmp);
+ sai->mclk_clk[i] = devm_clk_get(dev, tmp);
if (IS_ERR(sai->mclk_clk[i])) {
- dev_err(&pdev->dev, "failed to get mclk%d clock: %ld\n",
+ dev_err(dev, "failed to get mclk%d clock: %ld\n",
i + 1, PTR_ERR(sai->mclk_clk[i]));
sai->mclk_clk[i] = NULL;
}
@@ -1064,14 +1316,24 @@ static int fsl_sai_probe(struct platform_device *pdev)
else
sai->mclk_clk[0] = sai->bus_clk;
+ fsl_asoc_get_pll_clocks(&pdev->dev, &sai->pll8k_clk,
+ &sai->pll11k_clk);
+
+ /* read dataline masks for rx and tx */
+ ret = fsl_sai_read_dlcfg(sai);
+ if (ret < 0) {
+ dev_err(dev, "failed to read dlcfg %d\n", ret);
+ return ret;
+ }
+
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
- ret = devm_request_irq(&pdev->dev, irq, fsl_sai_isr, IRQF_SHARED,
+ ret = devm_request_irq(dev, irq, fsl_sai_isr, IRQF_SHARED,
np->name, sai);
if (ret) {
- dev_err(&pdev->dev, "failed to claim irq %u\n", irq);
+ dev_err(dev, "failed to claim irq %u\n", irq);
return ret;
}
@@ -1088,7 +1350,7 @@ static int fsl_sai_probe(struct platform_device *pdev)
if (of_find_property(np, "fsl,sai-synchronous-rx", NULL) &&
of_find_property(np, "fsl,sai-asynchronous", NULL)) {
/* error out if both synchronous and asynchronous are present */
- dev_err(&pdev->dev, "invalid binding for synchronous mode\n");
+ dev_err(dev, "invalid binding for synchronous mode\n");
return -EINVAL;
}
@@ -1109,7 +1371,7 @@ static int fsl_sai_probe(struct platform_device *pdev)
of_device_is_compatible(np, "fsl,imx6ul-sai")) {
gpr = syscon_regmap_lookup_by_compatible("fsl,imx6ul-iomuxc-gpr");
if (IS_ERR(gpr)) {
- dev_err(&pdev->dev, "cannot find iomuxc registers\n");
+ dev_err(dev, "cannot find iomuxc registers\n");
return PTR_ERR(gpr);
}
@@ -1121,29 +1383,29 @@ static int fsl_sai_probe(struct platform_device *pdev)
MCLK_DIR(index));
}
- sai->dma_params_rx.addr = res->start + FSL_SAI_RDR0;
- sai->dma_params_tx.addr = res->start + FSL_SAI_TDR0;
+ sai->dma_params_rx.addr = sai->res->start + FSL_SAI_RDR0;
+ sai->dma_params_tx.addr = sai->res->start + FSL_SAI_TDR0;
sai->dma_params_rx.maxburst = FSL_SAI_MAXBURST_RX;
sai->dma_params_tx.maxburst = FSL_SAI_MAXBURST_TX;
+ sai->pinctrl = devm_pinctrl_get(&pdev->dev);
+
platform_set_drvdata(pdev, sai);
- pm_runtime_enable(&pdev->dev);
- if (!pm_runtime_enabled(&pdev->dev)) {
- ret = fsl_sai_runtime_resume(&pdev->dev);
+ pm_runtime_enable(dev);
+ if (!pm_runtime_enabled(dev)) {
+ ret = fsl_sai_runtime_resume(dev);
if (ret)
goto err_pm_disable;
}
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&pdev->dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
goto err_pm_get_sync;
- }
/* Get sai version */
- ret = fsl_sai_check_version(&pdev->dev);
+ ret = fsl_sai_check_version(dev);
if (ret < 0)
- dev_warn(&pdev->dev, "Error reading SAI version: %d\n", ret);
+ dev_warn(dev, "Error reading SAI version: %d\n", ret);
/* Select MCLK direction */
if (of_find_property(np, "fsl,sai-mclk-direction-output", NULL) &&
@@ -1152,7 +1414,7 @@ static int fsl_sai_probe(struct platform_device *pdev)
FSL_SAI_MCTL_MCLK_EN, FSL_SAI_MCTL_MCLK_EN);
}
- ret = pm_runtime_put_sync(&pdev->dev);
+ ret = pm_runtime_put_sync(dev);
if (ret < 0)
goto err_pm_get_sync;
@@ -1162,15 +1424,18 @@ static int fsl_sai_probe(struct platform_device *pdev)
*/
if (sai->soc_data->use_imx_pcm) {
ret = imx_pcm_dma_init(pdev);
- if (ret)
+ if (ret) {
+ if (!IS_ENABLED(CONFIG_SND_SOC_IMX_PCM_DMA))
+ dev_err(dev, "Error: You must enable the imx-pcm-dma support!\n");
goto err_pm_get_sync;
+ }
} else {
- ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
+ ret = devm_snd_dmaengine_pcm_register(dev, NULL, 0);
if (ret)
goto err_pm_get_sync;
}
- ret = devm_snd_soc_register_component(&pdev->dev, &fsl_component,
+ ret = devm_snd_soc_register_component(dev, &fsl_component,
&sai->cpu_dai_drv, 1);
if (ret)
goto err_pm_get_sync;
@@ -1178,10 +1443,10 @@ static int fsl_sai_probe(struct platform_device *pdev)
return ret;
err_pm_get_sync:
- if (!pm_runtime_status_suspended(&pdev->dev))
- fsl_sai_runtime_suspend(&pdev->dev);
+ if (!pm_runtime_status_suspended(dev))
+ fsl_sai_runtime_suspend(dev);
err_pm_disable:
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_disable(dev);
return ret;
}
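The hw_params path above now computes the bit clock once (params_rate * slots * slot_width, or params_rate * bclk_ratio when a ratio is set) and uses it both for fsl_sai_set_bclk() and for choosing a pinctrl state. A simplified stand-alone sketch of the state choice; the thresholds come from fsl_sai_get_pins_state() above, but the fallback chain through missing "dsd512"/"dsd" states is collapsed here:

#include <stdio.h>

/* pick a pinctrl state name from the bit clock, PCM vs PDM (DSD) mode */
static const char *pick_pins_state(int pdm_mode, unsigned int bclk)
{
	if (pdm_mode)
		return bclk >= 22579200 ? "dsd512" : "dsd";
	return bclk >= 45158400 ? "pcm_b2m" : "default";
}

int main(void)
{
	unsigned int rate = 705600, slots = 2, slot_width = 32;
	unsigned int bclk = rate * slots * slot_width;	/* 45158400 */

	printf("PCM  bclk=%u -> %s\n", bclk, pick_pins_state(0, bclk));
	printf("PDM  bclk=%u -> %s\n", bclk, pick_pins_state(1, bclk));
	return 0;
}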
diff --git a/sound/soc/fsl/fsl_sai.h b/sound/soc/fsl/fsl_sai.h
index 1c8f5ca07f9d..17956b5731dc 100644
--- a/sound/soc/fsl/fsl_sai.h
+++ b/sound/soc/fsl/fsl_sai.h
@@ -11,7 +11,10 @@
#define FSL_SAI_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
SNDRV_PCM_FMTBIT_S20_3LE |\
SNDRV_PCM_FMTBIT_S24_LE |\
- SNDRV_PCM_FMTBIT_S32_LE)
+ SNDRV_PCM_FMTBIT_S32_LE |\
+ SNDRV_PCM_FMTBIT_DSD_U8 |\
+ SNDRV_PCM_FMTBIT_DSD_U16_LE |\
+ SNDRV_PCM_FMTBIT_DSD_U32_LE)
/* SAI Register Map Register */
#define FSL_SAI_VERID 0x00 /* SAI Version ID Register */
@@ -215,6 +218,13 @@
#define PMQOS_CPU_LATENCY BIT(0)
+/* Max number of datalines */
+#define FSL_SAI_DL_NUM (8)
+/* default dataline type is zero */
+#define FSL_SAI_DL_DEFAULT (0)
+#define FSL_SAI_DL_I2S BIT(0)
+#define FSL_SAI_DL_PDM BIT(1)
+
struct fsl_sai_soc_data {
bool use_imx_pcm;
bool use_edma;
@@ -250,16 +260,30 @@ struct fsl_sai_param {
u32 dataline;
};
+struct fsl_sai_dl_cfg {
+ unsigned int type;
+ unsigned int pins[2];
+ unsigned int mask[2];
+ unsigned int start_off[2];
+ unsigned int next_off[2];
+};
+
struct fsl_sai {
struct platform_device *pdev;
struct regmap *regmap;
struct clk *bus_clk;
struct clk *mclk_clk[FSL_SAI_MCLK_MAX];
+ struct clk *pll8k_clk;
+ struct clk *pll11k_clk;
+ struct resource *res;
bool is_consumer_mode;
bool is_lsb_first;
bool is_dsp_mode;
+ bool is_pdm_mode;
bool synchronous[2];
+ struct fsl_sai_dl_cfg *dl_cfg;
+ unsigned int dl_cfg_cnt;
unsigned int mclk_id[2];
unsigned int mclk_streams;
@@ -274,6 +298,8 @@ struct fsl_sai {
struct fsl_sai_verid verid;
struct fsl_sai_param param;
struct pm_qos_request pm_qos_req;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_state;
};
#define TX 1
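The dl_cfg fields added to struct fsl_sai are derived from one dataline mask per direction: pins is the popcount of the mask, start_off the index of the first set bit, and next_off the gap to the second set bit (see fsl_sai_read_dlcfg() and fsl_sai_calc_dl_off() in the .c file). A stand-alone sketch of that decode for a mask with bits 0 and 4 set, using ffs()/popcount in place of the kernel bitops:

#include <stdio.h>
#include <strings.h>

int main(void)
{
	unsigned int mask = 0x11;			/* bits 0 and 4 set */
	int first    = ffs(mask) - 1;			/* 0 */
	int second   = ffs(mask & ~(1u << first)) - 1;	/* 4 */
	int pins     = __builtin_popcount(mask);	/* 2 */
	int next_off = second - first - 1;		/* 3 */

	printf("pins=%d start_off=%d next_off=%d\n", pins, first, next_off);
	return 0;
}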
diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c
index 42d11aca38a1..7fc1c96929bb 100644
--- a/sound/soc/fsl/fsl_spdif.c
+++ b/sound/soc/fsl/fsl_spdif.c
@@ -23,6 +23,7 @@
#include <sound/soc.h>
#include "fsl_spdif.h"
+#include "fsl_utils.h"
#include "imx-pcm.h"
#define FSL_SPDIF_TXFIFO_WML 0x8
@@ -114,6 +115,8 @@ struct spdif_mixer_control {
* @dma_params_rx: DMA parameters for receive channel
* @regcache_srpc: regcache for SRPC
* @bypass: status of bypass input to output
+ * @pll8k_clk: PLL clock for rates that are multiples of 8kHz
+ * @pll11k_clk: PLL clock for rates that are multiples of 11kHz
*/
struct fsl_spdif_priv {
const struct fsl_spdif_soc_data *soc;
@@ -137,6 +140,8 @@ struct fsl_spdif_priv {
/* regcache for SRPC */
u32 regcache_srpc;
bool bypass;
+ struct clk *pll8k_clk;
+ struct clk *pll11k_clk;
};
static struct fsl_spdif_soc_data fsl_spdif_vf610 = {
@@ -480,6 +485,8 @@ static int spdif_set_rx_clksrc(struct fsl_spdif_priv *spdif_priv,
return 0;
}
+static int fsl_spdif_probe_txclk(struct fsl_spdif_priv *spdif_priv, enum spdif_txrate index);
+
static int spdif_set_sample_rate(struct snd_pcm_substream *substream,
int sample_rate)
{
@@ -528,6 +535,10 @@ static int spdif_set_sample_rate(struct snd_pcm_substream *substream,
return -EINVAL;
}
+ ret = fsl_spdif_probe_txclk(spdif_priv, rate);
+ if (ret)
+ return ret;
+
clk = spdif_priv->txclk_src[rate];
if (clk >= STC_TXCLK_SRC_MAX) {
dev_err(&pdev->dev, "tx clock source is out of range\n");
@@ -647,6 +658,29 @@ static void fsl_spdif_shutdown(struct snd_pcm_substream *substream,
}
}
+static int spdif_reparent_rootclk(struct fsl_spdif_priv *spdif_priv, unsigned int sample_rate)
+{
+ struct platform_device *pdev = spdif_priv->pdev;
+ struct clk *clk;
+ int ret;
+
+ /* Reparent the clock only if its rate is allowed to be changed */
+ if (!fsl_spdif_can_set_clk_rate(spdif_priv, STC_TXCLK_SPDIF_ROOT))
+ return 0;
+
+ /* Get root clock */
+ clk = spdif_priv->txclk[STC_TXCLK_SPDIF_ROOT];
+
+ /* Disable the clock first, because it was enabled by pm_runtime */
+ clk_disable_unprepare(clk);
+ fsl_asoc_reparent_pll_clocks(&pdev->dev, clk, spdif_priv->pll8k_clk,
+ spdif_priv->pll11k_clk, sample_rate);
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ return 0;
+}
static int fsl_spdif_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
@@ -659,6 +693,13 @@ static int fsl_spdif_hw_params(struct snd_pcm_substream *substream,
int ret = 0;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ ret = spdif_reparent_rootclk(spdif_priv, sample_rate);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: reparent root clk failed: %d\n",
+ __func__, sample_rate);
+ return ret;
+ }
+
ret = spdif_set_sample_rate(substream, sample_rate);
if (ret) {
dev_err(&pdev->dev, "%s: set sample rate failed: %d\n",
@@ -1237,7 +1278,8 @@ static struct snd_soc_dai_driver fsl_spdif_dai = {
};
static const struct snd_soc_component_driver fsl_spdif_component = {
- .name = "fsl-spdif",
+ .name = "fsl-spdif",
+ .legacy_dai_naming = 1,
};
/* FSL SPDIF REGMAP */
@@ -1547,11 +1589,8 @@ static int fsl_spdif_probe(struct platform_device *pdev)
}
spdif_priv->rxclk_src = DEFAULT_RXCLK_SRC;
- for (i = 0; i < SPDIF_TXRATE_MAX; i++) {
- ret = fsl_spdif_probe_txclk(spdif_priv, i);
- if (ret)
- return ret;
- }
+ fsl_asoc_get_pll_clocks(&pdev->dev, &spdif_priv->pll8k_clk,
+ &spdif_priv->pll11k_clk);
/* Initial spinlock for control data */
ctrl = &spdif_priv->fsl_spdif_control;
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 84cb36d9dfea..c9e0e31d5b34 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -93,7 +93,7 @@
*/
#define FSLSSI_AC97_DAIFMT \
(SND_SOC_DAIFMT_AC97 | \
- SND_SOC_DAIFMT_CBM_CFS | \
+ SND_SOC_DAIFMT_BC_FP | \
SND_SOC_DAIFMT_NB_NF)
#define FSLSSI_SIER_DBG_RX_FLAGS \
@@ -358,13 +358,13 @@ static bool fsl_ssi_is_ac97(struct fsl_ssi *ssi)
static bool fsl_ssi_is_i2s_clock_provider(struct fsl_ssi *ssi)
{
return (ssi->dai_fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) ==
- SND_SOC_DAIFMT_CBC_CFC;
+ SND_SOC_DAIFMT_BP_FP;
}
-static bool fsl_ssi_is_i2s_cbp_cfc(struct fsl_ssi *ssi)
+static bool fsl_ssi_is_i2s_bc_fp(struct fsl_ssi *ssi)
{
return (ssi->dai_fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) ==
- SND_SOC_DAIFMT_CBP_CFC;
+ SND_SOC_DAIFMT_BC_FP;
}
/**
@@ -847,7 +847,7 @@ static int fsl_ssi_hw_params(struct snd_pcm_substream *substream,
u8 i2s_net = ssi->i2s_net;
/* Normal + Network mode to send 16-bit data in 32-bit frames */
- if (fsl_ssi_is_i2s_cbp_cfc(ssi) && sample_size == 16)
+ if (fsl_ssi_is_i2s_bc_fp(ssi) && sample_size == 16)
i2s_net = SSI_SCR_I2S_MODE_NORMAL | SSI_SCR_NET;
/* Use Normal mode to send mono data at 1st slot of 2 slots */
@@ -920,17 +920,17 @@ static int _fsl_ssi_set_dai_fmt(struct fsl_ssi *ssi, unsigned int fmt)
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
- case SND_SOC_DAIFMT_CBC_CFC:
+ case SND_SOC_DAIFMT_BP_FP:
if (IS_ERR(ssi->baudclk)) {
dev_err(ssi->dev,
"missing baudclk for master mode\n");
return -EINVAL;
}
fallthrough;
- case SND_SOC_DAIFMT_CBP_CFC:
+ case SND_SOC_DAIFMT_BC_FP:
ssi->i2s_net |= SSI_SCR_I2S_MODE_MASTER;
break;
- case SND_SOC_DAIFMT_CBP_CFP:
+ case SND_SOC_DAIFMT_BC_FC:
ssi->i2s_net |= SSI_SCR_I2S_MODE_SLAVE;
break;
default:
@@ -992,15 +992,15 @@ static int _fsl_ssi_set_dai_fmt(struct fsl_ssi *ssi, unsigned int fmt)
/* DAI clock provider masks */
switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
- case SND_SOC_DAIFMT_CBC_CFC:
+ case SND_SOC_DAIFMT_BP_FP:
/* Output bit and frame sync clocks */
strcr |= SSI_STCR_TFDIR | SSI_STCR_TXDIR;
scr |= SSI_SCR_SYS_CLK_EN;
break;
- case SND_SOC_DAIFMT_CBP_CFP:
+ case SND_SOC_DAIFMT_BC_FC:
/* Input bit or frame sync clocks */
break;
- case SND_SOC_DAIFMT_CBP_CFC:
+ case SND_SOC_DAIFMT_BC_FP:
/* Input bit clock but output frame sync clock */
strcr |= SSI_STCR_TFDIR;
break;
@@ -1182,6 +1182,7 @@ static struct snd_soc_dai_driver fsl_ssi_dai_template = {
static const struct snd_soc_component_driver fsl_ssi_component = {
.name = "fsl-ssi",
+ .legacy_dai_naming = 1,
};
static struct snd_soc_dai_driver fsl_ssi_ac97_dai = {
diff --git a/sound/soc/fsl/fsl_utils.c b/sound/soc/fsl/fsl_utils.c
index 9bab202569af..d0fc430f7033 100644
--- a/sound/soc/fsl/fsl_utils.c
+++ b/sound/soc/fsl/fsl_utils.c
@@ -6,6 +6,8 @@
//
// Copyright 2010 Freescale Semiconductor, Inc.
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <sound/soc.h>
@@ -83,6 +85,73 @@ int fsl_asoc_get_dma_channel(struct device_node *ssi_np,
}
EXPORT_SYMBOL(fsl_asoc_get_dma_channel);
+/**
+ * fsl_asoc_get_pll_clocks - get the two PLL clock sources
+ *
+ * @dev: device pointer
+ * @pll8k_clk: PLL clock pointer for 8kHz
+ * @pll11k_clk: PLL clock pointer for 11kHz
+ *
+ * This function gets the two PLL clock sources
+ */
+void fsl_asoc_get_pll_clocks(struct device *dev, struct clk **pll8k_clk,
+ struct clk **pll11k_clk)
+{
+ *pll8k_clk = devm_clk_get(dev, "pll8k");
+ if (IS_ERR(*pll8k_clk))
+ *pll8k_clk = NULL;
+
+ *pll11k_clk = devm_clk_get(dev, "pll11k");
+ if (IS_ERR(*pll11k_clk))
+ *pll11k_clk = NULL;
+}
+EXPORT_SYMBOL(fsl_asoc_get_pll_clocks);
+
+/**
+ * fsl_asoc_reparent_pll_clocks - set clock parent if necessary
+ *
+ * @dev: device pointer
+ * @clk: root clock pointer
+ * @pll8k_clk: PLL clock pointer for 8kHz
+ * @pll11k_clk: PLL clock pointer for 11kHz
+ * @ratio: target frequency for the root clock
+ *
+ * This function sets the root clock parent according to the target ratio
+ */
+void fsl_asoc_reparent_pll_clocks(struct device *dev, struct clk *clk,
+ struct clk *pll8k_clk,
+ struct clk *pll11k_clk, u64 ratio)
+{
+ struct clk *p, *pll = NULL, *npll = NULL;
+ bool reparent = false;
+ int ret = 0;
+
+ if (!clk || !pll8k_clk || !pll11k_clk)
+ return;
+
+ p = clk;
+ while (p && pll8k_clk && pll11k_clk) {
+ struct clk *pp = clk_get_parent(p);
+
+ if (clk_is_match(pp, pll8k_clk) ||
+ clk_is_match(pp, pll11k_clk)) {
+ pll = pp;
+ break;
+ }
+ p = pp;
+ }
+
+ npll = (do_div(ratio, 8000) ? pll11k_clk : pll8k_clk);
+ reparent = (pll && !clk_is_match(pll, npll));
+
+ if (reparent) {
+ ret = clk_set_parent(p, npll);
+ if (ret < 0)
+ dev_warn(dev, "failed to set parent:%d\n", ret);
+ }
+}
+EXPORT_SYMBOL(fsl_asoc_reparent_pll_clocks);
+
MODULE_AUTHOR("Timur Tabi <timur@freescale.com>");
MODULE_DESCRIPTION("Freescale ASoC utility code");
MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/fsl/fsl_utils.h b/sound/soc/fsl/fsl_utils.h
index c5dc2a14b492..4d5f3d93bc81 100644
--- a/sound/soc/fsl/fsl_utils.h
+++ b/sound/soc/fsl/fsl_utils.h
@@ -19,4 +19,11 @@ int fsl_asoc_get_dma_channel(struct device_node *ssi_np, const char *name,
struct snd_soc_dai_link *dai,
unsigned int *dma_channel_id,
unsigned int *dma_id);
+
+void fsl_asoc_get_pll_clocks(struct device *dev, struct clk **pll8k_clk,
+ struct clk **pll11k_clk);
+
+void fsl_asoc_reparent_pll_clocks(struct device *dev, struct clk *clk,
+ struct clk *pll8k_clk,
+ struct clk *pll11k_clk, u64 ratio);
#endif /* _FSL_UTILS_H */
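fsl_asoc_reparent_pll_clocks() chooses between the two PLL parents by testing whether the target rate is a multiple of 8000: do_div(ratio, 8000) leaves a non-zero remainder for 44.1 kHz-family rates, which then take the 11 kHz PLL, while 8 kHz-family rates keep the 8 kHz PLL. A stand-alone sketch of just that decision:

#include <stdio.h>

static const char *pick_pll(unsigned long long rate)
{
	/* multiples of 8000 Hz -> pll8k, everything else -> pll11k */
	return (rate % 8000) ? "pll11k" : "pll8k";
}

int main(void)
{
	unsigned long long rates[] = { 48000, 44100, 192000, 176400, 2822400 };
	int i;

	for (i = 0; i < 5; i++)
		printf("%llu Hz -> %s\n", rates[i], pick_pll(rates[i]));
	return 0;
}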
diff --git a/sound/soc/fsl/fsl_xcvr.c b/sound/soc/fsl/fsl_xcvr.c
index d0556c79fdb1..c043efe4548d 100644
--- a/sound/soc/fsl/fsl_xcvr.c
+++ b/sound/soc/fsl/fsl_xcvr.c
@@ -911,7 +911,8 @@ static struct snd_soc_dai_driver fsl_xcvr_dai = {
};
static const struct snd_soc_component_driver fsl_xcvr_comp = {
- .name = "fsl-xcvr-dai",
+ .name = "fsl-xcvr-dai",
+ .legacy_dai_naming = 1,
};
static const struct reg_default fsl_xcvr_reg_defaults[] = {
@@ -1228,6 +1229,7 @@ static int fsl_xcvr_probe(struct platform_device *pdev)
*/
ret = devm_snd_dmaengine_pcm_register(dev, NULL, 0);
if (ret) {
+ pm_runtime_disable(dev);
dev_err(dev, "failed to pcm register\n");
return ret;
}
@@ -1235,6 +1237,7 @@ static int fsl_xcvr_probe(struct platform_device *pdev)
ret = devm_snd_soc_register_component(dev, &fsl_xcvr_comp,
&fsl_xcvr_dai, 1);
if (ret) {
+ pm_runtime_disable(dev);
dev_err(dev, "failed to register component %s\n",
fsl_xcvr_comp.name);
}
@@ -1242,6 +1245,12 @@ static int fsl_xcvr_probe(struct platform_device *pdev)
return ret;
}
+static int fsl_xcvr_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
static __maybe_unused int fsl_xcvr_runtime_suspend(struct device *dev)
{
struct fsl_xcvr *xcvr = dev_get_drvdata(dev);
@@ -1370,6 +1379,7 @@ static struct platform_driver fsl_xcvr_driver = {
.pm = &fsl_xcvr_pm_ops,
.of_match_table = fsl_xcvr_dt_ids,
},
+ .remove = fsl_xcvr_remove,
};
module_platform_driver(fsl_xcvr_driver);
diff --git a/sound/soc/fsl/imx-audmix.c b/sound/soc/fsl/imx-audmix.c
index 502fe1b522ab..1292a845c424 100644
--- a/sound/soc/fsl/imx-audmix.c
+++ b/sound/soc/fsl/imx-audmix.c
@@ -81,7 +81,7 @@ static int imx_audmix_fe_hw_params(struct snd_pcm_substream *substream,
int ret, dir;
/* For playback the AUDMIX is consumer, and for record is provider */
- fmt |= tx ? SND_SOC_DAIFMT_CBC_CFC : SND_SOC_DAIFMT_CBP_CFP;
+ fmt |= tx ? SND_SOC_DAIFMT_BP_FP : SND_SOC_DAIFMT_BC_FC;
dir = tx ? SND_SOC_CLOCK_OUT : SND_SOC_CLOCK_IN;
/* set DAI configuration */
@@ -122,7 +122,7 @@ static int imx_audmix_be_hw_params(struct snd_pcm_substream *substream,
return 0;
/* For playback the AUDMIX is consumer */
- fmt |= SND_SOC_DAIFMT_CBP_CFP;
+ fmt |= SND_SOC_DAIFMT_BC_FC;
/* set AUDMIX DAI configuration */
ret = snd_soc_dai_set_fmt(asoc_rtd_to_cpu(rtd, 0), fmt);
diff --git a/sound/soc/fsl/imx-audmux.c b/sound/soc/fsl/imx-audmux.c
index dfa05d40b276..50b71e5d4589 100644
--- a/sound/soc/fsl/imx-audmux.c
+++ b/sound/soc/fsl/imx-audmux.c
@@ -62,17 +62,14 @@ static ssize_t audmux_read_file(struct file *file, char __user *user_buf,
uintptr_t port = (uintptr_t)file->private_data;
u32 pdcr, ptcr;
- if (audmux_clk) {
- ret = clk_prepare_enable(audmux_clk);
- if (ret)
- return ret;
- }
+ ret = clk_prepare_enable(audmux_clk);
+ if (ret)
+ return ret;
ptcr = readl(audmux_base + IMX_AUDMUX_V2_PTCR(port));
pdcr = readl(audmux_base + IMX_AUDMUX_V2_PDCR(port));
- if (audmux_clk)
- clk_disable_unprepare(audmux_clk);
+ clk_disable_unprepare(audmux_clk);
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!buf)
@@ -209,17 +206,14 @@ int imx_audmux_v2_configure_port(unsigned int port, unsigned int ptcr,
if (!audmux_base)
return -ENOSYS;
- if (audmux_clk) {
- ret = clk_prepare_enable(audmux_clk);
- if (ret)
- return ret;
- }
+ ret = clk_prepare_enable(audmux_clk);
+ if (ret)
+ return ret;
writel(ptcr, audmux_base + IMX_AUDMUX_V2_PTCR(port));
writel(pdcr, audmux_base + IMX_AUDMUX_V2_PDCR(port));
- if (audmux_clk)
- clk_disable_unprepare(audmux_clk);
+ clk_disable_unprepare(audmux_clk);
return 0;
}
@@ -298,7 +292,7 @@ static int imx_audmux_probe(struct platform_device *pdev)
audmux_clk = NULL;
}
- audmux_type = (enum imx_audmux_type)of_device_get_match_data(&pdev->dev);
+ audmux_type = (uintptr_t)of_device_get_match_data(&pdev->dev);
switch (audmux_type) {
case IMX31_AUDMUX:
diff --git a/sound/soc/fsl/imx-card.c b/sound/soc/fsl/imx-card.c
index 6f8efd838fcc..14be29530fb5 100644
--- a/sound/soc/fsl/imx-card.c
+++ b/sound/soc/fsl/imx-card.c
@@ -17,6 +17,9 @@
#include "fsl_sai.h"
+#define IMX_CARD_MCLK_22P5792MHZ 22579200
+#define IMX_CARD_MCLK_24P576MHZ 24576000
+
enum codec_type {
CODEC_DUMMY = 0,
CODEC_AK5558 = 1,
@@ -115,7 +118,7 @@ struct imx_card_data {
struct snd_soc_card card;
int num_dapm_routes;
u32 asrc_rate;
- u32 asrc_format;
+ snd_pcm_format_t asrc_format;
};
static struct imx_akcodec_fs_mul ak4458_fs_mul[] = {
@@ -317,7 +320,7 @@ static int imx_aif_hw_params(struct snd_pcm_substream *substream,
}
}
- ret = snd_soc_dai_set_fmt(cpu_dai, fmt);
+ ret = snd_soc_dai_set_fmt(cpu_dai, snd_soc_daifmt_clock_provider_flipped(fmt));
if (ret && ret != -ENOTSUPP) {
dev_err(dev, "failed to set cpu dai fmt: %d\n", ret);
return ret;
@@ -353,9 +356,14 @@ static int imx_aif_hw_params(struct snd_pcm_substream *substream,
mclk_freq = akcodec_get_mclk_rate(substream, params, slots, slot_width);
else
mclk_freq = params_rate(params) * slots * slot_width;
- /* Use the maximum freq from DSD512 (512*44100 = 22579200) */
- if (format_is_dsd(params))
- mclk_freq = 22579200;
+
+ if (format_is_dsd(params)) {
+ /* Use the maximum freq from DSD512 (512*44100 = 22579200) */
+ if (!(params_rate(params) % 11025))
+ mclk_freq = IMX_CARD_MCLK_22P5792MHZ;
+ else
+ mclk_freq = IMX_CARD_MCLK_24P576MHZ;
+ }
ret = snd_soc_dai_set_sysclk(cpu_dai, link_data->cpu_sysclk_id, mclk_freq,
SND_SOC_CLOCK_OUT);
@@ -466,7 +474,7 @@ static int be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
mask = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
snd_mask_none(mask);
- snd_mask_set(mask, data->asrc_format);
+ snd_mask_set(mask, (__force unsigned int)data->asrc_format);
return 0;
}
@@ -485,6 +493,7 @@ static int imx_card_parse_of(struct imx_card_data *data)
struct dai_link_data *link_data;
struct of_phandle_args args;
int ret, num_links;
+ u32 asrc_fmt = 0;
u32 width;
ret = snd_soc_of_parse_card_name(card, "model");
@@ -631,7 +640,8 @@ static int imx_card_parse_of(struct imx_card_data *data)
goto err;
}
- ret = of_property_read_u32(args.np, "fsl,asrc-format", &data->asrc_format);
+ ret = of_property_read_u32(args.np, "fsl,asrc-format", &asrc_fmt);
+ data->asrc_format = (__force snd_pcm_format_t)asrc_fmt;
if (ret) {
/* Fallback to old binding; translate to asrc_format */
ret = of_property_read_u32(args.np, "fsl,asrc-width", &width);
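In imx-card above, the DAI format read from the DT describes the codec side of the link, so before applying it to the CPU DAI the clock-provider bits are inverted with snd_soc_daifmt_clock_provider_flipped(). A toy stand-alone model of that idea (an illustration only, not the real helper):

#include <stdio.h>

enum role { PROVIDER, CONSUMER };

/* the cpu side takes the opposite role of the codec side */
static enum role flipped(enum role codec_role)
{
	return codec_role == PROVIDER ? CONSUMER : PROVIDER;
}

int main(void)
{
	enum role codec_bclk = PROVIDER, codec_frame = PROVIDER;

	printf("cpu: bclk %s, frame %s\n",
	       flipped(codec_bclk) == PROVIDER ? "provider" : "consumer",
	       flipped(codec_frame) == PROVIDER ? "provider" : "consumer");
	return 0;
}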
diff --git a/sound/soc/fsl/mpc5200_psc_i2s.c b/sound/soc/fsl/mpc5200_psc_i2s.c
index 3149d59ae968..73f3e61f208a 100644
--- a/sound/soc/fsl/mpc5200_psc_i2s.c
+++ b/sound/soc/fsl/mpc5200_psc_i2s.c
@@ -148,7 +148,8 @@ static struct snd_soc_dai_driver psc_i2s_dai[] = {{
} };
static const struct snd_soc_component_driver psc_i2s_component = {
- .name = "mpc5200-i2s",
+ .name = "mpc5200-i2s",
+ .legacy_dai_naming = 1,
};
/* ---------------------------------------------------------------------
diff --git a/sound/soc/fsl/pcm030-audio-fabric.c b/sound/soc/fsl/pcm030-audio-fabric.c
index 83b4a22bf15a..997c3e66c636 100644
--- a/sound/soc/fsl/pcm030-audio-fabric.c
+++ b/sound/soc/fsl/pcm030-audio-fabric.c
@@ -101,8 +101,7 @@ static int pcm030_fabric_probe(struct platform_device *op)
ret = snd_soc_register_card(card);
if (ret) {
dev_err(&op->dev, "snd_soc_register_card() failed: %d\n", ret);
- platform_device_del(pdata->codec_device);
- platform_device_put(pdata->codec_device);
+ platform_device_unregister(pdata->codec_device);
}
platform_set_drvdata(op, pdata);
@@ -113,12 +112,11 @@ static int pcm030_fabric_probe(struct platform_device *op)
static int pcm030_fabric_remove(struct platform_device *op)
{
struct pcm030_audio_data *pdata = platform_get_drvdata(op);
- int ret;
- ret = snd_soc_unregister_card(pdata->card);
+ snd_soc_unregister_card(pdata->card);
platform_device_unregister(pdata->codec_device);
- return ret;
+ return 0;
}
static const struct of_device_id pcm030_audio_match[] = {
diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c
index 2b598af8feef..b327372f2e4a 100644
--- a/sound/soc/generic/audio-graph-card.c
+++ b/sound/soc/generic/audio-graph-card.c
@@ -158,8 +158,10 @@ static int asoc_simple_parse_dai(struct device_node *ep,
* if he unbinded CPU or Codec.
*/
ret = snd_soc_get_dai_name(&args, &dlc->dai_name);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(node);
return ret;
+ }
dlc->of_node = node;
diff --git a/sound/soc/generic/audio-graph-card2-custom-sample.dtsi b/sound/soc/generic/audio-graph-card2-custom-sample.dtsi
index 8eee7b821ff7..fe547c18771f 100644
--- a/sound/soc/generic/audio-graph-card2-custom-sample.dtsi
+++ b/sound/soc/generic/audio-graph-card2-custom-sample.dtsi
@@ -17,6 +17,23 @@
* CONFIG_SND_AUDIO_GRAPH_CARD2
* CONFIG_SND_AUDIO_GRAPH_CARD2_CUSTOM_SAMPLE
* CONFIG_SND_TEST_COMPONENT
+ *
+ *
+ * You can get more detailed debug output about each device's behavior if you
+ * modify "compatible" on each test-component; see below:
+ *
+ * test_cpu {
+ * - compatible = "test-cpu";
+ * + compatible = "test-cpu-verbose";
+ * ...
+ * };
+ *
+ * test_codec {
+ * - compatible = "test-codec";
+ * + compatible = "test-codec-verbose";
+ * ...
+ * };
+ *
*/
/ {
/*
@@ -101,35 +118,74 @@
"TC OUT", "TC DAI11 Playback",
"TC DAI9 Capture", "TC IN";
- links = <&cpu0 /* normal: cpu side only */
- &mcpu0 /* multi: cpu side only */
- &fe00 &fe01 &be0 /* dpcm: both FE / BE */
- &fe10 &fe11 &be1 /* dpcm-m: both FE / BE */
- &c2c /* c2c: cpu side only */
- &c2c_m /* c2c: cpu side only */
+ links = <
+ /*
+ * [Normal]: cpu side only
+ * cpu0/codec0
+ */
+ &cpu0
+
+ /*
+ * [Multi-CPU/Codec]: cpu side only
+ * cpu1/cpu2/codec1/codec2
+ */
+ &mcpu0
+
+ /*
+ * [DPCM]: both FE / BE
+ * cpu3/cpu4/codec3
+ */
+ &fe00 &fe01 &be0
+
+ /*
+ * [DPCM-Multi]: both FE / BE
+ * cpu5/cpu6/codec4/codec5
+ */
+ &fe10 &fe11 &be1
+
+ /*
+ * [Codec2Codec]: cpu side only
+ * codec6/codec7
+ */
+ &c2c
+
+ /*
+ * [Codec2Codec-Multi]: cpu side only
+ * codec8/codec9/codec10/codec11
+ */
+ &c2c_m
>;
multi {
ports@0 {
+ /* [Multi-CPU] */
mcpu0: port@0 { mcpu0_ep: endpoint { remote-endpoint = <&mcodec0_ep>; }; };
port@1 { mcpu1_ep: endpoint { remote-endpoint = <&cpu1_ep>; }; };
port@2 { mcpu2_ep: endpoint { remote-endpoint = <&cpu2_ep>; }; };
};
+
+ /* [Multi-Codec] */
ports@1 {
port@0 { mcodec0_ep: endpoint { remote-endpoint = <&mcpu0_ep>; }; };
port@1 { mcodec1_ep: endpoint { remote-endpoint = <&codec1_ep>; }; };
port@2 { mcodec2_ep: endpoint { remote-endpoint = <&codec2_ep>; }; };
};
+
+ /* [DPCM-Multi]::BE */
ports@2 {
port@0 { mbe_ep: endpoint { remote-endpoint = <&be10_ep>; }; };
port@1 { mbe1_ep: endpoint { remote-endpoint = <&codec4_ep>; }; };
port@2 { mbe2_ep: endpoint { remote-endpoint = <&codec5_ep>; }; };
};
+
+ /* [Codec2Codec-Multi]::CPU */
ports@3 {
port@0 { mc2c0_ep: endpoint { remote-endpoint = <&c2cmf_ep>; }; };
port@1 { mc2c00_ep: endpoint { remote-endpoint = <&codec8_ep>; }; };
port@2 { mc2c01_ep: endpoint { remote-endpoint = <&codec9_ep>; }; };
};
+
+ /* [Codec2Codec-Multi]::Codec */
ports@4 {
port@0 { mc2c1_ep: endpoint { remote-endpoint = <&c2cmb_ep>; }; };
port@1 { mc2c10_ep: endpoint { remote-endpoint = <&codec10_ep>; }; };
@@ -138,27 +194,36 @@
};
dpcm {
- /* FE */
ports@0 {
+ /* [DPCM]::FE */
fe00: port@0 { fe00_ep: endpoint { remote-endpoint = <&cpu3_ep>; }; };
fe01: port@1 { fe01_ep: endpoint { remote-endpoint = <&cpu4_ep>; }; };
+
+ /* [DPCM-Multi]::FE */
fe10: port@2 { fe10_ep: endpoint { remote-endpoint = <&cpu5_ep>; }; };
fe11: port@3 { fe11_ep: endpoint { remote-endpoint = <&cpu6_ep>; }; };
};
- /* BE */
+
ports@1 {
+ /* [DPCM]::BE */
be0: port@0 { be00_ep: endpoint { remote-endpoint = <&codec3_ep>; }; };
+
+ /* [DPCM-Multi]::BE */
be1: port@1 { be10_ep: endpoint { remote-endpoint = <&mbe_ep>; }; };
};
};
codec2codec {
+ /* [Codec2Codec] */
ports@0 {
- rate = <48000>;
+ /* use default settings */
c2c: port@0 { c2cf_ep: endpoint { remote-endpoint = <&codec6_ep>; }; };
port@1 { c2cb_ep: endpoint { remote-endpoint = <&codec7_ep>; }; };
};
+
+ /* [Codec2Codec-Multi] */
ports@1 {
+ /* use original settings */
rate = <48000>;
c2c_m: port@0 { c2cmf_ep: endpoint { remote-endpoint = <&mc2c0_ep>; }; };
port@1 { c2cmb_ep: endpoint { remote-endpoint = <&mc2c1_ep>; }; };
@@ -179,11 +244,18 @@
ports {
bitclock-master;
frame-master;
+ /* [Normal] */
cpu0: port@0 { cpu0_ep: endpoint { remote-endpoint = <&codec0_ep>; }; };
+
+ /* [Multi-CPU] */
port@1 { cpu1_ep: endpoint { remote-endpoint = <&mcpu1_ep>; }; };
port@2 { cpu2_ep: endpoint { remote-endpoint = <&mcpu2_ep>; }; };
+
+ /* [DPCM]::FE */
port@3 { cpu3_ep: endpoint { remote-endpoint = <&fe00_ep>; }; };
port@4 { cpu4_ep: endpoint { remote-endpoint = <&fe01_ep>; }; };
+
+ /* [DPCM-Multi]::FE */
port@5 { cpu5_ep: endpoint { remote-endpoint = <&fe10_ep>; }; };
port@6 { cpu6_ep: endpoint { remote-endpoint = <&fe11_ep>; }; };
};
@@ -206,16 +278,27 @@
*/
prefix = "TC";
+ /* [Normal] */
port@0 { codec0_ep: endpoint { remote-endpoint = <&cpu0_ep>; }; };
+
+ /* [Multi-Codec] */
port@1 { codec1_ep: endpoint { remote-endpoint = <&mcodec1_ep>; }; };
port@2 { codec2_ep: endpoint { remote-endpoint = <&mcodec2_ep>; }; };
+
+ /* [DPCM]::BE */
port@3 { codec3_ep: endpoint { remote-endpoint = <&be00_ep>; }; };
+
+ /* [DPCM-Multi]::BE */
port@4 { codec4_ep: endpoint { remote-endpoint = <&mbe1_ep>; }; };
port@5 { codec5_ep: endpoint { remote-endpoint = <&mbe2_ep>; }; };
+
+ /* [Codec2Codec] */
port@6 { bitclock-master;
frame-master;
codec6_ep: endpoint { remote-endpoint = <&c2cf_ep>; }; };
port@7 { codec7_ep: endpoint { remote-endpoint = <&c2cb_ep>; }; };
+
+ /* [Codec2Codec-Multi] */
port@8 { bitclock-master;
frame-master;
codec8_ep: endpoint { remote-endpoint = <&mc2c00_ep>; }; };
diff --git a/sound/soc/generic/audio-graph-card2.c b/sound/soc/generic/audio-graph-card2.c
index d34b29a49268..8ac6df645ee6 100644
--- a/sound/soc/generic/audio-graph-card2.c
+++ b/sound/soc/generic/audio-graph-card2.c
@@ -229,7 +229,8 @@ enum graph_type {
static enum graph_type __graph_get_type(struct device_node *lnk)
{
- struct device_node *np;
+ struct device_node *np, *parent_np;
+ enum graph_type ret;
/*
* target {
@@ -240,19 +241,33 @@ static enum graph_type __graph_get_type(struct device_node *lnk)
* };
*/
np = of_get_parent(lnk);
- if (of_node_name_eq(np, "ports"))
- np = of_get_parent(np);
+ if (of_node_name_eq(np, "ports")) {
+ parent_np = of_get_parent(np);
+ of_node_put(np);
+ np = parent_np;
+ }
- if (of_node_name_eq(np, GRAPH_NODENAME_MULTI))
- return GRAPH_MULTI;
+ if (of_node_name_eq(np, GRAPH_NODENAME_MULTI)) {
+ ret = GRAPH_MULTI;
+ goto out_put;
+ }
- if (of_node_name_eq(np, GRAPH_NODENAME_DPCM))
- return GRAPH_DPCM;
+ if (of_node_name_eq(np, GRAPH_NODENAME_DPCM)) {
+ ret = GRAPH_DPCM;
+ goto out_put;
+ }
+
+ if (of_node_name_eq(np, GRAPH_NODENAME_C2C)) {
+ ret = GRAPH_C2C;
+ goto out_put;
+ }
- if (of_node_name_eq(np, GRAPH_NODENAME_C2C))
- return GRAPH_C2C;
+ ret = GRAPH_NORMAL;
+
+out_put:
+ of_node_put(np);
+ return ret;
- return GRAPH_NORMAL;
}
static enum graph_type graph_get_type(struct asoc_simple_priv *priv,
@@ -430,8 +445,10 @@ static int asoc_simple_parse_dai(struct device_node *ep,
* if he unbinded CPU or Codec.
*/
ret = snd_soc_get_dai_name(&args, &dlc->dai_name);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(node);
return ret;
+ }
dlc->of_node = node;
@@ -851,12 +868,10 @@ int audio_graph2_link_c2c(struct asoc_simple_priv *priv,
struct link_info *li)
{
struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
- struct simple_dai_props *dai_props = simple_priv_to_props(priv, li->link);
- struct snd_soc_pcm_stream *c2c_conf = dai_props->c2c_conf;
struct device_node *port0, *port1, *ports;
struct device_node *codec0_port, *codec1_port;
struct device_node *ep0, *ep1;
- u32 val;
+ u32 val = 0;
int ret = -EINVAL;
/*
@@ -880,19 +895,33 @@ int audio_graph2_link_c2c(struct asoc_simple_priv *priv,
ports = of_get_parent(port0);
port1 = of_get_next_child(ports, lnk);
- if (!of_get_property(ports, "rate", &val)) {
+ /*
+ * Card2 uses the Codec2Codec settings from DT when they are present,
+ * and falls back to default settings otherwise; see
+ * asoc_simple_init_for_codec2codec()
+ *
+ * Add more settings here if needed
+ */
+ of_property_read_u32(ports, "rate", &val);
+ if (val) {
struct device *dev = simple_priv_to_dev(priv);
+ struct snd_soc_pcm_stream *c2c_conf;
- dev_err(dev, "Codec2Codec needs rate settings\n");
- goto err1;
- }
+ c2c_conf = devm_kzalloc(dev, sizeof(*c2c_conf), GFP_KERNEL);
+ if (!c2c_conf)
+ goto err1;
- c2c_conf->formats = SNDRV_PCM_FMTBIT_S32_LE; /* update ME */
- c2c_conf->rate_min =
- c2c_conf->rate_max = val;
- c2c_conf->channels_min =
- c2c_conf->channels_max = 2; /* update ME */
- dai_link->params = c2c_conf;
+ c2c_conf->formats = SNDRV_PCM_FMTBIT_S32_LE; /* update ME */
+ c2c_conf->rates = SNDRV_PCM_RATE_8000_384000;
+ c2c_conf->rate_min =
+ c2c_conf->rate_max = val;
+ c2c_conf->channels_min =
+ c2c_conf->channels_max = 2; /* update ME */
+
+ dai_link->params = c2c_conf;
+ dai_link->num_params = 1;
+ }
ep0 = port_to_endpoint(port0);
ep1 = port_to_endpoint(port1);
@@ -1086,7 +1115,6 @@ static int graph_count_c2c(struct asoc_simple_priv *priv,
li->num[li->link].cpus =
li->num[li->link].platforms = graph_counter(codec0);
li->num[li->link].codecs = graph_counter(codec1);
- li->num[li->link].c2c = 1;
of_node_put(ports);
of_node_put(port1);
diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
index 539d7f081bd7..4a29e314fa95 100644
--- a/sound/soc/generic/simple-card-utils.c
+++ b/sound/soc/generic/simple-card-utils.c
@@ -513,7 +513,12 @@ static int asoc_simple_init_dai(struct snd_soc_dai *dai,
return 0;
}
-static int asoc_simple_init_dai_link_params(struct snd_soc_pcm_runtime *rtd,
+static inline int asoc_simple_component_is_codec(struct snd_soc_component *component)
+{
+ return component->driver->endianness;
+}
+
+static int asoc_simple_init_for_codec2codec(struct snd_soc_pcm_runtime *rtd,
struct simple_dai_props *dai_props)
{
struct snd_soc_dai_link *dai_link = rtd->dai_link;
@@ -522,9 +527,17 @@ static int asoc_simple_init_dai_link_params(struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_hardware hw;
int i, ret, stream;
+ /* Do nothing if it already has Codec2Codec settings */
+ if (dai_link->params)
+ return 0;
+
+ /* Do nothing if it was DPCM :: BE */
+ if (dai_link->no_pcm)
+ return 0;
+
/* Only Codecs */
for_each_rtd_components(rtd, i, component) {
- if (!snd_soc_component_is_codec(component))
+ if (!asoc_simple_component_is_codec(component))
return 0;
}
@@ -575,7 +588,7 @@ int asoc_simple_dai_init(struct snd_soc_pcm_runtime *rtd)
return ret;
}
- ret = asoc_simple_init_dai_link_params(rtd, props);
+ ret = asoc_simple_init_for_codec2codec(rtd, props);
if (ret < 0)
return ret;
@@ -609,7 +622,7 @@ void asoc_simple_canonicalize_cpu(struct snd_soc_dai_link_component *cpus,
}
EXPORT_SYMBOL_GPL(asoc_simple_canonicalize_cpu);
-int asoc_simple_clean_reference(struct snd_soc_card *card)
+void asoc_simple_clean_reference(struct snd_soc_card *card)
{
struct snd_soc_dai_link *dai_link;
struct snd_soc_dai_link_component *cpu;
@@ -622,7 +635,6 @@ int asoc_simple_clean_reference(struct snd_soc_card *card)
for_each_link_codecs(dai_link, j, codec)
of_node_put(codec->of_node);
}
- return 0;
}
EXPORT_SYMBOL_GPL(asoc_simple_clean_reference);
@@ -742,8 +754,7 @@ int asoc_simple_init_priv(struct asoc_simple_priv *priv,
struct asoc_simple_dai *dais;
struct snd_soc_dai_link_component *dlcs;
struct snd_soc_codec_conf *cconf = NULL;
- struct snd_soc_pcm_stream *c2c_conf = NULL;
- int i, dai_num = 0, dlc_num = 0, cnf_num = 0, c2c_num = 0;
+ int i, dai_num = 0, dlc_num = 0, cnf_num = 0;
dai_props = devm_kcalloc(dev, li->link, sizeof(*dai_props), GFP_KERNEL);
dai_link = devm_kcalloc(dev, li->link, sizeof(*dai_link), GFP_KERNEL);
@@ -762,8 +773,6 @@ int asoc_simple_init_priv(struct asoc_simple_priv *priv,
if (!li->num[i].cpus)
cnf_num += li->num[i].codecs;
-
- c2c_num += li->num[i].c2c;
}
dais = devm_kcalloc(dev, dai_num, sizeof(*dais), GFP_KERNEL);
@@ -777,12 +786,6 @@ int asoc_simple_init_priv(struct asoc_simple_priv *priv,
return -ENOMEM;
}
- if (c2c_num) {
- c2c_conf = devm_kcalloc(dev, c2c_num, sizeof(*c2c_conf), GFP_KERNEL);
- if (!c2c_conf)
- return -ENOMEM;
- }
-
dev_dbg(dev, "link %d, dais %d, ccnf %d\n",
li->link, dai_num, cnf_num);
@@ -796,7 +799,6 @@ int asoc_simple_init_priv(struct asoc_simple_priv *priv,
priv->dais = dais;
priv->dlcs = dlcs;
priv->codec_conf = cconf;
- priv->c2c_conf = c2c_conf;
card->dai_link = priv->dai_link;
card->num_links = li->link;
@@ -814,12 +816,6 @@ int asoc_simple_init_priv(struct asoc_simple_priv *priv,
dlcs += li->num[i].cpus;
dais += li->num[i].cpus;
-
- if (li->num[i].c2c) {
- /* Codec2Codec */
- dai_props[i].c2c_conf = c2c_conf;
- c2c_conf += li->num[i].c2c;
- }
} else {
/* DPCM Be's CPU = dummy */
dai_props[i].cpus =
@@ -877,7 +873,9 @@ int asoc_simple_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
- return asoc_simple_clean_reference(card);
+ asoc_simple_clean_reference(card);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(asoc_simple_remove);
diff --git a/sound/soc/generic/test-component.c b/sound/soc/generic/test-component.c
index 5da4725d9e16..98c8990596a8 100644
--- a/sound/soc/generic/test-component.c
+++ b/sound/soc/generic/test-component.c
@@ -66,7 +66,7 @@ static int test_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
unsigned int format = fmt & SND_SOC_DAIFMT_FORMAT_MASK;
unsigned int clock = fmt & SND_SOC_DAIFMT_CLOCK_MASK;
unsigned int inv = fmt & SND_SOC_DAIFMT_INV_MASK;
- unsigned int master = fmt & SND_SOC_DAIFMT_MASTER_MASK;
+ unsigned int master = fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK;
char *str;
dev_info(dai->dev, "name : %s", dai->name);
@@ -105,16 +105,16 @@ static int test_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
str = "unknown";
switch (master) {
- case SND_SOC_DAIFMT_CBP_CFP:
+ case SND_SOC_DAIFMT_BP_FP:
str = "clk provider, frame provider";
break;
- case SND_SOC_DAIFMT_CBC_CFP:
+ case SND_SOC_DAIFMT_BC_FP:
str = "clk consumer, frame provider";
break;
- case SND_SOC_DAIFMT_CBP_CFC:
+ case SND_SOC_DAIFMT_BP_FC:
str = "clk provider, frame consumer";
break;
- case SND_SOC_DAIFMT_CBC_CFC:
+ case SND_SOC_DAIFMT_BC_FC:
str = "clk consumer, frame consumer";
break;
}
@@ -192,10 +192,10 @@ static int test_dai_bespoke_trigger(struct snd_pcm_substream *substream,
static u64 test_dai_formats =
/*
* Select below from Sound Card, not auto
- * SND_SOC_POSSIBLE_DAIFMT_CBP_CFP
- * SND_SOC_POSSIBLE_DAIFMT_CBC_CFP
- * SND_SOC_POSSIBLE_DAIFMT_CBP_CFC
- * SND_SOC_POSSIBLE_DAIFMT_CBC_CFC
+ * SND_SOC_POSSIBLE_DAIFMT_BP_FP
+ * SND_SOC_POSSIBLE_DAIFMT_BC_FP
+ * SND_SOC_POSSIBLE_DAIFMT_BP_FC
+ * SND_SOC_POSSIBLE_DAIFMT_BC_FC
*/
SND_SOC_POSSIBLE_DAIFMT_I2S |
SND_SOC_POSSIBLE_DAIFMT_RIGHT_J |
@@ -564,11 +564,11 @@ static int test_driver_probe(struct platform_device *pdev)
cdriv->pcm_construct = test_component_pcm_construct;
cdriv->pointer = test_component_pointer;
cdriv->trigger = test_component_trigger;
+ cdriv->legacy_dai_naming = 1;
} else {
cdriv->name = "test_codec";
cdriv->idle_bias_on = 1;
cdriv->endianness = 1;
- cdriv->non_legacy_dai_naming = 1;
}
cdriv->open = test_component_open;
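
The hunks in this file, and the hi6210/img/atom hunks below, all follow the same rename from the master/slave clocking macros to the provider/consumer ones; in test-component.c the cases map one-to-one (CBP_CFP to BP_FP, CBC_CFP to BC_FP, CBP_CFC to BP_FC, CBC_CFC to BC_FC). A minimal sketch of the new-style switch, using only macros that appear in this diff:

	switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
	case SND_SOC_DAIFMT_BP_FP:	/* bit clock provider, frame provider */
		break;
	case SND_SOC_DAIFMT_BC_FC:	/* bit clock consumer, frame consumer */
		break;
	default:
		return -EINVAL;
	}
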
diff --git a/sound/soc/hisilicon/hi6210-i2s.c b/sound/soc/hisilicon/hi6210-i2s.c
index a297d4af5099..27219a9e7d0d 100644
--- a/sound/soc/hisilicon/hi6210-i2s.c
+++ b/sound/soc/hisilicon/hi6210-i2s.c
@@ -227,9 +227,9 @@ static int hi6210_i2s_set_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
* We don't actually set the hardware until the hw_params
* call, but we need to validate the user input here.
*/
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBM_CFM:
- case SND_SOC_DAIFMT_CBS_CFS:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_BC_FC:
+ case SND_SOC_DAIFMT_BP_FP:
break;
default:
return -EINVAL;
@@ -245,8 +245,8 @@ static int hi6210_i2s_set_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
}
i2s->format = fmt;
- i2s->master = (i2s->format & SND_SOC_DAIFMT_MASTER_MASK) ==
- SND_SOC_DAIFMT_CBS_CFS;
+ i2s->master = (i2s->format & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) ==
+ SND_SOC_DAIFMT_BP_FP;
return 0;
}
@@ -375,21 +375,21 @@ static int hi6210_i2s_hw_params(struct snd_pcm_substream *substream,
hi6210_write_reg(i2s, HII2S_MUX_TOP_MODULE_CFG, val);
- switch (i2s->format & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBM_CFM:
+ switch (i2s->format & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_BC_FC:
i2s->master = false;
val = hi6210_read_reg(i2s, HII2S_I2S_CFG);
val |= HII2S_I2S_CFG__S2_MST_SLV;
hi6210_write_reg(i2s, HII2S_I2S_CFG, val);
break;
- case SND_SOC_DAIFMT_CBS_CFS:
+ case SND_SOC_DAIFMT_BP_FP:
i2s->master = true;
val = hi6210_read_reg(i2s, HII2S_I2S_CFG);
val &= ~HII2S_I2S_CFG__S2_MST_SLV;
hi6210_write_reg(i2s, HII2S_I2S_CFG, val);
break;
default:
- WARN_ONCE(1, "Invalid i2s->fmt MASTER_MASK. This shouldn't happen\n");
+ WARN_ONCE(1, "Invalid i2s->fmt CLOCK_PROVIDER_MASK. This shouldn't happen\n");
return -EINVAL;
}
@@ -539,6 +539,7 @@ static const struct snd_soc_dai_driver hi6210_i2s_dai_init = {
static const struct snd_soc_component_driver hi6210_i2s_i2s_comp = {
.name = "hi6210_i2s-i2s",
+ .legacy_dai_naming = 1,
};
static int hi6210_i2s_probe(struct platform_device *pdev)
diff --git a/sound/soc/img/img-i2s-in.c b/sound/soc/img/img-i2s-in.c
index 09d23b11621c..56bb7bbd3976 100644
--- a/sound/soc/img/img-i2s-in.c
+++ b/sound/soc/img/img-i2s-in.c
@@ -333,8 +333,8 @@ static int img_i2s_in_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
return -EINVAL;
}
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBM_CFM:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_BC_FC:
break;
default:
return -EINVAL;
@@ -386,7 +386,8 @@ static int img_i2s_in_dai_probe(struct snd_soc_dai *dai)
}
static const struct snd_soc_component_driver img_i2s_in_component = {
- .name = "img-i2s-in"
+ .name = "img-i2s-in",
+ .legacy_dai_naming = 1,
};
static int img_i2s_in_dma_prepare_slave_config(struct snd_pcm_substream *st,
diff --git a/sound/soc/img/img-i2s-out.c b/sound/soc/img/img-i2s-out.c
index 28f48ca1508a..abeff7829310 100644
--- a/sound/soc/img/img-i2s-out.c
+++ b/sound/soc/img/img-i2s-out.c
@@ -302,10 +302,10 @@ static int img_i2s_out_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
if (force_clk_active)
control_set |= IMG_I2S_OUT_CTL_CLK_EN_MASK;
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBM_CFM:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_BC_FC:
break;
- case SND_SOC_DAIFMT_CBS_CFS:
+ case SND_SOC_DAIFMT_BP_FP:
control_set |= IMG_I2S_OUT_CTL_MASTER_MASK;
break;
default:
@@ -346,11 +346,9 @@ static int img_i2s_out_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
chan_control_mask = IMG_I2S_OUT_CHAN_CTL_CLKT_MASK;
- ret = pm_runtime_get_sync(i2s->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(i2s->dev);
+ ret = pm_runtime_resume_and_get(i2s->dev);
+ if (ret < 0)
return ret;
- }
img_i2s_out_disable(i2s);
@@ -394,7 +392,8 @@ static int img_i2s_out_dai_probe(struct snd_soc_dai *dai)
}
static const struct snd_soc_component_driver img_i2s_out_component = {
- .name = "img-i2s-out"
+ .name = "img-i2s-out",
+ .legacy_dai_naming = 1,
};
static int img_i2s_out_dma_prepare_slave_config(struct snd_pcm_substream *st,
@@ -482,11 +481,9 @@ static int img_i2s_out_probe(struct platform_device *pdev)
if (ret)
goto err_pm_disable;
}
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0)
goto err_suspend;
- }
reg = IMG_I2S_OUT_CTL_FRM_SIZE_MASK;
img_i2s_out_writel(i2s, reg, IMG_I2S_OUT_CTL);
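
Both hunks above swap the pm_runtime_get_sync() plus pm_runtime_put_noidle() error pattern for pm_runtime_resume_and_get(), which drops the usage count itself on failure. A minimal sketch of the two shapes:

	/* old pattern: the caller must undo the refcount on failure */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* new pattern: the helper already dropped the refcount on failure */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;
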
diff --git a/sound/soc/img/img-parallel-out.c b/sound/soc/img/img-parallel-out.c
index cd6a6a825741..08506b05e226 100644
--- a/sound/soc/img/img-parallel-out.c
+++ b/sound/soc/img/img-parallel-out.c
@@ -201,7 +201,8 @@ static struct snd_soc_dai_driver img_prl_out_dai = {
};
static const struct snd_soc_component_driver img_prl_out_component = {
- .name = "img-prl-out"
+ .name = "img-prl-out",
+ .legacy_dai_naming = 1,
};
static int img_prl_out_probe(struct platform_device *pdev)
diff --git a/sound/soc/img/img-spdif-in.c b/sound/soc/img/img-spdif-in.c
index a79d1ccaeec0..3f1d1a7e8735 100644
--- a/sound/soc/img/img-spdif-in.c
+++ b/sound/soc/img/img-spdif-in.c
@@ -711,7 +711,8 @@ static struct snd_soc_dai_driver img_spdif_in_dai = {
};
static const struct snd_soc_component_driver img_spdif_in_component = {
- .name = "img-spdif-in"
+ .name = "img-spdif-in",
+ .legacy_dai_naming = 1,
};
static int img_spdif_in_probe(struct platform_device *pdev)
diff --git a/sound/soc/img/img-spdif-out.c b/sound/soc/img/img-spdif-out.c
index f7062eba2611..983761d3fa7e 100644
--- a/sound/soc/img/img-spdif-out.c
+++ b/sound/soc/img/img-spdif-out.c
@@ -316,7 +316,8 @@ static struct snd_soc_dai_driver img_spdif_out_dai = {
};
static const struct snd_soc_component_driver img_spdif_out_component = {
- .name = "img-spdif-out"
+ .name = "img-spdif-out",
+ .legacy_dai_naming = 1,
};
static int img_spdif_out_probe(struct platform_device *pdev)
diff --git a/sound/soc/img/pistachio-internal-dac.c b/sound/soc/img/pistachio-internal-dac.c
index 802c0ee63aa2..e3b858643bd5 100644
--- a/sound/soc/img/pistachio-internal-dac.c
+++ b/sound/soc/img/pistachio-internal-dac.c
@@ -138,7 +138,6 @@ static const struct snd_soc_component_driver pistachio_internal_dac_driver = {
.num_dapm_routes = ARRAY_SIZE(pistachio_internal_dac_routes),
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static int pistachio_internal_dac_probe(struct platform_device *pdev)
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index 7c85d1bb9c12..ded903f95b67 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -216,7 +216,7 @@ config SND_SOC_INTEL_AVS
depends on COMMON_CLK
select SND_SOC_ACPI if ACPI
select SND_SOC_TOPOLOGY
- select SND_HDA
+ select SND_SOC_HDA
select SND_HDA_EXT_CORE
select SND_HDA_DSP_LOADER
select SND_INTEL_DSP_CONFIG
@@ -226,5 +226,8 @@ config SND_SOC_INTEL_AVS
capabilities. This includes Skylake, Kabylake, Amberlake and
Apollolake.
+# Machine board drivers
+source "sound/soc/intel/avs/boards/Kconfig"
+
# ASoC codec drivers
source "sound/soc/intel/boards/Kconfig"
diff --git a/sound/soc/intel/atom/sst-atom-controls.c b/sound/soc/intel/atom/sst-atom-controls.c
index 335c32732994..fd59b35a62ba 100644
--- a/sound/soc/intel/atom/sst-atom-controls.c
+++ b/sound/soc/intel/atom/sst-atom-controls.c
@@ -831,9 +831,9 @@ static int sst_get_ssp_mode(struct snd_soc_dai *dai, unsigned int fmt)
dev_dbg(dai->dev, "Enter:%s, format=%x\n", __func__, format);
switch (format) {
- case SND_SOC_DAIFMT_CBC_CFC:
+ case SND_SOC_DAIFMT_BP_FP:
return SSP_MODE_PROVIDER;
- case SND_SOC_DAIFMT_CBP_CFP:
+ case SND_SOC_DAIFMT_BC_FC:
return SSP_MODE_CONSUMER;
default:
dev_err(dai->dev, "Invalid ssp protocol: %d\n", format);
@@ -1328,7 +1328,7 @@ int sst_send_pipe_gains(struct snd_soc_dai *dai, int stream, int mute)
{
struct sst_data *drv = snd_soc_dai_get_drvdata(dai);
struct snd_soc_dapm_widget *w;
- struct snd_soc_dapm_path *p = NULL;
+ struct snd_soc_dapm_path *p;
dev_dbg(dai->dev, "enter, dai-name=%s dir=%d\n", dai->name, stream);
@@ -1392,7 +1392,7 @@ int sst_send_pipe_gains(struct snd_soc_dai *dai, int stream, int mute)
static int sst_fill_module_list(struct snd_kcontrol *kctl,
struct snd_soc_dapm_widget *w, int type)
{
- struct sst_module *module = NULL;
+ struct sst_module *module;
struct snd_soc_component *c = snd_soc_dapm_to_component(w->dapm);
struct sst_ids *ids = w->priv;
int ret = 0;
diff --git a/sound/soc/intel/atom/sst/sst.c b/sound/soc/intel/atom/sst/sst.c
index 3a42d68c0247..160b50f479fb 100644
--- a/sound/soc/intel/atom/sst/sst.c
+++ b/sound/soc/intel/atom/sst/sst.c
@@ -114,7 +114,7 @@ static irqreturn_t intel_sst_interrupt_mrfld(int irq, void *context)
static irqreturn_t intel_sst_irq_thread_mrfld(int irq, void *context)
{
struct intel_sst_drv *drv = (struct intel_sst_drv *) context;
- struct ipc_post *__msg, *msg = NULL;
+ struct ipc_post *__msg, *msg;
unsigned long irq_flags;
spin_lock_irqsave(&drv->rx_msg_lock, irq_flags);
diff --git a/sound/soc/intel/atom/sst/sst_ipc.c b/sound/soc/intel/atom/sst/sst_ipc.c
index 4e8382097e61..4e039c7173d8 100644
--- a/sound/soc/intel/atom/sst/sst_ipc.c
+++ b/sound/soc/intel/atom/sst/sst_ipc.c
@@ -28,7 +28,7 @@
struct sst_block *sst_create_block(struct intel_sst_drv *ctx,
u32 msg_id, u32 drv_id)
{
- struct sst_block *msg = NULL;
+ struct sst_block *msg;
dev_dbg(ctx->dev, "Enter\n");
msg = kzalloc(sizeof(*msg), GFP_KERNEL);
@@ -63,7 +63,7 @@ struct sst_block *sst_create_block(struct intel_sst_drv *ctx,
int sst_wake_up_block(struct intel_sst_drv *ctx, int result,
u32 drv_id, u32 ipc, void *data, u32 size)
{
- struct sst_block *block = NULL;
+ struct sst_block *block;
dev_dbg(ctx->dev, "Enter\n");
@@ -91,7 +91,7 @@ int sst_wake_up_block(struct intel_sst_drv *ctx, int result,
int sst_free_block(struct intel_sst_drv *ctx, struct sst_block *freed)
{
- struct sst_block *block = NULL, *__block;
+ struct sst_block *block, *__block;
dev_dbg(ctx->dev, "Enter\n");
spin_lock_bh(&ctx->block_lock);
@@ -341,7 +341,7 @@ void sst_process_reply_mrfld(struct intel_sst_drv *sst_drv_ctx,
}
/* FW sent short error response for an IPC */
- if (msg_high.part.result && drv_id && !msg_high.part.large) {
+ if (msg_high.part.result && !msg_high.part.large) {
/* 32-bit FW error code in msg_low */
dev_err(sst_drv_ctx->dev, "FW sent error response 0x%x", msg_low);
sst_wake_up_block(sst_drv_ctx, msg_high.part.result,
diff --git a/sound/soc/intel/avs/Makefile b/sound/soc/intel/avs/Makefile
index b6b93ae80304..919212825f21 100644
--- a/sound/soc/intel/avs/Makefile
+++ b/sound/soc/intel/avs/Makefile
@@ -10,3 +10,6 @@ snd-soc-avs-objs += trace.o
CFLAGS_trace.o := -I$(src)
obj-$(CONFIG_SND_SOC_INTEL_AVS) += snd-soc-avs.o
+
+# Machine support
+obj-$(CONFIG_SND_SOC) += boards/
diff --git a/sound/soc/intel/avs/boards/Kconfig b/sound/soc/intel/avs/boards/Kconfig
new file mode 100644
index 000000000000..4d68e3ef992b
--- /dev/null
+++ b/sound/soc/intel/avs/boards/Kconfig
@@ -0,0 +1,121 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "Intel AVS Machine drivers"
+ depends on SND_SOC_INTEL_AVS
+
+comment "Available DSP configurations"
+
+config SND_SOC_INTEL_AVS_MACH_DA7219
+ tristate "da7219 I2S board"
+ depends on I2C
+ depends on MFD_INTEL_LPSS || COMPILE_TEST
+ select SND_SOC_DA7219
+ help
+ This adds support for AVS with DA7219 I2S codec configuration.
+ Say Y or m if you have such a device. This is a recommended option.
+ If unsure select "N".
+
+config SND_SOC_INTEL_AVS_MACH_DMIC
+ tristate "DMIC generic board"
+ select SND_SOC_DMIC
+ help
+ This adds support for AVS with Digital Mic array configuration.
+ Say Y or m if you have such a device. This is a recommended option.
+ If unsure select "N".
+
+config SND_SOC_INTEL_AVS_MACH_HDAUDIO
+ tristate "HD-Audio generic board"
+ select SND_SOC_HDA
+ help
+ This adds support for AVS with HDAudio codec configuration.
+ Say Y or m if you have such a device. This is a recommended option.
+ If unsure select "N".
+
+config SND_SOC_INTEL_AVS_MACH_I2S_TEST
+ tristate "I2S test board"
+ help
+ This adds support for I2S test-board which can be used to verify
+ transfer over I2S interface with SSP loopback scenarios.
+
+config SND_SOC_INTEL_AVS_MACH_MAX98357A
+ tristate "max98357A I2S board"
+ depends on I2C
+ depends on MFD_INTEL_LPSS || COMPILE_TEST
+ select SND_SOC_MAX98357A
+ help
+ This adds support for AVS with MAX98357A I2S codec configuration.
+ Say Y or m if you have such a device. This is a recommended option.
+ If unsure select "N".
+
+config SND_SOC_INTEL_AVS_MACH_MAX98373
+ tristate "max98373 I2S board"
+ depends on I2C
+ depends on MFD_INTEL_LPSS || COMPILE_TEST
+ select SND_SOC_MAX98373
+ help
+ This adds support for AVS with MAX98373 I2S codec configuration.
+ Say Y or m if you have such a device. This is a recommended option.
+ If unsure select "N".
+
+config SND_SOC_INTEL_AVS_MACH_NAU8825
+ tristate "nau8825 I2S board"
+ depends on I2C
+ depends on MFD_INTEL_LPSS || COMPILE_TEST
+ select SND_SOC_NAU8825
+ help
+ This adds support for ASoC machine driver with NAU8825 I2S audio codec.
+ It is meant to be used with AVS driver.
+ Say Y or m if you have such a device. This is a recommended option.
+ If unsure select "N".
+
+config SND_SOC_INTEL_AVS_MACH_RT274
+ tristate "rt274 in I2S mode"
+ depends on I2C
+ depends on MFD_INTEL_LPSS || COMPILE_TEST
+ select SND_SOC_RT274
+ help
+ This adds support for ASoC machine driver with RT274 I2S audio codec.
+ Say Y or m if you have such a device. This is a recommended option.
+ If unsure select "N".
+
+config SND_SOC_INTEL_AVS_MACH_RT286
+ tristate "rt286 in I2S mode"
+ depends on I2C
+ depends on MFD_INTEL_LPSS || COMPILE_TEST
+ select SND_SOC_RT286
+ help
+ This adds support for ASoC machine driver with RT286 I2S audio codec.
+ Say Y or m if you have such a device. This is a recommended option.
+ If unsure select "N".
+
+config SND_SOC_INTEL_AVS_MACH_RT298
+ tristate "rt298 in I2S mode"
+ depends on I2C
+ depends on MFD_INTEL_LPSS || COMPILE_TEST
+ select SND_SOC_RT298
+ help
+ This adds support for ASoC machine driver with RT298 I2S audio codec.
+ Say Y or m if you have such a device. This is a recommended option.
+ If unsure select "N".
+
+config SND_SOC_INTEL_AVS_MACH_RT5682
+ tristate "rt5682 in I2S mode"
+ depends on I2C
+ depends on MFD_INTEL_LPSS || COMPILE_TEST
+ select SND_SOC_RT5682_I2C
+ help
+ This adds support for ASoC machine driver with RT5682 I2S audio codec.
+ Say Y or m if you have such a device. This is a recommended option.
+ If unsure select "N".
+
+config SND_SOC_INTEL_AVS_MACH_SSM4567
+ tristate "ssm4567 I2S board"
+ depends on I2C
+ depends on MFD_INTEL_LPSS || COMPILE_TEST
+ select SND_SOC_SSM4567
+ help
+ This adds support for ASoC machine driver with SSM4567 I2S audio codec.
+ It is meant to be used with AVS driver.
+ Say Y or m if you have such a device. This is a recommended option.
+ If unsure select "N".
+
+endmenu
diff --git a/sound/soc/intel/avs/boards/Makefile b/sound/soc/intel/avs/boards/Makefile
new file mode 100644
index 000000000000..bc75376d58c2
--- /dev/null
+++ b/sound/soc/intel/avs/boards/Makefile
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+snd-soc-avs-da7219-objs := da7219.o
+snd-soc-avs-dmic-objs := dmic.o
+snd-soc-avs-hdaudio-objs := hdaudio.o
+snd-soc-avs-i2s-test-objs := i2s_test.o
+snd-soc-avs-max98357a-objs := max98357a.o
+snd-soc-avs-max98373-objs := max98373.o
+snd-soc-avs-nau8825-objs := nau8825.o
+snd-soc-avs-rt274-objs := rt274.o
+snd-soc-avs-rt286-objs := rt286.o
+snd-soc-avs-rt298-objs := rt298.o
+snd-soc-avs-rt5682-objs := rt5682.o
+snd-soc-avs-ssm4567-objs := ssm4567.o
+
+obj-$(CONFIG_SND_SOC_INTEL_AVS_MACH_DA7219) += snd-soc-avs-da7219.o
+obj-$(CONFIG_SND_SOC_INTEL_AVS_MACH_DMIC) += snd-soc-avs-dmic.o
+obj-$(CONFIG_SND_SOC_INTEL_AVS_MACH_HDAUDIO) += snd-soc-avs-hdaudio.o
+obj-$(CONFIG_SND_SOC_INTEL_AVS_MACH_I2S_TEST) += snd-soc-avs-i2s-test.o
+obj-$(CONFIG_SND_SOC_INTEL_AVS_MACH_MAX98357A) += snd-soc-avs-max98357a.o
+obj-$(CONFIG_SND_SOC_INTEL_AVS_MACH_MAX98373) += snd-soc-avs-max98373.o
+obj-$(CONFIG_SND_SOC_INTEL_AVS_MACH_NAU8825) += snd-soc-avs-nau8825.o
+obj-$(CONFIG_SND_SOC_INTEL_AVS_MACH_RT274) += snd-soc-avs-rt274.o
+obj-$(CONFIG_SND_SOC_INTEL_AVS_MACH_RT286) += snd-soc-avs-rt286.o
+obj-$(CONFIG_SND_SOC_INTEL_AVS_MACH_RT298) += snd-soc-avs-rt298.o
+obj-$(CONFIG_SND_SOC_INTEL_AVS_MACH_RT5682) += snd-soc-avs-rt5682.o
+obj-$(CONFIG_SND_SOC_INTEL_AVS_MACH_SSM4567) += snd-soc-avs-ssm4567.o
diff --git a/sound/soc/intel/avs/boards/da7219.c b/sound/soc/intel/avs/boards/da7219.c
new file mode 100644
index 000000000000..02ae542ad779
--- /dev/null
+++ b/sound/soc/intel/avs/boards/da7219.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
+//
+// Author: Cezary Rojewski <cezary.rojewski@intel.com>
+//
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-acpi.h>
+#include <sound/soc-dapm.h>
+#include <uapi/linux/input-event-codes.h>
+#include "../../../codecs/da7219.h"
+#include "../../../codecs/da7219-aad.h"
+
+#define DA7219_DAI_NAME "da7219-hifi"
+
+static const struct snd_kcontrol_new card_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Headphone Jack"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+};
+
+static int platform_clock_control(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_soc_dapm_context *dapm = w->dapm;
+ struct snd_soc_card *card = dapm->card;
+ struct snd_soc_dai *codec_dai;
+ int ret = 0;
+
+ codec_dai = snd_soc_card_get_codec_dai(card, DA7219_DAI_NAME);
+ if (!codec_dai) {
+ dev_err(card->dev, "Codec dai not found. Unable to set/unset codec pll\n");
+ return -EIO;
+ }
+
+ if (SND_SOC_DAPM_EVENT_OFF(event)) {
+ ret = snd_soc_dai_set_pll(codec_dai, 0, DA7219_SYSCLK_MCLK, 0, 0);
+ if (ret)
+ dev_err(card->dev, "failed to stop PLL: %d\n", ret);
+ } else if (SND_SOC_DAPM_EVENT_ON(event)) {
+ ret = snd_soc_dai_set_pll(codec_dai, 0, DA7219_SYSCLK_PLL_SRM,
+ 0, DA7219_PLL_FREQ_OUT_98304);
+ if (ret)
+ dev_err(card->dev, "failed to start PLL: %d\n", ret);
+ }
+
+ return ret;
+}
+
+static const struct snd_soc_dapm_widget card_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0, platform_clock_control,
+ SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_PRE_PMU),
+};
+
+static const struct snd_soc_dapm_route card_base_routes[] = {
+ /* HP jack connectors - unknown if we have jack detection */
+ {"Headphone Jack", NULL, "HPL"},
+ {"Headphone Jack", NULL, "HPR"},
+
+ {"MIC", NULL, "Headset Mic"},
+
+ { "Headphone Jack", NULL, "Platform Clock" },
+ { "Headset Mic", NULL, "Platform Clock" },
+};
+
+static int avs_da7219_codec_init(struct snd_soc_pcm_runtime *runtime)
+{
+ struct snd_soc_component *component = asoc_rtd_to_codec(runtime, 0)->component;
+ struct snd_soc_card *card = runtime->card;
+ struct snd_soc_jack *jack;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(runtime, 0);
+ int clk_freq;
+ int ret;
+
+ jack = snd_soc_card_get_drvdata(card);
+ clk_freq = 19200000;
+
+ ret = snd_soc_dai_set_sysclk(codec_dai, DA7219_CLKSRC_MCLK, clk_freq, SND_SOC_CLOCK_IN);
+ if (ret) {
+ dev_err(card->dev, "can't set codec sysclk configuration\n");
+ return ret;
+ }
+
+ /*
+ * Headset buttons map to the Google Reference headset.
+ * These can be configured by userspace.
+ */
+ ret = snd_soc_card_jack_new(card, "Headset Jack",
+ SND_JACK_HEADSET | SND_JACK_BTN_0 |
+ SND_JACK_BTN_1 | SND_JACK_BTN_2 |
+ SND_JACK_BTN_3 | SND_JACK_LINEOUT, jack);
+ if (ret) {
+ dev_err(card->dev, "Headset Jack creation failed: %d\n", ret);
+ return ret;
+ }
+
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOLUMEUP);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEDOWN);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOICECOMMAND);
+
+ da7219_aad_jack_det(component, jack);
+
+ return 0;
+}
+
+static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port,
+ struct snd_soc_dai_link **dai_link)
+{
+ struct snd_soc_dai_link_component *platform;
+ struct snd_soc_dai_link *dl;
+
+ dl = devm_kzalloc(dev, sizeof(*dl), GFP_KERNEL);
+ platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL);
+ if (!dl || !platform)
+ return -ENOMEM;
+
+ platform->name = platform_name;
+
+ dl->name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d-Codec", ssp_port);
+ dl->cpus = devm_kzalloc(dev, sizeof(*dl->cpus), GFP_KERNEL);
+ dl->codecs = devm_kzalloc(dev, sizeof(*dl->codecs), GFP_KERNEL);
+ if (!dl->name || !dl->cpus || !dl->codecs)
+ return -ENOMEM;
+
+ dl->cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d Pin", ssp_port);
+ dl->codecs->name = devm_kasprintf(dev, GFP_KERNEL, "i2c-DLGS7219:00");
+ dl->codecs->dai_name = devm_kasprintf(dev, GFP_KERNEL, DA7219_DAI_NAME);
+ if (!dl->cpus->dai_name || !dl->codecs->name || !dl->codecs->dai_name)
+ return -ENOMEM;
+
+ dl->num_cpus = 1;
+ dl->num_codecs = 1;
+ dl->platforms = platform;
+ dl->num_platforms = 1;
+ dl->id = 0;
+ dl->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS;
+ dl->init = avs_da7219_codec_init;
+ dl->nonatomic = 1;
+ dl->no_pcm = 1;
+ dl->dpcm_capture = 1;
+ dl->dpcm_playback = 1;
+
+ *dai_link = dl;
+
+ return 0;
+}
+
+static int avs_create_dapm_routes(struct device *dev, int ssp_port,
+ struct snd_soc_dapm_route **routes, int *num_routes)
+{
+ struct snd_soc_dapm_route *dr;
+ const int num_base = ARRAY_SIZE(card_base_routes);
+ const int num_dr = num_base + 2;
+ int idx;
+
+ dr = devm_kcalloc(dev, num_dr, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ memcpy(dr, card_base_routes, num_base * sizeof(*dr));
+
+ idx = num_base;
+ dr[idx].sink = devm_kasprintf(dev, GFP_KERNEL, "Playback");
+ dr[idx].source = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Tx", ssp_port);
+ if (!dr[idx].sink || !dr[idx].source)
+ return -ENOMEM;
+
+ idx++;
+ dr[idx].sink = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Rx", ssp_port);
+ dr[idx].source = devm_kasprintf(dev, GFP_KERNEL, "Capture");
+ if (!dr[idx].sink || !dr[idx].source)
+ return -ENOMEM;
+
+ *routes = dr;
+ *num_routes = num_dr;
+
+ return 0;
+}
+
+static int avs_card_set_jack(struct snd_soc_card *card, struct snd_soc_jack *jack)
+{
+ struct snd_soc_component *component;
+
+ for_each_card_components(card, component)
+ snd_soc_component_set_jack(component, jack, NULL);
+ return 0;
+}
+
+static int avs_card_remove(struct snd_soc_card *card)
+{
+ return avs_card_set_jack(card, NULL);
+}
+
+static int avs_card_suspend_pre(struct snd_soc_card *card)
+{
+ return avs_card_set_jack(card, NULL);
+}
+
+static int avs_card_resume_post(struct snd_soc_card *card)
+{
+ struct snd_soc_jack *jack = snd_soc_card_get_drvdata(card);
+
+ return avs_card_set_jack(card, jack);
+}
+
+static int avs_da7219_probe(struct platform_device *pdev)
+{
+ struct snd_soc_dapm_route *routes;
+ struct snd_soc_dai_link *dai_link;
+ struct snd_soc_acpi_mach *mach;
+ struct snd_soc_card *card;
+ struct snd_soc_jack *jack;
+ struct device *dev = &pdev->dev;
+ const char *pname;
+ int num_routes, ssp_port, ret;
+
+ mach = dev_get_platdata(dev);
+ pname = mach->mach_params.platform;
+ ssp_port = __ffs(mach->mach_params.i2s_link_mask);
+
+ ret = avs_create_dai_link(dev, pname, ssp_port, &dai_link);
+ if (ret) {
+ dev_err(dev, "Failed to create dai link: %d", ret);
+ return ret;
+ }
+
+ ret = avs_create_dapm_routes(dev, ssp_port, &routes, &num_routes);
+ if (ret) {
+ dev_err(dev, "Failed to create dapm routes: %d", ret);
+ return ret;
+ }
+
+ jack = devm_kzalloc(dev, sizeof(*jack), GFP_KERNEL);
+ card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
+ if (!jack || !card)
+ return -ENOMEM;
+
+ card->name = "avs_da7219";
+ card->dev = dev;
+ card->owner = THIS_MODULE;
+ card->remove = avs_card_remove;
+ card->suspend_pre = avs_card_suspend_pre;
+ card->resume_post = avs_card_resume_post;
+ card->dai_link = dai_link;
+ card->num_links = 1;
+ card->controls = card_controls;
+ card->num_controls = ARRAY_SIZE(card_controls);
+ card->dapm_widgets = card_widgets;
+ card->num_dapm_widgets = ARRAY_SIZE(card_widgets);
+ card->dapm_routes = routes;
+ card->num_dapm_routes = num_routes;
+ card->fully_routed = true;
+ snd_soc_card_set_drvdata(card, jack);
+
+ ret = snd_soc_fixup_dai_links_platform_name(card, pname);
+ if (ret)
+ return ret;
+
+ return devm_snd_soc_register_card(dev, card);
+}
+
+static struct platform_driver avs_da7219_driver = {
+ .probe = avs_da7219_probe,
+ .driver = {
+ .name = "avs_da7219",
+ .pm = &snd_soc_pm_ops,
+ },
+};
+
+module_platform_driver(avs_da7219_driver);
+
+MODULE_AUTHOR("Cezary Rojewski <cezary.rojewski@intel.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:avs_da7219");
diff --git a/sound/soc/intel/avs/boards/dmic.c b/sound/soc/intel/avs/boards/dmic.c
new file mode 100644
index 000000000000..90a921638572
--- /dev/null
+++ b/sound/soc/intel/avs/boards/dmic.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
+//
+// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
+// Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
+//
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <sound/soc.h>
+#include <sound/soc-acpi.h>
+
+SND_SOC_DAILINK_DEF(dmic_pin, DAILINK_COMP_ARRAY(COMP_CPU("DMIC Pin")));
+SND_SOC_DAILINK_DEF(dmic_wov_pin, DAILINK_COMP_ARRAY(COMP_CPU("DMIC WoV Pin")));
+SND_SOC_DAILINK_DEF(dmic_codec, DAILINK_COMP_ARRAY(COMP_CODEC("dmic-codec", "dmic-hifi")));
+/* Name overridden on probe */
+SND_SOC_DAILINK_DEF(platform, DAILINK_COMP_ARRAY(COMP_PLATFORM("")));
+
+static struct snd_soc_dai_link card_dai_links[] = {
+ /* Back ends */
+ {
+ .name = "DMIC",
+ .id = 0,
+ .dpcm_capture = 1,
+ .nonatomic = 1,
+ .no_pcm = 1,
+ SND_SOC_DAILINK_REG(dmic_pin, dmic_codec, platform),
+ },
+ {
+ .name = "DMIC WoV",
+ .id = 1,
+ .dpcm_capture = 1,
+ .nonatomic = 1,
+ .no_pcm = 1,
+ .ignore_suspend = 1,
+ SND_SOC_DAILINK_REG(dmic_wov_pin, dmic_codec, platform),
+ },
+};
+
+static const struct snd_soc_dapm_widget card_widgets[] = {
+ SND_SOC_DAPM_MIC("SoC DMIC", NULL),
+};
+
+static const struct snd_soc_dapm_route card_routes[] = {
+ {"DMic", NULL, "SoC DMIC"},
+ {"DMIC Rx", NULL, "Capture"},
+ {"DMIC WoV Rx", NULL, "Capture"},
+};
+
+static int avs_dmic_probe(struct platform_device *pdev)
+{
+ struct snd_soc_acpi_mach *mach;
+ struct snd_soc_card *card;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ mach = dev_get_platdata(dev);
+
+ card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
+ if (!card)
+ return -ENOMEM;
+
+ card->name = "avs_dmic";
+ card->dev = dev;
+ card->owner = THIS_MODULE;
+ card->dai_link = card_dai_links;
+ card->num_links = ARRAY_SIZE(card_dai_links);
+ card->dapm_widgets = card_widgets;
+ card->num_dapm_widgets = ARRAY_SIZE(card_widgets);
+ card->dapm_routes = card_routes;
+ card->num_dapm_routes = ARRAY_SIZE(card_routes);
+ card->fully_routed = true;
+
+ ret = snd_soc_fixup_dai_links_platform_name(card, mach->mach_params.platform);
+ if (ret)
+ return ret;
+
+ return devm_snd_soc_register_card(dev, card);
+}
+
+static struct platform_driver avs_dmic_driver = {
+ .probe = avs_dmic_probe,
+ .driver = {
+ .name = "avs_dmic",
+ .pm = &snd_soc_pm_ops,
+ },
+};
+
+module_platform_driver(avs_dmic_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:avs_dmic");
diff --git a/sound/soc/intel/avs/boards/hdaudio.c b/sound/soc/intel/avs/boards/hdaudio.c
new file mode 100644
index 000000000000..d2fc41d39448
--- /dev/null
+++ b/sound/soc/intel/avs/boards/hdaudio.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
+//
+// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
+// Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
+//
+
+#include <linux/platform_device.h>
+#include <sound/hda_codec.h>
+#include <sound/hda_i915.h>
+#include <sound/soc.h>
+#include <sound/soc-acpi.h>
+#include "../../../codecs/hda.h"
+
+static int avs_create_dai_links(struct device *dev, struct hda_codec *codec, int pcm_count,
+ const char *platform_name, struct snd_soc_dai_link **links)
+{
+ struct snd_soc_dai_link_component *platform;
+ struct snd_soc_dai_link *dl;
+ struct hda_pcm *pcm;
+ const char *cname = dev_name(&codec->core.dev);
+ int i;
+
+ dl = devm_kcalloc(dev, pcm_count, sizeof(*dl), GFP_KERNEL);
+ platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL);
+ if (!dl || !platform)
+ return -ENOMEM;
+
+ platform->name = platform_name;
+ pcm = list_first_entry(&codec->pcm_list_head, struct hda_pcm, list);
+
+ for (i = 0; i < pcm_count; i++, pcm = list_next_entry(pcm, list)) {
+ dl[i].name = devm_kasprintf(dev, GFP_KERNEL, "%s link%d", cname, i);
+ if (!dl[i].name)
+ return -ENOMEM;
+
+ dl[i].id = i;
+ dl[i].nonatomic = 1;
+ dl[i].no_pcm = 1;
+ dl[i].dpcm_playback = 1;
+ dl[i].dpcm_capture = 1;
+ dl[i].platforms = platform;
+ dl[i].num_platforms = 1;
+
+ dl[i].codecs = devm_kzalloc(dev, sizeof(*dl->codecs), GFP_KERNEL);
+ dl[i].cpus = devm_kzalloc(dev, sizeof(*dl->cpus), GFP_KERNEL);
+ if (!dl[i].codecs || !dl[i].cpus)
+ return -ENOMEM;
+
+ dl[i].cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "%s-cpu%d", cname, i);
+ if (!dl[i].cpus->dai_name)
+ return -ENOMEM;
+
+ dl[i].codecs->name = devm_kstrdup(dev, cname, GFP_KERNEL);
+ dl[i].codecs->dai_name = pcm->name;
+ dl[i].num_codecs = 1;
+ dl[i].num_cpus = 1;
+ }
+
+ *links = dl;
+ return 0;
+}
+
+static int avs_create_dapm_routes(struct device *dev, struct hda_codec *codec, int pcm_count,
+ struct snd_soc_dapm_route **routes, int *num_routes)
+{
+ struct snd_soc_dapm_route *dr;
+ struct hda_pcm *pcm;
+ const char *cname = dev_name(&codec->core.dev);
+ int i, n = 0;
+
+ /* at most twice the number of PCMs */
+ dr = devm_kcalloc(dev, pcm_count * 2, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ pcm = list_first_entry(&codec->pcm_list_head, struct hda_pcm, list);
+
+ for (i = 0; i < pcm_count; i++, pcm = list_next_entry(pcm, list)) {
+ struct hda_pcm_stream *stream;
+ int dir;
+
+ dir = SNDRV_PCM_STREAM_PLAYBACK;
+ stream = &pcm->stream[dir];
+ if (!stream->substreams)
+ goto capture_routes;
+
+ dr[n].sink = devm_kasprintf(dev, GFP_KERNEL, "%s %s", pcm->name,
+ snd_pcm_direction_name(dir));
+ dr[n].source = devm_kasprintf(dev, GFP_KERNEL, "%s-cpu%d Tx", cname, i);
+ if (!dr[n].sink || !dr[n].source)
+ return -ENOMEM;
+ n++;
+
+capture_routes:
+ dir = SNDRV_PCM_STREAM_CAPTURE;
+ stream = &pcm->stream[dir];
+ if (!stream->substreams)
+ continue;
+
+ dr[n].sink = devm_kasprintf(dev, GFP_KERNEL, "%s-cpu%d Rx", cname, i);
+ dr[n].source = devm_kasprintf(dev, GFP_KERNEL, "%s %s", pcm->name,
+ snd_pcm_direction_name(dir));
+ if (!dr[n].sink || !dr[n].source)
+ return -ENOMEM;
+ n++;
+ }
+
+ *routes = dr;
+ *num_routes = n;
+ return 0;
+}
+
+/* Should be aligned with SectionPCM's name from topology */
+#define FEDAI_NAME_PREFIX "HDMI"
+
+static struct snd_pcm *
+avs_card_hdmi_pcm_at(struct snd_soc_card *card, int hdmi_idx)
+{
+ struct snd_soc_pcm_runtime *rtd;
+ int dir = SNDRV_PCM_STREAM_PLAYBACK;
+
+ for_each_card_rtds(card, rtd) {
+ struct snd_pcm *spcm;
+ int ret, n;
+
+ spcm = rtd->pcm ? rtd->pcm->streams[dir].pcm : NULL;
+ if (!spcm || !strstr(spcm->id, FEDAI_NAME_PREFIX))
+ continue;
+
+ ret = sscanf(spcm->id, FEDAI_NAME_PREFIX "%d", &n);
+ if (ret != 1)
+ continue;
+ if (n == hdmi_idx)
+ return rtd->pcm;
+ }
+
+ return NULL;
+}
+
+static int avs_card_late_probe(struct snd_soc_card *card)
+{
+ struct snd_soc_acpi_mach *mach = dev_get_platdata(card->dev);
+ struct hda_codec *codec = mach->pdata;
+ struct hda_pcm *hpcm;
+ /* Topology pcm indexing is 1-based */
+ int i = 1;
+
+ list_for_each_entry(hpcm, &codec->pcm_list_head, list) {
+ struct snd_pcm *spcm;
+
+ spcm = avs_card_hdmi_pcm_at(card, i);
+ if (spcm) {
+ hpcm->pcm = spcm;
+ hpcm->device = spcm->device;
+ dev_info(card->dev, "%s: mapping HDMI converter %d to PCM %d (%p)\n",
+ __func__, i, hpcm->device, spcm);
+ } else {
+ hpcm->pcm = NULL;
+ hpcm->device = SNDRV_PCM_INVALID_DEVICE;
+ dev_warn(card->dev, "%s: no PCM in topology for HDMI converter %d\n",
+ __func__, i);
+ }
+ i++;
+ }
+
+ return hda_codec_probe_complete(codec);
+}
+
+static int avs_probing_link_init(struct snd_soc_pcm_runtime *rtm)
+{
+ struct snd_soc_dapm_route *routes;
+ struct snd_soc_acpi_mach *mach;
+ struct snd_soc_dai_link *links = NULL;
+ struct snd_soc_card *card = rtm->card;
+ struct hda_codec *codec;
+ struct hda_pcm *pcm;
+ int ret, n, pcm_count = 0;
+
+ mach = dev_get_platdata(card->dev);
+ codec = mach->pdata;
+
+ if (list_empty(&codec->pcm_list_head))
+ return -EINVAL;
+ list_for_each_entry(pcm, &codec->pcm_list_head, list)
+ pcm_count++;
+
+ ret = avs_create_dai_links(card->dev, codec, pcm_count, mach->mach_params.platform, &links);
+ if (ret < 0) {
+ dev_err(card->dev, "create links failed: %d\n", ret);
+ return ret;
+ }
+
+ for (n = 0; n < pcm_count; n++) {
+ ret = snd_soc_add_pcm_runtime(card, &links[n]);
+ if (ret < 0) {
+ dev_err(card->dev, "add links failed: %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = avs_create_dapm_routes(card->dev, codec, pcm_count, &routes, &n);
+ if (ret < 0) {
+ dev_err(card->dev, "create routes failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_dapm_add_routes(&card->dapm, routes, n);
+ if (ret < 0) {
+ dev_err(card->dev, "add routes failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+SND_SOC_DAILINK_DEF(dummy, DAILINK_COMP_ARRAY(COMP_DUMMY()));
+
+static struct snd_soc_dai_link probing_link = {
+ .name = "probing-LINK",
+ .id = -1,
+ .nonatomic = 1,
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .cpus = dummy,
+ .num_cpus = ARRAY_SIZE(dummy),
+ .init = avs_probing_link_init,
+};
+
+static int avs_hdaudio_probe(struct platform_device *pdev)
+{
+ struct snd_soc_dai_link *binder;
+ struct snd_soc_acpi_mach *mach;
+ struct snd_soc_card *card;
+ struct device *dev = &pdev->dev;
+ struct hda_codec *codec;
+
+ mach = dev_get_platdata(dev);
+ codec = mach->pdata;
+
+ /* codec may be unloaded before card's probe() fires */
+ if (!device_is_registered(&codec->core.dev))
+ return -ENODEV;
+
+ binder = devm_kmemdup(dev, &probing_link, sizeof(probing_link), GFP_KERNEL);
+ if (!binder)
+ return -ENOMEM;
+
+ binder->platforms = devm_kzalloc(dev, sizeof(*binder->platforms), GFP_KERNEL);
+ binder->codecs = devm_kzalloc(dev, sizeof(*binder->codecs), GFP_KERNEL);
+ if (!binder->platforms || !binder->codecs)
+ return -ENOMEM;
+
+ binder->codecs->name = devm_kstrdup(dev, dev_name(&codec->core.dev), GFP_KERNEL);
+ if (!binder->codecs->name)
+ return -ENOMEM;
+
+ binder->platforms->name = mach->mach_params.platform;
+ binder->num_platforms = 1;
+ binder->codecs->dai_name = "codec-probing-DAI";
+ binder->num_codecs = 1;
+
+ card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
+ if (!card)
+ return -ENOMEM;
+
+ card->name = binder->codecs->name;
+ card->dev = dev;
+ card->owner = THIS_MODULE;
+ card->dai_link = binder;
+ card->num_links = 1;
+ card->fully_routed = true;
+ if (hda_codec_is_display(codec))
+ card->late_probe = avs_card_late_probe;
+
+ return devm_snd_soc_register_card(dev, card);
+}
+
+static struct platform_driver avs_hdaudio_driver = {
+ .probe = avs_hdaudio_probe,
+ .driver = {
+ .name = "avs_hdaudio",
+ .pm = &snd_soc_pm_ops,
+ },
+};
+
+module_platform_driver(avs_hdaudio_driver)
+
+MODULE_DESCRIPTION("Intel HD-Audio machine driver");
+MODULE_AUTHOR("Cezary Rojewski <cezary.rojewski@intel.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:avs_hdaudio");
diff --git a/sound/soc/intel/avs/boards/i2s_test.c b/sound/soc/intel/avs/boards/i2s_test.c
new file mode 100644
index 000000000000..8f0fd87bc866
--- /dev/null
+++ b/sound/soc/intel/avs/boards/i2s_test.c
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
+//
+// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
+// Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
+//
+
+#include <linux/module.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-acpi.h>
+#include <sound/soc-dapm.h>
+
+static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port,
+ struct snd_soc_dai_link **dai_link)
+{
+ struct snd_soc_dai_link_component *platform;
+ struct snd_soc_dai_link *dl;
+
+ dl = devm_kzalloc(dev, sizeof(*dl), GFP_KERNEL);
+ platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL);
+ if (!dl || !platform)
+ return -ENOMEM;
+
+ platform->name = platform_name;
+
+ dl->name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d-Codec", ssp_port);
+ dl->cpus = devm_kzalloc(dev, sizeof(*dl->cpus), GFP_KERNEL);
+ dl->codecs = devm_kzalloc(dev, sizeof(*dl->codecs), GFP_KERNEL);
+ if (!dl->name || !dl->cpus || !dl->codecs)
+ return -ENOMEM;
+
+ dl->cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d Pin", ssp_port);
+ dl->codecs->name = devm_kasprintf(dev, GFP_KERNEL, "snd-soc-dummy");
+ dl->codecs->dai_name = devm_kasprintf(dev, GFP_KERNEL, "snd-soc-dummy-dai");
+ if (!dl->cpus->dai_name || !dl->codecs->name || !dl->codecs->dai_name)
+ return -ENOMEM;
+
+ dl->num_cpus = 1;
+ dl->num_codecs = 1;
+ dl->platforms = platform;
+ dl->num_platforms = 1;
+ dl->id = 0;
+ dl->nonatomic = 1;
+ dl->no_pcm = 1;
+ dl->dpcm_capture = 1;
+ dl->dpcm_playback = 1;
+
+ *dai_link = dl;
+
+ return 0;
+}
+
+static int avs_create_dapm_routes(struct device *dev, int ssp_port,
+ struct snd_soc_dapm_route **routes, int *num_routes)
+{
+ struct snd_soc_dapm_route *dr;
+ const int num_dr = 2;
+
+ dr = devm_kcalloc(dev, num_dr, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ dr[0].sink = devm_kasprintf(dev, GFP_KERNEL, "ssp%dpb", ssp_port);
+ dr[0].source = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Tx", ssp_port);
+ if (!dr[0].sink || !dr[0].source)
+ return -ENOMEM;
+
+ dr[1].sink = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Rx", ssp_port);
+ dr[1].source = devm_kasprintf(dev, GFP_KERNEL, "ssp%dcp", ssp_port);
+ if (!dr[1].sink || !dr[1].source)
+ return -ENOMEM;
+
+ *routes = dr;
+ *num_routes = num_dr;
+
+ return 0;
+}
+
+static int avs_create_dapm_widgets(struct device *dev, int ssp_port,
+ struct snd_soc_dapm_widget **widgets, int *num_widgets)
+{
+ struct snd_soc_dapm_widget *dw;
+ const int num_dw = 2;
+
+ dw = devm_kcalloc(dev, num_dw, sizeof(*dw), GFP_KERNEL);
+ if (!dw)
+ return -ENOMEM;
+
+ dw[0].id = snd_soc_dapm_hp;
+ dw[0].reg = SND_SOC_NOPM;
+ dw[0].name = devm_kasprintf(dev, GFP_KERNEL, "ssp%dpb", ssp_port);
+ if (!dw[0].name)
+ return -ENOMEM;
+
+ dw[1].id = snd_soc_dapm_mic;
+ dw[1].reg = SND_SOC_NOPM;
+ dw[1].name = devm_kasprintf(dev, GFP_KERNEL, "ssp%dcp", ssp_port);
+ if (!dw[1].name)
+ return -ENOMEM;
+
+ *widgets = dw;
+ *num_widgets = num_dw;
+
+ return 0;
+}
+
+static int avs_i2s_test_probe(struct platform_device *pdev)
+{
+ struct snd_soc_dapm_widget *widgets;
+ struct snd_soc_dapm_route *routes;
+ struct snd_soc_dai_link *dai_link;
+ struct snd_soc_acpi_mach *mach;
+ struct snd_soc_card *card;
+ struct device *dev = &pdev->dev;
+ const char *pname;
+ int num_routes, num_widgets;
+ int ssp_port, ret;
+
+ mach = dev_get_platdata(dev);
+ pname = mach->mach_params.platform;
+ ssp_port = __ffs(mach->mach_params.i2s_link_mask);
+
+ card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
+ if (!card)
+ return -ENOMEM;
+
+ card->name = devm_kasprintf(dev, GFP_KERNEL, "ssp%d-loopback", ssp_port);
+ if (!card->name)
+ return -ENOMEM;
+
+ ret = avs_create_dai_link(dev, pname, ssp_port, &dai_link);
+ if (ret) {
+ dev_err(dev, "Failed to create dai link: %d\n", ret);
+ return ret;
+ }
+
+ ret = avs_create_dapm_routes(dev, ssp_port, &routes, &num_routes);
+ if (ret) {
+ dev_err(dev, "Failed to create dapm routes: %d\n", ret);
+ return ret;
+ }
+
+ ret = avs_create_dapm_widgets(dev, ssp_port, &widgets, &num_widgets);
+ if (ret) {
+ dev_err(dev, "Failed to create dapm widgets: %d\n", ret);
+ return ret;
+ }
+
+ card->dev = dev;
+ card->owner = THIS_MODULE;
+ card->dai_link = dai_link;
+ card->num_links = 1;
+ card->dapm_routes = routes;
+ card->num_dapm_routes = num_routes;
+ card->dapm_widgets = widgets;
+ card->num_dapm_widgets = num_widgets;
+ card->fully_routed = true;
+
+ ret = snd_soc_fixup_dai_links_platform_name(card, pname);
+ if (ret)
+ return ret;
+
+ return devm_snd_soc_register_card(dev, card);
+}
+
+static struct platform_driver avs_i2s_test_driver = {
+ .probe = avs_i2s_test_probe,
+ .driver = {
+ .name = "avs_i2s_test",
+ .pm = &snd_soc_pm_ops,
+ },
+};
+
+module_platform_driver(avs_i2s_test_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:avs_i2s_test");
diff --git a/sound/soc/intel/avs/boards/max98357a.c b/sound/soc/intel/avs/boards/max98357a.c
new file mode 100644
index 000000000000..921f42caf7e0
--- /dev/null
+++ b/sound/soc/intel/avs/boards/max98357a.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
+//
+// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
+// Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
+//
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <sound/soc.h>
+#include <sound/soc-acpi.h>
+#include <sound/soc-dapm.h>
+
+static const struct snd_kcontrol_new card_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Spk"),
+};
+
+static const struct snd_soc_dapm_widget card_widgets[] = {
+ SND_SOC_DAPM_SPK("Spk", NULL),
+};
+
+static const struct snd_soc_dapm_route card_base_routes[] = {
+ { "Spk", NULL, "Speaker" },
+};
+
+static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port,
+ struct snd_soc_dai_link **dai_link)
+{
+ struct snd_soc_dai_link_component *platform;
+ struct snd_soc_dai_link *dl;
+
+ dl = devm_kzalloc(dev, sizeof(*dl), GFP_KERNEL);
+ platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL);
+ if (!dl || !platform)
+ return -ENOMEM;
+
+ platform->name = platform_name;
+
+ dl->name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d-Codec", ssp_port);
+ dl->cpus = devm_kzalloc(dev, sizeof(*dl->cpus), GFP_KERNEL);
+ dl->codecs = devm_kzalloc(dev, sizeof(*dl->codecs), GFP_KERNEL);
+ if (!dl->name || !dl->cpus || !dl->codecs)
+ return -ENOMEM;
+
+ dl->cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d Pin", ssp_port);
+ dl->codecs->name = devm_kasprintf(dev, GFP_KERNEL, "MX98357A:00");
+ dl->codecs->dai_name = devm_kasprintf(dev, GFP_KERNEL, "HiFi");
+ if (!dl->cpus->dai_name || !dl->codecs->name || !dl->codecs->dai_name)
+ return -ENOMEM;
+
+ dl->num_cpus = 1;
+ dl->num_codecs = 1;
+ dl->platforms = platform;
+ dl->num_platforms = 1;
+ dl->id = 0;
+ dl->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS;
+ dl->nonatomic = 1;
+ dl->no_pcm = 1;
+ dl->dpcm_playback = 1;
+
+ *dai_link = dl;
+
+ return 0;
+}
+
+static int avs_create_dapm_routes(struct device *dev, int ssp_port,
+ struct snd_soc_dapm_route **routes, int *num_routes)
+{
+ struct snd_soc_dapm_route *dr;
+ const int num_base = ARRAY_SIZE(card_base_routes);
+ const int num_dr = num_base + 1;
+ int idx;
+
+ dr = devm_kcalloc(dev, num_dr, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ memcpy(dr, card_base_routes, num_base * sizeof(*dr));
+
+ idx = num_base;
+ dr[idx].sink = devm_kasprintf(dev, GFP_KERNEL, "HiFi Playback");
+ dr[idx].source = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Tx", ssp_port);
+ if (!dr[idx].sink || !dr[idx].source)
+ return -ENOMEM;
+
+ *routes = dr;
+ *num_routes = num_dr;
+
+ return 0;
+}
+
+static int avs_max98357a_probe(struct platform_device *pdev)
+{
+ struct snd_soc_dapm_route *routes;
+ struct snd_soc_dai_link *dai_link;
+ struct snd_soc_acpi_mach *mach;
+ struct snd_soc_card *card;
+ struct device *dev = &pdev->dev;
+ const char *pname;
+ int num_routes, ssp_port, ret;
+
+ mach = dev_get_platdata(dev);
+ pname = mach->mach_params.platform;
+ ssp_port = __ffs(mach->mach_params.i2s_link_mask);
+
+ ret = avs_create_dai_link(dev, pname, ssp_port, &dai_link);
+ if (ret) {
+ dev_err(dev, "Failed to create dai link: %d", ret);
+ return ret;
+ }
+
+ ret = avs_create_dapm_routes(dev, ssp_port, &routes, &num_routes);
+ if (ret) {
+ dev_err(dev, "Failed to create dapm routes: %d", ret);
+ return ret;
+ }
+
+ card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
+ if (!card)
+ return -ENOMEM;
+
+ card->name = "avs_max98357a";
+ card->dev = dev;
+ card->owner = THIS_MODULE;
+ card->dai_link = dai_link;
+ card->num_links = 1;
+ card->controls = card_controls;
+ card->num_controls = ARRAY_SIZE(card_controls);
+ card->dapm_widgets = card_widgets;
+ card->num_dapm_widgets = ARRAY_SIZE(card_widgets);
+ card->dapm_routes = routes;
+ card->num_dapm_routes = num_routes;
+ card->fully_routed = true;
+
+ ret = snd_soc_fixup_dai_links_platform_name(card, pname);
+ if (ret)
+ return ret;
+
+ return devm_snd_soc_register_card(dev, card);
+}
+
+static struct platform_driver avs_max98357a_driver = {
+ .probe = avs_max98357a_probe,
+ .driver = {
+ .name = "avs_max98357a",
+ .pm = &snd_soc_pm_ops,
+ },
+};
+
+module_platform_driver(avs_max98357a_driver)
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:avs_max98357a");
diff --git a/sound/soc/intel/avs/boards/max98373.c b/sound/soc/intel/avs/boards/max98373.c
new file mode 100644
index 000000000000..0fa8f5606385
--- /dev/null
+++ b/sound/soc/intel/avs/boards/max98373.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright(c) 2022 Intel Corporation. All rights reserved.
+//
+// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
+// Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
+//
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-acpi.h>
+#include <sound/soc-dapm.h>
+
+#define MAX98373_DEV0_NAME "i2c-MX98373:00"
+#define MAX98373_DEV1_NAME "i2c-MX98373:01"
+#define MAX98373_CODEC_NAME "max98373-aif1"
+
+static struct snd_soc_codec_conf card_codec_conf[] = {
+ {
+ .dlc = COMP_CODEC_CONF(MAX98373_DEV0_NAME),
+ .name_prefix = "Right",
+ },
+ {
+ .dlc = COMP_CODEC_CONF(MAX98373_DEV1_NAME),
+ .name_prefix = "Left",
+ },
+};
+
+static const struct snd_kcontrol_new card_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Left Spk"),
+ SOC_DAPM_PIN_SWITCH("Right Spk"),
+};
+
+static const struct snd_soc_dapm_widget card_widgets[] = {
+ SND_SOC_DAPM_SPK("Left Spk", NULL),
+ SND_SOC_DAPM_SPK("Right Spk", NULL),
+};
+
+static const struct snd_soc_dapm_route card_base_routes[] = {
+ { "Left Spk", NULL, "Left BE_OUT" },
+ { "Right Spk", NULL, "Right BE_OUT" },
+};
+
+static int
+avs_max98373_be_fixup(struct snd_soc_pcm_runtime *runtime, struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate, *channels;
+ struct snd_mask *fmt;
+
+ rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+ channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+ fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+
+ /* The ADSP will convert the FE rate to 48k, stereo */
+ rate->min = rate->max = 48000;
+ channels->min = channels->max = 2;
+
+ /* set SSP0 to 16 bit */
+ snd_mask_none(fmt);
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE);
+ return 0;
+}
+
+static int avs_max98373_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *runtime = asoc_substream_to_rtd(substream);
+ struct snd_soc_dai *codec_dai;
+ int ret, i;
+
+ for_each_rtd_codec_dais(runtime, i, codec_dai) {
+ if (!strcmp(codec_dai->component->name, MAX98373_DEV0_NAME)) {
+ ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x30, 3, 8, 16);
+ if (ret < 0) {
+ dev_err(runtime->dev, "DEV0 TDM slot err:%d\n", ret);
+ return ret;
+ }
+ }
+ if (!strcmp(codec_dai->component->name, MAX98373_DEV1_NAME)) {
+ ret = snd_soc_dai_set_tdm_slot(codec_dai, 0xC0, 3, 8, 16);
+ if (ret < 0) {
+ dev_err(runtime->dev, "DEV1 TDM slot err:%d\n", ret);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_ops avs_max98373_ops = {
+ .hw_params = avs_max98373_hw_params,
+};
+
+static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port,
+ struct snd_soc_dai_link **dai_link)
+{
+ struct snd_soc_dai_link_component *platform;
+ struct snd_soc_dai_link *dl;
+
+ dl = devm_kzalloc(dev, sizeof(*dl), GFP_KERNEL);
+ platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL);
+ if (!dl || !platform)
+ return -ENOMEM;
+
+ platform->name = platform_name;
+
+ dl->name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d-Codec", ssp_port);
+ dl->cpus = devm_kzalloc(dev, sizeof(*dl->cpus), GFP_KERNEL);
+ dl->codecs = devm_kzalloc(dev, sizeof(*dl->codecs) * 2, GFP_KERNEL);
+ if (!dl->name || !dl->cpus || !dl->codecs)
+ return -ENOMEM;
+
+ dl->cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d Pin", ssp_port);
+ dl->codecs[0].name = devm_kasprintf(dev, GFP_KERNEL, MAX98373_DEV0_NAME);
+ dl->codecs[0].dai_name = devm_kasprintf(dev, GFP_KERNEL, MAX98373_CODEC_NAME);
+ dl->codecs[1].name = devm_kasprintf(dev, GFP_KERNEL, MAX98373_DEV1_NAME);
+ dl->codecs[1].dai_name = devm_kasprintf(dev, GFP_KERNEL, MAX98373_CODEC_NAME);
+ if (!dl->cpus->dai_name || !dl->codecs[0].name || !dl->codecs[0].dai_name ||
+ !dl->codecs[1].name || !dl->codecs[1].dai_name)
+ return -ENOMEM;
+
+ dl->num_cpus = 1;
+ dl->num_codecs = 2;
+ dl->platforms = platform;
+ dl->num_platforms = 1;
+ dl->id = 0;
+ dl->dai_fmt = SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBC_CFC;
+ dl->be_hw_params_fixup = avs_max98373_be_fixup;
+ dl->nonatomic = 1;
+ dl->no_pcm = 1;
+ dl->dpcm_capture = 1;
+ dl->dpcm_playback = 1;
+ dl->ignore_pmdown_time = 1;
+ dl->ops = &avs_max98373_ops;
+
+ *dai_link = dl;
+
+ return 0;
+}
+
+static int avs_create_dapm_routes(struct device *dev, int ssp_port,
+ struct snd_soc_dapm_route **routes, int *num_routes)
+{
+ struct snd_soc_dapm_route *dr;
+ const int num_base = ARRAY_SIZE(card_base_routes);
+ const int num_dr = num_base + 2;
+ int idx;
+
+ dr = devm_kcalloc(dev, num_dr, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ memcpy(dr, card_base_routes, num_base * sizeof(*dr));
+
+ idx = num_base;
+ dr[idx].sink = devm_kasprintf(dev, GFP_KERNEL, "Left HiFi Playback");
+ dr[idx].source = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Tx", ssp_port);
+ if (!dr[idx].sink || !dr[idx].source)
+ return -ENOMEM;
+
+ idx++;
+ dr[idx].sink = devm_kasprintf(dev, GFP_KERNEL, "Right HiFi Playback");
+ dr[idx].source = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Tx", ssp_port);
+ if (!dr[idx].sink || !dr[idx].source)
+ return -ENOMEM;
+
+ *routes = dr;
+ *num_routes = num_dr;
+
+ return 0;
+}
+
+static int avs_max98373_probe(struct platform_device *pdev)
+{
+ struct snd_soc_dapm_route *routes;
+ struct snd_soc_dai_link *dai_link;
+ struct snd_soc_acpi_mach *mach;
+ struct snd_soc_card *card;
+ struct device *dev = &pdev->dev;
+ const char *pname;
+ int num_routes, ssp_port, ret;
+
+ mach = dev_get_platdata(dev);
+ pname = mach->mach_params.platform;
+ ssp_port = __ffs(mach->mach_params.i2s_link_mask);
+
+ ret = avs_create_dai_link(dev, pname, ssp_port, &dai_link);
+ if (ret) {
+ dev_err(dev, "Failed to create dai link: %d", ret);
+ return ret;
+ }
+
+ ret = avs_create_dapm_routes(dev, ssp_port, &routes, &num_routes);
+ if (ret) {
+ dev_err(dev, "Failed to create dapm routes: %d", ret);
+ return ret;
+ }
+
+ card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
+ if (!card)
+ return -ENOMEM;
+
+ card->name = "avs_max98373";
+ card->dev = dev;
+ card->owner = THIS_MODULE;
+ card->dai_link = dai_link;
+ card->num_links = 1;
+ card->codec_conf = card_codec_conf;
+ card->num_configs = ARRAY_SIZE(card_codec_conf);
+ card->controls = card_controls;
+ card->num_controls = ARRAY_SIZE(card_controls);
+ card->dapm_widgets = card_widgets;
+ card->num_dapm_widgets = ARRAY_SIZE(card_widgets);
+ card->dapm_routes = routes;
+ card->num_dapm_routes = num_routes;
+ card->fully_routed = true;
+
+ ret = snd_soc_fixup_dai_links_platform_name(card, pname);
+ if (ret)
+ return ret;
+
+ return devm_snd_soc_register_card(dev, card);
+}
+
+static struct platform_driver avs_max98373_driver = {
+ .probe = avs_max98373_probe,
+ .driver = {
+ .name = "avs_max98373",
+ .pm = &snd_soc_pm_ops,
+ },
+};
+
+module_platform_driver(avs_max98373_driver)
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:avs_max98373");
diff --git a/sound/soc/intel/avs/boards/nau8825.c b/sound/soc/intel/avs/boards/nau8825.c
new file mode 100644
index 000000000000..f76909e9f990
--- /dev/null
+++ b/sound/soc/intel/avs/boards/nau8825.c
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
+//
+// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
+// Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
+//
+
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-acpi.h>
+#include "../../../codecs/nau8825.h"
+
+#define SKL_NUVOTON_CODEC_DAI "nau8825-hifi"
+
+static int
+avs_nau8825_clock_control(struct snd_soc_dapm_widget *w, struct snd_kcontrol *control, int event)
+{
+ struct snd_soc_dapm_context *dapm = w->dapm;
+ struct snd_soc_card *card = dapm->card;
+ struct snd_soc_dai *codec_dai;
+ int ret;
+
+ codec_dai = snd_soc_card_get_codec_dai(card, SKL_NUVOTON_CODEC_DAI);
+ if (!codec_dai) {
+ dev_err(card->dev, "Codec dai not found\n");
+ return -EINVAL;
+ }
+
+ if (!SND_SOC_DAPM_EVENT_ON(event)) {
+ ret = snd_soc_dai_set_sysclk(codec_dai, NAU8825_CLK_INTERNAL, 0, SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ dev_err(card->dev, "set sysclk err = %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct snd_kcontrol_new card_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Headphone Jack"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+};
+
+static const struct snd_soc_dapm_widget card_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0, avs_nau8825_clock_control,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+};
+
+static const struct snd_soc_dapm_route card_base_routes[] = {
+ { "Headphone Jack", NULL, "HPOL" },
+ { "Headphone Jack", NULL, "HPOR" },
+
+ { "MIC", NULL, "Headset Mic" },
+
+ { "Headphone Jack", NULL, "Platform Clock" },
+ { "Headset Mic", NULL, "Platform Clock" },
+};
+
+static struct snd_soc_jack_pin card_headset_pins[] = {
+ {
+ .pin = "Headphone Jack",
+ .mask = SND_JACK_HEADPHONE,
+ },
+ {
+ .pin = "Headset Mic",
+ .mask = SND_JACK_MICROPHONE,
+ },
+};
+
+static int avs_nau8825_codec_init(struct snd_soc_pcm_runtime *runtime)
+{
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(runtime, 0);
+ struct snd_soc_component *component = codec_dai->component;
+ struct snd_soc_jack_pin *pins;
+ struct snd_soc_jack *jack;
+ struct snd_soc_card *card = runtime->card;
+ int num_pins, ret;
+
+ jack = snd_soc_card_get_drvdata(card);
+ num_pins = ARRAY_SIZE(card_headset_pins);
+
+ pins = devm_kmemdup(card->dev, card_headset_pins, sizeof(*pins) * num_pins, GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+
+ /*
+ * The 4 buttons here map to the Google Reference headset.
+ * The use of these buttons can be decided by userspace.
+ */
+ ret = snd_soc_card_jack_new_pins(card, "Headset", SND_JACK_HEADSET | SND_JACK_BTN_0 |
+ SND_JACK_BTN_1 | SND_JACK_BTN_2 | SND_JACK_BTN_3,
+ jack, pins, num_pins);
+ if (ret)
+ return ret;
+
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);
+
+ //snd_soc_component_set_jack(component, jack, NULL);
+ // TODO: Fix nau8825 codec to use .set_jack, like everyone else
+ nau8825_enable_jack_detect(component, jack);
+
+ return 0;
+}
+
+static int
+avs_nau8825_be_fixup(struct snd_soc_pcm_runtime *runtime, struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate, *channels;
+ struct snd_mask *fmt;
+
+ rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+ channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+ fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+
+ /* The ADSP will convert the FE rate to 48k, stereo */
+ rate->min = rate->max = 48000;
+ channels->min = channels->max = 2;
+
+ /* set SSP to 24 bit */
+ snd_mask_none(fmt);
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE);
+
+ return 0;
+}
+
+static int avs_nau8825_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *rtm = asoc_substream_to_rtd(substream);
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtm, 0);
+ int ret = 0;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ ret = snd_soc_dai_set_sysclk(codec_dai, NAU8825_CLK_FLL_FS, 0, SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ dev_err(codec_dai->dev, "can't set FS clock %d\n", ret);
+ break;
+ }
+
+ ret = snd_soc_dai_set_pll(codec_dai, 0, 0, runtime->rate, runtime->rate * 256);
+ if (ret < 0)
+ dev_err(codec_dai->dev, "can't set FLL: %d\n", ret);
+ break;
+
+ case SNDRV_PCM_TRIGGER_RESUME:
+ ret = snd_soc_dai_set_pll(codec_dai, 0, 0, runtime->rate, runtime->rate * 256);
+ if (ret < 0)
+ dev_err(codec_dai->dev, "can't set FLL: %d\n", ret);
+ break;
+ }
+
+ return ret;
+}
+
+static const struct snd_soc_ops avs_nau8825_ops = {
+ .trigger = avs_nau8825_trigger,
+};
+
+static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port,
+ struct snd_soc_dai_link **dai_link)
+{
+ struct snd_soc_dai_link_component *platform;
+ struct snd_soc_dai_link *dl;
+
+ dl = devm_kzalloc(dev, sizeof(*dl), GFP_KERNEL);
+ platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL);
+ if (!dl || !platform)
+ return -ENOMEM;
+
+ platform->name = platform_name;
+
+ dl->name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d-Codec", ssp_port);
+ dl->cpus = devm_kzalloc(dev, sizeof(*dl->cpus), GFP_KERNEL);
+ dl->codecs = devm_kzalloc(dev, sizeof(*dl->codecs), GFP_KERNEL);
+ if (!dl->name || !dl->cpus || !dl->codecs)
+ return -ENOMEM;
+
+ dl->cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d Pin", ssp_port);
+ dl->codecs->name = devm_kasprintf(dev, GFP_KERNEL, "i2c-10508825:00");
+ dl->codecs->dai_name = devm_kasprintf(dev, GFP_KERNEL, SKL_NUVOTON_CODEC_DAI);
+ if (!dl->cpus->dai_name || !dl->codecs->name || !dl->codecs->dai_name)
+ return -ENOMEM;
+
+ dl->num_cpus = 1;
+ dl->num_codecs = 1;
+ dl->platforms = platform;
+ dl->num_platforms = 1;
+ dl->id = 0;
+ dl->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS;
+ dl->init = avs_nau8825_codec_init;
+ dl->be_hw_params_fixup = avs_nau8825_be_fixup;
+ dl->ops = &avs_nau8825_ops;
+ dl->nonatomic = 1;
+ dl->no_pcm = 1;
+ dl->dpcm_capture = 1;
+ dl->dpcm_playback = 1;
+
+ *dai_link = dl;
+
+ return 0;
+}
+
+static int avs_create_dapm_routes(struct device *dev, int ssp_port,
+ struct snd_soc_dapm_route **routes, int *num_routes)
+{
+ struct snd_soc_dapm_route *dr;
+ const int num_base = ARRAY_SIZE(card_base_routes);
+ const int num_dr = num_base + 2;
+ int idx;
+
+ dr = devm_kcalloc(dev, num_dr, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ memcpy(dr, card_base_routes, num_base * sizeof(*dr));
+
+ idx = num_base;
+ dr[idx].sink = devm_kasprintf(dev, GFP_KERNEL, "Playback");
+ dr[idx].source = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Tx", ssp_port);
+ if (!dr[idx].sink || !dr[idx].source)
+ return -ENOMEM;
+
+ idx++;
+ dr[idx].sink = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Rx", ssp_port);
+ dr[idx].source = devm_kasprintf(dev, GFP_KERNEL, "Capture");
+ if (!dr[idx].sink || !dr[idx].source)
+ return -ENOMEM;
+
+ *routes = dr;
+ *num_routes = num_dr;
+
+ return 0;
+}
+
+static int avs_card_set_jack(struct snd_soc_card *card, struct snd_soc_jack *jack)
+{
+ struct snd_soc_component *component;
+
+ for_each_card_components(card, component)
+ snd_soc_component_set_jack(component, jack, NULL);
+ return 0;
+}
+
+static int avs_card_remove(struct snd_soc_card *card)
+{
+ return avs_card_set_jack(card, NULL);
+}
+
+static int avs_card_suspend_pre(struct snd_soc_card *card)
+{
+ return avs_card_set_jack(card, NULL);
+}
+
+static int avs_card_resume_post(struct snd_soc_card *card)
+{
+ struct snd_soc_dai *codec_dai = snd_soc_card_get_codec_dai(card, SKL_NUVOTON_CODEC_DAI);
+ struct snd_soc_jack *jack = snd_soc_card_get_drvdata(card);
+
+ if (!codec_dai) {
+ dev_err(card->dev, "Codec dai not found\n");
+ return -EINVAL;
+ }
+
+ if (codec_dai->stream_active[SNDRV_PCM_STREAM_PLAYBACK] &&
+ codec_dai->playback_widget->active)
+ snd_soc_dai_set_sysclk(codec_dai, NAU8825_CLK_FLL_FS, 0, SND_SOC_CLOCK_IN);
+
+ return avs_card_set_jack(card, jack);
+}
+
+static int avs_nau8825_probe(struct platform_device *pdev)
+{
+ struct snd_soc_dapm_route *routes;
+ struct snd_soc_dai_link *dai_link;
+ struct snd_soc_acpi_mach *mach;
+ struct snd_soc_card *card;
+ struct snd_soc_jack *jack;
+ struct device *dev = &pdev->dev;
+ const char *pname;
+ int num_routes, ssp_port, ret;
+
+ mach = dev_get_platdata(dev);
+ pname = mach->mach_params.platform;
+ ssp_port = __ffs(mach->mach_params.i2s_link_mask);
+
+ ret = avs_create_dai_link(dev, pname, ssp_port, &dai_link);
+ if (ret) {
+ dev_err(dev, "Failed to create dai link: %d", ret);
+ return ret;
+ }
+
+ ret = avs_create_dapm_routes(dev, ssp_port, &routes, &num_routes);
+ if (ret) {
+ dev_err(dev, "Failed to create dapm routes: %d", ret);
+ return ret;
+ }
+
+ jack = devm_kzalloc(dev, sizeof(*jack), GFP_KERNEL);
+ card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
+ if (!jack || !card)
+ return -ENOMEM;
+
+ card->name = "avs_nau8825";
+ card->dev = dev;
+ card->owner = THIS_MODULE;
+ card->remove = avs_card_remove;
+ card->suspend_pre = avs_card_suspend_pre;
+ card->resume_post = avs_card_resume_post;
+ card->dai_link = dai_link;
+ card->num_links = 1;
+ card->controls = card_controls;
+ card->num_controls = ARRAY_SIZE(card_controls);
+ card->dapm_widgets = card_widgets;
+ card->num_dapm_widgets = ARRAY_SIZE(card_widgets);
+ card->dapm_routes = routes;
+ card->num_dapm_routes = num_routes;
+ card->fully_routed = true;
+ snd_soc_card_set_drvdata(card, jack);
+
+ ret = snd_soc_fixup_dai_links_platform_name(card, pname);
+ if (ret)
+ return ret;
+
+ return devm_snd_soc_register_card(dev, card);
+}
+
+static struct platform_driver avs_nau8825_driver = {
+ .probe = avs_nau8825_probe,
+ .driver = {
+ .name = "avs_nau8825",
+ .pm = &snd_soc_pm_ops,
+ },
+};
+
+module_platform_driver(avs_nau8825_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:avs_nau8825");
diff --git a/sound/soc/intel/avs/boards/rt274.c b/sound/soc/intel/avs/boards/rt274.c
new file mode 100644
index 000000000000..afef5a3ca60b
--- /dev/null
+++ b/sound/soc/intel/avs/boards/rt274.c
@@ -0,0 +1,310 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
+//
+// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
+// Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
+//
+
+#include <linux/module.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-acpi.h>
+#include "../../../codecs/rt274.h"
+
+#define AVS_RT274_FREQ_OUT 24000000
+#define AVS_RT274_BE_FIXUP_RATE 48000
+#define RT274_CODEC_DAI "rt274-aif1"
+
+static const struct snd_kcontrol_new card_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Headphone Jack"),
+ SOC_DAPM_PIN_SWITCH("Mic Jack"),
+};
+
+static int
+avs_rt274_clock_control(struct snd_soc_dapm_widget *w, struct snd_kcontrol *control, int event)
+{
+ struct snd_soc_dapm_context *dapm = w->dapm;
+ struct snd_soc_card *card = dapm->card;
+ struct snd_soc_dai *codec_dai;
+ int ret;
+
+ codec_dai = snd_soc_card_get_codec_dai(card, RT274_CODEC_DAI);
+ if (!codec_dai)
+ return -EINVAL;
+
+ /* Codec needs clock for Jack detection and button press */
+ ret = snd_soc_dai_set_sysclk(codec_dai, RT274_SCLK_S_PLL2, AVS_RT274_FREQ_OUT,
+ SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ dev_err(codec_dai->dev, "set codec sysclk failed: %d\n", ret);
+ return ret;
+ }
+
+ if (SND_SOC_DAPM_EVENT_ON(event)) {
+ int ratio = 100;
+
+ snd_soc_dai_set_bclk_ratio(codec_dai, ratio);
+
+ ret = snd_soc_dai_set_pll(codec_dai, 0, RT274_PLL2_S_BCLK,
+ AVS_RT274_BE_FIXUP_RATE * ratio, AVS_RT274_FREQ_OUT);
+ if (ret) {
+ dev_err(codec_dai->dev, "failed to enable PLL2: %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dapm_widget card_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+ SND_SOC_DAPM_MIC("Mic Jack", NULL),
+ SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0, avs_rt274_clock_control,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+};
+
+static const struct snd_soc_dapm_route card_base_routes[] = {
+ {"Headphone Jack", NULL, "HPO Pin"},
+ {"MIC", NULL, "Mic Jack"},
+
+ {"Headphone Jack", NULL, "Platform Clock"},
+ {"MIC", NULL, "Platform Clock"},
+};
+
+static struct snd_soc_jack_pin card_headset_pins[] = {
+ {
+ .pin = "Headphone Jack",
+ .mask = SND_JACK_HEADPHONE,
+ },
+ {
+ .pin = "Mic Jack",
+ .mask = SND_JACK_MICROPHONE,
+ },
+};
+
+static int avs_rt274_codec_init(struct snd_soc_pcm_runtime *runtime)
+{
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(runtime, 0);
+ struct snd_soc_component *component = codec_dai->component;
+ struct snd_soc_jack_pin *pins;
+ struct snd_soc_jack *jack;
+ struct snd_soc_card *card = runtime->card;
+ int num_pins, ret;
+
+ jack = snd_soc_card_get_drvdata(card);
+ num_pins = ARRAY_SIZE(card_headset_pins);
+
+ pins = devm_kmemdup(card->dev, card_headset_pins, sizeof(*pins) * num_pins, GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+
+ ret = snd_soc_card_jack_new_pins(card, "Headset", SND_JACK_HEADSET, jack, pins, num_pins);
+ if (ret)
+ return ret;
+
+ snd_soc_component_set_jack(component, jack, NULL);
+
+ /* TDM 4 slots 24 bit, set Rx & Tx bitmask to 4 active slots */
+ ret = snd_soc_dai_set_tdm_slot(codec_dai, 0xF, 0xF, 4, 24);
+ if (ret < 0) {
+ dev_err(card->dev, "can't set codec pcm format %d\n", ret);
+ return ret;
+ }
+
+ card->dapm.idle_bias_off = true;
+
+ return 0;
+}
+
+static int avs_rt274_be_fixup(struct snd_soc_pcm_runtime *runtime, struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate, *channels;
+ struct snd_mask *fmt;
+
+ rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+ channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+ fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+
+ /* The ADSP will convert the FE rate to 48k, stereo */
+ rate->min = rate->max = AVS_RT274_BE_FIXUP_RATE;
+ channels->min = channels->max = 2;
+
+ /* set SSPN to 24 bit */
+ snd_mask_none(fmt);
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE);
+
+ return 0;
+}
+
+static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port,
+ struct snd_soc_dai_link **dai_link)
+{
+ struct snd_soc_dai_link_component *platform;
+ struct snd_soc_dai_link *dl;
+
+ dl = devm_kzalloc(dev, sizeof(*dl), GFP_KERNEL);
+ platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL);
+ if (!dl || !platform)
+ return -ENOMEM;
+
+ platform->name = platform_name;
+
+ dl->name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d-Codec", ssp_port);
+ dl->cpus = devm_kzalloc(dev, sizeof(*dl->cpus), GFP_KERNEL);
+ dl->codecs = devm_kzalloc(dev, sizeof(*dl->codecs), GFP_KERNEL);
+ if (!dl->name || !dl->cpus || !dl->codecs)
+ return -ENOMEM;
+
+ dl->cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d Pin", ssp_port);
+ dl->codecs->name = devm_kasprintf(dev, GFP_KERNEL, "i2c-INT34C2:00");
+ dl->codecs->dai_name = devm_kasprintf(dev, GFP_KERNEL, "rt274-aif1");
+ if (!dl->cpus->dai_name || !dl->codecs->name || !dl->codecs->dai_name)
+ return -ENOMEM;
+
+ dl->num_cpus = 1;
+ dl->num_codecs = 1;
+ dl->platforms = platform;
+ dl->num_platforms = 1;
+ dl->id = 0;
+ dl->dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS;
+ dl->init = avs_rt274_codec_init;
+ dl->be_hw_params_fixup = avs_rt274_be_fixup;
+ dl->nonatomic = 1;
+ dl->no_pcm = 1;
+ dl->dpcm_capture = 1;
+ dl->dpcm_playback = 1;
+
+ *dai_link = dl;
+
+ return 0;
+}
+
+static int avs_create_dapm_routes(struct device *dev, int ssp_port,
+ struct snd_soc_dapm_route **routes, int *num_routes)
+{
+ struct snd_soc_dapm_route *dr;
+ const int num_base = ARRAY_SIZE(card_base_routes);
+ const int num_dr = num_base + 2;
+ int idx;
+
+ dr = devm_kcalloc(dev, num_dr, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ memcpy(dr, card_base_routes, num_base * sizeof(*dr));
+
+ idx = num_base;
+ dr[idx].sink = devm_kasprintf(dev, GFP_KERNEL, "AIF1 Playback");
+ dr[idx].source = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Tx", ssp_port);
+ if (!dr[idx].sink || !dr[idx].source)
+ return -ENOMEM;
+
+ idx++;
+ dr[idx].sink = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Rx", ssp_port);
+ dr[idx].source = devm_kasprintf(dev, GFP_KERNEL, "AIF1 Capture");
+ if (!dr[idx].sink || !dr[idx].source)
+ return -ENOMEM;
+
+ *routes = dr;
+ *num_routes = num_dr;
+
+ return 0;
+}
+
+static int avs_card_set_jack(struct snd_soc_card *card, struct snd_soc_jack *jack)
+{
+ struct snd_soc_component *component;
+
+ for_each_card_components(card, component)
+ snd_soc_component_set_jack(component, jack, NULL);
+ return 0;
+}
+
+static int avs_card_remove(struct snd_soc_card *card)
+{
+ return avs_card_set_jack(card, NULL);
+}
+
+static int avs_card_suspend_pre(struct snd_soc_card *card)
+{
+ return avs_card_set_jack(card, NULL);
+}
+
+static int avs_card_resume_post(struct snd_soc_card *card)
+{
+ struct snd_soc_jack *jack = snd_soc_card_get_drvdata(card);
+
+ return avs_card_set_jack(card, jack);
+}
+
+static int avs_rt274_probe(struct platform_device *pdev)
+{
+ struct snd_soc_dapm_route *routes;
+ struct snd_soc_dai_link *dai_link;
+ struct snd_soc_acpi_mach *mach;
+ struct snd_soc_card *card;
+ struct snd_soc_jack *jack;
+ struct device *dev = &pdev->dev;
+ const char *pname;
+ int num_routes, ssp_port, ret;
+
+ mach = dev_get_platdata(dev);
+ pname = mach->mach_params.platform;
+ ssp_port = __ffs(mach->mach_params.i2s_link_mask);
+
+ ret = avs_create_dai_link(dev, pname, ssp_port, &dai_link);
+ if (ret) {
+ dev_err(dev, "Failed to create dai link: %d", ret);
+ return ret;
+ }
+
+ ret = avs_create_dapm_routes(dev, ssp_port, &routes, &num_routes);
+ if (ret) {
+ dev_err(dev, "Failed to create dapm routes: %d", ret);
+ return ret;
+ }
+
+ jack = devm_kzalloc(dev, sizeof(*jack), GFP_KERNEL);
+ card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
+ if (!jack || !card)
+ return -ENOMEM;
+
+ card->name = "avs_rt274";
+ card->dev = dev;
+ card->owner = THIS_MODULE;
+ card->remove = avs_card_remove;
+ card->suspend_pre = avs_card_suspend_pre;
+ card->resume_post = avs_card_resume_post;
+ card->dai_link = dai_link;
+ card->num_links = 1;
+ card->controls = card_controls;
+ card->num_controls = ARRAY_SIZE(card_controls);
+ card->dapm_widgets = card_widgets;
+ card->num_dapm_widgets = ARRAY_SIZE(card_widgets);
+ card->dapm_routes = routes;
+ card->num_dapm_routes = num_routes;
+ card->fully_routed = true;
+ snd_soc_card_set_drvdata(card, jack);
+
+ ret = snd_soc_fixup_dai_links_platform_name(card, pname);
+ if (ret)
+ return ret;
+
+ return devm_snd_soc_register_card(dev, card);
+}
+
+static struct platform_driver avs_rt274_driver = {
+ .probe = avs_rt274_probe,
+ .driver = {
+ .name = "avs_rt274",
+ .pm = &snd_soc_pm_ops,
+ },
+};
+
+module_platform_driver(avs_rt274_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:avs_rt274");
diff --git a/sound/soc/intel/avs/boards/rt286.c b/sound/soc/intel/avs/boards/rt286.c
new file mode 100644
index 000000000000..e51d4e181274
--- /dev/null
+++ b/sound/soc/intel/avs/boards/rt286.c
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
+//
+// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
+// Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
+//
+
+#include <linux/module.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-acpi.h>
+#include "../../../codecs/rt286.h"
+
+static const struct snd_kcontrol_new card_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Headphone Jack"),
+ SOC_DAPM_PIN_SWITCH("Mic Jack"),
+ SOC_DAPM_PIN_SWITCH("Speaker"),
+};
+
+static const struct snd_soc_dapm_widget card_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+ SND_SOC_DAPM_MIC("Mic Jack", NULL),
+ SND_SOC_DAPM_SPK("Speaker", NULL),
+};
+
+static const struct snd_soc_dapm_route card_base_routes[] = {
+ /* HP jack connectors - unknown if we have jack detect */
+ {"Headphone Jack", NULL, "HPO Pin"},
+ {"MIC1", NULL, "Mic Jack"},
+
+ {"Speaker", NULL, "SPOR"},
+ {"Speaker", NULL, "SPOL"},
+};
+
+static struct snd_soc_jack_pin card_headset_pins[] = {
+ {
+ .pin = "Headphone Jack",
+ .mask = SND_JACK_HEADPHONE,
+ },
+ {
+ .pin = "Mic Jack",
+ .mask = SND_JACK_MICROPHONE,
+ },
+};
+
+static int avs_rt286_codec_init(struct snd_soc_pcm_runtime *runtime)
+{
+ struct snd_soc_component *component = asoc_rtd_to_codec(runtime, 0)->component;
+ struct snd_soc_jack_pin *pins;
+ struct snd_soc_jack *jack;
+ struct snd_soc_card *card = runtime->card;
+ int num_pins, ret;
+
+ jack = snd_soc_card_get_drvdata(card);
+ num_pins = ARRAY_SIZE(card_headset_pins);
+
+ pins = devm_kmemdup(card->dev, card_headset_pins, sizeof(*pins) * num_pins, GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+
+ ret = snd_soc_card_jack_new_pins(card, "Headset", SND_JACK_HEADSET | SND_JACK_BTN_0, jack,
+ pins, num_pins);
+ if (ret)
+ return ret;
+
+ snd_soc_component_set_jack(component, jack, NULL);
+
+ return 0;
+}
+
+static int avs_rt286_be_fixup(struct snd_soc_pcm_runtime *runtime, struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate, *channels;
+ struct snd_mask *fmt;
+
+ rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+ channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+ fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+
+ /* The ADSP will convert the FE rate to 48k, stereo */
+ rate->min = rate->max = 48000;
+ channels->min = channels->max = 2;
+
+ /* set SSP0 to 24 bit */
+ snd_mask_none(fmt);
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE);
+
+ return 0;
+}
+
+static int
+avs_rt286_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *runtime = substream->private_data;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(runtime, 0);
+ int ret;
+
+ ret = snd_soc_dai_set_sysclk(codec_dai, RT286_SCLK_S_PLL, 24000000, SND_SOC_CLOCK_IN);
+ if (ret < 0)
+ dev_err(runtime->dev, "Set codec sysclk failed: %d\n", ret);
+
+ return ret;
+}
+
+static const struct snd_soc_ops avs_rt286_ops = {
+ .hw_params = avs_rt286_hw_params,
+};
+
+static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port,
+ struct snd_soc_dai_link **dai_link)
+{
+ struct snd_soc_dai_link_component *platform;
+ struct snd_soc_dai_link *dl;
+
+ dl = devm_kzalloc(dev, sizeof(*dl), GFP_KERNEL);
+ platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL);
+ if (!dl || !platform)
+ return -ENOMEM;
+
+ platform->name = platform_name;
+
+ dl->name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d-Codec", ssp_port);
+ dl->cpus = devm_kzalloc(dev, sizeof(*dl->cpus), GFP_KERNEL);
+ dl->codecs = devm_kzalloc(dev, sizeof(*dl->codecs), GFP_KERNEL);
+ if (!dl->name || !dl->cpus || !dl->codecs)
+ return -ENOMEM;
+
+ dl->cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d Pin", ssp_port);
+ dl->codecs->name = devm_kasprintf(dev, GFP_KERNEL, "i2c-INT343A:00");
+ dl->codecs->dai_name = devm_kasprintf(dev, GFP_KERNEL, "rt286-aif1");
+ if (!dl->cpus->dai_name || !dl->codecs->name || !dl->codecs->dai_name)
+ return -ENOMEM;
+
+ dl->num_cpus = 1;
+ dl->num_codecs = 1;
+ dl->platforms = platform;
+ dl->num_platforms = 1;
+ dl->id = 0;
+ dl->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS;
+ dl->init = avs_rt286_codec_init;
+ dl->be_hw_params_fixup = avs_rt286_be_fixup;
+ dl->ops = &avs_rt286_ops;
+ dl->nonatomic = 1;
+ dl->no_pcm = 1;
+ dl->dpcm_capture = 1;
+ dl->dpcm_playback = 1;
+
+ *dai_link = dl;
+
+ return 0;
+}
+
+static int avs_create_dapm_routes(struct device *dev, int ssp_port,
+ struct snd_soc_dapm_route **routes, int *num_routes)
+{
+ struct snd_soc_dapm_route *dr;
+ const int num_base = ARRAY_SIZE(card_base_routes);
+ const int num_dr = num_base + 2;
+ int idx;
+
+ dr = devm_kcalloc(dev, num_dr, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ memcpy(dr, card_base_routes, num_base * sizeof(*dr));
+
+ idx = num_base;
+ dr[idx].sink = devm_kasprintf(dev, GFP_KERNEL, "AIF1 Playback");
+ dr[idx].source = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Tx", ssp_port);
+ if (!dr[idx].sink || !dr[idx].source)
+ return -ENOMEM;
+
+ idx++;
+ dr[idx].sink = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Rx", ssp_port);
+ dr[idx].source = devm_kasprintf(dev, GFP_KERNEL, "AIF1 Capture");
+ if (!dr[idx].sink || !dr[idx].source)
+ return -ENOMEM;
+
+ *routes = dr;
+ *num_routes = num_dr;
+
+ return 0;
+}
+
+static int avs_card_set_jack(struct snd_soc_card *card, struct snd_soc_jack *jack)
+{
+ struct snd_soc_component *component;
+
+ for_each_card_components(card, component)
+ snd_soc_component_set_jack(component, jack, NULL);
+ return 0;
+}
+
+static int avs_card_remove(struct snd_soc_card *card)
+{
+ return avs_card_set_jack(card, NULL);
+}
+
+static int avs_card_suspend_pre(struct snd_soc_card *card)
+{
+ return avs_card_set_jack(card, NULL);
+}
+
+static int avs_card_resume_post(struct snd_soc_card *card)
+{
+ struct snd_soc_jack *jack = snd_soc_card_get_drvdata(card);
+
+ return avs_card_set_jack(card, jack);
+}
+
+static int avs_rt286_probe(struct platform_device *pdev)
+{
+ struct snd_soc_dapm_route *routes;
+ struct snd_soc_dai_link *dai_link;
+ struct snd_soc_acpi_mach *mach;
+ struct snd_soc_card *card;
+ struct snd_soc_jack *jack;
+ struct device *dev = &pdev->dev;
+ const char *pname;
+ int num_routes, ssp_port, ret;
+
+ mach = dev_get_platdata(dev);
+ pname = mach->mach_params.platform;
+ ssp_port = __ffs(mach->mach_params.i2s_link_mask);
+
+ ret = avs_create_dai_link(dev, pname, ssp_port, &dai_link);
+ if (ret) {
+ dev_err(dev, "Failed to create dai link: %d", ret);
+ return ret;
+ }
+
+ ret = avs_create_dapm_routes(dev, ssp_port, &routes, &num_routes);
+ if (ret) {
+ dev_err(dev, "Failed to create dapm routes: %d", ret);
+ return ret;
+ }
+
+ jack = devm_kzalloc(dev, sizeof(*jack), GFP_KERNEL);
+ card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
+ if (!jack || !card)
+ return -ENOMEM;
+
+ card->name = "avs_rt286";
+ card->dev = dev;
+ card->owner = THIS_MODULE;
+ card->remove = avs_card_remove;
+ card->suspend_pre = avs_card_suspend_pre;
+ card->resume_post = avs_card_resume_post;
+ card->dai_link = dai_link;
+ card->num_links = 1;
+ card->controls = card_controls;
+ card->num_controls = ARRAY_SIZE(card_controls);
+ card->dapm_widgets = card_widgets;
+ card->num_dapm_widgets = ARRAY_SIZE(card_widgets);
+ card->dapm_routes = routes;
+ card->num_dapm_routes = num_routes;
+ card->fully_routed = true;
+ snd_soc_card_set_drvdata(card, jack);
+
+ ret = snd_soc_fixup_dai_links_platform_name(card, pname);
+ if (ret)
+ return ret;
+
+ return devm_snd_soc_register_card(dev, card);
+}
+
+static struct platform_driver avs_rt286_driver = {
+ .probe = avs_rt286_probe,
+ .driver = {
+ .name = "avs_rt286",
+ .pm = &snd_soc_pm_ops,
+ },
+};
+
+module_platform_driver(avs_rt286_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:avs_rt286");
diff --git a/sound/soc/intel/avs/boards/rt298.c b/sound/soc/intel/avs/boards/rt298.c
new file mode 100644
index 000000000000..b28d36872dcb
--- /dev/null
+++ b/sound/soc/intel/avs/boards/rt298.c
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
+//
+// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
+// Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
+//
+
+#include <linux/module.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-acpi.h>
+#include "../../../codecs/rt298.h"
+
+static const struct snd_kcontrol_new card_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Headphone Jack"),
+ SOC_DAPM_PIN_SWITCH("Mic Jack"),
+ SOC_DAPM_PIN_SWITCH("Speaker"),
+};
+
+static const struct snd_soc_dapm_widget card_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+ SND_SOC_DAPM_MIC("Mic Jack", NULL),
+ SND_SOC_DAPM_SPK("Speaker", NULL),
+};
+
+static const struct snd_soc_dapm_route card_base_routes[] = {
+ /* HP jack connectors - unknown if we have jack detect */
+ {"Headphone Jack", NULL, "HPO Pin"},
+ {"MIC1", NULL, "Mic Jack"},
+
+ {"Speaker", NULL, "SPOR"},
+ {"Speaker", NULL, "SPOL"},
+};
+
+static struct snd_soc_jack_pin card_headset_pins[] = {
+ {
+ .pin = "Headphone Jack",
+ .mask = SND_JACK_HEADPHONE,
+ },
+ {
+ .pin = "Mic Jack",
+ .mask = SND_JACK_MICROPHONE,
+ },
+};
+
+static int avs_rt298_codec_init(struct snd_soc_pcm_runtime *runtime)
+{
+ struct snd_soc_component *component = asoc_rtd_to_codec(runtime, 0)->component;
+ struct snd_soc_jack_pin *pins;
+ struct snd_soc_jack *jack;
+ struct snd_soc_card *card = runtime->card;
+ int num_pins, ret;
+
+ jack = snd_soc_card_get_drvdata(card);
+ num_pins = ARRAY_SIZE(card_headset_pins);
+
+ pins = devm_kmemdup(card->dev, card_headset_pins, sizeof(*pins) * num_pins, GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+
+ ret = snd_soc_card_jack_new_pins(card, "Headset", SND_JACK_HEADSET | SND_JACK_BTN_0, jack,
+ pins, num_pins);
+ if (ret)
+ return ret;
+
+ snd_soc_component_set_jack(component, jack, NULL);
+
+ return 0;
+}
+
+static int avs_rt298_be_fixup(struct snd_soc_pcm_runtime *runtime, struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate, *channels;
+ struct snd_mask *fmt;
+
+ rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+ channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+ fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+
+ /* The ADSP will convert the FE rate to 48k, stereo */
+ rate->min = rate->max = 48000;
+ channels->min = channels->max = 2;
+
+ /* set SSP0 to 24 bit */
+ snd_mask_none(fmt);
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE);
+
+ return 0;
+}
+
+static int
+avs_rt298_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ int ret;
+
+ ret = snd_soc_dai_set_sysclk(codec_dai, RT298_SCLK_S_PLL, 19200000, SND_SOC_CLOCK_IN);
+ if (ret < 0)
+ dev_err(rtd->dev, "Set codec sysclk failed: %d\n", ret);
+
+ return ret;
+}
+
+static const struct snd_soc_ops avs_rt298_ops = {
+ .hw_params = avs_rt298_hw_params,
+};
+
+static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port,
+ struct snd_soc_dai_link **dai_link)
+{
+ struct snd_soc_dai_link_component *platform;
+ struct snd_soc_dai_link *dl;
+
+ dl = devm_kzalloc(dev, sizeof(*dl), GFP_KERNEL);
+ platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL);
+ if (!dl || !platform)
+ return -ENOMEM;
+
+ platform->name = platform_name;
+
+ dl->name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d-Codec", ssp_port);
+ dl->cpus = devm_kzalloc(dev, sizeof(*dl->cpus), GFP_KERNEL);
+ dl->codecs = devm_kzalloc(dev, sizeof(*dl->codecs), GFP_KERNEL);
+ if (!dl->name || !dl->cpus || !dl->codecs)
+ return -ENOMEM;
+
+ dl->cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d Pin", ssp_port);
+ dl->codecs->name = devm_kasprintf(dev, GFP_KERNEL, "i2c-INT343A:00");
+ dl->codecs->dai_name = devm_kasprintf(dev, GFP_KERNEL, "rt298-aif1");
+ if (!dl->cpus->dai_name || !dl->codecs->name || !dl->codecs->dai_name)
+ return -ENOMEM;
+
+ dl->num_cpus = 1;
+ dl->num_codecs = 1;
+ dl->platforms = platform;
+ dl->num_platforms = 1;
+ dl->id = 0;
+ dl->dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS;
+ dl->init = avs_rt298_codec_init;
+ dl->be_hw_params_fixup = avs_rt298_be_fixup;
+ dl->ops = &avs_rt298_ops;
+ dl->nonatomic = 1;
+ dl->no_pcm = 1;
+ dl->dpcm_capture = 1;
+ dl->dpcm_playback = 1;
+
+ *dai_link = dl;
+
+ return 0;
+}
+
+static int avs_create_dapm_routes(struct device *dev, int ssp_port,
+ struct snd_soc_dapm_route **routes, int *num_routes)
+{
+ struct snd_soc_dapm_route *dr;
+ const int num_base = ARRAY_SIZE(card_base_routes);
+ const int num_dr = num_base + 2;
+ int idx;
+
+ dr = devm_kcalloc(dev, num_dr, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ memcpy(dr, card_base_routes, num_base * sizeof(*dr));
+
+ idx = num_base;
+ dr[idx].sink = devm_kasprintf(dev, GFP_KERNEL, "AIF1 Playback");
+ dr[idx].source = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Tx", ssp_port);
+ if (!dr[idx].sink || !dr[idx].source)
+ return -ENOMEM;
+
+ idx++;
+ dr[idx].sink = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Rx", ssp_port);
+ dr[idx].source = devm_kasprintf(dev, GFP_KERNEL, "AIF1 Capture");
+ if (!dr[idx].sink || !dr[idx].source)
+ return -ENOMEM;
+
+ *routes = dr;
+ *num_routes = num_dr;
+
+ return 0;
+}
+
+static int avs_card_set_jack(struct snd_soc_card *card, struct snd_soc_jack *jack)
+{
+ struct snd_soc_component *component;
+
+ for_each_card_components(card, component)
+ snd_soc_component_set_jack(component, jack, NULL);
+ return 0;
+}
+
+static int avs_card_remove(struct snd_soc_card *card)
+{
+ return avs_card_set_jack(card, NULL);
+}
+
+static int avs_card_suspend_pre(struct snd_soc_card *card)
+{
+ return avs_card_set_jack(card, NULL);
+}
+
+static int avs_card_resume_post(struct snd_soc_card *card)
+{
+ struct snd_soc_jack *jack = snd_soc_card_get_drvdata(card);
+
+ return avs_card_set_jack(card, jack);
+}
+
+static int avs_rt298_probe(struct platform_device *pdev)
+{
+ struct snd_soc_dapm_route *routes;
+ struct snd_soc_dai_link *dai_link;
+ struct snd_soc_acpi_mach *mach;
+ struct snd_soc_card *card;
+ struct snd_soc_jack *jack;
+ struct device *dev = &pdev->dev;
+ const char *pname;
+ int num_routes, ssp_port, ret;
+
+ mach = dev_get_platdata(dev);
+ pname = mach->mach_params.platform;
+ ssp_port = __ffs(mach->mach_params.i2s_link_mask);
+
+ ret = avs_create_dai_link(dev, pname, ssp_port, &dai_link);
+ if (ret) {
+ dev_err(dev, "Failed to create dai link: %d", ret);
+ return ret;
+ }
+
+ ret = avs_create_dapm_routes(dev, ssp_port, &routes, &num_routes);
+ if (ret) {
+ dev_err(dev, "Failed to create dapm routes: %d", ret);
+ return ret;
+ }
+
+ jack = devm_kzalloc(dev, sizeof(*jack), GFP_KERNEL);
+ card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
+ if (!jack || !card)
+ return -ENOMEM;
+
+ card->name = "avs_rt298";
+ card->dev = dev;
+ card->owner = THIS_MODULE;
+ card->remove = avs_card_remove;
+ card->suspend_pre = avs_card_suspend_pre;
+ card->resume_post = avs_card_resume_post;
+ card->dai_link = dai_link;
+ card->num_links = 1;
+ card->controls = card_controls;
+ card->num_controls = ARRAY_SIZE(card_controls);
+ card->dapm_widgets = card_widgets;
+ card->num_dapm_widgets = ARRAY_SIZE(card_widgets);
+ card->dapm_routes = routes;
+ card->num_dapm_routes = num_routes;
+ card->fully_routed = true;
+ snd_soc_card_set_drvdata(card, jack);
+
+ ret = snd_soc_fixup_dai_links_platform_name(card, pname);
+ if (ret)
+ return ret;
+
+ return devm_snd_soc_register_card(dev, card);
+}
+
+static struct platform_driver avs_rt298_driver = {
+ .probe = avs_rt298_probe,
+ .driver = {
+ .name = "avs_rt298",
+ .pm = &snd_soc_pm_ops,
+ },
+};
+
+module_platform_driver(avs_rt298_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:avs_rt298");
diff --git a/sound/soc/intel/avs/boards/rt5682.c b/sound/soc/intel/avs/boards/rt5682.c
new file mode 100644
index 000000000000..01f9b9f0c12b
--- /dev/null
+++ b/sound/soc/intel/avs/boards/rt5682.c
@@ -0,0 +1,340 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
+//
+// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
+// Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
+//
+
+#include <linux/clk.h>
+#include <linux/dmi.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/rt5682.h>
+#include <sound/soc.h>
+#include <sound/soc-acpi.h>
+#include "../../common/soc-intel-quirks.h"
+#include "../../../codecs/rt5682.h"
+
+#define AVS_RT5682_SSP_CODEC(quirk) ((quirk) & GENMASK(2, 0))
+#define AVS_RT5682_SSP_CODEC_MASK (GENMASK(2, 0))
+#define AVS_RT5682_MCLK_EN BIT(3)
+#define AVS_RT5682_MCLK_24MHZ BIT(4)
+
+/* Default: MCLK on, MCLK 19.2M, SSP0 */
+static unsigned long avs_rt5682_quirk = AVS_RT5682_MCLK_EN | AVS_RT5682_SSP_CODEC(0);
+
+static int avs_rt5682_quirk_cb(const struct dmi_system_id *id)
+{
+ avs_rt5682_quirk = (unsigned long)id->driver_data;
+ return 1;
+}
+
+static const struct dmi_system_id avs_rt5682_quirk_table[] = {
+ {
+ .callback = avs_rt5682_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "WhiskeyLake Client"),
+ },
+ .driver_data = (void *)(AVS_RT5682_MCLK_EN |
+ AVS_RT5682_MCLK_24MHZ |
+ AVS_RT5682_SSP_CODEC(1)),
+ },
+ {
+ .callback = avs_rt5682_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Ice Lake Client"),
+ },
+ .driver_data = (void *)(AVS_RT5682_MCLK_EN |
+ AVS_RT5682_SSP_CODEC(0)),
+ },
+ {}
+};
+
+static const struct snd_kcontrol_new card_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Headphone Jack"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+};
+
+static const struct snd_soc_dapm_widget card_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+};
+
+static const struct snd_soc_dapm_route card_base_routes[] = {
+ /* HP jack connectors - unknown if we have jack detect */
+ { "Headphone Jack", NULL, "HPOL" },
+ { "Headphone Jack", NULL, "HPOR" },
+
+ /* other jacks */
+ { "IN1P", NULL, "Headset Mic" },
+};
+
+static int avs_rt5682_codec_init(struct snd_soc_pcm_runtime *runtime)
+{
+ struct snd_soc_component *component = asoc_rtd_to_codec(runtime, 0)->component;
+ struct snd_soc_jack *jack;
+ struct snd_soc_card *card = runtime->card;
+ int ret;
+
+ jack = snd_soc_card_get_drvdata(card);
+
+ /* Need to enable ASRC function for 24MHz mclk rate */
+ if ((avs_rt5682_quirk & AVS_RT5682_MCLK_EN) &&
+ (avs_rt5682_quirk & AVS_RT5682_MCLK_24MHZ)) {
+ rt5682_sel_asrc_clk_src(component, RT5682_DA_STEREO1_FILTER |
+ RT5682_AD_STEREO1_FILTER, RT5682_CLK_SEL_I2S1_ASRC);
+ }
+
+ /*
+ * Headset buttons map to the Google Reference headset.
+ * These can be configured by userspace.
+ */
+ ret = snd_soc_card_jack_new(card, "Headset", SND_JACK_HEADSET | SND_JACK_BTN_0 |
+ SND_JACK_BTN_1 | SND_JACK_BTN_2 | SND_JACK_BTN_3, jack);
+ if (ret) {
+ dev_err(card->dev, "Headset Jack creation failed: %d\n", ret);
+ return ret;
+ }
+
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);
+
+ ret = snd_soc_component_set_jack(component, jack, NULL);
+ if (ret) {
+ dev_err(card->dev, "Headset Jack call-back failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+};
+
+static int
+avs_rt5682_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *runtime = asoc_substream_to_rtd(substream);
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(runtime, 0);
+ int clk_id, clk_freq;
+ int pll_out, ret;
+
+ if (avs_rt5682_quirk & AVS_RT5682_MCLK_EN) {
+ clk_id = RT5682_PLL1_S_MCLK;
+ if (avs_rt5682_quirk & AVS_RT5682_MCLK_24MHZ)
+ clk_freq = 24000000;
+ else
+ clk_freq = 19200000;
+ } else {
+ clk_id = RT5682_PLL1_S_BCLK1;
+ clk_freq = params_rate(params) * 50;
+ }
+
+ pll_out = params_rate(params) * 512;
+
+ ret = snd_soc_dai_set_pll(codec_dai, 0, clk_id, clk_freq, pll_out);
+ if (ret < 0)
+ dev_err(runtime->dev, "snd_soc_dai_set_pll err = %d\n", ret);
+
+ /* Configure sysclk for codec */
+ ret = snd_soc_dai_set_sysclk(codec_dai, RT5682_SCLK_S_PLL1, pll_out, SND_SOC_CLOCK_IN);
+ if (ret < 0)
+ dev_err(runtime->dev, "snd_soc_dai_set_sysclk err = %d\n", ret);
+
+ /* slot_width should be equal to or larger than the data length; set them to the same value */
+ ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x0, 0x0, 2, params_width(params));
+ if (ret < 0) {
+ dev_err(runtime->dev, "set TDM slot err:%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_ops avs_rt5682_ops = {
+ .hw_params = avs_rt5682_hw_params,
+};
+
+static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port,
+ struct snd_soc_dai_link **dai_link)
+{
+ struct snd_soc_dai_link_component *platform;
+ struct snd_soc_dai_link *dl;
+
+ dl = devm_kzalloc(dev, sizeof(*dl), GFP_KERNEL);
+ platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL);
+ if (!dl || !platform)
+ return -ENOMEM;
+
+ platform->name = platform_name;
+
+ dl->name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d-Codec", ssp_port);
+ dl->cpus = devm_kzalloc(dev, sizeof(*dl->cpus), GFP_KERNEL);
+ dl->codecs = devm_kzalloc(dev, sizeof(*dl->codecs), GFP_KERNEL);
+ if (!dl->name || !dl->cpus || !dl->codecs)
+ return -ENOMEM;
+
+ dl->cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d Pin", ssp_port);
+ dl->codecs->name = devm_kasprintf(dev, GFP_KERNEL, "i2c-10EC5682:00");
+ dl->codecs->dai_name = devm_kasprintf(dev, GFP_KERNEL, "rt5682-aif1");
+ if (!dl->cpus->dai_name || !dl->codecs->name || !dl->codecs->dai_name)
+ return -ENOMEM;
+
+ dl->num_cpus = 1;
+ dl->num_codecs = 1;
+ dl->platforms = platform;
+ dl->num_platforms = 1;
+ dl->id = 0;
+ dl->init = avs_rt5682_codec_init;
+ dl->ops = &avs_rt5682_ops;
+ dl->nonatomic = 1;
+ dl->no_pcm = 1;
+ dl->dpcm_capture = 1;
+ dl->dpcm_playback = 1;
+
+ *dai_link = dl;
+
+ return 0;
+}
+
+static int avs_create_dapm_routes(struct device *dev, int ssp_port,
+ struct snd_soc_dapm_route **routes, int *num_routes)
+{
+ struct snd_soc_dapm_route *dr;
+ const int num_base = ARRAY_SIZE(card_base_routes);
+ const int num_dr = num_base + 2;
+ int idx;
+
+ dr = devm_kcalloc(dev, num_dr, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ memcpy(dr, card_base_routes, num_base * sizeof(*dr));
+
+ idx = num_base;
+ dr[idx].sink = devm_kasprintf(dev, GFP_KERNEL, "AIF1 Playback");
+ dr[idx].source = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Tx", ssp_port);
+ if (!dr[idx].sink || !dr[idx].source)
+ return -ENOMEM;
+
+ idx++;
+ dr[idx].sink = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Rx", ssp_port);
+ dr[idx].source = devm_kasprintf(dev, GFP_KERNEL, "AIF1 Capture");
+ if (!dr[idx].sink || !dr[idx].source)
+ return -ENOMEM;
+
+ *routes = dr;
+ *num_routes = num_dr;
+
+ return 0;
+}
+
+static int avs_card_set_jack(struct snd_soc_card *card, struct snd_soc_jack *jack)
+{
+ struct snd_soc_component *component;
+
+ for_each_card_components(card, component)
+ snd_soc_component_set_jack(component, jack, NULL);
+ return 0;
+}
+
+static int avs_card_remove(struct snd_soc_card *card)
+{
+ return avs_card_set_jack(card, NULL);
+}
+
+static int avs_card_suspend_pre(struct snd_soc_card *card)
+{
+ return avs_card_set_jack(card, NULL);
+}
+
+static int avs_card_resume_post(struct snd_soc_card *card)
+{
+ struct snd_soc_jack *jack = snd_soc_card_get_drvdata(card);
+
+ return avs_card_set_jack(card, jack);
+}
+
+static int avs_rt5682_probe(struct platform_device *pdev)
+{
+ struct snd_soc_dapm_route *routes;
+ struct snd_soc_dai_link *dai_link;
+ struct snd_soc_acpi_mach *mach;
+ struct snd_soc_card *card;
+ struct snd_soc_jack *jack;
+ struct device *dev = &pdev->dev;
+ const char *pname;
+ int num_routes, ssp_port, ret;
+
+ if (pdev->id_entry && pdev->id_entry->driver_data)
+ avs_rt5682_quirk = (unsigned long)pdev->id_entry->driver_data;
+
+ dmi_check_system(avs_rt5682_quirk_table);
+ dev_dbg(dev, "avs_rt5682_quirk = %lx\n", avs_rt5682_quirk);
+
+ mach = dev_get_platdata(dev);
+ pname = mach->mach_params.platform;
+ ssp_port = __ffs(mach->mach_params.i2s_link_mask);
+
+ ret = avs_create_dai_link(dev, pname, ssp_port, &dai_link);
+ if (ret) {
+ dev_err(dev, "Failed to create dai link: %d", ret);
+ return ret;
+ }
+
+ ret = avs_create_dapm_routes(dev, ssp_port, &routes, &num_routes);
+ if (ret) {
+ dev_err(dev, "Failed to create dapm routes: %d", ret);
+ return ret;
+ }
+
+ jack = devm_kzalloc(dev, sizeof(*jack), GFP_KERNEL);
+ card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
+ if (!jack || !card)
+ return -ENOMEM;
+
+ card->name = "avs_rt5682";
+ card->dev = dev;
+ card->owner = THIS_MODULE;
+ card->remove = avs_card_remove;
+ card->suspend_pre = avs_card_suspend_pre;
+ card->resume_post = avs_card_resume_post;
+ card->dai_link = dai_link;
+ card->num_links = 1;
+ card->controls = card_controls;
+ card->num_controls = ARRAY_SIZE(card_controls);
+ card->dapm_widgets = card_widgets;
+ card->num_dapm_widgets = ARRAY_SIZE(card_widgets);
+ card->dapm_routes = routes;
+ card->num_dapm_routes = num_routes;
+ card->fully_routed = true;
+ snd_soc_card_set_drvdata(card, jack);
+
+ ret = snd_soc_fixup_dai_links_platform_name(card, pname);
+ if (ret)
+ return ret;
+
+ return devm_snd_soc_register_card(dev, card);
+}
+
+static struct platform_driver avs_rt5682_driver = {
+ .probe = avs_rt5682_probe,
+ .driver = {
+ .name = "avs_rt5682",
+ .pm = &snd_soc_pm_ops,
+ },
+};
+
+module_platform_driver(avs_rt5682_driver);
+
+MODULE_AUTHOR("Cezary Rojewski <cezary.rojewski@intel.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:avs_rt5682");
diff --git a/sound/soc/intel/avs/boards/ssm4567.c b/sound/soc/intel/avs/boards/ssm4567.c
new file mode 100644
index 000000000000..9f84c8ab3447
--- /dev/null
+++ b/sound/soc/intel/avs/boards/ssm4567.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
+//
+// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
+// Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
+//
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-acpi.h>
+#include "../../../codecs/nau8825.h"
+
+#define SKL_NUVOTON_CODEC_DAI "nau8825-hifi"
+#define SKL_SSM_CODEC_DAI "ssm4567-hifi"
+
+static struct snd_soc_codec_conf card_codec_conf[] = {
+ {
+ .dlc = COMP_CODEC_CONF("i2c-INT343B:00"),
+ .name_prefix = "Left",
+ },
+ {
+ .dlc = COMP_CODEC_CONF("i2c-INT343B:01"),
+ .name_prefix = "Right",
+ },
+};
+
+static const struct snd_kcontrol_new card_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Left Speaker"),
+ SOC_DAPM_PIN_SWITCH("Right Speaker"),
+};
+
+static int
+platform_clock_control(struct snd_soc_dapm_widget *w, struct snd_kcontrol *control, int event)
+{
+ struct snd_soc_dapm_context *dapm = w->dapm;
+ struct snd_soc_card *card = dapm->card;
+ struct snd_soc_dai *codec_dai;
+ int ret;
+
+ codec_dai = snd_soc_card_get_codec_dai(card, SKL_NUVOTON_CODEC_DAI);
+ if (!codec_dai) {
+ dev_err(card->dev, "Codec dai not found\n");
+ return -EINVAL;
+ }
+
+ if (SND_SOC_DAPM_EVENT_ON(event)) {
+ ret = snd_soc_dai_set_sysclk(codec_dai, NAU8825_CLK_MCLK, 24000000,
+ SND_SOC_CLOCK_IN);
+ if (ret < 0)
+ dev_err(card->dev, "set sysclk err = %d\n", ret);
+ } else {
+ ret = snd_soc_dai_set_sysclk(codec_dai, NAU8825_CLK_INTERNAL, 0, SND_SOC_CLOCK_IN);
+ if (ret < 0)
+ dev_err(card->dev, "set sysclk err = %d\n", ret);
+ }
+
+ return ret;
+}
+
+static const struct snd_soc_dapm_widget card_widgets[] = {
+ SND_SOC_DAPM_SPK("Left Speaker", NULL),
+ SND_SOC_DAPM_SPK("Right Speaker", NULL),
+ SND_SOC_DAPM_SPK("DP1", NULL),
+ SND_SOC_DAPM_SPK("DP2", NULL),
+ SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0, platform_clock_control,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+};
+
+static const struct snd_soc_dapm_route card_base_routes[] = {
+ {"Left Speaker", NULL, "Left OUT"},
+ {"Right Speaker", NULL, "Right OUT"},
+};
+
+static int avs_ssm4567_codec_init(struct snd_soc_pcm_runtime *runtime)
+{
+ int ret;
+
+ /* Slot 1 for left */
+ ret = snd_soc_dai_set_tdm_slot(asoc_rtd_to_codec(runtime, 0), 0x01, 0x01, 2, 48);
+ if (ret < 0)
+ return ret;
+
+ /* Slot 2 for right */
+ ret = snd_soc_dai_set_tdm_slot(asoc_rtd_to_codec(runtime, 1), 0x02, 0x02, 2, 48);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int
+avs_ssm4567_be_fixup(struct snd_soc_pcm_runtime *runtime, struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate, *channels;
+ struct snd_mask *fmt;
+
+ rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+ channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+ fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+
+ /* The ADSP will convert the FE rate to 48k, stereo */
+ rate->min = rate->max = 48000;
+ channels->min = channels->max = 2;
+
+ /* set SSP0 to 24 bit */
+ snd_mask_none(fmt);
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE);
+ return 0;
+}
+
+static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port,
+ struct snd_soc_dai_link **dai_link)
+{
+ struct snd_soc_dai_link_component *platform;
+ struct snd_soc_dai_link *dl;
+
+ dl = devm_kzalloc(dev, sizeof(*dl), GFP_KERNEL);
+ platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL);
+ if (!dl || !platform)
+ return -ENOMEM;
+
+ platform->name = platform_name;
+
+ dl->name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d-Codec", ssp_port);
+ dl->cpus = devm_kzalloc(dev, sizeof(*dl->cpus), GFP_KERNEL);
+ dl->codecs = devm_kzalloc(dev, sizeof(*dl->codecs) * 2, GFP_KERNEL);
+ if (!dl->name || !dl->cpus || !dl->codecs)
+ return -ENOMEM;
+
+ dl->cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d Pin", ssp_port);
+ dl->codecs[0].name = devm_kasprintf(dev, GFP_KERNEL, "i2c-INT343B:00");
+ dl->codecs[0].dai_name = devm_kasprintf(dev, GFP_KERNEL, "ssm4567-hifi");
+ dl->codecs[1].name = devm_kasprintf(dev, GFP_KERNEL, "i2c-INT343B:01");
+ dl->codecs[1].dai_name = devm_kasprintf(dev, GFP_KERNEL, "ssm4567-hifi");
+ if (!dl->cpus->dai_name || !dl->codecs[0].name || !dl->codecs[0].dai_name ||
+ !dl->codecs[1].name || !dl->codecs[1].dai_name)
+ return -ENOMEM;
+
+ dl->num_cpus = 1;
+ dl->num_codecs = 2;
+ dl->platforms = platform;
+ dl->num_platforms = 1;
+ dl->id = 0;
+ dl->dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_IB_NF | SND_SOC_DAIFMT_CBS_CFS;
+ dl->init = avs_ssm4567_codec_init;
+ dl->be_hw_params_fixup = avs_ssm4567_be_fixup;
+ dl->nonatomic = 1;
+ dl->no_pcm = 1;
+ dl->dpcm_capture = 1;
+ dl->dpcm_playback = 1;
+ dl->ignore_pmdown_time = 1;
+
+ *dai_link = dl;
+
+ return 0;
+}
+
+static int avs_create_dapm_routes(struct device *dev, int ssp_port,
+ struct snd_soc_dapm_route **routes, int *num_routes)
+{
+ struct snd_soc_dapm_route *dr;
+ const int num_base = ARRAY_SIZE(card_base_routes);
+ const int num_dr = num_base + 4;
+ int idx;
+
+ dr = devm_kcalloc(dev, num_dr, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ memcpy(dr, card_base_routes, num_base * sizeof(*dr));
+
+ idx = num_base;
+ dr[idx].sink = devm_kasprintf(dev, GFP_KERNEL, "Left Playback");
+ dr[idx].source = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Tx", ssp_port);
+ if (!dr[idx].sink || !dr[idx].source)
+ return -ENOMEM;
+
+ idx++;
+ dr[idx].sink = devm_kasprintf(dev, GFP_KERNEL, "Right Playback");
+ dr[idx].source = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Tx", ssp_port);
+ if (!dr[idx].sink || !dr[idx].source)
+ return -ENOMEM;
+
+ idx++;
+ dr[idx].sink = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Rx", ssp_port);
+ dr[idx].source = devm_kasprintf(dev, GFP_KERNEL, "Left Capture Sense");
+ if (!dr[idx].sink || !dr[idx].source)
+ return -ENOMEM;
+
+ idx++;
+ dr[idx].sink = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Rx", ssp_port);
+ dr[idx].source = devm_kasprintf(dev, GFP_KERNEL, "Right Capture Sense");
+ if (!dr[idx].sink || !dr[idx].source)
+ return -ENOMEM;
+
+ *routes = dr;
+ *num_routes = num_dr;
+
+ return 0;
+}
+
+static int avs_ssm4567_probe(struct platform_device *pdev)
+{
+ struct snd_soc_dapm_route *routes;
+ struct snd_soc_dai_link *dai_link;
+ struct snd_soc_acpi_mach *mach;
+ struct snd_soc_card *card;
+ struct device *dev = &pdev->dev;
+ const char *pname;
+ int num_routes, ssp_port, ret;
+
+ mach = dev_get_platdata(dev);
+ pname = mach->mach_params.platform;
+ ssp_port = __ffs(mach->mach_params.i2s_link_mask);
+
+ ret = avs_create_dai_link(dev, pname, ssp_port, &dai_link);
+ if (ret) {
+ dev_err(dev, "Failed to create dai link: %d", ret);
+ return ret;
+ }
+
+ ret = avs_create_dapm_routes(dev, ssp_port, &routes, &num_routes);
+ if (ret) {
+ dev_err(dev, "Failed to create dapm routes: %d", ret);
+ return ret;
+ }
+
+ card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
+ if (!card)
+ return -ENOMEM;
+
+ card->name = "avs_ssm4567-adi";
+ card->dev = dev;
+ card->owner = THIS_MODULE;
+ card->dai_link = dai_link;
+ card->num_links = 1;
+ card->codec_conf = card_codec_conf;
+ card->num_configs = ARRAY_SIZE(card_codec_conf);
+ card->controls = card_controls;
+ card->num_controls = ARRAY_SIZE(card_controls);
+ card->dapm_widgets = card_widgets;
+ card->num_dapm_widgets = ARRAY_SIZE(card_widgets);
+ card->dapm_routes = routes;
+ card->num_dapm_routes = num_routes;
+ card->fully_routed = true;
+ card->disable_route_checks = true;
+
+ ret = snd_soc_fixup_dai_links_platform_name(card, pname);
+ if (ret)
+ return ret;
+
+ return devm_snd_soc_register_card(dev, card);
+}
+
+static struct platform_driver avs_ssm4567_driver = {
+ .probe = avs_ssm4567_probe,
+ .driver = {
+ .name = "avs_ssm4567",
+ .pm = &snd_soc_pm_ops,
+ },
+};
+
+module_platform_driver(avs_ssm4567_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:avs_ssm4567");
diff --git a/sound/soc/intel/avs/cldma.c b/sound/soc/intel/avs/cldma.c
index d100c6ba4d8a..d7a9390b5e48 100644
--- a/sound/soc/intel/avs/cldma.c
+++ b/sound/soc/intel/avs/cldma.c
@@ -176,17 +176,17 @@ int hda_cldma_reset(struct hda_cldma *cl)
return ret;
}
- snd_hdac_stream_updateb(cl, SD_CTL, 1, 1);
- ret = snd_hdac_stream_readb_poll(cl, SD_CTL, reg, (reg & 1), AVS_CL_OP_INTERVAL_US,
- AVS_CL_OP_TIMEOUT_US);
+ snd_hdac_stream_updateb(cl, SD_CTL, SD_CTL_STREAM_RESET, SD_CTL_STREAM_RESET);
+ ret = snd_hdac_stream_readb_poll(cl, SD_CTL, reg, (reg & SD_CTL_STREAM_RESET),
+ AVS_CL_OP_INTERVAL_US, AVS_CL_OP_TIMEOUT_US);
if (ret < 0) {
dev_err(cl->dev, "cldma set SRST failed: %d\n", ret);
return ret;
}
- snd_hdac_stream_updateb(cl, SD_CTL, 1, 0);
- ret = snd_hdac_stream_readb_poll(cl, SD_CTL, reg, !(reg & 1), AVS_CL_OP_INTERVAL_US,
- AVS_CL_OP_TIMEOUT_US);
+ snd_hdac_stream_updateb(cl, SD_CTL, SD_CTL_STREAM_RESET, 0);
+ ret = snd_hdac_stream_readb_poll(cl, SD_CTL, reg, !(reg & SD_CTL_STREAM_RESET),
+ AVS_CL_OP_INTERVAL_US, AVS_CL_OP_TIMEOUT_US);
if (ret < 0) {
dev_err(cl->dev, "cldma unset SRST failed: %d\n", ret);
return ret;
diff --git a/sound/soc/intel/avs/core.c b/sound/soc/intel/avs/core.c
index 3a0997c3af2b..c50c20fd681a 100644
--- a/sound/soc/intel/avs/core.c
+++ b/sound/soc/intel/avs/core.c
@@ -23,6 +23,7 @@
#include <sound/hdaudio_ext.h>
#include <sound/intel-dsp-config.h>
#include <sound/intel-nhlt.h>
+#include "../../codecs/hda.h"
#include "avs.h"
#include "cldma.h"
@@ -356,7 +357,7 @@ static int avs_bus_init(struct avs_dev *adev, struct pci_dev *pci, const struct
struct device *dev = &pci->dev;
int ret;
- ret = snd_hdac_ext_bus_init(&bus->core, dev, NULL, NULL);
+ ret = snd_hdac_ext_bus_init(&bus->core, dev, NULL, &soc_hda_ext_bus_ops);
if (ret < 0)
return ret;
@@ -439,12 +440,9 @@ static int avs_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
if (bus->mlcap)
snd_hdac_ext_bus_get_ml_capabilities(bus);
- if (!dma_set_mask(dev, DMA_BIT_MASK(64))) {
- dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
- } else {
- dma_set_mask(dev, DMA_BIT_MASK(32));
- dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
- }
+ if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ dma_set_max_seg_size(dev, UINT_MAX);
ret = avs_hdac_bus_init_streams(bus);
if (ret < 0) {
@@ -555,6 +553,7 @@ static int __maybe_unused avs_suspend_common(struct avs_dev *adev)
return AVS_IPC_RET(ret);
}
+ avs_ipc_block(adev->ipc);
avs_dsp_op(adev, int_control, false);
snd_hdac_ext_bus_ppcap_int_enable(bus, false);
diff --git a/sound/soc/intel/avs/dsp.c b/sound/soc/intel/avs/dsp.c
index 06d2f7af520f..b881100d3e02 100644
--- a/sound/soc/intel/avs/dsp.c
+++ b/sound/soc/intel/avs/dsp.c
@@ -13,6 +13,7 @@
#define AVS_ADSPCS_INTERVAL_US 500
#define AVS_ADSPCS_TIMEOUT_US 50000
+#define AVS_ADSPCS_DELAY_US 1000
int avs_dsp_core_power(struct avs_dev *adev, u32 core_mask, bool power)
{
@@ -26,6 +27,8 @@ int avs_dsp_core_power(struct avs_dev *adev, u32 core_mask, bool power)
value = power ? mask : 0;
snd_hdac_adsp_updatel(adev, AVS_ADSP_REG_ADSPCS, mask, value);
+ /* Delay the polling to avoid false positives. */
+ usleep_range(AVS_ADSPCS_DELAY_US, 2 * AVS_ADSPCS_DELAY_US);
mask = AVS_ADSPCS_CPA_MASK(core_mask);
value = power ? mask : 0;
@@ -82,11 +85,15 @@ int avs_dsp_core_stall(struct avs_dev *adev, u32 core_mask, bool stall)
reg, (reg & mask) == value,
AVS_ADSPCS_INTERVAL_US,
AVS_ADSPCS_TIMEOUT_US);
- if (ret)
+ if (ret) {
dev_err(adev->dev, "core_mask %d %sstall failed: %d\n",
core_mask, stall ? "" : "un", ret);
+ return ret;
+ }
- return ret;
+ /* Give HW time to propagate the change. */
+ usleep_range(AVS_ADSPCS_DELAY_US, 2 * AVS_ADSPCS_DELAY_US);
+ return 0;
}
int avs_dsp_core_enable(struct avs_dev *adev, u32 core_mask)
diff --git a/sound/soc/intel/avs/ipc.c b/sound/soc/intel/avs/ipc.c
index d755ba8b8518..020d85c7520d 100644
--- a/sound/soc/intel/avs/ipc.c
+++ b/sound/soc/intel/avs/ipc.c
@@ -480,6 +480,7 @@ static int avs_dsp_do_send_msg(struct avs_dev *adev, struct avs_ipc_msg *request
ret = ipc->rx.rsp.status;
if (reply) {
reply->header = ipc->rx.header;
+ reply->size = ipc->rx.size;
if (reply->data && ipc->rx.size)
memcpy(reply->data, ipc->rx.data, reply->size);
}
diff --git a/sound/soc/intel/avs/loader.c b/sound/soc/intel/avs/loader.c
index 542fd44aa501..9e3f8ff33a87 100644
--- a/sound/soc/intel/avs/loader.c
+++ b/sound/soc/intel/avs/loader.c
@@ -27,8 +27,8 @@
#define APL_ROM_INIT_RETRIES 3
#define AVS_FW_INIT_POLLING_US 500
-#define AVS_FW_INIT_TIMEOUT_US 3000000
#define AVS_FW_INIT_TIMEOUT_MS 3000
+#define AVS_FW_INIT_TIMEOUT_US (AVS_FW_INIT_TIMEOUT_MS * 1000)
#define AVS_CLDMA_START_DELAY_MS 100
diff --git a/sound/soc/intel/avs/messages.c b/sound/soc/intel/avs/messages.c
index 6404fce8cde4..d4bcee1aabcf 100644
--- a/sound/soc/intel/avs/messages.c
+++ b/sound/soc/intel/avs/messages.c
@@ -59,7 +59,7 @@ int avs_ipc_unload_modules(struct avs_dev *adev, u16 *mod_ids, u32 num_mod_ids)
request.data = mod_ids;
request.size = sizeof(*mod_ids) * num_mod_ids;
- ret = avs_dsp_send_msg_timeout(adev, &request, NULL, AVS_CL_TIMEOUT_MS);
+ ret = avs_dsp_send_msg(adev, &request, NULL);
if (ret)
avs_ipc_err(adev, &request, "unload multiple modules", ret);
@@ -378,7 +378,6 @@ int avs_ipc_get_large_config(struct avs_dev *adev, u16 module_id, u8 instance_id
union avs_module_msg msg = AVS_MODULE_REQUEST(LARGE_CONFIG_GET);
struct avs_ipc_msg request;
struct avs_ipc_msg reply = {{0}};
- size_t size;
void *buf;
int ret;
@@ -406,15 +405,14 @@ int avs_ipc_get_large_config(struct avs_dev *adev, u16 module_id, u8 instance_id
return ret;
}
- size = reply.rsp.ext.large_config.data_off_size;
- buf = krealloc(reply.data, size, GFP_KERNEL);
+ buf = krealloc(reply.data, reply.size, GFP_KERNEL);
if (!buf) {
kfree(reply.data);
return -ENOMEM;
}
*reply_data = buf;
- *reply_size = size;
+ *reply_size = reply.size;
return 0;
}
@@ -476,6 +474,9 @@ int avs_ipc_get_fw_config(struct avs_dev *adev, struct avs_fw_cfg *cfg)
&payload, &payload_size);
if (ret)
return ret;
+ /* Non-zero payload expected for FIRMWARE_CONFIG. */
+ if (!payload_size)
+ return -EREMOTEIO;
while (offset < payload_size) {
tlv = (struct avs_tlv *)(payload + offset);
@@ -561,6 +562,7 @@ int avs_ipc_get_fw_config(struct avs_dev *adev, struct avs_fw_cfg *cfg)
case AVS_FW_CFG_DMA_BUFFER_CONFIG:
case AVS_FW_CFG_SCHEDULER_CONFIG:
case AVS_FW_CFG_CLOCKS_CONFIG:
+ case AVS_FW_CFG_RESERVED:
break;
default:
@@ -589,6 +591,9 @@ int avs_ipc_get_hw_config(struct avs_dev *adev, struct avs_hw_cfg *cfg)
&payload, &payload_size);
if (ret)
return ret;
+ /* Non-zero payload expected for HARDWARE_CONFIG. */
+ if (!payload_size)
+ return -EREMOTEIO;
while (offset < payload_size) {
tlv = (struct avs_tlv *)(payload + offset);
@@ -672,6 +677,9 @@ int avs_ipc_get_modules_info(struct avs_dev *adev, struct avs_mods_info **info)
&payload, &payload_size);
if (ret)
return ret;
+ /* Non-zero payload expected for MODULES_INFO. */
+ if (!payload_size)
+ return -EREMOTEIO;
*info = (struct avs_mods_info *)payload;
return 0;
diff --git a/sound/soc/intel/avs/path.c b/sound/soc/intel/avs/path.c
index 3d46dd5e5bc4..ce157a8d6552 100644
--- a/sound/soc/intel/avs/path.c
+++ b/sound/soc/intel/avs/path.c
@@ -449,35 +449,39 @@ static int avs_modext_create(struct avs_dev *adev, struct avs_path_module *mod)
return ret;
}
+static int avs_probe_create(struct avs_dev *adev, struct avs_path_module *mod)
+{
+ dev_err(adev->dev, "Probe module can't be instantiated by topology");
+ return -EINVAL;
+}
+
+struct avs_module_create {
+ guid_t *guid;
+ int (*create)(struct avs_dev *adev, struct avs_path_module *mod);
+};
+
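+/* Map of module-type UUIDs (taken from topology) to their instantiation handlers. */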
+static struct avs_module_create avs_module_create[] = {
+ { &AVS_MIXIN_MOD_UUID, avs_modbase_create },
+ { &AVS_MIXOUT_MOD_UUID, avs_modbase_create },
+ { &AVS_KPBUFF_MOD_UUID, avs_modbase_create },
+ { &AVS_COPIER_MOD_UUID, avs_copier_create },
+ { &AVS_MICSEL_MOD_UUID, avs_micsel_create },
+ { &AVS_MUX_MOD_UUID, avs_mux_create },
+ { &AVS_UPDWMIX_MOD_UUID, avs_updown_mix_create },
+ { &AVS_SRCINTC_MOD_UUID, avs_src_create },
+ { &AVS_AEC_MOD_UUID, avs_aec_create },
+ { &AVS_ASRC_MOD_UUID, avs_asrc_create },
+ { &AVS_INTELWOV_MOD_UUID, avs_wov_create },
+ { &AVS_PROBE_MOD_UUID, avs_probe_create },
+};
+
static int avs_path_module_type_create(struct avs_dev *adev, struct avs_path_module *mod)
{
const guid_t *type = &mod->template->cfg_ext->type;
- if (guid_equal(type, &AVS_MIXIN_MOD_UUID) ||
- guid_equal(type, &AVS_MIXOUT_MOD_UUID) ||
- guid_equal(type, &AVS_KPBUFF_MOD_UUID))
- return avs_modbase_create(adev, mod);
- if (guid_equal(type, &AVS_COPIER_MOD_UUID))
- return avs_copier_create(adev, mod);
- if (guid_equal(type, &AVS_MICSEL_MOD_UUID))
- return avs_micsel_create(adev, mod);
- if (guid_equal(type, &AVS_MUX_MOD_UUID))
- return avs_mux_create(adev, mod);
- if (guid_equal(type, &AVS_UPDWMIX_MOD_UUID))
- return avs_updown_mix_create(adev, mod);
- if (guid_equal(type, &AVS_SRCINTC_MOD_UUID))
- return avs_src_create(adev, mod);
- if (guid_equal(type, &AVS_AEC_MOD_UUID))
- return avs_aec_create(adev, mod);
- if (guid_equal(type, &AVS_ASRC_MOD_UUID))
- return avs_asrc_create(adev, mod);
- if (guid_equal(type, &AVS_INTELWOV_MOD_UUID))
- return avs_wov_create(adev, mod);
-
- if (guid_equal(type, &AVS_PROBE_MOD_UUID)) {
- dev_err(adev->dev, "Probe module can't be instantiated by topology");
- return -EINVAL;
- }
+ for (int i = 0; i < ARRAY_SIZE(avs_module_create); i++)
+ if (guid_equal(type, avs_module_create[i].guid))
+ return avs_module_create[i].create(adev, mod);
return avs_modext_create(adev, mod);
}
diff --git a/sound/soc/intel/avs/pcm.c b/sound/soc/intel/avs/pcm.c
index 668f533578a6..8fe5917b1e26 100644
--- a/sound/soc/intel/avs/pcm.c
+++ b/sound/soc/intel/avs/pcm.c
@@ -636,8 +636,8 @@ static ssize_t topology_name_read(struct file *file, char __user *user_buf, size
char buf[64];
size_t len;
- len = snprintf(buf, sizeof(buf), "%s/%s\n", component->driver->topology_name_prefix,
- mach->tplg_filename);
+ len = scnprintf(buf, sizeof(buf), "%s/%s\n", component->driver->topology_name_prefix,
+ mach->tplg_filename);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
@@ -846,7 +846,6 @@ static const struct snd_soc_component_driver avs_component_driver = {
.pcm_construct = avs_component_construct,
.module_get_upon_open = 1, /* increment refcount when a pcm is opened */
.topology_name_prefix = "intel/avs",
- .non_legacy_dai_naming = true,
};
static int avs_soc_component_register(struct device *dev, const char *name,
@@ -1172,7 +1171,6 @@ static const struct snd_soc_component_driver avs_hda_component_driver = {
.remove_order = SND_SOC_COMP_ORDER_EARLY,
.module_get_upon_open = 1,
.topology_name_prefix = "intel/avs",
- .non_legacy_dai_naming = true,
};
int avs_hda_platform_register(struct avs_dev *adev, const char *name)
diff --git a/sound/soc/intel/avs/topology.c b/sound/soc/intel/avs/topology.c
index 6a06fe387d13..8a9f9fc48938 100644
--- a/sound/soc/intel/avs/topology.c
+++ b/sound/soc/intel/avs/topology.c
@@ -808,6 +808,30 @@ static const struct avs_tplg_token_parser pin_format_parsers[] = {
},
};
+static void
+assign_copier_gtw_instance(struct snd_soc_component *comp, struct avs_tplg_modcfg_ext *cfg)
+{
+ struct snd_soc_acpi_mach *mach;
+
+ if (!guid_equal(&cfg->type, &AVS_COPIER_MOD_UUID))
+ return;
+
+ /* Only I2S boards assign port instance in ->i2s_link_mask. */
+ switch (cfg->copier.dma_type) {
+ case AVS_DMA_I2S_LINK_OUTPUT:
+ case AVS_DMA_I2S_LINK_INPUT:
+ break;
+ default:
+ return;
+ }
+
+ mach = dev_get_platdata(comp->card->dev);
+
+ /* Automatic assignment only when board describes single SSP. */
+ if (hweight_long(mach->mach_params.i2s_link_mask) == 1 && !cfg->copier.vindex.i2s.instance)
+ cfg->copier.vindex.i2s.instance = __ffs(mach->mach_params.i2s_link_mask);
+}
+
static int avs_tplg_parse_modcfg_ext(struct snd_soc_component *comp,
struct avs_tplg_modcfg_ext *cfg,
struct snd_soc_tplg_vendor_array *tuples,
@@ -827,6 +851,9 @@ static int avs_tplg_parse_modcfg_ext(struct snd_soc_component *comp,
if (ret)
return ret;
+ /* Update copier gateway based on board's i2s_link_mask. */
+ assign_copier_gtw_instance(comp, cfg);
+
block_size -= esize;
/* Parse trailing in/out pin formats if any. */
if (block_size) {
diff --git a/sound/soc/intel/boards/Kconfig b/sound/soc/intel/boards/Kconfig
index f3873b5bea87..aa12d7e3dd2f 100644
--- a/sound/soc/intel/boards/Kconfig
+++ b/sound/soc/intel/boards/Kconfig
@@ -41,7 +41,7 @@ config SND_SOC_INTEL_SOF_CIRRUS_COMMON
if SND_SOC_INTEL_CATPT
config SND_SOC_INTEL_HASWELL_MACH
- tristate "Haswell Lynxpoint"
+ tristate "Haswell with RT5640 I2S codec"
depends on I2C
depends on I2C_DESIGNWARE_PLATFORM || COMPILE_TEST
depends on X86_INTEL_LPSS || COMPILE_TEST
@@ -85,7 +85,7 @@ config SND_SOC_INTEL_BDW_RT5677_MACH
If unsure select "N".
config SND_SOC_INTEL_BROADWELL_MACH
- tristate "Broadwell Wildcatpoint"
+ tristate "Broadwell with RT286 I2S codec"
depends on I2C
depends on I2C_DESIGNWARE_PLATFORM || COMPILE_TEST
depends on X86_INTEL_LPSS || COMPILE_TEST
@@ -660,7 +660,6 @@ config SND_SOC_INTEL_SOUNDWIRE_SOF_MACH
depends on MFD_INTEL_LPSS || COMPILE_TEST
depends on SND_SOC_INTEL_USER_FRIENDLY_LONG_NAMES || COMPILE_TEST
depends on SOUNDWIRE
- depends on SND_HDA_CODEC_HDMI && SND_SOC_SOF_HDA_AUDIO_CODEC
select SND_SOC_MAX98373_I2C
select SND_SOC_MAX98373_SDW
select SND_SOC_RT700_SDW
diff --git a/sound/soc/intel/boards/Makefile b/sound/soc/intel/boards/Makefile
index 40c0c3d1c500..eea1e26acfda 100644
--- a/sound/soc/intel/boards/Makefile
+++ b/sound/soc/intel/boards/Makefile
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
-snd-soc-sst-haswell-objs := haswell.o
+snd-soc-sst-haswell-objs := hsw_rt5640.o
snd-soc-sst-bdw-rt5650-mach-objs := bdw-rt5650.o
snd-soc-sst-bdw-rt5677-mach-objs := bdw-rt5677.o
-snd-soc-sst-broadwell-objs := broadwell.o
+snd-soc-sst-broadwell-objs := bdw_rt286.o
snd-soc-sst-bxt-da7219_max98357a-objs := bxt_da7219_max98357a.o
snd-soc-sst-bxt-rt298-objs := bxt_rt298.o
snd-soc-sst-sof-pcm512x-objs := sof_pcm512x.o
diff --git a/sound/soc/intel/boards/bdw-rt5650.c b/sound/soc/intel/boards/bdw-rt5650.c
index aae857fdcdb8..67c3f49b924c 100644
--- a/sound/soc/intel/boards/bdw-rt5650.c
+++ b/sound/soc/intel/boards/bdw-rt5650.c
@@ -249,6 +249,7 @@ static struct snd_soc_dai_link bdw_rt5650_dais[] = {
/* SSP0 - Codec */
.name = "Codec",
.id = 0,
+ .nonatomic = 1,
.no_pcm = 1,
.dai_fmt = SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBC_CFC,
diff --git a/sound/soc/intel/boards/bdw-rt5677.c b/sound/soc/intel/boards/bdw-rt5677.c
index d0ecbba2febe..31488702768e 100644
--- a/sound/soc/intel/boards/bdw-rt5677.c
+++ b/sound/soc/intel/boards/bdw-rt5677.c
@@ -349,6 +349,7 @@ static struct snd_soc_dai_link bdw_rt5677_dais[] = {
/* SSP0 - Codec */
.name = "Codec",
.id = 0,
+ .nonatomic = 1,
.no_pcm = 1,
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBC_CFC,
diff --git a/sound/soc/intel/boards/bdw_rt286.c b/sound/soc/intel/boards/bdw_rt286.c
new file mode 100644
index 000000000000..6b76df0e7c9b
--- /dev/null
+++ b/sound/soc/intel/boards/bdw_rt286.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Sound card driver for Intel Broadwell Wildcat Point with Realtek 286
+ *
+ * Copyright (C) 2013, Intel Corporation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-acpi.h>
+#include "../../codecs/rt286.h"
+
+static struct snd_soc_jack card_headset;
+
+static struct snd_soc_jack_pin card_headset_pins[] = {
+ {
+ .pin = "Mic Jack",
+ .mask = SND_JACK_MICROPHONE,
+ },
+ {
+ .pin = "Headphone Jack",
+ .mask = SND_JACK_HEADPHONE,
+ },
+};
+
+static const struct snd_kcontrol_new card_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Speaker"),
+ SOC_DAPM_PIN_SWITCH("Headphone Jack"),
+};
+
+static const struct snd_soc_dapm_widget card_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+ SND_SOC_DAPM_SPK("Speaker", NULL),
+ SND_SOC_DAPM_MIC("Mic Jack", NULL),
+ SND_SOC_DAPM_MIC("DMIC1", NULL),
+ SND_SOC_DAPM_MIC("DMIC2", NULL),
+ SND_SOC_DAPM_LINE("Line Jack", NULL),
+};
+
+static const struct snd_soc_dapm_route card_routes[] = {
+ {"Speaker", NULL, "SPOR"},
+ {"Speaker", NULL, "SPOL"},
+
+ {"Headphone Jack", NULL, "HPO Pin"},
+
+ {"MIC1", NULL, "Mic Jack"},
+ {"LINE1", NULL, "Line Jack"},
+
+ {"DMIC1 Pin", NULL, "DMIC1"},
+ {"DMIC2 Pin", NULL, "DMIC2"},
+
+ /* CODEC BE connections */
+ {"SSP0 CODEC IN", NULL, "AIF1 Capture"},
+ {"AIF1 Playback", NULL, "SSP0 CODEC OUT"},
+};
+
+static int codec_link_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_component *codec = asoc_rtd_to_codec(rtd, 0)->component;
+ int ret;
+
+ ret = snd_soc_card_jack_new_pins(rtd->card, "Headset", SND_JACK_HEADSET | SND_JACK_BTN_0,
+ &card_headset, card_headset_pins,
+ ARRAY_SIZE(card_headset_pins));
+ if (ret)
+ return ret;
+
+ return snd_soc_component_set_jack(codec, &card_headset, NULL);
+}
+
+static int codec_link_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+ struct snd_interval *rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+
+ /* The ADSP will convert the FE rate to 48kHz, stereo. */
+ rate->min = rate->max = 48000;
+ channels->min = channels->max = 2;
+ /* Set SSP0 to 16 bit. */
+ params_set_format(params, SNDRV_PCM_FORMAT_S16_LE);
+
+ return 0;
+}
+
+static int codec_link_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ int ret;
+
+ ret = snd_soc_dai_set_sysclk(codec_dai, RT286_SCLK_S_PLL, 24000000, SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ dev_err(rtd->dev, "set codec sysclk failed: %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static const struct snd_soc_ops codec_link_ops = {
+ .hw_params = codec_link_hw_params,
+};
+
+SND_SOC_DAILINK_DEF(system, DAILINK_COMP_ARRAY(COMP_CPU("System Pin")));
+SND_SOC_DAILINK_DEF(offload0, DAILINK_COMP_ARRAY(COMP_CPU("Offload0 Pin")));
+SND_SOC_DAILINK_DEF(offload1, DAILINK_COMP_ARRAY(COMP_CPU("Offload1 Pin")));
+SND_SOC_DAILINK_DEF(loopback, DAILINK_COMP_ARRAY(COMP_CPU("Loopback Pin")));
+
+SND_SOC_DAILINK_DEF(dummy, DAILINK_COMP_ARRAY(COMP_DUMMY()));
+SND_SOC_DAILINK_DEF(platform, DAILINK_COMP_ARRAY(COMP_PLATFORM("haswell-pcm-audio")));
+SND_SOC_DAILINK_DEF(codec, DAILINK_COMP_ARRAY(COMP_CODEC("i2c-INT343A:00", "rt286-aif1")));
+SND_SOC_DAILINK_DEF(ssp0_port, DAILINK_COMP_ARRAY(COMP_CPU("ssp0-port")));
+
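+/* Digital audio interface glue - connects the RT286 codec to the CPU DAIs. */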
+static struct snd_soc_dai_link card_dai_links[] = {
+ /* Front End DAI links */
+ {
+ .name = "System PCM",
+ .stream_name = "System Playback/Capture",
+ .nonatomic = 1,
+ .dynamic = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ SND_SOC_DAILINK_REG(system, dummy, platform),
+ },
+ {
+ .name = "Offload0",
+ .stream_name = "Offload0 Playback",
+ .nonatomic = 1,
+ .dynamic = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_playback = 1,
+ SND_SOC_DAILINK_REG(offload0, dummy, platform),
+ },
+ {
+ .name = "Offload1",
+ .stream_name = "Offload1 Playback",
+ .nonatomic = 1,
+ .dynamic = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_playback = 1,
+ SND_SOC_DAILINK_REG(offload1, dummy, platform),
+ },
+ {
+ .name = "Loopback PCM",
+ .stream_name = "Loopback",
+ .nonatomic = 1,
+ .dynamic = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_capture = 1,
+ SND_SOC_DAILINK_REG(loopback, dummy, platform),
+ },
+ /* Back End DAI links */
+ {
+ /* SSP0 - Codec */
+ .name = "Codec",
+ .id = 0,
+ .nonatomic = 1,
+ .no_pcm = 1,
+ .init = codec_link_init,
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBC_CFC,
+ .ignore_pmdown_time = 1,
+ .be_hw_params_fixup = codec_link_hw_params_fixup,
+ .ops = &codec_link_ops,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ SND_SOC_DAILINK_REG(ssp0_port, codec, platform),
+ },
+};
+
+static void bdw_rt286_disable_jack(struct snd_soc_card *card)
+{
+ struct snd_soc_component *component;
+
+ for_each_card_components(card, component) {
+ if (!strcmp(component->name, "i2c-INT343A:00")) {
+ dev_dbg(component->dev, "disabling jack detect before going to suspend.\n");
+ snd_soc_component_set_jack(component, NULL, NULL);
+ break;
+ }
+ }
+}
+
+static int bdw_rt286_suspend(struct snd_soc_card *card)
+{
+ bdw_rt286_disable_jack(card);
+
+ return 0;
+}
+
+static int bdw_rt286_resume(struct snd_soc_card *card)
+{
+ struct snd_soc_component *component;
+
+ for_each_card_components(card, component) {
+ if (!strcmp(component->name, "i2c-INT343A:00")) {
+ dev_dbg(component->dev, "enabling jack detect for resume.\n");
+ snd_soc_component_set_jack(component, &card_headset, NULL);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static struct snd_soc_card bdw_rt286_card = {
+ .owner = THIS_MODULE,
+ .dai_link = card_dai_links,
+ .num_links = ARRAY_SIZE(card_dai_links),
+ .controls = card_controls,
+ .num_controls = ARRAY_SIZE(card_controls),
+ .dapm_widgets = card_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(card_widgets),
+ .dapm_routes = card_routes,
+ .num_dapm_routes = ARRAY_SIZE(card_routes),
+ .fully_routed = true,
+ .suspend_pre = bdw_rt286_suspend,
+ .resume_post = bdw_rt286_resume,
+};
+
+/* Use space before codec name to simplify card ID, and simplify driver name. */
+#define SOF_CARD_NAME "bdw rt286" /* card name will be 'sof-bdw rt286' */
+#define SOF_DRIVER_NAME "SOF"
+
+#define CARD_NAME "broadwell-rt286"
+
+static int bdw_rt286_probe(struct platform_device *pdev)
+{
+ struct snd_soc_acpi_mach *mach;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ bdw_rt286_card.dev = dev;
+ mach = dev_get_platdata(dev);
+
+ ret = snd_soc_fixup_dai_links_platform_name(&bdw_rt286_card, mach->mach_params.platform);
+ if (ret)
+ return ret;
+
+ if (snd_soc_acpi_sof_parent(dev)) {
+ bdw_rt286_card.name = SOF_CARD_NAME;
+ bdw_rt286_card.driver_name = SOF_DRIVER_NAME;
+ } else {
+ bdw_rt286_card.name = CARD_NAME;
+ }
+
+ return devm_snd_soc_register_card(dev, &bdw_rt286_card);
+}
+
+static int bdw_rt286_remove(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+
+ bdw_rt286_disable_jack(card);
+
+ return 0;
+}
+
+static struct platform_driver bdw_rt286_driver = {
+ .probe = bdw_rt286_probe,
+ .remove = bdw_rt286_remove,
+ .driver = {
+ .name = "bdw_rt286",
+ .pm = &snd_soc_pm_ops
+ },
+};
+
+module_platform_driver(bdw_rt286_driver)
+
+MODULE_AUTHOR("Liam Girdwood, Xingchao Wang");
+MODULE_DESCRIPTION("Sound card driver for Intel Broadwell Wildcat Point with Realtek 286");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:bdw_rt286");
diff --git a/sound/soc/intel/boards/broadwell.c b/sound/soc/intel/boards/broadwell.c
deleted file mode 100644
index c30a9dca6801..000000000000
--- a/sound/soc/intel/boards/broadwell.c
+++ /dev/null
@@ -1,336 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel Broadwell Wildcatpoint SST Audio
- *
- * Copyright (C) 2013, Intel Corporation. All rights reserved.
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/soc.h>
-#include <sound/jack.h>
-#include <sound/pcm_params.h>
-#include <sound/soc-acpi.h>
-
-#include "../../codecs/rt286.h"
-
-static struct snd_soc_jack broadwell_headset;
-/* Headset jack detection DAPM pins */
-static struct snd_soc_jack_pin broadwell_headset_pins[] = {
- {
- .pin = "Mic Jack",
- .mask = SND_JACK_MICROPHONE,
- },
- {
- .pin = "Headphone Jack",
- .mask = SND_JACK_HEADPHONE,
- },
-};
-
-static const struct snd_kcontrol_new broadwell_controls[] = {
- SOC_DAPM_PIN_SWITCH("Speaker"),
- SOC_DAPM_PIN_SWITCH("Headphone Jack"),
-};
-
-static const struct snd_soc_dapm_widget broadwell_widgets[] = {
- SND_SOC_DAPM_HP("Headphone Jack", NULL),
- SND_SOC_DAPM_SPK("Speaker", NULL),
- SND_SOC_DAPM_MIC("Mic Jack", NULL),
- SND_SOC_DAPM_MIC("DMIC1", NULL),
- SND_SOC_DAPM_MIC("DMIC2", NULL),
- SND_SOC_DAPM_LINE("Line Jack", NULL),
-};
-
-static const struct snd_soc_dapm_route broadwell_rt286_map[] = {
-
- /* speaker */
- {"Speaker", NULL, "SPOR"},
- {"Speaker", NULL, "SPOL"},
-
-	/* HP jack connectors - unknown if we have jack detect */
- {"Headphone Jack", NULL, "HPO Pin"},
-
- /* other jacks */
- {"MIC1", NULL, "Mic Jack"},
- {"LINE1", NULL, "Line Jack"},
-
- /* digital mics */
- {"DMIC1 Pin", NULL, "DMIC1"},
- {"DMIC2 Pin", NULL, "DMIC2"},
-
- /* CODEC BE connections */
- {"SSP0 CODEC IN", NULL, "AIF1 Capture"},
- {"AIF1 Playback", NULL, "SSP0 CODEC OUT"},
-};
-
-static int broadwell_rt286_codec_init(struct snd_soc_pcm_runtime *rtd)
-{
- struct snd_soc_component *component = asoc_rtd_to_codec(rtd, 0)->component;
- int ret = 0;
- ret = snd_soc_card_jack_new_pins(rtd->card, "Headset",
- SND_JACK_HEADSET | SND_JACK_BTN_0, &broadwell_headset,
- broadwell_headset_pins, ARRAY_SIZE(broadwell_headset_pins));
- if (ret)
- return ret;
-
- rt286_mic_detect(component, &broadwell_headset);
- return 0;
-}
-
-
-static int broadwell_ssp0_fixup(struct snd_soc_pcm_runtime *rtd,
- struct snd_pcm_hw_params *params)
-{
- struct snd_interval *rate = hw_param_interval(params,
- SNDRV_PCM_HW_PARAM_RATE);
- struct snd_interval *chan = hw_param_interval(params,
- SNDRV_PCM_HW_PARAM_CHANNELS);
-
-	/* The ADSP will convert the FE rate to 48k, stereo */
- rate->min = rate->max = 48000;
- chan->min = chan->max = 2;
-
- /* set SSP0 to 16 bit */
- params_set_format(params, SNDRV_PCM_FORMAT_S16_LE);
- return 0;
-}
-
-static int broadwell_rt286_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
- struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
- int ret;
-
- ret = snd_soc_dai_set_sysclk(codec_dai, RT286_SCLK_S_PLL, 24000000,
- SND_SOC_CLOCK_IN);
-
- if (ret < 0) {
- dev_err(rtd->dev, "can't set codec sysclk configuration\n");
- return ret;
- }
-
- return ret;
-}
-
-static const struct snd_soc_ops broadwell_rt286_ops = {
- .hw_params = broadwell_rt286_hw_params,
-};
-
-static const unsigned int channels[] = {
- 2,
-};
-
-static const struct snd_pcm_hw_constraint_list constraints_channels = {
- .count = ARRAY_SIZE(channels),
- .list = channels,
- .mask = 0,
-};
-
-static int broadwell_fe_startup(struct snd_pcm_substream *substream)
-{
- struct snd_pcm_runtime *runtime = substream->runtime;
-
- /* Board supports stereo configuration only */
- runtime->hw.channels_max = 2;
- return snd_pcm_hw_constraint_list(runtime, 0,
- SNDRV_PCM_HW_PARAM_CHANNELS,
- &constraints_channels);
-}
-
-static const struct snd_soc_ops broadwell_fe_ops = {
- .startup = broadwell_fe_startup,
-};
-
-SND_SOC_DAILINK_DEF(system,
- DAILINK_COMP_ARRAY(COMP_CPU("System Pin")));
-
-SND_SOC_DAILINK_DEF(offload0,
- DAILINK_COMP_ARRAY(COMP_CPU("Offload0 Pin")));
-
-SND_SOC_DAILINK_DEF(offload1,
- DAILINK_COMP_ARRAY(COMP_CPU("Offload1 Pin")));
-
-SND_SOC_DAILINK_DEF(loopback,
- DAILINK_COMP_ARRAY(COMP_CPU("Loopback Pin")));
-
-SND_SOC_DAILINK_DEF(dummy,
- DAILINK_COMP_ARRAY(COMP_DUMMY()));
-
-SND_SOC_DAILINK_DEF(platform,
- DAILINK_COMP_ARRAY(COMP_PLATFORM("haswell-pcm-audio")));
-
-SND_SOC_DAILINK_DEF(codec,
- DAILINK_COMP_ARRAY(COMP_CODEC("i2c-INT343A:00", "rt286-aif1")));
-
-SND_SOC_DAILINK_DEF(ssp0_port,
- DAILINK_COMP_ARRAY(COMP_CPU("ssp0-port")));
-
-/* broadwell digital audio interface glue - connects codec <--> CPU */
-static struct snd_soc_dai_link broadwell_rt286_dais[] = {
- /* Front End DAI links */
- {
- .name = "System PCM",
- .stream_name = "System Playback/Capture",
- .nonatomic = 1,
- .dynamic = 1,
- .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
- .ops = &broadwell_fe_ops,
- .dpcm_playback = 1,
- .dpcm_capture = 1,
- SND_SOC_DAILINK_REG(system, dummy, platform),
- },
- {
- .name = "Offload0",
- .stream_name = "Offload0 Playback",
- .nonatomic = 1,
- .dynamic = 1,
- .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
- .dpcm_playback = 1,
- SND_SOC_DAILINK_REG(offload0, dummy, platform),
- },
- {
- .name = "Offload1",
- .stream_name = "Offload1 Playback",
- .nonatomic = 1,
- .dynamic = 1,
- .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
- .dpcm_playback = 1,
- SND_SOC_DAILINK_REG(offload1, dummy, platform),
- },
- {
- .name = "Loopback PCM",
- .stream_name = "Loopback",
- .nonatomic = 1,
- .dynamic = 1,
- .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
- .dpcm_capture = 1,
- SND_SOC_DAILINK_REG(loopback, dummy, platform),
- },
- /* Back End DAI links */
- {
- /* SSP0 - Codec */
- .name = "Codec",
- .id = 0,
- .no_pcm = 1,
- .init = broadwell_rt286_codec_init,
- .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBC_CFC,
- .ignore_pmdown_time = 1,
- .be_hw_params_fixup = broadwell_ssp0_fixup,
- .ops = &broadwell_rt286_ops,
- .dpcm_playback = 1,
- .dpcm_capture = 1,
- SND_SOC_DAILINK_REG(ssp0_port, codec, platform),
- },
-};
-
-static int broadwell_disable_jack(struct snd_soc_card *card)
-{
- struct snd_soc_component *component;
-
- for_each_card_components(card, component) {
- if (!strcmp(component->name, "i2c-INT343A:00")) {
-
- dev_dbg(component->dev, "disabling jack detect before going to suspend.\n");
- rt286_mic_detect(component, NULL);
- break;
- }
- }
-
- return 0;
-}
-
-static int broadwell_suspend(struct snd_soc_card *card)
-{
- return broadwell_disable_jack(card);
-}
-
-static int broadwell_resume(struct snd_soc_card *card){
- struct snd_soc_component *component;
-
- for_each_card_components(card, component) {
- if (!strcmp(component->name, "i2c-INT343A:00")) {
-
- dev_dbg(component->dev, "enabling jack detect for resume.\n");
- rt286_mic_detect(component, &broadwell_headset);
- break;
- }
- }
- return 0;
-}
-
-/* use space before codec name to simplify card ID, and simplify driver name */
-#define SOF_CARD_NAME "bdw rt286" /* card name will be 'sof-bdw rt286' */
-#define SOF_DRIVER_NAME "SOF"
-
-#define CARD_NAME "broadwell-rt286"
-#define DRIVER_NAME NULL /* card name will be used for driver name */
-
-/* broadwell audio machine driver for WPT + RT286S */
-static struct snd_soc_card broadwell_rt286 = {
- .owner = THIS_MODULE,
- .dai_link = broadwell_rt286_dais,
- .num_links = ARRAY_SIZE(broadwell_rt286_dais),
- .controls = broadwell_controls,
- .num_controls = ARRAY_SIZE(broadwell_controls),
- .dapm_widgets = broadwell_widgets,
- .num_dapm_widgets = ARRAY_SIZE(broadwell_widgets),
- .dapm_routes = broadwell_rt286_map,
- .num_dapm_routes = ARRAY_SIZE(broadwell_rt286_map),
- .fully_routed = true,
- .suspend_pre = broadwell_suspend,
- .resume_post = broadwell_resume,
-};
-
-static int broadwell_audio_probe(struct platform_device *pdev)
-{
- struct snd_soc_acpi_mach *mach;
- int ret;
-
- broadwell_rt286.dev = &pdev->dev;
-
- /* override platform name, if required */
- mach = pdev->dev.platform_data;
- ret = snd_soc_fixup_dai_links_platform_name(&broadwell_rt286,
- mach->mach_params.platform);
- if (ret)
- return ret;
-
- /* set card and driver name */
- if (snd_soc_acpi_sof_parent(&pdev->dev)) {
- broadwell_rt286.name = SOF_CARD_NAME;
- broadwell_rt286.driver_name = SOF_DRIVER_NAME;
- } else {
- broadwell_rt286.name = CARD_NAME;
- broadwell_rt286.driver_name = DRIVER_NAME;
- }
-
- return devm_snd_soc_register_card(&pdev->dev, &broadwell_rt286);
-}
-
-static int broadwell_audio_remove(struct platform_device *pdev)
-{
- struct snd_soc_card *card = platform_get_drvdata(pdev);
-
- return broadwell_disable_jack(card);
-}
-
-static struct platform_driver broadwell_audio = {
- .probe = broadwell_audio_probe,
- .remove = broadwell_audio_remove,
- .driver = {
- .name = "broadwell-audio",
- .pm = &snd_soc_pm_ops
- },
-};
-
-module_platform_driver(broadwell_audio)
-
-/* Module information */
-MODULE_AUTHOR("Liam Girdwood, Xingchao Wang");
-MODULE_DESCRIPTION("Intel SST Audio for WPT/Broadwell");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:broadwell-audio");
diff --git a/sound/soc/intel/boards/bxt_da7219_max98357a.c b/sound/soc/intel/boards/bxt_da7219_max98357a.c
index d98376da425a..7c6c95e99ade 100644
--- a/sound/soc/intel/boards/bxt_da7219_max98357a.c
+++ b/sound/soc/intel/boards/bxt_da7219_max98357a.c
@@ -186,6 +186,17 @@ static const struct snd_soc_dapm_route gemini_map[] = {
{"ssp2 Rx", NULL, "Capture"},
};
+static struct snd_soc_jack_pin jack_pins[] = {
+ {
+ .pin = "Headphone Jack",
+ .mask = SND_JACK_HEADPHONE,
+ },
+ {
+ .pin = "Headset Mic",
+ .mask = SND_JACK_MICROPHONE,
+ },
+};
+
static int broxton_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_hw_params *params)
{
@@ -231,10 +242,12 @@ static int broxton_da7219_codec_init(struct snd_soc_pcm_runtime *rtd)
* Headset buttons map to the google Reference headset.
* These can be configured by userspace.
*/
- ret = snd_soc_card_jack_new(rtd->card, "Headset Jack",
- SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 |
- SND_JACK_BTN_2 | SND_JACK_BTN_3 | SND_JACK_LINEOUT,
- &broxton_headset);
+ ret = snd_soc_card_jack_new_pins(rtd->card, "Headset Jack",
+ SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3 | SND_JACK_LINEOUT,
+ &broxton_headset,
+ jack_pins,
+ ARRAY_SIZE(jack_pins));
if (ret) {
dev_err(rtd->dev, "Headset Jack creation failed: %d\n", ret);
return ret;
diff --git a/sound/soc/intel/boards/bxt_rt298.c b/sound/soc/intel/boards/bxt_rt298.c
index 75995d17597d..4bd93c3ba377 100644
--- a/sound/soc/intel/boards/bxt_rt298.c
+++ b/sound/soc/intel/boards/bxt_rt298.c
@@ -176,7 +176,7 @@ static int broxton_rt298_codec_init(struct snd_soc_pcm_runtime *rtd)
if (ret)
return ret;
- rt298_mic_detect(component, &broxton_headset);
+ snd_soc_component_set_jack(component, &broxton_headset, NULL);
snd_soc_dapm_ignore_suspend(&rtd->card->dapm, "SoC DMIC");
diff --git a/sound/soc/intel/boards/bytcht_cx2072x.c b/sound/soc/intel/boards/bytcht_cx2072x.c
index 0eed68a11f7e..ae899866863e 100644
--- a/sound/soc/intel/boards/bytcht_cx2072x.c
+++ b/sound/soc/intel/boards/bytcht_cx2072x.c
@@ -126,7 +126,7 @@ static int byt_cht_cx2072x_fixup(struct snd_soc_pcm_runtime *rtd,
ret = snd_soc_dai_set_fmt(asoc_rtd_to_cpu(rtd, 0),
SND_SOC_DAIFMT_I2S |
SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBC_CFC);
+ SND_SOC_DAIFMT_BP_FP);
if (ret < 0) {
dev_err(rtd->dev, "can't set format to I2S, err %d\n", ret);
return ret;
diff --git a/sound/soc/intel/boards/bytcht_da7213.c b/sound/soc/intel/boards/bytcht_da7213.c
index eb19bf16afad..a0c8f1d3f8ce 100644
--- a/sound/soc/intel/boards/bytcht_da7213.c
+++ b/sound/soc/intel/boards/bytcht_da7213.c
@@ -81,7 +81,7 @@ static int codec_fixup(struct snd_soc_pcm_runtime *rtd,
ret = snd_soc_dai_set_fmt(asoc_rtd_to_cpu(rtd, 0),
SND_SOC_DAIFMT_I2S |
SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBC_CFC);
+ SND_SOC_DAIFMT_BP_FP);
if (ret < 0) {
dev_err(rtd->dev, "can't set format to I2S, err %d\n", ret);
return ret;
diff --git a/sound/soc/intel/boards/bytcht_es8316.c b/sound/soc/intel/boards/bytcht_es8316.c
index a08507783e44..6432b83f616f 100644
--- a/sound/soc/intel/boards/bytcht_es8316.c
+++ b/sound/soc/intel/boards/bytcht_es8316.c
@@ -265,7 +265,7 @@ static int byt_cht_es8316_codec_fixup(struct snd_soc_pcm_runtime *rtd,
ret = snd_soc_dai_set_fmt(asoc_rtd_to_cpu(rtd, 0),
SND_SOC_DAIFMT_I2S |
SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBC_CFC
+ SND_SOC_DAIFMT_BP_FP
);
if (ret < 0) {
dev_err(rtd->dev, "can't set format to I2S, err %d\n", ret);
diff --git a/sound/soc/intel/boards/bytcht_nocodec.c b/sound/soc/intel/boards/bytcht_nocodec.c
index 115c2bcaabd4..7fc03f2efd35 100644
--- a/sound/soc/intel/boards/bytcht_nocodec.c
+++ b/sound/soc/intel/boards/bytcht_nocodec.c
@@ -61,7 +61,7 @@ static int codec_fixup(struct snd_soc_pcm_runtime *rtd,
ret = snd_soc_dai_set_fmt(asoc_rtd_to_cpu(rtd, 0),
SND_SOC_DAIFMT_I2S |
SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBC_CFC);
+ SND_SOC_DAIFMT_BP_FP);
if (ret < 0) {
dev_err(rtd->dev, "can't set format to I2S, err %d\n", ret);
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index ed9fa1728722..fb9d9e271845 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -1413,7 +1413,7 @@ static int byt_rt5640_codec_fixup(struct snd_soc_pcm_runtime *rtd,
ret = snd_soc_dai_set_fmt(asoc_rtd_to_cpu(rtd, 0),
SND_SOC_DAIFMT_I2S |
SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBC_CFC);
+ SND_SOC_DAIFMT_BP_FP);
if (ret < 0) {
dev_err(rtd->dev, "can't set format to I2S, err %d\n", ret);
return ret;
@@ -1636,7 +1636,7 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
* with the codec driver/pdata are non-existent
*/
- struct acpi_chan_package chan_package;
+ struct acpi_chan_package chan_package = { 0 };
/* format specified: 2 64-bit integers */
struct acpi_buffer format = {sizeof("NN"), "NN"};
diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c
index d467fcaa48ea..2beb686768f2 100644
--- a/sound/soc/intel/boards/bytcr_rt5651.c
+++ b/sound/soc/intel/boards/bytcr_rt5651.c
@@ -706,7 +706,7 @@ static int byt_rt5651_codec_fixup(struct snd_soc_pcm_runtime *rtd,
ret = snd_soc_dai_set_fmt(asoc_rtd_to_cpu(rtd, 0),
SND_SOC_DAIFMT_I2S |
SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBC_CFC
+ SND_SOC_DAIFMT_BP_FP
);
if (ret < 0) {
@@ -952,7 +952,7 @@ static int snd_byt_rt5651_mc_probe(struct platform_device *pdev)
* with the codec driver/pdata are non-existent
*/
- struct acpi_chan_package chan_package;
+ struct acpi_chan_package chan_package = { 0 };
/* format specified: 2 64-bit integers */
struct acpi_buffer format = {sizeof("NN"), "NN"};
diff --git a/sound/soc/intel/boards/bytcr_wm5102.c b/sound/soc/intel/boards/bytcr_wm5102.c
index 330c0ace1638..45a6805787f5 100644
--- a/sound/soc/intel/boards/bytcr_wm5102.c
+++ b/sound/soc/intel/boards/bytcr_wm5102.c
@@ -265,7 +265,7 @@ static int byt_wm5102_codec_fixup(struct snd_soc_pcm_runtime *rtd,
ret = snd_soc_dai_set_fmt(asoc_rtd_to_cpu(rtd, 0),
SND_SOC_DAIFMT_I2S |
SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBC_CFC);
+ SND_SOC_DAIFMT_BP_FP);
if (ret) {
dev_err(rtd->dev, "Error setting format to I2S: %d\n", ret);
return ret;
diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
index a5160f27adea..64eb73525ee3 100644
--- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c
+++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
@@ -264,8 +264,7 @@ static int cht_codec_fixup(struct snd_soc_pcm_runtime *rtd,
return ret;
}
- fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
- | SND_SOC_DAIFMT_CBC_CFC;
+ fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_BP_FP;
ret = snd_soc_dai_set_fmt(asoc_rtd_to_cpu(rtd, 0), fmt);
if (ret < 0) {
diff --git a/sound/soc/intel/boards/cht_bsw_rt5645.c b/sound/soc/intel/boards/cht_bsw_rt5645.c
index 45c301ea5e00..96501aed8bee 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5645.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5645.c
@@ -362,7 +362,7 @@ static int cht_codec_fixup(struct snd_soc_pcm_runtime *rtd,
ret = snd_soc_dai_set_fmt(asoc_rtd_to_cpu(rtd, 0),
SND_SOC_DAIFMT_I2S |
SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBC_CFC
+ SND_SOC_DAIFMT_BP_FP
);
if (ret < 0) {
dev_err(rtd->dev, "can't set format to I2S, err %d\n", ret);
@@ -372,7 +372,7 @@ static int cht_codec_fixup(struct snd_soc_pcm_runtime *rtd,
ret = snd_soc_dai_set_fmt(asoc_rtd_to_codec(rtd, 0),
SND_SOC_DAIFMT_I2S |
SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBC_CFC
+ SND_SOC_DAIFMT_BC_FC
);
if (ret < 0) {
dev_err(rtd->dev, "can't set format to I2S, err %d\n", ret);
@@ -396,7 +396,7 @@ static int cht_codec_fixup(struct snd_soc_pcm_runtime *rtd,
ret = snd_soc_dai_set_fmt(asoc_rtd_to_codec(rtd, 0),
SND_SOC_DAIFMT_DSP_B |
SND_SOC_DAIFMT_IB_NF |
- SND_SOC_DAIFMT_CBC_CFC);
+ SND_SOC_DAIFMT_BC_FC);
if (ret < 0) {
dev_err(rtd->dev, "can't set format to TDM %d\n", ret);
return ret;
@@ -603,7 +603,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
* with the codec driver/pdata are non-existent
*/
- struct acpi_chan_package chan_package;
+ struct acpi_chan_package chan_package = { 0 };
/* format specified: 2 64-bit integers */
struct acpi_buffer format = {sizeof("NN"), "NN"};
diff --git a/sound/soc/intel/boards/cht_bsw_rt5672.c b/sound/soc/intel/boards/cht_bsw_rt5672.c
index c80324f34b1b..ca47f6476b07 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5672.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5672.c
@@ -300,7 +300,7 @@ static int cht_codec_fixup(struct snd_soc_pcm_runtime *rtd,
ret = snd_soc_dai_set_fmt(asoc_rtd_to_cpu(rtd, 0),
SND_SOC_DAIFMT_I2S |
SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBC_CFC);
+ SND_SOC_DAIFMT_BP_FP);
if (ret < 0) {
dev_err(rtd->dev, "can't set format to I2S, err %d\n", ret);
return ret;
diff --git a/sound/soc/intel/boards/cml_rt1011_rt5682.c b/sound/soc/intel/boards/cml_rt1011_rt5682.c
index a99f74a15b5f..20da83d9eece 100644
--- a/sound/soc/intel/boards/cml_rt1011_rt5682.c
+++ b/sound/soc/intel/boards/cml_rt1011_rt5682.c
@@ -121,6 +121,17 @@ static const struct snd_soc_dapm_route cml_rt1011_tt_map[] = {
{"TR Ext Spk", NULL, "TR SPO" },
};
+static struct snd_soc_jack_pin jack_pins[] = {
+ {
+ .pin = "Headphone Jack",
+ .mask = SND_JACK_HEADPHONE,
+ },
+ {
+ .pin = "Headset Mic",
+ .mask = SND_JACK_MICROPHONE,
+ },
+};
+
static int cml_rt5682_codec_init(struct snd_soc_pcm_runtime *rtd)
{
struct card_private *ctx = snd_soc_card_get_drvdata(rtd->card);
@@ -137,11 +148,13 @@ static int cml_rt5682_codec_init(struct snd_soc_pcm_runtime *rtd)
* Headset buttons map to the google Reference headset.
* These can be configured by userspace.
*/
- ret = snd_soc_card_jack_new(rtd->card, "Headset Jack",
- SND_JACK_HEADSET | SND_JACK_BTN_0 |
- SND_JACK_BTN_1 | SND_JACK_BTN_2 |
- SND_JACK_BTN_3,
- &ctx->headset);
+ ret = snd_soc_card_jack_new_pins(rtd->card, "Headset Jack",
+ SND_JACK_HEADSET | SND_JACK_BTN_0 |
+ SND_JACK_BTN_1 | SND_JACK_BTN_2 |
+ SND_JACK_BTN_3,
+ &ctx->headset,
+ jack_pins,
+ ARRAY_SIZE(jack_pins));
if (ret) {
dev_err(rtd->dev, "Headset Jack creation failed: %d\n", ret);
return ret;
diff --git a/sound/soc/intel/boards/glk_rt5682_max98357a.c b/sound/soc/intel/boards/glk_rt5682_max98357a.c
index 170164baae7d..cf0f89db3e20 100644
--- a/sound/soc/intel/boards/glk_rt5682_max98357a.c
+++ b/sound/soc/intel/boards/glk_rt5682_max98357a.c
@@ -78,6 +78,17 @@ static const struct snd_soc_dapm_widget geminilake_widgets[] = {
SND_SOC_DAPM_SPK("HDMI3", NULL),
};
+static struct snd_soc_jack_pin jack_pins[] = {
+ {
+ .pin = "Headphone Jack",
+ .mask = SND_JACK_HEADPHONE,
+ },
+ {
+ .pin = "Headset Mic",
+ .mask = SND_JACK_MICROPHONE,
+ },
+};
+
static const struct snd_soc_dapm_route geminilake_map[] = {
/* HP jack connectors - unknown if we have jack detection */
{ "Headphone Jack", NULL, "HPOL" },
@@ -173,10 +184,12 @@ static int geminilake_rt5682_codec_init(struct snd_soc_pcm_runtime *rtd)
* Headset buttons map to the google Reference headset.
* These can be configured by userspace.
*/
- ret = snd_soc_card_jack_new(rtd->card, "Headset Jack",
- SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 |
- SND_JACK_BTN_2 | SND_JACK_BTN_3 | SND_JACK_LINEOUT,
- &ctx->geminilake_headset);
+ ret = snd_soc_card_jack_new_pins(rtd->card, "Headset Jack",
+ SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3 | SND_JACK_LINEOUT,
+ &ctx->geminilake_headset,
+ jack_pins,
+ ARRAY_SIZE(jack_pins));
if (ret) {
dev_err(rtd->dev, "Headset Jack creation failed: %d\n", ret);
return ret;
diff --git a/sound/soc/intel/boards/haswell.c b/sound/soc/intel/boards/haswell.c
deleted file mode 100644
index aa61e101f793..000000000000
--- a/sound/soc/intel/boards/haswell.c
+++ /dev/null
@@ -1,202 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel Haswell Lynxpoint SST Audio
- *
- * Copyright (C) 2013, Intel Corporation. All rights reserved.
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/soc.h>
-#include <sound/soc-acpi.h>
-#include <sound/pcm_params.h>
-
-#include "../../codecs/rt5640.h"
-
-/* Haswell ULT platforms have a Headphone and Mic jack */
-static const struct snd_soc_dapm_widget haswell_widgets[] = {
- SND_SOC_DAPM_HP("Headphones", NULL),
- SND_SOC_DAPM_MIC("Mic", NULL),
-};
-
-static const struct snd_soc_dapm_route haswell_rt5640_map[] = {
-
- {"Headphones", NULL, "HPOR"},
- {"Headphones", NULL, "HPOL"},
- {"IN2P", NULL, "Mic"},
-
- /* CODEC BE connections */
- {"SSP0 CODEC IN", NULL, "AIF1 Capture"},
- {"AIF1 Playback", NULL, "SSP0 CODEC OUT"},
-};
-
-static int haswell_ssp0_fixup(struct snd_soc_pcm_runtime *rtd,
- struct snd_pcm_hw_params *params)
-{
- struct snd_interval *rate = hw_param_interval(params,
- SNDRV_PCM_HW_PARAM_RATE);
- struct snd_interval *channels = hw_param_interval(params,
- SNDRV_PCM_HW_PARAM_CHANNELS);
-
-	/* The ADSP will convert the FE rate to 48k, stereo */
- rate->min = rate->max = 48000;
- channels->min = channels->max = 2;
-
- /* set SSP0 to 16 bit */
- params_set_format(params, SNDRV_PCM_FORMAT_S16_LE);
- return 0;
-}
-
-static int haswell_rt5640_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
- struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
- int ret;
-
- ret = snd_soc_dai_set_sysclk(codec_dai, RT5640_SCLK_S_MCLK, 12288000,
- SND_SOC_CLOCK_IN);
-
- if (ret < 0) {
- dev_err(rtd->dev, "can't set codec sysclk configuration\n");
- return ret;
- }
-
- /* set correct codec filter for DAI format and clock config */
- snd_soc_component_update_bits(codec_dai->component, 0x83, 0xffff, 0x8000);
-
- return ret;
-}
-
-static const struct snd_soc_ops haswell_rt5640_ops = {
- .hw_params = haswell_rt5640_hw_params,
-};
-
-SND_SOC_DAILINK_DEF(dummy,
- DAILINK_COMP_ARRAY(COMP_DUMMY()));
-
-SND_SOC_DAILINK_DEF(system,
- DAILINK_COMP_ARRAY(COMP_CPU("System Pin")));
-
-SND_SOC_DAILINK_DEF(offload0,
- DAILINK_COMP_ARRAY(COMP_CPU("Offload0 Pin")));
-
-SND_SOC_DAILINK_DEF(offload1,
- DAILINK_COMP_ARRAY(COMP_CPU("Offload1 Pin")));
-
-SND_SOC_DAILINK_DEF(loopback,
- DAILINK_COMP_ARRAY(COMP_CPU("Loopback Pin")));
-
-SND_SOC_DAILINK_DEF(codec,
- DAILINK_COMP_ARRAY(COMP_CODEC("i2c-INT33CA:00", "rt5640-aif1")));
-
-SND_SOC_DAILINK_DEF(platform,
- DAILINK_COMP_ARRAY(COMP_PLATFORM("haswell-pcm-audio")));
-
-SND_SOC_DAILINK_DEF(ssp0_port,
- DAILINK_COMP_ARRAY(COMP_CPU("ssp0-port")));
-
-static struct snd_soc_dai_link haswell_rt5640_dais[] = {
- /* Front End DAI links */
- {
- .name = "System",
- .stream_name = "System Playback/Capture",
- .nonatomic = 1,
- .dynamic = 1,
- .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
- .dpcm_playback = 1,
- .dpcm_capture = 1,
- SND_SOC_DAILINK_REG(system, dummy, platform),
- },
- {
- .name = "Offload0",
- .stream_name = "Offload0 Playback",
- .nonatomic = 1,
- .dynamic = 1,
- .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
- .dpcm_playback = 1,
- SND_SOC_DAILINK_REG(offload0, dummy, platform),
- },
- {
- .name = "Offload1",
- .stream_name = "Offload1 Playback",
- .nonatomic = 1,
- .dynamic = 1,
- .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
- .dpcm_playback = 1,
- SND_SOC_DAILINK_REG(offload1, dummy, platform),
- },
- {
- .name = "Loopback",
- .stream_name = "Loopback",
- .nonatomic = 1,
- .dynamic = 1,
- .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
- .dpcm_capture = 1,
- SND_SOC_DAILINK_REG(loopback, dummy, platform),
- },
-
- /* Back End DAI links */
- {
- /* SSP0 - Codec */
- .name = "Codec",
- .id = 0,
- .no_pcm = 1,
- .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBC_CFC,
- .ignore_pmdown_time = 1,
- .be_hw_params_fixup = haswell_ssp0_fixup,
- .ops = &haswell_rt5640_ops,
- .dpcm_playback = 1,
- .dpcm_capture = 1,
- SND_SOC_DAILINK_REG(ssp0_port, codec, platform),
- },
-};
-
-/* audio machine driver for Haswell Lynxpoint DSP + RT5640 */
-static struct snd_soc_card haswell_rt5640 = {
- .name = "haswell-rt5640",
- .owner = THIS_MODULE,
- .dai_link = haswell_rt5640_dais,
- .num_links = ARRAY_SIZE(haswell_rt5640_dais),
- .dapm_widgets = haswell_widgets,
- .num_dapm_widgets = ARRAY_SIZE(haswell_widgets),
- .dapm_routes = haswell_rt5640_map,
- .num_dapm_routes = ARRAY_SIZE(haswell_rt5640_map),
- .fully_routed = true,
-};
-
-static int haswell_audio_probe(struct platform_device *pdev)
-{
- struct snd_soc_acpi_mach *mach;
- int ret;
-
- haswell_rt5640.dev = &pdev->dev;
-
- /* override platform name, if required */
- mach = pdev->dev.platform_data;
- ret = snd_soc_fixup_dai_links_platform_name(&haswell_rt5640,
- mach->mach_params.platform);
- if (ret)
- return ret;
-
- return devm_snd_soc_register_card(&pdev->dev, &haswell_rt5640);
-}
-
-static struct platform_driver haswell_audio = {
- .probe = haswell_audio_probe,
- .driver = {
- .name = "haswell-audio",
- .pm = &snd_soc_pm_ops,
- },
-};
-
-module_platform_driver(haswell_audio)
-
-/* Module information */
-MODULE_AUTHOR("Liam Girdwood, Xingchao Wang");
-MODULE_DESCRIPTION("Intel SST Audio for Haswell Lynxpoint");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:haswell-audio");
diff --git a/sound/soc/intel/boards/hda_dsp_common.c b/sound/soc/intel/boards/hda_dsp_common.c
index 5c31ddc0884a..83c7dfbccd9d 100644
--- a/sound/soc/intel/boards/hda_dsp_common.c
+++ b/sound/soc/intel/boards/hda_dsp_common.c
@@ -62,8 +62,8 @@ int hda_dsp_hdmi_build_controls(struct snd_soc_card *card,
hpcm->pcm = spcm;
hpcm->device = spcm->device;
dev_dbg(card->dev,
- "%s: mapping HDMI converter %d to PCM %d (%p)\n",
- __func__, i, hpcm->device, spcm);
+ "mapping HDMI converter %d to PCM %d (%p)\n",
+ i, hpcm->device, spcm);
} else {
hpcm->pcm = NULL;
hpcm->device = SNDRV_PCM_INVALID_DEVICE;
diff --git a/sound/soc/intel/boards/hsw_rt5640.c b/sound/soc/intel/boards/hsw_rt5640.c
new file mode 100644
index 000000000000..050c53ebd6ba
--- /dev/null
+++ b/sound/soc/intel/boards/hsw_rt5640.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Sound card driver for Intel Haswell Lynx Point with Realtek 5640
+ *
+ * Copyright (C) 2013, Intel Corporation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-acpi.h>
+#include "../../codecs/rt5640.h"
+
+static const struct snd_soc_dapm_widget card_widgets[] = {
+ SND_SOC_DAPM_HP("Headphones", NULL),
+ SND_SOC_DAPM_MIC("Mic", NULL),
+};
+
+static const struct snd_soc_dapm_route card_routes[] = {
+ {"Headphones", NULL, "HPOR"},
+ {"Headphones", NULL, "HPOL"},
+ {"IN2P", NULL, "Mic"},
+
+ /* CODEC BE connections */
+ {"SSP0 CODEC IN", NULL, "AIF1 Capture"},
+ {"AIF1 Playback", NULL, "SSP0 CODEC OUT"},
+};
+
+static int codec_link_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+ struct snd_interval *rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+
+ /* The ADSP will convert the FE rate to 48k, stereo. */
+ rate->min = rate->max = 48000;
+ channels->min = channels->max = 2;
+ /* Set SSP0 to 16 bit. */
+ params_set_format(params, SNDRV_PCM_FORMAT_S16_LE);
+
+ return 0;
+}
+
+static int codec_link_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ int ret;
+
+ ret = snd_soc_dai_set_sysclk(codec_dai, RT5640_SCLK_S_MCLK, 12288000, SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ dev_err(rtd->dev, "set codec sysclk failed: %d\n", ret);
+ return ret;
+ }
+
+ /* Set correct codec filter for DAI format and clock config. */
+ snd_soc_component_update_bits(codec_dai->component, 0x83, 0xffff, 0x8000);
+
+ return ret;
+}
+
+static const struct snd_soc_ops codec_link_ops = {
+ .hw_params = codec_link_hw_params,
+};
+
+SND_SOC_DAILINK_DEF(system, DAILINK_COMP_ARRAY(COMP_CPU("System Pin")));
+SND_SOC_DAILINK_DEF(offload0, DAILINK_COMP_ARRAY(COMP_CPU("Offload0 Pin")));
+SND_SOC_DAILINK_DEF(offload1, DAILINK_COMP_ARRAY(COMP_CPU("Offload1 Pin")));
+SND_SOC_DAILINK_DEF(loopback, DAILINK_COMP_ARRAY(COMP_CPU("Loopback Pin")));
+
+SND_SOC_DAILINK_DEF(dummy, DAILINK_COMP_ARRAY(COMP_DUMMY()));
+SND_SOC_DAILINK_DEF(codec, DAILINK_COMP_ARRAY(COMP_CODEC("i2c-INT33CA:00", "rt5640-aif1")));
+SND_SOC_DAILINK_DEF(platform, DAILINK_COMP_ARRAY(COMP_PLATFORM("haswell-pcm-audio")));
+SND_SOC_DAILINK_DEF(ssp0_port, DAILINK_COMP_ARRAY(COMP_CPU("ssp0-port")));
+
+static struct snd_soc_dai_link card_dai_links[] = {
+ /* Front End DAI links */
+ {
+ .name = "System",
+ .stream_name = "System Playback/Capture",
+ .nonatomic = 1,
+ .dynamic = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ SND_SOC_DAILINK_REG(system, dummy, platform),
+ },
+ {
+ .name = "Offload0",
+ .stream_name = "Offload0 Playback",
+ .nonatomic = 1,
+ .dynamic = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_playback = 1,
+ SND_SOC_DAILINK_REG(offload0, dummy, platform),
+ },
+ {
+ .name = "Offload1",
+ .stream_name = "Offload1 Playback",
+ .nonatomic = 1,
+ .dynamic = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_playback = 1,
+ SND_SOC_DAILINK_REG(offload1, dummy, platform),
+ },
+ {
+ .name = "Loopback",
+ .stream_name = "Loopback",
+ .nonatomic = 1,
+ .dynamic = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_capture = 1,
+ SND_SOC_DAILINK_REG(loopback, dummy, platform),
+ },
+ /* Back End DAI links */
+ {
+ /* SSP0 - Codec */
+ .name = "Codec",
+ .id = 0,
+ .nonatomic = 1,
+ .no_pcm = 1,
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBC_CFC,
+ .ignore_pmdown_time = 1,
+ .be_hw_params_fixup = codec_link_hw_params_fixup,
+ .ops = &codec_link_ops,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ SND_SOC_DAILINK_REG(ssp0_port, codec, platform),
+ },
+};
+
+static struct snd_soc_card hsw_rt5640_card = {
+ .name = "haswell-rt5640",
+ .owner = THIS_MODULE,
+ .dai_link = card_dai_links,
+ .num_links = ARRAY_SIZE(card_dai_links),
+ .dapm_widgets = card_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(card_widgets),
+ .dapm_routes = card_routes,
+ .num_dapm_routes = ARRAY_SIZE(card_routes),
+ .fully_routed = true,
+};
+
+static int hsw_rt5640_probe(struct platform_device *pdev)
+{
+ struct snd_soc_acpi_mach *mach;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ hsw_rt5640_card.dev = dev;
+ mach = dev_get_platdata(dev);
+
+ ret = snd_soc_fixup_dai_links_platform_name(&hsw_rt5640_card, mach->mach_params.platform);
+ if (ret)
+ return ret;
+
+ return devm_snd_soc_register_card(dev, &hsw_rt5640_card);
+}
+
+static struct platform_driver hsw_rt5640_driver = {
+ .probe = hsw_rt5640_probe,
+ .driver = {
+ .name = "hsw_rt5640",
+ .pm = &snd_soc_pm_ops,
+ },
+};
+
+module_platform_driver(hsw_rt5640_driver)
+
+MODULE_AUTHOR("Liam Girdwood, Xingchao Wang");
+MODULE_DESCRIPTION("Sound card driver for Intel Haswell Lynx Point with Realtek 5640");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:hsw_rt5640");
diff --git a/sound/soc/intel/boards/kbl_da7219_max98357a.c b/sound/soc/intel/boards/kbl_da7219_max98357a.c
index ceabed85e9da..329457e3e3a2 100644
--- a/sound/soc/intel/boards/kbl_da7219_max98357a.c
+++ b/sound/soc/intel/boards/kbl_da7219_max98357a.c
@@ -99,6 +99,17 @@ static const struct snd_soc_dapm_widget kabylake_widgets[] = {
SND_SOC_DAPM_POST_PMD),
};
+static struct snd_soc_jack_pin jack_pins[] = {
+ {
+ .pin = "Headphone Jack",
+ .mask = SND_JACK_HEADPHONE,
+ },
+ {
+ .pin = "Headset Mic",
+ .mask = SND_JACK_MICROPHONE,
+ },
+};
+
static const struct snd_soc_dapm_route kabylake_map[] = {
{ "Headphone Jack", NULL, "HPL" },
{ "Headphone Jack", NULL, "HPR" },
@@ -179,10 +190,12 @@ static int kabylake_da7219_codec_init(struct snd_soc_pcm_runtime *rtd)
* Headset buttons map to the google Reference headset.
* These can be configured by userspace.
*/
- ret = snd_soc_card_jack_new(kabylake_audio_card, "Headset Jack",
- SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 |
- SND_JACK_BTN_2 | SND_JACK_BTN_3 | SND_JACK_LINEOUT,
- &ctx->kabylake_headset);
+ ret = snd_soc_card_jack_new_pins(kabylake_audio_card, "Headset Jack",
+ SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3 | SND_JACK_LINEOUT,
+ &ctx->kabylake_headset,
+ jack_pins,
+ ARRAY_SIZE(jack_pins));
if (ret) {
dev_err(rtd->dev, "Headset Jack creation failed: %d\n", ret);
return ret;
diff --git a/sound/soc/intel/boards/kbl_da7219_max98927.c b/sound/soc/intel/boards/kbl_da7219_max98927.c
index 703ccff634b0..362579f25835 100644
--- a/sound/soc/intel/boards/kbl_da7219_max98927.c
+++ b/sound/soc/intel/boards/kbl_da7219_max98927.c
@@ -119,6 +119,17 @@ static const struct snd_soc_dapm_widget kabylake_widgets[] = {
SND_SOC_DAPM_POST_PMD),
};
+static struct snd_soc_jack_pin jack_pins[] = {
+ {
+ .pin = "Headphone Jack",
+ .mask = SND_JACK_HEADPHONE,
+ },
+ {
+ .pin = "Headset Mic",
+ .mask = SND_JACK_MICROPHONE,
+ },
+};
+
static const struct snd_soc_dapm_route kabylake_map[] = {
/* speaker */
{ "Left Spk", NULL, "Left BE_OUT" },
@@ -354,10 +365,12 @@ static int kabylake_da7219_codec_init(struct snd_soc_pcm_runtime *rtd)
* Headset buttons map to the google Reference headset.
* These can be configured by userspace.
*/
- ret = snd_soc_card_jack_new(kabylake_audio_card, "Headset Jack",
- SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 |
- SND_JACK_BTN_2 | SND_JACK_BTN_3 | SND_JACK_LINEOUT,
- &ctx->kabylake_headset);
+ ret = snd_soc_card_jack_new_pins(kabylake_audio_card, "Headset Jack",
+ SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3 | SND_JACK_LINEOUT,
+ &ctx->kabylake_headset,
+ jack_pins,
+ ARRAY_SIZE(jack_pins));
if (ret) {
dev_err(rtd->dev, "Headset Jack creation failed: %d\n", ret);
return ret;
diff --git a/sound/soc/intel/boards/kbl_rt5663_max98927.c b/sound/soc/intel/boards/kbl_rt5663_max98927.c
index 8d37b2676a81..2d4224c5b152 100644
--- a/sound/soc/intel/boards/kbl_rt5663_max98927.c
+++ b/sound/soc/intel/boards/kbl_rt5663_max98927.c
@@ -206,6 +206,17 @@ static const struct snd_soc_dapm_widget kabylake_5663_widgets[] = {
SND_SOC_DAPM_POST_PMD),
};
+static struct snd_soc_jack_pin jack_pins[] = {
+ {
+ .pin = "Headphone Jack",
+ .mask = SND_JACK_HEADPHONE,
+ },
+ {
+ .pin = "Headset Mic",
+ .mask = SND_JACK_MICROPHONE,
+ },
+};
+
static const struct snd_soc_dapm_route kabylake_5663_map[] = {
{ "Headphone Jack", NULL, "Platform Clock" },
{ "Headphone Jack", NULL, "HPOL" },
@@ -271,10 +282,12 @@ static int kabylake_rt5663_codec_init(struct snd_soc_pcm_runtime *rtd)
* Headset buttons map to the google Reference headset.
* These can be configured by userspace.
*/
- ret = snd_soc_card_jack_new(kabylake_audio_card, "Headset Jack",
- SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 |
- SND_JACK_BTN_2 | SND_JACK_BTN_3,
- &ctx->kabylake_headset);
+ ret = snd_soc_card_jack_new_pins(kabylake_audio_card, "Headset Jack",
+ SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3,
+ &ctx->kabylake_headset,
+ jack_pins,
+ ARRAY_SIZE(jack_pins));
if (ret) {
dev_err(rtd->dev, "Headset Jack creation failed %d\n", ret);
return ret;
diff --git a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
index 564c70a0fbc8..2c79fca57b19 100644
--- a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
+++ b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
@@ -145,6 +145,17 @@ static const struct snd_soc_dapm_widget kabylake_widgets[] = {
};
+static struct snd_soc_jack_pin jack_pins[] = {
+ {
+ .pin = "Headphone Jack",
+ .mask = SND_JACK_HEADPHONE,
+ },
+ {
+ .pin = "Headset Mic",
+ .mask = SND_JACK_MICROPHONE,
+ },
+};
+
static const struct snd_soc_dapm_route kabylake_map[] = {
/* Headphones */
{ "Headphone Jack", NULL, "Platform Clock" },
@@ -228,10 +239,12 @@ static int kabylake_rt5663_codec_init(struct snd_soc_pcm_runtime *rtd)
* Headset buttons map to the google Reference headset.
* These can be configured by userspace.
*/
- ret = snd_soc_card_jack_new(&kabylake_audio_card, "Headset Jack",
- SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 |
- SND_JACK_BTN_2 | SND_JACK_BTN_3,
- &ctx->kabylake_headset);
+ ret = snd_soc_card_jack_new_pins(&kabylake_audio_card, "Headset Jack",
+ SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3,
+ &ctx->kabylake_headset,
+ jack_pins,
+ ARRAY_SIZE(jack_pins));
if (ret) {
dev_err(rtd->dev, "Headset Jack creation failed %d\n", ret);
return ret;
diff --git a/sound/soc/intel/boards/skl_hda_dsp_generic.c b/sound/soc/intel/boards/skl_hda_dsp_generic.c
index f4b4eeca3e03..81144efb4b44 100644
--- a/sound/soc/intel/boards/skl_hda_dsp_generic.c
+++ b/sound/soc/intel/boards/skl_hda_dsp_generic.c
@@ -75,7 +75,7 @@ skl_hda_add_dai_link(struct snd_soc_card *card, struct snd_soc_dai_link *link)
struct skl_hda_private *ctx = snd_soc_card_get_drvdata(card);
int ret = 0;
- dev_dbg(card->dev, "%s: dai link name - %s\n", __func__, link->name);
+ dev_dbg(card->dev, "dai link name - %s\n", link->name);
link->platforms->name = ctx->platform_name;
link->nonatomic = 1;
@@ -203,7 +203,7 @@ static int skl_hda_audio_probe(struct platform_device *pdev)
struct skl_hda_private *ctx;
int ret;
- dev_dbg(&pdev->dev, "%s: entry\n", __func__);
+ dev_dbg(&pdev->dev, "entry\n");
ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
diff --git a/sound/soc/intel/boards/skl_nau88l25_max98357a.c b/sound/soc/intel/boards/skl_nau88l25_max98357a.c
index 8e2d03e36079..8dceb0b02581 100644
--- a/sound/soc/intel/boards/skl_nau88l25_max98357a.c
+++ b/sound/soc/intel/boards/skl_nau88l25_max98357a.c
@@ -97,6 +97,17 @@ static const struct snd_soc_dapm_widget skylake_widgets[] = {
SND_SOC_DAPM_POST_PMD),
};
+static struct snd_soc_jack_pin jack_pins[] = {
+ {
+ .pin = "Headphone Jack",
+ .mask = SND_JACK_HEADPHONE,
+ },
+ {
+ .pin = "Headset Mic",
+ .mask = SND_JACK_MICROPHONE,
+ },
+};
+
static const struct snd_soc_dapm_route skylake_map[] = {
/* HP jack connectors - unknown if we have jack detection */
{ "Headphone Jack", NULL, "HPOL" },
@@ -163,9 +174,11 @@ static int skylake_nau8825_codec_init(struct snd_soc_pcm_runtime *rtd)
* Headset buttons map to the google Reference headset.
* These can be configured by userspace.
*/
- ret = snd_soc_card_jack_new(&skylake_audio_card, "Headset Jack",
- SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 |
- SND_JACK_BTN_2 | SND_JACK_BTN_3, &skylake_headset);
+ ret = snd_soc_card_jack_new_pins(&skylake_audio_card, "Headset Jack",
+ SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3, &skylake_headset,
+ jack_pins,
+ ARRAY_SIZE(jack_pins));
if (ret) {
dev_err(rtd->dev, "Headset Jack creation failed %d\n", ret);
return ret;
diff --git a/sound/soc/intel/boards/skl_nau88l25_ssm4567.c b/sound/soc/intel/boards/skl_nau88l25_ssm4567.c
index 501f0bbfc404..62c0d46d0086 100644
--- a/sound/soc/intel/boards/skl_nau88l25_ssm4567.c
+++ b/sound/soc/intel/boards/skl_nau88l25_ssm4567.c
@@ -101,6 +101,17 @@ static const struct snd_soc_dapm_widget skylake_widgets[] = {
SND_SOC_DAPM_POST_PMD),
};
+static struct snd_soc_jack_pin jack_pins[] = {
+ {
+ .pin = "Headphone Jack",
+ .mask = SND_JACK_HEADPHONE,
+ },
+ {
+ .pin = "Headset Mic",
+ .mask = SND_JACK_MICROPHONE,
+ },
+};
+
static const struct snd_soc_dapm_route skylake_map[] = {
/* HP jack connectors - unknown if we have jack detection */
{"Headphone Jack", NULL, "HPOL"},
@@ -182,9 +193,11 @@ static int skylake_nau8825_codec_init(struct snd_soc_pcm_runtime *rtd)
* 4 buttons here map to the google Reference headset
* The use of these buttons can be decided by the user space.
*/
- ret = snd_soc_card_jack_new(&skylake_audio_card, "Headset Jack",
- SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 |
- SND_JACK_BTN_2 | SND_JACK_BTN_3, &skylake_headset);
+ ret = snd_soc_card_jack_new_pins(&skylake_audio_card, "Headset Jack",
+ SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3, &skylake_headset,
+ jack_pins,
+ ARRAY_SIZE(jack_pins));
if (ret) {
dev_err(rtd->dev, "Headset Jack creation failed %d\n", ret);
return ret;
diff --git a/sound/soc/intel/boards/skl_rt286.c b/sound/soc/intel/boards/skl_rt286.c
index e9f9520dcea4..4f3d655e2bfa 100644
--- a/sound/soc/intel/boards/skl_rt286.c
+++ b/sound/soc/intel/boards/skl_rt286.c
@@ -133,7 +133,7 @@ static int skylake_rt286_codec_init(struct snd_soc_pcm_runtime *rtd)
if (ret)
return ret;
- rt286_mic_detect(component, &skylake_headset);
+ snd_soc_component_set_jack(component, &skylake_headset, NULL);
snd_soc_dapm_ignore_suspend(&rtd->card->dapm, "SoC DMIC");
diff --git a/sound/soc/intel/boards/sof_cs42l42.c b/sound/soc/intel/boards/sof_cs42l42.c
index 6a979c333bc5..85ffd065895d 100644
--- a/sound/soc/intel/boards/sof_cs42l42.c
+++ b/sound/soc/intel/boards/sof_cs42l42.c
@@ -41,8 +41,13 @@
#define SOF_CS42L42_DAILINK_MASK (GENMASK(24, 10))
#define SOF_CS42L42_DAILINK(link1, link2, link3, link4, link5) \
((((link1) | ((link2) << 3) | ((link3) << 6) | ((link4) << 9) | ((link5) << 12)) << SOF_CS42L42_DAILINK_SHIFT) & SOF_CS42L42_DAILINK_MASK)
-#define SOF_MAX98357A_SPEAKER_AMP_PRESENT BIT(25)
-#define SOF_MAX98360A_SPEAKER_AMP_PRESENT BIT(26)
+#define SOF_BT_OFFLOAD_PRESENT BIT(25)
+#define SOF_CS42L42_SSP_BT_SHIFT 26
+#define SOF_CS42L42_SSP_BT_MASK (GENMASK(28, 26))
+#define SOF_CS42L42_SSP_BT(quirk) \
+ (((quirk) << SOF_CS42L42_SSP_BT_SHIFT) & SOF_CS42L42_SSP_BT_MASK)
+#define SOF_MAX98357A_SPEAKER_AMP_PRESENT BIT(29)
+#define SOF_MAX98360A_SPEAKER_AMP_PRESENT BIT(30)
enum {
LINK_NONE = 0,
@@ -50,6 +55,18 @@ enum {
LINK_SPK = 2,
LINK_DMIC = 3,
LINK_HDMI = 4,
+ LINK_BT = 5,
+};
+
+static struct snd_soc_jack_pin jack_pins[] = {
+ {
+ .pin = "Headphone Jack",
+ .mask = SND_JACK_HEADPHONE,
+ },
+ {
+ .pin = "Headset Mic",
+ .mask = SND_JACK_MICROPHONE,
+ },
};
/* Default: SSP2 */
@@ -98,11 +115,13 @@ static int sof_cs42l42_init(struct snd_soc_pcm_runtime *rtd)
* Headset buttons map to the google Reference headset.
* These can be configured by userspace.
*/
- ret = snd_soc_card_jack_new(rtd->card, "Headset Jack",
- SND_JACK_HEADSET | SND_JACK_BTN_0 |
- SND_JACK_BTN_1 | SND_JACK_BTN_2 |
- SND_JACK_BTN_3,
- jack);
+ ret = snd_soc_card_jack_new_pins(rtd->card, "Headset Jack",
+ SND_JACK_HEADSET | SND_JACK_BTN_0 |
+ SND_JACK_BTN_1 | SND_JACK_BTN_2 |
+ SND_JACK_BTN_3,
+ jack,
+ jack_pins,
+ ARRAY_SIZE(jack_pins));
if (ret) {
dev_err(rtd->dev, "Headset Jack creation failed: %d\n", ret);
return ret;
@@ -277,6 +296,13 @@ static struct snd_soc_dai_link_component dmic_component[] = {
}
};
+static struct snd_soc_dai_link_component dummy_component[] = {
+ {
+ .name = "snd-soc-dummy",
+ .dai_name = "snd-soc-dummy-dai",
+ }
+};
+
static int create_spk_amp_dai_links(struct device *dev,
struct snd_soc_dai_link *links,
struct snd_soc_dai_link_component *cpus,
@@ -466,9 +492,50 @@ devm_err:
return -ENOMEM;
}
+static int create_bt_offload_dai_links(struct device *dev,
+ struct snd_soc_dai_link *links,
+ struct snd_soc_dai_link_component *cpus,
+ int *id, int ssp_bt)
+{
+ /* bt offload */
+ if (!(sof_cs42l42_quirk & SOF_BT_OFFLOAD_PRESENT))
+ return 0;
+
+ links[*id].name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d-BT",
+ ssp_bt);
+ if (!links[*id].name)
+ goto devm_err;
+
+ links[*id].id = *id;
+ links[*id].codecs = dummy_component;
+ links[*id].num_codecs = ARRAY_SIZE(dummy_component);
+ links[*id].platforms = platform_component;
+ links[*id].num_platforms = ARRAY_SIZE(platform_component);
+
+ links[*id].dpcm_playback = 1;
+ links[*id].dpcm_capture = 1;
+ links[*id].no_pcm = 1;
+ links[*id].cpus = &cpus[*id];
+ links[*id].num_cpus = 1;
+
+ links[*id].cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL,
+ "SSP%d Pin",
+ ssp_bt);
+ if (!links[*id].cpus->dai_name)
+ goto devm_err;
+
+ (*id)++;
+
+ return 0;
+
+devm_err:
+ return -ENOMEM;
+}
+
static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
int ssp_codec,
int ssp_amp,
+ int ssp_bt,
int dmic_be_num,
int hdmi_num)
{
@@ -521,6 +588,14 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
goto devm_err;
}
break;
+ case LINK_BT:
+ ret = create_bt_offload_dai_links(dev, links, cpus, &id, ssp_bt);
+ if (ret < 0) {
+ dev_err(dev, "fail to create bt offload dai links, ret %d\n",
+ ret);
+ goto devm_err;
+ }
+ break;
case LINK_NONE:
/* caught here if it's not used as terminator in macro */
default:
@@ -542,7 +617,7 @@ static int sof_audio_probe(struct platform_device *pdev)
struct snd_soc_acpi_mach *mach;
struct sof_card_private *ctx;
int dmic_be_num, hdmi_num;
- int ret, ssp_amp, ssp_codec;
+ int ret, ssp_bt, ssp_amp, ssp_codec;
ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -567,6 +642,9 @@ static int sof_audio_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "sof_cs42l42_quirk = %lx\n", sof_cs42l42_quirk);
+ ssp_bt = (sof_cs42l42_quirk & SOF_CS42L42_SSP_BT_MASK) >>
+ SOF_CS42L42_SSP_BT_SHIFT;
+
ssp_amp = (sof_cs42l42_quirk & SOF_CS42L42_SSP_AMP_MASK) >>
SOF_CS42L42_SSP_AMP_SHIFT;
@@ -577,9 +655,11 @@ static int sof_audio_probe(struct platform_device *pdev)
if (sof_cs42l42_quirk & SOF_SPEAKER_AMP_PRESENT)
sof_audio_card_cs42l42.num_links++;
+ if (sof_cs42l42_quirk & SOF_BT_OFFLOAD_PRESENT)
+ sof_audio_card_cs42l42.num_links++;
dai_links = sof_card_dai_links_create(&pdev->dev, ssp_codec, ssp_amp,
- dmic_be_num, hdmi_num);
+ ssp_bt, dmic_be_num, hdmi_num);
if (!dai_links)
return -ENOMEM;
@@ -620,6 +700,17 @@ static const struct platform_device_id board_ids[] = {
SOF_CS42L42_SSP_AMP(1)) |
SOF_CS42L42_DAILINK(LINK_HP, LINK_DMIC, LINK_HDMI, LINK_SPK, LINK_NONE),
},
+ {
+ .name = "adl_mx98360a_cs4242",
+ .driver_data = (kernel_ulong_t)(SOF_CS42L42_SSP_CODEC(0) |
+ SOF_SPEAKER_AMP_PRESENT |
+ SOF_MAX98360A_SPEAKER_AMP_PRESENT |
+ SOF_CS42L42_SSP_AMP(1) |
+ SOF_CS42L42_NUM_HDMIDEV(4) |
+ SOF_BT_OFFLOAD_PRESENT |
+ SOF_CS42L42_SSP_BT(2) |
+ SOF_CS42L42_DAILINK(LINK_HP, LINK_DMIC, LINK_HDMI, LINK_SPK, LINK_BT)),
+ },
{ }
};
MODULE_DEVICE_TABLE(platform, board_ids);
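The new BT-offload quirk bits in sof_cs42l42.c follow the same encode/decode convention as the existing SSP fields: the board table ORs SOF_CS42L42_SSP_BT(n) into driver_data, and probe masks and shifts it back out. A self-contained sketch of that convention (the MY_* macros are illustrative stand-ins for the SOF_CS42L42_* definitions above):

	/* needs <linux/bits.h>; field layout mirrors SOF_CS42L42_SSP_BT above */
	#define MY_SSP_BT_SHIFT	26
	#define MY_SSP_BT_MASK	GENMASK(28, 26)
	#define MY_SSP_BT(n)	(((n) << MY_SSP_BT_SHIFT) & MY_SSP_BT_MASK)

	static int my_decode_ssp_bt(unsigned long quirk)
	{
		return (quirk & MY_SSP_BT_MASK) >> MY_SSP_BT_SHIFT;
	}

	/* board table side: my_decode_ssp_bt(MY_SSP_BT(2)) == 2, i.e. BT offload on SSP2 */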
diff --git a/sound/soc/intel/boards/sof_da7219_max98373.c b/sound/soc/intel/boards/sof_da7219_max98373.c
index a83f30b687cf..34cf849a8344 100644
--- a/sound/soc/intel/boards/sof_da7219_max98373.c
+++ b/sound/soc/intel/boards/sof_da7219_max98373.c
@@ -135,6 +135,17 @@ static const struct snd_soc_dapm_route max98360a_map[] = {
{"DMic", NULL, "SoC DMIC"},
};
+static struct snd_soc_jack_pin jack_pins[] = {
+ {
+ .pin = "Headphone Jack",
+ .mask = SND_JACK_HEADPHONE,
+ },
+ {
+ .pin = "Headset Mic",
+ .mask = SND_JACK_MICROPHONE,
+ },
+};
+
static struct snd_soc_jack headset;
static int da7219_codec_init(struct snd_soc_pcm_runtime *rtd)
@@ -156,11 +167,13 @@ static int da7219_codec_init(struct snd_soc_pcm_runtime *rtd)
* Headset buttons map to the google Reference headset.
* These can be configured by userspace.
*/
- ret = snd_soc_card_jack_new(rtd->card, "Headset Jack",
- SND_JACK_HEADSET | SND_JACK_BTN_0 |
- SND_JACK_BTN_1 | SND_JACK_BTN_2 |
- SND_JACK_BTN_3 | SND_JACK_LINEOUT,
- &headset);
+ ret = snd_soc_card_jack_new_pins(rtd->card, "Headset Jack",
+ SND_JACK_HEADSET | SND_JACK_BTN_0 |
+ SND_JACK_BTN_1 | SND_JACK_BTN_2 |
+ SND_JACK_BTN_3 | SND_JACK_LINEOUT,
+ &headset,
+ jack_pins,
+ ARRAY_SIZE(jack_pins));
if (ret) {
dev_err(rtd->dev, "Headset Jack creation failed: %d\n", ret);
return ret;
diff --git a/sound/soc/intel/boards/sof_es8336.c b/sound/soc/intel/boards/sof_es8336.c
index 23d03e0f7759..606cc3242a60 100644
--- a/sound/soc/intel/boards/sof_es8336.c
+++ b/sound/soc/intel/boards/sof_es8336.c
@@ -28,6 +28,24 @@
#define SOF_ES8336_SSP_CODEC_MASK (GENMASK(3, 0))
#define SOF_ES8336_SPEAKERS_EN_GPIO1_QUIRK BIT(4)
+
+/* HDMI capture */
+#define SOF_SSP_HDMI_CAPTURE_PRESENT BIT(14)
+#define SOF_NO_OF_HDMI_CAPTURE_SSP_SHIFT 15
+#define SOF_NO_OF_HDMI_CAPTURE_SSP_MASK (GENMASK(16, 15))
+#define SOF_NO_OF_HDMI_CAPTURE_SSP(quirk) \
+ (((quirk) << SOF_NO_OF_HDMI_CAPTURE_SSP_SHIFT) & SOF_NO_OF_HDMI_CAPTURE_SSP_MASK)
+
+#define SOF_HDMI_CAPTURE_1_SSP_SHIFT 7
+#define SOF_HDMI_CAPTURE_1_SSP_MASK (GENMASK(9, 7))
+#define SOF_HDMI_CAPTURE_1_SSP(quirk) \
+ (((quirk) << SOF_HDMI_CAPTURE_1_SSP_SHIFT) & SOF_HDMI_CAPTURE_1_SSP_MASK)
+
+#define SOF_HDMI_CAPTURE_2_SSP_SHIFT 10
+#define SOF_HDMI_CAPTURE_2_SSP_MASK (GENMASK(12, 10))
+#define SOF_HDMI_CAPTURE_2_SSP(quirk) \
+ (((quirk) << SOF_HDMI_CAPTURE_2_SSP_SHIFT) & SOF_HDMI_CAPTURE_2_SSP_MASK)
+
#define SOF_ES8336_ENABLE_DMIC BIT(5)
#define SOF_ES8336_JD_INVERTED BIT(6)
#define SOF_ES8336_HEADPHONE_GPIO BIT(7)
@@ -57,28 +75,26 @@ static const struct acpi_gpio_params enable_gpio0 = { 0, 0, true };
static const struct acpi_gpio_params enable_gpio1 = { 1, 0, true };
static const struct acpi_gpio_mapping acpi_speakers_enable_gpio0[] = {
- { "speakers-enable-gpios", &enable_gpio0, 1 },
+ { "speakers-enable-gpios", &enable_gpio0, 1, ACPI_GPIO_QUIRK_ONLY_GPIOIO },
{ }
};
static const struct acpi_gpio_mapping acpi_speakers_enable_gpio1[] = {
- { "speakers-enable-gpios", &enable_gpio1, 1 },
+ { "speakers-enable-gpios", &enable_gpio1, 1, ACPI_GPIO_QUIRK_ONLY_GPIOIO },
};
static const struct acpi_gpio_mapping acpi_enable_both_gpios[] = {
- { "speakers-enable-gpios", &enable_gpio0, 1 },
- { "headphone-enable-gpios", &enable_gpio1, 1 },
+ { "speakers-enable-gpios", &enable_gpio0, 1, ACPI_GPIO_QUIRK_ONLY_GPIOIO },
+ { "headphone-enable-gpios", &enable_gpio1, 1, ACPI_GPIO_QUIRK_ONLY_GPIOIO },
{ }
};
static const struct acpi_gpio_mapping acpi_enable_both_gpios_rev_order[] = {
- { "speakers-enable-gpios", &enable_gpio1, 1 },
- { "headphone-enable-gpios", &enable_gpio0, 1 },
+ { "speakers-enable-gpios", &enable_gpio1, 1, ACPI_GPIO_QUIRK_ONLY_GPIOIO },
+ { "headphone-enable-gpios", &enable_gpio0, 1, ACPI_GPIO_QUIRK_ONLY_GPIOIO },
{ }
};
-static const struct acpi_gpio_mapping *gpio_mapping = acpi_speakers_enable_gpio0;
-
static void log_quirks(struct device *dev)
{
dev_info(dev, "quirk mask %#lx\n", quirk);
@@ -272,15 +288,6 @@ static int sof_es8336_quirk_cb(const struct dmi_system_id *id)
{
quirk = (unsigned long)id->driver_data;
- if (quirk & SOF_ES8336_HEADPHONE_GPIO) {
- if (quirk & SOF_ES8336_SPEAKERS_EN_GPIO1_QUIRK)
- gpio_mapping = acpi_enable_both_gpios;
- else
- gpio_mapping = acpi_enable_both_gpios_rev_order;
- } else if (quirk & SOF_ES8336_SPEAKERS_EN_GPIO1_QUIRK) {
- gpio_mapping = acpi_speakers_enable_gpio1;
- }
-
return 1;
}
@@ -356,6 +363,13 @@ static struct snd_soc_dai_link_component dmic_component[] = {
}
};
+static struct snd_soc_dai_link_component dummy_component[] = {
+ {
+ .name = "snd-soc-dummy",
+ .dai_name = "snd-soc-dummy-dai",
+ }
+};
+
static int sof_es8336_late_probe(struct snd_soc_card *card)
{
struct sof_es8336_private *priv = snd_soc_card_get_drvdata(card);
@@ -507,6 +521,37 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
id++;
}
+ /* HDMI-In SSP */
+ if (quirk & SOF_SSP_HDMI_CAPTURE_PRESENT) {
+ int num_of_hdmi_ssp = (quirk & SOF_NO_OF_HDMI_CAPTURE_SSP_MASK) >>
+ SOF_NO_OF_HDMI_CAPTURE_SSP_SHIFT;
+
+ for (i = 1; i <= num_of_hdmi_ssp; i++) {
+ int port = (i == 1 ? (quirk & SOF_HDMI_CAPTURE_1_SSP_MASK) >>
+ SOF_HDMI_CAPTURE_1_SSP_SHIFT :
+ (quirk & SOF_HDMI_CAPTURE_2_SSP_MASK) >>
+ SOF_HDMI_CAPTURE_2_SSP_SHIFT);
+
+ links[id].cpus = &cpus[id];
+ links[id].cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL,
+ "SSP%d Pin", port);
+ if (!links[id].cpus->dai_name)
+ return NULL;
+ links[id].name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d-HDMI", port);
+ if (!links[id].name)
+ return NULL;
+ links[id].id = id + hdmi_id_offset;
+ links[id].codecs = dummy_component;
+ links[id].num_codecs = ARRAY_SIZE(dummy_component);
+ links[id].platforms = platform_component;
+ links[id].num_platforms = ARRAY_SIZE(platform_component);
+ links[id].dpcm_capture = 1;
+ links[id].no_pcm = 1;
+ links[id].num_cpus = 1;
+ id++;
+ }
+ }
+
return links;
devm_err:
@@ -529,6 +574,7 @@ static int sof_es8336_probe(struct platform_device *pdev)
struct acpi_device *adev;
struct snd_soc_dai_link *dai_links;
struct device *codec_dev;
+ const struct acpi_gpio_mapping *gpio_mapping;
unsigned int cnt = 0;
int dmic_be_num = 0;
int hdmi_num = 3;
@@ -541,29 +587,34 @@ static int sof_es8336_probe(struct platform_device *pdev)
card = &sof_es8336_card;
card->dev = dev;
+ if (pdev->id_entry && pdev->id_entry->driver_data)
+ quirk = (unsigned long)pdev->id_entry->driver_data;
+
/* check GPIO DMI quirks */
dmi_check_system(sof_es8336_quirk_table);
- if (!mach->mach_params.i2s_link_mask) {
- dev_warn(dev, "No I2S link information provided, using SSP0. This may need to be modified with the quirk module parameter\n");
- } else {
- /*
- * Set configuration based on platform NHLT.
- * In this machine driver, we can only support one SSP for the
- * ES8336 link, the else-if below are intentional.
- * In some cases multiple SSPs can be reported by NHLT, starting MSB-first
- * seems to pick the right connection.
- */
- unsigned long ssp = 0;
-
- if (mach->mach_params.i2s_link_mask & BIT(2))
- ssp = SOF_ES8336_SSP_CODEC(2);
- else if (mach->mach_params.i2s_link_mask & BIT(1))
- ssp = SOF_ES8336_SSP_CODEC(1);
- else if (mach->mach_params.i2s_link_mask & BIT(0))
- ssp = SOF_ES8336_SSP_CODEC(0);
-
- quirk |= ssp;
+	/* Use the NHLT configuration only for the non-HDMI capture use case:
+	 * with HDMI capture more than one SSP is enabled, so the wrong codec
+	 * SSP could otherwise be selected.
+	 */
+ if (mach->tplg_quirk_mask & SND_SOC_ACPI_TPLG_INTEL_SSP_NUMBER) {
+ if (!mach->mach_params.i2s_link_mask) {
+ dev_warn(dev, "No I2S link information provided, using SSP0. This may need to be modified with the quirk module parameter\n");
+ } else {
+ /*
+ * Set configuration based on platform NHLT.
+ * In this machine driver, we can only support one SSP for the
+ * ES8336 link.
+ * In some cases multiple SSPs can be reported by NHLT, starting MSB-first
+ * seems to pick the right connection.
+ */
+ unsigned long ssp;
+
+ /* fls returns 1-based results, SSPs indices are 0-based */
+ ssp = fls(mach->mach_params.i2s_link_mask) - 1;
+
+ quirk |= ssp;
+ }
}
if (mach->mach_params.dmic_num)
@@ -579,7 +630,13 @@ static int sof_es8336_probe(struct platform_device *pdev)
if (quirk & SOF_ES8336_ENABLE_DMIC)
dmic_be_num = 2;
- sof_es8336_card.num_links += dmic_be_num + hdmi_num;
+ /* compute number of dai links */
+ sof_es8336_card.num_links = 1 + dmic_be_num + hdmi_num;
+
+ if (quirk & SOF_SSP_HDMI_CAPTURE_PRESENT)
+ sof_es8336_card.num_links += (quirk & SOF_NO_OF_HDMI_CAPTURE_SSP_MASK) >>
+ SOF_NO_OF_HDMI_CAPTURE_SSP_SHIFT;
+
dai_links = sof_card_dai_links_create(dev,
SOF_ES8336_SSP_CODEC(quirk),
dmic_be_num, hdmi_num);
@@ -635,6 +692,17 @@ static int sof_es8336_probe(struct platform_device *pdev)
}
/* get speaker enable GPIO */
+ if (quirk & SOF_ES8336_HEADPHONE_GPIO) {
+ if (quirk & SOF_ES8336_SPEAKERS_EN_GPIO1_QUIRK)
+ gpio_mapping = acpi_enable_both_gpios;
+ else
+ gpio_mapping = acpi_enable_both_gpios_rev_order;
+ } else if (quirk & SOF_ES8336_SPEAKERS_EN_GPIO1_QUIRK) {
+ gpio_mapping = acpi_speakers_enable_gpio1;
+ } else {
+ gpio_mapping = acpi_speakers_enable_gpio0;
+ }
+
ret = devm_acpi_dev_add_driver_gpios(codec_dev, gpio_mapping);
if (ret)
dev_warn(codec_dev, "unable to add GPIO mapping table\n");
@@ -690,6 +758,24 @@ static int sof_es8336_remove(struct platform_device *pdev)
return 0;
}
+static const struct platform_device_id board_ids[] = {
+ {
+ .name = "sof-essx8336", /* default quirk == 0 */
+ },
+ {
+ .name = "adl_es83x6_c1_h02",
+ .driver_data = (kernel_ulong_t)(SOF_ES8336_SSP_CODEC(1) |
+ SOF_NO_OF_HDMI_CAPTURE_SSP(2) |
+ SOF_HDMI_CAPTURE_1_SSP(0) |
+ SOF_HDMI_CAPTURE_2_SSP(2) |
+ SOF_SSP_HDMI_CAPTURE_PRESENT |
+ SOF_ES8336_SPEAKERS_EN_GPIO1_QUIRK |
+ SOF_ES8336_JD_INVERTED),
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, board_ids);
+
static struct platform_driver sof_es8336_driver = {
.driver = {
.name = "sof-essx8336",
@@ -697,10 +783,10 @@ static struct platform_driver sof_es8336_driver = {
},
.probe = sof_es8336_probe,
.remove = sof_es8336_remove,
+ .id_table = board_ids,
};
module_platform_driver(sof_es8336_driver);
MODULE_DESCRIPTION("ASoC Intel(R) SOF + ES8336 Machine driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:sof-essx8336");
MODULE_IMPORT_NS(SND_SOC_INTEL_HDA_DSP_COMMON);
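In the sof_es8336 probe rework above, the open-coded BIT(2)/BIT(1)/BIT(0) chain is replaced by fls(), which still selects the highest-numbered SSP reported by NHLT. A short illustration of that decision (the mask value is an example, not a real NHLT report):

	/* needs <linux/bitops.h> */
	static unsigned int my_pick_ssp(unsigned int i2s_link_mask)
	{
		/* fls() is 1-based, SSP indices are 0-based */
		return fls(i2s_link_mask) - 1;
	}

	/* my_pick_ssp(BIT(2) | BIT(0)) == 2: the highest reported SSP wins */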
diff --git a/sound/soc/intel/boards/sof_nau8825.c b/sound/soc/intel/boards/sof_nau8825.c
index 97dcd204a246..8d7e5ba9e516 100644
--- a/sound/soc/intel/boards/sof_nau8825.c
+++ b/sound/soc/intel/boards/sof_nau8825.c
@@ -81,6 +81,17 @@ static int sof_hdmi_init(struct snd_soc_pcm_runtime *rtd)
return 0;
}
+static struct snd_soc_jack_pin jack_pins[] = {
+ {
+ .pin = "Headphone Jack",
+ .mask = SND_JACK_HEADPHONE,
+ },
+ {
+ .pin = "Headset Mic",
+ .mask = SND_JACK_MICROPHONE,
+ },
+};
+
static int sof_nau8825_codec_init(struct snd_soc_pcm_runtime *rtd)
{
struct sof_card_private *ctx = snd_soc_card_get_drvdata(rtd->card);
@@ -93,11 +104,13 @@ static int sof_nau8825_codec_init(struct snd_soc_pcm_runtime *rtd)
* Headset buttons map to the google Reference headset.
* These can be configured by userspace.
*/
- ret = snd_soc_card_jack_new(rtd->card, "Headset Jack",
- SND_JACK_HEADSET | SND_JACK_BTN_0 |
- SND_JACK_BTN_1 | SND_JACK_BTN_2 |
- SND_JACK_BTN_3,
- &ctx->sof_headset);
+ ret = snd_soc_card_jack_new_pins(rtd->card, "Headset Jack",
+ SND_JACK_HEADSET | SND_JACK_BTN_0 |
+ SND_JACK_BTN_1 | SND_JACK_BTN_2 |
+ SND_JACK_BTN_3,
+ &ctx->sof_headset,
+ jack_pins,
+ ARRAY_SIZE(jack_pins));
if (ret) {
dev_err(rtd->dev, "Headset Jack creation failed: %d\n", ret);
return ret;
@@ -177,11 +190,6 @@ static int sof_card_late_probe(struct snd_soc_card *card)
struct sof_hdmi_pcm *pcm;
int err;
- if (list_empty(&ctx->hdmi_pcm_list))
- return -EINVAL;
-
- pcm = list_first_entry(&ctx->hdmi_pcm_list, struct sof_hdmi_pcm, head);
-
if (sof_nau8825_quirk & SOF_MAX98373_SPEAKER_AMP_PRESENT) {
/* Disable Left and Right Spk pin after boot */
snd_soc_dapm_disable_pin(dapm, "Left Spk");
@@ -191,6 +199,11 @@ static int sof_card_late_probe(struct snd_soc_card *card)
return err;
}
+ if (list_empty(&ctx->hdmi_pcm_list))
+ return -EINVAL;
+
+ pcm = list_first_entry(&ctx->hdmi_pcm_list, struct sof_hdmi_pcm, head);
+
return hda_dsp_hdmi_build_controls(card, pcm->codec_dai->component);
}
diff --git a/sound/soc/intel/boards/sof_pcm512x.c b/sound/soc/intel/boards/sof_pcm512x.c
index 6815204e58d5..d4c67d5340a9 100644
--- a/sound/soc/intel/boards/sof_pcm512x.c
+++ b/sound/soc/intel/boards/sof_pcm512x.c
@@ -419,7 +419,7 @@ static int sof_audio_probe(struct platform_device *pdev)
static int sof_pcm512x_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
- struct snd_soc_component *component = NULL;
+ struct snd_soc_component *component;
for_each_card_components(card, component) {
if (!strcmp(component->name, pcm512x_component[0].name)) {
diff --git a/sound/soc/intel/boards/sof_realtek_common.c b/sound/soc/intel/boards/sof_realtek_common.c
index 2ab568c1d40b..b9643ca2e2f2 100644
--- a/sound/soc/intel/boards/sof_realtek_common.c
+++ b/sound/soc/intel/boards/sof_realtek_common.c
@@ -463,26 +463,26 @@ EXPORT_SYMBOL_NS(sof_rt1308_dai_link, SND_SOC_INTEL_SOF_REALTEK_COMMON);
* 2-amp Configuration for RT1019
*/
-static const struct snd_soc_dapm_route rt1019_dapm_routes[] = {
+static const struct snd_soc_dapm_route rt1019p_dapm_routes[] = {
/* speaker */
{ "Left Spk", NULL, "Speaker" },
{ "Right Spk", NULL, "Speaker" },
};
-static struct snd_soc_dai_link_component rt1019_components[] = {
+static struct snd_soc_dai_link_component rt1019p_components[] = {
{
- .name = RT1019_DEV0_NAME,
- .dai_name = RT1019_CODEC_DAI,
+ .name = RT1019P_DEV0_NAME,
+ .dai_name = RT1019P_CODEC_DAI,
},
};
-static int rt1019_init(struct snd_soc_pcm_runtime *rtd)
+static int rt1019p_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_card *card = rtd->card;
int ret;
- ret = snd_soc_dapm_add_routes(&card->dapm, rt1019_dapm_routes,
- ARRAY_SIZE(rt1019_dapm_routes));
+ ret = snd_soc_dapm_add_routes(&card->dapm, rt1019p_dapm_routes,
+ ARRAY_SIZE(rt1019p_dapm_routes));
if (ret) {
dev_err(rtd->dev, "Speaker map addition failed: %d\n", ret);
return ret;
@@ -490,13 +490,13 @@ static int rt1019_init(struct snd_soc_pcm_runtime *rtd)
return ret;
}
-void sof_rt1019_dai_link(struct snd_soc_dai_link *link)
+void sof_rt1019p_dai_link(struct snd_soc_dai_link *link)
{
- link->codecs = rt1019_components;
- link->num_codecs = ARRAY_SIZE(rt1019_components);
- link->init = rt1019_init;
+ link->codecs = rt1019p_components;
+ link->num_codecs = ARRAY_SIZE(rt1019p_components);
+ link->init = rt1019p_init;
}
-EXPORT_SYMBOL_NS(sof_rt1019_dai_link, SND_SOC_INTEL_SOF_REALTEK_COMMON);
+EXPORT_SYMBOL_NS(sof_rt1019p_dai_link, SND_SOC_INTEL_SOF_REALTEK_COMMON);
MODULE_DESCRIPTION("ASoC Intel SOF Realtek helpers");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/intel/boards/sof_realtek_common.h b/sound/soc/intel/boards/sof_realtek_common.h
index ec3eea633e04..778443421090 100644
--- a/sound/soc/intel/boards/sof_realtek_common.h
+++ b/sound/soc/intel/boards/sof_realtek_common.h
@@ -39,9 +39,9 @@ void sof_rt1015_codec_conf(struct snd_soc_card *card);
#define RT1308_DEV0_NAME "i2c-10EC1308:00"
void sof_rt1308_dai_link(struct snd_soc_dai_link *link);
-#define RT1019_CODEC_DAI "HiFi"
-#define RT1019_DEV0_NAME "RTL1019:00"
+#define RT1019P_CODEC_DAI "HiFi"
+#define RT1019P_DEV0_NAME "RTL1019:00"
-void sof_rt1019_dai_link(struct snd_soc_dai_link *link);
+void sof_rt1019p_dai_link(struct snd_soc_dai_link *link);
#endif /* __SOF_REALTEK_COMMON_H */
diff --git a/sound/soc/intel/boards/sof_rt5682.c b/sound/soc/intel/boards/sof_rt5682.c
index 4a90a0a5d831..045965312245 100644
--- a/sound/soc/intel/boards/sof_rt5682.c
+++ b/sound/soc/intel/boards/sof_rt5682.c
@@ -247,6 +247,17 @@ static int sof_hdmi_init(struct snd_soc_pcm_runtime *rtd)
return 0;
}
+static struct snd_soc_jack_pin jack_pins[] = {
+ {
+ .pin = "Headphone Jack",
+ .mask = SND_JACK_HEADPHONE,
+ },
+ {
+ .pin = "Headset Mic",
+ .mask = SND_JACK_MICROPHONE,
+ },
+};
+
static int sof_rt5682_codec_init(struct snd_soc_pcm_runtime *rtd)
{
struct sof_card_private *ctx = snd_soc_card_get_drvdata(rtd->card);
@@ -294,11 +305,13 @@ static int sof_rt5682_codec_init(struct snd_soc_pcm_runtime *rtd)
* Headset buttons map to the google Reference headset.
* These can be configured by userspace.
*/
- ret = snd_soc_card_jack_new(rtd->card, "Headset Jack",
- SND_JACK_HEADSET | SND_JACK_BTN_0 |
- SND_JACK_BTN_1 | SND_JACK_BTN_2 |
- SND_JACK_BTN_3,
- &ctx->sof_headset);
+ ret = snd_soc_card_jack_new_pins(rtd->card, "Headset Jack",
+ SND_JACK_HEADSET | SND_JACK_BTN_0 |
+ SND_JACK_BTN_1 | SND_JACK_BTN_2 |
+ SND_JACK_BTN_3,
+ &ctx->sof_headset,
+ jack_pins,
+ ARRAY_SIZE(jack_pins));
if (ret) {
dev_err(rtd->dev, "Headset Jack creation failed: %d\n", ret);
return ret;
@@ -434,6 +447,15 @@ static int sof_card_late_probe(struct snd_soc_card *card)
struct sof_hdmi_pcm *pcm;
int err;
+ if (sof_rt5682_quirk & SOF_MAX98373_SPEAKER_AMP_PRESENT) {
+ /* Disable Left and Right Spk pin after boot */
+ snd_soc_dapm_disable_pin(dapm, "Left Spk");
+ snd_soc_dapm_disable_pin(dapm, "Right Spk");
+ err = snd_soc_dapm_sync(dapm);
+ if (err < 0)
+ return err;
+ }
+
/* HDMI is not supported by SOF on Baytrail/CherryTrail */
if (is_legacy_cpu || !ctx->idisp_codec)
return 0;
@@ -464,15 +486,6 @@ static int sof_card_late_probe(struct snd_soc_card *card)
return err;
}
- if (sof_rt5682_quirk & SOF_MAX98373_SPEAKER_AMP_PRESENT) {
- /* Disable Left and Right Spk pin after boot */
- snd_soc_dapm_disable_pin(dapm, "Left Spk");
- snd_soc_dapm_disable_pin(dapm, "Right Spk");
- err = snd_soc_dapm_sync(dapm);
- if (err < 0)
- return err;
- }
-
return hdac_hdmi_jack_port_init(component, &card->dapm);
}
@@ -731,7 +744,7 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
} else if (sof_rt5682_quirk & SOF_RT1015P_SPEAKER_AMP_PRESENT) {
sof_rt1015p_dai_link(&links[id]);
} else if (sof_rt5682_quirk & SOF_RT1019_SPEAKER_AMP_PRESENT) {
- sof_rt1019_dai_link(&links[id]);
+ sof_rt1019p_dai_link(&links[id]);
} else if (sof_rt5682_quirk &
SOF_MAX98373_SPEAKER_AMP_PRESENT) {
links[id].codecs = max_98373_components;
@@ -1079,6 +1092,14 @@ static const struct platform_device_id board_ids[] = {
SOF_RT5682_SSP_AMP(1) |
SOF_RT5682_NUM_HDMIDEV(4)),
},
+ {
+ .name = "mtl_mx98357_rt5682",
+ .driver_data = (kernel_ulong_t)(SOF_RT5682_MCLK_EN |
+ SOF_RT5682_SSP_CODEC(0) |
+ SOF_SPEAKER_AMP_PRESENT |
+ SOF_RT5682_SSP_AMP(1) |
+ SOF_RT5682_NUM_HDMIDEV(4)),
+ },
{ }
};
MODULE_DEVICE_TABLE(platform, board_ids);
diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
index ad826ad82d51..a49bfaab6b21 100644
--- a/sound/soc/intel/boards/sof_sdw.c
+++ b/sound/soc/intel/boards/sof_sdw.c
@@ -250,6 +250,16 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
.callback = sof_sdw_quirk_cb,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0AF0")
+ },
+ .driver_data = (void *)(SOF_SDW_TGL_HDMI |
+ RT711_JD2 |
+ SOF_SDW_FOUR_SPK),
+ },
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0AF3"),
},
/* No Jack */
@@ -315,6 +325,23 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
RT711_JD2 |
SOF_SDW_FOUR_SPK),
},
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OMEN by HP Gaming Laptop 16-k0xxx"),
+ },
+ .driver_data = (void *)(SOF_SDW_TGL_HDMI |
+ RT711_JD2),
+ },
+ /* MeteorLake devices */
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_mtlrvp"),
+ },
+ .driver_data = (void *)(RT711_JD1 | SOF_SDW_TGL_HDMI),
+ },
{}
};
@@ -1127,10 +1154,14 @@ static int sof_card_dai_links_create(struct device *dev,
for (i = 0; i < ARRAY_SIZE(codec_info_list); i++)
codec_info_list[i].amp_num = 0;
- if (sof_sdw_quirk & SOF_SDW_TGL_HDMI)
- hdmi_num = SOF_TGL_HDMI_COUNT;
- else
- hdmi_num = SOF_PRE_TGL_HDMI_COUNT;
+ if (mach_params->codec_mask & IDISP_CODEC_MASK) {
+ ctx->idisp_codec = true;
+
+ if (sof_sdw_quirk & SOF_SDW_TGL_HDMI)
+ hdmi_num = SOF_TGL_HDMI_COUNT;
+ else
+ hdmi_num = SOF_PRE_TGL_HDMI_COUNT;
+ }
ssp_mask = SOF_SSP_GET_PORT(sof_sdw_quirk);
/*
@@ -1150,9 +1181,6 @@ static int sof_card_dai_links_create(struct device *dev,
return ret;
}
- if (mach_params->codec_mask & IDISP_CODEC_MASK)
- ctx->idisp_codec = true;
-
/* enable dmic01 & dmic16k */
dmic_num = (sof_sdw_quirk & SOF_SDW_PCH_DMIC || mach_params->dmic_num) ? 2 : 0;
comp_num += dmic_num;
@@ -1375,7 +1403,9 @@ HDMI:
static int sof_sdw_card_late_probe(struct snd_soc_card *card)
{
- int i, ret;
+ struct mc_private *ctx = snd_soc_card_get_drvdata(card);
+ int ret = 0;
+ int i;
for (i = 0; i < ARRAY_SIZE(codec_info_list); i++) {
if (!codec_info_list[i].late_probe)
@@ -1386,7 +1416,10 @@ static int sof_sdw_card_late_probe(struct snd_soc_card *card)
return ret;
}
- return sof_sdw_hdmi_card_late_probe(card);
+ if (ctx->idisp_codec)
+ ret = sof_sdw_hdmi_card_late_probe(card);
+
+ return ret;
}
/* SoC card */
@@ -1433,7 +1466,7 @@ static int mc_probe(struct platform_device *pdev)
int amp_num = 0, i;
int ret;
- dev_dbg(&pdev->dev, "Entry %s\n", __func__);
+ dev_dbg(&pdev->dev, "Entry\n");
ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
diff --git a/sound/soc/intel/boards/sof_sdw_rt711.c b/sound/soc/intel/boards/sof_sdw_rt711.c
index 49ff0871e9e7..8291967f23f3 100644
--- a/sound/soc/intel/boards/sof_sdw_rt711.c
+++ b/sound/soc/intel/boards/sof_sdw_rt711.c
@@ -139,6 +139,9 @@ int sof_sdw_rt711_exit(struct snd_soc_card *card, struct snd_soc_dai_link *dai_l
{
struct mc_private *ctx = snd_soc_card_get_drvdata(card);
+ if (!ctx->headset_codec_dev)
+ return 0;
+
device_remove_software_node(ctx->headset_codec_dev);
put_device(ctx->headset_codec_dev);
diff --git a/sound/soc/intel/boards/sof_sdw_rt711_sdca.c b/sound/soc/intel/boards/sof_sdw_rt711_sdca.c
index b3fc32bacfa8..7f16304d025b 100644
--- a/sound/soc/intel/boards/sof_sdw_rt711_sdca.c
+++ b/sound/soc/intel/boards/sof_sdw_rt711_sdca.c
@@ -140,6 +140,9 @@ int sof_sdw_rt711_sdca_exit(struct snd_soc_card *card, struct snd_soc_dai_link *
{
struct mc_private *ctx = snd_soc_card_get_drvdata(card);
+ if (!ctx->headset_codec_dev)
+ return 0;
+
device_remove_software_node(ctx->headset_codec_dev);
put_device(ctx->headset_codec_dev);
diff --git a/sound/soc/intel/catpt/device.c b/sound/soc/intel/catpt/device.c
index 85a34e37316d..d48a71d2cf1e 100644
--- a/sound/soc/intel/catpt/device.c
+++ b/sound/soc/intel/catpt/device.c
@@ -254,14 +254,11 @@ static int catpt_acpi_probe(struct platform_device *pdev)
return -ENODEV;
}
- spec = device_get_match_data(dev);
- if (!spec)
- return -ENODEV;
-
cdev = devm_kzalloc(dev, sizeof(*cdev), GFP_KERNEL);
if (!cdev)
return -ENOMEM;
+ spec = (const struct catpt_spec *)id->driver_data;
catpt_dev_init(cdev, dev, spec);
/* map DSP bar address */
diff --git a/sound/soc/intel/catpt/pcm.c b/sound/soc/intel/catpt/pcm.c
index a26000cd5ceb..30ca5416c9a3 100644
--- a/sound/soc/intel/catpt/pcm.c
+++ b/sound/soc/intel/catpt/pcm.c
@@ -667,7 +667,9 @@ static int catpt_dai_pcm_new(struct snd_soc_pcm_runtime *rtm,
if (!memcmp(&cdev->devfmt[devfmt.iface], &devfmt, sizeof(devfmt)))
return 0;
- pm_runtime_get_sync(cdev->dev);
+ ret = pm_runtime_resume_and_get(cdev->dev);
+ if (ret < 0 && ret != -EACCES)
+ return ret;
ret = catpt_ipc_set_device_format(cdev, &devfmt);
@@ -853,9 +855,12 @@ static int catpt_mixer_volume_get(struct snd_kcontrol *kcontrol,
snd_soc_kcontrol_component(kcontrol);
struct catpt_dev *cdev = dev_get_drvdata(component->dev);
u32 dspvol;
+ int ret;
int i;
- pm_runtime_get_sync(cdev->dev);
+ ret = pm_runtime_resume_and_get(cdev->dev);
+ if (ret < 0 && ret != -EACCES)
+ return ret;
for (i = 0; i < CATPT_CHANNELS_MAX; i++) {
dspvol = catpt_mixer_volume(cdev, &cdev->mixer, i);
@@ -876,7 +881,9 @@ static int catpt_mixer_volume_put(struct snd_kcontrol *kcontrol,
struct catpt_dev *cdev = dev_get_drvdata(component->dev);
int ret;
- pm_runtime_get_sync(cdev->dev);
+ ret = pm_runtime_resume_and_get(cdev->dev);
+ if (ret < 0 && ret != -EACCES)
+ return ret;
ret = catpt_set_dspvol(cdev, cdev->mixer.mixer_hw_id,
ucontrol->value.integer.value);
@@ -897,6 +904,7 @@ static int catpt_stream_volume_get(struct snd_kcontrol *kcontrol,
struct catpt_dev *cdev = dev_get_drvdata(component->dev);
long *ctlvol = (long *)kcontrol->private_value;
u32 dspvol;
+ int ret;
int i;
stream = catpt_stream_find(cdev, pin_id);
@@ -906,7 +914,9 @@ static int catpt_stream_volume_get(struct snd_kcontrol *kcontrol,
return 0;
}
- pm_runtime_get_sync(cdev->dev);
+ ret = pm_runtime_resume_and_get(cdev->dev);
+ if (ret < 0 && ret != -EACCES)
+ return ret;
for (i = 0; i < CATPT_CHANNELS_MAX; i++) {
dspvol = catpt_stream_volume(cdev, stream, i);
@@ -937,7 +947,9 @@ static int catpt_stream_volume_put(struct snd_kcontrol *kcontrol,
return 0;
}
- pm_runtime_get_sync(cdev->dev);
+ ret = pm_runtime_resume_and_get(cdev->dev);
+ if (ret < 0 && ret != -EACCES)
+ return ret;
ret = catpt_set_dspvol(cdev, stream->info.stream_hw_id,
ucontrol->value.integer.value);
@@ -1013,7 +1025,9 @@ static int catpt_loopback_switch_put(struct snd_kcontrol *kcontrol,
return 0;
}
- pm_runtime_get_sync(cdev->dev);
+ ret = pm_runtime_resume_and_get(cdev->dev);
+ if (ret < 0 && ret != -EACCES)
+ return ret;
ret = catpt_ipc_mute_loopback(cdev, stream->info.stream_hw_id, mute);
diff --git a/sound/soc/intel/catpt/sysfs.c b/sound/soc/intel/catpt/sysfs.c
index 9579e233a15d..1bdbcc04dc71 100644
--- a/sound/soc/intel/catpt/sysfs.c
+++ b/sound/soc/intel/catpt/sysfs.c
@@ -15,7 +15,9 @@ static ssize_t fw_version_show(struct device *dev,
struct catpt_fw_version version;
int ret;
- pm_runtime_get_sync(cdev->dev);
+ ret = pm_runtime_resume_and_get(cdev->dev);
+ if (ret < 0 && ret != -EACCES)
+ return ret;
ret = catpt_ipc_get_fw_version(cdev, &version);
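The catpt hunks above (and the later skl-pcm.c one) replace pm_runtime_get_sync() with pm_runtime_resume_and_get(), which drops the usage count itself on failure so callers only need to check the return value; -EACCES (runtime PM disabled) is deliberately tolerated. A sketch of the resulting pattern, with an illustrative function name and body:

	/* needs <linux/pm_runtime.h> */
	static int my_dsp_query(struct device *dev)
	{
		int ret;

		ret = pm_runtime_resume_and_get(dev);
		if (ret < 0 && ret != -EACCES)	/* -EACCES: runtime PM disabled, proceed anyway */
			return ret;

		/* ... issue the IPC / touch the hardware here ... */

		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
		return 0;
	}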
diff --git a/sound/soc/intel/common/Makefile b/sound/soc/intel/common/Makefile
index fef0b2d1de68..8ca8f872ec80 100644
--- a/sound/soc/intel/common/Makefile
+++ b/sound/soc/intel/common/Makefile
@@ -9,6 +9,7 @@ snd-soc-acpi-intel-match-objs := soc-acpi-intel-byt-match.o soc-acpi-intel-cht-m
soc-acpi-intel-cml-match.o soc-acpi-intel-icl-match.o \
soc-acpi-intel-tgl-match.o soc-acpi-intel-ehl-match.o \
soc-acpi-intel-jsl-match.o soc-acpi-intel-adl-match.o \
+ soc-acpi-intel-mtl-match.o \
soc-acpi-intel-hda-match.o \
soc-acpi-intel-sdw-mockup-match.o
diff --git a/sound/soc/intel/common/soc-acpi-intel-adl-match.c b/sound/soc/intel/common/soc-acpi-intel-adl-match.c
index c1385161cdc8..9990d5502d26 100644
--- a/sound/soc/intel/common/soc-acpi-intel-adl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-adl-match.c
@@ -8,6 +8,11 @@
#include <sound/soc-acpi.h>
#include <sound/soc-acpi-intel-match.h>
+static const struct snd_soc_acpi_codecs essx_83x6 = {
+ .num_codecs = 3,
+ .codecs = { "ESSX8316", "ESSX8326", "ESSX8336"},
+};
+
static const struct snd_soc_acpi_endpoint single_endpoint = {
.num = 0,
.aggregated = 0,
@@ -137,6 +142,15 @@ static const struct snd_soc_acpi_adr_device rt1316_2_single_adr[] = {
}
};
+static const struct snd_soc_acpi_adr_device rt1316_3_single_adr[] = {
+ {
+ .adr = 0x000330025D131601ull,
+ .num_endpoints = 1,
+ .endpoints = &single_endpoint,
+ .name_prefix = "rt1316-1"
+ }
+};
+
static const struct snd_soc_acpi_adr_device rt714_0_adr[] = {
{
.adr = 0x000030025D071401ull,
@@ -326,6 +340,20 @@ static const struct snd_soc_acpi_link_adr adl_sdw_rt1316_link2_rt714_link0[] = {
{}
};
+static const struct snd_soc_acpi_link_adr adl_sdw_rt711_link0_rt1316_link3[] = {
+ {
+ .mask = BIT(0),
+ .num_adr = ARRAY_SIZE(rt711_sdca_0_adr),
+ .adr_d = rt711_sdca_0_adr,
+ },
+ {
+ .mask = BIT(3),
+ .num_adr = ARRAY_SIZE(rt1316_3_single_adr),
+ .adr_d = rt1316_3_single_adr,
+ },
+ {}
+};
+
static const struct snd_soc_acpi_adr_device mx8373_2_adr[] = {
{
.adr = 0x000223019F837300ull,
@@ -412,6 +440,11 @@ static const struct snd_soc_acpi_codecs adl_max98390_amp = {
.codecs = {"MX98390"}
};
+static const struct snd_soc_acpi_codecs adl_lt6911_hdmi = {
+ .num_codecs = 1,
+ .codecs = {"INTC10B0"}
+};
+
struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_machines[] = {
{
.comp_ids = &adl_rt5682_rt5682s_hp,
@@ -479,12 +512,34 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_machines[] = {
.drv_name = "adl_rt5682",
.sof_tplg_filename = "sof-adl-rt5682.tplg",
},
+ {
+ .id = "10134242",
+ .drv_name = "adl_mx98360a_cs4242",
+ .machine_quirk = snd_soc_acpi_codec_list,
+ .quirk_data = &adl_max98360a_amp,
+ .sof_tplg_filename = "sof-adl-max98360a-cs42l42.tplg",
+ },
/* place amp-only boards in the end of table */
{
.id = "CSC3541",
.drv_name = "adl_cs35l41",
.sof_tplg_filename = "sof-adl-cs35l41.tplg",
},
+ {
+ .comp_ids = &essx_83x6,
+ .drv_name = "adl_es83x6_c1_h02",
+ .machine_quirk = snd_soc_acpi_codec_list,
+ .quirk_data = &adl_lt6911_hdmi,
+ .sof_tplg_filename = "sof-adl-es83x6-ssp1-hdmi-ssp02.tplg",
+ },
+ {
+ .comp_ids = &essx_83x6,
+ .drv_name = "sof-essx8336",
+ .sof_tplg_filename = "sof-adl-es83x6", /* the tplg suffix is added at run time */
+ .tplg_quirk_mask = SND_SOC_ACPI_TPLG_INTEL_SSP_NUMBER |
+ SND_SOC_ACPI_TPLG_INTEL_SSP_MSB |
+ SND_SOC_ACPI_TPLG_INTEL_DMIC_NUMBER,
+ },
{},
};
EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_adl_machines);
@@ -540,6 +595,12 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_sdw_machines[] = {
.sof_tplg_filename = "sof-adl-rt1316-l2-mono-rt714-l0.tplg",
},
{
+ .link_mask = 0x9, /* 2 active links required */
+ .links = adl_sdw_rt711_link0_rt1316_link3,
+ .drv_name = "sof_sdw",
+ .sof_tplg_filename = "sof-adl-rt711-l0-rt1316-l3.tplg",
+ },
+ {
.link_mask = 0x1, /* link0 required */
.links = adl_rvp,
.drv_name = "sof_sdw",
diff --git a/sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.c b/sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.c
index 0441df97b260..cbcb649604e5 100644
--- a/sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.c
@@ -12,7 +12,7 @@
struct snd_soc_acpi_mach snd_soc_acpi_intel_haswell_machines[] = {
{
.id = "INT33CA",
- .drv_name = "haswell-audio",
+ .drv_name = "hsw_rt5640",
.fw_filename = "intel/IntcSST1.bin",
.sof_tplg_filename = "sof-hsw.tplg",
},
@@ -23,7 +23,7 @@ EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_haswell_machines);
struct snd_soc_acpi_mach snd_soc_acpi_intel_broadwell_machines[] = {
{
.id = "INT343A",
- .drv_name = "broadwell-audio",
+ .drv_name = "bdw_rt286",
.fw_filename = "intel/IntcSST2.bin",
.sof_tplg_filename = "sof-bdw-rt286.tplg",
},
@@ -41,7 +41,7 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_broadwell_machines[] = {
},
{
.id = "INT33CA",
- .drv_name = "haswell-audio",
+ .drv_name = "hsw_rt5640",
.fw_filename = "intel/IntcSST2.bin",
.sof_tplg_filename = "sof-bdw-rt5640.tplg",
},
diff --git a/sound/soc/intel/common/soc-acpi-intel-mtl-match.c b/sound/soc/intel/common/soc-acpi-intel-mtl-match.c
new file mode 100644
index 000000000000..36c361fb28a4
--- /dev/null
+++ b/sound/soc/intel/common/soc-acpi-intel-mtl-match.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * soc-acpi-intel-mtl-match.c - tables and support for MTL ACPI enumeration.
+ *
+ * Copyright (c) 2022, Intel Corporation.
+ *
+ */
+
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+#include "soc-acpi-intel-sdw-mockup-match.h"
+
+static const struct snd_soc_acpi_codecs mtl_max98357a_amp = {
+ .num_codecs = 1,
+ .codecs = {"MX98357A"}
+};
+
+static const struct snd_soc_acpi_codecs mtl_rt5682_rt5682s_hp = {
+ .num_codecs = 2,
+ .codecs = {"10EC5682", "RTL5682"},
+};
+
+struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_machines[] = {
+ {
+ .comp_ids = &mtl_rt5682_rt5682s_hp,
+ .drv_name = "mtl_mx98357_rt5682",
+ .machine_quirk = snd_soc_acpi_codec_list,
+ .quirk_data = &mtl_max98357a_amp,
+ .sof_tplg_filename = "sof-mtl-max98357a-rt5682.tplg",
+ },
+ {},
+};
+EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_mtl_machines);
+
+static const struct snd_soc_acpi_endpoint single_endpoint = {
+ .num = 0,
+ .aggregated = 0,
+ .group_position = 0,
+ .group_id = 0,
+};
+
+static const struct snd_soc_acpi_adr_device rt711_sdca_0_adr[] = {
+ {
+ .adr = 0x000030025D071101ull,
+ .num_endpoints = 1,
+ .endpoints = &single_endpoint,
+ .name_prefix = "rt711"
+ }
+};
+
+static const struct snd_soc_acpi_link_adr mtl_rvp[] = {
+ {
+ .mask = BIT(0),
+ .num_adr = ARRAY_SIZE(rt711_sdca_0_adr),
+ .adr_d = rt711_sdca_0_adr,
+ },
+ {}
+};
+
+/* this table is used when there is no I2S codec present */
+struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_sdw_machines[] = {
+ /* mockup tests need to be first */
+ {
+ .link_mask = GENMASK(3, 0),
+ .links = sdw_mockup_headset_2amps_mic,
+ .drv_name = "sof_sdw",
+ .sof_tplg_filename = "sof-mtl-rt711-rt1308-rt715.tplg",
+ },
+ {
+ .link_mask = BIT(0) | BIT(1) | BIT(3),
+ .links = sdw_mockup_headset_1amp_mic,
+ .drv_name = "sof_sdw",
+ .sof_tplg_filename = "sof-mtl-rt711-rt1308-mono-rt715.tplg",
+ },
+ {
+ .link_mask = GENMASK(2, 0),
+ .links = sdw_mockup_mic_headset_1amp,
+ .drv_name = "sof_sdw",
+ .sof_tplg_filename = "sof-mtl-rt715-rt711-rt1308-mono.tplg",
+ },
+ {
+ .link_mask = BIT(0),
+ .links = mtl_rvp,
+ .drv_name = "sof_sdw",
+ .sof_tplg_filename = "sof-mtl-rt711.tplg",
+ },
+ {},
+};
+EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_mtl_sdw_machines);
diff --git a/sound/soc/intel/keembay/kmb_platform.c b/sound/soc/intel/keembay/kmb_platform.c
index a6fb74ba1c42..b4893365d01d 100644
--- a/sound/soc/intel/keembay/kmb_platform.c
+++ b/sound/soc/intel/keembay/kmb_platform.c
@@ -388,15 +388,17 @@ static snd_pcm_uframes_t kmb_pcm_pointer(struct snd_soc_component *component,
}
static const struct snd_soc_component_driver kmb_component = {
- .name = "kmb",
- .pcm_construct = kmb_platform_pcm_new,
- .open = kmb_pcm_open,
- .trigger = kmb_pcm_trigger,
- .pointer = kmb_pcm_pointer,
+ .name = "kmb",
+ .pcm_construct = kmb_platform_pcm_new,
+ .open = kmb_pcm_open,
+ .trigger = kmb_pcm_trigger,
+ .pointer = kmb_pcm_pointer,
+ .legacy_dai_naming = 1,
};
static const struct snd_soc_component_driver kmb_component_dma = {
- .name = "kmb",
+ .name = "kmb",
+ .legacy_dai_naming = 1,
};
static int kmb_probe(struct snd_soc_dai *cpu_dai)
@@ -497,11 +499,11 @@ static int kmb_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
int ret;
switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
- case SND_SOC_DAIFMT_CBP_CFP:
+ case SND_SOC_DAIFMT_BC_FC:
kmb_i2s->clock_provider = false;
ret = 0;
break;
- case SND_SOC_DAIFMT_CBC_CFC:
+ case SND_SOC_DAIFMT_BP_FP:
writel(CLOCK_PROVIDER_MODE, kmb_i2s->pss_base + I2S_GEN_CFG_0);
ret = clk_prepare_enable(kmb_i2s->clk_i2s);
diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
index 55f310e91b55..9d72ebd812af 100644
--- a/sound/soc/intel/skylake/skl-pcm.c
+++ b/sound/soc/intel/skylake/skl-pcm.c
@@ -1380,7 +1380,10 @@ static int skl_platform_soc_probe(struct snd_soc_component *component)
const struct skl_dsp_ops *ops;
int ret;
- pm_runtime_get_sync(component->dev);
+ ret = pm_runtime_resume_and_get(component->dev);
+ if (ret < 0 && ret != -EACCES)
+ return ret;
+
if (bus->ppcap) {
skl->component = component;
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index 9bdf020a2b64..e06eac592da1 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -2950,9 +2950,6 @@ static int skl_tplg_get_pvt_data(struct snd_soc_tplg_dapm_widget *tplg_w,
block_size = ret;
off += array->size;
- array = (struct snd_soc_tplg_vendor_array *)
- (tplg_w->priv.data + off);
-
data = (tplg_w->priv.data + off);
if (block_type == SKL_TYPE_TUPLE) {
@@ -3599,9 +3596,6 @@ static int skl_tplg_get_manifest_data(struct snd_soc_tplg_manifest *manifest,
block_size = ret;
off += array->size;
- array = (struct snd_soc_tplg_vendor_array *)
- (manifest->priv.data + off);
-
data = (manifest->priv.data + off);
if (block_type == SKL_TYPE_TUPLE) {
diff --git a/sound/soc/jz4740/Kconfig b/sound/soc/jz4740/Kconfig
index 29144720cb62..e72f826062e9 100644
--- a/sound/soc/jz4740/Kconfig
+++ b/sound/soc/jz4740/Kconfig
@@ -2,7 +2,7 @@
config SND_JZ4740_SOC_I2S
tristate "SoC Audio (I2S protocol) for Ingenic JZ4740 SoC"
depends on MIPS || COMPILE_TEST
- depends on OF && HAS_IOMEM
+ depends on HAS_IOMEM
select SND_SOC_GENERIC_DMAENGINE_PCM
help
Say Y if you want to use I2S protocol and I2S codec on Ingenic JZ4740
diff --git a/sound/soc/jz4740/jz4740-i2s.c b/sound/soc/jz4740/jz4740-i2s.c
index 7ad5d9a924d8..c4c1e89b47c1 100644
--- a/sound/soc/jz4740/jz4740-i2s.c
+++ b/sound/soc/jz4740/jz4740-i2s.c
@@ -5,10 +5,9 @@
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -94,9 +93,7 @@ struct i2s_soc_info {
};
struct jz4740_i2s {
- struct resource *mem;
void __iomem *base;
- dma_addr_t phys_base;
struct clk *clk_aic;
struct clk *clk_i2s;
@@ -206,18 +203,18 @@ static int jz4740_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
conf &= ~(JZ_AIC_CONF_BIT_CLK_MASTER | JZ_AIC_CONF_SYNC_CLK_MASTER);
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBS_CFS:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_BP_FP:
conf |= JZ_AIC_CONF_BIT_CLK_MASTER | JZ_AIC_CONF_SYNC_CLK_MASTER;
format |= JZ_AIC_I2S_FMT_ENABLE_SYS_CLK;
break;
- case SND_SOC_DAIFMT_CBM_CFS:
+ case SND_SOC_DAIFMT_BC_FP:
conf |= JZ_AIC_CONF_SYNC_CLK_MASTER;
break;
- case SND_SOC_DAIFMT_CBS_CFM:
+ case SND_SOC_DAIFMT_BP_FC:
conf |= JZ_AIC_CONF_BIT_CLK_MASTER;
break;
- case SND_SOC_DAIFMT_CBM_CFM:
+ case SND_SOC_DAIFMT_BC_FC:
break;
default:
return -EINVAL;
@@ -371,21 +368,6 @@ static int jz4740_i2s_resume(struct snd_soc_component *component)
return 0;
}
-static void jz4740_i2s_init_pcm_config(struct jz4740_i2s *i2s)
-{
- struct snd_dmaengine_dai_dma_data *dma_data;
-
- /* Playback */
- dma_data = &i2s->playback_dma_data;
- dma_data->maxburst = 16;
- dma_data->addr = i2s->phys_base + JZ_REG_AIC_FIFO;
-
- /* Capture */
- dma_data = &i2s->capture_dma_data;
- dma_data->maxburst = 16;
- dma_data->addr = i2s->phys_base + JZ_REG_AIC_FIFO;
-}
-
static int jz4740_i2s_dai_probe(struct snd_soc_dai *dai)
{
struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);
@@ -396,7 +378,6 @@ static int jz4740_i2s_dai_probe(struct snd_soc_dai *dai)
if (ret)
return ret;
- jz4740_i2s_init_pcm_config(i2s);
snd_soc_dai_init_dma_data(dai, &i2s->playback_dma_data,
&i2s->capture_dma_data);
@@ -498,9 +479,10 @@ static const struct i2s_soc_info jz4780_i2s_soc_info = {
};
static const struct snd_soc_component_driver jz4740_i2s_component = {
- .name = "jz4740-i2s",
- .suspend = jz4740_i2s_suspend,
- .resume = jz4740_i2s_resume,
+ .name = "jz4740-i2s",
+ .suspend = jz4740_i2s_suspend,
+ .resume = jz4740_i2s_resume,
+ .legacy_dai_naming = 1,
};
static const struct of_device_id jz4740_of_matches[] = {
@@ -529,7 +511,11 @@ static int jz4740_i2s_dev_probe(struct platform_device *pdev)
if (IS_ERR(i2s->base))
return PTR_ERR(i2s->base);
- i2s->phys_base = mem->start;
+ i2s->playback_dma_data.maxburst = 16;
+ i2s->playback_dma_data.addr = mem->start + JZ_REG_AIC_FIFO;
+
+ i2s->capture_dma_data.maxburst = 16;
+ i2s->capture_dma_data.addr = mem->start + JZ_REG_AIC_FIFO;
i2s->clk_aic = devm_clk_get(dev, "aic");
if (IS_ERR(i2s->clk_aic))
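The jz4740-i2s (and earlier keembay) hunks rename the DAI clock-role flags to the newer provider/consumer spelling without changing behaviour. Restating the jz4740_i2s_set_fmt() mapping above from the CPU DAI's point of view, as a standalone sketch:

	static int my_check_fmt(unsigned int fmt)
	{
		switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
		case SND_SOC_DAIFMT_BP_FP:	/* was CBS_CFS: CPU provides bit + frame clock */
		case SND_SOC_DAIFMT_BC_FP:	/* was CBM_CFS */
		case SND_SOC_DAIFMT_BP_FC:	/* was CBS_CFM */
		case SND_SOC_DAIFMT_BC_FC:	/* was CBM_CFM: codec provides both clocks */
			return 0;
		default:
			return -EINVAL;
		}
	}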
diff --git a/sound/soc/mediatek/Kconfig b/sound/soc/mediatek/Kconfig
index 9e5ce1a82639..363fa4d47680 100644
--- a/sound/soc/mediatek/Kconfig
+++ b/sound/soc/mediatek/Kconfig
@@ -152,6 +152,51 @@ config SND_SOC_MT8183_DA7219_MAX98357A
Select Y if you have such device.
If unsure select "N".
+config SND_SOC_MT8186
+ tristate "ASoC support for Mediatek MT8186 chip"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on COMMON_CLK
+ select SND_SOC_MEDIATEK
+ select SND_SOC_MT6358
+ select MFD_SYSCON if SND_SOC_MT6358
+ help
+ This adds ASoC driver for Mediatek MT8186 boards
+ that can be used with other codecs.
+ Select Y if you have such device.
+ If unsure select "N".
+
+config SND_SOC_MT8186_MT6366_DA7219_MAX98357
+ tristate "ASoC Audio driver for MT8186 with DA7219 MAX98357A codec"
+ depends on I2C && GPIOLIB
+ depends on SND_SOC_MT8186 && MTK_PMIC_WRAP
+ select SND_SOC_MT6358
+ select SND_SOC_MAX98357A
+ select SND_SOC_DA7219
+ select SND_SOC_BT_SCO
+ select SND_SOC_DMIC
+ select SND_SOC_HDMI_CODEC
+ help
+ This adds ASoC driver for Mediatek MT8186 boards
+ with the MT6366(MT6358) DA7219 MAX98357A codecs.
+ Select Y if you have such device.
+ If unsure select "N".
+
+config SND_SOC_MT8186_MT6366_RT1019_RT5682S
+ tristate "ASoC Audio driver for MT8186 with RT1019 RT5682S codec"
+ depends on I2C && GPIOLIB
+ depends on SND_SOC_MT8186 && MTK_PMIC_WRAP
+ select SND_SOC_MT6358
+ select SND_SOC_RT1015P
+ select SND_SOC_RT5682S
+ select SND_SOC_BT_SCO
+ select SND_SOC_DMIC
+ select SND_SOC_HDMI_CODEC
+ help
+ This adds ASoC driver for Mediatek MT8186 boards
+ with the MT6366(MT6358) RT1019 RT5682S codecs.
+ Select Y if you have such device.
+ If unsure select "N".
+
config SND_SOC_MTK_BTCVSD
tristate "ALSA BT SCO CVSD/MSBC Driver"
help
diff --git a/sound/soc/mediatek/Makefile b/sound/soc/mediatek/Makefile
index 34778ca12106..5571c640a288 100644
--- a/sound/soc/mediatek/Makefile
+++ b/sound/soc/mediatek/Makefile
@@ -4,5 +4,6 @@ obj-$(CONFIG_SND_SOC_MT2701) += mt2701/
obj-$(CONFIG_SND_SOC_MT6797) += mt6797/
obj-$(CONFIG_SND_SOC_MT8173) += mt8173/
obj-$(CONFIG_SND_SOC_MT8183) += mt8183/
+obj-$(CONFIG_SND_SOC_MT8186) += mt8186/
obj-$(CONFIG_SND_SOC_MT8192) += mt8192/
obj-$(CONFIG_SND_SOC_MT8195) += mt8195/
diff --git a/sound/soc/mediatek/common/Makefile b/sound/soc/mediatek/common/Makefile
index acbe01e9e928..576deb7f8cce 100644
--- a/sound/soc/mediatek/common/Makefile
+++ b/sound/soc/mediatek/common/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
# platform driver
-snd-soc-mtk-common-objs := mtk-afe-platform-driver.o mtk-afe-fe-dai.o
+snd-soc-mtk-common-objs := mtk-afe-platform-driver.o mtk-afe-fe-dai.o mtk-dsp-sof-common.o
obj-$(CONFIG_SND_SOC_MEDIATEK) += snd-soc-mtk-common.o
obj-$(CONFIG_SND_SOC_MTK_BTCVSD) += mtk-btcvsd.o
diff --git a/sound/soc/mediatek/common/mtk-dsp-sof-common.c b/sound/soc/mediatek/common/mtk-dsp-sof-common.c
new file mode 100644
index 000000000000..8b1b623207be
--- /dev/null
+++ b/sound/soc/mediatek/common/mtk-dsp-sof-common.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * mtk-dsp-sof-common.c -- MediaTek dsp sof common ctrl
+ *
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Chunxu Li <chunxu.li@mediatek.com>
+ */
+
+#include "mtk-dsp-sof-common.h"
+#include "mtk-soc-card.h"
+
+/* fixup the BE DAI link to match any values from topology */
+int mtk_sof_dai_link_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_card *card = rtd->card;
+ struct mtk_soc_card_data *soc_card_data = snd_soc_card_get_drvdata(card);
+ struct mtk_sof_priv *sof_priv = soc_card_data->sof_priv;
+ int i, j, ret = 0;
+
+ for (i = 0; i < sof_priv->num_streams; i++) {
+ struct snd_soc_dai *cpu_dai;
+ struct snd_soc_pcm_runtime *runtime;
+ struct snd_soc_dai_link *sof_dai_link = NULL;
+ const struct sof_conn_stream *conn = &sof_priv->conn_streams[i];
+
+ if (strcmp(rtd->dai_link->name, conn->normal_link))
+ continue;
+
+ for_each_card_rtds(card, runtime) {
+ if (strcmp(runtime->dai_link->name, conn->sof_link))
+ continue;
+
+ for_each_rtd_cpu_dais(runtime, j, cpu_dai) {
+ if (cpu_dai->stream_active[conn->stream_dir] > 0) {
+ sof_dai_link = runtime->dai_link;
+ break;
+ }
+ }
+ break;
+ }
+
+ if (sof_dai_link && sof_dai_link->be_hw_params_fixup)
+ ret = sof_dai_link->be_hw_params_fixup(runtime, params);
+
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mtk_sof_dai_link_fixup);
+
+int mtk_sof_card_probe(struct snd_soc_card *card)
+{
+ int i;
+ struct snd_soc_dai_link *dai_link;
+
+ /* Set stream_name to help sof bind widgets */
+ for_each_card_prelinks(card, i, dai_link) {
+ if (dai_link->no_pcm && !dai_link->stream_name && dai_link->name)
+ dai_link->stream_name = dai_link->name;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtk_sof_card_probe);
+
+int mtk_sof_card_late_probe(struct snd_soc_card *card)
+{
+ struct snd_soc_pcm_runtime *rtd;
+ struct snd_soc_component *sof_comp = NULL;
+ struct mtk_soc_card_data *soc_card_data =
+ snd_soc_card_get_drvdata(card);
+ struct mtk_sof_priv *sof_priv = soc_card_data->sof_priv;
+ int i;
+
+ /* 1. find sof component */
+ for_each_card_rtds(card, rtd) {
+ sof_comp = snd_soc_rtdcom_lookup(rtd, "sof-audio-component");
+ if (sof_comp)
+ break;
+ }
+
+ if (!sof_comp) {
+ dev_info(card->dev, "probe without sof-audio-component\n");
+ return 0;
+ }
+
+ /* 2. add route path and fixup callback */
+ for (i = 0; i < sof_priv->num_streams; i++) {
+ const struct sof_conn_stream *conn = &sof_priv->conn_streams[i];
+ struct snd_soc_pcm_runtime *sof_rtd = NULL;
+ struct snd_soc_pcm_runtime *normal_rtd = NULL;
+
+ for_each_card_rtds(card, rtd) {
+ if (!strcmp(rtd->dai_link->name, conn->sof_link)) {
+ sof_rtd = rtd;
+ continue;
+ }
+ if (!strcmp(rtd->dai_link->name, conn->normal_link)) {
+ normal_rtd = rtd;
+ continue;
+ }
+ if (normal_rtd && sof_rtd)
+ break;
+ }
+ if (normal_rtd && sof_rtd) {
+ int j;
+ struct snd_soc_dai *cpu_dai;
+
+ for_each_rtd_cpu_dais(sof_rtd, j, cpu_dai) {
+ struct snd_soc_dapm_route route;
+ struct snd_soc_dapm_path *p = NULL;
+ struct snd_soc_dapm_widget *play_widget =
+ cpu_dai->playback_widget;
+ struct snd_soc_dapm_widget *cap_widget =
+ cpu_dai->capture_widget;
+ memset(&route, 0, sizeof(route));
+ if (conn->stream_dir == SNDRV_PCM_STREAM_CAPTURE &&
+ cap_widget) {
+ snd_soc_dapm_widget_for_each_sink_path(cap_widget, p) {
+ route.source = conn->sof_dma;
+ route.sink = p->sink->name;
+ snd_soc_dapm_add_routes(&card->dapm, &route, 1);
+ }
+ } else if (conn->stream_dir == SNDRV_PCM_STREAM_PLAYBACK &&
+ play_widget) {
+ snd_soc_dapm_widget_for_each_source_path(play_widget, p) {
+ route.source = p->source->name;
+ route.sink = conn->sof_dma;
+ snd_soc_dapm_add_routes(&card->dapm, &route, 1);
+ }
+ } else {
+ dev_err(cpu_dai->dev, "stream dir and widget not pair\n");
+ }
+ }
+
+ sof_rtd->dai_link->be_hw_params_fixup =
+ sof_comp->driver->be_hw_params_fixup;
+ if (sof_priv->sof_dai_link_fixup)
+ normal_rtd->dai_link->be_hw_params_fixup =
+ sof_priv->sof_dai_link_fixup;
+ else
+ normal_rtd->dai_link->be_hw_params_fixup = mtk_sof_dai_link_fixup;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtk_sof_card_late_probe);
+
+int mtk_sof_dailink_parse_of(struct snd_soc_card *card, struct device_node *np,
+ const char *propname, struct snd_soc_dai_link *pre_dai_links,
+ int pre_num_links)
+{
+ struct device *dev = card->dev;
+ struct snd_soc_dai_link *parsed_dai_link;
+ const char *dai_name = NULL;
+ int i, j, ret, num_links, parsed_num_links = 0;
+
+ num_links = of_property_count_strings(np, "mediatek,dai-link");
+ if (num_links < 0 || num_links > card->num_links) {
+ dev_dbg(dev, "number of dai-link is invalid\n");
+ return -EINVAL;
+ }
+
+ parsed_dai_link = devm_kcalloc(dev, num_links, sizeof(*parsed_dai_link), GFP_KERNEL);
+ if (!parsed_dai_link)
+ return -ENOMEM;
+
+ for (i = 0; i < num_links; i++) {
+ ret = of_property_read_string_index(np, propname, i, &dai_name);
+ if (ret) {
+ dev_dbg(dev, "ASoC: Property '%s' index %d could not be read: %d\n",
+ propname, i, ret);
+ return ret;
+ }
+ dev_dbg(dev, "ASoC: Property get dai_name:%s\n", dai_name);
+ for (j = 0; j < pre_num_links; j++) {
+ if (!strcmp(dai_name, pre_dai_links[j].name)) {
+ memcpy(&parsed_dai_link[parsed_num_links++], &pre_dai_links[j],
+ sizeof(struct snd_soc_dai_link));
+ break;
+ }
+ }
+ }
+
+ if (parsed_num_links != num_links)
+ return -EINVAL;
+
+ card->dai_link = parsed_dai_link;
+ card->num_links = parsed_num_links;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtk_sof_dailink_parse_of);
diff --git a/sound/soc/mediatek/common/mtk-dsp-sof-common.h b/sound/soc/mediatek/common/mtk-dsp-sof-common.h
new file mode 100644
index 000000000000..dd38c4a93574
--- /dev/null
+++ b/sound/soc/mediatek/common/mtk-dsp-sof-common.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * mtk-dsp-sof-common.h -- MediaTek DSP SOF common definition
+ *
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Chunxu Li <chunxu.li@mediatek.com>
+ */
+
+#ifndef _MTK_DSP_SOF_COMMON_H_
+#define _MTK_DSP_SOF_COMMON_H_
+
+#include <sound/soc.h>
+
+struct sof_conn_stream {
+ const char *normal_link;
+ const char *sof_link;
+ const char *sof_dma;
+ int stream_dir;
+};
+
+struct mtk_sof_priv {
+ const struct sof_conn_stream *conn_streams;
+ int num_streams;
+ int (*sof_dai_link_fixup)(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params);
+};
+
+int mtk_sof_dai_link_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params);
+int mtk_sof_card_probe(struct snd_soc_card *card);
+int mtk_sof_card_late_probe(struct snd_soc_card *card);
+int mtk_sof_dailink_parse_of(struct snd_soc_card *card, struct device_node *np,
+ const char *propname, struct snd_soc_dai_link *pre_dai_links,
+ int pre_num_links);
+
+#endif
diff --git a/sound/soc/mediatek/common/mtk-soc-card.h b/sound/soc/mediatek/common/mtk-soc-card.h
new file mode 100644
index 000000000000..eeda79370049
--- /dev/null
+++ b/sound/soc/mediatek/common/mtk-soc-card.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * mtk-soc-card.h -- MediaTek SoC card data definition
+ *
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Chunxu Li <chunxu.li@mediatek.com>
+ */
+
+#ifndef _MTK_SOC_CARD_H_
+#define _MTK_SOC_CARD_H_
+
+struct mtk_soc_card_data {
+ void *mach_priv;
+ void *sof_priv;
+};
+
+#endif
diff --git a/sound/soc/mediatek/mt6797/mt6797-mt6351.c b/sound/soc/mediatek/mt6797/mt6797-mt6351.c
index 496f32bcfb5e..d2f6213a6bfc 100644
--- a/sound/soc/mediatek/mt6797/mt6797-mt6351.c
+++ b/sound/soc/mediatek/mt6797/mt6797-mt6351.c
@@ -217,7 +217,8 @@ static int mt6797_mt6351_dev_probe(struct platform_device *pdev)
if (!codec_node) {
dev_err(&pdev->dev,
"Property 'audio-codec' missing or invalid\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto put_platform_node;
}
for_each_card_prelinks(card, i, dai_link) {
if (dai_link->codecs->name)
@@ -230,6 +231,9 @@ static int mt6797_mt6351_dev_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n",
__func__, ret);
+ of_node_put(codec_node);
+put_platform_node:
+ of_node_put(platform_node);
return ret;
}
diff --git a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
index 31494930433f..dcaeeeb8aac7 100644
--- a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
+++ b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
@@ -286,10 +286,8 @@ static int mt8173_afe_dais_set_clks(struct mtk_base_afe *afe,
static void mt8173_afe_dais_disable_clks(struct mtk_base_afe *afe,
struct clk *m_ck, struct clk *b_ck)
{
- if (m_ck)
- clk_disable_unprepare(m_ck);
- if (b_ck)
- clk_disable_unprepare(b_ck);
+ clk_disable_unprepare(m_ck);
+ clk_disable_unprepare(b_ck);
}
static int mt8173_afe_i2s_startup(struct snd_pcm_substream *substream,
diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c
index 70bf312e855f..8794720cea3a 100644
--- a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c
+++ b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c
@@ -256,14 +256,16 @@ static int mt8173_rt5650_rt5676_dev_probe(struct platform_device *pdev)
if (!mt8173_rt5650_rt5676_dais[DAI_LINK_CODEC_I2S].codecs[0].of_node) {
dev_err(&pdev->dev,
"Property 'audio-codec' missing or invalid\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto put_node;
}
mt8173_rt5650_rt5676_dais[DAI_LINK_CODEC_I2S].codecs[1].of_node =
of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 1);
if (!mt8173_rt5650_rt5676_dais[DAI_LINK_CODEC_I2S].codecs[1].of_node) {
dev_err(&pdev->dev,
"Property 'audio-codec' missing or invalid\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto put_node;
}
mt8173_rt5650_rt5676_codec_conf[0].dlc.of_node =
mt8173_rt5650_rt5676_dais[DAI_LINK_CODEC_I2S].codecs[1].of_node;
@@ -276,13 +278,15 @@ static int mt8173_rt5650_rt5676_dev_probe(struct platform_device *pdev)
if (!mt8173_rt5650_rt5676_dais[DAI_LINK_HDMI_I2S].codecs->of_node) {
dev_err(&pdev->dev,
"Property 'audio-codec' missing or invalid\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto put_node;
}
card->dev = &pdev->dev;
ret = devm_snd_soc_register_card(&pdev->dev, card);
+put_node:
of_node_put(platform_node);
return ret;
}
diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650.c b/sound/soc/mediatek/mt8173/mt8173-rt5650.c
index d1c94acb4516..e05f2b0231fe 100644
--- a/sound/soc/mediatek/mt8173/mt8173-rt5650.c
+++ b/sound/soc/mediatek/mt8173/mt8173-rt5650.c
@@ -280,7 +280,8 @@ static int mt8173_rt5650_dev_probe(struct platform_device *pdev)
if (!mt8173_rt5650_dais[DAI_LINK_CODEC_I2S].codecs[0].of_node) {
dev_err(&pdev->dev,
"Property 'audio-codec' missing or invalid\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto put_platform_node;
}
mt8173_rt5650_dais[DAI_LINK_CODEC_I2S].codecs[1].of_node =
mt8173_rt5650_dais[DAI_LINK_CODEC_I2S].codecs[0].of_node;
@@ -293,7 +294,7 @@ static int mt8173_rt5650_dev_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"%s codec_capture_dai name fail %d\n",
__func__, ret);
- return ret;
+ goto put_platform_node;
}
mt8173_rt5650_dais[DAI_LINK_CODEC_I2S].codecs[1].dai_name =
codec_capture_dai;
@@ -315,12 +316,14 @@ static int mt8173_rt5650_dev_probe(struct platform_device *pdev)
if (!mt8173_rt5650_dais[DAI_LINK_HDMI_I2S].codecs->of_node) {
dev_err(&pdev->dev,
"Property 'audio-codec' missing or invalid\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto put_platform_node;
}
card->dev = &pdev->dev;
ret = devm_snd_soc_register_card(&pdev->dev, card);
+put_platform_node:
of_node_put(platform_node);
return ret;
}
diff --git a/sound/soc/mediatek/mt8186/Makefile b/sound/soc/mediatek/mt8186/Makefile
new file mode 100644
index 000000000000..49b0026628a0
--- /dev/null
+++ b/sound/soc/mediatek/mt8186/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# platform driver
+snd-soc-mt8186-afe-objs := \
+ mt8186-afe-pcm.o \
+ mt8186-audsys-clk.o \
+ mt8186-afe-clk.o \
+ mt8186-afe-gpio.o \
+ mt8186-dai-adda.o \
+ mt8186-afe-control.o \
+ mt8186-dai-i2s.o \
+ mt8186-dai-hw-gain.o \
+ mt8186-dai-pcm.o \
+ mt8186-dai-src.o \
+ mt8186-dai-hostless.o \
+ mt8186-dai-tdm.o \
+ mt8186-misc-control.o \
+ mt8186-mt6366-common.o
+
+obj-$(CONFIG_SND_SOC_MT8186) += snd-soc-mt8186-afe.o
+obj-$(CONFIG_SND_SOC_MT8186_MT6366_DA7219_MAX98357) += mt8186-mt6366-da7219-max98357.o
+obj-$(CONFIG_SND_SOC_MT8186_MT6366_RT1019_RT5682S) += mt8186-mt6366-rt1019-rt5682s.o
diff --git a/sound/soc/mediatek/mt8186/mt8186-afe-clk.c b/sound/soc/mediatek/mt8186/mt8186-afe-clk.c
new file mode 100644
index 000000000000..a6b4f29049bb
--- /dev/null
+++ b/sound/soc/mediatek/mt8186/mt8186-afe-clk.c
@@ -0,0 +1,652 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mt8186-afe-clk.c -- Mediatek 8186 afe clock ctrl
+//
+// Copyright (c) 2022 MediaTek Inc.
+// Author: Jiaxin Yu <jiaxin.yu@mediatek.com>
+
+#include <linux/clk.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+#include "mt8186-afe-common.h"
+#include "mt8186-afe-clk.h"
+#include "mt8186-audsys-clk.h"
+
+static DEFINE_MUTEX(mutex_request_dram);
+
+static const char *aud_clks[CLK_NUM] = {
+ [CLK_AFE] = "aud_afe_clk",
+ [CLK_DAC] = "aud_dac_clk",
+ [CLK_DAC_PREDIS] = "aud_dac_predis_clk",
+ [CLK_ADC] = "aud_adc_clk",
+ [CLK_TML] = "aud_tml_clk",
+ [CLK_APLL22M] = "aud_apll22m_clk",
+ [CLK_APLL24M] = "aud_apll24m_clk",
+ [CLK_APLL1_TUNER] = "aud_apll_tuner_clk",
+ [CLK_APLL2_TUNER] = "aud_apll2_tuner_clk",
+ [CLK_TDM] = "aud_tdm_clk",
+ [CLK_NLE] = "aud_nle_clk",
+ [CLK_DAC_HIRES] = "aud_dac_hires_clk",
+ [CLK_ADC_HIRES] = "aud_adc_hires_clk",
+ [CLK_I2S1_BCLK] = "aud_i2s1_bclk",
+ [CLK_I2S2_BCLK] = "aud_i2s2_bclk",
+ [CLK_I2S3_BCLK] = "aud_i2s3_bclk",
+ [CLK_I2S4_BCLK] = "aud_i2s4_bclk",
+ [CLK_CONNSYS_I2S_ASRC] = "aud_connsys_i2s_asrc",
+ [CLK_GENERAL1_ASRC] = "aud_general1_asrc",
+ [CLK_GENERAL2_ASRC] = "aud_general2_asrc",
+ [CLK_ADC_HIRES_TML] = "aud_adc_hires_tml",
+ [CLK_ADDA6_ADC] = "aud_adda6_adc",
+ [CLK_ADDA6_ADC_HIRES] = "aud_adda6_adc_hires",
+ [CLK_3RD_DAC] = "aud_3rd_dac",
+ [CLK_3RD_DAC_PREDIS] = "aud_3rd_dac_predis",
+ [CLK_3RD_DAC_TML] = "aud_3rd_dac_tml",
+ [CLK_3RD_DAC_HIRES] = "aud_3rd_dac_hires",
+ [CLK_ETDM_IN1_BCLK] = "aud_etdm_in1_bclk",
+ [CLK_ETDM_OUT1_BCLK] = "aud_etdm_out1_bclk",
+ [CLK_INFRA_SYS_AUDIO] = "aud_infra_clk",
+ [CLK_INFRA_AUDIO_26M] = "mtkaif_26m_clk",
+ [CLK_MUX_AUDIO] = "top_mux_audio",
+ [CLK_MUX_AUDIOINTBUS] = "top_mux_audio_int",
+ [CLK_TOP_MAINPLL_D2_D4] = "top_mainpll_d2_d4",
+ [CLK_TOP_MUX_AUD_1] = "top_mux_aud_1",
+ [CLK_TOP_APLL1_CK] = "top_apll1_ck",
+ [CLK_TOP_MUX_AUD_2] = "top_mux_aud_2",
+ [CLK_TOP_APLL2_CK] = "top_apll2_ck",
+ [CLK_TOP_MUX_AUD_ENG1] = "top_mux_aud_eng1",
+ [CLK_TOP_APLL1_D8] = "top_apll1_d8",
+ [CLK_TOP_MUX_AUD_ENG2] = "top_mux_aud_eng2",
+ [CLK_TOP_APLL2_D8] = "top_apll2_d8",
+ [CLK_TOP_MUX_AUDIO_H] = "top_mux_audio_h",
+ [CLK_TOP_I2S0_M_SEL] = "top_i2s0_m_sel",
+ [CLK_TOP_I2S1_M_SEL] = "top_i2s1_m_sel",
+ [CLK_TOP_I2S2_M_SEL] = "top_i2s2_m_sel",
+ [CLK_TOP_I2S4_M_SEL] = "top_i2s4_m_sel",
+ [CLK_TOP_TDM_M_SEL] = "top_tdm_m_sel",
+ [CLK_TOP_APLL12_DIV0] = "top_apll12_div0",
+ [CLK_TOP_APLL12_DIV1] = "top_apll12_div1",
+ [CLK_TOP_APLL12_DIV2] = "top_apll12_div2",
+ [CLK_TOP_APLL12_DIV4] = "top_apll12_div4",
+ [CLK_TOP_APLL12_DIV_TDM] = "top_apll12_div_tdm",
+ [CLK_CLK26M] = "top_clk26m_clk",
+};
+
+int mt8186_set_audio_int_bus_parent(struct mtk_base_afe *afe,
+ int clk_id)
+{
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ int ret;
+
+ ret = clk_set_parent(afe_priv->clk[CLK_MUX_AUDIOINTBUS],
+ afe_priv->clk[clk_id]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
+ __func__, aud_clks[CLK_MUX_AUDIOINTBUS],
+ aud_clks[clk_id], ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int apll1_mux_setting(struct mtk_base_afe *afe, bool enable)
+{
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ int ret;
+
+ if (enable) {
+ ret = clk_prepare_enable(afe_priv->clk[CLK_TOP_MUX_AUD_1]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[CLK_TOP_MUX_AUD_1], ret);
+ return ret;
+ }
+ ret = clk_set_parent(afe_priv->clk[CLK_TOP_MUX_AUD_1],
+ afe_priv->clk[CLK_TOP_APLL1_CK]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
+ __func__, aud_clks[CLK_TOP_MUX_AUD_1],
+ aud_clks[CLK_TOP_APLL1_CK], ret);
+ return ret;
+ }
+
+ /* 180.6336 MHz / 8 = 22.5792 MHz */
+ ret = clk_prepare_enable(afe_priv->clk[CLK_TOP_MUX_AUD_ENG1]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[CLK_TOP_MUX_AUD_ENG1], ret);
+ return ret;
+ }
+ ret = clk_set_parent(afe_priv->clk[CLK_TOP_MUX_AUD_ENG1],
+ afe_priv->clk[CLK_TOP_APLL1_D8]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
+ __func__, aud_clks[CLK_TOP_MUX_AUD_ENG1],
+ aud_clks[CLK_TOP_APLL1_D8], ret);
+ return ret;
+ }
+ } else {
+ ret = clk_set_parent(afe_priv->clk[CLK_TOP_MUX_AUD_ENG1],
+ afe_priv->clk[CLK_CLK26M]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
+ __func__, aud_clks[CLK_TOP_MUX_AUD_ENG1],
+ aud_clks[CLK_CLK26M], ret);
+ return ret;
+ }
+ clk_disable_unprepare(afe_priv->clk[CLK_TOP_MUX_AUD_ENG1]);
+
+ ret = clk_set_parent(afe_priv->clk[CLK_TOP_MUX_AUD_1],
+ afe_priv->clk[CLK_CLK26M]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
+ __func__, aud_clks[CLK_TOP_MUX_AUD_1],
+ aud_clks[CLK_CLK26M], ret);
+ return ret;
+ }
+ clk_disable_unprepare(afe_priv->clk[CLK_TOP_MUX_AUD_1]);
+ }
+
+ return 0;
+}
+
+static int apll2_mux_setting(struct mtk_base_afe *afe, bool enable)
+{
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ int ret;
+
+ if (enable) {
+ ret = clk_prepare_enable(afe_priv->clk[CLK_TOP_MUX_AUD_2]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[CLK_TOP_MUX_AUD_2], ret);
+ return ret;
+ }
+ ret = clk_set_parent(afe_priv->clk[CLK_TOP_MUX_AUD_2],
+ afe_priv->clk[CLK_TOP_APLL2_CK]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
+ __func__, aud_clks[CLK_TOP_MUX_AUD_2],
+ aud_clks[CLK_TOP_APLL2_CK], ret);
+ return ret;
+ }
+
+ /* 196.608 MHz / 8 = 24.576 MHz */
+ ret = clk_prepare_enable(afe_priv->clk[CLK_TOP_MUX_AUD_ENG2]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[CLK_TOP_MUX_AUD_ENG2], ret);
+ return ret;
+ }
+ ret = clk_set_parent(afe_priv->clk[CLK_TOP_MUX_AUD_ENG2],
+ afe_priv->clk[CLK_TOP_APLL2_D8]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
+ __func__, aud_clks[CLK_TOP_MUX_AUD_ENG2],
+ aud_clks[CLK_TOP_APLL2_D8], ret);
+ return ret;
+ }
+ } else {
+ ret = clk_set_parent(afe_priv->clk[CLK_TOP_MUX_AUD_ENG2],
+ afe_priv->clk[CLK_CLK26M]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
+ __func__, aud_clks[CLK_TOP_MUX_AUD_ENG2],
+ aud_clks[CLK_CLK26M], ret);
+ return ret;
+ }
+ clk_disable_unprepare(afe_priv->clk[CLK_TOP_MUX_AUD_ENG2]);
+
+ ret = clk_set_parent(afe_priv->clk[CLK_TOP_MUX_AUD_2],
+ afe_priv->clk[CLK_CLK26M]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
+ __func__, aud_clks[CLK_TOP_MUX_AUD_2],
+ aud_clks[CLK_CLK26M], ret);
+ return ret;
+ }
+ clk_disable_unprepare(afe_priv->clk[CLK_TOP_MUX_AUD_2]);
+ }
+
+ return 0;
+}
+
+int mt8186_afe_enable_cgs(struct mtk_base_afe *afe)
+{
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ int ret = 0;
+ int i;
+
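+ /* CLK_I2S1_BCLK through CLK_ETDM_OUT1_BCLK are laid out contiguously in the clock table, so a simple range loop covers all of these clock gates */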
+ for (i = CLK_I2S1_BCLK; i <= CLK_ETDM_OUT1_BCLK; i++) {
+ ret = clk_prepare_enable(afe_priv->clk[i]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[i], ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void mt8186_afe_disable_cgs(struct mtk_base_afe *afe)
+{
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ int i;
+
+ for (i = CLK_I2S1_BCLK; i <= CLK_ETDM_OUT1_BCLK; i++)
+ clk_disable_unprepare(afe_priv->clk[i]);
+}
+
+int mt8186_afe_enable_clock(struct mtk_base_afe *afe)
+{
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ int ret = 0;
+
+ ret = clk_prepare_enable(afe_priv->clk[CLK_INFRA_SYS_AUDIO]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[CLK_INFRA_SYS_AUDIO], ret);
+ goto clk_infra_sys_audio_err;
+ }
+
+ ret = clk_prepare_enable(afe_priv->clk[CLK_INFRA_AUDIO_26M]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[CLK_INFRA_AUDIO_26M], ret);
+ goto clk_infra_audio_26m_err;
+ }
+
+ ret = clk_prepare_enable(afe_priv->clk[CLK_MUX_AUDIO]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[CLK_MUX_AUDIO], ret);
+ goto clk_mux_audio_err;
+ }
+ ret = clk_set_parent(afe_priv->clk[CLK_MUX_AUDIO],
+ afe_priv->clk[CLK_CLK26M]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
+ __func__, aud_clks[CLK_MUX_AUDIO],
+ aud_clks[CLK_CLK26M], ret);
+ goto clk_mux_audio_err;
+ }
+
+ ret = clk_prepare_enable(afe_priv->clk[CLK_MUX_AUDIOINTBUS]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[CLK_MUX_AUDIOINTBUS], ret);
+ goto clk_mux_audio_intbus_err;
+ }
+ ret = mt8186_set_audio_int_bus_parent(afe,
+ CLK_TOP_MAINPLL_D2_D4);
+ if (ret)
+ goto clk_mux_audio_intbus_parent_err;
+
+ ret = clk_set_parent(afe_priv->clk[CLK_TOP_MUX_AUDIO_H],
+ afe_priv->clk[CLK_TOP_APLL2_CK]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
+ __func__, aud_clks[CLK_TOP_MUX_AUDIO_H],
+ aud_clks[CLK_TOP_APLL2_CK], ret);
+ goto clk_mux_audio_h_parent_err;
+ }
+
+ ret = clk_prepare_enable(afe_priv->clk[CLK_AFE]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[CLK_AFE], ret);
+ goto clk_afe_err;
+ }
+
+ return 0;
+
+clk_afe_err:
+clk_mux_audio_h_parent_err:
+clk_mux_audio_intbus_parent_err:
+ mt8186_set_audio_int_bus_parent(afe, CLK_CLK26M);
+clk_mux_audio_intbus_err:
+ clk_disable_unprepare(afe_priv->clk[CLK_MUX_AUDIOINTBUS]);
+clk_mux_audio_err:
+ clk_disable_unprepare(afe_priv->clk[CLK_MUX_AUDIO]);
+clk_infra_sys_audio_err:
+ clk_disable_unprepare(afe_priv->clk[CLK_INFRA_SYS_AUDIO]);
+clk_infra_audio_26m_err:
+ clk_disable_unprepare(afe_priv->clk[CLK_INFRA_AUDIO_26M]);
+
+ return ret;
+}
+
+void mt8186_afe_disable_clock(struct mtk_base_afe *afe)
+{
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+
+ clk_disable_unprepare(afe_priv->clk[CLK_AFE]);
+ mt8186_set_audio_int_bus_parent(afe, CLK_CLK26M);
+ clk_disable_unprepare(afe_priv->clk[CLK_MUX_AUDIOINTBUS]);
+ clk_disable_unprepare(afe_priv->clk[CLK_MUX_AUDIO]);
+ clk_disable_unprepare(afe_priv->clk[CLK_INFRA_AUDIO_26M]);
+ clk_disable_unprepare(afe_priv->clk[CLK_INFRA_SYS_AUDIO]);
+}
+
+int mt8186_afe_suspend_clock(struct mtk_base_afe *afe)
+{
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ int ret;
+
+ /* set audio int bus to 26M */
+ ret = clk_prepare_enable(afe_priv->clk[CLK_MUX_AUDIOINTBUS]);
+ if (ret) {
+ dev_info(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[CLK_MUX_AUDIOINTBUS], ret);
+ goto clk_mux_audio_intbus_err;
+ }
+ ret = mt8186_set_audio_int_bus_parent(afe, CLK_CLK26M);
+ if (ret)
+ goto clk_mux_audio_intbus_parent_err;
+
+ clk_disable_unprepare(afe_priv->clk[CLK_MUX_AUDIOINTBUS]);
+
+ return 0;
+
+clk_mux_audio_intbus_parent_err:
+ mt8186_set_audio_int_bus_parent(afe, CLK_TOP_MAINPLL_D2_D4);
+clk_mux_audio_intbus_err:
+ clk_disable_unprepare(afe_priv->clk[CLK_MUX_AUDIOINTBUS]);
+ return ret;
+}
+
+int mt8186_afe_resume_clock(struct mtk_base_afe *afe)
+{
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ int ret;
+
+ /* set audio int bus to normal working clock */
+ ret = clk_prepare_enable(afe_priv->clk[CLK_MUX_AUDIOINTBUS]);
+ if (ret) {
+ dev_info(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[CLK_MUX_AUDIOINTBUS], ret);
+ goto clk_mux_audio_intbus_err;
+ }
+ ret = mt8186_set_audio_int_bus_parent(afe,
+ CLK_TOP_MAINPLL_D2_D4);
+ if (ret)
+ goto clk_mux_audio_intbus_parent_err;
+
+ clk_disable_unprepare(afe_priv->clk[CLK_MUX_AUDIOINTBUS]);
+
+ return 0;
+
+clk_mux_audio_intbus_parent_err:
+ mt8186_set_audio_int_bus_parent(afe, CLK_CLK26M);
+clk_mux_audio_intbus_err:
+ clk_disable_unprepare(afe_priv->clk[CLK_MUX_AUDIOINTBUS]);
+ return ret;
+}
+
+int mt8186_apll1_enable(struct mtk_base_afe *afe)
+{
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ int ret;
+
+ /* setting for APLL */
+ apll1_mux_setting(afe, true);
+
+ ret = clk_prepare_enable(afe_priv->clk[CLK_APLL22M]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[CLK_APLL22M], ret);
+ goto err_clk_apll22m;
+ }
+
+ ret = clk_prepare_enable(afe_priv->clk[CLK_APLL1_TUNER]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[CLK_APLL1_TUNER], ret);
+ goto err_clk_apll1_tuner;
+ }
+
+ regmap_update_bits(afe->regmap, AFE_APLL1_TUNER_CFG, 0xfff7, 0x832);
+ regmap_update_bits(afe->regmap, AFE_APLL1_TUNER_CFG, 0x1, 0x1);
+
+ regmap_update_bits(afe->regmap, AFE_HD_ENGEN_ENABLE,
+ AFE_22M_ON_MASK_SFT, BIT(AFE_22M_ON_SFT));
+
+ return 0;
+
+err_clk_apll1_tuner:
+ clk_disable_unprepare(afe_priv->clk[CLK_APLL1_TUNER]);
+err_clk_apll22m:
+ clk_disable_unprepare(afe_priv->clk[CLK_APLL22M]);
+
+ return ret;
+}
+
+void mt8186_apll1_disable(struct mtk_base_afe *afe)
+{
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+
+ regmap_update_bits(afe->regmap, AFE_HD_ENGEN_ENABLE,
+ AFE_22M_ON_MASK_SFT, 0);
+
+ regmap_update_bits(afe->regmap, AFE_APLL1_TUNER_CFG, 0x1, 0);
+
+ clk_disable_unprepare(afe_priv->clk[CLK_APLL1_TUNER]);
+ clk_disable_unprepare(afe_priv->clk[CLK_APLL22M]);
+
+ apll1_mux_setting(afe, false);
+}
+
+int mt8186_apll2_enable(struct mtk_base_afe *afe)
+{
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ int ret;
+
+ /* setting for APLL */
+ apll2_mux_setting(afe, true);
+
+ ret = clk_prepare_enable(afe_priv->clk[CLK_APLL24M]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[CLK_APLL24M], ret);
+ goto err_clk_apll24m;
+ }
+
+ ret = clk_prepare_enable(afe_priv->clk[CLK_APLL2_TUNER]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[CLK_APLL2_TUNER], ret);
+ goto err_clk_apll2_tuner;
+ }
+
+ regmap_update_bits(afe->regmap, AFE_APLL2_TUNER_CFG, 0xfff7, 0x634);
+ regmap_update_bits(afe->regmap, AFE_APLL2_TUNER_CFG, 0x1, 0x1);
+
+ regmap_update_bits(afe->regmap, AFE_HD_ENGEN_ENABLE,
+ AFE_24M_ON_MASK_SFT, BIT(AFE_24M_ON_SFT));
+
+ return 0;
+
+err_clk_apll2_tuner:
+ clk_disable_unprepare(afe_priv->clk[CLK_APLL2_TUNER]);
+err_clk_apll24m:
+ clk_disable_unprepare(afe_priv->clk[CLK_APLL24M]);
+
+ return ret;
+}
+
+void mt8186_apll2_disable(struct mtk_base_afe *afe)
+{
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+
+ regmap_update_bits(afe->regmap, AFE_HD_ENGEN_ENABLE,
+ AFE_24M_ON_MASK_SFT, 0);
+
+ regmap_update_bits(afe->regmap, AFE_APLL2_TUNER_CFG, 0x1, 0);
+
+ clk_disable_unprepare(afe_priv->clk[CLK_APLL2_TUNER]);
+ clk_disable_unprepare(afe_priv->clk[CLK_APLL24M]);
+
+ apll2_mux_setting(afe, false);
+}
+
+int mt8186_get_apll_rate(struct mtk_base_afe *afe, int apll)
+{
+ return (apll == MT8186_APLL1) ? 180633600 : 196608000;
+}
+
+int mt8186_get_apll_by_rate(struct mtk_base_afe *afe, int rate)
+{
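+ /* 196.608 MHz (APLL2) divides evenly by 8 kHz-family rates; 44.1 kHz-family rates use the 180.6336 MHz APLL1 instead */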
+ return ((rate % 8000) == 0) ? MT8186_APLL2 : MT8186_APLL1;
+}
+
+int mt8186_get_apll_by_name(struct mtk_base_afe *afe, const char *name)
+{
+ if (strcmp(name, APLL1_W_NAME) == 0)
+ return MT8186_APLL1;
+
+ return MT8186_APLL2;
+}
+
+/* mck */
+struct mt8186_mck_div {
+ u32 m_sel_id;
+ u32 div_clk_id;
+};
+
+static const struct mt8186_mck_div mck_div[MT8186_MCK_NUM] = {
+ [MT8186_I2S0_MCK] = {
+ .m_sel_id = CLK_TOP_I2S0_M_SEL,
+ .div_clk_id = CLK_TOP_APLL12_DIV0,
+ },
+ [MT8186_I2S1_MCK] = {
+ .m_sel_id = CLK_TOP_I2S1_M_SEL,
+ .div_clk_id = CLK_TOP_APLL12_DIV1,
+ },
+ [MT8186_I2S2_MCK] = {
+ .m_sel_id = CLK_TOP_I2S2_M_SEL,
+ .div_clk_id = CLK_TOP_APLL12_DIV2,
+ },
+ [MT8186_I2S4_MCK] = {
+ .m_sel_id = CLK_TOP_I2S4_M_SEL,
+ .div_clk_id = CLK_TOP_APLL12_DIV4,
+ },
+ [MT8186_TDM_MCK] = {
+ .m_sel_id = CLK_TOP_TDM_M_SEL,
+ .div_clk_id = CLK_TOP_APLL12_DIV_TDM,
+ },
+};
+
+int mt8186_mck_enable(struct mtk_base_afe *afe, int mck_id, int rate)
+{
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ int apll = mt8186_get_apll_by_rate(afe, rate);
+ int apll_clk_id = apll == MT8186_APLL1 ?
+ CLK_TOP_MUX_AUD_1 : CLK_TOP_MUX_AUD_2;
+ int m_sel_id = mck_div[mck_id].m_sel_id;
+ int div_clk_id = mck_div[mck_id].div_clk_id;
+ int ret;
+
+ /* select apll */
+ if (m_sel_id >= 0) {
+ ret = clk_prepare_enable(afe_priv->clk[m_sel_id]);
+ if (ret) {
+ dev_err(afe->dev, "%s(), clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[m_sel_id], ret);
+ return ret;
+ }
+ ret = clk_set_parent(afe_priv->clk[m_sel_id],
+ afe_priv->clk[apll_clk_id]);
+ if (ret) {
+ dev_err(afe->dev, "%s(), clk_set_parent %s-%s fail %d\n",
+ __func__, aud_clks[m_sel_id],
+ aud_clks[apll_clk_id], ret);
+ return ret;
+ }
+ }
+
+ /* enable div, set rate */
+ ret = clk_prepare_enable(afe_priv->clk[div_clk_id]);
+ if (ret) {
+ dev_err(afe->dev, "%s(), clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[div_clk_id], ret);
+ return ret;
+ }
+ ret = clk_set_rate(afe_priv->clk[div_clk_id], rate);
+ if (ret) {
+ dev_err(afe->dev, "%s(), clk_set_rate %s, rate %d, fail %d\n",
+ __func__, aud_clks[div_clk_id], rate, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void mt8186_mck_disable(struct mtk_base_afe *afe, int mck_id)
+{
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ int m_sel_id = mck_div[mck_id].m_sel_id;
+ int div_clk_id = mck_div[mck_id].div_clk_id;
+
+ clk_disable_unprepare(afe_priv->clk[div_clk_id]);
+ if (m_sel_id >= 0)
+ clk_disable_unprepare(afe_priv->clk[m_sel_id]);
+}
+
+int mt8186_init_clock(struct mtk_base_afe *afe)
+{
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ struct device_node *of_node = afe->dev->of_node;
+ int i = 0;
+
+ mt8186_audsys_clk_register(afe);
+
+ afe_priv->clk = devm_kcalloc(afe->dev, CLK_NUM, sizeof(*afe_priv->clk),
+ GFP_KERNEL);
+ if (!afe_priv->clk)
+ return -ENOMEM;
+
+ for (i = 0; i < CLK_NUM; i++) {
+ afe_priv->clk[i] = devm_clk_get(afe->dev, aud_clks[i]);
+ if (IS_ERR(afe_priv->clk[i])) {
+ dev_err(afe->dev, "%s devm_clk_get %s fail, ret %ld\n",
+ __func__,
+ aud_clks[i], PTR_ERR(afe_priv->clk[i]));
+ afe_priv->clk[i] = NULL;
+ }
+ }
+
+ afe_priv->apmixedsys = syscon_regmap_lookup_by_phandle(of_node,
+ "mediatek,apmixedsys");
+ if (IS_ERR(afe_priv->apmixedsys)) {
+ dev_err(afe->dev, "%s() Cannot find apmixedsys controller: %ld\n",
+ __func__, PTR_ERR(afe_priv->apmixedsys));
+ return PTR_ERR(afe_priv->apmixedsys);
+ }
+
+ afe_priv->topckgen = syscon_regmap_lookup_by_phandle(of_node,
+ "mediatek,topckgen");
+ if (IS_ERR(afe_priv->topckgen)) {
+ dev_err(afe->dev, "%s() Cannot find topckgen controller: %ld\n",
+ __func__, PTR_ERR(afe_priv->topckgen));
+ return PTR_ERR(afe_priv->topckgen);
+ }
+
+ afe_priv->infracfg = syscon_regmap_lookup_by_phandle(of_node,
+ "mediatek,infracfg");
+ if (IS_ERR(afe_priv->infracfg)) {
+ dev_err(afe->dev, "%s() Cannot find infracfg: %ld\n",
+ __func__, PTR_ERR(afe_priv->infracfg));
+ return PTR_ERR(afe_priv->infracfg);
+ }
+
+ return 0;
+}
+
+void mt8186_deinit_clock(void *priv)
+{
+ struct mtk_base_afe *afe = priv;
+
+ mt8186_audsys_clk_unregister(afe);
+}
diff --git a/sound/soc/mediatek/mt8186/mt8186-afe-clk.h b/sound/soc/mediatek/mt8186/mt8186-afe-clk.h
new file mode 100644
index 000000000000..d5988717d8f2
--- /dev/null
+++ b/sound/soc/mediatek/mt8186/mt8186-afe-clk.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * mt8186-afe-clk.h -- Mediatek 8186 afe clock ctrl definition
+ *
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Jiaxin Yu <jiaxin.yu@mediatek.com>
+ */
+
+#ifndef _MT8186_AFE_CLOCK_CTRL_H_
+#define _MT8186_AFE_CLOCK_CTRL_H_
+
+#define PERI_BUS_DCM_CTRL 0x74
+
+/* APLL */
+#define APLL1_W_NAME "APLL1"
+#define APLL2_W_NAME "APLL2"
+enum {
+ MT8186_APLL1 = 0,
+ MT8186_APLL2,
+};
+
+enum {
+ CLK_AFE = 0,
+ CLK_DAC,
+ CLK_DAC_PREDIS,
+ CLK_ADC,
+ CLK_TML,
+ CLK_APLL22M,
+ CLK_APLL24M,
+ CLK_APLL1_TUNER,
+ CLK_APLL2_TUNER,
+ CLK_TDM,
+ CLK_NLE,
+ CLK_DAC_HIRES,
+ CLK_ADC_HIRES,
+ CLK_I2S1_BCLK,
+ CLK_I2S2_BCLK,
+ CLK_I2S3_BCLK,
+ CLK_I2S4_BCLK,
+ CLK_CONNSYS_I2S_ASRC,
+ CLK_GENERAL1_ASRC,
+ CLK_GENERAL2_ASRC,
+ CLK_ADC_HIRES_TML,
+ CLK_ADDA6_ADC,
+ CLK_ADDA6_ADC_HIRES,
+ CLK_3RD_DAC,
+ CLK_3RD_DAC_PREDIS,
+ CLK_3RD_DAC_TML,
+ CLK_3RD_DAC_HIRES,
+ CLK_ETDM_IN1_BCLK,
+ CLK_ETDM_OUT1_BCLK,
+ CLK_INFRA_SYS_AUDIO,
+ CLK_INFRA_AUDIO_26M,
+ CLK_MUX_AUDIO,
+ CLK_MUX_AUDIOINTBUS,
+ CLK_TOP_MAINPLL_D2_D4,
+ /* apll related mux */
+ CLK_TOP_MUX_AUD_1,
+ CLK_TOP_APLL1_CK,
+ CLK_TOP_MUX_AUD_2,
+ CLK_TOP_APLL2_CK,
+ CLK_TOP_MUX_AUD_ENG1,
+ CLK_TOP_APLL1_D8,
+ CLK_TOP_MUX_AUD_ENG2,
+ CLK_TOP_APLL2_D8,
+ CLK_TOP_MUX_AUDIO_H,
+ CLK_TOP_I2S0_M_SEL,
+ CLK_TOP_I2S1_M_SEL,
+ CLK_TOP_I2S2_M_SEL,
+ CLK_TOP_I2S4_M_SEL,
+ CLK_TOP_TDM_M_SEL,
+ CLK_TOP_APLL12_DIV0,
+ CLK_TOP_APLL12_DIV1,
+ CLK_TOP_APLL12_DIV2,
+ CLK_TOP_APLL12_DIV4,
+ CLK_TOP_APLL12_DIV_TDM,
+ CLK_CLK26M,
+ CLK_NUM
+};
+
+struct mtk_base_afe;
+int mt8186_set_audio_int_bus_parent(struct mtk_base_afe *afe, int clk_id);
+int mt8186_init_clock(struct mtk_base_afe *afe);
+void mt8186_deinit_clock(void *priv);
+int mt8186_afe_enable_cgs(struct mtk_base_afe *afe);
+void mt8186_afe_disable_cgs(struct mtk_base_afe *afe);
+int mt8186_afe_enable_clock(struct mtk_base_afe *afe);
+void mt8186_afe_disable_clock(struct mtk_base_afe *afe);
+int mt8186_afe_suspend_clock(struct mtk_base_afe *afe);
+int mt8186_afe_resume_clock(struct mtk_base_afe *afe);
+
+int mt8186_apll1_enable(struct mtk_base_afe *afe);
+void mt8186_apll1_disable(struct mtk_base_afe *afe);
+
+int mt8186_apll2_enable(struct mtk_base_afe *afe);
+void mt8186_apll2_disable(struct mtk_base_afe *afe);
+
+int mt8186_get_apll_rate(struct mtk_base_afe *afe, int apll);
+int mt8186_get_apll_by_rate(struct mtk_base_afe *afe, int rate);
+int mt8186_get_apll_by_name(struct mtk_base_afe *afe, const char *name);
+
+/* these will be replaced by using CCF */
+int mt8186_mck_enable(struct mtk_base_afe *afe, int mck_id, int rate);
+void mt8186_mck_disable(struct mtk_base_afe *afe, int mck_id);
+
+#endif
diff --git a/sound/soc/mediatek/mt8186/mt8186-afe-common.h b/sound/soc/mediatek/mt8186/mt8186-afe-common.h
new file mode 100644
index 000000000000..b8f03e1b7e49
--- /dev/null
+++ b/sound/soc/mediatek/mt8186/mt8186-afe-common.h
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * mt8186-afe-common.h -- Mediatek 8186 audio driver definitions
+ *
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Jiaxin Yu <jiaxin.yu@mediatek.com>
+ */
+
+#ifndef _MT_8186_AFE_COMMON_H_
+#define _MT_8186_AFE_COMMON_H_
+#include <sound/soc.h>
+#include <linux/list.h>
+#include <linux/regmap.h>
+#include "mt8186-reg.h"
+#include "../common/mtk-base-afe.h"
+
+enum {
+ MT8186_MEMIF_DL1,
+ MT8186_MEMIF_DL12,
+ MT8186_MEMIF_DL2,
+ MT8186_MEMIF_DL3,
+ MT8186_MEMIF_DL4,
+ MT8186_MEMIF_DL5,
+ MT8186_MEMIF_DL6,
+ MT8186_MEMIF_DL7,
+ MT8186_MEMIF_DL8,
+ MT8186_MEMIF_VUL12,
+ MT8186_MEMIF_VUL2,
+ MT8186_MEMIF_VUL3,
+ MT8186_MEMIF_VUL4,
+ MT8186_MEMIF_VUL5,
+ MT8186_MEMIF_VUL6,
+ MT8186_MEMIF_AWB,
+ MT8186_MEMIF_AWB2,
+ MT8186_MEMIF_NUM,
+ MT8186_DAI_ADDA = MT8186_MEMIF_NUM,
+ MT8186_DAI_AP_DMIC,
+ MT8186_DAI_CONNSYS_I2S,
+ MT8186_DAI_I2S_0,
+ MT8186_DAI_I2S_1,
+ MT8186_DAI_I2S_2,
+ MT8186_DAI_I2S_3,
+ MT8186_DAI_HW_GAIN_1,
+ MT8186_DAI_HW_GAIN_2,
+ MT8186_DAI_SRC_1,
+ MT8186_DAI_SRC_2,
+ MT8186_DAI_PCM,
+ MT8186_DAI_TDM_IN,
+ MT8186_DAI_HOSTLESS_LPBK,
+ MT8186_DAI_HOSTLESS_FM,
+ MT8186_DAI_HOSTLESS_HW_GAIN_AAUDIO,
+ MT8186_DAI_HOSTLESS_SRC_AAUDIO,
+ MT8186_DAI_HOSTLESS_SRC_1,
+ MT8186_DAI_HOSTLESS_SRC_BARGEIN,
+ MT8186_DAI_HOSTLESS_UL1,
+ MT8186_DAI_HOSTLESS_UL2,
+ MT8186_DAI_HOSTLESS_UL3,
+ MT8186_DAI_HOSTLESS_UL5,
+ MT8186_DAI_HOSTLESS_UL6,
+ MT8186_DAI_NUM,
+};
+
+#define MT8186_RECORD_MEMIF MT8186_MEMIF_VUL12
+#define MT8186_ECHO_REF_MEMIF MT8186_MEMIF_AWB
+#define MT8186_PRIMARY_MEMIF MT8186_MEMIF_DL1
+#define MT8186_FAST_MEMIF MT8186_MEMIF_DL2
+#define MT8186_DEEP_MEMIF MT8186_MEMIF_DL3
+#define MT8186_VOIP_MEMIF MT8186_MEMIF_DL12
+#define MT8186_MMAP_DL_MEMIF MT8186_MEMIF_DL5
+#define MT8186_MMAP_UL_MEMIF MT8186_MEMIF_VUL5
+#define MT8186_BARGEIN_MEMIF MT8186_MEMIF_AWB
+
+enum {
+ MT8186_IRQ_0,
+ MT8186_IRQ_1,
+ MT8186_IRQ_2,
+ MT8186_IRQ_3,
+ MT8186_IRQ_4,
+ MT8186_IRQ_5,
+ MT8186_IRQ_6,
+ MT8186_IRQ_7,
+ MT8186_IRQ_8,
+ MT8186_IRQ_9,
+ MT8186_IRQ_10,
+ MT8186_IRQ_11,
+ MT8186_IRQ_12,
+ MT8186_IRQ_13,
+ MT8186_IRQ_14,
+ MT8186_IRQ_15,
+ MT8186_IRQ_16,
+ MT8186_IRQ_17,
+ MT8186_IRQ_18,
+ MT8186_IRQ_19,
+ MT8186_IRQ_20,
+ MT8186_IRQ_21,
+ MT8186_IRQ_22,
+ MT8186_IRQ_23,
+ MT8186_IRQ_24,
+ MT8186_IRQ_25,
+ MT8186_IRQ_26,
+ MT8186_IRQ_NUM,
+};
+
+enum {
+ MT8186_AFE_IRQ_DIR_MCU = 0,
+ MT8186_AFE_IRQ_DIR_DSP,
+ MT8186_AFE_IRQ_DIR_BOTH,
+};
+
+enum {
+ MTKAIF_PROTOCOL_1 = 0,
+ MTKAIF_PROTOCOL_2,
+ MTKAIF_PROTOCOL_2_CLK_P2,
+};
+
+enum {
+ MTK_AFE_ADDA_DL_GAIN_MUTE = 0,
+ MTK_AFE_ADDA_DL_GAIN_NORMAL = 0xf74f,
+ /* SA suggests applying -0.3 dB to the audio/speech path */
+};
+
+#define MTK_SPK_I2S_0_STR "MTK_SPK_I2S_0"
+#define MTK_SPK_I2S_1_STR "MTK_SPK_I2S_1"
+#define MTK_SPK_I2S_2_STR "MTK_SPK_I2S_2"
+#define MTK_SPK_I2S_3_STR "MTK_SPK_I2S_3"
+
+/* MCLK */
+enum {
+ MT8186_I2S0_MCK = 0,
+ MT8186_I2S1_MCK,
+ MT8186_I2S2_MCK,
+ MT8186_I2S4_MCK,
+ MT8186_TDM_MCK,
+ MT8186_MCK_NUM,
+};
+
+struct snd_pcm_substream;
+struct mtk_base_irq_data;
+struct clk;
+
+struct mt8186_afe_private {
+ struct clk **clk;
+ struct clk_lookup **lookup;
+ struct regmap *topckgen;
+ struct regmap *apmixedsys;
+ struct regmap *infracfg;
+ int irq_cnt[MT8186_MEMIF_NUM];
+ int stf_positive_gain_db;
+ int pm_runtime_bypass_reg_ctl;
+ int sgen_mode;
+ int sgen_rate;
+ int sgen_amplitude;
+
+ /* xrun assert */
+ int xrun_assert[MT8186_MEMIF_NUM];
+
+ /* dai */
+ bool dai_on[MT8186_DAI_NUM];
+ void *dai_priv[MT8186_DAI_NUM];
+
+ /* adda */
+ bool mtkaif_calibration_ok;
+ int mtkaif_protocol;
+ int mtkaif_chosen_phase[4];
+ int mtkaif_phase_cycle[4];
+ int mtkaif_calibration_num_phase;
+ int mtkaif_dmic;
+ int mtkaif_looback0;
+ int mtkaif_looback1;
+
+ /* mck */
+ int mck_rate[MT8186_MCK_NUM];
+};
+
+int mt8186_dai_adda_register(struct mtk_base_afe *afe);
+int mt8186_dai_i2s_register(struct mtk_base_afe *afe);
+int mt8186_dai_tdm_register(struct mtk_base_afe *afe);
+int mt8186_dai_hw_gain_register(struct mtk_base_afe *afe);
+int mt8186_dai_src_register(struct mtk_base_afe *afe);
+int mt8186_dai_pcm_register(struct mtk_base_afe *afe);
+int mt8186_dai_hostless_register(struct mtk_base_afe *afe);
+
+int mt8186_add_misc_control(struct snd_soc_component *component);
+
+unsigned int mt8186_general_rate_transform(struct device *dev,
+ unsigned int rate);
+unsigned int mt8186_rate_transform(struct device *dev,
+ unsigned int rate, int aud_blk);
+unsigned int mt8186_tdm_relatch_rate_transform(struct device *dev,
+ unsigned int rate);
+
+int mt8186_dai_set_priv(struct mtk_base_afe *afe, int id,
+ int priv_size, const void *priv_data);
+
+#endif
diff --git a/sound/soc/mediatek/mt8186/mt8186-afe-control.c b/sound/soc/mediatek/mt8186/mt8186-afe-control.c
new file mode 100644
index 000000000000..d714e9641571
--- /dev/null
+++ b/sound/soc/mediatek/mt8186/mt8186-afe-control.c
@@ -0,0 +1,255 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// MediaTek ALSA SoC Audio Control
+//
+// Copyright (c) 2022 MediaTek Inc.
+// Author: Jiaxin Yu <jiaxin.yu@mediatek.com>
+
+#include "mt8186-afe-common.h"
+#include <linux/pm_runtime.h>
+
+enum {
+ MTK_AFE_RATE_8K = 0,
+ MTK_AFE_RATE_11K,
+ MTK_AFE_RATE_12K,
+ MTK_AFE_RATE_384K,
+ MTK_AFE_RATE_16K,
+ MTK_AFE_RATE_22K,
+ MTK_AFE_RATE_24K,
+ MTK_AFE_RATE_352K,
+ MTK_AFE_RATE_32K,
+ MTK_AFE_RATE_44K,
+ MTK_AFE_RATE_48K,
+ MTK_AFE_RATE_88K,
+ MTK_AFE_RATE_96K,
+ MTK_AFE_RATE_176K,
+ MTK_AFE_RATE_192K,
+ MTK_AFE_RATE_260K,
+};
+
+enum {
+ MTK_AFE_PCM_RATE_8K = 0,
+ MTK_AFE_PCM_RATE_16K,
+ MTK_AFE_PCM_RATE_32K,
+ MTK_AFE_PCM_RATE_48K,
+};
+
+enum {
+ MTK_AFE_TDM_RATE_8K = 0,
+ MTK_AFE_TDM_RATE_12K,
+ MTK_AFE_TDM_RATE_16K,
+ MTK_AFE_TDM_RATE_24K,
+ MTK_AFE_TDM_RATE_32K,
+ MTK_AFE_TDM_RATE_48K,
+ MTK_AFE_TDM_RATE_64K,
+ MTK_AFE_TDM_RATE_96K,
+ MTK_AFE_TDM_RATE_128K,
+ MTK_AFE_TDM_RATE_192K,
+ MTK_AFE_TDM_RATE_256K,
+ MTK_AFE_TDM_RATE_384K,
+ MTK_AFE_TDM_RATE_11K,
+ MTK_AFE_TDM_RATE_22K,
+ MTK_AFE_TDM_RATE_44K,
+ MTK_AFE_TDM_RATE_88K,
+ MTK_AFE_TDM_RATE_176K,
+ MTK_AFE_TDM_RATE_352K,
+};
+
+enum {
+ MTK_AFE_TDM_RELATCH_RATE_8K = 0,
+ MTK_AFE_TDM_RELATCH_RATE_11K,
+ MTK_AFE_TDM_RELATCH_RATE_12K,
+ MTK_AFE_TDM_RELATCH_RATE_16K,
+ MTK_AFE_TDM_RELATCH_RATE_22K,
+ MTK_AFE_TDM_RELATCH_RATE_24K,
+ MTK_AFE_TDM_RELATCH_RATE_32K,
+ MTK_AFE_TDM_RELATCH_RATE_44K,
+ MTK_AFE_TDM_RELATCH_RATE_48K,
+ MTK_AFE_TDM_RELATCH_RATE_88K,
+ MTK_AFE_TDM_RELATCH_RATE_96K,
+ MTK_AFE_TDM_RELATCH_RATE_176K,
+ MTK_AFE_TDM_RELATCH_RATE_192K,
+ MTK_AFE_TDM_RELATCH_RATE_352K,
+ MTK_AFE_TDM_RELATCH_RATE_384K,
+};
+
+unsigned int mt8186_general_rate_transform(struct device *dev, unsigned int rate)
+{
+ switch (rate) {
+ case 8000:
+ return MTK_AFE_RATE_8K;
+ case 11025:
+ return MTK_AFE_RATE_11K;
+ case 12000:
+ return MTK_AFE_RATE_12K;
+ case 16000:
+ return MTK_AFE_RATE_16K;
+ case 22050:
+ return MTK_AFE_RATE_22K;
+ case 24000:
+ return MTK_AFE_RATE_24K;
+ case 32000:
+ return MTK_AFE_RATE_32K;
+ case 44100:
+ return MTK_AFE_RATE_44K;
+ case 48000:
+ return MTK_AFE_RATE_48K;
+ case 88200:
+ return MTK_AFE_RATE_88K;
+ case 96000:
+ return MTK_AFE_RATE_96K;
+ case 176400:
+ return MTK_AFE_RATE_176K;
+ case 192000:
+ return MTK_AFE_RATE_192K;
+ case 260000:
+ return MTK_AFE_RATE_260K;
+ case 352800:
+ return MTK_AFE_RATE_352K;
+ case 384000:
+ return MTK_AFE_RATE_384K;
+ default:
+ dev_err(dev, "%s(), rate %u invalid, use %d!!!\n",
+ __func__, rate, MTK_AFE_RATE_48K);
+ }
+
+ return MTK_AFE_RATE_48K;
+}
+
+static unsigned int tdm_rate_transform(struct device *dev, unsigned int rate)
+{
+ switch (rate) {
+ case 8000:
+ return MTK_AFE_TDM_RATE_8K;
+ case 11025:
+ return MTK_AFE_TDM_RATE_11K;
+ case 12000:
+ return MTK_AFE_TDM_RATE_12K;
+ case 16000:
+ return MTK_AFE_TDM_RATE_16K;
+ case 22050:
+ return MTK_AFE_TDM_RATE_22K;
+ case 24000:
+ return MTK_AFE_TDM_RATE_24K;
+ case 32000:
+ return MTK_AFE_TDM_RATE_32K;
+ case 44100:
+ return MTK_AFE_TDM_RATE_44K;
+ case 48000:
+ return MTK_AFE_TDM_RATE_48K;
+ case 64000:
+ return MTK_AFE_TDM_RATE_64K;
+ case 88200:
+ return MTK_AFE_TDM_RATE_88K;
+ case 96000:
+ return MTK_AFE_TDM_RATE_96K;
+ case 128000:
+ return MTK_AFE_TDM_RATE_128K;
+ case 176400:
+ return MTK_AFE_TDM_RATE_176K;
+ case 192000:
+ return MTK_AFE_TDM_RATE_192K;
+ case 256000:
+ return MTK_AFE_TDM_RATE_256K;
+ case 352800:
+ return MTK_AFE_TDM_RATE_352K;
+ case 384000:
+ return MTK_AFE_TDM_RATE_384K;
+ default:
+ dev_err(dev, "%s(), rate %u invalid, use %d!!!\n",
+ __func__, rate, MTK_AFE_TDM_RATE_48K);
+ }
+
+ return MTK_AFE_TDM_RATE_48K;
+}
+
+static unsigned int pcm_rate_transform(struct device *dev, unsigned int rate)
+{
+ switch (rate) {
+ case 8000:
+ return MTK_AFE_PCM_RATE_8K;
+ case 16000:
+ return MTK_AFE_PCM_RATE_16K;
+ case 32000:
+ return MTK_AFE_PCM_RATE_32K;
+ case 48000:
+ return MTK_AFE_PCM_RATE_48K;
+ default:
+ dev_err(dev, "%s(), rate %u invalid, use %d!!!\n",
+ __func__, rate, MTK_AFE_PCM_RATE_48K);
+ }
+
+ return MTK_AFE_PCM_RATE_48K;
+}
+
+unsigned int mt8186_tdm_relatch_rate_transform(struct device *dev, unsigned int rate)
+{
+ switch (rate) {
+ case 8000:
+ return MTK_AFE_TDM_RELATCH_RATE_8K;
+ case 11025:
+ return MTK_AFE_TDM_RELATCH_RATE_11K;
+ case 12000:
+ return MTK_AFE_TDM_RELATCH_RATE_12K;
+ case 16000:
+ return MTK_AFE_TDM_RELATCH_RATE_16K;
+ case 22050:
+ return MTK_AFE_TDM_RELATCH_RATE_22K;
+ case 24000:
+ return MTK_AFE_TDM_RELATCH_RATE_24K;
+ case 32000:
+ return MTK_AFE_TDM_RELATCH_RATE_32K;
+ case 44100:
+ return MTK_AFE_TDM_RELATCH_RATE_44K;
+ case 48000:
+ return MTK_AFE_TDM_RELATCH_RATE_48K;
+ case 88200:
+ return MTK_AFE_TDM_RELATCH_RATE_88K;
+ case 96000:
+ return MTK_AFE_TDM_RELATCH_RATE_96K;
+ case 176400:
+ return MTK_AFE_TDM_RELATCH_RATE_176K;
+ case 192000:
+ return MTK_AFE_TDM_RELATCH_RATE_192K;
+ case 352800:
+ return MTK_AFE_TDM_RELATCH_RATE_352K;
+ case 384000:
+ return MTK_AFE_TDM_RELATCH_RATE_384K;
+ default:
+ dev_err(dev, "%s(), rate %u invalid, use %d!!!\n",
+ __func__, rate, MTK_AFE_TDM_RELATCH_RATE_48K);
+ }
+
+ return MTK_AFE_TDM_RELATCH_RATE_48K;
+}
+
+unsigned int mt8186_rate_transform(struct device *dev, unsigned int rate, int aud_blk)
+{
+ switch (aud_blk) {
+ case MT8186_DAI_PCM:
+ return pcm_rate_transform(dev, rate);
+ case MT8186_DAI_TDM_IN:
+ return tdm_rate_transform(dev, rate);
+ default:
+ return mt8186_general_rate_transform(dev, rate);
+ }
+}
+
+int mt8186_dai_set_priv(struct mtk_base_afe *afe, int id, int priv_size, const void *priv_data)
+{
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ void *temp_data;
+
+ temp_data = devm_kzalloc(afe->dev,
+ priv_size,
+ GFP_KERNEL);
+ if (!temp_data)
+ return -ENOMEM;
+
+ if (priv_data)
+ memcpy(temp_data, priv_data, priv_size);
+
+ afe_priv->dai_priv[id] = temp_data;
+
+ return 0;
+}
diff --git a/sound/soc/mediatek/mt8186/mt8186-afe-gpio.c b/sound/soc/mediatek/mt8186/mt8186-afe-gpio.c
new file mode 100644
index 000000000000..274c0c8ec2f2
--- /dev/null
+++ b/sound/soc/mediatek/mt8186/mt8186-afe-gpio.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mt8186-afe-gpio.c -- Mediatek 8186 afe gpio ctrl
+//
+// Copyright (c) 2022 MediaTek Inc.
+// Author: Jiaxin Yu <jiaxin.yu@mediatek.com>
+
+#include <linux/gpio.h>
+#include <linux/pinctrl/consumer.h>
+
+#include "mt8186-afe-common.h"
+#include "mt8186-afe-gpio.h"
+
+static struct pinctrl *aud_pinctrl;
+
+enum mt8186_afe_gpio {
+ MT8186_AFE_GPIO_CLK_MOSI_OFF,
+ MT8186_AFE_GPIO_CLK_MOSI_ON,
+ MT8186_AFE_GPIO_CLK_MISO_OFF,
+ MT8186_AFE_GPIO_CLK_MISO_ON,
+ MT8186_AFE_GPIO_DAT_MISO_OFF,
+ MT8186_AFE_GPIO_DAT_MISO_ON,
+ MT8186_AFE_GPIO_DAT_MOSI_OFF,
+ MT8186_AFE_GPIO_DAT_MOSI_ON,
+ MT8186_AFE_GPIO_I2S0_OFF,
+ MT8186_AFE_GPIO_I2S0_ON,
+ MT8186_AFE_GPIO_I2S1_OFF,
+ MT8186_AFE_GPIO_I2S1_ON,
+ MT8186_AFE_GPIO_I2S2_OFF,
+ MT8186_AFE_GPIO_I2S2_ON,
+ MT8186_AFE_GPIO_I2S3_OFF,
+ MT8186_AFE_GPIO_I2S3_ON,
+ MT8186_AFE_GPIO_TDM_OFF,
+ MT8186_AFE_GPIO_TDM_ON,
+ MT8186_AFE_GPIO_PCM_OFF,
+ MT8186_AFE_GPIO_PCM_ON,
+ MT8186_AFE_GPIO_GPIO_NUM
+};
+
+struct audio_gpio_attr {
+ const char *name;
+ bool gpio_prepare;
+ struct pinctrl_state *gpioctrl;
+};
+
+static struct audio_gpio_attr aud_gpios[MT8186_AFE_GPIO_GPIO_NUM] = {
+ [MT8186_AFE_GPIO_CLK_MOSI_OFF] = {"aud_clk_mosi_off", false, NULL},
+ [MT8186_AFE_GPIO_CLK_MOSI_ON] = {"aud_clk_mosi_on", false, NULL},
+ [MT8186_AFE_GPIO_CLK_MISO_OFF] = {"aud_clk_miso_off", false, NULL},
+ [MT8186_AFE_GPIO_CLK_MISO_ON] = {"aud_clk_miso_on", false, NULL},
+ [MT8186_AFE_GPIO_DAT_MISO_OFF] = {"aud_dat_miso_off", false, NULL},
+ [MT8186_AFE_GPIO_DAT_MISO_ON] = {"aud_dat_miso_on", false, NULL},
+ [MT8186_AFE_GPIO_DAT_MOSI_OFF] = {"aud_dat_mosi_off", false, NULL},
+ [MT8186_AFE_GPIO_DAT_MOSI_ON] = {"aud_dat_mosi_on", false, NULL},
+ [MT8186_AFE_GPIO_I2S0_OFF] = {"aud_gpio_i2s0_off", false, NULL},
+ [MT8186_AFE_GPIO_I2S0_ON] = {"aud_gpio_i2s0_on", false, NULL},
+ [MT8186_AFE_GPIO_I2S1_OFF] = {"aud_gpio_i2s1_off", false, NULL},
+ [MT8186_AFE_GPIO_I2S1_ON] = {"aud_gpio_i2s1_on", false, NULL},
+ [MT8186_AFE_GPIO_I2S2_OFF] = {"aud_gpio_i2s2_off", false, NULL},
+ [MT8186_AFE_GPIO_I2S2_ON] = {"aud_gpio_i2s2_on", false, NULL},
+ [MT8186_AFE_GPIO_I2S3_OFF] = {"aud_gpio_i2s3_off", false, NULL},
+ [MT8186_AFE_GPIO_I2S3_ON] = {"aud_gpio_i2s3_on", false, NULL},
+ [MT8186_AFE_GPIO_TDM_OFF] = {"aud_gpio_tdm_off", false, NULL},
+ [MT8186_AFE_GPIO_TDM_ON] = {"aud_gpio_tdm_on", false, NULL},
+ [MT8186_AFE_GPIO_PCM_OFF] = {"aud_gpio_pcm_off", false, NULL},
+ [MT8186_AFE_GPIO_PCM_ON] = {"aud_gpio_pcm_on", false, NULL},
+};
+
+static DEFINE_MUTEX(gpio_request_mutex);
+
+int mt8186_afe_gpio_init(struct device *dev)
+{
+ int i, j, ret;
+
+ aud_pinctrl = devm_pinctrl_get(dev);
+ if (IS_ERR(aud_pinctrl)) {
+ ret = PTR_ERR(aud_pinctrl);
+ dev_err(dev, "%s(), ret %d, cannot get aud_pinctrl!\n",
+ __func__, ret);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(aud_gpios); i++) {
+ aud_gpios[i].gpioctrl = pinctrl_lookup_state(aud_pinctrl,
+ aud_gpios[i].name);
+ if (IS_ERR(aud_gpios[i].gpioctrl)) {
+ ret = PTR_ERR(aud_gpios[i].gpioctrl);
+ dev_info(dev, "%s(), pinctrl_lookup_state %s fail, ret %d\n",
+ __func__, aud_gpios[i].name, ret);
+ } else {
+ aud_gpios[i].gpio_prepare = true;
+ }
+ }
+
+ /* gpio status init */
+ for (i = MT8186_DAI_ADDA; i <= MT8186_DAI_TDM_IN; i++) {
+ for (j = 0; j <= 1; j++)
+ mt8186_afe_gpio_request(dev, false, i, j);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt8186_afe_gpio_init);
+
+static int mt8186_afe_gpio_select(struct device *dev,
+ enum mt8186_afe_gpio type)
+{
+ int ret = 0;
+
+ if (type < 0 || type >= MT8186_AFE_GPIO_GPIO_NUM) {
+ dev_err(dev, "%s(), error, invalid gpio type %d\n",
+ __func__, type);
+ return -EINVAL;
+ }
+
+ if (!aud_gpios[type].gpio_prepare) {
+ dev_err(dev, "%s(), error, gpio type %d not prepared\n",
+ __func__, type);
+ return -EIO;
+ }
+
+ ret = pinctrl_select_state(aud_pinctrl,
+ aud_gpios[type].gpioctrl);
+ if (ret) {
+ dev_err(dev, "%s(), error, can not set gpio type %d\n",
+ __func__, type);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mt8186_afe_gpio_adda_dl(struct device *dev, bool enable)
+{
+ int ret;
+
+ if (enable) {
+ ret = mt8186_afe_gpio_select(dev, MT8186_AFE_GPIO_CLK_MOSI_ON);
+ if (ret) {
+ dev_err(dev, "%s(), MOSI CLK ON select fail!\n", __func__);
+ return ret;
+ }
+
+ ret = mt8186_afe_gpio_select(dev, MT8186_AFE_GPIO_DAT_MOSI_ON);
+ if (ret) {
+ dev_err(dev, "%s(), MOSI DAT ON select fail!\n", __func__);
+ return ret;
+ }
+ } else {
+ ret = mt8186_afe_gpio_select(dev, MT8186_AFE_GPIO_DAT_MOSI_OFF);
+ if (ret) {
+ dev_err(dev, "%s(), MOSI DAT OFF select fail!\n", __func__);
+ return ret;
+ }
+
+ ret = mt8186_afe_gpio_select(dev, MT8186_AFE_GPIO_CLK_MOSI_OFF);
+ if (ret) {
+ dev_err(dev, "%s(), MOSI CLK ON select fail!\n", __func__);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int mt8186_afe_gpio_adda_ul(struct device *dev, bool enable)
+{
+ int ret;
+
+ if (enable) {
+ ret = mt8186_afe_gpio_select(dev, MT8186_AFE_GPIO_CLK_MISO_ON);
+ if (ret) {
+ dev_err(dev, "%s(), MISO CLK ON slect fail!\n", __func__);
+ return ret;
+ }
+
+ ret = mt8186_afe_gpio_select(dev, MT8186_AFE_GPIO_DAT_MISO_ON);
+ if (ret) {
+ dev_err(dev, "%s(), MISO DAT ON slect fail!\n", __func__);
+ return ret;
+ }
+ } else {
+ ret = mt8186_afe_gpio_select(dev, MT8186_AFE_GPIO_DAT_MISO_OFF);
+ if (ret) {
+ dev_err(dev, "%s(), MISO DAT OFF slect fail!\n", __func__);
+ return ret;
+ }
+
+ ret = mt8186_afe_gpio_select(dev, MT8186_AFE_GPIO_CLK_MISO_OFF);
+ if (ret) {
+ dev_err(dev, "%s(), MISO CLK OFF slect fail!\n", __func__);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int mt8186_afe_gpio_request(struct device *dev, bool enable,
+ int dai, int uplink)
+{
+ enum mt8186_afe_gpio sel;
+ int ret = -EINVAL;
+
+ mutex_lock(&gpio_request_mutex);
+
+ switch (dai) {
+ case MT8186_DAI_ADDA:
+ if (uplink)
+ ret = mt8186_afe_gpio_adda_ul(dev, enable);
+ else
+ ret = mt8186_afe_gpio_adda_dl(dev, enable);
+ goto unlock;
+ case MT8186_DAI_I2S_0:
+ sel = enable ? MT8186_AFE_GPIO_I2S0_ON : MT8186_AFE_GPIO_I2S0_OFF;
+ break;
+ case MT8186_DAI_I2S_1:
+ sel = enable ? MT8186_AFE_GPIO_I2S1_ON : MT8186_AFE_GPIO_I2S1_OFF;
+ break;
+ case MT8186_DAI_I2S_2:
+ sel = enable ? MT8186_AFE_GPIO_I2S2_ON : MT8186_AFE_GPIO_I2S2_OFF;
+ break;
+ case MT8186_DAI_I2S_3:
+ sel = enable ? MT8186_AFE_GPIO_I2S3_ON : MT8186_AFE_GPIO_I2S3_OFF;
+ break;
+ case MT8186_DAI_TDM_IN:
+ sel = enable ? MT8186_AFE_GPIO_TDM_ON : MT8186_AFE_GPIO_TDM_OFF;
+ break;
+ case MT8186_DAI_PCM:
+ sel = enable ? MT8186_AFE_GPIO_PCM_ON : MT8186_AFE_GPIO_PCM_OFF;
+ break;
+ default:
+ dev_err(dev, "%s(), invalid dai %d\n", __func__, dai);
+ goto unlock;
+ }
+
+ ret = mt8186_afe_gpio_select(dev, sel);
+
+unlock:
+ mutex_unlock(&gpio_request_mutex);
+
+ return ret;
+}
diff --git a/sound/soc/mediatek/mt8186/mt8186-afe-gpio.h b/sound/soc/mediatek/mt8186/mt8186-afe-gpio.h
new file mode 100644
index 000000000000..1ddc27838eb1
--- /dev/null
+++ b/sound/soc/mediatek/mt8186/mt8186-afe-gpio.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * mt8186-afe-gpio.h -- Mediatek 8186 afe gpio ctrl definition
+ *
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Jiaxin Yu <jiaxin.yu@mediatek.com>
+ */
+
+#ifndef _MT8186_AFE_GPIO_H_
+#define _MT8186_AFE_GPIO_H_
+
+struct mtk_base_afe;
+
+int mt8186_afe_gpio_init(struct device *dev);
+
+int mt8186_afe_gpio_request(struct device *dev, bool enable,
+ int dai, int uplink);
+
+#endif
diff --git a/sound/soc/mediatek/mt8186/mt8186-afe-pcm.c b/sound/soc/mediatek/mt8186/mt8186-afe-pcm.c
new file mode 100644
index 000000000000..eb729ab00f5a
--- /dev/null
+++ b/sound/soc/mediatek/mt8186/mt8186-afe-pcm.c
@@ -0,0 +1,3000 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Mediatek ALSA SoC AFE platform driver for 8186
+//
+// Copyright (c) 2022 MediaTek Inc.
+// Author: Jiaxin Yu <jiaxin.yu@mediatek.com>
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <sound/soc.h>
+
+#include "../common/mtk-afe-platform-driver.h"
+#include "../common/mtk-afe-fe-dai.h"
+
+#include "mt8186-afe-common.h"
+#include "mt8186-afe-clk.h"
+#include "mt8186-afe-gpio.h"
+#include "mt8186-interconnection.h"
+
+static const struct snd_pcm_hardware mt8186_afe_hardware = {
+ .info = (SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_MMAP_VALID),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
+ .period_bytes_min = 96,
+ .period_bytes_max = 4 * 48 * 1024,
+ .periods_min = 2,
+ .periods_max = 256,
+ .buffer_bytes_max = 4 * 48 * 1024,
+ .fifo_size = 0,
+};
+
+static int mt8186_fe_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ int id = asoc_rtd_to_cpu(rtd, 0)->id;
+ struct mtk_base_afe_memif *memif = &afe->memif[id];
+ const struct snd_pcm_hardware *mtk_afe_hardware = afe->mtk_afe_hardware;
+ int ret;
+
+ memif->substream = substream;
+
+ snd_pcm_hw_constraint_step(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 16);
+
+ snd_soc_set_runtime_hwparams(substream, mtk_afe_hardware);
+
+ ret = snd_pcm_hw_constraint_integer(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (ret < 0) {
+ dev_err(afe->dev, "snd_pcm_hw_constraint_integer failed\n");
+ return ret;
+ }
+
+ /* dynamically allocate an irq to the memif */
+ if (memif->irq_usage < 0) {
+ int irq_id = mtk_dynamic_irq_acquire(afe);
+
+ if (irq_id != afe->irqs_size) {
+ /* link */
+ memif->irq_usage = irq_id;
+ } else {
+ dev_err(afe->dev, "%s() error: no more asys irq\n",
+ __func__);
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+}
+
+static void mt8186_fe_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ int id = asoc_rtd_to_cpu(rtd, 0)->id;
+ struct mtk_base_afe_memif *memif = &afe->memif[id];
+ int irq_id = memif->irq_usage;
+
+ memif->substream = NULL;
+ afe_priv->irq_cnt[id] = 0;
+ afe_priv->xrun_assert[id] = 0;
+
+ if (!memif->const_irq) {
+ mtk_dynamic_irq_release(afe, irq_id);
+ memif->irq_usage = -1;
+ memif->substream = NULL;
+ }
+}
+
+static int mt8186_fe_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+ int id = asoc_rtd_to_cpu(rtd, 0)->id;
+ unsigned int channels = params_channels(params);
+ unsigned int rate = params_rate(params);
+ int ret;
+
+ ret = mtk_afe_fe_hw_params(substream, params, dai);
+ if (ret)
+ return ret;
+
+ /* channel merge configuration, enable control is in UL5_IN_MUX */
+ if (id == MT8186_MEMIF_VUL3) {
+ int update_cnt = 8;
+ unsigned int val = 0;
+ unsigned int mask = 0;
+ int fs_mode = mt8186_rate_transform(afe->dev, rate, id);
+
+ /* set rate, channel, update cnt, disable sgen */
+ val = fs_mode << CM1_FS_SELECT_SFT |
+ (channels - 1) << CHANNEL_MERGE0_CHNUM_SFT |
+ update_cnt << CHANNEL_MERGE0_UPDATE_CNT_SFT;
+ mask = CM1_FS_SELECT_MASK_SFT |
+ CHANNEL_MERGE0_CHNUM_MASK_SFT |
+ CHANNEL_MERGE0_UPDATE_CNT_MASK_SFT;
+ regmap_update_bits(afe->regmap, AFE_CM1_CON, mask, val);
+ }
+
+ return 0;
+}
+
+static int mt8186_fe_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+ int ret;
+
+ ret = mtk_afe_fe_hw_free(substream, dai);
+ if (ret) {
+ dev_err(afe->dev, "%s failed\n", __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mt8186_fe_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_pcm_runtime * const runtime = substream->runtime;
+ struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ int id = asoc_rtd_to_cpu(rtd, 0)->id;
+ struct mtk_base_afe_memif *memif = &afe->memif[id];
+ int irq_id = memif->irq_usage;
+ struct mtk_base_afe_irq *irqs = &afe->irqs[irq_id];
+ const struct mtk_base_irq_data *irq_data = irqs->irq_data;
+ unsigned int rate = runtime->rate;
+ unsigned int counter;
+ int fs;
+ int ret;
+
+ dev_dbg(afe->dev, "%s(), %s cmd %d, irq_id %d\n",
+ __func__, memif->data->name, cmd, irq_id);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ ret = mtk_memif_set_enable(afe, id);
+ if (ret) {
+ dev_err(afe->dev, "%s(), error, id %d, memif enable, ret %d\n",
+ __func__, id, ret);
+ return ret;
+ }
+
+ /*
+ * for low-latency capture, the UL memif needs to
+ * read some data before the irq is enabled
+ */
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE &&
+ ((runtime->period_size * 1000) / rate <= 10))
+ udelay(300);
+
+ /* set irq counter */
+ if (afe_priv->irq_cnt[id] > 0)
+ counter = afe_priv->irq_cnt[id];
+ else
+ counter = runtime->period_size;
+
+ regmap_update_bits(afe->regmap, irq_data->irq_cnt_reg,
+ irq_data->irq_cnt_maskbit
+ << irq_data->irq_cnt_shift,
+ counter << irq_data->irq_cnt_shift);
+
+ /* set irq fs */
+ fs = afe->irq_fs(substream, runtime->rate);
+ if (fs < 0)
+ return -EINVAL;
+
+ regmap_update_bits(afe->regmap, irq_data->irq_fs_reg,
+ irq_data->irq_fs_maskbit
+ << irq_data->irq_fs_shift,
+ fs << irq_data->irq_fs_shift);
+
+ /* enable interrupt */
+ if (runtime->stop_threshold != ~(0U))
+ regmap_update_bits(afe->regmap,
+ irq_data->irq_en_reg,
+ 1 << irq_data->irq_en_shift,
+ 1 << irq_data->irq_en_shift);
+ return 0;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ if (afe_priv->xrun_assert[id] > 0) {
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+ int avail = snd_pcm_capture_avail(runtime);
+ /* ALSA can trigger stop/start when an xrun occurs */
+ if (avail >= runtime->buffer_size)
+ dev_dbg(afe->dev, "%s(), id %d, xrun assert\n",
+ __func__, id);
+ }
+ }
+
+ ret = mtk_memif_set_disable(afe, id);
+ if (ret)
+ dev_err(afe->dev, "%s(), error, id %d, memif enable, ret %d\n",
+ __func__, id, ret);
+
+ /* disable interrupt */
+ if (runtime->stop_threshold != ~(0U))
+ regmap_update_bits(afe->regmap,
+ irq_data->irq_en_reg,
+ 1 << irq_data->irq_en_shift,
+ 0 << irq_data->irq_en_shift);
+
+ /* clear pending IRQ */
+ regmap_write(afe->regmap, irq_data->irq_clr_reg,
+ 1 << irq_data->irq_clr_shift);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mt8186_memif_fs(struct snd_pcm_substream *substream,
+ unsigned int rate)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_component *component =
+ snd_soc_rtdcom_lookup(rtd, AFE_PCM_NAME);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);
+ int id = asoc_rtd_to_cpu(rtd, 0)->id;
+
+ return mt8186_rate_transform(afe->dev, rate, id);
+}
+
+static int mt8186_get_dai_fs(struct mtk_base_afe *afe,
+ int dai_id, unsigned int rate)
+{
+ return mt8186_rate_transform(afe->dev, rate, dai_id);
+}
+
+static int mt8186_irq_fs(struct snd_pcm_substream *substream, unsigned int rate)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_component *component =
+ snd_soc_rtdcom_lookup(rtd, AFE_PCM_NAME);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);
+
+ return mt8186_general_rate_transform(afe->dev, rate);
+}
+
+static int mt8186_get_memif_pbuf_size(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
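+ /* use the larger 256-byte prefetch buffer only when the period is longer than 10 ms; shorter periods fall back to 32 bytes */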
+ if ((runtime->period_size * 1000) / runtime->rate > 10)
+ return MT8186_MEMIF_PBUF_SIZE_256_BYTES;
+
+ return MT8186_MEMIF_PBUF_SIZE_32_BYTES;
+}
+
+static int mt8186_fe_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_pcm_runtime * const runtime = substream->runtime;
+ struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+ int id = asoc_rtd_to_cpu(rtd, 0)->id;
+ struct mtk_base_afe_memif *memif = &afe->memif[id];
+ int irq_id = memif->irq_usage;
+ struct mtk_base_afe_irq *irqs = &afe->irqs[irq_id];
+ const struct mtk_base_irq_data *irq_data = irqs->irq_data;
+ unsigned int counter = runtime->period_size;
+ int fs;
+ int ret;
+
+ ret = mtk_afe_fe_prepare(substream, dai);
+ if (ret)
+ return ret;
+
+ /* set irq counter */
+ regmap_update_bits(afe->regmap, irq_data->irq_cnt_reg,
+ irq_data->irq_cnt_maskbit
+ << irq_data->irq_cnt_shift,
+ counter << irq_data->irq_cnt_shift);
+
+ /* set irq fs */
+ fs = afe->irq_fs(substream, runtime->rate);
+
+ if (fs < 0)
+ return -EINVAL;
+
+ regmap_update_bits(afe->regmap, irq_data->irq_fs_reg,
+ irq_data->irq_fs_maskbit
+ << irq_data->irq_fs_shift,
+ fs << irq_data->irq_fs_shift);
+
+ return 0;
+}
+
+/* FE DAIs */
+static const struct snd_soc_dai_ops mt8186_memif_dai_ops = {
+ .startup = mt8186_fe_startup,
+ .shutdown = mt8186_fe_shutdown,
+ .hw_params = mt8186_fe_hw_params,
+ .hw_free = mt8186_fe_hw_free,
+ .prepare = mt8186_fe_prepare,
+ .trigger = mt8186_fe_trigger,
+};
+
+#define MTK_PCM_RATES (SNDRV_PCM_RATE_8000_48000 |\
+ SNDRV_PCM_RATE_88200 |\
+ SNDRV_PCM_RATE_96000 |\
+ SNDRV_PCM_RATE_176400 |\
+ SNDRV_PCM_RATE_192000)
+
+#define MTK_PCM_DAI_RATES (SNDRV_PCM_RATE_8000 |\
+ SNDRV_PCM_RATE_16000 |\
+ SNDRV_PCM_RATE_32000 |\
+ SNDRV_PCM_RATE_48000)
+
+#define MTK_PCM_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+ SNDRV_PCM_FMTBIT_S24_LE |\
+ SNDRV_PCM_FMTBIT_S32_LE)
+
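+ /*
+  * Front-end DAIs: 'DL' memifs are downlink (playback) DMA interfaces,
+  * 'UL' memifs are uplink (capture) DMA interfaces.
+  */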
+static struct snd_soc_dai_driver mt8186_memif_dai_driver[] = {
+ /* FE DAIs: memory interfaces to CPU */
+ {
+ .name = "DL1",
+ .id = MT8186_MEMIF_DL1,
+ .playback = {
+ .stream_name = "DL1",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_PCM_RATES,
+ .formats = MTK_PCM_FORMATS,
+ },
+ .ops = &mt8186_memif_dai_ops,
+ },
+ {
+ .name = "DL12",
+ .id = MT8186_MEMIF_DL12,
+ .playback = {
+ .stream_name = "DL12",
+ .channels_min = 1,
+ .channels_max = 4,
+ .rates = MTK_PCM_RATES,
+ .formats = MTK_PCM_FORMATS,
+ },
+ .ops = &mt8186_memif_dai_ops,
+ },
+ {
+ .name = "DL2",
+ .id = MT8186_MEMIF_DL2,
+ .playback = {
+ .stream_name = "DL2",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_PCM_RATES,
+ .formats = MTK_PCM_FORMATS,
+ },
+ .ops = &mt8186_memif_dai_ops,
+ },
+ {
+ .name = "DL3",
+ .id = MT8186_MEMIF_DL3,
+ .playback = {
+ .stream_name = "DL3",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_PCM_RATES,
+ .formats = MTK_PCM_FORMATS,
+ },
+ .ops = &mt8186_memif_dai_ops,
+ },
+ {
+ .name = "DL4",
+ .id = MT8186_MEMIF_DL4,
+ .playback = {
+ .stream_name = "DL4",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_PCM_RATES,
+ .formats = MTK_PCM_FORMATS,
+ },
+ .ops = &mt8186_memif_dai_ops,
+ },
+ {
+ .name = "DL5",
+ .id = MT8186_MEMIF_DL5,
+ .playback = {
+ .stream_name = "DL5",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_PCM_RATES,
+ .formats = MTK_PCM_FORMATS,
+ },
+ .ops = &mt8186_memif_dai_ops,
+ },
+ {
+ .name = "DL6",
+ .id = MT8186_MEMIF_DL6,
+ .playback = {
+ .stream_name = "DL6",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_PCM_RATES,
+ .formats = MTK_PCM_FORMATS,
+ },
+ .ops = &mt8186_memif_dai_ops,
+ },
+ {
+ .name = "DL7",
+ .id = MT8186_MEMIF_DL7,
+ .playback = {
+ .stream_name = "DL7",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_PCM_RATES,
+ .formats = MTK_PCM_FORMATS,
+ },
+ .ops = &mt8186_memif_dai_ops,
+ },
+ {
+ .name = "DL8",
+ .id = MT8186_MEMIF_DL8,
+ .playback = {
+ .stream_name = "DL8",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_PCM_RATES,
+ .formats = MTK_PCM_FORMATS,
+ },
+ .ops = &mt8186_memif_dai_ops,
+ },
+ {
+ .name = "UL1",
+ .id = MT8186_MEMIF_VUL12,
+ .capture = {
+ .stream_name = "UL1",
+ .channels_min = 1,
+ .channels_max = 4,
+ .rates = MTK_PCM_RATES,
+ .formats = MTK_PCM_FORMATS,
+ },
+ .ops = &mt8186_memif_dai_ops,
+ },
+ {
+ .name = "UL2",
+ .id = MT8186_MEMIF_AWB,
+ .capture = {
+ .stream_name = "UL2",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_PCM_RATES,
+ .formats = MTK_PCM_FORMATS,
+ },
+ .ops = &mt8186_memif_dai_ops,
+ },
+ {
+ .name = "UL3",
+ .id = MT8186_MEMIF_VUL2,
+ .capture = {
+ .stream_name = "UL3",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_PCM_RATES,
+ .formats = MTK_PCM_FORMATS,
+ },
+ .ops = &mt8186_memif_dai_ops,
+ },
+ {
+ .name = "UL4",
+ .id = MT8186_MEMIF_AWB2,
+ .capture = {
+ .stream_name = "UL4",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_PCM_RATES,
+ .formats = MTK_PCM_FORMATS,
+ },
+ .ops = &mt8186_memif_dai_ops,
+ },
+ {
+ .name = "UL5",
+ .id = MT8186_MEMIF_VUL3,
+ .capture = {
+ .stream_name = "UL5",
+ .channels_min = 1,
+ .channels_max = 12,
+ .rates = MTK_PCM_RATES,
+ .formats = MTK_PCM_FORMATS,
+ },
+ .ops = &mt8186_memif_dai_ops,
+ },
+ {
+ .name = "UL6",
+ .id = MT8186_MEMIF_VUL4,
+ .capture = {
+ .stream_name = "UL6",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_PCM_RATES,
+ .formats = MTK_PCM_FORMATS,
+ },
+ .ops = &mt8186_memif_dai_ops,
+ },
+ {
+ .name = "UL7",
+ .id = MT8186_MEMIF_VUL5,
+ .capture = {
+ .stream_name = "UL7",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_PCM_RATES,
+ .formats = MTK_PCM_FORMATS,
+ },
+ .ops = &mt8186_memif_dai_ops,
+ },
+ {
+ .name = "UL8",
+ .id = MT8186_MEMIF_VUL6,
+ .capture = {
+ .stream_name = "UL8",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_PCM_RATES,
+ .formats = MTK_PCM_FORMATS,
+ },
+ .ops = &mt8186_memif_dai_ops,
+ }
+};
+
+/* kcontrol */
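+ /*
+  * 'Audio IRQ1 CNT' and 'Audio IRQ2 CNT' override the period IRQ counter
+  * of the primary and record memifs; 'record_xrun_assert' enables an
+  * xrun debug check in the trigger stop path.
+  */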
+static int mt8186_irq_cnt1_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+
+ ucontrol->value.integer.value[0] =
+ afe_priv->irq_cnt[MT8186_PRIMARY_MEMIF];
+
+ return 0;
+}
+
+static int mt8186_irq_cnt1_set(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ int memif_num = MT8186_PRIMARY_MEMIF;
+ struct mtk_base_afe_memif *memif = &afe->memif[memif_num];
+ int irq_id = memif->irq_usage;
+ int irq_cnt = afe_priv->irq_cnt[memif_num];
+
+ dev_dbg(afe->dev, "%s(), irq_id %d, irq_cnt = %d, value = %ld\n",
+ __func__, irq_id, irq_cnt, ucontrol->value.integer.value[0]);
+
+ if (irq_cnt == ucontrol->value.integer.value[0])
+ return 0;
+
+ irq_cnt = ucontrol->value.integer.value[0];
+ afe_priv->irq_cnt[memif_num] = irq_cnt;
+
+ if (!pm_runtime_status_suspended(afe->dev) && irq_id >= 0) {
+ struct mtk_base_afe_irq *irqs = &afe->irqs[irq_id];
+ const struct mtk_base_irq_data *irq_data = irqs->irq_data;
+
+ regmap_update_bits(afe->regmap, irq_data->irq_cnt_reg,
+ irq_data->irq_cnt_maskbit
+ << irq_data->irq_cnt_shift,
+ irq_cnt << irq_data->irq_cnt_shift);
+ } else {
+ dev_dbg(afe->dev, "%s(), suspended || irq_id %d, not set\n",
+ __func__, irq_id);
+ }
+
+ return 1;
+}
+
+static int mt8186_irq_cnt2_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+
+ ucontrol->value.integer.value[0] =
+ afe_priv->irq_cnt[MT8186_RECORD_MEMIF];
+
+ return 0;
+}
+
+static int mt8186_irq_cnt2_set(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ int memif_num = MT8186_RECORD_MEMIF;
+ struct mtk_base_afe_memif *memif = &afe->memif[memif_num];
+ int irq_id = memif->irq_usage;
+ int irq_cnt = afe_priv->irq_cnt[memif_num];
+
+ dev_dbg(afe->dev, "%s(), irq_id %d, irq_cnt = %d, value = %ld\n",
+ __func__, irq_id, irq_cnt, ucontrol->value.integer.value[0]);
+
+ if (irq_cnt == ucontrol->value.integer.value[0])
+ return 0;
+
+ irq_cnt = ucontrol->value.integer.value[0];
+ afe_priv->irq_cnt[memif_num] = irq_cnt;
+
+ if (!pm_runtime_status_suspended(afe->dev) && irq_id >= 0) {
+ struct mtk_base_afe_irq *irqs = &afe->irqs[irq_id];
+ const struct mtk_base_irq_data *irq_data = irqs->irq_data;
+
+ regmap_update_bits(afe->regmap, irq_data->irq_cnt_reg,
+ irq_data->irq_cnt_maskbit
+ << irq_data->irq_cnt_shift,
+ irq_cnt << irq_data->irq_cnt_shift);
+ } else {
+ dev_dbg(afe->dev, "%s(), suspended || irq_id %d, not set\n",
+ __func__, irq_id);
+ }
+
+ return 1;
+}
+
+static int mt8186_record_xrun_assert_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ int xrun_assert = afe_priv->xrun_assert[MT8186_RECORD_MEMIF];
+
+ ucontrol->value.integer.value[0] = xrun_assert;
+
+ return 0;
+}
+
+static int mt8186_record_xrun_assert_set(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ int xrun_assert = ucontrol->value.integer.value[0];
+
+ dev_dbg(afe->dev, "%s(), xrun_assert %d\n", __func__, xrun_assert);
+
+ if (xrun_assert == afe_priv->xrun_assert[MT8186_RECORD_MEMIF])
+ return 0;
+
+ afe_priv->xrun_assert[MT8186_RECORD_MEMIF] = xrun_assert;
+
+ return 1;
+}
+
+static const struct snd_kcontrol_new mt8186_pcm_kcontrols[] = {
+ SOC_SINGLE_EXT("Audio IRQ1 CNT", SND_SOC_NOPM, 0, 0x3ffff, 0,
+ mt8186_irq_cnt1_get, mt8186_irq_cnt1_set),
+ SOC_SINGLE_EXT("Audio IRQ2 CNT", SND_SOC_NOPM, 0, 0x3ffff, 0,
+ mt8186_irq_cnt2_get, mt8186_irq_cnt2_set),
+ SOC_SINGLE_EXT("record_xrun_assert", SND_SOC_NOPM, 0, 0x1, 0,
+ mt8186_record_xrun_assert_get,
+ mt8186_record_xrun_assert_set),
+};
+
+ /* DMA widgets & routes */
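+ /*
+  * Every capture/CM1 channel below is a DAPM mixer whose switches set
+  * bits in the AFE_CONNxx interconnect registers, selecting which
+  * sources feed that channel.
+  */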
+static const struct snd_kcontrol_new memif_ul1_ch1_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1 Switch", AFE_CONN21,
+ I_ADDA_UL_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2 Switch", AFE_CONN21,
+ I_ADDA_UL_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3 Switch", AFE_CONN21,
+ I_ADDA_UL_CH3, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("TDM_IN_CH1 Switch", AFE_CONN21_1,
+ I_TDM_IN_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new memif_ul1_ch2_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1 Switch", AFE_CONN22,
+ I_ADDA_UL_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2 Switch", AFE_CONN22,
+ I_ADDA_UL_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3 Switch", AFE_CONN22,
+ I_ADDA_UL_CH3, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4 Switch", AFE_CONN22,
+ I_ADDA_UL_CH4, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("TDM_IN_CH2 Switch", AFE_CONN22_1,
+ I_TDM_IN_CH2, 1, 0),
+};
+
+static const struct snd_kcontrol_new memif_ul1_ch3_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1 Switch", AFE_CONN9,
+ I_ADDA_UL_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2 Switch", AFE_CONN9,
+ I_ADDA_UL_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3 Switch", AFE_CONN9,
+ I_ADDA_UL_CH3, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("TDM_IN_CH3 Switch", AFE_CONN9_1,
+ I_TDM_IN_CH3, 1, 0),
+};
+
+static const struct snd_kcontrol_new memif_ul1_ch4_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1 Switch", AFE_CONN10,
+ I_ADDA_UL_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2 Switch", AFE_CONN10,
+ I_ADDA_UL_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3 Switch", AFE_CONN10,
+ I_ADDA_UL_CH3, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH4 Switch", AFE_CONN10,
+ I_ADDA_UL_CH4, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("TDM_IN_CH4 Switch", AFE_CONN10_1,
+ I_TDM_IN_CH4, 1, 0),
+};
+
+static const struct snd_kcontrol_new memif_ul2_ch1_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I2S0_CH1 Switch", AFE_CONN5,
+ I_I2S0_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH1 Switch", AFE_CONN5,
+ I_DL1_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL12_CH1 Switch", AFE_CONN5,
+ I_DL12_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1 Switch", AFE_CONN5,
+ I_DL2_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH1 Switch", AFE_CONN5,
+ I_DL3_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH1 Switch", AFE_CONN5_1,
+ I_DL4_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL5_CH1 Switch", AFE_CONN5_1,
+ I_DL5_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL6_CH1 Switch", AFE_CONN5_1,
+ I_DL6_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH1 Switch", AFE_CONN5,
+ I_PCM_1_CAP_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("I2S2_CH1 Switch", AFE_CONN5,
+ I_I2S2_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("CONNSYS_I2S_CH1 Switch", AFE_CONN5_1,
+ I_CONNSYS_I2S_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("SRC_1_OUT_CH1 Switch", AFE_CONN5_1,
+ I_SRC_1_OUT_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new memif_ul2_ch2_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I2S0_CH2 Switch", AFE_CONN6,
+ I_I2S0_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH2 Switch", AFE_CONN6,
+ I_DL1_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL12_CH2 Switch", AFE_CONN6,
+ I_DL12_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH2 Switch", AFE_CONN6,
+ I_DL2_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH2 Switch", AFE_CONN6,
+ I_DL3_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH2 Switch", AFE_CONN6_1,
+ I_DL4_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL5_CH2 Switch", AFE_CONN6_1,
+ I_DL5_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL6_CH2 Switch", AFE_CONN6_1,
+ I_DL6_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH2 Switch", AFE_CONN6,
+ I_PCM_1_CAP_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("I2S2_CH2 Switch", AFE_CONN6,
+ I_I2S2_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("CONNSYS_I2S_CH2 Switch", AFE_CONN6_1,
+ I_CONNSYS_I2S_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("SRC_1_OUT_CH2 Switch", AFE_CONN6_1,
+ I_SRC_1_OUT_CH2, 1, 0),
+};
+
+static const struct snd_kcontrol_new memif_ul3_ch1_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("CONNSYS_I2S_CH1 Switch", AFE_CONN32_1,
+ I_CONNSYS_I2S_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH1 Switch", AFE_CONN32,
+ I_DL1_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1 Switch", AFE_CONN32,
+ I_DL2_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new memif_ul3_ch2_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("CONNSYS_I2S_CH2 Switch", AFE_CONN33_1,
+ I_CONNSYS_I2S_CH2, 1, 0),
+};
+
+static const struct snd_kcontrol_new memif_ul4_ch1_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1 Switch", AFE_CONN38,
+ I_ADDA_UL_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("I2S0_CH1 Switch", AFE_CONN38,
+ I_I2S0_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new memif_ul4_ch2_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2 Switch", AFE_CONN39,
+ I_ADDA_UL_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("I2S0_CH2 Switch", AFE_CONN39,
+ I_I2S0_CH2, 1, 0),
+};
+
+static const struct snd_kcontrol_new memif_ul5_ch1_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1 Switch", AFE_CONN44,
+ I_ADDA_UL_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new memif_ul5_ch2_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2 Switch", AFE_CONN45,
+ I_ADDA_UL_CH2, 1, 0),
+};
+
+static const struct snd_kcontrol_new memif_ul6_ch1_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1 Switch", AFE_CONN46,
+ I_ADDA_UL_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH1 Switch", AFE_CONN46,
+ I_DL1_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL12_CH1 Switch", AFE_CONN46,
+ I_DL12_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL6_CH1 Switch", AFE_CONN46_1,
+ I_DL6_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1 Switch", AFE_CONN46,
+ I_DL2_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH1 Switch", AFE_CONN46,
+ I_DL3_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH1 Switch", AFE_CONN46_1,
+ I_DL4_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH1 Switch", AFE_CONN46,
+ I_PCM_1_CAP_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("GAIN1_OUT_CH1 Switch", AFE_CONN46,
+ I_GAIN1_OUT_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new memif_ul6_ch2_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2 Switch", AFE_CONN47,
+ I_ADDA_UL_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH2 Switch", AFE_CONN47,
+ I_DL1_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL12_CH2 Switch", AFE_CONN47,
+ I_DL12_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL6_CH2 Switch", AFE_CONN47_1,
+ I_DL6_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH2 Switch", AFE_CONN47,
+ I_DL2_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH2 Switch", AFE_CONN47,
+ I_DL3_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH2 Switch", AFE_CONN47_1,
+ I_DL4_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH2 Switch", AFE_CONN47,
+ I_PCM_1_CAP_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("GAIN1_OUT_CH2 Switch", AFE_CONN47,
+ I_GAIN1_OUT_CH2, 1, 0),
+};
+
+static const struct snd_kcontrol_new memif_ul7_ch1_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1 Switch", AFE_CONN48,
+ I_ADDA_UL_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("HW_GAIN2_OUT_CH1 Switch", AFE_CONN48,
+ I_GAIN2_OUT_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("HW_SRC_2_OUT_CH1 Switch", AFE_CONN48_1,
+ I_SRC_2_OUT_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new memif_ul7_ch2_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2 Switch", AFE_CONN49,
+ I_ADDA_UL_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("HW_GAIN2_OUT_CH2 Switch", AFE_CONN49,
+ I_GAIN2_OUT_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("HW_SRC_2_OUT_CH2 Switch", AFE_CONN49_1,
+ I_SRC_2_OUT_CH2, 1, 0),
+};
+
+static const struct snd_kcontrol_new memif_ul8_ch1_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1 Switch", AFE_CONN50,
+ I_ADDA_UL_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new memif_ul8_ch2_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2 Switch", AFE_CONN51,
+ I_ADDA_UL_CH2, 1, 0),
+};
+
+static const struct snd_kcontrol_new hw_cm1_ch1_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("TDM_IN_CH1 Switch", AFE_CONN58_1,
+ I_TDM_IN_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("I2S0_CH1 Switch", AFE_CONN58,
+ I_I2S0_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("I2S2_CH1 Switch", AFE_CONN58,
+ I_I2S2_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1 Switch", AFE_CONN58,
+ I_ADDA_UL_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH1 Switch", AFE_CONN58,
+ I_DL1_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL12_CH1 Switch", AFE_CONN58,
+ I_DL12_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL12_CH3 Switch", AFE_CONN58,
+ I_DL12_CH3, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1 Switch", AFE_CONN58,
+ I_DL2_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH1 Switch", AFE_CONN58,
+ I_DL3_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH1 Switch", AFE_CONN58_1,
+ I_DL4_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL5_CH1 Switch", AFE_CONN58_1,
+ I_DL5_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("HW_SRC1_OUT_CH1 Switch", AFE_CONN58_1,
+ I_SRC_1_OUT_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("HW_SRC2_OUT_CH1 Switch", AFE_CONN58_1,
+ I_SRC_2_OUT_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new hw_cm1_ch2_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("TDM_IN_CH2 Switch", AFE_CONN59_1,
+ I_TDM_IN_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("I2S0_CH2 Switch", AFE_CONN59,
+ I_I2S0_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("I2S2_CH2 Switch", AFE_CONN59,
+ I_I2S2_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2 Switch", AFE_CONN59,
+ I_ADDA_UL_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH2 Switch", AFE_CONN59,
+ I_DL1_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL12_CH2 Switch", AFE_CONN59,
+ I_DL12_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL12_CH4 Switch", AFE_CONN59,
+ I_DL12_CH4, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH2 Switch", AFE_CONN59,
+ I_DL2_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH2 Switch", AFE_CONN59,
+ I_DL3_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH2 Switch", AFE_CONN59_1,
+ I_DL4_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL5_CH2 Switch", AFE_CONN59_1,
+ I_DL5_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("HW_SRC1_OUT_CH2 Switch", AFE_CONN59_1,
+ I_SRC_1_OUT_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("HW_SRC2_OUT_CH2 Switch", AFE_CONN59_1,
+ I_SRC_2_OUT_CH2, 1, 0),
+};
+
+static const struct snd_kcontrol_new hw_cm1_ch3_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("TDM_IN_CH3 Switch", AFE_CONN60_1,
+ I_TDM_IN_CH3, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("I2S0_CH1 Switch", AFE_CONN60,
+ I_I2S0_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("I2S2_CH1 Switch", AFE_CONN60,
+ I_I2S2_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1 Switch", AFE_CONN60,
+ I_ADDA_UL_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH1 Switch", AFE_CONN60,
+ I_DL1_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL12_CH1 Switch", AFE_CONN60,
+ I_DL12_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL12_CH3 Switch", AFE_CONN60,
+ I_DL12_CH3, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1 Switch", AFE_CONN60,
+ I_DL2_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH1 Switch", AFE_CONN60,
+ I_DL3_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH1 Switch", AFE_CONN60_1,
+ I_DL4_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL5_CH1 Switch", AFE_CONN60_1,
+ I_DL5_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("HW_SRC1_OUT_CH1 Switch", AFE_CONN60_1,
+ I_SRC_1_OUT_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("HW_SRC2_OUT_CH1 Switch", AFE_CONN60_1,
+ I_SRC_2_OUT_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new hw_cm1_ch4_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("TDM_IN_CH4 Switch", AFE_CONN61_1,
+ I_TDM_IN_CH4, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("I2S0_CH2 Switch", AFE_CONN61,
+ I_I2S0_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("I2S2_CH2 Switch", AFE_CONN61,
+ I_I2S2_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2 Switch", AFE_CONN61,
+ I_ADDA_UL_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH2 Switch", AFE_CONN61,
+ I_DL1_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL12_CH2 Switch", AFE_CONN61,
+ I_DL12_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL12_CH4 Switch", AFE_CONN61,
+ I_DL12_CH4, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH2 Switch", AFE_CONN61,
+ I_DL2_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH2 Switch", AFE_CONN61,
+ I_DL3_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH2 Switch", AFE_CONN61_1,
+ I_DL4_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL5_CH2 Switch", AFE_CONN61_1,
+ I_DL5_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("HW_SRC1_OUT_CH2 Switch", AFE_CONN61_1,
+ I_SRC_1_OUT_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("HW_SRC2_OUT_CH2 Switch", AFE_CONN61_1,
+ I_SRC_2_OUT_CH2, 1, 0),
+};
+
+static const struct snd_kcontrol_new hw_cm1_ch5_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("TDM_IN_CH5 Switch", AFE_CONN62_1,
+ I_TDM_IN_CH5, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("I2S0_CH1 Switch", AFE_CONN62,
+ I_I2S0_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("I2S2_CH1 Switch", AFE_CONN62,
+ I_I2S2_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1 Switch", AFE_CONN62,
+ I_ADDA_UL_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH1 Switch", AFE_CONN62,
+ I_DL1_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL12_CH1 Switch", AFE_CONN62,
+ I_DL12_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL12_CH3 Switch", AFE_CONN62,
+ I_DL12_CH3, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1 Switch", AFE_CONN62,
+ I_DL2_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH1 Switch", AFE_CONN62,
+ I_DL3_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH1 Switch", AFE_CONN62_1,
+ I_DL4_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL5_CH1 Switch", AFE_CONN62_1,
+ I_DL5_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("HW_SRC1_OUT_CH1 Switch", AFE_CONN62_1,
+ I_SRC_1_OUT_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("HW_SRC2_OUT_CH1 Switch", AFE_CONN62_1,
+ I_SRC_2_OUT_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new hw_cm1_ch6_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("TDM_IN_CH6 Switch", AFE_CONN63_1,
+ I_TDM_IN_CH6, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("I2S0_CH2 Switch", AFE_CONN63,
+ I_I2S0_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("I2S2_CH2 Switch", AFE_CONN63,
+ I_I2S2_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2 Switch", AFE_CONN63,
+ I_ADDA_UL_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH2 Switch", AFE_CONN63,
+ I_DL1_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL12_CH2 Switch", AFE_CONN63,
+ I_DL12_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL12_CH4 Switch", AFE_CONN63,
+ I_DL12_CH4, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH2 Switch", AFE_CONN63,
+ I_DL2_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH2 Switch", AFE_CONN63,
+ I_DL3_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH2 Switch", AFE_CONN63_1,
+ I_DL4_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL5_CH2 Switch", AFE_CONN63_1,
+ I_DL5_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("HW_SRC1_OUT_CH2 Switch", AFE_CONN63_1,
+ I_SRC_1_OUT_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("HW_SRC2_OUT_CH2 Switch", AFE_CONN63_1,
+ I_SRC_2_OUT_CH2, 1, 0),
+};
+
+static const struct snd_kcontrol_new hw_cm1_ch7_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("TDM_IN_CH7 Switch", AFE_CONN64_1,
+ I_TDM_IN_CH7, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("I2S0_CH1 Switch", AFE_CONN64,
+ I_I2S0_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("I2S2_CH1 Switch", AFE_CONN64,
+ I_I2S2_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1 Switch", AFE_CONN64,
+ I_ADDA_UL_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH1 Switch", AFE_CONN64,
+ I_DL1_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL12_CH1v", AFE_CONN64,
+ I_DL12_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL12_CH3 Switch", AFE_CONN64,
+ I_DL12_CH3, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1 Switch", AFE_CONN64,
+ I_DL2_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH1 Switch", AFE_CONN64,
+ I_DL3_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH1 Switch", AFE_CONN64_1,
+ I_DL4_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL5_CH1 Switch", AFE_CONN64_1,
+ I_DL5_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("HW_SRC1_OUT_CH1 Switch", AFE_CONN64_1,
+ I_SRC_1_OUT_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("HW_SRC2_OUT_CH1 Switch", AFE_CONN64_1,
+ I_SRC_2_OUT_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new hw_cm1_ch8_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("TDM_IN_CH8 Switch", AFE_CONN65_1,
+ I_TDM_IN_CH8, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("I2S0_CH2 Switch", AFE_CONN65,
+ I_I2S0_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("I2S2_CH2 Switch", AFE_CONN65,
+ I_I2S2_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2 Switch", AFE_CONN65,
+ I_ADDA_UL_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH2 Switch", AFE_CONN65,
+ I_DL1_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL12_CH2 Switch", AFE_CONN65,
+ I_DL12_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL12_CH4 Switch", AFE_CONN65,
+ I_DL12_CH4, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH2 Switch", AFE_CONN65,
+ I_DL2_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH2 Switch", AFE_CONN65,
+ I_DL3_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH2 Switch", AFE_CONN65_1,
+ I_DL4_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL5_CH2 Switch", AFE_CONN65_1,
+ I_DL5_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("HW_SRC1_OUT_CH2 Switch", AFE_CONN65_1,
+ I_SRC_1_OUT_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("HW_SRC2_OUT_CH2 Switch", AFE_CONN65_1,
+ I_SRC_2_OUT_CH2, 1, 0),
+};
+
+static const struct snd_kcontrol_new hw_cm1_ch9_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I2S0_CH1 Switch", AFE_CONN66,
+ I_I2S0_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("I2S2_CH1 Switch", AFE_CONN66,
+ I_I2S2_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1 Switch", AFE_CONN66,
+ I_ADDA_UL_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH1 Switch", AFE_CONN66,
+ I_DL1_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL12_CH1 Switch", AFE_CONN66,
+ I_DL12_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL12_CH3 Switch", AFE_CONN66,
+ I_DL12_CH3, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1 Switch", AFE_CONN66,
+ I_DL2_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH1 Switch", AFE_CONN66,
+ I_DL3_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH1 Switch", AFE_CONN66_1,
+ I_DL4_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL5_CH1 Switch", AFE_CONN66_1,
+ I_DL5_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("HW_SRC1_OUT_CH1 Switch", AFE_CONN66_1,
+ I_SRC_1_OUT_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("HW_SRC2_OUT_CH1 Switch", AFE_CONN66_1,
+ I_SRC_2_OUT_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new hw_cm1_ch10_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I2S0_CH2 Switch", AFE_CONN67,
+ I_I2S0_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("I2S2_CH2 Switch", AFE_CONN67,
+ I_I2S2_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2 Switch", AFE_CONN67,
+ I_ADDA_UL_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH2 Switch", AFE_CONN67,
+ I_DL1_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL12_CH2 Switch", AFE_CONN67,
+ I_DL12_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL12_CH4 Switch", AFE_CONN67,
+ I_DL12_CH4, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH2 Switch", AFE_CONN67,
+ I_DL2_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH2 Switch", AFE_CONN67,
+ I_DL3_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH2 Switch", AFE_CONN67_1,
+ I_DL4_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL5_CH2 Switch", AFE_CONN67_1,
+ I_DL5_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("HW_SRC1_OUT_CH2 Switch", AFE_CONN67_1,
+ I_SRC_1_OUT_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("HW_SRC2_OUT_CH2 Switch", AFE_CONN67_1,
+ I_SRC_2_OUT_CH2, 1, 0),
+};
+
+static const struct snd_kcontrol_new hw_cm1_ch11_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I2S0_CH1 Switch", AFE_CONN68,
+ I_I2S0_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("I2S2_CH1 Switch", AFE_CONN68,
+ I_I2S2_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1 Switch", AFE_CONN68,
+ I_ADDA_UL_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH1 Switch", AFE_CONN68,
+ I_DL1_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL12_CH1 Switch", AFE_CONN68,
+ I_DL12_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL12_CH3 Switch", AFE_CONN68,
+ I_DL12_CH3, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1 Switch", AFE_CONN68,
+ I_DL2_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH1 Switch", AFE_CONN68,
+ I_DL3_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH1 Switch", AFE_CONN68_1,
+ I_DL4_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL5_CH1 Switch", AFE_CONN68_1,
+ I_DL5_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("HW_SRC1_OUT_CH1 Switch", AFE_CONN68_1,
+ I_SRC_1_OUT_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("HW_SRC2_OUT_CH1 Switch", AFE_CONN68_1,
+ I_SRC_2_OUT_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new hw_cm1_ch12_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I2S0_CH2 Switch", AFE_CONN69,
+ I_I2S0_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("I2S2_CH2 Switch", AFE_CONN69,
+ I_I2S2_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2 Switch", AFE_CONN69,
+ I_ADDA_UL_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH2 Switch", AFE_CONN69,
+ I_DL1_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL12_CH2 Switch", AFE_CONN69,
+ I_DL12_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL12_CH4 Switch", AFE_CONN69,
+ I_DL12_CH4, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH2 Switch", AFE_CONN69,
+ I_DL2_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH2 Switch", AFE_CONN69,
+ I_DL3_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH2 Switch", AFE_CONN69_1,
+ I_DL4_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL5_CH2 Switch", AFE_CONN69_1,
+ I_DL5_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("HW_SRC1_OUT_CH2 Switch", AFE_CONN69_1,
+ I_SRC_1_OUT_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("HW_SRC2_OUT_CH2 Switch", AFE_CONN69_1,
+ I_SRC_2_OUT_CH2, 1, 0),
+};
+
+/* ADDA UL MUX */
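+ /* UL5 (VUL3) input select: the 12ch CM1 channel-merge output or the normal 2ch path */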
+enum {
+ UL5_IN_MUX_CM1 = 0,
+ UL5_IN_MUX_NORMAL,
+ UL5_IN_MUX_MASK = 0x1,
+};
+
+static const char * const ul5_in_mux_map[] = {
+ "UL5_IN_FROM_CM1", "UL5_IN_FROM_Normal"
+};
+
+static int ul5_in_map_value[] = {
+ UL5_IN_MUX_CM1,
+ UL5_IN_MUX_NORMAL,
+};
+
+static SOC_VALUE_ENUM_SINGLE_DECL(ul5_in_mux_map_enum,
+ AFE_CM1_CON,
+ VUL3_BYPASS_CM_SFT,
+ VUL3_BYPASS_CM_MASK,
+ ul5_in_mux_map,
+ ul5_in_map_value);
+
+static const struct snd_kcontrol_new ul5_in_mux_control =
+ SOC_DAPM_ENUM("UL5_IN_MUX Select", ul5_in_mux_map_enum);
+
+static const struct snd_soc_dapm_widget mt8186_memif_widgets[] = {
+ /* inter-connections */
+ SND_SOC_DAPM_MIXER("UL1_CH1", SND_SOC_NOPM, 0, 0,
+ memif_ul1_ch1_mix, ARRAY_SIZE(memif_ul1_ch1_mix)),
+ SND_SOC_DAPM_MIXER("UL1_CH2", SND_SOC_NOPM, 0, 0,
+ memif_ul1_ch2_mix, ARRAY_SIZE(memif_ul1_ch2_mix)),
+ SND_SOC_DAPM_MIXER("UL1_CH3", SND_SOC_NOPM, 0, 0,
+ memif_ul1_ch3_mix, ARRAY_SIZE(memif_ul1_ch3_mix)),
+ SND_SOC_DAPM_MIXER("UL1_CH4", SND_SOC_NOPM, 0, 0,
+ memif_ul1_ch4_mix, ARRAY_SIZE(memif_ul1_ch4_mix)),
+
+ SND_SOC_DAPM_MIXER("UL2_CH1", SND_SOC_NOPM, 0, 0,
+ memif_ul2_ch1_mix, ARRAY_SIZE(memif_ul2_ch1_mix)),
+ SND_SOC_DAPM_MIXER("UL2_CH2", SND_SOC_NOPM, 0, 0,
+ memif_ul2_ch2_mix, ARRAY_SIZE(memif_ul2_ch2_mix)),
+
+ SND_SOC_DAPM_MIXER("UL3_CH1", SND_SOC_NOPM, 0, 0,
+ memif_ul3_ch1_mix, ARRAY_SIZE(memif_ul3_ch1_mix)),
+ SND_SOC_DAPM_MIXER("UL3_CH2", SND_SOC_NOPM, 0, 0,
+ memif_ul3_ch2_mix, ARRAY_SIZE(memif_ul3_ch2_mix)),
+
+ SND_SOC_DAPM_MIXER("UL4_CH1", SND_SOC_NOPM, 0, 0,
+ memif_ul4_ch1_mix, ARRAY_SIZE(memif_ul4_ch1_mix)),
+ SND_SOC_DAPM_MIXER("UL4_CH2", SND_SOC_NOPM, 0, 0,
+ memif_ul4_ch2_mix, ARRAY_SIZE(memif_ul4_ch2_mix)),
+
+ SND_SOC_DAPM_MIXER("UL5_CH1", SND_SOC_NOPM, 0, 0,
+ memif_ul5_ch1_mix, ARRAY_SIZE(memif_ul5_ch1_mix)),
+ SND_SOC_DAPM_MIXER("UL5_CH2", SND_SOC_NOPM, 0, 0,
+ memif_ul5_ch2_mix, ARRAY_SIZE(memif_ul5_ch2_mix)),
+
+ SND_SOC_DAPM_MIXER("UL6_CH1", SND_SOC_NOPM, 0, 0,
+ memif_ul6_ch1_mix, ARRAY_SIZE(memif_ul6_ch1_mix)),
+ SND_SOC_DAPM_MIXER("UL6_CH2", SND_SOC_NOPM, 0, 0,
+ memif_ul6_ch2_mix, ARRAY_SIZE(memif_ul6_ch2_mix)),
+
+ SND_SOC_DAPM_MIXER("UL7_CH1", SND_SOC_NOPM, 0, 0,
+ memif_ul7_ch1_mix, ARRAY_SIZE(memif_ul7_ch1_mix)),
+ SND_SOC_DAPM_MIXER("UL7_CH2", SND_SOC_NOPM, 0, 0,
+ memif_ul7_ch2_mix, ARRAY_SIZE(memif_ul7_ch2_mix)),
+
+ SND_SOC_DAPM_MIXER("UL8_CH1", SND_SOC_NOPM, 0, 0,
+ memif_ul8_ch1_mix, ARRAY_SIZE(memif_ul8_ch1_mix)),
+ SND_SOC_DAPM_MIXER("UL8_CH2", SND_SOC_NOPM, 0, 0,
+ memif_ul8_ch2_mix, ARRAY_SIZE(memif_ul8_ch2_mix)),
+
+ SND_SOC_DAPM_MIXER("UL5_2CH", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ SND_SOC_DAPM_MIXER("HW_CM1", SND_SOC_NOPM, 0, 0, NULL, 0),
+
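+ /*
+  * CM1 is the hardware channel-merge block; it packs up to 12 input
+  * channels into one stream that the UL5 (VUL3) memif can capture.
+  */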
+ /* CM1 enable */
+ SND_SOC_DAPM_SUPPLY_S("CM1_EN", 0, AFE_CM1_CON,
+ CHANNEL_MERGE0_EN_SFT, 0, NULL,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_MIXER("HW_CM1_CH1", SND_SOC_NOPM, 0, 0,
+ hw_cm1_ch1_mix, ARRAY_SIZE(hw_cm1_ch1_mix)),
+ SND_SOC_DAPM_MIXER("HW_CM1_CH2", SND_SOC_NOPM, 0, 0,
+ hw_cm1_ch2_mix, ARRAY_SIZE(hw_cm1_ch2_mix)),
+ SND_SOC_DAPM_MIXER("HW_CM1_CH3", SND_SOC_NOPM, 0, 0,
+ hw_cm1_ch3_mix, ARRAY_SIZE(hw_cm1_ch3_mix)),
+ SND_SOC_DAPM_MIXER("HW_CM1_CH4", SND_SOC_NOPM, 0, 0,
+ hw_cm1_ch4_mix, ARRAY_SIZE(hw_cm1_ch4_mix)),
+ SND_SOC_DAPM_MIXER("HW_CM1_CH5", SND_SOC_NOPM, 0, 0,
+ hw_cm1_ch5_mix, ARRAY_SIZE(hw_cm1_ch5_mix)),
+ SND_SOC_DAPM_MIXER("HW_CM1_CH6", SND_SOC_NOPM, 0, 0,
+ hw_cm1_ch6_mix, ARRAY_SIZE(hw_cm1_ch6_mix)),
+ SND_SOC_DAPM_MIXER("HW_CM1_CH7", SND_SOC_NOPM, 0, 0,
+ hw_cm1_ch7_mix, ARRAY_SIZE(hw_cm1_ch7_mix)),
+ SND_SOC_DAPM_MIXER("HW_CM1_CH8", SND_SOC_NOPM, 0, 0,
+ hw_cm1_ch8_mix, ARRAY_SIZE(hw_cm1_ch8_mix)),
+ SND_SOC_DAPM_MIXER("HW_CM1_CH9", SND_SOC_NOPM, 0, 0,
+ hw_cm1_ch9_mix, ARRAY_SIZE(hw_cm1_ch9_mix)),
+ SND_SOC_DAPM_MIXER("HW_CM1_CH10", SND_SOC_NOPM, 0, 0,
+ hw_cm1_ch10_mix, ARRAY_SIZE(hw_cm1_ch10_mix)),
+ SND_SOC_DAPM_MIXER("HW_CM1_CH11", SND_SOC_NOPM, 0, 0,
+ hw_cm1_ch11_mix, ARRAY_SIZE(hw_cm1_ch11_mix)),
+ SND_SOC_DAPM_MIXER("HW_CM1_CH12", SND_SOC_NOPM, 0, 0,
+ hw_cm1_ch12_mix, ARRAY_SIZE(hw_cm1_ch12_mix)),
+
+ SND_SOC_DAPM_MUX("UL5_IN_MUX", SND_SOC_NOPM, 0, 0,
+ &ul5_in_mux_control),
+
+ SND_SOC_DAPM_INPUT("UL1_VIRTUAL_INPUT"),
+ SND_SOC_DAPM_INPUT("UL2_VIRTUAL_INPUT"),
+ SND_SOC_DAPM_INPUT("UL3_VIRTUAL_INPUT"),
+ SND_SOC_DAPM_INPUT("UL4_VIRTUAL_INPUT"),
+ SND_SOC_DAPM_INPUT("UL5_VIRTUAL_INPUT"),
+ SND_SOC_DAPM_INPUT("UL6_VIRTUAL_INPUT"),
+};
+
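+ /*
+  * DAPM routes: each entry is {sink, control switch, source}; a NULL
+  * control is an unconditional connection.
+  */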
+static const struct snd_soc_dapm_route mt8186_memif_routes[] = {
+ {"UL1", NULL, "UL1_CH1"},
+ {"UL1", NULL, "UL1_CH2"},
+ {"UL1", NULL, "UL1_CH3"},
+ {"UL1", NULL, "UL1_CH4"},
+ {"UL1_CH1", "ADDA_UL_CH1 Switch", "ADDA_UL_Mux"},
+ {"UL1_CH1", "ADDA_UL_CH2 Switch", "ADDA_UL_Mux"},
+ {"UL1_CH2", "ADDA_UL_CH1 Switch", "ADDA_UL_Mux"},
+ {"UL1_CH2", "ADDA_UL_CH2 Switch", "ADDA_UL_Mux"},
+ {"UL1_CH3", "ADDA_UL_CH1 Switch", "ADDA_UL_Mux"},
+ {"UL1_CH3", "ADDA_UL_CH2 Switch", "ADDA_UL_Mux"},
+ {"UL1_CH4", "ADDA_UL_CH1 Switch", "ADDA_UL_Mux"},
+ {"UL1_CH4", "ADDA_UL_CH2 Switch", "ADDA_UL_Mux"},
+ {"UL1_CH1", "TDM_IN_CH1 Switch", "TDM IN"},
+ {"UL1_CH2", "TDM_IN_CH2 Switch", "TDM IN"},
+ {"UL1_CH3", "TDM_IN_CH3 Switch", "TDM IN"},
+ {"UL1_CH4", "TDM_IN_CH4 Switch", "TDM IN"},
+
+ {"UL2", NULL, "UL2_CH1"},
+ {"UL2", NULL, "UL2_CH2"},
+
+ /* cannot connect FE to FE directly */
+ {"UL2_CH1", "DL1_CH1 Switch", "Hostless_UL2 UL"},
+ {"UL2_CH2", "DL1_CH2 Switch", "Hostless_UL2 UL"},
+ {"UL2_CH1", "DL12_CH1 Switch", "Hostless_UL2 UL"},
+ {"UL2_CH2", "DL12_CH2 Switch", "Hostless_UL2 UL"},
+ {"UL2_CH1", "DL6_CH1 Switch", "Hostless_UL2 UL"},
+ {"UL2_CH2", "DL6_CH2 Switch", "Hostless_UL2 UL"},
+ {"UL2_CH1", "DL2_CH1 Switch", "Hostless_UL2 UL"},
+ {"UL2_CH2", "DL2_CH2 Switch", "Hostless_UL2 UL"},
+ {"UL2_CH1", "DL3_CH1 Switch", "Hostless_UL2 UL"},
+ {"UL2_CH2", "DL3_CH2 Switch", "Hostless_UL2 UL"},
+ {"UL2_CH1", "DL4_CH1 Switch", "Hostless_UL2 UL"},
+ {"UL2_CH2", "DL4_CH2 Switch", "Hostless_UL2 UL"},
+ {"UL2_CH1", "DL5_CH1 Switch", "Hostless_UL2 UL"},
+ {"UL2_CH2", "DL5_CH2 Switch", "Hostless_UL2 UL"},
+
+ {"Hostless_UL2 UL", NULL, "UL2_VIRTUAL_INPUT"},
+
+ {"UL2_CH1", "I2S0_CH1 Switch", "I2S0"},
+ {"UL2_CH2", "I2S0_CH2 Switch", "I2S0"},
+ {"UL2_CH1", "I2S2_CH1 Switch", "I2S2"},
+ {"UL2_CH2", "I2S2_CH2 Switch", "I2S2"},
+
+ {"UL2_CH1", "PCM_1_CAP_CH1 Switch", "PCM 1 Capture"},
+ {"UL2_CH2", "PCM_1_CAP_CH2 Switch", "PCM 1 Capture"},
+
+ {"UL2_CH1", "CONNSYS_I2S_CH1 Switch", "Connsys I2S"},
+ {"UL2_CH2", "CONNSYS_I2S_CH2 Switch", "Connsys I2S"},
+
+ {"UL2_CH1", "SRC_1_OUT_CH1 Switch", "HW_SRC_1_Out"},
+ {"UL2_CH2", "SRC_1_OUT_CH2 Switch", "HW_SRC_1_Out"},
+
+ {"UL3", NULL, "UL3_CH1"},
+ {"UL3", NULL, "UL3_CH2"},
+ {"UL3_CH1", "CONNSYS_I2S_CH1 Switch", "Connsys I2S"},
+ {"UL3_CH2", "CONNSYS_I2S_CH2 Switch", "Connsys I2S"},
+
+ {"UL4", NULL, "UL4_CH1"},
+ {"UL4", NULL, "UL4_CH2"},
+ {"UL4_CH1", "ADDA_UL_CH1 Switch", "ADDA_UL_Mux"},
+ {"UL4_CH2", "ADDA_UL_CH2 Switch", "ADDA_UL_Mux"},
+ {"UL4_CH1", "I2S0_CH1 Switch", "I2S0"},
+ {"UL4_CH2", "I2S0_CH2 Switch", "I2S0"},
+
+ {"UL5", NULL, "UL5_IN_MUX"},
+ {"UL5_IN_MUX", "UL5_IN_FROM_Normal", "UL5_2CH"},
+ {"UL5_IN_MUX", "UL5_IN_FROM_CM1", "HW_CM1"},
+ {"UL5_2CH", NULL, "UL5_CH1"},
+ {"UL5_2CH", NULL, "UL5_CH2"},
+ {"UL5_CH1", "ADDA_UL_CH1 Switch", "ADDA_UL_Mux"},
+ {"UL5_CH2", "ADDA_UL_CH2 Switch", "ADDA_UL_Mux"},
+ {"HW_CM1", NULL, "CM1_EN"},
+ {"HW_CM1", NULL, "HW_CM1_CH1"},
+ {"HW_CM1", NULL, "HW_CM1_CH2"},
+ {"HW_CM1", NULL, "HW_CM1_CH3"},
+ {"HW_CM1", NULL, "HW_CM1_CH4"},
+ {"HW_CM1", NULL, "HW_CM1_CH5"},
+ {"HW_CM1", NULL, "HW_CM1_CH6"},
+ {"HW_CM1", NULL, "HW_CM1_CH7"},
+ {"HW_CM1", NULL, "HW_CM1_CH8"},
+ {"HW_CM1", NULL, "HW_CM1_CH9"},
+ {"HW_CM1", NULL, "HW_CM1_CH10"},
+ {"HW_CM1", NULL, "HW_CM1_CH11"},
+ {"HW_CM1", NULL, "HW_CM1_CH12"},
+ {"HW_CM1_CH1", "TDM_IN_CH1 Switch", "TDM IN"},
+ {"HW_CM1_CH2", "TDM_IN_CH2 Switch", "TDM IN"},
+ {"HW_CM1_CH3", "TDM_IN_CH3 Switch", "TDM IN"},
+ {"HW_CM1_CH4", "TDM_IN_CH4 Switch", "TDM IN"},
+ {"HW_CM1_CH5", "TDM_IN_CH5 Switch", "TDM IN"},
+ {"HW_CM1_CH6", "TDM_IN_CH6 Switch", "TDM IN"},
+ {"HW_CM1_CH7", "TDM_IN_CH7 Switch", "TDM IN"},
+ {"HW_CM1_CH8", "TDM_IN_CH8 Switch", "TDM IN"},
+ {"HW_CM1_CH9", "DL1_CH1 Switch", "Hostless_UL5 UL"},
+ {"HW_CM1_CH10", "DL1_CH2 Switch", "Hostless_UL5 UL"},
+
+ {"HW_CM1_CH3", "DL1_CH1 Switch", "Hostless_UL5 UL"},
+ {"HW_CM1_CH4", "DL1_CH2 Switch", "Hostless_UL5 UL"},
+
+ {"HW_CM1_CH3", "DL3_CH1 Switch", "Hostless_UL5 UL"},
+ {"HW_CM1_CH4", "DL3_CH2 Switch", "Hostless_UL5 UL"},
+
+ {"HW_CM1_CH5", "HW_SRC1_OUT_CH1 Switch", "HW_SRC_1_Out"},
+ {"HW_CM1_CH6", "HW_SRC1_OUT_CH2 Switch", "HW_SRC_1_Out"},
+
+ {"HW_CM1_CH9", "DL12_CH1 Switch", "Hostless_UL5 UL"},
+ {"HW_CM1_CH10", "DL12_CH2 Switch", "Hostless_UL5 UL"},
+ {"HW_CM1_CH11", "DL12_CH3 Switch", "Hostless_UL5 UL"},
+ {"HW_CM1_CH12", "DL12_CH4 Switch", "Hostless_UL5 UL"},
+
+ {"Hostless_UL5 UL", NULL, "UL5_VIRTUAL_INPUT"},
+
+ {"UL6", NULL, "UL6_CH1"},
+ {"UL6", NULL, "UL6_CH2"},
+
+ {"UL6_CH1", "ADDA_UL_CH1 Switch", "ADDA_UL_Mux"},
+ {"UL6_CH2", "ADDA_UL_CH2 Switch", "ADDA_UL_Mux"},
+ {"UL6_CH1", "DL1_CH1 Switch", "Hostless_UL6 UL"},
+ {"UL6_CH2", "DL1_CH2 Switch", "Hostless_UL6 UL"},
+ {"UL6_CH1", "DL2_CH1 Switch", "Hostless_UL6 UL"},
+ {"UL6_CH2", "DL2_CH2 Switch", "Hostless_UL6 UL"},
+ {"UL6_CH1", "DL12_CH1 Switch", "Hostless_UL6 UL"},
+ {"UL6_CH2", "DL12_CH2 Switch", "Hostless_UL6 UL"},
+ {"UL6_CH1", "DL6_CH1 Switch", "Hostless_UL6 UL"},
+ {"UL6_CH2", "DL6_CH2 Switch", "Hostless_UL6 UL"},
+ {"UL6_CH1", "DL3_CH1 Switch", "Hostless_UL6 UL"},
+ {"UL6_CH2", "DL3_CH2 Switch", "Hostless_UL6 UL"},
+ {"UL6_CH1", "DL4_CH1 Switch", "Hostless_UL6 UL"},
+ {"UL6_CH2", "DL4_CH2 Switch", "Hostless_UL6 UL"},
+ {"Hostless_UL6 UL", NULL, "UL6_VIRTUAL_INPUT"},
+ {"UL6_CH1", "PCM_1_CAP_CH1 Switch", "PCM 1 Capture"},
+ {"UL6_CH2", "PCM_1_CAP_CH2 Switch", "PCM 1 Capture"},
+ {"UL6_CH1", "GAIN1_OUT_CH1 Switch", "HW Gain 1 Out"},
+ {"UL6_CH2", "GAIN1_OUT_CH2 Switch", "HW Gain 1 Out"},
+
+ {"UL7", NULL, "UL7_CH1"},
+ {"UL7", NULL, "UL7_CH2"},
+ {"UL7_CH1", "ADDA_UL_CH1 Switch", "ADDA_UL_Mux"},
+ {"UL7_CH2", "ADDA_UL_CH2 Switch", "ADDA_UL_Mux"},
+ {"UL7_CH1", "HW_GAIN2_OUT_CH1 Switch", "HW Gain 2 Out"},
+ {"UL7_CH2", "HW_GAIN2_OUT_CH2 Switch", "HW Gain 2 Out"},
+ {"UL7_CH1", "HW_SRC_2_OUT_CH1 Switch", "HW_SRC_2_Out"},
+ {"UL7_CH2", "HW_SRC_2_OUT_CH2 Switch", "HW_SRC_2_Out"},
+
+ {"UL8", NULL, "UL8_CH1"},
+ {"UL8", NULL, "UL8_CH2"},
+ {"UL8_CH1", "ADDA_UL_CH1 Switch", "ADDA_UL_Mux"},
+ {"UL8_CH2", "ADDA_UL_CH2 Switch", "ADDA_UL_Mux"},
+
+ {"HW_GAIN2_IN_CH1", "ADDA_UL_CH1 Switch", "ADDA_UL_Mux"},
+ {"HW_GAIN2_IN_CH2", "ADDA_UL_CH2 Switch", "ADDA_UL_Mux"},
+};
+
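+ /*
+  * Per-memif register layout: DMA buffer base/cur/end pointers, sample
+  * rate (fs), enable bit and prefetch/minimum-length fields; -1 marks
+  * fields this SoC does not provide.
+  */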
+static const struct mtk_base_memif_data memif_data[MT8186_MEMIF_NUM] = {
+ [MT8186_MEMIF_DL1] = {
+ .name = "DL1",
+ .id = MT8186_MEMIF_DL1,
+ .reg_ofs_base = AFE_DL1_BASE,
+ .reg_ofs_cur = AFE_DL1_CUR,
+ .reg_ofs_end = AFE_DL1_END,
+ .reg_ofs_base_msb = AFE_DL1_BASE_MSB,
+ .reg_ofs_cur_msb = AFE_DL1_CUR_MSB,
+ .reg_ofs_end_msb = AFE_DL1_END_MSB,
+ .fs_reg = AFE_DL1_CON0,
+ .fs_shift = DL1_MODE_SFT,
+ .fs_maskbit = DL1_MODE_MASK,
+ .mono_reg = AFE_DL1_CON0,
+ .mono_shift = DL1_MONO_SFT,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = DL1_ON_SFT,
+ .hd_reg = AFE_DL1_CON0,
+ .hd_shift = DL1_HD_MODE_SFT,
+ .hd_align_reg = AFE_DL1_CON0,
+ .hd_align_mshift = DL1_HALIGN_SFT,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ .pbuf_reg = AFE_DL1_CON0,
+ .pbuf_mask = DL1_PBUF_SIZE_MASK,
+ .pbuf_shift = DL1_PBUF_SIZE_SFT,
+ .minlen_reg = AFE_DL1_CON0,
+ .minlen_mask = DL1_MINLEN_MASK,
+ .minlen_shift = DL1_MINLEN_SFT,
+ },
+ [MT8186_MEMIF_DL12] = {
+ .name = "DL12",
+ .id = MT8186_MEMIF_DL12,
+ .reg_ofs_base = AFE_DL12_BASE,
+ .reg_ofs_cur = AFE_DL12_CUR,
+ .reg_ofs_end = AFE_DL12_END,
+ .reg_ofs_base_msb = AFE_DL12_BASE_MSB,
+ .reg_ofs_cur_msb = AFE_DL12_CUR_MSB,
+ .reg_ofs_end_msb = AFE_DL12_END_MSB,
+ .fs_reg = AFE_DL12_CON0,
+ .fs_shift = DL12_MODE_SFT,
+ .fs_maskbit = DL12_MODE_MASK,
+ .mono_reg = AFE_DL12_CON0,
+ .mono_shift = DL12_MONO_SFT,
+ .quad_ch_reg = AFE_DL12_CON0,
+ .quad_ch_mask = DL12_4CH_EN_MASK,
+ .quad_ch_shift = DL12_4CH_EN_SFT,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = DL12_ON_SFT,
+ .hd_reg = AFE_DL12_CON0,
+ .hd_shift = DL12_HD_MODE_SFT,
+ .hd_align_reg = AFE_DL12_CON0,
+ .hd_align_mshift = DL12_HALIGN_SFT,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ .pbuf_reg = AFE_DL12_CON0,
+ .pbuf_mask = DL12_PBUF_SIZE_MASK,
+ .pbuf_shift = DL12_PBUF_SIZE_SFT,
+ .minlen_reg = AFE_DL12_CON0,
+ .minlen_mask = DL12_MINLEN_MASK,
+ .minlen_shift = DL12_MINLEN_SFT,
+ },
+ [MT8186_MEMIF_DL2] = {
+ .name = "DL2",
+ .id = MT8186_MEMIF_DL2,
+ .reg_ofs_base = AFE_DL2_BASE,
+ .reg_ofs_cur = AFE_DL2_CUR,
+ .reg_ofs_end = AFE_DL2_END,
+ .reg_ofs_base_msb = AFE_DL2_BASE_MSB,
+ .reg_ofs_cur_msb = AFE_DL2_CUR_MSB,
+ .reg_ofs_end_msb = AFE_DL2_END_MSB,
+ .fs_reg = AFE_DL2_CON0,
+ .fs_shift = DL2_MODE_SFT,
+ .fs_maskbit = DL2_MODE_MASK,
+ .mono_reg = AFE_DL2_CON0,
+ .mono_shift = DL2_MONO_SFT,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = DL2_ON_SFT,
+ .hd_reg = AFE_DL2_CON0,
+ .hd_shift = DL2_HD_MODE_SFT,
+ .hd_align_reg = AFE_DL2_CON0,
+ .hd_align_mshift = DL2_HALIGN_SFT,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ .pbuf_reg = AFE_DL2_CON0,
+ .pbuf_mask = DL2_PBUF_SIZE_MASK,
+ .pbuf_shift = DL2_PBUF_SIZE_SFT,
+ .minlen_reg = AFE_DL2_CON0,
+ .minlen_mask = DL2_MINLEN_MASK,
+ .minlen_shift = DL2_MINLEN_SFT,
+ },
+ [MT8186_MEMIF_DL3] = {
+ .name = "DL3",
+ .id = MT8186_MEMIF_DL3,
+ .reg_ofs_base = AFE_DL3_BASE,
+ .reg_ofs_cur = AFE_DL3_CUR,
+ .reg_ofs_end = AFE_DL3_END,
+ .reg_ofs_base_msb = AFE_DL3_BASE_MSB,
+ .reg_ofs_cur_msb = AFE_DL3_CUR_MSB,
+ .reg_ofs_end_msb = AFE_DL3_END_MSB,
+ .fs_reg = AFE_DL3_CON0,
+ .fs_shift = DL3_MODE_SFT,
+ .fs_maskbit = DL3_MODE_MASK,
+ .mono_reg = AFE_DL3_CON0,
+ .mono_shift = DL3_MONO_SFT,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = DL3_ON_SFT,
+ .hd_reg = AFE_DL3_CON0,
+ .hd_shift = DL3_HD_MODE_SFT,
+ .hd_align_reg = AFE_DL3_CON0,
+ .hd_align_mshift = DL3_HALIGN_SFT,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ .pbuf_reg = AFE_DL3_CON0,
+ .pbuf_mask = DL3_PBUF_SIZE_MASK,
+ .pbuf_shift = DL3_PBUF_SIZE_SFT,
+ .minlen_reg = AFE_DL3_CON0,
+ .minlen_mask = DL3_MINLEN_MASK,
+ .minlen_shift = DL3_MINLEN_SFT,
+ },
+ [MT8186_MEMIF_DL4] = {
+ .name = "DL4",
+ .id = MT8186_MEMIF_DL4,
+ .reg_ofs_base = AFE_DL4_BASE,
+ .reg_ofs_cur = AFE_DL4_CUR,
+ .reg_ofs_end = AFE_DL4_END,
+ .reg_ofs_base_msb = AFE_DL4_BASE_MSB,
+ .reg_ofs_cur_msb = AFE_DL4_CUR_MSB,
+ .reg_ofs_end_msb = AFE_DL4_END_MSB,
+ .fs_reg = AFE_DL4_CON0,
+ .fs_shift = DL4_MODE_SFT,
+ .fs_maskbit = DL4_MODE_MASK,
+ .mono_reg = AFE_DL4_CON0,
+ .mono_shift = DL4_MONO_SFT,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = DL4_ON_SFT,
+ .hd_reg = AFE_DL4_CON0,
+ .hd_shift = DL4_HD_MODE_SFT,
+ .hd_align_reg = AFE_DL4_CON0,
+ .hd_align_mshift = DL4_HALIGN_SFT,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ .pbuf_reg = AFE_DL4_CON0,
+ .pbuf_mask = DL4_PBUF_SIZE_MASK,
+ .pbuf_shift = DL4_PBUF_SIZE_SFT,
+ .minlen_reg = AFE_DL4_CON0,
+ .minlen_mask = DL4_MINLEN_MASK,
+ .minlen_shift = DL4_MINLEN_SFT,
+ },
+ [MT8186_MEMIF_DL5] = {
+ .name = "DL5",
+ .id = MT8186_MEMIF_DL5,
+ .reg_ofs_base = AFE_DL5_BASE,
+ .reg_ofs_cur = AFE_DL5_CUR,
+ .reg_ofs_end = AFE_DL5_END,
+ .reg_ofs_base_msb = AFE_DL5_BASE_MSB,
+ .reg_ofs_cur_msb = AFE_DL5_CUR_MSB,
+ .reg_ofs_end_msb = AFE_DL5_END_MSB,
+ .fs_reg = AFE_DL5_CON0,
+ .fs_shift = DL5_MODE_SFT,
+ .fs_maskbit = DL5_MODE_MASK,
+ .mono_reg = AFE_DL5_CON0,
+ .mono_shift = DL5_MONO_SFT,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = DL5_ON_SFT,
+ .hd_reg = AFE_DL5_CON0,
+ .hd_shift = DL5_HD_MODE_SFT,
+ .hd_align_reg = AFE_DL5_CON0,
+ .hd_align_mshift = DL5_HALIGN_SFT,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ .pbuf_reg = AFE_DL5_CON0,
+ .pbuf_mask = DL5_PBUF_SIZE_MASK,
+ .pbuf_shift = DL5_PBUF_SIZE_SFT,
+ .minlen_reg = AFE_DL5_CON0,
+ .minlen_mask = DL5_MINLEN_MASK,
+ .minlen_shift = DL5_MINLEN_SFT,
+ },
+ [MT8186_MEMIF_DL6] = {
+ .name = "DL6",
+ .id = MT8186_MEMIF_DL6,
+ .reg_ofs_base = AFE_DL6_BASE,
+ .reg_ofs_cur = AFE_DL6_CUR,
+ .reg_ofs_end = AFE_DL6_END,
+ .reg_ofs_base_msb = AFE_DL6_BASE_MSB,
+ .reg_ofs_cur_msb = AFE_DL6_CUR_MSB,
+ .reg_ofs_end_msb = AFE_DL6_END_MSB,
+ .fs_reg = AFE_DL6_CON0,
+ .fs_shift = DL6_MODE_SFT,
+ .fs_maskbit = DL6_MODE_MASK,
+ .mono_reg = AFE_DL6_CON0,
+ .mono_shift = DL6_MONO_SFT,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = DL6_ON_SFT,
+ .hd_reg = AFE_DL6_CON0,
+ .hd_shift = DL6_HD_MODE_SFT,
+ .hd_align_reg = AFE_DL6_CON0,
+ .hd_align_mshift = DL6_HALIGN_SFT,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ .pbuf_reg = AFE_DL6_CON0,
+ .pbuf_mask = DL6_PBUF_SIZE_MASK,
+ .pbuf_shift = DL6_PBUF_SIZE_SFT,
+ .minlen_reg = AFE_DL6_CON0,
+ .minlen_mask = DL6_MINLEN_MASK,
+ .minlen_shift = DL6_MINLEN_SFT,
+ },
+ [MT8186_MEMIF_DL7] = {
+ .name = "DL7",
+ .id = MT8186_MEMIF_DL7,
+ .reg_ofs_base = AFE_DL7_BASE,
+ .reg_ofs_cur = AFE_DL7_CUR,
+ .reg_ofs_end = AFE_DL7_END,
+ .reg_ofs_base_msb = AFE_DL7_BASE_MSB,
+ .reg_ofs_cur_msb = AFE_DL7_CUR_MSB,
+ .reg_ofs_end_msb = AFE_DL7_END_MSB,
+ .fs_reg = AFE_DL7_CON0,
+ .fs_shift = DL7_MODE_SFT,
+ .fs_maskbit = DL7_MODE_MASK,
+ .mono_reg = AFE_DL7_CON0,
+ .mono_shift = DL7_MONO_SFT,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = DL7_ON_SFT,
+ .hd_reg = AFE_DL7_CON0,
+ .hd_shift = DL7_HD_MODE_SFT,
+ .hd_align_reg = AFE_DL7_CON0,
+ .hd_align_mshift = DL7_HALIGN_SFT,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ .pbuf_reg = AFE_DL7_CON0,
+ .pbuf_mask = DL7_PBUF_SIZE_MASK,
+ .pbuf_shift = DL7_PBUF_SIZE_SFT,
+ .minlen_reg = AFE_DL7_CON0,
+ .minlen_mask = DL7_MINLEN_MASK,
+ .minlen_shift = DL7_MINLEN_SFT,
+ },
+ [MT8186_MEMIF_DL8] = {
+ .name = "DL8",
+ .id = MT8186_MEMIF_DL8,
+ .reg_ofs_base = AFE_DL8_BASE,
+ .reg_ofs_cur = AFE_DL8_CUR,
+ .reg_ofs_end = AFE_DL8_END,
+ .reg_ofs_base_msb = AFE_DL8_BASE_MSB,
+ .reg_ofs_cur_msb = AFE_DL8_CUR_MSB,
+ .reg_ofs_end_msb = AFE_DL8_END_MSB,
+ .fs_reg = AFE_DL8_CON0,
+ .fs_shift = DL8_MODE_SFT,
+ .fs_maskbit = DL8_MODE_MASK,
+ .mono_reg = AFE_DL8_CON0,
+ .mono_shift = DL8_MONO_SFT,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = DL8_ON_SFT,
+ .hd_reg = AFE_DL8_CON0,
+ .hd_shift = DL8_HD_MODE_SFT,
+ .hd_align_reg = AFE_DL8_CON0,
+ .hd_align_mshift = DL8_HALIGN_SFT,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ .pbuf_reg = AFE_DL8_CON0,
+ .pbuf_mask = DL8_PBUF_SIZE_MASK,
+ .pbuf_shift = DL8_PBUF_SIZE_SFT,
+ .minlen_reg = AFE_DL8_CON0,
+ .minlen_mask = DL8_MINLEN_MASK,
+ .minlen_shift = DL8_MINLEN_SFT,
+ },
+ [MT8186_MEMIF_VUL12] = {
+ .name = "VUL12",
+ .id = MT8186_MEMIF_VUL12,
+ .reg_ofs_base = AFE_VUL12_BASE,
+ .reg_ofs_cur = AFE_VUL12_CUR,
+ .reg_ofs_end = AFE_VUL12_END,
+ .reg_ofs_base_msb = AFE_VUL12_BASE_MSB,
+ .reg_ofs_cur_msb = AFE_VUL12_CUR_MSB,
+ .reg_ofs_end_msb = AFE_VUL12_END_MSB,
+ .fs_reg = AFE_VUL12_CON0,
+ .fs_shift = VUL12_MODE_SFT,
+ .fs_maskbit = VUL12_MODE_MASK,
+ .mono_reg = AFE_VUL12_CON0,
+ .mono_shift = VUL12_MONO_SFT,
+ .quad_ch_reg = AFE_VUL12_CON0,
+ .quad_ch_mask = VUL12_4CH_EN_MASK,
+ .quad_ch_shift = VUL12_4CH_EN_SFT,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = VUL12_ON_SFT,
+ .hd_reg = AFE_VUL12_CON0,
+ .hd_shift = VUL12_HD_MODE_SFT,
+ .hd_align_reg = AFE_VUL12_CON0,
+ .hd_align_mshift = VUL12_HALIGN_SFT,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ [MT8186_MEMIF_VUL2] = {
+ .name = "VUL2",
+ .id = MT8186_MEMIF_VUL2,
+ .reg_ofs_base = AFE_VUL2_BASE,
+ .reg_ofs_cur = AFE_VUL2_CUR,
+ .reg_ofs_end = AFE_VUL2_END,
+ .reg_ofs_base_msb = AFE_VUL2_BASE_MSB,
+ .reg_ofs_cur_msb = AFE_VUL2_CUR_MSB,
+ .reg_ofs_end_msb = AFE_VUL2_END_MSB,
+ .fs_reg = AFE_VUL2_CON0,
+ .fs_shift = VUL2_MODE_SFT,
+ .fs_maskbit = VUL2_MODE_MASK,
+ .mono_reg = AFE_VUL2_CON0,
+ .mono_shift = VUL2_MONO_SFT,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = VUL2_ON_SFT,
+ .hd_reg = AFE_VUL2_CON0,
+ .hd_shift = VUL2_HD_MODE_SFT,
+ .hd_align_reg = AFE_VUL2_CON0,
+ .hd_align_mshift = VUL2_HALIGN_SFT,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ [MT8186_MEMIF_AWB] = {
+ .name = "AWB",
+ .id = MT8186_MEMIF_AWB,
+ .reg_ofs_base = AFE_AWB_BASE,
+ .reg_ofs_cur = AFE_AWB_CUR,
+ .reg_ofs_end = AFE_AWB_END,
+ .reg_ofs_base_msb = AFE_AWB_BASE_MSB,
+ .reg_ofs_cur_msb = AFE_AWB_CUR_MSB,
+ .reg_ofs_end_msb = AFE_AWB_END_MSB,
+ .fs_reg = AFE_AWB_CON0,
+ .fs_shift = AWB_MODE_SFT,
+ .fs_maskbit = AWB_MODE_MASK,
+ .mono_reg = AFE_AWB_CON0,
+ .mono_shift = AWB_MONO_SFT,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = AWB_ON_SFT,
+ .hd_reg = AFE_AWB_CON0,
+ .hd_shift = AWB_HD_MODE_SFT,
+ .hd_align_reg = AFE_AWB_CON0,
+ .hd_align_mshift = AWB_HALIGN_SFT,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ [MT8186_MEMIF_AWB2] = {
+ .name = "AWB2",
+ .id = MT8186_MEMIF_AWB2,
+ .reg_ofs_base = AFE_AWB2_BASE,
+ .reg_ofs_cur = AFE_AWB2_CUR,
+ .reg_ofs_end = AFE_AWB2_END,
+ .reg_ofs_base_msb = AFE_AWB2_BASE_MSB,
+ .reg_ofs_cur_msb = AFE_AWB2_CUR_MSB,
+ .reg_ofs_end_msb = AFE_AWB2_END_MSB,
+ .fs_reg = AFE_AWB2_CON0,
+ .fs_shift = AWB2_MODE_SFT,
+ .fs_maskbit = AWB2_MODE_MASK,
+ .mono_reg = AFE_AWB2_CON0,
+ .mono_shift = AWB2_MONO_SFT,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = AWB2_ON_SFT,
+ .hd_reg = AFE_AWB2_CON0,
+ .hd_shift = AWB2_HD_MODE_SFT,
+ .hd_align_reg = AFE_AWB2_CON0,
+ .hd_align_mshift = AWB2_HALIGN_SFT,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ [MT8186_MEMIF_VUL3] = {
+ .name = "VUL3",
+ .id = MT8186_MEMIF_VUL3,
+ .reg_ofs_base = AFE_VUL3_BASE,
+ .reg_ofs_cur = AFE_VUL3_CUR,
+ .reg_ofs_end = AFE_VUL3_END,
+ .reg_ofs_base_msb = AFE_VUL3_BASE_MSB,
+ .reg_ofs_cur_msb = AFE_VUL3_CUR_MSB,
+ .reg_ofs_end_msb = AFE_VUL3_END_MSB,
+ .fs_reg = AFE_VUL3_CON0,
+ .fs_shift = VUL3_MODE_SFT,
+ .fs_maskbit = VUL3_MODE_MASK,
+ .mono_reg = AFE_VUL3_CON0,
+ .mono_shift = VUL3_MONO_SFT,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = VUL3_ON_SFT,
+ .hd_reg = AFE_VUL3_CON0,
+ .hd_shift = VUL3_HD_MODE_SFT,
+ .hd_align_reg = AFE_VUL3_CON0,
+ .hd_align_mshift = VUL3_HALIGN_SFT,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ [MT8186_MEMIF_VUL4] = {
+ .name = "VUL4",
+ .id = MT8186_MEMIF_VUL4,
+ .reg_ofs_base = AFE_VUL4_BASE,
+ .reg_ofs_cur = AFE_VUL4_CUR,
+ .reg_ofs_end = AFE_VUL4_END,
+ .reg_ofs_base_msb = AFE_VUL4_BASE_MSB,
+ .reg_ofs_cur_msb = AFE_VUL4_CUR_MSB,
+ .reg_ofs_end_msb = AFE_VUL4_END_MSB,
+ .fs_reg = AFE_VUL4_CON0,
+ .fs_shift = VUL4_MODE_SFT,
+ .fs_maskbit = VUL4_MODE_MASK,
+ .mono_reg = AFE_VUL4_CON0,
+ .mono_shift = VUL4_MONO_SFT,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = VUL4_ON_SFT,
+ .hd_reg = AFE_VUL4_CON0,
+ .hd_shift = VUL4_HD_MODE_SFT,
+ .hd_align_reg = AFE_VUL4_CON0,
+ .hd_align_mshift = VUL4_HALIGN_SFT,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ [MT8186_MEMIF_VUL5] = {
+ .name = "VUL5",
+ .id = MT8186_MEMIF_VUL5,
+ .reg_ofs_base = AFE_VUL5_BASE,
+ .reg_ofs_cur = AFE_VUL5_CUR,
+ .reg_ofs_end = AFE_VUL5_END,
+ .reg_ofs_base_msb = AFE_VUL5_BASE_MSB,
+ .reg_ofs_cur_msb = AFE_VUL5_CUR_MSB,
+ .reg_ofs_end_msb = AFE_VUL5_END_MSB,
+ .fs_reg = AFE_VUL5_CON0,
+ .fs_shift = VUL5_MODE_SFT,
+ .fs_maskbit = VUL5_MODE_MASK,
+ .mono_reg = AFE_VUL5_CON0,
+ .mono_shift = VUL5_MONO_SFT,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = VUL5_ON_SFT,
+ .hd_reg = AFE_VUL5_CON0,
+ .hd_shift = VUL5_HD_MODE_SFT,
+ .hd_align_reg = AFE_VUL5_CON0,
+ .hd_align_mshift = VUL5_HALIGN_SFT,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ [MT8186_MEMIF_VUL6] = {
+ .name = "VUL6",
+ .id = MT8186_MEMIF_VUL6,
+ .reg_ofs_base = AFE_VUL6_BASE,
+ .reg_ofs_cur = AFE_VUL6_CUR,
+ .reg_ofs_end = AFE_VUL6_END,
+ .reg_ofs_base_msb = AFE_VUL6_BASE_MSB,
+ .reg_ofs_cur_msb = AFE_VUL6_CUR_MSB,
+ .reg_ofs_end_msb = AFE_VUL6_END_MSB,
+ .fs_reg = AFE_VUL6_CON0,
+ .fs_shift = VUL6_MODE_SFT,
+ .fs_maskbit = VUL6_MODE_MASK,
+ .mono_reg = AFE_VUL6_CON0,
+ .mono_shift = VUL6_MONO_SFT,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = VUL6_ON_SFT,
+ .hd_reg = AFE_VUL6_CON0,
+ .hd_shift = VUL6_HD_MODE_SFT,
+ .hd_align_reg = AFE_VUL6_CON0,
+ .hd_align_mshift = VUL6_HALIGN_SFT,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+};
+
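+/*
+ * Per-IRQ register layout (as visible from the table below): the sample
+ * counter lives in AFE_IRQ_MCU_CNTn, the rate mode in AFE_IRQ_MCU_CON1..CON4,
+ * and the enable/clear bits are shared in AFE_IRQ_MCU_CON0 and AFE_IRQ_MCU_CLR.
+ */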
+static const struct mtk_base_irq_data irq_data[MT8186_IRQ_NUM] = {
+ [MT8186_IRQ_0] = {
+ .id = MT8186_IRQ_0,
+ .irq_cnt_reg = AFE_IRQ_MCU_CNT0,
+ .irq_cnt_shift = AFE_IRQ_CNT_SHIFT,
+ .irq_cnt_maskbit = AFE_IRQ_CNT_MASK,
+ .irq_fs_reg = AFE_IRQ_MCU_CON1,
+ .irq_fs_shift = IRQ0_MCU_MODE_SFT,
+ .irq_fs_maskbit = IRQ0_MCU_MODE_MASK,
+ .irq_en_reg = AFE_IRQ_MCU_CON0,
+ .irq_en_shift = IRQ0_MCU_ON_SFT,
+ .irq_clr_reg = AFE_IRQ_MCU_CLR,
+ .irq_clr_shift = IRQ0_MCU_CLR_SFT,
+ },
+ [MT8186_IRQ_1] = {
+ .id = MT8186_IRQ_1,
+ .irq_cnt_reg = AFE_IRQ_MCU_CNT1,
+ .irq_cnt_shift = AFE_IRQ_CNT_SHIFT,
+ .irq_cnt_maskbit = AFE_IRQ_CNT_MASK,
+ .irq_fs_reg = AFE_IRQ_MCU_CON1,
+ .irq_fs_shift = IRQ1_MCU_MODE_SFT,
+ .irq_fs_maskbit = IRQ1_MCU_MODE_MASK,
+ .irq_en_reg = AFE_IRQ_MCU_CON0,
+ .irq_en_shift = IRQ1_MCU_ON_SFT,
+ .irq_clr_reg = AFE_IRQ_MCU_CLR,
+ .irq_clr_shift = IRQ1_MCU_CLR_SFT,
+ },
+ [MT8186_IRQ_2] = {
+ .id = MT8186_IRQ_2,
+ .irq_cnt_reg = AFE_IRQ_MCU_CNT2,
+ .irq_cnt_shift = AFE_IRQ_CNT_SHIFT,
+ .irq_cnt_maskbit = AFE_IRQ_CNT_MASK,
+ .irq_fs_reg = AFE_IRQ_MCU_CON1,
+ .irq_fs_shift = IRQ2_MCU_MODE_SFT,
+ .irq_fs_maskbit = IRQ2_MCU_MODE_MASK,
+ .irq_en_reg = AFE_IRQ_MCU_CON0,
+ .irq_en_shift = IRQ2_MCU_ON_SFT,
+ .irq_clr_reg = AFE_IRQ_MCU_CLR,
+ .irq_clr_shift = IRQ2_MCU_CLR_SFT,
+ },
+ [MT8186_IRQ_3] = {
+ .id = MT8186_IRQ_3,
+ .irq_cnt_reg = AFE_IRQ_MCU_CNT3,
+ .irq_cnt_shift = AFE_IRQ_CNT_SHIFT,
+ .irq_cnt_maskbit = AFE_IRQ_CNT_MASK,
+ .irq_fs_reg = AFE_IRQ_MCU_CON1,
+ .irq_fs_shift = IRQ3_MCU_MODE_SFT,
+ .irq_fs_maskbit = IRQ3_MCU_MODE_MASK,
+ .irq_en_reg = AFE_IRQ_MCU_CON0,
+ .irq_en_shift = IRQ3_MCU_ON_SFT,
+ .irq_clr_reg = AFE_IRQ_MCU_CLR,
+ .irq_clr_shift = IRQ3_MCU_CLR_SFT,
+ },
+ [MT8186_IRQ_4] = {
+ .id = MT8186_IRQ_4,
+ .irq_cnt_reg = AFE_IRQ_MCU_CNT4,
+ .irq_cnt_shift = AFE_IRQ_CNT_SHIFT,
+ .irq_cnt_maskbit = AFE_IRQ_CNT_MASK,
+ .irq_fs_reg = AFE_IRQ_MCU_CON1,
+ .irq_fs_shift = IRQ4_MCU_MODE_SFT,
+ .irq_fs_maskbit = IRQ4_MCU_MODE_MASK,
+ .irq_en_reg = AFE_IRQ_MCU_CON0,
+ .irq_en_shift = IRQ4_MCU_ON_SFT,
+ .irq_clr_reg = AFE_IRQ_MCU_CLR,
+ .irq_clr_shift = IRQ4_MCU_CLR_SFT,
+ },
+ [MT8186_IRQ_5] = {
+ .id = MT8186_IRQ_5,
+ .irq_cnt_reg = AFE_IRQ_MCU_CNT5,
+ .irq_cnt_shift = AFE_IRQ_CNT_SHIFT,
+ .irq_cnt_maskbit = AFE_IRQ_CNT_MASK,
+ .irq_fs_reg = AFE_IRQ_MCU_CON1,
+ .irq_fs_shift = IRQ5_MCU_MODE_SFT,
+ .irq_fs_maskbit = IRQ5_MCU_MODE_MASK,
+ .irq_en_reg = AFE_IRQ_MCU_CON0,
+ .irq_en_shift = IRQ5_MCU_ON_SFT,
+ .irq_clr_reg = AFE_IRQ_MCU_CLR,
+ .irq_clr_shift = IRQ5_MCU_CLR_SFT,
+ },
+ [MT8186_IRQ_6] = {
+ .id = MT8186_IRQ_6,
+ .irq_cnt_reg = AFE_IRQ_MCU_CNT6,
+ .irq_cnt_shift = AFE_IRQ_CNT_SHIFT,
+ .irq_cnt_maskbit = AFE_IRQ_CNT_MASK,
+ .irq_fs_reg = AFE_IRQ_MCU_CON1,
+ .irq_fs_shift = IRQ6_MCU_MODE_SFT,
+ .irq_fs_maskbit = IRQ6_MCU_MODE_MASK,
+ .irq_en_reg = AFE_IRQ_MCU_CON0,
+ .irq_en_shift = IRQ6_MCU_ON_SFT,
+ .irq_clr_reg = AFE_IRQ_MCU_CLR,
+ .irq_clr_shift = IRQ6_MCU_CLR_SFT,
+ },
+ [MT8186_IRQ_7] = {
+ .id = MT8186_IRQ_7,
+ .irq_cnt_reg = AFE_IRQ_MCU_CNT7,
+ .irq_cnt_shift = AFE_IRQ_CNT_SHIFT,
+ .irq_cnt_maskbit = AFE_IRQ_CNT_MASK,
+ .irq_fs_reg = AFE_IRQ_MCU_CON1,
+ .irq_fs_shift = IRQ7_MCU_MODE_SFT,
+ .irq_fs_maskbit = IRQ7_MCU_MODE_MASK,
+ .irq_en_reg = AFE_IRQ_MCU_CON0,
+ .irq_en_shift = IRQ7_MCU_ON_SFT,
+ .irq_clr_reg = AFE_IRQ_MCU_CLR,
+ .irq_clr_shift = IRQ7_MCU_CLR_SFT,
+ },
+ [MT8186_IRQ_8] = {
+ .id = MT8186_IRQ_8,
+ .irq_cnt_reg = AFE_IRQ_MCU_CNT8,
+ .irq_cnt_shift = AFE_IRQ_CNT_SHIFT,
+ .irq_cnt_maskbit = AFE_IRQ_CNT_MASK,
+ .irq_fs_reg = AFE_IRQ_MCU_CON2,
+ .irq_fs_shift = IRQ8_MCU_MODE_SFT,
+ .irq_fs_maskbit = IRQ8_MCU_MODE_MASK,
+ .irq_en_reg = AFE_IRQ_MCU_CON0,
+ .irq_en_shift = IRQ8_MCU_ON_SFT,
+ .irq_clr_reg = AFE_IRQ_MCU_CLR,
+ .irq_clr_shift = IRQ8_MCU_CLR_SFT,
+ },
+ [MT8186_IRQ_9] = {
+ .id = MT8186_IRQ_9,
+ .irq_cnt_reg = AFE_IRQ_MCU_CNT9,
+ .irq_cnt_shift = AFE_IRQ_CNT_SHIFT,
+ .irq_cnt_maskbit = AFE_IRQ_CNT_MASK,
+ .irq_fs_reg = AFE_IRQ_MCU_CON2,
+ .irq_fs_shift = IRQ9_MCU_MODE_SFT,
+ .irq_fs_maskbit = IRQ9_MCU_MODE_MASK,
+ .irq_en_reg = AFE_IRQ_MCU_CON0,
+ .irq_en_shift = IRQ9_MCU_ON_SFT,
+ .irq_clr_reg = AFE_IRQ_MCU_CLR,
+ .irq_clr_shift = IRQ9_MCU_CLR_SFT,
+ },
+ [MT8186_IRQ_10] = {
+ .id = MT8186_IRQ_10,
+ .irq_cnt_reg = AFE_IRQ_MCU_CNT10,
+ .irq_cnt_shift = AFE_IRQ_CNT_SHIFT,
+ .irq_cnt_maskbit = AFE_IRQ_CNT_MASK,
+ .irq_fs_reg = AFE_IRQ_MCU_CON2,
+ .irq_fs_shift = IRQ10_MCU_MODE_SFT,
+ .irq_fs_maskbit = IRQ10_MCU_MODE_MASK,
+ .irq_en_reg = AFE_IRQ_MCU_CON0,
+ .irq_en_shift = IRQ10_MCU_ON_SFT,
+ .irq_clr_reg = AFE_IRQ_MCU_CLR,
+ .irq_clr_shift = IRQ10_MCU_CLR_SFT,
+ },
+ [MT8186_IRQ_11] = {
+ .id = MT8186_IRQ_11,
+ .irq_cnt_reg = AFE_IRQ_MCU_CNT11,
+ .irq_cnt_shift = AFE_IRQ_CNT_SHIFT,
+ .irq_cnt_maskbit = AFE_IRQ_CNT_MASK,
+ .irq_fs_reg = AFE_IRQ_MCU_CON2,
+ .irq_fs_shift = IRQ11_MCU_MODE_SFT,
+ .irq_fs_maskbit = IRQ11_MCU_MODE_MASK,
+ .irq_en_reg = AFE_IRQ_MCU_CON0,
+ .irq_en_shift = IRQ11_MCU_ON_SFT,
+ .irq_clr_reg = AFE_IRQ_MCU_CLR,
+ .irq_clr_shift = IRQ11_MCU_CLR_SFT,
+ },
+ [MT8186_IRQ_12] = {
+ .id = MT8186_IRQ_12,
+ .irq_cnt_reg = AFE_IRQ_MCU_CNT12,
+ .irq_cnt_shift = AFE_IRQ_CNT_SHIFT,
+ .irq_cnt_maskbit = AFE_IRQ_CNT_MASK,
+ .irq_fs_reg = AFE_IRQ_MCU_CON2,
+ .irq_fs_shift = IRQ12_MCU_MODE_SFT,
+ .irq_fs_maskbit = IRQ12_MCU_MODE_MASK,
+ .irq_en_reg = AFE_IRQ_MCU_CON0,
+ .irq_en_shift = IRQ12_MCU_ON_SFT,
+ .irq_clr_reg = AFE_IRQ_MCU_CLR,
+ .irq_clr_shift = IRQ12_MCU_CLR_SFT,
+ },
+ [MT8186_IRQ_13] = {
+ .id = MT8186_IRQ_13,
+ .irq_cnt_reg = AFE_IRQ_MCU_CNT13,
+ .irq_cnt_shift = AFE_IRQ_CNT_SHIFT,
+ .irq_cnt_maskbit = AFE_IRQ_CNT_MASK,
+ .irq_fs_reg = AFE_IRQ_MCU_CON2,
+ .irq_fs_shift = IRQ13_MCU_MODE_SFT,
+ .irq_fs_maskbit = IRQ13_MCU_MODE_MASK,
+ .irq_en_reg = AFE_IRQ_MCU_CON0,
+ .irq_en_shift = IRQ13_MCU_ON_SFT,
+ .irq_clr_reg = AFE_IRQ_MCU_CLR,
+ .irq_clr_shift = IRQ13_MCU_CLR_SFT,
+ },
+ [MT8186_IRQ_14] = {
+ .id = MT8186_IRQ_14,
+ .irq_cnt_reg = AFE_IRQ_MCU_CNT14,
+ .irq_cnt_shift = AFE_IRQ_CNT_SHIFT,
+ .irq_cnt_maskbit = AFE_IRQ_CNT_MASK,
+ .irq_fs_reg = AFE_IRQ_MCU_CON2,
+ .irq_fs_shift = IRQ14_MCU_MODE_SFT,
+ .irq_fs_maskbit = IRQ14_MCU_MODE_MASK,
+ .irq_en_reg = AFE_IRQ_MCU_CON0,
+ .irq_en_shift = IRQ14_MCU_ON_SFT,
+ .irq_clr_reg = AFE_IRQ_MCU_CLR,
+ .irq_clr_shift = IRQ14_MCU_CLR_SFT,
+ },
+ [MT8186_IRQ_15] = {
+ .id = MT8186_IRQ_15,
+ .irq_cnt_reg = AFE_IRQ_MCU_CNT15,
+ .irq_cnt_shift = AFE_IRQ_CNT_SHIFT,
+ .irq_cnt_maskbit = AFE_IRQ_CNT_MASK,
+ .irq_fs_reg = AFE_IRQ_MCU_CON2,
+ .irq_fs_shift = IRQ15_MCU_MODE_SFT,
+ .irq_fs_maskbit = IRQ15_MCU_MODE_MASK,
+ .irq_en_reg = AFE_IRQ_MCU_CON0,
+ .irq_en_shift = IRQ15_MCU_ON_SFT,
+ .irq_clr_reg = AFE_IRQ_MCU_CLR,
+ .irq_clr_shift = IRQ15_MCU_CLR_SFT,
+ },
+ [MT8186_IRQ_16] = {
+ .id = MT8186_IRQ_16,
+ .irq_cnt_reg = AFE_IRQ_MCU_CNT16,
+ .irq_cnt_shift = AFE_IRQ_CNT_SHIFT,
+ .irq_cnt_maskbit = AFE_IRQ_CNT_MASK,
+ .irq_fs_reg = AFE_IRQ_MCU_CON3,
+ .irq_fs_shift = IRQ16_MCU_MODE_SFT,
+ .irq_fs_maskbit = IRQ16_MCU_MODE_MASK,
+ .irq_en_reg = AFE_IRQ_MCU_CON0,
+ .irq_en_shift = IRQ16_MCU_ON_SFT,
+ .irq_clr_reg = AFE_IRQ_MCU_CLR,
+ .irq_clr_shift = IRQ16_MCU_CLR_SFT,
+ },
+ [MT8186_IRQ_17] = {
+ .id = MT8186_IRQ_17,
+ .irq_cnt_reg = AFE_IRQ_MCU_CNT17,
+ .irq_cnt_shift = AFE_IRQ_CNT_SHIFT,
+ .irq_cnt_maskbit = AFE_IRQ_CNT_MASK,
+ .irq_fs_reg = AFE_IRQ_MCU_CON3,
+ .irq_fs_shift = IRQ17_MCU_MODE_SFT,
+ .irq_fs_maskbit = IRQ17_MCU_MODE_MASK,
+ .irq_en_reg = AFE_IRQ_MCU_CON0,
+ .irq_en_shift = IRQ17_MCU_ON_SFT,
+ .irq_clr_reg = AFE_IRQ_MCU_CLR,
+ .irq_clr_shift = IRQ17_MCU_CLR_SFT,
+ },
+ [MT8186_IRQ_18] = {
+ .id = MT8186_IRQ_18,
+ .irq_cnt_reg = AFE_IRQ_MCU_CNT18,
+ .irq_cnt_shift = AFE_IRQ_CNT_SHIFT,
+ .irq_cnt_maskbit = AFE_IRQ_CNT_MASK,
+ .irq_fs_reg = AFE_IRQ_MCU_CON3,
+ .irq_fs_shift = IRQ18_MCU_MODE_SFT,
+ .irq_fs_maskbit = IRQ18_MCU_MODE_MASK,
+ .irq_en_reg = AFE_IRQ_MCU_CON0,
+ .irq_en_shift = IRQ18_MCU_ON_SFT,
+ .irq_clr_reg = AFE_IRQ_MCU_CLR,
+ .irq_clr_shift = IRQ18_MCU_CLR_SFT,
+ },
+ [MT8186_IRQ_19] = {
+ .id = MT8186_IRQ_19,
+ .irq_cnt_reg = AFE_IRQ_MCU_CNT19,
+ .irq_cnt_shift = AFE_IRQ_CNT_SHIFT,
+ .irq_cnt_maskbit = AFE_IRQ_CNT_MASK,
+ .irq_fs_reg = AFE_IRQ_MCU_CON3,
+ .irq_fs_shift = IRQ19_MCU_MODE_SFT,
+ .irq_fs_maskbit = IRQ19_MCU_MODE_MASK,
+ .irq_en_reg = AFE_IRQ_MCU_CON0,
+ .irq_en_shift = IRQ19_MCU_ON_SFT,
+ .irq_clr_reg = AFE_IRQ_MCU_CLR,
+ .irq_clr_shift = IRQ19_MCU_CLR_SFT,
+ },
+ [MT8186_IRQ_20] = {
+ .id = MT8186_IRQ_20,
+ .irq_cnt_reg = AFE_IRQ_MCU_CNT20,
+ .irq_cnt_shift = AFE_IRQ_CNT_SHIFT,
+ .irq_cnt_maskbit = AFE_IRQ_CNT_MASK,
+ .irq_fs_reg = AFE_IRQ_MCU_CON3,
+ .irq_fs_shift = IRQ20_MCU_MODE_SFT,
+ .irq_fs_maskbit = IRQ20_MCU_MODE_MASK,
+ .irq_en_reg = AFE_IRQ_MCU_CON0,
+ .irq_en_shift = IRQ20_MCU_ON_SFT,
+ .irq_clr_reg = AFE_IRQ_MCU_CLR,
+ .irq_clr_shift = IRQ20_MCU_CLR_SFT,
+ },
+ [MT8186_IRQ_21] = {
+ .id = MT8186_IRQ_21,
+ .irq_cnt_reg = AFE_IRQ_MCU_CNT21,
+ .irq_cnt_shift = AFE_IRQ_CNT_SHIFT,
+ .irq_cnt_maskbit = AFE_IRQ_CNT_MASK,
+ .irq_fs_reg = AFE_IRQ_MCU_CON3,
+ .irq_fs_shift = IRQ21_MCU_MODE_SFT,
+ .irq_fs_maskbit = IRQ21_MCU_MODE_MASK,
+ .irq_en_reg = AFE_IRQ_MCU_CON0,
+ .irq_en_shift = IRQ21_MCU_ON_SFT,
+ .irq_clr_reg = AFE_IRQ_MCU_CLR,
+ .irq_clr_shift = IRQ21_MCU_CLR_SFT,
+ },
+ [MT8186_IRQ_22] = {
+ .id = MT8186_IRQ_22,
+ .irq_cnt_reg = AFE_IRQ_MCU_CNT22,
+ .irq_cnt_shift = AFE_IRQ_CNT_SHIFT,
+ .irq_cnt_maskbit = AFE_IRQ_CNT_MASK,
+ .irq_fs_reg = AFE_IRQ_MCU_CON3,
+ .irq_fs_shift = IRQ22_MCU_MODE_SFT,
+ .irq_fs_maskbit = IRQ22_MCU_MODE_MASK,
+ .irq_en_reg = AFE_IRQ_MCU_CON0,
+ .irq_en_shift = IRQ22_MCU_ON_SFT,
+ .irq_clr_reg = AFE_IRQ_MCU_CLR,
+ .irq_clr_shift = IRQ22_MCU_CLR_SFT,
+ },
+ [MT8186_IRQ_23] = {
+ .id = MT8186_IRQ_23,
+ .irq_cnt_reg = AFE_IRQ_MCU_CNT23,
+ .irq_cnt_shift = AFE_IRQ_CNT_SHIFT,
+ .irq_cnt_maskbit = AFE_IRQ_CNT_MASK,
+ .irq_fs_reg = AFE_IRQ_MCU_CON3,
+ .irq_fs_shift = IRQ23_MCU_MODE_SFT,
+ .irq_fs_maskbit = IRQ23_MCU_MODE_MASK,
+ .irq_en_reg = AFE_IRQ_MCU_CON0,
+ .irq_en_shift = IRQ23_MCU_ON_SFT,
+ .irq_clr_reg = AFE_IRQ_MCU_CLR,
+ .irq_clr_shift = IRQ23_MCU_CLR_SFT,
+ },
+ [MT8186_IRQ_24] = {
+ .id = MT8186_IRQ_24,
+ .irq_cnt_reg = AFE_IRQ_MCU_CNT24,
+ .irq_cnt_shift = AFE_IRQ_CNT_SHIFT,
+ .irq_cnt_maskbit = AFE_IRQ_CNT_MASK,
+ .irq_fs_reg = AFE_IRQ_MCU_CON4,
+ .irq_fs_shift = IRQ24_MCU_MODE_SFT,
+ .irq_fs_maskbit = IRQ24_MCU_MODE_MASK,
+ .irq_en_reg = AFE_IRQ_MCU_CON0,
+ .irq_en_shift = IRQ24_MCU_ON_SFT,
+ .irq_clr_reg = AFE_IRQ_MCU_CLR,
+ .irq_clr_shift = IRQ24_MCU_CLR_SFT,
+ },
+ [MT8186_IRQ_25] = {
+ .id = MT8186_IRQ_25,
+ .irq_cnt_reg = AFE_IRQ_MCU_CNT25,
+ .irq_cnt_shift = AFE_IRQ_CNT_SHIFT,
+ .irq_cnt_maskbit = AFE_IRQ_CNT_MASK,
+ .irq_fs_reg = AFE_IRQ_MCU_CON4,
+ .irq_fs_shift = IRQ25_MCU_MODE_SFT,
+ .irq_fs_maskbit = IRQ25_MCU_MODE_MASK,
+ .irq_en_reg = AFE_IRQ_MCU_CON0,
+ .irq_en_shift = IRQ25_MCU_ON_SFT,
+ .irq_clr_reg = AFE_IRQ_MCU_CLR,
+ .irq_clr_shift = IRQ25_MCU_CLR_SFT,
+ },
+ [MT8186_IRQ_26] = {
+ .id = MT8186_IRQ_26,
+ .irq_cnt_reg = AFE_IRQ_MCU_CNT26,
+ .irq_cnt_shift = AFE_IRQ_CNT_SHIFT,
+ .irq_cnt_maskbit = AFE_IRQ_CNT_MASK,
+ .irq_fs_reg = AFE_IRQ_MCU_CON4,
+ .irq_fs_shift = IRQ26_MCU_MODE_SFT,
+ .irq_fs_maskbit = IRQ26_MCU_MODE_MASK,
+ .irq_en_reg = AFE_IRQ_MCU_CON0,
+ .irq_en_shift = IRQ26_MCU_ON_SFT,
+ .irq_clr_reg = AFE_IRQ_MCU_CLR,
+ .irq_clr_shift = IRQ26_MCU_CLR_SFT,
+ },
+};
+
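+/* fixed memif-to-IRQ assignment; irq_usage is kept constant (const_irq) at probe time */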
+static const int memif_irq_usage[MT8186_MEMIF_NUM] = {
+ /* TODO: verify each memif & irq */
+ [MT8186_MEMIF_DL1] = MT8186_IRQ_0,
+ [MT8186_MEMIF_DL2] = MT8186_IRQ_1,
+ [MT8186_MEMIF_DL3] = MT8186_IRQ_2,
+ [MT8186_MEMIF_DL4] = MT8186_IRQ_3,
+ [MT8186_MEMIF_DL5] = MT8186_IRQ_4,
+ [MT8186_MEMIF_DL6] = MT8186_IRQ_5,
+ [MT8186_MEMIF_DL7] = MT8186_IRQ_6,
+ [MT8186_MEMIF_DL8] = MT8186_IRQ_7,
+ [MT8186_MEMIF_DL12] = MT8186_IRQ_9,
+ [MT8186_MEMIF_VUL12] = MT8186_IRQ_10,
+ [MT8186_MEMIF_VUL2] = MT8186_IRQ_11,
+ [MT8186_MEMIF_AWB] = MT8186_IRQ_12,
+ [MT8186_MEMIF_AWB2] = MT8186_IRQ_13,
+ [MT8186_MEMIF_VUL3] = MT8186_IRQ_14,
+ [MT8186_MEMIF_VUL4] = MT8186_IRQ_15,
+ [MT8186_MEMIF_VUL5] = MT8186_IRQ_16,
+ [MT8186_MEMIF_VUL6] = MT8186_IRQ_17,
+};
+
+static bool mt8186_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+	/* these auto-generated regs have read-only bits, so mark them volatile */
+	/* volatile regs are not cached, so they cannot be restored after power off */
+ switch (reg) {
+ case AUDIO_TOP_CON0: /* reg bit controlled by CCF */
+ case AUDIO_TOP_CON1: /* reg bit controlled by CCF */
+ case AUDIO_TOP_CON2:
+ case AUDIO_TOP_CON3:
+ case AFE_DL1_CUR_MSB:
+ case AFE_DL1_CUR:
+ case AFE_DL1_END:
+ case AFE_DL2_CUR_MSB:
+ case AFE_DL2_CUR:
+ case AFE_DL2_END:
+ case AFE_DL3_CUR_MSB:
+ case AFE_DL3_CUR:
+ case AFE_DL3_END:
+ case AFE_DL4_CUR_MSB:
+ case AFE_DL4_CUR:
+ case AFE_DL4_END:
+ case AFE_DL12_CUR_MSB:
+ case AFE_DL12_CUR:
+ case AFE_DL12_END:
+ case AFE_ADDA_SRC_DEBUG_MON0:
+ case AFE_ADDA_SRC_DEBUG_MON1:
+ case AFE_ADDA_UL_SRC_MON0:
+ case AFE_ADDA_UL_SRC_MON1:
+ case AFE_SECURE_CON0:
+ case AFE_SRAM_BOUND:
+ case AFE_SECURE_CON1:
+ case AFE_VUL_CUR_MSB:
+ case AFE_VUL_CUR:
+ case AFE_VUL_END:
+ case AFE_SIDETONE_MON:
+ case AFE_SIDETONE_CON0:
+ case AFE_SIDETONE_COEFF:
+ case AFE_VUL2_CUR_MSB:
+ case AFE_VUL2_CUR:
+ case AFE_VUL2_END:
+ case AFE_VUL3_CUR_MSB:
+ case AFE_VUL3_CUR:
+ case AFE_VUL3_END:
+ case AFE_I2S_MON:
+ case AFE_DAC_MON:
+ case AFE_IRQ0_MCU_CNT_MON:
+ case AFE_IRQ6_MCU_CNT_MON:
+ case AFE_VUL4_CUR_MSB:
+ case AFE_VUL4_CUR:
+ case AFE_VUL4_END:
+ case AFE_VUL12_CUR_MSB:
+ case AFE_VUL12_CUR:
+ case AFE_VUL12_END:
+ case AFE_IRQ3_MCU_CNT_MON:
+ case AFE_IRQ4_MCU_CNT_MON:
+ case AFE_IRQ_MCU_STATUS:
+ case AFE_IRQ_MCU_CLR:
+ case AFE_IRQ_MCU_MON2:
+ case AFE_IRQ1_MCU_CNT_MON:
+ case AFE_IRQ2_MCU_CNT_MON:
+ case AFE_IRQ5_MCU_CNT_MON:
+ case AFE_IRQ7_MCU_CNT_MON:
+ case AFE_IRQ_MCU_MISS_CLR:
+ case AFE_GAIN1_CUR:
+ case AFE_GAIN2_CUR:
+ case AFE_SRAM_DELSEL_CON1:
+ case PCM_INTF_CON2:
+ case FPGA_CFG0:
+ case FPGA_CFG1:
+ case FPGA_CFG2:
+ case FPGA_CFG3:
+ case AUDIO_TOP_DBG_MON0:
+ case AUDIO_TOP_DBG_MON1:
+ case AFE_IRQ8_MCU_CNT_MON:
+ case AFE_IRQ11_MCU_CNT_MON:
+ case AFE_IRQ12_MCU_CNT_MON:
+ case AFE_IRQ9_MCU_CNT_MON:
+ case AFE_IRQ10_MCU_CNT_MON:
+ case AFE_IRQ13_MCU_CNT_MON:
+ case AFE_IRQ14_MCU_CNT_MON:
+ case AFE_IRQ15_MCU_CNT_MON:
+ case AFE_IRQ16_MCU_CNT_MON:
+ case AFE_IRQ17_MCU_CNT_MON:
+ case AFE_IRQ18_MCU_CNT_MON:
+ case AFE_IRQ19_MCU_CNT_MON:
+ case AFE_IRQ20_MCU_CNT_MON:
+ case AFE_IRQ21_MCU_CNT_MON:
+ case AFE_IRQ22_MCU_CNT_MON:
+ case AFE_IRQ23_MCU_CNT_MON:
+ case AFE_IRQ24_MCU_CNT_MON:
+ case AFE_IRQ25_MCU_CNT_MON:
+ case AFE_IRQ26_MCU_CNT_MON:
+ case AFE_IRQ31_MCU_CNT_MON:
+ case AFE_CBIP_MON0:
+ case AFE_CBIP_SLV_MUX_MON0:
+ case AFE_CBIP_SLV_DECODER_MON0:
+ case AFE_ADDA6_MTKAIF_MON0:
+ case AFE_ADDA6_MTKAIF_MON1:
+ case AFE_AWB_CUR_MSB:
+ case AFE_AWB_CUR:
+ case AFE_AWB_END:
+ case AFE_AWB2_CUR_MSB:
+ case AFE_AWB2_CUR:
+ case AFE_AWB2_END:
+ case AFE_DAI_CUR_MSB:
+ case AFE_DAI_CUR:
+ case AFE_DAI_END:
+ case AFE_DAI2_CUR_MSB:
+ case AFE_DAI2_CUR:
+ case AFE_DAI2_END:
+ case AFE_ADDA6_SRC_DEBUG_MON0:
+ case AFE_ADD6A_UL_SRC_MON0:
+ case AFE_ADDA6_UL_SRC_MON1:
+ case AFE_MOD_DAI_CUR_MSB:
+ case AFE_MOD_DAI_CUR:
+ case AFE_MOD_DAI_END:
+ case AFE_AWB_RCH_MON:
+ case AFE_AWB_LCH_MON:
+ case AFE_VUL_RCH_MON:
+ case AFE_VUL_LCH_MON:
+ case AFE_VUL12_RCH_MON:
+ case AFE_VUL12_LCH_MON:
+ case AFE_VUL2_RCH_MON:
+ case AFE_VUL2_LCH_MON:
+ case AFE_DAI_DATA_MON:
+ case AFE_MOD_DAI_DATA_MON:
+ case AFE_DAI2_DATA_MON:
+ case AFE_AWB2_RCH_MON:
+ case AFE_AWB2_LCH_MON:
+ case AFE_VUL3_RCH_MON:
+ case AFE_VUL3_LCH_MON:
+ case AFE_VUL4_RCH_MON:
+ case AFE_VUL4_LCH_MON:
+ case AFE_VUL5_RCH_MON:
+ case AFE_VUL5_LCH_MON:
+ case AFE_VUL6_RCH_MON:
+ case AFE_VUL6_LCH_MON:
+ case AFE_DL1_RCH_MON:
+ case AFE_DL1_LCH_MON:
+ case AFE_DL2_RCH_MON:
+ case AFE_DL2_LCH_MON:
+ case AFE_DL12_RCH1_MON:
+ case AFE_DL12_LCH1_MON:
+ case AFE_DL12_RCH2_MON:
+ case AFE_DL12_LCH2_MON:
+ case AFE_DL3_RCH_MON:
+ case AFE_DL3_LCH_MON:
+ case AFE_DL4_RCH_MON:
+ case AFE_DL4_LCH_MON:
+ case AFE_DL5_RCH_MON:
+ case AFE_DL5_LCH_MON:
+ case AFE_DL6_RCH_MON:
+ case AFE_DL6_LCH_MON:
+ case AFE_DL7_RCH_MON:
+ case AFE_DL7_LCH_MON:
+ case AFE_DL8_RCH_MON:
+ case AFE_DL8_LCH_MON:
+ case AFE_VUL5_CUR_MSB:
+ case AFE_VUL5_CUR:
+ case AFE_VUL5_END:
+ case AFE_VUL6_CUR_MSB:
+ case AFE_VUL6_CUR:
+ case AFE_VUL6_END:
+ case AFE_ADDA_DL_SDM_FIFO_MON:
+ case AFE_ADDA_DL_SRC_LCH_MON:
+ case AFE_ADDA_DL_SRC_RCH_MON:
+ case AFE_ADDA_DL_SDM_OUT_MON:
+ case AFE_CONNSYS_I2S_MON:
+ case AFE_ASRC_2CH_CON0:
+ case AFE_ASRC_2CH_CON2:
+ case AFE_ASRC_2CH_CON3:
+ case AFE_ASRC_2CH_CON4:
+ case AFE_ASRC_2CH_CON5:
+ case AFE_ASRC_2CH_CON7:
+ case AFE_ASRC_2CH_CON8:
+ case AFE_ASRC_2CH_CON12:
+ case AFE_ASRC_2CH_CON13:
+ case AFE_ADDA_MTKAIF_MON0:
+ case AFE_ADDA_MTKAIF_MON1:
+ case AFE_AUD_PAD_TOP:
+ case AFE_DL_NLE_R_MON0:
+ case AFE_DL_NLE_R_MON1:
+ case AFE_DL_NLE_R_MON2:
+ case AFE_DL_NLE_L_MON0:
+ case AFE_DL_NLE_L_MON1:
+ case AFE_DL_NLE_L_MON2:
+ case AFE_GENERAL1_ASRC_2CH_CON0:
+ case AFE_GENERAL1_ASRC_2CH_CON2:
+ case AFE_GENERAL1_ASRC_2CH_CON3:
+ case AFE_GENERAL1_ASRC_2CH_CON4:
+ case AFE_GENERAL1_ASRC_2CH_CON5:
+ case AFE_GENERAL1_ASRC_2CH_CON7:
+ case AFE_GENERAL1_ASRC_2CH_CON8:
+ case AFE_GENERAL1_ASRC_2CH_CON12:
+ case AFE_GENERAL1_ASRC_2CH_CON13:
+ case AFE_GENERAL2_ASRC_2CH_CON0:
+ case AFE_GENERAL2_ASRC_2CH_CON2:
+ case AFE_GENERAL2_ASRC_2CH_CON3:
+ case AFE_GENERAL2_ASRC_2CH_CON4:
+ case AFE_GENERAL2_ASRC_2CH_CON5:
+ case AFE_GENERAL2_ASRC_2CH_CON7:
+ case AFE_GENERAL2_ASRC_2CH_CON8:
+ case AFE_GENERAL2_ASRC_2CH_CON12:
+ case AFE_GENERAL2_ASRC_2CH_CON13:
+ case AFE_DL5_CUR_MSB:
+ case AFE_DL5_CUR:
+ case AFE_DL5_END:
+ case AFE_DL6_CUR_MSB:
+ case AFE_DL6_CUR:
+ case AFE_DL6_END:
+ case AFE_DL7_CUR_MSB:
+ case AFE_DL7_CUR:
+ case AFE_DL7_END:
+ case AFE_DL8_CUR_MSB:
+ case AFE_DL8_CUR:
+ case AFE_DL8_END:
+ case AFE_PROT_SIDEBAND_MON:
+ case AFE_DOMAIN_SIDEBAND0_MON:
+ case AFE_DOMAIN_SIDEBAND1_MON:
+ case AFE_DOMAIN_SIDEBAND2_MON:
+ case AFE_DOMAIN_SIDEBAND3_MON:
+ case AFE_APLL1_TUNER_CFG: /* [20:31] is monitor */
+ case AFE_APLL2_TUNER_CFG: /* [20:31] is monitor */
+ return true;
+ default:
+ return false;
+	}
+}
+
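+/*
+ * Flat regcache: non-volatile registers are cached so regcache_sync() can
+ * restore them on runtime resume; registers listed in mt8186_is_volatile_reg()
+ * above are always read from hardware.
+ */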
+static const struct regmap_config mt8186_afe_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+
+ .volatile_reg = mt8186_is_volatile_reg,
+
+ .max_register = AFE_MAX_REGISTER,
+ .num_reg_defaults_raw = AFE_MAX_REGISTER,
+
+ .cache_type = REGCACHE_FLAT,
+};
+
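+/*
+ * ISR flow: read which AFE IRQs are routed to the MCU and which are pending,
+ * signal a period elapsed on every active memif whose IRQ bit is set, then
+ * ack the handled bits via AFE_IRQ_MCU_CLR.
+ */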
+static irqreturn_t mt8186_afe_irq_handler(int irq_id, void *dev)
+{
+ struct mtk_base_afe *afe = dev;
+ struct mtk_base_afe_irq *irq;
+ unsigned int status;
+ unsigned int status_mcu;
+ unsigned int mcu_en;
+ int ret;
+ int i;
+
+ /* get irq that is sent to MCU */
+ ret = regmap_read(afe->regmap, AFE_IRQ_MCU_EN, &mcu_en);
+ if (ret) {
+ dev_err(afe->dev, "%s, get irq direction fail, ret %d", __func__, ret);
+ return ret;
+ }
+
+ ret = regmap_read(afe->regmap, AFE_IRQ_MCU_STATUS, &status);
+	/* only care about IRQs that are sent to the MCU */
+ status_mcu = status & mcu_en & AFE_IRQ_STATUS_BITS;
+
+ if (ret || status_mcu == 0) {
+ dev_err(afe->dev, "%s(), irq status err, ret %d, status 0x%x, mcu_en 0x%x\n",
+ __func__, ret, status, mcu_en);
+
+ goto err_irq;
+ }
+
+ for (i = 0; i < MT8186_MEMIF_NUM; i++) {
+ struct mtk_base_afe_memif *memif = &afe->memif[i];
+
+ if (!memif->substream)
+ continue;
+
+ if (memif->irq_usage < 0)
+ continue;
+
+ irq = &afe->irqs[memif->irq_usage];
+
+ if (status_mcu & (1 << irq->irq_data->irq_en_shift))
+ snd_pcm_period_elapsed(memif->substream);
+ }
+
+err_irq:
+ /* clear irq */
+ regmap_write(afe->regmap, AFE_IRQ_MCU_CLR, status_mcu);
+
+ return IRQ_HANDLED;
+}
+
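+/*
+ * Runtime suspend: turn the AFE off, wait for it to go idle, clear any
+ * pending IRQs, park the signal generator, then switch the regmap to
+ * cache-only before gating the clocks so later register writes are deferred.
+ */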
+static int mt8186_afe_runtime_suspend(struct device *dev)
+{
+ struct mtk_base_afe *afe = dev_get_drvdata(dev);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ unsigned int value = 0;
+ int ret;
+
+ if (!afe->regmap || afe_priv->pm_runtime_bypass_reg_ctl)
+ goto skip_regmap;
+
+ /* disable AFE */
+ regmap_update_bits(afe->regmap, AFE_DAC_CON0, 0x1, 0x0);
+
+ ret = regmap_read_poll_timeout(afe->regmap,
+ AFE_DAC_MON,
+ value,
+ (value & AFE_ON_RETM_MASK_SFT) == 0,
+ 20,
+ 1 * 1000 * 1000);
+ if (ret) {
+ dev_err(afe->dev, "%s(), ret %d\n", __func__, ret);
+ return ret;
+ }
+
+	/* make sure all irq status bits are cleared */
+ regmap_write(afe->regmap, AFE_IRQ_MCU_CLR, 0xffffffff);
+ regmap_write(afe->regmap, AFE_IRQ_MCU_CLR, 0xffffffff);
+
+ /* reset sgen */
+ regmap_write(afe->regmap, AFE_SINEGEN_CON0, 0x0);
+ regmap_update_bits(afe->regmap, AFE_SINEGEN_CON2,
+ INNER_LOOP_BACK_MODE_MASK_SFT,
+ 0x3f << INNER_LOOP_BACK_MODE_SFT);
+
+ /* cache only */
+ regcache_cache_only(afe->regmap, true);
+ regcache_mark_dirty(afe->regmap);
+
+skip_regmap:
+ mt8186_afe_disable_cgs(afe);
+ mt8186_afe_disable_clock(afe);
+
+ return 0;
+}
+
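+/*
+ * Runtime resume: re-enable clocks and clock gates, replay the cached
+ * register values, then restore the static configuration (DCM, 8_24 CPU
+ * format, 24-bit output ports) and switch the AFE back on.
+ */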
+static int mt8186_afe_runtime_resume(struct device *dev)
+{
+ struct mtk_base_afe *afe = dev_get_drvdata(dev);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ int ret;
+
+ ret = mt8186_afe_enable_clock(afe);
+ if (ret)
+ return ret;
+
+ ret = mt8186_afe_enable_cgs(afe);
+ if (ret)
+ return ret;
+
+ if (!afe->regmap || afe_priv->pm_runtime_bypass_reg_ctl)
+ goto skip_regmap;
+
+ regcache_cache_only(afe->regmap, false);
+ regcache_sync(afe->regmap);
+
+ /* enable audio sys DCM for power saving */
+ regmap_update_bits(afe_priv->infracfg, PERI_BUS_DCM_CTRL, BIT(29), BIT(29));
+ regmap_update_bits(afe->regmap, AUDIO_TOP_CON0, BIT(29), BIT(29));
+
+	/* force the CPU to use 8_24 format when writing 32-bit data */
+ regmap_update_bits(afe->regmap, AFE_MEMIF_CON0, CPU_HD_ALIGN_MASK_SFT, 0);
+
+	/* set all output ports to 24-bit */
+ regmap_write(afe->regmap, AFE_CONN_24BIT, 0xffffffff);
+ regmap_write(afe->regmap, AFE_CONN_24BIT_1, 0xffffffff);
+
+ /* enable AFE */
+ regmap_update_bits(afe->regmap, AFE_DAC_CON0, AUDIO_AFE_ON_MASK_SFT, BIT(0));
+
+skip_regmap:
+ return 0;
+}
+
+static int mt8186_afe_component_probe(struct snd_soc_component *component)
+{
+ mtk_afe_add_sub_dai_control(component);
+ mt8186_add_misc_control(component);
+
+ return 0;
+}
+
+static const struct snd_soc_component_driver mt8186_afe_component = {
+ .name = AFE_PCM_NAME,
+ .pcm_construct = mtk_afe_pcm_new,
+ .pointer = mtk_afe_pcm_pointer,
+ .probe = mt8186_afe_component_probe,
+};
+
+static int mt8186_dai_memif_register(struct mtk_base_afe *afe)
+{
+ struct mtk_base_afe_dai *dai;
+
+ dai = devm_kzalloc(afe->dev, sizeof(*dai), GFP_KERNEL);
+ if (!dai)
+ return -ENOMEM;
+
+ list_add(&dai->list, &afe->sub_dais);
+
+ dai->dai_drivers = mt8186_memif_dai_driver;
+ dai->num_dai_drivers = ARRAY_SIZE(mt8186_memif_dai_driver);
+
+ dai->controls = mt8186_pcm_kcontrols;
+ dai->num_controls = ARRAY_SIZE(mt8186_pcm_kcontrols);
+ dai->dapm_widgets = mt8186_memif_widgets;
+ dai->num_dapm_widgets = ARRAY_SIZE(mt8186_memif_widgets);
+ dai->dapm_routes = mt8186_memif_routes;
+ dai->num_dapm_routes = ARRAY_SIZE(mt8186_memif_routes);
+ return 0;
+}
+
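+/* each callback below adds one sub-DAI (dai drivers, controls, widgets, routes) to afe->sub_dais */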
+typedef int (*dai_register_cb)(struct mtk_base_afe *);
+static const dai_register_cb dai_register_cbs[] = {
+ mt8186_dai_adda_register,
+ mt8186_dai_i2s_register,
+ mt8186_dai_tdm_register,
+ mt8186_dai_hw_gain_register,
+ mt8186_dai_src_register,
+ mt8186_dai_pcm_register,
+ mt8186_dai_hostless_register,
+ mt8186_dai_memif_register,
+};
+
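+/*
+ * Probe keeps runtime-PM register handling bypassed (pm_runtime_bypass_reg_ctl)
+ * until the regmap has read the hardware defaults, then drops the PM reference
+ * and lets the regcache take over across runtime suspend/resume.
+ */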
+static int mt8186_afe_pcm_dev_probe(struct platform_device *pdev)
+{
+ struct mtk_base_afe *afe;
+ struct mt8186_afe_private *afe_priv;
+ struct resource *res;
+ struct reset_control *rstc;
+ struct device *dev = &pdev->dev;
+ int i, ret, irq_id;
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(34));
+ if (ret)
+ return ret;
+
+ afe = devm_kzalloc(dev, sizeof(*afe), GFP_KERNEL);
+ if (!afe)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, afe);
+
+ afe->platform_priv = devm_kzalloc(dev, sizeof(*afe_priv), GFP_KERNEL);
+ if (!afe->platform_priv)
+ return -ENOMEM;
+
+ afe_priv = afe->platform_priv;
+ afe->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ afe->base_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(afe->base_addr))
+ return PTR_ERR(afe->base_addr);
+
+	/* init audio-related clocks */
+ ret = mt8186_init_clock(afe);
+ if (ret) {
+ dev_err(dev, "init clock error, ret %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(dev, mt8186_deinit_clock, (void *)afe);
+ if (ret)
+ return ret;
+
+ /* init memif */
+ afe->memif_32bit_supported = 0;
+ afe->memif_size = MT8186_MEMIF_NUM;
+ afe->memif = devm_kcalloc(dev, afe->memif_size, sizeof(*afe->memif), GFP_KERNEL);
+ if (!afe->memif)
+ return -ENOMEM;
+
+ for (i = 0; i < afe->memif_size; i++) {
+ afe->memif[i].data = &memif_data[i];
+ afe->memif[i].irq_usage = memif_irq_usage[i];
+ afe->memif[i].const_irq = 1;
+ }
+
+	mutex_init(&afe->irq_alloc_lock); /* needed for dynamic irq allocation */
+
+ /* init irq */
+ afe->irqs_size = MT8186_IRQ_NUM;
+ afe->irqs = devm_kcalloc(dev, afe->irqs_size, sizeof(*afe->irqs),
+ GFP_KERNEL);
+
+ if (!afe->irqs)
+ return -ENOMEM;
+
+ for (i = 0; i < afe->irqs_size; i++)
+ afe->irqs[i].irq_data = &irq_data[i];
+
+ /* request irq */
+ irq_id = platform_get_irq(pdev, 0);
+ if (irq_id <= 0)
+ return dev_err_probe(dev, irq_id < 0 ? irq_id : -ENXIO,
+ "no irq found");
+
+ ret = devm_request_irq(dev, irq_id, mt8186_afe_irq_handler,
+ IRQF_TRIGGER_NONE,
+ "Afe_ISR_Handle", (void *)afe);
+ if (ret)
+ return dev_err_probe(dev, ret, "could not request_irq for Afe_ISR_Handle\n");
+
+ ret = enable_irq_wake(irq_id);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "enable_irq_wake %d\n", irq_id);
+
+ /* init sub_dais */
+ INIT_LIST_HEAD(&afe->sub_dais);
+
+ for (i = 0; i < ARRAY_SIZE(dai_register_cbs); i++) {
+ ret = dai_register_cbs[i](afe);
+ if (ret)
+ return dev_err_probe(dev, ret, "dai register i %d fail\n", i);
+ }
+
+ /* init dai_driver and component_driver */
+ ret = mtk_afe_combine_sub_dai(afe);
+ if (ret)
+ return dev_err_probe(dev, ret, "mtk_afe_combine_sub_dai fail\n");
+
+ /* reset controller to reset audio regs before regmap cache */
+ rstc = devm_reset_control_get_exclusive(dev, "audiosys");
+ if (IS_ERR(rstc))
+ return dev_err_probe(dev, PTR_ERR(rstc), "could not get audiosys reset\n");
+
+ ret = reset_control_reset(rstc);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to trigger audio reset\n");
+
+	/* keep clocks enabled so the regcache can read default values from hw */
+ afe_priv->pm_runtime_bypass_reg_ctl = true;
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to resume device\n");
+
+ afe->regmap = devm_regmap_init_mmio(dev, afe->base_addr,
+ &mt8186_afe_regmap_config);
+ if (IS_ERR(afe->regmap)) {
+ ret = PTR_ERR(afe->regmap);
+ goto err_pm_disable;
+ }
+
+ /* others */
+ afe->mtk_afe_hardware = &mt8186_afe_hardware;
+ afe->memif_fs = mt8186_memif_fs;
+ afe->irq_fs = mt8186_irq_fs;
+ afe->get_dai_fs = mt8186_get_dai_fs;
+ afe->get_memif_pbuf_size = mt8186_get_memif_pbuf_size;
+
+ afe->runtime_resume = mt8186_afe_runtime_resume;
+ afe->runtime_suspend = mt8186_afe_runtime_suspend;
+
+ /* register platform */
+ dev_dbg(dev, "%s(), devm_snd_soc_register_component\n", __func__);
+
+ ret = devm_snd_soc_register_component(dev,
+ &mt8186_afe_component,
+ afe->dai_drivers,
+ afe->num_dai_drivers);
+ if (ret) {
+ dev_err(dev, "err_dai_component\n");
+ goto err_pm_disable;
+ }
+
+ ret = pm_runtime_put_sync(dev);
+ if (ret) {
+ pm_runtime_get_noresume(dev);
+ dev_err(dev, "failed to suspend device: %d\n", ret);
+ goto err_pm_disable;
+ }
+ afe_priv->pm_runtime_bypass_reg_ctl = false;
+
+ regcache_cache_only(afe->regmap, true);
+ regcache_mark_dirty(afe->regmap);
+
+ return 0;
+
+err_pm_disable:
+ pm_runtime_put_noidle(dev);
+ pm_runtime_set_suspended(dev);
+
+ return ret;
+}
+
+static const struct of_device_id mt8186_afe_pcm_dt_match[] = {
+ { .compatible = "mediatek,mt8186-sound", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mt8186_afe_pcm_dt_match);
+
+static const struct dev_pm_ops mt8186_afe_pm_ops = {
+ SET_RUNTIME_PM_OPS(mt8186_afe_runtime_suspend,
+ mt8186_afe_runtime_resume, NULL)
+};
+
+static struct platform_driver mt8186_afe_pcm_driver = {
+ .driver = {
+ .name = "mt8186-audio",
+ .of_match_table = mt8186_afe_pcm_dt_match,
+ .pm = &mt8186_afe_pm_ops,
+ },
+ .probe = mt8186_afe_pcm_dev_probe,
+};
+
+module_platform_driver(mt8186_afe_pcm_driver);
+
+MODULE_DESCRIPTION("Mediatek ALSA SoC AFE platform driver for 8186");
+MODULE_AUTHOR("Jiaxin Yu <jiaxin.yu@mediatek.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/mediatek/mt8186/mt8186-audsys-clk.c b/sound/soc/mediatek/mt8186/mt8186-audsys-clk.c
new file mode 100644
index 000000000000..578969ca91c8
--- /dev/null
+++ b/sound/soc/mediatek/mt8186/mt8186-audsys-clk.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mt8186-audsys-clk.c -- Mediatek 8186 audsys clock control
+//
+// Copyright (c) 2022 MediaTek Inc.
+// Author: Jiaxin Yu <jiaxin.yu@mediatek.com>
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include "mt8186-afe-common.h"
+#include "mt8186-audsys-clk.h"
+#include "mt8186-audsys-clkid.h"
+#include "mt8186-reg.h"
+
+struct afe_gate {
+ int id;
+ const char *name;
+ const char *parent_name;
+ int reg;
+ u8 bit;
+ const struct clk_ops *ops;
+ unsigned long flags;
+ u8 cg_flags;
+};
+
+#define GATE_AFE_FLAGS(_id, _name, _parent, _reg, _bit, _flags, _cgflags) {\
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .reg = _reg, \
+ .bit = _bit, \
+ .flags = _flags, \
+ .cg_flags = _cgflags, \
+ }
+
+#define GATE_AFE(_id, _name, _parent, _reg, _bit) \
+ GATE_AFE_FLAGS(_id, _name, _parent, _reg, _bit, \
+ CLK_SET_RATE_PARENT, CLK_GATE_SET_TO_DISABLE)
+
+#define GATE_AUD0(_id, _name, _parent, _bit) \
+ GATE_AFE(_id, _name, _parent, AUDIO_TOP_CON0, _bit)
+
+#define GATE_AUD1(_id, _name, _parent, _bit) \
+ GATE_AFE(_id, _name, _parent, AUDIO_TOP_CON1, _bit)
+
+#define GATE_AUD2(_id, _name, _parent, _bit) \
+ GATE_AFE(_id, _name, _parent, AUDIO_TOP_CON2, _bit)
+
+static const struct afe_gate aud_clks[CLK_AUD_NR_CLK] = {
+ /* AUD0 */
+ GATE_AUD0(CLK_AUD_AFE, "aud_afe_clk", "top_audio", 2),
+ GATE_AUD0(CLK_AUD_22M, "aud_apll22m_clk", "top_aud_engen1", 8),
+ GATE_AUD0(CLK_AUD_24M, "aud_apll24m_clk", "top_aud_engen2", 9),
+ GATE_AUD0(CLK_AUD_APLL2_TUNER, "aud_apll2_tuner_clk", "top_aud_engen2", 18),
+ GATE_AUD0(CLK_AUD_APLL_TUNER, "aud_apll_tuner_clk", "top_aud_engen1", 19),
+ GATE_AUD0(CLK_AUD_TDM, "aud_tdm_clk", "top_aud_1", 20),
+ GATE_AUD0(CLK_AUD_ADC, "aud_adc_clk", "top_audio", 24),
+ GATE_AUD0(CLK_AUD_DAC, "aud_dac_clk", "top_audio", 25),
+ GATE_AUD0(CLK_AUD_DAC_PREDIS, "aud_dac_predis_clk", "top_audio", 26),
+ GATE_AUD0(CLK_AUD_TML, "aud_tml_clk", "top_audio", 27),
+ GATE_AUD0(CLK_AUD_NLE, "aud_nle_clk", "top_audio", 28),
+
+ /* AUD1 */
+ GATE_AUD1(CLK_AUD_I2S1_BCLK, "aud_i2s1_bclk", "top_audio", 4),
+ GATE_AUD1(CLK_AUD_I2S2_BCLK, "aud_i2s2_bclk", "top_audio", 5),
+ GATE_AUD1(CLK_AUD_I2S3_BCLK, "aud_i2s3_bclk", "top_audio", 6),
+ GATE_AUD1(CLK_AUD_I2S4_BCLK, "aud_i2s4_bclk", "top_audio", 7),
+ GATE_AUD1(CLK_AUD_CONNSYS_I2S_ASRC, "aud_connsys_i2s_asrc", "top_audio", 12),
+ GATE_AUD1(CLK_AUD_GENERAL1_ASRC, "aud_general1_asrc", "top_audio", 13),
+ GATE_AUD1(CLK_AUD_GENERAL2_ASRC, "aud_general2_asrc", "top_audio", 14),
+ GATE_AUD1(CLK_AUD_DAC_HIRES, "aud_dac_hires_clk", "top_audio_h", 15),
+ GATE_AUD1(CLK_AUD_ADC_HIRES, "aud_adc_hires_clk", "top_audio_h", 16),
+ GATE_AUD1(CLK_AUD_ADC_HIRES_TML, "aud_adc_hires_tml", "top_audio_h", 17),
+ GATE_AUD1(CLK_AUD_ADDA6_ADC, "aud_adda6_adc", "top_audio", 20),
+ GATE_AUD1(CLK_AUD_ADDA6_ADC_HIRES, "aud_adda6_adc_hires", "top_audio_h", 21),
+ GATE_AUD1(CLK_AUD_3RD_DAC, "aud_3rd_dac", "top_audio", 28),
+ GATE_AUD1(CLK_AUD_3RD_DAC_PREDIS, "aud_3rd_dac_predis", "top_audio", 29),
+ GATE_AUD1(CLK_AUD_3RD_DAC_TML, "aud_3rd_dac_tml", "top_audio", 30),
+ GATE_AUD1(CLK_AUD_3RD_DAC_HIRES, "aud_3rd_dac_hires", "top_audio_h", 31),
+
+ /* AUD2 */
+ GATE_AUD2(CLK_AUD_ETDM_IN1_BCLK, "aud_etdm_in1_bclk", "top_audio", 23),
+ GATE_AUD2(CLK_AUD_ETDM_OUT1_BCLK, "aud_etdm_out1_bclk", "top_audio", 24),
+};
+
+int mt8186_audsys_clk_register(struct mtk_base_afe *afe)
+{
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ struct clk *clk;
+ struct clk_lookup *cl;
+ int i;
+
+ afe_priv->lookup = devm_kcalloc(afe->dev, CLK_AUD_NR_CLK,
+ sizeof(*afe_priv->lookup),
+ GFP_KERNEL);
+
+ if (!afe_priv->lookup)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(aud_clks); i++) {
+ const struct afe_gate *gate = &aud_clks[i];
+
+ clk = clk_register_gate(afe->dev, gate->name, gate->parent_name,
+ gate->flags, afe->base_addr + gate->reg,
+ gate->bit, gate->cg_flags, NULL);
+
+ if (IS_ERR(clk)) {
+ dev_err(afe->dev, "Failed to register clk %s: %ld\n",
+ gate->name, PTR_ERR(clk));
+ continue;
+ }
+
+ /* add clk_lookup for devm_clk_get(SND_SOC_DAPM_CLOCK_SUPPLY) */
+ cl = kzalloc(sizeof(*cl), GFP_KERNEL);
+ if (!cl)
+ return -ENOMEM;
+
+ cl->clk = clk;
+ cl->con_id = gate->name;
+ cl->dev_id = dev_name(afe->dev);
+ clkdev_add(cl);
+
+ afe_priv->lookup[i] = cl;
+ }
+
+ return 0;
+}
+
+void mt8186_audsys_clk_unregister(struct mtk_base_afe *afe)
+{
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ struct clk *clk;
+ struct clk_lookup *cl;
+ int i;
+
+ if (!afe_priv)
+ return;
+
+ for (i = 0; i < CLK_AUD_NR_CLK; i++) {
+ cl = afe_priv->lookup[i];
+ if (!cl)
+ continue;
+
+ clk = cl->clk;
+ clk_unregister_gate(clk);
+
+ clkdev_drop(cl);
+ }
+}
diff --git a/sound/soc/mediatek/mt8186/mt8186-audsys-clk.h b/sound/soc/mediatek/mt8186/mt8186-audsys-clk.h
new file mode 100644
index 000000000000..b8d6a06e11e8
--- /dev/null
+++ b/sound/soc/mediatek/mt8186/mt8186-audsys-clk.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * mt8186-audsys-clk.h -- Mediatek 8186 audsys clock definition
+ *
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Trevor Wu <trevor.wu@mediatek.com>
+ */
+
+#ifndef _MT8186_AUDSYS_CLK_H_
+#define _MT8186_AUDSYS_CLK_H_
+
+int mt8186_audsys_clk_register(struct mtk_base_afe *afe);
+void mt8186_audsys_clk_unregister(struct mtk_base_afe *afe);
+
+#endif
diff --git a/sound/soc/mediatek/mt8186/mt8186-audsys-clkid.h b/sound/soc/mediatek/mt8186/mt8186-audsys-clkid.h
new file mode 100644
index 000000000000..3ce5937c1823
--- /dev/null
+++ b/sound/soc/mediatek/mt8186/mt8186-audsys-clkid.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * mt8186-audsys-clkid.h -- Mediatek 8186 audsys clock id definition
+ *
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Jiaxin Yu <jiaxin.yu@mediatek.com>
+ */
+
+#ifndef _MT8186_AUDSYS_CLKID_H_
+#define _MT8186_AUDSYS_CLKID_H_
+
+enum {
+ CLK_AUD_AFE,
+ CLK_AUD_22M,
+ CLK_AUD_24M,
+ CLK_AUD_APLL2_TUNER,
+ CLK_AUD_APLL_TUNER,
+ CLK_AUD_TDM,
+ CLK_AUD_ADC,
+ CLK_AUD_DAC,
+ CLK_AUD_DAC_PREDIS,
+ CLK_AUD_TML,
+ CLK_AUD_NLE,
+ CLK_AUD_I2S1_BCLK,
+ CLK_AUD_I2S2_BCLK,
+ CLK_AUD_I2S3_BCLK,
+ CLK_AUD_I2S4_BCLK,
+ CLK_AUD_CONNSYS_I2S_ASRC,
+ CLK_AUD_GENERAL1_ASRC,
+ CLK_AUD_GENERAL2_ASRC,
+ CLK_AUD_DAC_HIRES,
+ CLK_AUD_ADC_HIRES,
+ CLK_AUD_ADC_HIRES_TML,
+ CLK_AUD_ADDA6_ADC,
+ CLK_AUD_ADDA6_ADC_HIRES,
+ CLK_AUD_3RD_DAC,
+ CLK_AUD_3RD_DAC_PREDIS,
+ CLK_AUD_3RD_DAC_TML,
+ CLK_AUD_3RD_DAC_HIRES,
+ CLK_AUD_ETDM_IN1_BCLK,
+ CLK_AUD_ETDM_OUT1_BCLK,
+ CLK_AUD_NR_CLK,
+};
+
+#endif
diff --git a/sound/soc/mediatek/mt8186/mt8186-dai-adda.c b/sound/soc/mediatek/mt8186/mt8186-dai-adda.c
new file mode 100644
index 000000000000..266704556f37
--- /dev/null
+++ b/sound/soc/mediatek/mt8186/mt8186-dai-adda.c
@@ -0,0 +1,865 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// MediaTek ALSA SoC Audio DAI ADDA Control
+//
+// Copyright (c) 2022 MediaTek Inc.
+// Author: Jiaxin Yu <jiaxin.yu@mediatek.com>
+
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include "mt8186-afe-clk.h"
+#include "mt8186-afe-common.h"
+#include "mt8186-afe-gpio.h"
+#include "mt8186-interconnection.h"
+
+enum {
+ UL_IIR_SW = 0,
+ UL_IIR_5HZ,
+ UL_IIR_10HZ,
+ UL_IIR_25HZ,
+ UL_IIR_50HZ,
+ UL_IIR_75HZ,
+};
+
+enum {
+ AUDIO_SDM_LEVEL_MUTE = 0,
+ AUDIO_SDM_LEVEL_NORMAL = 0x1d,
+	/* if you change the normal level, you also need to */
+	/* update the hp impedance and dc trim formulas */
+};
+
+enum {
+ AUDIO_SDM_2ND = 0,
+ AUDIO_SDM_3RD,
+};
+
+enum {
+ DELAY_DATA_MISO1 = 0,
+ DELAY_DATA_MISO2,
+};
+
+enum {
+ MTK_AFE_ADDA_DL_RATE_8K = 0,
+ MTK_AFE_ADDA_DL_RATE_11K = 1,
+ MTK_AFE_ADDA_DL_RATE_12K = 2,
+ MTK_AFE_ADDA_DL_RATE_16K = 3,
+ MTK_AFE_ADDA_DL_RATE_22K = 4,
+ MTK_AFE_ADDA_DL_RATE_24K = 5,
+ MTK_AFE_ADDA_DL_RATE_32K = 6,
+ MTK_AFE_ADDA_DL_RATE_44K = 7,
+ MTK_AFE_ADDA_DL_RATE_48K = 8,
+ MTK_AFE_ADDA_DL_RATE_96K = 9,
+ MTK_AFE_ADDA_DL_RATE_192K = 10,
+};
+
+enum {
+ MTK_AFE_ADDA_UL_RATE_8K = 0,
+ MTK_AFE_ADDA_UL_RATE_16K = 1,
+ MTK_AFE_ADDA_UL_RATE_32K = 2,
+ MTK_AFE_ADDA_UL_RATE_48K = 3,
+ MTK_AFE_ADDA_UL_RATE_96K = 4,
+ MTK_AFE_ADDA_UL_RATE_192K = 5,
+ MTK_AFE_ADDA_UL_RATE_48K_HD = 6,
+};
+
+#define SDM_AUTO_RESET_THRESHOLD 0x190000
+
+struct mtk_afe_adda_priv {
+ int dl_rate;
+ int ul_rate;
+};
+
+static struct mtk_afe_adda_priv *get_adda_priv_by_name(struct mtk_base_afe *afe,
+ const char *name)
+{
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ int dai_id;
+
+ if (strncmp(name, "aud_dac", 7) == 0 || strncmp(name, "aud_adc", 7) == 0)
+ dai_id = MT8186_DAI_ADDA;
+ else
+ return NULL;
+
+ return afe_priv->dai_priv[dai_id];
+}
+
+static unsigned int adda_dl_rate_transform(struct mtk_base_afe *afe,
+ unsigned int rate)
+{
+ switch (rate) {
+ case 8000:
+ return MTK_AFE_ADDA_DL_RATE_8K;
+ case 11025:
+ return MTK_AFE_ADDA_DL_RATE_11K;
+ case 12000:
+ return MTK_AFE_ADDA_DL_RATE_12K;
+ case 16000:
+ return MTK_AFE_ADDA_DL_RATE_16K;
+ case 22050:
+ return MTK_AFE_ADDA_DL_RATE_22K;
+ case 24000:
+ return MTK_AFE_ADDA_DL_RATE_24K;
+ case 32000:
+ return MTK_AFE_ADDA_DL_RATE_32K;
+ case 44100:
+ return MTK_AFE_ADDA_DL_RATE_44K;
+ case 48000:
+ return MTK_AFE_ADDA_DL_RATE_48K;
+ case 96000:
+ return MTK_AFE_ADDA_DL_RATE_96K;
+ case 192000:
+ return MTK_AFE_ADDA_DL_RATE_192K;
+ default:
+ dev_info(afe->dev, "%s(), rate %d invalid, use 48kHz!!!\n",
+ __func__, rate);
+ }
+
+ return MTK_AFE_ADDA_DL_RATE_48K;
+}
+
+static unsigned int adda_ul_rate_transform(struct mtk_base_afe *afe,
+ unsigned int rate)
+{
+ switch (rate) {
+ case 8000:
+ return MTK_AFE_ADDA_UL_RATE_8K;
+ case 16000:
+ return MTK_AFE_ADDA_UL_RATE_16K;
+ case 32000:
+ return MTK_AFE_ADDA_UL_RATE_32K;
+ case 48000:
+ return MTK_AFE_ADDA_UL_RATE_48K;
+ case 96000:
+ return MTK_AFE_ADDA_UL_RATE_96K;
+ case 192000:
+ return MTK_AFE_ADDA_UL_RATE_192K;
+ default:
+ dev_info(afe->dev, "%s(), rate %d invalid, use 48kHz!!!\n",
+ __func__, rate);
+ }
+
+ return MTK_AFE_ADDA_UL_RATE_48K;
+}
+
+/* dai component */
+static const struct snd_kcontrol_new mtk_adda_dl_ch1_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH1 Switch", AFE_CONN3, I_DL1_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL12_CH1 Switch", AFE_CONN3, I_DL12_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1 Switch", AFE_CONN3, I_DL2_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH1 Switch", AFE_CONN3, I_DL3_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH1 Switch", AFE_CONN3_1, I_DL4_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL5_CH1 Switch", AFE_CONN3_1, I_DL5_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL6_CH1 Switch", AFE_CONN3_1, I_DL6_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL8_CH1 Switch", AFE_CONN3_1, I_DL8_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2 Switch", AFE_CONN3,
+ I_ADDA_UL_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1 Switch", AFE_CONN3,
+ I_ADDA_UL_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("GAIN1_OUT_CH1 Switch", AFE_CONN3,
+ I_GAIN1_OUT_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH1 Switch", AFE_CONN3,
+ I_PCM_1_CAP_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("PCM_2_CAP_CH1 Switch", AFE_CONN3,
+ I_PCM_2_CAP_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("SRC_1_OUT_CH1 Switch", AFE_CONN3_1,
+ I_SRC_1_OUT_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("SRC_2_OUT_CH1 Switch", AFE_CONN3_1,
+ I_SRC_2_OUT_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new mtk_adda_dl_ch2_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH1 Switch", AFE_CONN4, I_DL1_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH2 Switch", AFE_CONN4, I_DL1_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL12_CH2 Switch", AFE_CONN4, I_DL12_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1 Switch", AFE_CONN4, I_DL2_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH2 Switch", AFE_CONN4, I_DL2_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH1 Switch", AFE_CONN4, I_DL3_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH2 Switch", AFE_CONN4, I_DL3_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH2 Switch", AFE_CONN4_1, I_DL4_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL5_CH2 Switch", AFE_CONN4_1, I_DL5_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL6_CH2 Switch", AFE_CONN4_1, I_DL6_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL8_CH2 Switch", AFE_CONN4_1, I_DL8_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2 Switch", AFE_CONN4,
+ I_ADDA_UL_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1 Switch", AFE_CONN4,
+ I_ADDA_UL_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("GAIN1_OUT_CH2 Switch", AFE_CONN4,
+ I_GAIN1_OUT_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH2 Switch", AFE_CONN4,
+ I_PCM_1_CAP_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("PCM_2_CAP_CH2 Switch", AFE_CONN4,
+ I_PCM_2_CAP_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("SRC_1_OUT_CH2 Switch", AFE_CONN4_1,
+ I_SRC_1_OUT_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("SRC_2_OUT_CH2 Switch", AFE_CONN4_1,
+ I_SRC_2_OUT_CH2, 1, 0),
+};
+
+enum {
+ SUPPLY_SEQ_ADDA_AFE_ON,
+ SUPPLY_SEQ_ADDA_DL_ON,
+ SUPPLY_SEQ_ADDA_AUD_PAD_TOP,
+ SUPPLY_SEQ_ADDA_MTKAIF_CFG,
+ SUPPLY_SEQ_ADDA_FIFO,
+ SUPPLY_SEQ_ADDA_AP_DMIC,
+ SUPPLY_SEQ_ADDA_UL_ON,
+};
+
+static int mtk_adda_ul_src_dmic(struct mtk_base_afe *afe, int id)
+{
+ unsigned int reg;
+
+ switch (id) {
+ case MT8186_DAI_ADDA:
+ case MT8186_DAI_AP_DMIC:
+ reg = AFE_ADDA_UL_SRC_CON0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+	/* dmic mode, 3.25M */
+ regmap_update_bits(afe->regmap, reg,
+ DIGMIC_3P25M_1P625M_SEL_MASK_SFT, 0);
+ regmap_update_bits(afe->regmap, reg,
+ DMIC_LOW_POWER_CTL_MASK_SFT, 0);
+
+ /* turn on dmic, ch1, ch2 */
+ regmap_update_bits(afe->regmap, reg,
+ UL_SDM_3_LEVEL_MASK_SFT,
+ BIT(UL_SDM_3_LEVEL_SFT));
+ regmap_update_bits(afe->regmap, reg,
+ UL_MODE_3P25M_CH1_CTL_MASK_SFT,
+ BIT(UL_MODE_3P25M_CH1_CTL_SFT));
+ regmap_update_bits(afe->regmap, reg,
+ UL_MODE_3P25M_CH2_CTL_MASK_SFT,
+ BIT(UL_MODE_3P25M_CH2_CTL_SFT));
+
+ return 0;
+}
+
+static int mtk_adda_ul_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ int mtkaif_dmic = afe_priv->mtkaif_dmic;
+
+ dev_dbg(afe->dev, "%s(), name %s, event 0x%x, mtkaif_dmic %d\n",
+ __func__, w->name, event, mtkaif_dmic);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ mt8186_afe_gpio_request(afe->dev, true, MT8186_DAI_ADDA, 1);
+
+ /* update setting to dmic */
+ if (mtkaif_dmic) {
+ /* mtkaif_rxif_data_mode = 1, dmic */
+ regmap_update_bits(afe->regmap, AFE_ADDA_MTKAIF_RX_CFG0,
+ 0x1, 0x1);
+
+			/* dmic mode, 3.25M */
+ regmap_update_bits(afe->regmap, AFE_ADDA_MTKAIF_RX_CFG0,
+ MTKAIF_RXIF_VOICE_MODE_MASK_SFT,
+ 0x0);
+ mtk_adda_ul_src_dmic(afe, MT8186_DAI_ADDA);
+ }
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+		/* delay 1/fs (smallest is 8kHz) = 125us before turning the afe off */
+ usleep_range(125, 135);
+ mt8186_afe_gpio_request(afe->dev, false, MT8186_DAI_ADDA, 1);
+
+ /* reset dmic */
+ afe_priv->mtkaif_dmic = 0;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int mtk_adda_pad_top_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ if (afe_priv->mtkaif_protocol == MTKAIF_PROTOCOL_2_CLK_P2)
+ regmap_write(afe->regmap, AFE_AUD_PAD_TOP, 0x39);
+ else
+ regmap_write(afe->regmap, AFE_AUD_PAD_TOP, 0x31);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
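+/*
+ * For MTKAIF protocol 2 (clock phase 2), the RX interface is compensated with
+ * the phase values found during calibration: the slower MISO line is delayed
+ * by the cycle difference between the two channels.
+ */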
+static int mtk_adda_mtkaif_cfg_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ int delay_data;
+ int delay_cycle;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ if (afe_priv->mtkaif_protocol == MTKAIF_PROTOCOL_2_CLK_P2) {
+ /* set protocol 2 */
+ regmap_write(afe->regmap, AFE_ADDA_MTKAIF_CFG0, 0x10000);
+ /* mtkaif_rxif_clkinv_adc inverse */
+ regmap_update_bits(afe->regmap, AFE_ADDA_MTKAIF_CFG0,
+ MTKAIF_RXIF_CLKINV_ADC_MASK_SFT,
+ BIT(MTKAIF_RXIF_CLKINV_ADC_SFT));
+
+ if (strcmp(w->name, "ADDA_MTKAIF_CFG") == 0) {
+ if (afe_priv->mtkaif_chosen_phase[0] < 0 &&
+ afe_priv->mtkaif_chosen_phase[1] < 0) {
+ dev_err(afe->dev,
+ "%s(), calib fail mtkaif_chosen_phase[0/1]:%d/%d\n",
+ __func__,
+ afe_priv->mtkaif_chosen_phase[0],
+ afe_priv->mtkaif_chosen_phase[1]);
+ break;
+ }
+
+ if (afe_priv->mtkaif_chosen_phase[0] < 0 ||
+ afe_priv->mtkaif_chosen_phase[1] < 0) {
+ dev_err(afe->dev,
+ "%s(), skip delay setting mtkaif_chosen_phase[0/1]:%d/%d\n",
+ __func__,
+ afe_priv->mtkaif_chosen_phase[0],
+ afe_priv->mtkaif_chosen_phase[1]);
+ break;
+ }
+ }
+
+ /* set delay for ch12 */
+ if (afe_priv->mtkaif_phase_cycle[0] >=
+ afe_priv->mtkaif_phase_cycle[1]) {
+ delay_data = DELAY_DATA_MISO1;
+ delay_cycle = afe_priv->mtkaif_phase_cycle[0] -
+ afe_priv->mtkaif_phase_cycle[1];
+ } else {
+ delay_data = DELAY_DATA_MISO2;
+ delay_cycle = afe_priv->mtkaif_phase_cycle[1] -
+ afe_priv->mtkaif_phase_cycle[0];
+ }
+
+ regmap_update_bits(afe->regmap,
+ AFE_ADDA_MTKAIF_RX_CFG2,
+ MTKAIF_RXIF_DELAY_DATA_MASK_SFT,
+ delay_data <<
+ MTKAIF_RXIF_DELAY_DATA_SFT);
+
+ regmap_update_bits(afe->regmap,
+ AFE_ADDA_MTKAIF_RX_CFG2,
+ MTKAIF_RXIF_DELAY_CYCLE_MASK_SFT,
+ delay_cycle <<
+ MTKAIF_RXIF_DELAY_CYCLE_SFT);
+
+ } else if (afe_priv->mtkaif_protocol == MTKAIF_PROTOCOL_2) {
+ regmap_write(afe->regmap, AFE_ADDA_MTKAIF_CFG0, 0x10000);
+ } else {
+ regmap_write(afe->regmap, AFE_ADDA_MTKAIF_CFG0, 0);
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int mtk_adda_dl_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+
+ dev_dbg(afe->dev, "%s(), name %s, event 0x%x\n",
+ __func__, w->name, event);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ mt8186_afe_gpio_request(afe->dev, true, MT8186_DAI_ADDA, 0);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+		/* delay 1/fs (smallest is 8kHz) = 125us before turning the afe off */
+ usleep_range(125, 135);
+ mt8186_afe_gpio_request(afe->dev, false, MT8186_DAI_ADDA, 0);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int mt8186_adda_dmic_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+
+ ucontrol->value.integer.value[0] = afe_priv->mtkaif_dmic;
+
+ return 0;
+}
+
+static int mt8186_adda_dmic_set(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ int dmic_on;
+
+ dmic_on = ucontrol->value.integer.value[0];
+
+ dev_dbg(afe->dev, "%s(), kcontrol name %s, dmic_on %d\n",
+ __func__, kcontrol->id.name, dmic_on);
+
+ if (afe_priv->mtkaif_dmic == dmic_on)
+ return 0;
+
+ afe_priv->mtkaif_dmic = dmic_on;
+
+ return 1;
+}
+
+static const struct snd_kcontrol_new mtk_adda_controls[] = {
+ SOC_SINGLE("ADDA_DL_GAIN", AFE_ADDA_DL_SRC2_CON1,
+ DL_2_GAIN_CTL_PRE_SFT, DL_2_GAIN_CTL_PRE_MASK, 0),
+ SOC_SINGLE_BOOL_EXT("MTKAIF_DMIC Switch", 0,
+ mt8186_adda_dmic_get, mt8186_adda_dmic_set),
+};
+
+/* ADDA UL MUX */
+enum {
+ ADDA_UL_MUX_MTKAIF = 0,
+ ADDA_UL_MUX_AP_DMIC,
+ ADDA_UL_MUX_MASK = 0x1,
+};
+
+static const char * const adda_ul_mux_map[] = {
+ "MTKAIF", "AP_DMIC"
+};
+
+static int adda_ul_map_value[] = {
+ ADDA_UL_MUX_MTKAIF,
+ ADDA_UL_MUX_AP_DMIC,
+};
+
+static SOC_VALUE_ENUM_SINGLE_DECL(adda_ul_mux_map_enum,
+ SND_SOC_NOPM,
+ 0,
+ ADDA_UL_MUX_MASK,
+ adda_ul_mux_map,
+ adda_ul_map_value);
+
+static const struct snd_kcontrol_new adda_ul_mux_control =
+ SOC_DAPM_ENUM("ADDA_UL_MUX Select", adda_ul_mux_map_enum);
+
+static const struct snd_soc_dapm_widget mtk_dai_adda_widgets[] = {
+ /* inter-connections */
+ SND_SOC_DAPM_MIXER("ADDA_DL_CH1", SND_SOC_NOPM, 0, 0,
+ mtk_adda_dl_ch1_mix,
+ ARRAY_SIZE(mtk_adda_dl_ch1_mix)),
+ SND_SOC_DAPM_MIXER("ADDA_DL_CH2", SND_SOC_NOPM, 0, 0,
+ mtk_adda_dl_ch2_mix,
+ ARRAY_SIZE(mtk_adda_dl_ch2_mix)),
+
+ SND_SOC_DAPM_SUPPLY_S("ADDA Enable", SUPPLY_SEQ_ADDA_AFE_ON,
+ AFE_ADDA_UL_DL_CON0, ADDA_AFE_ON_SFT, 0,
+ NULL, 0),
+
+ SND_SOC_DAPM_SUPPLY_S("ADDA Playback Enable", SUPPLY_SEQ_ADDA_DL_ON,
+ AFE_ADDA_DL_SRC2_CON0,
+ DL_2_SRC_ON_CTL_PRE_SFT, 0,
+ mtk_adda_dl_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_SUPPLY_S("ADDA Capture Enable", SUPPLY_SEQ_ADDA_UL_ON,
+ AFE_ADDA_UL_SRC_CON0,
+ UL_SRC_ON_CTL_SFT, 0,
+ mtk_adda_ul_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_SUPPLY_S("AUD_PAD_TOP", SUPPLY_SEQ_ADDA_AUD_PAD_TOP,
+ 0, 0, 0,
+ mtk_adda_pad_top_event,
+ SND_SOC_DAPM_PRE_PMU),
+ SND_SOC_DAPM_SUPPLY_S("ADDA_MTKAIF_CFG", SUPPLY_SEQ_ADDA_MTKAIF_CFG,
+ SND_SOC_NOPM, 0, 0,
+ mtk_adda_mtkaif_cfg_event,
+ SND_SOC_DAPM_PRE_PMU),
+
+ SND_SOC_DAPM_SUPPLY_S("AP_DMIC_EN", SUPPLY_SEQ_ADDA_AP_DMIC,
+ AFE_ADDA_UL_SRC_CON0,
+ UL_AP_DMIC_ON_SFT, 0,
+ NULL, 0),
+
+ SND_SOC_DAPM_SUPPLY_S("ADDA_FIFO", SUPPLY_SEQ_ADDA_FIFO,
+ AFE_ADDA_UL_DL_CON0,
+ AFE_ADDA_FIFO_AUTO_RST_SFT, 1,
+ NULL, 0),
+
+ SND_SOC_DAPM_MUX("ADDA_UL_Mux", SND_SOC_NOPM, 0, 0,
+ &adda_ul_mux_control),
+
+ SND_SOC_DAPM_INPUT("AP_DMIC_INPUT"),
+
+ /* clock */
+ SND_SOC_DAPM_CLOCK_SUPPLY("top_mux_audio_h"),
+
+ SND_SOC_DAPM_CLOCK_SUPPLY("aud_dac_clk"),
+ SND_SOC_DAPM_CLOCK_SUPPLY("aud_dac_hires_clk"),
+ SND_SOC_DAPM_CLOCK_SUPPLY("aud_dac_predis_clk"),
+
+ SND_SOC_DAPM_CLOCK_SUPPLY("aud_adc_clk"),
+ SND_SOC_DAPM_CLOCK_SUPPLY("aud_adc_hires_clk"),
+};
+
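+/* DAPM route connect checks: only pull in the hires clocks when the stream rate exceeds 48 kHz */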
+#define HIRES_THRESHOLD 48000
+static int mtk_afe_dac_hires_connect(struct snd_soc_dapm_widget *source,
+ struct snd_soc_dapm_widget *sink)
+{
+ struct snd_soc_dapm_widget *w = source;
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mtk_afe_adda_priv *adda_priv;
+
+ adda_priv = get_adda_priv_by_name(afe, w->name);
+
+ if (!adda_priv) {
+ dev_err(afe->dev, "%s(), adda_priv == NULL", __func__);
+ return 0;
+ }
+
+ return (adda_priv->dl_rate > HIRES_THRESHOLD) ? 1 : 0;
+}
+
+static int mtk_afe_adc_hires_connect(struct snd_soc_dapm_widget *source,
+ struct snd_soc_dapm_widget *sink)
+{
+ struct snd_soc_dapm_widget *w = source;
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mtk_afe_adda_priv *adda_priv;
+
+ adda_priv = get_adda_priv_by_name(afe, w->name);
+
+ if (!adda_priv) {
+ dev_err(afe->dev, "%s(), adda_priv == NULL", __func__);
+ return 0;
+ }
+
+ return (adda_priv->ul_rate > HIRES_THRESHOLD) ? 1 : 0;
+}
+
+static const struct snd_soc_dapm_route mtk_dai_adda_routes[] = {
+ /* playback */
+ {"ADDA_DL_CH1", "DL1_CH1 Switch", "DL1"},
+ {"ADDA_DL_CH2", "DL1_CH1 Switch", "DL1"},
+ {"ADDA_DL_CH2", "DL1_CH2 Switch", "DL1"},
+
+ {"ADDA_DL_CH1", "DL12_CH1 Switch", "DL12"},
+ {"ADDA_DL_CH2", "DL12_CH2 Switch", "DL12"},
+
+ {"ADDA_DL_CH1", "DL6_CH1 Switch", "DL6"},
+ {"ADDA_DL_CH2", "DL6_CH2 Switch", "DL6"},
+
+ {"ADDA_DL_CH1", "DL8_CH1 Switch", "DL8"},
+ {"ADDA_DL_CH2", "DL8_CH2 Switch", "DL8"},
+
+ {"ADDA_DL_CH1", "DL2_CH1 Switch", "DL2"},
+ {"ADDA_DL_CH2", "DL2_CH1 Switch", "DL2"},
+ {"ADDA_DL_CH2", "DL2_CH2 Switch", "DL2"},
+
+ {"ADDA_DL_CH1", "DL3_CH1 Switch", "DL3"},
+ {"ADDA_DL_CH2", "DL3_CH1 Switch", "DL3"},
+ {"ADDA_DL_CH2", "DL3_CH2 Switch", "DL3"},
+
+ {"ADDA_DL_CH1", "DL4_CH1 Switch", "DL4"},
+ {"ADDA_DL_CH2", "DL4_CH2 Switch", "DL4"},
+
+ {"ADDA_DL_CH1", "DL5_CH1 Switch", "DL5"},
+ {"ADDA_DL_CH2", "DL5_CH2 Switch", "DL5"},
+
+ {"ADDA Playback", NULL, "ADDA_DL_CH1"},
+ {"ADDA Playback", NULL, "ADDA_DL_CH2"},
+
+ {"ADDA Playback", NULL, "ADDA Enable"},
+ {"ADDA Playback", NULL, "ADDA Playback Enable"},
+
+ /* capture */
+ {"ADDA_UL_Mux", "MTKAIF", "ADDA Capture"},
+ {"ADDA_UL_Mux", "AP_DMIC", "AP DMIC Capture"},
+
+ {"ADDA Capture", NULL, "ADDA Enable"},
+ {"ADDA Capture", NULL, "ADDA Capture Enable"},
+ {"ADDA Capture", NULL, "AUD_PAD_TOP"},
+ {"ADDA Capture", NULL, "ADDA_MTKAIF_CFG"},
+
+ {"AP DMIC Capture", NULL, "ADDA Enable"},
+ {"AP DMIC Capture", NULL, "ADDA Capture Enable"},
+ {"AP DMIC Capture", NULL, "ADDA_FIFO"},
+ {"AP DMIC Capture", NULL, "AP_DMIC_EN"},
+
+ {"AP DMIC Capture", NULL, "AP_DMIC_INPUT"},
+
+ /* clk */
+ {"ADDA Playback", NULL, "aud_dac_clk"},
+ {"ADDA Playback", NULL, "aud_dac_predis_clk"},
+ {"ADDA Playback", NULL, "aud_dac_hires_clk", mtk_afe_dac_hires_connect},
+
+ {"ADDA Capture Enable", NULL, "aud_adc_clk"},
+ {"ADDA Capture Enable", NULL, "aud_adc_hires_clk",
+ mtk_afe_adc_hires_connect},
+
+	/* hires source from apll2 */
+ {"top_mux_audio_h", NULL, APLL2_W_NAME},
+
+ {"aud_dac_hires_clk", NULL, "top_mux_audio_h"},
+ {"aud_adc_hires_clk", NULL, "top_mux_audio_h"},
+};
+
+/* dai ops */
+static int mtk_dai_adda_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ unsigned int rate = params_rate(params);
+ int id = dai->id;
+ struct mtk_afe_adda_priv *adda_priv = afe_priv->dai_priv[id];
+
+ dev_dbg(afe->dev, "%s(), id %d, stream %d, rate %d\n",
+ __func__, id, substream->stream, rate);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ unsigned int dl_src2_con0;
+ unsigned int dl_src2_con1;
+
+ adda_priv->dl_rate = rate;
+
+ /* set sampling rate */
+ dl_src2_con0 = adda_dl_rate_transform(afe, rate) <<
+ DL_2_INPUT_MODE_CTL_SFT;
+
+ /* set output mode, UP_SAMPLING_RATE_X8 */
+ dl_src2_con0 |= (0x3 << DL_2_OUTPUT_SEL_CTL_SFT);
+
+ /* turn off mute function */
+ dl_src2_con0 |= BIT(DL_2_MUTE_CH2_OFF_CTL_PRE_SFT);
+ dl_src2_con0 |= BIT(DL_2_MUTE_CH1_OFF_CTL_PRE_SFT);
+
+ /* set voice input data if input sample rate is 8k or 16k */
+ if (rate == 8000 || rate == 16000)
+ dl_src2_con0 |= BIT(DL_2_VOICE_MODE_CTL_PRE_SFT);
+
+		/* SA suggests applying -0.3dB to the audio/speech path */
+ dl_src2_con1 = MTK_AFE_ADDA_DL_GAIN_NORMAL <<
+ DL_2_GAIN_CTL_PRE_SFT;
+
+ /* turn on down-link gain */
+ dl_src2_con0 |= BIT(DL_2_GAIN_ON_CTL_PRE_SFT);
+
+ if (id == MT8186_DAI_ADDA) {
+ /* clean predistortion */
+ regmap_write(afe->regmap, AFE_ADDA_PREDIS_CON0, 0);
+ regmap_write(afe->regmap, AFE_ADDA_PREDIS_CON1, 0);
+
+ regmap_write(afe->regmap,
+ AFE_ADDA_DL_SRC2_CON0, dl_src2_con0);
+ regmap_write(afe->regmap,
+ AFE_ADDA_DL_SRC2_CON1, dl_src2_con1);
+
+ /* set sdm gain */
+ regmap_update_bits(afe->regmap,
+ AFE_ADDA_DL_SDM_DCCOMP_CON,
+ ATTGAIN_CTL_MASK_SFT,
+ AUDIO_SDM_LEVEL_NORMAL <<
+ ATTGAIN_CTL_SFT);
+
+ /* Use new 2nd sdm */
+ regmap_update_bits(afe->regmap,
+ AFE_ADDA_DL_SDM_DITHER_CON,
+ AFE_DL_SDM_DITHER_64TAP_EN_MASK_SFT,
+ BIT(AFE_DL_SDM_DITHER_64TAP_EN_SFT));
+ regmap_update_bits(afe->regmap,
+ AFE_ADDA_DL_SDM_AUTO_RESET_CON,
+ AFE_DL_USE_NEW_2ND_SDM_MASK_SFT,
+ BIT(AFE_DL_USE_NEW_2ND_SDM_SFT));
+ regmap_update_bits(afe->regmap,
+ AFE_ADDA_DL_SDM_DCCOMP_CON,
+ USE_3RD_SDM_MASK_SFT,
+ AUDIO_SDM_2ND << USE_3RD_SDM_SFT);
+
+ /* sdm auto reset */
+ regmap_write(afe->regmap,
+ AFE_ADDA_DL_SDM_AUTO_RESET_CON,
+ SDM_AUTO_RESET_THRESHOLD);
+ regmap_update_bits(afe->regmap,
+ AFE_ADDA_DL_SDM_AUTO_RESET_CON,
+ SDM_AUTO_RESET_TEST_ON_MASK_SFT,
+ BIT(SDM_AUTO_RESET_TEST_ON_SFT));
+ }
+ } else {
+ unsigned int ul_src_con0 = 0;
+ unsigned int voice_mode = adda_ul_rate_transform(afe, rate);
+
+ adda_priv->ul_rate = rate;
+ ul_src_con0 |= (voice_mode << 17) & (0x7 << 17);
+
+ /* enable iir */
+ ul_src_con0 |= (1 << UL_IIR_ON_TMP_CTL_SFT) &
+ UL_IIR_ON_TMP_CTL_MASK_SFT;
+ ul_src_con0 |= (UL_IIR_SW << UL_IIRMODE_CTL_SFT) &
+ UL_IIRMODE_CTL_MASK_SFT;
+ switch (id) {
+ case MT8186_DAI_ADDA:
+ case MT8186_DAI_AP_DMIC:
+ /* 35Hz @ 48k */
+ regmap_write(afe->regmap,
+ AFE_ADDA_IIR_COEF_02_01, 0);
+ regmap_write(afe->regmap,
+ AFE_ADDA_IIR_COEF_04_03, 0x3fb8);
+ regmap_write(afe->regmap,
+ AFE_ADDA_IIR_COEF_06_05, 0x3fb80000);
+ regmap_write(afe->regmap,
+ AFE_ADDA_IIR_COEF_08_07, 0x3fb80000);
+ regmap_write(afe->regmap,
+ AFE_ADDA_IIR_COEF_10_09, 0xc048);
+
+ regmap_write(afe->regmap,
+ AFE_ADDA_UL_SRC_CON0, ul_src_con0);
+
+ /* Using Internal ADC */
+ regmap_update_bits(afe->regmap, AFE_ADDA_TOP_CON0, BIT(0), 0);
+
+ /* mtkaif_rxif_data_mode = 0, amic */
+ regmap_update_bits(afe->regmap, AFE_ADDA_MTKAIF_RX_CFG0, BIT(0), 0);
+ break;
+ default:
+ break;
+ }
+
+ /* ap dmic */
+ switch (id) {
+ case MT8186_DAI_AP_DMIC:
+ mtk_adda_ul_src_dmic(afe, id);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops mtk_dai_adda_ops = {
+ .hw_params = mtk_dai_adda_hw_params,
+};
+
+/* dai driver */
+#define MTK_ADDA_PLAYBACK_RATES (SNDRV_PCM_RATE_8000_48000 |\
+ SNDRV_PCM_RATE_96000 |\
+ SNDRV_PCM_RATE_192000)
+
+#define MTK_ADDA_CAPTURE_RATES (SNDRV_PCM_RATE_8000 |\
+ SNDRV_PCM_RATE_16000 |\
+ SNDRV_PCM_RATE_32000 |\
+ SNDRV_PCM_RATE_48000 |\
+ SNDRV_PCM_RATE_96000 |\
+ SNDRV_PCM_RATE_192000)
+
+#define MTK_ADDA_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+ SNDRV_PCM_FMTBIT_S24_LE |\
+ SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_driver mtk_dai_adda_driver[] = {
+ {
+ .name = "ADDA",
+ .id = MT8186_DAI_ADDA,
+ .playback = {
+ .stream_name = "ADDA Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_ADDA_PLAYBACK_RATES,
+ .formats = MTK_ADDA_FORMATS,
+ },
+ .capture = {
+ .stream_name = "ADDA Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_ADDA_CAPTURE_RATES,
+ .formats = MTK_ADDA_FORMATS,
+ },
+ .ops = &mtk_dai_adda_ops,
+ },
+ {
+ .name = "AP_DMIC",
+ .id = MT8186_DAI_AP_DMIC,
+ .capture = {
+ .stream_name = "AP DMIC Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_ADDA_CAPTURE_RATES,
+ .formats = MTK_ADDA_FORMATS,
+ },
+ .ops = &mtk_dai_adda_ops,
+ },
+};
+
+int mt8186_dai_adda_register(struct mtk_base_afe *afe)
+{
+ struct mtk_base_afe_dai *dai;
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ int ret;
+
+ dai = devm_kzalloc(afe->dev, sizeof(*dai), GFP_KERNEL);
+ if (!dai)
+ return -ENOMEM;
+
+ list_add(&dai->list, &afe->sub_dais);
+
+ dai->dai_drivers = mtk_dai_adda_driver;
+ dai->num_dai_drivers = ARRAY_SIZE(mtk_dai_adda_driver);
+
+ dai->controls = mtk_adda_controls;
+ dai->num_controls = ARRAY_SIZE(mtk_adda_controls);
+ dai->dapm_widgets = mtk_dai_adda_widgets;
+ dai->num_dapm_widgets = ARRAY_SIZE(mtk_dai_adda_widgets);
+ dai->dapm_routes = mtk_dai_adda_routes;
+ dai->num_dapm_routes = ARRAY_SIZE(mtk_dai_adda_routes);
+
+ /* set dai priv */
+ ret = mt8186_dai_set_priv(afe, MT8186_DAI_ADDA,
+ sizeof(struct mtk_afe_adda_priv), NULL);
+ if (ret)
+ return ret;
+
+ /* ap dmic priv share with adda */
+ afe_priv->dai_priv[MT8186_DAI_AP_DMIC] =
+ afe_priv->dai_priv[MT8186_DAI_ADDA];
+
+ return 0;
+}
diff --git a/sound/soc/mediatek/mt8186/mt8186-dai-hostless.c b/sound/soc/mediatek/mt8186/mt8186-dai-hostless.c
new file mode 100644
index 000000000000..bf0d83840cf4
--- /dev/null
+++ b/sound/soc/mediatek/mt8186/mt8186-dai-hostless.c
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// MediaTek ALSA SoC Audio DAI Hostless Control
+//
+// Copyright (c) 2022 MediaTek Inc.
+// Author: Jiaxin Yu <jiaxin.yu@mediatek.com>
+
+#include "mt8186-afe-common.h"
+
+static const struct snd_pcm_hardware mt8186_hostless_hardware = {
+ .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_MMAP_VALID),
+ .period_bytes_min = 256,
+ .period_bytes_max = 4 * 48 * 1024,
+ .periods_min = 2,
+ .periods_max = 256,
+ .buffer_bytes_max = 4 * 48 * 1024,
+ .fifo_size = 0,
+};
+
+/* dai component */
+static const struct snd_soc_dapm_route mtk_dai_hostless_routes[] = {
+ /* Hostless ADDA Loopback */
+ {"ADDA_DL_CH1", "ADDA_UL_CH1 Switch", "Hostless LPBK DL"},
+ {"ADDA_DL_CH1", "ADDA_UL_CH2 Switch", "Hostless LPBK DL"},
+ {"ADDA_DL_CH2", "ADDA_UL_CH1 Switch", "Hostless LPBK DL"},
+ {"ADDA_DL_CH2", "ADDA_UL_CH2 Switch", "Hostless LPBK DL"},
+ {"I2S1_CH1", "ADDA_UL_CH1 Switch", "Hostless LPBK DL"},
+ {"I2S1_CH2", "ADDA_UL_CH2 Switch", "Hostless LPBK DL"},
+ {"I2S3_CH1", "ADDA_UL_CH1 Switch", "Hostless LPBK DL"},
+ {"I2S3_CH1", "ADDA_UL_CH2 Switch", "Hostless LPBK DL"},
+ {"I2S3_CH2", "ADDA_UL_CH1 Switch", "Hostless LPBK DL"},
+ {"I2S3_CH2", "ADDA_UL_CH2 Switch", "Hostless LPBK DL"},
+ {"Hostless LPBK UL", NULL, "ADDA_UL_Mux"},
+
+ /* Hostless FM */
+ /* connsys_i2s to hw gain 1 */
+ {"Hostless FM UL", NULL, "Connsys I2S"},
+
+ {"HW_GAIN1_IN_CH1", "CONNSYS_I2S_CH1 Switch", "Hostless FM DL"},
+ {"HW_GAIN1_IN_CH2", "CONNSYS_I2S_CH2 Switch", "Hostless FM DL"},
+ /* hw gain to adda dl */
+ {"Hostless FM UL", NULL, "HW Gain 1 Out"},
+
+ {"ADDA_DL_CH1", "GAIN1_OUT_CH1 Switch", "Hostless FM DL"},
+ {"ADDA_DL_CH2", "GAIN1_OUT_CH2 Switch", "Hostless FM DL"},
+ /* hw gain to i2s3 */
+ {"I2S3_CH1", "GAIN1_OUT_CH1 Switch", "Hostless FM DL"},
+ {"I2S3_CH2", "GAIN1_OUT_CH2 Switch", "Hostless FM DL"},
+ /* hw gain to i2s1 */
+ {"I2S1_CH1", "GAIN1_OUT_CH1 Switch", "Hostless FM DL"},
+ {"I2S1_CH2", "GAIN1_OUT_CH2 Switch", "Hostless FM DL"},
+
+ /* Hostless_SRC */
+ {"ADDA_DL_CH1", "SRC_1_OUT_CH1 Switch", "Hostless_SRC_1_DL"},
+ {"ADDA_DL_CH2", "SRC_1_OUT_CH2 Switch", "Hostless_SRC_1_DL"},
+ {"I2S1_CH1", "SRC_1_OUT_CH1 Switch", "Hostless_SRC_1_DL"},
+ {"I2S1_CH2", "SRC_1_OUT_CH2 Switch", "Hostless_SRC_1_DL"},
+ {"I2S3_CH1", "SRC_1_OUT_CH1 Switch", "Hostless_SRC_1_DL"},
+ {"I2S3_CH2", "SRC_1_OUT_CH2 Switch", "Hostless_SRC_1_DL"},
+ {"Hostless_SRC_1_UL", NULL, "HW_SRC_1_Out"},
+
+ /* Hostless_SRC_bargein */
+ {"HW_SRC_1_IN_CH1", "I2S0_CH1 Switch", "Hostless_SRC_Bargein_DL"},
+ {"HW_SRC_1_IN_CH2", "I2S0_CH2 Switch", "Hostless_SRC_Bargein_DL"},
+ {"Hostless_SRC_Bargein_UL", NULL, "I2S0"},
+
+ /* Hostless AAudio */
+ {"Hostless HW Gain AAudio In", NULL, "HW Gain 2 In"},
+ {"Hostless SRC AAudio UL", NULL, "HW Gain 2 Out"},
+ {"HW_SRC_2_IN_CH1", "HW_GAIN2_OUT_CH1 Switch", "Hostless SRC AAudio DL"},
+ {"HW_SRC_2_IN_CH2", "HW_GAIN2_OUT_CH2 Switch", "Hostless SRC AAudio DL"},
+};
+
+/* dai ops */
+static int mtk_dai_hostless_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ int ret;
+
+ snd_soc_set_runtime_hwparams(substream, &mt8186_hostless_hardware);
+
+ ret = snd_pcm_hw_constraint_integer(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (ret < 0) {
+ dev_err(afe->dev, "snd_pcm_hw_constraint_integer failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops mtk_dai_hostless_ops = {
+ .startup = mtk_dai_hostless_startup,
+};
+
+/* dai driver */
+#define MTK_HOSTLESS_RATES (SNDRV_PCM_RATE_8000_48000 |\
+ SNDRV_PCM_RATE_88200 |\
+ SNDRV_PCM_RATE_96000 |\
+ SNDRV_PCM_RATE_176400 |\
+ SNDRV_PCM_RATE_192000)
+
+#define MTK_HOSTLESS_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+ SNDRV_PCM_FMTBIT_S24_LE |\
+ SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_driver mtk_dai_hostless_driver[] = {
+ {
+ .name = "Hostless LPBK DAI",
+ .id = MT8186_DAI_HOSTLESS_LPBK,
+ .playback = {
+ .stream_name = "Hostless LPBK DL",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_HOSTLESS_RATES,
+ .formats = MTK_HOSTLESS_FORMATS,
+ },
+ .capture = {
+ .stream_name = "Hostless LPBK UL",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_HOSTLESS_RATES,
+ .formats = MTK_HOSTLESS_FORMATS,
+ },
+ .ops = &mtk_dai_hostless_ops,
+ },
+ {
+ .name = "Hostless FM DAI",
+ .id = MT8186_DAI_HOSTLESS_FM,
+ .playback = {
+ .stream_name = "Hostless FM DL",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_HOSTLESS_RATES,
+ .formats = MTK_HOSTLESS_FORMATS,
+ },
+ .capture = {
+ .stream_name = "Hostless FM UL",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_HOSTLESS_RATES,
+ .formats = MTK_HOSTLESS_FORMATS,
+ },
+ .ops = &mtk_dai_hostless_ops,
+ },
+ {
+ .name = "Hostless_SRC_1_DAI",
+ .id = MT8186_DAI_HOSTLESS_SRC_1,
+ .playback = {
+ .stream_name = "Hostless_SRC_1_DL",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_HOSTLESS_RATES,
+ .formats = MTK_HOSTLESS_FORMATS,
+ },
+ .capture = {
+ .stream_name = "Hostless_SRC_1_UL",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_HOSTLESS_RATES,
+ .formats = MTK_HOSTLESS_FORMATS,
+ },
+ .ops = &mtk_dai_hostless_ops,
+ },
+ {
+ .name = "Hostless_SRC_Bargein_DAI",
+ .id = MT8186_DAI_HOSTLESS_SRC_BARGEIN,
+ .playback = {
+ .stream_name = "Hostless_SRC_Bargein_DL",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_HOSTLESS_RATES,
+ .formats = MTK_HOSTLESS_FORMATS,
+ },
+ .capture = {
+ .stream_name = "Hostless_SRC_Bargein_UL",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_HOSTLESS_RATES,
+ .formats = MTK_HOSTLESS_FORMATS,
+ },
+ .ops = &mtk_dai_hostless_ops,
+ },
+ /* BE dai */
+ {
+ .name = "Hostless_UL1 DAI",
+ .id = MT8186_DAI_HOSTLESS_UL1,
+ .capture = {
+ .stream_name = "Hostless_UL1 UL",
+ .channels_min = 1,
+ .channels_max = 4,
+ .rates = MTK_HOSTLESS_RATES,
+ .formats = MTK_HOSTLESS_FORMATS,
+ },
+ .ops = &mtk_dai_hostless_ops,
+ },
+ {
+ .name = "Hostless_UL2 DAI",
+ .id = MT8186_DAI_HOSTLESS_UL2,
+ .capture = {
+ .stream_name = "Hostless_UL2 UL",
+ .channels_min = 1,
+ .channels_max = 4,
+ .rates = MTK_HOSTLESS_RATES,
+ .formats = MTK_HOSTLESS_FORMATS,
+ },
+ .ops = &mtk_dai_hostless_ops,
+ },
+ {
+ .name = "Hostless_UL3 DAI",
+ .id = MT8186_DAI_HOSTLESS_UL3,
+ .capture = {
+ .stream_name = "Hostless_UL3 UL",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_HOSTLESS_RATES,
+ .formats = MTK_HOSTLESS_FORMATS,
+ },
+ .ops = &mtk_dai_hostless_ops,
+ },
+ {
+ .name = "Hostless_UL5 DAI",
+ .id = MT8186_DAI_HOSTLESS_UL5,
+ .capture = {
+ .stream_name = "Hostless_UL5 UL",
+ .channels_min = 1,
+ .channels_max = 12,
+ .rates = MTK_HOSTLESS_RATES,
+ .formats = MTK_HOSTLESS_FORMATS,
+ },
+ .ops = &mtk_dai_hostless_ops,
+ },
+ {
+ .name = "Hostless_UL6 DAI",
+ .id = MT8186_DAI_HOSTLESS_UL6,
+ .capture = {
+ .stream_name = "Hostless_UL6 UL",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_HOSTLESS_RATES,
+ .formats = MTK_HOSTLESS_FORMATS,
+ },
+ .ops = &mtk_dai_hostless_ops,
+ },
+ {
+ .name = "Hostless HW Gain AAudio DAI",
+ .id = MT8186_DAI_HOSTLESS_HW_GAIN_AAUDIO,
+ .capture = {
+ .stream_name = "Hostless HW Gain AAudio In",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_HOSTLESS_RATES,
+ .formats = MTK_HOSTLESS_FORMATS,
+ },
+ .ops = &mtk_dai_hostless_ops,
+ },
+ {
+ .name = "Hostless SRC AAudio DAI",
+ .id = MT8186_DAI_HOSTLESS_SRC_AAUDIO,
+ .playback = {
+ .stream_name = "Hostless SRC AAudio DL",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_HOSTLESS_RATES,
+ .formats = MTK_HOSTLESS_FORMATS,
+ },
+ .capture = {
+ .stream_name = "Hostless SRC AAudio UL",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_HOSTLESS_RATES,
+ .formats = MTK_HOSTLESS_FORMATS,
+ },
+ .ops = &mtk_dai_hostless_ops,
+ },
+};
+
+int mt8186_dai_hostless_register(struct mtk_base_afe *afe)
+{
+ struct mtk_base_afe_dai *dai;
+
+ dai = devm_kzalloc(afe->dev, sizeof(*dai), GFP_KERNEL);
+ if (!dai)
+ return -ENOMEM;
+
+ list_add(&dai->list, &afe->sub_dais);
+
+ dai->dai_drivers = mtk_dai_hostless_driver;
+ dai->num_dai_drivers = ARRAY_SIZE(mtk_dai_hostless_driver);
+
+ dai->dapm_routes = mtk_dai_hostless_routes;
+ dai->num_dapm_routes = ARRAY_SIZE(mtk_dai_hostless_routes);
+
+ return 0;
+}
diff --git a/sound/soc/mediatek/mt8186/mt8186-dai-hw-gain.c b/sound/soc/mediatek/mt8186/mt8186-dai-hw-gain.c
new file mode 100644
index 000000000000..33edd6cbde12
--- /dev/null
+++ b/sound/soc/mediatek/mt8186/mt8186-dai-hw-gain.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// MediaTek ALSA SoC Audio DAI HW Gain Control
+//
+// Copyright (c) 2022 MediaTek Inc.
+// Author: Jiaxin Yu <jiaxin.yu@mediatek.com>
+
+#include <linux/regmap.h>
+#include "mt8186-afe-common.h"
+#include "mt8186-interconnection.h"
+
+#define HW_GAIN_1_EN_W_NAME "HW GAIN 1 Enable"
+#define HW_GAIN_2_EN_W_NAME "HW GAIN 2 Enable"
+
+/* dai component */
+static const struct snd_kcontrol_new mtk_hw_gain1_in_ch1_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("CONNSYS_I2S_CH1 Switch", AFE_CONN13_1,
+ I_CONNSYS_I2S_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new mtk_hw_gain1_in_ch2_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("CONNSYS_I2S_CH2 Switch", AFE_CONN14_1,
+ I_CONNSYS_I2S_CH2, 1, 0),
+};
+
+static const struct snd_kcontrol_new mtk_hw_gain2_in_ch1_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1 Switch", AFE_CONN15,
+ I_ADDA_UL_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new mtk_hw_gain2_in_ch2_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2 Switch", AFE_CONN16,
+ I_ADDA_UL_CH2, 1, 0),
+};
+
+static int mtk_hw_gain_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ unsigned int gain_cur;
+ unsigned int gain_con1;
+
+ dev_dbg(cmpnt->dev, "%s(), name %s, event 0x%x\n",
+ __func__, w->name, event);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ if (strcmp(w->name, HW_GAIN_1_EN_W_NAME) == 0) {
+ gain_cur = AFE_GAIN1_CUR;
+ gain_con1 = AFE_GAIN1_CON1;
+ } else {
+ gain_cur = AFE_GAIN2_CUR;
+ gain_con1 = AFE_GAIN2_CON1;
+ }
+
+ /* let hw gain ramp up, set cur gain to 0 */
+ regmap_update_bits(afe->regmap, gain_cur, AFE_GAIN1_CUR_MASK_SFT, 0);
+
+ /* set target gain to 0 */
+ regmap_update_bits(afe->regmap, gain_con1, GAIN1_TARGET_MASK_SFT, 0);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dapm_widget mtk_dai_hw_gain_widgets[] = {
+ /* inter-connections */
+ SND_SOC_DAPM_MIXER("HW_GAIN1_IN_CH1", SND_SOC_NOPM, 0, 0,
+ mtk_hw_gain1_in_ch1_mix,
+ ARRAY_SIZE(mtk_hw_gain1_in_ch1_mix)),
+ SND_SOC_DAPM_MIXER("HW_GAIN1_IN_CH2", SND_SOC_NOPM, 0, 0,
+ mtk_hw_gain1_in_ch2_mix,
+ ARRAY_SIZE(mtk_hw_gain1_in_ch2_mix)),
+ SND_SOC_DAPM_MIXER("HW_GAIN2_IN_CH1", SND_SOC_NOPM, 0, 0,
+ mtk_hw_gain2_in_ch1_mix,
+ ARRAY_SIZE(mtk_hw_gain2_in_ch1_mix)),
+ SND_SOC_DAPM_MIXER("HW_GAIN2_IN_CH2", SND_SOC_NOPM, 0, 0,
+ mtk_hw_gain2_in_ch2_mix,
+ ARRAY_SIZE(mtk_hw_gain2_in_ch2_mix)),
+
+ SND_SOC_DAPM_SUPPLY(HW_GAIN_1_EN_W_NAME,
+ AFE_GAIN1_CON0, GAIN1_ON_SFT, 0,
+ mtk_hw_gain_event,
+ SND_SOC_DAPM_PRE_PMU),
+
+ SND_SOC_DAPM_SUPPLY(HW_GAIN_2_EN_W_NAME,
+ AFE_GAIN2_CON0, GAIN2_ON_SFT, 0,
+ mtk_hw_gain_event,
+ SND_SOC_DAPM_PRE_PMU),
+
+ SND_SOC_DAPM_INPUT("HW Gain 1 Out Endpoint"),
+ SND_SOC_DAPM_INPUT("HW Gain 2 Out Endpoint"),
+ SND_SOC_DAPM_OUTPUT("HW Gain 1 In Endpoint"),
+};
+
+static const struct snd_soc_dapm_route mtk_dai_hw_gain_routes[] = {
+ {"HW Gain 1 In", NULL, "HW_GAIN1_IN_CH1"},
+ {"HW Gain 1 In", NULL, "HW_GAIN1_IN_CH2"},
+ {"HW Gain 2 In", NULL, "HW_GAIN2_IN_CH1"},
+ {"HW Gain 2 In", NULL, "HW_GAIN2_IN_CH2"},
+
+ {"HW Gain 1 In", NULL, HW_GAIN_1_EN_W_NAME},
+ {"HW Gain 1 Out", NULL, HW_GAIN_1_EN_W_NAME},
+ {"HW Gain 2 In", NULL, HW_GAIN_2_EN_W_NAME},
+ {"HW Gain 2 Out", NULL, HW_GAIN_2_EN_W_NAME},
+
+ {"HW Gain 1 In Endpoint", NULL, "HW Gain 1 In"},
+ {"HW Gain 1 Out", NULL, "HW Gain 1 Out Endpoint"},
+ {"HW Gain 2 Out", NULL, "HW Gain 2 Out Endpoint"},
+};
+
+static const struct snd_kcontrol_new mtk_hw_gain_controls[] = {
+ SOC_SINGLE("HW Gain 1 Volume", AFE_GAIN1_CON1,
+ GAIN1_TARGET_SFT, GAIN1_TARGET_MASK, 0),
+ SOC_SINGLE("HW Gain 2 Volume", AFE_GAIN2_CON1,
+ GAIN2_TARGET_SFT, GAIN2_TARGET_MASK, 0),
+};
+
+/* dai ops */
+static int mtk_dai_gain_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+ unsigned int rate = params_rate(params);
+ unsigned int rate_reg = mt8186_rate_transform(afe->dev, rate, dai->id);
+
+ dev_dbg(afe->dev, "%s(), id %d, stream %d, rate %d\n",
+ __func__, dai->id, substream->stream, rate);
+
+ /* rate */
+ regmap_update_bits(afe->regmap,
+ dai->id == MT8186_DAI_HW_GAIN_1 ?
+ AFE_GAIN1_CON0 : AFE_GAIN2_CON0,
+ GAIN1_MODE_MASK_SFT,
+ rate_reg << GAIN1_MODE_SFT);
+
+ /* sample per step */
+ regmap_update_bits(afe->regmap,
+ dai->id == MT8186_DAI_HW_GAIN_1 ?
+ AFE_GAIN1_CON0 : AFE_GAIN2_CON0,
+ GAIN1_SAMPLE_PER_STEP_MASK_SFT,
+ (dai->id == MT8186_DAI_HW_GAIN_1 ? 0x40 : 0x0) <<
+ GAIN1_SAMPLE_PER_STEP_SFT);
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops mtk_dai_gain_ops = {
+ .hw_params = mtk_dai_gain_hw_params,
+};
+
+/* dai driver */
+#define MTK_HW_GAIN_RATES (SNDRV_PCM_RATE_8000_48000 |\
+ SNDRV_PCM_RATE_88200 |\
+ SNDRV_PCM_RATE_96000 |\
+ SNDRV_PCM_RATE_176400 |\
+ SNDRV_PCM_RATE_192000)
+
+#define MTK_HW_GAIN_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+ SNDRV_PCM_FMTBIT_S24_LE |\
+ SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_driver mtk_dai_gain_driver[] = {
+ {
+ .name = "HW Gain 1",
+ .id = MT8186_DAI_HW_GAIN_1,
+ .playback = {
+ .stream_name = "HW Gain 1 In",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_HW_GAIN_RATES,
+ .formats = MTK_HW_GAIN_FORMATS,
+ },
+ .capture = {
+ .stream_name = "HW Gain 1 Out",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_HW_GAIN_RATES,
+ .formats = MTK_HW_GAIN_FORMATS,
+ },
+ .ops = &mtk_dai_gain_ops,
+ .symmetric_rate = 1,
+ .symmetric_channels = 1,
+ .symmetric_sample_bits = 1,
+ },
+ {
+ .name = "HW Gain 2",
+ .id = MT8186_DAI_HW_GAIN_2,
+ .playback = {
+ .stream_name = "HW Gain 2 In",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_HW_GAIN_RATES,
+ .formats = MTK_HW_GAIN_FORMATS,
+ },
+ .capture = {
+ .stream_name = "HW Gain 2 Out",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_HW_GAIN_RATES,
+ .formats = MTK_HW_GAIN_FORMATS,
+ },
+ .ops = &mtk_dai_gain_ops,
+ .symmetric_rate = 1,
+ .symmetric_channels = 1,
+ .symmetric_sample_bits = 1,
+ },
+};
+
+int mt8186_dai_hw_gain_register(struct mtk_base_afe *afe)
+{
+ struct mtk_base_afe_dai *dai;
+
+ dai = devm_kzalloc(afe->dev, sizeof(*dai), GFP_KERNEL);
+ if (!dai)
+ return -ENOMEM;
+
+ list_add(&dai->list, &afe->sub_dais);
+
+ dai->dai_drivers = mtk_dai_gain_driver;
+ dai->num_dai_drivers = ARRAY_SIZE(mtk_dai_gain_driver);
+
+ dai->controls = mtk_hw_gain_controls;
+ dai->num_controls = ARRAY_SIZE(mtk_hw_gain_controls);
+ dai->dapm_widgets = mtk_dai_hw_gain_widgets;
+ dai->num_dapm_widgets = ARRAY_SIZE(mtk_dai_hw_gain_widgets);
+ dai->dapm_routes = mtk_dai_hw_gain_routes;
+ dai->num_dapm_routes = ARRAY_SIZE(mtk_dai_hw_gain_routes);
+ return 0;
+}
diff --git a/sound/soc/mediatek/mt8186/mt8186-dai-i2s.c b/sound/soc/mediatek/mt8186/mt8186-dai-i2s.c
new file mode 100644
index 000000000000..ec79e2f2a54d
--- /dev/null
+++ b/sound/soc/mediatek/mt8186/mt8186-dai-i2s.c
@@ -0,0 +1,1223 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// MediaTek ALSA SoC Audio DAI I2S Control
+//
+// Copyright (c) 2022 MediaTek Inc.
+// Author: Jiaxin Yu <jiaxin.yu@mediatek.com>
+
+#include <linux/bitops.h>
+#include <linux/regmap.h>
+#include <sound/pcm_params.h>
+#include "mt8186-afe-clk.h"
+#include "mt8186-afe-common.h"
+#include "mt8186-afe-gpio.h"
+#include "mt8186-interconnection.h"
+
+enum {
+ I2S_FMT_EIAJ = 0,
+ I2S_FMT_I2S = 1,
+};
+
+enum {
+ I2S_WLEN_16_BIT = 0,
+ I2S_WLEN_32_BIT = 1,
+};
+
+enum {
+ I2S_HD_NORMAL = 0,
+ I2S_HD_LOW_JITTER = 1,
+};
+
+enum {
+ I2S1_SEL_O28_O29 = 0,
+ I2S1_SEL_O03_O04 = 1,
+};
+
+enum {
+ I2S_IN_PAD_CONNSYS = 0,
+ I2S_IN_PAD_IO_MUX = 1,
+};
+
+struct mtk_afe_i2s_priv {
+ int id;
+ int rate; /* for determining which apll to use */
+ int low_jitter_en;
+ int master; /* only i2s0 has slave mode */
+
+ const char *share_property_name;
+ int share_i2s_id;
+
+ int mclk_id;
+ int mclk_rate;
+ int mclk_apll;
+};
+
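+/* choose 16-bit or 32-bit I2S word length from the PCM physical width */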
+static unsigned int get_i2s_wlen(snd_pcm_format_t format)
+{
+ return snd_pcm_format_physical_width(format) <= 16 ?
+ I2S_WLEN_16_BIT : I2S_WLEN_32_BIT;
+}
+
+#define MTK_AFE_I2S0_KCONTROL_NAME "I2S0_HD_Mux"
+#define MTK_AFE_I2S1_KCONTROL_NAME "I2S1_HD_Mux"
+#define MTK_AFE_I2S2_KCONTROL_NAME "I2S2_HD_Mux"
+#define MTK_AFE_I2S3_KCONTROL_NAME "I2S3_HD_Mux"
+#define MTK_AFE_I2S0_SRC_KCONTROL_NAME "I2S0_SRC_Mux"
+
+#define I2S0_HD_EN_W_NAME "I2S0_HD_EN"
+#define I2S1_HD_EN_W_NAME "I2S1_HD_EN"
+#define I2S2_HD_EN_W_NAME "I2S2_HD_EN"
+#define I2S3_HD_EN_W_NAME "I2S3_HD_EN"
+
+#define I2S0_MCLK_EN_W_NAME "I2S0_MCLK_EN"
+#define I2S1_MCLK_EN_W_NAME "I2S1_MCLK_EN"
+#define I2S2_MCLK_EN_W_NAME "I2S2_MCLK_EN"
+#define I2S3_MCLK_EN_W_NAME "I2S3_MCLK_EN"
+
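+/* map a widget/kcontrol name prefixed with I2S0..I2S3 to its DAI id */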
+static int get_i2s_id_by_name(struct mtk_base_afe *afe,
+ const char *name)
+{
+ if (strncmp(name, "I2S0", 4) == 0)
+ return MT8186_DAI_I2S_0;
+ else if (strncmp(name, "I2S1", 4) == 0)
+ return MT8186_DAI_I2S_1;
+ else if (strncmp(name, "I2S2", 4) == 0)
+ return MT8186_DAI_I2S_2;
+ else if (strncmp(name, "I2S3", 4) == 0)
+ return MT8186_DAI_I2S_3;
+
+ return -EINVAL;
+}
+
+static struct mtk_afe_i2s_priv *get_i2s_priv_by_name(struct mtk_base_afe *afe,
+ const char *name)
+{
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ int dai_id = get_i2s_id_by_name(afe, name);
+
+ if (dai_id < 0)
+ return NULL;
+
+ return afe_priv->dai_priv[dai_id];
+}
+
+/* low jitter control */
+static const char * const mt8186_i2s_hd_str[] = {
+ "Normal", "Low_Jitter"
+};
+
+static const struct soc_enum mt8186_i2s_enum[] = {
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(mt8186_i2s_hd_str),
+ mt8186_i2s_hd_str),
+};
+
+static int mt8186_i2s_hd_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mtk_afe_i2s_priv *i2s_priv;
+
+ i2s_priv = get_i2s_priv_by_name(afe, kcontrol->id.name);
+ ucontrol->value.integer.value[0] = i2s_priv->low_jitter_en;
+
+ return 0;
+}
+
+static int mt8186_i2s_hd_set(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mtk_afe_i2s_priv *i2s_priv;
+ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ int hd_en;
+
+ if (ucontrol->value.enumerated.item[0] >= e->items)
+ return -EINVAL;
+
+ hd_en = ucontrol->value.integer.value[0];
+
+ dev_dbg(afe->dev, "%s(), kcontrol name %s, hd_en %d\n",
+ __func__, kcontrol->id.name, hd_en);
+
+ i2s_priv = get_i2s_priv_by_name(afe, kcontrol->id.name);
+ if (i2s_priv->low_jitter_en == hd_en)
+ return 0;
+
+ i2s_priv->low_jitter_en = hd_en;
+
+ return 1;
+}
+
+static const struct snd_kcontrol_new mtk_dai_i2s_controls[] = {
+ SOC_ENUM_EXT(MTK_AFE_I2S0_KCONTROL_NAME, mt8186_i2s_enum[0],
+ mt8186_i2s_hd_get, mt8186_i2s_hd_set),
+ SOC_ENUM_EXT(MTK_AFE_I2S1_KCONTROL_NAME, mt8186_i2s_enum[0],
+ mt8186_i2s_hd_get, mt8186_i2s_hd_set),
+ SOC_ENUM_EXT(MTK_AFE_I2S2_KCONTROL_NAME, mt8186_i2s_enum[0],
+ mt8186_i2s_hd_get, mt8186_i2s_hd_set),
+ SOC_ENUM_EXT(MTK_AFE_I2S3_KCONTROL_NAME, mt8186_i2s_enum[0],
+ mt8186_i2s_hd_get, mt8186_i2s_hd_set),
+};
+
+/* dai component */
+/* i2s virtual mux to output widget */
+static const char * const i2s_mux_map[] = {
+ "Normal", "Dummy_Widget",
+};
+
+static int i2s_mux_map_value[] = {
+ 0, 1,
+};
+
+static SOC_VALUE_ENUM_SINGLE_AUTODISABLE_DECL(i2s_mux_map_enum,
+ SND_SOC_NOPM,
+ 0,
+ 1,
+ i2s_mux_map,
+ i2s_mux_map_value);
+
+static const struct snd_kcontrol_new i2s0_in_mux_control =
+ SOC_DAPM_ENUM("I2S0 In Select", i2s_mux_map_enum);
+
+static const struct snd_kcontrol_new i2s1_out_mux_control =
+ SOC_DAPM_ENUM("I2S1 Out Select", i2s_mux_map_enum);
+
+static const struct snd_kcontrol_new i2s2_in_mux_control =
+ SOC_DAPM_ENUM("I2S2 In Select", i2s_mux_map_enum);
+
+static const struct snd_kcontrol_new i2s3_out_mux_control =
+ SOC_DAPM_ENUM("I2S3 Out Select", i2s_mux_map_enum);
+
+/* i2s in lpbk */
+static const char * const i2s_lpbk_mux_map[] = {
+ "Normal", "Lpbk",
+};
+
+static int i2s_lpbk_mux_map_value[] = {
+ 0, 1,
+};
+
+static SOC_VALUE_ENUM_SINGLE_AUTODISABLE_DECL(i2s0_lpbk_mux_map_enum,
+ AFE_I2S_CON,
+ I2S_LOOPBACK_SFT,
+ 1,
+ i2s_lpbk_mux_map,
+ i2s_lpbk_mux_map_value);
+
+static const struct snd_kcontrol_new i2s0_lpbk_mux_control =
+ SOC_DAPM_ENUM("I2S Lpbk Select", i2s0_lpbk_mux_map_enum);
+
+static SOC_VALUE_ENUM_SINGLE_AUTODISABLE_DECL(i2s2_lpbk_mux_map_enum,
+ AFE_I2S_CON2,
+ I2S3_LOOPBACK_SFT,
+ 1,
+ i2s_lpbk_mux_map,
+ i2s_lpbk_mux_map_value);
+
+static const struct snd_kcontrol_new i2s2_lpbk_mux_control =
+ SOC_DAPM_ENUM("I2S Lpbk Select", i2s2_lpbk_mux_map_enum);
+
+/* interconnection */
+static const struct snd_kcontrol_new mtk_i2s3_ch1_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH1 Switch", AFE_CONN0,
+ I_DL1_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1 Switch", AFE_CONN0,
+ I_DL2_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH1 Switch", AFE_CONN0,
+ I_DL3_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL12_CH1 Switch", AFE_CONN0,
+ I_DL12_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL12_CH3 Switch", AFE_CONN0,
+ I_DL12_CH3, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL6_CH1 Switch", AFE_CONN0_1,
+ I_DL6_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH1 Switch", AFE_CONN0_1,
+ I_DL4_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL5_CH1 Switch", AFE_CONN0_1,
+ I_DL5_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL8_CH1 Switch", AFE_CONN0_1,
+ I_DL8_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("GAIN1_OUT_CH1 Switch", AFE_CONN0,
+ I_GAIN1_OUT_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1 Switch", AFE_CONN0,
+ I_ADDA_UL_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2 Switch", AFE_CONN0,
+ I_ADDA_UL_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3 Switch", AFE_CONN0,
+ I_ADDA_UL_CH3, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH1 Switch", AFE_CONN0,
+ I_PCM_1_CAP_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("SRC_1_OUT_CH1 Switch", AFE_CONN0_1,
+ I_SRC_1_OUT_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new mtk_i2s3_ch2_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH2 Switch", AFE_CONN1,
+ I_DL1_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH2 Switch", AFE_CONN1,
+ I_DL2_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH2 Switch", AFE_CONN1,
+ I_DL3_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL12_CH2 Switch", AFE_CONN1,
+ I_DL12_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL12_CH4 Switch", AFE_CONN1,
+ I_DL12_CH4, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL6_CH2 Switch", AFE_CONN1_1,
+ I_DL6_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH2 Switch", AFE_CONN1_1,
+ I_DL4_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL5_CH2 Switch", AFE_CONN1_1,
+ I_DL5_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL8_CH2 Switch", AFE_CONN1_1,
+ I_DL8_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("GAIN1_OUT_CH2 Switch", AFE_CONN1,
+ I_GAIN1_OUT_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1 Switch", AFE_CONN1,
+ I_ADDA_UL_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2 Switch", AFE_CONN1,
+ I_ADDA_UL_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH3 Switch", AFE_CONN1,
+ I_ADDA_UL_CH3, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH2 Switch", AFE_CONN1,
+ I_PCM_1_CAP_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("PCM_2_CAP_CH2 Switch", AFE_CONN1,
+ I_PCM_2_CAP_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("SRC_1_OUT_CH2 Switch", AFE_CONN1_1,
+ I_SRC_1_OUT_CH2, 1, 0),
+};
+
+static const struct snd_kcontrol_new mtk_i2s1_ch1_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH1 Switch", AFE_CONN28,
+ I_DL1_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1 Switch", AFE_CONN28,
+ I_DL2_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH1 Switch", AFE_CONN28,
+ I_DL3_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL12_CH1 Switch", AFE_CONN28,
+ I_DL12_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL12_CH3 Switch", AFE_CONN28,
+ I_DL12_CH3, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL6_CH1 Switch", AFE_CONN28_1,
+ I_DL6_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH1 Switch", AFE_CONN28_1,
+ I_DL4_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL5_CH1 Switch", AFE_CONN28_1,
+ I_DL5_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL8_CH1 Switch", AFE_CONN28_1,
+ I_DL8_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("GAIN1_OUT_CH1 Switch", AFE_CONN28,
+ I_GAIN1_OUT_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1 Switch", AFE_CONN28,
+ I_ADDA_UL_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH1 Switch", AFE_CONN28,
+ I_PCM_1_CAP_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("SRC_1_OUT_CH1 Switch", AFE_CONN28_1,
+ I_SRC_1_OUT_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new mtk_i2s1_ch2_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH2 Switch", AFE_CONN29,
+ I_DL1_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH2 Switch", AFE_CONN29,
+ I_DL2_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH2 Switch", AFE_CONN29,
+ I_DL3_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL12_CH2 Switch", AFE_CONN29,
+ I_DL12_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL12_CH4 Switch", AFE_CONN29,
+ I_DL12_CH4, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL6_CH2 Switch", AFE_CONN29_1,
+ I_DL6_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH2 Switch", AFE_CONN29_1,
+ I_DL4_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL5_CH2 Switch", AFE_CONN29_1,
+ I_DL5_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL8_CH2 Switch", AFE_CONN29_1,
+ I_DL8_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("GAIN1_OUT_CH2 Switch", AFE_CONN29,
+ I_GAIN1_OUT_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2 Switch", AFE_CONN29,
+ I_ADDA_UL_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("PCM_1_CAP_CH2 Switch", AFE_CONN29,
+ I_PCM_1_CAP_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("PCM_2_CAP_CH2 Switch", AFE_CONN29,
+ I_PCM_2_CAP_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("SRC_1_OUT_CH2 Switch", AFE_CONN29_1,
+ I_SRC_1_OUT_CH2, 1, 0),
+};
+
+enum {
+ SUPPLY_SEQ_APLL,
+ SUPPLY_SEQ_I2S_MCLK_EN,
+ SUPPLY_SEQ_I2S_HD_EN,
+ SUPPLY_SEQ_I2S_EN,
+};
+
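+/* request/release the I2S pins when the corresponding enable widget powers up/down */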
+static int mtk_i2s_en_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mtk_afe_i2s_priv *i2s_priv;
+
+ i2s_priv = get_i2s_priv_by_name(afe, w->name);
+
+ dev_dbg(cmpnt->dev, "%s(), name %s, event 0x%x\n",
+ __func__, w->name, event);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ mt8186_afe_gpio_request(afe->dev, true, i2s_priv->id, 0);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ mt8186_afe_gpio_request(afe->dev, false, i2s_priv->id, 0);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
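+/* turn APLL1/APLL2 on and off with the corresponding supply widget */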
+static int mtk_apll_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+
+ dev_dbg(cmpnt->dev, "%s(), name %s, event 0x%x\n",
+ __func__, w->name, event);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ if (strcmp(w->name, APLL1_W_NAME) == 0)
+ mt8186_apll1_enable(afe);
+ else
+ mt8186_apll2_enable(afe);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ if (strcmp(w->name, APLL1_W_NAME) == 0)
+ mt8186_apll1_disable(afe);
+ else
+ mt8186_apll2_disable(afe);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
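+/* enable the MCLK output at the rate set via set_sysclk; disable and clear it on power down */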
+static int mtk_mclk_en_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mtk_afe_i2s_priv *i2s_priv;
+
+ dev_dbg(cmpnt->dev, "%s(), name %s, event 0x%x\n",
+ __func__, w->name, event);
+
+ i2s_priv = get_i2s_priv_by_name(afe, w->name);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ mt8186_mck_enable(afe, i2s_priv->mclk_id, i2s_priv->mclk_rate);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ i2s_priv->mclk_rate = 0;
+ mt8186_mck_disable(afe, i2s_priv->mclk_id);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dapm_widget mtk_dai_i2s_widgets[] = {
+ SND_SOC_DAPM_INPUT("CONNSYS"),
+
+ SND_SOC_DAPM_MIXER("I2S1_CH1", SND_SOC_NOPM, 0, 0,
+ mtk_i2s1_ch1_mix,
+ ARRAY_SIZE(mtk_i2s1_ch1_mix)),
+ SND_SOC_DAPM_MIXER("I2S1_CH2", SND_SOC_NOPM, 0, 0,
+ mtk_i2s1_ch2_mix,
+ ARRAY_SIZE(mtk_i2s1_ch2_mix)),
+
+ SND_SOC_DAPM_MIXER("I2S3_CH1", SND_SOC_NOPM, 0, 0,
+ mtk_i2s3_ch1_mix,
+ ARRAY_SIZE(mtk_i2s3_ch1_mix)),
+ SND_SOC_DAPM_MIXER("I2S3_CH2", SND_SOC_NOPM, 0, 0,
+ mtk_i2s3_ch2_mix,
+ ARRAY_SIZE(mtk_i2s3_ch2_mix)),
+
+ /* i2s en */
+ SND_SOC_DAPM_SUPPLY_S("I2S0_EN", SUPPLY_SEQ_I2S_EN,
+ AFE_I2S_CON, I2S_EN_SFT, 0,
+ mtk_i2s_en_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY_S("I2S1_EN", SUPPLY_SEQ_I2S_EN,
+ AFE_I2S_CON1, I2S_EN_SFT, 0,
+ mtk_i2s_en_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY_S("I2S2_EN", SUPPLY_SEQ_I2S_EN,
+ AFE_I2S_CON2, I2S_EN_SFT, 0,
+ mtk_i2s_en_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY_S("I2S3_EN", SUPPLY_SEQ_I2S_EN,
+ AFE_I2S_CON3, I2S_EN_SFT, 0,
+ mtk_i2s_en_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ /* i2s hd en */
+ SND_SOC_DAPM_SUPPLY_S(I2S0_HD_EN_W_NAME, SUPPLY_SEQ_I2S_HD_EN,
+ AFE_I2S_CON, I2S1_HD_EN_SFT, 0, NULL,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY_S(I2S1_HD_EN_W_NAME, SUPPLY_SEQ_I2S_HD_EN,
+ AFE_I2S_CON1, I2S2_HD_EN_SFT, 0, NULL,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY_S(I2S2_HD_EN_W_NAME, SUPPLY_SEQ_I2S_HD_EN,
+ AFE_I2S_CON2, I2S3_HD_EN_SFT, 0, NULL,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY_S(I2S3_HD_EN_W_NAME, SUPPLY_SEQ_I2S_HD_EN,
+ AFE_I2S_CON3, I2S4_HD_EN_SFT, 0, NULL,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+ /* i2s mclk en */
+ SND_SOC_DAPM_SUPPLY_S(I2S0_MCLK_EN_W_NAME, SUPPLY_SEQ_I2S_MCLK_EN,
+ SND_SOC_NOPM, 0, 0,
+ mtk_mclk_en_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY_S(I2S1_MCLK_EN_W_NAME, SUPPLY_SEQ_I2S_MCLK_EN,
+ SND_SOC_NOPM, 0, 0,
+ mtk_mclk_en_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY_S(I2S2_MCLK_EN_W_NAME, SUPPLY_SEQ_I2S_MCLK_EN,
+ SND_SOC_NOPM, 0, 0,
+ mtk_mclk_en_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY_S(I2S3_MCLK_EN_W_NAME, SUPPLY_SEQ_I2S_MCLK_EN,
+ SND_SOC_NOPM, 0, 0,
+ mtk_mclk_en_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+ /* apll */
+ SND_SOC_DAPM_SUPPLY_S(APLL1_W_NAME, SUPPLY_SEQ_APLL,
+ SND_SOC_NOPM, 0, 0,
+ mtk_apll_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY_S(APLL2_W_NAME, SUPPLY_SEQ_APLL,
+ SND_SOC_NOPM, 0, 0,
+ mtk_apll_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+ /* allow i2s on without codec on */
+ SND_SOC_DAPM_OUTPUT("I2S_DUMMY_OUT"),
+ SND_SOC_DAPM_MUX("I2S1_Out_Mux",
+ SND_SOC_NOPM, 0, 0, &i2s1_out_mux_control),
+ SND_SOC_DAPM_MUX("I2S3_Out_Mux",
+ SND_SOC_NOPM, 0, 0, &i2s3_out_mux_control),
+ SND_SOC_DAPM_INPUT("I2S_DUMMY_IN"),
+ SND_SOC_DAPM_MUX("I2S0_In_Mux",
+ SND_SOC_NOPM, 0, 0, &i2s0_in_mux_control),
+ SND_SOC_DAPM_MUX("I2S2_In_Mux",
+ SND_SOC_NOPM, 0, 0, &i2s2_in_mux_control),
+
+ /* i2s in lpbk */
+ SND_SOC_DAPM_MUX("I2S0_Lpbk_Mux",
+ SND_SOC_NOPM, 0, 0, &i2s0_lpbk_mux_control),
+ SND_SOC_DAPM_MUX("I2S2_Lpbk_Mux",
+ SND_SOC_NOPM, 0, 0, &i2s2_lpbk_mux_control),
+};
+
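+/* connect only when the source enable widget belongs to the I2S shared with this one */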
+static int mtk_afe_i2s_share_connect(struct snd_soc_dapm_widget *source,
+ struct snd_soc_dapm_widget *sink)
+{
+ struct snd_soc_dapm_widget *w = sink;
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mtk_afe_i2s_priv *i2s_priv;
+
+ i2s_priv = get_i2s_priv_by_name(afe, sink->name);
+ if (i2s_priv->share_i2s_id < 0)
+ return 0;
+
+ return i2s_priv->share_i2s_id == get_i2s_id_by_name(afe, source->name);
+}
+
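+/* enable the HD supply for this I2S, or its shared I2S, only when low jitter is selected */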
+static int mtk_afe_i2s_hd_connect(struct snd_soc_dapm_widget *source,
+ struct snd_soc_dapm_widget *sink)
+{
+ struct snd_soc_dapm_widget *w = sink;
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mtk_afe_i2s_priv *i2s_priv;
+
+ i2s_priv = get_i2s_priv_by_name(afe, sink->name);
+ if (get_i2s_id_by_name(afe, sink->name) ==
+ get_i2s_id_by_name(afe, source->name))
+ return i2s_priv->low_jitter_en;
+
+ /* check if share i2s need hd en */
+ if (i2s_priv->share_i2s_id < 0)
+ return 0;
+
+ if (i2s_priv->share_i2s_id == get_i2s_id_by_name(afe, source->name))
+ return i2s_priv->low_jitter_en;
+
+ return 0;
+}
+
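+/* connect only the APLL that matches the one required by the current I2S rate */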
+static int mtk_afe_i2s_apll_connect(struct snd_soc_dapm_widget *source,
+ struct snd_soc_dapm_widget *sink)
+{
+ struct snd_soc_dapm_widget *w = sink;
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mtk_afe_i2s_priv *i2s_priv;
+ int cur_apll;
+ int i2s_need_apll;
+
+ i2s_priv = get_i2s_priv_by_name(afe, w->name);
+ /* which apll */
+ cur_apll = mt8186_get_apll_by_name(afe, source->name);
+ /* choose APLL from i2s rate */
+ i2s_need_apll = mt8186_get_apll_by_rate(afe, i2s_priv->rate);
+
+ return (i2s_need_apll == cur_apll) ? 1 : 0;
+}
+
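+/* enable MCLK for this I2S, or its shared I2S, only when an mclk rate has been set */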
+static int mtk_afe_i2s_mclk_connect(struct snd_soc_dapm_widget *source,
+ struct snd_soc_dapm_widget *sink)
+{
+ struct snd_soc_dapm_widget *w = sink;
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mtk_afe_i2s_priv *i2s_priv;
+
+ i2s_priv = get_i2s_priv_by_name(afe, sink->name);
+ if (get_i2s_id_by_name(afe, sink->name) ==
+ get_i2s_id_by_name(afe, source->name))
+ return (i2s_priv->mclk_rate > 0) ? 1 : 0;
+
+ /* check if share i2s need mclk */
+ if (i2s_priv->share_i2s_id < 0)
+ return 0;
+
+ if (i2s_priv->share_i2s_id == get_i2s_id_by_name(afe, source->name))
+ return (i2s_priv->mclk_rate > 0) ? 1 : 0;
+
+ return 0;
+}
+
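+/* connect only the APLL chosen for the MCLK in set_sysclk */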
+static int mtk_afe_mclk_apll_connect(struct snd_soc_dapm_widget *source,
+ struct snd_soc_dapm_widget *sink)
+{
+ struct snd_soc_dapm_widget *w = sink;
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mtk_afe_i2s_priv *i2s_priv;
+ int cur_apll;
+
+ i2s_priv = get_i2s_priv_by_name(afe, w->name);
+ /* which apll */
+ cur_apll = mt8186_get_apll_by_name(afe, source->name);
+
+ return (i2s_priv->mclk_apll == cur_apll) ? 1 : 0;
+}
+
+static const struct snd_soc_dapm_route mtk_dai_i2s_routes[] = {
+ {"Connsys I2S", NULL, "CONNSYS"},
+
+ /* i2s0 */
+ {"I2S0", NULL, "I2S0_EN"},
+ {"I2S0", NULL, "I2S1_EN", mtk_afe_i2s_share_connect},
+ {"I2S0", NULL, "I2S2_EN", mtk_afe_i2s_share_connect},
+ {"I2S0", NULL, "I2S3_EN", mtk_afe_i2s_share_connect},
+
+ {"I2S0", NULL, I2S0_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {"I2S0", NULL, I2S1_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {"I2S0", NULL, I2S2_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {"I2S0", NULL, I2S3_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {I2S0_HD_EN_W_NAME, NULL, APLL1_W_NAME, mtk_afe_i2s_apll_connect},
+ {I2S0_HD_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_i2s_apll_connect},
+
+ {"I2S0", NULL, I2S0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {"I2S0", NULL, I2S1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {"I2S0", NULL, I2S2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {"I2S0", NULL, I2S3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {I2S0_MCLK_EN_W_NAME, NULL, APLL1_W_NAME, mtk_afe_mclk_apll_connect},
+ {I2S0_MCLK_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_mclk_apll_connect},
+
+ /* i2s1 */
+ {"I2S1_CH1", "DL1_CH1 Switch", "DL1"},
+ {"I2S1_CH2", "DL1_CH2 Switch", "DL1"},
+
+ {"I2S1_CH1", "DL2_CH1 Switch", "DL2"},
+ {"I2S1_CH2", "DL2_CH2 Switch", "DL2"},
+
+ {"I2S1_CH1", "DL3_CH1 Switch", "DL3"},
+ {"I2S1_CH2", "DL3_CH2 Switch", "DL3"},
+
+ {"I2S1_CH1", "DL12_CH1 Switch", "DL12"},
+ {"I2S1_CH2", "DL12_CH2 Switch", "DL12"},
+
+ {"I2S1_CH1", "DL12_CH3 Switch", "DL12"},
+ {"I2S1_CH2", "DL12_CH4 Switch", "DL12"},
+
+ {"I2S1_CH1", "DL6_CH1 Switch", "DL6"},
+ {"I2S1_CH2", "DL6_CH2 Switch", "DL6"},
+
+ {"I2S1_CH1", "DL4_CH1 Switch", "DL4"},
+ {"I2S1_CH2", "DL4_CH2 Switch", "DL4"},
+
+ {"I2S1_CH1", "DL5_CH1 Switch", "DL5"},
+ {"I2S1_CH2", "DL5_CH2 Switch", "DL5"},
+
+ {"I2S1_CH1", "DL8_CH1 Switch", "DL8"},
+ {"I2S1_CH2", "DL8_CH2 Switch", "DL8"},
+
+ {"I2S1", NULL, "I2S1_CH1"},
+ {"I2S1", NULL, "I2S1_CH2"},
+
+ {"I2S1", NULL, "I2S0_EN", mtk_afe_i2s_share_connect},
+ {"I2S1", NULL, "I2S1_EN"},
+ {"I2S1", NULL, "I2S2_EN", mtk_afe_i2s_share_connect},
+ {"I2S1", NULL, "I2S3_EN", mtk_afe_i2s_share_connect},
+
+ {"I2S1", NULL, I2S0_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {"I2S1", NULL, I2S1_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {"I2S1", NULL, I2S2_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {"I2S1", NULL, I2S3_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {I2S1_HD_EN_W_NAME, NULL, APLL1_W_NAME, mtk_afe_i2s_apll_connect},
+ {I2S1_HD_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_i2s_apll_connect},
+
+ {"I2S1", NULL, I2S0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {"I2S1", NULL, I2S1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {"I2S1", NULL, I2S2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {"I2S1", NULL, I2S3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {I2S1_MCLK_EN_W_NAME, NULL, APLL1_W_NAME, mtk_afe_mclk_apll_connect},
+ {I2S1_MCLK_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_mclk_apll_connect},
+
+ /* i2s2 */
+ {"I2S2", NULL, "I2S0_EN", mtk_afe_i2s_share_connect},
+ {"I2S2", NULL, "I2S1_EN", mtk_afe_i2s_share_connect},
+ {"I2S2", NULL, "I2S2_EN"},
+ {"I2S2", NULL, "I2S3_EN", mtk_afe_i2s_share_connect},
+
+ {"I2S2", NULL, I2S0_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {"I2S2", NULL, I2S1_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {"I2S2", NULL, I2S2_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {"I2S2", NULL, I2S3_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {I2S2_HD_EN_W_NAME, NULL, APLL1_W_NAME, mtk_afe_i2s_apll_connect},
+ {I2S2_HD_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_i2s_apll_connect},
+
+ {"I2S2", NULL, I2S0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {"I2S2", NULL, I2S1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {"I2S2", NULL, I2S2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {"I2S2", NULL, I2S3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {I2S2_MCLK_EN_W_NAME, NULL, APLL1_W_NAME, mtk_afe_mclk_apll_connect},
+ {I2S2_MCLK_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_mclk_apll_connect},
+
+ /* i2s3 */
+ {"I2S3_CH1", "DL1_CH1 Switch", "DL1"},
+ {"I2S3_CH2", "DL1_CH2 Switch", "DL1"},
+
+ {"I2S3_CH1", "DL2_CH1 Switch", "DL2"},
+ {"I2S3_CH2", "DL2_CH2 Switch", "DL2"},
+
+ {"I2S3_CH1", "DL3_CH1 Switch", "DL3"},
+ {"I2S3_CH2", "DL3_CH2 Switch", "DL3"},
+
+ {"I2S3_CH1", "DL12_CH1 Switch", "DL12"},
+ {"I2S3_CH2", "DL12_CH2 Switch", "DL12"},
+
+ {"I2S3_CH1", "DL12_CH3 Switch", "DL12"},
+ {"I2S3_CH2", "DL12_CH4 Switch", "DL12"},
+
+ {"I2S3_CH1", "DL6_CH1 Switch", "DL6"},
+ {"I2S3_CH2", "DL6_CH2 Switch", "DL6"},
+
+ {"I2S3_CH1", "DL4_CH1 Switch", "DL4"},
+ {"I2S3_CH2", "DL4_CH2 Switch", "DL4"},
+
+ {"I2S3_CH1", "DL5_CH1 Switch", "DL5"},
+ {"I2S3_CH2", "DL5_CH2 Switch", "DL5"},
+
+ {"I2S3_CH1", "DL8_CH1 Switch", "DL8"},
+ {"I2S3_CH2", "DL8_CH2 Switch", "DL8"},
+
+ {"I2S3", NULL, "I2S3_CH1"},
+ {"I2S3", NULL, "I2S3_CH2"},
+
+ {"I2S3", NULL, "I2S0_EN", mtk_afe_i2s_share_connect},
+ {"I2S3", NULL, "I2S1_EN", mtk_afe_i2s_share_connect},
+ {"I2S3", NULL, "I2S2_EN", mtk_afe_i2s_share_connect},
+ {"I2S3", NULL, "I2S3_EN"},
+
+ {"I2S3", NULL, I2S0_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {"I2S3", NULL, I2S1_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {"I2S3", NULL, I2S2_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {"I2S3", NULL, I2S3_HD_EN_W_NAME, mtk_afe_i2s_hd_connect},
+ {I2S3_HD_EN_W_NAME, NULL, APLL1_W_NAME, mtk_afe_i2s_apll_connect},
+ {I2S3_HD_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_i2s_apll_connect},
+
+ {"I2S3", NULL, I2S0_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {"I2S3", NULL, I2S1_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {"I2S3", NULL, I2S2_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {"I2S3", NULL, I2S3_MCLK_EN_W_NAME, mtk_afe_i2s_mclk_connect},
+ {I2S3_MCLK_EN_W_NAME, NULL, APLL1_W_NAME, mtk_afe_mclk_apll_connect},
+ {I2S3_MCLK_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_mclk_apll_connect},
+
+ /* allow i2s on without codec on */
+ {"I2S0", NULL, "I2S0_In_Mux"},
+ {"I2S0_In_Mux", "Dummy_Widget", "I2S_DUMMY_IN"},
+
+ {"I2S1_Out_Mux", "Dummy_Widget", "I2S1"},
+ {"I2S_DUMMY_OUT", NULL, "I2S1_Out_Mux"},
+
+ {"I2S2", NULL, "I2S2_In_Mux"},
+ {"I2S2_In_Mux", "Dummy_Widget", "I2S_DUMMY_IN"},
+
+ {"I2S3_Out_Mux", "Dummy_Widget", "I2S3"},
+ {"I2S_DUMMY_OUT", NULL, "I2S3_Out_Mux"},
+
+ /* i2s in lpbk */
+ {"I2S0_Lpbk_Mux", "Lpbk", "I2S3"},
+ {"I2S2_Lpbk_Mux", "Lpbk", "I2S1"},
+ {"I2S0", NULL, "I2S0_Lpbk_Mux"},
+ {"I2S2", NULL, "I2S2_Lpbk_Mux"},
+};
+
+/* dai ops */
+static int mtk_dai_connsys_i2s_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+ unsigned int rate = params_rate(params);
+ unsigned int rate_reg = mt8186_rate_transform(afe->dev,
+ rate, dai->id);
+ unsigned int i2s_con = 0;
+
+ dev_dbg(afe->dev, "%s(), id %d, stream %d, rate %d\n",
+ __func__, dai->id, substream->stream, rate);
+
+ /* non-inverse, i2s mode, slave, 16bits, from connsys */
+ i2s_con |= 0 << INV_PAD_CTRL_SFT;
+ i2s_con |= I2S_FMT_I2S << I2S_FMT_SFT;
+ i2s_con |= 1 << I2S_SRC_SFT;
+ i2s_con |= get_i2s_wlen(SNDRV_PCM_FORMAT_S16_LE) << I2S_WLEN_SFT;
+ i2s_con |= 0 << I2SIN_PAD_SEL_SFT;
+ regmap_write(afe->regmap, AFE_CONNSYS_I2S_CON, i2s_con);
+
+ /* use asrc */
+ regmap_update_bits(afe->regmap, AFE_CONNSYS_I2S_CON,
+ I2S_BYPSRC_MASK_SFT, 0);
+
+ /* slave mode, set i2s for asrc */
+ regmap_update_bits(afe->regmap, AFE_CONNSYS_I2S_CON,
+ I2S_MODE_MASK_SFT, rate_reg << I2S_MODE_SFT);
+
+ if (rate == 44100)
+ regmap_write(afe->regmap, AFE_ASRC_2CH_CON3, 0x1b9000);
+ else if (rate == 32000)
+ regmap_write(afe->regmap, AFE_ASRC_2CH_CON3, 0x140000);
+ else
+ regmap_write(afe->regmap, AFE_ASRC_2CH_CON3, 0x1e0000);
+
+ /* Calibration setting */
+ regmap_write(afe->regmap, AFE_ASRC_2CH_CON4, 0x140000);
+ regmap_write(afe->regmap, AFE_ASRC_2CH_CON9, 0x36000);
+ regmap_write(afe->regmap, AFE_ASRC_2CH_CON10, 0x2fc00);
+ regmap_write(afe->regmap, AFE_ASRC_2CH_CON6, 0x7ef4);
+ regmap_write(afe->regmap, AFE_ASRC_2CH_CON5, 0xff5986);
+
+ /* 0:Stereo 1:Mono */
+ regmap_update_bits(afe->regmap, AFE_ASRC_2CH_CON2,
+ CHSET_IS_MONO_MASK_SFT, 0);
+
+ return 0;
+}
+
+static int mtk_dai_connsys_i2s_trigger(struct snd_pcm_substream *substream,
+ int cmd, struct snd_soc_dai *dai)
+{
+ struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+
+ dev_dbg(afe->dev, "%s(), cmd %d, stream %d\n",
+ __func__, cmd, substream->stream);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ /* i2s enable */
+ regmap_update_bits(afe->regmap,
+ AFE_CONNSYS_I2S_CON,
+ I2S_EN_MASK_SFT,
+ BIT(I2S_EN_SFT));
+
+ /* calibrator enable */
+ regmap_update_bits(afe->regmap,
+ AFE_ASRC_2CH_CON5,
+ CALI_EN_MASK_SFT,
+ BIT(CALI_EN_SFT));
+
+ /* asrc enable */
+ regmap_update_bits(afe->regmap,
+ AFE_ASRC_2CH_CON0,
+ CON0_CHSET_STR_CLR_MASK_SFT,
+ BIT(CON0_CHSET_STR_CLR_SFT));
+ regmap_update_bits(afe->regmap,
+ AFE_ASRC_2CH_CON0,
+ CON0_ASM_ON_MASK_SFT,
+ BIT(CON0_ASM_ON_SFT));
+
+ afe_priv->dai_on[dai->id] = true;
+ return 0;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ regmap_update_bits(afe->regmap, AFE_ASRC_2CH_CON0,
+ CON0_ASM_ON_MASK_SFT, 0);
+ regmap_update_bits(afe->regmap, AFE_ASRC_2CH_CON5,
+ CALI_EN_MASK_SFT, 0);
+
+ /* i2s disable */
+ regmap_update_bits(afe->regmap, AFE_CONNSYS_I2S_CON,
+ I2S_EN_MASK_SFT, 0);
+
+ /* bypass asrc */
+ regmap_update_bits(afe->regmap, AFE_CONNSYS_I2S_CON,
+ I2S_BYPSRC_MASK_SFT, BIT(I2S_BYPSRC_SFT));
+
+ afe_priv->dai_on[dai->id] = false;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static const struct snd_soc_dai_ops mtk_dai_connsys_i2s_ops = {
+ .hw_params = mtk_dai_connsys_i2s_hw_params,
+ .trigger = mtk_dai_connsys_i2s_trigger,
+};
+
+/* i2s */
+static int mtk_dai_i2s_config(struct mtk_base_afe *afe,
+ struct snd_pcm_hw_params *params,
+ int i2s_id)
+{
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ struct mtk_afe_i2s_priv *i2s_priv = afe_priv->dai_priv[i2s_id];
+
+ unsigned int rate = params_rate(params);
+ unsigned int rate_reg = mt8186_rate_transform(afe->dev,
+ rate, i2s_id);
+ snd_pcm_format_t format = params_format(params);
+ unsigned int i2s_con = 0;
+ int ret;
+
+ dev_dbg(afe->dev, "%s(), id %d, rate %d, format %d\n",
+ __func__, i2s_id, rate, format);
+
+ i2s_priv->rate = rate;
+
+ switch (i2s_id) {
+ case MT8186_DAI_I2S_0:
+ i2s_con = I2S_IN_PAD_IO_MUX << I2SIN_PAD_SEL_SFT;
+ i2s_con |= rate_reg << I2S_OUT_MODE_SFT;
+ i2s_con |= I2S_FMT_I2S << I2S_FMT_SFT;
+ i2s_con |= get_i2s_wlen(format) << I2S_WLEN_SFT;
+ regmap_update_bits(afe->regmap, AFE_I2S_CON,
+ 0xffffeffa, i2s_con);
+ break;
+ case MT8186_DAI_I2S_1:
+ i2s_con = I2S1_SEL_O28_O29 << I2S2_SEL_O03_O04_SFT;
+ i2s_con |= rate_reg << I2S2_OUT_MODE_SFT;
+ i2s_con |= I2S_FMT_I2S << I2S2_FMT_SFT;
+ i2s_con |= get_i2s_wlen(format) << I2S2_WLEN_SFT;
+ regmap_update_bits(afe->regmap, AFE_I2S_CON1,
+ 0xffffeffa, i2s_con);
+ break;
+ case MT8186_DAI_I2S_2:
+ i2s_con = 8 << I2S3_UPDATE_WORD_SFT;
+ i2s_con |= rate_reg << I2S3_OUT_MODE_SFT;
+ i2s_con |= I2S_FMT_I2S << I2S3_FMT_SFT;
+ i2s_con |= get_i2s_wlen(format) << I2S3_WLEN_SFT;
+ regmap_update_bits(afe->regmap, AFE_I2S_CON2,
+ 0xffffeffa, i2s_con);
+ break;
+ case MT8186_DAI_I2S_3:
+ i2s_con = rate_reg << I2S4_OUT_MODE_SFT;
+ i2s_con |= I2S_FMT_I2S << I2S4_FMT_SFT;
+ i2s_con |= get_i2s_wlen(format) << I2S4_WLEN_SFT;
+ regmap_update_bits(afe->regmap, AFE_I2S_CON3,
+ 0xffffeffa, i2s_con);
+ break;
+ default:
+ dev_err(afe->dev, "%s(), id %d not supported\n",
+ __func__, i2s_id);
+ return -EINVAL;
+ }
+
+ /* set share i2s */
+ if (i2s_priv && i2s_priv->share_i2s_id >= 0) {
+ ret = mtk_dai_i2s_config(afe, params, i2s_priv->share_i2s_id);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mtk_dai_i2s_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+
+ return mtk_dai_i2s_config(afe, params, dai->id);
+}
+
+static int mtk_dai_i2s_set_sysclk(struct snd_soc_dai *dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct mtk_base_afe *afe = dev_get_drvdata(dai->dev);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ struct mtk_afe_i2s_priv *i2s_priv = afe_priv->dai_priv[dai->id];
+ int apll;
+ int apll_rate;
+
+ if (dir != SND_SOC_CLOCK_OUT) {
+ dev_err(afe->dev, "%s(), dir != SND_SOC_CLOCK_OUT\n", __func__);
+ return -EINVAL;
+ }
+
+ dev_dbg(afe->dev, "%s(), freq %d\n", __func__, freq);
+
+ apll = mt8186_get_apll_by_rate(afe, freq);
+ apll_rate = mt8186_get_apll_rate(afe, apll);
+
+ if (freq > apll_rate) {
+ dev_err(afe->dev, "%s(), freq > apll rate\n", __func__);
+ return -EINVAL;
+ }
+
+ if (apll_rate % freq != 0) {
+ dev_err(afe->dev, "%s(), APLL cannot generate %u Hz\n", __func__, freq);
+ return -EINVAL;
+ }
+
+ i2s_priv->mclk_rate = freq;
+ i2s_priv->mclk_apll = apll;
+
+ if (i2s_priv->share_i2s_id > 0) {
+ struct mtk_afe_i2s_priv *share_i2s_priv;
+
+ share_i2s_priv = afe_priv->dai_priv[i2s_priv->share_i2s_id];
+ if (!share_i2s_priv) {
+ dev_err(afe->dev, "%s(), share_i2s_priv == NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ share_i2s_priv->mclk_rate = i2s_priv->mclk_rate;
+ share_i2s_priv->mclk_apll = i2s_priv->mclk_apll;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops mtk_dai_i2s_ops = {
+ .hw_params = mtk_dai_i2s_hw_params,
+ .set_sysclk = mtk_dai_i2s_set_sysclk,
+};
+
+/* dai driver */
+#define MTK_CONNSYS_I2S_RATES (SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000)
+
+#define MTK_I2S_RATES (SNDRV_PCM_RATE_8000_48000 |\
+ SNDRV_PCM_RATE_88200 |\
+ SNDRV_PCM_RATE_96000 |\
+ SNDRV_PCM_RATE_176400 |\
+ SNDRV_PCM_RATE_192000)
+
+#define MTK_I2S_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+ SNDRV_PCM_FMTBIT_S24_LE |\
+ SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_driver mtk_dai_i2s_driver[] = {
+ {
+ .name = "CONNSYS_I2S",
+ .id = MT8186_DAI_CONNSYS_I2S,
+ .capture = {
+ .stream_name = "Connsys I2S",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_CONNSYS_I2S_RATES,
+ .formats = MTK_I2S_FORMATS,
+ },
+ .ops = &mtk_dai_connsys_i2s_ops,
+ },
+ {
+ .name = "I2S0",
+ .id = MT8186_DAI_I2S_0,
+ .capture = {
+ .stream_name = "I2S0",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_I2S_RATES,
+ .formats = MTK_I2S_FORMATS,
+ },
+ .ops = &mtk_dai_i2s_ops,
+ },
+ {
+ .name = "I2S1",
+ .id = MT8186_DAI_I2S_1,
+ .playback = {
+ .stream_name = "I2S1",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_I2S_RATES,
+ .formats = MTK_I2S_FORMATS,
+ },
+ .ops = &mtk_dai_i2s_ops,
+ },
+ {
+ .name = "I2S2",
+ .id = MT8186_DAI_I2S_2,
+ .capture = {
+ .stream_name = "I2S2",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_I2S_RATES,
+ .formats = MTK_I2S_FORMATS,
+ },
+ .ops = &mtk_dai_i2s_ops,
+ },
+ {
+ .name = "I2S3",
+ .id = MT8186_DAI_I2S_3,
+ .playback = {
+ .stream_name = "I2S3",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_I2S_RATES,
+ .formats = MTK_I2S_FORMATS,
+ },
+ .ops = &mtk_dai_i2s_ops,
+ }
+};
+
+/* this enum is merely for declaring mtk_afe_i2s_priv */
+enum {
+ DAI_I2S0 = 0,
+ DAI_I2S1,
+ DAI_I2S2,
+ DAI_I2S3,
+ DAI_I2S_NUM,
+};
+
+static const struct mtk_afe_i2s_priv mt8186_i2s_priv[DAI_I2S_NUM] = {
+ [DAI_I2S0] = {
+ .id = MT8186_DAI_I2S_0,
+ .mclk_id = MT8186_I2S0_MCK,
+ .share_property_name = "i2s0-share",
+ .share_i2s_id = -1,
+ },
+ [DAI_I2S1] = {
+ .id = MT8186_DAI_I2S_1,
+ .mclk_id = MT8186_I2S1_MCK,
+ .share_property_name = "i2s1-share",
+ .share_i2s_id = -1,
+ },
+ [DAI_I2S2] = {
+ .id = MT8186_DAI_I2S_2,
+ .mclk_id = MT8186_I2S2_MCK,
+ .share_property_name = "i2s2-share",
+ .share_i2s_id = -1,
+ },
+ [DAI_I2S3] = {
+ .id = MT8186_DAI_I2S_3,
+ /* clock gate naming is hf_faud_i2s4_m_ck */
+ .mclk_id = MT8186_I2S4_MCK,
+ .share_property_name = "i2s3-share",
+ .share_i2s_id = -1,
+ }
+};
+
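+/* read the i2s share properties from the device tree and record the paired I2S id */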
+static int mt8186_dai_i2s_get_share(struct mtk_base_afe *afe)
+{
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ const struct device_node *of_node = afe->dev->of_node;
+ const char *of_str;
+ const char *property_name;
+ struct mtk_afe_i2s_priv *i2s_priv;
+ int i;
+
+ for (i = 0; i < DAI_I2S_NUM; i++) {
+ i2s_priv = afe_priv->dai_priv[mt8186_i2s_priv[i].id];
+ property_name = mt8186_i2s_priv[i].share_property_name;
+ if (of_property_read_string(of_node, property_name, &of_str))
+ continue;
+ i2s_priv->share_i2s_id = get_i2s_id_by_name(afe, of_str);
+ }
+
+ return 0;
+}
+
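+/* allocate per-DAI private data and seed it from the mt8186_i2s_priv defaults */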
+static int mt8186_dai_i2s_set_priv(struct mtk_base_afe *afe)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < DAI_I2S_NUM; i++) {
+ ret = mt8186_dai_set_priv(afe, mt8186_i2s_priv[i].id,
+ sizeof(struct mtk_afe_i2s_priv),
+ &mt8186_i2s_priv[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int mt8186_dai_i2s_register(struct mtk_base_afe *afe)
+{
+ struct mtk_base_afe_dai *dai;
+ int ret;
+
+ dai = devm_kzalloc(afe->dev, sizeof(*dai), GFP_KERNEL);
+ if (!dai)
+ return -ENOMEM;
+
+ list_add(&dai->list, &afe->sub_dais);
+
+ dai->dai_drivers = mtk_dai_i2s_driver;
+ dai->num_dai_drivers = ARRAY_SIZE(mtk_dai_i2s_driver);
+
+ dai->controls = mtk_dai_i2s_controls;
+ dai->num_controls = ARRAY_SIZE(mtk_dai_i2s_controls);
+ dai->dapm_widgets = mtk_dai_i2s_widgets;
+ dai->num_dapm_widgets = ARRAY_SIZE(mtk_dai_i2s_widgets);
+ dai->dapm_routes = mtk_dai_i2s_routes;
+ dai->num_dapm_routes = ARRAY_SIZE(mtk_dai_i2s_routes);
+
+ /* set all dai i2s private data */
+ ret = mt8186_dai_i2s_set_priv(afe);
+ if (ret)
+ return ret;
+
+ /* parse share i2s */
+ ret = mt8186_dai_i2s_get_share(afe);
+ if (ret)
+ return ret;
+
+ return 0;
+}
diff --git a/sound/soc/mediatek/mt8186/mt8186-dai-pcm.c b/sound/soc/mediatek/mt8186/mt8186-dai-pcm.c
new file mode 100644
index 000000000000..41221a66111c
--- /dev/null
+++ b/sound/soc/mediatek/mt8186/mt8186-dai-pcm.c
@@ -0,0 +1,418 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// MediaTek ALSA SoC Audio DAI PCM Control
+//
+// Copyright (c) 2022 MediaTek Inc.
+// Author: Jiaxin Yu <jiaxin.yu@mediatek.com>
+
+#include <linux/regmap.h>
+#include <sound/pcm_params.h>
+#include "mt8186-afe-common.h"
+#include "mt8186-afe-gpio.h"
+#include "mt8186-interconnection.h"
+
+struct mtk_afe_pcm_priv {
+ unsigned int id;
+ unsigned int fmt;
+ unsigned int bck_invert;
+ unsigned int lck_invert;
+};
+
+enum aud_tx_lch_rpt {
+ AUD_TX_LCH_RPT_NO_REPEAT = 0,
+ AUD_TX_LCH_RPT_REPEAT = 1
+};
+
+enum aud_vbt_16k_mode {
+ AUD_VBT_16K_MODE_DISABLE = 0,
+ AUD_VBT_16K_MODE_ENABLE = 1
+};
+
+enum aud_ext_modem {
+ AUD_EXT_MODEM_SELECT_INTERNAL = 0,
+ AUD_EXT_MODEM_SELECT_EXTERNAL = 1
+};
+
+enum aud_pcm_sync_type {
+ /* bck sync length = 1 */
+ AUD_PCM_ONE_BCK_CYCLE_SYNC = 0,
+ /* bck sync length = PCM_INTF_CON1[9:13] */
+ AUD_PCM_EXTENDED_BCK_CYCLE_SYNC = 1
+};
+
+enum aud_bt_mode {
+ AUD_BT_MODE_DUAL_MIC_ON_TX = 0,
+ AUD_BT_MODE_SINGLE_MIC_ON_TX = 1
+};
+
+enum aud_pcm_afifo_src {
+ /* slave mode & external modem use different crystals */
+ AUD_PCM_AFIFO_ASRC = 0,
+ /* slave mode & external modem use the same crystal */
+ AUD_PCM_AFIFO_AFIFO = 1
+};
+
+enum aud_pcm_clock_source {
+ AUD_PCM_CLOCK_MASTER_MODE = 0,
+ AUD_PCM_CLOCK_SLAVE_MODE = 1
+};
+
+enum aud_pcm_wlen {
+ AUD_PCM_WLEN_PCM_32_BCK_CYCLES = 0,
+ AUD_PCM_WLEN_PCM_64_BCK_CYCLES = 1
+};
+
+enum aud_pcm_24bit {
+ AUD_PCM_24BIT_PCM_16_BITS = 0,
+ AUD_PCM_24BIT_PCM_24_BITS = 1
+};
+
+enum aud_pcm_mode {
+ AUD_PCM_MODE_PCM_MODE_8K = 0,
+ AUD_PCM_MODE_PCM_MODE_16K = 1,
+ AUD_PCM_MODE_PCM_MODE_32K = 2,
+ AUD_PCM_MODE_PCM_MODE_48K = 3,
+};
+
+enum aud_pcm_fmt {
+ AUD_PCM_FMT_I2S = 0,
+ AUD_PCM_FMT_EIAJ = 1,
+ AUD_PCM_FMT_PCM_MODE_A = 2,
+ AUD_PCM_FMT_PCM_MODE_B = 3
+};
+
+enum aud_bclk_out_inv {
+ AUD_BCLK_OUT_INV_NO_INVERSE = 0,
+ AUD_BCLK_OUT_INV_INVERSE = 1
+};
+
+enum aud_lrclk_out_inv {
+ AUD_LRCLK_OUT_INV_NO_INVERSE = 0,
+ AUD_LRCLK_OUT_INV_INVERSE = 1
+};
+
+enum aud_pcm_en {
+ AUD_PCM_EN_DISABLE = 0,
+ AUD_PCM_EN_ENABLE = 1
+};
+
+/* dai component */
+static const struct snd_kcontrol_new mtk_pcm_1_playback_ch1_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH1 Switch", AFE_CONN7,
+ I_ADDA_UL_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1 Switch", AFE_CONN7,
+ I_DL2_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH1 Switch", AFE_CONN7_1,
+ I_DL4_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new mtk_pcm_1_playback_ch2_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("ADDA_UL_CH2 Switch", AFE_CONN8,
+ I_ADDA_UL_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH2 Switch", AFE_CONN8,
+ I_DL2_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH2 Switch", AFE_CONN8_1,
+ I_DL4_CH2, 1, 0),
+};
+
+static int mtk_pcm_en_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+
+ dev_dbg(afe->dev, "%s(), name %s, event 0x%x\n",
+ __func__, w->name, event);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ mt8186_afe_gpio_request(afe->dev, true, MT8186_DAI_PCM, 0);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ mt8186_afe_gpio_request(afe->dev, false, MT8186_DAI_PCM, 0);
+ break;
+ }
+
+ return 0;
+}
+
+/* pcm in/out lpbk */
+static const char * const pcm_lpbk_mux_map[] = {
+ "Normal", "Lpbk",
+};
+
+static int pcm_lpbk_mux_map_value[] = {
+ 0, 1,
+};
+
+static SOC_VALUE_ENUM_SINGLE_AUTODISABLE_DECL(pcm_in_lpbk_mux_map_enum,
+ PCM_INTF_CON1,
+ PCM_I2S_PCM_LOOPBACK_SFT,
+ 1,
+ pcm_lpbk_mux_map,
+ pcm_lpbk_mux_map_value);
+
+static const struct snd_kcontrol_new pcm_in_lpbk_mux_control =
+ SOC_DAPM_ENUM("PCM In Lpbk Select", pcm_in_lpbk_mux_map_enum);
+
+static SOC_VALUE_ENUM_SINGLE_AUTODISABLE_DECL(pcm_out_lpbk_mux_map_enum,
+ PCM_INTF_CON1,
+ PCM_I2S_PCM_LOOPBACK_SFT,
+ 1,
+ pcm_lpbk_mux_map,
+ pcm_lpbk_mux_map_value);
+
+static const struct snd_kcontrol_new pcm_out_lpbk_mux_control =
+ SOC_DAPM_ENUM("PCM Out Lpbk Select", pcm_out_lpbk_mux_map_enum);
+
+static const struct snd_soc_dapm_widget mtk_dai_pcm_widgets[] = {
+ /* inter-connections */
+ SND_SOC_DAPM_MIXER("PCM_1_PB_CH1", SND_SOC_NOPM, 0, 0,
+ mtk_pcm_1_playback_ch1_mix,
+ ARRAY_SIZE(mtk_pcm_1_playback_ch1_mix)),
+ SND_SOC_DAPM_MIXER("PCM_1_PB_CH2", SND_SOC_NOPM, 0, 0,
+ mtk_pcm_1_playback_ch2_mix,
+ ARRAY_SIZE(mtk_pcm_1_playback_ch2_mix)),
+
+ SND_SOC_DAPM_SUPPLY("PCM_1_EN",
+ PCM_INTF_CON1, PCM_EN_SFT, 0,
+ mtk_pcm_en_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+ /* pcm in lpbk */
+ SND_SOC_DAPM_MUX("PCM_In_Lpbk_Mux",
+ SND_SOC_NOPM, 0, 0, &pcm_in_lpbk_mux_control),
+
+ /* pcm out lpbk */
+ SND_SOC_DAPM_MUX("PCM_Out_Lpbk_Mux",
+ SND_SOC_NOPM, 0, 0, &pcm_out_lpbk_mux_control),
+};
+
+static const struct snd_soc_dapm_route mtk_dai_pcm_routes[] = {
+ {"PCM 1 Playback", NULL, "PCM_1_PB_CH1"},
+ {"PCM 1 Playback", NULL, "PCM_1_PB_CH2"},
+
+ {"PCM 1 Playback", NULL, "PCM_1_EN"},
+ {"PCM 1 Capture", NULL, "PCM_1_EN"},
+
+ {"PCM_1_PB_CH1", "DL2_CH1 Switch", "DL2"},
+ {"PCM_1_PB_CH2", "DL2_CH2 Switch", "DL2"},
+
+ {"PCM_1_PB_CH1", "DL4_CH1 Switch", "DL4"},
+ {"PCM_1_PB_CH2", "DL4_CH2 Switch", "DL4"},
+
+ /* pcm out lpbk */
+ {"PCM_Out_Lpbk_Mux", "Lpbk", "PCM 1 Playback"},
+ {"I2S0", NULL, "PCM_Out_Lpbk_Mux"},
+
+ /* pcm in lpbk */
+ {"PCM_In_Lpbk_Mux", "Lpbk", "PCM 1 Capture"},
+ {"I2S3", NULL, "PCM_In_Lpbk_Mux"},
+};
+
+/* dai ops */
+static int mtk_dai_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ int pcm_id = dai->id;
+ struct mtk_afe_pcm_priv *pcm_priv = afe_priv->dai_priv[pcm_id];
+ unsigned int rate = params_rate(params);
+ unsigned int rate_reg = mt8186_rate_transform(afe->dev, rate, dai->id);
+ snd_pcm_format_t format = params_format(params);
+ unsigned int data_width =
+ snd_pcm_format_width(format);
+ unsigned int wlen_width =
+ snd_pcm_format_physical_width(format);
+ unsigned int pcm_con = 0;
+
+ dev_dbg(afe->dev, "%s(), id %d, stream %d, widget active p %d, c %d\n",
+ __func__, dai->id, substream->stream, dai->playback_widget->active,
+ dai->capture_widget->active);
+ dev_dbg(afe->dev, "%s(), rate %d, rate_reg %d, data_width %d, wlen_width %d\n",
+ __func__, rate, rate_reg, data_width, wlen_width);
+
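+ /* if either direction is already active, the interface is configured; skip reprogramming */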
+ if (dai->playback_widget->active || dai->capture_widget->active)
+ return 0;
+
+ switch (dai->id) {
+ case MT8186_DAI_PCM:
+ pcm_con |= AUD_TX_LCH_RPT_NO_REPEAT << PCM_TX_LCH_RPT_SFT;
+ pcm_con |= AUD_VBT_16K_MODE_DISABLE << PCM_VBT_16K_MODE_SFT;
+ pcm_con |= AUD_EXT_MODEM_SELECT_EXTERNAL << PCM_EXT_MODEM_SFT;
+ pcm_con |= AUD_PCM_ONE_BCK_CYCLE_SYNC << PCM_SYNC_TYPE_SFT;
+ pcm_con |= AUD_BT_MODE_DUAL_MIC_ON_TX << PCM_BT_MODE_SFT;
+ pcm_con |= AUD_PCM_AFIFO_AFIFO << PCM_BYP_ASRC_SFT;
+ pcm_con |= AUD_PCM_CLOCK_MASTER_MODE << PCM_SLAVE_SFT;
+ pcm_con |= 0 << PCM_SYNC_LENGTH_SFT;
+
+ /* sampling rate */
+ pcm_con |= rate_reg << PCM_MODE_SFT;
+
+ /* format */
+ pcm_con |= pcm_priv->fmt << PCM_FMT_SFT;
+
+ /* 24bit data width */
+ if (data_width > 16)
+ pcm_con |= AUD_PCM_24BIT_PCM_24_BITS << PCM_24BIT_SFT;
+ else
+ pcm_con |= AUD_PCM_24BIT_PCM_16_BITS << PCM_24BIT_SFT;
+
+ /* wlen width */
+ if (wlen_width > 16)
+ pcm_con |= AUD_PCM_WLEN_PCM_64_BCK_CYCLES << PCM_WLEN_SFT;
+ else
+ pcm_con |= AUD_PCM_WLEN_PCM_32_BCK_CYCLES << PCM_WLEN_SFT;
+
+ /* clock invert */
+ pcm_con |= pcm_priv->lck_invert << PCM_SYNC_OUT_INV_SFT;
+ pcm_con |= pcm_priv->bck_invert << PCM_BCLK_OUT_INV_SFT;
+
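+ /* update all fields except bit 0 (PCM enable), which the PCM_1_EN supply widget controls */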
+ regmap_update_bits(afe->regmap, PCM_INTF_CON1, 0xfffffffe, pcm_con);
+ break;
+ default:
+ dev_err(afe->dev, "%s(), id %d not support\n", __func__, dai->id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mtk_dai_pcm_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ struct mtk_afe_pcm_priv *pcm_priv = afe_priv->dai_priv[dai->id];
+
+ /* DAI mode */
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ pcm_priv->fmt = AUD_PCM_FMT_I2S;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ pcm_priv->fmt = AUD_PCM_FMT_EIAJ;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ pcm_priv->fmt = AUD_PCM_FMT_PCM_MODE_A;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ pcm_priv->fmt = AUD_PCM_FMT_PCM_MODE_B;
+ break;
+ default:
+ pcm_priv->fmt = AUD_PCM_FMT_I2S;
+ }
+
+ /* DAI clock inversion */
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ pcm_priv->bck_invert = AUD_BCLK_OUT_INV_NO_INVERSE;
+ pcm_priv->lck_invert = AUD_LRCLK_OUT_INV_NO_INVERSE;
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ pcm_priv->bck_invert = AUD_BCLK_OUT_INV_NO_INVERSE;
+ pcm_priv->lck_invert = AUD_LRCLK_OUT_INV_INVERSE;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ pcm_priv->bck_invert = AUD_BCLK_OUT_INV_INVERSE;
+ pcm_priv->lck_invert = AUD_LRCLK_OUT_INV_NO_INVERSE;
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ pcm_priv->bck_invert = AUD_BCLK_OUT_INV_INVERSE;
+ pcm_priv->lck_invert = AUD_LRCLK_OUT_INV_INVERSE;
+ break;
+ default:
+ pcm_priv->bck_invert = AUD_BCLK_OUT_INV_NO_INVERSE;
+ pcm_priv->lck_invert = AUD_LRCLK_OUT_INV_NO_INVERSE;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops mtk_dai_pcm_ops = {
+ .hw_params = mtk_dai_pcm_hw_params,
+ .set_fmt = mtk_dai_pcm_set_fmt,
+};
+
+/* dai driver */
+#define MTK_PCM_RATES (SNDRV_PCM_RATE_8000 |\
+ SNDRV_PCM_RATE_16000 |\
+ SNDRV_PCM_RATE_32000 |\
+ SNDRV_PCM_RATE_48000)
+
+#define MTK_PCM_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+ SNDRV_PCM_FMTBIT_S24_LE |\
+ SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_driver mtk_dai_pcm_driver[] = {
+ {
+ .name = "PCM 1",
+ .id = MT8186_DAI_PCM,
+ .playback = {
+ .stream_name = "PCM 1 Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_PCM_RATES,
+ .formats = MTK_PCM_FORMATS,
+ },
+ .capture = {
+ .stream_name = "PCM 1 Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_PCM_RATES,
+ .formats = MTK_PCM_FORMATS,
+ },
+ .ops = &mtk_dai_pcm_ops,
+ .symmetric_rate = 1,
+ .symmetric_sample_bits = 1,
+ },
+};
+
+static struct mtk_afe_pcm_priv *init_pcm_priv_data(struct mtk_base_afe *afe)
+{
+ struct mtk_afe_pcm_priv *pcm_priv;
+
+ pcm_priv = devm_kzalloc(afe->dev, sizeof(struct mtk_afe_pcm_priv),
+ GFP_KERNEL);
+ if (!pcm_priv)
+ return NULL;
+
+ pcm_priv->id = MT8186_DAI_PCM;
+ pcm_priv->fmt = AUD_PCM_FMT_I2S;
+ pcm_priv->bck_invert = AUD_BCLK_OUT_INV_NO_INVERSE;
+ pcm_priv->lck_invert = AUD_LRCLK_OUT_INV_NO_INVERSE;
+
+ return pcm_priv;
+}
+
+int mt8186_dai_pcm_register(struct mtk_base_afe *afe)
+{
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ struct mtk_afe_pcm_priv *pcm_priv;
+ struct mtk_base_afe_dai *dai;
+
+ dai = devm_kzalloc(afe->dev, sizeof(*dai), GFP_KERNEL);
+ if (!dai)
+ return -ENOMEM;
+
+ list_add(&dai->list, &afe->sub_dais);
+
+ dai->dai_drivers = mtk_dai_pcm_driver;
+ dai->num_dai_drivers = ARRAY_SIZE(mtk_dai_pcm_driver);
+
+ dai->dapm_widgets = mtk_dai_pcm_widgets;
+ dai->num_dapm_widgets = ARRAY_SIZE(mtk_dai_pcm_widgets);
+ dai->dapm_routes = mtk_dai_pcm_routes;
+ dai->num_dapm_routes = ARRAY_SIZE(mtk_dai_pcm_routes);
+
+ pcm_priv = init_pcm_priv_data(afe);
+ if (!pcm_priv)
+ return -ENOMEM;
+
+ afe_priv->dai_priv[MT8186_DAI_PCM] = pcm_priv;
+
+ return 0;
+}
diff --git a/sound/soc/mediatek/mt8186/mt8186-dai-src.c b/sound/soc/mediatek/mt8186/mt8186-dai-src.c
new file mode 100644
index 000000000000..67989ffd67ca
--- /dev/null
+++ b/sound/soc/mediatek/mt8186/mt8186-dai-src.c
@@ -0,0 +1,695 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// MediaTek ALSA SoC Audio DAI SRC Control
+//
+// Copyright (c) 2022 MediaTek Inc.
+// Author: Jiaxin Yu <jiaxin.yu@mediatek.com>
+
+#include <linux/regmap.h>
+#include "mt8186-afe-common.h"
+#include "mt8186-interconnection.h"
+
+struct mtk_afe_src_priv {
+ int dl_rate;
+ int ul_rate;
+};
+
+static const unsigned int src_iir_coeff_32_to_16[] = {
+ 0x0dbae6, 0xff9b0a, 0x0dbae6, 0x05e488, 0xe072b9, 0x000002,
+ 0x0dbae6, 0x000f3b, 0x0dbae6, 0x06a537, 0xe17d79, 0x000002,
+ 0x0dbae6, 0x01246a, 0x0dbae6, 0x087261, 0xe306be, 0x000002,
+ 0x0dbae6, 0x03437d, 0x0dbae6, 0x0bc16f, 0xe57c87, 0x000002,
+ 0x0dbae6, 0x072981, 0x0dbae6, 0x111dd3, 0xe94f2a, 0x000002,
+ 0x0dbae6, 0x0dc4a6, 0x0dbae6, 0x188611, 0xee85a0, 0x000002,
+ 0x0dbae6, 0x168b9a, 0x0dbae6, 0x200e8f, 0xf3ccf1, 0x000002,
+ 0x000000, 0x1b75cb, 0x1b75cb, 0x2374a2, 0x000000, 0x000001
+};
+
+static const unsigned int src_iir_coeff_44_to_16[] = {
+ 0x09ae28, 0xf7d97d, 0x09ae28, 0x212a3d, 0xe0ac3a, 0x000002,
+ 0x09ae28, 0xf8525a, 0x09ae28, 0x216d72, 0xe234be, 0x000002,
+ 0x09ae28, 0xf980f5, 0x09ae28, 0x22a057, 0xe45a81, 0x000002,
+ 0x09ae28, 0xfc0a08, 0x09ae28, 0x24d3bd, 0xe7752d, 0x000002,
+ 0x09ae28, 0x016162, 0x09ae28, 0x27da01, 0xeb6ea8, 0x000002,
+ 0x09ae28, 0x0b67df, 0x09ae28, 0x2aca4a, 0xef34c4, 0x000002,
+ 0x000000, 0x135c50, 0x135c50, 0x2c1079, 0x000000, 0x000001
+};
+
+static const unsigned int src_iir_coeff_44_to_32[] = {
+ 0x096966, 0x0c4d35, 0x096966, 0xedee81, 0xf05070, 0x000003,
+ 0x12d2cc, 0x193910, 0x12d2cc, 0xddbf4f, 0xe21e1d, 0x000002,
+ 0x12d2cc, 0x1a9e60, 0x12d2cc, 0xe18916, 0xe470fd, 0x000002,
+ 0x12d2cc, 0x1d06e0, 0x12d2cc, 0xe8a4a6, 0xe87b24, 0x000002,
+ 0x12d2cc, 0x207578, 0x12d2cc, 0xf4fe62, 0xef5917, 0x000002,
+ 0x12d2cc, 0x24055f, 0x12d2cc, 0x05ee2b, 0xf8b502, 0x000002,
+ 0x000000, 0x25a599, 0x25a599, 0x0fabe2, 0x000000, 0x000001
+};
+
+static const unsigned int src_iir_coeff_48_to_16[] = {
+ 0x0296a4, 0xfd69dd, 0x0296a4, 0x209439, 0xe01ff9, 0x000002,
+ 0x0f4ff3, 0xf0d6d4, 0x0f4ff3, 0x209bc9, 0xe076c3, 0x000002,
+ 0x0e8490, 0xf1fe63, 0x0e8490, 0x20cfd6, 0xe12124, 0x000002,
+ 0x14852f, 0xed794a, 0x14852f, 0x21503d, 0xe28b32, 0x000002,
+ 0x136222, 0xf17677, 0x136222, 0x225be1, 0xe56964, 0x000002,
+ 0x0a8d85, 0xfc4a97, 0x0a8d85, 0x24310c, 0xea6952, 0x000002,
+ 0x05eff5, 0x043455, 0x05eff5, 0x4ced8f, 0xe134d6, 0x000001,
+ 0x000000, 0x3aebe6, 0x3aebe6, 0x04f3b0, 0x000000, 0x000004
+};
+
+static const unsigned int src_iir_coeff_48_to_32[] = {
+ 0x10c1b8, 0x10a7df, 0x10c1b8, 0xe7514e, 0xe0b41f, 0x000002,
+ 0x10c1b8, 0x116257, 0x10c1b8, 0xe9402f, 0xe25aaa, 0x000002,
+ 0x10c1b8, 0x130c89, 0x10c1b8, 0xed3cc3, 0xe4dddb, 0x000002,
+ 0x10c1b8, 0x1600dd, 0x10c1b8, 0xf48000, 0xe90c55, 0x000002,
+ 0x10c1b8, 0x1a672e, 0x10c1b8, 0x00494c, 0xefa807, 0x000002,
+ 0x10c1b8, 0x1f38e6, 0x10c1b8, 0x0ee076, 0xf7c5f3, 0x000002,
+ 0x000000, 0x218370, 0x218370, 0x168b40, 0x000000, 0x000001
+};
+
+static const unsigned int src_iir_coeff_48_to_44[] = {
+ 0x0bf71c, 0x170f3f, 0x0bf71c, 0xe3a4c8, 0xf096cb, 0x000003,
+ 0x0bf71c, 0x17395e, 0x0bf71c, 0xe58085, 0xf210c8, 0x000003,
+ 0x0bf71c, 0x1782bd, 0x0bf71c, 0xe95ef6, 0xf4c899, 0x000003,
+ 0x0bf71c, 0x17cd97, 0x0bf71c, 0xf1608a, 0xfa3b18, 0x000003,
+ 0x000000, 0x2fdc6f, 0x2fdc6f, 0xf15663, 0x000000, 0x000001
+};
+
+static const unsigned int src_iir_coeff_96_to_16[] = {
+ 0x0805a1, 0xf21ae3, 0x0805a1, 0x3840bb, 0xe02a2e, 0x000002,
+ 0x0d5dd8, 0xe8f259, 0x0d5dd8, 0x1c0af6, 0xf04700, 0x000003,
+ 0x0bb422, 0xec08d9, 0x0bb422, 0x1bfccc, 0xf09216, 0x000003,
+ 0x08fde6, 0xf108be, 0x08fde6, 0x1bf096, 0xf10ae0, 0x000003,
+ 0x0ae311, 0xeeeda3, 0x0ae311, 0x37c646, 0xe385f5, 0x000002,
+ 0x044089, 0xfa7242, 0x044089, 0x37a785, 0xe56526, 0x000002,
+ 0x00c75c, 0xffb947, 0x00c75c, 0x378ba3, 0xe72c5f, 0x000002,
+ 0x000000, 0x0ef76e, 0x0ef76e, 0x377fda, 0x000000, 0x000001,
+};
+
+static const unsigned int src_iir_coeff_96_to_44[] = {
+ 0x08b543, 0xfd80f4, 0x08b543, 0x0e2332, 0xe06ed0, 0x000002,
+ 0x1b6038, 0xf90e7e, 0x1b6038, 0x0ec1ac, 0xe16f66, 0x000002,
+ 0x188478, 0xfbb921, 0x188478, 0x105859, 0xe2e596, 0x000002,
+ 0x13eff3, 0xffa707, 0x13eff3, 0x13455c, 0xe533b7, 0x000002,
+ 0x0dc239, 0x03d458, 0x0dc239, 0x17f120, 0xe8b617, 0x000002,
+ 0x0745f1, 0x05d790, 0x0745f1, 0x1e3d75, 0xed5f18, 0x000002,
+ 0x05641f, 0x085e2b, 0x05641f, 0x48efd0, 0xe3e9c8, 0x000001,
+ 0x000000, 0x28f632, 0x28f632, 0x273905, 0x000000, 0x000001,
+};
+
+static unsigned int mtk_get_src_freq_mode(struct mtk_base_afe *afe, int rate)
+{
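+ /* the returned ASM frequency word scales linearly with the sample rate */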
+ switch (rate) {
+ case 8000:
+ return 0x50000;
+ case 11025:
+ return 0x6e400;
+ case 12000:
+ return 0x78000;
+ case 16000:
+ return 0xa0000;
+ case 22050:
+ return 0xdc800;
+ case 24000:
+ return 0xf0000;
+ case 32000:
+ return 0x140000;
+ case 44100:
+ return 0x1b9000;
+ case 48000:
+ return 0x1e0000;
+ case 88200:
+ return 0x372000;
+ case 96000:
+ return 0x3c0000;
+ case 176400:
+ return 0x6e4000;
+ case 192000:
+ return 0x780000;
+ default:
+ dev_err(afe->dev, "%s(), rate %d invalid!!!\n",
+ __func__, rate);
+ return 0;
+ }
+}
+
+static const unsigned int *get_iir_coeff(unsigned int rate_in,
+ unsigned int rate_out,
+ unsigned int *param_num)
+{
+ if (rate_in == 32000 && rate_out == 16000) {
+ *param_num = ARRAY_SIZE(src_iir_coeff_32_to_16);
+ return src_iir_coeff_32_to_16;
+ } else if (rate_in == 44100 && rate_out == 16000) {
+ *param_num = ARRAY_SIZE(src_iir_coeff_44_to_16);
+ return src_iir_coeff_44_to_16;
+ } else if (rate_in == 44100 && rate_out == 32000) {
+ *param_num = ARRAY_SIZE(src_iir_coeff_44_to_32);
+ return src_iir_coeff_44_to_32;
+ } else if ((rate_in == 48000 && rate_out == 16000) ||
+ (rate_in == 96000 && rate_out == 32000)) {
+ *param_num = ARRAY_SIZE(src_iir_coeff_48_to_16);
+ return src_iir_coeff_48_to_16;
+ } else if (rate_in == 48000 && rate_out == 32000) {
+ *param_num = ARRAY_SIZE(src_iir_coeff_48_to_32);
+ return src_iir_coeff_48_to_32;
+ } else if (rate_in == 48000 && rate_out == 44100) {
+ *param_num = ARRAY_SIZE(src_iir_coeff_48_to_44);
+ return src_iir_coeff_48_to_44;
+ } else if (rate_in == 96000 && rate_out == 16000) {
+ *param_num = ARRAY_SIZE(src_iir_coeff_96_to_16);
+ return src_iir_coeff_96_to_16;
+ } else if ((rate_in == 96000 && rate_out == 44100) ||
+ (rate_in == 48000 && rate_out == 22050)) {
+ *param_num = ARRAY_SIZE(src_iir_coeff_96_to_44);
+ return src_iir_coeff_96_to_44;
+ }
+
+ *param_num = 0;
+ return NULL;
+}
+
+static int mtk_set_src_1_param(struct mtk_base_afe *afe, int id)
+{
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ struct mtk_afe_src_priv *src_priv = afe_priv->dai_priv[id];
+ unsigned int iir_coeff_num;
+ unsigned int iir_stage;
+ int rate_in = src_priv->dl_rate;
+ int rate_out = src_priv->ul_rate;
+ unsigned int out_freq_mode = mtk_get_src_freq_mode(afe, rate_out);
+ unsigned int in_freq_mode = mtk_get_src_freq_mode(afe, rate_in);
+
+ /* set out freq mode */
+ regmap_update_bits(afe->regmap, AFE_GENERAL1_ASRC_2CH_CON3,
+ G_SRC_ASM_FREQ_4_MASK_SFT,
+ out_freq_mode << G_SRC_ASM_FREQ_4_SFT);
+
+ /* set in freq mode */
+ regmap_update_bits(afe->regmap, AFE_GENERAL1_ASRC_2CH_CON4,
+ G_SRC_ASM_FREQ_5_MASK_SFT,
+ in_freq_mode << G_SRC_ASM_FREQ_5_SFT);
+
+ regmap_write(afe->regmap, AFE_GENERAL1_ASRC_2CH_CON5, 0x3f5986);
+ regmap_write(afe->regmap, AFE_GENERAL1_ASRC_2CH_CON5, 0x3f5987);
+ regmap_write(afe->regmap, AFE_GENERAL1_ASRC_2CH_CON6, 0x1fbd);
+ regmap_write(afe->regmap, AFE_GENERAL1_ASRC_2CH_CON2, 0);
+
+ /* set iir if in_rate > out_rate */
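+ /* downsampling goes through the IIR, which acts as an anti-aliasing low-pass */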
+ if (rate_in > rate_out) {
+ int i;
+ const unsigned int *iir_coeff = get_iir_coeff(rate_in, rate_out,
+ &iir_coeff_num);
+
+ if (iir_coeff_num == 0 || !iir_coeff) {
+ dev_err(afe->dev, "%s(), iir coeff error, num %d, coeff %p\n",
+ __func__, iir_coeff_num, iir_coeff);
+ return -EINVAL;
+ }
+
+ /* COEFF_SRAM_CTRL */
+ regmap_update_bits(afe->regmap, AFE_GENERAL1_ASRC_2CH_CON0,
+ G_SRC_COEFF_SRAM_CTRL_MASK_SFT,
+ BIT(G_SRC_COEFF_SRAM_CTRL_SFT));
+ /* Clear coeff history to r/w coeff from the first position */
+ regmap_update_bits(afe->regmap, AFE_GENERAL1_ASRC_2CH_CON13,
+ G_SRC_COEFF_SRAM_ADR_MASK_SFT, 0);
+ /* Write SRC coeff, should not read the reg during write */
+ for (i = 0; i < iir_coeff_num; i++)
+ regmap_write(afe->regmap, AFE_GENERAL1_ASRC_2CH_CON12,
+ iir_coeff[i]);
+ /* disable sram access */
+ regmap_update_bits(afe->regmap, AFE_GENERAL1_ASRC_2CH_CON0,
+ G_SRC_COEFF_SRAM_CTRL_MASK_SFT, 0);
+ /* CHSET_IIR_STAGE */
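+ /* the coefficient tables hold six words per stage */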
+ iir_stage = (iir_coeff_num / 6) - 1;
+ regmap_update_bits(afe->regmap, AFE_GENERAL1_ASRC_2CH_CON2,
+ G_SRC_CHSET_IIR_STAGE_MASK_SFT,
+ iir_stage << G_SRC_CHSET_IIR_STAGE_SFT);
+ /* CHSET_IIR_EN */
+ regmap_update_bits(afe->regmap, AFE_GENERAL1_ASRC_2CH_CON2,
+ G_SRC_CHSET_IIR_EN_MASK_SFT,
+ BIT(G_SRC_CHSET_IIR_EN_SFT));
+ } else {
+ /* CHSET_IIR_EN off */
+ regmap_update_bits(afe->regmap, AFE_GENERAL1_ASRC_2CH_CON2,
+ G_SRC_CHSET_IIR_EN_MASK_SFT, 0);
+ }
+
+ return 0;
+}
+
+static int mtk_set_src_2_param(struct mtk_base_afe *afe, int id)
+{
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ struct mtk_afe_src_priv *src_priv = afe_priv->dai_priv[id];
+ unsigned int iir_coeff_num;
+ unsigned int iir_stage;
+ int rate_in = src_priv->dl_rate;
+ int rate_out = src_priv->ul_rate;
+ unsigned int out_freq_mode = mtk_get_src_freq_mode(afe, rate_out);
+ unsigned int in_freq_mode = mtk_get_src_freq_mode(afe, rate_in);
+
+ /* set out freq mode */
+ regmap_update_bits(afe->regmap, AFE_GENERAL2_ASRC_2CH_CON3,
+ G_SRC_ASM_FREQ_4_MASK_SFT,
+ out_freq_mode << G_SRC_ASM_FREQ_4_SFT);
+
+ /* set in freq mode */
+ regmap_update_bits(afe->regmap, AFE_GENERAL2_ASRC_2CH_CON4,
+ G_SRC_ASM_FREQ_5_MASK_SFT,
+ in_freq_mode << G_SRC_ASM_FREQ_5_SFT);
+
+ regmap_write(afe->regmap, AFE_GENERAL2_ASRC_2CH_CON5, 0x3f5986);
+ regmap_write(afe->regmap, AFE_GENERAL2_ASRC_2CH_CON5, 0x3f5987);
+ regmap_write(afe->regmap, AFE_GENERAL2_ASRC_2CH_CON6, 0x1fbd);
+ regmap_write(afe->regmap, AFE_GENERAL2_ASRC_2CH_CON2, 0);
+
+ /* set iir if in_rate > out_rate */
+ if (rate_in > rate_out) {
+ int i;
+ const unsigned int *iir_coeff = get_iir_coeff(rate_in, rate_out,
+ &iir_coeff_num);
+
+ if (iir_coeff_num == 0 || !iir_coeff) {
+ dev_err(afe->dev, "%s(), iir coeff error, num %d, coeff %p\n",
+ __func__, iir_coeff_num, iir_coeff);
+ return -EINVAL;
+ }
+
+ /* COEFF_SRAM_CTRL */
+ regmap_update_bits(afe->regmap, AFE_GENERAL2_ASRC_2CH_CON0,
+ G_SRC_COEFF_SRAM_CTRL_MASK_SFT,
+ BIT(G_SRC_COEFF_SRAM_CTRL_SFT));
+ /* Clear coeff history to r/w coeff from the first position */
+ regmap_update_bits(afe->regmap, AFE_GENERAL2_ASRC_2CH_CON13,
+ G_SRC_COEFF_SRAM_ADR_MASK_SFT, 0);
+ /* Write SRC coeff, should not read the reg during write */
+ for (i = 0; i < iir_coeff_num; i++)
+ regmap_write(afe->regmap, AFE_GENERAL2_ASRC_2CH_CON12,
+ iir_coeff[i]);
+ /* disable sram access */
+ regmap_update_bits(afe->regmap, AFE_GENERAL2_ASRC_2CH_CON0,
+ G_SRC_COEFF_SRAM_CTRL_MASK_SFT, 0);
+ /* CHSET_IIR_STAGE */
+ iir_stage = (iir_coeff_num / 6) - 1;
+ regmap_update_bits(afe->regmap, AFE_GENERAL2_ASRC_2CH_CON2,
+ G_SRC_CHSET_IIR_STAGE_MASK_SFT,
+ iir_stage << G_SRC_CHSET_IIR_STAGE_SFT);
+ /* CHSET_IIR_EN */
+ regmap_update_bits(afe->regmap, AFE_GENERAL2_ASRC_2CH_CON2,
+ G_SRC_CHSET_IIR_EN_MASK_SFT,
+ BIT(G_SRC_CHSET_IIR_EN_SFT));
+ } else {
+ /* CHSET_IIR_EN off */
+ regmap_update_bits(afe->regmap, AFE_GENERAL2_ASRC_2CH_CON2,
+ G_SRC_CHSET_IIR_EN_MASK_SFT, 0);
+ }
+
+ return 0;
+}
+
+#define HW_SRC_1_EN_W_NAME "HW_SRC_1_Enable"
+#define HW_SRC_2_EN_W_NAME "HW_SRC_2_Enable"
+
+static int mtk_hw_src_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ int id;
+ struct mtk_afe_src_priv *src_priv;
+ unsigned int reg;
+
+ if (strcmp(w->name, HW_SRC_1_EN_W_NAME) == 0)
+ id = MT8186_DAI_SRC_1;
+ else
+ id = MT8186_DAI_SRC_2;
+
+ src_priv = afe_priv->dai_priv[id];
+
+ dev_dbg(afe->dev,
+ "%s(), name %s, event 0x%x, id %d, src_priv %p, dl_rate %d, ul_rate %d\n",
+ __func__, w->name, event, id, src_priv,
+ src_priv->dl_rate, src_priv->ul_rate);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ if (id == MT8186_DAI_SRC_1)
+ mtk_set_src_1_param(afe, id);
+ else
+ mtk_set_src_2_param(afe, id);
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ reg = (id == MT8186_DAI_SRC_1) ?
+ AFE_GENERAL1_ASRC_2CH_CON0 : AFE_GENERAL2_ASRC_2CH_CON0;
+ /* ASM_ON */
+ regmap_update_bits(afe->regmap, reg,
+ G_SRC_ASM_ON_MASK_SFT,
+ BIT(G_SRC_ASM_ON_SFT));
+ /* CHSET_ON */
+ regmap_update_bits(afe->regmap, reg,
+ G_SRC_CHSET_ON_MASK_SFT,
+ BIT(G_SRC_CHSET_ON_SFT));
+ /* CHSET_STR_CLR */
+ regmap_update_bits(afe->regmap, reg,
+ G_SRC_CHSET_STR_CLR_MASK_SFT,
+ BIT(G_SRC_CHSET_STR_CLR_SFT));
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ reg = (id == MT8186_DAI_SRC_1) ?
+ AFE_GENERAL1_ASRC_2CH_CON0 : AFE_GENERAL2_ASRC_2CH_CON0;
+ /* ASM_OFF */
+ regmap_update_bits(afe->regmap, reg, G_SRC_ASM_ON_MASK_SFT, 0);
+ /* CHSET_OFF */
+ regmap_update_bits(afe->regmap, reg, G_SRC_CHSET_ON_MASK_SFT, 0);
+ /* CHSET_STR_CLR */
+ regmap_update_bits(afe->regmap, reg, G_SRC_CHSET_STR_CLR_MASK_SFT, 0);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/* dai component */
+static const struct snd_kcontrol_new mtk_hw_src_1_in_ch1_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH1 Switch", AFE_CONN40,
+ I_DL1_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1 Switch", AFE_CONN40,
+ I_DL2_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH1 Switch", AFE_CONN40,
+ I_DL3_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH1 Switch", AFE_CONN40_1,
+ I_DL4_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL6_CH1 Switch", AFE_CONN40_1,
+ I_DL6_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("I2S0_CH1 Switch", AFE_CONN40,
+ I_I2S0_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL5_CH1 Switch", AFE_CONN40_1,
+ I_DL5_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new mtk_hw_src_1_in_ch2_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH2 Switch", AFE_CONN41,
+ I_DL1_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH2 Switch", AFE_CONN41,
+ I_DL2_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH2 Switch", AFE_CONN41,
+ I_DL3_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH2 Switch", AFE_CONN41_1,
+ I_DL4_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL6_CH2 Switch", AFE_CONN41_1,
+ I_DL6_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("I2S0_CH2 Switch", AFE_CONN41,
+ I_I2S0_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL5_CH2 Switch", AFE_CONN41_1,
+ I_DL5_CH2, 1, 0),
+};
+
+static const struct snd_kcontrol_new mtk_hw_src_2_in_ch1_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH1 Switch", AFE_CONN42,
+ I_DL1_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH1 Switch", AFE_CONN42,
+ I_DL2_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH1 Switch", AFE_CONN42,
+ I_DL3_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH1 Switch", AFE_CONN42,
+ I_DL4_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL5_CH1 Switch", AFE_CONN42_1,
+ I_DL5_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL6_CH1 Switch", AFE_CONN42_1,
+ I_DL6_CH1, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("HW_GAIN2_OUT_CH1 Switch", AFE_CONN42,
+ I_GAIN2_OUT_CH1, 1, 0),
+};
+
+static const struct snd_kcontrol_new mtk_hw_src_2_in_ch2_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("DL1_CH2 Switch", AFE_CONN43,
+ I_DL1_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL2_CH2 Switch", AFE_CONN43,
+ I_DL2_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL3_CH2 Switch", AFE_CONN43,
+ I_DL3_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL4_CH2 Switch", AFE_CONN43,
+ I_DL4_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL5_CH2 Switch", AFE_CONN43_1,
+ I_DL5_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("DL6_CH2 Switch", AFE_CONN43_1,
+ I_DL6_CH2, 1, 0),
+ SOC_DAPM_SINGLE_AUTODISABLE("HW_GAIN2_OUT_CH2 Switch", AFE_CONN43,
+ I_GAIN2_OUT_CH2, 1, 0),
+};
+
+static const struct snd_soc_dapm_widget mtk_dai_src_widgets[] = {
+ /* inter-connections */
+ SND_SOC_DAPM_MIXER("HW_SRC_1_IN_CH1", SND_SOC_NOPM, 0, 0,
+ mtk_hw_src_1_in_ch1_mix,
+ ARRAY_SIZE(mtk_hw_src_1_in_ch1_mix)),
+ SND_SOC_DAPM_MIXER("HW_SRC_1_IN_CH2", SND_SOC_NOPM, 0, 0,
+ mtk_hw_src_1_in_ch2_mix,
+ ARRAY_SIZE(mtk_hw_src_1_in_ch2_mix)),
+ SND_SOC_DAPM_MIXER("HW_SRC_2_IN_CH1", SND_SOC_NOPM, 0, 0,
+ mtk_hw_src_2_in_ch1_mix,
+ ARRAY_SIZE(mtk_hw_src_2_in_ch1_mix)),
+ SND_SOC_DAPM_MIXER("HW_SRC_2_IN_CH2", SND_SOC_NOPM, 0, 0,
+ mtk_hw_src_2_in_ch2_mix,
+ ARRAY_SIZE(mtk_hw_src_2_in_ch2_mix)),
+
+ SND_SOC_DAPM_SUPPLY(HW_SRC_1_EN_W_NAME,
+ GENERAL_ASRC_EN_ON, GENERAL1_ASRC_EN_ON_SFT, 0,
+ mtk_hw_src_event,
+ SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD),
+
+ SND_SOC_DAPM_SUPPLY(HW_SRC_2_EN_W_NAME,
+ GENERAL_ASRC_EN_ON, GENERAL2_ASRC_EN_ON_SFT, 0,
+ mtk_hw_src_event,
+ SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD),
+
+ SND_SOC_DAPM_INPUT("HW SRC 1 Out Endpoint"),
+ SND_SOC_DAPM_INPUT("HW SRC 2 Out Endpoint"),
+ SND_SOC_DAPM_OUTPUT("HW SRC 1 In Endpoint"),
+ SND_SOC_DAPM_OUTPUT("HW SRC 2 In Endpoint"),
+};
+
+static int mtk_afe_src_en_connect(struct snd_soc_dapm_widget *source,
+ struct snd_soc_dapm_widget *sink)
+{
+ struct snd_soc_dapm_widget *w = source;
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ struct mtk_afe_src_priv *src_priv;
+
+ if (strcmp(w->name, HW_SRC_1_EN_W_NAME) == 0)
+ src_priv = afe_priv->dai_priv[MT8186_DAI_SRC_1];
+ else
+ src_priv = afe_priv->dai_priv[MT8186_DAI_SRC_2];
+
+ dev_dbg(afe->dev,
+ "%s(), source %s, sink %s, dl_rate %d, ul_rate %d\n",
+ __func__, source->name, sink->name,
+ src_priv->dl_rate, src_priv->ul_rate);
+
+ return (src_priv->dl_rate > 0 && src_priv->ul_rate > 0) ? 1 : 0;
+}
+
+static const struct snd_soc_dapm_route mtk_dai_src_routes[] = {
+ {"HW_SRC_1_IN_CH1", "DL1_CH1 Switch", "DL1"},
+ {"HW_SRC_1_IN_CH2", "DL1_CH2 Switch", "DL1"},
+ {"HW_SRC_2_IN_CH1", "DL1_CH1 Switch", "DL1"},
+ {"HW_SRC_2_IN_CH2", "DL1_CH2 Switch", "DL1"},
+ {"HW_SRC_1_IN_CH1", "DL2_CH1 Switch", "DL2"},
+ {"HW_SRC_1_IN_CH2", "DL2_CH2 Switch", "DL2"},
+ {"HW_SRC_2_IN_CH1", "DL2_CH1 Switch", "DL2"},
+ {"HW_SRC_2_IN_CH2", "DL2_CH2 Switch", "DL2"},
+ {"HW_SRC_1_IN_CH1", "DL3_CH1 Switch", "DL3"},
+ {"HW_SRC_1_IN_CH2", "DL3_CH2 Switch", "DL3"},
+ {"HW_SRC_2_IN_CH1", "DL3_CH1 Switch", "DL3"},
+ {"HW_SRC_2_IN_CH2", "DL3_CH2 Switch", "DL3"},
+ {"HW_SRC_1_IN_CH1", "DL6_CH1 Switch", "DL6"},
+ {"HW_SRC_1_IN_CH2", "DL6_CH2 Switch", "DL6"},
+ {"HW_SRC_2_IN_CH1", "DL6_CH1 Switch", "DL6"},
+ {"HW_SRC_2_IN_CH2", "DL6_CH2 Switch", "DL6"},
+ {"HW_SRC_1_IN_CH1", "DL5_CH1 Switch", "DL5"},
+ {"HW_SRC_1_IN_CH2", "DL5_CH2 Switch", "DL5"},
+ {"HW_SRC_2_IN_CH1", "DL5_CH1 Switch", "DL5"},
+ {"HW_SRC_2_IN_CH2", "DL5_CH2 Switch", "DL5"},
+ {"HW_SRC_1_IN_CH1", "DL4_CH1 Switch", "DL4"},
+ {"HW_SRC_1_IN_CH2", "DL4_CH2 Switch", "DL4"},
+ {"HW_SRC_2_IN_CH1", "DL4_CH1 Switch", "DL4"},
+ {"HW_SRC_2_IN_CH2", "DL4_CH2 Switch", "DL4"},
+
+ {"HW_SRC_1_In", NULL, "HW_SRC_1_IN_CH1"},
+ {"HW_SRC_1_In", NULL, "HW_SRC_1_IN_CH2"},
+
+ {"HW_SRC_2_In", NULL, "HW_SRC_2_IN_CH1"},
+ {"HW_SRC_2_In", NULL, "HW_SRC_2_IN_CH2"},
+
+ {"HW_SRC_1_In", NULL, HW_SRC_1_EN_W_NAME, mtk_afe_src_en_connect},
+ {"HW_SRC_1_Out", NULL, HW_SRC_1_EN_W_NAME, mtk_afe_src_en_connect},
+ {"HW_SRC_2_In", NULL, HW_SRC_2_EN_W_NAME, mtk_afe_src_en_connect},
+ {"HW_SRC_2_Out", NULL, HW_SRC_2_EN_W_NAME, mtk_afe_src_en_connect},
+
+ {"HW SRC 1 In Endpoint", NULL, "HW_SRC_1_In"},
+ {"HW SRC 2 In Endpoint", NULL, "HW_SRC_2_In"},
+ {"HW_SRC_1_Out", NULL, "HW SRC 1 Out Endpoint"},
+ {"HW_SRC_2_Out", NULL, "HW SRC 2 Out Endpoint"},
+};
+
+/* dai ops */
+static int mtk_dai_src_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ int id = dai->id;
+ struct mtk_afe_src_priv *src_priv = afe_priv->dai_priv[id];
+ unsigned int sft, mask;
+ unsigned int rate = params_rate(params);
+ unsigned int rate_reg = mt8186_rate_transform(afe->dev, rate, id);
+
+ dev_dbg(afe->dev, "%s(), id %d, stream %d, rate %d\n",
+ __func__, id, substream->stream, rate);
+
+ /* rate */
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ src_priv->dl_rate = rate;
+ if (id == MT8186_DAI_SRC_1) {
+ sft = GENERAL1_ASRCIN_MODE_SFT;
+ mask = GENERAL1_ASRCIN_MODE_MASK;
+ } else {
+ sft = GENERAL2_ASRCIN_MODE_SFT;
+ mask = GENERAL2_ASRCIN_MODE_MASK;
+ }
+ } else {
+ src_priv->ul_rate = rate;
+ if (id == MT8186_DAI_SRC_1) {
+ sft = GENERAL1_ASRCOUT_MODE_SFT;
+ mask = GENERAL1_ASRCOUT_MODE_MASK;
+ } else {
+ sft = GENERAL2_ASRCOUT_MODE_SFT;
+ mask = GENERAL2_ASRCOUT_MODE_MASK;
+ }
+ }
+
+ regmap_update_bits(afe->regmap, GENERAL_ASRC_MODE, mask << sft, rate_reg << sft);
+
+ return 0;
+}
+
+static int mtk_dai_src_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ int id = dai->id;
+ struct mtk_afe_src_priv *src_priv = afe_priv->dai_priv[id];
+
+ dev_dbg(afe->dev, "%s(), id %d, stream %d\n",
+ __func__, id, substream->stream);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ src_priv->dl_rate = 0;
+ else
+ src_priv->ul_rate = 0;
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops mtk_dai_src_ops = {
+ .hw_params = mtk_dai_src_hw_params,
+ .hw_free = mtk_dai_src_hw_free,
+};
+
+/* dai driver */
+#define MTK_SRC_RATES (SNDRV_PCM_RATE_8000_48000 |\
+ SNDRV_PCM_RATE_88200 |\
+ SNDRV_PCM_RATE_96000 |\
+ SNDRV_PCM_RATE_176400 |\
+ SNDRV_PCM_RATE_192000)
+
+#define MTK_SRC_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+ SNDRV_PCM_FMTBIT_S24_LE |\
+ SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_driver mtk_dai_src_driver[] = {
+ {
+ .name = "HW_SRC_1",
+ .id = MT8186_DAI_SRC_1,
+ .playback = {
+ .stream_name = "HW_SRC_1_In",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_SRC_RATES,
+ .formats = MTK_SRC_FORMATS,
+ },
+ .capture = {
+ .stream_name = "HW_SRC_1_Out",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_SRC_RATES,
+ .formats = MTK_SRC_FORMATS,
+ },
+ .ops = &mtk_dai_src_ops,
+ },
+ {
+ .name = "HW_SRC_2",
+ .id = MT8186_DAI_SRC_2,
+ .playback = {
+ .stream_name = "HW_SRC_2_In",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_SRC_RATES,
+ .formats = MTK_SRC_FORMATS,
+ },
+ .capture = {
+ .stream_name = "HW_SRC_2_Out",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MTK_SRC_RATES,
+ .formats = MTK_SRC_FORMATS,
+ },
+ .ops = &mtk_dai_src_ops,
+ },
+};
+
+int mt8186_dai_src_register(struct mtk_base_afe *afe)
+{
+ struct mtk_base_afe_dai *dai;
+ int ret;
+
+ dai = devm_kzalloc(afe->dev, sizeof(*dai), GFP_KERNEL);
+ if (!dai)
+ return -ENOMEM;
+
+ list_add(&dai->list, &afe->sub_dais);
+
+ dai->dai_drivers = mtk_dai_src_driver;
+ dai->num_dai_drivers = ARRAY_SIZE(mtk_dai_src_driver);
+
+ dai->dapm_widgets = mtk_dai_src_widgets;
+ dai->num_dapm_widgets = ARRAY_SIZE(mtk_dai_src_widgets);
+ dai->dapm_routes = mtk_dai_src_routes;
+ dai->num_dapm_routes = ARRAY_SIZE(mtk_dai_src_routes);
+
+ /* set dai priv */
+ ret = mt8186_dai_set_priv(afe, MT8186_DAI_SRC_1,
+ sizeof(struct mtk_afe_src_priv), NULL);
+ if (ret)
+ return ret;
+
+ ret = mt8186_dai_set_priv(afe, MT8186_DAI_SRC_2,
+ sizeof(struct mtk_afe_src_priv), NULL);
+ if (ret)
+ return ret;
+
+ return 0;
+}
diff --git a/sound/soc/mediatek/mt8186/mt8186-dai-tdm.c b/sound/soc/mediatek/mt8186/mt8186-dai-tdm.c
new file mode 100644
index 000000000000..4148dceb3a4c
--- /dev/null
+++ b/sound/soc/mediatek/mt8186/mt8186-dai-tdm.c
@@ -0,0 +1,645 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// MediaTek ALSA SoC Audio DAI TDM Control
+//
+// Copyright (c) 2022 MediaTek Inc.
+// Author: Jiaxin Yu <jiaxin.yu@mediatek.com>
+
+#include <linux/regmap.h>
+#include <sound/pcm_params.h>
+
+#include "mt8186-afe-clk.h"
+#include "mt8186-afe-common.h"
+#include "mt8186-afe-gpio.h"
+#include "mt8186-interconnection.h"
+
+#define TDM_HD_EN_W_NAME "TDM_HD_EN"
+#define TDM_MCLK_EN_W_NAME "TDM_MCLK_EN"
+#define MTK_AFE_TDM_KCONTROL_NAME "TDM_HD_Mux"
+
+struct mtk_afe_tdm_priv {
+ unsigned int id;
+ unsigned int rate; /* used to determine which apll to use */
+ unsigned int bck_invert;
+ unsigned int lck_invert;
+ unsigned int lrck_width;
+ unsigned int mclk_id;
+ unsigned int mclk_multiple; /* according to sample rate */
+ unsigned int mclk_rate;
+ unsigned int mclk_apll;
+ unsigned int tdm_mode;
+ unsigned int data_mode;
+ unsigned int slave_mode;
+ unsigned int low_jitter_en;
+};
+
+enum {
+ TDM_IN_I2S = 0,
+ TDM_IN_LJ = 1,
+ TDM_IN_RJ = 2,
+ TDM_IN_DSP_A = 4,
+ TDM_IN_DSP_B = 5,
+};
+
+enum {
+ TDM_DATA_ONE_PIN = 0,
+ TDM_DATA_MULTI_PIN,
+};
+
+enum {
+ TDM_BCK_NON_INV = 0,
+ TDM_BCK_INV = 1,
+};
+
+enum {
+ TDM_LCK_NON_INV = 0,
+ TDM_LCK_INV = 1,
+};
+
+static unsigned int get_tdm_lrck_width(snd_pcm_format_t format,
+ unsigned int mode)
+{
+ if (mode == TDM_IN_DSP_A || mode == TDM_IN_DSP_B)
+ return 0;
+
+ return snd_pcm_format_physical_width(format) - 1;
+}
+
+static unsigned int get_tdm_ch_fixup(unsigned int channels)
+{
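+ /* round the channel count up to the next supported slot count: 2, 4 or 8 */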
+ if (channels > 4)
+ return 8;
+ else if (channels > 2)
+ return 4;
+
+ return 2;
+}
+
+static unsigned int get_tdm_ch_per_sdata(unsigned int mode,
+ unsigned int channels)
+{
+ if (mode == TDM_IN_DSP_A || mode == TDM_IN_DSP_B)
+ return get_tdm_ch_fixup(channels);
+
+ return 2;
+}
+
+enum {
+ SUPPLY_SEQ_APLL,
+ SUPPLY_SEQ_TDM_MCK_EN,
+ SUPPLY_SEQ_TDM_HD_EN,
+ SUPPLY_SEQ_TDM_EN,
+};
+
+static int get_tdm_id_by_name(const char *name)
+{
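+ /* MT8186 has a single TDM interface, so the widget name is not examined */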
+ return MT8186_DAI_TDM_IN;
+}
+
+static int mtk_tdm_en_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ int dai_id = get_tdm_id_by_name(w->name);
+ struct mtk_afe_tdm_priv *tdm_priv = afe_priv->dai_priv[dai_id];
+
+ dev_dbg(cmpnt->dev, "%s(), name %s, event 0x%x\n",
+ __func__, w->name, event);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ mt8186_afe_gpio_request(afe->dev, true, tdm_priv->id, 0);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ mt8186_afe_gpio_request(afe->dev, false, tdm_priv->id, 0);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int mtk_tdm_mck_en_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ int dai_id = get_tdm_id_by_name(w->name);
+ struct mtk_afe_tdm_priv *tdm_priv = afe_priv->dai_priv[dai_id];
+
+ dev_dbg(cmpnt->dev, "%s(), name %s, event 0x%x, dai_id %d\n",
+ __func__, w->name, event, dai_id);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ mt8186_mck_enable(afe, tdm_priv->mclk_id, tdm_priv->mclk_rate);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ tdm_priv->mclk_rate = 0;
+ mt8186_mck_disable(afe, tdm_priv->mclk_id);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/* dai component */
+/* tdm virtual mux to output widget */
+static const char * const tdm_mux_map[] = {
+ "Normal", "Dummy_Widget",
+};
+
+static int tdm_mux_map_value[] = {
+ 0, 1,
+};
+
+static SOC_VALUE_ENUM_SINGLE_AUTODISABLE_DECL(tdm_mux_map_enum,
+ SND_SOC_NOPM,
+ 0,
+ 1,
+ tdm_mux_map,
+ tdm_mux_map_value);
+
+static const struct snd_kcontrol_new tdm_in_mux_control =
+ SOC_DAPM_ENUM("TDM In Select", tdm_mux_map_enum);
+
+static const struct snd_soc_dapm_widget mtk_dai_tdm_widgets[] = {
+ SND_SOC_DAPM_CLOCK_SUPPLY("aud_tdm_clk"),
+
+ SND_SOC_DAPM_SUPPLY_S("TDM_EN", SUPPLY_SEQ_TDM_EN,
+ ETDM_IN1_CON0, ETDM_IN1_CON0_REG_ETDM_IN_EN_SFT,
+ 0, mtk_tdm_en_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ /* tdm hd en */
+ SND_SOC_DAPM_SUPPLY_S(TDM_HD_EN_W_NAME, SUPPLY_SEQ_TDM_HD_EN,
+ ETDM_IN1_CON2, ETDM_IN1_CON2_REG_CLOCK_SOURCE_SEL_SFT,
+ 0, NULL,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_SUPPLY_S(TDM_MCLK_EN_W_NAME, SUPPLY_SEQ_TDM_MCK_EN,
+ SND_SOC_NOPM, 0, 0,
+ mtk_tdm_mck_en_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_INPUT("TDM_DUMMY_IN"),
+
+ SND_SOC_DAPM_MUX("TDM_In_Mux",
+ SND_SOC_NOPM, 0, 0, &tdm_in_mux_control),
+};
+
+static int mtk_afe_tdm_mclk_connect(struct snd_soc_dapm_widget *source,
+ struct snd_soc_dapm_widget *sink)
+{
+ struct snd_soc_dapm_widget *w = sink;
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ int dai_id = get_tdm_id_by_name(w->name);
+ struct mtk_afe_tdm_priv *tdm_priv = afe_priv->dai_priv[dai_id];
+
+ return (tdm_priv->mclk_rate > 0) ? 1 : 0;
+}
+
+static int mtk_afe_tdm_mclk_apll_connect(struct snd_soc_dapm_widget *source,
+ struct snd_soc_dapm_widget *sink)
+{
+ struct snd_soc_dapm_widget *w = sink;
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ int dai_id = get_tdm_id_by_name(w->name);
+ struct mtk_afe_tdm_priv *tdm_priv = afe_priv->dai_priv[dai_id];
+ int cur_apll;
+
+ /* which apll */
+ cur_apll = mt8186_get_apll_by_name(afe, source->name);
+
+ return (tdm_priv->mclk_apll == cur_apll) ? 1 : 0;
+}
+
+static int mtk_afe_tdm_hd_connect(struct snd_soc_dapm_widget *source,
+ struct snd_soc_dapm_widget *sink)
+{
+ struct snd_soc_dapm_widget *w = sink;
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ int dai_id = get_tdm_id_by_name(w->name);
+ struct mtk_afe_tdm_priv *tdm_priv = afe_priv->dai_priv[dai_id];
+
+ return tdm_priv->low_jitter_en;
+}
+
+static int mtk_afe_tdm_apll_connect(struct snd_soc_dapm_widget *source,
+ struct snd_soc_dapm_widget *sink)
+{
+ struct snd_soc_dapm_widget *w = sink;
+ struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ int dai_id = get_tdm_id_by_name(w->name);
+ struct mtk_afe_tdm_priv *tdm_priv = afe_priv->dai_priv[dai_id];
+ int cur_apll;
+ int tdm_need_apll;
+
+ /* which apll */
+ cur_apll = mt8186_get_apll_by_name(afe, source->name);
+
+ /* choose APLL from tdm rate */
+ tdm_need_apll = mt8186_get_apll_by_rate(afe, tdm_priv->rate);
+
+ return (tdm_need_apll == cur_apll) ? 1 : 0;
+}
+
+/* low jitter control */
+static const char * const mt8186_tdm_hd_str[] = {
+ "Normal", "Low_Jitter"
+};
+
+static const struct soc_enum mt8186_tdm_enum[] = {
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(mt8186_tdm_hd_str),
+ mt8186_tdm_hd_str),
+};
+
+static int mt8186_tdm_hd_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ int dai_id = get_tdm_id_by_name(kcontrol->id.name);
+ struct mtk_afe_tdm_priv *tdm_priv = afe_priv->dai_priv[dai_id];
+
+ ucontrol->value.integer.value[0] = tdm_priv->low_jitter_en;
+
+ return 0;
+}
+
+static int mt8186_tdm_hd_set(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ int dai_id = get_tdm_id_by_name(kcontrol->id.name);
+ struct mtk_afe_tdm_priv *tdm_priv = afe_priv->dai_priv[dai_id];
+ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ int hd_en;
+
+ if (ucontrol->value.enumerated.item[0] >= e->items)
+ return -EINVAL;
+
+ hd_en = ucontrol->value.integer.value[0];
+
+ dev_dbg(afe->dev, "%s(), kcontrol name %s, hd_en %d\n",
+ __func__, kcontrol->id.name, hd_en);
+
+ if (tdm_priv->low_jitter_en == hd_en)
+ return 0;
+
+ tdm_priv->low_jitter_en = hd_en;
+
+ return 1;
+}
+
+static const struct snd_kcontrol_new mtk_dai_tdm_controls[] = {
+ SOC_ENUM_EXT(MTK_AFE_TDM_KCONTROL_NAME, mt8186_tdm_enum[0],
+ mt8186_tdm_hd_get, mt8186_tdm_hd_set),
+};
+
+static const struct snd_soc_dapm_route mtk_dai_tdm_routes[] = {
+ {"TDM IN", NULL, "aud_tdm_clk"},
+ {"TDM IN", NULL, "TDM_EN"},
+ {"TDM IN", NULL, TDM_HD_EN_W_NAME, mtk_afe_tdm_hd_connect},
+ {TDM_HD_EN_W_NAME, NULL, APLL1_W_NAME, mtk_afe_tdm_apll_connect},
+ {TDM_HD_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_tdm_apll_connect},
+
+ {"TDM IN", NULL, TDM_MCLK_EN_W_NAME, mtk_afe_tdm_mclk_connect},
+ {TDM_MCLK_EN_W_NAME, NULL, APLL1_W_NAME, mtk_afe_tdm_mclk_apll_connect},
+ {TDM_MCLK_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_tdm_mclk_apll_connect},
+
+ /* allow tdm on without codec on */
+ {"TDM IN", NULL, "TDM_In_Mux"},
+ {"TDM_In_Mux", "Dummy_Widget", "TDM_DUMMY_IN"},
+};
+
+/* dai ops */
+static int mtk_dai_tdm_cal_mclk(struct mtk_base_afe *afe,
+ struct mtk_afe_tdm_priv *tdm_priv,
+ int freq)
+{
+ int apll;
+ int apll_rate;
+
+ apll = mt8186_get_apll_by_rate(afe, freq);
+ apll_rate = mt8186_get_apll_rate(afe, apll);
+
+ if (!freq || freq > apll_rate) {
+ dev_err(afe->dev,
+ "%s(), freq(%d Hz) invalid\n", __func__, freq);
+ return -EINVAL;
+ }
+
+ if (apll_rate % freq != 0) {
+ dev_err(afe->dev,
+ "%s(), APLL cannot generate %d Hz", __func__, freq);
+ return -EINVAL;
+ }
+
+ tdm_priv->mclk_rate = freq;
+ tdm_priv->mclk_apll = apll;
+
+ return 0;
+}
+
+static int mtk_dai_tdm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ int tdm_id = dai->id;
+ struct mtk_afe_tdm_priv *tdm_priv = afe_priv->dai_priv[tdm_id];
+ unsigned int tdm_mode = tdm_priv->tdm_mode;
+ unsigned int data_mode = tdm_priv->data_mode;
+ unsigned int rate = params_rate(params);
+ unsigned int channels = params_channels(params);
+ snd_pcm_format_t format = params_format(params);
+ unsigned int bit_width =
+ snd_pcm_format_physical_width(format);
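+ /* one-pin (DSP) mode carries all channels on a single data line; otherwise each pin carries two channels */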
+ unsigned int tdm_channels = (data_mode == TDM_DATA_ONE_PIN) ?
+ get_tdm_ch_per_sdata(tdm_mode, channels) : 2;
+ unsigned int lrck_width =
+ get_tdm_lrck_width(format, tdm_mode);
+ unsigned int tdm_con = 0;
+ bool slave_mode = tdm_priv->slave_mode;
+ bool lrck_inv = tdm_priv->lck_invert;
+ bool bck_inv = tdm_priv->bck_invert;
+ unsigned int tran_rate;
+ unsigned int tran_relatch_rate;
+
+ tdm_priv->rate = rate;
+ tran_rate = mt8186_rate_transform(afe->dev, rate, dai->id);
+ tran_relatch_rate = mt8186_tdm_relatch_rate_transform(afe->dev, rate);
+
+ /* calculate mclk_rate, if not set explicitly */
+ if (!tdm_priv->mclk_rate) {
+ tdm_priv->mclk_rate = rate * tdm_priv->mclk_multiple;
+ mtk_dai_tdm_cal_mclk(afe, tdm_priv, tdm_priv->mclk_rate);
+ }
+
+ /* ETDM_IN1_CON0 */
+ tdm_con |= slave_mode << ETDM_IN1_CON0_REG_SLAVE_MODE_SFT;
+ tdm_con |= tdm_mode << ETDM_IN1_CON0_REG_FMT_SFT;
+ tdm_con |= (bit_width - 1) << ETDM_IN1_CON0_REG_BIT_LENGTH_SFT;
+ tdm_con |= (bit_width - 1) << ETDM_IN1_CON0_REG_WORD_LENGTH_SFT;
+ tdm_con |= (tdm_channels - 1) << ETDM_IN1_CON0_REG_CH_NUM_SFT;
+ /* sync mode must be disabled, otherwise it may cause a data latch error */
+ tdm_con |= 0 << ETDM_IN1_CON0_REG_SYNC_MODE_SFT;
+ /* the relatch 1x_en clock is fixed to h26m */
+ tdm_con |= 0 << ETDM_IN1_CON0_REG_RELATCH_1X_EN_SEL_DOMAIN_SFT;
+ regmap_update_bits(afe->regmap, ETDM_IN1_CON0, ETDM_IN_CON0_CTRL_MASK, tdm_con);
+
+ /* ETDM_IN1_CON1 */
+ tdm_con = 0;
+ tdm_con |= 0 << ETDM_IN1_CON1_REG_LRCK_AUTO_MODE_SFT;
+ tdm_con |= 1 << ETDM_IN1_CON1_PINMUX_MCLK_CTRL_OE_SFT;
+ tdm_con |= (lrck_width - 1) << ETDM_IN1_CON1_REG_LRCK_WIDTH_SFT;
+ regmap_update_bits(afe->regmap, ETDM_IN1_CON1, ETDM_IN_CON1_CTRL_MASK, tdm_con);
+
+ /* ETDM_IN1_CON3 */
+ tdm_con = 0;
+ tdm_con = ETDM_IN_CON3_FS(tran_rate);
+ regmap_update_bits(afe->regmap, ETDM_IN1_CON3, ETDM_IN_CON3_CTRL_MASK, tdm_con);
+
+ /* ETDM_IN1_CON4 */
+ tdm_con = 0;
+ tdm_con = ETDM_IN_CON4_FS(tran_relatch_rate);
+ if (slave_mode) {
+ if (lrck_inv)
+ tdm_con |= ETDM_IN_CON4_CON0_SLAVE_LRCK_INV;
+ if (bck_inv)
+ tdm_con |= ETDM_IN_CON4_CON0_SLAVE_BCK_INV;
+ } else {
+ if (lrck_inv)
+ tdm_con |= ETDM_IN_CON4_CON0_MASTER_LRCK_INV;
+ if (bck_inv)
+ tdm_con |= ETDM_IN_CON4_CON0_MASTER_BCK_INV;
+ }
+ regmap_update_bits(afe->regmap, ETDM_IN1_CON4, ETDM_IN_CON4_CTRL_MASK, tdm_con);
+
+ /* ETDM_IN1_CON2 */
+ tdm_con = 0;
+ if (data_mode == TDM_DATA_MULTI_PIN) {
+ tdm_con |= ETDM_IN_CON2_MULTI_IP_2CH_MODE;
+ tdm_con |= ETDM_IN_CON2_MULTI_IP_CH(channels);
+ }
+ regmap_update_bits(afe->regmap, ETDM_IN1_CON2, ETDM_IN_CON2_CTRL_MASK, tdm_con);
+
+ /* ETDM_IN1_CON8 */
+ tdm_con = 0;
+ if (slave_mode) {
+ tdm_con |= 1 << ETDM_IN1_CON8_REG_ETDM_USE_AFIFO_SFT;
+ tdm_con |= 0 << ETDM_IN1_CON8_REG_AFIFO_CLOCK_DOMAIN_SEL_SFT;
+ tdm_con |= ETDM_IN_CON8_FS(tran_relatch_rate);
+ } else {
+ tdm_con |= 0 << ETDM_IN1_CON8_REG_ETDM_USE_AFIFO_SFT;
+ }
+ regmap_update_bits(afe->regmap, ETDM_IN1_CON8, ETDM_IN_CON8_CTRL_MASK, tdm_con);
+
+ return 0;
+}
+
+static int mtk_dai_tdm_set_sysclk(struct snd_soc_dai *dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct mtk_base_afe *afe = dev_get_drvdata(dai->dev);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ struct mtk_afe_tdm_priv *tdm_priv = afe_priv->dai_priv[dai->id];
+
+ if (dir != SND_SOC_CLOCK_IN) {
+ dev_err(afe->dev, "%s(), dir != SND_SOC_CLOCK_OUT", __func__);
+ return -EINVAL;
+ }
+
+ dev_dbg(afe->dev, "%s(), freq %d\n", __func__, freq);
+
+ return mtk_dai_tdm_cal_mclk(afe, tdm_priv, freq);
+}
+
+static int mtk_dai_tdm_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct mtk_base_afe *afe = dev_get_drvdata(dai->dev);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ struct mtk_afe_tdm_priv *tdm_priv = afe_priv->dai_priv[dai->id];
+
+ /* DAI mode */
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ tdm_priv->tdm_mode = TDM_IN_I2S;
+ tdm_priv->data_mode = TDM_DATA_MULTI_PIN;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ tdm_priv->tdm_mode = TDM_IN_LJ;
+ tdm_priv->data_mode = TDM_DATA_MULTI_PIN;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ tdm_priv->tdm_mode = TDM_IN_RJ;
+ tdm_priv->data_mode = TDM_DATA_MULTI_PIN;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ tdm_priv->tdm_mode = TDM_IN_DSP_A;
+ tdm_priv->data_mode = TDM_DATA_ONE_PIN;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ tdm_priv->tdm_mode = TDM_IN_DSP_B;
+ tdm_priv->data_mode = TDM_DATA_ONE_PIN;
+ break;
+ default:
+ dev_err(afe->dev, "%s(), invalid DAIFMT_FORMAT_MASK", __func__);
+ return -EINVAL;
+ }
+
+ /* DAI clock inversion */
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ tdm_priv->bck_invert = TDM_BCK_NON_INV;
+ tdm_priv->lck_invert = TDM_LCK_NON_INV;
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ tdm_priv->bck_invert = TDM_BCK_NON_INV;
+ tdm_priv->lck_invert = TDM_LCK_INV;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ tdm_priv->bck_invert = TDM_BCK_INV;
+ tdm_priv->lck_invert = TDM_LCK_NON_INV;
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ tdm_priv->bck_invert = TDM_BCK_INV;
+ tdm_priv->lck_invert = TDM_LCK_INV;
+ break;
+ default:
+ dev_err(afe->dev, "%s(), invalid DAIFMT_INV_MASK", __func__);
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_BP_FP:
+ tdm_priv->slave_mode = false;
+ break;
+ case SND_SOC_DAIFMT_BC_FC:
+ tdm_priv->slave_mode = true;
+ break;
+ default:
+ dev_err(afe->dev, "%s(), invalid DAIFMT_CLOCK_PROVIDER_MASK",
+ __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mtk_dai_tdm_set_tdm_slot(struct snd_soc_dai *dai,
+ unsigned int tx_mask,
+ unsigned int rx_mask,
+ int slots,
+ int slot_width)
+{
+ struct mtk_base_afe *afe = dev_get_drvdata(dai->dev);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ struct mtk_afe_tdm_priv *tdm_priv = afe_priv->dai_priv[dai->id];
+
+ dev_dbg(dai->dev, "%s %d slot_width %d\n", __func__, dai->id, slot_width);
+
+ tdm_priv->lrck_width = slot_width;
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops mtk_dai_tdm_ops = {
+ .hw_params = mtk_dai_tdm_hw_params,
+ .set_sysclk = mtk_dai_tdm_set_sysclk,
+ .set_fmt = mtk_dai_tdm_set_fmt,
+ .set_tdm_slot = mtk_dai_tdm_set_tdm_slot,
+};
+
+/* dai driver */
+#define MTK_TDM_RATES (SNDRV_PCM_RATE_8000_48000 |\
+ SNDRV_PCM_RATE_88200 |\
+ SNDRV_PCM_RATE_96000 |\
+ SNDRV_PCM_RATE_176400 |\
+ SNDRV_PCM_RATE_192000)
+
+#define MTK_TDM_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+ SNDRV_PCM_FMTBIT_S24_LE |\
+ SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_driver mtk_dai_tdm_driver[] = {
+ {
+ .name = "TDM IN",
+ .id = MT8186_DAI_TDM_IN,
+ .capture = {
+ .stream_name = "TDM IN",
+ .channels_min = 2,
+ .channels_max = 8,
+ .rates = MTK_TDM_RATES,
+ .formats = MTK_TDM_FORMATS,
+ },
+ .ops = &mtk_dai_tdm_ops,
+ },
+};
+
+static struct mtk_afe_tdm_priv *init_tdm_priv_data(struct mtk_base_afe *afe)
+{
+ struct mtk_afe_tdm_priv *tdm_priv;
+
+ tdm_priv = devm_kzalloc(afe->dev, sizeof(struct mtk_afe_tdm_priv),
+ GFP_KERNEL);
+ if (!tdm_priv)
+ return NULL;
+
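+ /* default MCLK is 512 * fs; set_sysclk() may override it before hw_params */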
+ tdm_priv->mclk_multiple = 512;
+ tdm_priv->mclk_id = MT8186_TDM_MCK;
+ tdm_priv->id = MT8186_DAI_TDM_IN;
+
+ return tdm_priv;
+}
+
+int mt8186_dai_tdm_register(struct mtk_base_afe *afe)
+{
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ struct mtk_afe_tdm_priv *tdm_priv;
+ struct mtk_base_afe_dai *dai;
+
+ dai = devm_kzalloc(afe->dev, sizeof(*dai), GFP_KERNEL);
+ if (!dai)
+ return -ENOMEM;
+
+ list_add(&dai->list, &afe->sub_dais);
+
+ dai->dai_drivers = mtk_dai_tdm_driver;
+ dai->num_dai_drivers = ARRAY_SIZE(mtk_dai_tdm_driver);
+
+ dai->controls = mtk_dai_tdm_controls;
+ dai->num_controls = ARRAY_SIZE(mtk_dai_tdm_controls);
+ dai->dapm_widgets = mtk_dai_tdm_widgets;
+ dai->num_dapm_widgets = ARRAY_SIZE(mtk_dai_tdm_widgets);
+ dai->dapm_routes = mtk_dai_tdm_routes;
+ dai->num_dapm_routes = ARRAY_SIZE(mtk_dai_tdm_routes);
+
+ tdm_priv = init_tdm_priv_data(afe);
+ if (!tdm_priv)
+ return -ENOMEM;
+
+ afe_priv->dai_priv[MT8186_DAI_TDM_IN] = tdm_priv;
+
+ return 0;
+}
diff --git a/sound/soc/mediatek/mt8186/mt8186-interconnection.h b/sound/soc/mediatek/mt8186/mt8186-interconnection.h
new file mode 100644
index 000000000000..5b188d93ebd3
--- /dev/null
+++ b/sound/soc/mediatek/mt8186/mt8186-interconnection.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * MediaTek MT8186 audio driver interconnection definition
+ *
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Jiaxin Yu <jiaxin.yu@mediatek.com>
+ */
+
+#ifndef _MT8186_INTERCONNECTION_H_
+#define _MT8186_INTERCONNECTION_H_
+
+/* in port define */
+#define I_I2S0_CH1 0
+#define I_I2S0_CH2 1
+#define I_ADDA_UL_CH1 3
+#define I_ADDA_UL_CH2 4
+#define I_DL1_CH1 5
+#define I_DL1_CH2 6
+#define I_DL2_CH1 7
+#define I_DL2_CH2 8
+#define I_PCM_1_CAP_CH1 9
+#define I_GAIN1_OUT_CH1 10
+#define I_GAIN1_OUT_CH2 11
+#define I_GAIN2_OUT_CH1 12
+#define I_GAIN2_OUT_CH2 13
+#define I_PCM_2_CAP_CH1 14
+#define I_ADDA_UL_CH3 17
+#define I_ADDA_UL_CH4 18
+#define I_DL12_CH1 19
+#define I_DL12_CH2 20
+#define I_DL12_CH3 5
+#define I_DL12_CH4 6
+#define I_PCM_2_CAP_CH2 21
+#define I_PCM_1_CAP_CH2 22
+#define I_DL3_CH1 23
+#define I_DL3_CH2 24
+#define I_I2S2_CH1 25
+#define I_I2S2_CH2 26
+#define I_I2S2_CH3 27
+#define I_I2S2_CH4 28
+
+/* in port define >= 32 */
+#define I_32_OFFSET 32
+#define I_CONNSYS_I2S_CH1 (34 - I_32_OFFSET)
+#define I_CONNSYS_I2S_CH2 (35 - I_32_OFFSET)
+#define I_SRC_1_OUT_CH1 (36 - I_32_OFFSET)
+#define I_SRC_1_OUT_CH2 (37 - I_32_OFFSET)
+#define I_SRC_2_OUT_CH1 (38 - I_32_OFFSET)
+#define I_SRC_2_OUT_CH2 (39 - I_32_OFFSET)
+#define I_DL4_CH1 (40 - I_32_OFFSET)
+#define I_DL4_CH2 (41 - I_32_OFFSET)
+#define I_DL5_CH1 (42 - I_32_OFFSET)
+#define I_DL5_CH2 (43 - I_32_OFFSET)
+#define I_DL6_CH1 (44 - I_32_OFFSET)
+#define I_DL6_CH2 (45 - I_32_OFFSET)
+#define I_DL7_CH1 (46 - I_32_OFFSET)
+#define I_DL7_CH2 (47 - I_32_OFFSET)
+#define I_DL8_CH1 (48 - I_32_OFFSET)
+#define I_DL8_CH2 (49 - I_32_OFFSET)
+#define I_TDM_IN_CH1 (56 - I_32_OFFSET)
+#define I_TDM_IN_CH2 (57 - I_32_OFFSET)
+#define I_TDM_IN_CH3 (58 - I_32_OFFSET)
+#define I_TDM_IN_CH4 (59 - I_32_OFFSET)
+#define I_TDM_IN_CH5 (60 - I_32_OFFSET)
+#define I_TDM_IN_CH6 (61 - I_32_OFFSET)
+#define I_TDM_IN_CH7 (62 - I_32_OFFSET)
+#define I_TDM_IN_CH8 (63 - I_32_OFFSET)
+
+#endif
diff --git a/sound/soc/mediatek/mt8186/mt8186-misc-control.c b/sound/soc/mediatek/mt8186/mt8186-misc-control.c
new file mode 100644
index 000000000000..2317de8c44c0
--- /dev/null
+++ b/sound/soc/mediatek/mt8186/mt8186-misc-control.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// MediaTek ALSA SoC Audio Misc Control
+//
+// Copyright (c) 2022 MediaTek Inc.
+// Author: Jiaxin Yu <jiaxin.yu@mediatek.com>
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/regmap.h>
+#include <sound/soc.h>
+
+#include "../common/mtk-afe-fe-dai.h"
+#include "../common/mtk-afe-platform-driver.h"
+#include "mt8186-afe-common.h"
+
+static const char * const mt8186_sgen_mode_str[] = {
+ "I0I1", "I2", "I3I4", "I5I6",
+ "I7I8", "I9I22", "I10I11", "I12I13",
+ "I14I21", "I15I16", "I17I18", "I19I20",
+ "I23I24", "I25I26", "I27I28", "I33",
+ "I34I35", "I36I37", "I38I39", "I40I41",
+ "I42I43", "I44I45", "I46I47", "I48I49",
+ "I56I57", "I58I59", "I60I61", "I62I63",
+ "O0O1", "O2", "O3O4", "O5O6",
+ "O7O8", "O9O10", "O11", "O12",
+ "O13O14", "O15O16", "O17O18", "O19O20",
+ "O21O22", "O23O24", "O25", "O28O29",
+ "O34", "O35", "O32O33", "O36O37",
+ "O38O39", "O30O31", "O40O41", "O42O43",
+ "O44O45", "O46O47", "O48O49", "O50O51",
+ "O58O59", "O60O61", "O62O63", "O64O65",
+ "O66O67", "O68O69", "O26O27", "OFF",
+};
+
+static const int mt8186_sgen_mode_idx[] = {
+ 0, 2, 4, 6,
+ 8, 22, 10, 12,
+ 14, -1, 18, 20,
+ 24, 26, 28, 33,
+ 34, 36, 38, 40,
+ 42, 44, 46, 48,
+ 56, 58, 60, 62,
+ 128, 130, 132, 134,
+ 135, 138, 139, 140,
+ 142, 144, 166, 148,
+ 150, 152, 153, 156,
+ 162, 163, 160, 164,
+ 166, -1, 168, 170,
+ 172, 174, 176, 178,
+ 186, 188, 190, 192,
+ 194, 196, -1, -1,
+};
+
+static const char * const mt8186_sgen_rate_str[] = {
+ "8K", "11K", "12K", "16K",
+ "22K", "24K", "32K", "44K",
+ "48K", "88k", "96k", "176k",
+ "192k"
+};
+
+static const int mt8186_sgen_rate_idx[] = {
+ 0, 1, 2, 4,
+ 5, 6, 8, 9,
+ 10, 11, 12, 13,
+ 14
+};
+
+/* this order must match reg bit amp_div_ch1/2 */
+static const char * const mt8186_sgen_amp_str[] = {
+ "1/128", "1/64", "1/32", "1/16", "1/8", "1/4", "1/2", "1" };
+
+static int mt8186_sgen_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+
+ ucontrol->value.integer.value[0] = afe_priv->sgen_mode;
+
+ return 0;
+}
+
+static int mt8186_sgen_set(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ int mode;
+ int mode_idx;
+
+ if (ucontrol->value.enumerated.item[0] >= e->items)
+ return -EINVAL;
+
+ mode = ucontrol->value.integer.value[0];
+ mode_idx = mt8186_sgen_mode_idx[mode];
+
+ dev_dbg(afe->dev, "%s(), mode %d, mode_idx %d\n",
+ __func__, mode, mode_idx);
+
+ if (mode == afe_priv->sgen_mode)
+ return 0;
+
+ if (mode_idx >= 0) {
+ regmap_update_bits(afe->regmap, AFE_SINEGEN_CON2,
+ INNER_LOOP_BACK_MODE_MASK_SFT,
+ mode_idx << INNER_LOOP_BACK_MODE_SFT);
+ regmap_update_bits(afe->regmap, AFE_SINEGEN_CON0,
+ DAC_EN_MASK_SFT, BIT(DAC_EN_SFT));
+ } else {
+ /* disable sgen */
+ regmap_update_bits(afe->regmap, AFE_SINEGEN_CON0,
+ DAC_EN_MASK_SFT, 0);
+ regmap_update_bits(afe->regmap, AFE_SINEGEN_CON2,
+ INNER_LOOP_BACK_MODE_MASK_SFT,
+ 0x3f << INNER_LOOP_BACK_MODE_SFT);
+ }
+
+ afe_priv->sgen_mode = mode;
+
+ return 1;
+}
+
+static int mt8186_sgen_rate_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+
+ ucontrol->value.integer.value[0] = afe_priv->sgen_rate;
+
+ return 0;
+}
+
+static int mt8186_sgen_rate_set(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ int rate;
+
+ if (ucontrol->value.enumerated.item[0] >= e->items)
+ return -EINVAL;
+
+ rate = ucontrol->value.integer.value[0];
+
+ dev_dbg(afe->dev, "%s(), rate %d\n", __func__, rate);
+
+ if (rate == afe_priv->sgen_rate)
+ return 0;
+
+ regmap_update_bits(afe->regmap, AFE_SINEGEN_CON0,
+ SINE_MODE_CH1_MASK_SFT,
+ mt8186_sgen_rate_idx[rate] << SINE_MODE_CH1_SFT);
+
+ regmap_update_bits(afe->regmap, AFE_SINEGEN_CON0,
+ SINE_MODE_CH2_MASK_SFT,
+ mt8186_sgen_rate_idx[rate] << SINE_MODE_CH2_SFT);
+
+ afe_priv->sgen_rate = rate;
+
+ return 1;
+}
+
+static int mt8186_sgen_amplitude_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+
+ ucontrol->value.integer.value[0] = afe_priv->sgen_amplitude;
+ return 0;
+}
+
+static int mt8186_sgen_amplitude_set(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ int amplitude;
+
+ if (ucontrol->value.enumerated.item[0] >= e->items)
+ return -EINVAL;
+
+ amplitude = ucontrol->value.integer.value[0];
+ if (amplitude > AMP_DIV_CH1_MASK) {
+ dev_err(afe->dev, "%s(), amplitude %d invalid\n",
+ __func__, amplitude);
+ return -EINVAL;
+ }
+
+ dev_dbg(afe->dev, "%s(), amplitude %d\n", __func__, amplitude);
+
+ if (amplitude == afe_priv->sgen_amplitude)
+ return 0;
+
+ regmap_update_bits(afe->regmap, AFE_SINEGEN_CON0,
+ AMP_DIV_CH1_MASK_SFT,
+ amplitude << AMP_DIV_CH1_SFT);
+ regmap_update_bits(afe->regmap, AFE_SINEGEN_CON0,
+ AMP_DIV_CH2_MASK_SFT,
+ amplitude << AMP_DIV_CH2_SFT);
+
+ afe_priv->sgen_amplitude = amplitude;
+
+ return 1;
+}
+
+static const struct soc_enum mt8186_afe_sgen_enum[] = {
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(mt8186_sgen_mode_str),
+ mt8186_sgen_mode_str),
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(mt8186_sgen_rate_str),
+ mt8186_sgen_rate_str),
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(mt8186_sgen_amp_str),
+ mt8186_sgen_amp_str),
+};
+
+static const struct snd_kcontrol_new mt8186_afe_sgen_controls[] = {
+ SOC_ENUM_EXT("Audio_SineGen_Switch", mt8186_afe_sgen_enum[0],
+ mt8186_sgen_get, mt8186_sgen_set),
+ SOC_ENUM_EXT("Audio_SineGen_SampleRate", mt8186_afe_sgen_enum[1],
+ mt8186_sgen_rate_get, mt8186_sgen_rate_set),
+ SOC_ENUM_EXT("Audio_SineGen_Amplitude", mt8186_afe_sgen_enum[2],
+ mt8186_sgen_amplitude_get, mt8186_sgen_amplitude_set),
+ SOC_SINGLE("Audio_SineGen_Mute_Ch1", AFE_SINEGEN_CON0,
+ MUTE_SW_CH1_MASK_SFT, MUTE_SW_CH1_MASK, 0),
+ SOC_SINGLE("Audio_SineGen_Mute_Ch2", AFE_SINEGEN_CON0,
+ MUTE_SW_CH2_MASK_SFT, MUTE_SW_CH2_MASK, 0),
+ SOC_SINGLE("Audio_SineGen_Freq_Div_Ch1", AFE_SINEGEN_CON0,
+ FREQ_DIV_CH1_SFT, FREQ_DIV_CH1_MASK, 0),
+ SOC_SINGLE("Audio_SineGen_Freq_Div_Ch2", AFE_SINEGEN_CON0,
+ FREQ_DIV_CH2_SFT, FREQ_DIV_CH2_MASK, 0),
+};
+
+int mt8186_add_misc_control(struct snd_soc_component *component)
+{
+ snd_soc_add_component_controls(component,
+ mt8186_afe_sgen_controls,
+ ARRAY_SIZE(mt8186_afe_sgen_controls));
+
+ return 0;
+}
diff --git a/sound/soc/mediatek/mt8186/mt8186-mt6366-common.c b/sound/soc/mediatek/mt8186/mt8186-mt6366-common.c
new file mode 100644
index 000000000000..4e66603d4cdb
--- /dev/null
+++ b/sound/soc/mediatek/mt8186/mt8186-mt6366-common.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mt8186-mt6366-common.c
+// -- MT8186 MT6366 ALSA common driver
+//
+// Copyright (c) 2022 MediaTek Inc.
+// Author: Jiaxin Yu <jiaxin.yu@mediatek.com>
+//
+#include <sound/soc.h>
+
+#include "../../codecs/mt6358.h"
+#include "../common/mtk-afe-platform-driver.h"
+#include "mt8186-afe-common.h"
+#include "mt8186-mt6366-common.h"
+
+int mt8186_mt6366_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_component *cmpnt_afe =
+ snd_soc_rtdcom_lookup(rtd, AFE_PCM_NAME);
+ struct snd_soc_component *cmpnt_codec =
+ asoc_rtd_to_codec(rtd, 0)->component;
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt_afe);
+ struct mt8186_afe_private *afe_priv = afe->platform_priv;
+ struct snd_soc_dapm_context *dapm = &rtd->card->dapm;
+ int ret;
+
+ /* set mtkaif protocol */
+ mt6358_set_mtkaif_protocol(cmpnt_codec,
+ MT6358_MTKAIF_PROTOCOL_1);
+ afe_priv->mtkaif_protocol = MT6358_MTKAIF_PROTOCOL_1;
+
+ ret = snd_soc_dapm_sync(dapm);
+ if (ret) {
+ dev_err(rtd->dev, "failed to snd_soc_dapm_sync\n");
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt8186_mt6366_init);
+
+int mt8186_mt6366_card_set_be_link(struct snd_soc_card *card,
+ struct snd_soc_dai_link *link,
+ struct device_node *node,
+ char *link_name)
+{
+ int ret;
+
+ if (node && strcmp(link->name, link_name) == 0) {
+ ret = snd_soc_of_get_dai_link_codecs(card->dev, node, link);
+ if (ret < 0)
+ return dev_err_probe(card->dev, ret, "get dai link codecs fail\n");
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt8186_mt6366_card_set_be_link);
diff --git a/sound/soc/mediatek/mt8186/mt8186-mt6366-common.h b/sound/soc/mediatek/mt8186/mt8186-mt6366-common.h
new file mode 100644
index 000000000000..907d8f5e46b1
--- /dev/null
+++ b/sound/soc/mediatek/mt8186/mt8186-mt6366-common.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * mt8186-mt6366-common.h
+ *
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Jiaxin Yu <jiaxin.yu@mediatek.com>
+ */
+
+#ifndef _MT8186_MT6366_COMMON_H_
+#define _MT8186_MT6366_COMMON_H_
+
+int mt8186_mt6366_init(struct snd_soc_pcm_runtime *rtd);
+int mt8186_mt6366_card_set_be_link(struct snd_soc_card *card,
+ struct snd_soc_dai_link *link,
+ struct device_node *node,
+ char *link_name);
+#endif
diff --git a/sound/soc/mediatek/mt8186/mt8186-mt6366-da7219-max98357.c b/sound/soc/mediatek/mt8186/mt8186-mt6366-da7219-max98357.c
new file mode 100644
index 000000000000..387f25cad809
--- /dev/null
+++ b/sound/soc/mediatek/mt8186/mt8186-mt6366-da7219-max98357.c
@@ -0,0 +1,1002 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mt8186-mt6366-da7219-max98357.c
+// -- MT8186-MT6366-DA7219-MAX98357 ALSA SoC machine driver
+//
+// Copyright (c) 2022 MediaTek Inc.
+// Author: Jiaxin Yu <jiaxin.yu@mediatek.com>
+//
+
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include "../../codecs/da7219-aad.h"
+#include "../../codecs/da7219.h"
+#include "../../codecs/mt6358.h"
+#include "../common/mtk-afe-platform-driver.h"
+#include "mt8186-afe-common.h"
+#include "mt8186-afe-clk.h"
+#include "mt8186-afe-gpio.h"
+#include "mt8186-mt6366-common.h"
+
+#define DA7219_CODEC_DAI "da7219-hifi"
+#define DA7219_DEV_NAME "da7219.5-001a"
+
+struct mt8186_mt6366_da7219_max98357_priv {
+ struct snd_soc_jack headset_jack, hdmi_jack;
+};
+
+static struct snd_soc_codec_conf mt8186_mt6366_da7219_max98357_codec_conf[] = {
+ {
+ .dlc = COMP_CODEC_CONF("mt6358-sound"),
+ .name_prefix = "Mt6366",
+ },
+ {
+ .dlc = COMP_CODEC_CONF("bt-sco"),
+ .name_prefix = "Mt8186 bt",
+ },
+ {
+ .dlc = COMP_CODEC_CONF("hdmi-audio-codec"),
+ .name_prefix = "Mt8186 hdmi",
+ },
+};
+
+static int mt8186_da7219_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct mt8186_mt6366_da7219_max98357_priv *priv =
+ snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_jack *jack = &priv->headset_jack;
+ struct snd_soc_component *cmpnt_codec =
+ asoc_rtd_to_codec(rtd, 0)->component;
+ int ret;
+
+ /* Enable Headset and 4 Buttons Jack detection */
+ ret = snd_soc_card_jack_new(rtd->card, "Headset Jack",
+ SND_JACK_HEADSET | SND_JACK_BTN_0 |
+ SND_JACK_BTN_1 | SND_JACK_BTN_2 |
+ SND_JACK_BTN_3 | SND_JACK_LINEOUT,
+ jack);
+ if (ret) {
+ dev_err(rtd->dev, "Headset Jack creation failed: %d\n", ret);
+ return ret;
+ }
+
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOLUMEUP);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEDOWN);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOICECOMMAND);
+
+ da7219_aad_jack_det(cmpnt_codec, &priv->headset_jack);
+
+ return 0;
+}
+
+static int mt8186_da7219_i2s_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_soc_dai *codec_dai;
+ unsigned int rate = params_rate(params);
+ unsigned int mclk_fs_ratio = 256;
+ unsigned int mclk_fs = rate * mclk_fs_ratio;
+ unsigned int freq;
+ int ret, j;
+
+ ret = snd_soc_dai_set_sysclk(asoc_rtd_to_cpu(rtd, 0), 0,
+ mclk_fs, SND_SOC_CLOCK_OUT);
+ if (ret < 0) {
+ dev_err(rtd->dev, "failed to set cpu dai sysclk: %d\n", ret);
+ return ret;
+ }
+
+ for_each_rtd_codec_dais(rtd, j, codec_dai) {
+ if (!strcmp(codec_dai->component->name, DA7219_DEV_NAME)) {
+ ret = snd_soc_dai_set_sysclk(codec_dai,
+ DA7219_CLKSRC_MCLK,
+ mclk_fs,
+ SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ dev_err(rtd->dev, "failed to set sysclk: %d\n",
+ ret);
+ return ret;
+ }
+
+ if ((rate % 8000) == 0)
+ freq = DA7219_PLL_FREQ_OUT_98304;
+ else
+ freq = DA7219_PLL_FREQ_OUT_90316;
+
+ ret = snd_soc_dai_set_pll(codec_dai, 0,
+ DA7219_SYSCLK_PLL_SRM,
+ 0, freq);
+ if (ret) {
+ dev_err(rtd->dev, "failed to start PLL: %d\n",
+ ret);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int mt8186_da7219_i2s_hw_free(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_soc_dai *codec_dai;
+ int ret = 0, j;
+
+ for_each_rtd_codec_dais(rtd, j, codec_dai) {
+ if (!strcmp(codec_dai->component->name, DA7219_DEV_NAME)) {
+ ret = snd_soc_dai_set_pll(codec_dai,
+ 0, DA7219_SYSCLK_MCLK, 0, 0);
+ if (ret < 0) {
+ dev_err(rtd->dev, "failed to stop PLL: %d\n",
+ ret);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_ops mt8186_da7219_i2s_ops = {
+ .hw_params = mt8186_da7219_i2s_hw_params,
+ .hw_free = mt8186_da7219_i2s_hw_free,
+};
+
+static int mt8186_mt6366_da7219_max98357_hdmi_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_component *cmpnt_codec =
+ asoc_rtd_to_codec(rtd, 0)->component;
+ struct mt8186_mt6366_da7219_max98357_priv *priv =
+ snd_soc_card_get_drvdata(rtd->card);
+ int ret;
+
+ ret = snd_soc_card_jack_new(rtd->card, "HDMI Jack", SND_JACK_LINEOUT, &priv->hdmi_jack);
+ if (ret) {
+ dev_err(rtd->dev, "HDMI Jack creation failed: %d\n", ret);
+ return ret;
+ }
+
+ return snd_soc_component_set_jack(cmpnt_codec, &priv->hdmi_jack, NULL);
+}
+
+static int mt8186_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params,
+ snd_pcm_format_t fmt)
+{
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+
+ dev_dbg(rtd->dev, "%s(), fix format to %d\n", __func__, fmt);
+
+ /* fix BE i2s channel to 2 channel */
+ channels->min = 2;
+ channels->max = 2;
+
+ /* clean param mask first */
+ snd_mask_reset_range(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT),
+ 0, (__force unsigned int)SNDRV_PCM_FORMAT_LAST);
+
+ params_set_format(params, fmt);
+
+ return 0;
+}
+
+static int mt8186_i2s_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ return mt8186_hw_params_fixup(rtd, params, SNDRV_PCM_FORMAT_S32_LE);
+}
+
+static int mt8186_anx7625_i2s_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ return mt8186_hw_params_fixup(rtd, params, SNDRV_PCM_FORMAT_S24_LE);
+}
+
+static int mt8186_mt6366_da7219_max98357_playback_startup(struct snd_pcm_substream *substream)
+{
+ static const unsigned int rates[] = {
+ 48000
+ };
+ static const unsigned int channels[] = {
+ 2
+ };
+ static const struct snd_pcm_hw_constraint_list constraints_rates = {
+ .count = ARRAY_SIZE(rates),
+ .list = rates,
+ .mask = 0,
+ };
+ static const struct snd_pcm_hw_constraint_list constraints_channels = {
+ .count = ARRAY_SIZE(channels),
+ .list = channels,
+ .mask = 0,
+ };
+
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ int ret;
+
+ ret = snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ &constraints_rates);
+ if (ret < 0) {
+ dev_err(rtd->dev, "hw_constraint_list rate failed\n");
+ return ret;
+ }
+
+ ret = snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_channels);
+ if (ret < 0) {
+ dev_err(rtd->dev, "hw_constraint_list channel failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_ops mt8186_mt6366_da7219_max98357_playback_ops = {
+ .startup = mt8186_mt6366_da7219_max98357_playback_startup,
+};
+
+static int mt8186_mt6366_da7219_max98357_capture_startup(struct snd_pcm_substream *substream)
+{
+ static const unsigned int rates[] = {
+ 48000
+ };
+ static const unsigned int channels[] = {
+ 1, 2
+ };
+ static const struct snd_pcm_hw_constraint_list constraints_rates = {
+ .count = ARRAY_SIZE(rates),
+ .list = rates,
+ .mask = 0,
+ };
+ static const struct snd_pcm_hw_constraint_list constraints_channels = {
+ .count = ARRAY_SIZE(channels),
+ .list = channels,
+ .mask = 0,
+ };
+
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ int ret;
+
+ ret = snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ &constraints_rates);
+ if (ret < 0) {
+ dev_err(rtd->dev, "hw_constraint_list rate failed\n");
+ return ret;
+ }
+
+ ret = snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_channels);
+ if (ret < 0) {
+ dev_err(rtd->dev, "hw_constraint_list channel failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_ops mt8186_mt6366_da7219_max98357_capture_ops = {
+ .startup = mt8186_mt6366_da7219_max98357_capture_startup,
+};
+
+/* FE */
+SND_SOC_DAILINK_DEFS(playback1,
+ DAILINK_COMP_ARRAY(COMP_CPU("DL1")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+SND_SOC_DAILINK_DEFS(playback12,
+ DAILINK_COMP_ARRAY(COMP_CPU("DL12")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+SND_SOC_DAILINK_DEFS(playback2,
+ DAILINK_COMP_ARRAY(COMP_CPU("DL2")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+SND_SOC_DAILINK_DEFS(playback3,
+ DAILINK_COMP_ARRAY(COMP_CPU("DL3")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+SND_SOC_DAILINK_DEFS(playback4,
+ DAILINK_COMP_ARRAY(COMP_CPU("DL4")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+SND_SOC_DAILINK_DEFS(playback5,
+ DAILINK_COMP_ARRAY(COMP_CPU("DL5")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+SND_SOC_DAILINK_DEFS(playback6,
+ DAILINK_COMP_ARRAY(COMP_CPU("DL6")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+SND_SOC_DAILINK_DEFS(playback7,
+ DAILINK_COMP_ARRAY(COMP_CPU("DL7")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+SND_SOC_DAILINK_DEFS(playback8,
+ DAILINK_COMP_ARRAY(COMP_CPU("DL8")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+SND_SOC_DAILINK_DEFS(capture1,
+ DAILINK_COMP_ARRAY(COMP_CPU("UL1")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+SND_SOC_DAILINK_DEFS(capture2,
+ DAILINK_COMP_ARRAY(COMP_CPU("UL2")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+SND_SOC_DAILINK_DEFS(capture3,
+ DAILINK_COMP_ARRAY(COMP_CPU("UL3")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+SND_SOC_DAILINK_DEFS(capture4,
+ DAILINK_COMP_ARRAY(COMP_CPU("UL4")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+SND_SOC_DAILINK_DEFS(capture5,
+ DAILINK_COMP_ARRAY(COMP_CPU("UL5")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+SND_SOC_DAILINK_DEFS(capture6,
+ DAILINK_COMP_ARRAY(COMP_CPU("UL6")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+SND_SOC_DAILINK_DEFS(capture7,
+ DAILINK_COMP_ARRAY(COMP_CPU("UL7")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+/* hostless */
+SND_SOC_DAILINK_DEFS(hostless_lpbk,
+ DAILINK_COMP_ARRAY(COMP_CPU("Hostless LPBK DAI")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(hostless_fm,
+ DAILINK_COMP_ARRAY(COMP_CPU("Hostless FM DAI")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(hostless_src1,
+ DAILINK_COMP_ARRAY(COMP_CPU("Hostless_SRC_1_DAI")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(hostless_src_bargein,
+ DAILINK_COMP_ARRAY(COMP_CPU("Hostless_SRC_Bargein_DAI")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+/* BE */
+SND_SOC_DAILINK_DEFS(adda,
+ DAILINK_COMP_ARRAY(COMP_CPU("ADDA")),
+ DAILINK_COMP_ARRAY(COMP_CODEC("mt6358-sound",
+ "mt6358-snd-codec-aif1"),
+ COMP_CODEC("dmic-codec",
+ "dmic-hifi")),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(i2s0,
+ DAILINK_COMP_ARRAY(COMP_CPU("I2S0")),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(i2s1,
+ DAILINK_COMP_ARRAY(COMP_CPU("I2S1")),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(i2s2,
+ DAILINK_COMP_ARRAY(COMP_CPU("I2S2")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(i2s3,
+ DAILINK_COMP_ARRAY(COMP_CPU("I2S3")),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(hw_gain1,
+ DAILINK_COMP_ARRAY(COMP_CPU("HW Gain 1")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(hw_gain2,
+ DAILINK_COMP_ARRAY(COMP_CPU("HW Gain 2")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(hw_src1,
+ DAILINK_COMP_ARRAY(COMP_CPU("HW_SRC_1")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(hw_src2,
+ DAILINK_COMP_ARRAY(COMP_CPU("HW_SRC_2")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(connsys_i2s,
+ DAILINK_COMP_ARRAY(COMP_CPU("CONNSYS_I2S")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(pcm1,
+ DAILINK_COMP_ARRAY(COMP_CPU("PCM 1")),
+ DAILINK_COMP_ARRAY(COMP_CODEC("bt-sco", "bt-sco-pcm-wb")),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(tdm_in,
+ DAILINK_COMP_ARRAY(COMP_CPU("TDM IN")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+/* hostless */
+SND_SOC_DAILINK_DEFS(hostless_ul1,
+ DAILINK_COMP_ARRAY(COMP_CPU("Hostless_UL1 DAI")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(hostless_ul2,
+ DAILINK_COMP_ARRAY(COMP_CPU("Hostless_UL2 DAI")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(hostless_ul3,
+ DAILINK_COMP_ARRAY(COMP_CPU("Hostless_UL3 DAI")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(hostless_ul5,
+ DAILINK_COMP_ARRAY(COMP_CPU("Hostless_UL5 DAI")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(hostless_ul6,
+ DAILINK_COMP_ARRAY(COMP_CPU("Hostless_UL6 DAI")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(hostless_hw_gain_aaudio,
+ DAILINK_COMP_ARRAY(COMP_CPU("Hostless HW Gain AAudio DAI")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(hostless_src_aaudio,
+ DAILINK_COMP_ARRAY(COMP_CPU("Hostless SRC AAudio DAI")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+static struct snd_soc_dai_link mt8186_mt6366_da7219_max98357_dai_links[] = {
+ /* Front End DAI links */
+ {
+ .name = "Playback_1",
+ .stream_name = "Playback_1",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .dpcm_merged_format = 1,
+ .dpcm_merged_chan = 1,
+ .dpcm_merged_rate = 1,
+ .ops = &mt8186_mt6366_da7219_max98357_playback_ops,
+ SND_SOC_DAILINK_REG(playback1),
+ },
+ {
+ .name = "Playback_12",
+ .stream_name = "Playback_12",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ SND_SOC_DAILINK_REG(playback12),
+ },
+ {
+ .name = "Playback_2",
+ .stream_name = "Playback_2",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .dpcm_merged_format = 1,
+ .dpcm_merged_chan = 1,
+ .dpcm_merged_rate = 1,
+ SND_SOC_DAILINK_REG(playback2),
+ },
+ {
+ .name = "Playback_3",
+ .stream_name = "Playback_3",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .dpcm_merged_format = 1,
+ .dpcm_merged_chan = 1,
+ .dpcm_merged_rate = 1,
+ .ops = &mt8186_mt6366_da7219_max98357_playback_ops,
+ SND_SOC_DAILINK_REG(playback3),
+ },
+ {
+ .name = "Playback_4",
+ .stream_name = "Playback_4",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ SND_SOC_DAILINK_REG(playback4),
+ },
+ {
+ .name = "Playback_5",
+ .stream_name = "Playback_5",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ SND_SOC_DAILINK_REG(playback5),
+ },
+ {
+ .name = "Playback_6",
+ .stream_name = "Playback_6",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ SND_SOC_DAILINK_REG(playback6),
+ },
+ {
+ .name = "Playback_7",
+ .stream_name = "Playback_7",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ SND_SOC_DAILINK_REG(playback7),
+ },
+ {
+ .name = "Playback_8",
+ .stream_name = "Playback_8",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ SND_SOC_DAILINK_REG(playback8),
+ },
+ {
+ .name = "Capture_1",
+ .stream_name = "Capture_1",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ SND_SOC_DAILINK_REG(capture1),
+ },
+ {
+ .name = "Capture_2",
+ .stream_name = "Capture_2",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ .dpcm_merged_format = 1,
+ .dpcm_merged_chan = 1,
+ .dpcm_merged_rate = 1,
+ .ops = &mt8186_mt6366_da7219_max98357_capture_ops,
+ SND_SOC_DAILINK_REG(capture2),
+ },
+ {
+ .name = "Capture_3",
+ .stream_name = "Capture_3",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ SND_SOC_DAILINK_REG(capture3),
+ },
+ {
+ .name = "Capture_4",
+ .stream_name = "Capture_4",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ .dpcm_merged_format = 1,
+ .dpcm_merged_chan = 1,
+ .dpcm_merged_rate = 1,
+ .ops = &mt8186_mt6366_da7219_max98357_capture_ops,
+ SND_SOC_DAILINK_REG(capture4),
+ },
+ {
+ .name = "Capture_5",
+ .stream_name = "Capture_5",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ SND_SOC_DAILINK_REG(capture5),
+ },
+ {
+ .name = "Capture_6",
+ .stream_name = "Capture_6",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ .dpcm_merged_format = 1,
+ .dpcm_merged_chan = 1,
+ .dpcm_merged_rate = 1,
+ SND_SOC_DAILINK_REG(capture6),
+ },
+ {
+ .name = "Capture_7",
+ .stream_name = "Capture_7",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ SND_SOC_DAILINK_REG(capture7),
+ },
+ {
+ .name = "Hostless_LPBK",
+ .stream_name = "Hostless_LPBK",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ SND_SOC_DAILINK_REG(hostless_lpbk),
+ },
+ {
+ .name = "Hostless_FM",
+ .stream_name = "Hostless_FM",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ SND_SOC_DAILINK_REG(hostless_fm),
+ },
+ {
+ .name = "Hostless_SRC_1",
+ .stream_name = "Hostless_SRC_1",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ SND_SOC_DAILINK_REG(hostless_src1),
+ },
+ {
+ .name = "Hostless_SRC_Bargein",
+ .stream_name = "Hostless_SRC_Bargein",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ SND_SOC_DAILINK_REG(hostless_src_bargein),
+ },
+ {
+ .name = "Hostless_HW_Gain_AAudio",
+ .stream_name = "Hostless_HW_Gain_AAudio",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ SND_SOC_DAILINK_REG(hostless_hw_gain_aaudio),
+ },
+ {
+ .name = "Hostless_SRC_AAudio",
+ .stream_name = "Hostless_SRC_AAudio",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ SND_SOC_DAILINK_REG(hostless_src_aaudio),
+ },
+ /* Back End DAI links */
+ {
+ .name = "Primary Codec",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ .init = mt8186_mt6366_init,
+ SND_SOC_DAILINK_REG(adda),
+ },
+ {
+ .name = "I2S3",
+ .no_pcm = 1,
+ .dai_fmt = SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_IB_IF |
+ SND_SOC_DAIFMT_CBM_CFM,
+ .dpcm_playback = 1,
+ .ignore_suspend = 1,
+ .init = mt8186_mt6366_da7219_max98357_hdmi_init,
+ .be_hw_params_fixup = mt8186_anx7625_i2s_hw_params_fixup,
+ SND_SOC_DAILINK_REG(i2s3),
+ },
+ {
+ .name = "I2S0",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ .be_hw_params_fixup = mt8186_i2s_hw_params_fixup,
+ .ops = &mt8186_da7219_i2s_ops,
+ SND_SOC_DAILINK_REG(i2s0),
+ },
+ {
+ .name = "I2S1",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .ignore_suspend = 1,
+ .be_hw_params_fixup = mt8186_i2s_hw_params_fixup,
+ .init = mt8186_da7219_init,
+ .ops = &mt8186_da7219_i2s_ops,
+ SND_SOC_DAILINK_REG(i2s1),
+ },
+ {
+ .name = "I2S2",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ .be_hw_params_fixup = mt8186_i2s_hw_params_fixup,
+ SND_SOC_DAILINK_REG(i2s2),
+ },
+ {
+ .name = "HW Gain 1",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ SND_SOC_DAILINK_REG(hw_gain1),
+ },
+ {
+ .name = "HW Gain 2",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ SND_SOC_DAILINK_REG(hw_gain2),
+ },
+ {
+ .name = "HW_SRC_1",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ SND_SOC_DAILINK_REG(hw_src1),
+ },
+ {
+ .name = "HW_SRC_2",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ SND_SOC_DAILINK_REG(hw_src2),
+ },
+ {
+ .name = "CONNSYS_I2S",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ SND_SOC_DAILINK_REG(connsys_i2s),
+ },
+ {
+ .name = "PCM 1",
+ .dai_fmt = SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_NB_IF,
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ SND_SOC_DAILINK_REG(pcm1),
+ },
+ {
+ .name = "TDM IN",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ SND_SOC_DAILINK_REG(tdm_in),
+ },
+ /* dummy BE for ul memif to record from dl memif */
+ {
+ .name = "Hostless_UL1",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ SND_SOC_DAILINK_REG(hostless_ul1),
+ },
+ {
+ .name = "Hostless_UL2",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ SND_SOC_DAILINK_REG(hostless_ul2),
+ },
+ {
+ .name = "Hostless_UL3",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ SND_SOC_DAILINK_REG(hostless_ul3),
+ },
+ {
+ .name = "Hostless_UL5",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ SND_SOC_DAILINK_REG(hostless_ul5),
+ },
+ {
+ .name = "Hostless_UL6",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ SND_SOC_DAILINK_REG(hostless_ul6),
+ },
+};
+
+static const struct snd_soc_dapm_widget
+mt8186_mt6366_da7219_max98357_widgets[] = {
+ SND_SOC_DAPM_SPK("Speakers", NULL),
+ SND_SOC_DAPM_OUTPUT("HDMI1"),
+};
+
+static const struct snd_soc_dapm_route
+mt8186_mt6366_da7219_max98357_routes[] = {
+ /* SPK */
+ { "Speakers", NULL, "Speaker"},
+ /* HDMI */
+ { "HDMI1", NULL, "TX"},
+};
+
+static const struct snd_kcontrol_new
+mt8186_mt6366_da7219_max98357_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Speakers"),
+ SOC_DAPM_PIN_SWITCH("HDMI1"),
+};
+
+static struct snd_soc_card mt8186_mt6366_da7219_max98357_soc_card = {
+ .name = "mt8186_mt6366_da7219_max98357",
+ .owner = THIS_MODULE,
+ .dai_link = mt8186_mt6366_da7219_max98357_dai_links,
+ .num_links = ARRAY_SIZE(mt8186_mt6366_da7219_max98357_dai_links),
+ .controls = mt8186_mt6366_da7219_max98357_controls,
+ .num_controls = ARRAY_SIZE(mt8186_mt6366_da7219_max98357_controls),
+ .dapm_widgets = mt8186_mt6366_da7219_max98357_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(mt8186_mt6366_da7219_max98357_widgets),
+ .dapm_routes = mt8186_mt6366_da7219_max98357_routes,
+ .num_dapm_routes = ARRAY_SIZE(mt8186_mt6366_da7219_max98357_routes),
+ .codec_conf = mt8186_mt6366_da7219_max98357_codec_conf,
+ .num_configs = ARRAY_SIZE(mt8186_mt6366_da7219_max98357_codec_conf),
+};
+
+static int mt8186_mt6366_da7219_max98357_dev_probe(struct platform_device *pdev)
+{
+ struct snd_soc_card *card;
+ struct snd_soc_dai_link *dai_link;
+ struct mt8186_mt6366_da7219_max98357_priv *priv;
+ struct device_node *platform_node, *headset_codec, *playback_codec;
+ int ret, i;
+
+ card = (struct snd_soc_card *)device_get_match_data(&pdev->dev);
+ if (!card)
+ return -EINVAL;
+ card->dev = &pdev->dev;
+
+ platform_node = of_parse_phandle(pdev->dev.of_node, "mediatek,platform", 0);
+ if (!platform_node) {
+ ret = -EINVAL;
+ dev_err_probe(&pdev->dev, ret, "Property 'platform' missing or invalid\n");
+ return ret;
+ }
+
+ playback_codec = of_get_child_by_name(pdev->dev.of_node, "playback-codecs");
+ if (!playback_codec) {
+ ret = -EINVAL;
+ dev_err_probe(&pdev->dev, ret, "Property 'speaker-codecs' missing or invalid\n");
+ goto err_playback_codec;
+ }
+
+ headset_codec = of_get_child_by_name(pdev->dev.of_node, "headset-codec");
+ if (!headset_codec) {
+ ret = -EINVAL;
+ dev_err_probe(&pdev->dev, ret, "Property 'headset-codec' missing or invalid\n");
+ goto err_headset_codec;
+ }
+
+ for_each_card_prelinks(card, i, dai_link) {
+ ret = mt8186_mt6366_card_set_be_link(card, dai_link, playback_codec, "I2S3");
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "%s set speaker_codec fail\n",
+ dai_link->name);
+ goto err_probe;
+ }
+
+ ret = mt8186_mt6366_card_set_be_link(card, dai_link, headset_codec, "I2S0");
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "%s set headset_codec fail\n",
+ dai_link->name);
+ goto err_probe;
+ }
+
+ ret = mt8186_mt6366_card_set_be_link(card, dai_link, headset_codec, "I2S1");
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "%s set headset_codec fail\n",
+ dai_link->name);
+ goto err_probe;
+ }
+
+ if (!dai_link->platforms->name)
+ dai_link->platforms->of_node = platform_node;
+ }
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto err_probe;
+ }
+
+ snd_soc_card_set_drvdata(card, priv);
+
+ ret = mt8186_afe_gpio_init(&pdev->dev);
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "%s init gpio error\n", __func__);
+ goto err_probe;
+ }
+
+ ret = devm_snd_soc_register_card(&pdev->dev, card);
+ if (ret)
+ dev_err_probe(&pdev->dev, ret, "%s snd_soc_register_card fail\n", __func__);
+
+err_probe:
+ of_node_put(headset_codec);
+err_headset_codec:
+ of_node_put(playback_codec);
+err_playback_codec:
+ of_node_put(platform_node);
+
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id mt8186_mt6366_da7219_max98357_dt_match[] = {
+ { .compatible = "mediatek,mt8186-mt6366-da7219-max98357-sound",
+ .data = &mt8186_mt6366_da7219_max98357_soc_card,
+ },
+ {}
+};
+#endif
+
+static struct platform_driver mt8186_mt6366_da7219_max98357_driver = {
+ .driver = {
+ .name = "mt8186_mt6366_da7219_max98357",
+#if IS_ENABLED(CONFIG_OF)
+ .of_match_table = mt8186_mt6366_da7219_max98357_dt_match,
+#endif
+ .pm = &snd_soc_pm_ops,
+ },
+ .probe = mt8186_mt6366_da7219_max98357_dev_probe,
+};
+
+module_platform_driver(mt8186_mt6366_da7219_max98357_driver);
+
+/* Module information */
+MODULE_DESCRIPTION("MT8186-MT6366-DA7219-MAX98357 ALSA SoC machine driver");
+MODULE_AUTHOR("Jiaxin Yu <jiaxin.yu@mediatek.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("mt8186_mt6366_da7219_max98357 soc card");
diff --git a/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c b/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
new file mode 100644
index 000000000000..891146fd6c2b
--- /dev/null
+++ b/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
@@ -0,0 +1,978 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mt8186-mt6366-rt1019-rt5682s.c
+// -- MT8186-MT6366-RT1019-RT5682S ALSA SoC machine driver
+//
+// Copyright (c) 2022 MediaTek Inc.
+// Author: Jiaxin Yu <jiaxin.yu@mediatek.com>
+//
+
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <sound/jack.h>
+#include <sound/pcm_params.h>
+#include <sound/rt5682.h>
+#include <sound/soc.h>
+
+#include "../../codecs/mt6358.h"
+#include "../../codecs/rt5682.h"
+#include "../common/mtk-afe-platform-driver.h"
+#include "mt8186-afe-common.h"
+#include "mt8186-afe-clk.h"
+#include "mt8186-afe-gpio.h"
+#include "mt8186-mt6366-common.h"
+
+#define RT1019_CODEC_DAI "HiFi"
+#define RT1019_DEV0_NAME "rt1019p"
+
+#define RT5682S_CODEC_DAI "rt5682s-aif1"
+#define RT5682S_DEV0_NAME "rt5682s.5-001a"
+
+struct mt8186_mt6366_rt1019_rt5682s_priv {
+ struct snd_soc_jack headset_jack, hdmi_jack;
+};
+
+static struct snd_soc_codec_conf mt8186_mt6366_rt1019_rt5682s_codec_conf[] = {
+ {
+ .dlc = COMP_CODEC_CONF("mt6358-sound"),
+ .name_prefix = "Mt6366",
+ },
+ {
+ .dlc = COMP_CODEC_CONF("bt-sco"),
+ .name_prefix = "Mt8186 bt",
+ },
+ {
+ .dlc = COMP_CODEC_CONF("hdmi-audio-codec"),
+ .name_prefix = "Mt8186 hdmi",
+ },
+};
+
+static int mt8186_rt5682s_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct mt8186_mt6366_rt1019_rt5682s_priv *priv =
+ snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_jack *jack = &priv->headset_jack;
+ struct snd_soc_component *cmpnt_codec =
+ asoc_rtd_to_codec(rtd, 0)->component;
+ int ret;
+
+ ret = snd_soc_card_jack_new(rtd->card, "Headset Jack",
+ SND_JACK_HEADSET | SND_JACK_BTN_0 |
+ SND_JACK_BTN_1 | SND_JACK_BTN_2 |
+ SND_JACK_BTN_3,
+ jack);
+ if (ret) {
+ dev_err(rtd->dev, "Headset Jack creation failed: %d\n", ret);
+ return ret;
+ }
+
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);
+
+ return snd_soc_component_set_jack(cmpnt_codec, jack, NULL);
+}
+
+static int mt8186_rt5682s_i2s_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_card *card = rtd->card;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ unsigned int rate = params_rate(params);
+ unsigned int mclk_fs_ratio = 128;
+ unsigned int mclk_fs = rate * mclk_fs_ratio;
+ int bitwidth;
+ int ret;
+
+ bitwidth = snd_pcm_format_width(params_format(params));
+ if (bitwidth < 0) {
+ dev_err(card->dev, "invalid bit width: %d\n", bitwidth);
+ return bitwidth;
+ }
+
+ ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x00, 0x0, 0x2, bitwidth);
+ if (ret) {
+ dev_err(card->dev, "failed to set tdm slot\n");
+ return ret;
+ }
+
+ ret = snd_soc_dai_set_pll(codec_dai, RT5682_PLL1,
+ RT5682_PLL1_S_BCLK1,
+ params_rate(params) * 64,
+ params_rate(params) * 512);
+ if (ret) {
+ dev_err(card->dev, "failed to set pll\n");
+ return ret;
+ }
+
+ ret = snd_soc_dai_set_sysclk(codec_dai,
+ RT5682_SCLK_S_PLL1,
+ params_rate(params) * 512,
+ SND_SOC_CLOCK_IN);
+ if (ret) {
+ dev_err(card->dev, "failed to set sysclk\n");
+ return ret;
+ }
+
+ return snd_soc_dai_set_sysclk(cpu_dai, 0, mclk_fs, SND_SOC_CLOCK_OUT);
+}
+
+static const struct snd_soc_ops mt8186_rt5682s_i2s_ops = {
+ .hw_params = mt8186_rt5682s_i2s_hw_params,
+};
+
+static int mt8186_mt6366_rt1019_rt5682s_hdmi_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_component *cmpnt_codec =
+ asoc_rtd_to_codec(rtd, 0)->component;
+ struct mt8186_mt6366_rt1019_rt5682s_priv *priv =
+ snd_soc_card_get_drvdata(rtd->card);
+ int ret;
+
+ ret = snd_soc_card_jack_new(rtd->card, "HDMI Jack", SND_JACK_LINEOUT, &priv->hdmi_jack);
+ if (ret) {
+ dev_err(rtd->dev, "HDMI Jack creation failed: %d\n", ret);
+ return ret;
+ }
+
+ return snd_soc_component_set_jack(cmpnt_codec, &priv->hdmi_jack, NULL);
+}
+
+static int mt8186_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params,
+ snd_pcm_format_t fmt)
+{
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+
+ dev_dbg(rtd->dev, "%s(), fix format to %d\n", __func__, fmt);
+
+ /* fix BE i2s channel to 2 channel */
+ channels->min = 2;
+ channels->max = 2;
+
+ /* clean param mask first */
+ snd_mask_reset_range(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT),
+ 0, (__force unsigned int)SNDRV_PCM_FORMAT_LAST);
+
+ params_set_format(params, fmt);
+
+ return 0;
+}
+
+static int mt8186_i2s_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ return mt8186_hw_params_fixup(rtd, params, SNDRV_PCM_FORMAT_S24_LE);
+}
+
+static int mt8186_it6505_i2s_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ return mt8186_hw_params_fixup(rtd, params, SNDRV_PCM_FORMAT_S32_LE);
+}
+
+static int mt8186_mt6366_rt1019_rt5682s_playback_startup(struct snd_pcm_substream *substream)
+{
+ static const unsigned int rates[] = {
+ 48000
+ };
+ static const unsigned int channels[] = {
+ 2
+ };
+ static const struct snd_pcm_hw_constraint_list constraints_rates = {
+ .count = ARRAY_SIZE(rates),
+ .list = rates,
+ .mask = 0,
+ };
+ static const struct snd_pcm_hw_constraint_list constraints_channels = {
+ .count = ARRAY_SIZE(channels),
+ .list = channels,
+ .mask = 0,
+ };
+
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ int ret;
+
+ ret = snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ &constraints_rates);
+ if (ret < 0) {
+ dev_err(rtd->dev, "hw_constraint_list rate failed\n");
+ return ret;
+ }
+
+ ret = snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_channels);
+ if (ret < 0) {
+ dev_err(rtd->dev, "hw_constraint_list channel failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_ops mt8186_mt6366_rt1019_rt5682s_playback_ops = {
+ .startup = mt8186_mt6366_rt1019_rt5682s_playback_startup,
+};
+
+static int mt8186_mt6366_rt1019_rt5682s_capture_startup(struct snd_pcm_substream *substream)
+{
+ static const unsigned int rates[] = {
+ 48000
+ };
+ static const unsigned int channels[] = {
+ 1, 2
+ };
+ static const struct snd_pcm_hw_constraint_list constraints_rates = {
+ .count = ARRAY_SIZE(rates),
+ .list = rates,
+ .mask = 0,
+ };
+ static const struct snd_pcm_hw_constraint_list constraints_channels = {
+ .count = ARRAY_SIZE(channels),
+ .list = channels,
+ .mask = 0,
+ };
+
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ int ret;
+
+ ret = snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ &constraints_rates);
+ if (ret < 0) {
+ dev_err(rtd->dev, "hw_constraint_list rate failed\n");
+ return ret;
+ }
+
+ ret = snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_channels);
+ if (ret < 0) {
+ dev_err(rtd->dev, "hw_constraint_list channel failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_ops mt8186_mt6366_rt1019_rt5682s_capture_ops = {
+ .startup = mt8186_mt6366_rt1019_rt5682s_capture_startup,
+};
+
+/* FE */
+SND_SOC_DAILINK_DEFS(playback1,
+ DAILINK_COMP_ARRAY(COMP_CPU("DL1")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+SND_SOC_DAILINK_DEFS(playback12,
+ DAILINK_COMP_ARRAY(COMP_CPU("DL12")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+SND_SOC_DAILINK_DEFS(playback2,
+ DAILINK_COMP_ARRAY(COMP_CPU("DL2")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+SND_SOC_DAILINK_DEFS(playback3,
+ DAILINK_COMP_ARRAY(COMP_CPU("DL3")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+SND_SOC_DAILINK_DEFS(playback4,
+ DAILINK_COMP_ARRAY(COMP_CPU("DL4")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+SND_SOC_DAILINK_DEFS(playback5,
+ DAILINK_COMP_ARRAY(COMP_CPU("DL5")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+SND_SOC_DAILINK_DEFS(playback6,
+ DAILINK_COMP_ARRAY(COMP_CPU("DL6")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+SND_SOC_DAILINK_DEFS(playback7,
+ DAILINK_COMP_ARRAY(COMP_CPU("DL7")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+SND_SOC_DAILINK_DEFS(playback8,
+ DAILINK_COMP_ARRAY(COMP_CPU("DL8")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+SND_SOC_DAILINK_DEFS(capture1,
+ DAILINK_COMP_ARRAY(COMP_CPU("UL1")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+SND_SOC_DAILINK_DEFS(capture2,
+ DAILINK_COMP_ARRAY(COMP_CPU("UL2")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+SND_SOC_DAILINK_DEFS(capture3,
+ DAILINK_COMP_ARRAY(COMP_CPU("UL3")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+SND_SOC_DAILINK_DEFS(capture4,
+ DAILINK_COMP_ARRAY(COMP_CPU("UL4")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+SND_SOC_DAILINK_DEFS(capture5,
+ DAILINK_COMP_ARRAY(COMP_CPU("UL5")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+SND_SOC_DAILINK_DEFS(capture6,
+ DAILINK_COMP_ARRAY(COMP_CPU("UL6")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+SND_SOC_DAILINK_DEFS(capture7,
+ DAILINK_COMP_ARRAY(COMP_CPU("UL7")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+/* hostless */
+SND_SOC_DAILINK_DEFS(hostless_lpbk,
+ DAILINK_COMP_ARRAY(COMP_CPU("Hostless LPBK DAI")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(hostless_fm,
+ DAILINK_COMP_ARRAY(COMP_CPU("Hostless FM DAI")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(hostless_src1,
+ DAILINK_COMP_ARRAY(COMP_CPU("Hostless_SRC_1_DAI")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(hostless_src_bargein,
+ DAILINK_COMP_ARRAY(COMP_CPU("Hostless_SRC_Bargein_DAI")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+/* BE */
+SND_SOC_DAILINK_DEFS(adda,
+ DAILINK_COMP_ARRAY(COMP_CPU("ADDA")),
+ DAILINK_COMP_ARRAY(COMP_CODEC("mt6358-sound",
+ "mt6358-snd-codec-aif1"),
+ COMP_CODEC("dmic-codec",
+ "dmic-hifi")),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(i2s0,
+ DAILINK_COMP_ARRAY(COMP_CPU("I2S0")),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(i2s1,
+ DAILINK_COMP_ARRAY(COMP_CPU("I2S1")),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(i2s2,
+ DAILINK_COMP_ARRAY(COMP_CPU("I2S2")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(i2s3,
+ DAILINK_COMP_ARRAY(COMP_CPU("I2S3")),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(hw_gain1,
+ DAILINK_COMP_ARRAY(COMP_CPU("HW Gain 1")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(hw_gain2,
+ DAILINK_COMP_ARRAY(COMP_CPU("HW Gain 2")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(hw_src1,
+ DAILINK_COMP_ARRAY(COMP_CPU("HW_SRC_1")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(hw_src2,
+ DAILINK_COMP_ARRAY(COMP_CPU("HW_SRC_2")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(connsys_i2s,
+ DAILINK_COMP_ARRAY(COMP_CPU("CONNSYS_I2S")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(pcm1,
+ DAILINK_COMP_ARRAY(COMP_CPU("PCM 1")),
+ DAILINK_COMP_ARRAY(COMP_CODEC("bt-sco", "bt-sco-pcm-wb")),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(tdm_in,
+ DAILINK_COMP_ARRAY(COMP_CPU("TDM IN")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+/* hostless */
+SND_SOC_DAILINK_DEFS(hostless_ul1,
+ DAILINK_COMP_ARRAY(COMP_CPU("Hostless_UL1 DAI")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(hostless_ul2,
+ DAILINK_COMP_ARRAY(COMP_CPU("Hostless_UL2 DAI")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(hostless_ul3,
+ DAILINK_COMP_ARRAY(COMP_CPU("Hostless_UL3 DAI")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(hostless_ul5,
+ DAILINK_COMP_ARRAY(COMP_CPU("Hostless_UL5 DAI")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(hostless_ul6,
+ DAILINK_COMP_ARRAY(COMP_CPU("Hostless_UL6 DAI")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(hostless_hw_gain_aaudio,
+ DAILINK_COMP_ARRAY(COMP_CPU("Hostless HW Gain AAudio DAI")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(hostless_src_aaudio,
+ DAILINK_COMP_ARRAY(COMP_CPU("Hostless SRC AAudio DAI")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+static struct snd_soc_dai_link mt8186_mt6366_rt1019_rt5682s_dai_links[] = {
+ /* Front End DAI links */
+ {
+ .name = "Playback_1",
+ .stream_name = "Playback_1",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .dpcm_merged_format = 1,
+ .dpcm_merged_chan = 1,
+ .dpcm_merged_rate = 1,
+ .ops = &mt8186_mt6366_rt1019_rt5682s_playback_ops,
+ SND_SOC_DAILINK_REG(playback1),
+ },
+ {
+ .name = "Playback_12",
+ .stream_name = "Playback_12",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ SND_SOC_DAILINK_REG(playback12),
+ },
+ {
+ .name = "Playback_2",
+ .stream_name = "Playback_2",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .dpcm_merged_format = 1,
+ .dpcm_merged_chan = 1,
+ .dpcm_merged_rate = 1,
+ SND_SOC_DAILINK_REG(playback2),
+ },
+ {
+ .name = "Playback_3",
+ .stream_name = "Playback_3",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .dpcm_merged_format = 1,
+ .dpcm_merged_chan = 1,
+ .dpcm_merged_rate = 1,
+ .ops = &mt8186_mt6366_rt1019_rt5682s_playback_ops,
+ SND_SOC_DAILINK_REG(playback3),
+ },
+ {
+ .name = "Playback_4",
+ .stream_name = "Playback_4",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ SND_SOC_DAILINK_REG(playback4),
+ },
+ {
+ .name = "Playback_5",
+ .stream_name = "Playback_5",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ SND_SOC_DAILINK_REG(playback5),
+ },
+ {
+ .name = "Playback_6",
+ .stream_name = "Playback_6",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ SND_SOC_DAILINK_REG(playback6),
+ },
+ {
+ .name = "Playback_7",
+ .stream_name = "Playback_7",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ SND_SOC_DAILINK_REG(playback7),
+ },
+ {
+ .name = "Playback_8",
+ .stream_name = "Playback_8",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ SND_SOC_DAILINK_REG(playback8),
+ },
+ {
+ .name = "Capture_1",
+ .stream_name = "Capture_1",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ SND_SOC_DAILINK_REG(capture1),
+ },
+ {
+ .name = "Capture_2",
+ .stream_name = "Capture_2",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ .dpcm_merged_format = 1,
+ .dpcm_merged_chan = 1,
+ .dpcm_merged_rate = 1,
+ .ops = &mt8186_mt6366_rt1019_rt5682s_capture_ops,
+ SND_SOC_DAILINK_REG(capture2),
+ },
+ {
+ .name = "Capture_3",
+ .stream_name = "Capture_3",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ SND_SOC_DAILINK_REG(capture3),
+ },
+ {
+ .name = "Capture_4",
+ .stream_name = "Capture_4",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ .dpcm_merged_format = 1,
+ .dpcm_merged_chan = 1,
+ .dpcm_merged_rate = 1,
+ .ops = &mt8186_mt6366_rt1019_rt5682s_capture_ops,
+ SND_SOC_DAILINK_REG(capture4),
+ },
+ {
+ .name = "Capture_5",
+ .stream_name = "Capture_5",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ SND_SOC_DAILINK_REG(capture5),
+ },
+ {
+ .name = "Capture_6",
+ .stream_name = "Capture_6",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ .dpcm_merged_format = 1,
+ .dpcm_merged_chan = 1,
+ .dpcm_merged_rate = 1,
+ SND_SOC_DAILINK_REG(capture6),
+ },
+ {
+ .name = "Capture_7",
+ .stream_name = "Capture_7",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ SND_SOC_DAILINK_REG(capture7),
+ },
+ {
+ .name = "Hostless_LPBK",
+ .stream_name = "Hostless_LPBK",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ SND_SOC_DAILINK_REG(hostless_lpbk),
+ },
+ {
+ .name = "Hostless_FM",
+ .stream_name = "Hostless_FM",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ SND_SOC_DAILINK_REG(hostless_fm),
+ },
+ {
+ .name = "Hostless_SRC_1",
+ .stream_name = "Hostless_SRC_1",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ SND_SOC_DAILINK_REG(hostless_src1),
+ },
+ {
+ .name = "Hostless_SRC_Bargein",
+ .stream_name = "Hostless_SRC_Bargein",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ SND_SOC_DAILINK_REG(hostless_src_bargein),
+ },
+ {
+ .name = "Hostless_HW_Gain_AAudio",
+ .stream_name = "Hostless_HW_Gain_AAudio",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ SND_SOC_DAILINK_REG(hostless_hw_gain_aaudio),
+ },
+ {
+ .name = "Hostless_SRC_AAudio",
+ .stream_name = "Hostless_SRC_AAudio",
+ .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
+ SND_SOC_DPCM_TRIGGER_PRE},
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ SND_SOC_DAILINK_REG(hostless_src_aaudio),
+ },
+ /* Back End DAI links */
+ {
+ .name = "Primary Codec",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ .init = mt8186_mt6366_init,
+ SND_SOC_DAILINK_REG(adda),
+ },
+ {
+ .name = "I2S3",
+ .no_pcm = 1,
+ .dai_fmt = SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_IB_IF |
+ SND_SOC_DAIFMT_CBM_CFM,
+ .dpcm_playback = 1,
+ .ignore_suspend = 1,
+ .init = mt8186_mt6366_rt1019_rt5682s_hdmi_init,
+ .be_hw_params_fixup = mt8186_it6505_i2s_hw_params_fixup,
+ SND_SOC_DAILINK_REG(i2s3),
+ },
+ {
+ .name = "I2S0",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ .be_hw_params_fixup = mt8186_i2s_hw_params_fixup,
+ .ops = &mt8186_rt5682s_i2s_ops,
+ SND_SOC_DAILINK_REG(i2s0),
+ },
+ {
+ .name = "I2S1",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .ignore_suspend = 1,
+ .be_hw_params_fixup = mt8186_i2s_hw_params_fixup,
+ .init = mt8186_rt5682s_init,
+ .ops = &mt8186_rt5682s_i2s_ops,
+ SND_SOC_DAILINK_REG(i2s1),
+ },
+ {
+ .name = "I2S2",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ .be_hw_params_fixup = mt8186_i2s_hw_params_fixup,
+ SND_SOC_DAILINK_REG(i2s2),
+ },
+ {
+ .name = "HW Gain 1",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ SND_SOC_DAILINK_REG(hw_gain1),
+ },
+ {
+ .name = "HW Gain 2",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ SND_SOC_DAILINK_REG(hw_gain2),
+ },
+ {
+ .name = "HW_SRC_1",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ SND_SOC_DAILINK_REG(hw_src1),
+ },
+ {
+ .name = "HW_SRC_2",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ SND_SOC_DAILINK_REG(hw_src2),
+ },
+ {
+ .name = "CONNSYS_I2S",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ SND_SOC_DAILINK_REG(connsys_i2s),
+ },
+ {
+ .name = "PCM 1",
+ .dai_fmt = SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_NB_IF,
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ SND_SOC_DAILINK_REG(pcm1),
+ },
+ {
+ .name = "TDM IN",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ SND_SOC_DAILINK_REG(tdm_in),
+ },
+ /* dummy BE for ul memif to record from dl memif */
+ {
+ .name = "Hostless_UL1",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ SND_SOC_DAILINK_REG(hostless_ul1),
+ },
+ {
+ .name = "Hostless_UL2",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ SND_SOC_DAILINK_REG(hostless_ul2),
+ },
+ {
+ .name = "Hostless_UL3",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ SND_SOC_DAILINK_REG(hostless_ul3),
+ },
+ {
+ .name = "Hostless_UL5",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ SND_SOC_DAILINK_REG(hostless_ul5),
+ },
+ {
+ .name = "Hostless_UL6",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ SND_SOC_DAILINK_REG(hostless_ul6),
+ },
+};
+
+static const struct snd_soc_dapm_widget
+mt8186_mt6366_rt1019_rt5682s_widgets[] = {
+ SND_SOC_DAPM_SPK("Speakers", NULL),
+ SND_SOC_DAPM_OUTPUT("HDMI1"),
+};
+
+static const struct snd_soc_dapm_route
+mt8186_mt6366_rt1019_rt5682s_routes[] = {
+ /* SPK */
+ { "Speakers", NULL, "Speaker" },
+ /* HDMI */
+ { "HDMI1", NULL, "TX" },
+};
+
+static const struct snd_kcontrol_new
+mt8186_mt6366_rt1019_rt5682s_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Speakers"),
+ SOC_DAPM_PIN_SWITCH("HDMI1"),
+};
+
+static struct snd_soc_card mt8186_mt6366_rt1019_rt5682s_soc_card = {
+ .name = "mt8186_mt6366_rt1019_rt5682s",
+ .owner = THIS_MODULE,
+ .dai_link = mt8186_mt6366_rt1019_rt5682s_dai_links,
+ .num_links = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_dai_links),
+ .controls = mt8186_mt6366_rt1019_rt5682s_controls,
+ .num_controls = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_controls),
+ .dapm_widgets = mt8186_mt6366_rt1019_rt5682s_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_widgets),
+ .dapm_routes = mt8186_mt6366_rt1019_rt5682s_routes,
+ .num_dapm_routes = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_routes),
+ .codec_conf = mt8186_mt6366_rt1019_rt5682s_codec_conf,
+ .num_configs = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_codec_conf),
+};
+
+static int mt8186_mt6366_rt1019_rt5682s_dev_probe(struct platform_device *pdev)
+{
+ struct snd_soc_card *card;
+ struct snd_soc_dai_link *dai_link;
+ struct mt8186_mt6366_rt1019_rt5682s_priv *priv;
+ struct device_node *platform_node, *headset_codec, *playback_codec;
+ int ret, i;
+
+ card = (struct snd_soc_card *)device_get_match_data(&pdev->dev);
+ if (!card)
+ return -EINVAL;
+ card->dev = &pdev->dev;
+
+ platform_node = of_parse_phandle(pdev->dev.of_node, "mediatek,platform", 0);
+ if (!platform_node) {
+ ret = -EINVAL;
+ dev_err_probe(&pdev->dev, ret, "Property 'platform' missing or invalid\n");
+ return ret;
+ }
+
+ playback_codec = of_get_child_by_name(pdev->dev.of_node, "playback-codecs");
+ if (!playback_codec) {
+ ret = -EINVAL;
+ dev_err_probe(&pdev->dev, ret, "Property 'speaker-codecs' missing or invalid\n");
+ goto err_playback_codec;
+ }
+
+ headset_codec = of_get_child_by_name(pdev->dev.of_node, "headset-codec");
+ if (!headset_codec) {
+ ret = -EINVAL;
+ dev_err_probe(&pdev->dev, ret, "Property 'headset-codec' missing or invalid\n");
+ goto err_headset_codec;
+ }
+
+ for_each_card_prelinks(card, i, dai_link) {
+ ret = mt8186_mt6366_card_set_be_link(card, dai_link, playback_codec, "I2S3");
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "%s set speaker_codec fail\n",
+ dai_link->name);
+ goto err_probe;
+ }
+
+ ret = mt8186_mt6366_card_set_be_link(card, dai_link, headset_codec, "I2S0");
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "%s set headset_codec fail\n",
+ dai_link->name);
+ goto err_probe;
+ }
+
+ ret = mt8186_mt6366_card_set_be_link(card, dai_link, headset_codec, "I2S1");
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "%s set headset_codec fail\n",
+ dai_link->name);
+ goto err_probe;
+ }
+
+ if (!dai_link->platforms->name)
+ dai_link->platforms->of_node = platform_node;
+ }
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto err_probe;
+ }
+
+ snd_soc_card_set_drvdata(card, priv);
+
+ ret = mt8186_afe_gpio_init(&pdev->dev);
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "%s init gpio error\n", __func__);
+ goto err_probe;
+ }
+
+ ret = devm_snd_soc_register_card(&pdev->dev, card);
+ if (ret)
+ dev_err_probe(&pdev->dev, ret, "%s snd_soc_register_card fail\n", __func__);
+
+err_probe:
+ of_node_put(headset_codec);
+err_headset_codec:
+ of_node_put(playback_codec);
+err_playback_codec:
+ of_node_put(platform_node);
+
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id mt8186_mt6366_rt1019_rt5682s_dt_match[] = {
+ { .compatible = "mediatek,mt8186-mt6366-rt1019-rt5682s-sound",
+ .data = &mt8186_mt6366_rt1019_rt5682s_soc_card,
+ },
+ {}
+};
+#endif
+
+static struct platform_driver mt8186_mt6366_rt1019_rt5682s_driver = {
+ .driver = {
+ .name = "mt8186_mt6366_rt1019_rt5682s",
+#if IS_ENABLED(CONFIG_OF)
+ .of_match_table = mt8186_mt6366_rt1019_rt5682s_dt_match,
+#endif
+ .pm = &snd_soc_pm_ops,
+ },
+ .probe = mt8186_mt6366_rt1019_rt5682s_dev_probe,
+};
+
+module_platform_driver(mt8186_mt6366_rt1019_rt5682s_driver);
+
+/* Module information */
+MODULE_DESCRIPTION("MT8186-MT6366-RT1019-RT5682S ALSA SoC machine driver");
+MODULE_AUTHOR("Jiaxin Yu <jiaxin.yu@mediatek.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("mt8186_mt6366_rt1019_rt5682s soc card");
diff --git a/sound/soc/mediatek/mt8186/mt8186-reg.h b/sound/soc/mediatek/mt8186/mt8186-reg.h
new file mode 100644
index 000000000000..53c3eb7283d8
--- /dev/null
+++ b/sound/soc/mediatek/mt8186/mt8186-reg.h
@@ -0,0 +1,2913 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * mt8186-reg.h -- MediaTek MT8186 audio driver register definitions
+ *
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Jiaxin Yu <jiaxin.yu@mediatek.com>
+ */
+
+#ifndef _MT8186_REG_H_
+#define _MT8186_REG_H_
+
+/* reg bit enum */
+enum {
+ MT8186_MEMIF_PBUF_SIZE_32_BYTES,
+ MT8186_MEMIF_PBUF_SIZE_64_BYTES,
+ MT8186_MEMIF_PBUF_SIZE_128_BYTES,
+ MT8186_MEMIF_PBUF_SIZE_256_BYTES,
+ MT8186_MEMIF_PBUF_SIZE_NUM,
+};
+
+/*****************************************************************************
+ * R E G I S T E R D E F I N I T I O N
+ *****************************************************************************/
+/* AUDIO_TOP_CON0 */
+#define RESERVED_SFT 31
+#define RESERVED_MASK_SFT BIT(31)
+#define AHB_IDLE_EN_INT_SFT 30
+#define AHB_IDLE_EN_INT_MASK_SFT BIT(30)
+#define AHB_IDLE_EN_EXT_SFT 29
+#define AHB_IDLE_EN_EXT_MASK_SFT BIT(29)
+#define PDN_NLE_SFT 28
+#define PDN_NLE_MASK_SFT BIT(28)
+#define PDN_TML_SFT 27
+#define PDN_TML_MASK_SFT BIT(27)
+#define PDN_DAC_PREDIS_SFT 26
+#define PDN_DAC_PREDIS_MASK_SFT BIT(26)
+#define PDN_DAC_SFT 25
+#define PDN_DAC_MASK_SFT BIT(25)
+#define PDN_ADC_SFT 24
+#define PDN_ADC_MASK_SFT BIT(24)
+#define PDN_TDM_CK_SFT 20
+#define PDN_TDM_CK_MASK_SFT BIT(20)
+#define PDN_APLL_TUNER_SFT 19
+#define PDN_APLL_TUNER_MASK_SFT BIT(19)
+#define PDN_APLL2_TUNER_SFT 18
+#define PDN_APLL2_TUNER_MASK_SFT BIT(18)
+#define APB3_SEL_SFT 14
+#define APB3_SEL_MASK_SFT BIT(14)
+#define APB_R2T_SFT 13
+#define APB_R2T_MASK_SFT BIT(13)
+#define APB_W2T_SFT 12
+#define APB_W2T_MASK_SFT BIT(12)
+#define PDN_24M_SFT 9
+#define PDN_24M_MASK_SFT BIT(9)
+#define PDN_22M_SFT 8
+#define PDN_22M_MASK_SFT BIT(8)
+#define PDN_AFE_SFT 2
+#define PDN_AFE_MASK_SFT BIT(2)
+
+/* AUDIO_TOP_CON1 */
+#define PDN_3RD_DAC_HIRES_SFT 31
+#define PDN_3RD_DAC_HIRES_MASK_SFT BIT(31)
+#define PDN_3RD_DAC_TML_SFT 30
+#define PDN_3RD_DAC_TML_MASK_SFT BIT(30)
+#define PDN_3RD_DAC_PREDIS_SFT 29
+#define PDN_3RD_DAC_PREDIS_MASK_SFT BIT(29)
+#define PDN_3RD_DAC_SFT 28
+#define PDN_3RD_DAC_MASK_SFT BIT(28)
+#define I2S_SOFT_RST5_SFT 22
+#define I2S_SOFT_RST5_MASK_SFT BIT(22)
+#define PDN_ADDA6_ADC_HIRES_SFT 21
+#define PDN_ADDA6_ADC_HIRES_MASK_SFT BIT(21)
+#define PDN_ADDA6_ADC_SFT 20
+#define PDN_ADDA6_ADC_MASK_SFT BIT(20)
+#define PDN_ADC_HIRES_TML_SFT 17
+#define PDN_ADC_HIRES_TML_MASK_SFT BIT(17)
+#define PDN_ADC_HIRES_SFT 16
+#define PDN_ADC_HIRES_MASK_SFT BIT(16)
+#define PDN_DAC_HIRES_SFT 15
+#define PDN_DAC_HIRES_MASK_SFT BIT(15)
+#define PDN_GENERAL2_ASRC_SFT 14
+#define PDN_GENERAL2_ASRC_MASK_SFT BIT(14)
+#define PDN_GENERAL1_ASRC_SFT 13
+#define PDN_GENERAL1_ASRC_MASK_SFT BIT(13)
+#define PDN_CONNSYS_I2S_ASRC_SFT 12
+#define PDN_CONNSYS_I2S_ASRC_MASK_SFT BIT(12)
+#define I2S4_BCLK_SW_CG_SFT 7
+#define I2S4_BCLK_SW_CG_MASK_SFT BIT(7)
+#define I2S3_BCLK_SW_CG_SFT 6
+#define I2S3_BCLK_SW_CG_MASK_SFT BIT(6)
+#define I2S2_BCLK_SW_CG_SFT 5
+#define I2S2_BCLK_SW_CG_MASK_SFT BIT(5)
+#define I2S1_BCLK_SW_CG_SFT 4
+#define I2S1_BCLK_SW_CG_MASK_SFT BIT(4)
+#define I2S_SOFT_RST2_SFT 2
+#define I2S_SOFT_RST2_MASK_SFT BIT(2)
+#define I2S_SOFT_RST_SFT 1
+#define I2S_SOFT_RST_MASK_SFT BIT(1)
+
+/* AUDIO_TOP_CON3 */
+#define BUSY_SFT 31
+#define BUSY_MASK_SFT BIT(31)
+#define OS_DISABLE_SFT 30
+#define OS_DISABLE_MASK_SFT BIT(30)
+#define CG_DISABLE_SFT 29
+#define CG_DISABLE_MASK_SFT BIT(29)
+#define CLEAR_FLAG_SFT 0
+#define CLEAR_FLAG_MASK_SFT BIT(0)
+
+/* AFE_DAC_CON0 */
+#define VUL12_ON_SFT 31
+#define VUL12_ON_MASK_SFT BIT(31)
+#define MOD_DAI_ON_SFT 30
+#define MOD_DAI_ON_MASK_SFT BIT(30)
+#define DAI_ON_SFT 29
+#define DAI_ON_MASK_SFT BIT(29)
+#define DAI2_ON_SFT 28
+#define DAI2_ON_MASK_SFT BIT(28)
+#define VUL6_ON_SFT 23
+#define VUL6_ON_MASK_SFT BIT(23)
+#define VUL5_ON_SFT 22
+#define VUL5_ON_MASK_SFT BIT(22)
+#define VUL4_ON_SFT 21
+#define VUL4_ON_MASK_SFT BIT(21)
+#define VUL3_ON_SFT 20
+#define VUL3_ON_MASK_SFT BIT(20)
+#define VUL2_ON_SFT 19
+#define VUL2_ON_MASK_SFT BIT(19)
+#define VUL_ON_SFT 18
+#define VUL_ON_MASK_SFT BIT(18)
+#define AWB2_ON_SFT 17
+#define AWB2_ON_MASK_SFT BIT(17)
+#define AWB_ON_SFT 16
+#define AWB_ON_MASK_SFT BIT(16)
+#define DL12_ON_SFT 15
+#define DL12_ON_MASK_SFT BIT(15)
+#define DL8_ON_SFT 11
+#define DL8_ON_MASK_SFT BIT(11)
+#define DL7_ON_SFT 10
+#define DL7_ON_MASK_SFT BIT(10)
+#define DL6_ON_SFT 9
+#define DL6_ON_MASK_SFT BIT(9)
+#define DL5_ON_SFT 8
+#define DL5_ON_MASK_SFT BIT(8)
+#define DL4_ON_SFT 7
+#define DL4_ON_MASK_SFT BIT(7)
+#define DL3_ON_SFT 6
+#define DL3_ON_MASK_SFT BIT(6)
+#define DL2_ON_SFT 5
+#define DL2_ON_MASK_SFT BIT(5)
+#define DL1_ON_SFT 4
+#define DL1_ON_MASK_SFT BIT(4)
+#define AUDIO_AFE_ON_SFT 0
+#define AUDIO_AFE_ON_MASK_SFT BIT(0)
+
+/* AFE_DAC_MON */
+#define AFE_ON_RETM_SFT 0
+#define AFE_ON_RETM_MASK_SFT BIT(0)
+
+/* AFE_I2S_CON */
+#define BCK_NEG_EG_LATCH_SFT 30
+#define BCK_NEG_EG_LATCH_MASK_SFT BIT(30)
+#define BCK_INV_SFT 29
+#define BCK_INV_MASK_SFT BIT(29)
+#define I2SIN_PAD_SEL_SFT 28
+#define I2SIN_PAD_SEL_MASK_SFT BIT(28)
+#define I2S_LOOPBACK_SFT 20
+#define I2S_LOOPBACK_MASK_SFT BIT(20)
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_SFT 17
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_MASK_SFT BIT(17)
+#define I2S1_HD_EN_SFT 12
+#define I2S1_HD_EN_MASK_SFT BIT(12)
+#define I2S_OUT_MODE_SFT 8
+#define I2S_OUT_MODE_MASK_SFT GENMASK(11, 8)
+#define INV_PAD_CTRL_SFT 7
+#define INV_PAD_CTRL_MASK_SFT BIT(7)
+#define I2S_BYPSRC_SFT 6
+#define I2S_BYPSRC_MASK_SFT BIT(6)
+#define INV_LRCK_SFT 5
+#define INV_LRCK_MASK_SFT BIT(5)
+#define I2S_FMT_SFT 3
+#define I2S_FMT_MASK_SFT BIT(3)
+#define I2S_SRC_SFT 2
+#define I2S_SRC_MASK_SFT BIT(2)
+#define I2S_WLEN_SFT 1
+#define I2S_WLEN_MASK_SFT BIT(1)
+#define I2S_EN_SFT 0
+#define I2S_EN_MASK_SFT BIT(0)
+
+/* AFE_I2S_CON1 */
+#define I2S2_LR_SWAP_SFT 31
+#define I2S2_LR_SWAP_MASK_SFT BIT(31)
+#define I2S2_SEL_O19_O20_SFT 18
+#define I2S2_SEL_O19_O20_MASK_SFT BIT(18)
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_SFT 17
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_MASK_SFT BIT(17)
+#define I2S2_SEL_O03_O04_SFT 16
+#define I2S2_SEL_O03_O04_MASK_SFT BIT(16)
+#define I2S2_HD_EN_SFT 12
+#define I2S2_HD_EN_MASK_SFT BIT(12)
+#define I2S2_OUT_MODE_SFT 8
+#define I2S2_OUT_MODE_MASK_SFT GENMASK(11, 8)
+#define INV_LRCK_SFT 5
+#define INV_LRCK_MASK_SFT BIT(5)
+#define I2S2_FMT_SFT 3
+#define I2S2_FMT_MASK_SFT BIT(3)
+#define I2S2_WLEN_SFT 1
+#define I2S2_WLEN_MASK_SFT BIT(1)
+#define I2S2_EN_SFT 0
+#define I2S2_EN_MASK_SFT BIT(0)
+
+/* AFE_I2S_CON2 */
+#define I2S3_LR_SWAP_SFT 31
+#define I2S3_LR_SWAP_MASK_SFT BIT(31)
+#define I2S3_UPDATE_WORD_SFT 24
+#define I2S3_UPDATE_WORD_MASK_SFT GENMASK(28, 24)
+#define I2S3_BCK_INV_SFT 23
+#define I2S3_BCK_INV_MASK_SFT BIT(23)
+#define I2S3_FPGA_BIT_TEST_SFT 22
+#define I2S3_FPGA_BIT_TEST_MASK_SFT BIT(22)
+#define I2S3_FPGA_BIT_SFT 21
+#define I2S3_FPGA_BIT_MASK_SFT BIT(21)
+#define I2S3_LOOPBACK_SFT 20
+#define I2S3_LOOPBACK_MASK_SFT BIT(20)
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_SFT 17
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_MASK_SFT BIT(17)
+#define I2S3_HD_EN_SFT 12
+#define I2S3_HD_EN_MASK_SFT BIT(12)
+#define I2S3_OUT_MODE_SFT 8
+#define I2S3_OUT_MODE_MASK_SFT GENMASK(11, 8)
+#define I2S3_FMT_SFT 3
+#define I2S3_FMT_MASK_SFT BIT(3)
+#define I2S3_WLEN_SFT 1
+#define I2S3_WLEN_MASK_SFT BIT(1)
+#define I2S3_EN_SFT 0
+#define I2S3_EN_MASK_SFT BIT(0)
+
+/* AFE_I2S_CON3 */
+#define I2S4_LR_SWAP_SFT 31
+#define I2S4_LR_SWAP_MASK_SFT BIT(31)
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_SFT 17
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_MASK_SFT BIT(17)
+#define I2S4_HD_EN_SFT 12
+#define I2S4_HD_EN_MASK_SFT BIT(12)
+#define I2S4_OUT_MODE_SFT 8
+#define I2S4_OUT_MODE_MASK_SFT GENMASK(11, 8)
+#define INV_LRCK_SFT 5
+#define INV_LRCK_MASK_SFT BIT(5)
+#define I2S4_FMT_SFT 3
+#define I2S4_FMT_MASK_SFT BIT(3)
+#define I2S4_WLEN_SFT 1
+#define I2S4_WLEN_MASK_SFT BIT(1)
+#define I2S4_EN_SFT 0
+#define I2S4_EN_MASK_SFT BIT(0)
+
+/* AFE_I2S_CON4 */
+#define I2S_LOOPBACK_SFT 20
+#define I2S_LOOPBACK_MASK 0x1
+#define I2S_LOOPBACK_MASK_SFT BIT(20)
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_SFT 17
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_MASK 0x1
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_MASK_SFT BIT(17)
+#define INV_LRCK_SFT 5
+#define INV_LRCK_MASK 0x1
+#define INV_LRCK_MASK_SFT BIT(5)
+
+/* AFE_CONNSYS_I2S_CON */
+#define BCK_NEG_EG_LATCH_SFT 30
+#define BCK_NEG_EG_LATCH_MASK_SFT BIT(30)
+#define BCK_INV_SFT 29
+#define BCK_INV_MASK_SFT BIT(29)
+#define I2SIN_PAD_SEL_SFT 28
+#define I2SIN_PAD_SEL_MASK_SFT BIT(28)
+#define I2S_LOOPBACK_SFT 20
+#define I2S_LOOPBACK_MASK_SFT BIT(20)
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_SFT 17
+#define I2S_ONOFF_NOT_RESET_CK_ENABLE_MASK_SFT BIT(17)
+#define I2S_MODE_SFT 8
+#define I2S_MODE_MASK_SFT GENMASK(11, 8)
+#define INV_PAD_CTRL_SFT 7
+#define INV_PAD_CTRL_MASK_SFT BIT(7)
+#define I2S_BYPSRC_SFT 6
+#define I2S_BYPSRC_MASK_SFT BIT(6)
+#define INV_LRCK_SFT 5
+#define INV_LRCK_MASK_SFT BIT(5)
+#define I2S_FMT_SFT 3
+#define I2S_FMT_MASK_SFT BIT(3)
+#define I2S_SRC_SFT 2
+#define I2S_SRC_MASK_SFT BIT(2)
+#define I2S_WLEN_SFT 1
+#define I2S_WLEN_MASK_SFT BIT(1)
+#define I2S_EN_SFT 0
+#define I2S_EN_MASK_SFT BIT(0)
+
+/* AFE_ASRC_2CH_CON2 */
+#define CHSET_O16BIT_SFT 19
+#define CHSET_O16BIT_MASK_SFT BIT(19)
+#define CHSET_CLR_IIR_HISTORY_SFT 17
+#define CHSET_CLR_IIR_HISTORY_MASK_SFT BIT(17)
+#define CHSET_IS_MONO_SFT 16
+#define CHSET_IS_MONO_MASK_SFT BIT(16)
+#define CHSET_IIR_EN_SFT 11
+#define CHSET_IIR_EN_MASK_SFT BIT(11)
+#define CHSET_IIR_STAGE_SFT 8
+#define CHSET_IIR_STAGE_MASK_SFT GENMASK(10, 8)
+#define CHSET_STR_CLR_SFT 5
+#define CHSET_STR_CLR_MASK_SFT BIT(5)
+#define CHSET_ON_SFT 2
+#define CHSET_ON_MASK_SFT BIT(2)
+#define COEFF_SRAM_CTRL_SFT 1
+#define COEFF_SRAM_CTRL_MASK_SFT BIT(1)
+#define ASM_ON_SFT 0
+#define ASM_ON_MASK_SFT BIT(0)
+
+/* AFE_GAIN1_CON0 */
+#define GAIN1_SAMPLE_PER_STEP_SFT 8
+#define GAIN1_SAMPLE_PER_STEP_MASK_SFT GENMASK(15, 8)
+#define GAIN1_MODE_SFT 4
+#define GAIN1_MODE_MASK_SFT GENMASK(7, 4)
+#define GAIN1_ON_SFT 0
+#define GAIN1_ON_MASK_SFT BIT(0)
+
+/* AFE_GAIN1_CON1 */
+#define GAIN1_TARGET_SFT 0
+#define GAIN1_TARGET_MASK 0xfffffff
+#define GAIN1_TARGET_MASK_SFT GENMASK(27, 0)
+
+/* AFE_GAIN2_CON0 */
+#define GAIN2_SAMPLE_PER_STEP_SFT 8
+#define GAIN2_SAMPLE_PER_STEP_MASK_SFT GENMASK(15, 8)
+#define GAIN2_MODE_SFT 4
+#define GAIN2_MODE_MASK_SFT GENMASK(7, 4)
+#define GAIN2_ON_SFT 0
+#define GAIN2_ON_MASK_SFT BIT(0)
+
+/* AFE_GAIN2_CON1 */
+#define GAIN2_TARGET_SFT 0
+#define GAIN2_TARGET_MASK 0xfffffff
+#define GAIN2_TARGET_MASK_SFT GENMASK(27, 0)
+
+/* AFE_GAIN1_CUR */
+#define AFE_GAIN1_CUR_SFT 0
+#define AFE_GAIN1_CUR_MASK_SFT GENMASK(27, 0)
+
+/* AFE_GAIN2_CUR */
+#define AFE_GAIN2_CUR_SFT 0
+#define AFE_GAIN2_CUR_MASK_SFT GENMASK(27, 0)
+
+/* PCM_INTF_CON1 */
+#define PCM_FIX_VALUE_SEL_SFT 31
+#define PCM_FIX_VALUE_SEL_MASK_SFT BIT(31)
+#define PCM_BUFFER_LOOPBACK_SFT 30
+#define PCM_BUFFER_LOOPBACK_MASK_SFT BIT(30)
+#define PCM_PARALLEL_LOOPBACK_SFT 29
+#define PCM_PARALLEL_LOOPBACK_MASK_SFT BIT(29)
+#define PCM_SERIAL_LOOPBACK_SFT 28
+#define PCM_SERIAL_LOOPBACK_MASK_SFT BIT(28)
+#define PCM_DAI_PCM_LOOPBACK_SFT 27
+#define PCM_DAI_PCM_LOOPBACK_MASK_SFT BIT(27)
+#define PCM_I2S_PCM_LOOPBACK_SFT 26
+#define PCM_I2S_PCM_LOOPBACK_MASK_SFT BIT(26)
+#define PCM_SYNC_DELSEL_SFT 25
+#define PCM_SYNC_DELSEL_MASK_SFT BIT(25)
+#define PCM_TX_LR_SWAP_SFT 24
+#define PCM_TX_LR_SWAP_MASK_SFT BIT(24)
+#define PCM_SYNC_OUT_INV_SFT 23
+#define PCM_SYNC_OUT_INV_MASK_SFT BIT(23)
+#define PCM_BCLK_OUT_INV_SFT 22
+#define PCM_BCLK_OUT_INV_MASK_SFT BIT(22)
+#define PCM_SYNC_IN_INV_SFT 21
+#define PCM_SYNC_IN_INV_MASK_SFT BIT(21)
+#define PCM_BCLK_IN_INV_SFT 20
+#define PCM_BCLK_IN_INV_MASK_SFT BIT(20)
+#define PCM_TX_LCH_RPT_SFT 19
+#define PCM_TX_LCH_RPT_MASK_SFT BIT(19)
+#define PCM_VBT_16K_MODE_SFT 18
+#define PCM_VBT_16K_MODE_MASK_SFT BIT(18)
+#define PCM_EXT_MODEM_SFT 17
+#define PCM_EXT_MODEM_MASK_SFT BIT(17)
+#define PCM_24BIT_SFT 16
+#define PCM_24BIT_MASK_SFT BIT(16)
+#define PCM_WLEN_SFT 14
+#define PCM_WLEN_MASK_SFT GENMASK(15, 14)
+#define PCM_SYNC_LENGTH_SFT 9
+#define PCM_SYNC_LENGTH_MASK_SFT GENMASK(13, 9)
+#define PCM_SYNC_TYPE_SFT 8
+#define PCM_SYNC_TYPE_MASK_SFT BIT(8)
+#define PCM_BT_MODE_SFT 7
+#define PCM_BT_MODE_MASK_SFT BIT(7)
+#define PCM_BYP_ASRC_SFT 6
+#define PCM_BYP_ASRC_MASK_SFT BIT(6)
+#define PCM_SLAVE_SFT 5
+#define PCM_SLAVE_MASK_SFT BIT(5)
+#define PCM_MODE_SFT 3
+#define PCM_MODE_MASK_SFT GENMASK(4, 3)
+#define PCM_FMT_SFT 1
+#define PCM_FMT_MASK_SFT GENMASK(2, 1)
+#define PCM_EN_SFT 0
+#define PCM_EN_MASK_SFT BIT(0)
+
+/* PCM_INTF_CON2 */
+#define PCM1_TX_FIFO_OV_SFT 31
+#define PCM1_TX_FIFO_OV_MASK_SFT BIT(31)
+#define PCM1_RX_FIFO_OV_SFT 30
+#define PCM1_RX_FIFO_OV_MASK_SFT BIT(30)
+#define PCM2_TX_FIFO_OV_SFT 29
+#define PCM2_TX_FIFO_OV_MASK_SFT BIT(29)
+#define PCM2_RX_FIFO_OV_SFT 28
+#define PCM2_RX_FIFO_OV_MASK_SFT BIT(28)
+#define PCM1_SYNC_GLITCH_SFT 27
+#define PCM1_SYNC_GLITCH_MASK_SFT BIT(27)
+#define PCM2_SYNC_GLITCH_SFT 26
+#define PCM2_SYNC_GLITCH_MASK_SFT BIT(26)
+#define TX3_RCH_DBG_MODE_SFT 17
+#define TX3_RCH_DBG_MODE_MASK_SFT BIT(17)
+#define PCM1_PCM2_LOOPBACK_SFT 16
+#define PCM1_PCM2_LOOPBACK_MASK_SFT BIT(16)
+#define DAI_PCM_LOOPBACK_CH_SFT 14
+#define DAI_PCM_LOOPBACK_CH_MASK_SFT GENMASK(15, 14)
+#define I2S_PCM_LOOPBACK_CH_SFT 12
+#define I2S_PCM_LOOPBACK_CH_MASK_SFT GENMASK(13, 12)
+#define TX_FIX_VALUE_SFT 0
+#define TX_FIX_VALUE_MASK_SFT GENMASK(7, 0)
+
+/* PCM2_INTF_CON */
+#define PCM2_TX_FIX_VALUE_SFT 24
+#define PCM2_TX_FIX_VALUE_MASK_SFT GENMASK(31, 24)
+#define PCM2_FIX_VALUE_SEL_SFT 23
+#define PCM2_FIX_VALUE_SEL_MASK_SFT BIT(23)
+#define PCM2_BUFFER_LOOPBACK_SFT 22
+#define PCM2_BUFFER_LOOPBACK_MASK_SFT BIT(22)
+#define PCM2_PARALLEL_LOOPBACK_SFT 21
+#define PCM2_PARALLEL_LOOPBACK_MASK_SFT BIT(21)
+#define PCM2_SERIAL_LOOPBACK_SFT 20
+#define PCM2_SERIAL_LOOPBACK_MASK_SFT BIT(20)
+#define PCM2_DAI_PCM_LOOPBACK_SFT 19
+#define PCM2_DAI_PCM_LOOPBACK_MASK_SFT BIT(19)
+#define PCM2_I2S_PCM_LOOPBACK_SFT 18
+#define PCM2_I2S_PCM_LOOPBACK_MASK_SFT BIT(18)
+#define PCM2_SYNC_DELSEL_SFT 17
+#define PCM2_SYNC_DELSEL_MASK_SFT BIT(17)
+#define PCM2_TX_LR_SWAP_SFT 16
+#define PCM2_TX_LR_SWAP_MASK_SFT BIT(16)
+#define PCM2_SYNC_IN_INV_SFT 15
+#define PCM2_SYNC_IN_INV_MASK_SFT BIT(15)
+#define PCM2_BCLK_IN_INV_SFT 14
+#define PCM2_BCLK_IN_INV_MASK_SFT BIT(14)
+#define PCM2_TX_LCH_RPT_SFT 13
+#define PCM2_TX_LCH_RPT_MASK_SFT BIT(13)
+#define PCM2_VBT_16K_MODE_SFT 12
+#define PCM2_VBT_16K_MODE_MASK_SFT BIT(12)
+#define PCM2_LOOPBACK_CH_SEL_SFT 10
+#define PCM2_LOOPBACK_CH_SEL_MASK_SFT GENMASK(11, 10)
+#define PCM2_TX2_BT_MODE_SFT 8
+#define PCM2_TX2_BT_MODE_MASK_SFT BIT(8)
+#define PCM2_BT_MODE_SFT 7
+#define PCM2_BT_MODE_MASK_SFT BIT(7)
+#define PCM2_AFIFO_SFT 6
+#define PCM2_AFIFO_MASK_SFT BIT(6)
+#define PCM2_WLEN_SFT 5
+#define PCM2_WLEN_MASK_SFT BIT(5)
+#define PCM2_MODE_SFT 3
+#define PCM2_MODE_MASK_SFT GENMASK(4, 3)
+#define PCM2_FMT_SFT 1
+#define PCM2_FMT_MASK_SFT GENMASK(2, 1)
+#define PCM2_EN_SFT 0
+#define PCM2_EN_MASK_SFT BIT(0)
+
+/* AFE_CM1_CON */
+#define CHANNEL_MERGE0_DEBUG_MODE_SFT (31)
+#define CHANNEL_MERGE0_DEBUG_MODE_MASK_SFT BIT(31)
+#define VUL3_BYPASS_CM_SFT (30)
+#define VUL3_BYPASS_CM_MASK (0x1)
+#define VUL3_BYPASS_CM_MASK_SFT BIT(30)
+#define CM1_DEBUG_MODE_SEL_SFT (29)
+#define CM1_DEBUG_MODE_SEL_MASK_SFT BIT(29)
+#define CHANNEL_MERGE0_UPDATE_CNT_SFT (16)
+#define CHANNEL_MERGE0_UPDATE_CNT_MASK_SFT GENMASK(28, 16)
+#define CM1_FS_SELECT_SFT (8)
+#define CM1_FS_SELECT_MASK_SFT GENMASK(12, 8)
+#define CHANNEL_MERGE0_CHNUM_SFT (3)
+#define CHANNEL_MERGE0_CHNUM_MASK_SFT GENMASK(7, 3)
+#define CHANNEL_MERGE0_BYTE_SWAP_SFT (1)
+#define CHANNEL_MERGE0_BYTE_SWAP_MASK_SFT BIT(1)
+#define CHANNEL_MERGE0_EN_SFT (0)
+#define CHANNEL_MERGE0_EN_MASK_SFT BIT(0)
+
+/* AFE_ADDA_MTKAIF_CFG0 */
+#define MTKAIF_RXIF_CLKINV_ADC_SFT 31
+#define MTKAIF_RXIF_CLKINV_ADC_MASK_SFT BIT(31)
+#define MTKAIF_RXIF_BYPASS_SRC_SFT 17
+#define MTKAIF_RXIF_BYPASS_SRC_MASK_SFT BIT(17)
+#define MTKAIF_RXIF_PROTOCOL2_SFT 16
+#define MTKAIF_RXIF_PROTOCOL2_MASK_SFT BIT(16)
+#define MTKAIF_TXIF_BYPASS_SRC_SFT 5
+#define MTKAIF_TXIF_BYPASS_SRC_MASK_SFT BIT(5)
+#define MTKAIF_TXIF_PROTOCOL2_SFT 4
+#define MTKAIF_TXIF_PROTOCOL2_MASK_SFT BIT(4)
+#define MTKAIF_TXIF_8TO5_SFT 2
+#define MTKAIF_TXIF_8TO5_MASK_SFT BIT(2)
+#define MTKAIF_RXIF_8TO5_SFT 1
+#define MTKAIF_RXIF_8TO5_MASK_SFT BIT(1)
+#define MTKAIF_IF_LOOPBACK1_SFT 0
+#define MTKAIF_IF_LOOPBACK1_MASK_SFT BIT(0)
+
+/* AFE_ADDA_MTKAIF_RX_CFG2 */
+#define MTKAIF_RXIF_DETECT_ON_PROTOCOL2_SFT 16
+#define MTKAIF_RXIF_DETECT_ON_PROTOCOL2_MASK_SFT BIT(16)
+#define MTKAIF_RXIF_DELAY_CYCLE_SFT 12
+#define MTKAIF_RXIF_DELAY_CYCLE_MASK_SFT GENMASK(15, 12)
+#define MTKAIF_RXIF_DELAY_DATA_SFT 8
+#define MTKAIF_RXIF_DELAY_DATA_MASK 0x1
+#define MTKAIF_RXIF_DELAY_DATA_MASK_SFT BIT(8)
+#define MTKAIF_RXIF_FIFO_RSP_PROTOCOL2_SFT 4
+#define MTKAIF_RXIF_FIFO_RSP_PROTOCOL2_MASK_SFT GENMASK(6, 4)
+
+/* AFE_ADDA_DL_SRC2_CON0 */
+#define DL_2_INPUT_MODE_CTL_SFT 28
+#define DL_2_INPUT_MODE_CTL_MASK_SFT GENMASK(31, 28)
+#define DL_2_CH1_SATURATION_EN_CTL_SFT 27
+#define DL_2_CH1_SATURATION_EN_CTL_MASK_SFT BIT(27)
+#define DL_2_CH2_SATURATION_EN_CTL_SFT 26
+#define DL_2_CH2_SATURATION_EN_CTL_MASK_SFT BIT(26)
+#define DL_2_OUTPUT_SEL_CTL_SFT 24
+#define DL_2_OUTPUT_SEL_CTL_MASK_SFT GENMASK(25, 24)
+#define DL_2_FADEIN_0START_EN_SFT 16
+#define DL_2_FADEIN_0START_EN_MASK_SFT GENMASK(17, 16)
+#define DL_DISABLE_HW_CG_CTL_SFT 15
+#define DL_DISABLE_HW_CG_CTL_MASK_SFT BIT(15)
+#define C_DATA_EN_SEL_CTL_PRE_SFT 14
+#define C_DATA_EN_SEL_CTL_PRE_MASK_SFT BIT(14)
+#define DL_2_SIDE_TONE_ON_CTL_PRE_SFT 13
+#define DL_2_SIDE_TONE_ON_CTL_PRE_MASK_SFT BIT(13)
+#define DL_2_MUTE_CH1_OFF_CTL_PRE_SFT 12
+#define DL_2_MUTE_CH1_OFF_CTL_PRE_MASK_SFT BIT(12)
+#define DL_2_MUTE_CH2_OFF_CTL_PRE_SFT 11
+#define DL_2_MUTE_CH2_OFF_CTL_PRE_MASK_SFT BIT(11)
+#define DL2_ARAMPSP_CTL_PRE_SFT 9
+#define DL2_ARAMPSP_CTL_PRE_MASK_SFT GENMASK(10, 9)
+#define DL_2_IIRMODE_CTL_PRE_SFT 6
+#define DL_2_IIRMODE_CTL_PRE_MASK_SFT GENMASK(8, 6)
+#define DL_2_VOICE_MODE_CTL_PRE_SFT 5
+#define DL_2_VOICE_MODE_CTL_PRE_MASK_SFT BIT(5)
+#define D2_2_MUTE_CH1_ON_CTL_PRE_SFT 4
+#define D2_2_MUTE_CH1_ON_CTL_PRE_MASK_SFT BIT(4)
+#define D2_2_MUTE_CH2_ON_CTL_PRE_SFT 3
+#define D2_2_MUTE_CH2_ON_CTL_PRE_MASK_SFT BIT(3)
+#define DL_2_IIR_ON_CTL_PRE_SFT 2
+#define DL_2_IIR_ON_CTL_PRE_MASK_SFT BIT(2)
+#define DL_2_GAIN_ON_CTL_PRE_SFT 1
+#define DL_2_GAIN_ON_CTL_PRE_MASK_SFT BIT(1)
+#define DL_2_SRC_ON_CTL_PRE_SFT 0
+#define DL_2_SRC_ON_CTL_PRE_MASK_SFT BIT(0)
+
+/* AFE_ADDA_DL_SRC2_CON1 */
+#define DL_2_GAIN_CTL_PRE_SFT 16
+#define DL_2_GAIN_CTL_PRE_MASK 0xffff
+#define DL_2_GAIN_CTL_PRE_MASK_SFT GENMASK(31, 16)
+#define DL_2_GAIN_MODE_CTL_SFT 0
+#define DL_2_GAIN_MODE_CTL_MASK_SFT BIT(0)
+
+/* AFE_ADDA_UL_SRC_CON0 */
+#define ULCF_CFG_EN_CTL_SFT 31
+#define ULCF_CFG_EN_CTL_MASK_SFT BIT(31)
+#define UL_DMIC_PHASE_SEL_CH1_SFT 27
+#define UL_DMIC_PHASE_SEL_CH1_MASK_SFT GENMASK(29, 27)
+#define UL_DMIC_PHASE_SEL_CH2_SFT 24
+#define UL_DMIC_PHASE_SEL_CH2_MASK_SFT GENMASK(26, 24)
+#define UL_MODE_3P25M_CH2_CTL_SFT 22
+#define UL_MODE_3P25M_CH2_CTL_MASK_SFT BIT(22)
+#define UL_MODE_3P25M_CH1_CTL_SFT 21
+#define UL_MODE_3P25M_CH1_CTL_MASK_SFT BIT(21)
+#define UL_VOICE_MODE_CH1_CH2_CTL_SFT 17
+#define UL_VOICE_MODE_CH1_CH2_CTL_MASK_SFT GENMASK(19, 17)
+#define UL_AP_DMIC_ON_SFT 16
+#define UL_AP_DMIC_ON_MASK_SFT BIT(16)
+#define DMIC_LOW_POWER_CTL_SFT 14
+#define DMIC_LOW_POWER_CTL_MASK_SFT GENMASK(15, 14)
+#define UL_DISABLE_HW_CG_CTL_SFT 12
+#define UL_DISABLE_HW_CG_CTL_MASK_SFT BIT(12)
+#define UL_IIR_ON_TMP_CTL_SFT 10
+#define UL_IIR_ON_TMP_CTL_MASK_SFT BIT(10)
+#define UL_IIRMODE_CTL_SFT 7
+#define UL_IIRMODE_CTL_MASK_SFT GENMASK(9, 7)
+#define DIGMIC_4P33M_SEL_SFT 6
+#define DIGMIC_4P33M_SEL_MASK_SFT BIT(6)
+#define DIGMIC_3P25M_1P625M_SEL_SFT 5
+#define DIGMIC_3P25M_1P625M_SEL_MASK_SFT BIT(5)
+#define UL_LOOP_BACK_MODE_SFT 2
+#define UL_LOOP_BACK_MODE_MASK_SFT BIT(2)
+#define UL_SDM_3_LEVEL_SFT 1
+#define UL_SDM_3_LEVEL_MASK_SFT BIT(1)
+#define UL_SRC_ON_CTL_SFT 0
+#define UL_SRC_ON_CTL_MASK_SFT BIT(0)
+
+/* AFE_ADDA_UL_SRC_CON1 */
+#define C_DAC_EN_CTL_SFT 27
+#define C_DAC_EN_CTL_MASK_SFT BIT(27)
+#define C_MUTE_SW_CTL_SFT 26
+#define C_MUTE_SW_CTL_MASK_SFT BIT(26)
+#define ASDM_SRC_SEL_CTL_SFT 25
+#define ASDM_SRC_SEL_CTL_MASK_SFT BIT(25)
+#define C_AMP_DIV_CH2_CTL_SFT 21
+#define C_AMP_DIV_CH2_CTL_MASK_SFT GENMASK(23, 21)
+#define C_FREQ_DIV_CH2_CTL_SFT 16
+#define C_FREQ_DIV_CH2_CTL_MASK_SFT GENMASK(20, 16)
+#define C_SINE_MODE_CH2_CTL_SFT 12
+#define C_SINE_MODE_CH2_CTL_MASK_SFT GENMASK(15, 12)
+#define C_AMP_DIV_CH1_CTL_SFT 9
+#define C_AMP_DIV_CH1_CTL_MASK_SFT GENMASK(11, 9)
+#define C_FREQ_DIV_CH1_CTL_SFT 4
+#define C_FREQ_DIV_CH1_CTL_MASK_SFT GENMASK(8, 4)
+#define C_SINE_MODE_CH1_CTL_SFT 0
+#define C_SINE_MODE_CH1_CTL_MASK_SFT GENMASK(3, 0)
+
+/* AFE_ADDA_TOP_CON0 */
+#define C_LOOP_BACK_MODE_CTL_SFT 12
+#define C_LOOP_BACK_MODE_CTL_MASK_SFT GENMASK(15, 12)
+#define ADDA_UL_GAIN_MODE_SFT 8
+#define ADDA_UL_GAIN_MODE_MASK_SFT GENMASK(9, 8)
+#define C_EXT_ADC_CTL_SFT 0
+#define C_EXT_ADC_CTL_MASK_SFT BIT(0)
+
+/* AFE_ADDA_UL_DL_CON0 */
+#define AFE_ADDA_UL_LR_SWAP_SFT 31
+#define AFE_ADDA_UL_LR_SWAP_MASK_SFT BIT(31)
+#define AFE_ADDA_CKDIV_RST_SFT 30
+#define AFE_ADDA_CKDIV_RST_MASK_SFT BIT(30)
+#define AFE_ADDA_FIFO_AUTO_RST_SFT 29
+#define AFE_ADDA_FIFO_AUTO_RST_MASK_SFT BIT(29)
+#define AFE_ADDA_UL_FIFO_DIGMIC_TESTIN_SFT 21
+#define AFE_ADDA_UL_FIFO_DIGMIC_TESTIN_MASK_SFT GENMASK(22, 21)
+#define AFE_ADDA_UL_FIFO_DIGMIC_WDATA_TESTEN_SFT 20
+#define AFE_ADDA_UL_FIFO_DIGMIC_WDATA_TESTEN_MASK_SFT BIT(20)
+#define AFE_ADDA6_UL_LR_SWAP_SFT 15
+#define AFE_ADDA6_UL_LR_SWAP_MASK_SFT BIT(15)
+#define AFE_ADDA6_CKDIV_RST_SFT 14
+#define AFE_ADDA6_CKDIV_RST_MASK_SFT BIT(14)
+#define AFE_ADDA6_FIFO_AUTO_RST_SFT 13
+#define AFE_ADDA6_FIFO_AUTO_RST_MASK_SFT BIT(13)
+#define AFE_ADDA6_UL_FIFO_DIGMIC_TESTIN_SFT 5
+#define AFE_ADDA6_UL_FIFO_DIGMIC_TESTIN_MASK_SFT GENMASK(6, 5)
+#define AFE_ADDA6_UL_FIFO_DIGMIC_WDATA_TESTEN_SFT 4
+#define AFE_ADDA6_UL_FIFO_DIGMIC_WDATA_TESTEN_MASK_SFT BIT(4)
+#define ADDA_AFE_ON_SFT 0
+#define ADDA_AFE_ON_MASK_SFT BIT(0)
+
+/* AFE_SIDETONE_CON0 */
+#define R_RDY_SFT 30
+#define R_RDY_MASK_SFT BIT(30)
+#define W_RDY_SFT 29
+#define W_RDY_MASK_SFT BIT(29)
+#define R_W_EN_SFT 25
+#define R_W_EN_MASK_SFT BIT(25)
+#define R_W_SEL_SFT 24
+#define R_W_SEL_MASK_SFT BIT(24)
+#define SEL_CH2_SFT 23
+#define SEL_CH2_MASK_SFT BIT(23)
+#define SIDE_TONE_COEFFICIENT_ADDR_SFT 16
+#define SIDE_TONE_COEFFICIENT_ADDR_MASK_SFT GENMASK(20, 16)
+#define SIDE_TONE_COEFFICIENT_SFT 0
+#define SIDE_TONE_COEFFICIENT_MASK_SFT GENMASK(15, 0)
+
+/* AFE_SIDETONE_COEFF */
+#define SIDE_TONE_COEFF_SFT 0
+#define SIDE_TONE_COEFF_MASK_SFT GENMASK(15, 0)
+
+/* AFE_SIDETONE_CON1 */
+#define STF_BYPASS_MODE_SFT 31
+#define STF_BYPASS_MODE_MASK_SFT BIT(31)
+#define STF_BYPASS_MODE_O28_O29_SFT 30
+#define STF_BYPASS_MODE_O28_O29_MASK_SFT BIT(30)
+#define STF_BYPASS_MODE_I2S4_SFT 29
+#define STF_BYPASS_MODE_I2S4_MASK_SFT BIT(29)
+#define STF_BYPASS_MODE_DL3_SFT 27
+#define STF_BYPASS_MODE_DL3_MASK_SFT BIT(27)
+#define STF_BYPASS_MODE_I2S7_SFT 26
+#define STF_BYPASS_MODE_I2S7_MASK_SFT BIT(26)
+#define STF_BYPASS_MODE_I2S9_SFT 25
+#define STF_BYPASS_MODE_I2S9_MASK_SFT BIT(25)
+#define STF_O19O20_OUT_EN_SEL_SFT 13
+#define STF_O19O20_OUT_EN_SEL_MASK_SFT BIT(13)
+#define STF_SOURCE_FROM_O19O20_SFT 12
+#define STF_SOURCE_FROM_O19O20_MASK_SFT BIT(12)
+#define SIDE_TONE_ON_SFT 8
+#define SIDE_TONE_ON_MASK_SFT BIT(8)
+#define SIDE_TONE_HALF_TAP_NUM_SFT 0
+#define SIDE_TONE_HALF_TAP_NUM_MASK_SFT GENMASK(5, 0)
+
+/* AFE_SIDETONE_GAIN */
+#define POSITIVE_GAIN_SFT 16
+#define POSITIVE_GAIN_MASK_SFT GENMASK(18, 16)
+#define SIDE_TONE_GAIN_SFT 0
+#define SIDE_TONE_GAIN_MASK_SFT GENMASK(15, 0)
+
+/* AFE_ADDA_DL_SDM_DCCOMP_CON */
+#define USE_3RD_SDM_SFT 28
+#define USE_3RD_SDM_MASK_SFT BIT(28)
+#define DL_FIFO_START_POINT_SFT 24
+#define DL_FIFO_START_POINT_MASK_SFT GENMASK(26, 24)
+#define DL_FIFO_SWAP_SFT 20
+#define DL_FIFO_SWAP_MASK_SFT BIT(20)
+#define C_AUDSDM1ORDSELECT_CTL_SFT 19
+#define C_AUDSDM1ORDSELECT_CTL_MASK_SFT BIT(19)
+#define C_SDM7BITSEL_CTL_SFT 18
+#define C_SDM7BITSEL_CTL_MASK_SFT BIT(18)
+#define GAIN_AT_SDM_RST_PRE_CTL_SFT 15
+#define GAIN_AT_SDM_RST_PRE_CTL_MASK_SFT BIT(15)
+#define DL_DCM_AUTO_IDLE_EN_SFT 14
+#define DL_DCM_AUTO_IDLE_EN_MASK_SFT BIT(14)
+#define AFE_DL_SRC_DCM_EN_SFT 13
+#define AFE_DL_SRC_DCM_EN_MASK_SFT BIT(13)
+#define AFE_DL_POST_SRC_DCM_EN_SFT 12
+#define AFE_DL_POST_SRC_DCM_EN_MASK_SFT BIT(12)
+#define AUD_SDM_MONO_SFT 9
+#define AUD_SDM_MONO_MASK_SFT BIT(9)
+#define AUD_DC_COMP_EN_SFT 8
+#define AUD_DC_COMP_EN_MASK_SFT BIT(8)
+#define ATTGAIN_CTL_SFT 0
+#define ATTGAIN_CTL_MASK_SFT GENMASK(5, 0)
+
+/* AFE_SINEGEN_CON0 */
+#define DAC_EN_SFT 26
+#define DAC_EN_MASK 0x1
+#define DAC_EN_MASK_SFT BIT(26)
+#define MUTE_SW_CH2_SFT 25
+#define MUTE_SW_CH2_MASK 0x1
+#define MUTE_SW_CH2_MASK_SFT BIT(25)
+#define MUTE_SW_CH1_SFT 24
+#define MUTE_SW_CH1_MASK 0x1
+#define MUTE_SW_CH1_MASK_SFT BIT(24)
+#define SINE_MODE_CH2_SFT 20
+#define SINE_MODE_CH2_MASK 0xf
+#define SINE_MODE_CH2_MASK_SFT GENMASK(23, 20)
+#define AMP_DIV_CH2_SFT 17
+#define AMP_DIV_CH2_MASK 0x7
+#define AMP_DIV_CH2_MASK_SFT GENMASK(19, 17)
+#define FREQ_DIV_CH2_SFT 12
+#define FREQ_DIV_CH2_MASK 0x1f
+#define FREQ_DIV_CH2_MASK_SFT GENMASK(16, 12)
+#define SINE_MODE_CH1_SFT 8
+#define SINE_MODE_CH1_MASK 0xf
+#define SINE_MODE_CH1_MASK_SFT GENMASK(11, 8)
+#define AMP_DIV_CH1_SFT 5
+#define AMP_DIV_CH1_MASK 0x7
+#define AMP_DIV_CH1_MASK_SFT GENMASK(7, 5)
+#define FREQ_DIV_CH1_SFT 0
+#define FREQ_DIV_CH1_MASK 0x1f
+#define FREQ_DIV_CH1_MASK_SFT GENMASK(4, 0)
+
+/* AFE_SINEGEN_CON2 */
+#define INNER_LOOP_BACK_MODE_SFT 0
+#define INNER_LOOP_BACK_MODE_MASK_SFT GENMASK(7, 0)
+
+/* AFE_HD_ENGEN_ENABLE */
+#define AFE_24M_ON_SFT 1
+#define AFE_24M_ON_MASK_SFT BIT(1)
+#define AFE_22M_ON_SFT 0
+#define AFE_22M_ON_MASK_SFT BIT(0)
+
+/* AFE_ADDA_DL_NLE_FIFO_MON */
+#define DL_NLE_FIFO_WBIN_SFT 8
+#define DL_NLE_FIFO_WBIN_MASK_SFT GENMASK(11, 8)
+#define DL_NLE_FIFO_RBIN_SFT 4
+#define DL_NLE_FIFO_RBIN_MASK_SFT GENMASK(7, 4)
+#define DL_NLE_FIFO_RDACTIVE_SFT 3
+#define DL_NLE_FIFO_RDACTIVE_MASK_SFT BIT(3)
+#define DL_NLE_FIFO_STARTRD_SFT 2
+#define DL_NLE_FIFO_STARTRD_MASK_SFT BIT(2)
+#define DL_NLE_FIFO_RD_EMPTY_SFT 1
+#define DL_NLE_FIFO_RD_EMPTY_MASK_SFT BIT(1)
+#define DL_NLE_FIFO_WR_FULL_SFT 0
+#define DL_NLE_FIFO_WR_FULL_MASK_SFT BIT(0)
+
+/* AFE_DL1_CON0 */
+#define DL1_MODE_SFT 24
+#define DL1_MODE_MASK 0xf
+#define DL1_MODE_MASK_SFT GENMASK(27, 24)
+#define DL1_MINLEN_SFT 20
+#define DL1_MINLEN_MASK 0xf
+#define DL1_MINLEN_MASK_SFT GENMASK(23, 20)
+#define DL1_MAXLEN_SFT 16
+#define DL1_MAXLEN_MASK 0xf
+#define DL1_MAXLEN_MASK_SFT GENMASK(19, 16)
+#define DL1_SW_CLEAR_BUF_EMPTY_SFT 15
+#define DL1_SW_CLEAR_BUF_EMPTY_MASK 0x1
+#define DL1_SW_CLEAR_BUF_EMPTY_MASK_SFT BIT(15)
+#define DL1_PBUF_SIZE_SFT 12
+#define DL1_PBUF_SIZE_MASK 0x3
+#define DL1_PBUF_SIZE_MASK_SFT GENMASK(13, 12)
+#define DL1_MONO_SFT 8
+#define DL1_MONO_MASK 0x1
+#define DL1_MONO_MASK_SFT BIT(8)
+#define DL1_NORMAL_MODE_SFT 5
+#define DL1_NORMAL_MODE_MASK 0x1
+#define DL1_NORMAL_MODE_MASK_SFT BIT(5)
+#define DL1_HALIGN_SFT 4
+#define DL1_HALIGN_MASK 0x1
+#define DL1_HALIGN_MASK_SFT BIT(4)
+#define DL1_HD_MODE_SFT 0
+#define DL1_HD_MODE_MASK 0x3
+#define DL1_HD_MODE_MASK_SFT GENMASK(1, 0)
+
+/* AFE_DL2_CON0 */
+#define DL2_MODE_SFT 24
+#define DL2_MODE_MASK 0xf
+#define DL2_MODE_MASK_SFT GENMASK(27, 24)
+#define DL2_MINLEN_SFT 20
+#define DL2_MINLEN_MASK 0xf
+#define DL2_MINLEN_MASK_SFT GENMASK(23, 20)
+#define DL2_MAXLEN_SFT 16
+#define DL2_MAXLEN_MASK 0xf
+#define DL2_MAXLEN_MASK_SFT GENMASK(19, 16)
+#define DL2_SW_CLEAR_BUF_EMPTY_SFT 15
+#define DL2_SW_CLEAR_BUF_EMPTY_MASK 0x1
+#define DL2_SW_CLEAR_BUF_EMPTY_MASK_SFT BIT(15)
+#define DL2_PBUF_SIZE_SFT 12
+#define DL2_PBUF_SIZE_MASK 0x3
+#define DL2_PBUF_SIZE_MASK_SFT GENMASK(13, 12)
+#define DL2_MONO_SFT 8
+#define DL2_MONO_MASK 0x1
+#define DL2_MONO_MASK_SFT BIT(8)
+#define DL2_NORMAL_MODE_SFT 5
+#define DL2_NORMAL_MODE_MASK 0x1
+#define DL2_NORMAL_MODE_MASK_SFT BIT(5)
+#define DL2_HALIGN_SFT 4
+#define DL2_HALIGN_MASK 0x1
+#define DL2_HALIGN_MASK_SFT BIT(4)
+#define DL2_HD_MODE_SFT 0
+#define DL2_HD_MODE_MASK 0x3
+#define DL2_HD_MODE_MASK_SFT GENMASK(1, 0)
+
+/* AFE_DL3_CON0 */
+#define DL3_MODE_SFT 24
+#define DL3_MODE_MASK 0xf
+#define DL3_MODE_MASK_SFT GENMASK(27, 24)
+#define DL3_MINLEN_SFT 20
+#define DL3_MINLEN_MASK 0xf
+#define DL3_MINLEN_MASK_SFT GENMASK(23, 20)
+#define DL3_MAXLEN_SFT 16
+#define DL3_MAXLEN_MASK 0xf
+#define DL3_MAXLEN_MASK_SFT GENMASK(19, 16)
+#define DL3_SW_CLEAR_BUF_EMPTY_SFT 15
+#define DL3_SW_CLEAR_BUF_EMPTY_MASK 0x1
+#define DL3_SW_CLEAR_BUF_EMPTY_MASK_SFT BIT(15)
+#define DL3_PBUF_SIZE_SFT 12
+#define DL3_PBUF_SIZE_MASK 0x3
+#define DL3_PBUF_SIZE_MASK_SFT GENMASK(13, 12)
+#define DL3_MONO_SFT 8
+#define DL3_MONO_MASK 0x1
+#define DL3_MONO_MASK_SFT BIT(8)
+#define DL3_NORMAL_MODE_SFT 5
+#define DL3_NORMAL_MODE_MASK 0x1
+#define DL3_NORMAL_MODE_MASK_SFT BIT(5)
+#define DL3_HALIGN_SFT 4
+#define DL3_HALIGN_MASK 0x1
+#define DL3_HALIGN_MASK_SFT BIT(4)
+#define DL3_HD_MODE_SFT 0
+#define DL3_HD_MODE_MASK 0x3
+#define DL3_HD_MODE_MASK_SFT GENMASK(1, 0)
+
+/* AFE_DL4_CON0 */
+#define DL4_MODE_SFT 24
+#define DL4_MODE_MASK 0xf
+#define DL4_MODE_MASK_SFT GENMASK(27, 24)
+#define DL4_MINLEN_SFT 20
+#define DL4_MINLEN_MASK 0xf
+#define DL4_MINLEN_MASK_SFT GENMASK(23, 20)
+#define DL4_MAXLEN_SFT 16
+#define DL4_MAXLEN_MASK 0xf
+#define DL4_MAXLEN_MASK_SFT GENMASK(19, 16)
+#define DL4_SW_CLEAR_BUF_EMPTY_SFT 15
+#define DL4_SW_CLEAR_BUF_EMPTY_MASK 0x1
+#define DL4_SW_CLEAR_BUF_EMPTY_MASK_SFT BIT(15)
+#define DL4_PBUF_SIZE_SFT 12
+#define DL4_PBUF_SIZE_MASK 0x3
+#define DL4_PBUF_SIZE_MASK_SFT GENMASK(13, 12)
+#define DL4_MONO_SFT 8
+#define DL4_MONO_MASK 0x1
+#define DL4_MONO_MASK_SFT BIT(8)
+#define DL4_NORMAL_MODE_SFT 5
+#define DL4_NORMAL_MODE_MASK 0x1
+#define DL4_NORMAL_MODE_MASK_SFT BIT(5)
+#define DL4_HALIGN_SFT 4
+#define DL4_HALIGN_MASK 0x1
+#define DL4_HALIGN_MASK_SFT BIT(4)
+#define DL4_HD_MODE_SFT 0
+#define DL4_HD_MODE_MASK 0x3
+#define DL4_HD_MODE_MASK_SFT GENMASK(1, 0)
+
+/* AFE_DL5_CON0 */
+#define DL5_MODE_SFT 24
+#define DL5_MODE_MASK 0xf
+#define DL5_MODE_MASK_SFT GENMASK(27, 24)
+#define DL5_MINLEN_SFT 20
+#define DL5_MINLEN_MASK 0xf
+#define DL5_MINLEN_MASK_SFT GENMASK(23, 20)
+#define DL5_MAXLEN_SFT 16
+#define DL5_MAXLEN_MASK 0xf
+#define DL5_MAXLEN_MASK_SFT GENMASK(19, 16)
+#define DL5_SW_CLEAR_BUF_EMPTY_SFT 15
+#define DL5_SW_CLEAR_BUF_EMPTY_MASK 0x1
+#define DL5_SW_CLEAR_BUF_EMPTY_MASK_SFT BIT(15)
+#define DL5_PBUF_SIZE_SFT 12
+#define DL5_PBUF_SIZE_MASK 0x3
+#define DL5_PBUF_SIZE_MASK_SFT GENMASK(13, 12)
+#define DL5_MONO_SFT 8
+#define DL5_MONO_MASK 0x1
+#define DL5_MONO_MASK_SFT BIT(8)
+#define DL5_NORMAL_MODE_SFT 5
+#define DL5_NORMAL_MODE_MASK 0x1
+#define DL5_NORMAL_MODE_MASK_SFT BIT(5)
+#define DL5_HALIGN_SFT 4
+#define DL5_HALIGN_MASK 0x1
+#define DL5_HALIGN_MASK_SFT BIT(4)
+#define DL5_HD_MODE_SFT 0
+#define DL5_HD_MODE_MASK 0x3
+#define DL5_HD_MODE_MASK_SFT GENMASK(1, 0)
+
+/* AFE_DL6_CON0 */
+#define DL6_MODE_SFT 24
+#define DL6_MODE_MASK 0xf
+#define DL6_MODE_MASK_SFT GENMASK(27, 24)
+#define DL6_MINLEN_SFT 20
+#define DL6_MINLEN_MASK 0xf
+#define DL6_MINLEN_MASK_SFT GENMASK(23, 20)
+#define DL6_MAXLEN_SFT 16
+#define DL6_MAXLEN_MASK 0xf
+#define DL6_MAXLEN_MASK_SFT GENMASK(19, 16)
+#define DL6_SW_CLEAR_BUF_EMPTY_SFT 15
+#define DL6_SW_CLEAR_BUF_EMPTY_MASK 0x1
+#define DL6_SW_CLEAR_BUF_EMPTY_MASK_SFT BIT(15)
+#define DL6_PBUF_SIZE_SFT 12
+#define DL6_PBUF_SIZE_MASK 0x3
+#define DL6_PBUF_SIZE_MASK_SFT GENMASK(13, 12)
+#define DL6_MONO_SFT 8
+#define DL6_MONO_MASK 0x1
+#define DL6_MONO_MASK_SFT BIT(8)
+#define DL6_NORMAL_MODE_SFT 5
+#define DL6_NORMAL_MODE_MASK 0x1
+#define DL6_NORMAL_MODE_MASK_SFT BIT(5)
+#define DL6_HALIGN_SFT 4
+#define DL6_HALIGN_MASK 0x1
+#define DL6_HALIGN_MASK_SFT BIT(4)
+#define DL6_HD_MODE_SFT 0
+#define DL6_HD_MODE_MASK 0x3
+#define DL6_HD_MODE_MASK_SFT GENMASK(1, 0)
+
+/* AFE_DL7_CON0 */
+#define DL7_MODE_SFT 24
+#define DL7_MODE_MASK 0xf
+#define DL7_MODE_MASK_SFT GENMASK(27, 24)
+#define DL7_MINLEN_SFT 20
+#define DL7_MINLEN_MASK 0xf
+#define DL7_MINLEN_MASK_SFT GENMASK(23, 20)
+#define DL7_MAXLEN_SFT 16
+#define DL7_MAXLEN_MASK 0xf
+#define DL7_MAXLEN_MASK_SFT GENMASK(19, 16)
+#define DL7_SW_CLEAR_BUF_EMPTY_SFT 15
+#define DL7_SW_CLEAR_BUF_EMPTY_MASK 0x1
+#define DL7_SW_CLEAR_BUF_EMPTY_MASK_SFT BIT(15)
+#define DL7_PBUF_SIZE_SFT 12
+#define DL7_PBUF_SIZE_MASK 0x3
+#define DL7_PBUF_SIZE_MASK_SFT GENMASK(13, 12)
+#define DL7_MONO_SFT 8
+#define DL7_MONO_MASK 0x1
+#define DL7_MONO_MASK_SFT BIT(8)
+#define DL7_NORMAL_MODE_SFT 5
+#define DL7_NORMAL_MODE_MASK 0x1
+#define DL7_NORMAL_MODE_MASK_SFT BIT(5)
+#define DL7_HALIGN_SFT 4
+#define DL7_HALIGN_MASK 0x1
+#define DL7_HALIGN_MASK_SFT BIT(4)
+#define DL7_HD_MODE_SFT 0
+#define DL7_HD_MODE_MASK 0x3
+#define DL7_HD_MODE_MASK_SFT GENMASK(1, 0)
+
+/* AFE_DL8_CON0 */
+#define DL8_MODE_SFT 24
+#define DL8_MODE_MASK 0xf
+#define DL8_MODE_MASK_SFT GENMASK(27, 24)
+#define DL8_MINLEN_SFT 20
+#define DL8_MINLEN_MASK 0xf
+#define DL8_MINLEN_MASK_SFT GENMASK(23, 20)
+#define DL8_MAXLEN_SFT 16
+#define DL8_MAXLEN_MASK 0xf
+#define DL8_MAXLEN_MASK_SFT GENMASK(19, 16)
+#define DL8_SW_CLEAR_BUF_EMPTY_SFT 15
+#define DL8_SW_CLEAR_BUF_EMPTY_MASK 0x1
+#define DL8_SW_CLEAR_BUF_EMPTY_MASK_SFT BIT(15)
+#define DL8_PBUF_SIZE_SFT 12
+#define DL8_PBUF_SIZE_MASK 0x3
+#define DL8_PBUF_SIZE_MASK_SFT GENMASK(13, 12)
+#define DL8_MONO_SFT 8
+#define DL8_MONO_MASK 0x1
+#define DL8_MONO_MASK_SFT BIT(8)
+#define DL8_NORMAL_MODE_SFT 5
+#define DL8_NORMAL_MODE_MASK 0x1
+#define DL8_NORMAL_MODE_MASK_SFT BIT(5)
+#define DL8_HALIGN_SFT 4
+#define DL8_HALIGN_MASK 0x1
+#define DL8_HALIGN_MASK_SFT BIT(4)
+#define DL8_HD_MODE_SFT 0
+#define DL8_HD_MODE_MASK 0x3
+#define DL8_HD_MODE_MASK_SFT GENMASK(1, 0)
+
+/* AFE_DL12_CON0 */
+#define DL12_MODE_SFT 24
+#define DL12_MODE_MASK 0xf
+#define DL12_MODE_MASK_SFT GENMASK(27, 24)
+#define DL12_MINLEN_SFT 20
+#define DL12_MINLEN_MASK 0xf
+#define DL12_MINLEN_MASK_SFT GENMASK(23, 20)
+#define DL12_MAXLEN_SFT 16
+#define DL12_MAXLEN_MASK 0xf
+#define DL12_MAXLEN_MASK_SFT GENMASK(19, 16)
+#define DL12_SW_CLEAR_BUF_EMPTY_SFT 15
+#define DL12_SW_CLEAR_BUF_EMPTY_MASK 0x1
+#define DL12_SW_CLEAR_BUF_EMPTY_MASK_SFT BIT(15)
+#define DL12_PBUF_SIZE_SFT 12
+#define DL12_PBUF_SIZE_MASK 0x3
+#define DL12_PBUF_SIZE_MASK_SFT GENMASK(13, 12)
+#define DL12_4CH_EN_SFT 11
+#define DL12_4CH_EN_MASK 0x1
+#define DL12_4CH_EN_MASK_SFT BIT(11)
+#define DL12_MONO_SFT 8
+#define DL12_MONO_MASK 0x1
+#define DL12_MONO_MASK_SFT BIT(8)
+#define DL12_NORMAL_MODE_SFT 5
+#define DL12_NORMAL_MODE_MASK 0x1
+#define DL12_NORMAL_MODE_MASK_SFT BIT(5)
+#define DL12_HALIGN_SFT 4
+#define DL12_HALIGN_MASK 0x1
+#define DL12_HALIGN_MASK_SFT BIT(4)
+#define DL12_HD_MODE_SFT 0
+#define DL12_HD_MODE_MASK 0x3
+#define DL12_HD_MODE_MASK_SFT GENMASK(1, 0)
+
+/* AFE_AWB_CON0 */
+#define AWB_MODE_SFT 24
+#define AWB_MODE_MASK 0xf
+#define AWB_MODE_MASK_SFT GENMASK(27, 24)
+#define AWB_SW_CLEAR_BUF_FULL_SFT 15
+#define AWB_SW_CLEAR_BUF_FULL_MASK 0x1
+#define AWB_SW_CLEAR_BUF_FULL_MASK_SFT BIT(15)
+#define AWB_R_MONO_SFT 9
+#define AWB_R_MONO_MASK 0x1
+#define AWB_R_MONO_MASK_SFT BIT(9)
+#define AWB_MONO_SFT 8
+#define AWB_MONO_MASK 0x1
+#define AWB_MONO_MASK_SFT BIT(8)
+#define AWB_WR_SIGN_SFT 6
+#define AWB_WR_SIGN_MASK 0x1
+#define AWB_WR_SIGN_MASK_SFT BIT(6)
+#define AWB_NORMAL_MODE_SFT 5
+#define AWB_NORMAL_MODE_MASK 0x1
+#define AWB_NORMAL_MODE_MASK_SFT BIT(5)
+#define AWB_HALIGN_SFT 4
+#define AWB_HALIGN_MASK 0x1
+#define AWB_HALIGN_MASK_SFT BIT(4)
+#define AWB_HD_MODE_SFT 0
+#define AWB_HD_MODE_MASK 0x3
+#define AWB_HD_MODE_MASK_SFT GENMASK(1, 0)
+
+/* AFE_AWB2_CON0 */
+#define AWB2_MODE_SFT 24
+#define AWB2_MODE_MASK 0xf
+#define AWB2_MODE_MASK_SFT GENMASK(27, 24)
+#define AWB2_SW_CLEAR_BUF_FULL_SFT 15
+#define AWB2_SW_CLEAR_BUF_FULL_MASK 0x1
+#define AWB2_SW_CLEAR_BUF_FULL_MASK_SFT BIT(15)
+#define AWB2_R_MONO_SFT 9
+#define AWB2_R_MONO_MASK 0x1
+#define AWB2_R_MONO_MASK_SFT BIT(9)
+#define AWB2_MONO_SFT 8
+#define AWB2_MONO_MASK 0x1
+#define AWB2_MONO_MASK_SFT BIT(8)
+#define AWB2_WR_SIGN_SFT 6
+#define AWB2_WR_SIGN_MASK 0x1
+#define AWB2_WR_SIGN_MASK_SFT BIT(6)
+#define AWB2_NORMAL_MODE_SFT 5
+#define AWB2_NORMAL_MODE_MASK 0x1
+#define AWB2_NORMAL_MODE_MASK_SFT BIT(5)
+#define AWB2_HALIGN_SFT 4
+#define AWB2_HALIGN_MASK 0x1
+#define AWB2_HALIGN_MASK_SFT BIT(4)
+#define AWB2_HD_MODE_SFT 0
+#define AWB2_HD_MODE_MASK 0x3
+#define AWB2_HD_MODE_MASK_SFT GENMASK(1, 0)
+
+/* AFE_VUL_CON0 */
+#define VUL_MODE_SFT 24
+#define VUL_MODE_MASK 0xf
+#define VUL_MODE_MASK_SFT GENMASK(27, 24)
+#define VUL_SW_CLEAR_BUF_FULL_SFT 15
+#define VUL_SW_CLEAR_BUF_FULL_MASK 0x1
+#define VUL_SW_CLEAR_BUF_FULL_MASK_SFT BIT(15)
+#define VUL_R_MONO_SFT 9
+#define VUL_R_MONO_MASK 0x1
+#define VUL_R_MONO_MASK_SFT BIT(9)
+#define VUL_MONO_SFT 8
+#define VUL_MONO_MASK 0x1
+#define VUL_MONO_MASK_SFT BIT(8)
+#define VUL_WR_SIGN_SFT 6
+#define VUL_WR_SIGN_MASK 0x1
+#define VUL_WR_SIGN_MASK_SFT BIT(6)
+#define VUL_NORMAL_MODE_SFT 5
+#define VUL_NORMAL_MODE_MASK 0x1
+#define VUL_NORMAL_MODE_MASK_SFT BIT(5)
+#define VUL_HALIGN_SFT 4
+#define VUL_HALIGN_MASK 0x1
+#define VUL_HALIGN_MASK_SFT BIT(4)
+#define VUL_HD_MODE_SFT 0
+#define VUL_HD_MODE_MASK 0x3
+#define VUL_HD_MODE_MASK_SFT GENMASK(1, 0)
+
+/* AFE_VUL12_CON0 */
+#define VUL12_MODE_SFT 24
+#define VUL12_MODE_MASK 0xf
+#define VUL12_MODE_MASK_SFT GENMASK(27, 24)
+#define VUL12_SW_CLEAR_BUF_FULL_SFT 15
+#define VUL12_SW_CLEAR_BUF_FULL_MASK 0x1
+#define VUL12_SW_CLEAR_BUF_FULL_MASK_SFT BIT(15)
+#define VUL12_4CH_EN_SFT 11
+#define VUL12_4CH_EN_MASK 0x1
+#define VUL12_4CH_EN_MASK_SFT BIT(11)
+#define VUL12_R_MONO_SFT 9
+#define VUL12_R_MONO_MASK 0x1
+#define VUL12_R_MONO_MASK_SFT BIT(9)
+#define VUL12_MONO_SFT 8
+#define VUL12_MONO_MASK 0x1
+#define VUL12_MONO_MASK_SFT BIT(8)
+#define VUL12_WR_SIGN_SFT 6
+#define VUL12_WR_SIGN_MASK 0x1
+#define VUL12_WR_SIGN_MASK_SFT BIT(6)
+#define VUL12_NORMAL_MODE_SFT 5
+#define VUL12_NORMAL_MODE_MASK 0x1
+#define VUL12_NORMAL_MODE_MASK_SFT BIT(5)
+#define VUL12_HALIGN_SFT 4
+#define VUL12_HALIGN_MASK 0x1
+#define VUL12_HALIGN_MASK_SFT BIT(4)
+#define VUL12_HD_MODE_SFT 0
+#define VUL12_HD_MODE_MASK 0x3
+#define VUL12_HD_MODE_MASK_SFT GENMASK(1, 0)
+
+/* AFE_VUL2_CON0 */
+#define VUL2_MODE_SFT 24
+#define VUL2_MODE_MASK 0xf
+#define VUL2_MODE_MASK_SFT GENMASK(27, 24)
+#define VUL2_SW_CLEAR_BUF_FULL_SFT 15
+#define VUL2_SW_CLEAR_BUF_FULL_MASK 0x1
+#define VUL2_SW_CLEAR_BUF_FULL_MASK_SFT BIT(15)
+#define VUL2_R_MONO_SFT 9
+#define VUL2_R_MONO_MASK 0x1
+#define VUL2_R_MONO_MASK_SFT BIT(9)
+#define VUL2_MONO_SFT 8
+#define VUL2_MONO_MASK 0x1
+#define VUL2_MONO_MASK_SFT BIT(8)
+#define VUL2_WR_SIGN_SFT 6
+#define VUL2_WR_SIGN_MASK 0x1
+#define VUL2_WR_SIGN_MASK_SFT BIT(6)
+#define VUL2_NORMAL_MODE_SFT 5
+#define VUL2_NORMAL_MODE_MASK 0x1
+#define VUL2_NORMAL_MODE_MASK_SFT BIT(5)
+#define VUL2_HALIGN_SFT 4
+#define VUL2_HALIGN_MASK 0x1
+#define VUL2_HALIGN_MASK_SFT BIT(4)
+#define VUL2_HD_MODE_SFT 0
+#define VUL2_HD_MODE_MASK 0x3
+#define VUL2_HD_MODE_MASK_SFT GENMASK(1, 0)
+
+/* AFE_VUL3_CON0 */
+#define VUL3_MODE_SFT 24
+#define VUL3_MODE_MASK 0xf
+#define VUL3_MODE_MASK_SFT GENMASK(27, 24)
+#define VUL3_SW_CLEAR_BUF_FULL_SFT 15
+#define VUL3_SW_CLEAR_BUF_FULL_MASK 0x1
+#define VUL3_SW_CLEAR_BUF_FULL_MASK_SFT BIT(15)
+#define VUL3_R_MONO_SFT 9
+#define VUL3_R_MONO_MASK 0x1
+#define VUL3_R_MONO_MASK_SFT BIT(9)
+#define VUL3_MONO_SFT 8
+#define VUL3_MONO_MASK 0x1
+#define VUL3_MONO_MASK_SFT BIT(8)
+#define VUL3_WR_SIGN_SFT 6
+#define VUL3_WR_SIGN_MASK 0x1
+#define VUL3_WR_SIGN_MASK_SFT BIT(6)
+#define VUL3_NORMAL_MODE_SFT 5
+#define VUL3_NORMAL_MODE_MASK 0x1
+#define VUL3_NORMAL_MODE_MASK_SFT BIT(5)
+#define VUL3_HALIGN_SFT 4
+#define VUL3_HALIGN_MASK 0x1
+#define VUL3_HALIGN_MASK_SFT BIT(4)
+#define VUL3_HD_MODE_SFT 0
+#define VUL3_HD_MODE_MASK 0x3
+#define VUL3_HD_MODE_MASK_SFT GENMASK(1, 0)
+
+/* AFE_VUL4_CON0 */
+#define VUL4_MODE_SFT 24
+#define VUL4_MODE_MASK 0xf
+#define VUL4_MODE_MASK_SFT GENMASK(27, 24)
+#define VUL4_SW_CLEAR_BUF_FULL_SFT 15
+#define VUL4_SW_CLEAR_BUF_FULL_MASK 0x1
+#define VUL4_SW_CLEAR_BUF_FULL_MASK_SFT BIT(15)
+#define VUL4_R_MONO_SFT 9
+#define VUL4_R_MONO_MASK 0x1
+#define VUL4_R_MONO_MASK_SFT BIT(9)
+#define VUL4_MONO_SFT 8
+#define VUL4_MONO_MASK 0x1
+#define VUL4_MONO_MASK_SFT BIT(8)
+#define VUL4_WR_SIGN_SFT 6
+#define VUL4_WR_SIGN_MASK 0x1
+#define VUL4_WR_SIGN_MASK_SFT BIT(6)
+#define VUL4_NORMAL_MODE_SFT 5
+#define VUL4_NORMAL_MODE_MASK 0x1
+#define VUL4_NORMAL_MODE_MASK_SFT BIT(5)
+#define VUL4_HALIGN_SFT 4
+#define VUL4_HALIGN_MASK 0x1
+#define VUL4_HALIGN_MASK_SFT BIT(4)
+#define VUL4_HD_MODE_SFT 0
+#define VUL4_HD_MODE_MASK 0x3
+#define VUL4_HD_MODE_MASK_SFT GENMASK(1, 0)
+
+/* AFE_VUL5_CON0 */
+#define VUL5_MODE_SFT 24
+#define VUL5_MODE_MASK 0xf
+#define VUL5_MODE_MASK_SFT GENMASK(27, 24)
+#define VUL5_SW_CLEAR_BUF_FULL_SFT 15
+#define VUL5_SW_CLEAR_BUF_FULL_MASK 0x1
+#define VUL5_SW_CLEAR_BUF_FULL_MASK_SFT BIT(15)
+#define VUL5_R_MONO_SFT 9
+#define VUL5_R_MONO_MASK 0x1
+#define VUL5_R_MONO_MASK_SFT BIT(9)
+#define VUL5_MONO_SFT 8
+#define VUL5_MONO_MASK 0x1
+#define VUL5_MONO_MASK_SFT BIT(8)
+#define VUL5_WR_SIGN_SFT 6
+#define VUL5_WR_SIGN_MASK 0x1
+#define VUL5_WR_SIGN_MASK_SFT BIT(6)
+#define VUL5_NORMAL_MODE_SFT 5
+#define VUL5_NORMAL_MODE_MASK 0x1
+#define VUL5_NORMAL_MODE_MASK_SFT BIT(5)
+#define VUL5_HALIGN_SFT 4
+#define VUL5_HALIGN_MASK 0x1
+#define VUL5_HALIGN_MASK_SFT BIT(4)
+#define VUL5_HD_MODE_SFT 0
+#define VUL5_HD_MODE_MASK 0x3
+#define VUL5_HD_MODE_MASK_SFT GENMASK(1, 0)
+
+/* AFE_VUL6_CON0 */
+#define VUL6_MODE_SFT 24
+#define VUL6_MODE_MASK 0xf
+#define VUL6_MODE_MASK_SFT GENMASK(27, 24)
+#define VUL6_SW_CLEAR_BUF_FULL_SFT 15
+#define VUL6_SW_CLEAR_BUF_FULL_MASK 0x1
+#define VUL6_SW_CLEAR_BUF_FULL_MASK_SFT BIT(15)
+#define VUL6_R_MONO_SFT 9
+#define VUL6_R_MONO_MASK 0x1
+#define VUL6_R_MONO_MASK_SFT BIT(9)
+#define VUL6_MONO_SFT 8
+#define VUL6_MONO_MASK 0x1
+#define VUL6_MONO_MASK_SFT BIT(8)
+#define VUL6_WR_SIGN_SFT 6
+#define VUL6_WR_SIGN_MASK 0x1
+#define VUL6_WR_SIGN_MASK_SFT BIT(6)
+#define VUL6_NORMAL_MODE_SFT 5
+#define VUL6_NORMAL_MODE_MASK 0x1
+#define VUL6_NORMAL_MODE_MASK_SFT BIT(5)
+#define VUL6_HALIGN_SFT 4
+#define VUL6_HALIGN_MASK 0x1
+#define VUL6_HALIGN_MASK_SFT BIT(4)
+#define VUL6_HD_MODE_SFT 0
+#define VUL6_HD_MODE_MASK 0x3
+#define VUL6_HD_MODE_MASK_SFT GENMASK(1, 0)
+
+/* AFE_DAI_CON0 */
+#define DAI_MODE_SFT 24
+#define DAI_MODE_MASK 0x3
+#define DAI_MODE_MASK_SFT GENMASK(25, 24)
+#define DAI_SW_CLEAR_BUF_FULL_SFT 15
+#define DAI_SW_CLEAR_BUF_FULL_MASK 0x1
+#define DAI_SW_CLEAR_BUF_FULL_MASK_SFT BIT(15)
+#define DAI_DUPLICATE_WR_SFT 10
+#define DAI_DUPLICATE_WR_MASK 0x1
+#define DAI_DUPLICATE_WR_MASK_SFT BIT(10)
+#define DAI_MONO_SFT 8
+#define DAI_MONO_MASK 0x1
+#define DAI_MONO_MASK_SFT BIT(8)
+#define DAI_WR_SIGN_SFT 6
+#define DAI_WR_SIGN_MASK 0x1
+#define DAI_WR_SIGN_MASK_SFT BIT(6)
+#define DAI_NORMAL_MODE_SFT 5
+#define DAI_NORMAL_MODE_MASK 0x1
+#define DAI_NORMAL_MODE_MASK_SFT BIT(5)
+#define DAI_HALIGN_SFT 4
+#define DAI_HALIGN_MASK 0x1
+#define DAI_HALIGN_MASK_SFT BIT(4)
+#define DAI_HD_MODE_SFT 0
+#define DAI_HD_MODE_MASK 0x3
+#define DAI_HD_MODE_MASK_SFT GENMASK(1, 0)
+
+/* AFE_MOD_DAI_CON0 */
+#define MOD_DAI_MODE_SFT 24
+#define MOD_DAI_MODE_MASK 0x3
+#define MOD_DAI_MODE_MASK_SFT GENMASK(25, 24)
+#define MOD_DAI_SW_CLEAR_BUF_FULL_SFT 15
+#define MOD_DAI_SW_CLEAR_BUF_FULL_MASK 0x1
+#define MOD_DAI_SW_CLEAR_BUF_FULL_MASK_SFT BIT(15)
+#define MOD_DAI_DUPLICATE_WR_SFT 10
+#define MOD_DAI_DUPLICATE_WR_MASK 0x1
+#define MOD_DAI_DUPLICATE_WR_MASK_SFT BIT(10)
+#define MOD_DAI_MONO_SFT 8
+#define MOD_DAI_MONO_MASK 0x1
+#define MOD_DAI_MONO_MASK_SFT BIT(8)
+#define MOD_DAI_WR_SIGN_SFT 6
+#define MOD_DAI_WR_SIGN_MASK 0x1
+#define MOD_DAI_WR_SIGN_MASK_SFT BIT(6)
+#define MOD_DAI_NORMAL_MODE_SFT 5
+#define MOD_DAI_NORMAL_MODE_MASK 0x1
+#define MOD_DAI_NORMAL_MODE_MASK_SFT BIT(5)
+#define MOD_DAI_HALIGN_SFT 4
+#define MOD_DAI_HALIGN_MASK 0x1
+#define MOD_DAI_HALIGN_MASK_SFT BIT(4)
+#define MOD_DAI_HD_MODE_SFT 0
+#define MOD_DAI_HD_MODE_MASK 0x3
+#define MOD_DAI_HD_MODE_MASK_SFT GENMASK(1, 0)
+
+/* AFE_DAI2_CON0 */
+#define DAI2_MODE_SFT 24
+#define DAI2_MODE_MASK 0xf
+#define DAI2_MODE_MASK_SFT GENMASK(27, 24)
+#define DAI2_SW_CLEAR_BUF_FULL_SFT 15
+#define DAI2_SW_CLEAR_BUF_FULL_MASK 0x1
+#define DAI2_SW_CLEAR_BUF_FULL_MASK_SFT BIT(15)
+#define DAI2_DUPLICATE_WR_SFT 10
+#define DAI2_DUPLICATE_WR_MASK 0x1
+#define DAI2_DUPLICATE_WR_MASK_SFT BIT(10)
+#define DAI2_MONO_SFT 8
+#define DAI2_MONO_MASK 0x1
+#define DAI2_MONO_MASK_SFT BIT(8)
+#define DAI2_WR_SIGN_SFT 6
+#define DAI2_WR_SIGN_MASK 0x1
+#define DAI2_WR_SIGN_MASK_SFT BIT(6)
+#define DAI2_NORMAL_MODE_SFT 5
+#define DAI2_NORMAL_MODE_MASK 0x1
+#define DAI2_NORMAL_MODE_MASK_SFT BIT(5)
+#define DAI2_HALIGN_SFT 4
+#define DAI2_HALIGN_MASK 0x1
+#define DAI2_HALIGN_MASK_SFT BIT(4)
+#define DAI2_HD_MODE_SFT 0
+#define DAI2_HD_MODE_MASK 0x3
+#define DAI2_HD_MODE_MASK_SFT GENMASK(1, 0)
+
+/* AFE_MEMIF_CON0 */
+#define CPU_COMPACT_MODE_SFT 2
+#define CPU_COMPACT_MODE_MASK_SFT BIT(2)
+#define CPU_HD_ALIGN_SFT 1
+#define CPU_HD_ALIGN_MASK_SFT BIT(1)
+#define SYSRAM_SIGN_SFT 0
+#define SYSRAM_SIGN_MASK_SFT BIT(0)
+
+/* AFE_IRQ_MCU_CON0 */
+#define IRQ31_MCU_ON_SFT 31
+#define IRQ31_MCU_ON_MASK 0x1
+#define IRQ31_MCU_ON_MASK_SFT BIT(31)
+#define IRQ26_MCU_ON_SFT 26
+#define IRQ26_MCU_ON_MASK 0x1
+#define IRQ26_MCU_ON_MASK_SFT BIT(26)
+#define IRQ25_MCU_ON_SFT 25
+#define IRQ25_MCU_ON_MASK 0x1
+#define IRQ25_MCU_ON_MASK_SFT BIT(25)
+#define IRQ24_MCU_ON_SFT 24
+#define IRQ24_MCU_ON_MASK 0x1
+#define IRQ24_MCU_ON_MASK_SFT BIT(24)
+#define IRQ23_MCU_ON_SFT 23
+#define IRQ23_MCU_ON_MASK 0x1
+#define IRQ23_MCU_ON_MASK_SFT BIT(23)
+#define IRQ22_MCU_ON_SFT 22
+#define IRQ22_MCU_ON_MASK 0x1
+#define IRQ22_MCU_ON_MASK_SFT BIT(22)
+#define IRQ21_MCU_ON_SFT 21
+#define IRQ21_MCU_ON_MASK 0x1
+#define IRQ21_MCU_ON_MASK_SFT BIT(21)
+#define IRQ20_MCU_ON_SFT 20
+#define IRQ20_MCU_ON_MASK 0x1
+#define IRQ20_MCU_ON_MASK_SFT BIT(20)
+#define IRQ19_MCU_ON_SFT 19
+#define IRQ19_MCU_ON_MASK 0x1
+#define IRQ19_MCU_ON_MASK_SFT BIT(19)
+#define IRQ18_MCU_ON_SFT 18
+#define IRQ18_MCU_ON_MASK 0x1
+#define IRQ18_MCU_ON_MASK_SFT BIT(18)
+#define IRQ17_MCU_ON_SFT 17
+#define IRQ17_MCU_ON_MASK 0x1
+#define IRQ17_MCU_ON_MASK_SFT BIT(17)
+#define IRQ16_MCU_ON_SFT 16
+#define IRQ16_MCU_ON_MASK 0x1
+#define IRQ16_MCU_ON_MASK_SFT BIT(16)
+#define IRQ15_MCU_ON_SFT 15
+#define IRQ15_MCU_ON_MASK 0x1
+#define IRQ15_MCU_ON_MASK_SFT BIT(15)
+#define IRQ14_MCU_ON_SFT 14
+#define IRQ14_MCU_ON_MASK 0x1
+#define IRQ14_MCU_ON_MASK_SFT BIT(14)
+#define IRQ13_MCU_ON_SFT 13
+#define IRQ13_MCU_ON_MASK 0x1
+#define IRQ13_MCU_ON_MASK_SFT BIT(13)
+#define IRQ12_MCU_ON_SFT 12
+#define IRQ12_MCU_ON_MASK 0x1
+#define IRQ12_MCU_ON_MASK_SFT BIT(12)
+#define IRQ11_MCU_ON_SFT 11
+#define IRQ11_MCU_ON_MASK 0x1
+#define IRQ11_MCU_ON_MASK_SFT BIT(11)
+#define IRQ10_MCU_ON_SFT 10
+#define IRQ10_MCU_ON_MASK 0x1
+#define IRQ10_MCU_ON_MASK_SFT BIT(10)
+#define IRQ9_MCU_ON_SFT 9
+#define IRQ9_MCU_ON_MASK 0x1
+#define IRQ9_MCU_ON_MASK_SFT BIT(9)
+#define IRQ8_MCU_ON_SFT 8
+#define IRQ8_MCU_ON_MASK 0x1
+#define IRQ8_MCU_ON_MASK_SFT BIT(8)
+#define IRQ7_MCU_ON_SFT 7
+#define IRQ7_MCU_ON_MASK 0x1
+#define IRQ7_MCU_ON_MASK_SFT BIT(7)
+#define IRQ6_MCU_ON_SFT 6
+#define IRQ6_MCU_ON_MASK 0x1
+#define IRQ6_MCU_ON_MASK_SFT BIT(6)
+#define IRQ5_MCU_ON_SFT 5
+#define IRQ5_MCU_ON_MASK 0x1
+#define IRQ5_MCU_ON_MASK_SFT BIT(5)
+#define IRQ4_MCU_ON_SFT 4
+#define IRQ4_MCU_ON_MASK 0x1
+#define IRQ4_MCU_ON_MASK_SFT BIT(4)
+#define IRQ3_MCU_ON_SFT 3
+#define IRQ3_MCU_ON_MASK 0x1
+#define IRQ3_MCU_ON_MASK_SFT BIT(3)
+#define IRQ2_MCU_ON_SFT 2
+#define IRQ2_MCU_ON_MASK 0x1
+#define IRQ2_MCU_ON_MASK_SFT BIT(2)
+#define IRQ1_MCU_ON_SFT 1
+#define IRQ1_MCU_ON_MASK 0x1
+#define IRQ1_MCU_ON_MASK_SFT BIT(1)
+#define IRQ0_MCU_ON_SFT 0
+#define IRQ0_MCU_ON_MASK 0x1
+#define IRQ0_MCU_ON_MASK_SFT BIT(0)
+
+/* AFE_IRQ_MCU_CON1 */
+#define IRQ7_MCU_MODE_SFT 28
+#define IRQ7_MCU_MODE_MASK 0xf
+#define IRQ7_MCU_MODE_MASK_SFT GENMASK(31, 28)
+#define IRQ6_MCU_MODE_SFT 24
+#define IRQ6_MCU_MODE_MASK 0xf
+#define IRQ6_MCU_MODE_MASK_SFT GENMASK(27, 24)
+#define IRQ5_MCU_MODE_SFT 20
+#define IRQ5_MCU_MODE_MASK 0xf
+#define IRQ5_MCU_MODE_MASK_SFT GENMASK(23, 20)
+#define IRQ4_MCU_MODE_SFT 16
+#define IRQ4_MCU_MODE_MASK 0xf
+#define IRQ4_MCU_MODE_MASK_SFT GENMASK(19, 16)
+#define IRQ3_MCU_MODE_SFT 12
+#define IRQ3_MCU_MODE_MASK 0xf
+#define IRQ3_MCU_MODE_MASK_SFT GENMASK(15, 12)
+#define IRQ2_MCU_MODE_SFT 8
+#define IRQ2_MCU_MODE_MASK 0xf
+#define IRQ2_MCU_MODE_MASK_SFT GENMASK(11, 8)
+#define IRQ1_MCU_MODE_SFT 4
+#define IRQ1_MCU_MODE_MASK 0xf
+#define IRQ1_MCU_MODE_MASK_SFT GENMASK(7, 4)
+#define IRQ0_MCU_MODE_SFT 0
+#define IRQ0_MCU_MODE_MASK 0xf
+#define IRQ0_MCU_MODE_MASK_SFT GENMASK(3, 0)
+
+/* AFE_IRQ_MCU_CON2 */
+#define IRQ15_MCU_MODE_SFT 28
+#define IRQ15_MCU_MODE_MASK 0xf
+#define IRQ15_MCU_MODE_MASK_SFT GENMASK(31, 28)
+#define IRQ14_MCU_MODE_SFT 24
+#define IRQ14_MCU_MODE_MASK 0xf
+#define IRQ14_MCU_MODE_MASK_SFT GENMASK(27, 24)
+#define IRQ13_MCU_MODE_SFT 20
+#define IRQ13_MCU_MODE_MASK 0xf
+#define IRQ13_MCU_MODE_MASK_SFT GENMASK(23, 20)
+#define IRQ12_MCU_MODE_SFT 16
+#define IRQ12_MCU_MODE_MASK 0xf
+#define IRQ12_MCU_MODE_MASK_SFT GENMASK(19, 16)
+#define IRQ11_MCU_MODE_SFT 12
+#define IRQ11_MCU_MODE_MASK 0xf
+#define IRQ11_MCU_MODE_MASK_SFT GENMASK(15, 12)
+#define IRQ10_MCU_MODE_SFT 8
+#define IRQ10_MCU_MODE_MASK 0xf
+#define IRQ10_MCU_MODE_MASK_SFT GENMASK(11, 8)
+#define IRQ9_MCU_MODE_SFT 4
+#define IRQ9_MCU_MODE_MASK 0xf
+#define IRQ9_MCU_MODE_MASK_SFT GENMASK(7, 4)
+#define IRQ8_MCU_MODE_SFT 0
+#define IRQ8_MCU_MODE_MASK 0xf
+#define IRQ8_MCU_MODE_MASK_SFT GENMASK(3, 0)
+
+/* AFE_IRQ_MCU_CON3 */
+#define IRQ23_MCU_MODE_SFT 28
+#define IRQ23_MCU_MODE_MASK 0xf
+#define IRQ23_MCU_MODE_MASK_SFT GENMASK(31, 28)
+#define IRQ22_MCU_MODE_SFT 24
+#define IRQ22_MCU_MODE_MASK 0xf
+#define IRQ22_MCU_MODE_MASK_SFT GENMASK(27, 24)
+#define IRQ21_MCU_MODE_SFT 20
+#define IRQ21_MCU_MODE_MASK 0xf
+#define IRQ21_MCU_MODE_MASK_SFT GENMASK(23, 20)
+#define IRQ20_MCU_MODE_SFT 16
+#define IRQ20_MCU_MODE_MASK 0xf
+#define IRQ20_MCU_MODE_MASK_SFT GENMASK(19, 16)
+#define IRQ19_MCU_MODE_SFT 12
+#define IRQ19_MCU_MODE_MASK 0xf
+#define IRQ19_MCU_MODE_MASK_SFT GENMASK(15, 12)
+#define IRQ18_MCU_MODE_SFT 8
+#define IRQ18_MCU_MODE_MASK 0xf
+#define IRQ18_MCU_MODE_MASK_SFT GENMASK(11, 8)
+#define IRQ17_MCU_MODE_SFT 4
+#define IRQ17_MCU_MODE_MASK 0xf
+#define IRQ17_MCU_MODE_MASK_SFT GENMASK(7, 4)
+#define IRQ16_MCU_MODE_SFT 0
+#define IRQ16_MCU_MODE_MASK 0xf
+#define IRQ16_MCU_MODE_MASK_SFT GENMASK(3, 0)
+
+/* AFE_IRQ_MCU_CON4 */
+#define IRQ26_MCU_MODE_SFT 8
+#define IRQ26_MCU_MODE_MASK 0xf
+#define IRQ26_MCU_MODE_MASK_SFT GENMASK(11, 8)
+#define IRQ25_MCU_MODE_SFT 4
+#define IRQ25_MCU_MODE_MASK 0xf
+#define IRQ25_MCU_MODE_MASK_SFT GENMASK(7, 4)
+#define IRQ24_MCU_MODE_SFT 0
+#define IRQ24_MCU_MODE_MASK 0xf
+#define IRQ24_MCU_MODE_MASK_SFT GENMASK(3, 0)
+
+/* AFE_IRQ_MCU_CLR */
+#define IRQ31_MCU_CLR_SFT 31
+#define IRQ31_MCU_CLR_MASK_SFT BIT(31)
+#define IRQ26_MCU_CLR_SFT 26
+#define IRQ26_MCU_CLR_MASK_SFT BIT(26)
+#define IRQ25_MCU_CLR_SFT 25
+#define IRQ25_MCU_CLR_MASK_SFT BIT(25)
+#define IRQ24_MCU_CLR_SFT 24
+#define IRQ24_MCU_CLR_MASK_SFT BIT(24)
+#define IRQ23_MCU_CLR_SFT 23
+#define IRQ23_MCU_CLR_MASK_SFT BIT(23)
+#define IRQ22_MCU_CLR_SFT 22
+#define IRQ22_MCU_CLR_MASK_SFT BIT(22)
+#define IRQ21_MCU_CLR_SFT 21
+#define IRQ21_MCU_CLR_MASK_SFT BIT(21)
+#define IRQ20_MCU_CLR_SFT 20
+#define IRQ20_MCU_CLR_MASK_SFT BIT(20)
+#define IRQ19_MCU_CLR_SFT 19
+#define IRQ19_MCU_CLR_MASK_SFT BIT(19)
+#define IRQ18_MCU_CLR_SFT 18
+#define IRQ18_MCU_CLR_MASK_SFT BIT(18)
+#define IRQ17_MCU_CLR_SFT 17
+#define IRQ17_MCU_CLR_MASK_SFT BIT(17)
+#define IRQ16_MCU_CLR_SFT 16
+#define IRQ16_MCU_CLR_MASK_SFT BIT(16)
+#define IRQ15_MCU_CLR_SFT 15
+#define IRQ15_MCU_CLR_MASK_SFT BIT(15)
+#define IRQ14_MCU_CLR_SFT 14
+#define IRQ14_MCU_CLR_MASK_SFT BIT(14)
+#define IRQ13_MCU_CLR_SFT 13
+#define IRQ13_MCU_CLR_MASK_SFT BIT(13)
+#define IRQ12_MCU_CLR_SFT 12
+#define IRQ12_MCU_CLR_MASK_SFT BIT(12)
+#define IRQ11_MCU_CLR_SFT 11
+#define IRQ11_MCU_CLR_MASK_SFT BIT(11)
+#define IRQ10_MCU_CLR_SFT 10
+#define IRQ10_MCU_CLR_MASK_SFT BIT(10)
+#define IRQ9_MCU_CLR_SFT 9
+#define IRQ9_MCU_CLR_MASK_SFT BIT(9)
+#define IRQ8_MCU_CLR_SFT 8
+#define IRQ8_MCU_CLR_MASK_SFT BIT(8)
+#define IRQ7_MCU_CLR_SFT 7
+#define IRQ7_MCU_CLR_MASK_SFT BIT(7)
+#define IRQ6_MCU_CLR_SFT 6
+#define IRQ6_MCU_CLR_MASK_SFT BIT(6)
+#define IRQ5_MCU_CLR_SFT 5
+#define IRQ5_MCU_CLR_MASK_SFT BIT(5)
+#define IRQ4_MCU_CLR_SFT 4
+#define IRQ4_MCU_CLR_MASK_SFT BIT(4)
+#define IRQ3_MCU_CLR_SFT 3
+#define IRQ3_MCU_CLR_MASK_SFT BIT(3)
+#define IRQ2_MCU_CLR_SFT 2
+#define IRQ2_MCU_CLR_MASK_SFT BIT(2)
+#define IRQ1_MCU_CLR_SFT 1
+#define IRQ1_MCU_CLR_MASK_SFT BIT(1)
+#define IRQ0_MCU_CLR_SFT 0
+#define IRQ0_MCU_CLR_MASK_SFT BIT(0)
+
+/* AFE_IRQ_MCU_EN */
+#define IRQ31_MCU_EN_SFT 31
+#define IRQ30_MCU_EN_SFT 30
+#define IRQ29_MCU_EN_SFT 29
+#define IRQ28_MCU_EN_SFT 28
+#define IRQ27_MCU_EN_SFT 27
+#define IRQ26_MCU_EN_SFT 26
+#define IRQ25_MCU_EN_SFT 25
+#define IRQ24_MCU_EN_SFT 24
+#define IRQ23_MCU_EN_SFT 23
+#define IRQ22_MCU_EN_SFT 22
+#define IRQ21_MCU_EN_SFT 21
+#define IRQ20_MCU_EN_SFT 20
+#define IRQ19_MCU_EN_SFT 19
+#define IRQ18_MCU_EN_SFT 18
+#define IRQ17_MCU_EN_SFT 17
+#define IRQ16_MCU_EN_SFT 16
+#define IRQ15_MCU_EN_SFT 15
+#define IRQ14_MCU_EN_SFT 14
+#define IRQ13_MCU_EN_SFT 13
+#define IRQ12_MCU_EN_SFT 12
+#define IRQ11_MCU_EN_SFT 11
+#define IRQ10_MCU_EN_SFT 10
+#define IRQ9_MCU_EN_SFT 9
+#define IRQ8_MCU_EN_SFT 8
+#define IRQ7_MCU_EN_SFT 7
+#define IRQ6_MCU_EN_SFT 6
+#define IRQ5_MCU_EN_SFT 5
+#define IRQ4_MCU_EN_SFT 4
+#define IRQ3_MCU_EN_SFT 3
+#define IRQ2_MCU_EN_SFT 2
+#define IRQ1_MCU_EN_SFT 1
+#define IRQ0_MCU_EN_SFT 0
+
+/* AFE_IRQ_MCU_SCP_EN */
+#define IRQ31_MCU_SCP_EN_SFT 31
+#define IRQ30_MCU_SCP_EN_SFT 30
+#define IRQ29_MCU_SCP_EN_SFT 29
+#define IRQ28_MCU_SCP_EN_SFT 28
+#define IRQ27_MCU_SCP_EN_SFT 27
+#define IRQ26_MCU_SCP_EN_SFT 26
+#define IRQ25_MCU_SCP_EN_SFT 25
+#define IRQ24_MCU_SCP_EN_SFT 24
+#define IRQ23_MCU_SCP_EN_SFT 23
+#define IRQ22_MCU_SCP_EN_SFT 22
+#define IRQ21_MCU_SCP_EN_SFT 21
+#define IRQ20_MCU_SCP_EN_SFT 20
+#define IRQ19_MCU_SCP_EN_SFT 19
+#define IRQ18_MCU_SCP_EN_SFT 18
+#define IRQ17_MCU_SCP_EN_SFT 17
+#define IRQ16_MCU_SCP_EN_SFT 16
+#define IRQ15_MCU_SCP_EN_SFT 15
+#define IRQ14_MCU_SCP_EN_SFT 14
+#define IRQ13_MCU_SCP_EN_SFT 13
+#define IRQ12_MCU_SCP_EN_SFT 12
+#define IRQ11_MCU_SCP_EN_SFT 11
+#define IRQ10_MCU_SCP_EN_SFT 10
+#define IRQ9_MCU_SCP_EN_SFT 9
+#define IRQ8_MCU_SCP_EN_SFT 8
+#define IRQ7_MCU_SCP_EN_SFT 7
+#define IRQ6_MCU_SCP_EN_SFT 6
+#define IRQ5_MCU_SCP_EN_SFT 5
+#define IRQ4_MCU_SCP_EN_SFT 4
+#define IRQ3_MCU_SCP_EN_SFT 3
+#define IRQ2_MCU_SCP_EN_SFT 2
+#define IRQ1_MCU_SCP_EN_SFT 1
+#define IRQ0_MCU_SCP_EN_SFT 0
+
+/* AFE_IRQ_MCU_DSP_EN */
+#define IRQ31_MCU_DSP_EN_SFT 31
+#define IRQ30_MCU_DSP_EN_SFT 30
+#define IRQ29_MCU_DSP_EN_SFT 29
+#define IRQ28_MCU_DSP_EN_SFT 28
+#define IRQ27_MCU_DSP_EN_SFT 27
+#define IRQ26_MCU_DSP_EN_SFT 26
+#define IRQ25_MCU_DSP_EN_SFT 25
+#define IRQ24_MCU_DSP_EN_SFT 24
+#define IRQ23_MCU_DSP_EN_SFT 23
+#define IRQ22_MCU_DSP_EN_SFT 22
+#define IRQ21_MCU_DSP_EN_SFT 21
+#define IRQ20_MCU_DSP_EN_SFT 20
+#define IRQ19_MCU_DSP_EN_SFT 19
+#define IRQ18_MCU_DSP_EN_SFT 18
+#define IRQ17_MCU_DSP_EN_SFT 17
+#define IRQ16_MCU_DSP_EN_SFT 16
+#define IRQ15_MCU_DSP_EN_SFT 15
+#define IRQ14_MCU_DSP_EN_SFT 14
+#define IRQ13_MCU_DSP_EN_SFT 13
+#define IRQ12_MCU_DSP_EN_SFT 12
+#define IRQ11_MCU_DSP_EN_SFT 11
+#define IRQ10_MCU_DSP_EN_SFT 10
+#define IRQ9_MCU_DSP_EN_SFT 9
+#define IRQ8_MCU_DSP_EN_SFT 8
+#define IRQ7_MCU_DSP_EN_SFT 7
+#define IRQ6_MCU_DSP_EN_SFT 6
+#define IRQ5_MCU_DSP_EN_SFT 5
+#define IRQ4_MCU_DSP_EN_SFT 4
+#define IRQ3_MCU_DSP_EN_SFT 3
+#define IRQ2_MCU_DSP_EN_SFT 2
+#define IRQ1_MCU_DSP_EN_SFT 1
+#define IRQ0_MCU_DSP_EN_SFT 0
+
+/* AFE_AUD_PAD_TOP */
+#define AUD_PAD_TOP_MON_SFT 15
+#define AUD_PAD_TOP_MON_MASK_SFT GENMASK(31, 15)
+#define AUD_PAD_TOP_FIFO_RSP_SFT 4
+#define AUD_PAD_TOP_FIFO_RSP_MASK_SFT GENMASK(7, 4)
+#define RG_RX_PROTOCOL2_SFT 3
+#define RG_RX_PROTOCOL2_MASK_SFT BIT(3)
+#define RESERVDED_01_SFT 1
+#define RESERVDED_01_MASK_SFT GENMASK(2, 1)
+#define RG_RX_FIFO_ON_SFT 0
+#define RG_RX_FIFO_ON_MASK_SFT BIT(0)
+
+/* AFE_ADDA_MTKAIF_SYNCWORD_CFG */
+#define RG_ADDA6_MTKAIF_RX_SYNC_WORD2_DISABLE_SFT 23
+#define RG_ADDA6_MTKAIF_RX_SYNC_WORD2_DISABLE_MASK_SFT BIT(23)
+
+/* AFE_ADDA_MTKAIF_RX_CFG0 */
+#define MTKAIF_RXIF_VOICE_MODE_SFT 20
+#define MTKAIF_RXIF_VOICE_MODE_MASK_SFT GENMASK(23, 20)
+#define MTKAIF_RXIF_DETECT_ON_SFT 16
+#define MTKAIF_RXIF_DETECT_ON_MASK_SFT BIT(16)
+#define MTKAIF_RXIF_DATA_BIT_SFT 8
+#define MTKAIF_RXIF_DATA_BIT_MASK_SFT GENMASK(10, 8)
+#define MTKAIF_RXIF_FIFO_RSP_SFT 4
+#define MTKAIF_RXIF_FIFO_RSP_MASK_SFT GENMASK(6, 4)
+#define MTKAIF_RXIF_DATA_MODE_SFT 0
+#define MTKAIF_RXIF_DATA_MODE_MASK_SFT BIT(0)
+
+/* GENERAL_ASRC_MODE */
+#define GENERAL2_ASRCOUT_MODE_SFT 12
+#define GENERAL2_ASRCOUT_MODE_MASK 0xf
+#define GENERAL2_ASRCOUT_MODE_MASK_SFT GENMASK(15, 12)
+#define GENERAL2_ASRCIN_MODE_SFT 8
+#define GENERAL2_ASRCIN_MODE_MASK 0xf
+#define GENERAL2_ASRCIN_MODE_MASK_SFT GENMASK(11, 8)
+#define GENERAL1_ASRCOUT_MODE_SFT 4
+#define GENERAL1_ASRCOUT_MODE_MASK 0xf
+#define GENERAL1_ASRCOUT_MODE_MASK_SFT GENMASK(7, 4)
+#define GENERAL1_ASRCIN_MODE_SFT 0
+#define GENERAL1_ASRCIN_MODE_MASK 0xf
+#define GENERAL1_ASRCIN_MODE_MASK_SFT GENMASK(3, 0)
+
+/* GENERAL_ASRC_EN_ON */
+#define GENERAL2_ASRC_EN_ON_SFT 1
+#define GENERAL2_ASRC_EN_ON_MASK_SFT BIT(1)
+#define GENERAL1_ASRC_EN_ON_SFT 0
+#define GENERAL1_ASRC_EN_ON_MASK_SFT BIT(0)
+
+/* AFE_GENERAL1_ASRC_2CH_CON0 */
+#define G_SRC_CHSET_STR_CLR_SFT 4
+#define G_SRC_CHSET_STR_CLR_MASK_SFT BIT(4)
+#define G_SRC_CHSET_ON_SFT 2
+#define G_SRC_CHSET_ON_MASK_SFT BIT(2)
+#define G_SRC_COEFF_SRAM_CTRL_SFT 1
+#define G_SRC_COEFF_SRAM_CTRL_MASK_SFT BIT(1)
+#define G_SRC_ASM_ON_SFT 0
+#define G_SRC_ASM_ON_MASK_SFT BIT(0)
+
+/* AFE_GENERAL1_ASRC_2CH_CON3 */
+#define G_SRC_ASM_FREQ_4_SFT 0
+#define G_SRC_ASM_FREQ_4_MASK_SFT GENMASK(23, 0)
+
+/* AFE_GENERAL1_ASRC_2CH_CON4 */
+#define G_SRC_ASM_FREQ_5_SFT 0
+#define G_SRC_ASM_FREQ_5_MASK_SFT GENMASK(23, 0)
+
+/* AFE_GENERAL1_ASRC_2CH_CON13 */
+#define G_SRC_COEFF_SRAM_ADR_SFT 0
+#define G_SRC_COEFF_SRAM_ADR_MASK_SFT GENMASK(5, 0)
+
+/* AFE_GENERAL1_ASRC_2CH_CON2 */
+#define G_SRC_CHSET_O16BIT_SFT 19
+#define G_SRC_CHSET_O16BIT_MASK_SFT BIT(19)
+#define G_SRC_CHSET_CLR_IIR_HISTORY_SFT 17
+#define G_SRC_CHSET_CLR_IIR_HISTORY_MASK_SFT BIT(17)
+#define G_SRC_CHSET_IS_MONO_SFT 16
+#define G_SRC_CHSET_IS_MONO_MASK_SFT BIT(16)
+#define G_SRC_CHSET_IIR_EN_SFT 11
+#define G_SRC_CHSET_IIR_EN_MASK_SFT BIT(11)
+#define G_SRC_CHSET_IIR_STAGE_SFT 8
+#define G_SRC_CHSET_IIR_STAGE_MASK_SFT GENMASK(10, 8)
+#define G_SRC_CHSET_STR_CLR_RU_SFT 5
+#define G_SRC_CHSET_STR_CLR_RU_MASK_SFT BIT(5)
+#define G_SRC_CHSET_ON_SFT 2
+#define G_SRC_CHSET_ON_MASK_SFT BIT(2)
+#define G_SRC_COEFF_SRAM_CTRL_SFT 1
+#define G_SRC_COEFF_SRAM_CTRL_MASK_SFT BIT(1)
+#define G_SRC_ASM_ON_SFT 0
+#define G_SRC_ASM_ON_MASK_SFT BIT(0)
+
+/* AFE_ADDA_DL_SDM_DITHER_CON */
+#define AFE_DL_SDM_DITHER_64TAP_EN_SFT 20
+#define AFE_DL_SDM_DITHER_64TAP_EN_MASK_SFT BIT(20)
+#define AFE_DL_SDM_DITHER_EN_SFT 16
+#define AFE_DL_SDM_DITHER_EN_MASK_SFT BIT(16)
+#define AFE_DL_SDM_DITHER_GAIN_SFT 0
+#define AFE_DL_SDM_DITHER_GAIN_MASK_SFT GENMASK(7, 0)
+
+/* AFE_ADDA_DL_SDM_AUTO_RESET_CON */
+#define SDM_AUTO_RESET_TEST_ON_SFT 31
+#define SDM_AUTO_RESET_TEST_ON_MASK_SFT BIT(31)
+#define AFE_DL_USE_NEW_2ND_SDM_SFT 28
+#define AFE_DL_USE_NEW_2ND_SDM_MASK_SFT BIT(28)
+#define SDM_AUTO_RESET_COUNT_TH_SFT 0
+#define SDM_AUTO_RESET_COUNT_TH_MASK_SFT GENMASK(23, 0)
+
+/* AFE_ASRC_2CH_CON0 */
+#define CON0_CHSET_STR_CLR_SFT 4
+#define CON0_CHSET_STR_CLR_MASK_SFT BIT(4)
+#define CON0_ASM_ON_SFT 0
+#define CON0_ASM_ON_MASK_SFT BIT(0)
+
+/* AFE_ASRC_2CH_CON5 */
+#define CALI_EN_SFT 0
+#define CALI_EN_MASK_SFT BIT(0)
+
+/* FPGA_CFG4 */
+#define IRQ_COUNTER_SFT 3
+#define IRQ_COUNTER_MASK_SFT GENMASK(31, 3)
+#define IRQ_CLK_COUNTER_CLEAN_SFT 2
+#define IRQ_CLK_COUNTER_CLEAN_MASK_SFT BIT(2)
+#define IRQ_CLK_COUNTER_PAUSE_SFT 1
+#define IRQ_CLK_COUNTER_PAUSE_MASK_SFT BIT(1)
+#define IRQ_CLK_COUNTER_ON_SFT 0
+#define IRQ_CLK_COUNTER_ON_MASK_SFT BIT(0)
+
+/* FPGA_CFG5 */
+#define WR_MSTR_ON_SFT 16
+#define WR_MSTR_ON_MASK_SFT GENMASK(28, 16)
+#define WR_AG_SEL_SFT 0
+#define WR_AG_SEL_MASK_SFT GENMASK(12, 0)
+
+/* FPGA_CFG6 */
+#define WR_MSTR_REQ_REAL_SFT 16
+#define WR_MSTR_REQ_REAL_MASK_SFT GENMASK(28, 16)
+#define WR_MSTR_REQ_IN_SFT 0
+#define WR_MSTR_REQ_IN_MASK_SFT GENMASK(12, 0)
+
+/* FPGA_CFG7 */
+#define MEM1_WDATA_MON0_SFT 0
+#define MEM1_WDATA_MON0_MASK_SFT GENMASK(31, 0)
+
+/* FPGA_CFG8 */
+#define MEM1_WDATA_MON1_SFT 0
+#define MEM1_WDATA_MON1_MASK_SFT GENMASK(31, 0)
+
+/* FPGA_CFG9 */
+#define MEM_WE_SFT 31
+#define MEM_WE_MASK_SFT BIT(31)
+#define AFE_HREADY_SFT 30
+#define AFE_HREADY_MASK_SFT BIT(30)
+#define MEM_WR_REQ_SFT 29
+#define MEM_WR_REQ_MASK_SFT BIT(29)
+#define WR_AG_REG_MON_SFT 16
+#define WR_AG_REG_MON_MASK_SFT GENMASK(28, 16)
+#define HCLK_CK_SFT 15
+#define HCLK_CK_MASK_SFT BIT(15)
+#define MEM_RD_REQ_SFT 14
+#define MEM_RD_REQ_MASK_SFT BIT(14)
+#define RD_AG_REQ_MON_SFT 0
+#define RD_AG_REQ_MON_MASK_SFT GENMASK(13, 0)
+
+/* FPGA_CFG10 */
+#define MEM_BYTE_0_SFT 0
+#define MEM_BYTE_0_MASK_SFT GENMASK(31, 0)
+
+/* FPGA_CFG11 */
+#define MEM_BYTE_1_SFT 0
+#define MEM_BYTE_1_MASK_SFT GENMASK(31, 0)
+
+/* FPGA_CFG12 */
+#define RDATA_CNT_SFT 30
+#define RDATA_CNT_MASK_SFT GENMASK(31, 30)
+#define MS2_HREADY_SFT 29
+#define MS2_HREADY_MASK_SFT BIT(29)
+#define MS1_HREADY_SFT 28
+#define MS1_HREADY_MASK_SFT BIT(28)
+#define AG_SEL_SFT 0
+#define AG_SEL_MASK_SFT GENMASK(25, 0)
+
+/* FPGA_CFG13 */
+#define AFE_ST_SFT 27
+#define AFE_ST_MASK_SFT GENMASK(31, 27)
+#define AG_IN_SERVICE_SFT 0
+#define AG_IN_SERVICE_MASK_SFT GENMASK(25, 0)
+
+/* ETDM_IN1_CON0 */
+#define ETDM_IN1_CON0_REG_ETDM_IN_EN_SFT 0
+#define ETDM_IN1_CON0_REG_ETDM_IN_EN_MASK_SFT BIT(0)
+#define ETDM_IN1_CON0_REG_SYNC_MODE_SFT 1
+#define ETDM_IN1_CON0_REG_SYNC_MODE_MASK_SFT BIT(1)
+#define ETDM_IN1_CON0_REG_LSB_FIRST_SFT 3
+#define ETDM_IN1_CON0_REG_LSB_FIRST_MASK_SFT BIT(3)
+#define ETDM_IN1_CON0_REG_SOFT_RST_SFT 4
+#define ETDM_IN1_CON0_REG_SOFT_RST_MASK_SFT BIT(4)
+#define ETDM_IN1_CON0_REG_SLAVE_MODE_SFT 5
+#define ETDM_IN1_CON0_REG_SLAVE_MODE_MASK_SFT BIT(5)
+#define ETDM_IN1_CON0_REG_FMT_SFT 6
+#define ETDM_IN1_CON0_REG_FMT_MASK_SFT GENMASK(8, 6)
+#define ETDM_IN1_CON0_REG_LRCK_EDGE_SEL_SFT 10
+#define ETDM_IN1_CON0_REG_LRCK_EDGE_SEL_MASK_SFT BIT(10)
+#define ETDM_IN1_CON0_REG_BIT_LENGTH_SFT 11
+#define ETDM_IN1_CON0_REG_BIT_LENGTH_MASK_SFT GENMASK(15, 11)
+#define ETDM_IN1_CON0_REG_WORD_LENGTH_SFT 16
+#define ETDM_IN1_CON0_REG_WORD_LENGTH_MASK_SFT GENMASK(20, 16)
+#define ETDM_IN1_CON0_REG_CH_NUM_SFT 23
+#define ETDM_IN1_CON0_REG_CH_NUM_MASK_SFT GENMASK(27, 23)
+#define ETDM_IN1_CON0_REG_RELATCH_1X_EN_SEL_DOMAIN_SFT 28
+#define ETDM_IN1_CON0_REG_RELATCH_1X_EN_SEL_DOMAIN_MASK_SFT GENMASK(31, 28)
+#define ETDM_IN1_CON0_REG_VALID_TOGETHER_SFT 31
+#define ETDM_IN1_CON0_REG_VALID_TOGETHER_MASK_SFT BIT(31)
+#define ETDM_IN_CON0_CTRL_MASK 0x1f9ff9e2
+
+/* ETDM_IN1_CON1 */
+#define ETDM_IN1_CON1_REG_INITIAL_COUNT_SFT 0
+#define ETDM_IN1_CON1_REG_INITIAL_COUNT_MASK_SFT GENMASK(4, 0)
+#define ETDM_IN1_CON1_REG_INITIAL_POINT_SFT 5
+#define ETDM_IN1_CON1_REG_INITIAL_POINT_MASK_SFT GENMASK(9, 5)
+#define ETDM_IN1_CON1_REG_LRCK_AUTO_OFF_SFT 10
+#define ETDM_IN1_CON1_REG_LRCK_AUTO_OFF_MASK_SFT BIT(10)
+#define ETDM_IN1_CON1_REG_BCK_AUTO_OFF_SFT 11
+#define ETDM_IN1_CON1_REG_BCK_AUTO_OFF_MASK_SFT BIT(11)
+#define ETDM_IN1_CON1_REG_INITIAL_LRCK_SFT 13
+#define ETDM_IN1_CON1_REG_INITIAL_LRCK_MASK_SFT BIT(13)
+#define ETDM_IN1_CON1_REG_LRCK_RESET_SFT 15
+#define ETDM_IN1_CON1_REG_LRCK_RESET_MASK_SFT BIT(15)
+#define ETDM_IN1_CON1_PINMUX_MCLK_CTRL_OE_SFT 16
+#define ETDM_IN1_CON1_PINMUX_MCLK_CTRL_OE_MASK_SFT BIT(16)
+#define ETDM_IN1_CON1_REG_OUTPUT_CR_EN_SFT 18
+#define ETDM_IN1_CON1_REG_OUTPUT_CR_EN_MASK_SFT BIT(18)
+#define ETDM_IN1_CON1_REG_LR_ALIGN_SFT 19
+#define ETDM_IN1_CON1_REG_LR_ALIGN_MASK_SFT BIT(19)
+#define ETDM_IN1_CON1_REG_LRCK_WIDTH_SFT 20
+#define ETDM_IN1_CON1_REG_LRCK_WIDTH_MASK_SFT GENMASK(29, 20)
+#define ETDM_IN1_CON1_REG_DIRECT_INPUT_MASTER_BCK_SFT 30
+#define ETDM_IN1_CON1_REG_DIRECT_INPUT_MASTER_BCK_MASK_SFT BIT(30)
+#define ETDM_IN1_CON1_REG_LRCK_AUTO_MODE_SFT 31
+#define ETDM_IN1_CON1_REG_LRCK_AUTO_MODE_MASK_SFT BIT(31)
+#define ETDM_IN_CON1_CTRL_MASK 0xbff10000
+
+/* ETDM_IN1_CON2 */
+#define ETDM_IN1_CON2_REG_UPDATE_POINT_SFT 0
+#define ETDM_IN1_CON2_REG_UPDATE_POINT_MASK_SFT GENMASK(4, 0)
+#define ETDM_IN1_CON2_REG_UPDATE_GAP_SFT 5
+#define ETDM_IN1_CON2_REG_UPDATE_GAP_MASK_SFT GENMASK(9, 5)
+#define ETDM_IN1_CON2_REG_CLOCK_SOURCE_SEL_SFT 10
+#define ETDM_IN1_CON2_REG_CLOCK_SOURCE_SEL_MASK_SFT GENMASK(12, 10)
+#define ETDM_IN1_CON2_REG_AGENT_USE_ETDM_BCK_SFT 13
+#define ETDM_IN1_CON2_REG_AGENT_USE_ETDM_BCK_MASK_SFT BIT(13)
+#define ETDM_IN1_CON2_REG_CK_EN_SEL_AUTO_SFT 14
+#define ETDM_IN1_CON2_REG_CK_EN_SEL_AUTO_MASK_SFT BIT(14)
+#define ETDM_IN1_CON2_REG_MULTI_IP_ONE_DATA_CH_NUM_SFT 15
+#define ETDM_IN1_CON2_REG_MULTI_IP_ONE_DATA_CH_NUM_MASK_SFT GENMASK(19, 15)
+#define ETDM_IN1_CON2_REG_MASK_AUTO_SFT 20
+#define ETDM_IN1_CON2_REG_MASK_AUTO_MASK_SFT BIT(20)
+#define ETDM_IN1_CON2_REG_MASK_NUM_SFT 21
+#define ETDM_IN1_CON2_REG_MASK_NUM_MASK_SFT GENMASK(25, 21)
+#define ETDM_IN1_CON2_REG_UPDATE_POINT_AUTO_SFT 26
+#define ETDM_IN1_CON2_REG_UPDATE_POINT_AUTO_MASK_SFT BIT(26)
+#define ETDM_IN1_CON2_REG_SDATA_DELAY_0P5T_EN_SFT 27
+#define ETDM_IN1_CON2_REG_SDATA_DELAY_0P5T_EN_MASK_SFT BIT(27)
+#define ETDM_IN1_CON2_REG_SDATA_DELAY_BCK_INV_SFT 28
+#define ETDM_IN1_CON2_REG_SDATA_DELAY_BCK_INV_MASK_SFT BIT(28)
+#define ETDM_IN1_CON2_REG_LRCK_DELAY_0P5T_EN_SFT 29
+#define ETDM_IN1_CON2_REG_LRCK_DELAY_0P5T_EN_MASK_SFT BIT(29)
+#define ETDM_IN1_CON2_REG_LRCK_DELAY_BCK_INV_SFT 30
+#define ETDM_IN1_CON2_REG_LRCK_DELAY_BCK_INV_MASK_SFT BIT(30)
+#define ETDM_IN1_CON2_REG_MULTI_IP_MODE_SFT 31
+#define ETDM_IN1_CON2_REG_MULTI_IP_MODE_MASK_SFT BIT(31)
+#define ETDM_IN_CON2_CTRL_MASK 0x800f8000
+#define ETDM_IN_CON2_MULTI_IP_CH(x) (((x) - 1) << 15)
+#define ETDM_IN_CON2_MULTI_IP_2CH_MODE BIT(31)
+
+/* ETDM_IN1_CON3 */
+#define ETDM_IN1_CON3_REG_DISABLE_OUT_0_SFT 0
+#define ETDM_IN1_CON3_REG_DISABLE_OUT_0_MASK_SFT BIT(0)
+#define ETDM_IN1_CON3_REG_DISABLE_OUT_1_SFT 1
+#define ETDM_IN1_CON3_REG_DISABLE_OUT_1_MASK_SFT BIT(1)
+#define ETDM_IN1_CON3_REG_DISABLE_OUT_2_SFT 2
+#define ETDM_IN1_CON3_REG_DISABLE_OUT_2_MASK_SFT BIT(2)
+#define ETDM_IN1_CON3_REG_DISABLE_OUT_3_SFT 3
+#define ETDM_IN1_CON3_REG_DISABLE_OUT_3_MASK_SFT BIT(3)
+#define ETDM_IN1_CON3_REG_DISABLE_OUT_4_SFT 4
+#define ETDM_IN1_CON3_REG_DISABLE_OUT_4_MASK_SFT BIT(4)
+#define ETDM_IN1_CON3_REG_DISABLE_OUT_5_SFT 5
+#define ETDM_IN1_CON3_REG_DISABLE_OUT_5_MASK_SFT BIT(5)
+#define ETDM_IN1_CON3_REG_DISABLE_OUT_6_SFT 6
+#define ETDM_IN1_CON3_REG_DISABLE_OUT_6_MASK_SFT BIT(6)
+#define ETDM_IN1_CON3_REG_DISABLE_OUT_7_SFT 7
+#define ETDM_IN1_CON3_REG_DISABLE_OUT_7_MASK_SFT BIT(7)
+#define ETDM_IN1_CON3_REG_DISABLE_OUT_8_SFT 8
+#define ETDM_IN1_CON3_REG_DISABLE_OUT_8_MASK_SFT BIT(8)
+#define ETDM_IN1_CON3_REG_DISABLE_OUT_9_SFT 9
+#define ETDM_IN1_CON3_REG_DISABLE_OUT_9_MASK_SFT BIT(9)
+#define ETDM_IN1_CON3_REG_DISABLE_OUT_10_SFT 10
+#define ETDM_IN1_CON3_REG_DISABLE_OUT_10_MASK_SFT BIT(10)
+#define ETDM_IN1_CON3_REG_DISABLE_OUT_11_SFT 11
+#define ETDM_IN1_CON3_REG_DISABLE_OUT_11_MASK_SFT BIT(11)
+#define ETDM_IN1_CON3_REG_DISABLE_OUT_12_SFT 12
+#define ETDM_IN1_CON3_REG_DISABLE_OUT_12_MASK_SFT BIT(12)
+#define ETDM_IN1_CON3_REG_DISABLE_OUT_13_SFT 13
+#define ETDM_IN1_CON3_REG_DISABLE_OUT_13_MASK_SFT BIT(13)
+#define ETDM_IN1_CON3_REG_DISABLE_OUT_14_SFT 14
+#define ETDM_IN1_CON3_REG_DISABLE_OUT_14_MASK_SFT BIT(14)
+#define ETDM_IN1_CON3_REG_DISABLE_OUT_15_SFT 15
+#define ETDM_IN1_CON3_REG_DISABLE_OUT_15_MASK_SFT BIT(15)
+#define ETDM_IN1_CON3_REG_RJ_DATA_RIGHT_ALIGN_SFT 16
+#define ETDM_IN1_CON3_REG_RJ_DATA_RIGHT_ALIGN_MASK_SFT BIT(16)
+#define ETDM_IN1_CON3_REG_MONITOR_SEL_SFT 17
+#define ETDM_IN1_CON3_REG_MONITOR_SEL_MASK_SFT GENMASK(18, 17)
+#define ETDM_IN1_CON3_REG_CNT_UPPER_LIMIT_SFT 19
+#define ETDM_IN1_CON3_REG_CNT_UPPER_LIMIT_MASK_SFT GENMASK(24, 19)
+#define ETDM_IN1_CON3_REG_COMPACT_SAMPLE_END_DIS_SFT 25
+#define ETDM_IN1_CON3_REG_COMPACT_SAMPLE_END_DIS_MASK_SFT BIT(25)
+#define ETDM_IN1_CON3_REG_FS_TIMING_SEL_SFT 26
+#define ETDM_IN1_CON3_REG_FS_TIMING_SEL_MASK_SFT GENMASK(30, 26)
+#define ETDM_IN1_CON3_REG_SAMPLE_END_MODE_SFT 31
+#define ETDM_IN1_CON3_REG_SAMPLE_END_MODE_MASK_SFT BIT(31)
+#define ETDM_IN_CON3_CTRL_MASK (0x7c000000)
+#define ETDM_IN_CON3_FS(x) (((x) & 0x1f) << 26)
+
+/* ETDM_IN1_CON4 */
+#define ETDM_IN1_CON4_REG_DSD_MODE_SFT 0
+#define ETDM_IN1_CON4_REG_DSD_MODE_MASK_SFT GENMASK(5, 0)
+#define ETDM_IN1_CON4_REG_DSD_REPACK_AUTO_MODE_SFT 8
+#define ETDM_IN1_CON4_REG_DSD_REPACK_AUTO_MODE_MASK_SFT BIT(8)
+#define ETDM_IN1_CON4_REG_REPACK_WORD_LENGTH_SFT 9
+#define ETDM_IN1_CON4_REG_REPACK_WORD_LENGTH_MASK_SFT GENMASK(10, 9)
+#define ETDM_IN1_CON4_REG_ASYNC_RESET_SFT 11
+#define ETDM_IN1_CON4_REG_ASYNC_RESET_MASK_SFT BIT(11)
+#define ETDM_IN1_CON4_REG_DSD_CHNUM_SFT 12
+#define ETDM_IN1_CON4_REG_DSD_CHNUM_MASK_SFT GENMASK(15, 12)
+#define ETDM_IN1_CON4_REG_SLAVE_BCK_INV_SFT 16
+#define ETDM_IN1_CON4_REG_SLAVE_BCK_INV_MASK_SFT BIT(16)
+#define ETDM_IN1_CON4_REG_SLAVE_LRCK_INV_SFT 17
+#define ETDM_IN1_CON4_REG_SLAVE_LRCK_INV_MASK_SFT BIT(17)
+#define ETDM_IN1_CON4_REG_MASTER_BCK_INV_SFT 18
+#define ETDM_IN1_CON4_REG_MASTER_BCK_INV_MASK_SFT BIT(18)
+#define ETDM_IN1_CON4_REG_MASTER_LRCK_INV_SFT 19
+#define ETDM_IN1_CON4_REG_MASTER_LRCK_INV_MASK_SFT BIT(19)
+#define ETDM_IN1_CON4_REG_RELATCH_1X_EN_SEL_SFT 20
+#define ETDM_IN1_CON4_REG_RELATCH_1X_EN_SEL_MASK_SFT GENMASK(24, 20)
+#define ETDM_IN1_CON4_REG_SAMPLE_END_POINT_SFT 25
+#define ETDM_IN1_CON4_REG_SAMPLE_END_POINT_MASK_SFT GENMASK(29, 25)
+#define ETDM_IN1_CON4_REG_WAIT_LAST_SAMPLE_SFT 30
+#define ETDM_IN1_CON4_REG_WAIT_LAST_SAMPLE_MASK_SFT BIT(30)
+#define ETDM_IN1_CON4_REG_MASTER_BCK_FORCE_ON_SFT 31
+#define ETDM_IN1_CON4_REG_MASTER_BCK_FORCE_ON_MASK_SFT BIT(31)
+#define ETDM_IN_CON4_CTRL_MASK 0x1ff0000
+#define ETDM_IN_CON4_FS(x) (((x) & 0x1f) << 20)
+#define ETDM_IN_CON4_CON0_MASTER_LRCK_INV BIT(19)
+#define ETDM_IN_CON4_CON0_MASTER_BCK_INV BIT(18)
+#define ETDM_IN_CON4_CON0_SLAVE_LRCK_INV BIT(17)
+#define ETDM_IN_CON4_CON0_SLAVE_BCK_INV BIT(16)
+
+/* ETDM_IN1_CON5 */
+#define ETDM_IN1_CON5_REG_ODD_FLAG_EN_0_SFT 0
+#define ETDM_IN1_CON5_REG_ODD_FLAG_EN_0_MASK_SFT BIT(0)
+#define ETDM_IN1_CON5_REG_ODD_FLAG_EN_1_SFT 1
+#define ETDM_IN1_CON5_REG_ODD_FLAG_EN_1_MASK_SFT BIT(1)
+#define ETDM_IN1_CON5_REG_ODD_FLAG_EN_2_SFT 2
+#define ETDM_IN1_CON5_REG_ODD_FLAG_EN_2_MASK_SFT BIT(2)
+#define ETDM_IN1_CON5_REG_ODD_FLAG_EN_3_SFT 3
+#define ETDM_IN1_CON5_REG_ODD_FLAG_EN_3_MASK_SFT BIT(3)
+#define ETDM_IN1_CON5_REG_ODD_FLAG_EN_4_SFT 4
+#define ETDM_IN1_CON5_REG_ODD_FLAG_EN_4_MASK_SFT BIT(4)
+#define ETDM_IN1_CON5_REG_ODD_FLAG_EN_5_SFT 5
+#define ETDM_IN1_CON5_REG_ODD_FLAG_EN_5_MASK_SFT BIT(5)
+#define ETDM_IN1_CON5_REG_ODD_FLAG_EN_6_SFT 6
+#define ETDM_IN1_CON5_REG_ODD_FLAG_EN_6_MASK_SFT BIT(6)
+#define ETDM_IN1_CON5_REG_ODD_FLAG_EN_7_SFT 7
+#define ETDM_IN1_CON5_REG_ODD_FLAG_EN_7_MASK_SFT BIT(7)
+#define ETDM_IN1_CON5_REG_ODD_FLAG_EN_8_SFT 8
+#define ETDM_IN1_CON5_REG_ODD_FLAG_EN_8_MASK_SFT BIT(8)
+#define ETDM_IN1_CON5_REG_ODD_FLAG_EN_9_SFT 9
+#define ETDM_IN1_CON5_REG_ODD_FLAG_EN_9_MASK_SFT BIT(9)
+#define ETDM_IN1_CON5_REG_ODD_FLAG_EN_10_SFT 10
+#define ETDM_IN1_CON5_REG_ODD_FLAG_EN_10_MASK_SFT BIT(10)
+#define ETDM_IN1_CON5_REG_ODD_FLAG_EN_11_SFT 11
+#define ETDM_IN1_CON5_REG_ODD_FLAG_EN_11_MASK_SFT BIT(11)
+#define ETDM_IN1_CON5_REG_ODD_FLAG_EN_12_SFT 12
+#define ETDM_IN1_CON5_REG_ODD_FLAG_EN_12_MASK_SFT BIT(12)
+#define ETDM_IN1_CON5_REG_ODD_FLAG_EN_13_SFT 13
+#define ETDM_IN1_CON5_REG_ODD_FLAG_EN_13_MASK_SFT BIT(13)
+#define ETDM_IN1_CON5_REG_ODD_FLAG_EN_14_SFT 14
+#define ETDM_IN1_CON5_REG_ODD_FLAG_EN_14_MASK_SFT BIT(14)
+#define ETDM_IN1_CON5_REG_ODD_FLAG_EN_15_SFT 15
+#define ETDM_IN1_CON5_REG_ODD_FLAG_EN_15_MASK_SFT BIT(15)
+#define ETDM_IN1_CON5_REG_LR_SWAP_0_SFT 16
+#define ETDM_IN1_CON5_REG_LR_SWAP_0_MASK_SFT BIT(16)
+#define ETDM_IN1_CON5_REG_LR_SWAP_1_SFT 17
+#define ETDM_IN1_CON5_REG_LR_SWAP_1_MASK_SFT BIT(17)
+#define ETDM_IN1_CON5_REG_LR_SWAP_2_SFT 18
+#define ETDM_IN1_CON5_REG_LR_SWAP_2_MASK_SFT BIT(18)
+#define ETDM_IN1_CON5_REG_LR_SWAP_3_SFT 19
+#define ETDM_IN1_CON5_REG_LR_SWAP_3_MASK_SFT BIT(19)
+#define ETDM_IN1_CON5_REG_LR_SWAP_4_SFT 20
+#define ETDM_IN1_CON5_REG_LR_SWAP_4_MASK_SFT BIT(20)
+#define ETDM_IN1_CON5_REG_LR_SWAP_5_SFT 21
+#define ETDM_IN1_CON5_REG_LR_SWAP_5_MASK_SFT BIT(21)
+#define ETDM_IN1_CON5_REG_LR_SWAP_6_SFT 22
+#define ETDM_IN1_CON5_REG_LR_SWAP_6_MASK_SFT BIT(22)
+#define ETDM_IN1_CON5_REG_LR_SWAP_7_SFT 23
+#define ETDM_IN1_CON5_REG_LR_SWAP_7_MASK_SFT BIT(23)
+#define ETDM_IN1_CON5_REG_LR_SWAP_8_SFT 24
+#define ETDM_IN1_CON5_REG_LR_SWAP_8_MASK_SFT BIT(24)
+#define ETDM_IN1_CON5_REG_LR_SWAP_9_SFT 25
+#define ETDM_IN1_CON5_REG_LR_SWAP_9_MASK_SFT BIT(25)
+#define ETDM_IN1_CON5_REG_LR_SWAP_10_SFT 26
+#define ETDM_IN1_CON5_REG_LR_SWAP_10_MASK_SFT BIT(26)
+#define ETDM_IN1_CON5_REG_LR_SWAP_11_SFT 27
+#define ETDM_IN1_CON5_REG_LR_SWAP_11_MASK_SFT BIT(27)
+#define ETDM_IN1_CON5_REG_LR_SWAP_12_SFT 28
+#define ETDM_IN1_CON5_REG_LR_SWAP_12_MASK_SFT BIT(28)
+#define ETDM_IN1_CON5_REG_LR_SWAP_13_SFT 29
+#define ETDM_IN1_CON5_REG_LR_SWAP_13_MASK_SFT BIT(29)
+#define ETDM_IN1_CON5_REG_LR_SWAP_14_SFT 30
+#define ETDM_IN1_CON5_REG_LR_SWAP_14_MASK_SFT BIT(30)
+#define ETDM_IN1_CON5_REG_LR_SWAP_15_SFT 31
+#define ETDM_IN1_CON5_REG_LR_SWAP_15_MASK_SFT BIT(31)
+
+/* ETDM_IN1_CON6 */
+#define ETDM_IN1_CON6_LCH_DATA_REG_SFT 0
+#define ETDM_IN1_CON6_LCH_DATA_REG_MASK_SFT GENMASK(31, 0)
+
+/* ETDM_IN1_CON7 */
+#define ETDM_IN1_CON7_RCH_DATA_REG_SFT 0
+#define ETDM_IN1_CON7_RCH_DATA_REG_MASK_SFT GENMASK(31, 0)
+
+/* ETDM_IN1_CON8 */
+#define ETDM_IN1_CON8_REG_AFIFO_THRESHOLD_SFT 29
+#define ETDM_IN1_CON8_REG_AFIFO_THRESHOLD_MASK_SFT GENMASK(30, 29)
+#define ETDM_IN1_CON8_REG_CK_EN_SEL_MANUAL_SFT 16
+#define ETDM_IN1_CON8_REG_CK_EN_SEL_MANUAL_MASK_SFT GENMASK(25, 16)
+#define ETDM_IN1_CON8_REG_AFIFO_SW_RESET_SFT 15
+#define ETDM_IN1_CON8_REG_AFIFO_SW_RESET_MASK_SFT BIT(15)
+#define ETDM_IN1_CON8_REG_AFIFO_RESET_SEL_SFT 14
+#define ETDM_IN1_CON8_REG_AFIFO_RESET_SEL_MASK_SFT BIT(14)
+#define ETDM_IN1_CON8_REG_AFIFO_AUTO_RESET_DIS_SFT 9
+#define ETDM_IN1_CON8_REG_AFIFO_AUTO_RESET_DIS_MASK_SFT BIT(9)
+#define ETDM_IN1_CON8_REG_ETDM_USE_AFIFO_SFT 8
+#define ETDM_IN1_CON8_REG_ETDM_USE_AFIFO_MASK_SFT BIT(8)
+#define ETDM_IN1_CON8_REG_AFIFO_CLOCK_DOMAIN_SEL_SFT 5
+#define ETDM_IN1_CON8_REG_AFIFO_CLOCK_DOMAIN_SEL_MASK_SFT GENMASK(7, 5)
+#define ETDM_IN1_CON8_REG_AFIFO_MODE_SFT 0
+#define ETDM_IN1_CON8_REG_AFIFO_MODE_MASK_SFT GENMASK(4, 0)
+#define ETDM_IN_CON8_FS(x) (((x) & 0x1f) << 0)
+#define ETDM_IN_CON8_CTRL_MASK 0x13f
+
+#define AUDIO_TOP_CON0 0x0000
+#define AUDIO_TOP_CON1 0x0004
+#define AUDIO_TOP_CON2 0x0008
+#define AUDIO_TOP_CON3 0x000c
+#define AFE_DAC_CON0 0x0010
+#define AFE_I2S_CON 0x0018
+#define AFE_CONN0 0x0020
+#define AFE_CONN1 0x0024
+#define AFE_CONN2 0x0028
+#define AFE_CONN3 0x002c
+#define AFE_CONN4 0x0030
+#define AFE_I2S_CON1 0x0034
+#define AFE_I2S_CON2 0x0038
+#define AFE_I2S_CON3 0x0040
+#define AFE_CONN5 0x0044
+#define AFE_CONN_24BIT 0x0048
+#define AFE_DL1_CON0 0x004c
+#define AFE_DL1_BASE_MSB 0x0050
+#define AFE_DL1_BASE 0x0054
+#define AFE_DL1_CUR_MSB 0x0058
+#define AFE_DL1_CUR 0x005c
+#define AFE_DL1_END_MSB 0x0060
+#define AFE_DL1_END 0x0064
+#define AFE_DL2_CON0 0x0068
+#define AFE_DL2_BASE_MSB 0x006c
+#define AFE_DL2_BASE 0x0070
+#define AFE_DL2_CUR_MSB 0x0074
+#define AFE_DL2_CUR 0x0078
+#define AFE_DL2_END_MSB 0x007c
+#define AFE_DL2_END 0x0080
+#define AFE_DL3_CON0 0x0084
+#define AFE_DL3_BASE_MSB 0x0088
+#define AFE_DL3_BASE 0x008c
+#define AFE_DL3_CUR_MSB 0x0090
+#define AFE_DL3_CUR 0x0094
+#define AFE_DL3_END_MSB 0x0098
+#define AFE_DL3_END 0x009c
+#define AFE_CONN6 0x00bc
+#define AFE_DL4_CON0 0x00cc
+#define AFE_DL4_BASE_MSB 0x00d0
+#define AFE_DL4_BASE 0x00d4
+#define AFE_DL4_CUR_MSB 0x00d8
+#define AFE_DL4_CUR 0x00dc
+#define AFE_DL4_END_MSB 0x00e0
+#define AFE_DL4_END 0x00e4
+#define AFE_DL12_CON0 0x00e8
+#define AFE_DL12_BASE_MSB 0x00ec
+#define AFE_DL12_BASE 0x00f0
+#define AFE_DL12_CUR_MSB 0x00f4
+#define AFE_DL12_CUR 0x00f8
+#define AFE_DL12_END_MSB 0x00fc
+#define AFE_DL12_END 0x0100
+#define AFE_ADDA_DL_SRC2_CON0 0x0108
+#define AFE_ADDA_DL_SRC2_CON1 0x010c
+#define AFE_ADDA_UL_SRC_CON0 0x0114
+#define AFE_ADDA_UL_SRC_CON1 0x0118
+#define AFE_ADDA_TOP_CON0 0x0120
+#define AFE_ADDA_UL_DL_CON0 0x0124
+#define AFE_ADDA_SRC_DEBUG 0x012c
+#define AFE_ADDA_SRC_DEBUG_MON0 0x0130
+#define AFE_ADDA_SRC_DEBUG_MON1 0x0134
+#define AFE_ADDA_UL_SRC_MON0 0x0148
+#define AFE_ADDA_UL_SRC_MON1 0x014c
+#define AFE_SECURE_CON0 0x0150
+#define AFE_SRAM_BOUND 0x0154
+#define AFE_SECURE_CON1 0x0158
+#define AFE_SECURE_CONN0 0x015c
+#define AFE_VUL_CON0 0x0170
+#define AFE_VUL_BASE_MSB 0x0174
+#define AFE_VUL_BASE 0x0178
+#define AFE_VUL_CUR_MSB 0x017c
+#define AFE_VUL_CUR 0x0180
+#define AFE_VUL_END_MSB 0x0184
+#define AFE_VUL_END 0x0188
+#define AFE_SIDETONE_DEBUG 0x01d0
+#define AFE_SIDETONE_MON 0x01d4
+#define AFE_SINEGEN_CON2 0x01dc
+#define AFE_SIDETONE_CON0 0x01e0
+#define AFE_SIDETONE_COEFF 0x01e4
+#define AFE_SIDETONE_CON1 0x01e8
+#define AFE_SIDETONE_GAIN 0x01ec
+#define AFE_SINEGEN_CON0 0x01f0
+#define AFE_TOP_CON0 0x0200
+#define AFE_VUL2_CON0 0x020c
+#define AFE_VUL2_BASE_MSB 0x0210
+#define AFE_VUL2_BASE 0x0214
+#define AFE_VUL2_CUR_MSB 0x0218
+#define AFE_VUL2_CUR 0x021c
+#define AFE_VUL2_END_MSB 0x0220
+#define AFE_VUL2_END 0x0224
+#define AFE_VUL3_CON0 0x0228
+#define AFE_VUL3_BASE_MSB 0x022c
+#define AFE_VUL3_BASE 0x0230
+#define AFE_VUL3_CUR_MSB 0x0234
+#define AFE_VUL3_CUR 0x0238
+#define AFE_VUL3_END_MSB 0x023c
+#define AFE_VUL3_END 0x0240
+#define AFE_BUSY 0x0244
+#define AFE_BUS_CFG 0x0250
+#define AFE_ADDA_PREDIS_CON0 0x0260
+#define AFE_ADDA_PREDIS_CON1 0x0264
+#define AFE_I2S_MON 0x027c
+#define AFE_ADDA_IIR_COEF_02_01 0x0290
+#define AFE_ADDA_IIR_COEF_04_03 0x0294
+#define AFE_ADDA_IIR_COEF_06_05 0x0298
+#define AFE_ADDA_IIR_COEF_08_07 0x029c
+#define AFE_ADDA_IIR_COEF_10_09 0x02a0
+#define AFE_IRQ_MCU_CON1 0x02e4
+#define AFE_IRQ_MCU_CON2 0x02e8
+#define AFE_DAC_MON 0x02ec
+#define AFE_IRQ_MCU_CON3 0x02f0
+#define AFE_IRQ_MCU_CON4 0x02f4
+#define AFE_IRQ_MCU_CNT0 0x0300
+#define AFE_IRQ_MCU_CNT6 0x0304
+#define AFE_IRQ_MCU_CNT8 0x0308
+#define AFE_IRQ_MCU_DSP2_EN 0x030c
+#define AFE_IRQ0_MCU_CNT_MON 0x0310
+#define AFE_IRQ6_MCU_CNT_MON 0x0314
+#define AFE_VUL4_CON0 0x0358
+#define AFE_VUL4_BASE_MSB 0x035c
+#define AFE_VUL4_BASE 0x0360
+#define AFE_VUL4_CUR_MSB 0x0364
+#define AFE_VUL4_CUR 0x0368
+#define AFE_VUL4_END_MSB 0x036c
+#define AFE_VUL4_END 0x0370
+#define AFE_VUL12_CON0 0x0374
+#define AFE_VUL12_BASE_MSB 0x0378
+#define AFE_VUL12_BASE 0x037c
+#define AFE_VUL12_CUR_MSB 0x0380
+#define AFE_VUL12_CUR 0x0384
+#define AFE_VUL12_END_MSB 0x0388
+#define AFE_VUL12_END 0x038c
+#define AFE_IRQ3_MCU_CNT_MON 0x0398
+#define AFE_IRQ4_MCU_CNT_MON 0x039c
+#define AFE_IRQ_MCU_CON0 0x03a0
+#define AFE_IRQ_MCU_STATUS 0x03a4
+#define AFE_IRQ_MCU_CLR 0x03a8
+#define AFE_IRQ_MCU_CNT1 0x03ac
+#define AFE_IRQ_MCU_CNT2 0x03b0
+#define AFE_IRQ_MCU_EN 0x03b4
+#define AFE_IRQ_MCU_MON2 0x03b8
+#define AFE_IRQ_MCU_CNT5 0x03bc
+#define AFE_IRQ1_MCU_CNT_MON 0x03c0
+#define AFE_IRQ2_MCU_CNT_MON 0x03c4
+#define AFE_IRQ5_MCU_CNT_MON 0x03cc
+#define AFE_IRQ_MCU_DSP_EN 0x03d0
+#define AFE_IRQ_MCU_SCP_EN 0x03d4
+#define AFE_IRQ_MCU_CNT7 0x03dc
+#define AFE_IRQ7_MCU_CNT_MON 0x03e0
+#define AFE_IRQ_MCU_CNT3 0x03e4
+#define AFE_IRQ_MCU_CNT4 0x03e8
+#define AFE_IRQ_MCU_CNT11 0x03ec
+#define AFE_APLL1_TUNER_CFG 0x03f0
+#define AFE_APLL2_TUNER_CFG 0x03f4
+#define AFE_IRQ_MCU_MISS_CLR 0x03f8
+#define AFE_CONN33 0x0408
+#define AFE_IRQ_MCU_CNT12 0x040c
+#define AFE_GAIN1_CON0 0x0410
+#define AFE_GAIN1_CON1 0x0414
+#define AFE_GAIN1_CON2 0x0418
+#define AFE_GAIN1_CON3 0x041c
+#define AFE_CONN7 0x0420
+#define AFE_GAIN1_CUR 0x0424
+#define AFE_GAIN2_CON0 0x0428
+#define AFE_GAIN2_CON1 0x042c
+#define AFE_GAIN2_CON2 0x0430
+#define AFE_GAIN2_CON3 0x0434
+#define AFE_CONN8 0x0438
+#define AFE_GAIN2_CUR 0x043c
+#define AFE_CONN9 0x0440
+#define AFE_CONN10 0x0444
+#define AFE_CONN11 0x0448
+#define AFE_CONN12 0x044c
+#define AFE_CONN13 0x0450
+#define AFE_CONN14 0x0454
+#define AFE_CONN15 0x0458
+#define AFE_CONN16 0x045c
+#define AFE_CONN17 0x0460
+#define AFE_CONN18 0x0464
+#define AFE_CONN19 0x0468
+#define AFE_CONN20 0x046c
+#define AFE_CONN21 0x0470
+#define AFE_CONN22 0x0474
+#define AFE_CONN23 0x0478
+#define AFE_CONN24 0x047c
+#define AFE_CONN_RS 0x0494
+#define AFE_CONN_DI 0x0498
+#define AFE_CONN25 0x04b0
+#define AFE_CONN26 0x04b4
+#define AFE_CONN27 0x04b8
+#define AFE_CONN28 0x04bc
+#define AFE_CONN29 0x04c0
+#define AFE_CONN30 0x04c4
+#define AFE_CONN31 0x04c8
+#define AFE_CONN32 0x04cc
+#define AFE_SRAM_DELSEL_CON1 0x04f4
+#define AFE_CONN56 0x0500
+#define AFE_CONN57 0x0504
+#define AFE_CONN58 0x0508
+#define AFE_CONN59 0x050c
+#define AFE_CONN56_1 0x0510
+#define AFE_CONN57_1 0x0514
+#define AFE_CONN58_1 0x0518
+#define AFE_CONN59_1 0x051c
+#define PCM_INTF_CON1 0x0530
+#define PCM_INTF_CON2 0x0538
+#define PCM2_INTF_CON 0x053c
+#define AFE_CM1_CON 0x0550
+#define AFE_CONN34 0x0580
+#define FPGA_CFG0 0x05b0
+#define FPGA_CFG1 0x05b4
+#define FPGA_CFG2 0x05c0
+#define FPGA_CFG3 0x05c4
+#define AUDIO_TOP_DBG_CON 0x05c8
+#define AUDIO_TOP_DBG_MON0 0x05cc
+#define AUDIO_TOP_DBG_MON1 0x05d0
+#define AFE_IRQ8_MCU_CNT_MON 0x05e4
+#define AFE_IRQ11_MCU_CNT_MON 0x05e8
+#define AFE_IRQ12_MCU_CNT_MON 0x05ec
+#define AFE_IRQ_MCU_CNT9 0x0600
+#define AFE_IRQ_MCU_CNT10 0x0604
+#define AFE_IRQ_MCU_CNT13 0x0608
+#define AFE_IRQ_MCU_CNT14 0x060c
+#define AFE_IRQ_MCU_CNT15 0x0610
+#define AFE_IRQ_MCU_CNT16 0x0614
+#define AFE_IRQ_MCU_CNT17 0x0618
+#define AFE_IRQ_MCU_CNT18 0x061c
+#define AFE_IRQ_MCU_CNT19 0x0620
+#define AFE_IRQ_MCU_CNT20 0x0624
+#define AFE_IRQ_MCU_CNT21 0x0628
+#define AFE_IRQ_MCU_CNT22 0x062c
+#define AFE_IRQ_MCU_CNT23 0x0630
+#define AFE_IRQ_MCU_CNT24 0x0634
+#define AFE_IRQ_MCU_CNT25 0x0638
+#define AFE_IRQ_MCU_CNT26 0x063c
+#define AFE_IRQ9_MCU_CNT_MON 0x0660
+#define AFE_IRQ10_MCU_CNT_MON 0x0664
+#define AFE_IRQ13_MCU_CNT_MON 0x0668
+#define AFE_IRQ14_MCU_CNT_MON 0x066c
+#define AFE_IRQ15_MCU_CNT_MON 0x0670
+#define AFE_IRQ16_MCU_CNT_MON 0x0674
+#define AFE_IRQ17_MCU_CNT_MON 0x0678
+#define AFE_IRQ18_MCU_CNT_MON 0x067c
+#define AFE_IRQ19_MCU_CNT_MON 0x0680
+#define AFE_IRQ20_MCU_CNT_MON 0x0684
+#define AFE_IRQ21_MCU_CNT_MON 0x0688
+#define AFE_IRQ22_MCU_CNT_MON 0x068c
+#define AFE_IRQ23_MCU_CNT_MON 0x0690
+#define AFE_IRQ24_MCU_CNT_MON 0x0694
+#define AFE_IRQ25_MCU_CNT_MON 0x0698
+#define AFE_IRQ26_MCU_CNT_MON 0x069c
+#define AFE_IRQ31_MCU_CNT_MON 0x06a0
+#define AFE_GENERAL_REG0 0x0800
+#define AFE_GENERAL_REG1 0x0804
+#define AFE_GENERAL_REG2 0x0808
+#define AFE_GENERAL_REG3 0x080c
+#define AFE_GENERAL_REG4 0x0810
+#define AFE_GENERAL_REG5 0x0814
+#define AFE_GENERAL_REG6 0x0818
+#define AFE_GENERAL_REG7 0x081c
+#define AFE_GENERAL_REG8 0x0820
+#define AFE_GENERAL_REG9 0x0824
+#define AFE_GENERAL_REG10 0x0828
+#define AFE_GENERAL_REG11 0x082c
+#define AFE_GENERAL_REG12 0x0830
+#define AFE_GENERAL_REG13 0x0834
+#define AFE_GENERAL_REG14 0x0838
+#define AFE_GENERAL_REG15 0x083c
+#define AFE_CBIP_CFG0 0x0840
+#define AFE_CBIP_MON0 0x0844
+#define AFE_CBIP_SLV_MUX_MON0 0x0848
+#define AFE_CBIP_SLV_DECODER_MON0 0x084c
+#define AFE_ADDA6_MTKAIF_MON0 0x0854
+#define AFE_ADDA6_MTKAIF_MON1 0x0858
+#define AFE_AWB_CON0 0x085c
+#define AFE_AWB_BASE_MSB 0x0860
+#define AFE_AWB_BASE 0x0864
+#define AFE_AWB_CUR_MSB 0x0868
+#define AFE_AWB_CUR 0x086c
+#define AFE_AWB_END_MSB 0x0870
+#define AFE_AWB_END 0x0874
+#define AFE_AWB2_CON0 0x0878
+#define AFE_AWB2_BASE_MSB 0x087c
+#define AFE_AWB2_BASE 0x0880
+#define AFE_AWB2_CUR_MSB 0x0884
+#define AFE_AWB2_CUR 0x0888
+#define AFE_AWB2_END_MSB 0x088c
+#define AFE_AWB2_END 0x0890
+#define AFE_DAI_CON0 0x0894
+#define AFE_DAI_BASE_MSB 0x0898
+#define AFE_DAI_BASE 0x089c
+#define AFE_DAI_CUR_MSB 0x08a0
+#define AFE_DAI_CUR 0x08a4
+#define AFE_DAI_END_MSB 0x08a8
+#define AFE_DAI_END 0x08ac
+#define AFE_DAI2_CON0 0x08b0
+#define AFE_DAI2_BASE_MSB 0x08b4
+#define AFE_DAI2_BASE 0x08b8
+#define AFE_DAI2_CUR_MSB 0x08bc
+#define AFE_DAI2_CUR 0x08c0
+#define AFE_DAI2_END_MSB 0x08c4
+#define AFE_DAI2_END 0x08c8
+#define AFE_MEMIF_CON0 0x08cc
+#define AFE_CONN0_1 0x0900
+#define AFE_CONN1_1 0x0904
+#define AFE_CONN2_1 0x0908
+#define AFE_CONN3_1 0x090c
+#define AFE_CONN4_1 0x0910
+#define AFE_CONN5_1 0x0914
+#define AFE_CONN6_1 0x0918
+#define AFE_CONN7_1 0x091c
+#define AFE_CONN8_1 0x0920
+#define AFE_CONN9_1 0x0924
+#define AFE_CONN10_1 0x0928
+#define AFE_CONN11_1 0x092c
+#define AFE_CONN12_1 0x0930
+#define AFE_CONN13_1 0x0934
+#define AFE_CONN14_1 0x0938
+#define AFE_CONN15_1 0x093c
+#define AFE_CONN16_1 0x0940
+#define AFE_CONN17_1 0x0944
+#define AFE_CONN18_1 0x0948
+#define AFE_CONN19_1 0x094c
+#define AFE_CONN20_1 0x0950
+#define AFE_CONN21_1 0x0954
+#define AFE_CONN22_1 0x0958
+#define AFE_CONN23_1 0x095c
+#define AFE_CONN24_1 0x0960
+#define AFE_CONN25_1 0x0964
+#define AFE_CONN26_1 0x0968
+#define AFE_CONN27_1 0x096c
+#define AFE_CONN28_1 0x0970
+#define AFE_CONN29_1 0x0974
+#define AFE_CONN30_1 0x0978
+#define AFE_CONN31_1 0x097c
+#define AFE_CONN32_1 0x0980
+#define AFE_CONN33_1 0x0984
+#define AFE_CONN34_1 0x0988
+#define AFE_CONN_RS_1 0x098c
+#define AFE_CONN_DI_1 0x0990
+#define AFE_CONN_24BIT_1 0x0994
+#define AFE_CONN_REG 0x0998
+#define AFE_CONN35 0x09a0
+#define AFE_CONN36 0x09a4
+#define AFE_CONN37 0x09a8
+#define AFE_CONN38 0x09ac
+#define AFE_CONN35_1 0x09b0
+#define AFE_CONN36_1 0x09b4
+#define AFE_CONN37_1 0x09b8
+#define AFE_CONN38_1 0x09bc
+#define AFE_CONN39 0x09c0
+#define AFE_CONN40 0x09c4
+#define AFE_CONN41 0x09c8
+#define AFE_CONN42 0x09cc
+#define AFE_CONN39_1 0x09e0
+#define AFE_CONN40_1 0x09e4
+#define AFE_CONN41_1 0x09e8
+#define AFE_CONN42_1 0x09ec
+#define AFE_I2S_CON4 0x09f8
+#define AFE_CONN60 0x0a64
+#define AFE_CONN61 0x0a68
+#define AFE_CONN62 0x0a6c
+#define AFE_CONN63 0x0a70
+#define AFE_CONN64 0x0a74
+#define AFE_CONN65 0x0a78
+#define AFE_CONN66 0x0a7c
+#define AFE_ADDA6_TOP_CON0 0x0a80
+#define AFE_ADDA6_UL_SRC_CON0 0x0a84
+#define AFE_ADDA6_UL_SRC_CON1 0x0a88
+#define AFE_ADDA6_SRC_DEBUG 0x0a8c
+#define AFE_ADDA6_SRC_DEBUG_MON0 0x0a90
+#define AFE_ADDA6_ULCF_CFG_02_01 0x0aa0
+#define AFE_ADDA6_ULCF_CFG_04_03 0x0aa4
+#define AFE_ADDA6_ULCF_CFG_06_05 0x0aa8
+#define AFE_ADDA6_ULCF_CFG_08_07 0x0aac
+#define AFE_ADDA6_ULCF_CFG_10_09 0x0ab0
+#define AFE_ADDA6_ULCF_CFG_12_11 0x0ab4
+#define AFE_ADDA6_ULCF_CFG_14_13 0x0ab8
+#define AFE_ADDA6_ULCF_CFG_16_15 0x0abc
+#define AFE_ADDA6_ULCF_CFG_18_17 0x0ac0
+#define AFE_ADDA6_ULCF_CFG_20_19 0x0ac4
+#define AFE_ADDA6_ULCF_CFG_22_21 0x0ac8
+#define AFE_ADDA6_ULCF_CFG_24_23 0x0acc
+#define AFE_ADDA6_ULCF_CFG_26_25 0x0ad0
+#define AFE_ADDA6_ULCF_CFG_28_27 0x0ad4
+#define AFE_ADDA6_ULCF_CFG_30_29 0x0ad8
+#define AFE_ADD6A_UL_SRC_MON0 0x0ae4
+#define AFE_ADDA6_UL_SRC_MON1 0x0ae8
+#define AFE_CONN43 0x0af8
+#define AFE_CONN43_1 0x0afc
+#define AFE_MOD_DAI_CON0 0x0b00
+#define AFE_MOD_DAI_BASE_MSB 0x0b04
+#define AFE_MOD_DAI_BASE 0x0b08
+#define AFE_MOD_DAI_CUR_MSB 0x0b0c
+#define AFE_MOD_DAI_CUR 0x0b10
+#define AFE_MOD_DAI_END_MSB 0x0b14
+#define AFE_MOD_DAI_END 0x0b18
+#define AFE_AWB_RCH_MON 0x0b70
+#define AFE_AWB_LCH_MON 0x0b74
+#define AFE_VUL_RCH_MON 0x0b78
+#define AFE_VUL_LCH_MON 0x0b7c
+#define AFE_VUL12_RCH_MON 0x0b80
+#define AFE_VUL12_LCH_MON 0x0b84
+#define AFE_VUL2_RCH_MON 0x0b88
+#define AFE_VUL2_LCH_MON 0x0b8c
+#define AFE_DAI_DATA_MON 0x0b90
+#define AFE_MOD_DAI_DATA_MON 0x0b94
+#define AFE_DAI2_DATA_MON 0x0b98
+#define AFE_AWB2_RCH_MON 0x0b9c
+#define AFE_AWB2_LCH_MON 0x0ba0
+#define AFE_VUL3_RCH_MON 0x0ba4
+#define AFE_VUL3_LCH_MON 0x0ba8
+#define AFE_VUL4_RCH_MON 0x0bac
+#define AFE_VUL4_LCH_MON 0x0bb0
+#define AFE_VUL5_RCH_MON 0x0bb4
+#define AFE_VUL5_LCH_MON 0x0bb8
+#define AFE_VUL6_RCH_MON 0x0bbc
+#define AFE_VUL6_LCH_MON 0x0bc0
+#define AFE_DL1_RCH_MON 0x0bc4
+#define AFE_DL1_LCH_MON 0x0bc8
+#define AFE_DL2_RCH_MON 0x0bcc
+#define AFE_DL2_LCH_MON 0x0bd0
+#define AFE_DL12_RCH1_MON 0x0bd4
+#define AFE_DL12_LCH1_MON 0x0bd8
+#define AFE_DL12_RCH2_MON 0x0bdc
+#define AFE_DL12_LCH2_MON 0x0be0
+#define AFE_DL3_RCH_MON 0x0be4
+#define AFE_DL3_LCH_MON 0x0be8
+#define AFE_DL4_RCH_MON 0x0bec
+#define AFE_DL4_LCH_MON 0x0bf0
+#define AFE_DL5_RCH_MON 0x0bf4
+#define AFE_DL5_LCH_MON 0x0bf8
+#define AFE_DL6_RCH_MON 0x0bfc
+#define AFE_DL6_LCH_MON 0x0c00
+#define AFE_DL7_RCH_MON 0x0c04
+#define AFE_DL7_LCH_MON 0x0c08
+#define AFE_DL8_RCH_MON 0x0c0c
+#define AFE_DL8_LCH_MON 0x0c10
+#define AFE_VUL5_CON0 0x0c14
+#define AFE_VUL5_BASE_MSB 0x0c18
+#define AFE_VUL5_BASE 0x0c1c
+#define AFE_VUL5_CUR_MSB 0x0c20
+#define AFE_VUL5_CUR 0x0c24
+#define AFE_VUL5_END_MSB 0x0c28
+#define AFE_VUL5_END 0x0c2c
+#define AFE_VUL6_CON0 0x0c30
+#define AFE_VUL6_BASE_MSB 0x0c34
+#define AFE_VUL6_BASE 0x0c38
+#define AFE_VUL6_CUR_MSB 0x0c3c
+#define AFE_VUL6_CUR 0x0c40
+#define AFE_VUL6_END_MSB 0x0c44
+#define AFE_VUL6_END 0x0c48
+#define AFE_ADDA_DL_SDM_DCCOMP_CON 0x0c50
+#define AFE_ADDA_DL_SDM_TEST 0x0c54
+#define AFE_ADDA_DL_DC_COMP_CFG0 0x0c58
+#define AFE_ADDA_DL_DC_COMP_CFG1 0x0c5c
+#define AFE_ADDA_DL_SDM_FIFO_MON 0x0c60
+#define AFE_ADDA_DL_SRC_LCH_MON 0x0c64
+#define AFE_ADDA_DL_SRC_RCH_MON 0x0c68
+#define AFE_ADDA_DL_SDM_OUT_MON 0x0c6c
+#define AFE_ADDA_DL_SDM_DITHER_CON 0x0c70
+#define AFE_ADDA_DL_SDM_AUTO_RESET_CON 0x0c74
+#define AFE_CONNSYS_I2S_CON 0x0c78
+#define AFE_CONNSYS_I2S_MON 0x0c7c
+#define AFE_ASRC_2CH_CON0 0x0c80
+#define AFE_ASRC_2CH_CON1 0x0c84
+#define AFE_ASRC_2CH_CON2 0x0c88
+#define AFE_ASRC_2CH_CON3 0x0c8c
+#define AFE_ASRC_2CH_CON4 0x0c90
+#define AFE_ASRC_2CH_CON5 0x0c94
+#define AFE_ASRC_2CH_CON6 0x0c98
+#define AFE_ASRC_2CH_CON7 0x0c9c
+#define AFE_ASRC_2CH_CON8 0x0ca0
+#define AFE_ASRC_2CH_CON9 0x0ca4
+#define AFE_ASRC_2CH_CON10 0x0ca8
+#define AFE_ASRC_2CH_CON12 0x0cb0
+#define AFE_ASRC_2CH_CON13 0x0cb4
+#define AFE_ADDA6_IIR_COEF_02_01 0x0ce0
+#define AFE_ADDA6_IIR_COEF_04_03 0x0ce4
+#define AFE_ADDA6_IIR_COEF_06_05 0x0ce8
+#define AFE_ADDA6_IIR_COEF_08_07 0x0cec
+#define AFE_ADDA6_IIR_COEF_10_09 0x0cf0
+#define AFE_CONN67 0x0cf4
+#define AFE_CONN68 0x0cf8
+#define AFE_CONN69 0x0cfc
+#define AFE_SE_PROT_SIDEBAND 0x0d38
+#define AFE_SE_DOMAIN_SIDEBAND0 0x0d3c
+#define AFE_ADDA_PREDIS_CON2 0x0d40
+#define AFE_ADDA_PREDIS_CON3 0x0d44
+#define AFE_SE_DOMAIN_SIDEBAND1 0x0d54
+#define AFE_SE_DOMAIN_SIDEBAND2 0x0d58
+#define AFE_SE_DOMAIN_SIDEBAND3 0x0d5c
+#define AFE_CONN44 0x0d70
+#define AFE_CONN45 0x0d74
+#define AFE_CONN46 0x0d78
+#define AFE_CONN47 0x0d7c
+#define AFE_CONN44_1 0x0d80
+#define AFE_CONN45_1 0x0d84
+#define AFE_CONN46_1 0x0d88
+#define AFE_CONN47_1 0x0d8c
+#define AFE_HD_ENGEN_ENABLE 0x0dd0
+#define AFE_ADDA_DL_NLE_FIFO_MON 0x0dfc
+#define AFE_ADDA_MTKAIF_CFG0 0x0e00
+#define AFE_CONN67_1 0x0e04
+#define AFE_CONN68_1 0x0e08
+#define AFE_CONN69_1 0x0e0c
+#define AFE_ADDA_MTKAIF_SYNCWORD_CFG 0x0e14
+#define AFE_ADDA_MTKAIF_RX_CFG0 0x0e20
+#define AFE_ADDA_MTKAIF_RX_CFG1 0x0e24
+#define AFE_ADDA_MTKAIF_RX_CFG2 0x0e28
+#define AFE_ADDA_MTKAIF_MON0 0x0e34
+#define AFE_ADDA_MTKAIF_MON1 0x0e38
+#define AFE_AUD_PAD_TOP 0x0e40
+#define AFE_DL_NLE_R_CFG0 0x0e44
+#define AFE_DL_NLE_R_CFG1 0x0e48
+#define AFE_DL_NLE_L_CFG0 0x0e4c
+#define AFE_DL_NLE_L_CFG1 0x0e50
+#define AFE_DL_NLE_R_MON0 0x0e54
+#define AFE_DL_NLE_R_MON1 0x0e58
+#define AFE_DL_NLE_R_MON2 0x0e5c
+#define AFE_DL_NLE_L_MON0 0x0e60
+#define AFE_DL_NLE_L_MON1 0x0e64
+#define AFE_DL_NLE_L_MON2 0x0e68
+#define AFE_DL_NLE_GAIN_CFG0 0x0e6c
+#define AFE_ADDA6_MTKAIF_CFG0 0x0e70
+#define AFE_ADDA6_MTKAIF_RX_CFG0 0x0e74
+#define AFE_ADDA6_MTKAIF_RX_CFG1 0x0e78
+#define AFE_ADDA6_MTKAIF_RX_CFG2 0x0e7c
+#define AFE_GENERAL1_ASRC_2CH_CON0 0x0e80
+#define AFE_GENERAL1_ASRC_2CH_CON1 0x0e84
+#define AFE_GENERAL1_ASRC_2CH_CON2 0x0e88
+#define AFE_GENERAL1_ASRC_2CH_CON3 0x0e8c
+#define AFE_GENERAL1_ASRC_2CH_CON4 0x0e90
+#define AFE_GENERAL1_ASRC_2CH_CON5 0x0e94
+#define AFE_GENERAL1_ASRC_2CH_CON6 0x0e98
+#define AFE_GENERAL1_ASRC_2CH_CON7 0x0e9c
+#define AFE_GENERAL1_ASRC_2CH_CON8 0x0ea0
+#define AFE_GENERAL1_ASRC_2CH_CON9 0x0ea4
+#define AFE_GENERAL1_ASRC_2CH_CON10 0x0ea8
+#define AFE_GENERAL1_ASRC_2CH_CON12 0x0eb0
+#define AFE_GENERAL1_ASRC_2CH_CON13 0x0eb4
+#define GENERAL_ASRC_MODE 0x0eb8
+#define GENERAL_ASRC_EN_ON 0x0ebc
+#define AFE_CONN48 0x0ec0
+#define AFE_CONN49 0x0ec4
+#define AFE_CONN50 0x0ec8
+#define AFE_CONN51 0x0ecc
+#define AFE_CONN52 0x0ed0
+#define AFE_CONN53 0x0ed4
+#define AFE_CONN54 0x0ed8
+#define AFE_CONN55 0x0edc
+#define AFE_CONN48_1 0x0ee0
+#define AFE_CONN49_1 0x0ee4
+#define AFE_CONN50_1 0x0ee8
+#define AFE_CONN51_1 0x0eec
+#define AFE_CONN52_1 0x0ef0
+#define AFE_CONN53_1 0x0ef4
+#define AFE_CONN54_1 0x0ef8
+#define AFE_CONN55_1 0x0efc
+#define AFE_GENERAL2_ASRC_2CH_CON0 0x0f00
+#define AFE_GENERAL2_ASRC_2CH_CON1 0x0f04
+#define AFE_GENERAL2_ASRC_2CH_CON2 0x0f08
+#define AFE_GENERAL2_ASRC_2CH_CON3 0x0f0c
+#define AFE_GENERAL2_ASRC_2CH_CON4 0x0f10
+#define AFE_GENERAL2_ASRC_2CH_CON5 0x0f14
+#define AFE_GENERAL2_ASRC_2CH_CON6 0x0f18
+#define AFE_GENERAL2_ASRC_2CH_CON7 0x0f1c
+#define AFE_GENERAL2_ASRC_2CH_CON8 0x0f20
+#define AFE_GENERAL2_ASRC_2CH_CON9 0x0f24
+#define AFE_GENERAL2_ASRC_2CH_CON10 0x0f28
+#define AFE_GENERAL2_ASRC_2CH_CON12 0x0f30
+#define AFE_GENERAL2_ASRC_2CH_CON13 0x0f34
+#define AFE_DL5_CON0 0x0f4c
+#define AFE_DL5_BASE_MSB 0x0f50
+#define AFE_DL5_BASE 0x0f54
+#define AFE_DL5_CUR_MSB 0x0f58
+#define AFE_DL5_CUR 0x0f5c
+#define AFE_DL5_END_MSB 0x0f60
+#define AFE_DL5_END 0x0f64
+#define AFE_DL6_CON0 0x0f68
+#define AFE_DL6_BASE_MSB 0x0f6c
+#define AFE_DL6_BASE 0x0f70
+#define AFE_DL6_CUR_MSB 0x0f74
+#define AFE_DL6_CUR 0x0f78
+#define AFE_DL6_END_MSB 0x0f7c
+#define AFE_DL6_END 0x0f80
+#define AFE_DL7_CON0 0x0f84
+#define AFE_DL7_BASE_MSB 0x0f88
+#define AFE_DL7_BASE 0x0f8c
+#define AFE_DL7_CUR_MSB 0x0f90
+#define AFE_DL7_CUR 0x0f94
+#define AFE_DL7_END_MSB 0x0f98
+#define AFE_DL7_END 0x0f9c
+#define AFE_DL8_CON0 0x0fa0
+#define AFE_DL8_BASE_MSB 0x0fa4
+#define AFE_DL8_BASE 0x0fa8
+#define AFE_DL8_CUR_MSB 0x0fac
+#define AFE_DL8_CUR 0x0fb0
+#define AFE_DL8_END_MSB 0x0fb4
+#define AFE_DL8_END 0x0fb8
+#define AFE_SE_SECURE_CON 0x1004
+#define AFE_PROT_SIDEBAND_MON 0x1008
+#define AFE_DOMAIN_SIDEBAND0_MON 0x100c
+#define AFE_DOMAIN_SIDEBAND1_MON 0x1010
+#define AFE_DOMAIN_SIDEBAND2_MON 0x1014
+#define AFE_DOMAIN_SIDEBAND3_MON 0x1018
+#define AFE_SECURE_MASK_CONN0 0x1020
+#define AFE_SECURE_MASK_CONN1 0x1024
+#define AFE_SECURE_MASK_CONN2 0x1028
+#define AFE_SECURE_MASK_CONN3 0x102c
+#define AFE_SECURE_MASK_CONN4 0x1030
+#define AFE_SECURE_MASK_CONN5 0x1034
+#define AFE_SECURE_MASK_CONN6 0x1038
+#define AFE_SECURE_MASK_CONN7 0x103c
+#define AFE_SECURE_MASK_CONN8 0x1040
+#define AFE_SECURE_MASK_CONN9 0x1044
+#define AFE_SECURE_MASK_CONN10 0x1048
+#define AFE_SECURE_MASK_CONN11 0x104c
+#define AFE_SECURE_MASK_CONN12 0x1050
+#define AFE_SECURE_MASK_CONN13 0x1054
+#define AFE_SECURE_MASK_CONN14 0x1058
+#define AFE_SECURE_MASK_CONN15 0x105c
+#define AFE_SECURE_MASK_CONN16 0x1060
+#define AFE_SECURE_MASK_CONN17 0x1064
+#define AFE_SECURE_MASK_CONN18 0x1068
+#define AFE_SECURE_MASK_CONN19 0x106c
+#define AFE_SECURE_MASK_CONN20 0x1070
+#define AFE_SECURE_MASK_CONN21 0x1074
+#define AFE_SECURE_MASK_CONN22 0x1078
+#define AFE_SECURE_MASK_CONN23 0x107c
+#define AFE_SECURE_MASK_CONN24 0x1080
+#define AFE_SECURE_MASK_CONN25 0x1084
+#define AFE_SECURE_MASK_CONN26 0x1088
+#define AFE_SECURE_MASK_CONN27 0x108c
+#define AFE_SECURE_MASK_CONN28 0x1090
+#define AFE_SECURE_MASK_CONN29 0x1094
+#define AFE_SECURE_MASK_CONN30 0x1098
+#define AFE_SECURE_MASK_CONN31 0x109c
+#define AFE_SECURE_MASK_CONN32 0x10a0
+#define AFE_SECURE_MASK_CONN33 0x10a4
+#define AFE_SECURE_MASK_CONN34 0x10a8
+#define AFE_SECURE_MASK_CONN35 0x10ac
+#define AFE_SECURE_MASK_CONN36 0x10b0
+#define AFE_SECURE_MASK_CONN37 0x10b4
+#define AFE_SECURE_MASK_CONN38 0x10b8
+#define AFE_SECURE_MASK_CONN39 0x10bc
+#define AFE_SECURE_MASK_CONN40 0x10c0
+#define AFE_SECURE_MASK_CONN41 0x10c4
+#define AFE_SECURE_MASK_CONN42 0x10c8
+#define AFE_SECURE_MASK_CONN43 0x10cc
+#define AFE_SECURE_MASK_CONN44 0x10d0
+#define AFE_SECURE_MASK_CONN45 0x10d4
+#define AFE_SECURE_MASK_CONN46 0x10d8
+#define AFE_SECURE_MASK_CONN47 0x10dc
+#define AFE_SECURE_MASK_CONN48 0x10e0
+#define AFE_SECURE_MASK_CONN49 0x10e4
+#define AFE_SECURE_MASK_CONN50 0x10e8
+#define AFE_SECURE_MASK_CONN51 0x10ec
+#define AFE_SECURE_MASK_CONN52 0x10f0
+#define AFE_SECURE_MASK_CONN53 0x10f4
+#define AFE_SECURE_MASK_CONN54 0x10f8
+#define AFE_SECURE_MASK_CONN55 0x10fc
+#define AFE_SECURE_MASK_CONN56 0x1100
+#define AFE_SECURE_MASK_CONN57 0x1104
+#define AFE_SECURE_MASK_CONN0_1 0x1108
+#define AFE_SECURE_MASK_CONN1_1 0x110c
+#define AFE_SECURE_MASK_CONN2_1 0x1110
+#define AFE_SECURE_MASK_CONN3_1 0x1114
+#define AFE_SECURE_MASK_CONN4_1 0x1118
+#define AFE_SECURE_MASK_CONN5_1 0x111c
+#define AFE_SECURE_MASK_CONN6_1 0x1120
+#define AFE_SECURE_MASK_CONN7_1 0x1124
+#define AFE_SECURE_MASK_CONN8_1 0x1128
+#define AFE_SECURE_MASK_CONN9_1 0x112c
+#define AFE_SECURE_MASK_CONN10_1 0x1130
+#define AFE_SECURE_MASK_CONN11_1 0x1134
+#define AFE_SECURE_MASK_CONN12_1 0x1138
+#define AFE_SECURE_MASK_CONN13_1 0x113c
+#define AFE_SECURE_MASK_CONN14_1 0x1140
+#define AFE_SECURE_MASK_CONN15_1 0x1144
+#define AFE_SECURE_MASK_CONN16_1 0x1148
+#define AFE_SECURE_MASK_CONN17_1 0x114c
+#define AFE_SECURE_MASK_CONN18_1 0x1150
+#define AFE_SECURE_MASK_CONN19_1 0x1154
+#define AFE_SECURE_MASK_CONN20_1 0x1158
+#define AFE_SECURE_MASK_CONN21_1 0x115c
+#define AFE_SECURE_MASK_CONN22_1 0x1160
+#define AFE_SECURE_MASK_CONN23_1 0x1164
+#define AFE_SECURE_MASK_CONN24_1 0x1168
+#define AFE_SECURE_MASK_CONN25_1 0x116c
+#define AFE_SECURE_MASK_CONN26_1 0x1170
+#define AFE_SECURE_MASK_CONN27_1 0x1174
+#define AFE_SECURE_MASK_CONN28_1 0x1178
+#define AFE_SECURE_MASK_CONN29_1 0x117c
+#define AFE_SECURE_MASK_CONN30_1 0x1180
+#define AFE_SECURE_MASK_CONN31_1 0x1184
+#define AFE_SECURE_MASK_CONN32_1 0x1188
+#define AFE_SECURE_MASK_CONN33_1 0x118c
+#define AFE_SECURE_MASK_CONN34_1 0x1190
+#define AFE_SECURE_MASK_CONN35_1 0x1194
+#define AFE_SECURE_MASK_CONN36_1 0x1198
+#define AFE_SECURE_MASK_CONN37_1 0x119c
+#define AFE_SECURE_MASK_CONN38_1 0x11a0
+#define AFE_SECURE_MASK_CONN39_1 0x11a4
+#define AFE_SECURE_MASK_CONN40_1 0x11a8
+#define AFE_SECURE_MASK_CONN41_1 0x11ac
+#define AFE_SECURE_MASK_CONN42_1 0x11b0
+#define AFE_SECURE_MASK_CONN43_1 0x11b4
+#define AFE_SECURE_MASK_CONN44_1 0x11b8
+#define AFE_SECURE_MASK_CONN45_1 0x11bc
+#define AFE_SECURE_MASK_CONN46_1 0x11c0
+#define AFE_SECURE_MASK_CONN47_1 0x11c4
+#define AFE_SECURE_MASK_CONN48_1 0x11c8
+#define AFE_SECURE_MASK_CONN49_1 0x11cc
+#define AFE_SECURE_MASK_CONN50_1 0x11d0
+#define AFE_SECURE_MASK_CONN51_1 0x11d4
+#define AFE_SECURE_MASK_CONN52_1 0x11d8
+#define AFE_SECURE_MASK_CONN53_1 0x11dc
+#define AFE_SECURE_MASK_CONN54_1 0x11e0
+#define AFE_SECURE_MASK_CONN55_1 0x11e4
+#define AFE_SECURE_MASK_CONN56_1 0x11e8
+#define AFE_CONN60_1 0x11f0
+#define AFE_CONN61_1 0x11f4
+#define AFE_CONN62_1 0x11f8
+#define AFE_CONN63_1 0x11fc
+#define AFE_CONN64_1 0x1220
+#define AFE_CONN65_1 0x1224
+#define AFE_CONN66_1 0x1228
+#define FPGA_CFG4 0x1230
+#define FPGA_CFG5 0x1234
+#define FPGA_CFG6 0x1238
+#define FPGA_CFG7 0x123c
+#define FPGA_CFG8 0x1240
+#define FPGA_CFG9 0x1244
+#define FPGA_CFG10 0x1248
+#define FPGA_CFG11 0x124c
+#define FPGA_CFG12 0x1250
+#define FPGA_CFG13 0x1254
+#define ETDM_IN1_CON0 0x1430
+#define ETDM_IN1_CON1 0x1434
+#define ETDM_IN1_CON2 0x1438
+#define ETDM_IN1_CON3 0x143c
+#define ETDM_IN1_CON4 0x1440
+#define ETDM_IN1_CON5 0x1444
+#define ETDM_IN1_CON6 0x1448
+#define ETDM_IN1_CON7 0x144c
+#define ETDM_IN1_CON8 0x1450
+#define ETDM_OUT1_CON0 0x1454
+#define ETDM_OUT1_CON1 0x1458
+#define ETDM_OUT1_CON2 0x145c
+#define ETDM_OUT1_CON3 0x1460
+#define ETDM_OUT1_CON4 0x1464
+#define ETDM_OUT1_CON5 0x1468
+#define ETDM_OUT1_CON6 0x146c
+#define ETDM_OUT1_CON7 0x1470
+#define ETDM_OUT1_CON8 0x1474
+#define ETDM_IN1_MON 0x1478
+#define ETDM_OUT1_MON 0x147c
+#define ETDM_0_3_COWORK_CON0 0x18b0
+#define ETDM_0_3_COWORK_CON1 0x18b4
+#define ETDM_0_3_COWORK_CON3 0x18bc
+
+#define AFE_MAX_REGISTER ETDM_0_3_COWORK_CON3
+
+#define AFE_IRQ_STATUS_BITS 0x87FFFFFF
+#define AFE_IRQ_CNT_SHIFT 0
+#define AFE_IRQ_CNT_MASK 0x3ffff
+#endif
diff --git a/sound/soc/mediatek/mt8195/mt8195-afe-clk.c b/sound/soc/mediatek/mt8195/mt8195-afe-clk.c
index efd5cc364a35..2ee3872c83c3 100644
--- a/sound/soc/mediatek/mt8195/mt8195-afe-clk.c
+++ b/sound/soc/mediatek/mt8195/mt8195-afe-clk.c
@@ -284,7 +284,7 @@ static int mt8195_afe_enable_apll_tuner(struct mtk_base_afe *afe,
{
struct mt8195_afe_tuner_cfg *cfg = mt8195_afe_found_apll_tuner(id);
unsigned long flags;
- int ret = 0;
+ int ret;

if (!cfg)
return -EINVAL;
@@ -308,7 +308,7 @@ static int mt8195_afe_enable_apll_tuner(struct mtk_base_afe *afe,
spin_unlock_irqrestore(&cfg->ctrl_lock, flags);

- return ret;
+ return 0;
}

static int mt8195_afe_disable_apll_tuner(struct mtk_base_afe *afe,
@@ -316,7 +316,7 @@ static int mt8195_afe_disable_apll_tuner(struct mtk_base_afe *afe,
{
struct mt8195_afe_tuner_cfg *cfg = mt8195_afe_found_apll_tuner(id);
unsigned long flags;
- int ret = 0;
+ int ret;

if (!cfg)
return -EINVAL;
@@ -338,7 +338,7 @@ static int mt8195_afe_disable_apll_tuner(struct mtk_base_afe *afe,
if (ret)
return ret;

- return ret;
+ return 0;
}

int mt8195_afe_get_mclk_source_clk_id(int sel)
diff --git a/sound/soc/mediatek/mt8195/mt8195-dai-etdm.c b/sound/soc/mediatek/mt8195/mt8195-dai-etdm.c
index c02c10da3600..c2e268054773 100644
--- a/sound/soc/mediatek/mt8195/mt8195-dai-etdm.c
+++ b/sound/soc/mediatek/mt8195/mt8195-dai-etdm.c
@@ -2172,11 +2172,11 @@ static int mtk_dai_etdm_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
return -EINVAL;
}

- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBM_CFM:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_BC_FC:
etdm_data->slave_mode = true;
break;
- case SND_SOC_DAIFMT_CBS_CFS:
+ case SND_SOC_DAIFMT_BP_FP:
etdm_data->slave_mode = false;
break;
default:
diff --git a/sound/soc/mediatek/mt8195/mt8195-dai-pcm.c b/sound/soc/mediatek/mt8195/mt8195-dai-pcm.c
index 12644ded83d5..caceb0deb467 100644
--- a/sound/soc/mediatek/mt8195/mt8195-dai-pcm.c
+++ b/sound/soc/mediatek/mt8195/mt8195-dai-pcm.c
@@ -266,11 +266,11 @@ static int mtk_dai_pcm_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
return -EINVAL;
}

- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBM_CFM:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_BC_FC:
pcmif_priv->slave_mode = 1;
break;
- case SND_SOC_DAIFMT_CBS_CFS:
+ case SND_SOC_DAIFMT_BP_FP:
pcmif_priv->slave_mode = 0;
break;
default:
diff --git a/sound/soc/mediatek/mt8195/mt8195-mt6359.c b/sound/soc/mediatek/mt8195/mt8195-mt6359.c
index 54a00b0699b1..c530e3fc27e4 100644
--- a/sound/soc/mediatek/mt8195/mt8195-mt6359.c
+++ b/sound/soc/mediatek/mt8195/mt8195-mt6359.c
@@ -20,6 +20,8 @@
#include "../../codecs/rt1011.h"
#include "../../codecs/rt5682.h"
#include "../common/mtk-afe-platform-driver.h"
+#include "../common/mtk-dsp-sof-common.h"
+#include "../common/mtk-soc-card.h"
#include "mt8195-afe-clk.h"
#include "mt8195-afe-common.h"
@@ -54,13 +56,6 @@ struct mt8195_card_data {
unsigned long quirk;
};

-struct sof_conn_stream {
- const char *normal_link;
- const char *sof_link;
- const char *sof_dma;
- int stream_dir;
-};
-
struct mt8195_mt6359_priv {
struct snd_soc_jack headset_jack;
struct snd_soc_jack dp_jack;
@@ -374,7 +369,8 @@ static const struct snd_soc_ops mt8195_dptx_ops = {
static int mt8195_dptx_codec_init(struct snd_soc_pcm_runtime *rtd)
{
- struct mt8195_mt6359_priv *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct mtk_soc_card_data *soc_card_data = snd_soc_card_get_drvdata(rtd->card);
+ struct mt8195_mt6359_priv *priv = soc_card_data->mach_priv;
struct snd_soc_component *cmpnt_codec =
asoc_rtd_to_codec(rtd, 0)->component;
int ret;
@@ -389,7 +385,8 @@ static int mt8195_dptx_codec_init(struct snd_soc_pcm_runtime *rtd)
static int mt8195_hdmi_codec_init(struct snd_soc_pcm_runtime *rtd)
{
- struct mt8195_mt6359_priv *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct mtk_soc_card_data *soc_card_data = snd_soc_card_get_drvdata(rtd->card);
+ struct mt8195_mt6359_priv *priv = soc_card_data->mach_priv;
struct snd_soc_component *cmpnt_codec =
asoc_rtd_to_codec(rtd, 0)->component;
int ret;
@@ -555,7 +552,8 @@ static int mt8195_rt5682_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_component *cmpnt_codec =
asoc_rtd_to_codec(rtd, 0)->component;
- struct mt8195_mt6359_priv *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct mtk_soc_card_data *soc_card_data = snd_soc_card_get_drvdata(rtd->card);
+ struct mt8195_mt6359_priv *priv = soc_card_data->mach_priv;
struct snd_soc_jack *jack = &priv->headset_jack;
struct snd_soc_component *cmpnt_afe =
snd_soc_rtdcom_lookup(rtd, AFE_PCM_NAME);
@@ -722,7 +720,8 @@ static int mt8195_set_bias_level_post(struct snd_soc_card *card,
struct snd_soc_dapm_context *dapm, enum snd_soc_bias_level level)
{
struct snd_soc_component *component = dapm->component;
- struct mt8195_mt6359_priv *priv = snd_soc_card_get_drvdata(card);
+ struct mtk_soc_card_data *soc_card_data = snd_soc_card_get_drvdata(card);
+ struct mt8195_mt6359_priv *priv = soc_card_data->mach_priv;
int ret;

/*
@@ -1321,175 +1320,24 @@ static struct snd_soc_card mt8195_mt6359_soc_card = {
static int mt8195_dai_link_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_hw_params *params)
{
- struct snd_soc_card *card = rtd->card;
- struct snd_soc_dai_link *sof_dai_link = NULL;
- struct snd_soc_pcm_runtime *runtime;
- struct snd_soc_dai *cpu_dai;
- int i, j, ret = 0;
-
- for (i = 0; i < ARRAY_SIZE(g_sof_conn_streams); i++) {
- const struct sof_conn_stream *conn = &g_sof_conn_streams[i];
-
- if (strcmp(rtd->dai_link->name, conn->normal_link))
- continue;
-
- for_each_card_rtds(card, runtime) {
- if (strcmp(runtime->dai_link->name, conn->sof_link))
- continue;
-
- for_each_rtd_cpu_dais(runtime, j, cpu_dai) {
- if (cpu_dai->stream_active[conn->stream_dir] > 0) {
- sof_dai_link = runtime->dai_link;
- break;
- }
- }
- break;
- }
+ int ret;
- if (sof_dai_link && sof_dai_link->be_hw_params_fixup)
- ret = sof_dai_link->be_hw_params_fixup(runtime, params);
-
- break;
- }
+ ret = mtk_sof_dai_link_fixup(rtd, params);
if (!strcmp(rtd->dai_link->name, "ETDM2_IN_BE") ||
!strcmp(rtd->dai_link->name, "ETDM1_OUT_BE")) {
- mt8195_etdm_hw_params_fixup(runtime, params);
+ mt8195_etdm_hw_params_fixup(rtd, params);
}
return ret;
}
-static int mt8195_mt6359_card_late_probe(struct snd_soc_card *card)
-{
- struct snd_soc_pcm_runtime *runtime;
- struct snd_soc_component *sof_comp = NULL;
- int i;
-
- /* 1. find sof component */
- for_each_card_rtds(card, runtime) {
- for (i = 0; i < runtime->num_components; i++) {
- if (!runtime->components[i]->driver->name)
- continue;
- if (!strcmp(runtime->components[i]->driver->name, "sof-audio-component")) {
- sof_comp = runtime->components[i];
- break;
- }
- }
- }
-
- if (!sof_comp) {
- dev_info(card->dev, " probe without component\n");
- return 0;
- }
- /* 2. add route path and fixup callback */
- for (i = 0; i < ARRAY_SIZE(g_sof_conn_streams); i++) {
- const struct sof_conn_stream *conn = &g_sof_conn_streams[i];
- struct snd_soc_pcm_runtime *sof_rtd = NULL;
- struct snd_soc_pcm_runtime *normal_rtd = NULL;
- struct snd_soc_pcm_runtime *rtd = NULL;
-
- for_each_card_rtds(card, rtd) {
- if (!strcmp(rtd->dai_link->name, conn->sof_link)) {
- sof_rtd = rtd;
- continue;
- }
- if (!strcmp(rtd->dai_link->name, conn->normal_link)) {
- normal_rtd = rtd;
- continue;
- }
- if (normal_rtd && sof_rtd)
- break;
- }
- if (normal_rtd && sof_rtd) {
- int j;
- struct snd_soc_dai *cpu_dai;
-
- for_each_rtd_cpu_dais(sof_rtd, j, cpu_dai) {
- struct snd_soc_dapm_route route;
- struct snd_soc_dapm_path *p = NULL;
- struct snd_soc_dapm_widget *play_widget =
- cpu_dai->playback_widget;
- struct snd_soc_dapm_widget *cap_widget =
- cpu_dai->capture_widget;
- memset(&route, 0, sizeof(route));
- if (conn->stream_dir == SNDRV_PCM_STREAM_CAPTURE &&
- cap_widget) {
- snd_soc_dapm_widget_for_each_sink_path(cap_widget, p) {
- route.source = conn->sof_dma;
- route.sink = p->sink->name;
- snd_soc_dapm_add_routes(&card->dapm, &route, 1);
- }
- } else if (conn->stream_dir == SNDRV_PCM_STREAM_PLAYBACK &&
- play_widget){
- snd_soc_dapm_widget_for_each_source_path(play_widget, p) {
- route.source = p->source->name;
- route.sink = conn->sof_dma;
- snd_soc_dapm_add_routes(&card->dapm, &route, 1);
- }
- } else {
- dev_err(cpu_dai->dev, "stream dir and widget not pair\n");
- }
- }
- normal_rtd->dai_link->be_hw_params_fixup = mt8195_dai_link_fixup;
- }
- }
-
- return 0;
-}
-
-static int mt8195_dailink_parse_of(struct snd_soc_card *card, struct device_node *np,
- const char *propname)
-{
- struct device *dev = card->dev;
- struct snd_soc_dai_link *link;
- const char *dai_name = NULL;
- int i, j, ret, num_links;
-
- num_links = of_property_count_strings(np, "mediatek,dai-link");
-
- if (num_links < 0 || num_links > ARRAY_SIZE(mt8195_mt6359_dai_links)) {
- dev_dbg(dev, "number of dai-link is invalid\n");
- return -EINVAL;
- }
-
- card->dai_link = devm_kcalloc(dev, num_links, sizeof(*link), GFP_KERNEL);
- if (!card->dai_link)
- return -ENOMEM;
-
- card->num_links = 0;
- link = card->dai_link;
-
- for (i = 0; i < num_links; i++) {
- ret = of_property_read_string_index(np, propname, i, &dai_name);
- if (ret) {
- dev_dbg(dev, "ASoC: Property '%s' index %d could not be read: %d\n",
- propname, i, ret);
- return -EINVAL;
- }
-
- for (j = 0; j < ARRAY_SIZE(mt8195_mt6359_dai_links); j++) {
- if (!strcmp(dai_name, mt8195_mt6359_dai_links[j].name)) {
- memcpy(link, &mt8195_mt6359_dai_links[j],
- sizeof(struct snd_soc_dai_link));
- link++;
- card->num_links++;
- break;
- }
- }
- }
-
- if (card->num_links != num_links)
- return -EINVAL;
-
- return 0;
-}
-
static int mt8195_mt6359_dev_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &mt8195_mt6359_soc_card;
struct snd_soc_dai_link *dai_link;
- struct mt8195_mt6359_priv *priv;
+ struct mtk_soc_card_data *soc_card_data;
+ struct mt8195_mt6359_priv *mach_priv;
struct device_node *platform_node, *adsp_node, *dp_node, *hdmi_node;
struct mt8195_card_data *card_data;
int is5682s = 0;
@@ -1512,17 +1360,41 @@ static int mt8195_mt6359_dev_probe(struct platform_device *pdev)
if (strstr(card->name, "_5682s"))
is5682s = 1;
+ soc_card_data = devm_kzalloc(&pdev->dev, sizeof(*card_data), GFP_KERNEL);
+ if (!soc_card_data)
+ return -ENOMEM;
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
+ mach_priv = devm_kzalloc(&pdev->dev, sizeof(*mach_priv), GFP_KERNEL);
+ if (!mach_priv)
return -ENOMEM;
+ soc_card_data->mach_priv = mach_priv;
+
+ adsp_node = of_parse_phandle(pdev->dev.of_node, "mediatek,adsp", 0);
+ if (adsp_node) {
+ struct mtk_sof_priv *sof_priv;
+
+ sof_priv = devm_kzalloc(&pdev->dev, sizeof(*sof_priv), GFP_KERNEL);
+ if (!sof_priv) {
+ ret = -ENOMEM;
+ goto err_kzalloc;
+ }
+ sof_priv->conn_streams = g_sof_conn_streams;
+ sof_priv->num_streams = ARRAY_SIZE(g_sof_conn_streams);
+ sof_priv->sof_dai_link_fixup = mt8195_dai_link_fixup;
+ soc_card_data->sof_priv = sof_priv;
+ card->late_probe = mtk_sof_card_late_probe;
+ sof_on = 1;
+ }
+
if (of_property_read_bool(pdev->dev.of_node, "mediatek,dai-link")) {
- ret = mt8195_dailink_parse_of(card, pdev->dev.of_node,
- "mediatek,dai-link");
+ ret = mtk_sof_dailink_parse_of(card, pdev->dev.of_node,
+ "mediatek,dai-link",
+ mt8195_mt6359_dai_links,
+ ARRAY_SIZE(mt8195_mt6359_dai_links));
if (ret) {
dev_dbg(&pdev->dev, "Parse dai-link fail\n");
- return -EINVAL;
+ goto err_parse_of;
}
} else {
if (!sof_on)
@@ -1533,13 +1405,10 @@ static int mt8195_mt6359_dev_probe(struct platform_device *pdev)
"mediatek,platform", 0);
if (!platform_node) {
dev_dbg(&pdev->dev, "Property 'platform' missing or invalid\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_platform_node;
}
- adsp_node = of_parse_phandle(pdev->dev.of_node, "mediatek,adsp", 0);
- if (adsp_node)
- sof_on = 1;
-
dp_node = of_parse_phandle(pdev->dev.of_node, "mediatek,dptx-codec", 0);
hdmi_node = of_parse_phandle(pdev->dev.of_node,
"mediatek,hdmi-codec", 0);
@@ -1612,17 +1481,17 @@ static int mt8195_mt6359_dev_probe(struct platform_device *pdev)
}
}
- if (sof_on)
- card->late_probe = mt8195_mt6359_card_late_probe;
-
- snd_soc_card_set_drvdata(card, priv);
+ snd_soc_card_set_drvdata(card, soc_card_data);
ret = devm_snd_soc_register_card(&pdev->dev, card);
of_node_put(platform_node);
- of_node_put(adsp_node);
of_node_put(dp_node);
of_node_put(hdmi_node);
+err_kzalloc:
+err_parse_of:
+err_platform_node:
+ of_node_put(adsp_node);
return ret;
}
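The mt8195-mt6359.c rework above moves the board-private struct behind a shared wrapper (struct mtk_soc_card_data with a mach_priv pointer) so the common MediaTek SOF helpers and the machine driver can share one card drvdata. A rough sketch of the resulting two-level lookup, using hypothetical struct and function names rather than the real MediaTek ones, could look like this:

#include <sound/soc.h>
#include <sound/jack.h>

struct demo_soc_card_data {		/* shared wrapper (hypothetical) */
	void *mach_priv;		/* board-specific state */
	void *sof_priv;			/* SOF glue state */
};

struct demo_mach_priv {			/* board-specific state (hypothetical) */
	struct snd_soc_jack headset_jack;
};

static int demo_codec_init(struct snd_soc_pcm_runtime *rtd)
{
	/* card drvdata now holds the wrapper, not the board struct directly */
	struct demo_soc_card_data *soc_card_data =
		snd_soc_card_get_drvdata(rtd->card);
	struct demo_mach_priv *priv = soc_card_data->mach_priv;

	return snd_soc_card_jack_new(rtd->card, "Headset Jack",
				     SND_JACK_HEADSET, &priv->headset_jack);
}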
diff --git a/sound/soc/meson/aiu-acodec-ctrl.c b/sound/soc/meson/aiu-acodec-ctrl.c
index 3776b073a3db..d0f0ada5f4bc 100644
--- a/sound/soc/meson/aiu-acodec-ctrl.c
+++ b/sound/soc/meson/aiu-acodec-ctrl.c
@@ -192,7 +192,6 @@ static const struct snd_soc_component_driver aiu_acodec_ctrl_component = {
.num_dapm_routes = ARRAY_SIZE(aiu_acodec_ctrl_routes),
.of_xlate_dai_name = aiu_acodec_of_xlate_dai_name,
.endianness = 1,
- .non_legacy_dai_naming = 1,
#ifdef CONFIG_DEBUG_FS
.debugfs_prefix = "acodec",
#endif
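This and the following meson, pxa, samsung and qcom hunks are fallout from the ASoC core flipping the default DAI-naming behaviour: codec-style components can simply drop .non_legacy_dai_naming = 1, while CPU/platform components that still rely on the old scheme must now opt in with .legacy_dai_naming = 1. A sketch of both sides, with made-up driver names:

#include <sound/soc.h>

/* CPU/platform component keeping the old DAI naming scheme */
static const struct snd_soc_component_driver demo_cpu_component = {
	.name			= "demo-cpu-dai",
	.legacy_dai_naming	= 1,	/* was the implicit default before */
};

/* Codec component: the old .non_legacy_dai_naming = 1 is simply omitted */
static const struct snd_soc_component_driver demo_codec_component = {
	.name		= "demo-codec",
	.endianness	= 1,
};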
diff --git a/sound/soc/meson/aiu-codec-ctrl.c b/sound/soc/meson/aiu-codec-ctrl.c
index 286ac4983d40..84c10956c241 100644
--- a/sound/soc/meson/aiu-codec-ctrl.c
+++ b/sound/soc/meson/aiu-codec-ctrl.c
@@ -139,7 +139,6 @@ static const struct snd_soc_component_driver aiu_hdmi_ctrl_component = {
.num_dapm_routes = ARRAY_SIZE(aiu_hdmi_ctrl_routes),
.of_xlate_dai_name = aiu_hdmi_of_xlate_dai_name,
.endianness = 1,
- .non_legacy_dai_naming = 1,
#ifdef CONFIG_DEBUG_FS
.debugfs_prefix = "hdmi",
#endif
diff --git a/sound/soc/meson/aiu-encoder-i2s.c b/sound/soc/meson/aiu-encoder-i2s.c
index 67729de41a73..a0dd914c8ed1 100644
--- a/sound/soc/meson/aiu-encoder-i2s.c
+++ b/sound/soc/meson/aiu-encoder-i2s.c
@@ -229,7 +229,7 @@ static int aiu_encoder_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
unsigned int skew;
/* Only CPU Master / Codec Slave supported ATM */
- if ((fmt & SND_SOC_DAIFMT_MASTER_MASK) != SND_SOC_DAIFMT_CBS_CFS)
+ if ((fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) != SND_SOC_DAIFMT_BP_FP)
return -EINVAL;
if (inv == SND_SOC_DAIFMT_NB_IF ||
diff --git a/sound/soc/meson/axg-frddr.c b/sound/soc/meson/axg-frddr.c
index 37f4bb3469b5..61f9d417fd60 100644
--- a/sound/soc/meson/axg-frddr.c
+++ b/sound/soc/meson/axg-frddr.c
@@ -161,6 +161,7 @@ static const struct snd_soc_component_driver axg_frddr_component_drv = {
.hw_free = axg_fifo_pcm_hw_free,
.pointer = axg_fifo_pcm_pointer,
.trigger = axg_fifo_pcm_trigger,
+ .legacy_dai_naming = 1,
};
static const struct axg_fifo_match_data axg_frddr_match_data = {
@@ -286,6 +287,7 @@ static const struct snd_soc_component_driver g12a_frddr_component_drv = {
.hw_free = axg_fifo_pcm_hw_free,
.pointer = axg_fifo_pcm_pointer,
.trigger = axg_fifo_pcm_trigger,
+ .legacy_dai_naming = 1,
};
static const struct axg_fifo_match_data g12a_frddr_match_data = {
@@ -356,6 +358,7 @@ static const struct snd_soc_component_driver sm1_frddr_component_drv = {
.hw_free = axg_fifo_pcm_hw_free,
.pointer = axg_fifo_pcm_pointer,
.trigger = axg_fifo_pcm_trigger,
+ .legacy_dai_naming = 1,
};
static const struct axg_fifo_match_data sm1_frddr_match_data = {
diff --git a/sound/soc/meson/axg-pdm.c b/sound/soc/meson/axg-pdm.c
index 672e43a9729d..88ac58272f95 100644
--- a/sound/soc/meson/axg-pdm.c
+++ b/sound/soc/meson/axg-pdm.c
@@ -457,7 +457,9 @@ static struct snd_soc_dai_driver axg_pdm_dai_drv = {
.remove = axg_pdm_dai_remove,
};
-static const struct snd_soc_component_driver axg_pdm_component_drv = {};
+static const struct snd_soc_component_driver axg_pdm_component_drv = {
+ .legacy_dai_naming = 1,
+};
static const struct regmap_config axg_pdm_regmap_cfg = {
.reg_bits = 32,
diff --git a/sound/soc/meson/axg-spdifin.c b/sound/soc/meson/axg-spdifin.c
index 4ba44e0d65d9..e2cc4c4be758 100644
--- a/sound/soc/meson/axg-spdifin.c
+++ b/sound/soc/meson/axg-spdifin.c
@@ -390,6 +390,7 @@ static const struct snd_kcontrol_new axg_spdifin_controls[] = {
static const struct snd_soc_component_driver axg_spdifin_component_drv = {
.controls = axg_spdifin_controls,
.num_controls = ARRAY_SIZE(axg_spdifin_controls),
+ .legacy_dai_naming = 1,
};
static const struct regmap_config axg_spdifin_regmap_cfg = {
diff --git a/sound/soc/meson/axg-spdifout.c b/sound/soc/meson/axg-spdifout.c
index 3960d082e143..e8a12f15f3b4 100644
--- a/sound/soc/meson/axg-spdifout.c
+++ b/sound/soc/meson/axg-spdifout.c
@@ -383,6 +383,7 @@ static const struct snd_soc_component_driver axg_spdifout_component_drv = {
.dapm_routes = axg_spdifout_dapm_routes,
.num_dapm_routes = ARRAY_SIZE(axg_spdifout_dapm_routes),
.set_bias_level = axg_spdifout_set_bias_level,
+ .legacy_dai_naming = 1,
};
static const struct regmap_config axg_spdifout_regmap_cfg = {
diff --git a/sound/soc/meson/axg-tdm-interface.c b/sound/soc/meson/axg-tdm-interface.c
index e076ced30025..c040c83637e0 100644
--- a/sound/soc/meson/axg-tdm-interface.c
+++ b/sound/soc/meson/axg-tdm-interface.c
@@ -119,19 +119,19 @@ static int axg_tdm_iface_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
struct axg_tdm_iface *iface = snd_soc_dai_get_drvdata(dai);
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBS_CFS:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_BP_FP:
if (!iface->mclk) {
dev_err(dai->dev, "cpu clock master: mclk missing\n");
return -ENODEV;
}
break;
- case SND_SOC_DAIFMT_CBM_CFM:
+ case SND_SOC_DAIFMT_BC_FC:
break;
- case SND_SOC_DAIFMT_CBS_CFM:
- case SND_SOC_DAIFMT_CBM_CFS:
+ case SND_SOC_DAIFMT_BP_FC:
+ case SND_SOC_DAIFMT_BC_FP:
dev_err(dai->dev, "only CBS_CFS and CBM_CFM are supported\n");
fallthrough;
default:
@@ -326,8 +326,8 @@ static int axg_tdm_iface_hw_params(struct snd_pcm_substream *substream,
if (ret)
return ret;
- if ((iface->fmt & SND_SOC_DAIFMT_MASTER_MASK) ==
- SND_SOC_DAIFMT_CBS_CFS) {
+ if ((iface->fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) ==
+ SND_SOC_DAIFMT_BP_FP) {
ret = axg_tdm_iface_set_sclk(dai, params);
if (ret)
return ret;
diff --git a/sound/soc/meson/axg-toddr.c b/sound/soc/meson/axg-toddr.c
index d6adf7edea41..e9208e74e965 100644
--- a/sound/soc/meson/axg-toddr.c
+++ b/sound/soc/meson/axg-toddr.c
@@ -182,6 +182,7 @@ static const struct snd_soc_component_driver axg_toddr_component_drv = {
.hw_free = axg_fifo_pcm_hw_free,
.pointer = axg_fifo_pcm_pointer,
.trigger = axg_fifo_pcm_trigger,
+ .legacy_dai_naming = 1,
};
static const struct axg_fifo_match_data axg_toddr_match_data = {
@@ -242,6 +243,7 @@ static const struct snd_soc_component_driver g12a_toddr_component_drv = {
.hw_free = axg_fifo_pcm_hw_free,
.pointer = axg_fifo_pcm_pointer,
.trigger = axg_fifo_pcm_trigger,
+ .legacy_dai_naming = 1,
};
static const struct axg_fifo_match_data g12a_toddr_match_data = {
@@ -312,6 +314,7 @@ static const struct snd_soc_component_driver sm1_toddr_component_drv = {
.hw_free = axg_fifo_pcm_hw_free,
.pointer = axg_fifo_pcm_pointer,
.trigger = axg_fifo_pcm_trigger,
+ .legacy_dai_naming = 1,
};
static const struct axg_fifo_match_data sm1_toddr_match_data = {
diff --git a/sound/soc/meson/g12a-toacodec.c b/sound/soc/meson/g12a-toacodec.c
index 1dfee1396843..ddc667956cf5 100644
--- a/sound/soc/meson/g12a-toacodec.c
+++ b/sound/soc/meson/g12a-toacodec.c
@@ -242,7 +242,6 @@ static const struct snd_soc_component_driver g12a_toacodec_component_drv = {
.dapm_routes = g12a_toacodec_routes,
.num_dapm_routes = ARRAY_SIZE(g12a_toacodec_routes),
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct snd_soc_component_driver sm1_toacodec_component_drv = {
@@ -254,7 +253,6 @@ static const struct snd_soc_component_driver sm1_toacodec_component_drv = {
.dapm_routes = g12a_toacodec_routes,
.num_dapm_routes = ARRAY_SIZE(g12a_toacodec_routes),
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config g12a_toacodec_regmap_cfg = {
diff --git a/sound/soc/meson/g12a-tohdmitx.c b/sound/soc/meson/g12a-tohdmitx.c
index 6c99052feafd..579a04ad4d19 100644
--- a/sound/soc/meson/g12a-tohdmitx.c
+++ b/sound/soc/meson/g12a-tohdmitx.c
@@ -226,7 +226,6 @@ static const struct snd_soc_component_driver g12a_tohdmitx_component_drv = {
.dapm_routes = g12a_tohdmitx_routes,
.num_dapm_routes = ARRAY_SIZE(g12a_tohdmitx_routes),
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config g12a_tohdmitx_regmap_cfg = {
diff --git a/sound/soc/meson/meson-codec-glue.c b/sound/soc/meson/meson-codec-glue.c
index 2870cfad813a..80c5ed196961 100644
--- a/sound/soc/meson/meson-codec-glue.c
+++ b/sound/soc/meson/meson-codec-glue.c
@@ -13,7 +13,7 @@
static struct snd_soc_dapm_widget *
meson_codec_glue_get_input(struct snd_soc_dapm_widget *w)
{
- struct snd_soc_dapm_path *p = NULL;
+ struct snd_soc_dapm_path *p;
struct snd_soc_dapm_widget *in;
snd_soc_dapm_widget_for_each_source_path(w, p) {
diff --git a/sound/soc/meson/t9015.c b/sound/soc/meson/t9015.c
index a9b8c4e77d40..9c6b4dac6893 100644
--- a/sound/soc/meson/t9015.c
+++ b/sound/soc/meson/t9015.c
@@ -234,7 +234,6 @@ static const struct snd_soc_component_driver t9015_codec_driver = {
.num_dapm_routes = ARRAY_SIZE(t9015_dapm_routes),
.suspend_bias_off = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config t9015_regmap_config = {
diff --git a/sound/soc/mxs/mxs-saif.c b/sound/soc/mxs/mxs-saif.c
index 7afe1a1acc56..ac761d3a01c0 100644
--- a/sound/soc/mxs/mxs-saif.c
+++ b/sound/soc/mxs/mxs-saif.c
@@ -358,8 +358,8 @@ static int mxs_saif_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
* Saif internally could be slave when working on EXTMASTER mode.
* We just hide this to machine driver.
*/
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBS_CFS:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_BP_FP:
if (saif->id == saif->master_id)
scr &= ~BM_SAIF_CTRL_SLAVE_MODE;
else
@@ -663,7 +663,8 @@ static struct snd_soc_dai_driver mxs_saif_dai = {
};
static const struct snd_soc_component_driver mxs_saif_component = {
- .name = "mxs-saif",
+ .name = "mxs-saif",
+ .legacy_dai_naming = 1,
};
static irqreturn_t mxs_saif_irq(int irq, void *dev_id)
diff --git a/sound/soc/pxa/magician.c b/sound/soc/pxa/magician.c
index 9433cc927755..b791a2ba5ce5 100644
--- a/sound/soc/pxa/magician.c
+++ b/sound/soc/pxa/magician.c
@@ -91,13 +91,13 @@ static int magician_playback_hw_params(struct snd_pcm_substream *substream,
/* set codec DAI configuration */
ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_MSB |
- SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
+ SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_BC_FC);
if (ret < 0)
return ret;
/* set cpu DAI configuration */
ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_DSP_A |
- SND_SOC_DAIFMT_NB_IF | SND_SOC_DAIFMT_CBS_CFS);
+ SND_SOC_DAIFMT_NB_IF | SND_SOC_DAIFMT_BP_FP);
if (ret < 0)
return ret;
@@ -129,14 +129,14 @@ static int magician_capture_hw_params(struct snd_pcm_substream *substream,
/* set codec DAI configuration */
ret = snd_soc_dai_set_fmt(codec_dai,
SND_SOC_DAIFMT_MSB | SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBS_CFS);
+ SND_SOC_DAIFMT_BC_FC);
if (ret < 0)
return ret;
/* set cpu DAI configuration */
ret = snd_soc_dai_set_fmt(cpu_dai,
SND_SOC_DAIFMT_MSB | SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBS_CFS);
+ SND_SOC_DAIFMT_BP_FP);
if (ret < 0)
return ret;
diff --git a/sound/soc/pxa/mmp-sspa.c b/sound/soc/pxa/mmp-sspa.c
index 7e39210a0b38..fb5a4390443f 100644
--- a/sound/soc/pxa/mmp-sspa.c
+++ b/sound/soc/pxa/mmp-sspa.c
@@ -171,11 +171,11 @@ static int mmp_sspa_set_dai_fmt(struct snd_soc_dai *cpu_dai,
sspa->sp = SSPA_SP_WEN | SSPA_SP_S_RST | SSPA_SP_FFLUSH;
sspa->ctrl = 0;
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBS_CFS:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_BP_FP:
sspa->sp |= SSPA_SP_MSL;
break;
- case SND_SOC_DAIFMT_CBM_CFM:
+ case SND_SOC_DAIFMT_BC_FC:
break;
default:
return -EINVAL;
@@ -456,10 +456,11 @@ static int mmp_sspa_close(struct snd_soc_component *component,
}
static const struct snd_soc_component_driver mmp_sspa_component = {
- .name = "mmp-sspa",
- .mmap = mmp_pcm_mmap,
- .open = mmp_sspa_open,
- .close = mmp_sspa_close,
+ .name = "mmp-sspa",
+ .mmap = mmp_pcm_mmap,
+ .open = mmp_sspa_open,
+ .close = mmp_sspa_close,
+ .legacy_dai_naming = 1,
};
static int asoc_mmp_sspa_probe(struct platform_device *pdev)
diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
index 7f13a35e9cc1..430dd446321e 100644
--- a/sound/soc/pxa/pxa-ssp.c
+++ b/sound/soc/pxa/pxa-ssp.c
@@ -372,10 +372,10 @@ static int pxa_ssp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
{
struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai);
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBM_CFM:
- case SND_SOC_DAIFMT_CBM_CFS:
- case SND_SOC_DAIFMT_CBS_CFS:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_BC_FC:
+ case SND_SOC_DAIFMT_BC_FP:
+ case SND_SOC_DAIFMT_BP_FP:
break;
default:
return -EINVAL;
@@ -432,14 +432,14 @@ static int pxa_ssp_configure_dai_fmt(struct ssp_priv *priv)
sscr1 |= SSCR1_RxTresh(8) | SSCR1_TxTresh(7);
- switch (priv->dai_fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBM_CFM:
+ switch (priv->dai_fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_BC_FC:
sscr1 |= SSCR1_SCLKDIR | SSCR1_SFRMDIR | SSCR1_SCFR;
break;
- case SND_SOC_DAIFMT_CBM_CFS:
+ case SND_SOC_DAIFMT_BC_FP:
sscr1 |= SSCR1_SCLKDIR | SSCR1_SCFR;
break;
- case SND_SOC_DAIFMT_CBS_CFS:
+ case SND_SOC_DAIFMT_BP_FP:
break;
default:
return -EINVAL;
@@ -484,9 +484,9 @@ static int pxa_ssp_configure_dai_fmt(struct ssp_priv *priv)
pxa_ssp_write_reg(ssp, SSCR1, sscr1);
pxa_ssp_write_reg(ssp, SSPSP, sspsp);
- switch (priv->dai_fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBM_CFM:
- case SND_SOC_DAIFMT_CBM_CFS:
+ switch (priv->dai_fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_BC_FC:
+ case SND_SOC_DAIFMT_BC_FP:
scfr = pxa_ssp_read_reg(ssp, SSCR1) | SSCR1_SCFR;
pxa_ssp_write_reg(ssp, SSCR1, scfr);
@@ -848,16 +848,17 @@ static struct snd_soc_dai_driver pxa_ssp_dai = {
};
static const struct snd_soc_component_driver pxa_ssp_component = {
- .name = "pxa-ssp",
- .pcm_construct = pxa2xx_soc_pcm_new,
- .open = pxa2xx_soc_pcm_open,
- .close = pxa2xx_soc_pcm_close,
- .hw_params = pxa2xx_soc_pcm_hw_params,
- .prepare = pxa2xx_soc_pcm_prepare,
- .trigger = pxa2xx_soc_pcm_trigger,
- .pointer = pxa2xx_soc_pcm_pointer,
- .suspend = pxa_ssp_suspend,
- .resume = pxa_ssp_resume,
+ .name = "pxa-ssp",
+ .pcm_construct = pxa2xx_soc_pcm_new,
+ .open = pxa2xx_soc_pcm_open,
+ .close = pxa2xx_soc_pcm_close,
+ .hw_params = pxa2xx_soc_pcm_hw_params,
+ .prepare = pxa2xx_soc_pcm_prepare,
+ .trigger = pxa2xx_soc_pcm_trigger,
+ .pointer = pxa2xx_soc_pcm_pointer,
+ .suspend = pxa_ssp_suspend,
+ .resume = pxa_ssp_resume,
+ .legacy_dai_naming = 1,
};
#ifdef CONFIG_OF
diff --git a/sound/soc/pxa/pxa2xx-i2s.c b/sound/soc/pxa/pxa2xx-i2s.c
index 746e6ec9198b..3e4c70403672 100644
--- a/sound/soc/pxa/pxa2xx-i2s.c
+++ b/sound/soc/pxa/pxa2xx-i2s.c
@@ -129,11 +129,11 @@ static int pxa2xx_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
break;
}
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBS_CFS:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_BP_FP:
pxa_i2s.master = 1;
break;
- case SND_SOC_DAIFMT_CBM_CFS:
+ case SND_SOC_DAIFMT_BC_FP:
pxa_i2s.master = 0;
break;
default:
@@ -355,16 +355,17 @@ static struct snd_soc_dai_driver pxa_i2s_dai = {
};
static const struct snd_soc_component_driver pxa_i2s_component = {
- .name = "pxa-i2s",
- .pcm_construct = pxa2xx_soc_pcm_new,
- .open = pxa2xx_soc_pcm_open,
- .close = pxa2xx_soc_pcm_close,
- .hw_params = pxa2xx_soc_pcm_hw_params,
- .prepare = pxa2xx_soc_pcm_prepare,
- .trigger = pxa2xx_soc_pcm_trigger,
- .pointer = pxa2xx_soc_pcm_pointer,
- .suspend = pxa2xx_soc_pcm_suspend,
- .resume = pxa2xx_soc_pcm_resume,
+ .name = "pxa-i2s",
+ .pcm_construct = pxa2xx_soc_pcm_new,
+ .open = pxa2xx_soc_pcm_open,
+ .close = pxa2xx_soc_pcm_close,
+ .hw_params = pxa2xx_soc_pcm_hw_params,
+ .prepare = pxa2xx_soc_pcm_prepare,
+ .trigger = pxa2xx_soc_pcm_trigger,
+ .pointer = pxa2xx_soc_pcm_pointer,
+ .suspend = pxa2xx_soc_pcm_suspend,
+ .resume = pxa2xx_soc_pcm_resume,
+ .legacy_dai_naming = 1,
};
static int pxa2xx_i2s_drv_probe(struct platform_device *pdev)
diff --git a/sound/soc/qcom/apq8016_sbc.c b/sound/soc/qcom/apq8016_sbc.c
index b0a4f7ca2751..e54b8961112f 100644
--- a/sound/soc/qcom/apq8016_sbc.c
+++ b/sound/soc/qcom/apq8016_sbc.c
@@ -172,7 +172,7 @@ static int msm8916_qdsp6_dai_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
- snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_CBS_CFS);
+ snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_BP_FP);
return apq8016_dai_init(rtd, qdsp6_dai_get_lpass_id(cpu_dai));
}
diff --git a/sound/soc/qcom/lpass-apq8016.c b/sound/soc/qcom/lpass-apq8016.c
index 3efa133d1c64..abaf694ee9a3 100644
--- a/sound/soc/qcom/lpass-apq8016.c
+++ b/sound/soc/qcom/lpass-apq8016.c
@@ -293,6 +293,7 @@ static struct lpass_variant apq8016_data = {
static const struct of_device_id apq8016_lpass_cpu_device_id[] __maybe_unused = {
{ .compatible = "qcom,lpass-cpu-apq8016", .data = &apq8016_data },
+ { .compatible = "qcom,apq8016-lpass-cpu", .data = &apq8016_data },
{}
};
MODULE_DEVICE_TABLE(of, apq8016_lpass_cpu_device_id);
diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
index e6846ad2b5fa..8a56f38dc7e8 100644
--- a/sound/soc/qcom/lpass-cpu.c
+++ b/sound/soc/qcom/lpass-cpu.c
@@ -472,6 +472,7 @@ static int asoc_qcom_of_xlate_dai_name(struct snd_soc_component *component,
static const struct snd_soc_component_driver lpass_cpu_comp_driver = {
.name = "lpass-cpu",
.of_xlate_dai_name = asoc_qcom_of_xlate_dai_name,
+ .legacy_dai_naming = 1,
};
static bool lpass_cpu_regmap_writeable(struct device *dev, unsigned int reg)
@@ -1090,6 +1091,7 @@ int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev)
dsp_of_node = of_parse_phandle(pdev->dev.of_node, "qcom,adsp", 0);
if (dsp_of_node) {
dev_err(dev, "DSP exists and holds audio resources\n");
+ of_node_put(dsp_of_node);
return -EBUSY;
}
@@ -1102,6 +1104,11 @@ int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev)
if (!match || !match->data)
return -EINVAL;
+ if (of_device_is_compatible(dev->of_node, "qcom,lpass-cpu-apq8016")) {
+ dev_warn(dev, "%s compatible is deprecated\n",
+ match->compatible);
+ }
+
drvdata->variant = (struct lpass_variant *)match->data;
variant = drvdata->variant;
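The lpass-cpu.c hunk above fixes a device_node leak: of_parse_phandle() returns a node with its refcount raised, so an early return (here -EBUSY when a DSP owns the block) must drop it with of_node_put(). A minimal sketch of the pattern, using a placeholder property name:

#include <linux/of.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct device_node *dsp_node;

	/* "demo,adsp" is a placeholder property name */
	dsp_node = of_parse_phandle(pdev->dev.of_node, "demo,adsp", 0);
	if (dsp_node) {
		dev_err(&pdev->dev, "DSP owns the audio resources\n");
		of_node_put(dsp_node);	/* drop the reference taken above */
		return -EBUSY;
	}

	return 0;
}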
diff --git a/sound/soc/qcom/qdsp6/audioreach.c b/sound/soc/qcom/qdsp6/audioreach.c
index 98c0efa1d0fe..01dac32c50fd 100644
--- a/sound/soc/qcom/qdsp6/audioreach.c
+++ b/sound/soc/qcom/qdsp6/audioreach.c
@@ -732,10 +732,10 @@ static int audioreach_i2s_set_media_format(struct q6apm_graph *graph,
intf_cfg->cfg.sd_line_idx = module->sd_line_idx;
switch (cfg->fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
- case SND_SOC_DAIFMT_CBC_CFC:
+ case SND_SOC_DAIFMT_BP_FP:
intf_cfg->cfg.ws_src = CONFIG_I2S_WS_SRC_INTERNAL;
break;
- case SND_SOC_DAIFMT_CBP_CFP:
+ case SND_SOC_DAIFMT_BC_FC:
/* CPU is slave */
intf_cfg->cfg.ws_src = CONFIG_I2S_WS_SRC_EXTERNAL;
break;
diff --git a/sound/soc/qcom/qdsp6/q6adm.c b/sound/soc/qcom/qdsp6/q6adm.c
index 72c5719f1d25..1530e98df165 100644
--- a/sound/soc/qcom/qdsp6/q6adm.c
+++ b/sound/soc/qcom/qdsp6/q6adm.c
@@ -90,7 +90,7 @@ struct q6adm_session_map_node_v5 {
static struct q6copp *q6adm_find_copp(struct q6adm *adm, int port_idx,
int copp_idx)
{
- struct q6copp *c = NULL;
+ struct q6copp *c;
struct q6copp *ret = NULL;
unsigned long flags;
@@ -180,7 +180,7 @@ static int q6adm_callback(struct apr_device *adev, struct apr_resp_pkt *data)
u32 status;
u16 copp_id;
u16 reserved;
- } __packed * open = data->payload;
+ } __packed *open = data->payload;
copp = q6adm_find_copp(adm, port_idx, copp_idx);
if (!copp)
@@ -217,7 +217,7 @@ static struct q6copp *q6adm_alloc_copp(struct q6adm *adm, int port_idx)
idx = find_first_zero_bit(&adm->copp_bitmap[port_idx],
MAX_COPPS_PER_PORT);
- if (idx > MAX_COPPS_PER_PORT)
+ if (idx >= MAX_COPPS_PER_PORT)
return ERR_PTR(-EBUSY);
c = kzalloc(sizeof(*c), GFP_ATOMIC);
@@ -299,7 +299,7 @@ static struct q6copp *q6adm_find_matching_copp(struct q6adm *adm,
int channel_mode, int bit_width,
int app_type)
{
- struct q6copp *c = NULL;
+ struct q6copp *c;
struct q6copp *ret = NULL;
unsigned long flags;
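The q6adm.c change above is an off-by-one fix: find_first_zero_bit() returns the bitmap size (not size - 1) when every bit is set, so the "pool exhausted" test must use >=, not >. A small sketch of an allocator following that convention, with hypothetical names:

#include <linux/bitops.h>
#include <linux/errno.h>

#define DEMO_MAX_SLOTS 8	/* hypothetical pool size */

static int demo_alloc_slot(unsigned long *bitmap)
{
	unsigned long idx;

	idx = find_first_zero_bit(bitmap, DEMO_MAX_SLOTS);
	if (idx >= DEMO_MAX_SLOTS)	/* == DEMO_MAX_SLOTS means "pool full" */
		return -EBUSY;

	set_bit(idx, bitmap);
	return idx;
}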
diff --git a/sound/soc/qcom/qdsp6/q6afe.c b/sound/soc/qcom/qdsp6/q6afe.c
index 625724852a7f..919e326b9462 100644
--- a/sound/soc/qcom/qdsp6/q6afe.c
+++ b/sound/soc/qcom/qdsp6/q6afe.c
@@ -1328,11 +1328,11 @@ int q6afe_i2s_port_prepare(struct q6afe_port *port, struct q6afe_i2s_cfg *cfg)
pcfg->i2s_cfg.bit_width = cfg->bit_width;
pcfg->i2s_cfg.data_format = AFE_LINEAR_PCM_DATA;
- switch (cfg->fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBS_CFS:
+ switch (cfg->fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_BP_FP:
pcfg->i2s_cfg.ws_src = AFE_PORT_CONFIG_I2S_WS_SRC_INTERNAL;
break;
- case SND_SOC_DAIFMT_CBM_CFM:
+ case SND_SOC_DAIFMT_BC_FC:
/* CPU is slave */
pcfg->i2s_cfg.ws_src = AFE_PORT_CONFIG_I2S_WS_SRC_EXTERNAL;
break;
diff --git a/sound/soc/qcom/qdsp6/q6asm-dai.c b/sound/soc/qcom/qdsp6/q6asm-dai.c
index b74b67720ef4..5fc8088e63c8 100644
--- a/sound/soc/qcom/qdsp6/q6asm-dai.c
+++ b/sound/soc/qcom/qdsp6/q6asm-dai.c
@@ -1205,17 +1205,18 @@ static const struct snd_soc_dapm_widget q6asm_dapm_widgets[] = {
};
static const struct snd_soc_component_driver q6asm_fe_dai_component = {
- .name = DRV_NAME,
- .open = q6asm_dai_open,
- .hw_params = q6asm_dai_hw_params,
- .close = q6asm_dai_close,
- .prepare = q6asm_dai_prepare,
- .trigger = q6asm_dai_trigger,
- .pointer = q6asm_dai_pointer,
- .pcm_construct = q6asm_dai_pcm_new,
- .compress_ops = &q6asm_dai_compress_ops,
- .dapm_widgets = q6asm_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(q6asm_dapm_widgets),
+ .name = DRV_NAME,
+ .open = q6asm_dai_open,
+ .hw_params = q6asm_dai_hw_params,
+ .close = q6asm_dai_close,
+ .prepare = q6asm_dai_prepare,
+ .trigger = q6asm_dai_trigger,
+ .pointer = q6asm_dai_pointer,
+ .pcm_construct = q6asm_dai_pcm_new,
+ .compress_ops = &q6asm_dai_compress_ops,
+ .dapm_widgets = q6asm_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(q6asm_dapm_widgets),
+ .legacy_dai_naming = 1,
};
static struct snd_soc_dai_driver q6asm_fe_dais_template[] = {
diff --git a/sound/soc/qcom/qdsp6/q6asm.c b/sound/soc/qcom/qdsp6/q6asm.c
index 9251d8548965..195780f75d05 100644
--- a/sound/soc/qcom/qdsp6/q6asm.c
+++ b/sound/soc/qcom/qdsp6/q6asm.c
@@ -513,7 +513,7 @@ int q6asm_map_memory_regions(unsigned int dir, struct audio_client *ac,
return 0;
}
- buf = kzalloc(((sizeof(struct audio_buffer)) * periods), GFP_ATOMIC);
+ buf = kcalloc(periods, sizeof(*buf), GFP_ATOMIC);
if (!buf) {
spin_unlock_irqrestore(&ac->lock, flags);
return -ENOMEM;
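The q6asm.c hunk swaps an open-coded kzalloc(sizeof(x) * n) for kcalloc(n, sizeof(*buf), ...), which zero-initialises the array and rejects a multiplication overflow. Sketch of the preferred form (names are illustrative only):

#include <linux/slab.h>

struct demo_buffer {
	void *data;
	size_t size;
};

static struct demo_buffer *demo_alloc_buffers(unsigned int periods, gfp_t gfp)
{
	/* kcalloc() zeroes the memory and checks periods * size for overflow */
	return kcalloc(periods, sizeof(struct demo_buffer), gfp);
}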
diff --git a/sound/soc/qcom/sc7180.c b/sound/soc/qcom/sc7180.c
index efccb5c0b3e0..f5f7c64b23a2 100644
--- a/sound/soc/qcom/sc7180.c
+++ b/sound/soc/qcom/sc7180.c
@@ -155,7 +155,7 @@ static int sc7180_snd_startup(struct snd_pcm_substream *substream)
}
snd_soc_dai_set_fmt(codec_dai,
- SND_SOC_DAIFMT_CBS_CFS |
+ SND_SOC_DAIFMT_BC_FC |
SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_I2S);
diff --git a/sound/soc/qcom/sc7280.c b/sound/soc/qcom/sc7280.c
index 34cdb99d4ed6..da7469a6a267 100644
--- a/sound/soc/qcom/sc7280.c
+++ b/sound/soc/qcom/sc7280.c
@@ -19,9 +19,11 @@
#include "../codecs/rt5682s.h"
#include "common.h"
#include "lpass.h"
+#include "qdsp6/q6afe.h"
#define DEFAULT_MCLK_RATE 19200000
#define RT5682_PLL_FREQ (48000 * 512)
+#define MI2S_BCLK_RATE 1536000
struct sc7280_snd_data {
struct snd_soc_card card;
@@ -79,6 +81,7 @@ static int sc7280_headset_init(struct snd_soc_pcm_runtime *rtd)
case MI2S_PRIMARY:
case LPASS_CDC_DMA_RX0:
case LPASS_CDC_DMA_TX3:
+ case TX_CODEC_DMA_TX_3:
for_each_rtd_codec_dais(rtd, i, codec_dai) {
rval = snd_soc_component_set_jack(component, &pdata->hs_jack, NULL);
if (rval != 0 && rval != -ENOTSUPP) {
@@ -164,10 +167,14 @@ static int sc7280_init(struct snd_soc_pcm_runtime *rtd)
switch (cpu_dai->id) {
case MI2S_PRIMARY:
case LPASS_CDC_DMA_TX3:
+ case TX_CODEC_DMA_TX_3:
return sc7280_headset_init(rtd);
case LPASS_CDC_DMA_RX0:
case LPASS_CDC_DMA_VA_TX0:
case MI2S_SECONDARY:
+ case RX_CODEC_DMA_RX_0:
+ case SECONDARY_MI2S_RX:
+ case VA_CODEC_DMA_TX_0:
return 0;
case LPASS_DP_RX:
return sc7280_hdmi_init(rtd);
@@ -195,6 +202,10 @@ static int sc7280_snd_hw_params(struct snd_pcm_substream *substream,
switch (cpu_dai->id) {
case LPASS_CDC_DMA_TX3:
case LPASS_CDC_DMA_RX0:
+ case RX_CODEC_DMA_RX_0:
+ case SECONDARY_MI2S_RX:
+ case TX_CODEC_DMA_TX_3:
+ case VA_CODEC_DMA_TX_0:
for_each_rtd_codec_dais(rtd, i, codec_dai) {
sruntime = snd_soc_dai_get_stream(codec_dai, substream->stream);
if (sruntime != ERR_PTR(-ENOTSUPP))
@@ -245,6 +256,9 @@ static int sc7280_snd_prepare(struct snd_pcm_substream *substream)
switch (cpu_dai->id) {
case LPASS_CDC_DMA_RX0:
case LPASS_CDC_DMA_TX3:
+ case RX_CODEC_DMA_RX_0:
+ case TX_CODEC_DMA_TX_3:
+ case VA_CODEC_DMA_TX_0:
return sc7280_snd_swr_prepare(substream);
default:
break;
@@ -263,6 +277,9 @@ static int sc7280_snd_hw_free(struct snd_pcm_substream *substream)
switch (cpu_dai->id) {
case LPASS_CDC_DMA_RX0:
case LPASS_CDC_DMA_TX3:
+ case RX_CODEC_DMA_RX_0:
+ case TX_CODEC_DMA_TX_3:
+ case VA_CODEC_DMA_TX_0:
if (sruntime && data->stream_prepared[cpu_dai->id]) {
sdw_disable_stream(sruntime);
sdw_deprepare_stream(sruntime);
@@ -291,6 +308,10 @@ static void sc7280_snd_shutdown(struct snd_pcm_substream *substream)
SNDRV_PCM_STREAM_PLAYBACK);
}
break;
+ case SECONDARY_MI2S_RX:
+ snd_soc_dai_set_sysclk(cpu_dai, Q6AFE_LPASS_CLK_ID_SEC_MI2S_IBIT,
+ 0, SNDRV_PCM_STREAM_PLAYBACK);
+ break;
default:
break;
}
@@ -298,14 +319,26 @@ static void sc7280_snd_shutdown(struct snd_pcm_substream *substream)
static int sc7280_snd_startup(struct snd_pcm_substream *substream)
{
+ unsigned int fmt = SND_SOC_DAIFMT_CBS_CFS;
+ unsigned int codec_dai_fmt = SND_SOC_DAIFMT_CBS_CFS;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int ret = 0;
switch (cpu_dai->id) {
case MI2S_PRIMARY:
ret = sc7280_rt5682_init(rtd);
break;
+ case SECONDARY_MI2S_RX:
+ codec_dai_fmt |= SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_I2S;
+
+ snd_soc_dai_set_sysclk(cpu_dai, Q6AFE_LPASS_CLK_ID_SEC_MI2S_IBIT,
+ MI2S_BCLK_RATE, SNDRV_PCM_STREAM_PLAYBACK);
+
+ snd_soc_dai_set_fmt(cpu_dai, fmt);
+ snd_soc_dai_set_fmt(codec_dai, codec_dai_fmt);
+ break;
default:
break;
}
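The sc7280.c additions wire up a secondary MI2S playback path: the machine driver's startup callback enables the port's internal bit clock with snd_soc_dai_set_sysclk() and programs an I2S, CPU-clock-provider format on both DAIs. A stripped-down sketch of that flow is below; it uses the new BP_FP/BC_FC spelling (matching the sdm845.c/sm8250.c conversion just after this file), and the rate macro plus error handling are illustrative rather than copied from the patch.

#include <sound/soc.h>
#include "qdsp6/q6afe.h"	/* Q6AFE_LPASS_CLK_ID_SEC_MI2S_IBIT */

#define DEMO_MI2S_BCLK_RATE	1536000	/* 48 kHz * 16 bit * 2 ch */

static int demo_sec_mi2s_startup(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
	struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
	int ret;

	/* turn on the internal bit clock for the secondary MI2S port */
	ret = snd_soc_dai_set_sysclk(cpu_dai, Q6AFE_LPASS_CLK_ID_SEC_MI2S_IBIT,
				     DEMO_MI2S_BCLK_RATE,
				     SNDRV_PCM_STREAM_PLAYBACK);
	if (ret)
		return ret;

	/* CPU provides the clocks, codec consumes them, I2S framing */
	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_BP_FP);
	if (ret)
		return ret;

	return snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_BC_FC |
				   SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_I2S);
}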
diff --git a/sound/soc/qcom/sdm845.c b/sound/soc/qcom/sdm845.c
index 61fda790f375..d8d35563af00 100644
--- a/sound/soc/qcom/sdm845.c
+++ b/sound/soc/qcom/sdm845.c
@@ -316,8 +316,8 @@ static int sdm845_dai_init(struct snd_soc_pcm_runtime *rtd)
static int sdm845_snd_startup(struct snd_pcm_substream *substream)
{
- unsigned int fmt = SND_SOC_DAIFMT_CBS_CFS;
- unsigned int codec_dai_fmt = SND_SOC_DAIFMT_CBS_CFS;
+ unsigned int fmt = SND_SOC_DAIFMT_BP_FP;
+ unsigned int codec_dai_fmt = SND_SOC_DAIFMT_BC_FC;
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct snd_soc_card *card = rtd->card;
struct sdm845_snd_data *data = snd_soc_card_get_drvdata(card);
@@ -356,7 +356,7 @@ static int sdm845_snd_startup(struct snd_pcm_substream *substream)
snd_soc_dai_set_sysclk(cpu_dai,
Q6AFE_LPASS_CLK_ID_QUAD_MI2S_IBIT,
MI2S_BCLK_RATE, SNDRV_PCM_STREAM_PLAYBACK);
- snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_CBS_CFS);
+ snd_soc_dai_set_fmt(cpu_dai, fmt);
break;
diff --git a/sound/soc/qcom/sm8250.c b/sound/soc/qcom/sm8250.c
index 6e1184c8b672..ce4a5713386a 100644
--- a/sound/soc/qcom/sm8250.c
+++ b/sound/soc/qcom/sm8250.c
@@ -96,8 +96,8 @@ static int sm8250_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
static int sm8250_snd_startup(struct snd_pcm_substream *substream)
{
- unsigned int fmt = SND_SOC_DAIFMT_CBS_CFS;
- unsigned int codec_dai_fmt = SND_SOC_DAIFMT_CBS_CFS;
+ unsigned int fmt = SND_SOC_DAIFMT_BP_FP;
+ unsigned int codec_dai_fmt = SND_SOC_DAIFMT_BC_FC;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
diff --git a/sound/soc/rockchip/rk3288_hdmi_analog.c b/sound/soc/rockchip/rk3288_hdmi_analog.c
index bcdeddeba80c..0c6bd9a019db 100644
--- a/sound/soc/rockchip/rk3288_hdmi_analog.c
+++ b/sound/soc/rockchip/rk3288_hdmi_analog.c
@@ -169,7 +169,7 @@ static struct snd_soc_card snd_soc_card_rk = {
static int snd_rk_mc_probe(struct platform_device *pdev)
{
- int ret = 0;
+ int ret;
struct snd_soc_card *card = &snd_soc_card_rk;
struct device_node *np = pdev->dev.of_node;
struct rk_drvdata *machine;
@@ -253,7 +253,7 @@ static int snd_rk_mc_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, ret,
"Soc register card failed\n");
- return ret;
+ return 0;
}
static const struct of_device_id rockchip_sound_of_match[] = {
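The rk3288_hdmi_analog.c change (and the matching snow.c one later in this diff) makes the success path explicit: dev_err_probe() already logs and returns the error (quietly for -EPROBE_DEFER), so the probe should finish with return 0 instead of re-returning ret. Minimal sketch of the idiom, with a hypothetical card name:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <sound/soc.h>

static struct snd_soc_card demo_card = {
	.name	= "demo-card",	/* hypothetical */
	.owner	= THIS_MODULE,
};

static int demo_card_probe(struct platform_device *pdev)
{
	int ret;

	demo_card.dev = &pdev->dev;

	ret = devm_snd_soc_register_card(&pdev->dev, &demo_card);
	if (ret)
		/* logs (except for -EPROBE_DEFER) and passes ret through */
		return dev_err_probe(&pdev->dev, ret, "register card failed\n");

	return 0;	/* success path no longer re-uses ret */
}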
diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
index 4ce5d2579387..f5f3540a9e18 100644
--- a/sound/soc/rockchip/rockchip_i2s.c
+++ b/sound/soc/rockchip/rockchip_i2s.c
@@ -13,6 +13,7 @@
#include <linux/of_gpio.h>
#include <linux/of_device.h>
#include <linux/clk.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/spinlock.h>
@@ -54,8 +55,38 @@ struct rk_i2s_dev {
const struct rk_i2s_pins *pins;
unsigned int bclk_ratio;
spinlock_t lock; /* tx/rx lock */
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *bclk_on;
+ struct pinctrl_state *bclk_off;
};
+static int i2s_pinctrl_select_bclk_on(struct rk_i2s_dev *i2s)
+{
+ int ret = 0;
+
+ if (!IS_ERR(i2s->pinctrl) && !IS_ERR_OR_NULL(i2s->bclk_on))
+ ret = pinctrl_select_state(i2s->pinctrl, i2s->bclk_on);
+
+ if (ret)
+ dev_err(i2s->dev, "bclk enable failed %d\n", ret);
+
+ return ret;
+}
+
+static int i2s_pinctrl_select_bclk_off(struct rk_i2s_dev *i2s)
+{
+
+ int ret = 0;
+
+ if (!IS_ERR(i2s->pinctrl) && !IS_ERR_OR_NULL(i2s->bclk_off))
+ ret = pinctrl_select_state(i2s->pinctrl, i2s->bclk_off);
+
+ if (ret)
+ dev_err(i2s->dev, "bclk disable failed %d\n", ret);
+
+ return ret;
+}
+
static int i2s_runtime_suspend(struct device *dev)
{
struct rk_i2s_dev *i2s = dev_get_drvdata(dev);
@@ -92,39 +123,46 @@ static inline struct rk_i2s_dev *to_info(struct snd_soc_dai *dai)
return snd_soc_dai_get_drvdata(dai);
}
-static void rockchip_snd_txctrl(struct rk_i2s_dev *i2s, int on)
+static int rockchip_snd_txctrl(struct rk_i2s_dev *i2s, int on)
{
unsigned int val = 0;
int retry = 10;
+ int ret = 0;
spin_lock(&i2s->lock);
if (on) {
- regmap_update_bits(i2s->regmap, I2S_DMACR,
- I2S_DMACR_TDE_ENABLE, I2S_DMACR_TDE_ENABLE);
-
- regmap_update_bits(i2s->regmap, I2S_XFER,
- I2S_XFER_TXS_START | I2S_XFER_RXS_START,
- I2S_XFER_TXS_START | I2S_XFER_RXS_START);
-
+ ret = regmap_update_bits(i2s->regmap, I2S_DMACR,
+ I2S_DMACR_TDE_ENABLE,
+ I2S_DMACR_TDE_ENABLE);
+ if (ret < 0)
+ goto end;
+ ret = regmap_update_bits(i2s->regmap, I2S_XFER,
+ I2S_XFER_TXS_START | I2S_XFER_RXS_START,
+ I2S_XFER_TXS_START | I2S_XFER_RXS_START);
+ if (ret < 0)
+ goto end;
i2s->tx_start = true;
} else {
i2s->tx_start = false;
- regmap_update_bits(i2s->regmap, I2S_DMACR,
- I2S_DMACR_TDE_ENABLE, I2S_DMACR_TDE_DISABLE);
+ ret = regmap_update_bits(i2s->regmap, I2S_DMACR,
+ I2S_DMACR_TDE_ENABLE,
+ I2S_DMACR_TDE_DISABLE);
+ if (ret < 0)
+ goto end;
if (!i2s->rx_start) {
- regmap_update_bits(i2s->regmap, I2S_XFER,
- I2S_XFER_TXS_START |
- I2S_XFER_RXS_START,
- I2S_XFER_TXS_STOP |
- I2S_XFER_RXS_STOP);
-
+ ret = regmap_update_bits(i2s->regmap, I2S_XFER,
+ I2S_XFER_TXS_START | I2S_XFER_RXS_START,
+ I2S_XFER_TXS_STOP | I2S_XFER_RXS_STOP);
+ if (ret < 0)
+ goto end;
udelay(150);
- regmap_update_bits(i2s->regmap, I2S_CLR,
- I2S_CLR_TXC | I2S_CLR_RXC,
- I2S_CLR_TXC | I2S_CLR_RXC);
-
+ ret = regmap_update_bits(i2s->regmap, I2S_CLR,
+ I2S_CLR_TXC | I2S_CLR_RXC,
+ I2S_CLR_TXC | I2S_CLR_RXC);
+ if (ret < 0)
+ goto end;
regmap_read(i2s->regmap, I2S_CLR, &val);
/* Should wait for clear operation to finish */
@@ -133,61 +171,80 @@ static void rockchip_snd_txctrl(struct rk_i2s_dev *i2s, int on)
retry--;
if (!retry) {
dev_warn(i2s->dev, "fail to clear\n");
+ ret = -EBUSY;
break;
}
}
}
}
+end:
spin_unlock(&i2s->lock);
+ if (ret < 0)
+ dev_err(i2s->dev, "lrclk update failed\n");
+
+ return ret;
}
-static void rockchip_snd_rxctrl(struct rk_i2s_dev *i2s, int on)
+static int rockchip_snd_rxctrl(struct rk_i2s_dev *i2s, int on)
{
unsigned int val = 0;
int retry = 10;
+ int ret = 0;
spin_lock(&i2s->lock);
if (on) {
- regmap_update_bits(i2s->regmap, I2S_DMACR,
- I2S_DMACR_RDE_ENABLE, I2S_DMACR_RDE_ENABLE);
-
- regmap_update_bits(i2s->regmap, I2S_XFER,
- I2S_XFER_TXS_START | I2S_XFER_RXS_START,
- I2S_XFER_TXS_START | I2S_XFER_RXS_START);
-
+ ret = regmap_update_bits(i2s->regmap, I2S_DMACR,
+ I2S_DMACR_RDE_ENABLE,
+ I2S_DMACR_RDE_ENABLE);
+ if (ret < 0)
+ goto end;
+
+ ret = regmap_update_bits(i2s->regmap, I2S_XFER,
+ I2S_XFER_TXS_START | I2S_XFER_RXS_START,
+ I2S_XFER_TXS_START | I2S_XFER_RXS_START);
+ if (ret < 0)
+ goto end;
i2s->rx_start = true;
} else {
i2s->rx_start = false;
- regmap_update_bits(i2s->regmap, I2S_DMACR,
- I2S_DMACR_RDE_ENABLE, I2S_DMACR_RDE_DISABLE);
+ ret = regmap_update_bits(i2s->regmap, I2S_DMACR,
+ I2S_DMACR_RDE_ENABLE,
+ I2S_DMACR_RDE_DISABLE);
+ if (ret < 0)
+ goto end;
if (!i2s->tx_start) {
- regmap_update_bits(i2s->regmap, I2S_XFER,
- I2S_XFER_TXS_START |
- I2S_XFER_RXS_START,
- I2S_XFER_TXS_STOP |
- I2S_XFER_RXS_STOP);
-
+ ret = regmap_update_bits(i2s->regmap, I2S_XFER,
+ I2S_XFER_TXS_START | I2S_XFER_RXS_START,
+ I2S_XFER_TXS_STOP | I2S_XFER_RXS_STOP);
+ if (ret < 0)
+ goto end;
udelay(150);
- regmap_update_bits(i2s->regmap, I2S_CLR,
- I2S_CLR_TXC | I2S_CLR_RXC,
- I2S_CLR_TXC | I2S_CLR_RXC);
-
+ ret = regmap_update_bits(i2s->regmap, I2S_CLR,
+ I2S_CLR_TXC | I2S_CLR_RXC,
+ I2S_CLR_TXC | I2S_CLR_RXC);
+ if (ret < 0)
+ goto end;
regmap_read(i2s->regmap, I2S_CLR, &val);
-
/* Should wait for clear operation to finish */
while (val) {
regmap_read(i2s->regmap, I2S_CLR, &val);
retry--;
if (!retry) {
dev_warn(i2s->dev, "fail to clear\n");
+ ret = -EBUSY;
break;
}
}
}
}
+end:
spin_unlock(&i2s->lock);
+ if (ret < 0)
+ dev_err(i2s->dev, "lrclk update failed\n");
+
+ return ret;
}
static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
@@ -199,13 +256,13 @@ static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
pm_runtime_get_sync(cpu_dai->dev);
mask = I2S_CKR_MSS_MASK;
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBS_CFS:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_BP_FP:
/* Set source clock in Master mode */
val = I2S_CKR_MSS_MASTER;
i2s->is_master_mode = true;
break;
- case SND_SOC_DAIFMT_CBM_CFM:
+ case SND_SOC_DAIFMT_BC_FC:
val = I2S_CKR_MSS_SLAVE;
i2s->is_master_mode = false;
break;
@@ -425,17 +482,25 @@ static int rockchip_i2s_trigger(struct snd_pcm_substream *substream,
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
- rockchip_snd_rxctrl(i2s, 1);
+ ret = rockchip_snd_rxctrl(i2s, 1);
else
- rockchip_snd_txctrl(i2s, 1);
+ ret = rockchip_snd_txctrl(i2s, 1);
+ if (ret < 0)
+ return ret;
+ i2s_pinctrl_select_bclk_on(i2s);
break;
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
- rockchip_snd_rxctrl(i2s, 0);
- else
- rockchip_snd_txctrl(i2s, 0);
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+ if (!i2s->tx_start)
+ i2s_pinctrl_select_bclk_off(i2s);
+ ret = rockchip_snd_rxctrl(i2s, 0);
+ } else {
+ if (!i2s->rx_start)
+ i2s_pinctrl_select_bclk_off(i2s);
+ ret = rockchip_snd_txctrl(i2s, 0);
+ }
break;
default:
ret = -EINVAL;
@@ -498,6 +563,7 @@ static struct snd_soc_dai_driver rockchip_i2s_dai = {
static const struct snd_soc_component_driver rockchip_i2s_component = {
.name = DRV_NAME,
+ .legacy_dai_naming = 1,
};
static bool rockchip_i2s_wr_reg(struct device *dev, unsigned int reg)
@@ -736,6 +802,22 @@ static int rockchip_i2s_probe(struct platform_device *pdev)
}
i2s->bclk_ratio = 64;
+ i2s->pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (!IS_ERR(i2s->pinctrl)) {
+ i2s->bclk_on = pinctrl_lookup_state(i2s->pinctrl, "bclk_on");
+ if (!IS_ERR_OR_NULL(i2s->bclk_on)) {
+ i2s->bclk_off = pinctrl_lookup_state(i2s->pinctrl, "bclk_off");
+ if (IS_ERR_OR_NULL(i2s->bclk_off)) {
+ dev_err(&pdev->dev, "failed to find i2s bclk_off\n");
+ ret = -EINVAL;
+ goto err_clk;
+ }
+ }
+ } else {
+ dev_dbg(&pdev->dev, "failed to find i2s pinctrl\n");
+ }
+
+ i2s_pinctrl_select_bclk_off(i2s);
dev_set_drvdata(&pdev->dev, i2s);
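The rockchip_i2s.c additions above gate BCLK through pinctrl: probe looks up optional "bclk_on"/"bclk_off" pin states, and the trigger path switches between them so the bit clock only toggles while a stream is running. A condensed sketch of the lookup-and-select pattern (state names as in the patch, struct and function names hypothetical):

#include <linux/err.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>

struct demo_i2s {				/* hypothetical driver state */
	struct device *dev;
	struct pinctrl *pinctrl;
	struct pinctrl_state *bclk_on;
	struct pinctrl_state *bclk_off;
};

static int demo_i2s_init_pinctrl(struct demo_i2s *i2s, struct platform_device *pdev)
{
	i2s->pinctrl = devm_pinctrl_get(&pdev->dev);
	if (IS_ERR(i2s->pinctrl))
		return 0;	/* pinctrl is optional; keep probing */

	i2s->bclk_on  = pinctrl_lookup_state(i2s->pinctrl, "bclk_on");
	i2s->bclk_off = pinctrl_lookup_state(i2s->pinctrl, "bclk_off");
	return 0;
}

static void demo_i2s_bclk(struct demo_i2s *i2s, bool on)
{
	struct pinctrl_state *state = on ? i2s->bclk_on : i2s->bclk_off;

	if (IS_ERR(i2s->pinctrl) || IS_ERR_OR_NULL(state))
		return;		/* states are optional */

	if (pinctrl_select_state(i2s->pinctrl, state))
		dev_err(i2s->dev, "bclk %s failed\n", on ? "enable" : "disable");
}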
diff --git a/sound/soc/rockchip/rockchip_i2s_tdm.c b/sound/soc/rockchip/rockchip_i2s_tdm.c
index 98700e75b82a..2550bd2a5e78 100644
--- a/sound/soc/rockchip/rockchip_i2s_tdm.c
+++ b/sound/soc/rockchip/rockchip_i2s_tdm.c
@@ -404,19 +404,17 @@ static int rockchip_i2s_tdm_set_fmt(struct snd_soc_dai *cpu_dai,
int ret;
bool is_tdm = i2s_tdm->tdm_mode;
- ret = pm_runtime_get_sync(cpu_dai->dev);
- if (ret < 0 && ret != -EACCES) {
- pm_runtime_put_noidle(cpu_dai->dev);
+ ret = pm_runtime_resume_and_get(cpu_dai->dev);
+ if (ret < 0 && ret != -EACCES)
return ret;
- }
mask = I2S_CKR_MSS_MASK;
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBC_CFC:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_BP_FP:
val = I2S_CKR_MSS_MASTER;
i2s_tdm->is_master_mode = true;
break;
- case SND_SOC_DAIFMT_CBP_CFP:
+ case SND_SOC_DAIFMT_BC_FC:
val = I2S_CKR_MSS_SLAVE;
i2s_tdm->is_master_mode = false;
break;
@@ -1120,6 +1118,7 @@ static const struct snd_soc_dai_ops rockchip_i2s_tdm_dai_ops = {
static const struct snd_soc_component_driver rockchip_i2s_tdm_component = {
.name = DRV_NAME,
+ .legacy_dai_naming = 1,
};
static bool rockchip_i2s_tdm_wr_reg(struct device *dev, unsigned int reg)
diff --git a/sound/soc/rockchip/rockchip_pdm.c b/sound/soc/rockchip/rockchip_pdm.c
index 64d9891b6434..a7549f827235 100644
--- a/sound/soc/rockchip/rockchip_pdm.c
+++ b/sound/soc/rockchip/rockchip_pdm.c
@@ -405,6 +405,7 @@ static struct snd_soc_dai_driver rockchip_pdm_dai = {
static const struct snd_soc_component_driver rockchip_pdm_component = {
.name = "rockchip-pdm",
+ .legacy_dai_naming = 1,
};
static int rockchip_pdm_runtime_suspend(struct device *dev)
@@ -688,11 +689,9 @@ static int rockchip_pdm_resume(struct device *dev)
struct rk_pdm_dev *pdm = dev_get_drvdata(dev);
int ret;
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- pm_runtime_put(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
return ret;
- }
ret = regcache_sync(pdm->regmap);
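rockchip_pdm.c here (and rockchip_i2s_tdm.c above) replace the pm_runtime_get_sync() plus manual pm_runtime_put_noidle()/put() dance with pm_runtime_resume_and_get(), which already drops its reference when the resume fails. Sketch of a resume handler using it; the struct and its regmap field are illustrative:

#include <linux/pm_runtime.h>
#include <linux/regmap.h>

struct demo_pdm {		/* hypothetical driver state */
	struct regmap *regmap;
};

static int demo_pdm_resume(struct device *dev)
{
	struct demo_pdm *pdm = dev_get_drvdata(dev);
	int ret;

	/* on failure the usage count is already balanced for us */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	ret = regcache_sync(pdm->regmap);

	pm_runtime_put(dev);

	return ret;
}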
diff --git a/sound/soc/rockchip/rockchip_spdif.c b/sound/soc/rockchip/rockchip_spdif.c
index d027ca4b1796..8bef572d3cbc 100644
--- a/sound/soc/rockchip/rockchip_spdif.c
+++ b/sound/soc/rockchip/rockchip_spdif.c
@@ -225,6 +225,7 @@ static struct snd_soc_dai_driver rk_spdif_dai = {
static const struct snd_soc_component_driver rk_spdif_component = {
.name = "rockchip-spdif",
+ .legacy_dai_naming = 1,
};
static bool rk_spdif_wr_reg(struct device *dev, unsigned int reg)
diff --git a/sound/soc/samsung/Kconfig b/sound/soc/samsung/Kconfig
index a2221ebb1b6a..2a61e620cd3b 100644
--- a/sound/soc/samsung/Kconfig
+++ b/sound/soc/samsung/Kconfig
@@ -33,7 +33,8 @@ config SND_SAMSUNG_I2S
config SND_SOC_SAMSUNG_NEO1973_WM8753
tristate "Audio support for Openmoko Neo1973 Smartphones (GTA02)"
- depends on MACH_NEO1973_GTA02
+ depends on MACH_NEO1973_GTA02 || COMPILE_TEST
+ depends on SND_SOC_I2C_AND_SPI
select SND_S3C24XX_I2S
select SND_SOC_WM8753
select SND_SOC_BT_SCO
@@ -43,7 +44,8 @@ config SND_SOC_SAMSUNG_NEO1973_WM8753
config SND_SOC_SAMSUNG_JIVE_WM8750
tristate "SoC I2S Audio support for Jive"
- depends on MACH_JIVE && I2C
+ depends on MACH_JIVE && I2C || COMPILE_TEST && ARM
+ depends on SND_SOC_I2C_AND_SPI
select SND_SOC_WM8750
select SND_S3C2412_SOC_I2S
help
@@ -69,7 +71,7 @@ config SND_SOC_SAMSUNG_SMDK_WM8994
config SND_SOC_SAMSUNG_S3C24XX_UDA134X
tristate "SoC I2S Audio support UDA134X wired to a S3C24XX"
- depends on ARCH_S3C24XX
+ depends on ARCH_S3C24XX || COMPILE_TEST
select SND_S3C24XX_I2S
select SND_SOC_L3
select SND_SOC_UDA134X
@@ -81,21 +83,24 @@ config SND_SOC_SAMSUNG_SIMTEC
config SND_SOC_SAMSUNG_SIMTEC_TLV320AIC23
tristate "SoC I2S Audio support for TLV320AIC23 on Simtec boards"
- depends on ARCH_S3C24XX && I2C
+ depends on ARCH_S3C24XX || COMPILE_TEST
+ depends on I2C
select SND_S3C24XX_I2S
select SND_SOC_TLV320AIC23_I2C
select SND_SOC_SAMSUNG_SIMTEC
config SND_SOC_SAMSUNG_SIMTEC_HERMES
tristate "SoC I2S Audio support for Simtec Hermes board"
- depends on ARCH_S3C24XX && I2C
+ depends on ARCH_S3C24XX || COMPILE_TEST
+ depends on I2C
select SND_S3C24XX_I2S
select SND_SOC_TLV320AIC3X
select SND_SOC_SAMSUNG_SIMTEC
config SND_SOC_SAMSUNG_H1940_UDA1380
tristate "Audio support for the HP iPAQ H1940"
- depends on ARCH_H1940 && I2C
+ depends on ARCH_H1940 || COMPILE_TEST
+ depends on I2C
select SND_S3C24XX_I2S
select SND_SOC_UDA1380
help
@@ -103,7 +108,8 @@ config SND_SOC_SAMSUNG_H1940_UDA1380
config SND_SOC_SAMSUNG_RX1950_UDA1380
tristate "Audio support for the HP iPAQ RX1950"
- depends on MACH_RX1950 && I2C
+ depends on MACH_RX1950 || COMPILE_TEST
+ depends on I2C
select SND_S3C24XX_I2S
select SND_SOC_UDA1380
help
diff --git a/sound/soc/samsung/aries_wm8994.c b/sound/soc/samsung/aries_wm8994.c
index bb0cf4244e00..e7d52d27132e 100644
--- a/sound/soc/samsung/aries_wm8994.c
+++ b/sound/soc/samsung/aries_wm8994.c
@@ -432,7 +432,6 @@ static const struct snd_soc_component_driver aries_component = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static struct snd_soc_dai_driver aries_ext_dai[] = {
@@ -628,8 +627,10 @@ static int aries_audio_probe(struct platform_device *pdev)
return -EINVAL;
codec = of_get_child_by_name(dev->of_node, "codec");
- if (!codec)
- return -EINVAL;
+ if (!codec) {
+ ret = -EINVAL;
+ goto out;
+ }
for_each_card_prelinks(card, i, dai_link) {
dai_link->codecs->of_node = of_parse_phandle(codec,
diff --git a/sound/soc/samsung/h1940_uda1380.c b/sound/soc/samsung/h1940_uda1380.c
index 907266aee839..fa45a54ab18f 100644
--- a/sound/soc/samsung/h1940_uda1380.c
+++ b/sound/soc/samsung/h1940_uda1380.c
@@ -8,7 +8,7 @@
// Based on version from Arnaud Patard <arnaud.patard@rtp-net.org>
#include <linux/types.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <sound/soc.h>
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index 70c827162be4..9505200f3d11 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -671,11 +671,11 @@ static int i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
return -EINVAL;
}
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBM_CFM:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_BC_FC:
tmp |= mod_slave;
break;
- case SND_SOC_DAIFMT_CBS_CFS:
+ case SND_SOC_DAIFMT_BP_FP:
/*
* Set default source clock in Master mode, only when the
* CLK_I2S_RCLK_SRC clock is not exposed so we ensure any
@@ -1143,6 +1143,8 @@ static const struct snd_soc_component_driver samsung_i2s_component = {
.suspend = i2s_suspend,
.resume = i2s_resume,
+
+ .legacy_dai_naming = 1,
};
#define SAMSUNG_I2S_FMTS (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE | \
diff --git a/sound/soc/samsung/neo1973_wm8753.c b/sound/soc/samsung/neo1973_wm8753.c
index c98b68567a89..e9f2334028bf 100644
--- a/sound/soc/samsung/neo1973_wm8753.c
+++ b/sound/soc/samsung/neo1973_wm8753.c
@@ -344,7 +344,7 @@ static int neo1973_probe(struct platform_device *pdev)
return devm_snd_soc_register_card(dev, &neo1973);
}
-struct platform_driver neo1973_audio = {
+static struct platform_driver neo1973_audio = {
.driver = {
.name = "neo1973-audio",
.pm = &snd_soc_pm_ops,
diff --git a/sound/soc/samsung/pcm.c b/sound/soc/samsung/pcm.c
index 4c4dfde0568f..e859252ae5e6 100644
--- a/sound/soc/samsung/pcm.c
+++ b/sound/soc/samsung/pcm.c
@@ -340,8 +340,8 @@ static int s3c_pcm_set_fmt(struct snd_soc_dai *cpu_dai,
goto exit;
}
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBS_CFS:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_BP_FP:
/* Nothing to do, Master by default */
break;
default:
@@ -480,7 +480,8 @@ static struct snd_soc_dai_driver s3c_pcm_dai[] = {
};
static const struct snd_soc_component_driver s3c_pcm_component = {
- .name = "s3c-pcm",
+ .name = "s3c-pcm",
+ .legacy_dai_naming = 1,
};
static int s3c_pcm_dev_probe(struct platform_device *pdev)
diff --git a/sound/soc/samsung/rx1950_uda1380.c b/sound/soc/samsung/rx1950_uda1380.c
index ff3acc94a454..abf28321f7d7 100644
--- a/sound/soc/samsung/rx1950_uda1380.c
+++ b/sound/soc/samsung/rx1950_uda1380.c
@@ -128,7 +128,7 @@ static int rx1950_startup(struct snd_pcm_substream *substream)
&hw_rates);
}
-struct gpio_desc *gpiod_speaker_power;
+static struct gpio_desc *gpiod_speaker_power;
static int rx1950_spk_power(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
@@ -228,7 +228,7 @@ static int rx1950_probe(struct platform_device *pdev)
return devm_snd_soc_register_card(dev, &rx1950_asoc);
}
-struct platform_driver rx1950_audio = {
+static struct platform_driver rx1950_audio = {
.driver = {
.name = "rx1950-audio",
.pm = &snd_soc_pm_ops,
diff --git a/sound/soc/samsung/s3c-i2s-v2.c b/sound/soc/samsung/s3c-i2s-v2.c
index de66cc422e6e..2b221cb0ed03 100644
--- a/sound/soc/samsung/s3c-i2s-v2.c
+++ b/sound/soc/samsung/s3c-i2s-v2.c
@@ -21,17 +21,6 @@
#include "regs-i2s-v2.h"
#include "s3c-i2s-v2.h"
-#undef S3C_IIS_V2_SUPPORTED
-
-#if defined(CONFIG_CPU_S3C2412) \
- || defined(CONFIG_ARCH_S3C64XX) || defined(CONFIG_CPU_S5PV210)
-#define S3C_IIS_V2_SUPPORTED
-#endif
-
-#ifndef S3C_IIS_V2_SUPPORTED
-#error Unsupported CPU model
-#endif
-
#define S3C2412_I2S_DEBUG_CON 0
static inline struct s3c_i2sv2_info *to_info(struct snd_soc_dai *cpu_dai)
@@ -252,12 +241,12 @@ static int s3c2412_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
iismod = readl(i2s->regs + S3C2412_IISMOD);
pr_debug("hw_params r: IISMOD: %x \n", iismod);
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBM_CFM:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_BC_FC:
i2s->master = 0;
iismod |= S3C2412_IISMOD_SLAVE;
break;
- case SND_SOC_DAIFMT_CBS_CFS:
+ case SND_SOC_DAIFMT_BP_FP:
i2s->master = 1;
iismod &= ~S3C2412_IISMOD_SLAVE;
break;
diff --git a/sound/soc/samsung/s3c2412-i2s.c b/sound/soc/samsung/s3c2412-i2s.c
index ec1c6f9d76ac..0579a352961c 100644
--- a/sound/soc/samsung/s3c2412-i2s.c
+++ b/sound/soc/samsung/s3c2412-i2s.c
@@ -192,9 +192,10 @@ static struct snd_soc_dai_driver s3c2412_i2s_dai = {
};
static const struct snd_soc_component_driver s3c2412_i2s_component = {
- .name = "s3c2412-i2s",
- .suspend = s3c2412_i2s_suspend,
- .resume = s3c2412_i2s_resume,
+ .name = "s3c2412-i2s",
+ .suspend = s3c2412_i2s_suspend,
+ .resume = s3c2412_i2s_resume,
+ .legacy_dai_naming = 1,
};
static int s3c2412_iis_dev_probe(struct platform_device *pdev)
diff --git a/sound/soc/samsung/s3c24xx-i2s.c b/sound/soc/samsung/s3c24xx-i2s.c
index 0f46304eaa4f..7b7bbe007acd 100644
--- a/sound/soc/samsung/s3c24xx-i2s.c
+++ b/sound/soc/samsung/s3c24xx-i2s.c
@@ -12,7 +12,6 @@
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/io.h>
-#include <linux/gpio.h>
#include <linux/module.h>
#include <sound/soc.h>
@@ -169,11 +168,11 @@ static int s3c24xx_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
iismod = readl(s3c24xx_i2s.regs + S3C2410_IISMOD);
pr_debug("hw_params r: IISMOD: %x \n", iismod);
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBM_CFM:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_BC_FC:
iismod |= S3C2410_IISMOD_SLAVE;
break;
- case SND_SOC_DAIFMT_CBS_CFS:
+ case SND_SOC_DAIFMT_BP_FP:
iismod &= ~S3C2410_IISMOD_SLAVE;
break;
default:
@@ -415,9 +414,10 @@ static struct snd_soc_dai_driver s3c24xx_i2s_dai = {
};
static const struct snd_soc_component_driver s3c24xx_i2s_component = {
- .name = "s3c24xx-i2s",
- .suspend = s3c24xx_i2s_suspend,
- .resume = s3c24xx_i2s_resume,
+ .name = "s3c24xx-i2s",
+ .suspend = s3c24xx_i2s_suspend,
+ .resume = s3c24xx_i2s_resume,
+ .legacy_dai_naming = 1,
};
static int s3c24xx_iis_dev_probe(struct platform_device *pdev)
diff --git a/sound/soc/samsung/snow.c b/sound/soc/samsung/snow.c
index 02372109c251..da342da03880 100644
--- a/sound/soc/samsung/snow.c
+++ b/sound/soc/samsung/snow.c
@@ -216,7 +216,7 @@ static int snow_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, ret,
"snd_soc_register_card failed\n");
- return ret;
+ return 0;
}
static int snow_remove(struct platform_device *pdev)
diff --git a/sound/soc/samsung/spdif.c b/sound/soc/samsung/spdif.c
index 47b6d19e43ff..7d815e237e5c 100644
--- a/sound/soc/samsung/spdif.c
+++ b/sound/soc/samsung/spdif.c
@@ -352,9 +352,10 @@ static struct snd_soc_dai_driver samsung_spdif_dai = {
};
static const struct snd_soc_component_driver samsung_spdif_component = {
- .name = "samsung-spdif",
- .suspend = spdif_suspend,
- .resume = spdif_resume,
+ .name = "samsung-spdif",
+ .suspend = spdif_suspend,
+ .resume = spdif_resume,
+ .legacy_dai_naming = 1,
};
static int spdif_probe(struct platform_device *pdev)
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index e9a1eb6bdf66..f3edc2e3d9d7 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -1646,10 +1646,10 @@ static int fsi_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
int ret;
/* set clock master audio interface */
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBM_CFM:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_BC_FC:
break;
- case SND_SOC_DAIFMT_CBS_CFS:
+ case SND_SOC_DAIFMT_BP_FP:
fsi->clk_master = 1; /* cpu is master */
break;
default:
diff --git a/sound/soc/sh/hac.c b/sound/soc/sh/hac.c
index 475fc984f8c5..46d145cbaf29 100644
--- a/sound/soc/sh/hac.c
+++ b/sound/soc/sh/hac.c
@@ -307,7 +307,8 @@ static struct snd_soc_dai_driver sh4_hac_dai[] = {
};
static const struct snd_soc_component_driver sh4_hac_component = {
- .name = "sh4-hac",
+ .name = "sh4-hac",
+ .legacy_dai_naming = 1,
};
static int hac_soc_platform_probe(struct platform_device *pdev)
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index eb762ab94d3e..7e380d71b0f8 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -756,10 +756,10 @@ static int rsnd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
/* set clock master for audio interface */
switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
- case SND_SOC_DAIFMT_CBP_CFP:
+ case SND_SOC_DAIFMT_BC_FC:
rdai->clk_master = 0;
break;
- case SND_SOC_DAIFMT_CBC_CFC:
+ case SND_SOC_DAIFMT_BP_FP:
rdai->clk_master = 1; /* cpu is master */
break;
default:
@@ -1813,11 +1813,12 @@ int rsnd_kctrl_new(struct rsnd_mod *mod,
* snd_soc_component
*/
static const struct snd_soc_component_driver rsnd_soc_component = {
- .name = "rsnd",
- .probe = rsnd_debugfs_probe,
- .hw_params = rsnd_hw_params,
- .hw_free = rsnd_hw_free,
- .pointer = rsnd_pointer,
+ .name = "rsnd",
+ .probe = rsnd_debugfs_probe,
+ .hw_params = rsnd_hw_params,
+ .hw_free = rsnd_hw_free,
+ .pointer = rsnd_pointer,
+ .legacy_dai_naming = 1,
};
static int rsnd_rdai_continuance_probe(struct rsnd_priv *priv,
@@ -1968,19 +1969,26 @@ static int rsnd_remove(struct platform_device *pdev)
rsnd_cmd_remove,
rsnd_adg_remove,
};
- int ret = 0, i;
+ int i;
pm_runtime_disable(&pdev->dev);
for_each_rsnd_dai(rdai, priv, i) {
- ret |= rsnd_dai_call(remove, &rdai->playback, priv);
- ret |= rsnd_dai_call(remove, &rdai->capture, priv);
+ int ret;
+
+ ret = rsnd_dai_call(remove, &rdai->playback, priv);
+ if (ret)
+ dev_warn(&pdev->dev, "Failed to remove playback dai #%d\n", i);
+
+ ret = rsnd_dai_call(remove, &rdai->capture, priv);
+ if (ret)
+ dev_warn(&pdev->dev, "Failed to remove capture dai #%d\n", i);
}
for (i = 0; i < ARRAY_SIZE(remove_func); i++)
remove_func[i](priv);
- return ret;
+ return 0;
}
static int __maybe_unused rsnd_suspend(struct device *dev)
diff --git a/sound/soc/sh/rcar/ssiu.c b/sound/soc/sh/rcar/ssiu.c
index 4b8a63e336c7..281bc20d4c5d 100644
--- a/sound/soc/sh/rcar/ssiu.c
+++ b/sound/soc/sh/rcar/ssiu.c
@@ -67,6 +67,8 @@ static void rsnd_ssiu_busif_err_irq_ctrl(struct rsnd_mod *mod, int enable)
shift = 1;
offset = 1;
break;
+ default:
+ return;
}
for (i = 0; i < 4; i++) {
@@ -417,6 +419,7 @@ static struct rsnd_mod_ops rsnd_ssiu_ops_gen2 = {
.name = SSIU_NAME,
.dma_req = rsnd_ssiu_dma_req,
.init = rsnd_ssiu_init_gen2,
+ .quit = rsnd_ssiu_quit,
.start = rsnd_ssiu_start_gen2,
.stop = rsnd_ssiu_stop_gen2,
.get_status = rsnd_ssiu_get_status,
diff --git a/sound/soc/sh/rz-ssi.c b/sound/soc/sh/rz-ssi.c
index e392de7a262e..7ace0c0db5b1 100644
--- a/sound/soc/sh/rz-ssi.c
+++ b/sound/soc/sh/rz-ssi.c
@@ -767,7 +767,7 @@ static int rz_ssi_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
struct rz_ssi_priv *ssi = snd_soc_dai_get_drvdata(dai);
switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
- case SND_SOC_DAIFMT_CBC_CFC:
+ case SND_SOC_DAIFMT_BP_FP:
break;
default:
dev_err(ssi->dev, "Codec should be clk and frame consumer\n");
@@ -906,10 +906,11 @@ static struct snd_soc_dai_driver rz_ssi_soc_dai[] = {
};
static const struct snd_soc_component_driver rz_ssi_soc_component = {
- .name = "rz-ssi",
- .open = rz_ssi_pcm_open,
- .pointer = rz_ssi_pcm_pointer,
- .pcm_construct = rz_ssi_pcm_new,
+ .name = "rz-ssi",
+ .open = rz_ssi_pcm_open,
+ .pointer = rz_ssi_pcm_pointer,
+ .pcm_construct = rz_ssi_pcm_new,
+ .legacy_dai_naming = 1,
};
static int rz_ssi_probe(struct platform_device *pdev)
@@ -1016,32 +1017,36 @@ static int rz_ssi_probe(struct platform_device *pdev)
ssi->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(ssi->rstc)) {
- rz_ssi_release_dma_channels(ssi);
- return PTR_ERR(ssi->rstc);
+ ret = PTR_ERR(ssi->rstc);
+ goto err_reset;
}
reset_control_deassert(ssi->rstc);
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0) {
- rz_ssi_release_dma_channels(ssi);
- pm_runtime_disable(ssi->dev);
- reset_control_assert(ssi->rstc);
- return dev_err_probe(ssi->dev, ret, "pm_runtime_resume_and_get failed\n");
+ dev_err(&pdev->dev, "pm_runtime_resume_and_get failed\n");
+ goto err_pm;
}
ret = devm_snd_soc_register_component(&pdev->dev, &rz_ssi_soc_component,
rz_ssi_soc_dai,
ARRAY_SIZE(rz_ssi_soc_dai));
if (ret < 0) {
- rz_ssi_release_dma_channels(ssi);
-
- pm_runtime_put(ssi->dev);
- pm_runtime_disable(ssi->dev);
- reset_control_assert(ssi->rstc);
dev_err(&pdev->dev, "failed to register snd component\n");
+ goto err_snd_soc;
}
+ return 0;
+
+err_snd_soc:
+ pm_runtime_put(ssi->dev);
+err_pm:
+ pm_runtime_disable(ssi->dev);
+ reset_control_assert(ssi->rstc);
+err_reset:
+ rz_ssi_release_dma_channels(ssi);
+
return ret;
}
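The rz-ssi probe rework above replaces duplicated cleanup in every error branch with a single goto ladder that unwinds resources in reverse order of acquisition. A condensed sketch of the same pattern (acquire_*/release_* are hypothetical placeholders for the DMA, reset/runtime-PM and component-registration steps):

#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	int ret;

	ret = acquire_a(pdev);		/* e.g. DMA channels */
	if (ret)
		return ret;

	ret = acquire_b(pdev);		/* e.g. reset deassert + runtime PM */
	if (ret)
		goto err_a;

	ret = acquire_c(pdev);		/* e.g. component registration */
	if (ret)
		goto err_b;

	return 0;

err_b:
	release_b(pdev);		/* undo step b */
err_a:
	release_a(pdev);		/* undo step a */
	return ret;
}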
diff --git a/sound/soc/sh/siu_pcm.c b/sound/soc/sh/siu_pcm.c
index 0a8a3c314a73..f15ff36e7934 100644
--- a/sound/soc/sh/siu_pcm.c
+++ b/sound/soc/sh/siu_pcm.c
@@ -540,13 +540,14 @@ static void siu_pcm_free(struct snd_soc_component *component,
}
const struct snd_soc_component_driver siu_component = {
- .name = DRV_NAME,
- .open = siu_pcm_open,
- .close = siu_pcm_close,
- .prepare = siu_pcm_prepare,
- .trigger = siu_pcm_trigger,
- .pointer = siu_pcm_pointer_dma,
- .pcm_construct = siu_pcm_new,
- .pcm_destruct = siu_pcm_free,
+ .name = DRV_NAME,
+ .open = siu_pcm_open,
+ .close = siu_pcm_close,
+ .prepare = siu_pcm_prepare,
+ .trigger = siu_pcm_trigger,
+ .pointer = siu_pcm_pointer_dma,
+ .pcm_construct = siu_pcm_new,
+ .pcm_destruct = siu_pcm_free,
+ .legacy_dai_naming = 1,
};
EXPORT_SYMBOL_GPL(siu_component);
diff --git a/sound/soc/sh/ssi.c b/sound/soc/sh/ssi.c
index 15b01bcefca5..96cf523c2273 100644
--- a/sound/soc/sh/ssi.c
+++ b/sound/soc/sh/ssi.c
@@ -291,16 +291,16 @@ static int ssi_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
return -EINVAL;
}
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBM_CFM:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_BC_FC:
break;
- case SND_SOC_DAIFMT_CBS_CFM:
+ case SND_SOC_DAIFMT_BP_FC:
ssicr |= CR_SCK_MASTER;
break;
- case SND_SOC_DAIFMT_CBM_CFS:
+ case SND_SOC_DAIFMT_BC_FP:
ssicr |= CR_SWS_MASTER;
break;
- case SND_SOC_DAIFMT_CBS_CFS:
+ case SND_SOC_DAIFMT_BP_FP:
ssicr |= CR_SWS_MASTER | CR_SCK_MASTER;
break;
default:
@@ -377,7 +377,8 @@ static struct snd_soc_dai_driver sh4_ssi_dai[] = {
};
static const struct snd_soc_component_driver sh4_ssi_component = {
- .name = "sh4-ssi",
+ .name = "sh4-ssi",
+ .legacy_dai_naming = 1,
};
static int sh4_soc_dai_probe(struct platform_device *pdev)
diff --git a/sound/soc/soc-card.c b/sound/soc/soc-card.c
index 4158f5aacfd3..285ab4c9c716 100644
--- a/sound/soc/soc-card.c
+++ b/sound/soc/soc-card.c
@@ -197,6 +197,12 @@ int snd_soc_card_late_probe(struct snd_soc_card *card)
return 0;
}
+void snd_soc_card_fixup_controls(struct snd_soc_card *card)
+{
+ if (card->fixup_controls)
+ card->fixup_controls(card);
+}
+
int snd_soc_card_remove(struct snd_soc_card *card)
{
int ret = 0;
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 9574f86dd4de..e824ff1a9fc0 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1214,7 +1214,6 @@ int snd_soc_runtime_set_dai_fmt(struct snd_soc_pcm_runtime *rtd,
{
struct snd_soc_dai *cpu_dai;
struct snd_soc_dai *codec_dai;
- unsigned int inv_dai_fmt;
unsigned int i;
int ret;
@@ -1227,18 +1226,11 @@ int snd_soc_runtime_set_dai_fmt(struct snd_soc_pcm_runtime *rtd,
return ret;
}
- /*
- * Flip the polarity for the "CPU" end of a CODEC<->CODEC link
- */
- inv_dai_fmt = snd_soc_daifmt_clock_provider_flipped(dai_fmt);
+ /* Flip the polarity for the "CPU" end of link */
+ dai_fmt = snd_soc_daifmt_clock_provider_flipped(dai_fmt);
for_each_rtd_cpu_dais(rtd, i, cpu_dai) {
- unsigned int fmt = dai_fmt;
-
- if (snd_soc_component_is_codec(cpu_dai->component))
- fmt = inv_dai_fmt;
-
- ret = snd_soc_dai_set_fmt(cpu_dai, fmt);
+ ret = snd_soc_dai_set_fmt(cpu_dai, dai_fmt);
if (ret != 0 && ret != -ENOTSUPP)
return ret;
}
@@ -2074,6 +2066,7 @@ static int snd_soc_bind_card(struct snd_soc_card *card)
goto probe_end;
snd_soc_dapm_new_widgets(card);
+ snd_soc_card_fixup_controls(card);
ret = snd_card_register(card->snd_card);
if (ret < 0) {
@@ -2326,14 +2319,12 @@ EXPORT_SYMBOL_GPL(snd_soc_register_card);
* @card: Card to unregister
*
*/
-int snd_soc_unregister_card(struct snd_soc_card *card)
+void snd_soc_unregister_card(struct snd_soc_card *card)
{
mutex_lock(&client_mutex);
snd_soc_unbind_card(card, true);
mutex_unlock(&client_mutex);
dev_dbg(card->dev, "ASoC: Unregistered card '%s'\n", card->name);
-
- return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_unregister_card);
@@ -2497,7 +2488,7 @@ static int snd_soc_register_dais(struct snd_soc_component *component,
for (i = 0; i < count; i++) {
dai = snd_soc_register_dai(component, dai_drv + i, count == 1 &&
- !component->driver->non_legacy_dai_naming);
+ component->driver->legacy_dai_naming);
if (dai == NULL) {
ret = -ENOMEM;
goto err;
@@ -3312,6 +3303,61 @@ int snd_soc_of_get_dai_name(struct device_node *of_node,
}
EXPORT_SYMBOL_GPL(snd_soc_of_get_dai_name);
+static void __snd_soc_of_put_component(struct snd_soc_dai_link_component *component)
+{
+ if (component->of_node) {
+ of_node_put(component->of_node);
+ component->of_node = NULL;
+ }
+}
+
+static int __snd_soc_of_get_dai_link_component_alloc(
+ struct device *dev, struct device_node *of_node,
+ struct snd_soc_dai_link_component **ret_component,
+ int *ret_num)
+{
+ struct snd_soc_dai_link_component *component;
+ int num;
+
+ /* Count the number of CPUs/CODECs */
+ num = of_count_phandle_with_args(of_node, "sound-dai", "#sound-dai-cells");
+ if (num <= 0) {
+ if (num == -ENOENT)
+ dev_err(dev, "No 'sound-dai' property\n");
+ else
+ dev_err(dev, "Bad phandle in 'sound-dai'\n");
+ return num;
+ }
+ component = devm_kcalloc(dev, num, sizeof(*component), GFP_KERNEL);
+ if (!component)
+ return -ENOMEM;
+
+ *ret_component = component;
+ *ret_num = num;
+
+ return 0;
+}
+
+static int __snd_soc_of_get_dai_link_component_parse(
+ struct device_node *of_node,
+ struct snd_soc_dai_link_component *component, int index)
+{
+ struct of_phandle_args args;
+ int ret;
+
+ ret = of_parse_phandle_with_args(of_node, "sound-dai", "#sound-dai-cells",
+ index, &args);
+ if (ret)
+ return ret;
+
+ ret = snd_soc_get_dai_name(&args, &component->dai_name);
+ if (ret < 0)
+ return ret;
+
+ component->of_node = args.np;
+ return 0;
+}
+
/*
* snd_soc_of_put_dai_link_codecs - Dereference device nodes in the codecs array
* @dai_link: DAI link
@@ -3323,12 +3369,8 @@ void snd_soc_of_put_dai_link_codecs(struct snd_soc_dai_link *dai_link)
struct snd_soc_dai_link_component *component;
int index;
- for_each_link_codecs(dai_link, index, component) {
- if (!component->of_node)
- break;
- of_node_put(component->of_node);
- component->of_node = NULL;
- }
+ for_each_link_codecs(dai_link, index, component)
+ __snd_soc_of_put_component(component);
}
EXPORT_SYMBOL_GPL(snd_soc_of_put_dai_link_codecs);
@@ -3350,41 +3392,19 @@ int snd_soc_of_get_dai_link_codecs(struct device *dev,
struct device_node *of_node,
struct snd_soc_dai_link *dai_link)
{
- struct of_phandle_args args;
struct snd_soc_dai_link_component *component;
- char *name;
- int index, num_codecs, ret;
-
- /* Count the number of CODECs */
- name = "sound-dai";
- num_codecs = of_count_phandle_with_args(of_node, name,
- "#sound-dai-cells");
- if (num_codecs <= 0) {
- if (num_codecs == -ENOENT)
- dev_err(dev, "No 'sound-dai' property\n");
- else
- dev_err(dev, "Bad phandle in 'sound-dai'\n");
- return num_codecs;
- }
- component = devm_kcalloc(dev,
- num_codecs, sizeof(*component),
- GFP_KERNEL);
- if (!component)
- return -ENOMEM;
- dai_link->codecs = component;
- dai_link->num_codecs = num_codecs;
+ int index, ret;
+
+ ret = __snd_soc_of_get_dai_link_component_alloc(dev, of_node,
+ &dai_link->codecs, &dai_link->num_codecs);
+ if (ret < 0)
+ return ret;
/* Parse the list */
for_each_link_codecs(dai_link, index, component) {
- ret = of_parse_phandle_with_args(of_node, name,
- "#sound-dai-cells",
- index, &args);
+ ret = __snd_soc_of_get_dai_link_component_parse(of_node, component, index);
if (ret)
goto err;
- component->of_node = args.np;
- ret = snd_soc_get_dai_name(&args, &component->dai_name);
- if (ret < 0)
- goto err;
}
return 0;
err:
@@ -3406,12 +3426,8 @@ void snd_soc_of_put_dai_link_cpus(struct snd_soc_dai_link *dai_link)
struct snd_soc_dai_link_component *component;
int index;
- for_each_link_cpus(dai_link, index, component) {
- if (!component->of_node)
- break;
- of_node_put(component->of_node);
- component->of_node = NULL;
- }
+ for_each_link_cpus(dai_link, index, component)
+ __snd_soc_of_put_component(component);
}
EXPORT_SYMBOL_GPL(snd_soc_of_put_dai_link_cpus);
@@ -3430,45 +3446,24 @@ int snd_soc_of_get_dai_link_cpus(struct device *dev,
struct device_node *of_node,
struct snd_soc_dai_link *dai_link)
{
- struct of_phandle_args args;
struct snd_soc_dai_link_component *component;
- char *name;
- int index, num_codecs, ret;
-
- /* Count the number of CODECs */
- name = "sound-dai";
- num_codecs = of_count_phandle_with_args(of_node, name,
- "#sound-dai-cells");
- if (num_codecs <= 0) {
- if (num_codecs == -ENOENT)
- dev_err(dev, "No 'sound-dai' property\n");
- else
- dev_err(dev, "Bad phandle in 'sound-dai'\n");
- return num_codecs;
- }
- component = devm_kcalloc(dev,
- num_codecs, sizeof(*component),
- GFP_KERNEL);
- if (!component)
- return -ENOMEM;
- dai_link->cpus = component;
- dai_link->num_cpus = num_codecs;
+ int index, ret;
+
+ /* Count the number of CPUs */
+ ret = __snd_soc_of_get_dai_link_component_alloc(dev, of_node,
+ &dai_link->cpus, &dai_link->num_cpus);
+ if (ret < 0)
+ return ret;
/* Parse the list */
for_each_link_cpus(dai_link, index, component) {
- ret = of_parse_phandle_with_args(of_node, name,
- "#sound-dai-cells",
- index, &args);
+ ret = __snd_soc_of_get_dai_link_component_parse(of_node, component, index);
if (ret)
goto err;
- component->of_node = args.np;
- ret = snd_soc_get_dai_name(&args, &component->dai_name);
- if (ret < 0)
- goto err;
}
return 0;
err:
- snd_soc_of_put_dai_link_codecs(dai_link);
+ snd_soc_of_put_dai_link_cpus(dai_link);
dai_link->cpus = NULL;
dai_link->num_cpus = 0;
return ret;
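Both sound-dai parsers now share the counting/allocation and per-phandle parsing through the new __snd_soc_of_get_dai_link_component_* helpers, and the CPU variant's error path releases CPU nodes instead of codec nodes. A hedged sketch of how a machine driver would call the exported pair (using one node for both ends is purely illustrative):

#include <linux/of.h>
#include <sound/soc.h>

static int example_parse_link(struct device *dev, struct device_node *np,
			      struct snd_soc_dai_link *link)
{
	int ret;

	ret = snd_soc_of_get_dai_link_cpus(dev, np, link);	/* fills link->cpus */
	if (ret)
		return ret;

	ret = snd_soc_of_get_dai_link_codecs(dev, np, link);	/* fills link->codecs */
	if (ret)
		snd_soc_of_put_dai_link_cpus(link);		/* drop cpu of_node refs */

	return ret;
}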
diff --git a/sound/soc/soc-dai.c b/sound/soc/soc-dai.c
index 6078afe335f8..d530e8c2b77b 100644
--- a/sound/soc/soc-dai.c
+++ b/sound/soc/soc-dai.c
@@ -208,8 +208,7 @@ int snd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
int ret = -ENOTSUPP;
- if (dai->driver->ops &&
- dai->driver->ops->set_fmt)
+ if (dai->driver->ops && dai->driver->ops->set_fmt)
ret = dai->driver->ops->set_fmt(dai, fmt);
return soc_dai_ret(dai, ret);
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index a8e842e02cdc..b05231414c1d 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -370,14 +370,14 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
case snd_soc_dapm_mixer_named_ctl:
mc = (struct soc_mixer_control *)kcontrol->private_value;
- if (mc->autodisable && snd_soc_volsw_is_stereo(mc))
- dev_warn(widget->dapm->dev,
- "ASoC: Unsupported stereo autodisable control '%s'\n",
- ctrl_name);
-
if (mc->autodisable) {
struct snd_soc_dapm_widget template;
+ if (snd_soc_volsw_is_stereo(mc))
+ dev_warn(widget->dapm->dev,
+ "ASoC: Unsupported stereo autodisable control '%s'\n",
+ ctrl_name);
+
name = kasprintf(GFP_KERNEL, "%s %s", ctrl_name,
"Autodisable");
if (!name) {
diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
index d867f449d82d..bd88de056358 100644
--- a/sound/soc/soc-ops.c
+++ b/sound/soc/soc-ops.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
-#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/bitops.h>
#include <linux/ctype.h>
@@ -177,20 +176,28 @@ int snd_soc_info_volsw(struct snd_kcontrol *kcontrol,
{
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
- int platform_max;
-
- if (!mc->platform_max)
- mc->platform_max = mc->max;
- platform_max = mc->platform_max;
-
- if (platform_max == 1 && !strstr(kcontrol->id.name, " Volume"))
- uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
- else
+ const char *vol_string = NULL;
+ int max;
+
+ max = uinfo->value.integer.max = mc->max - mc->min;
+ if (mc->platform_max && mc->platform_max < max)
+ max = mc->platform_max;
+
+ if (max == 1) {
+ /* Even two-value controls ending in Volume should always be integer */
+ vol_string = strstr(kcontrol->id.name, " Volume");
+ if (vol_string && !strcmp(vol_string, " Volume"))
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ else
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
+ } else {
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ }
uinfo->count = snd_soc_volsw_is_stereo(mc) ? 2 : 1;
uinfo->value.integer.min = 0;
- uinfo->value.integer.max = platform_max - mc->min;
+ uinfo->value.integer.max = max;
+
return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_info_volsw);
@@ -203,7 +210,8 @@ EXPORT_SYMBOL_GPL(snd_soc_info_volsw);
* Callback to provide information about a single mixer control, or a double
* mixer control that spans 2 registers of the SX TLV type. SX TLV controls
* have a range that represents both positive and negative values either side
- * of zero but without a sign bit.
+ * of zero but without a sign bit. min is the minimum register value, max is
+ * the number of steps.
*
* Returns 0 for success.
*/
@@ -212,12 +220,21 @@ int snd_soc_info_volsw_sx(struct snd_kcontrol *kcontrol,
{
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
+ int max;
- snd_soc_info_volsw(kcontrol, uinfo);
- /* Max represents the number of levels in an SX control not the
- * maximum value, so add the minimum value back on
- */
- uinfo->value.integer.max += mc->min;
+ if (mc->platform_max)
+ max = mc->platform_max;
+ else
+ max = mc->max;
+
+ if (max == 1 && !strstr(kcontrol->id.name, " Volume"))
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
+ else
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+
+ uinfo->count = snd_soc_volsw_is_stereo(mc) ? 2 : 1;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = max;
return 0;
}
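After the rework above, snd_soc_info_volsw() reports a 0..(max - min) integer range (optionally clamped by platform_max) and only falls back to a boolean type for two-step controls whose name does not end in " Volume", while snd_soc_info_volsw_sx() reports 0..max directly because max already counts steps for SX controls. A small worked example (control values are hypothetical):

/*
 * Hypothetical mono control: mc->min = 4, mc->max = 12, platform_max unset.
 * snd_soc_info_volsw() now reports
 *	uinfo->value.integer.min = 0
 *	uinfo->value.integer.max = 12 - 4 = 8
 * so userspace value 0 maps (roughly) to register value 4, and value 8 to 12.
 *
 * Hypothetical SX control: mc->min = 0x3f, mc->max = 0x20 (32 steps).
 * snd_soc_info_volsw_sx() reports min = 0, max = 0x20; the register
 * minimum is no longer added back because max is a step count, not a
 * top-of-range register value.
 */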
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index a827cc3c158a..4f60c0a83311 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1209,8 +1209,7 @@ static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe,
return -EINVAL;
}
if (fe_substream->pcm->nonatomic && !be_substream->pcm->nonatomic) {
- dev_warn(be->dev, "%s: FE is nonatomic but BE is not, forcing BE as nonatomic\n",
- __func__);
+ dev_dbg(be->dev, "FE is nonatomic but BE is not, forcing BE as nonatomic\n");
be_substream->pcm->nonatomic = 1;
}
@@ -1318,6 +1317,9 @@ static struct snd_soc_pcm_runtime *dpcm_get_be(struct snd_soc_card *card,
if (!be->dai_link->no_pcm)
continue;
+ if (!snd_soc_dpcm_get_substream(be, stream))
+ continue;
+
for_each_rtd_dais(be, i, dai) {
w = snd_soc_dai_get_widget(dai, stream);
diff --git a/sound/soc/soc-topology-test.c b/sound/soc/soc-topology-test.c
index ae3968161509..2cd3540cec04 100644
--- a/sound/soc/soc-topology-test.c
+++ b/sound/soc/soc-topology-test.c
@@ -104,7 +104,6 @@ static const struct snd_soc_component_driver test_component = {
.name = "sound-soc-topology-test",
.probe = d_probe,
.remove = d_remove,
- .non_legacy_dai_naming = 1,
};
/* ===== TOPOLOGY TEMPLATES ================================================= */
@@ -238,7 +237,6 @@ static int d_probe_null_comp(struct snd_soc_component *component)
static const struct snd_soc_component_driver test_component_null_comp = {
.name = "sound-soc-topology-test",
.probe = d_probe_null_comp,
- .non_legacy_dai_naming = 1,
};
static void snd_soc_tplg_test_load_with_null_comp(struct kunit *test)
@@ -271,9 +269,7 @@ static void snd_soc_tplg_test_load_with_null_comp(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, ret);
/* cleanup */
- ret = snd_soc_unregister_card(&kunit_comp->card);
- KUNIT_EXPECT_EQ(test, 0, ret);
-
+ snd_soc_unregister_card(&kunit_comp->card);
snd_soc_unregister_component(test_dev);
}
@@ -315,8 +311,7 @@ static void snd_soc_tplg_test_load_with_null_ops(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, ret);
/* cleanup */
- ret = snd_soc_unregister_card(&kunit_comp->card);
- KUNIT_EXPECT_EQ(test, 0, ret);
+ snd_soc_unregister_card(&kunit_comp->card);
snd_soc_unregister_component(test_dev);
}
@@ -346,7 +341,6 @@ static int d_probe_null_fw(struct snd_soc_component *component)
static const struct snd_soc_component_driver test_component_null_fw = {
.name = "sound-soc-topology-test",
.probe = d_probe_null_fw,
- .non_legacy_dai_naming = 1,
};
static void snd_soc_tplg_test_load_with_null_fw(struct kunit *test)
@@ -379,8 +373,7 @@ static void snd_soc_tplg_test_load_with_null_fw(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, ret);
/* cleanup */
- ret = snd_soc_unregister_card(&kunit_comp->card);
- KUNIT_EXPECT_EQ(test, 0, ret);
+ snd_soc_unregister_card(&kunit_comp->card);
snd_soc_unregister_component(test_dev);
}
@@ -428,8 +421,7 @@ static void snd_soc_tplg_test_load_empty_tplg(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, ret);
/* cleanup */
- ret = snd_soc_unregister_card(&kunit_comp->card);
- KUNIT_EXPECT_EQ(test, 0, ret);
+ snd_soc_unregister_card(&kunit_comp->card);
snd_soc_unregister_component(test_dev);
}
@@ -484,8 +476,7 @@ static void snd_soc_tplg_test_load_empty_tplg_bad_magic(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, ret);
/* cleanup */
- ret = snd_soc_unregister_card(&kunit_comp->card);
- KUNIT_EXPECT_EQ(test, 0, ret);
+ snd_soc_unregister_card(&kunit_comp->card);
snd_soc_unregister_component(test_dev);
}
@@ -540,8 +531,7 @@ static void snd_soc_tplg_test_load_empty_tplg_bad_abi(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, ret);
/* cleanup */
- ret = snd_soc_unregister_card(&kunit_comp->card);
- KUNIT_EXPECT_EQ(test, 0, ret);
+ snd_soc_unregister_card(&kunit_comp->card);
snd_soc_unregister_component(test_dev);
}
@@ -596,8 +586,7 @@ static void snd_soc_tplg_test_load_empty_tplg_bad_size(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, ret);
/* cleanup */
- ret = snd_soc_unregister_card(&kunit_comp->card);
- KUNIT_EXPECT_EQ(test, 0, ret);
+ snd_soc_unregister_card(&kunit_comp->card);
snd_soc_unregister_component(test_dev);
}
@@ -655,8 +644,7 @@ static void snd_soc_tplg_test_load_empty_tplg_bad_payload_size(struct kunit *tes
/* cleanup */
snd_soc_unregister_component(test_dev);
- ret = snd_soc_unregister_card(&kunit_comp->card);
- KUNIT_EXPECT_EQ(test, 0, ret);
+ snd_soc_unregister_card(&kunit_comp->card);
}
// TEST CASE
@@ -704,8 +692,7 @@ static void snd_soc_tplg_test_load_pcm_tplg(struct kunit *test)
snd_soc_unregister_component(test_dev);
/* cleanup */
- ret = snd_soc_unregister_card(&kunit_comp->card);
- KUNIT_EXPECT_EQ(test, 0, ret);
+ snd_soc_unregister_card(&kunit_comp->card);
}
// TEST CASE
@@ -757,8 +744,7 @@ static void snd_soc_tplg_test_load_pcm_tplg_reload_comp(struct kunit *test)
}
/* cleanup */
- ret = snd_soc_unregister_card(&kunit_comp->card);
- KUNIT_EXPECT_EQ(test, 0, ret);
+ snd_soc_unregister_card(&kunit_comp->card);
}
// TEST CASE
@@ -806,8 +792,7 @@ static void snd_soc_tplg_test_load_pcm_tplg_reload_card(struct kunit *test)
if (ret != 0 && ret != -EPROBE_DEFER)
KUNIT_FAIL(test, "Failed to register card");
- ret = snd_soc_unregister_card(&kunit_comp->card);
- KUNIT_EXPECT_EQ(test, 0, ret);
+ snd_soc_unregister_card(&kunit_comp->card);
}
/* cleanup */
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 3f9d314fba16..b101db85446f 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -535,7 +535,7 @@ static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr,
* return an -EINVAL error and prevent the card from
* being configured.
*/
- if (IS_ENABLED(CONFIG_SND_CTL_VALIDATION) && sbe->max > 512)
+ if (sbe->max > 512)
k->access |= SNDRV_CTL_ELEM_ACCESS_SKIP_CHECK;
ext_ops = tplg->bytes_ext_ops;
diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c
index 594cb311ff30..70c380c0ac7b 100644
--- a/sound/soc/soc-utils.c
+++ b/sound/soc/soc-utils.c
@@ -141,7 +141,6 @@ static const struct snd_soc_component_driver dummy_codec = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
#define STUB_RATES SNDRV_PCM_RATE_8000_384000
diff --git a/sound/soc/sof/Kconfig b/sound/soc/sof/Kconfig
index 4542868cd730..e90f173d067c 100644
--- a/sound/soc/sof/Kconfig
+++ b/sound/soc/sof/Kconfig
@@ -252,6 +252,13 @@ config SND_SOC_SOF_PROBE_WORK_QUEUE
When selected, the probe is handled in two steps, for example to
avoid lockdeps if request_module is used in the probe.
+# Supported IPC versions
+config SND_SOC_SOF_IPC3
+ bool
+
+config SND_SOC_SOF_INTEL_IPC4
+ bool
+
source "sound/soc/sof/amd/Kconfig"
source "sound/soc/sof/imx/Kconfig"
source "sound/soc/sof/intel/Kconfig"
diff --git a/sound/soc/sof/Makefile b/sound/soc/sof/Makefile
index 92b5e83601be..9a74ed116ed9 100644
--- a/sound/soc/sof/Makefile
+++ b/sound/soc/sof/Makefile
@@ -1,10 +1,18 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
snd-sof-objs := core.o ops.o loader.o ipc.o pcm.o pm.o debug.o topology.o\
- control.o trace.o iomem-utils.o sof-audio.o stream-ipc.o\
- ipc3-topology.o ipc3-control.o ipc3.o ipc3-pcm.o ipc3-loader.o\
- ipc3-dtrace.o\
- ipc4.o ipc4-loader.o
+ control.o trace.o iomem-utils.o sof-audio.o stream-ipc.o
+
+# IPC implementations
+ifneq ($(CONFIG_SND_SOC_SOF_IPC3),)
+snd-sof-objs += ipc3.o ipc3-loader.o ipc3-topology.o ipc3-control.o ipc3-pcm.o\
+ ipc3-dtrace.o
+endif
+ifneq ($(CONFIG_SND_SOC_SOF_INTEL_IPC4),)
+snd-sof-objs += ipc4.o ipc4-loader.o ipc4-topology.o ipc4-control.o ipc4-pcm.o
+endif
+
+# SOF client support
ifneq ($(CONFIG_SND_SOC_SOF_CLIENT),)
snd-sof-objs += sof-client.o
endif
diff --git a/sound/soc/sof/amd/Kconfig b/sound/soc/sof/amd/Kconfig
index 085232e04582..190c85d57047 100644
--- a/sound/soc/sof/amd/Kconfig
+++ b/sound/soc/sof/amd/Kconfig
@@ -17,6 +17,7 @@ if SND_SOC_SOF_AMD_TOPLEVEL
config SND_SOC_SOF_AMD_COMMON
tristate
select SND_SOC_SOF
+ select SND_SOC_SOF_IPC3
select SND_SOC_SOF_PCI_DEV
select SND_AMD_ACP_CONFIG
select SND_SOC_ACPI if ACPI
diff --git a/sound/soc/sof/amd/acp-dsp-offset.h b/sound/soc/sof/amd/acp-dsp-offset.h
index 40fbf11facba..56cefd4a84fc 100644
--- a/sound/soc/sof/amd/acp-dsp-offset.h
+++ b/sound/soc/sof/amd/acp-dsp-offset.h
@@ -46,12 +46,14 @@
#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_8 0xC3C
#define ACPAXI2AXI_ATU_CTRL 0xC40
#define ACP_SOFT_RESET 0x1000
+#define ACP_CONTROL 0x1004
#define ACP_I2S_PIN_CONFIG 0x1400
/* Registers from ACP_PGFSM block */
#define ACP_PGFSM_CONTROL 0x141C
#define ACP_PGFSM_STATUS 0x1420
+#define ACP_CLKMUX_SEL 0x1424
/* Registers from ACP_INTR block */
#define ACP_EXTERNAL_INTR_ENB 0x1800
diff --git a/sound/soc/sof/amd/acp.c b/sound/soc/sof/amd/acp.c
index 0c272573df97..c40d2900dd36 100644
--- a/sound/soc/sof/amd/acp.c
+++ b/sound/soc/sof/amd/acp.c
@@ -413,10 +413,46 @@ static int acp_init(struct snd_sof_dev *sdev)
dev_err(sdev->dev, "ACP power on failed\n");
return ret;
}
+
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_CONTROL, 0x01);
/* Reset */
return acp_reset(sdev);
}
+int amd_sof_acp_suspend(struct snd_sof_dev *sdev, u32 target_state)
+{
+ int ret;
+
+ ret = acp_reset(sdev);
+ if (ret) {
+ dev_err(sdev->dev, "ACP Reset failed\n");
+ return ret;
+ }
+
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_CONTROL, 0x00);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(amd_sof_acp_suspend, SND_SOC_SOF_AMD_COMMON);
+
+int amd_sof_acp_resume(struct snd_sof_dev *sdev)
+{
+ int ret;
+
+ ret = acp_init(sdev);
+ if (ret) {
+ dev_err(sdev->dev, "ACP Init failed\n");
+ return ret;
+ }
+
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_CLKMUX_SEL, 0x03);
+
+ ret = acp_memory_init(sdev);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS(amd_sof_acp_resume, SND_SOC_SOF_AMD_COMMON);
+
int amd_sof_acp_probe(struct snd_sof_dev *sdev)
{
struct pci_dev *pci = to_pci_dev(sdev->dev);
diff --git a/sound/soc/sof/amd/acp.h b/sound/soc/sof/amd/acp.h
index 291b44c54bcc..4c42b8fd6abf 100644
--- a/sound/soc/sof/amd/acp.h
+++ b/sound/soc/sof/amd/acp.h
@@ -216,6 +216,10 @@ int acp_sof_trace_init(struct snd_sof_dev *sdev, struct snd_dma_buffer *dmab,
struct sof_ipc_dma_trace_params_ext *dtrace_params);
int acp_sof_trace_release(struct snd_sof_dev *sdev);
+/* PM Callbacks */
+int amd_sof_acp_suspend(struct snd_sof_dev *sdev, u32 target_state);
+int amd_sof_acp_resume(struct snd_sof_dev *sdev);
+
struct sof_amd_acp_desc {
unsigned int host_bridge_id;
};
diff --git a/sound/soc/sof/amd/pci-rn.c b/sound/soc/sof/amd/pci-rn.c
index d5d9bcc2c997..3a7fed25a226 100644
--- a/sound/soc/sof/amd/pci-rn.c
+++ b/sound/soc/sof/amd/pci-rn.c
@@ -49,6 +49,7 @@ static const struct sof_amd_acp_desc renoir_chip_info = {
static const struct sof_dev_desc renoir_desc = {
.machines = snd_soc_acpi_amd_sof_machines,
+ .use_acpi_target_states = true,
.resindex_lpe_base = 0,
.resindex_pcicfg_base = -1,
.resindex_imr_base = -1,
@@ -166,6 +167,9 @@ static struct pci_driver snd_sof_pci_amd_rn_driver = {
.id_table = rn_pci_ids,
.probe = acp_pci_rn_probe,
.remove = acp_pci_rn_remove,
+ .driver = {
+ .pm = &sof_pci_pm,
+ },
};
module_pci_driver(snd_sof_pci_amd_rn_driver);
diff --git a/sound/soc/sof/amd/renoir.c b/sound/soc/sof/amd/renoir.c
index 70190365328c..9261c8bc2236 100644
--- a/sound/soc/sof/amd/renoir.c
+++ b/sound/soc/sof/amd/renoir.c
@@ -173,6 +173,10 @@ struct snd_sof_dsp_ops sof_renoir_ops = {
/* Trace Logger */
.trace_init = acp_sof_trace_init,
.trace_release = acp_sof_trace_release,
+
+ /* PM */
+ .suspend = amd_sof_acp_suspend,
+ .resume = amd_sof_acp_resume,
};
EXPORT_SYMBOL(sof_renoir_ops);
diff --git a/sound/soc/sof/compress.c b/sound/soc/sof/compress.c
index 47639b6344c8..67139e15f862 100644
--- a/sound/soc/sof/compress.c
+++ b/sound/soc/sof/compress.c
@@ -167,11 +167,23 @@ static int sof_compr_set_params(struct snd_soc_component *component,
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_compr_runtime *crtd = cstream->runtime;
struct sof_ipc_pcm_params_reply ipc_params_reply;
+ struct sof_ipc_fw_ready *ready = &sdev->fw_ready;
+ struct sof_ipc_fw_version *v = &ready->version;
struct snd_compr_tstamp *tstamp;
- struct sof_ipc_pcm_params pcm;
+ struct sof_ipc_pcm_params *pcm;
struct snd_sof_pcm *spcm;
+ size_t ext_data_size;
int ret;
+ if (v->abi_version < SOF_ABI_VER(3, 22, 0)) {
+ dev_err(component->dev,
+ "Compress params not supported with FW ABI version %d:%d:%d\n",
+ SOF_ABI_VERSION_MAJOR(v->abi_version),
+ SOF_ABI_VERSION_MINOR(v->abi_version),
+ SOF_ABI_VERSION_PATCH(v->abi_version));
+ return -EINVAL;
+ }
+
tstamp = crtd->private_data;
spcm = snd_sof_find_spcm_dai(component, rtd);
@@ -179,40 +191,50 @@ static int sof_compr_set_params(struct snd_soc_component *component,
if (!spcm)
return -EINVAL;
+ ext_data_size = sizeof(params->codec);
+
+ if (sizeof(*pcm) + ext_data_size > sdev->ipc->max_payload_size)
+ return -EINVAL;
+
+ pcm = kzalloc(sizeof(*pcm) + ext_data_size, GFP_KERNEL);
+ if (!pcm)
+ return -ENOMEM;
+
cstream->dma_buffer.dev.type = SNDRV_DMA_TYPE_DEV_SG;
cstream->dma_buffer.dev.dev = sdev->dev;
ret = snd_compr_malloc_pages(cstream, crtd->buffer_size);
if (ret < 0)
- return ret;
+ goto out;
ret = create_page_table(component, cstream, crtd->dma_area, crtd->dma_bytes);
if (ret < 0)
- return ret;
-
- memset(&pcm, 0, sizeof(pcm));
-
- pcm.params.buffer.pages = PFN_UP(crtd->dma_bytes);
- pcm.hdr.size = sizeof(pcm);
- pcm.hdr.cmd = SOF_IPC_GLB_STREAM_MSG | SOF_IPC_STREAM_PCM_PARAMS;
-
- pcm.comp_id = spcm->stream[cstream->direction].comp_id;
- pcm.params.hdr.size = sizeof(pcm.params);
- pcm.params.buffer.phy_addr = spcm->stream[cstream->direction].page_table.addr;
- pcm.params.buffer.size = crtd->dma_bytes;
- pcm.params.direction = cstream->direction;
- pcm.params.channels = params->codec.ch_out;
- pcm.params.rate = params->codec.sample_rate;
- pcm.params.buffer_fmt = SOF_IPC_BUFFER_INTERLEAVED;
- pcm.params.frame_fmt = SOF_IPC_FRAME_S32_LE;
- pcm.params.sample_container_bytes =
+ goto out;
+
+ pcm->params.buffer.pages = PFN_UP(crtd->dma_bytes);
+ pcm->hdr.size = sizeof(*pcm) + ext_data_size;
+ pcm->hdr.cmd = SOF_IPC_GLB_STREAM_MSG | SOF_IPC_STREAM_PCM_PARAMS;
+
+ pcm->comp_id = spcm->stream[cstream->direction].comp_id;
+ pcm->params.hdr.size = sizeof(pcm->params) + ext_data_size;
+ pcm->params.buffer.phy_addr = spcm->stream[cstream->direction].page_table.addr;
+ pcm->params.buffer.size = crtd->dma_bytes;
+ pcm->params.direction = cstream->direction;
+ pcm->params.channels = params->codec.ch_out;
+ pcm->params.rate = params->codec.sample_rate;
+ pcm->params.buffer_fmt = SOF_IPC_BUFFER_INTERLEAVED;
+ pcm->params.frame_fmt = SOF_IPC_FRAME_S32_LE;
+ pcm->params.sample_container_bytes =
snd_pcm_format_physical_width(SNDRV_PCM_FORMAT_S32) >> 3;
- pcm.params.host_period_bytes = params->buffer.fragment_size;
+ pcm->params.host_period_bytes = params->buffer.fragment_size;
+ pcm->params.ext_data_length = ext_data_size;
+
+ memcpy((u8 *)pcm->params.ext_data, &params->codec, ext_data_size);
- ret = sof_ipc_tx_message(sdev->ipc, &pcm, sizeof(pcm),
+ ret = sof_ipc_tx_message(sdev->ipc, pcm, sizeof(*pcm) + ext_data_size,
&ipc_params_reply, sizeof(ipc_params_reply));
if (ret < 0) {
dev_err(component->dev, "error ipc failed\n");
- return ret;
+ goto out;
}
tstamp->byte_offset = sdev->stream_box.offset + ipc_params_reply.posn_offset;
@@ -220,7 +242,10 @@ static int sof_compr_set_params(struct snd_soc_component *component,
spcm->prepared[cstream->direction] = true;
- return 0;
+out:
+ kfree(pcm);
+
+ return ret;
}
static int sof_compr_get_params(struct snd_soc_component *component,
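sof_compr_set_params() now allocates the IPC message on the heap so the snd_codec parameters can be appended as variable-length ext_data behind the fixed struct, with both hdr.size and params.hdr.size covering the extra bytes and the total checked against the IPC max payload. A minimal sketch of that variable-length-message pattern (struct and helper names are illustrative, not the SOF API):

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct example_msg {
	u32 size;		/* fixed part plus appended ext data */
	u32 ext_data_length;
	u8  ext_data[];		/* flexible array member for the payload */
};

static struct example_msg *example_build_msg(const void *ext, size_t ext_len,
					     size_t max_payload)
{
	struct example_msg *msg;

	if (sizeof(*msg) + ext_len > max_payload)
		return NULL;	/* would not fit in a single IPC message */

	msg = kzalloc(sizeof(*msg) + ext_len, GFP_KERNEL);
	if (!msg)
		return NULL;

	msg->size = sizeof(*msg) + ext_len;
	msg->ext_data_length = ext_len;
	memcpy(msg->ext_data, ext, ext_len);
	return msg;
}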
diff --git a/sound/soc/sof/core.c b/sound/soc/sof/core.c
index 53719c04658f..c99b5e6c026c 100644
--- a/sound/soc/sof/core.c
+++ b/sound/soc/sof/core.c
@@ -189,7 +189,7 @@ static int sof_probe_continue(struct snd_sof_dev *sdev)
ret = snd_sof_probe(sdev);
if (ret < 0) {
dev_err(sdev->dev, "error: failed to probe DSP %d\n", ret);
- return ret;
+ goto probe_err;
}
sof_set_fw_state(sdev, SOF_FW_BOOT_PREPARE);
@@ -317,6 +317,8 @@ dbg_err:
snd_sof_free_debug(sdev);
dsp_err:
snd_sof_remove(sdev);
+probe_err:
+ sof_ops_free(sdev);
/* all resources freed, update state to match */
sof_set_fw_state(sdev, SOF_FW_BOOT_NOT_STARTED);
@@ -374,6 +376,7 @@ int snd_sof_device_probe(struct device *dev, struct snd_sof_pdata *plat_data)
!sof_ops(sdev)->block_read || !sof_ops(sdev)->block_write ||
!sof_ops(sdev)->send_msg || !sof_ops(sdev)->load_firmware ||
!sof_ops(sdev)->ipc_msg_data) {
+ sof_ops_free(sdev);
dev_err(dev, "error: missing mandatory ops\n");
return -EINVAL;
}
@@ -457,6 +460,8 @@ int snd_sof_device_remove(struct device *dev)
snd_sof_remove(sdev);
}
+ sof_ops_free(sdev);
+
/* release firmware */
snd_sof_fw_unload(sdev);
diff --git a/sound/soc/sof/debug.c b/sound/soc/sof/debug.c
index cf1271eb29b2..d9a3ce7b69e1 100644
--- a/sound/soc/sof/debug.c
+++ b/sound/soc/sof/debug.c
@@ -252,9 +252,9 @@ static int memory_info_update(struct snd_sof_dev *sdev, char *buf, size_t buff_s
}
for (i = 0, len = 0; i < reply->num_elems; i++) {
- ret = snprintf(buf + len, buff_size - len, "zone %d.%d used %#8x free %#8x\n",
- reply->elems[i].zone, reply->elems[i].id,
- reply->elems[i].used, reply->elems[i].free);
+ ret = scnprintf(buf + len, buff_size - len, "zone %d.%d used %#8x free %#8x\n",
+ reply->elems[i].zone, reply->elems[i].id,
+ reply->elems[i].used, reply->elems[i].free);
if (ret < 0)
goto error;
len += ret;
@@ -428,7 +428,7 @@ static void snd_sof_ipc_dump(struct snd_sof_dev *sdev)
}
}
-void snd_sof_handle_fw_exception(struct snd_sof_dev *sdev)
+void snd_sof_handle_fw_exception(struct snd_sof_dev *sdev, const char *msg)
{
if (IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_RETAIN_DSP_CONTEXT) ||
sof_debug_check_flag(SOF_DBG_RETAIN_CTX)) {
@@ -441,8 +441,7 @@ void snd_sof_handle_fw_exception(struct snd_sof_dev *sdev)
/* dump vital information to the logs */
snd_sof_ipc_dump(sdev);
- snd_sof_dsp_dbg_dump(sdev, "Firmware exception",
- SOF_DBG_DUMP_REGS | SOF_DBG_DUMP_MBOX);
+ snd_sof_dsp_dbg_dump(sdev, msg, SOF_DBG_DUMP_REGS | SOF_DBG_DUMP_MBOX);
sof_fw_trace_fw_crashed(sdev);
}
EXPORT_SYMBOL(snd_sof_handle_fw_exception);
diff --git a/sound/soc/sof/imx/Kconfig b/sound/soc/sof/imx/Kconfig
index 9b8d5bb1e449..cc6e695f913a 100644
--- a/sound/soc/sof/imx/Kconfig
+++ b/sound/soc/sof/imx/Kconfig
@@ -15,6 +15,7 @@ config SND_SOC_SOF_IMX_COMMON
tristate
select SND_SOC_SOF_OF_DEV
select SND_SOC_SOF
+ select SND_SOC_SOF_IPC3
select SND_SOC_SOF_XTENSA
select SND_SOC_SOF_COMPRESS
help
diff --git a/sound/soc/sof/intel/Kconfig b/sound/soc/sof/intel/Kconfig
index 0def2aa5581d..3f54678e810b 100644
--- a/sound/soc/sof/intel/Kconfig
+++ b/sound/soc/sof/intel/Kconfig
@@ -40,6 +40,7 @@ if SND_SOC_SOF_ACPI
config SND_SOC_SOF_BAYTRAIL
tristate "SOF support for Baytrail, Braswell and Cherrytrail"
default SND_SOC_SOF_ACPI
+ select SND_SOC_SOF_IPC3
select SND_SOC_SOF_INTEL_COMMON
select SND_SOC_SOF_INTEL_ATOM_HIFI_EP
select SND_SOC_SOF_ACPI_DEV
@@ -60,6 +61,7 @@ config SND_SOC_SOF_BAYTRAIL
config SND_SOC_SOF_BROADWELL
tristate "SOF support for Broadwell"
default SND_SOC_SOF_ACPI
+ select SND_SOC_SOF_IPC3
select SND_SOC_SOF_INTEL_COMMON
select SND_SOC_SOF_INTEL_HIFI_EP_IPC
select SND_SOC_SOF_ACPI_DEV
@@ -85,6 +87,7 @@ config SND_SOC_SOF_MERRIFIELD
tristate "SOF support for Tangier/Merrifield"
default SND_SOC_SOF_PCI
select SND_SOC_SOF_PCI_DEV
+ select SND_SOC_SOF_IPC3
select SND_SOC_SOF_INTEL_ATOM_HIFI_EP
help
This adds support for Sound Open Firmware for Intel(R) platforms
@@ -95,6 +98,8 @@ config SND_SOC_SOF_MERRIFIELD
config SND_SOC_SOF_INTEL_APL
tristate
select SND_SOC_SOF_HDA_COMMON
+ select SND_SOC_SOF_IPC3
+ select SND_SOC_SOF_INTEL_IPC4
config SND_SOC_SOF_APOLLOLAKE
tristate "SOF support for Apollolake"
@@ -120,6 +125,8 @@ config SND_SOC_SOF_INTEL_CNL
tristate
select SND_SOC_SOF_HDA_COMMON
select SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE
+ select SND_SOC_SOF_IPC3
+ select SND_SOC_SOF_INTEL_IPC4
config SND_SOC_SOF_CANNONLAKE
tristate "SOF support for Cannonlake"
@@ -154,6 +161,8 @@ config SND_SOC_SOF_INTEL_ICL
tristate
select SND_SOC_SOF_HDA_COMMON
select SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE
+ select SND_SOC_SOF_IPC3
+ select SND_SOC_SOF_INTEL_IPC4
config SND_SOC_SOF_ICELAKE
tristate "SOF support for Icelake"
@@ -179,6 +188,8 @@ config SND_SOC_SOF_INTEL_TGL
tristate
select SND_SOC_SOF_HDA_COMMON
select SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE
+ select SND_SOC_SOF_IPC3
+ select SND_SOC_SOF_INTEL_IPC4
config SND_SOC_SOF_TIGERLAKE
tristate "SOF support for Tigerlake"
@@ -210,6 +221,22 @@ config SND_SOC_SOF_ALDERLAKE
Say Y if you have such a device.
If unsure select "N".
+config SND_SOC_SOF_INTEL_MTL
+ tristate
+ select SND_SOC_SOF_HDA_COMMON
+ select SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE
+ select SND_SOC_SOF_INTEL_IPC4
+
+config SND_SOC_SOF_METEORLAKE
+ tristate "SOF support for Meteorlake"
+ default SND_SOC_SOF_PCI
+ select SND_SOC_SOF_INTEL_MTL
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Meteorlake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
config SND_SOC_SOF_HDA_COMMON
tristate
select SND_SOC_SOF_INTEL_COMMON
diff --git a/sound/soc/sof/intel/Makefile b/sound/soc/sof/intel/Makefile
index b9d51dc39ffa..a079159bb2f0 100644
--- a/sound/soc/sof/intel/Makefile
+++ b/sound/soc/sof/intel/Makefile
@@ -6,7 +6,7 @@ snd-sof-acpi-intel-bdw-objs := bdw.o
snd-sof-intel-hda-common-objs := hda.o hda-loader.o hda-stream.o hda-trace.o \
hda-dsp.o hda-ipc.o hda-ctrl.o hda-pcm.o \
hda-dai.o hda-bus.o \
- apl.o cnl.o tgl.o icl.o hda-common-ops.o
+ apl.o cnl.o tgl.o icl.o mtl.o hda-common-ops.o
snd-sof-intel-hda-common-$(CONFIG_SND_SOC_SOF_HDA_PROBES) += hda-probes.o
snd-sof-intel-hda-objs := hda-codec.o
@@ -24,9 +24,11 @@ snd-sof-pci-intel-apl-objs := pci-apl.o
snd-sof-pci-intel-cnl-objs := pci-cnl.o
snd-sof-pci-intel-icl-objs := pci-icl.o
snd-sof-pci-intel-tgl-objs := pci-tgl.o
+snd-sof-pci-intel-mtl-objs := pci-mtl.o
obj-$(CONFIG_SND_SOC_SOF_MERRIFIELD) += snd-sof-pci-intel-tng.o
obj-$(CONFIG_SND_SOC_SOF_INTEL_APL) += snd-sof-pci-intel-apl.o
obj-$(CONFIG_SND_SOC_SOF_INTEL_CNL) += snd-sof-pci-intel-cnl.o
obj-$(CONFIG_SND_SOC_SOF_INTEL_ICL) += snd-sof-pci-intel-icl.o
obj-$(CONFIG_SND_SOC_SOF_INTEL_TGL) += snd-sof-pci-intel-tgl.o
+obj-$(CONFIG_SND_SOC_SOF_INTEL_MTL) += snd-sof-pci-intel-mtl.o
diff --git a/sound/soc/sof/intel/apl.c b/sound/soc/sof/intel/apl.c
index 0cea280a6d2d..084c245a9522 100644
--- a/sound/soc/sof/intel/apl.c
+++ b/sound/soc/sof/intel/apl.c
@@ -101,6 +101,7 @@ const struct sof_intel_dsp_desc apl_chip_info = {
.ssp_base_offset = APL_SSP_BASE_OFFSET,
.quirks = SOF_INTEL_PROCEN_FMT_QUIRK,
.check_ipc_irq = hda_dsp_check_ipc_irq,
+ .cl_init = cl_dsp_init,
.hw_ip_version = SOF_INTEL_CAVS_1_5_PLUS,
};
EXPORT_SYMBOL_NS(apl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
diff --git a/sound/soc/sof/intel/atom.c b/sound/soc/sof/intel/atom.c
index ff5900b155dc..bd9789b483b1 100644
--- a/sound/soc/sof/intel/atom.c
+++ b/sound/soc/sof/intel/atom.c
@@ -274,22 +274,22 @@ static const char *fixup_tplg_name(struct snd_sof_dev *sdev,
const char *ssp_str)
{
const char *tplg_filename = NULL;
- char *filename;
- char *split_ext;
+ const char *split_ext;
+ char *filename, *tmp;
- filename = devm_kstrdup(sdev->dev, sof_tplg_filename, GFP_KERNEL);
+ filename = kstrdup(sof_tplg_filename, GFP_KERNEL);
if (!filename)
return NULL;
/* this assumes a .tplg extension */
- split_ext = strsep(&filename, ".");
- if (split_ext) {
+ tmp = filename;
+ split_ext = strsep(&tmp, ".");
+ if (split_ext)
tplg_filename = devm_kasprintf(sdev->dev, GFP_KERNEL,
"%s-%s.tplg",
split_ext, ssp_str);
- if (!tplg_filename)
- return NULL;
- }
+ kfree(filename);
+
return tplg_filename;
}
diff --git a/sound/soc/sof/intel/bdw.c b/sound/soc/sof/intel/bdw.c
index 26df780c702e..a446154f2803 100644
--- a/sound/soc/sof/intel/bdw.c
+++ b/sound/soc/sof/intel/bdw.c
@@ -681,11 +681,8 @@ static int sof_broadwell_probe(struct platform_device *pdev)
return -ENODEV;
}
- desc = device_get_match_data(dev);
- if (!desc)
- return -ENODEV;
-
- return sof_acpi_probe(pdev, device_get_match_data(dev));
+ desc = (const struct sof_dev_desc *)id->driver_data;
+ return sof_acpi_probe(pdev, desc);
}
/* acpi_driver definition */
diff --git a/sound/soc/sof/intel/byt.c b/sound/soc/sof/intel/byt.c
index 4ed8381eceda..e6dc4ff531c3 100644
--- a/sound/soc/sof/intel/byt.c
+++ b/sound/soc/sof/intel/byt.c
@@ -465,10 +465,7 @@ static int sof_baytrail_probe(struct platform_device *pdev)
return -ENODEV;
}
- desc = device_get_match_data(&pdev->dev);
- if (!desc)
- return -ENODEV;
-
+ desc = (const struct sof_dev_desc *)id->driver_data;
if (desc == &sof_acpi_baytrail_desc && soc_intel_is_byt_cr(pdev))
desc = &sof_acpi_baytrailcr_desc;
diff --git a/sound/soc/sof/intel/cnl.c b/sound/soc/sof/intel/cnl.c
index cd6e5f8a5eb4..a064453f0bc3 100644
--- a/sound/soc/sof/intel/cnl.c
+++ b/sound/soc/sof/intel/cnl.c
@@ -60,17 +60,23 @@ irqreturn_t cnl_ipc4_irq_thread(int irq, void *context)
if (primary & SOF_IPC4_MSG_DIR_MASK) {
/* Reply received */
- struct sof_ipc4_msg *data = sdev->ipc->msg.reply_data;
+ if (likely(sdev->fw_state == SOF_FW_BOOT_COMPLETE)) {
+ struct sof_ipc4_msg *data = sdev->ipc->msg.reply_data;
- data->primary = primary;
- data->extension = extension;
+ data->primary = primary;
+ data->extension = extension;
- spin_lock_irq(&sdev->ipc_lock);
+ spin_lock_irq(&sdev->ipc_lock);
- snd_sof_ipc_get_reply(sdev);
- snd_sof_ipc_reply(sdev, data->primary);
+ snd_sof_ipc_get_reply(sdev);
+ snd_sof_ipc_reply(sdev, data->primary);
- spin_unlock_irq(&sdev->ipc_lock);
+ spin_unlock_irq(&sdev->ipc_lock);
+ } else {
+ dev_dbg_ratelimited(sdev->dev,
+ "IPC reply before FW_READY: %#x|%#x\n",
+ primary, extension);
+ }
} else {
/* Notification received */
notification_data.primary = primary;
@@ -124,15 +130,20 @@ irqreturn_t cnl_ipc_irq_thread(int irq, void *context)
CNL_DSP_REG_HIPCCTL,
CNL_DSP_REG_HIPCCTL_DONE, 0);
- spin_lock_irq(&sdev->ipc_lock);
+ if (likely(sdev->fw_state == SOF_FW_BOOT_COMPLETE)) {
+ spin_lock_irq(&sdev->ipc_lock);
- /* handle immediate reply from DSP core */
- hda_dsp_ipc_get_reply(sdev);
- snd_sof_ipc_reply(sdev, msg);
+ /* handle immediate reply from DSP core */
+ hda_dsp_ipc_get_reply(sdev);
+ snd_sof_ipc_reply(sdev, msg);
- cnl_ipc_dsp_done(sdev);
+ cnl_ipc_dsp_done(sdev);
- spin_unlock_irq(&sdev->ipc_lock);
+ spin_unlock_irq(&sdev->ipc_lock);
+ } else {
+ dev_dbg_ratelimited(sdev->dev, "IPC reply before FW_READY: %#x\n",
+ msg);
+ }
ipc_irq = true;
}
@@ -401,6 +412,7 @@ const struct sof_intel_dsp_desc cnl_chip_info = {
.sdw_alh_base = SDW_ALH_BASE,
.check_sdw_irq = hda_common_check_sdw_irq,
.check_ipc_irq = hda_dsp_check_ipc_irq,
+ .cl_init = cl_dsp_init,
.hw_ip_version = SOF_INTEL_CAVS_1_8,
};
EXPORT_SYMBOL_NS(cnl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
@@ -430,6 +442,7 @@ const struct sof_intel_dsp_desc jsl_chip_info = {
.sdw_alh_base = SDW_ALH_BASE,
.check_sdw_irq = hda_common_check_sdw_irq,
.check_ipc_irq = hda_dsp_check_ipc_irq,
+ .cl_init = cl_dsp_init,
.hw_ip_version = SOF_INTEL_CAVS_2_0,
};
EXPORT_SYMBOL_NS(jsl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
index 9823230d2ef4..556e883a32ed 100644
--- a/sound/soc/sof/intel/hda-dai.c
+++ b/sound/soc/sof/intel/hda-dai.c
@@ -10,10 +10,23 @@
#include <sound/pcm_params.h>
#include <sound/hdaudio_ext.h>
+#include <sound/intel-nhlt.h>
+#include <sound/sof/ipc4/header.h>
+#include <uapi/sound/sof/header.h>
+#include "../ipc4-priv.h"
+#include "../ipc4-topology.h"
#include "../sof-priv.h"
#include "../sof-audio.h"
#include "hda.h"
+/*
+ * The default method is to fetch NHLT from BIOS. With this parameter set
+ * it is possible to override that with NHLT in the SOF topology manifest.
+ */
+static bool hda_use_tplg_nhlt;
+module_param_named(sof_use_tplg_nhlt, hda_use_tplg_nhlt, bool, 0444);
+MODULE_PARM_DESC(sof_use_tplg_nhlt, "SOF topology nhlt override");
+
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
struct hda_pipe_params {
@@ -113,12 +126,8 @@ hda_link_stream_assign(struct hdac_bus *bus,
}
if (res) {
- /*
- * Decouple host and link DMA. The decoupled flag
- * is updated in snd_hdac_ext_stream_decouple().
- */
- if (!res->decoupled)
- snd_hdac_ext_stream_decouple_locked(bus, res, true);
+ /* Make sure that host and link DMA are decoupled. */
+ snd_hdac_ext_stream_decouple_locked(bus, res, true);
res->link_locked = 1;
res->link_substream = substream;
@@ -171,7 +180,6 @@ static int hda_link_dma_params(struct hdac_ext_stream *hext_stream,
struct hdac_ext_link *link;
unsigned int format_val;
- snd_hdac_ext_stream_decouple(bus, hext_stream, true);
snd_hdac_ext_link_stream_reset(hext_stream);
format_val = snd_hdac_calc_stream_format(params->s_freq, params->ch,
@@ -208,7 +216,6 @@ static int hda_link_dma_hw_params(struct snd_pcm_substream *substream,
struct hdac_bus *bus = hstream->bus;
struct hdac_ext_link *link;
- /* get stored dma data if resuming from system suspend */
hext_stream = snd_soc_dai_get_dma_data(cpu_dai, substream);
if (!hext_stream) {
hext_stream = hda_link_stream_assign(bus, substream);
@@ -257,7 +264,6 @@ static int hda_link_dma_trigger(struct snd_pcm_substream *substream, int cmd)
struct hdac_ext_stream *hext_stream = snd_soc_dai_get_dma_data(cpu_dai, substream);
int ret;
- dev_dbg(cpu_dai->dev, "%s: cmd=%d\n", __func__, cmd);
if (!hext_stream)
return 0;
@@ -369,8 +375,7 @@ static int hda_dai_config_pause_push_ipc(struct snd_soc_dapm_widget *w)
return ret;
}
-static int ipc3_hda_dai_prepare(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
+static int hda_dai_prepare(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
{
struct hdac_ext_stream *hext_stream =
snd_soc_dai_get_dma_data(dai, substream);
@@ -382,7 +387,7 @@ static int ipc3_hda_dai_prepare(struct snd_pcm_substream *substream,
if (hext_stream && hext_stream->link_prepared)
return 0;
- dev_dbg(sdev->dev, "%s: prepare stream dir %d\n", __func__, substream->stream);
+ dev_dbg(sdev->dev, "prepare stream dir %d\n", substream->stream);
ret = hda_link_dma_prepare(substream);
if (ret < 0)
@@ -408,13 +413,15 @@ static int ipc3_hda_dai_trigger(struct snd_pcm_substream *substream,
struct snd_soc_dapm_widget *w;
int ret;
+ dev_dbg(dai->dev, "cmd=%d dai %s direction %d\n", cmd,
+ dai->name, substream->stream);
+
ret = hda_link_dma_trigger(substream, cmd);
if (ret < 0)
return ret;
w = snd_soc_dai_get_widget(dai, substream->stream);
- dev_dbg(dai->dev, "%s: cmd=%d\n", __func__, cmd);
switch (cmd) {
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_STOP:
@@ -438,6 +445,91 @@ static int ipc3_hda_dai_trigger(struct snd_pcm_substream *substream,
return 0;
}
+/*
+ * In contrast to IPC3, the dai trigger in IPC4 mixes pipeline state changes
+ * (over the IPC channel) and DMA state changes (direct host register writes).
+ */
+static int ipc4_hda_dai_trigger(struct snd_pcm_substream *substream,
+ int cmd, struct snd_soc_dai *dai)
+{
+ struct hdac_ext_stream *hext_stream = snd_soc_dai_get_dma_data(dai, substream);
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(dai->component);
+ struct snd_soc_pcm_runtime *rtd;
+ struct snd_sof_widget *swidget;
+ struct snd_soc_dapm_widget *w;
+ struct snd_soc_dai *codec_dai;
+ struct hdac_stream *hstream;
+ struct snd_soc_dai *cpu_dai;
+ int ret;
+
+ dev_dbg(dai->dev, "cmd=%d dai %s direction %d\n", cmd,
+ dai->name, substream->stream);
+
+ hstream = substream->runtime->private_data;
+ rtd = asoc_substream_to_rtd(substream);
+ cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ codec_dai = asoc_rtd_to_codec(rtd, 0);
+
+ w = snd_soc_dai_get_widget(dai, substream->stream);
+ swidget = w->dobj.private;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ snd_hdac_ext_link_stream_start(hext_stream);
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_STOP:
+ {
+ struct snd_sof_widget *pipe_widget = swidget->pipe_widget;
+ struct sof_ipc4_pipeline *pipeline = pipe_widget->private;
+
+ ret = sof_ipc4_set_pipeline_state(sdev, swidget->pipeline_id,
+ SOF_IPC4_PIPE_PAUSED);
+ if (ret < 0)
+ return ret;
+
+ pipeline->state = SOF_IPC4_PIPE_PAUSED;
+
+ snd_hdac_ext_link_stream_clear(hext_stream);
+
+ ret = sof_ipc4_set_pipeline_state(sdev, swidget->pipeline_id,
+ SOF_IPC4_PIPE_RESET);
+ if (ret < 0)
+ return ret;
+
+ pipeline->state = SOF_IPC4_PIPE_RESET;
+
+ ret = hda_link_dma_cleanup(substream, hstream, cpu_dai, codec_dai, false);
+ if (ret < 0) {
+ dev_err(sdev->dev, "%s: failed to clean up link DMA\n", __func__);
+ return ret;
+ }
+ break;
+ }
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ {
+ struct snd_sof_widget *pipe_widget = swidget->pipe_widget;
+ struct sof_ipc4_pipeline *pipeline = pipe_widget->private;
+
+ ret = sof_ipc4_set_pipeline_state(sdev, swidget->pipeline_id,
+ SOF_IPC4_PIPE_PAUSED);
+ if (ret < 0)
+ return ret;
+
+ pipeline->state = SOF_IPC4_PIPE_PAUSED;
+
+ snd_hdac_ext_link_stream_clear(hext_stream);
+ break;
+ }
+ default:
+ dev_err(sdev->dev, "%s: unknown trigger command %d\n", __func__, cmd);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int hda_dai_hw_free(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
@@ -454,7 +546,7 @@ static const struct snd_soc_dai_ops ipc3_hda_dai_ops = {
.hw_params = hda_dai_hw_params,
.hw_free = hda_dai_hw_free,
.trigger = ipc3_hda_dai_trigger,
- .prepare = ipc3_hda_dai_prepare,
+ .prepare = hda_dai_prepare,
};
static int hda_dai_suspend(struct hdac_bus *bus)
@@ -497,6 +589,14 @@ static int hda_dai_suspend(struct hdac_bus *bus)
return 0;
}
+
+static const struct snd_soc_dai_ops ipc4_hda_dai_ops = {
+ .hw_params = hda_dai_hw_params,
+ .hw_free = hda_dai_hw_free,
+ .trigger = ipc4_hda_dai_trigger,
+ .prepare = hda_dai_prepare,
+};
+
#endif
/* only one flag used so far to harden hw_params/hw_free/trigger/prepare */
@@ -608,6 +708,64 @@ static const struct snd_soc_dai_ops ipc3_ssp_dai_ops = {
.shutdown = ssp_dai_shutdown,
};
+static int ipc4_be_dai_common_trigger(struct snd_soc_dai *dai, int cmd, int stream)
+{
+ struct snd_sof_widget *pipe_widget;
+ struct sof_ipc4_pipeline *pipeline;
+ struct snd_sof_widget *swidget;
+ struct snd_soc_dapm_widget *w;
+ struct snd_sof_dev *sdev;
+ int ret;
+
+ w = snd_soc_dai_get_widget(dai, stream);
+ swidget = w->dobj.private;
+ pipe_widget = swidget->pipe_widget;
+ pipeline = pipe_widget->private;
+ sdev = snd_soc_component_get_drvdata(swidget->scomp);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_STOP:
+ ret = sof_ipc4_set_pipeline_state(sdev, swidget->pipeline_id,
+ SOF_IPC4_PIPE_PAUSED);
+ if (ret < 0)
+ return ret;
+ pipeline->state = SOF_IPC4_PIPE_PAUSED;
+
+ ret = sof_ipc4_set_pipeline_state(sdev, swidget->pipeline_id,
+ SOF_IPC4_PIPE_RESET);
+ if (ret < 0)
+ return ret;
+ pipeline->state = SOF_IPC4_PIPE_RESET;
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ ret = sof_ipc4_set_pipeline_state(sdev, swidget->pipeline_id,
+ SOF_IPC4_PIPE_PAUSED);
+ if (ret < 0)
+ return ret;
+ pipeline->state = SOF_IPC4_PIPE_PAUSED;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int ipc4_be_dai_trigger(struct snd_pcm_substream *substream,
+ int cmd, struct snd_soc_dai *dai)
+{
+ return ipc4_be_dai_common_trigger(dai, cmd, substream->stream);
+}
+
+static const struct snd_soc_dai_ops ipc4_dmic_dai_ops = {
+ .trigger = ipc4_be_dai_trigger,
+};
+
+static const struct snd_soc_dai_ops ipc4_ssp_dai_ops = {
+ .trigger = ipc4_be_dai_trigger,
+};
+
void hda_set_dai_drv_ops(struct snd_sof_dev *sdev, struct snd_sof_dsp_ops *ops)
{
int i;
@@ -627,11 +785,51 @@ void hda_set_dai_drv_ops(struct snd_sof_dev *sdev, struct snd_sof_dsp_ops *ops)
#endif
}
break;
+ case SOF_INTEL_IPC4:
+ {
+ struct sof_ipc4_fw_data *ipc4_data = sdev->private;
+
+ for (i = 0; i < ops->num_drv; i++) {
+ if (strstr(ops->drv[i].name, "DMIC")) {
+ ops->drv[i].ops = &ipc4_dmic_dai_ops;
+ continue;
+ }
+ if (strstr(ops->drv[i].name, "SSP")) {
+ ops->drv[i].ops = &ipc4_ssp_dai_ops;
+ continue;
+ }
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ if (strstr(ops->drv[i].name, "iDisp") ||
+ strstr(ops->drv[i].name, "Analog") ||
+ strstr(ops->drv[i].name, "Digital"))
+ ops->drv[i].ops = &ipc4_hda_dai_ops;
+#endif
+ }
+
+ if (!hda_use_tplg_nhlt)
+ ipc4_data->nhlt = intel_nhlt_init(sdev->dev);
+
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE))
+ sdw_callback.trigger = ipc4_be_dai_common_trigger;
+
+ break;
+ }
default:
break;
}
}
+void hda_ops_free(struct snd_sof_dev *sdev)
+{
+ if (sdev->pdata->ipc_type == SOF_INTEL_IPC4) {
+ struct sof_ipc4_fw_data *ipc4_data = sdev->private;
+
+ if (!hda_use_tplg_nhlt)
+ intel_nhlt_free(ipc4_data->nhlt);
+ }
+}
+EXPORT_SYMBOL_NS(hda_ops_free, SND_SOC_SOF_INTEL_HDA_COMMON);
+
/*
* common dai driver for skl+ platforms.
* some products who use this DAI array only physically have a subset of
diff --git a/sound/soc/sof/intel/hda-dsp.c b/sound/soc/sof/intel/hda-dsp.c
index e24eea725acb..eddfd77ad90f 100644
--- a/sound/soc/sof/intel/hda-dsp.c
+++ b/sound/soc/sof/intel/hda-dsp.c
@@ -617,6 +617,13 @@ static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend)
#endif
int ret, j;
+ /*
+ * The memory used for IMR boot loses its content in states deeper than S3.
+ * We must not try IMR boot on the next power up (as it will fail).
+ */
+ if (sdev->system_suspend_target > SOF_SUSPEND_S3)
+ hda->skip_imr_boot = true;
+
hda_sdw_int_enable(sdev, false);
/* disable IPC interrupts */
@@ -743,7 +750,7 @@ int hda_dsp_resume(struct snd_sof_dev *sdev)
if (hlink->ref_count) {
ret = snd_hdac_ext_bus_link_power_up(hlink);
if (ret < 0) {
- dev_dbg(sdev->dev,
+ dev_err(sdev->dev,
"error %d in %s: failed to power up links",
ret, __func__);
return ret;
@@ -871,7 +878,7 @@ int hda_dsp_suspend(struct snd_sof_dev *sdev, u32 target_state)
/* no link can be powered in s0ix state */
ret = snd_hdac_ext_bus_link_power_down_all(bus);
if (ret < 0) {
- dev_dbg(sdev->dev,
+ dev_err(sdev->dev,
"error %d in %s: failed to power down links",
ret, __func__);
return ret;
@@ -940,13 +947,7 @@ void hda_dsp_d0i3_work(struct work_struct *work)
int hda_dsp_core_get(struct snd_sof_dev *sdev, int core)
{
- struct sof_ipc_pm_core_config pm_core_config = {
- .hdr = {
- .cmd = SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_CORE_ENABLE,
- .size = sizeof(pm_core_config),
- },
- .enable_mask = sdev->enabled_cores_mask | BIT(core),
- };
+ const struct sof_ipc_pm_ops *pm_ops = sdev->ipc->ops->pm;
int ret, ret1;
/* power up core */
@@ -961,9 +962,12 @@ int hda_dsp_core_get(struct snd_sof_dev *sdev, int core)
if (sdev->fw_state != SOF_FW_BOOT_COMPLETE || core == SOF_DSP_PRIMARY_CORE)
return 0;
+ /* No need to continue if the set_core_state ops is not available */
+ if (!pm_ops->set_core_state)
+ return 0;
+
/* Now notify DSP for secondary cores */
- ret = sof_ipc_tx_message(sdev->ipc, &pm_core_config, sizeof(pm_core_config),
- &pm_core_config, sizeof(pm_core_config));
+ ret = pm_ops->set_core_state(sdev, core, true);
if (ret < 0) {
dev_err(sdev->dev, "failed to enable secondary core '%d' failed with %d\n",
core, ret);
diff --git a/sound/soc/sof/intel/hda-ipc.c b/sound/soc/sof/intel/hda-ipc.c
index f08011249955..65e688f749ea 100644
--- a/sound/soc/sof/intel/hda-ipc.c
+++ b/sound/soc/sof/intel/hda-ipc.c
@@ -148,17 +148,23 @@ irqreturn_t hda_dsp_ipc4_irq_thread(int irq, void *context)
if (primary & SOF_IPC4_MSG_DIR_MASK) {
/* Reply received */
- struct sof_ipc4_msg *data = sdev->ipc->msg.reply_data;
+ if (likely(sdev->fw_state == SOF_FW_BOOT_COMPLETE)) {
+ struct sof_ipc4_msg *data = sdev->ipc->msg.reply_data;
- data->primary = primary;
- data->extension = extension;
+ data->primary = primary;
+ data->extension = extension;
- spin_lock_irq(&sdev->ipc_lock);
+ spin_lock_irq(&sdev->ipc_lock);
- snd_sof_ipc_get_reply(sdev);
- snd_sof_ipc_reply(sdev, data->primary);
+ snd_sof_ipc_get_reply(sdev);
+ snd_sof_ipc_reply(sdev, data->primary);
- spin_unlock_irq(&sdev->ipc_lock);
+ spin_unlock_irq(&sdev->ipc_lock);
+ } else {
+ dev_dbg_ratelimited(sdev->dev,
+ "IPC reply before FW_READY: %#x|%#x\n",
+ primary, extension);
+ }
} else {
/* Notification received */
@@ -225,16 +231,21 @@ irqreturn_t hda_dsp_ipc_irq_thread(int irq, void *context)
* place, the message might not yet be marked as expecting a
* reply.
*/
- spin_lock_irq(&sdev->ipc_lock);
+ if (likely(sdev->fw_state == SOF_FW_BOOT_COMPLETE)) {
+ spin_lock_irq(&sdev->ipc_lock);
- /* handle immediate reply from DSP core */
- hda_dsp_ipc_get_reply(sdev);
- snd_sof_ipc_reply(sdev, msg);
+ /* handle immediate reply from DSP core */
+ hda_dsp_ipc_get_reply(sdev);
+ snd_sof_ipc_reply(sdev, msg);
- /* set the done bit */
- hda_dsp_ipc_dsp_done(sdev);
+ /* set the done bit */
+ hda_dsp_ipc_dsp_done(sdev);
- spin_unlock_irq(&sdev->ipc_lock);
+ spin_unlock_irq(&sdev->ipc_lock);
+ } else {
+ dev_dbg_ratelimited(sdev->dev, "IPC reply before FW_READY: %#x\n",
+ msg);
+ }
ipc_irq = true;
}
diff --git a/sound/soc/sof/intel/hda-loader.c b/sound/soc/sof/intel/hda-loader.c
index 145d483bd129..eb22eb3f6fee 100644
--- a/sound/soc/sof/intel/hda-loader.c
+++ b/sound/soc/sof/intel/hda-loader.c
@@ -99,7 +99,7 @@ out_put:
* power on all host managed cores and only unstall/run the boot core to boot the
 * DSP, then turn off any non-boot cores that were powered on.
*/
-static int cl_dsp_init(struct snd_sof_dev *sdev, int stream_tag, bool imr_boot)
+int cl_dsp_init(struct snd_sof_dev *sdev, int stream_tag, bool imr_boot)
{
struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
const struct sof_intel_dsp_desc *chip = hda->desc;
@@ -369,9 +369,15 @@ int hda_dsp_cl_boot_firmware_iccmax(struct snd_sof_dev *sdev)
static int hda_dsp_boot_imr(struct snd_sof_dev *sdev)
{
+ const struct sof_intel_dsp_desc *chip_info;
int ret;
- ret = cl_dsp_init(sdev, 0, true);
+ chip_info = get_chip_info(sdev->pdata);
+ if (chip_info->cl_init)
+ ret = chip_info->cl_init(sdev, 0, true);
+ else
+ ret = -EINVAL;
+
if (!ret)
hda_sdw_process_wakeen(sdev);
@@ -389,8 +395,7 @@ int hda_dsp_cl_boot_firmware(struct snd_sof_dev *sdev)
struct snd_dma_buffer dmab;
int ret, ret1, i;
- if (sdev->system_suspend_target < SOF_SUSPEND_S4 &&
- hda->imrboot_supported && !sdev->first_boot) {
+ if (hda->imrboot_supported && !sdev->first_boot && !hda->skip_imr_boot) {
dev_dbg(sdev->dev, "IMR restore supported, booting from IMR directly\n");
hda->boot_iteration = 0;
ret = hda_dsp_boot_imr(sdev);
@@ -431,7 +436,10 @@ int hda_dsp_cl_boot_firmware(struct snd_sof_dev *sdev)
"Attempting iteration %d of Core En/ROM load...\n", i);
hda->boot_iteration = i + 1;
- ret = cl_dsp_init(sdev, hext_stream->hstream.stream_tag, false);
+ if (chip_info->cl_init)
+ ret = chip_info->cl_init(sdev, hext_stream->hstream.stream_tag, false);
+ else
+ ret = -EINVAL;
/* don't retry anymore if successful */
if (!ret)
@@ -471,11 +479,14 @@ int hda_dsp_cl_boot_firmware(struct snd_sof_dev *sdev)
*/
hda->boot_iteration = HDA_FW_BOOT_ATTEMPTS;
ret = hda_cl_copy_fw(sdev, hext_stream);
- if (!ret)
+ if (!ret) {
dev_dbg(sdev->dev, "Firmware download successful, booting...\n");
- else
+ hda->skip_imr_boot = false;
+ } else {
snd_sof_dsp_dbg_dump(sdev, "Firmware download failed",
SOF_DBG_DUMP_PCI | SOF_DBG_DUMP_MBOX);
+ hda->skip_imr_boot = true;
+ }
cleanup:
/*
@@ -530,7 +541,8 @@ int hda_dsp_post_fw_run(struct snd_sof_dev *sdev)
/* Check if IMR boot is usable */
if (!sof_debug_check_flag(SOF_DBG_IGNORE_D3_PERSISTENT) &&
- sdev->fw_ready.flags & SOF_IPC_INFO_D3_PERSISTENT)
+ (sdev->fw_ready.flags & SOF_IPC_INFO_D3_PERSISTENT ||
+ sdev->pdata->ipc_type == SOF_INTEL_IPC4))
hdev->imrboot_supported = true;
}
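
Editor's note: taken together, the hda-dsp.c and hda-loader.c hunks above introduce a new skip_imr_boot flag. It is set when suspending to a state deeper than S3 or when a firmware download fails, and cleared again after a successful download. A minimal sketch of the resulting boot-path decision, reduced to a pure function for illustration (the helper name and standalone form are editorial, not part of the patch):

    #include <stdbool.h>

    /* Editorial sketch: whether the driver boots from IMR or falls back to a
     * full Core En/ROM + firmware download, given the new skip_imr_boot flag.
     */
    static bool use_imr_boot(bool imrboot_supported, bool first_boot,
                             bool skip_imr_boot)
    {
            return imrboot_supported && !first_boot && !skip_imr_boot;
    }
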
diff --git a/sound/soc/sof/intel/hda-probes.c b/sound/soc/sof/intel/hda-probes.c
index 31e85d4aae8c..56a533c63cb0 100644
--- a/sound/soc/sof/intel/hda-probes.c
+++ b/sound/soc/sof/intel/hda-probes.c
@@ -25,9 +25,9 @@ hda_compr_get_stream(struct snd_compr_stream *cstream)
return cstream->runtime->private_data;
}
-static int hda_probes_compr_assign(struct sof_client_dev *cdev,
- struct snd_compr_stream *cstream,
- struct snd_soc_dai *dai, u32 *stream_id)
+static int hda_probes_compr_startup(struct sof_client_dev *cdev,
+ struct snd_compr_stream *cstream,
+ struct snd_soc_dai *dai, u32 *stream_id)
{
struct snd_sof_dev *sdev = sof_client_dev_to_sof_dev(cdev);
struct hdac_ext_stream *hext_stream;
@@ -45,9 +45,9 @@ static int hda_probes_compr_assign(struct sof_client_dev *cdev,
return 0;
}
-static int hda_probes_compr_free(struct sof_client_dev *cdev,
- struct snd_compr_stream *cstream,
- struct snd_soc_dai *dai)
+static int hda_probes_compr_shutdown(struct sof_client_dev *cdev,
+ struct snd_compr_stream *cstream,
+ struct snd_soc_dai *dai)
{
struct hdac_ext_stream *hext_stream = hda_compr_get_stream(cstream);
struct snd_sof_dev *sdev = sof_client_dev_to_sof_dev(cdev);
@@ -127,8 +127,8 @@ static int hda_probes_compr_pointer(struct sof_client_dev *cdev,
/* SOF client implementation */
static const struct sof_probes_host_ops hda_probes_ops = {
- .assign = hda_probes_compr_assign,
- .free = hda_probes_compr_free,
+ .startup = hda_probes_compr_startup,
+ .shutdown = hda_probes_compr_shutdown,
.set_params = hda_probes_compr_set_params,
.trigger = hda_probes_compr_trigger,
.pointer = hda_probes_compr_pointer,
diff --git a/sound/soc/sof/intel/hda-stream.c b/sound/soc/sof/intel/hda-stream.c
index d95ae17e81cc..b58662faa4aa 100644
--- a/sound/soc/sof/intel/hda-stream.c
+++ b/sound/soc/sof/intel/hda-stream.c
@@ -116,13 +116,13 @@ int hda_dsp_stream_setup_bdl(struct snd_sof_dev *sdev,
int remain, ioc;
period_bytes = hstream->period_bytes;
- dev_dbg(sdev->dev, "%s: period_bytes:0x%x\n", __func__, period_bytes);
+ dev_dbg(sdev->dev, "period_bytes:0x%x\n", period_bytes);
if (!period_bytes)
period_bytes = hstream->bufsize;
periods = hstream->bufsize / period_bytes;
- dev_dbg(sdev->dev, "%s: periods:%d\n", __func__, periods);
+ dev_dbg(sdev->dev, "periods:%d\n", periods);
remain = hstream->bufsize % period_bytes;
if (remain)
@@ -271,7 +271,7 @@ int hda_dsp_stream_put(struct snd_sof_dev *sdev, int direction, int stream_tag)
HDA_VS_INTEL_EM2_L1SEN, HDA_VS_INTEL_EM2_L1SEN);
if (!found) {
- dev_dbg(sdev->dev, "%s: stream_tag %d not opened!\n",
+ dev_err(sdev->dev, "%s: stream_tag %d not opened!\n",
__func__, stream_tag);
return -ENODEV;
}
@@ -411,6 +411,11 @@ int hda_dsp_iccmax_stream_hw_params(struct snd_sof_dev *sdev, struct hdac_ext_st
return -ENODEV;
}
+ if (!dmab) {
+ dev_err(sdev->dev, "error: no dma buffer allocated!\n");
+ return -ENODEV;
+ }
+
if (hstream->posbuf)
*hstream->posbuf = 0;
@@ -485,16 +490,16 @@ int hda_dsp_stream_hw_params(struct snd_sof_dev *sdev,
return -ENODEV;
}
- /* decouple host and link DMA */
- mask = 0x1 << hstream->index;
- snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
- mask, mask);
-
if (!dmab) {
dev_err(sdev->dev, "error: no dma buffer allocated!\n");
return -ENODEV;
}
+ /* decouple host and link DMA */
+ mask = 0x1 << hstream->index;
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
+ mask, mask);
+
/* clear stream status */
snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset,
SOF_HDA_CL_DMA_SD_INT_MASK |
diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
index bc07df1fc39f..6d4ecbe14adf 100644
--- a/sound/soc/sof/intel/hda.c
+++ b/sound/soc/sof/intel/hda.c
@@ -147,7 +147,7 @@ static int sdw_free_stream(struct device *dev,
return hda_ctrl_dai_widget_free(w, SOF_DAI_CONFIG_FLAGS_NONE, &data);
}
-static const struct sdw_intel_ops sdw_callback = {
+struct sdw_intel_ops sdw_callback = {
.params_stream = sdw_params_stream,
.free_stream = sdw_free_stream,
};
@@ -353,7 +353,7 @@ static inline bool hda_sdw_check_wakeen_irq(struct snd_sof_dev *sdev)
struct hda_dsp_msg_code {
u32 code;
- const char *msg;
+ const char *text;
};
#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG)
@@ -382,10 +382,7 @@ module_param_named(use_common_hdmi, hda_codec_use_common_hdmi, bool, 0444);
MODULE_PARM_DESC(use_common_hdmi, "SOF HDA use common HDMI codec driver");
#endif
-static const struct hda_dsp_msg_code hda_dsp_rom_msg[] = {
- {HDA_DSP_ROM_FW_MANIFEST_LOADED, "status: manifest loaded"},
- {HDA_DSP_ROM_FW_FW_LOADED, "status: fw loaded"},
- {HDA_DSP_ROM_FW_ENTERED, "status: fw entered"},
+static const struct hda_dsp_msg_code hda_dsp_rom_fw_error_texts[] = {
{HDA_DSP_ROM_CSE_ERROR, "error: cse error"},
{HDA_DSP_ROM_CSE_WRONG_RESPONSE, "error: cse wrong response"},
{HDA_DSP_ROM_IMR_TO_SMALL, "error: IMR too small"},
@@ -404,26 +401,136 @@ static const struct hda_dsp_msg_code hda_dsp_rom_msg[] = {
{HDA_DSP_ROM_NULL_FW_ENTRY, "error: null FW entry point"},
};
-static void hda_dsp_get_status(struct snd_sof_dev *sdev, const char *level)
+#define FSR_ROM_STATE_ENTRY(state) {FSR_STATE_ROM_##state, #state}
+static const struct hda_dsp_msg_code fsr_rom_state_names[] = {
+ FSR_ROM_STATE_ENTRY(INIT),
+ FSR_ROM_STATE_ENTRY(INIT_DONE),
+ FSR_ROM_STATE_ENTRY(CSE_MANIFEST_LOADED),
+ FSR_ROM_STATE_ENTRY(FW_MANIFEST_LOADED),
+ FSR_ROM_STATE_ENTRY(FW_FW_LOADED),
+ FSR_ROM_STATE_ENTRY(FW_ENTERED),
+ FSR_ROM_STATE_ENTRY(VERIFY_FEATURE_MASK),
+ FSR_ROM_STATE_ENTRY(GET_LOAD_OFFSET),
+ FSR_ROM_STATE_ENTRY(FETCH_ROM_EXT),
+ FSR_ROM_STATE_ENTRY(FETCH_ROM_EXT_DONE),
+ /* CSE states */
+ FSR_ROM_STATE_ENTRY(CSE_IMR_REQUEST),
+ FSR_ROM_STATE_ENTRY(CSE_IMR_GRANTED),
+ FSR_ROM_STATE_ENTRY(CSE_VALIDATE_IMAGE_REQUEST),
+ FSR_ROM_STATE_ENTRY(CSE_IMAGE_VALIDATED),
+ FSR_ROM_STATE_ENTRY(CSE_IPC_IFACE_INIT),
+ FSR_ROM_STATE_ENTRY(CSE_IPC_RESET_PHASE_1),
+ FSR_ROM_STATE_ENTRY(CSE_IPC_OPERATIONAL_ENTRY),
+ FSR_ROM_STATE_ENTRY(CSE_IPC_OPERATIONAL),
+ FSR_ROM_STATE_ENTRY(CSE_IPC_DOWN),
+};
+
+#define FSR_BRINGUP_STATE_ENTRY(state) {FSR_STATE_BRINGUP_##state, #state}
+static const struct hda_dsp_msg_code fsr_bringup_state_names[] = {
+ FSR_BRINGUP_STATE_ENTRY(INIT),
+ FSR_BRINGUP_STATE_ENTRY(INIT_DONE),
+ FSR_BRINGUP_STATE_ENTRY(HPSRAM_LOAD),
+ FSR_BRINGUP_STATE_ENTRY(UNPACK_START),
+ FSR_BRINGUP_STATE_ENTRY(IMR_RESTORE),
+ FSR_BRINGUP_STATE_ENTRY(FW_ENTERED),
+};
+
+#define FSR_WAIT_STATE_ENTRY(state) {FSR_WAIT_FOR_##state, #state}
+static const struct hda_dsp_msg_code fsr_wait_state_names[] = {
+ FSR_WAIT_STATE_ENTRY(IPC_BUSY),
+ FSR_WAIT_STATE_ENTRY(IPC_DONE),
+ FSR_WAIT_STATE_ENTRY(CACHE_INVALIDATION),
+ FSR_WAIT_STATE_ENTRY(LP_SRAM_OFF),
+ FSR_WAIT_STATE_ENTRY(DMA_BUFFER_FULL),
+ FSR_WAIT_STATE_ENTRY(CSE_CSR),
+};
+
+#define FSR_MODULE_NAME_ENTRY(mod) [FSR_MOD_##mod] = #mod
+static const char * const fsr_module_names[] = {
+ FSR_MODULE_NAME_ENTRY(ROM),
+ FSR_MODULE_NAME_ENTRY(ROM_BYP),
+ FSR_MODULE_NAME_ENTRY(BASE_FW),
+ FSR_MODULE_NAME_ENTRY(LP_BOOT),
+ FSR_MODULE_NAME_ENTRY(BRNGUP),
+ FSR_MODULE_NAME_ENTRY(ROM_EXT),
+};
+
+static const char *
+hda_dsp_get_state_text(u32 code, const struct hda_dsp_msg_code *msg_code,
+ size_t array_size)
{
- const struct sof_intel_dsp_desc *chip;
- u32 status;
int i;
- chip = get_chip_info(sdev->pdata);
- status = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
- chip->rom_status_reg);
-
- for (i = 0; i < ARRAY_SIZE(hda_dsp_rom_msg); i++) {
- if (status == hda_dsp_rom_msg[i].code) {
- dev_printk(level, sdev->dev, "%s - code %8.8x\n",
- hda_dsp_rom_msg[i].msg, status);
- return;
- }
+ for (i = 0; i < array_size; i++) {
+ if (code == msg_code[i].code)
+ return msg_code[i].text;
}
+ return NULL;
+}
+
+static void hda_dsp_get_state(struct snd_sof_dev *sdev, const char *level)
+{
+ const struct sof_intel_dsp_desc *chip = get_chip_info(sdev->pdata);
+ const char *state_text, *error_text, *module_text;
+ u32 fsr, state, wait_state, module, error_code;
+
+ fsr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->rom_status_reg);
+ state = FSR_TO_STATE_CODE(fsr);
+ wait_state = FSR_TO_WAIT_STATE_CODE(fsr);
+ module = FSR_TO_MODULE_CODE(fsr);
+
+ if (module > FSR_MOD_ROM_EXT)
+ module_text = "unknown";
+ else
+ module_text = fsr_module_names[module];
+
+ if (module == FSR_MOD_BRNGUP)
+ state_text = hda_dsp_get_state_text(state, fsr_bringup_state_names,
+ ARRAY_SIZE(fsr_bringup_state_names));
+ else
+ state_text = hda_dsp_get_state_text(state, fsr_rom_state_names,
+ ARRAY_SIZE(fsr_rom_state_names));
+
/* not for us, must be generic sof message */
- dev_dbg(sdev->dev, "unknown ROM status value %8.8x\n", status);
+ if (!state_text) {
+ dev_printk(level, sdev->dev, "%#010x: unknown ROM status value\n", fsr);
+ return;
+ }
+
+ if (wait_state) {
+ const char *wait_state_text;
+
+ wait_state_text = hda_dsp_get_state_text(wait_state, fsr_wait_state_names,
+ ARRAY_SIZE(fsr_wait_state_names));
+ if (!wait_state_text)
+ wait_state_text = "unknown";
+
+ dev_printk(level, sdev->dev,
+ "%#010x: module: %s, state: %s, waiting for: %s, %s\n",
+ fsr, module_text, state_text, wait_state_text,
+ fsr & FSR_HALTED ? "not running" : "running");
+ } else {
+ dev_printk(level, sdev->dev, "%#010x: module: %s, state: %s, %s\n",
+ fsr, module_text, state_text,
+ fsr & FSR_HALTED ? "not running" : "running");
+ }
+
+ error_code = snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->rom_status_reg + 4);
+ if (!error_code)
+ return;
+
+ error_text = hda_dsp_get_state_text(error_code, hda_dsp_rom_fw_error_texts,
+ ARRAY_SIZE(hda_dsp_rom_fw_error_texts));
+ if (!error_text)
+ error_text = "unknown";
+
+ if (state == FSR_STATE_FW_ENTERED)
+ dev_printk(level, sdev->dev, "status code: %#x (%s)\n", error_code,
+ error_text);
+ else
+ dev_printk(level, sdev->dev, "error code: %#x (%s)\n", error_code,
+ error_text);
}
static void hda_dsp_get_registers(struct snd_sof_dev *sdev,
@@ -467,7 +574,7 @@ static void hda_dsp_dump_ext_rom_status(struct snd_sof_dev *sdev, const char *le
chip = get_chip_info(sdev->pdata);
for (i = 0; i < HDA_EXT_ROM_STATUS_SIZE; i++) {
value = snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->rom_status_reg + i * 0x4);
- len += snprintf(msg + len, sizeof(msg) - len, " 0x%x", value);
+ len += scnprintf(msg + len, sizeof(msg) - len, " 0x%x", value);
}
dev_printk(level, sdev->dev, "extended rom status: %s", msg);
@@ -482,7 +589,7 @@ void hda_dsp_dump(struct snd_sof_dev *sdev, u32 flags)
u32 stack[HDA_DSP_STACK_DUMP_SIZE];
/* print ROM/FW status */
- hda_dsp_get_status(sdev, level);
+ hda_dsp_get_state(sdev, level);
if (flags & SOF_DBG_DUMP_REGS) {
u32 status = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_SRAM_REG_FW_STATUS);
@@ -669,13 +776,12 @@ static const char *fixup_tplg_name(struct snd_sof_dev *sdev,
return tplg_filename;
}
-static int dmic_topology_fixup(struct snd_sof_dev *sdev,
- const char **tplg_filename,
- const char *idisp_str,
- int *dmic_found)
+static int dmic_detect_topology_fixup(struct snd_sof_dev *sdev,
+ const char **tplg_filename,
+ const char *idisp_str,
+ int *dmic_found,
+ bool tplg_fixup)
{
- const char *default_tplg_filename = *tplg_filename;
- const char *fixed_tplg_filename;
const char *dmic_str;
int dmic_num;
@@ -701,14 +807,19 @@ static int dmic_topology_fixup(struct snd_sof_dev *sdev,
break;
}
- fixed_tplg_filename = fixup_tplg_name(sdev, default_tplg_filename,
- idisp_str, dmic_str);
- if (!fixed_tplg_filename)
- return -ENOMEM;
+ if (tplg_fixup) {
+ const char *default_tplg_filename = *tplg_filename;
+ const char *fixed_tplg_filename;
+
+ fixed_tplg_filename = fixup_tplg_name(sdev, default_tplg_filename,
+ idisp_str, dmic_str);
+ if (!fixed_tplg_filename)
+ return -ENOMEM;
+ *tplg_filename = fixed_tplg_filename;
+ }
dev_info(sdev->dev, "DMICs detected in NHLT tables: %d\n", dmic_num);
*dmic_found = dmic_num;
- *tplg_filename = fixed_tplg_filename;
return 0;
}
@@ -1114,6 +1225,8 @@ static void hda_generic_machine_select(struct snd_sof_dev *sdev,
* - one external HDAudio codec
*/
if (!*mach && codec_num <= 2) {
+ bool tplg_fixup;
+
hda_mach = snd_soc_acpi_intel_hda_machines;
dev_info(bus->dev, "using HDA machine driver %s now\n",
@@ -1125,8 +1238,15 @@ static void hda_generic_machine_select(struct snd_sof_dev *sdev,
idisp_str = "";
/* topology: use the info from hda_machines */
- tplg_filename = hda_mach->sof_tplg_filename;
- ret = dmic_topology_fixup(sdev, &tplg_filename, idisp_str, &dmic_num);
+ if (pdata->tplg_filename) {
+ tplg_fixup = false;
+ tplg_filename = pdata->tplg_filename;
+ } else {
+ tplg_fixup = true;
+ tplg_filename = hda_mach->sof_tplg_filename;
+ }
+ ret = dmic_detect_topology_fixup(sdev, &tplg_filename, idisp_str, &dmic_num,
+ tplg_fixup);
if (ret < 0)
return;
@@ -1290,30 +1410,37 @@ static struct snd_soc_acpi_mach *hda_sdw_machine_select(struct snd_sof_dev *sdev
}
if (mach && mach->link_mask) {
int dmic_num = 0;
+ bool tplg_fixup;
+ const char *tplg_filename;
mach->mach_params.links = mach->links;
mach->mach_params.link_mask = mach->link_mask;
mach->mach_params.platform = dev_name(sdev->dev);
- pdata->fw_filename = pdata->desc->default_fw_filename[pdata->ipc_type];
- pdata->tplg_filename = mach->sof_tplg_filename;
+
+ if (pdata->tplg_filename) {
+ tplg_fixup = false;
+ } else {
+ tplg_fixup = true;
+ tplg_filename = mach->sof_tplg_filename;
+ }
/*
* DMICs use up to 4 pins and are typically pin-muxed with SoundWire
- * link 2 and 3, thus we only try to enable dmics if all conditions
- * are true:
- * a) link 2 and 3 are not used by SoundWire
+ * link 2 and 3, or link 1 and 2, thus we only try to enable dmics
+ * if all conditions are true:
+ * a) 2 or fewer links are used by SoundWire
* b) the NHLT table reports the presence of microphones
*/
- if (!(mach->link_mask & GENMASK(3, 2))) {
- const char *tplg_filename = mach->sof_tplg_filename;
+ if (hweight_long(mach->link_mask) <= 2) {
int ret;
- ret = dmic_topology_fixup(sdev, &tplg_filename, "", &dmic_num);
+ ret = dmic_detect_topology_fixup(sdev, &tplg_filename, "",
+ &dmic_num, tplg_fixup);
if (ret < 0)
return NULL;
-
- pdata->tplg_filename = tplg_filename;
}
+ if (tplg_fixup)
+ pdata->tplg_filename = tplg_filename;
mach->mach_params.dmic_num = dmic_num;
dev_dbg(sdev->dev,
@@ -1359,18 +1486,22 @@ struct snd_soc_acpi_mach *hda_machine_select(struct snd_sof_dev *sdev)
mach = snd_soc_acpi_find_machine(desc->machines);
if (mach) {
bool add_extension = false;
+ bool tplg_fixup = false;
/*
* If tplg file name is overridden, use it instead of
* the one set in mach table
*/
- if (!sof_pdata->tplg_filename)
+ if (!sof_pdata->tplg_filename) {
sof_pdata->tplg_filename = mach->sof_tplg_filename;
+ tplg_fixup = true;
+ }
/* report to machine driver if any DMICs are found */
mach->mach_params.dmic_num = check_dmic_num(sdev);
- if (mach->tplg_quirk_mask & SND_SOC_ACPI_TPLG_INTEL_DMIC_NUMBER &&
+ if (tplg_fixup &&
+ mach->tplg_quirk_mask & SND_SOC_ACPI_TPLG_INTEL_DMIC_NUMBER &&
mach->mach_params.dmic_num) {
tplg_filename = devm_kasprintf(sdev->dev, GFP_KERNEL,
"%s%s%d%s",
@@ -1393,8 +1524,10 @@ struct snd_soc_acpi_mach *hda_machine_select(struct snd_sof_dev *sdev)
/* report SSP link mask to machine driver */
mach->mach_params.i2s_link_mask = check_nhlt_ssp_mask(sdev);
- if (mach->tplg_quirk_mask & SND_SOC_ACPI_TPLG_INTEL_SSP_NUMBER &&
+ if (tplg_fixup &&
+ mach->tplg_quirk_mask & SND_SOC_ACPI_TPLG_INTEL_SSP_NUMBER &&
mach->mach_params.i2s_link_mask) {
+ const struct sof_intel_dsp_desc *chip = get_chip_info(sdev->pdata);
int ssp_num;
if (hweight_long(mach->mach_params.i2s_link_mask) > 1 &&
@@ -1404,6 +1537,12 @@ struct snd_soc_acpi_mach *hda_machine_select(struct snd_sof_dev *sdev)
/* fls returns 1-based results, SSPs indices are 0-based */
ssp_num = fls(mach->mach_params.i2s_link_mask) - 1;
+ if (ssp_num >= chip->ssp_count) {
+ dev_err(sdev->dev, "Invalid SSP %d, max on this platform is %d\n",
+ ssp_num, chip->ssp_count);
+ return NULL;
+ }
+
tplg_filename = devm_kasprintf(sdev->dev, GFP_KERNEL,
"%s%s%d",
sof_pdata->tplg_filename,
@@ -1416,7 +1555,7 @@ struct snd_soc_acpi_mach *hda_machine_select(struct snd_sof_dev *sdev)
add_extension = true;
}
- if (add_extension) {
+ if (tplg_fixup && add_extension) {
tplg_filename = devm_kasprintf(sdev->dev, GFP_KERNEL,
"%s%s",
sof_pdata->tplg_filename,
diff --git a/sound/soc/sof/intel/hda.h b/sound/soc/sof/intel/hda.h
index 06476ffe96d7..5ef3e8775e36 100644
--- a/sound/soc/sof/intel/hda.h
+++ b/sound/soc/sof/intel/hda.h
@@ -187,6 +187,69 @@
#define HDA_DSP_STACK_DUMP_SIZE 32
+/* ROM/FW status register */
+#define FSR_STATE_MASK GENMASK(23, 0)
+#define FSR_WAIT_STATE_MASK GENMASK(27, 24)
+#define FSR_MODULE_MASK GENMASK(30, 28)
+#define FSR_HALTED BIT(31)
+#define FSR_TO_STATE_CODE(x) ((x) & FSR_STATE_MASK)
+#define FSR_TO_WAIT_STATE_CODE(x) (((x) & FSR_WAIT_STATE_MASK) >> 24)
+#define FSR_TO_MODULE_CODE(x) (((x) & FSR_MODULE_MASK) >> 28)
+
+/* Wait states */
+#define FSR_WAIT_FOR_IPC_BUSY 0x1
+#define FSR_WAIT_FOR_IPC_DONE 0x2
+#define FSR_WAIT_FOR_CACHE_INVALIDATION 0x3
+#define FSR_WAIT_FOR_LP_SRAM_OFF 0x4
+#define FSR_WAIT_FOR_DMA_BUFFER_FULL 0x5
+#define FSR_WAIT_FOR_CSE_CSR 0x6
+
+/* Module codes */
+#define FSR_MOD_ROM 0x0
+#define FSR_MOD_ROM_BYP 0x1
+#define FSR_MOD_BASE_FW 0x2
+#define FSR_MOD_LP_BOOT 0x3
+#define FSR_MOD_BRNGUP 0x4
+#define FSR_MOD_ROM_EXT 0x5
+
+/* State codes (module dependent) */
+/* Module independent states */
+#define FSR_STATE_INIT 0x0
+#define FSR_STATE_INIT_DONE 0x1
+#define FSR_STATE_FW_ENTERED 0x5
+
+/* ROM states */
+#define FSR_STATE_ROM_INIT FSR_STATE_INIT
+#define FSR_STATE_ROM_INIT_DONE FSR_STATE_INIT_DONE
+#define FSR_STATE_ROM_CSE_MANIFEST_LOADED 0x2
+#define FSR_STATE_ROM_FW_MANIFEST_LOADED 0x3
+#define FSR_STATE_ROM_FW_FW_LOADED 0x4
+#define FSR_STATE_ROM_FW_ENTERED FSR_STATE_FW_ENTERED
+#define FSR_STATE_ROM_VERIFY_FEATURE_MASK 0x6
+#define FSR_STATE_ROM_GET_LOAD_OFFSET 0x7
+#define FSR_STATE_ROM_FETCH_ROM_EXT 0x8
+#define FSR_STATE_ROM_FETCH_ROM_EXT_DONE 0x9
+
+/* (ROM) CSE states */
+#define FSR_STATE_ROM_CSE_IMR_REQUEST 0x10
+#define FSR_STATE_ROM_CSE_IMR_GRANTED 0x11
+#define FSR_STATE_ROM_CSE_VALIDATE_IMAGE_REQUEST 0x12
+#define FSR_STATE_ROM_CSE_IMAGE_VALIDATED 0x13
+
+#define FSR_STATE_ROM_CSE_IPC_IFACE_INIT 0x20
+#define FSR_STATE_ROM_CSE_IPC_RESET_PHASE_1 0x21
+#define FSR_STATE_ROM_CSE_IPC_OPERATIONAL_ENTRY 0x22
+#define FSR_STATE_ROM_CSE_IPC_OPERATIONAL 0x23
+#define FSR_STATE_ROM_CSE_IPC_DOWN 0x24
+
+/* BRINGUP (or BRNGUP) states */
+#define FSR_STATE_BRINGUP_INIT FSR_STATE_INIT
+#define FSR_STATE_BRINGUP_INIT_DONE FSR_STATE_INIT_DONE
+#define FSR_STATE_BRINGUP_HPSRAM_LOAD 0x2
+#define FSR_STATE_BRINGUP_UNPACK_START 0x3
+#define FSR_STATE_BRINGUP_IMR_RESTORE 0x4
+#define FSR_STATE_BRINGUP_FW_ENTERED FSR_STATE_FW_ENTERED
+
/* ROM status/error values */
#define HDA_DSP_ROM_STS_MASK GENMASK(23, 0)
#define HDA_DSP_ROM_INIT 0x1
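
Editor's note: the FSR_* additions above split the ROM/FW status register into a state code (bits 23:0), a wait-state code (bits 27:24), a module code (bits 30:28) and a halted flag (bit 31). A small standalone decode, mirroring what hda_dsp_get_state() does with these macros, might look like the following (illustrative only; the mask values are copied from the definitions above, the function itself is not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    /* Editorial sketch of the FSR field split used by hda_dsp_get_state() */
    static void decode_fsr(uint32_t fsr)
    {
            uint32_t state = fsr & 0x00ffffff;   /* FSR_TO_STATE_CODE()      */
            uint32_t wait  = (fsr >> 24) & 0xf;  /* FSR_TO_WAIT_STATE_CODE() */
            uint32_t mod   = (fsr >> 28) & 0x7;  /* FSR_TO_MODULE_CODE()     */

            printf("module %u, state %#x, waiting for %#x, %s\n",
                   mod, state, wait,
                   (fsr & (1u << 31)) ? "not running" : "running");
    }
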
@@ -419,6 +482,7 @@ enum sof_hda_D0_substate {
/* represents DSP HDA controller frontend - i.e. host facing control */
struct sof_intel_hda_dev {
bool imrboot_supported;
+ bool skip_imr_boot;
int boot_iteration;
@@ -605,6 +669,7 @@ struct hdac_ext_stream *hda_cl_stream_prepare(struct snd_sof_dev *sdev, unsigned
int direction);
int hda_cl_cleanup(struct snd_sof_dev *sdev, struct snd_dma_buffer *dmab,
struct hdac_ext_stream *hext_stream);
+int cl_dsp_init(struct snd_sof_dev *sdev, int stream_tag, bool imr_boot);
#define HDA_CL_STREAM_FORMAT 0x40
/* pre and post fw run ops */
@@ -716,6 +781,8 @@ extern struct snd_sof_dsp_ops sof_tgl_ops;
int sof_tgl_ops_init(struct snd_sof_dev *sdev);
extern struct snd_sof_dsp_ops sof_icl_ops;
int sof_icl_ops_init(struct snd_sof_dev *sdev);
+extern struct snd_sof_dsp_ops sof_mtl_ops;
+int sof_mtl_ops_init(struct snd_sof_dev *sdev);
extern const struct sof_intel_dsp_desc apl_chip_info;
extern const struct sof_intel_dsp_desc cnl_chip_info;
@@ -725,6 +792,7 @@ extern const struct sof_intel_dsp_desc tglh_chip_info;
extern const struct sof_intel_dsp_desc ehl_chip_info;
extern const struct sof_intel_dsp_desc jsl_chip_info;
extern const struct sof_intel_dsp_desc adls_chip_info;
+extern const struct sof_intel_dsp_desc mtl_chip_info;
/* Probes support */
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_PROBES)
@@ -767,11 +835,13 @@ int hda_ctrl_dai_widget_free(struct snd_soc_dapm_widget *w, unsigned int quirk_f
extern int sof_hda_position_quirk;
void hda_set_dai_drv_ops(struct snd_sof_dev *sdev, struct snd_sof_dsp_ops *ops);
+void hda_ops_free(struct snd_sof_dev *sdev);
/* IPC4 */
irqreturn_t cnl_ipc4_irq_thread(int irq, void *context);
int cnl_ipc4_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg);
irqreturn_t hda_dsp_ipc4_irq_thread(int irq, void *context);
int hda_dsp_ipc4_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg);
+extern struct sdw_intel_ops sdw_callback;
#endif
diff --git a/sound/soc/sof/intel/icl.c b/sound/soc/sof/intel/icl.c
index f19517dffd62..4e37b7fe0627 100644
--- a/sound/soc/sof/intel/icl.c
+++ b/sound/soc/sof/intel/icl.c
@@ -152,6 +152,7 @@ const struct sof_intel_dsp_desc icl_chip_info = {
.sdw_alh_base = SDW_ALH_BASE,
.check_sdw_irq = hda_common_check_sdw_irq,
.check_ipc_irq = hda_dsp_check_ipc_irq,
+ .cl_init = cl_dsp_init,
.hw_ip_version = SOF_INTEL_CAVS_2_0,
};
EXPORT_SYMBOL_NS(icl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
diff --git a/sound/soc/sof/intel/mtl.c b/sound/soc/sof/intel/mtl.c
new file mode 100644
index 000000000000..96239ebb1eed
--- /dev/null
+++ b/sound/soc/sof/intel/mtl.c
@@ -0,0 +1,794 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// Copyright(c) 2022 Intel Corporation. All rights reserved.
+//
+// Authors: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+//
+
+/*
+ * Hardware interface for audio DSP on Meteorlake.
+ */
+
+#include <linux/firmware.h>
+#include <sound/sof/ipc4/header.h>
+#include "../ipc4-priv.h"
+#include "../ops.h"
+#include "hda.h"
+#include "hda-ipc.h"
+#include "../sof-audio.h"
+#include "mtl.h"
+
+static const struct snd_sof_debugfs_map mtl_dsp_debugfs[] = {
+ {"hda", HDA_DSP_HDA_BAR, 0, 0x4000, SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"pp", HDA_DSP_PP_BAR, 0, 0x1000, SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dsp", HDA_DSP_BAR, 0, 0x10000, SOF_DEBUGFS_ACCESS_ALWAYS},
+};
+
+static void mtl_ipc_host_done(struct snd_sof_dev *sdev)
+{
+ /*
+ * clear the busy interrupt to tell the dsp controller this interrupt has been
+ * accepted and will not be triggered again
+ */
+ snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXTDR,
+ MTL_DSP_REG_HFIPCXTDR_BUSY, MTL_DSP_REG_HFIPCXTDR_BUSY);
+ /*
+ * clear the busy bit to ack the dsp that the msg has been processed and to send the reply msg to the dsp
+ */
+ snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXTDA,
+ MTL_DSP_REG_HFIPCXTDA_BUSY, 0);
+}
+
+static void mtl_ipc_dsp_done(struct snd_sof_dev *sdev)
+{
+ /*
+ * set DONE bit - tell the DSP we have received and processed its reply msg,
+ * so it does not send more replies to the host
+ */
+ snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXIDA,
+ MTL_DSP_REG_HFIPCXIDA_DONE, MTL_DSP_REG_HFIPCXIDA_DONE);
+
+ /* unmask Done interrupt */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXCTL,
+ MTL_DSP_REG_HFIPCXCTL_DONE, MTL_DSP_REG_HFIPCXCTL_DONE);
+}
+
+/* Check if an IPC IRQ occurred */
+static bool mtl_dsp_check_ipc_irq(struct snd_sof_dev *sdev)
+{
+ u32 irq_status;
+ u32 hfintipptr;
+
+ /* read Interrupt IP Pointer */
+ hfintipptr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_HFINTIPPTR) & MTL_HFINTIPPTR_PTR_MASK;
+ irq_status = snd_sof_dsp_read(sdev, HDA_DSP_BAR, hfintipptr + MTL_DSP_IRQSTS);
+
+ dev_vdbg(sdev->dev, "irq handler: irq_status:0x%x\n", irq_status);
+
+ if (irq_status != U32_MAX && (irq_status & MTL_DSP_IRQSTS_IPC))
+ return true;
+
+ return false;
+}
+
+/* Check if an SDW IRQ occurred */
+static bool mtl_dsp_check_sdw_irq(struct snd_sof_dev *sdev)
+{
+ u32 irq_status;
+ u32 hfintipptr;
+
+ /* read Interrupt IP Pointer */
+ hfintipptr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_HFINTIPPTR) & MTL_HFINTIPPTR_PTR_MASK;
+ irq_status = snd_sof_dsp_read(sdev, HDA_DSP_BAR, hfintipptr + MTL_DSP_IRQSTS);
+
+ if (irq_status != U32_MAX && (irq_status & MTL_DSP_IRQSTS_SDW))
+ return true;
+
+ return false;
+}
+
+static int mtl_ipc_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
+{
+ struct sof_ipc4_msg *msg_data = msg->msg_data;
+
+ /* send the message via mailbox */
+ if (msg_data->data_size)
+ sof_mailbox_write(sdev, sdev->host_box.offset, msg_data->data_ptr,
+ msg_data->data_size);
+
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXIDDY,
+ msg_data->extension);
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXIDR,
+ msg_data->primary | MTL_DSP_REG_HFIPCXIDR_BUSY);
+
+ return 0;
+}
+
+static void mtl_enable_ipc_interrupts(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+
+ /* enable IPC DONE and BUSY interrupts */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
+ MTL_DSP_REG_HFIPCXCTL_BUSY | MTL_DSP_REG_HFIPCXCTL_DONE,
+ MTL_DSP_REG_HFIPCXCTL_BUSY | MTL_DSP_REG_HFIPCXCTL_DONE);
+}
+
+static void mtl_disable_ipc_interrupts(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+
+ /* disable IPC DONE and BUSY interrupts */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
+ MTL_DSP_REG_HFIPCXCTL_BUSY | MTL_DSP_REG_HFIPCXCTL_DONE, 0);
+}
+
+static int mtl_enable_interrupts(struct snd_sof_dev *sdev)
+{
+ u32 hfintipptr;
+ u32 irqinten;
+ u32 host_ipc;
+ u32 hipcie;
+ int ret;
+
+ /* read Interrupt IP Pointer */
+ hfintipptr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_HFINTIPPTR) & MTL_HFINTIPPTR_PTR_MASK;
+
+ /* Enable Host IPC and SOUNDWIRE */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, hfintipptr,
+ MTL_IRQ_INTEN_L_HOST_IPC_MASK | MTL_IRQ_INTEN_L_SOUNDWIRE_MASK,
+ MTL_IRQ_INTEN_L_HOST_IPC_MASK | MTL_IRQ_INTEN_L_SOUNDWIRE_MASK);
+
+ /* check if operation was successful */
+ host_ipc = MTL_IRQ_INTEN_L_HOST_IPC_MASK | MTL_IRQ_INTEN_L_SOUNDWIRE_MASK;
+ irqinten = snd_sof_dsp_read(sdev, HDA_DSP_BAR, hfintipptr);
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, hfintipptr, irqinten,
+ (irqinten & host_ipc) == host_ipc,
+ HDA_DSP_REG_POLL_INTERVAL_US, HDA_DSP_RESET_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed to enable Host IPC and/or SOUNDWIRE\n");
+ return ret;
+ }
+
+ /* Set Host IPC interrupt enable */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, MTL_DSP_REG_HfHIPCIE,
+ MTL_DSP_REG_HfHIPCIE_IE_MASK, MTL_DSP_REG_HfHIPCIE_IE_MASK);
+
+ /* check if operation was successful */
+ host_ipc = MTL_DSP_REG_HfHIPCIE_IE_MASK;
+ hipcie = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HfHIPCIE);
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, MTL_DSP_REG_HfHIPCIE, hipcie,
+ (hipcie & host_ipc) == host_ipc,
+ HDA_DSP_REG_POLL_INTERVAL_US, HDA_DSP_RESET_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed to set Host IPC interrupt enable\n");
+ return ret;
+ }
+
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, MTL_DSP_REG_HfSNDWIE,
+ MTL_DSP_REG_HfSNDWIE_IE_MASK, MTL_DSP_REG_HfSNDWIE_IE_MASK);
+ host_ipc = MTL_DSP_REG_HfSNDWIE_IE_MASK;
+ hipcie = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HfSNDWIE);
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, MTL_DSP_REG_HfSNDWIE, hipcie,
+ (hipcie & host_ipc) == host_ipc,
+ HDA_DSP_REG_POLL_INTERVAL_US, HDA_DSP_RESET_TIMEOUT_US);
+ if (ret < 0)
+ dev_err(sdev->dev, "failed to set SoundWire IPC interrupt enable\n");
+
+ return ret;
+}
+
+static int mtl_disable_interrupts(struct snd_sof_dev *sdev)
+{
+ u32 hfintipptr;
+ u32 irqinten;
+ u32 host_ipc;
+ u32 hipcie;
+ int ret1;
+ int ret;
+
+ /* read Interrupt IP Pointer */
+ hfintipptr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_HFINTIPPTR) & MTL_HFINTIPPTR_PTR_MASK;
+
+ /* Disable Host IPC and SOUNDWIRE */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, hfintipptr,
+ MTL_IRQ_INTEN_L_HOST_IPC_MASK | MTL_IRQ_INTEN_L_SOUNDWIRE_MASK, 0);
+
+ /* check if operation was successful */
+ host_ipc = MTL_IRQ_INTEN_L_HOST_IPC_MASK | MTL_IRQ_INTEN_L_SOUNDWIRE_MASK;
+ irqinten = snd_sof_dsp_read(sdev, HDA_DSP_BAR, hfintipptr);
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, hfintipptr, irqinten,
+ (irqinten & host_ipc) == 0,
+ HDA_DSP_REG_POLL_INTERVAL_US, HDA_DSP_RESET_TIMEOUT_US);
+ /* Continue to disable other interrupts when error happens */
+ if (ret < 0)
+ dev_err(sdev->dev, "failed to disable Host IPC and SoundWire\n");
+
+ /* Set Host IPC interrupt disable */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, MTL_DSP_REG_HfHIPCIE,
+ MTL_DSP_REG_HfHIPCIE_IE_MASK, 0);
+
+ /* check if operation was successful */
+ host_ipc = MTL_DSP_REG_HfHIPCIE_IE_MASK;
+ hipcie = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HfHIPCIE);
+ ret1 = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, MTL_DSP_REG_HfHIPCIE, hipcie,
+ (hipcie & host_ipc) == 0,
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_RESET_TIMEOUT_US);
+ if (ret1 < 0) {
+ dev_err(sdev->dev, "failed to set Host IPC interrupt disable\n");
+ if (!ret)
+ ret = ret1;
+ }
+
+ /* Set SoundWire IPC interrupt disable */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, MTL_DSP_REG_HfSNDWIE,
+ MTL_DSP_REG_HfSNDWIE_IE_MASK, 0);
+ host_ipc = MTL_DSP_REG_HfSNDWIE_IE_MASK;
+ hipcie = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HfSNDWIE);
+ ret1 = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, MTL_DSP_REG_HfSNDWIE, hipcie,
+ (hipcie & host_ipc) == 0,
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_RESET_TIMEOUT_US);
+ if (ret1 < 0) {
+ dev_err(sdev->dev, "failed to set SoundWire IPC interrupt disable\n");
+ if (!ret)
+ ret = ret1;
+ }
+
+ return ret;
+}
+
+/* pre fw run operations */
+static int mtl_dsp_pre_fw_run(struct snd_sof_dev *sdev)
+{
+ u32 dsphfpwrsts;
+ u32 dsphfdsscs;
+ u32 cpa;
+ u32 pgs;
+ int ret;
+
+ /* Set the DSP subsystem power on */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, MTL_HFDSSCS,
+ MTL_HFDSSCS_SPA_MASK, MTL_HFDSSCS_SPA_MASK);
+
+ /* Wait for unstable CPA read (1 then 0 then 1) just after setting SPA bit */
+ usleep_range(1000, 1010);
+
+ /* poll with timeout to check if operation successful */
+ cpa = MTL_HFDSSCS_CPA_MASK;
+ dsphfdsscs = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_HFDSSCS);
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, MTL_HFDSSCS, dsphfdsscs,
+ (dsphfdsscs & cpa) == cpa, HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_RESET_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed to enable DSP subsystem\n");
+ return ret;
+ }
+
+ /* Power up gated-DSP-0 domain in order to access the DSP shim register block. */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, MTL_HFPWRCTL,
+ MTL_HFPWRCTL_WPDSPHPXPG, MTL_HFPWRCTL_WPDSPHPXPG);
+
+ usleep_range(1000, 1010);
+
+ /* poll with timeout to check if operation successful */
+ pgs = MTL_HFPWRSTS_DSPHPXPGS_MASK;
+ dsphfpwrsts = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_HFPWRSTS);
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, MTL_HFPWRSTS, dsphfpwrsts,
+ (dsphfpwrsts & pgs) == pgs,
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_RESET_TIMEOUT_US);
+ if (ret < 0)
+ dev_err(sdev->dev, "failed to power up gated DSP domain\n");
+
+ /* make sure SoundWire is not power-gated */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, MTL_HFPWRCTL,
+ MTL_HfPWRCTL_WPIOXPG(1), MTL_HfPWRCTL_WPIOXPG(1));
+ return ret;
+}
+
+static int mtl_dsp_post_fw_run(struct snd_sof_dev *sdev)
+{
+ int ret;
+
+ if (sdev->first_boot) {
+ struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
+
+ ret = hda_sdw_startup(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "could not startup SoundWire links\n");
+ return ret;
+ }
+
+ /* Check if IMR boot is usable */
+ if (!sof_debug_check_flag(SOF_DBG_IGNORE_D3_PERSISTENT))
+ hdev->imrboot_supported = true;
+ }
+
+ hda_sdw_int_enable(sdev, true);
+ return 0;
+}
+
+static void mtl_dsp_dump(struct snd_sof_dev *sdev, u32 flags)
+{
+ char *level = (flags & SOF_DBG_DUMP_OPTIONAL) ? KERN_DEBUG : KERN_ERR;
+ u32 romdbgsts;
+ u32 romdbgerr;
+ u32 fwsts;
+ u32 fwlec;
+
+ fwsts = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_ROM_STS);
+ fwlec = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_ROM_ERROR);
+ romdbgsts = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFFLGPXQWY);
+ romdbgerr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFFLGPXQWY_ERROR);
+
+ dev_err(sdev->dev, "ROM status: %#x, ROM error: %#x\n", fwsts, fwlec);
+ dev_err(sdev->dev, "ROM debug status: %#x, ROM debug error: %#x\n", romdbgsts,
+ romdbgerr);
+ romdbgsts = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFFLGPXQWY + 0x8 * 3);
+ dev_printk(level, sdev->dev, "ROM feature bit%s enabled\n",
+ romdbgsts & BIT(24) ? "" : " not");
+}
+
+static bool mtl_dsp_primary_core_is_enabled(struct snd_sof_dev *sdev)
+{
+ int val;
+
+ val = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP2CXCTL_PRIMARY_CORE);
+ if (val != U32_MAX && val & MTL_DSP2CXCTL_PRIMARY_CORE_CPA_MASK)
+ return true;
+
+ return false;
+}
+
+static int mtl_dsp_core_power_up(struct snd_sof_dev *sdev, int core)
+{
+ unsigned int cpa;
+ u32 dspcxctl;
+ int ret;
+
+ /* Only the primary core can be powered up by the host */
+ if (core != SOF_DSP_PRIMARY_CORE || mtl_dsp_primary_core_is_enabled(sdev))
+ return 0;
+
+ /* Program the owner of the IP & shim registers (10: Host CPU) */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, MTL_DSP2CXCTL_PRIMARY_CORE,
+ MTL_DSP2CXCTL_PRIMARY_CORE_OSEL,
+ 0x2 << MTL_DSP2CXCTL_PRIMARY_CORE_OSEL_SHIFT);
+
+ /* enable SPA bit */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, MTL_DSP2CXCTL_PRIMARY_CORE,
+ MTL_DSP2CXCTL_PRIMARY_CORE_SPA_MASK,
+ MTL_DSP2CXCTL_PRIMARY_CORE_SPA_MASK);
+
+ /* Wait for unstable CPA read (1 then 0 then 1) just after setting SPA bit */
+ usleep_range(1000, 1010);
+
+ /* poll with timeout to check if operation successful */
+ cpa = MTL_DSP2CXCTL_PRIMARY_CORE_CPA_MASK;
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, MTL_DSP2CXCTL_PRIMARY_CORE, dspcxctl,
+ (dspcxctl & cpa) == cpa, HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_RESET_TIMEOUT_US);
+ if (ret < 0)
+ dev_err(sdev->dev, "%s: timeout on MTL_DSP2CXCTL_PRIMARY_CORE read\n",
+ __func__);
+
+ return ret;
+}
+
+static int mtl_dsp_core_power_down(struct snd_sof_dev *sdev, int core)
+{
+ u32 dspcxctl;
+ int ret;
+
+ /* Only the primary core can be powered down by the host */
+ if (core != SOF_DSP_PRIMARY_CORE || !mtl_dsp_primary_core_is_enabled(sdev))
+ return 0;
+
+ /* disable SPA bit */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, MTL_DSP2CXCTL_PRIMARY_CORE,
+ MTL_DSP2CXCTL_PRIMARY_CORE_SPA_MASK, 0);
+
+ /* Wait for unstable CPA read (1 then 0 then 1) just after clearing the SPA bit */
+ usleep_range(1000, 1010);
+
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, MTL_DSP2CXCTL_PRIMARY_CORE, dspcxctl,
+ !(dspcxctl & MTL_DSP2CXCTL_PRIMARY_CORE_CPA_MASK),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_PD_TIMEOUT * USEC_PER_MSEC);
+ if (ret < 0)
+ dev_err(sdev->dev, "failed to power down primary core\n");
+
+ return ret;
+}
+
+static int mtl_dsp_cl_init(struct snd_sof_dev *sdev, int stream_tag, bool imr_boot)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+ unsigned int status;
+ u32 ipc_hdr;
+ int ret;
+
+ /* step 1: purge FW request */
+ ipc_hdr = chip->ipc_req_mask | HDA_DSP_ROM_IPC_CONTROL;
+ if (!imr_boot)
+ ipc_hdr |= HDA_DSP_ROM_IPC_PURGE_FW | ((stream_tag - 1) << 9);
+
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, chip->ipc_req, ipc_hdr);
+
+ /* step 2: power up primary core */
+ ret = mtl_dsp_core_power_up(sdev, SOF_DSP_PRIMARY_CORE);
+ if (ret < 0) {
+ if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
+ dev_err(sdev->dev, "dsp core 0/1 power up failed\n");
+ goto err;
+ }
+
+ dev_dbg(sdev->dev, "Primary core power up successful\n");
+
+ /* step 3: wait for IPC DONE bit from ROM */
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, chip->ipc_ack, status,
+ ((status & chip->ipc_ack_mask) == chip->ipc_ack_mask),
+ HDA_DSP_REG_POLL_INTERVAL_US, MTL_DSP_PURGE_TIMEOUT_US);
+ if (ret < 0) {
+ if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
+ dev_err(sdev->dev, "timeout waiting for purge IPC done\n");
+ goto err;
+ }
+
+ /* set DONE bit to clear the reply IPC message */
+ snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR, chip->ipc_ack, chip->ipc_ack_mask,
+ chip->ipc_ack_mask);
+
+ /* step 4: enable interrupts */
+ ret = mtl_enable_interrupts(sdev);
+ if (ret < 0) {
+ if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
+ dev_err(sdev->dev, "%s: failed to enable interrupts\n", __func__);
+ goto err;
+ }
+
+ mtl_enable_ipc_interrupts(sdev);
+
+ /*
+ * ACE workaround: don't wait for ROM INIT.
+ * The platform cannot catch ROM_INIT_DONE because of a very short
+ * timing window. Follow the recommendations and skip this part.
+ */
+
+ return 0;
+
+err:
+ snd_sof_dsp_dbg_dump(sdev, "MTL DSP init fail", 0);
+ mtl_dsp_core_power_down(sdev, SOF_DSP_PRIMARY_CORE);
+ return ret;
+}
+
+static irqreturn_t mtl_ipc_irq_thread(int irq, void *context)
+{
+ struct sof_ipc4_msg notification_data = {{ 0 }};
+ struct snd_sof_dev *sdev = context;
+ bool ipc_irq = false;
+ u32 hipcida;
+ u32 hipctdr;
+
+ hipcida = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXIDA);
+
+ /* reply message from DSP */
+ if (hipcida & MTL_DSP_REG_HFIPCXIDA_DONE) {
+ /* DSP received the message */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXCTL,
+ MTL_DSP_REG_HFIPCXCTL_DONE, 0);
+
+ mtl_ipc_dsp_done(sdev);
+
+ ipc_irq = true;
+ }
+
+ hipctdr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXTDR);
+ if (hipctdr & MTL_DSP_REG_HFIPCXTDR_BUSY) {
+ /* Message from DSP (reply or notification) */
+ u32 extension = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXTDDY);
+ u32 primary = hipctdr & MTL_DSP_REG_HFIPCXTDR_MSG_MASK;
+
+ /*
+ * ACE fw sends a new fw ipc message to host to
+ * notify the status of the last host ipc message
+ */
+ if (primary & SOF_IPC4_MSG_DIR_MASK) {
+ /* Reply received */
+ if (likely(sdev->fw_state == SOF_FW_BOOT_COMPLETE)) {
+ struct sof_ipc4_msg *data = sdev->ipc->msg.reply_data;
+
+ data->primary = primary;
+ data->extension = extension;
+
+ spin_lock_irq(&sdev->ipc_lock);
+
+ snd_sof_ipc_get_reply(sdev);
+ snd_sof_ipc_reply(sdev, data->primary);
+
+ spin_unlock_irq(&sdev->ipc_lock);
+ } else {
+ dev_dbg_ratelimited(sdev->dev,
+ "IPC reply before FW_READY: %#x|%#x\n",
+ primary, extension);
+ }
+ } else {
+ /* Notification received */
+ notification_data.primary = primary;
+ notification_data.extension = extension;
+
+ sdev->ipc->msg.rx_data = &notification_data;
+ snd_sof_ipc_msgs_rx(sdev);
+ sdev->ipc->msg.rx_data = NULL;
+ }
+
+ mtl_ipc_host_done(sdev);
+
+ ipc_irq = true;
+ }
+
+ if (!ipc_irq) {
+ /* This interrupt is not shared so no need to return IRQ_NONE. */
+ dev_dbg_ratelimited(sdev->dev, "nothing to do in IPC IRQ thread\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int mtl_dsp_ipc_get_mailbox_offset(struct snd_sof_dev *sdev)
+{
+ return MTL_DSP_MBOX_UPLINK_OFFSET;
+}
+
+static int mtl_dsp_ipc_get_window_offset(struct snd_sof_dev *sdev, u32 id)
+{
+ return MTL_SRAM_WINDOW_OFFSET(id);
+}
+
+static int mtl_suspend(struct snd_sof_dev *sdev, bool runtime_suspend)
+{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ struct hdac_bus *bus = sof_to_bus(sdev);
+#endif
+ u32 dsphfdsscs;
+ u32 cpa;
+ int ret;
+ int i;
+
+ mtl_disable_ipc_interrupts(sdev);
+ ret = mtl_disable_interrupts(sdev);
+ if (ret)
+ return ret;
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ hda_codec_jack_wake_enable(sdev, runtime_suspend);
+ /* power down all HDA links */
+ snd_hdac_ext_bus_link_power_down_all(bus);
+#endif
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, MTL_HFPWRCTL,
+ MTL_HFPWRCTL_WPDSPHPXPG, 0);
+
+ /* Set the DSP subsystem power down */
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, MTL_HFDSSCS,
+ MTL_HFDSSCS_SPA_MASK, 0);
+
+ /* Wait for unstable CPA read (1 then 0 then 1) just after clearing the SPA bit */
+ usleep_range(1000, 1010);
+
+ /* poll with timeout to check if operation successful */
+ cpa = MTL_HFDSSCS_CPA_MASK;
+ dsphfdsscs = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_HFDSSCS);
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, MTL_HFDSSCS, dsphfdsscs,
+ (dsphfdsscs & cpa) == 0, HDA_DSP_REG_POLL_INTERVAL_US,
+ HDA_DSP_RESET_TIMEOUT_US);
+ if (ret < 0)
+ dev_err(sdev->dev, "failed to disable DSP subsystem\n");
+
+ /* reset ref counts for all cores */
+ for (i = 0; i < chip->cores_num; i++)
+ sdev->dsp_core_ref_count[i] = 0;
+
+ /* TODO: need to reset controller? */
+
+ /* display codec can be powered off after link reset */
+ hda_codec_i915_display_power(sdev, false);
+
+ return 0;
+}
+
+static int mtl_dsp_suspend(struct snd_sof_dev *sdev, u32 target_state)
+{
+ const struct sof_dsp_power_state target_dsp_state = {
+ .state = target_state,
+ .substate = target_state == SOF_DSP_PM_D0 ?
+ SOF_HDA_DSP_PM_D0I3 : 0,
+ };
+ int ret;
+
+ ret = mtl_suspend(sdev, false);
+ if (ret < 0)
+ return ret;
+
+ return snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
+}
+
+static int mtl_dsp_runtime_suspend(struct snd_sof_dev *sdev)
+{
+ const struct sof_dsp_power_state target_state = {
+ .state = SOF_DSP_PM_D3,
+ };
+ int ret;
+
+ ret = mtl_suspend(sdev, true);
+ if (ret < 0)
+ return ret;
+
+ return snd_sof_dsp_set_power_state(sdev, &target_state);
+}
+
+static int mtl_resume(struct snd_sof_dev *sdev, bool runtime_resume)
+{
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct hdac_ext_link *hlink = NULL;
+#endif
+
+ /* display codec must be powered before link reset */
+ hda_codec_i915_display_power(sdev, true);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ /* check jack status */
+ if (runtime_resume) {
+ hda_codec_jack_wake_enable(sdev, false);
+ if (sdev->system_suspend_target == SOF_SUSPEND_NONE)
+ hda_codec_jack_check(sdev);
+ }
+
+ /* turn off the links that were off before suspend */
+ list_for_each_entry(hlink, &bus->hlink_list, list) {
+ if (!hlink->ref_count)
+ snd_hdac_ext_bus_link_power_down(hlink);
+ }
+
+ /* check dma status and clean up CORB/RIRB buffers */
+ if (!bus->cmd_dma_state)
+ snd_hdac_bus_stop_cmd_io(bus);
+#endif
+
+ return 0;
+}
+
+static int mtl_dsp_resume(struct snd_sof_dev *sdev)
+{
+ const struct sof_dsp_power_state target_state = {
+ .state = SOF_DSP_PM_D0,
+ .substate = SOF_HDA_DSP_PM_D0I0,
+ };
+ int ret;
+
+ ret = mtl_resume(sdev, false);
+ if (ret < 0)
+ return ret;
+
+ return snd_sof_dsp_set_power_state(sdev, &target_state);
+}
+
+static int mtl_dsp_runtime_resume(struct snd_sof_dev *sdev)
+{
+ const struct sof_dsp_power_state target_state = {
+ .state = SOF_DSP_PM_D0,
+ };
+ int ret;
+
+ ret = mtl_resume(sdev, true);
+ if (ret < 0)
+ return ret;
+
+ return snd_sof_dsp_set_power_state(sdev, &target_state);
+}
+
+static void mtl_ipc_dump(struct snd_sof_dev *sdev)
+{
+ u32 hipcctl;
+ u32 hipcida;
+ u32 hipctdr;
+
+ /* read IPC status */
+ hipcida = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXIDA);
+ hipcctl = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXCTL);
+ hipctdr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, MTL_DSP_REG_HFIPCXTDR);
+
+ /* dump the IPC regs */
+ /* TODO: parse the raw msg */
+ dev_err(sdev->dev,
+ "error: host status 0x%8.8x dsp status 0x%8.8x mask 0x%8.8x\n",
+ hipcida, hipctdr, hipcctl);
+}
+
+/* Meteorlake ops */
+struct snd_sof_dsp_ops sof_mtl_ops;
+EXPORT_SYMBOL_NS(sof_mtl_ops, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+int sof_mtl_ops_init(struct snd_sof_dev *sdev)
+{
+ struct sof_ipc4_fw_data *ipc4_data;
+
+ /* common defaults */
+ memcpy(&sof_mtl_ops, &sof_hda_common_ops, sizeof(struct snd_sof_dsp_ops));
+
+ /* shutdown */
+ sof_mtl_ops.shutdown = hda_dsp_shutdown;
+
+ /* doorbell */
+ sof_mtl_ops.irq_thread = mtl_ipc_irq_thread;
+
+ /* ipc */
+ sof_mtl_ops.send_msg = mtl_ipc_send_msg;
+ sof_mtl_ops.get_mailbox_offset = mtl_dsp_ipc_get_mailbox_offset;
+ sof_mtl_ops.get_window_offset = mtl_dsp_ipc_get_window_offset;
+
+ /* debug */
+ sof_mtl_ops.debug_map = mtl_dsp_debugfs;
+ sof_mtl_ops.debug_map_count = ARRAY_SIZE(mtl_dsp_debugfs);
+ sof_mtl_ops.dbg_dump = mtl_dsp_dump;
+ sof_mtl_ops.ipc_dump = mtl_ipc_dump;
+
+ /* pre/post fw run */
+ sof_mtl_ops.pre_fw_run = mtl_dsp_pre_fw_run;
+ sof_mtl_ops.post_fw_run = mtl_dsp_post_fw_run;
+
+ /* parse platform specific extended manifest */
+ sof_mtl_ops.parse_platform_ext_manifest = NULL;
+
+ /* dsp core get/put */
+ /* TODO: add core_get and core_put */
+
+ /* PM */
+ sof_mtl_ops.suspend = mtl_dsp_suspend;
+ sof_mtl_ops.resume = mtl_dsp_resume;
+ sof_mtl_ops.runtime_suspend = mtl_dsp_runtime_suspend;
+ sof_mtl_ops.runtime_resume = mtl_dsp_runtime_resume;
+
+ sdev->private = devm_kzalloc(sdev->dev, sizeof(struct sof_ipc4_fw_data), GFP_KERNEL);
+ if (!sdev->private)
+ return -ENOMEM;
+
+ ipc4_data = sdev->private;
+ ipc4_data->manifest_fw_hdr_offset = SOF_MAN4_FW_HDR_OFFSET;
+
+ /* set DAI ops */
+ hda_set_dai_drv_ops(sdev, &sof_mtl_ops);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(sof_mtl_ops_init, SND_SOC_SOF_INTEL_HDA_COMMON);
+
+const struct sof_intel_dsp_desc mtl_chip_info = {
+ .cores_num = 3,
+ .init_core_mask = BIT(0),
+ .host_managed_cores_mask = BIT(0),
+ .ipc_req = MTL_DSP_REG_HFIPCXIDR,
+ .ipc_req_mask = MTL_DSP_REG_HFIPCXIDR_BUSY,
+ .ipc_ack = MTL_DSP_REG_HFIPCXIDA,
+ .ipc_ack_mask = MTL_DSP_REG_HFIPCXIDA_DONE,
+ .ipc_ctl = MTL_DSP_REG_HFIPCXCTL,
+ .rom_status_reg = MTL_DSP_ROM_STS,
+ .rom_init_timeout = 300,
+ .ssp_count = ICL_SSP_COUNT,
+ .ssp_base_offset = CNL_SSP_BASE_OFFSET,
+ .sdw_shim_base = SDW_SHIM_BASE_ACE,
+ .sdw_alh_base = SDW_ALH_BASE_ACE,
+ .check_sdw_irq = mtl_dsp_check_sdw_irq,
+ .check_ipc_irq = mtl_dsp_check_ipc_irq,
+ .cl_init = mtl_dsp_cl_init,
+ .hw_ip_version = SOF_INTEL_ACE_1_0,
+};
+EXPORT_SYMBOL_NS(mtl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
diff --git a/sound/soc/sof/intel/mtl.h b/sound/soc/sof/intel/mtl.h
new file mode 100644
index 000000000000..788bf0e3ea87
--- /dev/null
+++ b/sound/soc/sof/intel/mtl.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2020-2022 Intel Corporation. All rights reserved.
+ */
+
+/* DSP Registers */
+#define MTL_HFDSSCS 0x1000
+#define MTL_HFDSSCS_SPA_MASK BIT(16)
+#define MTL_HFDSSCS_CPA_MASK BIT(24)
+#define MTL_HFSNDWIE 0x114C
+#define MTL_HFPWRCTL 0x1D18
+#define MTL_HfPWRCTL_WPIOXPG(x) BIT((x) + 8)
+#define MTL_HFPWRCTL_WPDSPHPXPG BIT(0)
+#define MTL_HFPWRSTS 0x1D1C
+#define MTL_HFPWRSTS_DSPHPXPGS_MASK BIT(0)
+#define MTL_HFINTIPPTR 0x1108
+#define MTL_IRQ_INTEN_L_HOST_IPC_MASK BIT(0)
+#define MTL_IRQ_INTEN_L_SOUNDWIRE_MASK BIT(6)
+#define MTL_HFINTIPPTR_PTR_MASK GENMASK(20, 0)
+
+#define MTL_DSP2CXCAP_PRIMARY_CORE 0x178D00
+#define MTL_DSP2CXCTL_PRIMARY_CORE 0x178D04
+#define MTL_DSP2CXCTL_PRIMARY_CORE_SPA_MASK BIT(0)
+#define MTL_DSP2CXCTL_PRIMARY_CORE_CPA_MASK BIT(8)
+#define MTL_DSP2CXCTL_PRIMARY_CORE_OSEL GENMASK(25, 24)
+#define MTL_DSP2CXCTL_PRIMARY_CORE_OSEL_SHIFT 24
+
+/* IPC Registers */
+#define MTL_DSP_REG_HFIPCXTDR 0x73200
+#define MTL_DSP_REG_HFIPCXTDR_BUSY BIT(31)
+#define MTL_DSP_REG_HFIPCXTDR_MSG_MASK GENMASK(30, 0)
+#define MTL_DSP_REG_HFIPCXTDA 0x73204
+#define MTL_DSP_REG_HFIPCXTDA_BUSY BIT(31)
+#define MTL_DSP_REG_HFIPCXIDR 0x73210
+#define MTL_DSP_REG_HFIPCXIDR_BUSY BIT(31)
+#define MTL_DSP_REG_HFIPCXIDR_MSG_MASK GENMASK(30, 0)
+#define MTL_DSP_REG_HFIPCXIDA 0x73214
+#define MTL_DSP_REG_HFIPCXIDA_DONE BIT(31)
+#define MTL_DSP_REG_HFIPCXIDA_MSG_MASK GENMASK(30, 0)
+#define MTL_DSP_REG_HFIPCXCTL 0x73228
+#define MTL_DSP_REG_HFIPCXCTL_BUSY BIT(0)
+#define MTL_DSP_REG_HFIPCXCTL_DONE BIT(1)
+#define MTL_DSP_REG_HFIPCXTDDY 0x73300
+#define MTL_DSP_REG_HFIPCXIDDY 0x73380
+#define MTL_DSP_REG_HfHIPCIE 0x1140
+#define MTL_DSP_REG_HfHIPCIE_IE_MASK BIT(0)
+#define MTL_DSP_REG_HfSNDWIE 0x114C
+#define MTL_DSP_REG_HfSNDWIE_IE_MASK GENMASK(3, 0)
+
+#define MTL_DSP_IRQSTS 0x20
+#define MTL_DSP_IRQSTS_IPC BIT(0)
+#define MTL_DSP_IRQSTS_SDW BIT(6)
+
+#define MTL_DSP_PURGE_TIMEOUT_US 20000000 /* 20s */
+#define MTL_DSP_REG_POLL_INTERVAL_US 10 /* 10 us */
+
+/* Memory windows */
+#define MTL_SRAM_WINDOW_OFFSET(x) (0x180000 + 0x8000 * (x))
+
+#define MTL_DSP_MBOX_UPLINK_OFFSET (MTL_SRAM_WINDOW_OFFSET(0) + 0x1000)
+#define MTL_DSP_MBOX_UPLINK_SIZE 0x1000
+#define MTL_DSP_MBOX_DOWNLINK_OFFSET MTL_SRAM_WINDOW_OFFSET(1)
+#define MTL_DSP_MBOX_DOWNLINK_SIZE 0x1000
+
+/* FW registers */
+#define MTL_DSP_ROM_STS MTL_SRAM_WINDOW_OFFSET(0) /* ROM status */
+#define MTL_DSP_ROM_ERROR (MTL_SRAM_WINDOW_OFFSET(0) + 0x4) /* ROM error code */
+
+#define MTL_DSP_REG_HFFLGPXQWY 0x163200 /* ROM debug status */
+#define MTL_DSP_REG_HFFLGPXQWY_ERROR 0x163204 /* ROM debug error code */
+#define MTL_DSP_REG_HfIMRIS1 0x162088
+#define MTL_DSP_REG_HfIMRIS1_IU_MASK BIT(0)
+
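
Editor's note: as a quick sanity check of the memory-window arithmetic above, MTL_SRAM_WINDOW_OFFSET(0) is 0x180000, so the uplink mailbox sits at 0x181000, and MTL_SRAM_WINDOW_OFFSET(1) is 0x188000, which is where the downlink mailbox starts. A trivial standalone check (editorial, not part of the patch):

    #include <assert.h>

    #define MTL_SRAM_WINDOW_OFFSET(x)  (0x180000 + 0x8000 * (x))

    int main(void)
    {
            assert(MTL_SRAM_WINDOW_OFFSET(0) == 0x180000);          /* window 0      */
            assert(MTL_SRAM_WINDOW_OFFSET(0) + 0x1000 == 0x181000); /* uplink mbox   */
            assert(MTL_SRAM_WINDOW_OFFSET(1) == 0x188000);          /* downlink mbox */
            return 0;
    }
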
diff --git a/sound/soc/sof/intel/pci-apl.c b/sound/soc/sof/intel/pci-apl.c
index 2de3658eb817..998e219011f0 100644
--- a/sound/soc/sof/intel/pci-apl.c
+++ b/sound/soc/sof/intel/pci-apl.c
@@ -44,6 +44,7 @@ static const struct sof_dev_desc bxt_desc = {
.nocodec_tplg_filename = "sof-apl-nocodec.tplg",
.ops = &sof_apl_ops,
.ops_init = sof_apl_ops_init,
+ .ops_free = hda_ops_free,
};
static const struct sof_dev_desc glk_desc = {
diff --git a/sound/soc/sof/intel/pci-cnl.c b/sound/soc/sof/intel/pci-cnl.c
index 87e587aef9c9..c797356f7028 100644
--- a/sound/soc/sof/intel/pci-cnl.c
+++ b/sound/soc/sof/intel/pci-cnl.c
@@ -73,6 +73,7 @@ static const struct sof_dev_desc cfl_desc = {
.nocodec_tplg_filename = "sof-cnl-nocodec.tplg",
.ops = &sof_cnl_ops,
.ops_init = sof_cnl_ops_init,
+ .ops_free = hda_ops_free,
};
static const struct sof_dev_desc cml_desc = {
diff --git a/sound/soc/sof/intel/pci-icl.c b/sound/soc/sof/intel/pci-icl.c
index 1c7f16ce531e..48f24f8ace26 100644
--- a/sound/soc/sof/intel/pci-icl.c
+++ b/sound/soc/sof/intel/pci-icl.c
@@ -45,6 +45,7 @@ static const struct sof_dev_desc icl_desc = {
.nocodec_tplg_filename = "sof-icl-nocodec.tplg",
.ops = &sof_icl_ops,
.ops_init = sof_icl_ops_init,
+ .ops_free = hda_ops_free,
};
static const struct sof_dev_desc jsl_desc = {
diff --git a/sound/soc/sof/intel/pci-mtl.c b/sound/soc/sof/intel/pci-mtl.c
new file mode 100644
index 000000000000..899b00d53d64
--- /dev/null
+++ b/sound/soc/sof/intel/pci-mtl.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2018-2022 Intel Corporation. All rights reserved.
+//
+// Author: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+//
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+#include <sound/sof.h>
+#include "../ops.h"
+#include "../sof-pci-dev.h"
+
+/* platform specific devices */
+#include "hda.h"
+#include "mtl.h"
+
+static const struct sof_dev_desc mtl_desc = {
+ .use_acpi_target_states = true,
+ .machines = snd_soc_acpi_intel_mtl_machines,
+ .alt_machines = snd_soc_acpi_intel_mtl_sdw_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .chip_info = &mtl_chip_info,
+ .ipc_supported_mask = BIT(SOF_INTEL_IPC4),
+ .ipc_default = SOF_INTEL_IPC4,
+ .default_fw_path = {
+ [SOF_INTEL_IPC4] = "intel/sof-ipc4/mtl",
+ },
+ .default_tplg_path = {
+ [SOF_INTEL_IPC4] = "intel/sof-ace-tplg",
+ },
+ .default_fw_filename = {
+ [SOF_INTEL_IPC4] = "dsp_basefw.bin",
+ },
+ .nocodec_tplg_filename = "sof-mtl-nocodec.tplg",
+ .ops = &sof_mtl_ops,
+ .ops_init = sof_mtl_ops_init,
+};
+
+/* PCI IDs */
+static const struct pci_device_id sof_pci_ids[] = {
+ { PCI_DEVICE(0x8086, 0x7E28), /* MTL */
+ .driver_data = (unsigned long)&mtl_desc},
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, sof_pci_ids);
+
+/* pci_driver definition */
+static struct pci_driver snd_sof_pci_intel_mtl_driver = {
+ .name = "sof-audio-pci-intel-mtl",
+ .id_table = sof_pci_ids,
+ .probe = hda_pci_intel_probe,
+ .remove = sof_pci_remove,
+ .shutdown = sof_pci_shutdown,
+ .driver = {
+ .pm = &sof_pci_pm,
+ },
+};
+module_pci_driver(snd_sof_pci_intel_mtl_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
+MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
diff --git a/sound/soc/sof/intel/pci-tgl.c b/sound/soc/sof/intel/pci-tgl.c
index 58a9bd92a237..ccc44ba3ad94 100644
--- a/sound/soc/sof/intel/pci-tgl.c
+++ b/sound/soc/sof/intel/pci-tgl.c
@@ -73,6 +73,7 @@ static const struct sof_dev_desc tglh_desc = {
.nocodec_tplg_filename = "sof-tgl-nocodec.tplg",
.ops = &sof_tgl_ops,
.ops_init = sof_tgl_ops_init,
+ .ops_free = hda_ops_free,
};
static const struct sof_dev_desc ehl_desc = {
diff --git a/sound/soc/sof/intel/shim.h b/sound/soc/sof/intel/shim.h
index 1fd7b485d821..638159bee864 100644
--- a/sound/soc/sof/intel/shim.h
+++ b/sound/soc/sof/intel/shim.h
@@ -20,6 +20,7 @@ enum sof_intel_hw_ip_version {
SOF_INTEL_CAVS_1_8, /* CannonLake, CometLake, CoffeeLake */
SOF_INTEL_CAVS_2_0, /* IceLake, JasperLake */
SOF_INTEL_CAVS_2_5, /* TigerLake, AlderLake */
+ SOF_INTEL_ACE_1_0, /* MeteorLake */
};
/*
@@ -185,6 +186,7 @@ struct sof_intel_dsp_desc {
enum sof_intel_hw_ip_version hw_ip_version;
bool (*check_sdw_irq)(struct snd_sof_dev *sdev);
bool (*check_ipc_irq)(struct snd_sof_dev *sdev);
+ int (*cl_init)(struct snd_sof_dev *sdev, int stream_tag, bool imr_boot);
};
extern struct snd_sof_dsp_ops sof_tng_ops;
diff --git a/sound/soc/sof/intel/tgl.c b/sound/soc/sof/intel/tgl.c
index 1ddc492f1b13..6dfb4786c782 100644
--- a/sound/soc/sof/intel/tgl.c
+++ b/sound/soc/sof/intel/tgl.c
@@ -24,40 +24,30 @@ static const struct snd_sof_debugfs_map tgl_dsp_debugfs[] = {
static int tgl_dsp_core_get(struct snd_sof_dev *sdev, int core)
{
- struct sof_ipc_pm_core_config pm_core_config = {
- .hdr = {
- .cmd = SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_CORE_ENABLE,
- .size = sizeof(pm_core_config),
- },
- .enable_mask = sdev->enabled_cores_mask | BIT(core),
- };
+ const struct sof_ipc_pm_ops *pm_ops = sdev->ipc->ops->pm;
/* power up primary core if not already powered up and return */
if (core == SOF_DSP_PRIMARY_CORE)
return hda_dsp_enable_core(sdev, BIT(core));
- /* notify DSP for secondary cores */
- return sof_ipc_tx_message(sdev->ipc, &pm_core_config, sizeof(pm_core_config),
- &pm_core_config, sizeof(pm_core_config));
+ if (pm_ops->set_core_state)
+ return pm_ops->set_core_state(sdev, core, true);
+
+ return 0;
}
static int tgl_dsp_core_put(struct snd_sof_dev *sdev, int core)
{
- struct sof_ipc_pm_core_config pm_core_config = {
- .hdr = {
- .cmd = SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_CORE_ENABLE,
- .size = sizeof(pm_core_config),
- },
- .enable_mask = sdev->enabled_cores_mask & ~BIT(core),
- };
+ const struct sof_ipc_pm_ops *pm_ops = sdev->ipc->ops->pm;
/* power down primary core and return */
if (core == SOF_DSP_PRIMARY_CORE)
return hda_dsp_core_reset_power_down(sdev, BIT(core));
- /* notify DSP for secondary cores */
- return sof_ipc_tx_message(sdev->ipc, &pm_core_config, sizeof(pm_core_config),
- &pm_core_config, sizeof(pm_core_config));
+ if (pm_ops->set_core_state)
+ return pm_ops->set_core_state(sdev, core, false);
+
+ return 0;
}
/* Tigerlake ops */
@@ -137,6 +127,7 @@ const struct sof_intel_dsp_desc tgl_chip_info = {
.sdw_alh_base = SDW_ALH_BASE,
.check_sdw_irq = hda_common_check_sdw_irq,
.check_ipc_irq = hda_dsp_check_ipc_irq,
+ .cl_init = cl_dsp_init,
.hw_ip_version = SOF_INTEL_CAVS_2_5,
};
EXPORT_SYMBOL_NS(tgl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
@@ -159,6 +150,7 @@ const struct sof_intel_dsp_desc tglh_chip_info = {
.sdw_alh_base = SDW_ALH_BASE,
.check_sdw_irq = hda_common_check_sdw_irq,
.check_ipc_irq = hda_dsp_check_ipc_irq,
+ .cl_init = cl_dsp_init,
.hw_ip_version = SOF_INTEL_CAVS_2_5,
};
EXPORT_SYMBOL_NS(tglh_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
@@ -181,6 +173,7 @@ const struct sof_intel_dsp_desc ehl_chip_info = {
.sdw_alh_base = SDW_ALH_BASE,
.check_sdw_irq = hda_common_check_sdw_irq,
.check_ipc_irq = hda_dsp_check_ipc_irq,
+ .cl_init = cl_dsp_init,
.hw_ip_version = SOF_INTEL_CAVS_2_5,
};
EXPORT_SYMBOL_NS(ehl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
@@ -203,6 +196,7 @@ const struct sof_intel_dsp_desc adls_chip_info = {
.sdw_alh_base = SDW_ALH_BASE,
.check_sdw_irq = hda_common_check_sdw_irq,
.check_ipc_irq = hda_dsp_check_ipc_irq,
+ .cl_init = cl_dsp_init,
.hw_ip_version = SOF_INTEL_CAVS_2_5,
};
EXPORT_SYMBOL_NS(adls_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
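
A minimal sketch (hypothetical user-space types, not the kernel API) of the delegation pattern introduced in tgl_dsp_core_get/put above: the primary core is handled directly, and secondary cores are forwarded to an optional set_core_state callback only when the IPC layer provides one.

#include <stdio.h>
#include <stdbool.h>

#define PRIMARY_CORE 0

struct pm_ops {
	int (*set_core_state)(int core, bool on);	/* may be NULL */
};

static int ipc_set_core_state(int core, bool on)
{
	printf("IPC: core %d -> %s\n", core, on ? "on" : "off");
	return 0;
}

static const struct pm_ops ops_with_cb = { .set_core_state = ipc_set_core_state };
static const struct pm_ops ops_without_cb = { 0 };

static int core_get(const struct pm_ops *pm_ops, int core)
{
	if (core == PRIMARY_CORE)
		return 0;	/* powered up directly, no IPC needed */

	if (pm_ops->set_core_state)
		return pm_ops->set_core_state(core, true);

	return 0;	/* no callback registered: nothing to notify */
}

int main(void)
{
	core_get(&ops_with_cb, 1);
	core_get(&ops_without_cb, 1);
	return 0;
}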
diff --git a/sound/soc/sof/ipc.c b/sound/soc/sof/ipc.c
index c5aef5fc056b..6ed3f9b6a0c4 100644
--- a/sound/soc/sof/ipc.c
+++ b/sound/soc/sof/ipc.c
@@ -155,12 +155,22 @@ struct snd_sof_ipc *snd_sof_ipc_init(struct snd_sof_dev *sdev)
init_waitqueue_head(&msg->waitq);
- /*
- * Use IPC3 ops as it is the only available version now. With the addition of new IPC
- * versions, this will need to be modified to use the selected version at runtime.
- */
- ipc->ops = &ipc3_ops;
- ops = ipc->ops;
+ switch (sdev->pdata->ipc_type) {
+#if defined(CONFIG_SND_SOC_SOF_IPC3)
+ case SOF_IPC:
+ ops = &ipc3_ops;
+ break;
+#endif
+#if defined(CONFIG_SND_SOC_SOF_INTEL_IPC4)
+ case SOF_INTEL_IPC4:
+ ops = &ipc4_ops;
+ break;
+#endif
+ default:
+ dev_err(sdev->dev, "Not supported IPC version: %d\n",
+ sdev->pdata->ipc_type);
+ return NULL;
+ }
/* check for mandatory ops */
if (!ops->tx_msg || !ops->rx_msg || !ops->set_get_data || !ops->get_reply) {
@@ -190,6 +200,8 @@ struct snd_sof_ipc *snd_sof_ipc_init(struct snd_sof_dev *sdev)
return NULL;
}
+ ipc->ops = ops;
+
return ipc;
}
EXPORT_SYMBOL(snd_sof_ipc_init);
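
Sketch of the version-selection logic added to snd_sof_ipc_init() above (stand-alone C with hypothetical names, not part of the patch): the ops table is chosen at runtime from the platform's IPC type, and each case only exists when the corresponding support is compiled in.

#include <stdio.h>

#define CONFIG_IPC3 1
#define CONFIG_IPC4 1

enum ipc_type { IPC3, IPC4 };

struct ipc_ops { const char *name; };

#if CONFIG_IPC3
static const struct ipc_ops ipc3_ops = { "ipc3" };
#endif
#if CONFIG_IPC4
static const struct ipc_ops ipc4_ops = { "ipc4" };
#endif

static const struct ipc_ops *select_ops(enum ipc_type type)
{
	switch (type) {
#if CONFIG_IPC3
	case IPC3:
		return &ipc3_ops;
#endif
#if CONFIG_IPC4
	case IPC4:
		return &ipc4_ops;
#endif
	default:
		return NULL;	/* unsupported or not compiled in */
	}
}

int main(void)
{
	const struct ipc_ops *ops = select_ops(IPC4);

	printf("selected %s\n", ops ? ops->name : "none");
	return 0;
}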
diff --git a/sound/soc/sof/ipc3-dtrace.c b/sound/soc/sof/ipc3-dtrace.c
index b4e1343f9138..b815b0244d9e 100644
--- a/sound/soc/sof/ipc3-dtrace.c
+++ b/sound/soc/sof/ipc3-dtrace.c
@@ -18,6 +18,7 @@
enum sof_dtrace_state {
SOF_DTRACE_DISABLED,
SOF_DTRACE_STOPPED,
+ SOF_DTRACE_INITIALIZING,
SOF_DTRACE_ENABLED,
};
@@ -32,6 +33,15 @@ struct sof_dtrace_priv {
enum sof_dtrace_state dtrace_state;
};
+static bool trace_pos_update_expected(struct sof_dtrace_priv *priv)
+{
+ if (priv->dtrace_state == SOF_DTRACE_ENABLED ||
+ priv->dtrace_state == SOF_DTRACE_INITIALIZING)
+ return true;
+
+ return false;
+}
+
static int trace_filter_append_elem(struct snd_sof_dev *sdev, u32 key, u32 value,
struct sof_ipc_trace_filter_elem *elem_list,
int capacity, int *counter)
@@ -157,9 +167,8 @@ static int ipc3_trace_update_filter(struct snd_sof_dev *sdev, int num_elems,
msg->elem_cnt = num_elems;
memcpy(&msg->elems[0], elems, num_elems * sizeof(*elems));
- ret = pm_runtime_get_sync(sdev->dev);
+ ret = pm_runtime_resume_and_get(sdev->dev);
if (ret < 0 && ret != -EACCES) {
- pm_runtime_put_noidle(sdev->dev);
dev_err(sdev->dev, "enabling device failed: %d\n", ret);
goto error;
}
@@ -242,6 +251,21 @@ static int debugfs_create_trace_filter(struct snd_sof_dev *sdev)
return 0;
}
+static bool sof_dtrace_set_host_offset(struct sof_dtrace_priv *priv, u32 new_offset)
+{
+ u32 host_offset = READ_ONCE(priv->host_offset);
+
+ if (host_offset != new_offset) {
+ /* This is a bit paranoid and unlikely to be needed */
+ u32 ret = cmpxchg(&priv->host_offset, host_offset, new_offset);
+
+ if (ret == host_offset)
+ return true;
+ }
+
+ return false;
+}
+
static size_t sof_dtrace_avail(struct snd_sof_dev *sdev,
loff_t pos, size_t buffer_size)
{
@@ -274,7 +298,7 @@ static size_t sof_wait_dtrace_avail(struct snd_sof_dev *sdev, loff_t pos,
if (ret)
return ret;
- if (priv->dtrace_state != SOF_DTRACE_ENABLED && priv->dtrace_draining) {
+ if (priv->dtrace_draining && !trace_pos_update_expected(priv)) {
/*
* tracing has ended and all traces have been
* read by client, return EOF
@@ -328,6 +352,10 @@ static ssize_t dfsentry_dtrace_read(struct file *file, char __user *buffer,
return -EIO;
}
+ /* no new trace data */
+ if (!avail)
+ return 0;
+
/* make sure count is <= avail */
if (count > avail)
count = avail;
@@ -358,7 +386,7 @@ static int dfsentry_dtrace_release(struct inode *inode, struct file *file)
/* avoid duplicate traces at next open */
if (priv->dtrace_state != SOF_DTRACE_ENABLED)
- priv->host_offset = 0;
+ sof_dtrace_set_host_offset(priv, 0);
return 0;
}
@@ -434,7 +462,7 @@ static int ipc3_dtrace_enable(struct snd_sof_dev *sdev)
params.buffer.pages = priv->dma_trace_pages;
params.stream_tag = 0;
- priv->host_offset = 0;
+ sof_dtrace_set_host_offset(priv, 0);
priv->dtrace_draining = false;
ret = sof_dtrace_host_init(sdev, &priv->dmatb, &params);
@@ -442,9 +470,10 @@ static int ipc3_dtrace_enable(struct snd_sof_dev *sdev)
dev_err(sdev->dev, "Host dtrace init failed: %d\n", ret);
return ret;
}
- dev_dbg(sdev->dev, "%s: stream_tag: %d\n", __func__, params.stream_tag);
+ dev_dbg(sdev->dev, "stream_tag: %d\n", params.stream_tag);
/* send IPC to the DSP */
+ priv->dtrace_state = SOF_DTRACE_INITIALIZING;
ret = sof_ipc_tx_message(sdev->ipc, &params, sizeof(params), &ipc_reply, sizeof(ipc_reply));
if (ret < 0) {
dev_err(sdev->dev, "can't set params for DMA for trace %d\n", ret);
@@ -452,17 +481,18 @@ static int ipc3_dtrace_enable(struct snd_sof_dev *sdev)
}
start:
+ priv->dtrace_state = SOF_DTRACE_ENABLED;
+
ret = sof_dtrace_host_trigger(sdev, SNDRV_PCM_TRIGGER_START);
if (ret < 0) {
dev_err(sdev->dev, "Host dtrace trigger start failed: %d\n", ret);
goto trace_release;
}
- priv->dtrace_state = SOF_DTRACE_ENABLED;
-
return 0;
trace_release:
+ priv->dtrace_state = SOF_DTRACE_DISABLED;
sof_dtrace_host_release(sdev);
return ret;
}
@@ -514,8 +544,7 @@ static int ipc3_dtrace_init(struct snd_sof_dev *sdev)
goto table_err;
priv->dma_trace_pages = ret;
- dev_dbg(sdev->dev, "%s: dma_trace_pages: %d\n", __func__,
- priv->dma_trace_pages);
+ dev_dbg(sdev->dev, "dma_trace_pages: %d\n", priv->dma_trace_pages);
if (sdev->first_boot) {
ret = debugfs_create_dtrace(sdev);
@@ -546,11 +575,9 @@ int ipc3_dtrace_posn_update(struct snd_sof_dev *sdev,
if (!sdev->fw_trace_is_supported)
return 0;
- if (priv->dtrace_state == SOF_DTRACE_ENABLED &&
- priv->host_offset != posn->host_offset) {
- priv->host_offset = posn->host_offset;
+ if (trace_pos_update_expected(priv) &&
+ sof_dtrace_set_host_offset(priv, posn->host_offset))
wake_up(&priv->trace_sleep);
- }
if (posn->overflow != 0)
dev_err(sdev->dev,
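
The sof_dtrace_set_host_offset() helper introduced above updates the host offset only when it actually changes, using cmpxchg so concurrent updaters cannot both report the same position as new. A rough user-space equivalent with C11 atomics (hypothetical names, a sketch rather than the kernel primitive):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic unsigned int host_offset;

/* returns true only if the offset really moved to new_offset */
static bool set_host_offset(unsigned int new_offset)
{
	unsigned int old = atomic_load(&host_offset);

	if (old == new_offset)
		return false;	/* nothing new to report */

	/* only one concurrent caller wins the update */
	return atomic_compare_exchange_strong(&host_offset, &old, new_offset);
}

int main(void)
{
	printf("%d\n", set_host_offset(64));	/* 1: moved */
	printf("%d\n", set_host_offset(64));	/* 0: unchanged */
	return 0;
}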
diff --git a/sound/soc/sof/ipc3-loader.c b/sound/soc/sof/ipc3-loader.c
index f3c741b49519..bf423ca4e97b 100644
--- a/sound/soc/sof/ipc3-loader.c
+++ b/sound/soc/sof/ipc3-loader.c
@@ -75,13 +75,12 @@ static int ipc3_fw_ext_man_get_config_data(struct snd_sof_dev *sdev,
elems_size = config->hdr.size - sizeof(struct sof_ext_man_elem_header);
elems_counter = elems_size / sizeof(struct sof_config_elem);
- dev_dbg(sdev->dev, "%s can hold up to %d config elements\n",
- __func__, elems_counter);
+ dev_dbg(sdev->dev, "manifest can hold up to %d config elements\n", elems_counter);
for (i = 0; i < elems_counter; ++i) {
elem = &config->elems[i];
- dev_dbg(sdev->dev, "%s get index %d token %d val %d\n",
- __func__, i, elem->token, elem->value);
+ dev_dbg(sdev->dev, "get index %d token %d val %d\n",
+ i, elem->token, elem->value);
switch (elem->token) {
case SOF_EXT_MAN_CONFIG_EMPTY:
/* unused memory space is zero filled - mapped to EMPTY elements */
@@ -110,7 +109,7 @@ static int ipc3_fw_ext_man_get_config_data(struct snd_sof_dev *sdev,
return 0;
}
-static ssize_t ipc3_fw_ext_man_size(const struct firmware *fw)
+static ssize_t ipc3_fw_ext_man_size(struct snd_sof_dev *sdev, const struct firmware *fw)
{
const struct sof_ext_man_header *head;
@@ -132,6 +131,8 @@ static ssize_t ipc3_fw_ext_man_size(const struct firmware *fw)
return head->full_size;
/* otherwise the given fw does not have an extended manifest */
+ dev_dbg(sdev->dev, "Unexpected extended manifest magic number: %#x\n",
+ head->magic);
return 0;
}
@@ -148,7 +149,7 @@ static size_t sof_ipc3_fw_parse_ext_man(struct snd_sof_dev *sdev)
head = (struct sof_ext_man_header *)fw->data;
remaining = head->full_size - head->header_size;
- ext_man_size = ipc3_fw_ext_man_size(fw);
+ ext_man_size = ipc3_fw_ext_man_size(sdev, fw);
/* Assert firmware starts with extended manifest */
if (ext_man_size <= 0)
@@ -323,10 +324,10 @@ static int sof_ipc3_load_fw_to_dsp(struct snd_sof_dev *sdev)
header = (struct snd_sof_fw_header *)(fw->data + plat_data->fw_offset);
load_module = sof_ops(sdev)->load_module;
if (!load_module) {
- dev_dbg(sdev->dev, "%s: Using generic module loading\n", __func__);
+ dev_dbg(sdev->dev, "Using generic module loading\n");
load_module = sof_ipc3_parse_module_memcpy;
} else {
- dev_dbg(sdev->dev, "%s: Using custom module loading\n", __func__);
+ dev_dbg(sdev->dev, "Using custom module loading\n");
}
/* parse each module */
diff --git a/sound/soc/sof/ipc3-pcm.c b/sound/soc/sof/ipc3-pcm.c
index c8774a891d6f..9c6a84bdeca7 100644
--- a/sound/soc/sof/ipc3-pcm.c
+++ b/sound/soc/sof/ipc3-pcm.c
@@ -115,6 +115,9 @@ static int sof_ipc3_pcm_hw_params(struct snd_soc_component *component,
pcm.params.no_stream_position = 1;
}
+ if (platform_params->cont_update_posn)
+ pcm.params.cont_update_posn = 1;
+
dev_dbg(component->dev, "stream_tag %d", pcm.params.stream_tag);
/* send hw_params IPC to the DSP */
@@ -344,10 +347,10 @@ static int sof_ipc3_pcm_dai_link_fixup(struct snd_soc_pcm_runtime *rtd,
channels->min, channels->max);
break;
case SOF_DAI_AMD_DMIC:
- rate->min = private->dai_config->acpdmic.fsync_rate;
- rate->max = private->dai_config->acpdmic.fsync_rate;
- channels->min = private->dai_config->acpdmic.tdm_slots;
- channels->max = private->dai_config->acpdmic.tdm_slots;
+ rate->min = private->dai_config->acpdmic.pdm_rate;
+ rate->max = private->dai_config->acpdmic.pdm_rate;
+ channels->min = private->dai_config->acpdmic.pdm_ch;
+ channels->max = private->dai_config->acpdmic.pdm_ch;
dev_dbg(component->dev,
"AMD_DMIC rate_min: %d rate_max: %d\n", rate->min, rate->max);
diff --git a/sound/soc/sof/ipc3-topology.c b/sound/soc/sof/ipc3-topology.c
index 10740c55294d..65923e7a5976 100644
--- a/sound/soc/sof/ipc3-topology.c
+++ b/sound/soc/sof/ipc3-topology.c
@@ -17,6 +17,9 @@
/* Full volume for default values */
#define VOL_ZERO_DB BIT(VOLUME_FWL)
+/* size of tplg ABI in bytes */
+#define SOF_IPC3_TPLG_ABI_SIZE 3
+
struct sof_widget_data {
int ctrl_type;
int ipc_cmd;
@@ -263,6 +266,16 @@ static const struct sof_topology_token afe_tokens[] = {
offsetof(struct sof_ipc_dai_mtk_afe_params, format)},
};
+/* ACPDMIC */
+static const struct sof_topology_token acpdmic_tokens[] = {
+ {SOF_TKN_AMD_ACPDMIC_RATE,
+ SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_acpdmic_params, pdm_rate)},
+ {SOF_TKN_AMD_ACPDMIC_CH,
+ SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_acpdmic_params, pdm_ch)},
+};
+
/* Core tokens */
static const struct sof_topology_token core_tokens[] = {
{SOF_TKN_COMP_CORE_ID, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
@@ -297,6 +310,7 @@ static const struct sof_token_info ipc3_token_list[SOF_TOKEN_COUNT] = {
[SOF_ESAI_TOKENS] = {"ESAI tokens", esai_tokens, ARRAY_SIZE(esai_tokens)},
[SOF_SAI_TOKENS] = {"SAI tokens", sai_tokens, ARRAY_SIZE(sai_tokens)},
[SOF_AFE_TOKENS] = {"AFE tokens", afe_tokens, ARRAY_SIZE(afe_tokens)},
+ [SOF_ACPDMIC_TOKENS] = {"ACPDMIC tokens", acpdmic_tokens, ARRAY_SIZE(acpdmic_tokens)},
};
/**
@@ -1117,20 +1131,22 @@ static int sof_link_acp_dmic_load(struct snd_soc_component *scomp, struct snd_so
struct snd_soc_tplg_hw_config *hw_config = slink->hw_configs;
struct sof_dai_private_data *private = dai->private;
u32 size = sizeof(*config);
+ int ret;
/* handle master/slave and inverted clocks */
sof_dai_set_format(hw_config, config);
- /* init IPC */
- memset(&config->acpdmic, 0, sizeof(config->acpdmic));
config->hdr.size = size;
- config->acpdmic.fsync_rate = le32_to_cpu(hw_config->fsync_rate);
- config->acpdmic.tdm_slots = le32_to_cpu(hw_config->tdm_slots);
+ /* parse the required set of ACPDMIC tokens based on num_hw_cfgs */
+ ret = sof_update_ipc_object(scomp, &config->acpdmic, SOF_ACPDMIC_TOKENS, slink->tuples,
+ slink->num_tuples, size, slink->num_hw_configs);
+ if (ret < 0)
+ return ret;
dev_info(scomp->dev, "ACP_DMIC config ACP%d channel %d rate %d\n",
- config->dai_index, config->acpdmic.tdm_slots,
- config->acpdmic.fsync_rate);
+ config->dai_index, config->acpdmic.pdm_ch,
+ config->acpdmic.pdm_rate);
dai->number_configs = 1;
dai->current_config = 0;
@@ -1436,8 +1452,8 @@ static int sof_ipc3_widget_setup_comp_dai(struct snd_sof_widget *swidget)
if (ret < 0)
goto free;
- dev_dbg(scomp->dev, "%s dai %s: type %d index %d\n",
- __func__, swidget->widget->name, comp_dai->type, comp_dai->dai_index);
+ dev_dbg(scomp->dev, "dai %s: type %d index %d\n",
+ swidget->widget->name, comp_dai->type, comp_dai->dai_index);
sof_dbg_comp_config(scomp, &comp_dai->config);
/* now update DAI config */
@@ -1628,6 +1644,7 @@ static int sof_ipc3_control_load_bytes(struct snd_sof_dev *sdev, struct snd_sof_
return 0;
err:
kfree(scontrol->ipc_control_data);
+ scontrol->ipc_control_data = NULL;
return ret;
}
@@ -2302,6 +2319,45 @@ static int sof_ipc3_dai_get_clk(struct snd_sof_dev *sdev, struct snd_sof_dai *da
return -EINVAL;
}
+static int sof_ipc3_parse_manifest(struct snd_soc_component *scomp, int index,
+ struct snd_soc_tplg_manifest *man)
+{
+ u32 size = le32_to_cpu(man->priv.size);
+ u32 abi_version;
+
+ /* backward compatible with tplg without ABI info */
+ if (!size) {
+ dev_dbg(scomp->dev, "No topology ABI info\n");
+ return 0;
+ }
+
+ if (size != SOF_IPC3_TPLG_ABI_SIZE) {
+ dev_err(scomp->dev, "%s: Invalid topology ABI size: %u\n",
+ __func__, size);
+ return -EINVAL;
+ }
+
+ dev_info(scomp->dev,
+ "Topology: ABI %d:%d:%d Kernel ABI %d:%d:%d\n",
+ man->priv.data[0], man->priv.data[1], man->priv.data[2],
+ SOF_ABI_MAJOR, SOF_ABI_MINOR, SOF_ABI_PATCH);
+
+ abi_version = SOF_ABI_VER(man->priv.data[0], man->priv.data[1], man->priv.data[2]);
+
+ if (SOF_ABI_VERSION_INCOMPATIBLE(SOF_ABI_VERSION, abi_version)) {
+ dev_err(scomp->dev, "%s: Incompatible topology ABI version\n", __func__);
+ return -EINVAL;
+ }
+
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_STRICT_ABI_CHECKS) &&
+ SOF_ABI_VERSION_MINOR(abi_version) > SOF_ABI_MINOR) {
+ dev_err(scomp->dev, "%s: Topology ABI is more recent than kernel\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/* token list for each topology object */
static enum sof_tokens host_token_list[] = {
SOF_CORE_TOKENS,
@@ -2412,4 +2468,5 @@ const struct sof_ipc_tplg_ops ipc3_tplg_ops = {
.dai_get_clk = sof_ipc3_dai_get_clk,
.set_up_all_pipelines = sof_ipc3_set_up_all_pipelines,
.tear_down_all_pipelines = sof_ipc3_tear_down_all_pipelines,
+ .parse_manifest = sof_ipc3_parse_manifest,
};
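
Sketch of the ABI check performed by sof_ipc3_parse_manifest() above (stand-alone C with simplified macros, assuming the usual major/minor/patch packing): the topology is rejected when the major numbers differ and, only with strict checks enabled, when the topology minor is newer than the kernel's.

#include <stdio.h>
#include <stdbool.h>

#define ABI_VER(ma, mi, pa)	(((ma) << 24) | ((mi) << 12) | (pa))
#define ABI_MAJOR(v)		(((v) >> 24) & 0xff)
#define ABI_MINOR(v)		(((v) >> 12) & 0xfff)

#define KERNEL_ABI	ABI_VER(3, 19, 0)	/* placeholder kernel ABI */

static bool tplg_abi_ok(unsigned int tplg_abi, bool strict)
{
	if (ABI_MAJOR(tplg_abi) != ABI_MAJOR(KERNEL_ABI))
		return false;			/* incompatible major version */

	if (strict && ABI_MINOR(tplg_abi) > ABI_MINOR(KERNEL_ABI))
		return false;			/* topology newer than kernel */

	return true;
}

int main(void)
{
	printf("%d\n", tplg_abi_ok(ABI_VER(3, 20, 0), false));	/* 1: accepted */
	printf("%d\n", tplg_abi_ok(ABI_VER(3, 20, 0), true));	/* 0: rejected */
	return 0;
}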
diff --git a/sound/soc/sof/ipc3.c b/sound/soc/sof/ipc3.c
index dff5feaad370..82fa320253be 100644
--- a/sound/soc/sof/ipc3.c
+++ b/sound/soc/sof/ipc3.c
@@ -147,6 +147,8 @@ static void ipc3_log_header(struct device *dev, u8 *text, u32 cmd)
case SOF_IPC_TRACE_DMA_PARAMS:
str2 = "DMA_PARAMS"; break;
case SOF_IPC_TRACE_DMA_POSITION:
+ if (!sof_debug_check_flag(SOF_DBG_PRINT_DMA_POSITION_UPDATE_LOGS))
+ return;
str2 = "DMA_POSITION"; break;
case SOF_IPC_TRACE_DMA_PARAMS_EXT:
str2 = "DMA_PARAMS_EXT"; break;
@@ -290,7 +292,7 @@ static int ipc3_wait_tx_done(struct snd_sof_ipc *ipc, void *reply_data)
dev_err(sdev->dev,
"ipc tx timed out for %#x (msg/reply size: %d/%zu)\n",
hdr->cmd, hdr->size, msg->reply_size);
- snd_sof_handle_fw_exception(ipc->sdev);
+ snd_sof_handle_fw_exception(ipc->sdev, "IPC timeout");
ret = -ETIMEDOUT;
} else {
ret = msg->reply_error;
@@ -299,7 +301,8 @@ static int ipc3_wait_tx_done(struct snd_sof_ipc *ipc, void *reply_data)
"ipc tx error for %#x (msg/reply size: %d/%zu): %d\n",
hdr->cmd, hdr->size, msg->reply_size, ret);
} else {
- ipc3_log_header(sdev->dev, "ipc tx succeeded", hdr->cmd);
+ if (sof_debug_check_flag(SOF_DBG_PRINT_IPC_SUCCESS_LOGS))
+ ipc3_log_header(sdev->dev, "ipc tx succeeded", hdr->cmd);
if (msg->reply_size)
/* copy the data returned from DSP */
memcpy(reply_data, msg->reply_data,
@@ -755,13 +758,10 @@ int sof_ipc3_validate_fw_version(struct snd_sof_dev *sdev)
return -EINVAL;
}
- if (SOF_ABI_VERSION_MINOR(v->abi_version) > SOF_ABI_MINOR) {
- if (!IS_ENABLED(CONFIG_SND_SOC_SOF_STRICT_ABI_CHECKS)) {
- dev_warn(sdev->dev, "FW ABI is more recent than kernel\n");
- } else {
- dev_err(sdev->dev, "FW ABI is more recent than kernel\n");
- return -EINVAL;
- }
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_STRICT_ABI_CHECKS) &&
+ SOF_ABI_VERSION_MINOR(v->abi_version) > SOF_ABI_MINOR) {
+ dev_err(sdev->dev, "FW ABI is more recent than kernel\n");
+ return -EINVAL;
}
if (ready->flags & SOF_IPC_INFO_BUILD) {
@@ -1037,6 +1037,23 @@ static void sof_ipc3_rx_msg(struct snd_sof_dev *sdev)
ipc3_log_header(sdev->dev, "ipc rx done", hdr.cmd);
}
+static int sof_ipc3_set_core_state(struct snd_sof_dev *sdev, int core_idx, bool on)
+{
+ struct sof_ipc_pm_core_config core_cfg = {
+ .hdr.size = sizeof(core_cfg),
+ .hdr.cmd = SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_CORE_ENABLE,
+ };
+ struct sof_ipc_reply reply;
+
+ if (on)
+ core_cfg.enable_mask = sdev->enabled_cores_mask | BIT(core_idx);
+ else
+ core_cfg.enable_mask = sdev->enabled_cores_mask & ~BIT(core_idx);
+
+ return sof_ipc3_tx_msg(sdev, &core_cfg, sizeof(core_cfg),
+ &reply, sizeof(reply), false);
+}
+
static int sof_ipc3_ctx_ipc(struct snd_sof_dev *sdev, int cmd)
{
struct sof_ipc_pm_ctx pm_ctx = {
@@ -1063,6 +1080,7 @@ static int sof_ipc3_ctx_restore(struct snd_sof_dev *sdev)
static const struct sof_ipc_pm_ops ipc3_pm_ops = {
.ctx_save = sof_ipc3_ctx_save,
.ctx_restore = sof_ipc3_ctx_restore,
+ .set_core_state = sof_ipc3_set_core_state,
};
const struct sof_ipc_ops ipc3_ops = {
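
Sketch of the mask arithmetic in sof_ipc3_set_core_state() above (plain C, hypothetical values): enabling a core ORs its bit into the currently enabled mask, disabling clears it, and the resulting full mask is what gets sent to the firmware.

#include <stdio.h>
#include <stdbool.h>

#define BIT(n)	(1u << (n))

static unsigned int core_enable_mask(unsigned int enabled_cores, int core, bool on)
{
	if (on)
		return enabled_cores | BIT(core);	/* add this core */

	return enabled_cores & ~BIT(core);		/* drop this core */
}

int main(void)
{
	unsigned int mask = BIT(0);			/* primary core on */

	mask = core_enable_mask(mask, 2, true);
	printf("%#x\n", mask);				/* 0x5 */
	mask = core_enable_mask(mask, 2, false);
	printf("%#x\n", mask);				/* 0x1 */
	return 0;
}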
diff --git a/sound/soc/sof/ipc4-control.c b/sound/soc/sof/ipc4-control.c
new file mode 100644
index 000000000000..0d5a578c3496
--- /dev/null
+++ b/sound/soc/sof/ipc4-control.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2022 Intel Corporation. All rights reserved.
+//
+//
+
+#include "sof-priv.h"
+#include "sof-audio.h"
+#include "ipc4-priv.h"
+#include "ipc4-topology.h"
+
+static int sof_ipc4_set_get_kcontrol_data(struct snd_sof_control *scontrol, bool set)
+{
+ struct sof_ipc4_control_data *cdata = scontrol->ipc_control_data;
+ struct snd_soc_component *scomp = scontrol->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ const struct sof_ipc_ops *iops = sdev->ipc->ops;
+ struct sof_ipc4_msg *msg = &cdata->msg;
+ struct snd_sof_widget *swidget;
+ bool widget_found = false;
+
+ /* find widget associated with the control */
+ list_for_each_entry(swidget, &sdev->widget_list, list) {
+ if (swidget->comp_id == scontrol->comp_id) {
+ widget_found = true;
+ break;
+ }
+ }
+
+ if (!widget_found) {
+ dev_err(scomp->dev, "Failed to find widget for kcontrol %s\n", scontrol->name);
+ return -ENOENT;
+ }
+
+ /*
+ * Volatile controls should always be part of static pipelines and the widget use_count
+ * would always be > 0 in this case. For the others, just return the cached value if the
+ * widget is not set up.
+ */
+ if (!swidget->use_count)
+ return 0;
+
+ msg->primary &= ~SOF_IPC4_MOD_INSTANCE_MASK;
+ msg->primary |= SOF_IPC4_MOD_INSTANCE(swidget->instance_id);
+
+ return iops->set_get_data(sdev, msg, msg->data_size, set);
+}
+
+static int
+sof_ipc4_set_volume_data(struct snd_sof_dev *sdev, struct snd_sof_widget *swidget,
+ struct snd_sof_control *scontrol)
+{
+ struct sof_ipc4_control_data *cdata = scontrol->ipc_control_data;
+ struct sof_ipc4_gain *gain = swidget->private;
+ struct sof_ipc4_msg *msg = &cdata->msg;
+ struct sof_ipc4_gain_data data;
+ bool all_channels_equal = true;
+ u32 value;
+ int ret, i;
+
+ /* check if all channel values are equal */
+ value = cdata->chanv[0].value;
+ for (i = 1; i < scontrol->num_channels; i++) {
+ if (cdata->chanv[i].value != value) {
+ all_channels_equal = false;
+ break;
+ }
+ }
+
+ /*
+ * notify DSP with a single IPC message if all channel values are equal. Otherwise send
+ * a separate IPC for each channel.
+ */
+ for (i = 0; i < scontrol->num_channels; i++) {
+ if (all_channels_equal) {
+ data.channels = SOF_IPC4_GAIN_ALL_CHANNELS_MASK;
+ data.init_val = cdata->chanv[0].value;
+ } else {
+ data.channels = cdata->chanv[i].channel;
+ data.init_val = cdata->chanv[i].value;
+ }
+
+ /* set curve type and duration from topology */
+ data.curve_duration = gain->data.curve_duration;
+ data.curve_type = gain->data.curve_type;
+
+ msg->data_ptr = &data;
+ msg->data_size = sizeof(data);
+
+ ret = sof_ipc4_set_get_kcontrol_data(scontrol, true);
+ msg->data_ptr = NULL;
+ msg->data_size = 0;
+ if (ret < 0) {
+ dev_err(sdev->dev, "Failed to set volume update for %s\n",
+ scontrol->name);
+ return ret;
+ }
+
+ if (all_channels_equal)
+ break;
+ }
+
+ return 0;
+}
+
+static bool sof_ipc4_volume_put(struct snd_sof_control *scontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct sof_ipc4_control_data *cdata = scontrol->ipc_control_data;
+ struct snd_soc_component *scomp = scontrol->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ unsigned int channels = scontrol->num_channels;
+ struct snd_sof_widget *swidget;
+ bool widget_found = false;
+ bool change = false;
+ unsigned int i;
+ int ret;
+
+ /* update each channel */
+ for (i = 0; i < channels; i++) {
+ u32 value = mixer_to_ipc(ucontrol->value.integer.value[i],
+ scontrol->volume_table, scontrol->max + 1);
+
+ change = change || (value != cdata->chanv[i].value);
+ cdata->chanv[i].channel = i;
+ cdata->chanv[i].value = value;
+ }
+
+ if (!pm_runtime_active(scomp->dev))
+ return change;
+
+ /* find widget associated with the control */
+ list_for_each_entry(swidget, &sdev->widget_list, list) {
+ if (swidget->comp_id == scontrol->comp_id) {
+ widget_found = true;
+ break;
+ }
+ }
+
+ if (!widget_found) {
+ dev_err(scomp->dev, "Failed to find widget for kcontrol %s\n", scontrol->name);
+ return false;
+ }
+
+ ret = sof_ipc4_set_volume_data(sdev, swidget, scontrol);
+ if (ret < 0)
+ return false;
+
+ return change;
+}
+
+static int sof_ipc4_volume_get(struct snd_sof_control *scontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct sof_ipc4_control_data *cdata = scontrol->ipc_control_data;
+ unsigned int channels = scontrol->num_channels;
+ unsigned int i;
+
+ for (i = 0; i < channels; i++)
+ ucontrol->value.integer.value[i] = ipc_to_mixer(cdata->chanv[i].value,
+ scontrol->volume_table,
+ scontrol->max + 1);
+
+ return 0;
+}
+
+/* set up all controls for the widget */
+static int sof_ipc4_widget_kcontrol_setup(struct snd_sof_dev *sdev, struct snd_sof_widget *swidget)
+{
+ struct snd_sof_control *scontrol;
+ int ret;
+
+ list_for_each_entry(scontrol, &sdev->kcontrol_list, list)
+ if (scontrol->comp_id == swidget->comp_id) {
+ ret = sof_ipc4_set_volume_data(sdev, swidget, scontrol);
+ if (ret < 0) {
+ dev_err(sdev->dev, "%s: kcontrol %d set up failed for widget %s\n",
+ __func__, scontrol->comp_id, swidget->widget->name);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int
+sof_ipc4_set_up_volume_table(struct snd_sof_control *scontrol, int tlv[SOF_TLV_ITEMS], int size)
+{
+ int i;
+
+ /* init the volume table */
+ scontrol->volume_table = kcalloc(size, sizeof(u32), GFP_KERNEL);
+ if (!scontrol->volume_table)
+ return -ENOMEM;
+
+ /* populate the volume table */
+ for (i = 0; i < size; i++) {
+ u32 val = vol_compute_gain(i, tlv);
+ u64 q31val = ((u64)val) << 15; /* Can be over Q1.31, need to saturate */
+
+ scontrol->volume_table[i] = q31val > SOF_IPC4_VOL_ZERO_DB ?
+ SOF_IPC4_VOL_ZERO_DB : q31val;
+ }
+
+ return 0;
+}
+
+const struct sof_ipc_tplg_control_ops tplg_ipc4_control_ops = {
+ .volume_put = sof_ipc4_volume_put,
+ .volume_get = sof_ipc4_volume_get,
+ .widget_kcontrol_setup = sof_ipc4_widget_kcontrol_setup,
+ .set_up_volume_table = sof_ipc4_set_up_volume_table,
+};
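
Sketch of the saturation step in sof_ipc4_set_up_volume_table() above (stand-alone C; the gain source and the 0 dB constant are placeholders, not the kernel's values): each table entry is shifted up into Q1.31 and clamped so it never exceeds the 0 dB reference.

#include <stdio.h>
#include <stdint.h>

#define VOL_ZERO_DB	(1u << 31)	/* placeholder 0 dB reference in Q1.31 */

/* clamp a raw gain (already in the firmware's fixed-point base) to Q1.31 */
static uint32_t to_q31_saturated(uint32_t raw_gain)
{
	uint64_t q31val = (uint64_t)raw_gain << 15;	/* may exceed Q1.31 */

	return q31val > VOL_ZERO_DB ? VOL_ZERO_DB : (uint32_t)q31val;
}

int main(void)
{
	printf("%#x\n", (unsigned int)to_q31_saturated(0x20000));	/* clamped to 0x80000000 */
	printf("%#x\n", (unsigned int)to_q31_saturated(0x8000));	/* 0x40000000 */
	return 0;
}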
diff --git a/sound/soc/sof/ipc4-pcm.c b/sound/soc/sof/ipc4-pcm.c
new file mode 100644
index 000000000000..732872395980
--- /dev/null
+++ b/sound/soc/sof/ipc4-pcm.c
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2022 Intel Corporation. All rights reserved.
+//
+
+#include <sound/pcm_params.h>
+#include <sound/sof/ipc4/header.h>
+#include "sof-audio.h"
+#include "sof-priv.h"
+#include "ipc4-priv.h"
+#include "ipc4-topology.h"
+
+int sof_ipc4_set_pipeline_state(struct snd_sof_dev *sdev, u32 id, u32 state)
+{
+ struct sof_ipc4_msg msg = {{ 0 }};
+ u32 primary;
+
+ dev_dbg(sdev->dev, "ipc4 set pipeline %d state %d", id, state);
+
+ primary = state;
+ primary |= SOF_IPC4_GLB_PIPE_STATE_ID(id);
+ primary |= SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_GLB_SET_PIPELINE_STATE);
+ primary |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
+ primary |= SOF_IPC4_MSG_TARGET(SOF_IPC4_FW_GEN_MSG);
+
+ msg.primary = primary;
+
+ return sof_ipc_tx_message(sdev->ipc, &msg, 0, NULL, 0);
+}
+EXPORT_SYMBOL(sof_ipc4_set_pipeline_state);
+
+static int sof_ipc4_trigger_pipelines(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int state)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_sof_widget *pipeline_widget;
+ struct snd_soc_dapm_widget_list *list;
+ struct snd_soc_dapm_widget *widget;
+ struct sof_ipc4_pipeline *pipeline;
+ struct snd_sof_widget *swidget;
+ struct snd_sof_pcm *spcm;
+ int ret = 0;
+ int num_widgets;
+
+ spcm = snd_sof_find_spcm_dai(component, rtd);
+ if (!spcm)
+ return -EINVAL;
+
+ list = spcm->stream[substream->stream].list;
+
+ for_each_dapm_widgets(list, num_widgets, widget) {
+ swidget = widget->dobj.private;
+
+ if (!swidget)
+ continue;
+
+ /*
+ * set pipeline state for both FE and BE pipelines for RUNNING state.
+ * For PAUSE/RESET, set the pipeline state only for the FE pipeline.
+ */
+ switch (state) {
+ case SOF_IPC4_PIPE_PAUSED:
+ case SOF_IPC4_PIPE_RESET:
+ if (!WIDGET_IS_AIF(swidget->id))
+ continue;
+ break;
+ default:
+ break;
+ }
+
+ /* find pipeline widget for the pipeline that this widget belongs to */
+ pipeline_widget = swidget->pipe_widget;
+ pipeline = (struct sof_ipc4_pipeline *)pipeline_widget->private;
+
+ if (pipeline->state == state)
+ continue;
+
+ /* first set the pipeline to PAUSED state */
+ if (pipeline->state != SOF_IPC4_PIPE_PAUSED) {
+ ret = sof_ipc4_set_pipeline_state(sdev, swidget->pipeline_id,
+ SOF_IPC4_PIPE_PAUSED);
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed to pause pipeline %d\n",
+ swidget->pipeline_id);
+ return ret;
+ }
+ }
+
+ pipeline->state = SOF_IPC4_PIPE_PAUSED;
+
+ if (pipeline->state == state)
+ continue;
+
+ /* then set the final state */
+ ret = sof_ipc4_set_pipeline_state(sdev, swidget->pipeline_id, state);
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed to set state %d for pipeline %d\n",
+ state, swidget->pipeline_id);
+ break;
+ }
+
+ pipeline->state = state;
+ }
+
+ return ret;
+}
+
+static int sof_ipc4_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
+{
+ int state;
+
+ /* determine the pipeline state */
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ state = SOF_IPC4_PIPE_PAUSED;
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_START:
+ state = SOF_IPC4_PIPE_RUNNING;
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_STOP:
+ state = SOF_IPC4_PIPE_PAUSED;
+ break;
+ default:
+ dev_err(component->dev, "%s: unhandled trigger cmd %d\n", __func__, cmd);
+ return -EINVAL;
+ }
+
+ /* set the pipeline state */
+ return sof_ipc4_trigger_pipelines(component, substream, state);
+}
+
+static int sof_ipc4_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ return sof_ipc4_trigger_pipelines(component, substream, SOF_IPC4_PIPE_RESET);
+}
+
+static void ipc4_ssp_dai_config_pcm_params_match(struct snd_sof_dev *sdev, const char *link_name,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_sof_dai_link *slink;
+ struct snd_sof_dai *dai;
+ bool dai_link_found = false;
+ int i;
+
+ list_for_each_entry(slink, &sdev->dai_link_list, list) {
+ if (!strcmp(slink->link->name, link_name)) {
+ dai_link_found = true;
+ break;
+ }
+ }
+
+ if (!dai_link_found)
+ return;
+
+ for (i = 0; i < slink->num_hw_configs; i++) {
+ struct snd_soc_tplg_hw_config *hw_config = &slink->hw_configs[i];
+
+ if (params_rate(params) == le32_to_cpu(hw_config->fsync_rate)) {
+ /* set current config for all DAI's with matching name */
+ list_for_each_entry(dai, &sdev->dai_list, list)
+ if (!strcmp(slink->link->name, dai->name))
+ dai->current_config = le32_to_cpu(hw_config->id);
+ break;
+ }
+ }
+}
+
+static int sof_ipc4_pcm_dai_link_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, SOF_AUDIO_PCM_DRV_NAME);
+ struct snd_sof_dai *dai = snd_sof_find_dai(component, rtd->dai_link->name);
+ struct snd_interval *rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct sof_ipc4_copier *ipc4_copier;
+ struct snd_soc_dpcm *dpcm;
+
+ if (!dai) {
+ dev_err(component->dev, "%s: No DAI found with name %s\n", __func__,
+ rtd->dai_link->name);
+ return -EINVAL;
+ }
+
+ ipc4_copier = dai->private;
+ if (!ipc4_copier) {
+ dev_err(component->dev, "%s: No private data found for DAI %s\n",
+ __func__, rtd->dai_link->name);
+ return -EINVAL;
+ }
+
+ /* always set BE format to 32-bits for both playback and capture */
+ snd_mask_none(fmt);
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S32_LE);
+
+ rate->min = ipc4_copier->available_fmt.base_config->audio_fmt.sampling_frequency;
+ rate->max = rate->min;
+
+ /*
+ * Set trigger order for capture to SND_SOC_DPCM_TRIGGER_PRE. This is required
+ * to ensure that the BE DAI pipeline gets stopped/suspended before the FE DAI
+ * pipeline gets triggered and the pipeline widgets are freed.
+ */
+ for_each_dpcm_fe(rtd, SNDRV_PCM_STREAM_CAPTURE, dpcm) {
+ struct snd_soc_pcm_runtime *fe = dpcm->fe;
+
+ fe->dai_link->trigger[SNDRV_PCM_STREAM_CAPTURE] = SND_SOC_DPCM_TRIGGER_PRE;
+ }
+
+ switch (ipc4_copier->dai_type) {
+ case SOF_DAI_INTEL_SSP:
+ ipc4_ssp_dai_config_pcm_params_match(sdev, (char *)rtd->dai_link->name, params);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+const struct sof_ipc_pcm_ops ipc4_pcm_ops = {
+ .trigger = sof_ipc4_pcm_trigger,
+ .hw_free = sof_ipc4_pcm_hw_free,
+ .dai_link_fixup = sof_ipc4_pcm_dai_link_fixup,
+};
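
Sketch of the two-step state sequencing used by sof_ipc4_trigger_pipelines() above (stand-alone C, simplified): a pipeline that is not already PAUSED is first moved to PAUSED, and only then to the requested target state.

#include <stdio.h>

enum pipe_state { PIPE_UNINIT, PIPE_PAUSED, PIPE_RUNNING, PIPE_RESET };

struct pipeline { enum pipe_state state; };

static int set_state(struct pipeline *p, enum pipe_state state)
{
	printf("pipeline -> %d\n", state);
	p->state = state;
	return 0;
}

static int trigger(struct pipeline *p, enum pipe_state target)
{
	if (p->state == target)
		return 0;			/* nothing to do */

	if (p->state != PIPE_PAUSED)
		set_state(p, PIPE_PAUSED);	/* always pause first */

	if (p->state == target)
		return 0;			/* PAUSED was the target */

	return set_state(p, target);		/* then the final state */
}

int main(void)
{
	struct pipeline p = { PIPE_UNINIT };

	trigger(&p, PIPE_RUNNING);	/* PAUSED, then RUNNING */
	trigger(&p, PIPE_PAUSED);	/* single step back to PAUSED */
	return 0;
}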
diff --git a/sound/soc/sof/ipc4-priv.h b/sound/soc/sof/ipc4-priv.h
index 2b71d5675933..9492fe1796c2 100644
--- a/sound/soc/sof/ipc4-priv.h
+++ b/sound/soc/sof/ipc4-priv.h
@@ -18,11 +18,13 @@
* @manifest_fw_hdr_offset: FW header offset in the manifest
* @num_fw_modules : Number of modules in base FW
* @fw_modules: Array of base FW modules
+ * @nhlt: NHLT table either from the BIOS or the topology manifest
*/
struct sof_ipc4_fw_data {
u32 manifest_fw_hdr_offset;
int num_fw_modules;
void *fw_modules;
+ void *nhlt;
};
/**
@@ -40,5 +42,10 @@ struct sof_ipc4_fw_module {
};
extern const struct sof_ipc_fw_loader_ops ipc4_loader_ops;
+extern const struct sof_ipc_tplg_ops ipc4_tplg_ops;
+extern const struct sof_ipc_tplg_control_ops tplg_ipc4_control_ops;
+extern const struct sof_ipc_pcm_ops ipc4_pcm_ops;
+
+int sof_ipc4_set_pipeline_state(struct snd_sof_dev *sdev, u32 id, u32 state);
#endif
diff --git a/sound/soc/sof/ipc4-topology.c b/sound/soc/sof/ipc4-topology.c
new file mode 100644
index 000000000000..af072b484a60
--- /dev/null
+++ b/sound/soc/sof/ipc4-topology.c
@@ -0,0 +1,1921 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2022 Intel Corporation. All rights reserved.
+//
+//
+#include <uapi/sound/sof/tokens.h>
+#include <sound/pcm_params.h>
+#include <sound/sof/ext_manifest4.h>
+#include <sound/intel-nhlt.h>
+#include "sof-priv.h"
+#include "sof-audio.h"
+#include "ipc4-priv.h"
+#include "ipc4-topology.h"
+#include "ops.h"
+
+#define SOF_IPC4_GAIN_PARAM_ID 0
+#define SOF_IPC4_TPLG_ABI_SIZE 6
+
+static DEFINE_IDA(alh_group_ida);
+
+static const struct sof_topology_token ipc4_sched_tokens[] = {
+ {SOF_TKN_SCHED_LP_MODE, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_pipeline, lp_mode)}
+};
+
+static const struct sof_topology_token pipeline_tokens[] = {
+ {SOF_TKN_SCHED_DYNAMIC_PIPELINE, SND_SOC_TPLG_TUPLE_TYPE_BOOL, get_token_u16,
+ offsetof(struct snd_sof_widget, dynamic_pipeline_widget)},
+};
+
+static const struct sof_topology_token ipc4_comp_tokens[] = {
+ {SOF_TKN_COMP_CPC, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_base_module_cfg, cpc)},
+ {SOF_TKN_COMP_IS_PAGES, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_base_module_cfg, is_pages)},
+};
+
+static const struct sof_topology_token ipc4_audio_format_buffer_size_tokens[] = {
+ {SOF_TKN_CAVS_AUDIO_FORMAT_IBS, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_base_module_cfg, ibs)},
+ {SOF_TKN_CAVS_AUDIO_FORMAT_OBS, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_base_module_cfg, obs)},
+};
+
+static const struct sof_topology_token ipc4_in_audio_format_tokens[] = {
+ {SOF_TKN_CAVS_AUDIO_FORMAT_IN_RATE, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_audio_format, sampling_frequency)},
+ {SOF_TKN_CAVS_AUDIO_FORMAT_IN_BIT_DEPTH, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_audio_format, bit_depth)},
+ {SOF_TKN_CAVS_AUDIO_FORMAT_IN_CH_MAP, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_audio_format, ch_map)},
+ {SOF_TKN_CAVS_AUDIO_FORMAT_IN_CH_CFG, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_audio_format, ch_cfg)},
+ {SOF_TKN_CAVS_AUDIO_FORMAT_IN_INTERLEAVING_STYLE, SND_SOC_TPLG_TUPLE_TYPE_WORD,
+ get_token_u32, offsetof(struct sof_ipc4_audio_format, interleaving_style)},
+ {SOF_TKN_CAVS_AUDIO_FORMAT_IN_FMT_CFG, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_audio_format, fmt_cfg)},
+};
+
+static const struct sof_topology_token ipc4_out_audio_format_tokens[] = {
+ {SOF_TKN_CAVS_AUDIO_FORMAT_OUT_RATE, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_audio_format, sampling_frequency)},
+ {SOF_TKN_CAVS_AUDIO_FORMAT_OUT_BIT_DEPTH, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_audio_format, bit_depth)},
+ {SOF_TKN_CAVS_AUDIO_FORMAT_OUT_CH_MAP, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_audio_format, ch_map)},
+ {SOF_TKN_CAVS_AUDIO_FORMAT_OUT_CH_CFG, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_audio_format, ch_cfg)},
+ {SOF_TKN_CAVS_AUDIO_FORMAT_OUT_INTERLEAVING_STYLE, SND_SOC_TPLG_TUPLE_TYPE_WORD,
+ get_token_u32, offsetof(struct sof_ipc4_audio_format, interleaving_style)},
+ {SOF_TKN_CAVS_AUDIO_FORMAT_OUT_FMT_CFG, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_audio_format, fmt_cfg)},
+};
+
+static const struct sof_topology_token ipc4_copier_gateway_cfg_tokens[] = {
+ {SOF_TKN_CAVS_AUDIO_FORMAT_DMA_BUFFER_SIZE, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32, 0},
+};
+
+static const struct sof_topology_token ipc4_copier_tokens[] = {
+ {SOF_TKN_INTEL_COPIER_NODE_TYPE, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32, 0},
+};
+
+static const struct sof_topology_token ipc4_audio_fmt_num_tokens[] = {
+ {SOF_TKN_COMP_NUM_AUDIO_FORMATS, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ 0},
+};
+
+static const struct sof_topology_token dai_tokens[] = {
+ {SOF_TKN_DAI_TYPE, SND_SOC_TPLG_TUPLE_TYPE_STRING, get_token_dai_type,
+ offsetof(struct sof_ipc4_copier, dai_type)},
+ {SOF_TKN_DAI_INDEX, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_copier, dai_index)},
+};
+
+/* Component extended tokens */
+static const struct sof_topology_token comp_ext_tokens[] = {
+ {SOF_TKN_COMP_UUID, SND_SOC_TPLG_TUPLE_TYPE_UUID, get_token_uuid,
+ offsetof(struct snd_sof_widget, uuid)},
+};
+
+static const struct sof_topology_token gain_tokens[] = {
+ {SOF_TKN_GAIN_RAMP_TYPE, SND_SOC_TPLG_TUPLE_TYPE_WORD,
+ get_token_u32, offsetof(struct sof_ipc4_gain_data, curve_type)},
+ {SOF_TKN_GAIN_RAMP_DURATION,
+ SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_gain_data, curve_duration)},
+ {SOF_TKN_GAIN_VAL, SND_SOC_TPLG_TUPLE_TYPE_WORD,
+ get_token_u32, offsetof(struct sof_ipc4_gain_data, init_val)},
+};
+
+/* SRC */
+static const struct sof_topology_token src_tokens[] = {
+ {SOF_TKN_SRC_RATE_OUT, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc4_src, sink_rate)},
+};
+
+static const struct sof_token_info ipc4_token_list[SOF_TOKEN_COUNT] = {
+ [SOF_DAI_TOKENS] = {"DAI tokens", dai_tokens, ARRAY_SIZE(dai_tokens)},
+ [SOF_PIPELINE_TOKENS] = {"Pipeline tokens", pipeline_tokens, ARRAY_SIZE(pipeline_tokens)},
+ [SOF_SCHED_TOKENS] = {"Scheduler tokens", ipc4_sched_tokens,
+ ARRAY_SIZE(ipc4_sched_tokens)},
+ [SOF_COMP_EXT_TOKENS] = {"Comp extended tokens", comp_ext_tokens,
+ ARRAY_SIZE(comp_ext_tokens)},
+ [SOF_COMP_TOKENS] = {"IPC4 Component tokens",
+ ipc4_comp_tokens, ARRAY_SIZE(ipc4_comp_tokens)},
+ [SOF_IN_AUDIO_FORMAT_TOKENS] = {"IPC4 Input Audio format tokens",
+ ipc4_in_audio_format_tokens, ARRAY_SIZE(ipc4_in_audio_format_tokens)},
+ [SOF_OUT_AUDIO_FORMAT_TOKENS] = {"IPC4 Output Audio format tokens",
+ ipc4_out_audio_format_tokens, ARRAY_SIZE(ipc4_out_audio_format_tokens)},
+ [SOF_AUDIO_FORMAT_BUFFER_SIZE_TOKENS] = {"IPC4 Audio format buffer size tokens",
+ ipc4_audio_format_buffer_size_tokens,
+ ARRAY_SIZE(ipc4_audio_format_buffer_size_tokens)},
+ [SOF_COPIER_GATEWAY_CFG_TOKENS] = {"IPC4 Copier gateway config tokens",
+ ipc4_copier_gateway_cfg_tokens, ARRAY_SIZE(ipc4_copier_gateway_cfg_tokens)},
+ [SOF_COPIER_TOKENS] = {"IPC4 Copier tokens", ipc4_copier_tokens,
+ ARRAY_SIZE(ipc4_copier_tokens)},
+ [SOF_AUDIO_FMT_NUM_TOKENS] = {"IPC4 Audio format number tokens",
+ ipc4_audio_fmt_num_tokens, ARRAY_SIZE(ipc4_audio_fmt_num_tokens)},
+ [SOF_GAIN_TOKENS] = {"Gain tokens", gain_tokens, ARRAY_SIZE(gain_tokens)},
+ [SOF_SRC_TOKENS] = {"SRC tokens", src_tokens, ARRAY_SIZE(src_tokens)},
+};
+
+static void sof_ipc4_dbg_audio_format(struct device *dev,
+ struct sof_ipc4_audio_format *format,
+ size_t object_size, int num_format)
+{
+ struct sof_ipc4_audio_format *fmt;
+ void *ptr = format;
+ int i;
+
+ for (i = 0; i < num_format; i++, ptr = (u8 *)ptr + object_size) {
+ fmt = ptr;
+ dev_dbg(dev,
+ " #%d: %uKHz, %ubit (ch_map %#x ch_cfg %u interleaving_style %u fmt_cfg %#x)\n",
+ i, fmt->sampling_frequency, fmt->bit_depth, fmt->ch_map,
+ fmt->ch_cfg, fmt->interleaving_style, fmt->fmt_cfg);
+ }
+}
+
+/**
+ * sof_ipc4_get_audio_fmt - get available audio formats from swidget->tuples
+ * @scomp: pointer to SOC component
+ * @swidget: pointer to struct snd_sof_widget containing tuples
+ * @available_fmt: pointer to struct sof_ipc4_available_audio_format being filled in
+ * @has_out_format: true if available_fmt contains output format
+ *
+ * Return: 0 if successful
+ */
+static int sof_ipc4_get_audio_fmt(struct snd_soc_component *scomp,
+ struct snd_sof_widget *swidget,
+ struct sof_ipc4_available_audio_format *available_fmt,
+ bool has_out_format)
+{
+ struct sof_ipc4_base_module_cfg *base_config;
+ struct sof_ipc4_audio_format *out_format;
+ int audio_fmt_num = 0;
+ int ret, i;
+
+ ret = sof_update_ipc_object(scomp, &audio_fmt_num,
+ SOF_AUDIO_FMT_NUM_TOKENS, swidget->tuples,
+ swidget->num_tuples, sizeof(audio_fmt_num), 1);
+ if (ret || audio_fmt_num <= 0) {
+ dev_err(scomp->dev, "Invalid number of audio formats: %d\n", audio_fmt_num);
+ return -EINVAL;
+ }
+ available_fmt->audio_fmt_num = audio_fmt_num;
+
+ dev_dbg(scomp->dev, "Number of audio formats: %d\n", available_fmt->audio_fmt_num);
+
+ base_config = kcalloc(available_fmt->audio_fmt_num, sizeof(*base_config), GFP_KERNEL);
+ if (!base_config)
+ return -ENOMEM;
+
+ /* set cpc and is_pages for all base_cfg */
+ for (i = 0; i < available_fmt->audio_fmt_num; i++) {
+ ret = sof_update_ipc_object(scomp, &base_config[i],
+ SOF_COMP_TOKENS, swidget->tuples,
+ swidget->num_tuples, sizeof(*base_config), 1);
+ if (ret) {
+ dev_err(scomp->dev, "parse comp tokens failed %d\n", ret);
+ goto err_in;
+ }
+ }
+
+ /* copy the ibs/obs for each base_cfg */
+ ret = sof_update_ipc_object(scomp, base_config,
+ SOF_AUDIO_FORMAT_BUFFER_SIZE_TOKENS, swidget->tuples,
+ swidget->num_tuples, sizeof(*base_config),
+ available_fmt->audio_fmt_num);
+ if (ret) {
+ dev_err(scomp->dev, "parse buffer size tokens failed %d\n", ret);
+ goto err_in;
+ }
+
+ for (i = 0; i < available_fmt->audio_fmt_num; i++)
+ dev_dbg(scomp->dev, "%d: ibs: %d obs: %d cpc: %d is_pages: %d\n", i,
+ base_config[i].ibs, base_config[i].obs,
+ base_config[i].cpc, base_config[i].is_pages);
+
+ ret = sof_update_ipc_object(scomp, &base_config->audio_fmt,
+ SOF_IN_AUDIO_FORMAT_TOKENS, swidget->tuples,
+ swidget->num_tuples, sizeof(*base_config),
+ available_fmt->audio_fmt_num);
+ if (ret) {
+ dev_err(scomp->dev, "parse base_config audio_fmt tokens failed %d\n", ret);
+ goto err_in;
+ }
+
+ dev_dbg(scomp->dev, "Get input audio formats for %s\n", swidget->widget->name);
+ sof_ipc4_dbg_audio_format(scomp->dev, &base_config->audio_fmt,
+ sizeof(*base_config),
+ available_fmt->audio_fmt_num);
+
+ available_fmt->base_config = base_config;
+
+ if (!has_out_format)
+ return 0;
+
+ out_format = kcalloc(available_fmt->audio_fmt_num, sizeof(*out_format), GFP_KERNEL);
+ if (!out_format) {
+ ret = -ENOMEM;
+ goto err_in;
+ }
+
+ ret = sof_update_ipc_object(scomp, out_format,
+ SOF_OUT_AUDIO_FORMAT_TOKENS, swidget->tuples,
+ swidget->num_tuples, sizeof(*out_format),
+ available_fmt->audio_fmt_num);
+
+ if (ret) {
+ dev_err(scomp->dev, "parse output audio_fmt tokens failed\n");
+ goto err_out;
+ }
+
+ available_fmt->out_audio_fmt = out_format;
+ dev_dbg(scomp->dev, "Get output audio formats for %s\n", swidget->widget->name);
+ sof_ipc4_dbg_audio_format(scomp->dev, out_format, sizeof(*out_format),
+ available_fmt->audio_fmt_num);
+
+ return 0;
+
+err_out:
+ kfree(out_format);
+err_in:
+ kfree(base_config);
+
+ return ret;
+}
+
+/* release the memory allocated in sof_ipc4_get_audio_fmt */
+static void sof_ipc4_free_audio_fmt(struct sof_ipc4_available_audio_format *available_fmt)
+{
+ kfree(available_fmt->base_config);
+ available_fmt->base_config = NULL;
+ kfree(available_fmt->out_audio_fmt);
+ available_fmt->out_audio_fmt = NULL;
+}
+
+static void sof_ipc4_widget_free_comp(struct snd_sof_widget *swidget)
+{
+ kfree(swidget->private);
+}
+
+static int sof_ipc4_widget_set_module_info(struct snd_sof_widget *swidget)
+{
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct sof_ipc4_fw_data *ipc4_data = sdev->private;
+ struct sof_ipc4_fw_module *fw_modules = ipc4_data->fw_modules;
+ int i;
+
+ if (!fw_modules) {
+ dev_err(sdev->dev, "no fw_module information\n");
+ return -EINVAL;
+ }
+
+ /* set module info */
+ for (i = 0; i < ipc4_data->num_fw_modules; i++) {
+ if (guid_equal(&swidget->uuid, &fw_modules[i].man4_module_entry.uuid)) {
+ swidget->module_info = &fw_modules[i];
+ return 0;
+ }
+ }
+
+ dev_err(sdev->dev, "failed to find module info for widget %s with UUID %pUL\n",
+ swidget->widget->name, &swidget->uuid);
+ return -EINVAL;
+}
+
+static int sof_ipc4_widget_setup_msg(struct snd_sof_widget *swidget, struct sof_ipc4_msg *msg)
+{
+ struct sof_ipc4_fw_module *fw_module;
+ uint32_t type;
+ int ret;
+
+ ret = sof_ipc4_widget_set_module_info(swidget);
+ if (ret)
+ return ret;
+
+ fw_module = swidget->module_info;
+
+ msg->primary = fw_module->man4_module_entry.id;
+ msg->primary |= SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_MOD_INIT_INSTANCE);
+ msg->primary |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
+ msg->primary |= SOF_IPC4_MSG_TARGET(SOF_IPC4_MODULE_MSG);
+
+ msg->extension = SOF_IPC4_MOD_EXT_PPL_ID(swidget->pipeline_id);
+ msg->extension |= SOF_IPC4_MOD_EXT_CORE_ID(swidget->core);
+
+ type = fw_module->man4_module_entry.type & SOF_IPC4_MODULE_DP ? 1 : 0;
+ msg->extension |= SOF_IPC4_MOD_EXT_DOMAIN(type);
+
+ return 0;
+}
+
+static int sof_ipc4_widget_setup_pcm(struct snd_sof_widget *swidget)
+{
+ struct sof_ipc4_available_audio_format *available_fmt;
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct sof_ipc4_copier *ipc4_copier;
+ int node_type = 0;
+ int ret, i;
+
+ ipc4_copier = kzalloc(sizeof(*ipc4_copier), GFP_KERNEL);
+ if (!ipc4_copier)
+ return -ENOMEM;
+
+ swidget->private = ipc4_copier;
+ available_fmt = &ipc4_copier->available_fmt;
+
+ dev_dbg(scomp->dev, "Updating IPC structure for %s\n", swidget->widget->name);
+
+ ret = sof_ipc4_get_audio_fmt(scomp, swidget, available_fmt, true);
+ if (ret)
+ goto free_copier;
+
+ available_fmt->dma_buffer_size = kcalloc(available_fmt->audio_fmt_num, sizeof(u32),
+ GFP_KERNEL);
+ if (!available_fmt->dma_buffer_size) {
+ ret = -ENOMEM;
+ goto free_available_fmt;
+ }
+
+ ret = sof_update_ipc_object(scomp, available_fmt->dma_buffer_size,
+ SOF_COPIER_GATEWAY_CFG_TOKENS, swidget->tuples,
+ swidget->num_tuples, sizeof(u32),
+ available_fmt->audio_fmt_num);
+ if (ret) {
+ dev_err(scomp->dev, "Failed to parse dma buffer size in audio format for %s\n",
+ swidget->widget->name);
+ goto err;
+ }
+
+ dev_dbg(scomp->dev, "dma buffer size:\n");
+ for (i = 0; i < available_fmt->audio_fmt_num; i++)
+ dev_dbg(scomp->dev, "%d: %u\n", i,
+ available_fmt->dma_buffer_size[i]);
+
+ ret = sof_update_ipc_object(scomp, &node_type,
+ SOF_COPIER_TOKENS, swidget->tuples,
+ swidget->num_tuples, sizeof(node_type), 1);
+
+ if (ret) {
+ dev_err(scomp->dev, "parse host copier node type token failed %d\n",
+ ret);
+ goto err;
+ }
+ dev_dbg(scomp->dev, "host copier '%s' node_type %u\n", swidget->widget->name, node_type);
+
+ ipc4_copier->data.gtw_cfg.node_id = SOF_IPC4_NODE_TYPE(node_type);
+ ipc4_copier->gtw_attr = kzalloc(sizeof(*ipc4_copier->gtw_attr), GFP_KERNEL);
+ if (!ipc4_copier->gtw_attr) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ipc4_copier->copier_config = (uint32_t *)ipc4_copier->gtw_attr;
+ ipc4_copier->data.gtw_cfg.config_length =
+ sizeof(struct sof_ipc4_gtw_attributes) >> 2;
+
+ /* set up module info and message header */
+ ret = sof_ipc4_widget_setup_msg(swidget, &ipc4_copier->msg);
+ if (ret)
+ goto free_gtw_attr;
+
+ return 0;
+
+free_gtw_attr:
+ kfree(ipc4_copier->gtw_attr);
+err:
+ kfree(available_fmt->dma_buffer_size);
+free_available_fmt:
+ sof_ipc4_free_audio_fmt(available_fmt);
+free_copier:
+ kfree(ipc4_copier);
+ swidget->private = NULL;
+ return ret;
+}
+
+static void sof_ipc4_widget_free_comp_pcm(struct snd_sof_widget *swidget)
+{
+ struct sof_ipc4_copier *ipc4_copier = swidget->private;
+ struct sof_ipc4_available_audio_format *available_fmt;
+
+ if (!ipc4_copier)
+ return;
+
+ available_fmt = &ipc4_copier->available_fmt;
+ kfree(available_fmt->dma_buffer_size);
+ kfree(available_fmt->base_config);
+ kfree(available_fmt->out_audio_fmt);
+ kfree(ipc4_copier->gtw_attr);
+ kfree(ipc4_copier);
+ swidget->private = NULL;
+}
+
+static int sof_ipc4_widget_setup_comp_dai(struct snd_sof_widget *swidget)
+{
+ struct sof_ipc4_available_audio_format *available_fmt;
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct snd_sof_dai *dai = swidget->private;
+ struct sof_ipc4_copier *ipc4_copier;
+ int node_type = 0;
+ int ret, i;
+
+ ipc4_copier = kzalloc(sizeof(*ipc4_copier), GFP_KERNEL);
+ if (!ipc4_copier)
+ return -ENOMEM;
+
+ available_fmt = &ipc4_copier->available_fmt;
+
+ dev_dbg(scomp->dev, "Updating IPC structure for %s\n", swidget->widget->name);
+
+ ret = sof_ipc4_get_audio_fmt(scomp, swidget, available_fmt, true);
+ if (ret)
+ goto free_copier;
+
+ available_fmt->dma_buffer_size = kcalloc(available_fmt->audio_fmt_num, sizeof(u32),
+ GFP_KERNEL);
+ if (!available_fmt->dma_buffer_size) {
+ ret = -ENOMEM;
+ goto free_available_fmt;
+ }
+
+ ret = sof_update_ipc_object(scomp, available_fmt->dma_buffer_size,
+ SOF_COPIER_GATEWAY_CFG_TOKENS, swidget->tuples,
+ swidget->num_tuples, sizeof(u32),
+ available_fmt->audio_fmt_num);
+ if (ret) {
+ dev_err(scomp->dev, "Failed to parse dma buffer size in audio format for %s\n",
+ swidget->widget->name);
+ goto err;
+ }
+
+ for (i = 0; i < available_fmt->audio_fmt_num; i++)
+ dev_dbg(scomp->dev, "%d: dma buffer size: %u\n", i,
+ available_fmt->dma_buffer_size[i]);
+
+ ret = sof_update_ipc_object(scomp, &node_type,
+ SOF_COPIER_TOKENS, swidget->tuples,
+ swidget->num_tuples, sizeof(node_type), 1);
+ if (ret) {
+ dev_err(scomp->dev, "parse dai node type failed %d\n", ret);
+ goto err;
+ }
+
+ ret = sof_update_ipc_object(scomp, ipc4_copier,
+ SOF_DAI_TOKENS, swidget->tuples,
+ swidget->num_tuples, sizeof(u32), 1);
+ if (ret) {
+ dev_err(scomp->dev, "parse dai copier node token failed %d\n", ret);
+ goto err;
+ }
+
+ dev_dbg(scomp->dev, "dai %s node_type %u dai_type %u dai_index %d\n", swidget->widget->name,
+ node_type, ipc4_copier->dai_type, ipc4_copier->dai_index);
+
+ ipc4_copier->data.gtw_cfg.node_id = SOF_IPC4_NODE_TYPE(node_type);
+
+ switch (ipc4_copier->dai_type) {
+ case SOF_DAI_INTEL_ALH:
+ {
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct sof_ipc4_alh_configuration_blob *blob;
+ struct snd_sof_widget *w;
+
+ blob = kzalloc(sizeof(*blob), GFP_KERNEL);
+ if (!blob) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ list_for_each_entry(w, &sdev->widget_list, list) {
+ if (w->widget->sname &&
+ strcmp(w->widget->sname, swidget->widget->sname))
+ continue;
+
+ blob->alh_cfg.count++;
+ }
+
+ ipc4_copier->copier_config = (uint32_t *)blob;
+ ipc4_copier->data.gtw_cfg.config_length = sizeof(*blob) >> 2;
+ break;
+ }
+ case SOF_DAI_INTEL_SSP:
+ /* set SSP DAI index as the node_id */
+ ipc4_copier->data.gtw_cfg.node_id |=
+ SOF_IPC4_NODE_INDEX_INTEL_SSP(ipc4_copier->dai_index);
+ break;
+ case SOF_DAI_INTEL_DMIC:
+ /* set DMIC DAI index as the node_id */
+ ipc4_copier->data.gtw_cfg.node_id |=
+ SOF_IPC4_NODE_INDEX_INTEL_DMIC(ipc4_copier->dai_index);
+ break;
+ default:
+ ipc4_copier->gtw_attr = kzalloc(sizeof(*ipc4_copier->gtw_attr), GFP_KERNEL);
+ if (!ipc4_copier->gtw_attr) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ipc4_copier->copier_config = (uint32_t *)ipc4_copier->gtw_attr;
+ ipc4_copier->data.gtw_cfg.config_length =
+ sizeof(struct sof_ipc4_gtw_attributes) >> 2;
+ break;
+ }
+
+ dai->scomp = scomp;
+ dai->private = ipc4_copier;
+
+ /* set up module info and message header */
+ ret = sof_ipc4_widget_setup_msg(swidget, &ipc4_copier->msg);
+ if (ret)
+ goto free_copier_config;
+
+ return 0;
+
+free_copier_config:
+ kfree(ipc4_copier->copier_config);
+err:
+ kfree(available_fmt->dma_buffer_size);
+free_available_fmt:
+ sof_ipc4_free_audio_fmt(available_fmt);
+free_copier:
+ kfree(ipc4_copier);
+ dai->private = NULL;
+ dai->scomp = NULL;
+ return ret;
+}
+
+static void sof_ipc4_widget_free_comp_dai(struct snd_sof_widget *swidget)
+{
+ struct sof_ipc4_available_audio_format *available_fmt;
+ struct snd_sof_dai *dai = swidget->private;
+ struct sof_ipc4_copier *ipc4_copier;
+
+ if (!dai)
+ return;
+
+ if (!dai->private) {
+ kfree(dai);
+ swidget->private = NULL;
+ return;
+ }
+
+ ipc4_copier = dai->private;
+ available_fmt = &ipc4_copier->available_fmt;
+
+ kfree(available_fmt->dma_buffer_size);
+ kfree(available_fmt->base_config);
+ kfree(available_fmt->out_audio_fmt);
+ if (ipc4_copier->dai_type != SOF_DAI_INTEL_SSP &&
+ ipc4_copier->dai_type != SOF_DAI_INTEL_DMIC)
+ kfree(ipc4_copier->copier_config);
+ kfree(dai->private);
+ kfree(dai);
+ swidget->private = NULL;
+}
+
+static int sof_ipc4_widget_setup_comp_pipeline(struct snd_sof_widget *swidget)
+{
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct sof_ipc4_pipeline *pipeline;
+ int ret;
+
+ pipeline = kzalloc(sizeof(*pipeline), GFP_KERNEL);
+ if (!pipeline)
+ return -ENOMEM;
+
+ ret = sof_update_ipc_object(scomp, pipeline, SOF_SCHED_TOKENS, swidget->tuples,
+ swidget->num_tuples, sizeof(*pipeline), 1);
+ if (ret) {
+ dev_err(scomp->dev, "parsing scheduler tokens failed\n");
+ goto err;
+ }
+
+ /* parse one set of pipeline tokens */
+ ret = sof_update_ipc_object(scomp, swidget, SOF_PIPELINE_TOKENS, swidget->tuples,
+ swidget->num_tuples, sizeof(*swidget), 1);
+ if (ret) {
+ dev_err(scomp->dev, "parsing pipeline tokens failed\n");
+ goto err;
+ }
+
+ /* TODO: Get priority from topology */
+ pipeline->priority = 0;
+
+ dev_dbg(scomp->dev, "pipeline '%s': id %d pri %d lp mode %d\n",
+ swidget->widget->name, swidget->pipeline_id,
+ pipeline->priority, pipeline->lp_mode);
+
+ swidget->private = pipeline;
+
+ pipeline->msg.primary = SOF_IPC4_GLB_PIPE_PRIORITY(pipeline->priority);
+ pipeline->msg.primary |= SOF_IPC4_GLB_PIPE_INSTANCE_ID(swidget->pipeline_id);
+ pipeline->msg.primary |= SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_GLB_CREATE_PIPELINE);
+ pipeline->msg.primary |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
+ pipeline->msg.primary |= SOF_IPC4_MSG_TARGET(SOF_IPC4_FW_GEN_MSG);
+
+ pipeline->msg.extension = pipeline->lp_mode;
+ pipeline->state = SOF_IPC4_PIPE_UNINITIALIZED;
+
+ return 0;
+err:
+ kfree(pipeline);
+ return ret;
+}
+
+static int sof_ipc4_widget_setup_comp_pga(struct snd_sof_widget *swidget)
+{
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct sof_ipc4_fw_module *fw_module;
+ struct snd_sof_control *scontrol;
+ struct sof_ipc4_gain *gain;
+ int ret;
+
+ gain = kzalloc(sizeof(*gain), GFP_KERNEL);
+ if (!gain)
+ return -ENOMEM;
+
+ swidget->private = gain;
+
+ gain->data.channels = SOF_IPC4_GAIN_ALL_CHANNELS_MASK;
+ gain->data.init_val = SOF_IPC4_VOL_ZERO_DB;
+
+ /* The out_audio_fmt in topology is ignored as it is not required to be sent to the FW */
+ ret = sof_ipc4_get_audio_fmt(scomp, swidget, &gain->available_fmt, false);
+ if (ret)
+ goto err;
+
+ ret = sof_update_ipc_object(scomp, &gain->data, SOF_GAIN_TOKENS, swidget->tuples,
+ swidget->num_tuples, sizeof(gain->data), 1);
+ if (ret) {
+ dev_err(scomp->dev, "Parsing gain tokens failed\n");
+ goto err;
+ }
+
+ dev_dbg(scomp->dev,
+ "pga widget %s: ramp type: %d, ramp duration %d, initial gain value: %#x, cpc %d\n",
+ swidget->widget->name, gain->data.curve_type, gain->data.curve_duration,
+ gain->data.init_val, gain->base_config.cpc);
+
+ ret = sof_ipc4_widget_setup_msg(swidget, &gain->msg);
+ if (ret)
+ goto err;
+
+ fw_module = swidget->module_info;
+
+ /* update module ID for all kcontrols for this widget */
+ list_for_each_entry(scontrol, &sdev->kcontrol_list, list)
+ if (scontrol->comp_id == swidget->comp_id) {
+ struct sof_ipc4_control_data *cdata = scontrol->ipc_control_data;
+ struct sof_ipc4_msg *msg = &cdata->msg;
+
+ msg->primary |= fw_module->man4_module_entry.id;
+ }
+
+ return 0;
+err:
+ sof_ipc4_free_audio_fmt(&gain->available_fmt);
+ kfree(gain);
+ swidget->private = NULL;
+ return ret;
+}
+
+static void sof_ipc4_widget_free_comp_pga(struct snd_sof_widget *swidget)
+{
+ struct sof_ipc4_gain *gain = swidget->private;
+
+ if (!gain)
+ return;
+
+ sof_ipc4_free_audio_fmt(&gain->available_fmt);
+ kfree(swidget->private);
+ swidget->private = NULL;
+}
+
+static int sof_ipc4_widget_setup_comp_mixer(struct snd_sof_widget *swidget)
+{
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct sof_ipc4_mixer *mixer;
+ int ret;
+
+ dev_dbg(scomp->dev, "Updating IPC structure for %s\n", swidget->widget->name);
+
+ mixer = kzalloc(sizeof(*mixer), GFP_KERNEL);
+ if (!mixer)
+ return -ENOMEM;
+
+ swidget->private = mixer;
+
+ /* The out_audio_fmt in topology is ignored as it is not required to be sent to the FW */
+ ret = sof_ipc4_get_audio_fmt(scomp, swidget, &mixer->available_fmt, false);
+ if (ret)
+ goto err;
+
+ ret = sof_ipc4_widget_setup_msg(swidget, &mixer->msg);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ sof_ipc4_free_audio_fmt(&mixer->available_fmt);
+ kfree(mixer);
+ swidget->private = NULL;
+ return ret;
+}
+
+static int sof_ipc4_widget_setup_comp_src(struct snd_sof_widget *swidget)
+{
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct sof_ipc4_src *src;
+ int ret;
+
+ dev_dbg(scomp->dev, "Updating IPC structure for %s\n", swidget->widget->name);
+
+ src = kzalloc(sizeof(*src), GFP_KERNEL);
+ if (!src)
+ return -ENOMEM;
+
+ swidget->private = src;
+
+ /* The out_audio_fmt in topology is ignored as it is not required by SRC */
+ ret = sof_ipc4_get_audio_fmt(scomp, swidget, &src->available_fmt, false);
+ if (ret)
+ goto err;
+
+ ret = sof_update_ipc_object(scomp, src, SOF_SRC_TOKENS, swidget->tuples,
+				    swidget->num_tuples, sizeof(*src), 1);
+ if (ret) {
+ dev_err(scomp->dev, "Parsing SRC tokens failed\n");
+ goto err;
+ }
+
+ dev_dbg(scomp->dev, "SRC sink rate %d\n", src->sink_rate);
+
+ ret = sof_ipc4_widget_setup_msg(swidget, &src->msg);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ sof_ipc4_free_audio_fmt(&src->available_fmt);
+ kfree(src);
+ swidget->private = NULL;
+ return ret;
+}
+
+static void sof_ipc4_widget_free_comp_src(struct snd_sof_widget *swidget)
+{
+ struct sof_ipc4_src *src = swidget->private;
+
+ if (!src)
+ return;
+
+ sof_ipc4_free_audio_fmt(&src->available_fmt);
+ kfree(swidget->private);
+ swidget->private = NULL;
+}
+
+static void sof_ipc4_widget_free_comp_mixer(struct snd_sof_widget *swidget)
+{
+ struct sof_ipc4_mixer *mixer = swidget->private;
+
+ if (!mixer)
+ return;
+
+ sof_ipc4_free_audio_fmt(&mixer->available_fmt);
+ kfree(swidget->private);
+ swidget->private = NULL;
+}
+
+static void
+sof_ipc4_update_pipeline_mem_usage(struct snd_sof_dev *sdev, struct snd_sof_widget *swidget,
+ struct sof_ipc4_base_module_cfg *base_config)
+{
+ struct sof_ipc4_fw_module *fw_module = swidget->module_info;
+ struct snd_sof_widget *pipe_widget;
+ struct sof_ipc4_pipeline *pipeline;
+ int task_mem, queue_mem;
+ int ibs, bss, total;
+
+ ibs = base_config->ibs;
+ bss = base_config->is_pages;
+
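+	/*
+	 * Scheduling memory estimate for this module instance: a pipeline object,
+	 * a module instance list item plus the module BSS, an LL or DP task
+	 * depending on the module type, and per-queue buffers sized from the
+	 * input buffer size (ibs)
+	 */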
+ task_mem = SOF_IPC4_PIPELINE_OBJECT_SIZE;
+ task_mem += SOF_IPC4_MODULE_INSTANCE_LIST_ITEM_SIZE + bss;
+
+ if (fw_module->man4_module_entry.type & SOF_IPC4_MODULE_LL) {
+ task_mem += SOF_IPC4_FW_ROUNDUP(SOF_IPC4_LL_TASK_OBJECT_SIZE);
+ task_mem += SOF_IPC4_FW_MAX_QUEUE_COUNT * SOF_IPC4_MODULE_INSTANCE_LIST_ITEM_SIZE;
+ task_mem += SOF_IPC4_LL_TASK_LIST_ITEM_SIZE;
+ } else {
+ task_mem += SOF_IPC4_FW_ROUNDUP(SOF_IPC4_DP_TASK_OBJECT_SIZE);
+ task_mem += SOF_IPC4_DP_TASK_LIST_SIZE;
+ }
+
+ ibs = SOF_IPC4_FW_ROUNDUP(ibs);
+ queue_mem = SOF_IPC4_FW_MAX_QUEUE_COUNT * (SOF_IPC4_DATA_QUEUE_OBJECT_SIZE + ibs);
+
+ total = SOF_IPC4_FW_PAGE(task_mem + queue_mem);
+
+ pipe_widget = swidget->pipe_widget;
+ pipeline = pipe_widget->private;
+ pipeline->mem_usage += total;
+}
+
+static int sof_ipc4_widget_assign_instance_id(struct snd_sof_dev *sdev,
+ struct snd_sof_widget *swidget)
+{
+ struct sof_ipc4_fw_module *fw_module = swidget->module_info;
+ int max_instances = fw_module->man4_module_entry.instance_max_count;
+
+ swidget->instance_id = ida_alloc_max(&fw_module->m_ida, max_instances, GFP_KERNEL);
+ if (swidget->instance_id < 0) {
+ dev_err(sdev->dev, "failed to assign instance id for widget %s",
+ swidget->widget->name);
+ return swidget->instance_id;
+ }
+
+ return 0;
+}
+
+static int sof_ipc4_init_audio_fmt(struct snd_sof_dev *sdev,
+ struct snd_sof_widget *swidget,
+ struct sof_ipc4_base_module_cfg *base_config,
+ struct sof_ipc4_audio_format *out_format,
+ struct snd_pcm_hw_params *params,
+ struct sof_ipc4_available_audio_format *available_fmt,
+ size_t object_offset)
+{
+ void *ptr = available_fmt->ref_audio_fmt;
+ u32 valid_bits;
+ u32 channels;
+ u32 rate;
+ int sample_valid_bits;
+ int i;
+
+ if (!ptr) {
+ dev_err(sdev->dev, "no reference formats for %s\n", swidget->widget->name);
+ return -EINVAL;
+ }
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ sample_valid_bits = 16;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ sample_valid_bits = 24;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ sample_valid_bits = 32;
+ break;
+ default:
+ dev_err(sdev->dev, "invalid pcm frame format %d\n", params_format(params));
+ return -EINVAL;
+ }
+
+ if (!available_fmt->audio_fmt_num) {
+ dev_err(sdev->dev, "no formats available for %s\n", swidget->widget->name);
+ return -EINVAL;
+ }
+
+ /*
+	 * Search the supported audio formats for one matching the rate, channels
+	 * and sample valid bits given in the runtime params
+ */
+ for (i = 0; i < available_fmt->audio_fmt_num; i++, ptr = (u8 *)ptr + object_offset) {
+ struct sof_ipc4_audio_format *fmt = ptr;
+
+ rate = fmt->sampling_frequency;
+ channels = SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT(fmt->fmt_cfg);
+ valid_bits = SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH(fmt->fmt_cfg);
+ if (params_rate(params) == rate && params_channels(params) == channels &&
+ sample_valid_bits == valid_bits) {
+ dev_dbg(sdev->dev, "matching audio format index for %uHz, %ubit, %u channels: %d\n",
+ rate, valid_bits, channels, i);
+
+ /* copy ibs/obs and input format */
+ memcpy(base_config, &available_fmt->base_config[i],
+ sizeof(struct sof_ipc4_base_module_cfg));
+
+ /* copy output format */
+ if (out_format)
+ memcpy(out_format, &available_fmt->out_audio_fmt[i],
+ sizeof(struct sof_ipc4_audio_format));
+ break;
+ }
+ }
+
+ if (i == available_fmt->audio_fmt_num) {
+ dev_err(sdev->dev, "%s: Unsupported audio format: %uHz, %ubit, %u channels\n",
+ __func__, params_rate(params), sample_valid_bits, params_channels(params));
+ return -EINVAL;
+ }
+
+ dev_dbg(sdev->dev, "Init input audio formats for %s\n", swidget->widget->name);
+ sof_ipc4_dbg_audio_format(sdev->dev, &base_config->audio_fmt,
+ sizeof(*base_config), 1);
+ if (out_format) {
+ dev_dbg(sdev->dev, "Init output audio formats for %s\n", swidget->widget->name);
+ sof_ipc4_dbg_audio_format(sdev->dev, out_format,
+ sizeof(*out_format), 1);
+ }
+
+ /* Return the index of the matched format */
+ return i;
+}
+
+static void sof_ipc4_unprepare_copier_module(struct snd_sof_widget *swidget)
+{
+ struct sof_ipc4_copier *ipc4_copier = NULL;
+ struct snd_sof_widget *pipe_widget;
+ struct sof_ipc4_pipeline *pipeline;
+
+ /* reset pipeline memory usage */
+ pipe_widget = swidget->pipe_widget;
+ pipeline = pipe_widget->private;
+ pipeline->mem_usage = 0;
+
+ if (WIDGET_IS_AIF(swidget->id)) {
+ ipc4_copier = swidget->private;
+ } else if (WIDGET_IS_DAI(swidget->id)) {
+ struct snd_sof_dai *dai = swidget->private;
+
+ ipc4_copier = dai->private;
+ if (ipc4_copier->dai_type == SOF_DAI_INTEL_ALH) {
+ struct sof_ipc4_alh_configuration_blob *blob;
+ unsigned int group_id;
+
+ blob = (struct sof_ipc4_alh_configuration_blob *)ipc4_copier->copier_config;
+ if (blob->alh_cfg.count > 1) {
+ group_id = SOF_IPC4_NODE_INDEX(ipc4_copier->data.gtw_cfg.node_id) -
+ ALH_MULTI_GTW_BASE;
+ ida_free(&alh_group_ida, group_id);
+ }
+ }
+ }
+
+ if (ipc4_copier) {
+ kfree(ipc4_copier->ipc_config_data);
+ ipc4_copier->ipc_config_data = NULL;
+ ipc4_copier->ipc_config_size = 0;
+ }
+}
+
+#if IS_ENABLED(CONFIG_ACPI) && IS_ENABLED(CONFIG_SND_INTEL_NHLT)
+static int snd_sof_get_hw_config_params(struct snd_sof_dev *sdev, struct snd_sof_dai *dai,
+ int *sample_rate, int *channel_count, int *bit_depth)
+{
+ struct snd_soc_tplg_hw_config *hw_config;
+ struct snd_sof_dai_link *slink;
+ bool dai_link_found = false;
+ bool hw_cfg_found = false;
+ int i;
+
+ /* get current hw_config from link */
+ list_for_each_entry(slink, &sdev->dai_link_list, list) {
+ if (!strcmp(slink->link->name, dai->name)) {
+ dai_link_found = true;
+ break;
+ }
+ }
+
+ if (!dai_link_found) {
+ dev_err(sdev->dev, "%s: no DAI link found for DAI %s\n", __func__, dai->name);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < slink->num_hw_configs; i++) {
+ hw_config = &slink->hw_configs[i];
+ if (dai->current_config == le32_to_cpu(hw_config->id)) {
+ hw_cfg_found = true;
+ break;
+ }
+ }
+
+ if (!hw_cfg_found) {
+ dev_err(sdev->dev, "%s: no matching hw_config found for DAI %s\n", __func__,
+ dai->name);
+ return -EINVAL;
+ }
+
+ *bit_depth = le32_to_cpu(hw_config->tdm_slot_width);
+ *channel_count = le32_to_cpu(hw_config->tdm_slots);
+ *sample_rate = le32_to_cpu(hw_config->fsync_rate);
+
+ dev_dbg(sdev->dev, "sample rate: %d sample width: %d channels: %d\n",
+ *sample_rate, *bit_depth, *channel_count);
+
+ return 0;
+}
+
+static int snd_sof_get_nhlt_endpoint_data(struct snd_sof_dev *sdev, struct snd_sof_dai *dai,
+ struct snd_pcm_hw_params *params, u32 dai_index,
+ u32 linktype, u8 dir, u32 **dst, u32 *len)
+{
+ struct sof_ipc4_fw_data *ipc4_data = sdev->private;
+ struct nhlt_specific_cfg *cfg;
+ int sample_rate, channel_count;
+ int bit_depth, ret;
+ u32 nhlt_type;
+
+ /* convert to NHLT type */
+ switch (linktype) {
+ case SOF_DAI_INTEL_DMIC:
+ nhlt_type = NHLT_LINK_DMIC;
+ bit_depth = params_width(params);
+ channel_count = params_channels(params);
+ sample_rate = params_rate(params);
+ break;
+ case SOF_DAI_INTEL_SSP:
+ nhlt_type = NHLT_LINK_SSP;
+ ret = snd_sof_get_hw_config_params(sdev, dai, &sample_rate, &channel_count,
+ &bit_depth);
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ return 0;
+ }
+
+ dev_dbg(sdev->dev, "dai index %d nhlt type %d direction %d\n",
+ dai_index, nhlt_type, dir);
+
+ /* find NHLT blob with matching params */
+ cfg = intel_nhlt_get_endpoint_blob(sdev->dev, ipc4_data->nhlt, dai_index, nhlt_type,
+ bit_depth, bit_depth, channel_count, sample_rate,
+ dir, 0);
+
+ if (!cfg) {
+ dev_err(sdev->dev,
+ "no matching blob for sample rate: %d sample width: %d channels: %d\n",
+ sample_rate, bit_depth, channel_count);
+ return -EINVAL;
+ }
+
+ /* config length should be in dwords */
+ *len = cfg->size >> 2;
+ *dst = (u32 *)cfg->caps;
+
+ return 0;
+}
+#else
+static int snd_sof_get_nhlt_endpoint_data(struct snd_sof_dev *sdev, struct snd_sof_dai *dai,
+ struct snd_pcm_hw_params *params, u32 dai_index,
+ u32 linktype, u8 dir, u32 **dst, u32 *len)
+{
+ return 0;
+}
+#endif
+
+static int
+sof_ipc4_prepare_copier_module(struct snd_sof_widget *swidget,
+ struct snd_pcm_hw_params *fe_params,
+ struct snd_sof_platform_stream_params *platform_params,
+ struct snd_pcm_hw_params *pipeline_params, int dir)
+{
+ struct sof_ipc4_available_audio_format *available_fmt;
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct sof_ipc4_copier_data *copier_data;
+ struct snd_pcm_hw_params *ref_params;
+ struct sof_ipc4_copier *ipc4_copier;
+ struct snd_sof_dai *dai;
+ struct snd_mask *fmt;
+ int out_sample_valid_bits;
+ size_t ref_audio_fmt_size;
+ void **ipc_config_data;
+ int *ipc_config_size;
+ u32 **data;
+ int ipc_size, ret;
+
+ dev_dbg(sdev->dev, "copier %s, type %d", swidget->widget->name, swidget->id);
+
+ switch (swidget->id) {
+ case snd_soc_dapm_aif_in:
+ case snd_soc_dapm_aif_out:
+ {
+ struct sof_ipc4_gtw_attributes *gtw_attr;
+ struct snd_sof_widget *pipe_widget;
+ struct sof_ipc4_pipeline *pipeline;
+
+ pipe_widget = swidget->pipe_widget;
+ pipeline = pipe_widget->private;
+ ipc4_copier = (struct sof_ipc4_copier *)swidget->private;
+ gtw_attr = ipc4_copier->gtw_attr;
+ copier_data = &ipc4_copier->data;
+ available_fmt = &ipc4_copier->available_fmt;
+
+ /*
+ * base_config->audio_fmt and out_audio_fmt represent the input and output audio
+ * formats. Use the input format as the reference to match pcm params for playback
+ * and the output format as reference for capture.
+ */
+ if (dir == SNDRV_PCM_STREAM_PLAYBACK) {
+ available_fmt->ref_audio_fmt = &available_fmt->base_config->audio_fmt;
+ ref_audio_fmt_size = sizeof(struct sof_ipc4_base_module_cfg);
+ } else {
+ available_fmt->ref_audio_fmt = available_fmt->out_audio_fmt;
+ ref_audio_fmt_size = sizeof(struct sof_ipc4_audio_format);
+ }
+ copier_data->gtw_cfg.node_id &= ~SOF_IPC4_NODE_INDEX_MASK;
+ copier_data->gtw_cfg.node_id |=
+ SOF_IPC4_NODE_INDEX(platform_params->stream_tag - 1);
+
+ /* set gateway attributes */
+ gtw_attr->lp_buffer_alloc = pipeline->lp_mode;
+ ref_params = fe_params;
+ break;
+ }
+ case snd_soc_dapm_dai_in:
+ case snd_soc_dapm_dai_out:
+ {
+ dai = swidget->private;
+
+ ipc4_copier = (struct sof_ipc4_copier *)dai->private;
+ copier_data = &ipc4_copier->data;
+ available_fmt = &ipc4_copier->available_fmt;
+ if (dir == SNDRV_PCM_STREAM_CAPTURE) {
+ available_fmt->ref_audio_fmt = available_fmt->out_audio_fmt;
+ ref_audio_fmt_size = sizeof(struct sof_ipc4_audio_format);
+
+ /*
+				 * the DAI copier only supports 32-bit samples, so modify
+				 * the input params accordingly
+ */
+ fmt = hw_param_mask(pipeline_params, SNDRV_PCM_HW_PARAM_FORMAT);
+ snd_mask_none(fmt);
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S32_LE);
+ } else {
+ available_fmt->ref_audio_fmt = &available_fmt->base_config->audio_fmt;
+ ref_audio_fmt_size = sizeof(struct sof_ipc4_base_module_cfg);
+ }
+
+ ref_params = pipeline_params;
+
+ ret = snd_sof_get_nhlt_endpoint_data(sdev, dai, fe_params, ipc4_copier->dai_index,
+ ipc4_copier->dai_type, dir,
+ &ipc4_copier->copier_config,
+ &copier_data->gtw_cfg.config_length);
+ if (ret < 0)
+ return ret;
+
+ break;
+ }
+ default:
+ dev_err(sdev->dev, "unsupported type %d for copier %s",
+ swidget->id, swidget->widget->name);
+ return -EINVAL;
+ }
+
+ /* set input and output audio formats */
+ ret = sof_ipc4_init_audio_fmt(sdev, swidget, &copier_data->base_config,
+ &copier_data->out_format, ref_params,
+ available_fmt, ref_audio_fmt_size);
+ if (ret < 0)
+ return ret;
+
+ switch (swidget->id) {
+ case snd_soc_dapm_dai_in:
+ case snd_soc_dapm_dai_out:
+ {
+ /*
+		 * Only SOF_DAI_INTEL_ALH needs the copier_data to build its blob,
+		 * which is why the blob is set only for ALH DAIs after sof_ipc4_init_audio_fmt()
+ */
+ if (ipc4_copier->dai_type == SOF_DAI_INTEL_ALH) {
+ struct sof_ipc4_alh_configuration_blob *blob;
+ struct sof_ipc4_copier_data *alh_data;
+ struct sof_ipc4_copier *alh_copier;
+ struct snd_sof_widget *w;
+ u32 ch_mask = 0;
+ u32 ch_map;
+ int i;
+
+ blob = (struct sof_ipc4_alh_configuration_blob *)ipc4_copier->copier_config;
+
+ blob->gw_attr.lp_buffer_alloc = 0;
+
+ /* Get channel_mask from ch_map */
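+			/* each 4-bit field in ch_map describes one channel; 0xf is treated as unused */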
+ ch_map = copier_data->base_config.audio_fmt.ch_map;
+ for (i = 0; ch_map; i++) {
+ if ((ch_map & 0xf) != 0xf)
+ ch_mask |= BIT(i);
+ ch_map >>= 4;
+ }
+
+ /*
+ * Set each gtw_cfg.node_id to blob->alh_cfg.mapping[]
+ * for all widgets with the same stream name
+ */
+ i = 0;
+ list_for_each_entry(w, &sdev->widget_list, list) {
+ if (w->widget->sname &&
+ strcmp(w->widget->sname, swidget->widget->sname))
+ continue;
+
+ dai = w->private;
+ alh_copier = (struct sof_ipc4_copier *)dai->private;
+ alh_data = &alh_copier->data;
+ blob->alh_cfg.mapping[i].alh_id = alh_data->gtw_cfg.node_id;
+ blob->alh_cfg.mapping[i].channel_mask = ch_mask;
+ i++;
+ }
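+			/*
+			 * Aggregated ALH streams (count > 1) are addressed with a
+			 * multi-gateway node index allocated from alh_group_ida
+			 */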
+ if (blob->alh_cfg.count > 1) {
+ int group_id;
+
+ group_id = ida_alloc_max(&alh_group_ida, ALH_MULTI_GTW_COUNT,
+ GFP_KERNEL);
+
+ if (group_id < 0)
+ return group_id;
+
+ /* add multi-gateway base */
+ group_id += ALH_MULTI_GTW_BASE;
+ copier_data->gtw_cfg.node_id &= ~SOF_IPC4_NODE_INDEX_MASK;
+ copier_data->gtw_cfg.node_id |= SOF_IPC4_NODE_INDEX(group_id);
+ }
+ }
+ }
+ }
+
+ /* modify the input params for the next widget */
+ fmt = hw_param_mask(pipeline_params, SNDRV_PCM_HW_PARAM_FORMAT);
+ out_sample_valid_bits =
+ SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH(copier_data->out_format.fmt_cfg);
+ snd_mask_none(fmt);
+ switch (out_sample_valid_bits) {
+ case 16:
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE);
+ break;
+ case 24:
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE);
+ break;
+ case 32:
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S32_LE);
+ break;
+ default:
+ dev_err(sdev->dev, "invalid sample frame format %d\n",
+ params_format(pipeline_params));
+ return -EINVAL;
+ }
+
+	/* set the gateway dma_buffer_size using the matched format index returned above */
+ copier_data->gtw_cfg.dma_buffer_size = available_fmt->dma_buffer_size[ret];
+
+ data = &ipc4_copier->copier_config;
+ ipc_config_size = &ipc4_copier->ipc_config_size;
+ ipc_config_data = &ipc4_copier->ipc_config_data;
+
+ /* config_length is DWORD based */
+ ipc_size = sizeof(*copier_data) + copier_data->gtw_cfg.config_length * 4;
+
+ dev_dbg(sdev->dev, "copier %s, IPC size is %d", swidget->widget->name, ipc_size);
+
+ *ipc_config_data = kzalloc(ipc_size, GFP_KERNEL);
+ if (!*ipc_config_data)
+ return -ENOMEM;
+
+ *ipc_config_size = ipc_size;
+
+ /* copy IPC data */
+ memcpy(*ipc_config_data, (void *)copier_data, sizeof(*copier_data));
+ if (copier_data->gtw_cfg.config_length)
+ memcpy(*ipc_config_data + sizeof(*copier_data),
+ *data, copier_data->gtw_cfg.config_length * 4);
+
+ /* update pipeline memory usage */
+ sof_ipc4_update_pipeline_mem_usage(sdev, swidget, &copier_data->base_config);
+
+ return 0;
+}
+
+static int sof_ipc4_prepare_gain_module(struct snd_sof_widget *swidget,
+ struct snd_pcm_hw_params *fe_params,
+ struct snd_sof_platform_stream_params *platform_params,
+ struct snd_pcm_hw_params *pipeline_params, int dir)
+{
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct sof_ipc4_gain *gain = swidget->private;
+ int ret;
+
+ gain->available_fmt.ref_audio_fmt = &gain->available_fmt.base_config->audio_fmt;
+
+ /* output format is not required to be sent to the FW for gain */
+ ret = sof_ipc4_init_audio_fmt(sdev, swidget, &gain->base_config,
+ NULL, pipeline_params, &gain->available_fmt,
+ sizeof(gain->base_config));
+ if (ret < 0)
+ return ret;
+
+ /* update pipeline memory usage */
+ sof_ipc4_update_pipeline_mem_usage(sdev, swidget, &gain->base_config);
+
+ return 0;
+}
+
+static int sof_ipc4_prepare_mixer_module(struct snd_sof_widget *swidget,
+ struct snd_pcm_hw_params *fe_params,
+ struct snd_sof_platform_stream_params *platform_params,
+ struct snd_pcm_hw_params *pipeline_params, int dir)
+{
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct sof_ipc4_mixer *mixer = swidget->private;
+ int ret;
+
+	/* only 32-bit is supported by the mixer */
+ mixer->available_fmt.ref_audio_fmt = &mixer->available_fmt.base_config->audio_fmt;
+
+ /* output format is not required to be sent to the FW for mixer */
+ ret = sof_ipc4_init_audio_fmt(sdev, swidget, &mixer->base_config,
+ NULL, pipeline_params, &mixer->available_fmt,
+ sizeof(mixer->base_config));
+ if (ret < 0)
+ return ret;
+
+ /* update pipeline memory usage */
+ sof_ipc4_update_pipeline_mem_usage(sdev, swidget, &mixer->base_config);
+
+ return 0;
+}
+
+static int sof_ipc4_prepare_src_module(struct snd_sof_widget *swidget,
+ struct snd_pcm_hw_params *fe_params,
+ struct snd_sof_platform_stream_params *platform_params,
+ struct snd_pcm_hw_params *pipeline_params, int dir)
+{
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct sof_ipc4_src *src = swidget->private;
+ struct snd_interval *rate;
+ int ret;
+
+ src->available_fmt.ref_audio_fmt = &src->available_fmt.base_config->audio_fmt;
+
+ /* output format is not required to be sent to the FW for SRC */
+ ret = sof_ipc4_init_audio_fmt(sdev, swidget, &src->base_config,
+ NULL, pipeline_params, &src->available_fmt,
+ sizeof(src->base_config));
+ if (ret < 0)
+ return ret;
+
+ /* update pipeline memory usage */
+ sof_ipc4_update_pipeline_mem_usage(sdev, swidget, &src->base_config);
+
+ /* update pipeline_params for sink widgets */
+ rate = hw_param_interval(pipeline_params, SNDRV_PCM_HW_PARAM_RATE);
+ rate->min = src->sink_rate;
+ rate->max = rate->min;
+
+ return 0;
+}
+
+static int sof_ipc4_control_load_volume(struct snd_sof_dev *sdev, struct snd_sof_control *scontrol)
+{
+ struct sof_ipc4_control_data *control_data;
+ struct sof_ipc4_msg *msg;
+ int i;
+
+ scontrol->size = struct_size(control_data, chanv, scontrol->num_channels);
+
+ /* scontrol->ipc_control_data will be freed in sof_control_unload */
+ scontrol->ipc_control_data = kzalloc(scontrol->size, GFP_KERNEL);
+ if (!scontrol->ipc_control_data)
+ return -ENOMEM;
+
+ control_data = scontrol->ipc_control_data;
+ control_data->index = scontrol->index;
+
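+	/* volume is set in the FW with a module LARGE_CONFIG_SET message carrying SOF_IPC4_GAIN_PARAM_ID */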
+ msg = &control_data->msg;
+ msg->primary = SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_MOD_LARGE_CONFIG_SET);
+ msg->primary |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
+ msg->primary |= SOF_IPC4_MSG_TARGET(SOF_IPC4_MODULE_MSG);
+
+ msg->extension = SOF_IPC4_MOD_EXT_MSG_PARAM_ID(SOF_IPC4_GAIN_PARAM_ID);
+
+ /* set default volume values to 0dB in control */
+ for (i = 0; i < scontrol->num_channels; i++) {
+ control_data->chanv[i].channel = i;
+ control_data->chanv[i].value = SOF_IPC4_VOL_ZERO_DB;
+ }
+
+ return 0;
+}
+
+static int sof_ipc4_control_setup(struct snd_sof_dev *sdev, struct snd_sof_control *scontrol)
+{
+ switch (scontrol->info_type) {
+ case SND_SOC_TPLG_CTL_VOLSW:
+ case SND_SOC_TPLG_CTL_VOLSW_SX:
+ case SND_SOC_TPLG_CTL_VOLSW_XR_SX:
+ return sof_ipc4_control_load_volume(sdev, scontrol);
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int sof_ipc4_widget_setup(struct snd_sof_dev *sdev, struct snd_sof_widget *swidget)
+{
+ struct snd_sof_widget *pipe_widget = swidget->pipe_widget;
+ struct sof_ipc4_pipeline *pipeline;
+ struct sof_ipc4_msg *msg;
+ void *ipc_data = NULL;
+ u32 ipc_size = 0;
+ int ret;
+
+ switch (swidget->id) {
+ case snd_soc_dapm_scheduler:
+ pipeline = swidget->private;
+
+ dev_dbg(sdev->dev, "pipeline: %d memory pages: %d\n", swidget->pipeline_id,
+ pipeline->mem_usage);
+
+ msg = &pipeline->msg;
+ msg->primary |= pipeline->mem_usage;
+ break;
+ case snd_soc_dapm_aif_in:
+ case snd_soc_dapm_aif_out:
+ {
+ struct sof_ipc4_copier *ipc4_copier = swidget->private;
+
+ ipc_size = ipc4_copier->ipc_config_size;
+ ipc_data = ipc4_copier->ipc_config_data;
+
+ msg = &ipc4_copier->msg;
+ break;
+ }
+ case snd_soc_dapm_dai_in:
+ case snd_soc_dapm_dai_out:
+ {
+ struct snd_sof_dai *dai = swidget->private;
+ struct sof_ipc4_copier *ipc4_copier = dai->private;
+
+ ipc_size = ipc4_copier->ipc_config_size;
+ ipc_data = ipc4_copier->ipc_config_data;
+
+ msg = &ipc4_copier->msg;
+ break;
+ }
+ case snd_soc_dapm_pga:
+ {
+ struct sof_ipc4_gain *gain = swidget->private;
+
+ ipc_size = sizeof(struct sof_ipc4_base_module_cfg) +
+ sizeof(struct sof_ipc4_gain_data);
+ ipc_data = gain;
+
+ msg = &gain->msg;
+ break;
+ }
+ case snd_soc_dapm_mixer:
+ {
+ struct sof_ipc4_mixer *mixer = swidget->private;
+
+ ipc_size = sizeof(mixer->base_config);
+ ipc_data = &mixer->base_config;
+
+ msg = &mixer->msg;
+ break;
+ }
+ case snd_soc_dapm_src:
+ {
+ struct sof_ipc4_src *src = swidget->private;
+
+ ipc_size = sizeof(struct sof_ipc4_base_module_cfg) + sizeof(src->sink_rate);
+ ipc_data = src;
+
+ msg = &src->msg;
+ break;
+ }
+ default:
+ dev_err(sdev->dev, "widget type %d not supported", swidget->id);
+ return -EINVAL;
+ }
+
+ if (swidget->id != snd_soc_dapm_scheduler) {
+ ret = sof_ipc4_widget_assign_instance_id(sdev, swidget);
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed to assign instance id for %s\n",
+ swidget->widget->name);
+ return ret;
+ }
+ pipeline = pipe_widget->private;
+ msg->primary &= ~SOF_IPC4_MOD_INSTANCE_MASK;
+ msg->primary |= SOF_IPC4_MOD_INSTANCE(swidget->instance_id);
+
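+		/* the param size field in the message extension is expressed in dwords */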
+ msg->extension &= ~SOF_IPC4_MOD_EXT_PARAM_SIZE_MASK;
+ msg->extension |= ipc_size >> 2;
+ }
+ dev_dbg(sdev->dev, "Create widget %s instance %d - pipe %d - core %d\n",
+ swidget->widget->name, swidget->instance_id, swidget->pipeline_id, swidget->core);
+
+ msg->data_size = ipc_size;
+ msg->data_ptr = ipc_data;
+
+ ret = sof_ipc_tx_message(sdev->ipc, msg, ipc_size, NULL, 0);
+ if (ret < 0)
+ dev_err(sdev->dev, "failed to create module %s\n", swidget->widget->name);
+
+ return ret;
+}
+
+static int sof_ipc4_widget_free(struct snd_sof_dev *sdev, struct snd_sof_widget *swidget)
+{
+ struct sof_ipc4_fw_module *fw_module = swidget->module_info;
+ int ret = 0;
+
+ /* freeing a pipeline frees all the widgets associated with it */
+ if (swidget->id == snd_soc_dapm_scheduler) {
+ struct sof_ipc4_pipeline *pipeline = swidget->private;
+ struct sof_ipc4_msg msg = {{ 0 }};
+ u32 header;
+
+ header = SOF_IPC4_GLB_PIPE_INSTANCE_ID(swidget->pipeline_id);
+ header |= SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_GLB_DELETE_PIPELINE);
+ header |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
+ header |= SOF_IPC4_MSG_TARGET(SOF_IPC4_FW_GEN_MSG);
+
+ msg.primary = header;
+
+ ret = sof_ipc_tx_message(sdev->ipc, &msg, 0, NULL, 0);
+ if (ret < 0)
+ dev_err(sdev->dev, "failed to free pipeline widget %s\n",
+ swidget->widget->name);
+
+ pipeline->mem_usage = 0;
+ pipeline->state = SOF_IPC4_PIPE_UNINITIALIZED;
+ } else {
+ ida_free(&fw_module->m_ida, swidget->instance_id);
+ }
+
+ return ret;
+}
+
+static int sof_ipc4_route_setup(struct snd_sof_dev *sdev, struct snd_sof_route *sroute)
+{
+ struct snd_sof_widget *src_widget = sroute->src_widget;
+ struct snd_sof_widget *sink_widget = sroute->sink_widget;
+ struct sof_ipc4_fw_module *src_fw_module = src_widget->module_info;
+ struct sof_ipc4_fw_module *sink_fw_module = sink_widget->module_info;
+ struct sof_ipc4_msg msg = {{ 0 }};
+ u32 header, extension;
+ int src_queue = 0;
+ int dst_queue = 0;
+ int ret;
+
+ dev_dbg(sdev->dev, "bind %s -> %s\n",
+ src_widget->widget->name, sink_widget->widget->name);
+
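+	/*
+	 * The primary word identifies the source module ID/instance, the extension
+	 * the sink module ID/instance and the source/sink queue IDs
+	 */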
+ header = src_fw_module->man4_module_entry.id;
+ header |= SOF_IPC4_MOD_INSTANCE(src_widget->instance_id);
+ header |= SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_MOD_BIND);
+ header |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
+ header |= SOF_IPC4_MSG_TARGET(SOF_IPC4_MODULE_MSG);
+
+ extension = sink_fw_module->man4_module_entry.id;
+ extension |= SOF_IPC4_MOD_EXT_DST_MOD_INSTANCE(sink_widget->instance_id);
+ extension |= SOF_IPC4_MOD_EXT_DST_MOD_QUEUE_ID(dst_queue);
+ extension |= SOF_IPC4_MOD_EXT_SRC_MOD_QUEUE_ID(src_queue);
+
+ msg.primary = header;
+ msg.extension = extension;
+
+ ret = sof_ipc_tx_message(sdev->ipc, &msg, 0, NULL, 0);
+ if (ret < 0)
+ dev_err(sdev->dev, "%s: failed to bind modules %s -> %s\n",
+ __func__, src_widget->widget->name, sink_widget->widget->name);
+
+ return ret;
+}
+
+static int sof_ipc4_route_free(struct snd_sof_dev *sdev, struct snd_sof_route *sroute)
+{
+ struct snd_sof_widget *src_widget = sroute->src_widget;
+ struct snd_sof_widget *sink_widget = sroute->sink_widget;
+ struct sof_ipc4_fw_module *src_fw_module = src_widget->module_info;
+ struct sof_ipc4_fw_module *sink_fw_module = sink_widget->module_info;
+ struct sof_ipc4_msg msg = {{ 0 }};
+ u32 header, extension;
+ int src_queue = 0;
+ int dst_queue = 0;
+ int ret;
+
+ dev_dbg(sdev->dev, "unbind modules %s -> %s\n",
+ src_widget->widget->name, sink_widget->widget->name);
+
+ header = src_fw_module->man4_module_entry.id;
+ header |= SOF_IPC4_MOD_INSTANCE(src_widget->instance_id);
+ header |= SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_MOD_UNBIND);
+ header |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
+ header |= SOF_IPC4_MSG_TARGET(SOF_IPC4_MODULE_MSG);
+
+ extension = sink_fw_module->man4_module_entry.id;
+ extension |= SOF_IPC4_MOD_EXT_DST_MOD_INSTANCE(sink_widget->instance_id);
+ extension |= SOF_IPC4_MOD_EXT_DST_MOD_QUEUE_ID(dst_queue);
+ extension |= SOF_IPC4_MOD_EXT_SRC_MOD_QUEUE_ID(src_queue);
+
+ msg.primary = header;
+ msg.extension = extension;
+
+ ret = sof_ipc_tx_message(sdev->ipc, &msg, 0, NULL, 0);
+ if (ret < 0)
+ dev_err(sdev->dev, "failed to unbind modules %s -> %s\n",
+ src_widget->widget->name, sink_widget->widget->name);
+
+ return ret;
+}
+
+static int sof_ipc4_dai_config(struct snd_sof_dev *sdev, struct snd_sof_widget *swidget,
+ unsigned int flags, struct snd_sof_dai_config_data *data)
+{
+ struct snd_sof_widget *pipe_widget = swidget->pipe_widget;
+ struct sof_ipc4_pipeline *pipeline = pipe_widget->private;
+ struct snd_sof_dai *dai = swidget->private;
+ struct sof_ipc4_gtw_attributes *gtw_attr;
+ struct sof_ipc4_copier_data *copier_data;
+ struct sof_ipc4_copier *ipc4_copier;
+
+ if (!dai || !dai->private) {
+ dev_err(sdev->dev, "Invalid DAI or DAI private data for %s\n",
+ swidget->widget->name);
+ return -EINVAL;
+ }
+
+ ipc4_copier = (struct sof_ipc4_copier *)dai->private;
+ copier_data = &ipc4_copier->data;
+
+ if (!data)
+ return 0;
+
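+	/* update the gateway node index for HDA/ALH with the stream info from the DAI; SSP and DMIC need no update */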
+ switch (ipc4_copier->dai_type) {
+ case SOF_DAI_INTEL_HDA:
+ gtw_attr = ipc4_copier->gtw_attr;
+ gtw_attr->lp_buffer_alloc = pipeline->lp_mode;
+ fallthrough;
+ case SOF_DAI_INTEL_ALH:
+ copier_data->gtw_cfg.node_id &= ~SOF_IPC4_NODE_INDEX_MASK;
+ copier_data->gtw_cfg.node_id |= SOF_IPC4_NODE_INDEX(data->dai_data);
+ break;
+ case SOF_DAI_INTEL_DMIC:
+ case SOF_DAI_INTEL_SSP:
+ /* nothing to do for SSP/DMIC */
+ break;
+ default:
+ dev_err(sdev->dev, "%s: unsupported dai type %d\n", __func__,
+ ipc4_copier->dai_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sof_ipc4_parse_manifest(struct snd_soc_component *scomp, int index,
+ struct snd_soc_tplg_manifest *man)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct sof_ipc4_fw_data *ipc4_data = sdev->private;
+ struct sof_manifest_tlv *manifest_tlv;
+ struct sof_manifest *manifest;
+ u32 size = le32_to_cpu(man->priv.size);
+ u8 *man_ptr = man->priv.data;
+ u32 len_check;
+ int i;
+
+ if (!size || size < SOF_IPC4_TPLG_ABI_SIZE) {
+ dev_err(scomp->dev, "%s: Invalid topology ABI size: %u\n",
+ __func__, size);
+ return -EINVAL;
+ }
+
+ manifest = (struct sof_manifest *)man_ptr;
+
+ dev_info(scomp->dev,
+ "Topology: ABI %d:%d:%d Kernel ABI %u:%u:%u\n",
+ le16_to_cpu(manifest->abi_major), le16_to_cpu(manifest->abi_minor),
+ le16_to_cpu(manifest->abi_patch),
+ SOF_ABI_MAJOR, SOF_ABI_MINOR, SOF_ABI_PATCH);
+
+ /* TODO: Add ABI compatibility check */
+
+ /* no more data after the ABI version */
+ if (size <= SOF_IPC4_TPLG_ABI_SIZE)
+ return 0;
+
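+	/* walk the TLV entries following the ABI header, bounds-checking each item against the manifest size */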
+ manifest_tlv = manifest->items;
+ len_check = sizeof(struct sof_manifest);
+ for (i = 0; i < le16_to_cpu(manifest->count); i++) {
+ len_check += sizeof(struct sof_manifest_tlv) + le32_to_cpu(manifest_tlv->size);
+ if (len_check > size)
+ return -EINVAL;
+
+ switch (le32_to_cpu(manifest_tlv->type)) {
+ case SOF_MANIFEST_DATA_TYPE_NHLT:
+ /* no NHLT in BIOS, so use the one from topology manifest */
+ if (ipc4_data->nhlt)
+ break;
+ ipc4_data->nhlt = devm_kmemdup(sdev->dev, manifest_tlv->data,
+ le32_to_cpu(manifest_tlv->size), GFP_KERNEL);
+ if (!ipc4_data->nhlt)
+ return -ENOMEM;
+ break;
+ default:
+ dev_warn(scomp->dev, "Skipping unknown manifest data type %d\n",
+ manifest_tlv->type);
+ break;
+ }
+ man_ptr += sizeof(struct sof_manifest_tlv) + le32_to_cpu(manifest_tlv->size);
+ manifest_tlv = (struct sof_manifest_tlv *)man_ptr;
+ }
+
+ return 0;
+}
+
+static int sof_ipc4_dai_get_clk(struct snd_sof_dev *sdev, struct snd_sof_dai *dai, int clk_type)
+{
+ struct sof_ipc4_copier *ipc4_copier = dai->private;
+ struct snd_soc_tplg_hw_config *hw_config;
+ struct snd_sof_dai_link *slink;
+ bool dai_link_found = false;
+ bool hw_cfg_found = false;
+ int i;
+
+ if (!ipc4_copier)
+ return 0;
+
+ list_for_each_entry(slink, &sdev->dai_link_list, list) {
+ if (!strcmp(slink->link->name, dai->name)) {
+ dai_link_found = true;
+ break;
+ }
+ }
+
+ if (!dai_link_found) {
+ dev_err(sdev->dev, "no DAI link found for DAI %s\n", dai->name);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < slink->num_hw_configs; i++) {
+ hw_config = &slink->hw_configs[i];
+ if (dai->current_config == le32_to_cpu(hw_config->id)) {
+ hw_cfg_found = true;
+ break;
+ }
+ }
+
+ if (!hw_cfg_found) {
+ dev_err(sdev->dev, "no matching hw_config found for DAI %s\n", dai->name);
+ return -EINVAL;
+ }
+
+ switch (ipc4_copier->dai_type) {
+ case SOF_DAI_INTEL_SSP:
+ switch (clk_type) {
+ case SOF_DAI_CLK_INTEL_SSP_MCLK:
+ return le32_to_cpu(hw_config->mclk_rate);
+ case SOF_DAI_CLK_INTEL_SSP_BCLK:
+ return le32_to_cpu(hw_config->bclk_rate);
+ default:
+ dev_err(sdev->dev, "Invalid clk type for SSP %d\n", clk_type);
+ break;
+ }
+ break;
+ default:
+ dev_err(sdev->dev, "DAI type %d not supported yet!\n", ipc4_copier->dai_type);
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static enum sof_tokens host_token_list[] = {
+ SOF_COMP_TOKENS,
+ SOF_AUDIO_FMT_NUM_TOKENS,
+ SOF_AUDIO_FORMAT_BUFFER_SIZE_TOKENS,
+ SOF_IN_AUDIO_FORMAT_TOKENS,
+ SOF_OUT_AUDIO_FORMAT_TOKENS,
+ SOF_COPIER_GATEWAY_CFG_TOKENS,
+ SOF_COPIER_TOKENS,
+ SOF_COMP_EXT_TOKENS,
+};
+
+static enum sof_tokens pipeline_token_list[] = {
+ SOF_SCHED_TOKENS,
+ SOF_PIPELINE_TOKENS,
+};
+
+static enum sof_tokens dai_token_list[] = {
+ SOF_COMP_TOKENS,
+ SOF_AUDIO_FMT_NUM_TOKENS,
+ SOF_AUDIO_FORMAT_BUFFER_SIZE_TOKENS,
+ SOF_IN_AUDIO_FORMAT_TOKENS,
+ SOF_OUT_AUDIO_FORMAT_TOKENS,
+ SOF_COPIER_GATEWAY_CFG_TOKENS,
+ SOF_COPIER_TOKENS,
+ SOF_DAI_TOKENS,
+ SOF_COMP_EXT_TOKENS,
+};
+
+static enum sof_tokens pga_token_list[] = {
+ SOF_COMP_TOKENS,
+ SOF_GAIN_TOKENS,
+ SOF_AUDIO_FMT_NUM_TOKENS,
+ SOF_AUDIO_FORMAT_BUFFER_SIZE_TOKENS,
+ SOF_IN_AUDIO_FORMAT_TOKENS,
+ SOF_COMP_EXT_TOKENS,
+};
+
+static enum sof_tokens mixer_token_list[] = {
+ SOF_COMP_TOKENS,
+ SOF_AUDIO_FMT_NUM_TOKENS,
+ SOF_IN_AUDIO_FORMAT_TOKENS,
+ SOF_AUDIO_FORMAT_BUFFER_SIZE_TOKENS,
+ SOF_COMP_EXT_TOKENS,
+};
+
+static enum sof_tokens src_token_list[] = {
+ SOF_COMP_TOKENS,
+ SOF_SRC_TOKENS,
+ SOF_AUDIO_FMT_NUM_TOKENS,
+ SOF_IN_AUDIO_FORMAT_TOKENS,
+ SOF_AUDIO_FORMAT_BUFFER_SIZE_TOKENS,
+ SOF_COMP_EXT_TOKENS,
+};
+
+static const struct sof_ipc_tplg_widget_ops tplg_ipc4_widget_ops[SND_SOC_DAPM_TYPE_COUNT] = {
+ [snd_soc_dapm_aif_in] = {sof_ipc4_widget_setup_pcm, sof_ipc4_widget_free_comp_pcm,
+ host_token_list, ARRAY_SIZE(host_token_list), NULL,
+ sof_ipc4_prepare_copier_module,
+ sof_ipc4_unprepare_copier_module},
+ [snd_soc_dapm_aif_out] = {sof_ipc4_widget_setup_pcm, sof_ipc4_widget_free_comp_pcm,
+ host_token_list, ARRAY_SIZE(host_token_list), NULL,
+ sof_ipc4_prepare_copier_module,
+ sof_ipc4_unprepare_copier_module},
+ [snd_soc_dapm_dai_in] = {sof_ipc4_widget_setup_comp_dai, sof_ipc4_widget_free_comp_dai,
+ dai_token_list, ARRAY_SIZE(dai_token_list), NULL,
+ sof_ipc4_prepare_copier_module,
+ sof_ipc4_unprepare_copier_module},
+ [snd_soc_dapm_dai_out] = {sof_ipc4_widget_setup_comp_dai, sof_ipc4_widget_free_comp_dai,
+ dai_token_list, ARRAY_SIZE(dai_token_list), NULL,
+ sof_ipc4_prepare_copier_module,
+ sof_ipc4_unprepare_copier_module},
+ [snd_soc_dapm_scheduler] = {sof_ipc4_widget_setup_comp_pipeline, sof_ipc4_widget_free_comp,
+ pipeline_token_list, ARRAY_SIZE(pipeline_token_list), NULL,
+ NULL, NULL},
+ [snd_soc_dapm_pga] = {sof_ipc4_widget_setup_comp_pga, sof_ipc4_widget_free_comp_pga,
+ pga_token_list, ARRAY_SIZE(pga_token_list), NULL,
+ sof_ipc4_prepare_gain_module,
+ NULL},
+ [snd_soc_dapm_mixer] = {sof_ipc4_widget_setup_comp_mixer, sof_ipc4_widget_free_comp_mixer,
+ mixer_token_list, ARRAY_SIZE(mixer_token_list),
+ NULL, sof_ipc4_prepare_mixer_module,
+ NULL},
+ [snd_soc_dapm_src] = {sof_ipc4_widget_setup_comp_src, sof_ipc4_widget_free_comp_src,
+ src_token_list, ARRAY_SIZE(src_token_list),
+ NULL, sof_ipc4_prepare_src_module,
+ NULL},
+};
+
+const struct sof_ipc_tplg_ops ipc4_tplg_ops = {
+ .widget = tplg_ipc4_widget_ops,
+ .token_list = ipc4_token_list,
+ .control_setup = sof_ipc4_control_setup,
+ .control = &tplg_ipc4_control_ops,
+ .widget_setup = sof_ipc4_widget_setup,
+ .widget_free = sof_ipc4_widget_free,
+ .route_setup = sof_ipc4_route_setup,
+ .route_free = sof_ipc4_route_free,
+ .dai_config = sof_ipc4_dai_config,
+ .parse_manifest = sof_ipc4_parse_manifest,
+ .dai_get_clk = sof_ipc4_dai_get_clk,
+};
diff --git a/sound/soc/sof/ipc4-topology.h b/sound/soc/sof/ipc4-topology.h
new file mode 100644
index 000000000000..0aa87a8add5d
--- /dev/null
+++ b/sound/soc/sof/ipc4-topology.h
@@ -0,0 +1,270 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2022 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INCLUDE_SOUND_SOF_IPC4_TOPOLOGY_H__
+#define __INCLUDE_SOUND_SOF_IPC4_TOPOLOGY_H__
+
+#include <sound/sof/ipc4/header.h>
+
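+/*
+ * Firmware memory accounting helpers: sizes are rounded up to 64 bytes and
+ * converted to 4 KB pages when estimating pipeline memory usage
+ */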
+#define SOF_IPC4_FW_PAGE_SIZE BIT(12)
+#define SOF_IPC4_FW_PAGE(x) ((((x) + BIT(12) - 1) & ~(BIT(12) - 1)) >> 12)
+#define SOF_IPC4_FW_ROUNDUP(x) (((x) + BIT(6) - 1) & (~(BIT(6) - 1)))
+
+#define SOF_IPC4_MODULE_LOAD_TYPE GENMASK(3, 0)
+#define SOF_IPC4_MODULE_AUTO_START BIT(4)
+/*
+ * Two module scheduling domains in FW:
+ * LL domain - low latency domain
+ * DP domain - data processing domain
+ * The LL bit must always be set to the inverse of the DP bit
+ */
+#define SOF_IPC4_MODULE_LL BIT(5)
+#define SOF_IPC4_MODULE_DP BIT(6)
+#define SOF_IPC4_MODULE_LIB_CODE BIT(7)
+
+#define SOF_IPC4_MODULE_INSTANCE_LIST_ITEM_SIZE 12
+#define SOF_IPC4_PIPELINE_OBJECT_SIZE 448
+#define SOF_IPC4_DATA_QUEUE_OBJECT_SIZE 128
+#define SOF_IPC4_LL_TASK_OBJECT_SIZE 72
+#define SOF_IPC4_DP_TASK_OBJECT_SIZE 104
+#define SOF_IPC4_DP_TASK_LIST_SIZE (12 + 8)
+#define SOF_IPC4_LL_TASK_LIST_ITEM_SIZE 12
+#define SOF_IPC4_FW_MAX_PAGE_COUNT 20
+#define SOF_IPC4_FW_MAX_QUEUE_COUNT 8
+
+/* Node index and mask applicable for host copier and ALH/HDA type DAI copiers */
+#define SOF_IPC4_NODE_INDEX_MASK 0xFF
+#define SOF_IPC4_NODE_INDEX(x) ((x) & SOF_IPC4_NODE_INDEX_MASK)
+#define SOF_IPC4_NODE_TYPE(x) ((x) << 8)
+
+/* Node ID for SSP type DAI copiers */
+#define SOF_IPC4_NODE_INDEX_INTEL_SSP(x) (((x) & 0xf) << 4)
+
+/* Node ID for DMIC type DAI copiers */
+#define SOF_IPC4_NODE_INDEX_INTEL_DMIC(x) (((x) & 0x7) << 5)
+
+#define SOF_IPC4_GAIN_ALL_CHANNELS_MASK 0xffffffff
+#define SOF_IPC4_VOL_ZERO_DB 0x7fffffff
+
+#define ALH_MAX_NUMBER_OF_GTW 16
+
+/*
+ * The base of the multi-gateway range. Multi-gateway addressing starts from
+ * ALH_MULTI_GTW_BASE and there are ALH_MULTI_GTW_COUNT multi-sources
+ * and ALH_MULTI_GTW_COUNT multi-sinks available.
+ * Addressing is continuous from ALH_MULTI_GTW_BASE to
+ * ALH_MULTI_GTW_BASE + ALH_MULTI_GTW_COUNT - 1.
+ */
+#define ALH_MULTI_GTW_BASE 0x50
+/* Number of available multi-gateway sources/sinks, a fixed value defined by the FW */
+#define ALH_MULTI_GTW_COUNT 8
+
+/**
+ * struct sof_ipc4_pipeline - pipeline config data
+ * @priority: Priority of this pipeline
+ * @lp_mode: Low power mode
+ * @mem_usage: Memory usage
+ * @state: Pipeline state
+ * @msg: message structure for pipeline
+ */
+struct sof_ipc4_pipeline {
+ uint32_t priority;
+ uint32_t lp_mode;
+ uint32_t mem_usage;
+ int state;
+ struct sof_ipc4_msg msg;
+};
+
+/**
+ * struct sof_ipc4_available_audio_format - Available audio formats
+ * @base_config: Available base config
+ * @out_audio_fmt: Available output audio format
+ * @ref_audio_fmt: Reference audio format to match runtime audio format
+ * @dma_buffer_size: Available Gateway DMA buffer size (in bytes)
+ * @audio_fmt_num: Number of available audio formats
+ */
+struct sof_ipc4_available_audio_format {
+ struct sof_ipc4_base_module_cfg *base_config;
+ struct sof_ipc4_audio_format *out_audio_fmt;
+ struct sof_ipc4_audio_format *ref_audio_fmt;
+ u32 *dma_buffer_size;
+ int audio_fmt_num;
+};
+
+/**
+ * struct sof_copier_gateway_cfg - IPC gateway configuration
+ * @node_id: ID of Gateway Node
+ * @dma_buffer_size: Preferred Gateway DMA buffer size (in bytes)
+ * @config_length: Length of the gateway node configuration blob in @config_data (in dwords)
+ * @config_data: Gateway node configuration blob
+ */
+struct sof_copier_gateway_cfg {
+ uint32_t node_id;
+ uint32_t dma_buffer_size;
+ uint32_t config_length;
+ uint32_t config_data[];
+};
+
+/**
+ * struct sof_ipc4_copier_data - IPC data for copier
+ * @base_config: Base configuration including input audio format
+ * @out_format: Output audio format
+ * @copier_feature_mask: Copier feature mask
+ * @gtw_cfg: Gateway configuration
+ */
+struct sof_ipc4_copier_data {
+ struct sof_ipc4_base_module_cfg base_config;
+ struct sof_ipc4_audio_format out_format;
+ uint32_t copier_feature_mask;
+ struct sof_copier_gateway_cfg gtw_cfg;
+};
+
+/**
+ * struct sof_ipc4_gtw_attributes - Gateway attributes
+ * @lp_buffer_alloc: Gateway data requested in low power memory
+ * @alloc_from_reg_file: Gateway data requested in register file memory
+ * @rsvd: reserved for future use
+ */
+struct sof_ipc4_gtw_attributes {
+ uint32_t lp_buffer_alloc : 1;
+ uint32_t alloc_from_reg_file : 1;
+ uint32_t rsvd : 30;
+};
+
+/**
+ * struct sof_ipc4_alh_multi_gtw_cfg - ALH gateway configuration data
+ * @count: Number of streams (valid items in the @mapping array)
+ * @mapping: Per-stream mapping entries
+ * @mapping.alh_id: ALH stream ID of a single aggregated ALH stream
+ * @mapping.channel_mask: Channel mask of the stream
+ */
+struct sof_ipc4_alh_multi_gtw_cfg {
+ uint32_t count;
+ struct {
+ uint32_t alh_id;
+ uint32_t channel_mask;
+ } mapping[ALH_MAX_NUMBER_OF_GTW];
+} __packed;
+
+/**
+ * struct sof_ipc4_alh_configuration_blob - ALH configuration blob
+ * @gw_attr: Gateway attributes
+ * @alh_cfg: ALH configuration data
+ */
+struct sof_ipc4_alh_configuration_blob {
+ struct sof_ipc4_gtw_attributes gw_attr;
+ struct sof_ipc4_alh_multi_gtw_cfg alh_cfg;
+};
+
+/**
+ * struct sof_ipc4_copier - copier config data
+ * @data: IPC copier data
+ * @copier_config: Gateway configuration blob (gateway attributes, ALH blob or NHLT data)
+ * @ipc_config_size: Size of the IPC payload in @ipc_config_data
+ * @ipc_config_data: Buffer holding the copier data followed by the gateway configuration blob
+ * @available_fmt: Available audio format
+ * @frame_fmt: frame format
+ * @msg: message structure for copier
+ * @gtw_attr: Gateway attributes for copier blob
+ * @dai_type: DAI type
+ * @dai_index: DAI index
+ */
+struct sof_ipc4_copier {
+ struct sof_ipc4_copier_data data;
+ u32 *copier_config;
+ uint32_t ipc_config_size;
+ void *ipc_config_data;
+ struct sof_ipc4_available_audio_format available_fmt;
+ u32 frame_fmt;
+ struct sof_ipc4_msg msg;
+ struct sof_ipc4_gtw_attributes *gtw_attr;
+ u32 dai_type;
+ int dai_index;
+};
+
+/**
+ * struct sof_ipc4_ctrl_value_chan - generic channel mapped value data
+ * @channel: Channel ID
+ * @value: gain value
+ */
+struct sof_ipc4_ctrl_value_chan {
+ u32 channel;
+ u32 value;
+};
+
+/**
+ * struct sof_ipc4_control_data - IPC data for kcontrol IO
+ * @msg: message structure for kcontrol IO
+ * @index: pipeline ID
+ * @chanv: channel ID and value array used by volume type controls
+ * @data: data for binary kcontrols
+ */
+struct sof_ipc4_control_data {
+ struct sof_ipc4_msg msg;
+ int index;
+
+ union {
+ struct sof_ipc4_ctrl_value_chan chanv[0];
+ struct sof_abi_hdr data[0];
+ };
+};
+
+/**
+ * struct sof_ipc4_gain_data - IPC gain blob
+ * @channels: Channels
+ * @init_val: Initial value
+ * @curve_type: Curve type
+ * @reserved: reserved for future use
+ * @curve_duration: Curve duration
+ */
+struct sof_ipc4_gain_data {
+ uint32_t channels;
+ uint32_t init_val;
+ uint32_t curve_type;
+ uint32_t reserved;
+ uint32_t curve_duration;
+} __aligned(8);
+
+/**
+ * struct sof_ipc4_gain - gain config data
+ * @base_config: IPC base config data
+ * @data: IPC gain blob
+ * @available_fmt: Available audio format
+ * @msg: message structure for gain
+ */
+struct sof_ipc4_gain {
+ struct sof_ipc4_base_module_cfg base_config;
+ struct sof_ipc4_gain_data data;
+ struct sof_ipc4_available_audio_format available_fmt;
+ struct sof_ipc4_msg msg;
+};
+
+/**
+ * struct sof_ipc4_mixer - mixer config data
+ * @base_config: IPC base config data
+ * @available_fmt: Available audio format
+ * @msg: IPC4 message struct containing header and data info
+ */
+struct sof_ipc4_mixer {
+ struct sof_ipc4_base_module_cfg base_config;
+ struct sof_ipc4_available_audio_format available_fmt;
+ struct sof_ipc4_msg msg;
+};
+
+/**
+ * struct sof_ipc4_src - SRC config data
+ * @base_config: IPC base config data
+ * @sink_rate: Output rate for sink module
+ * @available_fmt: Available audio format
+ * @msg: IPC4 message struct containing header and data info
+ */
+struct sof_ipc4_src {
+ struct sof_ipc4_base_module_cfg base_config;
+ uint32_t sink_rate;
+ struct sof_ipc4_available_audio_format available_fmt;
+ struct sof_ipc4_msg msg;
+};
+
+#endif
diff --git a/sound/soc/sof/ipc4.c b/sound/soc/sof/ipc4.c
index 658802c86685..432b812bdf9c 100644
--- a/sound/soc/sof/ipc4.c
+++ b/sound/soc/sof/ipc4.c
@@ -574,7 +574,7 @@ static void sof_ipc4_rx_msg(struct snd_sof_dev *sdev)
data_size = sizeof(struct sof_ipc4_notify_resource_data);
break;
default:
- dev_dbg(sdev->dev, "%s: Unhandled DSP message: %#x|%#x\n", __func__,
+ dev_dbg(sdev->dev, "Unhandled DSP message: %#x|%#x\n",
ipc4_msg->primary, ipc4_msg->extension);
break;
}
@@ -597,10 +597,53 @@ static void sof_ipc4_rx_msg(struct snd_sof_dev *sdev)
}
}
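+/*
+ * Set the power state of a DSP core via the SET_DX message: core_mask selects
+ * the core, and the matching bit in dx_mask requests power up (set) or power
+ * down (cleared)
+ */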
+static int sof_ipc4_set_core_state(struct snd_sof_dev *sdev, int core_idx, bool on)
+{
+ struct sof_ipc4_dx_state_info dx_state;
+ struct sof_ipc4_msg msg;
+
+ dx_state.core_mask = BIT(core_idx);
+ if (on)
+ dx_state.dx_mask = BIT(core_idx);
+ else
+ dx_state.dx_mask = 0;
+
+ msg.primary = SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_MOD_SET_DX);
+ msg.primary |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
+ msg.primary |= SOF_IPC4_MSG_TARGET(SOF_IPC4_MODULE_MSG);
+ msg.extension = 0;
+ msg.data_ptr = &dx_state;
+ msg.data_size = sizeof(dx_state);
+
+ return sof_ipc4_tx_msg(sdev, &msg, msg.data_size, NULL, 0, false);
+}
+
+/*
+ * The context save callback sends a message to the firmware notifying it that
+ * the primary core is going to be turned off. The firmware uses this as an
+ * indication to prepare for a full power down and, when supported, for IMR
+ * boot.
+ *
+ * Note: IPC4 has no message to restore context, so no context restore
+ * callback is implemented.
+ */
+static int sof_ipc4_ctx_save(struct snd_sof_dev *sdev)
+{
+ return sof_ipc4_set_core_state(sdev, SOF_DSP_PRIMARY_CORE, false);
+}
+
+static const struct sof_ipc_pm_ops ipc4_pm_ops = {
+ .ctx_save = sof_ipc4_ctx_save,
+ .set_core_state = sof_ipc4_set_core_state,
+};
+
const struct sof_ipc_ops ipc4_ops = {
.tx_msg = sof_ipc4_tx_msg,
.rx_msg = sof_ipc4_rx_msg,
.set_get_data = sof_ipc4_set_get_data,
.get_reply = sof_ipc4_get_reply,
+ .pm = &ipc4_pm_ops,
.fw_loader = &ipc4_loader_ops,
+ .tplg = &ipc4_tplg_ops,
+ .pcm = &ipc4_pcm_ops,
};
diff --git a/sound/soc/sof/mediatek/Kconfig b/sound/soc/sof/mediatek/Kconfig
index a149dd1b3f44..4a2eddf6009a 100644
--- a/sound/soc/sof/mediatek/Kconfig
+++ b/sound/soc/sof/mediatek/Kconfig
@@ -15,6 +15,7 @@ config SND_SOC_SOF_MTK_COMMON
tristate
select SND_SOC_SOF_OF_DEV
select SND_SOC_SOF
+ select SND_SOC_SOF_IPC3
select SND_SOC_SOF_XTENSA
select SND_SOC_SOF_COMPRESS
help
diff --git a/sound/soc/sof/mediatek/adsp_helper.h b/sound/soc/sof/mediatek/adsp_helper.h
index 4ab998756bbc..d41e904e6614 100644
--- a/sound/soc/sof/mediatek/adsp_helper.h
+++ b/sound/soc/sof/mediatek/adsp_helper.h
@@ -20,6 +20,7 @@ struct mtk_adsp_chip_info {
u32 sramsize;
u32 dramsize;
u32 cfgregsize;
+ u32 shared_size;
void __iomem *va_sram; /* corresponding to pa_sram */
void __iomem *va_dram; /* corresponding to pa_dram */
void __iomem *va_cfgreg;
diff --git a/sound/soc/sof/mediatek/mt8186/mt8186-clk.c b/sound/soc/sof/mediatek/mt8186/mt8186-clk.c
index 22220fd50b62..2df3b7ae1c6f 100644
--- a/sound/soc/sof/mediatek/mt8186/mt8186-clk.c
+++ b/sound/soc/sof/mediatek/mt8186/mt8186-clk.c
@@ -18,8 +18,8 @@
#include "mt8186-clk.h"
static const char *adsp_clks[ADSP_CLK_MAX] = {
- [CLK_TOP_AUDIODSP] = "audiodsp_sel",
- [CLK_TOP_ADSP_BUS] = "adsp_bus_sel",
+ [CLK_TOP_AUDIODSP] = "audiodsp",
+ [CLK_TOP_ADSP_BUS] = "adsp_bus",
};
int mt8186_adsp_init_clock(struct snd_sof_dev *sdev)
diff --git a/sound/soc/sof/mediatek/mt8195/mt8195-clk.c b/sound/soc/sof/mediatek/mt8195/mt8195-clk.c
index 6bcb4b9b00fb..9ef08e43aa38 100644
--- a/sound/soc/sof/mediatek/mt8195/mt8195-clk.c
+++ b/sound/soc/sof/mediatek/mt8195/mt8195-clk.c
@@ -132,6 +132,13 @@ static int adsp_default_clk_init(struct snd_sof_dev *sdev, bool enable)
return ret;
}
+ ret = clk_set_parent(priv->clk[CLK_TOP_AUDIO_H],
+ priv->clk[CLK_TOP_CLK26M]);
+ if (ret) {
+ dev_err(dev, "set audio_h_sel failed %d\n", ret);
+ return ret;
+ }
+
ret = adsp_enable_all_clock(sdev);
if (ret) {
dev_err(dev, "failed to adsp_enable_clock: %d\n", ret);
diff --git a/sound/soc/sof/mediatek/mt8195/mt8195-loader.c b/sound/soc/sof/mediatek/mt8195/mt8195-loader.c
index ed18d6379e92..4be99ff4ebd3 100644
--- a/sound/soc/sof/mediatek/mt8195/mt8195-loader.c
+++ b/sound/soc/sof/mediatek/mt8195/mt8195-loader.c
@@ -21,7 +21,7 @@ void sof_hifixdsp_boot_sequence(struct snd_sof_dev *sdev, u32 boot_addr)
/* pull high StatVectorSel to use AltResetVec (set bit4 to 1) */
snd_sof_dsp_update_bits(sdev, DSP_REG_BAR, DSP_RESET_SW,
- DSP_RESET_SW, DSP_RESET_SW);
+ STATVECTOR_SEL, STATVECTOR_SEL);
/* toggle DReset & BReset */
/* pull high DReset & BReset */
@@ -29,6 +29,9 @@ void sof_hifixdsp_boot_sequence(struct snd_sof_dev *sdev, u32 boot_addr)
ADSP_BRESET_SW | ADSP_DRESET_SW,
ADSP_BRESET_SW | ADSP_DRESET_SW);
+	/* delay 10 DSP cycles at 26 MHz (about 1us), per the IP vendor's suggestion */
+ udelay(1);
+
/* pull low DReset & BReset */
snd_sof_dsp_update_bits(sdev, DSP_REG_BAR, DSP_RESET_SW,
ADSP_BRESET_SW | ADSP_DRESET_SW,
@@ -46,11 +49,13 @@ void sof_hifixdsp_boot_sequence(struct snd_sof_dev *sdev, u32 boot_addr)
void sof_hifixdsp_shutdown(struct snd_sof_dev *sdev)
{
- /* Clear to 0 firstly */
- snd_sof_dsp_write(sdev, DSP_REG_BAR, DSP_RESET_SW, 0x0);
-
/* RUN_STALL pull high again to reset */
snd_sof_dsp_update_bits(sdev, DSP_REG_BAR, DSP_RESET_SW,
ADSP_RUNSTALL, ADSP_RUNSTALL);
+
+ /* pull high DReset & BReset */
+ snd_sof_dsp_update_bits(sdev, DSP_REG_BAR, DSP_RESET_SW,
+ ADSP_BRESET_SW | ADSP_DRESET_SW,
+ ADSP_BRESET_SW | ADSP_DRESET_SW);
}
diff --git a/sound/soc/sof/mediatek/mt8195/mt8195.c b/sound/soc/sof/mediatek/mt8195/mt8195.c
index 30111ab23bf5..9c146015cd1b 100644
--- a/sound/soc/sof/mediatek/mt8195/mt8195.c
+++ b/sound/soc/sof/mediatek/mt8195/mt8195.c
@@ -145,6 +145,14 @@ static int platform_parse_resource(struct platform_device *pdev, void *data)
dev_dbg(dev, "DMA %pR\n", &res);
+ adsp->pa_shared_dram = (phys_addr_t)res.start;
+ adsp->shared_size = resource_size(&res);
+ if (adsp->pa_shared_dram & DRAM_REMAP_MASK) {
+ dev_err(dev, "adsp shared dma memory(%#x) is not 4K-aligned\n",
+ (u32)adsp->pa_shared_dram);
+ return -EINVAL;
+ }
+
ret = of_reserved_mem_device_init(dev);
if (ret) {
dev_err(dev, "of_reserved_mem_device_init failed\n");
@@ -273,23 +281,18 @@ static int adsp_shared_base_ioremap(struct platform_device *pdev, void *data)
{
struct device *dev = &pdev->dev;
struct mtk_adsp_chip_info *adsp = data;
- u32 shared_size;
/* remap shared-dram base to be non-cachable */
- shared_size = TOTAL_SIZE_SHARED_DRAM_FROM_TAIL;
- adsp->pa_shared_dram = adsp->pa_dram + adsp->dramsize - shared_size;
- if (adsp->va_dram) {
- adsp->shared_dram = adsp->va_dram + DSP_DRAM_SIZE - shared_size;
- } else {
- adsp->shared_dram = devm_ioremap(dev, adsp->pa_shared_dram,
- shared_size);
- if (!adsp->shared_dram) {
- dev_err(dev, "ioremap failed for shared DRAM\n");
- return -ENOMEM;
- }
+ adsp->shared_dram = devm_ioremap(dev, adsp->pa_shared_dram,
+ adsp->shared_size);
+ if (!adsp->shared_dram) {
+ dev_err(dev, "failed to ioremap base %pa size %#x\n",
+ adsp->shared_dram, adsp->shared_size);
+ return -ENOMEM;
}
+
dev_dbg(dev, "shared-dram vbase=%p, phy addr :%pa, size=%#x\n",
- adsp->shared_dram, &adsp->pa_shared_dram, shared_size);
+ adsp->shared_dram, &adsp->pa_shared_dram, adsp->shared_size);
return 0;
}
@@ -361,9 +364,11 @@ static int mt8195_dsp_probe(struct snd_sof_dev *sdev)
goto err_adsp_sram_power_off;
}
- sdev->bar[SOF_FW_BLK_TYPE_SRAM] = devm_ioremap_wc(sdev->dev,
- priv->adsp->pa_dram,
- priv->adsp->dramsize);
+ priv->adsp->va_sram = sdev->bar[SOF_FW_BLK_TYPE_IRAM];
+
+ sdev->bar[SOF_FW_BLK_TYPE_SRAM] = devm_ioremap(sdev->dev,
+ priv->adsp->pa_dram,
+ priv->adsp->dramsize);
if (!sdev->bar[SOF_FW_BLK_TYPE_SRAM]) {
dev_err(sdev->dev, "failed to ioremap base %pa size %#x\n",
&priv->adsp->pa_dram, priv->adsp->dramsize);
@@ -438,6 +443,19 @@ static int mt8195_dsp_suspend(struct snd_sof_dev *sdev, u32 target_state)
{
struct platform_device *pdev = container_of(sdev->dev, struct platform_device, dev);
int ret;
+ u32 reset_sw, dbg_pc;
+
+	/* wait for the DSP to enter idle, with a 1 second timeout */
+ ret = snd_sof_dsp_read_poll_timeout(sdev, DSP_REG_BAR,
+ DSP_RESET_SW, reset_sw,
+ ((reset_sw & ADSP_PWAIT) == ADSP_PWAIT),
+ SUSPEND_DSP_IDLE_POLL_INTERVAL_US,
+ SUSPEND_DSP_IDLE_TIMEOUT_US);
+ if (ret < 0) {
+ dbg_pc = snd_sof_dsp_read(sdev, DSP_REG_BAR, DSP_PDEBUGPC);
+ dev_warn(sdev->dev, "dsp not idle, powering off anyway : swrest %#x, pc %#x, ret %d\n",
+ reset_sw, dbg_pc, ret);
+ }
/* stall and reset dsp */
sof_hifixdsp_shutdown(sdev);
diff --git a/sound/soc/sof/mediatek/mt8195/mt8195.h b/sound/soc/sof/mediatek/mt8195/mt8195.h
index 929424182357..7ffd523f936c 100644
--- a/sound/soc/sof/mediatek/mt8195/mt8195.h
+++ b/sound/soc/sof/mediatek/mt8195/mt8195.h
@@ -34,6 +34,7 @@ struct snd_sof_dev;
#define ADSP_DRESET_SW BIT(1)
#define ADSP_RUNSTALL BIT(3)
#define STATVECTOR_SEL BIT(4)
+#define ADSP_PWAIT BIT(16)
#define DSP_PFAULTBUS 0x0028
#define DSP_PFAULTINFO 0x002c
#define DSP_GPR00 0x0030
@@ -153,6 +154,10 @@ struct snd_sof_dev;
#define DRAM_REMAP_SHIFT 12
#define DRAM_REMAP_MASK (BIT(DRAM_REMAP_SHIFT) - 1)
+/* suspend dsp idle check interval and timeout */
+#define SUSPEND_DSP_IDLE_TIMEOUT_US 1000000 /* timeout to wait dsp idle, 1 sec */
+#define SUSPEND_DSP_IDLE_POLL_INTERVAL_US 500 /* 0.5 msec */
+
void sof_hifixdsp_boot_sequence(struct snd_sof_dev *sdev, u32 boot_addr);
void sof_hifixdsp_shutdown(struct snd_sof_dev *sdev);
#endif
diff --git a/sound/soc/sof/ops.h b/sound/soc/sof/ops.h
index b79ae4f66eba..55d43adb6a29 100644
--- a/sound/soc/sof/ops.h
+++ b/sound/soc/sof/ops.h
@@ -29,6 +29,12 @@ static inline int sof_ops_init(struct snd_sof_dev *sdev)
return 0;
}
+static inline void sof_ops_free(struct snd_sof_dev *sdev)
+{
+ if (sdev->pdata->desc->ops_free)
+ sdev->pdata->desc->ops_free(sdev);
+}
+
/* Mandatory operations are verified during probing */
/* init */
diff --git a/sound/soc/sof/pcm.c b/sound/soc/sof/pcm.c
index a76d0b5b2ad9..6cb6a432be5e 100644
--- a/sound/soc/sof/pcm.c
+++ b/sound/soc/sof/pcm.c
@@ -604,6 +604,14 @@ static int sof_pcm_probe(struct snd_soc_component *component)
const char *tplg_filename;
int ret;
+ /*
+ * make sure the device is pm_runtime_active before loading the
+ * topology and initiating IPC or bus transactions
+ */
+ ret = pm_runtime_resume_and_get(component->dev);
+ if (ret < 0 && ret != -EACCES)
+ return ret;
+
/* load the default topology */
sdev->component = component;
@@ -621,6 +629,9 @@ static int sof_pcm_probe(struct snd_soc_component *component)
return ret;
}
+ pm_runtime_mark_last_busy(component->dev);
+ pm_runtime_put_autosuspend(component->dev);
+
return ret;
}
@@ -671,4 +682,6 @@ void snd_sof_new_platform_drv(struct snd_sof_dev *sdev)
/* increment module refcount when a pcm is opened */
pd->module_get_upon_open = 1;
+
+ pd->legacy_dai_naming = 1;
}
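Editor's note: the sof_pcm_probe() change above wraps topology loading in a runtime-PM reference: pm_runtime_resume_and_get() before any IPC or bus traffic, then pm_runtime_mark_last_busy() and pm_runtime_put_autosuspend() once the work is done (-EACCES is tolerated because it only means runtime PM is disabled). A hedged sketch of the same bracket around a hypothetical do_ipc_work() helper:

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Hypothetical worker; stands in for topology loading / IPC traffic. */
static int do_ipc_work(struct device *dev)
{
	return 0;
}

static int run_with_device_active(struct device *dev)
{
	int ret;

	/* Wake the device (and its parents); -EACCES means runtime PM is disabled. */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0 && ret != -EACCES)
		return ret;

	ret = do_ipc_work(dev);

	/* Restart the autosuspend timer and drop our reference. */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return ret;
}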
diff --git a/sound/soc/sof/sof-audio.h b/sound/soc/sof/sof-audio.h
index 27cc5fb642e5..4284ea2f3a1f 100644
--- a/sound/soc/sof/sof-audio.h
+++ b/sound/soc/sof/sof-audio.h
@@ -168,6 +168,7 @@ struct sof_ipc_tplg_widget_ops {
* @dai_get_clk: Function pointer for getting the DAI clock setting
* @set_up_all_pipelines: Function pointer for setting up all topology pipelines
* @tear_down_all_pipelines: Function pointer for tearing down all topology pipelines
+ * @parse_manifest: Optional function pointer for ipc4 specific parsing of topology manifest
*/
struct sof_ipc_tplg_ops {
const struct sof_ipc_tplg_widget_ops *widget;
@@ -185,6 +186,8 @@ struct sof_ipc_tplg_ops {
int (*dai_get_clk)(struct snd_sof_dev *sdev, struct snd_sof_dai *dai, int clk_type);
int (*set_up_all_pipelines)(struct snd_sof_dev *sdev, bool verify);
int (*tear_down_all_pipelines)(struct snd_sof_dev *sdev, bool verify);
+ int (*parse_manifest)(struct snd_soc_component *scomp, int index,
+ struct snd_soc_tplg_manifest *man);
};
/** struct snd_sof_tuple - Tuple info
@@ -225,6 +228,15 @@ enum sof_tokens {
SOF_AFE_TOKENS,
SOF_CORE_TOKENS,
SOF_COMP_EXT_TOKENS,
+ SOF_IN_AUDIO_FORMAT_TOKENS,
+ SOF_OUT_AUDIO_FORMAT_TOKENS,
+ SOF_AUDIO_FORMAT_BUFFER_SIZE_TOKENS,
+ SOF_COPIER_GATEWAY_CFG_TOKENS,
+ SOF_COPIER_TOKENS,
+ SOF_AUDIO_FMT_NUM_TOKENS,
+ SOF_COPIER_FORMAT_TOKENS,
+ SOF_GAIN_TOKENS,
+ SOF_ACPDMIC_TOKENS,
/* this should be the last */
SOF_TOKEN_COUNT,
diff --git a/sound/soc/sof/sof-client-ipc-msg-injector.c b/sound/soc/sof/sof-client-ipc-msg-injector.c
index 6bdfa527b7f7..752d5320680f 100644
--- a/sound/soc/sof/sof-client-ipc-msg-injector.c
+++ b/sound/soc/sof/sof-client-ipc-msg-injector.c
@@ -181,7 +181,7 @@ static ssize_t sof_msg_inject_ipc4_dfs_write(struct file *file,
struct sof_client_dev *cdev = file->private_data;
struct sof_msg_inject_priv *priv = cdev->data;
struct sof_ipc4_msg *ipc4_msg = priv->tx_buffer;
- ssize_t size;
+ size_t data_size;
int ret;
if (*ppos)
@@ -191,25 +191,20 @@ static ssize_t sof_msg_inject_ipc4_dfs_write(struct file *file,
return -EINVAL;
/* copy the header first */
- size = simple_write_to_buffer(&ipc4_msg->header_u64,
- sizeof(ipc4_msg->header_u64),
- ppos, buffer, count);
- if (size < 0)
- return size;
- if (size != sizeof(ipc4_msg->header_u64))
+ if (copy_from_user(&ipc4_msg->header_u64, buffer,
+ sizeof(ipc4_msg->header_u64)))
return -EFAULT;
- count -= size;
+ data_size = count - sizeof(ipc4_msg->header_u64);
+ if (data_size > priv->max_msg_size)
+ return -EINVAL;
+
/* Copy the payload */
- size = simple_write_to_buffer(ipc4_msg->data_ptr,
- priv->max_msg_size, ppos, buffer,
- count);
- if (size < 0)
- return size;
- if (size != count)
+ if (copy_from_user(ipc4_msg->data_ptr,
+ buffer + sizeof(ipc4_msg->header_u64), data_size))
return -EFAULT;
- ipc4_msg->data_size = count;
+ ipc4_msg->data_size = data_size;
/* Initialize the reply storage */
ipc4_msg = priv->rx_buffer;
@@ -221,9 +216,9 @@ static ssize_t sof_msg_inject_ipc4_dfs_write(struct file *file,
/* return the error code if test failed */
if (ret < 0)
- size = ret;
+ return ret;
- return size;
+ return count;
};
static int sof_msg_inject_dfs_release(struct inode *inode, struct file *file)
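Editor's note: the rewrite above replaces the two simple_write_to_buffer() calls with explicit copies: a fixed-size header is read first, the remaining length is validated against the maximum payload size, and only then is the payload copied from just past the header. A user-space sketch of that split, with a hypothetical message layout:

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define MAX_PAYLOAD	384	/* assumed limit; plays the role of priv->max_msg_size */

struct msg {			/* hypothetical message layout */
	uint64_t header;
	uint8_t payload[MAX_PAYLOAD];
	size_t payload_size;
};

/* Split "count" bytes from "buf" into a fixed header plus a variable payload. */
int parse_msg(struct msg *m, const uint8_t *buf, size_t count)
{
	size_t data_size;

	if (count < sizeof(m->header))
		return -EINVAL;		/* too short to hold even the header */

	memcpy(&m->header, buf, sizeof(m->header));

	data_size = count - sizeof(m->header);
	if (data_size > MAX_PAYLOAD)
		return -EINVAL;		/* payload would overflow the buffer */

	memcpy(m->payload, buf + sizeof(m->header), data_size);
	m->payload_size = data_size;

	return 0;
}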
diff --git a/sound/soc/sof/sof-client-probes.c b/sound/soc/sof/sof-client-probes.c
index 34e6bd356e71..eb246b823461 100644
--- a/sound/soc/sof/sof-client-probes.c
+++ b/sound/soc/sof/sof-client-probes.c
@@ -270,9 +270,9 @@ static int sof_probes_compr_startup(struct snd_compr_stream *cstream,
if (ret)
return ret;
- ret = ops->assign(cdev, cstream, dai, &priv->extractor_stream_tag);
+ ret = ops->startup(cdev, cstream, dai, &priv->extractor_stream_tag);
if (ret) {
- dev_err(dai->dev, "Failed to assign probe stream: %d\n", ret);
+ dev_err(dai->dev, "Failed to startup probe stream: %d\n", ret);
priv->extractor_stream_tag = SOF_PROBES_INVALID_NODE_ID;
sof_client_core_module_put(cdev);
}
@@ -310,7 +310,7 @@ exit:
priv->extractor_stream_tag = SOF_PROBES_INVALID_NODE_ID;
snd_compr_free_pages(cstream);
- ret = ops->free(cdev, cstream, dai);
+ ret = ops->shutdown(cdev, cstream, dai);
sof_client_core_module_put(cdev);
@@ -667,6 +667,7 @@ static const struct snd_soc_component_driver sof_probes_component = {
.name = "sof-probes-component",
.compress_ops = &sof_probes_compressed_ops,
.module_get_upon_open = 1,
+ .legacy_dai_naming = 1,
};
SND_SOC_DAILINK_DEF(dummy, DAILINK_COMP_ARRAY(COMP_DUMMY()));
@@ -693,6 +694,10 @@ static int sof_probes_client_probe(struct auxiliary_device *auxdev,
if (!sof_probes_enabled)
return -ENXIO;
+ /* only ipc3 is supported */
+ if (sof_client_get_ipc_type(cdev) != SOF_IPC)
+ return -ENXIO;
+
if (!dev->platform_data) {
dev_err(dev, "missing platform data\n");
return -ENODEV;
@@ -704,7 +709,7 @@ static int sof_probes_client_probe(struct auxiliary_device *auxdev,
ops = dev->platform_data;
- if (!ops->assign || !ops->free || !ops->set_params || !ops->trigger ||
+ if (!ops->startup || !ops->shutdown || !ops->set_params || !ops->trigger ||
!ops->pointer) {
dev_err(dev, "missing platform callback(s)\n");
return -ENODEV;
diff --git a/sound/soc/sof/sof-client-probes.h b/sound/soc/sof/sof-client-probes.h
index 0f9ed4569fd3..9e43f3c444f8 100644
--- a/sound/soc/sof/sof-client-probes.h
+++ b/sound/soc/sof/sof-client-probes.h
@@ -14,10 +14,10 @@ struct snd_soc_dai;
* DSP and host, like HDA.
*/
struct sof_probes_host_ops {
- int (*assign)(struct sof_client_dev *cdev, struct snd_compr_stream *cstream,
- struct snd_soc_dai *dai, u32 *stream_id);
- int (*free)(struct sof_client_dev *cdev, struct snd_compr_stream *cstream,
- struct snd_soc_dai *dai);
+ int (*startup)(struct sof_client_dev *cdev, struct snd_compr_stream *cstream,
+ struct snd_soc_dai *dai, u32 *stream_id);
+ int (*shutdown)(struct sof_client_dev *cdev, struct snd_compr_stream *cstream,
+ struct snd_soc_dai *dai);
int (*set_params)(struct sof_client_dev *cdev, struct snd_compr_stream *cstream,
struct snd_compr_params *params,
struct snd_soc_dai *dai);
diff --git a/sound/soc/sof/sof-client.c b/sound/soc/sof/sof-client.c
index 16cca666bb85..125aa2137195 100644
--- a/sound/soc/sof/sof-client.c
+++ b/sound/soc/sof/sof-client.c
@@ -383,8 +383,8 @@ void sof_client_ipc_rx_dispatcher(struct snd_sof_dev *sdev, void *msg_buf)
msg_type = SOF_IPC4_NOTIFICATION_TYPE_GET(msg->primary);
} else {
- dev_dbg_once(sdev->dev, "%s: Not supported IPC version: %d\n",
- __func__, sdev->pdata->ipc_type);
+ dev_dbg_once(sdev->dev, "Not supported IPC version: %d\n",
+ sdev->pdata->ipc_type);
return;
}
diff --git a/sound/soc/sof/sof-priv.h b/sound/soc/sof/sof-priv.h
index f0f3d72c0da7..823583086279 100644
--- a/sound/soc/sof/sof-priv.h
+++ b/sound/soc/sof/sof-priv.h
@@ -37,6 +37,12 @@
#define SOF_DBG_IGNORE_D3_PERSISTENT BIT(7) /* ignore the DSP D3 persistent capability
* and always download firmware upon D3 exit
*/
+#define SOF_DBG_PRINT_DMA_POSITION_UPDATE_LOGS BIT(8) /* print DMA position updates
+ * in dmesg logs
+ */
+#define SOF_DBG_PRINT_IPC_SUCCESS_LOGS BIT(9) /* print IPC success
+ * in dmesg logs
+ */
/* Flag definitions used for controlling the DSP dump behavior */
#define SOF_DBG_DUMP_REGS BIT(0)
@@ -120,6 +126,7 @@ struct snd_sof_platform_stream_params {
bool use_phy_address;
u32 phy_addr;
bool no_ipc_position;
+ bool cont_update_posn;
};
/*
@@ -378,12 +385,14 @@ struct sof_ipc_fw_tracing_ops {
/**
* struct sof_ipc_pm_ops - IPC-specific PM ops
- * @ctx_save: Function pointer for context save
- * @ctx_restore: Function pointer for context restore
+ * @ctx_save: Optional function pointer for context save
+ * @ctx_restore: Optional function pointer for context restore
+ * @set_core_state: Optional function pointer for turning on/off a DSP core
*/
struct sof_ipc_pm_ops {
int (*ctx_save)(struct snd_sof_dev *sdev);
int (*ctx_restore)(struct snd_sof_dev *sdev);
+ int (*set_core_state)(struct snd_sof_dev *sdev, int core_idx, bool on);
};
/**
@@ -657,7 +666,7 @@ void sof_print_oops_and_stack(struct snd_sof_dev *sdev, const char *level,
u32 panic_code, u32 tracep_code, void *oops,
struct sof_ipc_panic_info *panic_info,
void *stack, size_t stack_words);
-void snd_sof_handle_fw_exception(struct snd_sof_dev *sdev);
+void snd_sof_handle_fw_exception(struct snd_sof_dev *sdev, const char *msg);
int snd_sof_dbg_memory_info_init(struct snd_sof_dev *sdev);
int snd_sof_debugfs_add_region_item_iomem(struct snd_sof_dev *sdev,
enum snd_sof_fw_blk_type blk_type, u32 offset, size_t size,
diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c
index b1fcab7ce48e..9273a70fec25 100644
--- a/sound/soc/sof/topology.c
+++ b/sound/soc/sof/topology.c
@@ -36,9 +36,6 @@
#define TLV_STEP 1
#define TLV_MUTE 2
-/* size of tplg abi in byte */
-#define SOF_TPLG_ABI_SIZE 3
-
/**
* sof_update_ipc_object - Parse multiple sets of tokens within the token array associated with the
* token ID.
@@ -1141,6 +1138,21 @@ static int spcm_bind(struct snd_soc_component *scomp, struct snd_sof_pcm *spcm,
return 0;
}
+static int sof_get_token_value(u32 token_id, struct snd_sof_tuple *tuples, int num_tuples)
+{
+ int i;
+
+ if (!tuples)
+ return -EINVAL;
+
+ for (i = 0; i < num_tuples; i++) {
+ if (tuples[i].token == token_id)
+ return tuples[i].value.v;
+ }
+
+ return -EINVAL;
+}
+
static int sof_widget_parse_tokens(struct snd_soc_component *scomp, struct snd_sof_widget *swidget,
struct snd_soc_tplg_dapm_widget *tw,
enum sof_tokens *object_token_list, int count)
@@ -1168,6 +1180,8 @@ static int sof_widget_parse_tokens(struct snd_soc_component *scomp, struct snd_s
/* parse token list for widget */
for (i = 0; i < count; i++) {
+ int num_sets = 1;
+
if (object_token_list[i] >= SOF_TOKEN_COUNT) {
dev_err(scomp->dev, "Invalid token id %d for widget %s\n",
object_token_list[i], swidget->widget->name);
@@ -1175,8 +1189,9 @@ static int sof_widget_parse_tokens(struct snd_soc_component *scomp, struct snd_s
goto err;
}
- /* parse and save UUID in swidget */
- if (object_token_list[i] == SOF_COMP_EXT_TOKENS) {
+ switch (object_token_list[i]) {
+ case SOF_COMP_EXT_TOKENS:
+ /* parse and save UUID in swidget */
ret = sof_parse_tokens(scomp, swidget,
token_list[object_token_list[i]].tokens,
token_list[object_token_list[i]].count,
@@ -1189,11 +1204,41 @@ static int sof_widget_parse_tokens(struct snd_soc_component *scomp, struct snd_s
}
continue;
+ case SOF_IN_AUDIO_FORMAT_TOKENS:
+ case SOF_OUT_AUDIO_FORMAT_TOKENS:
+ case SOF_COPIER_GATEWAY_CFG_TOKENS:
+ case SOF_AUDIO_FORMAT_BUFFER_SIZE_TOKENS:
+ num_sets = sof_get_token_value(SOF_TKN_COMP_NUM_AUDIO_FORMATS,
+ swidget->tuples, swidget->num_tuples);
+
+ if (num_sets < 0) {
+ dev_err(sdev->dev, "Invalid audio format count for %s\n",
+ swidget->widget->name);
+ ret = num_sets;
+ goto err;
+ }
+
+ if (num_sets > 1) {
+ struct snd_sof_tuple *new_tuples;
+
+ num_tuples += token_list[object_token_list[i]].count * num_sets;
+ new_tuples = krealloc(swidget->tuples,
+ sizeof(*new_tuples) * num_tuples, GFP_KERNEL);
+ if (!new_tuples) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ swidget->tuples = new_tuples;
+ }
+ break;
+ default:
+ break;
}
/* copy one set of tuples per token ID into swidget->tuples */
ret = sof_copy_tuples(sdev, private->array, le32_to_cpu(private->size),
- object_token_list[i], 1, swidget->tuples,
+ object_token_list[i], num_sets, swidget->tuples,
num_tuples, &swidget->num_tuples);
if (ret < 0) {
dev_err(scomp->dev, "Failed parsing %s for widget %s err: %d\n",
@@ -1208,21 +1253,6 @@ err:
return ret;
}
-static int sof_get_token_value(u32 token_id, struct snd_sof_tuple *tuples, int num_tuples)
-{
- int i;
-
- if (!tuples)
- return -EINVAL;
-
- for (i = 0; i < num_tuples; i++) {
- if (tuples[i].token == token_id)
- return tuples[i].value.v;
- }
-
- return -EINVAL;
-}
-
/* external widget init - used for any driver specific init */
static int sof_widget_ready(struct snd_soc_component *scomp, int index,
struct snd_soc_dapm_widget *w,
@@ -1389,7 +1419,6 @@ static int sof_widget_unload(struct snd_soc_component *scomp,
struct soc_bytes_ext *sbe;
struct snd_sof_dai *dai;
struct soc_enum *se;
- int ret = 0;
int i;
swidget = dobj->private;
@@ -1450,7 +1479,7 @@ out:
list_del(&swidget->list);
kfree(swidget);
- return ret;
+ return 0;
}
/*
@@ -1709,6 +1738,10 @@ static int sof_link_load(struct snd_soc_component *scomp, int index, struct snd_
token_id = SOF_AFE_TOKENS;
num_tuples += token_list[SOF_AFE_TOKENS].count;
break;
+ case SOF_DAI_AMD_DMIC:
+ token_id = SOF_ACPDMIC_TOKENS;
+ num_tuples += token_list[SOF_ACPDMIC_TOKENS].count;
+ break;
default:
break;
}
@@ -1987,45 +2020,11 @@ static int sof_complete(struct snd_soc_component *scomp)
static int sof_manifest(struct snd_soc_component *scomp, int index,
struct snd_soc_tplg_manifest *man)
{
- u32 size;
- u32 abi_version;
-
- size = le32_to_cpu(man->priv.size);
-
- /* backward compatible with tplg without ABI info */
- if (!size) {
- dev_dbg(scomp->dev, "No topology ABI info\n");
- return 0;
- }
-
- if (size != SOF_TPLG_ABI_SIZE) {
- dev_err(scomp->dev, "error: invalid topology ABI size\n");
- return -EINVAL;
- }
-
- dev_info(scomp->dev,
- "Topology: ABI %d:%d:%d Kernel ABI %d:%d:%d\n",
- man->priv.data[0], man->priv.data[1],
- man->priv.data[2], SOF_ABI_MAJOR, SOF_ABI_MINOR,
- SOF_ABI_PATCH);
-
- abi_version = SOF_ABI_VER(man->priv.data[0],
- man->priv.data[1],
- man->priv.data[2]);
-
- if (SOF_ABI_VERSION_INCOMPATIBLE(SOF_ABI_VERSION, abi_version)) {
- dev_err(scomp->dev, "error: incompatible topology ABI version\n");
- return -EINVAL;
- }
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ const struct sof_ipc_tplg_ops *ipc_tplg_ops = sdev->ipc->ops->tplg;
- if (SOF_ABI_VERSION_MINOR(abi_version) > SOF_ABI_MINOR) {
- if (!IS_ENABLED(CONFIG_SND_SOC_SOF_STRICT_ABI_CHECKS)) {
- dev_warn(scomp->dev, "warn: topology ABI is more recent than kernel\n");
- } else {
- dev_err(scomp->dev, "error: topology ABI is more recent than kernel\n");
- return -EINVAL;
- }
- }
+ if (ipc_tplg_ops->parse_manifest)
+ return ipc_tplg_ops->parse_manifest(scomp, index, man);
return 0;
}
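Editor's note: two patterns from the topology.c hunks above are worth calling out: sof_get_token_value() scans the widget's tuple array for SOF_TKN_COMP_NUM_AUDIO_FORMATS, and when more than one audio format is reported the tuples array is grown with krealloc() before num_sets sets of tuples are copied in. A user-space sketch of that lookup-then-grow pattern, using hypothetical tuple types:

#include <errno.h>
#include <stdlib.h>

struct tuple {			/* hypothetical tuple: token ID plus value */
	unsigned int token;
	unsigned int value;
};

/* Return the value of the first tuple matching token_id, or -EINVAL if absent. */
int get_token_value(unsigned int token_id, const struct tuple *tuples, int num_tuples)
{
	int i;

	if (!tuples)
		return -EINVAL;

	for (i = 0; i < num_tuples; i++)
		if (tuples[i].token == token_id)
			return tuples[i].value;

	return -EINVAL;
}

/* Grow *tuples so it can hold extra_sets additional sets of set_size tuples. */
int grow_tuples(struct tuple **tuples, int *capacity, int extra_sets, int set_size)
{
	int new_capacity = *capacity + extra_sets * set_size;
	struct tuple *tmp = realloc(*tuples, new_capacity * sizeof(*tmp));

	if (!tmp)
		return -ENOMEM;		/* the old array is still valid */

	*tuples = tmp;
	*capacity = new_capacity;

	return 0;
}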
diff --git a/sound/soc/spear/spdif_in.c b/sound/soc/spear/spdif_in.c
index 4b68d6ee75da..4ad8b1fc713a 100644
--- a/sound/soc/spear/spdif_in.c
+++ b/sound/soc/spear/spdif_in.c
@@ -172,7 +172,8 @@ static struct snd_soc_dai_driver spdif_in_dai = {
};
static const struct snd_soc_component_driver spdif_in_component = {
- .name = "spdif-in",
+ .name = "spdif-in",
+ .legacy_dai_naming = 1,
};
static irqreturn_t spdif_in_irq(int irq, void *arg)
diff --git a/sound/soc/spear/spdif_out.c b/sound/soc/spear/spdif_out.c
index 549295a6ed50..fb107c5790ad 100644
--- a/sound/soc/spear/spdif_out.c
+++ b/sound/soc/spear/spdif_out.c
@@ -273,7 +273,8 @@ static struct snd_soc_dai_driver spdif_out_dai = {
};
static const struct snd_soc_component_driver spdif_out_component = {
- .name = "spdif-out",
+ .name = "spdif-out",
+ .legacy_dai_naming = 1,
};
static int spdif_out_probe(struct platform_device *pdev)
diff --git a/sound/soc/sti/sti_uniperif.c b/sound/soc/sti/sti_uniperif.c
index 34668fe3909d..a4d74d1e3c24 100644
--- a/sound/soc/sti/sti_uniperif.c
+++ b/sound/soc/sti/sti_uniperif.c
@@ -376,7 +376,8 @@ static const struct snd_soc_dai_driver sti_uniperiph_dai_template = {
static const struct snd_soc_component_driver sti_uniperiph_dai_component = {
.name = "sti_cpu_dai",
.suspend = sti_uniperiph_suspend,
- .resume = sti_uniperiph_resume
+ .resume = sti_uniperiph_resume,
+ .legacy_dai_naming = 1,
};
static int sti_uniperiph_cpu_dai_of(struct device_node *node,
diff --git a/sound/soc/stm/stm32_adfsdm.c b/sound/soc/stm/stm32_adfsdm.c
index 6ee714542b84..04f2912e1418 100644
--- a/sound/soc/stm/stm32_adfsdm.c
+++ b/sound/soc/stm/stm32_adfsdm.c
@@ -149,6 +149,7 @@ static const struct snd_soc_dai_driver stm32_adfsdm_dai = {
static const struct snd_soc_component_driver stm32_adfsdm_dai_component = {
.name = "stm32_dfsdm_audio",
+ .legacy_dai_naming = 1,
};
static void stm32_memcpy_32to16(void *dest, const void *src, size_t n)
@@ -296,7 +297,7 @@ static int stm32_adfsdm_pcm_new(struct snd_soc_component *component,
static int stm32_adfsdm_dummy_cb(const void *data, void *private)
{
/*
- * This dummmy callback is requested by iio_channel_get_all_cb() API,
+ * This dummy callback is requested by iio_channel_get_all_cb() API,
* but the stm32_dfsdm_get_buff_cb() API is used instead, to optimize
* DMA transfers.
*/
diff --git a/sound/soc/stm/stm32_i2s.c b/sound/soc/stm/stm32_i2s.c
index ac5dff4d1677..6aafe793eec4 100644
--- a/sound/soc/stm/stm32_i2s.c
+++ b/sound/soc/stm/stm32_i2s.c
@@ -593,16 +593,16 @@ static int stm32_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
}
/* DAI clock master masks */
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBM_CFM:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_BC_FC:
i2s->ms_flg = I2S_MS_SLAVE;
break;
- case SND_SOC_DAIFMT_CBS_CFS:
+ case SND_SOC_DAIFMT_BP_FP:
i2s->ms_flg = I2S_MS_MASTER;
break;
default:
dev_err(cpu_dai->dev, "Unsupported mode %#x\n",
- fmt & SND_SOC_DAIFMT_MASTER_MASK);
+ fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK);
return -EINVAL;
}
@@ -978,6 +978,7 @@ static const struct snd_dmaengine_pcm_config stm32_i2s_pcm_config = {
static const struct snd_soc_component_driver stm32_i2s_component = {
.name = "stm32-i2s",
+ .legacy_dai_naming = 1,
};
static void stm32_i2s_dai_init(struct snd_soc_pcm_stream *stream,
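Editor's note: the set_dai_fmt hunks in this series (stm32_i2s, stm32_sai, sun4i-i2s, sun8i-codec, tegra20_i2s, tegra210_i2s) move from the deprecated SND_SOC_DAIFMT_MASTER_MASK / CBS_CFS / CBM_CFM macros to the clock-provider terminology: SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK with BP_FP / BC_FC from the CPU DAI's point of view (CBP_CFP / CBC_CFC from the codec's). A minimal sketch of a handler using the new names, with a hypothetical driver context:

#include <sound/soc.h>
#include <sound/soc-dai.h>

struct example_priv {		/* hypothetical driver context */
	bool clock_provider;
};

static int example_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
	struct example_priv *priv = snd_soc_dai_get_drvdata(dai);

	switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
	case SND_SOC_DAIFMT_BP_FP:	/* this DAI drives BCLK and LRCLK */
		priv->clock_provider = true;
		break;
	case SND_SOC_DAIFMT_BC_FC:	/* BCLK and LRCLK come from the link partner */
		priv->clock_provider = false;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}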
diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c
index dd636af81c9b..eb31b49e6597 100644
--- a/sound/soc/stm/stm32_sai_sub.c
+++ b/sound/soc/stm/stm32_sai_sub.c
@@ -45,8 +45,6 @@
#define STM_SAI_B_ID 0x1
#define STM_SAI_IS_SUB_A(x) ((x)->id == STM_SAI_A_ID)
-#define STM_SAI_IS_SUB_B(x) ((x)->id == STM_SAI_B_ID)
-#define STM_SAI_BLOCK_NAME(x) (((x)->id == STM_SAI_A_ID) ? "A" : "B")
#define SAI_SYNC_NONE 0x0
#define SAI_SYNC_INTERNAL 0x1
@@ -719,18 +717,18 @@ static int stm32_sai_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
stm32_sai_sub_reg_up(sai, STM_SAI_FRCR_REGX, frcr_mask, frcr);
/* DAI clock master masks */
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBM_CFM:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_BC_FC:
/* codec is master */
cr1 |= SAI_XCR1_SLAVE;
sai->master = false;
break;
- case SND_SOC_DAIFMT_CBS_CFS:
+ case SND_SOC_DAIFMT_BP_FP:
sai->master = true;
break;
default:
dev_err(cpu_dai->dev, "Unsupported mode %#x\n",
- fmt & SND_SOC_DAIFMT_MASTER_MASK);
+ fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK);
return -EINVAL;
}
@@ -1338,6 +1336,7 @@ static const struct snd_dmaengine_pcm_config stm32_sai_pcm_config_spdif = {
static const struct snd_soc_component_driver stm32_component = {
.name = "stm32-sai",
+ .legacy_dai_naming = 1,
};
static const struct of_device_id stm32_sai_sub_ids[] = {
diff --git a/sound/soc/stm/stm32_spdifrx.c b/sound/soc/stm/stm32_spdifrx.c
index 6f7882c4fe6a..0f7146756717 100644
--- a/sound/soc/stm/stm32_spdifrx.c
+++ b/sound/soc/stm/stm32_spdifrx.c
@@ -888,6 +888,7 @@ static const struct snd_pcm_hardware stm32_spdifrx_pcm_hw = {
static const struct snd_soc_component_driver stm32_spdifrx_component = {
.name = "stm32-spdifrx",
+ .legacy_dai_naming = 1,
};
static const struct snd_dmaengine_pcm_config stm32_spdifrx_pcm_config = {
diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c
index 60712f24ade5..830beb38bf15 100644
--- a/sound/soc/sunxi/sun4i-codec.c
+++ b/sound/soc/sunxi/sun4i-codec.c
@@ -250,37 +250,33 @@ struct sun4i_codec {
static void sun4i_codec_start_playback(struct sun4i_codec *scodec)
{
/* Flush TX FIFO */
- regmap_update_bits(scodec->regmap, SUN4I_CODEC_DAC_FIFOC,
- BIT(SUN4I_CODEC_DAC_FIFOC_FIFO_FLUSH),
- BIT(SUN4I_CODEC_DAC_FIFOC_FIFO_FLUSH));
+ regmap_set_bits(scodec->regmap, SUN4I_CODEC_DAC_FIFOC,
+ BIT(SUN4I_CODEC_DAC_FIFOC_FIFO_FLUSH));
/* Enable DAC DRQ */
- regmap_update_bits(scodec->regmap, SUN4I_CODEC_DAC_FIFOC,
- BIT(SUN4I_CODEC_DAC_FIFOC_DAC_DRQ_EN),
- BIT(SUN4I_CODEC_DAC_FIFOC_DAC_DRQ_EN));
+ regmap_set_bits(scodec->regmap, SUN4I_CODEC_DAC_FIFOC,
+ BIT(SUN4I_CODEC_DAC_FIFOC_DAC_DRQ_EN));
}
static void sun4i_codec_stop_playback(struct sun4i_codec *scodec)
{
/* Disable DAC DRQ */
- regmap_update_bits(scodec->regmap, SUN4I_CODEC_DAC_FIFOC,
- BIT(SUN4I_CODEC_DAC_FIFOC_DAC_DRQ_EN),
- 0);
+ regmap_clear_bits(scodec->regmap, SUN4I_CODEC_DAC_FIFOC,
+ BIT(SUN4I_CODEC_DAC_FIFOC_DAC_DRQ_EN));
}
static void sun4i_codec_start_capture(struct sun4i_codec *scodec)
{
/* Enable ADC DRQ */
- regmap_field_update_bits(scodec->reg_adc_fifoc,
- BIT(SUN4I_CODEC_ADC_FIFOC_ADC_DRQ_EN),
- BIT(SUN4I_CODEC_ADC_FIFOC_ADC_DRQ_EN));
+ regmap_field_set_bits(scodec->reg_adc_fifoc,
+ BIT(SUN4I_CODEC_ADC_FIFOC_ADC_DRQ_EN));
}
static void sun4i_codec_stop_capture(struct sun4i_codec *scodec)
{
/* Disable ADC DRQ */
- regmap_field_update_bits(scodec->reg_adc_fifoc,
- BIT(SUN4I_CODEC_ADC_FIFOC_ADC_DRQ_EN), 0);
+ regmap_field_clear_bits(scodec->reg_adc_fifoc,
+ BIT(SUN4I_CODEC_ADC_FIFOC_ADC_DRQ_EN));
}
static int sun4i_codec_trigger(struct snd_pcm_substream *substream, int cmd,
@@ -323,8 +319,7 @@ static int sun4i_codec_prepare_capture(struct snd_pcm_substream *substream,
/* Flush RX FIFO */
- regmap_field_update_bits(scodec->reg_adc_fifoc,
- BIT(SUN4I_CODEC_ADC_FIFOC_FIFO_FLUSH),
+ regmap_field_set_bits(scodec->reg_adc_fifoc,
BIT(SUN4I_CODEC_ADC_FIFOC_FIFO_FLUSH));
@@ -365,8 +360,7 @@ static int sun4i_codec_prepare_playback(struct snd_pcm_substream *substream,
u32 val;
/* Flush the TX FIFO */
- regmap_update_bits(scodec->regmap, SUN4I_CODEC_DAC_FIFOC,
- BIT(SUN4I_CODEC_DAC_FIFOC_FIFO_FLUSH),
+ regmap_set_bits(scodec->regmap, SUN4I_CODEC_DAC_FIFOC,
BIT(SUN4I_CODEC_DAC_FIFOC_FIFO_FLUSH));
/* Set TX FIFO Empty Trigger Level */
@@ -386,9 +380,8 @@ static int sun4i_codec_prepare_playback(struct snd_pcm_substream *substream,
val);
/* Send zeros when we have an underrun */
- regmap_update_bits(scodec->regmap, SUN4I_CODEC_DAC_FIFOC,
- BIT(SUN4I_CODEC_DAC_FIFOC_SEND_LASAT),
- 0);
+ regmap_clear_bits(scodec->regmap, SUN4I_CODEC_DAC_FIFOC,
+ BIT(SUN4I_CODEC_DAC_FIFOC_SEND_LASAT));
return 0;
};
@@ -485,33 +478,27 @@ static int sun4i_codec_hw_params_capture(struct sun4i_codec *scodec,
/* Set the number of channels we want to use */
if (params_channels(params) == 1)
- regmap_field_update_bits(scodec->reg_adc_fifoc,
- BIT(SUN4I_CODEC_ADC_FIFOC_MONO_EN),
+ regmap_field_set_bits(scodec->reg_adc_fifoc,
BIT(SUN4I_CODEC_ADC_FIFOC_MONO_EN));
else
- regmap_field_update_bits(scodec->reg_adc_fifoc,
- BIT(SUN4I_CODEC_ADC_FIFOC_MONO_EN),
- 0);
+ regmap_field_clear_bits(scodec->reg_adc_fifoc,
+ BIT(SUN4I_CODEC_ADC_FIFOC_MONO_EN));
/* Set the number of sample bits to either 16 or 24 bits */
if (hw_param_interval(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS)->min == 32) {
- regmap_field_update_bits(scodec->reg_adc_fifoc,
- BIT(SUN4I_CODEC_ADC_FIFOC_RX_SAMPLE_BITS),
+ regmap_field_set_bits(scodec->reg_adc_fifoc,
BIT(SUN4I_CODEC_ADC_FIFOC_RX_SAMPLE_BITS));
- regmap_field_update_bits(scodec->reg_adc_fifoc,
- BIT(SUN4I_CODEC_ADC_FIFOC_RX_FIFO_MODE),
- 0);
+ regmap_field_clear_bits(scodec->reg_adc_fifoc,
+ BIT(SUN4I_CODEC_ADC_FIFOC_RX_FIFO_MODE));
scodec->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
} else {
- regmap_field_update_bits(scodec->reg_adc_fifoc,
- BIT(SUN4I_CODEC_ADC_FIFOC_RX_SAMPLE_BITS),
- 0);
+ regmap_field_clear_bits(scodec->reg_adc_fifoc,
+ BIT(SUN4I_CODEC_ADC_FIFOC_RX_SAMPLE_BITS));
/* Fill most significant bits with valid data MSB */
- regmap_field_update_bits(scodec->reg_adc_fifoc,
- BIT(SUN4I_CODEC_ADC_FIFOC_RX_FIFO_MODE),
+ regmap_field_set_bits(scodec->reg_adc_fifoc,
BIT(SUN4I_CODEC_ADC_FIFOC_RX_FIFO_MODE));
scodec->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
@@ -543,24 +530,20 @@ static int sun4i_codec_hw_params_playback(struct sun4i_codec *scodec,
/* Set the number of sample bits to either 16 or 24 bits */
if (hw_param_interval(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS)->min == 32) {
- regmap_update_bits(scodec->regmap, SUN4I_CODEC_DAC_FIFOC,
- BIT(SUN4I_CODEC_DAC_FIFOC_TX_SAMPLE_BITS),
+ regmap_set_bits(scodec->regmap, SUN4I_CODEC_DAC_FIFOC,
BIT(SUN4I_CODEC_DAC_FIFOC_TX_SAMPLE_BITS));
/* Set TX FIFO mode to padding the LSBs with 0 */
- regmap_update_bits(scodec->regmap, SUN4I_CODEC_DAC_FIFOC,
- BIT(SUN4I_CODEC_DAC_FIFOC_TX_FIFO_MODE),
- 0);
+ regmap_clear_bits(scodec->regmap, SUN4I_CODEC_DAC_FIFOC,
+ BIT(SUN4I_CODEC_DAC_FIFOC_TX_FIFO_MODE));
scodec->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
} else {
- regmap_update_bits(scodec->regmap, SUN4I_CODEC_DAC_FIFOC,
- BIT(SUN4I_CODEC_DAC_FIFOC_TX_SAMPLE_BITS),
- 0);
+ regmap_clear_bits(scodec->regmap, SUN4I_CODEC_DAC_FIFOC,
+ BIT(SUN4I_CODEC_DAC_FIFOC_TX_SAMPLE_BITS));
/* Set TX FIFO mode to repeat the MSB */
- regmap_update_bits(scodec->regmap, SUN4I_CODEC_DAC_FIFOC,
- BIT(SUN4I_CODEC_DAC_FIFOC_TX_FIFO_MODE),
+ regmap_set_bits(scodec->regmap, SUN4I_CODEC_DAC_FIFOC,
BIT(SUN4I_CODEC_DAC_FIFOC_TX_FIFO_MODE));
scodec->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
@@ -624,8 +607,7 @@ static int sun4i_codec_startup(struct snd_pcm_substream *substream,
* Stop issuing DRQ when we have room for less than 16 samples
* in our TX FIFO
*/
- regmap_update_bits(scodec->regmap, SUN4I_CODEC_DAC_FIFOC,
- 3 << SUN4I_CODEC_DAC_FIFOC_DRQ_CLR_CNT,
+ regmap_set_bits(scodec->regmap, SUN4I_CODEC_DAC_FIFOC,
3 << SUN4I_CODEC_DAC_FIFOC_DRQ_CLR_CNT);
return clk_prepare_enable(scodec->clk_module);
@@ -899,7 +881,6 @@ static const struct snd_soc_component_driver sun4i_codec_codec = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct snd_soc_component_driver sun7i_codec_codec = {
@@ -912,7 +893,6 @@ static const struct snd_soc_component_driver sun7i_codec_codec = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
/*** sun6i Codec ***/
@@ -1220,7 +1200,6 @@ static const struct snd_soc_component_driver sun6i_codec_codec = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
/* sun8i A23 codec */
@@ -1248,11 +1227,11 @@ static const struct snd_soc_component_driver sun8i_a23_codec_codec = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct snd_soc_component_driver sun4i_codec_component = {
- .name = "sun4i-codec",
+ .name = "sun4i-codec",
+ .legacy_dai_naming = 1,
};
#define SUN4I_CODEC_RATES SNDRV_PCM_RATE_CONTINUOUS
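Editor's note: the sun4i-codec hunks above are a mechanical conversion of regmap_update_bits(map, reg, mask, mask) to regmap_set_bits(map, reg, mask) and regmap_update_bits(map, reg, mask, 0) to regmap_clear_bits(map, reg, mask), with the regmap_field_set_bits()/regmap_field_clear_bits() variants for register fields. A short equivalence sketch, assuming a valid struct regmap pointer:

#include <linux/bits.h>
#include <linux/regmap.h>

/* Illustrative only: each pair programs the same register bits. */
static void regmap_bits_equivalence(struct regmap *map)
{
	/* Set bit 3 of register 0x10: the long form ... */
	regmap_update_bits(map, 0x10, BIT(3), BIT(3));
	/* ... and the shorthand used after this series. */
	regmap_set_bits(map, 0x10, BIT(3));

	/* Clear bit 3 of register 0x10: the long form ... */
	regmap_update_bits(map, 0x10, BIT(3), 0);
	/* ... and the shorthand. */
	regmap_clear_bits(map, 0x10, BIT(3));
}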
diff --git a/sound/soc/sunxi/sun4i-i2s.c b/sound/soc/sunxi/sun4i-i2s.c
index 7047f71629ab..6028871825ba 100644
--- a/sound/soc/sunxi/sun4i-i2s.c
+++ b/sound/soc/sunxi/sun4i-i2s.c
@@ -161,6 +161,8 @@ struct sun4i_i2s;
* @field_clkdiv_mclk_en: regmap field to enable mclk output.
* @field_fmt_wss: regmap field to set word select size.
* @field_fmt_sr: regmap field to set sample resolution.
+ * @num_din_pins: number of data-input (DIN) pins
+ * @num_dout_pins: number of data-output (DOUT) pins (currently set but unused)
* @bclk_dividers: bit clock dividers array
* @num_bclk_dividers: number of bit clock dividers
* @mclk_dividers: mclk dividers array
@@ -702,13 +704,13 @@ static int sun4i_i2s_set_soc_fmt(const struct sun4i_i2s *i2s,
SUN4I_I2S_FMT0_FMT_MASK, val);
/* DAI clock master masks */
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBS_CFS:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_BP_FP:
/* BCLK and LRCLK master */
val = SUN4I_I2S_CTRL_MODE_MASTER;
break;
- case SND_SOC_DAIFMT_CBM_CFM:
+ case SND_SOC_DAIFMT_BC_FC:
/* BCLK and LRCLK slave */
val = SUN4I_I2S_CTRL_MODE_SLAVE;
break;
@@ -802,13 +804,13 @@ static int sun8i_i2s_set_soc_fmt(const struct sun4i_i2s *i2s,
SUN8I_I2S_TX_CHAN_OFFSET(offset));
/* DAI clock master masks */
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBS_CFS:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_BP_FP:
/* BCLK and LRCLK master */
val = SUN8I_I2S_CTRL_BCLK_OUT | SUN8I_I2S_CTRL_LRCK_OUT;
break;
- case SND_SOC_DAIFMT_CBM_CFM:
+ case SND_SOC_DAIFMT_BC_FC:
/* BCLK and LRCLK slave */
val = 0;
break;
@@ -909,13 +911,13 @@ static int sun50i_h6_i2s_set_soc_fmt(const struct sun4i_i2s *i2s,
SUN50I_H6_I2S_TX_CHAN_SEL_OFFSET(offset));
/* DAI clock master masks */
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBS_CFS:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_BP_FP:
/* BCLK and LRCLK master */
val = SUN8I_I2S_CTRL_BCLK_OUT | SUN8I_I2S_CTRL_LRCK_OUT;
break;
- case SND_SOC_DAIFMT_CBM_CFM:
+ case SND_SOC_DAIFMT_BC_FC:
/* BCLK and LRCLK slave */
val = 0;
break;
@@ -1123,7 +1125,8 @@ static struct snd_soc_dai_driver sun4i_i2s_dai = {
};
static const struct snd_soc_component_driver sun4i_i2s_component = {
- .name = "sun4i-dai",
+ .name = "sun4i-dai",
+ .legacy_dai_naming = 1,
};
static bool sun4i_i2s_rd_reg(struct device *dev, unsigned int reg)
diff --git a/sound/soc/sunxi/sun4i-spdif.c b/sound/soc/sunxi/sun4i-spdif.c
index 17090f43150e..bcceebca915a 100644
--- a/sound/soc/sunxi/sun4i-spdif.c
+++ b/sound/soc/sunxi/sun4i-spdif.c
@@ -583,7 +583,8 @@ static const struct of_device_id sun4i_spdif_of_match[] = {
MODULE_DEVICE_TABLE(of, sun4i_spdif_of_match);
static const struct snd_soc_component_driver sun4i_spdif_component = {
- .name = "sun4i-spdif",
+ .name = "sun4i-spdif",
+ .legacy_dai_naming = 1,
};
static int sun4i_spdif_runtime_suspend(struct device *dev)
diff --git a/sound/soc/sunxi/sun50i-codec-analog.c b/sound/soc/sunxi/sun50i-codec-analog.c
index a41e25ad0aaf..e1e5e8de0130 100644
--- a/sound/soc/sunxi/sun50i-codec-analog.c
+++ b/sound/soc/sunxi/sun50i-codec-analog.c
@@ -117,6 +117,7 @@
#define SUN50I_ADDA_HS_MBIAS_CTRL_MMICBIASEN 7
#define SUN50I_ADDA_JACK_MIC_CTRL 0x1d
+#define SUN50I_ADDA_JACK_MIC_CTRL_INNERRESEN 6
#define SUN50I_ADDA_JACK_MIC_CTRL_HMICBIASEN 5
/* mixer controls */
@@ -507,6 +508,7 @@ static int sun50i_codec_analog_probe(struct platform_device *pdev)
{
struct regmap *regmap;
void __iomem *base;
+ bool enable;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base)) {
@@ -520,6 +522,12 @@ static int sun50i_codec_analog_probe(struct platform_device *pdev)
return PTR_ERR(regmap);
}
+ enable = device_property_read_bool(&pdev->dev,
+ "allwinner,internal-bias-resistor");
+ regmap_update_bits(regmap, SUN50I_ADDA_JACK_MIC_CTRL,
+ BIT(SUN50I_ADDA_JACK_MIC_CTRL_INNERRESEN),
+ enable << SUN50I_ADDA_JACK_MIC_CTRL_INNERRESEN);
+
return devm_snd_soc_register_component(&pdev->dev,
&sun50i_codec_analog_cmpnt_drv,
NULL, 0);
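Editor's note: the sun50i probe hunk above reads an optional boolean firmware property and mirrors it into a single register bit (bool promotes to 0 or 1, so enable << shift is safe). A hedged sketch of the same pattern with placeholder register and property names:

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/property.h>
#include <linux/regmap.h>

#define EXAMPLE_REG	0x1d	/* stands in for SUN50I_ADDA_JACK_MIC_CTRL */
#define EXAMPLE_BIT	6	/* stands in for ..._JACK_MIC_CTRL_INNERRESEN */

/* Map an optional boolean firmware property onto one register bit. */
static void example_apply_bool_property(struct device *dev, struct regmap *map)
{
	bool enable = device_property_read_bool(dev, "vendor,example-property");

	regmap_update_bits(map, EXAMPLE_REG, BIT(EXAMPLE_BIT),
			   enable ? BIT(EXAMPLE_BIT) : 0);
}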
diff --git a/sound/soc/sunxi/sun8i-codec.c b/sound/soc/sunxi/sun8i-codec.c
index 0bea2162f68d..9844978d91e6 100644
--- a/sound/soc/sunxi/sun8i-codec.c
+++ b/sound/soc/sunxi/sun8i-codec.c
@@ -286,11 +286,11 @@ static int sun8i_codec_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
u32 dsp_format, format, invert, value;
/* clock masters */
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBS_CFS: /* Codec slave, DAI master */
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_CBC_CFC: /* Codec slave, DAI master */
value = 0x1;
break;
- case SND_SOC_DAIFMT_CBM_CFM: /* Codec Master, DAI slave */
+ case SND_SOC_DAIFMT_CBP_CFP: /* Codec Master, DAI slave */
value = 0x0;
break;
default:
@@ -1278,7 +1278,6 @@ static const struct snd_soc_component_driver sun8i_soc_component = {
.probe = sun8i_codec_component_probe,
.idle_bias_on = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static const struct regmap_config sun8i_codec_regmap_config = {
diff --git a/sound/soc/tegra/Kconfig b/sound/soc/tegra/Kconfig
index 2482d9867357..b6712a3d1fa1 100644
--- a/sound/soc/tegra/Kconfig
+++ b/sound/soc/tegra/Kconfig
@@ -85,6 +85,15 @@ config SND_SOC_TEGRA210_I2S
compatible devices.
Say Y or M if you want to add support for Tegra210 I2S module.
+config SND_SOC_TEGRA210_OPE
+ tristate "Tegra210 OPE module"
+ help
+ Enable the Output Processing Engine (OPE), which includes the
+ Parametric Equalizer (PEQ) and Multi Band Dynamic Range Compressor
+ (MBDRC) sub-blocks for data processing. It supports up to 8
+ channels.
+ Say Y or M if you want to add support for the Tegra210 OPE module.
+
config SND_SOC_TEGRA186_ASRC
tristate "Tegra186 ASRC module"
help
diff --git a/sound/soc/tegra/Makefile b/sound/soc/tegra/Makefile
index 70a498ddb2fa..b723c78e665d 100644
--- a/sound/soc/tegra/Makefile
+++ b/sound/soc/tegra/Makefile
@@ -19,6 +19,7 @@ snd-soc-tegra210-sfc-objs := tegra210_sfc.o
snd-soc-tegra210-amx-objs := tegra210_amx.o
snd-soc-tegra210-adx-objs := tegra210_adx.o
snd-soc-tegra210-mixer-objs := tegra210_mixer.o
+snd-soc-tegra210-ope-objs := tegra210_ope.o tegra210_mbdrc.o tegra210_peq.o
obj-$(CONFIG_SND_SOC_TEGRA) += snd-soc-tegra-pcm.o
obj-$(CONFIG_SND_SOC_TEGRA20_AC97) += snd-soc-tegra20-ac97.o
@@ -38,6 +39,7 @@ obj-$(CONFIG_SND_SOC_TEGRA210_SFC) += snd-soc-tegra210-sfc.o
obj-$(CONFIG_SND_SOC_TEGRA210_AMX) += snd-soc-tegra210-amx.o
obj-$(CONFIG_SND_SOC_TEGRA210_ADX) += snd-soc-tegra210-adx.o
obj-$(CONFIG_SND_SOC_TEGRA210_MIXER) += snd-soc-tegra210-mixer.o
+obj-$(CONFIG_SND_SOC_TEGRA210_OPE) += snd-soc-tegra210-ope.o
# Tegra machine Support
snd-soc-tegra-wm8903-objs := tegra_wm8903.o
diff --git a/sound/soc/tegra/tegra20_ac97.c b/sound/soc/tegra/tegra20_ac97.c
index c454a34c15c4..87facfbcdd11 100644
--- a/sound/soc/tegra/tegra20_ac97.c
+++ b/sound/soc/tegra/tegra20_ac97.c
@@ -239,7 +239,8 @@ static struct snd_soc_dai_driver tegra20_ac97_dai = {
};
static const struct snd_soc_component_driver tegra20_ac97_component = {
- .name = DRV_NAME,
+ .name = DRV_NAME,
+ .legacy_dai_naming = 1,
};
static bool tegra20_ac97_wr_rd_reg(struct device *dev, unsigned int reg)
@@ -353,6 +354,7 @@ static int tegra20_ac97_platform_probe(struct platform_device *pdev)
}
} else {
dev_err(&pdev->dev, "no codec-reset GPIO supplied\n");
+ ret = -EINVAL;
goto err_clk_put;
}
@@ -360,6 +362,7 @@ static int tegra20_ac97_platform_probe(struct platform_device *pdev)
"nvidia,codec-sync-gpio", 0);
if (!gpio_is_valid(ac97->sync_gpio)) {
dev_err(&pdev->dev, "no codec-sync GPIO supplied\n");
+ ret = -EINVAL;
goto err_clk_put;
}
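Editor's note: the two tegra20_ac97 hunks above fix error paths that jumped to err_clk_put with a stale (possibly zero) ret, so a missing GPIO could be reported as success. A small sketch of the goto-based error-path pattern they restore:

#include <errno.h>

/* Hypothetical probe-style function showing the error-path pattern. */
int example_probe(int have_reset_gpio, int have_sync_gpio)
{
	int ret = 0;

	if (!have_reset_gpio) {
		ret = -EINVAL;		/* must be set before jumping */
		goto err;
	}

	if (!have_sync_gpio) {
		ret = -EINVAL;
		goto err;
	}

	return 0;

err:
	/* undo anything acquired so far, then propagate the error */
	return ret;
}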
diff --git a/sound/soc/tegra/tegra20_das.c b/sound/soc/tegra/tegra20_das.c
index 69c651274c12..c620ab0c601f 100644
--- a/sound/soc/tegra/tegra20_das.c
+++ b/sound/soc/tegra/tegra20_das.c
@@ -13,84 +13,119 @@
#include <linux/regmap.h>
#include <linux/slab.h>
#include <sound/soc.h>
-#include "tegra20_das.h"
#define DRV_NAME "tegra20-das"
-static struct tegra20_das *das;
+/* Register TEGRA20_DAS_DAP_CTRL_SEL */
+#define TEGRA20_DAS_DAP_CTRL_SEL 0x00
+#define TEGRA20_DAS_DAP_CTRL_SEL_COUNT 5
+#define TEGRA20_DAS_DAP_CTRL_SEL_STRIDE 4
+#define TEGRA20_DAS_DAP_CTRL_SEL_DAP_MS_SEL_P 31
+#define TEGRA20_DAS_DAP_CTRL_SEL_DAP_MS_SEL_S 1
+#define TEGRA20_DAS_DAP_CTRL_SEL_DAP_SDATA1_TX_RX_P 30
+#define TEGRA20_DAS_DAP_CTRL_SEL_DAP_SDATA1_TX_RX_S 1
+#define TEGRA20_DAS_DAP_CTRL_SEL_DAP_SDATA2_TX_RX_P 29
+#define TEGRA20_DAS_DAP_CTRL_SEL_DAP_SDATA2_TX_RX_S 1
+#define TEGRA20_DAS_DAP_CTRL_SEL_DAP_CTRL_SEL_P 0
+#define TEGRA20_DAS_DAP_CTRL_SEL_DAP_CTRL_SEL_S 5
+
+/* Values for field TEGRA20_DAS_DAP_CTRL_SEL_DAP_CTRL_SEL */
+#define TEGRA20_DAS_DAP_SEL_DAC1 0
+#define TEGRA20_DAS_DAP_SEL_DAC2 1
+#define TEGRA20_DAS_DAP_SEL_DAC3 2
+#define TEGRA20_DAS_DAP_SEL_DAP1 16
+#define TEGRA20_DAS_DAP_SEL_DAP2 17
+#define TEGRA20_DAS_DAP_SEL_DAP3 18
+#define TEGRA20_DAS_DAP_SEL_DAP4 19
+#define TEGRA20_DAS_DAP_SEL_DAP5 20
+
+/* Register TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL */
+#define TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL 0x40
+#define TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_COUNT 3
+#define TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_STRIDE 4
+#define TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_SDATA2_SEL_P 28
+#define TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_SDATA2_SEL_S 4
+#define TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_SDATA1_SEL_P 24
+#define TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_SDATA1_SEL_S 4
+#define TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_CLK_SEL_P 0
+#define TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_CLK_SEL_S 4
-static inline void tegra20_das_write(u32 reg, u32 val)
-{
- regmap_write(das->regmap, reg, val);
-}
-
-static inline u32 tegra20_das_read(u32 reg)
-{
- u32 val;
+/*
+ * Values for:
+ * TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_SDATA2_SEL
+ * TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_SDATA1_SEL
+ * TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_CLK_SEL
+ */
+#define TEGRA20_DAS_DAC_SEL_DAP1 0
+#define TEGRA20_DAS_DAC_SEL_DAP2 1
+#define TEGRA20_DAS_DAC_SEL_DAP3 2
+#define TEGRA20_DAS_DAC_SEL_DAP4 3
+#define TEGRA20_DAS_DAC_SEL_DAP5 4
- regmap_read(das->regmap, reg, &val);
- return val;
-}
+/*
+ * Names/IDs of the DACs/DAPs.
+ */
-int tegra20_das_connect_dap_to_dac(int dap, int dac)
-{
- u32 addr;
- u32 reg;
+#define TEGRA20_DAS_DAP_ID_1 0
+#define TEGRA20_DAS_DAP_ID_2 1
+#define TEGRA20_DAS_DAP_ID_3 2
+#define TEGRA20_DAS_DAP_ID_4 3
+#define TEGRA20_DAS_DAP_ID_5 4
- if (!das)
- return -ENODEV;
+#define TEGRA20_DAS_DAC_ID_1 0
+#define TEGRA20_DAS_DAC_ID_2 1
+#define TEGRA20_DAS_DAC_ID_3 2
- addr = TEGRA20_DAS_DAP_CTRL_SEL +
- (dap * TEGRA20_DAS_DAP_CTRL_SEL_STRIDE);
- reg = dac << TEGRA20_DAS_DAP_CTRL_SEL_DAP_CTRL_SEL_P;
+struct tegra20_das {
+ struct regmap *regmap;
+};
- tegra20_das_write(addr, reg);
+/*
+ * Terminology:
+ * DAS: Digital audio switch (HW module controlled by this driver)
+ * DAP: Digital audio port (port/pins on Tegra device)
+ * DAC: Digital audio controller (e.g. I2S or AC97 controller elsewhere)
+ *
+ * The Tegra DAS is a mux/cross-bar which can connect each DAP to a specific
+ * DAC, or another DAP. When DAPs are connected, one must be the master and
+ * one the slave. Each DAC allows selection of a specific DAP for input, to
+ * cater for the case where N DAPs are connected to 1 DAC for broadcast
+ * output.
+ *
+ * This driver is dumb; no attempt is made to ensure that a valid routing
+ * configuration is programmed.
+ */
- return 0;
+static inline void tegra20_das_write(struct tegra20_das *das, u32 reg, u32 val)
+{
+ regmap_write(das->regmap, reg, val);
}
-EXPORT_SYMBOL_GPL(tegra20_das_connect_dap_to_dac);
-int tegra20_das_connect_dap_to_dap(int dap, int otherdap, int master,
- int sdata1rx, int sdata2rx)
+static void tegra20_das_connect_dap_to_dac(struct tegra20_das *das, int dap, int dac)
{
u32 addr;
u32 reg;
- if (!das)
- return -ENODEV;
-
addr = TEGRA20_DAS_DAP_CTRL_SEL +
(dap * TEGRA20_DAS_DAP_CTRL_SEL_STRIDE);
- reg = (otherdap << TEGRA20_DAS_DAP_CTRL_SEL_DAP_CTRL_SEL_P) |
- (!!sdata2rx << TEGRA20_DAS_DAP_CTRL_SEL_DAP_SDATA2_TX_RX_P) |
- (!!sdata1rx << TEGRA20_DAS_DAP_CTRL_SEL_DAP_SDATA1_TX_RX_P) |
- (!!master << TEGRA20_DAS_DAP_CTRL_SEL_DAP_MS_SEL_P);
-
- tegra20_das_write(addr, reg);
+ reg = dac << TEGRA20_DAS_DAP_CTRL_SEL_DAP_CTRL_SEL_P;
- return 0;
+ tegra20_das_write(das, addr, reg);
}
-EXPORT_SYMBOL_GPL(tegra20_das_connect_dap_to_dap);
-int tegra20_das_connect_dac_to_dap(int dac, int dap)
+static void tegra20_das_connect_dac_to_dap(struct tegra20_das *das, int dac, int dap)
{
u32 addr;
u32 reg;
- if (!das)
- return -ENODEV;
-
addr = TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL +
(dac * TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_STRIDE);
reg = dap << TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_CLK_SEL_P |
dap << TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_SDATA1_SEL_P |
dap << TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_SDATA2_SEL_P;
- tegra20_das_write(addr, reg);
-
- return 0;
+ tegra20_das_write(das, addr, reg);
}
-EXPORT_SYMBOL_GPL(tegra20_das_connect_dac_to_dap);
#define LAST_REG(name) \
(TEGRA20_DAS_##name + \
@@ -120,73 +155,31 @@ static const struct regmap_config tegra20_das_regmap_config = {
static int tegra20_das_probe(struct platform_device *pdev)
{
void __iomem *regs;
- int ret = 0;
-
- if (das)
- return -ENODEV;
+ struct tegra20_das *das;
das = devm_kzalloc(&pdev->dev, sizeof(struct tegra20_das), GFP_KERNEL);
- if (!das) {
- ret = -ENOMEM;
- goto err;
- }
- das->dev = &pdev->dev;
+ if (!das)
+ return -ENOMEM;
regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(regs)) {
- ret = PTR_ERR(regs);
- goto err;
- }
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
das->regmap = devm_regmap_init_mmio(&pdev->dev, regs,
&tegra20_das_regmap_config);
if (IS_ERR(das->regmap)) {
dev_err(&pdev->dev, "regmap init failed\n");
- ret = PTR_ERR(das->regmap);
- goto err;
- }
-
- ret = tegra20_das_connect_dap_to_dac(TEGRA20_DAS_DAP_ID_1,
- TEGRA20_DAS_DAP_SEL_DAC1);
- if (ret) {
- dev_err(&pdev->dev, "Can't set up DAS DAP connection\n");
- goto err;
- }
- ret = tegra20_das_connect_dac_to_dap(TEGRA20_DAS_DAC_ID_1,
- TEGRA20_DAS_DAC_SEL_DAP1);
- if (ret) {
- dev_err(&pdev->dev, "Can't set up DAS DAC connection\n");
- goto err;
- }
-
- ret = tegra20_das_connect_dap_to_dac(TEGRA20_DAS_DAP_ID_3,
- TEGRA20_DAS_DAP_SEL_DAC3);
- if (ret) {
- dev_err(&pdev->dev, "Can't set up DAS DAP connection\n");
- goto err;
+ return PTR_ERR(das->regmap);
}
- ret = tegra20_das_connect_dac_to_dap(TEGRA20_DAS_DAC_ID_3,
- TEGRA20_DAS_DAC_SEL_DAP3);
- if (ret) {
- dev_err(&pdev->dev, "Can't set up DAS DAC connection\n");
- goto err;
- }
-
- platform_set_drvdata(pdev, das);
-
- return 0;
-
-err:
- das = NULL;
- return ret;
-}
-
-static int tegra20_das_remove(struct platform_device *pdev)
-{
- if (!das)
- return -ENODEV;
- das = NULL;
+ tegra20_das_connect_dap_to_dac(das, TEGRA20_DAS_DAP_ID_1,
+ TEGRA20_DAS_DAP_SEL_DAC1);
+ tegra20_das_connect_dac_to_dap(das, TEGRA20_DAS_DAC_ID_1,
+ TEGRA20_DAS_DAC_SEL_DAP1);
+ tegra20_das_connect_dap_to_dac(das, TEGRA20_DAS_DAP_ID_3,
+ TEGRA20_DAS_DAP_SEL_DAC3);
+ tegra20_das_connect_dac_to_dap(das, TEGRA20_DAS_DAC_ID_3,
+ TEGRA20_DAS_DAC_SEL_DAP3);
return 0;
}
@@ -198,7 +191,6 @@ static const struct of_device_id tegra20_das_of_match[] = {
static struct platform_driver tegra20_das_driver = {
.probe = tegra20_das_probe,
- .remove = tegra20_das_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = tegra20_das_of_match,
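Editor's note: the reworked tegra20_das probe above uses only devm-managed resources (which is why the remove callback can go away), and the routing helpers reduce to simple address/value arithmetic: register = base + port * stride, value = selector << field offset. A tiny runnable example of that arithmetic for the DAP3-to-DAC3 route set up in probe:

#include <stdint.h>
#include <stdio.h>

#define DAP_CTRL_SEL_BASE	0x00	/* TEGRA20_DAS_DAP_CTRL_SEL */
#define DAP_CTRL_SEL_STRIDE	4	/* one 32-bit register per DAP */
#define DAP_CTRL_SEL_SHIFT	0	/* ..._DAP_CTRL_SEL_P */

#define DAP_ID_3		2	/* TEGRA20_DAS_DAP_ID_3 */
#define DAP_SEL_DAC3		2	/* TEGRA20_DAS_DAP_SEL_DAC3 */

int main(void)
{
	/* Route DAP3 to DAC3, mirroring tegra20_das_connect_dap_to_dac(). */
	uint32_t addr = DAP_CTRL_SEL_BASE + DAP_ID_3 * DAP_CTRL_SEL_STRIDE;
	uint32_t val = DAP_SEL_DAC3 << DAP_CTRL_SEL_SHIFT;

	/* Prints: write 0x02 to register 0x08 */
	printf("write 0x%02x to register 0x%02x\n", val, addr);

	return 0;
}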
diff --git a/sound/soc/tegra/tegra20_das.h b/sound/soc/tegra/tegra20_das.h
deleted file mode 100644
index 18e832ded73a..000000000000
--- a/sound/soc/tegra/tegra20_das.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * tegra20_das.h - Definitions for Tegra20 DAS driver
- *
- * Author: Stephen Warren <swarren@nvidia.com>
- * Copyright (C) 2010,2012 - NVIDIA, Inc.
- */
-
-#ifndef __TEGRA20_DAS_H__
-#define __TEGRA20_DAS_H__
-
-/* Register TEGRA20_DAS_DAP_CTRL_SEL */
-#define TEGRA20_DAS_DAP_CTRL_SEL 0x00
-#define TEGRA20_DAS_DAP_CTRL_SEL_COUNT 5
-#define TEGRA20_DAS_DAP_CTRL_SEL_STRIDE 4
-#define TEGRA20_DAS_DAP_CTRL_SEL_DAP_MS_SEL_P 31
-#define TEGRA20_DAS_DAP_CTRL_SEL_DAP_MS_SEL_S 1
-#define TEGRA20_DAS_DAP_CTRL_SEL_DAP_SDATA1_TX_RX_P 30
-#define TEGRA20_DAS_DAP_CTRL_SEL_DAP_SDATA1_TX_RX_S 1
-#define TEGRA20_DAS_DAP_CTRL_SEL_DAP_SDATA2_TX_RX_P 29
-#define TEGRA20_DAS_DAP_CTRL_SEL_DAP_SDATA2_TX_RX_S 1
-#define TEGRA20_DAS_DAP_CTRL_SEL_DAP_CTRL_SEL_P 0
-#define TEGRA20_DAS_DAP_CTRL_SEL_DAP_CTRL_SEL_S 5
-
-/* Values for field TEGRA20_DAS_DAP_CTRL_SEL_DAP_CTRL_SEL */
-#define TEGRA20_DAS_DAP_SEL_DAC1 0
-#define TEGRA20_DAS_DAP_SEL_DAC2 1
-#define TEGRA20_DAS_DAP_SEL_DAC3 2
-#define TEGRA20_DAS_DAP_SEL_DAP1 16
-#define TEGRA20_DAS_DAP_SEL_DAP2 17
-#define TEGRA20_DAS_DAP_SEL_DAP3 18
-#define TEGRA20_DAS_DAP_SEL_DAP4 19
-#define TEGRA20_DAS_DAP_SEL_DAP5 20
-
-/* Register TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL */
-#define TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL 0x40
-#define TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_COUNT 3
-#define TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_STRIDE 4
-#define TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_SDATA2_SEL_P 28
-#define TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_SDATA2_SEL_S 4
-#define TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_SDATA1_SEL_P 24
-#define TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_SDATA1_SEL_S 4
-#define TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_CLK_SEL_P 0
-#define TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_CLK_SEL_S 4
-
-/*
- * Values for:
- * TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_SDATA2_SEL
- * TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_SDATA1_SEL
- * TEGRA20_DAS_DAC_INPUT_DATA_CLK_SEL_DAC_CLK_SEL
- */
-#define TEGRA20_DAS_DAC_SEL_DAP1 0
-#define TEGRA20_DAS_DAC_SEL_DAP2 1
-#define TEGRA20_DAS_DAC_SEL_DAP3 2
-#define TEGRA20_DAS_DAC_SEL_DAP4 3
-#define TEGRA20_DAS_DAC_SEL_DAP5 4
-
-/*
- * Names/IDs of the DACs/DAPs.
- */
-
-#define TEGRA20_DAS_DAP_ID_1 0
-#define TEGRA20_DAS_DAP_ID_2 1
-#define TEGRA20_DAS_DAP_ID_3 2
-#define TEGRA20_DAS_DAP_ID_4 3
-#define TEGRA20_DAS_DAP_ID_5 4
-
-#define TEGRA20_DAS_DAC_ID_1 0
-#define TEGRA20_DAS_DAC_ID_2 1
-#define TEGRA20_DAS_DAC_ID_3 2
-
-struct tegra20_das {
- struct device *dev;
- struct regmap *regmap;
-};
-
-/*
- * Terminology:
- * DAS: Digital audio switch (HW module controlled by this driver)
- * DAP: Digital audio port (port/pins on Tegra device)
- * DAC: Digital audio controller (e.g. I2S or AC97 controller elsewhere)
- *
- * The Tegra DAS is a mux/cross-bar which can connect each DAP to a specific
- * DAC, or another DAP. When DAPs are connected, one must be the master and
- * one the slave. Each DAC allows selection of a specific DAP for input, to
- * cater for the case where N DAPs are connected to 1 DAC for broadcast
- * output.
- *
- * This driver is dumb; no attempt is made to ensure that a valid routing
- * configuration is programmed.
- */
-
-/*
- * Connect a DAP to a DAC
- * dap_id: DAP to connect: TEGRA20_DAS_DAP_ID_*
- * dac_sel: DAC to connect to: TEGRA20_DAS_DAP_SEL_DAC*
- */
-extern int tegra20_das_connect_dap_to_dac(int dap, int dac);
-
-/*
- * Connect a DAP to another DAP
- * dap_id: DAP to connect: TEGRA20_DAS_DAP_ID_*
- * other_dap_sel: DAP to connect to: TEGRA20_DAS_DAP_SEL_DAP*
- * master: Is this DAP the master (1) or slave (0)
- * sdata1rx: Is this DAP's SDATA1 pin RX (1) or TX (0)
- * sdata2rx: Is this DAP's SDATA2 pin RX (1) or TX (0)
- */
-extern int tegra20_das_connect_dap_to_dap(int dap, int otherdap,
- int master, int sdata1rx,
- int sdata2rx);
-
-/*
- * Connect a DAC's input to a DAP
- * (DAC outputs are selected by the DAP)
- * dac_id: DAC ID to connect: TEGRA20_DAS_DAC_ID_*
- * dap_sel: DAP to receive input from: TEGRA20_DAS_DAC_SEL_DAP*
- */
-extern int tegra20_das_connect_dac_to_dap(int dac, int dap);
-
-#endif
diff --git a/sound/soc/tegra/tegra20_i2s.c b/sound/soc/tegra/tegra20_i2s.c
index 27365a877e47..fff0cd6588f5 100644
--- a/sound/soc/tegra/tegra20_i2s.c
+++ b/sound/soc/tegra/tegra20_i2s.c
@@ -95,11 +95,11 @@ static int tegra20_i2s_set_fmt(struct snd_soc_dai *dai,
}
mask |= TEGRA20_I2S_CTRL_MASTER_ENABLE;
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBS_CFS:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_BP_FP:
val |= TEGRA20_I2S_CTRL_MASTER_ENABLE;
break;
- case SND_SOC_DAIFMT_CBM_CFM:
+ case SND_SOC_DAIFMT_BC_FC:
break;
default:
return -EINVAL;
@@ -338,7 +338,8 @@ static const struct snd_soc_dai_driver tegra20_i2s_dai_template = {
};
static const struct snd_soc_component_driver tegra20_i2s_component = {
- .name = DRV_NAME,
+ .name = DRV_NAME,
+ .legacy_dai_naming = 1,
};
static bool tegra20_i2s_wr_rd_reg(struct device *dev, unsigned int reg)
diff --git a/sound/soc/tegra/tegra20_spdif.c b/sound/soc/tegra/tegra20_spdif.c
index 64c2f304f254..ca7b222e07d0 100644
--- a/sound/soc/tegra/tegra20_spdif.c
+++ b/sound/soc/tegra/tegra20_spdif.c
@@ -264,6 +264,7 @@ static struct snd_soc_dai_driver tegra20_spdif_dai = {
static const struct snd_soc_component_driver tegra20_spdif_component = {
.name = "tegra20-spdif",
+ .legacy_dai_naming = 1,
};
static bool tegra20_spdif_wr_rd_reg(struct device *dev, unsigned int reg)
diff --git a/sound/soc/tegra/tegra210_adx.c b/sound/soc/tegra/tegra210_adx.c
index 3785cade2d9a..49691d2cce50 100644
--- a/sound/soc/tegra/tegra210_adx.c
+++ b/sound/soc/tegra/tegra210_adx.c
@@ -191,7 +191,7 @@ static int tegra210_adx_put_byte_map(struct snd_kcontrol *kcontrol,
unsigned char *bytes_map = (unsigned char *)&adx->map;
int value = ucontrol->value.integer.value[0];
struct soc_mixer_control *mc =
- (struct soc_mixer_control *)kcontrol->private_value;;
+ (struct soc_mixer_control *)kcontrol->private_value;
if (value == bytes_map[mc->reg])
return 0;
diff --git a/sound/soc/tegra/tegra210_ahub.c b/sound/soc/tegra/tegra210_ahub.c
index e1f90daea7a1..b38d205b69cc 100644
--- a/sound/soc/tegra/tegra210_ahub.c
+++ b/sound/soc/tegra/tegra210_ahub.c
@@ -170,6 +170,11 @@ static struct snd_soc_dai_driver tegra210_ahub_dais[] = {
DAI(MIXER1 TX3),
DAI(MIXER1 TX4),
DAI(MIXER1 TX5),
+ /* XBAR -> OPE -> XBAR */
+ DAI(OPE1 RX),
+ DAI(OPE1 TX),
+ DAI(OPE2 RX),
+ DAI(OPE2 TX),
};
static struct snd_soc_dai_driver tegra186_ahub_dais[] = {
@@ -294,6 +299,9 @@ static struct snd_soc_dai_driver tegra186_ahub_dais[] = {
DAI(ASRC1 RX6),
DAI(ASRC1 TX6),
DAI(ASRC1 RX7),
+ /* XBAR -> OPE -> XBAR */
+ DAI(OPE1 RX),
+ DAI(OPE1 TX),
};
static const char * const tegra210_ahub_mux_texts[] = {
@@ -337,6 +345,8 @@ static const char * const tegra210_ahub_mux_texts[] = {
"MIXER1 TX3",
"MIXER1 TX4",
"MIXER1 TX5",
+ "OPE1",
+ "OPE2",
};
static const char * const tegra186_ahub_mux_texts[] = {
@@ -408,6 +418,7 @@ static const char * const tegra186_ahub_mux_texts[] = {
"ASRC1 TX4",
"ASRC1 TX5",
"ASRC1 TX6",
+ "OPE1",
};
static const unsigned int tegra210_ahub_mux_values[] = {
@@ -459,6 +470,9 @@ static const unsigned int tegra210_ahub_mux_values[] = {
MUX_VALUE(1, 2),
MUX_VALUE(1, 3),
MUX_VALUE(1, 4),
+ /* OPE */
+ MUX_VALUE(2, 0),
+ MUX_VALUE(2, 1),
};
static const unsigned int tegra186_ahub_mux_values[] = {
@@ -540,6 +554,8 @@ static const unsigned int tegra186_ahub_mux_values[] = {
MUX_VALUE(3, 27),
MUX_VALUE(3, 28),
MUX_VALUE(3, 29),
+ /* OPE */
+ MUX_VALUE(2, 0),
};
/* Controls for t210 */
@@ -584,6 +600,8 @@ MUX_ENUM_CTRL_DECL(t210_mixer17_tx, 0x26);
MUX_ENUM_CTRL_DECL(t210_mixer18_tx, 0x27);
MUX_ENUM_CTRL_DECL(t210_mixer19_tx, 0x28);
MUX_ENUM_CTRL_DECL(t210_mixer110_tx, 0x29);
+MUX_ENUM_CTRL_DECL(t210_ope1_tx, 0x40);
+MUX_ENUM_CTRL_DECL(t210_ope2_tx, 0x41);
/* Controls for t186 */
MUX_ENUM_CTRL_DECL_186(t186_admaif1_tx, 0x00);
@@ -657,6 +675,7 @@ MUX_ENUM_CTRL_DECL_186(t186_asrc14_tx, 0x6f);
MUX_ENUM_CTRL_DECL_186(t186_asrc15_tx, 0x70);
MUX_ENUM_CTRL_DECL_186(t186_asrc16_tx, 0x71);
MUX_ENUM_CTRL_DECL_186(t186_asrc17_tx, 0x72);
+MUX_ENUM_CTRL_DECL_186(t186_ope1_tx, 0x40);
/* Controls for t234 */
MUX_ENUM_CTRL_DECL_234(t234_mvc1_tx, 0x44);
@@ -758,6 +777,8 @@ static const struct snd_soc_dapm_widget tegra210_ahub_widgets[] = {
TX_WIDGETS("MIXER1 TX3"),
TX_WIDGETS("MIXER1 TX4"),
TX_WIDGETS("MIXER1 TX5"),
+ WIDGETS("OPE1", t210_ope1_tx),
+ WIDGETS("OPE2", t210_ope2_tx),
};
static const struct snd_soc_dapm_widget tegra186_ahub_widgets[] = {
@@ -867,6 +888,7 @@ static const struct snd_soc_dapm_widget tegra186_ahub_widgets[] = {
TX_WIDGETS("ASRC1 TX4"),
TX_WIDGETS("ASRC1 TX5"),
TX_WIDGETS("ASRC1 TX6"),
+ WIDGETS("OPE1", t186_ope1_tx),
};
static const struct snd_soc_dapm_widget tegra234_ahub_widgets[] = {
@@ -976,6 +998,7 @@ static const struct snd_soc_dapm_widget tegra234_ahub_widgets[] = {
TX_WIDGETS("ASRC1 TX4"),
TX_WIDGETS("ASRC1 TX5"),
TX_WIDGETS("ASRC1 TX6"),
+ WIDGETS("OPE1", t186_ope1_tx),
};
#define TEGRA_COMMON_MUX_ROUTES(name) \
@@ -1018,7 +1041,11 @@ static const struct snd_soc_dapm_widget tegra234_ahub_widgets[] = {
{ name " Mux", "MIXER1 TX2", "MIXER1 TX2 XBAR-RX" }, \
{ name " Mux", "MIXER1 TX3", "MIXER1 TX3 XBAR-RX" }, \
{ name " Mux", "MIXER1 TX4", "MIXER1 TX4 XBAR-RX" }, \
- { name " Mux", "MIXER1 TX5", "MIXER1 TX5 XBAR-RX" },
+ { name " Mux", "MIXER1 TX5", "MIXER1 TX5 XBAR-RX" }, \
+ { name " Mux", "OPE1", "OPE1 XBAR-RX" },
+
+#define TEGRA210_ONLY_MUX_ROUTES(name) \
+ { name " Mux", "OPE2", "OPE2 XBAR-RX" },
#define TEGRA186_ONLY_MUX_ROUTES(name) \
{ name " Mux", "ADMAIF11", "ADMAIF11 XBAR-RX" }, \
@@ -1050,10 +1077,11 @@ static const struct snd_soc_dapm_widget tegra234_ahub_widgets[] = {
{ name " Mux", "ASRC1 TX5", "ASRC1 TX5 XBAR-RX" }, \
{ name " Mux", "ASRC1 TX6", "ASRC1 TX6 XBAR-RX" },
-#define TEGRA210_MUX_ROUTES(name) \
- TEGRA_COMMON_MUX_ROUTES(name)
+#define TEGRA210_MUX_ROUTES(name) \
+ TEGRA_COMMON_MUX_ROUTES(name) \
+ TEGRA210_ONLY_MUX_ROUTES(name)
-#define TEGRA186_MUX_ROUTES(name) \
+#define TEGRA186_MUX_ROUTES(name) \
TEGRA_COMMON_MUX_ROUTES(name) \
TEGRA186_ONLY_MUX_ROUTES(name)
@@ -1121,6 +1149,8 @@ static const struct snd_soc_dapm_route tegra210_ahub_routes[] = {
TEGRA210_MUX_ROUTES("MIXER1 RX8")
TEGRA210_MUX_ROUTES("MIXER1 RX9")
TEGRA210_MUX_ROUTES("MIXER1 RX10")
+ TEGRA210_MUX_ROUTES("OPE1")
+ TEGRA210_MUX_ROUTES("OPE2")
};
static const struct snd_soc_dapm_route tegra186_ahub_routes[] = {
@@ -1215,6 +1245,7 @@ static const struct snd_soc_dapm_route tegra186_ahub_routes[] = {
TEGRA186_MUX_ROUTES("ASRC1 RX5")
TEGRA186_MUX_ROUTES("ASRC1 RX6")
TEGRA186_MUX_ROUTES("ASRC1 RX7")
+ TEGRA186_MUX_ROUTES("OPE1")
};
static const struct snd_soc_component_driver tegra210_ahub_component = {
diff --git a/sound/soc/tegra/tegra210_i2s.c b/sound/soc/tegra/tegra210_i2s.c
index 9552bbb939dd..39ffa4d76b59 100644
--- a/sound/soc/tegra/tegra210_i2s.c
+++ b/sound/soc/tegra/tegra210_i2s.c
@@ -214,11 +214,11 @@ static int tegra210_i2s_set_fmt(struct snd_soc_dai *dai,
unsigned int mask, val;
mask = I2S_CTRL_MASTER_EN_MASK;
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBS_CFS:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_BC_FC:
val = 0;
break;
- case SND_SOC_DAIFMT_CBM_CFM:
+ case SND_SOC_DAIFMT_BP_FP:
val = I2S_CTRL_MASTER_EN;
break;
default:
@@ -803,7 +803,6 @@ static const struct snd_soc_component_driver tegra210_i2s_cmpnt = {
.num_dapm_routes = ARRAY_SIZE(tegra210_i2s_routes),
.controls = tegra210_i2s_controls,
.num_controls = ARRAY_SIZE(tegra210_i2s_controls),
- .non_legacy_dai_naming = 1,
};
static bool tegra210_i2s_wr_reg(struct device *dev, unsigned int reg)
diff --git a/sound/soc/tegra/tegra210_mbdrc.c b/sound/soc/tegra/tegra210_mbdrc.c
new file mode 100644
index 000000000000..d786daa6aba6
--- /dev/null
+++ b/sound/soc/tegra/tegra210_mbdrc.c
@@ -0,0 +1,1014 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// tegra210_mbdrc.c - Tegra210 MBDRC driver
+//
+// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+
+#include "tegra210_mbdrc.h"
+#include "tegra210_ope.h"
+
+#define MBDRC_FILTER_REG(reg, id) \
+ ((reg) + ((id) * TEGRA210_MBDRC_FILTER_PARAM_STRIDE))
+
+#define MBDRC_FILTER_REG_DEFAULTS(id) \
+ { MBDRC_FILTER_REG(TEGRA210_MBDRC_IIR_CFG, id), 0x00000005}, \
+ { MBDRC_FILTER_REG(TEGRA210_MBDRC_IN_ATTACK, id), 0x3e48590c}, \
+ { MBDRC_FILTER_REG(TEGRA210_MBDRC_IN_RELEASE, id), 0x08414e9f}, \
+ { MBDRC_FILTER_REG(TEGRA210_MBDRC_FAST_ATTACK, id), 0x7fffffff}, \
+ { MBDRC_FILTER_REG(TEGRA210_MBDRC_IN_THRESHOLD, id), 0x06145082}, \
+ { MBDRC_FILTER_REG(TEGRA210_MBDRC_OUT_THRESHOLD, id), 0x060d379b}, \
+ { MBDRC_FILTER_REG(TEGRA210_MBDRC_RATIO_1ST, id), 0x0000a000}, \
+ { MBDRC_FILTER_REG(TEGRA210_MBDRC_RATIO_2ND, id), 0x00002000}, \
+ { MBDRC_FILTER_REG(TEGRA210_MBDRC_RATIO_3RD, id), 0x00000b33}, \
+ { MBDRC_FILTER_REG(TEGRA210_MBDRC_RATIO_4TH, id), 0x00000800}, \
+ { MBDRC_FILTER_REG(TEGRA210_MBDRC_RATIO_5TH, id), 0x0000019a}, \
+ { MBDRC_FILTER_REG(TEGRA210_MBDRC_MAKEUP_GAIN, id), 0x00000002}, \
+ { MBDRC_FILTER_REG(TEGRA210_MBDRC_INIT_GAIN, id), 0x00066666}, \
+ { MBDRC_FILTER_REG(TEGRA210_MBDRC_GAIN_ATTACK, id), 0x00d9ba0e}, \
+ { MBDRC_FILTER_REG(TEGRA210_MBDRC_GAIN_RELEASE, id), 0x3e48590c}, \
+ { MBDRC_FILTER_REG(TEGRA210_MBDRC_FAST_RELEASE, id), 0x7ffff26a}, \
+ { MBDRC_FILTER_REG(TEGRA210_MBDRC_CFG_RAM_CTRL, id), 0x4000}
+
+static const struct reg_default tegra210_mbdrc_reg_defaults[] = {
+ { TEGRA210_MBDRC_CFG, 0x0030de51},
+ { TEGRA210_MBDRC_CHANNEL_MASK, 0x00000003},
+ { TEGRA210_MBDRC_FAST_FACTOR, 0x30000800},
+
+ MBDRC_FILTER_REG_DEFAULTS(0),
+ MBDRC_FILTER_REG_DEFAULTS(1),
+ MBDRC_FILTER_REG_DEFAULTS(2),
+};
+
+/* Default MBDRC parameters */
+static const struct tegra210_mbdrc_config mbdrc_init_config = {
+ .mode = 0, /* Bypass */
+ .rms_off = 48,
+ .peak_rms_mode = 1, /* PEAK */
+ .fliter_structure = 0, /* All-pass tree */
+ .shift_ctrl = 30,
+ .frame_size = 32,
+ .channel_mask = 0x3,
+ .fa_factor = 2048,
+ .fr_factor = 14747,
+
+ .band_params[MBDRC_LOW_BAND] = {
+ .band = MBDRC_LOW_BAND,
+ .iir_stages = 5,
+ .in_attack_tc = 1044928780,
+ .in_release_tc = 138497695,
+ .fast_attack_tc = 2147483647,
+ .in_threshold = {130, 80, 20, 6},
+ .out_threshold = {155, 55, 13, 6},
+ .ratio = {40960, 8192, 2867, 2048, 410},
+ .makeup_gain = 4,
+ .gain_init = 419430,
+ .gain_attack_tc = 14268942,
+ .gain_release_tc = 1440547090,
+ .fast_release_tc = 2147480170,
+
+ .biquad_params = {
+ /*
+ * Gains:
+ *
+ * b0, b1, a0,
+ * a1, a2,
+ */
+
+ /* Band-0 */
+ 961046798, -2030431983, 1073741824,
+ 2030431983, -961046798,
+ /* Band-1 */
+ 1030244425, -2099481453, 1073741824,
+ 2099481453, -1030244425,
+ /* Band-2 */
+ 1067169294, -2136327263, 1073741824,
+ 2136327263, -1067169294,
+ /* Band-3 */
+ 434951949, -1306567134, 1073741824,
+ 1306567134, -434951949,
+ /* Band-4 */
+ 780656019, -1605955641, 1073741824,
+ 1605955641, -780656019,
+ /* Band-5 */
+ 1024497031, -1817128152, 1073741824,
+ 1817128152, -1024497031,
+ /* Band-6 */
+ 1073741824, 0, 0,
+ 0, 0,
+ /* Band-7 */
+ 1073741824, 0, 0,
+ 0, 0,
+ }
+ },
+
+ .band_params[MBDRC_MID_BAND] = {
+ .band = MBDRC_MID_BAND,
+ .iir_stages = 5,
+ .in_attack_tc = 1581413104,
+ .in_release_tc = 35494783,
+ .fast_attack_tc = 2147483647,
+ .in_threshold = {130, 50, 30, 6},
+ .out_threshold = {106, 50, 30, 13},
+ .ratio = {40960, 2867, 4096, 2867, 410},
+ .makeup_gain = 6,
+ .gain_init = 419430,
+ .gain_attack_tc = 4766887,
+ .gain_release_tc = 1044928780,
+ .fast_release_tc = 2147480170,
+
+ .biquad_params = {
+ /*
+ * Gains:
+ *
+ * b0, b1, a0,
+ * a1, a2,
+ */
+
+ /* Band-0 */
+ -1005668963, 1073741824, 0,
+ 1005668963, 0,
+ /* Band-1 */
+ 998437058, -2067742187, 1073741824,
+ 2067742187, -998437058,
+ /* Band-2 */
+ 1051963422, -2121153948, 1073741824,
+ 2121153948, -1051963422,
+ /* Band-3 */
+ 434951949, -1306567134, 1073741824,
+ 1306567134, -434951949,
+ /* Band-4 */
+ 780656019, -1605955641, 1073741824,
+ 1605955641, -780656019,
+ /* Band-5 */
+ 1024497031, -1817128152, 1073741824,
+ 1817128152, -1024497031,
+ /* Band-6 */
+ 1073741824, 0, 0,
+ 0, 0,
+ /* Band-7 */
+ 1073741824, 0, 0,
+ 0, 0,
+ }
+ },
+
+ .band_params[MBDRC_HIGH_BAND] = {
+ .band = MBDRC_HIGH_BAND,
+ .iir_stages = 5,
+ .in_attack_tc = 2144750688,
+ .in_release_tc = 70402888,
+ .fast_attack_tc = 2147483647,
+ .in_threshold = {130, 50, 30, 6},
+ .out_threshold = {106, 50, 30, 13},
+ .ratio = {40960, 2867, 4096, 2867, 410},
+ .makeup_gain = 6,
+ .gain_init = 419430,
+ .gain_attack_tc = 4766887,
+ .gain_release_tc = 1044928780,
+ .fast_release_tc = 2147480170,
+
+ .biquad_params = {
+ /*
+ * Gains:
+ *
+ * b0, b1, a0,
+ * a1, a2,
+ */
+
+ /* Band-0 */
+ 1073741824, 0, 0,
+ 0, 0,
+ /* Band-1 */
+ 1073741824, 0, 0,
+ 0, 0,
+ /* Band-2 */
+ 1073741824, 0, 0,
+ 0, 0,
+ /* Band-3 */
+ -619925131, 1073741824, 0,
+ 619925131, 0,
+ /* Band-4 */
+ 606839335, -1455425976, 1073741824,
+ 1455425976, -606839335,
+ /* Band-5 */
+ 917759617, -1724690840, 1073741824,
+ 1724690840, -917759617,
+ /* Band-6 */
+ 1073741824, 0, 0,
+ 0, 0,
+ /* Band-7 */
+ 1073741824, 0, 0,
+ 0, 0,
+ }
+ }
+};
+
+static void tegra210_mbdrc_write_ram(struct regmap *regmap, unsigned int reg_ctrl,
+ unsigned int reg_data, unsigned int ram_offset,
+ unsigned int *data, size_t size)
+{
+ unsigned int val;
+ unsigned int i;
+
+ val = ram_offset & TEGRA210_MBDRC_RAM_CTRL_RAM_ADDR_MASK;
+ val |= TEGRA210_MBDRC_RAM_CTRL_ADDR_INIT_EN;
+ val |= TEGRA210_MBDRC_RAM_CTRL_SEQ_ACCESS_EN;
+ val |= TEGRA210_MBDRC_RAM_CTRL_RW_WRITE;
+
+ regmap_write(regmap, reg_ctrl, val);
+
+ for (i = 0; i < size; i++)
+ regmap_write(regmap, reg_data, data[i]);
+}
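A short note on the control word this helper programs before the burst, derived from the RAM_CTRL bit definitions in tegra210_mbdrc.h below:

    /*
     * Sketch: for ram_offset = 0 the control word is
     * RW_WRITE | ADDR_INIT_EN | SEQ_ACCESS_EN = 0x7000, so the hardware
     * latches the start address and then auto-increments it on every
     * subsequent write to the RAM_DATA register.
     */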
+
+static int tegra210_mbdrc_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_ope *ope = snd_soc_component_get_drvdata(cmpnt);
+ unsigned int val;
+
+ regmap_read(ope->mbdrc_regmap, mc->reg, &val);
+
+ ucontrol->value.integer.value[0] = (val >> mc->shift) & mc->max;
+
+ return 0;
+}
+
+static int tegra210_mbdrc_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_ope *ope = snd_soc_component_get_drvdata(cmpnt);
+ unsigned int val = ucontrol->value.integer.value[0];
+ bool change = false;
+
+ val = val << mc->shift;
+
+ regmap_update_bits_check(ope->mbdrc_regmap, mc->reg,
+ (mc->max << mc->shift), val, &change);
+
+ return change ? 1 : 0;
+}
+
+static int tegra210_mbdrc_get_enum(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_ope *ope = snd_soc_component_get_drvdata(cmpnt);
+ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ unsigned int val;
+
+ regmap_read(ope->mbdrc_regmap, e->reg, &val);
+
+ ucontrol->value.enumerated.item[0] = (val >> e->shift_l) & e->mask;
+
+ return 0;
+}
+
+static int tegra210_mbdrc_put_enum(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_ope *ope = snd_soc_component_get_drvdata(cmpnt);
+ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ bool change = false;
+ unsigned int val;
+ unsigned int mask;
+
+ if (ucontrol->value.enumerated.item[0] > e->items - 1)
+ return -EINVAL;
+
+ val = ucontrol->value.enumerated.item[0] << e->shift_l;
+ mask = e->mask << e->shift_l;
+
+ regmap_update_bits_check(ope->mbdrc_regmap, e->reg, mask, val,
+ &change);
+
+ return change ? 1 : 0;
+}
+
+static int tegra210_mbdrc_band_params_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct tegra_soc_bytes *params = (void *)kcontrol->private_value;
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_ope *ope = snd_soc_component_get_drvdata(cmpnt);
+ u32 *data = (u32 *)ucontrol->value.bytes.data;
+ u32 regs = params->soc.base;
+ u32 mask = params->soc.mask;
+ u32 shift = params->shift;
+ unsigned int i;
+
+ for (i = 0; i < params->soc.num_regs; i++, regs += cmpnt->val_bytes) {
+ regmap_read(ope->mbdrc_regmap, regs, &data[i]);
+
+ data[i] = ((data[i] & mask) >> shift);
+ }
+
+ return 0;
+}
+
+static int tegra210_mbdrc_band_params_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct tegra_soc_bytes *params = (void *)kcontrol->private_value;
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_ope *ope = snd_soc_component_get_drvdata(cmpnt);
+ u32 *data = (u32 *)ucontrol->value.bytes.data;
+ u32 regs = params->soc.base;
+ u32 mask = params->soc.mask;
+ u32 shift = params->shift;
+ bool change = false;
+ unsigned int i;
+
+ for (i = 0; i < params->soc.num_regs; i++, regs += cmpnt->val_bytes) {
+ bool update = false;
+
+ regmap_update_bits_check(ope->mbdrc_regmap, regs, mask,
+ data[i] << shift, &update);
+
+ change |= update;
+ }
+
+ return change ? 1 : 0;
+}
+
+static int tegra210_mbdrc_threshold_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct tegra_soc_bytes *params = (void *)kcontrol->private_value;
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_ope *ope = snd_soc_component_get_drvdata(cmpnt);
+ u32 *data = (u32 *)ucontrol->value.bytes.data;
+ u32 regs = params->soc.base;
+ u32 num_regs = params->soc.num_regs;
+ u32 val;
+ unsigned int i;
+
+ for (i = 0; i < num_regs; i += 4, regs += cmpnt->val_bytes) {
+ regmap_read(ope->mbdrc_regmap, regs, &val);
+
+ data[i] = (val & TEGRA210_MBDRC_THRESH_1ST_MASK) >>
+ TEGRA210_MBDRC_THRESH_1ST_SHIFT;
+ data[i + 1] = (val & TEGRA210_MBDRC_THRESH_2ND_MASK) >>
+ TEGRA210_MBDRC_THRESH_2ND_SHIFT;
+ data[i + 2] = (val & TEGRA210_MBDRC_THRESH_3RD_MASK) >>
+ TEGRA210_MBDRC_THRESH_3RD_SHIFT;
+ data[i + 3] = (val & TEGRA210_MBDRC_THRESH_4TH_MASK) >>
+ TEGRA210_MBDRC_THRESH_4TH_SHIFT;
+ }
+
+ return 0;
+}
+
+static int tegra210_mbdrc_threshold_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct tegra_soc_bytes *params = (void *)kcontrol->private_value;
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_ope *ope = snd_soc_component_get_drvdata(cmpnt);
+ u32 *data = (u32 *)ucontrol->value.bytes.data;
+ u32 regs = params->soc.base;
+ u32 num_regs = params->soc.num_regs;
+ bool change = false;
+ unsigned int i;
+
+ for (i = 0; i < num_regs; i += 4, regs += cmpnt->val_bytes) {
+ bool update = false;
+
+ data[i] = (((data[i] << TEGRA210_MBDRC_THRESH_1ST_SHIFT) &
+ TEGRA210_MBDRC_THRESH_1ST_MASK) |
+ ((data[i + 1] << TEGRA210_MBDRC_THRESH_2ND_SHIFT) &
+ TEGRA210_MBDRC_THRESH_2ND_MASK) |
+ ((data[i + 2] << TEGRA210_MBDRC_THRESH_3RD_SHIFT) &
+ TEGRA210_MBDRC_THRESH_3RD_MASK) |
+ ((data[i + 3] << TEGRA210_MBDRC_THRESH_4TH_SHIFT) &
+ TEGRA210_MBDRC_THRESH_4TH_MASK));
+
+ regmap_update_bits_check(ope->mbdrc_regmap, regs, 0xffffffff,
+ data[i], &update);
+
+ change |= update;
+ }
+
+ return change ? 1 : 0;
+}
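Each threshold register packs the four per-stage thresholds one byte per field; a worked example using the default low-band in_threshold values from mbdrc_init_config above:

    /*
     * Illustration: {130, 80, 20, 6} packs as
     *   (6 << 24) | (20 << 16) | (80 << 8) | 130 = 0x06145082,
     * which matches the TEGRA210_MBDRC_IN_THRESHOLD reset default listed
     * in tegra210_mbdrc_reg_defaults.
     */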
+
+static int tegra210_mbdrc_biquad_coeffs_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct tegra_soc_bytes *params = (void *)kcontrol->private_value;
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ u32 *data = (u32 *)ucontrol->value.bytes.data;
+
+ memset(data, 0, params->soc.num_regs * cmpnt->val_bytes);
+
+ return 0;
+}
+
+static int tegra210_mbdrc_biquad_coeffs_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct tegra_soc_bytes *params = (void *)kcontrol->private_value;
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_ope *ope = snd_soc_component_get_drvdata(cmpnt);
+ u32 reg_ctrl = params->soc.base;
+ u32 reg_data = reg_ctrl + cmpnt->val_bytes;
+ u32 *data = (u32 *)ucontrol->value.bytes.data;
+
+ tegra210_mbdrc_write_ram(ope->mbdrc_regmap, reg_ctrl, reg_data,
+ params->shift, data, params->soc.num_regs);
+
+ return 1;
+}
+
+static int tegra210_mbdrc_param_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ struct soc_bytes *params = (void *)kcontrol->private_value;
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+ uinfo->count = params->num_regs * sizeof(u32);
+
+ return 0;
+}
+
+static int tegra210_mbdrc_vol_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_ope *ope = snd_soc_component_get_drvdata(cmpnt);
+ int val;
+
+ regmap_read(ope->mbdrc_regmap, mc->reg, &val);
+
+ ucontrol->value.integer.value[0] =
+ ((val >> mc->shift) - TEGRA210_MBDRC_MASTER_VOL_MIN);
+
+ return 0;
+}
+
+static int tegra210_mbdrc_vol_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_ope *ope = snd_soc_component_get_drvdata(cmpnt);
+ int val = ucontrol->value.integer.value[0];
+ bool change = false;
+
+ val += TEGRA210_MBDRC_MASTER_VOL_MIN;
+
+ regmap_update_bits_check(ope->mbdrc_regmap, mc->reg,
+ mc->max << mc->shift, val << mc->shift,
+ &change);
+
+ regmap_read(ope->mbdrc_regmap, mc->reg, &val);
+
+ return change ? 1 : 0;
+}
+
+static const char * const tegra210_mbdrc_mode_text[] = {
+ "Bypass", "Fullband", "Dualband", "Multiband"
+};
+
+static const struct soc_enum tegra210_mbdrc_mode_enum =
+ SOC_ENUM_SINGLE(TEGRA210_MBDRC_CFG, TEGRA210_MBDRC_CFG_MBDRC_MODE_SHIFT,
+ 4, tegra210_mbdrc_mode_text);
+
+static const char * const tegra210_mbdrc_peak_rms_text[] = {
+ "Peak", "RMS"
+};
+
+static const struct soc_enum tegra210_mbdrc_peak_rms_enum =
+ SOC_ENUM_SINGLE(TEGRA210_MBDRC_CFG, TEGRA210_MBDRC_CFG_PEAK_RMS_SHIFT,
+ 2, tegra210_mbdrc_peak_rms_text);
+
+static const char * const tegra210_mbdrc_filter_structure_text[] = {
+ "All-pass-tree", "Flexible"
+};
+
+static const struct soc_enum tegra210_mbdrc_filter_structure_enum =
+ SOC_ENUM_SINGLE(TEGRA210_MBDRC_CFG,
+ TEGRA210_MBDRC_CFG_FILTER_STRUCTURE_SHIFT, 2,
+ tegra210_mbdrc_filter_structure_text);
+
+static const char * const tegra210_mbdrc_frame_size_text[] = {
+ "N1", "N2", "N4", "N8", "N16", "N32", "N64"
+};
+
+static const struct soc_enum tegra210_mbdrc_frame_size_enum =
+ SOC_ENUM_SINGLE(TEGRA210_MBDRC_CFG, TEGRA210_MBDRC_CFG_FRAME_SIZE_SHIFT,
+ 7, tegra210_mbdrc_frame_size_text);
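A quick mapping note: tegra210_mbdrc_component_init() below programs __ffs(conf->frame_size) into this field, so the default frame_size of 32 from mbdrc_init_config resolves as follows.

    /* Illustration: __ffs(32) = 5, which selects "N32" in the enum above */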
+
+#define TEGRA_MBDRC_BYTES_EXT(xname, xbase, xregs, xshift, xmask, xinfo) \
+ TEGRA_SOC_BYTES_EXT(xname, xbase, xregs, xshift, xmask, \
+ tegra210_mbdrc_band_params_get, \
+ tegra210_mbdrc_band_params_put, \
+ tegra210_mbdrc_param_info)
+
+#define TEGRA_MBDRC_BAND_BYTES_EXT(xname, xbase, xshift, xmask, xinfo) \
+ TEGRA_MBDRC_BYTES_EXT(xname, xbase, TEGRA210_MBDRC_FILTER_COUNT, \
+ xshift, xmask, xinfo)
+
+static const DECLARE_TLV_DB_MINMAX(mbdrc_vol_tlv, -25600, 25500);
+
+static const struct snd_kcontrol_new tegra210_mbdrc_controls[] = {
+ SOC_ENUM_EXT("MBDRC Peak RMS Mode", tegra210_mbdrc_peak_rms_enum,
+ tegra210_mbdrc_get_enum, tegra210_mbdrc_put_enum),
+
+ SOC_ENUM_EXT("MBDRC Filter Structure",
+ tegra210_mbdrc_filter_structure_enum,
+ tegra210_mbdrc_get_enum, tegra210_mbdrc_put_enum),
+
+ SOC_ENUM_EXT("MBDRC Frame Size", tegra210_mbdrc_frame_size_enum,
+ tegra210_mbdrc_get_enum, tegra210_mbdrc_put_enum),
+
+ SOC_ENUM_EXT("MBDRC Mode", tegra210_mbdrc_mode_enum,
+ tegra210_mbdrc_get_enum, tegra210_mbdrc_put_enum),
+
+ SOC_SINGLE_EXT("MBDRC RMS Offset", TEGRA210_MBDRC_CFG,
+ TEGRA210_MBDRC_CFG_RMS_OFFSET_SHIFT, 0x1ff, 0,
+ tegra210_mbdrc_get, tegra210_mbdrc_put),
+
+ SOC_SINGLE_EXT("MBDRC Shift Control", TEGRA210_MBDRC_CFG,
+ TEGRA210_MBDRC_CFG_SHIFT_CTRL_SHIFT, 0x1f, 0,
+ tegra210_mbdrc_get, tegra210_mbdrc_put),
+
+ SOC_SINGLE_EXT("MBDRC Fast Attack Factor", TEGRA210_MBDRC_FAST_FACTOR,
+ TEGRA210_MBDRC_FAST_FACTOR_ATTACK_SHIFT, 0xffff, 0,
+ tegra210_mbdrc_get, tegra210_mbdrc_put),
+
+ SOC_SINGLE_EXT("MBDRC Fast Release Factor", TEGRA210_MBDRC_FAST_FACTOR,
+ TEGRA210_MBDRC_FAST_FACTOR_RELEASE_SHIFT, 0xffff, 0,
+ tegra210_mbdrc_get, tegra210_mbdrc_put),
+
+ SOC_SINGLE_RANGE_EXT_TLV("MBDRC Master Volume",
+ TEGRA210_MBDRC_MASTER_VOL,
+ TEGRA210_MBDRC_MASTER_VOL_SHIFT,
+ 0, 0x1ff, 0,
+ tegra210_mbdrc_vol_get, tegra210_mbdrc_vol_put,
+ mbdrc_vol_tlv),
+
+ TEGRA_SOC_BYTES_EXT("MBDRC IIR Stages", TEGRA210_MBDRC_IIR_CFG,
+ TEGRA210_MBDRC_FILTER_COUNT,
+ TEGRA210_MBDRC_IIR_CFG_NUM_STAGES_SHIFT,
+ TEGRA210_MBDRC_IIR_CFG_NUM_STAGES_MASK,
+ tegra210_mbdrc_band_params_get,
+ tegra210_mbdrc_band_params_put,
+ tegra210_mbdrc_param_info),
+
+ TEGRA_SOC_BYTES_EXT("MBDRC In Attack Time Const", TEGRA210_MBDRC_IN_ATTACK,
+ TEGRA210_MBDRC_FILTER_COUNT,
+ TEGRA210_MBDRC_IN_ATTACK_TC_SHIFT,
+ TEGRA210_MBDRC_IN_ATTACK_TC_MASK,
+ tegra210_mbdrc_band_params_get,
+ tegra210_mbdrc_band_params_put,
+ tegra210_mbdrc_param_info),
+
+ TEGRA_SOC_BYTES_EXT("MBDRC In Release Time Const", TEGRA210_MBDRC_IN_RELEASE,
+ TEGRA210_MBDRC_FILTER_COUNT,
+ TEGRA210_MBDRC_IN_RELEASE_TC_SHIFT,
+ TEGRA210_MBDRC_IN_RELEASE_TC_MASK,
+ tegra210_mbdrc_band_params_get,
+ tegra210_mbdrc_band_params_put,
+ tegra210_mbdrc_param_info),
+
+ TEGRA_SOC_BYTES_EXT("MBDRC Fast Attack Time Const", TEGRA210_MBDRC_FAST_ATTACK,
+ TEGRA210_MBDRC_FILTER_COUNT,
+ TEGRA210_MBDRC_FAST_ATTACK_TC_SHIFT,
+ TEGRA210_MBDRC_FAST_ATTACK_TC_MASK,
+ tegra210_mbdrc_band_params_get,
+ tegra210_mbdrc_band_params_put,
+ tegra210_mbdrc_param_info),
+
+ TEGRA_SOC_BYTES_EXT("MBDRC In Threshold", TEGRA210_MBDRC_IN_THRESHOLD,
+ TEGRA210_MBDRC_FILTER_COUNT * 4, 0, 0xffffffff,
+ tegra210_mbdrc_threshold_get,
+ tegra210_mbdrc_threshold_put,
+ tegra210_mbdrc_param_info),
+
+ TEGRA_SOC_BYTES_EXT("MBDRC Out Threshold", TEGRA210_MBDRC_OUT_THRESHOLD,
+ TEGRA210_MBDRC_FILTER_COUNT * 4, 0, 0xffffffff,
+ tegra210_mbdrc_threshold_get,
+ tegra210_mbdrc_threshold_put,
+ tegra210_mbdrc_param_info),
+
+ TEGRA_SOC_BYTES_EXT("MBDRC Ratio", TEGRA210_MBDRC_RATIO_1ST,
+ TEGRA210_MBDRC_FILTER_COUNT * 5,
+ TEGRA210_MBDRC_RATIO_1ST_SHIFT, TEGRA210_MBDRC_RATIO_1ST_MASK,
+ tegra210_mbdrc_band_params_get,
+ tegra210_mbdrc_band_params_put,
+ tegra210_mbdrc_param_info),
+
+ TEGRA_SOC_BYTES_EXT("MBDRC Makeup Gain", TEGRA210_MBDRC_MAKEUP_GAIN,
+ TEGRA210_MBDRC_FILTER_COUNT,
+ TEGRA210_MBDRC_MAKEUP_GAIN_SHIFT,
+ TEGRA210_MBDRC_MAKEUP_GAIN_MASK,
+ tegra210_mbdrc_band_params_get,
+ tegra210_mbdrc_band_params_put,
+ tegra210_mbdrc_param_info),
+
+ TEGRA_SOC_BYTES_EXT("MBDRC Init Gain", TEGRA210_MBDRC_INIT_GAIN,
+ TEGRA210_MBDRC_FILTER_COUNT,
+ TEGRA210_MBDRC_INIT_GAIN_SHIFT,
+ TEGRA210_MBDRC_INIT_GAIN_MASK,
+ tegra210_mbdrc_band_params_get,
+ tegra210_mbdrc_band_params_put,
+ tegra210_mbdrc_param_info),
+
+ TEGRA_SOC_BYTES_EXT("MBDRC Attack Gain", TEGRA210_MBDRC_GAIN_ATTACK,
+ TEGRA210_MBDRC_FILTER_COUNT,
+ TEGRA210_MBDRC_GAIN_ATTACK_SHIFT,
+ TEGRA210_MBDRC_GAIN_ATTACK_MASK,
+ tegra210_mbdrc_band_params_get,
+ tegra210_mbdrc_band_params_put,
+ tegra210_mbdrc_param_info),
+
+ TEGRA_SOC_BYTES_EXT("MBDRC Release Gain", TEGRA210_MBDRC_GAIN_RELEASE,
+ TEGRA210_MBDRC_FILTER_COUNT,
+ TEGRA210_MBDRC_GAIN_RELEASE_SHIFT,
+ TEGRA210_MBDRC_GAIN_RELEASE_MASK,
+ tegra210_mbdrc_band_params_get,
+ tegra210_mbdrc_band_params_put,
+ tegra210_mbdrc_param_info),
+
+ TEGRA_SOC_BYTES_EXT("MBDRC Fast Release Gain",
+ TEGRA210_MBDRC_FAST_RELEASE,
+ TEGRA210_MBDRC_FILTER_COUNT,
+ TEGRA210_MBDRC_FAST_RELEASE_SHIFT,
+ TEGRA210_MBDRC_FAST_RELEASE_MASK,
+ tegra210_mbdrc_band_params_get,
+ tegra210_mbdrc_band_params_put,
+ tegra210_mbdrc_param_info),
+
+ TEGRA_SOC_BYTES_EXT("MBDRC Low Band Biquad Coeffs",
+ TEGRA210_MBDRC_CFG_RAM_CTRL,
+ TEGRA210_MBDRC_MAX_BIQUAD_STAGES * 5, 0, 0xffffffff,
+ tegra210_mbdrc_biquad_coeffs_get,
+ tegra210_mbdrc_biquad_coeffs_put,
+ tegra210_mbdrc_param_info),
+
+ TEGRA_SOC_BYTES_EXT("MBDRC Mid Band Biquad Coeffs",
+ TEGRA210_MBDRC_CFG_RAM_CTRL +
+ TEGRA210_MBDRC_FILTER_PARAM_STRIDE,
+ TEGRA210_MBDRC_MAX_BIQUAD_STAGES * 5, 0, 0xffffffff,
+ tegra210_mbdrc_biquad_coeffs_get,
+ tegra210_mbdrc_biquad_coeffs_put,
+ tegra210_mbdrc_param_info),
+
+ TEGRA_SOC_BYTES_EXT("MBDRC High Band Biquad Coeffs",
+ TEGRA210_MBDRC_CFG_RAM_CTRL +
+ (TEGRA210_MBDRC_FILTER_PARAM_STRIDE * 2),
+ TEGRA210_MBDRC_MAX_BIQUAD_STAGES * 5, 0, 0xffffffff,
+ tegra210_mbdrc_biquad_coeffs_get,
+ tegra210_mbdrc_biquad_coeffs_put,
+ tegra210_mbdrc_param_info),
+};
+
+static bool tegra210_mbdrc_wr_reg(struct device *dev, unsigned int reg)
+{
+ if (reg >= TEGRA210_MBDRC_IIR_CFG)
+ reg -= ((reg - TEGRA210_MBDRC_IIR_CFG) %
+ (TEGRA210_MBDRC_FILTER_PARAM_STRIDE *
+ TEGRA210_MBDRC_FILTER_COUNT));
+
+ switch (reg) {
+ case TEGRA210_MBDRC_SOFT_RESET:
+ case TEGRA210_MBDRC_CG:
+ case TEGRA210_MBDRC_CFG ... TEGRA210_MBDRC_CFG_RAM_DATA:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool tegra210_mbdrc_rd_reg(struct device *dev, unsigned int reg)
+{
+ if (tegra210_mbdrc_wr_reg(dev, reg))
+ return true;
+
+ if (reg >= TEGRA210_MBDRC_IIR_CFG)
+ reg -= ((reg - TEGRA210_MBDRC_IIR_CFG) %
+ (TEGRA210_MBDRC_FILTER_PARAM_STRIDE *
+ TEGRA210_MBDRC_FILTER_COUNT));
+
+ switch (reg) {
+ case TEGRA210_MBDRC_STATUS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool tegra210_mbdrc_volatile_reg(struct device *dev, unsigned int reg)
+{
+ if (reg >= TEGRA210_MBDRC_IIR_CFG)
+ reg -= ((reg - TEGRA210_MBDRC_IIR_CFG) %
+ (TEGRA210_MBDRC_FILTER_PARAM_STRIDE *
+ TEGRA210_MBDRC_FILTER_COUNT));
+
+ switch (reg) {
+ case TEGRA210_MBDRC_SOFT_RESET:
+ case TEGRA210_MBDRC_STATUS:
+ case TEGRA210_MBDRC_CFG_RAM_CTRL:
+ case TEGRA210_MBDRC_CFG_RAM_DATA:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool tegra210_mbdrc_precious_reg(struct device *dev, unsigned int reg)
+{
+ if (reg >= TEGRA210_MBDRC_IIR_CFG)
+ reg -= ((reg - TEGRA210_MBDRC_IIR_CFG) %
+ (TEGRA210_MBDRC_FILTER_PARAM_STRIDE *
+ TEGRA210_MBDRC_FILTER_COUNT));
+
+ switch (reg) {
+ case TEGRA210_MBDRC_CFG_RAM_DATA:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config tegra210_mbdrc_regmap_cfg = {
+ .name = "mbdrc",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = TEGRA210_MBDRC_MAX_REG,
+ .writeable_reg = tegra210_mbdrc_wr_reg,
+ .readable_reg = tegra210_mbdrc_rd_reg,
+ .volatile_reg = tegra210_mbdrc_volatile_reg,
+ .precious_reg = tegra210_mbdrc_precious_reg,
+ .reg_defaults = tegra210_mbdrc_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(tegra210_mbdrc_reg_defaults),
+ .cache_type = REGCACHE_FLAT,
+};
+
+int tegra210_mbdrc_hw_params(struct snd_soc_component *cmpnt)
+{
+ struct tegra210_ope *ope = snd_soc_component_get_drvdata(cmpnt);
+ const struct tegra210_mbdrc_config *conf = &mbdrc_init_config;
+ u32 val = 0;
+ unsigned int i;
+
+ regmap_read(ope->mbdrc_regmap, TEGRA210_MBDRC_CFG, &val);
+
+ val &= TEGRA210_MBDRC_CFG_MBDRC_MODE_MASK;
+
+ if (val == TEGRA210_MBDRC_CFG_MBDRC_MODE_BYPASS)
+ return 0;
+
+ for (i = 0; i < MBDRC_NUM_BAND; i++) {
+ const struct tegra210_mbdrc_band_params *params =
+ &conf->band_params[i];
+
+ u32 reg_off = i * TEGRA210_MBDRC_FILTER_PARAM_STRIDE;
+
+ tegra210_mbdrc_write_ram(ope->mbdrc_regmap,
+ reg_off + TEGRA210_MBDRC_CFG_RAM_CTRL,
+ reg_off + TEGRA210_MBDRC_CFG_RAM_DATA,
+ 0, (u32 *)&params->biquad_params[0],
+ TEGRA210_MBDRC_MAX_BIQUAD_STAGES * 5);
+ }
+ return 0;
+}
+
+int tegra210_mbdrc_component_init(struct snd_soc_component *cmpnt)
+{
+ struct tegra210_ope *ope = snd_soc_component_get_drvdata(cmpnt);
+ const struct tegra210_mbdrc_config *conf = &mbdrc_init_config;
+ unsigned int i;
+ u32 val;
+
+ pm_runtime_get_sync(cmpnt->dev);
+
+ /* Initialize MBDRC registers and AHUB RAM with default params */
+ regmap_update_bits(ope->mbdrc_regmap, TEGRA210_MBDRC_CFG,
+ TEGRA210_MBDRC_CFG_MBDRC_MODE_MASK,
+ conf->mode << TEGRA210_MBDRC_CFG_MBDRC_MODE_SHIFT);
+
+ regmap_update_bits(ope->mbdrc_regmap, TEGRA210_MBDRC_CFG,
+ TEGRA210_MBDRC_CFG_RMS_OFFSET_MASK,
+ conf->rms_off << TEGRA210_MBDRC_CFG_RMS_OFFSET_SHIFT);
+
+ regmap_update_bits(ope->mbdrc_regmap, TEGRA210_MBDRC_CFG,
+ TEGRA210_MBDRC_CFG_PEAK_RMS_MASK,
+ conf->peak_rms_mode << TEGRA210_MBDRC_CFG_PEAK_RMS_SHIFT);
+
+ regmap_update_bits(ope->mbdrc_regmap, TEGRA210_MBDRC_CFG,
+ TEGRA210_MBDRC_CFG_FILTER_STRUCTURE_MASK,
+ conf->filter_structure <<
+ TEGRA210_MBDRC_CFG_FILTER_STRUCTURE_SHIFT);
+
+ regmap_update_bits(ope->mbdrc_regmap, TEGRA210_MBDRC_CFG,
+ TEGRA210_MBDRC_CFG_SHIFT_CTRL_MASK,
+ conf->shift_ctrl << TEGRA210_MBDRC_CFG_SHIFT_CTRL_SHIFT);
+
+ regmap_update_bits(ope->mbdrc_regmap, TEGRA210_MBDRC_CFG,
+ TEGRA210_MBDRC_CFG_FRAME_SIZE_MASK,
+ __ffs(conf->frame_size) <<
+ TEGRA210_MBDRC_CFG_FRAME_SIZE_SHIFT);
+
+ regmap_update_bits(ope->mbdrc_regmap, TEGRA210_MBDRC_CHANNEL_MASK,
+ TEGRA210_MBDRC_CHANNEL_MASK_MASK,
+ conf->channel_mask << TEGRA210_MBDRC_CHANNEL_MASK_SHIFT);
+
+ regmap_update_bits(ope->mbdrc_regmap, TEGRA210_MBDRC_FAST_FACTOR,
+ TEGRA210_MBDRC_FAST_FACTOR_ATTACK_MASK,
+ conf->fa_factor << TEGRA210_MBDRC_FAST_FACTOR_ATTACK_SHIFT);
+
+ regmap_update_bits(ope->mbdrc_regmap, TEGRA210_MBDRC_FAST_FACTOR,
+ TEGRA210_MBDRC_FAST_FACTOR_RELEASE_MASK,
+ conf->fr_factor << TEGRA210_MBDRC_FAST_FACTOR_RELEASE_SHIFT);
+
+ for (i = 0; i < MBDRC_NUM_BAND; i++) {
+ const struct tegra210_mbdrc_band_params *params =
+ &conf->band_params[i];
+ u32 reg_off = i * TEGRA210_MBDRC_FILTER_PARAM_STRIDE;
+
+ regmap_update_bits(ope->mbdrc_regmap,
+ reg_off + TEGRA210_MBDRC_IIR_CFG,
+ TEGRA210_MBDRC_IIR_CFG_NUM_STAGES_MASK,
+ params->iir_stages <<
+ TEGRA210_MBDRC_IIR_CFG_NUM_STAGES_SHIFT);
+
+ regmap_update_bits(ope->mbdrc_regmap,
+ reg_off + TEGRA210_MBDRC_IN_ATTACK,
+ TEGRA210_MBDRC_IN_ATTACK_TC_MASK,
+ params->in_attack_tc <<
+ TEGRA210_MBDRC_IN_ATTACK_TC_SHIFT);
+
+ regmap_update_bits(ope->mbdrc_regmap,
+ reg_off + TEGRA210_MBDRC_IN_RELEASE,
+ TEGRA210_MBDRC_IN_RELEASE_TC_MASK,
+ params->in_release_tc <<
+ TEGRA210_MBDRC_IN_RELEASE_TC_SHIFT);
+
+ regmap_update_bits(ope->mbdrc_regmap,
+ reg_off + TEGRA210_MBDRC_FAST_ATTACK,
+ TEGRA210_MBDRC_FAST_ATTACK_TC_MASK,
+ params->fast_attack_tc <<
+ TEGRA210_MBDRC_FAST_ATTACK_TC_SHIFT);
+
+ val = (((params->in_threshold[0] <<
+ TEGRA210_MBDRC_THRESH_1ST_SHIFT) &
+ TEGRA210_MBDRC_THRESH_1ST_MASK) |
+ ((params->in_threshold[1] <<
+ TEGRA210_MBDRC_THRESH_2ND_SHIFT) &
+ TEGRA210_MBDRC_THRESH_2ND_MASK) |
+ ((params->in_threshold[2] <<
+ TEGRA210_MBDRC_THRESH_3RD_SHIFT) &
+ TEGRA210_MBDRC_THRESH_3RD_MASK) |
+ ((params->in_threshold[3] <<
+ TEGRA210_MBDRC_THRESH_4TH_SHIFT) &
+ TEGRA210_MBDRC_THRESH_4TH_MASK));
+
+ regmap_update_bits(ope->mbdrc_regmap,
+ reg_off + TEGRA210_MBDRC_IN_THRESHOLD,
+ 0xffffffff, val);
+
+ val = (((params->out_threshold[0] <<
+ TEGRA210_MBDRC_THRESH_1ST_SHIFT) &
+ TEGRA210_MBDRC_THRESH_1ST_MASK) |
+ ((params->out_threshold[1] <<
+ TEGRA210_MBDRC_THRESH_2ND_SHIFT) &
+ TEGRA210_MBDRC_THRESH_2ND_MASK) |
+ ((params->out_threshold[2] <<
+ TEGRA210_MBDRC_THRESH_3RD_SHIFT) &
+ TEGRA210_MBDRC_THRESH_3RD_MASK) |
+ ((params->out_threshold[3] <<
+ TEGRA210_MBDRC_THRESH_4TH_SHIFT) &
+ TEGRA210_MBDRC_THRESH_4TH_MASK));
+
+ regmap_update_bits(ope->mbdrc_regmap,
+ reg_off + TEGRA210_MBDRC_OUT_THRESHOLD,
+ 0xffffffff, val);
+
+ regmap_update_bits(ope->mbdrc_regmap,
+ reg_off + TEGRA210_MBDRC_RATIO_1ST,
+ TEGRA210_MBDRC_RATIO_1ST_MASK,
+ params->ratio[0] << TEGRA210_MBDRC_RATIO_1ST_SHIFT);
+
+ regmap_update_bits(ope->mbdrc_regmap,
+ reg_off + TEGRA210_MBDRC_RATIO_2ND,
+ TEGRA210_MBDRC_RATIO_2ND_MASK,
+ params->ratio[1] << TEGRA210_MBDRC_RATIO_2ND_SHIFT);
+
+ regmap_update_bits(ope->mbdrc_regmap,
+ reg_off + TEGRA210_MBDRC_RATIO_3RD,
+ TEGRA210_MBDRC_RATIO_3RD_MASK,
+ params->ratio[2] << TEGRA210_MBDRC_RATIO_3RD_SHIFT);
+
+ regmap_update_bits(ope->mbdrc_regmap,
+ reg_off + TEGRA210_MBDRC_RATIO_4TH,
+ TEGRA210_MBDRC_RATIO_4TH_MASK,
+ params->ratio[3] << TEGRA210_MBDRC_RATIO_4TH_SHIFT);
+
+ regmap_update_bits(ope->mbdrc_regmap,
+ reg_off + TEGRA210_MBDRC_RATIO_5TH,
+ TEGRA210_MBDRC_RATIO_5TH_MASK,
+ params->ratio[4] << TEGRA210_MBDRC_RATIO_5TH_SHIFT);
+
+ regmap_update_bits(ope->mbdrc_regmap,
+ reg_off + TEGRA210_MBDRC_MAKEUP_GAIN,
+ TEGRA210_MBDRC_MAKEUP_GAIN_MASK,
+ params->makeup_gain <<
+ TEGRA210_MBDRC_MAKEUP_GAIN_SHIFT);
+
+ regmap_update_bits(ope->mbdrc_regmap,
+ reg_off + TEGRA210_MBDRC_INIT_GAIN,
+ TEGRA210_MBDRC_INIT_GAIN_MASK,
+ params->gain_init <<
+ TEGRA210_MBDRC_INIT_GAIN_SHIFT);
+
+ regmap_update_bits(ope->mbdrc_regmap,
+ reg_off + TEGRA210_MBDRC_GAIN_ATTACK,
+ TEGRA210_MBDRC_GAIN_ATTACK_MASK,
+ params->gain_attack_tc <<
+ TEGRA210_MBDRC_GAIN_ATTACK_SHIFT);
+
+ regmap_update_bits(ope->mbdrc_regmap,
+ reg_off + TEGRA210_MBDRC_GAIN_RELEASE,
+ TEGRA210_MBDRC_GAIN_RELEASE_MASK,
+ params->gain_release_tc <<
+ TEGRA210_MBDRC_GAIN_RELEASE_SHIFT);
+
+ regmap_update_bits(ope->mbdrc_regmap,
+ reg_off + TEGRA210_MBDRC_FAST_RELEASE,
+ TEGRA210_MBDRC_FAST_RELEASE_MASK,
+ params->fast_release_tc <<
+ TEGRA210_MBDRC_FAST_RELEASE_SHIFT);
+
+ tegra210_mbdrc_write_ram(ope->mbdrc_regmap,
+ reg_off + TEGRA210_MBDRC_CFG_RAM_CTRL,
+ reg_off + TEGRA210_MBDRC_CFG_RAM_DATA, 0,
+ (u32 *)&params->biquad_params[0],
+ TEGRA210_MBDRC_MAX_BIQUAD_STAGES * 5);
+ }
+
+ pm_runtime_put_sync(cmpnt->dev);
+
+ snd_soc_add_component_controls(cmpnt, tegra210_mbdrc_controls,
+ ARRAY_SIZE(tegra210_mbdrc_controls));
+
+ return 0;
+}
+
+int tegra210_mbdrc_regmap_init(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tegra210_ope *ope = dev_get_drvdata(dev);
+ struct device_node *child;
+ struct resource mem;
+ void __iomem *regs;
+ int err;
+
+ child = of_get_child_by_name(dev->of_node, "dynamic-range-compressor");
+ if (!child)
+ return -ENODEV;
+
+ err = of_address_to_resource(child, 0, &mem);
+ of_node_put(child);
+ if (err < 0) {
+ dev_err(dev, "fail to get MBDRC resource\n");
+ return err;
+ }
+
+ mem.flags = IORESOURCE_MEM;
+ regs = devm_ioremap_resource(dev, &mem);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ ope->mbdrc_regmap = devm_regmap_init_mmio(dev, regs,
+ &tegra210_mbdrc_regmap_cfg);
+ if (IS_ERR(ope->mbdrc_regmap)) {
+ dev_err(dev, "regmap init failed\n");
+ return PTR_ERR(ope->mbdrc_regmap);
+ }
+
+ regcache_cache_only(ope->mbdrc_regmap, true);
+
+ return 0;
+}
diff --git a/sound/soc/tegra/tegra210_mbdrc.h b/sound/soc/tegra/tegra210_mbdrc.h
new file mode 100644
index 000000000000..4c48da0e1dea
--- /dev/null
+++ b/sound/soc/tegra/tegra210_mbdrc.h
@@ -0,0 +1,215 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * tegra210_mbdrc.h - Definitions for Tegra210 MBDRC driver
+ *
+ * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+ *
+ */
+
+#ifndef __TEGRA210_MBDRC_H__
+#define __TEGRA210_MBDRC_H__
+
+#include <linux/platform_device.h>
+#include <sound/soc.h>
+
+/* Register offsets from TEGRA210_MBDRC*_BASE */
+#define TEGRA210_MBDRC_SOFT_RESET 0x4
+#define TEGRA210_MBDRC_CG 0x8
+#define TEGRA210_MBDRC_STATUS 0xc
+#define TEGRA210_MBDRC_CFG 0x28
+#define TEGRA210_MBDRC_CHANNEL_MASK 0x2c
+#define TEGRA210_MBDRC_MASTER_VOL 0x30
+#define TEGRA210_MBDRC_FAST_FACTOR 0x34
+
+#define TEGRA210_MBDRC_FILTER_COUNT 3
+#define TEGRA210_MBDRC_FILTER_PARAM_STRIDE 0x4
+
+#define TEGRA210_MBDRC_IIR_CFG 0x38
+#define TEGRA210_MBDRC_IN_ATTACK 0x44
+#define TEGRA210_MBDRC_IN_RELEASE 0x50
+#define TEGRA210_MBDRC_FAST_ATTACK 0x5c
+#define TEGRA210_MBDRC_IN_THRESHOLD 0x68
+#define TEGRA210_MBDRC_OUT_THRESHOLD 0x74
+#define TEGRA210_MBDRC_RATIO_1ST 0x80
+#define TEGRA210_MBDRC_RATIO_2ND 0x8c
+#define TEGRA210_MBDRC_RATIO_3RD 0x98
+#define TEGRA210_MBDRC_RATIO_4TH 0xa4
+#define TEGRA210_MBDRC_RATIO_5TH 0xb0
+#define TEGRA210_MBDRC_MAKEUP_GAIN 0xbc
+#define TEGRA210_MBDRC_INIT_GAIN 0xc8
+#define TEGRA210_MBDRC_GAIN_ATTACK 0xd4
+#define TEGRA210_MBDRC_GAIN_RELEASE 0xe0
+#define TEGRA210_MBDRC_FAST_RELEASE 0xec
+#define TEGRA210_MBDRC_CFG_RAM_CTRL 0xf8
+#define TEGRA210_MBDRC_CFG_RAM_DATA 0x104
+
+#define TEGRA210_MBDRC_MAX_REG (TEGRA210_MBDRC_CFG_RAM_DATA + \
+ (TEGRA210_MBDRC_FILTER_PARAM_STRIDE * \
+ (TEGRA210_MBDRC_FILTER_COUNT - 1)))
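Worked out with the offsets above, the maximum register address is:

    /*
     * Illustration: 0x104 + 0x4 * (3 - 1) = 0x10c, i.e. the band-2 copy
     * of TEGRA210_MBDRC_CFG_RAM_DATA.
     */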
+
+/* Fields for TEGRA210_MBDRC_CFG */
+#define TEGRA210_MBDRC_CFG_RMS_OFFSET_SHIFT 16
+#define TEGRA210_MBDRC_CFG_RMS_OFFSET_MASK (0x1ff << TEGRA210_MBDRC_CFG_RMS_OFFSET_SHIFT)
+
+#define TEGRA210_MBDRC_CFG_PEAK_RMS_SHIFT 14
+#define TEGRA210_MBDRC_CFG_PEAK_RMS_MASK (0x1 << TEGRA210_MBDRC_CFG_PEAK_RMS_SHIFT)
+#define TEGRA210_MBDRC_CFG_PEAK (1 << TEGRA210_MBDRC_CFG_PEAK_RMS_SHIFT)
+
+#define TEGRA210_MBDRC_CFG_FILTER_STRUCTURE_SHIFT 13
+#define TEGRA210_MBDRC_CFG_FILTER_STRUCTURE_MASK (0x1 << TEGRA210_MBDRC_CFG_FILTER_STRUCTURE_SHIFT)
+#define TEGRA210_MBDRC_CFG_FILTER_STRUCTURE_FLEX (1 << TEGRA210_MBDRC_CFG_FILTER_STRUCTURE_SHIFT)
+
+#define TEGRA210_MBDRC_CFG_SHIFT_CTRL_SHIFT 8
+#define TEGRA210_MBDRC_CFG_SHIFT_CTRL_MASK (0x1f << TEGRA210_MBDRC_CFG_SHIFT_CTRL_SHIFT)
+
+#define TEGRA210_MBDRC_CFG_FRAME_SIZE_SHIFT 4
+#define TEGRA210_MBDRC_CFG_FRAME_SIZE_MASK (0xf << TEGRA210_MBDRC_CFG_FRAME_SIZE_SHIFT)
+
+#define TEGRA210_MBDRC_CFG_MBDRC_MODE_SHIFT 0
+#define TEGRA210_MBDRC_CFG_MBDRC_MODE_MASK (0x3 << TEGRA210_MBDRC_CFG_MBDRC_MODE_SHIFT)
+#define TEGRA210_MBDRC_CFG_MBDRC_MODE_BYPASS (0 << TEGRA210_MBDRC_CFG_MBDRC_MODE_SHIFT)
+
+/* Fields for TEGRA210_MBDRC_CHANNEL_MASK */
+#define TEGRA210_MBDRC_CHANNEL_MASK_SHIFT 0
+#define TEGRA210_MBDRC_CHANNEL_MASK_MASK (0xff << TEGRA210_MBDRC_CHANNEL_MASK_SHIFT)
+
+/* Fields for TEGRA210_MBDRC_MASTER_VOL */
+#define TEGRA210_MBDRC_MASTER_VOL_SHIFT 23
+#define TEGRA210_MBDRC_MASTER_VOL_MIN -256
+#define TEGRA210_MBDRC_MASTER_VOL_MAX 256
+
+/* Fields for TEGRA210_MBDRC_FAST_FACTOR */
+#define TEGRA210_MBDRC_FAST_FACTOR_RELEASE_SHIFT 16
+#define TEGRA210_MBDRC_FAST_FACTOR_RELEASE_MASK (0xffff << TEGRA210_MBDRC_FAST_FACTOR_RELEASE_SHIFT)
+
+#define TEGRA210_MBDRC_FAST_FACTOR_ATTACK_SHIFT 0
+#define TEGRA210_MBDRC_FAST_FACTOR_ATTACK_MASK (0xffff << TEGRA210_MBDRC_FAST_FACTOR_ATTACK_SHIFT)
+
+/* Fields for TEGRA210_MBDRC_IIR_CFG */
+#define TEGRA210_MBDRC_IIR_CFG_NUM_STAGES_SHIFT 0
+#define TEGRA210_MBDRC_IIR_CFG_NUM_STAGES_MASK (0xf << TEGRA210_MBDRC_IIR_CFG_NUM_STAGES_SHIFT)
+
+/* Fields for TEGRA210_MBDRC_IN_ATTACK */
+#define TEGRA210_MBDRC_IN_ATTACK_TC_SHIFT 0
+#define TEGRA210_MBDRC_IN_ATTACK_TC_MASK (0xffffffff << TEGRA210_MBDRC_IN_ATTACK_TC_SHIFT)
+
+/* Fields for TEGRA210_MBDRC_IN_RELEASE */
+#define TEGRA210_MBDRC_IN_RELEASE_TC_SHIFT 0
+#define TEGRA210_MBDRC_IN_RELEASE_TC_MASK (0xffffffff << TEGRA210_MBDRC_IN_RELEASE_TC_SHIFT)
+
+/* Fields for TEGRA210_MBDRC_FAST_ATTACK */
+#define TEGRA210_MBDRC_FAST_ATTACK_TC_SHIFT 0
+#define TEGRA210_MBDRC_FAST_ATTACK_TC_MASK (0xffffffff << TEGRA210_MBDRC_FAST_ATTACK_TC_SHIFT)
+
+/* Fields for TEGRA210_MBDRC_IN_THRESHOLD / TEGRA210_MBDRC_OUT_THRESHOLD */
+#define TEGRA210_MBDRC_THRESH_4TH_SHIFT 24
+#define TEGRA210_MBDRC_THRESH_4TH_MASK (0xff << TEGRA210_MBDRC_THRESH_4TH_SHIFT)
+
+#define TEGRA210_MBDRC_THRESH_3RD_SHIFT 16
+#define TEGRA210_MBDRC_THRESH_3RD_MASK (0xff << TEGRA210_MBDRC_THRESH_3RD_SHIFT)
+
+#define TEGRA210_MBDRC_THRESH_2ND_SHIFT 8
+#define TEGRA210_MBDRC_THRESH_2ND_MASK (0xff << TEGRA210_MBDRC_THRESH_2ND_SHIFT)
+
+#define TEGRA210_MBDRC_THRESH_1ST_SHIFT 0
+#define TEGRA210_MBDRC_THRESH_1ST_MASK (0xff << TEGRA210_MBDRC_THRESH_1ST_SHIFT)
+
+/* Fields for TEGRA210_MBDRC_RATIO_1ST */
+#define TEGRA210_MBDRC_RATIO_1ST_SHIFT 0
+#define TEGRA210_MBDRC_RATIO_1ST_MASK (0xffff << TEGRA210_MBDRC_RATIO_1ST_SHIFT)
+
+/* Fields for TEGRA210_MBDRC_RATIO_2ND */
+#define TEGRA210_MBDRC_RATIO_2ND_SHIFT 0
+#define TEGRA210_MBDRC_RATIO_2ND_MASK (0xffff << TEGRA210_MBDRC_RATIO_2ND_SHIFT)
+
+/* Fields for TEGRA210_MBDRC_RATIO_3RD */
+#define TEGRA210_MBDRC_RATIO_3RD_SHIFT 0
+#define TEGRA210_MBDRC_RATIO_3RD_MASK (0xffff << TEGRA210_MBDRC_RATIO_3RD_SHIFT)
+
+/* Fields for TEGRA210_MBDRC_RATIO_4TH */
+#define TEGRA210_MBDRC_RATIO_4TH_SHIFT 0
+#define TEGRA210_MBDRC_RATIO_4TH_MASK (0xffff << TEGRA210_MBDRC_RATIO_4TH_SHIFT)
+
+/* Fields for TEGRA210_MBDRC_RATIO_5TH */
+#define TEGRA210_MBDRC_RATIO_5TH_SHIFT 0
+#define TEGRA210_MBDRC_RATIO_5TH_MASK (0xffff << TEGRA210_MBDRC_RATIO_5TH_SHIFT)
+
+/* Fields for TEGRA210_MBDRC_MAKEUP_GAIN */
+#define TEGRA210_MBDRC_MAKEUP_GAIN_SHIFT 0
+#define TEGRA210_MBDRC_MAKEUP_GAIN_MASK (0x3f << TEGRA210_MBDRC_MAKEUP_GAIN_SHIFT)
+
+/* Fields for TEGRA210_MBDRC_INIT_GAIN */
+#define TEGRA210_MBDRC_INIT_GAIN_SHIFT 0
+#define TEGRA210_MBDRC_INIT_GAIN_MASK (0xffffffff << TEGRA210_MBDRC_INIT_GAIN_SHIFT)
+
+/* Fields for TEGRA210_MBDRC_GAIN_ATTACK */
+#define TEGRA210_MBDRC_GAIN_ATTACK_SHIFT 0
+#define TEGRA210_MBDRC_GAIN_ATTACK_MASK (0xffffffff << TEGRA210_MBDRC_GAIN_ATTACK_SHIFT)
+
+/* Fields for TEGRA210_MBDRC_GAIN_RELEASE */
+#define TEGRA210_MBDRC_GAIN_RELEASE_SHIFT 0
+#define TEGRA210_MBDRC_GAIN_RELEASE_MASK (0xffffffff << TEGRA210_MBDRC_GAIN_RELEASE_SHIFT)
+
+/* Fields for TEGRA210_MBDRC_FAST_RELEASE */
+#define TEGRA210_MBDRC_FAST_RELEASE_SHIFT 0
+#define TEGRA210_MBDRC_FAST_RELEASE_MASK (0xffffffff << TEGRA210_MBDRC_FAST_RELEASE_SHIFT)
+
+#define TEGRA210_MBDRC_RAM_CTRL_RW_READ 0
+#define TEGRA210_MBDRC_RAM_CTRL_RW_WRITE (1 << 14)
+#define TEGRA210_MBDRC_RAM_CTRL_ADDR_INIT_EN (1 << 13)
+#define TEGRA210_MBDRC_RAM_CTRL_SEQ_ACCESS_EN (1 << 12)
+#define TEGRA210_MBDRC_RAM_CTRL_RAM_ADDR_MASK 0x1ff
+
+/*
+ * The order and size of the elements in the following structures must not
+ * be altered; they are based on the PEQ coefficient RAM and shift RAM
+ * layout.
+ */
+#define TEGRA210_MBDRC_THRESHOLD_NUM 4
+#define TEGRA210_MBDRC_RATIO_NUM (TEGRA210_MBDRC_THRESHOLD_NUM + 1)
+#define TEGRA210_MBDRC_MAX_BIQUAD_STAGES 8
+
+/* The order of these enums matches the order of the band-specific HW registers */
+enum {
+ MBDRC_LOW_BAND,
+ MBDRC_MID_BAND,
+ MBDRC_HIGH_BAND,
+ MBDRC_NUM_BAND,
+};
+
+struct tegra210_mbdrc_band_params {
+ u32 band;
+ u32 iir_stages;
+ u32 in_attack_tc;
+ u32 in_release_tc;
+ u32 fast_attack_tc;
+ u32 in_threshold[TEGRA210_MBDRC_THRESHOLD_NUM];
+ u32 out_threshold[TEGRA210_MBDRC_THRESHOLD_NUM];
+ u32 ratio[TEGRA210_MBDRC_RATIO_NUM];
+ u32 makeup_gain;
+ u32 gain_init;
+ u32 gain_attack_tc;
+ u32 gain_release_tc;
+ u32 fast_release_tc;
+ /* For biquad_params[][5] the coefficient order is b0, b1, a0, a1, a2 */
+ u32 biquad_params[TEGRA210_MBDRC_MAX_BIQUAD_STAGES * 5];
+};
+
+struct tegra210_mbdrc_config {
+ unsigned int mode;
+ unsigned int rms_off;
+ unsigned int peak_rms_mode;
+ unsigned int filter_structure;
+ unsigned int shift_ctrl;
+ unsigned int frame_size;
+ unsigned int channel_mask;
+ unsigned int fa_factor; /* Fast attack factor */
+ unsigned int fr_factor; /* Fast release factor */
+ struct tegra210_mbdrc_band_params band_params[MBDRC_NUM_BAND];
+};
+
+int tegra210_mbdrc_regmap_init(struct platform_device *pdev);
+int tegra210_mbdrc_component_init(struct snd_soc_component *cmpnt);
+int tegra210_mbdrc_hw_params(struct snd_soc_component *cmpnt);
+
+#endif
diff --git a/sound/soc/tegra/tegra210_ope.c b/sound/soc/tegra/tegra210_ope.c
new file mode 100644
index 000000000000..3dd2bdec657b
--- /dev/null
+++ b/sound/soc/tegra/tegra210_ope.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// tegra210_ope.c - Tegra210 OPE driver
+//
+// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include "tegra210_mbdrc.h"
+#include "tegra210_ope.h"
+#include "tegra210_peq.h"
+#include "tegra_cif.h"
+
+static const struct reg_default tegra210_ope_reg_defaults[] = {
+ { TEGRA210_OPE_RX_INT_MASK, 0x00000001},
+ { TEGRA210_OPE_RX_CIF_CTRL, 0x00007700},
+ { TEGRA210_OPE_TX_INT_MASK, 0x00000001},
+ { TEGRA210_OPE_TX_CIF_CTRL, 0x00007700},
+ { TEGRA210_OPE_CG, 0x1},
+};
+
+static int tegra210_ope_set_audio_cif(struct tegra210_ope *ope,
+ struct snd_pcm_hw_params *params,
+ unsigned int reg)
+{
+ int channels, audio_bits;
+ struct tegra_cif_conf cif_conf;
+
+ memset(&cif_conf, 0, sizeof(struct tegra_cif_conf));
+
+ channels = params_channels(params);
+ if (channels < 2)
+ return -EINVAL;
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ audio_bits = TEGRA_ACIF_BITS_16;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ audio_bits = TEGRA_ACIF_BITS_32;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cif_conf.audio_ch = channels;
+ cif_conf.client_ch = channels;
+ cif_conf.audio_bits = audio_bits;
+ cif_conf.client_bits = audio_bits;
+
+ tegra_set_cif(ope->regmap, reg, &cif_conf);
+
+ return 0;
+}
+
+static int tegra210_ope_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct device *dev = dai->dev;
+ struct tegra210_ope *ope = snd_soc_dai_get_drvdata(dai);
+ int err;
+
+ /* Set RX and TX CIF */
+ err = tegra210_ope_set_audio_cif(ope, params,
+ TEGRA210_OPE_RX_CIF_CTRL);
+ if (err) {
+ dev_err(dev, "Can't set OPE RX CIF: %d\n", err);
+ return err;
+ }
+
+ err = tegra210_ope_set_audio_cif(ope, params,
+ TEGRA210_OPE_TX_CIF_CTRL);
+ if (err) {
+ dev_err(dev, "Can't set OPE TX CIF: %d\n", err);
+ return err;
+ }
+
+ tegra210_mbdrc_hw_params(dai->component);
+
+ return err;
+}
+
+static int tegra210_ope_component_probe(struct snd_soc_component *cmpnt)
+{
+ struct tegra210_ope *ope = dev_get_drvdata(cmpnt->dev);
+
+ tegra210_peq_component_init(cmpnt);
+ tegra210_mbdrc_component_init(cmpnt);
+
+ /*
+ * The OPE, PEQ and MBDRC functionalities are combined under one
+ * device registered by the OPE driver. The OPE HW block contains
+ * the PEQ and MBDRC sub-blocks, but the driver registers a
+ * separate regmap interface for each of them. The ASoC core relies
+ * on dev_get_regmap() to populate a component's regmap field, and
+ * a component can hold only one regmap reference. Since the DAPM
+ * routes depend on the OPE regmap alone, the explicit assignment
+ * below highlights this and lets the ASoC core access the correct
+ * regmap during DAPM path setup.
+ */
+ snd_soc_component_init_regmap(cmpnt, ope->regmap);
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops tegra210_ope_dai_ops = {
+ .hw_params = tegra210_ope_hw_params,
+};
+
+static struct snd_soc_dai_driver tegra210_ope_dais[] = {
+ {
+ .name = "OPE-RX-CIF",
+ .playback = {
+ .stream_name = "RX-CIF-Playback",
+ .channels_min = 1,
+ .channels_max = 8,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
+ },
+ .capture = {
+ .stream_name = "RX-CIF-Capture",
+ .channels_min = 1,
+ .channels_max = 8,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
+ },
+ },
+ {
+ .name = "OPE-TX-CIF",
+ .playback = {
+ .stream_name = "TX-CIF-Playback",
+ .channels_min = 1,
+ .channels_max = 8,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
+ },
+ .capture = {
+ .stream_name = "TX-CIF-Capture",
+ .channels_min = 1,
+ .channels_max = 8,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
+ },
+ .ops = &tegra210_ope_dai_ops,
+ }
+};
+
+static const struct snd_soc_dapm_widget tegra210_ope_widgets[] = {
+ SND_SOC_DAPM_AIF_IN("RX", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("TX", NULL, 0, TEGRA210_OPE_ENABLE,
+ TEGRA210_OPE_EN_SHIFT, 0),
+};
+
+#define OPE_ROUTES(sname) \
+ { "RX XBAR-" sname, NULL, "XBAR-TX" }, \
+ { "RX-CIF-" sname, NULL, "RX XBAR-" sname }, \
+ { "RX", NULL, "RX-CIF-" sname }, \
+ { "TX-CIF-" sname, NULL, "TX" }, \
+ { "TX XBAR-" sname, NULL, "TX-CIF-" sname }, \
+ { "XBAR-RX", NULL, "TX XBAR-" sname }
+
+static const struct snd_soc_dapm_route tegra210_ope_routes[] = {
+ { "TX", NULL, "RX" },
+ OPE_ROUTES("Playback"),
+ OPE_ROUTES("Capture"),
+};
+
+static const char * const tegra210_ope_data_dir_text[] = {
+ "MBDRC to PEQ",
+ "PEQ to MBDRC"
+};
+
+static const struct soc_enum tegra210_ope_data_dir_enum =
+ SOC_ENUM_SINGLE(TEGRA210_OPE_DIR, TEGRA210_OPE_DIR_SHIFT,
+ 2, tegra210_ope_data_dir_text);
+
+static int tegra210_ope_get_data_dir(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_ope *ope = snd_soc_component_get_drvdata(cmpnt);
+
+ ucontrol->value.enumerated.item[0] = ope->data_dir;
+
+ return 0;
+}
+
+static int tegra210_ope_put_data_dir(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_ope *ope = snd_soc_component_get_drvdata(cmpnt);
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == ope->data_dir)
+ return 0;
+
+ ope->data_dir = value;
+
+ return 1;
+}
+
+static const struct snd_kcontrol_new tegra210_ope_controls[] = {
+ SOC_ENUM_EXT("Data Flow Direction", tegra210_ope_data_dir_enum,
+ tegra210_ope_get_data_dir, tegra210_ope_put_data_dir),
+};
+
+static const struct snd_soc_component_driver tegra210_ope_cmpnt = {
+ .probe = tegra210_ope_component_probe,
+ .dapm_widgets = tegra210_ope_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(tegra210_ope_widgets),
+ .dapm_routes = tegra210_ope_routes,
+ .num_dapm_routes = ARRAY_SIZE(tegra210_ope_routes),
+ .controls = tegra210_ope_controls,
+ .num_controls = ARRAY_SIZE(tegra210_ope_controls),
+};
+
+static bool tegra210_ope_wr_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TEGRA210_OPE_RX_INT_MASK ... TEGRA210_OPE_RX_CIF_CTRL:
+ case TEGRA210_OPE_TX_INT_MASK ... TEGRA210_OPE_TX_CIF_CTRL:
+ case TEGRA210_OPE_ENABLE ... TEGRA210_OPE_CG:
+ case TEGRA210_OPE_DIR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool tegra210_ope_rd_reg(struct device *dev, unsigned int reg)
+{
+ if (tegra210_ope_wr_reg(dev, reg))
+ return true;
+
+ switch (reg) {
+ case TEGRA210_OPE_RX_STATUS:
+ case TEGRA210_OPE_RX_INT_STATUS:
+ case TEGRA210_OPE_TX_STATUS:
+ case TEGRA210_OPE_TX_INT_STATUS:
+ case TEGRA210_OPE_STATUS:
+ case TEGRA210_OPE_INT_STATUS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool tegra210_ope_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TEGRA210_OPE_RX_STATUS:
+ case TEGRA210_OPE_RX_INT_STATUS:
+ case TEGRA210_OPE_TX_STATUS:
+ case TEGRA210_OPE_TX_INT_STATUS:
+ case TEGRA210_OPE_SOFT_RESET:
+ case TEGRA210_OPE_STATUS:
+ case TEGRA210_OPE_INT_STATUS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config tegra210_ope_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = TEGRA210_OPE_DIR,
+ .writeable_reg = tegra210_ope_wr_reg,
+ .readable_reg = tegra210_ope_rd_reg,
+ .volatile_reg = tegra210_ope_volatile_reg,
+ .reg_defaults = tegra210_ope_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(tegra210_ope_reg_defaults),
+ .cache_type = REGCACHE_FLAT,
+};
+
+static int tegra210_ope_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tegra210_ope *ope;
+ void __iomem *regs;
+ int err;
+
+ ope = devm_kzalloc(dev, sizeof(*ope), GFP_KERNEL);
+ if (!ope)
+ return -ENOMEM;
+
+ regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ ope->regmap = devm_regmap_init_mmio(dev, regs,
+ &tegra210_ope_regmap_config);
+ if (IS_ERR(ope->regmap)) {
+ dev_err(dev, "regmap init failed\n");
+ return PTR_ERR(ope->regmap);
+ }
+
+ regcache_cache_only(ope->regmap, true);
+
+ dev_set_drvdata(dev, ope);
+
+ err = tegra210_peq_regmap_init(pdev);
+ if (err < 0) {
+ dev_err(dev, "PEQ init failed\n");
+ return err;
+ }
+
+ err = tegra210_mbdrc_regmap_init(pdev);
+ if (err < 0) {
+ dev_err(dev, "MBDRC init failed\n");
+ return err;
+ }
+
+ err = devm_snd_soc_register_component(dev, &tegra210_ope_cmpnt,
+ tegra210_ope_dais,
+ ARRAY_SIZE(tegra210_ope_dais));
+ if (err) {
+ dev_err(dev, "can't register OPE component, err: %d\n", err);
+ return err;
+ }
+
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+
+static int tegra210_ope_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static int __maybe_unused tegra210_ope_runtime_suspend(struct device *dev)
+{
+ struct tegra210_ope *ope = dev_get_drvdata(dev);
+
+ tegra210_peq_save(ope->peq_regmap, ope->peq_biquad_gains,
+ ope->peq_biquad_shifts);
+
+ regcache_cache_only(ope->mbdrc_regmap, true);
+ regcache_cache_only(ope->peq_regmap, true);
+ regcache_cache_only(ope->regmap, true);
+
+ regcache_mark_dirty(ope->regmap);
+ regcache_mark_dirty(ope->peq_regmap);
+ regcache_mark_dirty(ope->mbdrc_regmap);
+
+ return 0;
+}
+
+static int __maybe_unused tegra210_ope_runtime_resume(struct device *dev)
+{
+ struct tegra210_ope *ope = dev_get_drvdata(dev);
+
+ regcache_cache_only(ope->regmap, false);
+ regcache_cache_only(ope->peq_regmap, false);
+ regcache_cache_only(ope->mbdrc_regmap, false);
+
+ regcache_sync(ope->regmap);
+ regcache_sync(ope->peq_regmap);
+ regcache_sync(ope->mbdrc_regmap);
+
+ tegra210_peq_restore(ope->peq_regmap, ope->peq_biquad_gains,
+ ope->peq_biquad_shifts);
+
+ return 0;
+}
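One inference worth spelling out (not stated in the patch itself): the PEQ coefficient and shift RAMs are reached through registers marked volatile/precious in tegra210_peq_regmap_config, so regcache_sync() alone cannot restore their contents; that is why tegra210_peq_save()/tegra210_peq_restore() bracket the cache-only toggles in the two callbacks above.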
+
+static const struct dev_pm_ops tegra210_ope_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra210_ope_runtime_suspend,
+ tegra210_ope_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
+static const struct of_device_id tegra210_ope_of_match[] = {
+ { .compatible = "nvidia,tegra210-ope" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, tegra210_ope_of_match);
+
+static struct platform_driver tegra210_ope_driver = {
+ .driver = {
+ .name = "tegra210-ope",
+ .of_match_table = tegra210_ope_of_match,
+ .pm = &tegra210_ope_pm_ops,
+ },
+ .probe = tegra210_ope_probe,
+ .remove = tegra210_ope_remove,
+};
+module_platform_driver(tegra210_ope_driver)
+
+MODULE_AUTHOR("Sumit Bhattacharya <sumitb@nvidia.com>");
+MODULE_DESCRIPTION("Tegra210 OPE ASoC driver");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/tegra/tegra210_ope.h b/sound/soc/tegra/tegra210_ope.h
new file mode 100644
index 000000000000..2835af6ce631
--- /dev/null
+++ b/sound/soc/tegra/tegra210_ope.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * tegra210_ope.h - Definitions for Tegra210 OPE driver
+ *
+ * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+ *
+ */
+
+#ifndef __TEGRA210_OPE_H__
+#define __TEGRA210_OPE_H__
+
+#include <linux/regmap.h>
+#include <sound/soc.h>
+
+#include "tegra210_peq.h"
+
+/*
+ * OPE_RX registers are with respect to XBAR.
+ * The data comes from XBAR to OPE
+ */
+#define TEGRA210_OPE_RX_STATUS 0xc
+#define TEGRA210_OPE_RX_INT_STATUS 0x10
+#define TEGRA210_OPE_RX_INT_MASK 0x14
+#define TEGRA210_OPE_RX_INT_SET 0x18
+#define TEGRA210_OPE_RX_INT_CLEAR 0x1c
+#define TEGRA210_OPE_RX_CIF_CTRL 0x20
+
+/*
+ * OPE_TX registers are with respect to XBAR.
+ * The data goes out from OPE to XBAR
+ */
+#define TEGRA210_OPE_TX_STATUS 0x4c
+#define TEGRA210_OPE_TX_INT_STATUS 0x50
+#define TEGRA210_OPE_TX_INT_MASK 0x54
+#define TEGRA210_OPE_TX_INT_SET 0x58
+#define TEGRA210_OPE_TX_INT_CLEAR 0x5c
+#define TEGRA210_OPE_TX_CIF_CTRL 0x60
+
+/* OPE Global registers */
+#define TEGRA210_OPE_ENABLE 0x80
+#define TEGRA210_OPE_SOFT_RESET 0x84
+#define TEGRA210_OPE_CG 0x88
+#define TEGRA210_OPE_STATUS 0x8c
+#define TEGRA210_OPE_INT_STATUS 0x90
+#define TEGRA210_OPE_DIR 0x94
+
+/* Fields for TEGRA210_OPE_ENABLE */
+#define TEGRA210_OPE_EN_SHIFT 0
+#define TEGRA210_OPE_EN (1 << TEGRA210_OPE_EN_SHIFT)
+
+/* Fields for TEGRA210_OPE_SOFT_RESET */
+#define TEGRA210_OPE_SOFT_RESET_SHIFT 0
+#define TEGRA210_OPE_SOFT_RESET_EN (1 << TEGRA210_OPE_SOFT_RESET_SHIFT)
+
+#define TEGRA210_OPE_DIR_SHIFT 0
+
+struct tegra210_ope {
+ struct regmap *regmap;
+ struct regmap *peq_regmap;
+ struct regmap *mbdrc_regmap;
+ u32 peq_biquad_gains[TEGRA210_PEQ_GAIN_PARAM_SIZE_PER_CH];
+ u32 peq_biquad_shifts[TEGRA210_PEQ_SHIFT_PARAM_SIZE_PER_CH];
+ unsigned int data_dir;
+};
+
+/* Extension of soc_bytes structure defined in sound/soc.h */
+struct tegra_soc_bytes {
+ struct soc_bytes soc;
+ u32 shift; /* Used as an offset for AHUB RAM-related programming */
+};
+
+/* Utility structures for using mixer control of type snd_soc_bytes */
+#define TEGRA_SOC_BYTES_EXT(xname, xbase, xregs, xshift, xmask, \
+ xhandler_get, xhandler_put, xinfo) \
+{ \
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+ .name = xname, \
+ .info = xinfo, \
+ .get = xhandler_get, \
+ .put = xhandler_put, \
+ .private_value = ((unsigned long)&(struct tegra_soc_bytes) \
+ { \
+ .soc.base = xbase, \
+ .soc.num_regs = xregs, \
+ .soc.mask = xmask, \
+ .shift = xshift \
+ }) \
+}
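To make the macro concrete, here is how one of the byte controls defined earlier in this patch resolves (an illustration, not additional code in the patch):

    /*
     * Example: the "MBDRC IIR Stages" control in tegra210_mbdrc.c uses
     *   .soc.base = TEGRA210_MBDRC_IIR_CFG, .soc.num_regs = 3 (one per band),
     *   .soc.mask = TEGRA210_MBDRC_IIR_CFG_NUM_STAGES_MASK and
     *   .shift    = TEGRA210_MBDRC_IIR_CFG_NUM_STAGES_SHIFT,
     * so the get/put handlers walk the three per-band IIR_CFG registers,
     * 4 bytes apart.
     */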
+
+#endif
diff --git a/sound/soc/tegra/tegra210_peq.c b/sound/soc/tegra/tegra210_peq.c
new file mode 100644
index 000000000000..205d956abb42
--- /dev/null
+++ b/sound/soc/tegra/tegra210_peq.c
@@ -0,0 +1,434 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// tegra210_peq.c - Tegra210 PEQ driver
+//
+// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include "tegra210_ope.h"
+#include "tegra210_peq.h"
+
+static const struct reg_default tegra210_peq_reg_defaults[] = {
+ { TEGRA210_PEQ_CFG, 0x00000013},
+ { TEGRA210_PEQ_CFG_RAM_CTRL, 0x00004000},
+ { TEGRA210_PEQ_CFG_RAM_SHIFT_CTRL, 0x00004000},
+};
+
+static const u32 biquad_init_gains[TEGRA210_PEQ_GAIN_PARAM_SIZE_PER_CH] = {
+ 1495012349, /* Pre-gain */
+
+ /* Gains : b0, b1, a0, a1, a2 */
+ 536870912, -1073741824, 536870912, 2143508246, -1069773768, /* Band-0 */
+ 134217728, -265414508, 131766272, 2140402222, -1071252997, /* Band-1 */
+ 268435456, -233515765, -33935948, 1839817267, -773826124, /* Band-2 */
+ 536870912, -672537913, 139851540, 1886437554, -824433167, /* Band-3 */
+ 268435456, -114439279, 173723964, 205743566, 278809729, /* Band-4 */
+ 1, 0, 0, 0, 0, /* Band-5 */
+ 1, 0, 0, 0, 0, /* Band-6 */
+ 1, 0, 0, 0, 0, /* Band-7 */
+ 1, 0, 0, 0, 0, /* Band-8 */
+ 1, 0, 0, 0, 0, /* Band-9 */
+ 1, 0, 0, 0, 0, /* Band-10 */
+ 1, 0, 0, 0, 0, /* Band-11 */
+
+ 963423114, /* Post-gain */
+};
+
+static const u32 biquad_init_shifts[TEGRA210_PEQ_SHIFT_PARAM_SIZE_PER_CH] = {
+ 23, /* Pre-shift */
+ 30, 30, 30, 30, 30, 0, 0, 0, 0, 0, 0, 0, /* Shift for bands */
+ 28, /* Post-shift */
+};
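Assuming the per-channel size macros in tegra210_peq.h match the lengths of these initializers (the header itself is not shown in this hunk), the layout works out as:

    /*
     * Assumed layout: TEGRA210_PEQ_GAIN_PARAM_SIZE_PER_CH = 1 pre-gain +
     * 12 bands * 5 coefficients + 1 post-gain = 62 words, and
     * TEGRA210_PEQ_SHIFT_PARAM_SIZE_PER_CH = 1 + 12 + 1 = 14 words.
     */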
+
+static s32 biquad_coeff_buffer[TEGRA210_PEQ_GAIN_PARAM_SIZE_PER_CH];
+
+static void tegra210_peq_read_ram(struct regmap *regmap, unsigned int reg_ctrl,
+ unsigned int reg_data, unsigned int ram_offset,
+ unsigned int *data, size_t size)
+{
+ unsigned int val;
+ unsigned int i;
+
+ val = ram_offset & TEGRA210_PEQ_RAM_CTRL_RAM_ADDR_MASK;
+ val |= TEGRA210_PEQ_RAM_CTRL_ADDR_INIT_EN;
+ val |= TEGRA210_PEQ_RAM_CTRL_SEQ_ACCESS_EN;
+ val |= TEGRA210_PEQ_RAM_CTRL_RW_READ;
+
+ regmap_write(regmap, reg_ctrl, val);
+
+ /*
+ * Since all AHUB non-IO modules run from the same AHUB clock, it is
+ * not necessary to check the AHUB read-busy bit after every read.
+ */
+ for (i = 0; i < size; i++)
+ regmap_read(regmap, reg_data, &data[i]);
+}
+
+static void tegra210_peq_write_ram(struct regmap *regmap, unsigned int reg_ctrl,
+ unsigned int reg_data, unsigned int ram_offset,
+ unsigned int *data, size_t size)
+{
+ unsigned int val;
+ unsigned int i;
+
+ val = ram_offset & TEGRA210_PEQ_RAM_CTRL_RAM_ADDR_MASK;
+ val |= TEGRA210_PEQ_RAM_CTRL_ADDR_INIT_EN;
+ val |= TEGRA210_PEQ_RAM_CTRL_SEQ_ACCESS_EN;
+ val |= TEGRA210_PEQ_RAM_CTRL_RW_WRITE;
+
+ regmap_write(regmap, reg_ctrl, val);
+
+ for (i = 0; i < size; i++)
+ regmap_write(regmap, reg_data, data[i]);
+}
+
+static int tegra210_peq_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_ope *ope = snd_soc_component_get_drvdata(cmpnt);
+ unsigned int mask = (1 << fls(mc->max)) - 1;
+ unsigned int val;
+
+ regmap_read(ope->peq_regmap, mc->reg, &val);
+
+ ucontrol->value.integer.value[0] = (val >> mc->shift) & mask;
+
+ if (!mc->invert)
+ return 0;
+
+ ucontrol->value.integer.value[0] =
+ mc->max - ucontrol->value.integer.value[0];
+
+ return 0;
+}
+
+static int tegra210_peq_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_ope *ope = snd_soc_component_get_drvdata(cmpnt);
+ unsigned int mask = (1 << fls(mc->max)) - 1;
+ bool change = false;
+ unsigned int val;
+
+ val = (ucontrol->value.integer.value[0] & mask);
+
+ if (mc->invert)
+ val = mc->max - val;
+
+ val = val << mc->shift;
+
+ regmap_update_bits_check(ope->peq_regmap, mc->reg, (mask << mc->shift),
+ val, &change);
+
+ return change ? 1 : 0;
+}
+
+static int tegra210_peq_ram_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct tegra_soc_bytes *params = (void *)kcontrol->private_value;
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_ope *ope = snd_soc_component_get_drvdata(cmpnt);
+ u32 i, reg_ctrl = params->soc.base;
+ u32 reg_data = reg_ctrl + cmpnt->val_bytes;
+ s32 *data = (s32 *)biquad_coeff_buffer;
+
+ pm_runtime_get_sync(cmpnt->dev);
+
+ tegra210_peq_read_ram(ope->peq_regmap, reg_ctrl, reg_data,
+ params->shift, data, params->soc.num_regs);
+
+ pm_runtime_put_sync(cmpnt->dev);
+
+ for (i = 0; i < params->soc.num_regs; i++)
+ ucontrol->value.integer.value[i] = (long)data[i];
+
+ return 0;
+}
+
+static int tegra210_peq_ram_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct tegra_soc_bytes *params = (void *)kcontrol->private_value;
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_ope *ope = snd_soc_component_get_drvdata(cmpnt);
+ u32 i, reg_ctrl = params->soc.base;
+ u32 reg_data = reg_ctrl + cmpnt->val_bytes;
+ s32 *data = (s32 *)biquad_coeff_buffer;
+
+ for (i = 0; i < params->soc.num_regs; i++)
+ data[i] = (s32)ucontrol->value.integer.value[i];
+
+ pm_runtime_get_sync(cmpnt->dev);
+
+ tegra210_peq_write_ram(ope->peq_regmap, reg_ctrl, reg_data,
+ params->shift, data, params->soc.num_regs);
+
+ pm_runtime_put_sync(cmpnt->dev);
+
+ return 1;
+}
+
+static int tegra210_peq_param_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ struct soc_bytes *params = (void *)kcontrol->private_value;
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->value.integer.min = INT_MIN;
+ uinfo->value.integer.max = INT_MAX;
+ uinfo->count = params->num_regs;
+
+ return 0;
+}
+
+#define TEGRA210_PEQ_GAIN_PARAMS_CTRL(chan) \
+ TEGRA_SOC_BYTES_EXT("PEQ Channel-" #chan " Biquad Gain Params", \
+ TEGRA210_PEQ_CFG_RAM_CTRL, \
+ TEGRA210_PEQ_GAIN_PARAM_SIZE_PER_CH, \
+ (TEGRA210_PEQ_GAIN_PARAM_SIZE_PER_CH * chan), 0xffffffff, \
+ tegra210_peq_ram_get, tegra210_peq_ram_put, \
+ tegra210_peq_param_info)
+
+#define TEGRA210_PEQ_SHIFT_PARAMS_CTRL(chan) \
+ TEGRA_SOC_BYTES_EXT("PEQ Channel-" #chan " Biquad Shift Params", \
+ TEGRA210_PEQ_CFG_RAM_SHIFT_CTRL, \
+ TEGRA210_PEQ_SHIFT_PARAM_SIZE_PER_CH, \
+ (TEGRA210_PEQ_SHIFT_PARAM_SIZE_PER_CH * chan), 0x1f, \
+ tegra210_peq_ram_get, tegra210_peq_ram_put, \
+ tegra210_peq_param_info)
+
+static const struct snd_kcontrol_new tegra210_peq_controls[] = {
+ SOC_SINGLE_EXT("PEQ Active", TEGRA210_PEQ_CFG,
+ TEGRA210_PEQ_CFG_MODE_SHIFT, 1, 0,
+ tegra210_peq_get, tegra210_peq_put),
+
+ SOC_SINGLE_EXT("PEQ Biquad Stages", TEGRA210_PEQ_CFG,
+ TEGRA210_PEQ_CFG_BIQUAD_STAGES_SHIFT,
+ TEGRA210_PEQ_MAX_BIQUAD_STAGES - 1, 0,
+ tegra210_peq_get, tegra210_peq_put),
+
+ TEGRA210_PEQ_GAIN_PARAMS_CTRL(0),
+ TEGRA210_PEQ_GAIN_PARAMS_CTRL(1),
+ TEGRA210_PEQ_GAIN_PARAMS_CTRL(2),
+ TEGRA210_PEQ_GAIN_PARAMS_CTRL(3),
+ TEGRA210_PEQ_GAIN_PARAMS_CTRL(4),
+ TEGRA210_PEQ_GAIN_PARAMS_CTRL(5),
+ TEGRA210_PEQ_GAIN_PARAMS_CTRL(6),
+ TEGRA210_PEQ_GAIN_PARAMS_CTRL(7),
+
+ TEGRA210_PEQ_SHIFT_PARAMS_CTRL(0),
+ TEGRA210_PEQ_SHIFT_PARAMS_CTRL(1),
+ TEGRA210_PEQ_SHIFT_PARAMS_CTRL(2),
+ TEGRA210_PEQ_SHIFT_PARAMS_CTRL(3),
+ TEGRA210_PEQ_SHIFT_PARAMS_CTRL(4),
+ TEGRA210_PEQ_SHIFT_PARAMS_CTRL(5),
+ TEGRA210_PEQ_SHIFT_PARAMS_CTRL(6),
+ TEGRA210_PEQ_SHIFT_PARAMS_CTRL(7),
+};
+
+static bool tegra210_peq_wr_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TEGRA210_PEQ_SOFT_RESET:
+ case TEGRA210_PEQ_CG:
+ case TEGRA210_PEQ_CFG ... TEGRA210_PEQ_CFG_RAM_SHIFT_DATA:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool tegra210_peq_rd_reg(struct device *dev, unsigned int reg)
+{
+ if (tegra210_peq_wr_reg(dev, reg))
+ return true;
+
+ switch (reg) {
+ case TEGRA210_PEQ_STATUS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool tegra210_peq_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TEGRA210_PEQ_SOFT_RESET:
+ case TEGRA210_PEQ_STATUS:
+ case TEGRA210_PEQ_CFG_RAM_CTRL ... TEGRA210_PEQ_CFG_RAM_SHIFT_DATA:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool tegra210_peq_precious_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TEGRA210_PEQ_CFG_RAM_DATA:
+ case TEGRA210_PEQ_CFG_RAM_SHIFT_DATA:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config tegra210_peq_regmap_config = {
+ .name = "peq",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = TEGRA210_PEQ_CFG_RAM_SHIFT_DATA,
+ .writeable_reg = tegra210_peq_wr_reg,
+ .readable_reg = tegra210_peq_rd_reg,
+ .volatile_reg = tegra210_peq_volatile_reg,
+ .precious_reg = tegra210_peq_precious_reg,
+ .reg_defaults = tegra210_peq_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(tegra210_peq_reg_defaults),
+ .cache_type = REGCACHE_FLAT,
+};
+
+void tegra210_peq_restore(struct regmap *regmap, u32 *biquad_gains,
+ u32 *biquad_shifts)
+{
+ unsigned int i;
+
+ for (i = 0; i < TEGRA210_PEQ_MAX_CHANNELS; i++) {
+ tegra210_peq_write_ram(regmap, TEGRA210_PEQ_CFG_RAM_CTRL,
+ TEGRA210_PEQ_CFG_RAM_DATA,
+ (i * TEGRA210_PEQ_GAIN_PARAM_SIZE_PER_CH),
+ biquad_gains,
+ TEGRA210_PEQ_GAIN_PARAM_SIZE_PER_CH);
+
+ tegra210_peq_write_ram(regmap,
+ TEGRA210_PEQ_CFG_RAM_SHIFT_CTRL,
+ TEGRA210_PEQ_CFG_RAM_SHIFT_DATA,
+ (i * TEGRA210_PEQ_SHIFT_PARAM_SIZE_PER_CH),
+ biquad_shifts,
+ TEGRA210_PEQ_SHIFT_PARAM_SIZE_PER_CH);
+
+ }
+}
+
+void tegra210_peq_save(struct regmap *regmap, u32 *biquad_gains,
+ u32 *biquad_shifts)
+{
+ unsigned int i;
+
+ for (i = 0; i < TEGRA210_PEQ_MAX_CHANNELS; i++) {
+ tegra210_peq_read_ram(regmap,
+ TEGRA210_PEQ_CFG_RAM_CTRL,
+ TEGRA210_PEQ_CFG_RAM_DATA,
+ (i * TEGRA210_PEQ_GAIN_PARAM_SIZE_PER_CH),
+ biquad_gains,
+ TEGRA210_PEQ_GAIN_PARAM_SIZE_PER_CH);
+
+ tegra210_peq_read_ram(regmap,
+ TEGRA210_PEQ_CFG_RAM_SHIFT_CTRL,
+ TEGRA210_PEQ_CFG_RAM_SHIFT_DATA,
+ (i * TEGRA210_PEQ_SHIFT_PARAM_SIZE_PER_CH),
+ biquad_shifts,
+ TEGRA210_PEQ_SHIFT_PARAM_SIZE_PER_CH);
+ }
+}
+
+int tegra210_peq_component_init(struct snd_soc_component *cmpnt)
+{
+ struct tegra210_ope *ope = snd_soc_component_get_drvdata(cmpnt);
+ unsigned int i;
+
+ pm_runtime_get_sync(cmpnt->dev);
+ regmap_update_bits(ope->peq_regmap, TEGRA210_PEQ_CFG,
+ TEGRA210_PEQ_CFG_MODE_MASK,
+ 0 << TEGRA210_PEQ_CFG_MODE_SHIFT);
+ regmap_update_bits(ope->peq_regmap, TEGRA210_PEQ_CFG,
+ TEGRA210_PEQ_CFG_BIQUAD_STAGES_MASK,
+ (TEGRA210_PEQ_BIQUAD_INIT_STAGE - 1) <<
+ TEGRA210_PEQ_CFG_BIQUAD_STAGES_SHIFT);
+
+ /* Initialize PEQ AHUB RAM with default params */
+ for (i = 0; i < TEGRA210_PEQ_MAX_CHANNELS; i++) {
+
+ /* Set default gain params */
+ tegra210_peq_write_ram(ope->peq_regmap,
+ TEGRA210_PEQ_CFG_RAM_CTRL,
+ TEGRA210_PEQ_CFG_RAM_DATA,
+ (i * TEGRA210_PEQ_GAIN_PARAM_SIZE_PER_CH),
+ (u32 *)&biquad_init_gains,
+ TEGRA210_PEQ_GAIN_PARAM_SIZE_PER_CH);
+
+ /* Set default shift params */
+ tegra210_peq_write_ram(ope->peq_regmap,
+ TEGRA210_PEQ_CFG_RAM_SHIFT_CTRL,
+ TEGRA210_PEQ_CFG_RAM_SHIFT_DATA,
+ (i * TEGRA210_PEQ_SHIFT_PARAM_SIZE_PER_CH),
+ (u32 *)&biquad_init_shifts,
+ TEGRA210_PEQ_SHIFT_PARAM_SIZE_PER_CH);
+
+ }
+
+ pm_runtime_put_sync(cmpnt->dev);
+
+ snd_soc_add_component_controls(cmpnt, tegra210_peq_controls,
+ ARRAY_SIZE(tegra210_peq_controls));
+
+ return 0;
+}
+
+int tegra210_peq_regmap_init(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tegra210_ope *ope = dev_get_drvdata(dev);
+ struct device_node *child;
+ struct resource mem;
+ void __iomem *regs;
+ int err;
+
+ child = of_get_child_by_name(dev->of_node, "equalizer");
+ if (!child)
+ return -ENODEV;
+
+ err = of_address_to_resource(child, 0, &mem);
+ of_node_put(child);
+ if (err < 0) {
+ dev_err(dev, "fail to get PEQ resource\n");
+ return err;
+ }
+
+ mem.flags = IORESOURCE_MEM;
+ regs = devm_ioremap_resource(dev, &mem);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+ ope->peq_regmap = devm_regmap_init_mmio(dev, regs,
+ &tegra210_peq_regmap_config);
+ if (IS_ERR(ope->peq_regmap)) {
+ dev_err(dev, "regmap init failed\n");
+ return PTR_ERR(ope->peq_regmap);
+ }
+
+ regcache_cache_only(ope->peq_regmap, true);
+
+ return 0;
+}
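tegra210_peq_save() and tegra210_peq_restore() exist because the coefficient RAM behind TEGRA210_PEQ_CFG_RAM_CTRL/DATA is not covered by the regmap cache, so its contents must be replayed explicitly around runtime PM. A minimal caller sketch, assuming the OPE driver keeps the saved coefficients in fields named peq_biquad_gains and peq_biquad_shifts (illustrative names, not taken from this patch):

	static int tegra210_ope_runtime_resume_sketch(struct device *dev)
	{
		struct tegra210_ope *ope = dev_get_drvdata(dev);

		/* re-enable register access and replay the cached registers */
		regcache_cache_only(ope->peq_regmap, false);
		regcache_sync(ope->peq_regmap);

		/* AHUB RAM is outside the regcache; rewrite the biquads */
		tegra210_peq_restore(ope->peq_regmap, ope->peq_biquad_gains,
				     ope->peq_biquad_shifts);

		return 0;
	}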
diff --git a/sound/soc/tegra/tegra210_peq.h b/sound/soc/tegra/tegra210_peq.h
new file mode 100644
index 000000000000..6d3de4ff05cc
--- /dev/null
+++ b/sound/soc/tegra/tegra210_peq.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * tegra210_peq.h - Definitions for Tegra210 PEQ driver
+ *
+ * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+ *
+ */
+
+#ifndef __TEGRA210_PEQ_H__
+#define __TEGRA210_PEQ_H__
+
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <sound/soc.h>
+
+/* Register offsets from PEQ base */
+#define TEGRA210_PEQ_SOFT_RESET 0x0
+#define TEGRA210_PEQ_CG 0x4
+#define TEGRA210_PEQ_STATUS 0x8
+#define TEGRA210_PEQ_CFG 0xc
+#define TEGRA210_PEQ_CFG_RAM_CTRL 0x10
+#define TEGRA210_PEQ_CFG_RAM_DATA 0x14
+#define TEGRA210_PEQ_CFG_RAM_SHIFT_CTRL 0x18
+#define TEGRA210_PEQ_CFG_RAM_SHIFT_DATA 0x1c
+
+/* Fields in TEGRA210_PEQ_CFG */
+#define TEGRA210_PEQ_CFG_BIQUAD_STAGES_SHIFT 2
+#define TEGRA210_PEQ_CFG_BIQUAD_STAGES_MASK (0xf << TEGRA210_PEQ_CFG_BIQUAD_STAGES_SHIFT)
+
+#define TEGRA210_PEQ_CFG_MODE_SHIFT 0
+#define TEGRA210_PEQ_CFG_MODE_MASK (0x1 << TEGRA210_PEQ_CFG_MODE_SHIFT)
+
+#define TEGRA210_PEQ_RAM_CTRL_RW_READ 0
+#define TEGRA210_PEQ_RAM_CTRL_RW_WRITE (1 << 14)
+#define TEGRA210_PEQ_RAM_CTRL_ADDR_INIT_EN (1 << 13)
+#define TEGRA210_PEQ_RAM_CTRL_SEQ_ACCESS_EN (1 << 12)
+#define TEGRA210_PEQ_RAM_CTRL_RAM_ADDR_MASK 0x1ff
+
+/* PEQ register definition ends here */
+#define TEGRA210_PEQ_MAX_BIQUAD_STAGES 12
+
+#define TEGRA210_PEQ_MAX_CHANNELS 8
+
+#define TEGRA210_PEQ_BIQUAD_INIT_STAGE 5
+
+#define TEGRA210_PEQ_GAIN_PARAM_SIZE_PER_CH (2 + TEGRA210_PEQ_MAX_BIQUAD_STAGES * 5)
+#define TEGRA210_PEQ_SHIFT_PARAM_SIZE_PER_CH (2 + TEGRA210_PEQ_MAX_BIQUAD_STAGES)
+
+int tegra210_peq_regmap_init(struct platform_device *pdev);
+int tegra210_peq_component_init(struct snd_soc_component *cmpnt);
+void tegra210_peq_restore(struct regmap *regmap, u32 *biquad_gains,
+ u32 *biquad_shifts);
+void tegra210_peq_save(struct regmap *regmap, u32 *biquad_gains,
+ u32 *biquad_shifts);
+
+#endif
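The two *_PARAM_SIZE_PER_CH macros pin down the per-channel layout of the coefficient RAM: five gain words and one shift word per biquad stage, plus two extra words each (the "2 +" term). Spelled out with the values above (a check of the arithmetic, nothing beyond what the macros already say):

	gains:  TEGRA210_PEQ_GAIN_PARAM_SIZE_PER_CH  = 2 + 12 * 5 = 62 words per channel
	shifts: TEGRA210_PEQ_SHIFT_PARAM_SIZE_PER_CH = 2 + 12     = 14 words per channel

so channel N's gain block starts at RAM word N * 62 and its shift block at word N * 14, which is exactly the ram_offset that the kcontrol macros and tegra210_peq_save()/restore() pass down.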
diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
index 084a533bf4f2..10cd37096fb3 100644
--- a/sound/soc/tegra/tegra30_i2s.c
+++ b/sound/soc/tegra/tegra30_i2s.c
@@ -87,11 +87,11 @@ static int tegra30_i2s_set_fmt(struct snd_soc_dai *dai,
}
mask |= TEGRA30_I2S_CTRL_MASTER_ENABLE;
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBS_CFS:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_BP_FP:
val |= TEGRA30_I2S_CTRL_MASTER_ENABLE;
break;
- case SND_SOC_DAIFMT_CBM_CFM:
+ case SND_SOC_DAIFMT_BC_FC:
break;
default:
return -EINVAL;
@@ -331,7 +331,8 @@ static const struct snd_soc_dai_driver tegra30_i2s_dai_template = {
};
static const struct snd_soc_component_driver tegra30_i2s_component = {
- .name = DRV_NAME,
+ .name = DRV_NAME,
+ .legacy_dai_naming = 1,
};
static bool tegra30_i2s_wr_rd_reg(struct device *dev, unsigned int reg)
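This hunk, and the davinci/omap/ux500/xtfpga hunks that follow, apply the same mechanical substitution: the codec-centric master/slave names are replaced by clock provider/consumer names expressed from the point of view of the DAI being configured, without touching the case bodies. The mapping used throughout these hunks is:

	/* old codec-centric name      new name, from this DAI's point of view          */
	SND_SOC_DAIFMT_CBS_CFS   ->   SND_SOC_DAIFMT_BP_FP  /* this DAI provides BCLK and FSYNC */
	SND_SOC_DAIFMT_CBS_CFM   ->   SND_SOC_DAIFMT_BP_FC
	SND_SOC_DAIFMT_CBM_CFS   ->   SND_SOC_DAIFMT_BC_FP
	SND_SOC_DAIFMT_CBM_CFM   ->   SND_SOC_DAIFMT_BC_FC  /* this DAI consumes both clocks    */

which is why the branches that used to be commented "cpu is master" now sit under BP_FP, and "codec is master" under BC_FC.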
diff --git a/sound/soc/ti/davinci-i2s.c b/sound/soc/ti/davinci-i2s.c
index 0363a088d2e0..e6e77a5f3c1e 100644
--- a/sound/soc/ti/davinci-i2s.c
+++ b/sound/soc/ti/davinci-i2s.c
@@ -230,15 +230,15 @@ static int davinci_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
dev->fmt = fmt;
/* set master/slave audio interface */
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBS_CFS:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_BP_FP:
/* cpu is master */
pcr = DAVINCI_MCBSP_PCR_FSXM |
DAVINCI_MCBSP_PCR_FSRM |
DAVINCI_MCBSP_PCR_CLKXM |
DAVINCI_MCBSP_PCR_CLKRM;
break;
- case SND_SOC_DAIFMT_CBM_CFS:
+ case SND_SOC_DAIFMT_BC_FP:
pcr = DAVINCI_MCBSP_PCR_FSRM | DAVINCI_MCBSP_PCR_FSXM;
/*
* Selection of the clock input pin that is the
@@ -260,7 +260,7 @@ static int davinci_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
}
break;
- case SND_SOC_DAIFMT_CBM_CFM:
+ case SND_SOC_DAIFMT_BC_FC:
/* codec is master */
pcr = 0;
break;
@@ -395,12 +395,12 @@ static int davinci_i2s_hw_params(struct snd_pcm_substream *substream,
davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr);
}
- master = dev->fmt & SND_SOC_DAIFMT_MASTER_MASK;
+ master = dev->fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK;
fmt = params_format(params);
mcbsp_word_length = asp_word_length[fmt];
switch (master) {
- case SND_SOC_DAIFMT_CBS_CFS:
+ case SND_SOC_DAIFMT_BP_FP:
freq = clk_get_rate(dev->clk);
srgr = DAVINCI_MCBSP_SRGR_FSGM |
DAVINCI_MCBSP_SRGR_CLKSM;
@@ -426,7 +426,7 @@ static int davinci_i2s_hw_params(struct snd_pcm_substream *substream,
clk_div &= 0xFF;
srgr |= clk_div;
break;
- case SND_SOC_DAIFMT_CBM_CFS:
+ case SND_SOC_DAIFMT_BC_FP:
srgr = DAVINCI_MCBSP_SRGR_FSGM;
clk_div = dev->clk_div - 1;
srgr |= DAVINCI_MCBSP_SRGR_FWID(mcbsp_word_length * 8 - 1);
@@ -434,7 +434,7 @@ static int davinci_i2s_hw_params(struct snd_pcm_substream *substream,
clk_div &= 0xFF;
srgr |= clk_div;
break;
- case SND_SOC_DAIFMT_CBM_CFM:
+ case SND_SOC_DAIFMT_BC_FC:
/* Clock and frame sync given from external sources */
i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
srgr = DAVINCI_MCBSP_SRGR_FSGM;
@@ -473,15 +473,15 @@ static int davinci_i2s_hw_params(struct snd_pcm_substream *substream,
fmt = double_fmt[fmt];
}
switch (master) {
- case SND_SOC_DAIFMT_CBS_CFS:
- case SND_SOC_DAIFMT_CBS_CFM:
+ case SND_SOC_DAIFMT_BP_FP:
+ case SND_SOC_DAIFMT_BP_FC:
rcr |= DAVINCI_MCBSP_RCR_RFRLEN2(0);
xcr |= DAVINCI_MCBSP_XCR_XFRLEN2(0);
rcr |= DAVINCI_MCBSP_RCR_RPHASE;
xcr |= DAVINCI_MCBSP_XCR_XPHASE;
break;
- case SND_SOC_DAIFMT_CBM_CFM:
- case SND_SOC_DAIFMT_CBM_CFS:
+ case SND_SOC_DAIFMT_BC_FC:
+ case SND_SOC_DAIFMT_BC_FP:
rcr |= DAVINCI_MCBSP_RCR_RFRLEN2(element_cnt - 1);
xcr |= DAVINCI_MCBSP_XCR_XFRLEN2(element_cnt - 1);
break;
@@ -492,13 +492,13 @@ static int davinci_i2s_hw_params(struct snd_pcm_substream *substream,
mcbsp_word_length = asp_word_length[fmt];
switch (master) {
- case SND_SOC_DAIFMT_CBS_CFS:
- case SND_SOC_DAIFMT_CBS_CFM:
+ case SND_SOC_DAIFMT_BP_FP:
+ case SND_SOC_DAIFMT_BP_FC:
rcr |= DAVINCI_MCBSP_RCR_RFRLEN1(0);
xcr |= DAVINCI_MCBSP_XCR_XFRLEN1(0);
break;
- case SND_SOC_DAIFMT_CBM_CFM:
- case SND_SOC_DAIFMT_CBM_CFS:
+ case SND_SOC_DAIFMT_BC_FC:
+ case SND_SOC_DAIFMT_BC_FP:
rcr |= DAVINCI_MCBSP_RCR_RFRLEN1(element_cnt - 1);
xcr |= DAVINCI_MCBSP_XCR_XFRLEN1(element_cnt - 1);
break;
@@ -640,7 +640,8 @@ static struct snd_soc_dai_driver davinci_i2s_dai = {
};
static const struct snd_soc_component_driver davinci_i2s_component = {
- .name = DRV_NAME,
+ .name = DRV_NAME,
+ .legacy_dai_naming = 1,
};
static int davinci_i2s_probe(struct platform_device *pdev)
diff --git a/sound/soc/ti/davinci-mcasp.c b/sound/soc/ti/davinci-mcasp.c
index 377be2e2b6ee..ca5d1bb6ac59 100644
--- a/sound/soc/ti/davinci-mcasp.c
+++ b/sound/soc/ti/davinci-mcasp.c
@@ -492,8 +492,8 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
mcasp_mod_bits(mcasp, DAVINCI_MCASP_RXFMT_REG, FSRDLY(data_delay),
FSRDLY(3));
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBS_CFS:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_BP_FP:
/* codec is clock and frame slave */
mcasp_set_bits(mcasp, DAVINCI_MCASP_ACLKXCTL_REG, ACLKXE);
mcasp_set_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG, AFSXE);
@@ -510,7 +510,7 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
mcasp->bclk_master = 1;
break;
- case SND_SOC_DAIFMT_CBS_CFM:
+ case SND_SOC_DAIFMT_BP_FC:
/* codec is clock slave and frame master */
mcasp_set_bits(mcasp, DAVINCI_MCASP_ACLKXCTL_REG, ACLKXE);
mcasp_clr_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG, AFSXE);
@@ -527,7 +527,7 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
mcasp->bclk_master = 1;
break;
- case SND_SOC_DAIFMT_CBM_CFS:
+ case SND_SOC_DAIFMT_BC_FP:
/* codec is clock master and frame slave */
mcasp_clr_bits(mcasp, DAVINCI_MCASP_ACLKXCTL_REG, ACLKXE);
mcasp_set_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG, AFSXE);
@@ -544,7 +544,7 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
mcasp->bclk_master = 0;
break;
- case SND_SOC_DAIFMT_CBM_CFM:
+ case SND_SOC_DAIFMT_BC_FC:
/* codec is clock and frame master */
mcasp_clr_bits(mcasp, DAVINCI_MCASP_ACLKXCTL_REG, ACLKXE);
mcasp_clr_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG, AFSXE);
@@ -1765,7 +1765,8 @@ static struct snd_soc_dai_driver davinci_mcasp_dai[] = {
};
static const struct snd_soc_component_driver davinci_mcasp_component = {
- .name = "davinci-mcasp",
+ .name = "davinci-mcasp",
+ .legacy_dai_naming = 1,
};
/* Some HW specific values and defaults. The rest is filled in from DT. */
@@ -2111,8 +2112,7 @@ static int davinci_mcasp_gpio_request(struct gpio_chip *chip, unsigned offset)
}
/* Do not change the PIN yet */
-
- return pm_runtime_get_sync(mcasp->dev);
+ return pm_runtime_resume_and_get(mcasp->dev);
}
static void davinci_mcasp_gpio_free(struct gpio_chip *chip, unsigned offset)
diff --git a/sound/soc/ti/davinci-vcif.c b/sound/soc/ti/davinci-vcif.c
index f810123cc407..36fa97e2b9e2 100644
--- a/sound/soc/ti/davinci-vcif.c
+++ b/sound/soc/ti/davinci-vcif.c
@@ -185,7 +185,8 @@ static struct snd_soc_dai_driver davinci_vcif_dai = {
};
static const struct snd_soc_component_driver davinci_vcif_component = {
- .name = "davinci-vcif",
+ .name = "davinci-vcif",
+ .legacy_dai_naming = 1,
};
static int davinci_vcif_probe(struct platform_device *pdev)
diff --git a/sound/soc/ti/omap-dmic.c b/sound/soc/ti/omap-dmic.c
index f3eed20611a3..825c70a443da 100644
--- a/sound/soc/ti/omap-dmic.c
+++ b/sound/soc/ti/omap-dmic.c
@@ -453,7 +453,8 @@ static struct snd_soc_dai_driver omap_dmic_dai = {
};
static const struct snd_soc_component_driver omap_dmic_component = {
- .name = "omap-dmic",
+ .name = "omap-dmic",
+ .legacy_dai_naming = 1,
};
static int asoc_dmic_probe(struct platform_device *pdev)
diff --git a/sound/soc/ti/omap-hdmi.c b/sound/soc/ti/omap-hdmi.c
index 3328c02f93c7..0dc0475670ff 100644
--- a/sound/soc/ti/omap-hdmi.c
+++ b/sound/soc/ti/omap-hdmi.c
@@ -275,6 +275,7 @@ static const struct snd_soc_dai_ops hdmi_dai_ops = {
static const struct snd_soc_component_driver omap_hdmi_component = {
.name = "omapdss_hdmi",
+ .legacy_dai_naming = 1,
};
static struct snd_soc_dai_driver omap5_hdmi_dai = {
diff --git a/sound/soc/ti/omap-mcbsp.c b/sound/soc/ti/omap-mcbsp.c
index 9933b33c80ca..c4ac1f30b9fe 100644
--- a/sound/soc/ti/omap-mcbsp.c
+++ b/sound/soc/ti/omap-mcbsp.c
@@ -1026,8 +1026,8 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
/* In McBSP master modes, FRAME (i.e. sample rate) is generated
* by _counting_ BCLKs. Calculate frame size in BCLKs */
- master = mcbsp->fmt & SND_SOC_DAIFMT_MASTER_MASK;
- if (master == SND_SOC_DAIFMT_CBS_CFS) {
+ master = mcbsp->fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK;
+ if (master == SND_SOC_DAIFMT_BP_FP) {
div = mcbsp->clk_div ? mcbsp->clk_div : 1;
framesize = (mcbsp->in_freq / div) / params_rate(params);
@@ -1126,20 +1126,20 @@ static int omap_mcbsp_dai_set_dai_fmt(struct snd_soc_dai *cpu_dai,
return -EINVAL;
}
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBS_CFS:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_BP_FP:
/* McBSP master. Set FS and bit clocks as outputs */
regs->pcr0 |= FSXM | FSRM |
CLKXM | CLKRM;
/* Sample rate generator drives the FS */
regs->srgr2 |= FSGM;
break;
- case SND_SOC_DAIFMT_CBM_CFS:
+ case SND_SOC_DAIFMT_BC_FP:
/* McBSP slave. FS clock as output */
regs->srgr2 |= FSGM;
regs->pcr0 |= FSXM | FSRM;
break;
- case SND_SOC_DAIFMT_CBM_CFM:
+ case SND_SOC_DAIFMT_BC_FC:
/* McBSP slave */
break;
default:
@@ -1307,7 +1307,8 @@ static struct snd_soc_dai_driver omap_mcbsp_dai = {
};
static const struct snd_soc_component_driver omap_mcbsp_component = {
- .name = "omap-mcbsp",
+ .name = "omap-mcbsp",
+ .legacy_dai_naming = 1,
};
static struct omap_mcbsp_platform_data omap2420_pdata = {
diff --git a/sound/soc/ti/omap-mcpdm.c b/sound/soc/ti/omap-mcpdm.c
index fafb2998ad0d..0b18a7bfd3fd 100644
--- a/sound/soc/ti/omap-mcpdm.c
+++ b/sound/soc/ti/omap-mcpdm.c
@@ -524,9 +524,10 @@ static struct snd_soc_dai_driver omap_mcpdm_dai = {
};
static const struct snd_soc_component_driver omap_mcpdm_component = {
- .name = "omap-mcpdm",
- .suspend = omap_mcpdm_suspend,
- .resume = omap_mcpdm_resume,
+ .name = "omap-mcpdm",
+ .suspend = omap_mcpdm_suspend,
+ .resume = omap_mcpdm_resume,
+ .legacy_dai_naming = 1,
};
void omap_mcpdm_configure_dn_offsets(struct snd_soc_pcm_runtime *rtd,
diff --git a/sound/soc/uniphier/evea.c b/sound/soc/uniphier/evea.c
index 96343d19a1e0..42403ae8e31b 100644
--- a/sound/soc/uniphier/evea.c
+++ b/sound/soc/uniphier/evea.c
@@ -397,7 +397,6 @@ static struct snd_soc_component_driver soc_codec_evea = {
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
- .non_legacy_dai_naming = 1,
};
static struct snd_soc_dai_driver soc_dai_evea[] = {
diff --git a/sound/soc/ux500/mop500.c b/sound/soc/ux500/mop500.c
index 4f41bb0ab2b0..fdd55d772b8e 100644
--- a/sound/soc/ux500/mop500.c
+++ b/sound/soc/ux500/mop500.c
@@ -4,8 +4,6 @@
*
* Author: Ola Lilja (ola.o.lilja@stericsson.com)
* for ST-Ericsson.
- *
- * License terms:
*/
#include <asm/mach-types.h>
diff --git a/sound/soc/ux500/mop500_ab8500.c b/sound/soc/ux500/mop500_ab8500.c
index 1ea1729984a9..e5e73a2bd9fe 100644
--- a/sound/soc/ux500/mop500_ab8500.c
+++ b/sound/soc/ux500/mop500_ab8500.c
@@ -5,8 +5,6 @@
* Author: Ola Lilja <ola.o.lilja@stericsson.com>,
* Kristoffer Karlsson <kristoffer.karlsson@stericsson.com>
* for ST-Ericsson.
- *
- * License terms:
*/
#include <linux/module.h>
diff --git a/sound/soc/ux500/mop500_ab8500.h b/sound/soc/ux500/mop500_ab8500.h
index 087ef246d87d..98de80a9cc4f 100644
--- a/sound/soc/ux500/mop500_ab8500.h
+++ b/sound/soc/ux500/mop500_ab8500.h
@@ -4,8 +4,6 @@
*
* Author: Ola Lilja <ola.o.lilja@stericsson.com>
* for ST-Ericsson.
- *
- * License terms:
*/
#ifndef MOP500_AB8500_H
diff --git a/sound/soc/ux500/ux500_msp_dai.c b/sound/soc/ux500/ux500_msp_dai.c
index 21052378a32e..9d99ea6d7f30 100644
--- a/sound/soc/ux500/ux500_msp_dai.c
+++ b/sound/soc/ux500/ux500_msp_dai.c
@@ -5,8 +5,6 @@
* Author: Ola Lilja <ola.o.lilja@stericsson.com>,
* Roger Nilsson <roger.xr.nilsson@stericsson.com>
* for ST-Ericsson.
- *
- * License terms:
*/
#include <linux/module.h>
@@ -191,8 +189,8 @@ static int setup_clocking(struct snd_soc_dai *dai,
return -EINVAL;
}
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBM_CFM:
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_BC_FC:
dev_dbg(dai->dev, "%s: Codec is master.\n", __func__);
msp_config->iodelay = 0x20;
@@ -204,7 +202,7 @@ static int setup_clocking(struct snd_soc_dai *dai,
break;
- case SND_SOC_DAIFMT_CBS_CFS:
+ case SND_SOC_DAIFMT_BP_FP:
dev_dbg(dai->dev, "%s: Codec is slave.\n", __func__);
msp_config->tx_clk_sel = TX_CLK_SEL_SRG;
@@ -328,15 +326,15 @@ static int setup_msp_config(struct snd_pcm_substream *substream,
dev_dbg(dai->dev, "%s: rate: %u, channels: %d.\n", __func__,
runtime->rate, runtime->channels);
switch (fmt &
- (SND_SOC_DAIFMT_FORMAT_MASK | SND_SOC_DAIFMT_MASTER_MASK)) {
- case SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS:
+ (SND_SOC_DAIFMT_FORMAT_MASK | SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK)) {
+ case SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_BP_FP:
dev_dbg(dai->dev, "%s: SND_SOC_DAIFMT_I2S.\n", __func__);
msp_config->default_protdesc = 1;
msp_config->protocol = MSP_I2S_PROTOCOL;
break;
- case SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBM_CFM:
+ case SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_BC_FC:
dev_dbg(dai->dev, "%s: SND_SOC_DAIFMT_I2S.\n", __func__);
msp_config->data_size = MSP_DATA_BITS_16;
@@ -348,10 +346,10 @@ static int setup_msp_config(struct snd_pcm_substream *substream,
break;
- case SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_CBS_CFS:
- case SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_CBM_CFM:
- case SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_CBS_CFS:
- case SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_CBM_CFM:
+ case SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_BP_FP:
+ case SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_BC_FC:
+ case SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_BP_FP:
+ case SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_BC_FC:
dev_dbg(dai->dev, "%s: PCM format.\n", __func__);
msp_config->data_size = MSP_DATA_BITS_16;
@@ -477,7 +475,7 @@ static int ux500_msp_dai_prepare(struct snd_pcm_substream *substream,
}
/* Set OPP-level */
- if ((drvdata->fmt & SND_SOC_DAIFMT_MASTER_MASK) &&
+ if ((drvdata->fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) &&
(drvdata->msp->f_bitclk > 19200000)) {
/* If the bit-clock is higher than 19.2MHz, Vape should be
* run in 100% OPP. Only when bit-clock is used (MSP master)
@@ -544,13 +542,13 @@ static int ux500_msp_dai_set_dai_fmt(struct snd_soc_dai *dai,
dev_dbg(dai->dev, "%s: MSP %d: Enter.\n", __func__, dai->id);
switch (fmt & (SND_SOC_DAIFMT_FORMAT_MASK |
- SND_SOC_DAIFMT_MASTER_MASK)) {
- case SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS:
- case SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBM_CFM:
- case SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_CBS_CFS:
- case SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_CBM_CFM:
- case SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_CBS_CFS:
- case SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_CBM_CFM:
+ SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK)) {
+ case SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_BP_FP:
+ case SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_BC_FC:
+ case SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_BP_FP:
+ case SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_BC_FC:
+ case SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_BP_FP:
+ case SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_BC_FC:
break;
default:
@@ -731,7 +729,8 @@ static struct snd_soc_dai_driver ux500_msp_dai_drv = {
};
static const struct snd_soc_component_driver ux500_msp_component = {
- .name = "ux500-msp",
+ .name = "ux500-msp",
+ .legacy_dai_naming = 1,
};
diff --git a/sound/soc/ux500/ux500_msp_dai.h b/sound/soc/ux500/ux500_msp_dai.h
index fcd4b26f5d2d..30bf70838196 100644
--- a/sound/soc/ux500/ux500_msp_dai.h
+++ b/sound/soc/ux500/ux500_msp_dai.h
@@ -5,8 +5,6 @@
* Author: Ola Lilja <ola.o.lilja@stericsson.com>,
* Roger Nilsson <roger.xr.nilsson@stericsson.com>
* for ST-Ericsson.
- *
- * License terms:
*/
#ifndef UX500_msp_dai_H
diff --git a/sound/soc/ux500/ux500_msp_i2s.c b/sound/soc/ux500/ux500_msp_i2s.c
index fd0b88bb7921..d113411a19f8 100644
--- a/sound/soc/ux500/ux500_msp_i2s.c
+++ b/sound/soc/ux500/ux500_msp_i2s.c
@@ -6,8 +6,6 @@
* Roger Nilsson <roger.xr.nilsson@stericsson.com>,
* Sandeep Kaushik <sandeep.kaushik@st.com>
* for ST-Ericsson.
- *
- * License terms:
*/
#include <linux/module.h>
diff --git a/sound/soc/ux500/ux500_msp_i2s.h b/sound/soc/ux500/ux500_msp_i2s.h
index 756b3973af9a..d45b5e2831cc 100644
--- a/sound/soc/ux500/ux500_msp_i2s.h
+++ b/sound/soc/ux500/ux500_msp_i2s.h
@@ -4,8 +4,6 @@
*
* Author: Ola Lilja <ola.o.lilja@stericsson.com>,
* for ST-Ericsson.
- *
- * License terms:
*/
diff --git a/sound/soc/ux500/ux500_pcm.c b/sound/soc/ux500/ux500_pcm.c
index 18191084b8b8..d3802e5ef196 100644
--- a/sound/soc/ux500/ux500_pcm.c
+++ b/sound/soc/ux500/ux500_pcm.c
@@ -5,8 +5,6 @@
* Author: Ola Lilja <ola.o.lilja@stericsson.com>,
* Roger Nilsson <roger.xr.nilsson@stericsson.com>
* for ST-Ericsson.
- *
- * License terms:
*/
#include <asm/page.h>
diff --git a/sound/soc/ux500/ux500_pcm.h b/sound/soc/ux500/ux500_pcm.h
index ff3ef7223db6..bd4348ebf9a1 100644
--- a/sound/soc/ux500/ux500_pcm.h
+++ b/sound/soc/ux500/ux500_pcm.h
@@ -5,8 +5,6 @@
* Author: Ola Lilja <ola.o.lilja@stericsson.com>,
* Roger Nilsson <roger.xr.nilsson@stericsson.com>
* for ST-Ericsson.
- *
- * License terms:
*/
#ifndef UX500_PCM_H
#define UX500_PCM_H
diff --git a/sound/soc/xilinx/xlnx_formatter_pcm.c b/sound/soc/xilinx/xlnx_formatter_pcm.c
index 5c4158069a5a..ff1fe62fea70 100644
--- a/sound/soc/xilinx/xlnx_formatter_pcm.c
+++ b/sound/soc/xilinx/xlnx_formatter_pcm.c
@@ -575,14 +575,14 @@ static int xlnx_formatter_pcm_new(struct snd_soc_component *component,
}
static const struct snd_soc_component_driver xlnx_asoc_component = {
- .name = DRV_NAME,
- .set_sysclk = xlnx_formatter_set_sysclk,
- .open = xlnx_formatter_pcm_open,
- .close = xlnx_formatter_pcm_close,
- .hw_params = xlnx_formatter_pcm_hw_params,
- .trigger = xlnx_formatter_pcm_trigger,
- .pointer = xlnx_formatter_pcm_pointer,
- .pcm_construct = xlnx_formatter_pcm_new,
+ .name = DRV_NAME,
+ .set_sysclk = xlnx_formatter_set_sysclk,
+ .open = xlnx_formatter_pcm_open,
+ .close = xlnx_formatter_pcm_close,
+ .hw_params = xlnx_formatter_pcm_hw_params,
+ .trigger = xlnx_formatter_pcm_trigger,
+ .pointer = xlnx_formatter_pcm_pointer,
+ .pcm_construct = xlnx_formatter_pcm_new,
};
static int xlnx_formatter_pcm_probe(struct platform_device *pdev)
@@ -703,7 +703,7 @@ static int xlnx_formatter_pcm_remove(struct platform_device *pdev)
dev_err(&pdev->dev, "audio formatter reset failed\n");
clk_disable_unprepare(adata->axi_clk);
- return ret;
+ return 0;
}
static const struct of_device_id xlnx_formatter_pcm_of_match[] = {
diff --git a/sound/soc/xilinx/xlnx_i2s.c b/sound/soc/xilinx/xlnx_i2s.c
index 4cc6ee7c81a3..9de92d35e30e 100644
--- a/sound/soc/xilinx/xlnx_i2s.c
+++ b/sound/soc/xilinx/xlnx_i2s.c
@@ -158,6 +158,7 @@ static const struct snd_soc_dai_ops xlnx_i2s_dai_ops = {
static const struct snd_soc_component_driver xlnx_i2s_component = {
.name = DRV_NAME,
+ .legacy_dai_naming = 1,
};
static const struct of_device_id xlnx_i2s_of_match[] = {
diff --git a/sound/soc/xilinx/xlnx_spdif.c b/sound/soc/xilinx/xlnx_spdif.c
index cba0e868a7d7..7342048e9875 100644
--- a/sound/soc/xilinx/xlnx_spdif.c
+++ b/sound/soc/xilinx/xlnx_spdif.c
@@ -226,6 +226,7 @@ static struct snd_soc_dai_driver xlnx_spdif_rx_dai = {
static const struct snd_soc_component_driver xlnx_spdif_component = {
.name = "xlnx-spdif",
+ .legacy_dai_naming = 1,
};
static const struct of_device_id xlnx_spdif_of_match[] = {
diff --git a/sound/soc/xtensa/xtfpga-i2s.c b/sound/soc/xtensa/xtfpga-i2s.c
index aeb4b2c4d1d3..a8f156540b50 100644
--- a/sound/soc/xtensa/xtfpga-i2s.c
+++ b/sound/soc/xtensa/xtfpga-i2s.c
@@ -339,7 +339,7 @@ static int xtfpga_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
{
if ((fmt & SND_SOC_DAIFMT_INV_MASK) != SND_SOC_DAIFMT_NB_NF)
return -EINVAL;
- if ((fmt & SND_SOC_DAIFMT_MASTER_MASK) != SND_SOC_DAIFMT_CBS_CFS)
+ if ((fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) != SND_SOC_DAIFMT_BP_FP)
return -EINVAL;
if ((fmt & SND_SOC_DAIFMT_FORMAT_MASK) != SND_SOC_DAIFMT_I2S)
return -EINVAL;
@@ -475,19 +475,20 @@ static int xtfpga_pcm_new(struct snd_soc_component *component,
}
static const struct snd_soc_component_driver xtfpga_i2s_component = {
- .name = DRV_NAME,
- .open = xtfpga_pcm_open,
- .close = xtfpga_pcm_close,
- .hw_params = xtfpga_pcm_hw_params,
- .trigger = xtfpga_pcm_trigger,
- .pointer = xtfpga_pcm_pointer,
- .pcm_construct = xtfpga_pcm_new,
+ .name = DRV_NAME,
+ .open = xtfpga_pcm_open,
+ .close = xtfpga_pcm_close,
+ .hw_params = xtfpga_pcm_hw_params,
+ .trigger = xtfpga_pcm_trigger,
+ .pointer = xtfpga_pcm_pointer,
+ .pcm_construct = xtfpga_pcm_new,
+ .legacy_dai_naming = 1,
};
static const struct snd_soc_dai_ops xtfpga_i2s_dai_ops = {
.startup = xtfpga_i2s_startup,
.hw_params = xtfpga_i2s_hw_params,
- .set_fmt = xtfpga_i2s_set_fmt,
+ .set_fmt = xtfpga_i2s_set_fmt,
};
static struct snd_soc_dai_driver xtfpga_i2s_dai[] = {
diff --git a/sound/usb/6fire/pcm.c b/sound/usb/6fire/pcm.c
index 7168f1c6a37a..32c39d8bd2e5 100644
--- a/sound/usb/6fire/pcm.c
+++ b/sound/usb/6fire/pcm.c
@@ -175,7 +175,7 @@ static int usb6fire_pcm_stream_start(struct pcm_runtime *rt)
}
}
- /* wait for first out urb to return (sent in in urb handler) */
+ /* wait for first out urb to return (sent in urb handler) */
wait_event_timeout(rt->stream_wait_queue, rt->stream_wait_cond,
HZ);
if (rt->stream_wait_cond)
diff --git a/sound/usb/bcd2000/bcd2000.c b/sound/usb/bcd2000/bcd2000.c
index cd4a0bc6d278..7aec0a95c609 100644
--- a/sound/usb/bcd2000/bcd2000.c
+++ b/sound/usb/bcd2000/bcd2000.c
@@ -348,7 +348,8 @@ static int bcd2000_init_midi(struct bcd2000 *bcd2k)
static void bcd2000_free_usb_related_resources(struct bcd2000 *bcd2k,
struct usb_interface *interface)
{
- /* usb_kill_urb not necessary, urb is aborted automatically */
+ usb_kill_urb(bcd2k->midi_out_urb);
+ usb_kill_urb(bcd2k->midi_in_urb);
usb_free_urb(bcd2k->midi_out_urb);
usb_free_urb(bcd2k->midi_in_urb);
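The bcd2000 change stops relying on URBs being "aborted automatically" and kills both MIDI URBs explicitly before freeing them, so a transfer still in flight at disconnect time can no longer complete into freed memory. The ordering that matters, as a sketch:

	usb_kill_urb(urb);   /* blocks until any in-flight transfer is cancelled and its completion has run */
	usb_free_urb(urb);   /* safe now: nothing can touch the URB any more */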
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 0fff96a5d3ab..d356743de2ff 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -387,6 +387,14 @@ static const struct usb_audio_device_name usb_audio_names[] = {
DEVICE_NAME(0x05e1, 0x0408, "Syntek", "STK1160"),
DEVICE_NAME(0x05e1, 0x0480, "Hauppauge", "Woodbury"),
+ /* ASUS ROG Zenith II: this machine also has two devices, one for
+ * the front headphone and another for the rest
+ */
+ PROFILE_NAME(0x0b05, 0x1915, "ASUS", "Zenith II Front Headphone",
+ "Zenith-II-Front-Headphone"),
+ PROFILE_NAME(0x0b05, 0x1916, "ASUS", "Zenith II Main Audio",
+ "Zenith-II-Main-Audio"),
+
/* ASUS ROG Strix */
PROFILE_NAME(0x0b05, 0x1917,
"Realtek", "ALC1220-VB-DT", "Realtek-ALC1220-VB-Desktop"),
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index f9c921683948..0d7b73bf7945 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -133,7 +133,7 @@ static inline bool ep_state_running(struct snd_usb_endpoint *ep)
static inline bool ep_state_update(struct snd_usb_endpoint *ep, int old, int new)
{
- return atomic_cmpxchg(&ep->state, old, new) == old;
+ return atomic_try_cmpxchg(&ep->state, &old, new);
}
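ep_state_update() now uses atomic_try_cmpxchg(), which returns a boolean directly and, on failure, writes the value it actually observed back into 'old'. The two forms are equivalent here; a small sketch of the rewrite (the win is typically that the compiler can reuse the flags from the cmpxchg instead of re-comparing):

	/* before: succeeds iff the state was still 'old' */
	return atomic_cmpxchg(&ep->state, old, new) == old;

	/* after: same semantics, usually one comparison fewer in the generated code */
	return atomic_try_cmpxchg(&ep->state, &old, new);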
/**
diff --git a/sound/usb/hiface/pcm.c b/sound/usb/hiface/pcm.c
index 71f17f02f341..cf650fab54d7 100644
--- a/sound/usb/hiface/pcm.c
+++ b/sound/usb/hiface/pcm.c
@@ -225,7 +225,7 @@ static int hiface_pcm_stream_start(struct pcm_runtime *rt)
}
}
- /* wait for first out urb to return (sent in in urb handler) */
+ /* wait for first out urb to return (sent in urb handler) */
wait_event_timeout(rt->stream_wait_queue, rt->stream_wait_cond,
HZ);
if (rt->stream_wait_cond) {
diff --git a/sound/usb/line6/pod.c b/sound/usb/line6/pod.c
index 16e644330c4d..cd41aa7f0385 100644
--- a/sound/usb/line6/pod.c
+++ b/sound/usb/line6/pod.c
@@ -235,7 +235,7 @@ static ssize_t serial_number_show(struct device *dev,
struct snd_card *card = dev_to_snd_card(dev);
struct usb_line6_pod *pod = card->private_data;
- return sprintf(buf, "%u\n", pod->serial_number);
+ return sysfs_emit(buf, "%u\n", pod->serial_number);
}
/*
@@ -247,8 +247,8 @@ static ssize_t firmware_version_show(struct device *dev,
struct snd_card *card = dev_to_snd_card(dev);
struct usb_line6_pod *pod = card->private_data;
- return sprintf(buf, "%d.%02d\n", pod->firmware_version / 100,
- pod->firmware_version % 100);
+ return sysfs_emit(buf, "%d.%02d\n", pod->firmware_version / 100,
+ pod->firmware_version % 100);
}
/*
@@ -260,7 +260,7 @@ static ssize_t device_id_show(struct device *dev,
struct snd_card *card = dev_to_snd_card(dev);
struct usb_line6_pod *pod = card->private_data;
- return sprintf(buf, "%d\n", pod->device_id);
+ return sysfs_emit(buf, "%d\n", pod->device_id);
}
/*
diff --git a/sound/usb/line6/podhd.c b/sound/usb/line6/podhd.c
index b24bc82f89e3..ffd8c157a281 100644
--- a/sound/usb/line6/podhd.c
+++ b/sound/usb/line6/podhd.c
@@ -146,7 +146,7 @@ static ssize_t serial_number_show(struct device *dev,
struct snd_card *card = dev_to_snd_card(dev);
struct usb_line6_podhd *pod = card->private_data;
- return sprintf(buf, "%u\n", pod->serial_number);
+ return sysfs_emit(buf, "%u\n", pod->serial_number);
}
static ssize_t firmware_version_show(struct device *dev,
@@ -155,7 +155,7 @@ static ssize_t firmware_version_show(struct device *dev,
struct snd_card *card = dev_to_snd_card(dev);
struct usb_line6_podhd *pod = card->private_data;
- return sprintf(buf, "%06x\n", pod->firmware_version);
+ return sysfs_emit(buf, "%06x\n", pod->firmware_version);
}
static DEVICE_ATTR_RO(firmware_version);
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index 3c795675f048..f4bd1e8ae4b6 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -374,13 +374,28 @@ static const struct usbmix_name_map corsair_virtuoso_map[] = {
{ 0 }
};
-/* Some mobos shipped with a dummy HD-audio show the invalid GET_MIN/GET_MAX
- * response for Input Gain Pad (id=19, control=12) and the connector status
- * for SPDIF terminal (id=18). Skip them.
- */
-static const struct usbmix_name_map asus_rog_map[] = {
- { 18, NULL }, /* OT, connector control */
- { 19, NULL, 12 }, /* FU, Input Gain Pad */
+/* ASUS ROG Zenith II with Realtek ALC1220-VB */
+static const struct usbmix_name_map asus_zenith_ii_map[] = {
+ { 19, NULL, 12 }, /* FU, Input Gain Pad - broken response, disabled */
+ { 16, "Speaker" }, /* OT */
+ { 22, "Speaker Playback" }, /* FU */
+ { 7, "Line" }, /* IT */
+ { 19, "Line Capture" }, /* FU */
+ { 8, "Mic" }, /* IT */
+ { 20, "Mic Capture" }, /* FU */
+ { 9, "Front Mic" }, /* IT */
+ { 21, "Front Mic Capture" }, /* FU */
+ { 17, "IEC958" }, /* OT */
+ { 23, "IEC958 Playback" }, /* FU */
+ {}
+};
+
+static const struct usbmix_connector_map asus_zenith_ii_connector_map[] = {
+ { 10, 16 }, /* (Back) Speaker */
+ { 11, 17 }, /* SPDIF */
+ { 13, 7 }, /* Line */
+ { 14, 8 }, /* Mic */
+ { 15, 9 }, /* Front Mic */
{}
};
@@ -611,9 +626,10 @@ static const struct usbmix_ctl_map usbmix_ctl_maps[] = {
.map = gigabyte_b450_map,
.connector_map = gigabyte_b450_connector_map,
},
- { /* ASUS ROG Zenith II */
+ { /* ASUS ROG Zenith II (main audio) */
.id = USB_ID(0x0b05, 0x1916),
- .map = asus_rog_map,
+ .map = asus_zenith_ii_map,
+ .connector_map = asus_zenith_ii_connector_map,
},
{ /* ASUS ROG Strix */
.id = USB_ID(0x0b05, 0x1917),
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index d35cf54cab33..ab0d459f4271 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -24,6 +24,7 @@
#include <sound/asoundef.h>
#include <sound/core.h>
#include <sound/control.h>
+#include <sound/hda_verbs.h>
#include <sound/hwdep.h>
#include <sound/info.h>
#include <sound/tlv.h>
@@ -1934,13 +1935,194 @@ static int snd_soundblaster_e1_switch_create(struct usb_mixer_interface *mixer)
NULL);
}
+/*
+ * Dell WD15 dock jack detection
+ *
+ * The WD15 contains an ALC4020 USB audio controller and ALC3263 audio codec
+ * from Realtek. It is a UAC 1 device, and UAC 1 does not support jack
+ * detection. Instead, jack detection works by sending HD Audio commands over
+ * vendor-type USB messages.
+ */
+
+#define HDA_VERB_CMD(V, N, D) (((N) << 20) | ((V) << 8) | (D))
+
+#define REALTEK_HDA_VALUE 0x0038
+
+#define REALTEK_HDA_SET 62
+#define REALTEK_MANUAL_MODE 72
+#define REALTEK_HDA_GET_OUT 88
+#define REALTEK_HDA_GET_IN 89
+
+#define REALTEK_AUDIO_FUNCTION_GROUP 0x01
+#define REALTEK_LINE1 0x1a
+#define REALTEK_VENDOR_REGISTERS 0x20
+#define REALTEK_HP_OUT 0x21
+
+#define REALTEK_CBJ_CTRL2 0x50
+
+#define REALTEK_JACK_INTERRUPT_NODE 5
+
+#define REALTEK_MIC_FLAG 0x100
+
+static int realtek_hda_set(struct snd_usb_audio *chip, u32 cmd)
+{
+ struct usb_device *dev = chip->dev;
+ __be32 buf = cpu_to_be32(cmd);
+
+ return snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), REALTEK_HDA_SET,
+ USB_RECIP_DEVICE | USB_TYPE_VENDOR | USB_DIR_OUT,
+ REALTEK_HDA_VALUE, 0, &buf, sizeof(buf));
+}
+
+static int realtek_hda_get(struct snd_usb_audio *chip, u32 cmd, u32 *value)
+{
+ struct usb_device *dev = chip->dev;
+ int err;
+ __be32 buf = cpu_to_be32(cmd);
+
+ err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), REALTEK_HDA_GET_OUT,
+ USB_RECIP_DEVICE | USB_TYPE_VENDOR | USB_DIR_OUT,
+ REALTEK_HDA_VALUE, 0, &buf, sizeof(buf));
+ if (err < 0)
+ return err;
+ err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), REALTEK_HDA_GET_IN,
+ USB_RECIP_DEVICE | USB_TYPE_VENDOR | USB_DIR_IN,
+ REALTEK_HDA_VALUE, 0, &buf, sizeof(buf));
+ if (err < 0)
+ return err;
+
+ *value = be32_to_cpu(buf);
+ return 0;
+}
+
+static int realtek_ctl_connector_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_info *cval = kcontrol->private_data;
+ struct snd_usb_audio *chip = cval->head.mixer->chip;
+ u32 pv = kcontrol->private_value;
+ u32 node_id = pv & 0xff;
+ u32 sense;
+ u32 cbj_ctrl2;
+ bool presence;
+ int err;
+
+ err = snd_usb_lock_shutdown(chip);
+ if (err < 0)
+ return err;
+ err = realtek_hda_get(chip,
+ HDA_VERB_CMD(AC_VERB_GET_PIN_SENSE, node_id, 0),
+ &sense);
+ if (err < 0)
+ goto err;
+ if (pv & REALTEK_MIC_FLAG) {
+ err = realtek_hda_set(chip,
+ HDA_VERB_CMD(AC_VERB_SET_COEF_INDEX,
+ REALTEK_VENDOR_REGISTERS,
+ REALTEK_CBJ_CTRL2));
+ if (err < 0)
+ goto err;
+ err = realtek_hda_get(chip,
+ HDA_VERB_CMD(AC_VERB_GET_PROC_COEF,
+ REALTEK_VENDOR_REGISTERS, 0),
+ &cbj_ctrl2);
+ if (err < 0)
+ goto err;
+ }
+err:
+ snd_usb_unlock_shutdown(chip);
+ if (err < 0)
+ return err;
+
+ presence = sense & AC_PINSENSE_PRESENCE;
+ if (pv & REALTEK_MIC_FLAG)
+ presence = presence && (cbj_ctrl2 & 0x0070) == 0x0070;
+ ucontrol->value.integer.value[0] = presence;
+ return 0;
+}
+
+static const struct snd_kcontrol_new realtek_connector_ctl_ro = {
+ .iface = SNDRV_CTL_ELEM_IFACE_CARD,
+ .name = "", /* will be filled later manually */
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ .info = snd_ctl_boolean_mono_info,
+ .get = realtek_ctl_connector_get,
+};
+
+static int realtek_resume_jack(struct usb_mixer_elem_list *list)
+{
+ snd_ctl_notify(list->mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
+ &list->kctl->id);
+ return 0;
+}
+
+static int realtek_add_jack(struct usb_mixer_interface *mixer,
+ char *name, u32 val)
+{
+ struct usb_mixer_elem_info *cval;
+ struct snd_kcontrol *kctl;
+
+ cval = kzalloc(sizeof(*cval), GFP_KERNEL);
+ if (!cval)
+ return -ENOMEM;
+ snd_usb_mixer_elem_init_std(&cval->head, mixer,
+ REALTEK_JACK_INTERRUPT_NODE);
+ cval->head.resume = realtek_resume_jack;
+ cval->val_type = USB_MIXER_BOOLEAN;
+ cval->channels = 1;
+ cval->min = 0;
+ cval->max = 1;
+ kctl = snd_ctl_new1(&realtek_connector_ctl_ro, cval);
+ if (!kctl) {
+ kfree(cval);
+ return -ENOMEM;
+ }
+ kctl->private_value = val;
+ strscpy(kctl->id.name, name, sizeof(kctl->id.name));
+ kctl->private_free = snd_usb_mixer_elem_free;
+ return snd_usb_mixer_add_control(&cval->head, kctl);
+}
+
+static int dell_dock_mixer_create(struct usb_mixer_interface *mixer)
+{
+ int err;
+ struct usb_device *dev = mixer->chip->dev;
+
+ /* Power down the audio codec to avoid loud pops in the next step. */
+ realtek_hda_set(mixer->chip,
+ HDA_VERB_CMD(AC_VERB_SET_POWER_STATE,
+ REALTEK_AUDIO_FUNCTION_GROUP,
+ AC_PWRST_D3));
+
+ /*
+ * Turn off 'manual mode' in case it was enabled. This removes the need
+ * to power cycle the dock after it was attached to a Windows machine.
+ */
+ snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), REALTEK_MANUAL_MODE,
+ USB_RECIP_DEVICE | USB_TYPE_VENDOR | USB_DIR_OUT,
+ 0, 0, NULL, 0);
+
+ err = realtek_add_jack(mixer, "Line Out Jack", REALTEK_LINE1);
+ if (err < 0)
+ return err;
+ err = realtek_add_jack(mixer, "Headphone Jack", REALTEK_HP_OUT);
+ if (err < 0)
+ return err;
+ err = realtek_add_jack(mixer, "Headset Mic Jack",
+ REALTEK_HP_OUT | REALTEK_MIC_FLAG);
+ if (err < 0)
+ return err;
+ return 0;
+}
+
static void dell_dock_init_vol(struct snd_usb_audio *chip, int ch, int id)
{
u16 buf = 0;
snd_usb_ctl_msg(chip->dev, usb_sndctrlpipe(chip->dev, 0), UAC_SET_CUR,
USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
- ch, snd_usb_ctrl_intf(chip) | (id << 8),
+ (UAC_FU_VOLUME << 8) | ch,
+ snd_usb_ctrl_intf(chip) | (id << 8),
&buf, 2);
}
@@ -3238,6 +3420,7 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
case USB_ID(0x1235, 0x8213): /* Focusrite Scarlett 8i6 3rd Gen */
case USB_ID(0x1235, 0x8214): /* Focusrite Scarlett 18i8 3rd Gen */
case USB_ID(0x1235, 0x8215): /* Focusrite Scarlett 18i20 3rd Gen */
+ case USB_ID(0x1235, 0x820c): /* Focusrite Clarett+ 8Pre */
err = snd_scarlett_gen2_init(mixer);
break;
@@ -3245,6 +3428,9 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
err = snd_soundblaster_e1_switch_create(mixer);
break;
case USB_ID(0x0bda, 0x4014): /* Dell WD15 dock */
+ err = dell_dock_mixer_create(mixer);
+ if (err < 0)
+ break;
err = dell_dock_mixer_init(mixer);
break;
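The jack-detection support added above tunnels ordinary HD Audio codec verbs through Realtek vendor control requests; HDA_VERB_CMD() packs them in the usual HDA layout with the node ID in bits 27:20, the verb in bits 19:8 and the payload in bits 7:0. A worked example for polling the headphone pin complex (AC_VERB_GET_PIN_SENSE is 0xf09 in <sound/hda_verbs.h>):

	HDA_VERB_CMD(AC_VERB_GET_PIN_SENSE, REALTEK_HP_OUT, 0)
		= (0x21 << 20) | (0xf09 << 8) | 0
		= 0x021f0900

and realtek_ctl_connector_get() then reports AC_PINSENSE_PRESENCE (bit 31) of the value read back, additionally checking the CBJ_CTRL2 vendor register for the headset-mic case.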
diff --git a/sound/usb/mixer_scarlett_gen2.c b/sound/usb/mixer_scarlett_gen2.c
index 69a2cd429ee2..9d11bb08667e 100644
--- a/sound/usb/mixer_scarlett_gen2.c
+++ b/sound/usb/mixer_scarlett_gen2.c
@@ -1,13 +1,15 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Focusrite Scarlett Gen 2/3 Driver for ALSA
+ * Focusrite Scarlett Gen 2/3 and Clarett+ Driver for ALSA
*
* Supported models:
* - 6i6/18i8/18i20 Gen 2
* - Solo/2i2/4i4/8i6/18i8/18i20 Gen 3
+ * - Clarett+ 8Pre
*
* Copyright (c) 2018-2022 by Geoffrey D. Bennett <g at b4.vu>
* Copyright (c) 2020-2021 by Vladimir Sadovnikov <sadko4u@gmail.com>
+ * Copyright (c) 2022 by Christian Colglazier <christian@cacolglazier.com>
*
* Based on the Scarlett (Gen 1) Driver for ALSA:
*
@@ -51,6 +53,9 @@
* Support for phantom power, direct monitoring, speaker switching,
* and talkback added in May-June 2021.
*
+ * Support for Clarett+ 8Pre added in Aug 2022 by Christian
+ * Colglazier.
+ *
* This ALSA mixer gives access to (model-dependent):
* - input, output, mixer-matrix muxes
* - mixer-matrix gain stages
@@ -203,7 +208,8 @@ enum {
SCARLETT2_CONFIG_SET_NO_MIXER = 0,
SCARLETT2_CONFIG_SET_GEN_2 = 1,
SCARLETT2_CONFIG_SET_GEN_3 = 2,
- SCARLETT2_CONFIG_SET_COUNT = 3
+ SCARLETT2_CONFIG_SET_CLARETT = 3,
+ SCARLETT2_CONFIG_SET_COUNT = 4
};
/* Hardware port types:
@@ -841,6 +847,61 @@ static const struct scarlett2_device_info s18i20_gen3_info = {
} },
};
+static const struct scarlett2_device_info clarett_8pre_info = {
+ .usb_id = USB_ID(0x1235, 0x820c),
+
+ .config_set = SCARLETT2_CONFIG_SET_CLARETT,
+ .line_out_hw_vol = 1,
+ .level_input_count = 2,
+ .air_input_count = 8,
+
+ .line_out_descrs = {
+ "Monitor L",
+ "Monitor R",
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ "Headphones 1 L",
+ "Headphones 1 R",
+ "Headphones 2 L",
+ "Headphones 2 R",
+ },
+
+ .port_count = {
+ [SCARLETT2_PORT_TYPE_NONE] = { 1, 0 },
+ [SCARLETT2_PORT_TYPE_ANALOGUE] = { 8, 10 },
+ [SCARLETT2_PORT_TYPE_SPDIF] = { 2, 2 },
+ [SCARLETT2_PORT_TYPE_ADAT] = { 8, 8 },
+ [SCARLETT2_PORT_TYPE_MIX] = { 10, 18 },
+ [SCARLETT2_PORT_TYPE_PCM] = { 20, 18 },
+ },
+
+ .mux_assignment = { {
+ { SCARLETT2_PORT_TYPE_PCM, 0, 18 },
+ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
+ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+ { SCARLETT2_PORT_TYPE_ADAT, 0, 8 },
+ { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
+ { SCARLETT2_PORT_TYPE_NONE, 0, 8 },
+ { 0, 0, 0 },
+ }, {
+ { SCARLETT2_PORT_TYPE_PCM, 0, 14 },
+ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
+ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+ { SCARLETT2_PORT_TYPE_ADAT, 0, 4 },
+ { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
+ { SCARLETT2_PORT_TYPE_NONE, 0, 8 },
+ { 0, 0, 0 },
+ }, {
+ { SCARLETT2_PORT_TYPE_PCM, 0, 12 },
+ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
+ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+ { SCARLETT2_PORT_TYPE_NONE, 0, 22 },
+ { 0, 0, 0 },
+ } },
+};
+
static const struct scarlett2_device_info *scarlett2_devices[] = {
/* Supported Gen 2 devices */
&s6i6_gen2_info,
@@ -855,6 +916,9 @@ static const struct scarlett2_device_info *scarlett2_devices[] = {
&s18i8_gen3_info,
&s18i20_gen3_info,
+ /* Supported Clarett+ devices */
+ &clarett_8pre_info,
+
/* End of list */
NULL
};
@@ -1047,6 +1111,29 @@ static const struct scarlett2_config
[SCARLETT2_CONFIG_TALKBACK_MAP] = {
.offset = 0xb0, .size = 16, .activate = 10 },
+
+/* Clarett+ 8Pre */
+}, {
+ [SCARLETT2_CONFIG_DIM_MUTE] = {
+ .offset = 0x31, .size = 8, .activate = 2 },
+
+ [SCARLETT2_CONFIG_LINE_OUT_VOLUME] = {
+ .offset = 0x34, .size = 16, .activate = 1 },
+
+ [SCARLETT2_CONFIG_MUTE_SWITCH] = {
+ .offset = 0x5c, .size = 8, .activate = 1 },
+
+ [SCARLETT2_CONFIG_SW_HW_SWITCH] = {
+ .offset = 0x66, .size = 8, .activate = 3 },
+
+ [SCARLETT2_CONFIG_LEVEL_SWITCH] = {
+ .offset = 0x7c, .size = 8, .activate = 7 },
+
+ [SCARLETT2_CONFIG_AIR_SWITCH] = {
+ .offset = 0x95, .size = 8, .activate = 8 },
+
+ [SCARLETT2_CONFIG_STANDALONE_SWITCH] = {
+ .offset = 0x8d, .size = 8, .activate = 6 },
} };
/* proprietary request/response format */
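The Clarett+ 8Pre hunks above show the whole recipe this driver expects for a new interface; summarised as a sketch (the steps are inferred from the hunks in this diff, not from any extra documentation):

	/* 1. describe the hardware in a scarlett2_device_info (port counts, mux order, line-out names) */
	/* 2. add that struct to the scarlett2_devices[] table                                          */
	/* 3. give its firmware config space a SCARLETT2_CONFIG_SET_* entry with the register offsets   */
	/* 4. route the USB ID to snd_scarlett_gen2_init() in mixer_quirks.c                            */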
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index e692ae04436a..d45d1d7e6664 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -1269,7 +1269,7 @@ static inline void fill_playback_urb_dsd_dop(struct snd_usb_substream *subs,
unsigned int wrap = subs->buffer_bytes;
u8 *dst = urb->transfer_buffer;
u8 *src = runtime->dma_area;
- u8 marker[] = { 0x05, 0xfa };
+ static const u8 marker[] = { 0x05, 0xfa };
unsigned int queued = 0;
/*
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 968d90caeefa..168fd802d70b 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1843,6 +1843,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
QUIRK_FLAG_SHARE_MEDIA_DEVICE | QUIRK_FLAG_ALIGN_TRANSFER),
DEVICE_FLG(0x1395, 0x740a, /* Sennheiser DECT */
QUIRK_FLAG_GET_SAMPLE_RATE),
+ DEVICE_FLG(0x1397, 0x0507, /* Behringer UMC202HD */
+ QUIRK_FLAG_PLAYBACK_FIRST | QUIRK_FLAG_GENERIC_IMPLICIT_FB),
DEVICE_FLG(0x1397, 0x0508, /* Behringer UMC204HD */
QUIRK_FLAG_PLAYBACK_FIRST | QUIRK_FLAG_GENERIC_IMPLICIT_FB),
DEVICE_FLG(0x1397, 0x0509, /* Behringer UMC404HD */
diff --git a/tools/accounting/getdelays.c b/tools/accounting/getdelays.c
index e83e6e47a21e..938dec0dfaad 100644
--- a/tools/accounting/getdelays.c
+++ b/tools/accounting/getdelays.c
@@ -45,7 +45,6 @@
exit(code); \
} while (0)
-int done;
int rcvbufsz;
char name[100];
int dbg;
@@ -285,7 +284,6 @@ int main(int argc, char *argv[])
pid_t rtid = 0;
int fd = 0;
- int count = 0;
int write_file = 0;
int maskset = 0;
char *logfile = NULL;
@@ -495,7 +493,6 @@ int main(int argc, char *argv[])
len2 = 0;
/* For nested attributes, na follows */
na = (struct nlattr *) NLA_DATA(na);
- done = 0;
while (len2 < aggr_len) {
switch (na->nla_type) {
case TASKSTATS_TYPE_PID:
@@ -509,7 +506,6 @@ int main(int argc, char *argv[])
printf("TGID\t%d\n", rtid);
break;
case TASKSTATS_TYPE_STATS:
- count++;
if (print_delays)
print_delayacct((struct taskstats *) NLA_DATA(na));
if (print_io_accounting)
diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h
index 3bb134355874..316917b98707 100644
--- a/tools/arch/arm64/include/uapi/asm/kvm.h
+++ b/tools/arch/arm64/include/uapi/asm/kvm.h
@@ -75,9 +75,11 @@ struct kvm_regs {
/* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
#define KVM_ARM_DEVICE_TYPE_SHIFT 0
-#define KVM_ARM_DEVICE_TYPE_MASK (0xffff << KVM_ARM_DEVICE_TYPE_SHIFT)
+#define KVM_ARM_DEVICE_TYPE_MASK GENMASK(KVM_ARM_DEVICE_TYPE_SHIFT + 15, \
+ KVM_ARM_DEVICE_TYPE_SHIFT)
#define KVM_ARM_DEVICE_ID_SHIFT 16
-#define KVM_ARM_DEVICE_ID_MASK (0xffff << KVM_ARM_DEVICE_ID_SHIFT)
+#define KVM_ARM_DEVICE_ID_MASK GENMASK(KVM_ARM_DEVICE_ID_SHIFT + 15, \
+ KVM_ARM_DEVICE_ID_SHIFT)
/* Supported device IDs */
#define KVM_ARM_DEVICE_VGIC_V2 0
diff --git a/tools/arch/s390/include/uapi/asm/kvm.h b/tools/arch/s390/include/uapi/asm/kvm.h
index 7a6b14874d65..a73cf01a1606 100644
--- a/tools/arch/s390/include/uapi/asm/kvm.h
+++ b/tools/arch/s390/include/uapi/asm/kvm.h
@@ -74,6 +74,7 @@ struct kvm_s390_io_adapter_req {
#define KVM_S390_VM_CRYPTO 2
#define KVM_S390_VM_CPU_MODEL 3
#define KVM_S390_VM_MIGRATION 4
+#define KVM_S390_VM_CPU_TOPOLOGY 5
/* kvm attributes for mem_ctrl */
#define KVM_S390_VM_MEM_ENABLE_CMMA 0
diff --git a/tools/arch/x86/include/asm/amd-ibs.h b/tools/arch/x86/include/asm/amd-ibs.h
index 765e9e752d03..9a3312e12e2e 100644
--- a/tools/arch/x86/include/asm/amd-ibs.h
+++ b/tools/arch/x86/include/asm/amd-ibs.h
@@ -29,7 +29,10 @@ union ibs_fetch_ctl {
rand_en:1, /* 57: random tagging enable */
fetch_l2_miss:1,/* 58: L2 miss for sampled fetch
* (needs IbsFetchComp) */
- reserved:5; /* 59-63: reserved */
+ l3_miss_only:1, /* 59: Collect L3 miss samples only */
+ fetch_oc_miss:1,/* 60: Op cache miss for the sampled fetch */
+ fetch_l3_miss:1,/* 61: L3 cache miss for the sampled fetch */
+ reserved:2; /* 62-63: reserved */
};
};
@@ -38,14 +41,14 @@ union ibs_op_ctl {
__u64 val;
struct {
__u64 opmaxcnt:16, /* 0-15: periodic op max. count */
- reserved0:1, /* 16: reserved */
+ l3_miss_only:1, /* 16: Collect L3 miss samples only */
op_en:1, /* 17: op sampling enable */
op_val:1, /* 18: op sample valid */
cnt_ctl:1, /* 19: periodic op counter control */
opmaxcnt_ext:7, /* 20-26: upper 7 bits of periodic op maximum count */
- reserved1:5, /* 27-31: reserved */
+ reserved0:5, /* 27-31: reserved */
opcurcnt:27, /* 32-58: periodic op counter current count */
- reserved2:5; /* 59-63: reserved */
+ reserved1:5; /* 59-63: reserved */
};
};
@@ -71,11 +74,12 @@ union ibs_op_data {
union ibs_op_data2 {
__u64 val;
struct {
- __u64 data_src:3, /* 0-2: data source */
+ __u64 data_src_lo:3, /* 0-2: data source low */
reserved0:1, /* 3: reserved */
rmt_node:1, /* 4: destination node */
cache_hit_st:1, /* 5: cache hit state */
- reserved1:57; /* 5-63: reserved */
+ data_src_hi:2, /* 6-7: data source high */
+ reserved1:56; /* 8-63: reserved */
};
};
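The ibs_op_data2 change widens the IBS data-source field from three to five bits by carving data_src_hi out of what used to be reserved space. Consumers are expected to stitch the two halves back together; a sketch (field names as defined above, the combining itself is an assumption about how callers use them):

	union ibs_op_data2 op_data2 = { .val = raw_op_data2 };
	u8 data_src = (op_data2.data_src_hi << 3) | op_data2.data_src_lo;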
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index a77b915d36a8..235dc85c91c3 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -219,7 +219,7 @@
#define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
-#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 or above (Zen) */
+#define X86_FEATURE_ZEN (7*32+28) /* "" CPU based on Zen microarchitecture */
#define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */
#define X86_FEATURE_IBRS_ENHANCED ( 7*32+30) /* Enhanced IBRS */
#define X86_FEATURE_MSR_IA32_FEAT_CTL ( 7*32+31) /* "" MSR IA32_FEAT_CTL configured */
@@ -303,6 +303,7 @@
#define X86_FEATURE_RETHUNK (11*32+14) /* "" Use REturn THUNK */
#define X86_FEATURE_UNRET (11*32+15) /* "" AMD BTB untrain return */
#define X86_FEATURE_USE_IBPB_FW (11*32+16) /* "" Use IBPB during runtime firmware calls */
+#define X86_FEATURE_RSB_VMEXIT_LITE (11*32+17) /* "" Fill RSB on VM exit when EIBRS is enabled */
/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
#define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */
@@ -353,6 +354,7 @@
#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */
#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */
#define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */
+#define X86_FEATURE_X2AVIC (15*32+18) /* Virtual x2apic */
#define X86_FEATURE_V_SPEC_CTRL (15*32+20) /* Virtual SPEC_CTRL */
#define X86_FEATURE_SVME_ADDR_CHK (15*32+28) /* "" SVME addr check */
@@ -456,5 +458,6 @@
#define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
#define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
#define X86_BUG_RETBLEED X86_BUG(26) /* CPU is affected by RETBleed */
+#define X86_BUG_EIBRS_PBRSB X86_BUG(27) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h
index cc615be27a54..6674bdb096f3 100644
--- a/tools/arch/x86/include/asm/msr-index.h
+++ b/tools/arch/x86/include/asm/msr-index.h
@@ -150,6 +150,10 @@
* are restricted to targets in
* kernel.
*/
+#define ARCH_CAP_PBRSB_NO BIT(24) /*
+ * Not susceptible to Post-Barrier
+ * Return Stack Buffer Predictions.
+ */
#define MSR_IA32_FLUSH_CMD 0x0000010b
#define L1D_FLUSH BIT(0) /*
@@ -231,6 +235,12 @@
#define PERF_CAP_PT_IDX 16
#define MSR_PEBS_LD_LAT_THRESHOLD 0x000003f6
+#define PERF_CAP_PEBS_TRAP BIT_ULL(6)
+#define PERF_CAP_ARCH_REG BIT_ULL(7)
+#define PERF_CAP_PEBS_FORMAT 0xf00
+#define PERF_CAP_PEBS_BASELINE BIT_ULL(14)
+#define PERF_CAP_PEBS_MASK (PERF_CAP_PEBS_TRAP | PERF_CAP_ARCH_REG | \
+ PERF_CAP_PEBS_FORMAT | PERF_CAP_PEBS_BASELINE)
#define MSR_IA32_RTIT_CTL 0x00000570
#define RTIT_CTL_TRACEEN BIT(0)
@@ -388,6 +398,7 @@
#define MSR_TURBO_ACTIVATION_RATIO 0x0000064C
#define MSR_PLATFORM_ENERGY_STATUS 0x0000064D
+#define MSR_SECONDARY_TURBO_RATIO_LIMIT 0x00000650
#define MSR_PKG_WEIGHTED_CORE_C0_RES 0x00000658
#define MSR_PKG_ANY_CORE_C0_RES 0x00000659
@@ -1018,6 +1029,7 @@
#define MSR_IA32_VMX_TRUE_EXIT_CTLS 0x0000048f
#define MSR_IA32_VMX_TRUE_ENTRY_CTLS 0x00000490
#define MSR_IA32_VMX_VMFUNC 0x00000491
+#define MSR_IA32_VMX_PROCBASED_CTLS3 0x00000492
/* VMX_BASIC bits and bitmasks */
#define VMX_BASIC_VMCS_SIZE_SHIFT 32
diff --git a/tools/arch/x86/include/asm/rmwcc.h b/tools/arch/x86/include/asm/rmwcc.h
index fee7983a90b4..11ff975242ca 100644
--- a/tools/arch/x86/include/asm/rmwcc.h
+++ b/tools/arch/x86/include/asm/rmwcc.h
@@ -2,8 +2,6 @@
#ifndef _TOOLS_LINUX_ASM_X86_RMWcc
#define _TOOLS_LINUX_ASM_X86_RMWcc
-#ifdef CONFIG_CC_HAS_ASM_GOTO
-
#define __GEN_RMWcc(fullop, var, cc, ...) \
do { \
asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
@@ -20,23 +18,4 @@ cc_label: \
#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
__GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
-#else /* !CONFIG_CC_HAS_ASM_GOTO */
-
-#define __GEN_RMWcc(fullop, var, cc, ...) \
-do { \
- char c; \
- asm volatile (fullop "; set" cc " %1" \
- : "+m" (var), "=qm" (c) \
- : __VA_ARGS__ : "memory"); \
- return c != 0; \
-} while (0)
-
-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
- __GEN_RMWcc(op " " arg0, var, cc)
-
-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
- __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
-
-#endif /* CONFIG_CC_HAS_ASM_GOTO */
-
#endif /* _TOOLS_LINUX_ASM_X86_RMWcc */
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h
index ec53c9fa1da9..46de10a809ec 100644
--- a/tools/arch/x86/include/uapi/asm/kvm.h
+++ b/tools/arch/x86/include/uapi/asm/kvm.h
@@ -306,7 +306,8 @@ struct kvm_pit_state {
struct kvm_pit_channel_state channels[3];
};
-#define KVM_PIT_FLAGS_HPET_LEGACY 0x00000001
+#define KVM_PIT_FLAGS_HPET_LEGACY 0x00000001
+#define KVM_PIT_FLAGS_SPEAKER_DATA_ON 0x00000002
struct kvm_pit_state2 {
struct kvm_pit_channel_state channels[3];
@@ -325,6 +326,7 @@ struct kvm_reinject_control {
#define KVM_VCPUEVENT_VALID_SHADOW 0x00000004
#define KVM_VCPUEVENT_VALID_SMM 0x00000008
#define KVM_VCPUEVENT_VALID_PAYLOAD 0x00000010
+#define KVM_VCPUEVENT_VALID_TRIPLE_FAULT 0x00000020
/* Interrupt shadow states */
#define KVM_X86_SHADOW_INT_MOV_SS 0x01
@@ -359,7 +361,10 @@ struct kvm_vcpu_events {
__u8 smm_inside_nmi;
__u8 latched_init;
} smi;
- __u8 reserved[27];
+ struct {
+ __u8 pending;
+ } triple_fault;
+ __u8 reserved[26];
__u8 exception_has_payload;
__u64 exception_payload;
};
@@ -434,6 +439,7 @@ struct kvm_sync_regs {
#define KVM_X86_QUIRK_OUT_7E_INC_RIP (1 << 3)
#define KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT (1 << 4)
#define KVM_X86_QUIRK_FIX_HYPERCALL_INSN (1 << 5)
+#define KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS (1 << 6)
#define KVM_STATE_NESTED_FORMAT_VMX 0
#define KVM_STATE_NESTED_FORMAT_SVM 1
diff --git a/tools/arch/x86/include/uapi/asm/vmx.h b/tools/arch/x86/include/uapi/asm/vmx.h
index 946d761adbd3..a5faf6d88f1b 100644
--- a/tools/arch/x86/include/uapi/asm/vmx.h
+++ b/tools/arch/x86/include/uapi/asm/vmx.h
@@ -91,6 +91,7 @@
#define EXIT_REASON_UMWAIT 67
#define EXIT_REASON_TPAUSE 68
#define EXIT_REASON_BUS_LOCK 74
+#define EXIT_REASON_NOTIFY 75
#define VMX_EXIT_REASONS \
{ EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \
@@ -153,7 +154,8 @@
{ EXIT_REASON_XRSTORS, "XRSTORS" }, \
{ EXIT_REASON_UMWAIT, "UMWAIT" }, \
{ EXIT_REASON_TPAUSE, "TPAUSE" }, \
- { EXIT_REASON_BUS_LOCK, "BUS_LOCK" }
+ { EXIT_REASON_BUS_LOCK, "BUS_LOCK" }, \
+ { EXIT_REASON_NOTIFY, "NOTIFY" }
#define VMX_EXIT_REASON_FLAGS \
{ VMX_EXIT_REASONS_FAILED_VMENTRY, "FAILED_VMENTRY" }
diff --git a/tools/bpf/Makefile b/tools/bpf/Makefile
index b11cfc86a3d0..243b79f2b451 100644
--- a/tools/bpf/Makefile
+++ b/tools/bpf/Makefile
@@ -34,8 +34,8 @@ else
endif
FEATURE_USER = .bpf
-FEATURE_TESTS = libbfd disassembler-four-args
-FEATURE_DISPLAY = libbfd disassembler-four-args
+FEATURE_TESTS = libbfd disassembler-four-args disassembler-init-styled
+FEATURE_DISPLAY = libbfd
check_feat := 1
NON_CHECK_FEAT_TARGETS := clean bpftool_clean runqslower_clean resolve_btfids_clean
@@ -56,6 +56,9 @@ endif
ifeq ($(feature-disassembler-four-args), 1)
CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE
endif
+ifeq ($(feature-disassembler-init-styled), 1)
+CFLAGS += -DDISASM_INIT_STYLED
+endif
$(OUTPUT)%.yacc.c: $(srctree)/tools/bpf/%.y
$(QUIET_BISON)$(YACC) -o $@ -d $<
diff --git a/tools/bpf/bpf_jit_disasm.c b/tools/bpf/bpf_jit_disasm.c
index c8ae95804728..a90a5d110f92 100644
--- a/tools/bpf/bpf_jit_disasm.c
+++ b/tools/bpf/bpf_jit_disasm.c
@@ -28,6 +28,7 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <limits.h>
+#include <tools/dis-asm-compat.h>
#define CMD_ACTION_SIZE_BUFFER 10
#define CMD_ACTION_READ_ALL 3
@@ -64,7 +65,9 @@ static void get_asm_insns(uint8_t *image, size_t len, int opcodes)
assert(bfdf);
assert(bfd_check_format(bfdf, bfd_object));
- init_disassemble_info(&info, stdout, (fprintf_ftype) fprintf);
+ init_disassemble_info_compat(&info, stdout,
+ (fprintf_ftype) fprintf,
+ fprintf_styled);
info.arch = bfd_get_arch(bfdf);
info.mach = bfd_get_mach(bfdf);
info.buffer = image;
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index 6b5b3a99f79d..4a95c017ad4c 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -93,8 +93,11 @@ INSTALL ?= install
RM ?= rm -f
FEATURE_USER = .bpftool
-FEATURE_TESTS = libbfd disassembler-four-args libcap clang-bpf-co-re
-FEATURE_DISPLAY = libbfd disassembler-four-args libcap clang-bpf-co-re
+FEATURE_TESTS = libbfd libbfd-liberty libbfd-liberty-z \
+ disassembler-four-args disassembler-init-styled libcap \
+ clang-bpf-co-re
+FEATURE_DISPLAY = libbfd libbfd-liberty libbfd-liberty-z \
+ libcap clang-bpf-co-re
check_feat := 1
NON_CHECK_FEAT_TARGETS := clean uninstall doc doc-clean doc-install doc-uninstall
@@ -115,6 +118,9 @@ endif
ifeq ($(feature-disassembler-four-args), 1)
CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE
endif
+ifeq ($(feature-disassembler-init-styled), 1)
+ CFLAGS += -DDISASM_INIT_STYLED
+endif
LIBS = $(LIBBPF) -lelf -lz
LIBS_BOOTSTRAP = $(LIBBPF_BOOTSTRAP) -lelf -lz
diff --git a/tools/bpf/bpftool/jit_disasm.c b/tools/bpf/bpftool/jit_disasm.c
index 24734f2249d6..aaf99a0168c9 100644
--- a/tools/bpf/bpftool/jit_disasm.c
+++ b/tools/bpf/bpftool/jit_disasm.c
@@ -24,6 +24,7 @@
#include <sys/stat.h>
#include <limits.h>
#include <bpf/libbpf.h>
+#include <tools/dis-asm-compat.h>
#include "json_writer.h"
#include "main.h"
@@ -39,15 +40,12 @@ static void get_exec_path(char *tpath, size_t size)
}
static int oper_count;
-static int fprintf_json(void *out, const char *fmt, ...)
+static int printf_json(void *out, const char *fmt, va_list ap)
{
- va_list ap;
char *s;
int err;
- va_start(ap, fmt);
err = vasprintf(&s, fmt, ap);
- va_end(ap);
if (err < 0)
return -1;
@@ -73,6 +71,32 @@ static int fprintf_json(void *out, const char *fmt, ...)
return 0;
}
+static int fprintf_json(void *out, const char *fmt, ...)
+{
+ va_list ap;
+ int r;
+
+ va_start(ap, fmt);
+ r = printf_json(out, fmt, ap);
+ va_end(ap);
+
+ return r;
+}
+
+static int fprintf_json_styled(void *out,
+ enum disassembler_style style __maybe_unused,
+ const char *fmt, ...)
+{
+ va_list ap;
+ int r;
+
+ va_start(ap, fmt);
+ r = printf_json(out, fmt, ap);
+ va_end(ap);
+
+ return r;
+}
+
void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
const char *arch, const char *disassembler_options,
const struct btf *btf,
@@ -99,11 +123,13 @@ void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
assert(bfd_check_format(bfdf, bfd_object));
if (json_output)
- init_disassemble_info(&info, stdout,
- (fprintf_ftype) fprintf_json);
+ init_disassemble_info_compat(&info, stdout,
+ (fprintf_ftype) fprintf_json,
+ fprintf_json_styled);
else
- init_disassemble_info(&info, stdout,
- (fprintf_ftype) fprintf);
+ init_disassemble_info_compat(&info, stdout,
+ (fprintf_ftype) fprintf,
+ fprintf_styled);
/* Update architecture info for offload. */
if (arch) {
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index 888a0421d43b..fc6ce0b2535a 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -70,6 +70,7 @@ FEATURE_TESTS_BASIC := \
libaio \
libzstd \
disassembler-four-args \
+ disassembler-init-styled \
file-handle
# FEATURE_TESTS_BASIC + FEATURE_TESTS_EXTRA is the complete list
@@ -134,8 +135,7 @@ FEATURE_DISPLAY ?= \
get_cpuid \
bpf \
libaio \
- libzstd \
- disassembler-four-args
+ libzstd
# Set FEATURE_CHECK_(C|LD)FLAGS-all for all FEATURE_TESTS features.
# If in the future we need per-feature checks/flags for features not
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index 7c2a17e23c30..04b07ff88234 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -18,6 +18,7 @@ FILES= \
test-libbfd.bin \
test-libbfd-buildid.bin \
test-disassembler-four-args.bin \
+ test-disassembler-init-styled.bin \
test-reallocarray.bin \
test-libbfd-liberty.bin \
test-libbfd-liberty-z.bin \
@@ -89,6 +90,8 @@ all: $(FILES)
__BUILD = $(CC) $(CFLAGS) -MD -Wall -Werror -o $@ $(patsubst %.bin,%.c,$(@F)) $(LDFLAGS)
BUILD = $(__BUILD) > $(@:.bin=.make.output) 2>&1
+ BUILD_BFD = $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl
+ BUILD_ALL = $(BUILD) -fstack-protector-all -O2 -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -lslang $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl -lz -llzma -lzstd -lcap
__BUILDXX = $(CXX) $(CXXFLAGS) -MD -Wall -Werror -o $@ $(patsubst %.bin,%.cpp,$(@F)) $(LDFLAGS)
BUILDXX = $(__BUILDXX) > $(@:.bin=.make.output) 2>&1
@@ -96,7 +99,7 @@ __BUILDXX = $(CXX) $(CXXFLAGS) -MD -Wall -Werror -o $@ $(patsubst %.bin,%.cpp,$(
###############################
$(OUTPUT)test-all.bin:
- $(BUILD) -fstack-protector-all -O2 -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -lslang $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl -lz -llzma -lzstd -lcap
+ $(BUILD_ALL) || $(BUILD_ALL) -lopcodes -liberty
$(OUTPUT)test-hello.bin:
$(BUILD)
@@ -240,13 +243,18 @@ $(OUTPUT)test-libpython.bin:
$(BUILD) $(FLAGS_PYTHON_EMBED)
$(OUTPUT)test-libbfd.bin:
- $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl
+ $(BUILD_BFD)
$(OUTPUT)test-libbfd-buildid.bin:
- $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl
+ $(BUILD_BFD) || $(BUILD_BFD) -liberty || $(BUILD_BFD) -liberty -lz
$(OUTPUT)test-disassembler-four-args.bin:
- $(BUILD) -DPACKAGE='"perf"' -lbfd -lopcodes
+ $(BUILD_BFD) -lopcodes || $(BUILD_BFD) -lopcodes -liberty || \
+ $(BUILD_BFD) -lopcodes -liberty -lz
+
+$(OUTPUT)test-disassembler-init-styled.bin:
+ $(BUILD_BFD) -lopcodes || $(BUILD_BFD) -lopcodes -liberty || \
+ $(BUILD_BFD) -lopcodes -liberty -lz
$(OUTPUT)test-reallocarray.bin:
$(BUILD)
diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
index 5ffafb967b6e..957c02c7b163 100644
--- a/tools/build/feature/test-all.c
+++ b/tools/build/feature/test-all.c
@@ -166,6 +166,10 @@
# include "test-disassembler-four-args.c"
#undef main
+#define main main_test_disassembler_init_styled
+# include "test-disassembler-init-styled.c"
+#undef main
+
#define main main_test_libzstd
# include "test-libzstd.c"
#undef main
diff --git a/tools/build/feature/test-disassembler-init-styled.c b/tools/build/feature/test-disassembler-init-styled.c
new file mode 100644
index 000000000000..f1ce0ec3bee9
--- /dev/null
+++ b/tools/build/feature/test-disassembler-init-styled.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <dis-asm.h>
+
+int main(void)
+{
+ struct disassemble_info info;
+
+ init_disassemble_info(&info, stdout,
+ NULL, NULL);
+
+ return 0;
+}
diff --git a/tools/build/feature/test-libcrypto.c b/tools/build/feature/test-libcrypto.c
index a98174e0569c..bc34a5bbb504 100644
--- a/tools/build/feature/test-libcrypto.c
+++ b/tools/build/feature/test-libcrypto.c
@@ -1,16 +1,23 @@
// SPDX-License-Identifier: GPL-2.0
+#include <openssl/evp.h>
#include <openssl/sha.h>
#include <openssl/md5.h>
int main(void)
{
- MD5_CTX context;
+ EVP_MD_CTX *mdctx;
unsigned char md[MD5_DIGEST_LENGTH + SHA_DIGEST_LENGTH];
unsigned char dat[] = "12345";
+ unsigned int digest_len;
- MD5_Init(&context);
- MD5_Update(&context, &dat[0], sizeof(dat));
- MD5_Final(&md[0], &context);
+ mdctx = EVP_MD_CTX_new();
+ if (!mdctx)
+ return 0;
+
+ EVP_DigestInit_ex(mdctx, EVP_md5(), NULL);
+ EVP_DigestUpdate(mdctx, &dat[0], sizeof(dat));
+ EVP_DigestFinal_ex(mdctx, &md[0], &digest_len);
+ EVP_MD_CTX_free(mdctx);
SHA1(&dat[0], sizeof(dat), &md[0]);
diff --git a/tools/cgroup/memcg_shrinker.py b/tools/cgroup/memcg_shrinker.py
new file mode 100644
index 000000000000..706ab27666a4
--- /dev/null
+++ b/tools/cgroup/memcg_shrinker.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2022 Roman Gushchin <roman.gushchin@linux.dev>
+# Copyright (C) 2022 Meta
+
+import os
+import argparse
+import sys
+
+
+def scan_cgroups(cgroup_root):
+ cgroups = {}
+
+ for root, subdirs, _ in os.walk(cgroup_root):
+ for cgroup in subdirs:
+ path = os.path.join(root, cgroup)
+ ino = os.stat(path).st_ino
+ cgroups[ino] = path
+
+ # (memcg ino, path)
+ return cgroups
+
+
+def scan_shrinkers(shrinker_debugfs):
+ shrinkers = []
+
+ for root, subdirs, _ in os.walk(shrinker_debugfs):
+ for shrinker in subdirs:
+ count_path = os.path.join(root, shrinker, "count")
+ with open(count_path) as f:
+ for line in f.readlines():
+ items = line.split(' ')
+ ino = int(items[0])
+ # (count, shrinker, memcg ino)
+ shrinkers.append((int(items[1]), shrinker, ino))
+ return shrinkers
+
+
+def main():
+ parser = argparse.ArgumentParser(description='Display biggest shrinkers')
+ parser.add_argument('-n', '--lines', type=int, help='Number of lines to print')
+
+ args = parser.parse_args()
+
+ cgroups = scan_cgroups("/sys/fs/cgroup/")
+ shrinkers = scan_shrinkers("/sys/kernel/debug/shrinker/")
+ shrinkers = sorted(shrinkers, reverse = True, key = lambda x: x[0])
+
+ n = 0
+ for s in shrinkers:
+ count, name, ino = (s[0], s[1], s[2])
+ if count == 0:
+ break
+
+ if ino == 0 or ino == 1:
+ cg = "/"
+ else:
+ try:
+ cg = cgroups[ino]
+ except KeyError:
+ cg = "unknown (%d)" % ino
+
+ print("%-8s %-20s %s" % (count, name, cg))
+
+ n += 1
+ if args.lines and n >= args.lines:
+ break
+
+
+if __name__ == '__main__':
+ main()
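Usage note (not part of the patch): the script expects the shrinker debugfs interface added earlier in this series, so debugfs must be mounted and shrinker debugging enabled in the kernel; running it as root, for example ./memcg_shrinker.py -n 10, prints the ten largest shrinker counts together with the owning cgroup path.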
diff --git a/tools/include/asm-generic/bitops/non-atomic.h b/tools/include/asm-generic/bitops/non-atomic.h
index 7e10c4b50c5d..0c472a833408 100644
--- a/tools/include/asm-generic/bitops/non-atomic.h
+++ b/tools/include/asm-generic/bitops/non-atomic.h
@@ -2,10 +2,10 @@
#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
-#include <asm/types.h>
+#include <linux/bits.h>
/**
- * __set_bit - Set a bit in memory
+ * ___set_bit - Set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
@@ -13,7 +13,8 @@
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
-static inline void __set_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+___set_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -21,7 +22,8 @@ static inline void __set_bit(int nr, volatile unsigned long *addr)
*p |= mask;
}
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -30,7 +32,7 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr)
}
/**
- * __change_bit - Toggle a bit in memory
+ * ___change_bit - Toggle a bit in memory
* @nr: the bit to change
* @addr: the address to start counting from
*
@@ -38,7 +40,8 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr)
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
-static inline void __change_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+___change_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -47,7 +50,7 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
}
/**
- * __test_and_set_bit - Set a bit and return its old value
+ * ___test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
@@ -55,7 +58,8 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -66,7 +70,7 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
}
/**
- * __test_and_clear_bit - Clear a bit and return its old value
+ * ___test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
@@ -74,7 +78,8 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -85,8 +90,8 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
}
/* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr,
- volatile unsigned long *addr)
+static __always_inline bool
+___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -97,11 +102,12 @@ static inline int __test_and_change_bit(int nr,
}
/**
- * test_bit - Determine whether a bit is set
+ * _test_bit - Determine whether a bit is set
* @nr: bit number to test
* @addr: Address to start counting from
*/
-static inline int test_bit(int nr, const volatile unsigned long *addr)
+static __always_inline bool
+_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
}
diff --git a/tools/include/linux/bitmap.h b/tools/include/linux/bitmap.h
index afdf93bebaaf..65d0747c5205 100644
--- a/tools/include/linux/bitmap.h
+++ b/tools/include/linux/bitmap.h
@@ -11,10 +11,10 @@
#define DECLARE_BITMAP(name,bits) \
unsigned long name[BITS_TO_LONGS(bits)]
-int __bitmap_weight(const unsigned long *bitmap, int bits);
+unsigned int __bitmap_weight(const unsigned long *bitmap, int bits);
void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
-int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits);
bool __bitmap_equal(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits);
@@ -45,7 +45,7 @@ static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits);
}
-static inline int bitmap_empty(const unsigned long *src, unsigned nbits)
+static inline bool bitmap_empty(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
@@ -53,7 +53,7 @@ static inline int bitmap_empty(const unsigned long *src, unsigned nbits)
return find_first_bit(src, nbits) == nbits;
}
-static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
+static inline bool bitmap_full(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
@@ -61,7 +61,7 @@ static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
return find_first_zero_bit(src, nbits) == nbits;
}
-static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
+static inline unsigned int bitmap_weight(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
@@ -146,7 +146,7 @@ size_t bitmap_scnprintf(unsigned long *bitmap, unsigned int nbits,
* @src2: operand 2
* @nbits: size of bitmap
*/
-static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
+static inline bool bitmap_and(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
diff --git a/tools/include/linux/bitops.h b/tools/include/linux/bitops.h
index 5fca38fe1ba8..f18683b95ea6 100644
--- a/tools/include/linux/bitops.h
+++ b/tools/include/linux/bitops.h
@@ -26,6 +26,22 @@ extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);
/*
+ * Defined here because those may be needed by architecture-specific static
+ * inlines.
+ */
+
+#define bitop(op, nr, addr) \
+ op(nr, addr)
+
+#define __set_bit(nr, addr) bitop(___set_bit, nr, addr)
+#define __clear_bit(nr, addr) bitop(___clear_bit, nr, addr)
+#define __change_bit(nr, addr) bitop(___change_bit, nr, addr)
+#define __test_and_set_bit(nr, addr) bitop(___test_and_set_bit, nr, addr)
+#define __test_and_clear_bit(nr, addr) bitop(___test_and_clear_bit, nr, addr)
+#define __test_and_change_bit(nr, addr) bitop(___test_and_change_bit, nr, addr)
+#define test_bit(nr, addr) bitop(_test_bit, nr, addr)
+
+/*
* Include this here because some architectures need generic_ffs/fls in
* scope
*
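For illustration, a minimal sketch of how the new bitop() wrappers are consumed — not taken from the patch, and assuming the tools/include headers are on the include path; callers keep the double-underscore names, which now simply expand to the triple-underscore generic helpers:

#include <stdbool.h>
#include <linux/bitops.h>

static bool demo_bitops(void)
{
	unsigned long map[1] = { 0 };

	__set_bit(3, map);		/* expands to ___set_bit(3, map) */
	__test_and_change_bit(5, map);	/* expands to ___test_and_change_bit() */

	return test_bit(3, map) && test_bit(5, map);	/* _test_bit() */
}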
diff --git a/tools/include/linux/compiler_types.h b/tools/include/linux/compiler_types.h
index 24ae3054f304..1bdd834bdd57 100644
--- a/tools/include/linux/compiler_types.h
+++ b/tools/include/linux/compiler_types.h
@@ -36,4 +36,8 @@
#include <linux/compiler-gcc.h>
#endif
+#ifndef asm_volatile_goto
+#define asm_volatile_goto(x...) asm goto(x)
+#endif
+
#endif /* __LINUX_COMPILER_TYPES_H */
diff --git a/tools/include/linux/list.h b/tools/include/linux/list.h
index b2fc48d5478c..a4dfb6a7cc6a 100644
--- a/tools/include/linux/list.h
+++ b/tools/include/linux/list.h
@@ -385,6 +385,17 @@ static inline void list_splice_tail_init(struct list_head *list,
(!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
/**
+ * list_last_entry_or_null - get the last element from a list
+ * @ptr: the list head to take the element from.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_head within the struct.
+ *
+ * Note that if the list is empty, it returns NULL.
+ */
+#define list_last_entry_or_null(ptr, type, member) \
+ (!list_empty(ptr) ? list_last_entry(ptr, type, member) : NULL)
+
+/**
* list_next_entry - get the next element in list
* @pos: the type * to cursor
* @member: the name of the list_head within the struct.
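For illustration, a hypothetical caller of the new list_last_entry_or_null() helper (the struct and names are invented here):

#include <linux/list.h>

struct item {
	int value;
	struct list_head node;
};

static int last_value_or(struct list_head *head, int fallback)
{
	struct item *last = list_last_entry_or_null(head, struct item, node);

	return last ? last->value : fallback;	/* NULL when the list is empty */
}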
diff --git a/tools/include/tools/dis-asm-compat.h b/tools/include/tools/dis-asm-compat.h
new file mode 100644
index 000000000000..70f331e23ed3
--- /dev/null
+++ b/tools/include/tools/dis-asm-compat.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+#ifndef _TOOLS_DIS_ASM_COMPAT_H
+#define _TOOLS_DIS_ASM_COMPAT_H
+
+#include <stdio.h>
+#include <dis-asm.h>
+
+/* define types for older binutils versions, to centralize ifdef'ery a bit */

+#ifndef DISASM_INIT_STYLED
+enum disassembler_style {DISASSEMBLER_STYLE_NOT_EMPTY};
+typedef int (*fprintf_styled_ftype) (void *, enum disassembler_style, const char*, ...);
+#endif
+
+/*
+ * Trivial fprintf wrapper to be used as the fprintf_styled_func argument to
+ * init_disassemble_info_compat() when normal fprintf suffices.
+ */
+static inline int fprintf_styled(void *out,
+ enum disassembler_style style,
+ const char *fmt, ...)
+{
+ va_list args;
+ int r;
+
+ (void)style;
+
+ va_start(args, fmt);
+ r = vfprintf(out, fmt, args);
+ va_end(args);
+
+ return r;
+}
+
+/*
+ * Wrapper for init_disassemble_info() that hides version
+ * differences. Depending on binutils version and architecture either
+ * fprintf_func or fprintf_styled_func will be called.
+ */
+static inline void init_disassemble_info_compat(struct disassemble_info *info,
+ void *stream,
+ fprintf_ftype unstyled_func,
+ fprintf_styled_ftype styled_func)
+{
+#ifdef DISASM_INIT_STYLED
+ init_disassemble_info(info, stream,
+ unstyled_func,
+ styled_func);
+#else
+ (void)styled_func;
+ init_disassemble_info(info, stream,
+ unstyled_func);
+#endif
+}
+
+#endif /* _TOOLS_DIS_ASM_COMPAT_H */
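A sketch of the intended consumer pattern, mirroring the bpf_jit_disasm.c and bpftool changes above; arch/buffer setup and the disassembly loop are omitted:

#include <stdio.h>
#include <dis-asm.h>
#include <tools/dis-asm-compat.h>

static void setup_disasm(struct disassemble_info *info)
{
	/* The same call works against styled and pre-styled binutils. */
	init_disassemble_info_compat(info, stdout,
				     (fprintf_ftype) fprintf,
				     fprintf_styled);
	/* info->arch, info->mach, info->buffer etc. would be filled next. */
}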
diff --git a/tools/include/uapi/asm-generic/fcntl.h b/tools/include/uapi/asm-generic/fcntl.h
index 1ecdb911add8..b02c8e0f4057 100644
--- a/tools/include/uapi/asm-generic/fcntl.h
+++ b/tools/include/uapi/asm-generic/fcntl.h
@@ -143,7 +143,7 @@
* record locks, but are "owned" by the open file description, not the
* process. This means that they are inherited across fork() like BSD (flock)
* locks, and they are only released automatically when the last reference to
- * the the open file against which they were acquired is put.
+ * the open file against which they were acquired is put.
*/
#define F_OFD_GETLK 36
#define F_OFD_SETLK 37
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h
index b28ff5d88145..520ad2691a99 100644
--- a/tools/include/uapi/drm/i915_drm.h
+++ b/tools/include/uapi/drm/i915_drm.h
@@ -751,14 +751,27 @@ typedef struct drm_i915_irq_wait {
/* Must be kept compact -- no holes and well documented */
-typedef struct drm_i915_getparam {
+/**
+ * struct drm_i915_getparam - Driver parameter query structure.
+ */
+struct drm_i915_getparam {
+ /** @param: Driver parameter to query. */
__s32 param;
- /*
+
+ /**
+ * @value: Address of memory where queried value should be put.
+ *
* WARNING: Using pointers instead of fixed-size u64 means we need to write
* compat32 code. Don't repeat this mistake.
*/
int __user *value;
-} drm_i915_getparam_t;
+};
+
+/**
+ * typedef drm_i915_getparam_t - Driver parameter query structure.
+ * See struct drm_i915_getparam.
+ */
+typedef struct drm_i915_getparam drm_i915_getparam_t;
/* Ioctl to set kernel params:
*/
@@ -1239,76 +1252,119 @@ struct drm_i915_gem_exec_object2 {
__u64 rsvd2;
};
+/**
+ * struct drm_i915_gem_exec_fence - An input or output fence for the execbuf
+ * ioctl.
+ *
+ * The request will wait for input fence to signal before submission.
+ *
+ * The returned output fence will be signaled after the completion of the
+ * request.
+ */
struct drm_i915_gem_exec_fence {
- /**
- * User's handle for a drm_syncobj to wait on or signal.
- */
+ /** @handle: User's handle for a drm_syncobj to wait on or signal. */
__u32 handle;
+ /**
+ * @flags: Supported flags are:
+ *
+ * I915_EXEC_FENCE_WAIT:
+ * Wait for the input fence before request submission.
+ *
+ * I915_EXEC_FENCE_SIGNAL:
+ * Return request completion fence as output
+ */
+ __u32 flags;
#define I915_EXEC_FENCE_WAIT (1<<0)
#define I915_EXEC_FENCE_SIGNAL (1<<1)
#define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
- __u32 flags;
};
-/*
- * See drm_i915_gem_execbuffer_ext_timeline_fences.
- */
-#define DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 0
-
-/*
+/**
+ * struct drm_i915_gem_execbuffer_ext_timeline_fences - Timeline fences
+ * for execbuf ioctl.
+ *
* This structure describes an array of drm_syncobj and associated points for
* timeline variants of drm_syncobj. It is invalid to append this structure to
* the execbuf if I915_EXEC_FENCE_ARRAY is set.
*/
struct drm_i915_gem_execbuffer_ext_timeline_fences {
+#define DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 0
+ /** @base: Extension link. See struct i915_user_extension. */
struct i915_user_extension base;
/**
- * Number of element in the handles_ptr & value_ptr arrays.
+ * @fence_count: Number of elements in the @handles_ptr & @value_ptr
+ * arrays.
*/
__u64 fence_count;
/**
- * Pointer to an array of struct drm_i915_gem_exec_fence of length
- * fence_count.
+ * @handles_ptr: Pointer to an array of struct drm_i915_gem_exec_fence
+ * of length @fence_count.
*/
__u64 handles_ptr;
/**
- * Pointer to an array of u64 values of length fence_count. Values
- * must be 0 for a binary drm_syncobj. A Value of 0 for a timeline
- * drm_syncobj is invalid as it turns a drm_syncobj into a binary one.
+ * @values_ptr: Pointer to an array of u64 values of length
+ * @fence_count.
+ * Values must be 0 for a binary drm_syncobj. A Value of 0 for a
+ * timeline drm_syncobj is invalid as it turns a drm_syncobj into a
+ * binary one.
*/
__u64 values_ptr;
};
+/**
+ * struct drm_i915_gem_execbuffer2 - Structure for DRM_I915_GEM_EXECBUFFER2
+ * ioctl.
+ */
struct drm_i915_gem_execbuffer2 {
- /**
- * List of gem_exec_object2 structs
- */
+ /** @buffers_ptr: Pointer to a list of gem_exec_object2 structs */
__u64 buffers_ptr;
+
+ /** @buffer_count: Number of elements in @buffers_ptr array */
__u32 buffer_count;
- /** Offset in the batchbuffer to start execution from. */
+ /**
+ * @batch_start_offset: Offset in the batchbuffer to start execution
+ * from.
+ */
__u32 batch_start_offset;
- /** Bytes used in batchbuffer from batch_start_offset */
+
+ /**
+ * @batch_len: Length in bytes of the batch buffer, starting from the
+ * @batch_start_offset. If 0, length is assumed to be the batch buffer
+ * object size.
+ */
__u32 batch_len;
+
+ /** @DR1: deprecated */
__u32 DR1;
+
+ /** @DR4: deprecated */
__u32 DR4;
+
+ /** @num_cliprects: See @cliprects_ptr */
__u32 num_cliprects;
+
/**
- * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY
- * & I915_EXEC_USE_EXTENSIONS are not set.
+ * @cliprects_ptr: Kernel clipping was a DRI1 misfeature.
+ *
+ * It is invalid to use this field if I915_EXEC_FENCE_ARRAY or
+ * I915_EXEC_USE_EXTENSIONS flags are not set.
*
* If I915_EXEC_FENCE_ARRAY is set, then this is a pointer to an array
- * of struct drm_i915_gem_exec_fence and num_cliprects is the length
- * of the array.
+ * of &drm_i915_gem_exec_fence and @num_cliprects is the length of the
+ * array.
*
* If I915_EXEC_USE_EXTENSIONS is set, then this is a pointer to a
- * single struct i915_user_extension and num_cliprects is 0.
+ * single &i915_user_extension and num_cliprects is 0.
*/
__u64 cliprects_ptr;
+
+ /** @flags: Execbuf flags */
+ __u64 flags;
#define I915_EXEC_RING_MASK (0x3f)
#define I915_EXEC_DEFAULT (0<<0)
#define I915_EXEC_RENDER (1<<0)
@@ -1326,10 +1382,6 @@ struct drm_i915_gem_execbuffer2 {
#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
#define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6)
#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
- __u64 flags;
- __u64 rsvd1; /* now used for context info */
- __u64 rsvd2;
-};
/** Resets the SO write offset registers for transform feedback on gen7. */
#define I915_EXEC_GEN7_SOL_RESET (1<<8)
@@ -1432,9 +1484,23 @@ struct drm_i915_gem_execbuffer2 {
* drm_i915_gem_execbuffer_ext enum.
*/
#define I915_EXEC_USE_EXTENSIONS (1 << 21)
-
#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_USE_EXTENSIONS << 1))
+ /** @rsvd1: Context id */
+ __u64 rsvd1;
+
+ /**
+ * @rsvd2: in and out sync_file file descriptors.
+ *
+ * When I915_EXEC_FENCE_IN or I915_EXEC_FENCE_SUBMIT flag is set, the
+ * lower 32 bits of this field will have the in sync_file fd (input).
+ *
+ * When I915_EXEC_FENCE_OUT flag is set, the upper 32 bits of this
+ * field will have the out sync_file fd (output).
+ */
+ __u64 rsvd2;
+};
+
#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
@@ -1814,19 +1880,58 @@ struct drm_i915_gem_context_create {
__u32 pad;
};
+/**
+ * struct drm_i915_gem_context_create_ext - Structure for creating contexts.
+ */
struct drm_i915_gem_context_create_ext {
- __u32 ctx_id; /* output: id of new context*/
+ /** @ctx_id: Id of the created context (output) */
+ __u32 ctx_id;
+
+ /**
+ * @flags: Supported flags are:
+ *
+ * I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS:
+ *
+ * Extensions may be appended to this structure and the driver must check
+ * for those. See @extensions.
+ *
+ * I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE
+ *
+ * Created context will have single timeline.
+ */
__u32 flags;
#define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS (1u << 0)
#define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE (1u << 1)
#define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
(-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1))
+
+ /**
+ * @extensions: Zero-terminated chain of extensions.
+ *
+ * I915_CONTEXT_CREATE_EXT_SETPARAM:
+ * Context parameter to set or query during context creation.
+ * See struct drm_i915_gem_context_create_ext_setparam.
+ *
+ * I915_CONTEXT_CREATE_EXT_CLONE:
+ * This extension has been removed. On the off chance someone somewhere
+ * has attempted to use it, never re-use this extension number.
+ */
__u64 extensions;
+#define I915_CONTEXT_CREATE_EXT_SETPARAM 0
+#define I915_CONTEXT_CREATE_EXT_CLONE 1
};
+/**
+ * struct drm_i915_gem_context_param - Context parameter to set or query.
+ */
struct drm_i915_gem_context_param {
+ /** @ctx_id: Context id */
__u32 ctx_id;
+
+ /** @size: Size of the parameter @value */
__u32 size;
+
+ /** @param: Parameter to set or query */
__u64 param;
#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1
/* I915_CONTEXT_PARAM_NO_ZEROMAP has been removed. On the off chance
@@ -1973,6 +2078,7 @@ struct drm_i915_gem_context_param {
#define I915_CONTEXT_PARAM_PROTECTED_CONTENT 0xd
/* Must be kept compact -- no holes and well documented */
+ /** @value: Context parameter value to be set or queried */
__u64 value;
};
@@ -2371,23 +2477,29 @@ struct i915_context_param_engines {
struct i915_engine_class_instance engines[N__]; \
} __attribute__((packed)) name__
+/**
+ * struct drm_i915_gem_context_create_ext_setparam - Context parameter
+ * to set or query during context creation.
+ */
struct drm_i915_gem_context_create_ext_setparam {
-#define I915_CONTEXT_CREATE_EXT_SETPARAM 0
+ /** @base: Extension link. See struct i915_user_extension. */
struct i915_user_extension base;
+
+ /**
+ * @param: Context parameter to set or query.
+ * See struct drm_i915_gem_context_param.
+ */
struct drm_i915_gem_context_param param;
};
-/* This API has been removed. On the off chance someone somewhere has
- * attempted to use it, never re-use this extension number.
- */
-#define I915_CONTEXT_CREATE_EXT_CLONE 1
-
struct drm_i915_gem_context_destroy {
__u32 ctx_id;
__u32 pad;
};
-/*
+/**
+ * struct drm_i915_gem_vm_control - Structure to create or destroy VM.
+ *
* DRM_I915_GEM_VM_CREATE -
*
* Create a new virtual memory address space (ppGTT) for use within a context
@@ -2397,20 +2509,23 @@ struct drm_i915_gem_context_destroy {
* The id of new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is
* returned in the outparam @id.
*
- * No flags are defined, with all bits reserved and must be zero.
- *
 * An extension chain may be provided, starting with @extensions, and terminated
* by the @next_extension being 0. Currently, no extensions are defined.
*
* DRM_I915_GEM_VM_DESTROY -
*
- * Destroys a previously created VM id, specified in @id.
+ * Destroys a previously created VM id, specified in @vm_id.
*
* No extensions or flags are allowed currently, and so must be zero.
*/
struct drm_i915_gem_vm_control {
+ /** @extensions: Zero-terminated chain of extensions. */
__u64 extensions;
+
+ /** @flags: reserved for future usage, currently MBZ */
__u32 flags;
+
+ /** @vm_id: Id of the VM created or to be destroyed */
__u32 vm_id;
};
@@ -3207,36 +3322,6 @@ struct drm_i915_gem_memory_class_instance {
* struct drm_i915_memory_region_info - Describes one region as known to the
* driver.
*
- * Note that we reserve some stuff here for potential future work. As an example
- * we might want expose the capabilities for a given region, which could include
- * things like if the region is CPU mappable/accessible, what are the supported
- * mapping types etc.
- *
- * Note that to extend struct drm_i915_memory_region_info and struct
- * drm_i915_query_memory_regions in the future the plan is to do the following:
- *
- * .. code-block:: C
- *
- * struct drm_i915_memory_region_info {
- * struct drm_i915_gem_memory_class_instance region;
- * union {
- * __u32 rsvd0;
- * __u32 new_thing1;
- * };
- * ...
- * union {
- * __u64 rsvd1[8];
- * struct {
- * __u64 new_thing2;
- * __u64 new_thing3;
- * ...
- * };
- * };
- * };
- *
- * With this things should remain source compatible between versions for
- * userspace, even as we add new fields.
- *
* Note this is using both struct drm_i915_query_item and struct drm_i915_query.
* For this new query we are adding the new query id DRM_I915_QUERY_MEMORY_REGIONS
* at &drm_i915_query_item.query_id.
@@ -3248,14 +3333,81 @@ struct drm_i915_memory_region_info {
/** @rsvd0: MBZ */
__u32 rsvd0;
- /** @probed_size: Memory probed by the driver (-1 = unknown) */
+ /**
+ * @probed_size: Memory probed by the driver
+ *
+	 * Note that it should never be possible to encounter a zero value
+	 * here; also note that no current region type will ever return -1 here,
+	 * although for future region types this might be a possibility. The
+	 * same applies to the other size fields.
+ */
__u64 probed_size;
- /** @unallocated_size: Estimate of memory remaining (-1 = unknown) */
+ /**
+ * @unallocated_size: Estimate of memory remaining
+ *
+ * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable accounting.
+ * Without this (or if this is an older kernel) the value here will
+ * always equal the @probed_size. Note this is only currently tracked
+ * for I915_MEMORY_CLASS_DEVICE regions (for other types the value here
+ * will always equal the @probed_size).
+ */
__u64 unallocated_size;
- /** @rsvd1: MBZ */
- __u64 rsvd1[8];
+ union {
+ /** @rsvd1: MBZ */
+ __u64 rsvd1[8];
+ struct {
+ /**
+ * @probed_cpu_visible_size: Memory probed by the driver
+ * that is CPU accessible.
+ *
+			 * This will always be <= @probed_size, and the
+ * remainder (if there is any) will not be CPU
+ * accessible.
+ *
+ * On systems without small BAR, the @probed_size will
+ * always equal the @probed_cpu_visible_size, since all
+ * of it will be CPU accessible.
+ *
+ * Note this is only tracked for
+ * I915_MEMORY_CLASS_DEVICE regions (for other types the
+ * value here will always equal the @probed_size).
+ *
+ * Note that if the value returned here is zero, then
+ * this must be an old kernel which lacks the relevant
+ * small-bar uAPI support (including
+ * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS), but on
+ * such systems we should never actually end up with a
+ * small BAR configuration, assuming we are able to load
+ * the kernel module. Hence it should be safe to treat
+ * this the same as when @probed_cpu_visible_size ==
+ * @probed_size.
+ */
+ __u64 probed_cpu_visible_size;
+
+ /**
+ * @unallocated_cpu_visible_size: Estimate of CPU
+ * visible memory remaining.
+ *
+ * Note this is only tracked for
+ * I915_MEMORY_CLASS_DEVICE regions (for other types the
+ * value here will always equal the
+ * @probed_cpu_visible_size).
+ *
+ * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
+ * accounting. Without this the value here will always
+ * equal the @probed_cpu_visible_size. Note this is only
+ * currently tracked for I915_MEMORY_CLASS_DEVICE
+ * regions (for other types the value here will also
+ * always equal the @probed_cpu_visible_size).
+ *
+ * If this is an older kernel the value here will be
+ * zero, see also @probed_cpu_visible_size.
+ */
+ __u64 unallocated_cpu_visible_size;
+ };
+ };
};
/**
@@ -3329,11 +3481,11 @@ struct drm_i915_query_memory_regions {
* struct drm_i915_gem_create_ext - Existing gem_create behaviour, with added
* extension support using struct i915_user_extension.
*
- * Note that in the future we want to have our buffer flags here, at least for
- * the stuff that is immutable. Previously we would have two ioctls, one to
- * create the object with gem_create, and another to apply various parameters,
- * however this creates some ambiguity for the params which are considered
- * immutable. Also in general we're phasing out the various SET/GET ioctls.
+ * Note that new buffer flags should be added here, at least for the stuff that
+ * is immutable. Previously we would have two ioctls, one to create the object
+ * with gem_create, and another to apply various parameters; however, this
+ * creates some ambiguity for the params which are considered immutable. Also in
+ * general we're phasing out the various SET/GET ioctls.
*/
struct drm_i915_gem_create_ext {
/**
@@ -3341,7 +3493,6 @@ struct drm_i915_gem_create_ext {
*
* The (page-aligned) allocated size for the object will be returned.
*
- *
* DG2 64K min page size implications:
*
* On discrete platforms, starting from DG2, we have to contend with GTT
@@ -3353,7 +3504,9 @@ struct drm_i915_gem_create_ext {
*
* Note that the returned size here will always reflect any required
* rounding up done by the kernel, i.e 4K will now become 64K on devices
- * such as DG2.
+ * such as DG2. The kernel will always select the largest minimum
+ * page-size for the set of possible placements as the value to use when
+ * rounding up the @size.
*
* Special DG2 GTT address alignment requirement:
*
@@ -3377,14 +3530,58 @@ struct drm_i915_gem_create_ext {
* is deemed to be a good compromise.
*/
__u64 size;
+
/**
* @handle: Returned handle for the object.
*
* Object handles are nonzero.
*/
__u32 handle;
- /** @flags: MBZ */
+
+ /**
+ * @flags: Optional flags.
+ *
+ * Supported values:
+ *
+ * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the kernel that
+ * the object will need to be accessed via the CPU.
+ *
+ * Only valid when placing objects in I915_MEMORY_CLASS_DEVICE, and only
+ * strictly required on configurations where some subset of the device
+ * memory is directly visible/mappable through the CPU (which we also
+ * call small BAR), like on some DG2+ systems. Note that this is quite
+ * undesirable, but due to various factors like the client CPU, BIOS etc
+ * it's something we can expect to see in the wild. See
+ * &drm_i915_memory_region_info.probed_cpu_visible_size for how to
+ * determine if this system applies.
+ *
+ * Note that one of the placements MUST be I915_MEMORY_CLASS_SYSTEM, to
+ * ensure the kernel can always spill the allocation to system memory,
+ * if the object can't be allocated in the mappable part of
+ * I915_MEMORY_CLASS_DEVICE.
+ *
+ * Also note that since the kernel only supports flat-CCS on objects
+ * that can *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore
+ * don't support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together with
+ * flat-CCS.
+ *
+ * Without this hint, the kernel will assume that non-mappable
+ * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note that the
+ * kernel can still migrate the object to the mappable part, as a last
+ * resort, if userspace ever CPU faults this object, but this might be
+ * expensive, and so ideally should be avoided.
+ *
+ * On older kernels which lack the relevant small-bar uAPI support (see
+ * also &drm_i915_memory_region_info.probed_cpu_visible_size),
+ * usage of the flag will result in an error, but it should NEVER be
+ * possible to end up with a small BAR configuration, assuming we can
+ * also successfully load the i915 kernel module. In such cases the
+ * entire I915_MEMORY_CLASS_DEVICE region will be CPU accessible, and as
+ * such there are zero restrictions on where the object can be placed.
+ */
+#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
__u32 flags;
+
/**
* @extensions: The chain of extensions to apply to this object.
*
@@ -3443,6 +3640,22 @@ struct drm_i915_gem_create_ext {
* At which point we get the object handle in &drm_i915_gem_create_ext.handle,
* along with the final object size in &drm_i915_gem_create_ext.size, which
* should account for any rounding up, if required.
+ *
+ * Note that userspace has no means of knowing the current backing region
+ * for objects where @num_regions is larger than one. The kernel will only
+ * ensure that the priority order of the @regions array is honoured, either
+ * when initially placing the object, or when moving memory around due to
+ * memory pressure.
+ *
+ * On Flat-CCS capable HW, compression is supported for objects residing
+ * in I915_MEMORY_CLASS_DEVICE. If such a (compressed) object also lists
+ * another memory class in @regions and i915 migrates it, due to memory
+ * constraints, to a non I915_MEMORY_CLASS_DEVICE region, i915 would need
+ * to decompress the content. But i915 doesn't have the information
+ * required to decompress userspace compressed objects.
+ *
+ * So i915 supports Flat-CCS only for objects that can reside solely in
+ * I915_MEMORY_CLASS_DEVICE regions.
*/
struct drm_i915_gem_create_ext_memory_regions {
/** @base: Extension link. See struct i915_user_extension. */
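A hedged userspace sketch of the flag described above: place an object in device memory but keep it CPU-accessible, with a system-memory fallback. The i915_user_extension layout, the I915_GEM_CREATE_EXT_MEMORY_REGIONS extension name and DRM_IOCTL_I915_GEM_CREATE_EXT come from the wider i915 uAPI header, not from the hunks shown here.

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int create_cpu_visible_lmem_bo(int drm_fd, __u32 *handle)
{
	struct drm_i915_gem_memory_class_instance placements[] = {
		{ .memory_class = I915_MEMORY_CLASS_DEVICE, .memory_instance = 0 },
		{ .memory_class = I915_MEMORY_CLASS_SYSTEM, .memory_instance = 0 },
	};
	struct drm_i915_gem_create_ext_memory_regions regions = {
		.base.name = I915_GEM_CREATE_EXT_MEMORY_REGIONS,
		.num_regions = 2,
		.regions = (uintptr_t)placements,
	};
	struct drm_i915_gem_create_ext create = {
		.size = 16 * 1024 * 1024,
		.flags = I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS,
		.extensions = (uintptr_t)&regions,
	};

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create))
		return -1;

	*handle = create.handle;	/* create.size now holds the rounded-up size */
	return 0;
}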
diff --git a/tools/include/uapi/linux/fscrypt.h b/tools/include/uapi/linux/fscrypt.h
index 9f4428be3e36..a756b29afcc2 100644
--- a/tools/include/uapi/linux/fscrypt.h
+++ b/tools/include/uapi/linux/fscrypt.h
@@ -27,7 +27,8 @@
#define FSCRYPT_MODE_AES_128_CBC 5
#define FSCRYPT_MODE_AES_128_CTS 6
#define FSCRYPT_MODE_ADIANTUM 9
-/* If adding a mode number > 9, update FSCRYPT_MODE_MAX in fscrypt_private.h */
+#define FSCRYPT_MODE_AES_256_HCTR2 10
+/* If adding a mode number > 10, update FSCRYPT_MODE_MAX in fscrypt_private.h */
/*
* Legacy policy version; ad-hoc KDF and no key verification.
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index cb6e3846d27b..eed0315a77a6 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -270,6 +270,8 @@ struct kvm_xen_exit {
#define KVM_EXIT_X86_BUS_LOCK 33
#define KVM_EXIT_XEN 34
#define KVM_EXIT_RISCV_SBI 35
+#define KVM_EXIT_RISCV_CSR 36
+#define KVM_EXIT_NOTIFY 37
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
@@ -496,6 +498,18 @@ struct kvm_run {
unsigned long args[6];
unsigned long ret[2];
} riscv_sbi;
+ /* KVM_EXIT_RISCV_CSR */
+ struct {
+ unsigned long csr_num;
+ unsigned long new_value;
+ unsigned long write_mask;
+ unsigned long ret_value;
+ } riscv_csr;
+ /* KVM_EXIT_NOTIFY */
+ struct {
+#define KVM_NOTIFY_CONTEXT_INVALID (1 << 0)
+ __u32 flags;
+ } notify;
/* Fix the size of the union. */
char padding[256];
};
@@ -1157,6 +1171,12 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_VM_TSC_CONTROL 214
#define KVM_CAP_SYSTEM_EVENT_DATA 215
#define KVM_CAP_ARM_SYSTEM_SUSPEND 216
+#define KVM_CAP_S390_PROTECTED_DUMP 217
+#define KVM_CAP_X86_TRIPLE_FAULT_EVENT 218
+#define KVM_CAP_X86_NOTIFY_VMEXIT 219
+#define KVM_CAP_VM_DISABLE_NX_HUGE_PAGES 220
+#define KVM_CAP_S390_ZPCI_OP 221
+#define KVM_CAP_S390_CPU_TOPOLOGY 222
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1660,6 +1680,55 @@ struct kvm_s390_pv_unp {
__u64 tweak;
};
+enum pv_cmd_dmp_id {
+ KVM_PV_DUMP_INIT,
+ KVM_PV_DUMP_CONFIG_STOR_STATE,
+ KVM_PV_DUMP_COMPLETE,
+ KVM_PV_DUMP_CPU,
+};
+
+struct kvm_s390_pv_dmp {
+ __u64 subcmd;
+ __u64 buff_addr;
+ __u64 buff_len;
+ __u64 gaddr; /* For dump storage state */
+ __u64 reserved[4];
+};
+
+enum pv_cmd_info_id {
+ KVM_PV_INFO_VM,
+ KVM_PV_INFO_DUMP,
+};
+
+struct kvm_s390_pv_info_dump {
+ __u64 dump_cpu_buffer_len;
+ __u64 dump_config_mem_buffer_per_1m;
+ __u64 dump_config_finalize_len;
+};
+
+struct kvm_s390_pv_info_vm {
+ __u64 inst_calls_list[4];
+ __u64 max_cpus;
+ __u64 max_guests;
+ __u64 max_guest_addr;
+ __u64 feature_indication;
+};
+
+struct kvm_s390_pv_info_header {
+ __u32 id;
+ __u32 len_max;
+ __u32 len_written;
+ __u32 reserved;
+};
+
+struct kvm_s390_pv_info {
+ struct kvm_s390_pv_info_header header;
+ union {
+ struct kvm_s390_pv_info_dump dump;
+ struct kvm_s390_pv_info_vm vm;
+ };
+};
+
enum pv_cmd_id {
KVM_PV_ENABLE,
KVM_PV_DISABLE,
@@ -1668,6 +1737,8 @@ enum pv_cmd_id {
KVM_PV_VERIFY,
KVM_PV_PREP_RESET,
KVM_PV_UNSHARE_ALL,
+ KVM_PV_INFO,
+ KVM_PV_DUMP,
};
struct kvm_pv_cmd {
@@ -2119,4 +2190,41 @@ struct kvm_stats_desc {
/* Available with KVM_CAP_XSAVE2 */
#define KVM_GET_XSAVE2 _IOR(KVMIO, 0xcf, struct kvm_xsave)
+/* Available with KVM_CAP_S390_PROTECTED_DUMP */
+#define KVM_S390_PV_CPU_COMMAND _IOWR(KVMIO, 0xd0, struct kvm_pv_cmd)
+
+/* Available with KVM_CAP_X86_NOTIFY_VMEXIT */
+#define KVM_X86_NOTIFY_VMEXIT_ENABLED (1ULL << 0)
+#define KVM_X86_NOTIFY_VMEXIT_USER (1ULL << 1)
+
+/* Available with KVM_CAP_S390_ZPCI_OP */
+#define KVM_S390_ZPCI_OP _IOW(KVMIO, 0xd1, struct kvm_s390_zpci_op)
+
+struct kvm_s390_zpci_op {
+ /* in */
+ __u32 fh; /* target device */
+ __u8 op; /* operation to perform */
+ __u8 pad[3];
+ union {
+ /* for KVM_S390_ZPCIOP_REG_AEN */
+ struct {
+ __u64 ibv; /* Guest addr of interrupt bit vector */
+ __u64 sb; /* Guest addr of summary bit */
+ __u32 flags;
+ __u32 noi; /* Number of interrupts */
+ __u8 isc; /* Guest interrupt subclass */
+ __u8 sbo; /* Offset of guest summary bit vector */
+ __u16 pad;
+ } reg_aen;
+ __u64 reserved[8];
+ } u;
+};
+
+/* types for kvm_s390_zpci_op->op */
+#define KVM_S390_ZPCIOP_REG_AEN 0
+#define KVM_S390_ZPCIOP_DEREG_AEN 1
+
+/* flags for kvm_s390_zpci_op->u.reg_aen.flags */
+#define KVM_S390_ZPCIOP_REGAEN_HOST (1 << 0)
+
#endif /* __LINUX_KVM_H */
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index 4653834f078f..581ed4bdc062 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -301,6 +301,7 @@ enum {
* { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
* { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
* { u64 id; } && PERF_FORMAT_ID
+ * { u64 lost; } && PERF_FORMAT_LOST
* } && !PERF_FORMAT_GROUP
*
* { u64 nr;
@@ -308,6 +309,7 @@ enum {
* { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
* { u64 value;
* { u64 id; } && PERF_FORMAT_ID
+ * { u64 lost; } && PERF_FORMAT_LOST
* } cntr[nr];
* } && PERF_FORMAT_GROUP
* };
@@ -317,8 +319,9 @@ enum perf_event_read_format {
PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
PERF_FORMAT_ID = 1U << 2,
PERF_FORMAT_GROUP = 1U << 3,
+ PERF_FORMAT_LOST = 1U << 4,
- PERF_FORMAT_MAX = 1U << 4, /* non-ABI */
+ PERF_FORMAT_MAX = 1U << 5, /* non-ABI */
};
#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
@@ -1310,7 +1313,7 @@ union perf_mem_data_src {
#define PERF_MEM_SNOOP_SHIFT 19
#define PERF_MEM_SNOOPX_FWD 0x01 /* forward */
-/* 1 free */
+#define PERF_MEM_SNOOPX_PEER 0x02 /* xfer from peer */
#define PERF_MEM_SNOOPX_SHIFT 38
/* locked instruction */
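Purely as an illustration of the layout documented above, the read() buffer for a single (non-group) event opened with TOTAL_TIME_ENABLED | TOTAL_TIME_RUNNING | ID | LOST would look like:

#include <linux/types.h>

struct single_event_read {
	__u64 value;
	__u64 time_enabled;	/* PERF_FORMAT_TOTAL_TIME_ENABLED */
	__u64 time_running;	/* PERF_FORMAT_TOTAL_TIME_RUNNING */
	__u64 id;		/* PERF_FORMAT_ID */
	__u64 lost;		/* PERF_FORMAT_LOST */
};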
diff --git a/tools/include/uapi/linux/vhost.h b/tools/include/uapi/linux/vhost.h
index cab645d4a645..f9f115a7c75b 100644
--- a/tools/include/uapi/linux/vhost.h
+++ b/tools/include/uapi/linux/vhost.h
@@ -171,4 +171,13 @@
#define VHOST_VDPA_SET_GROUP_ASID _IOW(VHOST_VIRTIO, 0x7C, \
struct vhost_vring_state)
+/* Suspend a device so it does not process virtqueue requests anymore
+ *
+ * After the ioctl returns, the device must preserve all the necessary state
+ * (the virtqueue vring base plus any device specific state) that is
+ * required for restoring it in the future. The device must not change its
+ * configuration after that point.
+ */
+#define VHOST_VDPA_SUSPEND _IO(VHOST_VIRTIO, 0x7D)
+
#endif
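A minimal, hedged usage sketch of the new ioctl; vdpa_fd is assumed to be an already-open vhost-vdpa device fd and the updated uapi header to be installed:

#include <sys/ioctl.h>
#include <linux/vhost.h>

static int vdpa_suspend(int vdpa_fd)
{
	/* On success the device stops processing virtqueues and must keep
	 * its state (e.g. vring bases) stable for later retrieval. */
	return ioctl(vdpa_fd, VHOST_VDPA_SUSPEND);
}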
diff --git a/tools/lib/bitmap.c b/tools/lib/bitmap.c
index 354f8cdc0880..c3e4871967bc 100644
--- a/tools/lib/bitmap.c
+++ b/tools/lib/bitmap.c
@@ -5,9 +5,9 @@
*/
#include <linux/bitmap.h>
-int __bitmap_weight(const unsigned long *bitmap, int bits)
+unsigned int __bitmap_weight(const unsigned long *bitmap, int bits)
{
- int k, w = 0, lim = bits/BITS_PER_LONG;
+ unsigned int k, w = 0, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; k++)
w += hweight_long(bitmap[k]);
@@ -57,7 +57,7 @@ size_t bitmap_scnprintf(unsigned long *bitmap, unsigned int nbits,
return ret;
}
-int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits)
{
unsigned int k;
diff --git a/tools/lib/bpf/skel_internal.h b/tools/lib/bpf/skel_internal.h
index bd6f4505e7b1..70adf7b119b9 100644
--- a/tools/lib/bpf/skel_internal.h
+++ b/tools/lib/bpf/skel_internal.h
@@ -66,13 +66,13 @@ struct bpf_load_and_run_opts {
const char *errstr;
};
-long bpf_sys_bpf(__u32 cmd, void *attr, __u32 attr_size);
+long kern_sys_bpf(__u32 cmd, void *attr, __u32 attr_size);
static inline int skel_sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
unsigned int size)
{
#ifdef __KERNEL__
- return bpf_sys_bpf(cmd, attr, size);
+ return kern_sys_bpf(cmd, attr, size);
#else
return syscall(__NR_bpf, cmd, attr, size);
#endif
diff --git a/tools/lib/perf/cpumap.c b/tools/lib/perf/cpumap.c
index 384d5e076ee4..6cd0be7c1bb4 100644
--- a/tools/lib/perf/cpumap.c
+++ b/tools/lib/perf/cpumap.c
@@ -309,7 +309,7 @@ bool perf_cpu_map__has(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
return perf_cpu_map__idx(cpus, cpu) != -1;
}
-struct perf_cpu perf_cpu_map__max(struct perf_cpu_map *map)
+struct perf_cpu perf_cpu_map__max(const struct perf_cpu_map *map)
{
struct perf_cpu result = {
.cpu = -1
diff --git a/tools/lib/perf/evsel.c b/tools/lib/perf/evsel.c
index 952f3520d5c2..8ce5bbd09666 100644
--- a/tools/lib/perf/evsel.c
+++ b/tools/lib/perf/evsel.c
@@ -305,6 +305,9 @@ int perf_evsel__read_size(struct perf_evsel *evsel)
if (read_format & PERF_FORMAT_ID)
entry += sizeof(u64);
+ if (read_format & PERF_FORMAT_LOST)
+ entry += sizeof(u64);
+
if (read_format & PERF_FORMAT_GROUP) {
nr = evsel->nr_members;
size += sizeof(u64);
@@ -314,24 +317,98 @@ int perf_evsel__read_size(struct perf_evsel *evsel)
return size;
}
+/* This only reads values for the leader */
+static int perf_evsel__read_group(struct perf_evsel *evsel, int cpu_map_idx,
+ int thread, struct perf_counts_values *count)
+{
+ size_t size = perf_evsel__read_size(evsel);
+ int *fd = FD(evsel, cpu_map_idx, thread);
+ u64 read_format = evsel->attr.read_format;
+ u64 *data;
+ int idx = 1;
+
+ if (fd == NULL || *fd < 0)
+ return -EINVAL;
+
+ data = calloc(1, size);
+ if (data == NULL)
+ return -ENOMEM;
+
+ if (readn(*fd, data, size) <= 0) {
+ free(data);
+ return -errno;
+ }
+
+ /*
+ * This reads only the leader event intentionally since we don't have
+ * perf counts values for sibling events.
+ */
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ count->ena = data[idx++];
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ count->run = data[idx++];
+
+ /* value is always available */
+ count->val = data[idx++];
+ if (read_format & PERF_FORMAT_ID)
+ count->id = data[idx++];
+ if (read_format & PERF_FORMAT_LOST)
+ count->lost = data[idx++];
+
+ free(data);
+ return 0;
+}
+
+/*
+ * The perf read format is very flexible, so the counter values have to be
+ * picked out of the buffer according to the bits enabled in the read format.
+ */
+static void perf_evsel__adjust_values(struct perf_evsel *evsel, u64 *buf,
+ struct perf_counts_values *count)
+{
+ u64 read_format = evsel->attr.read_format;
+ int n = 0;
+
+ count->val = buf[n++];
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ count->ena = buf[n++];
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ count->run = buf[n++];
+
+ if (read_format & PERF_FORMAT_ID)
+ count->id = buf[n++];
+
+ if (read_format & PERF_FORMAT_LOST)
+ count->lost = buf[n++];
+}
+
int perf_evsel__read(struct perf_evsel *evsel, int cpu_map_idx, int thread,
struct perf_counts_values *count)
{
size_t size = perf_evsel__read_size(evsel);
int *fd = FD(evsel, cpu_map_idx, thread);
+ u64 read_format = evsel->attr.read_format;
+ struct perf_counts_values buf;
memset(count, 0, sizeof(*count));
if (fd == NULL || *fd < 0)
return -EINVAL;
+ if (read_format & PERF_FORMAT_GROUP)
+ return perf_evsel__read_group(evsel, cpu_map_idx, thread, count);
+
if (MMAP(evsel, cpu_map_idx, thread) &&
+ !(read_format & (PERF_FORMAT_ID | PERF_FORMAT_LOST)) &&
!perf_mmap__read_self(MMAP(evsel, cpu_map_idx, thread), count))
return 0;
- if (readn(*fd, count->values, size) <= 0)
+ if (readn(*fd, buf.values, size) <= 0)
return -errno;
+ perf_evsel__adjust_values(evsel, buf.values, count);
return 0;
}
diff --git a/tools/lib/perf/include/internal/evsel.h b/tools/lib/perf/include/internal/evsel.h
index 2a912a1f1989..a99a75d9e78f 100644
--- a/tools/lib/perf/include/internal/evsel.h
+++ b/tools/lib/perf/include/internal/evsel.h
@@ -30,6 +30,10 @@ struct perf_sample_id {
struct perf_cpu cpu;
pid_t tid;
+ /* Guest machine pid and VCPU, valid only if machine_pid is non-zero */
+ pid_t machine_pid;
+ struct perf_cpu vcpu;
+
/* Holds total ID period value for PERF_SAMPLE_READ processing. */
u64 period;
};
diff --git a/tools/lib/perf/include/perf/cpumap.h b/tools/lib/perf/include/perf/cpumap.h
index 24de795b09bb..03aceb72a783 100644
--- a/tools/lib/perf/include/perf/cpumap.h
+++ b/tools/lib/perf/include/perf/cpumap.h
@@ -23,7 +23,7 @@ LIBPERF_API void perf_cpu_map__put(struct perf_cpu_map *map);
LIBPERF_API struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx);
LIBPERF_API int perf_cpu_map__nr(const struct perf_cpu_map *cpus);
LIBPERF_API bool perf_cpu_map__empty(const struct perf_cpu_map *map);
-LIBPERF_API struct perf_cpu perf_cpu_map__max(struct perf_cpu_map *map);
+LIBPERF_API struct perf_cpu perf_cpu_map__max(const struct perf_cpu_map *map);
LIBPERF_API bool perf_cpu_map__has(const struct perf_cpu_map *map, struct perf_cpu cpu);
#define perf_cpu_map__for_each_cpu(cpu, idx, cpus) \
diff --git a/tools/lib/perf/include/perf/event.h b/tools/lib/perf/include/perf/event.h
index e7758707cadd..93bf93a59c99 100644
--- a/tools/lib/perf/include/perf/event.h
+++ b/tools/lib/perf/include/perf/event.h
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include <linux/limits.h>
#include <linux/bpf.h>
+#include <linux/compiler.h>
#include <sys/types.h> /* pid_t */
#define event_contains(obj, mem) ((obj).header.size > offsetof(typeof(obj), mem))
@@ -76,7 +77,7 @@ struct perf_record_lost_samples {
};
/*
- * PERF_FORMAT_ENABLED | PERF_FORMAT_RUNNING | PERF_FORMAT_ID
+ * PERF_FORMAT_ENABLED | PERF_FORMAT_RUNNING | PERF_FORMAT_ID | PERF_FORMAT_LOST
*/
struct perf_record_read {
struct perf_event_header header;
@@ -85,6 +86,7 @@ struct perf_record_read {
__u64 time_enabled;
__u64 time_running;
__u64 id;
+ __u64 lost;
};
struct perf_record_throttle {
@@ -153,22 +155,60 @@ enum {
PERF_CPU_MAP__MASK = 1,
};
+/*
+ * Array encoding of a perf_cpu_map where nr is the number of entries in cpu[]
+ * and each entry is a value for a CPU in the map.
+ */
struct cpu_map_entries {
__u16 nr;
__u16 cpu[];
};
-struct perf_record_record_cpu_map {
+/* Bitmap encoding of a perf_cpu_map where bitmap entries are 32-bit. */
+struct perf_record_mask_cpu_map32 {
+ /* Number of mask values. */
+ __u16 nr;
+ /* Constant 4. */
+ __u16 long_size;
+ /* Bitmap data. */
+ __u32 mask[];
+};
+
+/* Bitmap encoding of a perf_cpu_map where bitmap entries are 64-bit. */
+struct perf_record_mask_cpu_map64 {
+ /* Number of mask values. */
__u16 nr;
+ /* Constant 8. */
__u16 long_size;
- unsigned long mask[];
+ /* Legacy padding. */
+ char __pad[4];
+ /* Bitmap data. */
+ __u64 mask[];
};
-struct perf_record_cpu_map_data {
+/*
+ * 'struct perf_record_cpu_map_data' is packed as unfortunately an earlier
+ * version had unaligned data and we wish to retain file format compatibility.
+ * -irogers
+ */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpacked"
+#pragma GCC diagnostic ignored "-Wattributes"
+
+struct __packed perf_record_cpu_map_data {
__u16 type;
- char data[];
+ union {
+ /* Used when type == PERF_CPU_MAP__CPUS. */
+ struct cpu_map_entries cpus_data;
+ /* Used when type == PERF_CPU_MAP__MASK and long_size == 4. */
+ struct perf_record_mask_cpu_map32 mask32_data;
+ /* Used when type == PERF_CPU_MAP__MASK and long_size == 8. */
+ struct perf_record_mask_cpu_map64 mask64_data;
+ };
};
+#pragma GCC diagnostic pop
+
struct perf_record_cpu_map {
struct perf_event_header header;
struct perf_record_cpu_map_data data;
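To make the new layout concrete, here is a hedged sketch of how a reader might select the right union member; PERF_CPU_MAP__CPUS and PERF_CPU_MAP__MASK are the existing type values, while the helper itself is only an illustration:

[source,c]
----
/* Sketch: number of CPU slots encoded by the record, by encoding type. */
static int cpu_map_data_slots(const struct perf_record_cpu_map_data *data)
{
	switch (data->type) {
	case PERF_CPU_MAP__CPUS:
		return data->cpus_data.nr;                /* one entry per CPU */
	case PERF_CPU_MAP__MASK:
		if (data->mask32_data.long_size == 4)
			return data->mask32_data.nr * 32; /* bits per 32-bit word */
		return data->mask64_data.nr * 64;         /* bits per 64-bit word */
	default:
		return -1;
	}
}
----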
@@ -237,6 +277,11 @@ struct id_index_entry {
__u64 tid;
};
+struct id_index_entry_2 {
+ __u64 machine_pid;
+ __u64 vcpu;
+};
+
struct perf_record_id_index {
struct perf_event_header header;
__u64 nr;
@@ -274,6 +319,8 @@ struct perf_record_auxtrace_error {
__u64 ip;
__u64 time;
char msg[MAX_AUXTRACE_ERROR_MSG];
+ __u32 machine_pid;
+ __u32 vcpu;
};
struct perf_record_aux {
@@ -389,6 +436,7 @@ enum perf_user_event_type { /* above any possible kernel type */
PERF_RECORD_TIME_CONV = 79,
PERF_RECORD_HEADER_FEATURE = 80,
PERF_RECORD_COMPRESSED = 81,
+ PERF_RECORD_FINISHED_INIT = 82,
PERF_RECORD_HEADER_MAX
};
diff --git a/tools/lib/perf/include/perf/evsel.h b/tools/lib/perf/include/perf/evsel.h
index 699c0ed97d34..6f92204075c2 100644
--- a/tools/lib/perf/include/perf/evsel.h
+++ b/tools/lib/perf/include/perf/evsel.h
@@ -18,8 +18,10 @@ struct perf_counts_values {
uint64_t val;
uint64_t ena;
uint64_t run;
+ uint64_t id;
+ uint64_t lost;
};
- uint64_t values[3];
+ uint64_t values[5];
};
};
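The five slots cover the single-event read() layout only; when PERF_FORMAT_GROUP is set the kernel returns a different shape, which is why the group case is routed through a separate helper in evsel.c. A sketch of that group layout, following the perf_event_open(2) description (illustrative; not a struct defined by this patch):

[source,c]
----
/* Sketch: group read() layout with ENABLED, RUNNING, ID and LOST all set. */
struct group_read_layout {
	__u64 nr;                     /* number of events in the group */
	__u64 time_enabled;           /* PERF_FORMAT_TOTAL_TIME_ENABLED */
	__u64 time_running;           /* PERF_FORMAT_TOTAL_TIME_RUNNING */
	struct {
		__u64 value;          /* counter value */
		__u64 id;             /* PERF_FORMAT_ID */
		__u64 lost;           /* PERF_FORMAT_LOST */
	} cntr[];                     /* one entry per group member */
};
----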
diff --git a/tools/lib/perf/tests/test-evsel.c b/tools/lib/perf/tests/test-evsel.c
index 89be89afb24d..a11fc51bfb68 100644
--- a/tools/lib/perf/tests/test-evsel.c
+++ b/tools/lib/perf/tests/test-evsel.c
@@ -1,10 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
#include <stdarg.h>
#include <stdio.h>
+#include <string.h>
#include <linux/perf_event.h>
+#include <linux/kernel.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <perf/evsel.h>
+#include <internal/evsel.h>
#include <internal/tests.h>
#include "tests.h"
@@ -189,6 +192,163 @@ static int test_stat_user_read(int event)
return 0;
}
+static int test_stat_read_format_single(struct perf_event_attr *attr, struct perf_thread_map *threads)
+{
+ struct perf_evsel *evsel;
+ struct perf_counts_values counts;
+ volatile int count = 0x100000;
+ int err;
+
+ evsel = perf_evsel__new(attr);
+ __T("failed to create evsel", evsel);
+
+ /* skip old kernels that don't support the format */
+ err = perf_evsel__open(evsel, NULL, threads);
+ if (err < 0)
+ return 0;
+
+ while (count--) ;
+
+ memset(&counts, -1, sizeof(counts));
+ perf_evsel__read(evsel, 0, 0, &counts);
+
+ __T("failed to read value", counts.val);
+ if (attr->read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ __T("failed to read TOTAL_TIME_ENABLED", counts.ena);
+ if (attr->read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ __T("failed to read TOTAL_TIME_RUNNING", counts.run);
+ if (attr->read_format & PERF_FORMAT_ID)
+ __T("failed to read ID", counts.id);
+ if (attr->read_format & PERF_FORMAT_LOST)
+ __T("failed to read LOST", counts.lost == 0);
+
+ perf_evsel__close(evsel);
+ perf_evsel__delete(evsel);
+ return 0;
+}
+
+static int test_stat_read_format_group(struct perf_event_attr *attr, struct perf_thread_map *threads)
+{
+ struct perf_evsel *leader, *member;
+ struct perf_counts_values counts;
+ volatile int count = 0x100000;
+ int err;
+
+ attr->read_format |= PERF_FORMAT_GROUP;
+ leader = perf_evsel__new(attr);
+ __T("failed to create leader", leader);
+
+ attr->read_format &= ~PERF_FORMAT_GROUP;
+ member = perf_evsel__new(attr);
+ __T("failed to create member", member);
+
+ member->leader = leader;
+ leader->nr_members = 2;
+
+ /* skip old kernels that don't support the format */
+ err = perf_evsel__open(leader, NULL, threads);
+ if (err < 0)
+ return 0;
+ err = perf_evsel__open(member, NULL, threads);
+ if (err < 0)
+ return 0;
+
+ while (count--) ;
+
+ memset(&counts, -1, sizeof(counts));
+ perf_evsel__read(leader, 0, 0, &counts);
+
+ __T("failed to read leader value", counts.val);
+ if (attr->read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ __T("failed to read leader TOTAL_TIME_ENABLED", counts.ena);
+ if (attr->read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ __T("failed to read leader TOTAL_TIME_RUNNING", counts.run);
+ if (attr->read_format & PERF_FORMAT_ID)
+ __T("failed to read leader ID", counts.id);
+ if (attr->read_format & PERF_FORMAT_LOST)
+ __T("failed to read leader LOST", counts.lost == 0);
+
+ memset(&counts, -1, sizeof(counts));
+ perf_evsel__read(member, 0, 0, &counts);
+
+ __T("failed to read member value", counts.val);
+ if (attr->read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ __T("failed to read member TOTAL_TIME_ENABLED", counts.ena);
+ if (attr->read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ __T("failed to read member TOTAL_TIME_RUNNING", counts.run);
+ if (attr->read_format & PERF_FORMAT_ID)
+ __T("failed to read member ID", counts.id);
+ if (attr->read_format & PERF_FORMAT_LOST)
+ __T("failed to read member LOST", counts.lost == 0);
+
+ perf_evsel__close(member);
+ perf_evsel__close(leader);
+ perf_evsel__delete(member);
+ perf_evsel__delete(leader);
+ return 0;
+}
+
+static int test_stat_read_format(void)
+{
+ struct perf_thread_map *threads;
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_TASK_CLOCK,
+ };
+ int err, i;
+
+#define FMT(_fmt) PERF_FORMAT_ ## _fmt
+#define FMT_TIME (FMT(TOTAL_TIME_ENABLED) | FMT(TOTAL_TIME_RUNNING))
+
+ uint64_t test_formats [] = {
+ 0,
+ FMT_TIME,
+ FMT(ID),
+ FMT(LOST),
+ FMT_TIME | FMT(ID),
+ FMT_TIME | FMT(LOST),
+ FMT_TIME | FMT(ID) | FMT(LOST),
+ FMT(ID) | FMT(LOST),
+ };
+
+#undef FMT
+#undef FMT_TIME
+
+ threads = perf_thread_map__new_dummy();
+ __T("failed to create threads", threads);
+
+ perf_thread_map__set_pid(threads, 0, 0);
+
+ for (i = 0; i < (int)ARRAY_SIZE(test_formats); i++) {
+ attr.read_format = test_formats[i];
+ __T_VERBOSE("testing single read with read_format: %lx\n",
+ (unsigned long)test_formats[i]);
+
+ err = test_stat_read_format_single(&attr, threads);
+ __T("failed to read single format", err == 0);
+ }
+
+ perf_thread_map__put(threads);
+
+ threads = perf_thread_map__new_array(2, NULL);
+ __T("failed to create threads", threads);
+
+ perf_thread_map__set_pid(threads, 0, 0);
+ perf_thread_map__set_pid(threads, 1, 0);
+
+ for (i = 0; i < (int)ARRAY_SIZE(test_formats); i++) {
+ attr.read_format = test_formats[i];
+ __T_VERBOSE("testing group read with read_format: %lx\n",
+ (unsigned long)test_formats[i]);
+
+ err = test_stat_read_format_group(&attr, threads);
+ __T("failed to read group format", err == 0);
+ }
+
+ perf_thread_map__put(threads);
+ return 0;
+}
+
int test_evsel(int argc, char **argv)
{
__T_START;
@@ -200,6 +360,7 @@ int test_evsel(int argc, char **argv)
test_stat_thread_enable();
test_stat_user_read(PERF_COUNT_HW_INSTRUCTIONS);
test_stat_user_read(PERF_COUNT_HW_CPU_CYCLES);
+ test_stat_read_format();
__T_END;
return tests_failed == 0 ? 0 : -1;
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 0cec74da7ffe..e55fdf952a3a 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -162,32 +162,34 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
/*
* Unfortunately these have to be hard coded because the noreturn
- * attribute isn't provided in ELF data.
+ * attribute isn't provided in ELF data. Keep 'em sorted.
*/
static const char * const global_noreturns[] = {
+ "__invalid_creds",
+ "__module_put_and_kthread_exit",
+ "__reiserfs_panic",
"__stack_chk_fail",
- "panic",
+ "__ubsan_handle_builtin_unreachable",
+ "cpu_bringup_and_idle",
+ "cpu_startup_entry",
"do_exit",
+ "do_group_exit",
"do_task_dead",
- "kthread_exit",
- "make_task_dead",
- "__module_put_and_kthread_exit",
+ "ex_handler_msr_mce",
+ "fortify_panic",
"kthread_complete_and_exit",
- "__reiserfs_panic",
+ "kthread_exit",
+ "kunit_try_catch_throw",
"lbug_with_loc",
- "fortify_panic",
- "usercopy_abort",
"machine_real_restart",
+ "make_task_dead",
+ "panic",
"rewind_stack_and_make_dead",
- "kunit_try_catch_throw",
- "xen_start_kernel",
- "cpu_bringup_and_idle",
- "do_group_exit",
+ "sev_es_terminate",
+ "snp_abort",
"stop_this_cpu",
- "__invalid_creds",
- "cpu_startup_entry",
- "__ubsan_handle_builtin_unreachable",
- "ex_handler_msr_mce",
+ "usercopy_abort",
+ "xen_start_kernel",
};
if (!func)
@@ -4096,7 +4098,8 @@ static int validate_ibt(struct objtool_file *file)
* These sections can reference text addresses, but not with
* the intent to indirect branch to them.
*/
- if (!strncmp(sec->name, ".discard", 8) ||
+ if ((!strncmp(sec->name, ".discard", 8) &&
+ strcmp(sec->name, ".discard.ibt_endbr_noseal")) ||
!strncmp(sec->name, ".debug", 6) ||
!strcmp(sec->name, ".altinstructions") ||
!strcmp(sec->name, ".ibt_endbr_seal") ||
diff --git a/tools/perf/Build b/tools/perf/Build
index db61dbe2b543..496b096153bb 100644
--- a/tools/perf/Build
+++ b/tools/perf/Build
@@ -25,6 +25,7 @@ perf-y += builtin-data.o
perf-y += builtin-version.o
perf-y += builtin-c2c.o
perf-y += builtin-daemon.o
+perf-y += builtin-kwork.o
perf-$(CONFIG_TRACE) += builtin-trace.o
perf-$(CONFIG_LIBELF) += builtin-probe.o
diff --git a/tools/perf/Documentation/guest-files.txt b/tools/perf/Documentation/guest-files.txt
new file mode 100644
index 000000000000..8cc0b092f996
--- /dev/null
+++ b/tools/perf/Documentation/guest-files.txt
@@ -0,0 +1,16 @@
+include::guestmount.txt[]
+
+--guestkallsyms=<path>::
+ Guest OS /proc/kallsyms file copy. perf reads it to get guest
+ kernel symbols. Users copy it out from the guest OS.
+
+--guestmodules=<path>::
+ Guest OS /proc/modules file copy. perf reads it to get guest
+ kernel module information. Users copy it out from the guest OS.
+
+--guestvmlinux=<path>::
+ Guest OS kernel vmlinux.
+
+--guest-code::
+ Indicate that guest code can be found in the hypervisor process,
+ which is a common case for KVM test programs.
diff --git a/tools/perf/Documentation/guestmount.txt b/tools/perf/Documentation/guestmount.txt
new file mode 100644
index 000000000000..6edf12363add
--- /dev/null
+++ b/tools/perf/Documentation/guestmount.txt
@@ -0,0 +1,11 @@
+--guestmount=<path>::
+ Guest OS root file system mount directory. Users mount guest OS
+ root directories under <path> using a specific filesystem access method,
+ typically sshfs.
+ For example, start 2 guest OSes, where one's pid is 8888 and the other's is 9999:
+[verse]
+ $ mkdir \~/guestmount
+ $ cd \~/guestmount
+ $ sshfs -o allow_other,direct_io -p 5551 localhost:/ 8888/
+ $ sshfs -o allow_other,direct_io -p 5552 localhost:/ 9999/
+ $ perf {GMEXAMPLECMD} --guestmount=~/guestmount {GMEXAMPLESUBCMD}
diff --git a/tools/perf/Documentation/intel-hybrid.txt b/tools/perf/Documentation/intel-hybrid.txt
index c9302096dc46..e7a776ad25d7 100644
--- a/tools/perf/Documentation/intel-hybrid.txt
+++ b/tools/perf/Documentation/intel-hybrid.txt
@@ -21,11 +21,6 @@ cat /sys/devices/cpu_atom/cpus
It indicates cpu0-cpu15 are core cpus and cpu16-cpu23 are atom cpus.
-Quickstart
-
-List hybrid event
------------------
-
As before, use perf-list to list the symbolic event.
perf list
@@ -40,7 +35,6 @@ the event is belong to. Same event name but with different pmu can
be supported.
Enable hybrid event with a specific pmu
----------------------------------------
To enable a core only event or atom only event, following syntax is supported:
@@ -53,7 +47,6 @@ For example, count the 'cycles' event on core cpus.
perf stat -e cpu_core/cycles/
Create two events for one hardware event automatically
-------------------------------------------------------
When creating one event and the event is available on both atom and core,
two events are created automatically. One is for atom, the other is for
@@ -132,7 +125,6 @@ For perf-stat result, it displays two events:
The first 'cycles' is core event, the second 'cycles' is atom event.
Thread mode example:
---------------------
perf-stat reports the scaled counts for hybrid event and with a percentage
displayed. The percentage is the event's running time/enabling time.
@@ -176,14 +168,12 @@ perf_event_attr:
604,097,080 cpu_atom/cycles/ (99.57%)
perf-record:
-------------
If there is no '-e' specified in perf record, on hybrid platform,
it creates two default 'cycles' and adds them to event list. One
is for core, the other is for atom.
perf-stat:
-----------
If there is no '-e' specified in perf stat, on hybrid platform,
besides of software events, following events are created and
diff --git a/tools/perf/Documentation/perf-buildid-list.txt b/tools/perf/Documentation/perf-buildid-list.txt
index 25c52efcc7f0..e1e8fdbe06b9 100644
--- a/tools/perf/Documentation/perf-buildid-list.txt
+++ b/tools/perf/Documentation/perf-buildid-list.txt
@@ -33,6 +33,10 @@ OPTIONS
-k::
--kernel::
Show running kernel build id.
+-m::
+--kernel-maps::
+ Show buildid, start/end text address, and path of running kernel and
+ its modules.
-v::
--verbose::
Be more verbose.
diff --git a/tools/perf/Documentation/perf-c2c.txt b/tools/perf/Documentation/perf-c2c.txt
index 6f69173731aa..f1f7ae6b08d1 100644
--- a/tools/perf/Documentation/perf-c2c.txt
+++ b/tools/perf/Documentation/perf-c2c.txt
@@ -109,7 +109,9 @@ REPORT OPTIONS
-d::
--display::
- Switch to HITM type (rmt, lcl) to display and sort on. Total HITMs as default.
+ Switch to HITM type (rmt, lcl) or peer snooping type (peer) to display
+ and sort on. Total HITMs (tot) is the default, except on Arm64, where
+ peer mode is the default.
--stitch-lbr::
Show callgraph with stitched LBRs, which may have more complete
@@ -174,12 +176,18 @@ For each cacheline in the 1) list we display following data:
Cacheline
- cacheline address (hex number)
- Rmt/Lcl Hitm
+ Rmt/Lcl Hitm (Display with HITM types)
- cacheline percentage of all Remote/Local HITM accesses
- LLC Load Hitm - Total, LclHitm, RmtHitm
+ Peer Snoop (Display with peer type)
+ - cacheline percentage of all peer accesses
+
+ LLC Load Hitm - Total, LclHitm, RmtHitm (For display with HITM types)
- count of Total/Local/Remote load HITMs
+ Load Peer - Total, Local, Remote (For display with peer type)
+ - count of Total/Local/Remote loads from peer cache or DRAM
+
Total records
- sum of all cachelines accesses
@@ -201,16 +209,21 @@ For each cacheline in the 1) list we display following data:
- count of LLC load accesses, includes LLC hits and LLC HITMs
RMT Load Hit - RmtHit, RmtHitm
- - count of remote load accesses, includes remote hits and remote HITMs
+ - count of remote load accesses, includes remote hits and remote HITMs;
+ on Arm Neoverse cores, RmtHit is used to account for remote accesses,
+ including remote DRAM or any upward cache level in the remote node
Load Dram - Lcl, Rmt
- count of local and remote DRAM accesses
For each offset in the 2) list we display following data:
- HITM - Rmt, Lcl
+ HITM - Rmt, Lcl (Display with HITM types)
- % of Remote/Local HITM accesses for given offset within cacheline
+ Peer Snoop - Rmt, Lcl (Display with peer type)
+ - % of Remote/Local peer accesses for given offset within cacheline
+
Store Refs - L1 Hit, L1 Miss, N/A
- % of store accesses that hit L1, missed L1 and N/A (no available) memory
level for given offset within cacheline
@@ -227,9 +240,12 @@ For each offset in the 2) list we display following data:
Code address
- code address responsible for the accesses
- cycles - rmt hitm, lcl hitm, load
+ cycles - rmt hitm, lcl hitm, load (Display with HITM types)
- sum of cycles for given accesses - Remote/Local HITM and generic load
+ cycles - rmt peer, lcl peer, load (Display with peer type)
+ - sum of cycles for given accesses - Remote/Local peer load and generic load
+
cpu cnt
- number of cpus that participated on the access
@@ -251,7 +267,8 @@ The 'Node' field displays nodes that accesses given cacheline
offset. Its output comes in 3 flavors:
- node IDs separated by ','
- node IDs with stats for each ID, in following format:
- Node{cpus %hitms %stores}
+ Node{cpus %hitms %stores} (Display with HITM types)
+ Node{cpus %peers %stores} (Display with peer type)
- node IDs with list of affected CPUs in following format:
Node{cpu list}
diff --git a/tools/perf/Documentation/perf-dlfilter.txt b/tools/perf/Documentation/perf-dlfilter.txt
index 594f5a5a0c9e..fb22e3b31dc5 100644
--- a/tools/perf/Documentation/perf-dlfilter.txt
+++ b/tools/perf/Documentation/perf-dlfilter.txt
@@ -107,9 +107,31 @@ struct perf_dlfilter_sample {
__u64 raw_callchain_nr; /* Number of raw_callchain entries */
const __u64 *raw_callchain; /* Refer <linux/perf_event.h> */
const char *event;
+ __s32 machine_pid;
+ __s32 vcpu;
};
----
+Note: 'machine_pid' and 'vcpu' are not original members, but were added together later.
+'size' can be used to determine their presence at run time.
+PERF_DLFILTER_HAS_MACHINE_PID will be defined if they are present at compile time.
+For example:
+[source,c]
+----
+#include <perf/perf_dlfilter.h>
+#include <stddef.h>
+#include <stdbool.h>
+
+static inline bool have_machine_pid(const struct perf_dlfilter_sample *sample)
+{
+#ifdef PERF_DLFILTER_HAS_MACHINE_PID
+ return sample->size >= offsetof(struct perf_dlfilter_sample, vcpu) + sizeof(sample->vcpu);
+#else
+ return false;
+#endif
+}
+----
+
The perf_dlfilter_fns structure
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/tools/perf/Documentation/perf-inject.txt b/tools/perf/Documentation/perf-inject.txt
index 0570a1ccd344..ffc293fdf61d 100644
--- a/tools/perf/Documentation/perf-inject.txt
+++ b/tools/perf/Documentation/perf-inject.txt
@@ -85,6 +85,27 @@ include::itrace.txt[]
without updating it. Currently this option is supported only by
Intel PT, refer linkperf:perf-intel-pt[1]
+--guest-data=<path>,<pid>[,<time offset>[,<time scale>]]::
+ Insert events from a perf.data file recorded in a virtual machine at
+ the same time as the input perf.data file was recorded on the host.
+ The Process ID (PID) of the QEMU hypervisor process must be provided,
+ and the time offset and time scale (multiplier) will likely be needed
+ to convert guest time stamps into host time stamps. For example, on
+ x86 the TSC Offset and Multiplier could be provided for a virtual machine
+ booted with the Linux command line option no-kvmclock.
+ Currently only mmap, mmap2, comm, task, context_switch, ksymbol,
+ and text_poke events are inserted, as well as build ID information.
+ The QEMU option -name debug-threads=on is needed so that thread names
+ can be used to determine which thread is running which VCPU. Note
+ libvirt seems to use this by default.
+ When using perf record in the guest, option --sample-identifier
+ should be used, and also --buildid-all and --switch-events may be
+ useful.
+
+:GMEXAMPLECMD: inject
+:GMEXAMPLESUBCMD:
+include::guestmount.txt[]
+
SEE ALSO
--------
linkperf:perf-record[1], linkperf:perf-report[1], linkperf:perf-archive[1],
diff --git a/tools/perf/Documentation/perf-intel-pt.txt b/tools/perf/Documentation/perf-intel-pt.txt
index 238ab9d3cb93..3dc3f0ccbd51 100644
--- a/tools/perf/Documentation/perf-intel-pt.txt
+++ b/tools/perf/Documentation/perf-intel-pt.txt
@@ -267,7 +267,7 @@ Note that, as with all events, the event is suffixed with event modifiers:
H host
p precise ip
-'h', 'G' and 'H' are for virtualization which is not supported by Intel PT.
+'h', 'G' and 'H' are for virtualization and are not used by Intel PT.
'p' is also not relevant to Intel PT. So only options 'u' and 'k' are
meaningful for Intel PT.
@@ -1218,10 +1218,10 @@ XED
include::build-xed.txt[]
-Tracing Virtual Machines
-------------------------
+Tracing Virtual Machines (kernel only)
+--------------------------------------
-Currently, only kernel tracing is supported and only with either "timeless" decoding
+Currently, kernel tracing is supported with either "timeless" decoding
(i.e. no TSC timestamps) or VM Time Correlation. VM Time Correlation is an extra step
using 'perf inject' and requires unchanging VMX TSC Offset and no VMX TSC Scaling.
@@ -1400,6 +1400,179 @@ There were none.
:17006 17006 [001] 11500.262869216: ffffffff8220116e error_entry+0xe ([guest.kernel.kallsyms]) pushq %rax
+Tracing Virtual Machines (including user space)
+-----------------------------------------------
+
+It is possible to use perf record to record sideband events within a virtual machine, so that an Intel PT trace on the host can be decoded.
+Sideband events from the guest perf.data file can be injected into the host perf.data file using perf inject.
+
+Here is an example of the steps needed:
+
+On the guest machine:
+
+Check that no-kvmclock kernel command line option was used to boot:
+
+Note, this is essential to enable time correlation between host and guest machines.
+
+ $ cat /proc/cmdline
+ BOOT_IMAGE=/boot/vmlinuz-5.10.0-16-amd64 root=UUID=cb49c910-e573-47e0-bce7-79e293df8e1d ro no-kvmclock
+
+There is no BPF support at present so, if possible, disable JIT compiling:
+
+ $ echo 0 | sudo tee /proc/sys/net/core/bpf_jit_enable
+ 0
+
+Start perf record to collect sideband events:
+
+ $ sudo perf record -o guest-sideband-testing-guest-perf.data --sample-identifier --buildid-all --switch-events --kcore -a -e dummy
+
+On the host machine:
+
+Start perf record to collect Intel PT trace:
+
+Note, the host trace will get very big very fast, so keep the time between starting and stopping the host trace as short as possible.
+
+ $ sudo perf record -o guest-sideband-testing-host-perf.data -m,64M --kcore -a -e intel_pt/cyc/
+
+On the guest machine:
+
+Run a small test case, just 'uname' in this example:
+
+ $ uname
+ Linux
+
+On the host machine:
+
+Stop the Intel PT trace:
+
+ ^C
+ [ perf record: Woken up 1 times to write data ]
+ [ perf record: Captured and wrote 76.122 MB guest-sideband-testing-host-perf.data ]
+
+On the guest machine:
+
+Stop the Intel PT trace:
+
+ ^C
+ [ perf record: Woken up 1 times to write data ]
+ [ perf record: Captured and wrote 1.247 MB guest-sideband-testing-guest-perf.data ]
+
+And then copy guest-sideband-testing-guest-perf.data to the host (not shown here).
+
+On the host machine:
+
+With the 2 perf.data recordings now on the host machine, and with their ownership changed to the user, proceed as follows.
+
+Identify the TSC Offset:
+
+ $ perf inject -i guest-sideband-testing-host-perf.data --vm-time-correlation=dry-run
+ VMCS: 0x103fc6 TSC Offset 0xfffffa6ae070cb20
+ VMCS: 0x103ff2 TSC Offset 0xfffffa6ae070cb20
+ VMCS: 0x10fdaa TSC Offset 0xfffffa6ae070cb20
+ VMCS: 0x24d57c TSC Offset 0xfffffa6ae070cb20
+
+Correct Intel PT TSC timestamps for the guest machine:
+
+ $ perf inject -i guest-sideband-testing-host-perf.data --vm-time-correlation=0xfffffa6ae070cb20 --force
+
+Identify the guest machine PID:
+
+ $ perf script -i guest-sideband-testing-host-perf.data --no-itrace --show-task-events | grep KVM
+ CPU 0/KVM 0 [000] 0.000000: PERF_RECORD_COMM: CPU 0/KVM:13376/13381
+ CPU 1/KVM 0 [000] 0.000000: PERF_RECORD_COMM: CPU 1/KVM:13376/13382
+ CPU 2/KVM 0 [000] 0.000000: PERF_RECORD_COMM: CPU 2/KVM:13376/13383
+ CPU 3/KVM 0 [000] 0.000000: PERF_RECORD_COMM: CPU 3/KVM:13376/13384
+
+Note, the QEMU option -name debug-threads=on is needed so that thread names
+can be used to determine which thread is running which VCPU as above. libvirt seems to use this by default.
+
+Create a guestmount, assuming the guest machine is 'vm_to_test':
+
+ $ mkdir -p ~/guestmount/13376
+ $ sshfs -o direct_io vm_to_test:/ ~/guestmount/13376
+
+Inject the guest perf.data file into the host perf.data file:
+
+Note, due to the guestmount option, guest object files and debug files will be copied into the build ID cache from the guest machine, with the notable exception of VDSO.
+If needed, VDSO can be copied manually in a fashion similar to that used by the perf-archive script.
+
+ $ perf inject -i guest-sideband-testing-host-perf.data -o inj --guestmount ~/guestmount --guest-data=guest-sideband-testing-guest-perf.data,13376,0xfffffa6ae070cb20
+
+Show an excerpt from the result. In this case the CPU and time range have been chosen to show interaction between guest and host when 'uname' is starting to run on the guest machine:
+
+Notes:
+
+ - the CPU displayed, [002] in this case, is always the host CPU
+ - events happening in the virtual machine start with VM:13376 VCPU:003, which shows the hypervisor PID 13376 and the VCPU number
+ - only calls and errors are displayed i.e. --itrace=ce
+ - branches entering and exiting the virtual machine are split, and show as 2 branches to/from "0 [unknown] ([unknown])"
+
+ $ perf script -i inj --itrace=ce -F+machine_pid,+vcpu,+addr,+pid,+tid,-period --ns --time 7919.408803365,7919.408804631 -C 2
+ CPU 3/KVM 13376/13384 [002] 7919.408803365: branches: ffffffffc0f8ebe0 vmx_vcpu_enter_exit+0xc0 ([kernel.kallsyms]) => ffffffffc0f8edc0 __vmx_vcpu_run+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803365: branches: ffffffffc0f8edd5 __vmx_vcpu_run+0x15 ([kernel.kallsyms]) => ffffffffc0f8eca0 vmx_update_host_rsp+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803365: branches: ffffffffc0f8ee1b __vmx_vcpu_run+0x5b ([kernel.kallsyms]) => ffffffffc0f8ed60 vmx_vmenter+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803461: branches: ffffffffc0f8ed62 vmx_vmenter+0x2 ([kernel.kallsyms]) => 0 [unknown] ([unknown])
+ VM:13376 VCPU:003 uname 3404/3404 [002] 7919.408803461: branches: 0 [unknown] ([unknown]) => 7f851c9b5a5c init_cacheinfo+0x3ac (/usr/lib/x86_64-linux-gnu/libc-2.31.so)
+ VM:13376 VCPU:003 uname 3404/3404 [002] 7919.408803567: branches: 7f851c9b5a5a init_cacheinfo+0x3aa (/usr/lib/x86_64-linux-gnu/libc-2.31.so) => 0 [unknown] ([unknown])
+ CPU 3/KVM 13376/13384 [002] 7919.408803567: branches: 0 [unknown] ([unknown]) => ffffffffc0f8ed80 vmx_vmexit+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803596: branches: ffffffffc0f6619a vmx_vcpu_run+0x26a ([kernel.kallsyms]) => ffffffffb2255c60 x86_virt_spec_ctrl+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803801: branches: ffffffffc0f66445 vmx_vcpu_run+0x515 ([kernel.kallsyms]) => ffffffffb2290b30 native_write_msr+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803850: branches: ffffffffc0f661f8 vmx_vcpu_run+0x2c8 ([kernel.kallsyms]) => ffffffffc1092300 kvm_load_host_xsave_state+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803850: branches: ffffffffc1092327 kvm_load_host_xsave_state+0x27 ([kernel.kallsyms]) => ffffffffc1092220 kvm_load_host_xsave_state.part.0+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803862: branches: ffffffffc0f662cf vmx_vcpu_run+0x39f ([kernel.kallsyms]) => ffffffffc0f63f90 vmx_recover_nmi_blocking+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803862: branches: ffffffffc0f662e9 vmx_vcpu_run+0x3b9 ([kernel.kallsyms]) => ffffffffc0f619a0 __vmx_complete_interrupts+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803872: branches: ffffffffc109cfb2 vcpu_enter_guest+0x752 ([kernel.kallsyms]) => ffffffffc0f5f570 vmx_handle_exit_irqoff+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803881: branches: ffffffffc109d028 vcpu_enter_guest+0x7c8 ([kernel.kallsyms]) => ffffffffb234f900 __srcu_read_lock+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803897: branches: ffffffffc109d06f vcpu_enter_guest+0x80f ([kernel.kallsyms]) => ffffffffc0f72e30 vmx_handle_exit+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803897: branches: ffffffffc0f72e3d vmx_handle_exit+0xd ([kernel.kallsyms]) => ffffffffc0f727c0 __vmx_handle_exit+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803897: branches: ffffffffc0f72b15 __vmx_handle_exit+0x355 ([kernel.kallsyms]) => ffffffffc0f60ae0 vmx_flush_pml_buffer+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803903: branches: ffffffffc0f72994 __vmx_handle_exit+0x1d4 ([kernel.kallsyms]) => ffffffffc10b7090 kvm_emulate_cpuid+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803903: branches: ffffffffc10b70f1 kvm_emulate_cpuid+0x61 ([kernel.kallsyms]) => ffffffffc10b6e10 kvm_cpuid+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803941: branches: ffffffffc10b7125 kvm_emulate_cpuid+0x95 ([kernel.kallsyms]) => ffffffffc1093110 kvm_skip_emulated_instruction+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803941: branches: ffffffffc109311f kvm_skip_emulated_instruction+0xf ([kernel.kallsyms]) => ffffffffc0f5e180 vmx_get_rflags+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803951: branches: ffffffffc109312a kvm_skip_emulated_instruction+0x1a ([kernel.kallsyms]) => ffffffffc0f5fd30 vmx_skip_emulated_instruction+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803951: branches: ffffffffc0f5fd79 vmx_skip_emulated_instruction+0x49 ([kernel.kallsyms]) => ffffffffc0f5fb50 skip_emulated_instruction+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803956: branches: ffffffffc0f5fc68 skip_emulated_instruction+0x118 ([kernel.kallsyms]) => ffffffffc0f6a940 vmx_cache_reg+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803964: branches: ffffffffc0f5fc11 skip_emulated_instruction+0xc1 ([kernel.kallsyms]) => ffffffffc0f5f9e0 vmx_set_interrupt_shadow+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803980: branches: ffffffffc109f8b1 vcpu_run+0x71 ([kernel.kallsyms]) => ffffffffc10ad2f0 kvm_cpu_has_pending_timer+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803980: branches: ffffffffc10ad2fb kvm_cpu_has_pending_timer+0xb ([kernel.kallsyms]) => ffffffffc10b0490 apic_has_pending_timer+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803991: branches: ffffffffc109f899 vcpu_run+0x59 ([kernel.kallsyms]) => ffffffffc109c860 vcpu_enter_guest+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803993: branches: ffffffffc109cd4c vcpu_enter_guest+0x4ec ([kernel.kallsyms]) => ffffffffc0f69140 vmx_prepare_switch_to_guest+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803996: branches: ffffffffc109cd7d vcpu_enter_guest+0x51d ([kernel.kallsyms]) => ffffffffb234f930 __srcu_read_unlock+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803996: branches: ffffffffc109cd9c vcpu_enter_guest+0x53c ([kernel.kallsyms]) => ffffffffc0f609b0 vmx_sync_pir_to_irr+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803996: branches: ffffffffc0f60a6d vmx_sync_pir_to_irr+0xbd ([kernel.kallsyms]) => ffffffffc10adc20 kvm_lapic_find_highest_irr+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804010: branches: ffffffffc0f60abd vmx_sync_pir_to_irr+0x10d ([kernel.kallsyms]) => ffffffffc0f60820 vmx_set_rvi+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804019: branches: ffffffffc109ceca vcpu_enter_guest+0x66a ([kernel.kallsyms]) => ffffffffb2249840 fpregs_assert_state_consistent+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804021: branches: ffffffffc109cf10 vcpu_enter_guest+0x6b0 ([kernel.kallsyms]) => ffffffffc0f65f30 vmx_vcpu_run+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804024: branches: ffffffffc0f6603b vmx_vcpu_run+0x10b ([kernel.kallsyms]) => ffffffffb229bed0 __get_current_cr3_fast+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804024: branches: ffffffffc0f66055 vmx_vcpu_run+0x125 ([kernel.kallsyms]) => ffffffffb2253050 cr4_read_shadow+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804030: branches: ffffffffc0f6608d vmx_vcpu_run+0x15d ([kernel.kallsyms]) => ffffffffc10921e0 kvm_load_guest_xsave_state+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804030: branches: ffffffffc1092207 kvm_load_guest_xsave_state+0x27 ([kernel.kallsyms]) => ffffffffc1092110 kvm_load_guest_xsave_state.part.0+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804032: branches: ffffffffc0f660c6 vmx_vcpu_run+0x196 ([kernel.kallsyms]) => ffffffffb22061a0 perf_guest_get_msrs+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804032: branches: ffffffffb22061a9 perf_guest_get_msrs+0x9 ([kernel.kallsyms]) => ffffffffb220cda0 intel_guest_get_msrs+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804039: branches: ffffffffc0f66109 vmx_vcpu_run+0x1d9 ([kernel.kallsyms]) => ffffffffc0f652c0 clear_atomic_switch_msr+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804040: branches: ffffffffc0f66119 vmx_vcpu_run+0x1e9 ([kernel.kallsyms]) => ffffffffc0f73f60 intel_pmu_lbr_is_enabled+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804042: branches: ffffffffc0f73f81 intel_pmu_lbr_is_enabled+0x21 ([kernel.kallsyms]) => ffffffffc10b68e0 kvm_find_cpuid_entry+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804045: branches: ffffffffc0f66454 vmx_vcpu_run+0x524 ([kernel.kallsyms]) => ffffffffc0f61ff0 vmx_update_hv_timer+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804057: branches: ffffffffc0f66142 vmx_vcpu_run+0x212 ([kernel.kallsyms]) => ffffffffc10af100 kvm_wait_lapic_expire+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804057: branches: ffffffffc0f66156 vmx_vcpu_run+0x226 ([kernel.kallsyms]) => ffffffffb2255c60 x86_virt_spec_ctrl+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804057: branches: ffffffffc0f66161 vmx_vcpu_run+0x231 ([kernel.kallsyms]) => ffffffffc0f8eb20 vmx_vcpu_enter_exit+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804057: branches: ffffffffc0f8eb44 vmx_vcpu_enter_exit+0x24 ([kernel.kallsyms]) => ffffffffb2353e10 rcu_note_context_switch+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804057: branches: ffffffffb2353e1c rcu_note_context_switch+0xc ([kernel.kallsyms]) => ffffffffb2353db0 rcu_qs+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804066: branches: ffffffffc0f8ebe0 vmx_vcpu_enter_exit+0xc0 ([kernel.kallsyms]) => ffffffffc0f8edc0 __vmx_vcpu_run+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804066: branches: ffffffffc0f8edd5 __vmx_vcpu_run+0x15 ([kernel.kallsyms]) => ffffffffc0f8eca0 vmx_update_host_rsp+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804066: branches: ffffffffc0f8ee1b __vmx_vcpu_run+0x5b ([kernel.kallsyms]) => ffffffffc0f8ed60 vmx_vmenter+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804162: branches: ffffffffc0f8ed62 vmx_vmenter+0x2 ([kernel.kallsyms]) => 0 [unknown] ([unknown])
+ VM:13376 VCPU:003 uname 3404/3404 [002] 7919.408804162: branches: 0 [unknown] ([unknown]) => 7f851c9b5a5c init_cacheinfo+0x3ac (/usr/lib/x86_64-linux-gnu/libc-2.31.so)
+ VM:13376 VCPU:003 uname 3404/3404 [002] 7919.408804273: branches: 7f851cb7c0e4 _dl_init+0x74 (/usr/lib/x86_64-linux-gnu/ld-2.31.so) => 7f851cb7bf50 call_init.part.0+0x0 (/usr/lib/x86_64-linux-gnu/ld-2.31.so)
+ VM:13376 VCPU:003 uname 3404/3404 [002] 7919.408804526: branches: 55e0c00136f0 _start+0x0 (/usr/bin/uname) => ffffffff83200ac0 asm_exc_page_fault+0x0 ([kernel.kallsyms])
+ VM:13376 VCPU:003 uname 3404/3404 [002] 7919.408804526: branches: ffffffff83200ac3 asm_exc_page_fault+0x3 ([kernel.kallsyms]) => ffffffff83201290 error_entry+0x0 ([kernel.kallsyms])
+ VM:13376 VCPU:003 uname 3404/3404 [002] 7919.408804534: branches: ffffffff832012fa error_entry+0x6a ([kernel.kallsyms]) => ffffffff830b59a0 sync_regs+0x0 ([kernel.kallsyms])
+ VM:13376 VCPU:003 uname 3404/3404 [002] 7919.408804631: branches: ffffffff83200ad9 asm_exc_page_fault+0x19 ([kernel.kallsyms]) => ffffffff830b8210 exc_page_fault+0x0 ([kernel.kallsyms])
+ VM:13376 VCPU:003 uname 3404/3404 [002] 7919.408804631: branches: ffffffff830b82a4 exc_page_fault+0x94 ([kernel.kallsyms]) => ffffffff830b80e0 __kvm_handle_async_pf+0x0 ([kernel.kallsyms])
+ VM:13376 VCPU:003 uname 3404/3404 [002] 7919.408804631: branches: ffffffff830b80ed __kvm_handle_async_pf+0xd ([kernel.kallsyms]) => ffffffff830b80c0 kvm_read_and_reset_apf_flags+0x0 ([kernel.kallsyms])
+
+
Tracing Virtual Machines - Guest Code
-------------------------------------
diff --git a/tools/perf/Documentation/perf-kvm.txt b/tools/perf/Documentation/perf-kvm.txt
index 83c742adf86e..2ad3f5d9f72b 100644
--- a/tools/perf/Documentation/perf-kvm.txt
+++ b/tools/perf/Documentation/perf-kvm.txt
@@ -77,26 +77,11 @@ OPTIONS
Collect host side performance profile.
--guest::
Collect guest side performance profile.
---guestmount=<path>::
- Guest os root file system mount directory. Users mounts guest os
- root directories under <path> by a specific filesystem access method,
- typically, sshfs. For example, start 2 guest os. The one's pid is 8888
- and the other's is 9999.
- #mkdir ~/guestmount; cd ~/guestmount
- #sshfs -o allow_other,direct_io -p 5551 localhost:/ 8888/
- #sshfs -o allow_other,direct_io -p 5552 localhost:/ 9999/
- #perf kvm --host --guest --guestmount=~/guestmount top
---guestkallsyms=<path>::
- Guest os /proc/kallsyms file copy. 'perf' kvm' reads it to get guest
- kernel symbols. Users copy it out from guest os.
---guestmodules=<path>::
- Guest os /proc/modules file copy. 'perf' kvm' reads it to get guest
- kernel module information. Users copy it out from guest os.
---guestvmlinux=<path>::
- Guest os kernel vmlinux.
---guest-code::
- Indicate that guest code can be found in the hypervisor process,
- which is a common case for KVM test programs.
+
+:GMEXAMPLECMD: kvm --host --guest
+:GMEXAMPLESUBCMD: top
+include::guest-files.txt[]
+
-v::
--verbose::
Be more verbose (show counter open errors, etc).
diff --git a/tools/perf/Documentation/perf-kwork.txt b/tools/perf/Documentation/perf-kwork.txt
new file mode 100644
index 000000000000..3c36324712b6
--- /dev/null
+++ b/tools/perf/Documentation/perf-kwork.txt
@@ -0,0 +1,180 @@
+perf-kwork(1)
+=============
+
+NAME
+----
+perf-kwork - Tool to trace/measure kernel work properties (latencies)
+
+SYNOPSIS
+--------
+[verse]
+'perf kwork' {record|report|latency|timehist}
+
+DESCRIPTION
+-----------
+There are several variants of 'perf kwork':
+
+ 'perf kwork record <command>' to record the kernel work
+ of an arbitrary workload.
+
+ 'perf kwork report' to report the per kwork runtime.
+
+ 'perf kwork latency' to report the per kwork latencies.
+
+ 'perf kwork timehist' provides an analysis of kernel work events.
+
+ Example usage:
+ perf kwork record -- sleep 1
+ perf kwork report
+ perf kwork report -b
+ perf kwork latency
+ perf kwork latency -b
+ perf kwork timehist
+
+ By default it shows the individual work events such as irq, workqueue,
+ including the run time and delay (time between raise and actual entry):
+
+ Runtime start Runtime end Cpu Kwork name Runtime Delaytime
+ (TYPE)NAME:NUM (msec) (msec)
+ ----------------- ----------------- ------ ------------------------- ---------- ----------
+ 1811186.976062 1811186.976327 [0000] (s)RCU:9 0.266 0.114
+ 1811186.978452 1811186.978547 [0000] (s)SCHED:7 0.095 0.171
+ 1811186.980327 1811186.980490 [0000] (s)SCHED:7 0.162 0.083
+ 1811186.981221 1811186.981271 [0000] (s)SCHED:7 0.050 0.077
+ 1811186.984267 1811186.984318 [0000] (s)SCHED:7 0.051 0.075
+ 1811186.987252 1811186.987315 [0000] (s)SCHED:7 0.063 0.081
+ 1811186.987785 1811186.987843 [0006] (s)RCU:9 0.058 0.645
+ 1811186.988319 1811186.988383 [0000] (s)SCHED:7 0.064 0.143
+ 1811186.989404 1811186.989607 [0002] (s)TIMER:1 0.203 0.111
+ 1811186.989660 1811186.989732 [0002] (s)SCHED:7 0.072 0.310
+ 1811186.991295 1811186.991407 [0002] eth0:10 0.112
+ 1811186.991639 1811186.991734 [0002] (s)NET_RX:3 0.095 0.277
+ 1811186.989860 1811186.991826 [0002] (w)vmstat_shepherd 1.966 0.345
+ ...
+
+ Times are in msec.usec.
+
+OPTIONS
+-------
+-D::
+--dump-raw-trace=::
+ Display verbose dump of the sched data.
+
+-f::
+--force::
+ Don't complain, do it.
+
+-k::
+--kwork::
+ List of kwork to profile (irq, softirq, workqueue, etc)
+
+-v::
+--verbose::
+ Be more verbose. (show symbol address, etc)
+
+OPTIONS for 'perf kwork report'
+-------------------------------
+
+-b::
+--use-bpf::
+ Use BPF to measure kwork runtime
+
+-C::
+--cpu::
+ Only show events for the given CPU(s) (comma separated list).
+
+-i::
+--input::
+ Input file name. (default: perf.data unless stdin is a fifo)
+
+-n::
+--name::
+ Only show events for the given name.
+
+-s::
+--sort::
+ Sort by key(s): runtime, max, count
+
+-S::
+--with-summary::
+ Show summary with statistics
+
+--time::
+ Only analyze samples within given time window: <start>,<stop>. Times
+ have the format seconds.microseconds. If start is not given (i.e., time
+ string is ',x.y') then analysis starts at the beginning of the file. If
+ stop time is not given (i.e, time string is 'x.y,') then analysis goes
+ to end of file.
+
+OPTIONS for 'perf kwork latency'
+--------------------------------
+
+-b::
+--use-bpf::
+ Use BPF to measure kwork latency
+
+-C::
+--cpu::
+ Only show events for the given CPU(s) (comma separated list).
+
+-i::
+--input::
+ Input file name. (default: perf.data unless stdin is a fifo)
+
+-n::
+--name::
+ Only show events for the given name.
+
+-s::
+--sort::
+ Sort by key(s): avg, max, count
+
+--time::
+ Only analyze samples within given time window: <start>,<stop>. Times
+ have the format seconds.microseconds. If start is not given (i.e., time
+ string is ',x.y') then analysis starts at the beginning of the file. If
+ stop time is not given (i.e, time string is 'x.y,') then analysis goes
+ to end of file.
+
+OPTIONS for 'perf kwork timehist'
+---------------------------------
+
+-C::
+--cpu::
+ Only show events for the given CPU(s) (comma separated list).
+
+-g::
+--call-graph::
+ Display call chains if present (default off).
+
+-i::
+--input::
+ Input file name. (default: perf.data unless stdin is a fifo)
+
+-k::
+--vmlinux=<file>::
+ Vmlinux pathname
+
+-n::
+--name::
+ Only show events for the given name.
+
+--kallsyms=<file>::
+ Kallsyms pathname
+
+--max-stack::
+ Maximum number of functions to display in backtrace, default 5.
+
+--symfs=<directory>::
+ Look for files with symbols relative to this directory.
+
+--time::
+ Only analyze samples within given time window: <start>,<stop>. Times
+ have the format seconds.microseconds. If start is not given (i.e., time
+ string is ',x.y') then analysis starts at the beginning of the file. If
+ stop time is not given (i.e, time string is 'x.y,') then analysis goes
+ to end of file.
+
+SEE ALSO
+--------
+linkperf:perf-record[1]
diff --git a/tools/perf/Documentation/perf-lock.txt b/tools/perf/Documentation/perf-lock.txt
index 656b537b2fba..193c5d8b8db9 100644
--- a/tools/perf/Documentation/perf-lock.txt
+++ b/tools/perf/Documentation/perf-lock.txt
@@ -8,7 +8,7 @@ perf-lock - Analyze lock events
SYNOPSIS
--------
[verse]
-'perf lock' {record|report|script|info}
+'perf lock' {record|report|script|info|contention}
DESCRIPTION
-----------
@@ -27,6 +27,8 @@ and statistics with this 'perf lock' command.
'perf lock info' shows metadata like threads or addresses
of lock instances.
+ 'perf lock contention' shows contention statistics.
+
COMMON OPTIONS
--------------
@@ -46,6 +48,13 @@ COMMON OPTIONS
--force::
Don't complain, do it.
+--vmlinux=<file>::
+ vmlinux pathname
+
+--kallsyms=<file>::
+ kallsyms pathname
+
+
REPORT OPTIONS
--------------
@@ -96,6 +105,50 @@ INFO OPTIONS
--map::
dump map of lock instances (address:name table)
+CONTENTION OPTIONS
+------------------
+
+-k::
+--key=<value>::
+ Sorting key. Possible values: contended, wait_total (default),
+ wait_max, wait_min, avg_wait.
+
+-F::
+--field=<value>::
+ Output fields. By default it shows all fields but wait_min,
+ and users can customize that using this option. Possible values:
+ contended, wait_total, wait_max, wait_min, avg_wait.
+
+-t::
+--threads::
+ Show per-thread lock contention stat
+
+-b::
+--use-bpf::
+ Use BPF program to collect lock contention stats instead of
+ using the input data.
+
+-a::
+--all-cpus::
+ System-wide collection from all CPUs.
+
+-C::
+--cpu::
+ Collect samples only on the list of CPUs provided. Multiple CPUs can be
+ provided as a comma-separated list with no space: 0,1. Ranges of CPUs
+ are specified with -: 0-2. Default is to monitor all CPUs.
+
+-p::
+--pid=::
+ Record events on existing process ID (comma separated list).
+
+--tid=::
+ Record events on existing thread ID (comma separated list).
+
+--map-nr-entries::
+ Maximum number of BPF map entries (default: 10240).
+
+
SEE ALSO
--------
linkperf:perf[1]
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index cf8ad50f3de1..0228efc96686 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -275,6 +275,11 @@ OPTIONS
User can change the size by passing the size after comma like
"--call-graph dwarf,4096".
+ When "fp" recording is used, perf tries to save stack enties
+ up to the number specified in sysctl.kernel.perf_event_max_stack
+ by default. User can change the number by passing it after comma
+ like "--call-graph fp,32".
+
-q::
--quiet::
Don't print any message, useful for scripting.
@@ -313,6 +318,11 @@ OPTIONS
--sample-cpu::
Record the sample cpu.
+--sample-identifier::
+ Record the sample identifier i.e. PERF_SAMPLE_IDENTIFIER bit set in
+ the sample_type member of the struct perf_event_attr argument to the
+ perf_event_open system call.
+
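In perf_event_attr terms, the new option amounts to setting one extra sample_type bit; a minimal sketch, with the other attribute values chosen only for illustration:

[source,c]
----
/* Sketch: what --sample-identifier requests at the perf_event_attr level. */
struct perf_event_attr attr = {
	.type        = PERF_TYPE_HARDWARE,
	.config      = PERF_COUNT_HW_CPU_CYCLES,
	.size        = sizeof(struct perf_event_attr),
	.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_IDENTIFIER,
};
----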
-n::
--no-samples::
Don't sample.
@@ -387,6 +397,9 @@ following filters are defined:
- abort_tx: only when the target is a hardware transaction abort
- cond: conditional branches
- save_type: save branch type during sampling in case binary is not available later
+ For platforms with Intel Arch LBR support (12th-Gen+ client or
+ 4th-Gen Xeon+ server), branch type saving is unconditionally enabled
+ when taken branch stack sampling is enabled.
+
The option requires at least one branch type among any, any_call, any_ret, ind_call, cond.
@@ -747,8 +760,6 @@ events in data directory files. Option specified with no or empty value
defaults to CPU layout. Masks defined or provided by the option value are
filtered through the mask provided by -C option.
-include::intel-hybrid.txt[]
-
--debuginfod[=URLs]::
Specify debuginfod URL to be used when cacheing perf.data binaries,
it follows the same syntax as the DEBUGINFOD_URLS variable, like:
@@ -768,6 +779,8 @@ include::intel-hybrid.txt[]
only, as of now. So the applications built without the frame
pointer might see bogus addresses.
+include::intel-hybrid.txt[]
+
SEE ALSO
--------
linkperf:perf-stat[1], linkperf:perf-list[1], linkperf:perf-intel-pt[1]
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index 1a557ff8f210..68e37de5fae4 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -79,6 +79,9 @@ OPTIONS
--dump-raw-trace=::
Display verbose dump of the trace data.
+--dump-unsorted-raw-trace=::
+ Same as --dump-raw-trace but not sorted in time order.
+
-L::
--Latency=::
Show latency attributes (irqs/preemption disabled, etc).
@@ -130,7 +133,8 @@ OPTIONS
comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff,
srcline, period, iregs, uregs, brstack, brstacksym, flags, bpf-output,
brstackinsn, brstackinsnlen, brstackoff, callindent, insn, insnlen, synth,
- phys_addr, metric, misc, srccode, ipc, data_page_size, code_page_size, ins_lat.
+ phys_addr, metric, misc, srccode, ipc, data_page_size, code_page_size, ins_lat,
+ machine_pid, vcpu.
Field list can be prepended with the type, trace, sw or hw,
to indicate to which event type the field list applies.
e.g., -F sw:comm,tid,time,ip,sym and -F trace:time,cpu,trace
@@ -223,6 +227,10 @@ OPTIONS
The ipc (instructions per cycle) field is synthesized and may have a value when
Instruction Trace decoding.
+ The machine_pid and vcpu fields are derived from data resulting from using
+ perf inject to insert a perf.data file recorded inside a virtual machine into
+ a perf.data file recorded on the host at the same time.
+
Finally, a user may not set fields to none for all event types.
i.e., -F "" is not allowed.
@@ -499,9 +507,9 @@ include::itrace.txt[]
The known limitations include exception handing such as
setjmp/longjmp will have calls/returns not match.
---guest-code::
- Indicate that guest code can be found in the hypervisor process,
- which is a common case for KVM test programs.
+:GMEXAMPLECMD: script
+:GMEXAMPLESUBCMD:
+include::guest-files.txt[]
SEE ALSO
--------
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index d8a33f4a47c5..d7ff1867feda 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -570,6 +570,27 @@ Additional metrics may be printed with all earlier fields being empty.
include::intel-hybrid.txt[]
+JSON FORMAT
+-----------
+
+With -j, perf stat is able to print out a JSON format output
+that can be used for parsing.
+
+- timestamp : optional usec time stamp in fractions of a second (with -I)
+- optional aggregate options:
+ - core : core identifier (with --per-core)
+ - die : die identifier (with --per-die)
+ - socket : socket identifier (with --per-socket)
+ - node : node identifier (with --per-node)
+ - thread : thread identifier (with --per-thread)
+- counter-value : counter value
+- unit : unit of the counter value or empty
+- event : event name
+- variance : optional variance if multiple values are collected (with -r)
+- runtime : run time of counter
+- metric-value : optional metric value
+- metric-unit : optional unit of metric
+
SEE ALSO
--------
linkperf:perf-top[1], linkperf:perf-list[1]
diff --git a/tools/perf/Documentation/perf.data-file-format.txt b/tools/perf/Documentation/perf.data-file-format.txt
index f56d0e0fbff6..635ba043fd7d 100644
--- a/tools/perf/Documentation/perf.data-file-format.txt
+++ b/tools/perf/Documentation/perf.data-file-format.txt
@@ -419,18 +419,20 @@ Example:
cpu_core cpu list : 0-15
cpu_atom cpu list : 16-23
- HEADER_HYBRID_CPU_PMU_CAPS = 31,
+ HEADER_PMU_CAPS = 31,
- A list of hybrid CPU PMU capabilities.
+ List of pmu capabilities (except cpu pmu which is already
+ covered by HEADER_CPU_PMU_CAPS). Note that hybrid cpu pmu
+ capabilities are also stored here.
struct {
u32 nr_pmu;
struct {
- u32 nr_cpu_pmu_caps;
+ u32 nr_caps;
{
char name[];
char value[];
- } [nr_cpu_pmu_caps];
+ } [nr_caps];
char pmu_name[];
} [nr_pmu];
};
@@ -607,6 +609,16 @@ struct compressed_event {
char data[];
};
+ PERF_RECORD_FINISHED_INIT = 82,
+
+Marks the end of records for the system, pre-existing threads in system wide
+sessions, etc. Those are the ones prefixed PERF_RECORD_USER_*.
+
+This is used, for instance, by 'perf inject' to insert events after the
+initial records and before regular events (those emitted by the kernel), to
+support combining guest and host records.
+
+
The header is followed by compressed data frame that can be decompressed
into array of perf trace records. The size of the entire compressed event
record including the header is limited by the max value of header.size.
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 73e0762092fe..2171f02daf59 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -241,15 +241,15 @@ endif
# Try different combinations to accommodate systems that only have
# python[2][3]-config in weird combinations in the following order of
# priority from lowest to highest:
-# * python3-config
-# * python-config
# * python2-config as per pep-0394.
+# * python-config
+# * python3-config
# * $(PYTHON)-config (If PYTHON is user supplied but PYTHON_CONFIG isn't)
#
PYTHON_AUTO := python-config
-PYTHON_AUTO := $(if $(call get-executable,python3-config),python3-config,$(PYTHON_AUTO))
-PYTHON_AUTO := $(if $(call get-executable,python-config),python-config,$(PYTHON_AUTO))
PYTHON_AUTO := $(if $(call get-executable,python2-config),python2-config,$(PYTHON_AUTO))
+PYTHON_AUTO := $(if $(call get-executable,python-config),python-config,$(PYTHON_AUTO))
+PYTHON_AUTO := $(if $(call get-executable,python3-config),python3-config,$(PYTHON_AUTO))
# If PYTHON is defined but PYTHON_CONFIG isn't, then take $(PYTHON)-config as if it was the user
# supplied value for PYTHON_CONFIG. Because it's "user supplied", error out if it doesn't exist.
@@ -265,7 +265,7 @@ endif
# defined. get-executable-or-default fails with an error if the first argument is supplied but
# doesn't exist.
override PYTHON_CONFIG := $(call get-executable-or-default,PYTHON_CONFIG,$(PYTHON_AUTO))
-override PYTHON := $(call get-executable-or-default,PYTHON,$(subst -config,,$(PYTHON_AUTO)))
+override PYTHON := $(call get-executable-or-default,PYTHON,$(subst -config,,$(PYTHON_CONFIG)))
grep-libs = $(filter -l%,$(1))
strip-libs = $(filter-out -l%,$(1))
@@ -297,8 +297,6 @@ FEATURE_CHECK_LDFLAGS-libpython := $(PYTHON_EMBED_LDOPTS)
FEATURE_CHECK_LDFLAGS-libaio = -lrt
-FEATURE_CHECK_LDFLAGS-disassembler-four-args = -lbfd -lopcodes -ldl
-
CORE_CFLAGS += -fno-omit-frame-pointer
CORE_CFLAGS += -ggdb3
CORE_CFLAGS += -funwind-tables
@@ -328,8 +326,8 @@ ifneq ($(TCMALLOC),)
endif
ifeq ($(FEATURES_DUMP),)
-# We will display at the end of this Makefile.config, using $(call feature_display_entries)
-# As we may retry some feature detection here, see the disassembler-four-args case, for instance
+# We will display at the end of this Makefile.config, using $(call feature_display_entries),
+# as we may retry some feature detection here.
FEATURE_DISPLAY_DEFERRED := 1
include $(srctree)/tools/build/Makefile.feature
else
@@ -342,7 +340,7 @@ endif
ifeq ($(DEBUG),0)
ifeq ($(feature-fortify-source), 1)
- CORE_CFLAGS += -D_FORTIFY_SOURCE=2
+ CORE_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2
endif
endif
@@ -889,6 +887,25 @@ else
endif
endif
+ifneq ($(NO_JEVENTS),1)
+ ifeq ($(wildcard pmu-events/arch/$(SRCARCH)/mapfile.csv),)
+ NO_JEVENTS := 1
+ endif
+endif
+ifneq ($(NO_JEVENTS),1)
+ NO_JEVENTS := 0
+ ifndef PYTHON
+ $(warning No python interpreter disabling jevent generation)
+ NO_JEVENTS := 1
+ else
+ # jevents.py uses f-strings present in Python 3.6 released in Dec. 2016.
+ JEVENTS_PYTHON_GOOD := $(shell $(PYTHON) -c 'import sys;print("1" if(sys.version_info.major >= 3 and sys.version_info.minor >= 6) else "0")' 2> /dev/null)
+ ifneq ($(JEVENTS_PYTHON_GOOD), 1)
+ $(warning Python interpreter too old (older than 3.6) disabling jevent generation)
+ NO_JEVENTS := 1
+ endif
+ endif
+endif
ifndef NO_LIBBFD
ifeq ($(feature-libbfd), 1)
@@ -904,14 +921,13 @@ ifndef NO_LIBBFD
ifeq ($(feature-libbfd-liberty), 1)
EXTLIBS += -lbfd -lopcodes -liberty
- FEATURE_CHECK_LDFLAGS-disassembler-four-args += -liberty -ldl
else
ifeq ($(feature-libbfd-liberty-z), 1)
EXTLIBS += -lbfd -lopcodes -liberty -lz
- FEATURE_CHECK_LDFLAGS-disassembler-four-args += -liberty -lz -ldl
endif
endif
$(call feature_check,disassembler-four-args)
+ $(call feature_check,disassembler-init-styled)
endif
ifeq ($(feature-libbfd-buildid), 1)
@@ -1025,6 +1041,10 @@ ifeq ($(feature-disassembler-four-args), 1)
CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE
endif
+ifeq ($(feature-disassembler-init-styled), 1)
+ CFLAGS += -DDISASM_INIT_STYLED
+endif
+
ifeq (${IS_64_BIT}, 1)
ifndef NO_PERF_READ_VDSO32
$(call feature_check,compile-32)
@@ -1329,7 +1349,7 @@ endif
# re-generate FEATURE-DUMP as we may have called feature_check, found out
# extra libraries to add to LDFLAGS of some other test and then redo those
-# tests, see the block about libbfd, disassembler-four-args, for instance.
+# tests.
$(shell rm -f $(FEATURE_DUMP_FILENAME))
$(foreach feat,$(FEATURE_TESTS),$(shell echo "$(call feature_assign,$(feat))" >> $(FEATURE_DUMP_FILENAME)))
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 8f738e11356d..e5921b347153 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -651,25 +651,15 @@ strip: $(PROGRAMS) $(OUTPUT)perf
PERF_IN := $(OUTPUT)perf-in.o
-JEVENTS := $(OUTPUT)pmu-events/jevents
-JEVENTS_IN := $(OUTPUT)pmu-events/jevents-in.o
-
PMU_EVENTS_IN := $(OUTPUT)pmu-events/pmu-events-in.o
-
-export JEVENTS
+export NO_JEVENTS
build := -f $(srctree)/tools/build/Makefile.build dir=. obj
$(PERF_IN): prepare FORCE
$(Q)$(MAKE) $(build)=perf
-$(JEVENTS_IN): FORCE
- $(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=pmu-events obj=jevents
-
-$(JEVENTS): $(JEVENTS_IN)
- $(QUIET_LINK)$(HOSTCC) $(JEVENTS_IN) -o $@
-
-$(PMU_EVENTS_IN): $(JEVENTS) FORCE
+$(PMU_EVENTS_IN): FORCE
$(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=pmu-events obj=pmu-events
$(OUTPUT)perf: $(PERFLIBS) $(PERF_IN) $(PMU_EVENTS_IN) $(LIBTRACEEVENT_DYNAMIC_LIST)
@@ -1015,7 +1005,8 @@ install-tests: all install-gtk
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell'; \
$(INSTALL) tests/shell/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell'; \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/lib'; \
- $(INSTALL) tests/shell/lib/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/lib'
+ $(INSTALL) tests/shell/lib/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/lib'; \
+ $(INSTALL) tests/shell/lib/*.py '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/lib'
install-bin: install-tools install-tests install-traceevent-plugins
@@ -1038,7 +1029,8 @@ SKEL_TMP_OUT := $(abspath $(SKEL_OUT)/.tmp)
SKELETONS := $(SKEL_OUT)/bpf_prog_profiler.skel.h
SKELETONS += $(SKEL_OUT)/bperf_leader.skel.h $(SKEL_OUT)/bperf_follower.skel.h
SKELETONS += $(SKEL_OUT)/bperf_cgroup.skel.h $(SKEL_OUT)/func_latency.skel.h
-SKELETONS += $(SKEL_OUT)/off_cpu.skel.h
+SKELETONS += $(SKEL_OUT)/off_cpu.skel.h $(SKEL_OUT)/lock_contention.skel.h
+SKELETONS += $(SKEL_OUT)/kwork_trace.skel.h
$(SKEL_TMP_OUT) $(LIBBPF_OUTPUT):
$(Q)$(MKDIR) -p $@
@@ -1089,7 +1081,7 @@ clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clea
$(call QUIET_CLEAN, core-objs) $(RM) $(LIBPERF_A) $(OUTPUT)perf-archive $(OUTPUT)perf-iostat $(LANG_BINDINGS)
$(Q)find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
$(Q)$(RM) $(OUTPUT).config-detected
- $(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf perf-read-vdso32 perf-read-vdsox32 $(OUTPUT)pmu-events/jevents $(OUTPUT)$(LIBJVMTI).so
+ $(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf perf-read-vdso32 perf-read-vdsox32 $(OUTPUT)$(LIBJVMTI).so
$(call QUIET_CLEAN, core-gen) $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)FEATURE-DUMP $(OUTPUT)util/*-bison* $(OUTPUT)util/*-flex* \
$(OUTPUT)util/intel-pt-decoder/inat-tables.c \
$(OUTPUT)tests/llvm-src-{base,kbuild,prologue,relocation}.c \
diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c
index 1b54638d53b0..a346d5f3dafa 100644
--- a/tools/perf/arch/arm/util/cs-etm.c
+++ b/tools/perf/arch/arm/util/cs-etm.c
@@ -438,7 +438,7 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
if (opts->full_auxtrace) {
struct evsel *tracking_evsel;
- err = parse_events(evlist, "dummy:u", NULL);
+ err = parse_event(evlist, "dummy:u");
if (err)
goto out;
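
This and several later hunks replace parse_events(evlist, str, NULL) with a two-argument parse_event(evlist, str). The helper itself lives in util/parse-events.[ch], outside this diff; as a rough sketch (an assumption, not the actual implementation), such a convenience wrapper could simply drop the unused error argument:

static inline int parse_event(struct evlist *evlist, const char *str)
{
	/* Hypothetical sketch: forward to parse_events() without an error struct. */
	return parse_events(evlist, str, /*err=*/NULL);
}
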
diff --git a/tools/perf/arch/arm64/util/arm-spe.c b/tools/perf/arch/arm64/util/arm-spe.c
index 6f4db2ac5420..d4c234076541 100644
--- a/tools/perf/arch/arm64/util/arm-spe.c
+++ b/tools/perf/arch/arm64/util/arm-spe.c
@@ -257,7 +257,7 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
evsel__set_sample_bit(arm_spe_evsel, PHYS_ADDR);
/* Add dummy event to keep tracking */
- err = parse_events(evlist, "dummy:u", NULL);
+ err = parse_event(evlist, "dummy:u");
if (err)
return err;
diff --git a/tools/perf/arch/arm64/util/pmu.c b/tools/perf/arch/arm64/util/pmu.c
index 79124bba713e..f849b1e88d43 100644
--- a/tools/perf/arch/arm64/util/pmu.c
+++ b/tools/perf/arch/arm64/util/pmu.c
@@ -3,7 +3,7 @@
#include "../../../util/cpumap.h"
#include "../../../util/pmu.h"
-const struct pmu_events_map *pmu_events_map__find(void)
+const struct pmu_events_table *pmu_events_table__find(void)
{
struct perf_pmu *pmu = NULL;
@@ -18,7 +18,7 @@ const struct pmu_events_map *pmu_events_map__find(void)
if (pmu->cpus->nr != cpu__max_cpu().cpu)
return NULL;
- return perf_pmu__find_map(pmu);
+ return perf_pmu__find_table(pmu);
}
return NULL;
diff --git a/tools/perf/arch/x86/tests/Build b/tools/perf/arch/x86/tests/Build
index 28d793390198..70b5bcbc15df 100644
--- a/tools/perf/arch/x86/tests/Build
+++ b/tools/perf/arch/x86/tests/Build
@@ -2,7 +2,6 @@ perf-$(CONFIG_DWARF_UNWIND) += regs_load.o
perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
perf-y += arch-tests.o
-perf-y += rdpmc.o
perf-y += sample-parsing.o
perf-$(CONFIG_AUXTRACE) += insn-x86.o intel-pt-pkt-decoder-test.o
perf-$(CONFIG_X86_64) += bp-modify.o
diff --git a/tools/perf/arch/x86/tests/arch-tests.c b/tools/perf/arch/x86/tests/arch-tests.c
index 64fb73d14d2f..04018b8aa85b 100644
--- a/tools/perf/arch/x86/tests/arch-tests.c
+++ b/tools/perf/arch/x86/tests/arch-tests.c
@@ -3,7 +3,6 @@
#include "tests/tests.h"
#include "arch-tests.h"
-DEFINE_SUITE("x86 rdpmc", rdpmc);
#ifdef HAVE_AUXTRACE_SUPPORT
DEFINE_SUITE("x86 instruction decoder - new instructions", insn_x86);
DEFINE_SUITE("Intel PT packet decoder", intel_pt_pkt_decoder);
@@ -14,7 +13,6 @@ DEFINE_SUITE("x86 bp modify", bp_modify);
DEFINE_SUITE("x86 Sample parsing", x86_sample_parsing);
struct test_suite *arch_tests[] = {
- &suite__rdpmc,
#ifdef HAVE_DWARF_UNWIND_SUPPORT
&suite__dwarf_unwind,
#endif
diff --git a/tools/perf/arch/x86/tests/intel-cqm.c b/tools/perf/arch/x86/tests/intel-cqm.c
index cb5b2c6c3b3b..360a082fc928 100644
--- a/tools/perf/arch/x86/tests/intel-cqm.c
+++ b/tools/perf/arch/x86/tests/intel-cqm.c
@@ -56,7 +56,7 @@ int test__intel_cqm_count_nmi_context(struct test_suite *test __maybe_unused, in
return TEST_FAIL;
}
- ret = parse_events(evlist, "intel_cqm/llc_occupancy/", NULL);
+ ret = parse_event(evlist, "intel_cqm/llc_occupancy/");
if (ret) {
pr_debug("parse_events failed, is \"intel_cqm/llc_occupancy/\" available?\n");
err = TEST_SKIP;
diff --git a/tools/perf/arch/x86/tests/rdpmc.c b/tools/perf/arch/x86/tests/rdpmc.c
deleted file mode 100644
index 498413ad9c97..000000000000
--- a/tools/perf/arch/x86/tests/rdpmc.c
+++ /dev/null
@@ -1,182 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <errno.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <signal.h>
-#include <sys/mman.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include "perf-sys.h"
-#include "debug.h"
-#include "tests/tests.h"
-#include "cloexec.h"
-#include "event.h"
-#include <internal/lib.h> // page_size
-#include "arch-tests.h"
-
-static u64 rdpmc(unsigned int counter)
-{
- unsigned int low, high;
-
- asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));
-
- return low | ((u64)high) << 32;
-}
-
-static u64 rdtsc(void)
-{
- unsigned int low, high;
-
- asm volatile("rdtsc" : "=a" (low), "=d" (high));
-
- return low | ((u64)high) << 32;
-}
-
-static u64 mmap_read_self(void *addr)
-{
- struct perf_event_mmap_page *pc = addr;
- u32 seq, idx, time_mult = 0, time_shift = 0;
- u64 count, cyc = 0, time_offset = 0, enabled, running, delta;
-
- do {
- seq = pc->lock;
- barrier();
-
- enabled = pc->time_enabled;
- running = pc->time_running;
-
- if (enabled != running) {
- cyc = rdtsc();
- time_mult = pc->time_mult;
- time_shift = pc->time_shift;
- time_offset = pc->time_offset;
- }
-
- idx = pc->index;
- count = pc->offset;
- if (idx)
- count += rdpmc(idx - 1);
-
- barrier();
- } while (pc->lock != seq);
-
- if (enabled != running) {
- u64 quot, rem;
-
- quot = (cyc >> time_shift);
- rem = cyc & (((u64)1 << time_shift) - 1);
- delta = time_offset + quot * time_mult +
- ((rem * time_mult) >> time_shift);
-
- enabled += delta;
- if (idx)
- running += delta;
-
- quot = count / running;
- rem = count % running;
- count = quot * enabled + (rem * enabled) / running;
- }
-
- return count;
-}
-
-/*
- * If the RDPMC instruction faults then signal this back to the test parent task:
- */
-static void segfault_handler(int sig __maybe_unused,
- siginfo_t *info __maybe_unused,
- void *uc __maybe_unused)
-{
- exit(-1);
-}
-
-static int __test__rdpmc(void)
-{
- volatile int tmp = 0;
- u64 i, loops = 1000;
- int n;
- int fd;
- void *addr;
- struct perf_event_attr attr = {
- .type = PERF_TYPE_HARDWARE,
- .config = PERF_COUNT_HW_INSTRUCTIONS,
- .exclude_kernel = 1,
- };
- u64 delta_sum = 0;
- struct sigaction sa;
- char sbuf[STRERR_BUFSIZE];
-
- sigfillset(&sa.sa_mask);
- sa.sa_sigaction = segfault_handler;
- sa.sa_flags = 0;
- sigaction(SIGSEGV, &sa, NULL);
-
- fd = sys_perf_event_open(&attr, 0, -1, -1,
- perf_event_open_cloexec_flag());
- if (fd < 0) {
- pr_err("Error: sys_perf_event_open() syscall returned "
- "with %d (%s)\n", fd,
- str_error_r(errno, sbuf, sizeof(sbuf)));
- return -1;
- }
-
- addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
- if (addr == (void *)(-1)) {
- pr_err("Error: mmap() syscall returned with (%s)\n",
- str_error_r(errno, sbuf, sizeof(sbuf)));
- goto out_close;
- }
-
- for (n = 0; n < 6; n++) {
- u64 stamp, now, delta;
-
- stamp = mmap_read_self(addr);
-
- for (i = 0; i < loops; i++)
- tmp++;
-
- now = mmap_read_self(addr);
- loops *= 10;
-
- delta = now - stamp;
- pr_debug("%14d: %14Lu\n", n, (long long)delta);
-
- delta_sum += delta;
- }
-
- munmap(addr, page_size);
- pr_debug(" ");
-out_close:
- close(fd);
-
- if (!delta_sum)
- return -1;
-
- return 0;
-}
-
-int test__rdpmc(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
-{
- int status = 0;
- int wret = 0;
- int ret;
- int pid;
-
- pid = fork();
- if (pid < 0)
- return -1;
-
- if (!pid) {
- ret = __test__rdpmc();
-
- exit(ret);
- }
-
- wret = waitpid(pid, &status, 0);
- if (wret < 0 || status)
- return -1;
-
- return 0;
-}
diff --git a/tools/perf/arch/x86/util/cpuid.h b/tools/perf/arch/x86/util/cpuid.h
new file mode 100644
index 000000000000..0a3ae0ace7e9
--- /dev/null
+++ b/tools/perf/arch/x86/util/cpuid.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef PERF_CPUID_H
+#define PERF_CPUID_H 1
+
+
+static inline void
+cpuid(unsigned int op, unsigned int op2, unsigned int *a, unsigned int *b,
+ unsigned int *c, unsigned int *d)
+{
+ /*
+ * Preserve %ebx/%rbx register by either placing it in %rdi or saving it
+ * on the stack - x86-64 needs to avoid the stack red zone. In PIC
+ * compilations %ebx contains the address of the global offset
+ * table. %rbx is occasionally used to address stack variables in
+ * presence of dynamic allocas.
+ */
+ asm(
+#if defined(__x86_64__)
+ "mov %%rbx, %%rdi\n"
+ "cpuid\n"
+ "xchg %%rdi, %%rbx\n"
+#else
+ "pushl %%ebx\n"
+ "cpuid\n"
+ "movl %%ebx, %%edi\n"
+ "popl %%ebx\n"
+#endif
+ : "=a"(*a), "=D"(*b), "=c"(*c), "=d"(*d)
+ : "a"(op), "2"(op2));
+}
+
+void get_cpuid_0(char *vendor, unsigned int *lvl);
+
+#endif
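
The new cpuid.h centralizes the CPUID inline assembly so that header.c and tsc.c (both further below) can share one helper. A hedged usage sketch, assuming a standalone program that includes this header and links against the get_cpuid_0() definition from header.c:

#include <stdio.h>
#include "cpuid.h"

int main(void)
{
	char vendor[16];
	unsigned int lvl, a, b, c, d;

	get_cpuid_0(vendor, &lvl);              /* e.g. "GenuineIntel", highest basic leaf */
	printf("vendor=%s max_leaf=%#x\n", vendor, lvl);

	if (lvl >= 0x15) {
		cpuid(0x15, 0, &a, &b, &c, &d); /* TSC/crystal ratio and crystal frequency */
		printf("ratio=%u/%u crystal=%u Hz\n", b, a, c);
	}
	return 0;
}
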
diff --git a/tools/perf/arch/x86/util/evlist.c b/tools/perf/arch/x86/util/evlist.c
index 68f681ad54c1..cb59ce9b9638 100644
--- a/tools/perf/arch/x86/util/evlist.c
+++ b/tools/perf/arch/x86/util/evlist.c
@@ -3,20 +3,66 @@
#include "util/pmu.h"
#include "util/evlist.h"
#include "util/parse-events.h"
+#include "util/event.h"
+#include "util/pmu-hybrid.h"
#include "topdown.h"
-#define TOPDOWN_L1_EVENTS "{slots,topdown-retiring,topdown-bad-spec,topdown-fe-bound,topdown-be-bound}"
-#define TOPDOWN_L2_EVENTS "{slots,topdown-retiring,topdown-bad-spec,topdown-fe-bound,topdown-be-bound,topdown-heavy-ops,topdown-br-mispredict,topdown-fetch-lat,topdown-mem-bound}"
+static int ___evlist__add_default_attrs(struct evlist *evlist,
+ struct perf_event_attr *attrs,
+ size_t nr_attrs)
+{
+ struct perf_cpu_map *cpus;
+ struct evsel *evsel, *n;
+ struct perf_pmu *pmu;
+ LIST_HEAD(head);
+ size_t i = 0;
+
+ for (i = 0; i < nr_attrs; i++)
+ event_attr_init(attrs + i);
+
+ if (!perf_pmu__has_hybrid())
+ return evlist__add_attrs(evlist, attrs, nr_attrs);
+
+ for (i = 0; i < nr_attrs; i++) {
+ if (attrs[i].type == PERF_TYPE_SOFTWARE) {
+ evsel = evsel__new(attrs + i);
+ if (evsel == NULL)
+ goto out_delete_partial_list;
+ list_add_tail(&evsel->core.node, &head);
+ continue;
+ }
+
+ perf_pmu__for_each_hybrid_pmu(pmu) {
+ evsel = evsel__new(attrs + i);
+ if (evsel == NULL)
+ goto out_delete_partial_list;
+ evsel->core.attr.config |= (__u64)pmu->type << PERF_PMU_TYPE_SHIFT;
+ cpus = perf_cpu_map__get(pmu->cpus);
+ evsel->core.cpus = cpus;
+ evsel->core.own_cpus = perf_cpu_map__get(cpus);
+ evsel->pmu_name = strdup(pmu->name);
+ list_add_tail(&evsel->core.node, &head);
+ }
+ }
+
+ evlist__splice_list_tail(evlist, &head);
+
+ return 0;
+
+out_delete_partial_list:
+ __evlist__for_each_entry_safe(&head, n, evsel)
+ evsel__delete(evsel);
+ return -1;
+}
-int arch_evlist__add_default_attrs(struct evlist *evlist)
+int arch_evlist__add_default_attrs(struct evlist *evlist,
+ struct perf_event_attr *attrs,
+ size_t nr_attrs)
{
- if (!pmu_have_event("cpu", "slots"))
- return 0;
+ if (nr_attrs)
+ return ___evlist__add_default_attrs(evlist, attrs, nr_attrs);
- if (pmu_have_event("cpu", "topdown-heavy-ops"))
- return parse_events(evlist, TOPDOWN_L2_EVENTS, NULL);
- else
- return parse_events(evlist, TOPDOWN_L1_EVENTS, NULL);
+ return topdown_parse_events(evlist);
}
struct evsel *arch_evlist__leader(struct list_head *list)
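
On hybrid systems the helper above clones each hardware attr once per hybrid PMU and folds the PMU type into attr.config using PERF_PMU_TYPE_SHIFT. The inverse split is what arch_evsel__hw_name() in the next file performs when printing names like cpu_core/cycles/; a minimal sketch of that decoding, using the PERF_HW_EVENT_MASK and PERF_PMU_TYPE_SHIFT constants already relied on by this series (the helper name is invented for illustration):

static void split_extended_config(__u64 config, __u64 *pmu_type, __u64 *hw_event)
{
	/* Low bits carry the generic hardware event id... */
	*hw_event = config & PERF_HW_EVENT_MASK;
	/* ...and the owning PMU's type sits in the upper bits. */
	*pmu_type = config >> PERF_PMU_TYPE_SHIFT;
}
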
diff --git a/tools/perf/arch/x86/util/evsel.c b/tools/perf/arch/x86/util/evsel.c
index 3501399cef35..ea3972d785d1 100644
--- a/tools/perf/arch/x86/util/evsel.c
+++ b/tools/perf/arch/x86/util/evsel.c
@@ -6,6 +6,10 @@
#include "util/pmu.h"
#include "linux/string.h"
#include "evsel.h"
+#include "util/debug.h"
+
+#define IBS_FETCH_L3MISSONLY (1ULL << 59)
+#define IBS_OP_L3MISSONLY (1ULL << 16)
void arch_evsel__set_sample_weight(struct evsel *evsel)
{
@@ -61,3 +65,71 @@ bool arch_evsel__must_be_in_group(const struct evsel *evsel)
(strcasestr(evsel->name, "slots") ||
strcasestr(evsel->name, "topdown"));
}
+
+int arch_evsel__hw_name(struct evsel *evsel, char *bf, size_t size)
+{
+ u64 event = evsel->core.attr.config & PERF_HW_EVENT_MASK;
+ u64 pmu = evsel->core.attr.config >> PERF_PMU_TYPE_SHIFT;
+ const char *event_name;
+
+ if (event < PERF_COUNT_HW_MAX && evsel__hw_names[event])
+ event_name = evsel__hw_names[event];
+ else
+ event_name = "unknown-hardware";
+
+ /* The PMU type is not required for the non-hybrid platform. */
+ if (!pmu)
+ return scnprintf(bf, size, "%s", event_name);
+
+ return scnprintf(bf, size, "%s/%s/",
+ evsel->pmu_name ? evsel->pmu_name : "cpu",
+ event_name);
+}
+
+static void ibs_l3miss_warn(void)
+{
+ pr_warning(
+"WARNING: Hw internally resets sampling period when L3 Miss Filtering is enabled\n"
+"and tagged operation does not cause L3 Miss. This causes sampling period skew.\n");
+}
+
+void arch__post_evsel_config(struct evsel *evsel, struct perf_event_attr *attr)
+{
+ struct perf_pmu *evsel_pmu, *ibs_fetch_pmu, *ibs_op_pmu;
+ static int warned_once;
+ /* 0: Uninitialized, 1: Yes, -1: No */
+ static int is_amd;
+
+ if (warned_once || is_amd == -1)
+ return;
+
+ if (!is_amd) {
+ struct perf_env *env = evsel__env(evsel);
+
+ if (!perf_env__cpuid(env) || !env->cpuid ||
+ !strstarts(env->cpuid, "AuthenticAMD")) {
+ is_amd = -1;
+ return;
+ }
+ is_amd = 1;
+ }
+
+ evsel_pmu = evsel__find_pmu(evsel);
+ if (!evsel_pmu)
+ return;
+
+ ibs_fetch_pmu = perf_pmu__find("ibs_fetch");
+ ibs_op_pmu = perf_pmu__find("ibs_op");
+
+ if (ibs_fetch_pmu && ibs_fetch_pmu->type == evsel_pmu->type) {
+ if (attr->config & IBS_FETCH_L3MISSONLY) {
+ ibs_l3miss_warn();
+ warned_once = 1;
+ }
+ } else if (ibs_op_pmu && ibs_op_pmu->type == evsel_pmu->type) {
+ if (attr->config & IBS_OP_L3MISSONLY) {
+ ibs_l3miss_warn();
+ warned_once = 1;
+ }
+ }
+}
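
The warning above can only trigger on AMD CPUs whose evsel resolves to the ibs_fetch or ibs_op PMU with the L3MissOnly bit set in attr.config, for example an invocation along the lines of perf record -e ibs_op/l3missonly=1/ (illustrative; the exact event syntax depends on the PMU's sysfs format files). The warned_once flag keeps it to a single message per run.
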
diff --git a/tools/perf/arch/x86/util/header.c b/tools/perf/arch/x86/util/header.c
index 578c8c568ffd..a51444a77a5f 100644
--- a/tools/perf/arch/x86/util/header.c
+++ b/tools/perf/arch/x86/util/header.c
@@ -9,18 +9,17 @@
#include "../../../util/debug.h"
#include "../../../util/header.h"
+#include "cpuid.h"
-static inline void
-cpuid(unsigned int op, unsigned int *a, unsigned int *b, unsigned int *c,
- unsigned int *d)
+void get_cpuid_0(char *vendor, unsigned int *lvl)
{
- __asm__ __volatile__ (".byte 0x53\n\tcpuid\n\t"
- "movl %%ebx, %%esi\n\t.byte 0x5b"
- : "=a" (*a),
- "=S" (*b),
- "=c" (*c),
- "=d" (*d)
- : "a" (op));
+ unsigned int b, c, d;
+
+ cpuid(0, 0, lvl, &b, &c, &d);
+ strncpy(&vendor[0], (char *)(&b), 4);
+ strncpy(&vendor[4], (char *)(&d), 4);
+ strncpy(&vendor[8], (char *)(&c), 4);
+ vendor[12] = '\0';
}
static int
@@ -31,14 +30,10 @@ __get_cpuid(char *buffer, size_t sz, const char *fmt)
int nb;
char vendor[16];
- cpuid(0, &lvl, &b, &c, &d);
- strncpy(&vendor[0], (char *)(&b), 4);
- strncpy(&vendor[4], (char *)(&d), 4);
- strncpy(&vendor[8], (char *)(&c), 4);
- vendor[12] = '\0';
+ get_cpuid_0(vendor, &lvl);
if (lvl >= 1) {
- cpuid(1, &a, &b, &c, &d);
+ cpuid(1, 0, &a, &b, &c, &d);
family = (a >> 8) & 0xf; /* bits 11 - 8 */
model = (a >> 4) & 0xf; /* Bits 7 - 4 */
diff --git a/tools/perf/arch/x86/util/intel-bts.c b/tools/perf/arch/x86/util/intel-bts.c
index bcccfbade5c6..439c2956f3e7 100644
--- a/tools/perf/arch/x86/util/intel-bts.c
+++ b/tools/perf/arch/x86/util/intel-bts.c
@@ -233,7 +233,7 @@ static int intel_bts_recording_options(struct auxtrace_record *itr,
struct evsel *tracking_evsel;
int err;
- err = parse_events(evlist, "dummy:u", NULL);
+ err = parse_event(evlist, "dummy:u");
if (err)
return err;
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index 06c2cdfd8f2f..13933020a79e 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -426,7 +426,7 @@ static int intel_pt_track_switches(struct evlist *evlist)
if (!evlist__can_select_event(evlist, sched_switch))
return -EPERM;
- err = parse_events(evlist, sched_switch, NULL);
+ err = parse_event(evlist, sched_switch);
if (err) {
pr_debug2("%s: failed to parse %s, error %d\n",
__func__, sched_switch, err);
diff --git a/tools/perf/arch/x86/util/iostat.c b/tools/perf/arch/x86/util/iostat.c
index 792cd75ade33..404de795ec0b 100644
--- a/tools/perf/arch/x86/util/iostat.c
+++ b/tools/perf/arch/x86/util/iostat.c
@@ -316,7 +316,7 @@ static int iostat_event_group(struct evlist *evl,
sprintf(iostat_cmd, iostat_cmd_template,
list->rps[idx]->pmu_idx, list->rps[idx]->pmu_idx,
list->rps[idx]->pmu_idx, list->rps[idx]->pmu_idx);
- ret = parse_events(evl, iostat_cmd, NULL);
+ ret = parse_event(evl, iostat_cmd);
if (ret)
goto err;
}
diff --git a/tools/perf/arch/x86/util/topdown.c b/tools/perf/arch/x86/util/topdown.c
index f81a7cfe4d63..54810f9acd6f 100644
--- a/tools/perf/arch/x86/util/topdown.c
+++ b/tools/perf/arch/x86/util/topdown.c
@@ -3,9 +3,17 @@
#include "api/fs/fs.h"
#include "util/pmu.h"
#include "util/topdown.h"
+#include "util/evlist.h"
+#include "util/debug.h"
+#include "util/pmu-hybrid.h"
#include "topdown.h"
#include "evsel.h"
+#define TOPDOWN_L1_EVENTS "{slots,topdown-retiring,topdown-bad-spec,topdown-fe-bound,topdown-be-bound}"
+#define TOPDOWN_L1_EVENTS_CORE "{slots,cpu_core/topdown-retiring/,cpu_core/topdown-bad-spec/,cpu_core/topdown-fe-bound/,cpu_core/topdown-be-bound/}"
+#define TOPDOWN_L2_EVENTS "{slots,topdown-retiring,topdown-bad-spec,topdown-fe-bound,topdown-be-bound,topdown-heavy-ops,topdown-br-mispredict,topdown-fetch-lat,topdown-mem-bound}"
+#define TOPDOWN_L2_EVENTS_CORE "{slots,cpu_core/topdown-retiring/,cpu_core/topdown-bad-spec/,cpu_core/topdown-fe-bound/,cpu_core/topdown-be-bound/,cpu_core/topdown-heavy-ops/,cpu_core/topdown-br-mispredict/,cpu_core/topdown-fetch-lat/,cpu_core/topdown-mem-bound/}"
+
/* Check whether there is a PMU which supports the perf metrics. */
bool topdown_sys_has_perf_metrics(void)
{
@@ -73,3 +81,46 @@ bool arch_topdown_sample_read(struct evsel *leader)
return false;
}
+
+const char *arch_get_topdown_pmu_name(struct evlist *evlist, bool warn)
+{
+ const char *pmu_name;
+
+ if (!perf_pmu__has_hybrid())
+ return "cpu";
+
+ if (!evlist->hybrid_pmu_name) {
+ if (warn)
+			pr_warning("WARNING: defaulting to cpu_core topdown events\n");
+ evlist->hybrid_pmu_name = perf_pmu__hybrid_type_to_pmu("core");
+ }
+
+ pmu_name = evlist->hybrid_pmu_name;
+
+ return pmu_name;
+}
+
+int topdown_parse_events(struct evlist *evlist)
+{
+ const char *topdown_events;
+ const char *pmu_name;
+
+ if (!topdown_sys_has_perf_metrics())
+ return 0;
+
+ pmu_name = arch_get_topdown_pmu_name(evlist, false);
+
+ if (pmu_have_event(pmu_name, "topdown-heavy-ops")) {
+ if (!strcmp(pmu_name, "cpu_core"))
+ topdown_events = TOPDOWN_L2_EVENTS_CORE;
+ else
+ topdown_events = TOPDOWN_L2_EVENTS;
+ } else {
+ if (!strcmp(pmu_name, "cpu_core"))
+ topdown_events = TOPDOWN_L1_EVENTS_CORE;
+ else
+ topdown_events = TOPDOWN_L1_EVENTS;
+ }
+
+ return parse_event(evlist, topdown_events);
+}
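
When no explicit attrs are passed in, arch_evlist__add_default_attrs() in the previous file falls through to topdown_parse_events() above, so a plain perf stat ends up parsing the cpu_core-qualified TOPDOWN_*_EVENTS_CORE group on hybrid machines (L2 when topdown-heavy-ops exists, L1 otherwise) and the unqualified TOPDOWN_*_EVENTS strings elsewhere; the cpu_core default warning in arch_get_topdown_pmu_name() stays silent here because warn is passed as false.
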
diff --git a/tools/perf/arch/x86/util/topdown.h b/tools/perf/arch/x86/util/topdown.h
index 46bf9273e572..7eb81f042838 100644
--- a/tools/perf/arch/x86/util/topdown.h
+++ b/tools/perf/arch/x86/util/topdown.h
@@ -3,5 +3,6 @@
#define _TOPDOWN_H 1
bool topdown_sys_has_perf_metrics(void);
+int topdown_parse_events(struct evlist *evlist);
#endif
diff --git a/tools/perf/arch/x86/util/tsc.c b/tools/perf/arch/x86/util/tsc.c
index 559365f8fe52..eb2b5195bd02 100644
--- a/tools/perf/arch/x86/util/tsc.c
+++ b/tools/perf/arch/x86/util/tsc.c
@@ -1,7 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
+#include <math.h>
+#include <string.h>
+#include "../../../util/debug.h"
#include "../../../util/tsc.h"
+#include "cpuid.h"
u64 rdtsc(void)
{
@@ -11,3 +15,76 @@ u64 rdtsc(void)
return low | ((u64)high) << 32;
}
+
+/*
+ * Derive the TSC frequency in Hz from the /proc/cpuinfo, for example:
+ * ...
+ * model name : Intel(R) Xeon(R) Gold 6154 CPU @ 3.00GHz
+ * ...
+ * will return 3000000000.
+ */
+static double cpuinfo_tsc_freq(void)
+{
+ double result = 0;
+ FILE *cpuinfo;
+ char *line = NULL;
+ size_t len = 0;
+
+ cpuinfo = fopen("/proc/cpuinfo", "r");
+ if (!cpuinfo) {
+ pr_err("Failed to read /proc/cpuinfo for TSC frequency");
+ return NAN;
+ }
+ while (getline(&line, &len, cpuinfo) > 0) {
+ if (!strncmp(line, "model name", 10)) {
+ char *pos = strstr(line + 11, " @ ");
+
+ if (pos && sscanf(pos, " @ %lfGHz", &result) == 1) {
+ result *= 1000000000;
+ goto out;
+ }
+ }
+ }
+out:
+ if (fpclassify(result) == FP_ZERO)
+ pr_err("Failed to find TSC frequency in /proc/cpuinfo");
+
+ free(line);
+ fclose(cpuinfo);
+ return result;
+}
+
+double arch_get_tsc_freq(void)
+{
+ unsigned int a, b, c, d, lvl;
+ static bool cached;
+ static double tsc;
+ char vendor[16];
+
+ if (cached)
+ return tsc;
+
+ cached = true;
+ get_cpuid_0(vendor, &lvl);
+ if (!strstr(vendor, "Intel"))
+ return 0;
+
+ /*
+	 * The Time Stamp Counter and Nominal Core Crystal Clock
+	 * Information leaf (CPUID 0x15) is not supported.
+ */
+ if (lvl < 0x15) {
+ tsc = cpuinfo_tsc_freq();
+ return tsc;
+ }
+
+ cpuid(0x15, 0, &a, &b, &c, &d);
+ /* TSC frequency is not enumerated */
+ if (!a || !b || !c) {
+ tsc = cpuinfo_tsc_freq();
+ return tsc;
+ }
+
+ tsc = (double)c * (double)b / (double)a;
+ return tsc;
+}
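
The leaf-0x15 path above computes the TSC rate as crystal_hz * numerator / denominator, i.e. tsc = ECX * EBX / EAX. With illustrative register values EAX = 2, EBX = 176 and ECX = 24,000,000 (a 24 MHz crystal) that works out to 24,000,000 * 176 / 2 = 2,112,000,000 Hz. If the leaf is absent (lvl < 0x15) or any of the three registers reads zero, the code falls back to parsing the "@ x.xxGHz" suffix of the model name in /proc/cpuinfo.
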
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 2ffe071dbcff..f839e69492e8 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -50,7 +50,9 @@ struct perf_annotate {
bool use_tui;
#endif
bool use_stdio, use_stdio2;
+#ifdef HAVE_GTK2_SUPPORT
bool use_gtk;
+#endif
bool skip_missing;
bool has_br_stack;
bool group_set;
@@ -526,7 +528,9 @@ int cmd_annotate(int argc, const char **argv)
 	OPT_BOOLEAN('q', "quiet", &quiet, "do not show any message"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
+#ifdef HAVE_GTK2_SUPPORT
OPT_BOOLEAN(0, "gtk", &annotate.use_gtk, "Use the GTK interface"),
+#endif
#ifdef HAVE_SLANG_SUPPORT
OPT_BOOLEAN(0, "tui", &annotate.use_tui, "Use the TUI interface"),
#endif
@@ -614,10 +618,12 @@ int cmd_annotate(int argc, const char **argv)
if (annotate_check_args(&annotate.opts) < 0)
return -EINVAL;
+#ifdef HAVE_GTK2_SUPPORT
if (symbol_conf.show_nr_samples && annotate.use_gtk) {
pr_err("--show-nr-samples is not available in --gtk mode at this time\n");
return ret;
}
+#endif
ret = symbol__validate_sym_arguments();
if (ret)
@@ -656,8 +662,10 @@ int cmd_annotate(int argc, const char **argv)
else if (annotate.use_tui)
use_browser = 1;
#endif
+#ifdef HAVE_GTK2_SUPPORT
else if (annotate.use_gtk)
use_browser = 2;
+#endif
setup_browser(true);
diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c
index cebadd632234..00bfe89f0b5d 100644
--- a/tools/perf/builtin-buildid-list.c
+++ b/tools/perf/builtin-buildid-list.c
@@ -12,14 +12,44 @@
#include "util/build-id.h"
#include "util/debug.h"
#include "util/dso.h"
+#include "util/map.h"
#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include "util/session.h"
#include "util/symbol.h"
#include "util/data.h"
#include <errno.h>
+#include <inttypes.h>
#include <linux/err.h>
+static int buildid__map_cb(struct map *map, void *arg __maybe_unused)
+{
+ const struct dso *dso = map->dso;
+ char bid_buf[SBUILD_ID_SIZE];
+
+ memset(bid_buf, 0, sizeof(bid_buf));
+ if (dso->has_build_id)
+ build_id__sprintf(&dso->bid, bid_buf);
+ printf("%s %16" PRIx64 " %16" PRIx64, bid_buf, map->start, map->end);
+ if (dso->long_name != NULL) {
+ printf(" %s", dso->long_name);
+ } else if (dso->short_name != NULL) {
+ printf(" %s", dso->short_name);
+ }
+ printf("\n");
+
+ return 0;
+}
+
+static void buildid__show_kernel_maps(void)
+{
+ struct machine *machine;
+
+ machine = machine__new_host();
+ machine__for_each_kernel_map(machine, buildid__map_cb, NULL);
+ machine__delete(machine);
+}
+
static int sysfs__fprintf_build_id(FILE *fp)
{
char sbuild_id[SBUILD_ID_SIZE];
@@ -99,6 +129,7 @@ out:
int cmd_buildid_list(int argc, const char **argv)
{
bool show_kernel = false;
+ bool show_kernel_maps = false;
bool with_hits = false;
bool force = false;
const struct option options[] = {
@@ -106,6 +137,8 @@ int cmd_buildid_list(int argc, const char **argv)
OPT_STRING('i', "input", &input_name, "file", "input file name"),
OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
OPT_BOOLEAN('k', "kernel", &show_kernel, "Show current kernel build id"),
+ OPT_BOOLEAN('m', "kernel-maps", &show_kernel_maps,
+ "Show build id of current kernel + modules"),
OPT_INCR('v', "verbose", &verbose, "be more verbose"),
OPT_END()
};
@@ -117,8 +150,12 @@ int cmd_buildid_list(int argc, const char **argv)
argc = parse_options(argc, argv, options, buildid_list_usage, 0);
setup_pager();
- if (show_kernel)
+ if (show_kernel) {
return !(sysfs__fprintf_build_id(stdout) > 0);
+ } else if (show_kernel_maps) {
+ buildid__show_kernel_maps();
+ return 0;
+ }
return perf_session__list_build_ids(force, with_hits);
}
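
With the new -m/--kernel-maps option, buildid__map_cb() prints one line per kernel map of the running machine in the form '<build-id> <start> <end> <dso name>'. A hypothetical output line (values invented for illustration) would look like:

5f6ad72053cfa4c1f2c5e4f2e4a6c9d0b1a2c3d4 ffffffffa6400000 ffffffffa7401d67 [kernel.kallsyms]
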
diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
index 4898ee57d156..653e13b5037e 100644
--- a/tools/perf/builtin-c2c.c
+++ b/tools/perf/builtin-c2c.c
@@ -55,6 +55,8 @@ struct c2c_hists {
struct compute_stats {
struct stats lcl_hitm;
struct stats rmt_hitm;
+ struct stats lcl_peer;
+ struct stats rmt_peer;
struct stats load;
};
@@ -113,16 +115,18 @@ struct perf_c2c {
};
enum {
- DISPLAY_LCL,
- DISPLAY_RMT,
- DISPLAY_TOT,
+ DISPLAY_LCL_HITM,
+ DISPLAY_RMT_HITM,
+ DISPLAY_TOT_HITM,
+ DISPLAY_SNP_PEER,
DISPLAY_MAX,
};
static const char *display_str[DISPLAY_MAX] = {
- [DISPLAY_LCL] = "Local",
- [DISPLAY_RMT] = "Remote",
- [DISPLAY_TOT] = "Total",
+ [DISPLAY_LCL_HITM] = "Local HITMs",
+ [DISPLAY_RMT_HITM] = "Remote HITMs",
+ [DISPLAY_TOT_HITM] = "Total HITMs",
+ [DISPLAY_SNP_PEER] = "Peer Snoop",
};
static const struct option c2c_options[] = {
@@ -154,6 +158,8 @@ static void *c2c_he_zalloc(size_t size)
init_stats(&c2c_he->cstats.lcl_hitm);
init_stats(&c2c_he->cstats.rmt_hitm);
+ init_stats(&c2c_he->cstats.lcl_peer);
+ init_stats(&c2c_he->cstats.rmt_peer);
init_stats(&c2c_he->cstats.load);
return &c2c_he->he;
@@ -253,6 +259,10 @@ static void compute_stats(struct c2c_hist_entry *c2c_he,
update_stats(&cstats->rmt_hitm, weight);
else if (stats->lcl_hitm)
update_stats(&cstats->lcl_hitm, weight);
+ else if (stats->rmt_peer)
+ update_stats(&cstats->rmt_peer, weight);
+ else if (stats->lcl_peer)
+ update_stats(&cstats->lcl_peer, weight);
else if (stats->load)
update_stats(&cstats->load, weight);
}
@@ -650,6 +660,9 @@ __f ## _cmp(struct perf_hpp_fmt *fmt __maybe_unused, \
STAT_FN(rmt_hitm)
STAT_FN(lcl_hitm)
+STAT_FN(rmt_peer)
+STAT_FN(lcl_peer)
+STAT_FN(tot_peer)
STAT_FN(store)
STAT_FN(st_l1hit)
STAT_FN(st_l1miss)
@@ -787,7 +800,7 @@ percent_color(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
return hpp_color_scnprintf(hpp, "%*.2f%%", width - 1, per);
}
-static double percent_hitm(struct c2c_hist_entry *c2c_he)
+static double percent_costly_snoop(struct c2c_hist_entry *c2c_he)
{
struct c2c_hists *hists;
struct c2c_stats *stats;
@@ -800,17 +813,22 @@ static double percent_hitm(struct c2c_hist_entry *c2c_he)
total = &hists->stats;
switch (c2c.display) {
- case DISPLAY_RMT:
+ case DISPLAY_RMT_HITM:
st = stats->rmt_hitm;
tot = total->rmt_hitm;
break;
- case DISPLAY_LCL:
+ case DISPLAY_LCL_HITM:
st = stats->lcl_hitm;
tot = total->lcl_hitm;
break;
- case DISPLAY_TOT:
+ case DISPLAY_TOT_HITM:
st = stats->tot_hitm;
tot = total->tot_hitm;
+ break;
+ case DISPLAY_SNP_PEER:
+ st = stats->tot_peer;
+ tot = total->tot_peer;
+ break;
default:
break;
}
@@ -827,8 +845,8 @@ static double percent_hitm(struct c2c_hist_entry *c2c_he)
})
static int
-percent_hitm_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
- struct hist_entry *he)
+percent_costly_snoop_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
{
struct c2c_hist_entry *c2c_he;
int width = c2c_width(fmt, hpp, he->hists);
@@ -836,20 +854,20 @@ percent_hitm_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
double per;
c2c_he = container_of(he, struct c2c_hist_entry, he);
- per = percent_hitm(c2c_he);
+ per = percent_costly_snoop(c2c_he);
return scnprintf(hpp->buf, hpp->size, "%*s", width, PERC_STR(buf, per));
}
static int
-percent_hitm_color(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
- struct hist_entry *he)
+percent_costly_snoop_color(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
{
- return percent_color(fmt, hpp, he, percent_hitm);
+ return percent_color(fmt, hpp, he, percent_costly_snoop);
}
static int64_t
-percent_hitm_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
- struct hist_entry *left, struct hist_entry *right)
+percent_costly_snoop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
+ struct hist_entry *left, struct hist_entry *right)
{
struct c2c_hist_entry *c2c_left;
struct c2c_hist_entry *c2c_right;
@@ -859,8 +877,8 @@ percent_hitm_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
c2c_left = container_of(left, struct c2c_hist_entry, he);
c2c_right = container_of(right, struct c2c_hist_entry, he);
- per_left = percent_hitm(c2c_left);
- per_right = percent_hitm(c2c_right);
+ per_left = percent_costly_snoop(c2c_left);
+ per_right = percent_costly_snoop(c2c_right);
return per_left - per_right;
}
@@ -899,6 +917,8 @@ static double percent_ ## __f(struct c2c_hist_entry *c2c_he) \
PERCENT_FN(rmt_hitm)
PERCENT_FN(lcl_hitm)
+PERCENT_FN(rmt_peer)
+PERCENT_FN(lcl_peer)
PERCENT_FN(st_l1hit)
PERCENT_FN(st_l1miss)
PERCENT_FN(st_na)
@@ -966,6 +986,68 @@ percent_lcl_hitm_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
}
static int
+percent_lcl_peer_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ int width = c2c_width(fmt, hpp, he->hists);
+ double per = PERCENT(he, lcl_peer);
+ char buf[10];
+
+ return scnprintf(hpp->buf, hpp->size, "%*s", width, PERC_STR(buf, per));
+}
+
+static int
+percent_lcl_peer_color(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ return percent_color(fmt, hpp, he, percent_lcl_peer);
+}
+
+static int64_t
+percent_lcl_peer_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
+ struct hist_entry *left, struct hist_entry *right)
+{
+ double per_left;
+ double per_right;
+
+ per_left = PERCENT(left, lcl_peer);
+ per_right = PERCENT(right, lcl_peer);
+
+ return per_left - per_right;
+}
+
+static int
+percent_rmt_peer_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ int width = c2c_width(fmt, hpp, he->hists);
+ double per = PERCENT(he, rmt_peer);
+ char buf[10];
+
+ return scnprintf(hpp->buf, hpp->size, "%*s", width, PERC_STR(buf, per));
+}
+
+static int
+percent_rmt_peer_color(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ return percent_color(fmt, hpp, he, percent_rmt_peer);
+}
+
+static int64_t
+percent_rmt_peer_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
+ struct hist_entry *left, struct hist_entry *right)
+{
+ double per_left;
+ double per_right;
+
+ per_left = PERCENT(left, rmt_peer);
+ per_right = PERCENT(right, rmt_peer);
+
+ return per_left - per_right;
+}
+
+static int
percent_stores_l1hit_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
@@ -1142,18 +1224,22 @@ node_entry(struct perf_hpp_fmt *fmt __maybe_unused, struct perf_hpp *hpp,
advance_hpp(hpp, ret);
switch (c2c.display) {
- case DISPLAY_RMT:
+ case DISPLAY_RMT_HITM:
ret = display_metrics(hpp, stats->rmt_hitm,
c2c_he->stats.rmt_hitm);
break;
- case DISPLAY_LCL:
+ case DISPLAY_LCL_HITM:
ret = display_metrics(hpp, stats->lcl_hitm,
c2c_he->stats.lcl_hitm);
break;
- case DISPLAY_TOT:
+ case DISPLAY_TOT_HITM:
ret = display_metrics(hpp, stats->tot_hitm,
c2c_he->stats.tot_hitm);
break;
+ case DISPLAY_SNP_PEER:
+ ret = display_metrics(hpp, stats->tot_peer,
+ c2c_he->stats.tot_peer);
+ break;
default:
break;
}
@@ -1213,6 +1299,8 @@ __func(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, struct hist_entry *he) \
MEAN_ENTRY(mean_rmt_entry, rmt_hitm);
MEAN_ENTRY(mean_lcl_entry, lcl_hitm);
MEAN_ENTRY(mean_load_entry, load);
+MEAN_ENTRY(mean_rmt_peer_entry, rmt_peer);
+MEAN_ENTRY(mean_lcl_peer_entry, lcl_peer);
static int
cpucnt_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
@@ -1360,6 +1448,30 @@ static struct c2c_dimension dim_rmt_hitm = {
.width = 7,
};
+static struct c2c_dimension dim_tot_peer = {
+ .header = HEADER_SPAN("------- Load Peer -------", "Total", 2),
+ .name = "tot_peer",
+ .cmp = tot_peer_cmp,
+ .entry = tot_peer_entry,
+ .width = 7,
+};
+
+static struct c2c_dimension dim_lcl_peer = {
+ .header = HEADER_SPAN_LOW("Local"),
+ .name = "lcl_peer",
+ .cmp = lcl_peer_cmp,
+ .entry = lcl_peer_entry,
+ .width = 7,
+};
+
+static struct c2c_dimension dim_rmt_peer = {
+ .header = HEADER_SPAN_LOW("Remote"),
+ .name = "rmt_peer",
+ .cmp = rmt_peer_cmp,
+ .entry = rmt_peer_entry,
+ .width = 7,
+};
+
static struct c2c_dimension dim_cl_rmt_hitm = {
.header = HEADER_SPAN("----- HITM -----", "Rmt", 1),
.name = "cl_rmt_hitm",
@@ -1376,6 +1488,22 @@ static struct c2c_dimension dim_cl_lcl_hitm = {
.width = 7,
};
+static struct c2c_dimension dim_cl_rmt_peer = {
+ .header = HEADER_SPAN("----- Peer -----", "Rmt", 1),
+ .name = "cl_rmt_peer",
+ .cmp = rmt_peer_cmp,
+ .entry = rmt_peer_entry,
+ .width = 7,
+};
+
+static struct c2c_dimension dim_cl_lcl_peer = {
+ .header = HEADER_SPAN_LOW("Lcl"),
+ .name = "cl_lcl_peer",
+ .cmp = lcl_peer_cmp,
+ .entry = lcl_peer_entry,
+ .width = 7,
+};
+
static struct c2c_dimension dim_tot_stores = {
.header = HEADER_BOTH("Total", "Stores"),
.name = "tot_stores",
@@ -1488,17 +1616,18 @@ static struct c2c_dimension dim_tot_loads = {
.width = 7,
};
-static struct c2c_header percent_hitm_header[] = {
- [DISPLAY_LCL] = HEADER_BOTH("Lcl", "Hitm"),
- [DISPLAY_RMT] = HEADER_BOTH("Rmt", "Hitm"),
- [DISPLAY_TOT] = HEADER_BOTH("Tot", "Hitm"),
+static struct c2c_header percent_costly_snoop_header[] = {
+ [DISPLAY_LCL_HITM] = HEADER_BOTH("Lcl", "Hitm"),
+ [DISPLAY_RMT_HITM] = HEADER_BOTH("Rmt", "Hitm"),
+ [DISPLAY_TOT_HITM] = HEADER_BOTH("Tot", "Hitm"),
+ [DISPLAY_SNP_PEER] = HEADER_BOTH("Peer", "Snoop"),
};
-static struct c2c_dimension dim_percent_hitm = {
- .name = "percent_hitm",
- .cmp = percent_hitm_cmp,
- .entry = percent_hitm_entry,
- .color = percent_hitm_color,
+static struct c2c_dimension dim_percent_costly_snoop = {
+ .name = "percent_costly_snoop",
+ .cmp = percent_costly_snoop_cmp,
+ .entry = percent_costly_snoop_entry,
+ .color = percent_costly_snoop_color,
.width = 7,
};
@@ -1520,6 +1649,24 @@ static struct c2c_dimension dim_percent_lcl_hitm = {
.width = 7,
};
+static struct c2c_dimension dim_percent_rmt_peer = {
+ .header = HEADER_SPAN("-- Peer Snoop --", "Rmt", 1),
+ .name = "percent_rmt_peer",
+ .cmp = percent_rmt_peer_cmp,
+ .entry = percent_rmt_peer_entry,
+ .color = percent_rmt_peer_color,
+ .width = 7,
+};
+
+static struct c2c_dimension dim_percent_lcl_peer = {
+ .header = HEADER_SPAN_LOW("Lcl"),
+ .name = "percent_lcl_peer",
+ .cmp = percent_lcl_peer_cmp,
+ .entry = percent_lcl_peer_entry,
+ .color = percent_lcl_peer_color,
+ .width = 7,
+};
+
static struct c2c_dimension dim_percent_stores_l1hit = {
.header = HEADER_SPAN("------- Store Refs ------", "L1 Hit", 2),
.name = "percent_stores_l1hit",
@@ -1588,12 +1735,6 @@ static struct c2c_dimension dim_dso = {
.se = &sort_dso,
};
-static struct c2c_header header_node[3] = {
- HEADER_LOW("Node"),
- HEADER_LOW("Node{cpus %hitms %stores}"),
- HEADER_LOW("Node{cpu list}"),
-};
-
static struct c2c_dimension dim_node = {
.name = "node",
.cmp = empty_cmp,
@@ -1625,6 +1766,22 @@ static struct c2c_dimension dim_mean_load = {
.width = 8,
};
+static struct c2c_dimension dim_mean_rmt_peer = {
+ .header = HEADER_SPAN("---------- cycles ----------", "rmt peer", 2),
+ .name = "mean_rmt_peer",
+ .cmp = empty_cmp,
+ .entry = mean_rmt_peer_entry,
+ .width = 8,
+};
+
+static struct c2c_dimension dim_mean_lcl_peer = {
+ .header = HEADER_SPAN_LOW("lcl peer"),
+ .name = "mean_lcl_peer",
+ .cmp = empty_cmp,
+ .entry = mean_lcl_peer_entry,
+ .width = 8,
+};
+
static struct c2c_dimension dim_cpucnt = {
.header = HEADER_BOTH("cpu", "cnt"),
.name = "cpucnt",
@@ -1672,8 +1829,13 @@ static struct c2c_dimension *dimensions[] = {
&dim_tot_hitm,
&dim_lcl_hitm,
&dim_rmt_hitm,
+ &dim_tot_peer,
+ &dim_lcl_peer,
+ &dim_rmt_peer,
&dim_cl_lcl_hitm,
&dim_cl_rmt_hitm,
+ &dim_cl_lcl_peer,
+ &dim_cl_rmt_peer,
&dim_tot_stores,
&dim_stores_l1hit,
&dim_stores_l1miss,
@@ -1688,9 +1850,11 @@ static struct c2c_dimension *dimensions[] = {
&dim_ld_rmthit,
&dim_tot_recs,
&dim_tot_loads,
- &dim_percent_hitm,
+ &dim_percent_costly_snoop,
&dim_percent_rmt_hitm,
&dim_percent_lcl_hitm,
+ &dim_percent_rmt_peer,
+ &dim_percent_lcl_peer,
&dim_percent_stores_l1hit,
&dim_percent_stores_l1miss,
&dim_percent_stores_na,
@@ -1703,6 +1867,8 @@ static struct c2c_dimension *dimensions[] = {
&dim_node,
&dim_mean_rmt,
&dim_mean_lcl,
+ &dim_mean_rmt_peer,
+ &dim_mean_lcl_peer,
&dim_mean_load,
&dim_cpucnt,
&dim_srcline,
@@ -1941,18 +2107,22 @@ static bool he__display(struct hist_entry *he, struct c2c_stats *stats)
c2c_he = container_of(he, struct c2c_hist_entry, he);
switch (c2c.display) {
- case DISPLAY_LCL:
+ case DISPLAY_LCL_HITM:
he->filtered = filter_display(c2c_he->stats.lcl_hitm,
stats->lcl_hitm);
break;
- case DISPLAY_RMT:
+ case DISPLAY_RMT_HITM:
he->filtered = filter_display(c2c_he->stats.rmt_hitm,
stats->rmt_hitm);
break;
- case DISPLAY_TOT:
+ case DISPLAY_TOT_HITM:
he->filtered = filter_display(c2c_he->stats.tot_hitm,
stats->tot_hitm);
break;
+ case DISPLAY_SNP_PEER:
+ he->filtered = filter_display(c2c_he->stats.tot_peer,
+ stats->tot_peer);
+ break;
default:
break;
}
@@ -1972,15 +2142,17 @@ static inline bool is_valid_hist_entry(struct hist_entry *he)
return true;
switch (c2c.display) {
- case DISPLAY_LCL:
+ case DISPLAY_LCL_HITM:
has_record = !!c2c_he->stats.lcl_hitm;
break;
- case DISPLAY_RMT:
+ case DISPLAY_RMT_HITM:
has_record = !!c2c_he->stats.rmt_hitm;
break;
- case DISPLAY_TOT:
+ case DISPLAY_TOT_HITM:
has_record = !!c2c_he->stats.tot_hitm;
break;
+ case DISPLAY_SNP_PEER:
+ has_record = !!c2c_he->stats.tot_peer;
default:
break;
}
@@ -2069,9 +2241,33 @@ static int resort_cl_cb(struct hist_entry *he, void *arg __maybe_unused)
return 0;
}
+static struct c2c_header header_node_0 = HEADER_LOW("Node");
+static struct c2c_header header_node_1_hitms_stores =
+ HEADER_LOW("Node{cpus %hitms %stores}");
+static struct c2c_header header_node_1_peers_stores =
+ HEADER_LOW("Node{cpus %peers %stores}");
+static struct c2c_header header_node_2 = HEADER_LOW("Node{cpu list}");
+
static void setup_nodes_header(void)
{
- dim_node.header = header_node[c2c.node_info];
+ switch (c2c.node_info) {
+ case 0:
+ dim_node.header = header_node_0;
+ break;
+ case 1:
+ if (c2c.display == DISPLAY_SNP_PEER)
+ dim_node.header = header_node_1_peers_stores;
+ else
+ dim_node.header = header_node_1_hitms_stores;
+ break;
+ case 2:
+ dim_node.header = header_node_2;
+ break;
+ default:
+ break;
+ }
+
+ return;
}
static int setup_nodes(struct perf_session *session)
@@ -2136,13 +2332,14 @@ static int setup_nodes(struct perf_session *session)
}
#define HAS_HITMS(__h) ((__h)->stats.lcl_hitm || (__h)->stats.rmt_hitm)
+#define HAS_PEER(__h) ((__h)->stats.lcl_peer || (__h)->stats.rmt_peer)
static int resort_shared_cl_cb(struct hist_entry *he, void *arg __maybe_unused)
{
struct c2c_hist_entry *c2c_he;
c2c_he = container_of(he, struct c2c_hist_entry, he);
- if (HAS_HITMS(c2c_he)) {
+ if (HAS_HITMS(c2c_he) || HAS_PEER(c2c_he)) {
c2c.shared_clines++;
c2c_add_stats(&c2c.shared_clines_stats, &c2c_he->stats);
}
@@ -2202,6 +2399,8 @@ static void print_c2c__display_stats(FILE *out)
fprintf(out, " Load LLC Misses : %10d\n", llc_misses);
fprintf(out, " Load access blocked by data : %10d\n", stats->blk_data);
fprintf(out, " Load access blocked by address : %10d\n", stats->blk_addr);
+ fprintf(out, " Load HIT Local Peer : %10d\n", stats->lcl_peer);
+ fprintf(out, " Load HIT Remote Peer : %10d\n", stats->rmt_peer);
fprintf(out, " LLC Misses to Local DRAM : %10.1f%%\n", ((double)stats->lcl_dram/(double)llc_misses) * 100.);
fprintf(out, " LLC Misses to Remote DRAM : %10.1f%%\n", ((double)stats->rmt_dram/(double)llc_misses) * 100.);
fprintf(out, " LLC Misses to Remote cache (HIT) : %10.1f%%\n", ((double)stats->rmt_hit /(double)llc_misses) * 100.);
@@ -2230,6 +2429,7 @@ static void print_shared_cacheline_info(FILE *out)
fprintf(out, " L1D hits on shared lines : %10d\n", stats->ld_l1hit);
fprintf(out, " L2D hits on shared lines : %10d\n", stats->ld_l2hit);
fprintf(out, " LLC hits on shared lines : %10d\n", stats->ld_llchit + stats->lcl_hitm);
+ fprintf(out, " Load hits on peer cache or nodes : %10d\n", stats->lcl_peer + stats->rmt_peer);
fprintf(out, " Locked Access on shared lines : %10d\n", stats->locks);
fprintf(out, " Blocked Access on shared lines : %10d\n", stats->blk_data + stats->blk_addr);
fprintf(out, " Store HITs on shared lines : %10d\n", stats->store);
@@ -2272,13 +2472,22 @@ static void print_pareto(FILE *out)
int ret;
const char *cl_output;
- cl_output = "cl_num,"
- "cl_rmt_hitm,"
- "cl_lcl_hitm,"
- "cl_stores_l1hit,"
- "cl_stores_l1miss,"
- "cl_stores_na,"
- "dcacheline";
+ if (c2c.display != DISPLAY_SNP_PEER)
+ cl_output = "cl_num,"
+ "cl_rmt_hitm,"
+ "cl_lcl_hitm,"
+ "cl_stores_l1hit,"
+ "cl_stores_l1miss,"
+ "cl_stores_na,"
+ "dcacheline";
+ else
+ cl_output = "cl_num,"
+ "cl_rmt_peer,"
+ "cl_lcl_peer,"
+ "cl_stores_l1hit,"
+ "cl_stores_l1miss,"
+ "cl_stores_na,"
+ "dcacheline";
perf_hpp_list__init(&hpp_list);
ret = hpp_list__parse(&hpp_list, cl_output, NULL);
@@ -2314,7 +2523,7 @@ static void print_c2c_info(FILE *out, struct perf_session *session)
fprintf(out, "%-36s: %s\n", first ? " Events" : "", evsel__name(evsel));
first = false;
}
- fprintf(out, " Cachelines sort on : %s HITMs\n",
+ fprintf(out, " Cachelines sort on : %s\n",
display_str[c2c.display]);
fprintf(out, " Cacheline data grouping : %s\n", c2c.cl_sort);
}
@@ -2471,7 +2680,7 @@ static int perf_c2c_browser__title(struct hist_browser *browser,
{
scnprintf(bf, size,
"Shared Data Cache Line Table "
- "(%lu entries, sorted on %s HITMs)",
+ "(%lu entries, sorted on %s)",
browser->nr_non_filtered_entries,
display_str[c2c.display]);
return 0;
@@ -2585,7 +2794,7 @@ static int ui_quirks(void)
nodestr = "CL";
}
- dim_percent_hitm.header = percent_hitm_header[c2c.display];
+ dim_percent_costly_snoop.header = percent_costly_snoop_header[c2c.display];
/* Fix the zero line for dcacheline column. */
buf = fill_line("Cacheline", dim_dcacheline.width +
@@ -2669,14 +2878,16 @@ static int setup_callchain(struct evlist *evlist)
static int setup_display(const char *str)
{
- const char *display = str ?: "tot";
+ const char *display = str;
if (!strcmp(display, "tot"))
- c2c.display = DISPLAY_TOT;
+ c2c.display = DISPLAY_TOT_HITM;
else if (!strcmp(display, "rmt"))
- c2c.display = DISPLAY_RMT;
+ c2c.display = DISPLAY_RMT_HITM;
else if (!strcmp(display, "lcl"))
- c2c.display = DISPLAY_LCL;
+ c2c.display = DISPLAY_LCL_HITM;
+ else if (!strcmp(display, "peer"))
+ c2c.display = DISPLAY_SNP_PEER;
else {
pr_err("failed: unknown display type: %s\n", str);
return -1;
@@ -2723,10 +2934,12 @@ static int build_cl_output(char *cl_sort, bool no_source)
}
if (asprintf(&c2c.cl_output,
- "%s%s%s%s%s%s%s%s%s%s",
+ "%s%s%s%s%s%s%s%s%s%s%s%s",
c2c.use_stdio ? "cl_num_empty," : "",
- "percent_rmt_hitm,"
- "percent_lcl_hitm,"
+ c2c.display == DISPLAY_SNP_PEER ? "percent_rmt_peer,"
+ "percent_lcl_peer," :
+ "percent_rmt_hitm,"
+ "percent_lcl_hitm,",
"percent_stores_l1hit,"
"percent_stores_l1miss,"
"percent_stores_na,"
@@ -2734,8 +2947,10 @@ static int build_cl_output(char *cl_sort, bool no_source)
add_pid ? "pid," : "",
add_tid ? "tid," : "",
add_iaddr ? "iaddr," : "",
- "mean_rmt,"
- "mean_lcl,"
+ c2c.display == DISPLAY_SNP_PEER ? "mean_rmt_peer,"
+ "mean_lcl_peer," :
+ "mean_rmt,"
+ "mean_lcl,",
"mean_load,"
"tot_recs,"
"cpucnt,",
@@ -2756,6 +2971,7 @@ err:
static int setup_coalesce(const char *coalesce, bool no_source)
{
const char *c = coalesce ?: coalesce_default;
+ const char *sort_str = NULL;
if (asprintf(&c2c.cl_sort, "offset,%s", c) < 0)
return -ENOMEM;
@@ -2763,12 +2979,16 @@ static int setup_coalesce(const char *coalesce, bool no_source)
if (build_cl_output(c2c.cl_sort, no_source))
return -1;
- if (asprintf(&c2c.cl_resort, "offset,%s",
- c2c.display == DISPLAY_TOT ?
- "tot_hitm" :
- c2c.display == DISPLAY_RMT ?
- "rmt_hitm,lcl_hitm" :
- "lcl_hitm,rmt_hitm") < 0)
+ if (c2c.display == DISPLAY_TOT_HITM)
+ sort_str = "tot_hitm";
+ else if (c2c.display == DISPLAY_RMT_HITM)
+ sort_str = "rmt_hitm,lcl_hitm";
+ else if (c2c.display == DISPLAY_LCL_HITM)
+ sort_str = "lcl_hitm,rmt_hitm";
+ else if (c2c.display == DISPLAY_SNP_PEER)
+ sort_str = "tot_peer";
+
+ if (asprintf(&c2c.cl_resort, "offset,%s", sort_str) < 0)
return -ENOMEM;
pr_debug("coalesce sort fields: %s\n", c2c.cl_sort);
@@ -2814,7 +3034,7 @@ static int perf_c2c__report(int argc, const char **argv)
"print_type,threshold[,print_limit],order,sort_key[,branch],value",
callchain_help, &parse_callchain_opt,
callchain_default_opt),
- OPT_STRING('d', "display", &display, "Switch HITM output type", "lcl,rmt"),
+ OPT_STRING('d', "display", &display, "Switch HITM output type", "tot,lcl,rmt,peer"),
OPT_STRING('c', "coalesce", &coalesce, "coalesce fields",
"coalesce fields: pid,tid,iaddr,dso"),
OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"),
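
The -d/--display help string now advertises the full set tot,lcl,rmt,peer; for example perf c2c report -d peer sorts and filters cache lines on the new peer-snoop statistics instead of HITMs, and the hunk below additionally makes 'peer' the implicit default when the recorded data comes from an arm64 machine, since that architecture provides no HITM flags.
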
@@ -2848,27 +3068,39 @@ static int perf_c2c__report(int argc, const char **argv)
data.path = input_name;
data.force = symbol_conf.force;
+ session = perf_session__new(&data, &c2c.tool);
+ if (IS_ERR(session)) {
+ err = PTR_ERR(session);
+ pr_debug("Error creating perf session\n");
+ goto out;
+ }
+
+ /*
+	 * Default the display type to 'tot' if the user doesn't specify one;
+	 * since the arm64 platform doesn't support the HITM flags, default to
+	 * 'peer' there instead.
+ */
+ if (!display) {
+ if (!strcmp(perf_env__arch(&session->header.env), "arm64"))
+ display = "peer";
+ else
+ display = "tot";
+ }
+
err = setup_display(display);
if (err)
- goto out;
+ goto out_session;
err = setup_coalesce(coalesce, no_source);
if (err) {
pr_debug("Failed to initialize hists\n");
- goto out;
+ goto out_session;
}
err = c2c_hists__init(&c2c.hists, "dcacheline", 2);
if (err) {
pr_debug("Failed to initialize hists\n");
- goto out;
- }
-
- session = perf_session__new(&data, &c2c.tool);
- if (IS_ERR(session)) {
- err = PTR_ERR(session);
- pr_debug("Error creating perf session\n");
- goto out;
+ goto out_session;
}
session->itrace_synth_opts = &itrace_synth_opts;
@@ -2876,7 +3108,7 @@ static int perf_c2c__report(int argc, const char **argv)
err = setup_nodes(session);
if (err) {
pr_err("Failed setup nodes\n");
- goto out;
+ goto out_session;
}
err = mem2node__init(&c2c.mem2node, &session->header.env);
@@ -2909,27 +3141,45 @@ static int perf_c2c__report(int argc, const char **argv)
goto out_mem2node;
}
- output_str = "cl_idx,"
- "dcacheline,"
- "dcacheline_node,"
- "dcacheline_count,"
- "percent_hitm,"
- "tot_hitm,lcl_hitm,rmt_hitm,"
- "tot_recs,"
- "tot_loads,"
- "tot_stores,"
- "stores_l1hit,stores_l1miss,stores_na,"
- "ld_fbhit,ld_l1hit,ld_l2hit,"
- "ld_lclhit,lcl_hitm,"
- "ld_rmthit,rmt_hitm,"
- "dram_lcl,dram_rmt";
-
- if (c2c.display == DISPLAY_TOT)
+ if (c2c.display != DISPLAY_SNP_PEER)
+ output_str = "cl_idx,"
+ "dcacheline,"
+ "dcacheline_node,"
+ "dcacheline_count,"
+ "percent_costly_snoop,"
+ "tot_hitm,lcl_hitm,rmt_hitm,"
+ "tot_recs,"
+ "tot_loads,"
+ "tot_stores,"
+ "stores_l1hit,stores_l1miss,stores_na,"
+ "ld_fbhit,ld_l1hit,ld_l2hit,"
+ "ld_lclhit,lcl_hitm,"
+ "ld_rmthit,rmt_hitm,"
+ "dram_lcl,dram_rmt";
+ else
+ output_str = "cl_idx,"
+ "dcacheline,"
+ "dcacheline_node,"
+ "dcacheline_count,"
+ "percent_costly_snoop,"
+ "tot_peer,lcl_peer,rmt_peer,"
+ "tot_recs,"
+ "tot_loads,"
+ "tot_stores,"
+ "stores_l1hit,stores_l1miss,stores_na,"
+ "ld_fbhit,ld_l1hit,ld_l2hit,"
+ "ld_lclhit,lcl_hitm,"
+ "ld_rmthit,rmt_hitm,"
+ "dram_lcl,dram_rmt";
+
+ if (c2c.display == DISPLAY_TOT_HITM)
sort_str = "tot_hitm";
- else if (c2c.display == DISPLAY_RMT)
+ else if (c2c.display == DISPLAY_RMT_HITM)
sort_str = "rmt_hitm";
- else if (c2c.display == DISPLAY_LCL)
+ else if (c2c.display == DISPLAY_LCL_HITM)
sort_str = "lcl_hitm";
+ else if (c2c.display == DISPLAY_SNP_PEER)
+ sort_str = "tot_peer";
c2c_hists__reinit(&c2c.hists, output_str, sort_str);
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 54d4e508a092..2a0f992ca0be 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -26,6 +26,7 @@
#include "util/thread.h"
#include "util/namespaces.h"
#include "util/util.h"
+#include "util/tsc.h"
#include <internal/lib.h>
@@ -35,8 +36,70 @@
#include <linux/list.h>
#include <linux/string.h>
+#include <linux/zalloc.h>
+#include <linux/hash.h>
#include <errno.h>
#include <signal.h>
+#include <inttypes.h>
+
+struct guest_event {
+ struct perf_sample sample;
+ union perf_event *event;
+ char event_buf[PERF_SAMPLE_MAX_SIZE];
+};
+
+struct guest_id {
+ /* hlist_node must be first, see free_hlist() */
+ struct hlist_node node;
+ u64 id;
+ u64 host_id;
+ u32 vcpu;
+};
+
+struct guest_tid {
+ /* hlist_node must be first, see free_hlist() */
+ struct hlist_node node;
+ /* Thread ID of QEMU thread */
+ u32 tid;
+ u32 vcpu;
+};
+
+struct guest_vcpu {
+ /* Current host CPU */
+ u32 cpu;
+ /* Thread ID of QEMU thread */
+ u32 tid;
+};
+
+struct guest_session {
+ char *perf_data_file;
+ u32 machine_pid;
+ u64 time_offset;
+ double time_scale;
+ struct perf_tool tool;
+ struct perf_data data;
+ struct perf_session *session;
+ char *tmp_file_name;
+ int tmp_fd;
+ struct perf_tsc_conversion host_tc;
+ struct perf_tsc_conversion guest_tc;
+ bool copy_kcore_dir;
+ bool have_tc;
+ bool fetched;
+ bool ready;
+ u16 dflt_id_hdr_size;
+ u64 dflt_id;
+ u64 highest_id;
+ /* Array of guest_vcpu */
+ struct guest_vcpu *vcpu;
+ size_t vcpu_cnt;
+ /* Hash table for guest_id */
+ struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
+ /* Hash table for guest_tid */
+ struct hlist_head tids[PERF_EVLIST__HLIST_SIZE];
+ /* Place to stash next guest event */
+ struct guest_event ev;
+};
struct perf_inject {
struct perf_tool tool;
@@ -59,6 +122,7 @@ struct perf_inject {
struct itrace_synth_opts itrace_synth_opts;
char event_copy[PERF_SAMPLE_MAX_SIZE];
struct perf_file_section secs[HEADER_FEAT_BITS];
+ struct guest_session guest_session;
};
struct event_entry {
@@ -698,6 +762,841 @@ found:
return perf_event__repipe(tool, event_sw, &sample_sw, machine);
}
+static struct guest_vcpu *guest_session__vcpu(struct guest_session *gs, u32 vcpu)
+{
+ if (realloc_array_as_needed(gs->vcpu, gs->vcpu_cnt, vcpu, NULL))
+ return NULL;
+ return &gs->vcpu[vcpu];
+}
+
+static int guest_session__output_bytes(struct guest_session *gs, void *buf, size_t sz)
+{
+ ssize_t ret = writen(gs->tmp_fd, buf, sz);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int guest_session__repipe(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ struct guest_session *gs = container_of(tool, struct guest_session, tool);
+
+ return guest_session__output_bytes(gs, event, event->header.size);
+}
+
+static int guest_session__map_tid(struct guest_session *gs, u32 tid, u32 vcpu)
+{
+ struct guest_tid *guest_tid = zalloc(sizeof(*guest_tid));
+ int hash;
+
+ if (!guest_tid)
+ return -ENOMEM;
+
+ guest_tid->tid = tid;
+ guest_tid->vcpu = vcpu;
+ hash = hash_32(guest_tid->tid, PERF_EVLIST__HLIST_BITS);
+ hlist_add_head(&guest_tid->node, &gs->tids[hash]);
+
+ return 0;
+}
+
+static int host_peek_vm_comms_cb(struct perf_session *session __maybe_unused,
+ union perf_event *event,
+ u64 offset __maybe_unused, void *data)
+{
+ struct guest_session *gs = data;
+ unsigned int vcpu;
+ struct guest_vcpu *guest_vcpu;
+ int ret;
+
+ if (event->header.type != PERF_RECORD_COMM ||
+ event->comm.pid != gs->machine_pid)
+ return 0;
+
+ /*
+ * QEMU option -name debug-threads=on, causes thread names formatted as
+ * below, although it is not an ABI. Also libvirt seems to use this by
+ * default. Here we rely on it to tell us which thread is which VCPU.
+ */
+ ret = sscanf(event->comm.comm, "CPU %u/KVM", &vcpu);
+ if (ret <= 0)
+ return ret;
+ pr_debug("Found VCPU: tid %u comm %s vcpu %u\n",
+ event->comm.tid, event->comm.comm, vcpu);
+ if (vcpu > INT_MAX) {
+ pr_err("Invalid VCPU %u\n", vcpu);
+ return -EINVAL;
+ }
+ guest_vcpu = guest_session__vcpu(gs, vcpu);
+ if (!guest_vcpu)
+ return -ENOMEM;
+ if (guest_vcpu->tid && guest_vcpu->tid != event->comm.tid) {
+ pr_err("Fatal error: Two threads found with the same VCPU\n");
+ return -EINVAL;
+ }
+ guest_vcpu->tid = event->comm.tid;
+
+ return guest_session__map_tid(gs, event->comm.tid, vcpu);
+}
+
+static int host_peek_vm_comms(struct perf_session *session, struct guest_session *gs)
+{
+ return perf_session__peek_events(session, session->header.data_offset,
+ session->header.data_size,
+ host_peek_vm_comms_cb, gs);
+}
+
+static bool evlist__is_id_used(struct evlist *evlist, u64 id)
+{
+ return evlist__id2sid(evlist, id);
+}
+
+static u64 guest_session__allocate_new_id(struct guest_session *gs, struct evlist *host_evlist)
+{
+ do {
+ gs->highest_id += 1;
+ } while (!gs->highest_id || evlist__is_id_used(host_evlist, gs->highest_id));
+
+ return gs->highest_id;
+}
+
+static int guest_session__map_id(struct guest_session *gs, u64 id, u64 host_id, u32 vcpu)
+{
+ struct guest_id *guest_id = zalloc(sizeof(*guest_id));
+ int hash;
+
+ if (!guest_id)
+ return -ENOMEM;
+
+ guest_id->id = id;
+ guest_id->host_id = host_id;
+ guest_id->vcpu = vcpu;
+ hash = hash_64(guest_id->id, PERF_EVLIST__HLIST_BITS);
+ hlist_add_head(&guest_id->node, &gs->heads[hash]);
+
+ return 0;
+}
+
+static u64 evlist__find_highest_id(struct evlist *evlist)
+{
+ struct evsel *evsel;
+ u64 highest_id = 1;
+
+ evlist__for_each_entry(evlist, evsel) {
+ u32 j;
+
+ for (j = 0; j < evsel->core.ids; j++) {
+ u64 id = evsel->core.id[j];
+
+ if (id > highest_id)
+ highest_id = id;
+ }
+ }
+
+ return highest_id;
+}
+
+static int guest_session__map_ids(struct guest_session *gs, struct evlist *host_evlist)
+{
+ struct evlist *evlist = gs->session->evlist;
+ struct evsel *evsel;
+ int ret;
+
+ evlist__for_each_entry(evlist, evsel) {
+ u32 j;
+
+ for (j = 0; j < evsel->core.ids; j++) {
+ struct perf_sample_id *sid;
+ u64 host_id;
+ u64 id;
+
+ id = evsel->core.id[j];
+ sid = evlist__id2sid(evlist, id);
+ if (!sid || sid->cpu.cpu == -1)
+ continue;
+ host_id = guest_session__allocate_new_id(gs, host_evlist);
+ ret = guest_session__map_id(gs, id, host_id, sid->cpu.cpu);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static struct guest_id *guest_session__lookup_id(struct guest_session *gs, u64 id)
+{
+ struct hlist_head *head;
+ struct guest_id *guest_id;
+ int hash;
+
+ hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
+ head = &gs->heads[hash];
+
+ hlist_for_each_entry(guest_id, head, node)
+ if (guest_id->id == id)
+ return guest_id;
+
+ return NULL;
+}
+
+static int process_attr(struct perf_tool *tool, union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
+
+ return perf_event__process_attr(tool, event, &inject->session->evlist);
+}
+
+static int guest_session__add_attr(struct guest_session *gs, struct evsel *evsel)
+{
+ struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
+ struct perf_event_attr attr = evsel->core.attr;
+ u64 *id_array;
+ u32 *vcpu_array;
+ int ret = -ENOMEM;
+ u32 i;
+
+ id_array = calloc(evsel->core.ids, sizeof(*id_array));
+ if (!id_array)
+ return -ENOMEM;
+
+ vcpu_array = calloc(evsel->core.ids, sizeof(*vcpu_array));
+ if (!vcpu_array)
+ goto out;
+
+ for (i = 0; i < evsel->core.ids; i++) {
+ u64 id = evsel->core.id[i];
+ struct guest_id *guest_id = guest_session__lookup_id(gs, id);
+
+ if (!guest_id) {
+ pr_err("Failed to find guest id %"PRIu64"\n", id);
+ ret = -EINVAL;
+ goto out;
+ }
+ id_array[i] = guest_id->host_id;
+ vcpu_array[i] = guest_id->vcpu;
+ }
+
+ attr.sample_type |= PERF_SAMPLE_IDENTIFIER;
+ attr.exclude_host = 1;
+ attr.exclude_guest = 0;
+
+ ret = perf_event__synthesize_attr(&inject->tool, &attr, evsel->core.ids,
+ id_array, process_attr);
+ if (ret)
+ pr_err("Failed to add guest attr.\n");
+
+ for (i = 0; i < evsel->core.ids; i++) {
+ struct perf_sample_id *sid;
+ u32 vcpu = vcpu_array[i];
+
+ sid = evlist__id2sid(inject->session->evlist, id_array[i]);
+ /* Guest event is per-thread from the host point of view */
+ sid->cpu.cpu = -1;
+ sid->tid = gs->vcpu[vcpu].tid;
+ sid->machine_pid = gs->machine_pid;
+ sid->vcpu.cpu = vcpu;
+ }
+out:
+ free(vcpu_array);
+ free(id_array);
+ return ret;
+}
+
+static int guest_session__add_attrs(struct guest_session *gs)
+{
+ struct evlist *evlist = gs->session->evlist;
+ struct evsel *evsel;
+ int ret;
+
+ evlist__for_each_entry(evlist, evsel) {
+ ret = guest_session__add_attr(gs, evsel);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int synthesize_id_index(struct perf_inject *inject, size_t new_cnt)
+{
+ struct perf_session *session = inject->session;
+ struct evlist *evlist = session->evlist;
+ struct machine *machine = &session->machines.host;
+ size_t from = evlist->core.nr_entries - new_cnt;
+
+ return __perf_event__synthesize_id_index(&inject->tool, perf_event__repipe,
+ evlist, machine, from);
+}
+
+static struct guest_tid *guest_session__lookup_tid(struct guest_session *gs, u32 tid)
+{
+ struct hlist_head *head;
+ struct guest_tid *guest_tid;
+ int hash;
+
+ hash = hash_32(tid, PERF_EVLIST__HLIST_BITS);
+ head = &gs->tids[hash];
+
+ hlist_for_each_entry(guest_tid, head, node)
+ if (guest_tid->tid == tid)
+ return guest_tid;
+
+ return NULL;
+}
+
+static bool dso__is_in_kernel_space(struct dso *dso)
+{
+ if (dso__is_vdso(dso))
+ return false;
+
+ return dso__is_kcore(dso) ||
+ dso->kernel ||
+ is_kernel_module(dso->long_name, PERF_RECORD_MISC_CPUMODE_UNKNOWN);
+}
+
+static u64 evlist__first_id(struct evlist *evlist)
+{
+ struct evsel *evsel;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (evsel->core.ids)
+ return evsel->core.id[0];
+ }
+ return 0;
+}
+
+static int process_build_id(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
+
+ return perf_event__process_build_id(inject->session, event);
+}
+
+static int synthesize_build_id(struct perf_inject *inject, struct dso *dso, pid_t machine_pid)
+{
+ struct machine *machine = perf_session__findnew_machine(inject->session, machine_pid);
+ u8 cpumode = dso__is_in_kernel_space(dso) ?
+ PERF_RECORD_MISC_GUEST_KERNEL :
+ PERF_RECORD_MISC_GUEST_USER;
+
+ if (!machine)
+ return -ENOMEM;
+
+ dso->hit = 1;
+
+ return perf_event__synthesize_build_id(&inject->tool, dso, cpumode,
+ process_build_id, machine);
+}
+
+static int guest_session__add_build_ids(struct guest_session *gs)
+{
+ struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
+ struct machine *machine = &gs->session->machines.host;
+ struct dso *dso;
+ int ret;
+
+ /* Build IDs will be put in the Build ID feature section */
+ perf_header__set_feat(&inject->session->header, HEADER_BUILD_ID);
+
+ dsos__for_each_with_build_id(dso, &machine->dsos.head) {
+ ret = synthesize_build_id(inject, dso, gs->machine_pid);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int guest_session__ksymbol_event(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ struct guest_session *gs = container_of(tool, struct guest_session, tool);
+
+ /* Only support out-of-line, i.e. no BPF support */
+ if (event->ksymbol.ksym_type != PERF_RECORD_KSYMBOL_TYPE_OOL)
+ return 0;
+
+ return guest_session__output_bytes(gs, event, event->header.size);
+}
+
+static int guest_session__start(struct guest_session *gs, const char *name, bool force)
+{
+ char tmp_file_name[] = "/tmp/perf-inject-guest_session-XXXXXX";
+ struct perf_session *session;
+ int ret;
+
+ /* Only these events will be injected */
+ gs->tool.mmap = guest_session__repipe;
+ gs->tool.mmap2 = guest_session__repipe;
+ gs->tool.comm = guest_session__repipe;
+ gs->tool.fork = guest_session__repipe;
+ gs->tool.exit = guest_session__repipe;
+ gs->tool.lost = guest_session__repipe;
+ gs->tool.context_switch = guest_session__repipe;
+ gs->tool.ksymbol = guest_session__ksymbol_event;
+ gs->tool.text_poke = guest_session__repipe;
+ /*
+ * Processing a build ID creates a struct dso with that build ID. Later,
+ * all guest dsos are iterated and the build IDs processed into the host
+ * session where they will be output to the Build ID feature section
+ * when the perf.data file header is written.
+ */
+ gs->tool.build_id = perf_event__process_build_id;
+ /* Process the id index to know what VCPU an ID belongs to */
+ gs->tool.id_index = perf_event__process_id_index;
+
+ gs->tool.ordered_events = true;
+ gs->tool.ordering_requires_timestamps = true;
+
+ gs->data.path = name;
+ gs->data.force = force;
+ gs->data.mode = PERF_DATA_MODE_READ;
+
+ session = perf_session__new(&gs->data, &gs->tool);
+ if (IS_ERR(session))
+ return PTR_ERR(session);
+ gs->session = session;
+
+ /*
+ * Initial events have zero'd ID samples. Get default ID sample size
+ * used for removing them.
+ */
+ gs->dflt_id_hdr_size = session->machines.host.id_hdr_size;
+ /* And default ID for adding back a host-compatible ID sample */
+ gs->dflt_id = evlist__first_id(session->evlist);
+ if (!gs->dflt_id) {
+ pr_err("Guest data has no sample IDs");
+ return -EINVAL;
+ }
+
+ /* Temporary file for guest events */
+ gs->tmp_file_name = strdup(tmp_file_name);
+ if (!gs->tmp_file_name)
+ return -ENOMEM;
+ gs->tmp_fd = mkstemp(gs->tmp_file_name);
+ if (gs->tmp_fd < 0)
+ return -errno;
+
+ if (zstd_init(&gs->session->zstd_data, 0) < 0)
+ pr_warning("Guest session decompression initialization failed.\n");
+
+ /*
+ * perf does not support processing 2 sessions simultaneously, so output
+ * guest events to a temporary file.
+ */
+ ret = perf_session__process_events(gs->session);
+ if (ret)
+ return ret;
+
+ if (lseek(gs->tmp_fd, 0, SEEK_SET))
+ return -errno;
+
+ return 0;
+}
+
+/* Free hlist nodes assuming hlist_node is the first member of hlist entries */
+static void free_hlist(struct hlist_head *heads, size_t hlist_sz)
+{
+ struct hlist_node *pos, *n;
+ size_t i;
+
+ for (i = 0; i < hlist_sz; ++i) {
+ hlist_for_each_safe(pos, n, &heads[i]) {
+ hlist_del(pos);
+ free(pos);
+ }
+ }
+}
+
+static void guest_session__exit(struct guest_session *gs)
+{
+ if (gs->session) {
+ perf_session__delete(gs->session);
+ free_hlist(gs->heads, PERF_EVLIST__HLIST_SIZE);
+ free_hlist(gs->tids, PERF_EVLIST__HLIST_SIZE);
+ }
+ if (gs->tmp_file_name) {
+ if (gs->tmp_fd >= 0)
+ close(gs->tmp_fd);
+ unlink(gs->tmp_file_name);
+ free(gs->tmp_file_name);
+ }
+ free(gs->vcpu);
+ free(gs->perf_data_file);
+}
+
+static void get_tsc_conv(struct perf_tsc_conversion *tc, struct perf_record_time_conv *time_conv)
+{
+ tc->time_shift = time_conv->time_shift;
+ tc->time_mult = time_conv->time_mult;
+ tc->time_zero = time_conv->time_zero;
+ tc->time_cycles = time_conv->time_cycles;
+ tc->time_mask = time_conv->time_mask;
+ tc->cap_user_time_zero = time_conv->cap_user_time_zero;
+ tc->cap_user_time_short = time_conv->cap_user_time_short;
+}
+
+static void guest_session__get_tc(struct guest_session *gs)
+{
+ struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
+
+ get_tsc_conv(&gs->host_tc, &inject->session->time_conv);
+ get_tsc_conv(&gs->guest_tc, &gs->session->time_conv);
+}
+
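+/*
+ * Convert a guest perf time to host perf time: guest time -> guest TSC,
+ * remove the guest's TSC offset and scale, then host TSC -> host time.
+ */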
+static void guest_session__convert_time(struct guest_session *gs, u64 guest_time, u64 *host_time)
+{
+ u64 tsc;
+
+ if (!guest_time) {
+ *host_time = 0;
+ return;
+ }
+
+ if (gs->guest_tc.cap_user_time_zero)
+ tsc = perf_time_to_tsc(guest_time, &gs->guest_tc);
+ else
+ tsc = guest_time;
+
+ /*
+ * This is the correct order of operations for x86 if the TSC Offset and
+ * Multiplier values are used.
+ */
+ tsc -= gs->time_offset;
+ tsc /= gs->time_scale;
+
+ if (gs->host_tc.cap_user_time_zero)
+ *host_time = tsc_to_perf_time(tsc, &gs->host_tc);
+ else
+ *host_time = tsc;
+}
+
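+/*
+ * Read the next event from the temporary guest file (header first, then the
+ * payload), parse its sample and convert its timestamp to host time.
+ */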
+static int guest_session__fetch(struct guest_session *gs)
+{
+ void *buf = gs->ev.event_buf;
+ struct perf_event_header *hdr = buf;
+ size_t hdr_sz = sizeof(*hdr);
+ ssize_t ret;
+
+ ret = readn(gs->tmp_fd, buf, hdr_sz);
+ if (ret < 0)
+ return ret;
+
+ if (!ret) {
+ /* Zero size means EOF */
+ hdr->size = 0;
+ return 0;
+ }
+
+ buf += hdr_sz;
+
+ ret = readn(gs->tmp_fd, buf, hdr->size - hdr_sz);
+ if (ret < 0)
+ return ret;
+
+ gs->ev.event = (union perf_event *)gs->ev.event_buf;
+ gs->ev.sample.time = 0;
+
+ if (hdr->type >= PERF_RECORD_USER_TYPE_START) {
+ pr_err("Unexpected type fetching guest event");
+ return 0;
+ }
+
+ ret = evlist__parse_sample(gs->session->evlist, gs->ev.event, &gs->ev.sample);
+ if (ret) {
+ pr_err("Parse failed fetching guest event");
+ return ret;
+ }
+
+ if (!gs->have_tc) {
+ guest_session__get_tc(gs);
+ gs->have_tc = true;
+ }
+
+ guest_session__convert_time(gs, gs->ev.sample.time, &gs->ev.sample.time);
+
+ return 0;
+}
+
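+/* Append a synthesized ID sample after the event payload and grow its size */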
+static int evlist__append_id_sample(struct evlist *evlist, union perf_event *ev,
+ const struct perf_sample *sample)
+{
+ struct evsel *evsel;
+ void *array;
+ int ret;
+
+ evsel = evlist__id2evsel(evlist, sample->id);
+ array = ev;
+
+ if (!evsel) {
+ pr_err("No evsel for id %"PRIu64"\n", sample->id);
+ return -EINVAL;
+ }
+
+ array += ev->header.size;
+ ret = perf_event__synthesize_id_sample(array, evsel->core.attr.sample_type, sample);
+ if (ret < 0)
+ return ret;
+
+ if (ret & 7) {
+ pr_err("Bad id sample size %d\n", ret);
+ return -EINVAL;
+ }
+
+ ev->header.size += ret;
+
+ return 0;
+}
+
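+/*
+ * Output buffered guest events whose converted time is not after @timestamp,
+ * rewriting the cpumode to guest, the sample ID to the mapped host ID, and
+ * the VCPU number to the host CPU the VCPU was last seen running on.
+ */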
+static int guest_session__inject_events(struct guest_session *gs, u64 timestamp)
+{
+ struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
+ int ret;
+
+ if (!gs->ready)
+ return 0;
+
+ while (1) {
+ struct perf_sample *sample;
+ struct guest_id *guest_id;
+ union perf_event *ev;
+ u16 id_hdr_size;
+ u8 cpumode;
+ u64 id;
+
+ if (!gs->fetched) {
+ ret = guest_session__fetch(gs);
+ if (ret)
+ return ret;
+ gs->fetched = true;
+ }
+
+ ev = gs->ev.event;
+ sample = &gs->ev.sample;
+
+ if (!ev->header.size)
+ return 0; /* EOF */
+
+ if (sample->time > timestamp)
+ return 0;
+
+ /* Change cpumode to guest */
+ cpumode = ev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+ if (cpumode & PERF_RECORD_MISC_USER)
+ cpumode = PERF_RECORD_MISC_GUEST_USER;
+ else
+ cpumode = PERF_RECORD_MISC_GUEST_KERNEL;
+ ev->header.misc &= ~PERF_RECORD_MISC_CPUMODE_MASK;
+ ev->header.misc |= cpumode;
+
+ id = sample->id;
+ if (!id) {
+ id = gs->dflt_id;
+ id_hdr_size = gs->dflt_id_hdr_size;
+ } else {
+ struct evsel *evsel = evlist__id2evsel(gs->session->evlist, id);
+
+ id_hdr_size = evsel__id_hdr_size(evsel);
+ }
+
+ if (id_hdr_size & 7) {
+ pr_err("Bad id_hdr_size %u\n", id_hdr_size);
+ return -EINVAL;
+ }
+
+ if (ev->header.size & 7) {
+ pr_err("Bad event size %u\n", ev->header.size);
+ return -EINVAL;
+ }
+
+ /* Remove guest id sample */
+ ev->header.size -= id_hdr_size;
+
+ if (ev->header.size & 7) {
+ pr_err("Bad raw event size %u\n", ev->header.size);
+ return -EINVAL;
+ }
+
+ guest_id = guest_session__lookup_id(gs, id);
+ if (!guest_id) {
+ pr_err("Guest event with unknown id %llu\n",
+ (unsigned long long)id);
+ return -EINVAL;
+ }
+
+ /* Change to host ID to avoid conflicting ID values */
+ sample->id = guest_id->host_id;
+ sample->stream_id = guest_id->host_id;
+
+ if (sample->cpu != (u32)-1) {
+ if (sample->cpu >= gs->vcpu_cnt) {
+ pr_err("Guest event with unknown VCPU %u\n",
+ sample->cpu);
+ return -EINVAL;
+ }
+ /* Change to host CPU instead of guest VCPU */
+ sample->cpu = gs->vcpu[sample->cpu].cpu;
+ }
+
+ /* New id sample with new ID and CPU */
+ ret = evlist__append_id_sample(inject->session->evlist, ev, sample);
+ if (ret)
+ return ret;
+
+ if (ev->header.size & 7) {
+ pr_err("Bad new event size %u\n", ev->header.size);
+ return -EINVAL;
+ }
+
+ gs->fetched = false;
+
+ ret = output_bytes(inject, ev, ev->header.size);
+ if (ret)
+ return ret;
+ }
+}
+
+static int guest_session__flush_events(struct guest_session *gs)
+{
+ return guest_session__inject_events(gs, -1);
+}
+
+static int host__repipe(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
+ int ret;
+
+ ret = guest_session__inject_events(&inject->guest_session, sample->time);
+ if (ret)
+ return ret;
+
+ return perf_event__repipe(tool, event, sample, machine);
+}
+
+static int host__finished_init(struct perf_session *session, union perf_event *event)
+{
+ struct perf_inject *inject = container_of(session->tool, struct perf_inject, tool);
+ struct guest_session *gs = &inject->guest_session;
+ int ret;
+
+ /*
+ * Peek through host COMM events to find QEMU threads and the VCPU they
+ * are running.
+ */
+ ret = host_peek_vm_comms(session, gs);
+ if (ret)
+ return ret;
+
+ if (!gs->vcpu_cnt) {
+ pr_err("No VCPU threads found for pid %u\n", gs->machine_pid);
+ return -EINVAL;
+ }
+
+ /*
+ * Allocate new (unused) host sample IDs and map them to the guest IDs.
+ */
+ gs->highest_id = evlist__find_highest_id(session->evlist);
+ ret = guest_session__map_ids(gs, session->evlist);
+ if (ret)
+ return ret;
+
+ ret = guest_session__add_attrs(gs);
+ if (ret)
+ return ret;
+
+ ret = synthesize_id_index(inject, gs->session->evlist->core.nr_entries);
+ if (ret) {
+ pr_err("Failed to synthesize id_index\n");
+ return ret;
+ }
+
+ ret = guest_session__add_build_ids(gs);
+ if (ret) {
+ pr_err("Failed to add guest build IDs\n");
+ return ret;
+ }
+
+ gs->ready = true;
+
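+ /* Inject guest events that have no timestamp (sample time 0) */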
+ ret = guest_session__inject_events(gs, 0);
+ if (ret)
+ return ret;
+
+ return perf_event__repipe_op2_synth(session, event);
+}
+
+/*
+ * Obey finished-round ordering. The FINISHED_ROUND event is first processed
+ * which flushes host events to file up until the last flush time. Then inject
+ * guest events up to the same time. Finally write out the FINISHED_ROUND event
+ * itself.
+ */
+static int host__finished_round(struct perf_tool *tool,
+ union perf_event *event,
+ struct ordered_events *oe)
+{
+ struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
+ int ret = perf_event__process_finished_round(tool, event, oe);
+ u64 timestamp = ordered_events__last_flush_time(oe);
+
+ if (ret)
+ return ret;
+
+ ret = guest_session__inject_events(&inject->guest_session, timestamp);
+ if (ret)
+ return ret;
+
+ return perf_event__repipe_oe_synth(tool, event, oe);
+}
+
+static int host__context_switch(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
+ bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
+ struct guest_session *gs = &inject->guest_session;
+ u32 pid = event->context_switch.next_prev_pid;
+ u32 tid = event->context_switch.next_prev_tid;
+ struct guest_tid *guest_tid;
+ u32 vcpu;
+
+ if (out || pid != gs->machine_pid)
+ goto out;
+
+ guest_tid = guest_session__lookup_tid(gs, tid);
+ if (!guest_tid)
+ goto out;
+
+ if (sample->cpu == (u32)-1) {
+ pr_err("Switch event does not have CPU\n");
+ return -EINVAL;
+ }
+
+ vcpu = guest_tid->vcpu;
+ if (vcpu >= gs->vcpu_cnt)
+ return -EINVAL;
+
+ /* Guest is switching in, record which CPU the VCPU is now running on */
+ gs->vcpu[vcpu].cpu = sample->cpu;
+out:
+ return host__repipe(tool, event, sample, machine);
+}
+
static void sig_handler(int sig __maybe_unused)
{
session_done = 1;
@@ -767,6 +1666,61 @@ static int parse_vm_time_correlation(const struct option *opt, const char *str,
return inject->itrace_synth_opts.vm_tm_corr_args ? 0 : -ENOMEM;
}
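+/*
+ * Parse --guest-data=<file>,<machine-pid>[,<time-offset>[,<time-scale>]]
+ * e.g. (illustrative values only) --guest-data=guest-perf.data,12345,0,1
+ */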
+static int parse_guest_data(const struct option *opt, const char *str, int unset)
+{
+ struct perf_inject *inject = opt->value;
+ struct guest_session *gs = &inject->guest_session;
+ char *tok;
+ char *s;
+
+ if (unset)
+ return 0;
+
+ if (!str)
+ goto bad_args;
+
+ s = strdup(str);
+ if (!s)
+ return -ENOMEM;
+
+ gs->perf_data_file = strsep(&s, ",");
+ if (!gs->perf_data_file)
+ goto bad_args;
+
+ gs->copy_kcore_dir = has_kcore_dir(gs->perf_data_file);
+ if (gs->copy_kcore_dir)
+ inject->output.is_dir = true;
+
+ tok = strsep(&s, ",");
+ if (!tok)
+ goto bad_args;
+ gs->machine_pid = strtoul(tok, NULL, 0);
+ if (!inject->guest_session.machine_pid)
+ goto bad_args;
+
+ gs->time_scale = 1;
+
+ tok = strsep(&s, ",");
+ if (!tok)
+ goto out;
+ gs->time_offset = strtoull(tok, NULL, 0);
+
+ tok = strsep(&s, ",");
+ if (!tok)
+ goto out;
+ gs->time_scale = strtod(tok, NULL);
+ if (!gs->time_scale)
+ goto bad_args;
+out:
+ return 0;
+
+bad_args:
+ pr_err("--guest-data option requires guest perf.data file name, "
+ "guest machine PID, and optionally guest timestamp offset, "
+ "and guest timestamp scale factor, separated by commas.\n");
+ return -1;
+}
+
static int save_section_info_cb(struct perf_file_section *section,
struct perf_header *ph __maybe_unused,
int feat, int fd __maybe_unused, void *data)
@@ -809,7 +1763,7 @@ static bool keep_feat(int feat)
case HEADER_CPU_PMU_CAPS:
case HEADER_CLOCK_DATA:
case HEADER_HYBRID_TOPOLOGY:
- case HEADER_HYBRID_CPU_PMU_CAPS:
+ case HEADER_PMU_CAPS:
return true;
/* Information that can be updated */
case HEADER_BUILD_ID:
@@ -896,6 +1850,22 @@ static int copy_kcore_dir(struct perf_inject *inject)
return ret;
}
+static int guest_session__copy_kcore_dir(struct guest_session *gs)
+{
+ struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
+ char *cmd;
+ int ret;
+
+ ret = asprintf(&cmd, "cp -r -n %s/kcore_dir %s/kcore_dir__%u >/dev/null 2>&1",
+ gs->perf_data_file, inject->output.path, gs->machine_pid);
+ if (ret < 0)
+ return ret;
+ pr_debug("%s\n", cmd);
+ ret = system(cmd);
+ free(cmd);
+ return ret;
+}
+
static int output_fd(struct perf_inject *inject)
{
return inject->in_place_update ? -1 : perf_data__fd(&inject->output);
@@ -904,6 +1874,7 @@ static int output_fd(struct perf_inject *inject)
static int __cmd_inject(struct perf_inject *inject)
{
int ret = -EINVAL;
+ struct guest_session *gs = &inject->guest_session;
struct perf_session *session = inject->session;
int fd = output_fd(inject);
u64 output_data_offset;
@@ -968,6 +1939,47 @@ static int __cmd_inject(struct perf_inject *inject)
output_data_offset = roundup(8192 + session->header.data_offset, 4096);
if (inject->strip)
strip_init(inject);
+ } else if (gs->perf_data_file) {
+ char *name = gs->perf_data_file;
+
+ /*
+ * Not strictly necessary, but keep these events in order wrt
+ * guest events.
+ */
+ inject->tool.mmap = host__repipe;
+ inject->tool.mmap2 = host__repipe;
+ inject->tool.comm = host__repipe;
+ inject->tool.fork = host__repipe;
+ inject->tool.exit = host__repipe;
+ inject->tool.lost = host__repipe;
+ inject->tool.context_switch = host__repipe;
+ inject->tool.ksymbol = host__repipe;
+ inject->tool.text_poke = host__repipe;
+ /*
+ * Once the host session has initialized, set up sample ID
+ * mapping and feed in guest attrs, build IDs and initial
+ * events.
+ */
+ inject->tool.finished_init = host__finished_init;
+ /* Obey finished round ordering */
+ inject->tool.finished_round = host__finished_round;
+ /* Keep track of which CPU a VCPU is running on */
+ inject->tool.context_switch = host__context_switch;
+ /*
+ * Must order events to be able to obey finished round
+ * ordering.
+ */
+ inject->tool.ordered_events = true;
+ inject->tool.ordering_requires_timestamps = true;
+ /* Set up a separate session to process guest perf.data file */
+ ret = guest_session__start(gs, name, session->data->force);
+ if (ret) {
+ pr_err("Failed to process %s, error %d\n", name, ret);
+ return ret;
+ }
+ /* Allow space in the header for guest attributes */
+ output_data_offset += gs->session->header.data_offset;
+ output_data_offset = roundup(output_data_offset, 4096);
}
if (!inject->itrace_synth_opts.set)
@@ -980,6 +1992,18 @@ static int __cmd_inject(struct perf_inject *inject)
if (ret)
return ret;
+ if (gs->session) {
+ /*
+ * Remaining guest events have later timestamps. Flush them
+ * out to file.
+ */
+ ret = guest_session__flush_events(gs);
+ if (ret) {
+ pr_err("Failed to flush guest events\n");
+ return ret;
+ }
+ }
+
if (!inject->is_pipe && !inject->in_place_update) {
struct inject_fc inj_fc = {
.fc.copy = feat_copy_cb,
@@ -1014,8 +2038,17 @@ static int __cmd_inject(struct perf_inject *inject)
if (inject->copy_kcore_dir) {
ret = copy_kcore_dir(inject);
- if (ret)
+ if (ret) {
+ pr_err("Failed to copy kcore\n");
return ret;
+ }
+ }
+ if (gs->copy_kcore_dir) {
+ ret = guest_session__copy_kcore_dir(gs);
+ if (ret) {
+ pr_err("Failed to copy guest kcore\n");
+ return ret;
+ }
}
}
@@ -1061,6 +2094,7 @@ int cmd_inject(int argc, const char **argv)
.stat = perf_event__repipe_op2_synth,
.stat_round = perf_event__repipe_op2_synth,
.feature = perf_event__repipe_op2_synth,
+ .finished_init = perf_event__repipe_op2_synth,
.compressed = perf_event__repipe_op4_synth,
.auxtrace = perf_event__repipe_auxtrace,
},
@@ -1112,6 +2146,12 @@ int cmd_inject(int argc, const char **argv)
OPT_CALLBACK_OPTARG(0, "vm-time-correlation", &inject, NULL, "opts",
"correlate time between VM guests and the host",
parse_vm_time_correlation),
+ OPT_CALLBACK_OPTARG(0, "guest-data", &inject, NULL, "opts",
+ "inject events from a guest perf.data file",
+ parse_guest_data),
+ OPT_STRING(0, "guestmount", &symbol_conf.guestmount, "directory",
+ "guest mount directory under which every guest os"
+ " instance has a subdir"),
OPT_END()
};
const char * const inject_usage[] = {
@@ -1242,6 +2282,8 @@ int cmd_inject(int argc, const char **argv)
ret = __cmd_inject(&inject);
+ guest_session__exit(&inject.guest_session);
+
out_delete:
zstd_fini(&(inject.session->zstd_data));
perf_session__delete(inject.session);
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 3696ae97f149..7d9ec1bac1a2 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -1638,14 +1638,14 @@ int cmd_kvm(int argc, const char **argv)
return __cmd_record(file_name, argc, argv);
else if (strlen(argv[0]) > 2 && strstarts("report", argv[0]))
return __cmd_report(file_name, argc, argv);
- else if (!strncmp(argv[0], "diff", 4))
+ else if (strlen(argv[0]) > 2 && strstarts("diff", argv[0]))
return cmd_diff(argc, argv);
- else if (!strncmp(argv[0], "top", 3))
+ else if (!strcmp(argv[0], "top"))
return cmd_top(argc, argv);
- else if (!strncmp(argv[0], "buildid-list", 12))
+ else if (strlen(argv[0]) > 2 && strstarts("buildid-list", argv[0]))
return __cmd_buildid_list(file_name, argc, argv);
#ifdef HAVE_KVM_STAT_SUPPORT
- else if (!strncmp(argv[0], "stat", 4))
+ else if (strlen(argv[0]) > 2 && strstarts("stat", argv[0]))
return kvm_cmd_stat(file_name, argc, argv);
#endif
else
diff --git a/tools/perf/builtin-kwork.c b/tools/perf/builtin-kwork.c
new file mode 100644
index 000000000000..fb8c63656ad8
--- /dev/null
+++ b/tools/perf/builtin-kwork.c
@@ -0,0 +1,1832 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * builtin-kwork.c
+ *
+ * Copyright (c) 2022 Huawei Inc, Yang Jihong <yangjihong1@huawei.com>
+ */
+
+#include "builtin.h"
+
+#include "util/data.h"
+#include "util/kwork.h"
+#include "util/debug.h"
+#include "util/symbol.h"
+#include "util/thread.h"
+#include "util/string2.h"
+#include "util/callchain.h"
+#include "util/evsel_fprintf.h"
+
+#include <subcmd/pager.h>
+#include <subcmd/parse-options.h>
+
+#include <errno.h>
+#include <inttypes.h>
+#include <linux/err.h>
+#include <linux/time64.h>
+#include <linux/zalloc.h>
+
+/*
+ * report header elements width
+ */
+#define PRINT_CPU_WIDTH 4
+#define PRINT_COUNT_WIDTH 9
+#define PRINT_RUNTIME_WIDTH 10
+#define PRINT_LATENCY_WIDTH 10
+#define PRINT_TIMESTAMP_WIDTH 17
+#define PRINT_KWORK_NAME_WIDTH 30
+#define RPINT_DECIMAL_WIDTH 3
+#define PRINT_BRACKETPAIR_WIDTH 2
+#define PRINT_TIME_UNIT_SEC_WIDTH 2
+#define PRINT_TIME_UNIT_MESC_WIDTH 3
+#define PRINT_RUNTIME_HEADER_WIDTH (PRINT_RUNTIME_WIDTH + PRINT_TIME_UNIT_MESC_WIDTH)
+#define PRINT_LATENCY_HEADER_WIDTH (PRINT_LATENCY_WIDTH + PRINT_TIME_UNIT_MESC_WIDTH)
+#define PRINT_TIMEHIST_CPU_WIDTH (PRINT_CPU_WIDTH + PRINT_BRACKETPAIR_WIDTH)
+#define PRINT_TIMESTAMP_HEADER_WIDTH (PRINT_TIMESTAMP_WIDTH + PRINT_TIME_UNIT_SEC_WIDTH)
+
+struct sort_dimension {
+ const char *name;
+ int (*cmp)(struct kwork_work *l, struct kwork_work *r);
+ struct list_head list;
+};
+
+static int id_cmp(struct kwork_work *l, struct kwork_work *r)
+{
+ if (l->cpu > r->cpu)
+ return 1;
+ if (l->cpu < r->cpu)
+ return -1;
+
+ if (l->id > r->id)
+ return 1;
+ if (l->id < r->id)
+ return -1;
+
+ return 0;
+}
+
+static int count_cmp(struct kwork_work *l, struct kwork_work *r)
+{
+ if (l->nr_atoms > r->nr_atoms)
+ return 1;
+ if (l->nr_atoms < r->nr_atoms)
+ return -1;
+
+ return 0;
+}
+
+static int runtime_cmp(struct kwork_work *l, struct kwork_work *r)
+{
+ if (l->total_runtime > r->total_runtime)
+ return 1;
+ if (l->total_runtime < r->total_runtime)
+ return -1;
+
+ return 0;
+}
+
+static int max_runtime_cmp(struct kwork_work *l, struct kwork_work *r)
+{
+ if (l->max_runtime > r->max_runtime)
+ return 1;
+ if (l->max_runtime < r->max_runtime)
+ return -1;
+
+ return 0;
+}
+
+static int avg_latency_cmp(struct kwork_work *l, struct kwork_work *r)
+{
+ u64 avgl, avgr;
+
+ if (!r->nr_atoms)
+ return 1;
+ if (!l->nr_atoms)
+ return -1;
+
+ avgl = l->total_latency / l->nr_atoms;
+ avgr = r->total_latency / r->nr_atoms;
+
+ if (avgl > avgr)
+ return 1;
+ if (avgl < avgr)
+ return -1;
+
+ return 0;
+}
+
+static int max_latency_cmp(struct kwork_work *l, struct kwork_work *r)
+{
+ if (l->max_latency > r->max_latency)
+ return 1;
+ if (l->max_latency < r->max_latency)
+ return -1;
+
+ return 0;
+}
+
+static int sort_dimension__add(struct perf_kwork *kwork __maybe_unused,
+ const char *tok, struct list_head *list)
+{
+ size_t i;
+ static struct sort_dimension max_sort_dimension = {
+ .name = "max",
+ .cmp = max_runtime_cmp,
+ };
+ static struct sort_dimension id_sort_dimension = {
+ .name = "id",
+ .cmp = id_cmp,
+ };
+ static struct sort_dimension runtime_sort_dimension = {
+ .name = "runtime",
+ .cmp = runtime_cmp,
+ };
+ static struct sort_dimension count_sort_dimension = {
+ .name = "count",
+ .cmp = count_cmp,
+ };
+ static struct sort_dimension avg_sort_dimension = {
+ .name = "avg",
+ .cmp = avg_latency_cmp,
+ };
+ struct sort_dimension *available_sorts[] = {
+ &id_sort_dimension,
+ &max_sort_dimension,
+ &count_sort_dimension,
+ &runtime_sort_dimension,
+ &avg_sort_dimension,
+ };
+
+ if (kwork->report == KWORK_REPORT_LATENCY)
+ max_sort_dimension.cmp = max_latency_cmp;
+
+ for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
+ if (!strcmp(available_sorts[i]->name, tok)) {
+ list_add_tail(&available_sorts[i]->list, list);
+ return 0;
+ }
+ }
+
+ return -1;
+}
+
+static void setup_sorting(struct perf_kwork *kwork,
+ const struct option *options,
+ const char * const usage_msg[])
+{
+ char *tmp, *tok, *str = strdup(kwork->sort_order);
+
+ for (tok = strtok_r(str, ", ", &tmp);
+ tok; tok = strtok_r(NULL, ", ", &tmp)) {
+ if (sort_dimension__add(kwork, tok, &kwork->sort_list) < 0)
+ usage_with_options_msg(usage_msg, options,
+ "Unknown --sort key: `%s'", tok);
+ }
+
+ pr_debug("Sort order: %s\n", kwork->sort_order);
+ free(str);
+}
+
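+/*
+ * Atoms are allocated from pages of NR_ATOM_PER_PAGE entries tracked by a
+ * bitmap; a new page is allocated only when all existing pages are full.
+ */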
+static struct kwork_atom *atom_new(struct perf_kwork *kwork,
+ struct perf_sample *sample)
+{
+ unsigned long i;
+ struct kwork_atom_page *page;
+ struct kwork_atom *atom = NULL;
+
+ list_for_each_entry(page, &kwork->atom_page_list, list) {
+ if (!bitmap_full(page->bitmap, NR_ATOM_PER_PAGE)) {
+ i = find_first_zero_bit(page->bitmap, NR_ATOM_PER_PAGE);
+ BUG_ON(i >= NR_ATOM_PER_PAGE);
+ atom = &page->atoms[i];
+ goto found_atom;
+ }
+ }
+
+ /*
+ * new page
+ */
+ page = zalloc(sizeof(*page));
+ if (page == NULL) {
+ pr_err("Failed to zalloc kwork atom page\n");
+ return NULL;
+ }
+
+ i = 0;
+ atom = &page->atoms[0];
+ list_add_tail(&page->list, &kwork->atom_page_list);
+
+found_atom:
+ set_bit(i, page->bitmap);
+ atom->time = sample->time;
+ atom->prev = NULL;
+ atom->page_addr = page;
+ atom->bit_inpage = i;
+ return atom;
+}
+
+static void atom_free(struct kwork_atom *atom)
+{
+ if (atom->prev != NULL)
+ atom_free(atom->prev);
+
+ clear_bit(atom->bit_inpage,
+ ((struct kwork_atom_page *)atom->page_addr)->bitmap);
+}
+
+static void atom_del(struct kwork_atom *atom)
+{
+ list_del(&atom->list);
+ atom_free(atom);
+}
+
+static int work_cmp(struct list_head *list,
+ struct kwork_work *l, struct kwork_work *r)
+{
+ int ret = 0;
+ struct sort_dimension *sort;
+
+ BUG_ON(list_empty(list));
+
+ list_for_each_entry(sort, list, list) {
+ ret = sort->cmp(l, r);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static struct kwork_work *work_search(struct rb_root_cached *root,
+ struct kwork_work *key,
+ struct list_head *sort_list)
+{
+ int cmp;
+ struct kwork_work *work;
+ struct rb_node *node = root->rb_root.rb_node;
+
+ while (node) {
+ work = container_of(node, struct kwork_work, node);
+ cmp = work_cmp(sort_list, key, work);
+ if (cmp > 0)
+ node = node->rb_left;
+ else if (cmp < 0)
+ node = node->rb_right;
+ else {
+ if (work->name == NULL)
+ work->name = key->name;
+ return work;
+ }
+ }
+ return NULL;
+}
+
+static void work_insert(struct rb_root_cached *root,
+ struct kwork_work *key, struct list_head *sort_list)
+{
+ int cmp;
+ bool leftmost = true;
+ struct kwork_work *cur;
+ struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
+
+ while (*new) {
+ cur = container_of(*new, struct kwork_work, node);
+ parent = *new;
+ cmp = work_cmp(sort_list, key, cur);
+
+ if (cmp > 0)
+ new = &((*new)->rb_left);
+ else {
+ new = &((*new)->rb_right);
+ leftmost = false;
+ }
+ }
+
+ rb_link_node(&key->node, parent, new);
+ rb_insert_color_cached(&key->node, root, leftmost);
+}
+
+static struct kwork_work *work_new(struct kwork_work *key)
+{
+ int i;
+ struct kwork_work *work = zalloc(sizeof(*work));
+
+ if (work == NULL) {
+ pr_err("Failed to zalloc kwork work\n");
+ return NULL;
+ }
+
+ for (i = 0; i < KWORK_TRACE_MAX; i++)
+ INIT_LIST_HEAD(&work->atom_list[i]);
+
+ work->id = key->id;
+ work->cpu = key->cpu;
+ work->name = key->name;
+ work->class = key->class;
+ return work;
+}
+
+static struct kwork_work *work_findnew(struct rb_root_cached *root,
+ struct kwork_work *key,
+ struct list_head *sort_list)
+{
+ struct kwork_work *work = work_search(root, key, sort_list);
+
+ if (work != NULL)
+ return work;
+
+ work = work_new(key);
+ if (work)
+ work_insert(root, work, sort_list);
+
+ return work;
+}
+
+static void profile_update_timespan(struct perf_kwork *kwork,
+ struct perf_sample *sample)
+{
+ if (!kwork->summary)
+ return;
+
+ if ((kwork->timestart == 0) || (kwork->timestart > sample->time))
+ kwork->timestart = sample->time;
+
+ if (kwork->timeend < sample->time)
+ kwork->timeend = sample->time;
+}
+
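+/* Filter work by the requested CPU list, time window and profile name */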
+static bool profile_event_match(struct perf_kwork *kwork,
+ struct kwork_work *work,
+ struct perf_sample *sample)
+{
+ int cpu = work->cpu;
+ u64 time = sample->time;
+ struct perf_time_interval *ptime = &kwork->ptime;
+
+ if ((kwork->cpu_list != NULL) && !test_bit(cpu, kwork->cpu_bitmap))
+ return false;
+
+ if (((ptime->start != 0) && (ptime->start > time)) ||
+ ((ptime->end != 0) && (ptime->end < time)))
+ return false;
+
+ if ((kwork->profile_name != NULL) &&
+ (work->name != NULL) &&
+ (strcmp(work->name, kwork->profile_name) != 0))
+ return false;
+
+ profile_update_timespan(kwork, sample);
+ return true;
+}
+
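+/*
+ * Record a new atom for the src_type event and, if a pending dst_type atom
+ * exists, unlink it and remember it as the new atom's predecessor.
+ */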
+static int work_push_atom(struct perf_kwork *kwork,
+ struct kwork_class *class,
+ enum kwork_trace_type src_type,
+ enum kwork_trace_type dst_type,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine,
+ struct kwork_work **ret_work)
+{
+ struct kwork_atom *atom, *dst_atom;
+ struct kwork_work *work, key;
+
+ BUG_ON(class->work_init == NULL);
+ class->work_init(class, &key, evsel, sample, machine);
+
+ atom = atom_new(kwork, sample);
+ if (atom == NULL)
+ return -1;
+
+ work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
+ if (work == NULL) {
+ free(atom);
+ return -1;
+ }
+
+ if (!profile_event_match(kwork, work, sample))
+ return 0;
+
+ if (dst_type < KWORK_TRACE_MAX) {
+ dst_atom = list_last_entry_or_null(&work->atom_list[dst_type],
+ struct kwork_atom, list);
+ if (dst_atom != NULL) {
+ atom->prev = dst_atom;
+ list_del(&dst_atom->list);
+ }
+ }
+
+ if (ret_work != NULL)
+ *ret_work = work;
+
+ list_add_tail(&atom->list, &work->atom_list[src_type]);
+
+ return 0;
+}
+
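+/*
+ * Return the pending dst_type atom to pair with this src_type event, or
+ * queue a new src_type atom (and return NULL) if none is pending.
+ */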
+static struct kwork_atom *work_pop_atom(struct perf_kwork *kwork,
+ struct kwork_class *class,
+ enum kwork_trace_type src_type,
+ enum kwork_trace_type dst_type,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine,
+ struct kwork_work **ret_work)
+{
+ struct kwork_atom *atom, *src_atom;
+ struct kwork_work *work, key;
+
+ BUG_ON(class->work_init == NULL);
+ class->work_init(class, &key, evsel, sample, machine);
+
+ work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
+ if (ret_work != NULL)
+ *ret_work = work;
+
+ if (work == NULL)
+ return NULL;
+
+ if (!profile_event_match(kwork, work, sample))
+ return NULL;
+
+ atom = list_last_entry_or_null(&work->atom_list[dst_type],
+ struct kwork_atom, list);
+ if (atom != NULL)
+ return atom;
+
+ src_atom = atom_new(kwork, sample);
+ if (src_atom != NULL)
+ list_add_tail(&src_atom->list, &work->atom_list[src_type]);
+ else {
+ if (ret_work != NULL)
+ *ret_work = NULL;
+ }
+
+ return NULL;
+}
+
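+/* Accumulate runtime (exit - entry) and track the maximum runtime span */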
+static void report_update_exit_event(struct kwork_work *work,
+ struct kwork_atom *atom,
+ struct perf_sample *sample)
+{
+ u64 delta;
+ u64 exit_time = sample->time;
+ u64 entry_time = atom->time;
+
+ if ((entry_time != 0) && (exit_time >= entry_time)) {
+ delta = exit_time - entry_time;
+ if ((delta > work->max_runtime) ||
+ (work->max_runtime == 0)) {
+ work->max_runtime = delta;
+ work->max_runtime_start = entry_time;
+ work->max_runtime_end = exit_time;
+ }
+ work->total_runtime += delta;
+ work->nr_atoms++;
+ }
+}
+
+static int report_entry_event(struct perf_kwork *kwork,
+ struct kwork_class *class,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ return work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
+ KWORK_TRACE_MAX, evsel, sample,
+ machine, NULL);
+}
+
+static int report_exit_event(struct perf_kwork *kwork,
+ struct kwork_class *class,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct kwork_atom *atom = NULL;
+ struct kwork_work *work = NULL;
+
+ atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
+ KWORK_TRACE_ENTRY, evsel, sample,
+ machine, &work);
+ if (work == NULL)
+ return -1;
+
+ if (atom != NULL) {
+ report_update_exit_event(work, atom, sample);
+ atom_del(atom);
+ }
+
+ return 0;
+}
+
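+/* Accumulate latency (entry - raise) and track the maximum latency span */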
+static void latency_update_entry_event(struct kwork_work *work,
+ struct kwork_atom *atom,
+ struct perf_sample *sample)
+{
+ u64 delta;
+ u64 entry_time = sample->time;
+ u64 raise_time = atom->time;
+
+ if ((raise_time != 0) && (entry_time >= raise_time)) {
+ delta = entry_time - raise_time;
+ if ((delta > work->max_latency) ||
+ (work->max_latency == 0)) {
+ work->max_latency = delta;
+ work->max_latency_start = raise_time;
+ work->max_latency_end = entry_time;
+ }
+ work->total_latency += delta;
+ work->nr_atoms++;
+ }
+}
+
+static int latency_raise_event(struct perf_kwork *kwork,
+ struct kwork_class *class,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ return work_push_atom(kwork, class, KWORK_TRACE_RAISE,
+ KWORK_TRACE_MAX, evsel, sample,
+ machine, NULL);
+}
+
+static int latency_entry_event(struct perf_kwork *kwork,
+ struct kwork_class *class,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct kwork_atom *atom = NULL;
+ struct kwork_work *work = NULL;
+
+ atom = work_pop_atom(kwork, class, KWORK_TRACE_ENTRY,
+ KWORK_TRACE_RAISE, evsel, sample,
+ machine, &work);
+ if (work == NULL)
+ return -1;
+
+ if (atom != NULL) {
+ latency_update_entry_event(work, atom, sample);
+ atom_del(atom);
+ }
+
+ return 0;
+}
+
+static void timehist_save_callchain(struct perf_kwork *kwork,
+ struct perf_sample *sample,
+ struct evsel *evsel,
+ struct machine *machine)
+{
+ struct symbol *sym;
+ struct thread *thread;
+ struct callchain_cursor_node *node;
+ struct callchain_cursor *cursor = &callchain_cursor;
+
+ if (!kwork->show_callchain || sample->callchain == NULL)
+ return;
+
+ /* want main thread for process - has maps */
+ thread = machine__findnew_thread(machine, sample->pid, sample->pid);
+ if (thread == NULL) {
+ pr_debug("Failed to get thread for pid %d\n", sample->pid);
+ return;
+ }
+
+ if (thread__resolve_callchain(thread, cursor, evsel, sample,
+ NULL, NULL, kwork->max_stack + 2) != 0) {
+ pr_debug("Failed to resolve callchain, skipping\n");
+ goto out_put;
+ }
+
+ callchain_cursor_commit(cursor);
+
+ while (true) {
+ node = callchain_cursor_current(cursor);
+ if (node == NULL)
+ break;
+
+ sym = node->ms.sym;
+ if (sym) {
+ if (!strcmp(sym->name, "__softirqentry_text_start") ||
+ !strcmp(sym->name, "__do_softirq"))
+ sym->ignore = 1;
+ }
+
+ callchain_cursor_advance(cursor);
+ }
+
+out_put:
+ thread__put(thread);
+}
+
+static void timehist_print_event(struct perf_kwork *kwork,
+ struct kwork_work *work,
+ struct kwork_atom *atom,
+ struct perf_sample *sample,
+ struct addr_location *al)
+{
+ char entrytime[32], exittime[32];
+ char kwork_name[PRINT_KWORK_NAME_WIDTH];
+
+ /*
+ * runtime start
+ */
+ timestamp__scnprintf_usec(atom->time,
+ entrytime, sizeof(entrytime));
+ printf(" %*s ", PRINT_TIMESTAMP_WIDTH, entrytime);
+
+ /*
+ * runtime end
+ */
+ timestamp__scnprintf_usec(sample->time,
+ exittime, sizeof(exittime));
+ printf(" %*s ", PRINT_TIMESTAMP_WIDTH, exittime);
+
+ /*
+ * cpu
+ */
+ printf(" [%0*d] ", PRINT_CPU_WIDTH, work->cpu);
+
+ /*
+ * kwork name
+ */
+ if (work->class && work->class->work_name) {
+ work->class->work_name(work, kwork_name,
+ PRINT_KWORK_NAME_WIDTH);
+ printf(" %-*s ", PRINT_KWORK_NAME_WIDTH, kwork_name);
+ } else
+ printf(" %-*s ", PRINT_KWORK_NAME_WIDTH, "");
+
+ /*
+ * runtime
+ */
+ printf(" %*.*f ",
+ PRINT_RUNTIME_WIDTH, RPINT_DECIMAL_WIDTH,
+ (double)(sample->time - atom->time) / NSEC_PER_MSEC);
+
+ /*
+ * delaytime
+ */
+ if (atom->prev != NULL)
+ printf(" %*.*f ", PRINT_LATENCY_WIDTH, RPINT_DECIMAL_WIDTH,
+ (double)(atom->time - atom->prev->time) / NSEC_PER_MSEC);
+ else
+ printf(" %*s ", PRINT_LATENCY_WIDTH, " ");
+
+ /*
+ * callchain
+ */
+ if (kwork->show_callchain) {
+ printf(" ");
+ sample__fprintf_sym(sample, al, 0,
+ EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE |
+ EVSEL__PRINT_CALLCHAIN_ARROW |
+ EVSEL__PRINT_SKIP_IGNORED,
+ &callchain_cursor, symbol_conf.bt_stop_list,
+ stdout);
+ }
+
+ printf("\n");
+}
+
+static int timehist_raise_event(struct perf_kwork *kwork,
+ struct kwork_class *class,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ return work_push_atom(kwork, class, KWORK_TRACE_RAISE,
+ KWORK_TRACE_MAX, evsel, sample,
+ machine, NULL);
+}
+
+static int timehist_entry_event(struct perf_kwork *kwork,
+ struct kwork_class *class,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ int ret;
+ struct kwork_work *work = NULL;
+
+ ret = work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
+ KWORK_TRACE_RAISE, evsel, sample,
+ machine, &work);
+ if (ret)
+ return ret;
+
+ if (work != NULL)
+ timehist_save_callchain(kwork, sample, evsel, machine);
+
+ return 0;
+}
+
+static int timehist_exit_event(struct perf_kwork *kwork,
+ struct kwork_class *class,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct kwork_atom *atom = NULL;
+ struct kwork_work *work = NULL;
+ struct addr_location al;
+
+ if (machine__resolve(machine, &al, sample) < 0) {
+ pr_debug("Problem processing event, skipping it\n");
+ return -1;
+ }
+
+ atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
+ KWORK_TRACE_ENTRY, evsel, sample,
+ machine, &work);
+ if (work == NULL)
+ return -1;
+
+ if (atom != NULL) {
+ work->nr_atoms++;
+ timehist_print_event(kwork, work, atom, sample, &al);
+ atom_del(atom);
+ }
+
+ return 0;
+}
+
+static struct kwork_class kwork_irq;
+static int process_irq_handler_entry_event(struct perf_tool *tool,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
+
+ if (kwork->tp_handler->entry_event)
+ return kwork->tp_handler->entry_event(kwork, &kwork_irq,
+ evsel, sample, machine);
+ return 0;
+}
+
+static int process_irq_handler_exit_event(struct perf_tool *tool,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
+
+ if (kwork->tp_handler->exit_event)
+ return kwork->tp_handler->exit_event(kwork, &kwork_irq,
+ evsel, sample, machine);
+ return 0;
+}
+
+const struct evsel_str_handler irq_tp_handlers[] = {
+ { "irq:irq_handler_entry", process_irq_handler_entry_event, },
+ { "irq:irq_handler_exit", process_irq_handler_exit_event, },
+};
+
+static int irq_class_init(struct kwork_class *class,
+ struct perf_session *session)
+{
+ if (perf_session__set_tracepoints_handlers(session, irq_tp_handlers)) {
+ pr_err("Failed to set irq tracepoints handlers\n");
+ return -1;
+ }
+
+ class->work_root = RB_ROOT_CACHED;
+ return 0;
+}
+
+static void irq_work_init(struct kwork_class *class,
+ struct kwork_work *work,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine __maybe_unused)
+{
+ work->class = class;
+ work->cpu = sample->cpu;
+ work->id = evsel__intval(evsel, sample, "irq");
+ work->name = evsel__strval(evsel, sample, "name");
+}
+
+static void irq_work_name(struct kwork_work *work, char *buf, int len)
+{
+ snprintf(buf, len, "%s:%" PRIu64 "", work->name, work->id);
+}
+
+static struct kwork_class kwork_irq = {
+ .name = "irq",
+ .type = KWORK_CLASS_IRQ,
+ .nr_tracepoints = 2,
+ .tp_handlers = irq_tp_handlers,
+ .class_init = irq_class_init,
+ .work_init = irq_work_init,
+ .work_name = irq_work_name,
+};
+
+static struct kwork_class kwork_softirq;
+static int process_softirq_raise_event(struct perf_tool *tool,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
+
+ if (kwork->tp_handler->raise_event)
+ return kwork->tp_handler->raise_event(kwork, &kwork_softirq,
+ evsel, sample, machine);
+
+ return 0;
+}
+
+static int process_softirq_entry_event(struct perf_tool *tool,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
+
+ if (kwork->tp_handler->entry_event)
+ return kwork->tp_handler->entry_event(kwork, &kwork_softirq,
+ evsel, sample, machine);
+
+ return 0;
+}
+
+static int process_softirq_exit_event(struct perf_tool *tool,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
+
+ if (kwork->tp_handler->exit_event)
+ return kwork->tp_handler->exit_event(kwork, &kwork_softirq,
+ evsel, sample, machine);
+
+ return 0;
+}
+
+const struct evsel_str_handler softirq_tp_handlers[] = {
+ { "irq:softirq_raise", process_softirq_raise_event, },
+ { "irq:softirq_entry", process_softirq_entry_event, },
+ { "irq:softirq_exit", process_softirq_exit_event, },
+};
+
+static int softirq_class_init(struct kwork_class *class,
+ struct perf_session *session)
+{
+ if (perf_session__set_tracepoints_handlers(session,
+ softirq_tp_handlers)) {
+ pr_err("Failed to set softirq tracepoints handlers\n");
+ return -1;
+ }
+
+ class->work_root = RB_ROOT_CACHED;
+ return 0;
+}
+
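+/*
+ * Map a softirq vector number to its name using the symbolic name table
+ * embedded in the tracepoint's print format for the "vec" field.
+ */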
+static char *evsel__softirq_name(struct evsel *evsel, u64 num)
+{
+ char *name = NULL;
+ bool found = false;
+ struct tep_print_flag_sym *sym = NULL;
+ struct tep_print_arg *args = evsel->tp_format->print_fmt.args;
+
+ if ((args == NULL) || (args->next == NULL))
+ return NULL;
+
+ /* skip softirq field: "REC->vec" */
+ for (sym = args->next->symbol.symbols; sym != NULL; sym = sym->next) {
+ if ((eval_flag(sym->value) == (unsigned long long)num) &&
+ (strlen(sym->str) != 0)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return NULL;
+
+ name = strdup(sym->str);
+ if (name == NULL) {
+ pr_err("Failed to copy symbol name\n");
+ return NULL;
+ }
+ return name;
+}
+
+static void softirq_work_init(struct kwork_class *class,
+ struct kwork_work *work,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine __maybe_unused)
+{
+ u64 num = evsel__intval(evsel, sample, "vec");
+
+ work->id = num;
+ work->class = class;
+ work->cpu = sample->cpu;
+ work->name = evsel__softirq_name(evsel, num);
+}
+
+static void softirq_work_name(struct kwork_work *work, char *buf, int len)
+{
+ snprintf(buf, len, "(s)%s:%" PRIu64 "", work->name, work->id);
+}
+
+static struct kwork_class kwork_softirq = {
+ .name = "softirq",
+ .type = KWORK_CLASS_SOFTIRQ,
+ .nr_tracepoints = 3,
+ .tp_handlers = softirq_tp_handlers,
+ .class_init = softirq_class_init,
+ .work_init = softirq_work_init,
+ .work_name = softirq_work_name,
+};
+
+static struct kwork_class kwork_workqueue;
+static int process_workqueue_activate_work_event(struct perf_tool *tool,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
+
+ if (kwork->tp_handler->raise_event)
+ return kwork->tp_handler->raise_event(kwork, &kwork_workqueue,
+ evsel, sample, machine);
+
+ return 0;
+}
+
+static int process_workqueue_execute_start_event(struct perf_tool *tool,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
+
+ if (kwork->tp_handler->entry_event)
+ return kwork->tp_handler->entry_event(kwork, &kwork_workqueue,
+ evsel, sample, machine);
+
+ return 0;
+}
+
+static int process_workqueue_execute_end_event(struct perf_tool *tool,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
+
+ if (kwork->tp_handler->exit_event)
+ return kwork->tp_handler->exit_event(kwork, &kwork_workqueue,
+ evsel, sample, machine);
+
+ return 0;
+}
+
+const struct evsel_str_handler workqueue_tp_handlers[] = {
+ { "workqueue:workqueue_activate_work", process_workqueue_activate_work_event, },
+ { "workqueue:workqueue_execute_start", process_workqueue_execute_start_event, },
+ { "workqueue:workqueue_execute_end", process_workqueue_execute_end_event, },
+};
+
+static int workqueue_class_init(struct kwork_class *class,
+ struct perf_session *session)
+{
+ if (perf_session__set_tracepoints_handlers(session,
+ workqueue_tp_handlers)) {
+ pr_err("Failed to set workqueue tracepoints handlers\n");
+ return -1;
+ }
+
+ class->work_root = RB_ROOT_CACHED;
+ return 0;
+}
+
+static void workqueue_work_init(struct kwork_class *class,
+ struct kwork_work *work,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ char *modp = NULL;
+ unsigned long long function_addr = evsel__intval(evsel,
+ sample, "function");
+
+ work->class = class;
+ work->cpu = sample->cpu;
+ work->id = evsel__intval(evsel, sample, "work");
+ work->name = function_addr == 0 ? NULL :
+ machine__resolve_kernel_addr(machine, &function_addr, &modp);
+}
+
+static void workqueue_work_name(struct kwork_work *work, char *buf, int len)
+{
+ if (work->name != NULL)
+ snprintf(buf, len, "(w)%s", work->name);
+ else
+ snprintf(buf, len, "(w)0x%" PRIx64, work->id);
+}
+
+static struct kwork_class kwork_workqueue = {
+ .name = "workqueue",
+ .type = KWORK_CLASS_WORKQUEUE,
+ .nr_tracepoints = 3,
+ .tp_handlers = workqueue_tp_handlers,
+ .class_init = workqueue_class_init,
+ .work_init = workqueue_work_init,
+ .work_name = workqueue_work_name,
+};
+
+static struct kwork_class *kwork_class_supported_list[KWORK_CLASS_MAX] = {
+ [KWORK_CLASS_IRQ] = &kwork_irq,
+ [KWORK_CLASS_SOFTIRQ] = &kwork_softirq,
+ [KWORK_CLASS_WORKQUEUE] = &kwork_workqueue,
+};
+
+static void print_separator(int len)
+{
+ printf(" %.*s\n", len, graph_dotted_line);
+}
+
+static int report_print_work(struct perf_kwork *kwork, struct kwork_work *work)
+{
+ int ret = 0;
+ char kwork_name[PRINT_KWORK_NAME_WIDTH];
+ char max_runtime_start[32], max_runtime_end[32];
+ char max_latency_start[32], max_latency_end[32];
+
+ printf(" ");
+
+ /*
+ * kwork name
+ */
+ if (work->class && work->class->work_name) {
+ work->class->work_name(work, kwork_name,
+ PRINT_KWORK_NAME_WIDTH);
+ ret += printf(" %-*s |", PRINT_KWORK_NAME_WIDTH, kwork_name);
+ } else {
+ ret += printf(" %-*s |", PRINT_KWORK_NAME_WIDTH, "");
+ }
+
+ /*
+ * cpu
+ */
+ ret += printf(" %0*d |", PRINT_CPU_WIDTH, work->cpu);
+
+ /*
+ * total runtime
+ */
+ if (kwork->report == KWORK_REPORT_RUNTIME) {
+ ret += printf(" %*.*f ms |",
+ PRINT_RUNTIME_WIDTH, RPINT_DECIMAL_WIDTH,
+ (double)work->total_runtime / NSEC_PER_MSEC);
+ } else if (kwork->report == KWORK_REPORT_LATENCY) { // avg delay
+ ret += printf(" %*.*f ms |",
+ PRINT_LATENCY_WIDTH, RPINT_DECIMAL_WIDTH,
+ (double)work->total_latency /
+ work->nr_atoms / NSEC_PER_MSEC);
+ }
+
+ /*
+ * count
+ */
+ ret += printf(" %*" PRIu64 " |", PRINT_COUNT_WIDTH, work->nr_atoms);
+
+ /*
+ * max runtime, max runtime start, max runtime end
+ */
+ if (kwork->report == KWORK_REPORT_RUNTIME) {
+ timestamp__scnprintf_usec(work->max_runtime_start,
+ max_runtime_start,
+ sizeof(max_runtime_start));
+ timestamp__scnprintf_usec(work->max_runtime_end,
+ max_runtime_end,
+ sizeof(max_runtime_end));
+ ret += printf(" %*.*f ms | %*s s | %*s s |",
+ PRINT_RUNTIME_WIDTH, RPINT_DECIMAL_WIDTH,
+ (double)work->max_runtime / NSEC_PER_MSEC,
+ PRINT_TIMESTAMP_WIDTH, max_runtime_start,
+ PRINT_TIMESTAMP_WIDTH, max_runtime_end);
+ }
+ /*
+ * max delay, max delay start, max delay end
+ */
+ else if (kwork->report == KWORK_REPORT_LATENCY) {
+ timestamp__scnprintf_usec(work->max_latency_start,
+ max_latency_start,
+ sizeof(max_latency_start));
+ timestamp__scnprintf_usec(work->max_latency_end,
+ max_latency_end,
+ sizeof(max_latency_end));
+ ret += printf(" %*.*f ms | %*s s | %*s s |",
+ PRINT_LATENCY_WIDTH, RPINT_DECIMAL_WIDTH,
+ (double)work->max_latency / NSEC_PER_MSEC,
+ PRINT_TIMESTAMP_WIDTH, max_latency_start,
+ PRINT_TIMESTAMP_WIDTH, max_latency_end);
+ }
+
+ printf("\n");
+ return ret;
+}
+
+static int report_print_header(struct perf_kwork *kwork)
+{
+ int ret;
+
+ printf("\n ");
+ ret = printf(" %-*s | %-*s |",
+ PRINT_KWORK_NAME_WIDTH, "Kwork Name",
+ PRINT_CPU_WIDTH, "Cpu");
+
+ if (kwork->report == KWORK_REPORT_RUNTIME) {
+ ret += printf(" %-*s |",
+ PRINT_RUNTIME_HEADER_WIDTH, "Total Runtime");
+ } else if (kwork->report == KWORK_REPORT_LATENCY) {
+ ret += printf(" %-*s |",
+ PRINT_LATENCY_HEADER_WIDTH, "Avg delay");
+ }
+
+ ret += printf(" %-*s |", PRINT_COUNT_WIDTH, "Count");
+
+ if (kwork->report == KWORK_REPORT_RUNTIME) {
+ ret += printf(" %-*s | %-*s | %-*s |",
+ PRINT_RUNTIME_HEADER_WIDTH, "Max runtime",
+ PRINT_TIMESTAMP_HEADER_WIDTH, "Max runtime start",
+ PRINT_TIMESTAMP_HEADER_WIDTH, "Max runtime end");
+ } else if (kwork->report == KWORK_REPORT_LATENCY) {
+ ret += printf(" %-*s | %-*s | %-*s |",
+ PRINT_LATENCY_HEADER_WIDTH, "Max delay",
+ PRINT_TIMESTAMP_HEADER_WIDTH, "Max delay start",
+ PRINT_TIMESTAMP_HEADER_WIDTH, "Max delay end");
+ }
+
+ printf("\n");
+ print_separator(ret);
+ return ret;
+}
+
+static void timehist_print_header(void)
+{
+ /*
+ * header row
+ */
+ printf(" %-*s %-*s %-*s %-*s %-*s %-*s\n",
+ PRINT_TIMESTAMP_WIDTH, "Runtime start",
+ PRINT_TIMESTAMP_WIDTH, "Runtime end",
+ PRINT_TIMEHIST_CPU_WIDTH, "Cpu",
+ PRINT_KWORK_NAME_WIDTH, "Kwork name",
+ PRINT_RUNTIME_WIDTH, "Runtime",
+ PRINT_RUNTIME_WIDTH, "Delaytime");
+
+ /*
+ * units row
+ */
+ printf(" %-*s %-*s %-*s %-*s %-*s %-*s\n",
+ PRINT_TIMESTAMP_WIDTH, "",
+ PRINT_TIMESTAMP_WIDTH, "",
+ PRINT_TIMEHIST_CPU_WIDTH, "",
+ PRINT_KWORK_NAME_WIDTH, "(TYPE)NAME:NUM",
+ PRINT_RUNTIME_WIDTH, "(msec)",
+ PRINT_RUNTIME_WIDTH, "(msec)");
+
+ /*
+ * separator
+ */
+ printf(" %.*s %.*s %.*s %.*s %.*s %.*s\n",
+ PRINT_TIMESTAMP_WIDTH, graph_dotted_line,
+ PRINT_TIMESTAMP_WIDTH, graph_dotted_line,
+ PRINT_TIMEHIST_CPU_WIDTH, graph_dotted_line,
+ PRINT_KWORK_NAME_WIDTH, graph_dotted_line,
+ PRINT_RUNTIME_WIDTH, graph_dotted_line,
+ PRINT_RUNTIME_WIDTH, graph_dotted_line);
+}
+
+static void print_summary(struct perf_kwork *kwork)
+{
+ u64 time = kwork->timeend - kwork->timestart;
+
+ printf(" Total count : %9" PRIu64 "\n", kwork->all_count);
+ printf(" Total runtime (msec) : %9.3f (%.3f%% load average)\n",
+ (double)kwork->all_runtime / NSEC_PER_MSEC,
+ time == 0 ? 0 : (double)kwork->all_runtime / time);
+ printf(" Total time span (msec) : %9.3f\n",
+ (double)time / NSEC_PER_MSEC);
+}
+
+static unsigned long long nr_list_entry(struct list_head *head)
+{
+ struct list_head *pos;
+ unsigned long long n = 0;
+
+ list_for_each(pos, head)
+ n++;
+
+ return n;
+}
+
+static void print_skipped_events(struct perf_kwork *kwork)
+{
+ int i;
+ const char *const kwork_event_str[] = {
+ [KWORK_TRACE_RAISE] = "raise",
+ [KWORK_TRACE_ENTRY] = "entry",
+ [KWORK_TRACE_EXIT] = "exit",
+ };
+
+ if ((kwork->nr_skipped_events[KWORK_TRACE_MAX] != 0) &&
+ (kwork->nr_events != 0)) {
+ printf(" INFO: %.3f%% skipped events (%" PRIu64 " including ",
+ (double)kwork->nr_skipped_events[KWORK_TRACE_MAX] /
+ (double)kwork->nr_events * 100.0,
+ kwork->nr_skipped_events[KWORK_TRACE_MAX]);
+
+ for (i = 0; i < KWORK_TRACE_MAX; i++) {
+ printf("%" PRIu64 " %s%s",
+ kwork->nr_skipped_events[i],
+ kwork_event_str[i],
+ (i == KWORK_TRACE_MAX - 1) ? ")\n" : ", ");
+ }
+ }
+
+ if (verbose > 0)
+ printf(" INFO: use %lld atom pages\n",
+ nr_list_entry(&kwork->atom_page_list));
+}
+
+static void print_bad_events(struct perf_kwork *kwork)
+{
+ if ((kwork->nr_lost_events != 0) && (kwork->nr_events != 0)) {
+ printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
+ (double)kwork->nr_lost_events /
+ (double)kwork->nr_events * 100.0,
+ kwork->nr_lost_events, kwork->nr_events,
+ kwork->nr_lost_chunks);
+ }
+}
+
+static void work_sort(struct perf_kwork *kwork, struct kwork_class *class)
+{
+ struct rb_node *node;
+ struct kwork_work *data;
+ struct rb_root_cached *root = &class->work_root;
+
+ pr_debug("Sorting %s ...\n", class->name);
+ for (;;) {
+ node = rb_first_cached(root);
+ if (!node)
+ break;
+
+ rb_erase_cached(node, root);
+ data = rb_entry(node, struct kwork_work, node);
+ work_insert(&kwork->sorted_work_root,
+ data, &kwork->sort_list);
+ }
+}
+
+static void perf_kwork__sort(struct perf_kwork *kwork)
+{
+ struct kwork_class *class;
+
+ list_for_each_entry(class, &kwork->class_list, list)
+ work_sort(kwork, class);
+}
+
+static int perf_kwork__check_config(struct perf_kwork *kwork,
+ struct perf_session *session)
+{
+ int ret;
+ struct evsel *evsel;
+ struct kwork_class *class;
+
+ static struct trace_kwork_handler report_ops = {
+ .entry_event = report_entry_event,
+ .exit_event = report_exit_event,
+ };
+ static struct trace_kwork_handler latency_ops = {
+ .raise_event = latency_raise_event,
+ .entry_event = latency_entry_event,
+ };
+ static struct trace_kwork_handler timehist_ops = {
+ .raise_event = timehist_raise_event,
+ .entry_event = timehist_entry_event,
+ .exit_event = timehist_exit_event,
+ };
+
+ switch (kwork->report) {
+ case KWORK_REPORT_RUNTIME:
+ kwork->tp_handler = &report_ops;
+ break;
+ case KWORK_REPORT_LATENCY:
+ kwork->tp_handler = &latency_ops;
+ break;
+ case KWORK_REPORT_TIMEHIST:
+ kwork->tp_handler = &timehist_ops;
+ break;
+ default:
+ pr_debug("Invalid report type %d\n", kwork->report);
+ return -1;
+ }
+
+ list_for_each_entry(class, &kwork->class_list, list)
+ if ((class->class_init != NULL) &&
+ (class->class_init(class, session) != 0))
+ return -1;
+
+ if (kwork->cpu_list != NULL) {
+ ret = perf_session__cpu_bitmap(session,
+ kwork->cpu_list,
+ kwork->cpu_bitmap);
+ if (ret < 0) {
+ pr_err("Invalid cpu bitmap\n");
+ return -1;
+ }
+ }
+
+ if (kwork->time_str != NULL) {
+ ret = perf_time__parse_str(&kwork->ptime, kwork->time_str);
+ if (ret != 0) {
+ pr_err("Invalid time span\n");
+ return -1;
+ }
+ }
+
+ list_for_each_entry(evsel, &session->evlist->core.entries, core.node) {
+ if (kwork->show_callchain && !evsel__has_callchain(evsel)) {
+ pr_debug("Samples do not have callchains\n");
+ kwork->show_callchain = 0;
+ symbol_conf.use_callchain = 0;
+ }
+ }
+
+ return 0;
+}
+
+static int perf_kwork__read_events(struct perf_kwork *kwork)
+{
+ int ret = -1;
+ struct perf_session *session = NULL;
+
+ struct perf_data data = {
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = kwork->force,
+ };
+
+ session = perf_session__new(&data, &kwork->tool);
+ if (IS_ERR(session)) {
+ pr_debug("Error creating perf session\n");
+ return PTR_ERR(session);
+ }
+
+ symbol__init(&session->header.env);
+
+ if (perf_kwork__check_config(kwork, session) != 0)
+ goto out_delete;
+
+ if (session->tevent.pevent &&
+ tep_set_function_resolver(session->tevent.pevent,
+ machine__resolve_kernel_addr,
+ &session->machines.host) < 0) {
+ pr_err("Failed to set libtraceevent function resolver\n");
+ goto out_delete;
+ }
+
+ if (kwork->report == KWORK_REPORT_TIMEHIST)
+ timehist_print_header();
+
+ ret = perf_session__process_events(session);
+ if (ret) {
+ pr_debug("Failed to process events, error %d\n", ret);
+ goto out_delete;
+ }
+
+ kwork->nr_events = session->evlist->stats.nr_events[0];
+ kwork->nr_lost_events = session->evlist->stats.total_lost;
+ kwork->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
+
+out_delete:
+ perf_session__delete(session);
+ return ret;
+}
+
+static void process_skipped_events(struct perf_kwork *kwork,
+ struct kwork_work *work)
+{
+ int i;
+ unsigned long long count;
+
+ for (i = 0; i < KWORK_TRACE_MAX; i++) {
+ count = nr_list_entry(&work->atom_list[i]);
+ kwork->nr_skipped_events[i] += count;
+ kwork->nr_skipped_events[KWORK_TRACE_MAX] += count;
+ }
+}
+
+struct kwork_work *perf_kwork_add_work(struct perf_kwork *kwork,
+ struct kwork_class *class,
+ struct kwork_work *key)
+{
+ struct kwork_work *work = NULL;
+
+ work = work_new(key);
+ if (work == NULL)
+ return NULL;
+
+ work_insert(&class->work_root, work, &kwork->cmp_id);
+ return work;
+}
+
+static void sig_handler(int sig)
+{
+ /*
+ * Simply capture the termination signal so that
+ * the program can continue after pause() returns
+ */
+ pr_debug("Capture signal %d\n", sig);
+}
+
+static int perf_kwork__report_bpf(struct perf_kwork *kwork)
+{
+ int ret;
+
+ signal(SIGINT, sig_handler);
+ signal(SIGTERM, sig_handler);
+
+ ret = perf_kwork__trace_prepare_bpf(kwork);
+ if (ret)
+ return -1;
+
+ printf("Starting trace, Hit <Ctrl+C> to stop and report\n");
+
+ perf_kwork__trace_start();
+
+ /*
+ * A simple pause; wait here for the stop signal
+ */
+ pause();
+
+ perf_kwork__trace_finish();
+
+ perf_kwork__report_read_bpf(kwork);
+
+ perf_kwork__report_cleanup_bpf();
+
+ return 0;
+}
+
+static int perf_kwork__report(struct perf_kwork *kwork)
+{
+ int ret;
+ struct rb_node *next;
+ struct kwork_work *work;
+
+ if (kwork->use_bpf)
+ ret = perf_kwork__report_bpf(kwork);
+ else
+ ret = perf_kwork__read_events(kwork);
+
+ if (ret != 0)
+ return -1;
+
+ perf_kwork__sort(kwork);
+
+ setup_pager();
+
+ ret = report_print_header(kwork);
+ next = rb_first_cached(&kwork->sorted_work_root);
+ while (next) {
+ work = rb_entry(next, struct kwork_work, node);
+ process_skipped_events(kwork, work);
+
+ if (work->nr_atoms != 0) {
+ report_print_work(kwork, work);
+ if (kwork->summary) {
+ kwork->all_runtime += work->total_runtime;
+ kwork->all_count += work->nr_atoms;
+ }
+ }
+ next = rb_next(next);
+ }
+ print_separator(ret);
+
+ if (kwork->summary) {
+ print_summary(kwork);
+ print_separator(ret);
+ }
+
+ print_bad_events(kwork);
+ print_skipped_events(kwork);
+ printf("\n");
+
+ return 0;
+}
+
+typedef int (*tracepoint_handler)(struct perf_tool *tool,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine);
+
+static int perf_kwork__process_tracepoint_sample(struct perf_tool *tool,
+ union perf_event *event __maybe_unused,
+ struct perf_sample *sample,
+ struct evsel *evsel,
+ struct machine *machine)
+{
+ int err = 0;
+
+ if (evsel->handler != NULL) {
+ tracepoint_handler f = evsel->handler;
+
+ err = f(tool, evsel, sample, machine);
+ }
+
+ return err;
+}
+
+static int perf_kwork__timehist(struct perf_kwork *kwork)
+{
+ /*
+ * event handlers for timehist option
+ */
+ kwork->tool.comm = perf_event__process_comm;
+ kwork->tool.exit = perf_event__process_exit;
+ kwork->tool.fork = perf_event__process_fork;
+ kwork->tool.attr = perf_event__process_attr;
+ kwork->tool.tracing_data = perf_event__process_tracing_data;
+ kwork->tool.build_id = perf_event__process_build_id;
+ kwork->tool.ordered_events = true;
+ kwork->tool.ordering_requires_timestamps = true;
+ symbol_conf.use_callchain = kwork->show_callchain;
+
+ if (symbol__validate_sym_arguments()) {
+ pr_err("Failed to validate sym arguments\n");
+ return -1;
+ }
+
+ setup_pager();
+
+ return perf_kwork__read_events(kwork);
+}
+
+static void setup_event_list(struct perf_kwork *kwork,
+ const struct option *options,
+ const char * const usage_msg[])
+{
+ int i;
+ struct kwork_class *class;
+ char *tmp, *tok, *str;
+
+ if (kwork->event_list_str == NULL)
+ goto null_event_list_str;
+
+ str = strdup(kwork->event_list_str);
+ for (tok = strtok_r(str, ", ", &tmp);
+ tok; tok = strtok_r(NULL, ", ", &tmp)) {
+ for (i = 0; i < KWORK_CLASS_MAX; i++) {
+ class = kwork_class_supported_list[i];
+ if (strcmp(tok, class->name) == 0) {
+ list_add_tail(&class->list, &kwork->class_list);
+ break;
+ }
+ }
+ if (i == KWORK_CLASS_MAX) {
+ usage_with_options_msg(usage_msg, options,
+ "Unknown --event key: `%s'", tok);
+ }
+ }
+ free(str);
+
+null_event_list_str:
+ /*
+ * configure all kwork events if not specified
+ */
+ if (list_empty(&kwork->class_list)) {
+ for (i = 0; i < KWORK_CLASS_MAX; i++) {
+ list_add_tail(&kwork_class_supported_list[i]->list,
+ &kwork->class_list);
+ }
+ }
+
+ pr_debug("Config event list:");
+ list_for_each_entry(class, &kwork->class_list, list)
+ pr_debug(" %s", class->name);
+ pr_debug("\n");
+}
+
+static int perf_kwork__record(struct perf_kwork *kwork,
+ int argc, const char **argv)
+{
+ const char **rec_argv;
+ unsigned int rec_argc, i, j;
+ struct kwork_class *class;
+
+ const char *const record_args[] = {
+ "record",
+ "-a",
+ "-R",
+ "-m", "1024",
+ "-c", "1",
+ };
+
+ rec_argc = ARRAY_SIZE(record_args) + argc - 1;
+
+ list_for_each_entry(class, &kwork->class_list, list)
+ rec_argc += 2 * class->nr_tracepoints;
+
+ rec_argv = calloc(rec_argc + 1, sizeof(char *));
+ if (rec_argv == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(record_args); i++)
+ rec_argv[i] = strdup(record_args[i]);
+
+ list_for_each_entry(class, &kwork->class_list, list) {
+ for (j = 0; j < class->nr_tracepoints; j++) {
+ rec_argv[i++] = strdup("-e");
+ rec_argv[i++] = strdup(class->tp_handlers[j].name);
+ }
+ }
+
+ for (j = 1; j < (unsigned int)argc; j++, i++)
+ rec_argv[i] = argv[j];
+
+ BUG_ON(i != rec_argc);
+
+ pr_debug("record comm: ");
+ for (j = 0; j < rec_argc; j++)
+ pr_debug("%s ", rec_argv[j]);
+ pr_debug("\n");
+
+ return cmd_record(i, rec_argv);
+}
+
+int cmd_kwork(int argc, const char **argv)
+{
+ static struct perf_kwork kwork = {
+ .class_list = LIST_HEAD_INIT(kwork.class_list),
+ .tool = {
+ .mmap = perf_event__process_mmap,
+ .mmap2 = perf_event__process_mmap2,
+ .sample = perf_kwork__process_tracepoint_sample,
+ },
+ .atom_page_list = LIST_HEAD_INIT(kwork.atom_page_list),
+ .sort_list = LIST_HEAD_INIT(kwork.sort_list),
+ .cmp_id = LIST_HEAD_INIT(kwork.cmp_id),
+ .sorted_work_root = RB_ROOT_CACHED,
+ .tp_handler = NULL,
+ .profile_name = NULL,
+ .cpu_list = NULL,
+ .time_str = NULL,
+ .force = false,
+ .event_list_str = NULL,
+ .summary = false,
+ .sort_order = NULL,
+ .show_callchain = false,
+ .max_stack = 5,
+ .timestart = 0,
+ .timeend = 0,
+ .nr_events = 0,
+ .nr_lost_chunks = 0,
+ .nr_lost_events = 0,
+ .all_runtime = 0,
+ .all_count = 0,
+ .nr_skipped_events = { 0 },
+ };
+ static const char default_report_sort_order[] = "runtime, max, count";
+ static const char default_latency_sort_order[] = "avg, max, count";
+ const struct option kwork_options[] = {
+ OPT_INCR('v', "verbose", &verbose,
+ "be more verbose (show symbol address, etc)"),
+ OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
+ "dump raw trace in ASCII"),
+ OPT_STRING('k', "kwork", &kwork.event_list_str, "kwork",
+ "list of kwork to profile (irq, softirq, workqueue, etc)"),
+ OPT_BOOLEAN('f', "force", &kwork.force, "don't complain, do it"),
+ OPT_END()
+ };
+ const struct option report_options[] = {
+ OPT_STRING('s', "sort", &kwork.sort_order, "key[,key2...]",
+ "sort by key(s): runtime, max, count"),
+ OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
+ "list of cpus to profile"),
+ OPT_STRING('n', "name", &kwork.profile_name, "name",
+ "event name to profile"),
+ OPT_STRING(0, "time", &kwork.time_str, "str",
+ "Time span for analysis (start,stop)"),
+ OPT_STRING('i', "input", &input_name, "file",
+ "input file name"),
+ OPT_BOOLEAN('S', "with-summary", &kwork.summary,
+ "Show summary with statistics"),
+#ifdef HAVE_BPF_SKEL
+ OPT_BOOLEAN('b', "use-bpf", &kwork.use_bpf,
+ "Use BPF to measure kwork runtime"),
+#endif
+ OPT_PARENT(kwork_options)
+ };
+ const struct option latency_options[] = {
+ OPT_STRING('s', "sort", &kwork.sort_order, "key[,key2...]",
+ "sort by key(s): avg, max, count"),
+ OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
+ "list of cpus to profile"),
+ OPT_STRING('n', "name", &kwork.profile_name, "name",
+ "event name to profile"),
+ OPT_STRING(0, "time", &kwork.time_str, "str",
+ "Time span for analysis (start,stop)"),
+ OPT_STRING('i', "input", &input_name, "file",
+ "input file name"),
+#ifdef HAVE_BPF_SKEL
+ OPT_BOOLEAN('b', "use-bpf", &kwork.use_bpf,
+ "Use BPF to measure kwork latency"),
+#endif
+ OPT_PARENT(kwork_options)
+ };
+ const struct option timehist_options[] = {
+ OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
+ "file", "vmlinux pathname"),
+ OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
+ "file", "kallsyms pathname"),
+ OPT_BOOLEAN('g', "call-graph", &kwork.show_callchain,
+ "Display call chains if present"),
+ OPT_UINTEGER(0, "max-stack", &kwork.max_stack,
+ "Maximum number of functions to display backtrace."),
+ OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
+ "Look for files with symbols relative to this directory"),
+ OPT_STRING(0, "time", &kwork.time_str, "str",
+ "Time span for analysis (start,stop)"),
+ OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
+ "list of cpus to profile"),
+ OPT_STRING('n', "name", &kwork.profile_name, "name",
+ "event name to profile"),
+ OPT_STRING('i', "input", &input_name, "file",
+ "input file name"),
+ OPT_PARENT(kwork_options)
+ };
+ const char *kwork_usage[] = {
+ NULL,
+ NULL
+ };
+ const char * const report_usage[] = {
+ "perf kwork report [<options>]",
+ NULL
+ };
+ const char * const latency_usage[] = {
+ "perf kwork latency [<options>]",
+ NULL
+ };
+ const char * const timehist_usage[] = {
+ "perf kwork timehist [<options>]",
+ NULL
+ };
+ const char *const kwork_subcommands[] = {
+ "record", "report", "latency", "timehist", NULL
+ };
+
+ argc = parse_options_subcommand(argc, argv, kwork_options,
+ kwork_subcommands, kwork_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+ if (!argc)
+ usage_with_options(kwork_usage, kwork_options);
+
+ setup_event_list(&kwork, kwork_options, kwork_usage);
+ sort_dimension__add(&kwork, "id", &kwork.cmp_id);
+
+ if (strlen(argv[0]) > 2 && strstarts("record", argv[0]))
+ return perf_kwork__record(&kwork, argc, argv);
+ else if (strlen(argv[0]) > 2 && strstarts("report", argv[0])) {
+ kwork.sort_order = default_report_sort_order;
+ if (argc > 1) {
+ argc = parse_options(argc, argv, report_options, report_usage, 0);
+ if (argc)
+ usage_with_options(report_usage, report_options);
+ }
+ kwork.report = KWORK_REPORT_RUNTIME;
+ setup_sorting(&kwork, report_options, report_usage);
+ return perf_kwork__report(&kwork);
+ } else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) {
+ kwork.sort_order = default_latency_sort_order;
+ if (argc > 1) {
+ argc = parse_options(argc, argv, latency_options, latency_usage, 0);
+ if (argc)
+ usage_with_options(latency_usage, latency_options);
+ }
+ kwork.report = KWORK_REPORT_LATENCY;
+ setup_sorting(&kwork, latency_options, latency_usage);
+ return perf_kwork__report(&kwork);
+ } else if (strlen(argv[0]) > 2 && strstarts("timehist", argv[0])) {
+ if (argc > 1) {
+ argc = parse_options(argc, argv, timehist_options, timehist_usage, 0);
+ if (argc)
+ usage_with_options(timehist_usage, timehist_options);
+ }
+ kwork.report = KWORK_REPORT_TIMEHIST;
+ return perf_kwork__timehist(&kwork);
+ } else
+ usage_with_options(kwork_usage, kwork_options);
+
+ return 0;
+}
diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
index 468958154ed9..744dd3520584 100644
--- a/tools/perf/builtin-list.c
+++ b/tools/perf/builtin-list.c
@@ -10,7 +10,7 @@
*/
#include "builtin.h"
-#include "util/parse-events.h"
+#include "util/print-events.h"
#include "util/pmu.h"
#include "util/pmu-hybrid.h"
#include "util/debug.h"
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index 23a33ac15e68..dd11d3471baf 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -9,16 +9,21 @@
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
+#include "util/target.h"
+#include "util/callchain.h"
+#include "util/lock-contention.h"
#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include "util/trace-event.h"
+#include "util/tracepoint.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/data.h"
#include "util/string2.h"
+#include "util/map.h"
#include <sys/types.h>
#include <sys/prctl.h>
@@ -32,8 +37,10 @@
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <linux/err.h>
+#include <linux/stringify.h>
static struct perf_session *session;
+static struct target target;
/* based on kernel/lockdep.c */
#define LOCKHASH_BITS 12
@@ -44,81 +51,23 @@ static struct hlist_head lockhash_table[LOCKHASH_SIZE];
#define __lockhashfn(key) hash_long((unsigned long)key, LOCKHASH_BITS)
#define lockhashentry(key) (lockhash_table + __lockhashfn((key)))
-struct lock_stat {
- struct hlist_node hash_entry;
- struct rb_node rb; /* used for sorting */
-
- u64 addr; /* address of lockdep_map, used as ID */
- char *name; /* for strcpy(), we cannot use const */
-
- unsigned int nr_acquire;
- unsigned int nr_acquired;
- unsigned int nr_contended;
- unsigned int nr_release;
-
- unsigned int nr_readlock;
- unsigned int nr_trylock;
-
- /* these times are in nano sec. */
- u64 avg_wait_time;
- u64 wait_time_total;
- u64 wait_time_min;
- u64 wait_time_max;
-
- int broken; /* flag of blacklist */
- int combined;
-};
-
-/*
- * States of lock_seq_stat
- *
- * UNINITIALIZED is required for detecting first event of acquire.
- * As the nature of lock events, there is no guarantee
- * that the first event for the locks are acquire,
- * it can be acquired, contended or release.
- */
-#define SEQ_STATE_UNINITIALIZED 0 /* initial state */
-#define SEQ_STATE_RELEASED 1
-#define SEQ_STATE_ACQUIRING 2
-#define SEQ_STATE_ACQUIRED 3
-#define SEQ_STATE_READ_ACQUIRED 4
-#define SEQ_STATE_CONTENDED 5
-
-/*
- * MAX_LOCK_DEPTH
- * Imported from include/linux/sched.h.
- * Should this be synchronized?
- */
-#define MAX_LOCK_DEPTH 48
-
-/*
- * struct lock_seq_stat:
- * Place to put on state of one lock sequence
- * 1) acquire -> acquired -> release
- * 2) acquire -> contended -> acquired -> release
- * 3) acquire (with read or try) -> release
- * 4) Are there other patterns?
- */
-struct lock_seq_stat {
- struct list_head list;
- int state;
- u64 prev_event_time;
- u64 addr;
-
- int read_count;
-};
-
-struct thread_stat {
- struct rb_node rb;
-
- u32 tid;
- struct list_head seq_list;
-};
-
static struct rb_root thread_stats;
static bool combine_locks;
static bool show_thread_stats;
+static bool use_bpf;
+static unsigned long bpf_map_entries = 10240;
+
+static enum {
+ LOCK_AGGR_ADDR,
+ LOCK_AGGR_TASK,
+ LOCK_AGGR_CALLER,
+} aggr_mode = LOCK_AGGR_ADDR;
+
+static u64 sched_text_start;
+static u64 sched_text_end;
+static u64 lock_text_start;
+static u64 lock_text_end;
static struct thread_stat *thread_stat_find(u32 tid)
{
@@ -251,6 +200,31 @@ struct lock_key {
struct list_head list;
};
+static void lock_stat_key_print_time(unsigned long long nsec, int len)
+{
+ static const struct {
+ float base;
+ const char *unit;
+ } table[] = {
+ { 1e9 * 3600, "h " },
+ { 1e9 * 60, "m " },
+ { 1e9, "s " },
+ { 1e6, "ms" },
+ { 1e3, "us" },
+ { 0, NULL },
+ };
+
+ for (int i = 0; table[i].unit; i++) {
+ if (nsec < table[i].base)
+ continue;
+
+ pr_info("%*.2f %s", len - 3, nsec / table[i].base, table[i].unit);
+ return;
+ }
+
+ pr_info("%*llu %s", len - 3, nsec, "ns");
+}
+
#define PRINT_KEY(member) \
static void lock_stat_key_print_ ## member(struct lock_key *key, \
struct lock_stat *ls) \
@@ -258,11 +232,18 @@ static void lock_stat_key_print_ ## member(struct lock_key *key, \
pr_info("%*llu", key->len, (unsigned long long)ls->member); \
}
+#define PRINT_TIME(member) \
+static void lock_stat_key_print_ ## member(struct lock_key *key, \
+ struct lock_stat *ls) \
+{ \
+ lock_stat_key_print_time((unsigned long long)ls->member, key->len); \
+}
+
PRINT_KEY(nr_acquired)
PRINT_KEY(nr_contended)
-PRINT_KEY(avg_wait_time)
-PRINT_KEY(wait_time_total)
-PRINT_KEY(wait_time_max)
+PRINT_TIME(avg_wait_time)
+PRINT_TIME(wait_time_total)
+PRINT_TIME(wait_time_max)
static void lock_stat_key_print_wait_time_min(struct lock_key *key,
struct lock_stat *ls)
@@ -272,7 +253,7 @@ static void lock_stat_key_print_wait_time_min(struct lock_key *key,
if (wait_time == ULLONG_MAX)
wait_time = 0;
- pr_info("%*"PRIu64, key->len, wait_time);
+ lock_stat_key_print_time(wait_time, key->len);
}
@@ -288,21 +269,36 @@ static const char *output_fields;
#define DEF_KEY_LOCK(name, header, fn_suffix, len) \
{ #name, header, len, lock_stat_key_ ## fn_suffix, lock_stat_key_print_ ## fn_suffix, {} }
-struct lock_key keys[] = {
+static struct lock_key report_keys[] = {
DEF_KEY_LOCK(acquired, "acquired", nr_acquired, 10),
DEF_KEY_LOCK(contended, "contended", nr_contended, 10),
- DEF_KEY_LOCK(avg_wait, "avg wait (ns)", avg_wait_time, 15),
- DEF_KEY_LOCK(wait_total, "total wait (ns)", wait_time_total, 15),
- DEF_KEY_LOCK(wait_max, "max wait (ns)", wait_time_max, 15),
- DEF_KEY_LOCK(wait_min, "min wait (ns)", wait_time_min, 15),
+ DEF_KEY_LOCK(avg_wait, "avg wait", avg_wait_time, 12),
+ DEF_KEY_LOCK(wait_total, "total wait", wait_time_total, 12),
+ DEF_KEY_LOCK(wait_max, "max wait", wait_time_max, 12),
+ DEF_KEY_LOCK(wait_min, "min wait", wait_time_min, 12),
 /* more complicated extra comparisons should go here */
{ }
};
-static int select_key(void)
+static struct lock_key contention_keys[] = {
+ DEF_KEY_LOCK(contended, "contended", nr_contended, 10),
+ DEF_KEY_LOCK(wait_total, "total wait", wait_time_total, 12),
+ DEF_KEY_LOCK(wait_max, "max wait", wait_time_max, 12),
+ DEF_KEY_LOCK(wait_min, "min wait", wait_time_min, 12),
+ DEF_KEY_LOCK(avg_wait, "avg wait", avg_wait_time, 12),
+
+ /* more complicated extra comparisons should go here */
+ { }
+};
+
+static int select_key(bool contention)
{
int i;
+ struct lock_key *keys = report_keys;
+
+ if (contention)
+ keys = contention_keys;
for (i = 0; keys[i].name; i++) {
if (!strcmp(keys[i].name, sort_key)) {
@@ -320,9 +316,13 @@ static int select_key(void)
return -1;
}
-static int add_output_field(struct list_head *head, char *name)
+static int add_output_field(bool contention, char *name)
{
int i;
+ struct lock_key *keys = report_keys;
+
+ if (contention)
+ keys = contention_keys;
for (i = 0; keys[i].name; i++) {
if (strcmp(keys[i].name, name))
@@ -330,7 +330,7 @@ static int add_output_field(struct list_head *head, char *name)
/* prevent double link */
if (list_empty(&keys[i].list))
- list_add_tail(&keys[i].list, head);
+ list_add_tail(&keys[i].list, &lock_keys);
return 0;
}
@@ -339,10 +339,14 @@ static int add_output_field(struct list_head *head, char *name)
return -1;
}
-static int setup_output_field(const char *str)
+static int setup_output_field(bool contention, const char *str)
{
char *tok, *tmp, *orig;
int i, ret = 0;
+ struct lock_key *keys = report_keys;
+
+ if (contention)
+ keys = contention_keys;
/* no output field given: use all of them */
if (str == NULL) {
@@ -359,7 +363,7 @@ static int setup_output_field(const char *str)
return -ENOMEM;
while ((tok = strsep(&tmp, ",")) != NULL){
- ret = add_output_field(&lock_keys, tok);
+ ret = add_output_field(contention, tok);
if (ret < 0)
break;
}
@@ -451,7 +455,19 @@ static struct lock_stat *pop_from_result(void)
return container_of(node, struct lock_stat, rb);
}
-static struct lock_stat *lock_stat_findnew(u64 addr, const char *name)
+static struct lock_stat *lock_stat_find(u64 addr)
+{
+ struct hlist_head *entry = lockhashentry(addr);
+ struct lock_stat *ret;
+
+ hlist_for_each_entry(ret, entry, hash_entry) {
+ if (ret->addr == addr)
+ return ret;
+ }
+ return NULL;
+}
+
+static struct lock_stat *lock_stat_findnew(u64 addr, const char *name, int flags)
{
struct hlist_head *entry = lockhashentry(addr);
struct lock_stat *ret, *new;
@@ -466,13 +482,13 @@ static struct lock_stat *lock_stat_findnew(u64 addr, const char *name)
goto alloc_failed;
new->addr = addr;
- new->name = zalloc(sizeof(char) * strlen(name) + 1);
+ new->name = strdup(name);
if (!new->name) {
free(new);
goto alloc_failed;
}
- strcpy(new->name, name);
+ new->flags = flags;
new->wait_time_min = ULLONG_MAX;
hlist_add_head(&new->hash_entry, entry);
@@ -484,17 +500,29 @@ alloc_failed:
}
struct trace_lock_handler {
+ /* it's used when CONFIG_LOCKDEP is enabled */
int (*acquire_event)(struct evsel *evsel,
struct perf_sample *sample);
+ /* it's used when CONFIG_LOCKDEP && CONFIG_LOCK_STAT are enabled */
int (*acquired_event)(struct evsel *evsel,
struct perf_sample *sample);
+ /* it's used when CONFIG_LOCKDEP && CONFIG_LOCK_STAT are enabled */
int (*contended_event)(struct evsel *evsel,
struct perf_sample *sample);
+ /* it's used when CONFIG_LOCKDEP is enabled */
int (*release_event)(struct evsel *evsel,
struct perf_sample *sample);
+
+ /* it's used when CONFIG_LOCKDEP is off */
+ int (*contention_begin_event)(struct evsel *evsel,
+ struct perf_sample *sample);
+
+ /* it's used when CONFIG_LOCKDEP is off */
+ int (*contention_end_event)(struct evsel *evsel,
+ struct perf_sample *sample);
};
static struct lock_seq_stat *get_seq(struct thread_stat *ts, u64 addr)
@@ -542,12 +570,22 @@ static int report_lock_acquire_event(struct evsel *evsel,
const char *name = evsel__strval(evsel, sample, "name");
u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
int flag = evsel__intval(evsel, sample, "flags");
+ u64 key;
- /* abuse ls->addr for tid */
- if (show_thread_stats)
- addr = sample->tid;
+ switch (aggr_mode) {
+ case LOCK_AGGR_ADDR:
+ key = addr;
+ break;
+ case LOCK_AGGR_TASK:
+ key = sample->tid;
+ break;
+ case LOCK_AGGR_CALLER:
+ default:
+ pr_err("Invalid aggregation mode: %d\n", aggr_mode);
+ return -EINVAL;
+ }
- ls = lock_stat_findnew(addr, name);
+ ls = lock_stat_findnew(key, name, 0);
if (!ls)
return -ENOMEM;
@@ -615,11 +653,22 @@ static int report_lock_acquired_event(struct evsel *evsel,
u64 contended_term;
const char *name = evsel__strval(evsel, sample, "name");
u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
+ u64 key;
- if (show_thread_stats)
- addr = sample->tid;
+ switch (aggr_mode) {
+ case LOCK_AGGR_ADDR:
+ key = addr;
+ break;
+ case LOCK_AGGR_TASK:
+ key = sample->tid;
+ break;
+ case LOCK_AGGR_CALLER:
+ default:
+ pr_err("Invalid aggregation mode: %d\n", aggr_mode);
+ return -EINVAL;
+ }
- ls = lock_stat_findnew(addr, name);
+ ls = lock_stat_findnew(key, name, 0);
if (!ls)
return -ENOMEM;
@@ -677,11 +726,22 @@ static int report_lock_contended_event(struct evsel *evsel,
struct lock_seq_stat *seq;
const char *name = evsel__strval(evsel, sample, "name");
u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
+ u64 key;
- if (show_thread_stats)
- addr = sample->tid;
+ switch (aggr_mode) {
+ case LOCK_AGGR_ADDR:
+ key = addr;
+ break;
+ case LOCK_AGGR_TASK:
+ key = sample->tid;
+ break;
+ case LOCK_AGGR_CALLER:
+ default:
+ pr_err("Invalid aggregation mode: %d\n", aggr_mode);
+ return -EINVAL;
+ }
- ls = lock_stat_findnew(addr, name);
+ ls = lock_stat_findnew(key, name, 0);
if (!ls)
return -ENOMEM;
@@ -732,11 +792,22 @@ static int report_lock_release_event(struct evsel *evsel,
struct lock_seq_stat *seq;
const char *name = evsel__strval(evsel, sample, "name");
u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
+ u64 key;
- if (show_thread_stats)
- addr = sample->tid;
+ switch (aggr_mode) {
+ case LOCK_AGGR_ADDR:
+ key = addr;
+ break;
+ case LOCK_AGGR_TASK:
+ key = sample->tid;
+ break;
+ case LOCK_AGGR_CALLER:
+ default:
+ pr_err("Invalid aggregation mode: %d\n", aggr_mode);
+ return -EINVAL;
+ }
- ls = lock_stat_findnew(addr, name);
+ ls = lock_stat_findnew(key, name, 0);
if (!ls)
return -ENOMEM;
@@ -783,6 +854,314 @@ end:
return 0;
}
+bool is_lock_function(struct machine *machine, u64 addr)
+{
+ if (!sched_text_start) {
+ struct map *kmap;
+ struct symbol *sym;
+
+ sym = machine__find_kernel_symbol_by_name(machine,
+ "__sched_text_start",
+ &kmap);
+ if (!sym) {
+ /* to avoid retry */
+ sched_text_start = 1;
+ return false;
+ }
+
+ sched_text_start = kmap->unmap_ip(kmap, sym->start);
+
+ /* should not fail from here */
+ sym = machine__find_kernel_symbol_by_name(machine,
+ "__sched_text_end",
+ &kmap);
+ sched_text_end = kmap->unmap_ip(kmap, sym->start);
+
+ sym = machine__find_kernel_symbol_by_name(machine,
+ "__lock_text_start",
+ &kmap);
+ lock_text_start = kmap->unmap_ip(kmap, sym->start);
+
+ sym = machine__find_kernel_symbol_by_name(machine,
+ "__lock_text_end",
+ &kmap);
+ lock_text_end = kmap->unmap_ip(kmap, sym->start);
+ }
+
+ /* failed to get kernel symbols */
+ if (sched_text_start == 1)
+ return false;
+
+ /* mutex and rwsem functions are in sched text section */
+ if (sched_text_start <= addr && addr < sched_text_end)
+ return true;
+
+ /* spinlock functions are in lock text section */
+ if (lock_text_start <= addr && addr < lock_text_end)
+ return true;
+
+ return false;
+}
+
+static int lock_contention_caller(struct evsel *evsel, struct perf_sample *sample,
+ char *buf, int size)
+{
+ struct thread *thread;
+ struct callchain_cursor *cursor = &callchain_cursor;
+ struct machine *machine = &session->machines.host;
+ struct symbol *sym;
+ int skip = 0;
+ int ret;
+
+ /* lock names will be replaced with the task name later */
+ if (show_thread_stats)
+ return -1;
+
+ thread = machine__findnew_thread(machine, -1, sample->pid);
+ if (thread == NULL)
+ return -1;
+
+ /* use caller function name from the callchain */
+ ret = thread__resolve_callchain(thread, cursor, evsel, sample,
+ NULL, NULL, CONTENTION_STACK_DEPTH);
+ if (ret != 0) {
+ thread__put(thread);
+ return -1;
+ }
+
+ callchain_cursor_commit(cursor);
+ thread__put(thread);
+
+ while (true) {
+ struct callchain_cursor_node *node;
+
+ node = callchain_cursor_current(cursor);
+ if (node == NULL)
+ break;
+
+ /* skip first few entries - for lock functions */
+ if (++skip <= CONTENTION_STACK_SKIP)
+ goto next;
+
+ sym = node->ms.sym;
+ if (sym && !is_lock_function(machine, node->ip)) {
+ struct map *map = node->ms.map;
+ u64 offset;
+
+ offset = map->map_ip(map, node->ip) - sym->start;
+
+ if (offset)
+ scnprintf(buf, size, "%s+%#lx", sym->name, offset);
+ else
+ strlcpy(buf, sym->name, size);
+ return 0;
+ }
+
+next:
+ callchain_cursor_advance(cursor);
+ }
+ return -1;
+}
+
+static u64 callchain_id(struct evsel *evsel, struct perf_sample *sample)
+{
+ struct callchain_cursor *cursor = &callchain_cursor;
+ struct machine *machine = &session->machines.host;
+ struct thread *thread;
+ u64 hash = 0;
+ int skip = 0;
+ int ret;
+
+ thread = machine__findnew_thread(machine, -1, sample->pid);
+ if (thread == NULL)
+ return -1;
+
+ /* use caller function name from the callchain */
+ ret = thread__resolve_callchain(thread, cursor, evsel, sample,
+ NULL, NULL, CONTENTION_STACK_DEPTH);
+ thread__put(thread);
+
+ if (ret != 0)
+ return -1;
+
+ callchain_cursor_commit(cursor);
+
+ while (true) {
+ struct callchain_cursor_node *node;
+
+ node = callchain_cursor_current(cursor);
+ if (node == NULL)
+ break;
+
+ /* skip first few entries - for lock functions */
+ if (++skip <= CONTENTION_STACK_SKIP)
+ goto next;
+
+ if (node->ms.sym && is_lock_function(machine, node->ip))
+ goto next;
+
+ hash ^= hash_long((unsigned long)node->ip, 64);
+
+next:
+ callchain_cursor_advance(cursor);
+ }
+ return hash;
+}
+
+static int report_lock_contention_begin_event(struct evsel *evsel,
+ struct perf_sample *sample)
+{
+ struct lock_stat *ls;
+ struct thread_stat *ts;
+ struct lock_seq_stat *seq;
+ u64 addr = evsel__intval(evsel, sample, "lock_addr");
+ u64 key;
+
+ switch (aggr_mode) {
+ case LOCK_AGGR_ADDR:
+ key = addr;
+ break;
+ case LOCK_AGGR_TASK:
+ key = sample->tid;
+ break;
+ case LOCK_AGGR_CALLER:
+ key = callchain_id(evsel, sample);
+ break;
+ default:
+ pr_err("Invalid aggregation mode: %d\n", aggr_mode);
+ return -EINVAL;
+ }
+
+ ls = lock_stat_find(key);
+ if (!ls) {
+ char buf[128];
+ const char *caller = buf;
+ unsigned int flags = evsel__intval(evsel, sample, "flags");
+
+ if (lock_contention_caller(evsel, sample, buf, sizeof(buf)) < 0)
+ caller = "Unknown";
+
+ ls = lock_stat_findnew(key, caller, flags);
+ if (!ls)
+ return -ENOMEM;
+ }
+
+ ts = thread_stat_findnew(sample->tid);
+ if (!ts)
+ return -ENOMEM;
+
+ seq = get_seq(ts, addr);
+ if (!seq)
+ return -ENOMEM;
+
+ switch (seq->state) {
+ case SEQ_STATE_UNINITIALIZED:
+ case SEQ_STATE_ACQUIRED:
+ break;
+ case SEQ_STATE_CONTENDED:
+ /*
+ * Nested contention begin events can occur with mutex spinning;
+ * in that case keep the original contention begin event and
+ * ignore the second one.
+ */
+ goto end;
+ case SEQ_STATE_ACQUIRING:
+ case SEQ_STATE_READ_ACQUIRED:
+ case SEQ_STATE_RELEASED:
+ /* broken lock sequence */
+ if (!ls->broken) {
+ ls->broken = 1;
+ bad_hist[BROKEN_CONTENDED]++;
+ }
+ list_del_init(&seq->list);
+ free(seq);
+ goto end;
+ default:
+ BUG_ON("Unknown state of lock sequence found!\n");
+ break;
+ }
+
+ if (seq->state != SEQ_STATE_CONTENDED) {
+ seq->state = SEQ_STATE_CONTENDED;
+ seq->prev_event_time = sample->time;
+ ls->nr_contended++;
+ }
+end:
+ return 0;
+}
+
+static int report_lock_contention_end_event(struct evsel *evsel,
+ struct perf_sample *sample)
+{
+ struct lock_stat *ls;
+ struct thread_stat *ts;
+ struct lock_seq_stat *seq;
+ u64 contended_term;
+ u64 addr = evsel__intval(evsel, sample, "lock_addr");
+ u64 key;
+
+ switch (aggr_mode) {
+ case LOCK_AGGR_ADDR:
+ key = addr;
+ break;
+ case LOCK_AGGR_TASK:
+ key = sample->tid;
+ break;
+ case LOCK_AGGR_CALLER:
+ key = callchain_id(evsel, sample);
+ break;
+ default:
+ pr_err("Invalid aggregation mode: %d\n", aggr_mode);
+ return -EINVAL;
+ }
+
+ ls = lock_stat_find(key);
+ if (!ls)
+ return 0;
+
+ ts = thread_stat_find(sample->tid);
+ if (!ts)
+ return 0;
+
+ seq = get_seq(ts, addr);
+ if (!seq)
+ return -ENOMEM;
+
+ switch (seq->state) {
+ case SEQ_STATE_UNINITIALIZED:
+ goto end;
+ case SEQ_STATE_CONTENDED:
+ contended_term = sample->time - seq->prev_event_time;
+ ls->wait_time_total += contended_term;
+ if (contended_term < ls->wait_time_min)
+ ls->wait_time_min = contended_term;
+ if (ls->wait_time_max < contended_term)
+ ls->wait_time_max = contended_term;
+ break;
+ case SEQ_STATE_ACQUIRING:
+ case SEQ_STATE_ACQUIRED:
+ case SEQ_STATE_READ_ACQUIRED:
+ case SEQ_STATE_RELEASED:
+ /* broken lock sequence */
+ if (!ls->broken) {
+ ls->broken = 1;
+ bad_hist[BROKEN_ACQUIRED]++;
+ }
+ list_del_init(&seq->list);
+ free(seq);
+ goto end;
+ default:
+ BUG_ON("Unknown state of lock sequence found!\n");
+ break;
+ }
+
+ seq->state = SEQ_STATE_ACQUIRED;
+ ls->nr_acquired++;
+ ls->avg_wait_time = ls->wait_time_total/ls->nr_acquired;
+end:
+ return 0;
+}
+
/* lock oriented handlers */
/* TODO: handlers for CPU oriented, thread oriented */
static struct trace_lock_handler report_lock_ops = {
@@ -790,8 +1169,16 @@ static struct trace_lock_handler report_lock_ops = {
.acquired_event = report_lock_acquired_event,
.contended_event = report_lock_contended_event,
.release_event = report_lock_release_event,
+ .contention_begin_event = report_lock_contention_begin_event,
+ .contention_end_event = report_lock_contention_end_event,
};
+static struct trace_lock_handler contention_lock_ops = {
+ .contention_begin_event = report_lock_contention_begin_event,
+ .contention_end_event = report_lock_contention_end_event,
+};
+
+
static struct trace_lock_handler *trace_handler;
static int evsel__process_lock_acquire(struct evsel *evsel, struct perf_sample *sample)
@@ -822,13 +1209,34 @@ static int evsel__process_lock_release(struct evsel *evsel, struct perf_sample *
return 0;
}
+static int evsel__process_contention_begin(struct evsel *evsel, struct perf_sample *sample)
+{
+ if (trace_handler->contention_begin_event)
+ return trace_handler->contention_begin_event(evsel, sample);
+ return 0;
+}
+
+static int evsel__process_contention_end(struct evsel *evsel, struct perf_sample *sample)
+{
+ if (trace_handler->contention_end_event)
+ return trace_handler->contention_end_event(evsel, sample);
+ return 0;
+}
+
static void print_bad_events(int bad, int total)
{
 /* Output for debug, this has to be removed */
int i;
+ int broken = 0;
const char *name[4] =
{ "acquire", "acquired", "contended", "release" };
+ for (i = 0; i < BROKEN_MAX; i++)
+ broken += bad_hist[i];
+
+ if (broken == 0 && !verbose)
+ return;
+
pr_info("\n=== output for debug===\n\n");
pr_info("bad: %d, total: %d\n", bad, total);
pr_info("bad rate: %.2f %%\n", (double)bad / (double)total * 100);
@@ -1016,6 +1424,83 @@ static void sort_result(void)
}
}
+static const char *get_type_str(struct lock_stat *st)
+{
+ static const struct {
+ unsigned int flags;
+ const char *name;
+ } table[] = {
+ { 0, "semaphore" },
+ { LCB_F_SPIN, "spinlock" },
+ { LCB_F_SPIN | LCB_F_READ, "rwlock:R" },
+ { LCB_F_SPIN | LCB_F_WRITE, "rwlock:W"},
+ { LCB_F_READ, "rwsem:R" },
+ { LCB_F_WRITE, "rwsem:W" },
+ { LCB_F_RT, "rtmutex" },
+ { LCB_F_RT | LCB_F_READ, "rwlock-rt:R" },
+ { LCB_F_RT | LCB_F_WRITE, "rwlock-rt:W"},
+ { LCB_F_PERCPU | LCB_F_READ, "pcpu-sem:R" },
+ { LCB_F_PERCPU | LCB_F_WRITE, "pcpu-sem:W" },
+ { LCB_F_MUTEX, "mutex" },
+ { LCB_F_MUTEX | LCB_F_SPIN, "mutex" },
+ };
+
+ for (unsigned int i = 0; i < ARRAY_SIZE(table); i++) {
+ if (table[i].flags == st->flags)
+ return table[i].name;
+ }
+ return "unknown";
+}
+
+static void sort_contention_result(void)
+{
+ sort_result();
+}
+
+static void print_contention_result(void)
+{
+ struct lock_stat *st;
+ struct lock_key *key;
+ int bad, total;
+
+ list_for_each_entry(key, &lock_keys, list)
+ pr_info("%*s ", key->len, key->header);
+
+ if (show_thread_stats)
+ pr_info(" %10s %s\n\n", "pid", "comm");
+ else
+ pr_info(" %10s %s\n\n", "type", "caller");
+
+ bad = total = 0;
+ if (use_bpf)
+ bad = bad_hist[BROKEN_CONTENDED];
+
+ while ((st = pop_from_result())) {
+ total += use_bpf ? st->nr_contended : 1;
+ if (st->broken)
+ bad++;
+
+ list_for_each_entry(key, &lock_keys, list) {
+ key->print(key, st);
+ pr_info(" ");
+ }
+
+ if (show_thread_stats) {
+ struct thread *t;
+ int pid = st->addr;
+
+ /* st->addr contains tid of thread */
+ t = perf_session__findnew(session, pid);
+ pr_info(" %10d %s\n", pid, thread__comm_str(t));
+ continue;
+ }
+
+ pr_info(" %10s %s\n", get_type_str(st), st->name);
+ }
+
+ print_bad_events(bad, total);
+}
+
static const struct evsel_str_handler lock_tracepoints[] = {
{ "lock:lock_acquire", evsel__process_lock_acquire, }, /* CONFIG_LOCKDEP */
{ "lock:lock_acquired", evsel__process_lock_acquired, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
@@ -1023,6 +1508,11 @@ static const struct evsel_str_handler lock_tracepoints[] = {
{ "lock:lock_release", evsel__process_lock_release, }, /* CONFIG_LOCKDEP */
};
+static const struct evsel_str_handler contention_tracepoints[] = {
+ { "lock:contention_begin", evsel__process_contention_begin, },
+ { "lock:contention_end", evsel__process_contention_end, },
+};
+
static bool force;
static int __cmd_report(bool display_info)
@@ -1031,6 +1521,7 @@ static int __cmd_report(bool display_info)
struct perf_tool eops = {
.sample = process_sample_event,
.comm = perf_event__process_comm,
+ .mmap = perf_event__process_mmap,
.namespaces = perf_event__process_namespaces,
.ordered_events = true,
};
@@ -1046,6 +1537,8 @@ static int __cmd_report(bool display_info)
return PTR_ERR(session);
}
+ /* for lock function check */
+ symbol_conf.sort_by_name = true;
symbol__init(&session->header.env);
if (!perf_session__has_traces(session, "lock record"))
@@ -1056,12 +1549,20 @@ static int __cmd_report(bool display_info)
goto out_delete;
}
- if (setup_output_field(output_fields))
+ if (perf_session__set_tracepoints_handlers(session, contention_tracepoints)) {
+ pr_err("Initializing perf session tracepoint handlers failed\n");
+ goto out_delete;
+ }
+
+ if (setup_output_field(false, output_fields))
goto out_delete;
- if (select_key())
+ if (select_key(false))
goto out_delete;
+ if (show_thread_stats)
+ aggr_mode = LOCK_AGGR_TASK;
+
err = perf_session__process_events(session);
if (err)
goto out_delete;
@@ -1080,26 +1581,184 @@ out_delete:
return err;
}
+static void sighandler(int sig __maybe_unused)
+{
+}
+
+static int __cmd_contention(int argc, const char **argv)
+{
+ int err = -EINVAL;
+ struct perf_tool eops = {
+ .sample = process_sample_event,
+ .comm = perf_event__process_comm,
+ .mmap = perf_event__process_mmap,
+ .ordered_events = true,
+ };
+ struct perf_data data = {
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = force,
+ };
+ struct lock_contention con = {
+ .target = &target,
+ .result = &lockhash_table[0],
+ .map_nr_entries = bpf_map_entries,
+ };
+
+ session = perf_session__new(use_bpf ? NULL : &data, &eops);
+ if (IS_ERR(session)) {
+ pr_err("Initializing perf session failed\n");
+ return PTR_ERR(session);
+ }
+
+ /* for lock function check */
+ symbol_conf.sort_by_name = true;
+ symbol__init(&session->header.env);
+
+ if (use_bpf) {
+ err = target__validate(&target);
+ if (err) {
+ char errbuf[512];
+
+ target__strerror(&target, err, errbuf, 512);
+ pr_err("%s\n", errbuf);
+ goto out_delete;
+ }
+
+ signal(SIGINT, sighandler);
+ signal(SIGCHLD, sighandler);
+ signal(SIGTERM, sighandler);
+
+ con.machine = &session->machines.host;
+
+ con.evlist = evlist__new();
+ if (con.evlist == NULL) {
+ err = -ENOMEM;
+ goto out_delete;
+ }
+
+ err = evlist__create_maps(con.evlist, &target);
+ if (err < 0)
+ goto out_delete;
+
+ if (argc) {
+ err = evlist__prepare_workload(con.evlist, &target,
+ argv, false, NULL);
+ if (err < 0)
+ goto out_delete;
+ }
+
+ if (lock_contention_prepare(&con) < 0) {
+ pr_err("lock contention BPF setup failed\n");
+ goto out_delete;
+ }
+ } else {
+ if (!perf_session__has_traces(session, "lock record"))
+ goto out_delete;
+
+ if (!evlist__find_evsel_by_str(session->evlist,
+ "lock:contention_begin")) {
+ pr_err("lock contention evsel not found\n");
+ goto out_delete;
+ }
+
+ if (perf_session__set_tracepoints_handlers(session,
+ contention_tracepoints)) {
+ pr_err("Initializing perf session tracepoint handlers failed\n");
+ goto out_delete;
+ }
+ }
+
+ if (setup_output_field(true, output_fields))
+ goto out_delete;
+
+ if (select_key(true))
+ goto out_delete;
+
+ if (show_thread_stats)
+ aggr_mode = LOCK_AGGR_TASK;
+ else
+ aggr_mode = LOCK_AGGR_CALLER;
+
+ if (use_bpf) {
+ lock_contention_start();
+ if (argc)
+ evlist__start_workload(con.evlist);
+
+ /* wait for signal */
+ pause();
+
+ lock_contention_stop();
+ lock_contention_read(&con);
+
+ /* abuse bad hist stats for lost entries */
+ bad_hist[BROKEN_CONTENDED] = con.lost;
+ } else {
+ err = perf_session__process_events(session);
+ if (err)
+ goto out_delete;
+ }
+
+ setup_pager();
+
+ sort_contention_result();
+ print_contention_result();
+
+out_delete:
+ evlist__delete(con.evlist);
+ lock_contention_finish();
+ perf_session__delete(session);
+ return err;
+}
+
+
static int __cmd_record(int argc, const char **argv)
{
const char *record_args[] = {
"record", "-R", "-m", "1024", "-c", "1", "--synth", "task",
};
+ const char *callgraph_args[] = {
+ "--call-graph", "fp," __stringify(CONTENTION_STACK_DEPTH),
+ };
unsigned int rec_argc, i, j, ret;
+ unsigned int nr_tracepoints;
+ unsigned int nr_callgraph_args = 0;
const char **rec_argv;
+ bool has_lock_stat = true;
for (i = 0; i < ARRAY_SIZE(lock_tracepoints); i++) {
if (!is_valid_tracepoint(lock_tracepoints[i].name)) {
- pr_err("tracepoint %s is not enabled. "
- "Are CONFIG_LOCKDEP and CONFIG_LOCK_STAT enabled?\n",
- lock_tracepoints[i].name);
- return 1;
+ pr_debug("tracepoint %s is not enabled. "
+ "Are CONFIG_LOCKDEP and CONFIG_LOCK_STAT enabled?\n",
+ lock_tracepoints[i].name);
+ has_lock_stat = false;
+ break;
+ }
+ }
+
+ if (has_lock_stat)
+ goto setup_args;
+
+ for (i = 0; i < ARRAY_SIZE(contention_tracepoints); i++) {
+ if (!is_valid_tracepoint(contention_tracepoints[i].name)) {
+ pr_err("tracepoint %s is not enabled.\n",
+ contention_tracepoints[i].name);
+ return 1;
}
}
- rec_argc = ARRAY_SIZE(record_args) + argc - 1;
+ nr_callgraph_args = ARRAY_SIZE(callgraph_args);
+
+setup_args:
+ rec_argc = ARRAY_SIZE(record_args) + nr_callgraph_args + argc - 1;
+
+ if (has_lock_stat)
+ nr_tracepoints = ARRAY_SIZE(lock_tracepoints);
+ else
+ nr_tracepoints = ARRAY_SIZE(contention_tracepoints);
+
/* factor of 2 is for -e in front of each tracepoint */
- rec_argc += 2 * ARRAY_SIZE(lock_tracepoints);
+ rec_argc += 2 * nr_tracepoints;
rec_argv = calloc(rec_argc + 1, sizeof(char *));
if (!rec_argv)
@@ -1108,11 +1767,24 @@ static int __cmd_record(int argc, const char **argv)
for (i = 0; i < ARRAY_SIZE(record_args); i++)
rec_argv[i] = strdup(record_args[i]);
- for (j = 0; j < ARRAY_SIZE(lock_tracepoints); j++) {
+ for (j = 0; j < nr_tracepoints; j++) {
+ const char *ev_name;
+
+ if (has_lock_stat)
+ ev_name = strdup(lock_tracepoints[j].name);
+ else
+ ev_name = strdup(contention_tracepoints[j].name);
+
+ if (!ev_name)
+ return -ENOMEM;
+
rec_argv[i++] = "-e";
- rec_argv[i++] = strdup(lock_tracepoints[j].name);
+ rec_argv[i++] = ev_name;
}
+ for (j = 0; j < nr_callgraph_args; j++, i++)
+ rec_argv[i] = callgraph_args[j];
+
for (j = 1; j < (unsigned int)argc; j++, i++)
rec_argv[i] = argv[j];
@@ -1123,6 +1795,24 @@ static int __cmd_record(int argc, const char **argv)
return ret;
}
+static int parse_map_entry(const struct option *opt, const char *str,
+ int unset __maybe_unused)
+{
+ unsigned long *len = (unsigned long *)opt->value;
+ unsigned long val;
+ char *endptr;
+
+ errno = 0;
+ val = strtoul(str, &endptr, 0);
+ if (*endptr != '\0' || errno != 0) {
+ pr_err("invalid BPF map length: %s\n", str);
+ return -1;
+ }
+
+ *len = val;
+ return 0;
+}
+
int cmd_lock(int argc, const char **argv)
{
const struct option lock_options[] = {
@@ -1130,6 +1820,10 @@ int cmd_lock(int argc, const char **argv)
OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"),
OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
+ OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
+ "file", "vmlinux pathname"),
+ OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
+ "file", "kallsyms pathname"),
OPT_END()
};
@@ -1154,12 +1848,34 @@ int cmd_lock(int argc, const char **argv)
OPT_PARENT(lock_options)
};
+ struct option contention_options[] = {
+ OPT_STRING('k', "key", &sort_key, "wait_total",
+ "key for sorting (contended / wait_total / wait_max / wait_min / avg_wait)"),
+ OPT_STRING('F', "field", &output_fields, "contended,wait_total,wait_max,avg_wait",
+ "output fields (contended / wait_total / wait_max / wait_min / avg_wait)"),
+ OPT_BOOLEAN('t', "threads", &show_thread_stats,
+ "show per-thread lock stats"),
+ OPT_BOOLEAN('b', "use-bpf", &use_bpf, "use BPF program to collect lock contention stats"),
+ OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
+ "System-wide collection from all CPUs"),
+ OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
+ "List of cpus to monitor"),
+ OPT_STRING('p', "pid", &target.pid, "pid",
+ "Trace on existing process id"),
+ OPT_STRING(0, "tid", &target.tid, "tid",
+ "Trace on existing thread id (exclusive to --pid)"),
+ OPT_CALLBACK(0, "map-nr-entries", &bpf_map_entries, "num",
+ "Max number of BPF map entries", parse_map_entry),
+ OPT_PARENT(lock_options)
+ };
+
const char * const info_usage[] = {
"perf lock info [<options>]",
NULL
};
const char *const lock_subcommands[] = { "record", "report", "script",
- "info", NULL };
+							 "info", "contention", NULL };
const char *lock_usage[] = {
NULL,
NULL
@@ -1168,6 +1884,10 @@ int cmd_lock(int argc, const char **argv)
"perf lock report [<options>]",
NULL
};
+ const char * const contention_usage[] = {
+ "perf lock contention [<options>]",
+ NULL
+ };
unsigned int i;
int rc = 0;
@@ -1203,6 +1923,20 @@ int cmd_lock(int argc, const char **argv)
/* recycling report_lock_ops */
trace_handler = &report_lock_ops;
rc = __cmd_report(true);
+ } else if (strlen(argv[0]) > 2 && strstarts("contention", argv[0])) {
+ trace_handler = &contention_lock_ops;
+ sort_key = "wait_total";
+ output_fields = "contended,wait_total,wait_max,avg_wait";
+
+#ifndef HAVE_BPF_SKEL
+ set_option_nobuild(contention_options, 'b', "use-bpf",
+ "no BUILD_BPF_SKEL=1", false);
+#endif
+ if (argc) {
+ argc = parse_options(argc, argv, contention_options,
+ contention_usage, 0);
+ }
+ rc = __cmd_contention(argc, argv);
} else {
usage_with_options(lock_usage, lock_options);
}
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 9a71f0330137..4713f0f3a6cf 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -1388,6 +1388,11 @@ static struct perf_event_header finished_round_event = {
.type = PERF_RECORD_FINISHED_ROUND,
};
+static struct perf_event_header finished_init_event = {
+ .size = sizeof(struct perf_event_header),
+ .type = PERF_RECORD_FINISHED_INIT,
+};
+
static void record__adjust_affinity(struct record *rec, struct mmap *map)
{
if (rec->opts.affinity != PERF_AFFINITY_SYS &&
@@ -1696,6 +1701,14 @@ static int record__synthesize_workload(struct record *rec, bool tail)
return err;
}
+static int write_finished_init(struct record *rec, bool tail)
+{
+ if (rec->opts.tail_synthesize != tail)
+ return 0;
+
+ return record__write(rec, NULL, &finished_init_event, sizeof(finished_init_event));
+}
+
static int record__synthesize(struct record *rec, bool tail);
static int
@@ -1710,6 +1723,8 @@ record__switch_output(struct record *rec, bool at_exit)
record__aio_mmap_read_sync(rec);
+ write_finished_init(rec, true);
+
record__synthesize(rec, true);
if (target__none(&rec->opts.target))
record__synthesize_workload(rec, true);
@@ -1764,6 +1779,7 @@ record__switch_output(struct record *rec, bool at_exit)
*/
if (target__none(&rec->opts.target))
record__synthesize_workload(rec, false);
+ write_finished_init(rec, false);
}
return fd;
}
@@ -1834,13 +1850,11 @@ static int record__synthesize(struct record *rec, bool tail)
goto out;
/* Synthesize id_index before auxtrace_info */
- if (rec->opts.auxtrace_sample_mode || rec->opts.full_auxtrace) {
- err = perf_event__synthesize_id_index(tool,
- process_synthesized_event,
- session->evlist, machine);
- if (err)
- goto out;
- }
+ err = perf_event__synthesize_id_index(tool,
+ process_synthesized_event,
+ session->evlist, machine);
+ if (err)
+ goto out;
if (rec->opts.full_auxtrace) {
err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
@@ -2421,6 +2435,15 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
trigger_ready(&auxtrace_snapshot_trigger);
trigger_ready(&switch_output_trigger);
perf_hooks__invoke_record_start();
+
+ /*
+ * Must write FINISHED_INIT so it will be seen after all other
+ * synthesized user events, but before any regular events.
+ */
+ err = write_finished_init(rec, false);
+ if (err < 0)
+ goto out_child;
+
for (;;) {
unsigned long long hits = thread->samples;
@@ -2565,6 +2588,8 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n",
record__waking(rec));
+ write_finished_init(rec, true);
+
if (target__none(&rec->opts.target))
record__synthesize_workload(rec, true);
@@ -3193,6 +3218,8 @@ static struct option __record_options[] = {
OPT_BOOLEAN(0, "code-page-size", &record.opts.sample_code_page_size,
"Record the sampled code address (ip) page size"),
OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
+ OPT_BOOLEAN(0, "sample-identifier", &record.opts.sample_identifier,
+ "Record the sample identifier"),
OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
&record.opts.sample_time_set,
"Record the sample timestamps"),
@@ -3805,6 +3832,9 @@ int cmd_record(int argc, const char **argv)
goto out_opts;
}
+ if (rec->opts.kcore)
+ rec->opts.text_poke = true;
+
if (rec->opts.kcore || record__threads_enabled(rec))
rec->data.is_dir = true;
@@ -3966,8 +3996,15 @@ int cmd_record(int argc, const char **argv)
arch__add_leaf_frame_record_opts(&rec->opts);
err = -ENOMEM;
- if (evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
- usage_with_options(record_usage, record_options);
+ if (evlist__create_maps(rec->evlist, &rec->opts.target) < 0) {
+ if (rec->opts.target.pid != NULL) {
+ pr_err("Couldn't create thread/CPU maps: %s\n",
+ errno == ENOENT ? "No such process" : str_error_r(errno, errbuf, sizeof(errbuf)));
+ goto out;
+ }
+ else
+ usage_with_options(record_usage, record_options);
+ }
err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
if (err)
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index afe4a5539ecc..91ed41cc7d88 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -74,7 +74,9 @@ struct report {
#ifdef HAVE_SLANG_SUPPORT
bool use_tui;
#endif
+#ifdef HAVE_GTK2_SUPPORT
bool use_gtk;
+#endif
bool use_stdio;
bool show_full_info;
bool show_threads;
@@ -1227,7 +1229,9 @@ int cmd_report(int argc, const char **argv)
#ifdef HAVE_SLANG_SUPPORT
OPT_BOOLEAN(0, "tui", &report.use_tui, "Use the TUI interface"),
#endif
+#ifdef HAVE_GTK2_SUPPORT
OPT_BOOLEAN(0, "gtk", &report.use_gtk, "Use the GTK2 interface"),
+#endif
OPT_BOOLEAN(0, "stdio", &report.use_stdio,
"Use the stdio interface"),
OPT_BOOLEAN(0, "header", &report.header, "Show data header."),
@@ -1516,8 +1520,10 @@ repeat:
else if (report.use_tui)
use_browser = 1;
#endif
+#ifdef HAVE_GTK2_SUPPORT
else if (report.use_gtk)
use_browser = 2;
+#endif
/* Force tty output for header output and per-thread stat. */
if (report.header || report.header_only || report.show_threads)
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 646bd938927a..a5cf243c337f 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -3355,7 +3355,8 @@ static bool schedstat_events_exposed(void)
static int __cmd_record(int argc, const char **argv)
{
unsigned int rec_argc, i, j;
- const char **rec_argv;
+ char **rec_argv;
+ const char **rec_argv_copy;
const char * const record_args[] = {
"record",
"-a",
@@ -3384,6 +3385,7 @@ static int __cmd_record(int argc, const char **argv)
ARRAY_SIZE(schedstat_args) : 0;
struct tep_event *waking_event;
+ int ret;
/*
* +2 for either "-e", "sched:sched_wakeup" or
@@ -3391,14 +3393,18 @@ static int __cmd_record(int argc, const char **argv)
*/
rec_argc = ARRAY_SIZE(record_args) + 2 + schedstat_argc + argc - 1;
rec_argv = calloc(rec_argc + 1, sizeof(char *));
-
if (rec_argv == NULL)
return -ENOMEM;
+ rec_argv_copy = calloc(rec_argc + 1, sizeof(char *));
+ if (rec_argv_copy == NULL) {
+ free(rec_argv);
+ return -ENOMEM;
+ }
for (i = 0; i < ARRAY_SIZE(record_args); i++)
rec_argv[i] = strdup(record_args[i]);
- rec_argv[i++] = "-e";
+ rec_argv[i++] = strdup("-e");
waking_event = trace_event__tp_format("sched", "sched_waking");
if (!IS_ERR(waking_event))
rec_argv[i++] = strdup("sched:sched_waking");
@@ -3409,11 +3415,19 @@ static int __cmd_record(int argc, const char **argv)
rec_argv[i++] = strdup(schedstat_args[j]);
for (j = 1; j < (unsigned int)argc; j++, i++)
- rec_argv[i] = argv[j];
+ rec_argv[i] = strdup(argv[j]);
BUG_ON(i != rec_argc);
- return cmd_record(i, rec_argv);
+ memcpy(rec_argv_copy, rec_argv, sizeof(char *) * rec_argc);
+ ret = cmd_record(rec_argc, rec_argv_copy);
+
+ for (i = 0; i < rec_argc; i++)
+ free(rec_argv[i]);
+ free(rec_argv);
+ free(rec_argv_copy);
+
+ return ret;
}
int cmd_sched(int argc, const char **argv)
@@ -3563,7 +3577,7 @@ int cmd_sched(int argc, const char **argv)
if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
return __cmd_record(argc, argv);
- } else if (!strncmp(argv[0], "lat", 3)) {
+ } else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) {
sched.tp_handler = &lat_ops;
if (argc > 1) {
argc = parse_options(argc, argv, latency_options, latency_usage, 0);
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index c689054002cc..13580a9c50b8 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -125,6 +125,8 @@ enum perf_output_field {
PERF_OUTPUT_CODE_PAGE_SIZE = 1ULL << 34,
PERF_OUTPUT_INS_LAT = 1ULL << 35,
PERF_OUTPUT_BRSTACKINSNLEN = 1ULL << 36,
+ PERF_OUTPUT_MACHINE_PID = 1ULL << 37,
+ PERF_OUTPUT_VCPU = 1ULL << 38,
};
struct perf_script {
@@ -193,6 +195,8 @@ struct output_option {
{.str = "code_page_size", .field = PERF_OUTPUT_CODE_PAGE_SIZE},
{.str = "ins_lat", .field = PERF_OUTPUT_INS_LAT},
{.str = "brstackinsnlen", .field = PERF_OUTPUT_BRSTACKINSNLEN},
+ {.str = "machine_pid", .field = PERF_OUTPUT_MACHINE_PID},
+ {.str = "vcpu", .field = PERF_OUTPUT_VCPU},
};
enum {
@@ -746,6 +750,13 @@ static int perf_sample__fprintf_start(struct perf_script *script,
int printed = 0;
char tstr[128];
+ if (PRINT_FIELD(MACHINE_PID) && sample->machine_pid)
+ printed += fprintf(fp, "VM:%5d ", sample->machine_pid);
+
+ /* Print VCPU only for guest events i.e. with machine_pid */
+ if (PRINT_FIELD(VCPU) && sample->machine_pid)
+ printed += fprintf(fp, "VCPU:%03d ", sample->vcpu);
+
if (PRINT_FIELD(COMM)) {
const char *comm = thread ? thread__comm_str(thread) : ":-1";
@@ -3633,6 +3644,9 @@ int process_thread_map_event(struct perf_session *session,
struct perf_tool *tool = session->tool;
struct perf_script *script = container_of(tool, struct perf_script, tool);
+ if (dump_trace)
+ perf_event__fprintf_thread_map(event, stdout);
+
if (script->threads) {
pr_warning("Extra thread map event, ignoring.\n");
return 0;
@@ -3652,6 +3666,9 @@ int process_cpu_map_event(struct perf_session *session,
struct perf_tool *tool = session->tool;
struct perf_script *script = container_of(tool, struct perf_script, tool);
+ if (dump_trace)
+ perf_event__fprintf_cpu_map(event, stdout);
+
if (script->cpus) {
pr_warning("Extra cpu map event, ignoring.\n");
return 0;
@@ -3740,6 +3757,7 @@ int cmd_script(int argc, const char **argv)
bool header = false;
bool header_only = false;
bool script_started = false;
+ bool unsorted_dump = false;
char *rec_script_path = NULL;
char *rep_script_path = NULL;
struct perf_session *session;
@@ -3788,6 +3806,8 @@ int cmd_script(int argc, const char **argv)
const struct option options[] = {
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
+ OPT_BOOLEAN(0, "dump-unsorted-raw-trace", &unsorted_dump,
+ "dump unsorted raw trace in ASCII"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('L', "Latency", &latency_format,
@@ -3841,7 +3861,7 @@ int cmd_script(int argc, const char **argv)
OPT_CALLBACK_OPTARG(0, "xed", NULL, NULL, NULL,
"Run xed disassembler on output", parse_xed),
OPT_CALLBACK_OPTARG(0, "call-trace", &itrace_synth_opts, NULL, NULL,
- "Decode calls from from itrace", parse_call_trace),
+ "Decode calls from itrace", parse_call_trace),
OPT_CALLBACK_OPTARG(0, "call-ret-trace", &itrace_synth_opts, NULL, NULL,
"Decode calls and returns from itrace", parse_callret_trace),
OPT_STRING(0, "graph-function", &symbol_conf.graph_function, "symbol[,symbol...]",
@@ -3950,6 +3970,11 @@ int cmd_script(int argc, const char **argv)
data.path = input_name;
data.force = symbol_conf.force;
+ if (unsorted_dump) {
+ dump_trace = true;
+ script.tool.ordered_events = false;
+ }
+
if (symbol__validate_sym_arguments())
return -1;
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index d2ecd4d29624..54cd29d07ca8 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -826,6 +826,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
}
evlist__for_each_entry(evsel_list, counter) {
+ counter->reset_group = false;
if (bpf_counter__load(counter, &target))
return -1;
if (!evsel__is_bpf(counter))
@@ -966,18 +967,18 @@ try_again_reset:
return err;
}
- /*
- * Enable counters and exec the command:
- */
- if (forks) {
- err = enable_counters();
- if (err)
- return -1;
+ err = enable_counters();
+ if (err)
+ return -1;
+
+ /* Exec the command, if any */
+ if (forks)
evlist__start_workload(evsel_list);
- t0 = rdclock();
- clock_gettime(CLOCK_MONOTONIC, &ref_time);
+ t0 = rdclock();
+ clock_gettime(CLOCK_MONOTONIC, &ref_time);
+ if (forks) {
if (interval || timeout || evlist__ctlfd_initialized(evsel_list))
status = dispatch_events(forks, timeout, interval, &times);
if (child_pid != -1) {
@@ -995,13 +996,6 @@ try_again_reset:
if (WIFSIGNALED(status))
psignal(WTERMSIG(status), argv[0]);
} else {
- err = enable_counters();
- if (err)
- return -1;
-
- t0 = rdclock();
- clock_gettime(CLOCK_MONOTONIC, &ref_time);
-
status = dispatch_events(forks, timeout, interval, &times);
}
@@ -1256,6 +1250,8 @@ static struct option stat_options[] = {
"Merge identical named hybrid events"),
OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator",
"print counts with custom separator"),
+ OPT_BOOLEAN('j', "json-output", &stat_config.json_output,
+ "print counts in JSON format"),
OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
"monitor event in cgroup name only", parse_stat_cgroups),
OPT_STRING(0, "for-each-cgroup", &stat_config.cgroup_list, "name",
@@ -1442,6 +1438,7 @@ static aggr_cpu_id_get_t aggr_mode__get_aggr(enum aggr_mode aggr_mode)
case AGGR_GLOBAL:
case AGGR_THREAD:
case AGGR_UNSET:
+ case AGGR_MAX:
default:
return NULL;
}
@@ -1466,6 +1463,7 @@ static aggr_get_id_t aggr_mode__get_id(enum aggr_mode aggr_mode)
case AGGR_GLOBAL:
case AGGR_THREAD:
case AGGR_UNSET:
+ case AGGR_MAX:
default:
return NULL;
}
@@ -1616,6 +1614,7 @@ static aggr_cpu_id_get_t aggr_mode__get_aggr_file(enum aggr_mode aggr_mode)
case AGGR_GLOBAL:
case AGGR_THREAD:
case AGGR_UNSET:
+ case AGGR_MAX:
default:
return NULL;
}
@@ -1636,6 +1635,7 @@ static aggr_get_id_t aggr_mode__get_id_file(enum aggr_mode aggr_mode)
case AGGR_GLOBAL:
case AGGR_THREAD:
case AGGR_UNSET:
+ case AGGR_MAX:
default:
return NULL;
}
@@ -1686,12 +1686,6 @@ static int add_default_attributes(void)
{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES },
};
- struct perf_event_attr default_sw_attrs[] = {
- { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
- { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES },
- { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS },
- { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS },
-};
/*
* Detailed stats (-d), covering the L1 and last level data caches:
@@ -1783,6 +1777,9 @@ static int add_default_attributes(void)
(PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
};
+
+ struct perf_event_attr default_null_attrs[] = {};
+
/* Set attrs if no event is selected and !null_run: */
if (stat_config.null_run)
return 0;
@@ -1861,22 +1858,11 @@ static int add_default_attributes(void)
unsigned int max_level = 1;
char *str = NULL;
bool warn = false;
- const char *pmu_name = "cpu";
+ const char *pmu_name = arch_get_topdown_pmu_name(evsel_list, true);
if (!force_metric_only)
stat_config.metric_only = true;
- if (perf_pmu__has_hybrid()) {
- if (!evsel_list->hybrid_pmu_name) {
- pr_warning("WARNING: default to use cpu_core topdown events\n");
- evsel_list->hybrid_pmu_name = perf_pmu__hybrid_type_to_pmu("core");
- }
-
- pmu_name = evsel_list->hybrid_pmu_name;
- if (!pmu_name)
- return -1;
- }
-
if (pmu_have_event(pmu_name, topdown_metric_L2_attrs[5])) {
metric_attrs = topdown_metric_L2_attrs;
max_level = 2;
@@ -1947,30 +1933,6 @@ setup_metrics:
}
if (!evsel_list->core.nr_entries) {
- if (perf_pmu__has_hybrid()) {
- struct parse_events_error errinfo;
- const char *hybrid_str = "cycles,instructions,branches,branch-misses";
-
- if (target__has_cpu(&target))
- default_sw_attrs[0].config = PERF_COUNT_SW_CPU_CLOCK;
-
- if (evlist__add_default_attrs(evsel_list,
- default_sw_attrs) < 0) {
- return -1;
- }
-
- parse_events_error__init(&errinfo);
- err = parse_events(evsel_list, hybrid_str, &errinfo);
- if (err) {
- fprintf(stderr,
- "Cannot set up hybrid events %s: %d\n",
- hybrid_str, err);
- parse_events_error__print(&errinfo, hybrid_str);
- }
- parse_events_error__exit(&errinfo);
- return err ? -1 : 0;
- }
-
if (target__has_cpu(&target))
default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK;
@@ -1988,7 +1950,8 @@ setup_metrics:
return -1;
stat_config.topdown_level = TOPDOWN_MAX_LEVEL;
- if (arch_evlist__add_default_attrs(evsel_list) < 0)
+ /* Platform specific attrs */
+ if (evlist__add_default_attrs(evsel_list, default_null_attrs) < 0)
return -1;
}
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index afce731cec16..e2e9ad929baf 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -36,6 +36,7 @@
#include "util/data.h"
#include "util/debug.h"
#include "util/string2.h"
+#include "util/tracepoint.h"
#include <linux/err.h>
#ifdef LACKS_OPEN_MEMSTREAM_PROTOTYPE
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index f075cf37a65e..0bd9d01c0df9 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -53,6 +53,7 @@
#include "trace-event.h"
#include "util/parse-events.h"
#include "util/bpf-loader.h"
+#include "util/tracepoint.h"
#include "callchain.h"
#include "print_binary.h"
#include "string2.h"
@@ -2748,7 +2749,7 @@ static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel,
/*
* Suppress this argument if its value is zero and
- * and we don't have a string associated in an
+ * we don't have a string associated in an
* strarray for it.
*/
if (val == 0 &&
diff --git a/tools/perf/builtin.h b/tools/perf/builtin.h
index 7303e80a639c..d03afea86217 100644
--- a/tools/perf/builtin.h
+++ b/tools/perf/builtin.h
@@ -38,6 +38,7 @@ int cmd_mem(int argc, const char **argv);
int cmd_data(int argc, const char **argv);
int cmd_ftrace(int argc, const char **argv);
int cmd_daemon(int argc, const char **argv);
+int cmd_kwork(int argc, const char **argv);
int find_scripts(char **scripts_array, char **scripts_path_array, int num,
int pathlen);
diff --git a/tools/perf/command-list.txt b/tools/perf/command-list.txt
index 4aa034aefa33..8fcab5ad00c5 100644
--- a/tools/perf/command-list.txt
+++ b/tools/perf/command-list.txt
@@ -18,6 +18,7 @@ perf-iostat mainporcelain common
perf-kallsyms mainporcelain common
perf-kmem mainporcelain common
perf-kvm mainporcelain common
+perf-kwork mainporcelain common
perf-list mainporcelain common
perf-lock mainporcelain common
perf-mem mainporcelain common
diff --git a/tools/perf/include/perf/perf_dlfilter.h b/tools/perf/include/perf/perf_dlfilter.h
index 3eef03d661b4..a26e2f129f83 100644
--- a/tools/perf/include/perf/perf_dlfilter.h
+++ b/tools/perf/include/perf/perf_dlfilter.h
@@ -9,6 +9,12 @@
#include <linux/perf_event.h>
#include <linux/types.h>
+/*
+ * The following macro can be used to determine if this header defines
+ * perf_dlfilter_sample machine_pid and vcpu.
+ */
+#define PERF_DLFILTER_HAS_MACHINE_PID
+
/* Definitions for perf_dlfilter_sample flags */
enum {
PERF_DLFILTER_FLAG_BRANCH = 1ULL << 0,
@@ -62,6 +68,8 @@ struct perf_dlfilter_sample {
__u64 raw_callchain_nr; /* Number of raw_callchain entries */
const __u64 *raw_callchain; /* Refer <linux/perf_event.h> */
const char *event;
+ __s32 machine_pid;
+ __s32 vcpu;
};
/*
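The PERF_DLFILTER_HAS_MACHINE_PID macro added above is a feature-test define: a dlfilter built against older headers keeps compiling, while one built against this header can reference the new machine_pid and vcpu sample fields. A minimal sketch of how a filter might use it, assuming the filter_event() entry point and the <perf/perf_dlfilter.h> include path used by the in-tree example dlfilters (the file name and printed format are illustrative, not part of this patch):

    /* dlfilter-show-guest.c: hypothetical dlfilter using the new fields */
    #include <perf/perf_dlfilter.h>
    #include <stdio.h>

    /* Filled in by perf when the filter is loaded */
    struct perf_dlfilter_fns perf_dlfilter_fns;

    int filter_event(void *data, const struct perf_dlfilter_sample *sample, void *ctx)
    {
    #ifdef PERF_DLFILTER_HAS_MACHINE_PID
            /* machine_pid is non-zero only for guest samples */
            if (sample->machine_pid)
                    fprintf(stderr, "VM:%d VCPU:%d\n",
                            sample->machine_pid, sample->vcpu);
    #endif
            return 0; /* 0 keeps the sample, per perf-dlfilter(1) */
    }

Built as a shared object, such a filter would be loaded with perf script --dlfilter, complementing the machine_pid and vcpu output fields added to builtin-script.c earlier in this series.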
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 0170cb0819d6..c21b3973641a 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -91,6 +91,7 @@ static struct cmd_struct commands[] = {
{ "data", cmd_data, 0 },
{ "ftrace", cmd_ftrace, 0 },
{ "daemon", cmd_daemon, 0 },
+ { "kwork", cmd_kwork, 0 },
};
struct pager_config {
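The kwork hunks above (builtin.h, command-list.txt and perf.c) follow the usual wiring for a new perf subcommand: declare the cmd_*() entry point, list the command so the build and completion machinery pick it up, and register it in the commands[] table. A minimal sketch of those pieces for a hypothetical subcommand, with purely illustrative names (a real builtin also needs its object added in tools/perf/Build):

    /* builtin-foo.c: skeleton entry point for a hypothetical 'perf foo' */
    #include "builtin.h"
    #include <stdio.h>

    int cmd_foo(int argc, const char **argv)
    {
            (void)argc;
            (void)argv;
            printf("perf foo: nothing to do yet\n");
            return 0;
    }

    /*
     * builtin.h:          int cmd_foo(int argc, const char **argv);
     * command-list.txt:   perf-foo   mainporcelain common
     * perf.c:             { "foo", cmd_foo, 0 },  added to commands[]
     * tools/perf/Build:   perf-y += builtin-foo.o
     */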
diff --git a/tools/perf/pmu-events/Build b/tools/perf/pmu-events/Build
index a055dee6a46a..04ef95174660 100644
--- a/tools/perf/pmu-events/Build
+++ b/tools/perf/pmu-events/Build
@@ -1,7 +1,3 @@
-hostprogs := jevents
-
-jevents-y += json.o jsmn.o jevents.o
-HOSTCFLAGS_jevents.o = -I$(srctree)/tools/include
pmu-events-y += pmu-events.o
JDIR = pmu-events/arch/$(SRCARCH)
JSON = $(shell [ -d $(JDIR) ] && \
@@ -9,10 +5,23 @@ JSON = $(shell [ -d $(JDIR) ] && \
JDIR_TEST = pmu-events/arch/test
JSON_TEST = $(shell [ -d $(JDIR_TEST) ] && \
find $(JDIR_TEST) -name '*.json')
+JEVENTS_PY = pmu-events/jevents.py
+
+ifeq ($(JEVENTS_ARCH),)
+JEVENTS_ARCH=$(SRCARCH)
+endif
#
# Locate/process JSON files in pmu-events/arch/
# directory and create tables in pmu-events.c.
#
-$(OUTPUT)pmu-events/pmu-events.c: $(JSON) $(JSON_TEST) $(JEVENTS)
- $(Q)$(call echo-cmd,gen)$(JEVENTS) $(SRCARCH) pmu-events/arch $(OUTPUT)pmu-events/pmu-events.c $(V)
+
+ifeq ($(NO_JEVENTS),1)
+$(OUTPUT)pmu-events/pmu-events.c: pmu-events/empty-pmu-events.c
+ $(call rule_mkdir)
+ $(Q)$(call echo-cmd,gen)cp $< $@
+else
+$(OUTPUT)pmu-events/pmu-events.c: $(JSON) $(JSON_TEST) $(JEVENTS_PY)
+ $(call rule_mkdir)
+ $(Q)$(call echo-cmd,gen)$(PYTHON) $(JEVENTS_PY) $(JEVENTS_ARCH) pmu-events/arch $@
+endif
diff --git a/tools/perf/pmu-events/arch/arm64/mapfile.csv b/tools/perf/pmu-events/arch/arm64/mapfile.csv
index ed29e4433c67..406f6edd4e12 100644
--- a/tools/perf/pmu-events/arch/arm64/mapfile.csv
+++ b/tools/perf/pmu-events/arch/arm64/mapfile.csv
@@ -27,7 +27,9 @@
0x00000000410fd0d0,v1,arm/cortex-a77,core
0x00000000410fd400,v1,arm/neoverse-v1,core
0x00000000410fd410,v1,arm/cortex-a78,core
+0x00000000410fd4b0,v1,arm/cortex-a78,core
0x00000000410fd440,v1,arm/cortex-x1,core
+0x00000000410fd4c0,v1,arm/cortex-x1,core
0x00000000410fd460,v1,arm/cortex-a510,core
0x00000000410fd470,v1,arm/cortex-a710,core
0x00000000410fd480,v1,arm/cortex-x2,core
diff --git a/tools/perf/pmu-events/arch/s390/cf_z16/pai.json b/tools/perf/pmu-events/arch/s390/cf_z16/pai.json
new file mode 100644
index 000000000000..cf8563d059b9
--- /dev/null
+++ b/tools/perf/pmu-events/arch/s390/cf_z16/pai.json
@@ -0,0 +1,1101 @@
+[
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4096",
+ "EventName": "CRYPTO_ALL",
+ "BriefDescription": "CRYPTO ALL",
+ "PublicDescription": "Sums of all non zero cryptography counters"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4097",
+ "EventName": "KM_DEA",
+ "BriefDescription": "KM DEA",
+ "PublicDescription": "KM-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4098",
+ "EventName": "KM_TDEA_128",
+ "BriefDescription": "KM TDEA 128",
+ "PublicDescription": "KM-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4099",
+ "EventName": "KM_TDEA_192",
+ "BriefDescription": "KM TDEA 192",
+ "PublicDescription": "KM-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4100",
+ "EventName": "KM_ENCRYPTED_DEA",
+ "BriefDescription": "KM ENCRYPTED DEA",
+ "PublicDescription": "KM-Encrypted-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4101",
+ "EventName": "KM_ENCRYPTED_TDEA_128",
+ "BriefDescription": "KM ENCRYPTED TDEA 128",
+ "PublicDescription": "KM-Encrypted-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4102",
+ "EventName": "KM_ENCRYPTED_TDEA_192",
+ "BriefDescription": "KM ENCRYPTED TDEA 192",
+ "PublicDescription": "KM-Encrypted-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4103",
+ "EventName": "KM_AES_128",
+ "BriefDescription": "KM AES 128",
+ "PublicDescription": "KM-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4104",
+ "EventName": "KM_AES_192",
+ "BriefDescription": "KM AES 192",
+ "PublicDescription": "KM-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4105",
+ "EventName": "KM_AES_256",
+ "BriefDescription": "KM AES 256",
+ "PublicDescription": "KM-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4106",
+ "EventName": "KM_ENCRYPTED_AES_128",
+ "BriefDescription": "KM ENCRYPTED AES 128",
+ "PublicDescription": "KM-Encrypted-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4107",
+ "EventName": "KM_ENCRYPTED_AES_192",
+ "BriefDescription": "KM ENCRYPTED AES 192",
+ "PublicDescription": "KM-Encrypted-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4108",
+ "EventName": "KM_ENCRYPTED_AES_256",
+ "BriefDescription": "KM ENCRYPTED AES 256",
+ "PublicDescription": "KM-Encrypted-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4109",
+ "EventName": "KM_XTS_AES_128",
+ "BriefDescription": "KM XTS AES 128",
+ "PublicDescription": "KM-XTS-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4110",
+ "EventName": "KM_XTS_AES_256",
+ "BriefDescription": "KM XTS AES 256",
+ "PublicDescription": "KM-XTS-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4111",
+ "EventName": "KM_XTS_ENCRYPTED_AES_128",
+ "BriefDescription": "KM XTS ENCRYPTED AES 128",
+ "PublicDescription": "KM-XTS-Encrypted-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4112",
+ "EventName": "KM_XTS_ENCRYPTED_AES_256",
+ "BriefDescription": "KM XTS ENCRYPTED AES 256",
+ "PublicDescription": "KM-XTS-Encrypted-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4113",
+ "EventName": "KMC_DEA",
+ "BriefDescription": "KMC DEA",
+ "PublicDescription": "KMC-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4114",
+ "EventName": "KMC_TDEA_128",
+ "BriefDescription": "KMC TDEA 128",
+ "PublicDescription": "KMC-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4115",
+ "EventName": "KMC_TDEA_192",
+ "BriefDescription": "KMC TDEA 192",
+ "PublicDescription": "KMC-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4116",
+ "EventName": "KMC_ENCRYPTED_DEA",
+ "BriefDescription": "KMC ENCRYPTED DEA",
+ "PublicDescription": "KMC-Encrypted-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4117",
+ "EventName": "KMC_ENCRYPTED_TDEA_128",
+ "BriefDescription": "KMC ENCRYPTED TDEA 128",
+ "PublicDescription": "KMC-Encrypted-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4118",
+ "EventName": "KMC_ENCRYPTED_TDEA_192",
+ "BriefDescription": "KMC ENCRYPTED TDEA 192",
+ "PublicDescription": "KMC-Encrypted-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4119",
+ "EventName": "KMC_AES_128",
+ "BriefDescription": "KMC AES 128",
+ "PublicDescription": "KMC-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4120",
+ "EventName": "KMC_AES_192",
+ "BriefDescription": "KMC AES 192",
+ "PublicDescription": "KMC-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4121",
+ "EventName": "KMC_AES_256",
+ "BriefDescription": "KMC AES 256",
+ "PublicDescription": "KMC-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4122",
+ "EventName": "KMC_ENCRYPTED_AES_128",
+ "BriefDescription": "KMC ENCRYPTED AES 128",
+ "PublicDescription": "KMC-Encrypted-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4123",
+ "EventName": "KMC_ENCRYPTED_AES_192",
+ "BriefDescription": "KMC ENCRYPTED AES 192",
+ "PublicDescription": "KMC-Encrypted-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4124",
+ "EventName": "KMC_ENCRYPTED_AES_256",
+ "BriefDescription": "KMC ENCRYPTED AES 256",
+ "PublicDescription": "KMC-Encrypted-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4125",
+ "EventName": "KMC_PRNG",
+ "BriefDescription": "KMC PRNG",
+ "PublicDescription": "KMC-PRNG function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4126",
+ "EventName": "KMA_GCM_AES_128",
+ "BriefDescription": "KMA GCM AES 128",
+ "PublicDescription": "KMA-GCM-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4127",
+ "EventName": "KMA_GCM_AES_192",
+ "BriefDescription": "KMA GCM AES 192",
+ "PublicDescription": "KMA-GCM-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4128",
+ "EventName": "KMA_GCM_AES_256",
+ "BriefDescription": "KMA GCM AES 256",
+ "PublicDescription": "KMA-GCM-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4129",
+ "EventName": "KMA_GCM_ENCRYPTED_AES_128",
+ "BriefDescription": "KMA GCM ENCRYPTED AES 128",
+ "PublicDescription": "KMA-GCM-Encrypted-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4130",
+ "EventName": "KMA_GCM_ENCRYPTED_AES_192",
+ "BriefDescription": "KMA GCM ENCRYPTED AES 192",
+ "PublicDescription": "KMA-GCM-Encrypted-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4131",
+ "EventName": "KMA_GCM_ENCRYPTED_AES_256",
+ "BriefDescription": "KMA GCM ENCRYPTED AES 256",
+ "PublicDescription": "KMA-GCM-Encrypted-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4132",
+ "EventName": "KMF_DEA",
+ "BriefDescription": "KMF DEA",
+ "PublicDescription": "KMF-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4133",
+ "EventName": "KMF_TDEA_128",
+ "BriefDescription": "KMF TDEA 128",
+ "PublicDescription": "KMF-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4134",
+ "EventName": "KMF_TDEA_192",
+ "BriefDescription": "KMF TDEA 192",
+ "PublicDescription": "KMF-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4135",
+ "EventName": "KMF_ENCRYPTED_DEA",
+ "BriefDescription": "KMF ENCRYPTED DEA",
+ "PublicDescription": "KMF-Encrypted-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4136",
+ "EventName": "KMF_ENCRYPTED_TDEA_128",
+ "BriefDescription": "KMF ENCRYPTED TDEA 128",
+ "PublicDescription": "KMF-Encrypted-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4137",
+ "EventName": "KMF_ENCRYPTED_TDEA_192",
+ "BriefDescription": "KMF ENCRYPTED TDEA 192",
+ "PublicDescription": "KMF-Encrypted-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4138",
+ "EventName": "KMF_AES_128",
+ "BriefDescription": "KMF AES 128",
+ "PublicDescription": "KMF-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4139",
+ "EventName": "KMF_AES_192",
+ "BriefDescription": "KMF AES 192",
+ "PublicDescription": "KMF-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4140",
+ "EventName": "KMF_AES_256",
+ "BriefDescription": "KMF AES 256",
+ "PublicDescription": "KMF-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4141",
+ "EventName": "KMF_ENCRYPTED_AES_128",
+ "BriefDescription": "KMF ENCRYPTED AES 128",
+ "PublicDescription": "KMF-Encrypted-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4142",
+ "EventName": "KMF_ENCRYPTED_AES_192",
+ "BriefDescription": "KMF ENCRYPTED AES 192",
+ "PublicDescription": "KMF-Encrypted-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4143",
+ "EventName": "KMF_ENCRYPTED_AES_256",
+ "BriefDescription": "KMF ENCRYPTED AES 256",
+ "PublicDescription": "KMF-Encrypted-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4144",
+ "EventName": "KMCTR_DEA",
+ "BriefDescription": "KMCTR DEA",
+ "PublicDescription": "KMCTR-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4145",
+ "EventName": "KMCTR_TDEA_128",
+ "BriefDescription": "KMCTR TDEA 128",
+ "PublicDescription": "KMCTR-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4146",
+ "EventName": "KMCTR_TDEA_192",
+ "BriefDescription": "KMCTR TDEA 192",
+ "PublicDescription": "KMCTR-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4147",
+ "EventName": "KMCTR_ENCRYPTED_DEA",
+ "BriefDescription": "KMCTR ENCRYPTED DEA",
+ "PublicDescription": "KMCTR-Encrypted-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4148",
+ "EventName": "KMCTR_ENCRYPTED_TDEA_128",
+ "BriefDescription": "KMCTR ENCRYPTED TDEA 128",
+ "PublicDescription": "KMCTR-Encrypted-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4149",
+ "EventName": "KMCTR_ENCRYPTED_TDEA_192",
+ "BriefDescription": "KMCTR ENCRYPTED TDEA 192",
+ "PublicDescription": "KMCTR-Encrypted-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4150",
+ "EventName": "KMCTR_AES_128",
+ "BriefDescription": "KMCTR AES 128",
+ "PublicDescription": "KMCTR-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4151",
+ "EventName": "KMCTR_AES_192",
+ "BriefDescription": "KMCTR AES 192",
+ "PublicDescription": "KMCTR-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4152",
+ "EventName": "KMCTR_AES_256",
+ "BriefDescription": "KMCTR AES 256",
+ "PublicDescription": "KMCTR-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4153",
+ "EventName": "KMCTR_ENCRYPTED_AES_128",
+ "BriefDescription": "KMCTR ENCRYPTED AES 128",
+ "PublicDescription": "KMCTR-Encrypted-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4154",
+ "EventName": "KMCTR_ENCRYPTED_AES_192",
+ "BriefDescription": "KMCTR ENCRYPTED AES 192",
+ "PublicDescription": "KMCTR-Encrypted-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4155",
+ "EventName": "KMCTR_ENCRYPTED_AES_256",
+ "BriefDescription": "KMCTR ENCRYPTED AES 256",
+ "PublicDescription": "KMCTR-Encrypted-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4156",
+ "EventName": "KMO_DEA",
+ "BriefDescription": "KMO DEA",
+ "PublicDescription": "KMO-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4157",
+ "EventName": "KMO_TDEA_128",
+ "BriefDescription": "KMO TDEA 128",
+ "PublicDescription": "KMO-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4158",
+ "EventName": "KMO_TDEA_192",
+ "BriefDescription": "KMO TDEA 192",
+ "PublicDescription": "KMO-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4159",
+ "EventName": "KMO_ENCRYPTED_DEA",
+ "BriefDescription": "KMO ENCRYPTED DEA",
+ "PublicDescription": "KMO-Encrypted-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4160",
+ "EventName": "KMO_ENCRYPTED_TDEA_128",
+ "BriefDescription": "KMO ENCRYPTED TDEA 128",
+ "PublicDescription": "KMO-Encrypted-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4161",
+ "EventName": "KMO_ENCRYPTED_TDEA_192",
+ "BriefDescription": "KMO ENCRYPTED TDEA 192",
+ "PublicDescription": "KMO-Encrypted-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4162",
+ "EventName": "KMO_AES_128",
+ "BriefDescription": "KMO AES 128",
+ "PublicDescription": "KMO-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4163",
+ "EventName": "KMO_AES_192",
+ "BriefDescription": "KMO AES 192",
+ "PublicDescription": "KMO-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4164",
+ "EventName": "KMO_AES_256",
+ "BriefDescription": "KMO AES 256",
+ "PublicDescription": "KMO-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4165",
+ "EventName": "KMO_ENCRYPTED_AES_128",
+ "BriefDescription": "KMO ENCRYPTED AES 128",
+ "PublicDescription": "KMO-Encrypted-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4166",
+ "EventName": "KMO_ENCRYPTED_AES_192",
+ "BriefDescription": "KMO ENCRYPTED AES 192",
+ "PublicDescription": "KMO-Encrypted-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4167",
+ "EventName": "KMO_ENCRYPTED_AES_256",
+ "BriefDescription": "KMO ENCRYPTED AES 256",
+ "PublicDescription": "KMO-Encrypted-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4168",
+ "EventName": "KIMD_SHA_1",
+ "BriefDescription": "KIMD SHA 1",
+ "PublicDescription": "KIMD-SHA-1 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4169",
+ "EventName": "KIMD_SHA_256",
+ "BriefDescription": "KIMD SHA 256",
+ "PublicDescription": "KIMD-SHA-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4170",
+ "EventName": "KIMD_SHA_512",
+ "BriefDescription": "KIMD SHA 512",
+ "PublicDescription": "KIMD-SHA-512 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4171",
+ "EventName": "KIMD_SHA3_224",
+ "BriefDescription": "KIMD SHA3 224",
+ "PublicDescription": "KIMD-SHA3-224 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4172",
+ "EventName": "KIMD_SHA3_256",
+ "BriefDescription": "KIMD SHA3 256",
+ "PublicDescription": "KIMD-SHA3-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4173",
+ "EventName": "KIMD_SHA3_384",
+ "BriefDescription": "KIMD SHA3 384",
+ "PublicDescription": "KIMD-SHA3-384 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4174",
+ "EventName": "KIMD_SHA3_512",
+ "BriefDescription": "KIMD SHA3 512",
+ "PublicDescription": "KIMD-SHA3-512 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4175",
+ "EventName": "KIMD_SHAKE_128",
+ "BriefDescription": "KIMD SHAKE 128",
+ "PublicDescription": "KIMD-SHAKE-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4176",
+ "EventName": "KIMD_SHAKE_256",
+ "BriefDescription": "KIMD SHAKE 256",
+ "PublicDescription": "KIMD-SHAKE-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4177",
+ "EventName": "KIMD_GHASH",
+ "BriefDescription": "KIMD GHASH",
+ "PublicDescription": "KIMD-GHASH function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4178",
+ "EventName": "KLMD_SHA_1",
+ "BriefDescription": "KLMD SHA 1",
+ "PublicDescription": "KLMD-SHA-1 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4179",
+ "EventName": "KLMD_SHA_256",
+ "BriefDescription": "KLMD SHA 256",
+ "PublicDescription": "KLMD-SHA-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4180",
+ "EventName": "KLMD_SHA_512",
+ "BriefDescription": "KLMD SHA 512",
+ "PublicDescription": "KLMD-SHA-512 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4181",
+ "EventName": "KLMD_SHA3_224",
+ "BriefDescription": "KLMD SHA3 224",
+ "PublicDescription": "KLMD-SHA3-224 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4182",
+ "EventName": "KLMD_SHA3_256",
+ "BriefDescription": "KLMD SHA3 256",
+ "PublicDescription": "KLMD-SHA3-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4183",
+ "EventName": "KLMD_SHA3_384",
+ "BriefDescription": "KLMD SHA3 384",
+ "PublicDescription": "KLMD-SHA3-384 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4184",
+ "EventName": "KLMD_SHA3_512",
+ "BriefDescription": "KLMD SHA3 512",
+ "PublicDescription": "KLMD-SHA3-512 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4185",
+ "EventName": "KLMD_SHAKE_128",
+ "BriefDescription": "KLMD SHAKE 128",
+ "PublicDescription": "KLMD-SHAKE-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4186",
+ "EventName": "KLMD_SHAKE_256",
+ "BriefDescription": "KLMD SHAKE 256",
+ "PublicDescription": "KLMD-SHAKE-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4187",
+ "EventName": "KMAC_DEA",
+ "BriefDescription": "KMAC DEA",
+ "PublicDescription": "KMAC-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4188",
+ "EventName": "KMAC_TDEA_128",
+ "BriefDescription": "KMAC TDEA 128",
+ "PublicDescription": "KMAC-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4189",
+ "EventName": "KMAC_TDEA_192",
+ "BriefDescription": "KMAC TDEA 192",
+ "PublicDescription": "KMAC-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4190",
+ "EventName": "KMAC_ENCRYPTED_DEA",
+ "BriefDescription": "KMAC ENCRYPTED DEA",
+ "PublicDescription": "KMAC-Encrypted-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4191",
+ "EventName": "KMAC_ENCRYPTED_TDEA_128",
+ "BriefDescription": "KMAC ENCRYPTED TDEA 128",
+ "PublicDescription": "KMAC-Encrypted-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4192",
+ "EventName": "KMAC_ENCRYPTED_TDEA_192",
+ "BriefDescription": "KMAC ENCRYPTED TDEA 192",
+ "PublicDescription": "KMAC-Encrypted-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4193",
+ "EventName": "KMAC_AES_128",
+ "BriefDescription": "KMAC AES 128",
+ "PublicDescription": "KMAC-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4194",
+ "EventName": "KMAC_AES_192",
+ "BriefDescription": "KMAC AES 192",
+ "PublicDescription": "KMAC-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4195",
+ "EventName": "KMAC_AES_256",
+ "BriefDescription": "KMAC AES 256",
+ "PublicDescription": "KMAC-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4196",
+ "EventName": "KMAC_ENCRYPTED_AES_128",
+ "BriefDescription": "KMAC ENCRYPTED AES 128",
+ "PublicDescription": "KMAC-Encrypted-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4197",
+ "EventName": "KMAC_ENCRYPTED_AES_192",
+ "BriefDescription": "KMAC ENCRYPTED AES 192",
+ "PublicDescription": "KMAC-Encrypted-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4198",
+ "EventName": "KMAC_ENCRYPTED_AES_256",
+ "BriefDescription": "KMAC ENCRYPTED AES 256",
+ "PublicDescription": "KMAC-Encrypted-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4199",
+ "EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_DEA",
+ "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING DEA",
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4200",
+ "EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_128",
+ "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING TDEA 128",
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4201",
+ "EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_192",
+ "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING TDEA 192",
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4202",
+ "EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_DEA",
+ "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING ENCRYPTED DEA",
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-Encrypted-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4203",
+ "EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_128",
+ "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING ENCRYPTED TDEA 128",
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-Encrypted-TDEA- 128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4204",
+ "EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_192",
+ "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING ENCRYPTED TDEA 192",
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-Encrypted-TDEA- 192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4205",
+ "EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_128",
+ "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING AES 128",
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4206",
+ "EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_192",
+ "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING AES 192",
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4207",
+ "EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_256",
+ "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING AES 256",
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4208",
+ "EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_128",
+ "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING ENCRYPTED AES 128",
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-Encrypted-AES- 128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4209",
+ "EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_192",
+ "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING ENCRYPTED AES 192",
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-Encrypted-AES- 192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4210",
+ "EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_256A",
+ "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING ENCRYPTED AES 256A",
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-Encrypted-AES- 256A function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4211",
+ "EventName": "PCC_COMPUTE_XTS_PARAMETER_USING_AES_128",
+ "BriefDescription": "PCC COMPUTE XTS PARAMETER USING AES 128",
+ "PublicDescription": "PCC-Compute-XTS-Parameter-Using-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4212",
+ "EventName": "PCC_COMPUTE_XTS_PARAMETER_USING_AES_256",
+ "BriefDescription": "PCC COMPUTE XTS PARAMETER USING AES 256",
+ "PublicDescription": "PCC-Compute-XTS-Parameter-Using-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4213",
+ "EventName": "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_128",
+ "BriefDescription": "PCC COMPUTE XTS PARAMETER USING ENCRYPTED AES 128",
+ "PublicDescription": "PCC-Compute-XTS-Parameter-Using-Encrypted-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4214",
+ "EventName": "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_256",
+ "BriefDescription": "PCC COMPUTE XTS PARAMETER USING ENCRYPTED AES 256",
+ "PublicDescription": "PCC-Compute-XTS-Parameter-Using-Encrypted-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4215",
+ "EventName": "PCC_SCALAR_MULTIPLY_P256",
+ "BriefDescription": "PCC SCALAR MULTIPLY P256",
+ "PublicDescription": "PCC-Scalar-Multiply-P256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4216",
+ "EventName": "PCC_SCALAR_MULTIPLY_P384",
+ "BriefDescription": "PCC SCALAR MULTIPLY P384",
+ "PublicDescription": "PCC-Scalar-Multiply-P384 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4217",
+ "EventName": "PCC_SCALAR_MULTIPLY_P521",
+ "BriefDescription": "PCC SCALAR MULTIPLY P521",
+ "PublicDescription": "PCC-Scalar-Multiply-P521 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4218",
+ "EventName": "PCC_SCALAR_MULTIPLY_ED25519",
+ "BriefDescription": "PCC SCALAR MULTIPLY ED25519",
+ "PublicDescription": "PCC-Scalar-Multiply-Ed25519 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4219",
+ "EventName": "PCC_SCALAR_MULTIPLY_ED448",
+ "BriefDescription": "PCC SCALAR MULTIPLY ED448",
+ "PublicDescription": "PCC-Scalar-Multiply-Ed448 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4220",
+ "EventName": "PCC_SCALAR_MULTIPLY_X25519",
+ "BriefDescription": "PCC SCALAR MULTIPLY X25519",
+ "PublicDescription": "PCC-Scalar-Multiply-X25519 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4221",
+ "EventName": "PCC_SCALAR_MULTIPLY_X448",
+ "BriefDescription": "PCC SCALAR MULTIPLY X448",
+ "PublicDescription": "PCC-Scalar-Multiply-X448 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4222",
+ "EventName": "PRNO_SHA_512_DRNG",
+ "BriefDescription": "PRNO SHA 512 DRNG",
+ "PublicDescription": "PRNO-SHA-512-DRNG function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4223",
+ "EventName": "PRNO_TRNG_QUERY_RAW_TO_CONDITIONED_RATIO",
+ "BriefDescription": "PRNO TRNG QUERY RAW TO CONDITIONED RATIO",
+ "PublicDescription": "PRNO-TRNG-Query-Raw-to-Conditioned-Ratio function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4224",
+ "EventName": "PRNO_TRNG",
+ "BriefDescription": "PRNO TRNG",
+ "PublicDescription": "PRNO-TRNG function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4225",
+ "EventName": "KDSA_ECDSA_VERIFY_P256",
+ "BriefDescription": "KDSA ECDSA VERIFY P256",
+ "PublicDescription": "KDSA-ECDSA-Verify-P256 function ending with CC=0 or CC=2"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4226",
+ "EventName": "KDSA_ECDSA_VERIFY_P384",
+ "BriefDescription": "KDSA ECDSA VERIFY P384",
+ "PublicDescription": "KDSA-ECDSA-Verify-P384 function ending with CC=0 or CC=2"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4227",
+ "EventName": "KDSA_ECDSA_VERIFY_P521",
+ "BriefDescription": "KDSA ECDSA VERIFY P521",
+ "PublicDescription": "KDSA-ECDSA-Verify-P521 function ending with CC=0 or CC=2"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4228",
+ "EventName": "KDSA_ECDSA_SIGN_P256",
+ "BriefDescription": "KDSA ECDSA SIGN P256",
+ "PublicDescription": "KDSA-ECDSA-Sign-P256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4229",
+ "EventName": "KDSA_ECDSA_SIGN_P384",
+ "BriefDescription": "KDSA ECDSA SIGN P384",
+ "PublicDescription": "KDSA-ECDSA-Sign-P384 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4230",
+ "EventName": "KDSA_ECDSA_SIGN_P521",
+ "BriefDescription": "KDSA ECDSA SIGN P521",
+ "PublicDescription": "KDSA-ECDSA-Sign-P521 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4231",
+ "EventName": "KDSA_ENCRYPTED_ECDSA_SIGN_P256",
+ "BriefDescription": "KDSA ENCRYPTED ECDSA SIGN P256",
+ "PublicDescription": "KDSA-Encrypted-ECDSA-Sign-P256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4232",
+ "EventName": "KDSA_ENCRYPTED_ECDSA_SIGN_P384",
+ "BriefDescription": "KDSA ENCRYPTED ECDSA SIGN P384",
+ "PublicDescription": "KDSA-Encrypted-ECDSA-Sign-P384 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4233",
+ "EventName": "KDSA_ENCRYPTED_ECDSA_SIGN_P521",
+ "BriefDescription": "KDSA ENCRYPTED ECDSA SIGN P521",
+ "PublicDescription": "KDSA-Encrypted-ECDSA-Sign-P521 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4234",
+ "EventName": "KDSA_EDDSA_VERIFY_ED25519",
+ "BriefDescription": "KDSA EDDSA VERIFY ED25519",
+ "PublicDescription": "KDSA-EdDSA-Verify-Ed25519 function ending with CC=0 or CC=2"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4235",
+ "EventName": "KDSA_EDDSA_VERIFY_ED448",
+ "BriefDescription": "KDSA EDDSA VERIFY ED448",
+ "PublicDescription": "KDSA-EdDSA-Verify-Ed448 function ending with CC=0 or CC=2"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4236",
+ "EventName": "KDSA_EDDSA_SIGN_ED25519",
+ "BriefDescription": "KDSA EDDSA SIGN ED25519",
+ "PublicDescription": "KDSA-EdDSA-Sign-Ed25519 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4237",
+ "EventName": "KDSA_EDDSA_SIGN_ED448",
+ "BriefDescription": "KDSA EDDSA SIGN ED448",
+ "PublicDescription": "KDSA-EdDSA-Sign-Ed448 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4238",
+ "EventName": "KDSA_ENCRYPTED_EDDSA_SIGN_ED25519",
+ "BriefDescription": "KDSA ENCRYPTED EDDSA SIGN ED25519",
+ "PublicDescription": "KDSA-Encrypted-EdDSA-Sign-Ed25519 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4239",
+ "EventName": "KDSA_ENCRYPTED_EDDSA_SIGN_ED448",
+ "BriefDescription": "KDSA ENCRYPTED EDDSA SIGN ED448",
+ "PublicDescription": "KDSA-Encrypted-EdDSA-Sign-Ed448 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4240",
+ "EventName": "PCKMO_ENCRYPT_DEA_KEY",
+ "BriefDescription": "PCKMO ENCRYPT DEA KEY",
+ "PublicDescription": "PCKMO-Encrypt-DEA-key function"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4241",
+ "EventName": "PCKMO_ENCRYPT_TDEA_128_KEY",
+ "BriefDescription": "PCKMO ENCRYPT TDEA 128 KEY",
+ "PublicDescription": "PCKMO-Encrypt-TDEA-128-key function"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4242",
+ "EventName": "PCKMO_ENCRYPT_TDEA_192_KEY",
+ "BriefDescription": "PCKMO ENCRYPT TDEA 192 KEY",
+ "PublicDescription": "PCKMO-Encrypt-TDEA-192-key function"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4243",
+ "EventName": "PCKMO_ENCRYPT_AES_128_KEY",
+ "BriefDescription": "PCKMO ENCRYPT AES 128 KEY",
+ "PublicDescription": "PCKMO-Encrypt-AES-128-key function"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4244",
+ "EventName": "PCKMO_ENCRYPT_AES_192_KEY",
+ "BriefDescription": "PCKMO ENCRYPT AES 192 KEY",
+ "PublicDescription": "PCKMO-Encrypt-AES-192-key function"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4245",
+ "EventName": "PCKMO_ENCRYPT_AES_256_KEY",
+ "BriefDescription": "PCKMO ENCRYPT AES 256 KEY",
+ "PublicDescription": "PCKMO-Encrypt-AES-256-key function"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4246",
+ "EventName": "PCKMO_ENCRYPT_ECC_P256_KEY",
+ "BriefDescription": "PCKMO ENCRYPT ECC P256 KEY",
+ "PublicDescription": "PCKMO-Encrypt-ECC-P256-key function"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4247",
+ "EventName": "PCKMO_ENCRYPT_ECC_P384_KEY",
+ "BriefDescription": "PCKMO ENCRYPT ECC P384 KEY",
+ "PublicDescription": "PCKMO-Encrypt-ECC-P384-key function"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4248",
+ "EventName": "PCKMO_ENCRYPT_ECC_P521_KEY",
+ "BriefDescription": "PCKMO ENCRYPT ECC P521 KEY",
+ "PublicDescription": "PCKMO-Encrypt-ECC-P521-key function"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4249",
+ "EventName": "PCKMO_ENCRYPT_ECC_ED25519_KEY",
+ "BriefDescription": "PCKMO ENCRYPT ECC ED25519 KEY",
+ "PublicDescription": "PCKMO-Encrypt-ECC-Ed25519-key function"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4250",
+ "EventName": "PCKMO_ENCRYPT_ECC_ED448_KEY",
+ "BriefDescription": "PCKMO ENCRYPT ECC ED448 KEY",
+ "PublicDescription": "PCKMO-Encrypt-ECC-Ed448-key function"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4251",
+ "EventName": "IBM_RESERVED_155",
+ "BriefDescription": "IBM RESERVED_155",
+ "PublicDescription": "Reserved for IBM use"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4252",
+ "EventName": "IBM_RESERVED_156",
+ "BriefDescription": "IBM RESERVED_156",
+ "PublicDescription": "Reserved for IBM use"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/test/test_soc/cpu/metrics.json b/tools/perf/pmu-events/arch/test/test_soc/cpu/metrics.json
new file mode 100644
index 000000000000..42d9b5242fd7
--- /dev/null
+++ b/tools/perf/pmu-events/arch/test/test_soc/cpu/metrics.json
@@ -0,0 +1,64 @@
+[
+ {
+ "MetricExpr": "1 / IPC",
+ "MetricName": "CPI"
+ },
+ {
+ "MetricExpr": "inst_retired.any / cpu_clk_unhalted.thread",
+ "MetricName": "IPC",
+ "MetricGroup": "group1"
+ },
+ {
+ "MetricExpr": "idq_uops_not_delivered.core / (4 * (( ( cpu_clk_unhalted.thread / 2 ) * ( 1 + cpu_clk_unhalted.one_thread_active / cpu_clk_unhalted.ref_xclk ) )))",
+ "MetricName": "Frontend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "l1d\\-loads\\-misses / inst_retired.any",
+ "MetricName": "dcache_miss_cpi"
+ },
+ {
+ "MetricExpr": "l1i\\-loads\\-misses / inst_retired.any",
+ "MetricName": "icache_miss_cycles"
+ },
+ {
+ "MetricExpr": "(dcache_miss_cpi + icache_miss_cycles)",
+ "MetricName": "cache_miss_cycles",
+ "MetricGroup": "group1"
+ },
+ {
+ "MetricExpr": "l2_rqsts.demand_data_rd_hit + l2_rqsts.pf_hit + l2_rqsts.rfo_hit",
+ "MetricName": "DCache_L2_All_Hits"
+ },
+ {
+ "MetricExpr": "max(l2_rqsts.all_demand_data_rd - l2_rqsts.demand_data_rd_hit, 0) + l2_rqsts.pf_miss + l2_rqsts.rfo_miss",
+ "MetricName": "DCache_L2_All_Miss"
+ },
+ {
+ "MetricExpr": "dcache_l2_all_hits + dcache_l2_all_miss",
+ "MetricName": "DCache_L2_All"
+ },
+ {
+ "MetricExpr": "d_ratio(dcache_l2_all_hits, dcache_l2_all)",
+ "MetricName": "DCache_L2_Hits"
+ },
+ {
+ "MetricExpr": "d_ratio(dcache_l2_all_miss, dcache_l2_all)",
+ "MetricName": "DCache_L2_Misses"
+ },
+ {
+ "MetricExpr": "ipc + M2",
+ "MetricName": "M1"
+ },
+ {
+ "MetricExpr": "ipc + M1",
+ "MetricName": "M2"
+ },
+ {
+ "MetricExpr": "1/M3",
+ "MetricName": "M3"
+ },
+ {
+ "MetricExpr": "64 * l1d.replacement / 1000000000 / duration_time",
+ "MetricName": "L1D_Cache_Fill_BW"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json b/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json
index f8bdf7812b51..095dd8c7f161 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json
@@ -592,13 +592,13 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
+ "BriefDescription": "Instructions per Branch (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
"MetricName": "IpBranch",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Instruction per (near) call (lower number means higher occurrence rate)",
+ "BriefDescription": "Instruction per (near) call (lower number means higher occurance rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.CALL",
"MetricName": "IpCall",
"Unit": "cpu_atom"
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/cache.json b/tools/perf/pmu-events/arch/x86/alderlake/cache.json
index b83ed129c454..887dce4dfeba 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/cache.json
@@ -1,45 +1,49 @@
[
{
- "BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or tlb miss which hit in the L2, LLC, DRAM or MMIO (Non-DRAM).",
+ "BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in the L2, LLC, DRAM or MMIO (Non-DRAM).",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.IFETCH",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0x38",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or tlb miss which hit in DRAM or MMIO (Non-DRAM).",
+ "BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in DRAM or MMIO (Non-DRAM).",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.IFETCH_DRAM_HIT",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0x20",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or tlb miss which hit in the L2 cache.",
+ "BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in the L2 cache.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.IFETCH_L2_HIT",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0x8",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or tlb miss which hit in the last level cache or other core with HITE/F/M.",
+ "BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in the LLC or other core with HITE/F/M.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.IFETCH_LLC_HIT",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0x10",
"Unit": "cpu_atom"
},
@@ -51,6 +55,7 @@
"EventName": "MEM_BOUND_STALLS.LOAD",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0x7",
"Unit": "cpu_atom"
},
@@ -62,6 +67,7 @@
"EventName": "MEM_BOUND_STALLS.LOAD_DRAM_HIT",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0x4",
"Unit": "cpu_atom"
},
@@ -73,6 +79,7 @@
"EventName": "MEM_BOUND_STALLS.LOAD_L2_HIT",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_atom"
},
@@ -84,11 +91,12 @@
"EventName": "MEM_BOUND_STALLS.LOAD_LLC_HIT",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0x2",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of load ops retired that hit in DRAM.",
+ "BriefDescription": "Counts the number of load uops retired that hit in DRAM.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5",
"Data_LA": "1",
@@ -101,7 +109,7 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of load ops retired that hit in the L2 cache.",
+ "BriefDescription": "Counts the number of load uops retired that hit in the L2 cache.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5",
"Data_LA": "1",
@@ -114,9 +122,10 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of load ops retired that hit in the L3 cache.",
+ "BriefDescription": "Counts the number of load uops retired that hit in the L3 cache.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5",
+ "Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
"PEBS": "1",
@@ -133,6 +142,7 @@
"EventName": "MEM_SCHEDULER_BLOCK.ALL",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "20003",
+ "Speculative": "1",
"UMask": "0x7",
"Unit": "cpu_atom"
},
@@ -144,6 +154,7 @@
"EventName": "MEM_SCHEDULER_BLOCK.LD_BUF",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "20003",
+ "Speculative": "1",
"UMask": "0x2",
"Unit": "cpu_atom"
},
@@ -155,6 +166,7 @@
"EventName": "MEM_SCHEDULER_BLOCK.RSV",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "20003",
+ "Speculative": "1",
"UMask": "0x4",
"Unit": "cpu_atom"
},
@@ -166,6 +178,7 @@
"EventName": "MEM_SCHEDULER_BLOCK.ST_BUF",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "20003",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_atom"
},
@@ -202,6 +215,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_128",
+ "L1_Hit_Indication": "1",
"MSRIndex": "0x3F6",
"MSRValue": "0x80",
"PEBS": "2",
@@ -218,6 +232,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_16",
+ "L1_Hit_Indication": "1",
"MSRIndex": "0x3F6",
"MSRValue": "0x10",
"PEBS": "2",
@@ -234,6 +249,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_256",
+ "L1_Hit_Indication": "1",
"MSRIndex": "0x3F6",
"MSRValue": "0x100",
"PEBS": "2",
@@ -250,6 +266,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_32",
+ "L1_Hit_Indication": "1",
"MSRIndex": "0x3F6",
"MSRValue": "0x20",
"PEBS": "2",
@@ -266,6 +283,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_4",
+ "L1_Hit_Indication": "1",
"MSRIndex": "0x3F6",
"MSRValue": "0x4",
"PEBS": "2",
@@ -282,6 +300,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_512",
+ "L1_Hit_Indication": "1",
"MSRIndex": "0x3F6",
"MSRValue": "0x200",
"PEBS": "2",
@@ -298,6 +317,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_64",
+ "L1_Hit_Indication": "1",
"MSRIndex": "0x3F6",
"MSRValue": "0x40",
"PEBS": "2",
@@ -314,6 +334,7 @@
"Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_8",
+ "L1_Hit_Indication": "1",
"MSRIndex": "0x3F6",
"MSRValue": "0x8",
"PEBS": "2",
@@ -324,7 +345,7 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts all the retired split loads.",
+ "BriefDescription": "Counts the number of retired split load uops.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5",
"Data_LA": "1",
@@ -338,11 +359,13 @@
},
{
"BriefDescription": "Counts the number of stores uops retired. Counts with or without PEBS enabled.",
- "CollectPEBSRecord": "2",
+ "CollectPEBSRecord": "3",
"Counter": "0,1,2,3,4,5",
+ "Data_LA": "1",
"EventCode": "0xd0",
"EventName": "MEM_UOPS_RETIRED.STORE_LATENCY",
- "PEBS": "1",
+ "L1_Hit_Indication": "1",
+ "PEBS": "2",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "1000003",
"UMask": "0x6",
@@ -350,7 +373,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
- "Counter": "0,1,2,3",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -367,10 +390,23 @@
"EventName": "TOPDOWN_FE_BOUND.ICACHE",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x20",
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "L1D.HWPF_MISS",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "L1D.HWPF_MISS",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Counts the number of cache lines replaced in L1 data cache.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
@@ -378,6 +414,7 @@
"EventName": "L1D.REPLACEMENT",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_core"
},
@@ -389,6 +426,7 @@
"EventName": "L1D_PEND_MISS.FB_FULL",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x2",
"Unit": "cpu_core"
},
@@ -402,6 +440,7 @@
"EventName": "L1D_PEND_MISS.FB_FULL_PERIODS",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x2",
"Unit": "cpu_core"
},
@@ -413,6 +452,7 @@
"EventName": "L1D_PEND_MISS.L2_STALL",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x4",
"Unit": "cpu_core"
},
@@ -424,6 +464,7 @@
"EventName": "L1D_PEND_MISS.L2_STALLS",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x4",
"Unit": "cpu_core"
},
@@ -435,6 +476,7 @@
"EventName": "L1D_PEND_MISS.PENDING",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_core"
},
@@ -447,6 +489,7 @@
"EventName": "L1D_PEND_MISS.PENDING_CYCLES",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_core"
},
@@ -458,17 +501,31 @@
"EventName": "L2_LINES_IN.ALL",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x1f",
"Unit": "cpu_core"
},
{
- "BriefDescription": "All L2 requests.[This event is alias to L2_RQSTS.REFERENCES]",
+ "BriefDescription": "Cache lines that have been L2 hardware prefetched but not used by demand accesses",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x26",
+ "EventName": "L2_LINES_OUT.USELESS_HWPF",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "All accesses to L2 cache[This event is alias to L2_RQSTS.REFERENCES]",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_REQUEST.ALL",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0xff",
"Unit": "cpu_core"
},
@@ -480,6 +537,7 @@
"EventName": "L2_REQUEST.MISS",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0x3f",
"Unit": "cpu_core"
},
@@ -491,17 +549,19 @@
"EventName": "L2_RQSTS.ALL_CODE_RD",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0xe4",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Demand Data Read requests",
+ "BriefDescription": "Demand Data Read access L2 cache",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0xe1",
"Unit": "cpu_core"
},
@@ -513,10 +573,23 @@
"EventName": "L2_RQSTS.ALL_DEMAND_MISS",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0x27",
"Unit": "cpu_core"
},
{
+ "BriefDescription": "L2_RQSTS.ALL_HWPF",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_HWPF",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0xf0",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "RFO requests to L2 cache.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
@@ -524,6 +597,7 @@
"EventName": "L2_RQSTS.ALL_RFO",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0xe2",
"Unit": "cpu_core"
},
@@ -535,6 +609,7 @@
"EventName": "L2_RQSTS.CODE_RD_HIT",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0xc4",
"Unit": "cpu_core"
},
@@ -546,6 +621,7 @@
"EventName": "L2_RQSTS.CODE_RD_MISS",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0x24",
"Unit": "cpu_core"
},
@@ -557,21 +633,35 @@
"EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0xc1",
"Unit": "cpu_core"
},
{
- "BriefDescription": "Demand Data Read miss L2, no rejects",
+ "BriefDescription": "Demand Data Read miss L2 cache",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0x21",
"Unit": "cpu_core"
},
{
+ "BriefDescription": "L2_RQSTS.HWPF_MISS",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.HWPF_MISS",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x30",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Read requests with true-miss in L2 cache.[This event is alias to L2_REQUEST.MISS]",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
@@ -579,17 +669,19 @@
"EventName": "L2_RQSTS.MISS",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0x3f",
"Unit": "cpu_core"
},
{
- "BriefDescription": "All L2 requests.[This event is alias to L2_REQUEST.ALL]",
+ "BriefDescription": "All accesses to L2 cache[This event is alias to L2_REQUEST.ALL]",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.REFERENCES",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0xff",
"Unit": "cpu_core"
},
@@ -601,6 +693,7 @@
"EventName": "L2_RQSTS.RFO_HIT",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0xc2",
"Unit": "cpu_core"
},
@@ -612,6 +705,7 @@
"EventName": "L2_RQSTS.RFO_MISS",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0x22",
"Unit": "cpu_core"
},
@@ -623,6 +717,7 @@
"EventName": "L2_RQSTS.SWPF_HIT",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0xc8",
"Unit": "cpu_core"
},
@@ -634,22 +729,36 @@
"EventName": "L2_RQSTS.SWPF_MISS",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0x28",
"Unit": "cpu_core"
},
{
- "BriefDescription": "TBD",
+ "BriefDescription": "Core-originated cacheable requests that missed L3 (Except hardware prefetches to the L3)",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.MISS",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x41",
"Unit": "cpu_core"
},
{
- "BriefDescription": "All retired load instructions.",
+ "BriefDescription": "Core-originated cacheable requests that refer to L3 (Except hardware prefetches to the L3)",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x4f",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"Data_LA": "1",
@@ -662,7 +771,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "All retired store instructions.",
+ "BriefDescription": "Retired store instructions.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"Data_LA": "1",
@@ -764,6 +873,7 @@
"EventName": "MEM_LOAD_COMPLETED.L1_MISS_ANY",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0xfd",
"Unit": "cpu_core"
},
@@ -961,7 +1071,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "TBD",
+ "BriefDescription": "MEM_STORE_RETIRED.L2_HIT",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x44",
@@ -983,7 +1093,7 @@
},
{
"BriefDescription": "Counts demand data reads that resulted in a snoop hit in another cores caches, data forwarding is required as the data is modified.",
- "Counter": "0,1,2,3",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -993,8 +1103,8 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT & SNOOP_HIT_WITH_FWD",
- "Counter": "0,1,2,3",
+ "BriefDescription": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
@@ -1005,7 +1115,7 @@
},
{
"BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that resulted in a snoop hit in another cores caches, data forwarding is required as the data is modified.",
- "Counter": "0,1,2,3",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
"MSRIndex": "0x1a6,0x1a7",
@@ -1015,13 +1125,14 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "TBD",
+ "BriefDescription": "OFFCORE_REQUESTS.ALL_REQUESTS",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x80",
"Unit": "cpu_core"
},
@@ -1033,6 +1144,7 @@
"EventName": "OFFCORE_REQUESTS.DATA_RD",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x8",
"Unit": "cpu_core"
},
@@ -1044,6 +1156,7 @@
"EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_core"
},
@@ -1051,22 +1164,26 @@
"BriefDescription": "This event is deprecated. Refer to new event OFFCORE_REQUESTS_OUTSTANDING.DATA_RD",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
+ "Errata": "ADL038",
"EventCode": "0x20",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x8",
"Unit": "cpu_core"
},
{
- "BriefDescription": "TBD",
+ "BriefDescription": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"CounterMask": "1",
+ "Errata": "ADL038",
"EventCode": "0x20",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x8",
"Unit": "cpu_core"
},
@@ -1079,17 +1196,20 @@
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x4",
"Unit": "cpu_core"
},
{
- "BriefDescription": "TBD",
+ "BriefDescription": "OFFCORE_REQUESTS_OUTSTANDING.DATA_RD",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
+ "Errata": "ADL038",
"EventCode": "0x20",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DATA_RD",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x8",
"Unit": "cpu_core"
},
@@ -1101,6 +1221,7 @@
"EventName": "SW_PREFETCH_ACCESS.NTA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_core"
},
@@ -1112,6 +1233,7 @@
"EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x8",
"Unit": "cpu_core"
},
@@ -1123,6 +1245,7 @@
"EventName": "SW_PREFETCH_ACCESS.T0",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x2",
"Unit": "cpu_core"
},
@@ -1134,7 +1257,8 @@
"EventName": "SW_PREFETCH_ACCESS.T1_T2",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x4",
"Unit": "cpu_core"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/floating-point.json b/tools/perf/pmu-events/arch/x86/alderlake/floating-point.json
index 310c2a8f3e6b..48a4605fc057 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/floating-point.json
@@ -7,6 +7,7 @@
"EventName": "MACHINE_CLEARS.FP_ASSIST",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "20003",
+ "Speculative": "1",
"UMask": "0x4",
"Unit": "cpu_atom"
},
@@ -23,7 +24,7 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "TBD",
+ "BriefDescription": "ARITH.FPDIV_ACTIVE",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
@@ -31,6 +32,7 @@
"EventName": "ARITH.FPDIV_ACTIVE",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_core"
},
@@ -42,50 +44,55 @@
"EventName": "ASSISTS.FP",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x2",
"Unit": "cpu_core"
},
{
- "BriefDescription": "TBD",
+ "BriefDescription": "ASSISTS.SSE_AVX_MIX",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc1",
"EventName": "ASSISTS.SSE_AVX_MIX",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x10",
"Unit": "cpu_core"
},
{
- "BriefDescription": "TBD",
+ "BriefDescription": "FP_ARITH_DISPATCHED.PORT_0",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb3",
"EventName": "FP_ARITH_DISPATCHED.PORT_0",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_core"
},
{
- "BriefDescription": "TBD",
+ "BriefDescription": "FP_ARITH_DISPATCHED.PORT_1",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb3",
"EventName": "FP_ARITH_DISPATCHED.PORT_1",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x2",
"Unit": "cpu_core"
},
{
- "BriefDescription": "TBD",
+ "BriefDescription": "FP_ARITH_DISPATCHED.PORT_5",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xb3",
"EventName": "FP_ARITH_DISPATCHED.PORT_5",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x4",
"Unit": "cpu_core"
},
@@ -155,4 +162,4 @@
"UMask": "0x2",
"Unit": "cpu_core"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/frontend.json b/tools/perf/pmu-events/arch/x86/alderlake/frontend.json
index 908588f63314..2cfa70b2d5e1 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/frontend.json
@@ -7,6 +7,7 @@
"EventName": "BACLEARS.ANY",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_atom"
},
@@ -18,6 +19,7 @@
"EventName": "ICACHE.ACCESSES",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0x3",
"Unit": "cpu_atom"
},
@@ -29,6 +31,7 @@
"EventName": "ICACHE.MISSES",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0x2",
"Unit": "cpu_atom"
},
@@ -40,6 +43,7 @@
"EventName": "DECODE.LCP",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "500009",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_core"
},
@@ -51,6 +55,7 @@
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x2",
"Unit": "cpu_core"
},
@@ -295,6 +300,21 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "FRONTEND_RETIRED.MS_FLOWS",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.MS_FLOWS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x8",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
@@ -310,7 +330,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "TBD",
+ "BriefDescription": "FRONTEND_RETIRED.UNKNOWN_BRANCH",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc6",
@@ -332,6 +352,7 @@
"EventName": "ICACHE_DATA.STALLS",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "500009",
+ "Speculative": "1",
"UMask": "0x4",
"Unit": "cpu_core"
},
@@ -343,6 +364,7 @@
"EventName": "ICACHE_TAG.STALLS",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0x4",
"Unit": "cpu_core"
},
@@ -355,6 +377,7 @@
"EventName": "IDQ.DSB_CYCLES_ANY",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x8",
"Unit": "cpu_core"
},
@@ -367,6 +390,7 @@
"EventName": "IDQ.DSB_CYCLES_OK",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x8",
"Unit": "cpu_core"
},
@@ -378,6 +402,7 @@
"EventName": "IDQ.DSB_UOPS",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x8",
"Unit": "cpu_core"
},
@@ -390,6 +415,7 @@
"EventName": "IDQ.MITE_CYCLES_ANY",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x4",
"Unit": "cpu_core"
},
@@ -402,6 +428,7 @@
"EventName": "IDQ.MITE_CYCLES_OK",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x4",
"Unit": "cpu_core"
},
@@ -413,6 +440,7 @@
"EventName": "IDQ.MITE_UOPS",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x4",
"Unit": "cpu_core"
},
@@ -425,6 +453,7 @@
"EventName": "IDQ.MS_CYCLES_ANY",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x20",
"Unit": "cpu_core"
},
@@ -438,6 +467,7 @@
"EventName": "IDQ.MS_SWITCHES",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x20",
"Unit": "cpu_core"
},
@@ -449,6 +479,7 @@
"EventName": "IDQ.MS_UOPS",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x20",
"Unit": "cpu_core"
},
@@ -460,6 +491,7 @@
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_core"
},
@@ -472,6 +504,7 @@
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_core"
},
@@ -485,7 +518,8 @@
"Invert": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_core"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/memory.json b/tools/perf/pmu-events/arch/x86/alderlake/memory.json
index 1d4d1ebe2a74..586fb961e46d 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/memory.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/memory.json
@@ -1,52 +1,61 @@
[
{
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to any number of reasons, including an L1 miss, WCB full, pagewalk, store address block or store data block, on a load that retires.",
+ "CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5",
"EventCode": "0x05",
"EventName": "LD_HEAD.ANY_AT_RET",
+ "PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0xff",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to a core bound stall including a store address match, a DTLB miss or a page walk that detains the load from retiring.",
- "Counter": "0,1,2,3",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0x05",
"EventName": "LD_HEAD.L1_BOUND_AT_RET",
+ "PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0xf4",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to other block cases when load subsequently retires when load subsequently retires.",
+ "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to other block cases.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5",
"EventCode": "0x05",
"EventName": "LD_HEAD.OTHER_AT_RET",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0xc0",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to a pagewalk when load subsequently retires.",
+ "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a pagewalk.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5",
"EventCode": "0x05",
"EventName": "LD_HEAD.PGWALK_AT_RET",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0xa0",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to a store address match when load subsequently retires.",
+ "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a store address match.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5",
"EventCode": "0x05",
"EventName": "LD_HEAD.ST_ADDR_AT_RET",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x84",
"Unit": "cpu_atom"
},
@@ -58,12 +67,13 @@
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "20003",
+ "Speculative": "1",
"UMask": "0x2",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts demand data reads that were not supplied by the L3 cache.",
- "Counter": "0,1,2,3",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -74,7 +84,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
- "Counter": "0,1,2,3",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -92,6 +102,7 @@
"EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x6",
"Unit": "cpu_core"
},
@@ -103,6 +114,7 @@
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x2",
"Unit": "cpu_core"
},
@@ -115,6 +127,7 @@
"EventName": "MEMORY_ACTIVITY.CYCLES_L1D_MISS",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x2",
"Unit": "cpu_core"
},
@@ -127,11 +140,12 @@
"EventName": "MEMORY_ACTIVITY.STALLS_L1D_MISS",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x3",
"Unit": "cpu_core"
},
{
- "BriefDescription": "TBD",
+ "BriefDescription": "MEMORY_ACTIVITY.STALLS_L2_MISS",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"CounterMask": "5",
@@ -139,11 +153,12 @@
"EventName": "MEMORY_ACTIVITY.STALLS_L2_MISS",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x5",
"Unit": "cpu_core"
},
{
- "BriefDescription": "TBD",
+ "BriefDescription": "MEMORY_ACTIVITY.STALLS_L3_MISS",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"CounterMask": "9",
@@ -151,6 +166,7 @@
"EventName": "MEMORY_ACTIVITY.STALLS_L3_MISS",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x9",
"Unit": "cpu_core"
},
@@ -283,7 +299,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Retired instructions with at least 1 store uop. This PEBS event is the trigger for stores sampled by the PEBS Store Facility.",
+ "BriefDescription": "Retired memory store access operations. A PDist event for PEBS Store Latency Facility.",
"CollectPEBSRecord": "2",
"Data_LA": "1",
"EventCode": "0xcd",
@@ -295,7 +311,7 @@
},
{
"BriefDescription": "Counts demand data reads that were not supplied by the L3 cache.",
- "Counter": "0,1,2,3",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -306,7 +322,7 @@
},
{
"BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
- "Counter": "0,1,2,3",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
@@ -315,4 +331,4 @@
"UMask": "0x1",
"Unit": "cpu_core"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/other.json b/tools/perf/pmu-events/arch/x86/alderlake/other.json
index dc810f093fb0..67a9c13cc71d 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/other.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/other.json
@@ -1,7 +1,7 @@
[
{
"BriefDescription": "Counts demand data reads that have any type of response.",
- "Counter": "0,1,2,3",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -12,7 +12,7 @@
},
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
- "Counter": "0,1,2,3",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -23,7 +23,7 @@
},
{
"BriefDescription": "Counts streaming stores that have any type of response.",
- "Counter": "0,1,2,3",
+ "Counter": "0,1,2,3,4,5",
"EventCode": "0xB7",
"EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -33,74 +33,68 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Number of occurrences where a microcode assist is invoked by hardware.",
- "CollectPEBSRecord": "2",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xc1",
- "EventName": "ASSISTS.ANY",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "SampleAfterValue": "100003",
- "UMask": "0x1f",
- "Unit": "cpu_core"
- },
- {
- "BriefDescription": "Count all other microcode assist beyond FP, AVX_TILE_MIX and A/D assists (counted by their own sub-events). This includes assists at uop writeback like AVX* load/store (non-FP) assists, Null Assist in SNC (due to lack of FP precision format convert with FMA3x3 uarch) or assists generated by ROB (like assists to due to Missprediction for FSW register - fixed in SNC)",
+ "BriefDescription": "ASSISTS.HARDWARE",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc1",
"EventName": "ASSISTS.HARDWARE",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x4",
"Unit": "cpu_core"
},
{
- "BriefDescription": "TBD",
+ "BriefDescription": "ASSISTS.PAGE_FAULT",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc1",
"EventName": "ASSISTS.PAGE_FAULT",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x8",
"Unit": "cpu_core"
},
{
- "BriefDescription": "TBD",
+ "BriefDescription": "CORE_POWER.LICENSE_1",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "CORE_POWER.LICENSE_1",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0x2",
"Unit": "cpu_core"
},
{
- "BriefDescription": "TBD",
+ "BriefDescription": "CORE_POWER.LICENSE_2",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "CORE_POWER.LICENSE_2",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0x4",
"Unit": "cpu_core"
},
{
- "BriefDescription": "TBD",
+ "BriefDescription": "CORE_POWER.LICENSE_3",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "CORE_POWER.LICENSE_3",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0x8",
"Unit": "cpu_core"
},
{
"BriefDescription": "Counts demand data reads that have any type of response.",
- "Counter": "0,1,2,3",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -111,7 +105,7 @@
},
{
"BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
- "Counter": "0,1,2,3",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -122,7 +116,7 @@
},
{
"BriefDescription": "Counts streaming stores that have any type of response.",
- "Counter": "0,1,2,3",
+ "Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x2A,0x2B",
"EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
@@ -132,7 +126,61 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "TBD",
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x7",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY_COUNT",
+ "Invert": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x7",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event RS.EMPTY_COUNT",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xa5",
+ "EventName": "RS_EMPTY.COUNT",
+ "Invert": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x7",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event RS.EMPTY",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa5",
+ "EventName": "RS_EMPTY.CYCLES",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x7",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "XQ.FULL_CYCLES",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"CounterMask": "1",
@@ -140,7 +188,8 @@
"EventName": "XQ.FULL_CYCLES",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_core"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/pipeline.json b/tools/perf/pmu-events/arch/x86/alderlake/pipeline.json
index de2c6e0ef654..d02e078a90c9 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/pipeline.json
@@ -23,7 +23,31 @@
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of far branch instructions retired, includes far jump, far call and return, and Interrupt call and return.",
+ "BriefDescription": "Counts the number of retired JCC (Jump on Conditional Code) branch instructions retired, includes both taken and not taken branches.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5",
+ "SampleAfterValue": "200003",
+ "UMask": "0x7e",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of taken JCC (Jump on Conditional Code) branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_TAKEN",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfe",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of far branch instructions retired, includes far jump, far call and return, and interrupt call and return.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5",
"EventCode": "0xc4",
@@ -35,6 +59,54 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of near indirect JMP and near indirect CALL branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.INDIRECT",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5",
+ "SampleAfterValue": "200003",
+ "UMask": "0xeb",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of near indirect CALL branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.INDIRECT_CALL",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfb",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.INDIRECT_CALL",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.IND_CALL",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfb",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.COND",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.JCC",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5",
+ "SampleAfterValue": "200003",
+ "UMask": "0x7e",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of near CALL branch instructions retired.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5",
@@ -47,6 +119,66 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of near RET branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5",
+ "SampleAfterValue": "200003",
+ "UMask": "0xf7",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.INDIRECT",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NON_RETURN_IND",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5",
+ "SampleAfterValue": "200003",
+ "UMask": "0xeb",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of near relative CALL branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.REL_CALL",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfd",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.NEAR_RETURN",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.RETURN",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5",
+ "SampleAfterValue": "200003",
+ "UMask": "0xf7",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.COND_TAKEN",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.TAKEN_JCC",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfe",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the total number of mispredicted branch instructions retired for all branch types.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5",
@@ -58,12 +190,121 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the number of mispredicted JCC (Jump on Conditional Code) branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5",
+ "SampleAfterValue": "200003",
+ "UMask": "0x7e",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of mispredicted taken JCC (Jump on Conditional Code) branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_TAKEN",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfe",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of mispredicted near indirect JMP and near indirect CALL branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5",
+ "SampleAfterValue": "200003",
+ "UMask": "0xeb",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of mispredicted near indirect CALL branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT_CALL",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfb",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event BR_MISP_RETIRED.INDIRECT_CALL",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.IND_CALL",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfb",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event BR_MISP_RETIRED.COND",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.JCC",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5",
+ "SampleAfterValue": "200003",
+ "UMask": "0x7e",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event BR_MISP_RETIRED.INDIRECT",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5",
+ "SampleAfterValue": "200003",
+ "UMask": "0xeb",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of mispredicted near RET branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.RETURN",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5",
+ "SampleAfterValue": "200003",
+ "UMask": "0xf7",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event BR_MISP_RETIRED.COND_TAKEN",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.TAKEN_JCC",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfe",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "Counts the number of unhalted core clock cycles. (Fixed event)",
"CollectPEBSRecord": "2",
- "Counter": "33",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.CORE",
"PEBScounters": "33",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x2",
"Unit": "cpu_atom"
},
@@ -75,25 +316,28 @@
"EventName": "CPU_CLK_UNHALTED.CORE_P",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts the number of unhalted reference clock cycles at TSC frequency. (Fixed event)",
"CollectPEBSRecord": "2",
- "Counter": "34",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PEBScounters": "34",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x3",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Counts the number of unhalted core clock cycles. (Fixed event)",
"CollectPEBSRecord": "2",
- "Counter": "33",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PEBScounters": "33",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x2",
"Unit": "cpu_atom"
},
@@ -105,12 +349,13 @@
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of instructions retired. (Fixed event)",
+ "BriefDescription": "Counts the total number of instructions retired. (Fixed event)",
"CollectPEBSRecord": "2",
- "Counter": "32",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PEBS": "1",
"PEBScounters": "32",
@@ -119,6 +364,17 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Counts the total number of instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5",
+ "SampleAfterValue": "2000003",
+ "Unit": "cpu_atom"
+ },
+ {
"BriefDescription": "This event is deprecated. Refer to new event LD_BLOCKS.ADDRESS_ALIAS",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5",
@@ -162,6 +418,7 @@
"EventName": "MACHINE_CLEARS.DISAMBIGUATION",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "20003",
+ "Speculative": "1",
"UMask": "0x8",
"Unit": "cpu_atom"
},
@@ -173,6 +430,7 @@
"EventName": "MACHINE_CLEARS.MRN_NUKE",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x80",
"Unit": "cpu_atom"
},
@@ -182,9 +440,9 @@
"Counter": "0,1,2,3,4,5",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.PAGE_FAULT",
- "PEBS": "1",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "20003",
+ "Speculative": "1",
"UMask": "0x20",
"Unit": "cpu_atom"
},
@@ -196,6 +454,7 @@
"EventName": "MACHINE_CLEARS.SLOW",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "20003",
+ "Speculative": "1",
"UMask": "0x6f",
"Unit": "cpu_atom"
},
@@ -207,17 +466,19 @@
"EventName": "MACHINE_CLEARS.SMC",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "20003",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of issue slots not consumed due to a micro-sequencer (MS) scoreboard, which stalls the front-end from issuing uops from the UROM until a specified older uop retires.",
+ "BriefDescription": "Counts the number of issue slots not consumed by the backend due to a micro-sequencer (MS) scoreboard, which stalls the front-end from issuing from the UROM until a specified older uop retires.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5",
"EventCode": "0x75",
"EventName": "SERIALIZATION.NON_C01_MS_SCB",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0x2",
"Unit": "cpu_atom"
},
@@ -229,6 +490,7 @@
"EventName": "TOPDOWN_BAD_SPECULATION.ALL",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"Unit": "cpu_atom"
},
{
@@ -239,6 +501,7 @@
"EventName": "TOPDOWN_BAD_SPECULATION.FASTNUKE",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x2",
"Unit": "cpu_atom"
},
@@ -250,6 +513,7 @@
"EventName": "TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x3",
"Unit": "cpu_atom"
},
@@ -261,6 +525,7 @@
"EventName": "TOPDOWN_BAD_SPECULATION.MISPREDICT",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x4",
"Unit": "cpu_atom"
},
@@ -272,6 +537,7 @@
"EventName": "TOPDOWN_BAD_SPECULATION.NUKE",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_atom"
},
@@ -283,6 +549,7 @@
"EventName": "TOPDOWN_BE_BOUND.ALL",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"Unit": "cpu_atom"
},
{
@@ -293,6 +560,7 @@
"EventName": "TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_atom"
},
@@ -304,6 +572,7 @@
"EventName": "TOPDOWN_BE_BOUND.MEM_SCHEDULER",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x2",
"Unit": "cpu_atom"
},
@@ -315,6 +584,7 @@
"EventName": "TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x8",
"Unit": "cpu_atom"
},
@@ -326,6 +596,7 @@
"EventName": "TOPDOWN_BE_BOUND.REGISTER",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x20",
"Unit": "cpu_atom"
},
@@ -337,6 +608,7 @@
"EventName": "TOPDOWN_BE_BOUND.REORDER_BUFFER",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x40",
"Unit": "cpu_atom"
},
@@ -348,6 +620,7 @@
"EventName": "TOPDOWN_BE_BOUND.SERIALIZATION",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x10",
"Unit": "cpu_atom"
},
@@ -359,6 +632,7 @@
"EventName": "TOPDOWN_FE_BOUND.ALL",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"Unit": "cpu_atom"
},
{
@@ -369,6 +643,7 @@
"EventName": "TOPDOWN_FE_BOUND.BRANCH_DETECT",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x2",
"Unit": "cpu_atom"
},
@@ -380,6 +655,7 @@
"EventName": "TOPDOWN_FE_BOUND.BRANCH_RESTEER",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x40",
"Unit": "cpu_atom"
},
@@ -391,6 +667,7 @@
"EventName": "TOPDOWN_FE_BOUND.CISC",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_atom"
},
@@ -402,6 +679,7 @@
"EventName": "TOPDOWN_FE_BOUND.DECODE",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x8",
"Unit": "cpu_atom"
},
@@ -413,17 +691,19 @@
"EventName": "TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x8d",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to latency related stalls including BACLEARs, BTCLEARs, ITLB misses, and ICache misses.",
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to a latency related stalls including BACLEARs, BTCLEARs, ITLB misses, and ICache misses.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.FRONTEND_LATENCY",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x72",
"Unit": "cpu_atom"
},
@@ -435,6 +715,7 @@
"EventName": "TOPDOWN_FE_BOUND.ITLB",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x10",
"Unit": "cpu_atom"
},
@@ -446,6 +727,7 @@
"EventName": "TOPDOWN_FE_BOUND.OTHER",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x80",
"Unit": "cpu_atom"
},
@@ -457,6 +739,7 @@
"EventName": "TOPDOWN_FE_BOUND.PREDECODE",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x4",
"Unit": "cpu_atom"
},
@@ -527,6 +810,7 @@
"EventName": "ARITH.DIVIDER_ACTIVE",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x9",
"Unit": "cpu_core"
},
@@ -539,6 +823,7 @@
"EventName": "ARITH.DIV_ACTIVE",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x9",
"Unit": "cpu_core"
},
@@ -551,11 +836,24 @@
"EventName": "ARITH.FP_DIVIDER_ACTIVE",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_core"
},
{
- "BriefDescription": "TBD",
+ "BriefDescription": "This event counts the cycles the integer divider is busy.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb0",
+ "EventName": "ARITH.IDIV_ACTIVE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event ARITH.IDIV_ACTIVE",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
@@ -563,10 +861,23 @@
"EventName": "ARITH.INT_DIVIDER_ACTIVE",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x8",
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Number of occurrences where a microcode assist is invoked by hardware.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.ANY",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1f",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "All branch instructions retired.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
@@ -709,7 +1020,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "number of branch instructions retired that were mispredicted and taken. Non PEBS",
+ "BriefDescription": "number of branch instructions retired that were mispredicted and taken.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
@@ -757,6 +1068,42 @@
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Core clocks when the thread is in the C0.1 light-weight slower wakeup time but more power saving optimized state.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.C01",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Core clocks when the thread is in the C0.2 light-weight faster wakeup time but less power saving optimized state.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.C02",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Core clocks when the thread is in the C0.1 or C0.2 or running a PAUSE in C0 ACPI state.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.C0_WAIT",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x70",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Cycle counts are evenly distributed between active threads in the Core.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
@@ -764,6 +1111,7 @@
"EventName": "CPU_CLK_UNHALTED.DISTRIBUTED",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x2",
"Unit": "cpu_core"
},
@@ -775,22 +1123,24 @@
"EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "25003",
+ "Speculative": "1",
"UMask": "0x2",
"Unit": "cpu_core"
},
{
- "BriefDescription": "TBD",
+ "BriefDescription": "CPU_CLK_UNHALTED.PAUSE",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xec",
"EventName": "CPU_CLK_UNHALTED.PAUSE",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x40",
"Unit": "cpu_core"
},
{
- "BriefDescription": "TBD",
+ "BriefDescription": "CPU_CLK_UNHALTED.PAUSE_INST",
"Counter": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EdgeDetect": "1",
@@ -798,6 +1148,7 @@
"EventName": "CPU_CLK_UNHALTED.PAUSE_INST",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x40",
"Unit": "cpu_core"
},
@@ -808,26 +1159,41 @@
"EventName": "CPU_CLK_UNHALTED.REF_DISTRIBUTED",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x8",
"Unit": "cpu_core"
},
{
"BriefDescription": "Reference cycles when the core is not in halt state.",
"CollectPEBSRecord": "2",
- "Counter": "34",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PEBScounters": "34",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x3",
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC_P",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Core cycles when the thread is not in halt state",
"CollectPEBSRecord": "2",
- "Counter": "33",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PEBScounters": "33",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x2",
"Unit": "cpu_core"
},
@@ -839,6 +1205,7 @@
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"Unit": "cpu_core"
},
{
@@ -850,6 +1217,7 @@
"EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x8",
"Unit": "cpu_core"
},
@@ -862,6 +1230,7 @@
"EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_core"
},
@@ -874,6 +1243,7 @@
"EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x10",
"Unit": "cpu_core"
},
@@ -886,6 +1256,7 @@
"EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0xc",
"Unit": "cpu_core"
},
@@ -898,6 +1269,7 @@
"EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x5",
"Unit": "cpu_core"
},
@@ -910,6 +1282,7 @@
"EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x4",
"Unit": "cpu_core"
},
@@ -921,6 +1294,7 @@
"EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x2",
"Unit": "cpu_core"
},
@@ -932,6 +1306,7 @@
"EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x4",
"Unit": "cpu_core"
},
@@ -943,6 +1318,7 @@
"EventName": "EXE_ACTIVITY.3_PORTS_UTIL",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x8",
"Unit": "cpu_core"
},
@@ -954,6 +1330,7 @@
"EventName": "EXE_ACTIVITY.4_PORTS_UTIL",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x10",
"Unit": "cpu_core"
},
@@ -966,6 +1343,7 @@
"EventName": "EXE_ACTIVITY.BOUND_ON_LOADS",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x21",
"Unit": "cpu_core"
},
@@ -978,10 +1356,23 @@
"EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x40",
"Unit": "cpu_core"
},
{
+ "BriefDescription": "Cycles no uop executed while RS was not empty, the SB was not full and there was no outstanding load.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.EXE_BOUND_0_PORTS",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x80",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "Instruction decoders utilized in a cycle",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
@@ -989,13 +1380,14 @@
"EventName": "INST_DECODED.DECODERS",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_core"
},
{
"BriefDescription": "Number of instructions retired. Fixed Counter - architectural event",
"CollectPEBSRecord": "2",
- "Counter": "32",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PEBS": "1",
"PEBScounters": "32",
@@ -1015,7 +1407,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "TBD",
+ "BriefDescription": "INST_RETIRED.MACRO_FUSED",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc0",
@@ -1026,7 +1418,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "Number of all retired NOP instructions.",
+ "BriefDescription": "Retired NOP instructions.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc0",
@@ -1039,7 +1431,7 @@
{
"BriefDescription": "Precise instruction retired with PEBS precise-distribution",
"CollectPEBSRecord": "2",
- "Counter": "32",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.PREC_DIST",
"PEBS": "1",
"PEBScounters": "32",
@@ -1048,7 +1440,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "TBD",
+ "BriefDescription": "INST_RETIRED.REP_ITERATION",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc0",
@@ -1066,6 +1458,7 @@
"EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "500009",
+ "Speculative": "1",
"UMask": "0x80",
"Unit": "cpu_core"
},
@@ -1077,11 +1470,12 @@
"EventName": "INT_MISC.RECOVERY_CYCLES",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "500009",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_core"
},
{
- "BriefDescription": "TBD",
+ "BriefDescription": "INT_MISC.UNKNOWN_BRANCH_CYCLES",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xad",
@@ -1090,6 +1484,7 @@
"MSRValue": "0x7",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"TakenAlone": "1",
"UMask": "0x40",
"Unit": "cpu_core"
@@ -1102,11 +1497,12 @@
"EventName": "INT_MISC.UOP_DROPPING",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x10",
"Unit": "cpu_core"
},
{
- "BriefDescription": "TBD",
+ "BriefDescription": "INT_VEC_RETIRED.128BIT",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
@@ -1117,7 +1513,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "TBD",
+ "BriefDescription": "INT_VEC_RETIRED.256BIT",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
@@ -1150,7 +1546,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "TBD",
+ "BriefDescription": "INT_VEC_RETIRED.MUL_256",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
@@ -1161,7 +1557,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "TBD",
+ "BriefDescription": "INT_VEC_RETIRED.SHUFFLES",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
@@ -1172,7 +1568,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "TBD",
+ "BriefDescription": "INT_VEC_RETIRED.VNNI_128",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
@@ -1183,7 +1579,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "TBD",
+ "BriefDescription": "INT_VEC_RETIRED.VNNI_256",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe7",
@@ -1201,6 +1597,7 @@
"EventName": "LD_BLOCKS.ADDRESS_ALIAS",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x4",
"Unit": "cpu_core"
},
@@ -1212,6 +1609,7 @@
"EventName": "LD_BLOCKS.NO_SR",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x88",
"Unit": "cpu_core"
},
@@ -1223,6 +1621,7 @@
"EventName": "LD_BLOCKS.STORE_FORWARD",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x82",
"Unit": "cpu_core"
},
@@ -1234,6 +1633,7 @@
"EventName": "LOAD_HIT_PREFETCH.SWPF",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_core"
},
@@ -1244,8 +1644,9 @@
"CounterMask": "1",
"EventCode": "0xa8",
"EventName": "LSD.CYCLES_ACTIVE",
- "PEBScounters": "0,1,2,3",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_core"
},
@@ -1256,8 +1657,9 @@
"CounterMask": "6",
"EventCode": "0xa8",
"EventName": "LSD.CYCLES_OK",
- "PEBScounters": "0,1,2,3",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_core"
},
@@ -1269,6 +1671,7 @@
"EventName": "LSD.UOPS",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_core"
},
@@ -1282,6 +1685,7 @@
"EventName": "MACHINE_CLEARS.COUNT",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_core"
},
@@ -1293,17 +1697,19 @@
"EventName": "MACHINE_CLEARS.SMC",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x4",
"Unit": "cpu_core"
},
{
- "BriefDescription": "TBD",
+ "BriefDescription": "MISC2_RETIRED.LFENCE",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xe0",
"EventName": "MISC2_RETIRED.LFENCE",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "400009",
+ "Speculative": "1",
"UMask": "0x20",
"Unit": "cpu_core"
},
@@ -1326,6 +1732,7 @@
"EventName": "RESOURCE_STALLS.SB",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x8",
"Unit": "cpu_core"
},
@@ -1337,6 +1744,7 @@
"EventName": "RESOURCE_STALLS.SCOREBOARD",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x2",
"Unit": "cpu_core"
},
@@ -1348,6 +1756,7 @@
"EventName": "TOPDOWN.BACKEND_BOUND_SLOTS",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "10000003",
+ "Speculative": "1",
"UMask": "0x2",
"Unit": "cpu_core"
},
@@ -1357,6 +1766,7 @@
"EventCode": "0xa4",
"EventName": "TOPDOWN.BAD_SPEC_SLOTS",
"SampleAfterValue": "10000003",
+ "Speculative": "1",
"UMask": "0x4",
"Unit": "cpu_core"
},
@@ -1366,27 +1776,30 @@
"EventCode": "0xa4",
"EventName": "TOPDOWN.BR_MISPREDICT_SLOTS",
"SampleAfterValue": "10000003",
+ "Speculative": "1",
"UMask": "0x8",
"Unit": "cpu_core"
},
{
- "BriefDescription": "TBD",
+ "BriefDescription": "TOPDOWN.MEMORY_BOUND_SLOTS",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa4",
"EventName": "TOPDOWN.MEMORY_BOUND_SLOTS",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "10000003",
+ "Speculative": "1",
"UMask": "0x10",
"Unit": "cpu_core"
},
{
"BriefDescription": "TMA slots available for an unhalted logical processor. Fixed counter - architectural event",
"CollectPEBSRecord": "2",
- "Counter": "35",
+ "Counter": "Fixed counter 3",
"EventName": "TOPDOWN.SLOTS",
"PEBScounters": "35",
"SampleAfterValue": "10000003",
+ "Speculative": "1",
"UMask": "0x4",
"Unit": "cpu_core"
},
@@ -1398,17 +1811,19 @@
"EventName": "TOPDOWN.SLOTS_P",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "10000003",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_core"
},
{
- "BriefDescription": "TBD",
+ "BriefDescription": "UOPS_DECODED.DEC0_UOPS",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x76",
"EventName": "UOPS_DECODED.DEC0_UOPS",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_core"
},
@@ -1420,6 +1835,7 @@
"EventName": "UOPS_DISPATCHED.PORT_0",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_core"
},
@@ -1431,6 +1847,7 @@
"EventName": "UOPS_DISPATCHED.PORT_1",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x2",
"Unit": "cpu_core"
},
@@ -1442,6 +1859,7 @@
"EventName": "UOPS_DISPATCHED.PORT_2_3_10",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x4",
"Unit": "cpu_core"
},
@@ -1453,6 +1871,7 @@
"EventName": "UOPS_DISPATCHED.PORT_4_9",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x10",
"Unit": "cpu_core"
},
@@ -1464,6 +1883,7 @@
"EventName": "UOPS_DISPATCHED.PORT_5_11",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x20",
"Unit": "cpu_core"
},
@@ -1475,6 +1895,7 @@
"EventName": "UOPS_DISPATCHED.PORT_6",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x40",
"Unit": "cpu_core"
},
@@ -1486,6 +1907,7 @@
"EventName": "UOPS_DISPATCHED.PORT_7_8",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x80",
"Unit": "cpu_core"
},
@@ -1498,6 +1920,7 @@
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x2",
"Unit": "cpu_core"
},
@@ -1510,6 +1933,7 @@
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x2",
"Unit": "cpu_core"
},
@@ -1522,6 +1946,7 @@
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x2",
"Unit": "cpu_core"
},
@@ -1534,6 +1959,7 @@
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x2",
"Unit": "cpu_core"
},
@@ -1546,6 +1972,7 @@
"EventName": "UOPS_EXECUTED.CYCLES_GE_1",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_core"
},
@@ -1558,6 +1985,7 @@
"EventName": "UOPS_EXECUTED.CYCLES_GE_2",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_core"
},
@@ -1570,6 +1998,7 @@
"EventName": "UOPS_EXECUTED.CYCLES_GE_3",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_core"
},
@@ -1582,6 +2011,7 @@
"EventName": "UOPS_EXECUTED.CYCLES_GE_4",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_core"
},
@@ -1595,6 +2025,7 @@
"Invert": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_core"
},
@@ -1608,6 +2039,7 @@
"Invert": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_core"
},
@@ -1619,6 +2051,7 @@
"EventName": "UOPS_EXECUTED.THREAD",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_core"
},
@@ -1630,6 +2063,7 @@
"EventName": "UOPS_EXECUTED.X87",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x10",
"Unit": "cpu_core"
},
@@ -1641,6 +2075,7 @@
"EventName": "UOPS_ISSUED.ANY",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x1",
"Unit": "cpu_core"
},
@@ -1657,7 +2092,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "TBD",
+ "BriefDescription": "Retired uops except the last uop of each instruction.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
@@ -1668,7 +2103,7 @@
"Unit": "cpu_core"
},
{
- "BriefDescription": "TBD",
+ "BriefDescription": "UOPS_RETIRED.MS",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
@@ -1718,4 +2153,4 @@
"UMask": "0x2",
"Unit": "cpu_core"
}
-] \ No newline at end of file
+]
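
Each entry in the pmu-events JSON files updated above describes one hardware event through string-valued fields such as EventCode, UMask, Counter, SampleAfterValue, Unit and, added throughout this update, Speculative. As a minimal illustration only (not part of the patch), the Python sketch below loads one of these files from a kernel checkout and prints the fields most relevant to the changes; the path is an assumption, and the defensive .get() calls are there because some entries may omit fields.

import json
from pathlib import Path

# Assumed path into a kernel source checkout; adjust as needed.
events_file = Path("tools/perf/pmu-events/arch/x86/alderlake/pipeline.json")

for event in json.loads(events_file.read_text()):
    name = event.get("EventName", "?")
    code = event.get("EventCode", "-")          # hex string, e.g. "0xa6"
    umask = event.get("UMask", "-")             # hex string, e.g. "0x80"
    speculative = event.get("Speculative", "0") # "1" once this update is applied
    unit = event.get("Unit", "")                # "cpu_core" or "cpu_atom" on hybrid parts
    print(f"{name:45} code={code:>6} umask={umask:>5} speculative={speculative} unit={unit}")
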
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/uncore-other.json b/tools/perf/pmu-events/arch/x86/alderlake/uncore-other.json
index 50de82c29944..b1ae349f5f21 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/uncore-other.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/uncore-other.json
@@ -3,7 +3,7 @@
"BriefDescription": "This 48-bit fixed counter counts the UCLK cycles",
"Counter": "Fixed",
"CounterType": "PGMABLE",
- "EventCode": "0xff",
+ "EventCode": "0xff",
"EventName": "UNC_CLOCK.SOCKET",
"PerPkg": "1",
"Unit": "CLOCK"
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/virtual-memory.json b/tools/perf/pmu-events/arch/x86/alderlake/virtual-memory.json
index 1cc39aa032e1..12baf768ad8d 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/virtual-memory.json
@@ -7,6 +7,7 @@
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0xe",
"Unit": "cpu_atom"
},
@@ -18,17 +19,55 @@
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0xe",
"Unit": "cpu_atom"
},
{
- "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to a DTLB miss when load subsequently retires.",
+ "BriefDescription": "Counts the number of page walks initiated by a instruction fetch that missed the first and second level TLBs.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.MISS_CAUSED_WALK",
+ "PEBScounters": "0,1,2,3,4,5",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks due to an instruction fetch that miss the PDE (Page Directory Entry) cache.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.PDE_CACHE_MISS",
+ "PEBScounters": "0,1,2,3,4,5",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x80",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to any page size.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PEBScounters": "0,1,2,3,4,5",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0xe",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a DTLB miss.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5",
"EventCode": "0x05",
"EventName": "LD_HEAD.DTLB_MISS_AT_RET",
"PEBScounters": "0,1,2,3,4,5",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x90",
"Unit": "cpu_atom"
},
@@ -40,6 +79,7 @@
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x20",
"Unit": "cpu_core"
},
@@ -52,6 +92,7 @@
"EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x10",
"Unit": "cpu_core"
},
@@ -63,6 +104,7 @@
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0xe",
"Unit": "cpu_core"
},
@@ -74,6 +116,7 @@
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x8",
"Unit": "cpu_core"
},
@@ -85,6 +128,7 @@
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x4",
"Unit": "cpu_core"
},
@@ -96,6 +140,7 @@
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x2",
"Unit": "cpu_core"
},
@@ -107,6 +152,7 @@
"EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x10",
"Unit": "cpu_core"
},
@@ -118,6 +164,7 @@
"EventName": "DTLB_STORE_MISSES.STLB_HIT",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x20",
"Unit": "cpu_core"
},
@@ -130,6 +177,7 @@
"EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x10",
"Unit": "cpu_core"
},
@@ -141,6 +189,7 @@
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0xe",
"Unit": "cpu_core"
},
@@ -152,6 +201,7 @@
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x8",
"Unit": "cpu_core"
},
@@ -163,6 +213,7 @@
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x4",
"Unit": "cpu_core"
},
@@ -174,6 +225,7 @@
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x2",
"Unit": "cpu_core"
},
@@ -185,6 +237,7 @@
"EventName": "DTLB_STORE_MISSES.WALK_PENDING",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x10",
"Unit": "cpu_core"
},
@@ -196,6 +249,7 @@
"EventName": "ITLB_MISSES.STLB_HIT",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x20",
"Unit": "cpu_core"
},
@@ -208,6 +262,7 @@
"EventName": "ITLB_MISSES.WALK_ACTIVE",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x10",
"Unit": "cpu_core"
},
@@ -219,6 +274,7 @@
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0xe",
"Unit": "cpu_core"
},
@@ -230,6 +286,7 @@
"EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x4",
"Unit": "cpu_core"
},
@@ -241,6 +298,7 @@
"EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x2",
"Unit": "cpu_core"
},
@@ -252,7 +310,8 @@
"EventName": "ITLB_MISSES.WALK_PENDING",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x10",
"Unit": "cpu_core"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/bonnell/cache.json b/tools/perf/pmu-events/arch/x86/bonnell/cache.json
index 71653bfe7093..86582bb8aa39 100644
--- a/tools/perf/pmu-events/arch/x86/bonnell/cache.json
+++ b/tools/perf/pmu-events/arch/x86/bonnell/cache.json
@@ -743,4 +743,4 @@
"SampleAfterValue": "10000",
"UMask": "0x2"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/bonnell/floating-point.json b/tools/perf/pmu-events/arch/x86/bonnell/floating-point.json
index f8055ff47f19..1fa347d07c98 100644
--- a/tools/perf/pmu-events/arch/x86/bonnell/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/bonnell/floating-point.json
@@ -258,4 +258,4 @@
"SampleAfterValue": "2000000",
"UMask": "0x2"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/bonnell/frontend.json b/tools/perf/pmu-events/arch/x86/bonnell/frontend.json
index e852eb2cc878..21fe5fe229aa 100644
--- a/tools/perf/pmu-events/arch/x86/bonnell/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/bonnell/frontend.json
@@ -88,4 +88,4 @@
"SampleAfterValue": "2000000",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/bonnell/memory.json b/tools/perf/pmu-events/arch/x86/bonnell/memory.json
index 2aa4c41f528e..f8b45b6fb4d3 100644
--- a/tools/perf/pmu-events/arch/x86/bonnell/memory.json
+++ b/tools/perf/pmu-events/arch/x86/bonnell/memory.json
@@ -151,4 +151,4 @@
"SampleAfterValue": "200000",
"UMask": "0x86"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/bonnell/other.json b/tools/perf/pmu-events/arch/x86/bonnell/other.json
index 114c062e7e96..e0bdcfbfa9dc 100644
--- a/tools/perf/pmu-events/arch/x86/bonnell/other.json
+++ b/tools/perf/pmu-events/arch/x86/bonnell/other.json
@@ -447,4 +447,4 @@
"SampleAfterValue": "200000",
"UMask": "0xc0"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/bonnell/pipeline.json b/tools/perf/pmu-events/arch/x86/bonnell/pipeline.json
index 896b738e59b6..f5123c99a7ba 100644
--- a/tools/perf/pmu-events/arch/x86/bonnell/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/bonnell/pipeline.json
@@ -353,4 +353,4 @@
"SampleAfterValue": "2000000",
"UMask": "0x10"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/bonnell/virtual-memory.json b/tools/perf/pmu-events/arch/x86/bonnell/virtual-memory.json
index c2363b8e61b4..e8512c585572 100644
--- a/tools/perf/pmu-events/arch/x86/bonnell/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/bonnell/virtual-memory.json
@@ -121,4 +121,4 @@
"SampleAfterValue": "200000",
"UMask": "0x3"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json b/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json
index 91d23341eabd..d65afe3d0b06 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json
@@ -130,44 +130,26 @@
"MetricName": "FLOPc_SMT"
},
{
- "BriefDescription": "Actual per-core usage of the Floating Point execution units (regardless of the vector width)",
+ "BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
"MetricExpr": "( (FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE) + (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) ) / ( 2 * CPU_CLK_UNHALTED.THREAD )",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "FP_Arith_Utilization",
- "PublicDescription": "Actual per-core usage of the Floating Point execution units (regardless of the vector width). Values > 1 are possible due to Fused-Multiply Add (FMA) counting."
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
},
{
- "BriefDescription": "Actual per-core usage of the Floating Point execution units (regardless of the vector width). SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "( (FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE) + (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) ) / ( 2 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ) )",
"MetricGroup": "Cor;Flops;HPC_SMT",
"MetricName": "FP_Arith_Utilization_SMT",
- "PublicDescription": "Actual per-core usage of the Floating Point execution units (regardless of the vector width). Values > 1 are possible due to Fused-Multiply Add (FMA) counting. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common). SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
"MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
"MetricName": "ILP"
},
{
- "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
- "MetricExpr": " ( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) * (BR_MISP_RETIRED.ALL_BRANCHES * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / CPU_CLK_UNHALTED.THREAD) / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY )) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) ) * (4 * CPU_CLK_UNHALTED.THREAD) / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "Bad;BrMispredicts",
- "MetricName": "Branch_Misprediction_Cost"
- },
- {
- "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
- "MetricExpr": " ( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * (BR_MISP_RETIRED.ALL_BRANCHES * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / CPU_CLK_UNHALTED.THREAD) / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY )) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) ) * (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "Bad;BrMispredicts_SMT",
- "MetricName": "Branch_Misprediction_Cost_SMT"
- },
- {
- "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "Bad;BadSpec;BrMispredicts",
- "MetricName": "IpMispredict"
- },
- {
"BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
"MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"MetricGroup": "SMT",
@@ -257,41 +239,52 @@
"MetricName": "Instructions"
},
{
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "Retire"
+ },
+ {
+ "BriefDescription": "",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
+ "MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
+ "MetricName": "Execute"
+ },
+ {
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
"MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
"MetricGroup": "DSB;Fed;FetchBW",
"MetricName": "DSB_Coverage"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load instructions (in core cycles)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
- "MetricGroup": "Mem;MemoryBound;MemoryLat",
- "MetricName": "Load_Miss_Real_Latency",
- "PublicDescription": "Actual Average Latency for L1 data-cache miss demand load instructions (in core cycles). Latency may be overestimated for multi-load instructions - e.g. repeat strings."
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts",
+ "MetricName": "IpMispredict"
},
{
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
- "MetricGroup": "Mem;MemoryBound;MemoryBW",
- "MetricName": "MLP"
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
+ "MetricExpr": " ( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) * (BR_MISP_RETIRED.ALL_BRANCHES * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / CPU_CLK_UNHALTED.THREAD) / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY )) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) ) * (4 * CPU_CLK_UNHALTED.THREAD) / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "Branch_Misprediction_Cost"
},
{
- "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
- "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L1D_Cache_Fill_BW"
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
+ "MetricExpr": " ( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * (BR_MISP_RETIRED.ALL_BRANCHES * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / CPU_CLK_UNHALTED.THREAD) / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY )) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) ) * (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BrMispredicts_SMT",
+ "MetricName": "Branch_Misprediction_Cost_SMT"
},
{
- "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L2_Cache_Fill_BW"
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
+ "MetricGroup": "Mem;MemoryBound;MemoryLat",
+ "MetricName": "Load_Miss_Real_Latency"
},
{
- "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L3_Cache_Fill_BW"
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "MetricGroup": "Mem;MemoryBound;MemoryBW",
+ "MetricName": "MLP"
},
{
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
@@ -306,13 +299,13 @@
"MetricName": "L2MPKI"
},
{
- "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)",
"MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
"MetricGroup": "Mem;CacheMisses;Offcore",
"MetricName": "L2MPKI_All"
},
{
- "BriefDescription": "L2 cache misses per kilo instruction for all demand loads (including speculative)",
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)",
"MetricExpr": "1000 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY",
"MetricGroup": "Mem;CacheMisses",
"MetricName": "L2MPKI_Load"
@@ -349,6 +342,48 @@
"MetricName": "Page_Walks_Utilization_SMT"
},
{
+ "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L1D_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L2_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L3_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "(64 * L1D.REPLACEMENT / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L1D_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "(64 * L2_LINES_IN.ALL / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L2_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "(64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L3_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "0",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "L3_Cache_Access_BW_1T"
+ },
+ {
"BriefDescription": "Average CPU Utilization",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "HPC;Summary",
@@ -364,7 +399,8 @@
"BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "( ( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE ) / 1000000000 ) / duration_time",
"MetricGroup": "Cor;Flops;HPC",
- "MetricName": "GFLOPs"
+ "MetricName": "GFLOPs",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine."
},
{
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/cache.json b/tools/perf/pmu-events/arch/x86/broadwell/cache.json
index 890412f02e06..f3d7fced28b6 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/cache.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/cache.json
@@ -3407,4 +3407,4 @@
"SampleAfterValue": "100003",
"UMask": "0x10"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json b/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json
index 9ad37dddb354..6322116d0d46 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json
@@ -190,4 +190,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x3"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/frontend.json b/tools/perf/pmu-events/arch/x86/broadwell/frontend.json
index f0bcb945ff76..37ce8034b2ed 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/frontend.json
@@ -292,4 +292,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/memory.json b/tools/perf/pmu-events/arch/x86/broadwell/memory.json
index f4eebecf371f..2a7797738159 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/memory.json
@@ -3050,4 +3050,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x40"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/other.json b/tools/perf/pmu-events/arch/x86/broadwell/other.json
index 4b360fe96698..917d145d5227 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/other.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/other.json
@@ -41,4 +41,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json b/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json
index 18d21b94a4b9..e9a604e2d67c 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json
@@ -1377,4 +1377,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/uncore-cache.json b/tools/perf/pmu-events/arch/x86/broadwell/uncore-cache.json
new file mode 100644
index 000000000000..d1805b3a5e3d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwell/uncore-cache.json
@@ -0,0 +1,152 @@
+[
+ {
+ "BriefDescription": "L3 Lookup any request that access cache and found line in E or S-state",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_ES",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup any request that access cache and found line in E or S-state.",
+ "UMask": "0x86",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "L3 Lookup any request that access cache and found line in I-state",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_I",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup any request that access cache and found line in I-state.",
+ "UMask": "0x88",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "L3 Lookup any request that access cache and found line in M-state",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_M",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup any request that access cache and found line in M-state.",
+ "UMask": "0x81",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "L3 Lookup any request that access cache and found line in MESI-state",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_MESI",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup any request that access cache and found line in MESI-state.",
+ "UMask": "0x8f",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "L3 Lookup read request that access cache and found line in E or S-state",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.READ_ES",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup read request that access cache and found line in E or S-state.",
+ "UMask": "0x16",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "L3 Lookup read request that access cache and found line in I-state",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.READ_I",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup read request that access cache and found line in I-state.",
+ "UMask": "0x18",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "L3 Lookup read request that access cache and found line in M-state",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.READ_M",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup read request that access cache and found line in M-state.",
+ "UMask": "0x11",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "L3 Lookup read request that access cache and found line in any MESI-state",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.READ_MESI",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup read request that access cache and found line in any MESI-state.",
+ "UMask": "0x1f",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "L3 Lookup write request that access cache and found line in E or S-state",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_ES",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup write request that access cache and found line in E or S-state.",
+ "UMask": "0x26",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "L3 Lookup write request that access cache and found line in M-state",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_M",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup write request that access cache and found line in M-state.",
+ "UMask": "0x21",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "L3 Lookup write request that access cache and found line in MESI-state",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_MESI",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup write request that access cache and found line in MESI-state.",
+ "UMask": "0x2f",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a modified line in some processor core.",
+ "Counter": "0,1",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.HITM_XCORE",
+ "PerPkg": "1",
+ "PublicDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a modified line in some processor core.",
+ "UMask": "0x48",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a non-modified line in some processor core.",
+ "Counter": "0,1",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.HIT_XCORE",
+ "PerPkg": "1",
+ "PublicDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a non-modified line in some processor core.",
+ "UMask": "0x44",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "A cross-core snoop resulted from L3 Eviction which misses in some processor core.",
+ "Counter": "0,1",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.MISS_EVICTION",
+ "PerPkg": "1",
+ "PublicDescription": "A cross-core snoop resulted from L3 Eviction which misses in some processor core.",
+ "UMask": "0x81",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which misses in some processor core.",
+ "Counter": "0,1",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.MISS_XCORE",
+ "PerPkg": "1",
+ "PublicDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which misses in some processor core.",
+ "UMask": "0x41",
+ "Unit": "CBO"
+ }
+]
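
The UNC_CBO_CACHE_LOOKUP umasks introduced above follow a pattern that can be read straight off the entries: the high nibble selects the request type (0x1 read, 0x2 write, 0x8 any) and the low nibble the cache-line states to match (0x1 M, 0x6 E/S, 0x8 I, 0xf any MESI state). A small sketch, derived only from the entries listed here, that rebuilds the event names from those two nibbles:

# Decodes the UNC_CBO_CACHE_LOOKUP umasks listed above (illustrative only).
REQUEST = {0x1: "READ", 0x2: "WRITE", 0x8: "ANY"}
STATE = {0x1: "M", 0x6: "ES", 0x8: "I", 0xF: "MESI"}

def lookup_name(umask: int) -> str:
    request = REQUEST[umask >> 4]
    state = STATE[umask & 0xF]
    return f"UNC_CBO_CACHE_LOOKUP.{request}_{state}"

for umask in (0x86, 0x88, 0x81, 0x8F, 0x16, 0x18, 0x11, 0x1F, 0x26, 0x21, 0x2F):
    print(hex(umask), "->", lookup_name(umask))
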
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/uncore-other.json b/tools/perf/pmu-events/arch/x86/broadwell/uncore-other.json
new file mode 100644
index 000000000000..73c2261e1e94
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwell/uncore-other.json
@@ -0,0 +1,82 @@
+[
+ {
+ "BriefDescription": "Number of entries allocated. Account for Any type: e.g. Snoop, Core aperture, etc.",
+ "Counter": "0,1",
+ "EventCode": "0x84",
+ "EventName": "UNC_ARB_COH_TRK_REQUESTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of entries allocated. Account for Any type: e.g. Snoop, Core aperture, etc.",
+ "UMask": "0x01",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Each cycle count number of all Core outgoing valid entries. Such entry is defined as valid from it's allocation till first of IDI0 or DRS0 messages is sent out. Accounts for Coherent and non-coherent traffic.",
+ "Counter": "0,",
+ "EventCode": "0x80",
+ "EventName": "UNC_ARB_TRK_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Each cycle count number of all Core outgoing valid entries. Such entry is defined as valid from it's allocation till first of IDI0 or DRS0 messages is sent out. Accounts for Coherent and non-coherent traffic.",
+ "UMask": "0x01",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Cycles with at least one request outstanding is waiting for data return from memory controller. Account for coherent and non-coherent requests initiated by IA Cores, Processor Graphics Unit, or LLC.;",
+ "Counter": "0,",
+ "CounterMask": "1",
+ "EventCode": "0x80",
+ "EventName": "UNC_ARB_TRK_OCCUPANCY.CYCLES_WITH_ANY_REQUEST",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles with at least one request outstanding is waiting for data return from memory controller. Account for coherent and non-coherent requests initiated by IA Cores, Processor Graphics Unit, or LLC.",
+ "UMask": "0x01",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Each cycle count number of 'valid' coherent Data Read entries that are in DirectData mode. Such entry is defined as valid when it is allocated till data sent to Core (first chunk, IDI0). Applicable for IA Cores' requests in normal case.",
+ "Counter": "0,",
+ "EventCode": "0x80",
+ "EventName": "UNC_ARB_TRK_OCCUPANCY.DRD_DIRECT",
+ "PerPkg": "1",
+ "PublicDescription": "Each cycle count number of valid coherent Data Read entries that are in DirectData mode. Such entry is defined as valid when it is allocated till data sent to Core (first chunk, IDI0). Applicable for IA Cores' requests in normal case.",
+ "UMask": "0x02",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Total number of Core outgoing entries allocated. Accounts for Coherent and non-coherent traffic.",
+ "Counter": "0,1",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_TRK_REQUESTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Total number of Core outgoing entries allocated. Accounts for Coherent and non-coherent traffic.",
+ "UMask": "0x01",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Number of Core coherent Data Read entries allocated in DirectData mode",
+ "Counter": "0,1",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_TRK_REQUESTS.DRD_DIRECT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Core coherent Data Read entries allocated in DirectData mode.",
+ "UMask": "0x02",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Number of Writes allocated - any write transactions: full/partials writes and evictions.",
+ "Counter": "0,1",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_TRK_REQUESTS.WRITES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Writes allocated - any write transactions: full/partials writes and evictions.",
+ "UMask": "0x20",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "This 48-bit fixed counter counts the UCLK cycles",
+ "Counter": "FIXED",
+ "EventCode": "0xff",
+ "EventName": "UNC_CLOCK.SOCKET",
+ "PerPkg": "1",
+ "PublicDescription": "This 48-bit fixed counter counts the UCLK cycles.",
+ "Unit": "CLOCK"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/uncore.json b/tools/perf/pmu-events/arch/x86/broadwell/uncore.json
deleted file mode 100644
index 28e1e159a3cb..000000000000
--- a/tools/perf/pmu-events/arch/x86/broadwell/uncore.json
+++ /dev/null
@@ -1,278 +0,0 @@
-[
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x41",
- "EventName": "UNC_CBO_XSNP_RESPONSE.MISS_XCORE",
- "BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which misses in some processor core.",
- "PublicDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which misses in some processor core.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x81",
- "EventName": "UNC_CBO_XSNP_RESPONSE.MISS_EVICTION",
- "BriefDescription": "A cross-core snoop resulted from L3 Eviction which misses in some processor core.",
- "PublicDescription": "A cross-core snoop resulted from L3 Eviction which misses in some processor core.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x44",
- "EventName": "UNC_CBO_XSNP_RESPONSE.HIT_XCORE",
- "BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a non-modified line in some processor core.",
- "PublicDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a non-modified line in some processor core.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x48",
- "EventName": "UNC_CBO_XSNP_RESPONSE.HITM_XCORE",
- "BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a modified line in some processor core.",
- "PublicDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a modified line in some processor core.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x11",
- "EventName": "UNC_CBO_CACHE_LOOKUP.READ_M",
- "BriefDescription": "L3 Lookup read request that access cache and found line in M-state",
- "PublicDescription": "L3 Lookup read request that access cache and found line in M-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x21",
- "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_M",
- "BriefDescription": "L3 Lookup write request that access cache and found line in M-state",
- "PublicDescription": "L3 Lookup write request that access cache and found line in M-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x81",
- "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_M",
- "BriefDescription": "L3 Lookup any request that access cache and found line in M-state",
- "PublicDescription": "L3 Lookup any request that access cache and found line in M-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x18",
- "EventName": "UNC_CBO_CACHE_LOOKUP.READ_I",
- "BriefDescription": "L3 Lookup read request that access cache and found line in I-state",
- "PublicDescription": "L3 Lookup read request that access cache and found line in I-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x88",
- "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_I",
- "BriefDescription": "L3 Lookup any request that access cache and found line in I-state",
- "PublicDescription": "L3 Lookup any request that access cache and found line in I-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x1f",
- "EventName": "UNC_CBO_CACHE_LOOKUP.READ_MESI",
- "BriefDescription": "L3 Lookup read request that access cache and found line in any MESI-state",
- "PublicDescription": "L3 Lookup read request that access cache and found line in any MESI-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x2f",
- "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_MESI",
- "BriefDescription": "L3 Lookup write request that access cache and found line in MESI-state",
- "PublicDescription": "L3 Lookup write request that access cache and found line in MESI-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x8f",
- "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_MESI",
- "BriefDescription": "L3 Lookup any request that access cache and found line in MESI-state",
- "PublicDescription": "L3 Lookup any request that access cache and found line in MESI-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x86",
- "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_ES",
- "BriefDescription": "L3 Lookup any request that access cache and found line in E or S-state",
- "PublicDescription": "L3 Lookup any request that access cache and found line in E or S-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x16",
- "EventName": "UNC_CBO_CACHE_LOOKUP.READ_ES",
- "BriefDescription": "L3 Lookup read request that access cache and found line in E or S-state",
- "PublicDescription": "L3 Lookup read request that access cache and found line in E or S-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x26",
- "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_ES",
- "BriefDescription": "L3 Lookup write request that access cache and found line in E or S-state",
- "PublicDescription": "L3 Lookup write request that access cache and found line in E or S-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "iMPH-U",
- "EventCode": "0x80",
- "UMask": "0x01",
- "EventName": "UNC_ARB_TRK_OCCUPANCY.ALL",
- "BriefDescription": "Each cycle count number of all Core outgoing valid entries. Such entry is defined as valid from it's allocation till first of IDI0 or DRS0 messages is sent out. Accounts for Coherent and non-coherent traffic.",
- "PublicDescription": "Each cycle count number of all Core outgoing valid entries. Such entry is defined as valid from it's allocation till first of IDI0 or DRS0 messages is sent out. Accounts for Coherent and non-coherent traffic.",
- "Counter": "0,",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "iMPH-U",
- "EventCode": "0x80",
- "UMask": "0x02",
- "EventName": "UNC_ARB_TRK_OCCUPANCY.DRD_DIRECT",
- "BriefDescription": "Each cycle count number of 'valid' coherent Data Read entries that are in DirectData mode. Such entry is defined as valid when it is allocated till data sent to Core (first chunk, IDI0). Applicable for IA Cores' requests in normal case.",
- "PublicDescription": "Each cycle count number of 'valid' coherent Data Read entries that are in DirectData mode. Such entry is defined as valid when it is allocated till data sent to Core (first chunk, IDI0). Applicable for IA Cores' requests in normal case.",
- "Counter": "0,",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "iMPH-U",
- "EventCode": "0x81",
- "UMask": "0x01",
- "EventName": "UNC_ARB_TRK_REQUESTS.ALL",
- "BriefDescription": "Total number of Core outgoing entries allocated. Accounts for Coherent and non-coherent traffic.",
- "PublicDescription": "Total number of Core outgoing entries allocated. Accounts for Coherent and non-coherent traffic.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "iMPH-U",
- "EventCode": "0x81",
- "UMask": "0x02",
- "EventName": "UNC_ARB_TRK_REQUESTS.DRD_DIRECT",
- "BriefDescription": "Number of Core coherent Data Read entries allocated in DirectData mode",
- "PublicDescription": "Number of Core coherent Data Read entries allocated in DirectData mode.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "iMPH-U",
- "EventCode": "0x81",
- "UMask": "0x20",
- "EventName": "UNC_ARB_TRK_REQUESTS.WRITES",
- "BriefDescription": "Number of Writes allocated - any write transactions: full/partials writes and evictions.",
- "PublicDescription": "Number of Writes allocated - any write transactions: full/partials writes and evictions.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "iMPH-U",
- "EventCode": "0x84",
- "UMask": "0x01",
- "EventName": "UNC_ARB_COH_TRK_REQUESTS.ALL",
- "BriefDescription": "Number of entries allocated. Account for Any type: e.g. Snoop, Core aperture, etc.",
- "PublicDescription": "Number of entries allocated. Account for Any type: e.g. Snoop, Core aperture, etc.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "iMPH-U",
- "EventCode": "0x80",
- "UMask": "0x01",
- "EventName": "UNC_ARB_TRK_OCCUPANCY.CYCLES_WITH_ANY_REQUEST",
- "BriefDescription": "Cycles with at least one request outstanding is waiting for data return from memory controller. Account for coherent and non-coherent requests initiated by IA Cores, Processor Graphics Unit, or LLC.;",
- "PublicDescription": "Cycles with at least one request outstanding is waiting for data return from memory controller. Account for coherent and non-coherent requests initiated by IA Cores, Processor Graphics Unit, or LLC.",
- "Counter": "0,",
- "CounterMask": "1",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "NCU",
- "EventCode": "0x0",
- "UMask": "0x01",
- "EventName": "UNC_CLOCK.SOCKET",
- "BriefDescription": "This 48-bit fixed counter counts the UCLK cycles",
- "PublicDescription": "This 48-bit fixed counter counts the UCLK cycles.",
- "Counter": "FIXED",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- }
-] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/virtual-memory.json b/tools/perf/pmu-events/arch/x86/broadwell/virtual-memory.json
index 818a8b132c08..6a6de8790f25 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/virtual-memory.json
@@ -385,4 +385,4 @@
"SampleAfterValue": "100007",
"UMask": "0x20"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json b/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json
index 73b6865a769d..b6fdf5ba2c9a 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json
@@ -47,7 +47,7 @@
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)",
"MetricGroup": "TopdownL1",
"MetricName": "Retiring",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided."
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. "
},
{
"BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
@@ -130,44 +130,26 @@
"MetricName": "FLOPc_SMT"
},
{
- "BriefDescription": "Actual per-core usage of the Floating Point execution units (regardless of the vector width)",
+ "BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
"MetricExpr": "( (FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE) + (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) ) / ( 2 * CPU_CLK_UNHALTED.THREAD )",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "FP_Arith_Utilization",
- "PublicDescription": "Actual per-core usage of the Floating Point execution units (regardless of the vector width). Values > 1 are possible due to Fused-Multiply Add (FMA) counting."
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
},
{
- "BriefDescription": "Actual per-core usage of the Floating Point execution units (regardless of the vector width). SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "( (FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE) + (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) ) / ( 2 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ) )",
"MetricGroup": "Cor;Flops;HPC_SMT",
"MetricName": "FP_Arith_Utilization_SMT",
- "PublicDescription": "Actual per-core usage of the Floating Point execution units (regardless of the vector width). Values > 1 are possible due to Fused-Multiply Add (FMA) counting. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common). SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
"MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
"MetricName": "ILP"
},
{
- "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
- "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) * (BR_MISP_RETIRED.ALL_BRANCHES * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / CPU_CLK_UNHALTED.THREAD) / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY )) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) ) * (4 * CPU_CLK_UNHALTED.THREAD) / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "Bad;BrMispredicts",
- "MetricName": "Branch_Misprediction_Cost"
- },
- {
- "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
- "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * (BR_MISP_RETIRED.ALL_BRANCHES * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / CPU_CLK_UNHALTED.THREAD) / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY )) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) ) * (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "Bad;BrMispredicts_SMT",
- "MetricName": "Branch_Misprediction_Cost_SMT"
- },
- {
- "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "Bad;BadSpec;BrMispredicts",
- "MetricName": "IpMispredict"
- },
- {
"BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
"MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"MetricGroup": "SMT",
@@ -204,7 +186,7 @@
"MetricName": "IpTB"
},
{
- "BriefDescription": "Branch instructions per taken branch.",
+ "BriefDescription": "Branch instructions per taken branch. ",
"MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;PGO",
"MetricName": "BpTkBranch"
@@ -257,41 +239,52 @@
"MetricName": "Instructions"
},
{
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "Retire"
+ },
+ {
+ "BriefDescription": "",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
+ "MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
+ "MetricName": "Execute"
+ },
+ {
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
+ "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
"MetricGroup": "DSB;Fed;FetchBW",
"MetricName": "DSB_Coverage"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load instructions (in core cycles)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
- "MetricGroup": "Mem;MemoryBound;MemoryLat",
- "MetricName": "Load_Miss_Real_Latency",
- "PublicDescription": "Actual Average Latency for L1 data-cache miss demand load instructions (in core cycles). Latency may be overestimated for multi-load instructions - e.g. repeat strings."
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts",
+ "MetricName": "IpMispredict"
},
{
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
- "MetricGroup": "Mem;MemoryBound;MemoryBW",
- "MetricName": "MLP"
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
+ "MetricExpr": " ( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) * (BR_MISP_RETIRED.ALL_BRANCHES * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / CPU_CLK_UNHALTED.THREAD) / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY )) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) ) * (4 * CPU_CLK_UNHALTED.THREAD) / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "Branch_Misprediction_Cost"
},
{
- "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
- "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L1D_Cache_Fill_BW"
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
+ "MetricExpr": " ( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * (BR_MISP_RETIRED.ALL_BRANCHES * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / CPU_CLK_UNHALTED.THREAD) / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY )) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) ) * (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BrMispredicts_SMT",
+ "MetricName": "Branch_Misprediction_Cost_SMT"
},
{
- "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L2_Cache_Fill_BW"
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
+ "MetricGroup": "Mem;MemoryBound;MemoryLat",
+ "MetricName": "Load_Miss_Real_Latency"
},
{
- "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L3_Cache_Fill_BW"
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "MetricGroup": "Mem;MemoryBound;MemoryBW",
+ "MetricName": "MLP"
},
{
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
@@ -306,13 +299,13 @@
"MetricName": "L2MPKI"
},
{
- "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)",
"MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
"MetricGroup": "Mem;CacheMisses;Offcore",
"MetricName": "L2MPKI_All"
},
{
- "BriefDescription": "L2 cache misses per kilo instruction for all demand loads (including speculative)",
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)",
"MetricExpr": "1000 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY",
"MetricGroup": "Mem;CacheMisses",
"MetricName": "L2MPKI_Load"
@@ -349,6 +342,48 @@
"MetricName": "Page_Walks_Utilization_SMT"
},
{
+ "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L1D_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L2_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L3_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "(64 * L1D.REPLACEMENT / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L1D_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "(64 * L2_LINES_IN.ALL / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L2_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "(64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L3_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "0",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "L3_Cache_Access_BW_1T"
+ },
+ {
"BriefDescription": "Average CPU Utilization",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "HPC;Summary",
@@ -364,7 +399,8 @@
"BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "( ( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE ) / 1000000000 ) / duration_time",
"MetricGroup": "Cor;Flops;HPC",
- "MetricName": "GFLOPs"
+ "MetricName": "GFLOPs",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine."
},
{
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
@@ -415,6 +451,12 @@
"MetricName": "Socket_CLKS"
},
{
+ "BriefDescription": "Uncore frequency per die [GHZ]",
+ "MetricExpr": "cbox_0@event\\=0x0@ / #num_dies / duration_time / 1000000000",
+ "MetricGroup": "SoC",
+ "MetricName": "UNCORE_FREQ"
+ },
+ {
"BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
"MetricGroup": "Branches;OS",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/cache.json b/tools/perf/pmu-events/arch/x86/broadwellde/cache.json
index 0f4de912d099..4b77181b2c53 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/cache.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/cache.json
@@ -806,4 +806,4 @@
"SampleAfterValue": "100003",
"UMask": "0x10"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/floating-point.json b/tools/perf/pmu-events/arch/x86/broadwellde/floating-point.json
index fdf5dc40b835..46cf18490140 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/floating-point.json
@@ -190,4 +190,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x3"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/frontend.json b/tools/perf/pmu-events/arch/x86/broadwellde/frontend.json
index f0bcb945ff76..37ce8034b2ed 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/frontend.json
@@ -292,4 +292,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/memory.json b/tools/perf/pmu-events/arch/x86/broadwellde/memory.json
index 604059e7eb58..a3a5cc6dab42 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/memory.json
@@ -429,4 +429,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x40"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/other.json b/tools/perf/pmu-events/arch/x86/broadwellde/other.json
index 4b360fe96698..917d145d5227 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/other.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/other.json
@@ -41,4 +41,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json b/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json
index 7580b8af0d13..85654037b768 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json
@@ -1378,4 +1378,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/uncore-cache.json b/tools/perf/pmu-events/arch/x86/broadwellde/uncore-cache.json
index 58ed6d33d1f4..c4d154944ab6 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/uncore-cache.json
@@ -1,316 +1,3729 @@
[
{
- "BriefDescription": "Uncore cache clock ticks",
+ "BriefDescription": "Bounce Control",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA",
+ "EventName": "UNC_C_BOUNCE_CONTROL",
+ "PerPkg": "1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Uncore Clocks",
"Counter": "0,1,2,3",
"EventName": "UNC_C_CLOCKTICKS",
"PerPkg": "1",
"Unit": "CBO"
},
{
- "BriefDescription": "All LLC Misses (code+ data rd + data wr - including demand and prefetch)",
+ "BriefDescription": "Counter 0 Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1F",
+ "EventName": "UNC_C_COUNTER0_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Since occupancy counts can only be captured in the Cbo's 0 counter, this event allows a user to capture occupancy related information by filtering the Cb0 occupancy count captured in Counter 0. The filtering available is found in the control register - threshold, invert and edge detect. E.g. setting threshold to 1 can effectively monitor how many cycles the monitored queue has an entry.",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "FaST wire asserted",
+ "Counter": "0,1",
+ "EventCode": "0x9",
+ "EventName": "UNC_C_FAST_ASSERTED",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles either the local distress or incoming distress signals are asserted. Incoming distress includes both up and dn.",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cache Lookups; Any Request",
"Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.ANY",
- "Filter": "filter_state=0x1",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.; Filters for any transaction originating from the IPQ or IRQ. This does not include lookups originating from the ISMQ.",
"UMask": "0x11",
"Unit": "CBO"
},
{
- "BriefDescription": "M line evictions from LLC (writebacks to memory)",
+ "BriefDescription": "Cache Lookups; Data Read Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.DATA_READ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.; Read transactions",
+ "UMask": "0x3",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cache Lookups; Lookups that Match NID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.NID",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.; Qualify one of the other subevents by the Target NID. The NID is programmed in Cn_MSR_PMON_BOX_FILTER.nid. In conjunction with STATE = I, it is possible to monitor misses to specific NIDs in the system.",
+ "UMask": "0x41",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cache Lookups; Any Read Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.READ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.; Read transactions",
+ "UMask": "0x21",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cache Lookups; External Snoop Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.REMOTE_SNOOP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.; Filters for only snoop requests coming from the remote socket(s) through the IPQ.",
+ "UMask": "0x9",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cache Lookups; Write Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.WRITE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.; Writeback transactions from L2 to the LLC This includes all write transactions -- both Cachable and UC.",
+ "UMask": "0x5",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in E state",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.E_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Lines Victimized",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.F_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in S State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.I_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Lines Victimized",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in M state",
"Counter": "0,1,2,3",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.M_STATE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x1",
"Unit": "CBO"
},
{
- "BriefDescription": "LLC misses - demand and prefetch data reads - excludes LLC prefetches. Derived from unc_c_tor_inserts.miss_opcode",
+ "BriefDescription": "Lines Victimized; Victimized Lines that Match NID",
"Counter": "0,1,2,3",
- "EventCode": "0x35",
- "EventName": "LLC_MISSES.DATA_READ",
- "Filter": "filter_opc=0x182",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.NID",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.; Qualify one of the other subevents by the Target NID. The NID is programmed in Cn_MSR_PMON_BOX_FILTER.nid. In conjunction with STATE = I, it is possible to monitor misses to specific NIDs in the system.",
+ "UMask": "0x40",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cbo Misc; DRd hitting non-M with raw CV=0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.CVZERO_PREFETCH_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.",
+ "UMask": "0x20",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cbo Misc; Clean Victim with raw CV=0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.CVZERO_PREFETCH_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cbo Misc; RFO HitS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.RFO_HIT_S",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.; Number of times that an RFO hit in S state. This is useful for determining if it might be good for a workload to use RspIWB instead of RspSWB.",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cbo Misc; Silent Snoop Eviction",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.RSPI_WAS_FSE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Miscellaneous events in the Cbo.; Counts the number of times when a Snoop hit in FSE states and triggered a silent eviction. This is useful because this information is lost in the PRE encodings.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cbo Misc",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.STARTED",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cbo Misc; Write Combining Aliasing",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.WC_ALIASING",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.; Counts the number of times that a USWC write (WCIL(F)) transaction hit in the LLC in M state, triggering a WBMtoI followed by the USWC write. This occurs when there is WC aliasing.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Age 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3C",
+ "EventName": "UNC_C_QLRU.AGE0",
+ "PerPkg": "1",
+ "PublicDescription": "How often age was set to 0",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Age 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3C",
+ "EventName": "UNC_C_QLRU.AGE1",
+ "PerPkg": "1",
+ "PublicDescription": "How often age was set to 1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Age 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3C",
+ "EventName": "UNC_C_QLRU.AGE2",
+ "PerPkg": "1",
+ "PublicDescription": "How often age was set to 2",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Age 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3C",
+ "EventName": "UNC_C_QLRU.AGE3",
+ "PerPkg": "1",
+ "PublicDescription": "How often age was set to 3",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Bits Decremented",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3C",
+ "EventName": "UNC_C_QLRU.LRU_DECREMENT",
+ "PerPkg": "1",
+ "PublicDescription": "How often all LRU bits were decremented by 1",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "LRU Queue; Non-0 Aged Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3C",
+ "EventName": "UNC_C_QLRU.VICTIM_NON_ZERO",
+ "PerPkg": "1",
+ "PublicDescription": "How often we picked a victim that had a non-zero age",
+ "UMask": "0x20",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xF",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xC",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x3",
"Unit": "CBO"
},
{
- "BriefDescription": "LLC misses - Uncacheable reads (from cpu) . Derived from unc_c_tor_inserts.miss_opcode",
+ "BriefDescription": "AD Ring In Use; Down and Even",
"Counter": "0,1,2,3",
- "EventCode": "0x35",
- "EventName": "LLC_MISSES.UNCACHEABLE",
- "Filter": "filter_opc=0x187",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.DOWN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xF",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xC",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.CW",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x3",
"Unit": "CBO"
},
{
- "BriefDescription": "MMIO reads. Derived from unc_c_tor_inserts.miss_opcode",
+ "BriefDescription": "AK Ring In Use; Down and Even",
"Counter": "0,1,2,3",
- "EventCode": "0x35",
- "EventName": "LLC_MISSES.MMIO_READ",
- "Filter": "filter_opc=0x187,filter_nc=1",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.DOWN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xF",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xC",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.CW",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x3",
"Unit": "CBO"
},
{
- "BriefDescription": "MMIO writes. Derived from unc_c_tor_inserts.miss_opcode",
+ "BriefDescription": "BL Ring in Use; Down and Even",
"Counter": "0,1,2,3",
- "EventCode": "0x35",
- "EventName": "LLC_MISSES.MMIO_WRITE",
- "Filter": "filter_opc=0x18f,filter_nc=1",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.DOWN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.UP_ODD",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; AD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; BL",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; Snoops of processor's cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.IV",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Any",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_C_RING_IV_USED.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring in BDX Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.; Filters any polarity",
+ "UMask": "0xF",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Any",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_C_RING_IV_USED.DN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring in BDX Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.; Filters any polarity",
+ "UMask": "0xC",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_C_RING_IV_USED.DOWN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring in BDX Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.; Filters for Down polarity",
+ "UMask": "0xCC",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Any",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_C_RING_IV_USED.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring in BDX Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.; Filters any polarity",
"UMask": "0x3",
"Unit": "CBO"
},
{
- "BriefDescription": "LLC prefetch misses for RFO. Derived from unc_c_tor_inserts.miss_opcode",
+ "BriefDescription": "AD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_C_RING_SINK_STARVED.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_C_RING_SINK_STARVED.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_C_RING_SINK_STARVED.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_C_RING_SINK_STARVED.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Number of cycles the Cbo is actively throttling traffic onto the Ring in order to limit bounce traffic.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_C_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; IRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.; IPQ is externally startved and therefore we are blocking the IRQ.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; IPQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.; IRQ is externally starved and therefore we are blocking the IPQ.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; ISMQ_BID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.ISMQ_BIDS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.; Number of times that the ISMQ Bid.",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; PRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.PRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; IPQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; IRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; IRQ Rejected",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.IRQ_REJ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; PRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.PRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; PRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.PRQ_REJ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x20",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; IPQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in internal starvation. This occurs when one (or more) of the entries in the ingress queue are being starved out by other entries in that queue.; Cycles with the IPQ in Internal Starvation.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; IRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in internal starvation. This occurs when one (or more) of the entries in the ingress queue are being starved out by other entries in that queue.; Cycles with the IRQ in Internal Starvation.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; ISMQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.ISMQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in internal starvation. This occurs when one (or more) of the entries in the ingress queue are being starved out by other entries in that queue.; Cycles with the ISMQ in Internal Starvation.",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; PRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.PRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in internal starvation. This occurs when one (or more) of the entries in the ingress queue are being starved out by other entries in that queue.",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; Address Conflict",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.ADDR_CONFLICT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.; Counts the number of times that a request form the IPQ was retried because of a TOR reject from an address conflicts. Address conflicts out of the IPQ should be rare. They will generally only occur if two different sockets are sending requests to the same address at the same time. This is a true conflict case, unlike the IPQ Address Conflict which is commonly caused by prefetching characteristics.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; Any Reject",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.; Counts the number of times that a request form the IPQ was retried because of a TOR reject. TOR rejects from the IPQ can be caused by the Egress being full or Address Conflicts.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; No Egress Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.; Counts the number of times that a request form the IPQ was retried because of a TOR reject from the Egress being full. IPQ requests make use of the AD Egress for regular responses, the BL egress to forward data, and the AK egress to return credits.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; No QPI Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.QPI_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; No AD Sbo Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_C_RxR_IPQ_RETRY2.AD_SBO",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.; Counts the number of times that a request from the IPQ was retried because of it lacked credits to send an AD packet to the Sbo.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; Target Node Filter",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_C_RxR_IPQ_RETRY2.TARGET",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.; Counts the number of times that a request from the IPQ was retried filtered by the Target NodeID as specified in the Cbox's Filter register.",
+ "UMask": "0x40",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; Address Conflict",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.ADDR_CONFLICT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request from the IRQ was retried because of an address match in the TOR. In order to maintain coherency, requests to the same address are not allowed to pass each other up in the Cbo. Therefore, if there is an outstanding request to a given address, one cannot issue another request to that address until it is complete. This comes up most commonly with prefetches. Outstanding prefetches occasionally will not complete their memory fetch and a demand request to the same address will then sit in the IRQ and get retried until the prefetch fills the data into the LLC. Therefore, it will not be uncommon to see this case in high bandwidth streaming workloads when the LLC Prefetcher in the core is enabled.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; Any Reject",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of IRQ retries that occur. Requests from the IRQ are retried if they are rejected from the TOR pipeline for a variety of reasons. Some of the most common reasons include if the Egress is full, there are no RTIDs, or there is a Physical Address match to another outstanding request.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No Egress Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request from the IRQ was retried because it failed to acquire an entry in the Egress. The egress is the buffer that queues up for allocating onto the ring. IRQ requests can make use of all four rings and all four Egresses. If any of the queues that a given request needs to make use of are full, the request will be retried.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No IIO Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.IIO_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request attempted to acquire the NCS/NCB credit for sending messages on BL to the IIO. There is a single credit in each CBo that is shared between the NCS and NCB message classes for sending transactions on the BL ring (such as read data) to the IIO.",
+ "UMask": "0x20",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.NID",
+ "PerPkg": "1",
+ "PublicDescription": "Qualify one of the other subevents by a given RTID destination NID. The NID is programmed in Cn_MSR_PMON_BOX_FILTER1.nid.",
+ "UMask": "0x40",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No QPI Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.QPI_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of requests rejects because of lack of QPI Ingress credits. These credits are required in order to send transactions to the QPI agent. Please see the QPI_IGR_CREDITS events for more information.",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No RTIDs",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.RTID",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that requests from the IRQ were retried because there were no RTIDs available. RTIDs are required after a request misses the LLC and needs to send snoops and/or requests to memory. If there are no RTIDs available, requests will queue up in the IRQ and retry until one becomes available. Note that there are multiple RTID pools for the different sockets. There may be cases where the local RTIDs are all used, but requests destined for remote memory can still acquire an RTID because there are remote RTIDs available. This event does not provide any filtering for this case.",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No AD Sbo Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_C_RxR_IRQ_RETRY2.AD_SBO",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request from the IPQ was retried because of it lacked credits to send an AD packet to the Sbo.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No BL Sbo Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_C_RxR_IRQ_RETRY2.BL_SBO",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request from the IPQ was retried because of it lacked credits to send an BL packet to the Sbo.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; Target Node Filter",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_C_RxR_IRQ_RETRY2.TARGET",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request from the IPQ was retried filtered by the Target NodeID as specified in the Cbox's Filter register.",
+ "UMask": "0x40",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; Any Reject",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.; Counts the total number of times that a request from the ISMQ retried because of a TOR reject. ISMQ requests generally will not need to retry (or at least ISMQ retries are less common than IRQ retries). ISMQ requests will retry if they are not able to acquire a needed Egress credit to get onto the ring, or for cache evictions that need to acquire an RTID. Most ISMQ requests already have an RTID, so eviction retries will be less common here.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No Egress Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.; Counts the number of times that a request from the ISMQ retried because of a TOR reject caused by a lack of Egress credits. The egress is the buffer that queues up for allocating onto the ring. If any of the Egress queues that a given request needs to make use of are full, the request will be retried.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No IIO Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.IIO_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.; Number of times a request attempted to acquire the NCS/NCB credit for sending messages on BL to the IIO. There is a single credit in each CBo that is shared between the NCS and NCB message classes for sending transactions on the BL ring (such as read data) to the IIO.",
+ "UMask": "0x20",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Retries",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.NID",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.; Qualify one of the other subevents by a given RTID destination NID. The NID is programmed in Cn_MSR_PMON_BOX_FILTER1.nid.",
+ "UMask": "0x40",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No QPI Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.QPI_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No RTIDs",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.RTID",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.; Counts the number of times that a request from the ISMQ retried because of a TOR reject caused by no RTIDs. M-state cache evictions are serviced through the ISMQ, and must acquire an RTID in order to write back to memory. If no RTIDs are available, they will be retried.",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Retries",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.WB_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.; Qualify one of the other subevents by a given RTID destination NID. The NID is programmed in Cn_MSR_PMON_BOX_FILTER1.nid.",
+ "UMask": "0x80",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Request Queue Rejects; No AD Sbo Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY2.AD_SBO",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request from the ISMQ was retried because of it lacked credits to send an AD packet to the Sbo.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Request Queue Rejects; No BL Sbo Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY2.BL_SBO",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request from the ISMQ was retried because of it lacked credits to send an BL packet to the Sbo.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Request Queue Rejects; Target Node Filter",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY2.TARGET",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request from the ISMQ was retried filtered by the Target NodeID as specified in the Cbox's Filter register.",
+ "UMask": "0x40",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; IPQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; IRQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; IRQ Rejected",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.IRQ_REJ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; PRQ Rejects",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.PRQ_REJ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x20",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "SBo Credits Acquired; For AD Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3D",
+ "EventName": "UNC_C_SBO_CREDITS_ACQUIRED.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo credits acquired in a given cycle, per ring. Each Cbo is assigned an Sbo it can communicate with.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "SBo Credits Acquired; For BL Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3D",
+ "EventName": "UNC_C_SBO_CREDITS_ACQUIRED.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo credits acquired in a given cycle, per ring. Each Cbo is assigned an Sbo it can communicate with.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "SBo Credits Occupancy; For AD Ring",
+ "EventCode": "0x3E",
+ "EventName": "UNC_C_SBO_CREDIT_OCCUPANCY.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo credits in use in a given cycle, per ring. Each Cbo is assigned an Sbo it can communicate with.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "SBo Credits Occupancy; For BL Ring",
+ "EventCode": "0x3E",
+ "EventName": "UNC_C_SBO_CREDIT_OCCUPANCY.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo credits in use in a given cycle, per ring. Each Cbo is assigned an Sbo it can communicate with.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Inserts; All",
"Counter": "0,1,2,3",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.RFO_LLC_PREFETCH",
- "Filter": "filter_opc=0x190",
+ "EventName": "UNC_C_TOR_INSERTS.ALL",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x3",
+ "PublicDescription": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All transactions inserted into the TOR. This includes requests that reside in the TOR for a short time, such as LLC Hits that do not need to snoop cores or requests that get rejected and have to be retried through one of the ingress queues. The TOR is more commonly a bottleneck in skews with smaller core counts, where the ratio of RTIDs to TOR entries is larger. Note that there are reserved TOR entries for various request types, so it is possible that a given request type be blocked with an occupancy that is less than 20. Also note that generally requests will not be able to arbitrate into the TOR pipeline if there are no available TOR slots.",
+ "UMask": "0x8",
"Unit": "CBO"
},
{
- "BriefDescription": "LLC prefetch misses for code reads. Derived from unc_c_tor_inserts.miss_opcode",
+ "BriefDescription": "TOR Inserts; Evictions",
"Counter": "0,1,2,3",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.CODE_LLC_PREFETCH",
- "Filter": "filter_opc=0x191",
+ "EventName": "UNC_C_TOR_INSERTS.EVICTION",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x3",
+ "PublicDescription": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Eviction transactions inserted into the TOR. Evictions can be quick, such as when the line is in the F, S, or E states and no core valid bits are set. They can also be longer if either CV bits are set (so the cores need to be snooped) and/or if there is a HitM (in which case it is necessary to write the request out to memory).",
+ "UMask": "0x4",
"Unit": "CBO"
},
{
- "BriefDescription": "LLC prefetch misses for data reads. Derived from unc_c_tor_inserts.miss_opcode",
+ "BriefDescription": "TOR Inserts; Local Memory",
"Counter": "0,1,2,3",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.DATA_LLC_PREFETCH",
- "Filter": "filter_opc=0x192",
+ "EventName": "UNC_C_TOR_INSERTS.LOCAL",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x3",
+ "PublicDescription": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All transactions inserted into the TOR that are satisifed by locally HOMed memory.",
+ "UMask": "0x28",
"Unit": "CBO"
},
{
- "BriefDescription": "LLC misses for PCIe read current. Derived from unc_c_tor_inserts.miss_opcode",
+ "BriefDescription": "TOR Inserts; Local Memory - Opcode Matched",
"Counter": "0,1,2,3",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.PCIE_READ",
- "Filter": "filter_opc=0x19e",
+ "EventName": "UNC_C_TOR_INSERTS.LOCAL_OPCODE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x3",
+ "PublicDescription": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All transactions, satisifed by an opcode, inserted into the TOR that are satisifed by locally HOMed memory.",
+ "UMask": "0x21",
"Unit": "CBO"
},
{
- "BriefDescription": "ItoM write misses (as part of fast string memcpy stores) + PCIe full line writes. Derived from unc_c_tor_inserts.miss_opcode",
+ "BriefDescription": "TOR Inserts; Misses to Local Memory",
"Counter": "0,1,2,3",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.PCIE_WRITE",
- "Filter": "filter_opc=0x1c8",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_LOCAL",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x3",
+ "PublicDescription": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that are satisifed by locally HOMed memory.",
+ "UMask": "0x2A",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Misses to Local Memory - Opcode Matched",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_LOCAL_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions, satisifed by an opcode, inserted into the TOR that are satisifed by locally HOMed memory.",
+ "UMask": "0x23",
"Unit": "CBO"
},
{
- "BriefDescription": "PCIe write misses (full cache line). Derived from unc_c_tor_inserts.miss_opcode",
+ "BriefDescription": "TOR Inserts; Miss Opcode Match",
"Counter": "0,1,2,3",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.PCIE_NON_SNOOP_WRITE",
- "Filter": "filter_opc=0x1c8,filter_tid=0x3e",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_OPCODE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that match an opcode.",
"UMask": "0x3",
"Unit": "CBO"
},
{
- "BriefDescription": "PCIe writes (partial cache line). Derived from unc_c_tor_inserts.opcode",
+ "BriefDescription": "TOR Inserts; Misses to Remote Memory",
"Counter": "0,1,2,3",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_NS_PARTIAL_WRITE",
- "Filter": "filter_opc=0x180,filter_tid=0x3e",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_REMOTE",
"PerPkg": "1",
- "UMask": "0x1",
+ "PublicDescription": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that are satisifed by remote caches or remote memory.",
+ "UMask": "0x8A",
"Unit": "CBO"
},
{
- "BriefDescription": "L2 demand and L2 prefetch code references to LLC. Derived from unc_c_tor_inserts.opcode",
+ "BriefDescription": "TOR Inserts; Misses to Remote Memory - Opcode Matched",
"Counter": "0,1,2,3",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.CODE_LLC_PREFETCH",
- "Filter": "filter_opc=0x181",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_REMOTE_OPCODE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
+ "PublicDescription": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions, satisifed by an opcode, inserted into the TOR that are satisifed by remote caches or remote memory.",
+ "UMask": "0x83",
"Unit": "CBO"
},
{
- "BriefDescription": "Streaming stores (full cache line). Derived from unc_c_tor_inserts.opcode",
+ "BriefDescription": "TOR Inserts; NID Matched",
"Counter": "0,1,2,3",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.STREAMING_FULL",
- "Filter": "filter_opc=0x18c",
+ "EventName": "UNC_C_TOR_INSERTS.NID_ALL",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
+ "PublicDescription": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All NID matched (matches an RTID destination) transactions inserted into the TOR. The NID is programmed in Cn_MSR_PMON_BOX_FILTER.nid. In conjunction with STATE = I, it is possible to monitor misses to specific NIDs in the system.",
+ "UMask": "0x48",
"Unit": "CBO"
},
{
- "BriefDescription": "Streaming stores (partial cache line). Derived from unc_c_tor_inserts.opcode",
+ "BriefDescription": "TOR Inserts; NID Matched Evictions",
"Counter": "0,1,2,3",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.STREAMING_PARTIAL",
- "Filter": "filter_opc=0x18d",
+ "EventName": "UNC_C_TOR_INSERTS.NID_EVICTION",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
+ "PublicDescription": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; NID matched eviction transactions inserted into the TOR.",
+ "UMask": "0x44",
"Unit": "CBO"
},
{
- "BriefDescription": "PCIe read current. Derived from unc_c_tor_inserts.opcode",
+ "BriefDescription": "TOR Inserts; NID Matched Miss All",
"Counter": "0,1,2,3",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_READ",
- "Filter": "filter_opc=0x19e",
+ "EventName": "UNC_C_TOR_INSERTS.NID_MISS_ALL",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
+ "PublicDescription": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All NID matched miss requests that were inserted into the TOR.",
+ "UMask": "0x4A",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Inserts; NID and Opcode Matched Miss",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.NID_MISS_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that match a NID and an opcode.",
+ "UMask": "0x43",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Inserts; NID and Opcode Matched",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.NID_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Transactions inserted into the TOR that match a NID and an opcode.",
+ "UMask": "0x41",
"Unit": "CBO"
},
{
- "BriefDescription": "PCIe write references (full cache line). Derived from unc_c_tor_inserts.opcode",
+ "BriefDescription": "TOR Inserts; NID Matched Writebacks",
"Counter": "0,1,2,3",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_WRITE",
- "Filter": "filter_opc=0x1c8,filter_tid=0x3e",
+ "EventName": "UNC_C_TOR_INSERTS.NID_WB",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; NID matched write transactions inserted into the TOR.",
+ "UMask": "0x50",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Opcode Match",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Transactions inserted into the TOR that match an opcode (matched by Cn_MSR_PMON_BOX_FILTER.opc)",
"UMask": "0x1",
"Unit": "CBO"
},
{
- "BriefDescription": "Occupancy counter for LLC data reads (demand and L2 prefetch). Derived from unc_c_tor_occupancy.miss_opcode",
+ "BriefDescription": "TOR Inserts; Remote Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All transactions inserted into the TOR that are satisifed by remote caches or remote memory.",
+ "UMask": "0x88",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Remote Memory - Opcode Matched",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.REMOTE_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All transactions, satisifed by an opcode, inserted into the TOR that are satisifed by remote caches or remote memory.",
+ "UMask": "0x81",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Writebacks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Write transactions inserted into the TOR. This does not include RFO, but actual operations that contain data being sent from the core.",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Any",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); All valid TOR entries. This includes requests that reside in the TOR for a short time, such as LLC Hits that do not need to snoop cores or requests that get rejected and have to be retried through one of the ingress queues. The TOR is more commonly a bottleneck in skews with smaller core counts, where the ratio of RTIDs to TOR entries is larger. Note that there are reserved TOR entries for various request types, so it is possible that a given request type be blocked with an occupancy that is less than 20. Also note that generally requests will not be able to arbitrate into the TOR pipeline if there are no available TOR slots.",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Evictions",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.EVICTION",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding eviction transactions in the TOR. Evictions can be quick, such as when the line is in the F, S, or E states and no core valid bits are set. They can also be longer if either CV bits are set (so the cores need to be snooped) and/or if there is a HitM (in which case it is necessary to write the request out to memory).",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x28",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Local Memory - Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.LOCAL_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding transactions, satisifed by an opcode, in the TOR that are satisifed by locally HOMed memory.",
+ "UMask": "0x21",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Miss All",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding miss requests in the TOR. 'Miss' means the allocation requires an RTID. This generally means that the request was sent to memory or MMIO.",
+ "UMask": "0xA",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x2A",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Misses to Local Memory - Opcode Matched",
"EventCode": "0x36",
- "EventName": "UNC_C_TOR_OCCUPANCY.LLC_DATA_READ",
- "Filter": "filter_opc=0x182",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_LOCAL_OPCODE",
"PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding Miss transactions, satisifed by an opcode, in the TOR that are satisifed by locally HOMed memory.",
+ "UMask": "0x23",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Miss Opcode Match",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); TOR entries for miss transactions that match an opcode. This generally means that the request was sent to memory or MMIO.",
"UMask": "0x3",
"Unit": "CBO"
},
{
- "BriefDescription": "read requests to home agent",
+ "BriefDescription": "TOR Occupancy",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x8A",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Misses to Remote Memory - Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_REMOTE_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding Miss transactions, satisifed by an opcode, in the TOR that are satisifed by remote caches or remote memory.",
+ "UMask": "0x83",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of NID matched outstanding requests in the TOR. The NID is programmed in Cn_MSR_PMON_BOX_FILTER.nid.In conjunction with STATE = I, it is possible to monitor misses to specific NIDs in the system.",
+ "UMask": "0x48",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID Matched Evictions",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_EVICTION",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding NID matched eviction transactions in the TOR .",
+ "UMask": "0x44",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_MISS_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding Miss requests in the TOR that match a NID.",
+ "UMask": "0x4A",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID and Opcode Matched Miss",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_MISS_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding Miss requests in the TOR that match a NID and an opcode.",
+ "UMask": "0x43",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID and Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); TOR entries that match a NID and an opcode.",
+ "UMask": "0x41",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID Matched Writebacks",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_WB",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); NID matched write transactions int the TOR.",
+ "UMask": "0x50",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Opcode Match",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); TOR entries that match an opcode (matched by Cn_MSR_PMON_BOX_FILTER.opc).",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x88",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Remote Memory - Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.REMOTE_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding transactions, satisifed by an opcode, in the TOR that are satisifed by remote caches or remote memory.",
+ "UMask": "0x81",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Writebacks",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.WB",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Write transactions in the TOR. This does not include RFO, but actual operations that contain data being sent from the core.",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Onto AD Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_C_TxR_ADS_USED.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Onto AK Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_C_TxR_ADS_USED.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Onto BL Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_C_TxR_ADS_USED.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AD - Cachebo",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AD_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Cachebo destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AD - Corebo",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AD_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Corebo destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AK - Cachebo",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AK_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Cachebo destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AK - Corebo",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AK_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Corebo destined for the AK ring. This is commonly used for snoop responses coming from the core and destined for a Cachebo.",
+ "UMask": "0x20",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; BL - Cacheno",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.BL_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Cachebo destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; BL - Corebo",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.BL_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Corebo destined for the BL ring. This is commonly used for transfering writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; IV - Cachebo",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.IV_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Cachebo destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto AD Ring (to core)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_C_TxR_STARVED.AD_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.; cycles that the core AD egress spent in starvation",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto AK Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_C_TxR_STARVED.AK_BOTH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.; cycles that both AK egresses spent in starvation",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto BL Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_C_TxR_STARVED.BL_BOTH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.; cycles that both BL egresses spent in starvation",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto IV Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_C_TxR_STARVED.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.; cycles that the cachebo IV egress spent in starvation",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BT Cycles Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x42",
+ "EventName": "UNC_H_BT_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Backup Tracker (BT) is not empty. The BT is the actual HOM tracker in IVT.",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT to HT Not Issued; Incoming Data Hazard",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.INCOMING_BL_HAZARD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not issue transaction from BT to HT.; Cycles unable to issue from BT due to incoming BL data hazard",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT to HT Not Issued; Incoming Snoop Hazard",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.INCOMING_SNP_HAZARD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not issue transaction from BT to HT.; Cycles unable to issue from BT due to incoming snoop hazard",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT to HT Not Issued; Incoming Data Hazard",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.RSPACKCFLT_HAZARD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not issue transaction from BT to HT.; Cycles unable to issue from BT due to incoming BL data hazard",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT to HT Not Issued; Incoming Data Hazard",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.WBMDATA_HAZARD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not issue transaction from BT to HT.; Cycles unable to issue from BT due to incoming BL data hazard",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Bypass; Not Taken",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_H_BYPASS_IMC.NOT_TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when the HA was able to bypass was attempted. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filted by when the bypass was taken and when it was not.; Filter for transactions that could not take the bypass.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Bypass; Taken",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_H_BYPASS_IMC.TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when the HA was able to bypass was attempted. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filted by when the bypass was taken and when it was not.; Filter for transactions that succeeded in taking the bypass.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "uclks",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_H_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of uclks in the HA. This will be slightly different than the count in the Ubox because of enable/freeze delays. The HA is on the other side of the die from the fixed Ubox uclk counter, so the drift could be somewhat larger than in units that are closer like the QPI Agent.",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Direct2Core Messages Sent",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_H_DIRECT2CORE_COUNT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Direct2Core messages sent",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles when Direct2Core was Disabled",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_H_DIRECT2CORE_CYCLES_DISABLED",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles in which Direct2Core was disabled",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Number of Reads that had Direct2Core Overridden",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_H_DIRECT2CORE_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Reads where Direct2Core overridden",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lat Opt Return",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_H_DIRECTORY_LAT_OPT",
+ "PerPkg": "1",
+ "PublicDescription": "Directory Latency Optimization Data Return Path Taken. When directory mode is enabled and the directory retuned for a read is Dir=I, then data can be returned using a faster path if certain conditions are met (credits, free pipeline, etc).",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lookups; Snoop Not Needed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC",
+ "EventName": "UNC_H_DIRECTORY_LOOKUP.NO_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.; Filters for transactions that did not have to send any snoops because the directory bit was clear.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lookups; Snoop Needed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC",
+ "EventName": "UNC_H_DIRECTORY_LOOKUP.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.; Filters for transactions that had to send one or more snoops because the directory bit was set.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates; Any Directory Update",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates; Directory Clear",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.CLEAR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.; Filter for directory clears. This occurs when snoops were sent and all returned with RspI.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates; Directory Set",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.SET",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.; Filter for directory sets. This occurs when a remote read transaction requests memory, bringing it to a remote cache.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is AckCnfltWbI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.ACKCNFLTWBI",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; All Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.ALL",
+ "PerPkg": "1",
+ "UMask": "0xFF",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.ALLOCS",
+ "PerPkg": "1",
+ "UMask": "0x70",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.EVICTS",
+ "PerPkg": "1",
+ "UMask": "0x42",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; HOM Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.HOM",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; Invalidations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.INVALS",
+ "PerPkg": "1",
+ "UMask": "0x26",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is RdCode, RdData, RdDataMigratory, RdInvOwn, RdCur or InvItoE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.READ_OR_INVITOE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is RspI, RspIWb, RspS, RspSWb, RspCnflt or RspCnfltWbI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.RSP",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is RspIFwd or RspIFwdWb for a local request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.RSPFWDI_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is RspIFwd or RspIFwdWb for a remote request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.RSPFWDI_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is RsSFwd or RspSFwdWb",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.RSPFWDS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is WbMtoE or WbMtoS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.WBMTOE_OR_S",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is WbMtoI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is AckCnfltWbI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.ACKCNFLTWBI",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; All Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.ALL",
+ "PerPkg": "1",
+ "UMask": "0xFF",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; HOM Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.HOM",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RdCode, RdData, RdDataMigratory, RdInvOwn, RdCur or InvItoE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.READ_OR_INVITOE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RspI, RspIWb, RspS, RspSWb, RspCnflt or RspCnfltWbI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.RSP",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RspIFwd or RspIFwdWb for a local request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.RSPFWDI_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RspIFwd or RspIFwdWb for a remote request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.RSPFWDI_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RsSFwd or RspSFwdWb",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.RSPFWDS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is WbMtoE or WbMtoS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.WBMTOE_OR_S",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is WbMtoI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is AckCnfltWbI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.ACKCNFLTWBI",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; All Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.ALL",
+ "PerPkg": "1",
+ "UMask": "0xFF",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.ALLOCS",
+ "PerPkg": "1",
+ "UMask": "0x70",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; HOM Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.HOM",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; Invalidations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.INVALS",
+ "PerPkg": "1",
+ "UMask": "0x26",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RdCode, RdData, RdDataMigratory, RdInvOwn, RdCur or InvItoE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.READ_OR_INVITOE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RspI, RspIWb, RspS, RspSWb, RspCnflt or RspCnfltWbI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.RSP",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RspIFwd or RspIFwdWb for a local request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.RSPFWDI_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RspIFwd or RspIFwdWb for a remote request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.RSPFWDI_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RsSFwd or RspSFwdWb",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.RSPFWDS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is WbMtoE or WbMtoS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.WBMTOE_OR_S",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is WbMtoI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; AD to QPI Link 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; AD to QPI Link 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Normal Priority Reads Issued; Normal Priority",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x17",
+ "EventName": "UNC_H_IMC_READS.NORMAL",
+ "PerPkg": "1",
+ "PublicDescription": "Count of the number of reads issued to any of the memory controller channels. This can be filtered by the priority of the reads.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Retry Events",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_H_IMC_RETRY",
+ "PerPkg": "1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; All Writes",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1A",
+ "EventName": "UNC_H_IMC_WRITES.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0xF",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; Full Line Non-ISOCH",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1A",
+ "EventName": "UNC_H_IMC_WRITES.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; ISOCH Full Line",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1A",
+ "EventName": "UNC_H_IMC_WRITES.FULL_ISOCH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; Partial Non-ISOCH",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1A",
+ "EventName": "UNC_H_IMC_WRITES.PARTIAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; ISOCH Partial",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1A",
+ "EventName": "UNC_H_IMC_WRITES.PARTIAL_ISOCH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Backpressure",
+ "Counter": "0,1,2",
+ "EventCode": "0x61",
+ "EventName": "UNC_H_IOT_BACKPRESSURE.HUB",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Backpressure",
+ "Counter": "0,1,2",
+ "EventCode": "0x61",
+ "EventName": "UNC_H_IOT_BACKPRESSURE.SAT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "Counter": "0,1,2",
+ "EventCode": "0x64",
+ "EventName": "UNC_H_IOT_CTS_EAST_LO.CTS0",
+ "PerPkg": "1",
+ "PublicDescription": "Debug Mask/Match Tie-Ins",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "Counter": "0,1,2",
+ "EventCode": "0x64",
+ "EventName": "UNC_H_IOT_CTS_EAST_LO.CTS1",
+ "PerPkg": "1",
+ "PublicDescription": "Debug Mask/Match Tie-Ins",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Hi",
+ "Counter": "0,1,2",
+ "EventCode": "0x65",
+ "EventName": "UNC_H_IOT_CTS_HI.CTS2",
+ "PerPkg": "1",
+ "PublicDescription": "Debug Mask/Match Tie-Ins",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Hi",
+ "Counter": "0,1,2",
+ "EventCode": "0x65",
+ "EventName": "UNC_H_IOT_CTS_HI.CTS3",
+ "PerPkg": "1",
+ "PublicDescription": "Debug Mask/Match Tie-Ins",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "Counter": "0,1,2",
+ "EventCode": "0x62",
+ "EventName": "UNC_H_IOT_CTS_WEST_LO.CTS0",
+ "PerPkg": "1",
+ "PublicDescription": "Debug Mask/Match Tie-Ins",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "Counter": "0,1,2",
+ "EventCode": "0x62",
+ "EventName": "UNC_H_IOT_CTS_WEST_LO.CTS1",
+ "PerPkg": "1",
+ "PublicDescription": "Debug Mask/Match Tie-Ins",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Cancelled",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.CANCELLED",
+ "PerPkg": "1",
+ "PublicDescription": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.; OSB Snoop broadcast cancelled due to D2C or Other. OSB cancel is counted when OSB local read is not allowed even when the transaction in local InItoE. It also counts D2C OSB cancel, but also includes the cases were D2C was not set in the first place for the transaction coming from the ring.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Local InvItoE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.INVITOE_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Local Reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.READS_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Reads Local - Useful",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.READS_LOCAL_USEFUL",
+ "PerPkg": "1",
+ "PublicDescription": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Remote",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Remote - Useful",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.REMOTE_USEFUL",
+ "PerPkg": "1",
+ "PublicDescription": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; Reads to Local I",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.READS_LOCAL_I",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; Reads to Local S",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.READS_LOCAL_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; Reads to Remote I",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.READS_REMOTE_I",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; Reads to Remote S",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.READS_REMOTE_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Read and Write Requests; Local InvItoEs",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_H_REQUESTS.INVITOE_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; This filter includes only InvItoEs coming from the local socket.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Read and Write Requests; Remote InvItoEs",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_H_REQUESTS.INVITOE_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; This filter includes only InvItoEs coming from remote sockets.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Read and Write Requests; Reads",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.READS",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; Incoming ead requests. This is a good proxy for LLC Read Misses (including RFOs).",
"UMask": "0x3",
"Unit": "HA"
},
{
- "BriefDescription": "read requests to local home agent",
+ "BriefDescription": "Read and Write Requests; Local Reads",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.READS_LOCAL",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; This filter includes only read requests coming from the local socket. This is a good proxy for LLC Read Misses (including RFOs) from the local socket.",
"UMask": "0x1",
"Unit": "HA"
},
{
- "BriefDescription": "read requests to remote home agent",
+ "BriefDescription": "Read and Write Requests; Remote Reads",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.READS_REMOTE",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; This filter includes only read requests coming from the remote socket. This is a good proxy for LLC Read Misses (including RFOs) from the remote socket.",
"UMask": "0x2",
"Unit": "HA"
},
{
- "BriefDescription": "write requests to home agent",
+ "BriefDescription": "Read and Write Requests; Writes",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.WRITES",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; Incoming write requests.",
"UMask": "0xC",
"Unit": "HA"
},
{
- "BriefDescription": "write requests to local home agent",
+ "BriefDescription": "Read and Write Requests; Local Writes",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.WRITES_LOCAL",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; This filter includes only writes coming from the local socket.",
"UMask": "0x4",
"Unit": "HA"
},
{
- "BriefDescription": "write requests to remote home agent",
+ "BriefDescription": "Read and Write Requests; Remote Writes",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.WRITES_REMOTE",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; This filter includes only writes coming from remote sockets.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xC",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xF",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xC",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xF",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xC",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 0 only.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 1 only.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 2 only.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 3 only.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 0 only.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 1 only.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 2 only.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 3 only.",
"UMask": "0x8",
"Unit": "HA"
},
{
- "BriefDescription": "Conflict requests (requests for same address from multiple agents simultaneously)",
+ "BriefDescription": "SBo0 Credits Acquired; For AD Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x68",
+ "EventName": "UNC_H_SBO0_CREDITS_ACQUIRED.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits acquired in a given cycle, per ring.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Acquired; For BL Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x68",
+ "EventName": "UNC_H_SBO0_CREDITS_ACQUIRED.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits acquired in a given cycle, per ring.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Occupancy; For AD Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6A",
+ "EventName": "UNC_H_SBO0_CREDIT_OCCUPANCY.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits in use in a given cycle, per ring.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Occupancy; For BL Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6A",
+ "EventName": "UNC_H_SBO0_CREDIT_OCCUPANCY.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits in use in a given cycle, per ring.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Acquired; For AD Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x69",
+ "EventName": "UNC_H_SBO1_CREDITS_ACQUIRED.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 1 credits acquired in a given cycle, per ring.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Acquired; For BL Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x69",
+ "EventName": "UNC_H_SBO1_CREDITS_ACQUIRED.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 1 credits acquired in a given cycle, per ring.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Occupancy; For AD Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6B",
+ "EventName": "UNC_H_SBO1_CREDIT_OCCUPANCY.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 1 credits in use in a given cycle, per ring.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Occupancy; For BL Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6B",
+ "EventName": "UNC_H_SBO1_CREDIT_OCCUPANCY.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 1 credits in use in a given cycle, per ring.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Data beat the Snoop Responses; Local Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA",
+ "EventName": "UNC_H_SNOOPS_RSP_AFTER_DATA.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of reads when the snoop was on the critical path to the data return.; This filter includes only requests coming from the local socket.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Data beat the Snoop Responses; Remote Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA",
+ "EventName": "UNC_H_SNOOPS_RSP_AFTER_DATA.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of reads when the snoop was on the critical path to the data return.; This filter includes only requests coming from remote sockets.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles with Snoops Outstanding; All Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_H_SNOOP_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles when one or more snoops are outstanding.; Tracked for snoops from both local and remote sockets.",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles with Snoops Outstanding; Local Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_H_SNOOP_CYCLES_NE.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles when one or more snoops are outstanding.; This filter includes only requests coming from the local socket.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles with Snoops Outstanding; Remote Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_H_SNOOP_CYCLES_NE.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles when one or more snoops are outstanding.; This filter includes only requests coming from remote sockets.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Snoops Outstanding Accumulator; Local Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_H_SNOOP_OCCUPANCY.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of either the local HA tracker pool that have snoops pending in every cycle. This can be used in conjection with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if an HT (HomeTracker) entry is available and this occupancy is decremented when all the snoop responses have returned.; This filter includes only requests coming from the local socket.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Snoops Outstanding Accumulator; Remote Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_H_SNOOP_OCCUPANCY.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of either the local HA tracker pool that have snoops pending in every cycle. This can be used in conjection with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if an HT (HomeTracker) entry is available and this occupancy is decremented when all the snoop responses have returned.; This filter includes only requests coming from remote sockets.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received; RSPCNFLCT*",
"Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPCNFLCT",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for snoops responses of RspConflict. This is returned when a snoop finds an existing outstanding transaction in a remote caching agent when it CAMs that caching agent. This triggers conflict resolution hardware. This covers both RspCnflct and RspCnflctWbI.",
"UMask": "0x40",
"Unit": "HA"
},
{
- "BriefDescription": "M line forwarded from remote cache along with writeback to memory",
+ "BriefDescription": "Snoop Responses Received; RspI",
"Counter": "0,1,2,3",
"EventCode": "0x21",
- "EventName": "UNC_H_SNOOP_RESP.RSP_FWD_WB",
+ "EventName": "UNC_H_SNOOP_RESP.RSPI",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x20",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for snoops responses of RspI. RspI is returned when the remote cache does not have the data, or when the remote cache silently evicts data (such as when an RFO hits non-modified data).",
+ "UMask": "0x1",
"Unit": "HA"
},
{
- "BriefDescription": "M line forwarded from remote cache with no writeback to memory",
+ "BriefDescription": "Snoop Responses Received; RspIFwd",
"Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPIFWD",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for snoop responses of RspIFwd. This is returned when a remote caching agent forwards data and the requesting agent is able to acquire the data in E or M states. This is commonly returned with RFO transactions. It can be either a HitM or a HitFE.",
"UMask": "0x4",
"Unit": "HA"
},
{
- "BriefDescription": "Shared line response from remote cache",
+ "BriefDescription": "Snoop Responses Received; RspS",
"Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPS",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for snoop responses of RspS. RspS is returned when a remote cache has data but is not forwarding it. It is a way to let the requesting socket know that it cannot allocate the data in E state. No data is sent with S RspS.",
"UMask": "0x2",
"Unit": "HA"
},
{
- "BriefDescription": "Shared line forwarded from remote cache",
+ "BriefDescription": "Snoop Responses Received; RspSFwd",
"Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPSFWD",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for a snoop response of RspSFwd. This is returned when a remote caching agent forwards data but holds on to its currentl copy. This is common for data and code reads that hit in a remote socket in E or F state.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received; Rsp*Fwd*WB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_SNOOP_RESP.RSP_FWD_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for a snoop response of Rsp*Fwd*WB. This snoop response is only used in 4s systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to the home to be written back to memory.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received; Rsp*WB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_SNOOP_RESP.RSP_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for a snoop response of RspIWB or RspSWB. This is returned when a non-RFO request hits in M state. Data and Code Reads can return either RspIWB or RspSWB depending on how the system has been configured. InvItoE transactions will also return RspIWB because they must acquire ownership.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; Other",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.OTHER",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for all other snoop responses.",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspCnflct",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPCNFLCT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for snoops responses of RspConflict. This is returned when a snoop finds an existing outstanding transaction in a remote caching agent when it CAMs that caching agent. This triggers conflict resolution hardware. This covers both RspCnflct and RspCnflctWbI.",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPI",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for snoops responses of RspI. RspI is returned when the remote cache does not have the data, or when the remote cache silently evicts data (such as when an RFO hits non-modified data).",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspIFwd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPIFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for snoop responses of RspIFwd. This is returned when a remote caching agent forwards data and the requesting agent is able to acquire the data in E or M states. This is commonly returned with RFO transactions. It can be either a HitM or a HitFE.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for snoop responses of RspS. RspS is returned when a remote cache has data but is not forwarding it. It is a way to let the requesting socket know that it cannot allocate the data in E state. No data is sent with S RspS.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspSFwd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPSFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for a snoop response of RspSFwd. This is returned when a remote caching agent forwards data but holds on to its currentl copy. This is common for data and code reads that hit in a remote socket in E or F state.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; Rsp*FWD*WB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPxFWDxWB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for a snoop response of Rsp*Fwd*WB. This snoop response is only used in 4s systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to the home to be written back to memory.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; Rsp*WB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPxWB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for a snoop response of RspIWB or RspSWB. This is returned when a non-RFO request hits in M state. Data and Code Reads can return either RspIWB or RspSWB depending on how the system has been configured. InvItoE transactions will also return RspIWB because they must acquire ownership.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo0, AD Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6C",
+ "EventName": "UNC_H_STALL_NO_SBO_CREDIT.SBO0_AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo0, BL Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6C",
+ "EventName": "UNC_H_STALL_NO_SBO_CREDIT.SBO0_BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo1, AD Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6C",
+ "EventName": "UNC_H_STALL_NO_SBO_CREDIT.SBO1_AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo1, BL Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6C",
+ "EventName": "UNC_H_STALL_NO_SBO_CREDIT.SBO1_BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 2",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 3",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION4",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 4",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION5",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 5",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION6",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 6",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION7",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 7",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION10",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 10",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION11",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 11",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION8",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 8",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION9",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 9",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Full; Cycles Completely Used",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_H_TRACKER_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the local HA tracker pool is completely used. This can be used with edge detect to identify the number of situations when the pool became fully utilized. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, the system could be starved for RTIDs but not fill up the HA trackers. HA trackers are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.; Counts the number of cycles when the HA tracker pool (HT) is completely used including reserved HT entries. It will not return valid count when BT is disabled.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Full; Cycles GP Completely Used",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_H_TRACKER_CYCLES_FULL.GP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the local HA tracker pool is completely used. This can be used with edge detect to identify the number of situations when the pool became fully utilized. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, the system could be starved for RTIDs but not fill up the HA trackers. HA trackers are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.; Counts the number of cycles when the general purpose (GP) HA tracker pool (HT) is completely used. It will not return valid count when BT is disabled.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty; All Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_H_TRACKER_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the local HA tracker pool is not empty. This can be used with edge detect to identify the number of situations when the pool became empty. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, this buffer could be completely empty, but there may still be credits in use by the CBos. This stat can be used in conjunction with the occupancy accumulation stat in order to calculate average queue occpancy. HA trackers are allocated as soon as a request enters the HA if an HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.; Requests coming from both local and remote sockets.",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty; Local Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_H_TRACKER_CYCLES_NE.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the local HA tracker pool is not empty. This can be used with edge detect to identify the number of situations when the pool became empty. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, this buffer could be completely empty, but there may still be credits in use by the CBos. This stat can be used in conjunction with the occupancy accumulation stat in order to calculate average queue occpancy. HA trackers are allocated as soon as a request enters the HA if an HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.; This filter includes only requests coming from the local socket.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty; Remote Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_H_TRACKER_CYCLES_NE.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the local HA tracker pool is not empty. This can be used with edge detect to identify the number of situations when the pool became empty. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, this buffer could be completely empty, but there may still be credits in use by the CBos. This stat can be used in conjunction with the occupancy accumulation stat in order to calculate average queue occpancy. HA trackers are allocated as soon as a request enters the HA if an HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.; This filter includes only requests coming from remote sockets.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy Accumultor; Local InvItoE Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_H_TRACKER_OCCUPANCY.INVITOE_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy Accumultor; Remote InvItoE Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_H_TRACKER_OCCUPANCY.INVITOE_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy Accumultor; Local Read Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_H_TRACKER_OCCUPANCY.READS_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy Accumultor; Remote Read Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_H_TRACKER_OCCUPANCY.READS_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy Accumultor; Local Write Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_H_TRACKER_OCCUPANCY.WRITES_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy Accumultor; Remote Write Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_H_TRACKER_OCCUPANCY.WRITES_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Data Pending Occupancy Accumultor; Local Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_H_TRACKER_PENDING_OCCUPANCY.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of transactions that have data from the memory controller until they get scheduled to the Egress. This can be used to calculate the queuing latency for two things. (1) If the system is waiting for snoops, this will increase. (2) If the system can't schedule to the Egress because of either (a) Egress Credits or (b) QPI BL IGR credits for remote requests.; This filter includes only requests coming from the local socket.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Data Pending Occupancy Accumultor; Remote Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_H_TRACKER_PENDING_OCCUPANCY.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of transactions that have data from the memory controller until they get scheduled to the Egress. This can be used to calculate the queuing latency for two things. (1) If the system is waiting for snoops, this will increase. (2) If the system can't schedule to the Egress because of either (a) Egress Credits or (b) QPI BL IGR credits for remote requests.; This filter includes only requests coming from remote sockets.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound NDR Ring Transactions; Non-data Responses",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xF",
+ "EventName": "UNC_H_TxR_AD.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of outbound transactions on the AD ring. This can be filtered by the NDR and SNP message classes. See the filter descriptions for more details.; Filter for outbound NDR transactions sent on the AD ring. NDR stands for non-data response and is generally used for completions that do not include data. AD NDR is used for transactions to remote sockets.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Full; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_TxR_AD_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Full; Cycles full from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Full; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_TxR_AD_CYCLES_FULL.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Full; Filter for cycles full from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Full; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_TxR_AD_CYCLES_FULL.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Full; Filter for cycles full from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Not Empty; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_TxR_AD_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Not Empty; Cycles full from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Not Empty; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_TxR_AD_CYCLES_NE.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Not Empty; Filter for cycles not empty from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Not Empty; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_TxR_AD_CYCLES_NE.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Not Empty; Filter for cycles not empty from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Allocations; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_TxR_AD_INSERTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Allocations; Allocations from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Allocations; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_TxR_AD_INSERTS.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Allocations; Filter for allocations from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Allocations; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_TxR_AD_INSERTS.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Allocations; Filter for allocations from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Full; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_TxR_AK_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Full; Cycles full from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Full; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_TxR_AK_CYCLES_FULL.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Full; Filter for cycles full from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Full; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_TxR_AK_CYCLES_FULL.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Full; Filter for cycles full from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Not Empty; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_H_TxR_AK_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Not Empty; Cycles full from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Not Empty; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_H_TxR_AK_CYCLES_NE.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Not Empty; Filter for cycles not empty from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Not Empty; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_H_TxR_AK_CYCLES_NE.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Not Empty; Filter for cycles not empty from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_TxR_AK_INSERTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Allocations; Allocations from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_TxR_AK_INSERTS.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Allocations; Filter for allocations from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_TxR_AK_INSERTS.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Allocations; Filter for allocations from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_H_TxR_BL.DRS_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.; Filter for data being sent to the cache.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Core",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_H_TxR_BL.DRS_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.; Filter for data being sent directly to the requesting core.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to QPI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_H_TxR_BL.DRS_QPI",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.; Filter for data being sent to a remote socket over QPI.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Full; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TxR_BL_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Full; Cycles full from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Full; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TxR_BL_CYCLES_FULL.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Full; Filter for cycles full from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Full; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TxR_BL_CYCLES_FULL.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Full; Filter for cycles full from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Not Empty; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TxR_BL_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Not Empty; Cycles full from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Not Empty; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TxR_BL_CYCLES_NE.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Not Empty; Filter for cycles not empty from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Not Empty; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TxR_BL_CYCLES_NE.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Not Empty; Filter for cycles not empty from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Allocations; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_TxR_BL_INSERTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Allocations; Allocations from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Allocations; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_TxR_BL_INSERTS.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Allocations; Filter for allocations from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Allocations; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_TxR_BL_INSERTS.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Allocations; Filter for allocations from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Injection Starvation; For AK Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6D",
+ "EventName": "UNC_H_TxR_STARVED.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Injection Starvation; For BL Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6D",
+ "EventName": "UNC_H_TxR_STARVED.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 0 only.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 1 only.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 2 only.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 3 only.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 0 only.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 1 only.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 2 only.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 3 only.",
"UMask": "0x8",
"Unit": "HA"
}
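Several of the home agent descriptions added above (the tracker, snoop and data-pending occupancy accumulators) note that the accumulator can be combined with the matching not-empty cycle count, or with an allocation count, to derive average queue occupancy and average queue latency. Below is a minimal sketch of that arithmetic in Python, assuming the raw counter values have already been collected over one interval (for example with perf stat) for UNC_H_TRACKER_OCCUPANCY.READS_LOCAL and UNC_H_TRACKER_CYCLES_NE.LOCAL plus a separately measured allocation count; the helper name and the sample numbers are illustrative only and are not part of this patch.

# avg_queue_stats: hypothetical helper, not part of the kernel tree.
def avg_queue_stats(occupancy_sum, cycles_not_empty, allocations):
    # occupancy_sum:    occupancy accumulator, e.g. UNC_H_TRACKER_OCCUPANCY.READS_LOCAL
    # cycles_not_empty: cycles the queue held at least one entry, e.g. UNC_H_TRACKER_CYCLES_NE.LOCAL
    # allocations:      entries allocated over the same interval (assumed measured separately)
    avg_occupancy = occupancy_sum / cycles_not_empty if cycles_not_empty else 0.0
    avg_latency_cycles = occupancy_sum / allocations if allocations else 0.0
    return avg_occupancy, avg_latency_cycles

# Made-up counter values for one measurement interval.
occ, lat = avg_queue_stats(occupancy_sum=1200000, cycles_not_empty=400000, allocations=60000)
print("average occupancy: %.2f entries, average latency: %.1f uncore cycles" % (occ, lat))

The same occupancy-over-allocations ratio (Little's law) applies to the other accumulator/not-empty pairs defined in this file, such as UNC_H_SNOOP_OCCUPANCY together with UNC_H_SNOOP_CYCLES_NE.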
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/uncore-memory.json b/tools/perf/pmu-events/arch/x86/broadwellde/uncore-memory.json
index f4b0745cdbbf..83ff0542dbc0 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/uncore-memory.json
@@ -1,86 +1,2915 @@
[
{
- "BriefDescription": "read requests to memory controller. Derived from unc_m_cas_count.rd",
+ "BriefDescription": "DRAM Activate Count; Activate due to Write",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_M_ACT_COUNT.BYP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Activate Count; Activate due to Read",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_M_ACT_COUNT.RD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Activate Count; Activate due to Write",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_M_ACT_COUNT.WR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "ACT command issued by 2 cycle bypass",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M_BYP_CMDS.ACT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CAS command issued by 2 cycle bypass",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M_BYP_CMDS.CAS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PRE command issued by 2 cycle bypass",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M_BYP_CMDS.PRE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM WR_CAS (w/ and w/out auto-pre)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number of DRAM CAS commands issued on this channel.",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM Reads (RD_CAS + Underfills)",
"Counter": "0,1,2,3",
"EventCode": "0x4",
- "EventName": "LLC_MISSES.MEM_READ",
+ "EventName": "UNC_M_CAS_COUNT.RD",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number of DRAM Read CAS commands issued on this channel (including underfills).",
"UMask": "0x3",
"Unit": "iMC"
},
{
- "BriefDescription": "write requests to memory controller. Derived from unc_m_cas_count.wr",
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM RD_CAS (w/ and w/out auto-pre)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_REG",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number or DRAM Read CAS commands issued on this channel. This includes both regular RD CAS commands as well as those with implicit Precharge. AutoPre is only used in systems that are using closed page policy. We do not filter based on major mode, as RD_CAS is not issued during WMM (with the exception of underfills).",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Read CAS issued in RMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_RMM",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Underfill Read Issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the number of underfill reads that are issued by the memory controller. This will generally be about the same as the number of partial writes, but may be slightly less because of partials hitting in the WPQ. While it is possible for underfills to be issed in both WMM and RMM, this event counts both.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Read CAS issued in WMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_WMM",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM WR_CAS (both Modes)",
"Counter": "0,1,2,3",
"EventCode": "0x4",
- "EventName": "LLC_MISSES.MEM_WRITE",
+ "EventName": "UNC_M_CAS_COUNT.WR",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number of DRAM Write CAS commands issued on this channel.",
"UMask": "0xC",
"Unit": "iMC"
},
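[Editor's note] The two derived aliases this hunk removes (LLC_MISSES.MEM_READ/MEM_WRITE with a 64Bytes scale unit) can still be recovered from the raw CAS counts above, since each CAS transfers one 64-byte cache line. A minimal sketch in Python; the counter values and the 10-second window are placeholders standing in for whatever perf actually reports for UNC_M_CAS_COUNT.RD/WR:

# Sketch: derive per-channel memory bandwidth from iMC CAS counts (64 B per CAS).
rd_cas = 1_200_000_000      # placeholder: UNC_M_CAS_COUNT.RD over the window
wr_cas = 400_000_000        # placeholder: UNC_M_CAS_COUNT.WR over the window
window_s = 10.0             # placeholder measurement interval in seconds

read_bw_mbs = rd_cas * 64 / 1e6 / window_s
write_bw_mbs = wr_cas * 64 / 1e6 / window_s
print(f"read: {read_bw_mbs:.1f} MB/s, write: {write_bw_mbs:.1f} MB/s")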
{
- "BriefDescription": "Memory controller clock ticks",
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Read Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR_RMM",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number of Opportunistic DRAM Write CAS commands issued on this channel while in Read-Major-Mode.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Write Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR_WMM",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number or DRAM Write CAS commands issued on this channel while in Write-Major-Mode.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Clockticks",
"Counter": "0,1,2,3",
"EventName": "UNC_M_DCLOCKTICKS",
"PerPkg": "1",
"Unit": "iMC"
},
{
- "BriefDescription": "Cycles where DRAM ranks are in power down (CKE) mode",
+ "BriefDescription": "DRAM Precharge All Commands",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_M_DRAM_PRE_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that the precharge all command was sent.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_M_DRAM_REFRESH.HIGH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of refreshes issued.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_M_DRAM_REFRESH.PANIC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of refreshes issued.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "ECC Correctable Errors",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_M_ECC_CORRECTABLE_ERRORS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of ECC errors detected and corrected by the iMC on this channel. This counter is only useful with ECC DRAM devices. This count will increment one time for each correction regardless of the number of bits corrected. The iMC can correct up to 4 bit errors in independent channel mode and 8 bit erros in lockstep mode.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Isoch Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.ISOCH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; We group these two modes together so that we can use four counters to track each of the major modes at one time. These major modes are used whenever there is an ISOCH txn in the memory controller. In these mode, only ISOCH transactions are processed.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Partial Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.PARTIAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; This major mode is used to drain starved underfill reads. Regular reads and writes are blocked and only underfill reads will be processed.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Read Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.READ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; Read Major Mode is the default mode for the iMC, as reads are generally more critical to forward progress than writes.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Write Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.WRITE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; This mode is triggered when the WPQ hits high occupancy and causes writes to be higher priority than reads. This can cause blips in the available read bandwidth in the system and temporarily increase read latencies in order to achieve better bus utilizations and higher bandwidth.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Channel DLLOFF Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_M_POWER_CHANNEL_DLLOFF",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles when all the ranks in the channel are in CKE Slow (DLLOFF) mode.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Channel PPD Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M_POWER_CHANNEL_PPD",
- "MetricExpr": "(UNC_M_POWER_CHANNEL_PPD / UNC_M_DCLOCKTICKS) * 100.",
- "MetricName": "power_channel_ppd %",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles when all the ranks in the channel are in PPD mode. If IBT=off is enabled, then this can be used to count those cycles. If it is not enabled, then this can count the number of cycles when that could have been taken advantage of.",
+ "Unit": "iMC"
+ },
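[Editor's note] This hunk drops the old MetricExpr/MetricName pairs that turned UNC_M_POWER_CHANNEL_PPD (and, below, the critical-throttle and self-refresh counts) into percentages of DRAM clockticks. The same ratio is trivial to recompute by hand; a sketch with placeholder counter values:

# Sketch: recompute the removed "power_channel_ppd %" style metric.
ppd_cycles = 3_000_000_000   # placeholder: UNC_M_POWER_CHANNEL_PPD
dclk = 8_000_000_000         # placeholder: UNC_M_DCLOCKTICKS on the same channel

ppd_pct = ppd_cycles / dclk * 100.0
print(f"channel spent {ppd_pct:.1f}% of DRAM clocks in PPD")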
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x10",
"Unit": "iMC"
},
{
- "BriefDescription": "Cycles all ranks are in critical thermal throttle",
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK6",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK7",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
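[Editor's note] The CKE descriptions above suggest pairing the event with Invert to count cycles spent in power-saving mode. As a hedged sketch only: assuming the uncore_imc PMUs on this part expose the usual event/umask/inv format fields (verify under /sys/bus/event_source/devices/uncore_imc_*/format, and note the "uncore_imc_0" instance name below is hypothetical), the inverted event string could be built like this:

# Sketch: build a perf event spec for "rank 0 NOT in CKE ON" cycles by
# inverting UNC_M_POWER_CKE_CYCLES.RANK0 (event 0x83, umask 0x1).
# The 'inv' format field is assumed to exist for this PMU; check sysfs first.
pmu = "uncore_imc_0"                       # hypothetical PMU instance name
event = f"{pmu}/event=0x83,umask=0x1,inv=1/"
print("perf stat -a -e", event, "-- sleep 10")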
+ {
+ "BriefDescription": "Critical Throttle Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES",
- "MetricExpr": "(UNC_M_POWER_CRITICAL_THROTTLE_CYCLES / UNC_M_DCLOCKTICKS) * 100.",
- "MetricName": "power_critical_throttle_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the iMC is in critical thermal throttling. When this happens, all traffic is blocked. This should be rare unless something bad is going on in the platform. There is no filtering by rank for this event.",
"Unit": "iMC"
},
{
- "BriefDescription": "Cycles Memory is in self refresh power mode",
+ "BriefDescription": "Clock-Enabled Self-Refresh",
"Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M_POWER_SELF_REFRESH",
- "MetricExpr": "(UNC_M_POWER_SELF_REFRESH / UNC_M_DCLOCKTICKS) * 100.",
- "MetricName": "power_self_refresh %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the iMC is in self-refresh and the iMC still has a clock. This happens in some package C-states. For example, the PCU may ask the iMC to enter self-refresh even though some of the cores are still processing. One use of this is for Monroe technology. Self-refresh is required during package C3 and C6, but there is no clock in the iMC at this time, so it is not possible to count these cases.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.; Thermal throttling is performed per DIMM. We support 3 DIMMs per channel. This ID allows us to filter by ID.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK4",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK5",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK6",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK7",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Preemption Count; Read over Read Preemption",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_M_PREEMPTION.RD_PREEMPT_RD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.; Filter for when a read preempts another read.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Preemption Count; Read over Write Preemption",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_M_PREEMPTION.RD_PREEMPT_WR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.; Filter for when a read preempts a write.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to bypass",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_M_PRE_COUNT.BYP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to timer expiration",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_M_PRE_COUNT.PAGE_CLOSE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.; Counts the number of DRAM Precharge commands sent on this channel as a result of the page close counter expiring. This does not include implicit precharge commands sent in auto-precharge mode.",
+ "UMask": "0x2",
"Unit": "iMC"
},
{
- "BriefDescription": "Pre-charges due to page misses",
+ "BriefDescription": "DRAM Precharge commands.; Precharges due to page miss",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.PAGE_MISS",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.; Counts the number of DRAM Precharge commands sent on this channel as a result of page misses. This does not include explicit precharge commands sent with CAS commands in Auto-Precharge mode. This does not include PRE commands sent as a result of the page close counter expiration.",
"UMask": "0x1",
"Unit": "iMC"
},
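[Editor's note] The activate and precharge descriptions above spell out a small derived count: subtracting UNC_M_PRE_COUNT.PAGE_MISS from the total UNC_M_ACT_COUNT separates activates that first had to close another page from those that found the bank already precharged. A sketch following that subtraction, with placeholder values:

# Sketch: page-miss/page-empty breakdown from the two events above.
activates = 900_000_000       # placeholder: UNC_M_ACT_COUNT.RD + .WR + .BYP
page_miss_pre = 600_000_000   # placeholder: UNC_M_PRE_COUNT.PAGE_MISS

page_empty_acts = activates - page_miss_pre   # activates not preceded by a page-miss precharge
page_miss_ratio = page_miss_pre / activates
print(f"{page_empty_acts} page-empty activates; {page_miss_ratio:.1%} of activates followed a page miss")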
{
- "BriefDescription": "Pre-charge for reads",
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to read",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.RD",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.",
"UMask": "0x4",
"Unit": "iMC"
},
{
- "BriefDescription": "Pre-charge for writes",
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to write",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.WR",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with HIGH priority",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.HIGH",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with LOW priority",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.LOW",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with MEDIUM priority",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.MED",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with PANIC NON ISOCH priority (starved)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.PANIC",
+ "PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_M_RPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Read Pending Queue is not empty. This can then be used to calculate the average occupancy (in conjunction with the Read Pending Queue Occupancy count). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This filter is to be used in conjunction with the occupancy filter so that one can correctly track the average occupancies for schedulable entries and scheduled requests.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_M_RPQ_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "VMSE MXB write buffer occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_M_VMSE_MXB_WR_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "VMSE WR PUSH issued; VMSE write PUSH issued in RMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_M_VMSE_WR_PUSH.RMM",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "VMSE WR PUSH issued; VMSE write PUSH issued in WMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_M_VMSE_WR_PUSH.WMM",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Transition from WMM to RMM because of low threshold; Transition from WMM to RMM because of starve counter",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_M_WMM_TO_RMM.LOW_THRESH",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Transition from WMM to RMM because of low threshold",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_M_WMM_TO_RMM.STARVE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Transition from WMM to RMM because of low threshold",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_M_WMM_TO_RMM.VMSE_RETRY",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Full Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_M_WPQ_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the Write Pending Queue is full. When the WPQ is full, the HA will not be able to issue any additional read requests into the iMC. This count should be similar count in the HA which tracks the number of cycles that the HA has no WPQ credits, just somewhat smaller to account for the credit return overhead.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_M_WPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Write Pending Queue is not empty. This can then be used to calculate the average queue occupancy (in conjunction with the WPQ Occupancy Accumulation count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_M_WPQ_READ_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_M_WPQ_WRITE_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Not getting the requested Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_M_WRONG_MM",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ }
+]
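Several of the queue descriptions above (UNC_M_RPQ_CYCLES_NE, UNC_M_RPQ_INSERTS and the matching occupancy event) note that the occupancy accumulation can be combined with the not-empty and insert counts to derive average queue occupancy and, via Little's law, average queue latency. A minimal sketch of how those derivations could be written as pmu-events metric entries follows; it is illustrative only, is not part of this patch, and assumes the UNC_M_RPQ_OCCUPANCY event defined elsewhere in uncore-memory.json:

    [
        {
            "BriefDescription": "Hypothetical metric: average RPQ occupancy while not empty (illustrative, not part of this patch)",
            "MetricExpr": "UNC_M_RPQ_OCCUPANCY / UNC_M_RPQ_CYCLES_NE",
            "MetricName": "rpq_avg_occupancy_not_empty"
        },
        {
            "BriefDescription": "Hypothetical metric: average RPQ read latency in iMC clocks via Little's law (illustrative, not part of this patch)",
            "MetricExpr": "UNC_M_RPQ_OCCUPANCY / UNC_M_RPQ_INSERTS",
            "MetricName": "rpq_avg_latency_clocks"
        }
    ]

If entries like these were added to a broadwellde metrics file, 'perf stat -M rpq_avg_latency_clocks' would report the derived value; the metric names used here are made up for the example.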
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/uncore-other.json b/tools/perf/pmu-events/arch/x86/broadwellde/uncore-other.json
new file mode 100644
index 000000000000..fc7e0867fcc5
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/uncore-other.json
@@ -0,0 +1,1233 @@
+[
+ {
+ "BriefDescription": "Total Write Cache Occupancy; Any Source",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.; Tracks all requests from any source port.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Total Write Cache Occupancy; Select Source",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.SOURCE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.; Tracks only those requests that come from the port specified in the IRP_PmonFilter.OrderingQ register. This register allows one to select one specific queue. It is not possible to monitor multiple queues at a time.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Clocks in the IRP",
+ "Counter": "0,1",
+ "EventName": "UNC_I_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of clocks in the IRP.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; CLFlush",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.CLFLUSH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; CRd",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; DRd",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.DRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; PCIDCAHin5t",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.PCIDCAHINT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; PCIRdCur",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.PCIRDCUR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; PCIItoM",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.PCITOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; RFO",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.RFO",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; WbMtoI",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.WBMTOI",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Cache Inserts of Atomic Transactions as Secondary",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.2ND_ATOMIC_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Cache Inserts of Read Transactions as Secondary",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.2ND_RD_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Cache Inserts of Write Transactions as Secondary",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.2ND_WR_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Fastpath Rejects",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.FAST_REJ",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Fastpath Requests",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.FAST_REQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Fastpath Transfers From Primary to Secondary",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.FAST_XFER",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Prefetch Ack Hints From Primary to Secondary",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.PF_ACK_HINT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Prefetch TimeOut",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.PF_TIMEOUT",
+ "PerPkg": "1",
+ "PublicDescription": "Indicates the fetch for a previous prefetch wasn't accepted by the prefetch. This happens in the case of a prefetch TimeOut",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Data Throttled",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.DATA_THROTTLE",
+ "PerPkg": "1",
+ "PublicDescription": "IRP throttled switch data",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.LOST_FWD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Received Invalid",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.SEC_RCVD_INVLD",
+ "PerPkg": "1",
+ "PublicDescription": "Secondary received a transfer that did not have sufficient MESI state",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Received Valid",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.SEC_RCVD_VLD",
+ "PerPkg": "1",
+ "PublicDescription": "Secondary received a transfer that did have sufficient MESI state",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of E Line",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.SLOW_E",
+ "PerPkg": "1",
+ "PublicDescription": "Secondary received a transfer that did have sufficient MESI state",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of I Line",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.SLOW_I",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop took cacheline ownership before write from data was committed.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of M Line",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.SLOW_M",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop took cacheline ownership before write from data was committed.",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of S Line",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.SLOW_S",
+ "PerPkg": "1",
+ "PublicDescription": "Secondary received a transfer that did not have sufficient MESI state",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "AK Ingress Occupancy",
+ "Counter": "0,1",
+ "EventCode": "0xA",
+ "EventName": "UNC_I_RxR_AK_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the AK Ingress. This queue is where the IRP receives responses from R2PCIe (the ring).",
+ "Unit": "IRP"
+ },
+ {
+ "Counter": "0,1",
+ "EventCode": "0x4",
+ "EventName": "UNC_I_RxR_BL_DRS_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL Ingress Occupancy - DRS",
+ "Counter": "0,1",
+ "EventCode": "0x1",
+ "EventName": "UNC_I_RxR_BL_DRS_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "Counter": "0,1",
+ "EventCode": "0x7",
+ "EventName": "UNC_I_RxR_BL_DRS_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "Counter": "0,1",
+ "EventCode": "0x5",
+ "EventName": "UNC_I_RxR_BL_NCB_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL Ingress Occupancy - NCB",
+ "Counter": "0,1",
+ "EventCode": "0x2",
+ "EventName": "UNC_I_RxR_BL_NCB_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "Counter": "0,1",
+ "EventCode": "0x8",
+ "EventName": "UNC_I_RxR_BL_NCB_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "Counter": "0,1",
+ "EventCode": "0x6",
+ "EventName": "UNC_I_RxR_BL_NCS_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL Ingress Occupancy - NCS",
+ "Counter": "0,1",
+ "EventCode": "0x3",
+ "EventName": "UNC_I_RxR_BL_NCS_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "Counter": "0,1",
+ "EventCode": "0x9",
+ "EventName": "UNC_I_RxR_BL_NCS_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Hit E or S",
+ "Counter": "0,1",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_ES",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Hit I",
+ "Counter": "0,1",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_I",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Hit M",
+ "Counter": "0,1",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_M",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Miss",
+ "Counter": "0,1",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.MISS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; SnpCode",
+ "Counter": "0,1",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.SNPCODE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; SnpData",
+ "Counter": "0,1",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.SNPDATA",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; SnpInv",
+ "Counter": "0,1",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.SNPINV",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Atomic",
+ "Counter": "0,1",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TRANSACTIONS.ATOMIC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks the number of atomic transactions",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Other",
+ "Counter": "0,1",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TRANSACTIONS.OTHER",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks the number of 'other' kinds of transactions.",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Read Prefetches",
+ "Counter": "0,1",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TRANSACTIONS.RD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks the number of read prefetches.",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Reads",
+ "Counter": "0,1",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TRANSACTIONS.READS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks only read requests (not including read prefetches).",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Writes",
+ "Counter": "0,1",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TRANSACTIONS.WRITES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks only write requests. Each write request should have a prefetch, so there is no need to explicitly track these requests.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Write Prefetches",
+ "Counter": "0,1",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TRANSACTIONS.WR_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks the number of write prefetches.",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No AD Egress Credit Stalls",
+ "Counter": "0,1",
+ "EventCode": "0x18",
+ "EventName": "UNC_I_TxR_AD_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number times when it is not possible to issue a request to the R2PCIe because there are no AD Egress Credits available.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No BL Egress Credit Stalls",
+ "Counter": "0,1",
+ "EventCode": "0x19",
+ "EventName": "UNC_I_TxR_BL_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number times when it is not possible to issue data to the R2PCIe because there are no BL Egress Credits available.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
+ "EventCode": "0xE",
+ "EventName": "UNC_I_TxR_DATA_INSERTS_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of requests issued to the switch (towards the devices).",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
+ "EventCode": "0xF",
+ "EventName": "UNC_I_TxR_DATA_INSERTS_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of requests issued to the switch (towards the devices).",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Request Queue Occupancy",
+ "Counter": "0,1",
+ "EventCode": "0xD",
+ "EventName": "UNC_I_TxR_REQUEST_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumultes the number of outstanding outbound requests from the IRP to the switch (towards the devices). This can be used in conjuection with the allocations event in order to calculate average latency of outbound requests.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Number of uclks in domain",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_R2_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of uclks in the R2PCIe uclk domain. This could be slightly different than the count in the Ubox because of enable/freeze delays. However, because the R2PCIe is close to the Ubox, they generally should not diverge by more than a handful of cycles.",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credit Acquired; DRS",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of credits that are acquired in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).; Credits to the IIO for the DRS message class.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credit Acquired; NCB",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of credits that are acquired in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).; Credits to the IIO for the NCB message class.",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credit Acquired; NCS",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of credits that are acquired in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).; Credits to the IIO for the NCS message class.",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credits in Use; DRS",
+ "Counter": "0,1",
+ "EventCode": "0x32",
+ "EventName": "UNC_R2_IIO_CREDITS_USED.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when one or more credits in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).; Credits to the IIO for the DRS message class.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credits in Use; NCB",
+ "Counter": "0,1",
+ "EventCode": "0x32",
+ "EventName": "UNC_R2_IIO_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when one or more credits in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).; Credits to the IIO for the NCB message class.",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credits in Use; NCS",
+ "Counter": "0,1",
+ "EventCode": "0x32",
+ "EventName": "UNC_R2_IIO_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when one or more credits in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).; Credits to the IIO for the NCS message class.",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xF",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xC",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x3",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "AK Ingress Bounced; Dn",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_R2_RING_AK_BOUNCES.DN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a request destined for the AK ingress bounced.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "AK Ingress Bounced; Up",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_R2_RING_AK_BOUNCES.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a request destined for the AK ingress bounced.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xF",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xC",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x3",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xF",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xC",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x3",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 IV Ring in Use; Any",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA",
+ "EventName": "UNC_R2_RING_IV_USED.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.",
+ "UMask": "0xF",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 IV Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA",
+ "EventName": "UNC_R2_RING_IV_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.",
+ "UMask": "0xC",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 IV Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA",
+ "EventName": "UNC_R2_RING_IV_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.",
+ "UMask": "0x3",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; NCB",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_R2_RxR_CYCLES_NE.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Ingress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCB Ingress Queue",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; NCS",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_R2_RxR_CYCLES_NE.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Ingress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCS Ingress Queue",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NCB",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_R2_RxR_INSERTS.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the R2PCIe Ingress. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCB Ingress Queue",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NCS",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_R2_RxR_INSERTS.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the R2PCIe Ingress. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCS Ingress Queue",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy Accumulator; DRS",
+ "EventCode": "0x13",
+ "EventName": "UNC_R2_RxR_OCCUPANCY.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given R2PCIe Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the R2PCIe Ingress Not Empty event to calculate average occupancy or the R2PCIe Ingress Allocations event in order to calculate average queuing latency.; DRS Ingress Queue",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Acquired; For AD Ring",
+ "Counter": "0,1",
+ "EventCode": "0x28",
+ "EventName": "UNC_R2_SBO0_CREDITS_ACQUIRED.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits acquired in a given cycle, per ring.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Acquired; For BL Ring",
+ "Counter": "0,1",
+ "EventCode": "0x28",
+ "EventName": "UNC_R2_SBO0_CREDITS_ACQUIRED.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits acquired in a given cycle, per ring.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Occupancy; For AD Ring",
+ "EventCode": "0x2A",
+ "EventName": "UNC_R2_SBO0_CREDIT_OCCUPANCY.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits in use in a given cycle, per ring.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Occupancy; For BL Ring",
+ "EventCode": "0x2A",
+ "EventName": "UNC_R2_SBO0_CREDIT_OCCUPANCY.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits in use in a given cycle, per ring.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo0, AD Ring",
+ "Counter": "0,1",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R2_STALL_NO_SBO_CREDIT.SBO0_AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo0, BL Ring",
+ "Counter": "0,1",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R2_STALL_NO_SBO_CREDIT.SBO0_BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo1, AD Ring",
+ "Counter": "0,1",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R2_STALL_NO_SBO_CREDIT.SBO1_AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo1, BL Ring",
+ "Counter": "0,1",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R2_STALL_NO_SBO_CREDIT.SBO1_BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Full; AD",
+ "EventCode": "0x25",
+ "EventName": "UNC_R2_TxR_CYCLES_FULL.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress buffer is full.; AD Egress Queue",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Full; AK",
+ "EventCode": "0x25",
+ "EventName": "UNC_R2_TxR_CYCLES_FULL.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress buffer is full.; AK Egress Queue",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Full; BL",
+ "EventCode": "0x25",
+ "EventName": "UNC_R2_TxR_CYCLES_FULL.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress buffer is full.; BL Egress Queue",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Not Empty; AD",
+ "EventCode": "0x23",
+ "EventName": "UNC_R2_TxR_CYCLES_NE.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.; AD Egress Queue",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Not Empty; AK",
+ "EventCode": "0x23",
+ "EventName": "UNC_R2_TxR_CYCLES_NE.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.; AK Egress Queue",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Not Empty; BL",
+ "EventCode": "0x23",
+ "EventName": "UNC_R2_TxR_CYCLES_NE.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.; BL Egress Queue",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; AD CCW",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.DN_AD",
+ "PerPkg": "1",
+ "PublicDescription": "AD CounterClockwise Egress Queue",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; AK CCW",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.DN_AK",
+ "PerPkg": "1",
+ "PublicDescription": "AK CounterClockwise Egress Queue",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; BL CCW",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.DN_BL",
+ "PerPkg": "1",
+ "PublicDescription": "BL CounterClockwise Egress Queue",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; AK CCW",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.UP_AD",
+ "PerPkg": "1",
+ "PublicDescription": "BL CounterClockwise Egress Queue",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; BL CW",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.UP_AK",
+ "PerPkg": "1",
+ "PublicDescription": "AD Clockwise Egress Queue",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; BL CCW",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.UP_BL",
+ "PerPkg": "1",
+ "PublicDescription": "AD CounterClockwise Egress Queue",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "VLW Received",
+ "Counter": "0,1",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.DOORBELL_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "Counter": "0,1",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.DISABLE",
+ "PerPkg": "1",
+ "PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "Counter": "0,1",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.ENABLE",
+ "PerPkg": "1",
+ "PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "Counter": "0,1",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.U2C_DISABLE",
+ "PerPkg": "1",
+ "PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "Counter": "0,1",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.U2C_ENABLE",
+ "PerPkg": "1",
+ "PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Cycles PHOLD Assert to Ack; Assert to ACK",
+ "Counter": "0,1",
+ "EventCode": "0x45",
+ "EventName": "UNC_U_PHOLD_CYCLES.ASSERT_TO_ACK",
+ "PerPkg": "1",
+ "PublicDescription": "PHOLD cycles. Filter from source CoreID.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "RACU Request",
+ "Counter": "0,1",
+ "EventCode": "0x46",
+ "EventName": "UNC_U_RACU_REQUESTS",
+ "PerPkg": "1",
+ "PublicDescription": "Number outstanding register requests within message channel tracker",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Correctable Machine Check",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.CMC",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Livelock",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.LIVELOCK",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores; Filter by core",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; LTError",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.LTERROR",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores; Filter by core",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Monitor T0",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.MONITOR_T0",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores; Filter by core",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Monitor T1",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.MONITOR_T1",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores; Filter by core",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Other",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.OTHER",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores; PREQ, PSMI, P2U, Thermal, PCUSMI, PMI",
+ "UMask": "0x80",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Trap",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.TRAP",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x40",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Uncorrectable Machine Check",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.UMC",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x20",
+ "Unit": "UBOX"
+ }
+]
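The R2PCIe queue events above come in matched sets: an occupancy accumulator (UNC_R2_RxR_OCCUPANCY), a not-empty cycle counter (UNC_R2_RxR_CYCLES_NE) and an allocation counter (UNC_R2_RxR_INSERTS), and the descriptions note that these can be combined to derive average queue occupancy and average queuing latency. The lines below are a minimal sketch of that arithmetic, assuming all three counts were collected over the same interval for the same message class; the numbers are placeholders rather than measured values, and in practice they would be read out with perf from the corresponding uncore PMU.

# Sketch only: deriving average occupancy and queuing latency from the
# R2PCIe ingress queue events described above. All counts are placeholders.
occupancy_sum    = 1_200_000   # UNC_R2_RxR_OCCUPANCY.<class>: queue depth summed over cycles
cycles_not_empty =   300_000   # UNC_R2_RxR_CYCLES_NE.<class>: cycles the queue held >= 1 entry
inserts          =   150_000   # UNC_R2_RxR_INSERTS.<class>: allocations into the queue

avg_occupancy_when_busy = occupancy_sum / cycles_not_empty   # average entries while non-empty
avg_queuing_latency_cyc = occupancy_sum / inserts            # Little's Law: cycles per entry

print(f"average occupancy (non-empty cycles): {avg_occupancy_when_busy:.2f}")
print(f"average queuing latency: {avg_queuing_latency_cyc:.2f} cycles")

The same occupancy-over-inserts ratio applies to the other queue-style events in this file, for example the SBo credit occupancy and credits-acquired pairs.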
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/uncore-power.json b/tools/perf/pmu-events/arch/x86/broadwellde/uncore-power.json
index dd1b95655d1d..c3325dd61202 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/uncore-power.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/uncore-power.json
@@ -1,91 +1,511 @@
[
{
- "BriefDescription": "PCU clock ticks. Use to get percentages of PCU cycles events",
+ "BriefDescription": "pclk Cycles",
"Counter": "0,1,2,3",
"EventName": "UNC_P_CLOCKTICKS",
"PerPkg": "1",
+ "PublicDescription": "The PCU runs off a fixed 1 GHz clock. This event counts the number of pclk cycles measured while the counter was enabled. The pclk, like the Memory Controller's dclk, counts at a constant rate making it a good measure of actual wall time.",
"Unit": "PCU"
},
{
- "BriefDescription": "This is an occupancy event that tracks the number of cores that are in C0. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details",
+ "BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
- "Filter": "occ_sel=1",
- "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C0 / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "power_state_occupancy.cores_c0 %",
+ "EventCode": "0x60",
+ "EventName": "UNC_P_CORE0_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "This is an occupancy event that tracks the number of cores that are in C3. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details",
+ "BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
- "Filter": "occ_sel=2",
- "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C3 / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "power_state_occupancy.cores_c3 %",
+ "EventCode": "0x6A",
+ "EventName": "UNC_P_CORE10_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "This is an occupancy event that tracks the number of cores that are in C6. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events ",
+ "BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
- "Filter": "occ_sel=3",
- "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C6 / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "power_state_occupancy.cores_c6 %",
+ "EventCode": "0x6B",
+ "EventName": "UNC_P_CORE11_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip",
+ "BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0xA",
- "EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
- "MetricExpr": "(UNC_P_PROCHOT_EXTERNAL_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "prochot_external_cycles %",
+ "EventCode": "0x6C",
+ "EventName": "UNC_P_CORE12_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6D",
+ "EventName": "UNC_P_CORE13_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6E",
+ "EventName": "UNC_P_CORE14_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6F",
+ "EventName": "UNC_P_CORE15_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_P_CORE16_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_P_CORE17_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x61",
+ "EventName": "UNC_P_CORE1_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x62",
+ "EventName": "UNC_P_CORE2_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x63",
+ "EventName": "UNC_P_CORE3_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x64",
+ "EventName": "UNC_P_CORE4_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x65",
+ "EventName": "UNC_P_CORE5_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x66",
+ "EventName": "UNC_P_CORE6_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x67",
+ "EventName": "UNC_P_CORE7_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x68",
+ "EventName": "UNC_P_CORE8_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x69",
+ "EventName": "UNC_P_CORE9_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x30",
+ "EventName": "UNC_P_DEMOTIONS_CORE0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_P_DEMOTIONS_CORE1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3A",
+ "EventName": "UNC_P_DEMOTIONS_CORE10",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3B",
+ "EventName": "UNC_P_DEMOTIONS_CORE11",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3C",
+ "EventName": "UNC_P_DEMOTIONS_CORE12",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3D",
+ "EventName": "UNC_P_DEMOTIONS_CORE13",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3E",
+ "EventName": "UNC_P_DEMOTIONS_CORE14",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3F",
+ "EventName": "UNC_P_DEMOTIONS_CORE15",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_P_DEMOTIONS_CORE16",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_P_DEMOTIONS_CORE17",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when temperature is the upper limit on frequency",
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_P_DEMOTIONS_CORE2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_P_DEMOTIONS_CORE3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_P_DEMOTIONS_CORE4",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_P_DEMOTIONS_CORE5",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x36",
+ "EventName": "UNC_P_DEMOTIONS_CORE6",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_P_DEMOTIONS_CORE7",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_P_DEMOTIONS_CORE8",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_P_DEMOTIONS_CORE9",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Thermal Strongest Upper Limit Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_max_limit_thermal_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when thermal conditions are the upper limit on frequency. This is related to the THERMAL_THROTTLE CYCLES_ABOVE_TEMP event, which always counts cycles when we are above the thermal temperature. This event (STRONGEST_UPPER_LIMIT) is sampled at the output of the algorithm that determines the actual frequency, while THERMAL_THROTTLE looks at the input.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when the OS is the upper limit on frequency",
+ "BriefDescription": "OS Strongest Upper Limit Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_P_FREQ_MAX_OS_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_MAX_OS_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_max_os_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the OS is the upper limit on frequency.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when power is the upper limit on frequency",
+ "BriefDescription": "Power Strongest Upper Limit Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_MAX_POWER_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_max_power_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when power is the upper limit on frequency.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when current is the upper limit on frequency",
+ "BriefDescription": "IO P Limit Strongest Lower Limit Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x73",
+ "EventName": "UNC_P_FREQ_MIN_IO_P_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when IO P Limit is preventing us from dropping the frequency lower. This algorithm monitors the needs to the IO subsystem on both local and remote sockets and will maintain a frequency high enough to maintain good IO BW. This is necessary for when all the IA cores on a socket are idle but a user still would like to maintain high IO Bandwidth.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Cycles spent changing Frequency",
"Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "UNC_P_FREQ_TRANS_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_TRANS_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_trans_cycles %",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the system is changing frequency. This can not be filtered by thread ID. One can also use it with the occupancy counter that monitors number of threads in C0 to estimate the performance impact that frequency transitions had on the system.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Memory Phase Shedding Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2F",
+ "EventName": "UNC_P_MEMORY_PHASE_SHEDDING_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the PCU has triggered memory phase shedding. This is a mode that can be run in the iMC physicals that saves power at the expense of additional latency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_P_PKG_RESIDENCY_C0_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the package was in C0. This event can be used in conjunction with edge detect to count C0 entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C1E",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4E",
+ "EventName": "UNC_P_PKG_RESIDENCY_C1E_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the package was in C1E. This event can be used in conjunction with edge detect to count C1E entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C2E",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_P_PKG_RESIDENCY_C2E_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the package was in C2E. This event can be used in conjunction with edge detect to count C2E entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_P_PKG_RESIDENCY_C3_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the package was in C3. This event can be used in conjunction with edge detect to count C3 entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2D",
+ "EventName": "UNC_P_PKG_RESIDENCY_C6_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the package was in C6. This event can be used in conjunction with edge detect to count C6 entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C7 State Residency",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_P_PKG_RESIDENCY_C7_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the package was in C7. This event can be used in conjunction with edge detect to count C7 entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State; C0 and C1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
+ "PerPkg": "1",
+ "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State; C3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
+ "PerPkg": "1",
+ "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State; C6 and C7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
+ "PerPkg": "1",
+ "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "External Prochot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA",
+ "EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Internal Prochot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_P_PROCHOT_INTERNAL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that we are in Interal PROCHOT mode. This mode is triggered when a sensor on the die determines that we are too hot and must throttle to avoid damaging the chip.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Total Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_P_TOTAL_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions across all cores.",
+ "Unit": "PCU"
+ },
+ {
+ "Counter": "0,1,2,3",
+ "EventCode": "0x79",
+ "EventName": "UNC_P_UFS_TRANSITIONS_RING_GV",
+ "PerPkg": "1",
+ "PublicDescription": "Ring GV with same final and initial frequency",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "VR Hot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x42",
+ "EventName": "UNC_P_VR_HOT_CYCLES",
"PerPkg": "1",
"Unit": "PCU"
}
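Several of the PCU descriptions above express usage as a fraction of UNC_P_CLOCKTICKS, and the MetricExpr lines removed by this hunk computed exactly that, e.g. (UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES / UNC_P_CLOCKTICKS) * 100. The lines below are a minimal sketch of re-deriving those percentages by hand from raw counts; the numbers are placeholders standing in for values read out with perf over one measurement interval.

# Sketch only: PCU cycle counts as percentages of pclk cycles, mirroring the
# removed MetricExpr entries. All counts below are placeholder values.
pcu_clockticks = 5_000_000_000           # UNC_P_CLOCKTICKS (fixed 1 GHz pclk)
samples = {
    "thermal_limit_cycles": 120_000_000,    # UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES
    "power_limit_cycles":    80_000_000,    # UNC_P_FREQ_MAX_POWER_CYCLES
    "pkg_c6_residency":   2_500_000_000,    # UNC_P_PKG_RESIDENCY_C6_CYCLES
}

for name, cycles in samples.items():
    pct = 100.0 * cycles / pcu_clockticks
    print(f"{name}: {pct:.1f}% of pclk cycles")

Because the pclk runs at a fixed 1 GHz, UNC_P_CLOCKTICKS also serves as a wall-time reference: dividing any of these cycle counts by 1e9 gives seconds.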
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/virtual-memory.json b/tools/perf/pmu-events/arch/x86/broadwellde/virtual-memory.json
index 818a8b132c08..6a6de8790f25 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/virtual-memory.json
@@ -385,4 +385,4 @@
"SampleAfterValue": "100007",
"UMask": "0x20"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json b/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json
index b055947c0afe..a3a15ee52841 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json
@@ -75,12 +75,6 @@
"MetricName": "UpTB"
},
{
- "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
- "MetricExpr": "1 / (INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD)",
- "MetricGroup": "Pipeline;Mem",
- "MetricName": "CPI"
- },
- {
"BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "Pipeline",
@@ -130,44 +124,26 @@
"MetricName": "FLOPc_SMT"
},
{
- "BriefDescription": "Actual per-core usage of the Floating Point execution units (regardless of the vector width)",
+ "BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
"MetricExpr": "( (FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE) + (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) ) / ( 2 * CPU_CLK_UNHALTED.THREAD )",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "FP_Arith_Utilization",
- "PublicDescription": "Actual per-core usage of the Floating Point execution units (regardless of the vector width). Values > 1 are possible due to Fused-Multiply Add (FMA) counting."
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
},
{
- "BriefDescription": "Actual per-core usage of the Floating Point execution units (regardless of the vector width). SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "( (FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE) + (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) ) / ( 2 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ) )",
"MetricGroup": "Cor;Flops;HPC_SMT",
"MetricName": "FP_Arith_Utilization_SMT",
- "PublicDescription": "Actual per-core usage of the Floating Point execution units (regardless of the vector width). Values > 1 are possible due to Fused-Multiply Add (FMA) counting. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common). SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
"MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
"MetricName": "ILP"
},
{
- "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
- "MetricExpr": " ( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) * (BR_MISP_RETIRED.ALL_BRANCHES * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / CPU_CLK_UNHALTED.THREAD) / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY )) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) ) * (4 * CPU_CLK_UNHALTED.THREAD) / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "Bad;BrMispredicts",
- "MetricName": "Branch_Misprediction_Cost"
- },
- {
- "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
- "MetricExpr": " ( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * (BR_MISP_RETIRED.ALL_BRANCHES * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / CPU_CLK_UNHALTED.THREAD) / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY )) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) ) * (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "Bad;BrMispredicts_SMT",
- "MetricName": "Branch_Misprediction_Cost_SMT"
- },
- {
- "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "Bad;BadSpec;BrMispredicts",
- "MetricName": "IpMispredict"
- },
- {
"BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
"MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"MetricGroup": "SMT",
@@ -257,41 +233,52 @@
"MetricName": "Instructions"
},
{
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "Retire"
+ },
+ {
+ "BriefDescription": "",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
+ "MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
+ "MetricName": "Execute"
+ },
+ {
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
"MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
"MetricGroup": "DSB;Fed;FetchBW",
"MetricName": "DSB_Coverage"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load instructions (in core cycles)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
- "MetricGroup": "Mem;MemoryBound;MemoryLat",
- "MetricName": "Load_Miss_Real_Latency",
- "PublicDescription": "Actual Average Latency for L1 data-cache miss demand load instructions (in core cycles). Latency may be overestimated for multi-load instructions - e.g. repeat strings."
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts",
+ "MetricName": "IpMispredict"
},
{
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
- "MetricGroup": "Mem;MemoryBound;MemoryBW",
- "MetricName": "MLP"
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
+ "MetricExpr": " ( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) * (BR_MISP_RETIRED.ALL_BRANCHES * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / CPU_CLK_UNHALTED.THREAD) / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY )) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) ) * (4 * CPU_CLK_UNHALTED.THREAD) / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "Branch_Misprediction_Cost"
},
{
- "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
- "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L1D_Cache_Fill_BW"
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
+ "MetricExpr": " ( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * (BR_MISP_RETIRED.ALL_BRANCHES * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / CPU_CLK_UNHALTED.THREAD) / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY )) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) ) * (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BrMispredicts_SMT",
+ "MetricName": "Branch_Misprediction_Cost_SMT"
},
{
- "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L2_Cache_Fill_BW"
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
+ "MetricGroup": "Mem;MemoryBound;MemoryLat",
+ "MetricName": "Load_Miss_Real_Latency"
},
{
- "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L3_Cache_Fill_BW"
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "MetricGroup": "Mem;MemoryBound;MemoryBW",
+ "MetricName": "MLP"
},
{
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
@@ -306,13 +293,13 @@
"MetricName": "L2MPKI"
},
{
- "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)",
"MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
"MetricGroup": "Mem;CacheMisses;Offcore",
"MetricName": "L2MPKI_All"
},
{
- "BriefDescription": "L2 cache misses per kilo instruction for all demand loads (including speculative)",
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)",
"MetricExpr": "1000 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY",
"MetricGroup": "Mem;CacheMisses",
"MetricName": "L2MPKI_Load"
@@ -349,6 +336,48 @@
"MetricName": "Page_Walks_Utilization_SMT"
},
{
+ "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L1D_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L2_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L3_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "(64 * L1D.REPLACEMENT / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L1D_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "(64 * L2_LINES_IN.ALL / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L2_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "(64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L3_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "0",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "L3_Cache_Access_BW_1T"
+ },
+ {
"BriefDescription": "Average CPU Utilization",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "HPC;Summary",
@@ -364,7 +393,8 @@
"BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "( ( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE ) / 1000000000 ) / duration_time",
"MetricGroup": "Cor;Flops;HPC",
- "MetricName": "GFLOPs"
+ "MetricName": "GFLOPs",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine."
},
{
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
@@ -415,6 +445,12 @@
"MetricName": "Socket_CLKS"
},
{
+ "BriefDescription": "Uncore frequency per die [GHZ]",
+ "MetricExpr": "cbox_0@event\\=0x0@ / #num_dies / duration_time / 1000000000",
+ "MetricGroup": "SoC",
+ "MetricName": "UNCORE_FREQ"
+ },
+ {
"BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
"MetricGroup": "Branches;OS",
@@ -461,5 +497,439 @@
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"MetricName": "C7_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "CPU operating frequency (in GHz)",
+ "MetricExpr": "( CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC * #SYSTEM_TSC_FREQ ) / 1000000000",
+ "MetricGroup": "",
+ "MetricName": "cpu_operating_frequency",
+ "ScaleUnit": "1GHz"
+ },
+ {
+ "BriefDescription": "Cycles per instruction retired; indicating how much time each executed instruction took; in units of cycles.",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "cpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "The ratio of number of completed memory load instructions to the total number completed instructions",
+ "MetricExpr": "MEM_UOPS_RETIRED.ALL_LOADS / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "loads_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "The ratio of number of completed memory store instructions to the total number completed instructions",
+ "MetricExpr": "MEM_UOPS_RETIRED.ALL_STORES / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "stores_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of requests missing L1 data cache (includes data+rfo w/ prefetches) to the total number of completed instructions",
+ "MetricExpr": "L1D.REPLACEMENT / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "l1d_mpi_includes_data_plus_rfo_with_prefetches",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of demand load requests hitting in L1 data cache to the total number of completed instructions",
+ "MetricExpr": "MEM_LOAD_UOPS_RETIRED.L1_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "l1d_demand_data_read_hits_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of code read requests missing in L1 instruction cache (includes prefetches) to the total number of completed instructions",
+ "MetricExpr": "L2_RQSTS.ALL_CODE_RD / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "l1_i_code_read_misses_with_prefetches_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed demand load requests hitting in L2 cache to the total number of completed instructions",
+ "MetricExpr": "MEM_LOAD_UOPS_RETIRED.L2_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "l2_demand_data_read_hits_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of requests missing L2 cache (includes code+data+rfo w/ prefetches) to the total number of completed instructions",
+ "MetricExpr": "L2_LINES_IN.ALL / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "l2_mpi_includes_code_plus_data_plus_rfo_with_prefetches",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed data read request missing L2 cache to the total number of completed instructions",
+ "MetricExpr": "MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "l2_demand_data_read_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of code read request missing L2 cache to the total number of completed instructions",
+ "MetricExpr": "L2_RQSTS.CODE_RD_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "l2_demand_code_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of data read requests missing last level core cache (includes demand w/ prefetches) to the total number of completed instructions",
+ "MetricExpr": "( cbox@UNC_C_TOR_INSERTS.MISS_OPCODE\\,filter_opc\\=0x182@ + cbox@UNC_C_TOR_INSERTS.MISS_OPCODE\\,filter_opc\\=0x192@ ) / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "llc_data_read_mpi_demand_plus_prefetch",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of code read requests missing last level core cache (includes demand w/ prefetches) to the total number of completed instructions",
+ "MetricExpr": "( cbox@UNC_C_TOR_INSERTS.MISS_OPCODE\\,filter_opc\\=0x181@ + cbox@UNC_C_TOR_INSERTS.MISS_OPCODE\\,filter_opc\\=0x191@ ) / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "llc_code_read_mpi_demand_plus_prefetch",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Average latency of a last level cache (LLC) demand and prefetch data read miss (read memory access) in nano seconds",
+ "MetricExpr": "( 1000000000 * ( cbox@UNC_C_TOR_OCCUPANCY.MISS_OPCODE\\,filter_opc\\=0x182@ / cbox@UNC_C_TOR_INSERTS.MISS_OPCODE\\,filter_opc\\=0x182@ ) / ( UNC_C_CLOCKTICKS / ( source_count(UNC_C_CLOCKTICKS) * #num_packages ) ) ) * duration_time",
+ "MetricGroup": "",
+ "MetricName": "llc_data_read_demand_plus_prefetch_miss_latency",
+ "ScaleUnit": "1ns"
+ },
+ {
+ "BriefDescription": "Average latency of a last level cache (LLC) demand and prefetch data read miss (read memory access) addressed to local memory in nano seconds",
+ "MetricExpr": "( 1000000000 * ( cbox@UNC_C_TOR_OCCUPANCY.MISS_LOCAL_OPCODE\\,filter_opc\\=0x182@ / cbox@UNC_C_TOR_INSERTS.MISS_OPCODE\\,filter_opc\\=0x182@ ) / ( UNC_C_CLOCKTICKS / ( source_count(UNC_C_CLOCKTICKS) * #num_packages ) ) ) * duration_time",
+ "MetricGroup": "",
+ "MetricName": "llc_data_read_demand_plus_prefetch_miss_latency_for_local_requests",
+ "ScaleUnit": "1ns"
+ },
+ {
+ "BriefDescription": "Average latency of a last level cache (LLC) demand and prefetch data read miss (read memory access) addressed to remote memory in nano seconds",
+ "MetricExpr": "( 1000000000 * ( cbox@UNC_C_TOR_OCCUPANCY.MISS_REMOTE_OPCODE\\,filter_opc\\=0x182@ / cbox@UNC_C_TOR_INSERTS.MISS_OPCODE\\,filter_opc\\=0x182@ ) / ( UNC_C_CLOCKTICKS / ( source_count(UNC_C_CLOCKTICKS) * #num_packages ) ) ) * duration_time",
+ "MetricGroup": "",
+ "MetricName": "llc_data_read_demand_plus_prefetch_miss_latency_for_remote_requests",
+ "ScaleUnit": "1ns"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for all page sizes) caused by a code fetch to the total number of completed instructions. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB.",
+ "MetricExpr": "ITLB_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "itlb_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for 2 megabyte and 4 megabyte page sizes) caused by a code fetch to the total number of completed instructions. This implies it missed in the Instruction Translation Lookaside Buffer (ITLB) and further levels of TLB.",
+ "MetricExpr": "ITLB_MISSES.WALK_COMPLETED_2M_4M / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "itlb_large_page_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for all page sizes) caused by demand data loads to the total number of completed instructions. This implies it missed in the DTLB and further levels of TLB.",
+ "MetricExpr": "DTLB_LOAD_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "dtlb_load_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for all page sizes) caused by demand data stores to the total number of completed instructions. This implies it missed in the DTLB and further levels of TLB.",
+ "MetricExpr": "DTLB_STORE_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "dtlb_store_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Memory read that miss the last level cache (LLC) addressed to local DRAM as a percentage of total memory read accesses, does not include LLC prefetches.",
+ "MetricExpr": "100 * cbox@UNC_C_TOR_INSERTS.MISS_OPCODE\\,filter_opc\\=0x182@ / ( cbox@UNC_C_TOR_INSERTS.MISS_OPCODE\\,filter_opc\\=0x182@ + cbox@UNC_C_TOR_INSERTS.MISS_OPCODE\\,filter_opc\\=0x182@ )",
+ "MetricGroup": "",
+ "MetricName": "numa_percent_reads_addressed_to_local_dram",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Memory reads that miss the last level cache (LLC) addressed to remote DRAM as a percentage of total memory read accesses, does not include LLC prefetches.",
+ "MetricExpr": "100 * cbox@UNC_C_TOR_INSERTS.MISS_OPCODE\\,filter_opc\\=0x182@ / ( cbox@UNC_C_TOR_INSERTS.MISS_OPCODE\\,filter_opc\\=0x182@ + cbox@UNC_C_TOR_INSERTS.MISS_OPCODE\\,filter_opc\\=0x182@ )",
+ "MetricGroup": "",
+ "MetricName": "numa_percent_reads_addressed_to_remote_dram",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Uncore operating frequency in GHz",
+ "MetricExpr": "UNC_C_CLOCKTICKS / ( source_count(UNC_C_CLOCKTICKS) * #num_packages ) / 1000000000",
+ "MetricGroup": "",
+ "MetricName": "uncore_frequency",
+ "ScaleUnit": "1GHz"
+ },
+ {
+ "BriefDescription": "Intel(R) Quick Path Interconnect (QPI) data transmit bandwidth (MB/sec)",
+ "MetricExpr": "( UNC_Q_TxL_FLITS_G0.DATA * 8 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "qpi_data_transmit_bw_only_data",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "DDR memory read bandwidth (MB/sec)",
+ "MetricExpr": "( UNC_M_CAS_COUNT.RD * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "memory_bandwidth_read",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "DDR memory write bandwidth (MB/sec)",
+ "MetricExpr": "( UNC_M_CAS_COUNT.WR * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "memory_bandwidth_write",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "DDR memory bandwidth (MB/sec)",
+ "MetricExpr": "(( UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR ) * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "memory_bandwidth_total",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth of IO reads that are initiated by end device controllers that are requesting memory from the CPU.",
+ "MetricExpr": "( cbox@UNC_C_TOR_INSERTS.OPCODE\\,filter_opc\\=0x19e@ * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "io_bandwidth_read",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth of IO writes that are initiated by end device controllers that are writing memory to the CPU.",
+ "MetricExpr": "(( cbox@UNC_C_TOR_INSERTS.OPCODE\\,filter_opc\\=0x1c8\\,filter_tid\\=0x3e@ + cbox@UNC_C_TOR_INSERTS.OPCODE\\,filter_opc\\=0x180\\,filter_tid\\=0x3e@ ) * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "io_bandwidth_write",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Uops delivered from decoded instruction cache (decoded stream buffer or DSB) as a percent of total uops delivered to Instruction Decode Queue",
+ "MetricExpr": "100 * ( IDQ.DSB_UOPS / UOPS_ISSUED.ANY )",
+ "MetricGroup": "",
+ "MetricName": "percent_uops_delivered_from_decoded_icache_dsb",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Uops delivered from legacy decode pipeline (Micro-instruction Translation Engine or MITE) as a percent of total uops delivered to Instruction Decode Queue",
+ "MetricExpr": "100 * ( IDQ.MITE_UOPS / UOPS_ISSUED.ANY )",
+ "MetricGroup": "",
+ "MetricName": "percent_uops_delivered_from_legacy_decode_pipeline_mite",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Uops delivered from microcode sequencer (MS) as a percent of total uops delivered to Instruction Decode Queue",
+ "MetricExpr": "100 * ( IDQ.MS_UOPS / UOPS_ISSUED.ANY )",
+ "MetricGroup": "",
+ "MetricName": "percent_uops_delivered_from_microcode_sequencer_ms",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Uops delivered from loop stream detector(LSD) as a percent of total uops delivered to Instruction Decode Queue",
+ "MetricExpr": "100 * ( LSD.UOPS / UOPS_ISSUED.ANY )",
+ "MetricGroup": "",
+ "MetricName": "percent_uops_delivered_from_loop_stream_detector_lsd",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Machine_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+ "MetricExpr": "100 * ( IDQ_UOPS_NOT_DELIVERED.CORE / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) )",
+ "MetricGroup": "TmaL1;PGO",
+ "MetricName": "tma_frontend_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period.",
+ "MetricExpr": "100 * ( ( 4 ) * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) )",
+ "MetricGroup": "Frontend;TmaL2;m_tma_frontend_bound_percent",
+ "MetricName": "tma_fetch_latency_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses.",
+ "MetricExpr": "100 * ( ICACHE.IFDATA_STALL / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "BigFoot;FetchLat;IcMiss;TmaL3;m_tma_fetch_latency_percent",
+ "MetricName": "tma_icache_misses_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses.",
+ "MetricExpr": "100 * ( ( 14 * ITLB_MISSES.STLB_HIT + cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=0x1@ + 7 * ITLB_MISSES.WALK_COMPLETED ) / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TmaL3;m_tma_fetch_latency_percent",
+ "MetricName": "tma_itlb_misses_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings.",
+ "MetricExpr": "100 * ( ( 12 ) * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "FetchLat;TmaL3;m_tma_fetch_latency_percent",
+ "MetricName": "tma_branch_resteers_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty.",
+ "MetricExpr": "100 * ( DSB2MITE_SWITCHES.PENALTY_CYCLES / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "DSBmiss;FetchLat;TmaL3;m_tma_fetch_latency_percent",
+ "MetricName": "tma_dsb_switches_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs.",
+ "MetricExpr": "100 * ( ILD_STALL.LCP / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "FetchLat;TmaL3;m_tma_fetch_latency_percent",
+ "MetricName": "tma_lcp_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals.",
+ "MetricExpr": "100 * ( ( 2 ) * IDQ.MS_SWITCHES / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "FetchLat;MicroSeq;TmaL3;m_tma_fetch_latency_percent",
+ "MetricName": "tma_ms_switches_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend.",
+ "MetricExpr": "100 * ( ( IDQ_UOPS_NOT_DELIVERED.CORE / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( ( 4 ) * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) )",
+ "MetricGroup": "FetchBW;Frontend;TmaL2;m_tma_frontend_bound_percent",
+ "MetricName": "tma_fetch_bandwidth_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck.",
+ "MetricExpr": "100 * ( ( IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS ) / ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) / 2 )",
+ "MetricGroup": "DSBmiss;FetchBW;TmaL3;m_tma_fetch_bandwidth_percent",
+ "MetricName": "tma_mite_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
+ "MetricExpr": "100 * ( ( IDQ.ALL_DSB_CYCLES_ANY_UOPS - IDQ.ALL_DSB_CYCLES_4_UOPS ) / ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) / 2 )",
+ "MetricGroup": "DSB;FetchBW;TmaL3;m_tma_fetch_bandwidth_percent",
+ "MetricName": "tma_dsb_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "MetricExpr": "100 * ( ( UOPS_ISSUED.ANY - ( UOPS_RETIRED.RETIRE_SLOTS ) + ( 4 ) * ( ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) if #SMT_on else INT_MISC.RECOVERY_CYCLES ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) )",
+ "MetricGroup": "TmaL1",
+ "MetricName": "tma_bad_speculation_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path.",
+ "MetricExpr": "100 * ( ( BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT ) ) * ( ( UOPS_ISSUED.ANY - ( UOPS_RETIRED.RETIRE_SLOTS ) + ( 4 ) * ( ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) if #SMT_on else INT_MISC.RECOVERY_CYCLES ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) )",
+ "MetricGroup": "BadSpec;BrMispredicts;TmaL2;m_tma_bad_speculation_percent",
+ "MetricName": "tma_branch_mispredicts_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes.",
+ "MetricExpr": "100 * ( ( ( UOPS_ISSUED.ANY - ( UOPS_RETIRED.RETIRE_SLOTS ) + ( 4 ) * ( ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) if #SMT_on else INT_MISC.RECOVERY_CYCLES ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( ( BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT ) ) * ( ( UOPS_ISSUED.ANY - ( UOPS_RETIRED.RETIRE_SLOTS ) + ( 4 ) * ( ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) if #SMT_on else INT_MISC.RECOVERY_CYCLES ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) )",
+ "MetricGroup": "BadSpec;MachineClears;TmaL2;m_tma_bad_speculation_percent",
+ "MetricName": "tma_machine_clears_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "MetricExpr": "100 * ( 1 - ( ( IDQ_UOPS_NOT_DELIVERED.CORE / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) + ( ( UOPS_ISSUED.ANY - ( UOPS_RETIRED.RETIRE_SLOTS ) + ( 4 ) * ( ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) if #SMT_on else INT_MISC.RECOVERY_CYCLES ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) + ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) )",
+ "MetricGroup": "TmaL1",
+ "MetricName": "tma_backend_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
+ "MetricExpr": "100 * ( ( ( CYCLE_ACTIVITY.STALLS_MEM_ANY + RESOURCE_STALLS.SB ) / ( ( CYCLE_ACTIVITY.STALLS_TOTAL + UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC - ( UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC if ( ( INST_RETIRED.ANY / ( CPU_CLK_UNHALTED.THREAD ) ) > 1.8 ) else UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC ) - ( RS_EVENTS.EMPTY_CYCLES if ( ( ( 4 ) * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) > 0.1 ) else 0 ) + RESOURCE_STALLS.SB ) ) ) * ( 1 - ( ( IDQ_UOPS_NOT_DELIVERED.CORE / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) + ( ( UOPS_ISSUED.ANY - ( UOPS_RETIRED.RETIRE_SLOTS ) + ( 4 ) * ( ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) if #SMT_on else INT_MISC.RECOVERY_CYCLES ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) + ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) ) )",
+ "MetricGroup": "Backend;TmaL2;m_tma_backend_bound_percent",
+ "MetricName": "tma_memory_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache.",
+ "MetricExpr": "100 * ( max( ( CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS ) / ( CPU_CLK_UNHALTED.THREAD ) , 0 ) )",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TmaL3;m_tma_memory_bound_percent",
+ "MetricName": "tma_l1_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance.",
+ "MetricExpr": "100 * ( ( CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS ) / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TmaL3;m_tma_memory_bound_percent",
+ "MetricName": "tma_l2_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance.",
+ "MetricExpr": "100 * ( ( MEM_LOAD_UOPS_RETIRED.L3_HIT / ( MEM_LOAD_UOPS_RETIRED.L3_HIT + ( 7 ) * MEM_LOAD_UOPS_RETIRED.L3_MISS ) ) * CYCLE_ACTIVITY.STALLS_L2_MISS / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TmaL3;m_tma_memory_bound_percent",
+ "MetricName": "tma_l3_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance.",
+ "MetricExpr": "100 * ( min( ( ( 1 - ( MEM_LOAD_UOPS_RETIRED.L3_HIT / ( MEM_LOAD_UOPS_RETIRED.L3_HIT + ( 7 ) * MEM_LOAD_UOPS_RETIRED.L3_MISS ) ) ) * CYCLE_ACTIVITY.STALLS_L2_MISS / ( CPU_CLK_UNHALTED.THREAD ) ) , ( 1 ) ) )",
+ "MetricGroup": "MemoryBound;TmaL3mem;TmaL3;m_tma_memory_bound_percent",
+ "MetricName": "tma_dram_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck.",
+ "MetricExpr": "100 * ( RESOURCE_STALLS.SB / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "MemoryBound;TmaL3mem;TmaL3;m_tma_memory_bound_percent",
+ "MetricName": "tma_store_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
+ "MetricExpr": "100 * ( ( 1 - ( ( IDQ_UOPS_NOT_DELIVERED.CORE / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) + ( ( UOPS_ISSUED.ANY - ( UOPS_RETIRED.RETIRE_SLOTS ) + ( 4 ) * ( ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) if #SMT_on else INT_MISC.RECOVERY_CYCLES ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) + ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) ) - ( ( ( CYCLE_ACTIVITY.STALLS_MEM_ANY + RESOURCE_STALLS.SB ) / ( ( CYCLE_ACTIVITY.STALLS_TOTAL + UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC - ( UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC if ( ( INST_RETIRED.ANY / ( CPU_CLK_UNHALTED.THREAD ) ) > 1.8 ) else UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC ) - ( RS_EVENTS.EMPTY_CYCLES if ( ( ( 4 ) * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) > 0.1 ) else 0 ) + RESOURCE_STALLS.SB ) ) ) * ( 1 - ( ( IDQ_UOPS_NOT_DELIVERED.CORE / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) + ( ( UOPS_ISSUED.ANY - ( UOPS_RETIRED.RETIRE_SLOTS ) + ( 4 ) * ( ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) if #SMT_on else INT_MISC.RECOVERY_CYCLES ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) + ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) ) ) )",
+ "MetricGroup": "Backend;TmaL2;Compute;m_tma_backend_bound_percent",
+ "MetricName": "tma_core_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication.",
+ "MetricExpr": "100 * ( ARITH.FPU_DIV_ACTIVE / ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) )",
+ "MetricGroup": "TmaL3;m_tma_core_bound_percent",
+ "MetricName": "tma_divider_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
+ "MetricExpr": "100 * ( ( ( ( CYCLE_ACTIVITY.STALLS_TOTAL + UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC - ( UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC if ( ( INST_RETIRED.ANY / ( CPU_CLK_UNHALTED.THREAD ) ) > 1.8 ) else UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC ) - ( RS_EVENTS.EMPTY_CYCLES if ( ( ( 4 ) * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) > 0.1 ) else 0 ) + RESOURCE_STALLS.SB ) ) - RESOURCE_STALLS.SB - CYCLE_ACTIVITY.STALLS_MEM_ANY ) / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "PortsUtil;TmaL3;m_tma_core_bound_percent",
+ "MetricName": "tma_ports_utilization_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. ",
+ "MetricExpr": "100 * ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) )",
+ "MetricGroup": "TmaL1",
+ "MetricName": "tma_retiring_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved.",
+ "MetricExpr": "100 * ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) / UOPS_ISSUED.ANY ) * IDQ.MS_UOPS / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) )",
+ "MetricGroup": "Retire;TmaL2;m_tma_retiring_percent",
+ "MetricName": "tma_light_operations_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
+ "MetricExpr": "100 * ( ( INST_RETIRED.X87 * ( ( UOPS_RETIRED.RETIRE_SLOTS ) / INST_RETIRED.ANY ) / ( UOPS_RETIRED.RETIRE_SLOTS ) ) + ( ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) / ( UOPS_RETIRED.RETIRE_SLOTS ) ) + ( min( ( ( FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE ) / ( UOPS_RETIRED.RETIRE_SLOTS ) ) , ( 1 ) ) ) )",
+ "MetricGroup": "HPC;TmaL3;m_tma_light_operations_percent",
+ "MetricName": "tma_fp_arith_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or microcoded sequences. This highly-correlates with the uop length of these instructions/sequences.",
+ "MetricExpr": "100 * ( ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) / UOPS_ISSUED.ANY ) * IDQ.MS_UOPS / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) )",
+ "MetricGroup": "Retire;TmaL2;m_tma_retiring_percent",
+ "MetricName": "tma_heavy_operations_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided.",
+ "MetricExpr": "100 * ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) / UOPS_ISSUED.ANY ) * IDQ.MS_UOPS / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) )",
+ "MetricGroup": "MicroSeq;TmaL3;m_tma_heavy_operations_percent",
+ "MetricName": "tma_microcode_sequencer_percent",
+ "ScaleUnit": "1%"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/cache.json b/tools/perf/pmu-events/arch/x86/broadwellx/cache.json
index 127abe08362f..2efc4c0ee740 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/cache.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/cache.json
@@ -814,9 +814,8 @@
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0244",
+ "MSRValue": "0x4003C0244",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -829,7 +828,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0091",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -840,9 +838,8 @@
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0091",
+ "MSRValue": "0x4003C0091",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -855,7 +852,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C07F7",
"Offcore": "1",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -866,9 +862,8 @@
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C07F7",
+ "MSRValue": "0x4003C07F7",
"Offcore": "1",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -881,7 +876,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C8FFF",
"Offcore": "1",
- "PublicDescription": "Counts all requests hit in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -894,7 +888,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0122",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -905,9 +898,8 @@
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0122",
+ "MSRValue": "0x4003C0122",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -920,7 +912,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0002",
"Offcore": "1",
- "PublicDescription": "Counts all demand data writes (RFOs) hit in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -933,7 +924,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0002",
"Offcore": "1",
- "PublicDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -946,7 +936,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0200",
"Offcore": "1",
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -959,7 +948,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0100",
"Offcore": "1",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -973,4 +961,4 @@
"SampleAfterValue": "100003",
"UMask": "0x10"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json b/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json
index 9ad37dddb354..93bbc8600321 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json
@@ -5,6 +5,7 @@
"CounterHTOff": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
"UMask": "0x4"
},
@@ -14,6 +15,7 @@
"CounterHTOff": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
"UMask": "0x8"
},
@@ -23,6 +25,7 @@
"CounterHTOff": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
"UMask": "0x10"
},
@@ -32,6 +35,7 @@
"CounterHTOff": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
"UMask": "0x20"
},
@@ -59,6 +63,7 @@
"CounterHTOff": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR",
+ "PublicDescription": "Number of SSE/AVX computational scalar single precision and double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
"UMask": "0x3"
},
@@ -68,6 +73,7 @@
"CounterHTOff": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
@@ -77,6 +83,7 @@
"CounterHTOff": "0,1,2,3",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
"UMask": "0x2"
},
@@ -190,4 +197,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x3"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/frontend.json b/tools/perf/pmu-events/arch/x86/broadwellx/frontend.json
index f0bcb945ff76..37ce8034b2ed 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/frontend.json
@@ -292,4 +292,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/memory.json b/tools/perf/pmu-events/arch/x86/broadwellx/memory.json
index cce993b197e3..545f61f691b9 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/memory.json
@@ -247,7 +247,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC00244",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch code reads miss in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -258,9 +257,8 @@
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0604000244",
+ "MSRValue": "0x604000244",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -273,7 +271,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC00091",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch data reads miss in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -284,9 +281,8 @@
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0604000091",
+ "MSRValue": "0x604000091",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -297,9 +293,8 @@
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x063BC00091",
+ "MSRValue": "0x63BC00091",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from remote dram",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -312,7 +307,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00091",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the modified data is transferred from remote cache",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -323,9 +317,8 @@
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x087FC00091",
+ "MSRValue": "0x87FC00091",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and clean or shared data is transferred from remote cache",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -338,20 +331,18 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC007F7",
"Offcore": "1",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch)miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from local dram",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x06040007F7",
+ "MSRValue": "0x6040007F7",
"Offcore": "1",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch)miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -362,9 +353,8 @@
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x063BC007F7",
+ "MSRValue": "0x63BC007F7",
"Offcore": "1",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from remote dram",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -377,7 +367,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC007F7",
"Offcore": "1",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the modified data is transferred from remote cache",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -388,9 +377,8 @@
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x087FC007F7",
+ "MSRValue": "0x87FC007F7",
"Offcore": "1",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and clean or shared data is transferred from remote cache",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -403,7 +391,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC08FFF",
"Offcore": "1",
- "PublicDescription": "Counts all requests miss in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -416,7 +403,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC00122",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch RFOs miss in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -427,9 +413,8 @@
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0604000122",
+ "MSRValue": "0x604000122",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -442,7 +427,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC00002",
"Offcore": "1",
- "PublicDescription": "Counts all demand data writes (RFOs) miss in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -455,7 +439,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00002",
"Offcore": "1",
- "PublicDescription": "Counts all demand data writes (RFOs) miss the L3 and the modified data is transferred from remote cache",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -468,7 +451,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC00200",
"Offcore": "1",
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -481,7 +463,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC00100",
"Offcore": "1",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -684,4 +665,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x40"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/other.json b/tools/perf/pmu-events/arch/x86/broadwellx/other.json
index 4b360fe96698..917d145d5227 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/other.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/other.json
@@ -41,4 +41,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json b/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json
index 18d21b94a4b9..f0f30081d683 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json
@@ -1369,7 +1369,7 @@
"BriefDescription": "Cycles with less than 10 actually retired uops.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
- "CounterMask": "10",
+ "CounterMask": "16",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.TOTAL_CYCLES",
"Invert": "1",
@@ -1377,4 +1377,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/uncore-cache.json b/tools/perf/pmu-events/arch/x86/broadwellx/uncore-cache.json
index 58ed6d33d1f4..abee6f773c1f 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/uncore-cache.json
@@ -1,12 +1,63 @@
[
{
- "BriefDescription": "Uncore cache clock ticks",
+ "BriefDescription": "Bounce Control",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA",
+ "EventName": "UNC_C_BOUNCE_CONTROL",
+ "PerPkg": "1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Uncore Clocks",
"Counter": "0,1,2,3",
"EventName": "UNC_C_CLOCKTICKS",
"PerPkg": "1",
"Unit": "CBO"
},
{
+ "BriefDescription": "Counter 0 Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1F",
+ "EventName": "UNC_C_COUNTER0_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "FaST wire asserted",
+ "Counter": "0,1",
+ "EventCode": "0x9",
+ "EventName": "UNC_C_FAST_ASSERTED",
+ "PerPkg": "1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cache Lookups; Data Read Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.DATA_READ",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cache Lookups; Write Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.WRITE",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cache Lookups; External Snoop Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.REMOTE_SNOOP",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "CBO"
+ },
+ {
"BriefDescription": "All LLC Misses (code+ data rd + data wr - including demand and prefetch)",
"Counter": "0,1,2,3",
"EventCode": "0x34",
@@ -18,6 +69,24 @@
"Unit": "CBO"
},
{
+ "BriefDescription": "Cache Lookups; Lookups that Match NID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.NID",
+ "PerPkg": "1",
+ "UMask": "0x41",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cache Lookups; Any Read Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.READ",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CBO"
+ },
+ {
"BriefDescription": "M line evictions from LLC (writebacks to memory)",
"Counter": "0,1,2,3",
"EventCode": "0x37",
@@ -28,6 +97,856 @@
"Unit": "CBO"
},
{
+ "BriefDescription": "Lines Victimized; Lines in E state",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.E_STATE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in S State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.I_STATE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Lines Victimized",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.F_STATE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Victimized Lines that Match NID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.NID",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Lines Victimized",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.MISS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cbo Misc; Silent Snoop Eviction",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.RSPI_WAS_FSE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cbo Misc; Write Combining Aliasing",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.WC_ALIASING",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cbo Misc",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.STARTED",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cbo Misc; RFO HitS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.RFO_HIT_S",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cbo Misc; Clean Victim with raw CV=0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.CVZERO_PREFETCH_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cbo Misc; DRd hitting non-M with raw CV=0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.CVZERO_PREFETCH_MISS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.DOWN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.UP",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.DOWN",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.ALL",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.DOWN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.UP",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.DOWN",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.ALL",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.DOWN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.UP",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.DOWN",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.ALL",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; AD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; BL",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; Snoops of processor's cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.IV",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Any",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_C_RING_IV_USED.ANY",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Any",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_C_RING_IV_USED.UP",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_C_RING_IV_USED.DOWN",
+ "PerPkg": "1",
+ "UMask": "0xCC",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Any",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_C_RING_IV_USED.DN",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Number of cycles the Cbo is actively throttling traffic onto the Ring in order to limit bounce traffic",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_C_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; IPQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; IRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; PRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; ISMQ_BID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.ISMQ_BIDS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; IRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; IRQ Rejected",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.IRQ_REJ",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; IPQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; PRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; PRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.PRQ_REJ",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; Any Reject",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.ANY",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; No Egress Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.FULL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; Address Conflict",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.ADDR_CONFLICT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; No QPI Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.QPI_CREDITS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; No AD Sbo Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_C_RxR_IPQ_RETRY2.AD_SBO",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; Target Node Filter",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_C_RxR_IPQ_RETRY2.TARGET",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; Any Reject",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.ANY",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No Egress Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.FULL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; Address Conflict",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.ADDR_CONFLICT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No RTIDs",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.RTID",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No QPI Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.QPI_CREDITS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No IIO Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.IIO_CREDITS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.NID",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No AD Sbo Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_C_RxR_IRQ_RETRY2.AD_SBO",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No BL Sbo Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_C_RxR_IRQ_RETRY2.BL_SBO",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; Target Node Filter",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_C_RxR_IRQ_RETRY2.TARGET",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; Any Reject",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.ANY",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No Egress Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.FULL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No RTIDs",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.RTID",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No QPI Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.QPI_CREDITS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No IIO Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.IIO_CREDITS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Retries",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.WB_CREDITS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Retries",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.NID",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Request Queue Rejects; No AD Sbo Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY2.AD_SBO",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Request Queue Rejects; No BL Sbo Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY2.BL_SBO",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Request Queue Rejects; Target Node Filter",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY2.TARGET",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; IRQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; IRQ Rejected",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.IRQ_REJ",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; IPQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; PRQ Rejects",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.PRQ_REJ",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "SBo Credits Acquired; For AD Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3D",
+ "EventName": "UNC_C_SBO_CREDITS_ACQUIRED.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "SBo Credits Acquired; For BL Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3D",
+ "EventName": "UNC_C_SBO_CREDITS_ACQUIRED.BL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "SBo Credits Occupancy; For AD Ring",
+ "EventCode": "0x3E",
+ "EventName": "UNC_C_SBO_CREDIT_OCCUPANCY.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "SBo Credits Occupancy; For BL Ring",
+ "EventCode": "0x3E",
+ "EventName": "UNC_C_SBO_CREDIT_OCCUPANCY.BL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Opcode Match",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.OPCODE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "PCIe writes (partial cache line). Derived from unc_c_tor_inserts.opcode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "LLC_REFERENCES.PCIE_NS_PARTIAL_WRITE",
+ "Filter": "filter_opc=0x180,filter_tid=0x3e",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "L2 demand and L2 prefetch code references to LLC. Derived from unc_c_tor_inserts.opcode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "LLC_REFERENCES.CODE_LLC_PREFETCH",
+ "Filter": "filter_opc=0x181",
+ "PerPkg": "1",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Streaming stores (full cache line). Derived from unc_c_tor_inserts.opcode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "LLC_REFERENCES.STREAMING_FULL",
+ "Filter": "filter_opc=0x18c",
+ "PerPkg": "1",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Streaming stores (partial cache line). Derived from unc_c_tor_inserts.opcode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "LLC_REFERENCES.STREAMING_PARTIAL",
+ "Filter": "filter_opc=0x18d",
+ "PerPkg": "1",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "PCIe read current. Derived from unc_c_tor_inserts.opcode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "LLC_REFERENCES.PCIE_READ",
+ "Filter": "filter_opc=0x19e",
+ "PerPkg": "1",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "PCIe write references (full cache line). Derived from unc_c_tor_inserts.opcode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "LLC_REFERENCES.PCIE_WRITE",
+ "Filter": "filter_opc=0x1c8,filter_tid=0x3e",
+ "PerPkg": "1",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Evictions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.EVICTION",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Inserts; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Writebacks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
"BriefDescription": "LLC misses - demand and prefetch data reads - excludes LLC prefetches. Derived from unc_c_tor_inserts.miss_opcode",
"Counter": "0,1,2,3",
"EventCode": "0x35",
@@ -39,6 +958,17 @@
"Unit": "CBO"
},
{
+ "BriefDescription": "LLC misses - demand and prefetch data reads - excludes LLC prefetches",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_OPCODE",
+ "Filter": "filter_opc=0x182",
+ "PerPkg": "1",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0x3",
+ "Unit": "CBO"
+ },
+ {
"BriefDescription": "LLC misses - Uncacheable reads (from cpu) . Derived from unc_c_tor_inserts.miss_opcode",
"Counter": "0,1,2,3",
"EventCode": "0x35",
@@ -138,71 +1068,156 @@
"Unit": "CBO"
},
{
- "BriefDescription": "PCIe writes (partial cache line). Derived from unc_c_tor_inserts.opcode",
+ "BriefDescription": "TOR Inserts; NID and Opcode Matched",
"Counter": "0,1,2,3",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_NS_PARTIAL_WRITE",
- "Filter": "filter_opc=0x180,filter_tid=0x3e",
+ "EventName": "UNC_C_TOR_INSERTS.NID_OPCODE",
"PerPkg": "1",
- "UMask": "0x1",
+ "UMask": "0x41",
"Unit": "CBO"
},
{
- "BriefDescription": "L2 demand and L2 prefetch code references to LLC. Derived from unc_c_tor_inserts.opcode",
+ "BriefDescription": "TOR Inserts; NID Matched Evictions",
"Counter": "0,1,2,3",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.CODE_LLC_PREFETCH",
- "Filter": "filter_opc=0x181",
+ "EventName": "UNC_C_TOR_INSERTS.NID_EVICTION",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
+ "UMask": "0x44",
"Unit": "CBO"
},
{
- "BriefDescription": "Streaming stores (full cache line). Derived from unc_c_tor_inserts.opcode",
+ "BriefDescription": "TOR Inserts; NID Matched",
"Counter": "0,1,2,3",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.STREAMING_FULL",
- "Filter": "filter_opc=0x18c",
+ "EventName": "UNC_C_TOR_INSERTS.NID_ALL",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
+ "UMask": "0x48",
"Unit": "CBO"
},
{
- "BriefDescription": "Streaming stores (partial cache line). Derived from unc_c_tor_inserts.opcode",
+ "BriefDescription": "TOR Inserts; NID Matched Writebacks",
"Counter": "0,1,2,3",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.STREAMING_PARTIAL",
- "Filter": "filter_opc=0x18d",
+ "EventName": "UNC_C_TOR_INSERTS.NID_WB",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
+ "UMask": "0x50",
"Unit": "CBO"
},
{
- "BriefDescription": "PCIe read current. Derived from unc_c_tor_inserts.opcode",
+ "BriefDescription": "TOR Inserts; NID and Opcode Matched Miss",
"Counter": "0,1,2,3",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_READ",
- "Filter": "filter_opc=0x19e",
+ "EventName": "UNC_C_TOR_INSERTS.NID_MISS_OPCODE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
+ "UMask": "0x43",
"Unit": "CBO"
},
{
- "BriefDescription": "PCIe write references (full cache line). Derived from unc_c_tor_inserts.opcode",
+ "BriefDescription": "TOR Inserts; NID Matched Miss All",
"Counter": "0,1,2,3",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_WRITE",
- "Filter": "filter_opc=0x1c8,filter_tid=0x3e",
+ "EventName": "UNC_C_TOR_INSERTS.NID_MISS_ALL",
+ "PerPkg": "1",
+ "UMask": "0x4A",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Misses to Local Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x2A",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Misses to Remote Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x8A",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Local Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x28",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Remote Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x88",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Misses to Local Memory - Opcode Matched",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_LOCAL_OPCODE",
+ "PerPkg": "1",
+ "UMask": "0x23",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Misses to Remote Memory - Opcode Matched",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_REMOTE_OPCODE",
+ "PerPkg": "1",
+ "UMask": "0x83",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Local Memory - Opcode Matched",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.LOCAL_OPCODE",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Remote Memory - Opcode Matched",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.REMOTE_OPCODE",
+ "PerPkg": "1",
+ "UMask": "0x81",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Opcode Match",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.OPCODE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
"UMask": "0x1",
"Unit": "CBO"
},
{
+ "BriefDescription": "TOR Occupancy; Evictions",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.EVICTION",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Any",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
"BriefDescription": "Occupancy counter for LLC data reads (demand and L2 prefetch). Derived from unc_c_tor_occupancy.miss_opcode",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.LLC_DATA_READ",
@@ -212,7 +1227,1090 @@
"Unit": "CBO"
},
{
- "BriefDescription": "read requests to home agent",
+ "BriefDescription": "Occupancy counter for LLC data reads (demand and L2 prefetch)",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_OPCODE",
+ "Filter": "filter_opc=0x182",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Miss All",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_ALL",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID and Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_OPCODE",
+ "PerPkg": "1",
+ "UMask": "0x41",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID Matched Evictions",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_EVICTION",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_ALL",
+ "PerPkg": "1",
+ "UMask": "0x48",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID and Opcode Matched Miss",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_MISS_OPCODE",
+ "PerPkg": "1",
+ "UMask": "0x43",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_MISS_ALL",
+ "PerPkg": "1",
+ "UMask": "0x4A",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x2A",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x8A",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x28",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x88",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Misses to Local Memory - Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_LOCAL_OPCODE",
+ "PerPkg": "1",
+ "UMask": "0x23",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Misses to Remote Memory - Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_REMOTE_OPCODE",
+ "PerPkg": "1",
+ "UMask": "0x83",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Local Memory - Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.LOCAL_OPCODE",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Remote Memory - Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.REMOTE_OPCODE",
+ "PerPkg": "1",
+ "UMask": "0x81",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Writebacks",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID Matched Writebacks",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_WB",
+ "PerPkg": "1",
+ "UMask": "0x50",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Onto AD Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_C_TxR_ADS_USED.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Onto AK Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_C_TxR_ADS_USED.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Onto BL Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_C_TxR_ADS_USED.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AD - Cachebo",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AD_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AK - Cachebo",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AK_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; BL - Cacheno",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.BL_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; IV - Cachebo",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.IV_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AD - Corebo",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AD_CORE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AK - Corebo",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AK_CORE",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; BL - Corebo",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.BL_CORE",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Age 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3C",
+ "EventName": "UNC_C_QLRU.AGE0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Age 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3C",
+ "EventName": "UNC_C_QLRU.AGE1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Age 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3C",
+ "EventName": "UNC_C_QLRU.AGE2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Age 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3C",
+ "EventName": "UNC_C_QLRU.AGE3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Bits Decremented",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3C",
+ "EventName": "UNC_C_QLRU.LRU_DECREMENT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "LRU Queue; Non-0 Aged Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3C",
+ "EventName": "UNC_C_QLRU.VICTIM_NON_ZERO",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_C_RING_SINK_STARVED.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_C_RING_SINK_STARVED.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_C_RING_SINK_STARVED.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_C_RING_SINK_STARVED.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; IRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; IPQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; ISMQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.ISMQ",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; PRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto AK Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_C_TxR_STARVED.AK_BOTH",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto BL Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_C_TxR_STARVED.BL_BOTH",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto IV Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_C_TxR_STARVED.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto AD Ring (to core)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_C_TxR_STARVED.AD_CORE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "QPI Address/Opcode Match; Address & Opcode Match",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_ADDR_OPC_MATCH.FILT",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "QPI Address/Opcode Match; Address",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_ADDR_OPC_MATCH.ADDR",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "QPI Address/Opcode Match; Opcode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_ADDR_OPC_MATCH.OPC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "QPI Address/Opcode Match; AD Opcodes",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_ADDR_OPC_MATCH.AD",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "QPI Address/Opcode Match; BL Opcodes",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_ADDR_OPC_MATCH.BL",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "QPI Address/Opcode Match; AK Opcodes",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_ADDR_OPC_MATCH.AK",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT Cycles Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x42",
+ "EventName": "UNC_H_BT_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Bypass; Taken",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_H_BYPASS_IMC.TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Bypass; Not Taken",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_H_BYPASS_IMC.NOT_TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "uclks",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_H_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Direct2Core Messages Sent",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_H_DIRECT2CORE_COUNT",
+ "PerPkg": "1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles when Direct2Core was Disabled",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_H_DIRECT2CORE_CYCLES_DISABLED",
+ "PerPkg": "1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Number of Reads that had Direct2Core Overridden",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_H_DIRECT2CORE_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lat Opt Return",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_H_DIRECTORY_LAT_OPT",
+ "PerPkg": "1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lookups; Snoop Needed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC",
+ "EventName": "UNC_H_DIRECTORY_LOOKUP.SNP",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lookups; Snoop Not Needed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC",
+ "EventName": "UNC_H_DIRECTORY_LOOKUP.NO_SNP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates; Directory Set",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.SET",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates; Directory Clear",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.CLEAR",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates; Any Directory Update",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.ANY",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is RdCode, RdData, RdDataMigratory, RdInvOwn, RdCur or InvItoE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.READ_OR_INVITOE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is WbMtoI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is AckCnfltWbI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.ACKCNFLTWBI",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is WbMtoE or WbMtoS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.WBMTOE_OR_S",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is RspIFwd or RspIFwdWb for a remote request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.RSPFWDI_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is RspIFwd or RspIFwdWb for a local request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.RSPFWDI_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is RsSFwd or RspSFwdWb",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.RSPFWDS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is RspI, RspIWb, RspS, RspSWb, RspCnflt or RspCnfltWbI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.RSP",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.ALLOCS",
+ "PerPkg": "1",
+ "UMask": "0x70",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.EVICTS",
+ "PerPkg": "1",
+ "UMask": "0x42",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; Invalidations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.INVALS",
+ "PerPkg": "1",
+ "UMask": "0x26",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; All Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.ALL",
+ "PerPkg": "1",
+ "UMask": "0xFF",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; HOM Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.HOM",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RdCode, RdData, RdDataMigratory, RdInvOwn, RdCur or InvItoE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.READ_OR_INVITOE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is WbMtoI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is AckCnfltWbI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.ACKCNFLTWBI",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is WbMtoE or WbMtoS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.WBMTOE_OR_S",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RspIFwd or RspIFwdWb for a remote request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.RSPFWDI_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RspIFwd or RspIFwdWb for a local request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.RSPFWDI_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RsSFwd or RspSFwdWb",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.RSPFWDS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RspI, RspIWb, RspS, RspSWb, RspCnflt or RspCnfltWbI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.RSP",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; All Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.ALL",
+ "PerPkg": "1",
+ "UMask": "0xFF",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; HOM Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.HOM",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RdCode, RdData, RdDataMigratory, RdInvOwn, RdCur or InvItoE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.READ_OR_INVITOE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is WbMtoI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is AckCnfltWbI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.ACKCNFLTWBI",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is WbMtoE or WbMtoS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.WBMTOE_OR_S",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RspIFwd or RspIFwdWb for a remote request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.RSPFWDI_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RspIFwd or RspIFwdWb for a local request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.RSPFWDI_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RsSFwd or RspSFwdWb",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.RSPFWDS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RspI, RspIWb, RspS, RspSWb, RspCnflt or RspCnfltWbI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.RSP",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.ALLOCS",
+ "PerPkg": "1",
+ "UMask": "0x70",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; Invalidations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.INVALS",
+ "PerPkg": "1",
+ "UMask": "0x26",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; All Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.ALL",
+ "PerPkg": "1",
+ "UMask": "0xFF",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; HOM Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.HOM",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; AD to QPI Link 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; AD to QPI Link 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI2",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI2",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Normal Priority Reads Issued; Normal Priority",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x17",
+ "EventName": "UNC_H_IMC_READS.NORMAL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Retry Events",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_H_IMC_RETRY",
+ "PerPkg": "1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; Full Line Non-ISOCH",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1A",
+ "EventName": "UNC_H_IMC_WRITES.FULL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; Partial Non-ISOCH",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1A",
+ "EventName": "UNC_H_IMC_WRITES.PARTIAL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; ISOCH Full Line",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1A",
+ "EventName": "UNC_H_IMC_WRITES.FULL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; ISOCH Partial",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1A",
+ "EventName": "UNC_H_IMC_WRITES.PARTIAL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; All Writes",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1A",
+ "EventName": "UNC_H_IMC_WRITES.ALL",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Local Reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.READS_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Local InvItoE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.INVITOE_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Remote",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Cancelled",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.CANCELLED",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Reads Local - Useful",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.READS_LOCAL_USEFUL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Remote - Useful",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.REMOTE_USEFUL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; Reads to Local I",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.READS_LOCAL_I",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; Reads to Remote I",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.READS_REMOTE_I",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; Reads to Local S",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.READS_LOCAL_S",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; Reads to Remote S",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.READS_REMOTE_S",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Read and Write Requests; Reads",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.READS",
@@ -221,7 +2319,16 @@
"Unit": "HA"
},
{
- "BriefDescription": "read requests to local home agent",
+ "BriefDescription": "Read and Write Requests; Writes",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_H_REQUESTS.WRITES",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Read and Write Requests; Local Reads",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.READS_LOCAL",
@@ -230,7 +2337,7 @@
"Unit": "HA"
},
{
- "BriefDescription": "read requests to remote home agent",
+ "BriefDescription": "Read and Write Requests; Remote Reads",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.READS_REMOTE",
@@ -239,49 +2346,391 @@
"Unit": "HA"
},
{
- "BriefDescription": "write requests to home agent",
+ "BriefDescription": "Read and Write Requests; Local Writes",
"Counter": "0,1,2,3",
"EventCode": "0x1",
- "EventName": "UNC_H_REQUESTS.WRITES",
+ "EventName": "UNC_H_REQUESTS.WRITES_LOCAL",
"PerPkg": "1",
- "UMask": "0xC",
+ "UMask": "0x4",
"Unit": "HA"
},
{
- "BriefDescription": "write requests to local home agent",
+ "BriefDescription": "Read and Write Requests; Remote Writes",
"Counter": "0,1,2,3",
"EventCode": "0x1",
- "EventName": "UNC_H_REQUESTS.WRITES_LOCAL",
+ "EventName": "UNC_H_REQUESTS.WRITES_REMOTE",
"PerPkg": "1",
- "UMask": "0x4",
+ "UMask": "0x8",
"Unit": "HA"
},
{
- "BriefDescription": "write requests to remote home agent",
+ "BriefDescription": "Read and Write Requests; Local InvItoEs",
"Counter": "0,1,2,3",
"EventCode": "0x1",
- "EventName": "UNC_H_REQUESTS.WRITES_REMOTE",
+ "EventName": "UNC_H_REQUESTS.INVITOE_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Read and Write Requests; Remote InvItoEs",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_H_REQUESTS.INVITOE_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CW_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CW_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CCW_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CW",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CCW",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CW_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CW_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CCW_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CW",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CCW",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CW_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CW_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CCW_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CW",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CCW",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN3",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "HA"
},
{
- "BriefDescription": "Conflict requests (requests for same address from multiple agents simultaneously)",
+ "BriefDescription": "SBo0 Credits Acquired; For AD Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x68",
+ "EventName": "UNC_H_SBO0_CREDITS_ACQUIRED.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Acquired; For BL Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x68",
+ "EventName": "UNC_H_SBO0_CREDITS_ACQUIRED.BL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Occupancy; For AD Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6A",
+ "EventName": "UNC_H_SBO0_CREDIT_OCCUPANCY.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Occupancy; For BL Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6A",
+ "EventName": "UNC_H_SBO0_CREDIT_OCCUPANCY.BL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Acquired; For AD Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x69",
+ "EventName": "UNC_H_SBO1_CREDITS_ACQUIRED.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Acquired; For BL Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x69",
+ "EventName": "UNC_H_SBO1_CREDITS_ACQUIRED.BL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Occupancy; For AD Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6B",
+ "EventName": "UNC_H_SBO1_CREDIT_OCCUPANCY.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Occupancy; For BL Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6B",
+ "EventName": "UNC_H_SBO1_CREDIT_OCCUPANCY.BL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Data beat the Snoop Responses; Local Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA",
+ "EventName": "UNC_H_SNOOPS_RSP_AFTER_DATA.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Data beat the Snoop Responses; Remote Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA",
+ "EventName": "UNC_H_SNOOPS_RSP_AFTER_DATA.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles with Snoops Outstanding; Local Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_H_SNOOP_CYCLES_NE.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles with Snoops Outstanding; Remote Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_H_SNOOP_CYCLES_NE.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles with Snoops Outstanding; All Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_H_SNOOP_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Snoops Outstanding Accumulator; Local Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_H_SNOOP_OCCUPANCY.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Snoops Outstanding Accumulator; Remote Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_H_SNOOP_OCCUPANCY.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received; RspI",
"Counter": "0,1,2,3",
"EventCode": "0x21",
- "EventName": "UNC_H_SNOOP_RESP.RSPCNFLCT",
+ "EventName": "UNC_H_SNOOP_RESP.RSPI",
"PerPkg": "1",
- "UMask": "0x40",
+ "UMask": "0x1",
"Unit": "HA"
},
{
- "BriefDescription": "M line forwarded from remote cache along with writeback to memory",
+ "BriefDescription": "Shared line response from remote cache",
"Counter": "0,1,2,3",
"EventCode": "0x21",
- "EventName": "UNC_H_SNOOP_RESP.RSP_FWD_WB",
+ "EventName": "UNC_H_SNOOP_RESP.RSPS",
"PerPkg": "1",
"ScaleUnit": "64Bytes",
- "UMask": "0x20",
+ "UMask": "0x2",
"Unit": "HA"
},
{
@@ -295,23 +2744,905 @@
"Unit": "HA"
},
{
- "BriefDescription": "Shared line response from remote cache",
+ "BriefDescription": "Shared line forwarded from remote cache",
"Counter": "0,1,2,3",
"EventCode": "0x21",
- "EventName": "UNC_H_SNOOP_RESP.RSPS",
+ "EventName": "UNC_H_SNOOP_RESP.RSPSFWD",
"PerPkg": "1",
"ScaleUnit": "64Bytes",
- "UMask": "0x2",
+ "UMask": "0x8",
"Unit": "HA"
},
{
- "BriefDescription": "Shared line forwarded from remote cache",
+ "BriefDescription": "Snoop Responses Received; Rsp*WB",
"Counter": "0,1,2,3",
"EventCode": "0x21",
- "EventName": "UNC_H_SNOOP_RESP.RSPSFWD",
+ "EventName": "UNC_H_SNOOP_RESP.RSP_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "M line forwarded from remote cache along with writeback to memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_SNOOP_RESP.RSP_FWD_WB",
"PerPkg": "1",
"ScaleUnit": "64Bytes",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received; RSPCNFLCT*",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_SNOOP_RESP.RSPCNFLCT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPI",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspIFwd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPIFWD",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspSFwd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPSFWD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; Rsp*WB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPxWB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; Rsp*FWD*WB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPxFWDxWB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspCnflct",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPCNFLCT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; Other",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.OTHER",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo0, AD Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6C",
+ "EventName": "UNC_H_STALL_NO_SBO_CREDIT.SBO0_AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo1, AD Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6C",
+ "EventName": "UNC_H_STALL_NO_SBO_CREDIT.SBO1_AD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo0, BL Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6C",
+ "EventName": "UNC_H_STALL_NO_SBO_CREDIT.SBO0_BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo1, BL Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6C",
+ "EventName": "UNC_H_STALL_NO_SBO_CREDIT.SBO1_BL",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION3",
+ "PerPkg": "1",
"UMask": "0x8",
"Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION8",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION9",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION10",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION11",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Full; Cycles GP Completely Used",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_H_TRACKER_CYCLES_FULL.GP",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Full; Cycles Completely Used",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_H_TRACKER_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty; Local Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_H_TRACKER_CYCLES_NE.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty; Remote Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_H_TRACKER_CYCLES_NE.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty; All Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_H_TRACKER_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy Accumultor; Local Read Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_H_TRACKER_OCCUPANCY.READS_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy Accumultor; Remote Read Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_H_TRACKER_OCCUPANCY.READS_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy Accumultor; Local Write Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_H_TRACKER_OCCUPANCY.WRITES_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy Accumultor; Remote Write Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_H_TRACKER_OCCUPANCY.WRITES_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy Accumultor; Local InvItoE Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_H_TRACKER_OCCUPANCY.INVITOE_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy Accumultor; Remote InvItoE Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_H_TRACKER_OCCUPANCY.INVITOE_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Data Pending Occupancy Accumultor; Local Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_H_TRACKER_PENDING_OCCUPANCY.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Data Pending Occupancy Accumultor; Remote Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_H_TRACKER_PENDING_OCCUPANCY.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Full; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_TxR_AD_CYCLES_FULL.SCHED0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Full; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_TxR_AD_CYCLES_FULL.SCHED1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Full; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_TxR_AD_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Full; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_TxR_AK_CYCLES_FULL.SCHED0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Full; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_TxR_AK_CYCLES_FULL.SCHED1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Full; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_TxR_AK_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_H_TxR_BL.DRS_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Core",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_H_TxR_BL.DRS_CORE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to QPI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_H_TxR_BL.DRS_QPI",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Full; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TxR_BL_CYCLES_FULL.SCHED0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Full; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TxR_BL_CYCLES_FULL.SCHED1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Full; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TxR_BL_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Injection Starvation; For AK Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6D",
+ "EventName": "UNC_H_TxR_STARVED.AK",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Injection Starvation; For BL Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6D",
+ "EventName": "UNC_H_TxR_STARVED.BL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT to HT Not Issued; Incoming Snoop Hazard",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.INCOMING_SNP_HAZARD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT to HT Not Issued; Incoming Data Hazard",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.INCOMING_BL_HAZARD",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT to HT Not Issued; Incoming Data Hazard",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.RSPACKCFLT_HAZARD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT to HT Not Issued; Incoming Data Hazard",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.WBMDATA_HAZARD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Backpressure",
+ "Counter": "0,1,2",
+ "EventCode": "0x61",
+ "EventName": "UNC_H_IOT_BACKPRESSURE.SAT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Backpressure",
+ "Counter": "0,1,2",
+ "EventCode": "0x61",
+ "EventName": "UNC_H_IOT_BACKPRESSURE.HUB",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "Counter": "0,1,2",
+ "EventCode": "0x64",
+ "EventName": "UNC_H_IOT_CTS_EAST_LO.CTS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "Counter": "0,1,2",
+ "EventCode": "0x64",
+ "EventName": "UNC_H_IOT_CTS_EAST_LO.CTS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Hi",
+ "Counter": "0,1,2",
+ "EventCode": "0x65",
+ "EventName": "UNC_H_IOT_CTS_HI.CTS2",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Hi",
+ "Counter": "0,1,2",
+ "EventCode": "0x65",
+ "EventName": "UNC_H_IOT_CTS_HI.CTS3",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "Counter": "0,1,2",
+ "EventCode": "0x62",
+ "EventName": "UNC_H_IOT_CTS_WEST_LO.CTS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "Counter": "0,1,2",
+ "EventCode": "0x62",
+ "EventName": "UNC_H_IOT_CTS_WEST_LO.CTS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound NDR Ring Transactions; Non-data Responses",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xF",
+ "EventName": "UNC_H_TxR_AD.HOM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Not Empty; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_TxR_AD_CYCLES_NE.SCHED0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Not Empty; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_TxR_AD_CYCLES_NE.SCHED1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Not Empty; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_TxR_AD_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Allocations; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_TxR_AD_INSERTS.SCHED0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Allocations; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_TxR_AD_INSERTS.SCHED1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Allocations; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_TxR_AD_INSERTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Not Empty; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_H_TxR_AK_CYCLES_NE.SCHED0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Not Empty; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_H_TxR_AK_CYCLES_NE.SCHED1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Not Empty; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_H_TxR_AK_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_TxR_AK_INSERTS.SCHED0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_TxR_AK_INSERTS.SCHED1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_TxR_AK_INSERTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Not Empty; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TxR_BL_CYCLES_NE.SCHED0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Not Empty; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TxR_BL_CYCLES_NE.SCHED1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Not Empty; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TxR_BL_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Allocations; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_TxR_BL_INSERTS.SCHED0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Allocations; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_TxR_BL_INSERTS.SCHED1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Allocations; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_TxR_BL_INSERTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.ALL",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.ALL",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "HA"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/broadwellx/uncore-interconnect.json
index 824961318c1e..071ce45620d2 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/uncore-interconnect.json
@@ -1,6 +1,6 @@
[
{
- "BriefDescription": "QPI clock ticks",
+ "BriefDescription": "Number of qfclks",
"Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_Q_CLOCKTICKS",
@@ -8,6 +8,683 @@
"Unit": "QPI LL"
},
{
+ "BriefDescription": "Count of CTO Events",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_CTO_COUNT",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Success",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.SUCCESS_RBT_HIT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - RBT Invalid",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_RBT_HIT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress and RBT Invalid",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS_RBT",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - RBT Miss",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_MISS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress and RBT Miss",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS_MISS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - RBT Miss and Invalid",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_RBT_MISS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress and RBT Miss, Invalid",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS_RBT_MISS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in L1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_Q_L1_POWER_CYCLES",
+ "PerPkg": "1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in L0p",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_Q_RxL0P_POWER_CYCLES",
+ "PerPkg": "1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in L0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xF",
+ "EventName": "UNC_Q_RxL0_POWER_CYCLES",
+ "PerPkg": "1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Bypassed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_Q_RxL_BYPASSED",
+ "PerPkg": "1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; DRS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.DRS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; NCB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NCB",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; NCS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NCS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; HOM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.HOM",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; SNP",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.SNP",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; NDR",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NDR",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed; DRS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.DRS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed; NCB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.NCB",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed; NCS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.NCS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed; HOM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.HOM",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed; SNP",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.SNP",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed; NDR",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.NDR",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VNA Credit Consumed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VNA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA",
+ "EventName": "UNC_Q_RxL_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 0; Idle and Null Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_RxL_FLITS_G0.IDLE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; SNP Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.SNP",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; HOM Request Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.HOM_REQ",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; HOM Non-Request Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.HOM_NONREQ",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; HOM Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.HOM",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; DRS Data Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.DRS_DATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; DRS Header Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.DRS_NONDATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; DRS Flits (both Header and Data)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.DRS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x18",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Data Response Rx Flits - AD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NDR_AD",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Data Response Rx Flits - AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NDR_AK",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Coherent data Rx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NCB_DATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Coherent non-data Rx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NCB_NONDATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Coherent Rx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NCB",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Coherent standard Rx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NCS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_Q_RxL_INSERTS",
+ "PerPkg": "1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - DRS; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_Q_RxL_INSERTS_DRS.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - DRS; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_Q_RxL_INSERTS_DRS.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - HOM; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC",
+ "EventName": "UNC_Q_RxL_INSERTS_HOM.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - HOM; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC",
+ "EventName": "UNC_Q_RxL_INSERTS_HOM.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NCB; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA",
+ "EventName": "UNC_Q_RxL_INSERTS_NCB.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NCB; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA",
+ "EventName": "UNC_Q_RxL_INSERTS_NCB.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NCS; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB",
+ "EventName": "UNC_Q_RxL_INSERTS_NCS.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NCS; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB",
+ "EventName": "UNC_Q_RxL_INSERTS_NCS.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NDR; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE",
+ "EventName": "UNC_Q_RxL_INSERTS_NDR.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NDR; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE",
+ "EventName": "UNC_Q_RxL_INSERTS_NDR.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - SNP; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD",
+ "EventName": "UNC_Q_RxL_INSERTS_SNP.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - SNP; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD",
+ "EventName": "UNC_Q_RxL_INSERTS_SNP.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB",
+ "EventName": "UNC_Q_RxL_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - DRS; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_DRS.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - DRS; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_DRS.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - HOM; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_HOM.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - HOM; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_HOM.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NCB; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NCB.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NCB; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NCB.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NCS; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x17",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NCS.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NCS; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x17",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NCS.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NDR; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1A",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NDR.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NDR; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1A",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NDR.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - SNP; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_SNP.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - SNP; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_SNP.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in L0p",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD",
+ "EventName": "UNC_Q_TxL0P_POWER_CYCLES",
+ "PerPkg": "1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in L0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC",
+ "EventName": "UNC_Q_TxL0_POWER_CYCLES",
+ "PerPkg": "1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Bypassed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_Q_TxL_BYPASSED",
+ "PerPkg": "1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Cycles not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_Q_TxL_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "QPI LL"
+ },
+ {
"BriefDescription": "Number of data flits transmitted . Derived from unc_q_txl_flits_g0.data",
"Counter": "0,1,2,3",
"EventName": "QPI_DATA_BANDWIDTH_TX",
@@ -17,6 +694,15 @@
"Unit": "QPI LL"
},
{
+ "BriefDescription": "Number of data flits transmitted ",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G0.DATA",
+ "PerPkg": "1",
+ "ScaleUnit": "8Bytes",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
"BriefDescription": "Number of non data (control) flits transmitted . Derived from unc_q_txl_flits_g0.non_data",
"Counter": "0,1,2,3",
"EventName": "QPI_CTL_BANDWIDTH_TX",
@@ -24,5 +710,745 @@
"ScaleUnit": "8Bytes",
"UMask": "0x4",
"Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Number of non data (control) flits transmitted ",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G0.NON_DATA",
+ "PerPkg": "1",
+ "ScaleUnit": "8Bytes",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; SNP Flits",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G1.SNP",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; HOM Request Flits",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G1.HOM_REQ",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; HOM Non-Request Flits",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G1.HOM_NONREQ",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; HOM Flits",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G1.HOM",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; DRS Data Flits",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G1.DRS_DATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; DRS Header Flits",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G1.DRS_NONDATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; DRS Flits (both Header and Data)",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G1.DRS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x18",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Data Response Tx Flits - AD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NDR_AD",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Data Response Tx Flits - AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NDR_AK",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Coherent data Tx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NCB_DATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Coherent non-data Tx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NCB_NONDATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Coherent Bypass Tx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NCB",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Coherent standard Tx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NCS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_Q_TxL_INSERTS",
+ "PerPkg": "1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_Q_TxL_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - HOM; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x26",
+ "EventName": "UNC_Q_TxR_AD_HOM_CREDIT_ACQUIRED.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - HOM; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x26",
+ "EventName": "UNC_Q_TxR_AD_HOM_CREDIT_ACQUIRED.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD HOM; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_Q_TxR_AD_HOM_CREDIT_OCCUPANCY.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD HOM; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_Q_TxR_AD_HOM_CREDIT_OCCUPANCY.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_Q_TxR_AD_NDR_CREDIT_ACQUIRED.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_Q_TxR_AD_NDR_CREDIT_ACQUIRED.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_Q_TxR_AD_NDR_CREDIT_OCCUPANCY.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_Q_TxR_AD_NDR_CREDIT_OCCUPANCY.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - SNP; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_Q_TxR_AD_SNP_CREDIT_ACQUIRED.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - SNP; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_Q_TxR_AD_SNP_CREDIT_ACQUIRED.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD SNP; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_Q_TxR_AD_SNP_CREDIT_OCCUPANCY.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD SNP; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_Q_TxR_AD_SNP_CREDIT_OCCUPANCY.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AK NDR",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_Q_TxR_AK_NDR_CREDIT_ACQUIRED",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AK NDR",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x25",
+ "EventName": "UNC_Q_TxR_AK_NDR_CREDIT_OCCUPANCY",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - DRS; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_Q_TxR_BL_DRS_CREDIT_ACQUIRED.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - DRS; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_Q_TxR_BL_DRS_CREDIT_ACQUIRED.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - DRS; for Shared VN",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_Q_TxR_BL_DRS_CREDIT_ACQUIRED.VN_SHR",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL DRS; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1F",
+ "EventName": "UNC_Q_TxR_BL_DRS_CREDIT_OCCUPANCY.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL DRS; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1F",
+ "EventName": "UNC_Q_TxR_BL_DRS_CREDIT_OCCUPANCY.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL DRS; for Shared VN",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1F",
+ "EventName": "UNC_Q_TxR_BL_DRS_CREDIT_OCCUPANCY.VN_SHR",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - NCB; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_Q_TxR_BL_NCB_CREDIT_ACQUIRED.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - NCB; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_Q_TxR_BL_NCB_CREDIT_ACQUIRED.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL NCB; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_Q_TxR_BL_NCB_CREDIT_OCCUPANCY.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL NCB; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_Q_TxR_BL_NCB_CREDIT_OCCUPANCY.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - NCS; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_Q_TxR_BL_NCS_CREDIT_ACQUIRED.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - NCS; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_Q_TxR_BL_NCS_CREDIT_ACQUIRED.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL NCS; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_Q_TxR_BL_NCS_CREDIT_OCCUPANCY.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL NCS; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_Q_TxR_BL_NCS_CREDIT_OCCUPANCY.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VNA Credits Returned",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_Q_VNA_CREDIT_RETURNS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VNA Credits Pending Return - Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_Q_VNA_CREDIT_RETURN_OCCUPANCY",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "CRC Errors Detected; LinkInit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_CRC_ERRORS.LINK_INIT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "UNC_Q_RxL_CRC_ERRORS.NORMAL_OP",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_CRC_ERRORS.NORMAL_OP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - DRS; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xF",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_DRS.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - DRS; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xF",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_DRS.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - HOM; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_HOM.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - HOM; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_HOM.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - NCB; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_NCB.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - NCB; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_NCB.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - NCS; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_NCS.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - NCS; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_NCS.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - NDR; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_NDR.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - NDR; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_NDR.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - SNP; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_SNP.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - SNP; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_SNP.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - HOM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.BGF_DRS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - SNP",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.BGF_NCB",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - NDR",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.BGF_NCS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - DRS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.BGF_HOM",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - NCB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.BGF_SNP",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - NCS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.BGF_NDR",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; Egress Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.EGRESS_CREDITS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; GV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.GV",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - HOM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3A",
+ "EventName": "UNC_Q_RxL_STALLS_VN1.BGF_DRS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - SNP",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3A",
+ "EventName": "UNC_Q_RxL_STALLS_VN1.BGF_NCB",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - NDR",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3A",
+ "EventName": "UNC_Q_RxL_STALLS_VN1.BGF_NCS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - DRS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3A",
+ "EventName": "UNC_Q_RxL_STALLS_VN1.BGF_HOM",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - NCB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3A",
+ "EventName": "UNC_Q_RxL_STALLS_VN1.BGF_SNP",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - NCS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3A",
+ "EventName": "UNC_Q_RxL_STALLS_VN1.BGF_NDR",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with no LLR Credits; LLR is full",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_TxL_CRC_NO_CREDITS.FULL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with no LLR Credits; LLR is almost full",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_TxL_CRC_NO_CREDITS.ALMOST_FULL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
}
]
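
Each object in these pmu-events JSON files maps a symbolic uncore event name to its hardware encoding: EventCode and UMask select the event on the PMU named in Unit (QPI LL or iMC here), PerPkg marks it as a per-socket counter, and optional fields such as ScaleUnit add scaling hints; the perf build turns these entries into event aliases. As a minimal illustration only (not part of the patch; the checkout-relative path below is an assumption), the entries can be listed like this:

    # Sketch: read one of the pmu-events JSON files from this patch and print
    # each event's encoding. Keys match the objects above; EventCode/UMask are
    # optional on some entries (e.g. fixed clocktick events), hence .get().
    import json

    PATH = "tools/perf/pmu-events/arch/x86/broadwellx/uncore-memory.json"  # assumed path

    with open(PATH) as f:
        events = json.load(f)

    for ev in events:
        name = ev["EventName"]
        code = ev.get("EventCode", "0x0")
        umask = ev.get("UMask", "0x0")
        unit = ev.get("Unit", "")
        # e.g. UNC_M_CAS_COUNT.RD -> event=0x4,umask=0x3 on the iMC PMU
        print(f"{name:45s} unit={unit:8s} event={code},umask={umask}")
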
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/uncore-memory.json b/tools/perf/pmu-events/arch/x86/broadwellx/uncore-memory.json
index 66eed399724c..302e956a82ed 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/uncore-memory.json
@@ -1,5 +1,77 @@
[
{
+ "BriefDescription": "DRAM Activate Count; Activate due to Read",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_M_ACT_COUNT.RD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Activate Count; Activate due to Write",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_M_ACT_COUNT.WR",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Activate Count; Activate due to Write",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_M_ACT_COUNT.BYP",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "ACT command issued by 2 cycle bypass",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M_BYP_CMDS.ACT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CAS command issued by 2 cycle bypass",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M_BYP_CMDS.CAS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PRE command issued by 2 cycle bypass",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M_BYP_CMDS.PRE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM RD_CAS (w/ and w/out auto-pre)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_REG",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Underfill Read Issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
"BriefDescription": "read requests to memory controller. Derived from unc_m_cas_count.rd",
"Counter": "0,1,2,3",
"EventCode": "0x4",
@@ -10,6 +82,34 @@
"Unit": "iMC"
},
{
+ "BriefDescription": "read requests to memory controller",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD",
+ "PerPkg": "1",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Write Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR_WMM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Read Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR_RMM",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
"BriefDescription": "write requests to memory controller. Derived from unc_m_cas_count.wr",
"Counter": "0,1,2,3",
"EventCode": "0x4",
@@ -20,44 +120,323 @@
"Unit": "iMC"
},
{
- "BriefDescription": "Memory controller clock ticks",
+ "BriefDescription": "write requests to memory controller",
"Counter": "0,1,2,3",
- "EventName": "UNC_M_CLOCKTICKS",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR",
+ "PerPkg": "1",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM WR_CAS (w/ and w/out auto-pre)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.ALL",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Read CAS issued in WMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_WMM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Read CAS issued in RMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_RMM",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_CLOCKTICKS_P",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_M_DCLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge All Commands",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_M_DRAM_PRE_ALL",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_M_DRAM_REFRESH.PANIC",
"PerPkg": "1",
+ "UMask": "0x2",
"Unit": "iMC"
},
{
- "BriefDescription": "Cycles where DRAM ranks are in power down (CKE) mode",
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_M_DRAM_REFRESH.HIGH",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "ECC Correctable Errors",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_M_ECC_CORRECTABLE_ERRORS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Read Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.READ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Write Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.WRITE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Partial Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.PARTIAL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Isoch Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Channel DLLOFF Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_M_POWER_CHANNEL_DLLOFF",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Channel PPD Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M_POWER_CHANNEL_PPD",
- "MetricExpr": "(UNC_M_POWER_CHANNEL_PPD / UNC_M_CLOCKTICKS) * 100.",
- "MetricName": "power_channel_ppd %",
"PerPkg": "1",
"Unit": "iMC"
},
{
- "BriefDescription": "Cycles all ranks are in critical thermal throttle",
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Critical Throttle Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES",
- "MetricExpr": "(UNC_M_POWER_CRITICAL_THROTTLE_CYCLES / UNC_M_CLOCKTICKS) * 100.",
- "MetricName": "power_critical_throttle_cycles %",
"PerPkg": "1",
"Unit": "iMC"
},
{
- "BriefDescription": "Cycles Memory is in self refresh power mode",
+ "BriefDescription": "UNC_M_POWER_PCU_THROTTLING",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x42",
+ "EventName": "UNC_M_POWER_PCU_THROTTLING",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Clock-Enabled Self-Refresh",
"Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M_POWER_SELF_REFRESH",
- "MetricExpr": "(UNC_M_POWER_SELF_REFRESH / UNC_M_CLOCKTICKS) * 100.",
- "MetricName": "power_self_refresh %",
"PerPkg": "1",
"Unit": "iMC"
},
{
- "BriefDescription": "Pre-charges due to page misses",
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Preemption Count; Read over Read Preemption",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_M_PREEMPTION.RD_PREEMPT_RD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Preemption Count; Read over Write Preemption",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_M_PREEMPTION.RD_PREEMPT_WR",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.; Precharges due to page miss",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.PAGE_MISS",
@@ -66,7 +445,16 @@
"Unit": "iMC"
},
{
- "BriefDescription": "Pre-charge for reads",
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to timer expiration",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_M_PRE_COUNT.PAGE_CLOSE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to read",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.RD",
@@ -75,12 +463,2445 @@
"Unit": "iMC"
},
{
- "BriefDescription": "Pre-charge for writes",
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to write",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.WR",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to bypass",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_M_PRE_COUNT.BYP",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with LOW priority",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.LOW",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with MEDIUM priority",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.MED",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with HIGH priority",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.HIGH",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with PANIC NON ISOCH priority (starved)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.PANIC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_M_RPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_M_RPQ_INSERTS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "VMSE MXB write buffer occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_M_VMSE_MXB_WR_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "VMSE WR PUSH issued; VMSE write PUSH issued in WMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_M_VMSE_WR_PUSH.WMM",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "VMSE WR PUSH issued; VMSE write PUSH issued in RMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_M_VMSE_WR_PUSH.RMM",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Transition from WMM to RMM because of low threshold; Transition from WMM to RMM because of starve counter",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_M_WMM_TO_RMM.LOW_THRESH",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Transition from WMM to RMM because of low threshold",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_M_WMM_TO_RMM.STARVE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Transition from WMM to RMM because of low threshold",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_M_WMM_TO_RMM.VMSE_RETRY",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Full Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_M_WPQ_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_M_WPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_M_WPQ_READ_HIT",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_M_WPQ_WRITE_HIT",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Not getting the requested Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_M_WRONG_MM",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Clockticks in the Memory Controller using one of the programmable counters",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_M_CLOCKTICKS_P",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Clockticks in the Memory Controller using a dedicated 48-bit Fixed Counter",
+ "Counter": "FIXED",
+ "EventCode": "0xff",
+ "EventName": "UNC_M_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "iMC"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/uncore-other.json b/tools/perf/pmu-events/arch/x86/broadwellx/uncore-other.json
new file mode 100644
index 000000000000..289a726c9ac3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/uncore-other.json
@@ -0,0 +1,3252 @@
+[
+ {
+ "BriefDescription": "Total Write Cache Occupancy; Any Source",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.ANY",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Total Write Cache Occupancy; Select Source",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.SOURCE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Clocks in the IRP",
+ "Counter": "0,1",
+ "EventName": "UNC_I_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; PCIRdCur",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.PCIRDCUR",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; CRd",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.CRD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; DRd",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.DRD",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; RFO",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.RFO",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; PCIItoM",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.PCITOM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; PCIDCAHin5t",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.PCIDCAHINT",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; WbMtoI",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; CLFlush",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.CLFLUSH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Fastpath Requests",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.FAST_REQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Fastpath Rejects",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.FAST_REJ",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Cache Inserts of Read Transactions as Secondary",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.2ND_RD_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Cache Inserts of Write Transactions as Secondary",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.2ND_WR_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Cache Inserts of Atomic Transactions as Secondary",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.2ND_ATOMIC_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Fastpath Transfers From Primary to Secondary",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.FAST_XFER",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Prefetch Ack Hints From Primary to Secondary",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.PF_ACK_HINT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Prefetch TimeOut",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.PF_TIMEOUT",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of I Line",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.SLOW_I",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of S Line",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.SLOW_S",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of E Line",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.SLOW_E",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of M Line",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.SLOW_M",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.LOST_FWD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Received Invalid",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.SEC_RCVD_INVLD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Received Valid",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.SEC_RCVD_VLD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Data Throttled",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.DATA_THROTTLE",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "AK Ingress Occupancy",
+ "Counter": "0,1",
+ "EventCode": "0xA",
+ "EventName": "UNC_I_RxR_AK_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "UNC_I_RxR_BL_DRS_CYCLES_FULL",
+ "Counter": "0,1",
+ "EventCode": "0x4",
+ "EventName": "UNC_I_RxR_BL_DRS_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL Ingress Occupancy - DRS",
+ "Counter": "0,1",
+ "EventCode": "0x1",
+ "EventName": "UNC_I_RxR_BL_DRS_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "UNC_I_RxR_BL_DRS_OCCUPANCY",
+ "Counter": "0,1",
+ "EventCode": "0x7",
+ "EventName": "UNC_I_RxR_BL_DRS_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "UNC_I_RxR_BL_NCB_CYCLES_FULL",
+ "Counter": "0,1",
+ "EventCode": "0x5",
+ "EventName": "UNC_I_RxR_BL_NCB_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL Ingress Occupancy - NCB",
+ "Counter": "0,1",
+ "EventCode": "0x2",
+ "EventName": "UNC_I_RxR_BL_NCB_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "UNC_I_RxR_BL_NCB_OCCUPANCY",
+ "Counter": "0,1",
+ "EventCode": "0x8",
+ "EventName": "UNC_I_RxR_BL_NCB_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "UNC_I_RxR_BL_NCS_CYCLES_FULL",
+ "Counter": "0,1",
+ "EventCode": "0x6",
+ "EventName": "UNC_I_RxR_BL_NCS_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL Ingress Occupancy - NCS",
+ "Counter": "0,1",
+ "EventCode": "0x3",
+ "EventName": "UNC_I_RxR_BL_NCS_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "UNC_I_RxR_BL_NCS_OCCUPANCY",
+ "Counter": "0,1",
+ "EventCode": "0x9",
+ "EventName": "UNC_I_RxR_BL_NCS_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Miss",
+ "Counter": "0,1",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.MISS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Hit I",
+ "Counter": "0,1",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_I",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Hit E or S",
+ "Counter": "0,1",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_ES",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Hit M",
+ "Counter": "0,1",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_M",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; SnpCode",
+ "Counter": "0,1",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.SNPCODE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; SnpData",
+ "Counter": "0,1",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.SNPDATA",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; SnpInv",
+ "Counter": "0,1",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.SNPINV",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Reads",
+ "Counter": "0,1",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TRANSACTIONS.READS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Writes",
+ "Counter": "0,1",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TRANSACTIONS.WRITES",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Read Prefetches",
+ "Counter": "0,1",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TRANSACTIONS.RD_PREF",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Write Prefetches",
+ "Counter": "0,1",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TRANSACTIONS.WR_PREF",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Atomic",
+ "Counter": "0,1",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TRANSACTIONS.ATOMIC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Other",
+ "Counter": "0,1",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TRANSACTIONS.OTHER",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Select Source",
+ "Counter": "0,1",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TRANSACTIONS.ORDERINGQ",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No AD Egress Credit Stalls",
+ "Counter": "0,1",
+ "EventCode": "0x18",
+ "EventName": "UNC_I_TxR_AD_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No BL Egress Credit Stalls",
+ "Counter": "0,1",
+ "EventCode": "0x19",
+ "EventName": "UNC_I_TxR_BL_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
+ "EventCode": "0xE",
+ "EventName": "UNC_I_TxR_DATA_INSERTS_NCB",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
+ "EventCode": "0xF",
+ "EventName": "UNC_I_TxR_DATA_INSERTS_NCS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Request Queue Occupancy",
+ "Counter": "0,1",
+ "EventCode": "0xD",
+ "EventName": "UNC_I_TxR_REQUEST_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Number of uclks in domain",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_R2_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "UNC_R2_IIO_CREDIT.PRQ_QPI0",
+ "Counter": "0,1",
+ "EventCode": "0x2D",
+ "EventName": "UNC_R2_IIO_CREDIT.PRQ_QPI0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "UNC_R2_IIO_CREDIT.PRQ_QPI1",
+ "Counter": "0,1",
+ "EventCode": "0x2D",
+ "EventName": "UNC_R2_IIO_CREDIT.PRQ_QPI1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "UNC_R2_IIO_CREDIT.ISOCH_QPI0",
+ "Counter": "0,1",
+ "EventCode": "0x2D",
+ "EventName": "UNC_R2_IIO_CREDIT.ISOCH_QPI0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "UNC_R2_IIO_CREDIT.ISOCH_QPI1",
+ "Counter": "0,1",
+ "EventCode": "0x2D",
+ "EventName": "UNC_R2_IIO_CREDIT.ISOCH_QPI1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CW_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CW_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CCW_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CW",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CCW",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "AK Ingress Bounced; Up",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_R2_RING_AK_BOUNCES.UP",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "AK Ingress Bounced; Dn",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_R2_RING_AK_BOUNCES.DN",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CW_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CW_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CCW_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CW",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CCW",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CW_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CW_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CCW_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CW",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CCW",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 IV Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA",
+ "EventName": "UNC_R2_RING_IV_USED.CW",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 IV Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA",
+ "EventName": "UNC_R2_RING_IV_USED.CCW",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 IV Ring in Use; Any",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA",
+ "EventName": "UNC_R2_RING_IV_USED.ANY",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; NCB",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_R2_RxR_CYCLES_NE.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; NCS",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_R2_RxR_CYCLES_NE.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NCB",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_R2_RxR_INSERTS.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NCS",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_R2_RxR_INSERTS.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy Accumulator; DRS",
+ "EventCode": "0x13",
+ "EventName": "UNC_R2_RxR_OCCUPANCY.DRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Acquired; For AD Ring",
+ "Counter": "0,1",
+ "EventCode": "0x28",
+ "EventName": "UNC_R2_SBO0_CREDITS_ACQUIRED.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Acquired; For BL Ring",
+ "Counter": "0,1",
+ "EventCode": "0x28",
+ "EventName": "UNC_R2_SBO0_CREDITS_ACQUIRED.BL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo0, AD Ring",
+ "Counter": "0,1",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R2_STALL_NO_SBO_CREDIT.SBO0_AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo1, AD Ring",
+ "Counter": "0,1",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R2_STALL_NO_SBO_CREDIT.SBO1_AD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo0, BL Ring",
+ "Counter": "0,1",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R2_STALL_NO_SBO_CREDIT.SBO0_BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo1, BL Ring",
+ "Counter": "0,1",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R2_STALL_NO_SBO_CREDIT.SBO1_BL",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Full; AD",
+ "EventCode": "0x25",
+ "EventName": "UNC_R2_TxR_CYCLES_FULL.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Full; AK",
+ "EventCode": "0x25",
+ "EventName": "UNC_R2_TxR_CYCLES_FULL.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Full; BL",
+ "EventCode": "0x25",
+ "EventName": "UNC_R2_TxR_CYCLES_FULL.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Not Empty; AD",
+ "EventCode": "0x23",
+ "EventName": "UNC_R2_TxR_CYCLES_NE.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Not Empty; AK",
+ "EventCode": "0x23",
+ "EventName": "UNC_R2_TxR_CYCLES_NE.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Not Empty; BL",
+ "EventCode": "0x23",
+ "EventName": "UNC_R2_TxR_CYCLES_NE.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; AD CCW",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.DN_AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; BL CCW",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.DN_BL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; AK CCW",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.DN_AK",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; AK CCW",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.UP_AD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; BL CCW",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.UP_BL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; BL CW",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.UP_AK",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credit Acquired; DRS",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.DRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credit Acquired; NCB",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credit Acquired; NCS",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credits in Use; DRS",
+ "Counter": "0,1",
+ "EventCode": "0x32",
+ "EventName": "UNC_R2_IIO_CREDITS_USED.DRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credits in Use; NCB",
+ "Counter": "0,1",
+ "EventCode": "0x32",
+ "EventName": "UNC_R2_IIO_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credits in Use; NCS",
+ "Counter": "0,1",
+ "EventCode": "0x32",
+ "EventName": "UNC_R2_IIO_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Occupancy; For AD Ring",
+ "EventCode": "0x2A",
+ "EventName": "UNC_R2_SBO0_CREDIT_OCCUPANCY.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Occupancy; For BL Ring",
+ "EventCode": "0x2A",
+ "EventName": "UNC_R2_SBO0_CREDIT_OCCUPANCY.BL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.ALL",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.ALL",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.ALL",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Number of uclks in domain",
+ "Counter": "0,1,2",
+ "EventCode": "0x1",
+ "EventName": "UNC_R3_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x1F",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO8",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x1F",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO9",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x1F",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO10",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x1F",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO11",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x1F",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO12",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x1F",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO13",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x1F",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO14_16",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x1F",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO_15_17",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "HA/R2 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2D",
+ "EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.HA0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "HA/R2 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2D",
+ "EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.HA1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "HA/R2 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2D",
+ "EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.R2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "HA/R2 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2D",
+ "EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.R2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x20",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x20",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN0_HOM",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x20",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN0_SNP",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x20",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN0_NDR",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x20",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN1_HOM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x20",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN1_SNP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x20",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN1_NDR",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 BL Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x21",
+ "EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 BL Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x21",
+ "EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN1_HOM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 BL Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x21",
+ "EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN1_SNP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 BL Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x21",
+ "EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN1_NDR",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2E",
+ "EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2E",
+ "EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN1_HOM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2E",
+ "EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN1_SNP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2E",
+ "EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN1_NDR",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2F",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2F",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN0_HOM",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2F",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN0_SNP",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2F",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN0_NDR",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2F",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN1_HOM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2F",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN1_SNP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2F",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN1_NDR",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CW_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CW_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CCW_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Clockwise",
+ "Counter": "0,1,2",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CW",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Counterclockwise",
+ "Counter": "0,1,2",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CCW",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CW_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CW_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CCW_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Clockwise",
+ "Counter": "0,1,2",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CW",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Counterclockwise",
+ "Counter": "0,1,2",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CCW",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CW_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CW_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CCW_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Clockwise",
+ "Counter": "0,1,2",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CW",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Counterclockwise",
+ "Counter": "0,1,2",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CCW",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 IV Ring in Use; Clockwise",
+ "Counter": "0,1,2",
+ "EventCode": "0xA",
+ "EventName": "UNC_R3_RING_IV_USED.CW",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 IV Ring in Use; Any",
+ "Counter": "0,1,2",
+ "EventCode": "0xA",
+ "EventName": "UNC_R3_RING_IV_USED.ANY",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ring Stop Starved; AK",
+ "Counter": "0,1,2",
+ "EventCode": "0xE",
+ "EventName": "UNC_R3_RING_SINK_STARVED.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; HOM",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_R3_RxR_CYCLES_NE.HOM",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; SNP",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_R3_RxR_CYCLES_NE.SNP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; NDR",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_R3_RxR_CYCLES_NE.NDR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Cycles Not Empty; HOM",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_R3_RxR_CYCLES_NE_VN1.HOM",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Cycles Not Empty; SNP",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_R3_RxR_CYCLES_NE_VN1.SNP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Cycles Not Empty; NDR",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_R3_RxR_CYCLES_NE_VN1.NDR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Cycles Not Empty; DRS",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_R3_RxR_CYCLES_NE_VN1.DRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Cycles Not Empty; NCB",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_R3_RxR_CYCLES_NE_VN1.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Cycles Not Empty; NCS",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_R3_RxR_CYCLES_NE_VN1.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; HOM",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.HOM",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; SNP",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.SNP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NDR",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.NDR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; DRS",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.DRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NCB",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NCS",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Allocations; HOM",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_R3_RxR_INSERTS_VN1.HOM",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Allocations; SNP",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_R3_RxR_INSERTS_VN1.SNP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Allocations; NDR",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_R3_RxR_INSERTS_VN1.NDR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Allocations; DRS",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_R3_RxR_INSERTS_VN1.DRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Allocations; NCB",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_R3_RxR_INSERTS_VN1.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Allocations; NCS",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_R3_RxR_INSERTS_VN1.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Occupancy Accumulator; HOM",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY_VN1.HOM",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Occupancy Accumulator; SNP",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY_VN1.SNP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Occupancy Accumulator; NDR",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY_VN1.NDR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Occupancy Accumulator; DRS",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY_VN1.DRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Occupancy Accumulator; NCB",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY_VN1.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Occupancy Accumulator; NCS",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY_VN1.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Acquired; For AD Ring",
+ "Counter": "0,1",
+ "EventCode": "0x28",
+ "EventName": "UNC_R3_SBO0_CREDITS_ACQUIRED.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Acquired; For BL Ring",
+ "Counter": "0,1",
+ "EventCode": "0x28",
+ "EventName": "UNC_R3_SBO0_CREDITS_ACQUIRED.BL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Acquired; For AD Ring",
+ "Counter": "0,1",
+ "EventCode": "0x29",
+ "EventName": "UNC_R3_SBO1_CREDITS_ACQUIRED.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Acquired; For BL Ring",
+ "Counter": "0,1",
+ "EventCode": "0x29",
+ "EventName": "UNC_R3_SBO1_CREDITS_ACQUIRED.BL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo0, AD Ring",
+ "Counter": "0,1",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R3_STALL_NO_SBO_CREDIT.SBO0_AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo1, AD Ring",
+ "Counter": "0,1",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R3_STALL_NO_SBO_CREDIT.SBO1_AD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo0, BL Ring",
+ "Counter": "0,1",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R3_STALL_NO_SBO_CREDIT.SBO0_BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo1, BL Ring",
+ "Counter": "0,1",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R3_STALL_NO_SBO_CREDIT.SBO1_BL",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; AD CCW",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R3_TxR_NACK.DN_AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; BL CCW",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R3_TxR_NACK.DN_BL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; AK CCW",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R3_TxR_NACK.DN_AK",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; AK CCW",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R3_TxR_NACK.UP_AD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; BL CCW",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R3_TxR_NACK.UP_BL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; BL CW",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R3_TxR_NACK.UP_AK",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; HOM Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.HOM",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; SNP Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.SNP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; NDR Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.NDR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; DRS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.DRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; NCB Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; NCS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; HOM Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.HOM",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; SNP Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.SNP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; NDR Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.NDR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; DRS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.DRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; NCB Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; NCS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Acquisition Failed on DRS; HOM Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x39",
+ "EventName": "UNC_R3_VN1_CREDITS_REJECT.HOM",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Acquisition Failed on DRS; SNP Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x39",
+ "EventName": "UNC_R3_VN1_CREDITS_REJECT.SNP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Acquisition Failed on DRS; NDR Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x39",
+ "EventName": "UNC_R3_VN1_CREDITS_REJECT.NDR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Acquisition Failed on DRS; DRS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x39",
+ "EventName": "UNC_R3_VN1_CREDITS_REJECT.DRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Acquisition Failed on DRS; NCB Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x39",
+ "EventName": "UNC_R3_VN1_CREDITS_REJECT.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Acquisition Failed on DRS; NCS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x39",
+ "EventName": "UNC_R3_VN1_CREDITS_REJECT.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; HOM Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x38",
+ "EventName": "UNC_R3_VN1_CREDITS_USED.HOM",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; SNP Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x38",
+ "EventName": "UNC_R3_VN1_CREDITS_USED.SNP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; NDR Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x38",
+ "EventName": "UNC_R3_VN1_CREDITS_USED.NDR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; DRS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x38",
+ "EventName": "UNC_R3_VN1_CREDITS_USED.DRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; NCB Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x38",
+ "EventName": "UNC_R3_VN1_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; NCS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x38",
+ "EventName": "UNC_R3_VN1_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA credit Acquisitions; HOM Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_R3_VNA_CREDITS_ACQUIRED.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA credit Acquisitions; HOM Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_R3_VNA_CREDITS_ACQUIRED.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; HOM Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.HOM",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; SNP Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.SNP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; NDR Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.NDR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; DRS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.DRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; NCB Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; NCS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "IOT Backpressure",
+ "Counter": "0,1,2",
+ "EventCode": "0xB",
+ "EventName": "UNC_R3_IOT_BACKPRESSURE.SAT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "IOT Backpressure",
+ "Counter": "0,1,2",
+ "EventCode": "0xB",
+ "EventName": "UNC_R3_IOT_BACKPRESSURE.HUB",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Hi",
+ "Counter": "0,1,2",
+ "EventCode": "0xD",
+ "EventName": "UNC_R3_IOT_CTS_HI.CTS2",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Hi",
+ "Counter": "0,1,2",
+ "EventCode": "0xD",
+ "EventName": "UNC_R3_IOT_CTS_HI.CTS3",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "Counter": "0,1,2",
+ "EventCode": "0xC",
+ "EventName": "UNC_R3_IOT_CTS_LO.CTS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "Counter": "0,1,2",
+ "EventCode": "0xC",
+ "EventName": "UNC_R3_IOT_CTS_LO.CTS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Occupancy; For AD Ring",
+ "EventCode": "0x2A",
+ "EventName": "UNC_R3_SBO0_CREDIT_OCCUPANCY.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Occupancy; For BL Ring",
+ "EventCode": "0x2A",
+ "EventName": "UNC_R3_SBO0_CREDIT_OCCUPANCY.BL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Occupancy; For AD Ring",
+ "EventCode": "0x2B",
+ "EventName": "UNC_R3_SBO1_CREDIT_OCCUPANCY.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Occupancy; For BL Ring",
+ "EventCode": "0x2B",
+ "EventName": "UNC_R3_SBO1_CREDIT_OCCUPANCY.BL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; All",
+ "Counter": "0,1,2",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.ALL",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; All",
+ "Counter": "0,1,2",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.ALL",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; All",
+ "Counter": "0,1,2",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.ALL",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Bounce Control",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA",
+ "EventName": "UNC_S_BOUNCE_CONTROL",
+ "PerPkg": "1",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Uncore Clocks",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_S_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "FaST wire asserted",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_S_FAST_ASSERTED",
+ "PerPkg": "1",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_S_RING_AD_USED.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_S_RING_AD_USED.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down and Event",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_S_RING_AD_USED.DOWN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_S_RING_AD_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_S_RING_AD_USED.UP",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_S_RING_AD_USED.DOWN",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_S_RING_AK_USED.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_S_RING_AK_USED.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down and Event",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_S_RING_AK_USED.DOWN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_S_RING_AK_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_S_RING_AK_USED.UP",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_S_RING_AK_USED.DOWN",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_S_RING_BL_USED.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_S_RING_BL_USED.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down and Event",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_S_RING_BL_USED.DOWN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_S_RING_BL_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_S_RING_BL_USED.UP",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_S_RING_BL_USED.DOWN",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_S_RING_BOUNCES.AD_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; Acknowledgements to core",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_S_RING_BOUNCES.AK_CORE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; Data Responses to core",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_S_RING_BOUNCES.BL_CORE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; Snoops of processor's cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_S_RING_BOUNCES.IV_CORE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Any",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_S_RING_IV_USED.UP",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Any",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_S_RING_IV_USED.DN",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Bypass; AD - Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_S_RxR_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Bypass; AD - Bounces",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_S_RxR_BYPASS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Bypass; BL - Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_S_RxR_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Bypass; BL - Bounces",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_S_RxR_BYPASS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Bypass; AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_S_RxR_BYPASS.AK",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Bypass; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_S_RxR_BYPASS.IV",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; AD - Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_S_RxR_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; AD - Bounces",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_S_RxR_INSERTS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; BL - Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_S_RxR_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; BL - Bounces",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_S_RxR_INSERTS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_S_RxR_INSERTS.AK",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_S_RxR_INSERTS.IV",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; AD - Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_S_RxR_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; AD - Bounces",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_S_RxR_OCCUPANCY.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; BL - Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_S_RxR_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; BL - Bounces",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_S_RxR_OCCUPANCY.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_S_RxR_OCCUPANCY.AK",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_S_RxR_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "UNC_S_TxR_ADS_USED.AD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_S_TxR_ADS_USED.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "UNC_S_TxR_ADS_USED.AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_S_TxR_ADS_USED.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "UNC_S_TxR_ADS_USED.BL",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_S_TxR_ADS_USED.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AD - Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_S_TxR_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AD - Bounces",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_S_TxR_INSERTS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; BL - Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_S_TxR_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; BL - Bounces",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_S_TxR_INSERTS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_S_TxR_INSERTS.AK",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_S_TxR_INSERTS.IV",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Egress Occupancy; AD - Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_S_TxR_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Egress Occupancy; AD - Bounces",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_S_TxR_OCCUPANCY.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Egress Occupancy; BL - Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_S_TxR_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Egress Occupancy; BL - Bounces",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_S_TxR_OCCUPANCY.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Egress Occupancy; AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_S_TxR_OCCUPANCY.AK",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Egress Occupancy; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_S_TxR_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "UNC_S_RING_SINK_STARVED.AD_CACHE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_S_RING_SINK_STARVED.AD_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "UNC_S_RING_SINK_STARVED.AK_CORE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_S_RING_SINK_STARVED.AK_CORE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "UNC_S_RING_SINK_STARVED.BL_CORE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_S_RING_SINK_STARVED.BL_CORE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "UNC_S_RING_SINK_STARVED.IV_CORE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_S_RING_SINK_STARVED.IV_CORE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; AD - Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_S_RxR_BUSY_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; AD - Bounces",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_S_RxR_BUSY_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; BL - Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_S_RxR_BUSY_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; BL - Bounces",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_S_RxR_BUSY_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; AD - Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_S_RxR_CRD_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; AD - Bounces",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_S_RxR_CRD_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; BL - Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_S_RxR_CRD_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; BL - Bounces",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_S_RxR_CRD_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_S_RxR_CRD_STARVED.AK",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_S_RxR_CRD_STARVED.IV",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; IVF Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_S_RxR_CRD_STARVED.IFV",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto AD Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_S_TxR_STARVED.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto AK Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_S_TxR_STARVED.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto BL Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_S_TxR_STARVED.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto IV Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_S_TxR_STARVED.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_S_RING_AD_USED.ALL",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_S_RING_AK_USED.ALL",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_S_RING_BL_USED.ALL",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "VLW Received",
+ "Counter": "0,1",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.DOORBELL_RCVD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Cycles PHOLD Assert to Ack; Assert to ACK",
+ "Counter": "0,1",
+ "EventCode": "0x45",
+ "EventName": "UNC_U_PHOLD_CYCLES.ASSERT_TO_ACK",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "RACU Request",
+ "Counter": "0,1",
+ "EventCode": "0x46",
+ "EventName": "UNC_U_RACU_REQUESTS",
+ "PerPkg": "1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "Counter": "0,1",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.ENABLE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "Counter": "0,1",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.DISABLE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "Counter": "0,1",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.U2C_ENABLE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "Counter": "0,1",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.U2C_DISABLE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Monitor T0",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.MONITOR_T0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Monitor T1",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.MONITOR_T1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Livelock",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.LIVELOCK",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; LTError",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.LTERROR",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Correctable Machine Check",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.CMC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Uncorrectable Machine Check",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.UMC",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Trap",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.TRAP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Other",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.OTHER",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Clockticks in the UBOX using a dedicated 48-bit Fixed Counter",
+ "Counter": "FIXED",
+ "EventCode": "0xff",
+ "EventName": "UNC_U_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "UBOX"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/uncore-power.json b/tools/perf/pmu-events/arch/x86/broadwellx/uncore-power.json
index dd1b95655d1d..3ffb70ff573d 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/uncore-power.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/uncore-power.json
@@ -1,91 +1,456 @@
[
{
- "BriefDescription": "PCU clock ticks. Use to get percentages of PCU cycles events",
+ "BriefDescription": "pclk Cycles",
"Counter": "0,1,2,3",
"EventName": "UNC_P_CLOCKTICKS",
"PerPkg": "1",
"Unit": "PCU"
},
{
- "BriefDescription": "This is an occupancy event that tracks the number of cores that are in C0. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details",
+ "BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
- "Filter": "occ_sel=1",
- "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C0 / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "power_state_occupancy.cores_c0 %",
+ "EventCode": "0x60",
+ "EventName": "UNC_P_CORE0_TRANSITION_CYCLES",
"PerPkg": "1",
"Unit": "PCU"
},
{
- "BriefDescription": "This is an occupancy event that tracks the number of cores that are in C3. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details",
+ "BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
- "Filter": "occ_sel=2",
- "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C3 / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "power_state_occupancy.cores_c3 %",
+ "EventCode": "0x6A",
+ "EventName": "UNC_P_CORE10_TRANSITION_CYCLES",
"PerPkg": "1",
"Unit": "PCU"
},
{
- "BriefDescription": "This is an occupancy event that tracks the number of cores that are in C6. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events ",
+ "BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
- "Filter": "occ_sel=3",
- "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C6 / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "power_state_occupancy.cores_c6 %",
+ "EventCode": "0x6B",
+ "EventName": "UNC_P_CORE11_TRANSITION_CYCLES",
"PerPkg": "1",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip",
+ "BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0xA",
- "EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
- "MetricExpr": "(UNC_P_PROCHOT_EXTERNAL_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "prochot_external_cycles %",
+ "EventCode": "0x6C",
+ "EventName": "UNC_P_CORE12_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6D",
+ "EventName": "UNC_P_CORE13_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6E",
+ "EventName": "UNC_P_CORE14_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6F",
+ "EventName": "UNC_P_CORE15_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_P_CORE16_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_P_CORE17_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x61",
+ "EventName": "UNC_P_CORE1_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x62",
+ "EventName": "UNC_P_CORE2_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x63",
+ "EventName": "UNC_P_CORE3_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x64",
+ "EventName": "UNC_P_CORE4_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x65",
+ "EventName": "UNC_P_CORE5_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x66",
+ "EventName": "UNC_P_CORE6_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x67",
+ "EventName": "UNC_P_CORE7_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x68",
+ "EventName": "UNC_P_CORE8_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x69",
+ "EventName": "UNC_P_CORE9_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x30",
+ "EventName": "UNC_P_DEMOTIONS_CORE0",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_P_DEMOTIONS_CORE1",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3A",
+ "EventName": "UNC_P_DEMOTIONS_CORE10",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3B",
+ "EventName": "UNC_P_DEMOTIONS_CORE11",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3C",
+ "EventName": "UNC_P_DEMOTIONS_CORE12",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3D",
+ "EventName": "UNC_P_DEMOTIONS_CORE13",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3E",
+ "EventName": "UNC_P_DEMOTIONS_CORE14",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3F",
+ "EventName": "UNC_P_DEMOTIONS_CORE15",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_P_DEMOTIONS_CORE16",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_P_DEMOTIONS_CORE17",
"PerPkg": "1",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when temperature is the upper limit on frequency",
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_P_DEMOTIONS_CORE2",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_P_DEMOTIONS_CORE3",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_P_DEMOTIONS_CORE4",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_P_DEMOTIONS_CORE5",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x36",
+ "EventName": "UNC_P_DEMOTIONS_CORE6",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_P_DEMOTIONS_CORE7",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_P_DEMOTIONS_CORE8",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_P_DEMOTIONS_CORE9",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Thermal Strongest Upper Limit Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_max_limit_thermal_cycles %",
"PerPkg": "1",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when the OS is the upper limit on frequency",
+ "BriefDescription": "OS Strongest Upper Limit Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_P_FREQ_MAX_OS_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_MAX_OS_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_max_os_cycles %",
"PerPkg": "1",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when power is the upper limit on frequency",
+ "BriefDescription": "Power Strongest Upper Limit Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_MAX_POWER_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_max_power_cycles %",
"PerPkg": "1",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when current is the upper limit on frequency",
+ "BriefDescription": "IO P Limit Strongest Lower Limit Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x73",
+ "EventName": "UNC_P_FREQ_MIN_IO_P_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Cycles spent changing Frequency",
"Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "UNC_P_FREQ_TRANS_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_TRANS_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_trans_cycles %",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Memory Phase Shedding Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2F",
+ "EventName": "UNC_P_MEMORY_PHASE_SHEDDING_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State; C0 and C1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State; C3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State; C6 and C7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "External Prochot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA",
+ "EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Internal Prochot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_P_PROCHOT_INTERNAL_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Total Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_P_TOTAL_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_UFS_TRANSITIONS_RING_GV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x79",
+ "EventName": "UNC_P_UFS_TRANSITIONS_RING_GV",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "VR Hot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x42",
+ "EventName": "UNC_P_VR_HOT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_P_PKG_RESIDENCY_C0_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C2E",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_P_PKG_RESIDENCY_C2E_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_P_PKG_RESIDENCY_C3_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2D",
+ "EventName": "UNC_P_PKG_RESIDENCY_C6_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C7 State Residency",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_P_PKG_RESIDENCY_C7_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C1E",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4E",
+ "EventName": "UNC_P_PKG_RESIDENCY_C1E_CYCLES",
"PerPkg": "1",
"Unit": "PCU"
}
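
Note on the hunk above: the UNC_P_* entries are plain data, where each JSON object names one PCU (power control unit) event, its raw selector (EventCode) and the PMU it belongs to (Unit); jevents compiles these arrays into perf's built-in event tables. As a rough, non-authoritative illustration of how such a file can be inspected, the short Python sketch below loads one of these JSON arrays and groups the events by Unit. The file path is a hypothetical local checkout location and the field handling is a best-effort reading of the format, not the jevents implementation.

    import json
    from collections import defaultdict

    # Hypothetical path into a local kernel checkout; point it at the file of interest.
    PATH = "tools/perf/pmu-events/arch/x86/broadwellx/uncore-power.json"

    with open(PATH) as f:
        events = json.load(f)  # each pmu-events file is a flat JSON array of event objects

    by_unit = defaultdict(list)
    for ev in events:
        by_unit[ev.get("Unit", "core")].append(ev)

    for unit, evs in sorted(by_unit.items()):
        print(f"{unit}: {len(evs)} events")
        for ev in evs:
            # EventName is the alias perf exposes; EventCode is the raw event selector.
            print(f"  {ev['EventName']:<45} code={ev.get('EventCode', 'n/a')}")
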
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/virtual-memory.json b/tools/perf/pmu-events/arch/x86/broadwellx/virtual-memory.json
index 818a8b132c08..6a6de8790f25 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/virtual-memory.json
@@ -385,4 +385,4 @@
"SampleAfterValue": "100007",
"UMask": "0x20"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/cache.json b/tools/perf/pmu-events/arch/x86/cascadelakex/cache.json
index fcaa487b8737..716c1b507496 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/cache.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/cache.json
@@ -482,7 +482,7 @@
"UMask": "0x4"
},
{
- "BriefDescription": "Retired load instructions with remote Intel Optane DC persistent memory as the data source where the data request missed all caches. Precise event.",
+ "BriefDescription": "Retired load instructions with remote Intel(R) Optane(TM) DC persistent memory as the data source where the data request missed all caches. Precise event.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1",
@@ -490,7 +490,7 @@
"EventCode": "0xD3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM",
"PEBS": "1",
- "PublicDescription": "Counts retired load instructions with remote Intel Optane DC persistent memory as the data source and the data request missed L3 (AppDirect or Memory Mode) and DRAM cache(Memory Mode). Precise event",
+ "PublicDescription": "Counts retired load instructions with remote Intel(R) Optane(TM) DC persistent memory as the data source and the data request missed L3 (AppDirect or Memory Mode) and DRAM cache(Memory Mode). Precise event",
"SampleAfterValue": "100007",
"UMask": "0x10"
},
@@ -590,7 +590,7 @@
"UMask": "0x20"
},
{
- "BriefDescription": "Retired load instructions with local Intel Optane DC persistent memory as the data source where the data request missed all caches. Precise event.",
+ "BriefDescription": "Retired load instructions with local Intel(R) Optane(TM) DC persistent memory as the data source where the data request missed all caches. Precise event.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1",
@@ -598,7 +598,7 @@
"EventCode": "0xD1",
"EventName": "MEM_LOAD_RETIRED.LOCAL_PMM",
"PEBS": "1",
- "PublicDescription": "Counts retired load instructions with local Intel Optane DC persistent memory as the data source and the data request missed L3 (AppDirect or Memory Mode) and DRAM cache(Memory Mode). Precise event",
+ "PublicDescription": "Counts retired load instructions with local Intel(R) Optane(TM) DC persistent memory as the data source and the data request missed L3 (AppDirect or Memory Mode) and DRAM cache(Memory Mode). Precise event",
"SampleAfterValue": "100003",
"UMask": "0x80"
},
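
The clx-metrics.json hunks that follow mostly rework MetricExpr strings, which are arithmetic expressions over raw event counts that perf evaluates at report time. As a hedged, self-contained illustration (the counter values are made up, and this is not how perf itself evaluates expressions), the memory_bandwidth_total formula added further down, (( UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR ) * 64 / 1000000) / duration_time, works out as follows:

    # Illustrative only: hand-evaluate the memory_bandwidth_total MetricExpr with
    # assumed counter readings; in practice perf computes this from live counts.
    counts = {
        "UNC_M_CAS_COUNT.RD": 1_200_000_000,  # assumed DRAM read CAS commands over the interval
        "UNC_M_CAS_COUNT.WR": 400_000_000,    # assumed DRAM write CAS commands over the interval
        "duration_time": 1.0,                 # measurement interval in seconds
    }

    def memory_bandwidth_total(c):
        # Each DRAM CAS command moves one 64-byte cache line; scale to MB/s.
        return ((c["UNC_M_CAS_COUNT.RD"] + c["UNC_M_CAS_COUNT.WR"]) * 64 / 1_000_000) / c["duration_time"]

    print(f"memory_bandwidth_total = {memory_bandwidth_total(counts):,.0f} MB/s")
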
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
index 5a1631448b46..46613504b816 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
@@ -95,13 +95,13 @@
{
"BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
"MetricExpr": "100 * ((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - ( UOPS_ISSUED.ANY + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD))) * ( ( (max( ( CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS ) / CPU_CLK_UNHALTED.THREAD , 0 )) / ((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - ( UOPS_ISSUED.ANY + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD))) ) * ( (min( 9 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_LOAD_MISSES.WALK_ACTIVE , max( CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS , 0 ) ) / CPU_CLK_UNHALTED.THREAD) / (max( ( CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS ) / CPU_CLK_UNHALTED.THREAD , 0 )) ) + ( (EXE_ACTIVITY.BOUND_ON_STORES / CPU_CLK_UNHALTED.THREAD) / #((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - ( UOPS_ISSUED.ANY + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD))) ) * ( (( 9 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_STORE_MISSES.WALK_ACTIVE ) / CPU_CLK_UNHALTED.THREAD) / #(EXE_ACTIVITY.BOUND_ON_STORES / CPU_CLK_UNHALTED.THREAD) ) ) ",
- "MetricGroup": "Mem;MemoryTLB",
+ "MetricGroup": "Mem;MemoryTLB;Offcore",
"MetricName": "Memory_Data_TLBs"
},
{
"BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
"MetricExpr": "100 * ((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ( UOPS_ISSUED.ANY + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * ( ( (max( ( CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS ) / CPU_CLK_UNHALTED.THREAD , 0 )) / ((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ( UOPS_ISSUED.ANY + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * ( (min( 9 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_LOAD_MISSES.WALK_ACTIVE , max( CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS , 0 ) ) / CPU_CLK_UNHALTED.THREAD) / (max( ( CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS ) / CPU_CLK_UNHALTED.THREAD , 0 )) ) + ( (EXE_ACTIVITY.BOUND_ON_STORES / CPU_CLK_UNHALTED.THREAD) / #((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ( UOPS_ISSUED.ANY + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * ( (( 9 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_STORE_MISSES.WALK_ACTIVE ) / ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) / #(EXE_ACTIVITY.BOUND_ON_STORES / CPU_CLK_UNHALTED.THREAD) ) ) ",
- "MetricGroup": "Mem;MemoryTLB;_SMT",
+ "MetricGroup": "Mem;MemoryTLB;Offcore_SMT",
"MetricName": "Memory_Data_TLBs_SMT"
},
{
@@ -159,12 +159,6 @@
"MetricName": "UpTB"
},
{
- "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
- "MetricExpr": "1 / (INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD)",
- "MetricGroup": "Pipeline;Mem",
- "MetricName": "CPI"
- },
- {
"BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "Pipeline",
@@ -214,42 +208,36 @@
"MetricName": "FLOPc_SMT"
},
{
- "BriefDescription": "Actual per-core usage of the Floating Point execution units (regardless of the vector width)",
+ "BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
"MetricExpr": "( (FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE) + (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) ) / ( 2 * CPU_CLK_UNHALTED.THREAD )",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "FP_Arith_Utilization",
- "PublicDescription": "Actual per-core usage of the Floating Point execution units (regardless of the vector width). Values > 1 are possible due to Fused-Multiply Add (FMA) counting."
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
},
{
- "BriefDescription": "Actual per-core usage of the Floating Point execution units (regardless of the vector width). SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "( (FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE) + (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) ) / ( 2 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ) )",
"MetricGroup": "Cor;Flops;HPC_SMT",
"MetricName": "FP_Arith_Utilization_SMT",
- "PublicDescription": "Actual per-core usage of the Floating Point execution units (regardless of the vector width). Values > 1 are possible due to Fused-Multiply Add (FMA) counting. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common). SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
"MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
"MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
"MetricName": "ILP"
},
{
- "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
- "MetricExpr": " ( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) * ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * INT_MISC.CLEAR_RESTEER_CYCLES / CPU_CLK_UNHALTED.THREAD) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) ) * (4 * CPU_CLK_UNHALTED.THREAD) / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "Bad;BrMispredicts",
- "MetricName": "Branch_Misprediction_Cost"
+ "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts",
+ "MetricExpr": "( 1 - ((1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - ( UOPS_ISSUED.ANY + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD)) - ((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - ( UOPS_ISSUED.ANY + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD)))) / ((EXE_ACTIVITY.EXE_BOUND_0_PORTS + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)) * EXE_ACTIVITY.2_PORTS_UTIL)) / CPU_CLK_UNHALTED.THREAD if ( ARITH.DIVIDER_ACTIVE < ( CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY ) ) else (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)) * EXE_ACTIVITY.2_PORTS_UTIL) / CPU_CLK_UNHALTED.THREAD) if ((1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - ( UOPS_ISSUED.ANY + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD)) - ((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - ( UOPS_ISSUED.ANY + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD)))) < ((EXE_ACTIVITY.EXE_BOUND_0_PORTS + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)) * EXE_ACTIVITY.2_PORTS_UTIL)) / CPU_CLK_UNHALTED.THREAD if ( ARITH.DIVIDER_ACTIVE < ( CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY ) ) else (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)) * EXE_ACTIVITY.2_PORTS_UTIL) / CPU_CLK_UNHALTED.THREAD) else 1 ) if 0 > 0.5 else 0",
+ "MetricGroup": "Cor;SMT",
+ "MetricName": "Core_Bound_Likely"
},
{
- "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
- "MetricExpr": " ( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * INT_MISC.CLEAR_RESTEER_CYCLES / CPU_CLK_UNHALTED.THREAD) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) ) * (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "Bad;BrMispredicts_SMT",
- "MetricName": "Branch_Misprediction_Cost_SMT"
- },
- {
- "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "Bad;BadSpec;BrMispredicts",
- "MetricName": "IpMispredict"
+ "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts",
+ "MetricExpr": "( 1 - ((1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ( UOPS_ISSUED.ANY + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ( UOPS_ISSUED.ANY + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) / ((EXE_ACTIVITY.EXE_BOUND_0_PORTS + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * EXE_ACTIVITY.2_PORTS_UTIL)) / CPU_CLK_UNHALTED.THREAD if ( ARITH.DIVIDER_ACTIVE < ( CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY ) ) else (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * EXE_ACTIVITY.2_PORTS_UTIL) / CPU_CLK_UNHALTED.THREAD) if ((1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ( UOPS_ISSUED.ANY + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ( UOPS_ISSUED.ANY + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) < ((EXE_ACTIVITY.EXE_BOUND_0_PORTS + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * EXE_ACTIVITY.2_PORTS_UTIL)) / CPU_CLK_UNHALTED.THREAD if ( ARITH.DIVIDER_ACTIVE < ( CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY ) ) else (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * EXE_ACTIVITY.2_PORTS_UTIL) / CPU_CLK_UNHALTED.THREAD) else 1 ) if (1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_UNHALTED.REF_XCLK_ANY / 2 )) > 0.5 else 0",
+ "MetricGroup": "Cor;SMT_SMT",
+ "MetricName": "Core_Bound_Likely_SMT"
},
{
"BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
@@ -342,48 +330,84 @@
"PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
},
{
+ "BriefDescription": "Instructions per Software prefetch instruction (of any type: NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / cpu@SW_PREFETCH_ACCESS.T0\\,umask\\=0xF@",
+ "MetricGroup": "Prefetches",
+ "MetricName": "IpSWPF"
+ },
+ {
"BriefDescription": "Total number of retired Instructions, Sample with: INST_RETIRED.PREC_DIST",
"MetricExpr": "INST_RETIRED.ANY",
"MetricGroup": "Summary;TmaL1",
"MetricName": "Instructions"
},
{
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "Retire"
+ },
+ {
+ "BriefDescription": "",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
+ "MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
+ "MetricName": "Execute"
+ },
+ {
"BriefDescription": "Average number of Uops issued by front-end when it issued something",
"MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=1@",
"MetricGroup": "Fed;FetchBW",
"MetricName": "Fetch_UpC"
},
{
- "BriefDescription": "Fraction of Uops delivered by the LSD (Loop Stream Detector; aka Loop Cache)",
- "MetricExpr": "LSD.UOPS / (IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
- "MetricGroup": "Fed;LSD",
- "MetricName": "LSD_Coverage"
- },
- {
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / (IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
+ "MetricExpr": "IDQ.DSB_UOPS / (IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
"MetricGroup": "DSB;Fed;FetchBW",
"MetricName": "DSB_Coverage"
},
{
- "BriefDescription": "Total penalty related to DSB (uop cache) misses - subset/see of/the Instruction_Fetch_BW Bottleneck.",
- "MetricExpr": "(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) * (DSB2MITE_SWITCHES.PENALTY_CYCLES / CPU_CLK_UNHALTED.THREAD) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) + ((IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD))) * (( IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS ) / CPU_CLK_UNHALTED.THREAD / 2) / #((IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)))",
+ "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / DSB2MITE_SWITCHES.COUNT",
+ "MetricGroup": "DSBmiss",
+ "MetricName": "DSB_Switch_Cost"
+ },
+ {
+ "BriefDescription": "Total penalty related to DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck.",
+ "MetricExpr": "100 * ( (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) * (DSB2MITE_SWITCHES.PENALTY_CYCLES / CPU_CLK_UNHALTED.THREAD) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) + ((IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD))) * (( IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS ) / CPU_CLK_UNHALTED.THREAD / 2) / #((IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD))) )",
"MetricGroup": "DSBmiss;Fed",
- "MetricName": "DSB_Misses_Cost"
+ "MetricName": "DSB_Misses"
},
{
- "BriefDescription": "Total penalty related to DSB (uop cache) misses - subset/see of/the Instruction_Fetch_BW Bottleneck.",
- "MetricExpr": "(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * (DSB2MITE_SWITCHES.PENALTY_CYCLES / CPU_CLK_UNHALTED.THREAD) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) + ((IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (( IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS ) / ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ) / 2) / #((IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))",
+ "BriefDescription": "Total penalty related to DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck.",
+ "MetricExpr": "100 * ( (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * (DSB2MITE_SWITCHES.PENALTY_CYCLES / CPU_CLK_UNHALTED.THREAD) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) + ((IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (( IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS ) / ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ) / 2) / #((IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
"MetricGroup": "DSBmiss;Fed_SMT",
- "MetricName": "DSB_Misses_Cost_SMT"
+ "MetricName": "DSB_Misses_SMT"
},
{
- "BriefDescription": "Number of Instructions per non-speculative DSB miss",
+ "BriefDescription": "Number of Instructions per non-speculative DSB miss (lower number means higher occurrence rate)",
"MetricExpr": "INST_RETIRED.ANY / FRONTEND_RETIRED.ANY_DSB_MISS",
"MetricGroup": "DSBmiss;Fed",
"MetricName": "IpDSB_Miss_Ret"
},
{
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts",
+ "MetricName": "IpMispredict"
+ },
+ {
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
+ "MetricExpr": " ( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) * ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * INT_MISC.CLEAR_RESTEER_CYCLES / CPU_CLK_UNHALTED.THREAD) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) ) * (4 * CPU_CLK_UNHALTED.THREAD) / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "Branch_Misprediction_Cost"
+ },
+ {
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
+ "MetricExpr": " ( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * INT_MISC.CLEAR_RESTEER_CYCLES / CPU_CLK_UNHALTED.THREAD) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) ) * (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BrMispredicts_SMT",
+ "MetricName": "Branch_Misprediction_Cost_SMT"
+ },
+ {
"BriefDescription": "Fraction of branches that are non-taken conditionals",
"MetricExpr": "BR_INST_RETIRED.NOT_TAKEN / BR_INST_RETIRED.ALL_BRANCHES",
"MetricGroup": "Bad;Branches;CodeGen;PGO",
@@ -408,11 +432,10 @@
"MetricName": "Jump"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load instructions (in core cycles)",
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
"MetricGroup": "Mem;MemoryBound;MemoryLat",
- "MetricName": "Load_Miss_Real_Latency",
- "PublicDescription": "Actual Average Latency for L1 data-cache miss demand load instructions (in core cycles). Latency may be overestimated for multi-load instructions - e.g. repeat strings."
+ "MetricName": "Load_Miss_Real_Latency"
},
{
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
@@ -421,30 +444,6 @@
"MetricName": "MLP"
},
{
- "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
- "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L1D_Cache_Fill_BW"
- },
- {
- "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L2_Cache_Fill_BW"
- },
- {
- "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L3_Cache_Fill_BW"
- },
- {
- "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW;Offcore",
- "MetricName": "L3_Cache_Access_BW"
- },
- {
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
"MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
"MetricGroup": "Mem;CacheMisses",
@@ -463,13 +462,13 @@
"MetricName": "L2MPKI"
},
{
- "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)",
"MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
"MetricGroup": "Mem;CacheMisses;Offcore",
"MetricName": "L2MPKI_All"
},
{
- "BriefDescription": "L2 cache misses per kilo instruction for all demand loads (including speculative)",
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)",
"MetricExpr": "1000 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY",
"MetricGroup": "Mem;CacheMisses",
"MetricName": "L2MPKI_Load"
@@ -493,7 +492,7 @@
"MetricName": "L3MPKI"
},
{
- "BriefDescription": "Fill Buffer (FB) true hits per kilo instructions for retired demand loads",
+ "BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)",
"MetricExpr": "1000 * MEM_LOAD_RETIRED.FB_HIT / INST_RETIRED.ANY",
"MetricGroup": "Mem;CacheMisses",
"MetricName": "FB_HPKI"
@@ -512,6 +511,30 @@
"MetricName": "Page_Walks_Utilization_SMT"
},
{
+ "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L1D_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L2_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L3_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "L3_Cache_Access_BW"
+ },
+ {
"BriefDescription": "Rate of silent evictions from the L2 cache per Kilo instruction where the evicted lines are dropped (no writeback to L3 or memory)",
"MetricExpr": "1000 * L2_LINES_OUT.SILENT / INST_RETIRED.ANY",
"MetricGroup": "L2Evicts;Mem;Server",
@@ -524,6 +547,30 @@
"MetricName": "L2_Evictions_NonSilent_PKI"
},
{
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "(64 * L1D.REPLACEMENT / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L1D_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "(64 * L2_LINES_IN.ALL / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L2_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "(64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L3_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "(64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "L3_Cache_Access_BW_1T"
+ },
+ {
"BriefDescription": "Average CPU Utilization",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "HPC;Summary",
@@ -539,7 +586,8 @@
"BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "( ( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE ) / 1000000000 ) / duration_time",
"MetricGroup": "Cor;Flops;HPC",
- "MetricName": "GFLOPs"
+ "MetricName": "GFLOPs",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine."
},
{
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
@@ -668,6 +716,12 @@
"MetricName": "Socket_CLKS"
},
{
+ "BriefDescription": "Uncore frequency per die [GHZ]",
+ "MetricExpr": "cha_0@event\\=0x0@ / #num_dies / duration_time / 1000000000",
+ "MetricGroup": "SoC",
+ "MetricName": "UNCORE_FREQ"
+ },
+ {
"BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
"MetricGroup": "Branches;OS",
@@ -714,5 +768,537 @@
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"MetricName": "C7_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "Percentage of time spent in the active CPU power state C0",
+ "MetricExpr": "100 * CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricGroup": "",
+ "MetricName": "cpu_utilization_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "CPU operating frequency (in GHz)",
+ "MetricExpr": "( CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC * #SYSTEM_TSC_FREQ ) / 1000000000",
+ "MetricGroup": "",
+ "MetricName": "cpu_operating_frequency",
+ "ScaleUnit": "1GHz"
+ },
+ {
+ "BriefDescription": "Cycles per instruction retired; indicating how much time each executed instruction took; in units of cycles.",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "cpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "The ratio of number of completed memory load instructions to the total number completed instructions",
+ "MetricExpr": "MEM_INST_RETIRED.ALL_LOADS / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "loads_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "The ratio of number of completed memory store instructions to the total number completed instructions",
+ "MetricExpr": "MEM_INST_RETIRED.ALL_STORES / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "stores_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of requests missing L1 data cache (includes data+rfo w/ prefetches) to the total number of completed instructions",
+ "MetricExpr": "L1D.REPLACEMENT / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "l1d_mpi_includes_data_plus_rfo_with_prefetches",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of demand load requests hitting in L1 data cache to the total number of completed instructions ",
+ "MetricExpr": "MEM_LOAD_RETIRED.L1_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "l1d_demand_data_read_hits_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of code read requests missing in L1 instruction cache (includes prefetches) to the total number of completed instructions",
+ "MetricExpr": "L2_RQSTS.ALL_CODE_RD / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "l1_i_code_read_misses_with_prefetches_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed demand load requests hitting in L2 cache to the total number of completed instructions ",
+ "MetricExpr": "MEM_LOAD_RETIRED.L2_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "l2_demand_data_read_hits_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of requests missing L2 cache (includes code+data+rfo w/ prefetches) to the total number of completed instructions",
+ "MetricExpr": "L2_LINES_IN.ALL / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "l2_mpi_includes_code_plus_data_plus_rfo_with_prefetches",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed data read request missing L2 cache to the total number of completed instructions",
+ "MetricExpr": "MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "l2_demand_data_read_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of code read request missing L2 cache to the total number of completed instructions",
+ "MetricExpr": "L2_RQSTS.CODE_RD_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "l2_demand_code_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of data read requests missing last level core cache (includes demand w/ prefetches) to the total number of completed instructions",
+ "MetricExpr": "cha@unc_cha_tor_inserts.ia_miss\\,config1\\=0x12D4043300000000@ / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "llc_data_read_mpi_demand_plus_prefetch",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of code read requests missing last level core cache (includes demand w/ prefetches) to the total number of completed instructions",
+ "MetricExpr": "cha@unc_cha_tor_inserts.ia_miss\\,config1\\=0x12CC023300000000@ / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "llc_code_read_mpi_demand_plus_prefetch",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Average latency of a last level cache (LLC) demand and prefetch data read miss (read memory access) in nano seconds",
+ "MetricExpr": "( ( 1000000000 * ( cha@unc_cha_tor_occupancy.ia_miss\\,config1\\=0x4043300000000@ / cha@unc_cha_tor_inserts.ia_miss\\,config1\\=0x4043300000000@ ) / ( UNC_CHA_CLOCKTICKS / ( source_count(UNC_CHA_CLOCKTICKS) * #num_packages ) ) ) * duration_time )",
+ "MetricGroup": "",
+ "MetricName": "llc_data_read_demand_plus_prefetch_miss_latency",
+ "ScaleUnit": "1ns"
+ },
+ {
+ "BriefDescription": "Average latency of a last level cache (LLC) demand and prefetch data read miss (read memory access) addressed to local memory in nano seconds",
+ "MetricExpr": "( ( 1000000000 * ( cha@unc_cha_tor_occupancy.ia_miss\\,config1\\=0x4043200000000@ / cha@unc_cha_tor_inserts.ia_miss\\,config1\\=0x4043200000000@ ) / ( UNC_CHA_CLOCKTICKS / ( source_count(UNC_CHA_CLOCKTICKS) * #num_packages ) ) ) * duration_time )",
+ "MetricGroup": "",
+ "MetricName": "llc_data_read_demand_plus_prefetch_miss_latency_for_local_requests",
+ "ScaleUnit": "1ns"
+ },
+ {
+ "BriefDescription": "Average latency of a last level cache (LLC) demand and prefetch data read miss (read memory access) addressed to remote memory in nano seconds",
+ "MetricExpr": "( ( 1000000000 * ( cha@unc_cha_tor_occupancy.ia_miss\\,config1\\=0x4043100000000@ / cha@unc_cha_tor_inserts.ia_miss\\,config1\\=0x4043100000000@ ) / ( UNC_CHA_CLOCKTICKS / ( source_count(UNC_CHA_CLOCKTICKS) * #num_packages ) ) ) * duration_time )",
+ "MetricGroup": "",
+ "MetricName": "llc_data_read_demand_plus_prefetch_miss_latency_for_remote_requests",
+ "ScaleUnit": "1ns"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for all page sizes) caused by a code fetch to the total number of completed instructions. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB.",
+ "MetricExpr": "ITLB_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "itlb_2nd_level_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for 2 megabyte and 4 megabyte page sizes) caused by a code fetch to the total number of completed instructions. This implies it missed in the Instruction Translation Lookaside Buffer (ITLB) and further levels of TLB.",
+ "MetricExpr": "ITLB_MISSES.WALK_COMPLETED_2M_4M / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "itlb_2nd_level_large_page_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for all page sizes) caused by demand data loads to the total number of completed instructions. This implies it missed in the DTLB and further levels of TLB.",
+ "MetricExpr": "DTLB_LOAD_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "dtlb_2nd_level_load_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for 2 megabyte page sizes) caused by demand data loads to the total number of completed instructions. This implies it missed in the Data Translation Lookaside Buffer (DTLB) and further levels of TLB.",
+ "MetricExpr": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "dtlb_2nd_level_2mb_large_page_load_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for all page sizes) caused by demand data stores to the total number of completed instructions. This implies it missed in the DTLB and further levels of TLB.",
+ "MetricExpr": "DTLB_STORE_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "dtlb_2nd_level_store_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Memory read that miss the last level cache (LLC) addressed to local DRAM as a percentage of total memory read accesses, does not include LLC prefetches.",
+ "MetricExpr": "100 * cha@unc_cha_tor_inserts.ia_miss\\,config1\\=0x4043200000000@ / ( cha@unc_cha_tor_inserts.ia_miss\\,config1\\=0x4043200000000@ + cha@unc_cha_tor_inserts.ia_miss\\,config1\\=0x4043100000000@ )",
+ "MetricGroup": "",
+ "MetricName": "numa_percent_reads_addressed_to_local_dram",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Memory reads that miss the last level cache (LLC) addressed to remote DRAM as a percentage of total memory read accesses, does not include LLC prefetches.",
+ "MetricExpr": "100 * cha@unc_cha_tor_inserts.ia_miss\\,config1\\=0x4043100000000@ / ( cha@unc_cha_tor_inserts.ia_miss\\,config1\\=0x4043200000000@ + cha@unc_cha_tor_inserts.ia_miss\\,config1\\=0x4043100000000@ )",
+ "MetricGroup": "",
+ "MetricName": "numa_percent_reads_addressed_to_remote_dram",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Uncore operating frequency in GHz",
+ "MetricExpr": "UNC_CHA_CLOCKTICKS / ( source_count(UNC_CHA_CLOCKTICKS) * #num_packages ) / 1000000000",
+ "MetricGroup": "",
+ "MetricName": "uncore_frequency",
+ "ScaleUnit": "1GHz"
+ },
+ {
+ "BriefDescription": "Intel(R) Ultra Path Interconnect (UPI) data transmit bandwidth (MB/sec)",
+ "MetricExpr": "( UNC_UPI_TxL_FLITS.ALL_DATA * (64 / 9.0) / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "upi_data_transmit_bw_only_data",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "DDR memory read bandwidth (MB/sec)",
+ "MetricExpr": "( UNC_M_CAS_COUNT.RD * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "memory_bandwidth_read",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "DDR memory write bandwidth (MB/sec)",
+ "MetricExpr": "( UNC_M_CAS_COUNT.WR * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "memory_bandwidth_write",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "DDR memory bandwidth (MB/sec)",
+ "MetricExpr": "(( UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR ) * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "memory_bandwidth_total",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Intel(R) Optane(TM) Persistent Memory(PMEM) memory read bandwidth (MB/sec)",
+ "MetricExpr": "( UNC_M_PMM_RPQ_INSERTS * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "pmem_memory_bandwidth_read",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Intel(R) Optane(TM) Persistent Memory(PMEM) memory write bandwidth (MB/sec)",
+ "MetricExpr": "( UNC_M_PMM_WPQ_INSERTS * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "pmem_memory_bandwidth_write",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Intel(R) Optane(TM) Persistent Memory(PMEM) memory bandwidth (MB/sec)",
+ "MetricExpr": "(( UNC_M_PMM_RPQ_INSERTS + UNC_M_PMM_WPQ_INSERTS ) * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "pmem_memory_bandwidth_total",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth of IO reads that are initiated by end device controllers that are requesting memory from the CPU.",
+ "MetricExpr": "(( UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3 ) * 4 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "io_bandwidth_read",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth of IO writes that are initiated by end device controllers that are writing memory to the CPU.",
+ "MetricExpr": "(( UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.PART0 + UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.PART1 + UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.PART2 + UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.PART3 ) * 4 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "io_bandwidth_write",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Uops delivered from decoded instruction cache (decoded stream buffer or DSB) as a percent of total uops delivered to Instruction Decode Queue",
+ "MetricExpr": "100 * ( IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS + LSD.UOPS ) )",
+ "MetricGroup": "",
+ "MetricName": "percent_uops_delivered_from_decoded_icache_dsb",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Uops delivered from legacy decode pipeline (Micro-instruction Translation Engine or MITE) as a percent of total uops delivered to Instruction Decode Queue",
+ "MetricExpr": "100 * ( IDQ.MITE_UOPS / ( IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS + LSD.UOPS ) )",
+ "MetricGroup": "",
+ "MetricName": "percent_uops_delivered_from_legacy_decode_pipeline_mite",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Uops delivered from microcode sequencer (MS) as a percent of total uops delivered to Instruction Decode Queue",
+ "MetricExpr": "100 * ( IDQ.MS_UOPS / ( IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS + LSD.UOPS ) )",
+ "MetricGroup": "",
+ "MetricName": "percent_uops_delivered_from_microcode_sequencer_ms",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Bandwidth (MB/sec) of read requests that miss the last level cache (LLC) and go to local memory.",
+ "MetricExpr": "( UNC_CHA_REQUESTS.READS_LOCAL * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "llc_miss_local_memory_bandwidth_read",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth (MB/sec) of write requests that miss the last level cache (LLC) and go to local memory.",
+ "MetricExpr": "( UNC_CHA_REQUESTS.WRITES_LOCAL * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "llc_miss_local_memory_bandwidth_write",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth (MB/sec) of read requests that miss the last level cache (LLC) and go to remote memory.",
+ "MetricExpr": "( UNC_CHA_REQUESTS.READS_REMOTE * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "llc_miss_remote_memory_bandwidth_read",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Machine_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+ "MetricExpr": "100 * ( IDQ_UOPS_NOT_DELIVERED.CORE / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) )",
+ "MetricGroup": "TmaL1;PGO",
+ "MetricName": "tma_frontend_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period.",
+ "MetricExpr": "100 * ( ( 4 ) * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) )",
+ "MetricGroup": "Frontend;TmaL2;m_tma_frontend_bound_percent",
+ "MetricName": "tma_fetch_latency_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses.",
+ "MetricExpr": "100 * ( ( ICACHE_16B.IFDATA_STALL + 2 * cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=0x1\\,edge\\=0x1@ ) / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "BigFoot;FetchLat;IcMiss;TmaL3;m_tma_fetch_latency_percent",
+ "MetricName": "tma_icache_misses_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses.",
+ "MetricExpr": "100 * ( ICACHE_64B.IFTAG_STALL / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TmaL3;m_tma_fetch_latency_percent",
+ "MetricName": "tma_itlb_misses_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings.",
+ "MetricExpr": "100 * ( INT_MISC.CLEAR_RESTEER_CYCLES / ( CPU_CLK_UNHALTED.THREAD ) + ( ( 9 ) * BACLEARS.ANY / ( CPU_CLK_UNHALTED.THREAD ) ) )",
+ "MetricGroup": "FetchLat;TmaL3;m_tma_fetch_latency_percent",
+ "MetricName": "tma_branch_resteers_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty.",
+ "MetricExpr": "100 * ( DSB2MITE_SWITCHES.PENALTY_CYCLES / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "DSBmiss;FetchLat;TmaL3;m_tma_fetch_latency_percent",
+ "MetricName": "tma_dsb_switches_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or the Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs.",
+ "MetricExpr": "100 * ( ILD_STALL.LCP / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "FetchLat;TmaL3;m_tma_fetch_latency_percent",
+ "MetricName": "tma_lcp_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals.",
+ "MetricExpr": "100 * ( ( 2 ) * IDQ.MS_SWITCHES / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "FetchLat;MicroSeq;TmaL3;m_tma_fetch_latency_percent",
+ "MetricName": "tma_ms_switches_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers a suboptimal amount of uops to the Backend.",
+ "MetricExpr": "100 * ( ( IDQ_UOPS_NOT_DELIVERED.CORE / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( ( 4 ) * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) )",
+ "MetricGroup": "FetchBW;Frontend;TmaL2;m_tma_frontend_bound_percent",
+ "MetricName": "tma_fetch_bandwidth_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck.",
+ "MetricExpr": "100 * ( ( IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS ) / ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) / 2 )",
+ "MetricGroup": "DSBmiss;FetchBW;TmaL3;m_tma_fetch_bandwidth_percent",
+ "MetricName": "tma_mite_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
+ "MetricExpr": "100 * ( ( IDQ.ALL_DSB_CYCLES_ANY_UOPS - IDQ.ALL_DSB_CYCLES_4_UOPS ) / ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) / 2 )",
+ "MetricGroup": "DSB;FetchBW;TmaL3;m_tma_fetch_bandwidth_percent",
+ "MetricName": "tma_dsb_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. This includes slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches is categorized under the Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "MetricExpr": "100 * ( ( UOPS_ISSUED.ANY - ( UOPS_RETIRED.RETIRE_SLOTS ) + ( 4 ) * ( ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) if #SMT_on else INT_MISC.RECOVERY_CYCLES ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) )",
+ "MetricGroup": "TmaL1",
+ "MetricName": "tma_bad_speculation_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path.",
+ "MetricExpr": "100 * ( ( BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT ) ) * ( ( UOPS_ISSUED.ANY - ( UOPS_RETIRED.RETIRE_SLOTS ) + ( 4 ) * ( ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) if #SMT_on else INT_MISC.RECOVERY_CYCLES ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) )",
+ "MetricGroup": "BadSpec;BrMispredicts;TmaL2;m_tma_bad_speculation_percent",
+ "MetricName": "tma_branch_mispredicts_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes.",
+ "MetricExpr": "100 * ( ( ( UOPS_ISSUED.ANY - ( UOPS_RETIRED.RETIRE_SLOTS ) + ( 4 ) * ( ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) if #SMT_on else INT_MISC.RECOVERY_CYCLES ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( ( BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT ) ) * ( ( UOPS_ISSUED.ANY - ( UOPS_RETIRED.RETIRE_SLOTS ) + ( 4 ) * ( ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) if #SMT_on else INT_MISC.RECOVERY_CYCLES ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) )",
+ "MetricGroup": "BadSpec;MachineClears;TmaL2;m_tma_bad_speculation_percent",
+ "MetricName": "tma_machine_clears_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "MetricExpr": "100 * ( 1 - ( IDQ_UOPS_NOT_DELIVERED.CORE / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( UOPS_ISSUED.ANY + ( 4 ) * ( ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) if #SMT_on else INT_MISC.RECOVERY_CYCLES ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) )",
+ "MetricGroup": "TmaL1",
+ "MetricName": "tma_backend_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
+ "MetricExpr": "100 * ( ( ( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / ( CYCLE_ACTIVITY.STALLS_TOTAL + ( EXE_ACTIVITY.1_PORTS_UTIL + ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) * EXE_ACTIVITY.2_PORTS_UTIL ) + EXE_ACTIVITY.BOUND_ON_STORES ) ) * ( 1 - ( IDQ_UOPS_NOT_DELIVERED.CORE / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( UOPS_ISSUED.ANY + ( 4 ) * ( ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) if #SMT_on else INT_MISC.RECOVERY_CYCLES ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) )",
+ "MetricGroup": "Backend;TmaL2;m_tma_backend_bound_percent",
+ "MetricName": "tma_memory_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads that miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache.",
+ "MetricExpr": "100 * ( max( ( CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS ) / ( CPU_CLK_UNHALTED.THREAD ) , 0 ) )",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TmaL3;m_tma_memory_bound_percent",
+ "MetricName": "tma_l1_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance.",
+ "MetricExpr": "100 * ( ( ( MEM_LOAD_RETIRED.L2_HIT * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) / ( ( MEM_LOAD_RETIRED.L2_HIT * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=0x1@ ) ) * ( ( CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS ) / ( CPU_CLK_UNHALTED.THREAD ) ) )",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TmaL3;m_tma_memory_bound_percent",
+ "MetricName": "tma_l2_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance.",
+ "MetricExpr": "100 * ( ( CYCLE_ACTIVITY.STALLS_L2_MISS - CYCLE_ACTIVITY.STALLS_L3_MISS ) / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TmaL3;m_tma_memory_bound_percent",
+ "MetricName": "tma_l3_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance.",
+ "MetricExpr": "100 * ( min( ( ( ( CYCLE_ACTIVITY.STALLS_L3_MISS / ( CPU_CLK_UNHALTED.THREAD ) + ( ( CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS ) / ( CPU_CLK_UNHALTED.THREAD ) ) - ( ( ( MEM_LOAD_RETIRED.L2_HIT * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) / ( ( MEM_LOAD_RETIRED.L2_HIT * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=0x1@ ) ) * ( ( CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS ) / ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( min( ( ( ( ( 1 - ( ( ( 19 * ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + 10 * ( ( MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) ) ) / ( ( 19 * ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + 10 * ( ( MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) ) ) + ( 25 * ( ( MEM_LOAD_RETIRED.LOCAL_PMM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) ) + 33 * ( ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) ) ) ) ) ) ) * ( CYCLE_ACTIVITY.STALLS_L3_MISS / ( CPU_CLK_UNHALTED.THREAD ) + ( ( CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS ) / ( CPU_CLK_UNHALTED.THREAD ) ) - ( ( ( MEM_LOAD_RETIRED.L2_HIT * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) / ( ( MEM_LOAD_RETIRED.L2_HIT * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=0x1@ ) ) * ( ( CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS ) / ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) if ( ( 1000000 ) * ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM + MEM_LOAD_RETIRED.LOCAL_PMM ) > MEM_LOAD_RETIRED.L1_MISS ) else 0 ) ) , ( 1 ) ) ) ) ) , ( 1 ) ) )",
+ "MetricGroup": "MemoryBound;TmaL3mem;TmaL3;m_tma_memory_bound_percent",
+ "MetricName": "tma_dram_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates (based on idle latencies) how often the CPU was stalled on accesses to external 3D-Xpoint (Crystal Ridge, a.k.a. IXP) memory by loads. PMM stands for Persistent Memory Module.",
+ "MetricExpr": "100 * ( min( ( ( ( ( 1 - ( ( ( 19 * ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + 10 * ( ( MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) ) ) / ( ( 19 * ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + 10 * ( ( MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) ) ) + ( 25 * ( ( MEM_LOAD_RETIRED.LOCAL_PMM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) ) + 33 * ( ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) ) ) ) ) ) ) * ( CYCLE_ACTIVITY.STALLS_L3_MISS / ( CPU_CLK_UNHALTED.THREAD ) + ( ( CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS ) / ( CPU_CLK_UNHALTED.THREAD ) ) - ( ( ( MEM_LOAD_RETIRED.L2_HIT * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) / ( ( MEM_LOAD_RETIRED.L2_HIT * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=0x1@ ) ) * ( ( CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS ) / ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) if ( ( 1000000 ) * ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM + MEM_LOAD_RETIRED.LOCAL_PMM ) > MEM_LOAD_RETIRED.L1_MISS ) else 0 ) ) , ( 1 ) ) )",
+ "MetricGroup": "MemoryBound;Server;TmaL3mem;TmaL3;m_tma_memory_bound_percent",
+ "MetricName": "tma_pmm_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to RFO store memory accesses; RFO stores issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are a few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck.",
+ "MetricExpr": "100 * ( EXE_ACTIVITY.BOUND_ON_STORES / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "MemoryBound;TmaL3mem;TmaL3;m_tma_memory_bound_percent",
+ "MetricName": "tma_store_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where Core non-memory issues were a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in the program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
+ "MetricExpr": "100 * ( ( 1 - ( IDQ_UOPS_NOT_DELIVERED.CORE / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( UOPS_ISSUED.ANY + ( 4 ) * ( ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) if #SMT_on else INT_MISC.RECOVERY_CYCLES ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( ( ( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / ( CYCLE_ACTIVITY.STALLS_TOTAL + ( EXE_ACTIVITY.1_PORTS_UTIL + ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) * EXE_ACTIVITY.2_PORTS_UTIL ) + EXE_ACTIVITY.BOUND_ON_STORES ) ) * ( 1 - ( IDQ_UOPS_NOT_DELIVERED.CORE / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( UOPS_ISSUED.ANY + ( 4 ) * ( ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) if #SMT_on else INT_MISC.RECOVERY_CYCLES ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) )",
+ "MetricGroup": "Backend;TmaL2;Compute;m_tma_backend_bound_percent",
+ "MetricName": "tma_core_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication.",
+ "MetricExpr": "100 * ( ARITH.DIVIDER_ACTIVE / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "TmaL3;m_tma_core_bound_percent",
+ "MetricName": "tma_divider_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed to this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
+ "MetricExpr": "100 * ( ( EXE_ACTIVITY.EXE_BOUND_0_PORTS + ( EXE_ACTIVITY.1_PORTS_UTIL + ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) * EXE_ACTIVITY.2_PORTS_UTIL ) ) / ( CPU_CLK_UNHALTED.THREAD ) if ( ARITH.DIVIDER_ACTIVE < ( CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY ) ) else ( EXE_ACTIVITY.1_PORTS_UTIL + ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) * EXE_ACTIVITY.2_PORTS_UTIL ) / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "PortsUtil;TmaL3;m_tma_core_bound_percent",
+ "MetricName": "tma_ports_utilization_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessarily mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided.",
+ "MetricExpr": "100 * ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) )",
+ "MetricGroup": "TmaL1",
+ "MetricName": "tma_retiring_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with the total number of instructions used by the program. A uops-per-instruction (see UPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; a high value does not necessarily mean better performance cannot be achieved.",
+ "MetricExpr": "100 * ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) + UOPS_RETIRED.MACRO_FUSED - INST_RETIRED.ANY ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) )",
+ "MetricGroup": "Retire;TmaL2;m_tma_retiring_percent",
+ "MetricName": "tma_light_operations_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
+ "MetricExpr": "100 * ( ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) * UOPS_EXECUTED.X87 / UOPS_EXECUTED.THREAD ) + ( ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) / ( UOPS_RETIRED.RETIRE_SLOTS ) ) + ( min( ( ( FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE ) / ( UOPS_RETIRED.RETIRE_SLOTS ) ) , ( 1 ) ) ) )",
+ "MetricGroup": "HPC;TmaL3;m_tma_light_operations_percent",
+ "MetricName": "tma_fp_arith_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations -- uops for memory load or store accesses.",
+ "MetricExpr": "100 * ( ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) + UOPS_RETIRED.MACRO_FUSED - INST_RETIRED.ANY ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) * MEM_INST_RETIRED.ANY / INST_RETIRED.ANY )",
+ "MetricGroup": "Pipeline;TmaL3;m_tma_light_operations_percent",
+ "MetricName": "tma_memory_operations_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions. The instruction pairs of CMP+JCC or DEC+JCC are commonly used examples.",
+ "MetricExpr": "100 * ( ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) + UOPS_RETIRED.MACRO_FUSED - INST_RETIRED.ANY ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) * UOPS_RETIRED.MACRO_FUSED / ( UOPS_RETIRED.RETIRE_SLOTS ) )",
+ "MetricGroup": "Pipeline;TmaL3;m_tma_light_operations_percent",
+ "MetricName": "tma_fused_instructions_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. This metric can be used to examine fusible conditional jumps that were not fused.",
+ "MetricExpr": "100 * ( ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) + UOPS_RETIRED.MACRO_FUSED - INST_RETIRED.ANY ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) * ( BR_INST_RETIRED.ALL_BRANCHES - UOPS_RETIRED.MACRO_FUSED ) / ( UOPS_RETIRED.RETIRE_SLOTS ) )",
+ "MetricGroup": "Pipeline;TmaL3;m_tma_light_operations_percent",
+ "MetricName": "tma_non_fused_branches_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body.",
+ "MetricExpr": "100 * ( ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) + UOPS_RETIRED.MACRO_FUSED - INST_RETIRED.ANY ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) * INST_RETIRED.NOP / ( UOPS_RETIRED.RETIRE_SLOTS ) )",
+ "MetricGroup": "Pipeline;TmaL3;m_tma_light_operations_percent",
+ "MetricName": "tma_nop_instructions_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes. May undercount due to FMA double counting",
+ "MetricExpr": "100 * ( max( 0 , ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) + UOPS_RETIRED.MACRO_FUSED - INST_RETIRED.ANY ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) - ( ( ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) * UOPS_EXECUTED.X87 / UOPS_EXECUTED.THREAD ) + ( ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) / ( UOPS_RETIRED.RETIRE_SLOTS ) ) + ( min( ( ( FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE ) / ( UOPS_RETIRED.RETIRE_SLOTS ) ) , ( 1 ) ) ) ) + ( ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) + UOPS_RETIRED.MACRO_FUSED - INST_RETIRED.ANY ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) * MEM_INST_RETIRED.ANY / INST_RETIRED.ANY ) + ( ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) + UOPS_RETIRED.MACRO_FUSED - INST_RETIRED.ANY ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) * UOPS_RETIRED.MACRO_FUSED / ( UOPS_RETIRED.RETIRE_SLOTS ) ) + ( ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) + UOPS_RETIRED.MACRO_FUSED - INST_RETIRED.ANY ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) * ( BR_INST_RETIRED.ALL_BRANCHES - UOPS_RETIRED.MACRO_FUSED ) / ( UOPS_RETIRED.RETIRE_SLOTS ) ) + ( ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) + UOPS_RETIRED.MACRO_FUSED - INST_RETIRED.ANY ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) * INST_RETIRED.NOP / ( UOPS_RETIRED.RETIRE_SLOTS ) ) ) ) )",
+ "MetricGroup": "Pipeline;TmaL3;m_tma_light_operations_percent",
+ "MetricName": "tma_other_light_ops_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or microcoded sequences. This highly-correlates with the uop length of these instructions/sequences.",
+ "MetricExpr": "100 * ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) + UOPS_RETIRED.MACRO_FUSED - INST_RETIRED.ANY ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) )",
+ "MetricGroup": "Retire;TmaL2;m_tma_retiring_percent",
+ "MetricName": "tma_heavy_operations_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring instructions that are decoded into two or up to ([SNB+] four; [ADL+] five) uops. This highly-correlates with the number of uops in such instructions.",
+ "MetricExpr": "100 * ( ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) + UOPS_RETIRED.MACRO_FUSED - INST_RETIRED.ANY ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) / UOPS_ISSUED.ANY ) * IDQ.MS_UOPS / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) )",
+ "MetricGroup": "TmaL3;m_tma_heavy_operations_percent",
+ "MetricName": "tma_few_uops_instructions_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided.",
+ "MetricExpr": "100 * ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) / UOPS_ISSUED.ANY ) * IDQ.MS_UOPS / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) )",
+ "MetricGroup": "MicroSeq;TmaL3;m_tma_heavy_operations_percent",
+ "MetricName": "tma_microcode_sequencer_percent",
+ "ScaleUnit": "1%"
}
]
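The metric objects added above all follow the same schema used across tools/perf/pmu-events: a "MetricName", a semicolon-separated "MetricGroup" list, a "MetricExpr" written in terms of raw event names, and a "ScaleUnit". As a rough illustration only (not part of this patch; the file path below is an assumption), a short Python sketch can summarize the TMA hierarchy by grouping entries on their "MetricGroup" field:

    #!/usr/bin/env python3
    # Sketch: list metric names per MetricGroup from a perf pmu-events file.
    # The path is an example/assumption, not taken from this patch.
    import json
    from collections import defaultdict

    path = "tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json"
    with open(path) as f:
        entries = json.load(f)

    by_group = defaultdict(list)
    for entry in entries:
        # Skip plain event definitions; only metrics carry a MetricName.
        if "MetricName" not in entry:
            continue
        for group in entry.get("MetricGroup", "").split(";"):
            by_group[group].append(entry["MetricName"])

    for group in sorted(by_group):
        print(group)
        for name in sorted(by_group[group]):
            print("    " + name)

Once such tables are merged, the metric and group names can be requested directly from the command line, e.g. "perf stat -M TmaL1 -a -- sleep 1" (assuming a perf build that includes these tables).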
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/floating-point.json b/tools/perf/pmu-events/arch/x86/cascadelakex/floating-point.json
index 41a3d13fc4b2..48bb1b38dde6 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/floating-point.json
@@ -120,4 +120,4 @@
"SampleAfterValue": "100003",
"UMask": "0x1e"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/frontend.json b/tools/perf/pmu-events/arch/x86/cascadelakex/frontend.json
index ecce4273ae52..8633ee406813 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/frontend.json
@@ -527,4 +527,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/other.json b/tools/perf/pmu-events/arch/x86/cascadelakex/other.json
index 60d8a99813b9..6baa338e72f1 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/other.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/other.json
@@ -40,6 +40,69 @@
"UMask": "0x40"
},
{
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IFWDFE",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xEF",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IFWDFE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IFWDM",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xEF",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IFWDM",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IHITFSE",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xEF",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IHITFSE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IHITI",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xEF",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IHITI",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_SFWDFE",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xEF",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_SFWDFE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_SFWDM",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xEF",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_SFWDM",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_SHITFSE",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xEF",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_SHITFSE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
"BriefDescription": "Number of hardware interrupts received by the processor.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json b/tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json
index 79fda10ec4bb..f085b9145952 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json
@@ -166,6 +166,17 @@
"UMask": "0x20"
},
{
+ "BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.RET",
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
+ },
+ {
"BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json
index a416515d41da..6facfb244cd3 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json
@@ -1,5 +1,32 @@
[
{
+ "BriefDescription": "DRAM Page Activate commands sent due to a write request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_M_ACT_COUNT.WR",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM Read CAS Commands issued (does not include underfills)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_REG",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Underfill Read CAS Commands issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
"BriefDescription": "read requests to memory controller. Derived from unc_m_cas_count.rd",
"Counter": "0,1,2,3",
"EventCode": "0x4",
@@ -20,6 +47,15 @@
"Unit": "iMC"
},
{
+ "BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Write Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR_WMM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
"BriefDescription": "write requests to memory controller. Derived from unc_m_cas_count.wr",
"Counter": "0,1,2,3",
"EventCode": "0x4",
@@ -40,6 +76,15 @@
"Unit": "iMC"
},
{
+ "BriefDescription": "All DRAM CAS Commands issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.ALL",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
"BriefDescription": "Memory controller clock ticks",
"Counter": "0,1,2,3",
"EventName": "UNC_M_CLOCKTICKS",
@@ -85,18 +130,90 @@
"Unit": "iMC"
},
{
- "BriefDescription": "Write requests allocated in the PMM Write Pending Queue for Intel Optane DC persistent memory",
+ "BriefDescription": "Read Pending Queue Allocations",
"Counter": "0,1,2,3",
- "EventCode": "0xE3",
- "EventName": "UNC_M_PMM_RPQ_INSERTS",
+ "EventCode": "0x10",
+ "EventName": "UNC_M_RPQ_INSERTS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_M_RPQ_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All hits to Near Memory(DRAM cache) in Memory Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M_TAGCHK.HIT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All Clean line misses to Near Memory(DRAM cache) in Memory Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M_TAGCHK.MISS_CLEAN",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All dirty line misses to Near Memory(DRAM cache) in Memory Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M_TAGCHK.MISS_DIRTY",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_M_WPQ_INSERTS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x81",
+ "EventName": "UNC_M_WPQ_OCCUPANCY",
"PerPkg": "1",
"Unit": "iMC"
},
{
+ "BriefDescription": "Read Pending Queue Occupancy of all read requests for Intel Optane DC persistent memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M_PMM_RPQ_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Intel Optane DC persistent memory read latency (ns). Derived from unc_m_pmm_rpq_occupancy.all",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M_PMM_READ_LATENCY",
+ "MetricExpr": "UNC_M_PMM_RPQ_OCCUPANCY.ALL / UNC_M_PMM_RPQ_INSERTS / UNC_M_CLOCKTICKS",
+ "MetricName": "UNC_M_PMM_READ_LATENCY",
+ "PerPkg": "1",
+ "ScaleUnit": "6000000000ns",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
"BriefDescription": "Write requests allocated in the PMM Write Pending Queue for Intel Optane DC persistent memory",
"Counter": "0,1,2,3",
- "EventCode": "0xE7",
- "EventName": "UNC_M_PMM_WPQ_INSERTS",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M_PMM_RPQ_INSERTS",
"PerPkg": "1",
"Unit": "iMC"
},
@@ -110,15 +227,61 @@
"Unit": "iMC"
},
{
- "BriefDescription": "Intel Optane DC persistent memory bandwidth read (MB/sec)",
+ "BriefDescription": "Intel Optane DC persistent memory bandwidth total (MB/sec). Derived from unc_m_pmm_rpq_inserts",
"Counter": "0,1,2,3",
"EventCode": "0xE3",
- "EventName": "UNC_M_PMM_RPQ_INSERTS",
+ "EventName": "UNC_M_PMM_BANDWIDTH.TOTAL",
+ "MetricExpr": "UNC_M_PMM_RPQ_INSERTS + UNC_M_PMM_WPQ_INSERTS",
+ "MetricName": "UNC_M_PMM_BANDWIDTH.TOTAL",
"PerPkg": "1",
"ScaleUnit": "6.103515625E-5MB/sec",
"Unit": "iMC"
},
{
+ "BriefDescription": "All commands for Intel Optane DC persistent memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Regular reads(RPQ) commands for Intel Optane DC persistent memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.RD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write commands for Intel Optane DC persistent memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.WR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Underfill read commands for Intel Optane DC persistent memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.UFILL_RD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write requests allocated in the PMM Write Pending Queue for Intel Optane DC persistent memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE7",
+ "EventName": "UNC_M_PMM_WPQ_INSERTS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
"BriefDescription": "Intel Optane DC persistent memory bandwidth write (MB/sec). Derived from unc_m_pmm_wpq_inserts",
"Counter": "0,1,2,3",
"EventCode": "0xE7",
@@ -128,233 +291,4259 @@
"Unit": "iMC"
},
{
- "BriefDescription": "Intel Optane DC persistent memory bandwidth write (MB/sec)",
+ "BriefDescription": "Write Pending Queue Occupancy of all write requests for Intel Optane DC persistent memory",
"Counter": "0,1,2,3",
- "EventCode": "0xE7",
- "EventName": "UNC_M_PMM_WPQ_INSERTS",
+ "EventCode": "0xE4",
+ "EventName": "UNC_M_PMM_WPQ_OCCUPANCY.ALL",
"PerPkg": "1",
- "ScaleUnit": "6.103515625E-5MB/sec",
+ "UMask": "0x1",
"Unit": "iMC"
},
{
- "BriefDescription": "Intel Optane DC persistent memory bandwidth total (MB/sec). Derived from unc_m_pmm_rpq_inserts",
+ "BriefDescription": "DRAM Activate Count; Activate due to Read",
"Counter": "0,1,2,3",
- "EventCode": "0xE3",
- "EventName": "UNC_M_PMM_BANDWIDTH.TOTAL",
- "MetricExpr": "UNC_M_PMM_RPQ_INSERTS + UNC_M_PMM_WPQ_INSERTS",
- "MetricName": "UNC_M_PMM_BANDWIDTH.TOTAL",
+ "EventCode": "0x1",
+ "EventName": "UNC_M_ACT_COUNT.RD",
"PerPkg": "1",
- "ScaleUnit": "6.103515625E-5MB/sec",
+ "UMask": "0x1",
"Unit": "iMC"
},
{
- "BriefDescription": "Intel Optane DC persistent memory bandwidth total (MB/sec)",
+ "BriefDescription": "DRAM Activate Count; Activate due to Bypass",
"Counter": "0,1,2,3",
- "EventCode": "0xE3",
- "EventName": "UNC_M_PMM_RPQ_INSERTS",
- "MetricExpr": "UNC_M_PMM_RPQ_INSERTS + UNC_M_PMM_WPQ_INSERTS",
- "MetricName": "UNC_M_PMM_BANDWIDTH.TOTAL",
+ "EventCode": "0x1",
+ "EventName": "UNC_M_ACT_COUNT.BYP",
"PerPkg": "1",
- "ScaleUnit": "6.103515625E-5MB/sec",
+ "UMask": "0x8",
"Unit": "iMC"
},
{
- "BriefDescription": "Read Pending Queue Occupancy of all read requests for Intel Optane DC persistent memory",
+ "BriefDescription": "ACT command issued by 2 cycle bypass",
"Counter": "0,1,2,3",
- "EventCode": "0xE0",
- "EventName": "UNC_M_PMM_RPQ_OCCUPANCY.ALL",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M_BYP_CMDS.ACT",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
- "BriefDescription": "Intel Optane DC persistent memory read latency (ns). Derived from unc_m_pmm_rpq_occupancy.all",
+ "BriefDescription": "CAS command issued by 2 cycle bypass",
"Counter": "0,1,2,3",
- "EventCode": "0xE0",
- "EventName": "UNC_M_PMM_READ_LATENCY",
- "MetricExpr": "UNC_M_PMM_RPQ_OCCUPANCY.ALL / UNC_M_PMM_RPQ_INSERTS / UNC_M_CLOCKTICKS",
- "MetricName": "UNC_M_PMM_READ_LATENCY",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M_BYP_CMDS.CAS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PRE command issued by 2 cycle bypass",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M_BYP_CMDS.PRE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Read Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR_RMM",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; Read CAS issued in WMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_WMM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; Read CAS issued in RMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_RMM",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; Read CAS issued in Read ISOCH Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; Read CAS issued in Write ISOCH Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge All Commands",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_M_DRAM_PRE_ALL",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "ECC Correctable Errors",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_M_ECC_CORRECTABLE_ERRORS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Read Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.READ",
"PerPkg": "1",
- "ScaleUnit": "6000000000ns",
"UMask": "0x1",
"Unit": "iMC"
},
{
- "BriefDescription": "Intel Optane DC persistent memory read latency (ns)",
+ "BriefDescription": "Cycles in a Major Mode; Write Major Mode",
"Counter": "0,1,2,3",
- "EventCode": "0xE0",
- "EventName": "UNC_M_PMM_RPQ_OCCUPANCY.ALL",
- "MetricExpr": "UNC_M_PMM_RPQ_OCCUPANCY.ALL / UNC_M_PMM_RPQ_INSERTS / UNC_M_CLOCKTICKS",
- "MetricName": "UNC_M_PMM_READ_LATENCY",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.WRITE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Partial Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.PARTIAL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Isoch Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Channel DLLOFF Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_M_POWER_CHANNEL_DLLOFF",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK0",
"PerPkg": "1",
- "ScaleUnit": "6000000000ns",
"UMask": "0x1",
"Unit": "iMC"
},
{
- "BriefDescription": "DRAM Page Activate commands sent due to a write request",
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
"Counter": "0,1,2,3",
- "EventCode": "0x1",
- "EventName": "UNC_M_ACT_COUNT.WR",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK1",
"PerPkg": "1",
- "PublicDescription": "Counts DRAM Page Activate commands sent on this channel due to a write request to the iMC (Memory Controller). Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS (Column Access Select) command.",
"UMask": "0x2",
"Unit": "iMC"
},
{
- "BriefDescription": "All DRAM CAS Commands issued",
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
"Counter": "0,1,2,3",
- "EventCode": "0x4",
- "EventName": "UNC_M_CAS_COUNT.ALL",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Critical Throttle Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x86",
+ "EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_POWER_PCU_THROTTLING",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x42",
+ "EventName": "UNC_M_POWER_PCU_THROTTLING",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Preemption Count; Read over Read Preemption",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_M_PREEMPTION.RD_PREEMPT_RD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Preemption Count; Read over Write Preemption",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_M_PREEMPTION.RD_PREEMPT_WR",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to timer expiration",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_M_PRE_COUNT.PAGE_CLOSE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Pre-charge for writes",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_M_PRE_COUNT.WR",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to bypass",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_M_PRE_COUNT.BYP",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with LOW priority",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.LOW",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with MEDIUM priority",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.MED",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with HIGH priority",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.HIGH",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with PANIC NON ISOCH priority (starved)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.PANIC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK15",
"PerPkg": "1",
- "PublicDescription": "Counts all CAS (Column Address Select) commands issued to DRAM per memory channel. CAS commands are issued to specify the address to read or write on DRAM, so this event increments for every read and write. This event counts whether AutoPrecharge (which closes the DRAM Page automatically after a read/write) is enabled or not.",
"UMask": "0xF",
"Unit": "iMC"
},
{
- "BriefDescription": "All DRAM Read CAS Commands issued (does not include underfills)",
+ "BriefDescription": "RD_CAS Access to Rank 0; All Banks",
"Counter": "0,1,2,3",
- "EventCode": "0x4",
- "EventName": "UNC_M_CAS_COUNT.RD_REG",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK1",
"PerPkg": "1",
- "PublicDescription": "Counts CAS (Column Access Select) regular read commands issued to DRAM on a per channel basis. CAS commands are issued to specify the address to read or write on DRAM, and this event increments for every regular read. This event only counts regular reads and does not includes underfill reads due to partial write requests. This event counts whether AutoPrecharge (which closes the DRAM Page automatically after a read/write) is enabled or not.",
"UMask": "0x1",
"Unit": "iMC"
},
{
- "BriefDescription": "DRAM Underfill Read CAS Commands issued",
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 2",
"Counter": "0,1,2,3",
- "EventCode": "0x4",
- "EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK2",
"PerPkg": "1",
- "PublicDescription": "Counts CAS (Column Access Select) underfill read commands issued to DRAM due to a partial write, on a per channel basis. CAS commands are issued to specify the address to read or write on DRAM, and this command counts underfill reads. Partial writes must be completed by first reading in the underfill from DRAM and then merging in the partial write data before writing the full line back to DRAM. This event will generally count about the same as the number of partial writes, but may be slightly less because of partials hitting in the WPQ (due to a previous write request).",
"UMask": "0x2",
"Unit": "iMC"
},
{
- "BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Write Major Mode",
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 3",
"Counter": "0,1,2,3",
- "EventCode": "0x4",
- "EventName": "UNC_M_CAS_COUNT.WR_WMM",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK4",
"PerPkg": "1",
- "PublicDescription": "Counts the total number or DRAM Write CAS commands issued on this channel while in Write-Major-Mode.",
"UMask": "0x4",
"Unit": "iMC"
},
{
- "BriefDescription": "All commands for Intel Optane DC persistent memory",
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 5",
"Counter": "0,1,2,3",
- "EventCode": "0xEA",
- "EventName": "UNC_M_PMM_CMD1.ALL",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK1",
"PerPkg": "1",
- "PublicDescription": "All commands for Intel Optane DC persistent memory",
"UMask": "0x1",
"Unit": "iMC"
},
{
- "BriefDescription": "Regular reads(RPQ) commands for Intel Optane DC persistent memory",
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 2",
"Counter": "0,1,2,3",
- "EventCode": "0xEA",
- "EventName": "UNC_M_PMM_CMD1.RD",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK2",
"PerPkg": "1",
- "PublicDescription": "All Reads - RPQ or Ufill",
"UMask": "0x2",
"Unit": "iMC"
},
{
- "BriefDescription": "Underfill read commands for Intel Optane DC persistent memory",
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 3",
"Counter": "0,1,2,3",
- "EventCode": "0xEA",
- "EventName": "UNC_M_PMM_CMD1.UFILL_RD",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK8",
"PerPkg": "1",
- "PublicDescription": "Underfill reads",
"UMask": "0x8",
"Unit": "iMC"
},
{
- "BriefDescription": "Write commands for Intel Optane DC persistent memory",
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 9",
"Counter": "0,1,2,3",
- "EventCode": "0xEA",
- "EventName": "UNC_M_PMM_CMD1.WR",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK4",
"PerPkg": "1",
- "PublicDescription": "Writes",
"UMask": "0x4",
"Unit": "iMC"
},
{
- "BriefDescription": "Write Pending Queue Occupancy of all write requests for Intel Optane DC persistent memory",
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 5",
"Counter": "0,1,2,3",
- "EventCode": "0xE4",
- "EventName": "UNC_M_PMM_WPQ_OCCUPANCY.ALL",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK1",
"PerPkg": "1",
- "PublicDescription": "Write Pending Queue Occupancy of all write requests for Intel Optane DC persistent memory",
"UMask": "0x1",
"Unit": "iMC"
},
{
- "BriefDescription": "Read Pending Queue Allocations",
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 2",
"Counter": "0,1,2,3",
- "EventCode": "0x10",
- "EventName": "UNC_M_RPQ_INSERTS",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK2",
"PerPkg": "1",
- "PublicDescription": "Counts the number of read requests allocated into the Read Pending Queue (RPQ). This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. The requests deallocate after the read CAS command has been issued to DRAM. This event counts both Isochronous and non-Isochronous requests which were issued to the RPQ.",
+ "UMask": "0x2",
"Unit": "iMC"
},
{
- "BriefDescription": "Read Pending Queue Occupancy",
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 3",
"Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "UNC_M_RPQ_OCCUPANCY",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK3",
"PerPkg": "1",
- "PublicDescription": "Counts the number of entries in the Read Pending Queue (RPQ) at each cycle. This can then be used to calculate both the average occupancy of the queue (in conjunction with the number of cycles not empty) and the average latency in the queue (in conjunction with the number of allocations). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate from the RPQ after the CAS command has been issued to memory.",
+ "UMask": "0x3",
"Unit": "iMC"
},
{
- "BriefDescription": "All hits to Near Memory(DRAM cache) in Memory Mode",
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 4",
"Counter": "0,1,2,3",
- "EventCode": "0xD3",
- "EventName": "UNC_M_TAGCHK.HIT",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK1",
"PerPkg": "1",
- "PublicDescription": "Tag Check; Hit",
"UMask": "0x1",
"Unit": "iMC"
},
{
- "BriefDescription": "All Clean line misses to Near Memory(DRAM cache) in Memory Mode",
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 2",
"Counter": "0,1,2,3",
- "EventCode": "0xD3",
- "EventName": "UNC_M_TAGCHK.MISS_CLEAN",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK2",
"PerPkg": "1",
- "PublicDescription": "Tag Check; Clean",
"UMask": "0x2",
"Unit": "iMC"
},
{
- "BriefDescription": "All dirty line misses to Near Memory(DRAM cache) in Memory Mode",
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 3",
"Counter": "0,1,2,3",
- "EventCode": "0xD3",
- "EventName": "UNC_M_TAGCHK.MISS_DIRTY",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK4",
"PerPkg": "1",
- "PublicDescription": "Tag Check; Dirty",
"UMask": "0x4",
"Unit": "iMC"
},
{
- "BriefDescription": "Write Pending Queue Allocations",
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 5",
"Counter": "0,1,2,3",
- "EventCode": "0x20",
- "EventName": "UNC_M_WPQ_INSERTS",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK5",
"PerPkg": "1",
- "PublicDescription": "Counts the number of writes requests allocated into the Write Pending Queue (WPQ). The WPQ is used to schedule writes out to the memory controller and to track the requests. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC (Memory Controller). The write requests deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have 'posted' to the iMC.",
+ "UMask": "0x5",
"Unit": "iMC"
},
{
- "BriefDescription": "Write Pending Queue Occupancy",
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 6",
"Counter": "0,1,2,3",
- "EventCode": "0x81",
- "EventName": "UNC_M_WPQ_OCCUPANCY",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Full Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_M_RPQ_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_M_RPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses; Read Accepts",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M_SB_ACCESSES.RD_ACCEPTS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses; Read Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M_SB_ACCESSES.RD_REJECTS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses; NM read completions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M_SB_ACCESSES.WR_ACCEPTS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses; NM write completions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M_SB_ACCESSES.WR_REJECTS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses; FM read completions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M_SB_ACCESSES.NM_RD_CMPS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses; FM write completions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M_SB_ACCESSES.NM_WR_CMPS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses; Write Accepts",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M_SB_ACCESSES.FM_RD_CMPS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses; Write Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M_SB_ACCESSES.FM_WR_CMPS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Alloc",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD9",
+ "EventName": "UNC_M_SB_CANARY.ALLOC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Dealloc",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD9",
+ "EventName": "UNC_M_SB_CANARY.DEALLOC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Reject",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD9",
+ "EventName": "UNC_M_SB_CANARY.REJ",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Valid",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD9",
+ "EventName": "UNC_M_SB_CANARY.VLD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Near Mem Read Starved",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD9",
+ "EventName": "UNC_M_SB_CANARY.NMRD_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Near Mem Write Starved",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD9",
+ "EventName": "UNC_M_SB_CANARY.NMWR_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Far Mem Read Starved",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD9",
+ "EventName": "UNC_M_SB_CANARY.FMRD_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Far Mem Write Starved",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD9",
+ "EventName": "UNC_M_SB_CANARY.FMWR_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Cycles Full",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD1",
+ "EventName": "UNC_M_SB_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Cycles Not-Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M_SB_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts; Reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M_SB_INSERTS.RDS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts; Writes",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M_SB_INSERTS.WRS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts; Block region reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M_SB_INSERTS.BLOCK_RDS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts; Block region writes",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M_SB_INSERTS.BLOCK_WRS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts; Dealloc all commands (for error flows)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M_SB_INSERTS.DEALLOC",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts; Patrol inserts",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M_SB_INSERTS.PATROL",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Occupancy; Reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M_SB_OCCUPANCY.RDS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Occupancy; Writes",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M_SB_OCCUPANCY.WRS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Occupancy; Block region reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M_SB_OCCUPANCY.BLOCK_RDS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Occupancy; Block region writes",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M_SB_OCCUPANCY.BLOCK_WRS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Occupancy; Patrol",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M_SB_OCCUPANCY.PATROL",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of Scoreboard Requests Rejected; NM requests rejected due to set conflict",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M_SB_REJECT.NM_SET_CNFLT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of Scoreboard Requests Rejected; FM requests rejected due to full address conflict",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M_SB_REJECT.FM_ADDR_CNFLT",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of Scoreboard Requests Rejected; Patrol requests rejected due to set conflict",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M_SB_REJECT.PATROL_SET_CNFLT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Near Mem Read - Set",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.NMRD_SET",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Far Mem Read - Set",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.FMRD_SET",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Near Mem Write - Set",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.NMWR_SET",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Far Mem Write - Set",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.FMWR_SET",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Near Mem Read - Clear",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.NMRD_CLR",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Far Mem Read - Clear",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.FMRD_CLR",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Near Mem Write - Clear",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.NMWR_CLR",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Far Mem Write - Clear",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.FMWR_CLR",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Near Mem Read",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD8",
+ "EventName": "UNC_M_SB_STRV_OCC.NMRD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Far Mem Read",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD8",
+ "EventName": "UNC_M_SB_STRV_OCC.FMRD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Near Mem Write",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD8",
+ "EventName": "UNC_M_SB_STRV_OCC.NMWR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Far Mem Write",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD8",
+ "EventName": "UNC_M_SB_STRV_OCC.FMWR",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.NEW",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xDD",
+ "EventName": "UNC_M_SB_TAGGED.NEW",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.RD_HIT",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xDD",
+ "EventName": "UNC_M_SB_TAGGED.RD_HIT",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.RD_MISS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xDD",
+ "EventName": "UNC_M_SB_TAGGED.RD_MISS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.DDR4_CMP",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xDD",
+ "EventName": "UNC_M_SB_TAGGED.DDR4_CMP",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.OCC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xDD",
+ "EventName": "UNC_M_SB_TAGGED.OCC",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Transition from WMM to RMM because of low threshold; Transition from WMM to RMM because of starve counter",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_M_WMM_TO_RMM.LOW_THRESH",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Transition from WMM to RMM because of low threshold",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_M_WMM_TO_RMM.STARVE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Transition from WMM to RMM because of low threshold",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_M_WMM_TO_RMM.VMSE_RETRY",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Full Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_M_WPQ_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_M_WPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_M_WPQ_READ_HIT",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_M_WPQ_WRITE_HIT",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Not getting the requested Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_M_WRONG_MM",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Clockticks in the Memory Controller using a dedicated 48-bit Fixed Counter",
+ "Counter": "FIXED",
+ "EventCode": "0xff",
+ "EventName": "UNC_M_CLOCKTICKS_F",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M_PMM_RPQ_OCCUPANCY.GNT_WAIT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M_PMM_RPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Queue Cycles Full",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M_PMM_RPQ_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RPQ GNTs",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.RPQ_GNTS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Underfill GNTs",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.WPQ_GNTS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Misc GNTs",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.MISC_GNT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Misc Commands (error, flow ACKs)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.MISC",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Opportunistic Reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xEB",
+ "EventName": "UNC_M_PMM_CMD2.OPP_RD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Expected No data packet (ERID matched NDP encoding)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xEB",
+ "EventName": "UNC_M_PMM_CMD2.NODATA_EXP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Unexpected No data packet (ERID matched a Read, but data was a NDP)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xEB",
+ "EventName": "UNC_M_PMM_CMD2.NODATA_UNEXP",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Requests - Slot 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xEB",
+ "EventName": "UNC_M_PMM_CMD2.REQS_SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Requests - Slot 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xEB",
+ "EventName": "UNC_M_PMM_CMD2.REQS_SLOT1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM ECC Errors",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xEB",
+ "EventName": "UNC_M_PMM_CMD2.PMM_ECC_ERROR",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM ERID detectable parity error",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xEB",
+ "EventName": "UNC_M_PMM_CMD2.PMM_ERID_ERROR",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Major Mode; Cycles PMM is in Read Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xEC",
+ "EventName": "UNC_M_PMM_MAJMODE1.RD_CYC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Major Mode; Cycles PMM is in Partial Write Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xEC",
+ "EventName": "UNC_M_PMM_MAJMODE1.PARTIAL_WR_CYC",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xEC",
+ "EventName": "UNC_M_PMM_MAJMODE1.PARTIAL_WR_ENTER",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xEC",
+ "EventName": "UNC_M_PMM_MAJMODE1.PARTIAL_WR_EXIT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_MAJMODE2.DRAM_CYC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xED",
+ "EventName": "UNC_M_MAJMODE2.DRAM_CYC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_MAJMODE2.DRAM_ENTER",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xED",
+ "EventName": "UNC_M_MAJMODE2.DRAM_ENTER",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_MAJMODE2.PMM_ENTER",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xED",
+ "EventName": "UNC_M_MAJMODE2.PMM_ENTER",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Write Queue Cycles Full",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE6",
+ "EventName": "UNC_M_PMM_WPQ_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Write Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M_PMM_WPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE4",
+ "EventName": "UNC_M_PMM_WPQ_OCCUPANCY.CAS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE4",
+ "EventName": "UNC_M_PMM_WPQ_OCCUPANCY.PWR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_PMM_WPQ_PCOMMIT",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE8",
+ "EventName": "UNC_M_PMM_WPQ_PCOMMIT",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_PMM_WPQ_PCOMMIT_CYC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE9",
+ "EventName": "UNC_M_PMM_WPQ_PCOMMIT_CYC",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Major Mode; Cycles PMM is in Write Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xEC",
+ "EventName": "UNC_M_PMM_MAJMODE1.WR_CYC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_MAJMODE2.PMM_CYC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xED",
+ "EventName": "UNC_M_MAJMODE2.PMM_CYC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.PMM0_CMP",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xDD",
+ "EventName": "UNC_M_SB_TAGGED.PMM0_CMP",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.PMM1_CMP",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xDD",
+ "EventName": "UNC_M_SB_TAGGED.PMM1_CMP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.PMM2_CMP",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xDD",
+ "EventName": "UNC_M_SB_TAGGED.PMM2_CMP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts; Persistent Mem writes",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M_SB_INSERTS.PMM_WRS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Occupancy; Persistent Mem writes",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M_SB_OCCUPANCY.PMM_WRS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Occupancy; Persistent Mem reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M_SB_OCCUPANCY.PMM_RDS",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts; Persistent Mem reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M_SB_INSERTS.PMM_RDS",
"PerPkg": "1",
- "PublicDescription": "Counts the number of entries in the Write Pending Queue (WPQ) at each cycle. This can then be used to calculate both the average queue occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The WPQ is used to schedule writes out to the memory controller and to track the requests. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC (memory controller). They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have 'posted' to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies. So, we provide filtering based on if the request has posted or not. By using the 'not posted' filter, we can track how long writes spent in the iMC before completions were sent to the HA. The 'posted' filter, on the other hand, provides information about how much queueing is actually happenning in the iMC for writes before they are actually issued to memory. High average occupancies will generally coincide with high write major mode counts. Is there a filter of sorts???",
+ "UMask": "0x04",
"Unit": "iMC"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-other.json b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-other.json
index aa460d0c4851..a29bba230f49 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-other.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-other.json
@@ -1,87 +1,774 @@
[
{
- "BriefDescription": "Uncore cache clock ticks",
+ "BriefDescription": "Traffic in which the M2M to iMC Bypass was not taken",
"Counter": "0,1,2,3",
- "EventName": "UNC_CHA_CLOCKTICKS",
+ "EventCode": "0x22",
+ "EventName": "UNC_M2M_BYPASS_M2M_Egress.NOT_TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles when direct to core mode (which bypasses the CHA) was disabled",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_DIRSTATE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages sent direct to core (bypassing the CHA)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2M_DIRECT2CORE_TAKEN",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of reads in which direct to core transaction were overridden",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_DIRECT2CORE_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookups (any state found)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.ANY",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in I state)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_I",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in S state)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_S",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookups (cacheline found in A state)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_A",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from/to Any state",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.ANY",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from I to S",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.I2S",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from I to A",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.I2A",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from S to I",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.S2I",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from S to A",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.S2A",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from A to I",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.A2I",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from A to S",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.A2S",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Reads to iMC issued at Normal Priority (Non-Isochronous)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.NORMAL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Reads to iMC issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Partial Non-Isochronous writes to the iMC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.PARTIAL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Writes to iMC issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.ALL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC; All, regardless of priority",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.NI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefecth requests that got turn into a demand request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_PROMOTIONS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Inserts into the Memory Controller Prefetch Queue",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x57",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_M2M_RxC_AD_INSERTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_M2M_RxC_AD_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_M2M_RxC_BL_INSERTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_M2M_RxC_BL_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_M2M_TxC_AD_INSERTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA",
+ "EventName": "UNC_M2M_TxC_AD_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Allocations; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2M_TxC_BL_INSERTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Occupancy; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2M_TxC_BL_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of reads in which direct to Intel UPI transactions were overridden",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_CREDITS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles when direct to Intel UPI was disabled",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_DIRSTATE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages sent direct to the Intel UPI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x26",
+ "EventName": "UNC_M2M_DIRECT2UPI_TAKEN",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of reads that a message sent direct2 Intel UPI was overridden",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_M2M_DIRECT2UPI_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Dirty line read hits(Regular and RFO) to Near Memory(DRAM cache) in Memory Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M2M_TAG_HIT.NM_RD_HIT_DIRTY",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Clean line underfill read hits to Near Memory(DRAM cache) in Memory Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M2M_TAG_HIT.NM_UFILL_HIT_CLEAN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Dirty line underfill read hits to Near Memory(DRAM cache) in Memory Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M2M_TAG_HIT.NM_UFILL_HIT_DIRTY",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Read requests to Intel Optane DC persistent memory issued to the iMC from M2M",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.TO_PMM",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write requests to Intel Optane DC persistent memory issued to the iMC from M2M",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.TO_PMM",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CLOCKTICKS",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventName": "UNC_C_CLOCKTICKS",
"PerPkg": "1",
"Unit": "CHA"
},
{
- "BriefDescription": "LLC misses - Uncacheable reads (from cpu) . Derived from unc_cha_tor_inserts.ia_miss",
+ "BriefDescription": "Core Cross Snoops Issued; Multiple Core Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.CORE_GTONE",
+ "PerPkg": "1",
+ "UMask": "0x42",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; Multiple Eviction",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EVICT_GTONE",
+ "PerPkg": "1",
+ "UMask": "0x82",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory state lookups; Snoop Needed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x53",
+ "EventName": "UNC_CHA_DIR_LOOKUP.SNP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory state lookups; Snoop Not Needed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x53",
+ "EventName": "UNC_CHA_DIR_LOOKUP.NO_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory state updates; Directory Updated memory write from the HA pipe",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "UNC_CHA_DIR_UPDATE.HA",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory state updates; Directory Updated memory write from TOR pipe",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "UNC_CHA_DIR_UPDATE.TOR",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read request from a remote socket which hit in the HitMe Cache to a line In the E state",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5F",
+ "EventName": "UNC_CHA_HITME_HIT.EX_RDS",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.DATA_READ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.DATA_READ",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.REMOTE_SNOOP",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.REMOTE_SNOOP",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_M",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.M_STATE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_E",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.E_STATE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_S",
"Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.S_STATE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_F",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.F_STATE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Number of times that an RFO hit in S state",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.RFO_HIT_S",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "read requests from home agent",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.READS",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "write requests from home agent",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.WRITES",
+ "PerPkg": "1",
+ "UMask": "0x0C",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "read requests from local home agent",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.READS_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "write requests from local home agent",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.WRITES_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Local requests for exclusive ownership of a cache line without receiving data",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.INVITOE_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Local requests for exclusive ownership of a cache line without receiving data",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.INVITOE_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RspIFwd Snoop Responses Received",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPIFWD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RspSFwd Snoop Responses Received",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPSFWD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Rsp*Fwd*WB Snoop Responses Received",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSP_FWD_WB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSPCNFLCTS",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x5C",
+ "EventName": "UNC_H_SNOOP_RESP.RSPCNFLCT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.UNCACHEABLE",
- "Filter": "config1=0x40e33",
+ "EventName": "UNC_C_TOR_INSERTS.IRQ",
"PerPkg": "1",
- "UMask": "0x21",
+ "UMask": "0x31",
"Unit": "CHA"
},
{
- "BriefDescription": "LLC misses - Uncacheable reads (from cpu) ",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IA",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x31",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
"Counter": "0,1,2,3",
+ "Deprecated": "1",
"EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
- "Filter": "config1=0x40e33",
+ "EventName": "UNC_C_TOR_INSERTS.REM_ALL",
+ "PerPkg": "1",
+ "UMask": "0x30",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_FAST_ASSERTED.HORZ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xA5",
+ "EventName": "UNC_C_FAST_ASSERTED",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations; IRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; PhyAddr Match",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Occupancy; IRQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_CHA_RxC_OCCUPANCY.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA_HIT",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.IRQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA_MISS",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.IRQ_MISS",
"PerPkg": "1",
"UMask": "0x21",
"Unit": "CHA"
},
{
- "BriefDescription": "MMIO reads. Derived from unc_cha_tor_inserts.ia_miss",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IO_HIT",
"Counter": "0,1,2,3",
+ "Deprecated": "1",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.MMIO_READ",
- "Filter": "config1=0x40040e33",
+ "EventName": "UNC_C_TOR_INSERTS.PRQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IO_MISS",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.PRQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IA_HIT",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.IRQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IA_MISS",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.IRQ_MISS",
"PerPkg": "1",
"UMask": "0x21",
"Unit": "CHA"
},
{
- "BriefDescription": "MMIO reads",
+ "BriefDescription": "TOR Inserts; Hits from Local IO",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Misses from Local IO",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; All from Local iA",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA",
+ "PerPkg": "1",
+ "UMask": "0x31",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Hits from Local iA",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC",
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
- "Filter": "config1=0x40040e33",
"PerPkg": "1",
"UMask": "0x21",
"Unit": "CHA"
},
{
- "BriefDescription": "MMIO writes. Derived from unc_cha_tor_inserts.ia_miss",
+ "BriefDescription": "LLC misses - Uncacheable reads (from cpu) . Derived from unc_cha_tor_inserts.ia_miss",
"Counter": "0,1,2,3",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.MMIO_WRITE",
- "Filter": "config1=0x40041e33",
+ "EventName": "LLC_MISSES.UNCACHEABLE",
+ "Filter": "config1=0x40e33",
"PerPkg": "1",
"UMask": "0x21",
"Unit": "CHA"
},
{
- "BriefDescription": "MMIO writes",
+ "BriefDescription": "MMIO reads. Derived from unc_cha_tor_inserts.ia_miss",
"Counter": "0,1,2,3",
"EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
- "Filter": "config1=0x40041e33",
+ "EventName": "LLC_MISSES.MMIO_READ",
+ "Filter": "config1=0x40040e33",
"PerPkg": "1",
"UMask": "0x21",
"Unit": "CHA"
},
{
- "BriefDescription": "Streaming stores (full cache line). Derived from unc_cha_tor_inserts.ia_miss",
+ "BriefDescription": "MMIO writes. Derived from unc_cha_tor_inserts.ia_miss",
"Counter": "0,1,2,3",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.STREAMING_FULL",
- "Filter": "config1=0x41833",
+ "EventName": "LLC_MISSES.MMIO_WRITE",
+ "Filter": "config1=0x40041e33",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
"UMask": "0x21",
"Unit": "CHA"
},
{
- "BriefDescription": "Streaming stores (full cache line)",
+ "BriefDescription": "Streaming stores (full cache line). Derived from unc_cha_tor_inserts.ia_miss",
"Counter": "0,1,2,3",
"EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
+ "EventName": "LLC_REFERENCES.STREAMING_FULL",
"Filter": "config1=0x41833",
"PerPkg": "1",
"ScaleUnit": "64Bytes",
@@ -100,36 +787,73 @@
"Unit": "CHA"
},
{
- "BriefDescription": "Streaming stores (partial cache line)",
- "Counter": "0,1,2,3",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
- "Filter": "config1=0x41a33",
+ "BriefDescription": "TOR Occupancy; All from Local iA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA",
+ "PerPkg": "1",
+ "UMask": "0x31",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Hits from Local iA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Misses from Local iA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
"UMask": "0x21",
"Unit": "CHA"
},
{
- "BriefDescription": "read requests from home agent",
+ "BriefDescription": "UPI Ingress Credits In Use Cycles; BL NCS VN0 Credits",
+ "EventCode": "0x3B",
+ "EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VN0_BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "FaST wire asserted; Horizontal",
"Counter": "0,1,2,3",
- "EventCode": "0x50",
- "EventName": "UNC_CHA_REQUESTS.READS",
+ "EventCode": "0xA5",
+ "EventName": "UNC_CHA_FAST_ASSERTED.HORZ",
"PerPkg": "1",
- "UMask": "0x03",
+ "UMask": "0x02",
"Unit": "CHA"
},
{
- "BriefDescription": "read requests from local home agent",
+ "BriefDescription": "Uncore cache clock ticks",
"Counter": "0,1,2,3",
- "EventCode": "0x50",
- "EventName": "UNC_CHA_REQUESTS.READS_LOCAL",
+ "EventName": "UNC_CHA_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Normal priority reads issued to the memory controller from the CHA",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x59",
+ "EventName": "UNC_CHA_IMC_READS_COUNT.NORMAL",
"PerPkg": "1",
"UMask": "0x01",
"Unit": "CHA"
},
{
- "BriefDescription": "read requests from remote home agent",
+ "BriefDescription": "CHA to iMC Full Line Writes Issued; Full Line Non-ISOCH",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read requests from a remote socket",
"Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.READS_REMOTE",
@@ -138,42 +862,612 @@
"Unit": "CHA"
},
{
- "BriefDescription": "write requests from home agent",
+ "BriefDescription": "RspI Snoop Responses Received",
"Counter": "0,1,2,3",
- "EventCode": "0x50",
- "EventName": "UNC_CHA_REQUESTS.WRITES",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPI",
"PerPkg": "1",
- "UMask": "0x0C",
+ "UMask": "0x01",
"Unit": "CHA"
},
{
- "BriefDescription": "write requests from local home agent",
+ "BriefDescription": "Rsp*WB Snoop Responses Received",
"Counter": "0,1,2,3",
- "EventCode": "0x50",
- "EventName": "UNC_CHA_REQUESTS.WRITES_LOCAL",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSP_WBWB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RspCnflct* Snoop Responses Received",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPCNFLCTS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop filter capacity evictions for M-state entries",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3D",
+ "EventName": "UNC_CHA_SF_EVICTION.M_STATE",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop filter capacity evictions for E-state entries",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3D",
+ "EventName": "UNC_CHA_SF_EVICTION.E_STATE",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop filter capacity evictions for S-state entries",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3D",
+ "EventName": "UNC_CHA_SF_EVICTION.S_STATE",
"PerPkg": "1",
"UMask": "0x04",
"Unit": "CHA"
},
{
- "BriefDescription": "UPI interconnect send bandwidth for payload. Derived from unc_upi_txl_flits.all_data",
+ "BriefDescription": "This event is deprecated. ",
"Counter": "0,1,2,3",
- "EventCode": "0x2",
- "EventName": "UPI_DATA_BANDWIDTH_TX",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.REM_ALL",
"PerPkg": "1",
- "ScaleUnit": "7.11E-06Bytes",
- "UMask": "0xf",
- "Unit": "UPI LL"
+ "UMask": "0x30",
+ "Unit": "CHA"
},
{
- "BriefDescription": "UPI interconnect send bandwidth for payload",
+ "BriefDescription": "Lines Victimized; Lines in M state",
"Counter": "0,1,2,3",
- "EventCode": "0x2",
- "EventName": "UNC_UPI_TxL_FLITS.ALL_DATA",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_M",
"PerPkg": "1",
- "ScaleUnit": "7.11E-06Bytes",
- "UMask": "0xf",
- "Unit": "UPI LL"
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in E state",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_E",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in S State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_S",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in F State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_F",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD",
+ "Filter": "config1=0x40433",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRds issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD",
+ "Filter": "config1=0x40233",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO",
+ "Filter": "config1=0x40033",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefDRD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefDRD",
+ "Filter": "config1=0x4b433",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefCRD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefCRD",
+ "Filter": "config1=0x4b233",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that hit the LLC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefRFO",
+ "Filter": "config1=0x4b033",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD",
+ "Filter": "config1=0x40433",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRds issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD",
+ "Filter": "config1=0x40233",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO",
+ "Filter": "config1=0x40033",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefDRD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefDRD",
+ "Filter": "config1=0x4b433",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefCRD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefCRD",
+ "Filter": "config1=0x4b233",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that missed the LLC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefRFO",
+ "Filter": "config1=0x4b033",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD",
+ "Filter": "config1=0x40433",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD",
+ "Filter": "config1=0x40233",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO",
+ "Filter": "config1=0x40033",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefDRD",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefDRD",
+ "Filter": "config1=0x4b433",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefCRD",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefCRD",
+ "Filter": "config1=0x4b233",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefRFO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefRFO",
+ "Filter": "config1=0x4b033",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD",
+ "Filter": "config1=0x40433",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD",
+ "Filter": "config1=0x40233",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO",
+ "Filter": "config1=0x40033",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefDRD",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefDRD",
+ "Filter": "config1=0x4b433",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefCRD",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefCRD",
+ "Filter": "config1=0x4b233",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefRFO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefRFO",
+ "Filter": "config1=0x4b033",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Clockticks of the IIO Traffic Controller",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_IIO_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made to IIO Part0 by the CPU",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made to IIO Part1 by the CPU",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made to IIO Part2 by the CPU",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made to IIO Part3 by the CPU",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part0 by a different IIO unit",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part1 by a different IIO unit",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part2 by a different IIO unit",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part3 by a different IIO unit",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part0",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part1",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part2",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part3",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part0",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part1",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part2",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part3",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x08",
+ "Unit": "IIO"
},
{
"BriefDescription": "PCI Express bandwidth writing at IIO, part 0",
@@ -239,18 +1533,47 @@
"Unit": "IIO"
},
{
- "BriefDescription": "PCI Express bandwidth writing at IIO",
+ "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
"Counter": "0,1",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART0",
"FCMask": "0x07",
- "Filter": "ch_mask=0x1f",
- "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
- "MetricName": "LLC_MISSES.PCIE_WRITE",
"PerPkg": "1",
"PortMask": "0x01",
- "ScaleUnit": "4Bytes",
- "UMask": "0x01",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x02",
"Unit": "IIO"
},
{
@@ -317,1731 +1640,21323 @@
"Unit": "IIO"
},
{
- "BriefDescription": "PCI Express bandwidth reading at IIO",
+ "BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part0 to an IIO target",
"Counter": "0,1",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part1 to an IIO target",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part2 to an IIO target",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part3 to an IIO target",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part0 by the CPU",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part1 by the CPU",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part2 by the CPU",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part3 by the CPU",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part0 by a different IIO unit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part1 by a different IIO unit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part2 by a different IIO unit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part3 by a different IIO unit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART0",
"FCMask": "0x07",
- "Filter": "ch_mask=0x1f",
- "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
- "MetricName": "LLC_MISSES.PCIE_READ",
"PerPkg": "1",
"PortMask": "0x01",
- "ScaleUnit": "4Bytes",
"UMask": "0x04",
"Unit": "IIO"
},
{
- "BriefDescription": "Core Cross Snoops Issued; Multiple Core Requests",
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part0 to Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part1 to Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part2 to Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part3 to Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part0 to an IIO target",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part1 to an IIO target",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part2 to an IIO target",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part3 to an IIO target",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part0 to Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part1 to Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part2 to Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part3 to Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part0 to an IIO target",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part1 to an IIO target",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part2 to an IIO target",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part3 to an IIO target",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART0",
+ "FCMask": "0x4",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART1",
+ "FCMask": "0x4",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART2",
+ "FCMask": "0x4",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART3",
+ "FCMask": "0x4",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 0",
+ "Counter": "2,3",
+ "EventCode": "0xD5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART0",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 1",
+ "Counter": "2,3",
+ "EventCode": "0xD5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART1",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 2",
+ "Counter": "2,3",
+ "EventCode": "0xD5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART2",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 3",
+ "Counter": "2,3",
+ "EventCode": "0xD5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART3",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0-3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.ALL_PARTS",
+ "FCMask": "0x4",
+ "PerPkg": "1",
+ "PortMask": "0x0f",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 0-3",
+ "Counter": "2,3",
+ "EventCode": "0xD5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL_PARTS",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x0f",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Total IRP occupancy of inbound read and write requests",
+ "Counter": "0,1",
+ "EventCode": "0xF",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.MEM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "RFO request issued by the IRP unit to the mesh with the intention of writing a partial cacheline",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.RFO",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "PCIITOM request issued by the IRP unit to the mesh with the intention of writing a full cacheline",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.PCITOM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound read requests received by the IRP and inserted into the FAF queue",
+ "Counter": "0,1",
+ "EventCode": "0x18",
+ "EventName": "UNC_I_FAF_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Occupancy of the IRP FAF queue",
+ "Counter": "0,1",
+ "EventCode": "0x19",
+ "EventName": "UNC_I_FAF_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound write (fast path) requests received by the IRP",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.WR_PREF",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Clocks of the Intel Ultra Path Interconnect (UPI)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_UPI_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Data Response packets that go direct to core",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2C",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_DIRECT_ATTEMPTS.D2U",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x12",
+ "EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2K",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Cycles Intel UPI is in L1 power mode (shutdown)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_UPI_L1_POWER_CYCLES",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Cycles the Rx of the Intel UPI is in L0p power mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x25",
+ "EventName": "UNC_UPI_RxL0P_POWER_CYCLES",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "FLITs received which bypassed the Slot0 Receive Buffer",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_UPI_RxL_BYPASSED.SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "FLITs received which bypassed the Slot0 Receive Buffer",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_UPI_RxL_BYPASSED.SLOT1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "FLITs received which bypassed the Slot0 Receive Buffer",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_UPI_RxL_BYPASSED.SLOT2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_FLITS.ALL_NULL",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.NULL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in which the Tx of the Intel Ultra Path Interconnect (UPI) is in L0p power mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_UPI_TxL0P_POWER_CYCLES",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "FLITs that bypassed the TxL Buffer",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_UPI_TxL_BYPASSED",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent; Data",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.DATA",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_FLITS.ALL_NULL",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.NULL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Protocol header and credit FLITs received from any slot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.NON_DATA",
+ "PerPkg": "1",
+ "UMask": "0x97",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Protocol header and credit FLITs transmitted across any slot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.NON_DATA",
+ "PerPkg": "1",
+ "UMask": "0x97",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Idle FLITs transmitted",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.IDLE",
+ "PerPkg": "1",
+ "UMask": "0x47",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Null FLITs transmitted from any slot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.ALL_NULL",
+ "PerPkg": "1",
+ "UMask": "0x27",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Null FLITs received from any slot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.ALL_NULL",
+ "PerPkg": "1",
+ "UMask": "0x27",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid data FLITs received from any slot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.ALL_DATA",
+ "PerPkg": "1",
+ "UMask": "0x0F",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UPI interconnect send bandwidth for payload. Derived from unc_upi_txl_flits.all_data",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UPI_DATA_BANDWIDTH_TX",
+ "PerPkg": "1",
+ "ScaleUnit": "7.11E-06Bytes",
+ "UMask": "0xf",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UPI interconnect send bandwidth for payload",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.ALL_DATA",
+ "PerPkg": "1",
+ "ScaleUnit": "7.11E-06Bytes",
+ "UMask": "0xf",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Data Response packets that go direct to Intel UPI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2U",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Prefetches generated by the flow control queue of the M3UPI unit",
+ "Counter": "0,1,2",
+ "EventCode": "0x29",
+ "EventName": "UNC_M3UPI_UPI_PREFETCH_SPAWN",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2M to iMC Bypass; Taken",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_M2M_BYPASS_M2M_Egress.TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles - at UCLK",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_M2M_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit; On Dirty Line in I State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_I",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit; On Dirty Line in S State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_S",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit; On Dirty Line in L State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_P",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit; On Dirty Line in A State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_A",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit; On NonDirty Line in I State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_I",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit; On NonDirty Line in S State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_S",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit; On NonDirty Line in L State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_P",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit; On NonDirty Line in A State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_A",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss; On Dirty Line in I State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_I",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss; On Dirty Line in S State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_S",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss; On Dirty Line in L State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_P",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss; On Dirty Line in A State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_A",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss; On NonDirty Line in I State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_I",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss; On NonDirty Line in S State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_S",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss; On NonDirty Line in L State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_P",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss; On NonDirty Line in A State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_A",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC; Critical Priority",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC; All, regardless of priority",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.FROM_TRANSGRESS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC; Full Line Non-ISOCH",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.FULL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC; ISOCH Full Line",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.FULL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC; ISOCH Partial",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.PARTIAL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC; All, regardless of priority",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.FROM_TRANSGRESS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number Packet Header Matches; Mesh Match",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M2M_PKT_MATCH.MESH",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number Packet Header Matches; MC Match",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M2M_PKT_MATCH.MC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Cycles Full",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x53",
+ "EventName": "UNC_M2M_PREFCAM_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Cycles Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "UNC_M2M_PREFCAM_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x55",
+ "EventName": "UNC_M2M_PREFCAM_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_RPQ_CYCLES_SPEC_CREDITS.CHN0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2M_RPQ_CYCLES_NO_SPEC_CREDITS.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_RPQ_CYCLES_SPEC_CREDITS.CHN1",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2M_RPQ_CYCLES_NO_SPEC_CREDITS.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_RPQ_CYCLES_SPEC_CREDITS.CHN2",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2M_RPQ_CYCLES_NO_SPEC_CREDITS.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number AD Ingress Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M2M_TGR_AD_CREDITS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number BL Ingress Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2M_TGR_BL_CREDITS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Full; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2M_TRACKER_CYCLES_FULL.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Full; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2M_TRACKER_CYCLES_FULL.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Full; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2M_TRACKER_CYCLES_FULL.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2M_TRACKER_CYCLES_NE.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2M_TRACKER_CYCLES_NE.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2M_TRACKER_CYCLES_NE.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Inserts; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x49",
+ "EventName": "UNC_M2M_TRACKER_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Inserts; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x49",
+ "EventName": "UNC_M2M_TRACKER_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Inserts; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x49",
+ "EventName": "UNC_M2M_TRACKER_INSERTS.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Pending Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x48",
+ "EventName": "UNC_M2M_TRACKER_PENDING_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_WPQ_CYCLES_REG_CREDITS.CHN0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M2M_WPQ_CYCLES_NO_REG_CREDITS.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_WPQ_CYCLES_REG_CREDITS.CHN1",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M2M_WPQ_CYCLES_NO_REG_CREDITS.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_WPQ_CYCLES_REG_CREDITS.CHN2",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M2M_WPQ_CYCLES_NO_REG_CREDITS.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Full; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M2M_WRITE_TRACKER_CYCLES_FULL.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Full; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M2M_WRITE_TRACKER_CYCLES_FULL.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Full; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M2M_WRITE_TRACKER_CYCLES_FULL.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M2M_WRITE_TRACKER_CYCLES_NE.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M2M_WRITE_TRACKER_CYCLES_NE.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M2M_WRITE_TRACKER_CYCLES_NE.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Inserts; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x61",
+ "EventName": "UNC_M2M_WRITE_TRACKER_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Inserts; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x61",
+ "EventName": "UNC_M2M_WRITE_TRACKER_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Inserts; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x61",
+ "EventName": "UNC_M2M_WRITE_TRACKER_INSERTS.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Occupancy; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_M2M_WRITE_TRACKER_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Occupancy; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_M2M_WRITE_TRACKER_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Occupancy; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_M2M_WRITE_TRACKER_OCCUPANCY.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CREDITS_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CREDITS_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CREDITS_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CREDITS_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CREDITS_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CREDITS_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements; Down",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAE",
+ "EventName": "UNC_M2M_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements; Up",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAE",
+ "EventName": "UNC_M2M_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Left and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Left and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Right and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Right and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Left and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA9",
+ "EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Left and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA9",
+ "EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Right and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA9",
+ "EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Right and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA9",
+ "EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Left and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Left and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Right and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Right and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use; Left",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M2M_HORZ_RING_IV_IN_USE.LEFT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use; Right",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M2M_HORZ_RING_IV_IN_USE.RIGHT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; AD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_RING_BOUNCES_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_RING_BOUNCES_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; BL",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_RING_BOUNCES_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_RING_BOUNCES_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; AD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_RING_BOUNCES_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; Acknowledgements to core",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_RING_BOUNCES_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; Data Responses to core",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_RING_BOUNCES_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; Snoops of processor's cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_RING_BOUNCES_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; AD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; Acknowledgements to Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; BL",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; AD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; Acknowledgements to core",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; Data Responses to core",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; Snoops of processor's cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Source Throttle",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Full",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M2M_RxC_AD_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_M2M_RxC_AD_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) Full",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_M2M_RxC_BL_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_M2M_RxC_BL_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_RxR_BYPASS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_RxR_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_RxR_BYPASS.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_RxR_BYPASS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_RxR_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_RxR_BYPASS.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; IFV - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.IFV",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_RxR_INSERTS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_RxR_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_RxR_INSERTS.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_RxR_INSERTS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_RxR_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_RxR_INSERTS.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Credits Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE",
+ "EventName": "UNC_M2M_TxC_AD_CREDIT_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Credit Acquired",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD",
+ "EventName": "UNC_M2M_TxC_AD_CREDITS_ACQUIRED",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Full",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC",
+ "EventName": "UNC_M2M_TxC_AD_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB",
+ "EventName": "UNC_M2M_TxC_AD_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No AD Egress (to CMS) Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xF",
+ "EventName": "UNC_M2M_TxC_AD_NO_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No AD Egress (to CMS) Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2M_TxC_AD_NO_CREDIT_STALLED",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Outbound Ring Transactions on AK; CRD Transactions to Cbo",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_M2M_TxC_AK.CRD_CBO",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Outbound Ring Transactions on AK; NDR Transactions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_M2M_TxC_AK.NDR",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Credits Occupancy; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_M2M_TxC_AK_CREDIT_OCCUPANCY.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Credits Occupancy; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_M2M_TxC_AK_CREDIT_OCCUPANCY.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Credit Acquired; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M2M_TxC_AK_CREDITS_ACQUIRED.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Credit Acquired; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M2M_TxC_AK_CREDITS_ACQUIRED.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No AK Egress (to CMS) Credits; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1F",
+ "EventName": "UNC_M2M_TxC_AK_NO_CREDIT_CYCLES.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No AK Egress (to CMS) Credits; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1F",
+ "EventName": "UNC_M2M_TxC_AK_NO_CREDIT_CYCLES.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No AK Egress (to CMS) Credits; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_M2M_TxC_AK_NO_CREDIT_STALLED.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No AK Egress (to CMS) Credits; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_M2M_TxC_AK_NO_CREDIT_STALLED.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2M_TxC_BL.DRS_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Core",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2M_TxC_BL.DRS_CORE",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Credits Occupancy; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1A",
+ "EventName": "UNC_M2M_TxC_BL_CREDIT_OCCUPANCY.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Credits Occupancy; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1A",
+ "EventName": "UNC_M2M_TxC_BL_CREDIT_OCCUPANCY.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Credit Acquired; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2M_TxC_BL_CREDITS_ACQUIRED.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Credit Acquired; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2M_TxC_BL_CREDITS_ACQUIRED.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Full; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Full; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_FULL.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Full; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_FULL.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Not Empty; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Not Empty; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_NE.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Not Empty; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_NE.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Allocations; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2M_TxC_BL_INSERTS.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Allocations; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2M_TxC_BL_INSERTS.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No BL Egress (to CMS) Credits; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_M2M_TxC_BL_NO_CREDIT_CYCLES.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No BL Egress (to CMS) Credits; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_M2M_TxC_BL_NO_CREDIT_CYCLES.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No BL Egress (to CMS) Credits; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M2M_TxC_BL_NO_CREDIT_STALLED.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No BL Egress (to CMS) Credits; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M2M_TxC_BL_NO_CREDIT_STALLED.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Occupancy; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2M_TxC_BL_OCCUPANCY.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Occupancy; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2M_TxC_BL_OCCUPANCY.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Down and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_VERT_RING_AD_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Down and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_VERT_RING_AD_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Up and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_VERT_RING_AD_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Up and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_VERT_RING_AD_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Down and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA8",
+ "EventName": "UNC_M2M_VERT_RING_AK_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Down and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA8",
+ "EventName": "UNC_M2M_VERT_RING_AK_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Up and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA8",
+ "EventName": "UNC_M2M_VERT_RING_AK_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Up and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA8",
+ "EventName": "UNC_M2M_VERT_RING_AK_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Down and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M2M_VERT_RING_BL_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Down and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M2M_VERT_RING_BL_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Up and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M2M_VERT_RING_BL_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Up and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M2M_VERT_RING_BL_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use; Down",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M2M_VERT_RING_IV_IN_USE.DN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use; Up",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M2M_VERT_RING_IV_IN_USE.UP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_TxC_BL.DRS_UPI",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x40",
+ "EventName": "UNC_NoUnit_TxC_BL.DRS_UPI",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Special; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2M_RPQ_CYCLES_SPEC_CREDITS.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Special; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2M_RPQ_CYCLES_SPEC_CREDITS.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Special; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2M_RPQ_CYCLES_SPEC_CREDITS.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to QPI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2M_TxC_BL.DRS_UPI",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M2M_WPQ_CYCLES_REG_CREDITS.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M2M_WPQ_CYCLES_REG_CREDITS.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M2M_WPQ_CYCLES_REG_CREDITS.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC Bypass; Taken",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_BYPASS_M2M_INGRESS.TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC Bypass; Not Taken",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_BYPASS_M2M_INGRESS.NOT_TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_M2M_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Special; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M2M_WPQ_CYCLES_SPEC_CREDITS.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Special; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M2M_WPQ_CYCLES_SPEC_CREDITS.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Special; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M2M_WPQ_CYCLES_SPEC_CREDITS.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "FaST wire asserted; Vertical",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M2M_FAST_ASSERTED.VERT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "FaST wire asserted; Horizontal",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M2M_FAST_ASSERTED.HORZ",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Regular; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x43",
+ "EventName": "UNC_M2M_RPQ_CYCLES_REG_CREDITS.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Regular; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x43",
+ "EventName": "UNC_M2M_RPQ_CYCLES_REG_CREDITS.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Regular; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x43",
+ "EventName": "UNC_M2M_RPQ_CYCLES_REG_CREDITS.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Clean line read hits(Regular and RFO) to Near Memory(DRAM cache) in Memory Mode and regular reads to DRAM in 1LM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M2M_TAG_HIT.NM_RD_HIT_CLEAN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full; Read Credit Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.RDCRD0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full; Write Credit Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCRD0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full; Write Compare Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCMP0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full; Read Credit Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.RDCRD1",
+ "PerPkg": "1",
+ "UMask": "0x88",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full; Write Credit Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCRD1",
+ "PerPkg": "1",
+ "UMask": "0x90",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full; Write Compare Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCMP1",
+ "PerPkg": "1",
+ "UMask": "0xA0",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty; Read Credit Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.RDCRD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty; Write Credit Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.WRCRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty; Write Compare Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.WRCMP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations; Read Credit Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.RDCRD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations; Write Credit Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.WRCRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations; Write Compare Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.WRCMP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations; Prefetch Read Cam Hit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.PREF_RD_CAM_HIT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy; Read Credit Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.RDCRD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy; Write Credit Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.WRCRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy; Write Compare Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.WRCMP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Sideband",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6B",
+ "EventName": "UNC_M2M_TxC_AK_SIDEBAND.RD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Sideband",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6B",
+ "EventName": "UNC_M2M_TxC_AK_SIDEBAND.WR",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M-&gt;iMC RPQ Cycles w/Credits - Regular; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M2M_PMM_RPQ_CYCLES_REG_CREDITS.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M-&gt;iMC RPQ Cycles w/Credits - Regular; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M2M_PMM_RPQ_CYCLES_REG_CREDITS.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M-&gt;iMC RPQ Cycles w/Credits - Regular; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M2M_PMM_RPQ_CYCLES_REG_CREDITS.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M-&gt;iMC WPQ Cycles w/Credits - Regular; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "UNC_M2M_PMM_WPQ_CYCLES_REG_CREDITS.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M-&gt;iMC WPQ Cycles w/Credits - Regular; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "UNC_M2M_PMM_WPQ_CYCLES_REG_CREDITS.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M-&gt;iMC WPQ Cycles w/Credits - Regular; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "UNC_M2M_PMM_WPQ_CYCLES_REG_CREDITS.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CHA to iMC Bypass; Taken",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x57",
+ "EventName": "UNC_CHA_BYPASS_CHA_IMC.TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Bypass; Intermediate bypass Taken",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x57",
+ "EventName": "UNC_CHA_BYPASS_CHA_IMC.INTERMEDIATE",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Bypass; Not Taken",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x57",
+ "EventName": "UNC_CHA_BYPASS_CHA_IMC.NOT_TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; Single External Snoops",
"Counter": "0,1,2,3",
"EventCode": "0x33",
- "EventName": "UNC_CHA_CORE_SNP.CORE_GTONE",
+ "EventName": "UNC_CHA_CORE_SNP.EXT_ONE",
"PerPkg": "1",
- "PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
- "UMask": "0x42",
+ "UMask": "0x21",
"Unit": "CHA"
},
{
- "BriefDescription": "Core Cross Snoops Issued; Multiple Eviction",
+ "BriefDescription": "Core Cross Snoops Issued; Single Core Requests",
"Counter": "0,1,2,3",
"EventCode": "0x33",
- "EventName": "UNC_CHA_CORE_SNP.EVICT_GTONE",
+ "EventName": "UNC_CHA_CORE_SNP.CORE_ONE",
"PerPkg": "1",
- "PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
- "UMask": "0x82",
+ "UMask": "0x41",
"Unit": "CHA"
},
{
- "BriefDescription": "Multi-socket cacheline Directory state lookups; Snoop Not Needed",
+ "BriefDescription": "Core Cross Snoops Issued; Single Eviction",
"Counter": "0,1,2,3",
- "EventCode": "0x53",
- "EventName": "UNC_CHA_DIR_LOOKUP.NO_SNP",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EVICT_ONE",
"PerPkg": "1",
- "PublicDescription": "Counts transactions that looked into the multi-socket cacheline Directory state, and therefore did not send a snoop because the Directory indicated it was not needed",
- "UMask": "0x02",
+ "UMask": "0x81",
"Unit": "CHA"
},
{
- "BriefDescription": "Multi-socket cacheline Directory state lookups; Snoop Needed",
+ "BriefDescription": "Core Cross Snoops Issued; Any Single Snoop",
"Counter": "0,1,2,3",
- "EventCode": "0x53",
- "EventName": "UNC_CHA_DIR_LOOKUP.SNP",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.ANY_ONE",
"PerPkg": "1",
- "PublicDescription": "Counts transactions that looked into the multi-socket cacheline Directory state, and sent one or more snoops, because the Directory indicated it was needed",
- "UMask": "0x01",
+ "UMask": "0xE1",
"Unit": "CHA"
},
{
- "BriefDescription": "Multi-socket cacheline Directory state updates; Directory Updated memory write from the HA pipe",
+ "BriefDescription": "Core Cross Snoops Issued; Multiple External Snoops",
"Counter": "0,1,2,3",
- "EventCode": "0x54",
- "EventName": "UNC_CHA_DIR_UPDATE.HA",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EXT_GTONE",
+ "PerPkg": "1",
+ "UMask": "0x22",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; Any Cycle with Multiple Snoops",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.ANY_GTONE",
+ "PerPkg": "1",
+ "UMask": "0xE2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; External Snoop to Remote Node",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EXT_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; Core Request to Remote Node",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.CORE_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; Eviction to Remote Node",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EVICT_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x84",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; Any Snoop to Remote Node",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.ANY_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0xE4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counter 0 Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1F",
+ "EventName": "UNC_CHA_COUNTER0_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; Shared hit and op is RdInvOwn, RdInv, Inv*",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5F",
+ "EventName": "UNC_CHA_HITME_HIT.SHARED_OWNREQ",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is WbMtoE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5F",
+ "EventName": "UNC_CHA_HITME_HIT.WBMTOE",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is WbMtoI, WbPushMtoI, WbFlush, or WbMtoS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5F",
+ "EventName": "UNC_CHA_HITME_HIT.WBMTOI_OR_S",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RdCode, RdData, RdDataMigratory, RdCur, RdInvOwn, RdInv, Inv*",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5E",
+ "EventName": "UNC_CHA_HITME_LOOKUP.READ",
"PerPkg": "1",
- "PublicDescription": "Counts only multi-socket cacheline Directory state updates memory writes issued from the HA pipe. This does not include memory write requests which are for I (Invalid) or E (Exclusive) cachelines.",
"UMask": "0x01",
"Unit": "CHA"
},
{
- "BriefDescription": "Multi-socket cacheline Directory state updates; Directory Updated memory write from TOR pipe",
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is WbMtoE, WbMtoI, WbPushMtoI, WbFlush, or WbMtoS",
"Counter": "0,1,2,3",
- "EventCode": "0x54",
- "EventName": "UNC_CHA_DIR_UPDATE.TOR",
+ "EventCode": "0x5E",
+ "EventName": "UNC_CHA_HITME_LOOKUP.WRITE",
"PerPkg": "1",
- "PublicDescription": "Counts only multi-socket cacheline Directory state updates due to memory writes issued from the TOR pipe which are the result of remote transaction hitting the SF/LLC and returning data Core2Core. This does not include memory write requests which are for I (Invalid) or E (Exclusive) cachelines.",
"UMask": "0x02",
"Unit": "CHA"
},
{
- "BriefDescription": "FaST wire asserted; Horizontal",
+ "BriefDescription": "Counts Number of Misses in HitMe Cache; SF/LLC HitS/F and op is RdInvOwn",
"Counter": "0,1,2,3",
- "EventCode": "0xA5",
- "EventName": "UNC_CHA_FAST_ASSERTED.HORZ",
+ "EventCode": "0x60",
+ "EventName": "UNC_CHA_HITME_MISS.SHARED_RDINVOWN",
"PerPkg": "1",
- "PublicDescription": "Counts the number of cycles either the local or incoming distress signals are asserted. Incoming distress includes up, dn and across.",
- "UMask": "0x02",
+ "UMask": "0x20",
"Unit": "CHA"
},
{
- "BriefDescription": "Read request from a remote socket which hit in the HitMe Cache to a line In the E state",
+ "BriefDescription": "Counts Number of Misses in HitMe Cache; No SF/LLC HitS/F and op is RdInvOwn",
"Counter": "0,1,2,3",
- "EventCode": "0x5F",
- "EventName": "UNC_CHA_HITME_HIT.EX_RDS",
+ "EventCode": "0x60",
+ "EventName": "UNC_CHA_HITME_MISS.NOTSHARED_RDINVOWN",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Misses in HitMe Cache; op is RdCode, RdData, RdDataMigratory, RdCur, RdInv, Inv*",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_CHA_HITME_MISS.READ_OR_INV",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache; op is RspIFwd or RspIFwdWb for a local request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.DEALLOCATE_RSPFWDI_LOC",
"PerPkg": "1",
- "PublicDescription": "Counts read requests from a remote socket which hit in the HitME cache (used to cache the multi-socket Directory state) to a line in the E(Exclusive) state. This includes the following read opcodes (RdCode, RdData, RdDataMigratory, RdCur, RdInv*, Inv*)",
"UMask": "0x01",
"Unit": "CHA"
},
{
- "BriefDescription": "Normal priority reads issued to the memory controller from the CHA",
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache; op is RspIFwd or RspIFwdWb for a remote request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.RSPFWDI_REM",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache; Update HitMe Cache to SHARed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.SHARED",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache; Update HitMe Cache on RdInvOwn even if not RspFwdI*",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.RDINVOWN",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache; Deallocate HtiME$ on Reads without RspFwdI*",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.DEALLOCATE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "HA to iMC Reads Issued; ISOCH",
"Counter": "0,1,2,3",
"EventCode": "0x59",
- "EventName": "UNC_CHA_IMC_READS_COUNT.NORMAL",
+ "EventName": "UNC_CHA_IMC_READS_COUNT.PRIORITY",
"PerPkg": "1",
- "PublicDescription": "Counts when a normal (Non-Isochronous) read is issued to any of the memory controller channels from the CHA.",
- "UMask": "0x01",
+ "UMask": "0x02",
"Unit": "CHA"
},
{
- "BriefDescription": "CHA to iMC Full Line Writes Issued; Full Line Non-ISOCH",
+ "BriefDescription": "Writes Issued to the iMC by the HA; Partial Non-ISOCH",
"Counter": "0,1,2,3",
"EventCode": "0x5B",
- "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Writes Issued to the iMC by the HA; ISOCH Full Line",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL_PRIORITY",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Writes Issued to the iMC by the HA; ISOCH Partial",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL_PRIORITY",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Writes Issued to the iMC by the HA; Full Line MIG",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL_MIG",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Writes Issued to the iMC by the HA; Partial MIG",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL_MIG",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of times IODC entry allocation is attempted; Number of IODC allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x62",
+ "EventName": "UNC_CHA_IODC_ALLOC.INVITOM",
"PerPkg": "1",
- "PublicDescription": "Counts when a normal (Non-Isochronous) full line write is issued from the CHA to the any of the memory controller channels.",
"UMask": "0x01",
"Unit": "CHA"
},
{
- "BriefDescription": "Lines Victimized; Lines in E state",
+ "BriefDescription": "Counts Number of times IODC entry allocation is attempted; Number of IODC allocations dropped due to IODC Full",
"Counter": "0,1,2,3",
- "EventCode": "0x37",
- "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_E",
+ "EventCode": "0x62",
+ "EventName": "UNC_CHA_IODC_ALLOC.IODCFULL",
"PerPkg": "1",
- "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x02",
"Unit": "CHA"
},
{
- "BriefDescription": "Lines Victimized; Lines in F State",
+ "BriefDescription": "Counts Number of times IODC entry allocation is attempted; Number of IDOC allocation dropped due to OSB gate",
"Counter": "0,1,2,3",
- "EventCode": "0x37",
- "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_F",
+ "EventCode": "0x62",
+ "EventName": "UNC_CHA_IODC_ALLOC.OSBGATED",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts number of IODC deallocations; IODC deallocated due to WbMtoE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x63",
+ "EventName": "UNC_CHA_IODC_DEALLOC.WBMTOE",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts number of IODC deallocations; IODC deallocated due to WbMtoI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x63",
+ "EventName": "UNC_CHA_IODC_DEALLOC.WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts number of IODC deallocations; IODC deallocated due to WbPushMtoI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x63",
+ "EventName": "UNC_CHA_IODC_DEALLOC.WBPUSHMTOI",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts number of IODC deallocations; IODC deallocated due to conflicting transaction",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x63",
+ "EventName": "UNC_CHA_IODC_DEALLOC.SNPOUT",
"PerPkg": "1",
- "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x08",
"Unit": "CHA"
},
{
- "BriefDescription": "Lines Victimized; Lines in M state",
+ "BriefDescription": "Counts number of IODC deallocations; IODC deallocated due to any reason",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x63",
+ "EventName": "UNC_CHA_IODC_DEALLOC.ALL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.WRITE",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.WRITE",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.ANY",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.ANY",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.LOCAL",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x31",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.REMOTE",
"Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x91",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.LOCAL_ALL",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
"EventCode": "0x37",
- "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_M",
+ "EventName": "UNC_C_LLC_VICTIMS.LOCAL",
"PerPkg": "1",
- "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
- "UMask": "0x01",
+ "UMask": "0x2f",
"Unit": "CHA"
},
{
- "BriefDescription": "Lines Victimized; Lines in S State",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.REMOTE_ALL",
"Counter": "0,1,2,3",
+ "Deprecated": "1",
"EventCode": "0x37",
- "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_S",
+ "EventName": "UNC_C_LLC_VICTIMS.REMOTE",
"PerPkg": "1",
- "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
- "UMask": "0x04",
+ "UMask": "0x80",
"Unit": "CHA"
},
{
- "BriefDescription": "Number of times that an RFO hit in S state.",
+ "BriefDescription": "Cbo Misc; Silent Snoop Eviction",
"Counter": "0,1,2,3",
"EventCode": "0x39",
- "EventName": "UNC_CHA_MISC.RFO_HIT_S",
+ "EventName": "UNC_CHA_MISC.RSPI_WAS_FSE",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cbo Misc; Write Combining Aliasing",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.WC_ALIASING",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cbo Misc; CV0 Prefetch Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.CV0_PREF_VIC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cbo Misc; CV0 Prefetch Miss",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.CV0_PREF_MISS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x55",
+ "EventName": "UNC_CHA_OSB",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty; MC0_SMI0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC0_SMI0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty; MC1_SMI1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC1_SMI1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty; EDC0_SMI2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.EDC0_SMI2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty; EDC1_SMI3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.EDC1_SMI3",
"PerPkg": "1",
- "PublicDescription": "Counts when a RFO (the Read for Ownership issued before a write) request hit a cacheline in the S (Shared) state.",
"UMask": "0x08",
"Unit": "CHA"
},
{
- "BriefDescription": "Local requests for exclusive ownership of a cache line without receiving data",
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty; EDC2_SMI4",
"Counter": "0,1,2,3",
- "EventCode": "0x50",
- "EventName": "UNC_CHA_REQUESTS.INVITOE_LOCAL",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.EDC2_SMI4",
"PerPkg": "1",
- "PublicDescription": "Counts the total number of requests coming from a unit on this socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.",
"UMask": "0x10",
"Unit": "CHA"
},
{
- "BriefDescription": "Local requests for exclusive ownership of a cache line without receiving data",
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty; EDC3_SMI5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.EDC3_SMI5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "write requests from remote home agent",
"Counter": "0,1,2,3",
"EventCode": "0x50",
- "EventName": "UNC_CHA_REQUESTS.INVITOE_REMOTE",
+ "EventName": "UNC_CHA_REQUESTS.WRITES_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.ALL",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_SNOOPS_SENT.",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent; Broadcast or directed Snoops sent for Local Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent; Broadcast or directed Snoops sent for Remote Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.BCST_LOCAL",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_SNOOPS_SENT.BCST_LOC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.BCST_REMOTE",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_SNOOPS_SENT.BCST_REM",
"PerPkg": "1",
- "PublicDescription": "Counts the total number of requests coming from a remote socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.",
"UMask": "0x20",
"Unit": "CHA"
},
{
- "BriefDescription": "Ingress (from CMS) Allocations; IRQ",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.DIRECT_LOCAL",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_SNOOPS_SENT.DIRECT_LOC",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.DIRECT_REMOTE",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_SNOOPS_SENT.DIRECT_REM",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received : RspS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSP_WBWB",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x5C",
+ "EventName": "UNC_H_SNOOP_RESP.RSP_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received; RspFwd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPFWD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSPI",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x5D",
+ "EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSPI",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSPS",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x5D",
+ "EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSPS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSPIFWD",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x5D",
+ "EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSPIFWD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSPSFWD",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x5D",
+ "EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSPSFWD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSP_WB",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x5D",
+ "EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSP_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSP_FWD_WB",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x5D",
+ "EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSP_FWD_WB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSPCNFLCT",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x5D",
+ "EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSPCNFLCT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSPFWD",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x5D",
+ "EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSPFWD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.EVICT",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.EVICT",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.PRQ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IPQ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.HIT",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.HIT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.MISS",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.MISS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.EVICT",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.EVICT",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.PRQ",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IPQ",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.HIT",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.HIT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.MISS",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WbPushMtoI; Pushed to LLC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x56",
+ "EventName": "UNC_CHA_WB_PUSH_MTOI.LLC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WbPushMtoI; Pushed to Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x56",
+ "EventName": "UNC_CHA_WB_PUSH_MTOI.MEM",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty; MC0_SMI0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC0_SMI0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty; MC1_SMI1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC1_SMI1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty; EDC0_SMI2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.EDC0_SMI2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty; EDC1_SMI3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.EDC1_SMI3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty; EDC2_SMI4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.EDC2_SMI4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty; EDC3_SMI5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.EDC3_SMI5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IO",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.LOC_IO",
+ "PerPkg": "1",
+ "UMask": "0x34",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.LOC_IA",
+ "PerPkg": "1",
+ "UMask": "0x31",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.LOC_ALL",
+ "PerPkg": "1",
+ "UMask": "0x37",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IO",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.LOC_IO",
+ "PerPkg": "1",
+ "UMask": "0x34",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IA",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.LOC_IA",
+ "PerPkg": "1",
+ "UMask": "0x31",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.LOC_ALL",
+ "PerPkg": "1",
+ "UMask": "0x37",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core PMA Events; C1 State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x17",
+ "EventName": "UNC_CHA_CORE_PMA.C1_STATE",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core PMA Events; C1 Transition",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x17",
+ "EventName": "UNC_CHA_CORE_PMA.C1_TRANSITION",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core PMA Events; C6 State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x17",
+ "EventName": "UNC_CHA_CORE_PMA.C6_STATE",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core PMA Events; C6 Transition",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x17",
+ "EventName": "UNC_CHA_CORE_PMA.C6_TRANSITION",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core PMA Events; GV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x17",
+ "EventName": "UNC_CHA_CORE_PMA.GV",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CMS_CLOCKTICKS",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_H_CLOCK",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements; Up",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAE",
+ "EventName": "UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements; Down",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAE",
+ "EventName": "UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Left and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Left and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Right and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Right and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Left and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA9",
+ "EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Left and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA9",
+ "EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Right and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA9",
+ "EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Right and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA9",
+ "EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Left and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAB",
+ "EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Left and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAB",
+ "EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Right and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAB",
+ "EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Right and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAB",
+ "EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use; Left",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAD",
+ "EventName": "UNC_CHA_HORZ_RING_IV_IN_USE.LEFT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use; Right",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAD",
+ "EventName": "UNC_CHA_HORZ_RING_IV_IN_USE.RIGHT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; AD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_RING_BOUNCES_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_RING_BOUNCES_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; BL",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_RING_BOUNCES_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_RING_BOUNCES_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; AD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_RING_BOUNCES_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; Acknowledgements to core",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_RING_BOUNCES_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; Data Responses to core",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_RING_BOUNCES_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; Snoops of processor's cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_RING_BOUNCES_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; AD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; BL",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; Acknowledgements to Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; AD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; Acknowledgements to core",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; Data Responses to core",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; Snoops of processor's cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SRC_THRTL",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xA4",
+ "EventName": "UNC_C_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations; IRQ Rejected",
"Counter": "0,1,2,3",
"EventCode": "0x13",
- "EventName": "UNC_CHA_RxC_INSERTS.IRQ",
+ "EventName": "UNC_CHA_RxC_INSERTS.IRQ_REJ",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations; IPQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations; PRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations; PRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.PRQ_REJ",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations; RRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.RRQ",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations; WBQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.WBQ",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; AD REQ on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.AD_REQ_VN0",
"PerPkg": "1",
- "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x01",
"Unit": "CHA"
},
{
- "BriefDescription": "Ingress (from CMS) Request Queue Rejects; PhyAddr Match",
+ "BriefDescription": "Ingress Probe Queue Rejects; AD RSP on VN0",
"Counter": "0,1,2,3",
- "EventCode": "0x19",
- "EventName": "UNC_CHA_RxC_IRQ1_REJECT.PA_MATCH",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; BL RSP on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; BL WB on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; BL NCB on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; BL NCS on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ1_REJECT.ANY0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x23",
+ "EventName": "UNC_H_RxC_IPQ1_REJECT.ANY_IPQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; HA",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; LLC Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; SF Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; Merging these two together to make room for ANY_REJECT_*0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; Allow Snoop",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; PhyAddr Match",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.PA_MATCH",
"PerPkg": "1",
- "PublicDescription": "Ingress (from CMS) Request Queue Rejects; PhyAddr Match",
"UMask": "0x80",
"Unit": "CHA"
},
{
- "BriefDescription": "Ingress (from CMS) Occupancy; IRQ",
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; AD REQ on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; AD RSP on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL RSP on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL WB on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL NCB on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL NCS on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ1_REJECT.ANY0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_RxC_IRQ1_REJECT.ANY_REJECT_IRQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; HA",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; LLC Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; SF Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; Merging these two together to make room for ANY_REJECT_*0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; Allow Snoop",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; AD REQ on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; AD RSP on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; BL RSP on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; BL WB on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; BL NCB on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; BL NCS on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; AD REQ on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; AD RSP on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; BL RSP on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; BL WB on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; BL NCB on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; BL NCS on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ1_REJECT.ANY0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x25",
+ "EventName": "UNC_H_RxC_ISMQ1_REJECT.ANY_ISMQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; HA",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x25",
+ "EventName": "UNC_CHA_RxC_ISMQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ1_RETRY.ANY0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x2D",
+ "EventName": "UNC_H_RxC_ISMQ1_RETRY.ANY",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; HA",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2D",
+ "EventName": "UNC_CHA_RxC_ISMQ1_RETRY.HA",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Occupancy; IPQ",
"EventCode": "0x11",
- "EventName": "UNC_CHA_RxC_OCCUPANCY.IRQ",
+ "EventName": "UNC_CHA_RxC_OCCUPANCY.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Occupancy; RRQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_CHA_RxC_OCCUPANCY.RRQ",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Occupancy; WBQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_CHA_RxC_OCCUPANCY.WBQ",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; AD REQ on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.AD_REQ_VN0",
"PerPkg": "1",
- "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
"UMask": "0x01",
"Unit": "CHA"
},
{
- "BriefDescription": "Snoop filter capacity evictions for E-state entries.",
+ "BriefDescription": "Other Retries; AD RSP on VN0",
"Counter": "0,1,2,3",
- "EventCode": "0x3D",
- "EventName": "UNC_CHA_SF_EVICTION.E_STATE",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.AD_RSP_VN0",
"PerPkg": "1",
- "PublicDescription": "Counts snoop filter capacity evictions for entries tracking exclusive lines in the cores cache. Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry. Does not count clean evictions such as when a cores cache replaces a tracked cacheline with a new cacheline.",
"UMask": "0x02",
"Unit": "CHA"
},
{
- "BriefDescription": "Snoop filter capacity evictions for M-state entries.",
+ "BriefDescription": "Other Retries; BL RSP on VN0",
"Counter": "0,1,2,3",
- "EventCode": "0x3D",
- "EventName": "UNC_CHA_SF_EVICTION.M_STATE",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; BL WB on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; BL NCB on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; BL NCS on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER1_RETRY.ANY0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_RxC_OTHER1_RETRY.ANY",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; HA",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.HA",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; LLC Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; SF Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; Merging these two together to make room for ANY_REJECT_*0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; Allow Snoop",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; PhyAddr Match",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; AD REQ on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.AD_REQ_VN0",
"PerPkg": "1",
- "PublicDescription": "Counts snoop filter capacity evictions for entries tracking modified lines in the cores cache. Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry. Does not count clean evictions such as when a cores cache replaces a tracked cacheline with a new cacheline.",
"UMask": "0x01",
"Unit": "CHA"
},
{
- "BriefDescription": "Snoop filter capacity evictions for S-state entries.",
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; AD RSP on VN0",
"Counter": "0,1,2,3",
- "EventCode": "0x3D",
- "EventName": "UNC_CHA_SF_EVICTION.S_STATE",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL RSP on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_RSP_VN0",
"PerPkg": "1",
- "PublicDescription": "Counts snoop filter capacity evictions for entries tracking shared lines in the cores cache. Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry. Does not count clean evictions such as when a cores cache replaces a tracked cacheline with a new cacheline.",
"UMask": "0x04",
"Unit": "CHA"
},
{
- "BriefDescription": "RspCnflct* Snoop Responses Received",
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL WB on VN0",
"Counter": "0,1,2,3",
- "EventCode": "0x5C",
- "EventName": "UNC_CHA_SNOOP_RESP.RSPCNFLCTS",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL NCB on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL NCS on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ1_REJECT.ANY0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_RxC_PRQ1_REJECT.ANY_PRQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; HA",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; LLC Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; SF Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; LLC OR SF Way",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; Allow Snoop",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.ALLOW_SNP",
"PerPkg": "1",
- "PublicDescription": "Counts when a a transaction with the opcode type RspCnflct* Snoop Response was received. This is returned when a snoop finds an existing outstanding transaction in a remote caching agent. This triggers conflict resolution hardware. This covers both the opcode RspCnflct and RspCnflctWbI.",
"UMask": "0x40",
"Unit": "CHA"
},
{
- "BriefDescription": "RspI Snoop Responses Received",
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; PhyAddr Match",
"Counter": "0,1,2,3",
- "EventCode": "0x5C",
- "EventName": "UNC_CHA_SNOOP_RESP.RSPI",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; AD REQ on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AD_REQ_VN0",
"PerPkg": "1",
- "PublicDescription": "Counts when a transaction with the opcode type RspI Snoop Response was received which indicates the remote cache does not have the data, or when the remote cache silently evicts data (such as when an RFO: the Read for Ownership issued before a write hits non-modified data).",
"UMask": "0x01",
"Unit": "CHA"
},
{
- "BriefDescription": "RspIFwd Snoop Responses Received",
+ "BriefDescription": "Request Queue Retries; AD RSP on VN0",
"Counter": "0,1,2,3",
- "EventCode": "0x5C",
- "EventName": "UNC_CHA_SNOOP_RESP.RSPIFWD",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; BL RSP on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_RSP_VN0",
"PerPkg": "1",
- "PublicDescription": "Counts when a a transaction with the opcode type RspIFwd Snoop Response was received which indicates a remote caching agent forwarded the data and the requesting agent is able to acquire the data in E (Exclusive) or M (modified) states. This is commonly returned with RFO (the Read for Ownership issued before a write) transactions. The snoop could have either been to a cacheline in the M,E,F (Modified, Exclusive or Forward) states.",
"UMask": "0x04",
"Unit": "CHA"
},
{
- "BriefDescription": "RspSFwd Snoop Responses Received",
+ "BriefDescription": "Request Queue Retries; BL WB on VN0",
"Counter": "0,1,2,3",
- "EventCode": "0x5C",
- "EventName": "UNC_CHA_SNOOP_RESP.RSPSFWD",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_WB_VN0",
"PerPkg": "1",
- "PublicDescription": "Counts when a a transaction with the opcode type RspSFwd Snoop Response was received which indicates a remote caching agent forwarded the data but held on to its current copy. This is common for data and code reads that hit in a remote socket in E (Exclusive) or F (Forward) state.",
"UMask": "0x08",
"Unit": "CHA"
},
{
- "BriefDescription": "Rsp*Fwd*WB Snoop Responses Received",
+ "BriefDescription": "Request Queue Retries; BL NCB on VN0",
"Counter": "0,1,2,3",
- "EventCode": "0x5C",
- "EventName": "UNC_CHA_SNOOP_RESP.RSP_FWD_WB",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; BL NCS on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCS_VN0",
"PerPkg": "1",
- "PublicDescription": "Counts when a transaction with the opcode type Rsp*Fwd*WB Snoop Response was received which indicates the data was written back to its home socket, and the cacheline was forwarded to the requestor socket. This snoop response is only used in >= 4 socket systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to its home socket to be written back to memory.",
"UMask": "0x20",
"Unit": "CHA"
},
{
- "BriefDescription": "Rsp*WB Snoop Responses Received",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q1_RETRY.ANY0",
"Counter": "0,1,2,3",
- "EventCode": "0x5C",
- "EventName": "UNC_CHA_SNOOP_RESP.RSP_WBWB",
+ "Deprecated": "1",
+ "EventCode": "0x2B",
+ "EventName": "UNC_H_RxC_REQ_Q1_RETRY.ANY",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; HA",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.HA",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; LLC Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; SF Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.VICTIM",
"PerPkg": "1",
- "PublicDescription": "Counts when a transaction with the opcode type Rsp*WB Snoop Response was received which indicates which indicates the data was written back to its home. This is returned when a non-RFO request hits a cacheline in the Modified state. The Cache can either downgrade the cacheline to a S (Shared) or I (Invalid) state depending on how the system has been configured. This response will also be sent when a cache requests E (Exclusive) ownership of a cache line without receiving data, because the cache must acquire ownership.",
"UMask": "0x10",
"Unit": "CHA"
},
{
- "BriefDescription": "TOR Inserts : DRds issued by iA Cores that Missed the LLC",
+ "BriefDescription": "Request Queue Retries; Merging these two together to make room for ANY_REJECT_*0",
"Counter": "0,1,2,3",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD",
- "Filter": "config1=0x40433",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; Allow Snoop",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; PhyAddr Match",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; AD REQ on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; AD RSP on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; BL RSP on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; BL WB on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; BL NCB on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; BL NCS on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ1_REJECT.ANY0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_RxC_RRQ1_REJECT.ANY_RRQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; HA",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; LLC Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; SF Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; Merging these two together to make room for ANY_REJECT_*0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; Allow Snoop",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; PhyAddr Match",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; AD REQ on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; AD RSP on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; BL RSP on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; BL WB on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; BL NCB on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; BL NCS on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ1_REJECT.ANY0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_RxC_WBQ1_REJECT.ANY_WBQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; HA",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; LLC Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; SF Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; Merging these two together to make room for ANY_REJECT_*0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; Allow Snoop",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; PhyAddr Match",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_RxR_BYPASS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_RxR_BYPASS.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_RxR_BYPASS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_RxR_BYPASS.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_RxR_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_RxR_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; IFV - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.IFV",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_RxR_INSERTS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_RxR_INSERTS.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_RxR_INSERTS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_RxR_INSERTS.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_RxR_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_RxR_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9F",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9F",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9F",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9F",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9F",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9F",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x97",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x97",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x97",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x97",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x97",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x97",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x95",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x95",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x95",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x95",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x95",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x95",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x99",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x99",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x99",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x99",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x99",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x99",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9B",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9B",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9B",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9B",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+      "BriefDescription": "CMS Vertical Bypass Used; AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9E",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+      "BriefDescription": "CMS Vertical Bypass Used; AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9E",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+      "BriefDescription": "CMS Vertical Bypass Used; BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9E",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_BYPASS.IV",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_H_TxR_VERT_BYPASS.IV_AG1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+      "BriefDescription": "CMS Vertical Bypass Used; AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9E",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+      "BriefDescription": "CMS Vertical Bypass Used; AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9E",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+      "BriefDescription": "CMS Vertical Bypass Used; BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9E",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_FULL.IV",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x92",
+ "EventName": "UNC_H_TxR_VERT_CYCLES_FULL.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x93",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x93",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x93",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_NE.IV",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x93",
+ "EventName": "UNC_H_TxR_VERT_CYCLES_NE.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x93",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x93",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x93",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_INSERTS.IV",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x91",
+ "EventName": "UNC_H_TxR_VERT_INSERTS.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_OCCUPANCY.IV",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x90",
+ "EventName": "UNC_H_TxR_VERT_OCCUPANCY.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Up and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_VERT_RING_AD_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Up and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_VERT_RING_AD_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Down and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_VERT_RING_AD_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Down and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_VERT_RING_AD_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Up and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA8",
+ "EventName": "UNC_CHA_VERT_RING_AK_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Up and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA8",
+ "EventName": "UNC_CHA_VERT_RING_AK_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Down and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA8",
+ "EventName": "UNC_CHA_VERT_RING_AK_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Down and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA8",
+ "EventName": "UNC_CHA_VERT_RING_AK_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Up and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAA",
+ "EventName": "UNC_CHA_VERT_RING_BL_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Up and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAA",
+ "EventName": "UNC_CHA_VERT_RING_BL_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Down and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAA",
+ "EventName": "UNC_CHA_VERT_RING_BL_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Down and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAA",
+ "EventName": "UNC_CHA_VERT_RING_BL_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use; Up",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAC",
+ "EventName": "UNC_CHA_VERT_RING_IV_IN_USE.UP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use; Down",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAC",
+ "EventName": "UNC_CHA_VERT_RING_IV_IN_USE.DN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; External RspHitFSE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EXT_RSP_HITFSE",
"PerPkg": "1",
- "PublicDescription": "TOR Inserts : DRds issued by iA Cores that Missed the LLC : Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x21",
"Unit": "CHA"
},
{
- "BriefDescription": "TOR Occupancy : DRds issued by iA Cores that Missed the LLC",
+ "BriefDescription": "Core Cross Snoop Responses; Core RspHitFSE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.CORE_RSP_HITFSE",
+ "PerPkg": "1",
+ "UMask": "0x41",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Evict RspHitFSE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EVICT_RSP_HITFSE",
+ "PerPkg": "1",
+ "UMask": "0x81",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Any RspHitFSE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.ANY_RSP_HITFSE",
+ "PerPkg": "1",
+ "UMask": "0xE1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; External RspSFwdFE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EXT_RSPS_FWDFE",
+ "PerPkg": "1",
+ "UMask": "0x22",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Core RspSFwdFE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.CORE_RSPS_FWDFE",
+ "PerPkg": "1",
+ "UMask": "0x42",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Evict RspSFwdFE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EVICT_RSPS_FWDFE",
+ "PerPkg": "1",
+ "UMask": "0x82",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Any RspSFwdFE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.ANY_RSPS_FWDFE",
+ "PerPkg": "1",
+ "UMask": "0xE2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; External RspIFwdFE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EXT_RSPI_FWDFE",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Core RspIFwdFE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.CORE_RSPI_FWDFE",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Evict RspIFwdFE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EVICT_RSPI_FWDFE",
+ "PerPkg": "1",
+ "UMask": "0x84",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Any RspIFwdFE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.ANY_RSPI_FWDFE",
+ "PerPkg": "1",
+ "UMask": "0xE4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; External RspSFwdM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EXT_RSPS_FWDM",
+ "PerPkg": "1",
+ "UMask": "0x28",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Core RspSFwdM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.CORE_RSPS_FWDM",
+ "PerPkg": "1",
+ "UMask": "0x48",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Evict RspSFwdM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EVICT_RSPS_FWDM",
+ "PerPkg": "1",
+ "UMask": "0x88",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Any RspSFwdM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.ANY_RSPS_FWDM",
+ "PerPkg": "1",
+ "UMask": "0xE8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; External RspIFwdM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EXT_RSPI_FWDM",
+ "PerPkg": "1",
+ "UMask": "0x30",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Core RspIFwdM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.CORE_RSPI_FWDM",
+ "PerPkg": "1",
+ "UMask": "0x50",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Evict RspIFwdM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EVICT_RSPI_FWDM",
+ "PerPkg": "1",
+ "UMask": "0x90",
+ "Unit": "CHA"
+ },
+ {
+      "BriefDescription": "Core Cross Snoop Responses; Any RspIFwdM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.ANY_RSPI_FWDM",
+ "PerPkg": "1",
+ "UMask": "0xF0",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.IPQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x18",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.IPQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x28",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.RRQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x50",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.RRQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x60",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.WBQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x90",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.WBQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0xA0",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IO_HIT",
+ "Deprecated": "1",
"EventCode": "0x36",
- "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD",
- "Filter": "config1=0x40433",
+ "EventName": "UNC_C_TOR_OCCUPANCY.PRQ_HIT",
"PerPkg": "1",
- "PublicDescription": "TOR Occupancy : DRds issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0x21",
+ "UMask": "0x14",
"Unit": "CHA"
},
{
- "BriefDescription": "Clockticks of the IIO Traffic Controller",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IO_MISS",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.PRQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.IPQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x18",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.IPQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x28",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; All from Local IO",
"Counter": "0,1,2,3",
- "EventCode": "0x1",
- "EventName": "UNC_IIO_CLOCKTICKS",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO",
"PerPkg": "1",
- "PublicDescription": "Counts clockticks of the 1GHz trafiic controller clock in the IIO unit.",
- "Unit": "IIO"
+ "UMask": "0x34",
+ "Unit": "CHA"
},
{
- "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0-3",
+ "BriefDescription": "TOR Inserts; All from Local iA and IO",
"Counter": "0,1,2,3",
- "EventCode": "0xC2",
- "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.ALL_PARTS",
- "FCMask": "0x4",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.ALL_IO_IA",
+ "PerPkg": "1",
+ "UMask": "0x35",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Hits from Local",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.ALL_HIT",
+ "PerPkg": "1",
+ "UMask": "0x15",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Misses from Local",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.ALL_MISS",
+ "PerPkg": "1",
+ "UMask": "0x25",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; All from Local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO",
+ "PerPkg": "1",
+ "UMask": "0x34",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Hits from Local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Misses from Local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Hits from Local",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.ALL_HIT",
+ "PerPkg": "1",
+ "UMask": "0x17",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Misses from Local",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.ALL_MISS",
+ "PerPkg": "1",
+ "UMask": "0x27",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credit Allocations; VNA Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.VNA",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credit Allocations; VN0 Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credit Allocations; AD REQ Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credit Allocations; AD RSP VN0 Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credit Allocations; BL RSP Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credit Allocations; BL DRS Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credit Allocations; BL NCB Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credit Allocations; BL NCS Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credits In Use Cycles; AD VNA Credits",
+ "EventCode": "0x3B",
+ "EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VNA_AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credits In Use Cycles; BL VNA Credits",
+ "EventCode": "0x3B",
+ "EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VNA_BL",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credits In Use Cycles; AD REQ VN0 Credits",
+ "EventCode": "0x3B",
+ "EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VN0_AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credits In Use Cycles; AD RSP VN0 Credits",
+ "EventCode": "0x3B",
+ "EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VN0_AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credits In Use Cycles; BL RSP VN0 Credits",
+ "EventCode": "0x3B",
+ "EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VN0_BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credits In Use Cycles; BL DRS VN0 Credits",
+ "EventCode": "0x3B",
+ "EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VN0_BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credits In Use Cycles; BL NCB VN0 Credits",
+ "EventCode": "0x3B",
+ "EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VN0_BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; Non UPI AK Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; Non UPI IV Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; Non UPI AK Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; Non UPI IV Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; Non UPI AK Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; Non UPI IV Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; Non UPI AK Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; Non UPI IV Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; Non UPI AK Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; Non UPI IV Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; Non UPI AK Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; Non UPI IV Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; Non UPI AK Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; Non UPI IV Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; Non UPI AK Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; Non UPI IV Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; Non UPI AK Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; Non UPI IV Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; ANY0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.ALL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent; Broadcast snoop for Local Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.BCST_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent; Broadcast snoops for Remote Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.BCST_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent; Directed snoops for Local Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.DIRECT_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent; Directed snoops for Remote Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.DIRECT_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPI",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspIFwd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPIFWD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspSFwd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPSFWD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; Rsp*WB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSP_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; Rsp*FWD*WB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSP_FWD_WB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspCnflct",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPCNFLCT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspFwd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPFWD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_CHA_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Data Read Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ",
"PerPkg": "1",
- "PortMask": "0x0f",
- "PublicDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0-3",
"UMask": "0x03",
- "Unit": "IIO"
+ "Unit": "CHA"
},
{
- "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0",
+ "BriefDescription": "Cache and Snoop Filter Lookups; Write Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.WRITE",
+ "PerPkg": "1",
+ "UMask": "0x05",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; External Snoop Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_SNOOP",
+ "PerPkg": "1",
+ "UMask": "0x09",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Any Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.ANY",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Local",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x31",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Remote",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x91",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_M",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.M_STATE",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_E",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.E_STATE",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_S",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.S_STATE",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_F",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.F_STATE",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Local - All Lines",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x2F",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.REMOTE_ALL",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; IRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; SF/LLC Evictions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.EVICT",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; PRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; IPQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Hit (Not a Miss)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.HIT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Miss",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.MISS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.LOC_ALL",
+ "PerPkg": "1",
+ "UMask": "0x37",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0xFF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IPQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x18",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IPQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x28",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.RRQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x50",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.RRQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x60",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.WBQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x90",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.WBQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0xA0",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; IRQ",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; SF/LLC Evictions",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.EVICT",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; PRQ",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; IPQ",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Hit (Not a Miss)",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.HIT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Miss",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.MISS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.ALL_FROM_LOC",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_ALL",
+ "PerPkg": "1",
+ "UMask": "0x37",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.ALL",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0xFF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IPQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x18",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IPQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x28",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Source Throttle",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; ANY0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; ANY0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; ANY0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x25",
+ "EventName": "UNC_CHA_RxC_ISMQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; ANY0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2D",
+ "EventName": "UNC_CHA_RxC_ISMQ1_RETRY.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; ANY0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; ANY0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; ANY0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; ANY0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9E",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x93",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "FaST wire asserted; Vertical",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA5",
+ "EventName": "UNC_CHA_FAST_ASSERTED.VERT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Local - Lines in M State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_M",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Local - Lines in E State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_E",
+ "PerPkg": "1",
+ "UMask": "0x22",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Local - Lines in S State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_S",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Local - Lines in F State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_F",
+ "PerPkg": "1",
+ "UMask": "0x28",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Remote - Lines in M State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_M",
+ "PerPkg": "1",
+ "UMask": "0x81",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Remote - Lines in E State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_E",
+ "PerPkg": "1",
+ "UMask": "0x82",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Remote - Lines in S State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_S",
+ "PerPkg": "1",
+ "UMask": "0x84",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Remote - Lines in F State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_F",
+ "PerPkg": "1",
+ "UMask": "0x88",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Remote - All Lines",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_ALL",
+ "PerPkg": "1",
+ "UMask": "0x8F",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; All from Local",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.ALL_FROM_LOC",
+ "PerPkg": "1",
+ "UMask": "0x37",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RdCur misses from Local IO",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_RDCUR",
+ "Filter": "config1=0x43C33",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO misses from Local IO",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_RFO",
+ "Filter": "config1=0x40033",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; ItoM misses from Local IO",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM",
+ "Filter": "config1=0x49033",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; ITOM Misses from Local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM",
+ "Filter": "config1=0x49033",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RDCUR isses from Local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RDCUR",
+ "Filter": "config1=0x43C33",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO misses from Local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RFO",
+ "Filter": "config1=0x40033",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Memory Mode related events; Counts the number of times CHA saw NM Set conflict in IODC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x64",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.IODC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Memory Mode related events; Counts the number of times CHA saw NM Set conflict in SF/LLC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x64",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.LLC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Memory Mode related events; Counts the number of times CHA saw NM Set conflict in SF/LLC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x64",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.SF",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Memory Mode related events; Counts the number of times CHA saw NM Set conflict in TOR",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x64",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.TOR",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Memory mode related events; Counts the number of times CHA saw NM Set conflict in TOR and the transaction was rejected",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x64",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.TOR_REJECT",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts; Port 0",
"Counter": "0,1,2,3",
"EventCode": "0xC2",
- "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART0",
- "FCMask": "0x4",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.PORT0",
+ "FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x01",
- "PublicDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0",
- "UMask": "0x03",
+ "UMask": "0x04",
"Unit": "IIO"
},
{
- "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 1",
+ "BriefDescription": "PCIe Completion Buffer Inserts; Port 1",
"Counter": "0,1,2,3",
"EventCode": "0xC2",
- "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART1",
- "FCMask": "0x4",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.PORT1",
+ "FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x02",
- "PublicDescription": "PCIe Completion Buffer Inserts of completions with data: Part 1",
- "UMask": "0x03",
+ "UMask": "0x04",
"Unit": "IIO"
},
{
- "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 2",
+ "BriefDescription": "PCIe Completion Buffer Inserts; Port 2",
"Counter": "0,1,2,3",
"EventCode": "0xC2",
- "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART2",
- "FCMask": "0x4",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.PORT2",
+ "FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x04",
- "PublicDescription": "PCIe Completion Buffer Inserts of completions with data: Part 2",
- "UMask": "0x03",
+ "UMask": "0x04",
"Unit": "IIO"
},
{
- "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 3",
+ "BriefDescription": "PCIe Completion Buffer Inserts; Port 3",
"Counter": "0,1,2,3",
"EventCode": "0xC2",
- "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART3",
- "FCMask": "0x4",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.PORT3",
+ "FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x08",
- "PublicDescription": "PCIe Completion Buffer Inserts of completions with data: Part 3",
- "UMask": "0x03",
+ "UMask": "0x04",
"Unit": "IIO"
},
{
- "BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 0-3",
+ "BriefDescription": "Num Link Correctable Errors",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xF",
+ "EventName": "UNC_IIO_LINK_NUM_CORR_ERR",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num Link Retries",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE",
+ "EventName": "UNC_IIO_LINK_NUM_RETRIES",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number packets that passed the Mask/Match Filter",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_IIO_MASK_MATCH",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus; Non-PCIE bus",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus; PCIE bus",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus; Non-PCIE bus and !(PCIE bus)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus; Non-PCIE bus and PCIE bus",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS0_BUS1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus; !(Non-PCIE bus) and PCIE bus",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.NOT_BUS0_BUS1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.NOT_BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus; Non-PCIE bus",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus; PCIE bus",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus; Non-PCIE bus and !(PCIE bus)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus; Non-PCIE bus and PCIE bus",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS0_BUS1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus; !(Non-PCIE bus) and PCIE bus",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.NOT_BUS0_BUS1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus; !(Non-PCIE bus) and !(PCIE bus)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.NOT_BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "UNC_IIO_NOTHING",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_IIO_NOTHING",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART0",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART1",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART2",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART3",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART0",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART1",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART2",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART3",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART0",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMIC.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART1",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMIC.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART2",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMIC.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART3",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMIC.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART0",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMICCMP.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART1",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMICCMP.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART2",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMICCMP.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART3",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMICCMP.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MSG.PART0",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MSG.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MSG.PART1",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MSG.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MSG.PART2",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MSG.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MSG.PART3",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MSG.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART0",
"Counter": "2,3",
- "EventCode": "0xD5",
- "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL_PARTS",
- "FCMask": "0x04",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_WRITE.PART0",
+ "FCMask": "0x7",
"PerPkg": "1",
- "PublicDescription": "PCIe Completion Buffer occupancy of completions with data: Part 0-3",
- "UMask": "0x0f",
+ "PortMask": "0x1",
+ "UMask": "0x1",
"Unit": "IIO"
},
{
- "BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 0",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART1",
"Counter": "2,3",
- "EventCode": "0xD5",
- "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART0",
- "FCMask": "0x04",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_WRITE.PART1",
+ "FCMask": "0x7",
"PerPkg": "1",
- "PublicDescription": "PCIe Completion Buffer occupancy of completions with data: Part 0",
- "UMask": "0x01",
+ "PortMask": "0x2",
+ "UMask": "0x1",
"Unit": "IIO"
},
{
- "BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 1",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART2",
"Counter": "2,3",
- "EventCode": "0xD5",
- "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART1",
- "FCMask": "0x04",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_WRITE.PART2",
+ "FCMask": "0x7",
"PerPkg": "1",
- "PublicDescription": "PCIe Completion Buffer occupancy of completions with data: Part 1",
- "UMask": "0x02",
+ "PortMask": "0x4",
+ "UMask": "0x1",
"Unit": "IIO"
},
{
- "BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 2",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART3",
"Counter": "2,3",
- "EventCode": "0xD5",
- "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART2",
- "FCMask": "0x04",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_WRITE.PART3",
+ "FCMask": "0x7",
"PerPkg": "1",
- "PublicDescription": "PCIe Completion Buffer occupancy of completions with data: Part 2",
- "UMask": "0x04",
+ "PortMask": "0x8",
+ "UMask": "0x1",
"Unit": "IIO"
},
{
- "BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 3",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART0",
"Counter": "2,3",
- "EventCode": "0xD5",
- "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART3",
- "FCMask": "0x04",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_WRITE.PART0",
+ "FCMask": "0x7",
"PerPkg": "1",
- "PublicDescription": "PCIe Completion Buffer occupancy of completions with data: Part 3",
- "UMask": "0x08",
+ "PortMask": "0x1",
+ "UMask": "0x2",
"Unit": "IIO"
},
{
- "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part0",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART1",
"Counter": "2,3",
+ "Deprecated": "1",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART2",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART3",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART0",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART1",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART2",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART3",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART0",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART1",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART2",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART3",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART0",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART1",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART2",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART3",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART0",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART1",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART2",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART3",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART0",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART1",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART2",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART3",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART0",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART1",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART2",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART3",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Symbol Times on Link",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "UNC_IIO_SYMBOL_TIMES",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART1",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART2",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART3",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMIC.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMIC.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMIC.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMIC.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMICCMP.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMICCMP.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMICCMP.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMICCMP.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MSG.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MSG.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MSG.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MSG.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART1",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART2",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART3",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART1",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART2",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART3",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART1",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART2",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART3",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART1",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART2",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART3",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART1",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART2",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART3",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART1",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART2",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART3",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART1",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART2",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART3",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART1",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART2",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART3",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "VTd Access; Vtd hit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_VTD_ACCESS.L4_PAGE_HIT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "VTd Access; context cache miss",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_VTD_ACCESS.CTXT_MISS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "VTd Access; L1 miss",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_VTD_ACCESS.L1_MISS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "VTd Access; L2 miss",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_VTD_ACCESS.L2_MISS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "VTd Access; L3 miss",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_VTD_ACCESS.L3_MISS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "VTd Access; TLB miss",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_VTD_ACCESS.TLB_MISS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "VTd Access; TLB is full",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_VTD_ACCESS.TLB_FULL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "VTd Access; TLB miss",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_VTD_ACCESS.TLB1_MISS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "VTd Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_VTD_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.VTD0",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMIC.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.VTD1",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMIC.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.VTD0",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.VTD1",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.VTD0",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.VTD1",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MSG.VTD0",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MSG.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MSG.VTD1",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MSG.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.VTD0",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.VTD1",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.VTD0",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.VTD1",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.VTD0",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.VTD1",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.VTD0",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.VTD1",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_READ.VTD0",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_READ.VTD1",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.VTD0",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.VTD1",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.VTD0",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.VTD1",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.VTD0",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.VTD1",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.VTD0",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.VTD1",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.VTD0",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.VTD1",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMIC.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMIC.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.VTD0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.VTD1",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MSG.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MSG.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.VTD0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.VTD1",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.VTD0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_READ.VTD0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_READ.VTD1",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.VTD0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.VTD1",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.VTD0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.VTD1",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.VTD0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.VTD1",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.VTD0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.VTD1",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.VTD0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.VTD1",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's PCICFG space",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART0",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
- "PublicDescription": "Counts every read request for 4 bytes of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part0. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part1",
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's PCICFG space",
"Counter": "2,3",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART1",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
- "PublicDescription": "Counts every read request for 4 bytes of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part1. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part2",
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's PCICFG space",
"Counter": "2,3",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART2",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART2",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
- "PublicDescription": "Counts every read request for 4 bytes of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part2. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part3",
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's PCICFG space",
"Counter": "2,3",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART3",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART3",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
- "PublicDescription": "Counts every read request for 4 bytes of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part3. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Write request of 4 bytes made to IIO Part0 by the CPU",
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's IO space",
"Counter": "2,3",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART0",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
- "PublicDescription": "Counts every write request of 4 bytes of data made to the MMIO space of a card on IIO Part0 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
+ "UMask": "0x20",
"Unit": "IIO"
},
{
- "BriefDescription": "Write request of 4 bytes made to IIO Part1 by the CPU",
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's IO space",
"Counter": "2,3",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART1",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
- "PublicDescription": "Counts every write request of 4 bytes of data made to the MMIO space of a card on IIO Part1 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
+ "UMask": "0x20",
"Unit": "IIO"
},
{
- "BriefDescription": "Write request of 4 bytes made to IIO Part2 by the CPU",
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's IO space",
"Counter": "2,3",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART2",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART2",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
- "PublicDescription": "Counts every write request of 4 bytes of data made to the MMIO space of a card on IIO Part2 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
+ "UMask": "0x20",
"Unit": "IIO"
},
{
- "BriefDescription": "Write request of 4 bytes made to IIO Part3 by the CPU",
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's IO space",
"Counter": "2,3",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART3",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART3",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
- "PublicDescription": "Counts every write request of 4 bytes of data made to the MMIO space of a card on IIO Part3 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
+ "UMask": "0x20",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part0",
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's PCICFG space",
"Counter": "2,3",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART0",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
- "PublicDescription": "Counts ever peer to peer read request for 4 bytes of data made by a different IIO unit to the MMIO space of a card on IIO Part0. Does not include requests made by the same IIO unit. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
+ "UMask": "0x40",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part1",
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's PCICFG space",
"Counter": "2,3",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART1",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
- "PublicDescription": "Counts ever peer to peer read request for 4 bytes of data made by a different IIO unit to the MMIO space of a card on IIO Part1. Does not include requests made by the same IIO unit. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
+ "UMask": "0x40",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part2",
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's PCICFG space",
"Counter": "2,3",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART2",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART2",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
- "PublicDescription": "Counts ever peer to peer read request for 4 bytes of data made by a different IIO unit to the MMIO space of a card on IIO Part2. Does not include requests made by the same IIO unit. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
+ "UMask": "0x40",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part3",
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's PCICFG space",
"Counter": "2,3",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART3",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART3",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
- "PublicDescription": "Counts ever peer to peer read request for 4 bytes of data made by a different IIO unit to the MMIO space of a card on IIO Part3. Does not include requests made by the same IIO unit. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
+ "UMask": "0x40",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part0 by a different IIO unit",
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's IO space",
"Counter": "2,3",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART0",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
- "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made to the MMIO space of a card on IIO Part0 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
+ "UMask": "0x80",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part1 by a different IIO unit",
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's IO space",
"Counter": "2,3",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART1",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
- "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made to the MMIO space of a card on IIO Part1 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
+ "UMask": "0x80",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part2 by a different IIO unit",
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's IO space",
"Counter": "2,3",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART2",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART2",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
- "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made to the MMIO space of a card on IIO Part2 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
+ "UMask": "0x80",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part3 by a different IIO unit",
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's IO space",
"Counter": "2,3",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART3",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART3",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
- "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made to the MMIO space of a card on IIO Part3 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's PCICFG space",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's PCICFG space",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's PCICFG space",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's PCICFG space",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's IO space",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's IO space",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's IO space",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's IO space",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's MMIO space",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's MMIO space",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's MMIO space",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's MMIO space",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Another card (different IIO stack) reading from this card",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Another card (different IIO stack) reading from this card",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Another card (different IIO stack) writing to this card",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
"UMask": "0x02",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part0 to an IIO target",
+ "BriefDescription": "Data requested by the CPU; Another card (different IIO stack) writing to this card",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Atomic requests targeting DRAM",
"Counter": "0,1",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART0",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART0",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
- "PublicDescription": "Counts every peer to peer read request for 4 bytes of data made by IIO Part0 to the MMIO space of an IIO target. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part1 to an IIO target",
+ "BriefDescription": "Data requested of the CPU; Atomic requests targeting DRAM",
"Counter": "0,1",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART1",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
- "PublicDescription": "Counts every peer to peer read request for 4 bytes of data made by IIO Part1 to the MMIO space of an IIO target. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part2 to an IIO target",
+ "BriefDescription": "Data requested of the CPU; Atomic requests targeting DRAM",
"Counter": "0,1",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART2",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART2",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
- "PublicDescription": "Counts every peer to peer read request for 4 bytes of data made by IIO Part2 to the MMIO space of an IIO target. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part3 to an IIO target",
+ "BriefDescription": "Data requested of the CPU; Atomic requests targeting DRAM",
"Counter": "0,1",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART3",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART3",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
- "PublicDescription": "Counts every peer to peer read request for 4 bytes of data made by IIO Part3 to the MMIO space of an IIO target. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "BriefDescription": "Data requested of the CPU; Completion of atomic requests targeting DRAM",
"Counter": "0,1",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART0",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART0",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
- "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made by IIO Part0 to the MMIO space of an IIO target. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
+ "UMask": "0x20",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "BriefDescription": "Data requested of the CPU; Completion of atomic requests targeting DRAM",
"Counter": "0,1",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART1",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
- "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made by IIO Part1 to the MMIO space of an IIO target. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
+ "UMask": "0x20",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "BriefDescription": "Data requested of the CPU; Completion of atomic requests targeting DRAM",
"Counter": "0,1",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART2",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART2",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
- "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made by IIO Part2 to the MMIO space of an IIO target. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
+ "UMask": "0x20",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "BriefDescription": "Data requested of the CPU; Completion of atomic requests targeting DRAM",
"Counter": "0,1",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART3",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Messages",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Messages",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Messages",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Messages",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART3",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
- "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made by IIO Part3 to the MMIO space of an IIO target. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Atomic requests targeting DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Atomic requests targeting DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Card reading from DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Card reading from DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Card writing to DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Card writing to DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Messages",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Messages",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
"UMask": "0x02",
"Unit": "IIO"
},
{
- "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part0",
+ "BriefDescription": "Data requested of the CPU; Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's PCICFG space",
"Counter": "0,1,2,3",
"EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART0",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART0",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
- "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part0. In the general case, part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part1",
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's PCICFG space",
"Counter": "0,1,2,3",
"EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
- "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part1. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part2",
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's PCICFG space",
"Counter": "0,1,2,3",
"EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART2",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART2",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
- "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part2. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part3",
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's PCICFG space",
"Counter": "0,1,2,3",
"EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART3",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART3",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
- "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part3. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part0 by the CPU",
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's IO space",
"Counter": "0,1,2,3",
"EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART0",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART0",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
- "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part0 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
+ "UMask": "0x20",
"Unit": "IIO"
},
{
- "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part1 by the CPU",
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's IO space",
"Counter": "0,1,2,3",
"EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
- "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part1 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
+ "UMask": "0x20",
"Unit": "IIO"
},
{
- "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part2 by the CPU",
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's IO space",
"Counter": "0,1,2,3",
"EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART2",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART2",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
- "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part2 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
+ "UMask": "0x20",
"Unit": "IIO"
},
{
- "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part3 by the CPU",
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's IO space",
"Counter": "0,1,2,3",
"EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART3",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART3",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
- "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part3 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
+ "UMask": "0x20",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part0",
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's PCICFG space",
"Counter": "0,1,2,3",
"EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART0",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART0",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
- "PublicDescription": "Counts every peer to peer read request for up to a 64 byte transaction of data made by a different IIO unit to the MMIO space of a card on IIO Part0. Does not include requests made by the same IIO unit. In the general case, part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
+ "UMask": "0x40",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part1",
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's PCICFG space",
"Counter": "0,1,2,3",
"EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
- "PublicDescription": "Counts every peer to peer read request for up to a 64 byte transaction of data made by a different IIO unit to the MMIO space of a card on IIO Part1. Does not include requests made by the same IIO unit. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
+ "UMask": "0x40",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part2",
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's PCICFG space",
"Counter": "0,1,2,3",
"EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART2",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART2",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
- "PublicDescription": "Counts every peer to peer read request for up to a 64 byte transaction of data made by a different IIO unit to the MMIO space of a card on IIO Part2. Does not include requests made by the same IIO unit. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
+ "UMask": "0x40",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part3",
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's PCICFG space",
"Counter": "0,1,2,3",
"EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART3",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART3",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
- "PublicDescription": "Counts every peer to peer read request for up to a 64 byte transaction of data made by a different IIO unit to the MMIO space of a card on IIO Part3. Does not include requests made by the same IIO unit. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
+ "UMask": "0x40",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part0 by a different IIO unit",
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's IO space",
"Counter": "0,1,2,3",
"EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART0",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART0",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
- "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part0 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
+ "UMask": "0x80",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part1 by a different IIO unit",
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's IO space",
"Counter": "0,1,2,3",
"EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
- "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part1 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
+ "UMask": "0x80",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part2 by a different IIO unit",
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's IO space",
"Counter": "0,1,2,3",
"EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART2",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART2",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
- "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part2 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
+ "UMask": "0x80",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part3 by a different IIO unit",
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's IO space",
"Counter": "0,1,2,3",
"EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART3",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART3",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
- "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part3 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Another card (different IIO stack) reading from this card",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Another card (different IIO stack) reading from this card",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Another card (different IIO stack) writing to this card",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
"UMask": "0x02",
"Unit": "IIO"
},
{
- "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part0 to Memory",
+ "BriefDescription": "Number Transactions requested by the CPU; Another card (different IIO stack) writing to this card",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Card writing to DRAM",
"Counter": "0,1,2,3",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART0",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.VTD0",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x01",
- "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by IIO Part0 to a unit on the main die (generally memory). In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
+ "PortMask": "0x10",
+ "UMask": "0x01",
"Unit": "IIO"
},
{
- "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part1 to Memory",
+ "BriefDescription": "Number Transactions requested of the CPU; Card writing to DRAM",
"Counter": "0,1,2,3",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART1",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.VTD1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x02",
- "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by IIO Part1 to a unit on the main die (generally memory). In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
+ "PortMask": "0x20",
+ "UMask": "0x01",
"Unit": "IIO"
},
{
- "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part2 to Memory",
+ "BriefDescription": "Number Transactions requested of the CPU; Card writing to another Card (same or different stack)",
"Counter": "0,1,2,3",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART2",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.VTD0",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x04",
- "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by IIO Part2 to a unit on the main die (generally memory). In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "PortMask": "0x10",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
"UMask": "0x04",
"Unit": "IIO"
},
{
- "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part3 to Memory",
+ "BriefDescription": "Number Transactions requested of the CPU; Card reading from DRAM",
"Counter": "0,1,2,3",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART3",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.VTD1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x08",
- "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by IIO Part3 to a unit on the main die (generally memory). In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "PortMask": "0x20",
"UMask": "0x04",
"Unit": "IIO"
},
{
- "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part0 to Memory",
+ "BriefDescription": "Number Transactions requested of the CPU; Card reading from another Card (same or different stack)",
"Counter": "0,1,2,3",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART0",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART0",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
- "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made by IIO Part0 to a unit on the main die (generally memory). In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part1 to Memory",
+ "BriefDescription": "Number Transactions requested of the CPU; Atomic requests targeting DRAM",
"Counter": "0,1,2,3",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART1",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
- "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made by IIO Part1 to a unit on the main die (generally memory). In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part2 to Memory",
+ "BriefDescription": "Number Transactions requested of the CPU; Atomic requests targeting DRAM",
"Counter": "0,1,2,3",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART2",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART2",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
- "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made by IIO Part2 to a unit on the main die (generally memory). In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part3 to Memory",
+ "BriefDescription": "Number Transactions requested of the CPU; Atomic requests targeting DRAM",
"Counter": "0,1,2,3",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART3",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART3",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
- "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made by IIO Part3 to a unit on the main die (generally memory). In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part0 to an IIO target",
+ "BriefDescription": "Number Transactions requested of the CPU; Atomic requests targeting DRAM",
"Counter": "0,1,2,3",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART0",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Completion of atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMICCMP.PART0",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
- "PublicDescription": "Counts every peer to peer read request of up to a 64 byte transaction made by IIO Part0 to the MMIO space of an IIO target. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
+ "UMask": "0x20",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part1 to an IIO target",
+ "BriefDescription": "Number Transactions requested of the CPU; Completion of atomic requests targeting DRAM",
"Counter": "0,1,2,3",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART1",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMICCMP.PART1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
- "PublicDescription": "Counts every peer to peer read request of up to a 64 byte transaction made by IIO Part1 to the MMIO space of an IIO target. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
+ "UMask": "0x20",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part2 to an IIO target",
+ "BriefDescription": "Number Transactions requested of the CPU; Completion of atomic requests targeting DRAM",
"Counter": "0,1,2,3",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART2",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMICCMP.PART2",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
- "PublicDescription": "Counts every peer to peer read request of up to a 64 byte transaction made by IIO Part2 to the MMIO space of an IIO target. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
+ "UMask": "0x20",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part3 to an IIO target",
+ "BriefDescription": "Number Transactions requested of the CPU; Completion of atomic requests targeting DRAM",
"Counter": "0,1,2,3",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART3",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMICCMP.PART3",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
- "PublicDescription": "Counts every peer to peer read request of up to a 64 byte transaction made by IIO Part3 to the MMIO space of an IIO target. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
+ "UMask": "0x20",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part0 to an IIO target",
+ "BriefDescription": "Number Transactions requested of the CPU; Messages",
"Counter": "0,1,2,3",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART0",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART0",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
- "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made by IIO Part0 to the MMIO space of an IIO target. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
+ "UMask": "0x40",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part1 to an IIO target",
+ "BriefDescription": "Number Transactions requested of the CPU; Messages",
"Counter": "0,1,2,3",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART1",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
- "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made by IIO Part1 to the MMIO space of an IIO target.In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
+ "UMask": "0x40",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part2 to an IIO target",
+ "BriefDescription": "Number Transactions requested of the CPU; Messages",
"Counter": "0,1,2,3",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART2",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART2",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
- "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made by IIO Part2 to the MMIO space of an IIO target. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
+ "UMask": "0x40",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part3 to an IIO target",
+ "BriefDescription": "Number Transactions requested of the CPU; Messages",
"Counter": "0,1,2,3",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART3",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART3",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
- "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made by IIO Part3 to the MMIO space of an IIO target. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Messages",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x40",
"Unit": "IIO"
},
{
- "BriefDescription": "Total IRP occupancy of inbound read and write requests.",
+ "BriefDescription": "Number Transactions requested of the CPU; Messages",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Total Write Cache Occupancy; Any Source",
"Counter": "0,1",
"EventCode": "0xF",
- "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.MEM",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.ANY",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Total Write Cache Occupancy; Snoops",
+ "Counter": "0,1",
+ "EventCode": "0xF",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.IV_Q",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "IRP Clocks",
+ "Counter": "0,1",
+ "EventCode": "0x1",
+ "EventName": "UNC_I_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; PCIRdCur",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.PCIRDCUR",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; CRd",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.CRD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; DRd",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.DRD",
"PerPkg": "1",
- "PublicDescription": "Total IRP occupancy of inbound read and write requests. This is effectively the sum of read occupancy and write occupancy.",
"UMask": "0x4",
"Unit": "IRP"
},
{
- "BriefDescription": "PCIITOM request issued by the IRP unit to the mesh with the intention of writing a full cacheline.",
+ "BriefDescription": "Coherent Ops; PCIDCAHin5t",
"Counter": "0,1",
"EventCode": "0x10",
- "EventName": "UNC_I_COHERENT_OPS.PCITOM",
+ "EventName": "UNC_I_COHERENT_OPS.PCIDCAHINT",
"PerPkg": "1",
- "PublicDescription": "PCIITOM request issued by the IRP unit to the mesh with the intention of writing a full cacheline to coherent memory, without a RFO. PCIITOM is a speculative Invalidate to Modified command that requests ownership of the cacheline and does not move data from the mesh to IRP cache.",
- "UMask": "0x10",
+ "UMask": "0x20",
"Unit": "IRP"
},
{
- "BriefDescription": "RFO request issued by the IRP unit to the mesh with the intention of writing a partial cacheline.",
+ "BriefDescription": "Coherent Ops; WbMtoI",
"Counter": "0,1",
"EventCode": "0x10",
- "EventName": "UNC_I_COHERENT_OPS.RFO",
+ "EventName": "UNC_I_COHERENT_OPS.WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; CLFlush",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.CLFLUSH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "FAF RF full",
+ "Counter": "0,1",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_FAF_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "FAF allocation -- sent to ADQ",
+ "Counter": "0,1",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_FAF_TRANSACTIONS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "All Inserts Inbound (p2p + faf + cset)",
+ "Counter": "0,1",
+ "EventCode": "0x1E",
+ "EventName": "UNC_I_IRP_ALL.INBOUND_INSERTS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "All Inserts Outbound (BL, AK, Snoops)",
+ "Counter": "0,1",
+ "EventCode": "0x1E",
+ "EventName": "UNC_I_IRP_ALL.OUTBOUND_INSERTS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Fastpath Requests",
+ "Counter": "0,1",
+ "EventCode": "0x1C",
+ "EventName": "UNC_I_MISC0.FAST_REQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Fastpath Rejects",
+ "Counter": "0,1",
+ "EventCode": "0x1C",
+ "EventName": "UNC_I_MISC0.FAST_REJ",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Cache Inserts of Read Transactions as Secondary",
+ "Counter": "0,1",
+ "EventCode": "0x1C",
+ "EventName": "UNC_I_MISC0.2ND_RD_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Cache Inserts of Write Transactions as Secondary",
+ "Counter": "0,1",
+ "EventCode": "0x1C",
+ "EventName": "UNC_I_MISC0.2ND_WR_INSERT",
"PerPkg": "1",
- "PublicDescription": "RFO request issued by the IRP unit to the mesh with the intention of writing a partial cacheline to coherent memory. RFO is a Read For Ownership command that requests ownership of the cacheline and moves data from the mesh to IRP cache.",
"UMask": "0x8",
"Unit": "IRP"
},
{
- "BriefDescription": "Inbound read requests received by the IRP and inserted into the FAF queue.",
+ "BriefDescription": "Misc Events - Set 0; Cache Inserts of Atomic Transactions as Secondary",
"Counter": "0,1",
- "EventCode": "0x18",
- "EventName": "UNC_I_FAF_INSERTS",
+ "EventCode": "0x1C",
+ "EventName": "UNC_I_MISC0.2ND_ATOMIC_INSERT",
"PerPkg": "1",
- "PublicDescription": "Inbound read requests to coherent memory, received by the IRP and inserted into the Fire and Forget queue (FAF), a queue used for processing inbound reads in the IRP.",
+ "UMask": "0x10",
"Unit": "IRP"
},
{
- "BriefDescription": "Occupancy of the IRP FAF queue.",
+ "BriefDescription": "Misc Events - Set 0; Fastpath Transfers From Primary to Secondary",
"Counter": "0,1",
- "EventCode": "0x19",
- "EventName": "UNC_I_FAF_OCCUPANCY",
+ "EventCode": "0x1C",
+ "EventName": "UNC_I_MISC0.FAST_XFER",
"PerPkg": "1",
- "PublicDescription": "Occupancy of the IRP Fire and Forget (FAF) queue, a queue used for processing inbound reads in the IRP.",
+ "UMask": "0x20",
"Unit": "IRP"
},
{
- "BriefDescription": "Inbound write (fast path) requests received by the IRP.",
+ "BriefDescription": "Misc Events - Set 0; Prefetch Ack Hints From Primary to Secondary",
"Counter": "0,1",
- "EventCode": "0x11",
- "EventName": "UNC_I_TRANSACTIONS.WR_PREF",
+ "EventCode": "0x1C",
+ "EventName": "UNC_I_MISC0.PF_ACK_HINT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0",
+ "Counter": "0,1",
+ "EventCode": "0x1C",
+ "EventName": "UNC_I_MISC0.UNKNOWN",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of I Line",
+ "Counter": "0,1",
+ "EventCode": "0x1D",
+ "EventName": "UNC_I_MISC1.SLOW_I",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of S Line",
+ "Counter": "0,1",
+ "EventCode": "0x1D",
+ "EventName": "UNC_I_MISC1.SLOW_S",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of E Line",
+ "Counter": "0,1",
+ "EventCode": "0x1D",
+ "EventName": "UNC_I_MISC1.SLOW_E",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of M Line",
+ "Counter": "0,1",
+ "EventCode": "0x1D",
+ "EventName": "UNC_I_MISC1.SLOW_M",
"PerPkg": "1",
- "PublicDescription": "Inbound write (fast path) requests to coherent memory, received by the IRP resulting in write ownership requests issued by IRP to the mesh.",
"UMask": "0x8",
"Unit": "IRP"
},
{
- "BriefDescription": "Traffic in which the M2M to iMC Bypass was not taken",
- "Counter": "0,1,2,3",
- "EventCode": "0x22",
- "EventName": "UNC_M2M_BYPASS_M2M_Egress.NOT_TAKEN",
+ "BriefDescription": "Misc Events - Set 1; Lost Forward",
+ "Counter": "0,1",
+ "EventCode": "0x1D",
+ "EventName": "UNC_I_MISC1.LOST_FWD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Received Invalid",
+ "Counter": "0,1",
+ "EventCode": "0x1D",
+ "EventName": "UNC_I_MISC1.SEC_RCVD_INVLD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Received Valid",
+ "Counter": "0,1",
+ "EventCode": "0x1D",
+ "EventName": "UNC_I_MISC1.SEC_RCVD_VLD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Requests",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_P2P_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Occupancy",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_P2P_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions; P2P reads",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.RD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions; P2P Writes",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.WR",
"PerPkg": "1",
- "PublicDescription": "Counts traffic in which the M2M (Mesh to Memory) to iMC (Memory Controller) bypass was not taken",
"UMask": "0x2",
- "Unit": "M2M"
+ "Unit": "IRP"
},
{
- "BriefDescription": "Cycles when direct to core mode (which bypasses the CHA) was disabled",
+ "BriefDescription": "P2P Transactions; P2P Message",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.MSG",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions; P2P completions",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.CMPL",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions; Match if remote only",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.REM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions; match if remote and target matches",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.REM_AND_TGT_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions; match if local only",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.LOC",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions; match if local and target matches",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.LOC_AND_TGT_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Miss",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.MISS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Hit I",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_I",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Hit E or S",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_ES",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Hit M",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_M",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; SnpCode",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.SNPCODE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; SnpData",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.SNPDATA",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; SnpInv",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.SNPINV",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Reads",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.READS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Writes",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.WRITES",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Read Prefetches",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.RD_PREF",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Atomic",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.ATOMIC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Other",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.OTHER",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No AD Egress Credit Stalls",
+ "Counter": "0,1",
+ "EventCode": "0x1A",
+ "EventName": "UNC_I_TxR2_AD_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations",
+ "Counter": "0,1",
+ "EventCode": "0xB",
+ "EventName": "UNC_I_TxC_AK_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL DRS Egress Cycles Full",
+ "Counter": "0,1",
+ "EventCode": "0x5",
+ "EventName": "UNC_I_TxC_BL_DRS_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL DRS Egress Inserts",
+ "Counter": "0,1",
+ "EventCode": "0x2",
+ "EventName": "UNC_I_TxC_BL_DRS_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL DRS Egress Occupancy",
+ "Counter": "0,1",
+ "EventCode": "0x8",
+ "EventName": "UNC_I_TxC_BL_DRS_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCB Egress Cycles Full",
+ "Counter": "0,1",
+ "EventCode": "0x6",
+ "EventName": "UNC_I_TxC_BL_NCB_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCB Egress Inserts",
+ "Counter": "0,1",
+ "EventCode": "0x3",
+ "EventName": "UNC_I_TxC_BL_NCB_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCB Egress Occupancy",
+ "Counter": "0,1",
+ "EventCode": "0x9",
+ "EventName": "UNC_I_TxC_BL_NCB_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCS Egress Cycles Full",
+ "Counter": "0,1",
+ "EventCode": "0x7",
+ "EventName": "UNC_I_TxC_BL_NCS_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCS Egress Inserts",
+ "Counter": "0,1",
+ "EventCode": "0x4",
+ "EventName": "UNC_I_TxC_BL_NCS_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCS Egress Occupancy",
+ "Counter": "0,1",
+ "EventCode": "0xA",
+ "EventName": "UNC_I_TxC_BL_NCS_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No BL Egress Credit Stalls",
+ "Counter": "0,1",
+ "EventCode": "0x1B",
+ "EventName": "UNC_I_TxR2_BL_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
+ "EventCode": "0xD",
+ "EventName": "UNC_I_TxS_DATA_INSERTS_NCB",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
+ "EventCode": "0xE",
+ "EventName": "UNC_I_TxS_DATA_INSERTS_NCS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Request Queue Occupancy",
+ "Counter": "0,1",
+ "EventCode": "0xC",
+ "EventName": "UNC_I_TxS_REQUEST_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit I line in the IIO cache",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT_I",
+ "PerPkg": "1",
+ "UMask": "0x72",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit E or S line in the IIO cache",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT_ES",
+ "PerPkg": "1",
+ "UMask": "0x74",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit M line in the IIO cache",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT_M",
+ "PerPkg": "1",
+ "UMask": "0x78",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit M, E, S or I line in the IIO",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT",
+ "PerPkg": "1",
+ "UMask": "0x7e",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that miss the IIO cache",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_MISS",
+ "PerPkg": "1",
+ "UMask": "0x71",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ0",
"Counter": "0,1,2,3",
- "EventCode": "0x24",
- "EventName": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_DIRSTATE",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ0",
"PerPkg": "1",
- "PublicDescription": "Counts cycles when direct to core mode (which bypasses the CHA) was disabled",
- "Unit": "M2M"
+ "UMask": "0x1",
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "Messages sent direct to core (bypassing the CHA)",
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ1",
"Counter": "0,1,2,3",
- "EventCode": "0x23",
- "EventName": "UNC_M2M_DIRECT2CORE_TAKEN",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ1",
"PerPkg": "1",
- "PublicDescription": "Counts when messages were sent direct to core (bypassing the CHA)",
- "Unit": "M2M"
+ "UMask": "0x02",
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "Number of reads in which direct to core transaction were overridden",
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ2",
"Counter": "0,1,2,3",
- "EventCode": "0x25",
- "EventName": "UNC_M2M_DIRECT2CORE_TXN_OVERRIDE",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ2",
"PerPkg": "1",
- "PublicDescription": "Counts reads in which direct to core transactions (which would have bypassed the CHA) were overridden",
- "Unit": "M2M"
+ "UMask": "0x04",
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "Number of reads in which direct to Intel UPI transactions were overridden",
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.BL_VNA_EQ0",
"Counter": "0,1,2,3",
- "EventCode": "0x28",
- "EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_CREDITS",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.BL_VNA_EQ0",
"PerPkg": "1",
- "PublicDescription": "Counts reads in which direct to Intel Ultra Path Interconnect (UPI) transactions (which would have bypassed the CHA) were overridden",
- "Unit": "M2M"
+ "UMask": "0x8",
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "Cycles when direct to Intel UPI was disabled",
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ0",
"Counter": "0,1,2,3",
- "EventCode": "0x27",
- "EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_DIRSTATE",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ0",
"PerPkg": "1",
- "PublicDescription": "Counts cycles when the ability to send messages direct to the Intel Ultra Path Interconnect (bypassing the CHA) was disabled",
- "Unit": "M2M"
+ "UMask": "0x10",
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "Messages sent direct to the Intel UPI",
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AD_VNA_LE2",
"Counter": "0,1,2,3",
- "EventCode": "0x26",
- "EventName": "UNC_M2M_DIRECT2UPI_TAKEN",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AD_VNA_LE2",
"PerPkg": "1",
- "PublicDescription": "Counts when messages were sent direct to the Intel Ultra Path Interconnect (bypassing the CHA)",
- "Unit": "M2M"
+ "UMask": "0x1",
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "Number of reads that a message sent direct2 Intel UPI was overridden",
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_BL_VNA_EQ0",
"Counter": "0,1,2,3",
- "EventCode": "0x29",
- "EventName": "UNC_M2M_DIRECT2UPI_TXN_OVERRIDE",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_BL_VNA_EQ0",
"PerPkg": "1",
- "PublicDescription": "Counts when a read message that was sent direct to the Intel Ultra Path Interconnect (bypassing the CHA) was overridden",
- "Unit": "M2M"
+ "UMask": "0x2",
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "Multi-socket cacheline Directory lookups (any state found)",
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AK_VNA_LE3",
"Counter": "0,1,2,3",
- "EventCode": "0x2D",
- "EventName": "UNC_M2M_DIRECTORY_LOOKUP.ANY",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AK_VNA_LE3",
"PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) looks into the multi-socket cacheline Directory state, and found the cacheline marked in Any State (A, I, S or unused)",
- "UMask": "0x1",
- "Unit": "M2M"
+ "UMask": "0x4",
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "Multi-socket cacheline Directory lookups (cacheline found in A state)",
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.BGF_CRD",
"Counter": "0,1,2,3",
- "EventCode": "0x2D",
- "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_A",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.BGF_CRD",
"PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) looks into the multi-socket cacheline Directory state, and found the cacheline marked in the A (SnoopAll) state, indicating the cacheline is stored in another socket in any state, and we must snoop the other sockets to make sure we get the latest data. The data may be stored in any state in the local socket.",
"UMask": "0x8",
- "Unit": "M2M"
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in I state)",
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.GV_BLOCK",
"Counter": "0,1,2,3",
- "EventCode": "0x2D",
- "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_I",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.GV_BLOCK",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_CRD_RETURN_BLOCKED",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_UPI_M3_CRD_RETURN_BLOCKED",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_BTW_2_THRESH",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_BTW_2_THRESH",
"PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) looks into the multi-socket cacheline Directory state , and found the cacheline marked in the I (Invalid) state indicating the cacheline is not stored in another socket, and so there is no need to snoop the other sockets for the latest data. The data may be stored in any state in the local socket.",
"UMask": "0x2",
- "Unit": "M2M"
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in S state)",
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_EQ0",
"Counter": "0,1,2,3",
- "EventCode": "0x2D",
- "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_S",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_EQ0",
"PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) looks into the multi-socket cacheline Directory state , and found the cacheline marked in the S (Shared) state indicating the cacheline is either stored in another socket in the S(hared) state , and so there is no need to snoop the other sockets for the latest data. The data may be stored in any state in the local socket.",
"UMask": "0x4",
- "Unit": "M2M"
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "Multi-socket cacheline Directory update from A to I",
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_BTW_0_THRESH",
"Counter": "0,1,2,3",
- "EventCode": "0x2E",
- "EventName": "UNC_M2M_DIRECTORY_UPDATE.A2I",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_BTW_0_THRESH",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.BGF_CRD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.BGF_CRD",
"PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from from A (SnoopAll) to I (Invalid)",
"UMask": "0x20",
- "Unit": "M2M"
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "Multi-socket cacheline Directory update from A to S",
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.GV_BLOCK",
"Counter": "0,1,2,3",
- "EventCode": "0x2E",
- "EventName": "UNC_M2M_DIRECTORY_UPDATE.A2S",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.GV_BLOCK",
"PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from from A (SnoopAll) to S (Shared)",
"UMask": "0x40",
- "Unit": "M2M"
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "Multi-socket cacheline Directory update from/to Any state",
+ "BriefDescription": "Cycles where phy is not in L0, L0c, L0p, L1",
"Counter": "0,1,2,3",
- "EventCode": "0x2E",
- "EventName": "UNC_M2M_DIRECTORY_UPDATE.ANY",
+ "EventCode": "0x20",
+ "EventName": "UNC_UPI_PHY_INIT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "L1 Req Nack",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_UPI_POWER_L1_NACK",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "L1 Req (same as L1 Ack)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_UPI_POWER_L1_REQ",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VNA",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x46",
+ "EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VNA",
"PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory to a new state",
"UMask": "0x1",
- "Unit": "M2M"
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "Multi-socket cacheline Directory update from I to A",
+ "BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VN0",
"Counter": "0,1,2,3",
- "EventCode": "0x2E",
- "EventName": "UNC_M2M_DIRECTORY_UPDATE.I2A",
+ "EventCode": "0x46",
+ "EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x46",
+ "EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VN1",
"PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from from I (Invalid) to A (SnoopAll)",
"UMask": "0x4",
- "Unit": "M2M"
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "Multi-socket cacheline Directory update from I to S",
+ "BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.ACK",
"Counter": "0,1,2,3",
- "EventCode": "0x2E",
- "EventName": "UNC_M2M_DIRECTORY_UPDATE.I2S",
+ "EventCode": "0x46",
+ "EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.ACK",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in L0. Receive side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_UPI_RxL0_POWER_CYCLES",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VN0",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3A",
+ "EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VN1",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "VNA Credit Consumed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VNA",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received; Slot 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received; Slot 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.SLOT1",
"PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from from I (Invalid) to S (Shared)",
"UMask": "0x2",
- "Unit": "M2M"
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "Multi-socket cacheline Directory update from S to A",
+ "BriefDescription": "Valid Flits Received; Slot 2",
"Counter": "0,1,2,3",
- "EventCode": "0x2E",
- "EventName": "UNC_M2M_DIRECTORY_UPDATE.S2A",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.SLOT2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received; Data",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.DATA",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received; LLCRD Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.LLCRD",
"PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from from S (Shared) to A (SnoopAll)",
"UMask": "0x10",
- "Unit": "M2M"
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "Multi-socket cacheline Directory update from S to I",
+ "BriefDescription": "Valid Flits Received; LLCTRL",
"Counter": "0,1,2,3",
- "EventCode": "0x2E",
- "EventName": "UNC_M2M_DIRECTORY_UPDATE.S2I",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.LLCTRL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_FLITS.PROTHDR",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.PROT_HDR",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_BASIC_HDR_MATCH.REQ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_HDR_MATCH.REQ",
"PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from from S (Shared) to I (Invalid)",
"UMask": "0x8",
- "Unit": "M2M"
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "Reads to iMC issued",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_BASIC_HDR_MATCH.SNP",
"Counter": "0,1,2,3",
- "EventCode": "0x37",
- "EventName": "UNC_M2M_IMC_READS.ALL",
+ "Deprecated": "1",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_HDR_MATCH.SNP",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_DATA",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_HDR_MATCH.RSP",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_BASIC_HDR_MATCH.WB",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_HDR_MATCH.WB",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_BASIC_HDR_MATCH.NCB",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_HDR_MATCH.NCB",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_BASIC_HDR_MATCH.NCS",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_HDR_MATCH.NCS",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets; Slot 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets; Slot 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets; Slot 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT2",
"PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) issues reads to the iMC (Memory Controller).",
"UMask": "0x4",
- "Unit": "M2M"
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "Reads to iMC issued at Normal Priority (Non-Isochronous)",
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ1",
"Counter": "0,1,2,3",
- "EventCode": "0x37",
- "EventName": "UNC_M2M_IMC_READS.NORMAL",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ1",
"PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) issues reads to the iMC (Memory Controller). It only counts normal priority non-isochronous reads.",
"UMask": "0x1",
- "Unit": "M2M"
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "Read requests to Intel Optane DC persistent memory issued to the iMC from M2M",
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ2",
"Counter": "0,1,2,3",
- "EventCode": "0x37",
- "EventName": "UNC_M2M_IMC_READS.TO_PMM",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ2",
"PerPkg": "1",
- "PublicDescription": "M2M Reads Issued to iMC; All, regardless of priority.",
"UMask": "0x8",
- "Unit": "M2M"
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "Writes to iMC issued",
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ0",
"Counter": "0,1,2,3",
- "EventCode": "0x38",
- "EventName": "UNC_M2M_IMC_WRITES.ALL",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ0",
"PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) issues writes to the iMC (Memory Controller).",
"UMask": "0x10",
- "Unit": "M2M"
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "M2M Writes Issued to iMC; All, regardless of priority.",
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ1",
"Counter": "0,1,2,3",
- "EventCode": "0x38",
- "EventName": "UNC_M2M_IMC_WRITES.NI",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ1",
"PerPkg": "1",
- "PublicDescription": "M2M Writes Issued to iMC; All, regardless of priority.",
- "UMask": "0x80",
- "Unit": "M2M"
+ "UMask": "0x20",
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "Partial Non-Isochronous writes to the iMC",
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.CFG_CTL",
"Counter": "0,1,2,3",
- "EventCode": "0x38",
- "EventName": "UNC_M2M_IMC_WRITES.PARTIAL",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.CFG_CTL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ",
"PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) issues partial writes to the iMC (Memory Controller). It only counts normal priority non-isochronous writes.",
"UMask": "0x2",
- "Unit": "M2M"
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "Write requests to Intel Optane DC persistent memory issued to the iMC from M2M",
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_BYPASS",
"Counter": "0,1,2,3",
- "EventCode": "0x38",
- "EventName": "UNC_M2M_IMC_WRITES.TO_PMM",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_BYPASS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_CRED",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_CRED",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.TXQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.TXQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RETRY",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RETRY",
"PerPkg": "1",
- "PublicDescription": "M2M Writes Issued to iMC; All, regardless of priority.",
"UMask": "0x20",
- "Unit": "M2M"
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "Prefecth requests that got turn into a demand request",
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.DFX",
"Counter": "0,1,2,3",
- "EventCode": "0x56",
- "EventName": "UNC_M2M_PREFCAM_DEMAND_PROMOTIONS",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.DFX",
"PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) promotes a outstanding request in the prefetch queue due to a subsequent demand read request that entered the M2M with the same address. Explanatory Side Note: The Prefecth queue is made of CAM (Content Addressable Memory)",
- "Unit": "M2M"
+ "UMask": "0x40",
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "Inserts into the Memory Controller Prefetch Queue",
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.SPARE",
"Counter": "0,1,2,3",
- "EventCode": "0x57",
- "EventName": "UNC_M2M_PREFCAM_INSERTS",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.SPARE",
"PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) recieves a prefetch request and inserts it into its outstanding prefetch queue. Explanatory Side Note: the prefect queue is made from CAM: Content Addressable Memory",
- "Unit": "M2M"
+ "UMask": "0x80",
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "AD Ingress (from CMS) Queue Inserts",
+ "BriefDescription": "UNC_UPI_TxL0P_POWER_CYCLES_LL_ENTER",
"Counter": "0,1,2,3",
- "EventCode": "0x1",
- "EventName": "UNC_M2M_RxC_AD_INSERTS",
+ "EventCode": "0x28",
+ "EventName": "UNC_UPI_TxL0P_POWER_CYCLES_LL_ENTER",
"PerPkg": "1",
- "PublicDescription": "Counts when the a new entry is Received(RxC) and then added to the AD (Address Ring) Ingress Queue from the CMS (Common Mesh Stop). This is generally used for reads, and",
- "Unit": "M2M"
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "AD Ingress (from CMS) Occupancy",
+ "BriefDescription": "UNC_UPI_TxL0P_POWER_CYCLES_M3_EXIT",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_UPI_TxL0P_POWER_CYCLES_M3_EXIT",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in L0. Transmit side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x26",
+ "EventName": "UNC_UPI_TxL0_POWER_CYCLES",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent; Slot 0",
"Counter": "0,1,2,3",
"EventCode": "0x2",
- "EventName": "UNC_M2M_RxC_AD_OCCUPANCY",
+ "EventName": "UNC_UPI_TxL_FLITS.SLOT0",
"PerPkg": "1",
- "PublicDescription": "AD Ingress (from CMS) Occupancy",
- "Unit": "M2M"
+ "UMask": "0x1",
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "BL Ingress (from CMS) Allocations",
+ "BriefDescription": "Valid Flits Sent; Slot 1",
"Counter": "0,1,2,3",
- "EventCode": "0x5",
- "EventName": "UNC_M2M_RxC_BL_INSERTS",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.SLOT1",
"PerPkg": "1",
- "PublicDescription": "BL Ingress (from CMS) Allocations",
- "Unit": "M2M"
+ "UMask": "0x2",
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "BL Ingress (from CMS) Occupancy",
+ "BriefDescription": "Valid Flits Sent; Slot 2",
"Counter": "0,1,2,3",
- "EventCode": "0x6",
- "EventName": "UNC_M2M_RxC_BL_OCCUPANCY",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.SLOT2",
"PerPkg": "1",
- "PublicDescription": "BL Ingress (from CMS) Occupancy",
- "Unit": "M2M"
+ "UMask": "0x4",
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "Dirty line read hits(Regular and RFO) to Near Memory(DRAM cache) in Memory Mode",
+ "BriefDescription": "Valid Flits Sent; LLCRD Not Empty",
"Counter": "0,1,2,3",
- "EventCode": "0x2C",
- "EventName": "UNC_M2M_TAG_HIT.NM_RD_HIT_DIRTY",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.LLCRD",
"PerPkg": "1",
- "PublicDescription": "Tag Hit; Read Hit from NearMem, Dirty Line",
- "UMask": "0x02",
- "Unit": "M2M"
+ "UMask": "0x10",
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "Clean line underfill read hits to Near Memory(DRAM cache) in Memory Mode",
+ "BriefDescription": "Valid Flits Sent; LLCTRL",
"Counter": "0,1,2,3",
- "EventCode": "0x2C",
- "EventName": "UNC_M2M_TAG_HIT.NM_UFILL_HIT_CLEAN",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.LLCTRL",
"PerPkg": "1",
- "PublicDescription": "Tag Hit; Underfill Rd Hit from NearMem, Clean Line",
- "UMask": "0x04",
- "Unit": "M2M"
+ "UMask": "0x40",
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "Dirty line underfill read hits to Near Memory(DRAM cache) in Memory Mode",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_FLITS.PROTHDR",
"Counter": "0,1,2,3",
- "EventCode": "0x2C",
- "EventName": "UNC_M2M_TAG_HIT.NM_UFILL_HIT_DIRTY",
+ "Deprecated": "1",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.PROT_HDR",
"PerPkg": "1",
- "PublicDescription": "Tag Hit; Underfill Rd Hit from NearMem, Dirty Line",
- "UMask": "0x08",
- "Unit": "M2M"
+ "UMask": "0x80",
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "AD Egress (to CMS) Allocations",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.REQ",
"Counter": "0,1,2,3",
- "EventCode": "0x9",
- "EventName": "UNC_M2M_TxC_AD_INSERTS",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.REQ",
"PerPkg": "1",
- "PublicDescription": "AD Egress (to CMS) Allocations",
- "Unit": "M2M"
+ "UMask": "0x8",
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "AD Egress (to CMS) Occupancy",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.SNP",
"Counter": "0,1,2,3",
- "EventCode": "0xA",
- "EventName": "UNC_M2M_TxC_AD_OCCUPANCY",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.SNP",
"PerPkg": "1",
- "PublicDescription": "AD Egress (to CMS) Occupancy",
- "Unit": "M2M"
+ "UMask": "0x9",
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "BL Egress (to CMS) Allocations; All",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.WB",
"Counter": "0,1,2,3",
- "EventCode": "0x15",
- "EventName": "UNC_M2M_TxC_BL_INSERTS.ALL",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.WB",
"PerPkg": "1",
- "PublicDescription": "BL Egress (to CMS) Allocations; All",
- "UMask": "0x03",
- "Unit": "M2M"
+ "UMask": "0xC",
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "BL Egress (to CMS) Occupancy; All",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.NCB",
"Counter": "0,1,2,3",
- "EventCode": "0x16",
- "EventName": "UNC_M2M_TxC_BL_OCCUPANCY.ALL",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.NCB",
"PerPkg": "1",
- "PublicDescription": "BL Egress (to CMS) Occupancy; All",
- "UMask": "0x03",
- "Unit": "M2M"
+ "UMask": "0xE",
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "Prefetches generated by the flow control queue of the M3UPI unit.",
- "Counter": "0,1,2",
- "EventCode": "0x29",
- "EventName": "UNC_M3UPI_UPI_PREFETCH_SPAWN",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.NCS",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.NCS",
"PerPkg": "1",
- "PublicDescription": "Count cases where flow control queue that sits between the Intel Ultra Path Interconnect (UPI) and the mesh spawns a prefetch to the iMC (Memory Controller)",
- "Unit": "M3UPI"
+ "UMask": "0xF",
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "Clocks of the Intel Ultra Path Interconnect (UPI)",
+ "BriefDescription": "Tx Flit Buffer Allocations",
"Counter": "0,1,2,3",
- "EventCode": "0x1",
- "EventName": "UNC_UPI_CLOCKTICKS",
+ "EventCode": "0x40",
+ "EventName": "UNC_UPI_TxL_INSERTS",
"PerPkg": "1",
- "PublicDescription": "Counts clockticks of the fixed frequency clock controlling the Intel Ultra Path Interconnect (UPI). This clock runs at1/8th the 'GT/s' speed of the UPI link. For example, a 9.6GT/s link will have a fixed Frequency of 1.2 Ghz.",
"Unit": "UPI LL"
},
{
- "BriefDescription": "Data Response packets that go direct to core",
+ "BriefDescription": "Tx Flit Buffer Occupancy",
"Counter": "0,1,2,3",
- "EventCode": "0x12",
- "EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2C",
+ "EventCode": "0x42",
+ "EventName": "UNC_UPI_TxL_OCCUPANCY",
"PerPkg": "1",
- "PublicDescription": "Counts Data Response (DRS) packets that attempted to go direct to core bypassing the CHA.",
- "UMask": "0x1",
"Unit": "UPI LL"
},
{
- "BriefDescription": "Data Response packets that go direct to Intel UPI",
+ "BriefDescription": "UNC_UPI_VNA_CREDIT_RETURN_BLOCKED_VN01",
"Counter": "0,1,2,3",
- "EventCode": "0x12",
- "EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2U",
+ "EventCode": "0x45",
+ "EventName": "UNC_UPI_VNA_CREDIT_RETURN_BLOCKED_VN01",
"PerPkg": "1",
- "PublicDescription": "Counts Data Response (DRS) packets that attempted to go direct to Intel Ultra Path Interconnect (UPI) bypassing the CHA .",
- "UMask": "0x2",
"Unit": "UPI LL"
},
{
- "BriefDescription": "Cycles Intel UPI is in L1 power mode (shutdown)",
+ "BriefDescription": "VNA Credits Pending Return - Occupancy",
"Counter": "0,1,2,3",
- "EventCode": "0x21",
- "EventName": "UNC_UPI_L1_POWER_CYCLES",
+ "EventCode": "0x44",
+ "EventName": "UNC_UPI_VNA_CREDIT_RETURN_OCCUPANCY",
"PerPkg": "1",
- "PublicDescription": "Counts cycles when the Intel Ultra Path Interconnect (UPI) is in L1 power mode. L1 is a mode that totally shuts down the UPI link. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another, this event only coutns when both links are shutdown.",
"Unit": "UPI LL"
},
{
- "BriefDescription": "Cycles the Rx of the Intel UPI is in L0p power mode",
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ3",
"Counter": "0,1,2,3",
- "EventCode": "0x25",
- "EventName": "UNC_UPI_RxL0P_POWER_CYCLES",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ3",
"PerPkg": "1",
- "PublicDescription": "Counts cycles when the the receive side (Rx) of the Intel Ultra Path Interconnect(UPI) is in L0p power mode. L0p is a mode where we disable 60% of the UPI lanes, decreasing our bandwidth in order to save power.",
+ "UMask": "0x80",
"Unit": "UPI LL"
},
{
- "BriefDescription": "FLITs received which bypassed the Slot0 Receive Buffer",
+ "BriefDescription": "Valid Flits Received; Protocol Header",
"Counter": "0,1,2,3",
- "EventCode": "0x31",
- "EventName": "UNC_UPI_RxL_BYPASSED.SLOT0",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.PROTHDR",
"PerPkg": "1",
- "PublicDescription": "Counts incoming FLITs (FLow control unITs) which bypassed the slot0 RxQ buffer (Receive Queue) and passed directly to the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of FLITs transfered, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
- "UMask": "0x1",
+ "UMask": "0x80",
"Unit": "UPI LL"
},
{
- "BriefDescription": "FLITs received which bypassed the Slot0 Receive Buffer",
+ "BriefDescription": "Valid Flits Sent; Protocol Header",
"Counter": "0,1,2,3",
- "EventCode": "0x31",
- "EventName": "UNC_UPI_RxL_BYPASSED.SLOT1",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.PROTHDR",
"PerPkg": "1",
- "PublicDescription": "Counts incoming FLITs (FLow control unITs) which bypassed the slot1 RxQ buffer (Receive Queue) and passed directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of FLITs transfered, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
- "UMask": "0x2",
+ "UMask": "0x80",
"Unit": "UPI LL"
},
{
- "BriefDescription": "FLITs received which bypassed the Slot0 Recieve Buffer",
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ1",
"Counter": "0,1,2,3",
- "EventCode": "0x31",
- "EventName": "UNC_UPI_RxL_BYPASSED.SLOT2",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ1",
"PerPkg": "1",
- "PublicDescription": "Counts incoming FLITs (FLow control unITs) whcih bypassed the slot2 RxQ buffer (Receive Queue) and passed directly to the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of FLITs transfered, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
- "UMask": "0x4",
+ "UMask": "0x20",
"Unit": "UPI LL"
},
{
- "BriefDescription": "Valid data FLITs received from any slot",
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ2",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.LOC",
+ "PerPkg": "1",
+ "UMaskExt": "0x02",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.REM",
+ "PerPkg": "1",
+ "UMaskExt": "0x04",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.DATA_HDR",
+ "PerPkg": "1",
+ "UMaskExt": "0x08",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.NON_DATA_HDR",
+ "PerPkg": "1",
+ "UMaskExt": "0x10",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.DUAL_SLOT_HDR",
+ "PerPkg": "1",
+ "UMaskExt": "0x20",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.SGL_SLOT_HDR",
+ "PerPkg": "1",
+ "UMaskExt": "0x40",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_NODATA",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.RSP_NODATA",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_DATA",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.RSP_DATA",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received; Idle",
"Counter": "0,1,2,3",
"EventCode": "0x3",
- "EventName": "UNC_UPI_RxL_FLITS.ALL_DATA",
+ "EventName": "UNC_UPI_RxL_FLITS.IDLE",
+ "PerPkg": "1",
+ "UMask": "0x47",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.REQ",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Request Opcode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.REQ_OPC",
+ "PerPkg": "1",
+ "UMask": "0x0108",
+ "UMaskExt": "0x01",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Snoop",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.SNP",
+ "PerPkg": "1",
+ "UMask": "0x09",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Snoop Opcode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.SNP_OPC",
+ "PerPkg": "1",
+ "UMask": "0x0109",
+ "UMaskExt": "0x01",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Response - No Data",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_NODATA",
+ "PerPkg": "1",
+ "UMask": "0x0A",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Response - No Data",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_NODATA_OPC",
+ "PerPkg": "1",
+ "UMask": "0x010A",
+ "UMaskExt": "0x01",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Response - Data",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_DATA",
+ "PerPkg": "1",
+ "UMask": "0x0C",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Response - Data",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_DATA_OPC",
+ "PerPkg": "1",
+ "UMask": "0x010C",
+ "UMaskExt": "0x01",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Writeback",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.WB",
+ "PerPkg": "1",
+ "UMask": "0x0D",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Writeback",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.WB_OPC",
+ "PerPkg": "1",
+ "UMask": "0x010D",
+ "UMaskExt": "0x01",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Non-Coherent Bypass",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB",
+ "PerPkg": "1",
+ "UMask": "0x0E",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Non-Coherent Bypass",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB_OPC",
+ "PerPkg": "1",
+ "UMask": "0x010E",
+ "UMaskExt": "0x01",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Non-Coherent Standard",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS",
"PerPkg": "1",
- "PublicDescription": "Counts valid data FLITs (80 bit FLow control unITs: 64bits of data) received from any of the 3 Intel Ultra Path Interconnect (UPI) Receive Queue slots on this UPI unit.",
"UMask": "0x0F",
"Unit": "UPI LL"
},
{
- "BriefDescription": "Null FLITs received from any slot",
+ "BriefDescription": "Matches on Receive path of a UPI Port; Non-Coherent Standard",
"Counter": "0,1,2,3",
- "EventCode": "0x3",
- "EventName": "UNC_UPI_RxL_FLITS.ALL_NULL",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS_OPC",
"PerPkg": "1",
- "PublicDescription": "Counts null FLITs (80 bit FLow control unITs) received from any of the 3 Intel Ultra Path Interconnect (UPI) Receive Queue slots on this UPI unit.",
- "UMask": "0x27",
+ "UMask": "0x010F",
+ "UMaskExt": "0x01",
"Unit": "UPI LL"
},
{
- "BriefDescription": "Protocol header and credit FLITs received from any slot",
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Request",
"Counter": "0,1,2,3",
- "EventCode": "0x3",
- "EventName": "UNC_UPI_RxL_FLITS.NON_DATA",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.REQ",
"PerPkg": "1",
- "PublicDescription": "Counts protocol header and credit FLITs (80 bit FLow control unITs) received from any of the 3 UPI slots on this UPI unit.",
- "UMask": "0x97",
+ "UMask": "0x08",
"Unit": "UPI LL"
},
{
- "BriefDescription": "Cycles in which the Tx of the Intel Ultra Path Interconnect (UPI) is in L0p power mode",
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Request Opcode",
"Counter": "0,1,2,3",
- "EventCode": "0x27",
- "EventName": "UNC_UPI_TxL0P_POWER_CYCLES",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.REQ_OPC",
"PerPkg": "1",
- "PublicDescription": "Counts cycles when the transmit side (Tx) of the Intel Ultra Path Interconnect(UPI) is in L0p power mode. L0p is a mode where we disable 60% of the UPI lanes, decreasing our bandwidth in order to save power.",
+ "UMask": "0x108",
+ "UMaskExt": "0x1",
"Unit": "UPI LL"
},
{
- "BriefDescription": "FLITs that bypassed the TxL Buffer",
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Snoop",
"Counter": "0,1,2,3",
- "EventCode": "0x41",
- "EventName": "UNC_UPI_TxL_BYPASSED",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.SNP",
"PerPkg": "1",
- "PublicDescription": "Counts incoming FLITs (FLow control unITs) which bypassed the TxL(transmit) FLIT buffer and pass directly out the UPI Link. Generally, when data is transmitted across the Intel Ultra Path Interconnect (UPI), it will bypass the TxQ and pass directly to the link. However, the TxQ will be used in L0p (Low Power) mode and (Link Layer Retry) LLR mode, increasing latency to transfer out to the link.",
+ "UMask": "0x09",
"Unit": "UPI LL"
},
{
- "BriefDescription": "Null FLITs transmitted from any slot",
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Snoop Opcode",
"Counter": "0,1,2,3",
- "EventCode": "0x2",
- "EventName": "UNC_UPI_TxL_FLITS.ALL_NULL",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.SNP_OPC",
"PerPkg": "1",
- "PublicDescription": "Counts null FLITs (80 bit FLow control unITs) transmitted via any of the 3 Intel Ulra Path Interconnect (UPI) slots on this UPI unit.",
- "UMask": "0x27",
+ "UMask": "0x109",
+ "UMaskExt": "0x1",
"Unit": "UPI LL"
},
{
- "BriefDescription": "Valid Flits Sent; Data",
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Response - No Data",
"Counter": "0,1,2,3",
- "EventCode": "0x2",
- "EventName": "UNC_UPI_TxL_FLITS.DATA",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_NODATA",
"PerPkg": "1",
- "PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Count Data Flits (which consume all slots), but how much to count is based on Slot0-2 mask, so count can be 0-3 depending on which slots are enabled for counting..",
- "UMask": "0x8",
+ "UMask": "0x0A",
"Unit": "UPI LL"
},
{
- "BriefDescription": "Idle FLITs transmitted",
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Response - No Data",
"Counter": "0,1,2,3",
- "EventCode": "0x2",
- "EventName": "UNC_UPI_TxL_FLITS.IDLE",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_NODATA_OPC",
"PerPkg": "1",
- "PublicDescription": "Counts when the Intel Ultra Path Interconnect(UPI) transmits an idle FLIT(80 bit FLow control unITs). Every UPI cycle must be sending either data FLITs, protocol/credit FLITs or idle FLITs.",
- "UMask": "0x47",
+ "UMask": "0x10A",
+ "UMaskExt": "0x1",
"Unit": "UPI LL"
},
{
- "BriefDescription": "Protocol header and credit FLITs transmitted across any slot",
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Response - Data",
"Counter": "0,1,2,3",
- "EventCode": "0x2",
- "EventName": "UNC_UPI_TxL_FLITS.NON_DATA",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_DATA",
"PerPkg": "1",
- "PublicDescription": "Counts protocol header and credit FLITs (80 bit FLow control unITs) transmitted across any of the 3 UPI (Ultra Path Interconnect) slots on this UPI unit.",
- "UMask": "0x97",
+ "UMask": "0x0C",
"Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Response - Data",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_DATA_OPC",
+ "PerPkg": "1",
+ "UMask": "0x10C",
+ "UMaskExt": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Writeback",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.WB",
+ "PerPkg": "1",
+ "UMask": "0x0D",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Writeback",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.WB_OPC",
+ "PerPkg": "1",
+ "UMask": "0x10D",
+ "UMaskExt": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Non-Coherent Bypass",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB",
+ "PerPkg": "1",
+ "UMask": "0x0E",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Non-Coherent Bypass",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB_OPC",
+ "PerPkg": "1",
+ "UMask": "0x10E",
+ "UMaskExt": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Non-Coherent Standard",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS",
+ "PerPkg": "1",
+ "UMask": "0x0F",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Non-Coherent Standard",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS_OPC",
+ "PerPkg": "1",
+ "UMask": "0x10F",
+ "UMaskExt": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Response - Conflict",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSPCNFLT",
+ "PerPkg": "1",
+ "UMask": "0x01AA",
+ "UMaskExt": "0x01",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Response - Invalid",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSPI",
+ "PerPkg": "1",
+ "UMask": "0x012A",
+ "UMaskExt": "0x01",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Allocations; Slot 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x30",
+ "EventName": "UNC_UPI_RxL_INSERTS.SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Allocations; Slot 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x30",
+ "EventName": "UNC_UPI_RxL_INSERTS.SLOT1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Allocations; Slot 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x30",
+ "EventName": "UNC_UPI_RxL_INSERTS.SLOT2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Response - Conflict",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSPCNFLT",
+ "PerPkg": "1",
+ "UMask": "0x1AA",
+ "UMaskExt": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Response - Invalid",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSPI",
+ "PerPkg": "1",
+ "UMask": "0x12A",
+ "UMaskExt": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty; VNA",
+ "Counter": "0,1,2",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty; VN0 REQ Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_REQ",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty; VN0 SNP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_SNP",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty; VN0 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty; VN1 REQ Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_REQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty; VN1 SNP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_SNP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty; VN1 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty; VN1 REQ Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty; VN1 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_NCS_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty; VN1 SNP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_WB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of Snoop Targets; Peer UPI0 on VN0",
+ "EventCode": "0x3C",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN0_PEER_UPI0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of Snoop Targets; Peer UPI1 on VN0",
+ "EventCode": "0x3C",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN0_PEER_UPI1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of Snoop Targets; Peer UPI0 on VN1",
+ "EventCode": "0x3C",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN1_PEER_UPI0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of Snoop Targets; Peer UPI1 on VN1",
+ "EventCode": "0x3C",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN1_PEER_UPI1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty; VNA Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x22",
+ "EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty; Writebacks",
+ "Counter": "0,1,2",
+ "EventCode": "0x22",
+ "EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.WB",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty; Requests",
+ "Counter": "0,1,2",
+ "EventCode": "0x22",
+ "EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.REQ",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty; Snoops",
+ "Counter": "0,1,2",
+ "EventCode": "0x22",
+ "EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.SNP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of uclks in domain",
+ "Counter": "0,1,2",
+ "EventCode": "0x1",
+ "EventName": "UNC_M3UPI_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "D2U Sent",
+ "Counter": "0,1,2",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M3UPI_D2U_SENT",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty; IIO0 and IIO1 share the same ring destination. (1 VN0 credit only)",
+ "Counter": "0,1,2",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO0_IIO1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty; IIO2",
+ "Counter": "0,1,2",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty; IIO3",
+ "Counter": "0,1,2",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO3_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty; IIO4",
+ "Counter": "0,1,2",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO4_NCB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty; IIO5",
+ "Counter": "0,1,2",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO5_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty; All IIO targets for NCS are in single mask. ORs them together",
+ "Counter": "0,1,2",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty; Selected M2p BL NCS credits",
+ "Counter": "0,1,2",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.NCS_SEL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received; AD - Slot 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x3E",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received; AD - Slot 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x3E",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received; AD - Slot 2",
+ "Counter": "0,1,2",
+ "EventCode": "0x3E",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received; BL - Slot 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x3E",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.BL_SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received; AK - Slot 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x3E",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AK_SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received; AK - Slot 2",
+ "Counter": "0,1,2",
+ "EventCode": "0x3E",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AK_SLOT2",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD; VN0 REQ Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD; VN0 SNP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD; VN0 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD; VN0 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD; VN1 REQ Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_REQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD; VN1 SNP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_SNP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD; VN1 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD; VN1 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_WB",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD FlowQ Bypass",
+ "Counter": "0,1,2",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD FlowQ Bypass",
+ "Counter": "0,1,2",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD FlowQ Bypass",
+ "Counter": "0,1,2",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD FlowQ Bypass",
+ "Counter": "0,1,2",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.BL_EARLY_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty; VN0 REQ Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty; VN0 SNP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty; VN0 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty; VN0 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty; VN1 REQ Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_REQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty; VN1 SNP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_SNP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty; VN1 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty; VN1 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_WB",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts; VN0 REQ Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts; VN0 SNP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts; VN0 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts; VN0 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts; VN1 REQ Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_REQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts; VN1 SNP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_SNP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts; VN1 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy; VN0 REQ Messages",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy; VN0 SNP Messages",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy; VN0 RSP Messages",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy; VN0 WB Messages",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy; VN1 REQ Messages",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_REQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy; VN1 SNP Messages",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_SNP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy; VN1 RSP Messages",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - Credit Available; VN0 REQ Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x34",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_CRD_AVAIL.VN0_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - Credit Available; VN0 SNP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x34",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_CRD_AVAIL.VN0_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - Credit Available; VN0 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x34",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_CRD_AVAIL.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - Credit Available; VN1 REQ Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x34",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_CRD_AVAIL.VN1_REQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - Credit Available; VN1 SNP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x34",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_CRD_AVAIL.VN1_SNP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - Credit Available; VN1 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x34",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_CRD_AVAIL.VN1_WB",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - New Message; VN0 REQ Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x33",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NEW_MSG.VN0_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - New Message; VN0 SNP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x33",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NEW_MSG.VN0_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - New Message; VN0 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x33",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NEW_MSG.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - New Message; VN1 REQ Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x33",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NEW_MSG.VN1_REQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - New Message; VN1 SNP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x33",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NEW_MSG.VN1_SNP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - New Message; VN1 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x33",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NEW_MSG.VN1_WB",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - No Credit; VN0 REQ Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x32",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN0_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - No Credit; VN0 SNP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x32",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN0_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - No Credit; VN0 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x32",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - No Credit; VN0 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x32",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - No Credit; VN1 REQ Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x32",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN1_REQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - No Credit; VN1 SNP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x32",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN1_SNP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - No Credit; VN1 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x32",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - No Credit; VN1 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x32",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN1_WB",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AK Flow Q Inserts",
+ "Counter": "0,1,2",
+ "EventCode": "0x2F",
+ "EventName": "UNC_M3UPI_TxC_AK_FLQ_INSERTS",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AK Flow Q Occupancy",
+ "EventCode": "0x1E",
+ "EventName": "UNC_M3UPI_TxC_AK_FLQ_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL; VN0 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL; VN0 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL; VN0 NCB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL; VN0 NCS Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL; VN1 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL; VN1 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_WB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL; VN1 NCS Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL; VN1 NCB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty; VN0 REQ Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty; VN0 SNP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty; VN0 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty; VN0 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty; VN1 REQ Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_REQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty; VN1 SNP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_SNP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty; VN1 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty; VN1 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_WB",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts; VN0 NCS Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts; VN0 NCB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts; VN0 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts; VN0 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts; VN1_NCB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts; VN1_NCS Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_WB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts; VN1 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts; VN1 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy; VN0 RSP Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy; VN0 WB Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy; VN0 NCB Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy; VN0 NCS Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy; VN1 RSP Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy; VN1 WB Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_WB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy; VN1_NCS Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy; VN1_NCB Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for BL - New Message; VN0 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x38",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NEW_MSG.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for BL - New Message; VN0 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x38",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NEW_MSG.VN0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for BL - New Message; VN0 NCS Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x38",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NEW_MSG.VN0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for BL - New Message; VN1 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x38",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NEW_MSG.VN1_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for BL - New Message; VN1 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x38",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NEW_MSG.VN1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for BL - New Message; VN1 NCB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x38",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NEW_MSG.VN1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD Failed - No Credit; VN0 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x37",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD Failed - No Credit; VN0 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x37",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD Failed - No Credit; VN0 NCB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x37",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD Failed - No Credit; VN0 NCS Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x37",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD Failed - No Credit; VN1 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x37",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD Failed - No Credit; VN1 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x37",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN1_WB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD Failed - No Credit; VN1 NCS Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x37",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD Failed - No Credit; VN1 NCB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x37",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.WB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.WB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.WB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.WB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of Snoop Targets; CHA on VN0",
+ "EventCode": "0x3C",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN0_CHA",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of Snoop Targets; CHA on VN1",
+ "EventCode": "0x3C",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN1_CHA",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of Snoop Targets; Non Idle cycles on VN0",
+ "EventCode": "0x3C",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN0_NON_IDLE",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of Snoop Targets; Non Idle cycles on VN1",
+ "EventCode": "0x3C",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN1_NON_IDLE",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Snoop Arbitration; FlowQ Won",
+ "Counter": "0,1,2",
+ "EventCode": "0x3D",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP2_VN1.VN0_SNPFP_NONSNP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Snoop Arbitration; FlowQ Won",
+ "Counter": "0,1,2",
+ "EventCode": "0x3D",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP2_VN1.VN1_SNPFP_NONSNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Snoop Arbitration; FlowQ SnpF Won",
+ "Counter": "0,1,2",
+ "EventCode": "0x3D",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP2_VN1.VN0_SNPFP_VN2SNP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Snoop Arbitration; FlowQ SnpF Won",
+ "Counter": "0,1,2",
+ "EventCode": "0x3D",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP2_VN1.VN1_SNPFP_VN0SNP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 0",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 1",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 2",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 4",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 5",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CREDITS_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CREDITS_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CREDITS_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CREDITS_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CREDITS_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CREDITS_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2",
+ "EventCode": "0xC0",
+ "EventName": "UNC_M3UPI_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements; Up",
+ "Counter": "0,1,2",
+ "EventCode": "0xAE",
+ "EventName": "UNC_M3UPI_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements; Down",
+ "Counter": "0,1,2",
+ "EventCode": "0xAE",
+ "EventName": "UNC_M3UPI_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Left and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M3UPI_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Left and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M3UPI_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Right and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M3UPI_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Right and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M3UPI_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Left and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0xA9",
+ "EventName": "UNC_M3UPI_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Left and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0xA9",
+ "EventName": "UNC_M3UPI_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Right and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0xA9",
+ "EventName": "UNC_M3UPI_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Right and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0xA9",
+ "EventName": "UNC_M3UPI_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Left and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M3UPI_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Left and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M3UPI_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Right and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M3UPI_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Right and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M3UPI_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use; Left",
+ "Counter": "0,1,2",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M3UPI_HORZ_RING_IV_IN_USE.LEFT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use; Right",
+ "Counter": "0,1,2",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M3UPI_HORZ_RING_IV_IN_USE.RIGHT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; AD",
+ "Counter": "0,1,2",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; AK",
+ "Counter": "0,1,2",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; BL",
+ "Counter": "0,1,2",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; IV",
+ "Counter": "0,1,2",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; AD",
+ "Counter": "0,1,2",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; Acknowledgements to core",
+ "Counter": "0,1,2",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; Data Responses to core",
+ "Counter": "0,1,2",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; Snoops of processor's cache",
+ "Counter": "0,1,2",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; AD",
+ "Counter": "0,1,2",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; AK",
+ "Counter": "0,1,2",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; BL",
+ "Counter": "0,1,2",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; IV",
+ "Counter": "0,1,2",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; Acknowledgements to Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; AD",
+ "Counter": "0,1,2",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; Acknowledgements to core",
+ "Counter": "0,1,2",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; Data Responses to core",
+ "Counter": "0,1,2",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; Snoops of processor's cache",
+ "Counter": "0,1,2",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Source Throttle",
+ "Counter": "0,1,2",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M3UPI_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0; NCS on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1; NCS on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous; Parallel Bias to VN0",
+ "Counter": "0,1,2",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.PAR_BIAS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous; Parallel Bias to VN1",
+ "Counter": "0,1,2",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.PAR_BIAS_VN1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous; No Progress on Pending AD VN0",
+ "Counter": "0,1,2",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_AD_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous; No Progress on Pending AD VN1",
+ "Counter": "0,1,2",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_AD_VN1",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous; No Progress on Pending BL VN0",
+ "Counter": "0,1,2",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_BL_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous; No Progress on Pending BL VN1",
+ "Counter": "0,1,2",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_BL_VN1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous; AD, BL Parallel Win",
+ "Counter": "0,1,2",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.ADBL_PARALLEL_WIN",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0; NCS on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1; NCS on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0; NCS on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1; NCS on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Ingress Queue Bypasses; AD to Slot 0 on Idle",
+ "Counter": "0,1,2",
+ "EventCode": "0x40",
+ "EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S0_IDLE",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Ingress Queue Bypasses; AD to Slot 0 on BL Arb",
+ "Counter": "0,1,2",
+ "EventCode": "0x40",
+ "EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S0_BL_ARB",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Ingress Queue Bypasses; AD + BL to Slot 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x40",
+ "EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S1_BL_SLOT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Ingress Queue Bypasses; AD + BL to Slot 2",
+ "Counter": "0,1,2",
+ "EventCode": "0x40",
+ "EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S2_BL_SLOT",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message lost contest for flit; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN0.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message lost contest for flit; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN0.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message lost contest for flit; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN0.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message lost contest for flit; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN0.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message lost contest for flit; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN0.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message lost contest for flit; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN0.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message lost contest for flit; NCS on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN0.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message lost contest for flit; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN1.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message lost contest for flit; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN1.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message lost contest for flit; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN1.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message lost contest for flit; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN1.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message lost contest for flit; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN1.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message lost contest for flit; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN1.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message lost contest for flit; NCS on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN1.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Credit Events; Any In BGF FIFO",
+ "Counter": "0,1,2",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_MISC.ANY_BGF_FIFO",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Credit Events; Any in BGF Path",
+ "Counter": "0,1,2",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_MISC.ANY_BGF_PATH",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Credit Events; No D2K For Arb",
+ "Counter": "0,1,2",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_MISC.NO_D2K_FOR_ARB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy; VNA In Use",
+ "Counter": "0,1,2",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.VNA_IN_USE",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy; Packets in BGF FIFO",
+ "Counter": "0,1,2",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.FLITS_IN_FIFO",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy; Packets in BGF Path",
+ "Counter": "0,1,2",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.FLITS_IN_PATH",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy; Transmit Credits",
+ "Counter": "0,1,2",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.TxQ_CRD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy; D2K Credits",
+ "Counter": "0,1,2",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.D2K_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy",
+ "Counter": "0,1,2",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.P1P_TOTAL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy",
+ "Counter": "0,1,2",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.P1P_FIFO",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; NCS on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; NCS on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Data Flit Not Sent; All",
+ "Counter": "0,1,2",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_DATA_NOT_SENT.ALL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Data Flit Not Sent; No BGF Credits",
+ "Counter": "0,1,2",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_DATA_NOT_SENT.NO_BGF",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Data Flit Not Sent; No TxQ Credits",
+ "Counter": "0,1,2",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_DATA_NOT_SENT.NO_TXQ",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence; Wait on Pump 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P0_WAIT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence; Wait on Pump 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1_WAIT",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0,1,2",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_TO_LIMBO",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0,1,2",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_BUSY",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0,1,2",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_AT_LIMIT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0,1,2",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_HOLD_P0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0,1,2",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_FIFO_FULL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_FLITS_MISC",
+ "Counter": "0,1,2",
+ "EventCode": "0x5A",
+ "EventName": "UNC_M3UPI_RxC_FLITS_MISC",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit; One Message",
+ "Counter": "0,1,2",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SENT.1_MSG",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit; Two Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SENT.2_MSGS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit; Three Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SENT.3_MSGS",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit; One Message in non-VNA",
+ "Counter": "0,1,2",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SENT.1_MSG_VNX",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit; All",
+ "Counter": "0,1,2",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.ALL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit; Needs Data Flit",
+ "Counter": "0,1,2",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.NEED_DATA",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit; Wait on Pump 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P0_WAIT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit; Wait on Pump 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_WAIT",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit; Don't Need Pump 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit; Don't Need Pump 1 - Bubble",
+ "Counter": "0,1,2",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ_BUT_BUBBLE",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit; Don't Need Pump 1 - Not Avail",
+ "Counter": "0,1,2",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ_NOT_AVAIL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1; Acumullate",
+ "Counter": "0,1,2",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1; Accumulate Ready",
+ "Counter": "0,1,2",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM_READ",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1; Accumulate Wasted",
+ "Counter": "0,1,2",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM_WASTED",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1; Run-Ahead - Blocked",
+ "Counter": "0,1,2",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_BLOCKED",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1; Run-Ahead - Message",
+ "Counter": "0,1,2",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_MSG",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1; Parallel Ok",
+ "Counter": "0,1,2",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.PAR",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1; Parallel Message",
+ "Counter": "0,1,2",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.PAR_MSG",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1; Parallel Flit Finished",
+ "Counter": "0,1,2",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.PAR_FLIT",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 2; Rate-matching Stall",
+ "Counter": "0,1,2",
+ "EventCode": "0x54",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.RMSTALL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 2; Rate-matching Stall - No Message",
+ "Counter": "0,1,2",
+ "EventCode": "0x54",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.RMSTALL_NOMSG",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent; All",
+ "Counter": "0,1,2",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.ALL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent; No BGF Credits",
+ "Counter": "0,1,2",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.NO_BGF_CRD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent; No TxQ Credits",
+ "Counter": "0,1,2",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.NO_TXQ_CRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent; No BGF Credits + No Extra Message Slotted",
+ "Counter": "0,1,2",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.NO_BGF_NO_MSG",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent; No TxQ Credits + No Extra Message Slotted",
+ "Counter": "0,1,2",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.NO_TXQ_NO_MSG",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent; Sent - One Slot Taken",
+ "Counter": "0,1,2",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.ONE_TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent; Sent - Two Slots Taken",
+ "Counter": "0,1,2",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.TWO_TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent; Sent - Three Slots Taken",
+ "Counter": "0,1,2",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.THREE_TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held; VN0",
+ "Counter": "0,1,2",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_HELD.VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held; VN1",
+ "Counter": "0,1,2",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_HELD.VN1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held; Parallel Attempt",
+ "Counter": "0,1,2",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_ATTEMPT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held; Parallel Success",
+ "Counter": "0,1,2",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_SUCCESS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held; Parallel AD Lost",
+ "Counter": "0,1,2",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_AD_LOST",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held; Parallel BL Lost",
+ "Counter": "0,1,2",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_BL_LOST",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held; Can't Slot AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_HELD.CANT_SLOT_AD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held; Can't Slot BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_HELD.CANT_SLOT_BL",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; NCS on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; NCS on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; NCS on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; NCS on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit; NCS on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit; NCS on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "SMI3 Prefetch Messages; Arrived",
+ "Counter": "0,1,2",
+ "EventCode": "0x62",
+ "EventName": "UNC_M3UPI_RxC_SMI3_PFTCH.ARRIVED",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "SMI3 Prefetch Messages; Lost Arbitration",
+ "Counter": "0,1,2",
+ "EventCode": "0x62",
+ "EventName": "UNC_M3UPI_RxC_SMI3_PFTCH.ARB_LOST",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "SMI3 Prefetch Messages; Slotted",
+ "Counter": "0,1,2",
+ "EventCode": "0x62",
+ "EventName": "UNC_M3UPI_RxC_SMI3_PFTCH.SLOTTED",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "SMI3 Prefetch Messages; Dropped - Old",
+ "Counter": "0,1,2",
+ "EventCode": "0x62",
+ "EventName": "UNC_M3UPI_RxC_SMI3_PFTCH.DROP_OLD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "SMI3 Prefetch Messages; Dropped - Wrap",
+ "Counter": "0,1,2",
+ "EventCode": "0x62",
+ "EventName": "UNC_M3UPI_RxC_SMI3_PFTCH.DROP_WRAP",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits; Used",
+ "Counter": "0,1,2",
+ "EventCode": "0x5B",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.USED",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits; Corrected",
+ "Counter": "0,1,2",
+ "EventCode": "0x5B",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.CORRECTED",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits; Level < 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x5B",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.LT1",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits; Level < 4",
+ "Counter": "0,1,2",
+ "EventCode": "0x5B",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.LT4",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits; Level < 5",
+ "Counter": "0,1,2",
+ "EventCode": "0x5B",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.LT5",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits; Any In Use",
+ "Counter": "0,1,2",
+ "EventCode": "0x5B",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.ANY_IN_USE",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M3UPI_RxR_BUSY_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M3UPI_RxR_BUSY_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M3UPI_RxR_BUSY_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M3UPI_RxR_BUSY_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; AD - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; AK - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; BL - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; IV - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; AD - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; BL - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AK - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; IV - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; IFV - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.IFV",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; AD - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; AK - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; BL - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; IV - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; AD - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; BL - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; AD - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; AK - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; BL - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; IV - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; AD - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; BL - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; AD - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; AK - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; BL - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; AD - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; BL - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; AD - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; AK - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; BL - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; IV - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; AD - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; BL - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AD - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x96",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AK - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x96",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; BL - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x96",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; IV - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x96",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AD - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0x96",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; BL - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0x96",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AD - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x97",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AK - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x97",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; BL - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x97",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; IV - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x97",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AD - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0x97",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; BL - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0x97",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; AD - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x95",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; AK - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x95",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; BL - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x95",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; IV - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x95",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; AD - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0x95",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; BL - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0x95",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; AD - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x99",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; AK - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x99",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; BL - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x99",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; IV - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x99",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; AD - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0x99",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; BL - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0x99",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; AD - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x94",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; AK - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x94",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; BL - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x94",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; IV - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x94",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; AD - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0x94",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; BL - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0x94",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; AD - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M3UPI_TxR_HORZ_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; AK - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M3UPI_TxR_HORZ_STARVED.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; BL - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M3UPI_TxR_HORZ_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; IV - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M3UPI_TxR_HORZ_STARVED.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; IV",
+ "Counter": "0,1,2",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AD - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AK - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; BL - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; IV",
+ "Counter": "0,1,2",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AD - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AK - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; BL - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AD - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x93",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AK - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x93",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; BL - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x93",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; IV",
+ "Counter": "0,1,2",
+ "EventCode": "0x93",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AD - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x93",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AK - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x93",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; BL - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x93",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AD - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x91",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AK - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x91",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; BL - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x91",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; IV",
+ "Counter": "0,1,2",
+ "EventCode": "0x91",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AD - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x91",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AK - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x91",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; BL - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x91",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AD - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AK - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; BL - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AD - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AK - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; BL - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AD - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AK - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; BL - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; IV",
+ "Counter": "0,1,2",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AD - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AK - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; BL - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AD - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AK - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; BL - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AD - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AK - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; BL - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Up and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M3UPI_VERT_RING_AD_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Up and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M3UPI_VERT_RING_AD_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Down and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M3UPI_VERT_RING_AD_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Down and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M3UPI_VERT_RING_AD_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Up and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0xA8",
+ "EventName": "UNC_M3UPI_VERT_RING_AK_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Up and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0xA8",
+ "EventName": "UNC_M3UPI_VERT_RING_AK_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Down and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0xA8",
+ "EventName": "UNC_M3UPI_VERT_RING_AK_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Down and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0xA8",
+ "EventName": "UNC_M3UPI_VERT_RING_AK_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Up and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M3UPI_VERT_RING_BL_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Up and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M3UPI_VERT_RING_BL_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Down and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M3UPI_VERT_RING_BL_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Down and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M3UPI_VERT_RING_BL_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use; Up",
+ "Counter": "0,1,2",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M3UPI_VERT_RING_IV_IN_USE.UP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use; Down",
+ "Counter": "0,1,2",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M3UPI_VERT_RING_IV_IN_USE.DN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "D2C Sent",
+ "Counter": "0,1,2",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M3UPI_D2C_SENT",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "FaST wire asserted; Vertical",
+ "Counter": "0,1,2",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M3UPI_FAST_ASSERTED.VERT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "FaST wire asserted; Horizontal",
+ "Counter": "0,1,2",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M3UPI_FAST_ASSERTED.HORZ",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit",
+ "Counter": "0,1,2",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SENT.SLOTS_1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit",
+ "Counter": "0,1,2",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SENT.SLOTS_2",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit",
+ "Counter": "0,1,2",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SENT.SLOTS_3",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; IV",
+ "Counter": "0,1,2",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; IV",
+ "Counter": "0,1,2",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty; VNA",
+ "Counter": "0,1,2",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty; VN0 REQ Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty; VN0 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_NCS_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty; VN0 SNP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Received; VLW",
+ "Counter": "0,1",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.VLW_RCVD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received; MSI",
+ "Counter": "0,1",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.MSI_RCVD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received; IPI",
+ "Counter": "0,1",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.IPI_RCVD",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received",
+ "Counter": "0,1",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.DOORBELL_RCVD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received",
+ "Counter": "0,1",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.INT_PRIO",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "IDI Lock/SplitLock Cycles",
+ "Counter": "0,1",
+ "EventCode": "0x44",
+ "EventName": "UNC_U_LOCK_CYCLES",
+ "PerPkg": "1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Cycles PHOLD Assert to Ack; Assert to ACK",
+ "Counter": "0,1",
+ "EventCode": "0x45",
+ "EventName": "UNC_U_PHOLD_CYCLES.ASSERT_TO_ACK",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_RACU_DRNG.RDRAND",
+ "Counter": "0,1",
+ "EventCode": "0x4C",
+ "EventName": "UNC_U_RACU_DRNG.RDRAND",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_RACU_DRNG.RDSEED",
+ "Counter": "0,1",
+ "EventCode": "0x4C",
+ "EventName": "UNC_U_RACU_DRNG.RDSEED",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_RACU_DRNG.PFTCH_BUF_EMPTY",
+ "Counter": "0,1",
+ "EventCode": "0x4C",
+ "EventName": "UNC_U_RACU_DRNG.PFTCH_BUF_EMPTY",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "RACU Request",
+ "Counter": "0,1",
+ "EventCode": "0x46",
+ "EventName": "UNC_U_RACU_REQUESTS",
+ "PerPkg": "1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Clockticks in the UBOX using a dedicated 48-bit Fixed Counter",
+ "Counter": "FIXED",
+ "EventCode": "0xff",
+ "EventName": "UNC_U_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.CORE_GTONE",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_CORE_SNP.CORE_GTONE",
+ "PerPkg": "1",
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.CORE_GTONE",
+ "UMask": "0x42",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.EVICT_GTONE",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_CORE_SNP.EVICT_GTONE",
+ "PerPkg": "1",
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.EVICT_GTONE",
+ "UMask": "0x82",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_DIR_LOOKUP.NO_SNP",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_DIR_LOOKUP.NO_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_DIR_LOOKUP.NO_SNP",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_DIR_LOOKUP.SNP",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_DIR_LOOKUP.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_DIR_LOOKUP.SNP",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_DIR_UPDATE.HA",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_DIR_UPDATE.HA",
+ "PerPkg": "1",
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_DIR_UPDATE.HA",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_DIR_UPDATE.TOR",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_DIR_UPDATE.TOR",
+ "PerPkg": "1",
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_DIR_UPDATE.TOR",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_HIT.EX_RDS",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x5F",
+ "EventName": "UNC_H_HITME_HIT.EX_RDS",
+ "PerPkg": "1",
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_HIT.EX_RDS",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_MISC.RFO_HIT_S",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x39",
+ "EventName": "UNC_H_MISC.RFO_HIT_S",
+ "PerPkg": "1",
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_MISC.RFO_HIT_S",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_REQUESTS.INVITOE_LOCAL",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x50",
+ "EventName": "UNC_H_REQUESTS.INVITOE_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_REQUESTS.INVITOE_LOCAL",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_REQUESTS.INVITOE_REMOTE",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x50",
+ "EventName": "UNC_H_REQUESTS.INVITOE_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_REQUESTS.INVITOE_REMOTE",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_REQUESTS.READS",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x50",
+ "EventName": "UNC_H_REQUESTS.READS",
+ "PerPkg": "1",
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_REQUESTS.READS",
+ "UMask": "0x3",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_REQUESTS.READS_LOCAL",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x50",
+ "EventName": "UNC_H_REQUESTS.READS_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_REQUESTS.READS_LOCAL",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_REQUESTS.WRITES",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x50",
+ "EventName": "UNC_H_REQUESTS.WRITES",
+ "PerPkg": "1",
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_REQUESTS.WRITES",
+ "UMask": "0xC",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_REQUESTS.WRITES_LOCAL",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x50",
+ "EventName": "UNC_H_REQUESTS.WRITES_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_REQUESTS.WRITES_LOCAL",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_INSERTS.IRQ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x13",
+ "EventName": "UNC_H_RxC_INSERTS.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_INSERTS.IRQ",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ1_REJECT.PA_MATCH",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_RxC_IRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ1_REJECT.PA_MATCH",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OCCUPANCY.IRQ",
+ "Deprecated": "1",
+ "EventCode": "0x11",
+ "EventName": "UNC_H_RxC_OCCUPANCY.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OCCUPANCY.IRQ",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSPIFWD",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x5C",
+ "EventName": "UNC_H_SNOOP_RESP.RSPIFWD",
+ "PerPkg": "1",
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSPIFWD",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSPSFWD",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x5C",
+ "EventName": "UNC_H_SNOOP_RESP.RSPSFWD",
+ "PerPkg": "1",
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSPSFWD",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSP_FWD_WB",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x5C",
+ "EventName": "UNC_H_SNOOP_RESP.RSP_FWD_WB",
+ "PerPkg": "1",
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSP_FWD_WB",
+ "UMask": "0x20",
+ "Unit": "CHA"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-power.json b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-power.json
new file mode 100644
index 000000000000..64301a600ede
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-power.json
@@ -0,0 +1,201 @@
+[
+ {
+ "BriefDescription": "pclk Cycles",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_P_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_CORE_TRANSITION_CYCLES",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_P_CORE_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_DEMOTIONS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x30",
+ "EventName": "UNC_P_DEMOTIONS",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 0 Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x75",
+ "EventName": "UNC_P_FIVR_PS_PS0_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 1 Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x76",
+ "EventName": "UNC_P_FIVR_PS_PS1_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 2 Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x77",
+ "EventName": "UNC_P_FIVR_PS_PS2_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 3 Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x78",
+ "EventName": "UNC_P_FIVR_PS_PS3_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Thermal Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Power Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "IO P Limit Strongest Lower Limit Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x73",
+ "EventName": "UNC_P_FREQ_MIN_IO_P_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Cycles spent changing Frequency",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x74",
+ "EventName": "UNC_P_FREQ_TRANS_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_MCP_PROCHOT_CYCLES",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_P_MCP_PROCHOT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Memory Phase Shedding Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2F",
+ "EventName": "UNC_P_MEMORY_PHASE_SHEDDING_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_P_PKG_RESIDENCY_C0_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C2E",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_P_PKG_RESIDENCY_C2E_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_P_PKG_RESIDENCY_C3_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2D",
+ "EventName": "UNC_P_PKG_RESIDENCY_C6_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_PMAX_THROTTLED_CYCLES",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_P_PMAX_THROTTLED_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State; C0 and C1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State; C3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State; C6 and C7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "External Prochot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA",
+ "EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Internal Prochot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_P_PROCHOT_INTERNAL_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Total Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_P_TOTAL_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "VR Hot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x42",
+ "EventName": "UNC_P_VR_HOT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ }
+]
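For reference, each entry above maps an event alias to the raw programming of an uncore PMU counter: "EventCode" selects the event, "UMask" the sub-event, and "Unit" identifies which uncore PMU the event belongs to. As a minimal sketch (not part of this patch), the snippet below shows how one entry from uncore-power.json could be turned into an explicit perf event term of the form uncore_<pmu>/event=...,umask=.../. The Unit-to-PMU names used here are assumptions for illustration only; in the real tree, perf's jevents generator performs this mapping when the JSON files are built into the tool.

    # Illustrative sketch, not kernel code: build an explicit perf event term
    # from one pmu-events JSON entry.  UNIT_TO_PMU is an assumed mapping.
    UNIT_TO_PMU = {"PCU": "uncore_pcu", "UBOX": "uncore_ubox", "M3UPI": "uncore_m3upi"}

    def event_string(entry):
        """Return a pmu/event=...,umask=.../ style term for one JSON entry."""
        pmu = UNIT_TO_PMU.get(entry.get("Unit", ""), "cpu")
        terms = []
        if "EventCode" in entry:
            terms.append("event=" + entry["EventCode"])
        if "UMask" in entry:
            terms.append("umask=" + entry["UMask"])
        return "%s/%s/" % (pmu, ",".join(terms))

    # Example: UNC_P_FREQ_TRANS_CYCLES from the file above (EventCode 0x74, Unit PCU).
    entry = {"EventCode": "0x74", "EventName": "UNC_P_FREQ_TRANS_CYCLES", "Unit": "PCU"}
    print(event_string(entry))   # prints: uncore_pcu/event=0x74/

Once these JSON files are installed with perf, the same event can typically be requested by its lower-cased alias instead, e.g. "perf stat -e unc_p_freq_trans_cycles", without spelling out the raw terms.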
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/virtual-memory.json b/tools/perf/pmu-events/arch/x86/cascadelakex/virtual-memory.json
index 792ca39f013a..dd334b416c57 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/virtual-memory.json
@@ -281,4 +281,4 @@
"SampleAfterValue": "100007",
"UMask": "0x20"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/elkhartlake/cache.json b/tools/perf/pmu-events/arch/x86/elkhartlake/cache.json
index 734be4ea095f..d674ee88c3a5 100644
--- a/tools/perf/pmu-events/arch/x86/elkhartlake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/elkhartlake/cache.json
@@ -1,25 +1,94 @@
[
{
- "BriefDescription": "Counts the number of first level data cacheline (dirty) evictions caused by misses, stores, and prefetches.",
+ "BriefDescription": "Counts the number of core requests (demand and L1 prefetchers) rejected by the L2 queue (L2Q) due to a full condition.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "CORE_REJECT_L2Q.ANY",
+ "PDIR_COUNTER": "NA",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of (demand and L1 prefetchers) core requests rejected by the L2 queue (L2Q) due to a full or nearly full condition, which likely indicates back pressure from L2Q. It also counts requests that would have gone directly to the External Queue (XQ), but are rejected due to a full or nearly full condition, indicating back pressure from the IDI link. The L2Q may also reject transactions from a core to ensure fairness between cores, or to delay a cores dirty eviction when the address conflicts incoming external snoops. (Note that L2 prefetcher requests that are dropped are not counted by this event). Counts on a per core basis.",
+ "SampleAfterValue": "200003"
+ },
+ {
+ "BriefDescription": "Counts the number of L1D cacheline (dirty) evictions caused by load misses, stores, and prefetches.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "DL1.DIRTY_EVICTION",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of L1D cacheline (dirty) evictions caused by load misses, stores, and prefetches. Does not count evictions or dirty writebacks caused by snoops. Does not count a replacement unless a (dirty) line was written back.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of demand and prefetch transactions that the External Queue (XQ) rejects due to a full or near full condition.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x30",
+ "EventName": "L2_REJECT_XQ.ANY",
+ "PDIR_COUNTER": "NA",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of demand and prefetch transactions that the External Queue (XQ) rejects due to a full or near full condition which likely indicates back pressure from the IDI link. The XQ may reject transactions from the L2Q (non-cacheable requests), BBL (L2 misses) and WOB (L2 write-back victims).",
+ "SampleAfterValue": "200003"
+ },
+ {
+ "BriefDescription": "Counts the total number of L2 Cache accesses. Counts on a per core basis.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.ALL",
+ "PDIR_COUNTER": "NA",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the total number of L2 Cache Accesses, includes hits, misses, rejects front door requests for CRd/DRd/RFO/ItoM/L2 Prefetches only. Counts on a per core basis.",
+ "SampleAfterValue": "200003"
+ },
+ {
+ "BriefDescription": "Counts the number of L2 Cache accesses that resulted in a hit. Counts on a per core basis.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.HIT",
+ "PDIR_COUNTER": "NA",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of L2 Cache accesses that resulted in a hit from a front door request only (does not include rejects or recycles), Counts on a per core basis.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of L2 Cache accesses that resulted in a miss. Counts on a per core basis.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.MISS",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts the number of first level data cacheline (dirty) evictions caused by misses, stores, and prefetches. Does not count evictions or dirty writebacks caused by snoops. Does not count a replacement unless a (dirty) line was written back.",
+ "PublicDescription": "Counts the number of L2 Cache accesses that resulted in a miss from a front door request only (does not include rejects or recycles). Counts on a per core basis.",
"SampleAfterValue": "200003",
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts the number of L2 Cache accesses that miss the L2 and get rejected. Counts on a per core basis.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.REJECTS",
+ "PDIR_COUNTER": "NA",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of L2 Cache accesses that miss the L2 and get BBL reject short and long rejects (includes those counted in L2_reject_XQ.any). Counts on a per core basis.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
"BriefDescription": "Counts the number of cacheable memory requests that miss in the LLC. Counts on a per core basis.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.MISS",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts the number of cacheable memory requests that miss in the Last Level Cache (LLC). If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
+ "PublicDescription": "Counts the number of cacheable memory requests that miss in the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
"SampleAfterValue": "200003",
"UMask": "0x41"
},
@@ -29,21 +98,31 @@
"Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of cacheable memory requests that access the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
"SampleAfterValue": "200003",
"UMask": "0x4f"
},
{
+ "BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in the L2, LLC, DRAM or MMIO (Non-DRAM).",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS.IFETCH",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "200003",
+ "UMask": "0x38"
+ },
+ {
"BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in DRAM or MMIO (Non-DRAM).",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.IFETCH_DRAM_HIT",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts the number of cycles a core is stalled due to an instruction cache or translation lookaside buffer (TLB) access which hit in DRAM or MMIO (non-DRAM).",
+ "PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or translation lookaside buffer (TLB) miss which hit in DRAM or MMIO (non-DRAM).",
"SampleAfterValue": "200003",
"UMask": "0x20"
},
@@ -53,9 +132,9 @@
"Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.IFETCH_L2_HIT",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts the number of cycles a core is stalled due to an instruction cache or Translation Lookaside Buffer (TLB) access which hit in the L2 cache.",
+ "PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or Translation Lookaside Buffer (TLB) miss which hit in the L2 cache.",
"SampleAfterValue": "200003",
"UMask": "0x8"
},
@@ -65,19 +144,29 @@
"Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.IFETCH_LLC_HIT",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts the number of cycles a core is stalled due to an instruction cache or Translation Lookaside Buffer (TLB) access which hit in the Last Level Cache (LLC) or other core with HITE/F/M.",
+ "PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or Translation Lookaside Buffer (TLB) miss which hit in the Last Level Cache (LLC) or other core with HITE/F/M.",
"SampleAfterValue": "200003",
"UMask": "0x10"
},
{
+ "BriefDescription": "Counts the number of cycles the core is stalled due to a demand load miss which hit in the L2, LLC, DRAM or MMIO (Non-DRAM).",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS.LOAD",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "200003",
+ "UMask": "0x7"
+ },
+ {
"BriefDescription": "Counts the number of cycles the core is stalled due to a demand load miss which hit in DRAM or MMIO (Non-DRAM).",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.LOAD_DRAM_HIT",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
"UMask": "0x4"
@@ -88,9 +177,8 @@
"Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.LOAD_L2_HIT",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts the number of cycles a core is stalled due to a demand load which hit in the L2 cache.",
"SampleAfterValue": "200003",
"UMask": "0x1"
},
@@ -100,34 +188,48 @@
"Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.LOAD_LLC_HIT",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts the number of cycles a core is stalled due to a demand load which hit in the Last Level Cache (LLC) or other core with HITE/F/M.",
+ "PublicDescription": "Counts the number of cycles the core is stalled due to a demand load which hit in the Last Level Cache (LLC) or other core with HITE/F/M.",
"SampleAfterValue": "200003",
"UMask": "0x2"
},
{
- "BriefDescription": "Counts the number of cycles a core is stalled due to a store buffer being full.",
+ "BriefDescription": "Counts the number of cycles the core is stalled due to a store buffer being full.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.STORE_BUFFER_FULL",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
"UMask": "0x40"
},
{
- "BriefDescription": "Counts the number of load ops retired that hit in DRAM.",
+ "BriefDescription": "Counts the number of load uops retired that hit in DRAM.",
+ "CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.DRAM_HIT",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
"UMask": "0x80"
},
{
+ "BriefDescription": "Counts the number of load uops retired that hit in the L3 cache, in which a snoop was required and modified data was forwarded from another core or module.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.HITM",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "200003",
+ "UMask": "0x20"
+ },
+ {
"BriefDescription": "Counts the number of load uops retired that hit in the L1 data cache.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
@@ -179,6 +281,7 @@
"BriefDescription": "Counts the number of load uops retired that hit in the L3 cache.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
+ "Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
"PEBS": "1",
@@ -187,6 +290,19 @@
"UMask": "0x4"
},
{
+ "BriefDescription": "Counts the number of memory uops retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.ALL",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of memory uops retired. A single uop that performs both a load AND a store will be counted as 1, not 2 (e.g. ADD [mem], CONST)",
+ "SampleAfterValue": "200003",
+ "UMask": "0x83"
+ },
+ {
"BriefDescription": "Counts the number of load uops retired.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
@@ -213,14 +329,810 @@
"UMask": "0x82"
},
{
+ "BriefDescription": "Counts the number of load uops retired that performed one or more locks.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "200003",
+ "UMask": "0x21"
+ },
+ {
+ "BriefDescription": "Counts the number of memory uops retired that were splits.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "200003",
+ "UMask": "0x43"
+ },
+ {
+ "BriefDescription": "Counts the number of retired split load uops.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "200003",
+ "UMask": "0x41"
+ },
+ {
+ "BriefDescription": "Counts the number of retired split store uops.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "200003",
+ "UMask": "0x42"
+ },
+ {
+ "BriefDescription": "Counts all code reads that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1F803C0044",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all code reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0044",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all code reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0044",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all code reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0044",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all code reads that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0044",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all code reads that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0044",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.COREWB_M.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3001F803C0000",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1F803C0004",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0004",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0004",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0004",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0004",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0004",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1F803C0001",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0001",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0001",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0001",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0001",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0001",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1F803C0001",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HITM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0001",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0001",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0001",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_MISS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0001",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0001",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1F803C0002",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0002",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0002",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0002",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0002",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0002",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores which modify a full 64 byte cacheline that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.FULL_STREAMING_WR.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x801F803C0000",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache hardware prefetches and software prefetches (except PREFETCHW and PFRFO) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0400",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1F803C0040",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0040",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0040",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0040",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0040",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0040",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1F803C0010",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0010",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0010",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0010",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0010",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0010",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1F803C0020",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0020",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0020",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0020",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0020",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0020",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writebacks from L1 cache that miss the L2 cache that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.L1WB_M.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1001F803C0000",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writeBacks from L2 cache that miss the L3 cache that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.L2WB_M.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2001F803C0000",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores which modify only part of a 64 byte cacheline that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.PARTIAL_STREAMING_WR.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x401F803C0000",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1F803C0477",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0477",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0477",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0477",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0477",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0477",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.STREAMING_WR.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1F803C0800",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x101F803C0000",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1010003C0000",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004003C0000",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1008003C0000",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1002003C0000",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1001003C0000",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory writes that were supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_WR.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x201F803C0000",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to instruction cache misses.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.ICACHE",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
"UMask": "0x20"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/elkhartlake/floating-point.json b/tools/perf/pmu-events/arch/x86/elkhartlake/floating-point.json
index 2515b9aa6e66..2e1b80c714fd 100644
--- a/tools/perf/pmu-events/arch/x86/elkhartlake/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/elkhartlake/floating-point.json
@@ -1,16 +1,29 @@
[
{
- "BriefDescription": "Counts the number of cycles the floating point divider is busy. Does not imply a stall waiting for the divider.",
+ "BriefDescription": "Counts the number of cycles the floating point divider is busy.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0xcd",
"EventName": "CYCLES_DIV_BUSY.FPDIV",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of cycles the floating point divider is busy. Does not imply a stall waiting for the divider.",
"SampleAfterValue": "200003",
"UMask": "0x2"
},
{
+ "BriefDescription": "Counts the number of floating point operations retired that required microcode assist.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.FP_ASSIST",
+ "PDIR_COUNTER": "NA",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of floating point operations retired that required microcode assist, which is not a reflection of the number of FP operations, instructions or uops.",
+ "SampleAfterValue": "20003",
+ "UMask": "0x4"
+ },
+ {
"BriefDescription": "Counts the number of floating point divide uops retired (x87 and SSE, including x87 sqrt).",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
@@ -21,4 +34,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x8"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/elkhartlake/frontend.json b/tools/perf/pmu-events/arch/x86/elkhartlake/frontend.json
index b7b8cb7bd868..5d938a5dafcf 100644
--- a/tools/perf/pmu-events/arch/x86/elkhartlake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/elkhartlake/frontend.json
@@ -1,11 +1,11 @@
[
{
- "BriefDescription": "Counts the total number of BACLEARS.",
+ "BriefDescription": "Counts the total number of BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0xe6",
"EventName": "BACLEARS.ANY",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the total number of BACLEARS, which occur when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
"SampleAfterValue": "200003",
@@ -17,7 +17,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xe6",
"EventName": "BACLEARS.COND",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
"UMask": "0x10"
@@ -28,7 +28,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xe6",
"EventName": "BACLEARS.INDIRECT",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
"UMask": "0x2"
@@ -39,18 +39,18 @@
"Counter": "0,1,2,3",
"EventCode": "0xe6",
"EventName": "BACLEARS.RETURN",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
"UMask": "0x8"
},
{
- "BriefDescription": "Counts the number of BACLEARS due to a non-indirect, non-conditional jump.",
+ "BriefDescription": "Counts the number of BACLEARS due to a direct, unconditional jump.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0xe6",
"EventName": "BACLEARS.UNCOND",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
"UMask": "0x4"
@@ -61,7 +61,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xe9",
"EventName": "DECODE_RESTRICTION.PREDECODE_WRONG",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
"UMask": "0x1"
@@ -72,22 +72,34 @@
"Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.ACCESSES",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the total number of requests to the instruction cache. The event only counts new cache line accesses, so that multiple back to back fetches to the exact same cache line or byte chunk count as one. Specifically, the event counts when accesses from sequential code crosses the cache line boundary, or when a branch target is moved to a new line or to a non-sequential byte chunk of the same line.",
"SampleAfterValue": "200003",
"UMask": "0x3"
},
{
+ "BriefDescription": "Counts the number of instruction cache hits.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.HIT",
+ "PDIR_COUNTER": "NA",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of requests that hit in the instruction cache. The event only counts new cache line accesses, so that multiple back to back fetches to the exact same cache line and byte chunk count as one. Specifically, the event counts when accesses from sequential code crosses the cache line boundary, or when a branch target is moved to a new line or to a non-sequential byte chunk of the same line.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts the number of instruction cache misses.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.MISSES",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of missed requests to the instruction cache. The event only counts new cache line accesses, so that multiple back to back fetches to the exact same cache line and byte chunk count as one. Specifically, the event counts when accesses from sequential code crosses the cache line boundary, or when a branch target is moved to a new line or to a non-sequential byte chunk of the same line.",
"SampleAfterValue": "200003",
"UMask": "0x2"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/elkhartlake/memory.json b/tools/perf/pmu-events/arch/x86/elkhartlake/memory.json
index 4e4eab23a300..15eba23796e4 100644
--- a/tools/perf/pmu-events/arch/x86/elkhartlake/memory.json
+++ b/tools/perf/pmu-events/arch/x86/elkhartlake/memory.json
@@ -1,25 +1,111 @@
[
{
- "BriefDescription": "Counts the number of memory ordering machine clears triggered by a snoop from an external agent.",
+ "BriefDescription": "Counts the number of machine clears due to memory ordering caused by a snoop from an external agent. Does not count internally generated machine clears such as those due to memory disambiguation.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts the number of memory ordering machine clears triggered by a snoop from an external agent. Does not count internally generated machine clears such as those due to disambiguations.",
"SampleAfterValue": "20003",
"UMask": "0x2"
},
{
+ "BriefDescription": "Counts the number of misaligned load uops that are 4K page splits.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "MISALIGN_MEM_REF.LOAD_PAGE_SPLIT",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of misaligned store uops that are 4K page splits.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "MISALIGN_MEM_REF.STORE_PAGE_SPLIT",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts all code reads that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000044",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all code reads that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000044",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.COREWB_M.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3002184000000",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.COREWB_M.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3002184000000",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000004",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000004",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were not supplied by the L3 cache.",
"Counter": "0,1,2,3",
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x2104000001",
+ "MSRValue": "0x2184000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -29,9 +115,8 @@
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x2104000001",
+ "MSRValue": "0x2184000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -41,9 +126,8 @@
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x2104000001",
+ "MSRValue": "0x2184000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -53,9 +137,8 @@
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x2104000001",
+ "MSRValue": "0x2184000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -65,9 +148,8 @@
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_RFO.L3_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x2104000002",
+ "MSRValue": "0x2184000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -77,10 +159,284 @@
"EventCode": "0XB7",
"EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x2104000002",
+ "MSRValue": "0x2184000002",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores which modify a full 64 byte cacheline that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.FULL_STREAMING_WR.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x802184000000",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores which modify a full 64 byte cacheline that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.FULL_STREAMING_WR.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x802184000000",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000040",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000040",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000010",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000010",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000020",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000020",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writebacks from L1 cache that miss the L2 cache that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.L1WB_M.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1002184000000",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writebacks from L1 cache that miss the L2 cache that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.L1WB_M.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1002184000000",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writeBacks from L2 cache that miss the L3 cache that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.L2WB_M.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2002184000000",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writeBacks from L2 cache that miss the L3 cache that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.L2WB_M.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2002184000000",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O accesses, that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.OTHER.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184008000",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O accesses, that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.OTHER.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184008000",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores which modify only part of a 64 byte cacheline that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.PARTIAL_STREAMING_WR.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x402184000000",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores which modify only part of a 64 byte cacheline that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.PARTIAL_STREAMING_WR.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x402184000000",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all hardware and software prefetches that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.PREFETCHES.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000470",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000477",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000477",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.STREAMING_WR.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000800",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.STREAMING_WR.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000800",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x102184000000",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x102184000000",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory writes that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_WR.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x202184000000",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory writes that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_WR.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x202184000000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/elkhartlake/other.json b/tools/perf/pmu-events/arch/x86/elkhartlake/other.json
index 8692d4847476..4a1b7cc5aa23 100644
--- a/tools/perf/pmu-events/arch/x86/elkhartlake/other.json
+++ b/tools/perf/pmu-events/arch/x86/elkhartlake/other.json
@@ -6,7 +6,7 @@
"EdgeDetect": "1",
"EventCode": "0x63",
"EventName": "BUS_LOCK.ALL",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003"
},
@@ -27,7 +27,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "BUS_LOCK.CYCLES_OTHER_BLOCK",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
"UMask": "0x2"
@@ -38,7 +38,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "BUS_LOCK.CYCLES_SELF_BLOCK",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
"UMask": "0x1"
@@ -71,7 +71,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "C0_STALLS.LOAD_DRAM_HIT",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
"UMask": "0x4"
@@ -82,7 +82,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "C0_STALLS.LOAD_L2_HIT",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
"UMask": "0x1"
@@ -93,7 +93,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "C0_STALLS.LOAD_LLC_HIT",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
"UMask": "0x2"
@@ -104,7 +104,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xcb",
"EventName": "HW_INTERRUPTS.MASKED",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of core cycles during which interrupts are masked (disabled). Increments by 1 each core cycle that EFLAGS.IF is 0, regardless of whether interrupts are pending or not.",
"SampleAfterValue": "200003",
@@ -116,7 +116,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xcb",
"EventName": "HW_INTERRUPTS.PENDING_AND_MASKED",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of core cycles during which there are pending interrupts while interrupts are masked (disabled). Increments by 1 each core cycle that both EFLAGS.IF is 0 and an INTR is pending (which means the APIC is telling the ROB to cause an INTR). This event does not increment if EFLAGS.IF is 0 but all interrupt in the APICs Interrupt Request Register (IRR) are inhibited by the PPR (thus either by ISRV or TPR) because in these cases the interrupts would be held up in the APIC and would not be pended to the ROB. This event does count when an interrupt is only inhibited by MOV/POP SS state machines or the STI state machine. These extra inhibits only last for a single instructions and would not be important.",
"SampleAfterValue": "200003",
@@ -128,12 +128,111 @@
"Counter": "0,1,2,3",
"EventCode": "0xcb",
"EventName": "HW_INTERRUPTS.RECEIVED",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "203",
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts all code reads that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10044",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all code reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000044",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all code reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000044",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all code reads that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8000000000000044",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.COREWB_M.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3000000010000",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.COREWB_M.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8003000000000000",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10004",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000004",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000004",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that have any type of response.",
"Counter": "0,1,2,3",
"EventCode": "0XB7",
@@ -141,7 +240,39 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000001",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000001",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8000000000000001",
+ "Offcore": "1",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -153,7 +284,39 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.DRAM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000001",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.LOCAL_DRAM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000001",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.OUTSTANDING",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8000000000000001",
+ "Offcore": "1",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -165,7 +328,347 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000002",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000002",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8000000000000002",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores which modify a full 64 byte cacheline that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.FULL_STREAMING_WR.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800000010000",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache hardware prefetches and software prefetches (except PREFETCHW and PFRFO) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10400",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10040",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000040",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000040",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8000000000000040",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10010",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000010",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000010",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10020",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000020",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000020",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8000000000000020",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writebacks from L1 cache that miss the L2 cache that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.L1WB_M.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000000010000",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writeBacks from L2 cache that miss the L3 cache that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.L2WB_M.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000000010000",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O accesses, that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.OTHER.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x18000",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores which modify only part of a 64 byte cacheline that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.PARTIAL_STREAMING_WR.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400000010000",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all hardware and software prefetches that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.PREFETCHES.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10470",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10477",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000477",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000477",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8000000000000477",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10800",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100000010000",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100184000000",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100184000000",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8000100000000000",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory writes that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_WR.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200000010000",
+ "Offcore": "1",
"SampleAfterValue": "100003",
"UMask": "0x1"
}
diff --git a/tools/perf/pmu-events/arch/x86/elkhartlake/pipeline.json b/tools/perf/pmu-events/arch/x86/elkhartlake/pipeline.json
index c18acb422145..09919fdb9a38 100644
--- a/tools/perf/pmu-events/arch/x86/elkhartlake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/elkhartlake/pipeline.json
@@ -132,6 +132,17 @@
"UMask": "0x7e"
},
{
+ "BriefDescription": "Counts the number of mispredicted near indirect JMP and near indirect CALL branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "200003",
+ "UMask": "0xeb"
+ },
+ {
"BriefDescription": "Counts the number of mispredicted near RET branch instructions retired.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
@@ -159,7 +170,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xe8",
"EventName": "BTCLEAR.ANY",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the total number of BTCLEARS which occurs when the Branch Target Buffer (BTB) predicts a taken branch.",
"SampleAfterValue": "200003"
@@ -169,9 +180,9 @@
"CollectPEBSRecord": "2",
"Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.CORE",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "33",
- "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1.",
+ "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1.",
"SampleAfterValue": "2000003",
"UMask": "0x2"
},
@@ -181,9 +192,9 @@
"Counter": "0,1,2,3",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.CORE_P",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses a programmable general purpose performance counter.",
+ "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses a programmable general purpose performance counter.",
"SampleAfterValue": "2000003"
},
{
@@ -192,7 +203,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.REF",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses fixed counter 2.",
"SampleAfterValue": "2000003",
@@ -203,7 +214,7 @@
"CollectPEBSRecord": "2",
"Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "34",
"PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses fixed counter 2.",
"SampleAfterValue": "2000003",
@@ -216,7 +227,7 @@
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.REF_TSC_P",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses a programmable general purpose performance counter.",
+ "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses a programmable general purpose performance counter.",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
@@ -226,18 +237,19 @@
"Counter": "0,1,2,3",
"EventCode": "0xcd",
"EventName": "CYCLES_DIV_BUSY.ANY",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "2000003"
},
{
- "BriefDescription": "Counts the number of cycles the integer divider is busy. Does not imply a stall waiting for the divider.",
+ "BriefDescription": "Counts the number of cycles the integer divider is busy.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0xcd",
"EventName": "CYCLES_DIV_BUSY.IDIV",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of cycles the integer divider is busy. Does not imply a stall waiting for the divider.",
"SampleAfterValue": "200003",
"UMask": "0x1"
},
@@ -264,29 +276,107 @@
"SampleAfterValue": "2000003"
},
{
- "BriefDescription": "Counts the total number of machine clears including memory ordering, memory disambiguation, self-modifying code, page faults and floating point assist.",
+ "BriefDescription": "Counts the number of retired loads that are blocked because it initially appears to be store forward blocked, but subsequently is shown not to be blocked based on 4K alias check.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.4K_ALIAS",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of retired loads that are blocked for any of the following reasons: DTLB miss, address alias, store forward or data unknown (includes memory disambiguation blocks and ESP consuming load blocks).",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.ALL",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts the number of retired loads that are blocked because its address exactly matches an older store whose data is not ready.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.DATA_UNKNOWN",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of retired loads that are blocked because its address partially overlapped with an older store.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the total number of machine clears for any reason including, but not limited to, memory ordering, memory disambiguation, SMC, and FP assist.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.ANY",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "20003"
},
{
+ "BriefDescription": "Counts the number of machine clears due to memory ordering in which an internal load passes an older store within the same CPU.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.DISAMBIGUATION",
+ "PDIR_COUNTER": "NA",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "20003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears due to a page fault. Counts both I-Side and D-Side (Loads/Stores) page faults. A page fault occurs when either the page is not present, or an access violation occurs.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.PAGE_FAULT",
+ "PDIR_COUNTER": "NA",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "20003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears due to program modifying data (self modifying code) within 1K of a recently fetched code page.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "PDIR_COUNTER": "NA",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "20003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.ALL",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the instruction queue (IQ). Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.",
+ "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the instruction queue (IQ) even if an FE_bound event occurs during this period. Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.",
"SampleAfterValue": "1000003",
"UMask": "0x6"
},
{
"BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to fast nukes such as memory ordering and memory disambiguation machine clears.",
+ "CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.FASTNUKE",
@@ -297,6 +387,7 @@
},
{
"BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a machine clear (nuke) of any kind including memory ordering and memory disambiguation.",
+ "CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS",
@@ -311,7 +402,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.MISPREDICT",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
"UMask": "0x4"
@@ -322,7 +413,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.MONUKE",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
"UMask": "0x2"
@@ -333,7 +424,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.ALL",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003"
},
@@ -343,7 +434,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
"UMask": "0x1"
@@ -354,7 +445,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.MEM_SCHEDULER",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
"UMask": "0x2"
@@ -365,7 +456,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
"UMask": "0x8"
@@ -376,7 +467,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.REGISTER",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
"UMask": "0x20"
@@ -387,7 +478,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.REORDER_BUFFER",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
"UMask": "0x40"
@@ -398,7 +489,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.SERIALIZATION",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
"UMask": "0x10"
@@ -409,18 +500,18 @@
"Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.STORE_BUFFER",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
"UMask": "0x4"
},
{
- "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to frontend stalls.",
+ "BriefDescription": "Counts the total number of issue slots every cycle that were not consumed by the backend due to frontend stalls.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.ALL",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003"
},
@@ -430,7 +521,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.BRANCH_DETECT",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
"SampleAfterValue": "1000003",
@@ -442,7 +533,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.BRANCH_RESTEER",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BTCLEARS, which occurs when the Branch Target Buffer (BTB) predicts a taken branch.",
"SampleAfterValue": "1000003",
@@ -454,7 +545,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.CISC",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
"UMask": "0x1"
@@ -465,7 +556,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.DECODE",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
"UMask": "0x8"
@@ -476,7 +567,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.ITLB",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to Instruction Table Lookaside Buffer (ITLB) misses.",
"SampleAfterValue": "1000003",
@@ -488,7 +579,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.OTHER",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
"UMask": "0x80"
@@ -499,7 +590,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.PREDECODE",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
"UMask": "0x4"
@@ -515,16 +606,58 @@
"SampleAfterValue": "1000003"
},
{
+ "BriefDescription": "Counts the number of uops issued by the front end every cycle.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x0e",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PDIR_COUNTER": "NA",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of uops issued by the front end every cycle. When 4-uops are requested and only 2-uops are delivered, the event counts 2. Uops_issued correlates to the number of ROB entries. If uop takes 2 ROB slots it counts as 2 uops_issued.",
+ "SampleAfterValue": "200003"
+ },
+ {
+ "BriefDescription": "Counts the total number of uops retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.ALL",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Counts the number of integer divide uops retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.IDIV",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
"BriefDescription": "Counts the number of uops that are from complex flows issued by the micro-sequencer (MS).",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.MS",
- "PDIR_COUNTER": "na",
"PEBS": "1",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of uops that are from complex flows issued by the Microcode Sequencer (MS). This includes uops from flows due to complex instructions, faults, assists, and inserted flows.",
"SampleAfterValue": "2000003",
"UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of x87 uops retired, includes those in MS flows.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.X87",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
}
]
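As an aside (illustrative, not from this commit): the EventCode/UMask pairs in these JSON entries are the raw hardware encodings that perf ultimately programs into the PMU, with the event select in bits 0-7 of the raw config and the unit mask in bits 8-15. A minimal sketch of counting one of the events above (CYCLES_DIV_BUSY.IDIV, EventCode 0xcd, UMask 0x1) through the raw interface, assuming an x86 system where this encoding applies; the measurement scaffolding is purely hypothetical:

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	/* CYCLES_DIV_BUSY.IDIV from the JSON above: EventCode 0xcd, UMask 0x1 */
	attr.config = 0xcd | (0x1 << 8);
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* this thread, any CPU */
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... run the code to be measured here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("integer divider busy cycles: %llu\n",
		       (unsigned long long)count);
	close(fd);
	return 0;
}

With the JSON shipped in tools/perf/pmu-events, the same counter is reachable by name, so the raw encoding is only needed when bypassing perf's event parser.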
diff --git a/tools/perf/pmu-events/arch/x86/elkhartlake/virtual-memory.json b/tools/perf/pmu-events/arch/x86/elkhartlake/virtual-memory.json
index c58b589ff80f..b82f11591f13 100644
--- a/tools/perf/pmu-events/arch/x86/elkhartlake/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/elkhartlake/virtual-memory.json
@@ -5,18 +5,18 @@
"Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.PDE_CACHE_MISS",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
"UMask": "0x80"
},
{
- "BriefDescription": "Counts the number of first level TLB misses but second level hits due to loads that did not start a page walk. Account for all pages sizes. Will result in a DTLB write from STLB.",
+ "BriefDescription": "Counts the number of first level TLB misses but second level hits due to a demand load that did not start a page walk. Account for all page sizes. Will result in a DTLB write from STLB.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
"UMask": "0x20"
@@ -33,12 +33,24 @@
"UMask": "0xe"
},
{
+ "BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 1G page.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
+ "PDIR_COUNTER": "NA",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 1GB pages. Includes page walks that page fault.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
+ },
+ {
"BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 2M or 4M page.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.",
"SampleAfterValue": "200003",
@@ -50,21 +62,21 @@
"Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.",
"SampleAfterValue": "200003",
"UMask": "0x2"
},
{
- "BriefDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for loads every cycle.",
+ "BriefDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for demand loads every cycle.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for loads every cycle. A page walk is outstanding from start till PMH becomes idle again (ready to serve next walk). Includes EPT-walk intervals.",
+ "PublicDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for demand loads every cycle. A page walk is outstanding from start till PMH becomes idle again (ready to serve next walk). Includes EPT-walk intervals.",
"SampleAfterValue": "200003",
"UMask": "0x10"
},
@@ -74,19 +86,52 @@
"Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.PDE_CACHE_MISS",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts the number of page walks due to storse that miss the PDE (Page Directory Entry) cache.",
"SampleAfterValue": "2000003",
"UMask": "0x80"
},
{
+ "BriefDescription": "Counts the number of first level TLB misses but second level hits due to stores that did not start a page walk. Account for all pages sizes. Will result in a DTLB write from STLB.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "PDIR_COUNTER": "NA",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to store DTLB misses to any page size.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 1G page.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
+ "PDIR_COUNTER": "NA",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 1G pages. Includes page walks that page fault.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
+ },
+ {
"BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 2M or 4M page.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.",
"SampleAfterValue": "2000003",
@@ -98,7 +143,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.",
"SampleAfterValue": "2000003",
@@ -110,7 +155,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_PENDING",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for stores every cycle. A page walk is outstanding from start till PMH becomes idle again (ready to serve next walk). Includes EPT-walk intervals.",
"SampleAfterValue": "200003",
@@ -122,11 +167,11 @@
"Counter": "0,1,2,3",
"EventCode": "0x4f",
"EventName": "EPT.EPDE_HIT",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of Extended Page Directory Entry hits. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
"SampleAfterValue": "2000003",
- "UMask": "0x2"
+ "UMask": "0x1"
},
{
"BriefDescription": "Counts the number of Extended Page Directory Entry misses.",
@@ -134,11 +179,11 @@
"Counter": "0,1,2,3",
"EventCode": "0x4f",
"EventName": "EPT.EPDE_MISS",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number Extended Page Directory Entry misses. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
"SampleAfterValue": "2000003",
- "UMask": "0x1"
+ "UMask": "0x2"
},
{
"BriefDescription": "Counts the number of Extended Page Directory Pointer Entry hits.",
@@ -146,7 +191,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x4f",
"EventName": "EPT.EPDPE_HIT",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number Extended Page Directory Pointer Entry hits. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
"SampleAfterValue": "2000003",
@@ -158,19 +203,31 @@
"Counter": "0,1,2,3",
"EventCode": "0x4f",
"EventName": "EPT.EPDPE_MISS",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number Extended Page Directory Pointer Entry misses. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
"SampleAfterValue": "2000003",
"UMask": "0x8"
},
{
+ "BriefDescription": "Counts the number of page walks outstanding for an Extended Page table walk including GTLB hits per cycle.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4f",
+ "EventName": "EPT.WALK_PENDING",
+ "PDIR_COUNTER": "NA",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of page walks outstanding for an Extended Page table walk including GTLB hits per cycle. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10"
+ },
+ {
"BriefDescription": "Counts the number of times there was an ITLB miss and a new translation was filled into the ITLB.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "ITLB.FILLS",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of times the machine was unable to find a translation in the Instruction Translation Lookaside Buffer (ITLB) and a new translation was filled into the ITLB. The event is speculative in nature, but will not count translations (page walks) that are begun and not finished, or translations that are finished but not filled into the ITLB.",
"SampleAfterValue": "200003",
@@ -182,29 +239,52 @@
"Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.PDE_CACHE_MISS",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "2000003",
"UMask": "0x80"
},
{
- "BriefDescription": "Counts the number of first level TLB misses but second level hits due to an instruction fetch that did not start a page walk. Account for all pages sizes. Will results in a DTLB write from STLB.",
+ "BriefDescription": "Counts the number of first level TLB misses but second level hits due to an instruction fetch that did not start a page walk. Account for all pages sizes. Will result in an ITLB write from STLB.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.STLB_HIT",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "2000003",
"UMask": "0x20"
},
{
+ "BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to any page size.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to a 1G page.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
+ "PDIR_COUNTER": "NA",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 1G pages. Includes page walks that page fault.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
+ },
+ {
"BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to a 2M or 4M page.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.",
"SampleAfterValue": "2000003",
@@ -216,7 +296,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.",
"SampleAfterValue": "2000003",
@@ -228,14 +308,25 @@
"Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_PENDING",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for instruction fetches every cycle. A page walk is outstanding from start till PMH becomes idle again (ready to serve next walk).",
"SampleAfterValue": "200003",
"UMask": "0x10"
},
{
- "BriefDescription": "Counts the number of memory retired ops that missed in the second level TLB.",
+ "BriefDescription": "Counts the number of retired loads that are blocked due to a first level TLB miss.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.DTLB_MISS",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of memory uops retired that missed in the second level TLB.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"Data_LA": "1",
@@ -247,7 +338,7 @@
"UMask": "0x13"
},
{
- "BriefDescription": "Counts the number of load ops retired that miss in the second Level TLB.",
+ "BriefDescription": "Counts the number of load uops retired that miss in the second Level TLB.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"Data_LA": "1",
@@ -259,7 +350,7 @@
"UMask": "0x11"
},
{
- "BriefDescription": "Counts the number of store ops retired that miss in the second level TLB.",
+ "BriefDescription": "Counts the number of store uops retired that miss in the second level TLB.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"Data_LA": "1",
@@ -270,4 +361,4 @@
"SampleAfterValue": "200003",
"UMask": "0x12"
}
-]
\ No newline at end of file
+]
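Another illustrative aside, not part of the change: the descriptions above note that the WALK_PENDING events accumulate one count per outstanding page walk per cycle while WALK_COMPLETED counts finished walks, so the ratio of the two approximates the average walk duration in cycles. A tiny sketch of that arithmetic, with made-up counter values:

#include <stdio.h>
#include <stdint.h>

/* Approximate average page-walk duration, in core cycles:
 * WALK_PENDING accumulates one count per outstanding walk per cycle and
 * WALK_COMPLETED counts walks that finished, so the ratio is the mean
 * number of cycles a walk stayed outstanding. */
static double avg_walk_cycles(uint64_t walk_pending, uint64_t walk_completed)
{
	return walk_completed ? (double)walk_pending / (double)walk_completed : 0.0;
}

int main(void)
{
	/* Made-up sample: 4,500,000 pending-cycle counts over 90,000 walks. */
	printf("avg DTLB load walk: %.1f cycles\n",
	       avg_walk_cycles(4500000ULL, 90000ULL));
	return 0;
}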
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/cache.json b/tools/perf/pmu-events/arch/x86/goldmont/cache.json
index 0b887d73b7f3..ed957d4f9c6d 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/cache.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/cache.json
@@ -1300,4 +1300,4 @@
"SampleAfterValue": "100007",
"UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/floating-point.json b/tools/perf/pmu-events/arch/x86/goldmont/floating-point.json
index bb364a04a75f..37174392a510 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/floating-point.json
@@ -30,4 +30,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x8"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/frontend.json b/tools/perf/pmu-events/arch/x86/goldmont/frontend.json
index 120ff65897c0..216da6e121c8 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/frontend.json
@@ -79,4 +79,4 @@
"SampleAfterValue": "200003",
"UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/memory.json b/tools/perf/pmu-events/arch/x86/goldmont/memory.json
index 6252503f68a1..9f6f0328249e 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/memory.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/memory.json
@@ -31,4 +31,4 @@
"SampleAfterValue": "200003",
"UMask": "0x4"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json b/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json
index 5dba4313013f..42ff0b134aeb 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json
@@ -354,7 +354,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.SMC",
- "PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification. Self-modifying code (SMC) causes a severe penalty in all Intel architecture processors.",
+ "PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification. Self-modifying code (SMC) causes a severe penalty in all Intel(R) architecture processors.",
"SampleAfterValue": "200003",
"UMask": "0x1"
},
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json b/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json
index d5e89c74a9be..2e17e02e1463 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json
@@ -75,4 +75,4 @@
"SampleAfterValue": "200003",
"UMask": "0x2"
}
-]
\ No newline at end of file

+]
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/cache.json b/tools/perf/pmu-events/arch/x86/goldmontplus/cache.json
index 59c039169eb8..16e8913c0434 100644
--- a/tools/perf/pmu-events/arch/x86/goldmontplus/cache.json
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/cache.json
@@ -1462,4 +1462,4 @@
"SampleAfterValue": "100007",
"UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/floating-point.json b/tools/perf/pmu-events/arch/x86/goldmontplus/floating-point.json
index c1f00c9470f4..9c3d22439530 100644
--- a/tools/perf/pmu-events/arch/x86/goldmontplus/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/floating-point.json
@@ -35,4 +35,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x8"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/frontend.json b/tools/perf/pmu-events/arch/x86/goldmontplus/frontend.json
index 3fdc788a2b20..4c2abfbac8f8 100644
--- a/tools/perf/pmu-events/arch/x86/goldmontplus/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/frontend.json
@@ -95,4 +95,4 @@
"SampleAfterValue": "200003",
"UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/memory.json b/tools/perf/pmu-events/arch/x86/goldmontplus/memory.json
index e26763d16d52..ae0cb3451866 100644
--- a/tools/perf/pmu-events/arch/x86/goldmontplus/memory.json
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/memory.json
@@ -35,4 +35,4 @@
"SampleAfterValue": "200003",
"UMask": "0x4"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json b/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json
index 4d7e3129e5ac..2b712b12cc1f 100644
--- a/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json
@@ -428,7 +428,7 @@
"EventName": "MACHINE_CLEARS.SMC",
"PDIR_COUNTER": "na",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification. Self-modifying code (SMC) causes a severe penalty in all Intel architecture processors.",
+ "PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification. Self-modifying code (SMC) causes a severe penalty in all Intel(R) architecture processors.",
"SampleAfterValue": "20003",
"UMask": "0x1"
},
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json b/tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json
index 36eaec87eead..1f7db22c15e6 100644
--- a/tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json
@@ -218,4 +218,4 @@
"SampleAfterValue": "20003",
"UMask": "0x20"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/haswell/cache.json b/tools/perf/pmu-events/arch/x86/haswell/cache.json
index 91464cfb9615..3b0f3a264246 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/cache.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/cache.json
@@ -556,7 +556,7 @@
"UMask": "0x20"
},
{
- "BriefDescription": "All retired load uops.",
+ "BriefDescription": "Retired load uops.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1",
@@ -564,11 +564,12 @@
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"PEBS": "1",
+ "PublicDescription": "Counts all retired load uops. This event accounts for SW prefetch uops of PREFETCHNTA or PREFETCHT0/1/2 or PREFETCHW.",
"SampleAfterValue": "2000003",
"UMask": "0x81"
},
{
- "BriefDescription": "All retired store uops.",
+ "BriefDescription": "Retired store uops.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1",
@@ -577,6 +578,7 @@
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"L1_Hit_Indication": "1",
"PEBS": "1",
+ "PublicDescription": "Counts all retired store uops.",
"SampleAfterValue": "2000003",
"UMask": "0x82"
},
@@ -790,20 +792,19 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "BriefDescription": "Counts all demand & prefetch code readshit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0244",
+ "MSRValue": "0x4003C0244",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "BriefDescription": "Counts all demand & prefetch data readshit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -811,20 +812,18 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0091",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "BriefDescription": "Counts all demand & prefetch data readshit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0091",
+ "MSRValue": "0x4003C0091",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -837,7 +836,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C07F7",
"Offcore": "1",
- "PublicDescription": "hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -848,14 +846,13 @@
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C07F7",
+ "MSRValue": "0x4003C07F7",
"Offcore": "1",
- "PublicDescription": "hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all requests hit in the L3",
+ "BriefDescription": "Counts all requestshit in the L3",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -863,12 +860,11 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C8FFF",
"Offcore": "1",
- "PublicDescription": "Counts all requests hit in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "BriefDescription": "Counts all demand & prefetch RFOshit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -876,25 +872,23 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0122",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "BriefDescription": "Counts all demand & prefetch RFOshit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0122",
+ "MSRValue": "0x4003C0122",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "BriefDescription": "Counts all demand code readshit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -902,25 +896,23 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0004",
"Offcore": "1",
- "PublicDescription": "Counts all demand code reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "BriefDescription": "Counts all demand code readshit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0004",
+ "MSRValue": "0x4003C0004",
"Offcore": "1",
- "PublicDescription": "Counts all demand code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "BriefDescription": "Counts demand data readshit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -928,25 +920,23 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0001",
"Offcore": "1",
- "PublicDescription": "Counts demand data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "BriefDescription": "Counts demand data readshit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0001",
+ "MSRValue": "0x4003C0001",
"Offcore": "1",
- "PublicDescription": "Counts demand data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "BriefDescription": "Counts all demand data writes (RFOs)hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -954,25 +944,23 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0002",
"Offcore": "1",
- "PublicDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "BriefDescription": "Counts all demand data writes (RFOs)hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0002",
+ "MSRValue": "0x4003C0002",
"Offcore": "1",
- "PublicDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads hit in the L3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code readshit in the L3",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -980,12 +968,11 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0040",
"Offcore": "1",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads hit in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads hit in the L3",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data readshit in the L3",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -993,12 +980,11 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0010",
"Offcore": "1",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads hit in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs hit in the L3",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOshit in the L3",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -1006,12 +992,11 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0020",
"Offcore": "1",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs hit in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code readshit in the L3",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -1019,12 +1004,11 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0200",
"Offcore": "1",
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads hit in the L3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data readshit in the L3",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -1032,12 +1016,11 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0080",
"Offcore": "1",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads hit in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOshit in the L3",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -1045,7 +1028,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0100",
"Offcore": "1",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1058,4 +1040,4 @@
"SampleAfterValue": "100003",
"UMask": "0x10"
}
-]
\ No newline at end of file
+]
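A brief aside on the MSRValue edits above (illustrative, not from this commit): dropping the leading zero, e.g. 0x04003C0244 -> 0x4003C0244, leaves the numeric value unchanged. For these OFFCORE_RESPONSE matrix events, perf puts the event select and unit mask in the usual config field, and on typical Intel core PMUs the request/response bits given by MSRValue are exposed through the offcore_rsp format attribute, which maps to config1; the exact sysfs format string should be checked on the target system. A hedged sketch reusing one MSRValue from the hunks above:

#include <linux/perf_event.h>
#include <string.h>
#include <stdio.h>

/* Sketch only: encode OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT as a raw event.
 * EventCode 0xB7 and UMask 0x1 go into config; the request/response matrix
 * value (MSRValue 0x3F803C0040 above) is assumed to land in config1, as
 * .../cpu/format/offcore_rsp advertises on typical Intel core PMUs. */
static void encode_offcore_response(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->type = PERF_TYPE_RAW;
	attr->config = 0xb7 | (0x1 << 8);
	attr->config1 = 0x3F803C0040ULL;
}

int main(void)
{
	struct perf_event_attr attr;

	encode_offcore_response(&attr);
	printf("config=0x%llx config1=0x%llx\n",
	       (unsigned long long)attr.config,
	       (unsigned long long)attr.config1);
	return 0;
}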
diff --git a/tools/perf/pmu-events/arch/x86/haswell/floating-point.json b/tools/perf/pmu-events/arch/x86/haswell/floating-point.json
index 55cf5b96464e..7cf203a90a74 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/floating-point.json
@@ -100,4 +100,4 @@
"SampleAfterValue": "100003",
"UMask": "0x10"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/haswell/frontend.json b/tools/perf/pmu-events/arch/x86/haswell/frontend.json
index 0c8d5ccf1276..c45a09abe5d3 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/frontend.json
@@ -301,4 +301,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json b/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json
index 3ade2c19533e..75dc6dd9a7bc 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json
@@ -111,18 +111,12 @@
"MetricName": "CoreIPC_SMT"
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
"MetricExpr": "( UOPS_EXECUTED.CORE / 2 / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) ) if #SMT_on else UOPS_EXECUTED.CORE / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@)",
"MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
"MetricName": "ILP"
},
{
- "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "Bad;BadSpec;BrMispredicts",
- "MetricName": "IpMispredict"
- },
- {
"BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
"MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"MetricGroup": "SMT",
@@ -171,17 +165,28 @@
"MetricName": "Instructions"
},
{
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "Retire"
+ },
+ {
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
"MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
"MetricGroup": "DSB;Fed;FetchBW",
"MetricName": "DSB_Coverage"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load instructions (in core cycles)",
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts",
+ "MetricName": "IpMispredict"
+ },
+ {
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
"MetricGroup": "Mem;MemoryBound;MemoryLat",
- "MetricName": "Load_Miss_Real_Latency",
- "PublicDescription": "Actual Average Latency for L1 data-cache miss demand load instructions (in core cycles). Latency may be overestimated for multi-load instructions - e.g. repeat strings."
+ "MetricName": "Load_Miss_Real_Latency"
},
{
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
@@ -190,24 +195,6 @@
"MetricName": "MLP"
},
{
- "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
- "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L1D_Cache_Fill_BW"
- },
- {
- "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L2_Cache_Fill_BW"
- },
- {
- "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L3_Cache_Fill_BW"
- },
- {
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
"MetricGroup": "Mem;CacheMisses",
@@ -239,6 +226,48 @@
"MetricName": "Page_Walks_Utilization_SMT"
},
{
+ "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L1D_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L2_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L3_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "(64 * L1D.REPLACEMENT / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L1D_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "(64 * L2_LINES_IN.ALL / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L2_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "(64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L3_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "0",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "L3_Cache_Access_BW_1T"
+ },
+ {
"BriefDescription": "Average CPU Utilization",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "HPC;Summary",
diff --git a/tools/perf/pmu-events/arch/x86/haswell/memory.json b/tools/perf/pmu-events/arch/x86/haswell/memory.json
index 8b69493e3726..9e5a1e0966d9 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/memory.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/memory.json
@@ -225,7 +225,7 @@
"UMask": "0x2"
},
{
- "BriefDescription": "Counts all demand & prefetch code reads miss in the L3",
+ "BriefDescription": "Counts all demand & prefetch code readsmiss in the L3",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -233,25 +233,23 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FFFC00244",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch code reads miss in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts all demand & prefetch code readsmiss the L3 and the data is returned from local dram",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.L3_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100400244",
+ "MSRValue": "0x100400244",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand & prefetch data reads miss in the L3",
+ "BriefDescription": "Counts all demand & prefetch data readsmiss in the L3",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -259,20 +257,18 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FFFC00091",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch data reads miss in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts all demand & prefetch data readsmiss the L3 and the data is returned from local dram",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100400091",
+ "MSRValue": "0x100400091",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -285,7 +281,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FFFC007F7",
"Offcore": "1",
- "PublicDescription": "miss in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -296,14 +291,13 @@
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01004007F7",
+ "MSRValue": "0x1004007F7",
"Offcore": "1",
- "PublicDescription": "miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all requests miss in the L3",
+ "BriefDescription": "Counts all requestsmiss in the L3",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -311,12 +305,11 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FFFC08FFF",
"Offcore": "1",
- "PublicDescription": "Counts all requests miss in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand & prefetch RFOs miss in the L3",
+ "BriefDescription": "Counts all demand & prefetch RFOsmiss in the L3",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -324,25 +317,23 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FFFC00122",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch RFOs miss in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts all demand & prefetch RFOsmiss the L3 and the data is returned from local dram",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100400122",
+ "MSRValue": "0x100400122",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads miss in the L3",
+ "BriefDescription": "Counts all demand code readsmiss in the L3",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -350,25 +341,23 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FFFC00004",
"Offcore": "1",
- "PublicDescription": "Counts all demand code reads miss in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts all demand code readsmiss the L3 and the data is returned from local dram",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100400004",
+ "MSRValue": "0x100400004",
"Offcore": "1",
- "PublicDescription": "Counts all demand code reads miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads miss in the L3",
+ "BriefDescription": "Counts demand data readsmiss in the L3",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -376,25 +365,23 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FFFC00001",
"Offcore": "1",
- "PublicDescription": "Counts demand data reads miss in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts demand data readsmiss the L3 and the data is returned from local dram",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100400001",
+ "MSRValue": "0x100400001",
"Offcore": "1",
- "PublicDescription": "Counts demand data reads miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand data writes (RFOs) miss in the L3",
+ "BriefDescription": "Counts all demand data writes (RFOs)miss in the L3",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -402,25 +389,23 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FFFC00002",
"Offcore": "1",
- "PublicDescription": "Counts all demand data writes (RFOs) miss in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand data writes (RFOs) miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts all demand data writes (RFOs)miss the L3 and the data is returned from local dram",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100400002",
+ "MSRValue": "0x100400002",
"Offcore": "1",
- "PublicDescription": "Counts all demand data writes (RFOs) miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads miss in the L3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code readsmiss in the L3",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -428,12 +413,11 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FFFC00040",
"Offcore": "1",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads miss in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads miss in the L3",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data readsmiss in the L3",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -441,12 +425,11 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FFFC00010",
"Offcore": "1",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads miss in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs miss in the L3",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOsmiss in the L3",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -454,12 +437,11 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FFFC00020",
"Offcore": "1",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs miss in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code readsmiss in the L3",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -467,12 +449,11 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FFFC00200",
"Offcore": "1",
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads miss in the L3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data readsmiss in the L3",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -480,12 +461,11 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FFFC00080",
"Offcore": "1",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads miss in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOsmiss in the L3",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
@@ -493,7 +473,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FFFC00100",
"Offcore": "1",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -681,4 +660,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x40"
}
-] \ No newline at end of file
+]
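[Editor's note] Aside from dropping the PublicDescription strings that merely duplicated the BriefDescription, the memory.json hunks above only reformat the offcore MSRValue fields: the shorter hex strings drop a leading zero and encode the same numbers. A quick check, using the first pair from this file as the example:

old = "0x0100400244"   # value before the patch
new = "0x100400244"    # value after the patch
assert int(old, 16) == int(new, 16)   # both parse to the same MSR bit pattern
print(hex(int(new, 16)))              # 0x100400244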
diff --git a/tools/perf/pmu-events/arch/x86/haswell/other.json b/tools/perf/pmu-events/arch/x86/haswell/other.json
index 4c6b9d34325a..7ca34f09b185 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/other.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/other.json
@@ -40,4 +40,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/haswell/pipeline.json b/tools/perf/pmu-events/arch/x86/haswell/pipeline.json
index a53f28ec9270..42f6a8100661 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/pipeline.json
@@ -1035,7 +1035,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
- "PublicDescription": "Cycles per core when uops are exectuted in port 0.",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
@@ -1056,7 +1055,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
- "PublicDescription": "Cycles per core when uops are exectuted in port 1.",
"SampleAfterValue": "2000003",
"UMask": "0x2"
},
@@ -1117,7 +1115,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
- "PublicDescription": "Cycles per core when uops are exectuted in port 4.",
"SampleAfterValue": "2000003",
"UMask": "0x10"
},
@@ -1138,7 +1135,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
- "PublicDescription": "Cycles per core when uops are exectuted in port 5.",
"SampleAfterValue": "2000003",
"UMask": "0x20"
},
@@ -1159,7 +1155,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
- "PublicDescription": "Cycles per core when uops are exectuted in port 6.",
"SampleAfterValue": "2000003",
"UMask": "0x40"
},
@@ -1295,11 +1290,11 @@
"BriefDescription": "Cycles with less than 10 actually retired uops.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
- "CounterMask": "10",
+ "CounterMask": "16",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.TOTAL_CYCLES",
"Invert": "1",
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
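[Editor's note] The UOPS_RETIRED.TOTAL_CYCLES entry above combines Invert with a CounterMask: with inv=1, the counter increments on cycles where fewer than CounterMask events occurred (note that the BriefDescription still says "less than 10" while the mask becomes 16). A small, self-contained model of that counting rule, with invented per-cycle retirement counts:

# Hypothetical number of uops retired in each of ten consecutive cycles.
uops_retired_per_cycle = [0, 4, 4, 0, 2, 4, 4, 4, 0, 1]
counter_mask = 16   # cmask programmed by the event definition
invert = True       # inv=1 flips the comparison from ">= cmask" to "< cmask"

def counts_this_cycle(n):
    # With invert set, a cycle is counted when fewer than cmask events occurred in it.
    return n < counter_mask if invert else n >= counter_mask

total_cycles = sum(1 for n in uops_retired_per_cycle if counts_this_cycle(n))
print(total_cycles)   # 10: with cmask=16 and at most 4 uops retiring, every cycle qualifies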
diff --git a/tools/perf/pmu-events/arch/x86/haswell/uncore-other.json b/tools/perf/pmu-events/arch/x86/haswell/uncore-other.json
index 8f2ae2891042..56c4b380dc95 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/uncore-other.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/uncore-other.json
@@ -19,11 +19,11 @@
"Unit": "ARB"
},
{
- "BriefDescription": "Each cycle count number of all Core outgoing valid entries. Such entry is defined as valid from it's allocation till first of IDI0 or DRS0 messages is sent out. Accounts for Coherent and non-coherent traffic.",
+ "BriefDescription": "Each cycle counts number of all Core outgoing valid entries. Such entry is defined as valid from its allocation till first of IDI0 or DRS0 messages is sent out. Accounts for Coherent and non-coherent traffic.",
"EventCode": "0x80",
"EventName": "UNC_ARB_TRK_OCCUPANCY.ALL",
"PerPkg": "1",
- "PublicDescription": "Each cycle count number of all Core outgoing valid entries. Such entry is defined as valid from it's allocation till first of IDI0 or DRS0 messages is sent out. Accounts for Coherent and non-coherent traffic.",
+ "PublicDescription": "Each cycle counts number of all Core outgoing valid entries. Such entry is defined as valid from its allocation till first of IDI0 or DRS0 messages is sent out. Accounts for Coherent and non-coherent traffic.",
"UMask": "0x01",
"Unit": "ARB"
},
@@ -34,6 +34,7 @@
"EventCode": "0x80",
"EventName": "UNC_ARB_TRK_OCCUPANCY.CYCLES_WITH_ANY_REQUEST",
"PerPkg": "1",
+ "PublicDescription": "Cycles with at least one request outstanding is waiting for data return from memory controller. Account for coherent and non-coherent requests initiated by IA Cores, Processor Graphics Unit, or LLC.\n",
"UMask": "0x01",
"Unit": "ARB"
},
@@ -64,6 +65,6 @@
"EventName": "UNC_CLOCK.SOCKET",
"PerPkg": "1",
"PublicDescription": "This 48-bit fixed counter counts the UCLK cycles.",
- "Unit": "NCU"
+ "Unit": "CLOCK"
}
]
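[Editor's note] The ARB occupancy event touched above accumulates the number of valid outstanding entries on every uncore cycle, so dividing it by a cycle count such as UNC_CLOCK.SOCKET gives the average number of requests in flight. A rough sketch with hypothetical counter readings:

# Hypothetical raw counts from one measurement interval.
unc_arb_trk_occupancy_all = 3_600_000_000   # sum of valid entries, sampled each uncore cycle
unc_clock_socket = 2_400_000_000            # uncore clock cycles over the same interval

avg_outstanding = unc_arb_trk_occupancy_all / unc_clock_socket
print(f"average outstanding ARB entries ~= {avg_outstanding:.2f}")   # 1.50 here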
diff --git a/tools/perf/pmu-events/arch/x86/haswell/virtual-memory.json b/tools/perf/pmu-events/arch/x86/haswell/virtual-memory.json
index ba3e77a9f9a0..57d2a6452fec 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/virtual-memory.json
@@ -481,4 +481,4 @@
"SampleAfterValue": "100003",
"UMask": "0x20"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/cache.json b/tools/perf/pmu-events/arch/x86/haswellx/cache.json
index 85eb998dd39e..7557a203a1b6 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/cache.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/cache.json
@@ -20,7 +20,7 @@
"UMask": "0x2"
},
{
- "BriefDescription": "L1D miss oustandings duration in cycles",
+ "BriefDescription": "L1D miss outstanding duration in cycles",
"Counter": "2",
"CounterHTOff": "2",
"EventCode": "0x48",
@@ -592,7 +592,7 @@
"UMask": "0x20"
},
{
- "BriefDescription": "All retired load uops.",
+ "BriefDescription": "Retired load uops.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1",
@@ -600,11 +600,12 @@
"EventCode": "0xD0",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"PEBS": "1",
+ "PublicDescription": "Counts all retired load uops. This event accounts for SW prefetch uops of PREFETCHNTA or PREFETCHT0/1/2 or PREFETCHW.",
"SampleAfterValue": "2000003",
"UMask": "0x81"
},
{
- "BriefDescription": "All retired store uops.",
+ "BriefDescription": "Retired store uops.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1",
@@ -613,6 +614,7 @@
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"L1_Hit_Indication": "1",
"PEBS": "1",
+ "PublicDescription": "Counts all retired store uops.",
"SampleAfterValue": "2000003",
"UMask": "0x82"
},
@@ -832,9 +834,8 @@
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0244",
+ "MSRValue": "0x4003C0244",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -847,7 +848,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0091",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -858,9 +858,8 @@
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0091",
+ "MSRValue": "0x4003C0091",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -873,7 +872,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C07F7",
"Offcore": "1",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -884,9 +882,8 @@
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C07F7",
+ "MSRValue": "0x4003C07F7",
"Offcore": "1",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -899,7 +896,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C8FFF",
"Offcore": "1",
- "PublicDescription": "Counts all requests hit in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -912,7 +908,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0122",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -923,9 +918,8 @@
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0122",
+ "MSRValue": "0x4003C0122",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -938,7 +932,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0004",
"Offcore": "1",
- "PublicDescription": "Counts all demand code reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -949,9 +942,8 @@
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0004",
+ "MSRValue": "0x4003C0004",
"Offcore": "1",
- "PublicDescription": "Counts all demand code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -964,7 +956,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0001",
"Offcore": "1",
- "PublicDescription": "Counts demand data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -975,9 +966,8 @@
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0001",
+ "MSRValue": "0x4003C0001",
"Offcore": "1",
- "PublicDescription": "Counts demand data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -990,7 +980,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0002",
"Offcore": "1",
- "PublicDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1001,9 +990,8 @@
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0002",
+ "MSRValue": "0x4003C0002",
"Offcore": "1",
- "PublicDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1016,7 +1004,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0040",
"Offcore": "1",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads hit in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1029,7 +1016,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0010",
"Offcore": "1",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads hit in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1042,7 +1028,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0020",
"Offcore": "1",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs hit in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1055,7 +1040,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0200",
"Offcore": "1",
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1068,7 +1052,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0080",
"Offcore": "1",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads hit in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1081,7 +1064,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0100",
"Offcore": "1",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1094,4 +1076,4 @@
"SampleAfterValue": "100003",
"UMask": "0x10"
}
-] \ No newline at end of file
+]
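[Editor's note] The MEM_UOPS_RETIRED.ALL_LOADS / ALL_STORES events given new PublicDescription text above are the inputs for the loads_per_instr and stores_per_instr metrics added further down in hsx-metrics.json. The ratios themselves are plain division; a sketch with placeholder counts:

# Placeholder counter values; real numbers would come from an actual perf run.
mem_uops_retired_all_loads = 310_000_000
mem_uops_retired_all_stores = 120_000_000
inst_retired_any = 1_000_000_000

loads_per_instr = mem_uops_retired_all_loads / inst_retired_any
stores_per_instr = mem_uops_retired_all_stores / inst_retired_any
print(f"loads_per_instr  = {loads_per_instr:.3f}")   # 0.310
print(f"stores_per_instr = {stores_per_instr:.3f}")  # 0.120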
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/floating-point.json b/tools/perf/pmu-events/arch/x86/haswellx/floating-point.json
index 55cf5b96464e..7cf203a90a74 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/floating-point.json
@@ -100,4 +100,4 @@
"SampleAfterValue": "100003",
"UMask": "0x10"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/frontend.json b/tools/perf/pmu-events/arch/x86/haswellx/frontend.json
index 0c8d5ccf1276..c45a09abe5d3 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/frontend.json
@@ -301,4 +301,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
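[Editor's note] The hsx-metrics.json diff that follows adds a batch of named metrics; once the event tables are built into perf, such names are normally selectable with perf stat -M <MetricName>, though that usage is an inference here, not something this patch states. The DDR bandwidth expressions it adds, for instance, are 64 bytes per CAS command over the run time; with made-up counter values (again assuming duration_time in seconds for the sketch):

# Hypothetical IMC CAS counts over a one-second interval.
unc_m_cas_count_rd = 500_000_000
unc_m_cas_count_wr = 200_000_000
duration_time_s = 1.0   # assumed unit for this sketch

read_mb_s  = (unc_m_cas_count_rd * 64 / 1_000_000) / duration_time_s
write_mb_s = (unc_m_cas_count_wr * 64 / 1_000_000) / duration_time_s
print(f"memory_bandwidth_total ~= {read_mb_s + write_mb_s:.0f} MB/s")   # 44800 MB/s here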
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json b/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json
index c99734fd907d..d31d76db9d84 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json
@@ -75,12 +75,6 @@
"MetricName": "UpTB"
},
{
- "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
- "MetricExpr": "1 / (INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD)",
- "MetricGroup": "Pipeline;Mem",
- "MetricName": "CPI"
- },
- {
"BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "Pipeline",
@@ -111,18 +105,12 @@
"MetricName": "CoreIPC_SMT"
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
"MetricExpr": "( UOPS_EXECUTED.CORE / 2 / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) ) if #SMT_on else UOPS_EXECUTED.CORE / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@)",
"MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
"MetricName": "ILP"
},
{
- "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "Bad;BadSpec;BrMispredicts",
- "MetricName": "IpMispredict"
- },
- {
"BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
"MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"MetricGroup": "SMT",
@@ -171,17 +159,28 @@
"MetricName": "Instructions"
},
{
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "Retire"
+ },
+ {
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
"MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
"MetricGroup": "DSB;Fed;FetchBW",
"MetricName": "DSB_Coverage"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load instructions (in core cycles)",
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts",
+ "MetricName": "IpMispredict"
+ },
+ {
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
"MetricGroup": "Mem;MemoryBound;MemoryLat",
- "MetricName": "Load_Miss_Real_Latency",
- "PublicDescription": "Actual Average Latency for L1 data-cache miss demand load instructions (in core cycles). Latency may be overestimated for multi-load instructions - e.g. repeat strings."
+ "MetricName": "Load_Miss_Real_Latency"
},
{
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
@@ -190,24 +189,6 @@
"MetricName": "MLP"
},
{
- "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
- "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L1D_Cache_Fill_BW"
- },
- {
- "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L2_Cache_Fill_BW"
- },
- {
- "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L3_Cache_Fill_BW"
- },
- {
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
"MetricGroup": "Mem;CacheMisses",
@@ -239,6 +220,48 @@
"MetricName": "Page_Walks_Utilization_SMT"
},
{
+ "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L1D_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L2_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L3_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "(64 * L1D.REPLACEMENT / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L1D_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "(64 * L2_LINES_IN.ALL / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L2_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "(64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L3_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "0",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "L3_Cache_Access_BW_1T"
+ },
+ {
"BriefDescription": "Average CPU Utilization",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "HPC;Summary",
@@ -299,6 +322,12 @@
"MetricName": "Socket_CLKS"
},
{
+ "BriefDescription": "Uncore frequency per die [GHZ]",
+ "MetricExpr": "cbox_0@event\\=0x0@ / #num_dies / duration_time / 1000000000",
+ "MetricGroup": "SoC",
+ "MetricName": "UNCORE_FREQ"
+ },
+ {
"BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
"MetricGroup": "Branches;OS",
@@ -345,5 +374,404 @@
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"MetricName": "C7_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "CPU operating frequency (in GHz)",
+ "MetricExpr": "( CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC * #SYSTEM_TSC_FREQ ) / 1000000000",
+ "MetricGroup": "",
+ "MetricName": "cpu_operating_frequency",
+ "ScaleUnit": "1GHz"
+ },
+ {
+ "BriefDescription": "Cycles per instruction retired; indicating how much time each executed instruction took; in units of cycles.",
+ "MetricExpr": " CPU_CLK_UNHALTED.THREAD / INST_RETIRED.ANY ",
+ "MetricGroup": "",
+ "MetricName": "cpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "The ratio of number of completed memory load instructions to the total number completed instructions",
+ "MetricExpr": " MEM_UOPS_RETIRED.ALL_LOADS / INST_RETIRED.ANY ",
+ "MetricGroup": "",
+ "MetricName": "loads_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "The ratio of number of completed memory store instructions to the total number completed instructions",
+ "MetricExpr": " MEM_UOPS_RETIRED.ALL_STORES / INST_RETIRED.ANY ",
+ "MetricGroup": "",
+ "MetricName": "stores_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of requests missing L1 data cache (includes data+rfo w/ prefetches) to the total number of completed instructions",
+ "MetricExpr": " L1D.REPLACEMENT / INST_RETIRED.ANY ",
+ "MetricGroup": "",
+ "MetricName": "l1d_mpi_includes_data_plus_rfo_with_prefetches",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of demand load requests hitting in L1 data cache to the total number of completed instructions",
+ "MetricExpr": " MEM_LOAD_UOPS_RETIRED.L1_HIT / INST_RETIRED.ANY ",
+ "MetricGroup": "",
+ "MetricName": "l1d_demand_data_read_hits_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of code read requests missing in L1 instruction cache (includes prefetches) to the total number of completed instructions",
+ "MetricExpr": " L2_RQSTS.ALL_CODE_RD / INST_RETIRED.ANY ",
+ "MetricGroup": "",
+ "MetricName": "l1_i_code_read_misses_with_prefetches_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed demand load requests hitting in L2 cache to the total number of completed instructions",
+ "MetricExpr": " MEM_LOAD_UOPS_RETIRED.L2_HIT / INST_RETIRED.ANY ",
+ "MetricGroup": "",
+ "MetricName": "l2_demand_data_read_hits_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of requests missing L2 cache (includes code+data+rfo w/ prefetches) to the total number of completed instructions",
+ "MetricExpr": " L2_LINES_IN.ALL / INST_RETIRED.ANY ",
+ "MetricGroup": "",
+ "MetricName": "l2_mpi_includes_code_plus_data_plus_rfo_with_prefetches",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed data read request missing L2 cache to the total number of completed instructions",
+ "MetricExpr": " MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY ",
+ "MetricGroup": "",
+ "MetricName": "l2_demand_data_read_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of code read request missing L2 cache to the total number of completed instructions",
+ "MetricExpr": " L2_RQSTS.CODE_RD_MISS / INST_RETIRED.ANY ",
+ "MetricGroup": "",
+ "MetricName": "l2_demand_code_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for all page sizes) caused by a code fetch to the total number of completed instructions. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB.",
+ "MetricExpr": " ITLB_MISSES.WALK_COMPLETED / INST_RETIRED.ANY ",
+ "MetricGroup": "",
+ "MetricName": "itlb_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for 2 megabyte and 4 megabyte page sizes) caused by a code fetch to the total number of completed instructions. This implies it missed in the Instruction Translation Lookaside Buffer (ITLB) and further levels of TLB.",
+ "MetricExpr": " ITLB_MISSES.WALK_COMPLETED_2M_4M / INST_RETIRED.ANY ",
+ "MetricGroup": "",
+ "MetricName": "itlb_large_page_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for all page sizes) caused by demand data loads to the total number of completed instructions. This implies it missed in the DTLB and further levels of TLB.",
+ "MetricExpr": " DTLB_LOAD_MISSES.WALK_COMPLETED / INST_RETIRED.ANY ",
+ "MetricGroup": "",
+ "MetricName": "dtlb_load_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for all page sizes) caused by demand data stores to the total number of completed instructions. This implies it missed in the DTLB and further levels of TLB.",
+ "MetricExpr": " DTLB_STORE_MISSES.WALK_COMPLETED / INST_RETIRED.ANY ",
+ "MetricGroup": "",
+ "MetricName": "dtlb_store_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Intel(R) Quick Path Interconnect (QPI) data transmit bandwidth (MB/sec)",
+ "MetricExpr": "( UNC_Q_TxL_FLITS_G0.DATA * 8 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "qpi_data_transmit_bw_only_data",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "DDR memory read bandwidth (MB/sec)",
+ "MetricExpr": "( UNC_M_CAS_COUNT.RD * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "memory_bandwidth_read",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "DDR memory write bandwidth (MB/sec)",
+ "MetricExpr": "( UNC_M_CAS_COUNT.WR * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "memory_bandwidth_write",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "DDR memory bandwidth (MB/sec)",
+ "MetricExpr": "(( UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR ) * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "memory_bandwidth_total",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth of IO reads that are initiated by end device controllers that are requesting memory from the CPU.",
+ "MetricExpr": "( cbox@UNC_C_TOR_INSERTS.OPCODE\\,filter_opc\\=0x19e@ * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "io_bandwidth_read",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth of IO writes that are initiated by end device controllers that are writing memory to the CPU.",
+ "MetricExpr": "( cbox@UNC_C_TOR_INSERTS.OPCODE\\,filter_opc\\=0x1c8\\,filter_tid\\=0x3e@ * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "io_bandwidth_write",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Uops delivered from decoded instruction cache (decoded stream buffer or DSB) as a percent of total uops delivered to Instruction Decode Queue",
+ "MetricExpr": "100 * ( IDQ.DSB_UOPS / UOPS_ISSUED.ANY )",
+ "MetricGroup": "",
+ "MetricName": "percent_uops_delivered_frodecoded_icache_dsb",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Uops delivered from legacy decode pipeline (Micro-instruction Translation Engine or MITE) as a percent of total uops delivered to Instruction Decode Queue",
+ "MetricExpr": "100 * ( IDQ.MITE_UOPS / UOPS_ISSUED.ANY )",
+ "MetricGroup": "",
+ "MetricName": "percent_uops_delivered_frolegacy_decode_pipeline_mite",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Uops delivered from microcode sequencer (MS) as a percent of total uops delivered to Instruction Decode Queue",
+ "MetricExpr": "100 * ( IDQ.MS_UOPS / UOPS_ISSUED.ANY )",
+ "MetricGroup": "",
+ "MetricName": "percent_uops_delivered_fromicrocode_sequencer_ms",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Uops delivered from loop stream detector(LSD) as a percent of total uops delivered to Instruction Decode Queue",
+ "MetricExpr": "100 * ( UOPS_ISSUED.ANY - IDQ.MITE_UOPS - IDQ.MS_UOPS - IDQ.DSB_UOPS ) / UOPS_ISSUED.ANY ",
+ "MetricGroup": "",
+ "MetricName": "percent_uops_delivered_froloop_streadetector_lsd",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Ratio of number of data read requests missing last level core cache (includes demand w/ prefetches) to the total number of completed instructions",
+ "MetricExpr": "( cbox@UNC_C_TOR_INSERTS.MISS_OPCODE\\,filter_opc\\=0x182@ + cbox@UNC_C_TOR_INSERTS.MISS_OPCODE\\,filter_opc\\=0x192@ ) / INST_RETIRED.ANY ",
+ "MetricGroup": "",
+ "MetricName": "llc_data_read_mpi_demand_plus_prefetch",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of code read requests missing last level core cache (includes demand w/ prefetches) to the total number of completed instructions",
+ "MetricExpr": "( cbox@UNC_C_TOR_INSERTS.MISS_OPCODE\\,filter_opc\\=0x181@ + cbox@UNC_C_TOR_INSERTS.MISS_OPCODE\\,filter_opc\\=0x191@ ) / INST_RETIRED.ANY ",
+ "MetricGroup": "",
+ "MetricName": "llc_code_read_mpi_demand_plus_prefetch",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Memory read that miss the last level cache (LLC) addressed to local DRAM as a percentage of total memory read accesses, does not include LLC prefetches.",
+ "MetricExpr": "100 * cbox@UNC_C_TOR_INSERTS.MISS_OPCODE\\,filter_opc\\=0x182@ / ( cbox@UNC_C_TOR_INSERTS.MISS_OPCODE\\,filter_opc\\=0x182@ + cbox@UNC_C_TOR_INSERTS.MISS_OPCODE\\,filter_opc\\=0x182@ )",
+ "MetricGroup": "",
+ "MetricName": "numa_percent_reads_addressed_to_local_dram",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Memory reads that miss the last level cache (LLC) addressed to remote DRAM as a percentage of total memory read accesses, does not include LLC prefetches.",
+ "MetricExpr": "100 * cbox@UNC_C_TOR_INSERTS.MISS_OPCODE\\,filter_opc\\=0x182@ / ( cbox@UNC_C_TOR_INSERTS.MISS_OPCODE\\,filter_opc\\=0x182@ + cbox@UNC_C_TOR_INSERTS.MISS_OPCODE\\,filter_opc\\=0x182@ )",
+ "MetricGroup": "",
+ "MetricName": "numa_percent_reads_addressed_to_remote_dram",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Machine_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+ "MetricExpr": "100 * ( IDQ_UOPS_NOT_DELIVERED.CORE / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) )",
+ "MetricGroup": "TmaL1, PGO",
+ "MetricName": "tma_frontend_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period.",
+ "MetricExpr": "100 * ( ( 4 ) * ( min( CPU_CLK_UNHALTED.THREAD , IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) )",
+ "MetricGroup": "Frontend, TmaL2",
+ "MetricName": "tma_fetch_latency_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses.",
+ "MetricExpr": "100 * ( ICACHE.IFDATA_STALL / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "BigFoot, FetchLat, IcMiss",
+ "MetricName": "tma_icache_misses_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses.",
+ "MetricExpr": "100 * ( ( 14 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION ) / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "BigFoot, FetchLat, MemoryTLB",
+ "MetricName": "tma_itlb_misses_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings.",
+ "MetricExpr": "100 * ( ( 12 ) * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "FetchLat",
+ "MetricName": "tma_branch_resteers_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty.",
+ "MetricExpr": "100 * ( DSB2MITE_SWITCHES.PENALTY_CYCLES / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "DSBmiss, FetchLat",
+ "MetricName": "tma_dsb_switches_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs.",
+ "MetricExpr": "100 * ( ILD_STALL.LCP / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "FetchLat",
+ "MetricName": "tma_lcp_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals.",
+ "MetricExpr": "100 * ( ( 2 ) * IDQ.MS_SWITCHES / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "FetchLat, MicroSeq",
+ "MetricName": "tma_ms_switches_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend.",
+ "MetricExpr": "100 * ( ( IDQ_UOPS_NOT_DELIVERED.CORE / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( ( 4 ) * ( min( CPU_CLK_UNHALTED.THREAD , IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) )",
+ "MetricGroup": "FetchBW, Frontend, TmaL2",
+ "MetricName": "tma_fetch_bandwidth_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck.",
+ "MetricExpr": "100 * ( ( IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS ) / ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) / 2 )",
+ "MetricGroup": "DSBmiss, FetchBW",
+ "MetricName": "tma_mite_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
+ "MetricExpr": "100 * ( ( IDQ.ALL_DSB_CYCLES_ANY_UOPS - IDQ.ALL_DSB_CYCLES_4_UOPS ) / ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) / 2 )",
+ "MetricGroup": "DSB, FetchBW",
+ "MetricName": "tma_dsb_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "MetricExpr": "100 * ( ( UOPS_ISSUED.ANY - ( UOPS_RETIRED.RETIRE_SLOTS ) + ( 4 ) * ( ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) if #SMT_on else INT_MISC.RECOVERY_CYCLES ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) )",
+ "MetricGroup": "TmaL1",
+ "MetricName": "tma_bad_speculation_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path.",
+ "MetricExpr": "100 * ( ( BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT ) ) * ( ( UOPS_ISSUED.ANY - ( UOPS_RETIRED.RETIRE_SLOTS ) + ( 4 ) * ( ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) if #SMT_on else INT_MISC.RECOVERY_CYCLES ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) )",
+ "MetricGroup": "BadSpec, BrMispredicts, TmaL2",
+ "MetricName": "tma_branch_mispredicts_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes.",
+ "MetricExpr": "100 * ( ( ( UOPS_ISSUED.ANY - ( UOPS_RETIRED.RETIRE_SLOTS ) + ( 4 ) * ( ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) if #SMT_on else INT_MISC.RECOVERY_CYCLES ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( ( BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT ) ) * ( ( UOPS_ISSUED.ANY - ( UOPS_RETIRED.RETIRE_SLOTS ) + ( 4 ) * ( ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) if #SMT_on else INT_MISC.RECOVERY_CYCLES ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) )",
+ "MetricGroup": "BadSpec, MachineClears, TmaL2",
+ "MetricName": "tma_machine_clears_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "MetricExpr": "100 * ( 1 - ( ( IDQ_UOPS_NOT_DELIVERED.CORE / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) + ( ( UOPS_ISSUED.ANY - ( UOPS_RETIRED.RETIRE_SLOTS ) + ( 4 ) * ( ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) if #SMT_on else INT_MISC.RECOVERY_CYCLES ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) + ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) )",
+ "MetricGroup": "TmaL1",
+ "MetricName": "tma_backend_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
+ "MetricExpr": "100 * ( ( ( ( min( CPU_CLK_UNHALTED.THREAD , CYCLE_ACTIVITY.STALLS_LDM_PENDING ) ) + RESOURCE_STALLS.SB ) / ( ( ( min( CPU_CLK_UNHALTED.THREAD , CYCLE_ACTIVITY.CYCLES_NO_EXECUTE ) ) + ( cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x1@ - ( cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x3@ if ( ( INST_RETIRED.ANY / ( CPU_CLK_UNHALTED.THREAD ) ) > 1.8 ) else cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x2@ ) ) / 2 - ( RS_EVENTS.EMPTY_CYCLES if ( ( ( 4 ) * ( min( CPU_CLK_UNHALTED.THREAD , IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) > 0.1 ) else 0 ) + RESOURCE_STALLS.SB ) if #SMT_on else ( ( min( CPU_CLK_UNHALTED.THREAD , CYCLE_ACTIVITY.CYCLES_NO_EXECUTE ) ) + cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x1@ - ( cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x3@ if ( ( INST_RETIRED.ANY / ( CPU_CLK_UNHALTED.THREAD ) ) > 1.8 ) else cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x2@ ) - ( RS_EVENTS.EMPTY_CYCLES if ( ( ( 4 ) * ( min( CPU_CLK_UNHALTED.THREAD , IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) > 0.1 ) else 0 ) + RESOURCE_STALLS.SB ) ) ) * ( 1 - ( ( IDQ_UOPS_NOT_DELIVERED.CORE / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) + ( ( UOPS_ISSUED.ANY - ( UOPS_RETIRED.RETIRE_SLOTS ) + ( 4 ) * ( ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) if #SMT_on else INT_MISC.RECOVERY_CYCLES ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) + ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) ) )",
+ "MetricGroup": "Backend, TmaL2",
+ "MetricName": "tma_memory_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache.",
+ "MetricExpr": "100 * ( max( ( ( min( CPU_CLK_UNHALTED.THREAD , CYCLE_ACTIVITY.STALLS_LDM_PENDING ) ) - CYCLE_ACTIVITY.STALLS_L1D_PENDING ) / ( CPU_CLK_UNHALTED.THREAD ) , 0 ) )",
+ "MetricGroup": "CacheMisses, MemoryBound, TmaL3mem",
+ "MetricName": "tma_l1_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance.",
+ "MetricExpr": "100 * ( ( CYCLE_ACTIVITY.STALLS_L1D_PENDING - CYCLE_ACTIVITY.STALLS_L2_PENDING ) / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "CacheMisses, MemoryBound, TmaL3mem",
+ "MetricName": "tma_l2_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance.",
+ "MetricExpr": "100 * ( ( MEM_LOAD_UOPS_RETIRED.L3_HIT / ( MEM_LOAD_UOPS_RETIRED.L3_HIT + ( 7 ) * MEM_LOAD_UOPS_RETIRED.L3_MISS ) ) * CYCLE_ACTIVITY.STALLS_L2_PENDING / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "CacheMisses, MemoryBound, TmaL3mem",
+ "MetricName": "tma_l3_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance.",
+ "MetricExpr": "100 * ( min( ( ( 1 - ( MEM_LOAD_UOPS_RETIRED.L3_HIT / ( MEM_LOAD_UOPS_RETIRED.L3_HIT + ( 7 ) * MEM_LOAD_UOPS_RETIRED.L3_MISS ) ) ) * CYCLE_ACTIVITY.STALLS_L2_PENDING / ( CPU_CLK_UNHALTED.THREAD ) ) , ( 1 ) ) )",
+ "MetricGroup": "MemoryBound, TmaL3mem",
+ "MetricName": "tma_drabound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck.",
+ "MetricExpr": "100 * ( RESOURCE_STALLS.SB / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "MemoryBound, TmaL3mem",
+ "MetricName": "tma_store_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
+ "MetricExpr": "100 * ( ( 1 - ( ( IDQ_UOPS_NOT_DELIVERED.CORE / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) + ( ( UOPS_ISSUED.ANY - ( UOPS_RETIRED.RETIRE_SLOTS ) + ( 4 ) * ( ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) if #SMT_on else INT_MISC.RECOVERY_CYCLES ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) + ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) ) - ( ( ( ( min( CPU_CLK_UNHALTED.THREAD , CYCLE_ACTIVITY.STALLS_LDM_PENDING ) ) + RESOURCE_STALLS.SB ) / ( ( ( min( CPU_CLK_UNHALTED.THREAD , CYCLE_ACTIVITY.CYCLES_NO_EXECUTE ) ) + ( cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x1@ - ( cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x3@ if ( ( INST_RETIRED.ANY / ( CPU_CLK_UNHALTED.THREAD ) ) > 1.8 ) else cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x2@ ) ) / 2 - ( RS_EVENTS.EMPTY_CYCLES if ( ( ( 4 ) * ( min( CPU_CLK_UNHALTED.THREAD , IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) > 0.1 ) else 0 ) + RESOURCE_STALLS.SB ) if #SMT_on else ( ( min( CPU_CLK_UNHALTED.THREAD , CYCLE_ACTIVITY.CYCLES_NO_EXECUTE ) ) + cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x1@ - ( cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x3@ if ( ( INST_RETIRED.ANY / ( CPU_CLK_UNHALTED.THREAD ) ) > 1.8 ) else cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x2@ ) - ( RS_EVENTS.EMPTY_CYCLES if ( ( ( 4 ) * ( min( CPU_CLK_UNHALTED.THREAD , IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) > 0.1 ) else 0 ) + RESOURCE_STALLS.SB ) ) ) * ( 1 - ( ( IDQ_UOPS_NOT_DELIVERED.CORE / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) + ( ( UOPS_ISSUED.ANY - ( UOPS_RETIRED.RETIRE_SLOTS ) + ( 4 ) * ( ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) if #SMT_on else INT_MISC.RECOVERY_CYCLES ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) + ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) ) ) )",
+ "MetricGroup": "Backend, TmaL2, Compute",
+ "MetricName": "tma_core_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication.",
+ "MetricExpr": "100 * ( 10 * ARITH.DIVIDER_UOPS / ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) )",
+ "MetricGroup": "",
+ "MetricName": "tma_divider_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
+ "MetricExpr": "100 * ( ( ( ( ( min( CPU_CLK_UNHALTED.THREAD , CYCLE_ACTIVITY.CYCLES_NO_EXECUTE ) ) + ( cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x1@ - ( cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x3@ if ( ( INST_RETIRED.ANY / ( CPU_CLK_UNHALTED.THREAD ) ) > 1.8 ) else cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x2@ ) ) / 2 - ( RS_EVENTS.EMPTY_CYCLES if ( ( ( 4 ) * ( min( CPU_CLK_UNHALTED.THREAD , IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) > 0.1 ) else 0 ) + RESOURCE_STALLS.SB ) if #SMT_on else ( ( min( CPU_CLK_UNHALTED.THREAD , CYCLE_ACTIVITY.CYCLES_NO_EXECUTE ) ) + cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x1@ - ( cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x3@ if ( ( INST_RETIRED.ANY / ( CPU_CLK_UNHALTED.THREAD ) ) > 1.8 ) else cpu@UOPS_EXECUTED.CORE\\,cmask\\=0x2@ ) - ( RS_EVENTS.EMPTY_CYCLES if ( ( ( 4 ) * ( min( CPU_CLK_UNHALTED.THREAD , IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) > 0.1 ) else 0 ) + RESOURCE_STALLS.SB ) ) - RESOURCE_STALLS.SB - ( min( CPU_CLK_UNHALTED.THREAD , CYCLE_ACTIVITY.STALLS_LDM_PENDING ) ) ) / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "PortsUtil",
+ "MetricName": "tma_ports_utilization_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. ",
+ "MetricExpr": "100 * ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) )",
+ "MetricGroup": "TmaL1",
+ "MetricName": "tma_retiring_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved.",
+ "MetricExpr": "100 * ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) / UOPS_ISSUED.ANY ) * IDQ.MS_UOPS / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) )",
+ "MetricGroup": "Retire, TmaL2",
+ "MetricName": "tma_light_operations_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or microcoded sequences. This highly-correlates with the uop length of these instructions/sequences.",
+ "MetricExpr": "100 * ( ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) / UOPS_ISSUED.ANY ) * IDQ.MS_UOPS / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) )",
+ "MetricGroup": "Retire, TmaL2",
+ "MetricName": "tma_heavy_operations_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided.",
+ "MetricExpr": "100 * ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) / UOPS_ISSUED.ANY ) * IDQ.MS_UOPS / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) )",
+ "MetricGroup": "MicroSeq",
+ "MetricName": "tma_microcode_sequencer_percent",
+ "ScaleUnit": "1%"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/memory.json b/tools/perf/pmu-events/arch/x86/haswellx/memory.json
index 6ffb5067f4eb..fdabc9fe12a5 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/memory.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/memory.json
@@ -233,7 +233,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC00244",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch code reads miss in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -244,9 +243,8 @@
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0600400244",
+ "MSRValue": "0x600400244",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -259,7 +257,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC00091",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch data reads miss in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -270,9 +267,8 @@
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0600400091",
+ "MSRValue": "0x600400091",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -283,9 +279,8 @@
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x063F800091",
+ "MSRValue": "0x63F800091",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from remote dram",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -298,7 +293,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00091",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the modified data is transferred from remote cache",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -309,9 +303,8 @@
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083FC00091",
+ "MSRValue": "0x83FC00091",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and clean or shared data is transferred from remote cache",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -324,7 +317,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC007F7",
"Offcore": "1",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -335,9 +327,8 @@
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x06004007F7",
+ "MSRValue": "0x6004007F7",
"Offcore": "1",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -348,9 +339,8 @@
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x063F8007F7",
+ "MSRValue": "0x63F8007F7",
"Offcore": "1",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from remote dram",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -363,7 +353,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC007F7",
"Offcore": "1",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the modified data is transferred from remote cache",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -374,9 +363,8 @@
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083FC007F7",
+ "MSRValue": "0x83FC007F7",
"Offcore": "1",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and clean or shared data is transferred from remote cache",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -389,7 +377,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC08FFF",
"Offcore": "1",
- "PublicDescription": "Counts all requests miss in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -402,7 +389,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC00122",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch RFOs miss in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -413,9 +399,8 @@
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0600400122",
+ "MSRValue": "0x600400122",
"Offcore": "1",
- "PublicDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -428,7 +413,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC00004",
"Offcore": "1",
- "PublicDescription": "Counts all demand code reads miss in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -439,9 +423,8 @@
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0600400004",
+ "MSRValue": "0x600400004",
"Offcore": "1",
- "PublicDescription": "Counts all demand code reads miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -454,7 +437,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC00001",
"Offcore": "1",
- "PublicDescription": "Counts demand data reads miss in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -465,9 +447,8 @@
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0600400001",
+ "MSRValue": "0x600400001",
"Offcore": "1",
- "PublicDescription": "Counts demand data reads miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -480,7 +461,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC00002",
"Offcore": "1",
- "PublicDescription": "Counts all demand data writes (RFOs) miss in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -491,9 +471,8 @@
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0600400002",
+ "MSRValue": "0x600400002",
"Offcore": "1",
- "PublicDescription": "Counts all demand data writes (RFOs) miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -506,7 +485,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00002",
"Offcore": "1",
- "PublicDescription": "Counts all demand data writes (RFOs) miss the L3 and the modified data is transferred from remote cache",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -519,7 +497,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC00040",
"Offcore": "1",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads miss in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -532,7 +509,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC00010",
"Offcore": "1",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads miss in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -545,7 +521,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC00020",
"Offcore": "1",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs miss in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -558,7 +533,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC00200",
"Offcore": "1",
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -571,7 +545,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC00080",
"Offcore": "1",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads miss in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -584,7 +557,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC00100",
"Offcore": "1",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -772,4 +744,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x40"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/other.json b/tools/perf/pmu-events/arch/x86/haswellx/other.json
index 4c6b9d34325a..7ca34f09b185 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/other.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/other.json
@@ -40,4 +40,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json b/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json
index a53f28ec9270..42f6a8100661 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json
@@ -1035,7 +1035,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
- "PublicDescription": "Cycles per core when uops are exectuted in port 0.",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
@@ -1056,7 +1055,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
- "PublicDescription": "Cycles per core when uops are exectuted in port 1.",
"SampleAfterValue": "2000003",
"UMask": "0x2"
},
@@ -1117,7 +1115,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
- "PublicDescription": "Cycles per core when uops are exectuted in port 4.",
"SampleAfterValue": "2000003",
"UMask": "0x10"
},
@@ -1138,7 +1135,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
- "PublicDescription": "Cycles per core when uops are exectuted in port 5.",
"SampleAfterValue": "2000003",
"UMask": "0x20"
},
@@ -1159,7 +1155,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xA1",
"EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
- "PublicDescription": "Cycles per core when uops are exectuted in port 6.",
"SampleAfterValue": "2000003",
"UMask": "0x40"
},
@@ -1295,11 +1290,11 @@
"BriefDescription": "Cycles with less than 10 actually retired uops.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
- "CounterMask": "10",
+ "CounterMask": "16",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.TOTAL_CYCLES",
"Invert": "1",
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/uncore-cache.json b/tools/perf/pmu-events/arch/x86/haswellx/uncore-cache.json
index 58ed6d33d1f4..56047f9c6f20 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/uncore-cache.json
@@ -1,12 +1,63 @@
[
{
- "BriefDescription": "Uncore cache clock ticks",
+ "BriefDescription": "Bounce Control",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA",
+ "EventName": "UNC_C_BOUNCE_CONTROL",
+ "PerPkg": "1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Uncore Clocks",
"Counter": "0,1,2,3",
"EventName": "UNC_C_CLOCKTICKS",
"PerPkg": "1",
"Unit": "CBO"
},
{
+ "BriefDescription": "Counter 0 Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1F",
+ "EventName": "UNC_C_COUNTER0_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "FaST wire asserted",
+ "Counter": "0,1",
+ "EventCode": "0x9",
+ "EventName": "UNC_C_FAST_ASSERTED",
+ "PerPkg": "1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cache Lookups; Data Read Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.DATA_READ",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cache Lookups; Write Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.WRITE",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cache Lookups; External Snoop Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.REMOTE_SNOOP",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "CBO"
+ },
+ {
"BriefDescription": "All LLC Misses (code+ data rd + data wr - including demand and prefetch)",
"Counter": "0,1,2,3",
"EventCode": "0x34",
@@ -18,6 +69,24 @@
"Unit": "CBO"
},
{
+ "BriefDescription": "Cache Lookups; Lookups that Match NID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.NID",
+ "PerPkg": "1",
+ "UMask": "0x41",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cache Lookups; Any Read Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.READ",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CBO"
+ },
+ {
"BriefDescription": "M line evictions from LLC (writebacks to memory)",
"Counter": "0,1,2,3",
"EventCode": "0x37",
@@ -28,6 +97,983 @@
"Unit": "CBO"
},
{
+ "BriefDescription": "Lines Victimized; Lines in E state",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.E_STATE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Lines in S State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.S_STATE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Lines Victimized",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.F_STATE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Victimized Lines that Match NID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.NID",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Lines Victimized",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.MISS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cbo Misc; Silent Snoop Eviction",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.RSPI_WAS_FSE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cbo Misc; Write Combining Aliasing",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.WC_ALIASING",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cbo Misc",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.STARTED",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cbo Misc; RFO HitS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.RFO_HIT_S",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cbo Misc; Clean Victim with raw CV=0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.CVZERO_PREFETCH_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cbo Misc; DRd hitting non-M with raw CV=0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.CVZERO_PREFETCH_MISS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Age 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3C",
+ "EventName": "UNC_C_QLRU.AGE0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Age 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3C",
+ "EventName": "UNC_C_QLRU.AGE1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Age 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3C",
+ "EventName": "UNC_C_QLRU.AGE2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Age 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3C",
+ "EventName": "UNC_C_QLRU.AGE3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Bits Decremented",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3C",
+ "EventName": "UNC_C_QLRU.LRU_DECREMENT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "LRU Queue; Non-0 Aged Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3C",
+ "EventName": "UNC_C_QLRU.VICTIM_NON_ZERO",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.DOWN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.UP",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.DOWN",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.ALL",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.DOWN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.UP",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.DOWN",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.ALL",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.DOWN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.UP",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.DOWN",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.ALL",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; AD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; BL",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; Snoops of processor's cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.IV",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Any",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_C_RING_IV_USED.ANY",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Any",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_C_RING_IV_USED.UP",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_C_RING_IV_USED.DOWN",
+ "PerPkg": "1",
+ "UMask": "0xCC",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Any",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_C_RING_IV_USED.DN",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "UNC_C_RING_SINK_STARVED.AD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_C_RING_SINK_STARVED.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "UNC_C_RING_SINK_STARVED.AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_C_RING_SINK_STARVED.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "UNC_C_RING_SINK_STARVED.IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_C_RING_SINK_STARVED.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "UNC_C_RING_SINK_STARVED.BL",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_C_RING_SINK_STARVED.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Number of cycles the Cbo is actively throttling traffic onto the Ring in order to limit bounce traffic",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_C_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; IPQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; IRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; PRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; ISMQ_BID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.ISMQ_BIDS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; IRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; IRQ Rejected",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.IRQ_REJ",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; IPQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; PRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; PRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.PRQ_REJ",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; IRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; IPQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; ISMQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.ISMQ",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; PRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; Any Reject",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.ANY",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; No Egress Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.FULL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; Address Conflict",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.ADDR_CONFLICT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; No QPI Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.QPI_CREDITS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; No AD Sbo Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_C_RxR_IPQ_RETRY2.AD_SBO",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; Target Node Filter",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_C_RxR_IPQ_RETRY2.TARGET",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; Any Reject",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.ANY",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No Egress Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.FULL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; Address Conflict",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.ADDR_CONFLICT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No RTIDs",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.RTID",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No QPI Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.QPI_CREDITS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No IIO Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.IIO_CREDITS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.NID",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No AD Sbo Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_C_RxR_IRQ_RETRY2.AD_SBO",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No BL Sbo Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_C_RxR_IRQ_RETRY2.BL_SBO",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; Target Node Filter",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_C_RxR_IRQ_RETRY2.TARGET",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; Any Reject",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.ANY",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No Egress Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.FULL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No RTIDs",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.RTID",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No QPI Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.QPI_CREDITS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No IIO Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.IIO_CREDITS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Retries",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.WB_CREDITS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Retries",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.NID",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Request Queue Rejects; No AD Sbo Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY2.AD_SBO",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Request Queue Rejects; No BL Sbo Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY2.BL_SBO",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Request Queue Rejects; Target Node Filter",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY2.TARGET",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; IRQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; IPQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; PRQ Rejects",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.PRQ_REJ",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "SBo Credits Acquired; For AD Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3D",
+ "EventName": "UNC_C_SBO_CREDITS_ACQUIRED.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "SBo Credits Acquired; For BL Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3D",
+ "EventName": "UNC_C_SBO_CREDITS_ACQUIRED.BL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "SBo Credits Occupancy; For AD Ring",
+ "EventCode": "0x3E",
+ "EventName": "UNC_C_SBO_CREDIT_OCCUPANCY.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "SBo Credits Occupancy; For BL Ring",
+ "EventCode": "0x3E",
+ "EventName": "UNC_C_SBO_CREDIT_OCCUPANCY.BL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Opcode Match",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.OPCODE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "PCIe writes (partial cache line). Derived from unc_c_tor_inserts.opcode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "LLC_REFERENCES.PCIE_NS_PARTIAL_WRITE",
+ "Filter": "filter_opc=0x180,filter_tid=0x3e",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "L2 demand and L2 prefetch code references to LLC. Derived from unc_c_tor_inserts.opcode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "LLC_REFERENCES.CODE_LLC_PREFETCH",
+ "Filter": "filter_opc=0x181",
+ "PerPkg": "1",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Streaming stores (full cache line). Derived from unc_c_tor_inserts.opcode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "LLC_REFERENCES.STREAMING_FULL",
+ "Filter": "filter_opc=0x18c",
+ "PerPkg": "1",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Streaming stores (partial cache line). Derived from unc_c_tor_inserts.opcode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "LLC_REFERENCES.STREAMING_PARTIAL",
+ "Filter": "filter_opc=0x18d",
+ "PerPkg": "1",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "PCIe read current. Derived from unc_c_tor_inserts.opcode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "LLC_REFERENCES.PCIE_READ",
+ "Filter": "filter_opc=0x19e",
+ "PerPkg": "1",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "PCIe write references (full cache line). Derived from unc_c_tor_inserts.opcode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "LLC_REFERENCES.PCIE_WRITE",
+ "Filter": "filter_opc=0x1c8,filter_tid=0x3e",
+ "PerPkg": "1",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Evictions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.EVICTION",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Inserts; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Writebacks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Miss Opcode Match",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_OPCODE",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "CBO"
+ },
+ {
"BriefDescription": "LLC misses - demand and prefetch data reads - excludes LLC prefetches. Derived from unc_c_tor_inserts.miss_opcode",
"Counter": "0,1,2,3",
"EventCode": "0x35",
@@ -138,71 +1184,156 @@
"Unit": "CBO"
},
{
- "BriefDescription": "PCIe writes (partial cache line). Derived from unc_c_tor_inserts.opcode",
+ "BriefDescription": "TOR Inserts; NID and Opcode Matched",
"Counter": "0,1,2,3",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_NS_PARTIAL_WRITE",
- "Filter": "filter_opc=0x180,filter_tid=0x3e",
+ "EventName": "UNC_C_TOR_INSERTS.NID_OPCODE",
"PerPkg": "1",
- "UMask": "0x1",
+ "UMask": "0x41",
"Unit": "CBO"
},
{
- "BriefDescription": "L2 demand and L2 prefetch code references to LLC. Derived from unc_c_tor_inserts.opcode",
+ "BriefDescription": "TOR Inserts; NID Matched Evictions",
"Counter": "0,1,2,3",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.CODE_LLC_PREFETCH",
- "Filter": "filter_opc=0x181",
+ "EventName": "UNC_C_TOR_INSERTS.NID_EVICTION",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
+ "UMask": "0x44",
"Unit": "CBO"
},
{
- "BriefDescription": "Streaming stores (full cache line). Derived from unc_c_tor_inserts.opcode",
+ "BriefDescription": "TOR Inserts; NID Matched",
"Counter": "0,1,2,3",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.STREAMING_FULL",
- "Filter": "filter_opc=0x18c",
+ "EventName": "UNC_C_TOR_INSERTS.NID_ALL",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
+ "UMask": "0x48",
"Unit": "CBO"
},
{
- "BriefDescription": "Streaming stores (partial cache line). Derived from unc_c_tor_inserts.opcode",
+ "BriefDescription": "TOR Inserts; NID Matched Writebacks",
"Counter": "0,1,2,3",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.STREAMING_PARTIAL",
- "Filter": "filter_opc=0x18d",
+ "EventName": "UNC_C_TOR_INSERTS.NID_WB",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
+ "UMask": "0x50",
"Unit": "CBO"
},
{
- "BriefDescription": "PCIe read current. Derived from unc_c_tor_inserts.opcode",
+ "BriefDescription": "TOR Inserts; NID and Opcode Matched Miss",
"Counter": "0,1,2,3",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_READ",
- "Filter": "filter_opc=0x19e",
+ "EventName": "UNC_C_TOR_INSERTS.NID_MISS_OPCODE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
+ "UMask": "0x43",
"Unit": "CBO"
},
{
- "BriefDescription": "PCIe write references (full cache line). Derived from unc_c_tor_inserts.opcode",
+ "BriefDescription": "TOR Inserts; NID Matched Miss All",
"Counter": "0,1,2,3",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_WRITE",
- "Filter": "filter_opc=0x1c8,filter_tid=0x3e",
+ "EventName": "UNC_C_TOR_INSERTS.NID_MISS_ALL",
+ "PerPkg": "1",
+ "UMask": "0x4A",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Misses to Local Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x2A",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Misses to Remote Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x8A",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Local Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x28",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Remote Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x88",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Misses to Local Memory - Opcode Matched",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_LOCAL_OPCODE",
+ "PerPkg": "1",
+ "UMask": "0x23",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Misses to Remote Memory - Opcode Matched",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_REMOTE_OPCODE",
+ "PerPkg": "1",
+ "UMask": "0x83",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Local Memory - Opcode Matched",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.LOCAL_OPCODE",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Remote Memory - Opcode Matched",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.REMOTE_OPCODE",
+ "PerPkg": "1",
+ "UMask": "0x81",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Opcode Match",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.OPCODE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
"UMask": "0x1",
"Unit": "CBO"
},
{
+ "BriefDescription": "TOR Occupancy; Evictions",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.EVICTION",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Any",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
"BriefDescription": "Occupancy counter for LLC data reads (demand and L2 prefetch). Derived from unc_c_tor_occupancy.miss_opcode",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.LLC_DATA_READ",
@@ -212,7 +1343,1089 @@
"Unit": "CBO"
},
{
- "BriefDescription": "read requests to home agent",
+ "BriefDescription": "Occupancy counter for LLC data reads (demand and L2 prefetch)",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_OPCODE",
+ "Filter": "filter_opc=0x182",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Miss All",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_ALL",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID and Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_OPCODE",
+ "PerPkg": "1",
+ "UMask": "0x41",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID Matched Evictions",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_EVICTION",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_ALL",
+ "PerPkg": "1",
+ "UMask": "0x48",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID and Opcode Matched Miss",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_MISS_OPCODE",
+ "PerPkg": "1",
+ "UMask": "0x43",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_MISS_ALL",
+ "PerPkg": "1",
+ "UMask": "0x4A",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x2A",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x8A",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x28",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x88",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Misses to Local Memory - Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_LOCAL_OPCODE",
+ "PerPkg": "1",
+ "UMask": "0x23",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Misses to Remote Memory - Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_REMOTE_OPCODE",
+ "PerPkg": "1",
+ "UMask": "0x83",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Local Memory - Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.LOCAL_OPCODE",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Remote Memory - Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.REMOTE_OPCODE",
+ "PerPkg": "1",
+ "UMask": "0x81",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Writebacks",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID Matched Writebacks",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_WB",
+ "PerPkg": "1",
+ "UMask": "0x50",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Onto AD Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_C_TxR_ADS_USED.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Onto AK Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_C_TxR_ADS_USED.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Onto BL Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_C_TxR_ADS_USED.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AD - Cachebo",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AD_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AK - Cachebo",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AK_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; BL - Cacheno",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.BL_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; IV - Cachebo",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.IV_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AD - Corebo",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AD_CORE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AK - Corebo",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AK_CORE",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; BL - Corebo",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.BL_CORE",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto AK Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_C_TxR_STARVED.AK_BOTH",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto BL Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_C_TxR_STARVED.BL_BOTH",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto IV Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_C_TxR_STARVED.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto AD Ring (to core)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_C_TxR_STARVED.AD_CORE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; IRQ Rejected",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.IRQ_REJ",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in S State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.I_STATE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "QPI Address/Opcode Match; Address & Opcode Match",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_ADDR_OPC_MATCH.FILT",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "QPI Address/Opcode Match; Address",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_ADDR_OPC_MATCH.ADDR",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "QPI Address/Opcode Match; Opcode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_ADDR_OPC_MATCH.OPC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "QPI Address/Opcode Match; AD Opcodes",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_ADDR_OPC_MATCH.AD",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "QPI Address/Opcode Match; BL Opcodes",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_ADDR_OPC_MATCH.BL",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "QPI Address/Opcode Match; AK Opcodes",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_ADDR_OPC_MATCH.AK",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT Cycles Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x42",
+ "EventName": "UNC_H_BT_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT to HT Not Issued; Incoming Snoop Hazard",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.INCOMING_SNP_HAZARD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT to HT Not Issued; Incoming Data Hazard",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.INCOMING_BL_HAZARD",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT to HT Not Issued; Incoming Data Hazard",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.RSPACKCFLT_HAZARD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT to HT Not Issued; Incoming Data Hazard",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.WBMDATA_HAZARD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Bypass; Taken",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_H_BYPASS_IMC.TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Bypass; Not Taken",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_H_BYPASS_IMC.NOT_TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "uclks",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_H_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Direct2Core Messages Sent",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_H_DIRECT2CORE_COUNT",
+ "PerPkg": "1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles when Direct2Core was Disabled",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_H_DIRECT2CORE_CYCLES_DISABLED",
+ "PerPkg": "1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Number of Reads that had Direct2Core Overridden",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_H_DIRECT2CORE_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lat Opt Return",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_H_DIRECTORY_LAT_OPT",
+ "PerPkg": "1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lookups; Snoop Needed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC",
+ "EventName": "UNC_H_DIRECTORY_LOOKUP.SNP",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lookups; Snoop Not Needed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC",
+ "EventName": "UNC_H_DIRECTORY_LOOKUP.NO_SNP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates; Directory Set",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.SET",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates; Directory Clear",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.CLEAR",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates; Any Directory Update",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.ANY",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is RdCode, RdData, RdDataMigratory, RdInvOwn, RdCur or InvItoE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.READ_OR_INVITOE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is WbMtoI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is AckCnfltWbI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.ACKCNFLTWBI",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is WbMtoE or WbMtoS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.WBMTOE_OR_S",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is RspIFwd or RspIFwdWb for a remote request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.RSPFWDI_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is RspIFwd or RspIFwdWb for a local request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.RSPFWDI_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is RsSFwd or RspSFwdWb",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.RSPFWDS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is RspI, RspIWb, RspS, RspSWb, RspCnflt or RspCnfltWbI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.RSP",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.ALLOCS",
+ "PerPkg": "1",
+ "UMask": "0x70",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.EVICTS",
+ "PerPkg": "1",
+ "UMask": "0x42",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; Invalidations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.INVALS",
+ "PerPkg": "1",
+ "UMask": "0x26",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; All Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.ALL",
+ "PerPkg": "1",
+ "UMask": "0xFF",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; HOM Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.HOM",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RdCode, RdData, RdDataMigratory, RdInvOwn, RdCur or InvItoE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.READ_OR_INVITOE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is WbMtoI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is AckCnfltWbI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.ACKCNFLTWBI",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is WbMtoE or WbMtoS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.WBMTOE_OR_S",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RspIFwd or RspIFwdWb for a remote request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.RSPFWDI_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RspIFwd or RspIFwdWb for a local request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.RSPFWDI_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RsSFwd or RspSFwdWb",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.RSPFWDS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RspI, RspIWb, RspS, RspSWb, RspCnflt or RspCnfltWbI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.RSP",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; All Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.ALL",
+ "PerPkg": "1",
+ "UMask": "0xFF",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; HOM Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.HOM",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RdCode, RdData, RdDataMigratory, RdInvOwn, RdCur or InvItoE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.READ_OR_INVITOE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is WbMtoI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is AckCnfltWbI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.ACKCNFLTWBI",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is WbMtoE or WbMtoS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.WBMTOE_OR_S",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RspIFwd or RspIFwdWb for a remote request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.RSPFWDI_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RspIFwd or RspIFwdWb for a local request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.RSPFWDI_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RsSFwd or RspSFwdWb",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.RSPFWDS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RspI, RspIWb, RspS, RspSWb, RspCnflt or RspCnfltWbI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.RSP",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.ALLOCS",
+ "PerPkg": "1",
+ "UMask": "0x70",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; Invalidations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.INVALS",
+ "PerPkg": "1",
+ "UMask": "0x26",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; All Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.ALL",
+ "PerPkg": "1",
+ "UMask": "0xFF",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; HOM Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.HOM",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; AD to QPI Link 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; AD to QPI Link 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI2",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI2",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Normal Priority Reads Issued; Normal Priority",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x17",
+ "EventName": "UNC_H_IMC_READS.NORMAL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Retry Events",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_H_IMC_RETRY",
+ "PerPkg": "1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; Full Line Non-ISOCH",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1A",
+ "EventName": "UNC_H_IMC_WRITES.FULL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; Partial Non-ISOCH",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1A",
+ "EventName": "UNC_H_IMC_WRITES.PARTIAL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; ISOCH Full Line",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1A",
+ "EventName": "UNC_H_IMC_WRITES.FULL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; ISOCH Partial",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1A",
+ "EventName": "UNC_H_IMC_WRITES.PARTIAL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; All Writes",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1A",
+ "EventName": "UNC_H_IMC_WRITES.ALL",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Backpressure",
+ "Counter": "0,1,2",
+ "EventCode": "0x61",
+ "EventName": "UNC_H_IOT_BACKPRESSURE.SAT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Backpressure",
+ "Counter": "0,1,2",
+ "EventCode": "0x61",
+ "EventName": "UNC_H_IOT_BACKPRESSURE.HUB",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "Counter": "0,1,2",
+ "EventCode": "0x64",
+ "EventName": "UNC_H_IOT_CTS_EAST_LO.CTS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "Counter": "0,1,2",
+ "EventCode": "0x64",
+ "EventName": "UNC_H_IOT_CTS_EAST_LO.CTS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Hi",
+ "Counter": "0,1,2",
+ "EventCode": "0x65",
+ "EventName": "UNC_H_IOT_CTS_HI.CTS2",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Hi",
+ "Counter": "0,1,2",
+ "EventCode": "0x65",
+ "EventName": "UNC_H_IOT_CTS_HI.CTS3",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "Counter": "0,1,2",
+ "EventCode": "0x62",
+ "EventName": "UNC_H_IOT_CTS_WEST_LO.CTS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "Counter": "0,1,2",
+ "EventCode": "0x62",
+ "EventName": "UNC_H_IOT_CTS_WEST_LO.CTS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Local Reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.READS_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Local InvItoE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.INVITOE_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Remote",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Cancelled",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.CANCELLED",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Reads Local - Useful",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.READS_LOCAL_USEFUL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Remote - Useful",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.REMOTE_USEFUL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; Reads to Local I",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.READS_LOCAL_I",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; Reads to Remote I",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.READS_REMOTE_I",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; Reads to Local S",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.READS_LOCAL_S",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; Reads to Remote S",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.READS_REMOTE_S",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Read and Write Requests; Reads",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.READS",
@@ -221,7 +2434,16 @@
"Unit": "HA"
},
{
- "BriefDescription": "read requests to local home agent",
+ "BriefDescription": "Read and Write Requests; Writes",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_H_REQUESTS.WRITES",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Read and Write Requests; Local Reads",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.READS_LOCAL",
@@ -230,7 +2452,7 @@
"Unit": "HA"
},
{
- "BriefDescription": "read requests to remote home agent",
+ "BriefDescription": "Read and Write Requests; Remote Reads",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.READS_REMOTE",
@@ -239,49 +2461,427 @@
"Unit": "HA"
},
{
- "BriefDescription": "write requests to home agent",
+ "BriefDescription": "Read and Write Requests; Local Writes",
"Counter": "0,1,2,3",
"EventCode": "0x1",
- "EventName": "UNC_H_REQUESTS.WRITES",
+ "EventName": "UNC_H_REQUESTS.WRITES_LOCAL",
"PerPkg": "1",
- "UMask": "0xC",
+ "UMask": "0x4",
"Unit": "HA"
},
{
- "BriefDescription": "write requests to local home agent",
+ "BriefDescription": "Read and Write Requests; Remote Writes",
"Counter": "0,1,2,3",
"EventCode": "0x1",
- "EventName": "UNC_H_REQUESTS.WRITES_LOCAL",
+ "EventName": "UNC_H_REQUESTS.WRITES_REMOTE",
"PerPkg": "1",
- "UMask": "0x4",
+ "UMask": "0x8",
"Unit": "HA"
},
{
- "BriefDescription": "write requests to remote home agent",
+ "BriefDescription": "Read and Write Requests; Local InvItoEs",
"Counter": "0,1,2,3",
"EventCode": "0x1",
- "EventName": "UNC_H_REQUESTS.WRITES_REMOTE",
+ "EventName": "UNC_H_REQUESTS.INVITOE_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Read and Write Requests; Remote InvItoEs",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_H_REQUESTS.INVITOE_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CW_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CW_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CCW_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CW",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CCW",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CW_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CW_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CCW_ODD",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "HA"
},
{
- "BriefDescription": "Conflict requests (requests for same address from multiple agents simultaneously)",
+ "BriefDescription": "HA AK Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CW",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CCW",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CW_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CW_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CCW_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CW",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CCW",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Acquired; For AD Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x68",
+ "EventName": "UNC_H_SBO0_CREDITS_ACQUIRED.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Acquired; For BL Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x68",
+ "EventName": "UNC_H_SBO0_CREDITS_ACQUIRED.BL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Occupancy; For AD Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6A",
+ "EventName": "UNC_H_SBO0_CREDIT_OCCUPANCY.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Occupancy; For BL Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6A",
+ "EventName": "UNC_H_SBO0_CREDIT_OCCUPANCY.BL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Acquired; For AD Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x69",
+ "EventName": "UNC_H_SBO1_CREDITS_ACQUIRED.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Acquired; For BL Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x69",
+ "EventName": "UNC_H_SBO1_CREDITS_ACQUIRED.BL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Occupancy; For AD Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6B",
+ "EventName": "UNC_H_SBO1_CREDIT_OCCUPANCY.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Occupancy; For BL Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6B",
+ "EventName": "UNC_H_SBO1_CREDIT_OCCUPANCY.BL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Data beat the Snoop Responses; Local Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA",
+ "EventName": "UNC_H_SNOOPS_RSP_AFTER_DATA.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Data beat the Snoop Responses; Remote Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA",
+ "EventName": "UNC_H_SNOOPS_RSP_AFTER_DATA.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles with Snoops Outstanding; Local Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_H_SNOOP_CYCLES_NE.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles with Snoops Outstanding; Remote Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_H_SNOOP_CYCLES_NE.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles with Snoops Outstanding; All Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_H_SNOOP_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Snoops Outstanding Accumulator; Local Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_H_SNOOP_OCCUPANCY.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Snoops Outstanding Accumulator; Remote Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_H_SNOOP_OCCUPANCY.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received; RspI",
"Counter": "0,1,2,3",
"EventCode": "0x21",
- "EventName": "UNC_H_SNOOP_RESP.RSPCNFLCT",
+ "EventName": "UNC_H_SNOOP_RESP.RSPI",
"PerPkg": "1",
- "UMask": "0x40",
+ "UMask": "0x1",
"Unit": "HA"
},
{
- "BriefDescription": "M line forwarded from remote cache along with writeback to memory",
+ "BriefDescription": "Shared line response from remote cache",
"Counter": "0,1,2,3",
"EventCode": "0x21",
- "EventName": "UNC_H_SNOOP_RESP.RSP_FWD_WB",
+ "EventName": "UNC_H_SNOOP_RESP.RSPS",
"PerPkg": "1",
"ScaleUnit": "64Bytes",
- "UMask": "0x20",
+ "UMask": "0x2",
"Unit": "HA"
},
{
@@ -295,23 +2895,743 @@
"Unit": "HA"
},
{
- "BriefDescription": "Shared line response from remote cache",
+ "BriefDescription": "Shared line forwarded from remote cache",
"Counter": "0,1,2,3",
"EventCode": "0x21",
- "EventName": "UNC_H_SNOOP_RESP.RSPS",
+ "EventName": "UNC_H_SNOOP_RESP.RSPSFWD",
"PerPkg": "1",
"ScaleUnit": "64Bytes",
- "UMask": "0x2",
+ "UMask": "0x8",
"Unit": "HA"
},
{
- "BriefDescription": "Shared line forwarded from remote cache",
+ "BriefDescription": "Snoop Responses Received; Rsp*WB",
"Counter": "0,1,2,3",
"EventCode": "0x21",
- "EventName": "UNC_H_SNOOP_RESP.RSPSFWD",
+ "EventName": "UNC_H_SNOOP_RESP.RSP_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "M line forwarded from remote cache along with writeback to memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_SNOOP_RESP.RSP_FWD_WB",
"PerPkg": "1",
"ScaleUnit": "64Bytes",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received; RSPCNFLCT*",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_SNOOP_RESP.RSPCNFLCT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPI",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspIFwd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPIFWD",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspSFwd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPSFWD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; Rsp*WB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPxWB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; Rsp*FWD*WB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPxFWDxWB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspCnflct",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPCNFLCT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; Other",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.OTHER",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo0, AD Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6C",
+ "EventName": "UNC_H_STALL_NO_SBO_CREDIT.SBO0_AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo1, AD Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6C",
+ "EventName": "UNC_H_STALL_NO_SBO_CREDIT.SBO1_AD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo0, BL Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6C",
+ "EventName": "UNC_H_STALL_NO_SBO_CREDIT.SBO0_BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo1, BL Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6C",
+ "EventName": "UNC_H_STALL_NO_SBO_CREDIT.SBO1_BL",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION8",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION9",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION10",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION11",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Full; Cycles GP Completely Used",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_H_TRACKER_CYCLES_FULL.GP",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Full; Cycles Completely Used",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_H_TRACKER_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy Accumultor; Local Read Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_H_TRACKER_OCCUPANCY.READS_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy Accumultor; Remote Read Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_H_TRACKER_OCCUPANCY.READS_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy Accumultor; Local Write Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_H_TRACKER_OCCUPANCY.WRITES_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy Accumultor; Remote Write Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_H_TRACKER_OCCUPANCY.WRITES_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy Accumultor; Local InvItoE Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_H_TRACKER_OCCUPANCY.INVITOE_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy Accumultor; Remote InvItoE Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_H_TRACKER_OCCUPANCY.INVITOE_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Data Pending Occupancy Accumultor; Local Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_H_TRACKER_PENDING_OCCUPANCY.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Data Pending Occupancy Accumultor; Remote Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_H_TRACKER_PENDING_OCCUPANCY.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound NDR Ring Transactions; Non-data Responses",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xF",
+ "EventName": "UNC_H_TxR_AD.HOM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Full; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_TxR_AD_CYCLES_FULL.SCHED0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Full; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_TxR_AD_CYCLES_FULL.SCHED1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Full; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_TxR_AD_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Not Empty; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_TxR_AD_CYCLES_NE.SCHED0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Not Empty; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_TxR_AD_CYCLES_NE.SCHED1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Not Empty; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_TxR_AD_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Allocations; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_TxR_AD_INSERTS.SCHED0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Allocations; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_TxR_AD_INSERTS.SCHED1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Allocations; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_TxR_AD_INSERTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Full; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_TxR_AK_CYCLES_FULL.SCHED0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Full; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_TxR_AK_CYCLES_FULL.SCHED1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Full; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_TxR_AK_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Not Empty; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_H_TxR_AK_CYCLES_NE.SCHED0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Not Empty; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_H_TxR_AK_CYCLES_NE.SCHED1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Not Empty; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_H_TxR_AK_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_TxR_AK_INSERTS.SCHED0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_TxR_AK_INSERTS.SCHED1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_TxR_AK_INSERTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_H_TxR_BL.DRS_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Core",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_H_TxR_BL.DRS_CORE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to QPI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_H_TxR_BL.DRS_QPI",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Full; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TxR_BL_CYCLES_FULL.SCHED0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Full; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TxR_BL_CYCLES_FULL.SCHED1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Full; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TxR_BL_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Not Empty; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TxR_BL_CYCLES_NE.SCHED0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Not Empty; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TxR_BL_CYCLES_NE.SCHED1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Not Empty; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TxR_BL_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Allocations; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_TxR_BL_INSERTS.SCHED0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Allocations; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_TxR_BL_INSERTS.SCHED1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Allocations; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_TxR_BL_INSERTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Injection Starvation; For AK Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6D",
+ "EventName": "UNC_H_TxR_STARVED.AK",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Injection Starvation; For BL Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6D",
+ "EventName": "UNC_H_TxR_STARVED.BL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN3",
+ "PerPkg": "1",
"UMask": "0x8",
"Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty; Local Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_H_TRACKER_CYCLES_NE.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty; Remote Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_H_TRACKER_CYCLES_NE.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty; All Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_H_TRACKER_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
}
]
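
Note: every entry added above follows the same flat schema (BriefDescription, EventCode, EventName, PerPkg, UMask, Unit, plus optional fields such as Counter, ExtSel and ScaleUnit). As a quick illustration, independent of this patch, a minimal Python sketch like the one below can dump the event list from one of the touched files; it assumes it is run from the top of a kernel source checkout, and it handles only the fields visible in this diff.

    import json

    # Checkout-relative path; it matches the diff header that follows this note.
    PATH = "tools/perf/pmu-events/arch/x86/haswellx/uncore-interconnect.json"

    with open(PATH) as f:
        events = json.load(f)  # each pmu-events file is a flat JSON array of event objects

    for ev in events:
        # EventCode/UMask are hex strings and may be absent (e.g. the clocktick events).
        name = ev.get("EventName", "?")
        code = ev.get("EventCode", "-")
        umask = ev.get("UMask", "-")
        unit = ev.get("Unit", "")
        print(f"{name:50s} code={code:6s} umask={umask:6s} unit={unit}")
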
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/haswellx/uncore-interconnect.json
index 824961318c1e..3e48ff3516b0 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/uncore-interconnect.json
@@ -1,6 +1,6 @@
[
{
- "BriefDescription": "QPI clock ticks",
+ "BriefDescription": "Number of qfclks",
"Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_Q_CLOCKTICKS",
@@ -8,6 +8,979 @@
"Unit": "QPI LL"
},
{
+ "BriefDescription": "Count of CTO Events",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_CTO_COUNT",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Success",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.SUCCESS_RBT_HIT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - RBT Invalid",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_RBT_HIT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress and RBT Invalid",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS_RBT",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - RBT Miss",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_MISS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress and RBT Miss",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS_MISS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - RBT Miss and Invalid",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_RBT_MISS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress and RBT Miss, Invalid",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS_RBT_MISS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in L1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_Q_L1_POWER_CYCLES",
+ "PerPkg": "1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in L0p",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_Q_RxL0P_POWER_CYCLES",
+ "PerPkg": "1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in L0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xF",
+ "EventName": "UNC_Q_RxL0_POWER_CYCLES",
+ "PerPkg": "1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Bypassed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_Q_RxL_BYPASSED",
+ "PerPkg": "1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "CRC Errors Detected; LinkInit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_CRC_ERRORS.LINK_INIT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "CRC Errors Detected; Normal Operations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_CRC_ERRORS.NORMAL_OP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; DRS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.DRS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; NCB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NCB",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; NCS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NCS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; HOM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.HOM",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; SNP",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.SNP",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; NDR",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NDR",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed; DRS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.DRS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed; NCB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.NCB",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed; NCS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.NCS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed; HOM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.HOM",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed; SNP",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.SNP",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed; NDR",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.NDR",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VNA Credit Consumed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VNA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA",
+ "EventName": "UNC_Q_RxL_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - DRS; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xF",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_DRS.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - DRS; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xF",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_DRS.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - HOM; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_HOM.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - HOM; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_HOM.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - NCB; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_NCB.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - NCB; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_NCB.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - NCS; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_NCS.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - NCS; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_NCS.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - NDR; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_NDR.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - NDR; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_NDR.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - SNP; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_SNP.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - SNP; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_SNP.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 0; Idle and Null Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_RxL_FLITS_G0.IDLE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; SNP Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.SNP",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; HOM Request Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.HOM_REQ",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; HOM Non-Request Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.HOM_NONREQ",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; HOM Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.HOM",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; DRS Data Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.DRS_DATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; DRS Header Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.DRS_NONDATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; DRS Flits (both Header and Data)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.DRS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x18",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Data Response Rx Flits - AD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NDR_AD",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Data Response Rx Flits - AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NDR_AK",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Coherent data Rx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NCB_DATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Coherent non-data Rx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NCB_NONDATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Coherent Rx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NCB",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Coherent standard Rx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NCS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_Q_RxL_INSERTS",
+ "PerPkg": "1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - DRS; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_Q_RxL_INSERTS_DRS.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - DRS; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_Q_RxL_INSERTS_DRS.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - HOM; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC",
+ "EventName": "UNC_Q_RxL_INSERTS_HOM.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - HOM; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC",
+ "EventName": "UNC_Q_RxL_INSERTS_HOM.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NCB; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA",
+ "EventName": "UNC_Q_RxL_INSERTS_NCB.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NCB; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA",
+ "EventName": "UNC_Q_RxL_INSERTS_NCB.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NCS; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB",
+ "EventName": "UNC_Q_RxL_INSERTS_NCS.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NCS; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB",
+ "EventName": "UNC_Q_RxL_INSERTS_NCS.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NDR; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE",
+ "EventName": "UNC_Q_RxL_INSERTS_NDR.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NDR; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE",
+ "EventName": "UNC_Q_RxL_INSERTS_NDR.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - SNP; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD",
+ "EventName": "UNC_Q_RxL_INSERTS_SNP.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - SNP; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD",
+ "EventName": "UNC_Q_RxL_INSERTS_SNP.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB",
+ "EventName": "UNC_Q_RxL_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - DRS; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_DRS.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - DRS; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_DRS.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - HOM; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_HOM.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - HOM; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_HOM.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NCB; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NCB.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NCB; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NCB.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NCS; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x17",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NCS.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NCS; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x17",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NCS.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NDR; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1A",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NDR.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NDR; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1A",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NDR.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - SNP; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_SNP.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - SNP; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_SNP.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - HOM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.BGF_DRS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - SNP",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.BGF_NCB",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - NDR",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.BGF_NCS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - DRS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.BGF_HOM",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - NCB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.BGF_SNP",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - NCS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.BGF_NDR",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; Egress Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.EGRESS_CREDITS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; GV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.GV",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - HOM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3A",
+ "EventName": "UNC_Q_RxL_STALLS_VN1.BGF_DRS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - SNP",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3A",
+ "EventName": "UNC_Q_RxL_STALLS_VN1.BGF_NCB",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - NDR",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3A",
+ "EventName": "UNC_Q_RxL_STALLS_VN1.BGF_NCS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - DRS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3A",
+ "EventName": "UNC_Q_RxL_STALLS_VN1.BGF_HOM",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - NCB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3A",
+ "EventName": "UNC_Q_RxL_STALLS_VN1.BGF_SNP",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - NCS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3A",
+ "EventName": "UNC_Q_RxL_STALLS_VN1.BGF_NDR",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in L0p",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD",
+ "EventName": "UNC_Q_TxL0P_POWER_CYCLES",
+ "PerPkg": "1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in L0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC",
+ "EventName": "UNC_Q_TxL0_POWER_CYCLES",
+ "PerPkg": "1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Bypassed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_Q_TxL_BYPASSED",
+ "PerPkg": "1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with no LLR Credits; LLR is full",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_TxL_CRC_NO_CREDITS.FULL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with no LLR Credits; LLR is almost full",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_TxL_CRC_NO_CREDITS.ALMOST_FULL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Cycles not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_Q_TxL_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "QPI LL"
+ },
+ {
"BriefDescription": "Number of data flits transmitted . Derived from unc_q_txl_flits_g0.data",
"Counter": "0,1,2,3",
"EventName": "QPI_DATA_BANDWIDTH_TX",
@@ -17,6 +990,15 @@
"Unit": "QPI LL"
},
{
+ "BriefDescription": "Number of data flits transmitted ",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G0.DATA",
+ "PerPkg": "1",
+ "ScaleUnit": "8Bytes",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
"BriefDescription": "Number of non data (control) flits transmitted . Derived from unc_q_txl_flits_g0.non_data",
"Counter": "0,1,2,3",
"EventName": "QPI_CTL_BANDWIDTH_TX",
@@ -24,5 +1006,449 @@
"ScaleUnit": "8Bytes",
"UMask": "0x4",
"Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Number of non data (control) flits transmitted ",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G0.NON_DATA",
+ "PerPkg": "1",
+ "ScaleUnit": "8Bytes",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; SNP Flits",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G1.SNP",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; HOM Request Flits",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G1.HOM_REQ",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; HOM Non-Request Flits",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G1.HOM_NONREQ",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; HOM Flits",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G1.HOM",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; DRS Data Flits",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G1.DRS_DATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; DRS Header Flits",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G1.DRS_NONDATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; DRS Flits (both Header and Data)",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G1.DRS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x18",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Data Response Tx Flits - AD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NDR_AD",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Data Response Tx Flits - AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NDR_AK",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Coherent data Tx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NCB_DATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Coherent non-data Tx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NCB_NONDATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Coherent Bypass Tx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NCB",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Coherent standard Tx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NCS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_Q_TxL_INSERTS",
+ "PerPkg": "1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_Q_TxL_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - HOM; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x26",
+ "EventName": "UNC_Q_TxR_AD_HOM_CREDIT_ACQUIRED.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - HOM; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x26",
+ "EventName": "UNC_Q_TxR_AD_HOM_CREDIT_ACQUIRED.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD HOM; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_Q_TxR_AD_HOM_CREDIT_OCCUPANCY.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD HOM; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_Q_TxR_AD_HOM_CREDIT_OCCUPANCY.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_Q_TxR_AD_NDR_CREDIT_ACQUIRED.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_Q_TxR_AD_NDR_CREDIT_ACQUIRED.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_Q_TxR_AD_NDR_CREDIT_OCCUPANCY.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_Q_TxR_AD_NDR_CREDIT_OCCUPANCY.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - SNP; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_Q_TxR_AD_SNP_CREDIT_ACQUIRED.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - SNP; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_Q_TxR_AD_SNP_CREDIT_ACQUIRED.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD SNP; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_Q_TxR_AD_SNP_CREDIT_OCCUPANCY.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD SNP; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_Q_TxR_AD_SNP_CREDIT_OCCUPANCY.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AK NDR",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_Q_TxR_AK_NDR_CREDIT_ACQUIRED",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AK NDR",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x25",
+ "EventName": "UNC_Q_TxR_AK_NDR_CREDIT_OCCUPANCY",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - DRS; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_Q_TxR_BL_DRS_CREDIT_ACQUIRED.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - DRS; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_Q_TxR_BL_DRS_CREDIT_ACQUIRED.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - DRS; for Shared VN",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_Q_TxR_BL_DRS_CREDIT_ACQUIRED.VN_SHR",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL DRS; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1F",
+ "EventName": "UNC_Q_TxR_BL_DRS_CREDIT_OCCUPANCY.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL DRS; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1F",
+ "EventName": "UNC_Q_TxR_BL_DRS_CREDIT_OCCUPANCY.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL DRS; for Shared VN",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1F",
+ "EventName": "UNC_Q_TxR_BL_DRS_CREDIT_OCCUPANCY.VN_SHR",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - NCB; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_Q_TxR_BL_NCB_CREDIT_ACQUIRED.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - NCB; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_Q_TxR_BL_NCB_CREDIT_ACQUIRED.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL NCB; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_Q_TxR_BL_NCB_CREDIT_OCCUPANCY.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL NCB; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_Q_TxR_BL_NCB_CREDIT_OCCUPANCY.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - NCS; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_Q_TxR_BL_NCS_CREDIT_ACQUIRED.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - NCS; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_Q_TxR_BL_NCS_CREDIT_ACQUIRED.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL NCS; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_Q_TxR_BL_NCS_CREDIT_OCCUPANCY.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL NCS; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_Q_TxR_BL_NCS_CREDIT_OCCUPANCY.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VNA Credits Returned",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_Q_VNA_CREDIT_RETURNS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VNA Credits Pending Return - Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_Q_VNA_CREDIT_RETURN_OCCUPANCY",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "Unit": "QPI LL"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/uncore-memory.json b/tools/perf/pmu-events/arch/x86/haswellx/uncore-memory.json
index 66eed399724c..db3418db312e 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/uncore-memory.json
@@ -1,5 +1,77 @@
[
{
+ "BriefDescription": "DRAM Activate Count; Activate due to Read",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_M_ACT_COUNT.RD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Activate Count; Activate due to Write",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_M_ACT_COUNT.WR",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Activate Count; Activate due to Write",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_M_ACT_COUNT.BYP",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "ACT command issued by 2 cycle bypass",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M_BYP_CMDS.ACT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CAS command issued by 2 cycle bypass",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M_BYP_CMDS.CAS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PRE command issued by 2 cycle bypass",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M_BYP_CMDS.PRE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM RD_CAS (w/ and w/out auto-pre)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_REG",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Underfill Read Issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
"BriefDescription": "read requests to memory controller. Derived from unc_m_cas_count.rd",
"Counter": "0,1,2,3",
"EventCode": "0x4",
@@ -10,6 +82,34 @@
"Unit": "iMC"
},
{
+ "BriefDescription": "read requests to memory controller",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD",
+ "PerPkg": "1",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Write Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR_WMM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Read Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR_RMM",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
"BriefDescription": "write requests to memory controller. Derived from unc_m_cas_count.wr",
"Counter": "0,1,2,3",
"EventCode": "0x4",
@@ -20,44 +120,323 @@
"Unit": "iMC"
},
{
- "BriefDescription": "Memory controller clock ticks",
+ "BriefDescription": "write requests to memory controller",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR",
+ "PerPkg": "1",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM WR_CAS (w/ and w/out auto-pre)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.ALL",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Read CAS issued in WMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_WMM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Read CAS issued in RMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_RMM",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Clockticks",
"Counter": "0,1,2,3",
"EventName": "UNC_M_CLOCKTICKS",
"PerPkg": "1",
"Unit": "iMC"
},
{
- "BriefDescription": "Cycles where DRAM ranks are in power down (CKE) mode",
+ "BriefDescription": "DRAM Precharge All Commands",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_M_DRAM_PRE_ALL",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_M_DRAM_REFRESH.PANIC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_M_DRAM_REFRESH.HIGH",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "ECC Correctable Errors",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_M_ECC_CORRECTABLE_ERRORS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Read Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.READ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Write Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.WRITE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Partial Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.PARTIAL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Isoch Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Channel DLLOFF Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_M_POWER_CHANNEL_DLLOFF",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Channel PPD Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M_POWER_CHANNEL_PPD",
- "MetricExpr": "(UNC_M_POWER_CHANNEL_PPD / UNC_M_CLOCKTICKS) * 100.",
- "MetricName": "power_channel_ppd %",
"PerPkg": "1",
"Unit": "iMC"
},
{
- "BriefDescription": "Cycles all ranks are in critical thermal throttle",
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Critical Throttle Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES",
- "MetricExpr": "(UNC_M_POWER_CRITICAL_THROTTLE_CYCLES / UNC_M_CLOCKTICKS) * 100.",
- "MetricName": "power_critical_throttle_cycles %",
"PerPkg": "1",
"Unit": "iMC"
},
{
- "BriefDescription": "Cycles Memory is in self refresh power mode",
+ "BriefDescription": "UNC_M_POWER_PCU_THROTTLING",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x42",
+ "EventName": "UNC_M_POWER_PCU_THROTTLING",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Clock-Enabled Self-Refresh",
"Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M_POWER_SELF_REFRESH",
- "MetricExpr": "(UNC_M_POWER_SELF_REFRESH / UNC_M_CLOCKTICKS) * 100.",
- "MetricName": "power_self_refresh %",
"PerPkg": "1",
"Unit": "iMC"
},
{
- "BriefDescription": "Pre-charges due to page misses",
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Preemption Count; Read over Read Preemption",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_M_PREEMPTION.RD_PREEMPT_RD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Preemption Count; Read over Write Preemption",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_M_PREEMPTION.RD_PREEMPT_WR",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.; Precharges due to page miss",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.PAGE_MISS",
@@ -66,7 +445,16 @@
"Unit": "iMC"
},
{
- "BriefDescription": "Pre-charge for reads",
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to timer expiration",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_M_PRE_COUNT.PAGE_CLOSE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to read",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.RD",
@@ -75,12 +463,2437 @@
"Unit": "iMC"
},
{
- "BriefDescription": "Pre-charge for writes",
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to write",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.WR",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to bypass",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_M_PRE_COUNT.BYP",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with LOW priority",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.LOW",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with MEDIUM priority",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.MED",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with HIGH priority",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.HIGH",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with PANIC NON ISOCH priority (starved)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.PANIC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_M_RPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_M_RPQ_INSERTS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "VMSE MXB write buffer occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_M_VMSE_MXB_WR_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "VMSE WR PUSH issued; VMSE write PUSH issued in WMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_M_VMSE_WR_PUSH.WMM",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "VMSE WR PUSH issued; VMSE write PUSH issued in RMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_M_VMSE_WR_PUSH.RMM",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Transition from WMM to RMM because of low threshold; Transition from WMM to RMM because of starve counter",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_M_WMM_TO_RMM.LOW_THRESH",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Transition from WMM to RMM because of low threshold",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_M_WMM_TO_RMM.STARVE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Transition from WMM to RMM because of low threshold",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_M_WMM_TO_RMM.VMSE_RETRY",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Full Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_M_WPQ_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_M_WPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_M_WPQ_READ_HIT",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_M_WPQ_WRITE_HIT",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Not getting the requested Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_M_WRONG_MM",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Clockticks",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_M_DCLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "iMC"
}
]
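The uncore-memory.json entries above are plain JSON descriptors (EventCode, UMask, PerPkg, Unit) that perf folds into its built-in event tables. As a rough sketch only, the helper below shows how one such entry could be mapped by hand to a raw perf selector for an iMC uncore PMU; the in-tree path, the "uncore_imc" PMU name, and the helper itself are illustrative assumptions, not anything this patch defines.

#!/usr/bin/env python3
# Hypothetical helper: turn a named entry from the JSON added above into a
# raw perf event selector string. Path and PMU name are assumptions.
import json

EVENTS = "tools/perf/pmu-events/arch/x86/haswellx/uncore-memory.json"

def selector(event_name, pmu="uncore_imc", path=EVENTS):
    """Build e.g. 'uncore_imc/event=0xB5,umask=0x10/' for a named JSON event."""
    with open(path) as f:
        for entry in json.load(f):
            if entry.get("EventName") != event_name:
                continue
            terms = []
            if "EventCode" in entry:        # fixed/free-running events may omit this
                terms.append("event=" + entry["EventCode"])
            if "UMask" in entry:
                terms.append("umask=" + entry["UMask"])
            return pmu + "/" + ",".join(terms) + "/"
    raise KeyError(event_name)

if __name__ == "__main__":
    # e.g.: perf stat -a -e "$(./this_script.py)" -- sleep 1
    print(selector("UNC_M_RD_CAS_RANK5.ALLBANKS"))

Once perf is built with these tables it can also resolve the lowercase event alias (e.g. unc_m_rd_cas_rank5.allbanks) directly, so the manual mapping is only needed when experimenting with raw event/umask values.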
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/uncore-other.json b/tools/perf/pmu-events/arch/x86/haswellx/uncore-other.json
new file mode 100644
index 000000000000..135b59f34f37
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/haswellx/uncore-other.json
@@ -0,0 +1,3170 @@
+[
+ {
+ "BriefDescription": "Total Write Cache Occupancy; Any Source",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.ANY",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Total Write Cache Occupancy; Select Source",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.SOURCE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Clocks in the IRP",
+ "Counter": "0,1",
+ "EventName": "UNC_I_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; PCIRdCur",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.PCIRDCUR",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; CRd",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.CRD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; DRd",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.DRD",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; RFO",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.RFO",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; PCIItoM",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.PCITOM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; PCIDCAHin5t",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.PCIDCAHINT",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; WbMtoI",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; CLFlush",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.CLFLUSH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Fastpath Requests",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.FAST_REQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Fastpath Rejects",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.FAST_REJ",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Cache Inserts of Read Transactions as Secondary",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.2ND_RD_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Cache Inserts of Write Transactions as Secondary",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.2ND_WR_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Cache Inserts of Atomic Transactions as Secondary",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.2ND_ATOMIC_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Fastpath Transfers From Primary to Secondary",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.FAST_XFER",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Prefetch Ack Hints From Primary to Secondary",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.PF_ACK_HINT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of I Line",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.SLOW_I",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of S Line",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.SLOW_S",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of E Line",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.SLOW_E",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of M Line",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.SLOW_M",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.LOST_FWD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Received Invalid",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.SEC_RCVD_INVLD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Received Valid",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.SEC_RCVD_VLD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Data Throttled",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.DATA_THROTTLE",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "AK Ingress Occupancy",
+ "Counter": "0,1",
+ "EventCode": "0xA",
+ "EventName": "UNC_I_RxR_AK_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "UNC_I_RxR_BL_DRS_CYCLES_FULL",
+ "Counter": "0,1",
+ "EventCode": "0x4",
+ "EventName": "UNC_I_RxR_BL_DRS_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL Ingress Occupancy - DRS",
+ "Counter": "0,1",
+ "EventCode": "0x1",
+ "EventName": "UNC_I_RxR_BL_DRS_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "UNC_I_RxR_BL_DRS_OCCUPANCY",
+ "Counter": "0,1",
+ "EventCode": "0x7",
+ "EventName": "UNC_I_RxR_BL_DRS_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "UNC_I_RxR_BL_NCB_CYCLES_FULL",
+ "Counter": "0,1",
+ "EventCode": "0x5",
+ "EventName": "UNC_I_RxR_BL_NCB_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL Ingress Occupancy - NCB",
+ "Counter": "0,1",
+ "EventCode": "0x2",
+ "EventName": "UNC_I_RxR_BL_NCB_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "UNC_I_RxR_BL_NCB_OCCUPANCY",
+ "Counter": "0,1",
+ "EventCode": "0x8",
+ "EventName": "UNC_I_RxR_BL_NCB_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "UNC_I_RxR_BL_NCS_CYCLES_FULL",
+ "Counter": "0,1",
+ "EventCode": "0x6",
+ "EventName": "UNC_I_RxR_BL_NCS_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL Ingress Occupancy - NCS",
+ "Counter": "0,1",
+ "EventCode": "0x3",
+ "EventName": "UNC_I_RxR_BL_NCS_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "UNC_I_RxR_BL_NCS_OCCUPANCY",
+ "Counter": "0,1",
+ "EventCode": "0x9",
+ "EventName": "UNC_I_RxR_BL_NCS_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Miss",
+ "Counter": "0,1",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.MISS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Hit I",
+ "Counter": "0,1",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_I",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Hit E or S",
+ "Counter": "0,1",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_ES",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Hit M",
+ "Counter": "0,1",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_M",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; SnpCode",
+ "Counter": "0,1",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.SNPCODE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; SnpData",
+ "Counter": "0,1",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.SNPDATA",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; SnpInv",
+ "Counter": "0,1",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.SNPINV",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Reads",
+ "Counter": "0,1",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TRANSACTIONS.READS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Writes",
+ "Counter": "0,1",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TRANSACTIONS.WRITES",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Read Prefetches",
+ "Counter": "0,1",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TRANSACTIONS.RD_PREF",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Write Prefetches",
+ "Counter": "0,1",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TRANSACTIONS.WR_PREF",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Atomic",
+ "Counter": "0,1",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TRANSACTIONS.ATOMIC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Other",
+ "Counter": "0,1",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TRANSACTIONS.OTHER",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Select Source",
+ "Counter": "0,1",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TRANSACTIONS.ORDERINGQ",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No AD Egress Credit Stalls",
+ "Counter": "0,1",
+ "EventCode": "0x18",
+ "EventName": "UNC_I_TxR_AD_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No BL Egress Credit Stalls",
+ "Counter": "0,1",
+ "EventCode": "0x19",
+ "EventName": "UNC_I_TxR_BL_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
+ "EventCode": "0xE",
+ "EventName": "UNC_I_TxR_DATA_INSERTS_NCB",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
+ "EventCode": "0xF",
+ "EventName": "UNC_I_TxR_DATA_INSERTS_NCS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Request Queue Occupancy",
+ "Counter": "0,1",
+ "EventCode": "0xD",
+ "EventName": "UNC_I_TxR_REQUEST_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Prefetch TimeOut",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.PF_TIMEOUT",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Number of uclks in domain",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_R2_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "UNC_R2_IIO_CREDIT.PRQ_QPI0",
+ "Counter": "0,1",
+ "EventCode": "0x2D",
+ "EventName": "UNC_R2_IIO_CREDIT.PRQ_QPI0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "UNC_R2_IIO_CREDIT.PRQ_QPI1",
+ "Counter": "0,1",
+ "EventCode": "0x2D",
+ "EventName": "UNC_R2_IIO_CREDIT.PRQ_QPI1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "UNC_R2_IIO_CREDIT.ISOCH_QPI0",
+ "Counter": "0,1",
+ "EventCode": "0x2D",
+ "EventName": "UNC_R2_IIO_CREDIT.ISOCH_QPI0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "UNC_R2_IIO_CREDIT.ISOCH_QPI1",
+ "Counter": "0,1",
+ "EventCode": "0x2D",
+ "EventName": "UNC_R2_IIO_CREDIT.ISOCH_QPI1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credit Acquired; DRS",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.DRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credit Acquired; NCB",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credit Acquired; NCS",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credits in Use; DRS",
+ "Counter": "0,1",
+ "EventCode": "0x32",
+ "EventName": "UNC_R2_IIO_CREDITS_USED.DRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credits in Use; NCB",
+ "Counter": "0,1",
+ "EventCode": "0x32",
+ "EventName": "UNC_R2_IIO_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credits in Use; NCS",
+ "Counter": "0,1",
+ "EventCode": "0x32",
+ "EventName": "UNC_R2_IIO_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CW_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CW_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CCW_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CW",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CCW",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "AK Ingress Bounced; Up",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_R2_RING_AK_BOUNCES.UP",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "AK Ingress Bounced; Dn",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_R2_RING_AK_BOUNCES.DN",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CW_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CW_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CCW_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CW",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CCW",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CW_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CW_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CCW_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CW",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CCW",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 IV Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA",
+ "EventName": "UNC_R2_RING_IV_USED.CW",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 IV Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA",
+ "EventName": "UNC_R2_RING_IV_USED.CCW",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 IV Ring in Use; Any",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA",
+ "EventName": "UNC_R2_RING_IV_USED.ANY",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; NCB",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_R2_RxR_CYCLES_NE.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; NCS",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_R2_RxR_CYCLES_NE.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NCB",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_R2_RxR_INSERTS.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NCS",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_R2_RxR_INSERTS.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy Accumulator; DRS",
+ "EventCode": "0x13",
+ "EventName": "UNC_R2_RxR_OCCUPANCY.DRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Acquired; For AD Ring",
+ "Counter": "0,1",
+ "EventCode": "0x28",
+ "EventName": "UNC_R2_SBO0_CREDITS_ACQUIRED.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Acquired; For BL Ring",
+ "Counter": "0,1",
+ "EventCode": "0x28",
+ "EventName": "UNC_R2_SBO0_CREDITS_ACQUIRED.BL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Occupancy; For AD Ring",
+ "EventCode": "0x2A",
+ "EventName": "UNC_R2_SBO0_CREDIT_OCCUPANCY.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Occupancy; For BL Ring",
+ "EventCode": "0x2A",
+ "EventName": "UNC_R2_SBO0_CREDIT_OCCUPANCY.BL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo0, AD Ring",
+ "Counter": "0,1",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R2_STALL_NO_SBO_CREDIT.SBO0_AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo1, AD Ring",
+ "Counter": "0,1",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R2_STALL_NO_SBO_CREDIT.SBO1_AD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo0, BL Ring",
+ "Counter": "0,1",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R2_STALL_NO_SBO_CREDIT.SBO0_BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo1, BL Ring",
+ "Counter": "0,1",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R2_STALL_NO_SBO_CREDIT.SBO1_BL",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Full; AD",
+ "EventCode": "0x25",
+ "EventName": "UNC_R2_TxR_CYCLES_FULL.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Full; AK",
+ "EventCode": "0x25",
+ "EventName": "UNC_R2_TxR_CYCLES_FULL.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Full; BL",
+ "EventCode": "0x25",
+ "EventName": "UNC_R2_TxR_CYCLES_FULL.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Not Empty; AD",
+ "EventCode": "0x23",
+ "EventName": "UNC_R2_TxR_CYCLES_NE.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Not Empty; AK",
+ "EventCode": "0x23",
+ "EventName": "UNC_R2_TxR_CYCLES_NE.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Not Empty; BL",
+ "EventCode": "0x23",
+ "EventName": "UNC_R2_TxR_CYCLES_NE.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; AD CCW",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.DN_AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; BL CCW",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.DN_BL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; AK CCW",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.DN_AK",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; AK CCW",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.UP_AD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; BL CCW",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.UP_BL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; BL CW",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.UP_AK",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Number of uclks in domain",
+ "Counter": "0,1,2",
+ "EventCode": "0x1",
+ "EventName": "UNC_R3_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x1F",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO8",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x1F",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO9",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x1F",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO10",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x1F",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO11",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x1F",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO12",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x1F",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO13",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x1F",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO14_16",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x1F",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO_15_17",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "HA/R2 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2D",
+ "EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.HA0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "HA/R2 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2D",
+ "EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.HA1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "HA/R2 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2D",
+ "EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.R2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "HA/R2 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2D",
+ "EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.R2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "IOT Backpressure",
+ "Counter": "0,1,2",
+ "EventCode": "0xB",
+ "EventName": "UNC_R3_IOT_BACKPRESSURE.SAT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "IOT Backpressure",
+ "Counter": "0,1,2",
+ "EventCode": "0xB",
+ "EventName": "UNC_R3_IOT_BACKPRESSURE.HUB",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Hi",
+ "Counter": "0,1,2",
+ "EventCode": "0xD",
+ "EventName": "UNC_R3_IOT_CTS_HI.CTS2",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Hi",
+ "Counter": "0,1,2",
+ "EventCode": "0xD",
+ "EventName": "UNC_R3_IOT_CTS_HI.CTS3",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "Counter": "0,1,2",
+ "EventCode": "0xC",
+ "EventName": "UNC_R3_IOT_CTS_LO.CTS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "Counter": "0,1,2",
+ "EventCode": "0xC",
+ "EventName": "UNC_R3_IOT_CTS_LO.CTS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x20",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x20",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN0_HOM",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x20",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN0_SNP",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x20",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN0_NDR",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x20",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN1_HOM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x20",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN1_SNP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x20",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN1_NDR",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 BL Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x21",
+ "EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 BL Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x21",
+ "EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN1_HOM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 BL Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x21",
+ "EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN1_SNP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 BL Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x21",
+ "EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN1_NDR",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2E",
+ "EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2E",
+ "EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN1_HOM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2E",
+ "EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN1_SNP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2E",
+ "EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN1_NDR",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2F",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2F",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN0_HOM",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2F",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN0_SNP",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2F",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN0_NDR",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2F",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN1_HOM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2F",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN1_SNP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2F",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN1_NDR",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CW_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CW_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CCW_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Clockwise",
+ "Counter": "0,1,2",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CW",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Counterclockwise",
+ "Counter": "0,1,2",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CCW",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CW_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CW_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CCW_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Clockwise",
+ "Counter": "0,1,2",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CW",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Counterclockwise",
+ "Counter": "0,1,2",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CCW",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CW_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CW_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CCW_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Clockwise",
+ "Counter": "0,1,2",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CW",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Counterclockwise",
+ "Counter": "0,1,2",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CCW",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 IV Ring in Use; Clockwise",
+ "Counter": "0,1,2",
+ "EventCode": "0xA",
+ "EventName": "UNC_R3_RING_IV_USED.CW",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 IV Ring in Use; Any",
+ "Counter": "0,1,2",
+ "EventCode": "0xA",
+ "EventName": "UNC_R3_RING_IV_USED.ANY",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ring Stop Starved; AK",
+ "Counter": "0,1,2",
+ "EventCode": "0xE",
+ "EventName": "UNC_R3_RING_SINK_STARVED.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; HOM",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_R3_RxR_CYCLES_NE.HOM",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; SNP",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_R3_RxR_CYCLES_NE.SNP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; NDR",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_R3_RxR_CYCLES_NE.NDR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Cycles Not Empty; HOM",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_R3_RxR_CYCLES_NE_VN1.HOM",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Cycles Not Empty; SNP",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_R3_RxR_CYCLES_NE_VN1.SNP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Cycles Not Empty; NDR",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_R3_RxR_CYCLES_NE_VN1.NDR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Cycles Not Empty; DRS",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_R3_RxR_CYCLES_NE_VN1.DRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Cycles Not Empty; NCB",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_R3_RxR_CYCLES_NE_VN1.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Cycles Not Empty; NCS",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_R3_RxR_CYCLES_NE_VN1.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; HOM",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.HOM",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; SNP",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.SNP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NDR",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.NDR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; DRS",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.DRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NCB",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NCS",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Allocations; HOM",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_R3_RxR_INSERTS_VN1.HOM",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Allocations; SNP",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_R3_RxR_INSERTS_VN1.SNP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Allocations; NDR",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_R3_RxR_INSERTS_VN1.NDR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Allocations; DRS",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_R3_RxR_INSERTS_VN1.DRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Allocations; NCB",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_R3_RxR_INSERTS_VN1.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Allocations; NCS",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_R3_RxR_INSERTS_VN1.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Occupancy Accumulator; HOM",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY_VN1.HOM",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Occupancy Accumulator; SNP",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY_VN1.SNP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Occupancy Accumulator; NDR",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY_VN1.NDR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Occupancy Accumulator; DRS",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY_VN1.DRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Occupancy Accumulator; NCB",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY_VN1.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Occupancy Accumulator; NCS",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY_VN1.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Acquired; For AD Ring",
+ "Counter": "0,1",
+ "EventCode": "0x28",
+ "EventName": "UNC_R3_SBO0_CREDITS_ACQUIRED.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Acquired; For BL Ring",
+ "Counter": "0,1",
+ "EventCode": "0x28",
+ "EventName": "UNC_R3_SBO0_CREDITS_ACQUIRED.BL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Occupancy; For AD Ring",
+ "EventCode": "0x2A",
+ "EventName": "UNC_R3_SBO0_CREDIT_OCCUPANCY.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Occupancy; For BL Ring",
+ "EventCode": "0x2A",
+ "EventName": "UNC_R3_SBO0_CREDIT_OCCUPANCY.BL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Acquired; For AD Ring",
+ "Counter": "0,1",
+ "EventCode": "0x29",
+ "EventName": "UNC_R3_SBO1_CREDITS_ACQUIRED.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Acquired; For BL Ring",
+ "Counter": "0,1",
+ "EventCode": "0x29",
+ "EventName": "UNC_R3_SBO1_CREDITS_ACQUIRED.BL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Occupancy; For AD Ring",
+ "EventCode": "0x2B",
+ "EventName": "UNC_R3_SBO1_CREDIT_OCCUPANCY.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Occupancy; For BL Ring",
+ "EventCode": "0x2B",
+ "EventName": "UNC_R3_SBO1_CREDIT_OCCUPANCY.BL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo0, AD Ring",
+ "Counter": "0,1",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R3_STALL_NO_SBO_CREDIT.SBO0_AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo1, AD Ring",
+ "Counter": "0,1",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R3_STALL_NO_SBO_CREDIT.SBO1_AD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo0, BL Ring",
+ "Counter": "0,1",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R3_STALL_NO_SBO_CREDIT.SBO0_BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo1, BL Ring",
+ "Counter": "0,1",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R3_STALL_NO_SBO_CREDIT.SBO1_BL",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; AD CCW",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R3_TxR_NACK.DN_AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; BL CCW",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R3_TxR_NACK.DN_BL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; AK CCW",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R3_TxR_NACK.DN_AK",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; AK CCW",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R3_TxR_NACK.UP_AD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; BL CCW",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R3_TxR_NACK.UP_BL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; BL CW",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R3_TxR_NACK.UP_AK",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; HOM Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.HOM",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; SNP Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.SNP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; NDR Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.NDR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; DRS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.DRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; NCB Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; NCS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; HOM Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.HOM",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; SNP Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.SNP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; NDR Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.NDR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; DRS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.DRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; NCB Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; NCS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Acquisition Failed on DRS; HOM Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x39",
+ "EventName": "UNC_R3_VN1_CREDITS_REJECT.HOM",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Acquisition Failed on DRS; SNP Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x39",
+ "EventName": "UNC_R3_VN1_CREDITS_REJECT.SNP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Acquisition Failed on DRS; NDR Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x39",
+ "EventName": "UNC_R3_VN1_CREDITS_REJECT.NDR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Acquisition Failed on DRS; DRS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x39",
+ "EventName": "UNC_R3_VN1_CREDITS_REJECT.DRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Acquisition Failed on DRS; NCB Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x39",
+ "EventName": "UNC_R3_VN1_CREDITS_REJECT.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Acquisition Failed on DRS; NCS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x39",
+ "EventName": "UNC_R3_VN1_CREDITS_REJECT.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; HOM Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x38",
+ "EventName": "UNC_R3_VN1_CREDITS_USED.HOM",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; SNP Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x38",
+ "EventName": "UNC_R3_VN1_CREDITS_USED.SNP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; NDR Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x38",
+ "EventName": "UNC_R3_VN1_CREDITS_USED.NDR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; DRS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x38",
+ "EventName": "UNC_R3_VN1_CREDITS_USED.DRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; NCB Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x38",
+ "EventName": "UNC_R3_VN1_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; NCS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x38",
+ "EventName": "UNC_R3_VN1_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA credit Acquisitions; HOM Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_R3_VNA_CREDITS_ACQUIRED.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA credit Acquisitions; HOM Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_R3_VNA_CREDITS_ACQUIRED.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; HOM Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.HOM",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; SNP Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.SNP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; NDR Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.NDR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; DRS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.DRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; NCB Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; NCS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Bounce Control",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA",
+ "EventName": "UNC_S_BOUNCE_CONTROL",
+ "PerPkg": "1",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Uncore Clocks",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_S_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "FaST wire asserted",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_S_FAST_ASSERTED",
+ "PerPkg": "1",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_S_RING_AD_USED.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_S_RING_AD_USED.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down and Event",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_S_RING_AD_USED.DOWN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_S_RING_AD_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_S_RING_AD_USED.UP",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_S_RING_AD_USED.DOWN",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_S_RING_AK_USED.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_S_RING_AK_USED.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down and Event",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_S_RING_AK_USED.DOWN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_S_RING_AK_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_S_RING_AK_USED.UP",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_S_RING_AK_USED.DOWN",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_S_RING_BL_USED.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_S_RING_BL_USED.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down and Event",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_S_RING_BL_USED.DOWN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_S_RING_BL_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_S_RING_BL_USED.UP",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_S_RING_BL_USED.DOWN",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_S_RING_BOUNCES.AD_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; Acknowledgements to core",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_S_RING_BOUNCES.AK_CORE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; Data Responses to core",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_S_RING_BOUNCES.BL_CORE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; Snoops of processor's cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_S_RING_BOUNCES.IV_CORE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Any",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_S_RING_IV_USED.UP",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Any",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_S_RING_IV_USED.DN",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "UNC_S_RING_SINK_STARVED.AD_CACHE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_S_RING_SINK_STARVED.AD_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "UNC_S_RING_SINK_STARVED.AK_CORE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_S_RING_SINK_STARVED.AK_CORE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "UNC_S_RING_SINK_STARVED.BL_CORE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_S_RING_SINK_STARVED.BL_CORE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "UNC_S_RING_SINK_STARVED.IV_CORE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_S_RING_SINK_STARVED.IV_CORE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; AD - Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_S_RxR_BUSY_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; AD - Bounces",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_S_RxR_BUSY_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; BL - Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_S_RxR_BUSY_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; BL - Bounces",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_S_RxR_BUSY_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Bypass; AD - Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_S_RxR_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Bypass; AD - Bounces",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_S_RxR_BYPASS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Bypass; BL - Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_S_RxR_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Bypass; BL - Bounces",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_S_RxR_BYPASS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Bypass; AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_S_RxR_BYPASS.AK",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Bypass; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_S_RxR_BYPASS.IV",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; AD - Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_S_RxR_CRD_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; AD - Bounces",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_S_RxR_CRD_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; BL - Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_S_RxR_CRD_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; BL - Bounces",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_S_RxR_CRD_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_S_RxR_CRD_STARVED.AK",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_S_RxR_CRD_STARVED.IV",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; IVF Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_S_RxR_CRD_STARVED.IFV",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; AD - Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_S_RxR_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; AD - Bounces",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_S_RxR_INSERTS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; BL - Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_S_RxR_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; BL - Bounces",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_S_RxR_INSERTS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_S_RxR_INSERTS.AK",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_S_RxR_INSERTS.IV",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; AD - Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_S_RxR_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; AD - Bounces",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_S_RxR_OCCUPANCY.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; BL - Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_S_RxR_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; BL - Bounces",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_S_RxR_OCCUPANCY.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_S_RxR_OCCUPANCY.AK",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_S_RxR_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "UNC_S_TxR_ADS_USED.AD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_S_TxR_ADS_USED.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "UNC_S_TxR_ADS_USED.AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_S_TxR_ADS_USED.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "UNC_S_TxR_ADS_USED.BL",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_S_TxR_ADS_USED.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AD - Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_S_TxR_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AD - Bounces",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_S_TxR_INSERTS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; BL - Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_S_TxR_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; BL - Bounces",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_S_TxR_INSERTS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_S_TxR_INSERTS.AK",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_S_TxR_INSERTS.IV",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Egress Occupancy; AD - Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_S_TxR_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Egress Occupancy; AD - Bounces",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_S_TxR_OCCUPANCY.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Egress Occupancy; BL - Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_S_TxR_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Egress Occupancy; BL - Bounces",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_S_TxR_OCCUPANCY.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Egress Occupancy; AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_S_TxR_OCCUPANCY.AK",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Egress Occupancy; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_S_TxR_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto AD Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_S_TxR_STARVED.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto AK Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_S_TxR_STARVED.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto BL Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_S_TxR_STARVED.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto IV Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_S_TxR_STARVED.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "SBO"
+ },
+ {
+ "BriefDescription": "VLW Received",
+ "Counter": "0,1",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.DOORBELL_RCVD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "Counter": "0,1",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.ENABLE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "Counter": "0,1",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.DISABLE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "Counter": "0,1",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.U2C_ENABLE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "Counter": "0,1",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.U2C_DISABLE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Cycles PHOLD Assert to Ack; Assert to ACK",
+ "Counter": "0,1",
+ "EventCode": "0x45",
+ "EventName": "UNC_U_PHOLD_CYCLES.ASSERT_TO_ACK",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "RACU Request",
+ "Counter": "0,1",
+ "EventCode": "0x46",
+ "EventName": "UNC_U_RACU_REQUESTS",
+ "PerPkg": "1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Monitor T0",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.MONITOR_T0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Monitor T1",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.MONITOR_T1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Livelock",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.LIVELOCK",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; LTError",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.LTERROR",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Correctable Machine Check",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.CMC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Uncorrectable Machine Check",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.UMC",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Trap",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.TRAP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Other",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.OTHER",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_CLOCKTICKS",
+ "Counter": "0,1",
+ "EventName": "UNC_U_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "UBOX"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/uncore-power.json b/tools/perf/pmu-events/arch/x86/haswellx/uncore-power.json
index dd1b95655d1d..86b7c22af96b 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/uncore-power.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/uncore-power.json
@@ -1,91 +1,496 @@
[
{
- "BriefDescription": "PCU clock ticks. Use to get percentages of PCU cycles events",
+ "BriefDescription": "pclk Cycles",
"Counter": "0,1,2,3",
"EventName": "UNC_P_CLOCKTICKS",
"PerPkg": "1",
"Unit": "PCU"
},
{
- "BriefDescription": "This is an occupancy event that tracks the number of cores that are in C0. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details",
+ "BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
- "Filter": "occ_sel=1",
- "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C0 / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "power_state_occupancy.cores_c0 %",
+ "EventCode": "0x60",
+ "EventName": "UNC_P_CORE0_TRANSITION_CYCLES",
"PerPkg": "1",
"Unit": "PCU"
},
{
- "BriefDescription": "This is an occupancy event that tracks the number of cores that are in C3. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details",
+ "BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
- "Filter": "occ_sel=2",
- "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C3 / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "power_state_occupancy.cores_c3 %",
+ "EventCode": "0x6A",
+ "EventName": "UNC_P_CORE10_TRANSITION_CYCLES",
"PerPkg": "1",
"Unit": "PCU"
},
{
- "BriefDescription": "This is an occupancy event that tracks the number of cores that are in C6. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events ",
+ "BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
- "Filter": "occ_sel=3",
- "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C6 / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "power_state_occupancy.cores_c6 %",
+ "EventCode": "0x6B",
+ "EventName": "UNC_P_CORE11_TRANSITION_CYCLES",
"PerPkg": "1",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip",
+ "BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0xA",
- "EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
- "MetricExpr": "(UNC_P_PROCHOT_EXTERNAL_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "prochot_external_cycles %",
+ "EventCode": "0x6C",
+ "EventName": "UNC_P_CORE12_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6D",
+ "EventName": "UNC_P_CORE13_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6E",
+ "EventName": "UNC_P_CORE14_TRANSITION_CYCLES",
"PerPkg": "1",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when temperature is the upper limit on frequency",
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6F",
+ "EventName": "UNC_P_CORE15_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x70",
+ "EventName": "UNC_P_CORE16_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x71",
+ "EventName": "UNC_P_CORE17_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x61",
+ "EventName": "UNC_P_CORE1_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x62",
+ "EventName": "UNC_P_CORE2_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x63",
+ "EventName": "UNC_P_CORE3_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x64",
+ "EventName": "UNC_P_CORE4_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x65",
+ "EventName": "UNC_P_CORE5_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x66",
+ "EventName": "UNC_P_CORE6_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x67",
+ "EventName": "UNC_P_CORE7_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x68",
+ "EventName": "UNC_P_CORE8_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x69",
+ "EventName": "UNC_P_CORE9_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x30",
+ "EventName": "UNC_P_DEMOTIONS_CORE0",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_P_DEMOTIONS_CORE1",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3A",
+ "EventName": "UNC_P_DEMOTIONS_CORE10",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3B",
+ "EventName": "UNC_P_DEMOTIONS_CORE11",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3C",
+ "EventName": "UNC_P_DEMOTIONS_CORE12",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3D",
+ "EventName": "UNC_P_DEMOTIONS_CORE13",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3E",
+ "EventName": "UNC_P_DEMOTIONS_CORE14",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3F",
+ "EventName": "UNC_P_DEMOTIONS_CORE15",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_P_DEMOTIONS_CORE16",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_P_DEMOTIONS_CORE17",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_P_DEMOTIONS_CORE2",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_P_DEMOTIONS_CORE3",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_P_DEMOTIONS_CORE4",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_P_DEMOTIONS_CORE5",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x36",
+ "EventName": "UNC_P_DEMOTIONS_CORE6",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_P_DEMOTIONS_CORE7",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_P_DEMOTIONS_CORE8",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_P_DEMOTIONS_CORE9",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Frequency Residency",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB",
+ "EventName": "UNC_P_FREQ_BAND0_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Frequency Residency",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC",
+ "EventName": "UNC_P_FREQ_BAND1_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Frequency Residency",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD",
+ "EventName": "UNC_P_FREQ_BAND2_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Frequency Residency",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE",
+ "EventName": "UNC_P_FREQ_BAND3_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Thermal Strongest Upper Limit Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_max_limit_thermal_cycles %",
"PerPkg": "1",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when the OS is the upper limit on frequency",
+ "BriefDescription": "OS Strongest Upper Limit Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_P_FREQ_MAX_OS_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_MAX_OS_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_max_os_cycles %",
"PerPkg": "1",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when power is the upper limit on frequency",
+ "BriefDescription": "Power Strongest Upper Limit Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_MAX_POWER_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_max_power_cycles %",
"PerPkg": "1",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when current is the upper limit on frequency",
+ "BriefDescription": "IO P Limit Strongest Lower Limit Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x73",
+ "EventName": "UNC_P_FREQ_MIN_IO_P_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Cycles spent changing Frequency",
"Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "UNC_P_FREQ_TRANS_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_TRANS_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_trans_cycles %",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Memory Phase Shedding Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2F",
+ "EventName": "UNC_P_MEMORY_PHASE_SHEDDING_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_P_PKG_RESIDENCY_C0_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C2E",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_P_PKG_RESIDENCY_C2E_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_P_PKG_RESIDENCY_C3_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2D",
+ "EventName": "UNC_P_PKG_RESIDENCY_C6_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C7 State Residency",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_P_PKG_RESIDENCY_C7_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State; C0 and C1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State; C3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State; C6 and C7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "External Prochot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA",
+ "EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Internal Prochot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_P_PROCHOT_INTERNAL_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Total Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_P_TOTAL_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_UFS_TRANSITIONS_NO_CHANGE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x79",
+ "EventName": "UNC_P_UFS_TRANSITIONS_NO_CHANGE",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "VR Hot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x42",
+ "EventName": "UNC_P_VR_HOT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C1E",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4E",
+ "EventName": "UNC_P_PKG_RESIDENCY_C1E_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_UFS_TRANSITIONS_RING_GV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x79",
+ "EventName": "UNC_P_UFS_TRANSITIONS_RING_GV",
"PerPkg": "1",
"Unit": "PCU"
}
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/virtual-memory.json b/tools/perf/pmu-events/arch/x86/haswellx/virtual-memory.json
index ba3e77a9f9a0..57d2a6452fec 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/virtual-memory.json
@@ -481,4 +481,4 @@
"SampleAfterValue": "100003",
"UMask": "0x20"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/icelake/cache.json b/tools/perf/pmu-events/arch/x86/icelake/cache.json
index 9989f3338f0a..b4f28f24ee63 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/cache.json
@@ -303,7 +303,7 @@
"UMask": "0x41"
},
{
- "BriefDescription": "All retired load instructions.",
+ "BriefDescription": "Retired load instructions.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"Data_LA": "1",
@@ -311,12 +311,12 @@
"EventName": "MEM_INST_RETIRED.ALL_LOADS",
"PEBS": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions for loads.",
+ "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions of PREFETCHNTA or PREFETCHT0/1/2 or PREFETCHW.",
"SampleAfterValue": "1000003",
"UMask": "0x81"
},
{
- "BriefDescription": "All retired store instructions.",
+ "BriefDescription": "Retired store instructions.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"Data_LA": "1",
@@ -325,7 +325,7 @@
"L1_Hit_Indication": "1",
"PEBS": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts all retired store instructions. This event account for SW prefetch instructions and PREFETCHW instruction for stores.",
+ "PublicDescription": "Counts all retired store instructions.",
"SampleAfterValue": "1000003",
"UMask": "0x82"
},
diff --git a/tools/perf/pmu-events/arch/x86/icelake/floating-point.json b/tools/perf/pmu-events/arch/x86/icelake/floating-point.json
index 4347e2d0d090..1925388969bb 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/floating-point.json
@@ -99,4 +99,4 @@
"SampleAfterValue": "100003",
"UMask": "0x2"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/icelake/frontend.json b/tools/perf/pmu-events/arch/x86/icelake/frontend.json
index b510dd5d80da..739361d3f52f 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/frontend.json
@@ -494,4 +494,4 @@
"Speculative": "1",
"UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json b/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json
index 622c392f59be..f0356d66a927 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json
@@ -38,7 +38,7 @@
{
"BriefDescription": "Fraction of Physical Core issue-slots utilized by this Logical Processor",
"MetricExpr": "TOPDOWN.SLOTS / ( TOPDOWN.SLOTS / 2 ) if #SMT_on else 1",
- "MetricGroup": "SMT",
+ "MetricGroup": "SMT;TmaL1",
"MetricName": "Slots_Utilization"
},
{
@@ -61,25 +61,19 @@
"MetricName": "FLOPc"
},
{
- "BriefDescription": "Actual per-core usage of the Floating Point execution units (regardless of the vector width)",
+ "BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
"MetricExpr": "( (FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE) + (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) ) / ( 2 * CPU_CLK_UNHALTED.DISTRIBUTED )",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "FP_Arith_Utilization",
- "PublicDescription": "Actual per-core usage of the Floating Point execution units (regardless of the vector width). Values > 1 are possible due to Fused-Multiply Add (FMA) counting."
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
"MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
"MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
"MetricName": "ILP"
},
{
- "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "Bad;BadSpec;BrMispredicts",
- "MetricName": "IpMispredict"
- },
- {
"BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
"MetricExpr": "CPU_CLK_UNHALTED.DISTRIBUTED",
"MetricGroup": "SMT",
@@ -170,12 +164,24 @@
"PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
},
{
+ "BriefDescription": "Instructions per Software prefetch instruction (of any type: NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / cpu@SW_PREFETCH_ACCESS.T0\\,umask\\=0xF@",
+ "MetricGroup": "Prefetches",
+ "MetricName": "IpSWPF"
+ },
+ {
"BriefDescription": "Total number of retired Instructions, Sample with: INST_RETIRED.PREC_DIST",
"MetricExpr": "INST_RETIRED.ANY",
"MetricGroup": "Summary;TmaL1",
"MetricName": "Instructions"
},
{
+ "BriefDescription": "",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
+ "MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
+ "MetricName": "Execute"
+ },
+ {
"BriefDescription": "Average number of Uops issued by front-end when it issued something",
"MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=1@",
"MetricGroup": "Fed;FetchBW",
@@ -194,12 +200,24 @@
"MetricName": "DSB_Coverage"
},
{
- "BriefDescription": "Number of Instructions per non-speculative DSB miss",
+ "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / cpu@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=1\\,edge@",
+ "MetricGroup": "DSBmiss",
+ "MetricName": "DSB_Switch_Cost"
+ },
+ {
+ "BriefDescription": "Number of Instructions per non-speculative DSB miss (lower number means higher occurrence rate)",
"MetricExpr": "INST_RETIRED.ANY / FRONTEND_RETIRED.ANY_DSB_MISS",
"MetricGroup": "DSBmiss;Fed",
"MetricName": "IpDSB_Miss_Ret"
},
{
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts",
+ "MetricName": "IpMispredict"
+ },
+ {
"BriefDescription": "Fraction of branches that are non-taken conditionals",
"MetricExpr": "BR_INST_RETIRED.COND_NTAKEN / BR_INST_RETIRED.ALL_BRANCHES",
"MetricGroup": "Bad;Branches;CodeGen;PGO",
@@ -230,11 +248,10 @@
"MetricName": "Other_Branches"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load instructions (in core cycles)",
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
"MetricGroup": "Mem;MemoryBound;MemoryLat",
- "MetricName": "Load_Miss_Real_Latency",
- "PublicDescription": "Actual Average Latency for L1 data-cache miss demand load instructions (in core cycles). Latency may be overestimated for multi-load instructions - e.g. repeat strings."
+ "MetricName": "Load_Miss_Real_Latency"
},
{
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
@@ -243,30 +260,6 @@
"MetricName": "MLP"
},
{
- "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
- "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L1D_Cache_Fill_BW"
- },
- {
- "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L2_Cache_Fill_BW"
- },
- {
- "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L3_Cache_Fill_BW"
- },
- {
- "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW;Offcore",
- "MetricName": "L3_Cache_Access_BW"
- },
- {
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
"MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
"MetricGroup": "Mem;CacheMisses",
@@ -285,13 +278,13 @@
"MetricName": "L2MPKI"
},
{
- "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)",
"MetricExpr": "1000 * ( ( OFFCORE_REQUESTS.ALL_DATA_RD - OFFCORE_REQUESTS.DEMAND_DATA_RD ) + L2_RQSTS.ALL_DEMAND_MISS + L2_RQSTS.SWPF_MISS ) / INST_RETIRED.ANY",
"MetricGroup": "Mem;CacheMisses;Offcore",
"MetricName": "L2MPKI_All"
},
{
- "BriefDescription": "L2 cache misses per kilo instruction for all demand loads (including speculative)",
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)",
"MetricExpr": "1000 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY",
"MetricGroup": "Mem;CacheMisses",
"MetricName": "L2MPKI_Load"
@@ -309,7 +302,7 @@
"MetricName": "L3MPKI"
},
{
- "BriefDescription": "Fill Buffer (FB) true hits per kilo instructions for retired demand loads",
+ "BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)",
"MetricExpr": "1000 * MEM_LOAD_RETIRED.FB_HIT / INST_RETIRED.ANY",
"MetricGroup": "Mem;CacheMisses",
"MetricName": "FB_HPKI"
@@ -322,6 +315,54 @@
"MetricName": "Page_Walks_Utilization"
},
{
+ "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L1D_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L2_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L3_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "L3_Cache_Access_BW"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "(64 * L1D.REPLACEMENT / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L1D_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "(64 * L2_LINES_IN.ALL / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L2_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "(64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L3_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "(64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "L3_Cache_Access_BW_1T"
+ },
+ {
"BriefDescription": "Average CPU Utilization",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "HPC;Summary",
@@ -337,7 +378,8 @@
"BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "( ( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE ) / 1000000000 ) / duration_time",
"MetricGroup": "Cor;Flops;HPC",
- "MetricName": "GFLOPs"
+ "MetricName": "GFLOPs",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine."
},
{
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
diff --git a/tools/perf/pmu-events/arch/x86/icelake/uncore-other.json b/tools/perf/pmu-events/arch/x86/icelake/uncore-other.json
new file mode 100644
index 000000000000..e007b976547d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelake/uncore-other.json
@@ -0,0 +1,31 @@
+[
+ {
+ "BriefDescription": "Number of entries allocated. Account for Any type: e.g. Snoop, etc.",
+ "Counter": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_ARB_COH_TRK_REQUESTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of entries allocated. Account for Any type: e.g. Snoop, etc.",
+ "UMask": "0x01",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Total number of all outgoing entries allocated. Accounts for Coherent and non-coherent traffic.",
+ "Counter": "1",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_TRK_REQUESTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Total number of all outgoing entries allocated. Accounts for Coherent and non-coherent traffic.",
+ "UMask": "0x01",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "UNC_CLOCK.SOCKET",
+ "Counter": "FIXED",
+ "EventCode": "0xff",
+ "EventName": "UNC_CLOCK.SOCKET",
+ "PerPkg": "1",
+ "PublicDescription": "UNC_CLOCK.SOCKET",
+ "Unit": "CLOCK"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/icelake/virtual-memory.json b/tools/perf/pmu-events/arch/x86/icelake/virtual-memory.json
index a006fd7f7b18..58809e16bf98 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/virtual-memory.json
@@ -242,4 +242,4 @@
"Speculative": "1",
"UMask": "0x20"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/cache.json b/tools/perf/pmu-events/arch/x86/icelakex/cache.json
index 95fcbec188f8..775190bdd063 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/cache.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/cache.json
@@ -291,7 +291,7 @@
"UMask": "0x4f"
},
{
- "BriefDescription": "All retired load instructions.",
+ "BriefDescription": "Retired load instructions.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"Data_LA": "1",
@@ -299,12 +299,12 @@
"EventName": "MEM_INST_RETIRED.ALL_LOADS",
"PEBS": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions for loads.",
+ "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions of PREFETCHNTA or PREFETCHT0/1/2 or PREFETCHW.",
"SampleAfterValue": "1000003",
"UMask": "0x81"
},
{
- "BriefDescription": "All retired store instructions.",
+ "BriefDescription": "Retired store instructions.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"Data_LA": "1",
@@ -313,7 +313,7 @@
"L1_Hit_Indication": "1",
"PEBS": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts all retired store instructions. This event account for SW prefetch instructions and PREFETCHW instruction for stores.",
+ "PublicDescription": "Counts all retired store instructions.",
"SampleAfterValue": "1000003",
"UMask": "0x82"
},
@@ -409,7 +409,6 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3.",
"SampleAfterValue": "20011",
- "Speculative": "1",
"UMask": "0x4"
},
{
@@ -473,7 +472,6 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache.",
"SampleAfterValue": "20011",
- "Speculative": "1",
"UMask": "0x2"
},
{
@@ -867,7 +865,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that hit in the L3 or were snooped from another core's caches on the same socket.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.L3_HIT",
@@ -878,7 +876,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that resulted in a snoop hit a modified line in another core's caches which forwarded the data.",
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that resulted in a snoop hit a modified line in another core's caches which forwarded the data.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HITM",
@@ -889,7 +887,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that resulted in a snoop that hit in another core, which did not forward the data.",
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that resulted in a snoop that hit in another core, which did not forward the data.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HIT_NO_FWD",
@@ -900,7 +898,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that resulted in a snoop hit in another core's caches which forwarded the unmodified data to the requesting core.",
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that resulted in a snoop hit in another core's caches which forwarded the unmodified data to the requesting core.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HIT_WITH_FWD",
@@ -911,7 +909,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop was sent and data was returned (Modified or Not Modified).",
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop was sent and data was returned (Modified or Not Modified).",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.REMOTE_CACHE.SNOOP_FWD",
@@ -922,7 +920,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop hit a modified line in another core's caches which forwarded the data.",
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop hit a modified line in another core's caches which forwarded the data.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.REMOTE_CACHE.SNOOP_HITM",
@@ -933,7 +931,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop hit in another core's caches which forwarded the unmodified data to the requesting core.",
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop hit in another core's caches which forwarded the unmodified data to the requesting core.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.REMOTE_CACHE.SNOOP_HIT_WITH_FWD",
@@ -944,7 +942,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.SNC_CACHE.HITM",
@@ -955,7 +953,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.SNC_CACHE.HIT_WITH_FWD",
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/floating-point.json b/tools/perf/pmu-events/arch/x86/icelakex/floating-point.json
index 4347e2d0d090..1925388969bb 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/floating-point.json
@@ -99,4 +99,4 @@
"SampleAfterValue": "100003",
"UMask": "0x2"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/frontend.json b/tools/perf/pmu-events/arch/x86/icelakex/frontend.json
index f217c3211ba2..eb27d9d9c8be 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/frontend.json
@@ -481,4 +481,4 @@
"Speculative": "1",
"UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json b/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json
index be70672bfdb0..e905458b34b8 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json
@@ -18,24 +18,6 @@
"MetricName": "IPC"
},
{
- "BriefDescription": "Uops Per Instruction",
- "MetricExpr": "UOPS_RETIRED.SLOTS / INST_RETIRED.ANY",
- "MetricGroup": "Pipeline;Ret;Retire",
- "MetricName": "UPI"
- },
- {
- "BriefDescription": "Instruction per taken branch",
- "MetricExpr": "UOPS_RETIRED.SLOTS / BR_INST_RETIRED.NEAR_TAKEN",
- "MetricGroup": "Branches;Fed;FetchBW",
- "MetricName": "UpTB"
- },
- {
- "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
- "MetricExpr": "1 / (INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD)",
- "MetricGroup": "Pipeline;Mem",
- "MetricName": "CPI"
- },
- {
"BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "Pipeline",
@@ -50,7 +32,7 @@
{
"BriefDescription": "Fraction of Physical Core issue-slots utilized by this Logical Processor",
"MetricExpr": "TOPDOWN.SLOTS / ( TOPDOWN.SLOTS / 2 ) if #SMT_on else 1",
- "MetricGroup": "SMT",
+ "MetricGroup": "SMT;TmaL1",
"MetricName": "Slots_Utilization"
},
{
@@ -73,25 +55,19 @@
"MetricName": "FLOPc"
},
{
- "BriefDescription": "Actual per-core usage of the Floating Point execution units (regardless of the vector width)",
+ "BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
"MetricExpr": "( (FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE) + (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) ) / ( 2 * CPU_CLK_UNHALTED.DISTRIBUTED )",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "FP_Arith_Utilization",
- "PublicDescription": "Actual per-core usage of the Floating Point execution units (regardless of the vector width). Values > 1 are possible due to Fused-Multiply Add (FMA) counting."
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
"MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
"MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
"MetricName": "ILP"
},
{
- "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "Bad;BadSpec;BrMispredicts",
- "MetricName": "IpMispredict"
- },
- {
"BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
"MetricExpr": "CPU_CLK_UNHALTED.DISTRIBUTED",
"MetricGroup": "SMT",
@@ -182,36 +158,54 @@
"PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
},
{
+ "BriefDescription": "Instructions per Software prefetch instruction (of any type: NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / cpu@SW_PREFETCH_ACCESS.T0\\,umask\\=0xF@",
+ "MetricGroup": "Prefetches",
+ "MetricName": "IpSWPF"
+ },
+ {
"BriefDescription": "Total number of retired Instructions, Sample with: INST_RETIRED.PREC_DIST",
"MetricExpr": "INST_RETIRED.ANY",
"MetricGroup": "Summary;TmaL1",
"MetricName": "Instructions"
},
{
+ "BriefDescription": "",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
+ "MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
+ "MetricName": "Execute"
+ },
+ {
"BriefDescription": "Average number of Uops issued by front-end when it issued something",
"MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=1@",
"MetricGroup": "Fed;FetchBW",
"MetricName": "Fetch_UpC"
},
{
- "BriefDescription": "Fraction of Uops delivered by the LSD (Loop Stream Detector; aka Loop Cache)",
- "MetricExpr": "LSD.UOPS / (IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
- "MetricGroup": "Fed;LSD",
- "MetricName": "LSD_Coverage"
- },
- {
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / (IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
+ "MetricExpr": "IDQ.DSB_UOPS / (IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
"MetricGroup": "DSB;Fed;FetchBW",
"MetricName": "DSB_Coverage"
},
{
- "BriefDescription": "Number of Instructions per non-speculative DSB miss",
+ "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / cpu@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=1\\,edge@",
+ "MetricGroup": "DSBmiss",
+ "MetricName": "DSB_Switch_Cost"
+ },
+ {
+ "BriefDescription": "Number of Instructions per non-speculative DSB miss (lower number means higher occurrence rate)",
"MetricExpr": "INST_RETIRED.ANY / FRONTEND_RETIRED.ANY_DSB_MISS",
"MetricGroup": "DSBmiss;Fed",
"MetricName": "IpDSB_Miss_Ret"
},
{
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts",
+ "MetricName": "IpMispredict"
+ },
+ {
"BriefDescription": "Fraction of branches that are non-taken conditionals",
"MetricExpr": "BR_INST_RETIRED.COND_NTAKEN / BR_INST_RETIRED.ALL_BRANCHES",
"MetricGroup": "Bad;Branches;CodeGen;PGO",
@@ -242,11 +236,10 @@
"MetricName": "Other_Branches"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load instructions (in core cycles)",
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
"MetricGroup": "Mem;MemoryBound;MemoryLat",
- "MetricName": "Load_Miss_Real_Latency",
- "PublicDescription": "Actual Average Latency for L1 data-cache miss demand load instructions (in core cycles). Latency may be overestimated for multi-load instructions - e.g. repeat strings."
+ "MetricName": "Load_Miss_Real_Latency"
},
{
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
@@ -255,30 +248,6 @@
"MetricName": "MLP"
},
{
- "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
- "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L1D_Cache_Fill_BW"
- },
- {
- "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L2_Cache_Fill_BW"
- },
- {
- "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L3_Cache_Fill_BW"
- },
- {
- "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW;Offcore",
- "MetricName": "L3_Cache_Access_BW"
- },
- {
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
"MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
"MetricGroup": "Mem;CacheMisses",
@@ -297,13 +266,13 @@
"MetricName": "L2MPKI"
},
{
- "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)",
"MetricExpr": "1000 * ( ( OFFCORE_REQUESTS.ALL_DATA_RD - OFFCORE_REQUESTS.DEMAND_DATA_RD ) + L2_RQSTS.ALL_DEMAND_MISS + L2_RQSTS.SWPF_MISS ) / INST_RETIRED.ANY",
"MetricGroup": "Mem;CacheMisses;Offcore",
"MetricName": "L2MPKI_All"
},
{
- "BriefDescription": "L2 cache misses per kilo instruction for all demand loads (including speculative)",
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)",
"MetricExpr": "1000 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY",
"MetricGroup": "Mem;CacheMisses",
"MetricName": "L2MPKI_Load"
@@ -321,7 +290,7 @@
"MetricName": "L3MPKI"
},
{
- "BriefDescription": "Fill Buffer (FB) true hits per kilo instructions for retired demand loads",
+ "BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)",
"MetricExpr": "1000 * MEM_LOAD_RETIRED.FB_HIT / INST_RETIRED.ANY",
"MetricGroup": "Mem;CacheMisses",
"MetricName": "FB_HPKI"
@@ -334,6 +303,30 @@
"MetricName": "Page_Walks_Utilization"
},
{
+ "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L1D_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L2_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L3_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "L3_Cache_Access_BW"
+ },
+ {
"BriefDescription": "Rate of silent evictions from the L2 cache per Kilo instruction where the evicted lines are dropped (no writeback to L3 or memory)",
"MetricExpr": "1000 * L2_LINES_OUT.SILENT / INST_RETIRED.ANY",
"MetricGroup": "L2Evicts;Mem;Server",
@@ -346,6 +339,30 @@
"MetricName": "L2_Evictions_NonSilent_PKI"
},
{
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "(64 * L1D.REPLACEMENT / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L1D_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "(64 * L2_LINES_IN.ALL / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L2_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "(64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L3_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "(64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "L3_Cache_Access_BW_1T"
+ },
+ {
"BriefDescription": "Average CPU Utilization",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "HPC;Summary",
@@ -361,7 +378,8 @@
"BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "( ( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE ) / 1000000000 ) / duration_time",
"MetricGroup": "Cor;Flops;HPC",
- "MetricName": "GFLOPs"
+ "MetricName": "GFLOPs",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine."
},
{
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
@@ -469,6 +487,12 @@
"MetricName": "Socket_CLKS"
},
{
+ "BriefDescription": "Uncore frequency per die [GHZ]",
+ "MetricExpr": "cha_0@event\\=0x0@ / #num_dies / duration_time / 1000000000",
+ "MetricGroup": "SoC",
+ "MetricName": "UNCORE_FREQ"
+ },
+ {
"BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
"MetricGroup": "Branches;OS",
@@ -497,5 +521,544 @@
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"MetricName": "C6_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "Percentage of time spent in the active CPU power state C0",
+ "MetricExpr": "100 * CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricGroup": "",
+ "MetricName": "cpu_utilization_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "CPU operating frequency (in GHz)",
+ "MetricExpr": "(( CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC * #SYSTEM_TSC_FREQ ) / 1000000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "cpu_operating_frequency",
+ "ScaleUnit": "1GHz"
+ },
+ {
+ "BriefDescription": "Cycles per instruction retired; indicating how much time each executed instruction took; in units of cycles.",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "cpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "The ratio of number of completed memory load instructions to the total number completed instructions",
+ "MetricExpr": "MEM_INST_RETIRED.ALL_LOADS / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "loads_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "The ratio of number of completed memory store instructions to the total number completed instructions",
+ "MetricExpr": "MEM_INST_RETIRED.ALL_STORES / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "stores_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of requests missing L1 data cache (includes data+rfo w/ prefetches) to the total number of completed instructions",
+ "MetricExpr": "L1D.REPLACEMENT / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "l1d_mpi_includes_data_plus_rfo_with_prefetches",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of demand load requests hitting in L1 data cache to the total number of completed instructions ",
+ "MetricExpr": "MEM_LOAD_RETIRED.L1_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "l1d_demand_data_read_hits_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of code read requests missing in L1 instruction cache (includes prefetches) to the total number of completed instructions",
+ "MetricExpr": "L2_RQSTS.ALL_CODE_RD / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "l1_i_code_read_misses_with_prefetches_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed demand load requests hitting in L2 cache to the total number of completed instructions ",
+ "MetricExpr": "MEM_LOAD_RETIRED.L2_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "l2_demand_data_read_hits_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of requests missing L2 cache (includes code+data+rfo w/ prefetches) to the total number of completed instructions",
+ "MetricExpr": "L2_LINES_IN.ALL / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "l2_mpi_includes_code_plus_data_plus_rfo_with_prefetches",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed data read request missing L2 cache to the total number of completed instructions",
+ "MetricExpr": "MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "l2_demand_data_read_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of code read request missing L2 cache to the total number of completed instructions",
+ "MetricExpr": "L2_RQSTS.CODE_RD_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "l2_demand_code_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of data read requests missing last level core cache (includes demand w/ prefetches) to the total number of completed instructions",
+ "MetricExpr": "( UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA + UNC_CHA_TOR_INSERTS.IA_MISS_DRD + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF ) / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "llc_data_read_mpi_demand_plus_prefetch",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of code read requests missing last level core cache (includes demand w/ prefetches) to the total number of completed instructions",
+ "MetricExpr": "( UNC_CHA_TOR_INSERTS.IA_MISS_CRD ) / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "llc_code_read_mpi_demand_plus_prefetch",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Average latency of a last level cache (LLC) demand data read miss (read memory access) in nano seconds",
+ "MetricExpr": "( ( 1000000000 * ( UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD / UNC_CHA_TOR_INSERTS.IA_MISS_DRD ) / ( UNC_CHA_CLOCKTICKS / ( source_count(UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD) * #num_packages ) ) ) * duration_time )",
+ "MetricGroup": "",
+ "MetricName": "llc_demand_data_read_miss_latency",
+ "ScaleUnit": "1ns"
+ },
+ {
+ "BriefDescription": "Average latency of a last level cache (LLC) demand data read miss (read memory access) addressed to local memory in nano seconds",
+ "MetricExpr": "( ( 1000000000 * ( UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL ) / ( UNC_CHA_CLOCKTICKS / ( source_count(UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL) * #num_packages ) ) ) * duration_time )",
+ "MetricGroup": "",
+ "MetricName": "llc_demand_data_read_miss_latency_for_local_requests",
+ "ScaleUnit": "1ns"
+ },
+ {
+ "BriefDescription": "Average latency of a last level cache (LLC) demand data read miss (read memory access) addressed to remote memory in nano seconds",
+ "MetricExpr": "( ( 1000000000 * ( UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_REMOTE / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE ) / ( UNC_CHA_CLOCKTICKS / ( source_count(UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_REMOTE) * #num_packages ) ) ) * duration_time )",
+ "MetricGroup": "",
+ "MetricName": "llc_demand_data_read_miss_latency_for_remote_requests",
+ "ScaleUnit": "1ns"
+ },
+ {
+ "BriefDescription": "Average latency of a last level cache (LLC) demand data read miss (read memory access) addressed to Intel(R) Optane(TM) Persistent Memory(PMEM) in nano seconds",
+ "MetricExpr": "( ( 1000000000 * ( UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PMM / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PMM ) / ( UNC_CHA_CLOCKTICKS / ( source_count(UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PMM) * #num_packages ) ) ) * duration_time )",
+ "MetricGroup": "",
+ "MetricName": "llc_demand_data_read_miss_to_pmem_latency",
+ "ScaleUnit": "1ns"
+ },
+ {
+ "BriefDescription": "Average latency of a last level cache (LLC) demand data read miss (read memory access) addressed to DRAM in nano seconds",
+ "MetricExpr": "( ( 1000000000 * ( UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_DDR / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_DDR ) / ( UNC_CHA_CLOCKTICKS / ( source_count(UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_DDR) * #num_packages ) ) ) * duration_time )",
+ "MetricGroup": "",
+ "MetricName": "llc_demand_data_read_miss_to_dram_latency",
+ "ScaleUnit": "1ns"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for all page sizes) caused by a code fetch to the total number of completed instructions. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB.",
+ "MetricExpr": "ITLB_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "itlb_2nd_level_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for 2 megabyte and 4 megabyte page sizes) caused by a code fetch to the total number of completed instructions. This implies it missed in the Instruction Translation Lookaside Buffer (ITLB) and further levels of TLB.",
+ "MetricExpr": "ITLB_MISSES.WALK_COMPLETED_2M_4M / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "itlb_2nd_level_large_page_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for all page sizes) caused by demand data loads to the total number of completed instructions. This implies it missed in the DTLB and further levels of TLB.",
+ "MetricExpr": "DTLB_LOAD_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "dtlb_2nd_level_load_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for 2 megabyte page sizes) caused by demand data loads to the total number of completed instructions. This implies it missed in the Data Translation Lookaside Buffer (DTLB) and further levels of TLB.",
+ "MetricExpr": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "dtlb_2nd_level_2mb_large_page_load_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for all page sizes) caused by demand data stores to the total number of completed instructions. This implies it missed in the DTLB and further levels of TLB.",
+ "MetricExpr": "DTLB_STORE_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "dtlb_2nd_level_store_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Memory read that miss the last level cache (LLC) addressed to local DRAM as a percentage of total memory read accesses, does not include LLC prefetches.",
+ "MetricExpr": "100 * ( UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL ) / ( UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE )",
+ "MetricGroup": "",
+ "MetricName": "numa_percent_reads_addressed_to_local_dram",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Memory reads that miss the last level cache (LLC) addressed to remote DRAM as a percentage of total memory read accesses, does not include LLC prefetches.",
+ "MetricExpr": "100 * ( UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE ) / ( UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE )",
+ "MetricGroup": "",
+ "MetricName": "numa_percent_reads_addressed_to_remote_dram",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Uncore operating frequency in GHz",
+ "MetricExpr": "( UNC_CHA_CLOCKTICKS / ( source_count(UNC_CHA_CLOCKTICKS) * #num_packages ) / 1000000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "uncore_frequency",
+ "ScaleUnit": "1GHz"
+ },
+ {
+ "BriefDescription": "Intel(R) Ultra Path Interconnect (UPI) data transmit bandwidth (MB/sec)",
+ "MetricExpr": "( UNC_UPI_TxL_FLITS.ALL_DATA * (64 / 9.0) / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "upi_data_transmit_bw_only_data",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "DDR memory read bandwidth (MB/sec)",
+ "MetricExpr": "( UNC_M_CAS_COUNT.RD * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "memory_bandwidth_read",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "DDR memory write bandwidth (MB/sec)",
+ "MetricExpr": "( UNC_M_CAS_COUNT.WR * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "memory_bandwidth_write",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "DDR memory bandwidth (MB/sec)",
+ "MetricExpr": "(( UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR ) * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "memory_bandwidth_total",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Intel(R) Optane(TM) Persistent Memory(PMEM) memory read bandwidth (MB/sec)",
+ "MetricExpr": "( UNC_M_PMM_RPQ_INSERTS * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "pmem_memory_bandwidth_read",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Intel(R) Optane(TM) Persistent Memory(PMEM) memory write bandwidth (MB/sec)",
+ "MetricExpr": "( UNC_M_PMM_WPQ_INSERTS * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "pmem_memory_bandwidth_write",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Intel(R) Optane(TM) Persistent Memory(PMEM) memory bandwidth (MB/sec)",
+ "MetricExpr": "(( UNC_M_PMM_RPQ_INSERTS + UNC_M_PMM_WPQ_INSERTS ) * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "pmem_memory_bandwidth_total",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth of IO reads that are initiated by end device controllers that are requesting memory from the CPU.",
+ "MetricExpr": "(( UNC_CHA_TOR_INSERTS.IO_HIT_PCIRDCUR + UNC_CHA_TOR_INSERTS.IO_MISS_PCIRDCUR ) * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "io_bandwidth_read",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth of IO writes that are initiated by end device controllers that are writing memory to the CPU.",
+ "MetricExpr": "(( UNC_CHA_TOR_INSERTS.IO_HIT_ITOM + UNC_CHA_TOR_INSERTS.IO_MISS_ITOM + UNC_CHA_TOR_INSERTS.IO_HIT_ITOMCACHENEAR + UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR ) * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "io_bandwidth_write",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Uops delivered from decoded instruction cache (decoded stream buffer or DSB) as a percent of total uops delivered to Instruction Decode Queue",
+ "MetricExpr": "100 * ( IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS + LSD.UOPS ) )",
+ "MetricGroup": "",
+ "MetricName": "percent_uops_delivered_from_decoded_icache_dsb",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Uops delivered from legacy decode pipeline (Micro-instruction Translation Engine or MITE) as a percent of total uops delivered to Instruction Decode Queue",
+ "MetricExpr": "100 * ( IDQ.MITE_UOPS / ( IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS + LSD.UOPS ) )",
+ "MetricGroup": "",
+ "MetricName": "percent_uops_delivered_from_legacy_decode_pipeline_mite",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Uops delivered from microcode sequencer (MS) as a percent of total uops delivered to Instruction Decode Queue",
+ "MetricExpr": "100 * ( IDQ.MS_UOPS / ( IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS + LSD.UOPS ) )",
+ "MetricGroup": "",
+ "MetricName": "percent_uops_delivered_from_microcode_sequencer_ms",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Bandwidth (MB/sec) of read requests that miss the last level cache (LLC) and go to local memory.",
+ "MetricExpr": "( UNC_CHA_REQUESTS.READS_LOCAL * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "llc_miss_local_memory_bandwidth_read",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth (MB/sec) of write requests that miss the last level cache (LLC) and go to local memory.",
+ "MetricExpr": "( UNC_CHA_REQUESTS.WRITES_LOCAL * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "llc_miss_local_memory_bandwidth_write",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth (MB/sec) of read requests that miss the last level cache (LLC) and go to remote memory.",
+ "MetricExpr": "( UNC_CHA_REQUESTS.READS_REMOTE * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "llc_miss_remote_memory_bandwidth_read",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth (MB/sec) of write requests that miss the last level cache (LLC) and go to remote memory.",
+ "MetricExpr": "( UNC_CHA_REQUESTS.WRITES_REMOTE * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "llc_miss_remote_memory_bandwidth_write",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Machine_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+ "MetricExpr": "100 * ( topdown\\-fe\\-bound / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) - INT_MISC.UOP_DROPPING / ( slots ) )",
+ "MetricGroup": "TmaL1;PGO",
+ "MetricName": "tma_frontend_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period.",
+ "MetricExpr": "100 * ( ( ( 5 ) * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE - INT_MISC.UOP_DROPPING ) / ( slots ) )",
+ "MetricGroup": "Frontend;TmaL2;m_tma_frontend_bound_percent",
+ "MetricName": "tma_fetch_latency_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses.",
+ "MetricExpr": "100 * ( ICACHE_16B.IFDATA_STALL / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "BigFoot;FetchLat;IcMiss;TmaL3;m_tma_fetch_latency_percent",
+ "MetricName": "tma_icache_misses_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses.",
+ "MetricExpr": "100 * ( ICACHE_64B.IFTAG_STALL / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TmaL3;m_tma_fetch_latency_percent",
+ "MetricName": "tma_itlb_misses_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings.",
+ "MetricExpr": "100 * ( INT_MISC.CLEAR_RESTEER_CYCLES / ( CPU_CLK_UNHALTED.THREAD ) + ( ( 10 ) * BACLEARS.ANY / ( CPU_CLK_UNHALTED.THREAD ) ) )",
+ "MetricGroup": "FetchLat;TmaL3;m_tma_fetch_latency_percent",
+ "MetricName": "tma_branch_resteers_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty.",
+ "MetricExpr": "100 * ( DSB2MITE_SWITCHES.PENALTY_CYCLES / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "DSBmiss;FetchLat;TmaL3;m_tma_fetch_latency_percent",
+ "MetricName": "tma_dsb_switches_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs.",
+ "MetricExpr": "100 * ( ILD_STALL.LCP / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "FetchLat;TmaL3;m_tma_fetch_latency_percent",
+ "MetricName": "tma_lcp_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals.",
+ "MetricExpr": "100 * ( ( 3 ) * IDQ.MS_SWITCHES / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "FetchLat;MicroSeq;TmaL3;m_tma_fetch_latency_percent",
+ "MetricName": "tma_ms_switches_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend.",
+ "MetricExpr": "100 * ( max( 0 , ( topdown\\-fe\\-bound / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) - INT_MISC.UOP_DROPPING / ( slots ) ) - ( ( ( 5 ) * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE - INT_MISC.UOP_DROPPING ) / ( slots ) ) ) )",
+ "MetricGroup": "FetchBW;Frontend;TmaL2;m_tma_frontend_bound_percent",
+ "MetricName": "tma_fetch_bandwidth_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck.",
+ "MetricExpr": "100 * ( ( IDQ.MITE_CYCLES_ANY - IDQ.MITE_CYCLES_OK ) / ( CPU_CLK_UNHALTED.DISTRIBUTED ) / 2 )",
+ "MetricGroup": "DSBmiss;FetchBW;TmaL3;m_tma_fetch_bandwidth_percent",
+ "MetricName": "tma_mite_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
+ "MetricExpr": "100 * ( ( IDQ.DSB_CYCLES_ANY - IDQ.DSB_CYCLES_OK ) / ( CPU_CLK_UNHALTED.DISTRIBUTED ) / 2 )",
+ "MetricGroup": "DSB;FetchBW;TmaL3;m_tma_fetch_bandwidth_percent",
+ "MetricName": "tma_dsb_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "MetricExpr": "100 * ( max( 1 - ( ( topdown\\-fe\\-bound / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) - INT_MISC.UOP_DROPPING / ( slots ) ) + ( topdown\\-be\\-bound / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) + ( ( 5 ) * cpu@INT_MISC.RECOVERY_CYCLES\\,cmask\\=0x1\\,edge\\=0x1@ ) / ( slots ) ) + ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) ) , 0 ) )",
+ "MetricGroup": "TmaL1",
+ "MetricName": "tma_bad_speculation_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path.",
+ "MetricExpr": "100 * ( ( BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT ) ) * ( max( 1 - ( ( topdown\\-fe\\-bound / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) - INT_MISC.UOP_DROPPING / ( slots ) ) + ( topdown\\-be\\-bound / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) + ( ( 5 ) * cpu@INT_MISC.RECOVERY_CYCLES\\,cmask\\=0x1\\,edge\\=0x1@ ) / ( slots ) ) + ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) ) , 0 ) ) )",
+ "MetricGroup": "BadSpec;BrMispredicts;TmaL2;m_tma_bad_speculation_percent",
+ "MetricName": "tma_branch_mispredicts_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes.",
+ "MetricExpr": "100 * ( max( 0 , ( max( 1 - ( ( topdown\\-fe\\-bound / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) - INT_MISC.UOP_DROPPING / ( slots ) ) + ( topdown\\-be\\-bound / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) + ( ( 5 ) * cpu@INT_MISC.RECOVERY_CYCLES\\,cmask\\=0x1\\,edge\\=0x1@ ) / ( slots ) ) + ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) ) , 0 ) ) - ( ( BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT ) ) * ( max( 1 - ( ( topdown\\-fe\\-bound / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) - INT_MISC.UOP_DROPPING / ( slots ) ) + ( topdown\\-be\\-bound / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) + ( ( 5 ) * cpu@INT_MISC.RECOVERY_CYCLES\\,cmask\\=0x1\\,edge\\=0x1@ ) / ( slots ) ) + ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) ) , 0 ) ) ) ) )",
+ "MetricGroup": "BadSpec;MachineClears;TmaL2;m_tma_bad_speculation_percent",
+ "MetricName": "tma_machine_clears_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "MetricExpr": "100 * ( topdown\\-be\\-bound / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) + ( ( 5 ) * cpu@INT_MISC.RECOVERY_CYCLES\\,cmask\\=0x1\\,edge\\=0x1@ ) / ( slots ) )",
+ "MetricGroup": "TmaL1",
+ "MetricName": "tma_backend_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
+ "MetricExpr": "100 * ( ( ( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / ( CYCLE_ACTIVITY.STALLS_TOTAL + ( EXE_ACTIVITY.1_PORTS_UTIL + ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * EXE_ACTIVITY.2_PORTS_UTIL ) + EXE_ACTIVITY.BOUND_ON_STORES ) ) * ( topdown\\-be\\-bound / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) + ( ( 5 ) * cpu@INT_MISC.RECOVERY_CYCLES\\,cmask\\=0x1\\,edge\\=0x1@ ) / ( slots ) ) )",
+ "MetricGroup": "Backend;TmaL2;m_tma_backend_bound_percent",
+ "MetricName": "tma_memory_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache.",
+ "MetricExpr": "100 * ( max( ( CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS ) / ( CPU_CLK_UNHALTED.THREAD ) , 0 ) )",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TmaL3;m_tma_memory_bound_percent",
+ "MetricName": "tma_l1_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance.",
+ "MetricExpr": "100 * ( ( ( MEM_LOAD_RETIRED.L2_HIT * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) / ( ( MEM_LOAD_RETIRED.L2_HIT * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + L1D_PEND_MISS.FB_FULL_PERIODS ) ) * ( ( CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS ) / ( CPU_CLK_UNHALTED.THREAD ) ) )",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TmaL3;m_tma_memory_bound_percent",
+ "MetricName": "tma_l2_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance.",
+ "MetricExpr": "100 * ( ( CYCLE_ACTIVITY.STALLS_L2_MISS - CYCLE_ACTIVITY.STALLS_L3_MISS ) / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TmaL3;m_tma_memory_bound_percent",
+ "MetricName": "tma_l3_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance.",
+ "MetricExpr": "100 * ( min( ( ( ( CYCLE_ACTIVITY.STALLS_L3_MISS / ( CPU_CLK_UNHALTED.THREAD ) + ( ( CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS ) / ( CPU_CLK_UNHALTED.THREAD ) ) - ( ( ( MEM_LOAD_RETIRED.L2_HIT * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) / ( ( MEM_LOAD_RETIRED.L2_HIT * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + L1D_PEND_MISS.FB_FULL_PERIODS ) ) * ( ( CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS ) / ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( min( ( ( ( ( 1 - ( ( ( 19 * ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + 10 * ( ( MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) ) ) / ( ( 19 * ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + 10 * ( ( MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) ) ) + ( 25 * ( ( MEM_LOAD_RETIRED.LOCAL_PMM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) ) + 33 * ( ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) ) ) ) ) ) ) * ( CYCLE_ACTIVITY.STALLS_L3_MISS / ( CPU_CLK_UNHALTED.THREAD ) + ( ( CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS ) / ( CPU_CLK_UNHALTED.THREAD ) ) - ( ( ( MEM_LOAD_RETIRED.L2_HIT * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) / ( ( MEM_LOAD_RETIRED.L2_HIT * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + L1D_PEND_MISS.FB_FULL_PERIODS ) ) * ( ( CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS ) / ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) if ( ( 1000000 ) * ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM + MEM_LOAD_RETIRED.LOCAL_PMM ) > MEM_LOAD_RETIRED.L1_MISS ) else 0 ) ) , ( 1 ) ) ) ) ) , ( 1 ) ) )",
+ "MetricGroup": "MemoryBound;TmaL3mem;TmaL3;m_tma_memory_bound_percent",
+ "MetricName": "tma_dram_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates (based on idle latencies) how often the CPU was stalled on accesses to external 3D-Xpoint (Crystal Ridge, a.k.a. IXP) memory by loads, PMM stands for Persistent Memory Module. ",
+ "MetricExpr": "100 * ( min( ( ( ( ( 1 - ( ( ( 19 * ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + 10 * ( ( MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) ) ) / ( ( 19 * ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + 10 * ( ( MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) ) ) + ( 25 * ( ( MEM_LOAD_RETIRED.LOCAL_PMM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) ) + 33 * ( ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) ) ) ) ) ) ) * ( CYCLE_ACTIVITY.STALLS_L3_MISS / ( CPU_CLK_UNHALTED.THREAD ) + ( ( CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS ) / ( CPU_CLK_UNHALTED.THREAD ) ) - ( ( ( MEM_LOAD_RETIRED.L2_HIT * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) / ( ( MEM_LOAD_RETIRED.L2_HIT * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + L1D_PEND_MISS.FB_FULL_PERIODS ) ) * ( ( CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS ) / ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) if ( ( 1000000 ) * ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM + MEM_LOAD_RETIRED.LOCAL_PMM ) > MEM_LOAD_RETIRED.L1_MISS ) else 0 ) ) , ( 1 ) ) )",
+ "MetricGroup": "MemoryBound;Server;TmaL3mem;TmaL3;m_tma_memory_bound_percent",
+ "MetricName": "tma_pmm_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck.",
+ "MetricExpr": "100 * ( EXE_ACTIVITY.BOUND_ON_STORES / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "MemoryBound;TmaL3mem;TmaL3;m_tma_memory_bound_percent",
+ "MetricName": "tma_store_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
+ "MetricExpr": "100 * ( max( 0 , ( topdown\\-be\\-bound / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) + ( ( 5 ) * cpu@INT_MISC.RECOVERY_CYCLES\\,cmask\\=0x1\\,edge\\=0x1@ ) / ( slots ) ) - ( ( ( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / ( CYCLE_ACTIVITY.STALLS_TOTAL + ( EXE_ACTIVITY.1_PORTS_UTIL + ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * EXE_ACTIVITY.2_PORTS_UTIL ) + EXE_ACTIVITY.BOUND_ON_STORES ) ) * ( topdown\\-be\\-bound / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) + ( ( 5 ) * cpu@INT_MISC.RECOVERY_CYCLES\\,cmask\\=0x1\\,edge\\=0x1@ ) / ( slots ) ) ) ) )",
+ "MetricGroup": "Backend;TmaL2;Compute;m_tma_backend_bound_percent",
+ "MetricName": "tma_core_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication.",
+ "MetricExpr": "100 * ( ARITH.DIVIDER_ACTIVE / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "TmaL3;m_tma_core_bound_percent",
+ "MetricName": "tma_divider_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. ",
+ "MetricExpr": "( 100 * ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) ) + ( 0 * slots )",
+ "MetricGroup": "TmaL1",
+ "MetricName": "tma_retiring_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved.",
+ "MetricExpr": "100 * ( max( 0 , ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) - ( ( ( ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) / UOPS_ISSUED.ANY ) * IDQ.MS_UOPS / ( slots ) ) + ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( UOPS_DECODED.DEC0 - cpu@UOPS_DECODED.DEC0\\,cmask\\=0x1@ ) / IDQ.MITE_UOPS ) ) )",
+ "MetricGroup": "Retire;TmaL2;m_tma_retiring_percent",
+ "MetricName": "tma_light_operations_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
+ "MetricExpr": "100 * ( ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * UOPS_EXECUTED.X87 / UOPS_EXECUTED.THREAD ) + ( ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) / ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) ) + ( min( ( ( FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE ) / ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) ) , ( 1 ) ) ) )",
+ "MetricGroup": "HPC;TmaL3;m_tma_light_operations_percent",
+ "MetricName": "tma_fp_arith_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations -- uops for memory load or store accesses.",
+ "MetricExpr": "100 * ( ( max( 0 , ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) - ( ( ( ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) / UOPS_ISSUED.ANY ) * IDQ.MS_UOPS / ( slots ) ) + ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( UOPS_DECODED.DEC0 - cpu@UOPS_DECODED.DEC0\\,cmask\\=0x1@ ) / IDQ.MITE_UOPS ) ) ) * MEM_INST_RETIRED.ANY / INST_RETIRED.ANY )",
+ "MetricGroup": "Pipeline;TmaL3;m_tma_light_operations_percent",
+ "MetricName": "tma_memory_operations_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions.",
+ "MetricExpr": "100 * ( ( max( 0 , ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) - ( ( ( ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) / UOPS_ISSUED.ANY ) * IDQ.MS_UOPS / ( slots ) ) + ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( UOPS_DECODED.DEC0 - cpu@UOPS_DECODED.DEC0\\,cmask\\=0x1@ ) / IDQ.MITE_UOPS ) ) ) * BR_INST_RETIRED.ALL_BRANCHES / ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) )",
+ "MetricGroup": "Pipeline;TmaL3;m_tma_light_operations_percent",
+ "MetricName": "tma_branch_instructions_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body.",
+ "MetricExpr": "100 * ( ( max( 0 , ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) - ( ( ( ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) / UOPS_ISSUED.ANY ) * IDQ.MS_UOPS / ( slots ) ) + ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( UOPS_DECODED.DEC0 - cpu@UOPS_DECODED.DEC0\\,cmask\\=0x1@ ) / IDQ.MITE_UOPS ) ) ) * INST_RETIRED.NOP / ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) )",
+ "MetricGroup": "Pipeline;TmaL3;m_tma_light_operations_percent",
+ "MetricName": "tma_nop_instructions_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes. May undercount due to FMA double counting",
+ "MetricExpr": "100 * ( max( 0 , ( max( 0 , ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) - ( ( ( ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) / UOPS_ISSUED.ANY ) * IDQ.MS_UOPS / ( slots ) ) + ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( UOPS_DECODED.DEC0 - cpu@UOPS_DECODED.DEC0\\,cmask\\=0x1@ ) / IDQ.MITE_UOPS ) ) ) - ( ( ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * UOPS_EXECUTED.X87 / UOPS_EXECUTED.THREAD ) + ( ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) / ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) ) + ( min( ( ( FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE ) / ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) ) , ( 1 ) ) ) ) + ( ( max( 0 , ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) - ( ( ( ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) / UOPS_ISSUED.ANY ) * IDQ.MS_UOPS / ( slots ) ) + ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( UOPS_DECODED.DEC0 - cpu@UOPS_DECODED.DEC0\\,cmask\\=0x1@ ) / IDQ.MITE_UOPS ) ) ) * MEM_INST_RETIRED.ANY / INST_RETIRED.ANY ) + ( ( max( 0 , ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) - ( ( ( ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) / UOPS_ISSUED.ANY ) * IDQ.MS_UOPS / ( slots ) ) + ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( UOPS_DECODED.DEC0 - cpu@UOPS_DECODED.DEC0\\,cmask\\=0x1@ ) / IDQ.MITE_UOPS ) ) ) * BR_INST_RETIRED.ALL_BRANCHES / ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) ) + ( ( max( 0 , ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) - ( ( ( ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) / UOPS_ISSUED.ANY ) * IDQ.MS_UOPS / ( slots ) ) + ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( UOPS_DECODED.DEC0 - cpu@UOPS_DECODED.DEC0\\,cmask\\=0x1@ ) / IDQ.MITE_UOPS ) ) ) * INST_RETIRED.NOP / ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) ) ) ) )",
+ "MetricGroup": "Pipeline;TmaL3;m_tma_light_operations_percent",
+ "MetricName": "tma_other_light_ops_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or microcoded sequences. This highly-correlates with the uop length of these instructions/sequences.",
+ "MetricExpr": "100 * ( ( ( ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) / UOPS_ISSUED.ANY ) * IDQ.MS_UOPS / ( slots ) ) + ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( UOPS_DECODED.DEC0 - cpu@UOPS_DECODED.DEC0\\,cmask\\=0x1@ ) / IDQ.MITE_UOPS )",
+ "MetricGroup": "Retire;TmaL2;m_tma_retiring_percent",
+ "MetricName": "tma_heavy_operations_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring instructions that that are decoder into two or up to ([SNB+] four; [ADL+] five) uops. This highly-correlates with the number of uops in such instructions.",
+ "MetricExpr": "100 * ( ( ( ( ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) / UOPS_ISSUED.ANY ) * IDQ.MS_UOPS / ( slots ) ) + ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( UOPS_DECODED.DEC0 - cpu@UOPS_DECODED.DEC0\\,cmask\\=0x1@ ) / IDQ.MITE_UOPS ) - ( ( ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) / UOPS_ISSUED.ANY ) * IDQ.MS_UOPS / ( slots ) ) )",
+ "MetricGroup": "TmaL3;m_tma_heavy_operations_percent",
+ "MetricName": "tma_few_uops_instructions_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided.",
+ "MetricExpr": "100 * ( ( ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) / UOPS_ISSUED.ANY ) * IDQ.MS_UOPS / ( slots ) )",
+ "MetricGroup": "MicroSeq;TmaL3;m_tma_heavy_operations_percent",
+ "MetricName": "tma_microcode_sequencer_percent",
+ "ScaleUnit": "1%"
}
]
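The uncore latency and frequency metrics added above follow a Little's-law style derivation: TOR occupancy divided by TOR inserts gives the average number of CHA clock cycles a demand data read miss stays in flight, and dividing by one CHA's clock rate converts those cycles into nanoseconds. The sketch below evaluates llc_demand_data_read_miss_latency and uncore_frequency on made-up counter totals; the helper names and sample values are illustrative only and are not part of the patch. In the MetricExpr strings themselves, source_count() and #num_packages supply the CHA and package counts that the sketch takes as explicit arguments.

# Sketch of the llc_demand_data_read_miss_latency and uncore_frequency
# MetricExprs above, evaluated on hypothetical counter totals.

def uncore_frequency_ghz(cha_clockticks, num_cha, num_packages, duration_s):
    # UNC_CHA_CLOCKTICKS is summed over every CHA instance, so divide by the
    # CHA count per package times the package count to get one CHA's ticks.
    return (cha_clockticks / (num_cha * num_packages) / 1e9) / duration_s

def llc_miss_latency_ns(tor_occupancy, tor_inserts,
                        cha_clockticks, num_cha, num_packages, duration_s):
    # occupancy / inserts = average CHA cycles each miss spends in the TOR;
    # dividing by one CHA's cycles-per-second converts that to seconds, and
    # the 1e9 factor expresses the result in nanoseconds.
    cycles_per_miss = tor_occupancy / tor_inserts
    cha_hz = (cha_clockticks / (num_cha * num_packages)) / duration_s
    return 1e9 * cycles_per_miss / cha_hz

# Hypothetical 1-second sample on a 2-socket system with 40 CHAs per socket.
print(uncore_frequency_ghz(1.6e11, 40, 2, 1.0))               # ~2.0 GHz
print(llc_miss_latency_ns(6.0e8, 2.0e6, 1.6e11, 40, 2, 1.0))  # ~150 ns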
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/memory.json b/tools/perf/pmu-events/arch/x86/icelakex/memory.json
index 58b03a8a1b95..48e8d1102b9d 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/memory.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/memory.json
@@ -306,7 +306,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.L3_MISS",
@@ -317,7 +317,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches and were supplied by the local socket.",
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches and were supplied by the local socket.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.L3_MISS_LOCAL",
@@ -328,7 +328,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that missed the L3 Cache and were supplied by the local socket (DRAM or PMM), whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts PMM or DRAM accesses that are controlled by the close or distant SNC Cluster.",
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that missed the L3 Cache and were supplied by the local socket (DRAM or PMM), whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts PMM or DRAM accesses that are controlled by the close or distant SNC Cluster.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.L3_MISS_LOCAL_SOCKET",
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/other.json b/tools/perf/pmu-events/arch/x86/icelakex/other.json
index c9bf6808ead7..919e620e7db8 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/other.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/other.json
@@ -44,7 +44,6 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts responses to snoops indicating the line will now be (I)nvalidated: removed from this core's cache, after the data is forwarded back to the requestor and indicating the data was found unmodified in the (FE) Forward or Exclusive State in this cores caches cache. A single snoop response from the core counts on all hyperthreads of the core.",
"SampleAfterValue": "1000003",
- "Speculative": "1",
"UMask": "0x20"
},
{
@@ -56,7 +55,6 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts responses to snoops indicating the line will now be (I)nvalidated: removed from this core's caches, after the data is forwarded back to the requestor, and indicating the data was found modified(M) in this cores caches cache (aka HitM response). A single snoop response from the core counts on all hyperthreads of the core.",
"SampleAfterValue": "1000003",
- "Speculative": "1",
"UMask": "0x10"
},
{
@@ -68,7 +66,6 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts responses to snoops indicating the line will now be (I)nvalidated in this core's caches without being forwarded back to the requestor. The line was in Forward, Shared or Exclusive (FSE) state in this cores caches. A single snoop response from the core counts on all hyperthreads of the core.",
"SampleAfterValue": "1000003",
- "Speculative": "1",
"UMask": "0x2"
},
{
@@ -80,7 +77,6 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts responses to snoops indicating that the data was not found (IHitI) in this core's caches. A single snoop response from the core counts on all hyperthreads of the Core.",
"SampleAfterValue": "1000003",
- "Speculative": "1",
"UMask": "0x1"
},
{
@@ -92,7 +88,6 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts responses to snoops indicating the line may be kept on this core in the (S)hared state, after the data is forwarded back to the requestor, initially the data was found in the cache in the (FS) Forward or Shared state. A single snoop response from the core counts on all hyperthreads of the core.",
"SampleAfterValue": "1000003",
- "Speculative": "1",
"UMask": "0x40"
},
{
@@ -104,7 +99,6 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts responses to snoops indicating the line may be kept on this core in the (S)hared state, after the data is forwarded back to the requestor, initially the data was found in the cache in the (M)odified state. A single snoop response from the core counts on all hyperthreads of the core.",
"SampleAfterValue": "1000003",
- "Speculative": "1",
"UMask": "0x8"
},
{
@@ -116,7 +110,6 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts responses to snoops indicating the line was kept on this core in the (S)hared state, and that the data was found unmodified but not forwarded back to the requestor, initially the data was found in the cache in the (FSE) Forward, Shared state or Exclusive state. A single snoop response from the core counts on all hyperthreads of the core.",
"SampleAfterValue": "1000003",
- "Speculative": "1",
"UMask": "0x4"
},
{
@@ -428,7 +421,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have any type of response.",
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have any type of response.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.ANY_RESPONSE",
@@ -439,7 +432,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM.",
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.DRAM",
@@ -450,7 +443,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.LOCAL_DRAM",
@@ -461,7 +454,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by PMM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those PMM accesses that are controlled by the close SNC Cluster.",
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by PMM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those PMM accesses that are controlled by the close SNC Cluster.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.LOCAL_PMM",
@@ -472,7 +465,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts DRAM accesses that are controlled by the close or distant SNC Cluster.",
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts DRAM accesses that are controlled by the close or distant SNC Cluster.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.LOCAL_SOCKET_DRAM",
@@ -483,7 +476,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by PMM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts PMM accesses that are controlled by the close or distant SNC Cluster.",
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by PMM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts PMM accesses that are controlled by the close or distant SNC Cluster.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.LOCAL_SOCKET_PMM",
@@ -494,7 +487,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches and were supplied by a remote socket.",
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches and were supplied by a remote socket.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.REMOTE",
@@ -505,7 +498,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to another socket.",
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to another socket.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.REMOTE_DRAM",
@@ -516,7 +509,18 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by PMM attached to another socket.",
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM or PMM attached to another socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.READS_TO_CORE.REMOTE_MEMORY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x731800477",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by PMM attached to another socket.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.REMOTE_PMM",
@@ -527,7 +531,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.SNC_DRAM",
@@ -538,7 +542,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by PMM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by PMM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.SNC_PMM",
@@ -558,5 +562,16 @@
"Offcore": "1",
"SampleAfterValue": "100003",
"UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts Demand RFOs, ItoM's, PREFECTHW's, Hardware RFO Prefetches to the L1/L2 and Streaming stores that likely resulted in a store to Memory (DRAM or PMM)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.WRITE_ESTIMATE.MEMORY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0xFBFF80822",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/pipeline.json b/tools/perf/pmu-events/arch/x86/icelakex/pipeline.json
index 95c1008ef057..396868f70004 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/pipeline.json
@@ -215,6 +215,18 @@
"UMask": "0x20"
},
{
+ "BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.RET",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x8"
+ },
+ {
"BriefDescription": "Cycle counts are evenly distributed between active threads in the Core.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/uncore-memory.json b/tools/perf/pmu-events/arch/x86/icelakex/uncore-memory.json
index 5f0d2c462940..6872ae4b29d9 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/uncore-memory.json
@@ -329,5 +329,1528 @@
"PerPkg": "1",
"UMask": "0x01",
"Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Data Buffer Inserts",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x17",
+ "EventName": "UNC_M_RDB_INSERTS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses : Scoreboard Accesses Accepted",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M_SB_ACCESSES.ACCEPTS",
+ "PerPkg": "1",
+ "UMask": "0x05",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses : Scoreboard Accesses Rejected",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M_SB_ACCESSES.REJECTS",
+ "PerPkg": "1",
+ "UMask": "0x0A",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM read CAS commands issued (does not include underfills)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.RD_REG",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM underfill read CAS commands issued",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Activate Count : Activate due to Bypass",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x01",
+ "EventName": "UNC_M_ACT_COUNT.BYP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM RD_CAS commands w/auto-pre",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.RD_PRE_REG",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.RD_PRE_UNDERFILL",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM WR_CAS commands w/ auto-pre",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.WR_PRE",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x47",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.LOW_0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x47",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.LOW_1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x47",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.LOW_2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x47",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.LOW_3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M_POWER_CRIT_THROTTLE_CYCLES.SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M_POWER_CRIT_THROTTLE_CYCLES.SLOT1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x46",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x46",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.SLOT1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_M_RPQ_CYCLES_NE.PCH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_M_RPQ_CYCLES_NE.PCH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses : Read Accepts",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M_SB_ACCESSES.RD_ACCEPTS",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses : Read Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M_SB_ACCESSES.RD_REJECTS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses : NM read completions",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M_SB_ACCESSES.WR_ACCEPTS",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses : NM write completions",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M_SB_ACCESSES.WR_REJECTS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses : FM read completions",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M_SB_ACCESSES.NM_RD_CMPS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses : FM write completions",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M_SB_ACCESSES.NM_WR_CMPS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses : Write Accepts",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M_SB_ACCESSES.FM_RD_CMPS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses : Write Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M_SB_ACCESSES.FM_WR_CMPS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Alloc",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD9",
+ "EventName": "UNC_M_SB_CANARY.ALLOC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Dealloc",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD9",
+ "EventName": "UNC_M_SB_CANARY.DEALLOC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Reject",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD9",
+ "EventName": "UNC_M_SB_CANARY.VLD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_CANARY.NM_RD_STARVED",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0xd9",
+ "EventName": "UNC_M_SB_CANARY.NMRD_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_CANARY.NM_WR_STARVED",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0xd9",
+ "EventName": "UNC_M_SB_CANARY.NMWR_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_CANARY.FM_RD_STARVED",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0xd9",
+ "EventName": "UNC_M_SB_CANARY.FMRD_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_CANARY.FM_WR_STARVED",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0xd9",
+ "EventName": "UNC_M_SB_CANARY.FMWR_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_CANARY.FM_TGR_WR_STARVED",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0xd9",
+ "EventName": "UNC_M_SB_CANARY.FMTGRWR_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts : Reads",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M_SB_INSERTS.RDS",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts : Writes",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M_SB_INSERTS.WRS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts : Block region reads",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M_SB_INSERTS.BLOCK_RDS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts : Block region writes",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M_SB_INSERTS.BLOCK_WRS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Occupancy : Reads",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M_SB_OCCUPANCY.RDS",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Occupancy : Block region reads",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M_SB_OCCUPANCY.BLOCK_RDS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Occupancy : Block region writes",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M_SB_OCCUPANCY.BLOCK_WRS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of Scoreboard Requests Rejected : NM requests rejected due to set conflict",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M_SB_REJECT.NM_SET_CNFLT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of Scoreboard Requests Rejected : FM requests rejected due to full address conflict",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M_SB_REJECT.FM_ADDR_CNFLT",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of Scoreboard Requests Rejected : Patrol requests rejected due to set conflict",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M_SB_REJECT.PATROL_SET_CNFLT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of Scoreboard Requests Rejected",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M_SB_REJECT.CANARY",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_ALLOC.NM_RD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0xd7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.NMRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_ALLOC.FM_RD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0xd7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.FMRD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_ALLOC.NM_WR",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0xd7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.NMWR",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_ALLOC.FM_WR",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0xd7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.FMWR",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_ALLOC.FM_TGR",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0xd7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.FMTGR",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_DEALLOC.NM_RD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0xde",
+ "EventName": "UNC_M_SB_STRV_DEALLOC.NMRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_DEALLOC.FM_RD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0xde",
+ "EventName": "UNC_M_SB_STRV_DEALLOC.FMRD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_DEALLOC.NM_WR",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0xde",
+ "EventName": "UNC_M_SB_STRV_DEALLOC.NMWR",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_DEALLOC.FM_WR",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0xde",
+ "EventName": "UNC_M_SB_STRV_DEALLOC.FMWR",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_DEALLOC.FM_TGR",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0xde",
+ "EventName": "UNC_M_SB_STRV_DEALLOC.FMTGR",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_OCC.NM_RD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0xd8",
+ "EventName": "UNC_M_SB_STRV_OCC.NMRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_OCC.FM_RD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0xd8",
+ "EventName": "UNC_M_SB_STRV_OCC.FMRD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_OCC.NM_WR",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0xd8",
+ "EventName": "UNC_M_SB_STRV_OCC.NMWR",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_OCC.FM_WR",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0xd8",
+ "EventName": "UNC_M_SB_STRV_OCC.FMWR",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_OCC.FM_TGR",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0xd8",
+ "EventName": "UNC_M_SB_STRV_OCC.FMTGR",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.NEW",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xDD",
+ "EventName": "UNC_M_SB_TAGGED.NEW",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.RD_HIT",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xDD",
+ "EventName": "UNC_M_SB_TAGGED.RD_HIT",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.RD_MISS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xDD",
+ "EventName": "UNC_M_SB_TAGGED.RD_MISS",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.DDR4_CMP",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xDD",
+ "EventName": "UNC_M_SB_TAGGED.DDR4_CMP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.OCC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xDD",
+ "EventName": "UNC_M_SB_TAGGED.OCC",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_M_WPQ_CYCLES_NE.PCH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_M_WPQ_CYCLES_NE.PCH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x23",
+ "EventName": "UNC_M_WPQ_READ_HIT.PCH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x23",
+ "EventName": "UNC_M_WPQ_READ_HIT.PCH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x24",
+ "EventName": "UNC_M_WPQ_WRITE_HIT.PCH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x24",
+ "EventName": "UNC_M_WPQ_WRITE_HIT.PCH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_PCLS.RD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_PCLS.RD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_PCLS.WR",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_PCLS.WR",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_PCLS.TOTAL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_PCLS.TOTAL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Prefetch Inserts : All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xDA",
+ "EventName": "UNC_M_SB_PREF_INSERTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Prefetch Occupancy : All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xDB",
+ "EventName": "UNC_M_SB_PREF_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of Scoreboard Requests Rejected",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M_SB_REJECT.DDR_EARLY_CMP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge All Commands",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x44",
+ "EventName": "UNC_M_DRAM_PRE_ALL",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_PARITY_ERRORS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2c",
+ "EventName": "UNC_M_PARITY_ERRORS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Channel PPD Cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x85",
+ "EventName": "UNC_M_POWER_CHANNEL_PPD",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Clock-Enabled Self-Refresh",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x43",
+ "EventName": "UNC_M_POWER_SELF_REFRESH",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Data Buffer Full",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_M_RDB_FULL",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Data Buffer Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_M_RDB_NOT_EMPTY",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Data Buffer Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1A",
+ "EventName": "UNC_M_RDB_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Full Cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_M_RPQ_CYCLES_FULL_PCH0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Full Cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x15",
+ "EventName": "UNC_M_RPQ_CYCLES_FULL_PCH1",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Cycles Full",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD1",
+ "EventName": "UNC_M_SB_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Cycles Not-Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M_SB_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Full Cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x22",
+ "EventName": "UNC_M_WPQ_CYCLES_FULL_PCH0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Full Cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x16",
+ "EventName": "UNC_M_WPQ_CYCLES_FULL_PCH1",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM WR_CAS commands w/o auto-pre",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.WR_NONPRE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands. : Precharge due to page miss",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_M_PRE_COUNT.PAGE_MISS",
+ "PerPkg": "1",
+ "UMask": "0x0c",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_PREF_OCCUPANCY.PMM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0xdb",
+ "EventName": "UNC_M_SB_PREF_OCCUPANCY.PMEM",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M_SB_ACCESSES.NMRD_CMPS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M_SB_ACCESSES.NMWR_CMPS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Commands : RPQ GNTs",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.RPQ_GNTS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Commands : Underfill GNTs",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.WPQ_GNTS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Commands : Misc GNTs",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.MISC_GNT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Commands : Misc Commands (error, flow ACKs)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.MISC",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Commands - Part 2 : Opportunistic Reads",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xEB",
+ "EventName": "UNC_M_PMM_CMD2.OPP_RD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Commands - Part 2 : Expected No data packet (ERID matched NDP encoding)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xEB",
+ "EventName": "UNC_M_PMM_CMD2.NODATA_EXP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Commands - Part 2 : Unexpected No data packet (ERID matched a Read, but data was a NDP)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xEB",
+ "EventName": "UNC_M_PMM_CMD2.NODATA_UNEXP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Commands - Part 2 : Read Requests - Slot 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xEB",
+ "EventName": "UNC_M_PMM_CMD2.REQS_SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Commands - Part 2 : Read Requests - Slot 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xEB",
+ "EventName": "UNC_M_PMM_CMD2.REQS_SLOT1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Commands - Part 2 : ECC Errors",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xEB",
+ "EventName": "UNC_M_PMM_CMD2.PMM_ECC_ERROR",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Commands - Part 2 : ERID detectable parity error",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xEB",
+ "EventName": "UNC_M_PMM_CMD2.PMM_ERID_ERROR",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Commands - Part 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xEB",
+ "EventName": "UNC_M_PMM_CMD2.PMM_ERID_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M_PMM_RPQ_OCCUPANCY.NO_GNT",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M_PMM_RPQ_OCCUPANCY.GNT_WAIT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE4",
+ "EventName": "UNC_M_PMM_WPQ_OCCUPANCY.CAS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE4",
+ "EventName": "UNC_M_PMM_WPQ_OCCUPANCY.PWR",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M_SB_ACCESSES.FMRD_CMPS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M_SB_ACCESSES.FMWR_CMPS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts : Persistent Mem reads",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M_SB_INSERTS.PMM_RDS",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts : Persistent Mem writes",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M_SB_INSERTS.PMM_WRS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Occupancy : Persistent Mem reads",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M_SB_OCCUPANCY.PMM_RDS",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Occupancy : Persistent Mem writes",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M_SB_OCCUPANCY.PMM_WRS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.PMM0_CMP",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xDD",
+ "EventName": "UNC_M_SB_TAGGED.PMM0_CMP",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.PMM1_CMP",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xDD",
+ "EventName": "UNC_M_SB_TAGGED.PMM1_CMP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.PMM2_CMP",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xDD",
+ "EventName": "UNC_M_SB_TAGGED.PMM2_CMP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Prefetch Inserts : DDR4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xDA",
+ "EventName": "UNC_M_SB_PREF_INSERTS.DDR",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Prefetch Inserts : Persistent Mem",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xDA",
+ "EventName": "UNC_M_SB_PREF_INSERTS.PMM",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Prefetch Occupancy : DDR4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xDB",
+ "EventName": "UNC_M_SB_PREF_OCCUPANCY.DDR",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Queue Cycles Full",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M_PMM_RPQ_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M_PMM_RPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Write Queue Cycles Full",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE6",
+ "EventName": "UNC_M_PMM_WPQ_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Write Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M_PMM_WPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_PMM_WPQ_FLUSH",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe8",
+ "EventName": "UNC_M_PMM_WPQ_FLUSH",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_PMM_WPQ_FLUSH_CYC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe9",
+ "EventName": "UNC_M_PMM_WPQ_FLUSH_CYC",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Prefetch Occupancy : Persistent Mem",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xdb",
+ "EventName": "UNC_M_SB_PREF_OCCUPANCY.PMM",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for the Memory Controller",
+ "Counter": "4",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_M_CLOCKTICKS_FREERUN",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Valid",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD9",
+ "EventName": "UNC_M_SB_CANARY.NM_RD_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Read Starved",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD9",
+ "EventName": "UNC_M_SB_CANARY.NM_WR_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Write Starved",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD9",
+ "EventName": "UNC_M_SB_CANARY.FM_RD_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Far Mem Read Starved",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD9",
+ "EventName": "UNC_M_SB_CANARY.FM_WR_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Far Mem Write Starved",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD9",
+ "EventName": "UNC_M_SB_CANARY.FM_TGR_WR_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Read - Set",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.NM_RD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Far Mem Read - Set",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.FM_RD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Write - Set",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.NM_WR",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Far Mem Write - Set",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.FM_WR",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Read - Clear",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.FM_TGR",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Read - Set",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xDE",
+ "EventName": "UNC_M_SB_STRV_DEALLOC.NM_RD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Far Mem Read - Set",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xDE",
+ "EventName": "UNC_M_SB_STRV_DEALLOC.FM_RD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Write - Set",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xDE",
+ "EventName": "UNC_M_SB_STRV_DEALLOC.NM_WR",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Far Mem Write - Set",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xDE",
+ "EventName": "UNC_M_SB_STRV_DEALLOC.FM_WR",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Read - Clear",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xDE",
+ "EventName": "UNC_M_SB_STRV_DEALLOC.FM_TGR",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Read",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD8",
+ "EventName": "UNC_M_SB_STRV_OCC.NM_RD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Far Mem Read",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD8",
+ "EventName": "UNC_M_SB_STRV_OCC.FM_RD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Write",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD8",
+ "EventName": "UNC_M_SB_STRV_OCC.NM_WR",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Far Mem Write",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD8",
+ "EventName": "UNC_M_SB_STRV_OCC.FM_WR",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Read - Clear",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD8",
+ "EventName": "UNC_M_SB_STRV_OCC.FM_TGR",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/uncore-other.json b/tools/perf/pmu-events/arch/x86/icelakex/uncore-other.json
index 71e052667e50..7783aa2ef5d1 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/uncore-other.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/uncore-other.json
@@ -403,6 +403,1165 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "Clockticks of the integrated IO (IIO) traffic controller",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x01",
+ "EventName": "UNC_IIO_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Lost Forward",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1F",
+ "EventName": "UNC_I_MISC1.LOST_FWD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "PCIITOM request issued by the IRP unit to the mesh with the intention of writing a full cacheline",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.PCITOM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops : WbMtoI",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Lookups : Found in any state",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.ANY",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Lookups : Found in A state",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_A",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Lookups : Found in I state",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_I",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Lookups : Found in S state",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_S",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates : From/to any state. Note: event counts are incorrect in 2LM mode",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2e",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.ANY",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tag Hit : Clean NearMem Read Hit",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M2M_TAG_HIT.NM_RD_HIT_CLEAN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tag Hit : Dirty NearMem Read Hit",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M2M_TAG_HIT.NM_RD_HIT_DIRTY",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Clockticks of the mesh to memory (M2M)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventName": "UNC_M2M_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number requests PCIe makes of the main die : All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x85",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU.COMMIT.ALL",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Total IRP occupancy of inbound read and write requests to coherent memory",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x0f",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.MEM",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": ": All Inserts Inbound (p2p + faf + cset)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_I_IRP_ALL.INBOUND_INSERTS",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound write (fast path) requests received by the IRP",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.WR_PREF",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : All Data",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.ALL_DATA",
+ "PerPkg": "1",
+ "UMask": "0x0F",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : All Non Data",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.NON_DATA",
+ "PerPkg": "1",
+ "UMask": "0x97",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : All Data",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.ALL_DATA",
+ "PerPkg": "1",
+ "UMask": "0x0F",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : All Non Data",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.NON_DATA",
+ "PerPkg": "1",
+ "UMask": "0x97",
+ "Unit": "UPI LL"
+ },
+ {
"BriefDescription": "CMS Clockticks",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
@@ -412,6 +1571,114 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "Clockticks of the IO coherency tracker (IRP)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x01",
+ "EventName": "UNC_I_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "FAF RF full",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_FAF_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound read requests received by the IRP and inserted into the FAF queue",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_I_FAF_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Occupancy of the IRP FAF queue",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_I_FAF_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "FAF allocation -- sent to ADQ",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_FAF_TRANSACTIONS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_M2M_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Clockticks of the mesh to PCI (M2P)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x01",
+ "EventName": "UNC_M2P_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_M2P_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Clockticks of the mesh to UPI (M3UPI)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x01",
+ "EventName": "UNC_M3UPI_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of kfclks",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x01",
+ "EventName": "UNC_UPI_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in L1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_UPI_L1_POWER_CYCLES",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in L0p",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x27",
+ "EventName": "UNC_UPI_TxL0P_POWER_CYCLES",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
"BriefDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that hit the LLC",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
@@ -489,6 +1756,15 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "Clockticks in the UBOX using a dedicated 48-bit Fixed Counter",
+ "Counter": "FIXED",
+ "CounterType": "FIXED",
+ "EventCode": "0xff",
+ "EventName": "UNC_U_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "UBOX"
+ },
+ {
"BriefDescription": "TOR Inserts : ItoMs issued by IO Devices",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
@@ -585,6 +1861,26 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "Valid Flits Sent : Null FLITs transmitted to any slot",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.ALL_NULL",
+ "PerPkg": "1",
+ "UMask": "0x27",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Null FLITs received from any slot",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.ALL_NULL",
+ "PerPkg": "1",
+ "UMask": "0x27",
+ "Unit": "UPI LL"
+ },
+ {
"BriefDescription": "TOR Occupancy : DRds issued by iA Cores that Missed the LLC - HOMed locally",
"CounterType": "PGMABLE",
"EventCode": "0x36",
@@ -813,6 +2109,35 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "Free running counter that increments for IIO clocktick",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_IIO_CLOCKTICKS_FREERUN",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : PMM - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.TO_PMM",
+ "PerPkg": "1",
+ "UMask": "0x0720",
+ "UMaskExt": "0x07",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : PMM - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.TO_PMM",
+ "PerPkg": "1",
+ "UMask": "0x1C80",
+ "UMaskExt": "0x1C",
+ "Unit": "M2M"
+ },
+ {
"BriefDescription": "TOR Inserts : LLCPrefData issued by iA Cores that missed the LLC",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
@@ -942,503 +2267,13371 @@
"Unit": "CHA"
},
{
- "BriefDescription": "Clockticks of the integrated IO (IIO) traffic controller",
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0x01",
- "EventName": "UNC_IIO_CLOCKTICKS",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART0",
+ "FCMask": "0x04",
"PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x03",
"Unit": "IIO"
},
{
- "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART1",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART2",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART3",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART4",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART5",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART6",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART7",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 0",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART0",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 7",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART7",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 6",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART6",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 5",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART5",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 4",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART4",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 3",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART3",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 2",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART2",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 1",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART1",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit M line in the IIO cache",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT_M",
+ "PerPkg": "1",
+ "UMask": "0x78",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0-7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.ALL_PARTS",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0xff",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 0-7",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL_PARTS",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0xff",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMs issued by IO Devices to locally HOMed memory",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0xCC42FF04",
+ "UMaskExt": "0xCC42FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMs issued by IO Devices to remotely HOMed memory",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0xCC437F04",
+ "UMaskExt": "0xCC437F",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices to locally HOMed memory",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0xCD42FF04",
+ "UMaskExt": "0xCD42FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices to remotely HOMed memory",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0xCD437F04",
+ "UMaskExt": "0xCD437F",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline directory state lookups : Snoop Not Needed",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x53",
+ "EventName": "UNC_CHA_DIR_LOOKUP.NO_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline directory state lookups : Snoop Needed",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x53",
+ "EventName": "UNC_CHA_DIR_LOOKUP.SNP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory state updates; Directory Updated memory write from the HA pipe",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x54",
+ "EventName": "UNC_CHA_DIR_UPDATE.HA",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline directory state updates : Directory Updated memory write from TOR pipe",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x54",
+ "EventName": "UNC_CHA_DIR_UPDATE.TOR",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read request from a remote socket which hit in the HitMe Cache to a line In the E state",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5F",
+ "EventName": "UNC_CHA_HITME_HIT.EX_RDS",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Local - All Lines",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x200F",
+ "UMaskExt": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Remote - All Lines",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_ALL",
+ "PerPkg": "1",
+ "UMask": "0x800F",
+ "UMaskExt": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.TOR",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0x64",
+ "EventName": "UNC_CHA_2LM_NM_SETCONFLICTS.TOR",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.SF",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0x64",
+ "EventName": "UNC_CHA_2LM_NM_SETCONFLICTS.SF",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0x64",
+ "EventName": "UNC_CHA_2LM_NM_SETCONFLICTS.LLC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counter 0 Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1F",
+ "EventName": "UNC_CHA_COUNTER0_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Number of times that an RFO hit in S state",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.RFO_HIT_S",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Local INVITOE requests (exclusive ownership of a cache line without receiving data) that miss the SF/LLC and remote INVITOE requests sent to the CHA's home agent",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.INVITOE",
+ "PerPkg": "1",
+ "UMask": "0x30",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; PhyAddr Match",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received : RspI",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPI",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received : RspIFwd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPIFWD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received : RspS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received : RspSFwd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPSFWD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0xC001FFff",
+ "UMaskExt": "0xC001FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFCODE",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFCRD",
+ "PerPkg": "1",
+ "UMask": "0xcccffd01",
+ "UMaskExt": "0xcccffd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFDATA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFDRD",
+ "PerPkg": "1",
+ "UMask": "0xccd7fd01",
+ "UMaskExt": "0xccd7fd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRds issued by iA Cores that Hit the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD",
+ "PerPkg": "1",
+ "UMask": "0xC80FFD01",
+ "UMaskExt": "0xC80FFD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores that Hit the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD",
+ "PerPkg": "1",
+ "UMask": "0xC817FD01",
+ "UMaskExt": "0xC817FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores that hit the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFRFO",
+ "PerPkg": "1",
+ "UMask": "0xCCC7FD01",
+ "UMaskExt": "0xCCC7FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFOs issued by iA Cores that Hit the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO",
+ "PerPkg": "1",
+ "UMask": "0xC807FD01",
+ "UMaskExt": "0xC807FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores that missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFRFO",
+ "PerPkg": "1",
+ "UMask": "0xCCC7FE01",
+ "UMaskExt": "0xCCC7FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by IO Devices that missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_RFO",
+ "PerPkg": "1",
+ "UMask": "0xc803fe04",
+ "UMaskExt": "0xc803fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFOs issued by IO Devices that missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RFO",
+ "PerPkg": "1",
+ "UMask": "0xc803fe04",
+ "UMaskExt": "0xc803fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMs issued by IO Devices that missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM",
+ "PerPkg": "1",
+ "UMask": "0xcc43fe04",
+ "UMaskExt": "0xcc43fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
"Counter": "0,1",
"CounterType": "PGMABLE",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART0",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
- "UMask": "0x01",
+ "UMask": "0x02",
"Unit": "IIO"
},
{
- "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
"Counter": "0,1",
"CounterType": "PGMABLE",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
- "UMask": "0x01",
+ "UMask": "0x02",
"Unit": "IIO"
},
{
- "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
"Counter": "0,1",
"CounterType": "PGMABLE",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART2",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
- "UMask": "0x01",
+ "UMask": "0x02",
"Unit": "IIO"
},
{
- "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
"Counter": "0,1",
"CounterType": "PGMABLE",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART3",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
- "UMask": "0x01",
+ "UMask": "0x02",
"Unit": "IIO"
},
{
- "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
"Counter": "0,1",
"CounterType": "PGMABLE",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART0",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
- "UMask": "0x04",
+ "UMask": "0x08",
"Unit": "IIO"
},
{
- "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
"Counter": "0,1",
"CounterType": "PGMABLE",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
- "UMask": "0x04",
+ "UMask": "0x08",
"Unit": "IIO"
},
{
- "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
"Counter": "0,1",
"CounterType": "PGMABLE",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART2",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
- "UMask": "0x04",
+ "UMask": "0x08",
"Unit": "IIO"
},
{
- "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
"Counter": "0,1",
"CounterType": "PGMABLE",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART3",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
- "UMask": "0x04",
+ "UMask": "0x08",
"Unit": "IIO"
},
{
- "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
- "Counter": "0,1",
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card",
+ "Counter": "2,3",
"CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART0",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART0",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
- "UMask": "0x80",
+ "UMask": "0x02",
"Unit": "IIO"
},
{
- "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
- "Counter": "0,1",
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card",
+ "Counter": "2,3",
"CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
- "UMask": "0x80",
+ "UMask": "0x02",
"Unit": "IIO"
},
{
- "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
- "Counter": "0,1",
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card",
+ "Counter": "2,3",
"CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART2",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART2",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
- "UMask": "0x80",
+ "UMask": "0x02",
"Unit": "IIO"
},
{
- "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
- "Counter": "0,1",
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card",
+ "Counter": "2,3",
"CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART3",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
- "UMask": "0x80",
+ "UMask": "0x02",
"Unit": "IIO"
},
{
- "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card",
"Counter": "2,3",
"CounterType": "PGMABLE",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART0",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
- "UMask": "0x01",
+ "UMask": "0x08",
"Unit": "IIO"
},
{
- "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card",
"Counter": "2,3",
"CounterType": "PGMABLE",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART1",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
- "UMask": "0x01",
+ "UMask": "0x08",
"Unit": "IIO"
},
{
- "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card",
"Counter": "2,3",
"CounterType": "PGMABLE",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART2",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART2",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
- "UMask": "0x01",
+ "UMask": "0x08",
"Unit": "IIO"
},
{
- "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card",
"Counter": "2,3",
"CounterType": "PGMABLE",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART3",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART3",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Snoop Responses : Hit M",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_M",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "RFO request issued by the IRP unit to the mesh with the intention of writing a partial cacheline",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.RFO",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Number of reads in which direct to Intel UPI transactions were overridden",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x28",
+ "EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_CREDITS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles when Direct2UPI was Disabled",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x27",
+ "EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_DIRSTATE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of reads that a message sent direct2 Intel UPI was overridden",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x29",
+ "EventName": "UNC_M2M_DIRECT2UPI_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Non-Inclusive - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.NI",
+ "PerPkg": "1",
+ "UMaskExt": "0x1E",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tag Hit : Clean NearMem Underfill Hit",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M2M_TAG_HIT.NM_UFILL_HIT_CLEAN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tag Hit : Dirty NearMem Underfill Hit",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M2M_TAG_HIT.NM_UFILL_HIT_DIRTY",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tag Miss",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x61",
+ "EventName": "UNC_M2M_TAG_MISS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC Bypass : Not Taken",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x22",
+ "EventName": "UNC_M2M_BYPASS_M2M_EGRESS.NOT_TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles when direct to core mode, which bypasses the CHA, was disabled",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_DIRSTATE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of reads in which direct to core transaction was overridden",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_DIRECT2CORE_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : All, regardless of priority. - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x0704",
+ "UMaskExt": "0x07",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : Normal Priority - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.NORMAL",
+ "PerPkg": "1",
+ "UMask": "0x0701",
+ "UMaskExt": "0x07",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : All Writes - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1C10",
+ "UMaskExt": "0x1C",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Full Line Non-ISOCH - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.FULL",
+ "PerPkg": "1",
+ "UMask": "0x1C01",
+ "UMaskExt": "0x1C",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Partial Non-ISOCH - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.PARTIAL",
+ "PerPkg": "1",
+ "UMask": "0x1C02",
+ "UMaskExt": "0x1C",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Allocations",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x01",
+ "EventName": "UNC_M2M_RxC_AD_INSERTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_M2M_RxC_AD_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) Allocations",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x05",
+ "EventName": "UNC_M2M_RxC_BL_INSERTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x06",
+ "EventName": "UNC_M2M_RxC_BL_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Allocations",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x09",
+ "EventName": "UNC_M2M_TxC_AD_INSERTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x0A",
+ "EventName": "UNC_M2M_TxC_AD_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Allocations : All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2M_TxC_BL_INSERTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_PMM_MEMMODE_NM_INVITOX.LOCAL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0x65",
+ "EventName": "UNC_CHA_2LM_NM_INVITOX.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_PMM_MEMMODE_NM_INVITOX.REMOTE",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0x65",
+ "EventName": "UNC_CHA_2LM_NM_INVITOX.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_PMM_MEMMODE_NM_INVITOX.SETCONFLICT",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0x65",
+ "EventName": "UNC_CHA_2LM_NM_INVITOX.SETCONFLICT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.MEMWR",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0x70",
+ "EventName": "UNC_CHA_2LM_NM_SETCONFLICTS2.MEMWR",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.MEMWRNI",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0x70",
+ "EventName": "UNC_CHA_2LM_NM_SETCONFLICTS2.MEMWRNI",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Bypass : Taken",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x57",
+ "EventName": "UNC_CHA_BYPASS_CHA_IMC.TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Bypass : Intermediate bypass Taken",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x57",
+ "EventName": "UNC_CHA_BYPASS_CHA_IMC.INTERMEDIATE",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Bypass : Not Taken",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x57",
+ "EventName": "UNC_CHA_BYPASS_CHA_IMC.NOT_TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Single Snoop Target from Remote",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.REMOTE_ONE",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Single External Snoops",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EXT_ONE",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Single Core Requests",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.CORE_ONE",
+ "PerPkg": "1",
+ "UMask": "0x41",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Single Eviction",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EVICT_ONE",
+ "PerPkg": "1",
+ "UMask": "0x81",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Any Single Snoop",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.ANY_ONE",
+ "PerPkg": "1",
+ "UMask": "0xF1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Multiple Snoop Targets from Remote",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.REMOTE_GTONE",
+ "PerPkg": "1",
+ "UMask": "0x22",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Multiple External Snoops",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EXT_GTONE",
+ "PerPkg": "1",
+ "UMask": "0x22",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Multiple Core Requests",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.CORE_GTONE",
+ "PerPkg": "1",
+ "UMask": "0x42",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Multiple Eviction",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EVICT_GTONE",
+ "PerPkg": "1",
+ "UMask": "0x82",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Any Cycle with Multiple Snoops",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.ANY_GTONE",
+ "PerPkg": "1",
+ "UMask": "0xF2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6E",
+ "EventName": "UNC_CHA_DIRECT_GO.HA_TOR_DEALLOC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6E",
+ "EventName": "UNC_CHA_DIRECT_GO.HA_SUPPRESS_NO_D2C",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6E",
+ "EventName": "UNC_CHA_DIRECT_GO.HA_SUPPRESS_DRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6D",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.EXTCMP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6D",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.PULL",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6D",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.GO",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6D",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.GO_PULL",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6D",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.FAST_GO",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6D",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.FAST_GO_PULL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6D",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.NOP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6D",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.IDLE_DUE_SUPPRESS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache : Remote socket ownership read requests that hit in S state",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5F",
+ "EventName": "UNC_CHA_HITME_HIT.SHARED_OWNREQ",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache : Remote socket WBMtoE requests",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5F",
+ "EventName": "UNC_CHA_HITME_HIT.WBMTOE",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache : Remote socket writeback to I or S requests",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5F",
+ "EventName": "UNC_CHA_HITME_HIT.WBMTOI_OR_S",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed : Remote socket read requests",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5E",
+ "EventName": "UNC_CHA_HITME_LOOKUP.READ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed : Remote socket write (i.e. writeback) requests",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5E",
+ "EventName": "UNC_CHA_HITME_LOOKUP.WRITE",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Misses in HitMe Cache : Remote socket RdInvOwn requests to shared line",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x60",
+ "EventName": "UNC_CHA_HITME_MISS.SHARED_RDINVOWN",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Misses in HitMe Cache : Remote socket RdInvOwn requests that are not to shared line",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x60",
+ "EventName": "UNC_CHA_HITME_MISS.NOTSHARED_RDINVOWN",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Misses in HitMe Cache : Remote socket read or invalidate requests",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x60",
+ "EventName": "UNC_CHA_HITME_MISS.READ_OR_INV",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache : op is RspIFwd or RspIFwdWb for a local request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.DEALLOCATE_RSPFWDI_LOC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache : op is RspIFwd or RspIFwdWb for a remote request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.RSPFWDI_REM",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache : Update HitMe Cache to SHARed",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.SHARED",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache : Update HitMe Cache on RdInvOwn even if not RspFwdI*",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.RDINVOWN",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache : Deallocate HtiME$ on Reads without RspFwdI*",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.DEALLOCATE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "HA to iMC Reads Issued : ISOCH",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x59",
+ "EventName": "UNC_CHA_IMC_READS_COUNT.PRIORITY",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Full Line Writes Issued : Partial Non-ISOCH",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Full Line Writes Issued : ISOCH Full Line",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL_PRIORITY",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Full Line Writes Issued : ISOCH Partial",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL_PRIORITY",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Lines in M state",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.M_STATE",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Lines in E state",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.E_STATE",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Lines in S State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.S_STATE",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Local Only",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_ONLY",
+ "PerPkg": "1",
+ "UMaskExt": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Remote Only",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_ONLY",
+ "PerPkg": "1",
+ "UMaskExt": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Local - Lines in M State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_M",
+ "PerPkg": "1",
+ "UMask": "0x2001",
+ "UMaskExt": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Local - Lines in E State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_E",
+ "PerPkg": "1",
+ "UMask": "0x2002",
+ "UMaskExt": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Local - Lines in S State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_S",
+ "PerPkg": "1",
+ "UMask": "0x2004",
+ "UMaskExt": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Remote - Lines in M State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_M",
+ "PerPkg": "1",
+ "UMask": "0x8001",
+ "UMaskExt": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Remote - Lines in E State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_E",
+ "PerPkg": "1",
+ "UMask": "0x8002",
+ "UMaskExt": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Remote - Lines in S State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_S",
+ "PerPkg": "1",
+ "UMask": "0x8004",
+ "UMaskExt": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cbo Misc : Silent Snoop Eviction",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.RSPI_WAS_FSE",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cbo Misc : Write Combining Aliasing",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.WC_ALIASING",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cbo Misc : CV0 Prefetch Victim",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.CV0_PREF_VIC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cbo Misc : CV0 Prefetch Miss",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.CV0_PREF_MISS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast : Local InvItoE",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x55",
+ "EventName": "UNC_CHA_OSB.LOCAL_INVITOE",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast : Local Rd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x55",
+ "EventName": "UNC_CHA_OSB.LOCAL_READ",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast : Remote Rd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x55",
+ "EventName": "UNC_CHA_OSB.REMOTE_READ",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast : Remote Rd InvItoE",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x55",
+ "EventName": "UNC_CHA_OSB.REMOTE_READINVITOE",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast : RFO HitS Snoop Broadcast",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x55",
+ "EventName": "UNC_CHA_OSB.RFO_HITS_SNP_BCAST",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast : Off",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x55",
+ "EventName": "UNC_CHA_OSB.OFF_PWRHEURISTIC",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.RMW_SETMATCH",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.GOTRACK_PAMATCH",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.GOTRACK_ALLOWSNP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.GOTRACK_WAYMATCH",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.GOTRACK_ALLWAYRSV",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.PTL_INPIPE",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.IRQ_SETMATCH_VICP",
+ "PerPkg": "1",
+ "UMaskExt": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.FSF_VICP",
+ "PerPkg": "1",
+ "UMaskExt": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.ONE_FSF_VIC",
+ "PerPkg": "1",
+ "UMaskExt": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.TORID_MATCH_GO_P",
+ "PerPkg": "1",
+ "UMaskExt": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.IPQ_SETMATCH_VICP",
+ "PerPkg": "1",
+ "UMaskExt": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.WAY_MATCH",
+ "PerPkg": "1",
+ "UMaskExt": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.ONE_RSP_CON",
+ "PerPkg": "1",
+ "UMaskExt": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.IDX_INPIPE",
+ "PerPkg": "1",
+ "UMaskExt": "0x100",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.SETMATCHENTRYWSCT",
+ "PerPkg": "1",
+ "UMaskExt": "0x200",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.ALLRSFWAYS_RES",
+ "PerPkg": "1",
+ "UMaskExt": "0x800",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.RRQ_SETMATCH_VICP",
+ "PerPkg": "1",
+ "UMaskExt": "0x1000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.ISMQ_SETMATCH_VICP",
+ "PerPkg": "1",
+ "UMaskExt": "0x2000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.SF_WAYS_RES",
+ "PerPkg": "1",
+ "UMaskExt": "0x4000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.LLC_WAYS_RES",
+ "PerPkg": "1",
+ "UMaskExt": "0x8000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.NOTALLOWSNOOP",
+ "PerPkg": "1",
+ "UMaskExt": "0x10000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.TOPA_MATCH",
+ "PerPkg": "1",
+ "UMaskExt": "0x20000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.IVEGRCREDIT",
+ "PerPkg": "1",
+ "UMaskExt": "0x40000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.BLEGRCREDIT",
+ "PerPkg": "1",
+ "UMaskExt": "0x80000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.ADEGRCREDIT",
+ "PerPkg": "1",
+ "UMaskExt": "0x100000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.AKEGRCREDIT",
+ "PerPkg": "1",
+ "UMaskExt": "0x200000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.HACREDIT",
+ "PerPkg": "1",
+ "UMaskExt": "0x400000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.VN_AD_REQ",
+ "PerPkg": "1",
+ "UMaskExt": "0x800000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.VN_AD_RSP",
+ "PerPkg": "1",
+ "UMaskExt": "0x1000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.VN_BL_RSP",
+ "PerPkg": "1",
+ "UMaskExt": "0x2000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.VN_BL_WB",
+ "PerPkg": "1",
+ "UMaskExt": "0x4000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.VN_BL_NCB",
+ "PerPkg": "1",
+ "UMaskExt": "0x8000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.VN_BL_NCS",
+ "PerPkg": "1",
+ "UMaskExt": "0x10000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC8",
+ "PerPkg": "1",
+ "UMaskExt": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC9",
+ "PerPkg": "1",
+ "UMaskExt": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC10",
+ "PerPkg": "1",
+ "UMaskExt": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC11",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC11",
+ "PerPkg": "1",
+ "UMaskExt": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC12",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC12",
+ "PerPkg": "1",
+ "UMaskExt": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC13",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC13",
+ "PerPkg": "1",
+ "UMaskExt": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations : IRQ",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations : IRQ Rejected",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.IRQ_REJ",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations : IPQ",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations : PRQ",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations : PRQ",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.PRQ_REJ",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations : RRQ",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.RRQ",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations : WBQ",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.WBQ",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : HA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : LLC Victim",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : SF Victim",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : Victim",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : LLC OR SF Way",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : Allow Snoop",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : PhyAddr Match",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : HA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : LLC Victim",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : SF Victim",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : Victim",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : LLC or SF Way",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : Allow Snoop",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x25",
+ "EventName": "UNC_CHA_RxC_ISMQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 1 : HA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x25",
+ "EventName": "UNC_CHA_RxC_ISMQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2D",
+ "EventName": "UNC_CHA_RxC_ISMQ1_RETRY.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 1 : HA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2D",
+ "EventName": "UNC_CHA_RxC_ISMQ1_RETRY.HA",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Occupancy : IRQ",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_CHA_RxC_OCCUPANCY.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Occupancy : IPQ",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_CHA_RxC_OCCUPANCY.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Occupancy : RRQ",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_CHA_RxC_OCCUPANCY.RRQ",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Occupancy : WBQ",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_CHA_RxC_OCCUPANCY.WBQ",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : HA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.HA",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : LLC Victim",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : SF Victim",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : Victim",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : LLC OR SF Way",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : Allow Snoop",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : PhyAddr Match",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : HA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : LLC Victim",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : SF Victim",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : Victim",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : LLC OR SF Way",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : Allow Snoop",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : PhyAddr Match",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AD_REQ_VN0",
+ "PerPkg": "1",
"UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : HA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.HA",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : LLC Victim",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : SF Victim",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : Victim",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : LLC OR SF Way",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : Allow Snoop",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : PhyAddr Match",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 1 : HA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 1 : LLC Victim",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 1 : SF Victim",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 1 : Victim",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 1 : LLC OR SF Way",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 1 : Allow Snoop",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 1 : PhyAddr Match",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 1 : HA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 1 : LLC Victim",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 1 : SF Victim",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 1 : Victim",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 1 : LLC OR SF Way",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 1 : Allow Snoop",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 1 : PhyAddr Match",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent : All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.ALL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent : Snoops sent for Local Requests",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent : Snoops sent for Remote Requests",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent : Broadcast snoops for Local Requests",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.BCST_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent : Broadcast snoops for Remote Requests",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.BCST_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent : Directed snoops for Local Requests",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.DIRECT_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent : Directed snoops for Remote Requests",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.DIRECT_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received : Rsp*WB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPWB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received : Rsp*Fwd*WB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPFWDWB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received : RSPCNFLCT*",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPCNFLCT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received : RspFwd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPFWD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : RspI",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPI",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : RspS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : RspIFwd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPIFWD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : RspSFwd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPSFWD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : Rsp*WB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPWB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : Rsp*FWD*WB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPFWDWB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : RspCnflct",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPCNFLCT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : RspFwd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPFWD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Misc Snoop Responses Received : MtoI RspIFwdM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6B",
+ "EventName": "UNC_CHA_SNOOP_RSP_MISC.MTOI_RSPIFWDM",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Misc Snoop Responses Received : MtoI RspIDataM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6B",
+ "EventName": "UNC_CHA_SNOOP_RSP_MISC.MTOI_RSPDATAM",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Misc Snoop Responses Received : RspIFwdPtl Hit SF",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6B",
+ "EventName": "UNC_CHA_SNOOP_RSP_MISC.RSPIFWDMPTL_HITSF",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Misc Snoop Responses Received : RspIFwdPtl Hit LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6B",
+ "EventName": "UNC_CHA_SNOOP_RSP_MISC.RSPIFWDMPTL_HITLLC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Misc Snoop Responses Received : Pull Data Partial - Hit SF",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6B",
+ "EventName": "UNC_CHA_SNOOP_RSP_MISC.PULLDATAPTL_HITSF",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Misc Snoop Responses Received : Pull Data Partial - Hit LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6B",
+ "EventName": "UNC_CHA_SNOOP_RSP_MISC.PULLDATAPTL_HITLLC",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WbPushMtoI : Pushed to LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x56",
+ "EventName": "UNC_CHA_WB_PUSH_MTOI.LLC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WbPushMtoI : Pushed to Memory",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x56",
+ "EventName": "UNC_CHA_WB_PUSH_MTOI.MEM",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC8",
+ "PerPkg": "1",
+ "UMaskExt": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC9",
+ "PerPkg": "1",
+ "UMaskExt": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC10",
+ "PerPkg": "1",
+ "UMaskExt": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC11",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC11",
+ "PerPkg": "1",
+ "UMaskExt": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC12",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC12",
+ "PerPkg": "1",
+ "UMaskExt": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC13",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC13",
+ "PerPkg": "1",
+ "UMaskExt": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "XPT Prefetches : Sent (on 0?)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6f",
+ "EventName": "UNC_CHA_XPT_PREF.SENT0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "XPT Prefetches : Dropped (on 0?) - No Credits",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6f",
+ "EventName": "UNC_CHA_XPT_PREF.DROP0_NOCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "XPT Prefetches : Dropped (on 0?) - Conflict",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6f",
+ "EventName": "UNC_CHA_XPT_PREF.DROP0_CONFLICT",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "XPT Prefetches : Sent (on 1?)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6f",
+ "EventName": "UNC_CHA_XPT_PREF.SENT1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "XPT Prefetches : Dropped (on 1?) - No Credits",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6f",
+ "EventName": "UNC_CHA_XPT_PREF.DROP1_NOCRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "XPT Prefetches : Dropped (on 1?) - Conflict",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6f",
+ "EventName": "UNC_CHA_XPT_PREF.DROP1_CONFLICT",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x02",
"Unit": "IIO"
},
{
- "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card",
"Counter": "2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xc0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART0",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART0",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
- "UMask": "0x04",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
"Counter": "2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xc0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
- "UMask": "0x04",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
"Counter": "2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xc0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART2",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART2",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
- "UMask": "0x04",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
"Counter": "2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xc0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART3",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": IOTLB lookups first",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.FIRST_LOOKUPS",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": IOTLB lookups all",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.ALL_LOOKUPS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": IOTLB Hits to a 4K Page",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.4K_HITS",
+ "PerPkg": "1",
"UMask": "0x04",
"Unit": "IIO"
},
{
- "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "BriefDescription": ": IOTLB Hits to a 2M Page",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART0",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.2M_HITS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": IOTLB Hits to a 1G Page",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.1G_HITS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": IOTLB Fills (same as IOTLB miss)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.MISSES",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Context cache lookups",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.CTXT_CACHE_LOOKUPS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Context cache hits",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.CTXT_CACHE_HITS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PageWalk cache lookup",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.PWT_CACHE_LOOKUPS",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": IOMMU memory access",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.NUM_MEM_ACCESSES",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Cycles PWT full",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.CYC_PWT_FULL",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Interrupt Entry cache lookup",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.INT_CACHE_LOOKUPS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Interrupt Entry cache hit",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.INT_CACHE_HITS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus : Non-PCIE bus",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus : PCIE bus",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus : Non-PCIE bus and !(PCIE bus)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus : Non-PCIE bus and PCIE bus",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS0_BUS1",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus : !(Non-PCIE bus) and PCIE bus",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.NOT_BUS0_BUS1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus : !(Non-PCIE bus) and !(PCIE bus)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.NOT_BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus : Non-PCIE bus",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus : PCIE bus",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus : Non-PCIE bus and !(PCIE bus)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus : Non-PCIE bus and PCIE bus",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS0_BUS1",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus : !(Non-PCIE bus) and PCIE bus",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.NOT_BUS0_BUS1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus : !(Non-PCIE bus) and !(PCIE bus)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.NOT_BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number requests PCIe makes of the main die : Drop request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x85",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU.ALL.DROP",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART0",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
- "UMask": "0x01",
+ "UMask": "0x02",
"Unit": "IIO"
},
{
- "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
- "UMask": "0x01",
+ "UMask": "0x02",
"Unit": "IIO"
},
{
- "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART2",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART2",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
- "UMask": "0x01",
+ "UMask": "0x02",
"Unit": "IIO"
},
{
- "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART3",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
- "UMask": "0x01",
+ "UMask": "0x02",
"Unit": "IIO"
},
{
- "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART0",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART0",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
- "UMask": "0x04",
+ "UMask": "0x02",
"Unit": "IIO"
},
{
- "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART1",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
- "UMask": "0x04",
+ "UMask": "0x02",
"Unit": "IIO"
},
{
- "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART2",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART2",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
- "UMask": "0x04",
+ "UMask": "0x02",
"Unit": "IIO"
},
{
- "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART3",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART3",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
- "UMask": "0x04",
+ "UMask": "0x02",
"Unit": "IIO"
},
{
- "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART0",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART0",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
- "UMask": "0x80",
+ "UMask": "0x08",
"Unit": "IIO"
},
{
- "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART1",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
- "UMask": "0x80",
+ "UMask": "0x08",
"Unit": "IIO"
},
{
- "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART2",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART2",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
- "UMask": "0x80",
+ "UMask": "0x08",
"Unit": "IIO"
},
{
- "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART3",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART3",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
- "UMask": "0x80",
+ "UMask": "0x08",
"Unit": "IIO"
},
{
- "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xc1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART0",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART0",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
- "UMask": "0x01",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xc1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
- "UMask": "0x01",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xc1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART2",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART2",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
- "UMask": "0x01",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xc1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART3",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
- "UMask": "0x01",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xc1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART0",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART0",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
- "UMask": "0x04",
+ "UMask": "0x40",
"Unit": "IIO"
},
{
- "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xc1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
- "UMask": "0x04",
+ "UMask": "0x40",
"Unit": "IIO"
},
{
- "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xc1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART2",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART2",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
- "UMask": "0x04",
+ "UMask": "0x40",
"Unit": "IIO"
},
{
- "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xc1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART3",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
- "UMask": "0x04",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x40",
"Unit": "IIO"
},
{
+ "BriefDescription": "Total Write Cache Occupancy : Any Source",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x0F",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.ANY",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Total Write Cache Occupancy : Snoops",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x0F",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.IV_Q",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops : CLFlush",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.CLFLUSH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": ": All Inserts Outbound (BL, AK, Snoops)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_I_IRP_ALL.OUTBOUND_INSERTS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": ": All Inserts Outbound (BL, AK, Snoops)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_I_IRP_ALL.EVICTS",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Fastpath Requests",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1e",
+ "EventName": "UNC_I_MISC0.FAST_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Fastpath Rejects",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1E",
+ "EventName": "UNC_I_MISC0.FAST_REJ",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Cache Inserts of Read Transactions as Secondary",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1e",
+ "EventName": "UNC_I_MISC0.2ND_RD_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Cache Inserts of Write Transactions as Secondary",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1e",
+ "EventName": "UNC_I_MISC0.2ND_WR_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Cache Inserts of Atomic Transactions as Secondary",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1E",
+ "EventName": "UNC_I_MISC0.2ND_ATOMIC_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Fastpath Transfers From Primary to Secondary",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1E",
+ "EventName": "UNC_I_MISC0.FAST_XFER",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Prefetch Ack Hints From Primary to Secondary",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1E",
+ "EventName": "UNC_I_MISC0.PF_ACK_HINT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Slow path fwpf didn't find prefetch",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1E",
+ "EventName": "UNC_I_MISC0.SLOWPATH_FWPF_NO_PRF",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Slow Transfer of I Line",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1f",
+ "EventName": "UNC_I_MISC1.SLOW_I",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Slow Transfer of S Line",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1f",
+ "EventName": "UNC_I_MISC1.SLOW_S",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Slow Transfer of E Line",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1f",
+ "EventName": "UNC_I_MISC1.SLOW_E",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Slow Transfer of M Line",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1f",
+ "EventName": "UNC_I_MISC1.SLOW_M",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Received Invalid",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1F",
+ "EventName": "UNC_I_MISC1.SEC_RCVD_INVLD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Received Valid",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1F",
+ "EventName": "UNC_I_MISC1.SEC_RCVD_VLD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions : P2P reads",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.RD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions : P2P Writes",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.WR",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions : P2P Message",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.MSG",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions : P2P completions",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.CMPL",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions : Match if remote only",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.REM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions : match if remote and target matches",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.REM_AND_TGT_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions : match if local only",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.LOC",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions : match if local and target matches",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.LOC_AND_TGT_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : Miss",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.MISS",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : Hit I",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_I",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : Hit E or S",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_ES",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : SnpCode",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.SNPCODE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : SnpData",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.SNPDATA",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : SnpInv",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.SNPINV",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count : Writes",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.WRITES",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count : Atomic",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.ATOMIC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count : Other",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.OTHER",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count : Select Source",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.ORDERINGQ",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "M2M to iMC Bypass : Taken",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x22",
+ "EventName": "UNC_M2M_BYPASS_M2M_EGRESS.TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC Bypass : Taken",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_BYPASS_M2M_INGRESS.TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC Bypass : Not Taken",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_BYPASS_M2M_INGRESS.NOT_TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit : On Dirty Line in I State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_I",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit : On Dirty Line in S State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_S",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit : On Dirty Line in L State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_P",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit : On Dirty Line in A State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_A",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit : On NonDirty Line in I State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_I",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit : On NonDirty Line in S State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_S",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit : On NonDirty Line in L State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_P",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit : On NonDirty Line in A State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_A",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss : On Dirty Line in I State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_I",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss : On Dirty Line in S State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_S",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss : On Dirty Line in L State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_P",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss : On Dirty Line in A State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_A",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss : On NonDirty Line in I State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_I",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss : On NonDirty Line in S State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_S",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss : On NonDirty Line in L State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_P",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss : On NonDirty Line in A State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_A",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : Normal Priority - Ch0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH0_NORMAL",
+ "PerPkg": "1",
+ "UMask": "0x0101",
+ "UMaskExt": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : Critical Priority - Ch0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH0_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x0102",
+ "UMaskExt": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : All, regardless of priority. - Ch0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH0_ALL",
+ "PerPkg": "1",
+ "UMask": "0x0104",
+ "UMaskExt": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : From TGR - Ch0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH0_FROM_TGR",
+ "PerPkg": "1",
+ "UMask": "0x0140",
+ "UMaskExt": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : Normal Priority - Ch1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH1_NORMAL",
+ "PerPkg": "1",
+ "UMask": "0x0201",
+ "UMaskExt": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : Critical Priority - Ch1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH1_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x0202",
+ "UMaskExt": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : All, regardless of priority. - Ch1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH1_ALL",
+ "PerPkg": "1",
+ "UMask": "0x0204",
+ "UMaskExt": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : From TGR - Ch1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH1_FROM_TGR",
+ "PerPkg": "1",
+ "UMask": "0x0240",
+ "UMaskExt": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : From TGR - Ch2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH2_FROM_TGR",
+ "PerPkg": "1",
+ "UMask": "0x0440",
+ "UMaskExt": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Full Line Non-ISOCH - Ch0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_FULL",
+ "PerPkg": "1",
+ "UMask": "0x0401",
+ "UMaskExt": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Partial Non-ISOCH - Ch0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_PARTIAL",
+ "PerPkg": "1",
+ "UMask": "0x0402",
+ "UMaskExt": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : ISOCH Full Line - Ch0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_FULL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x0404",
+ "UMaskExt": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : ISOCH Partial - Ch0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_PARTIAL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x0408",
+ "UMaskExt": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : All Writes - Ch0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_ALL",
+ "PerPkg": "1",
+ "UMask": "0x0410",
+ "UMaskExt": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : From TGR - Ch0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_FROM_TGR",
+ "PerPkg": "1",
+ "UMaskExt": "0x05",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Non-Inclusive - Ch0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_NI",
+ "PerPkg": "1",
+ "UMaskExt": "0x06",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Full Line Non-ISOCH - Ch1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_FULL",
+ "PerPkg": "1",
+ "UMask": "0x0801",
+ "UMaskExt": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Partial Non-ISOCH - Ch1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_PARTIAL",
+ "PerPkg": "1",
+ "UMask": "0x0802",
+ "UMaskExt": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : ISOCH Full Line - Ch1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_FULL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x0804",
+ "UMaskExt": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : ISOCH Partial - Ch1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_PARTIAL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x0808",
+ "UMaskExt": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : All Writes - Ch1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_ALL",
+ "PerPkg": "1",
+ "UMask": "0x0810",
+ "UMaskExt": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : From TGR - Ch1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_FROM_TGR",
+ "PerPkg": "1",
+ "UMaskExt": "0x09",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Non-Inclusive - Ch1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_NI",
+ "PerPkg": "1",
+ "UMaskExt": "0x0A",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number Packet Header Matches : Mesh Match",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M2M_PKT_MATCH.MESH",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number Packet Header Matches : MC Match",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M2M_PKT_MATCH.MC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Regular : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x43",
+ "EventName": "UNC_M2M_RPQ_NO_REG_CRD.CH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Regular : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x43",
+ "EventName": "UNC_M2M_RPQ_NO_REG_CRD.CH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Regular : Channel 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x43",
+ "EventName": "UNC_M2M_RPQ_NO_REG_CRD.CH2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Special : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2M_RPQ_NO_SPEC_CRD.CH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Special : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2M_RPQ_NO_SPEC_CRD.CH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Special : Channel 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2M_RPQ_NO_SPEC_CRD.CH2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Full : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2M_TRACKER_FULL.CH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Full : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2M_TRACKER_FULL.CH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Full : Channel 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2M_TRACKER_FULL.CH2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Inserts : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x49",
+ "EventName": "UNC_M2M_TRACKER_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Inserts : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x49",
+ "EventName": "UNC_M2M_TRACKER_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Inserts : Channel 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x49",
+ "EventName": "UNC_M2M_TRACKER_INSERTS.CH2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2M_TRACKER_NE.CH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2M_TRACKER_NE.CH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty : Channel 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2M_TRACKER_NE.CH2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy : Channel 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Outbound Ring Transactions on AK : NDR Transactions",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x39",
+ "EventName": "UNC_M2M_TxC_AK.NDR",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Outbound Ring Transactions on AK : CRD Transactions to Cbo",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x39",
+ "EventName": "UNC_M2M_TxC_AK.CRD_CBO",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Credit Acquired : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M2M_TxC_AK_CREDITS_ACQUIRED.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Credit Acquired : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M2M_TxC_AK_CREDITS_ACQUIRED.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.RDCRD0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCRD0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCMP0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.RDCRD1",
+ "PerPkg": "1",
+ "UMask": "0x88",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCRD1",
+ "PerPkg": "1",
+ "UMask": "0x90",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCMP1",
+ "PerPkg": "1",
+ "UMask": "0xA0",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full : All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.RDCRD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.WRCRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.WRCMP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty : All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.RDCRD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.WRCRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.WRCMP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.PREF_RD_CAM_HIT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations : All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No AK Egress (to CMS) Credits : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1F",
+ "EventName": "UNC_M2M_TxC_AK_NO_CREDIT_CYCLES.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No AK Egress (to CMS) Credits : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1F",
+ "EventName": "UNC_M2M_TxC_AK_NO_CREDIT_CYCLES.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No AK Egress (to CMS) Credits : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_M2M_TxC_AK_NO_CREDIT_STALLED.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No AK Egress (to CMS) Credits : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_M2M_TxC_AK_NO_CREDIT_STALLED.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.RDCRD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.WRCRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.WRCMP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy : All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache : Data to Cache",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2M_TxC_BL.DRS_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache : Data to Core",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2M_TxC_BL.DRS_CORE",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache : Data to QPI",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2M_TxC_BL.DRS_UPI",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Credit Acquired : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2M_TxC_BL_CREDITS_ACQUIRED.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Credit Acquired : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2M_TxC_BL_CREDITS_ACQUIRED.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Full : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_FULL.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Full : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_FULL.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Full : All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Not Empty : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_NE.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Not Empty : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_NE.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Not Empty : All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Allocations : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2M_TxC_BL_INSERTS.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Allocations : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2M_TxC_BL_INSERTS.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No BL Egress (to CMS) Credits : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1B",
+ "EventName": "UNC_M2M_TxC_BL_NO_CREDIT_CYCLES.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No BL Egress (to CMS) Credits : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1B",
+ "EventName": "UNC_M2M_TxC_BL_NO_CREDIT_CYCLES.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No BL Egress (to CMS) Credits : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M2M_TxC_BL_NO_CREDIT_STALLED.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No BL Egress (to CMS) Credits : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M2M_TxC_BL_NO_CREDIT_STALLED.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "WPQ Flush : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2M_WPQ_FLUSH.CH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "WPQ Flush : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2M_WPQ_FLUSH.CH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "WPQ Flush : Channel 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2M_WPQ_FLUSH.CH2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M2M_WPQ_NO_REG_CRD.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M2M_WPQ_NO_REG_CRD.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular : Channel 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M2M_WPQ_NO_REG_CRD.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Special : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M2M_WPQ_NO_SPEC_CRD.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Special : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M2M_WPQ_NO_SPEC_CRD.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Special : Channel 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M2M_WPQ_NO_SPEC_CRD.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Full : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M2M_WR_TRACKER_FULL.CH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Full : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M2M_WR_TRACKER_FULL.CH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Full : Channel 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M2M_WR_TRACKER_FULL.CH2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Full : Mirror",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M2M_WR_TRACKER_FULL.MIRR",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Inserts : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2M_WR_TRACKER_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Inserts : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2M_WR_TRACKER_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Inserts : Channel 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2M_WR_TRACKER_INSERTS.CH2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M2M_WR_TRACKER_NE.CH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M2M_WR_TRACKER_NE.CH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty : Channel 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M2M_WR_TRACKER_NE.CH2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty : Mirror",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M2M_WR_TRACKER_NE.MIRR",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Non-Posted Inserts : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x63",
+ "EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Non-Posted Inserts : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x63",
+ "EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Non-Posted Inserts : Channel 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x63",
+ "EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_INSERTS.CH2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Non-Posted Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x62",
+ "EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Non-Posted Occupancy : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x62",
+ "EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Non-Posted Occupancy : Channel 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x62",
+ "EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_OCCUPANCY.CH2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x55",
+ "EventName": "UNC_M2M_WR_TRACKER_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Occupancy : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x55",
+ "EventName": "UNC_M2M_WR_TRACKER_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Occupancy : Channel 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x55",
+ "EventName": "UNC_M2M_WR_TRACKER_OCCUPANCY.CH2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Occupancy : Mirror",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x55",
+ "EventName": "UNC_M2M_WR_TRACKER_OCCUPANCY.MIRR",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Posted Inserts : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M2M_WR_TRACKER_POSTED_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Posted Inserts : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M2M_WR_TRACKER_POSTED_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Posted Inserts : Channel 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M2M_WR_TRACKER_POSTED_INSERTS.CH2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Posted Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M2M_WR_TRACKER_POSTED_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Posted Occupancy : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M2M_WR_TRACKER_POSTED_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Posted Occupancy : Channel 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M2M_WR_TRACKER_POSTED_OCCUPANCY.CH2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credit Acquired : DRS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.DRS_0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credit Acquired : DRS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.DRS_1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credit Acquired : NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCB_0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credit Acquired : NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCB_1",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credit Acquired : NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCS_0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credit Acquired : NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCS_1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Failed to Acquire a Credit : DRS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_M2P_IIO_CREDITS_REJECT.DRS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Failed to Acquire a Credit : NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_M2P_IIO_CREDITS_REJECT.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Failed to Acquire a Credit : NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_M2P_IIO_CREDITS_REJECT.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credits in Use : DRS to CMS Port 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2P_IIO_CREDITS_USED.DRS_0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credits in Use : DRS to CMS Port 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2P_IIO_CREDITS_USED.DRS_1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credits in Use : NCB to CMS Port 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2P_IIO_CREDITS_USED.NCB_0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credits in Use : NCB to CMS Port 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2P_IIO_CREDITS_USED.NCB_1",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credits in Use : NCS to CMS Port 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2P_IIO_CREDITS_USED.NCS_0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credits in Use : NCS to CMS Port 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2P_IIO_CREDITS_USED.NCS_1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.IIO_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.IIO_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.IIO_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.IIO_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_TxC_CYCLES_FULL.AD_0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_TxC_CYCLES_FULL.AK_0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_TxC_CYCLES_FULL.BL_0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_TxC_CYCLES_FULL.AD_1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_TxC_CYCLES_FULL.AK_1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_TxC_CYCLES_FULL.BL_1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_TxC_CYCLES_NE.AD_0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_TxC_CYCLES_NE.AK_0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_TxC_CYCLES_NE.BL_0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_TxC_CYCLES_NE.AD_1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_TxC_CYCLES_NE.AK_1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_TxC_CYCLES_NE.BL_1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_TxC_INSERTS.AD_0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_TxC_INSERTS.BL_0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_TxC_INSERTS.AK_CRD_0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_TxC_INSERTS.AD_1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_TxC_INSERTS.BL_1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_TxC_INSERTS.AK_CRD_1",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty : VNA Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x22",
+ "EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty : Writebacks",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x22",
+ "EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.WB",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty : Requests",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x22",
+ "EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.REQ",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty : Snoops",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x22",
+ "EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.SNP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty : IIO2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty : IIO3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO3_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty : IIO4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO4_NCB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty : IIO5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO5_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty : All IIO targets for NCS are in single mask. ORs them together",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty : Selected M2p BL NCS credits",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.NCS_SEL",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received : AD - Slot 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x3E",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received : AD - Slot 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x3E",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received : AD - Slot 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x3E",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received : BL - Slot 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x3E",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.BL_SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received : AK - Slot 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x3E",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AK_SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received : AK - Slot 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x3E",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AK_SLOT2",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0 : REQ on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0 : SNP on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0 : RSP on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0 : RSP on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0 : WB on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0 : NCB on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0 : NCS on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1 : REQ on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1 : SNP on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1 : RSP on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1 : RSP on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1 : WB on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1 : NCB on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1 : NCS on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous : No Progress on Pending AD VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_AD_VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous : No Progress on Pending AD VN1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_AD_VN1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous : No Progress on Pending BL VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_BL_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous : No Progress on Pending BL VN1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_BL_VN1",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous : AD, BL Parallel Win VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.ADBL_PARALLEL_WIN_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous : AD, BL Parallel Win VN1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.ADBL_PARALLEL_WIN_VN1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous : VN0, VN1 Parallel Win",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.VN01_PARALLEL_WIN",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous : Max Parallel Win",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.ALL_PARALLEL_WIN",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0 : REQ on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0 : SNP on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0 : RSP on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0 : RSP on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0 : WB on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0 : NCB on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0 : NCS on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1 : REQ on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1 : SNP on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1 : RSP on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1 : RSP on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1 : WB on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1 : NCB on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1 : NCS on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0 : REQ on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0 : SNP on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0 : RSP on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0 : RSP on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0 : WB on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0 : NCB on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0 : NCS on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1 : REQ on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1 : SNP on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1 : RSP on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1 : RSP on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1 : WB on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1 : NCB on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1 : NCS on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Ingress Queue Bypasses : AD to Slot 0 on Idle",
+ "Counter": "0,1,2",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S0_IDLE",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Ingress Queue Bypasses : AD to Slot 0 on BL Arb",
+ "Counter": "0,1,2",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S0_BL_ARB",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Ingress Queue Bypasses : AD + BL to Slot 1",
+ "Counter": "0,1,2",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S1_BL_SLOT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Ingress Queue Bypasses : AD + BL to Slot 2",
+ "Counter": "0,1,2",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S2_BL_SLOT",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Credit Events : Any In BGF FIFO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M3UPI_RxC_CRD_MISC.ANY_BGF_FIFO",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Credit Events : Any in BGF Path",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M3UPI_RxC_CRD_MISC.ANY_BGF_PATH",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Credit Events : No D2K For Arb",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M3UPI_RxC_CRD_MISC.VN0_NO_D2K_FOR_ARB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Credit Events",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M3UPI_RxC_CRD_MISC.VN1_NO_D2K_FOR_ARB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Credit Events",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M3UPI_RxC_CRD_MISC.LT1_FOR_D2K",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Credit Events",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M3UPI_RxC_CRD_MISC.LT2_FOR_D2K",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy : VNA In Use",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.VNA_IN_USE",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy : Packets in BGF FIFO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.FLITS_IN_FIFO",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy : Packets in BGF Path",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.FLITS_IN_PATH",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy : Transmit Credits",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.TxQ_CRD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy : D2K Credits",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.D2K_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.P1P_TOTAL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.P1P_FIFO",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy : Credits Consumed",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.CONSUMED",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : REQ on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : SNP on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : RSP on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : RSP on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : WB on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : NCB on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : NCS on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty : REQ on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty : SNP on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty : RSP on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty : RSP on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty : WB on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty : NCB on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty : NCS on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Data Flit Not Sent : All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_DATA_FLITS_NOT_SENT.ALL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Data Flit Not Sent : TSV High",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_DATA_FLITS_NOT_SENT.TSV_HI",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Data Flit Not Sent : Cycle valid for Flit",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_DATA_FLITS_NOT_SENT.VALID_FOR_FLIT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Data Flit Not Sent : No BGF Credits",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_DATA_FLITS_NOT_SENT.NO_BGF",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Data Flit Not Sent : No TxQ Credits",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_DATA_FLITS_NOT_SENT.NO_TXQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence : Wait on Pump 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P0_WAIT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence : Wait on Pump 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1_WAIT",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_TO_LIMBO",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_BUSY",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_AT_LIMIT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_HOLD_P0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_FIFO_FULL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_RECEIVED",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_RECEIVED",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_WITHDRAWN",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_WITHDRAWN",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_IN_HOLDOFF",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_IN_HOLDOFF",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_IN_SERVICE",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_IN_SERVICE",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit : All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.ALL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit : Needs Data Flit",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.NEED_DATA",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit : Wait on Pump 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P0_WAIT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit : Wait on Pump 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_WAIT",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1 - Bubble",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ_BUT_BUBBLE",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1 - Not Avail",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ_NOT_AVAIL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1 : Acumullate",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1 : Accumulate Ready",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM_READ",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1 : Accumulate Wasted",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM_WASTED",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1 : Run-Ahead - Blocked",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_BLOCKED",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1 : Run-Ahead - Message",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_MSG1_DURING",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_MSG2_AFTER",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_MSG2_SENT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_MSG1_AFTER",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 2 : Rate-matching Stall",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.RMSTALL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 2 : Rate-matching Stall - No Message",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.RMSTALL_NOMSG",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 2 : Parallel Ok",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.PAR",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 2 : Parallel Message",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.PAR_MSG",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 2 : Parallel Flit Finished",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.PAR_FLIT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit : One Message",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x54",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.1_MSG",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit : Two Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x54",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.2_MSGS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit : Three Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x54",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.3_MSGS",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit : One Message in non-VNA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x54",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.1_MSG_VNX",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit : One Slot Taken",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x54",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.SLOTS_1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit : Two Slots Taken",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x54",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.SLOTS_2",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit : All Slots Taken",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x54",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.SLOTS_3",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent : All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.ALL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent : TSV High",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.TSV_HI",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent : Cycle valid for Flit",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.VALID_FOR_FLIT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent : No BGF Credits",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.NO_BGF_CRD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent : No TxQ Credits",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.NO_TXQ_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent : No BGF Credits + No Extra Message Slotted",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.NO_BGF_NO_MSG",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent : No TxQ Credits + No Extra Message Slotted",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.NO_TXQ_NO_MSG",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held : VN0",
+ "Counter": "0,1,2",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_HELD.VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held : VN1",
+ "Counter": "0,1,2",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_HELD.VN1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held : Parallel Attempt",
+ "Counter": "0,1,2",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_ATTEMPT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held : Parallel Success",
+ "Counter": "0,1,2",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_SUCCESS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held : Can't Slot AD",
+ "Counter": "0,1,2",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_HELD.CANT_SLOT_AD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held : Can't Slot BL",
+ "Counter": "0,1,2",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_HELD.CANT_SLOT_BL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts : REQ on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts : SNP on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts : RSP on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts : RSP on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts : WB on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts : NCB on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts : NCS on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts : REQ on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts : SNP on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts : RSP on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts : RSP on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts : WB on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts : NCB on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts : NCS on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy : REQ on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy : SNP on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy : RSP on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy : RSP on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy : WB on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy : NCB on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy : NCS on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy : REQ on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy : SNP on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy : RSP on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy : RSP on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy : WB on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy : NCB on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy : NCS on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit : REQ on AD",
+ "Counter": "0,1,2",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit : SNP on AD",
+ "Counter": "0,1,2",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit : RSP on AD",
+ "Counter": "0,1,2",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit : RSP on BL",
+ "Counter": "0,1,2",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit : WB on BL",
+ "Counter": "0,1,2",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit : NCB on BL",
+ "Counter": "0,1,2",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit : NCS on BL",
+ "Counter": "0,1,2",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit : REQ on AD",
+ "Counter": "0,1,2",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit : SNP on AD",
+ "Counter": "0,1,2",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit : RSP on AD",
+ "Counter": "0,1,2",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit : RSP on BL",
+ "Counter": "0,1,2",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit : WB on BL",
+ "Counter": "0,1,2",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit : NCB on BL",
+ "Counter": "0,1,2",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit : NCS on BL",
+ "Counter": "0,1,2",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits : Corrected",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5A",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.CORRECTED",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits : Level < 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5A",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.LT1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits : Level < 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5A",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.LT4",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits : Level < 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5A",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.LT5",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits : Level < 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5A",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.LT10",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits : Any In Use",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5A",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.ANY_IN_USE",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.REQ_VN01_ALLOC_LT10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.REQ_VN01_ALLOC_LT10",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.REQ_ADBL_ALLOC_L5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.REQ_ADBL_ALLOC_L5",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_ONLY",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_ONLY",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_ONLY",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_ONLY",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_JUST_AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_JUST_AD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_JUST_BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_JUST_BL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_JUST_AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_JUST_AD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_JUST_BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_JUST_BL",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD : VN0 REQ Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD : VN0 SNP Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD : VN0 RSP Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD : VN0 WB Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD : VN1 REQ Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_REQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD : VN1 SNP Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_SNP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD : VN1 RSP Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD : VN1 WB Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_WB",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD FlowQ Bypass",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD FlowQ Bypass",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD FlowQ Bypass",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD FlowQ Bypass",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.BL_EARLY_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty : VN0 REQ Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty : VN0 SNP Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty : VN0 RSP Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty : VN0 WB Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty : VN1 REQ Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_REQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty : VN1 SNP Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_SNP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty : VN1 RSP Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty : VN1 WB Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_WB",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts : VN0 REQ Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts : VN0 SNP Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts : VN0 RSP Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts : VN0 WB Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts : VN1 REQ Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_REQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts : VN1 SNP Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_SNP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts : VN1 RSP Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy : VN0 REQ Messages",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy : VN0 SNP Messages",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy : VN0 RSP Messages",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy : VN0 WB Messages",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy : VN1 REQ Messages",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_REQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy : VN1 SNP Messages",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_SNP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy : VN1 RSP Messages",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL : VN0 RSP Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL : VN0 WB Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL : VN0 NCB Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL : VN0 NCS Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL : VN1 RSP Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL : VN1 WB Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_WB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL : VN1 NCS Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL : VN1 NCB Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty : VN0 REQ Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty : VN0 SNP Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty : VN0 RSP Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty : VN0 WB Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty : VN1 REQ Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_REQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty : VN1 SNP Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_SNP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty : VN1 RSP Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty : VN1 WB Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_WB",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts : VN0 RSP Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts : VN0 WB Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts : VN0 NCB Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts : VN0 NCS Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts : VN1 RSP Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts : VN1 WB Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts : VN1_NCS Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_WB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts : VN1_NCB Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN0 RSP Messages",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN0 WB Messages",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN0 NCB Messages",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN0 NCS Messages",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN1 RSP Messages",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN1 WB Messages",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_WB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN1_NCS Messages",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN1_NCB Messages",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN0 RSP Messages",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1F",
+ "EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN0_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN0 WB Messages",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1F",
+ "EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN0_THROUGH",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN0 NCB Messages",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1F",
+ "EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN0_WRPULL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN1 RSP Messages",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1F",
+ "EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN1_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN1 WB Messages",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1F",
+ "EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN1_THROUGH",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN1_NCS Messages",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1F",
+ "EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN1_WRPULL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty : VNA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty : VN0 REQ Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_REQ",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty : VN0 SNP Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_SNP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty : VN0 RSP Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty : VN1 REQ Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_REQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty : VN1 SNP Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_SNP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty : VN1 RSP Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty : VNA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty : VN0 REQ Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty : VN0 RSP Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_NCS_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty : VN0 SNP Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty : VN1 REQ Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty : VN1 RSP Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_NCS_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty : VN1 SNP Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_WB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used : REQ on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5B",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used : SNP on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5B",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used : RSP on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5B",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used : RSP on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5B",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.WB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used : WB on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5B",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used : NCB on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5B",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits : REQ on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits : SNP on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits : RSP on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits : RSP on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.WB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits : WB on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits : NCB on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used : REQ on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used : SNP on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used : RSP on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used : RSP on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.WB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used : WB on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used : NCB on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits : REQ on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits : SNP on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits : RSP on AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits : RSP on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.WB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits : WB on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits : NCB on BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_GT_LOCALDEST_VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x7E",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_GT_LOCALDEST_VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_EQ_LOCALDEST_VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x7E",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_EQ_LOCALDEST_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_LT_LOCALDEST_VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x7E",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_LT_LOCALDEST_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_GT_LOCALDEST_VN1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x7E",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_GT_LOCALDEST_VN1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_EQ_LOCALDEST_VN1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x7E",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_EQ_LOCALDEST_VN1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_LT_LOCALDEST_VN1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x7E",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_LT_LOCALDEST_VN1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_GT_LOCALDEST_VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x7E",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_GT_LOCALDEST_VN0",
+ "PerPkg": "1",
+ "UMask": "0x81",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_EQ_LOCALDEST_VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x7E",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_EQ_LOCALDEST_VN0",
+ "PerPkg": "1",
+ "UMask": "0x82",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_LT_LOCALDEST_VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x7E",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_LT_LOCALDEST_VN0",
+ "PerPkg": "1",
+ "UMask": "0x84",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_GT_LOCALDEST_VN1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x7E",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_GT_LOCALDEST_VN1",
+ "PerPkg": "1",
+ "UMask": "0x90",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_EQ_LOCALDEST_VN1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x7E",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_EQ_LOCALDEST_VN1",
+ "PerPkg": "1",
+ "UMask": "0xA0",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_LT_LOCALDEST_VN1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x7E",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_LT_LOCALDEST_VN1",
+ "PerPkg": "1",
+ "UMask": "0xC0",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_PENDING.LOCALDEST_VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x7D",
+ "EventName": "UNC_M3UPI_WB_PENDING.LOCALDEST_VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_PENDING.ROUTETHRU_VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x7D",
+ "EventName": "UNC_M3UPI_WB_PENDING.ROUTETHRU_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_PENDING.LOCAL_AND_RT_VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x7D",
+ "EventName": "UNC_M3UPI_WB_PENDING.LOCAL_AND_RT_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_PENDING.WAITING4PULL_VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x7D",
+ "EventName": "UNC_M3UPI_WB_PENDING.WAITING4PULL_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_PENDING.LOCALDEST_VN1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x7D",
+ "EventName": "UNC_M3UPI_WB_PENDING.LOCALDEST_VN1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_PENDING.ROUTETHRU_VN1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x7D",
+ "EventName": "UNC_M3UPI_WB_PENDING.ROUTETHRU_VN1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_PENDING.LOCAL_AND_RT_VN1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x7D",
+ "EventName": "UNC_M3UPI_WB_PENDING.LOCAL_AND_RT_VN1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_PENDING.WAITING4PULL_VN1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x7D",
+ "EventName": "UNC_M3UPI_WB_PENDING.WAITING4PULL_VN1",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_XPT_PFTCH.ARRIVED",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_XPT_PFTCH.ARRIVED",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_XPT_PFTCH.BYPASS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_XPT_PFTCH.BYPASS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_XPT_PFTCH.ARB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_XPT_PFTCH.ARB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_XPT_PFTCH.LOST_ARB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_XPT_PFTCH.LOST_ARB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_XPT_PFTCH.FLITTED",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_XPT_PFTCH.FLITTED",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_XPT_PFTCH.LOST_OLD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_XPT_PFTCH.LOST_OLD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_XPT_PFTCH.LOST_QFULL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_XPT_PFTCH.LOST_QFULL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Received : VLW",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.VLW_RCVD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received : MSI",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.MSI_RCVD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received : IPI",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.IPI_RCVD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received : Doorbell",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.DOORBELL_RCVD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received : Interrupt",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.INT_PRIO",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Cycles PHOLD Assert to Ack : Assert to ACK",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x45",
+ "EventName": "UNC_U_PHOLD_CYCLES.ASSERT_TO_ACK",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_RACU_DRNG.RDRAND",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4C",
+ "EventName": "UNC_U_RACU_DRNG.RDRAND",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_RACU_DRNG.RDSEED",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4C",
+ "EventName": "UNC_U_RACU_DRNG.RDSEED",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_RACU_DRNG.PFTCH_BUF_EMPTY",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4C",
+ "EventName": "UNC_U_RACU_DRNG.PFTCH_BUF_EMPTY",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Direct packet attempts : D2C",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2C",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Direct packet attempts : D2K",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2K",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.BL_VNA_EQ0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.BL_VNA_EQ0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ2",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ3",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.BGF_CRD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.BGF_CRD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.GV_BLOCK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.GV_BLOCK",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_BTW_2_THRESH",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_BTW_2_THRESH",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_BTW_0_THRESH",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_BTW_0_THRESH",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.BGF_CRD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.BGF_CRD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.GV_BLOCK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.GV_BLOCK",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VNA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x46",
+ "EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VNA",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x46",
+ "EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VN1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x46",
+ "EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VN1",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.ACK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x46",
+ "EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.ACK",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.REQ",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Request, Match Opcode",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.REQ_OPC",
+ "PerPkg": "1",
+ "UMask": "0x108",
+ "UMaskExt": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Snoop",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.SNP",
+ "PerPkg": "1",
+ "UMask": "0x09",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Snoop, Match Opcode",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.SNP_OPC",
+ "PerPkg": "1",
+ "UMask": "0x109",
+ "UMaskExt": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Response - No Data",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_NODATA",
+ "PerPkg": "1",
+ "UMask": "0x0A",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Response - No Data, Match Opcode",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_NODATA_OPC",
+ "PerPkg": "1",
+ "UMask": "0x10A",
+ "UMaskExt": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Response - Data",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_DATA",
+ "PerPkg": "1",
+ "UMask": "0x0C",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Response - Data, Match Opcode",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_DATA_OPC",
+ "PerPkg": "1",
+ "UMask": "0x10C",
+ "UMaskExt": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Writeback",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.WB",
+ "PerPkg": "1",
+ "UMask": "0x0D",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Writeback, Match Opcode",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.WB_OPC",
+ "PerPkg": "1",
+ "UMask": "0x10D",
+ "UMaskExt": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB",
+ "PerPkg": "1",
+ "UMask": "0x0E",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass, Match Opcode",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB_OPC",
+ "PerPkg": "1",
+ "UMask": "0x10E",
+ "UMaskExt": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS",
+ "PerPkg": "1",
+ "UMask": "0x0F",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard, Match Opcode",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS_OPC",
+ "PerPkg": "1",
+ "UMask": "0x10F",
+ "UMaskExt": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Response - Conflict",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSPCNFLT",
+ "PerPkg": "1",
+ "UMask": "0x1AA",
+ "UMaskExt": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Response - Invalid",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSPI",
+ "PerPkg": "1",
+ "UMask": "0x12A",
+ "UMaskExt": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Bypassed : Slot 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x31",
+ "EventName": "UNC_UPI_RxL_BYPASSED.SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Bypassed : Slot 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x31",
+ "EventName": "UNC_UPI_RxL_BYPASSED.SLOT1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Bypassed : Slot 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x31",
+ "EventName": "UNC_UPI_RxL_BYPASSED.SLOT2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Slot 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Slot 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.SLOT1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Slot 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.SLOT2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Data",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.DATA",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : LLCRD Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.LLCRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Slot NULL or LLCRD Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.NULL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : LLCTRL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.LLCTRL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Protocol Header",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.PROTHDR",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Null FLITs received from any slot",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.IDLE",
+ "PerPkg": "1",
+ "UMask": "0x47",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Allocations : Slot 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x30",
+ "EventName": "UNC_UPI_RxL_INSERTS.SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Allocations : Slot 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x30",
+ "EventName": "UNC_UPI_RxL_INSERTS.SLOT1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Allocations : Slot 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x30",
+ "EventName": "UNC_UPI_RxL_INSERTS.SLOT2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets : Slot 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x32",
+ "EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets : Slot 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x32",
+ "EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets : Slot 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x32",
+ "EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ1",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ2",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ2",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.CFG_CTL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.CFG_CTL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_BYPASS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_BYPASS",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_CRED",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_CRED",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.TXQ",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.TXQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RETRY",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RETRY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.DFX",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.DFX",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.SPARE",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.SPARE",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.REQ",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Request, Match Opcode",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.REQ_OPC",
+ "PerPkg": "1",
+ "UMask": "0x108",
+ "UMaskExt": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Snoop",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.SNP",
+ "PerPkg": "1",
+ "UMask": "0x09",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Snoop, Match Opcode",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.SNP_OPC",
+ "PerPkg": "1",
+ "UMask": "0x109",
+ "UMaskExt": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Response - No Data",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_NODATA",
+ "PerPkg": "1",
+ "UMask": "0x0A",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Response - No Data, Match Opcode",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_NODATA_OPC",
+ "PerPkg": "1",
+ "UMask": "0x10A",
+ "UMaskExt": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Response - Data",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_DATA",
+ "PerPkg": "1",
+ "UMask": "0x0C",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Response - Data, Match Opcode",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_DATA_OPC",
+ "PerPkg": "1",
+ "UMask": "0x10C",
+ "UMaskExt": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Writeback",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.WB",
+ "PerPkg": "1",
+ "UMask": "0x0D",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Writeback, Match Opcode",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.WB_OPC",
+ "PerPkg": "1",
+ "UMask": "0x10D",
+ "UMaskExt": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB",
+ "PerPkg": "1",
+ "UMask": "0x0E",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass, Match Opcode",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB_OPC",
+ "PerPkg": "1",
+ "UMask": "0x10E",
+ "UMaskExt": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS",
+ "PerPkg": "1",
+ "UMask": "0x0F",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard, Match Opcode",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS_OPC",
+ "PerPkg": "1",
+ "UMask": "0x10F",
+ "UMaskExt": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Response - Conflict",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSPCNFLT",
+ "PerPkg": "1",
+ "UMask": "0x1AA",
+ "UMaskExt": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Response - Invalid",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSPI",
+ "PerPkg": "1",
+ "UMask": "0x12A",
+ "UMaskExt": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Slot 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Slot 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.SLOT1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Slot 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.SLOT2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Data",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.DATA",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : LLCRD Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.LLCRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Slot NULL or LLCRD Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.NULL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : LLCTRL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.LLCTRL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Protocol Header",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.PROTHDR",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Idle",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.IDLE",
+ "PerPkg": "1",
+ "UMask": "0x47",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Cache Lookups : I State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.I",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : SnoopFilter - S State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.SF_S",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : SnoopFilter - E State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.SF_E",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : SnoopFilter - H State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.SF_H",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : S State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.S",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : E State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.E",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : M State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.M",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : F State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.F",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : RFO Requests",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.RFO",
+ "PerPkg": "1",
+ "UMask": "0x1BC8FF",
+ "UMaskExt": "0x1BC8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : IRQ - iA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IRQ_IA",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : SF/LLC Evictions",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.EVICT",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : PRQ - IOSF",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.PRQ_IOSF",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : IPQ",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : IRQ - Non iA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IRQ_NON_IA",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : PRQ - Non IOSF",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.PRQ_NON_IOSF",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RRQ",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.RRQ",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WBQ",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.WBQ",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All from Local IO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.LOC_IO",
+ "PerPkg": "1",
+ "UMask": "0xC000FF04",
+ "UMaskExt": "0xC000FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All from Local iA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.LOC_IA",
+ "PerPkg": "1",
+ "UMask": "0xC000FF01",
+ "UMaskExt": "0xC000FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All from Local iA and IO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.LOC_ALL",
+ "PerPkg": "1",
+ "UMask": "0xC000FF05",
+ "UMaskExt": "0xC000FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just Hits",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.HIT",
+ "PerPkg": "1",
+ "UMaskExt": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just Misses",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.MISS",
+ "PerPkg": "1",
+ "UMaskExt": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.DDR",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.DDR4",
+ "PerPkg": "1",
+ "UMaskExt": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : MMCFG Access",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.MMCFG",
+ "PerPkg": "1",
+ "UMaskExt": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just Local Targets",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.LOCAL_TGT",
+ "PerPkg": "1",
+ "UMaskExt": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just Remote Targets",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.REMOTE_TGT",
+ "PerPkg": "1",
+ "UMaskExt": "0x100",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Match the Opcode in b[29:19] of the extended umask field",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.MATCH_OPC",
+ "PerPkg": "1",
+ "UMaskExt": "0x200",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Match the PreMorphed Opcode in b[29:19] of the extended umask field",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.PREMORPH_OPC",
+ "PerPkg": "1",
+ "UMaskExt": "0x400",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just NearMem",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.NEARMEM",
+ "PerPkg": "1",
+ "UMaskExt": "0x400000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just NotNearMem",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.NOT_NEARMEM",
+ "PerPkg": "1",
+ "UMaskExt": "0x800000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just NonCoherent",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.NONCOH",
+ "PerPkg": "1",
+ "UMaskExt": "0x1000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just ISOC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.ISOC",
+ "PerPkg": "1",
+ "UMaskExt": "0x2000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : IRQ - iA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IRQ_IA",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : SF/LLC Evictions",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.EVICT",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : PRQ - IOSF",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : IPQ",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : IRQ - Non iA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IRQ_NON_IA",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : PRQ - Non IOSF",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.PRQ_NON_IOSF",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All from Local IO",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_IO",
+ "PerPkg": "1",
+ "UMask": "0xC000FF04",
+ "UMaskExt": "0xC000FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All from Local iA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_IA",
+ "PerPkg": "1",
+ "UMask": "0xC000FF01",
+ "UMaskExt": "0xC000FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All from Local iA and IO",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_ALL",
+ "PerPkg": "1",
+ "UMask": "0xC000FF05",
+ "UMaskExt": "0xC000FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just Hits",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.HIT",
+ "PerPkg": "1",
+ "UMaskExt": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just Misses",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.MISS",
+ "PerPkg": "1",
+ "UMaskExt": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : MMCFG Access",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.MMCFG",
+ "PerPkg": "1",
+ "UMaskExt": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just Local Targets",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.LOCAL_TGT",
+ "PerPkg": "1",
+ "UMaskExt": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just Remote Targets",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.REMOTE_TGT",
+ "PerPkg": "1",
+ "UMaskExt": "0x100",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Match the Opcode in b[29:19] of the extended umask field",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.MATCH_OPC",
+ "PerPkg": "1",
+ "UMaskExt": "0x200",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Match the PreMorphed Opcode in b[29:19] of the extended umask field",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.PREMORPH_OPC",
+ "PerPkg": "1",
+ "UMaskExt": "0x400",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just NearMem",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.NEARMEM",
+ "PerPkg": "1",
+ "UMaskExt": "0x400000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just NotNearMem",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.NOT_NEARMEM",
+ "PerPkg": "1",
+ "UMaskExt": "0x800000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just NonCoherent",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.NONCOH",
+ "PerPkg": "1",
+ "UMaskExt": "0x1000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just ISOC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.ISOC",
+ "PerPkg": "1",
+ "UMaskExt": "0x2000000",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
"Counter": "2,3",
"CounterType": "PGMABLE",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART4",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.IOMMU0",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x10",
+ "PortMask": "0x100",
"UMask": "0x01",
"Unit": "IIO"
},
@@ -1447,46 +15640,46 @@
"Counter": "2,3",
"CounterType": "PGMABLE",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART5",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.IOMMU1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x20",
+ "PortMask": "0x200",
"UMask": "0x01",
"Unit": "IIO"
},
{
- "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card",
"Counter": "2,3",
"CounterType": "PGMABLE",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART6",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.IOMMU0",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x40",
- "UMask": "0x01",
+ "PortMask": "0x100",
+ "UMask": "0x02",
"Unit": "IIO"
},
{
- "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card",
"Counter": "2,3",
"CounterType": "PGMABLE",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART7",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.IOMMU1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x80",
- "UMask": "0x01",
+ "PortMask": "0x200",
+ "UMask": "0x02",
"Unit": "IIO"
},
{
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
"Counter": "2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xc0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART4",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.IOMMU0",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x10",
+ "PortMask": "0x100",
"UMask": "0x04",
"Unit": "IIO"
},
@@ -1494,36 +15687,132 @@
"BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
"Counter": "2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xc0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART5",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.IOMMU1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x20",
+ "PortMask": "0x200",
"UMask": "0x04",
"Unit": "IIO"
},
{
- "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card",
"Counter": "2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xc0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART6",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.IOMMU0",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x40",
- "UMask": "0x04",
+ "PortMask": "0x100",
+ "UMask": "0x08",
"Unit": "IIO"
},
{
- "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card",
"Counter": "2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xc0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART7",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.IOMMU1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x80",
- "UMask": "0x04",
+ "PortMask": "0x200",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x80",
"Unit": "IIO"
},
{
@@ -1531,10 +15820,10 @@
"Counter": "0,1",
"CounterType": "PGMABLE",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART4",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.IOMMU0",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x10",
+ "PortMask": "0x100",
"UMask": "0x01",
"Unit": "IIO"
},
@@ -1543,35 +15832,35 @@
"Counter": "0,1",
"CounterType": "PGMABLE",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART5",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.IOMMU1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x20",
+ "PortMask": "0x200",
"UMask": "0x01",
"Unit": "IIO"
},
{
- "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
"Counter": "0,1",
"CounterType": "PGMABLE",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART6",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.IOMMU0",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x40",
- "UMask": "0x01",
+ "PortMask": "0x100",
+ "UMask": "0x02",
"Unit": "IIO"
},
{
- "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
"Counter": "0,1",
"CounterType": "PGMABLE",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART7",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.IOMMU1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x80",
- "UMask": "0x01",
+ "PortMask": "0x200",
+ "UMask": "0x02",
"Unit": "IIO"
},
{
@@ -1579,10 +15868,10 @@
"Counter": "0,1",
"CounterType": "PGMABLE",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART4",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.IOMMU0",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x10",
+ "PortMask": "0x100",
"UMask": "0x04",
"Unit": "IIO"
},
@@ -1591,71 +15880,83 @@
"Counter": "0,1",
"CounterType": "PGMABLE",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART5",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.IOMMU1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x20",
+ "PortMask": "0x200",
"UMask": "0x04",
"Unit": "IIO"
},
{
- "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
"Counter": "0,1",
"CounterType": "PGMABLE",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART6",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.IOMMU0",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x40",
- "UMask": "0x04",
+ "PortMask": "0x100",
+ "UMask": "0x08",
"Unit": "IIO"
},
{
- "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
"Counter": "0,1",
"CounterType": "PGMABLE",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART7",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.IOMMU1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x80",
- "UMask": "0x04",
+ "PortMask": "0x200",
+ "UMask": "0x08",
"Unit": "IIO"
},
{
- "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
"Counter": "0,1",
"CounterType": "PGMABLE",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART4",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.IOMMU0",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x10",
- "UMask": "0x80",
+ "PortMask": "0x100",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
"Counter": "0,1",
"CounterType": "PGMABLE",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART5",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.IOMMU1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x20",
- "UMask": "0x80",
+ "PortMask": "0x200",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "BriefDescription": "Data requested of the CPU : Messages",
"Counter": "0,1",
"CounterType": "PGMABLE",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART6",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.IOMMU0",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x40",
- "UMask": "0x80",
+ "PortMask": "0x100",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x40",
"Unit": "IIO"
},
{
@@ -1663,34 +15964,34 @@
"Counter": "0,1",
"CounterType": "PGMABLE",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART7",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.IOMMU0",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x80",
+ "PortMask": "0x100",
"UMask": "0x80",
"Unit": "IIO"
},
{
- "BriefDescription": "Number requests PCIe makes of the main die : All",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
"CounterType": "PGMABLE",
- "EventCode": "0x85",
- "EventName": "UNC_IIO_NUM_REQ_OF_CPU.COMMIT.ALL",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.IOMMU1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0xFF",
- "UMask": "0x01",
+ "PortMask": "0x200",
+ "UMask": "0x80",
"Unit": "IIO"
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xc1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART4",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.IOMMU0",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x10",
+ "PortMask": "0x100",
"UMask": "0x01",
"Unit": "IIO"
},
@@ -1698,84 +15999,168 @@
"BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xc1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART5",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.IOMMU1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x20",
+ "PortMask": "0x200",
"UMask": "0x01",
"Unit": "IIO"
},
{
- "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xc1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART6",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.IOMMU0",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x40",
- "UMask": "0x01",
+ "PortMask": "0x200",
+ "UMask": "0x02",
"Unit": "IIO"
},
{
- "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xc1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART7",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.IOMMU0",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x80",
- "UMask": "0x01",
+ "PortMask": "0x100",
+ "UMask": "0x04",
"Unit": "IIO"
},
{
"BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xc1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART4",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.IOMMU1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x10",
+ "PortMask": "0x200",
"UMask": "0x04",
"Unit": "IIO"
},
{
- "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xc1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART5",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.IOMMU0",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x20",
- "UMask": "0x04",
+ "PortMask": "0x100",
+ "UMask": "0x08",
"Unit": "IIO"
},
{
- "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xc1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART6",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.IOMMU1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x40",
- "UMask": "0x04",
+ "PortMask": "0x200",
+ "UMask": "0x08",
"Unit": "IIO"
},
{
- "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xc1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART7",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.IOMMU0",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x80",
- "UMask": "0x04",
+ "PortMask": "0x100",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x80",
"Unit": "IIO"
},
{
@@ -1783,10 +16168,10 @@
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART4",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.IOMMU0",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x10",
+ "PortMask": "0x100",
"UMask": "0x01",
"Unit": "IIO"
},
@@ -1795,35 +16180,35 @@
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART5",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.IOMMU1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x20",
+ "PortMask": "0x200",
"UMask": "0x01",
"Unit": "IIO"
},
{
- "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART6",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.IOMMU0",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x40",
- "UMask": "0x01",
+ "PortMask": "0x100",
+ "UMask": "0x02",
"Unit": "IIO"
},
{
- "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART7",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.IOMMU1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x80",
- "UMask": "0x01",
+ "PortMask": "0x200",
+ "UMask": "0x02",
"Unit": "IIO"
},
{
@@ -1831,10 +16216,10 @@
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART4",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.IOMMU0",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x10",
+ "PortMask": "0x100",
"UMask": "0x04",
"Unit": "IIO"
},
@@ -1843,59 +16228,83 @@
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART5",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.IOMMU1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x20",
+ "PortMask": "0x200",
"UMask": "0x04",
"Unit": "IIO"
},
{
- "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART6",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.IOMMU0",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x40",
- "UMask": "0x04",
+ "PortMask": "0x100",
+ "UMask": "0x08",
"Unit": "IIO"
},
{
- "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART7",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.IOMMU1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x80",
- "UMask": "0x04",
+ "PortMask": "0x200",
+ "UMask": "0x08",
"Unit": "IIO"
},
{
- "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART4",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.IOMMU0",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x10",
- "UMask": "0x80",
+ "PortMask": "0x100",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART5",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.IOMMU1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x20",
- "UMask": "0x80",
+ "PortMask": "0x200",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x40",
"Unit": "IIO"
},
{
@@ -1903,10 +16312,10 @@
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART6",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.IOMMU0",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x40",
+ "PortMask": "0x100",
"UMask": "0x80",
"Unit": "IIO"
},
@@ -1915,447 +16324,17783 @@
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART7",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.IOMMU1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x80",
+ "PortMask": "0x200",
"UMask": "0x80",
"Unit": "IIO"
},
{
- "BriefDescription": "Free running counter that increments for IIO clocktick",
- "CounterType": "FREERUN",
- "EventName": "UNC_IIO_CLOCKTICKS_FREERUN",
+ "BriefDescription": "M2M Writes Issued to iMC : Non-Inclusive Miss - Ch0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_NI_MISS",
"PerPkg": "1",
- "Unit": "IIO"
+ "UMaskExt": "0x20",
+ "Unit": "M2M"
},
{
- "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0",
+ "BriefDescription": "M2M Writes Issued to iMC : Non-Inclusive Miss - Ch1",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xc2",
- "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART0",
- "FCMask": "0x04",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_NI_MISS",
"PerPkg": "1",
- "PortMask": "0x01",
- "UMask": "0x03",
- "Unit": "IIO"
+ "UMaskExt": "0x0C",
+ "Unit": "M2M"
},
{
- "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 1",
+ "BriefDescription": "Prefetch CAM Cycles Full : Channel 0",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xc2",
- "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART1",
- "FCMask": "0x04",
+ "EventCode": "0x6B",
+ "EventName": "UNC_M2M_PREFCAM_CYCLES_FULL.CH0",
"PerPkg": "1",
- "PortMask": "0x02",
- "UMask": "0x03",
- "Unit": "IIO"
+ "UMask": "0x01",
+ "Unit": "M2M"
},
{
- "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 2",
+ "BriefDescription": "Prefetch CAM Cycles Full : Channel 1",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xc2",
- "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART2",
- "FCMask": "0x04",
+ "EventCode": "0x6B",
+ "EventName": "UNC_M2M_PREFCAM_CYCLES_FULL.CH1",
"PerPkg": "1",
- "PortMask": "0x04",
- "UMask": "0x03",
- "Unit": "IIO"
+ "UMask": "0x02",
+ "Unit": "M2M"
},
{
- "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 3",
+ "BriefDescription": "Prefetch CAM Cycles Full : Channel 2",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xc2",
- "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART3",
- "FCMask": "0x04",
+ "EventCode": "0x6B",
+ "EventName": "UNC_M2M_PREFCAM_CYCLES_FULL.CH2",
"PerPkg": "1",
- "PortMask": "0x08",
- "UMask": "0x03",
- "Unit": "IIO"
+ "UMask": "0x04",
+ "Unit": "M2M"
},
{
- "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 4",
+ "BriefDescription": "Prefetch CAM Cycles Not Empty : Channel 0",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xc2",
- "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART4",
- "FCMask": "0x04",
+ "EventCode": "0x6C",
+ "EventName": "UNC_M2M_PREFCAM_CYCLES_NE.CH0",
"PerPkg": "1",
- "PortMask": "0x10",
- "UMask": "0x03",
- "Unit": "IIO"
+ "UMask": "0x01",
+ "Unit": "M2M"
},
{
- "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 5",
+ "BriefDescription": "Prefetch CAM Cycles Not Empty : Channel 1",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xc2",
- "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART5",
- "FCMask": "0x04",
+ "EventCode": "0x6C",
+ "EventName": "UNC_M2M_PREFCAM_CYCLES_NE.CH1",
"PerPkg": "1",
- "PortMask": "0x20",
- "UMask": "0x03",
- "Unit": "IIO"
+ "UMask": "0x02",
+ "Unit": "M2M"
},
{
- "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 6",
+ "BriefDescription": "Prefetch CAM Cycles Not Empty : Channel 2",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xc2",
- "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART6",
- "FCMask": "0x04",
+ "EventCode": "0x6C",
+ "EventName": "UNC_M2M_PREFCAM_CYCLES_NE.CH2",
"PerPkg": "1",
- "PortMask": "0x40",
- "UMask": "0x03",
- "Unit": "IIO"
+ "UMask": "0x04",
+ "Unit": "M2M"
},
{
- "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 7",
+ "BriefDescription": "Prefetch CAM Deallocs",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xc2",
- "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART7",
- "FCMask": "0x04",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH0_HITA0_INVAL",
"PerPkg": "1",
- "PortMask": "0x80",
- "UMask": "0x03",
- "Unit": "IIO"
+ "UMask": "0x01",
+ "Unit": "M2M"
},
{
- "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 0",
- "Counter": "2,3",
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH0_HITA1_INVAL",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH0_MISS_INVAL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH0_RSP_PDRESET",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH1_HITA0_INVAL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH1_HITA1_INVAL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH1_MISS_INVAL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH1_RSP_PDRESET",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH2_HITA0_INVAL",
+ "PerPkg": "1",
+ "UMaskExt": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH2_HITA1_INVAL",
+ "PerPkg": "1",
+ "UMaskExt": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH2_MISS_INVAL",
+ "PerPkg": "1",
+ "UMaskExt": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH2_RSP_PDRESET",
+ "PerPkg": "1",
+ "UMaskExt": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped : XPT - Ch 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6F",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH0_XPT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped : UPI - Ch 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6F",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH0_UPI",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped : XPT - Ch 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6F",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH1_XPT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped : UPI - Ch 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6F",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH1_UPI",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped : XPT - Ch 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6F",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH2_XPT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped : UPI - Ch 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6F",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH2_UPI",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.PF_SECURE_DROP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.NOT_PF_SAD_REGION",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.PF_CAM_HIT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.STOP_B2B",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.ERRORBLK_RxC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.PF_AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.PF_CAM_FULL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.WPQ_PROXY",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.RPQ_PROXY",
+ "PerPkg": "1",
+ "UMaskExt": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.XPT_THRESH",
+ "PerPkg": "1",
+ "UMaskExt": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.UPI_THRESH",
+ "PerPkg": "1",
+ "UMaskExt": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.PF_SECURE_DROP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.NOT_PF_SAD_REGION",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.PF_CAM_HIT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.STOP_B2B",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.ERRORBLK_RxC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.PF_AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.PF_CAM_FULL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.WPQ_PROXY",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.RPQ_PROXY",
+ "PerPkg": "1",
+ "UMaskExt": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.XPT_THRESH",
+ "PerPkg": "1",
+ "UMaskExt": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.UPI_THRESH",
+ "PerPkg": "1",
+ "UMaskExt": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch2 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x72",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH2.PF_SECURE_DROP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch2 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x72",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH2.NOT_PF_SAD_REGION",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch2 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x72",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH2.PF_CAM_HIT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch2 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x72",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH2.STOP_B2B",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch2 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x72",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH2.ERRORBLK_RxC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch2 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x72",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH2.PF_AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch2 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x72",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH2.PF_CAM_FULL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch2 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x72",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH2.WPQ_PROXY",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch2 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x72",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH2.RPQ_PROXY",
+ "PerPkg": "1",
+ "UMaskExt": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch2 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x72",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH2.XPT_THRESH",
+ "PerPkg": "1",
+ "UMaskExt": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch2 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x72",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH2.UPI_THRESH",
+ "PerPkg": "1",
+ "UMaskExt": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : XPT - Ch 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6D",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS.CH0_XPT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : UPI - Ch 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6D",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS.CH0_UPI",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : XPT - Ch 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6D",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS.CH1_XPT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : UPI - Ch 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6D",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS.CH1_UPI",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : XPT - Ch 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6D",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS.CH2_XPT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : UPI - Ch 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6D",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS.CH2_UPI",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6A",
+ "EventName": "UNC_M2M_PREFCAM_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Occupancy : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6A",
+ "EventName": "UNC_M2M_PREFCAM_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Occupancy : Channel 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6A",
+ "EventName": "UNC_M2M_PREFCAM_OCCUPANCY.CH2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": ": Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x76",
+ "EventName": "UNC_M2M_PREFCAM_RESP_MISS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": ": Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x76",
+ "EventName": "UNC_M2M_PREFCAM_RESP_MISS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": ": Channel 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x76",
+ "EventName": "UNC_M2M_PREFCAM_RESP_MISS.CH2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_PREFCAM_RxC_DEALLOCS.SQUASHED",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x7A",
+ "EventName": "UNC_M2M_PREFCAM_RxC_DEALLOCS.SQUASHED",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_PREFCAM_RxC_DEALLOCS.1LM_POSTED",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x7A",
+ "EventName": "UNC_M2M_PREFCAM_RxC_DEALLOCS.1LM_POSTED",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_PREFCAM_RxC_DEALLOCS.CIS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x7A",
+ "EventName": "UNC_M2M_PREFCAM_RxC_DEALLOCS.CIS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M2M_WR_TRACKER_NE.MIRR_NONTGR",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M2M_WR_TRACKER_NE.MIRR_PWR",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x55",
+ "EventName": "UNC_M2M_WR_TRACKER_OCCUPANCY.MIRR_NONTGR",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x55",
+ "EventName": "UNC_M2M_WR_TRACKER_OCCUPANCY.MIRR_PWR",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF0 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF0 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF1 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF1 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF2 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF2 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF3 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF3_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF3 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF3_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF4 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF4_NCB",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF4 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF4_NCS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF5 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF5_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF5 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF5_NCS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF0 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF0 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF1 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF1 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF2 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF2 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF3 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF3_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF3 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF3_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF4 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1a",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF4_NCB",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF4 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1a",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF4_NCS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF5 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1a",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF5_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF5 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1a",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF5_NCS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Shared Credits Returned : Agent0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2P_LOCAL_P2P_SHAR_RETURNED.AGENT_0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Shared Credits Returned : Agent1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2P_LOCAL_P2P_SHAR_RETURNED.AGENT_1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Shared Credits Returned : Agent2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2P_LOCAL_P2P_SHAR_RETURNED.AGENT_2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF0 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF0 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF1 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF1 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF2 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF2 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF3 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF3_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF3 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF3_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF4 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x41",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF4_NCB",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF4 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x41",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF4_NCS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF5 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x41",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF5_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF5 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x41",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF5_NCS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF0 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF0 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF1 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF1 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF2 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF2 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF3 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF3_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF3 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF3_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF4 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF4_NCB",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF4 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF4_NCS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF5 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF5_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF5 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF5_NCS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "P2P Credit Occupancy : Local NCB",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.LOCAL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "P2P Credit Occupancy : Local NCS",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.LOCAL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "P2P Credit Occupancy : Remote NCB",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.REMOTE_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "P2P Credit Occupancy : Remote NCS",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.REMOTE_NCS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "P2P Credit Occupancy : All",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Dedicated Credits Received : Local NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2P_P2P_DED_RECEIVED.LOCAL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Dedicated Credits Received : Local NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2P_P2P_DED_RECEIVED.LOCAL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Dedicated Credits Received : Remote NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2P_P2P_DED_RECEIVED.REMOTE_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Dedicated Credits Received : Remote NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2P_P2P_DED_RECEIVED.REMOTE_NCS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Dedicated Credits Received : All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2P_P2P_DED_RECEIVED.ALL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Shared Credits Received : Local NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2P_P2P_SHAR_RECEIVED.LOCAL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Shared Credits Received : Local NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2P_P2P_SHAR_RECEIVED.LOCAL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Shared Credits Received : Remote NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2P_P2P_SHAR_RECEIVED.REMOTE_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Shared Credits Received : Remote NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2P_P2P_SHAR_RECEIVED.REMOTE_NCS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Shared Credits Received : All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2P_P2P_SHAR_RECEIVED.ALL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI0 - DRS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x48",
+ "EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI0_DRS",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI0 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x48",
+ "EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI0 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x48",
+ "EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI1 - DRS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x48",
+ "EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI1_DRS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI1 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x48",
+ "EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI1 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x48",
+ "EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Dedicated P2P Credit Taken - 1 : UPI2 - DRS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x49",
+ "EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_1.UPI2_DRS",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Dedicated P2P Credit Taken - 1 : UPI2 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x49",
+ "EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_1.UPI2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Dedicated P2P Credit Taken - 1 : UPI2 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x49",
+ "EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_1.UPI2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Dedicated Credits Returned : UPI0 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1b",
+ "EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Dedicated Credits Returned : UPI0 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1b",
+ "EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Dedicated Credits Returned : UPI1 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1b",
+ "EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Dedicated Credits Returned : UPI1 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1b",
+ "EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Dedicated Credits Returned : UPI2 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1b",
+ "EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Dedicated Credits Returned : UPI2 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1b",
+ "EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Shared Credits Returned : Agent0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2P_REMOTE_P2P_SHAR_RETURNED.AGENT_0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Shared Credits Returned : Agent1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2P_REMOTE_P2P_SHAR_RETURNED.AGENT_1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Shared Credits Returned : Agent2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2P_REMOTE_P2P_SHAR_RETURNED.AGENT_2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Returned to credit ring : Agent0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_RETURNED.AGENT_0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Returned to credit ring : Agent1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_RETURNED.AGENT_1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Returned to credit ring : Agent2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_RETURNED.AGENT_2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI0 - DRS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI0_DRS",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI0 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI0 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI1 - DRS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI1_DRS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI1 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI1 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Taken - 1 : UPI2 - DRS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x43",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_1.UPI2_DRS",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Taken - 1 : UPI2 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x43",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_1.UPI2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Taken - 1 : UPI2 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x43",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_1.UPI2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI0 - DRS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI0_DRS",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI0 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI0 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI1 - DRS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI1_DRS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI1 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI1 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Remote Shared P2P Credit - 1 : UPI2 - DRS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4d",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_1.UPI2_DRS",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Remote Shared P2P Credit - 1 : UPI2 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4d",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_1.UPI2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Remote Shared P2P Credit - 1 : UPI2 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4d",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_1.UPI2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.CHA_IDI",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.CHA_NCB",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.CHA_NCS",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.CHA_IDI",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.CHA_NCB",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.CHA_NCS",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "UNC_M2P_TxC_CREDITS.PRQ",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2d",
+ "EventName": "UNC_M2P_TxC_CREDITS.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCB",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4D",
+ "EventName": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCB",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCS",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4D",
+ "EventName": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_UPI_NCB",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4D",
+ "EventName": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_UPI_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_UPI_NCS",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4D",
+ "EventName": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_UPI_NCS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCB",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4D",
+ "EventName": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCS",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4D",
+ "EventName": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_UPI_NCB",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4D",
+ "EventName": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_UPI_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_UPI_NCS",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4D",
+ "EventName": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_UPI_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.RxC_CYCLES_FULL_BL",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4E",
+ "EventName": "UNC_U_M2U_MISC2.RxC_CYCLES_FULL_BL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.RxC_CYCLES_EMPTY_BL",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4E",
+ "EventName": "UNC_U_M2U_MISC2.RxC_CYCLES_EMPTY_BL",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCB",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4E",
+ "EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCS",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4E",
+ "EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_BL",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4E",
+ "EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_BL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AK",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4E",
+ "EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AK",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AKC",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4E",
+ "EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AKC",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_FULL_BL",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4E",
+ "EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_FULL_BL",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AK",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4F",
+ "EventName": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AK",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AKC",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4F",
+ "EventName": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AKC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x81",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x81",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x81",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x89",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x89",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x89",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8B",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8B",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8B",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x85",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x85",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x85",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x87",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x87",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x87",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8D",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8D",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8D",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8F",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8F",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8F",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : Vertical",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAF",
+ "EventName": "UNC_CHA_DISTRESS_ASSERTED.VERT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : Horizontal",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAF",
+ "EventName": "UNC_CHA_DISTRESS_ASSERTED.HORZ",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Local",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAF",
+ "EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Remote",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAF",
+ "EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_NONLOCAL",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Stalled - IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAF",
+ "EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_STALL_IV",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Stalled - No Credit",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAF",
+ "EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_STALL_NOCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xBA",
+ "EventName": "UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xBA",
+ "EventName": "UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB6",
+ "EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB6",
+ "EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB6",
+ "EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB6",
+ "EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xBB",
+ "EventName": "UNC_CHA_HORZ_RING_AKC_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xBB",
+ "EventName": "UNC_CHA_HORZ_RING_AKC_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xBB",
+ "EventName": "UNC_CHA_HORZ_RING_AKC_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xBB",
+ "EventName": "UNC_CHA_HORZ_RING_AKC_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB7",
+ "EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB7",
+ "EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB7",
+ "EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB7",
+ "EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Left and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB8",
+ "EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Left and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB8",
+ "EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Right and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB8",
+ "EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Right and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB8",
+ "EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use : Left",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB9",
+ "EventName": "UNC_CHA_HORZ_RING_IV_IN_USE.LEFT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use : Right",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB9",
+ "EventName": "UNC_CHA_HORZ_RING_IV_IN_USE.RIGHT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE6",
+ "EventName": "UNC_CHA_MISC_EXTERNAL.MBE_INST0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE6",
+ "EventName": "UNC_CHA_MISC_EXTERNAL.MBE_INST1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAC",
+ "EventName": "UNC_CHA_RING_BOUNCES_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAC",
+ "EventName": "UNC_CHA_RING_BOUNCES_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAC",
+ "EventName": "UNC_CHA_RING_BOUNCES_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAC",
+ "EventName": "UNC_CHA_RING_BOUNCES_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAA",
+ "EventName": "UNC_CHA_RING_BOUNCES_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Acknowledgements to core",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAA",
+ "EventName": "UNC_CHA_RING_BOUNCES_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Data Responses to core",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAA",
+ "EventName": "UNC_CHA_RING_BOUNCES_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Snoops of processor's cache",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAA",
+ "EventName": "UNC_CHA_RING_BOUNCES_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAA",
+ "EventName": "UNC_CHA_RING_BOUNCES_VERT.AKC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAD",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAD",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAD",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAD",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : Acknowledgements to Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAD",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAB",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Acknowledgements to core",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAB",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Data Responses to core",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAB",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Snoops of processor's cache",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAB",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAB",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_VERT.AKC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE5",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE5",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE5",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE5",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE5",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE5",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_CHA_RxR_BYPASS.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_CHA_RxR_BYPASS.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_CHA_RxR_BYPASS.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_CHA_RxR_BYPASS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_CHA_RxR_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_CHA_RxR_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_CHA_RxR_BYPASS.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_CHA_RxR_BYPASS.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_CHA_RxR_BYPASS.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : IFV - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.IFV",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_CHA_RxR_INSERTS.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_CHA_RxR_INSERTS.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_CHA_RxR_INSERTS.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_CHA_RxR_INSERTS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_CHA_RxR_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_CHA_RxR_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_CHA_RxR_INSERTS.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_CHA_RxR_INSERTS.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_CHA_RxR_INSERTS.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD1",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD1",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD1",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD3",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD3",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD3",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD5",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD5",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD5",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD7",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD7",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD7",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA5",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA5",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA5",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA5",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA5",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA5",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA5",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : IV - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.IV_AG1",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9E",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS_1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9E",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS_1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : IV - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x95",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x95",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : IV - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x97",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x97",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : IV - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x93",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x93",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x99",
+ "EventName": "UNC_CHA_TxR_VERT_NACK1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x99",
+ "EventName": "UNC_CHA_TxR_VERT_NACK1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : IV - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x91",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x91",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9B",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9B",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9B",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED1.TGC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_VERT_RING_AD_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_VERT_RING_AD_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_VERT_RING_AD_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_VERT_RING_AD_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB4",
+ "EventName": "UNC_CHA_VERT_RING_AKC_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB4",
+ "EventName": "UNC_CHA_VERT_RING_AKC_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB4",
+ "EventName": "UNC_CHA_VERT_RING_AKC_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB4",
+ "EventName": "UNC_CHA_VERT_RING_AKC_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_VERT_RING_AK_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_VERT_RING_AK_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_VERT_RING_AK_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_VERT_RING_AK_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Up and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_VERT_RING_BL_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Up and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_VERT_RING_BL_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Down and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_VERT_RING_BL_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Down and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_VERT_RING_BL_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use : Up",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB3",
+ "EventName": "UNC_CHA_VERT_RING_IV_IN_USE.UP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use : Down",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB3",
+ "EventName": "UNC_CHA_VERT_RING_IV_IN_USE.DN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB5",
+ "EventName": "UNC_CHA_VERT_RING_TGC_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB5",
+ "EventName": "UNC_CHA_VERT_RING_TGC_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB5",
+ "EventName": "UNC_CHA_VERT_RING_TGC_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB5",
+ "EventName": "UNC_CHA_VERT_RING_TGC_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x81",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x81",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x81",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x89",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x89",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x89",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8B",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8B",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8B",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x85",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x85",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x85",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x87",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x87",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x87",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8D",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8D",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8D",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8F",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8F",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8F",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : Vertical",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M2M_DISTRESS_ASSERTED.VERT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : Horizontal",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M2M_DISTRESS_ASSERTED.HORZ",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Local",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M2M_DISTRESS_ASSERTED.DPT_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Remote",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M2M_DISTRESS_ASSERTED.DPT_NONLOCAL",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Stalled - IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M2M_DISTRESS_ASSERTED.DPT_STALL_IV",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Stalled - No Credit",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M2M_DISTRESS_ASSERTED.DPT_STALL_NOCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M2M_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M2M_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M2M_HORZ_RING_AKC_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M2M_HORZ_RING_AKC_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M2M_HORZ_RING_AKC_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M2M_HORZ_RING_AKC_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Left and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Left and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Right and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Right and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use : Left",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M2M_HORZ_RING_IV_IN_USE.LEFT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use : Right",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M2M_HORZ_RING_IV_IN_USE.RIGHT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE6",
+ "EventName": "UNC_M2M_MISC_EXTERNAL.MBE_INST0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE6",
+ "EventName": "UNC_M2M_MISC_EXTERNAL.MBE_INST1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M2M_RING_BOUNCES_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M2M_RING_BOUNCES_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M2M_RING_BOUNCES_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M2M_RING_BOUNCES_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M2M_RING_BOUNCES_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Acknowledgements to core",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M2M_RING_BOUNCES_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Data Responses to core",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M2M_RING_BOUNCES_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Snoops of processor's cache",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M2M_RING_BOUNCES_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M2M_RING_BOUNCES_VERT.AKC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : Acknowledgements to Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Acknowledgements to core",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Data Responses to core",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Snoops of processor's cache",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_VERT.AKC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M2M_RxR_BYPASS.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M2M_RxR_BYPASS.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M2M_RxR_BYPASS.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M2M_RxR_BYPASS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M2M_RxR_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M2M_RxR_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M2M_RxR_BYPASS.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M2M_RxR_BYPASS.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M2M_RxR_BYPASS.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : IFV - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.IFV",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M2M_RxR_INSERTS.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M2M_RxR_INSERTS.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M2M_RxR_INSERTS.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M2M_RxR_INSERTS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M2M_RxR_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M2M_RxR_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M2M_RxR_INSERTS.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M2M_RxR_INSERTS.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M2M_RxR_INSERTS.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD1",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD1",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD1",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : IV - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.IV_AG1",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS_1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS_1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : IV - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : IV - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : IV - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2M_TxR_VERT_NACK1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2M_TxR_VERT_NACK1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : IV - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED1.TGC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_VERT_RING_AD_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_VERT_RING_AD_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_VERT_RING_AD_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_VERT_RING_AD_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M2M_VERT_RING_AKC_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M2M_VERT_RING_AKC_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M2M_VERT_RING_AKC_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M2M_VERT_RING_AKC_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_VERT_RING_AK_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_VERT_RING_AK_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_VERT_RING_AK_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_VERT_RING_AK_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Up and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_VERT_RING_BL_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Up and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_VERT_RING_BL_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Down and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_VERT_RING_BL_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Down and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_VERT_RING_BL_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use : Up",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M2M_VERT_RING_IV_IN_USE.UP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use : Down",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M2M_VERT_RING_IV_IN_USE.DN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M2M_VERT_RING_TGC_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M2M_VERT_RING_TGC_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M2M_VERT_RING_TGC_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M2M_VERT_RING_TGC_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x81",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x81",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x81",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x89",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x89",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x89",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8a",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8a",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8a",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8a",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8a",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8a",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8a",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8a",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8b",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8b",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8b",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x85",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x85",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x85",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x87",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x87",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x87",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8d",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8d",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8d",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8f",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8f",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8f",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : Vertical",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xaf",
+ "EventName": "UNC_M2P_DISTRESS_ASSERTED.VERT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : Horizontal",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xaf",
+ "EventName": "UNC_M2P_DISTRESS_ASSERTED.HORZ",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Local",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xaf",
+ "EventName": "UNC_M2P_DISTRESS_ASSERTED.DPT_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Remote",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xaf",
+ "EventName": "UNC_M2P_DISTRESS_ASSERTED.DPT_NONLOCAL",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Stalled - IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xaf",
+ "EventName": "UNC_M2P_DISTRESS_ASSERTED.DPT_STALL_IV",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Stalled - No Credit",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xaf",
+ "EventName": "UNC_M2P_DISTRESS_ASSERTED.DPT_STALL_NOCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xba",
+ "EventName": "UNC_M2P_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xba",
+ "EventName": "UNC_M2P_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb6",
+ "EventName": "UNC_M2P_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb6",
+ "EventName": "UNC_M2P_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb6",
+ "EventName": "UNC_M2P_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb6",
+ "EventName": "UNC_M2P_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xbb",
+ "EventName": "UNC_M2P_HORZ_RING_AKC_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xbb",
+ "EventName": "UNC_M2P_HORZ_RING_AKC_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xbb",
+ "EventName": "UNC_M2P_HORZ_RING_AKC_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xbb",
+ "EventName": "UNC_M2P_HORZ_RING_AKC_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb7",
+ "EventName": "UNC_M2P_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb7",
+ "EventName": "UNC_M2P_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb7",
+ "EventName": "UNC_M2P_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb7",
+ "EventName": "UNC_M2P_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Left and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb8",
+ "EventName": "UNC_M2P_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Left and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb8",
+ "EventName": "UNC_M2P_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Right and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb8",
+ "EventName": "UNC_M2P_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Right and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb8",
+ "EventName": "UNC_M2P_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use : Left",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb9",
+ "EventName": "UNC_M2P_HORZ_RING_IV_IN_USE.LEFT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use : Right",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb9",
+ "EventName": "UNC_M2P_HORZ_RING_IV_IN_USE.RIGHT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe6",
+ "EventName": "UNC_M2P_MISC_EXTERNAL.MBE_INST0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe6",
+ "EventName": "UNC_M2P_MISC_EXTERNAL.MBE_INST1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xac",
+ "EventName": "UNC_M2P_RING_BOUNCES_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xac",
+ "EventName": "UNC_M2P_RING_BOUNCES_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xac",
+ "EventName": "UNC_M2P_RING_BOUNCES_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xac",
+ "EventName": "UNC_M2P_RING_BOUNCES_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xaa",
+ "EventName": "UNC_M2P_RING_BOUNCES_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Acknowledgements to core",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xaa",
+ "EventName": "UNC_M2P_RING_BOUNCES_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Data Responses to core",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xaa",
+ "EventName": "UNC_M2P_RING_BOUNCES_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Snoops of processor's cache",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xaa",
+ "EventName": "UNC_M2P_RING_BOUNCES_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xaa",
+ "EventName": "UNC_M2P_RING_BOUNCES_VERT.AKC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xad",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xad",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xad",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xad",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : Acknowledgements to Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xad",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_HORZ.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xab",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Acknowledgements to core",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xab",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Data Responses to core",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xab",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Snoops of processor's cache",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xab",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xab",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_VERT.AKC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe5",
+ "EventName": "UNC_M2P_RxR_BUSY_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe5",
+ "EventName": "UNC_M2P_RxR_BUSY_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe5",
+ "EventName": "UNC_M2P_RxR_BUSY_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe5",
+ "EventName": "UNC_M2P_RxR_BUSY_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe5",
+ "EventName": "UNC_M2P_RxR_BUSY_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe5",
+ "EventName": "UNC_M2P_RxR_BUSY_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe2",
+ "EventName": "UNC_M2P_RxR_BYPASS.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe2",
+ "EventName": "UNC_M2P_RxR_BYPASS.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe2",
+ "EventName": "UNC_M2P_RxR_BYPASS.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe2",
+ "EventName": "UNC_M2P_RxR_BYPASS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe2",
+ "EventName": "UNC_M2P_RxR_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe2",
+ "EventName": "UNC_M2P_RxR_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe2",
+ "EventName": "UNC_M2P_RxR_BYPASS.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe2",
+ "EventName": "UNC_M2P_RxR_BYPASS.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe2",
+ "EventName": "UNC_M2P_RxR_BYPASS.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : IFV - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED.IFV",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe1",
+ "EventName": "UNC_M2P_RxR_INSERTS.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe1",
+ "EventName": "UNC_M2P_RxR_INSERTS.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe1",
+ "EventName": "UNC_M2P_RxR_INSERTS.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe1",
+ "EventName": "UNC_M2P_RxR_INSERTS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe1",
+ "EventName": "UNC_M2P_RxR_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe1",
+ "EventName": "UNC_M2P_RxR_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe1",
+ "EventName": "UNC_M2P_RxR_INSERTS.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe1",
+ "EventName": "UNC_M2P_RxR_INSERTS.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe1",
+ "EventName": "UNC_M2P_RxR_INSERTS.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M2P_RxR_OCCUPANCY.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M2P_RxR_OCCUPANCY.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M2P_RxR_OCCUPANCY.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M2P_RxR_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M2P_RxR_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M2P_RxR_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M2P_RxR_OCCUPANCY.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M2P_RxR_OCCUPANCY.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M2P_RxR_OCCUPANCY.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd0",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd0",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd0",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd0",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd0",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd0",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd0",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd0",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd1",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd1",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd1",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd3",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd3",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd3",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
"CounterType": "PGMABLE",
"EventCode": "0xd5",
- "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART0",
- "FCMask": "0x04",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR8",
"PerPkg": "1",
"UMask": "0x01",
- "Unit": "IIO"
+ "Unit": "M2PCIe"
},
{
- "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 7",
- "Counter": "2,3",
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
"CounterType": "PGMABLE",
"EventCode": "0xd5",
- "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART7",
- "FCMask": "0x04",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR9",
"PerPkg": "1",
- "UMask": "0x80",
- "Unit": "IIO"
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
},
{
- "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 6",
- "Counter": "2,3",
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
"CounterType": "PGMABLE",
"EventCode": "0xd5",
- "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART6",
- "FCMask": "0x04",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd7",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd7",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd7",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa6",
+ "EventName": "UNC_M2P_TxR_HORZ_ADS_USED.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa6",
+ "EventName": "UNC_M2P_TxR_HORZ_ADS_USED.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa6",
+ "EventName": "UNC_M2P_TxR_HORZ_ADS_USED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa6",
+ "EventName": "UNC_M2P_TxR_HORZ_ADS_USED.BL_CRD",
"PerPkg": "1",
"UMask": "0x40",
- "Unit": "IIO"
+ "Unit": "M2PCIe"
},
{
- "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 5",
- "Counter": "2,3",
+ "BriefDescription": "CMS Horizontal ADS Used : AD - All",
+ "Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xd5",
- "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART5",
- "FCMask": "0x04",
+ "EventCode": "0xa6",
+ "EventName": "UNC_M2P_TxR_HORZ_ADS_USED.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa6",
+ "EventName": "UNC_M2P_TxR_HORZ_ADS_USED.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa7",
+ "EventName": "UNC_M2P_TxR_HORZ_BYPASS.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa7",
+ "EventName": "UNC_M2P_TxR_HORZ_BYPASS.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa7",
+ "EventName": "UNC_M2P_TxR_HORZ_BYPASS.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa7",
+ "EventName": "UNC_M2P_TxR_HORZ_BYPASS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa7",
+ "EventName": "UNC_M2P_TxR_HORZ_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa7",
+ "EventName": "UNC_M2P_TxR_HORZ_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa7",
+ "EventName": "UNC_M2P_TxR_HORZ_BYPASS.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa7",
+ "EventName": "UNC_M2P_TxR_HORZ_BYPASS.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa7",
+ "EventName": "UNC_M2P_TxR_HORZ_BYPASS.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa2",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa2",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa2",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa2",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa2",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa2",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa2",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa2",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa2",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa3",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa3",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa3",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa3",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa3",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa3",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa3",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa3",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa3",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M2P_TxR_HORZ_INSERTS.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M2P_TxR_HORZ_INSERTS.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M2P_TxR_HORZ_INSERTS.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M2P_TxR_HORZ_INSERTS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M2P_TxR_HORZ_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M2P_TxR_HORZ_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M2P_TxR_HORZ_INSERTS.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M2P_TxR_HORZ_INSERTS.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M2P_TxR_HORZ_INSERTS.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa4",
+ "EventName": "UNC_M2P_TxR_HORZ_NACK.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa4",
+ "EventName": "UNC_M2P_TxR_HORZ_NACK.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa4",
+ "EventName": "UNC_M2P_TxR_HORZ_NACK.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa4",
+ "EventName": "UNC_M2P_TxR_HORZ_NACK.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa4",
+ "EventName": "UNC_M2P_TxR_HORZ_NACK.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa4",
+ "EventName": "UNC_M2P_TxR_HORZ_NACK.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa4",
+ "EventName": "UNC_M2P_TxR_HORZ_NACK.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa4",
+ "EventName": "UNC_M2P_TxR_HORZ_NACK.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa4",
+ "EventName": "UNC_M2P_TxR_HORZ_NACK.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa5",
+ "EventName": "UNC_M2P_TxR_HORZ_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa5",
+ "EventName": "UNC_M2P_TxR_HORZ_STARVED.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa5",
+ "EventName": "UNC_M2P_TxR_HORZ_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa5",
+ "EventName": "UNC_M2P_TxR_HORZ_STARVED.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa5",
+ "EventName": "UNC_M2P_TxR_HORZ_STARVED.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa5",
+ "EventName": "UNC_M2P_TxR_HORZ_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa5",
+ "EventName": "UNC_M2P_TxR_HORZ_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9c",
+ "EventName": "UNC_M2P_TxR_VERT_ADS_USED.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9c",
+ "EventName": "UNC_M2P_TxR_VERT_ADS_USED.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9c",
+ "EventName": "UNC_M2P_TxR_VERT_ADS_USED.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9c",
+ "EventName": "UNC_M2P_TxR_VERT_ADS_USED.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Bypass Used : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9d",
+ "EventName": "UNC_M2P_TxR_VERT_BYPASS.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Bypass Used : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9d",
+ "EventName": "UNC_M2P_TxR_VERT_BYPASS.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Bypass Used : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9d",
+ "EventName": "UNC_M2P_TxR_VERT_BYPASS.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Bypass Used : IV - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9d",
+ "EventName": "UNC_M2P_TxR_VERT_BYPASS.IV_AG1",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Bypass Used : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9d",
+ "EventName": "UNC_M2P_TxR_VERT_BYPASS.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Bypass Used : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9d",
+ "EventName": "UNC_M2P_TxR_VERT_BYPASS.AK_AG1",
"PerPkg": "1",
"UMask": "0x20",
- "Unit": "IIO"
+ "Unit": "M2PCIe"
},
{
- "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 4",
- "Counter": "2,3",
+ "BriefDescription": "CMS Vertical Bypass Used : BL - Agent 1",
+ "Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xd5",
- "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART4",
- "FCMask": "0x04",
+ "EventCode": "0x9d",
+ "EventName": "UNC_M2P_TxR_VERT_BYPASS.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Bypass Used : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9e",
+ "EventName": "UNC_M2P_TxR_VERT_BYPASS_1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Bypass Used : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9e",
+ "EventName": "UNC_M2P_TxR_VERT_BYPASS_1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : IV - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.AD_AG1",
"PerPkg": "1",
"UMask": "0x10",
- "Unit": "IIO"
+ "Unit": "M2PCIe"
},
{
- "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 3",
- "Counter": "2,3",
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 1",
+ "Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xd5",
- "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART3",
- "FCMask": "0x04",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : IV - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.IV_AG0",
"PerPkg": "1",
"UMask": "0x08",
- "Unit": "IIO"
+ "Unit": "M2PCIe"
},
{
- "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 2",
- "Counter": "2,3",
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 1",
+ "Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xd5",
- "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART2",
- "FCMask": "0x04",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_NE1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_NE1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2P_TxR_VERT_INSERTS0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2P_TxR_VERT_INSERTS0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2P_TxR_VERT_INSERTS0.BL_AG0",
"PerPkg": "1",
"UMask": "0x04",
- "Unit": "IIO"
+ "Unit": "M2PCIe"
},
{
- "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 1",
- "Counter": "2,3",
+ "BriefDescription": "CMS Vert Egress Allocations : IV - Agent 0",
+ "Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xd5",
- "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART1",
- "FCMask": "0x04",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2P_TxR_VERT_INSERTS0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2P_TxR_VERT_INSERTS0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2P_TxR_VERT_INSERTS0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2P_TxR_VERT_INSERTS0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2P_TxR_VERT_INSERTS1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2P_TxR_VERT_INSERTS1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2P_TxR_VERT_NACK0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2P_TxR_VERT_NACK0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2P_TxR_VERT_NACK0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2P_TxR_VERT_NACK0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2P_TxR_VERT_NACK0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2P_TxR_VERT_NACK0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2P_TxR_VERT_NACK0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2P_TxR_VERT_NACK1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2P_TxR_VERT_NACK1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : IV - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2P_TxR_VERT_OCCUPANCY1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2P_TxR_VERT_OCCUPANCY1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9a",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9a",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9a",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9a",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9a",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9a",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9a",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9b",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9b",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9b",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED1.TGC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb0",
+ "EventName": "UNC_M2P_VERT_RING_AD_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb0",
+ "EventName": "UNC_M2P_VERT_RING_AD_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb0",
+ "EventName": "UNC_M2P_VERT_RING_AD_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb0",
+ "EventName": "UNC_M2P_VERT_RING_AD_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb4",
+ "EventName": "UNC_M2P_VERT_RING_AKC_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb4",
+ "EventName": "UNC_M2P_VERT_RING_AKC_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb4",
+ "EventName": "UNC_M2P_VERT_RING_AKC_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb4",
+ "EventName": "UNC_M2P_VERT_RING_AKC_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb1",
+ "EventName": "UNC_M2P_VERT_RING_AK_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb1",
+ "EventName": "UNC_M2P_VERT_RING_AK_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb1",
+ "EventName": "UNC_M2P_VERT_RING_AK_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb1",
+ "EventName": "UNC_M2P_VERT_RING_AK_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Up and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb2",
+ "EventName": "UNC_M2P_VERT_RING_BL_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Up and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb2",
+ "EventName": "UNC_M2P_VERT_RING_BL_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Down and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb2",
+ "EventName": "UNC_M2P_VERT_RING_BL_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Down and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb2",
+ "EventName": "UNC_M2P_VERT_RING_BL_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use : Up",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb3",
+ "EventName": "UNC_M2P_VERT_RING_IV_IN_USE.UP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use : Down",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb3",
+ "EventName": "UNC_M2P_VERT_RING_IV_IN_USE.DN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb5",
+ "EventName": "UNC_M2P_VERT_RING_TGC_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb5",
+ "EventName": "UNC_M2P_VERT_RING_TGC_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb5",
+ "EventName": "UNC_M2P_VERT_RING_TGC_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb5",
+ "EventName": "UNC_M2P_VERT_RING_TGC_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x81",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x81",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x81",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x89",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x89",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x89",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8B",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8B",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8B",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x85",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x85",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x85",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x87",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x87",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x87",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8D",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8D",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8D",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8F",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8F",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8F",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : Vertical",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M3UPI_DISTRESS_ASSERTED.VERT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : Horizontal",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M3UPI_DISTRESS_ASSERTED.HORZ",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Local",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M3UPI_DISTRESS_ASSERTED.DPT_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Remote",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M3UPI_DISTRESS_ASSERTED.DPT_NONLOCAL",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Stalled - IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M3UPI_DISTRESS_ASSERTED.DPT_STALL_IV",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Stalled - No Credit",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M3UPI_DISTRESS_ASSERTED.DPT_STALL_NOCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M3UPI_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M3UPI_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M3UPI_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M3UPI_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M3UPI_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M3UPI_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M3UPI_HORZ_RING_AKC_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M3UPI_HORZ_RING_AKC_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M3UPI_HORZ_RING_AKC_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M3UPI_HORZ_RING_AKC_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M3UPI_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M3UPI_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M3UPI_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M3UPI_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Left and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M3UPI_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Left and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M3UPI_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Right and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M3UPI_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Right and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M3UPI_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use : Left",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M3UPI_HORZ_RING_IV_IN_USE.LEFT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use : Right",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M3UPI_HORZ_RING_IV_IN_USE.RIGHT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE6",
+ "EventName": "UNC_M3UPI_MISC_EXTERNAL.MBE_INST0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE6",
+ "EventName": "UNC_M3UPI_MISC_EXTERNAL.MBE_INST1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Acknowledgements to core",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Data Responses to core",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Snoops of processor's cache",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_VERT.AKC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : Acknowledgements to Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Acknowledgements to core",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Data Responses to core",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Snoops of processor's cache",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.AKC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M3UPI_RxR_BUSY_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M3UPI_RxR_BUSY_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M3UPI_RxR_BUSY_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M3UPI_RxR_BUSY_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M3UPI_RxR_BUSY_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M3UPI_RxR_BUSY_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : IFV - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.IFV",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD1",
+ "EventName": "UNC_M3UPI_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD1",
+ "EventName": "UNC_M3UPI_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD1",
+ "EventName": "UNC_M3UPI_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M3UPI_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M3UPI_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M3UPI_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M3UPI_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M3UPI_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M3UPI_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M3UPI_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M3UPI_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M3UPI_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M3UPI_TxR_HORZ_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M3UPI_TxR_HORZ_STARVED.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M3UPI_TxR_HORZ_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M3UPI_TxR_HORZ_STARVED.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M3UPI_TxR_HORZ_STARVED.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M3UPI_TxR_HORZ_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M3UPI_TxR_HORZ_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : IV - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.IV_AG1",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS_1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS_1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : IV - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x95",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x95",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : IV - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x97",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x97",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : IV - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x93",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x93",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x99",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x99",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : IV - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x91",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x91",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED1.TGC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M3UPI_VERT_RING_AD_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M3UPI_VERT_RING_AD_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M3UPI_VERT_RING_AD_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M3UPI_VERT_RING_AD_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M3UPI_VERT_RING_AKC_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M3UPI_VERT_RING_AKC_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M3UPI_VERT_RING_AKC_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M3UPI_VERT_RING_AKC_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M3UPI_VERT_RING_AK_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M3UPI_VERT_RING_AK_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M3UPI_VERT_RING_AK_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M3UPI_VERT_RING_AK_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Up and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M3UPI_VERT_RING_BL_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Up and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M3UPI_VERT_RING_BL_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Down and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M3UPI_VERT_RING_BL_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Down and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M3UPI_VERT_RING_BL_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use : Up",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M3UPI_VERT_RING_IV_IN_USE.UP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use : Down",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M3UPI_VERT_RING_IV_IN_USE.DN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M3UPI_VERT_RING_TGC_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M3UPI_VERT_RING_TGC_IN_USE.UP_ODD",
"PerPkg": "1",
"UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M3UPI_VERT_RING_TGC_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M3UPI_VERT_RING_TGC_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Source Throttle",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xae",
+ "EventName": "UNC_CHA_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe4",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED_1",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counting disabled",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_IIO_NOTHING",
+ "PerPkg": "1",
"Unit": "IIO"
},
{
- "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0-7",
+ "BriefDescription": "PWT occupancy",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xc2",
- "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.ALL_PARTS",
- "FCMask": "0x04",
+ "EventCode": "0x42",
+ "EventName": "UNC_IIO_PWT_OCCUPANCY",
"PerPkg": "1",
- "PortMask": "0xff",
- "UMask": "0x03",
"Unit": "IIO"
},
{
- "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 0-7",
- "Counter": "2,3",
+ "BriefDescription": "Symbol Times on Link",
+ "Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xd5",
- "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL_PARTS",
- "FCMask": "0x04",
+ "EventCode": "0x82",
+ "EventName": "UNC_IIO_SYMBOL_TIMES",
"PerPkg": "1",
- "UMask": "0xff",
"Unit": "IIO"
},
{
- "BriefDescription": "Misc Events - Set 1 : Lost Forward",
+ "BriefDescription": "P2P Requests",
"Counter": "0,1",
"CounterType": "PGMABLE",
- "EventCode": "0x1F",
- "EventName": "UNC_I_MISC1.LOST_FWD",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_P2P_INSERTS",
"PerPkg": "1",
- "UMask": "0x10",
"Unit": "IRP"
},
{
- "BriefDescription": "PCIITOM request issued by the IRP unit to the mesh with the intention of writing a full cacheline",
+ "BriefDescription": "P2P Occupancy",
"Counter": "0,1",
"CounterType": "PGMABLE",
- "EventCode": "0x10",
- "EventName": "UNC_I_COHERENT_OPS.PCITOM",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_P2P_OCCUPANCY",
"PerPkg": "1",
- "UMask": "0x10",
"Unit": "IRP"
},
{
- "BriefDescription": "Coherent Ops : WbMtoI",
+ "BriefDescription": "AK Egress Allocations",
"Counter": "0,1",
"CounterType": "PGMABLE",
- "EventCode": "0x10",
- "EventName": "UNC_I_COHERENT_OPS.WBMTOI",
+ "EventCode": "0x0B",
+ "EventName": "UNC_I_TxC_AK_INSERTS",
"PerPkg": "1",
- "UMask": "0x40",
"Unit": "IRP"
},
{
- "BriefDescription": "Total IRP occupancy of inbound read and write requests to coherent memory",
+ "BriefDescription": "BL DRS Egress Cycles Full",
"Counter": "0,1",
"CounterType": "PGMABLE",
- "EventCode": "0x0f",
- "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.MEM",
+ "EventCode": "0x05",
+ "EventName": "UNC_I_TxC_BL_DRS_CYCLES_FULL",
"PerPkg": "1",
- "UMask": "0x04",
"Unit": "IRP"
},
{
- "BriefDescription": ": All Inserts Inbound (p2p + faf + cset)",
+ "BriefDescription": "BL DRS Egress Inserts",
"Counter": "0,1",
"CounterType": "PGMABLE",
- "EventCode": "0x20",
- "EventName": "UNC_I_IRP_ALL.INBOUND_INSERTS",
+ "EventCode": "0x02",
+ "EventName": "UNC_I_TxC_BL_DRS_INSERTS",
"PerPkg": "1",
- "UMask": "0x01",
"Unit": "IRP"
},
{
- "BriefDescription": "Inbound write (fast path) requests received by the IRP",
+ "BriefDescription": "BL DRS Egress Occupancy",
"Counter": "0,1",
"CounterType": "PGMABLE",
- "EventCode": "0x11",
- "EventName": "UNC_I_TRANSACTIONS.WR_PREF",
+ "EventCode": "0x08",
+ "EventName": "UNC_I_TxC_BL_DRS_OCCUPANCY",
"PerPkg": "1",
- "UMask": "0x08",
"Unit": "IRP"
},
{
- "BriefDescription": "Clockticks of the IO coherency tracker (IRP)",
+ "BriefDescription": "BL NCB Egress Cycles Full",
"Counter": "0,1",
"CounterType": "PGMABLE",
- "EventCode": "0x01",
- "EventName": "UNC_I_CLOCKTICKS",
+ "EventCode": "0x06",
+ "EventName": "UNC_I_TxC_BL_NCB_CYCLES_FULL",
"PerPkg": "1",
"Unit": "IRP"
},
{
- "BriefDescription": "FAF RF full",
+ "BriefDescription": "BL NCB Egress Inserts",
"Counter": "0,1",
"CounterType": "PGMABLE",
- "EventCode": "0x17",
- "EventName": "UNC_I_FAF_FULL",
+ "EventCode": "0x03",
+ "EventName": "UNC_I_TxC_BL_NCB_INSERTS",
"PerPkg": "1",
"Unit": "IRP"
},
{
- "BriefDescription": "Inbound read requests received by the IRP and inserted into the FAF queue",
+ "BriefDescription": "BL NCB Egress Occupancy",
"Counter": "0,1",
"CounterType": "PGMABLE",
- "EventCode": "0x18",
- "EventName": "UNC_I_FAF_INSERTS",
+ "EventCode": "0x09",
+ "EventName": "UNC_I_TxC_BL_NCB_OCCUPANCY",
"PerPkg": "1",
"Unit": "IRP"
},
{
- "BriefDescription": "Occupancy of the IRP FAF queue",
+ "BriefDescription": "BL NCS Egress Cycles Full",
"Counter": "0,1",
"CounterType": "PGMABLE",
- "EventCode": "0x19",
- "EventName": "UNC_I_FAF_OCCUPANCY",
+ "EventCode": "0x07",
+ "EventName": "UNC_I_TxC_BL_NCS_CYCLES_FULL",
"PerPkg": "1",
"Unit": "IRP"
},
{
- "BriefDescription": "FAF allocation -- sent to ADQ",
+ "BriefDescription": "BL NCS Egress Inserts",
"Counter": "0,1",
"CounterType": "PGMABLE",
- "EventCode": "0x16",
- "EventName": "UNC_I_FAF_TRANSACTIONS",
+ "EventCode": "0x04",
+ "EventName": "UNC_I_TxC_BL_NCS_INSERTS",
"PerPkg": "1",
"Unit": "IRP"
},
{
- "BriefDescription": "Responses to snoops of any type that hit M line in the IIO cache",
+ "BriefDescription": "BL NCS Egress Occupancy",
"Counter": "0,1",
"CounterType": "PGMABLE",
- "EventCode": "0x12",
- "EventName": "UNC_I_SNOOP_RESP.ALL_HIT_M",
+ "EventCode": "0x0A",
+ "EventName": "UNC_I_TxC_BL_NCS_OCCUPANCY",
"PerPkg": "1",
- "UMask": "0x78",
"Unit": "IRP"
},
{
- "BriefDescription": "Multi-socket cacheline Directory Lookups : Found in any state",
+ "BriefDescription": "UNC_I_TxR2_AD01_STALL_CREDIT_CYCLES",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1C",
+ "EventName": "UNC_I_TxR2_AD01_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No AD0 Egress Credits Stalls",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1A",
+ "EventName": "UNC_I_TxR2_AD0_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No AD1 Egress Credits Stalls",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1B",
+ "EventName": "UNC_I_TxR2_AD1_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No BL Egress Credit Stalls",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1D",
+ "EventName": "UNC_I_TxR2_BL_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x0D",
+ "EventName": "UNC_I_TxS_DATA_INSERTS_NCB",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x0E",
+ "EventName": "UNC_I_TxS_DATA_INSERTS_NCS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Request Queue Occupancy",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x0C",
+ "EventName": "UNC_I_TxS_REQUEST_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_NOTFORKED",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0x2D",
- "EventName": "UNC_M2M_DIRECTORY_LOOKUP.ANY",
+ "EventCode": "0x60",
+ "EventName": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_NOTFORKED",
"PerPkg": "1",
- "UMask": "0x01",
"Unit": "M2M"
},
{
- "BriefDescription": "Multi-socket cacheline Directory Lookups : Found in A state",
+ "BriefDescription": "Write Tracker Inserts",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0x2D",
- "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_A",
+ "EventCode": "0x64",
+ "EventName": "UNC_M2M_MIRR_WRQ_INSERTS",
"PerPkg": "1",
- "UMask": "0x08",
"Unit": "M2M"
},
{
- "BriefDescription": "Multi-socket cacheline Directory Lookups : Found in I state",
+ "BriefDescription": "Write Tracker Occupancy",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0x2D",
- "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_I",
+ "EventCode": "0x65",
+ "EventName": "UNC_M2M_MIRR_WRQ_OCCUPANCY",
"PerPkg": "1",
- "UMask": "0x02",
"Unit": "M2M"
},
{
- "BriefDescription": "Multi-socket cacheline Directory Lookups : Found in S state",
+ "BriefDescription": "UNC_M2M_PREFCAM_CIS_DROPS",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0x2D",
- "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_S",
+ "EventCode": "0x73",
+ "EventName": "UNC_M2M_PREFCAM_CIS_DROPS",
"PerPkg": "1",
- "UMask": "0x04",
"Unit": "M2M"
},
{
- "BriefDescription": "Tag Hit : Clean NearMem Read Hit",
+ "BriefDescription": "UNC_M2M_PREFCAM_RxC_CYCLES_NE",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0x2C",
- "EventName": "UNC_M2M_TAG_HIT.NM_RD_HIT_CLEAN",
+ "EventCode": "0x79",
+ "EventName": "UNC_M2M_PREFCAM_RxC_CYCLES_NE",
"PerPkg": "1",
- "UMask": "0x01",
"Unit": "M2M"
},
{
- "BriefDescription": "Tag Hit : Dirty NearMem Read Hit",
+ "BriefDescription": "UNC_M2M_PREFCAM_RxC_INSERTS",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0x2C",
- "EventName": "UNC_M2M_TAG_HIT.NM_RD_HIT_DIRTY",
+ "EventCode": "0x78",
+ "EventName": "UNC_M2M_PREFCAM_RxC_INSERTS",
"PerPkg": "1",
- "UMask": "0x02",
"Unit": "M2M"
},
{
- "BriefDescription": "Clockticks of the mesh to memory (M2M)",
+ "BriefDescription": "UNC_M2M_PREFCAM_RxC_OCCUPANCY",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventName": "UNC_M2M_CLOCKTICKS",
+ "EventCode": "0x77",
+ "EventName": "UNC_M2M_PREFCAM_RxC_OCCUPANCY",
"PerPkg": "1",
"Unit": "M2M"
},
{
- "BriefDescription": "CMS Clockticks",
+ "BriefDescription": "Source Throttle",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0xc0",
- "EventName": "UNC_M2M_CMS_CLOCKTICKS",
+ "EventCode": "0xae",
+ "EventName": "UNC_M2M_RING_SRC_THRTL",
"PerPkg": "1",
"Unit": "M2M"
},
{
- "BriefDescription": "M2M Reads Issued to iMC : PMM - All Channels",
+ "BriefDescription": "AD Ingress (from CMS) Full",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0x37",
- "EventName": "UNC_M2M_IMC_READS.TO_PMM",
+ "EventCode": "0x04",
+ "EventName": "UNC_M2M_RxC_AD_CYCLES_FULL",
"PerPkg": "1",
- "UMask": "0x0720",
- "UMaskExt": "0x07",
"Unit": "M2M"
},
{
- "BriefDescription": "M2M Writes Issued to iMC : PMM - All Channels",
+ "BriefDescription": "AD Ingress (from CMS) Not Empty",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0x38",
- "EventName": "UNC_M2M_IMC_WRITES.TO_PMM",
+ "EventCode": "0x03",
+ "EventName": "UNC_M2M_RxC_AD_CYCLES_NE",
"PerPkg": "1",
- "UMask": "0x1C80",
- "UMaskExt": "0x1C",
"Unit": "M2M"
},
{
- "BriefDescription": "Clockticks of the mesh to PCI (M2P)",
+ "BriefDescription": "AK Egress (to CMS) Allocations",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0x01",
- "EventName": "UNC_M2P_CLOCKTICKS",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M2M_RxC_AK_WR_CMP",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) Full",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x08",
+ "EventName": "UNC_M2M_RxC_BL_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x07",
+ "EventName": "UNC_M2M_RxC_BL_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe4",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED_1",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_SCOREBOARD_AD_RETRY_ACCEPTS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2M_SCOREBOARD_AD_RETRY_ACCEPTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_SCOREBOARD_AD_RETRY_REJECTS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_M2M_SCOREBOARD_AD_RETRY_REJECTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Retry - Mem Mirroring Mode",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_M2M_SCOREBOARD_BL_RETRY_ACCEPTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Retry - Mem Mirroring Mode",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_M2M_SCOREBOARD_BL_RETRY_REJECTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Scoreboard Accepts",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2F",
+ "EventName": "UNC_M2M_SCOREBOARD_RD_ACCEPTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Scoreboard Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x30",
+ "EventName": "UNC_M2M_SCOREBOARD_RD_REJECTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Scoreboard Accepts",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x31",
+ "EventName": "UNC_M2M_SCOREBOARD_WR_ACCEPTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Scoreboard Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2M_SCOREBOARD_WR_REJECTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number AD Ingress Credits",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x41",
+ "EventName": "UNC_M2M_TGR_AD_CREDITS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number BL Ingress Credits",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2M_TGR_BL_CREDITS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Credit Acquired",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x0d",
+ "EventName": "UNC_M2M_TxC_AD_CREDITS_ACQUIRED",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Credits Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x0e",
+ "EventName": "UNC_M2M_TxC_AD_CREDIT_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Full",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x0c",
+ "EventName": "UNC_M2M_TxC_AD_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x0b",
+ "EventName": "UNC_M2M_TxC_AD_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No AD Egress (to CMS) Credits",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x0f",
+ "EventName": "UNC_M2M_TxC_AD_NO_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No AD Egress (to CMS) Credits",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2M_TxC_AD_NO_CREDIT_STALLED",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AKC Credits",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M2M_TxC_AKC_CREDITS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Source Throttle",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xae",
+ "EventName": "UNC_M2P_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe4",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED_1",
"PerPkg": "1",
"Unit": "M2PCIe"
},
@@ -2364,172 +34109,4449 @@
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
"EventCode": "0xc0",
- "EventName": "UNC_M2P_CMS_CLOCKTICKS",
+ "EventName": "UNC_M3UPI_CMS_CLOCKTICKS",
"PerPkg": "1",
- "Unit": "M2PCIe"
+ "Unit": "M3UPI"
},
{
- "BriefDescription": "Clockticks of the mesh to UPI (M3UPI)",
+ "BriefDescription": "D2C Sent",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0x01",
- "EventName": "UNC_M3UPI_CLOCKTICKS",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M3UPI_D2C_SENT",
"PerPkg": "1",
"Unit": "M3UPI"
},
{
- "BriefDescription": "Clockticks in the UBOX using a dedicated 48-bit Fixed Counter",
- "Counter": "FIXED",
- "CounterType": "FIXED",
- "EventCode": "0xff",
- "EventName": "UNC_U_CLOCKTICKS",
+ "BriefDescription": "D2U Sent",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M3UPI_D2U_SENT",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Source Throttle",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xae",
+ "EventName": "UNC_M3UPI_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe4",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED_1",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AK Flow Q Inserts",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2F",
+ "EventName": "UNC_M3UPI_TxC_AK_FLQ_INSERTS",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AK Flow Q Occupancy",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1E",
+ "EventName": "UNC_M3UPI_TxC_AK_FLQ_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "FlowQ Generated Prefetch",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x29",
+ "EventName": "UNC_M3UPI_UPI_PREFETCH_SPAWN",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "IDI Lock/SplitLock Cycles",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x44",
+ "EventName": "UNC_U_LOCK_CYCLES",
"PerPkg": "1",
"Unit": "UBOX"
},
{
- "BriefDescription": "Valid Flits Received : All Data",
+ "BriefDescription": "RACU Request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x46",
+ "EventName": "UNC_U_RACU_REQUESTS",
+ "PerPkg": "1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_CRD_RETURN_BLOCKED",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0x03",
- "EventName": "UNC_UPI_RxL_FLITS.ALL_DATA",
+ "EventCode": "0x16",
+ "EventName": "UNC_UPI_M3_CRD_RETURN_BLOCKED",
"PerPkg": "1",
- "UMask": "0x0F",
"Unit": "UPI LL"
},
{
- "BriefDescription": "Valid Flits Received : All Non Data",
+ "BriefDescription": "Cycles where phy is not in L0, L0c, L0p, L1",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0x03",
- "EventName": "UNC_UPI_RxL_FLITS.NON_DATA",
+ "EventCode": "0x20",
+ "EventName": "UNC_UPI_PHY_INIT_CYCLES",
"PerPkg": "1",
- "UMask": "0x97",
"Unit": "UPI LL"
},
{
- "BriefDescription": "Valid Flits Sent : All Data",
+ "BriefDescription": "L1 Req Nack",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0x02",
- "EventName": "UNC_UPI_TxL_FLITS.ALL_DATA",
+ "EventCode": "0x23",
+ "EventName": "UNC_UPI_POWER_L1_NACK",
"PerPkg": "1",
- "UMask": "0x0F",
"Unit": "UPI LL"
},
{
- "BriefDescription": "Valid Flits Sent : All Non Data",
+ "BriefDescription": "L1 Req (same as L1 Ack)",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0x02",
- "EventName": "UNC_UPI_TxL_FLITS.NON_DATA",
+ "EventCode": "0x22",
+ "EventName": "UNC_UPI_POWER_L1_REQ",
"PerPkg": "1",
- "UMask": "0x97",
"Unit": "UPI LL"
},
{
- "BriefDescription": "Number of kfclks",
+ "BriefDescription": "Cycles in L0p",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0x01",
- "EventName": "UNC_UPI_CLOCKTICKS",
+ "EventCode": "0x25",
+ "EventName": "UNC_UPI_RxL0P_POWER_CYCLES",
"PerPkg": "1",
"Unit": "UPI LL"
},
{
- "BriefDescription": "Cycles in L1",
+ "BriefDescription": "Cycles in L0",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0x21",
- "EventName": "UNC_UPI_L1_POWER_CYCLES",
+ "EventCode": "0x24",
+ "EventName": "UNC_UPI_RxL0_POWER_CYCLES",
"PerPkg": "1",
"Unit": "UPI LL"
},
{
- "BriefDescription": "Cycles in L0p",
+ "BriefDescription": "CRC Errors Detected",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0x27",
- "EventName": "UNC_UPI_TxL0P_POWER_CYCLES",
+ "EventCode": "0x0B",
+ "EventName": "UNC_UPI_RxL_CRC_ERRORS",
"PerPkg": "1",
"Unit": "UPI LL"
},
{
- "BriefDescription": "Valid Flits Sent : Null FLITs transmitted to any slot",
+ "BriefDescription": "LLR Requests Sent",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0x02",
- "EventName": "UNC_UPI_TxL_FLITS.ALL_NULL",
+ "EventCode": "0x08",
+ "EventName": "UNC_UPI_RxL_CRC_LLR_REQ_TRANSMIT",
"PerPkg": "1",
- "UMask": "0x27",
"Unit": "UPI LL"
},
{
- "BriefDescription": "Valid Flits Received : Null FLITs received from any slot",
+ "BriefDescription": "VN0 Credit Consumed",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0x03",
- "EventName": "UNC_UPI_RxL_FLITS.ALL_NULL",
+ "EventCode": "0x39",
+ "EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VN0",
"PerPkg": "1",
- "UMask": "0x27",
"Unit": "UPI LL"
},
{
- "BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices to locally HOMed memory",
+ "BriefDescription": "VN1 Credit Consumed",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x3A",
+ "EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VN1",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "VNA Credit Consumed",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VNA",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_POWER_CYCLES_LL_ENTER",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x28",
+ "EventName": "UNC_UPI_TxL0P_POWER_CYCLES_LL_ENTER",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_POWER_CYCLES_M3_EXIT",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x29",
+ "EventName": "UNC_UPI_TxL0P_POWER_CYCLES_M3_EXIT",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in L0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x26",
+ "EventName": "UNC_UPI_TxL0_POWER_CYCLES",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Bypassed",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x41",
+ "EventName": "UNC_UPI_TxL_BYPASSED",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Allocations",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_UPI_TxL_INSERTS",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_UPI_TxL_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_VNA_CREDIT_RETURN_BLOCKED_VN01",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x45",
+ "EventName": "UNC_UPI_VNA_CREDIT_RETURN_BLOCKED_VN01",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "VNA Credits Pending Return - Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x44",
+ "EventName": "UNC_UPI_VNA_CREDIT_RETURN_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Any Request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1FFFFF",
+ "UMaskExt": "0x1FFF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.DATA_READ",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_RD",
+ "PerPkg": "1",
+ "UMask": "0x1bc1ff",
+ "UMaskExt": "0x1bc1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Flush or Invalidate Requests",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.FLUSH_INV",
+ "PerPkg": "1",
+ "UMask": "0x1A44FF",
+ "UMaskExt": "0x1A44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.CODE_READ",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.CODE",
+ "PerPkg": "1",
+ "UMask": "0x1bd0ff",
+ "UMaskExt": "0x1bd0",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.LOC_HOM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCALLY_HOMED_ADDRESS",
+ "PerPkg": "1",
+ "UMask": "0x0bdfff",
+ "UMaskExt": "0x0bdf",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.REM_HOM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTELY_HOMED_ADDRESS",
+ "PerPkg": "1",
+ "UMask": "0x15dfff",
+ "UMaskExt": "0x15df",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Flush or Invalidate requests that come from a Remote socket",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.FLUSH_INV_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x1A04FF",
+ "UMaskExt": "0x1A04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Data Read Requests that come from a Remote socket",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x1A01FF",
+ "UMaskExt": "0x1A01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : RFO Requests that come from a Remote socket",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.RFO_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x1A08FF",
+ "UMaskExt": "0x1A08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.CODE_READ_REMOTE",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.CODE_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x1a10ff",
+ "UMaskExt": "0x1a10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Snoop Requests from a Remote Socket",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_SNP",
+ "PerPkg": "1",
+ "UMask": "0x1C19FF",
+ "UMaskExt": "0x1C19",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Flush or Invalidate Requests that come from the local socket (usually the core)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.FLUSH_INV_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x1844FF",
+ "UMaskExt": "0x1844",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Data Read Request that come from the local socket (usually the core)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x19C1FF",
+ "UMaskExt": "0x19C1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : RFO Requests that come from the local socket (usually the core)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.RFO_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x19C8FF",
+ "UMaskExt": "0x19C8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.CODE_READ_LOCAL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.CODE_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x19d0ff",
+ "UMaskExt": "0x19d0",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.LLCPREF_LOCAL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LLC_PF_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x189dff",
+ "UMaskExt": "0x189d",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Opts issued by iA Cores that hit the LLC",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
"EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR_LOCAL",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_OPT",
"PerPkg": "1",
- "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices : Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xCD42FF04",
- "UMaskExt": "0xCD42FF",
+ "UMask": "0xC827FD01",
+ "UMaskExt": "0xC827FD",
"Unit": "CHA"
},
{
- "BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices to remotely HOMed memory",
+ "BriefDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores that hit the LLC",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
"EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR_REMOTE",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_OPT_PREF",
"PerPkg": "1",
- "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices : Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xCD437F04",
- "UMaskExt": "0xCD437F",
+ "UMask": "0xC8A7FD01",
+ "UMaskExt": "0xC8A7FD",
"Unit": "CHA"
},
{
- "BriefDescription": "TOR Inserts : ItoMs issued by IO Devices to locally HOMed memory",
+ "BriefDescription": "TOR Inserts : DRd_Opt issued by iA Cores that missed the LLC",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
"EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM_LOCAL",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT",
"PerPkg": "1",
- "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices : Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xCC42FF04",
- "UMaskExt": "0xCC42FF",
+ "UMask": "0xC827FE01",
+ "UMaskExt": "0xC827FE",
"Unit": "CHA"
},
{
- "BriefDescription": "TOR Inserts : ItoMs issued by IO Devices to remotely HOMed memory",
+ "BriefDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores that missed the LLC",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
"EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM_REMOTE",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF",
"PerPkg": "1",
- "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices : Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xCC437F04",
- "UMaskExt": "0xCC437F",
+ "UMask": "0xC8A7FE01",
+ "UMaskExt": "0xC8A7FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that hit the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC88FFD01",
+ "UMaskExt": "0xC88FFD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores that Hit the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC897FD01",
+ "UMaskExt": "0xC897FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Opts issued by iA Cores that hit the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_OPT",
+ "PerPkg": "1",
+ "UMask": "0xC827FD01",
+ "UMaskExt": "0xC827FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores that hit the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC8A7FD01",
+ "UMaskExt": "0xC8A7FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Hit the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC887FD01",
+ "UMaskExt": "0xC887FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC88FFE01",
+ "UMaskExt": "0xC88FFE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores that Missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC897FE01",
+ "UMaskExt": "0xC897FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Opt issued by iA Cores that missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT",
+ "PerPkg": "1",
+ "UMask": "0xC827FE01",
+ "UMaskExt": "0xC827FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores that missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC8A7FE01",
+ "UMaskExt": "0xC8A7FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC887FE01",
+ "UMaskExt": "0xC887FE",
"Unit": "CHA"
},
{
- "BriefDescription": "Multi-socket cacheline Directory Updates : From/to any state. Note: event counts are incorrect in 2LM mode.",
+ "BriefDescription": "TOR Inserts : RFOs issued by IO Devices that hit the LLC",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
- "EventCode": "0x2e",
- "EventName": "UNC_M2M_DIRECTORY_UPDATE.ANY",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_RFO",
+ "PerPkg": "1",
+ "UMask": "0xC803FD04",
+ "UMaskExt": "0xC803FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMs issued by IO Devices that Hit the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_ITOM",
+ "PerPkg": "1",
+ "UMask": "0xCC43FD04",
+ "UMaskExt": "0xCC43FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFOs issued by IO Devices that hit the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_RFO",
+ "PerPkg": "1",
+ "UMask": "0xC803FD04",
+ "UMaskExt": "0xC803FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by IO Devices",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_RFO",
+ "PerPkg": "1",
+ "UMask": "0xC803FF04",
+ "UMaskExt": "0xC803FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_DRD",
+ "PerPkg": "1",
+ "UMask": "0xC817FF01",
+ "UMaskExt": "0xC817FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Opts issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_OPT",
+ "PerPkg": "1",
+ "UMask": "0xC827FF01",
+ "UMaskExt": "0xC827FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC8A7FF01",
+ "UMaskExt": "0xC8A7FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; CRd Pref from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_CRD_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC88FFF01",
+ "UMaskExt": "0xC88FFF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFOs issued by IO Devices",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_RFO",
+ "PerPkg": "1",
+ "UMask": "0xC803FF04",
+ "UMaskExt": "0xC803FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMs issued by IO Devices",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_ITOM",
+ "PerPkg": "1",
+ "UMask": "0xCC43FF04",
+ "UMaskExt": "0xCC43FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_RFO_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC887FF01",
+ "UMaskExt": "0xC887FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFRFO",
+ "PerPkg": "1",
+ "UMask": "0xCCC7FF01",
+ "UMaskExt": "0xCCC7FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Opts issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_OPT",
+ "PerPkg": "1",
+ "UMask": "0xC827FF01",
+ "UMaskExt": "0xC827FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC8A7FF01",
+ "UMaskExt": "0xC8A7FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; CRd Pref from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CRD_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC88FFF01",
+ "UMaskExt": "0xC88FFF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC897FF01",
+ "UMaskExt": "0xC897FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd Pref misses from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0xC896FE01",
+ "UMaskExt": "0xC896FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd Pref misses from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0xC8977E01",
+ "UMaskExt": "0xC8977E",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC - HOMed locally",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0xC806FE01",
+ "UMaskExt": "0xC806FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC - HOMed remotely",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0xC8077E01",
+ "UMaskExt": "0xC8077E",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Missed the LLC - HOMed locally",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0xC886FE01",
+ "UMaskExt": "0xC886FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Missed the LLC - HOMed remotely",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0xC8877E01",
+ "UMaskExt": "0xC8877E",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CLFlushOpts issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_CLFLUSHOPT",
+ "PerPkg": "1",
+ "UMask": "0xC8D7FF01",
+ "UMaskExt": "0xC8D7FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMs issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_ITOM",
+ "PerPkg": "1",
+ "UMask": "0xCC47FF01",
+ "UMaskExt": "0xCC47FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WbMtoIs issued by IO Devices",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0xCC23FF04",
+ "UMaskExt": "0xCC23FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CLFlushes issued by IO Devices",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_CLFLUSH",
+ "PerPkg": "1",
+ "UMask": "0xC8C3FF04",
+ "UMaskExt": "0xC8C3FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WbMtoIs issued by an iA Cores. Modified Write Backs",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0xcc27ff01",
+ "UMaskExt": "0xcc27ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_PMM",
+ "PerPkg": "1",
+ "UMask": "0xC8978A01",
+ "UMaskExt": "0xC8978A",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL_PMM",
+ "PerPkg": "1",
+ "UMask": "0xC8968A01",
+ "UMaskExt": "0xC8968A",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE_PMM",
+ "PerPkg": "1",
+ "UMask": "0xC8970A01",
+ "UMaskExt": "0xC8970A",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; WCiLF misses from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_FULL_STREAMING_WR_PMM",
+ "PerPkg": "1",
+ "UMask": "0xc8678a01",
+ "UMaskExt": "0xc8678a",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; WCiLF misses from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_FULL_STREAMING_WR_LOCAL_PMM",
+ "PerPkg": "1",
+ "UMask": "0xc8668a01",
+ "UMaskExt": "0xc8668a",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; WCiLF misses from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_FULL_STREAMING_WR_REMOTE_PMM",
+ "PerPkg": "1",
+ "UMask": "0xc8670a01",
+ "UMaskExt": "0xc8670a",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA_MISS_WCILF_DDR",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_FULL_STREAMING_WR_DRAM",
+ "PerPkg": "1",
+ "UMask": "0xC8678601",
+ "UMaskExt": "0xC86786",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCILF_DDR",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_FULL_STREAMING_WR_LOCAL_DRAM",
+ "PerPkg": "1",
+ "UMask": "0xC8668601",
+ "UMaskExt": "0xC86686",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; WCiL misses from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_PARTIAL_STREAMING_WR_PMM",
+ "PerPkg": "1",
+ "UMask": "0xc86f8a01",
+ "UMaskExt": "0xc86f8a",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; WCiL misses from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_PARTIAL_STREAMING_WR_LOCAL_PMM",
+ "PerPkg": "1",
+ "UMask": "0xc86e8a01",
+ "UMaskExt": "0xc86e8a",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; WCiL misses from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_PARTIAL_STREAMING_WR_REMOTE_PMM",
+ "PerPkg": "1",
+ "UMask": "0xc86f0a01",
+ "UMaskExt": "0xc86f0a",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA_MISS_WCIL_DDR",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_PARTIAL_STREAMING_WR_DRAM",
+ "PerPkg": "1",
+ "UMask": "0xC86F8601",
+ "UMaskExt": "0xC86F86",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCIL_DDR",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_PARTIAL_STREAMING_WR_LOCAL_DRAM",
+ "PerPkg": "1",
+ "UMask": "0xC86E8601",
+ "UMaskExt": "0xC86E86",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL_PMM",
+ "PerPkg": "1",
+ "UMask": "0xC8168A01",
+ "UMaskExt": "0xC8168A",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_REMOTE_PMM",
+ "PerPkg": "1",
+ "UMask": "0xC8170A01",
+ "UMaskExt": "0xC8170A",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_PMM",
+ "PerPkg": "1",
+ "UMask": "0xC8978A01",
+ "UMaskExt": "0xC8978A",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_LOCAL_PMM",
+ "PerPkg": "1",
+ "UMask": "0xC8968A01",
+ "UMaskExt": "0xC8968A",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_REMOTE_PMM",
+ "PerPkg": "1",
+ "UMask": "0xC8970A01",
+ "UMaskExt": "0xC8970A",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; WCiLF misses from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_FULL_STREAMING_WR",
+ "PerPkg": "1",
+ "UMask": "0xc867fe01",
+ "UMaskExt": "0xc867fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; WCiLF misses from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_FULL_STREAMING_WR_PMM",
+ "PerPkg": "1",
+ "UMask": "0xc8678a01",
+ "UMaskExt": "0xc8678a",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; WCiLF misses from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_FULL_STREAMING_WR_LOCAL_PMM",
+ "PerPkg": "1",
+ "UMask": "0xc8668a01",
+ "UMaskExt": "0xc8668a",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; WCiLF misses from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_FULL_STREAMING_WR_REMOTE_PMM",
+ "PerPkg": "1",
+ "UMask": "0xc8670a01",
+ "UMaskExt": "0xc8670a",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; WCiL misses from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_PARTIAL_STREAMING_WR",
+ "PerPkg": "1",
+ "UMask": "0xc86ffe01",
+ "UMaskExt": "0xc86ffe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; WCiL misses from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_PARTIAL_STREAMING_WR_PMM",
+ "PerPkg": "1",
+ "UMask": "0xc86f8a01",
+ "UMaskExt": "0xc86f8a",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; WCiL misses from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_PARTIAL_STREAMING_WR_LOCAL_PMM",
+ "PerPkg": "1",
+ "UMask": "0xc86e8a01",
+ "UMaskExt": "0xc86e8a",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; WCiL misses from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_PARTIAL_STREAMING_WR_REMOTE_PMM",
+ "PerPkg": "1",
+ "UMask": "0xc86f0a01",
+ "UMaskExt": "0xc86f0a",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "1",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART0_FREERUN",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "2",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART1_FREERUN",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "3",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART2_FREERUN",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "4",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART3_FREERUN",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "5",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART4_FREERUN",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "6",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART5_FREERUN",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "7",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART6_FREERUN",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "8",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART7_FREERUN",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "9",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_IIO_BANDWIDTH_OUT.PART0_FREERUN",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "13",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_IIO_BANDWIDTH_OUT.PART4_FREERUN",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "12",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_IIO_BANDWIDTH_OUT.PART3_FREERUN",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "11",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_IIO_BANDWIDTH_OUT.PART2_FREERUN",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "10",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_IIO_BANDWIDTH_OUT.PART1_FREERUN",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "15",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_IIO_BANDWIDTH_OUT.PART6_FREERUN",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "14",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_IIO_BANDWIDTH_OUT.PART5_FREERUN",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "16",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_IIO_BANDWIDTH_OUT.PART7_FREERUN",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL",
+ "PerPkg": "1",
+ "UMask": "0xC86FFE01",
+ "UMaskExt": "0xC86FFE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.UPI_NCB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.UPI_NCS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.UPI_NCB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.UPI_NCS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "UNC_M2P_TxC_CREDITS.PMM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M2P_TxC_CREDITS.PMM",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_TxC_CYCLES_FULL.PMM_BLOCK_1",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_TxC_CYCLES_FULL.PMM_BLOCK_0",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_TxC_CYCLES_NE.PMM_DISTRESS_1",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_TxC_CYCLES_NE.PMM_DISTRESS_0",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : PMM Local",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M2P_DISTRESS_ASSERTED.PMM_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : PMM Remote",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M2P_DISTRESS_ASSERTED.PMM_NONLOCAL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cache Lookups : RFO Request Filter",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.RFO_F",
+ "PerPkg": "1",
+ "UMaskExt": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Transactions homed locally Filter",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_F",
+ "PerPkg": "1",
+ "UMaskExt": "0x800",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Transactions homed remotely Filter",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_F",
+ "PerPkg": "1",
+ "UMaskExt": "0x1000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Remote snoop request Filter",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_SNOOP_F",
+ "PerPkg": "1",
+ "UMaskExt": "0x400",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : All Request Filter",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.ANY_F",
+ "PerPkg": "1",
+ "UMaskExt": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Data Read Request Filter",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_F",
+ "PerPkg": "1",
+ "UMaskExt": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Write Request Filter",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.OTHER_REQ_F",
+ "PerPkg": "1",
+ "UMaskExt": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Flush or Invalidate Filter",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.FLUSH_OR_INV_F",
+ "PerPkg": "1",
+ "UMaskExt": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : CRd Request Filter",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.CODE_READ_F",
+ "PerPkg": "1",
+ "UMaskExt": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Local request Filter",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.COREPREF_OR_DMND_LOCAL_F",
+ "PerPkg": "1",
+ "UMaskExt": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Local LLC prefetch requests (from LLC) Filter",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LLCPREF_LOCAL_F",
+ "PerPkg": "1",
+ "UMaskExt": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Remote non-snoop request Filter",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.PREF_OR_DMND_REMOTE_F",
+ "PerPkg": "1",
+ "UMaskExt": "0x200",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : All Misses",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.MISS_ALL",
+ "PerPkg": "1",
+ "UMask": "0x1fe001",
+ "UMaskExt": "0x1fe0",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.DATA_READ",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_ALL",
+ "PerPkg": "1",
+ "UMask": "0x1fc1ff",
+ "UMaskExt": "0x1fc1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Data Read Misses",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x1bc101",
+ "UMaskExt": "0x1bc1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.DATA_READ_LOCAL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DMND_READ_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x841ff",
+ "UMaskExt": "0x841",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.WRITES_AND_OTHER",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.WRITE_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x842ff",
+ "UMaskExt": "0x842",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.RFO_LOCAL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.RFO_PREF_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x888ff",
+ "UMaskExt": "0x888",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.WRITES_AND_OTHER",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.WRITE_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x17c2ff",
+ "UMaskExt": "0x17c2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : All transactions from Remote Agents",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.ALL_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x1e20ff",
+ "UMaskExt": "0x1e20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : PMM Local",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M3UPI_DISTRESS_ASSERTED.PMM_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : PMM Remote",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M3UPI_DISTRESS_ASSERTED.PMM_NONLOCAL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty : IIO0 and IIO1 share the same ring destination. (1 VN0 credit only)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty : IIO5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.UBOX_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M2M_DISTRESS_PMM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xF2",
+ "EventName": "UNC_M2M_DISTRESS_PMM",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_DISTRESS_PMM_MEMMODE",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xF1",
+ "EventName": "UNC_M2M_DISTRESS_PMM_MEMMODE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : Critical Priority - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x0702",
+ "UMaskExt": "0x07",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : From TGR - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.FROM_TGR",
+ "PerPkg": "1",
+ "UMask": "0x0740",
+ "UMaskExt": "0x07",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : ISOCH Full Line - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.FULL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x1C04",
+ "UMaskExt": "0x1C",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : ISOCH Partial - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.PARTIAL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x1C08",
+ "UMaskExt": "0x1C",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : DDR - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.TO_DDR_AS_MEM",
+ "PerPkg": "1",
+ "UMask": "0x1C20",
+ "UMaskExt": "0x1C",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : DDR, acting as Cache - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.TO_DDR_AS_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x1C40",
+ "UMaskExt": "0x1C",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : From TGR - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.FROM_TGR",
+ "PerPkg": "1",
+ "UMaskExt": "0x1D",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Non-Inclusive Miss - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.NI_MISS",
+ "PerPkg": "1",
+ "UMaskExt": "0x1C",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Cycles Full : All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6B",
+ "EventName": "UNC_M2M_PREFCAM_CYCLES_FULL.ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x07",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Cycles Not Empty : All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6C",
+ "EventName": "UNC_M2M_PREFCAM_CYCLES_NE.ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x07",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped : XPT - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6f",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.XPT_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x15",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped : UPI - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6f",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.UPI_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x2a",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Occupancy : All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6A",
+ "EventName": "UNC_M2M_PREFCAM_OCCUPANCY.ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x07",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": ": All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x76",
+ "EventName": "UNC_M2M_PREFCAM_RESP_MISS.ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x07",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : PMM - Ch0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH0_TO_PMM",
+ "PerPkg": "1",
+ "UMask": "0x0120",
+ "UMaskExt": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : DDR - Ch0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH0_TO_DDR_AS_MEM",
+ "PerPkg": "1",
+ "UMask": "0x0108",
+ "UMaskExt": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : DDR, acting as Cache - Ch0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH0_TO_DDR_AS_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x0110",
+ "UMaskExt": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : PMM - Ch1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH1_TO_PMM",
+ "PerPkg": "1",
+ "UMask": "0x0220",
+ "UMaskExt": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : DDR - Ch1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH1_TO_DDR_AS_MEM",
+ "PerPkg": "1",
+ "UMask": "0x0208",
+ "UMaskExt": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : DDR, acting as Cache - Ch1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH1_TO_DDR_AS_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x0210",
+ "UMaskExt": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : DDR - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.TO_DDR_AS_MEM",
+ "PerPkg": "1",
+ "UMask": "0x0708",
+ "UMaskExt": "0x07",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : DDR, acting as Cache - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.TO_DDR_AS_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x0710",
+ "UMaskExt": "0x07",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : PMM - Ch0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_TO_PMM",
+ "PerPkg": "1",
+ "UMask": "0x0480",
+ "UMaskExt": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : DDR - Ch0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_TO_DDR_AS_MEM",
+ "PerPkg": "1",
+ "UMask": "0x0420",
+ "UMaskExt": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : DDR, acting as Cache - Ch0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_TO_DDR_AS_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x0440",
+ "UMaskExt": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : PMM - Ch1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_TO_PMM",
+ "PerPkg": "1",
+ "UMask": "0x0880",
+ "UMaskExt": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : DDR - Ch1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_TO_DDR_AS_MEM",
+ "PerPkg": "1",
+ "UMask": "0x0820",
+ "UMaskExt": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : DDR, acting as Cache - Ch1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_TO_DDR_AS_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x0840",
+ "UMaskExt": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC RPQ Cycles w/Credits - PMM : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M2M_RPQ_NO_REG_CRD_PMM.CHN0",
"PerPkg": "1",
- "PublicDescription": "Multi-socket cacheline Directory Updates : From/to any state. Note: event counts are incorrect in 2LM mode.",
"UMask": "0x01",
"Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC RPQ Cycles w/Credits - PMM : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M2M_RPQ_NO_REG_CRD_PMM.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC RPQ Cycles w/Credits - PMM : Channel 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M2M_RPQ_NO_REG_CRD_PMM.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - PMM : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x51",
+ "EventName": "UNC_M2M_WPQ_NO_REG_CRD_PMM.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - PMM : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x51",
+ "EventName": "UNC_M2M_WPQ_NO_REG_CRD_PMM.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - PMM : Channel 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x51",
+ "EventName": "UNC_M2M_WPQ_NO_REG_CRD_PMM.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_PREFCAM_RxC_DEALLOCS.PMM_MEMMODE_ACCEPT",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x7A",
+ "EventName": "UNC_M2M_PREFCAM_RxC_DEALLOCS.PMM_MEMMODE_ACCEPT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : PMM Local",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M2M_DISTRESS_ASSERTED.PMM_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : PMM Remote",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M2M_DISTRESS_ASSERTED.PMM_NONLOCAL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : UPI - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6d",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS.UPI_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x2a",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : XPT - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6D",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS.XPT_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x15",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": ": PWC Hit to a 4K page",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.PWC_4K_HITS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PWC Hit to a 2M page",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.PWC_2M_HITS",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PWC Hit to a 1G page",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.PWC_1G_HITS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PWT Hit to a 256T page",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.PWC_512G_HITS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PageWalk cache fill",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.PWC_CACHE_FILLS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Global IOTLB invalidation cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.NUM_INVAL_GBL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Domain-selective IOTLB invalidation cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.NUM_INVAL_DOMAIN",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Page-selective IOTLB invalidation cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.NUM_INVAL_PAGE",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Context cache global invalidation cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.NUM_CTXT_CACHE_INVAL_GBL",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Domain-selective Context cache invalidation cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.NUM_CTXT_CACHE_INVAL_DOMAIN",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Device-selective Context cache invalidation cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.NUM_CTXT_CACHE_INVAL_DEVICE",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : MsgB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MSGB",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Multi-cast",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MCAST",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Ubox",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.UBOX",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Memory",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MEM",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Remote P2P",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.REM_P2P",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Local P2P",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.LOC_P2P",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Confined P2P",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.CONFINED_P2P",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Abort",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.ABORT",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "ITC address map 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8F",
+ "EventName": "UNC_IIO_NUM_TGT_MATCHED_REQ_OF_CPU",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Issuing to IOMMU",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.IOMMU_REQ",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Processing response from IOMMU",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.IOMMU_HIT",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Request Ownership",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.REQ_OWN",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Issuing final read or write of line",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.FINAL_RD_WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Writing line",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Passing data to be written",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.DATA",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Occupancy of outbound request queue : To device",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC5",
+ "EventName": "UNC_IIO_NUM_OUSTANDING_REQ_FROM_CPU.TO_IO",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - cacheline complete : Request Ownership",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x91",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.REQ_OWN",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - cacheline complete : Issuing final read or write of line",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x91",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.FINAL_RD_WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - cacheline complete : Writing line",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x91",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - cacheline complete : Passing data to be written",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x91",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.DATA",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request complete : Issuing to IOMMU",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.IOMMU_REQ",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request complete : Processing response from IOMMU",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.IOMMU_HIT",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request complete : Request Ownership",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.REQ_OWN",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request complete : Issuing final read or write of line",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.FINAL_RD_WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request complete : Writing line",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request complete : Passing data to be written",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.DATA",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - pass complete : Request Ownership",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.REQ_OWN",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - pass complete : Issuing final read or write of line",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.FINAL_RD_WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - pass complete : Writing line",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - pass complete : Passing data to be written",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.DATA",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests : Issuing to IOMMU",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_IIO_INBOUND_ARB_REQ.IOMMU_REQ",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests : Processing response from IOMMU",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_IIO_INBOUND_ARB_REQ.IOMMU_HIT",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests : Request Ownership",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_IIO_INBOUND_ARB_REQ.REQ_OWN",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests : Issuing final read or write of line",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_IIO_INBOUND_ARB_REQ.FINAL_RD_WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests : Writing line",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_IIO_INBOUND_ARB_REQ.WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests : Passing data to be written",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_IIO_INBOUND_ARB_REQ.DATA",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests granted : Issuing to IOMMU",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x87",
+ "EventName": "UNC_IIO_INBOUND_ARB_WON.IOMMU_REQ",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests granted : Processing response from IOMMU",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x87",
+ "EventName": "UNC_IIO_INBOUND_ARB_WON.IOMMU_HIT",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests granted : Request Ownership",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x87",
+ "EventName": "UNC_IIO_INBOUND_ARB_WON.REQ_OWN",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests granted : Issuing final read or write of line",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x87",
+ "EventName": "UNC_IIO_INBOUND_ARB_WON.FINAL_RD_WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests granted : Writing line",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x87",
+ "EventName": "UNC_IIO_INBOUND_ARB_WON.WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests granted : Passing data to be written",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x87",
+ "EventName": "UNC_IIO_INBOUND_ARB_WON.DATA",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Outbound cacheline requests issued : 64B requests issued to device",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_IIO_OUTBOUND_CL_REQS_ISSUED.TO_IO",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Outbound TLP (transaction layer packet) requests issued : To device",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD1",
+ "EventName": "UNC_IIO_OUTBOUND_TLP_REQS_ISSUED.TO_IO",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number requests sent to PCIe from main die : From IRP",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_NUM_REQ_FROM_CPU.IRP",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number requests sent to PCIe from main die : From ITC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_NUM_REQ_FROM_CPU.ITC",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number requests sent to PCIe from main die : Completion allocations",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_NUM_REQ_FROM_CPU.PREALLOC",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLF issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WCILF",
+ "PerPkg": "1",
+ "UMask": "0xC867FF01",
+ "UMaskExt": "0xC867FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WCIL",
+ "PerPkg": "1",
+ "UMask": "0xC86FFF01",
+ "UMaskExt": "0xC86FFF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WiLs issued by iA Cores that Missed LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WIL",
+ "PerPkg": "1",
+ "UMask": "0xC87FDE01",
+ "UMaskExt": "0xC87FDE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRd issued by iA Cores that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0xC80EFE01",
+ "UMaskExt": "0xC80EFE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRd issued by iA Cores that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0xC80F7E01",
+ "UMaskExt": "0xC80F7E",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0xC88EFE01",
+ "UMaskExt": "0xC88EFE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0xC88F7E01",
+ "UMaskExt": "0xC88F7E",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMCacheNears issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "UMask": "0xCD47FF01",
+ "UMaskExt": "0xCD47FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMs issued by iA Cores that Hit LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_ITOM",
+ "PerPkg": "1",
+ "UMask": "0xCC47FD01",
+ "UMaskExt": "0xCC47FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMs issued by iA Cores that Missed LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_ITOM",
+ "PerPkg": "1",
+ "UMask": "0xCC47FE01",
+ "UMaskExt": "0xCC47FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : UCRdFs issued by iA Cores that Missed LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_UCRDF",
+ "PerPkg": "1",
+ "UMask": "0xC877DE01",
+ "UMaskExt": "0xC877DE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : LLCPrefCode issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFCODE",
+ "PerPkg": "1",
+ "UMask": "0xCCCFFF01",
+ "UMaskExt": "0xCCCFFF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PMM Memory Mode related events : Counts the number of times CHA saw NM Set conflict in TOR",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x64",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.TOR",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PMM Memory Mode related events : Counts the number of times CHA saw NM Set conflict in SF/LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x64",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.SF",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PMM Memory Mode related events : Counts the number of times CHA saw NM Set conflict in SF/LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x64",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.LLC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : LLCPrefCode issued by iA Cores that hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFCODE",
+ "PerPkg": "1",
+ "UMask": "0xCCCFFD01",
+ "UMaskExt": "0xCCCFFD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : LLCPrefData issued by iA Cores that hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFDATA",
+ "PerPkg": "1",
+ "UMask": "0xCCD7FD01",
+ "UMaskExt": "0xCCD7FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : LLCPrefCode issued by iA Cores that missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFCODE",
+ "PerPkg": "1",
+ "UMask": "0xCCCFFE01",
+ "UMaskExt": "0xCCCFFE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : LLCPrefCode issued by iA Cores that hit the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFCODE",
+ "PerPkg": "1",
+ "UMask": "0xCCCFFD01",
+ "UMaskExt": "0xCCCFFD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : LLCPrefData issued by iA Cores that hit the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFDATA",
+ "PerPkg": "1",
+ "UMask": "0xCCD7FD01",
+ "UMaskExt": "0xCCD7FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : LLCPrefCode issued by iA Cores that missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFCODE",
+ "PerPkg": "1",
+ "UMask": "0xCCCFFE01",
+ "UMaskExt": "0xCCCFFE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : LLCPrefData issued by iA Cores that missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFDATA",
+ "PerPkg": "1",
+ "UMask": "0xCCD7FE01",
+ "UMaskExt": "0xCCD7FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.LOCAL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x65",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.REMOTE",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x65",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.SETCONFLICT",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x65",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.SETCONFLICT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.IODC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x70",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.IODC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.MEMWR",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x70",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.MEMWR",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.MEMWRNI",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x70",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.MEMWRNI",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_QOS.SLOW_INSERT",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x66",
+ "EventName": "UNC_CHA_PMM_QOS.SLOW_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_QOS.DDR4_FAST_INSERT",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x66",
+ "EventName": "UNC_CHA_PMM_QOS.DDR4_FAST_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_QOS.THROTTLE",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x66",
+ "EventName": "UNC_CHA_PMM_QOS.THROTTLE",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_QOS.REJ_IRQ",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x66",
+ "EventName": "UNC_CHA_PMM_QOS.REJ_IRQ",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_QOS.THROTTLE_PRQ",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x66",
+ "EventName": "UNC_CHA_PMM_QOS.THROTTLE_PRQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_QOS.THROTTLE_IRQ",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x66",
+ "EventName": "UNC_CHA_PMM_QOS.THROTTLE_IRQ",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_QOS.SLOWTORQ_SKIP",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x66",
+ "EventName": "UNC_CHA_PMM_QOS.SLOWTORQ_SKIP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_QOS_OCCUPANCY.DDR_SLOW_FIFO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x67",
+ "EventName": "UNC_CHA_PMM_QOS_OCCUPANCY.DDR_SLOW_FIFO",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_QOS_OCCUPANCY.DDR_FAST_FIFO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x67",
+ "EventName": "UNC_CHA_PMM_QOS_OCCUPANCY.DDR_FAST_FIFO",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.IRQ_PMM",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.PRQ_PMM",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.PMM_MEMMODE_TOR_MATCH",
+ "PerPkg": "1",
+ "UMaskExt": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.PMM_MEMMODE_TORMATCH_MULTI",
+ "PerPkg": "1",
+ "UMaskExt": "0x400",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : PMM Access",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.PMM",
+ "PerPkg": "1",
+ "UMaskExt": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : PMM Access",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.PMM",
+ "PerPkg": "1",
+ "UMaskExt": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : PMM Local",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAF",
+ "EventName": "UNC_CHA_DISTRESS_ASSERTED.PMM_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : PMM Remote",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAF",
+ "EventName": "UNC_CHA_DISTRESS_ASSERTED.PMM_NONLOCAL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF_PMM",
+ "PerPkg": "1",
+ "UMask": "0xC8678A01",
+ "UMaskExt": "0xC8678A",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCILF_PMM",
+ "PerPkg": "1",
+ "UMask": "0xC8668A01",
+ "UMaskExt": "0xC8668A",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed remote memory",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCILF_PMM",
+ "PerPkg": "1",
+ "UMask": "0xC8670A01",
+ "UMaskExt": "0xC8670A",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL_PMM",
+ "PerPkg": "1",
+ "UMask": "0xC86F8A01",
+ "UMaskExt": "0xC86F8A",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCIL_PMM",
+ "PerPkg": "1",
+ "UMask": "0xC86E8A01",
+ "UMaskExt": "0xC86E8A",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCIL_PMM",
+ "PerPkg": "1",
+ "UMask": "0xC86F0A01",
+ "UMaskExt": "0xC86F0A",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF_PMM",
+ "PerPkg": "1",
+ "UMask": "0xC8678A01",
+ "UMaskExt": "0xC8678A",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCILF_PMM",
+ "PerPkg": "1",
+ "UMask": "0xC8668A01",
+ "UMaskExt": "0xC8668A",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCILF_PMM",
+ "PerPkg": "1",
+ "UMask": "0xC8670A01",
+ "UMaskExt": "0xC8670A",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL_PMM",
+ "PerPkg": "1",
+ "UMask": "0xC86F8A01",
+ "UMaskExt": "0xC86F8A",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCIL_PMM",
+ "PerPkg": "1",
+ "UMask": "0xC86E8A01",
+ "UMaskExt": "0xC86E8A",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCIL_PMM",
+ "PerPkg": "1",
+ "UMask": "0xC86F0A01",
+ "UMaskExt": "0xC86F0A",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DDR4 Access",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.DDR",
+ "PerPkg": "1",
+ "UMaskExt": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DDR4 Access",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.DDR",
+ "PerPkg": "1",
+ "UMaskExt": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_DDR",
+ "PerPkg": "1",
+ "UMask": "0xC8978601",
+ "UMaskExt": "0xC89786",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL_DDR",
+ "PerPkg": "1",
+ "UMask": "0xC8968601",
+ "UMaskExt": "0xC89686",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE_DDR",
+ "PerPkg": "1",
+ "UMask": "0xC8970601",
+ "UMaskExt": "0xC89706",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF_DDR",
+ "PerPkg": "1",
+ "UMask": "0xC8678601",
+ "UMaskExt": "0xC86786",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCILF_DDR",
+ "PerPkg": "1",
+ "UMask": "0xC8668601",
+ "UMaskExt": "0xC86686",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCILF_DDR",
+ "PerPkg": "1",
+ "UMask": "0xC8670601",
+ "UMaskExt": "0xC86706",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL_DDR",
+ "PerPkg": "1",
+ "UMask": "0xC86F8601",
+ "UMaskExt": "0xC86F86",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCIL_DDR",
+ "PerPkg": "1",
+ "UMask": "0xC86E8601",
+ "UMaskExt": "0xC86E86",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCIL_DDR",
+ "PerPkg": "1",
+ "UMask": "0xC86F0601",
+ "UMaskExt": "0xC86F06",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL_DDR",
+ "PerPkg": "1",
+ "UMask": "0xC8168601",
+ "UMaskExt": "0xC81686",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_REMOTE_DDR",
+ "PerPkg": "1",
+ "UMask": "0xC8170601",
+ "UMaskExt": "0xC81706",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_DDR",
+ "PerPkg": "1",
+ "UMask": "0xC8978601",
+ "UMaskExt": "0xC89786",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_LOCAL_DDR",
+ "PerPkg": "1",
+ "UMask": "0xC8968601",
+ "UMaskExt": "0xC89686",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_REMOTE_DDR",
+ "PerPkg": "1",
+ "UMask": "0xC8970601",
+ "UMaskExt": "0xC89706",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF_DDR",
+ "PerPkg": "1",
+ "UMask": "0xC8678601",
+ "UMaskExt": "0xC86786",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed locally",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCILF_DDR",
+ "PerPkg": "1",
+ "UMask": "0xC8668601",
+ "UMaskExt": "0xC86686",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCILF_DDR",
+ "PerPkg": "1",
+ "UMask": "0xC8670601",
+ "UMaskExt": "0xC86706",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL_DDR",
+ "PerPkg": "1",
+ "UMask": "0xC86F8601",
+ "UMaskExt": "0xC86F86",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed locally",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCIL_DDR",
+ "PerPkg": "1",
+ "UMask": "0xC86E8601",
+ "UMaskExt": "0xC86E86",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCIL_DDR",
+ "PerPkg": "1",
+ "UMask": "0xC86F0601",
+ "UMaskExt": "0xC86F06",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that hit the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_PCIRDCUR",
+ "PerPkg": "1",
+ "UMask": "0xC8F3FD04",
+ "UMaskExt": "0xC8F3FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : LLCPrefData issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFDATA",
+ "PerPkg": "1",
+ "UMask": "0xCCD7FF01",
+ "UMaskExt": "0xCCD7FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLF issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF",
+ "PerPkg": "1",
+ "UMask": "0xC867FE01",
+ "UMaskExt": "0xC867FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLF issued by iA Cores that Missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF",
+ "PerPkg": "1",
+ "UMask": "0xC867FE01",
+ "UMaskExt": "0xC867FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores that Missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL",
+ "PerPkg": "1",
+ "UMask": "0xC86FFE01",
+ "UMaskExt": "0xC86FFE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRd issued by iA Cores that Missed the LLC - HOMed locally",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0xC80EFE01",
+ "UMaskExt": "0xC80EFE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRd issued by iA Cores that Missed the LLC - HOMed remotely",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0xC80F7E01",
+ "UMaskExt": "0xC80F7E",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed locally",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0xC88EFE01",
+ "UMaskExt": "0xC88EFE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed remotely",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0xC88F7E01",
+ "UMaskExt": "0xC88F7E",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CLFlushes issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CLFLUSH",
+ "PerPkg": "1",
+ "UMask": "0xC8C7FF01",
+ "UMaskExt": "0xC8C7FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CLFlushOpts issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CLFLUSHOPT",
+ "PerPkg": "1",
+ "UMask": "0xC8D7FF01",
+ "UMaskExt": "0xC8D7FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMCacheNears issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "UMask": "0xCD47FF01",
+ "UMaskExt": "0xCD47FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : SpecItoMs issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_SPECITOM",
+ "PerPkg": "1",
+ "UMask": "0xCC57FF01",
+ "UMaskExt": "0xCC57FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WbMtoIs issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0xCC27FF01",
+ "UMaskExt": "0xCC27FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMs issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_ITOM",
+ "PerPkg": "1",
+ "UMask": "0xCC47FF01",
+ "UMaskExt": "0xCC47FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMs issued by iA Cores that Hit LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_ITOM",
+ "PerPkg": "1",
+ "UMask": "0xCC47FD01",
+ "UMaskExt": "0xCC47FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMs issued by iA Cores that Missed LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_ITOM",
+ "PerPkg": "1",
+ "UMask": "0xCC47FE01",
+ "UMaskExt": "0xCC47FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : UCRdFs issued by iA Cores that Missed LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_UCRDF",
+ "PerPkg": "1",
+ "UMask": "0xC877DE01",
+ "UMaskExt": "0xC877DE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WiLs issued by iA Cores that Missed LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WIL",
+ "PerPkg": "1",
+ "UMask": "0xC87FDE01",
+ "UMaskExt": "0xC87FDE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLF issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WCILF",
+ "PerPkg": "1",
+ "UMask": "0xC867FF01",
+ "UMaskExt": "0xC867FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WCIL",
+ "PerPkg": "1",
+ "UMask": "0xC86FFF01",
+ "UMaskExt": "0xC86FFF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : LLCPrefCode issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFCODE",
+ "PerPkg": "1",
+ "UMask": "0xCCCFFF01",
+ "UMaskExt": "0xCCCFFF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WbMtoIs issued by IO Devices",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0xCC23FF04",
+ "UMaskExt": "0xCC23FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CLFlushes issued by IO Devices",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_CLFLUSH",
+ "PerPkg": "1",
+ "UMask": "0xC8C3FF04",
+ "UMaskExt": "0xC8C3FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "UMask": "0xCD43FF04",
+ "UMaskExt": "0xCD43FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "UMask": "0xCD43FD04",
+ "UMaskExt": "0xCD43FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "UMask": "0xCD43FE04",
+ "UMaskExt": "0xCD43FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; WCiLF misses from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_FULL_STREAMING_WR_DDR",
+ "PerPkg": "1",
+ "UMask": "0xc8678601",
+ "UMaskExt": "0xc86786",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; WCiLF misses from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_FULL_STREAMING_WR_LOCAL_DDR",
+ "PerPkg": "1",
+ "UMask": "0xc8668601",
+ "UMaskExt": "0xc86686",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; WCiLF misses from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_FULL_STREAMING_WR_REMOTE_DDR",
+ "PerPkg": "1",
+ "UMask": "0xc8670601",
+ "UMaskExt": "0xc86706",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; WCiL misses from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_PARTIAL_STREAMING_WR_DDR",
+ "PerPkg": "1",
+ "UMask": "0xc86f8601",
+ "UMaskExt": "0xc86f86",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; WCiL misses from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_PARTIAL_STREAMING_WR_LOCAL_DDR",
+ "PerPkg": "1",
+ "UMask": "0xc86e8601",
+ "UMaskExt": "0xc86e86",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; WCiL misses from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_PARTIAL_STREAMING_WR_REMOTE_DDR",
+ "PerPkg": "1",
+ "UMask": "0xc86f0601",
+ "UMaskExt": "0xc86f06",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; WCiLF misses from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_FULL_STREAMING_WR_DDR",
+ "PerPkg": "1",
+ "UMask": "0xc8678601",
+ "UMaskExt": "0xc86786",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; WCiLF misses from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_FULL_STREAMING_WR_LOCAL_DDR",
+ "PerPkg": "1",
+ "UMask": "0xc8668601",
+ "UMaskExt": "0xc86686",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; WCiLF misses from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_FULL_STREAMING_WR_REMOTE_DDR",
+ "PerPkg": "1",
+ "UMask": "0xc8670601",
+ "UMaskExt": "0xc86706",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; WCiL misses from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_PARTIAL_STREAMING_WR_DDR",
+ "PerPkg": "1",
+ "UMask": "0xc86f8601",
+ "UMaskExt": "0xc86f86",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; WCiL misses from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_PARTIAL_STREAMING_WR_LOCAL_DDR",
+ "PerPkg": "1",
+ "UMask": "0xc86e8601",
+ "UMaskExt": "0xc86e86",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; WCiL misses from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_PARTIAL_STREAMING_WR_REMOTE_DDR",
+ "PerPkg": "1",
+ "UMask": "0xc86f0601",
+ "UMaskExt": "0xc86f06",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WBEFtoEs issued by an IA Core. Non Modified Write Backs",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WBEFTOE",
+ "PerPkg": "1",
+ "UMask": "0xcc3fff01",
+ "UMaskExt": "0xcc3fff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that miss the IIO cache",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_MISS",
+ "PerPkg": "1",
+ "UMask": "0x71",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit M, E, S or I line in the IIO",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT",
+ "PerPkg": "1",
+ "UMask": "0x7e",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit E or S line in the IIO cache",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT_ES",
+ "PerPkg": "1",
+ "UMask": "0x74",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit I line in the IIO cache",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT_I",
+ "PerPkg": "1",
+ "UMask": "0x72",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : SpecItoMs issued by iA Cores that missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_SPECITOM",
+ "PerPkg": "1",
+ "UMask": "0xcc57fe01",
+ "UMaskExt": "0xcc57fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : SpecItoMs issued by iA Cores that missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_SPECITOM",
+ "PerPkg": "1",
+ "UMask": "0xcc57fe01",
+ "UMaskExt": "0xcc57fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd PTEs issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRDPTE",
+ "PerPkg": "1",
+ "UMask": "0xC837FE01",
+ "UMaskExt": "0xC837FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd PTEs issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRDPTE",
+ "PerPkg": "1",
+ "UMask": "0xC837FD01",
+ "UMaskExt": "0xC837FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd PTEs issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_DRDPTE",
+ "PerPkg": "1",
+ "UMask": "0xC837FF01",
+ "UMaskExt": "0xC837FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : SpecItoMs issued by iA Cores that hit in the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_SPECITOM",
+ "PerPkg": "1",
+ "UMask": "0xcc57fd01",
+ "UMaskExt": "0xcc57fd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WBStoIs issued by an IA Core. Non Modified Write Backs",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WBSTOI",
+ "PerPkg": "1",
+ "UMask": "0xcc67ff01",
+ "UMaskExt": "0xcc67ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WBEFtoIs issued by an IA Core. Non Modified Write Backs",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WBEFTOI",
+ "PerPkg": "1",
+ "UMask": "0xcc37ff01",
+ "UMaskExt": "0xcc37ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WBMtoEs issued by an IA Core. Non Modified Write Backs",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WBMTOE",
+ "PerPkg": "1",
+ "UMask": "0xcc2fff01",
+ "UMaskExt": "0xcc2fff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRDPTE",
+ "PerPkg": "1",
+ "UMask": "0xC837FF01",
+ "UMaskExt": "0xC837FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk that hit the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRDPTE",
+ "PerPkg": "1",
+ "UMask": "0xC837FD01",
+ "UMaskExt": "0xC837FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk that missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRDPTE",
+ "PerPkg": "1",
+ "UMask": "0xC837FE01",
+ "UMaskExt": "0xC837FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Occupancy - Prefetches",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x77",
+ "EventName": "UNC_M2M_RxC_AD_PREF_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Code Read Misses",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.CODE_READ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x1BD001",
+ "UMaskExt": "0x1BD0",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : RFO Misses",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.RFO_MISS",
+ "PerPkg": "1",
+ "UMask": "0x1BC801",
+ "UMaskExt": "0x1BC8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Reads",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.READ",
+ "PerPkg": "1",
+ "UMask": "0x1BD9FF",
+ "UMaskExt": "0x1BD9",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Read Misses",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.READ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x1BD901",
+ "UMaskExt": "0x1BD9",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Locally HOMed Read Misses",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.READ_MISS_LOC_HOM",
+ "PerPkg": "1",
+ "UMask": "0x0BD901",
+ "UMaskExt": "0x0BD9",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Remotely HOMed Read Misses",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.READ_MISS_REM_HOM",
+ "PerPkg": "1",
+ "UMask": "0x13D901",
+ "UMaskExt": "0x13D9",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Locally Requested Reads that are Locally HOMed",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.READ_LOCAL_LOC_HOM",
+ "PerPkg": "1",
+ "UMask": "0x09D9FF",
+ "UMaskExt": "0x09D9",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Remotely Requested Reads that are Locally HOMed",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.READ_REMOTE_LOC_HOM",
+ "PerPkg": "1",
+ "UMask": "0x0A19FF",
+ "UMaskExt": "0x0A19",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Locally Requested Reads that are Remotely HOMed",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.READ_LOCAL_REM_HOM",
+ "PerPkg": "1",
+ "UMask": "0x11D9FF",
+ "UMaskExt": "0x11D9",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Reads that Hit the Snoop Filter",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.READ_SF_HIT",
+ "PerPkg": "1",
+ "UMask": "0x1BD90E",
+ "UMaskExt": "0x1BD9",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Remotely requested Read or Snoop Misses that are Remotely HOMed",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.READ_OR_SNOOP_REMOTE_MISS_REM_HOM",
+ "PerPkg": "1",
+ "UMask": "0x161901",
+ "UMaskExt": "0x1619",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts : All Ports",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.ALL",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Filters Requests for those that write info into the cache",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.WRITES_AND_OTHER",
+ "PerPkg": "1",
+ "UMask": "0x1A42FF",
+ "UMaskExt": "0x1A42",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Transactions homed locally",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOC_HOM",
+ "PerPkg": "1",
+ "UMask": "0x0BDFFF",
+ "UMaskExt": "0x0BDF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Transactions homed remotely",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REM_HOM",
+ "PerPkg": "1",
+ "UMask": "0x15DFFF",
+ "UMaskExt": "0x15DF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : CRd Requests that come from a Remote socket",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.CODE_READ_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x1A10FF",
+ "UMaskExt": "0x1A10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : CRd Requests that come from the local socket (usually the core)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.CODE_READ_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x19D0FF",
+ "UMaskExt": "0x19D0",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Prefetch requests to the LLC that come from the local socket (usually the core)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LLCPREF_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x189DFF",
+ "UMaskExt": "0x189D",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Code Reads",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.CODE_READ",
+ "PerPkg": "1",
+ "UMask": "0x1BD0FF",
+ "UMaskExt": "0x1BD0",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Demands Merged with CAMed Prefetches : XPT & UPI- Ch 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x74",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.CH0_XPTUPI",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 0-7",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0xFF",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Demands Not Merged with CAMed Prefetches : XPT & UPI - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x75",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.XPTUPI_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x15",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Not Merged with CAMed Prefetches : XPT & UPI - Ch 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x75",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.CH2_XPTUPI",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Not Merged with CAMed Prefetches : XPT & UPI - Ch 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x75",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.CH1_XPTUPI",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Not Merged with CAMed Prefetches : XPT & UPI - Ch 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x75",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.CH0_XPTUPI",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Merged with CAMed Prefetches : XPT & UPI- All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x74",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.XPTUPI_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x15",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Merged with CAMed Prefetches : XPT & UPI- Ch 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x74",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.CH2_XPTUPI",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Merged with CAMed Prefetches : XPT & UPI - Ch 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x74",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.CH1_XPTUPI",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/uncore-power.json b/tools/perf/pmu-events/arch/x86/icelakex/uncore-power.json
index 2d1368958762..281f3605881d 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/uncore-power.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/uncore-power.json
@@ -6,5 +6,230 @@
"EventName": "UNC_P_CLOCKTICKS",
"PerPkg": "1",
"Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_CORE_TRANSITION_CYCLES",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x60",
+ "EventName": "UNC_P_CORE_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_DEMOTIONS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x30",
+ "EventName": "UNC_P_DEMOTIONS",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 0 Cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x75",
+ "EventName": "UNC_P_FIVR_PS_PS0_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 1 Cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x76",
+ "EventName": "UNC_P_FIVR_PS_PS1_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 2 Cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x77",
+ "EventName": "UNC_P_FIVR_PS_PS2_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 3 Cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x78",
+ "EventName": "UNC_P_FIVR_PS_PS3_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "AVX256 Frequency Clipping",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x49",
+ "EventName": "UNC_P_FREQ_CLIP_AVX256",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "AVX512 Frequency Clipping",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4a",
+ "EventName": "UNC_P_FREQ_CLIP_AVX512",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Thermal Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Power Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x05",
+ "EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "IO P Limit Strongest Lower Limit Cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x73",
+ "EventName": "UNC_P_FREQ_MIN_IO_P_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Cycles spent changing Frequency",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x74",
+ "EventName": "UNC_P_FREQ_TRANS_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Memory Phase Shedding Cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2F",
+ "EventName": "UNC_P_MEMORY_PHASE_SHEDDING_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_P_PKG_RESIDENCY_C0_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C2E",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2B",
+ "EventName": "UNC_P_PKG_RESIDENCY_C2E_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2C",
+ "EventName": "UNC_P_PKG_RESIDENCY_C3_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2D",
+ "EventName": "UNC_P_PKG_RESIDENCY_C6_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_PMAX_THROTTLED_CYCLES",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x06",
+ "EventName": "UNC_P_PMAX_THROTTLED_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "External Prochot",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x0A",
+ "EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Internal Prochot",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x09",
+ "EventName": "UNC_P_PROCHOT_INTERNAL_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Total Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x72",
+ "EventName": "UNC_P_TOTAL_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "VR Hot",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_P_VR_HOT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State : C0 and C1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State : C3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State : C6 and C7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
+ "PerPkg": "1",
+ "Unit": "PCU"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/virtual-memory.json b/tools/perf/pmu-events/arch/x86/icelakex/virtual-memory.json
index bc43ea855840..d70864da5c67 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/virtual-memory.json
@@ -266,4 +266,4 @@
"Speculative": "1",
"UMask": "0x20"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/cache.json b/tools/perf/pmu-events/arch/x86/ivybridge/cache.json
index 62e9705daa19..8adb2e45e23d 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/cache.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/cache.json
@@ -1099,4 +1099,4 @@
"SampleAfterValue": "100003",
"UMask": "0x10"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/floating-point.json b/tools/perf/pmu-events/arch/x86/ivybridge/floating-point.json
index db8b1c4fceb0..4c2ac010cf55 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/floating-point.json
@@ -166,4 +166,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/frontend.json b/tools/perf/pmu-events/arch/x86/ivybridge/frontend.json
index c956a0a51312..2b1a82dd86ab 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/frontend.json
@@ -312,4 +312,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json b/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json
index 87670226f52d..3f48e75f8a86 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json
@@ -130,18 +130,12 @@
"MetricName": "FLOPc_SMT"
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
"MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
"MetricName": "ILP"
},
{
- "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "Bad;BadSpec;BrMispredicts",
- "MetricName": "IpMispredict"
- },
- {
"BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
"MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"MetricGroup": "SMT",
@@ -197,17 +191,34 @@
"MetricName": "Instructions"
},
{
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "Retire"
+ },
+ {
+ "BriefDescription": "",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
+ "MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
+ "MetricName": "Execute"
+ },
+ {
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
"MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
"MetricGroup": "DSB;Fed;FetchBW",
"MetricName": "DSB_Coverage"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load instructions (in core cycles)",
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts",
+ "MetricName": "IpMispredict"
+ },
+ {
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
"MetricGroup": "Mem;MemoryBound;MemoryLat",
- "MetricName": "Load_Miss_Real_Latency",
- "PublicDescription": "Actual Average Latency for L1 data-cache miss demand load instructions (in core cycles). Latency may be overestimated for multi-load instructions - e.g. repeat strings."
+ "MetricName": "Load_Miss_Real_Latency"
},
{
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
@@ -216,24 +227,6 @@
"MetricName": "MLP"
},
{
- "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
- "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L1D_Cache_Fill_BW"
- },
- {
- "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L2_Cache_Fill_BW"
- },
- {
- "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L3_Cache_Fill_BW"
- },
- {
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
"MetricGroup": "Mem;CacheMisses",
@@ -265,6 +258,48 @@
"MetricName": "Page_Walks_Utilization_SMT"
},
{
+ "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L1D_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L2_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L3_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "(64 * L1D.REPLACEMENT / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L1D_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "(64 * L2_LINES_IN.ALL / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L2_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "(64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L3_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "0",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "L3_Cache_Access_BW_1T"
+ },
+ {
"BriefDescription": "Average CPU Utilization",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "HPC;Summary",
@@ -280,7 +315,8 @@
"BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "( ( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE ) / 1000000000 ) / duration_time",
"MetricGroup": "Cor;Flops;HPC",
- "MetricName": "GFLOPs"
+ "MetricName": "GFLOPs",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine."
},
{
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/memory.json b/tools/perf/pmu-events/arch/x86/ivybridge/memory.json
index 5f98f7746cf7..30fc0af61eb3 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/memory.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/memory.json
@@ -233,4 +233,4 @@
"SampleAfterValue": "100003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/other.json b/tools/perf/pmu-events/arch/x86/ivybridge/other.json
index 83fe8f79adc6..2d62521791d8 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/other.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/other.json
@@ -41,4 +41,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json b/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json
index 2de31c56c2a5..d89d3f8db190 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json
@@ -676,7 +676,7 @@
"UMask": "0x3"
},
{
- "BriefDescription": "Number of occurences waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc.)",
+ "BriefDescription": "Number of occurrences waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc.)",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
@@ -1269,4 +1269,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/uncore-other.json b/tools/perf/pmu-events/arch/x86/ivybridge/uncore-other.json
index 6278068908cf..88f1e326205f 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/uncore-other.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/uncore-other.json
@@ -82,10 +82,10 @@
{
"BriefDescription": "This 48-bit fixed counter counts the UCLK cycles.",
"Counter": "Fixed",
+ "EventCode": "0xff",
"EventName": "UNC_CLOCK.SOCKET",
"PerPkg": "1",
"PublicDescription": "This 48-bit fixed counter counts the UCLK cycles.",
- "UMask": "0x01",
"Unit": "ARB"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/virtual-memory.json b/tools/perf/pmu-events/arch/x86/ivybridge/virtual-memory.json
index 8cf1549797b0..a5e387bbb134 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/virtual-memory.json
@@ -177,4 +177,4 @@
"SampleAfterValue": "100007",
"UMask": "0x20"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/cache.json b/tools/perf/pmu-events/arch/x86/ivytown/cache.json
index 9bbf2bc59859..27576d53b347 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/cache.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/cache.json
@@ -1257,4 +1257,4 @@
"SampleAfterValue": "100003",
"UMask": "0x10"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/floating-point.json b/tools/perf/pmu-events/arch/x86/ivytown/floating-point.json
index db8b1c4fceb0..4c2ac010cf55 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/floating-point.json
@@ -166,4 +166,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/frontend.json b/tools/perf/pmu-events/arch/x86/ivytown/frontend.json
index c956a0a51312..2b1a82dd86ab 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/frontend.json
@@ -312,4 +312,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json b/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json
index 8d0ddcbd6c7c..19c7f3b41102 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json
@@ -130,18 +130,12 @@
"MetricName": "FLOPc_SMT"
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
"MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
"MetricName": "ILP"
},
{
- "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "Bad;BadSpec;BrMispredicts",
- "MetricName": "IpMispredict"
- },
- {
"BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
"MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"MetricGroup": "SMT",
@@ -197,17 +191,34 @@
"MetricName": "Instructions"
},
{
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "Retire"
+ },
+ {
+ "BriefDescription": "",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
+ "MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
+ "MetricName": "Execute"
+ },
+ {
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
"MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
"MetricGroup": "DSB;Fed;FetchBW",
"MetricName": "DSB_Coverage"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load instructions (in core cycles)",
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts",
+ "MetricName": "IpMispredict"
+ },
+ {
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
"MetricGroup": "Mem;MemoryBound;MemoryLat",
- "MetricName": "Load_Miss_Real_Latency",
- "PublicDescription": "Actual Average Latency for L1 data-cache miss demand load instructions (in core cycles). Latency may be overestimated for multi-load instructions - e.g. repeat strings."
+ "MetricName": "Load_Miss_Real_Latency"
},
{
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
@@ -216,24 +227,6 @@
"MetricName": "MLP"
},
{
- "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
- "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L1D_Cache_Fill_BW"
- },
- {
- "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L2_Cache_Fill_BW"
- },
- {
- "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L3_Cache_Fill_BW"
- },
- {
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
"MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
"MetricGroup": "Mem;CacheMisses",
@@ -265,6 +258,48 @@
"MetricName": "Page_Walks_Utilization_SMT"
},
{
+ "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L1D_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L2_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L3_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "(64 * L1D.REPLACEMENT / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L1D_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "(64 * L2_LINES_IN.ALL / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L2_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "(64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L3_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "0",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "L3_Cache_Access_BW_1T"
+ },
+ {
"BriefDescription": "Average CPU Utilization",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "HPC;Summary",
@@ -280,7 +315,8 @@
"BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "( ( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE ) / 1000000000 ) / duration_time",
"MetricGroup": "Cor;Flops;HPC",
- "MetricName": "GFLOPs"
+ "MetricName": "GFLOPs",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine."
},
{
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
@@ -319,6 +355,12 @@
"MetricName": "Socket_CLKS"
},
{
+ "BriefDescription": "Uncore frequency per die [GHZ]",
+ "MetricExpr": "cbox_0@event\\=0x0@ / #num_dies / duration_time / 1000000000",
+ "MetricGroup": "SoC",
+ "MetricName": "UNCORE_FREQ"
+ },
+ {
"BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
"MetricGroup": "Branches;OS",
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/memory.json b/tools/perf/pmu-events/arch/x86/ivytown/memory.json
index f904140203fe..99b71e43acad 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/memory.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/memory.json
@@ -500,4 +500,4 @@
"SampleAfterValue": "100003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/other.json b/tools/perf/pmu-events/arch/x86/ivytown/other.json
index 83fe8f79adc6..2d62521791d8 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/other.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/other.json
@@ -41,4 +41,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/uncore-cache.json b/tools/perf/pmu-events/arch/x86/ivytown/uncore-cache.json
index 267410594833..93e07385eeec 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/uncore-cache.json
@@ -1,321 +1,3390 @@
[
{
- "BriefDescription": "Uncore cache clock ticks",
+ "BriefDescription": "Uncore Clocks",
"Counter": "0,1,2,3",
"EventName": "UNC_C_CLOCKTICKS",
"PerPkg": "1",
"Unit": "CBO"
},
{
- "BriefDescription": "All LLC Misses (code+ data rd + data wr - including demand and prefetch)",
+ "BriefDescription": "Counter 0 Occupancy",
+ "Counter": "1,2,3",
+ "EventCode": "0x1f",
+ "EventName": "UNC_C_COUNTER0_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Since occupancy counts can only be captured in the Cbo's 0 counter, this event allows a user to capture occupancy related information by filtering the Cb0 occupancy count captured in Counter 0. The filtering available is found in the control register - threshold, invert and edge detect. E.g. setting threshold to 1 can effectively monitor how many cycles the monitored queue has an entry.",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cache Lookups; Any Request",
"Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.ANY",
- "Filter": "filter_state=0x1",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set filter mask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:17] bits correspond to [M'FMESI] state.; Filters for any transaction originating from the IPQ or IRQ. This does not include lookups originating from the ISMQ.",
"UMask": "0x11",
"Unit": "CBO"
},
{
- "BriefDescription": "M line evictions from LLC (writebacks to memory)",
+ "BriefDescription": "Cache Lookups; Data Read Request",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.DATA_READ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set filter mask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:17] bits correspond to [M'FMESI] state.; Read transactions",
+ "UMask": "0x3",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cache Lookups; Lookups that Match NID",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.NID",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set filter mask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:17] bits correspond to [M'FMESI] state.; Qualify one of the other subevents by the Target NID. The NID is programmed in Cn_MSR_PMON_BOX_FILTER.nid. In conjunction with STATE = I, it is possible to monitor misses to specific NIDs in the system.",
+ "UMask": "0x41",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cache Lookups; External Snoop Request",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.REMOTE_SNOOP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set filter mask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:17] bits correspond to [M'FMESI] state.; Filters for only snoop requests coming from the remote socket(s) through the IPQ.",
+ "UMask": "0x9",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cache Lookups; Write Requests",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.WRITE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set filter mask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:17] bits correspond to [M'FMESI] state.; Writeback transactions from L2 to the LLC This includes all write transactions -- both Cachable and UC.",
+ "UMask": "0x5",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in E state",
+ "Counter": "0,1",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.E_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Lines Victimized",
+ "Counter": "0,1",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in M state",
"Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.M_STATE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x1",
"Unit": "CBO"
},
{
- "BriefDescription": "LLC misses - demand and prefetch data reads - excludes LLC prefetches. Derived from unc_c_tor_inserts.miss_opcode.demand",
+ "BriefDescription": "Lines Victimized; Victimized Lines that Match NID",
"Counter": "0,1",
- "EventCode": "0x35",
- "EventName": "LLC_MISSES.DATA_READ",
- "Filter": "filter_opc=0x182",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.NID",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.; Qualify one of the other subevents by the Target NID. The NID is programmed in Cn_MSR_PMON_BOX_FILTER.nid. In conjunction with STATE = I, it is possible to monitor misses to specific NIDs in the system.",
+ "UMask": "0x40",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in S State",
+ "Counter": "0,1",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.S_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cbo Misc; RFO HitS",
+ "Counter": "0,1",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.RFO_HIT_S",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Miscellaneous events in the Cbo.; Number of times that an RFO hit in S state. This is useful for determining if it might be good for a workload to use RspIWB instead of RspSWB.",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cbo Misc; Silent Snoop Eviction",
+ "Counter": "0,1",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.RSPI_WAS_FSE",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.; Counts the number of times when a Snoop hit in FSE states and triggered a silent eviction. This is useful because this information is lost in the PRE encodings.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cbo Misc",
+ "Counter": "0,1",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.STARTED",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cbo Misc; Write Combining Aliasing",
+ "Counter": "0,1",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.WC_ALIASING",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.; Counts the number of times that a USWC write (WCIL(F)) transaction hit in the LLC in M state, triggering a WBMtoI followed by the USWC write. This occurs when there is WC aliasing.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Age 0",
+ "Counter": "0,1",
+ "EventCode": "0x3c",
+ "EventName": "UNC_C_QLRU.AGE0",
+ "PerPkg": "1",
+ "PublicDescription": "How often age was set to 0",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Age 1",
+ "Counter": "0,1",
+ "EventCode": "0x3c",
+ "EventName": "UNC_C_QLRU.AGE1",
+ "PerPkg": "1",
+ "PublicDescription": "How often age was set to 1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Age 2",
+ "Counter": "0,1",
+ "EventCode": "0x3c",
+ "EventName": "UNC_C_QLRU.AGE2",
+ "PerPkg": "1",
+ "PublicDescription": "How often age was set to 2",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Age 3",
+ "Counter": "0,1",
+ "EventCode": "0x3c",
+ "EventName": "UNC_C_QLRU.AGE3",
+ "PerPkg": "1",
+ "PublicDescription": "How often age was set to 3",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Bits Decremented",
+ "Counter": "0,1",
+ "EventCode": "0x3c",
+ "EventName": "UNC_C_QLRU.LRU_DECREMENT",
+ "PerPkg": "1",
+ "PublicDescription": "How often all LRU bits were decremented by 1",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "LRU Queue; Non-0 Aged Victim",
+ "Counter": "0,1",
+ "EventCode": "0x3c",
+ "EventName": "UNC_C_QLRU.VICTIM_NON_ZERO",
+ "PerPkg": "1",
+ "PublicDescription": "How often we picked a victim that had a non-zero age",
+ "UMask": "0x20",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Counterclockwise",
+ "Counter": "2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xC",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Clockwise",
+ "Counter": "2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x3",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down",
+ "Counter": "2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.DOWN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xCC",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down and Even on Vring 0",
+ "Counter": "2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_C_RING_AD_USED.DOWN_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down and Odd on Vring 0",
+ "Counter": "2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_C_RING_AD_USED.DOWN_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down and Even on VRing 1",
+ "Counter": "2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_C_RING_AD_USED.DOWN_VR1_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Even ring polarity on Virtual Ring 1.",
+ "UMask": "0x40",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down and Odd on VRing 1",
+ "Counter": "2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_C_RING_AD_USED.DOWN_VR1_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Odd ring polarity on Virtual Ring 1.",
+ "UMask": "0x80",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up",
+ "Counter": "2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x33",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up and Even on Vring 0",
+ "Counter": "2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_C_RING_AD_USED.UP_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up and Odd on Vring 0",
+ "Counter": "2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_C_RING_AD_USED.UP_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up and Even on VRing 1",
+ "Counter": "2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_C_RING_AD_USED.UP_VR1_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Even ring polarity on Virtual Ring 1.",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up and Odd on VRing 1",
+ "Counter": "2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_C_RING_AD_USED.UP_VR1_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Odd ring polarity on Virtual Ring 1.",
+ "UMask": "0x20",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Counterclockwise",
+ "Counter": "2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xC",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Clockwise",
+ "Counter": "2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x3",
"Unit": "CBO"
},
{
- "BriefDescription": "LLC misses - Uncacheable reads. Derived from unc_c_tor_inserts.miss_opcode.uncacheable",
+ "BriefDescription": "AK Ring In Use; Down",
+ "Counter": "2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.DOWN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xCC",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down and Even on Vring 0",
+ "Counter": "2,3",
+ "EventCode": "0x1c",
+ "EventName": "UNC_C_RING_AK_USED.DOWN_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down and Odd on Vring 0",
+ "Counter": "2,3",
+ "EventCode": "0x1c",
+ "EventName": "UNC_C_RING_AK_USED.DOWN_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down and Even on VRing 1",
+ "Counter": "2,3",
+ "EventCode": "0x1c",
+ "EventName": "UNC_C_RING_AK_USED.DOWN_VR1_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Even ring polarity on Virtual Ring 1.",
+ "UMask": "0x40",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down and Odd on VRing 1",
+ "Counter": "2,3",
+ "EventCode": "0x1c",
+ "EventName": "UNC_C_RING_AK_USED.DOWN_VR1_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Odd ring polarity on Virtual Ring 1.",
+ "UMask": "0x80",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up",
+ "Counter": "2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x33",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up and Even on Vring 0",
+ "Counter": "2,3",
+ "EventCode": "0x1c",
+ "EventName": "UNC_C_RING_AK_USED.UP_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up and Odd on Vring 0",
+ "Counter": "2,3",
+ "EventCode": "0x1c",
+ "EventName": "UNC_C_RING_AK_USED.UP_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up and Even on VRing 1",
+ "Counter": "2,3",
+ "EventCode": "0x1c",
+ "EventName": "UNC_C_RING_AK_USED.UP_VR1_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Even ring polarity on Virtual Ring 1.",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up and Odd on VRing 1",
+ "Counter": "2,3",
+ "EventCode": "0x1c",
+ "EventName": "UNC_C_RING_AK_USED.UP_VR1_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Odd ring polarity on Virtual Ring 1.",
+ "UMask": "0x20",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Counterclockwise",
+ "Counter": "2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xC",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Clockwise",
+ "Counter": "2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x3",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down",
+ "Counter": "2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.DOWN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xCC",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down and Even on Vring 0",
+ "Counter": "2,3",
+ "EventCode": "0x1d",
+ "EventName": "UNC_C_RING_BL_USED.DOWN_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down and Odd on Vring 0",
+ "Counter": "2,3",
+ "EventCode": "0x1d",
+ "EventName": "UNC_C_RING_BL_USED.DOWN_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down and Even on VRing 1",
+ "Counter": "2,3",
+ "EventCode": "0x1d",
+ "EventName": "UNC_C_RING_BL_USED.DOWN_VR1_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Even ring polarity on Virtual Ring 1.",
+ "UMask": "0x40",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down and Odd on VRing 1",
+ "Counter": "2,3",
+ "EventCode": "0x1d",
+ "EventName": "UNC_C_RING_BL_USED.DOWN_VR1_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Odd ring polarity on Virtual Ring 1.",
+ "UMask": "0x80",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up",
+ "Counter": "2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x33",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up and Even on Vring 0",
+ "Counter": "2,3",
+ "EventCode": "0x1d",
+ "EventName": "UNC_C_RING_BL_USED.UP_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up and Odd on Vring 0",
+ "Counter": "2,3",
+ "EventCode": "0x1d",
+ "EventName": "UNC_C_RING_BL_USED.UP_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up and Even on VRing 1",
+ "Counter": "2,3",
+ "EventCode": "0x1d",
+ "EventName": "UNC_C_RING_BL_USED.UP_VR1_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Even ring polarity on Virtual Ring 1.",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up and Odd on VRing 1",
+ "Counter": "2,3",
+ "EventCode": "0x1d",
+ "EventName": "UNC_C_RING_BL_USED.UP_VR1_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Odd ring polarity on Virtual Ring 1.",
+ "UMask": "0x20",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.",
+ "Counter": "0,1",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.AD_IRQ",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; Acknowledgements to core",
+ "Counter": "0,1",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.AK",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.: Acknowledgements to core",
+ "Counter": "0,1",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.AK_CORE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; Data Responses to core",
+ "Counter": "0,1",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.BL",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.: Data Responses to core",
+ "Counter": "0,1",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.BL_CORE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; Snoops of processor's cache.",
+ "Counter": "0,1",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.IV",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.: Snoops of processor's cache.",
+ "Counter": "0,1",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.IV_CORE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "IV Ring in Use; Any",
+ "Counter": "2,3",
+ "EventCode": "0x1e",
+ "EventName": "UNC_C_RING_IV_USED.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters any polarity",
+ "UMask": "0xF",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "IV Ring in Use; Down",
+ "Counter": "2,3",
+ "EventCode": "0x1e",
+ "EventName": "UNC_C_RING_IV_USED.DOWN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for Down polarity",
+ "UMask": "0xCC",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "IV Ring in Use; Up",
+ "Counter": "2,3",
+ "EventCode": "0x1e",
+ "EventName": "UNC_C_RING_IV_USED.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for Up polarity",
+ "UMask": "0x33",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; IRQ",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.; IPQ is externally startved and therefore we are blocking the IRQ.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; IPQ",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.; IRQ is externally starved and therefore we are blocking the IPQ.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; ISMQ_BID",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.ISMQ_BIDS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.; Number of times that the ISMQ Bid.",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.PRQ",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ is blocking the ingress queue and causing the starvation.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; IPQ",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; IRQ",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; IRQ Rejected",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.IRQ_REJ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations: IRQ Rejected",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.IRQ_REJECTED",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; VFIFO",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.VFIFO",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.; Counts the number of allocations into the IRQ Ordering FIFO. In JKT, it is necessary to keep IO requests in order. Therefore, they are allocated into an ordering FIFO that sits next to the IRQ, and must be satisfied from the FIFO in order (with respect to each other). This event, in conjunction with the Occupancy Accumulator event, can be used to calculate average lifetime in the FIFO. Transactions are allocated into the FIFO as soon as they enter the Cachebo (and the IRQ) and are deallocated from the FIFO as soon as they are deallocated from the IRQ.",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; IPQ",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in internal starvation. This occurs when one (or more) of the entries in the ingress queue are being starved out by other entries in that queue.; Cycles with the IPQ in Internal Starvation.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; IRQ",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in internal starvation. This occurs when one (or more) of the entries in the ingress queue are being starved out by other entries in that queue.; Cycles with the IRQ in Internal Starvation.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; ISMQ",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.ISMQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in internal starvation. This occurs when one (or more) of the entries in the ingress queue are being starved out by other entries in that queue.; Cycles with the ISMQ in Internal Starvation.",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; Address Conflict",
+ "Counter": "0,1",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.ADDR_CONFLICT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.; Counts the number of times that a request form the IPQ was retried because of a TOR reject from an address conflicts. Address conflicts out of the IPQ should be rare. They will generally only occur if two different sockets are sending requests to the same address at the same time. This is a true conflict case, unlike the IPQ Address Conflict which is commonly caused by prefetching characteristics.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; Any Reject",
+ "Counter": "0,1",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.; Counts the number of times that a request form the IPQ was retried because of a TOR reject. TOR rejects from the IPQ can be caused by the Egress being full or Address Conflicts.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; No Egress Credits",
+ "Counter": "0,1",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.; Counts the number of times that a request form the IPQ was retried because of a TOR reject from the Egress being full. IPQ requests make use of the AD Egress for regular responses, the BL egress to forward data, and the AK egress to return credits.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; No QPI Credits",
+ "Counter": "0,1",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.QPI_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; Address Conflict",
+ "Counter": "0,1",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.ADDR_CONFLICT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request from the IRQ was retried because of an address match in the TOR. In order to maintain coherency, requests to the same address are not allowed to pass each other up in the Cbo. Therefore, if there is an outstanding request to a given address, one cannot issue another request to that address until it is complete. This comes up most commonly with prefetches. Outstanding prefetches occasionally will not complete their memory fetch and a demand request to the same address will then sit in the IRQ and get retried until the prefetch fills the data into the LLC. Therefore, it will not be uncommon to see this case in high bandwidth streaming workloads when the LLC Prefetcher in the core is enabled.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; Any Reject",
+ "Counter": "0,1",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of IRQ retries that occur. Requests from the IRQ are retried if they are rejected from the TOR pipeline for a variety of reasons. Some of the most common reasons include if the Egress is full, there are no RTIDs, or there is a Physical Address match to another outstanding request.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No Egress Credits",
+ "Counter": "0,1",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request from the IRQ was retried because it failed to acquire an entry in the Egress. The egress is the buffer that queues up for allocating onto the ring. IRQ requests can make use of all four rings and all four Egresses. If any of the queues that a given request needs to make use of are full, the request will be retried.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No IIO Credits",
+ "Counter": "0,1",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.IIO_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request attempted to acquire the NCS/NCB credit for sending messages on BL to the IIO. There is a single credit in each CBo that is shared between the NCS and NCB message classes for sending transactions on the BL ring (such as read data) to the IIO.",
+ "UMask": "0x20",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No QPI Credits",
+ "Counter": "0,1",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.QPI_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of requests rejects because of lack of QPI Ingress credits. These credits are required in order to send transactions to the QPI agent. Please see the QPI_IGR_CREDITS events for more information.",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No RTIDs",
+ "Counter": "0,1",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.RTID",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that requests from the IRQ were retried because there were no RTIDs available. RTIDs are required after a request misses the LLC and needs to send snoops and/or requests to memory. If there are no RTIDs available, requests will queue up in the IRQ and retry until one becomes available. Note that there are multiple RTID pools for the different sockets. There may be cases where the local RTIDs are all used, but requests destined for remote memory can still acquire an RTID because there are remote RTIDs available. This event does not provide any filtering for this case.",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; Any Reject",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transactions pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.; Counts the total number of times that a request from the ISMQ retried because of a TOR reject. ISMQ requests generally will not need to retry (or at least ISMQ retries are less common than IRQ retries). ISMQ requests will retry if they are not able to acquire a needed Egress credit to get onto the ring, or for cache evictions that need to acquire an RTID. Most ISMQ requests already have an RTID, so eviction retries will be less common here.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No Egress Credits",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transactions pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.; Counts the number of times that a request from the ISMQ retried because of a TOR reject caused by a lack of Egress credits. The egress is the buffer that queues up for allocating onto the ring. If any of the Egress queues that a given request needs to make use of are full, the request will be retried.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No IIO Credits",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.IIO_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transactions pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.; Number of times a request attempted to acquire the NCS/NCB credit for sending messages on BL to the IIO. There is a single credit in each CBo that is shared between the NCS and NCB message classes for sending transactions on the BL ring (such as read data) to the IIO.",
+ "UMask": "0x20",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No QPI Credits",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.QPI_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transactions pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No RTIDs",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.RTID",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transactions pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.; Counts the number of times that a request from the ISMQ retried because of a TOR reject caused by no RTIDs. M-state cache evictions are serviced through the ISMQ, and must acquire an RTID in order to write back to memory. If no RTIDs are available, they will be retried.",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No WB Credits",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.WB_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transactions pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.; Retries of writes to local memory due to lack of HT WB credits",
+ "UMask": "0x80",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; IPQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; IRQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; IRQ Rejected",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.IRQ_REJ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "IRQ Rejected",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.IRQ_REJECTED",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; VFIFO",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.VFIFO",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.; Accumulates the number of used entries in the IRQ Ordering FIFO in each cycle. In JKT, it is necessary to keep IO requests in order. Therefore, they are allocated into an ordering FIFO that sits next to the IRQ, and must be satisfied from the FIFO in order (with respect to each other). This event, in conjunction with the Allocations event, can be used to calculate average lifetime in the FIFO. This event can be used in conjunction with the Not Empty event to calculate average queue occupancy. Transactions are allocated into the FIFO as soon as they enter the Cachebo (and the IRQ) and are deallocated from the FIFO as soon as they are deallocated from the IRQ.",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Inserts; All",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.UNCACHEABLE",
- "Filter": "filter_opc=0x187",
+ "EventName": "UNC_C_TOR_INSERTS.ALL",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x3",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All transactions inserted into the TOR. This includes requests that reside in the TOR for a short time, such as LLC Hits that do not need to snoop cores or requests that get rejected and have to be retried through one of the ingress queues. The TOR is more commonly a bottleneck in SKUs with smaller core counts, where the ratio of RTIDs to TOR entries is larger. Note that there are reserved TOR entries for various request types, so it is possible that a given request type be blocked with an occupancy that is less than 20. Also note that generally requests will not be able to arbitrate into the TOR pipeline if there are no available TOR slots.",
+ "UMask": "0x8",
"Unit": "CBO"
},
{
- "BriefDescription": "LLC prefetch misses for RFO. Derived from unc_c_tor_inserts.miss_opcode.rfo_prefetch",
+ "BriefDescription": "TOR Inserts; Evictions",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.RFO_LLC_PREFETCH",
- "Filter": "filter_opc=0x190",
+ "EventName": "UNC_C_TOR_INSERTS.EVICTION",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x3",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Eviction transactions inserted into the TOR. Evictions can be quick, such as when the line is in the F, S, or E states and no core valid bits are set. They can also be longer if either CV bits are set (so the cores need to be snooped) and/or if there is a HitM (in which case it is necessary to write the request out to memory).",
+ "UMask": "0x4",
"Unit": "CBO"
},
{
- "BriefDescription": "LLC prefetch misses for code reads. Derived from unc_c_tor_inserts.miss_opcode.code",
+ "BriefDescription": "TOR Inserts; Local Memory",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.CODE_LLC_PREFETCH",
- "Filter": "filter_opc=0x191",
+ "EventName": "UNC_C_TOR_INSERTS.LOCAL",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x3",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All transactions inserted into the TOR that are satisfied by locally HOMed memory.",
+ "UMask": "0x28",
"Unit": "CBO"
},
{
- "BriefDescription": "LLC prefetch misses for data reads. Derived from unc_c_tor_inserts.miss_opcode.data_read",
+ "BriefDescription": "TOR Inserts; Local Memory - Opcode Matched",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.DATA_LLC_PREFETCH",
- "Filter": "filter_opc=0x192",
+ "EventName": "UNC_C_TOR_INSERTS.LOCAL_OPCODE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x3",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All transactions, satisfied by an opcode, inserted into the TOR that are satisfied by locally HOMed memory.",
+ "UMask": "0x21",
"Unit": "CBO"
},
{
- "BriefDescription": "PCIe allocating writes that miss LLC - DDIO misses. Derived from unc_c_tor_inserts.miss_opcode.ddio_miss",
+ "BriefDescription": "TOR Inserts; Misses to Local Memory",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.PCIE_WRITE",
- "Filter": "filter_opc=0x19c",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_LOCAL",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x3",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that are satisfied by locally HOMed memory.",
+ "UMask": "0x2A",
"Unit": "CBO"
},
{
- "BriefDescription": "LLC misses for PCIe read current. Derived from unc_c_tor_inserts.miss_opcode.pcie_read",
+ "BriefDescription": "TOR Inserts; Misses to Local Memory - Opcode Matched",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.PCIE_READ",
- "Filter": "filter_opc=0x19e",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_LOCAL_OPCODE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x3",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions, satisfied by an opcode, inserted into the TOR that are satisfied by locally HOMed memory.",
+ "UMask": "0x23",
"Unit": "CBO"
},
{
- "BriefDescription": "LLC misses for ItoM writes (as part of fast string memcpy stores). Derived from unc_c_tor_inserts.miss_opcode.itom_write",
+ "BriefDescription": "TOR Inserts; Miss Opcode Match",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.ITOM_WRITE",
- "Filter": "filter_opc=0x1c8",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_OPCODE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that match an opcode.",
"UMask": "0x3",
"Unit": "CBO"
},
{
- "BriefDescription": "LLC misses for PCIe non-snoop reads. Derived from unc_c_tor_inserts.miss_opcode.pcie_read",
+ "BriefDescription": "TOR Inserts; Misses to Remote Memory",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.PCIE_NON_SNOOP_READ",
- "Filter": "filter_opc=0x1e4",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_REMOTE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x3",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that are satisfied by remote caches or remote memory.",
+ "UMask": "0x8A",
"Unit": "CBO"
},
{
- "BriefDescription": "LLC misses for PCIe non-snoop writes (full line). Derived from unc_c_tor_inserts.miss_opcode.pcie_write",
+ "BriefDescription": "TOR Inserts; Misses to Remote Memory - Opcode Matched",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.PCIE_NON_SNOOP_WRITE",
- "Filter": "filter_opc=0x1e6",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_REMOTE_OPCODE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x3",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions, satisfied by an opcode, inserted into the TOR that are satisfied by remote caches or remote memory.",
+ "UMask": "0x83",
"Unit": "CBO"
},
{
- "BriefDescription": "Streaming stores (full cache line). Derived from unc_c_tor_inserts.opcode.streaming_full",
+ "BriefDescription": "TOR Inserts; NID Matched",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.STREAMING_FULL",
- "Filter": "filter_opc=0x18c",
+ "EventName": "UNC_C_TOR_INSERTS.NID_ALL",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All NID matched (matches an RTID destination) transactions inserted into the TOR. The NID is programmed in Cn_MSR_PMON_BOX_FILTER.nid. In conjunction with STATE = I, it is possible to monitor misses to specific NIDs in the system.",
+ "UMask": "0x48",
"Unit": "CBO"
},
{
- "BriefDescription": "Streaming stores (partial cache line). Derived from unc_c_tor_inserts.opcode.streaming_partial",
+ "BriefDescription": "TOR Inserts; NID Matched Evictions",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.STREAMING_PARTIAL",
- "Filter": "filter_opc=0x18d",
+ "EventName": "UNC_C_TOR_INSERTS.NID_EVICTION",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; NID matched eviction transactions inserted into the TOR.",
+ "UMask": "0x44",
"Unit": "CBO"
},
{
- "BriefDescription": "Partial PCIe reads. Derived from unc_c_tor_inserts.opcode.pcie_partial",
+ "BriefDescription": "TOR Inserts; NID Matched Miss All",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_PARTIAL_READ",
- "Filter": "filter_opc=0x195",
+ "EventName": "UNC_C_TOR_INSERTS.NID_MISS_ALL",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All NID matched miss requests that were inserted into the TOR.",
+ "UMask": "0x4A",
"Unit": "CBO"
},
{
- "BriefDescription": "PCIe allocating writes that hit in LLC (DDIO hits). Derived from unc_c_tor_inserts.opcode.ddio_hit",
+ "BriefDescription": "TOR Inserts; NID and Opcode Matched Miss",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_WRITE",
- "Filter": "filter_opc=0x19c",
+ "EventName": "UNC_C_TOR_INSERTS.NID_MISS_OPCODE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that match a NID and an opcode.",
+ "UMask": "0x43",
"Unit": "CBO"
},
{
- "BriefDescription": "PCIe read current. Derived from unc_c_tor_inserts.opcode.pcie_read_current",
+ "BriefDescription": "TOR Inserts; NID and Opcode Matched",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_READ",
- "Filter": "filter_opc=0x19e",
+ "EventName": "UNC_C_TOR_INSERTS.NID_OPCODE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Transactions inserted into the TOR that match a NID and an opcode.",
+ "UMask": "0x41",
"Unit": "CBO"
},
{
- "BriefDescription": "ItoM write hits (as part of fast string memcpy stores). Derived from unc_c_tor_inserts.opcode.itom_write_hit",
+ "BriefDescription": "TOR Inserts; NID Matched Writebacks",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.ITOM_WRITE",
- "Filter": "filter_opc=0x1c8",
+ "EventName": "UNC_C_TOR_INSERTS.NID_WB",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; NID matched write transactions inserted into the TOR.",
+ "UMask": "0x50",
"Unit": "CBO"
},
{
- "BriefDescription": "PCIe non-snoop reads. Derived from unc_c_tor_inserts.opcode.pcie_read",
+ "BriefDescription": "TOR Inserts; Opcode Match",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_NS_READ",
- "Filter": "filter_opc=0x1e4",
+ "EventName": "UNC_C_TOR_INSERTS.OPCODE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Transactions inserted into the TOR that match an opcode (matched by Cn_MSR_PMON_BOX_FILTER.opc)",
"UMask": "0x1",
"Unit": "CBO"
},
{
- "BriefDescription": "PCIe non-snoop writes (partial). Derived from unc_c_tor_inserts.opcode.pcie_partial_write",
+ "BriefDescription": "TOR Inserts; Remote Memory",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_NS_PARTIAL_WRITE",
- "Filter": "filter_opc=0x1e5",
+ "EventName": "UNC_C_TOR_INSERTS.REMOTE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All transactions inserted into the TOR that are satisfied by remote caches or remote memory.",
+ "UMask": "0x88",
"Unit": "CBO"
},
{
- "BriefDescription": "PCIe non-snoop writes (full line). Derived from unc_c_tor_inserts.opcode.pcie_full_write",
+ "BriefDescription": "TOR Inserts; Remote Memory - Opcode Matched",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_NS_WRITE",
- "Filter": "filter_opc=0x1e6",
+ "EventName": "UNC_C_TOR_INSERTS.REMOTE_OPCODE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All transactions, satisfied by an opcode, inserted into the TOR that are satisfied by remote caches or remote memory.",
+ "UMask": "0x81",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Writebacks",
+ "Counter": "0,1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Write transactions inserted into the TOR. This does not include RFO, but actual operations that contain data being sent from the core.",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Any",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); All valid TOR entries. This includes requests that reside in the TOR for a short time, such as LLC Hits that do not need to snoop cores or requests that get rejected and have to be retried through one of the ingress queues. The TOR is more commonly a bottleneck in SKUs with smaller core counts, where the ratio of RTIDs to TOR entries is larger. Note that there are reserved TOR entries for various request types, so it is possible that a given request type be blocked with an occupancy that is less than 20. Also note that generally requests will not be able to arbitrate into the TOR pipeline if there are no available TOR slots.",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Evictions",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.EVICTION",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding eviction transactions in the TOR. Evictions can be quick, such as when the line is in the F, S, or E states and no core valid bits are set. They can also be longer if either CV bits are set (so the cores need to be snooped) and/or if there is a HitM (in which case it is necessary to write the request out to memory).",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x28",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Local Memory - Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.LOCAL_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding transactions, satisfied by an opcode, in the TOR that are satisfied by locally HOMed memory.",
+ "UMask": "0x21",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Miss All",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding miss requests in the TOR. 'Miss' means the allocation requires an RTID. This generally means that the request was sent to memory or MMIO.",
+ "UMask": "0xA",
"Unit": "CBO"
},
{
- "BriefDescription": "Occupancy for all LLC misses that are addressed to local memory",
+ "BriefDescription": "TOR Occupancy",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.MISS_LOCAL",
"PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
"UMask": "0x2A",
"Unit": "CBO"
},
{
- "BriefDescription": "Occupancy counter for LLC data reads (demand and L2 prefetch). Derived from unc_c_tor_occupancy.miss_opcode.llc_data_read",
+ "BriefDescription": "TOR Occupancy; Misses to Local Memory - Opcode Matched",
"EventCode": "0x36",
- "EventName": "UNC_C_TOR_OCCUPANCY.LLC_DATA_READ",
- "Filter": "filter_opc=0x182",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_LOCAL_OPCODE",
"PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding Miss transactions, satisfied by an opcode, in the TOR that are satisfied by locally HOMed memory.",
+ "UMask": "0x23",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Miss Opcode Match",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); TOR entries for miss transactions that match an opcode. This generally means that the request was sent to memory or MMIO.",
"UMask": "0x3",
"Unit": "CBO"
},
{
- "BriefDescription": "Occupancy for all LLC misses that are addressed to remote memory",
+ "BriefDescription": "TOR Occupancy",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.MISS_REMOTE",
"PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
"UMask": "0x8A",
"Unit": "CBO"
},
{
- "BriefDescription": "Read requests to home agent",
+ "BriefDescription": "TOR Occupancy; Misses to Remote Memory - Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_REMOTE_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding Miss transactions, satisfied by an opcode, in the TOR that are satisfied by remote caches or remote memory.",
+ "UMask": "0x83",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of NID matched outstanding requests in the TOR. The NID is programmed in Cn_MSR_PMON_BOX_FILTER.nid. In conjunction with STATE = I, it is possible to monitor misses to specific NIDs in the system.",
+ "UMask": "0x48",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID Matched Evictions",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_EVICTION",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding NID matched eviction transactions in the TOR.",
+ "UMask": "0x44",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_MISS_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding Miss requests in the TOR that match a NID.",
+ "UMask": "0x4A",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID and Opcode Matched Miss",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_MISS_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding Miss requests in the TOR that match a NID and an opcode.",
+ "UMask": "0x43",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID and Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); TOR entries that match a NID and an opcode.",
+ "UMask": "0x41",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID Matched Writebacks",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_WB",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); NID matched write transactions in the TOR.",
+ "UMask": "0x50",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Opcode Match",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); TOR entries that match an opcode (matched by Cn_MSR_PMON_BOX_FILTER.opc).",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x88",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Remote Memory - Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.REMOTE_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding transactions, satisfied by an opcode, in the TOR that are satisfied by remote caches or remote memory.",
+ "UMask": "0x81",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Writebacks",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.WB",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Write transactions in the TOR. This does not include RFO, but actual operations that contain data being sent from the core.",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Onto AD Ring",
+ "Counter": "0,1",
+ "EventCode": "0x4",
+ "EventName": "UNC_C_TxR_ADS_USED.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Onto AK Ring",
+ "Counter": "0,1",
+ "EventCode": "0x4",
+ "EventName": "UNC_C_TxR_ADS_USED.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Onto BL Ring",
+ "Counter": "0,1",
+ "EventCode": "0x4",
+ "EventName": "UNC_C_TxR_ADS_USED.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AD - Cachebo",
+ "Counter": "0,1",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AD_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Cachebo destined for the AD ring. Some examples include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AD - Corebo",
+ "Counter": "0,1",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AD_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Corebo destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AK - Cachebo",
+ "Counter": "0,1",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AK_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Cachebo destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AK - Corebo",
+ "Counter": "0,1",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AK_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Corebo destined for the AK ring. This is commonly used for snoop responses coming from the core and destined for a Cachebo.",
+ "UMask": "0x20",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; BL - Cachebo",
+ "Counter": "0,1",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.BL_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Cachebo destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; BL - Corebo",
+ "Counter": "0,1",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.BL_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Corebo destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; IV - Cachebo",
+ "Counter": "0,1",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.IV_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Cachebo destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto AD Ring (to core)",
+ "Counter": "0,1",
+ "EventCode": "0x3",
+ "EventName": "UNC_C_TxR_STARVED.AD_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.; cycles that the core AD egress spent in starvation",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto AK Ring",
+ "Counter": "0,1",
+ "EventCode": "0x3",
+ "EventName": "UNC_C_TxR_STARVED.AK_BOTH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.; cycles that both AK egresses spent in starvation",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto IV Ring",
+ "Counter": "0,1",
+ "EventCode": "0x3",
+ "EventName": "UNC_C_TxR_STARVED.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.; cycles that the cachebo IV egress spent in starvation",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BT Bypass",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x52",
+ "EventName": "UNC_H_BT_BYPASS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of transactions that bypass the BT (fifo) to HT",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT Cycles Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x42",
+ "EventName": "UNC_H_BT_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Backup Tracker (BT) is not empty. The BT is the actual HOM tracker in IVT.",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT Cycles Not Empty: Local",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x42",
+ "EventName": "UNC_H_BT_CYCLES_NE.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Backup Tracker (BT) is not empty. The BT is the actual HOM tracker in IVT.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT Cycles Not Empty: Remote",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x42",
+ "EventName": "UNC_H_BT_CYCLES_NE.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Backup Tracker (BT) is not empty. The BT is the actual HOM tracker in IVT.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT Occupancy; Local",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x43",
+ "EventName": "UNC_H_BT_OCCUPANCY.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the HA BT pool in every cycle. This can be used with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA BTs are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT Occupancy; Reads Local",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x43",
+ "EventName": "UNC_H_BT_OCCUPANCY.READS_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the HA BT pool in every cycle. This can be used with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA BTs are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT Occupancy; Reads Remote",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x43",
+ "EventName": "UNC_H_BT_OCCUPANCY.READS_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the HA BT pool in every cycle. This can be used with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA BTs are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT Occupancy; Remote",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x43",
+ "EventName": "UNC_H_BT_OCCUPANCY.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the HA BT pool in every cycle. This can be used with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA BTs are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT Occupancy; Writes Local",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x43",
+ "EventName": "UNC_H_BT_OCCUPANCY.WRITES_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the HA BT pool in every cycle. This can be used with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA BTs are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT Occupancy; Writes Remote",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x43",
+ "EventName": "UNC_H_BT_OCCUPANCY.WRITES_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the HA BT pool in every cycle. This can be used with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA BTs are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT to HT Not Issued; Incoming Data Hazard",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.INCOMING_BL_HAZARD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not issue transactions from BT to HT.; Cycles unable to issue from BT due to incoming BL data hazard",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT to HT Not Issued; Incoming Snoop Hazard",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.INCOMING_SNP_HAZARD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not issue transactions from BT to HT.; Cycles unable to issue from BT due to incoming snoop hazard",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT to HT Not Issued; Incoming RspAckCnflt Hazard",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.RSPACKCFLT_HAZARD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not issue transactions from BT to HT.; Cycles unable to issue from BT due to incoming BL data hazard",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT to HT Not Issued; Incoming WbMData Hazard",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.WBMDATA_HAZARD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not issue transactions from BT to HT.; Cycles unable to issue from BT due to incoming BL data hazard",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Bypass; Not Taken",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_H_BYPASS_IMC.NOT_TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that the HA to iMC bypass was attempted. This is a latency optimization for situations when there is light loading on the memory subsystem. This can be filtered by whether the bypass was taken or not.; Filter for transactions that could not take the bypass.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Bypass; Taken",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_H_BYPASS_IMC.TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that the HA to iMC bypass was attempted. This is a latency optimization for situations when there is light loading on the memory subsystem. This can be filtered by whether the bypass was taken or not.; Filter for transactions that succeeded in taking the bypass.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "uclks",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_H_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of uclks in the HA. This will be slightly different than the count in the Ubox because of enable/freeze delays. The HA is on the other side of the die from the fixed Ubox uclk counter, so the drift could be somewhat larger than in units that are closer like the QPI Agent.",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Conflict Checks; Acknowledge Conflicts",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xb",
+ "EventName": "UNC_H_CONFLICT_CYCLES.ACKCNFLTS",
+ "PerPkg": "1",
+ "PublicDescription": "Count the number of Ackcnflts",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Conflict Checks; Cmp Fwds",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xb",
+ "EventName": "UNC_H_CONFLICT_CYCLES.CMP_FWDS",
+ "PerPkg": "1",
+ "PublicDescription": "Count the number of Cmp_Fwd. This will give the number of late conflicts.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Conflict Checks; Conflict Detected",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xb",
+ "EventName": "UNC_H_CONFLICT_CYCLES.CONFLICT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that we are handling conflicts.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Conflict Checks; Last in conflict chain",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xb",
+ "EventName": "UNC_H_CONFLICT_CYCLES.LAST",
+ "PerPkg": "1",
+ "PublicDescription": "Count every last conflictor in a conflict chain. Can be used to compute the average conflict chain length as (#Ackcnflts/#LastConflictor)+1. This can be used to give a feel for conflict chain lengths while analyzing lock kernels.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
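The UNC_H_CONFLICT_CYCLES.LAST description above suggests deriving the average conflict chain length as (#Ackcnflts / #LastConflictor) + 1. A minimal sketch of that arithmetic in Python, using hypothetical counter deltas (the variable names and values are illustrative only, not part of this event file):

    # Hypothetical deltas of UNC_H_CONFLICT_CYCLES.ACKCNFLTS and
    # UNC_H_CONFLICT_CYCLES.LAST collected over the same interval.
    ackcnflts = 12_000
    last_conflictor = 3_000

    # Average conflict chain length per the event description:
    # (#Ackcnflts / #LastConflictor) + 1
    avg_chain_len = (ackcnflts / last_conflictor) + 1
    print(f"average conflict chain length ~= {avg_chain_len:.2f}")  # -> 5.00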
+ {
+ "BriefDescription": "Direct2Core Messages Sent",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_H_DIRECT2CORE_COUNT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Direct2Core messages sent",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles when Direct2Core was Disabled",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_H_DIRECT2CORE_CYCLES_DISABLED",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles in which Direct2Core was disabled",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Number of Reads that had Direct2Core Overridden",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_H_DIRECT2CORE_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of reads where Direct2Core was overridden",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lat Opt Return",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_H_DIRECTORY_LAT_OPT",
+ "PerPkg": "1",
+ "PublicDescription": "Directory Latency Optimization Data Return Path Taken. When directory mode is enabled and the directory returned for a read is Dir=I, then data can be returned using a faster path if certain conditions are met (credits, free pipeline, etc).",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lookups: Any state",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xc",
+ "EventName": "UNC_H_DIRECTORY_LOOKUP.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lookups; Snoop Not Needed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xc",
+ "EventName": "UNC_H_DIRECTORY_LOOKUP.NO_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.; Filters for transactions that did not have to send any snoops because the directory bit was clear.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lookups: Snoop A",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xc",
+ "EventName": "UNC_H_DIRECTORY_LOOKUP.SNOOP_A",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lookups: Snoop S",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xc",
+ "EventName": "UNC_H_DIRECTORY_LOOKUP.SNOOP_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lookups; Snoop Needed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xc",
+ "EventName": "UNC_H_DIRECTORY_LOOKUP.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.; Filters for transactions that had to send one or more snoops because the directory bit was set.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lookups: A State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xc",
+ "EventName": "UNC_H_DIRECTORY_LOOKUP.STATE_A",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lookups: I State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xc",
+ "EventName": "UNC_H_DIRECTORY_LOOKUP.STATE_I",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lookups: S State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xc",
+ "EventName": "UNC_H_DIRECTORY_LOOKUP.STATE_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates: A2I",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xd",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.A2I",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates: A2S",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xd",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.A2S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates; Any Directory Update",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xd",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates; Directory Clear",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.CLEAR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.; Filter for directory clears. This occurs when snoops were sent and all returned with RspI.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates: I2A",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xd",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.I2A",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates: I2S",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xd",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.I2S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates: S2A",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xd",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.S2A",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates: S2I",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xd",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.S2I",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates; Directory Set",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.SET",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.; Filter for directory sets. This occurs when a remote read transaction requests memory, bringing it to a remote cache.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD QPI Link 2 Credit Accumulator",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x59",
+ "EventName": "UNC_H_IGR_AD_QPI2_ACCUMULATOR",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of credits available to the QPI Link 2 AD Ingress buffer.",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL QPI Link 2 Credit Accumulator",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5a",
+ "EventName": "UNC_H_IGR_BL_QPI2_ACCUMULATOR",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of credits available to the QPI Link 2 BL Ingress buffer.",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD QPI Link 2 Credit Accumulator",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x59",
+ "EventName": "UNC_H_IGR_CREDITS_AD_QPI2",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of credits available to the QPI Link 2 AD Ingress buffer.",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL QPI Link 2 Credit Accumulator",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5A",
+ "EventName": "UNC_H_IGR_CREDITS_BL_QPI2",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of credits available to the QPI Link 2 BL Ingress buffer.",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; AD to QPI Link 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; AD to QPI Link 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Normal Priority Reads Issued; Normal Priority",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x17",
+ "EventName": "UNC_H_IMC_READS.NORMAL",
+ "PerPkg": "1",
+ "PublicDescription": "Count of the number of reads issued to any of the memory controller channels. This can be filtered by the priority of the reads.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Retry Events",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1e",
+ "EventName": "UNC_H_IMC_RETRY",
+ "PerPkg": "1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; All Writes",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1a",
+ "EventName": "UNC_H_IMC_WRITES.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0xF",
+ "Unit": "HA"
+ },
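The .ALL umask above (0xF) is the bitwise OR of the four write sub-event umasks listed next: FULL (0x1), PARTIAL (0x2), FULL_ISOCH (0x4) and PARTIAL_ISOCH (0x8). A tiny illustrative check in Python (the sub-event values are taken from the entries that follow; nothing here is added to the event file):

    # Umasks of the UNC_H_IMC_WRITES sub-events defined below.
    full, partial, full_isoch, partial_isoch = 0x1, 0x2, 0x4, 0x8

    # ORing the individual filters reproduces the aggregate .ALL umask.
    assert full | partial | full_isoch | partial_isoch == 0xF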
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; Full Line Non-ISOCH",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1a",
+ "EventName": "UNC_H_IMC_WRITES.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; ISOCH Full Line",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1a",
+ "EventName": "UNC_H_IMC_WRITES.FULL_ISOCH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; Partial Non-ISOCH",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1a",
+ "EventName": "UNC_H_IMC_WRITES.PARTIAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; ISOCH Partial",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1a",
+ "EventName": "UNC_H_IMC_WRITES.PARTIAL_ISOCH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IODC Conflicts; Any Conflict",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x57",
+ "EventName": "UNC_H_IODC_CONFLICTS.ANY",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IODC Conflicts; Last Conflict",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x57",
+ "EventName": "UNC_H_IODC_CONFLICTS.LAST",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IODC Conflicts: Remote InvItoE - Same RTID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x57",
+ "EventName": "UNC_H_IODC_CONFLICTS.REMOTE_INVI2E_SAME_RTID",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IODC Conflicts: Remote (Other) - Same Addr",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x57",
+ "EventName": "UNC_H_IODC_CONFLICTS.REMOTE_OTHER_SAME_ADDR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IODC Inserts",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x56",
+ "EventName": "UNC_H_IODC_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "IODC Allocations",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Num IODC 0 Length Writes",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x58",
+ "EventName": "UNC_H_IODC_OLEN_WBMTOI",
+ "PerPkg": "1",
+ "PublicDescription": "Num IODC 0 Length Writebacks M to I - All of which are dropped.",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Local InvItoE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.INVITOE_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Local Reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.READS_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Remote",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that broadcast snoops due to OSB, but found clean data in memory and were able to do an early data return",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; Reads to Local I",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.READS_LOCAL_I",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that broadcast snoops due to OSB, but found clean data in memory and were able to do an early data return",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; Reads to Local S",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.READS_LOCAL_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that broadcast snoops due to OSB, but found clean data in memory and were able to do an early data return",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; Reads to Remote I",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.READS_REMOTE_I",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that broadcast snoops due to OSB, but found clean data in memory and were able to do an early data return",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; Reads to Remote S",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.READS_REMOTE_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that broadcast snoops due to OSB, but found clean data in memory and were able to do an early data return",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Read and Write Requests; Local InvItoEs",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_H_REQUESTS.INVITOE_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; This filter includes only InvItoEs coming from the local socket.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Read and Write Requests; Remote InvItoEs",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_H_REQUESTS.INVITOE_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; This filter includes only InvItoEs coming from remote sockets.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Read and Write Requests; Reads",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.READS",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; Incoming read requests. This is a good proxy for LLC Read Misses (including RFOs).",
"UMask": "0x3",
"Unit": "HA"
},
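The READS description above calls this event a good proxy for LLC read misses (including RFOs). A rough traffic estimate can be derived from it; the sketch below assumes each counted request moves one 64-byte cache line, which is an assumption of this example rather than something stated in the entry, and the counter delta and window length are hypothetical:

    # Hypothetical UNC_H_REQUESTS.READS delta over a 1-second window.
    reads = 50_000_000
    window_seconds = 1.0
    bytes_per_line = 64  # assumed line size; not specified in this entry

    approx_read_bw = reads * bytes_per_line / window_seconds
    print(f"approx. HA read traffic: {approx_read_bw / 1e9:.2f} GB/s")  # -> 3.20 GB/s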
{
- "BriefDescription": "Write requests to home agent",
+ "BriefDescription": "Read and Write Requests; Local Reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_H_REQUESTS.READS_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; This filter includes only read requests coming from the local socket. This is a good proxy for LLC Read Misses (including RFOs) from the local socket.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Read and Write Requests; Remote Reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_H_REQUESTS.READS_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; This filter includes only read requests coming from the remote socket. This is a good proxy for LLC Read Misses (including RFOs) from the remote socket.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Read and Write Requests; Writes",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.WRITES",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; Incoming write requests.",
"UMask": "0xC",
"Unit": "HA"
},
{
- "BriefDescription": "M line forwarded from remote cache along with writeback to memory",
+ "BriefDescription": "Read and Write Requests; Local Writes",
"Counter": "0,1,2,3",
- "EventCode": "0x21",
- "EventName": "UNC_H_SNOOP_RESP.RSP_FWD_WB",
+ "EventCode": "0x1",
+ "EventName": "UNC_H_REQUESTS.WRITES_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; This filter includes only writes coming from the local socket.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Read and Write Requests; Remote Writes",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_H_REQUESTS.WRITES_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; This filter includes only writes coming from remote sockets.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xCC",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Counterclockwise and Even on VRing 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3e",
+ "EventName": "UNC_H_RING_AD_USED.CCW_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Counterclockwise and Odd on VRing 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3e",
+ "EventName": "UNC_H_RING_AD_USED.CCW_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Counterclockwise and Even on VRing 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3e",
+ "EventName": "UNC_H_RING_AD_USED.CCW_VR1_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity on Virtual Ring 1.",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Counterclockwise and Odd on VRing 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3e",
+ "EventName": "UNC_H_RING_AD_USED.CCW_VR1_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity on Virtual Ring 1.",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x33",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Clockwise and Even on VRing 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3e",
+ "EventName": "UNC_H_RING_AD_USED.CW_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Clockwise and Odd on VRing 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3e",
+ "EventName": "UNC_H_RING_AD_USED.CW_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Clockwise and Even on VRing 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3e",
+ "EventName": "UNC_H_RING_AD_USED.CW_VR1_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity on Virtual Ring 1.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Clockwise and Odd on VRing 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3e",
+ "EventName": "UNC_H_RING_AD_USED.CW_VR1_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity on Virtual Ring 1.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xCC",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Counterclockwise and Even on VRing 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3f",
+ "EventName": "UNC_H_RING_AK_USED.CCW_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Counterclockwise and Odd on VRing 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3f",
+ "EventName": "UNC_H_RING_AK_USED.CCW_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Counterclockwise and Even on VRing 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3f",
+ "EventName": "UNC_H_RING_AK_USED.CCW_VR1_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity on Virtual Ring 1.",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Counterclockwise and Odd on VRing 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3f",
+ "EventName": "UNC_H_RING_AK_USED.CCW_VR1_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity on Virtual Ring 1.",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x33",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Clockwise and Even on VRing 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3f",
+ "EventName": "UNC_H_RING_AK_USED.CW_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Clockwise and Odd on VRing 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3f",
+ "EventName": "UNC_H_RING_AK_USED.CW_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Clockwise and Even on VRing 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3f",
+ "EventName": "UNC_H_RING_AK_USED.CW_VR1_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity on Virtual Ring 1.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Clockwise and Odd on VRing 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3f",
+ "EventName": "UNC_H_RING_AK_USED.CW_VR1_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity on Virtual Ring 1.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xCC",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Counterclockwise and Even on VRing 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CCW_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Counterclockwise and Odd on VRing 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CCW_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Counterclockwise and Even on VRing 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CCW_VR1_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity on Virtual Ring 1.",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Counterclockwise and Odd on VRing 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CCW_VR1_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity on Virtual Ring 1.",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x33",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Clockwise and Even on VRing 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CW_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Clockwise and Odd on VRing 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CW_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Clockwise and Even on VRing 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CW_VR1_EVEN",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity on Virtual Ring 1.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Clockwise and Odd on VRing 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CW_VR1_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity on Virtual Ring 1.",
"UMask": "0x20",
"Unit": "HA"
},
{
- "BriefDescription": "M line forwarded from remote cache with no writeback to memory",
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the regular credits. Common high bandwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 0 only.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the regular credits. Common high bandwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 1 only.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the regular credits. Common high bandwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 2 only.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the regular credits. Common high bandwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 3 only.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 0 only.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 1 only.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 2 only.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 3 only.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received; RSPCNFLCT*",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_SNOOP_RESP.RSPCNFLCT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for snoop responses of RspConflict. This is returned when a snoop finds an existing outstanding transaction in a remote caching agent when it CAMs that caching agent. This triggers conflict resolution hardware. This covers both RspCnflct and RspCnflctWbI.",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received; RspI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_SNOOP_RESP.RSPI",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for snoop responses of RspI. RspI is returned when the remote cache does not have the data, or when the remote cache silently evicts data (such as when an RFO hits non-modified data).",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received; RspIFwd",
"Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPIFWD",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for snoop responses of RspIFwd. This is returned when a remote caching agent forwards data and the requesting agent is able to acquire the data in E or M states. This is commonly returned with RFO transactions. It can be either a HitM or a HitFE.",
"UMask": "0x4",
"Unit": "HA"
},
{
- "BriefDescription": "Shared line response from remote cache",
+ "BriefDescription": "Snoop Responses Received; RspS",
"Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPS",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for snoop responses of RspS. RspS is returned when a remote cache has data but is not forwarding it. It is a way to let the requesting socket know that it cannot allocate the data in E state. No data is sent with RspS.",
"UMask": "0x2",
"Unit": "HA"
},
{
- "BriefDescription": "Shared line forwarded from remote cache",
+ "BriefDescription": "Snoop Responses Received; RspSFwd",
"Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPSFWD",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for a snoop response of RspSFwd. This is returned when a remote caching agent forwards data but holds on to its current copy. This is common for data and code reads that hit in a remote socket in E or F state.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received; Rsp*Fwd*WB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_SNOOP_RESP.RSP_FWD_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for a snoop response of Rsp*Fwd*WB. This snoop response is only used in 4s systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to the home to be written back to memory.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received; Rsp*WB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_SNOOP_RESP.RSP_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for a snoop response of RspIWB or RspSWB. This is returned when a non-RFO request hits in M state. Data and Code Reads can return either RspIWB or RspSWB depending on how the system has been configured. InvItoE transactions will also return RspIWB because they must acquire ownership.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; Other",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.OTHER",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for all other snoop responses.",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspCnflct",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPCNFLCT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for snoop responses of RspConflict. This is returned when a snoop finds an existing outstanding transaction in a remote caching agent when it CAMs that caching agent. This triggers conflict resolution hardware. This covers both RspCnflct and RspCnflctWbI.",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPI",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for snoop responses of RspI. RspI is returned when the remote cache does not have the data, or when the remote cache silently evicts data (such as when an RFO hits non-modified data).",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspIFwd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPIFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for snoop responses of RspIFwd. This is returned when a remote caching agent forwards data and the requesting agent is able to acquire the data in E or M states. This is commonly returned with RFO transactions. It can be either a HitM or a HitFE.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for snoop responses of RspS. RspS is returned when a remote cache has data but is not forwarding it. It is a way to let the requesting socket know that it cannot allocate the data in E state. No data is sent with RspS.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspSFwd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPSFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for a snoop response of RspSFwd. This is returned when a remote caching agent forwards data but holds on to its current copy. This is common for data and code reads that hit in a remote socket in E or F state.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; Rsp*FWD*WB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPxFWDxWB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for a snoop response of Rsp*Fwd*WB. This snoop response is only used in 4s systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to the home to be written back to memory.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; Rsp*WB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPxWB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for a snoop response of RspIWB or RspSWB. This is returned when a non-RFO request hits in M state. Data and Code Reads can return either RspIWB or RspSWB depending on how the system has been configured. InvItoE transactions will also return RspIWB because they must acquire ownership.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 2",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 3",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION4",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 4",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION5",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 5",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION6",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 6",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION7",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 7",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1c",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION10",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 10",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1c",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION11",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 11",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1c",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION8",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 8",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1c",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION9",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 9",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_H_TRACKER_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the local HA tracker pool is not empty. This can be used with edge detect to identify the number of situations when the pool became empty. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, this buffer could be completely empty, but there may still be credits in use by the CBos. This stat can be used in conjunction with the occupancy accumulation stat in order to calculate average queue occpancy. HA trackers are allocated as soon as a request enters the HA if an HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound NDR Ring Transactions; Non-data Responses",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xF",
+ "EventName": "UNC_H_TxR_AD.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of outbound transactions on the AD ring. This can be filtered by the NDR and SNP message classes. See the filter descriptions for more details.; Filter for outbound NDR transactions sent on the AD ring. NDR stands for non-data response and is generally used for completions that do not include data. AD NDR is used for transactions to remote sockets.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Full; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2a",
+ "EventName": "UNC_H_TxR_AD_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Full; Cycles full from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Full; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2a",
+ "EventName": "UNC_H_TxR_AD_CYCLES_FULL.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Full; Filter for cycles full from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Full; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2a",
+ "EventName": "UNC_H_TxR_AD_CYCLES_FULL.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Full; Filter for cycles full from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Not Empty; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_TxR_AD_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Not Empty; Cycles full from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Not Empty; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_TxR_AD_CYCLES_NE.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Not Empty; Filter for cycles not empty from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Not Empty; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_TxR_AD_CYCLES_NE.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Not Empty; Filter for cycles not empty from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Allocations; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_TxR_AD_INSERTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Allocations; Allocations from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Allocations; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_TxR_AD_INSERTS.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Allocations; Filter for allocations from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Allocations; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_TxR_AD_INSERTS.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Allocations; Filter for allocations from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Occupancy; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_H_TxR_AD_OCCUPANCY.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Occupancy; Filter for occupancy from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Occupancy; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_H_TxR_AD_OCCUPANCY.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Occupancy; Filter for occupancy from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound Ring Transactions on AK: CRD Transactions to Cbo",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xe",
+ "EventName": "UNC_H_TxR_AK.CRD_CBO",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Full; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_TxR_AK_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Full; Cycles full from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Full; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_TxR_AK_CYCLES_FULL.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Full; Filter for cycles full from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Full; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_TxR_AK_CYCLES_FULL.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Full; Filter for cycles full from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Not Empty; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_H_TxR_AK_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Not Empty; Cycles full from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Not Empty; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_H_TxR_AK_CYCLES_NE.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Not Empty; Filter for cycles not empty from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Not Empty; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_H_TxR_AK_CYCLES_NE.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Not Empty; Filter for cycles not empty from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2f",
+ "EventName": "UNC_H_TxR_AK_INSERTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Allocations; Allocations from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2f",
+ "EventName": "UNC_H_TxR_AK_INSERTS.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Allocations; Filter for allocations from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2f",
+ "EventName": "UNC_H_TxR_AK_INSERTS.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Allocations; Filter for allocations from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Occupancy; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x30",
+ "EventName": "UNC_H_TxR_AK_OCCUPANCY.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Occupancy; Filter for occupancy from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Occupancy; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x30",
+ "EventName": "UNC_H_TxR_AK_OCCUPANCY.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Occupancy; Filter for occupancy from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_H_TxR_BL.DRS_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.; Filter for data being sent to the cache.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Core",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_H_TxR_BL.DRS_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.; Filter for data being sent directly to the requesting core.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to QPI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_H_TxR_BL.DRS_QPI",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.; Filter for data being sent to a remote socket over QPI.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Full; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TxR_BL_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Full; Cycles full from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Full; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TxR_BL_CYCLES_FULL.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Full; Filter for cycles full from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Full; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TxR_BL_CYCLES_FULL.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Full; Filter for cycles full from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Not Empty; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TxR_BL_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Not Empty; Cycles full from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Not Empty; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TxR_BL_CYCLES_NE.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Not Empty; Filter for cycles not empty from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Not Empty; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TxR_BL_CYCLES_NE.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Not Empty; Filter for cycles not empty from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Allocations; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_TxR_BL_INSERTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Allocations; Allocations from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Allocations; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_TxR_BL_INSERTS.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Allocations; Filter for allocations from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Allocations; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_TxR_BL_INSERTS.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Allocations; Filter for allocations from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Occupancy: All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_H_TxR_BL_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Occupancy",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Occupancy; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_H_TxR_BL_OCCUPANCY.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Occupancy; Filter for occupancy from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Occupancy; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_H_TxR_BL_OCCUPANCY.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Occupancy; Filter for occupancy from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 0 only.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 1 only.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 2 only.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 3 only.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 0 only.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 1 only.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 2 only.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 3 only.",
"UMask": "0x8",
"Unit": "HA"
}
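Several of the HA descriptions above (for example UNC_H_TRACKER_CYCLES_NE) note that a not-empty cycle count can be combined with the matching occupancy accumulator to derive an average queue occupancy. The short Python sketch below, which is not part of the patch, illustrates that arithmetic; the companion occupancy event name and the counter readings are illustrative assumptions only.

# Minimal sketch: average queue occupancy from two uncore counter readings.
# Assumes a companion occupancy-accumulator event (hypothetical name
# UNC_H_TRACKER_OCCUPANCY) sampled over the same interval as
# UNC_H_TRACKER_CYCLES_NE.
def average_occupancy(occupancy_accumulated: int, cycles_not_empty: int) -> float:
    """Average tracker entries over the cycles the pool held at least one entry."""
    if cycles_not_empty == 0:
        return 0.0
    return occupancy_accumulated / cycles_not_empty

# Illustrative counter values (not measured data):
print(average_occupancy(occupancy_accumulated=1_200_000, cycles_not_empty=300_000))  # 4.0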
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/ivytown/uncore-interconnect.json
index b798a860bc81..b3b1a08d4acf 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/uncore-interconnect.json
@@ -1,48 +1,1767 @@
[
{
- "BriefDescription": "QPI clock ticks. Use to get percentages for QPI cycles events",
+ "BriefDescription": "Number of qfclks",
"Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_Q_CLOCKTICKS",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of clocks in the QPI LL. This clock runs at 1/8th the GT/s speed of the QPI link. For example, a 8GT/s link will have qfclk or 1GHz. JKT does not support dynamic link speeds, so this frequency is fixed.",
"Unit": "QPI LL"
},
{
- "BriefDescription": "Cycles where receiving QPI link is in half-width mode",
+ "BriefDescription": "Count of CTO Events",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_CTO_COUNT",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of CTO (cluster trigger outs) events that were asserted across the two slots. If both slots trigger in a given cycle, the event will increment by 2. You can use edge detect to count the number of cases when both events triggered.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn failed because there were not enough Egress credits. Had there been enough credits, the spawn would have worked as the RBT bit was set and the RBT tag matched.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress and RBT Miss",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn failed because the RBT tag did not match and there weren't enough Egress credits. The valid bit was set.",
+ "UMask": "0x20",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress and RBT Invalid",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS_RBT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn failed because there were not enough Egress credits AND the RBT bit was not set, but the RBT tag matched.",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress and RBT Miss, Invalid",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS_RBT_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn failed because the RBT tag did not match, the valid bit was not set and there weren't enough Egress credits.",
+ "UMask": "0x80",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - RBT Miss",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn failed because the RBT tag did not match although the valid bit was set and there were enough Egress credits.",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - RBT Invalid",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_RBT_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn failed because the route-back table (RBT) specified that the transaction should not trigger a direct2core tranaction. This is common for IO transactions. There were enough Egress credits and the RBT tag matched but the valid bit was not set.",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - RBT Miss and Invalid",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_RBT_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn failed because the RBT tag did not match and the valid bit was not set although there were enough Egress credits.",
+ "UMask": "0x40",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Success",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.SUCCESS_RBT_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn was successful. There were sufficient credits, the RBT valid bit was set and there was an RBT tag match. The message was marked to spawn direct2core.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in L1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_Q_L1_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI qfclk cycles spent in L1 power mode. L1 is a mode that totally shuts down a QPI link. Use edge detect to count the number of instances when the QPI link entered L1. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. Because L1 totally shuts down the link, it takes a good amount of time to exit this mode.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in L0p",
"Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_Q_RxL0P_POWER_CYCLES",
- "MetricExpr": "(UNC_Q_RxL0P_POWER_CYCLES / UNC_Q_CLOCKTICKS) * 100.",
- "MetricName": "rxl0p_power_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Number of QPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 1/2 of the QPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize QPI for snoops and their responses. Use edge detect to count the number of instances when the QPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in L0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xf",
+ "EventName": "UNC_Q_RxL0_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Bypassed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_Q_RxL_BYPASSED",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of flits transfered, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "CRC Errors Detected; LinkInit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_CRC_ERRORS.LINK_INIT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CRC errors detected in the QPI Agent. Each QPI flit incorporates 8 bits of CRC for error detection. This counts the number of flits where the CRC was able to detect an error. After an error has been detected, the QPI agent will send a request to the transmitting socket to resend the flit (as well as any flits that came after it).; CRC errors detected during link initialization.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "CRC Errors Detected; Normal Operations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_CRC_ERRORS.NORMAL_OP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CRC errors detected in the QPI Agent. Each QPI flit incorporates 8 bits of CRC for error detection. This counts the number of flits where the CRC was able to detect an error. After an error has been detected, the QPI agent will send a request to the transmitting socket to resend the flit (as well as any flits that came after it).; CRC errors detected during normal operation.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; DRS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1e",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.DRS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN0 credit for the DRS message class.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; HOM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1e",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.HOM",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN0 credit for the HOM message class.",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; NCB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1e",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NCB",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN0 credit for the NCB message class.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; NCS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1e",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NCS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN0 credit for the NCS message class.",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; NDR",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1e",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NDR",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN0 credit for the NDR message class.",
+ "UMask": "0x20",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; SNP",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1e",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.SNP",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN0 credit for the SNP message class.",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed; DRS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.DRS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN1 credit for the DRS message class.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed; HOM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.HOM",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN1 credit for the HOM message class.",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed; NCB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.NCB",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN1 credit for the NCB message class.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed; NCS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.NCS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN1 credit for the NCS message class.",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed; NDR",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.NDR",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN1 credit for the NDR message class.",
+ "UMask": "0x20",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed; SNP",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.SNP",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN1 credit for the SNP message class.",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VNA Credit Consumed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1d",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VNA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VNA credit was consumed (i.e. message uses a VNA credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xa",
+ "EventName": "UNC_Q_RxL_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - DRS; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xF",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_DRS.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors DRS flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - DRS; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xF",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_DRS.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors DRS flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - HOM; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_HOM.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors HOM flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - HOM; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_HOM.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors HOM flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - NCB; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_NCB.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors NCB flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - NCB; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_NCB.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors NCB flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - NCS; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_NCS.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors NCS flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - NCS; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_NCS.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors NCS flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - NDR; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_NDR.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors NDR flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - NDR; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_NDR.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors NDR flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - SNP; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_SNP.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors SNP flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - SNP; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_SNP.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors SNP flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 0; Data Tx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_RxL_FLITS_G0.DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.; Number of data flitsreceived over QPI. Each flit contains 64b of data. This includes both DRS and NCB data flits (coherent and non-coherent). This can be used to calculate the data bandwidth of the QPI link. One can get a good picture of the QPI-link characteristics by evaluating the protocol flits, data flits, and idle/null flits. This does not include the header flits that go in data packets.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 0; Idle and Null Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_RxL_FLITS_G0.IDLE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.; Number of flits received over QPI that do not hold protocol payload. When QPI is not in a power saving state, it continuously transmits flits across the link. When there are no protocol flits to send, it will send IDLE and NULL flits across. These flits sometimes do carry a payload, such as credit returns, but are generall not considered part of the QPI bandwidth.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 0; Non-Data protocol Tx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_RxL_FLITS_G0.NON_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.; Number of non-NULL non-data flits received across QPI. This basically tracks the protocol overhead on the QPI link. One can get a good picture of the QPI-link characteristics by evaluating the protocol flits, data flits, and idle/null flits. This includes the header flits for data packets.",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; DRS Flits (both Header and Data)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.DRS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of flits received over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency. This does not count data flits received over the NCB channel which transmits non-coherent data.",
+ "UMask": "0x18",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; DRS Data Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.DRS_DATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of data flits received over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency. This does not count data flits received over the NCB channel which transmits non-coherent data. This includes only the data flits (not the header).",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; DRS Header Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.DRS_NONDATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of protocol flits received over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency. This does not count data flits received over the NCB channel which transmits non-coherent data. This includes only the header flits (not the data). This includes extended headers.",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; HOM Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.HOM",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of flits received over QPI on the home channel.",
+ "UMask": "0x6",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; HOM Non-Request Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.HOM_NONREQ",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of non-request flits received over QPI on the home channel. These are most commonly snoop responses, and this event can be used as a proxy for that.",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; HOM Request Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.HOM_REQ",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of data request received over QPI on the home channel. This basically counts the number of remote memory requests received over QPI. In conjunction with the local read count in the Home Agent, one can calculate the number of LLC Misses.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; SNP Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.SNP",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of snoop request flits received over QPI. These requests are contained in the snoop channel. This does not include snoop responses, which are received on the home channel.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Coherent Rx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NCB",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of Non-Coherent Bypass flits. These packets are generally used to transmit non-coherent data across QPI.",
+ "UMask": "0xC",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Coherent data Rx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NCB_DATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of Non-Coherent Bypass data flits. These flits are generally used to transmit non-coherent data across QPI. This does not include a count of the DRS (coherent) data flits. This only counts the data flits, not the NCB headers.",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Coherent non-data Rx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NCB_NONDATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of Non-Coherent Bypass non-data flits. These packets are generally used to transmit non-coherent data across QPI, and the flits counted here are for headers and other non-data flits. This includes extended headers.",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Coherent standard Rx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NCS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of NCS (non-coherent standard) flits received over QPI. This includes extended headers.",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Data Response Rx Flits - AD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NDR_AD",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of flits received over the NDR (Non-Data Response) channel. This channel is used to send a variety of protocol flits including grants and completions. This is only for NDR packets to the local socket which use the AK ring.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Data Response Rx Flits - AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NDR_AK",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of flits received over the NDR (Non-Data Response) channel. This channel is used to send a variety of protocol flits including grants and completions. This is only for NDR packets destined for Route-thru to a remote socket.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_Q_RxL_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - DRS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_Q_RxL_INSERTS_DRS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only DRS flits.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - DRS; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_Q_RxL_INSERTS_DRS.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only DRS flits.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - DRS; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_Q_RxL_INSERTS_DRS.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only DRS flits.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - HOM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xc",
+ "EventName": "UNC_Q_RxL_INSERTS_HOM",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only HOM flits.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - HOM; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC",
+ "EventName": "UNC_Q_RxL_INSERTS_HOM.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only HOM flits.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - HOM; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC",
+ "EventName": "UNC_Q_RxL_INSERTS_HOM.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only HOM flits.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NCB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xa",
+ "EventName": "UNC_Q_RxL_INSERTS_NCB",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCB flits.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NCB; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA",
+ "EventName": "UNC_Q_RxL_INSERTS_NCB.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCB flits.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NCB; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA",
+ "EventName": "UNC_Q_RxL_INSERTS_NCB.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCB flits.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NCS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xb",
+ "EventName": "UNC_Q_RxL_INSERTS_NCS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCS flits.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NCS; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB",
+ "EventName": "UNC_Q_RxL_INSERTS_NCS.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCS flits.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NCS; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB",
+ "EventName": "UNC_Q_RxL_INSERTS_NCS.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCS flits.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NDR",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xe",
+ "EventName": "UNC_Q_RxL_INSERTS_NDR",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NDR flits.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NDR; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE",
+ "EventName": "UNC_Q_RxL_INSERTS_NDR.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NDR flits.",
+ "UMask": "0x1",
"Unit": "QPI LL"
},
{
- "BriefDescription": "Cycles where transmitting QPI link is in half-width mode",
+ "BriefDescription": "Rx Flit Buffer Allocations - NDR; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE",
+ "EventName": "UNC_Q_RxL_INSERTS_NDR.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NDR flits.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - SNP",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xd",
+ "EventName": "UNC_Q_RxL_INSERTS_SNP",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only SNP flits.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - SNP; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD",
+ "EventName": "UNC_Q_RxL_INSERTS_SNP.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only SNP flits.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - SNP; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD",
+ "EventName": "UNC_Q_RxL_INSERTS_SNP.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only SNP flits.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xb",
+ "EventName": "UNC_Q_RxL_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - DRS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_DRS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors DRS flits only.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - DRS; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_DRS.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors DRS flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - DRS; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_DRS.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors DRS flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - HOM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_HOM",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors HOM flits only.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - HOM; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_HOM.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors HOM flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - HOM; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_HOM.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors HOM flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NCB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NCB",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCB flits only.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NCB; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NCB.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCB flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NCB; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NCB.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCB flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NCS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x17",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NCS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCS flits only.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NCS; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x17",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NCS.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCS flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NCS; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x17",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NCS.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCS flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NDR",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1a",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NDR",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NDR flits only.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NDR; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1A",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NDR.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NDR flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NDR; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1A",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NDR.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NDR flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - SNP",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_SNP",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors SNP flits only.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - SNP; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_SNP.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors SNP flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - SNP; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_SNP.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors SNP flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - HOM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.BGF_DRS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled a packet from the HOM message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - DRS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.BGF_HOM",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled a packet from the DRS message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - SNP",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.BGF_NCB",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled a packet from the SNP message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - NDR",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.BGF_NCS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled a packet from the NDR message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - NCS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.BGF_NDR",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled a packet from the NCS message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x20",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - NCB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.BGF_SNP",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled a packet from the NCB message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; Egress Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.EGRESS_CREDITS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled a packet because there were insufficient BGF credits. For details on a message class granularity, use the Egress Credit Occupancy events.",
+ "UMask": "0x40",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; GV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.GV",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled because a GV transition (frequency transition) was taking place.",
+ "UMask": "0x80",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - HOM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3a",
+ "EventName": "UNC_Q_RxL_STALLS_VN1.BGF_DRS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 1.; Stalled a packet from the HOM message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - DRS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3a",
+ "EventName": "UNC_Q_RxL_STALLS_VN1.BGF_HOM",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 1.; Stalled a packet from the DRS message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - SNP",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3a",
+ "EventName": "UNC_Q_RxL_STALLS_VN1.BGF_NCB",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 1.; Stalled a packet from the SNP message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - NDR",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3a",
+ "EventName": "UNC_Q_RxL_STALLS_VN1.BGF_NCS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 1.; Stalled a packet from the NDR message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - NCS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3a",
+ "EventName": "UNC_Q_RxL_STALLS_VN1.BGF_NDR",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 1.; Stalled a packet from the NCS message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x20",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - NCB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3a",
+ "EventName": "UNC_Q_RxL_STALLS_VN1.BGF_SNP",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 1.; Stalled a packet from the NCB message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in L0p",
"Counter": "0,1,2,3",
"EventCode": "0xd",
"EventName": "UNC_Q_TxL0P_POWER_CYCLES",
- "MetricExpr": "(UNC_Q_TxL0P_POWER_CYCLES / UNC_Q_CLOCKTICKS) * 100.",
- "MetricName": "txl0p_power_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Number of QPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 1/2 of the QPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize QPI for snoops and their responses. Use edge detect to count the number of instances when the QPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.",
"Unit": "QPI LL"
},
{
- "BriefDescription": "Number of data flits transmitted ",
+ "BriefDescription": "Cycles in L0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xc",
+ "EventName": "UNC_Q_TxL0_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Bypassed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_Q_TxL_BYPASSED",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an incoming flit was able to bypass the Tx flit buffer and pass directly out the QPI Link. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with no LLR Credits; LLR is almost full",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_TxL_CRC_NO_CREDITS.ALMOST_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles when the Tx side ran out of Link Layer Retry credits, causing the Tx to stall.; When LLR is almost full, we block some but not all packets.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with no LLR Credits; LLR is full",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_TxL_CRC_NO_CREDITS.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles when the Tx side ran out of Link Layer Retry credits, causing the Tx to stall.; When LLR is totally full, we are not allowed to send any packets.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Cycles not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_Q_TxL_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the TxQ is not empty. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 0; Data Tx Flits",
"Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G0.DATA",
"PerPkg": "1",
- "ScaleUnit": "8Bytes",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.; Number of data flits transmitted over QPI. Each flit contains 64b of data. This includes both DRS and NCB data flits (coherent and non-coherent). This can be used to calculate the data bandwidth of the QPI link. One can get a good picture of the QPI-link characteristics by evaluating the protocol flits, data flits, and idle/null flits. This does not include the header flits that go in data packets.",
"UMask": "0x2",
"Unit": "QPI LL"
},
{
- "BriefDescription": "Number of non data (control) flits transmitted ",
+ "BriefDescription": "Flits Transferred - Group 0; Non-Data protocol Tx Flits",
"Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G0.NON_DATA",
"PerPkg": "1",
- "ScaleUnit": "8Bytes",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.; Number of non-NULL non-data flits transmitted across QPI. This basically tracks the protocol overhead on the QPI link. One can get a good picture of the QPI-link characteristics by evaluating the protocol flits, data flits, and idle/null flits. This includes the header flits for data packets.",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; DRS Flits (both Header and Data)",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G1.DRS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of flits transmitted over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency.",
+ "UMask": "0x18",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; DRS Data Flits",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G1.DRS_DATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of data flits transmitted over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency. This does not count data flits transmitted over the NCB channel which transmits non-coherent data. This includes only the data flits (not the header).",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; DRS Header Flits",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G1.DRS_NONDATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of protocol flits transmitted over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency. This does not count data flits transmitted over the NCB channel which transmits non-coherent data. This includes only the header flits (not the data). This includes extended headers.",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; HOM Flits",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G1.HOM",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of flits transmitted over QPI on the home channel.",
+ "UMask": "0x6",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; HOM Non-Request Flits",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G1.HOM_NONREQ",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of non-request flits transmitted over QPI on the home channel. These are most commonly snoop responses, and this event can be used as a proxy for that.",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; HOM Request Flits",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G1.HOM_REQ",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of data request transmitted over QPI on the home channel. This basically counts the number of remote memory requests transmitted over QPI. In conjunction with the local read count in the Home Agent, one can calculate the number of LLC Misses.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; SNP Flits",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G1.SNP",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of snoop request flits transmitted over QPI. These requests are contained in the snoop channel. This does not include snoop responses, which are transmitted on the home channel.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Coherent Bypass Tx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NCB",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of Non-Coherent Bypass flits. These packets are generally used to transmit non-coherent data across QPI.",
+ "UMask": "0xC",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Coherent data Tx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NCB_DATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of Non-Coherent Bypass data flits. These flits are generally used to transmit non-coherent data across QPI. This does not include a count of the DRS (coherent) data flits. This only counts the data flits, not te NCB headers.",
"UMask": "0x4",
"Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Coherent non-data Tx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NCB_NONDATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of Non-Coherent Bypass non-data flits. These packets are generally used to transmit non-coherent data across QPI, and the flits counted here are for headers and other non-data flits. This includes extended headers.",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Coherent standard Tx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NCS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of NCS (non-coherent standard) flits transmitted over QPI. This includes extended headers.",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Data Response Tx Flits - AD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NDR_AD",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of flits transmitted over the NDR (Non-Data Response) channel. This channel is used to send a variety of protocol flits including grants and completions. This is only for NDR packets to the local socket which use the AK ring.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Data Response Tx Flits - AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NDR_AK",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of flits transmitted over the NDR (Non-Data Response) channel. This channel is used to send a variety of protocol flits including grants and completions. This is only for NDR packets destined for Route-thru to a remote socket.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_Q_TxL_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Tx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_Q_TxL_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of flits in the TxQ. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This can be used with the cycles not empty event to track average occupancy, or the allocations event to track average lifetime in the TxQ.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - HOM; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x26",
+ "EventName": "UNC_Q_TxR_AD_HOM_CREDIT_ACQUIRED.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for Home messages on AD.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - HOM; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x26",
+ "EventName": "UNC_Q_TxR_AD_HOM_CREDIT_ACQUIRED.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for Home messages on AD.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD HOM; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_Q_TxR_AD_HOM_CREDIT_OCCUPANCY.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for HOM messages on AD.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD HOM; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_Q_TxR_AD_HOM_CREDIT_OCCUPANCY.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for HOM messages on AD.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_Q_TxR_AD_NDR_CREDIT_ACQUIRED.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for NDR messages on AD.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_Q_TxR_AD_NDR_CREDIT_ACQUIRED.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for NDR messages on AD.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_Q_TxR_AD_NDR_CREDIT_OCCUPANCY.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for NDR messages on AD.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_Q_TxR_AD_NDR_CREDIT_OCCUPANCY.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for NDR messages on AD.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - SNP; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_Q_TxR_AD_SNP_CREDIT_ACQUIRED.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for Snoop messages on AD.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - SNP; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_Q_TxR_AD_SNP_CREDIT_ACQUIRED.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for Snoop messages on AD.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD SNP; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_Q_TxR_AD_SNP_CREDIT_OCCUPANCY.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO fro Snoop messages on AD.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD SNP; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_Q_TxR_AD_SNP_CREDIT_OCCUPANCY.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO fro Snoop messages on AD.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AK NDR",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_Q_TxR_AK_NDR_CREDIT_ACQUIRED",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. Local NDR message class to AK Egress.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AK NDR: for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_Q_TxR_AK_NDR_CREDIT_ACQUIRED.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. Local NDR message class to AK Egress.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AK NDR: for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_Q_TxR_AK_NDR_CREDIT_ACQUIRED.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. Local NDR message class to AK Egress.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AK NDR",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x25",
+ "EventName": "UNC_Q_TxR_AK_NDR_CREDIT_OCCUPANCY",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. Local NDR message class to AK Egress.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AK NDR: for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x25",
+ "EventName": "UNC_Q_TxR_AK_NDR_CREDIT_OCCUPANCY.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. Local NDR message class to AK Egress.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AK NDR: for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x25",
+ "EventName": "UNC_Q_TxR_AK_NDR_CREDIT_OCCUPANCY.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. Local NDR message class to AK Egress.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - DRS; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2a",
+ "EventName": "UNC_Q_TxR_BL_DRS_CREDIT_ACQUIRED.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. DRS message class to BL Egress.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - DRS; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2a",
+ "EventName": "UNC_Q_TxR_BL_DRS_CREDIT_ACQUIRED.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. DRS message class to BL Egress.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - DRS; for Shared VN",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2a",
+ "EventName": "UNC_Q_TxR_BL_DRS_CREDIT_ACQUIRED.VN_SHR",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. DRS message class to BL Egress.",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL DRS; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1f",
+ "EventName": "UNC_Q_TxR_BL_DRS_CREDIT_OCCUPANCY.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. DRS message class to BL Egress.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL DRS; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1f",
+ "EventName": "UNC_Q_TxR_BL_DRS_CREDIT_OCCUPANCY.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. DRS message class to BL Egress.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL DRS; for Shared VN",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1f",
+ "EventName": "UNC_Q_TxR_BL_DRS_CREDIT_OCCUPANCY.VN_SHR",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. DRS message class to BL Egress.",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - NCB; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2b",
+ "EventName": "UNC_Q_TxR_BL_NCB_CREDIT_ACQUIRED.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. NCB message class to BL Egress.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - NCB; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2b",
+ "EventName": "UNC_Q_TxR_BL_NCB_CREDIT_ACQUIRED.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. NCB message class to BL Egress.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL NCB; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_Q_TxR_BL_NCB_CREDIT_OCCUPANCY.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. NCB message class to BL Egress.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL NCB; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_Q_TxR_BL_NCB_CREDIT_OCCUPANCY.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. NCB message class to BL Egress.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - NCS; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2c",
+ "EventName": "UNC_Q_TxR_BL_NCS_CREDIT_ACQUIRED.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. NCS message class to BL Egress.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - NCS; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2c",
+ "EventName": "UNC_Q_TxR_BL_NCS_CREDIT_ACQUIRED.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. NCS message class to BL Egress.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL NCS; for VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_Q_TxR_BL_NCS_CREDIT_OCCUPANCY.VN0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. NCS message class to BL Egress.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL NCS; for VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_Q_TxR_BL_NCS_CREDIT_OCCUPANCY.VN1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. NCS message class to BL Egress.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VNA Credits Returned",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1c",
+ "EventName": "UNC_Q_VNA_CREDIT_RETURNS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of VNA credits returned.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VNA Credits Pending Return - Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_Q_VNA_CREDIT_RETURN_OCCUPANCY",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of VNA credits in the Rx side that are waitng to be returned back across the link.",
+ "Unit": "QPI LL"
}
]
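The UNC_Q_TxL_FLITS_G0 descriptions above spell out how to turn raw flit counts into bandwidth (flits*80b/time for link bandwidth; data flits * 8B/time for data bandwidth in L0, 4B in L0p). Below is a minimal sketch of that arithmetic, assuming the counter deltas and measurement window have already been collected (for instance with perf stat on the lowercase event aliases such as unc_q_txl_flits_g0.data); the function and the sample numbers are illustrative and not part of this patch.

    # Hedged sketch (not part of the patch): apply the bandwidth formulas
    # from the UNC_Q_TxL_FLITS_G0 event descriptions to raw counter deltas.

    def qpi_bandwidth_bytes_per_sec(data_flits, non_data_flits, seconds,
                                    half_width=False):
        """Return (link_bw, data_bw) in bytes/second.

        Every flit carries 80 bits on the link, so link bandwidth is
        flits * 80b / time.  A data flit carries 8 bytes of payload in
        full-width L0 mode; per the description, use 4 bytes per counted
        flit when the link runs in half-width L0p mode.
        """
        total_flits = data_flits + non_data_flits
        link_bw = total_flits * 80 / 8 / seconds   # raw link bandwidth
        payload = 4 if half_width else 8           # L0p vs L0 payload per data flit
        data_bw = data_flits * payload / seconds   # data-only bandwidth
        return link_bw, data_bw

    if __name__ == "__main__":
        # Hypothetical counter deltas over a one-second window.
        link_bw, data_bw = qpi_bandwidth_bytes_per_sec(1_000_000, 250_000, 1.0)
        print(f"link: {link_bw / 1e6:.1f} MB/s, data: {data_bw / 1e6:.1f} MB/s")
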
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/uncore-memory.json b/tools/perf/pmu-events/arch/x86/ivytown/uncore-memory.json
index e8917cb59566..63b49b712c62 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/uncore-memory.json
@@ -1,77 +1,1812 @@
[
{
- "BriefDescription": "Memory page activates for reads and writes",
+ "BriefDescription": "DRAM Activate Count; Activate due to Write",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_M_ACT_COUNT.BYP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Activate Count; Activate due to Read",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_M_ACT_COUNT.RD",
"PerPkg": "1",
- "UMask": "0x3",
+ "PublicDescription": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Activate Count; Activate due to Write",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_M_ACT_COUNT.WR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "ACT command issued by 2 cycle bypass",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M_BYP_CMDS.ACT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CAS command issued by 2 cycle bypass",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M_BYP_CMDS.CAS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PRE command issued by 2 cycle bypass",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M_BYP_CMDS.PRE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM WR_CAS (w/ and w/out auto-pre)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number of DRAM CAS commands issued on this channel.",
+ "UMask": "0xF",
"Unit": "iMC"
},
{
- "BriefDescription": "Read requests to memory controller. Derived from unc_m_cas_count.rd",
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM Reads (RD_CAS + Underfills)",
"Counter": "0,1,2,3",
"EventCode": "0x4",
- "EventName": "LLC_MISSES.MEM_READ",
+ "EventName": "UNC_M_CAS_COUNT.RD",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number of DRAM Read CAS commands issued on this channel (including underfills).",
"UMask": "0x3",
"Unit": "iMC"
},
{
- "BriefDescription": "Write requests to memory controller. Derived from unc_m_cas_count.wr",
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM RD_CAS (w/ and w/out auto-pre)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_REG",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number or DRAM Read CAS commands issued on this channel. This includes both regular RD CAS commands as well as those with implicit Precharge. AutoPre is only used in systems that are using closed page policy. We do not filter based on major mode, as RD_CAS is not issued during WMM (with the exception of underfills).",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Read CAS issued in RMM",
"Counter": "0,1,2,3",
"EventCode": "0x4",
- "EventName": "LLC_MISSES.MEM_WRITE",
+ "EventName": "UNC_M_CAS_COUNT.RD_RMM",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Underfill Read Issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the number of underfill reads that are issued by the memory controller. This will generally be about the same as the number of partial writes, but may be slightly less because of partials hitting in the WPQ. While it is possible for underfills to be issed in both WMM and RMM, this event counts both.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Read CAS issued in WMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_WMM",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM WR_CAS (both Modes)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number of DRAM Write CAS commands issued on this channel.",
"UMask": "0xC",
"Unit": "iMC"
},
{
- "BriefDescription": "Memory controller clock ticks. Use to generate percentages for memory controller CYCLES events",
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Read Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR_RMM",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number of Opportunistic DRAM Write CAS commands issued on this channel while in Read-Major-Mode.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Write Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR_WMM",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number or DRAM Write CAS commands issued on this channel while in Write-Major-Mode.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Clockticks",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_M_DCLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge All Commands",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_M_DRAM_PRE_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that the precharge all command was sent.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_M_DRAM_REFRESH.HIGH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of refreshes issued.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_M_DRAM_REFRESH.PANIC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of refreshes issued.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "ECC Correctable Errors",
"Counter": "0,1,2,3",
- "EventName": "UNC_M_CLOCKTICKS",
+ "EventCode": "0x9",
+ "EventName": "UNC_M_ECC_CORRECTABLE_ERRORS",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of ECC errors detected and corrected by the iMC on this channel. This counter is only useful with ECC DRAM devices. This count will increment one time for each correction regardless of the number of bits corrected. The iMC can correct up to 4 bit errors in independent channel mode and 8 bit erros in lockstep mode.",
"Unit": "iMC"
},
{
- "BriefDescription": "Cycles where DRAM ranks are in power down (CKE) mode",
+ "BriefDescription": "Cycles in a Major Mode; Isoch Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.ISOCH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; We group these two modes together so that we can use four counters to track each of the major modes at one time. These major modes are used whenever there is an ISOCH txn in the memory controller. In these mode, only ISOCH transactions are processed.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Partial Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.PARTIAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; This major mode is used to drain starved underfill reads. Regular reads and writes are blocked and only underfill reads will be processed.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Read Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.READ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; Read Major Mode is the default mode for the iMC, as reads are generally more critical to forward progress than writes.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Write Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.WRITE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; This mode is triggered when the WPQ hits high occupancy and causes writes to be higher priority than reads. This can cause blips in the available read bandwidth in the system and temporarily increase read latencies in order to achieve better bus utilizations and higher bandwidth.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Channel DLLOFF Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_M_POWER_CHANNEL_DLLOFF",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles when all the ranks in the channel are in CKE Slow (DLLOFF) mode.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Channel PPD Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M_POWER_CHANNEL_PPD",
- "MetricExpr": "(UNC_M_POWER_CHANNEL_PPD / UNC_M_CLOCKTICKS) * 100.",
- "MetricName": "power_channel_ppd %",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles when all the ranks in the channel are in PPD mode. If IBT=off is enabled, then this can be used to count those cycles. If it is not enabled, then this can count the number of cycles when that could have been taken advantage of.",
"Unit": "iMC"
},
{
- "BriefDescription": "Cycles all ranks are in critical thermal throttle",
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK6",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK7",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Critical Throttle Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES",
- "MetricExpr": "(UNC_M_POWER_CRITICAL_THROTTLE_CYCLES / UNC_M_CLOCKTICKS) * 100.",
- "MetricName": "power_critical_throttle_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the iMC is in critical thermal throttling. When this happens, all traffic is blocked. This should be rare unless something bad is going on in the platform. There is no filtering by rank for this event.",
"Unit": "iMC"
},
{
- "BriefDescription": "Cycles Memory is in self refresh power mode",
+ "BriefDescription": "Clock-Enabled Self-Refresh",
"Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M_POWER_SELF_REFRESH",
- "MetricExpr": "(UNC_M_POWER_SELF_REFRESH / UNC_M_CLOCKTICKS) * 100.",
- "MetricName": "power_self_refresh %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the iMC is in self-refresh and the iMC still has a clock. This happens in some package C-states. For example, the PCU may ask the iMC to enter self-refresh even though some of the cores are still processing. One use of this is for Monroe technology. Self-refresh is required during package C3 and C6, but there is no clock in the iMC at this time, so it is not possible to count these cases.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.; Thermal throttling is performed per DIMM. We support 3 DIMMs per channel. This ID allows us to filter by ID.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK4",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x10",
"Unit": "iMC"
},
{
- "BriefDescription": "Memory page conflicts",
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK5",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK6",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK7",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Preemption Count; Read over Read Preemption",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_M_PREEMPTION.RD_PREEMPT_RD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.; Filter for when a read preempts another read.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Preemption Count; Read over Write Preemption",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_M_PREEMPTION.RD_PREEMPT_WR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.; Filter for when a read preempts a write.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to bypass",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_M_PRE_COUNT.BYP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to timer expiration",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_M_PRE_COUNT.PAGE_CLOSE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.; Counts the number of DRAM Precharge commands sent on this channel as a result of the page close counter expiring. This does not include implicit precharge commands sent in auto-precharge mode.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.; Precharges due to page miss",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.PAGE_MISS",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.; Counts the number of DRAM Precharge commands sent on this channel as a result of page misses. This does not include explicit precharge commands sent with CAS commands in Auto-Precharge mode. This does not include PRE commands sent as a result of the page close counter expiration.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to read",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_M_PRE_COUNT.RD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to write",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_M_PRE_COUNT.WR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with HIGH priority",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M_RD_CAS_PRIO.HIGH",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with LOW priority",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M_RD_CAS_PRIO.LOW",
+ "PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with MEDIUM priority",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M_RD_CAS_PRIO.MED",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with PANIC NON ISOCH priority (starved)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M_RD_CAS_PRIO.PANIC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xb0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xb0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xb0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xb0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xb0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xb0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xb0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xb0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_M_RPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Read Pending Queue is not empty. This can then be used to calculate the average occupancy (in conjunction with the Read Pending Queue Occupancy count). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This filter is to be used in conjunction with the occupancy filter so that one can correctly track the average occupancies for schedulable entries and scheduled requests.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_M_RPQ_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "VMSE MXB write buffer occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_M_VMSE_MXB_WR_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "VMSE WR PUSH issued; VMSE write PUSH issued in RMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_M_VMSE_WR_PUSH.RMM",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "VMSE WR PUSH issued; VMSE write PUSH issued in WMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_M_VMSE_WR_PUSH.WMM",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Transition from WMM to RMM because of low threshold; Transition from WMM to RMM because of starve counter",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xc0",
+ "EventName": "UNC_M_WMM_TO_RMM.LOW_THRESH",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Transition from WMM to RMM because of low threshold",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xc0",
+ "EventName": "UNC_M_WMM_TO_RMM.STARVE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Transition from WMM to RMM because of low threshold",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xc0",
+ "EventName": "UNC_M_WMM_TO_RMM.VMSE_RETRY",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Full Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_M_WPQ_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the Write Pending Queue is full. When the WPQ is full, the HA will not be able to issue any additional read requests into the iMC. This count should be similar count in the HA which tracks the number of cycles that the HA has no WPQ credits, just somewhat smaller to account for the credit return overhead.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_M_WPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Write Pending Queue is not empty. This can then be used to calculate the average queue occupancy (in conjunction with the WPQ Occupancy Accumulation count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_M_WPQ_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the Write Pending Queue. This can then be used to calculate the average queuing latency (in conjunction with the WPQ occupancy count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_M_WPQ_READ_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_M_WPQ_WRITE_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Not getting the requested Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xc1",
+ "EventName": "UNC_M_WRONG_MM",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xb8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xb8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xb8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xb8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xb8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xb8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xb8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xb8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/uncore-other.json b/tools/perf/pmu-events/arch/x86/ivytown/uncore-other.json
new file mode 100644
index 000000000000..af289aa6c98e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/ivytown/uncore-other.json
@@ -0,0 +1,2398 @@
+[
+ {
+ "BriefDescription": "Address Match (Conflict) Count; Conflict Merges",
+ "Counter": "0,1",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_ADDRESS_MATCH.MERGE_COUNT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when an inbound write (from a device to memory or another device) had an address match with another request in the write cache.; When two requests to the same address from the same source are received back to back, it is possible to merge the two of them together.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Address Match (Conflict) Count; Conflict Stalls",
+ "Counter": "0,1",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_ADDRESS_MATCH.STALL_COUNT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when an inbound write (from a device to memory or another device) had an address match with another request in the write cache.; When it is not possible to merge two conflicting requests, a stall event occurs. This is bad for performance.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Write Ack Pending Occupancy; Any Source",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_CACHE_ACK_PENDING_OCCUPANCY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of writes that have acquired ownership but have not yet returned their data to the uncore. These writes are generally queued up in the switch trying to get to the head of their queues so that they can post their data. The queue occuapancy increments when the ACK is received, and decrements when either the data is returned OR a tickle is received and ownership is released. Note that a single tickle can result in multiple decrements.; Tracks only those requests that come from the port specified in the IRP_PmonFilter.OrderingQ register. This register allows one to select one specific queue. It is not possible to monitor multiple queues at a time.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Write Ack Pending Occupancy; Select Source",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_CACHE_ACK_PENDING_OCCUPANCY.SOURCE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of writes that have acquired ownership but have not yet returned their data to the uncore. These writes are generally queued up in the switch trying to get to the head of their queues so that they can post their data. The queue occuapancy increments when the ACK is received, and decrements when either the data is returned OR a tickle is received and ownership is released. Note that a single tickle can result in multiple decrements.; Tracks all requests from any source port.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outstanding Write Ownership Occupancy; Any Source",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_CACHE_OWN_OCCUPANCY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of writes (and write prefetches) that are outstanding in the uncore trying to acquire ownership in each cycle. This can be used with the write transaction count to calculate the average write latency in the uncore. The occupancy increments when a write request is issued, and decrements when the data is returned.; Tracks all requests from any source port.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outstanding Write Ownership Occupancy; Select Source",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_CACHE_OWN_OCCUPANCY.SOURCE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of writes (and write prefetches) that are outstanding in the uncore trying to acquire ownership in each cycle. This can be used with the write transaction count to calculate the average write latency in the uncore. The occupancy increments when a write request is issued, and decrements when the data is returned.; Tracks only those requests that come from the port specified in the IRP_PmonFilter.OrderingQ register. This register allows one to select one specific queue. It is not possible to monitor multiple queues at a time.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outstanding Read Occupancy; Any Source",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_CACHE_READ_OCCUPANCY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of reads that are outstanding in the uncore in each cycle. This can be used with the read transaction count to calculate the average read latency in the uncore. The occupancy increments when a read request is issued, and decrements when the data is returned.; Tracks all requests from any source port.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outstanding Read Occupancy; Select Source",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_CACHE_READ_OCCUPANCY.SOURCE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of reads that are outstanding in the uncore in each cycle. This can be used with the read transaction count to calculate the average read latency in the uncore. The occupancy increments when a read request is issued, and decrements when the data is returned.; Tracks only those requests that come from the port specified in the IRP_PmonFilter.OrderingQ register. This register allows one to select one specific queue. It is not possible to monitor multiple queues at a time.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Total Write Cache Occupancy; Any Source",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.; Tracks all requests from any source port.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Total Write Cache Occupancy; Select Source",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.SOURCE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.; Tracks only those requests that come from the port specified in the IRP_PmonFilter.OrderingQ register. This register allows one to select one specific queue. It is not possible to monitor multiple queues at a time.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outstanding Write Occupancy; Any Source",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_CACHE_WRITE_OCCUPANCY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of writes (and write prefetches) that are outstanding in the uncore in each cycle. This can be used with the transaction count event to calculate the average latency in the uncore. The occupancy increments when the ownership fetch/prefetch is issued, and decrements the data is returned to the uncore.; Tracks all requests from any source port.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outstanding Write Occupancy; Select Source",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_CACHE_WRITE_OCCUPANCY.SOURCE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of writes (and write prefetches) that are outstanding in the uncore in each cycle. This can be used with the transaction count event to calculate the average latency in the uncore. The occupancy increments when the ownership fetch/prefetch is issued, and decrements the data is returned to the uncore.; Tracks only those requests that come from the port specified in the IRP_PmonFilter.OrderingQ register. This register allows one to select one specific queue. It is not possible to monitor multiple queues at a time.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Clocks in the IRP",
+ "Counter": "0,1",
+ "EventName": "UNC_I_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of clocks in the IRP.",
+ "Unit": "IRP"
+ },
+ {
+ "Counter": "0,1",
+ "EventCode": "0xb",
+ "EventName": "UNC_I_RxR_AK_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the AK Ingress is full. This queue is where the IRP receives responses from R2PCIe (the ring).",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "AK Ingress Occupancy",
+ "Counter": "0,1",
+ "EventCode": "0xa",
+ "EventName": "UNC_I_RxR_AK_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the AK Ingress. This queue is where the IRP receives responses from R2PCIe (the ring).",
+ "Unit": "IRP"
+ },
+ {
+ "Counter": "0,1",
+ "EventCode": "0xc",
+ "EventName": "UNC_I_RxR_AK_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the AK Ingress in each cycles. This queue is where the IRP receives responses from R2PCIe (the ring).",
+ "Unit": "IRP"
+ },
+ {
+ "Counter": "0,1",
+ "EventCode": "0x4",
+ "EventName": "UNC_I_RxR_BL_DRS_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL Ingress Occupancy - DRS",
+ "Counter": "0,1",
+ "EventCode": "0x1",
+ "EventName": "UNC_I_RxR_BL_DRS_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "Counter": "0,1",
+ "EventCode": "0x7",
+ "EventName": "UNC_I_RxR_BL_DRS_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "Counter": "0,1",
+ "EventCode": "0x5",
+ "EventName": "UNC_I_RxR_BL_NCB_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL Ingress Occupancy - NCB",
+ "Counter": "0,1",
+ "EventCode": "0x2",
+ "EventName": "UNC_I_RxR_BL_NCB_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "Counter": "0,1",
+ "EventCode": "0x8",
+ "EventName": "UNC_I_RxR_BL_NCB_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "Counter": "0,1",
+ "EventCode": "0x6",
+ "EventName": "UNC_I_RxR_BL_NCS_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL Ingress Occupancy - NCS",
+ "Counter": "0,1",
+ "EventCode": "0x3",
+ "EventName": "UNC_I_RxR_BL_NCS_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "Counter": "0,1",
+ "EventCode": "0x9",
+ "EventName": "UNC_I_RxR_BL_NCS_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Tickle Count; Ownership Lost",
+ "Counter": "0,1",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TICKLES.LOST_OWNERSHIP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of tickles that are received. This is for both explicit (from Cbo) and implicit (internal conflict) tickles.; Tracks the number of requests that lost ownership as a result of a tickle. When a tickle comes in, if the request is not at the head of the queue in the switch, then that request as well as any requests behind it in the switch queue will lose ownership and have to re-acquire it later when they get to the head of the queue. This will therefore track the number of requests that lost ownership and not just the number of tickles.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Tickle Count; Data Returned",
+ "Counter": "0,1",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TICKLES.TOP_OF_QUEUE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of tickles that are received. This is for both explicit (from Cbo) and implicit (internal conflict) tickles.; Tracks the number of cases when a tickle was received but the requests was at the head of the queue in the switch. In this case, data is returned rather than releasing ownership.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count: Read Prefetches",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_TRANSACTIONS.PD_PREFETCHES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Read Prefetches",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_TRANSACTIONS.RD_PREFETCHES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks the number of read prefetches.",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Reads",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_TRANSACTIONS.READS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks only read requests (not including read prefetches).",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Writes",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_TRANSACTIONS.WRITES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Trackes only write requests. Each write request should have a prefetch, so there is no need to explicitly track these requests. For writes that are tickled and have to retry, the counter will be incremented for each retry.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No AD Egress Credit Stalls",
+ "Counter": "0,1",
+ "EventCode": "0x18",
+ "EventName": "UNC_I_TxR_AD_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number times when it is not possible to issue a request to the R2PCIe because there are no AD Egress Credits available.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No BL Egress Credit Stalls",
+ "Counter": "0,1",
+ "EventCode": "0x19",
+ "EventName": "UNC_I_TxR_BL_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number times when it is not possible to issue data to the R2PCIe because there are no BL Egress Credits available.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
+ "EventCode": "0xe",
+ "EventName": "UNC_I_TxR_DATA_INSERTS_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of requests issued to the switch (towards the devices).",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
+ "EventCode": "0xf",
+ "EventName": "UNC_I_TxR_DATA_INSERTS_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of requests issued to the switch (towards the devices).",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Request Queue Occupancy",
+ "Counter": "0,1",
+ "EventCode": "0xd",
+ "EventName": "UNC_I_TxR_REQUEST_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumultes the number of outstanding outbound requests from the IRP to the switch (towards the devices). This can be used in conjuection with the allocations event in order to calculate average latency of outbound requests.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Write Ordering Stalls",
+ "Counter": "0,1",
+ "EventCode": "0x1a",
+ "EventName": "UNC_I_WRITE_ORDERING_STALL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are pending write ACK's in the switch but the switch->IRP pipeline is not utilized.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Number of uclks in domain",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_R2_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of uclks in the R2PCIe uclk domain. This could be slightly different than the count in the Ubox because of enable/freeze delays. However, because the R2PCIe is close to the Ubox, they generally should not diverge by more than a handful of cycles.",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credit Acquired; DRS",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of credits that are acquired in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).; Credits to the IIO for the DRS message class.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credit Acquired; NCB",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of credits that are acquired in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).; Credits to the IIO for the NCB message class.",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credit Acquired; NCS",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of credits that are acquired in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).; Credits to the IIO for the NCS message class.",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Failed to Acquire a Credit; DRS",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_R2_IIO_CREDITS_REJECT.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).; Credits to the IIO for the DRS message class.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credits in Use; DRS",
+ "Counter": "0,1",
+ "EventCode": "0x32",
+ "EventName": "UNC_R2_IIO_CREDITS_USED.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when one or more credits in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).; Credits to the IIO for the DRS message class.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credits in Use; NCB",
+ "Counter": "0,1",
+ "EventCode": "0x32",
+ "EventName": "UNC_R2_IIO_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when one or more credits in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).; Credits to the IIO for the NCB message class.",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credits in Use; NCS",
+ "Counter": "0,1",
+ "EventCode": "0x32",
+ "EventName": "UNC_R2_IIO_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when one or more credits in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).; Credits to the IIO for the NCS message class.",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xCC",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Counterclockwise and Even on VRing 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CCW_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Counterclockwise and Odd on VRing 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CCW_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Counterclockwise and Even on VRing 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CCW_VR1_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity on Virtual Ring 1.",
+ "UMask": "0x40",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Counterclockwise and Odd on VRing 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CCW_VR1_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity on Virtual Ring 1.",
+ "UMask": "0x80",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x33",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Clockwise and Even on VRing 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CW_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Clockwise and Odd on VRing 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CW_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Clockwise and Even on VRing 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CW_VR1_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity on Virtual Ring 1.",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Clockwise and Odd on VRing 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CW_VR1_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity on Virtual Ring 1.",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xCC",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Counterclockwise and Even on VRing 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CCW_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Counterclockwise and Odd on VRing 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CCW_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Counterclockwise and Even on VRing 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CCW_VR1_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity on Virtual Ring 1.",
+ "UMask": "0x40",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Counterclockwise and Odd on VRing 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CCW_VR1_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity on Virtual Ring 1.",
+ "UMask": "0x80",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x33",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Clockwise and Even on VRing 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CW_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Clockwise and Odd on VRing 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CW_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Clockwise and Even on VRing 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CW_VR1_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity on Virtual Ring 1.",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Clockwise and Odd on VRing 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CW_VR1_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity on Virtual Ring 1.",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xCC",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Counterclockwise and Even on VRing 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CCW_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Counterclockwise and Odd on VRing 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CCW_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Counterclockwise and Even on VRing 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CCW_VR1_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity on Virtual Ring 1.",
+ "UMask": "0x40",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Counterclockwise and Odd on VRing 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CCW_VR1_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity on Virtual Ring 1.",
+ "UMask": "0x80",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x33",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Clockwise and Even on VRing 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CW_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Clockwise and Odd on VRing 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CW_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Clockwise and Even on VRing 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CW_VR1_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity on Virtual Ring 1.",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Clockwise and Odd on VRing 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CW_VR1_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity on Virtual Ring 1.",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 IV Ring in Use; Any",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA",
+ "EventName": "UNC_R2_RING_IV_USED.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. The IV ring is unidirectional. Whether UP or DN is used is dependent on the system programming. Thereofore, one should generally set both the UP and DN bits for a given polarity (or both) at a given time.; Filters any polarity",
+ "UMask": "0xFF",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 IV Ring in Use; Counterclockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xa",
+ "EventName": "UNC_R2_RING_IV_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. The IV ring is unidirectional. Whether UP or DN is used is dependent on the system programming. Thereofore, one should generally set both the UP and DN bits for a given polarity (or both) at a given time.; Filters for Counterclockwise polarity",
+ "UMask": "0xCC",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 IV Ring in Use; Clockwise",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xa",
+ "EventName": "UNC_R2_RING_IV_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. The IV ring is unidirectional. Whether UP or DN is used is dependent on the system programming. Thereofore, one should generally set both the UP and DN bits for a given polarity (or both) at a given time.; Filters for Clockwise polarity",
+ "UMask": "0x33",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "AK Ingress Bounced",
+ "EventCode": "0x12",
+ "EventName": "UNC_R2_RxR_AK_BOUNCES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a request destined for the AK ingress bounced.",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "AK Ingress Bounced; Counterclockwise",
+ "EventCode": "0x12",
+ "EventName": "UNC_R2_RxR_AK_BOUNCES.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a request destined for the AK ingress bounced.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "AK Ingress Bounced; Clockwise",
+ "EventCode": "0x12",
+ "EventName": "UNC_R2_RxR_AK_BOUNCES.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a request destined for the AK ingress bounced.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; NCB",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_R2_RxR_CYCLES_NE.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Ingress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCB Ingress Queue",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; NCS",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_R2_RxR_CYCLES_NE.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Ingress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCS Ingress Queue",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NCB",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_R2_RxR_INSERTS.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the R2PCIe Ingress. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCB Ingress Queue",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NCS",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_R2_RxR_INSERTS.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the R2PCIe Ingress. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCS Ingress Queue",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy Accumulator; DRS",
+ "EventCode": "0x13",
+ "EventName": "UNC_R2_RxR_OCCUPANCY.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given R2PCIe Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the R2PCIe Ingress Not Empty event to calculate average occupancy or the R2PCIe Ingress Allocations event in order to calculate average queuing latency.; DRS Ingress Queue",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Full; AD",
+ "EventCode": "0x25",
+ "EventName": "UNC_R2_TxR_CYCLES_FULL.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress buffer is full.; AD Egress Queue",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Full; AK",
+ "EventCode": "0x25",
+ "EventName": "UNC_R2_TxR_CYCLES_FULL.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress buffer is full.; AK Egress Queue",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Full; BL",
+ "EventCode": "0x25",
+ "EventName": "UNC_R2_TxR_CYCLES_FULL.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress buffer is full.; BL Egress Queue",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Not Empty; AD",
+ "EventCode": "0x23",
+ "EventName": "UNC_R2_TxR_CYCLES_NE.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.; AD Egress Queue",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Not Empty; AK",
+ "EventCode": "0x23",
+ "EventName": "UNC_R2_TxR_CYCLES_NE.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.; AK Egress Queue",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Not Empty; BL",
+ "EventCode": "0x23",
+ "EventName": "UNC_R2_TxR_CYCLES_NE.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.; BL Egress Queue",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; AD CCW",
+ "Counter": "0,1",
+ "EventCode": "0x28",
+ "EventName": "UNC_R2_TxR_NACK_CCW.AD",
+ "PerPkg": "1",
+ "PublicDescription": "AD CounterClockwise Egress Queue",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; AK CCW",
+ "Counter": "0,1",
+ "EventCode": "0x28",
+ "EventName": "UNC_R2_TxR_NACK_CCW.AK",
+ "PerPkg": "1",
+ "PublicDescription": "AK CounterClockwise Egress Queue",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; BL CCW",
+ "Counter": "0,1",
+ "EventCode": "0x28",
+ "EventName": "UNC_R2_TxR_NACK_CCW.BL",
+ "PerPkg": "1",
+ "PublicDescription": "BL CounterClockwise Egress Queue",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CW NACK; AD CW",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.AD",
+ "PerPkg": "1",
+ "PublicDescription": "AD Clockwise Egress Queue",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CW NACK; AK CW",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.AK",
+ "PerPkg": "1",
+ "PublicDescription": "AK Clockwise Egress Queue",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CW NACK; BL CW",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.BL",
+ "PerPkg": "1",
+ "PublicDescription": "BL Clockwise Egress Queue",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Number of uclks in domain",
+ "Counter": "0,1,2",
+ "EventCode": "0x1",
+ "EventName": "UNC_R3_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of uclks in the QPI uclk domain. This could be slightly different than the count in the Ubox because of enable/freeze delays. However, because the QPI Agent is close to the Ubox, they generally should not diverge by more than a handful of cycles.",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2c",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO10",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes); Cbox 10",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2c",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO11",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes); Cbox 11",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2c",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO12",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes); Cbox 12",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2c",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO13",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes); Cbox 13",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2c",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO14",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes); Cbox 14&16",
+ "UMask": "0x40",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2c",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO8",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes); Cbox 8",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2c",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO9",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes); Cbox 9",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2b",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO0",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 0",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2b",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO1",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2b",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO2",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 2",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2b",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO3",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 3",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2b",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO4",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 4",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2b",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO5",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 5",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2b",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO6",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 6",
+ "UMask": "0x40",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2b",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO7",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 7",
+ "UMask": "0x80",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "HA/R2 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2f",
+ "EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.HA0",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to either HA or R2 on the BL Ring; HA0",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "HA/R2 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2f",
+ "EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.HA1",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to either HA or R2 on the BL Ring; HA1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "HA/R2 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2f",
+ "EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.R2_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to either HA or R2 on the BL Ring; R2 NCB Messages",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "HA/R2 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2f",
+ "EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.R2_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to either HA or R2 on the BL Ring; R2 NCS Messages",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x29",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN0_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the AD Ring; VN0 HOM Messages",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x29",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN0_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the AD Ring; VN0 NDR Messages",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x29",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the AD Ring; VN0 SNP Messages",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x29",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN1_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the AD Ring; VN1 HOM Messages",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x29",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN1_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the AD Ring; VN1 NDR Messages",
+ "UMask": "0x40",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x29",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the AD Ring; VN1 SNP Messages",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x29",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the AD Ring; VNA",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 BL Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2d",
+ "EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN0_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the BL Ring; VN0 HOM Messages",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 BL Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2d",
+ "EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN0_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the BL Ring; VN0 NDR Messages",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 BL Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2d",
+ "EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the BL Ring; VN0 SNP Messages",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 BL Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2d",
+ "EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN1_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the BL Ring; VN1 HOM Messages",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 BL Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2d",
+ "EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN1_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the BL Ring; VN1 NDR Messages",
+ "UMask": "0x40",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 BL Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2d",
+ "EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the BL Ring; VN1 SNP Messages",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 BL Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2d",
+ "EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the BL Ring; VNA",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2a",
+ "EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN0_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the AD Ring; VN0 HOM Messages",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2a",
+ "EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN0_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the AD Ring; VN0 NDR Messages",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2a",
+ "EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the AD Ring; VN0 SNP Messages",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2a",
+ "EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN1_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the AD Ring; VN1 HOM Messages",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2a",
+ "EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN1_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the AD Ring; VN1 NDR Messages",
+ "UMask": "0x40",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2a",
+ "EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the AD Ring; VN1 SNP Messages",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 AD Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2a",
+ "EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the AD Ring; VNA",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2e",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN0_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the BL Ring; VN0 HOM Messages",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2e",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN0_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the BL Ring; VN0 NDR Messages",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2e",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the BL Ring; VN0 SNP Messages",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2e",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN1_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the BL Ring; VN1 HOM Messages",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2e",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN1_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the BL Ring; VN1 NDR Messages",
+ "UMask": "0x40",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2e",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the BL Ring; VN1 SNP Messages",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "Counter": "0,1",
+ "EventCode": "0x2e",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the BL Ring; VNA",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Counterclockwise",
+ "Counter": "0,1,2",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xCC",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Counterclockwise and Even on VRing 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CCW_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Counterclockwise and Odd on VRing 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CCW_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Clockwise",
+ "Counter": "0,1,2",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x33",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Clockwise and Even on VRing 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CW_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Clockwise and Odd on VRing 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CW_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Counterclockwise",
+ "Counter": "0,1,2",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xCC",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Counterclockwise and Even on VRing 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CCW_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Counterclockwise and Odd on VRing 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CCW_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Clockwise",
+ "Counter": "0,1,2",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x33",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Clockwise and Even on VRing 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CW_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Clockwise and Odd on VRing 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CW_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Counterclockwise",
+ "Counter": "0,1,2",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xCC",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Counterclockwise and Even on VRing 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CCW_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Counterclockwise and Odd on VRing 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CCW_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Clockwise",
+ "Counter": "0,1,2",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x33",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Clockwise and Even on VRing 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CW_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Clockwise and Odd on VRing 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CW_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R2 IV Ring in Use; Any",
+ "Counter": "0,1,2",
+ "EventCode": "0xA",
+ "EventName": "UNC_R3_RING_IV_USED.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. The IV ring is unidirectional. Whether UP or DN is used is dependent on the system programming. Thereofore, one should generally set both the UP and DN bits for a given polarity (or both) at a given time.; Filters any polarity",
+ "UMask": "0xFF",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R2 IV Ring in Use; Counterclockwise",
+ "Counter": "0,1,2",
+ "EventCode": "0xa",
+ "EventName": "UNC_R3_RING_IV_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. The IV ring is unidirectional. Whether UP or DN is used is dependent on the system programming. Thereofore, one should generally set both the UP and DN bits for a given polarity (or both) at a given time.; Filters for Counterclockwise polarity",
+ "UMask": "0xCC",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R2 IV Ring in Use; Clockwise",
+ "Counter": "0,1,2",
+ "EventCode": "0xa",
+ "EventName": "UNC_R3_RING_IV_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. The IV ring is unidirectional. Whether UP or DN is used is dependent on the system programming. Thereofore, one should generally set both the UP and DN bits for a given polarity (or both) at a given time.; Filters for Clockwise polarity",
+ "UMask": "0x33",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "AD Ingress Bypassed",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_R3_RxR_AD_BYPASSED",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when the AD Ingress was bypassed and an incoming transaction was bypassed directly across the BGF and into the qfclk domain.",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Bypassed",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_R3_RxR_BYPASSED.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when the Ingress was bypassed and an incoming transaction was bypassed directly across the BGF and into the qfclk domain.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; HOM",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_R3_RxR_CYCLES_NE.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; HOM Ingress Queue",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; NDR",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_R3_RxR_CYCLES_NE.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; NDR Ingress Queue",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; SNP",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_R3_RxR_CYCLES_NE.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; SNP Ingress Queue",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; DRS",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; DRS Ingress Queue",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; HOM",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; HOM Ingress Queue",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NCB",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCB Ingress Queue",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NCS",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCS Ingress Queue",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NDR",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; NDR Ingress Queue",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; SNP",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; SNP Ingress Queue",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy Accumulator; DRS",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.; DRS Ingress Queue",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy Accumulator; HOM",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.; HOM Ingress Queue",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy Accumulator; NCB",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.; NCB Ingress Queue",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy Accumulator; NCS",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.; NCS Ingress Queue",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy Accumulator; NDR",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.; NDR Ingress Queue",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy Accumulator; SNP",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.; SNP Ingress Queue",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Egress NACK; AK CCW",
+ "Counter": "0,1",
+ "EventCode": "0x28",
+ "EventName": "UNC_R3_TxR_NACK_CCW.AD",
+ "PerPkg": "1",
+ "PublicDescription": "BL CounterClockwise Egress Queue",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Egress NACK; BL CW",
+ "Counter": "0,1",
+ "EventCode": "0x28",
+ "EventName": "UNC_R3_TxR_NACK_CCW.AK",
+ "PerPkg": "1",
+ "PublicDescription": "AD Clockwise Egress Queue",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Egress NACK; BL CCW",
+ "Counter": "0,1",
+ "EventCode": "0x28",
+ "EventName": "UNC_R3_TxR_NACK_CCW.BL",
+ "PerPkg": "1",
+ "PublicDescription": "AD CounterClockwise Egress Queue",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Egress NACK; AD CW",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R3_TxR_NACK_CW.AD",
+ "PerPkg": "1",
+ "PublicDescription": "AD Clockwise Egress Queue",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Egress NACK; AD CCW",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R3_TxR_NACK_CW.AK",
+ "PerPkg": "1",
+ "PublicDescription": "AD CounterClockwise Egress Queue",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Egress NACK; AK CW",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R3_TxR_NACK_CW.BL",
+ "PerPkg": "1",
+ "PublicDescription": "BL Clockwise Egress Queue",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; DRS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.; Filter for Data Response (DRS). DRS is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using DRS.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; HOM Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.; Filter for the Home (HOM) message class. HOM is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; NCB Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.; Filter for Non-Coherent Broadcast (NCB). NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; NCS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.; Filter for Non-Coherent Standard (NCS). NCS is commonly used for ?",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; NDR Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.; NDR packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; SNP Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.; Filter for Snoop (SNP) message class. SNP is used for outgoing snoops. Note that snoop responses flow on the HOM message class.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; DRS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Filter for Data Response (DRS). DRS is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using DRS.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; HOM Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Filter for the Home (HOM) message class. HOM is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; NCB Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Filter for Non-Coherent Broadcast (NCB). NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; NCS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Filter for Non-Coherent Standard (NCS). NCS is commonly used for ?",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; NDR Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; NDR packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; SNP Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Filter for Snoop (SNP) message class. SNP is used for outgoing snoops. Note that snoop responses flow on the HOM message class.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Acquisition Failed on DRS; DRS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x39",
+ "EventName": "UNC_R3_VN1_CREDITS_REJECT.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.; Filter for Data Response (DRS). DRS is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using DRS.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Acquisition Failed on DRS; HOM Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x39",
+ "EventName": "UNC_R3_VN1_CREDITS_REJECT.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.; Filter for the Home (HOM) message class. HOM is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Acquisition Failed on DRS; NCB Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x39",
+ "EventName": "UNC_R3_VN1_CREDITS_REJECT.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.; Filter for Non-Coherent Broadcast (NCB). NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Acquisition Failed on DRS; NCS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x39",
+ "EventName": "UNC_R3_VN1_CREDITS_REJECT.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.; Filter for Non-Coherent Standard (NCS). NCS is commonly used for ?",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Acquisition Failed on DRS; NDR Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x39",
+ "EventName": "UNC_R3_VN1_CREDITS_REJECT.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.; NDR packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Acquisition Failed on DRS; SNP Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x39",
+ "EventName": "UNC_R3_VN1_CREDITS_REJECT.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.; Filter for Snoop (SNP) message class. SNP is used for outgoing snoops. Note that snoop responses flow on the HOM message class.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; DRS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x38",
+ "EventName": "UNC_R3_VN1_CREDITS_USED.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Filter for Data Response (DRS). DRS is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using DRS.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; HOM Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x38",
+ "EventName": "UNC_R3_VN1_CREDITS_USED.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Filter for the Home (HOM) message class. HOM is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; NCB Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x38",
+ "EventName": "UNC_R3_VN1_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Filter for Non-Coherent Broadcast (NCB). NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; NCS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x38",
+ "EventName": "UNC_R3_VN1_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Filter for Non-Coherent Standard (NCS). NCS is commonly used for ?",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; NDR Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x38",
+ "EventName": "UNC_R3_VN1_CREDITS_USED.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; NDR packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; SNP Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x38",
+ "EventName": "UNC_R3_VN1_CREDITS_USED.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Filter for Snoop (SNP) message class. SNP is used for outgoing snoops. Note that snoop responses flow on the HOM message class.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA credit Acquisitions",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_R3_VNA_CREDITS_ACQUIRED",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI VNA Credit acquisitions. This event can be used in conjunction with the VNA In-Use Accumulator to calculate the average lifetime of a credit holder. VNA credits are used by all message classes in order to communicate across QPI. If a packet is unable to acquire credits, it will then attempt to use credts from the VN0 pool. Note that a single packet may require multiple flit buffers (i.e. when data is being transfered). Therefore, this event will increment by the number of credits acquired in each cycle. Filtering based on message class is not provided. One can count the number of packets transfered in a given message class using an qfclk event.",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA credit Acquisitions; HOM Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_R3_VNA_CREDITS_ACQUIRED.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI VNA Credit acquisitions. This event can be used in conjunction with the VNA In-Use Accumulator to calculate the average lifetime of a credit holder. VNA credits are used by all message classes in order to communicate across QPI. If a packet is unable to acquire credits, it will then attempt to use credts from the VN0 pool. Note that a single packet may require multiple flit buffers (i.e. when data is being transfered). Therefore, this event will increment by the number of credits acquired in each cycle. Filtering based on message class is not provided. One can count the number of packets transfered in a given message class using an qfclk event.; Filter for the Home (HOM) message class. HOM is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA credit Acquisitions; HOM Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_R3_VNA_CREDITS_ACQUIRED.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI VNA Credit acquisitions. This event can be used in conjunction with the VNA In-Use Accumulator to calculate the average lifetime of a credit holder. VNA credits are used by all message classes in order to communicate across QPI. If a packet is unable to acquire credits, it will then attempt to use credts from the VN0 pool. Note that a single packet may require multiple flit buffers (i.e. when data is being transfered). Therefore, this event will increment by the number of credits acquired in each cycle. Filtering based on message class is not provided. One can count the number of packets transfered in a given message class using an qfclk event.; Filter for the Home (HOM) message class. HOM is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; DRS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.; Filter for Data Response (DRS). DRS is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using DRS.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; HOM Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.; Filter for the Home (HOM) message class. HOM is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; NCB Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.; Filter for Non-Coherent Broadcast (NCB). NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; NCS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.; Filter for Non-Coherent Standard (NCS).",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; NDR Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.; NDR packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; SNP Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.; Filter for Snoop (SNP) message class. SNP is used for outgoing snoops. Note that snoop responses flow on the HOM message class.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Cycles with no VNA credits available",
+ "Counter": "0,1",
+ "EventCode": "0x31",
+ "EventName": "UNC_R3_VNA_CREDIT_CYCLES_OUT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI uclk cycles when the transmitted has no VNA credits available and therefore cannot send any requests on this channel. Note that this does not mean that no flits can be transmitted, as those holding VN0 credits will still (potentially) be able to transmit. Generally it is the goal of the uncore that VNA credits should not run out, as this can substantially throttle back useful QPI bandwidth.",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Cycles with 1 or more VNA credits in use",
+ "Counter": "0,1",
+ "EventCode": "0x32",
+ "EventName": "UNC_R3_VNA_CREDIT_CYCLES_USED",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI uclk cycles with one or more VNA credits in use. This event can be used in conjunction with the VNA In-Use Accumulator to calculate the average number of used VNA credits.",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VLW Received",
+ "Counter": "0,1",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.DOORBELL_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "VLW Received",
+ "Counter": "0,1",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.INT_PRIO",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "VLW Received",
+ "Counter": "0,1",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.IPI_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "VLW Received",
+ "Counter": "0,1",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.MSI_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "VLW Received",
+ "Counter": "0,1",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.VLW_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "Counter": "0,1",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.DISABLE",
+ "PerPkg": "1",
+ "PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "Counter": "0,1",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.ENABLE",
+ "PerPkg": "1",
+ "PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "Counter": "0,1",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.U2C_DISABLE",
+ "PerPkg": "1",
+ "PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "Counter": "0,1",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.U2C_ENABLE",
+ "PerPkg": "1",
+ "PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "IDI Lock/SplitLock Cycles",
+ "Counter": "0,1",
+ "EventCode": "0x44",
+ "EventName": "UNC_U_LOCK_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times an IDI Lock/SplitLock sequence was started",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Cycles PHOLD Assert to Ack; Assert to ACK",
+ "Counter": "0,1",
+ "EventCode": "0x45",
+ "EventName": "UNC_U_PHOLD_CYCLES.ASSERT_TO_ACK",
+ "PerPkg": "1",
+ "PublicDescription": "PHOLD cycles. Filter from source CoreID.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "RACU Request",
+ "Counter": "0,1",
+ "EventCode": "0x46",
+ "EventName": "UNC_U_RACU_REQUESTS",
+ "PerPkg": "1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Correctable Machine Check",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.CMC",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Livelock",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.LIVELOCK",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores; Filter by core",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; LTError",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.LTERROR",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores; Filter by core",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Monitor T0",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.MONITOR_T0",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores; Filter by core",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Monitor T1",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.MONITOR_T1",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores; Filter by core",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Other",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.OTHER",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores; PREQ, PSMI, P2U, Thermal, PCUSMI, PMI",
+ "UMask": "0x80",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Trap",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.TRAP",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x40",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Uncorrectable Machine Check",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.UMC",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x20",
+ "Unit": "UBOX"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json b/tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json
index 635c09fda1d9..0ba63a97ddfa 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json
@@ -1,176 +1,520 @@
[
{
- "BriefDescription": "PCU clock ticks. Use to get percentages of PCU cycles events",
+ "BriefDescription": "pclk Cycles",
"Counter": "0,1,2,3",
"EventName": "UNC_P_CLOCKTICKS",
"PerPkg": "1",
+ "PublicDescription": "The PCU runs off a fixed 800 MHz clock. This event counts the number of pclk cycles measured while the counter was enabled. The pclk, like the Memory Controller's dclk, counts at a constant rate making it a good measure of actual wall time.",
"Unit": "PCU"
},
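Since the description above fixes the PCU clock at 800 MHz, a UNC_P_CLOCKTICKS count converts directly to elapsed wall time. A minimal Python sketch of that conversion, with an illustrative sample count:

    PCLK_HZ = 800_000_000          # fixed PCU clock rate per the event description
    clockticks = 2_400_000_000     # hypothetical UNC_P_CLOCKTICKS reading
    elapsed_seconds = clockticks / PCLK_HZ
    print(f"measurement window: {elapsed_seconds:.2f} s")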
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. (filter_band0=XXX, with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency",
+ "BriefDescription": "Core 0 C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0xb",
- "EventName": "UNC_P_FREQ_BAND0_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_BAND0_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band0_cycles %",
+ "EventCode": "0x70",
+ "EventName": "UNC_P_CORE0_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. (filter_band1=XXX, with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency",
+ "BriefDescription": "Core 10 C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0xc",
- "EventName": "UNC_P_FREQ_BAND1_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_BAND1_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band1_cycles %",
+ "EventCode": "0x7a",
+ "EventName": "UNC_P_CORE10_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. (filter_band2=XXX, with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency",
+ "BriefDescription": "Core 11 C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0xd",
- "EventName": "UNC_P_FREQ_BAND2_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_BAND2_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band2_cycles %",
+ "EventCode": "0x7b",
+ "EventName": "UNC_P_CORE11_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. (filter_band3=XXX, with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency",
+ "BriefDescription": "Core 12 C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0xe",
- "EventName": "UNC_P_FREQ_BAND3_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_BAND3_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band3_cycles %",
+ "EventCode": "0x7c",
+ "EventName": "UNC_P_CORE12_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of times that the uncore transitioned a frequency greater than or equal to the frequency that is configured in the filter. (filter_band0=XXX, with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band0_cycles",
+ "BriefDescription": "Core 13 C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0xb",
- "EventName": "UNC_P_FREQ_BAND0_TRANSITIONS",
- "Filter": "edge=1",
- "MetricExpr": "(UNC_P_FREQ_BAND0_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band0_cycles %",
+ "EventCode": "0x7d",
+ "EventName": "UNC_P_CORE13_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of times that the uncore transitioned to a frequency greater than or equal to the frequency that is configured in the filter. (filter_band1=XXX, with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band1_cycles",
+ "BriefDescription": "Core 14 C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0xc",
- "EventName": "UNC_P_FREQ_BAND1_TRANSITIONS",
- "Filter": "edge=1",
- "MetricExpr": "(UNC_P_FREQ_BAND1_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band1_cycles %",
+ "EventCode": "0x7e",
+ "EventName": "UNC_P_CORE14_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore transitioned to a frequency greater than or equal to the frequency that is configured in the filter. (filter_band2=XXX, with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band2_cycles",
+ "BriefDescription": "Core 1 C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0xd",
- "EventName": "UNC_P_FREQ_BAND2_TRANSITIONS",
- "Filter": "edge=1",
- "MetricExpr": "(UNC_P_FREQ_BAND2_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band2_cycles %",
+ "EventCode": "0x71",
+ "EventName": "UNC_P_CORE1_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore transitioned to a frequency greater than or equal to the frequency that is configured in the filter. (filter_band3=XXX, with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band3_cycles",
+ "BriefDescription": "Core 2 C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0xe",
- "EventName": "UNC_P_FREQ_BAND3_TRANSITIONS",
- "Filter": "edge=1",
- "MetricExpr": "(UNC_P_FREQ_BAND3_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band3_cycles %",
+ "EventCode": "0x72",
+ "EventName": "UNC_P_CORE2_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "This is an occupancy event that tracks the number of cores that are in C0. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details",
+ "BriefDescription": "Core 3 C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
- "Filter": "occ_sel=1",
- "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C0 / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "power_state_occupancy.cores_c0 %",
+ "EventCode": "0x73",
+ "EventName": "UNC_P_CORE3_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "This is an occupancy event that tracks the number of cores that are in C3. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details",
+ "BriefDescription": "Core 4 C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
- "Filter": "occ_sel=2",
- "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C3 / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "power_state_occupancy.cores_c3 %",
+ "EventCode": "0x74",
+ "EventName": "UNC_P_CORE4_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "This is an occupancy event that tracks the number of cores that are in C6. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events ",
+ "BriefDescription": "Core 5 C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
- "Filter": "occ_sel=3",
- "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C6 / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "power_state_occupancy.cores_c6 %",
+ "EventCode": "0x75",
+ "EventName": "UNC_P_CORE5_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip",
+ "BriefDescription": "Core 6 C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0xa",
- "EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
- "MetricExpr": "(UNC_P_PROCHOT_EXTERNAL_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "prochot_external_cycles %",
+ "EventCode": "0x76",
+ "EventName": "UNC_P_CORE6_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core 7 C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x77",
+ "EventName": "UNC_P_CORE7_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core 8 C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x78",
+ "EventName": "UNC_P_CORE8_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core 9 C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x79",
+ "EventName": "UNC_P_CORE9_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Deep C State Rejection - Core 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x17",
+ "EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE0",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times that a deep C state was requested, but the delayed C state algorithm rejected the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Deep C State Rejection - Core 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE1",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times that a deep C state was requested, but the delayed C state algorithm rejected the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Deep C State Rejection - Core 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE10",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times that a deep C state was requested, but the delayed C state algorithm rejected the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Deep C State Rejection - Core 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE11",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times that a deep C state was requested, but the delayed C state algorithm rejected the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Deep C State Rejection - Core 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE12",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times that a deep C state was requested, but the delayed C state algorithm rejected the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Deep C State Rejection - Core 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE13",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times that a deep C state was requested, but the delayed C state algorithm rejected the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Deep C State Rejection - Core 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x25",
+ "EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE14",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times that a deep C state was requested, but the delayed C state algorithm rejected the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Deep C State Rejection - Core 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE2",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times that a deep C state was requested, but the delayed C state algorithm rejected the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Deep C State Rejection - Core 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1a",
+ "EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE3",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times that a deep C state was requested, but the delayed C state algorithm rejected the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Deep C State Rejection - Core 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE4",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times that a deep C state was requested, but the delayed C state algorithm rejected the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Deep C State Rejection - Core 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1c",
+ "EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE5",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times that a deep C state was requested, but the delayed C state algorithm rejected the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Deep C State Rejection - Core 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1d",
+ "EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE6",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times that a deep C state was requested, but the delayed C state algorithm rejected the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Deep C State Rejection - Core 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1e",
+ "EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE7",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times that a deep C state was requested, but the delayed C state algorithm rejected the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Deep C State Rejection - Core 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1f",
+ "EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE8",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times that a deep C state was requested, but the delayed C state algorithm rejected the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Deep C State Rejection - Core 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE9",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times that a deep C state was requested, but the delayed C state algorithm rejected the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core 0 C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1e",
+ "EventName": "UNC_P_DEMOTIONS_CORE0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core 1 C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1f",
+ "EventName": "UNC_P_DEMOTIONS_CORE1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core 10 C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x42",
+ "EventName": "UNC_P_DEMOTIONS_CORE10",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core 11 C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x43",
+ "EventName": "UNC_P_DEMOTIONS_CORE11",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core 12 C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x44",
+ "EventName": "UNC_P_DEMOTIONS_CORE12",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core 13 C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x45",
+ "EventName": "UNC_P_DEMOTIONS_CORE13",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core 14 C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x46",
+ "EventName": "UNC_P_DEMOTIONS_CORE14",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core 2 C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_P_DEMOTIONS_CORE2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core 3 C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_P_DEMOTIONS_CORE3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core 4 C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_P_DEMOTIONS_CORE4",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core 5 C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_P_DEMOTIONS_CORE5",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core 6 C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_P_DEMOTIONS_CORE6",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core 7 C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x25",
+ "EventName": "UNC_P_DEMOTIONS_CORE7",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core 8 C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_P_DEMOTIONS_CORE8",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core 9 C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_P_DEMOTIONS_CORE9",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Frequency Residency",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xb",
+ "EventName": "UNC_P_FREQ_BAND0_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Frequency Residency",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xc",
+ "EventName": "UNC_P_FREQ_BAND1_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Frequency Residency",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xd",
+ "EventName": "UNC_P_FREQ_BAND2_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Frequency Residency",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xe",
+ "EventName": "UNC_P_FREQ_BAND3_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.",
+ "Unit": "PCU"
+ },
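These band-residency counts are most useful as a fraction of PCU clockticks, which is exactly what the MetricExpr lines removed by this patch computed. A small Python sketch of that ratio, using illustrative counts:

    # Illustrative values; real ones come from UNC_P_FREQ_BAND*_CYCLES and
    # UNC_P_CLOCKTICKS sampled over the same interval.
    freq_band0_cycles = 1_800_000_000
    pcu_clockticks = 2_400_000_000
    band0_residency_pct = freq_band0_cycles / pcu_clockticks * 100.0
    print(f"cycles at or above the band0 frequency: {band0_residency_pct:.1f}%")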
+ {
+ "BriefDescription": "Current Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_P_FREQ_MAX_CURRENT_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when current is the upper limit on frequency.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when temperature is the upper limit on frequency",
+ "BriefDescription": "Thermal Strongest Upper Limit Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_max_limit_thermal_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when thermal conditions are the upper limit on frequency. This is related to the THERMAL_THROTTLE CYCLES_ABOVE_TEMP event, which always counts cycles when we are above the thermal temperature. This event (STRONGEST_UPPER_LIMIT) is sampled at the output of the algorithm that determines the actual frequency, while THERMAL_THROTTLE looks at the input.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when the OS is the upper limit on frequency",
+ "BriefDescription": "OS Strongest Upper Limit Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_P_FREQ_MAX_OS_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_MAX_OS_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_max_os_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the OS is the upper limit on frequency.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when power is the upper limit on frequency",
+ "BriefDescription": "Power Strongest Upper Limit Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_MAX_POWER_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_max_power_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when power is the upper limit on frequency.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when current is the upper limit on frequency",
+ "BriefDescription": "IO P Limit Strongest Lower Limit Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0x7",
- "EventName": "UNC_P_FREQ_MAX_CURRENT_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_MAX_CURRENT_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_max_current_cycles %",
+ "EventCode": "0x61",
+ "EventName": "UNC_P_FREQ_MIN_IO_P_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when IO P Limit is preventing us from dropping the frequency lower. This algorithm monitors the needs to the IO subsystem on both local and remote sockets and will maintain a frequency high enough to maintain good IO BW. This is necessary for when all the IA cores on a socket are idle but a user still would like to maintain high IO Bandwidth.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Perf P Limit Strongest Lower Limit Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x62",
+ "EventName": "UNC_P_FREQ_MIN_PERF_P_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when Perf P Limit is preventing us from dropping the frequency lower. Perf P Limit is an algorithm that takes input from remote sockets when determining if a socket should drop it's frequency down. This is largely to minimize increases in snoop and remote read latencies.",
"Unit": "PCU"
},
{
@@ -178,96 +522,165 @@
"Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_P_FREQ_TRANS_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_TRANS_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_trans_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the system is changing frequency. This can not be filtered by thread ID. One can also use it with the occupancy counter that monitors number of threads in C0 to estimate the performance impact that frequency transitions had on the system.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to 1.2Ghz. Derived from unc_p_freq_band0_cycles",
+ "BriefDescription": "Memory Phase Shedding Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0xb",
- "EventName": "UNC_P_FREQ_GE_1200MHZ_CYCLES",
- "Filter": "filter_band0=12",
- "MetricExpr": "(UNC_P_FREQ_GE_1200MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_1200mhz_cycles %",
+ "EventCode": "0x2f",
+ "EventName": "UNC_P_MEMORY_PHASE_SHEDDING_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the PCU has triggered memory phase shedding. This is a mode that can be run in the iMC physicals that saves power at the expense of additional latency.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to 2Ghz. Derived from unc_p_freq_band1_cycles",
+ "BriefDescription": "Package C State Exit Latency",
"Counter": "0,1,2,3",
- "EventCode": "0xc",
- "EventName": "UNC_P_FREQ_GE_2000MHZ_CYCLES",
- "Filter": "filter_band1=20",
- "MetricExpr": "(UNC_P_FREQ_GE_2000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_2000mhz_cycles %",
+ "EventCode": "0x26",
+ "EventName": "UNC_P_PKG_C_EXIT_LATENCY",
+ "ExtSel": "1",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the package is transitioning from package C2 to C3.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to 3Ghz. Derived from unc_p_freq_band2_cycles",
+ "BriefDescription": "Package C State Exit Latency",
"Counter": "0,1,2,3",
- "EventCode": "0xd",
- "EventName": "UNC_P_FREQ_GE_3000MHZ_CYCLES",
- "Filter": "filter_band2=30",
- "MetricExpr": "(UNC_P_FREQ_GE_3000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_3000mhz_cycles %",
+ "EventCode": "0x26",
+ "EventName": "UNC_P_PKG_C_EXIT_LATENCY_SEL",
+ "ExtSel": "1",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the package is transitioning from package C2 to C3.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to 4Ghz. Derived from unc_p_freq_band3_cycles",
+ "BriefDescription": "Package C State Residency - C0",
"Counter": "0,1,2,3",
- "EventCode": "0xe",
- "EventName": "UNC_P_FREQ_GE_4000MHZ_CYCLES",
- "Filter": "filter_band3=40",
- "MetricExpr": "(UNC_P_FREQ_GE_4000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_4000mhz_cycles %",
+ "EventCode": "0x2a",
+ "EventName": "UNC_P_PKG_C_STATE_RESIDENCY_C0_CYCLES",
+ "ExtSel": "1",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the package is in C0",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of times that the uncore transitioned to a frequency greater than or equal to 1.2Ghz. Derived from unc_p_freq_band0_cycles",
+ "BriefDescription": "Package C State Residency - C2",
"Counter": "0,1,2,3",
- "EventCode": "0xb",
- "EventName": "UNC_P_FREQ_GE_1200MHZ_TRANSITIONS",
- "Filter": "edge=1,filter_band0=12",
- "MetricExpr": "(UNC_P_FREQ_GE_1200MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_1200mhz_cycles %",
+ "EventCode": "0x2b",
+ "EventName": "UNC_P_PKG_C_STATE_RESIDENCY_C2_CYCLES",
+ "ExtSel": "1",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the package is in C2",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of times that the uncore transitioned to a frequency greater than or equal to 2Ghz. Derived from unc_p_freq_band1_cycles",
+ "BriefDescription": "Package C State Residency - C3",
"Counter": "0,1,2,3",
- "EventCode": "0xc",
- "EventName": "UNC_P_FREQ_GE_2000MHZ_TRANSITIONS",
- "Filter": "edge=1,filter_band1=20",
- "MetricExpr": "(UNC_P_FREQ_GE_2000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_2000mhz_cycles %",
+ "EventCode": "0x2c",
+ "EventName": "UNC_P_PKG_C_STATE_RESIDENCY_C3_CYCLES",
+ "ExtSel": "1",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the package is in C3",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore transitioned to a frequency greater than or equal to 3Ghz. Derived from unc_p_freq_band2_cycles",
+ "BriefDescription": "Package C State Residency - C6",
"Counter": "0,1,2,3",
- "EventCode": "0xd",
- "EventName": "UNC_P_FREQ_GE_3000MHZ_TRANSITIONS",
- "Filter": "edge=1,filter_band2=30",
- "MetricExpr": "(UNC_P_FREQ_GE_3000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_3000mhz_cycles %",
+ "EventCode": "0x2d",
+ "EventName": "UNC_P_PKG_C_STATE_RESIDENCY_C6_CYCLES",
+ "ExtSel": "1",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the package is in C6",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore transitioned to a frequency greater than or equal to 4Ghz. Derived from unc_p_freq_band3_cycles",
+ "BriefDescription": "Number of cores in C-State; C0 and C1",
"Counter": "0,1,2,3",
- "EventCode": "0xe",
- "EventName": "UNC_P_FREQ_GE_4000MHZ_TRANSITIONS",
- "Filter": "edge=1,filter_band3=40",
- "MetricExpr": "(UNC_P_FREQ_GE_4000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_4000mhz_cycles %",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
+ "PerPkg": "1",
+ "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State; C3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
+ "PerPkg": "1",
+ "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State; C6 and C7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
+ "PerPkg": "1",
+ "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
+ "Unit": "PCU"
+ },
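As the descriptions above note, these occupancy counters can yield the average number of cores in the selected C-state; assuming the counter accumulates the per-cycle core count (as occupancy counters do), dividing by UNC_P_CLOCKTICKS over the same window gives that average, and the removed MetricExpr expressed the same ratio as a percentage. A hedged Python sketch with illustrative values:

    # Illustrative readings over one measurement window.
    cores_c0_occupancy = 14_400_000_000   # hypothetical UNC_P_POWER_STATE_OCCUPANCY.CORES_C0
    pcu_clockticks = 2_400_000_000        # hypothetical UNC_P_CLOCKTICKS
    avg_cores_in_c0 = cores_c0_occupancy / pcu_clockticks
    print(f"average cores in C0/C1: {avg_cores_in_c0:.2f}")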
+ {
+ "BriefDescription": "External Prochot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xa",
+ "EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Internal Prochot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_P_PROCHOT_INTERNAL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that we are in Interal PROCHOT mode. This mode is triggered when a sensor on the die determines that we are too hot and must throttle to avoid damaging the chip.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Total Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x63",
+ "EventName": "UNC_P_TOTAL_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions across all cores.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Cycles Changing Voltage",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_P_VOLT_TRANS_CYCLES_CHANGE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the system is changing voltage. There is no filtering supported with this event. One can use it as a simple event, or use it conjunction with the occupancy events to monitor the number of cores or threads that were impacted by the transition. This event is calculated by or'ing together the increasing and decreasing events.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Cycles Decreasing Voltage",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_P_VOLT_TRANS_CYCLES_DECREASE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the system is decreasing voltage. There is no filtering supported with this event. One can use it as a simple event, or use it conjunction with the occupancy events to monitor the number of cores or threads that were impacted by the transition.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Cycles Increasing Voltage",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_P_VOLT_TRANS_CYCLES_INCREASE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the system is increasing voltage. There is no filtering supported with this event. One can use it as a simple event, or use it conjunction with the occupancy events to monitor the number of cores or threads that were impacted by the transition.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "VR Hot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_P_VR_HOT_CYCLES",
"PerPkg": "1",
"Unit": "PCU"
}
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/virtual-memory.json b/tools/perf/pmu-events/arch/x86/ivytown/virtual-memory.json
index da6a3e09a782..6624d02ad715 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/virtual-memory.json
@@ -195,4 +195,4 @@
"SampleAfterValue": "100007",
"UMask": "0x20"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/cache.json b/tools/perf/pmu-events/arch/x86/jaketown/cache.json
index 97c7e0ceed18..f98649fb92b4 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/cache.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/cache.json
@@ -1263,4 +1263,4 @@
"SampleAfterValue": "100003",
"UMask": "0x10"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/floating-point.json b/tools/perf/pmu-events/arch/x86/jaketown/floating-point.json
index 713878fd062b..eb2ff2cfdf6b 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/floating-point.json
@@ -135,4 +135,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/frontend.json b/tools/perf/pmu-events/arch/x86/jaketown/frontend.json
index 4bc0954448d2..0b4dbce2f1c0 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/frontend.json
@@ -311,4 +311,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json b/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json
index 2800264c12aa..c0fbb4f31241 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json
@@ -124,7 +124,7 @@
"MetricName": "FLOPc_SMT"
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
"MetricExpr": "UOPS_DISPATCHED.THREAD / (( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
"MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
"MetricName": "ILP"
@@ -142,6 +142,12 @@
"MetricName": "Instructions"
},
{
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "Retire"
+ },
+ {
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
"MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
"MetricGroup": "DSB;Fed;FetchBW",
@@ -163,7 +169,8 @@
"BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "( ( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE ) / 1000000000 ) / duration_time",
"MetricGroup": "Cor;Flops;HPC",
- "MetricName": "GFLOPs"
+ "MetricName": "GFLOPs",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine."
},
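The GFLOPs expression above weights each counter by the number of FP operations per instruction (1 for scalar, 2/4/8 for the packed widths) before dividing by the run time. A quick Python sketch of the same arithmetic, with illustrative counter values:

    # Illustrative counts; the weights mirror the MetricExpr above.
    scalar_single = 1_000_000_000
    scalar_double = 500_000_000
    packed_double = 200_000_000      # 2 FLOPs per instruction
    packed_single = 100_000_000      # 4 FLOPs per instruction
    simd256_double = 50_000_000      # 4 FLOPs per instruction
    simd256_single = 25_000_000      # 8 FLOPs per instruction
    duration_time = 1.0              # seconds
    flops = (1 * (scalar_single + scalar_double)
             + 2 * packed_double
             + 4 * (packed_single + simd256_double)
             + 8 * simd256_single)
    print(f"GFLOPs: {flops / 1e9 / duration_time:.2f}")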
{
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
@@ -202,6 +209,12 @@
"MetricName": "Socket_CLKS"
},
{
+ "BriefDescription": "Uncore frequency per die [GHZ]",
+ "MetricExpr": "cbox_0@event\\=0x0@ / #num_dies / duration_time / 1000000000",
+ "MetricGroup": "SoC",
+ "MetricName": "UNCORE_FREQ"
+ },
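The new UNCORE_FREQ metric is just the cbox_0 clocktick count divided by the number of dies and the sample duration, scaled to GHz. A small Python sketch of that arithmetic with hypothetical inputs:

    cbox0_clockticks = 5_400_000_000   # hypothetical cbox_0 event=0x0 count
    num_dies = 2
    duration_time_s = 1.0              # seconds covered by the sample
    uncore_freq_ghz = cbox0_clockticks / num_dies / duration_time_s / 1e9
    print(f"uncore frequency: {uncore_freq_ghz:.2f} GHz")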
+ {
"BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
"MetricGroup": "Branches;OS",
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/memory.json b/tools/perf/pmu-events/arch/x86/jaketown/memory.json
index 29b70f21a44b..23756ca9b7da 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/memory.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/memory.json
@@ -419,4 +419,4 @@
"SampleAfterValue": "100003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/other.json b/tools/perf/pmu-events/arch/x86/jaketown/other.json
index e251f535ec09..2f873ab14156 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/other.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/other.json
@@ -55,4 +55,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json b/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json
index 87737c92c067..61a3db4d67d5 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json
@@ -410,8 +410,8 @@
},
{
"BriefDescription": "Reference cycles when the core is not in halt state.",
- "Counter": "Fixed counter 3",
- "CounterHTOff": "Fixed counter 3",
+ "Counter": "Fixed counter 2",
+ "CounterHTOff": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"SampleAfterValue": "2000003",
@@ -439,8 +439,8 @@
},
{
"BriefDescription": "Core cycles when the thread is not in halt state.",
- "Counter": "Fixed counter 2",
- "CounterHTOff": "Fixed counter 2",
+ "Counter": "Fixed counter 1",
+ "CounterHTOff": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"SampleAfterValue": "2000003",
@@ -542,8 +542,8 @@
},
{
"BriefDescription": "Instructions retired from execution.",
- "Counter": "Fixed counter 1",
- "CounterHTOff": "Fixed counter 1",
+ "Counter": "Fixed counter 0",
+ "CounterHTOff": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers.",
"SampleAfterValue": "2000003",
@@ -599,7 +599,7 @@
"UMask": "0x3"
},
{
- "BriefDescription": "Number of occurences waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).",
+ "BriefDescription": "Number of occurrences waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
@@ -1199,4 +1199,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
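The uncore-cache.json rewrite below replaces the curated LLC_MISSES.*/LLC_REFERENCES.* aliases (which carried hard-coded filters such as filter_opc=0x182) with the raw UNC_C_* event set, whose descriptions expect the user to program Cn_MSR_PMON_BOX_FILTER.opc themselves. As a hedged illustration only (not part of the patch), a small Python helper that formats the equivalent raw perf event string for the DRD local-miss case those descriptions mention, assuming the uncore_cbox_<n> PMU name and the filter_opc format attribute exposed for this platform:

    def cbox_tor_miss_opcode(opc=0x182, umask=0x3, box=0):
        # UNC_C_TOR_INSERTS.MISS_OPCODE is EventCode 0x35, UMask 0x3 (see below);
        # filter_opc carries the opcode to match, e.g. DRD = 0x182.
        return "uncore_cbox_%d/event=0x35,umask=%#x,filter_opc=%#x/" % (box, umask, opc)

    # -> uncore_cbox_0/event=0x35,umask=0x3,filter_opc=0x182/
    print(cbox_tor_miss_opcode())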
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/uncore-cache.json b/tools/perf/pmu-events/arch/x86/jaketown/uncore-cache.json
index 3fa61d962607..351f8b040ed1 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/uncore-cache.json
@@ -1,210 +1,1946 @@
[
{
- "BriefDescription": "Uncore cache clock ticks",
+ "BriefDescription": "Uncore Clocks",
"Counter": "0,1,2,3",
"EventName": "UNC_C_CLOCKTICKS",
"PerPkg": "1",
"Unit": "CBO"
},
{
- "BriefDescription": "All LLC Misses (code+ data rd + data wr - including demand and prefetch)",
+ "BriefDescription": "Counter 0 Occupancy",
+ "Counter": "1,2,3",
+ "EventCode": "0x1f",
+ "EventName": "UNC_C_COUNTER0_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Since occupancy counts can only be captured in the Cbo's 0 counter, this event allows a user to capture occupancy related information by filtering the Cb0 occupancy count captured in Counter 0. The filtering available is found in the control register - threshold, invert and edge detect. E.g. setting threshold to 1 can effectively monitor how many cycles the monitored queue has an entry.",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cache Lookups; Data Read Request",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.DATA_READ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set filter mask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.",
+ "UMask": "0x3",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cache Lookups; RTID",
"Counter": "0,1",
"EventCode": "0x34",
- "EventName": "UNC_C_LLC_LOOKUP.ANY",
- "Filter": "filter_state=0x1",
+ "EventName": "UNC_C_LLC_LOOKUP.NID",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x11",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set filter mask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.",
+ "UMask": "0x41",
"Unit": "CBO"
},
{
- "BriefDescription": "M line evictions from LLC (writebacks to memory)",
+ "BriefDescription": "Cache Lookups; External Snoop Request",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.REMOTE_SNOOP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set filter mask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.",
+ "UMask": "0x9",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cache Lookups; Write Requests",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.WRITE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set filter mask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.",
+ "UMask": "0x5",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in E state",
+ "Counter": "0,1",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.E_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Lines Victimized",
+ "Counter": "0,1",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in M state",
"Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.M_STATE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x1",
"Unit": "CBO"
},
{
- "BriefDescription": "LLC misses - demand and prefetch data reads - excludes LLC prefetches. Derived from unc_c_tor_inserts.miss_opcode.demand",
+ "BriefDescription": "Lines Victimized; Victimized Lines that Match NID",
"Counter": "0,1",
- "EventCode": "0x35",
- "EventName": "LLC_MISSES.DATA_READ",
- "Filter": "filter_opc=0x182",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.NID",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x3",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x40",
"Unit": "CBO"
},
{
- "BriefDescription": "LLC misses - Uncacheable reads. Derived from unc_c_tor_inserts.miss_opcode.uncacheable",
+ "BriefDescription": "Lines Victimized; Lines in S State",
"Counter": "0,1",
- "EventCode": "0x35",
- "EventName": "LLC_MISSES.UNCACHEABLE",
- "Filter": "filter_opc=0x187",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.S_STATE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x3",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cbo Misc; RFO HitS",
+ "Counter": "0,1",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.RFO_HIT_S",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.",
+ "UMask": "0x8",
"Unit": "CBO"
},
{
- "BriefDescription": "PCIe allocating writes that miss LLC - DDIO misses. Derived from unc_c_tor_inserts.miss_opcode.ddio_miss",
+ "BriefDescription": "Cbo Misc; Silent Snoop Eviction",
+ "Counter": "0,1",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.RSPI_WAS_FSE",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cbo Misc",
+ "Counter": "0,1",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.STARTED",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Cbo Misc; Write Combining Aliasing",
+ "Counter": "0,1",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.WC_ALIASING",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down and Even",
+ "Counter": "2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_C_RING_AD_USED.DOWN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the 'UP' direction is on the clockwise ring and 'DN' is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down and Odd",
+ "Counter": "2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_C_RING_AD_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the 'UP' direction is on the clockwise ring and 'DN' is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up and Even",
+ "Counter": "2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_C_RING_AD_USED.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the 'UP' direction is on the clockwise ring and 'DN' is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up and Odd",
+ "Counter": "2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_C_RING_AD_USED.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the 'UP' direction is on the clockwise ring and 'DN' is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down and Even",
+ "Counter": "2,3",
+ "EventCode": "0x1c",
+ "EventName": "UNC_C_RING_AK_USED.DOWN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the 'UP' direction is on the clockwise ring and 'DN' is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down and Odd",
+ "Counter": "2,3",
+ "EventCode": "0x1c",
+ "EventName": "UNC_C_RING_AK_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the 'UP' direction is on the clockwise ring and 'DN' is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up and Even",
+ "Counter": "2,3",
+ "EventCode": "0x1c",
+ "EventName": "UNC_C_RING_AK_USED.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the 'UP' direction is on the clockwise ring and 'DN' is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up and Odd",
+ "Counter": "2,3",
+ "EventCode": "0x1c",
+ "EventName": "UNC_C_RING_AK_USED.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the 'UP' direction is on the clockwise ring and 'DN' is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down and Even",
+ "Counter": "2,3",
+ "EventCode": "0x1d",
+ "EventName": "UNC_C_RING_BL_USED.DOWN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the 'UP' direction is on the clockwise ring and 'DN' is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down and Odd",
+ "Counter": "2,3",
+ "EventCode": "0x1d",
+ "EventName": "UNC_C_RING_BL_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the 'UP' direction is on the clockwise ring and 'DN' is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up and Even",
+ "Counter": "2,3",
+ "EventCode": "0x1d",
+ "EventName": "UNC_C_RING_BL_USED.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the 'UP' direction is on the clockwise ring and 'DN' is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up and Odd",
+ "Counter": "2,3",
+ "EventCode": "0x1d",
+ "EventName": "UNC_C_RING_BL_USED.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the 'UP' direction is on the clockwise ring and 'DN' is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; Acknowledgements to core",
+ "Counter": "0,1",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.AK_CORE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; Data Responses to core",
+ "Counter": "0,1",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.BL_CORE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; Snoops of processor's cache.",
+ "Counter": "0,1",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.IV_CORE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Any",
+ "Counter": "2,3",
+ "EventCode": "0x1e",
+ "EventName": "UNC_C_RING_IV_USED.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring in JKT. Therefore, if one wants to monitor the 'Even' ring, they should select both UP_EVEN and DN_EVEN. To monitor the 'Odd' ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0xf",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; IRQ",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; IPQ",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; ISMQ",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.ISMQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; ISMQ_BID",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.ISMQ_BIDS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; IPQ",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; IRQ",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; IRQ Rejected",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.IRQ_REJECTED",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; VFIFO",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.VFIFO",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; IPQ",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in internal starvation. This occurs when one (or more) of the entries in the ingress queue are being starved out by other entries in that queue.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; IRQ",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in internal starvation. This occurs when one (or more) of the entries in the ingress queue are being starved out by other entries in that queue.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; ISMQ",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.ISMQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in internal starvation. This occurs when one (or more) of the entries in the ingress queue are being starved out by other entries in that queue.",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; Address Conflict",
+ "Counter": "0,1",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.ADDR_CONFLICT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; Any Reject",
+ "Counter": "0,1",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; No Egress Credits",
+ "Counter": "0,1",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; No QPI Credits",
+ "Counter": "0,1",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.QPI_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; Address Conflict",
+ "Counter": "0,1",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.ADDR_CONFLICT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; Any Reject",
+ "Counter": "0,1",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.ANY",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No Egress Credits",
+ "Counter": "0,1",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.FULL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No QPI Credits",
+ "Counter": "0,1",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.QPI_CREDITS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No RTIDs",
+ "Counter": "0,1",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.RTID",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; Any Reject",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No Egress Credits",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No IIO Credits",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.IIO_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x20",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No QPI Credits",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.QPI_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No RTIDs",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.RTID",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; IPQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; IRQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; IRQ Rejected",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.IRQ_REJECTED",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; VFIFO",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.VFIFO",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Evictions",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.PCIE_WRITE",
- "Filter": "filter_opc=0x19c",
+ "EventName": "UNC_C_TOR_INSERTS.EVICTION",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x3",
+ "PublicDescription": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).",
+ "UMask": "0x4",
"Unit": "CBO"
},
{
- "BriefDescription": "LLC misses for ItoM writes (as part of fast string memcpy stores). Derived from unc_c_tor_inserts.miss_opcode.itom_write",
+ "BriefDescription": "TOR Inserts; Miss All",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.ITOM_WRITE",
- "Filter": "filter_opc=0x1c8",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_ALL",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x3",
+ "PublicDescription": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).",
+ "UMask": "0xa",
"Unit": "CBO"
},
{
- "BriefDescription": "Streaming stores (full cache line). Derived from unc_c_tor_inserts.opcode.streaming_full",
+ "BriefDescription": "TOR Inserts; Miss Opcode Match",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.STREAMING_FULL",
- "Filter": "filter_opc=0x18c",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_OPCODE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
+ "PublicDescription": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).",
+ "UMask": "0x3",
"Unit": "CBO"
},
{
- "BriefDescription": "Streaming stores (partial cache line). Derived from unc_c_tor_inserts.opcode.streaming_partial",
+ "BriefDescription": "TOR Inserts; NID Matched",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.STREAMING_PARTIAL",
- "Filter": "filter_opc=0x18d",
+ "EventName": "UNC_C_TOR_INSERTS.NID_ALL",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
+ "PublicDescription": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).",
+ "UMask": "0x48",
"Unit": "CBO"
},
{
- "BriefDescription": "Partial PCIe reads. Derived from unc_c_tor_inserts.opcode.pcie_partial",
+ "BriefDescription": "TOR Inserts; NID Matched Evictions",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_PARTIAL_READ",
- "Filter": "filter_opc=0x195",
+ "EventName": "UNC_C_TOR_INSERTS.NID_EVICTION",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
+ "PublicDescription": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).",
+ "UMask": "0x44",
"Unit": "CBO"
},
{
- "BriefDescription": "PCIe allocating writes that hit in LLC (DDIO hits). Derived from unc_c_tor_inserts.opcode.ddio_hit",
+ "BriefDescription": "TOR Inserts; NID Matched Miss All",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_WRITE",
- "Filter": "filter_opc=0x19c",
+ "EventName": "UNC_C_TOR_INSERTS.NID_MISS_ALL",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
+ "PublicDescription": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).",
+ "UMask": "0x4a",
"Unit": "CBO"
},
{
- "BriefDescription": "PCIe read current. Derived from unc_c_tor_inserts.opcode.pcie_read_current",
+ "BriefDescription": "TOR Inserts; NID and Opcode Matched Miss",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_READ",
- "Filter": "filter_opc=0x19e",
+ "EventName": "UNC_C_TOR_INSERTS.NID_MISS_OPCODE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
+ "PublicDescription": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).",
+ "UMask": "0x43",
"Unit": "CBO"
},
{
- "BriefDescription": "ItoM write hits (as part of fast string memcpy stores). Derived from unc_c_tor_inserts.opcode.itom_write_hit",
+ "BriefDescription": "TOR Inserts; NID and Opcode Matched",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.ITOM_WRITE",
- "Filter": "filter_opc=0x1c8",
+ "EventName": "UNC_C_TOR_INSERTS.NID_OPCODE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
+ "PublicDescription": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).",
+ "UMask": "0x41",
"Unit": "CBO"
},
{
- "BriefDescription": "PCIe non-snoop reads. Derived from unc_c_tor_inserts.opcode.pcie_read",
+ "BriefDescription": "TOR Inserts; NID Matched Writebacks",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_NS_READ",
- "Filter": "filter_opc=0x1e4",
+ "EventName": "UNC_C_TOR_INSERTS.NID_WB",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
+ "PublicDescription": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).",
+ "UMask": "0x50",
"Unit": "CBO"
},
{
- "BriefDescription": "PCIe non-snoop writes (partial). Derived from unc_c_tor_inserts.opcode.pcie_partial_write",
+ "BriefDescription": "TOR Inserts; Opcode Match",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_NS_PARTIAL_WRITE",
- "Filter": "filter_opc=0x1e5",
+ "EventName": "UNC_C_TOR_INSERTS.OPCODE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).",
"UMask": "0x1",
"Unit": "CBO"
},
{
- "BriefDescription": "PCIe non-snoop writes (full line). Derived from unc_c_tor_inserts.opcode.pcie_full_write",
+ "BriefDescription": "TOR Inserts; Writebacks",
"Counter": "0,1",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_NS_WRITE",
- "Filter": "filter_opc=0x1e6",
+ "EventName": "UNC_C_TOR_INSERTS.WB",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
+ "PublicDescription": "Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Any",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Evictions",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.EVICTION",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x4",
"Unit": "CBO"
},
{
- "BriefDescription": "Occupancy counter for all LLC misses; we divide this by UNC_C_CLOCKTICKS to get average Q depth",
+ "BriefDescription": "TOR Occupancy; Miss All",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.MISS_ALL",
- "Filter": "filter_opc=0x182",
- "MetricExpr": "(UNC_C_TOR_OCCUPANCY.MISS_ALL / UNC_C_CLOCKTICKS) * 100.",
- "MetricName": "tor_occupancy.miss_all %",
"PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
"UMask": "0xa",
"Unit": "CBO"
},
{
- "BriefDescription": "Occupancy counter for LLC data reads (demand and L2 prefetch). Derived from unc_c_tor_occupancy.miss_opcode.llc_data_read",
+ "BriefDescription": "TOR Occupancy; Miss Opcode Match",
"EventCode": "0x36",
- "EventName": "UNC_C_TOR_OCCUPANCY.LLC_DATA_READ",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_OPCODE",
"PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
"UMask": "0x3",
"Unit": "CBO"
},
{
- "BriefDescription": "read requests to home agent",
+ "BriefDescription": "TOR Occupancy; NID Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x48",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID Matched Evictions",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_EVICTION",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x44",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_MISS_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x4a",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID and Opcode Matched Miss",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_MISS_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x43",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID and Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x41",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Opcode Match",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AD - Cachebo",
+ "Counter": "0,1",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AD_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.",
+ "UMask": "0x1",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AD - Corebo",
+ "Counter": "0,1",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AD_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.",
+ "UMask": "0x10",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AK - Cachebo",
+ "Counter": "0,1",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AK_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AK - Corebo",
+ "Counter": "0,1",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AK_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.",
+ "UMask": "0x20",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; BL - Cacheno",
+ "Counter": "0,1",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.BL_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; BL - Corebo",
+ "Counter": "0,1",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.BL_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.",
+ "UMask": "0x40",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Egress Allocations; IV - Cachebo",
+ "Counter": "0,1",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.IV_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.",
+ "UMask": "0x8",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto AK Ring",
+ "Counter": "0,1",
+ "EventCode": "0x3",
+ "EventName": "UNC_C_TxR_STARVED.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto BL Ring",
+ "Counter": "0,1",
+ "EventCode": "0x3",
+ "EventName": "UNC_C_TxR_STARVED.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.",
+ "UMask": "0x4",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "HA to iMC Bypass; Not Taken",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_H_BYPASS_IMC.NOT_TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when the HA was able to bypass was attempted. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filted by when the bypass was taken and when it was not.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Bypass; Taken",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_H_BYPASS_IMC.TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when the HA was able to bypass was attempted. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filted by when the bypass was taken and when it was not.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "uclks",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_H_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of uclks in the HA. This will be slightly different than the count in the Ubox because of enable/freeze delays. The HA is on the other side of the die from the fixed Ubox uclk counter, so the drift could be somewhat larger than in units that are closer like the QPI Agent.",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Conflict Checks; Conflict Detected",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xb",
+ "EventName": "UNC_H_CONFLICT_CYCLES.CONFLICT",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Conflict Checks; No Conflict",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xb",
+ "EventName": "UNC_H_CONFLICT_CYCLES.NO_CONFLICT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Direct2Core Messages Sent",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_H_DIRECT2CORE_COUNT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Direct2Core messages sent",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles when Direct2Core was Disabled",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_H_DIRECT2CORE_CYCLES_DISABLED",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles in which Direct2Core was disabled",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Number of Reads that had Direct2Core Overridden",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_H_DIRECT2CORE_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Reads where Direct2Core overridden",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lookups; Snoop Not Needed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xc",
+ "EventName": "UNC_H_DIRECTORY_LOOKUP.NO_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lookups; Snoop Needed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xc",
+ "EventName": "UNC_H_DIRECTORY_LOOKUP.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates; Any Directory Update",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xd",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates; Directory Clear",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xd",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.CLEAR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates; Directory Set",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xd",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.SET",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; AD to QPI Link 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; AD to QPI Link 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Retry Events",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1e",
+ "EventName": "UNC_H_IMC_RETRY",
+ "PerPkg": "1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; All Writes",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1a",
+ "EventName": "UNC_H_IMC_WRITES.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0xf",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; Full Line Non-ISOCH",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1a",
+ "EventName": "UNC_H_IMC_WRITES.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; ISOCH Full Line",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1a",
+ "EventName": "UNC_H_IMC_WRITES.FULL_ISOCH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; Partial Non-ISOCH",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1a",
+ "EventName": "UNC_H_IMC_WRITES.PARTIAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; ISOCH Partial",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1a",
+ "EventName": "UNC_H_IMC_WRITES.PARTIAL_ISOCH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Read and Write Requests; Reads",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.READS",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).",
"UMask": "0x3",
"Unit": "HA"
},
{
- "BriefDescription": "write requests to home agent",
+ "BriefDescription": "Read and Write Requests; Writes",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.WRITES",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).",
"UMask": "0xc",
"Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3e",
+ "EventName": "UNC_H_RING_AD_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3e",
+ "EventName": "UNC_H_RING_AD_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3e",
+ "EventName": "UNC_H_RING_AD_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3e",
+ "EventName": "UNC_H_RING_AD_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3f",
+ "EventName": "UNC_H_RING_AK_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3f",
+ "EventName": "UNC_H_RING_AK_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3f",
+ "EventName": "UNC_H_RING_AK_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3f",
+ "EventName": "UNC_H_RING_AK_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'regular' credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and 'special' requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'regular' credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and 'special' requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'regular' credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and 'special' requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'regular' credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and 'special' requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'special' credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and 'special' requests such as ISOCH reads. This count only tracks the 'special' credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'special' credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and 'special' requests such as ISOCH reads. This count only tracks the 'special' credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'special' credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and 'special' requests such as ISOCH reads. This count only tracks the 'special' credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'special' credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and 'special' requests such as ISOCH reads. This count only tracks the 'special' credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for 'Monroe' systems that use the TAD to enable individual channels to enter self-refresh to save power.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for 'Monroe' systems that use the TAD to enable individual channels to enter self-refresh to save power.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for 'Monroe' systems that use the TAD to enable individual channels to enter self-refresh to save power.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for 'Monroe' systems that use the TAD to enable individual channels to enter self-refresh to save power.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION4",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for 'Monroe' systems that use the TAD to enable individual channels to enter self-refresh to save power.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION5",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for 'Monroe' systems that use the TAD to enable individual channels to enter self-refresh to save power.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION6",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for 'Monroe' systems that use the TAD to enable individual channels to enter self-refresh to save power.",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION7",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for 'Monroe' systems that use the TAD to enable individual channels to enter self-refresh to save power.",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1c",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION10",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for 'Monroe' systems that use the TAD to enable individual channels to enter self-refresh to save power.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1c",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION11",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for 'Monroe' systems that use the TAD to enable individual channels to enter self-refresh to save power.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1c",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION8",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for 'Monroe' systems that use the TAD to enable individual channels to enter self-refresh to save power.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1c",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION9",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for 'Monroe' systems that use the TAD to enable individual channels to enter self-refresh to save power.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Allocations; All Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_H_TRACKER_INSERTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the local HA tracker pool. This can be used in conjunction with the occupancy accumulation event in order to calculate average latency. One cannot filter between reads and writes. HA trackers are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound NDR Ring Transactions; Non-data Responses",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xf",
+ "EventName": "UNC_H_TxR_AD.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of outbound transactions on the AD ring. This can be filtered by the NDR and SNP message classes. See the filter descriptions for more details.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound NDR Ring Transactions; Snoops",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xf",
+ "EventName": "UNC_H_TxR_AD.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of outbound transactions on the AD ring. This can be filtered by the NDR and SNP message classes. See the filter descriptions for more details.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Full; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2a",
+ "EventName": "UNC_H_TxR_AD_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Full",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Full; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2a",
+ "EventName": "UNC_H_TxR_AD_CYCLES_FULL.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Full",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Full; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2a",
+ "EventName": "UNC_H_TxR_AD_CYCLES_FULL.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Full",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Not Empty; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_TxR_AD_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Not Empty",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Not Empty; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_TxR_AD_CYCLES_NE.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Not Empty",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Not Empty; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_TxR_AD_CYCLES_NE.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Not Empty",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Allocations; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_TxR_AD_INSERTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Allocations",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Allocations; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_TxR_AD_INSERTS.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Allocations",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Allocations; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_TxR_AD_INSERTS.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Allocations",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Occupancy; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_H_TxR_AD_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Occupancy",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Occupancy; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_H_TxR_AD_OCCUPANCY.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Occupancy",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Occupancy; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_H_TxR_AD_OCCUPANCY.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Occupancy",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Full; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_TxR_AK_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Full",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Full; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_TxR_AK_CYCLES_FULL.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Full",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Full; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_TxR_AK_CYCLES_FULL.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Full",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Not Empty; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_H_TxR_AK_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Not Empty",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Not Empty; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_H_TxR_AK_CYCLES_NE.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Not Empty",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Not Empty; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_H_TxR_AK_CYCLES_NE.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Not Empty",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2f",
+ "EventName": "UNC_H_TxR_AK_INSERTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Allocations",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2f",
+ "EventName": "UNC_H_TxR_AK_INSERTS.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Allocations",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2f",
+ "EventName": "UNC_H_TxR_AK_INSERTS.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Allocations",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound NDR Ring Transactions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xe",
+ "EventName": "UNC_H_TxR_AK_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of outbound NDR transactions sent on the AK ring. NDR stands for 'non-data response' and is generally used for completions that do not include data. AK NDR is used for messages to the local socket.",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Occupancy; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x30",
+ "EventName": "UNC_H_TxR_AK_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Occupancy",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Occupancy; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x30",
+ "EventName": "UNC_H_TxR_AK_OCCUPANCY.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Occupancy",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Occupancy; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x30",
+ "EventName": "UNC_H_TxR_AK_OCCUPANCY.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Occupancy",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_H_TxR_BL.DRS_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Core",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_H_TxR_BL.DRS_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to QPI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_H_TxR_BL.DRS_QPI",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Full; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TxR_BL_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Full",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Full; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TxR_BL_CYCLES_FULL.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Full",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Full; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TxR_BL_CYCLES_FULL.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Full",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Not Empty; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TxR_BL_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Not Empty",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Not Empty; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TxR_BL_CYCLES_NE.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Not Empty",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Not Empty; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TxR_BL_CYCLES_NE.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Not Empty",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Allocations; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_TxR_BL_INSERTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Allocations",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Allocations; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_TxR_BL_INSERTS.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Allocations",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Allocations; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_TxR_BL_INSERTS.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Allocations",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Occupancy; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_H_TxR_BL_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Occupancy",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Occupancy; Scheduler 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_H_TxR_BL_OCCUPANCY.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Occupancy",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Occupancy; Scheduler 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_H_TxR_BL_OCCUPANCY.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Occupancy",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'regular' credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and 'special' requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'regular' credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and 'special' requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'regular' credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and 'special' requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'regular' credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and 'special' requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'special' credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and 'special' requests such as ISOCH writes. This count only tracks the 'special' credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'special' credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and 'special' requests such as ISOCH writes. This count only tracks the 'special' credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'special' credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and 'special' requests such as ISOCH writes. This count only tracks the 'special' credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'special' credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and 'special' requests such as ISOCH writes. This count only tracks the 'special' credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x8",
+ "Unit": "HA"
}
]
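
Aside (not part of the patch): the hunk above defines grouped events whose "ALL" umask is the OR of its sibling umasks (for example, UNC_H_TxR_AD_INSERTS.ALL with UMask 0x3 covers SCHED0 0x1 and SCHED1 0x2). The minimal Python sketch below, assuming the hunk belongs to tools/perf/pmu-events/arch/x86/jaketown/uncore-cache.json as added by this patch, loads that JSON array and sanity-checks this relationship; the field names (EventName, EventCode, UMask, Unit) are exactly those used in the entries above.

#!/usr/bin/env python3
# Minimal sketch: verify that "ALL"-style umasks equal the OR of their sibling
# umasks in the Jaketown uncore-cache event list (file path assumed from this patch).
import json
from collections import defaultdict

path = "tools/perf/pmu-events/arch/x86/jaketown/uncore-cache.json"
with open(path) as f:
    events = json.load(f)

# Group event variants by unit, event code and base name (text before the '.').
groups = defaultdict(dict)
for ev in events:
    base, _, suffix = ev["EventName"].partition(".")
    key = (ev["Unit"], ev.get("EventCode", ""), base)
    groups[key][suffix] = int(ev.get("UMask", "0x0"), 16)

for (unit, code, base), umasks in groups.items():
    if "ALL" in umasks and len(umasks) > 1:
        combined = 0
        for suffix, mask in umasks.items():
            if suffix != "ALL":
                combined |= mask
        status = "ok" if combined == umasks["ALL"] else "MISMATCH"
        print(f"{unit} {base}.ALL: 0x{umasks['ALL']:x} vs OR of siblings 0x{combined:x} -> {status}")
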
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/jaketown/uncore-interconnect.json
index 1b53c0e609e3..750870fd1cb1 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/uncore-interconnect.json
@@ -1,48 +1,850 @@
[
{
- "BriefDescription": "QPI clock ticks. Used to get percentages of QPI cycles events",
+ "BriefDescription": "Number of qfclks",
"Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_Q_CLOCKTICKS",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of clocks in the QPI LL. This clock runs at 1/8th the 'GT/s' speed of the QPI link. For example, a 8GT/s link will have qfclk or 1GHz. JKT does not support dynamic link speeds, so this frequency is fixed.",
"Unit": "QPI LL"
},
{
- "BriefDescription": "Cycles where receiving QPI link is in half-width mode",
+ "BriefDescription": "Count of CTO Events",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_CTO_COUNT",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of CTO (cluster trigger outs) events that were asserted across the two slots. If both slots trigger in a given cycle, the event will increment by 2. You can use edge detect to count the number of cases when both events triggered.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress and RBT",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS_RBT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - RBT Not Set",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_RBT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Success",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.SUCCESS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exlusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in L1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_Q_L1_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI qfclk cycles spent in L1 power mode. L1 is a mode that totally shuts down a QPI link. Use edge detect to count the number of instances when the QPI link entered L1. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. Because L1 totally shuts down the link, it takes a good amount of time to exit this mode.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in L0p",
"Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_Q_RxL0P_POWER_CYCLES",
- "MetricExpr": "(UNC_Q_RxL0P_POWER_CYCLES / UNC_Q_CLOCKTICKS) * 100.",
- "MetricName": "rxl0p_power_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Number of QPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 1/2 of the QPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize QPI for snoops and their responses. Use edge detect to count the number of instances when the QPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in L0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xf",
+ "EventName": "UNC_Q_RxL0_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Bypassed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_Q_RxL_BYPASSED",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of flits transfered, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "CRC Errors Detected; LinkInit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_CRC_ERRORS.LINK_INIT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CRC errors detected in the QPI Agent. Each QPI flit incorporates 8 bits of CRC for error detection. This counts the number of flits where the CRC was able to detect an error. After an error has been detected, the QPI agent will send a request to the transmitting socket to resend the flit (as well as any flits that came after it).",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "CRC Errors Detected; Normal Operations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_CRC_ERRORS.NORMAL_OP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CRC errors detected in the QPI Agent. Each QPI flit incorporates 8 bits of CRC for error detection. This counts the number of flits where the CRC was able to detect an error. After an error has been detected, the QPI agent will send a request to the transmitting socket to resend the flit (as well as any flits that came after it).",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; DRS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1e",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.DRS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; HOM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1e",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.HOM",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; NCB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1e",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NCB",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; NCS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1e",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NCS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; NDR",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1e",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NDR",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+ "UMask": "0x20",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; SNP",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1e",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.SNP",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VNA Credit Consumed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1d",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VNA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VNA credit was consumed (i.e. message uses a VNA credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xa",
+ "EventName": "UNC_Q_RxL_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 0; Data Tx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_RxL_FLITS_G0.DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 0; Idle and Null Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_RxL_FLITS_G0.IDLE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 0; Non-Data protocol Tx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_RxL_FLITS_G0.NON_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; DRS Flits (both Header and Data)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.DRS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x18",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; DRS Data Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.DRS_DATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; DRS Header Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.DRS_NONDATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; HOM Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.HOM",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x6",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; HOM Non-Request Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.HOM_NONREQ",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; HOM Request Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.HOM_REQ",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; SNP Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.SNP",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Coherent Rx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NCB",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0xc",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Coherent data Rx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NCB_DATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Coherent non-data Rx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NCB_NONDATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Coherent standard Rx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NCS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Data Response Rx Flits - AD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NDR_AD",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Data Response Rx Flits - AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NDR_AK",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_Q_RxL_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - DRS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_Q_RxL_INSERTS_DRS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only DRS flits.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - HOM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xc",
+ "EventName": "UNC_Q_RxL_INSERTS_HOM",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only HOM flits.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NCB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xa",
+ "EventName": "UNC_Q_RxL_INSERTS_NCB",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCB flits.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NCS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xb",
+ "EventName": "UNC_Q_RxL_INSERTS_NCS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCS flits.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NDR",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xe",
+ "EventName": "UNC_Q_RxL_INSERTS_NDR",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NDR flits.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - SNP",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xd",
+ "EventName": "UNC_Q_RxL_INSERTS_SNP",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only SNP flits.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xb",
+ "EventName": "UNC_Q_RxL_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - DRS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_DRS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors DRS flits only.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - HOM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_HOM",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors HOM flits only.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NCB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NCB",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCB flits only.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NCS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x17",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NCS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCS flits only.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NDR",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1a",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NDR",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NDR flits only.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - SNP",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_SNP",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors SNP flits only.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI; BGF Stall - HOM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS.BGF_DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI; BGF Stall - DRS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS.BGF_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI.",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI; BGF Stall - SNP",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS.BGF_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI; BGF Stall - NDR",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS.BGF_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI.",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI; BGF Stall - NCS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS.BGF_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI.",
+ "UMask": "0x20",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI; BGF Stall - NCB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS.BGF_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI.",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI; Egress Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS.EGRESS_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI.",
+ "UMask": "0x40",
"Unit": "QPI LL"
},
{
- "BriefDescription": "Cycles where transmitting QPI link is in half-width mode",
+ "BriefDescription": "Stalls Sending to R3QPI; GV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS.GV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI.",
+ "UMask": "0x80",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in L0p",
"Counter": "0,1,2,3",
"EventCode": "0xd",
"EventName": "UNC_Q_TxL0P_POWER_CYCLES",
- "MetricExpr": "(UNC_Q_TxL0P_POWER_CYCLES / UNC_Q_CLOCKTICKS) * 100.",
- "MetricName": "txl0p_power_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Number of QPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 1/2 of the QPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize QPI for snoops and their responses. Use edge detect to count the number of instances when the QPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in L0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xc",
+ "EventName": "UNC_Q_TxL0_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.",
"Unit": "QPI LL"
},
{
- "BriefDescription": "Number of data flits transmitted ",
+ "BriefDescription": "Tx Flit Buffer Bypassed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_Q_TxL_BYPASSED",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an incoming flit was able to bypass the Tx flit buffer and pass directly out the QPI Link. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with no LLR Credits; LLR is almost full",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_TxL_CRC_NO_CREDITS.ALMOST_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles when the Tx side ran out of Link Layer Retry credits, causing the Tx to stall.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with no LLR Credits; LLR is full",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_TxL_CRC_NO_CREDITS.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles when the Tx side ran out of Link Layer Retry credits, causing the Tx to stall.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Cycles not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_Q_TxL_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the TxQ is not empty. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 0; Data Tx Flits",
"Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G0.DATA",
"PerPkg": "1",
- "ScaleUnit": "8Bytes",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
"UMask": "0x2",
"Unit": "QPI LL"
},
{
- "BriefDescription": "Number of non data (control) flits transmitted ",
+ "BriefDescription": "Flits Transferred - Group 0; Idle and Null Flits",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G0.IDLE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 0; Non-Data protocol Tx Flits",
"Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G0.NON_DATA",
"PerPkg": "1",
- "ScaleUnit": "8Bytes",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; DRS Flits (both Header and Data)",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G1.DRS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x18",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; DRS Data Flits",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G1.DRS_DATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; DRS Header Flits",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G1.DRS_NONDATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; HOM Flits",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G1.HOM",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x6",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; HOM Non-Request Flits",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G1.HOM_NONREQ",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
"UMask": "0x4",
"Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; HOM Request Flits",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G1.HOM_REQ",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; SNP Flits",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_Q_TxL_FLITS_G1.SNP",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Coherent Bypass Tx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NCB",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0xc",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Coherent data Tx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NCB_DATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x4",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Coherent non-data Tx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NCB_NONDATA",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x8",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Coherent standard Tx Flits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NCS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x10",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Data Response Tx Flits - AD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NDR_AD",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x1",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Data Response Tx Flits - AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NDR_AK",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x2",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_Q_TxL_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Tx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_Q_TxL_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of flits in the TxQ. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This can be used with the cycles not empty event to track average occupancy, or the allocations event to track average lifetime in the TxQ.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VNA Credits Returned",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1c",
+ "EventName": "UNC_Q_VNA_CREDIT_RETURNS",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of VNA credits returned.",
+ "Unit": "QPI LL"
+ },
+ {
+ "BriefDescription": "VNA Credits Pending Return - Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_Q_VNA_CREDIT_RETURN_OCCUPANCY",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of VNA credits in the Rx side that are waitng to be returned back across the link.",
+ "Unit": "QPI LL"
}
]
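
The UNC_Q_TxL_FLITS_G* descriptions above spell out the link-bandwidth arithmetic: raw link bandwidth is flits * 80 bits / time, while data bandwidth is data flits * 8 B / time (a 64B cacheline travels as 9 flits, 8 of them data flits). The short Python sketch below only restates that arithmetic; the sample counts are invented for illustration and would in practice come from the corresponding flit counters collected over a measurement interval.

    # Sketch of the bandwidth formulas quoted in the flit event descriptions.
    # The sample counts are hypothetical, not measured values.

    def qpi_link_bandwidth(total_flits, seconds):
        # Every flit carries 80 bits of information (header or data).
        return total_flits * 80 / 8 / seconds          # bytes per second

    def qpi_data_bandwidth(data_flits, seconds):
        # Only data flits carry 8 bytes (64 bits) of cacheline payload.
        return data_flits * 8 / seconds                # bytes per second

    # Hypothetical interval: one million 64B cachelines in one second,
    # i.e. 9 flits per line, 8 of which are data flits.
    lines, seconds = 1_000_000, 1.0
    print(qpi_link_bandwidth(lines * 9, seconds))      # 90,000,000 B/s on the link
    print(qpi_data_bandwidth(lines * 8, seconds))      # 64,000,000 B/s of data
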
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/uncore-memory.json b/tools/perf/pmu-events/arch/x86/jaketown/uncore-memory.json
index 8551cebeba23..a165a77947a0 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/uncore-memory.json
@@ -1,82 +1,493 @@
[
{
- "BriefDescription": "Memory page activates",
+ "BriefDescription": "DRAM Activate Count",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_M_ACT_COUNT",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
"Unit": "iMC"
},
{
- "BriefDescription": "read requests to memory controller. Derived from unc_m_cas_count.rd",
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM WR_CAS (w/ and w/out auto-pre)",
"Counter": "0,1,2,3",
"EventCode": "0x4",
- "EventName": "LLC_MISSES.MEM_READ",
+ "EventName": "UNC_M_CAS_COUNT.ALL",
"PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM Reads (RD_CAS + Underfills)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands",
"UMask": "0x3",
"Unit": "iMC"
},
{
- "BriefDescription": "write requests to memory controller. Derived from unc_m_cas_count.wr",
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM RD_CAS (w/ and w/out auto-pre)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_REG",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Underfill Read Issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM WR_CAS (both Modes)",
"Counter": "0,1,2,3",
"EventCode": "0x4",
- "EventName": "LLC_MISSES.MEM_WRITE",
+ "EventName": "UNC_M_CAS_COUNT.WR",
"PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands",
"UMask": "0xc",
"Unit": "iMC"
},
{
- "BriefDescription": "Memory controller clock ticks. Used to get percentages of memory controller cycles events",
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Read Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR_RMM",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Write Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR_WMM",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "uclks",
"Counter": "0,1,2,3",
"EventName": "UNC_M_CLOCKTICKS",
"PerPkg": "1",
+ "PublicDescription": "Uncore Fixed Counter - uclks",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge All Commands",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_M_DRAM_PRE_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that the precharge all command was sent.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_M_DRAM_REFRESH.HIGH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of refreshes issued.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_M_DRAM_REFRESH.PANIC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of refreshes issued.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "ECC Correctable Errors",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_M_ECC_CORRECTABLE_ERRORS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of ECC errors detected and corrected by the iMC on this channel. This counter is only useful with ECC DRAM devices. This count will increment one time for each correction regardless of the number of bits corrected. The iMC can correct up to 4 bit errors in independent channel mode and 8 bit erros in lockstep mode.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Isoch Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.ISOCH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.",
+ "UMask": "0x8",
"Unit": "iMC"
},
{
- "BriefDescription": "Cycles where DRAM ranks are in power down (CKE) mode",
+ "BriefDescription": "Cycles in a Major Mode; Partial Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.PARTIAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Read Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.READ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Write Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.WRITE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Channel DLLOFF Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_M_POWER_CHANNEL_DLLOFF",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles when all the ranks in the channel are in CKE Slow (DLLOFF) mode.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Channel PPD Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M_POWER_CHANNEL_PPD",
- "MetricExpr": "(UNC_M_POWER_CHANNEL_PPD / UNC_M_CLOCKTICKS) * 100.",
- "MetricName": "power_channel_ppd %",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles when all the ranks in the channel are in PPD mode. If IBT=off is enabled, then this can be used to count those cycles. If it is not enabled, then this can count the number of cycles when that could have been taken advantage of.",
"Unit": "iMC"
},
{
- "BriefDescription": "Cycles all ranks are in critical thermal throttle",
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK6",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK7",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Critical Throttle Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES",
- "MetricExpr": "(UNC_M_POWER_CRITICAL_THROTTLE_CYCLES / UNC_M_CLOCKTICKS) * 100.",
- "MetricName": "power_critical_throttle_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the iMC is in critical thermal throttling. When this happens, all traffic is blocked. This should be rare unless something bad is going on in the platform. There is no filtering by rank for this event.",
"Unit": "iMC"
},
{
- "BriefDescription": "Cycles Memory is in self refresh power mode",
+ "BriefDescription": "Clock-Enabled Self-Refresh",
"Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M_POWER_SELF_REFRESH",
- "MetricExpr": "(UNC_M_POWER_SELF_REFRESH / UNC_M_CLOCKTICKS) * 100.",
- "MetricName": "power_self_refresh %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the iMC is in self-refresh and the iMC still has a clock. This happens in some package C-states. For example, the PCU may ask the iMC to enter self-refresh even though some of the cores are still processing. One use of this is for Monroe technology. Self-refresh is required during package C3 and C6, but there is no clock in the iMC at this time, so it is not possible to count these cases.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK4",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK5",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK6",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK7",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Preemption Count; Read over Read Preemption",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_M_PREEMPTION.RD_PREEMPT_RD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Preemption Count; Read over Write Preemption",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_M_PREEMPTION.RD_PREEMPT_WR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to timer expiration",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_M_PRE_COUNT.PAGE_CLOSE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x2",
"Unit": "iMC"
},
{
- "BriefDescription": "Memory page conflicts",
+ "BriefDescription": "DRAM Precharge commands.; Precharges due to page miss",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.PAGE_MISS",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.",
"UMask": "0x1",
"Unit": "iMC"
},
{
- "BriefDescription": "Occupancy counter for memory read queue",
+ "BriefDescription": "Read Pending Queue Full Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_M_RPQ_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the Read Pending Queue is full. When the RPQ is full, the HA will not be able to issue any additional read requests into the iMC. This count should be similar count in the HA which tracks the number of cycles that the HA has no RPQ credits, just somewhat smaller to account for the credit return overhead. We generally do not expect to see RPQ become full except for potentially during Write Major Mode or while running with slow DRAM. This event only tracks non-ISOC queue entries.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_M_RPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Read Pending Queue is not empty. This can then be used to calculate the average occupancy (in conjunction with the Read Pending Queue Occupancy count). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This filter is to be used in conjunction with the occupancy filter so that one can correctly track the average occupancies for schedulable entries and scheduled requests.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_M_RPQ_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Occupancy",
"Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M_RPQ_OCCUPANCY",
"PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancies of the Read Pending Queue each cycle. This can then be used to calculate both the average occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Full Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_M_WPQ_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the Write Pending Queue is full. When the WPQ is full, the HA will not be able to issue any additional read requests into the iMC. This count should be similar count in the HA which tracks the number of cycles that the HA has no WPQ credits, just somewhat smaller to account for the credit return overhead.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_M_WPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Write Pending Queue is not empty. This can then be used to calculate the average queue occupancy (in conjunction with the WPQ Occupancy Accumulation count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have 'posted' to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_M_WPQ_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the Write Pending Queue. This can then be used to calculate the average queuing latency (in conjunction with the WPQ occupancy count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have 'posted' to the iMC.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x81",
+ "EventName": "UNC_M_WPQ_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancies of the Write Pending Queue each cycle. This can then be used to calculate both the average queue occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have 'posted' to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies. So, we provide filtering based on if the request has posted or not. By using the 'not posted' filter, we can track how long writes spent in the iMC before completions were sent to the HA. The 'posted' filter, on the other hand, provides information about how much queueing is actually happenning in the iMC for writes before they are actually issued to memory. High average occupancies will generally coincide with high write major mode counts.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_M_WPQ_READ_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_M_WPQ_WRITE_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
"Unit": "iMC"
}
]
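
The UNC_M_ACT_COUNT description above suggests one derived number: page misses can be estimated by subtracting the page-miss precharges (UNC_M_PRE_COUNT.PAGE_MISS) from the activates. A minimal Python sketch of that subtraction follows; the sample counts are hypothetical and the helper name is purely illustrative.

    # Page-miss estimate as described under UNC_M_ACT_COUNT above.
    # The values below are invented, not taken from a real run.
    def estimated_page_misses(activates, page_miss_precharges):
        return activates - page_miss_precharges

    print(estimated_page_misses(1_200_000, 300_000))   # -> 900000
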
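Several of the pending-queue descriptions above (UNC_M_RPQ_* and UNC_M_WPQ_*) point at the same two derived metrics: average occupancy is the occupancy accumulation divided by the not-empty cycles, and average latency in iMC clocks is the occupancy accumulation divided by the number of allocations. A second sketch, again with made-up counts and illustrative helper names:

    # Average-occupancy / average-latency arithmetic implied by the RPQ and
    # WPQ descriptions above (the occupancy event accumulates one count per
    # queued entry per cycle).
    def average_occupancy(occupancy_sum, cycles_not_empty):
        # e.g. UNC_M_RPQ_OCCUPANCY / UNC_M_RPQ_CYCLES_NE
        return occupancy_sum / cycles_not_empty if cycles_not_empty else 0.0

    def average_latency_cycles(occupancy_sum, inserts):
        # e.g. UNC_M_RPQ_OCCUPANCY / UNC_M_RPQ_INSERTS
        return occupancy_sum / inserts if inserts else 0.0

    # Hypothetical counts over one measurement interval.
    occ, not_empty, inserts = 5_000_000, 400_000, 250_000
    print(average_occupancy(occ, not_empty))       # 12.5 entries while not empty
    print(average_latency_cycles(occ, inserts))    # 20.0 uclks per request
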
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/uncore-other.json b/tools/perf/pmu-events/arch/x86/jaketown/uncore-other.json
new file mode 100644
index 000000000000..588549a668bd
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/jaketown/uncore-other.json
@@ -0,0 +1,1538 @@
+[
+ {
+ "BriefDescription": "Address Match (Conflict) Count; Conflict Merges",
+ "Counter": "0,1",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_ADDRESS_MATCH.MERGE_COUNT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when an inbound write (from a device to memory or another device) had an address match with another request in the write cache.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Address Match (Conflict) Count; Conflict Stalls",
+ "Counter": "0,1",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_ADDRESS_MATCH.STALL_COUNT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when an inbound write (from a device to memory or another device) had an address match with another request in the write cache.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Write Ack Pending Occupancy; Any Source",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_CACHE_ACK_PENDING_OCCUPANCY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of writes that have acquired ownership but have not yet returned their data to the uncore. These writes are generally queued up in the switch trying to get to the head of their queues so that they can post their data. The queue occuapancy increments when the ACK is received, and decrements when either the data is returned OR a tickle is received and ownership is released. Note that a single tickle can result in multiple decrements.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Write Ack Pending Occupancy; Select Source",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_CACHE_ACK_PENDING_OCCUPANCY.SOURCE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of writes that have acquired ownership but have not yet returned their data to the uncore. These writes are generally queued up in the switch trying to get to the head of their queues so that they can post their data. The queue occuapancy increments when the ACK is received, and decrements when either the data is returned OR a tickle is received and ownership is released. Note that a single tickle can result in multiple decrements.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outstanding Write Ownership Occupancy; Any Source",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_CACHE_OWN_OCCUPANCY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of writes (and write prefetches) that are outstanding in the uncore trying to acquire ownership in each cycle. This can be used with the write transaction count to calculate the average write latency in the uncore. The occupancy increments when a write request is issued, and decrements when the data is returned.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outstanding Write Ownership Occupancy; Select Source",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_CACHE_OWN_OCCUPANCY.SOURCE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of writes (and write prefetches) that are outstanding in the uncore trying to acquire ownership in each cycle. This can be used with the write transaction count to calculate the average write latency in the uncore. The occupancy increments when a write request is issued, and decrements when the data is returned.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outstanding Read Occupancy; Any Source",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_CACHE_READ_OCCUPANCY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of reads that are outstanding in the uncore in each cycle. This can be used with the read transaction count to calculate the average read latency in the uncore. The occupancy increments when a read request is issued, and decrements when the data is returned.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outstanding Read Occupancy; Select Source",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_CACHE_READ_OCCUPANCY.SOURCE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of reads that are outstanding in the uncore in each cycle. This can be used with the read transaction count to calculate the average read latency in the uncore. The occupancy increments when a read request is issued, and decrements when the data is returned.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Total Write Cache Occupancy; Any Source",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Total Write Cache Occupancy; Select Source",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.SOURCE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outstanding Write Occupancy; Any Source",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_CACHE_WRITE_OCCUPANCY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of writes (and write prefetches) that are outstanding in the uncore in each cycle. This can be used with the transaction count event to calculate the average latency in the uncore. The occupancy increments when the ownership fetch/prefetch is issued, and decrements the data is returned to the uncore.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outstanding Write Occupancy; Select Source",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_CACHE_WRITE_OCCUPANCY.SOURCE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of writes (and write prefetches) that are outstanding in the uncore in each cycle. This can be used with the transaction count event to calculate the average latency in the uncore. The occupancy increments when the ownership fetch/prefetch is issued, and decrements the data is returned to the uncore.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Clocks in the IRP",
+ "Counter": "0,1",
+ "EventName": "UNC_I_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of clocks in the IRP.",
+ "Unit": "IRP"
+ },
+ {
+ "Counter": "0,1",
+ "EventCode": "0xB",
+ "EventName": "UNC_I_RxR_AK_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the AK Ingress is full. This queue is where the IRP receives responses from R2PCIe (the ring).",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "AK Ingress Occupancy",
+ "Counter": "0,1",
+ "EventCode": "0xA",
+ "EventName": "UNC_I_RxR_AK_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the AK Ingress. This queue is where the IRP receives responses from R2PCIe (the ring).",
+ "Unit": "IRP"
+ },
+ {
+ "Counter": "0,1",
+ "EventCode": "0xC",
+ "EventName": "UNC_I_RxR_AK_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the AK Ingress in each cycles. This queue is where the IRP receives responses from R2PCIe (the ring).",
+ "Unit": "IRP"
+ },
+ {
+ "Counter": "0,1",
+ "EventCode": "0x4",
+ "EventName": "UNC_I_RxR_BL_DRS_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL Ingress Occupancy - DRS",
+ "Counter": "0,1",
+ "EventCode": "0x1",
+ "EventName": "UNC_I_RxR_BL_DRS_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "Counter": "0,1",
+ "EventCode": "0x7",
+ "EventName": "UNC_I_RxR_BL_DRS_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "Counter": "0,1",
+ "EventCode": "0x5",
+ "EventName": "UNC_I_RxR_BL_NCB_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL Ingress Occupancy - NCB",
+ "Counter": "0,1",
+ "EventCode": "0x2",
+ "EventName": "UNC_I_RxR_BL_NCB_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "Counter": "0,1",
+ "EventCode": "0x8",
+ "EventName": "UNC_I_RxR_BL_NCB_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "Counter": "0,1",
+ "EventCode": "0x6",
+ "EventName": "UNC_I_RxR_BL_NCS_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL Ingress Occupancy - NCS",
+ "Counter": "0,1",
+ "EventCode": "0x3",
+ "EventName": "UNC_I_RxR_BL_NCS_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "Counter": "0,1",
+ "EventCode": "0x9",
+ "EventName": "UNC_I_RxR_BL_NCS_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Tickle Count; Ownership Lost",
+ "Counter": "0,1",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TICKLES.LOST_OWNERSHIP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of tickles that are received. This is for both explicit (from Cbo) and implicit (internal conflict) tickles.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Tickle Count; Data Returned",
+ "Counter": "0,1",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TICKLES.TOP_OF_QUEUE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of tickles that are received. This is for both explicit (from Cbo) and implicit (internal conflict) tickles.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Read Prefetches",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_TRANSACTIONS.PD_PREFETCHES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of 'Inbound' transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Reads",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_TRANSACTIONS.READS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of 'Inbound' transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Writes",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_TRANSACTIONS.WRITES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of 'Inbound' transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No AD Egress Credit Stalls",
+ "Counter": "0,1",
+ "EventCode": "0x18",
+ "EventName": "UNC_I_TxR_AD_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number times when it is not possible to issue a request to the R2PCIe because there are no AD Egress Credits available.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No BL Egress Credit Stalls",
+ "Counter": "0,1",
+ "EventCode": "0x19",
+ "EventName": "UNC_I_TxR_BL_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number times when it is not possible to issue data to the R2PCIe because there are no BL Egress Credits available.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
+ "EventCode": "0xE",
+ "EventName": "UNC_I_TxR_DATA_INSERTS_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of requests issued to the switch (towards the devices).",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
+ "EventCode": "0xF",
+ "EventName": "UNC_I_TxR_DATA_INSERTS_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of requests issued to the switch (towards the devices).",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Request Queue Occupancy",
+ "Counter": "0,1",
+ "EventCode": "0xD",
+ "EventName": "UNC_I_TxR_REQUEST_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumultes the number of outstanding outbound requests from the IRP to the switch (towards the devices). This can be used in conjuection with the allocations event in order to calculate average latency of outbound requests.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Write Ordering Stalls",
+ "Counter": "0,1",
+ "EventCode": "0x1A",
+ "EventName": "UNC_I_WRITE_ORDERING_STALL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are pending write ACK's in the switch but the switch->IRP pipeline is not utilized.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Number of uclks in domain",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_R2_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of uclks in the R2PCIe uclk domain. This could be slightly different than the count in the Ubox because of enable/freeze delays. However, because the R2PCIe is close to the Ubox, they generally should not diverge by more than a handful of cycles.",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credit Acquired; DRS",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of credits that are acquired in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credit Acquired; NCB",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of credits that are acquired in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credit Acquired; NCS",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of credits that are acquired in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Failed to Acquire a Credit; DRS",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_R2_IIO_CREDITS_REJECT.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Failed to Acquire a Credit; NCB",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_R2_IIO_CREDITS_REJECT.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Failed to Acquire a Credit; NCS",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_R2_IIO_CREDITS_REJECT.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credits in Use; DRS",
+ "Counter": "0,1",
+ "EventCode": "0x32",
+ "EventName": "UNC_R2_IIO_CREDITS_USED.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when one or more credits in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credits in Use; NCB",
+ "Counter": "0,1",
+ "EventCode": "0x32",
+ "EventName": "UNC_R2_IIO_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when one or more credits in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credits in Use; NCS",
+ "Counter": "0,1",
+ "EventCode": "0x32",
+ "EventName": "UNC_R2_IIO_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when one or more credits in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 IV Ring in Use; Any",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xa",
+ "EventName": "UNC_R2_RING_IV_USED.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sunk into the ring stop. The IV ring is unidirectional. Whether UP or DN is used is dependent on the system programming. Thereofore, one should generally set both the UP and DN bits for a given polarity (or both) at a given time.",
+ "UMask": "0xf",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "AK Ingress Bounced",
+ "EventCode": "0x12",
+ "EventName": "UNC_R2_RxR_AK_BOUNCES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a request destined for the AK ingress bounced.",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; DRS",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_R2_RxR_CYCLES_NE.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Ingress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; NCB",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_R2_RxR_CYCLES_NE.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Ingress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; NCS",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_R2_RxR_CYCLES_NE.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Ingress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Full; AD",
+ "EventCode": "0x25",
+ "EventName": "UNC_R2_TxR_CYCLES_FULL.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress buffer is full.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Full; AK",
+ "EventCode": "0x25",
+ "EventName": "UNC_R2_TxR_CYCLES_FULL.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress buffer is full.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Full; BL",
+ "EventCode": "0x25",
+ "EventName": "UNC_R2_TxR_CYCLES_FULL.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress buffer is full.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Not Empty; AD",
+ "EventCode": "0x23",
+ "EventName": "UNC_R2_TxR_CYCLES_NE.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Not Empty; AK",
+ "EventCode": "0x23",
+ "EventName": "UNC_R2_TxR_CYCLES_NE.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Not Empty; BL",
+ "EventCode": "0x23",
+ "EventName": "UNC_R2_TxR_CYCLES_NE.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress NACK; AD",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACKS.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that the Egress received a NACK from the ring and could not issue a transaction.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress NACK; AK",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACKS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that the Egress received a NACK from the ring and could not issue a transaction.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress NACK; BL",
+ "Counter": "0,1",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACKS.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that the Egress received a NACK from the ring and could not issue a transaction.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Number of uclks in domain",
+ "Counter": "0,1,2",
+ "EventCode": "0x1",
+ "EventName": "UNC_R3_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of uclks in the QPI uclk domain. This could be slightly different than the count in the Ubox because of enable/freeze delays. However, because the QPI Agent is close to the Ubox, they generally should not diverge by more than a handful of cycles.",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "to IIO BL Credit Acquired",
+ "Counter": "0,1",
+ "EventCode": "0x20",
+ "EventName": "UNC_R3_IIO_CREDITS_ACQUIRED.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the NCS/NCB/DRS credit is acquried in the QPI for sending messages on BL to the IIO. There is one credit for each of these three message classes (three credits total). NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions). This event can only track one message class at a time.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "to IIO BL Credit Acquired",
+ "Counter": "0,1",
+ "EventCode": "0x20",
+ "EventName": "UNC_R3_IIO_CREDITS_ACQUIRED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the NCS/NCB/DRS credit is acquried in the QPI for sending messages on BL to the IIO. There is one credit for each of these three message classes (three credits total). NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions). This event can only track one message class at a time.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "to IIO BL Credit Acquired",
+ "Counter": "0,1",
+ "EventCode": "0x20",
+ "EventName": "UNC_R3_IIO_CREDITS_ACQUIRED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the NCS/NCB/DRS credit is acquried in the QPI for sending messages on BL to the IIO. There is one credit for each of these three message classes (three credits total). NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions). This event can only track one message class at a time.",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "to IIO BL Credit Rejected",
+ "Counter": "0,1",
+ "EventCode": "0x21",
+ "EventName": "UNC_R3_IIO_CREDITS_REJECT.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request attempted to acquire an NCS/NCB/DRS credit in the QPI for sending messages on BL to the IIO but was rejected because no credit was available. There is one credit for each of these three message classes (three credits total). NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions). This event can only track one message class at a time.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "to IIO BL Credit Rejected",
+ "Counter": "0,1",
+ "EventCode": "0x21",
+ "EventName": "UNC_R3_IIO_CREDITS_REJECT.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request attempted to acquire an NCS/NCB/DRS credit in the QPI for sending messages on BL to the IIO but was rejected because no credit was available. There is one credit for each of these three message classes (three credits total). NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions). This event can only track one message class at a time.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "to IIO BL Credit Rejected",
+ "Counter": "0,1",
+ "EventCode": "0x21",
+ "EventName": "UNC_R3_IIO_CREDITS_REJECT.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request attempted to acquire an NCS/NCB/DRS credit in the QPI for sending messages on BL to the IIO but was rejected because no credit was available. There is one credit for each of these three message classes (three credits total). NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions). This event can only track one message class at a time.",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "to IIO BL Credit In Use",
+ "Counter": "0,1",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_IIO_CREDITS_USED.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the NCS/NCB/DRS credit is in use in the QPI for sending messages on BL to the IIO. There is one credit for each of these three message classes (three credits total). NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions). This event can only track one message class at a time.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "to IIO BL Credit In Use",
+ "Counter": "0,1",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_IIO_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the NCS/NCB/DRS credit is in use in the QPI for sending messages on BL to the IIO. There is one credit for each of these three message classes (three credits total). NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions). This event can only track one message class at a time.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "to IIO BL Credit In Use",
+ "Counter": "0,1",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_IIO_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the NCS/NCB/DRS credit is in use in the QPI for sending messages on BL to the IIO. There is one credit for each of these three message classes (three credits total). NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions). This event can only track one message class at a time.",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Counterclockwise and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Counterclockwise and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Clockwise and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Clockwise and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 IV Ring in Use; Any",
+ "Counter": "0,1,2",
+ "EventCode": "0xa",
+ "EventName": "UNC_R3_RING_IV_USED.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. The IV ring is unidirectional. Whether UP or DN is used is dependent on the system programming. Thereofore, one should generally set both the UP and DN bits for a given polarity (or both) at a given time.",
+ "UMask": "0xf",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Bypassed",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_R3_RxR_BYPASSED.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when the Ingress was bypassed and an incoming transaction was bypassed directly across the BGF and into the qfclk domain.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; DRS",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_R3_RxR_CYCLES_NE.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; HOM",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_R3_RxR_CYCLES_NE.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; NCB",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_R3_RxR_CYCLES_NE.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; NCS",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_R3_RxR_CYCLES_NE.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; NDR",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_R3_RxR_CYCLES_NE.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; SNP",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_R3_RxR_CYCLES_NE.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; DRS",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; HOM",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NCB",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NCS",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NDR",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; SNP",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy Accumulator; DRS",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy Accumulator; HOM",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy Accumulator; NCB",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy Accumulator; NCS",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy Accumulator; NDR",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy Accumulator; SNP",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; DRS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; HOM Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; NCB Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; NCS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; NDR Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; SNP Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; DRS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; HOM Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; NCB Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; NCS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; NDR Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; SNP Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA credit Acquisitions",
+ "Counter": "0,1",
+ "EventCode": "0x33",
+ "EventName": "UNC_R3_VNA_CREDITS_ACQUIRED",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI VNA Credit acquisitions. This event can be used in conjunction with the VNA In-Use Accumulator to calculate the average lifetime of a credit holder. VNA credits are used by all message classes in order to communicate across QPI. If a packet is unable to acquire credits, it will then attempt to use credts from the VN0 pool. Note that a single packet may require multiple flit buffers (i.e. when data is being transfered). Therefore, this event will increment by the number of credits acquired in each cycle. Filtering based on message class is not provided. One can count the number of packets transfered in a given message class using an qfclk event.",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; DRS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; HOM Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; NCB Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; NCS Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; NDR Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; SNP Message Class",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Cycles with no VNA credits available",
+ "Counter": "0,1",
+ "EventCode": "0x31",
+ "EventName": "UNC_R3_VNA_CREDIT_CYCLES_OUT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI uclk cycles when the transmitted has no VNA credits available and therefore cannot send any requests on this channel. Note that this does not mean that no flits can be transmitted, as those holding VN0 credits will still (potentially) be able to transmit. Generally it is the goal of the uncore that VNA credits should not run out, as this can substantially throttle back useful QPI bandwidth.",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Cycles with 1 or more VNA credits in use",
+ "Counter": "0,1",
+ "EventCode": "0x32",
+ "EventName": "UNC_R3_VNA_CREDIT_CYCLES_USED",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI uclk cycles with one or more VNA credits in use. This event can be used in conjunction with the VNA In-Use Accumulator to calculate the average number of used VNA credits.",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VLW Received",
+ "Counter": "0,1",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.DOORBELL_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "VLW Received",
+ "Counter": "0,1",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.INT_PRIO",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "VLW Received",
+ "Counter": "0,1",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.IPI_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "VLW Received",
+ "Counter": "0,1",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.MSI_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "VLW Received",
+ "Counter": "0,1",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.VLW_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "Counter": "0,1",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.DISABLE",
+ "PerPkg": "1",
+ "PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "Counter": "0,1",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.ENABLE",
+ "PerPkg": "1",
+ "PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "Counter": "0,1",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.U2C_DISABLE",
+ "PerPkg": "1",
+ "PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "Counter": "0,1",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.U2C_ENABLE",
+ "PerPkg": "1",
+ "PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "IDI Lock/SplitLock Cycles",
+ "Counter": "0,1",
+ "EventCode": "0x44",
+ "EventName": "UNC_U_LOCK_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times an IDI Lock/SplitLock sequence was started",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "MsgCh Requests by Size; 4B Requests",
+ "Counter": "0,1",
+ "EventCode": "0x47",
+ "EventName": "UNC_U_MSG_CHNL_SIZE_COUNT.4B",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of transactions on the message channel filtered by request size. This includes both reads and writes.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "MsgCh Requests by Size; 8B Requests",
+ "Counter": "0,1",
+ "EventCode": "0x47",
+ "EventName": "UNC_U_MSG_CHNL_SIZE_COUNT.8B",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of transactions on the message channel filtered by request size. This includes both reads and writes.",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Cycles PHOLD Assert to Ack; ACK to Deassert",
+ "Counter": "0,1",
+ "EventCode": "0x45",
+ "EventName": "UNC_U_PHOLD_CYCLES.ACK_TO_DEASSERT",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "PHOLD cycles. Filter from source CoreID.",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Cycles PHOLD Assert to Ack; Assert to ACK",
+ "Counter": "0,1",
+ "EventCode": "0x45",
+ "EventName": "UNC_U_PHOLD_CYCLES.ASSERT_TO_ACK",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "PHOLD cycles. Filter from source CoreID.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "RACU Request",
+ "Counter": "0,1",
+ "EventCode": "0x46",
+ "EventName": "UNC_U_RACU_REQUESTS.COUNT",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Correctable Machine Check",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.CMC",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Livelock",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.LIVELOCK",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; LTError",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.LTERROR",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Monitor T0",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.MONITOR_T0",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Monitor T1",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.MONITOR_T1",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Other",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.OTHER",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x80",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Trap",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.TRAP",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x40",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Uncorrectable Machine Check",
+ "Counter": "0,1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.UMC",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x20",
+ "Unit": "UBOX"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json b/tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json
index 8755693d86c6..817ea6d7f785 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json
@@ -1,272 +1,361 @@
[
{
- "BriefDescription": "PCU clock ticks. Use to get percentages of PCU cycles events",
+ "BriefDescription": "pclk Cycles",
"Counter": "0,1,2,3",
"EventName": "UNC_P_CLOCKTICKS",
"PerPkg": "1",
+ "PublicDescription": "The PCU runs off a fixed 800 MHz clock. This event counts the number of pclk cycles measured while the counter was enabled. The pclk, like the Memory Controller's dclk, counts at a constant rate making it a good measure of actual wall time.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. (filter_band0=XXX with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency",
+ "BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0xb",
- "EventName": "UNC_P_FREQ_BAND0_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_BAND0_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band0_cycles %",
+ "EventCode": "0x3",
+ "EventName": "UNC_P_CORE0_TRANSITION_CYCLES",
+ "ExtSel": "1",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. (filter_band1=XXX with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency",
+ "BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0xc",
- "EventName": "UNC_P_FREQ_BAND1_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_BAND1_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band1_cycles %",
+ "EventCode": "0x4",
+ "EventName": "UNC_P_CORE1_TRANSITION_CYCLES",
+ "ExtSel": "1",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. (filter_band2=XXX with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency",
+ "BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0xd",
- "EventName": "UNC_P_FREQ_BAND2_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_BAND2_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band2_cycles %",
+ "EventCode": "0x5",
+ "EventName": "UNC_P_CORE2_TRANSITION_CYCLES",
+ "ExtSel": "1",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. (filter_band3=XXX, with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency",
+ "BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0xe",
- "EventName": "UNC_P_FREQ_BAND3_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_BAND3_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band3_cycles %",
+ "EventCode": "0x6",
+ "EventName": "UNC_P_CORE3_TRANSITION_CYCLES",
+ "ExtSel": "1",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of times that the uncore transitioned a frequency greater than or equal to the frequency that is configured in the filter. (filter_band0=XXX with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band0_cycles",
+ "BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0xb",
- "EventName": "UNC_P_FREQ_BAND0_TRANSITIONS",
- "Filter": "edge=1",
- "MetricExpr": "(UNC_P_FREQ_BAND0_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band0_cycles %",
+ "EventCode": "0x7",
+ "EventName": "UNC_P_CORE4_TRANSITION_CYCLES",
+ "ExtSel": "1",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of times that the uncore transistioned to a frequency greater than or equal to the frequency that is configured in the filter. (filter_band1=XXX with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band1_cycles",
+ "BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0xc",
- "EventName": "UNC_P_FREQ_BAND1_TRANSITIONS",
- "Filter": "edge=1",
- "MetricExpr": "(UNC_P_FREQ_BAND1_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band1_cycles %",
+ "EventCode": "0x8",
+ "EventName": "UNC_P_CORE5_TRANSITION_CYCLES",
+ "ExtSel": "1",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore transitioned to a frequency greater than or equal to the frequency that is configured in the filter. (filter_band2=XXX with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band2_cycles",
+ "BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0xd",
- "EventName": "UNC_P_FREQ_BAND2_TRANSITIONS",
- "Filter": "edge=1",
- "MetricExpr": "(UNC_P_FREQ_BAND2_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band2_cycles %",
+ "EventCode": "0x9",
+ "EventName": "UNC_P_CORE6_TRANSITION_CYCLES",
+ "ExtSel": "1",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore transitioned to a frequency greater than or equal to the frequency that is configured in the filter. (filter_band3=XXX, with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band3_cycles",
+ "BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0xe",
- "EventName": "UNC_P_FREQ_BAND3_TRANSITIONS",
- "Filter": "edge=1",
- "MetricExpr": "(UNC_P_FREQ_BAND3_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band3_cycles %",
+ "EventCode": "0xa",
+ "EventName": "UNC_P_CORE7_TRANSITION_CYCLES",
+ "ExtSel": "1",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "This is an occupancy event that tracks the number of cores that are in C0. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details",
+ "BriefDescription": "Core C State Demotions",
"Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
- "Filter": "occ_sel=1",
- "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C0 / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "power_state_occupancy.cores_c0 %",
+ "EventCode": "0x1e",
+ "EventName": "UNC_P_DEMOTIONS_CORE0",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
"Unit": "PCU"
},
{
- "BriefDescription": "This is an occupancy event that tracks the number of cores that are in C3. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details",
+ "BriefDescription": "Core C State Demotions",
"Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
- "Filter": "occ_sel=2",
- "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C3 / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "power_state_occupancy.cores_c3 %",
+ "EventCode": "0x1f",
+ "EventName": "UNC_P_DEMOTIONS_CORE1",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
"Unit": "PCU"
},
{
- "BriefDescription": "This is an occupancy event that tracks the number of cores that are in C6. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events ",
+ "BriefDescription": "Core C State Demotions",
"Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
- "Filter": "occ_sel=3",
- "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C6 / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "power_state_occupancy.cores_c6 %",
+ "EventCode": "0x20",
+ "EventName": "UNC_P_DEMOTIONS_CORE2",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip",
+ "BriefDescription": "Core C State Demotions",
"Counter": "0,1,2,3",
- "EventCode": "0xa",
- "EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
- "MetricExpr": "(UNC_P_PROCHOT_EXTERNAL_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "prochot_external_cycles %",
+ "EventCode": "0x21",
+ "EventName": "UNC_P_DEMOTIONS_CORE3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_P_DEMOTIONS_CORE4",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_P_DEMOTIONS_CORE5",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_P_DEMOTIONS_CORE6",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x25",
+ "EventName": "UNC_P_DEMOTIONS_CORE7",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Frequency Residency",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xb",
+ "EventName": "UNC_P_FREQ_BAND0_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Frequency Residency",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xc",
+ "EventName": "UNC_P_FREQ_BAND1_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when temperature is the upper limit on frequency",
+ "BriefDescription": "Frequency Residency",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xd",
+ "EventName": "UNC_P_FREQ_BAND2_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Frequency Residency",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xe",
+ "EventName": "UNC_P_FREQ_BAND3_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Current Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_P_FREQ_MAX_CURRENT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when current is the upper limit on frequency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Thermal Strongest Upper Limit Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_max_limit_thermal_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when thermal conditions are the upper limit on frequency. This is related to the THERMAL_THROTTLE CYCLES_ABOVE_TEMP event, which always counts cycles when we are above the thermal temperature. This event (STRONGEST_UPPER_LIMIT) is sampled at the output of the algorithm that determines the actual frequency, while THERMAL_THROTTLE looks at the input.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when the OS is the upper limit on frequency",
+ "BriefDescription": "OS Strongest Upper Limit Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_P_FREQ_MAX_OS_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_MAX_OS_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_max_os_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the OS is the upper limit on frequency.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when power is the upper limit on frequency",
+ "BriefDescription": "Power Strongest Upper Limit Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_MAX_POWER_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_max_power_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when power is the upper limit on frequency.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when current is the upper limit on frequency",
+ "BriefDescription": "IO P Limit Strongest Lower Limit Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0x7",
- "EventName": "UNC_P_FREQ_MAX_CURRENT_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_MAX_CURRENT_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_max_current_cycles %",
+ "EventCode": "0x1",
+ "EventName": "UNC_P_FREQ_MIN_IO_P_CYCLES",
+ "ExtSel": "1",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when IO P Limit is preventing us from dropping the frequency lower. This algorithm monitors the needs to the IO subsystem on both local and remote sockets and will maintain a frequency high enough to maintain good IO BW. This is necessary for when all the IA cores on a socket are idle but a user still would like to maintain high IO Bandwidth.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Perf P Limit Strongest Lower Limit Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_P_FREQ_MIN_PERF_P_CYCLES",
+ "ExtSel": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when Perf P Limit is preventing us from dropping the frequency lower. Perf P Limit is an algorithm that takes input from remote sockets when determining if a socket should drop it's frequency down. This is largely to minimize increases in snoop and remote read latencies.",
"Unit": "PCU"
},
{
"BriefDescription": "Cycles spent changing Frequency",
"Counter": "0,1,2,3",
"EventName": "UNC_P_FREQ_TRANS_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_TRANS_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_trans_cycles %",
+ "ExtSel": "1",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the system is changing frequency. This can not be filtered by thread ID. One can also use it with the occupancy counter that monitors number of threads in C0 to estimate the performance impact that frequency transitions had on the system.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to 1.2Ghz. Derived from unc_p_freq_band0_cycles",
+ "BriefDescription": "Memory Phase Shedding Cycles",
"Counter": "0,1,2,3",
- "EventCode": "0xb",
- "EventName": "UNC_P_FREQ_GE_1200MHZ_CYCLES",
- "Filter": "filter_band0=12",
- "MetricExpr": "(UNC_P_FREQ_GE_1200MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_1200mhz_cycles %",
+ "EventCode": "0x2f",
+ "EventName": "UNC_P_MEMORY_PHASE_SHEDDING_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the PCU has triggered memory phase shedding. This is a mode that can be run in the iMC physicals that saves power at the expense of additional latency.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to 2Ghz. Derived from unc_p_freq_band1_cycles",
+ "BriefDescription": "Number of cores in C0",
"Counter": "0,1,2,3",
- "EventCode": "0xc",
- "EventName": "UNC_P_FREQ_GE_2000MHZ_CYCLES",
- "Filter": "filter_band1=20",
- "MetricExpr": "(UNC_P_FREQ_GE_2000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_2000mhz_cycles %",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
"PerPkg": "1",
+ "PublicDescription": "This is an occupancy event that tracks the number of cores that are in C0. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to 3Ghz. Derived from unc_p_freq_band2_cycles",
+ "BriefDescription": "Number of cores in C0",
"Counter": "0,1,2,3",
- "EventCode": "0xd",
- "EventName": "UNC_P_FREQ_GE_3000MHZ_CYCLES",
- "Filter": "filter_band2=30",
- "MetricExpr": "(UNC_P_FREQ_GE_3000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_3000mhz_cycles %",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
"PerPkg": "1",
+ "PublicDescription": "This is an occupancy event that tracks the number of cores that are in C0. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to 4Ghz. Derived from unc_p_freq_band3_cycles",
+ "BriefDescription": "Number of cores in C0",
"Counter": "0,1,2,3",
- "EventCode": "0xe",
- "EventName": "UNC_P_FREQ_GE_4000MHZ_CYCLES",
- "Filter": "filter_band3=40",
- "MetricExpr": "(UNC_P_FREQ_GE_4000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_4000mhz_cycles %",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
"PerPkg": "1",
+ "PublicDescription": "This is an occupancy event that tracks the number of cores that are in C0. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of times that the uncore transitioned to a frequency greater than or equal to 1.2Ghz. Derived from unc_p_freq_band0_cycles",
+ "BriefDescription": "External Prochot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xa",
+ "EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Internal Prochot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_P_PROCHOT_INTERNAL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that we are in Interal PROCHOT mode. This mode is triggered when a sensor on the die determines that we are too hot and must throttle to avoid damaging the chip.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Total Core C State Transition Cycles",
"Counter": "0,1,2,3",
"EventCode": "0xb",
- "EventName": "UNC_P_FREQ_GE_1200MHZ_TRANSITIONS",
- "Filter": "edge=1,filter_band0=12",
- "MetricExpr": "(UNC_P_FREQ_GE_1200MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_1200mhz_cycles %",
+ "EventName": "UNC_P_TOTAL_TRANSITION_CYCLES",
+ "ExtSel": "1",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions across all cores.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of times that the uncore transitioned to a frequency greater than or equal to 2Ghz. Derived from unc_p_freq_band1_cycles",
+ "BriefDescription": "Cycles Changing Voltage",
"Counter": "0,1,2,3",
- "EventCode": "0xc",
- "EventName": "UNC_P_FREQ_GE_2000MHZ_TRANSITIONS",
- "Filter": "edge=1,filter_band1=20",
- "MetricExpr": "(UNC_P_FREQ_GE_2000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_2000mhz_cycles %",
+ "EventCode": "0x3",
+ "EventName": "UNC_P_VOLT_TRANS_CYCLES_CHANGE",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the system is changing voltage. There is no filtering supported with this event. One can use it as a simple event, or use it conjunction with the occupancy events to monitor the number of cores or threads that were impacted by the transition. This event is calculated by or'ing together the increasing and decreasing events.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore transitioned to a frequency greater than or equal to 3Ghz. Derived from unc_p_freq_band2_cycles",
+ "BriefDescription": "Cycles Decreasing Voltage",
"Counter": "0,1,2,3",
- "EventCode": "0xd",
- "EventName": "UNC_P_FREQ_GE_3000MHZ_TRANSITIONS",
- "Filter": "edge=1,filter_band2=30",
- "MetricExpr": "(UNC_P_FREQ_GE_3000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_3000mhz_cycles %",
+ "EventCode": "0x2",
+ "EventName": "UNC_P_VOLT_TRANS_CYCLES_DECREASE",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the system is decreasing voltage. There is no filtering supported with this event. One can use it as a simple event, or use it conjunction with the occupancy events to monitor the number of cores or threads that were impacted by the transition.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore transitioned to a frequency greater than or equal to 4Ghz. Derived from unc_p_freq_band3_cycles",
+ "BriefDescription": "Cycles Increasing Voltage",
"Counter": "0,1,2,3",
- "EventCode": "0xe",
- "EventName": "UNC_P_FREQ_GE_4000MHZ_TRANSITIONS",
- "Filter": "edge=1,filter_band3=40",
- "MetricExpr": "(UNC_P_FREQ_GE_4000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_4000mhz_cycles %",
+ "EventCode": "0x1",
+ "EventName": "UNC_P_VOLT_TRANS_CYCLES_INCREASE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the system is increasing voltage. There is no filtering supported with this event. One can use it as a simple event, or use it conjunction with the occupancy events to monitor the number of cores or threads that were impacted by the transition.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "VR Hot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_P_VR_HOT_CYCLES",
"PerPkg": "1",
"Unit": "PCU"
}
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/virtual-memory.json b/tools/perf/pmu-events/arch/x86/jaketown/virtual-memory.json
index 4dd136d00a10..98362abba1a7 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/virtual-memory.json
@@ -146,4 +146,4 @@
"SampleAfterValue": "100007",
"UMask": "0x20"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/cache.json b/tools/perf/pmu-events/arch/x86/knightslanding/cache.json
index 1bd50b186e93..5e10eabda300 100644
--- a/tools/perf/pmu-events/arch/x86/knightslanding/cache.json
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/cache.json
@@ -2300,4 +2300,4 @@
"SampleAfterValue": "100007",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/floating-point.json b/tools/perf/pmu-events/arch/x86/knightslanding/floating-point.json
index 5fce5020efa1..ff5db600e420 100644
--- a/tools/perf/pmu-events/arch/x86/knightslanding/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/floating-point.json
@@ -26,4 +26,4 @@
"SampleAfterValue": "200003",
"UMask": "0x20"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/frontend.json b/tools/perf/pmu-events/arch/x86/knightslanding/frontend.json
index d075ab594d75..63343a0d1e86 100644
--- a/tools/perf/pmu-events/arch/x86/knightslanding/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/frontend.json
@@ -55,4 +55,4 @@
"SampleAfterValue": "200003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/memory.json b/tools/perf/pmu-events/arch/x86/knightslanding/memory.json
index 5e6ca6896af1..2611defaeaa2 100644
--- a/tools/perf/pmu-events/arch/x86/knightslanding/memory.json
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/memory.json
@@ -1107,4 +1107,4 @@
"SampleAfterValue": "100007",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json b/tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json
index 8f4213e5fbfd..1f13bc2686cb 100644
--- a/tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json
@@ -374,4 +374,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/uncore-memory.json b/tools/perf/pmu-events/arch/x86/knightslanding/uncore-memory.json
deleted file mode 100644
index e3bcd86c4f56..000000000000
--- a/tools/perf/pmu-events/arch/x86/knightslanding/uncore-memory.json
+++ /dev/null
@@ -1,42 +0,0 @@
-[
- {
- "BriefDescription": "ddr bandwidth read (CPU traffic only) (MB/sec). ",
- "Counter": "0,1,2,3",
- "EventCode": "0x03",
- "EventName": "UNC_M_CAS_COUNT.RD",
- "PerPkg": "1",
- "ScaleUnit": "6.4e-05MiB",
- "UMask": "0x01",
- "Unit": "imc"
- },
- {
- "BriefDescription": "ddr bandwidth write (CPU traffic only) (MB/sec). ",
- "Counter": "0,1,2,3",
- "EventCode": "0x03",
- "EventName": "UNC_M_CAS_COUNT.WR",
- "PerPkg": "1",
- "ScaleUnit": "6.4e-05MiB",
- "UMask": "0x02",
- "Unit": "imc"
- },
- {
- "BriefDescription": "mcdram bandwidth read (CPU traffic only) (MB/sec). ",
- "Counter": "0,1,2,3",
- "EventCode": "0x01",
- "EventName": "UNC_E_RPQ_INSERTS",
- "PerPkg": "1",
- "ScaleUnit": "6.4e-05MiB",
- "UMask": "0x01",
- "Unit": "edc_eclk"
- },
- {
- "BriefDescription": "mcdram bandwidth write (CPU traffic only) (MB/sec). ",
- "Counter": "0,1,2,3",
- "EventCode": "0x02",
- "EventName": "UNC_E_WPQ_INSERTS",
- "PerPkg": "1",
- "ScaleUnit": "6.4e-05MiB",
- "UMask": "0x01",
- "Unit": "edc_eclk"
- }
-]
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/uncore-other.json b/tools/perf/pmu-events/arch/x86/knightslanding/uncore-other.json
new file mode 100644
index 000000000000..a5e1a9a47e73
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/uncore-other.json
@@ -0,0 +1,4103 @@
+[
+ {
+ "BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -IPQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.IPQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x18",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -IPQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.IPQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x28",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -IRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.IRQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -IRQ ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.IRQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -IRQ or PRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.LOC_ALL",
+ "PerPkg": "1",
+ "UMask": "0x37",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -PRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.PRQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -PRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.PRQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of read requests and streaming stores that hit in MCDRAM cache and the data in MCDRAM is clean with respect to DDR. This event is only valid in cache and hybrid memory mode.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x02",
+ "EventName": "UNC_E_EDC_ACCESS.HIT_CLEAN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "EDC_UCLK"
+ },
+ {
+ "BriefDescription": "Counts the number of read requests and streaming stores that hit in MCDRAM cache and the data in MCDRAM is dirty with respect to DDR. This event is only valid in cache and hybrid memory mode. ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x02",
+ "EventName": "UNC_E_EDC_ACCESS.HIT_DIRTY",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "EDC_UCLK"
+ },
+ {
+ "BriefDescription": "Counts the number of read requests and streaming stores that miss in MCDRAM cache and the data evicted from the MCDRAM is clean with respect to DDR. This event is only valid in cache and hybrid memory mode.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x02",
+ "EventName": "UNC_E_EDC_ACCESS.MISS_CLEAN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "EDC_UCLK"
+ },
+ {
+ "BriefDescription": "Counts the number of read requests and streaming stores that miss in MCDRAM cache and the data evicted from the MCDRAM is dirty with respect to DDR. This event is only valid in cache and hybrid memory mode.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x02",
+ "EventName": "UNC_E_EDC_ACCESS.MISS_DIRTY",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "EDC_UCLK"
+ },
+ {
+ "BriefDescription": "Number of EDC Hits or Misses. Miss I",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x02",
+ "EventName": "UNC_E_EDC_ACCESS.MISS_INVALID",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "EDC_UCLK"
+ },
+ {
+ "BriefDescription": "ECLK count",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_E_E_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "EDC_ECLK"
+ },
+ {
+ "BriefDescription": "Counts the number of read requests received by the MCDRAM controller. This event is valid in all three memory modes: flat, cache and hybrid. In cache and hybrid memory mode, this event counts all read requests as well as streaming stores that hit or miss in the MCDRAM cache. ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x01",
+ "EventName": "UNC_E_RPQ_INSERTS",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "EDC_ECLK"
+ },
+ {
+ "BriefDescription": "UCLK count",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_E_U_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "EDC_UCLK"
+ },
+ {
+ "BriefDescription": "Counts the number of write requests received by the MCDRAM controller. This event is valid in all three memory modes: flat, cache and hybrid. In cache and hybrid memory mode, this event counts all streaming stores, writebacks and, read requests that miss in MCDRAM cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x02",
+ "EventName": "UNC_E_WPQ_INSERTS",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "EDC_ECLK"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired For Transgress 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired For Transgress 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired For Transgress 0-7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x81",
+ "EventName": "UNC_H_AG0_AD_CRD_ACQUIRED_EXT.ANY_OF_TGR0_THRU_TGR7",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired For Transgress 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x81",
+ "EventName": "UNC_H_AG0_AD_CRD_ACQUIRED_EXT.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy For Transgress 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy For Transgress 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy For Transgress 0-7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY_EXT.ANY_OF_TGR0_THRU_TGR7",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy For Transgress 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY_EXT.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired For Transgress 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired For Transgress 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired For Transgress 0-7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x89",
+ "EventName": "UNC_H_AG0_BL_CRD_ACQUIRED_EXT.ANY_OF_TGR0_THRU_TGR7",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired For Transgress 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x89",
+ "EventName": "UNC_H_AG0_BL_CRD_ACQUIRED_EXT.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy For Transgress 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy For Transgress 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy For Transgress 0-7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8B",
+ "EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY_EXT.ANY_OF_TGR0_THRU_TGR7",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy For Transgress 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8B",
+ "EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY_EXT.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_AD.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_AD.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_AD.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_AD.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_AD.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_AD.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_AD.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_AD.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 0-7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD1",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_AD_EXT.ANY_OF_TGR0_THRU_TGR7",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD1",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_AD_EXT.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_BL.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_BL.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_BL.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_BL.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_BL.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_BL.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_BL.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_BL.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 0-7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD5",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_BL_EXT.ANY_OF_TGR0_THRU_TGR7",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD5",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_BL_EXT.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired For Transgress 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired For Transgress 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired For Transgress 0-7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x85",
+ "EventName": "UNC_H_AG1_AD_CRD_ACQUIRED_EXT.ANY_OF_TGR0_THRU_TGR7",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired For Transgress 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x85",
+ "EventName": "UNC_H_AG1_AD_CRD_ACQUIRED_EXT.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x86",
+ "EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x86",
+ "EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x86",
+ "EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x86",
+ "EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x86",
+ "EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x86",
+ "EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy For Transgress 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x86",
+ "EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy For Transgress 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x86",
+ "EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy For Transgress 0-7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x87",
+ "EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY_EXT.ANY_OF_TGR0_THRU_TGR7",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy For Transgress 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x87",
+ "EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY_EXT.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_H_AG1_BL_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_H_AG1_BL_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_H_AG1_BL_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_H_AG1_BL_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_H_AG1_BL_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_H_AG1_BL_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired For Transgress 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_H_AG1_BL_CRD_ACQUIRED.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired For Transgress 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_H_AG1_BL_CRD_ACQUIRED.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired For Transgress 0-7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8D",
+ "EventName": "UNC_H_AG1_BL_CRD_ACQUIRED_EXT.ANY_OF_TGR0_THRU_TGR7",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired For Transgress 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8D",
+ "EventName": "UNC_H_AG1_BL_CRD_ACQUIRED_EXT.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy For Transgress 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy For Transgress 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy For Transgress 0-7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8F",
+ "EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY_EXT.ANY_OF_TGR0_THRU_TGR7",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy For Transgress 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8F",
+ "EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY_EXT.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_AD.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_AD.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_AD.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_AD.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_AD.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_AD.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_AD.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_AD.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 0-7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD3",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_AD_EXT.ANY_OF_TGR0_THRU_TGR7",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD3",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_AD_EXT.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_BL.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_BL.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_BL.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_BL.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_BL.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_BL.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_BL.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_BL.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 0-7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD7",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_BL_EXT.ANY_OF_TGR0_THRU_TGR7",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD7",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_BL_EXT.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups. Counts the number of times the LLC was accessed. Writeback transactions from L2 to the LLC This includes all write transactions -- both Cachable and UC.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_H_CACHE_LINES_VICTIMIZED.E_STATE",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups. Counts the number of times the LLC was accessed. Filters for any transaction originating from the IPQ or IRQ. This does not include lookups originating from the ISMQ.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_H_CACHE_LINES_VICTIMIZED.F_STATE",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized that Match NID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_H_CACHE_LINES_VICTIMIZED.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups. Counts the number of times the LLC was accessed. Read transactions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_H_CACHE_LINES_VICTIMIZED.M_STATE",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized that Does Not Match NID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_H_CACHE_LINES_VICTIMIZED.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups. Counts the number of times the LLC was accessed. Filters for only snoop requests coming from the remote socket(s) through the IPQ.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_H_CACHE_LINES_VICTIMIZED.S_STATE",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Uncore Clocks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_H_CLOCK",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9D",
+ "EventName": "UNC_H_EGRESS_HORZ_ADS_USED.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9D",
+ "EventName": "UNC_H_EGRESS_HORZ_ADS_USED.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9D",
+ "EventName": "UNC_H_EGRESS_HORZ_ADS_USED.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Bypass. AD ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9F",
+ "EventName": "UNC_H_EGRESS_HORZ_BYPASS.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Bypass. AK ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9F",
+ "EventName": "UNC_H_EGRESS_HORZ_BYPASS.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Bypass. BL ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9F",
+ "EventName": "UNC_H_EGRESS_HORZ_BYPASS.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Bypass. IV ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9F",
+ "EventName": "UNC_H_EGRESS_HORZ_BYPASS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full AD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x96",
+ "EventName": "UNC_H_EGRESS_HORZ_CYCLES_FULL.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x96",
+ "EventName": "UNC_H_EGRESS_HORZ_CYCLES_FULL.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full BL",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x96",
+ "EventName": "UNC_H_EGRESS_HORZ_CYCLES_FULL.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x96",
+ "EventName": "UNC_H_EGRESS_HORZ_CYCLES_FULL.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty AD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x97",
+ "EventName": "UNC_H_EGRESS_HORZ_CYCLES_NE.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x97",
+ "EventName": "UNC_H_EGRESS_HORZ_CYCLES_NE.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty BL",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x97",
+ "EventName": "UNC_H_EGRESS_HORZ_CYCLES_NE.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x97",
+ "EventName": "UNC_H_EGRESS_HORZ_CYCLES_NE.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts AD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x95",
+ "EventName": "UNC_H_EGRESS_HORZ_INSERTS.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x95",
+ "EventName": "UNC_H_EGRESS_HORZ_INSERTS.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts BL",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x95",
+ "EventName": "UNC_H_EGRESS_HORZ_INSERTS.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x95",
+ "EventName": "UNC_H_EGRESS_HORZ_INSERTS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x99",
+ "EventName": "UNC_H_EGRESS_HORZ_NACK.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x99",
+ "EventName": "UNC_H_EGRESS_HORZ_NACK.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x99",
+ "EventName": "UNC_H_EGRESS_HORZ_NACK.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x99",
+ "EventName": "UNC_H_EGRESS_HORZ_NACK.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy AD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x94",
+ "EventName": "UNC_H_EGRESS_HORZ_OCCUPANCY.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x94",
+ "EventName": "UNC_H_EGRESS_HORZ_OCCUPANCY.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy BL",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x94",
+ "EventName": "UNC_H_EGRESS_HORZ_OCCUPANCY.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x94",
+ "EventName": "UNC_H_EGRESS_HORZ_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9B",
+ "EventName": "UNC_H_EGRESS_HORZ_STARVED.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9B",
+ "EventName": "UNC_H_EGRESS_HORZ_STARVED.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9B",
+ "EventName": "UNC_H_EGRESS_HORZ_STARVED.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9B",
+ "EventName": "UNC_H_EGRESS_HORZ_STARVED.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAE",
+ "EventName": "UNC_H_EGRESS_ORDERING.IV_SNP_GO_DN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAE",
+ "EventName": "UNC_H_EGRESS_ORDERING.IV_SNP_GO_UP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9C",
+ "EventName": "UNC_H_EGRESS_VERT_ADS_USED.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9C",
+ "EventName": "UNC_H_EGRESS_VERT_ADS_USED.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9C",
+ "EventName": "UNC_H_EGRESS_VERT_ADS_USED.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9C",
+ "EventName": "UNC_H_EGRESS_VERT_ADS_USED.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9C",
+ "EventName": "UNC_H_EGRESS_VERT_ADS_USED.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9C",
+ "EventName": "UNC_H_EGRESS_VERT_ADS_USED.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Bypass. AD ring agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9E",
+ "EventName": "UNC_H_EGRESS_VERT_BYPASS.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Bypass. AD ring agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9E",
+ "EventName": "UNC_H_EGRESS_VERT_BYPASS.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Bypass. AK ring agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9E",
+ "EventName": "UNC_H_EGRESS_VERT_BYPASS.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Bypass. AK ring agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9E",
+ "EventName": "UNC_H_EGRESS_VERT_BYPASS.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Bypass. BL ring agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9E",
+ "EventName": "UNC_H_EGRESS_VERT_BYPASS.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Bypass. BL ring agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9E",
+ "EventName": "UNC_H_EGRESS_VERT_BYPASS.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Bypass. IV ring agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9E",
+ "EventName": "UNC_H_EGRESS_VERT_BYPASS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x92",
+ "EventName": "UNC_H_EGRESS_VERT_CYCLES_FULL.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x92",
+ "EventName": "UNC_H_EGRESS_VERT_CYCLES_FULL.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x92",
+ "EventName": "UNC_H_EGRESS_VERT_CYCLES_FULL.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x92",
+ "EventName": "UNC_H_EGRESS_VERT_CYCLES_FULL.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x92",
+ "EventName": "UNC_H_EGRESS_VERT_CYCLES_FULL.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x92",
+ "EventName": "UNC_H_EGRESS_VERT_CYCLES_FULL.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full IV - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x92",
+ "EventName": "UNC_H_EGRESS_VERT_CYCLES_FULL.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x93",
+ "EventName": "UNC_H_EGRESS_VERT_CYCLES_NE.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x93",
+ "EventName": "UNC_H_EGRESS_VERT_CYCLES_NE.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x93",
+ "EventName": "UNC_H_EGRESS_VERT_CYCLES_NE.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x93",
+ "EventName": "UNC_H_EGRESS_VERT_CYCLES_NE.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x93",
+ "EventName": "UNC_H_EGRESS_VERT_CYCLES_NE.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x93",
+ "EventName": "UNC_H_EGRESS_VERT_CYCLES_NE.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty IV - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x93",
+ "EventName": "UNC_H_EGRESS_VERT_CYCLES_NE.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_H_EGRESS_VERT_INSERTS.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_H_EGRESS_VERT_INSERTS.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_H_EGRESS_VERT_INSERTS.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_H_EGRESS_VERT_INSERTS.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_H_EGRESS_VERT_INSERTS.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_H_EGRESS_VERT_INSERTS.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations IV - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_H_EGRESS_VERT_INSERTS.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x98",
+ "EventName": "UNC_H_EGRESS_VERT_NACK.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x98",
+ "EventName": "UNC_H_EGRESS_VERT_NACK.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs Onto AK Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x98",
+ "EventName": "UNC_H_EGRESS_VERT_NACK.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x98",
+ "EventName": "UNC_H_EGRESS_VERT_NACK.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs Onto BL Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x98",
+ "EventName": "UNC_H_EGRESS_VERT_NACK.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x98",
+ "EventName": "UNC_H_EGRESS_VERT_NACK.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x98",
+ "EventName": "UNC_H_EGRESS_VERT_NACK.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_H_EGRESS_VERT_OCCUPANCY.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_H_EGRESS_VERT_OCCUPANCY.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_H_EGRESS_VERT_OCCUPANCY.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_H_EGRESS_VERT_OCCUPANCY.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_H_EGRESS_VERT_OCCUPANCY.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_H_EGRESS_VERT_OCCUPANCY.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy IV - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_H_EGRESS_VERT_OCCUPANCY.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9A",
+ "EventName": "UNC_H_EGRESS_VERT_STARVED.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9A",
+ "EventName": "UNC_H_EGRESS_VERT_STARVED.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation Onto AK Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9A",
+ "EventName": "UNC_H_EGRESS_VERT_STARVED.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9A",
+ "EventName": "UNC_H_EGRESS_VERT_STARVED.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation Onto BL Ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9A",
+ "EventName": "UNC_H_EGRESS_VERT_STARVED.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9A",
+ "EventName": "UNC_H_EGRESS_VERT_STARVED.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9A",
+ "EventName": "UNC_H_EGRESS_VERT_STARVED.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts cycles source throttling is adderted - horizontal",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA5",
+ "EventName": "UNC_H_FAST_ASSERTED.HORZ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts cycles source throttling is adderted - vertical",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA5",
+ "EventName": "UNC_H_FAST_ASSERTED.VERT",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop - Left and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA7",
+ "EventName": "UNC_H_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop - Left and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA7",
+ "EventName": "UNC_H_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop - Right and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA7",
+ "EventName": "UNC_H_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop - Right and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA7",
+ "EventName": "UNC_H_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop - Left and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA9",
+ "EventName": "UNC_H_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop - Left and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA9",
+ "EventName": "UNC_H_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop - Right and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA9",
+ "EventName": "UNC_H_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop - Right and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA9",
+ "EventName": "UNC_H_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop - Left and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAB",
+ "EventName": "UNC_H_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop - Left and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAB",
+ "EventName": "UNC_H_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop - Right and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAB",
+ "EventName": "UNC_H_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop - Right and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAB",
+ "EventName": "UNC_H_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop - Left",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAD",
+ "EventName": "UNC_H_HORZ_RING_IV_IN_USE.LEFT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop - Right",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAD",
+ "EventName": "UNC_H_HORZ_RING_IV_IN_USE.RIGHT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Allocations. Counts number of allocations per cycle into the specified Ingress queue. - IPQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_H_INGRESS_INSERTS.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Allocations. Counts number of allocations per cycle into the specified Ingress queue. - IRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_H_INGRESS_INSERTS.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Allocations. Counts number of allocations per cycle into the specified Ingress queue. - IRQ Rejected",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_H_INGRESS_INSERTS.IRQ_REJ",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Allocations. Counts number of allocations per cycle into the specified Ingress queue. - PRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_H_INGRESS_INSERTS.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Allocations. Counts number of allocations per cycle into the specified Ingress queue. - PRQ Rejected",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_H_INGRESS_INSERTS.PRQ_REJ",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles with the IPQ in Internal Starvation.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_H_INGRESS_INT_STARVED.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles with the IRQ in Internal Starvation.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_H_INGRESS_INT_STARVED.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles with the ISMQ in Internal Starvation.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_H_INGRESS_INT_STARVED.ISMQ",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress internal starvation cycles. Counts cycles in internal starvation. This occurs when one or more of the entries in the ingress queue are being starved out by other entries in the queue.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_H_INGRESS_INT_STARVED.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy. Counts number of entries in the specified Ingress queue in each cycle. - IPQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_H_INGRESS_OCCUPANCY.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy. Counts number of entries in the specified Ingress queue in each cycle. - IRQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_H_INGRESS_OCCUPANCY.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy. Counts number of entries in the specified Ingress queue in each cycle. - IRQ Rejected",
+ "EventCode": "0x11",
+ "EventName": "UNC_H_INGRESS_OCCUPANCY.IRQ_REJ",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy. Counts number of entries in the specified Ingress queue in each cycle. - PRQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_H_INGRESS_OCCUPANCY.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy. Counts number of entries in the specified Ingress queue in each cycle. - PRQ Rejected",
+ "EventCode": "0x11",
+ "EventName": "UNC_H_INGRESS_OCCUPANCY.PRQ_REJ",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_INGRESS_RETRY_IPQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_INGRESS_RETRY_IPQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_INGRESS_RETRY_IPQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_INGRESS_RETRY_IPQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_INGRESS_RETRY_IPQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_INGRESS_RETRY_IPQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_INGRESS_RETRY_IPQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_INGRESS_RETRY_IPQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_H_INGRESS_RETRY_IPQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_H_INGRESS_RETRY_IPQ1_REJECT.ANY_REJECT_IPQ0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_H_INGRESS_RETRY_IPQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_H_INGRESS_RETRY_IPQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_H_INGRESS_RETRY_IPQ1_REJECT.SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_INGRESS_RETRY_IRQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_INGRESS_RETRY_IRQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_INGRESS_RETRY_IRQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_INGRESS_RETRY_IRQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_INGRESS_RETRY_IRQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_INGRESS_RETRY_IRQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_INGRESS_RETRY_IRQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_INGRESS_RETRY_IRQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_INGRESS_RETRY_IRQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_INGRESS_RETRY_IRQ1_REJECT.ANY_REJECT_IRQ0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_INGRESS_RETRY_IRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_INGRESS_RETRY_IRQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_INGRESS_RETRY_IRQ1_REJECT.SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_H_INGRESS_RETRY_ISMQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_H_INGRESS_RETRY_ISMQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_H_INGRESS_RETRY_ISMQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_H_INGRESS_RETRY_ISMQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_H_INGRESS_RETRY_ISMQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_H_INGRESS_RETRY_ISMQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_H_INGRESS_RETRY_ISMQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_H_INGRESS_RETRY_ISMQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_H_INGRESS_RETRY_ISMQ0_RETRY.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_H_INGRESS_RETRY_ISMQ0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_H_INGRESS_RETRY_ISMQ0_RETRY.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_H_INGRESS_RETRY_ISMQ0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_H_INGRESS_RETRY_ISMQ0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_H_INGRESS_RETRY_ISMQ0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_H_INGRESS_RETRY_ISMQ0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_H_INGRESS_RETRY_ISMQ0_RETRY.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Queue Retries",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_H_INGRESS_RETRY_OTHER0_RETRY.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Queue Retries",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_H_INGRESS_RETRY_OTHER0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Queue Retries",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_H_INGRESS_RETRY_OTHER0_RETRY.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Queue Retries",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_H_INGRESS_RETRY_OTHER0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Queue Retries",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_H_INGRESS_RETRY_OTHER0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Queue Retries",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_H_INGRESS_RETRY_OTHER0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Queue Retries",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_H_INGRESS_RETRY_OTHER0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Queue Retries",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_H_INGRESS_RETRY_OTHER0_RETRY.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Queue Retries",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_INGRESS_RETRY_OTHER1_RETRY.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Queue Retries",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_INGRESS_RETRY_OTHER1_RETRY.ANY_REJECT_IRQ0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Queue Retries",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_INGRESS_RETRY_OTHER1_RETRY.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Queue Retries",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_INGRESS_RETRY_OTHER1_RETRY.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Queue Retries",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_INGRESS_RETRY_OTHER1_RETRY.SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_INGRESS_RETRY_PRQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_INGRESS_RETRY_PRQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_INGRESS_RETRY_PRQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_INGRESS_RETRY_PRQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_INGRESS_RETRY_PRQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_INGRESS_RETRY_PRQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_INGRESS_RETRY_PRQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_INGRESS_RETRY_PRQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_INGRESS_RETRY_PRQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_INGRESS_RETRY_PRQ1_REJECT.ANY_REJECT_IRQ0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_INGRESS_RETRY_PRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_INGRESS_RETRY_PRQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_INGRESS_RETRY_PRQ1_REJECT.SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_INGRESS_RETRY_REQ_Q0_RETRY.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_INGRESS_RETRY_REQ_Q0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_INGRESS_RETRY_REQ_Q0_RETRY.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_INGRESS_RETRY_REQ_Q0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_INGRESS_RETRY_REQ_Q0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_INGRESS_RETRY_REQ_Q0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_INGRESS_RETRY_REQ_Q0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_INGRESS_RETRY_REQ_Q0_RETRY.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_H_INGRESS_RETRY_REQ_Q1_RETRY.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_H_INGRESS_RETRY_REQ_Q1_RETRY.ANY_REJECT_IRQ0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_H_INGRESS_RETRY_REQ_Q1_RETRY.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_H_INGRESS_RETRY_REQ_Q1_RETRY.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_H_INGRESS_RETRY_REQ_Q1_RETRY.SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Miscellaneous events in the Cbo. CV0 Prefetch Miss",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_H_MISC.CV0_PREF_MISS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Miscellaneous events in the Cbo. CV0 Prefetch Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_H_MISC.CV0_PREF_VIC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Miscellaneous events in the Cbo. RFO HitS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_H_MISC.RFO_HIT_S",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Miscellaneous events in the Cbo. Silent Snoop Eviction",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_H_MISC.RSPI_WAS_FSE",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Miscellaneous events in the Cbo. Write Combining Aliasing",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_H_MISC.WC_ALIASING",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Number of incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA1",
+ "EventName": "UNC_H_RING_BOUNCES_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Number of incoming messages from the Horizontal ring that were bounced, by ring type - Acknowledgements to core",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA1",
+ "EventName": "UNC_H_RING_BOUNCES_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Number of incoming messages from the Horizontal ring that were bounced, by ring type - Data Responses to core.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA1",
+ "EventName": "UNC_H_RING_BOUNCES_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Number of incoming messages from the Horizontal ring that were bounced, by ring type - Snoops of processor's cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA1",
+ "EventName": "UNC_H_RING_BOUNCES_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Number of incoming messages from the Vertical ring that were bounced, by ring type.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_H_RING_BOUNCES_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Number of incoming messages from the Vertical ring that were bounced, by ring type - Acknowledgements to core",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_H_RING_BOUNCES_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Number of incoming messages from the Vertical ring that were bounced, by ring type - Data Responses to core.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_H_RING_BOUNCES_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Number of incoming messages from the Vertical ring that were bounced, by ring type - Snoops of processor's cache.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_H_RING_BOUNCES_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal ring sink starvation count - AD ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA3",
+ "EventName": "UNC_H_RING_SINK_STARVED_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal ring sink starvation count - AK ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA3",
+ "EventName": "UNC_H_RING_SINK_STARVED_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal ring sink starvation count - BL ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA3",
+ "EventName": "UNC_H_RING_SINK_STARVED_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal ring sink starvation count - IV ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA3",
+ "EventName": "UNC_H_RING_SINK_STARVED_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical ring sink starvation count - AD ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA2",
+ "EventName": "UNC_H_RING_SINK_STARVED_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical ring sink starvation count - AK ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA2",
+ "EventName": "UNC_H_RING_SINK_STARVED_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical ring sink starvation count - BL ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA2",
+ "EventName": "UNC_H_RING_SINK_STARVED_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical ring sink starvation count - IV ring",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA2",
+ "EventName": "UNC_H_RING_SINK_STARVED_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts cycles in throttle mode.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA4",
+ "EventName": "UNC_H_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups. Counts the number of times the LLC was accessed. Filters for any transaction originating from the IPQ or IRQ. This does not include lookups originating from the ISMQ.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_H_SF_LOOKUP.ANY",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups. Counts the number of times the LLC was accessed. Read transactions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_H_SF_LOOKUP.DATA_READ",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups. Counts the number of times the LLC was accessed. Filters for only snoop requests coming from the remote socket(s) through the IPQ.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_H_SF_LOOKUP.REMOTE_SNOOP",
+ "PerPkg": "1",
+ "UMask": "0x09",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups. Counts the number of times the LLC was accessed. Writeback transactions from L2 to the LLC This includes all write transactions -- both Cachable and UC.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_H_SF_LOOKUP.WRITE",
+ "PerPkg": "1",
+ "UMask": "0x05",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation. Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_H_TG_INGRESS_BUSY_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation. Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_H_TG_INGRESS_BUSY_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation. Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_H_TG_INGRESS_BUSY_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation. Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_H_TG_INGRESS_BUSY_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass. Number of packets bypassing the CMS Ingress .",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_H_TG_INGRESS_BYPASS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass. Number of packets bypassing the CMS Ingress .",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_H_TG_INGRESS_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass. Number of packets bypassing the CMS Ingress .",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_H_TG_INGRESS_BYPASS.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass. Number of packets bypassing the CMS Ingress .",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_H_TG_INGRESS_BYPASS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass. Number of packets bypassing the CMS Ingress .",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_H_TG_INGRESS_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass. Number of packets bypassing the CMS Ingress .",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_H_TG_INGRESS_BYPASS.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation. Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_H_TG_INGRESS_CRD_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation. Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_H_TG_INGRESS_CRD_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation. Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_H_TG_INGRESS_CRD_STARVED.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation. Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_H_TG_INGRESS_CRD_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation. Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_H_TG_INGRESS_CRD_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation. Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_H_TG_INGRESS_CRD_STARVED.IFV",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation. Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_H_TG_INGRESS_CRD_STARVED.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations. Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_H_TG_INGRESS_INSERTS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations. Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_H_TG_INGRESS_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations. Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_H_TG_INGRESS_INSERTS.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations. Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_H_TG_INGRESS_INSERTS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations. Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_H_TG_INGRESS_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations. Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_H_TG_INGRESS_INSERTS.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy. Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_H_TG_INGRESS_OCCUPANCY.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy. Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_H_TG_INGRESS_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy. Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_H_TG_INGRESS_OCCUPANCY.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy. Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_H_TG_INGRESS_OCCUPANCY.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy. Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_H_TG_INGRESS_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy. Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_H_TG_INGRESS_OCCUPANCY.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -SF/LLC Evictions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TOR_INSERTS.EVICT",
+ "PerPkg": "1",
+ "UMask": "0x32",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -Hit (Not a Miss)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TOR_INSERTS.HIT",
+ "PerPkg": "1",
+ "UMask": "0x1F",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -IPQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TOR_INSERTS.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x38",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -IRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TOR_INSERTS.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x31",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -Miss",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TOR_INSERTS.MISS",
+ "PerPkg": "1",
+ "UMask": "0x2F",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -PRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TOR_INSERTS.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x34",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent -SF/LLC Evictions",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TOR_OCCUPANCY.EVICT",
+ "PerPkg": "1",
+ "UMask": "0x32",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent -Hit (Not a Miss)",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TOR_OCCUPANCY.HIT",
+ "PerPkg": "1",
+ "UMask": "0x1F",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent -IPQ",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TOR_OCCUPANCY.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x38",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent -IPQ hit",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TOR_OCCUPANCY.IPQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x18",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent -IPQ miss",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TOR_OCCUPANCY.IPQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x28",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent -IRQ or PRQ",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TOR_OCCUPANCY.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x31",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent -IRQ or PRQ hit",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TOR_OCCUPANCY.IRQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent -IRQ or PRQ miss",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TOR_OCCUPANCY.IRQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent -Miss",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TOR_OCCUPANCY.MISS",
+ "PerPkg": "1",
+ "UMask": "0x2F",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent -PRQ",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TOR_OCCUPANCY.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x34",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent -PRQ hit",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TOR_OCCUPANCY.PRQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent -PRQ miss",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TOR_OCCUPANCY.PRQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Uncore Clocks",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_H_U_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop - Down and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA6",
+ "EventName": "UNC_H_VERT_RING_AD_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop - Down and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA6",
+ "EventName": "UNC_H_VERT_RING_AD_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop - Up and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA6",
+ "EventName": "UNC_H_VERT_RING_AD_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop - Up and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA6",
+ "EventName": "UNC_H_VERT_RING_AD_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop - Down and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA8",
+ "EventName": "UNC_H_VERT_RING_AK_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop - Down and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA8",
+ "EventName": "UNC_H_VERT_RING_AK_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop - Up and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA8",
+ "EventName": "UNC_H_VERT_RING_AK_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop - Up and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA8",
+ "EventName": "UNC_H_VERT_RING_AK_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop - Down and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAA",
+ "EventName": "UNC_H_VERT_RING_BL_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop - Down and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAA",
+ "EventName": "UNC_H_VERT_RING_BL_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop - Up and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAA",
+ "EventName": "UNC_H_VERT_RING_BL_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop - Up and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAA",
+ "EventName": "UNC_H_VERT_RING_BL_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop - Down",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAC",
+ "EventName": "UNC_H_VERT_RING_IV_IN_USE.DN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop - Up",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAC",
+ "EventName": "UNC_H_VERT_RING_IV_IN_USE.UP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full. Counts the number of cycles when the M2PCIe Egress is full. AD_0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_EGRESS_CYCLES_FULL.AD_0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full. Counts the number of cycles when the M2PCIe Egress is full. AD_1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_EGRESS_CYCLES_FULL.AD_1",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full. Counts the number of cycles when the M2PCIe Egress is full. AK_0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_EGRESS_CYCLES_FULL.AK_0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full. Counts the number of cycles when the M2PCIe Egress is full. AK_1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_EGRESS_CYCLES_FULL.AK_1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full. Counts the number of cycles when the M2PCIe Egress is full. BL_0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_EGRESS_CYCLES_FULL.BL_0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full. Counts the number of cycles when the M2PCIe Egress is full. BL_1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_EGRESS_CYCLES_FULL.BL_1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty. Counts the number of cycles when the M2PCIe Egress is not empty. AD_0",
+ "Counter": "0,1",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_EGRESS_CYCLES_NE.AD_0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty. Counts the number of cycles when the M2PCIe Egress is not empty. AD_1",
+ "Counter": "0,1",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_EGRESS_CYCLES_NE.AD_1",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty. Counts the number of cycles when the M2PCIe Egress is not empty. AK_0",
+ "Counter": "0,1",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_EGRESS_CYCLES_NE.AK_0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty. Counts the number of cycles when the M2PCIe Egress is not empty. AK_1",
+ "Counter": "0,1",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_EGRESS_CYCLES_NE.AK_1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty. Counts the number of cycles when the M2PCIe Egress is not empty. BL_0",
+ "Counter": "0,1",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_EGRESS_CYCLES_NE.BL_0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty. Counts the number of cycles when the M2PCIe Egress is not empty. BL_1",
+ "Counter": "0,1",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_EGRESS_CYCLES_NE.BL_1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress. Counts the number of number of messages inserted into the the M2PCIe Egress queue. AD_0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_EGRESS_INSERTS.AD_0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress. Counts the number of number of messages inserted into the the M2PCIe Egress queue. AD_1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_EGRESS_INSERTS.AD_1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress. Counts the number of number of messages inserted into the the M2PCIe Egress queue. AK_0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_EGRESS_INSERTS.AK_0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress. Counts the number of number of messages inserted into the the M2PCIe Egress queue. AK_1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_EGRESS_INSERTS.AK_1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress. Counts the number of number of messages inserted into the the M2PCIe Egress queue. AK_CRD_0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_EGRESS_INSERTS.AK_CRD_0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress. Counts the number of number of messages inserted into the the M2PCIe Egress queue. AK_CRD_1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_EGRESS_INSERTS.AK_CRD_1",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress. Counts the number of number of messages inserted into the the M2PCIe Egress queue. BL_0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_EGRESS_INSERTS.BL_0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress. Counts the number of number of messages inserted into the the M2PCIe Egress queue. BL_1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_EGRESS_INSERTS.BL_1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Queue Cycles Not Empty. Counts the number of cycles when the M2PCIe Ingress is not empty.ALL",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_INGRESS_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Queue Cycles Not Empty. Counts the number of cycles when the M2PCIe Ingress is not empty.CBO_IDI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_INGRESS_CYCLES_NE.CBO_IDI",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Queue Cycles Not Empty. Counts the number of cycles when the M2PCIe Ingress is not empty.CBO_NCB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_INGRESS_CYCLES_NE.CBO_NCB",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Queue Cycles Not Empty. Counts the number of cycles when the M2PCIe Ingress is not empty.CBO_NCS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_INGRESS_CYCLES_NE.CBO_NCS",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CAS All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_CAS_COUNT.ALL",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "iMC_DCLK"
+ },
+ {
+ "BriefDescription": "CAS Reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_CAS_COUNT.RD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC_DCLK"
+ },
+ {
+ "BriefDescription": "CAS Writes",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_CAS_COUNT.WR",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC_DCLK"
+ },
+ {
+ "BriefDescription": "DCLK count",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_M_D_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "iMC_DCLK"
+ },
+ {
+ "BriefDescription": "UCLK count",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_M_U_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "iMC_UCLK"
+ }
+]
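Each entry in these uncore JSON files is essentially a name-to-encoding mapping: the perf tooling exposes the EventName as an alias that expands to event/umask terms on the PMU identified by Unit. A minimal illustrative sketch of that expansion in Python, assuming a hypothetical PMU instance name uncore_cha_0 (actual instance names vary by system):

# Sketch only: build a raw perf event string from one JSON entry above.
# The PMU instance name "uncore_cha_0" is an assumption for illustration.
def to_perf_event(entry, pmu="uncore_cha_0"):
    terms = ["event=" + entry["EventCode"].lower()]
    if "UMask" in entry:
        terms.append("umask=" + entry["UMask"].lower())
    return pmu + "/" + ",".join(terms) + "/"

example = {"EventCode": "0x35", "UMask": "0x31", "EventName": "UNC_H_TOR_INSERTS.IRQ"}
print(to_perf_event(example))  # uncore_cha_0/event=0x35,umask=0x31/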
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json b/tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json
index eda299ef5ff8..821cdd44a12f 100644
--- a/tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json
@@ -62,4 +62,4 @@
"SampleAfterValue": "100003",
"UMask": "0x3"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/mapfile.csv b/tools/perf/pmu-events/arch/x86/mapfile.csv
index f5a382421a60..7f2d777fd97f 100644
--- a/tools/perf/pmu-events/arch/x86/mapfile.csv
+++ b/tools/perf/pmu-events/arch/x86/mapfile.csv
@@ -1,50 +1,34 @@
Family-model,Version,Filename,EventType
-GenuineIntel-6-56,v5,broadwellde,core
-GenuineIntel-6-3D,v17,broadwell,core
-GenuineIntel-6-47,v17,broadwell,core
-GenuineIntel-6-4F,v10,broadwellx,core
-GenuineIntel-6-1C,v4,bonnell,core
-GenuineIntel-6-26,v4,bonnell,core
-GenuineIntel-6-27,v4,bonnell,core
-GenuineIntel-6-36,v4,bonnell,core
-GenuineIntel-6-35,v4,bonnell,core
-GenuineIntel-6-5C,v8,goldmont,core
-GenuineIntel-6-7A,v1,goldmontplus,core
-GenuineIntel-6-3C,v24,haswell,core
-GenuineIntel-6-45,v24,haswell,core
-GenuineIntel-6-46,v24,haswell,core
-GenuineIntel-6-3F,v17,haswellx,core
-GenuineIntel-6-3A,v18,ivybridge,core
-GenuineIntel-6-3E,v19,ivytown,core
-GenuineIntel-6-2D,v20,jaketown,core
-GenuineIntel-6-57,v9,knightslanding,core
-GenuineIntel-6-85,v9,knightslanding,core
-GenuineIntel-6-1E,v2,nehalemep,core
-GenuineIntel-6-1F,v2,nehalemep,core
-GenuineIntel-6-1A,v2,nehalemep,core
-GenuineIntel-6-2E,v2,nehalemex,core
-GenuineIntel-6-[4589]E,v24,skylake,core
-GenuineIntel-6-A[56],v24,skylake,core
-GenuineIntel-6-37,v13,silvermont,core
-GenuineIntel-6-4D,v13,silvermont,core
-GenuineIntel-6-4C,v13,silvermont,core
-GenuineIntel-6-2A,v15,sandybridge,core
+GenuineIntel-6-9[7A],v1.13,alderlake,core
+GenuineIntel-6-(1C|26|27|35|36),v4,bonnell,core
+GenuineIntel-6-(3D|47),v26,broadwell,core
+GenuineIntel-6-56,v23,broadwellde,core
+GenuineIntel-6-4F,v19,broadwellx,core
+GenuineIntel-6-55-[56789ABCDEF],v1.16,cascadelakex,core
+GenuineIntel-6-96,v1.03,elkhartlake,core
+GenuineIntel-6-5[CF],v13,goldmont,core
+GenuineIntel-6-7A,v1.01,goldmontplus,core
+GenuineIntel-6-(3C|45|46),v31,haswell,core
+GenuineIntel-6-3F,v25,haswellx,core
+GenuineIntel-6-(7D|7E|A7),v1.14,icelake,core
+GenuineIntel-6-6[AC],v1.15,icelakex,core
+GenuineIntel-6-3A,v22,ivybridge,core
+GenuineIntel-6-3E,v21,ivytown,core
+GenuineIntel-6-2D,v21,jaketown,core
+GenuineIntel-6-(57|85),v9,knightslanding,core
+GenuineIntel-6-AA,v1.00,meteorlake,core
+GenuineIntel-6-1[AEF],v3,nehalemep,core
+GenuineIntel-6-2E,v3,nehalemex,core
+GenuineIntel-6-2A,v17,sandybridge,core
+GenuineIntel-6-8F,v1.04,sapphirerapids,core
+GenuineIntel-6-(37|4C|4D),v14,silvermont,core
+GenuineIntel-6-(4E|5E|8E|9E|A5|A6),v53,skylake,core
+GenuineIntel-6-55-[01234],v1.28,skylakex,core
+GenuineIntel-6-86,v1.20,snowridgex,core
+GenuineIntel-6-8[CD],v1.07,tigerlake,core
GenuineIntel-6-2C,v2,westmereep-dp,core
-GenuineIntel-6-25,v2,westmereep-sp,core
-GenuineIntel-6-2F,v2,westmereex,core
-GenuineIntel-6-55-[01234],v1,skylakex,core
-GenuineIntel-6-55-[56789ABCDEF],v1,cascadelakex,core
-GenuineIntel-6-7D,v1,icelake,core
-GenuineIntel-6-7E,v1,icelake,core
-GenuineIntel-6-8[CD],v1,tigerlake,core
-GenuineIntel-6-A7,v1,icelake,core
-GenuineIntel-6-6A,v1,icelakex,core
-GenuineIntel-6-6C,v1,icelakex,core
-GenuineIntel-6-86,v1,tremontx,core
-GenuineIntel-6-96,v1,elkhartlake,core
-GenuineIntel-6-97,v1,alderlake,core
-GenuineIntel-6-9A,v1,alderlake,core
-GenuineIntel-6-8F,v1,sapphirerapids,core
+GenuineIntel-6-25,v3,westmereep-sp,core
+GenuineIntel-6-2F,v3,westmereex,core
AuthenticAMD-23-([12][0-9A-F]|[0-9A-F]),v2,amdzen1,core
AuthenticAMD-23-[[:xdigit:]]+,v1,amdzen2,core
AuthenticAMD-25-[[:xdigit:]]+,v1,amdzen3,core
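For orientation, the first mapfile.csv column is a regular expression that perf matches against the running CPU's vendor-family-model identifier to pick the event directory; the rewrite above mostly collapses per-model rows into one regex per directory. A small sketch of that matching, using a hypothetical in-memory excerpt of the table:

# Sketch only: match a CPU identifier against mapfile.csv regexes.
import csv, io, re

mapfile = io.StringIO(
    "Family-model,Version,Filename,EventType\n"
    "GenuineIntel-6-9[7A],v1.13,alderlake,core\n"
    "GenuineIntel-6-(57|85),v9,knightslanding,core\n"
)
cpuid = "GenuineIntel-6-9A"  # assumed identifier, e.g. an Alder Lake part
for row in csv.DictReader(mapfile):
    if re.match(row["Family-model"], cpuid):
        print(row["Filename"])  # alderlake
        break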
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/cache.json b/tools/perf/pmu-events/arch/x86/meteorlake/cache.json
new file mode 100644
index 000000000000..32b2aa9b1475
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/cache.json
@@ -0,0 +1,262 @@
+[
+ {
+ "BriefDescription": "Counts the number of cacheable memory requests that miss in the LLC. Counts on a per core basis.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "200003",
+ "UMask": "0x41",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cacheable memory requests that access the LLC. Counts on a per core basis.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4f",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of load ops retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "200003",
+ "UMask": "0x81",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of store ops retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "200003",
+ "UMask": "0x82",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "CollectPEBSRecord": "3",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x80",
+ "PEBS": "2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "TakenAlone": "1",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "CollectPEBSRecord": "3",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PEBS": "2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "TakenAlone": "1",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "CollectPEBSRecord": "3",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
+ "PEBS": "2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "TakenAlone": "1",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "CollectPEBSRecord": "3",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "TakenAlone": "1",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "CollectPEBSRecord": "3",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_4",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x4",
+ "PEBS": "2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "TakenAlone": "1",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "CollectPEBSRecord": "3",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x200",
+ "PEBS": "2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "TakenAlone": "1",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "CollectPEBSRecord": "3",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x40",
+ "PEBS": "2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "TakenAlone": "1",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "CollectPEBSRecord": "3",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x8",
+ "PEBS": "2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "TakenAlone": "1",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of stores uops retired same as MEM_UOPS_RETIRED.ALL_STORES",
+ "CollectPEBSRecord": "3",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.STORE_LATENCY",
+ "PEBS": "2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x6",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "L2 code requests",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Demand Data Read access L2 cache",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Core-originated cacheable requests that missed L3 (Except hardware prefetches to the L3)",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x41",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Core-originated cacheable requests that refer to L3 (Except hardware prefetches to the L3)",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4f",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ALL_LOADS",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x81",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired store instructions.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ALL_STORES",
+ "L1_Hit_Indication": "1",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x82",
+ "Unit": "cpu_core"
+ }
+]
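One thing the hybrid Meteor Lake files make explicit: the same EventName can appear twice, once per core type, distinguished only by Unit (cpu_atom vs cpu_core), as with the two LONGEST_LAT_CACHE entries above. A consumer therefore has to key lookups by PMU as well as name; a minimal sketch with assumed in-memory entries:

# Sketch only: index hybrid events by (Unit, EventName), not by name alone.
events = [
    {"EventName": "LONGEST_LAT_CACHE.MISS", "Unit": "cpu_atom", "SampleAfterValue": "200003"},
    {"EventName": "LONGEST_LAT_CACHE.MISS", "Unit": "cpu_core", "SampleAfterValue": "100003"},
]
by_pmu = {(e["Unit"], e["EventName"]): e for e in events}
print(by_pmu[("cpu_core", "LONGEST_LAT_CACHE.MISS")]["SampleAfterValue"])  # 100003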
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/frontend.json b/tools/perf/pmu-events/arch/x86/meteorlake/frontend.json
new file mode 100644
index 000000000000..9657768fc95a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/frontend.json
@@ -0,0 +1,24 @@
+[
+ {
+ "BriefDescription": "Counts every time the code stream enters into a new cache line by walking sequential from the previous line or being redirected by a jump.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.ACCESSES",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "200003",
+ "UMask": "0x3",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts every time the code stream enters into a new cache line by walking sequential from the previous line or being redirected by a jump and the instruction cache registers bytes are not present. -",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.MISSES",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/memory.json b/tools/perf/pmu-events/arch/x86/meteorlake/memory.json
new file mode 100644
index 000000000000..15b2294a8ae7
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/memory.json
@@ -0,0 +1,185 @@
+[
+ {
+ "BriefDescription": "Counts cacheable demand data reads were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBFC00001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership, including SWPREFETCHW which is an RFO were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBFC00002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x80",
+ "PEBS": "2",
+ "PEBScounters": "1,2,3,4,5,6,7",
+ "SampleAfterValue": "1009",
+ "TakenAlone": "1",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PEBS": "2",
+ "PEBScounters": "1,2,3,4,5,6,7",
+ "SampleAfterValue": "20011",
+ "TakenAlone": "1",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
+ "PEBS": "2",
+ "PEBScounters": "1,2,3,4,5,6,7",
+ "SampleAfterValue": "503",
+ "TakenAlone": "1",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
+ "PEBScounters": "1,2,3,4,5,6,7",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x4",
+ "PEBS": "2",
+ "PEBScounters": "1,2,3,4,5,6,7",
+ "SampleAfterValue": "100003",
+ "TakenAlone": "1",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x200",
+ "PEBS": "2",
+ "PEBScounters": "1,2,3,4,5,6,7",
+ "SampleAfterValue": "101",
+ "TakenAlone": "1",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x40",
+ "PEBS": "2",
+ "PEBScounters": "1,2,3,4,5,6,7",
+ "SampleAfterValue": "2003",
+ "TakenAlone": "1",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x8",
+ "PEBS": "2",
+ "PEBScounters": "1,2,3,4,5,6,7",
+ "SampleAfterValue": "50021",
+ "TakenAlone": "1",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired memory store access operations. A PDist event for PEBS Store Latency Facility.",
+ "CollectPEBSRecord": "2",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.STORE_SAMPLE",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBFC00001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBFC00002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/other.json b/tools/perf/pmu-events/arch/x86/meteorlake/other.json
new file mode 100644
index 000000000000..14273ac54d2c
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/other.json
@@ -0,0 +1,46 @@
+[
+ {
+ "BriefDescription": "Counts cacheable demand data reads Catch all value for any response types - this includes response types not define in the OCR. If this is set all other response types will be ignored",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership, including SWPREFETCHW which is an RFO Catch all value for any response types - this includes response types not define in the OCR. If this is set all other response types will be ignored",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json b/tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json
new file mode 100644
index 000000000000..0a7981675b6c
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json
@@ -0,0 +1,254 @@
+[
+ {
+ "BriefDescription": "Counts the total number of branch instructions retired for all branch types.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "200003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the total number of mispredicted branch instructions retired for all branch types.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "200003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles",
+ "CollectPEBSRecord": "2",
+ "Counter": "33",
+ "EventName": "CPU_CLK_UNHALTED.CORE",
+ "PEBScounters": "33",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted core clock cycles[This event is alias to CPU_CLK_UNHALTED.THREAD_P]",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.CORE_P",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "2000003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted reference clock cycles",
+ "CollectPEBSRecord": "2",
+ "Counter": "34",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PEBScounters": "34",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles",
+ "CollectPEBSRecord": "2",
+ "Counter": "33",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "PEBScounters": "33",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted core clock cycles[This event is alias to CPU_CLK_UNHALTED.CORE_P]",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "2000003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Fixed Counter: Counts the number of instructions retired",
+ "CollectPEBSRecord": "2",
+ "Counter": "32",
+ "EventName": "INST_RETIRED.ANY",
+ "PEBS": "1",
+ "PEBScounters": "32",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of instructions retired",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "2000003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x73",
+ "EventName": "TOPDOWN_BAD_SPECULATION.ALL",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of retirement slots not consumed due to backend stalls",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.ALL",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of retirement slots not consumed due to front end stalls",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.ALL",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of consumed retirement slots. Similar to UOPS_RETIRED.ALL",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x72",
+ "EventName": "TOPDOWN_RETIRING.ALL",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "All branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "400009",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "All mispredicted branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "400009",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "CollectPEBSRecord": "2",
+ "Counter": "34",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PEBScounters": "34",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC_P",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Core cycles when the thread is not in halt state",
+ "CollectPEBSRecord": "2",
+ "Counter": "33",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "PEBScounters": "33",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "2000003",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of instructions retired. Fixed Counter - architectural event",
+ "CollectPEBSRecord": "2",
+ "Counter": "32",
+ "EventName": "INST_RETIRED.ANY",
+ "PEBS": "1",
+ "PEBScounters": "32",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PEBS": "1",
+ "PEBScounters": "1,2,3,4,5,6,7",
+ "SampleAfterValue": "2000003",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "100003",
+ "UMask": "0x82",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "TMA slots available for an unhalted logical processor. Fixed counter - architectural event",
+ "CollectPEBSRecord": "2",
+ "Counter": "35",
+ "EventName": "TOPDOWN.SLOTS",
+ "PEBScounters": "35",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "TMA slots available for an unhalted logical processor. General counter - architectural event",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.SLOTS_P",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/virtual-memory.json b/tools/perf/pmu-events/arch/x86/meteorlake/virtual-memory.json
new file mode 100644
index 000000000000..3087730cca7b
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/virtual-memory.json
@@ -0,0 +1,46 @@
+[
+ {
+ "BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to any page size.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe",
+ "Unit": "cpu_core"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/nehalemep/cache.json b/tools/perf/pmu-events/arch/x86/nehalemep/cache.json
index bcf74d793ae2..1ee91300baf9 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemep/cache.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemep/cache.json
@@ -1773,7 +1773,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Offcore data reads, RFO's and prefetches satisfied by the IO, CSR, MMIO unit",
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the IO, CSR, MMIO unit",
"Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.IO_CSR_MMIO",
@@ -1784,7 +1784,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Offcore data reads, RFO's and prefetches statisfied by the LLC and not found in a sibling core",
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the LLC and not found in a sibling core",
"Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_NO_OTHER_CORE",
@@ -1795,7 +1795,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Offcore data reads, RFO's and prefetches satisfied by the LLC and HIT in a sibling core",
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the LLC and HIT in a sibling core",
"Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_OTHER_CORE_HIT",
@@ -1806,7 +1806,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Offcore data reads, RFO's and prefetches satisfied by the LLC and HITM in a sibling core",
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the LLC and HITM in a sibling core",
"Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_OTHER_CORE_HITM",
@@ -1861,7 +1861,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Offcore data reads, RFO's and prefetches that HIT in a remote cache",
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches that HIT in a remote cache",
"Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_HIT",
@@ -1872,7 +1872,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Offcore data reads, RFO's and prefetches that HITM in a remote cache",
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches that HITM in a remote cache",
"Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_HITM",
@@ -3226,4 +3226,4 @@
"SampleAfterValue": "200000",
"UMask": "0x8"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/nehalemep/floating-point.json b/tools/perf/pmu-events/arch/x86/nehalemep/floating-point.json
index 39af1329224a..666e466d351c 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemep/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemep/floating-point.json
@@ -226,4 +226,4 @@
"SampleAfterValue": "200000",
"UMask": "0x8"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/nehalemep/frontend.json b/tools/perf/pmu-events/arch/x86/nehalemep/frontend.json
index 8ac5c24888c5..c561ac24d91d 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemep/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemep/frontend.json
@@ -23,4 +23,4 @@
"SampleAfterValue": "2000000",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/nehalemep/memory.json b/tools/perf/pmu-events/arch/x86/nehalemep/memory.json
index 26138ae639f4..6e95de3f3409 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemep/memory.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemep/memory.json
@@ -286,7 +286,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Offcore data reads, RFO's and prefetches statisfied by the local DRAM.",
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the local DRAM.",
"Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_DRAM",
@@ -297,7 +297,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Offcore data reads, RFO's and prefetches statisfied by the remote DRAM",
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the remote DRAM",
"Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_DRAM",
@@ -736,4 +736,4 @@
"SampleAfterValue": "100000",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/nehalemep/virtual-memory.json b/tools/perf/pmu-events/arch/x86/nehalemep/virtual-memory.json
index 6d3247c55bcd..e88c0802e679 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemep/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemep/virtual-memory.json
@@ -106,4 +106,4 @@
"SampleAfterValue": "200000",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/nehalemex/cache.json b/tools/perf/pmu-events/arch/x86/nehalemex/cache.json
index 21a0f8fd057e..01542c4ea678 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemex/cache.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemex/cache.json
@@ -1,3184 +1,3184 @@
[
{
- "EventCode": "0x63",
+ "BriefDescription": "Cycles L1D locked",
"Counter": "0,1",
- "UMask": "0x2",
+ "EventCode": "0x63",
"EventName": "CACHE_LOCK_CYCLES.L1D",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles L1D locked"
+ "UMask": "0x2"
},
{
- "EventCode": "0x63",
+ "BriefDescription": "Cycles L1D and L2 locked",
"Counter": "0,1",
- "UMask": "0x1",
+ "EventCode": "0x63",
"EventName": "CACHE_LOCK_CYCLES.L1D_L2",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles L1D and L2 locked"
+ "UMask": "0x1"
},
{
- "EventCode": "0x51",
+ "BriefDescription": "L1D cache lines replaced in M state",
"Counter": "0,1",
- "UMask": "0x4",
+ "EventCode": "0x51",
"EventName": "L1D.M_EVICT",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1D cache lines replaced in M state"
+ "UMask": "0x4"
},
{
- "EventCode": "0x51",
+ "BriefDescription": "L1D cache lines allocated in the M state",
"Counter": "0,1",
- "UMask": "0x2",
+ "EventCode": "0x51",
"EventName": "L1D.M_REPL",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1D cache lines allocated in the M state"
+ "UMask": "0x2"
},
{
- "EventCode": "0x51",
+ "BriefDescription": "L1D snoop eviction of cache lines in M state",
"Counter": "0,1",
- "UMask": "0x8",
+ "EventCode": "0x51",
"EventName": "L1D.M_SNOOP_EVICT",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1D snoop eviction of cache lines in M state"
+ "UMask": "0x8"
},
{
- "EventCode": "0x51",
+ "BriefDescription": "L1 data cache lines allocated",
"Counter": "0,1",
- "UMask": "0x1",
+ "EventCode": "0x51",
"EventName": "L1D.REPL",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache lines allocated"
+ "UMask": "0x1"
},
{
- "EventCode": "0x43",
+ "BriefDescription": "All references to the L1 data cache",
"Counter": "0,1",
- "UMask": "0x1",
+ "EventCode": "0x43",
"EventName": "L1D_ALL_REF.ANY",
"SampleAfterValue": "2000000",
- "BriefDescription": "All references to the L1 data cache"
+ "UMask": "0x1"
},
{
- "EventCode": "0x43",
+ "BriefDescription": "L1 data cacheable reads and writes",
"Counter": "0,1",
- "UMask": "0x2",
+ "EventCode": "0x43",
"EventName": "L1D_ALL_REF.CACHEABLE",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cacheable reads and writes"
+ "UMask": "0x2"
},
{
- "EventCode": "0x40",
+ "BriefDescription": "L1 data cache read in E state",
"Counter": "0,1",
- "UMask": "0x4",
+ "EventCode": "0x40",
"EventName": "L1D_CACHE_LD.E_STATE",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache read in E state"
+ "UMask": "0x4"
},
{
- "EventCode": "0x40",
+ "BriefDescription": "L1 data cache read in I state (misses)",
"Counter": "0,1",
- "UMask": "0x1",
+ "EventCode": "0x40",
"EventName": "L1D_CACHE_LD.I_STATE",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache read in I state (misses)"
+ "UMask": "0x1"
},
{
- "EventCode": "0x40",
+ "BriefDescription": "L1 data cache reads",
"Counter": "0,1",
- "UMask": "0x8",
- "EventName": "L1D_CACHE_LD.M_STATE",
+ "EventCode": "0x40",
+ "EventName": "L1D_CACHE_LD.MESI",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache read in M state"
+ "UMask": "0xf"
},
{
- "EventCode": "0x40",
+ "BriefDescription": "L1 data cache read in M state",
"Counter": "0,1",
- "UMask": "0xf",
- "EventName": "L1D_CACHE_LD.MESI",
+ "EventCode": "0x40",
+ "EventName": "L1D_CACHE_LD.M_STATE",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache reads"
+ "UMask": "0x8"
},
{
- "EventCode": "0x40",
+ "BriefDescription": "L1 data cache read in S state",
"Counter": "0,1",
- "UMask": "0x2",
+ "EventCode": "0x40",
"EventName": "L1D_CACHE_LD.S_STATE",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache read in S state"
+ "UMask": "0x2"
},
{
- "EventCode": "0x42",
+ "BriefDescription": "L1 data cache load locks in E state",
"Counter": "0,1",
- "UMask": "0x4",
+ "EventCode": "0x42",
"EventName": "L1D_CACHE_LOCK.E_STATE",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache load locks in E state"
+ "UMask": "0x4"
},
{
- "EventCode": "0x42",
+ "BriefDescription": "L1 data cache load lock hits",
"Counter": "0,1",
- "UMask": "0x1",
+ "EventCode": "0x42",
"EventName": "L1D_CACHE_LOCK.HIT",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache load lock hits"
+ "UMask": "0x1"
},
{
- "EventCode": "0x42",
+ "BriefDescription": "L1 data cache load locks in M state",
"Counter": "0,1",
- "UMask": "0x8",
+ "EventCode": "0x42",
"EventName": "L1D_CACHE_LOCK.M_STATE",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache load locks in M state"
+ "UMask": "0x8"
},
{
- "EventCode": "0x42",
+ "BriefDescription": "L1 data cache load locks in S state",
"Counter": "0,1",
- "UMask": "0x2",
+ "EventCode": "0x42",
"EventName": "L1D_CACHE_LOCK.S_STATE",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache load locks in S state"
+ "UMask": "0x2"
},
{
- "EventCode": "0x53",
+ "BriefDescription": "L1D load lock accepted in fill buffer",
"Counter": "0,1",
- "UMask": "0x1",
+ "EventCode": "0x53",
"EventName": "L1D_CACHE_LOCK_FB_HIT",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1D load lock accepted in fill buffer"
+ "UMask": "0x1"
},
{
- "EventCode": "0x52",
+ "BriefDescription": "L1D prefetch load lock accepted in fill buffer",
"Counter": "0,1",
- "UMask": "0x1",
+ "EventCode": "0x52",
"EventName": "L1D_CACHE_PREFETCH_LOCK_FB_HIT",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1D prefetch load lock accepted in fill buffer"
+ "UMask": "0x1"
},
{
- "EventCode": "0x41",
+ "BriefDescription": "L1 data cache stores in E state",
"Counter": "0,1",
- "UMask": "0x4",
+ "EventCode": "0x41",
"EventName": "L1D_CACHE_ST.E_STATE",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache stores in E state"
+ "UMask": "0x4"
},
{
- "EventCode": "0x41",
+ "BriefDescription": "L1 data cache stores in M state",
"Counter": "0,1",
- "UMask": "0x8",
+ "EventCode": "0x41",
"EventName": "L1D_CACHE_ST.M_STATE",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache stores in M state"
+ "UMask": "0x8"
},
{
- "EventCode": "0x41",
+ "BriefDescription": "L1 data cache stores in S state",
"Counter": "0,1",
- "UMask": "0x2",
+ "EventCode": "0x41",
"EventName": "L1D_CACHE_ST.S_STATE",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache stores in S state"
+ "UMask": "0x2"
},
{
- "EventCode": "0x4E",
+ "BriefDescription": "L1D hardware prefetch misses",
"Counter": "0,1",
- "UMask": "0x2",
+ "EventCode": "0x4E",
"EventName": "L1D_PREFETCH.MISS",
"SampleAfterValue": "200000",
- "BriefDescription": "L1D hardware prefetch misses"
+ "UMask": "0x2"
},
{
- "EventCode": "0x4E",
+ "BriefDescription": "L1D hardware prefetch requests",
"Counter": "0,1",
- "UMask": "0x1",
+ "EventCode": "0x4E",
"EventName": "L1D_PREFETCH.REQUESTS",
"SampleAfterValue": "200000",
- "BriefDescription": "L1D hardware prefetch requests"
+ "UMask": "0x1"
},
{
- "EventCode": "0x4E",
+ "BriefDescription": "L1D hardware prefetch requests triggered",
"Counter": "0,1",
- "UMask": "0x4",
+ "EventCode": "0x4E",
"EventName": "L1D_PREFETCH.TRIGGERS",
"SampleAfterValue": "200000",
- "BriefDescription": "L1D hardware prefetch requests triggered"
+ "UMask": "0x4"
},
{
- "EventCode": "0x28",
+ "BriefDescription": "L1 writebacks to L2 in E state",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0x28",
"EventName": "L1D_WB_L2.E_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "L1 writebacks to L2 in E state"
+ "UMask": "0x4"
},
{
- "EventCode": "0x28",
+ "BriefDescription": "L1 writebacks to L2 in I state (misses)",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0x28",
"EventName": "L1D_WB_L2.I_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "L1 writebacks to L2 in I state (misses)"
+ "UMask": "0x1"
},
{
- "EventCode": "0x28",
+ "BriefDescription": "All L1 writebacks to L2",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L1D_WB_L2.M_STATE",
+ "EventCode": "0x28",
+ "EventName": "L1D_WB_L2.MESI",
"SampleAfterValue": "100000",
- "BriefDescription": "L1 writebacks to L2 in M state"
+ "UMask": "0xf"
},
{
- "EventCode": "0x28",
+ "BriefDescription": "L1 writebacks to L2 in M state",
"Counter": "0,1,2,3",
- "UMask": "0xf",
- "EventName": "L1D_WB_L2.MESI",
+ "EventCode": "0x28",
+ "EventName": "L1D_WB_L2.M_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "All L1 writebacks to L2"
+ "UMask": "0x8"
},
{
- "EventCode": "0x28",
+ "BriefDescription": "L1 writebacks to L2 in S state",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0x28",
"EventName": "L1D_WB_L2.S_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "L1 writebacks to L2 in S state"
+ "UMask": "0x2"
},
{
- "EventCode": "0x26",
+ "BriefDescription": "All L2 data requests",
"Counter": "0,1,2,3",
- "UMask": "0xff",
+ "EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.ANY",
"SampleAfterValue": "200000",
- "BriefDescription": "All L2 data requests"
+ "UMask": "0xff"
},
{
- "EventCode": "0x26",
+ "BriefDescription": "L2 data demand loads in E state",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.DEMAND.E_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 data demand loads in E state"
+ "UMask": "0x4"
},
{
- "EventCode": "0x26",
+ "BriefDescription": "L2 data demand loads in I state (misses)",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.DEMAND.I_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 data demand loads in I state (misses)"
+ "UMask": "0x1"
},
{
- "EventCode": "0x26",
+ "BriefDescription": "L2 data demand requests",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L2_DATA_RQSTS.DEMAND.M_STATE",
+ "EventCode": "0x26",
+ "EventName": "L2_DATA_RQSTS.DEMAND.MESI",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 data demand loads in M state"
+ "UMask": "0xf"
},
{
- "EventCode": "0x26",
+ "BriefDescription": "L2 data demand loads in M state",
"Counter": "0,1,2,3",
- "UMask": "0xf",
- "EventName": "L2_DATA_RQSTS.DEMAND.MESI",
+ "EventCode": "0x26",
+ "EventName": "L2_DATA_RQSTS.DEMAND.M_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 data demand requests"
+ "UMask": "0x8"
},
{
- "EventCode": "0x26",
+ "BriefDescription": "L2 data demand loads in S state",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.DEMAND.S_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 data demand loads in S state"
+ "UMask": "0x2"
},
{
- "EventCode": "0x26",
+ "BriefDescription": "L2 data prefetches in E state",
"Counter": "0,1,2,3",
- "UMask": "0x40",
+ "EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.PREFETCH.E_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 data prefetches in E state"
+ "UMask": "0x40"
},
{
- "EventCode": "0x26",
+ "BriefDescription": "L2 data prefetches in the I state (misses)",
"Counter": "0,1,2,3",
- "UMask": "0x10",
+ "EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.PREFETCH.I_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 data prefetches in the I state (misses)"
+ "UMask": "0x10"
},
{
- "EventCode": "0x26",
+ "BriefDescription": "All L2 data prefetches",
"Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "L2_DATA_RQSTS.PREFETCH.M_STATE",
+ "EventCode": "0x26",
+ "EventName": "L2_DATA_RQSTS.PREFETCH.MESI",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 data prefetches in M state"
+ "UMask": "0xf0"
},
{
- "EventCode": "0x26",
+ "BriefDescription": "L2 data prefetches in M state",
"Counter": "0,1,2,3",
- "UMask": "0xf0",
- "EventName": "L2_DATA_RQSTS.PREFETCH.MESI",
+ "EventCode": "0x26",
+ "EventName": "L2_DATA_RQSTS.PREFETCH.M_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "All L2 data prefetches"
+ "UMask": "0x80"
},
{
- "EventCode": "0x26",
+ "BriefDescription": "L2 data prefetches in the S state",
"Counter": "0,1,2,3",
- "UMask": "0x20",
+ "EventCode": "0x26",
"EventName": "L2_DATA_RQSTS.PREFETCH.S_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 data prefetches in the S state"
+ "UMask": "0x20"
},
{
- "EventCode": "0xF1",
+ "BriefDescription": "L2 lines alloacated",
"Counter": "0,1,2,3",
- "UMask": "0x7",
+ "EventCode": "0xF1",
"EventName": "L2_LINES_IN.ANY",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 lines alloacated"
+ "UMask": "0x7"
},
{
- "EventCode": "0xF1",
+ "BriefDescription": "L2 lines allocated in the E state",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0xF1",
"EventName": "L2_LINES_IN.E_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 lines allocated in the E state"
+ "UMask": "0x4"
},
{
- "EventCode": "0xF1",
+ "BriefDescription": "L2 lines allocated in the S state",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0xF1",
"EventName": "L2_LINES_IN.S_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 lines allocated in the S state"
+ "UMask": "0x2"
},
{
- "EventCode": "0xF2",
+ "BriefDescription": "L2 lines evicted",
"Counter": "0,1,2,3",
- "UMask": "0xf",
+ "EventCode": "0xF2",
"EventName": "L2_LINES_OUT.ANY",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 lines evicted"
+ "UMask": "0xf"
},
{
- "EventCode": "0xF2",
+ "BriefDescription": "L2 lines evicted by a demand request",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0xF2",
"EventName": "L2_LINES_OUT.DEMAND_CLEAN",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 lines evicted by a demand request"
+ "UMask": "0x1"
},
{
- "EventCode": "0xF2",
+ "BriefDescription": "L2 modified lines evicted by a demand request",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0xF2",
"EventName": "L2_LINES_OUT.DEMAND_DIRTY",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 modified lines evicted by a demand request"
+ "UMask": "0x2"
},
{
- "EventCode": "0xF2",
+ "BriefDescription": "L2 lines evicted by a prefetch request",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0xF2",
"EventName": "L2_LINES_OUT.PREFETCH_CLEAN",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 lines evicted by a prefetch request"
+ "UMask": "0x4"
},
{
- "EventCode": "0xF2",
+ "BriefDescription": "L2 modified lines evicted by a prefetch request",
"Counter": "0,1,2,3",
- "UMask": "0x8",
+ "EventCode": "0xF2",
"EventName": "L2_LINES_OUT.PREFETCH_DIRTY",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 modified lines evicted by a prefetch request"
+ "UMask": "0x8"
},
{
- "EventCode": "0x24",
+ "BriefDescription": "L2 instruction fetches",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "L2_RQSTS.IFETCH_HIT",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.IFETCHES",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 instruction fetch hits"
+ "UMask": "0x30"
},
{
- "EventCode": "0x24",
+ "BriefDescription": "L2 instruction fetch hits",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "L2_RQSTS.IFETCH_MISS",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.IFETCH_HIT",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 instruction fetch misses"
+ "UMask": "0x10"
},
{
- "EventCode": "0x24",
+ "BriefDescription": "L2 instruction fetch misses",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "L2_RQSTS.IFETCHES",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.IFETCH_MISS",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 instruction fetches"
+ "UMask": "0x20"
},
{
- "EventCode": "0x24",
+ "BriefDescription": "L2 load hits",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0x24",
"EventName": "L2_RQSTS.LD_HIT",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 load hits"
+ "UMask": "0x1"
},
{
- "EventCode": "0x24",
+ "BriefDescription": "L2 load misses",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0x24",
"EventName": "L2_RQSTS.LD_MISS",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 load misses"
+ "UMask": "0x2"
},
{
- "EventCode": "0x24",
+ "BriefDescription": "L2 requests",
"Counter": "0,1,2,3",
- "UMask": "0x3",
+ "EventCode": "0x24",
"EventName": "L2_RQSTS.LOADS",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 requests"
+ "UMask": "0x3"
},
{
- "EventCode": "0x24",
+ "BriefDescription": "All L2 misses",
"Counter": "0,1,2,3",
- "UMask": "0xaa",
+ "EventCode": "0x24",
"EventName": "L2_RQSTS.MISS",
"SampleAfterValue": "200000",
- "BriefDescription": "All L2 misses"
+ "UMask": "0xaa"
},
{
+ "BriefDescription": "All L2 prefetches",
+ "Counter": "0,1,2,3",
"EventCode": "0x24",
+ "EventName": "L2_RQSTS.PREFETCHES",
+ "SampleAfterValue": "200000",
+ "UMask": "0xc0"
+ },
+ {
+ "BriefDescription": "L2 prefetch hits",
"Counter": "0,1,2,3",
- "UMask": "0x40",
+ "EventCode": "0x24",
"EventName": "L2_RQSTS.PREFETCH_HIT",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 prefetch hits"
+ "UMask": "0x40"
},
{
- "EventCode": "0x24",
+ "BriefDescription": "L2 prefetch misses",
"Counter": "0,1,2,3",
- "UMask": "0x80",
+ "EventCode": "0x24",
"EventName": "L2_RQSTS.PREFETCH_MISS",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 prefetch misses"
+ "UMask": "0x80"
},
{
- "EventCode": "0x24",
+ "BriefDescription": "All L2 requests",
"Counter": "0,1,2,3",
- "UMask": "0xc0",
- "EventName": "L2_RQSTS.PREFETCHES",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.REFERENCES",
"SampleAfterValue": "200000",
- "BriefDescription": "All L2 prefetches"
+ "UMask": "0xff"
},
{
- "EventCode": "0x24",
+ "BriefDescription": "L2 RFO requests",
"Counter": "0,1,2,3",
- "UMask": "0xff",
- "EventName": "L2_RQSTS.REFERENCES",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFOS",
"SampleAfterValue": "200000",
- "BriefDescription": "All L2 requests"
+ "UMask": "0xc"
},
{
- "EventCode": "0x24",
+ "BriefDescription": "L2 RFO hits",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_HIT",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 RFO hits"
+ "UMask": "0x4"
},
{
- "EventCode": "0x24",
+ "BriefDescription": "L2 RFO misses",
"Counter": "0,1,2,3",
- "UMask": "0x8",
+ "EventCode": "0x24",
"EventName": "L2_RQSTS.RFO_MISS",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 RFO misses"
+ "UMask": "0x8"
},
{
- "EventCode": "0x24",
+ "BriefDescription": "All L2 transactions",
"Counter": "0,1,2,3",
- "UMask": "0xc",
- "EventName": "L2_RQSTS.RFOS",
- "SampleAfterValue": "200000",
- "BriefDescription": "L2 RFO requests"
- },
- {
"EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
"EventName": "L2_TRANSACTIONS.ANY",
"SampleAfterValue": "200000",
- "BriefDescription": "All L2 transactions"
+ "UMask": "0x80"
},
{
- "EventCode": "0xF0",
+ "BriefDescription": "L2 fill transactions",
"Counter": "0,1,2,3",
- "UMask": "0x20",
+ "EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.FILL",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 fill transactions"
+ "UMask": "0x20"
},
{
- "EventCode": "0xF0",
+ "BriefDescription": "L2 instruction fetch transactions",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.IFETCH",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 instruction fetch transactions"
+ "UMask": "0x4"
},
{
- "EventCode": "0xF0",
+ "BriefDescription": "L1D writeback to L2 transactions",
"Counter": "0,1,2,3",
- "UMask": "0x10",
+ "EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.L1D_WB",
"SampleAfterValue": "200000",
- "BriefDescription": "L1D writeback to L2 transactions"
+ "UMask": "0x10"
},
{
- "EventCode": "0xF0",
+ "BriefDescription": "L2 Load transactions",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.LOAD",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 Load transactions"
+ "UMask": "0x1"
},
{
- "EventCode": "0xF0",
+ "BriefDescription": "L2 prefetch transactions",
"Counter": "0,1,2,3",
- "UMask": "0x8",
+ "EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.PREFETCH",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 prefetch transactions"
+ "UMask": "0x8"
},
{
- "EventCode": "0xF0",
+ "BriefDescription": "L2 RFO transactions",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.RFO",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 RFO transactions"
+ "UMask": "0x2"
},
{
- "EventCode": "0xF0",
+ "BriefDescription": "L2 writeback to LLC transactions",
"Counter": "0,1,2,3",
- "UMask": "0x40",
+ "EventCode": "0xF0",
"EventName": "L2_TRANSACTIONS.WB",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 writeback to LLC transactions"
+ "UMask": "0x40"
},
{
- "EventCode": "0x27",
+ "BriefDescription": "L2 demand lock RFOs in E state",
"Counter": "0,1,2,3",
- "UMask": "0x40",
+ "EventCode": "0x27",
"EventName": "L2_WRITE.LOCK.E_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 demand lock RFOs in E state"
+ "UMask": "0x40"
},
{
- "EventCode": "0x27",
+ "BriefDescription": "All demand L2 lock RFOs that hit the cache",
"Counter": "0,1,2,3",
- "UMask": "0xe0",
+ "EventCode": "0x27",
"EventName": "L2_WRITE.LOCK.HIT",
"SampleAfterValue": "100000",
- "BriefDescription": "All demand L2 lock RFOs that hit the cache"
+ "UMask": "0xe0"
},
{
- "EventCode": "0x27",
+ "BriefDescription": "L2 demand lock RFOs in I state (misses)",
"Counter": "0,1,2,3",
- "UMask": "0x10",
+ "EventCode": "0x27",
"EventName": "L2_WRITE.LOCK.I_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 demand lock RFOs in I state (misses)"
+ "UMask": "0x10"
},
{
- "EventCode": "0x27",
+ "BriefDescription": "All demand L2 lock RFOs",
"Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "L2_WRITE.LOCK.M_STATE",
+ "EventCode": "0x27",
+ "EventName": "L2_WRITE.LOCK.MESI",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 demand lock RFOs in M state"
+ "UMask": "0xf0"
},
{
- "EventCode": "0x27",
+ "BriefDescription": "L2 demand lock RFOs in M state",
"Counter": "0,1,2,3",
- "UMask": "0xf0",
- "EventName": "L2_WRITE.LOCK.MESI",
+ "EventCode": "0x27",
+ "EventName": "L2_WRITE.LOCK.M_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "All demand L2 lock RFOs"
+ "UMask": "0x80"
},
{
- "EventCode": "0x27",
+ "BriefDescription": "L2 demand lock RFOs in S state",
"Counter": "0,1,2,3",
- "UMask": "0x20",
+ "EventCode": "0x27",
"EventName": "L2_WRITE.LOCK.S_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 demand lock RFOs in S state"
+ "UMask": "0x20"
},
{
- "EventCode": "0x27",
+ "BriefDescription": "All L2 demand store RFOs that hit the cache",
"Counter": "0,1,2,3",
- "UMask": "0xe",
+ "EventCode": "0x27",
"EventName": "L2_WRITE.RFO.HIT",
"SampleAfterValue": "100000",
- "BriefDescription": "All L2 demand store RFOs that hit the cache"
+ "UMask": "0xe"
},
{
- "EventCode": "0x27",
+ "BriefDescription": "L2 demand store RFOs in I state (misses)",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0x27",
"EventName": "L2_WRITE.RFO.I_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 demand store RFOs in I state (misses)"
+ "UMask": "0x1"
},
{
- "EventCode": "0x27",
+ "BriefDescription": "All L2 demand store RFOs",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L2_WRITE.RFO.M_STATE",
+ "EventCode": "0x27",
+ "EventName": "L2_WRITE.RFO.MESI",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 demand store RFOs in M state"
+ "UMask": "0xf"
},
{
- "EventCode": "0x27",
+ "BriefDescription": "L2 demand store RFOs in M state",
"Counter": "0,1,2,3",
- "UMask": "0xf",
- "EventName": "L2_WRITE.RFO.MESI",
+ "EventCode": "0x27",
+ "EventName": "L2_WRITE.RFO.M_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "All L2 demand store RFOs"
+ "UMask": "0x8"
},
{
- "EventCode": "0x27",
+ "BriefDescription": "L2 demand store RFOs in S state",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0x27",
"EventName": "L2_WRITE.RFO.S_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 demand store RFOs in S state"
+ "UMask": "0x2"
},
{
- "EventCode": "0x2E",
+ "BriefDescription": "Longest latency cache miss",
"Counter": "0,1,2,3",
- "UMask": "0x41",
+ "EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.MISS",
"SampleAfterValue": "100000",
- "BriefDescription": "Longest latency cache miss"
+ "UMask": "0x41"
},
{
- "EventCode": "0x2E",
+ "BriefDescription": "Longest latency cache reference",
"Counter": "0,1,2,3",
- "UMask": "0x4f",
+ "EventCode": "0x2E",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
"SampleAfterValue": "200000",
- "BriefDescription": "Longest latency cache reference"
+ "UMask": "0x4f"
},
{
- "PEBS": "1",
- "EventCode": "0xB",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MEM_INST_RETIRED.LOADS",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Instructions retired which contains a load (Precise Event)"
- },
- {
- "PEBS": "1",
- "EventCode": "0xB",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MEM_INST_RETIRED.STORES",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Instructions retired which contains a store (Precise Event)"
- },
- {
- "PEBS": "1",
- "EventCode": "0xCB",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "MEM_LOAD_RETIRED.HIT_LFB",
- "SampleAfterValue": "200000",
- "BriefDescription": "Retired loads that miss L1D and hit an previously allocated LFB (Precise Event)"
- },
- {
- "PEBS": "1",
- "EventCode": "0xCB",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MEM_LOAD_RETIRED.L1D_HIT",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Retired loads that hit the L1 data cache (Precise Event)"
- },
- {
- "PEBS": "1",
- "EventCode": "0xCB",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MEM_LOAD_RETIRED.L2_HIT",
- "SampleAfterValue": "200000",
- "BriefDescription": "Retired loads that hit the L2 cache (Precise Event)"
- },
- {
- "PEBS": "1",
- "EventCode": "0xCB",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "MEM_LOAD_RETIRED.LLC_MISS",
- "SampleAfterValue": "10000",
- "BriefDescription": "Retired loads that miss the LLC cache (Precise Event)"
- },
- {
- "PEBS": "1",
- "EventCode": "0xCB",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MEM_LOAD_RETIRED.LLC_UNSHARED_HIT",
- "SampleAfterValue": "40000",
- "BriefDescription": "Retired loads that hit valid versions in the LLC cache (Precise Event)"
- },
- {
- "PEBS": "1",
- "EventCode": "0xCB",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "MEM_LOAD_RETIRED.OTHER_CORE_L2_HIT_HITM",
- "SampleAfterValue": "40000",
- "BriefDescription": "Retired loads that hit sibling core's L2 in modified or unmodified states (Precise Event)"
- },
- {
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "OFFCORE_REQUESTS.L1D_WRITEBACK",
- "SampleAfterValue": "100000",
- "BriefDescription": "Offcore L1 data cache writebacks"
- },
- {
- "EventCode": "0xB2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_SQ_FULL",
- "SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests blocked due to Super Queue full"
- },
- {
- "EventCode": "0xF4",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "SQ_MISC.SPLIT_LOCK",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Super Queue lock splits across a cache line"
- },
- {
- "EventCode": "0x6",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "STORE_BLOCKS.AT_RET",
- "SampleAfterValue": "200000",
- "BriefDescription": "Loads delayed with at-Retirement block code"
- },
- {
- "EventCode": "0x6",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "STORE_BLOCKS.L1D_BLOCK",
- "SampleAfterValue": "200000",
- "BriefDescription": "Cacheable loads delayed with L1D block code"
- },
- {
- "PEBS": "2",
- "EventCode": "0xB",
- "MSRValue": "0x0",
+ "BriefDescription": "Memory instructions retired above 0 clocks (Precise Event)",
"Counter": "3",
- "UMask": "0x10",
+ "EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_0",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x0",
+ "PEBS": "2",
"SampleAfterValue": "2000000",
- "BriefDescription": "Memory instructions retired above 0 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
- "EventCode": "0xB",
- "MSRValue": "0x400",
+ "BriefDescription": "Memory instructions retired above 1024 clocks (Precise Event)",
"Counter": "3",
- "UMask": "0x10",
+ "EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_1024",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x400",
+ "PEBS": "2",
"SampleAfterValue": "100",
- "BriefDescription": "Memory instructions retired above 1024 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
- "EventCode": "0xB",
- "MSRValue": "0x80",
+ "BriefDescription": "Memory instructions retired above 128 clocks (Precise Event)",
"Counter": "3",
- "UMask": "0x10",
+ "EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_128",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x80",
+ "PEBS": "2",
"SampleAfterValue": "1000",
- "BriefDescription": "Memory instructions retired above 128 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
- "EventCode": "0xB",
- "MSRValue": "0x10",
+ "BriefDescription": "Memory instructions retired above 16 clocks (Precise Event)",
"Counter": "3",
- "UMask": "0x10",
+ "EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_16",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PEBS": "2",
"SampleAfterValue": "10000",
- "BriefDescription": "Memory instructions retired above 16 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
- "EventCode": "0xB",
- "MSRValue": "0x4000",
+ "BriefDescription": "Memory instructions retired above 16384 clocks (Precise Event)",
"Counter": "3",
- "UMask": "0x10",
+ "EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_16384",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x4000",
+ "PEBS": "2",
"SampleAfterValue": "5",
- "BriefDescription": "Memory instructions retired above 16384 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
- "EventCode": "0xB",
- "MSRValue": "0x800",
+ "BriefDescription": "Memory instructions retired above 2048 clocks (Precise Event)",
"Counter": "3",
- "UMask": "0x10",
+ "EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_2048",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x800",
+ "PEBS": "2",
"SampleAfterValue": "50",
- "BriefDescription": "Memory instructions retired above 2048 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
- "EventCode": "0xB",
- "MSRValue": "0x100",
+ "BriefDescription": "Memory instructions retired above 256 clocks (Precise Event)",
"Counter": "3",
- "UMask": "0x10",
+ "EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_256",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
+ "PEBS": "2",
"SampleAfterValue": "500",
- "BriefDescription": "Memory instructions retired above 256 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
- "EventCode": "0xB",
- "MSRValue": "0x20",
+ "BriefDescription": "Memory instructions retired above 32 clocks (Precise Event)",
"Counter": "3",
- "UMask": "0x10",
+ "EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_32",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
"SampleAfterValue": "5000",
- "BriefDescription": "Memory instructions retired above 32 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
- "EventCode": "0xB",
- "MSRValue": "0x8000",
+ "BriefDescription": "Memory instructions retired above 32768 clocks (Precise Event)",
"Counter": "3",
- "UMask": "0x10",
+ "EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_32768",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x8000",
+ "PEBS": "2",
"SampleAfterValue": "3",
- "BriefDescription": "Memory instructions retired above 32768 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
- "EventCode": "0xB",
- "MSRValue": "0x4",
+ "BriefDescription": "Memory instructions retired above 4 clocks (Precise Event)",
"Counter": "3",
- "UMask": "0x10",
+ "EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_4",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x4",
+ "PEBS": "2",
"SampleAfterValue": "50000",
- "BriefDescription": "Memory instructions retired above 4 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
- "EventCode": "0xB",
- "MSRValue": "0x1000",
+ "BriefDescription": "Memory instructions retired above 4096 clocks (Precise Event)",
"Counter": "3",
- "UMask": "0x10",
+ "EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_4096",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x1000",
+ "PEBS": "2",
"SampleAfterValue": "20",
- "BriefDescription": "Memory instructions retired above 4096 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
- "EventCode": "0xB",
- "MSRValue": "0x200",
+ "BriefDescription": "Memory instructions retired above 512 clocks (Precise Event)",
"Counter": "3",
- "UMask": "0x10",
+ "EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_512",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x200",
+ "PEBS": "2",
"SampleAfterValue": "200",
- "BriefDescription": "Memory instructions retired above 512 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
- "EventCode": "0xB",
- "MSRValue": "0x40",
+ "BriefDescription": "Memory instructions retired above 64 clocks (Precise Event)",
"Counter": "3",
- "UMask": "0x10",
+ "EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_64",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x40",
+ "PEBS": "2",
"SampleAfterValue": "2000",
- "BriefDescription": "Memory instructions retired above 64 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
- "EventCode": "0xB",
- "MSRValue": "0x8",
+ "BriefDescription": "Memory instructions retired above 8 clocks (Precise Event)",
"Counter": "3",
- "UMask": "0x10",
+ "EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_8",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x8",
+ "PEBS": "2",
"SampleAfterValue": "20000",
- "BriefDescription": "Memory instructions retired above 8 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
- "EventCode": "0xB",
- "MSRValue": "0x2000",
+ "BriefDescription": "Memory instructions retired above 8192 clocks (Precise Event)",
"Counter": "3",
- "UMask": "0x10",
+ "EventCode": "0xB",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_8192",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x2000",
+ "PEBS": "2",
"SampleAfterValue": "10",
- "BriefDescription": "Memory instructions retired above 8192 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x7F11",
+ "BriefDescription": "Instructions retired which contains a load (Precise Event)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB",
+ "EventName": "MEM_INST_RETIRED.LOADS",
+ "PEBS": "1",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Instructions retired which contains a store (Precise Event)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB",
+ "EventName": "MEM_INST_RETIRED.STORES",
+ "PEBS": "1",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired loads that miss L1D and hit an previously allocated LFB (Precise Event)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xCB",
+ "EventName": "MEM_LOAD_RETIRED.HIT_LFB",
+ "PEBS": "1",
+ "SampleAfterValue": "200000",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Retired loads that hit the L1 data cache (Precise Event)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xCB",
+ "EventName": "MEM_LOAD_RETIRED.L1D_HIT",
+ "PEBS": "1",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired loads that hit the L2 cache (Precise Event)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xCB",
+ "EventName": "MEM_LOAD_RETIRED.L2_HIT",
+ "PEBS": "1",
+ "SampleAfterValue": "200000",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired loads that miss the LLC cache (Precise Event)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xCB",
+ "EventName": "MEM_LOAD_RETIRED.LLC_MISS",
+ "PEBS": "1",
+ "SampleAfterValue": "10000",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Retired loads that hit valid versions in the LLC cache (Precise Event)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xCB",
+ "EventName": "MEM_LOAD_RETIRED.LLC_UNSHARED_HIT",
+ "PEBS": "1",
+ "SampleAfterValue": "40000",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retired loads that hit sibling core's L2 in modified or unmodified states (Precise Event)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xCB",
+ "EventName": "MEM_LOAD_RETIRED.OTHER_CORE_L2_HIT_HITM",
+ "PEBS": "1",
+ "SampleAfterValue": "40000",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Offcore L1 data cache writebacks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.L1D_WRITEBACK",
+ "SampleAfterValue": "100000",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Offcore requests blocked due to Super Queue full",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "OFFCORE_REQUESTS_SQ_FULL",
+ "SampleAfterValue": "100000",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Offcore data reads satisfied by any cache or DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F11",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads satisfied by any cache or DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0xFF11",
+ "BriefDescription": "All offcore data reads",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF11",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore data reads",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x8011",
+ "BriefDescription": "Offcore data reads satisfied by the IO, CSR, MMIO unit",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8011",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x111",
+ "BriefDescription": "Offcore data reads satisfied by the LLC and not found in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x111",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x211",
+ "BriefDescription": "Offcore data reads satisfied by the LLC and HIT in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x211",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x411",
+ "BriefDescription": "Offcore data reads satisfied by the LLC and HITM in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x411",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x711",
+ "BriefDescription": "Offcore data reads satisfied by the LLC",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x711",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x4711",
+ "BriefDescription": "Offcore data reads satisfied by the LLC or local DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4711",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x1811",
+ "BriefDescription": "Offcore data reads satisfied by a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1811",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x3811",
+ "BriefDescription": "Offcore data reads satisfied by a remote cache or remote DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3811",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x1011",
+ "BriefDescription": "Offcore data reads that HIT in a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1011",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x811",
+ "BriefDescription": "Offcore data reads that HITM in a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x811",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x7F44",
+ "BriefDescription": "Offcore code reads satisfied by any cache or DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F44",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads satisfied by any cache or DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0xFF44",
+ "BriefDescription": "All offcore code reads",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF44",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore code reads",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x8044",
+ "BriefDescription": "Offcore code reads satisfied by the IO, CSR, MMIO unit",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8044",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x144",
+ "BriefDescription": "Offcore code reads satisfied by the LLC and not found in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x144",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x244",
+ "BriefDescription": "Offcore code reads satisfied by the LLC and HIT in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x244",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x444",
+ "BriefDescription": "Offcore code reads satisfied by the LLC and HITM in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x444",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x744",
+ "BriefDescription": "Offcore code reads satisfied by the LLC",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x744",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x4744",
+ "BriefDescription": "Offcore code reads satisfied by the LLC or local DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4744",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x1844",
+ "BriefDescription": "Offcore code reads satisfied by a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1844",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x3844",
+ "BriefDescription": "Offcore code reads satisfied by a remote cache or remote DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3844",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x1044",
+ "BriefDescription": "Offcore code reads that HIT in a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1044",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x844",
+ "BriefDescription": "Offcore code reads that HITM in a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x844",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x7FFF",
+ "BriefDescription": "Offcore requests satisfied by any cache or DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7FFF",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests satisfied by any cache or DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0xFFFF",
+ "BriefDescription": "All offcore requests",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFFFF",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore requests",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x80FF",
+ "BriefDescription": "Offcore requests satisfied by the IO, CSR, MMIO unit",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x80FF",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x1FF",
+ "BriefDescription": "Offcore requests satisfied by the LLC and not found in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1FF",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x2FF",
+ "BriefDescription": "Offcore requests satisfied by the LLC and HIT in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2FF",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x4FF",
+ "BriefDescription": "Offcore requests satisfied by the LLC and HITM in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4FF",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x7FF",
+ "BriefDescription": "Offcore requests satisfied by the LLC",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7FF",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x47FF",
+ "BriefDescription": "Offcore requests satisfied by the LLC or local DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x47FF",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x18FF",
+ "BriefDescription": "Offcore requests satisfied by a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x18FF",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x38FF",
+ "BriefDescription": "Offcore requests satisfied by a remote cache or remote DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x38FF",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x10FF",
+ "BriefDescription": "Offcore requests that HIT in a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x10FF",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x8FF",
+ "BriefDescription": "Offcore requests that HITM in a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8FF",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x7F22",
+ "BriefDescription": "Offcore RFO requests satisfied by any cache or DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F22",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests satisfied by any cache or DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0xFF22",
+ "BriefDescription": "All offcore RFO requests",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF22",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore RFO requests",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x8022",
+ "BriefDescription": "Offcore RFO requests satisfied by the IO, CSR, MMIO unit",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8022",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x122",
+ "BriefDescription": "Offcore RFO requests satisfied by the LLC and not found in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x122",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x222",
+ "BriefDescription": "Offcore RFO requests satisfied by the LLC and HIT in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x222",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x422",
+ "BriefDescription": "Offcore RFO requests satisfied by the LLC and HITM in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x422",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x722",
+ "BriefDescription": "Offcore RFO requests satisfied by the LLC",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x722",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x4722",
+ "BriefDescription": "Offcore RFO requests satisfied by the LLC or local DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4722",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x1822",
+ "BriefDescription": "Offcore RFO requests satisfied by a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1822",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x3822",
+ "BriefDescription": "Offcore RFO requests satisfied by a remote cache or remote DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3822",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x1022",
+ "BriefDescription": "Offcore RFO requests that HIT in a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1022",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x822",
+ "BriefDescription": "Offcore RFO requests that HITM in a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x822",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x7F08",
+ "BriefDescription": "Offcore writebacks to any cache or DRAM.",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F08",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks to any cache or DRAM.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0xFF08",
+ "BriefDescription": "All offcore writebacks",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF08",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore writebacks",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x8008",
+ "BriefDescription": "Offcore writebacks to the IO, CSR, MMIO unit.",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8008",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks to the IO, CSR, MMIO unit.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x108",
+ "BriefDescription": "Offcore writebacks to the LLC and not found in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x108",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks to the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x408",
+ "BriefDescription": "Offcore writebacks to the LLC and HITM in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x408",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks to the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x708",
+ "BriefDescription": "Offcore writebacks to the LLC",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x708",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks to the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x4708",
+ "BriefDescription": "Offcore writebacks to the LLC or local DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4708",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks to the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x1808",
+ "BriefDescription": "Offcore writebacks to a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1808",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks to a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x3808",
+ "BriefDescription": "Offcore writebacks to a remote cache or remote DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3808",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks to a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x1008",
+ "BriefDescription": "Offcore writebacks that HIT in a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1008",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x808",
+ "BriefDescription": "Offcore writebacks that HITM in a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x808",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x7F77",
+ "BriefDescription": "Offcore code or data read requests satisfied by any cache or DRAM.",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F77",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests satisfied by any cache or DRAM.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0xFF77",
+ "BriefDescription": "All offcore code or data read requests",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF77",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore code or data read requests",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x8077",
+ "BriefDescription": "Offcore code or data read requests satisfied by the IO, CSR, MMIO unit.",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8077",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests satisfied by the IO, CSR, MMIO unit.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x177",
+ "BriefDescription": "Offcore code or data read requests satisfied by the LLC and not found in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x177",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x277",
+ "BriefDescription": "Offcore code or data read requests satisfied by the LLC and HIT in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x277",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x477",
+ "BriefDescription": "Offcore code or data read requests satisfied by the LLC and HITM in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x477",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x777",
+ "BriefDescription": "Offcore code or data read requests satisfied by the LLC",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x777",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x4777",
+ "BriefDescription": "Offcore code or data read requests satisfied by the LLC or local DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4777",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x1877",
+ "BriefDescription": "Offcore code or data read requests satisfied by a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1877",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x3877",
+ "BriefDescription": "Offcore code or data read requests satisfied by a remote cache or remote DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3877",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x1077",
+ "BriefDescription": "Offcore code or data read requests that HIT in a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1077",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x877",
+ "BriefDescription": "Offcore code or data read requests that HITM in a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x877",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x7F33",
+ "BriefDescription": "Offcore request = all data, response = any cache_dram",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F33",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore request = all data, response = any cache_dram",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0xFF33",
+ "BriefDescription": "Offcore request = all data, response = any location",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF33",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore request = all data, response = any location",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x8033",
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the IO, CSR, MMIO unit",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8033",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads, RFO's and prefetches satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x133",
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the LLC and not found in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x133",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads, RFO's and prefetches statisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x233",
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the LLC and HIT in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x233",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads, RFO's and prefetches satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x433",
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the LLC and HITM in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x433",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads, RFO's and prefetches satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x733",
+ "BriefDescription": "Offcore request = all data, response = local cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x733",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore request = all data, response = local cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x4733",
+ "BriefDescription": "Offcore request = all data, response = local cache or dram",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4733",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore request = all data, response = local cache or dram",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x1833",
+ "BriefDescription": "Offcore request = all data, response = remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1833",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore request = all data, response = remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x3833",
+ "BriefDescription": "Offcore request = all data, response = remote cache or dram",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3833",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore request = all data, response = remote cache or dram",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x1033",
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches that HIT in a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1033",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads, RFO's and prefetches that HIT in a remote cache ",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x833",
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches that HITM in a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x833",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads, RFO's and prefetches that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x7F03",
+ "BriefDescription": "Offcore demand data requests satisfied by any cache or DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F03",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests satisfied by any cache or DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0xFF03",
+ "BriefDescription": "All offcore demand data requests",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF03",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore demand data requests",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x8003",
+ "BriefDescription": "Offcore demand data requests satisfied by the IO, CSR, MMIO unit.",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8003",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests satisfied by the IO, CSR, MMIO unit.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x103",
+ "BriefDescription": "Offcore demand data requests satisfied by the LLC and not found in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x103",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x203",
+ "BriefDescription": "Offcore demand data requests satisfied by the LLC and HIT in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x203",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x403",
+ "BriefDescription": "Offcore demand data requests satisfied by the LLC and HITM in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x403",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x703",
+ "BriefDescription": "Offcore demand data requests satisfied by the LLC",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x703",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x4703",
+ "BriefDescription": "Offcore demand data requests satisfied by the LLC or local DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4703",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x1803",
+ "BriefDescription": "Offcore demand data requests satisfied by a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1803",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x3803",
+ "BriefDescription": "Offcore demand data requests satisfied by a remote cache or remote DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3803",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x1003",
+ "BriefDescription": "Offcore demand data requests that HIT in a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1003",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x803",
+ "BriefDescription": "Offcore demand data requests that HITM in a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x803",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x7F01",
+ "BriefDescription": "Offcore demand data reads satisfied by any cache or DRAM.",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F01",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads satisfied by any cache or DRAM.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0xFF01",
+ "BriefDescription": "All offcore demand data reads",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF01",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore demand data reads",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x8001",
+ "BriefDescription": "Offcore demand data reads satisfied by the IO, CSR, MMIO unit",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8001",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x101",
+ "BriefDescription": "Offcore demand data reads satisfied by the LLC and not found in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x101",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x201",
+ "BriefDescription": "Offcore demand data reads satisfied by the LLC and HIT in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x201",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x401",
+ "BriefDescription": "Offcore demand data reads satisfied by the LLC and HITM in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x401",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x701",
+ "BriefDescription": "Offcore demand data reads satisfied by the LLC",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x701",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x4701",
+ "BriefDescription": "Offcore demand data reads satisfied by the LLC or local DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4701",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x1801",
+ "BriefDescription": "Offcore demand data reads satisfied by a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1801",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x3801",
+ "BriefDescription": "Offcore demand data reads satisfied by a remote cache or remote DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3801",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x1001",
+ "BriefDescription": "Offcore demand data reads that HIT in a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1001",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x801",
+ "BriefDescription": "Offcore demand data reads that HITM in a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x801",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x7F04",
+ "BriefDescription": "Offcore demand code reads satisfied by any cache or DRAM.",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F04",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads satisfied by any cache or DRAM.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0xFF04",
+ "BriefDescription": "All offcore demand code reads",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF04",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore demand code reads",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x8004",
+ "BriefDescription": "Offcore demand code reads satisfied by the IO, CSR, MMIO unit",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8004",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x104",
+ "BriefDescription": "Offcore demand code reads satisfied by the LLC and not found in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x104",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x204",
+ "BriefDescription": "Offcore demand code reads satisfied by the LLC and HIT in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x204",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x404",
+ "BriefDescription": "Offcore demand code reads satisfied by the LLC and HITM in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x404",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x704",
+ "BriefDescription": "Offcore demand code reads satisfied by the LLC",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x704",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x4704",
+ "BriefDescription": "Offcore demand code reads satisfied by the LLC or local DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4704",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x1804",
+ "BriefDescription": "Offcore demand code reads satisfied by a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1804",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x3804",
+ "BriefDescription": "Offcore demand code reads satisfied by a remote cache or remote DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3804",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x1004",
+ "BriefDescription": "Offcore demand code reads that HIT in a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1004",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x804",
+ "BriefDescription": "Offcore demand code reads that HITM in a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x804",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x7F02",
+ "BriefDescription": "Offcore demand RFO requests satisfied by any cache or DRAM.",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F02",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests satisfied by any cache or DRAM.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0xFF02",
+ "BriefDescription": "All offcore demand RFO requests",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF02",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore demand RFO requests",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x8002",
+ "BriefDescription": "Offcore demand RFO requests satisfied by the IO, CSR, MMIO unit",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8002",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x102",
+ "BriefDescription": "Offcore demand RFO requests satisfied by the LLC and not found in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x102",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x202",
+ "BriefDescription": "Offcore demand RFO requests satisfied by the LLC and HIT in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x202",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x402",
+ "BriefDescription": "Offcore demand RFO requests satisfied by the LLC and HITM in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x402",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x702",
+ "BriefDescription": "Offcore demand RFO requests satisfied by the LLC",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x702",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x4702",
+ "BriefDescription": "Offcore demand RFO requests satisfied by the LLC or local DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4702",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x1802",
+ "BriefDescription": "Offcore demand RFO requests satisfied by a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1802",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x3802",
+ "BriefDescription": "Offcore demand RFO requests satisfied by a remote cache or remote DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3802",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x1002",
+ "BriefDescription": "Offcore demand RFO requests that HIT in a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1002",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x802",
+ "BriefDescription": "Offcore demand RFO requests that HITM in a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x802",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x7F80",
+ "BriefDescription": "Offcore other requests satisfied by any cache or DRAM.",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F80",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests satisfied by any cache or DRAM.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0xFF80",
+ "BriefDescription": "All offcore other requests",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF80",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore other requests",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x8080",
+ "BriefDescription": "Offcore other requests satisfied by the IO, CSR, MMIO unit",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8080",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x180",
+ "BriefDescription": "Offcore other requests satisfied by the LLC and not found in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x180",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x280",
+ "BriefDescription": "Offcore other requests satisfied by the LLC and HIT in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x280",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x480",
+ "BriefDescription": "Offcore other requests satisfied by the LLC and HITM in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x480",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x780",
+ "BriefDescription": "Offcore other requests satisfied by the LLC",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x780",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x4780",
+ "BriefDescription": "Offcore other requests satisfied by the LLC or local DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4780",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x1880",
+ "BriefDescription": "Offcore other requests satisfied by a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1880",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x3880",
+ "BriefDescription": "Offcore other requests satisfied by a remote cache or remote DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3880",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x1080",
+ "BriefDescription": "Offcore other requests that HIT in a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1080",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x880",
+ "BriefDescription": "Offcore other requests that HITM in a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x880",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x7F30",
+ "BriefDescription": "Offcore prefetch data requests satisfied by any cache or DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F30",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests satisfied by any cache or DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0xFF30",
+ "BriefDescription": "All offcore prefetch data requests",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF30",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore prefetch data requests",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x8030",
+ "BriefDescription": "Offcore prefetch data requests satisfied by the IO, CSR, MMIO unit.",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8030",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests satisfied by the IO, CSR, MMIO unit.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x130",
+ "BriefDescription": "Offcore prefetch data requests satisfied by the LLC and not found in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x130",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x230",
+ "BriefDescription": "Offcore prefetch data requests satisfied by the LLC and HIT in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x230",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x430",
+ "BriefDescription": "Offcore prefetch data requests satisfied by the LLC and HITM in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x430",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x730",
+ "BriefDescription": "Offcore prefetch data requests satisfied by the LLC",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x730",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x4730",
+ "BriefDescription": "Offcore prefetch data requests satisfied by the LLC or local DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4730",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x1830",
+ "BriefDescription": "Offcore prefetch data requests satisfied by a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1830",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x3830",
+ "BriefDescription": "Offcore prefetch data requests satisfied by a remote cache or remote DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3830",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x1030",
+ "BriefDescription": "Offcore prefetch data requests that HIT in a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1030",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x830",
+ "BriefDescription": "Offcore prefetch data requests that HITM in a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x830",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x7F10",
+ "BriefDescription": "Offcore prefetch data reads satisfied by any cache or DRAM.",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F10",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads satisfied by any cache or DRAM.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0xFF10",
+ "BriefDescription": "All offcore prefetch data reads",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF10",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore prefetch data reads",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x8010",
+ "BriefDescription": "Offcore prefetch data reads satisfied by the IO, CSR, MMIO unit",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8010",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x110",
+ "BriefDescription": "Offcore prefetch data reads satisfied by the LLC and not found in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x110",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x210",
+ "BriefDescription": "Offcore prefetch data reads satisfied by the LLC and HIT in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x210",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x410",
+ "BriefDescription": "Offcore prefetch data reads satisfied by the LLC and HITM in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x410",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x710",
+ "BriefDescription": "Offcore prefetch data reads satisfied by the LLC",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x710",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x4710",
+ "BriefDescription": "Offcore prefetch data reads satisfied by the LLC or local DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4710",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x1810",
+ "BriefDescription": "Offcore prefetch data reads satisfied by a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1810",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x3810",
+ "BriefDescription": "Offcore prefetch data reads satisfied by a remote cache or remote DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3810",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x1010",
+ "BriefDescription": "Offcore prefetch data reads that HIT in a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1010",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x810",
+ "BriefDescription": "Offcore prefetch data reads that HITM in a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x810",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x7F40",
+ "BriefDescription": "Offcore prefetch code reads satisfied by any cache or DRAM.",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F40",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads satisfied by any cache or DRAM.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0xFF40",
+ "BriefDescription": "All offcore prefetch code reads",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF40",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore prefetch code reads",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x8040",
+ "BriefDescription": "Offcore prefetch code reads satisfied by the IO, CSR, MMIO unit",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8040",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x140",
+ "BriefDescription": "Offcore prefetch code reads satisfied by the LLC and not found in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x140",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x240",
+ "BriefDescription": "Offcore prefetch code reads satisfied by the LLC and HIT in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x240",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x440",
+ "BriefDescription": "Offcore prefetch code reads satisfied by the LLC and HITM in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x440",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x740",
+ "BriefDescription": "Offcore prefetch code reads satisfied by the LLC",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x740",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x4740",
+ "BriefDescription": "Offcore prefetch code reads satisfied by the LLC or local DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4740",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x1840",
+ "BriefDescription": "Offcore prefetch code reads satisfied by a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1840",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x3840",
+ "BriefDescription": "Offcore prefetch code reads satisfied by a remote cache or remote DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3840",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x1040",
+ "BriefDescription": "Offcore prefetch code reads that HIT in a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1040",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x840",
+ "BriefDescription": "Offcore prefetch code reads that HITM in a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x840",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x7F20",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by any cache or DRAM.",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F20",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests satisfied by any cache or DRAM.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0xFF20",
+ "BriefDescription": "All offcore prefetch RFO requests",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF20",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore prefetch RFO requests",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x8020",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the IO, CSR, MMIO unit",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8020",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x120",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and not found in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x120",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x220",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and HIT in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x220",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x420",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and HITM in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x420",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x720",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x720",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x4720",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC or local DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4720",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x1820",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1820",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x3820",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by a remote cache or remote DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3820",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x1020",
+ "BriefDescription": "Offcore prefetch RFO requests that HIT in a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1020",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x820",
+ "BriefDescription": "Offcore prefetch RFO requests that HITM in a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x820",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x7F70",
+ "BriefDescription": "Offcore prefetch requests satisfied by any cache or DRAM.",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F70",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests satisfied by any cache or DRAM.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0xFF70",
+ "BriefDescription": "All offcore prefetch requests",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF70",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore prefetch requests",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x8070",
+ "BriefDescription": "Offcore prefetch requests satisfied by the IO, CSR, MMIO unit",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8070",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x170",
+ "BriefDescription": "Offcore prefetch requests satisfied by the LLC and not found in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x170",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x270",
+ "BriefDescription": "Offcore prefetch requests satisfied by the LLC and HIT in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x270",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x470",
+ "BriefDescription": "Offcore prefetch requests satisfied by the LLC and HITM in a sibling core",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x470",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x770",
+ "BriefDescription": "Offcore prefetch requests satisfied by the LLC",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x770",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x4770",
+ "BriefDescription": "Offcore prefetch requests satisfied by the LLC or local DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4770",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x1870",
+ "BriefDescription": "Offcore prefetch requests satisfied by a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1870",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x3870",
+ "BriefDescription": "Offcore prefetch requests satisfied by a remote cache or remote DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3870",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x1070",
+ "BriefDescription": "Offcore prefetch requests that HIT in a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1070",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x870",
+ "BriefDescription": "Offcore prefetch requests that HITM in a remote cache",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x870",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Super Queue lock splits across a cache line",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xF4",
+ "EventName": "SQ_MISC.SPLIT_LOCK",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Loads delayed with at-Retirement block code",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "STORE_BLOCKS.AT_RET",
+ "SampleAfterValue": "200000",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cacheable loads delayed with L1D block code",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "STORE_BLOCKS.L1D_BLOCK",
+ "SampleAfterValue": "200000",
+ "UMask": "0x8"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/nehalemex/floating-point.json b/tools/perf/pmu-events/arch/x86/nehalemex/floating-point.json
index 7d2f71a9dee3..666e466d351c 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemex/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemex/floating-point.json
@@ -1,229 +1,229 @@
[
{
- "PEBS": "1",
- "EventCode": "0xF7",
+ "BriefDescription": "X87 Floating point assists (Precise Event)",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0xF7",
"EventName": "FP_ASSIST.ALL",
+ "PEBS": "1",
"SampleAfterValue": "20000",
- "BriefDescription": "X87 Floating point assists (Precise Event)"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "EventCode": "0xF7",
+ "BriefDescription": "X87 Floating poiint assists for invalid input value (Precise Event)",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0xF7",
"EventName": "FP_ASSIST.INPUT",
+ "PEBS": "1",
"SampleAfterValue": "20000",
- "BriefDescription": "X87 Floating poiint assists for invalid input value (Precise Event)"
+ "UMask": "0x4"
},
{
- "PEBS": "1",
- "EventCode": "0xF7",
+ "BriefDescription": "X87 Floating point assists for invalid output value (Precise Event)",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0xF7",
"EventName": "FP_ASSIST.OUTPUT",
+ "PEBS": "1",
"SampleAfterValue": "20000",
- "BriefDescription": "X87 Floating point assists for invalid output value (Precise Event)"
+ "UMask": "0x2"
},
{
- "EventCode": "0x10",
+ "BriefDescription": "MMX Uops",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.MMX",
"SampleAfterValue": "2000000",
- "BriefDescription": "MMX Uops"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "SSE2 integer Uops",
+ "Counter": "0,1,2,3",
"EventCode": "0x10",
+ "EventName": "FP_COMP_OPS_EXE.SSE2_INTEGER",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "SSE* FP double precision Uops",
"Counter": "0,1,2,3",
- "UMask": "0x80",
+ "EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_DOUBLE_PRECISION",
"SampleAfterValue": "2000000",
- "BriefDescription": "SSE* FP double precision Uops"
+ "UMask": "0x80"
},
{
- "EventCode": "0x10",
+ "BriefDescription": "SSE and SSE2 FP Uops",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_FP",
"SampleAfterValue": "2000000",
- "BriefDescription": "SSE and SSE2 FP Uops"
+ "UMask": "0x4"
},
{
- "EventCode": "0x10",
+ "BriefDescription": "SSE FP packed Uops",
"Counter": "0,1,2,3",
- "UMask": "0x10",
+ "EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_FP_PACKED",
"SampleAfterValue": "2000000",
- "BriefDescription": "SSE FP packed Uops"
+ "UMask": "0x10"
},
{
- "EventCode": "0x10",
+ "BriefDescription": "SSE FP scalar Uops",
"Counter": "0,1,2,3",
- "UMask": "0x20",
+ "EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_FP_SCALAR",
"SampleAfterValue": "2000000",
- "BriefDescription": "SSE FP scalar Uops"
+ "UMask": "0x20"
},
{
- "EventCode": "0x10",
+ "BriefDescription": "SSE* FP single precision Uops",
"Counter": "0,1,2,3",
- "UMask": "0x40",
+ "EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_SINGLE_PRECISION",
"SampleAfterValue": "2000000",
- "BriefDescription": "SSE* FP single precision Uops"
+ "UMask": "0x40"
},
{
- "EventCode": "0x10",
+ "BriefDescription": "Computational floating-point operations executed",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "FP_COMP_OPS_EXE.SSE2_INTEGER",
- "SampleAfterValue": "2000000",
- "BriefDescription": "SSE2 integer Uops"
- },
- {
"EventCode": "0x10",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "FP_COMP_OPS_EXE.X87",
"SampleAfterValue": "2000000",
- "BriefDescription": "Computational floating-point operations executed"
+ "UMask": "0x1"
},
{
- "EventCode": "0xCC",
+ "BriefDescription": "All Floating Point to and from MMX transitions",
"Counter": "0,1,2,3",
- "UMask": "0x3",
+ "EventCode": "0xCC",
"EventName": "FP_MMX_TRANS.ANY",
"SampleAfterValue": "2000000",
- "BriefDescription": "All Floating Point to and from MMX transitions"
+ "UMask": "0x3"
},
{
- "EventCode": "0xCC",
+ "BriefDescription": "Transitions from MMX to Floating Point instructions",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0xCC",
"EventName": "FP_MMX_TRANS.TO_FP",
"SampleAfterValue": "2000000",
- "BriefDescription": "Transitions from MMX to Floating Point instructions"
+ "UMask": "0x1"
},
{
- "EventCode": "0xCC",
+ "BriefDescription": "Transitions from Floating Point to MMX instructions",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0xCC",
"EventName": "FP_MMX_TRANS.TO_MMX",
"SampleAfterValue": "2000000",
- "BriefDescription": "Transitions from Floating Point to MMX instructions"
+ "UMask": "0x2"
},
{
- "EventCode": "0x12",
+ "BriefDescription": "128 bit SIMD integer pack operations",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0x12",
"EventName": "SIMD_INT_128.PACK",
"SampleAfterValue": "200000",
- "BriefDescription": "128 bit SIMD integer pack operations"
+ "UMask": "0x4"
},
{
- "EventCode": "0x12",
+ "BriefDescription": "128 bit SIMD integer arithmetic operations",
"Counter": "0,1,2,3",
- "UMask": "0x20",
+ "EventCode": "0x12",
"EventName": "SIMD_INT_128.PACKED_ARITH",
"SampleAfterValue": "200000",
- "BriefDescription": "128 bit SIMD integer arithmetic operations"
+ "UMask": "0x20"
},
{
- "EventCode": "0x12",
+ "BriefDescription": "128 bit SIMD integer logical operations",
"Counter": "0,1,2,3",
- "UMask": "0x10",
+ "EventCode": "0x12",
"EventName": "SIMD_INT_128.PACKED_LOGICAL",
"SampleAfterValue": "200000",
- "BriefDescription": "128 bit SIMD integer logical operations"
+ "UMask": "0x10"
},
{
- "EventCode": "0x12",
+ "BriefDescription": "128 bit SIMD integer multiply operations",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0x12",
"EventName": "SIMD_INT_128.PACKED_MPY",
"SampleAfterValue": "200000",
- "BriefDescription": "128 bit SIMD integer multiply operations"
+ "UMask": "0x1"
},
{
- "EventCode": "0x12",
+ "BriefDescription": "128 bit SIMD integer shift operations",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0x12",
"EventName": "SIMD_INT_128.PACKED_SHIFT",
"SampleAfterValue": "200000",
- "BriefDescription": "128 bit SIMD integer shift operations"
+ "UMask": "0x2"
},
{
- "EventCode": "0x12",
+ "BriefDescription": "128 bit SIMD integer shuffle/move operations",
"Counter": "0,1,2,3",
- "UMask": "0x40",
+ "EventCode": "0x12",
"EventName": "SIMD_INT_128.SHUFFLE_MOVE",
"SampleAfterValue": "200000",
- "BriefDescription": "128 bit SIMD integer shuffle/move operations"
+ "UMask": "0x40"
},
{
- "EventCode": "0x12",
+ "BriefDescription": "128 bit SIMD integer unpack operations",
"Counter": "0,1,2,3",
- "UMask": "0x8",
+ "EventCode": "0x12",
"EventName": "SIMD_INT_128.UNPACK",
"SampleAfterValue": "200000",
- "BriefDescription": "128 bit SIMD integer unpack operations"
+ "UMask": "0x8"
},
{
- "EventCode": "0xFD",
+ "BriefDescription": "SIMD integer 64 bit pack operations",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0xFD",
"EventName": "SIMD_INT_64.PACK",
"SampleAfterValue": "200000",
- "BriefDescription": "SIMD integer 64 bit pack operations"
+ "UMask": "0x4"
},
{
- "EventCode": "0xFD",
+ "BriefDescription": "SIMD integer 64 bit arithmetic operations",
"Counter": "0,1,2,3",
- "UMask": "0x20",
+ "EventCode": "0xFD",
"EventName": "SIMD_INT_64.PACKED_ARITH",
"SampleAfterValue": "200000",
- "BriefDescription": "SIMD integer 64 bit arithmetic operations"
+ "UMask": "0x20"
},
{
- "EventCode": "0xFD",
+ "BriefDescription": "SIMD integer 64 bit logical operations",
"Counter": "0,1,2,3",
- "UMask": "0x10",
+ "EventCode": "0xFD",
"EventName": "SIMD_INT_64.PACKED_LOGICAL",
"SampleAfterValue": "200000",
- "BriefDescription": "SIMD integer 64 bit logical operations"
+ "UMask": "0x10"
},
{
- "EventCode": "0xFD",
+ "BriefDescription": "SIMD integer 64 bit packed multiply operations",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0xFD",
"EventName": "SIMD_INT_64.PACKED_MPY",
"SampleAfterValue": "200000",
- "BriefDescription": "SIMD integer 64 bit packed multiply operations"
+ "UMask": "0x1"
},
{
- "EventCode": "0xFD",
+ "BriefDescription": "SIMD integer 64 bit shift operations",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0xFD",
"EventName": "SIMD_INT_64.PACKED_SHIFT",
"SampleAfterValue": "200000",
- "BriefDescription": "SIMD integer 64 bit shift operations"
+ "UMask": "0x2"
},
{
- "EventCode": "0xFD",
+ "BriefDescription": "SIMD integer 64 bit shuffle/move operations",
"Counter": "0,1,2,3",
- "UMask": "0x40",
+ "EventCode": "0xFD",
"EventName": "SIMD_INT_64.SHUFFLE_MOVE",
"SampleAfterValue": "200000",
- "BriefDescription": "SIMD integer 64 bit shuffle/move operations"
+ "UMask": "0x40"
},
{
- "EventCode": "0xFD",
+ "BriefDescription": "SIMD integer 64 bit unpack operations",
"Counter": "0,1,2,3",
- "UMask": "0x8",
+ "EventCode": "0xFD",
"EventName": "SIMD_INT_64.UNPACK",
"SampleAfterValue": "200000",
- "BriefDescription": "SIMD integer 64 bit unpack operations"
+ "UMask": "0x8"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/nehalemex/frontend.json b/tools/perf/pmu-events/arch/x86/nehalemex/frontend.json
index e5e21e03444d..c561ac24d91d 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemex/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemex/frontend.json
@@ -1,26 +1,26 @@
[
{
- "EventCode": "0xD0",
+ "BriefDescription": "Instructions decoded",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0xD0",
"EventName": "MACRO_INSTS.DECODED",
"SampleAfterValue": "2000000",
- "BriefDescription": "Instructions decoded"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA6",
+ "BriefDescription": "Macro-fused instructions decoded",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0xA6",
"EventName": "MACRO_INSTS.FUSIONS_DECODED",
"SampleAfterValue": "2000000",
- "BriefDescription": "Macro-fused instructions decoded"
+ "UMask": "0x1"
},
{
- "EventCode": "0x19",
+ "BriefDescription": "Two Uop instructions decoded",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0x19",
"EventName": "TWO_UOP_INSTS_DECODED",
"SampleAfterValue": "2000000",
- "BriefDescription": "Two Uop instructions decoded"
+ "UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/nehalemex/memory.json b/tools/perf/pmu-events/arch/x86/nehalemex/memory.json
index f914a4525b65..6e95de3f3409 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemex/memory.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemex/memory.json
@@ -1,739 +1,739 @@
[
{
- "EventCode": "0xB7",
- "MSRValue": "0x6011",
+ "BriefDescription": "Offcore data reads satisfied by any DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6011",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0xF811",
+ "BriefDescription": "Offcore data reads that missed the LLC",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF811",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x4011",
+ "BriefDescription": "Offcore data reads satisfied by the local DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4011",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x2011",
+ "BriefDescription": "Offcore data reads satisfied by a remote DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2011",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x6044",
+ "BriefDescription": "Offcore code reads satisfied by any DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6044",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0xF844",
+ "BriefDescription": "Offcore code reads that missed the LLC",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF844",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x4044",
+ "BriefDescription": "Offcore code reads satisfied by the local DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4044",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x2044",
+ "BriefDescription": "Offcore code reads satisfied by a remote DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2044",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x60FF",
+ "BriefDescription": "Offcore requests satisfied by any DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x60FF",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0xF8FF",
+ "BriefDescription": "Offcore requests that missed the LLC",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF8FF",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x40FF",
+ "BriefDescription": "Offcore requests satisfied by the local DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x40FF",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x20FF",
+ "BriefDescription": "Offcore requests satisfied by a remote DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x20FF",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x6022",
+ "BriefDescription": "Offcore RFO requests satisfied by any DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6022",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0xF822",
+ "BriefDescription": "Offcore RFO requests that missed the LLC",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF822",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x4022",
+ "BriefDescription": "Offcore RFO requests satisfied by the local DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4022",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x2022",
+ "BriefDescription": "Offcore RFO requests satisfied by a remote DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2022",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x6008",
+ "BriefDescription": "Offcore writebacks to any DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6008",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks to any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0xF808",
+ "BriefDescription": "Offcore writebacks that missed the LLC",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF808",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x4008",
+ "BriefDescription": "Offcore writebacks to the local DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4008",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks to the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x2008",
+ "BriefDescription": "Offcore writebacks to a remote DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2008",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks to a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x6077",
+ "BriefDescription": "Offcore code or data read requests satisfied by any DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6077",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0xF877",
+ "BriefDescription": "Offcore code or data read requests that missed the LLC",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF877",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x4077",
+ "BriefDescription": "Offcore code or data read requests satisfied by the local DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4077",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x2077",
+ "BriefDescription": "Offcore code or data read requests satisfied by a remote DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2077",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x6033",
+ "BriefDescription": "Offcore request = all data, response = any DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6033",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore request = all data, response = any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0xF833",
+ "BriefDescription": "Offcore request = all data, response = any LLC miss",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF833",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore request = all data, response = any LLC miss",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x4033",
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the local DRAM.",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4033",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads, RFO's and prefetches statisfied by the local DRAM.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x2033",
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the remote DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2033",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads, RFO's and prefetches statisfied by the remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x6003",
+ "BriefDescription": "Offcore demand data requests satisfied by any DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6003",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0xF803",
+ "BriefDescription": "Offcore demand data requests that missed the LLC",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF803",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x4003",
+ "BriefDescription": "Offcore demand data requests satisfied by the local DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4003",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x2003",
+ "BriefDescription": "Offcore demand data requests satisfied by a remote DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2003",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x6001",
+ "BriefDescription": "Offcore demand data reads satisfied by any DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6001",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0xF801",
+ "BriefDescription": "Offcore demand data reads that missed the LLC",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF801",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x4001",
+ "BriefDescription": "Offcore demand data reads satisfied by the local DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4001",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x2001",
+ "BriefDescription": "Offcore demand data reads satisfied by a remote DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2001",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x6004",
+ "BriefDescription": "Offcore demand code reads satisfied by any DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6004",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0xF804",
+ "BriefDescription": "Offcore demand code reads that missed the LLC",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF804",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x4004",
+ "BriefDescription": "Offcore demand code reads satisfied by the local DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4004",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x2004",
+ "BriefDescription": "Offcore demand code reads satisfied by a remote DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2004",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x6002",
+ "BriefDescription": "Offcore demand RFO requests satisfied by any DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6002",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0xF802",
+ "BriefDescription": "Offcore demand RFO requests that missed the LLC",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF802",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x4002",
+ "BriefDescription": "Offcore demand RFO requests satisfied by the local DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4002",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x2002",
+ "BriefDescription": "Offcore demand RFO requests satisfied by a remote DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2002",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x6080",
+ "BriefDescription": "Offcore other requests satisfied by any DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6080",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0xF880",
+ "BriefDescription": "Offcore other requests that missed the LLC",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF880",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x2080",
+ "BriefDescription": "Offcore other requests satisfied by a remote DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2080",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x6030",
+ "BriefDescription": "Offcore prefetch data requests satisfied by any DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6030",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0xF830",
+ "BriefDescription": "Offcore prefetch data requests that missed the LLC",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF830",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x4030",
+ "BriefDescription": "Offcore prefetch data requests satisfied by the local DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4030",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x2030",
+ "BriefDescription": "Offcore prefetch data requests satisfied by a remote DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2030",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x6010",
+ "BriefDescription": "Offcore prefetch data reads satisfied by any DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6010",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0xF810",
+ "BriefDescription": "Offcore prefetch data reads that missed the LLC",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF810",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x4010",
+ "BriefDescription": "Offcore prefetch data reads satisfied by the local DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4010",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x2010",
+ "BriefDescription": "Offcore prefetch data reads satisfied by a remote DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2010",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x6040",
+ "BriefDescription": "Offcore prefetch code reads satisfied by any DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6040",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0xF840",
+ "BriefDescription": "Offcore prefetch code reads that missed the LLC",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF840",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x4040",
+ "BriefDescription": "Offcore prefetch code reads satisfied by the local DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4040",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x2040",
+ "BriefDescription": "Offcore prefetch code reads satisfied by a remote DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2040",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x6020",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by any DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6020",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0xF820",
+ "BriefDescription": "Offcore prefetch RFO requests that missed the LLC",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF820",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x4020",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the local DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4020",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x2020",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by a remote DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2020",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x6070",
+ "BriefDescription": "Offcore prefetch requests satisfied by any DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6070",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0xF870",
+ "BriefDescription": "Offcore prefetch requests that missed the LLC",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF870",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x4070",
+ "BriefDescription": "Offcore prefetch requests satisfied by the local DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4070",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x2070",
+ "BriefDescription": "Offcore prefetch requests satisfied by a remote DRAM",
"Counter": "2",
- "UMask": "0x1",
+ "EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2070",
+ "Offcore": "1",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/nehalemex/other.json b/tools/perf/pmu-events/arch/x86/nehalemex/other.json
index af0860622445..f6887b234b0e 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemex/other.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemex/other.json
@@ -1,210 +1,146 @@
[
{
- "EventCode": "0xE8",
+ "BriefDescription": "ES segment renames",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BPU_CLEARS.EARLY",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Early Branch Prediciton Unit clears"
- },
- {
- "EventCode": "0xE8",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "BPU_CLEARS.LATE",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Late Branch Prediction Unit clears"
- },
- {
- "EventCode": "0xE5",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BPU_MISSED_CALL_RET",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Branch prediction unit missed call or return"
- },
- {
"EventCode": "0xD5",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "ES_REG_RENAMES",
"SampleAfterValue": "2000000",
- "BriefDescription": "ES segment renames"
+ "UMask": "0x1"
},
{
- "EventCode": "0x6C",
+ "BriefDescription": "I/O transactions",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0x6C",
"EventName": "IO_TRANSACTIONS",
"SampleAfterValue": "2000000",
- "BriefDescription": "I/O transactions"
+ "UMask": "0x1"
},
{
- "EventCode": "0x80",
+ "BriefDescription": "L1I instruction fetch stall cycles",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0x80",
"EventName": "L1I.CYCLES_STALLED",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1I instruction fetch stall cycles"
+ "UMask": "0x4"
},
{
- "EventCode": "0x80",
+ "BriefDescription": "L1I instruction fetch hits",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0x80",
"EventName": "L1I.HITS",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1I instruction fetch hits"
+ "UMask": "0x1"
},
{
- "EventCode": "0x80",
+ "BriefDescription": "L1I instruction fetch misses",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0x80",
"EventName": "L1I.MISSES",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1I instruction fetch misses"
+ "UMask": "0x2"
},
{
- "EventCode": "0x80",
+ "BriefDescription": "L1I Instruction fetches",
"Counter": "0,1,2,3",
- "UMask": "0x3",
+ "EventCode": "0x80",
"EventName": "L1I.READS",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1I Instruction fetches"
+ "UMask": "0x3"
},
{
- "EventCode": "0x82",
+ "BriefDescription": "Large ITLB hit",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0x82",
"EventName": "LARGE_ITLB.HIT",
"SampleAfterValue": "200000",
- "BriefDescription": "Large ITLB hit"
+ "UMask": "0x1"
},
{
- "EventCode": "0x13",
+ "BriefDescription": "All loads dispatched",
"Counter": "0,1,2,3",
- "UMask": "0x7",
+ "EventCode": "0x13",
"EventName": "LOAD_DISPATCH.ANY",
"SampleAfterValue": "2000000",
- "BriefDescription": "All loads dispatched"
+ "UMask": "0x7"
},
{
- "EventCode": "0x13",
+ "BriefDescription": "Loads dispatched from the MOB",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0x13",
"EventName": "LOAD_DISPATCH.MOB",
"SampleAfterValue": "2000000",
- "BriefDescription": "Loads dispatched from the MOB"
+ "UMask": "0x4"
},
{
- "EventCode": "0x13",
+ "BriefDescription": "Loads dispatched that bypass the MOB",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0x13",
"EventName": "LOAD_DISPATCH.RS",
"SampleAfterValue": "2000000",
- "BriefDescription": "Loads dispatched that bypass the MOB"
+ "UMask": "0x1"
},
{
- "EventCode": "0x13",
+ "BriefDescription": "Loads dispatched from stage 305",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0x13",
"EventName": "LOAD_DISPATCH.RS_DELAYED",
"SampleAfterValue": "2000000",
- "BriefDescription": "Loads dispatched from stage 305"
+ "UMask": "0x2"
},
{
- "EventCode": "0x7",
+ "BriefDescription": "False dependencies due to partial address aliasing",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0x7",
"EventName": "PARTIAL_ADDRESS_ALIAS",
"SampleAfterValue": "200000",
- "BriefDescription": "False dependencies due to partial address aliasing"
- },
- {
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0xf",
- "EventName": "RAT_STALLS.ANY",
- "SampleAfterValue": "2000000",
- "BriefDescription": "All RAT stall cycles"
- },
- {
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "RAT_STALLS.FLAGS",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Flag stall cycles"
- },
- {
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "RAT_STALLS.REGISTERS",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Partial register stall cycles"
- },
- {
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "RAT_STALLS.ROB_READ_PORT",
- "SampleAfterValue": "2000000",
- "BriefDescription": "ROB read port stalls cycles"
+ "UMask": "0x1"
},
{
- "EventCode": "0xD2",
+ "BriefDescription": "All Store buffer stall cycles",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "RAT_STALLS.SCOREBOARD",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Scoreboard stall cycles"
- },
- {
"EventCode": "0x4",
- "Counter": "0,1,2,3",
- "UMask": "0x7",
"EventName": "SB_DRAIN.ANY",
"SampleAfterValue": "200000",
- "BriefDescription": "All Store buffer stall cycles"
+ "UMask": "0x7"
},
{
- "EventCode": "0xD4",
+ "BriefDescription": "Segment rename stall cycles",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0xD4",
"EventName": "SEG_RENAME_STALLS",
"SampleAfterValue": "2000000",
- "BriefDescription": "Segment rename stall cycles"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB8",
+ "BriefDescription": "Thread responded HIT to snoop",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0xB8",
"EventName": "SNOOP_RESPONSE.HIT",
"SampleAfterValue": "100000",
- "BriefDescription": "Thread responded HIT to snoop"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB8",
+ "BriefDescription": "Thread responded HITE to snoop",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0xB8",
"EventName": "SNOOP_RESPONSE.HITE",
"SampleAfterValue": "100000",
- "BriefDescription": "Thread responded HITE to snoop"
+ "UMask": "0x2"
},
{
- "EventCode": "0xB8",
+ "BriefDescription": "Thread responded HITM to snoop",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0xB8",
"EventName": "SNOOP_RESPONSE.HITM",
"SampleAfterValue": "100000",
- "BriefDescription": "Thread responded HITM to snoop"
+ "UMask": "0x4"
},
{
- "EventCode": "0xF6",
+ "BriefDescription": "Super Queue full stall cycles",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0xF6",
"EventName": "SQ_FULL_STALL_CYCLES",
"SampleAfterValue": "2000000",
- "BriefDescription": "Super Queue full stall cycles"
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/nehalemex/pipeline.json b/tools/perf/pmu-events/arch/x86/nehalemex/pipeline.json
index 41006ddcd893..6fc1a6efd8e8 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemex/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemex/pipeline.json
@@ -1,881 +1,945 @@
[
{
- "EventCode": "0x14",
+ "BriefDescription": "Cycles the divider is busy",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0x14",
"EventName": "ARITH.CYCLES_DIV_BUSY",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles the divider is busy"
+ "UMask": "0x1"
},
{
- "EventCode": "0x14",
- "Invert": "1",
+ "BriefDescription": "Divide Operations executed",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x14",
"EventName": "ARITH.DIV",
+ "Invert": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Divide Operations executed",
- "CounterMask": "1",
- "EdgeDetect": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0x14",
+ "BriefDescription": "Multiply operations executed",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0x14",
"EventName": "ARITH.MUL",
"SampleAfterValue": "2000000",
- "BriefDescription": "Multiply operations executed"
+ "UMask": "0x2"
},
{
- "EventCode": "0xE6",
+ "BriefDescription": "BACLEAR asserted with bad target address",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0xE6",
"EventName": "BACLEAR.BAD_TARGET",
"SampleAfterValue": "2000000",
- "BriefDescription": "BACLEAR asserted with bad target address"
+ "UMask": "0x2"
},
{
- "EventCode": "0xE6",
+ "BriefDescription": "BACLEAR asserted, regardless of cause",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0xE6",
"EventName": "BACLEAR.CLEAR",
"SampleAfterValue": "2000000",
- "BriefDescription": "BACLEAR asserted, regardless of cause "
+ "UMask": "0x1"
},
{
- "EventCode": "0xA7",
+ "BriefDescription": "Instruction queue forced BACLEAR",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0xA7",
"EventName": "BACLEAR_FORCE_IQ",
"SampleAfterValue": "2000000",
- "BriefDescription": "Instruction queue forced BACLEAR"
+ "UMask": "0x1"
},
{
- "EventCode": "0xE0",
+ "BriefDescription": "Early Branch Prediciton Unit clears",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0xE8",
+ "EventName": "BPU_CLEARS.EARLY",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Late Branch Prediction Unit clears",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE8",
+ "EventName": "BPU_CLEARS.LATE",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Branch prediction unit missed call or return",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE5",
+ "EventName": "BPU_MISSED_CALL_RET",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Branch instructions decoded",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE0",
"EventName": "BR_INST_DECODED",
"SampleAfterValue": "2000000",
- "BriefDescription": "Branch instructions decoded"
+ "UMask": "0x1"
},
{
- "EventCode": "0x88",
+ "BriefDescription": "Branch instructions executed",
"Counter": "0,1,2,3",
- "UMask": "0x7f",
+ "EventCode": "0x88",
"EventName": "BR_INST_EXEC.ANY",
"SampleAfterValue": "200000",
- "BriefDescription": "Branch instructions executed"
+ "UMask": "0x7f"
},
{
- "EventCode": "0x88",
+ "BriefDescription": "Conditional branch instructions executed",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0x88",
"EventName": "BR_INST_EXEC.COND",
"SampleAfterValue": "200000",
- "BriefDescription": "Conditional branch instructions executed"
+ "UMask": "0x1"
},
{
- "EventCode": "0x88",
+ "BriefDescription": "Unconditional branches executed",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0x88",
"EventName": "BR_INST_EXEC.DIRECT",
"SampleAfterValue": "200000",
- "BriefDescription": "Unconditional branches executed"
+ "UMask": "0x2"
},
{
- "EventCode": "0x88",
+ "BriefDescription": "Unconditional call branches executed",
"Counter": "0,1,2,3",
- "UMask": "0x10",
+ "EventCode": "0x88",
"EventName": "BR_INST_EXEC.DIRECT_NEAR_CALL",
"SampleAfterValue": "20000",
- "BriefDescription": "Unconditional call branches executed"
+ "UMask": "0x10"
},
{
- "EventCode": "0x88",
+ "BriefDescription": "Indirect call branches executed",
"Counter": "0,1,2,3",
- "UMask": "0x20",
+ "EventCode": "0x88",
"EventName": "BR_INST_EXEC.INDIRECT_NEAR_CALL",
"SampleAfterValue": "20000",
- "BriefDescription": "Indirect call branches executed"
+ "UMask": "0x20"
},
{
- "EventCode": "0x88",
+ "BriefDescription": "Indirect non call branches executed",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0x88",
"EventName": "BR_INST_EXEC.INDIRECT_NON_CALL",
"SampleAfterValue": "20000",
- "BriefDescription": "Indirect non call branches executed"
+ "UMask": "0x4"
},
{
- "EventCode": "0x88",
+ "BriefDescription": "Call branches executed",
"Counter": "0,1,2,3",
- "UMask": "0x30",
+ "EventCode": "0x88",
"EventName": "BR_INST_EXEC.NEAR_CALLS",
"SampleAfterValue": "20000",
- "BriefDescription": "Call branches executed"
+ "UMask": "0x30"
},
{
- "EventCode": "0x88",
+ "BriefDescription": "All non call branches executed",
"Counter": "0,1,2,3",
- "UMask": "0x7",
+ "EventCode": "0x88",
"EventName": "BR_INST_EXEC.NON_CALLS",
"SampleAfterValue": "200000",
- "BriefDescription": "All non call branches executed"
+ "UMask": "0x7"
},
{
- "EventCode": "0x88",
+ "BriefDescription": "Indirect return branches executed",
"Counter": "0,1,2,3",
- "UMask": "0x8",
+ "EventCode": "0x88",
"EventName": "BR_INST_EXEC.RETURN_NEAR",
"SampleAfterValue": "20000",
- "BriefDescription": "Indirect return branches executed"
+ "UMask": "0x8"
},
{
- "EventCode": "0x88",
+ "BriefDescription": "Taken branches executed",
"Counter": "0,1,2,3",
- "UMask": "0x40",
+ "EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN",
"SampleAfterValue": "200000",
- "BriefDescription": "Taken branches executed"
+ "UMask": "0x40"
},
{
- "PEBS": "1",
- "EventCode": "0xC4",
+ "BriefDescription": "Retired branch instructions (Precise Event)",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
"SampleAfterValue": "200000",
- "BriefDescription": "Retired branch instructions (Precise Event)"
+ "UMask": "0x4"
},
{
- "PEBS": "1",
- "EventCode": "0xC4",
+ "BriefDescription": "Retired conditional branch instructions (Precise Event)",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "PEBS": "1",
"SampleAfterValue": "200000",
- "BriefDescription": "Retired conditional branch instructions (Precise Event)"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "EventCode": "0xC4",
+ "BriefDescription": "Retired near call instructions (Precise Event)",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0xC4",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "PEBS": "1",
"SampleAfterValue": "20000",
- "BriefDescription": "Retired near call instructions (Precise Event)"
+ "UMask": "0x2"
},
{
- "EventCode": "0x89",
+ "BriefDescription": "Mispredicted branches executed",
"Counter": "0,1,2,3",
- "UMask": "0x7f",
+ "EventCode": "0x89",
"EventName": "BR_MISP_EXEC.ANY",
"SampleAfterValue": "20000",
- "BriefDescription": "Mispredicted branches executed"
+ "UMask": "0x7f"
},
{
- "EventCode": "0x89",
+ "BriefDescription": "Mispredicted conditional branches executed",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0x89",
"EventName": "BR_MISP_EXEC.COND",
"SampleAfterValue": "20000",
- "BriefDescription": "Mispredicted conditional branches executed"
+ "UMask": "0x1"
},
{
- "EventCode": "0x89",
+ "BriefDescription": "Mispredicted unconditional branches executed",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0x89",
"EventName": "BR_MISP_EXEC.DIRECT",
"SampleAfterValue": "20000",
- "BriefDescription": "Mispredicted unconditional branches executed"
+ "UMask": "0x2"
},
{
- "EventCode": "0x89",
+ "BriefDescription": "Mispredicted non call branches executed",
"Counter": "0,1,2,3",
- "UMask": "0x10",
+ "EventCode": "0x89",
"EventName": "BR_MISP_EXEC.DIRECT_NEAR_CALL",
"SampleAfterValue": "2000",
- "BriefDescription": "Mispredicted non call branches executed"
+ "UMask": "0x10"
},
{
- "EventCode": "0x89",
+ "BriefDescription": "Mispredicted indirect call branches executed",
"Counter": "0,1,2,3",
- "UMask": "0x20",
+ "EventCode": "0x89",
"EventName": "BR_MISP_EXEC.INDIRECT_NEAR_CALL",
"SampleAfterValue": "2000",
- "BriefDescription": "Mispredicted indirect call branches executed"
+ "UMask": "0x20"
},
{
- "EventCode": "0x89",
+ "BriefDescription": "Mispredicted indirect non call branches executed",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0x89",
"EventName": "BR_MISP_EXEC.INDIRECT_NON_CALL",
"SampleAfterValue": "2000",
- "BriefDescription": "Mispredicted indirect non call branches executed"
+ "UMask": "0x4"
},
{
- "EventCode": "0x89",
+ "BriefDescription": "Mispredicted call branches executed",
"Counter": "0,1,2,3",
- "UMask": "0x30",
+ "EventCode": "0x89",
"EventName": "BR_MISP_EXEC.NEAR_CALLS",
"SampleAfterValue": "2000",
- "BriefDescription": "Mispredicted call branches executed"
+ "UMask": "0x30"
},
{
- "EventCode": "0x89",
+ "BriefDescription": "Mispredicted non call branches executed",
"Counter": "0,1,2,3",
- "UMask": "0x7",
+ "EventCode": "0x89",
"EventName": "BR_MISP_EXEC.NON_CALLS",
"SampleAfterValue": "20000",
- "BriefDescription": "Mispredicted non call branches executed"
+ "UMask": "0x7"
},
{
- "EventCode": "0x89",
+ "BriefDescription": "Mispredicted return branches executed",
"Counter": "0,1,2,3",
- "UMask": "0x8",
+ "EventCode": "0x89",
"EventName": "BR_MISP_EXEC.RETURN_NEAR",
"SampleAfterValue": "2000",
- "BriefDescription": "Mispredicted return branches executed"
+ "UMask": "0x8"
},
{
- "EventCode": "0x89",
+ "BriefDescription": "Mispredicted taken branches executed",
"Counter": "0,1,2,3",
- "UMask": "0x40",
+ "EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN",
"SampleAfterValue": "20000",
- "BriefDescription": "Mispredicted taken branches executed"
+ "UMask": "0x40"
},
{
- "PEBS": "1",
- "EventCode": "0xC5",
+ "BriefDescription": "Mispredicted near retired calls (Precise Event)",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.NEAR_CALL",
+ "PEBS": "1",
"SampleAfterValue": "2000",
- "BriefDescription": "Mispredicted near retired calls (Precise Event)"
+ "UMask": "0x2"
},
{
- "EventCode": "0x0",
+ "BriefDescription": "Reference cycles when thread is not halted (fixed counter)",
"Counter": "Fixed counter 3",
- "UMask": "0x0",
+ "EventCode": "0x0",
"EventName": "CPU_CLK_UNHALTED.REF",
"SampleAfterValue": "2000000",
- "BriefDescription": "Reference cycles when thread is not halted (fixed counter)"
+ "UMask": "0x0"
},
{
- "EventCode": "0x3C",
+ "BriefDescription": "Reference base clock (133 Mhz) cycles when thread is not halted (programmable counter)",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.REF_P",
"SampleAfterValue": "100000",
- "BriefDescription": "Reference base clock (133 Mhz) cycles when thread is not halted (programmable counter)"
+ "UMask": "0x1"
},
{
- "EventCode": "0x0",
+ "BriefDescription": "Cycles when thread is not halted (fixed counter)",
"Counter": "Fixed counter 2",
- "UMask": "0x0",
+ "EventCode": "0x0",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles when thread is not halted (fixed counter)"
+ "UMask": "0x0"
},
{
- "EventCode": "0x3C",
+ "BriefDescription": "Cycles when thread is not halted (programmable counter)",
"Counter": "0,1,2,3",
- "UMask": "0x0",
+ "EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles when thread is not halted (programmable counter)"
+ "UMask": "0x0"
},
{
- "EventCode": "0x3C",
- "Invert": "1",
+ "BriefDescription": "Total CPU cycles",
"Counter": "0,1,2,3",
- "UMask": "0x0",
+ "CounterMask": "2",
+ "EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.TOTAL_CYCLES",
+ "Invert": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Total CPU cycles",
- "CounterMask": "2"
+ "UMask": "0x0"
},
{
- "EventCode": "0x87",
+ "BriefDescription": "Any Instruction Length Decoder stall cycles",
"Counter": "0,1,2,3",
- "UMask": "0xf",
+ "EventCode": "0x87",
"EventName": "ILD_STALL.ANY",
"SampleAfterValue": "2000000",
- "BriefDescription": "Any Instruction Length Decoder stall cycles"
+ "UMask": "0xf"
},
{
- "EventCode": "0x87",
+ "BriefDescription": "Instruction Queue full stall cycles",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0x87",
"EventName": "ILD_STALL.IQ_FULL",
"SampleAfterValue": "2000000",
- "BriefDescription": "Instruction Queue full stall cycles"
+ "UMask": "0x4"
},
{
- "EventCode": "0x87",
+ "BriefDescription": "Length Change Prefix stall cycles",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0x87",
"EventName": "ILD_STALL.LCP",
"SampleAfterValue": "2000000",
- "BriefDescription": "Length Change Prefix stall cycles"
+ "UMask": "0x1"
},
{
- "EventCode": "0x87",
+ "BriefDescription": "Stall cycles due to BPU MRU bypass",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0x87",
"EventName": "ILD_STALL.MRU",
"SampleAfterValue": "2000000",
- "BriefDescription": "Stall cycles due to BPU MRU bypass"
+ "UMask": "0x2"
},
{
- "EventCode": "0x87",
+ "BriefDescription": "Regen stall cycles",
"Counter": "0,1,2,3",
- "UMask": "0x8",
+ "EventCode": "0x87",
"EventName": "ILD_STALL.REGEN",
"SampleAfterValue": "2000000",
- "BriefDescription": "Regen stall cycles"
+ "UMask": "0x8"
},
{
- "EventCode": "0x18",
+ "BriefDescription": "Instructions that must be decoded by decoder 0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0x18",
"EventName": "INST_DECODED.DEC0",
"SampleAfterValue": "2000000",
- "BriefDescription": "Instructions that must be decoded by decoder 0"
+ "UMask": "0x1"
},
{
- "EventCode": "0x1E",
+ "BriefDescription": "Instructions written to instruction queue.",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "INST_QUEUE_WRITE_CYCLES",
+ "EventCode": "0x17",
+ "EventName": "INST_QUEUE_WRITES",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles instructions are written to the instruction queue"
+ "UMask": "0x1"
},
{
- "EventCode": "0x17",
+ "BriefDescription": "Cycles instructions are written to the instruction queue",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "INST_QUEUE_WRITES",
+ "EventCode": "0x1E",
+ "EventName": "INST_QUEUE_WRITE_CYCLES",
"SampleAfterValue": "2000000",
- "BriefDescription": "Instructions written to instruction queue."
+ "UMask": "0x1"
},
{
- "EventCode": "0x0",
+ "BriefDescription": "Instructions retired (fixed counter)",
"Counter": "Fixed counter 1",
- "UMask": "0x0",
+ "EventCode": "0x0",
"EventName": "INST_RETIRED.ANY",
"SampleAfterValue": "2000000",
- "BriefDescription": "Instructions retired (fixed counter)"
+ "UMask": "0x0"
},
{
- "PEBS": "1",
- "EventCode": "0xC0",
+ "BriefDescription": "Instructions retired (Programmable counter and Precise Event)",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0xC0",
"EventName": "INST_RETIRED.ANY_P",
+ "PEBS": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Instructions retired (Programmable counter and Precise Event)"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "EventCode": "0xC0",
+ "BriefDescription": "Retired MMX instructions (Precise Event)",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0xC0",
"EventName": "INST_RETIRED.MMX",
+ "PEBS": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Retired MMX instructions (Precise Event)"
+ "UMask": "0x4"
},
{
- "PEBS": "1",
- "EventCode": "0xC0",
- "Invert": "1",
+ "BriefDescription": "Total cycles (Precise Event)",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "CounterMask": "16",
+ "EventCode": "0xC0",
"EventName": "INST_RETIRED.TOTAL_CYCLES",
+ "Invert": "1",
+ "PEBS": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Total cycles (Precise Event)",
- "CounterMask": "16"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Total cycles (Precise Event)",
+ "Counter": "0,1,2,3",
+ "CounterMask": "16",
"EventCode": "0xC0",
+ "EventName": "INST_RETIRED.TOTAL_CYCLES_PS",
+ "Invert": "1",
+ "PEBS": "2",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired floating-point operations (Precise Event)",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0xC0",
"EventName": "INST_RETIRED.X87",
+ "PEBS": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Retired floating-point operations (Precise Event)"
+ "UMask": "0x2"
},
{
- "EventCode": "0x4C",
+ "BriefDescription": "Load operations conflicting with software prefetches",
"Counter": "0,1",
- "UMask": "0x1",
+ "EventCode": "0x4C",
"EventName": "LOAD_HIT_PRE",
"SampleAfterValue": "200000",
- "BriefDescription": "Load operations conflicting with software prefetches"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA8",
+ "BriefDescription": "Cycles when uops were delivered by the LSD",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "CounterMask": "1",
+ "EventCode": "0xA8",
"EventName": "LSD.ACTIVE",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles when uops were delivered by the LSD",
- "CounterMask": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA8",
- "Invert": "1",
+ "BriefDescription": "Cycles no uops were delivered by the LSD",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "CounterMask": "1",
+ "EventCode": "0xA8",
"EventName": "LSD.INACTIVE",
+ "Invert": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles no uops were delivered by the LSD",
- "CounterMask": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0x20",
+ "BriefDescription": "Loops that can't stream from the instruction queue",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0x20",
"EventName": "LSD_OVERFLOW",
"SampleAfterValue": "2000000",
- "BriefDescription": "Loops that can't stream from the instruction queue"
+ "UMask": "0x1"
},
{
- "EventCode": "0xC3",
+ "BriefDescription": "Cycles machine clear asserted",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.CYCLES",
"SampleAfterValue": "20000",
- "BriefDescription": "Cycles machine clear asserted"
+ "UMask": "0x1"
},
{
- "EventCode": "0xC3",
+ "BriefDescription": "Execution pipeline restart due to Memory ordering conflicts",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.MEM_ORDER",
"SampleAfterValue": "20000",
- "BriefDescription": "Execution pipeline restart due to Memory ordering conflicts"
+ "UMask": "0x2"
},
{
- "EventCode": "0xC3",
+ "BriefDescription": "Self-Modifying Code detected",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.SMC",
"SampleAfterValue": "20000",
- "BriefDescription": "Self-Modifying Code detected"
+ "UMask": "0x4"
},
{
- "EventCode": "0xA2",
+ "BriefDescription": "All RAT stall cycles",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "RESOURCE_STALLS.ANY",
+ "EventCode": "0xD2",
+ "EventName": "RAT_STALLS.ANY",
+ "SampleAfterValue": "2000000",
+ "UMask": "0xf"
+ },
+ {
+ "BriefDescription": "Flag stall cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "RAT_STALLS.FLAGS",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Partial register stall cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "RAT_STALLS.REGISTERS",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "ROB read port stalls cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "RAT_STALLS.ROB_READ_PORT",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Scoreboard stall cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "RAT_STALLS.SCOREBOARD",
"SampleAfterValue": "2000000",
- "BriefDescription": "Resource related stall cycles"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Resource related stall cycles",
+ "Counter": "0,1,2,3",
"EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.ANY",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "FPU control word write stall cycles",
"Counter": "0,1,2,3",
- "UMask": "0x20",
+ "EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.FPCW",
"SampleAfterValue": "2000000",
- "BriefDescription": "FPU control word write stall cycles"
+ "UMask": "0x20"
},
{
- "EventCode": "0xA2",
+ "BriefDescription": "Load buffer stall cycles",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.LOAD",
"SampleAfterValue": "2000000",
- "BriefDescription": "Load buffer stall cycles"
+ "UMask": "0x2"
},
{
- "EventCode": "0xA2",
+ "BriefDescription": "MXCSR rename stall cycles",
"Counter": "0,1,2,3",
- "UMask": "0x40",
+ "EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.MXCSR",
"SampleAfterValue": "2000000",
- "BriefDescription": "MXCSR rename stall cycles"
+ "UMask": "0x40"
},
{
- "EventCode": "0xA2",
+ "BriefDescription": "Other Resource related stall cycles",
"Counter": "0,1,2,3",
- "UMask": "0x80",
+ "EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.OTHER",
"SampleAfterValue": "2000000",
- "BriefDescription": "Other Resource related stall cycles"
+ "UMask": "0x80"
},
{
- "EventCode": "0xA2",
+ "BriefDescription": "ROB full stall cycles",
"Counter": "0,1,2,3",
- "UMask": "0x10",
+ "EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.ROB_FULL",
"SampleAfterValue": "2000000",
- "BriefDescription": "ROB full stall cycles"
+ "UMask": "0x10"
},
{
- "EventCode": "0xA2",
+ "BriefDescription": "Reservation Station full stall cycles",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.RS_FULL",
"SampleAfterValue": "2000000",
- "BriefDescription": "Reservation Station full stall cycles"
+ "UMask": "0x4"
},
{
- "EventCode": "0xA2",
+ "BriefDescription": "Store buffer stall cycles",
"Counter": "0,1,2,3",
- "UMask": "0x8",
+ "EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.STORE",
"SampleAfterValue": "2000000",
- "BriefDescription": "Store buffer stall cycles"
+ "UMask": "0x8"
},
{
- "PEBS": "1",
- "EventCode": "0xC7",
+ "BriefDescription": "SIMD Packed-Double Uops retired (Precise Event)",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0xC7",
"EventName": "SSEX_UOPS_RETIRED.PACKED_DOUBLE",
+ "PEBS": "1",
"SampleAfterValue": "200000",
- "BriefDescription": "SIMD Packed-Double Uops retired (Precise Event)"
+ "UMask": "0x4"
},
{
- "PEBS": "1",
- "EventCode": "0xC7",
+ "BriefDescription": "SIMD Packed-Single Uops retired (Precise Event)",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0xC7",
"EventName": "SSEX_UOPS_RETIRED.PACKED_SINGLE",
+ "PEBS": "1",
"SampleAfterValue": "200000",
- "BriefDescription": "SIMD Packed-Single Uops retired (Precise Event)"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "EventCode": "0xC7",
+ "BriefDescription": "SIMD Scalar-Double Uops retired (Precise Event)",
"Counter": "0,1,2,3",
- "UMask": "0x8",
+ "EventCode": "0xC7",
"EventName": "SSEX_UOPS_RETIRED.SCALAR_DOUBLE",
+ "PEBS": "1",
"SampleAfterValue": "200000",
- "BriefDescription": "SIMD Scalar-Double Uops retired (Precise Event)"
+ "UMask": "0x8"
},
{
- "PEBS": "1",
- "EventCode": "0xC7",
+ "BriefDescription": "SIMD Scalar-Single Uops retired (Precise Event)",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0xC7",
"EventName": "SSEX_UOPS_RETIRED.SCALAR_SINGLE",
+ "PEBS": "1",
"SampleAfterValue": "200000",
- "BriefDescription": "SIMD Scalar-Single Uops retired (Precise Event)"
+ "UMask": "0x2"
},
{
- "PEBS": "1",
- "EventCode": "0xC7",
+ "BriefDescription": "SIMD Vector Integer Uops retired (Precise Event)",
"Counter": "0,1,2,3",
- "UMask": "0x10",
+ "EventCode": "0xC7",
"EventName": "SSEX_UOPS_RETIRED.VECTOR_INTEGER",
+ "PEBS": "1",
"SampleAfterValue": "200000",
- "BriefDescription": "SIMD Vector Integer Uops retired (Precise Event)"
+ "UMask": "0x10"
},
{
- "EventCode": "0xDB",
+ "BriefDescription": "Stack pointer instructions decoded",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOP_UNFUSION",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Uop unfusions due to FP exceptions"
- },
- {
"EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "UOPS_DECODED.ESP_FOLDING",
"SampleAfterValue": "2000000",
- "BriefDescription": "Stack pointer instructions decoded"
+ "UMask": "0x4"
},
{
- "EventCode": "0xD1",
+ "BriefDescription": "Stack pointer sync operations",
"Counter": "0,1,2,3",
- "UMask": "0x8",
+ "EventCode": "0xD1",
"EventName": "UOPS_DECODED.ESP_SYNC",
"SampleAfterValue": "2000000",
- "BriefDescription": "Stack pointer sync operations"
+ "UMask": "0x8"
},
{
- "EventCode": "0xD1",
+ "BriefDescription": "Uops decoded by Microcode Sequencer",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "CounterMask": "1",
+ "EventCode": "0xD1",
"EventName": "UOPS_DECODED.MS_CYCLES_ACTIVE",
"SampleAfterValue": "2000000",
- "BriefDescription": "Uops decoded by Microcode Sequencer",
- "CounterMask": "1"
+ "UMask": "0x2"
},
{
- "EventCode": "0xD1",
- "Invert": "1",
+ "BriefDescription": "Cycles no Uops are decoded",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "CounterMask": "1",
+ "EventCode": "0xD1",
"EventName": "UOPS_DECODED.STALL_CYCLES",
+ "Invert": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles no Uops are decoded",
- "CounterMask": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x3f",
"AnyThread": "1",
+ "BriefDescription": "Cycles Uops executed on any port (core count)",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_ACTIVE_CYCLES",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles Uops executed on any port (core count)",
- "CounterMask": "1"
+ "UMask": "0x3f"
},
{
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1f",
"AnyThread": "1",
+ "BriefDescription": "Cycles Uops executed on ports 0-4 (core count)",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_ACTIVE_CYCLES_NO_PORT5",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles Uops executed on ports 0-4 (core count)",
- "CounterMask": "1"
+ "UMask": "0x1f"
},
{
- "EventCode": "0xB1",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x3f",
"AnyThread": "1",
- "EventName": "UOPS_EXECUTED.CORE_STALL_COUNT",
- "SampleAfterValue": "2000000",
"BriefDescription": "Uops executed on any port (core count)",
+ "Counter": "0,1,2,3",
"CounterMask": "1",
- "EdgeDetect": "1"
- },
- {
+ "EdgeDetect": "1",
"EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_STALL_COUNT",
"Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1f",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED.CORE_STALL_COUNT_NO_PORT5",
"SampleAfterValue": "2000000",
- "BriefDescription": "Uops executed on ports 0-4 (core count)",
- "CounterMask": "1",
- "EdgeDetect": "1"
+ "UMask": "0x3f"
},
{
+ "AnyThread": "1",
+ "BriefDescription": "Uops executed on ports 0-4 (core count)",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
"EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_STALL_COUNT_NO_PORT5",
"Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x3f",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED.CORE_STALL_CYCLES",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles no Uops issued on any port (core count)",
- "CounterMask": "1"
+ "UMask": "0x1f"
},
{
+ "AnyThread": "1",
+ "BriefDescription": "Cycles no Uops issued on any port (core count)",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
"EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_STALL_CYCLES",
"Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1f",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x3f"
+ },
+ {
"AnyThread": "1",
+ "BriefDescription": "Cycles no Uops issued on ports 0-4 (core count)",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_STALL_CYCLES_NO_PORT5",
+ "Invert": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles no Uops issued on ports 0-4 (core count)",
- "CounterMask": "1"
+ "UMask": "0x1f"
},
{
- "EventCode": "0xB1",
+ "BriefDescription": "Uops executed on port 0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT0",
"SampleAfterValue": "2000000",
- "BriefDescription": "Uops executed on port 0"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB1",
+ "BriefDescription": "Uops issued on ports 0, 1 or 5",
"Counter": "0,1,2,3",
- "UMask": "0x40",
+ "EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT015",
"SampleAfterValue": "2000000",
- "BriefDescription": "Uops issued on ports 0, 1 or 5"
+ "UMask": "0x40"
},
{
- "EventCode": "0xB1",
- "Invert": "1",
+ "BriefDescription": "Cycles no Uops issued on ports 0, 1 or 5",
"Counter": "0,1,2,3",
- "UMask": "0x40",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT015_STALL_CYCLES",
+ "Invert": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles no Uops issued on ports 0, 1 or 5",
- "CounterMask": "1"
+ "UMask": "0x40"
},
{
- "EventCode": "0xB1",
+ "BriefDescription": "Uops executed on port 1",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Uops executed on port 1"
+ "UMask": "0x2"
},
{
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"AnyThread": "1",
- "EventName": "UOPS_EXECUTED.PORT2_CORE",
+ "BriefDescription": "Uops issued on ports 2, 3 or 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.PORT234_CORE",
"SampleAfterValue": "2000000",
- "BriefDescription": "Uops executed on port 2 (core count)"
+ "UMask": "0x80"
},
{
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
"AnyThread": "1",
- "EventName": "UOPS_EXECUTED.PORT234_CORE",
+ "BriefDescription": "Uops executed on port 2 (core count)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.PORT2_CORE",
"SampleAfterValue": "2000000",
- "BriefDescription": "Uops issued on ports 2, 3 or 4"
+ "UMask": "0x4"
},
{
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
"AnyThread": "1",
+ "BriefDescription": "Uops executed on port 3 (core count)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT3_CORE",
"SampleAfterValue": "2000000",
- "BriefDescription": "Uops executed on port 3 (core count)"
+ "UMask": "0x8"
},
{
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"AnyThread": "1",
+ "BriefDescription": "Uops executed on port 4 (core count)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT4_CORE",
"SampleAfterValue": "2000000",
- "BriefDescription": "Uops executed on port 4 (core count)"
+ "UMask": "0x10"
},
{
- "EventCode": "0xB1",
+ "BriefDescription": "Uops executed on port 5",
"Counter": "0,1,2,3",
- "UMask": "0x20",
+ "EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT5",
"SampleAfterValue": "2000000",
- "BriefDescription": "Uops executed on port 5"
+ "UMask": "0x20"
},
{
- "EventCode": "0xE",
+ "BriefDescription": "Uops issued",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0xE",
"EventName": "UOPS_ISSUED.ANY",
"SampleAfterValue": "2000000",
- "BriefDescription": "Uops issued"
+ "UMask": "0x1"
},
{
- "EventCode": "0xE",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"AnyThread": "1",
+ "BriefDescription": "Cycles no Uops were issued on any thread",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0xE",
"EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
+ "Invert": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles no Uops were issued on any thread",
- "CounterMask": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xE",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"AnyThread": "1",
+ "BriefDescription": "Cycles Uops were issued on either thread",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0xE",
"EventName": "UOPS_ISSUED.CYCLES_ALL_THREADS",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles Uops were issued on either thread",
- "CounterMask": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xE",
+ "BriefDescription": "Fused Uops issued",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0xE",
"EventName": "UOPS_ISSUED.FUSED",
"SampleAfterValue": "2000000",
- "BriefDescription": "Fused Uops issued"
+ "UMask": "0x2"
},
{
- "EventCode": "0xE",
- "Invert": "1",
+ "BriefDescription": "Cycles no Uops were issued",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "CounterMask": "1",
+ "EventCode": "0xE",
"EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "Invert": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles no Uops were issued",
- "CounterMask": "1"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "EventCode": "0xC2",
+ "BriefDescription": "Cycles Uops are being retired",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "CounterMask": "1",
+ "EventCode": "0xC2",
"EventName": "UOPS_RETIRED.ACTIVE_CYCLES",
+ "PEBS": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles Uops are being retired",
- "CounterMask": "1"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "EventCode": "0xC2",
+ "BriefDescription": "Uops retired (Precise Event)",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0xC2",
"EventName": "UOPS_RETIRED.ANY",
+ "PEBS": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Uops retired (Precise Event)"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "EventCode": "0xC2",
+ "BriefDescription": "Macro-fused Uops retired (Precise Event)",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0xC2",
"EventName": "UOPS_RETIRED.MACRO_FUSED",
+ "PEBS": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Macro-fused Uops retired (Precise Event)"
+ "UMask": "0x4"
},
{
- "PEBS": "1",
- "EventCode": "0xC2",
+ "BriefDescription": "Retirement slots used (Precise Event)",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0xC2",
"EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "PEBS": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Retirement slots used (Precise Event)"
+ "UMask": "0x2"
},
{
- "PEBS": "1",
- "EventCode": "0xC2",
- "Invert": "1",
+ "BriefDescription": "Cycles Uops are not retiring (Precise Event)",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "CounterMask": "1",
+ "EventCode": "0xC2",
"EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "Invert": "1",
+ "PEBS": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles Uops are not retiring (Precise Event)",
- "CounterMask": "1"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "EventCode": "0xC2",
- "Invert": "1",
+ "BriefDescription": "Total cycles using precise uop retired event (Precise Event)",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "CounterMask": "16",
+ "EventCode": "0xC2",
"EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "Invert": "1",
+ "PEBS": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Total cycles using precise uop retired event (Precise Event)",
- "CounterMask": "16"
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "EventCode": "0xC0",
- "Invert": "1",
+ "BriefDescription": "Uop unfusions due to FP exceptions",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "INST_RETIRED.TOTAL_CYCLES_PS",
+ "EventCode": "0xDB",
+ "EventName": "UOP_UNFUSION",
"SampleAfterValue": "2000000",
- "BriefDescription": "Total cycles (Precise Event)",
- "CounterMask": "16"
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
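The pipeline.json hunk above is mechanical: every event keeps the same fields and values, but the keys are re-emitted in alphabetical order and the file gains its missing trailing newline. A minimal sketch of how that normalization could be reproduced with Python's json module; the in-tree path and the 4-space indent are assumptions for illustration, not something this patch adds:

import json

def normalize(path):
    # Re-serialize the event list with alphabetically sorted keys and a
    # final newline -- the layout used by the regenerated file above.
    with open(path) as f:
        events = json.load(f)
    with open(path, "w") as f:
        json.dump(events, f, indent=4, sort_keys=True)
        f.write("\n")

normalize("tools/perf/pmu-events/arch/x86/nehalemex/pipeline.json")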
diff --git a/tools/perf/pmu-events/arch/x86/nehalemex/virtual-memory.json b/tools/perf/pmu-events/arch/x86/nehalemex/virtual-memory.json
index 0596094e0ee9..e88c0802e679 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemex/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemex/virtual-memory.json
@@ -1,109 +1,109 @@
[
{
- "EventCode": "0x8",
+ "BriefDescription": "DTLB load misses",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0x8",
"EventName": "DTLB_LOAD_MISSES.ANY",
"SampleAfterValue": "200000",
- "BriefDescription": "DTLB load misses"
+ "UMask": "0x1"
},
{
- "EventCode": "0x8",
+ "BriefDescription": "DTLB load miss caused by low part of address",
"Counter": "0,1,2,3",
- "UMask": "0x20",
+ "EventCode": "0x8",
"EventName": "DTLB_LOAD_MISSES.PDE_MISS",
"SampleAfterValue": "200000",
- "BriefDescription": "DTLB load miss caused by low part of address"
+ "UMask": "0x20"
},
{
- "EventCode": "0x8",
+ "BriefDescription": "DTLB second level hit",
"Counter": "0,1,2,3",
- "UMask": "0x10",
+ "EventCode": "0x8",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"SampleAfterValue": "2000000",
- "BriefDescription": "DTLB second level hit"
+ "UMask": "0x10"
},
{
- "EventCode": "0x8",
+ "BriefDescription": "DTLB load miss page walks complete",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0x8",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"SampleAfterValue": "200000",
- "BriefDescription": "DTLB load miss page walks complete"
+ "UMask": "0x2"
},
{
- "EventCode": "0x49",
+ "BriefDescription": "DTLB misses",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0x49",
"EventName": "DTLB_MISSES.ANY",
"SampleAfterValue": "200000",
- "BriefDescription": "DTLB misses"
+ "UMask": "0x1"
},
{
- "EventCode": "0x49",
+ "BriefDescription": "DTLB first level misses but second level hit",
"Counter": "0,1,2,3",
- "UMask": "0x10",
+ "EventCode": "0x49",
"EventName": "DTLB_MISSES.STLB_HIT",
"SampleAfterValue": "200000",
- "BriefDescription": "DTLB first level misses but second level hit"
+ "UMask": "0x10"
},
{
- "EventCode": "0x49",
+ "BriefDescription": "DTLB miss page walks",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0x49",
"EventName": "DTLB_MISSES.WALK_COMPLETED",
"SampleAfterValue": "200000",
- "BriefDescription": "DTLB miss page walks"
+ "UMask": "0x2"
},
{
- "EventCode": "0xAE",
+ "BriefDescription": "ITLB flushes",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0xAE",
"EventName": "ITLB_FLUSH",
"SampleAfterValue": "2000000",
- "BriefDescription": "ITLB flushes"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "EventCode": "0xC8",
+ "BriefDescription": "ITLB miss",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "ITLB_MISS_RETIRED",
- "SampleAfterValue": "200000",
- "BriefDescription": "Retired instructions that missed the ITLB (Precise Event)"
- },
- {
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "ITLB_MISSES.ANY",
"SampleAfterValue": "200000",
- "BriefDescription": "ITLB miss"
+ "UMask": "0x1"
},
{
- "EventCode": "0x85",
+ "BriefDescription": "ITLB miss page walks",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"SampleAfterValue": "200000",
- "BriefDescription": "ITLB miss page walks"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Retired instructions that missed the ITLB (Precise Event)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC8",
+ "EventName": "ITLB_MISS_RETIRED",
"PEBS": "1",
- "EventCode": "0xCB",
+ "SampleAfterValue": "200000",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Retired loads that miss the DTLB (Precise Event)",
"Counter": "0,1,2,3",
- "UMask": "0x80",
+ "EventCode": "0xCB",
"EventName": "MEM_LOAD_RETIRED.DTLB_MISS",
+ "PEBS": "1",
"SampleAfterValue": "200000",
- "BriefDescription": "Retired loads that miss the DTLB (Precise Event)"
+ "UMask": "0x80"
},
{
- "PEBS": "1",
- "EventCode": "0xC",
+ "BriefDescription": "Retired stores that miss the DTLB (Precise Event)",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0xC",
"EventName": "MEM_STORE_RETIRED.DTLB_MISS",
+ "PEBS": "1",
"SampleAfterValue": "200000",
- "BriefDescription": "Retired stores that miss the DTLB (Precise Event)"
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/cache.json b/tools/perf/pmu-events/arch/x86/sandybridge/cache.json
index 92a7269eb444..a1d622352131 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/cache.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/cache.json
@@ -1876,4 +1876,4 @@
"SampleAfterValue": "100003",
"UMask": "0x10"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/floating-point.json b/tools/perf/pmu-events/arch/x86/sandybridge/floating-point.json
index 713878fd062b..eb2ff2cfdf6b 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/floating-point.json
@@ -135,4 +135,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/frontend.json b/tools/perf/pmu-events/arch/x86/sandybridge/frontend.json
index fa22f9463b66..e2c82e43a2de 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/frontend.json
@@ -176,7 +176,7 @@
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_CYCLES",
- "PublicDescription": "This event counts cycles during which the microcode sequencer assisted the front-end in delivering uops. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance. See the Intel 64 and IA-32 Architectures Optimization Reference Manual for more information.",
+ "PublicDescription": "This event counts cycles during which the microcode sequencer assisted the front-end in delivering uops. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance. See the Intel(R) 64 and IA-32 Architectures Optimization Reference Manual for more information.",
"SampleAfterValue": "2000003",
"UMask": "0x30"
},
@@ -311,4 +311,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/memory.json b/tools/perf/pmu-events/arch/x86/sandybridge/memory.json
index 931892d34076..3c283ca309f3 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/memory.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/memory.json
@@ -442,4 +442,4 @@
"SampleAfterValue": "100003",
"UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/other.json b/tools/perf/pmu-events/arch/x86/sandybridge/other.json
index e251f535ec09..2f873ab14156 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/other.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/other.json
@@ -55,4 +55,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json b/tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json
index b9a3f194a00a..2c3b6c92aa6b 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json
@@ -609,7 +609,7 @@
"UMask": "0x3"
},
{
- "BriefDescription": "Number of occurences waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).",
+ "BriefDescription": "Number of occurrences waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
@@ -652,7 +652,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x03",
"EventName": "LD_BLOCKS.STORE_FORWARD",
- "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceeding smaller uncompleted store. See the table of not supported store forwards in the Intel 64 and IA-32 Architectures Optimization Reference Manual. The penalty for blocked store forwarding is that the load must wait for the store to complete before it can be issued.",
+ "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceeding smaller uncompleted store. See the table of not supported store forwards in the Intel(R) 64 and IA-32 Architectures Optimization Reference Manual. The penalty for blocked store forwarding is that the load must wait for the store to complete before it can be issued.",
"SampleAfterValue": "100003",
"UMask": "0x2"
},
@@ -778,7 +778,7 @@
"CounterMask": "1",
"EventCode": "0x59",
"EventName": "PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP_CYCLES",
- "PublicDescription": "This event counts the number of cycles spent executing performance-sensitive flags-merging uops. For example, shift CL (merge_arith_flags). For more details, See the Intel 64 and IA-32 Architectures Optimization Reference Manual.",
+ "PublicDescription": "This event counts the number of cycles spent executing performance-sensitive flags-merging uops. For example, shift CL (merge_arith_flags). For more details, See the Intel(R) 64 and IA-32 Architectures Optimization Reference Manual.",
"SampleAfterValue": "2000003",
"UMask": "0x20"
},
@@ -797,7 +797,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x59",
"EventName": "PARTIAL_RAT_STALLS.SLOW_LEA_WINDOW",
- "PublicDescription": "This event counts the number of cycles with at least one slow LEA uop being allocated. A uop is generally considered as slow LEA if it has three sources (for example, two sources and immediate) regardless of whether it is a result of LEA instruction or not. Examples of the slow LEA uop are or uops with base, index, and offset source operands using base and index reqisters, where base is EBR/RBP/R13, using RIP relative or 16-bit addressing modes. See the Intel 64 and IA-32 Architectures Optimization Reference Manual for more details about slow LEA instructions.",
+ "PublicDescription": "This event counts the number of cycles with at least one slow LEA uop being allocated. A uop is generally considered as slow LEA if it has three sources (for example, two sources and immediate) regardless of whether it is a result of LEA instruction or not. Examples of the slow LEA uop are or uops with base, index, and offset source operands using base and index reqisters, where base is EBR/RBP/R13, using RIP relative or 16-bit addressing modes. See the Intel(R) 64 and IA-32 Architectures Optimization Reference Manual for more details about slow LEA instructions.",
"SampleAfterValue": "2000003",
"UMask": "0x40"
},
@@ -1209,4 +1209,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json b/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json
index c8e7050d9c26..ae7ed267b2a2 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json
@@ -124,7 +124,7 @@
"MetricName": "FLOPc_SMT"
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
"MetricExpr": "UOPS_DISPATCHED.THREAD / (( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
"MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
"MetricName": "ILP"
@@ -142,6 +142,12 @@
"MetricName": "Instructions"
},
{
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "Retire"
+ },
+ {
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
"MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
"MetricGroup": "DSB;Fed;FetchBW",
@@ -163,7 +169,8 @@
"BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "( ( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE ) / 1000000000 ) / duration_time",
"MetricGroup": "Cor;Flops;HPC",
- "MetricName": "GFLOPs"
+ "MetricName": "GFLOPs",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine."
},
{
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/uncore-other.json b/tools/perf/pmu-events/arch/x86/sandybridge/uncore-other.json
index 6278068908cf..88f1e326205f 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/uncore-other.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/uncore-other.json
@@ -82,10 +82,10 @@
{
"BriefDescription": "This 48-bit fixed counter counts the UCLK cycles.",
"Counter": "Fixed",
+ "EventCode": "0xff",
"EventName": "UNC_CLOCK.SOCKET",
"PerPkg": "1",
"PublicDescription": "This 48-bit fixed counter counts the UCLK cycles.",
- "UMask": "0x01",
"Unit": "ARB"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/virtual-memory.json b/tools/perf/pmu-events/arch/x86/sandybridge/virtual-memory.json
index 4dd136d00a10..98362abba1a7 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/virtual-memory.json
@@ -146,4 +146,4 @@
"SampleAfterValue": "100007",
"UMask": "0x20"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/cache.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/cache.json
index 6fa723c9a6f6..348476ce8107 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/cache.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/cache.json
@@ -1,5 +1,16 @@
[
{
+ "BriefDescription": "L1D.HWPF_MISS",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "L1D.HWPF_MISS",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x20"
+ },
+ {
"BriefDescription": "Counts the number of cache lines replaced in L1 data cache.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
@@ -8,6 +19,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x1"
},
{
@@ -19,6 +31,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailablability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x2"
},
{
@@ -32,6 +45,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailablability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x2"
},
{
@@ -42,6 +56,7 @@
"EventName": "L1D_PEND_MISS.L2_STALL",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x4"
},
{
@@ -53,6 +68,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts number of cycles a demand request has waited due to L1D due to lack of L2 resources. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x4"
},
{
@@ -64,6 +80,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts number of L1D misses that are outstanding in each cycle, that is each cycle the number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch. Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x1"
},
{
@@ -76,6 +93,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts duration of L1D miss outstanding in cycles.",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x1"
},
{
@@ -87,6 +105,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x1f"
},
{
@@ -97,6 +116,7 @@
"EventName": "L2_LINES_OUT.NON_SILENT",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0x2"
},
{
@@ -108,17 +128,31 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared or Exclusive state. A non-threaded event.",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0x1"
},
{
- "BriefDescription": "All L2 requests.[This event is alias to L2_RQSTS.REFERENCES]",
+ "BriefDescription": "Cache lines that have been L2 hardware prefetched but not used by demand accesses",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x26",
+ "EventName": "L2_LINES_OUT.USELESS_HWPF",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of cache lines that have been prefetched by the L2 hardware prefetcher but not used by demand access when evicted from the L2 cache",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "All accesses to L2 cache[This event is alias to L2_RQSTS.REFERENCES]",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_REQUEST.ALL",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts all L2 requests.[This event is alias to L2_RQSTS.REFERENCES]",
+ "PublicDescription": "Counts all requests that were hit or true misses in L2 cache. True-miss excludes misses that were merged with ongoing L2 misses.[This event is alias to L2_RQSTS.REFERENCES]",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0xff"
},
{
@@ -130,6 +164,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts read requests of any type with true-miss in the L2 cache. True-miss excludes L2 misses that were merged with ongoing L2 misses.[This event is alias to L2_RQSTS.MISS]",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0x3f"
},
{
@@ -141,17 +176,19 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the total number of L2 code requests.",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0xe4"
},
{
- "BriefDescription": "Demand Data Read requests",
+ "BriefDescription": "Demand Data Read access L2 cache",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
+ "PublicDescription": "Counts Demand Data Read requests accessing the L2 cache. These requests may hit or miss L2 cache. True-miss exclude misses that were merged with ongoing L2 misses. An access is counted once.",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0xe1"
},
{
@@ -163,6 +200,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts demand requests that miss L2 cache.",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0x27"
},
{
@@ -174,9 +212,21 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts demand requests to L2 cache.",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0xe7"
},
{
+ "BriefDescription": "L2_RQSTS.ALL_HWPF",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_HWPF",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0xf0"
+ },
+ {
"BriefDescription": "RFO requests to L2 cache",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
@@ -185,6 +235,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0xe2"
},
{
@@ -196,6 +247,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0xc4"
},
{
@@ -207,6 +259,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts L2 cache misses when fetching instructions.",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0x24"
},
{
@@ -218,20 +271,33 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of demand Data Read requests initiated by load instructions that hit L2 cache.",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0xc1"
},
{
- "BriefDescription": "Demand Data Read miss L2, no rejects",
+ "BriefDescription": "Demand Data Read miss L2 cache",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
+ "PublicDescription": "Counts demand Data Read requests with true-miss in the L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. An access is counted once.",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0x21"
},
{
+ "BriefDescription": "L2_RQSTS.HWPF_MISS",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.HWPF_MISS",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x30"
+ },
+ {
"BriefDescription": "Read requests with true-miss in L2 cache.[This event is alias to L2_REQUEST.MISS]",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
@@ -240,17 +306,19 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts read requests of any type with true-miss in the L2 cache. True-miss excludes L2 misses that were merged with ongoing L2 misses.[This event is alias to L2_REQUEST.MISS]",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0x3f"
},
{
- "BriefDescription": "All L2 requests.[This event is alias to L2_REQUEST.ALL]",
+ "BriefDescription": "All accesses to L2 cache[This event is alias to L2_REQUEST.ALL]",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.REFERENCES",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts all L2 requests.[This event is alias to L2_REQUEST.ALL]",
+ "PublicDescription": "Counts all requests that were hit or true misses in L2 cache. True-miss excludes misses that were merged with ongoing L2 misses.[This event is alias to L2_REQUEST.ALL]",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0xff"
},
{
@@ -262,6 +330,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0xc2"
},
{
@@ -273,6 +342,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0x22"
},
{
@@ -284,6 +354,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts Software prefetch requests that hit the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0xc8"
},
{
@@ -295,20 +366,35 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts Software prefetch requests that miss the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0x28"
},
{
- "BriefDescription": "LONGEST_LAT_CACHE.MISS",
+ "BriefDescription": "Core-originated cacheable requests that missed L3 (Except hardware prefetches to the L3)",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.MISS",
"PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x41"
},
{
- "BriefDescription": "All retired load instructions.",
+ "BriefDescription": "Core-originated cacheable requests that refer to L3 (Except hardware prefetches to the L3)",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts core-originated cacheable requests to the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x4f"
+ },
+ {
+ "BriefDescription": "Retired load instructions.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"Data_LA": "1",
@@ -316,12 +402,12 @@
"EventName": "MEM_INST_RETIRED.ALL_LOADS",
"PEBS": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions for loads.",
+ "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions of PREFETCHNTA or PREFETCHT0/1/2 or PREFETCHW.",
"SampleAfterValue": "1000003",
"UMask": "0x81"
},
{
- "BriefDescription": "All retired store instructions.",
+ "BriefDescription": "Retired store instructions.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"Data_LA": "1",
@@ -330,7 +416,7 @@
"L1_Hit_Indication": "1",
"PEBS": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts all retired store instructions. This event account for SW prefetch instructions and PREFETCHW instruction for stores.",
+ "PublicDescription": "Counts all retired store instructions.",
"SampleAfterValue": "1000003",
"UMask": "0x82"
},
@@ -424,6 +510,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Number of completed demand load requests that missed the L1 data cache including shadow misses (FB hits, merge to an ongoing L1D miss)",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0xfd"
},
{
@@ -952,6 +1039,17 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand reads for ownership (RFO), hardware prefetch RFOs (which bring data to L2), and software prefetches for exclusive ownership (PREFETCHW) that hit to a (M)odified cacheline in the L3 or snoop filter.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.RFO_TO_CORE.L3_HIT_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1F80040022",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts streaming stores that hit in the L3 or were snooped from another core's caches on the same socket.",
"Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
@@ -970,6 +1068,7 @@
"EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x80"
},
{
@@ -981,6 +1080,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x8"
},
{
@@ -992,6 +1092,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x1"
},
{
@@ -1002,6 +1103,7 @@
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x8"
},
{
@@ -1013,6 +1115,7 @@
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x8"
},
{
@@ -1024,6 +1127,7 @@
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x4"
},
{
@@ -1034,6 +1138,7 @@
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DATA_RD",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x8"
},
{
@@ -1045,6 +1150,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of PREFETCHNTA instructions executed.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x1"
},
{
@@ -1056,6 +1162,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of PREFETCHW instructions executed.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x8"
},
{
@@ -1067,6 +1174,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of PREFETCHT0 instructions executed.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x2"
},
{
@@ -1078,6 +1186,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of PREFETCHT1 or PREFETCHT2 instructions executed.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x4"
}
]
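Most of the additions to cache.json above attach a "Speculative": "1" flag to events that count speculative activity rather than retired, architecturally visible work. A quick sketch, assuming Python and the in-tree path, that lists which events in the updated file carry the flag:

import json

path = "tools/perf/pmu-events/arch/x86/sapphirerapids/cache.json"
with open(path) as f:
    events = json.load(f)

# Print the names of events marked speculative by this update.
for event in events:
    if event.get("Speculative") == "1":
        print(event["EventName"])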
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/floating-point.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/floating-point.json
index 53d35dddd313..32074d455691 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/floating-point.json
@@ -8,6 +8,7 @@
"EventName": "ARITH.FPDIV_ACTIVE",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x1"
},
{
@@ -19,6 +20,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Counts all microcode Floating Point assists.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x2"
},
{
@@ -29,6 +31,7 @@
"EventName": "ASSISTS.SSE_AVX_MIX",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x10"
},
{
@@ -39,6 +42,7 @@
"EventName": "FP_ARITH_DISPATCHED.PORT_0",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x1"
},
{
@@ -49,6 +53,7 @@
"EventName": "FP_ARITH_DISPATCHED.PORT_1",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x2"
},
{
@@ -59,6 +64,7 @@
"EventName": "FP_ARITH_DISPATCHED.PORT_5",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x4"
},
{
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json
index 04ba0269c73c..44ecf38ad970 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json
@@ -8,6 +8,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
"SampleAfterValue": "500009",
+ "Speculative": "1",
"UMask": "0x1"
},
{
@@ -19,6 +20,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Decode Stream Buffer (DSB) is a Uop-cache that holds translations of previously fetched instructions that were decoded by the legacy x86 decode pipeline (MITE). This event counts fetch penalty cycles when a transition occurs from DSB to MITE.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x2"
},
{
@@ -313,6 +315,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The decode pipeline works at a 32 Byte granularity.",
"SampleAfterValue": "500009",
+ "Speculative": "1",
"UMask": "0x4"
},
{
@@ -324,6 +327,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
"SampleAfterValue": "200003",
+ "Speculative": "1",
"UMask": "0x4"
},
{
@@ -336,6 +340,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x8"
},
{
@@ -348,6 +353,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x8"
},
{
@@ -359,6 +365,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x8"
},
{
@@ -371,6 +378,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x4"
},
{
@@ -383,6 +391,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x4"
},
{
@@ -394,6 +403,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x4"
},
{
@@ -406,6 +416,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x20"
},
{
@@ -419,6 +430,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x20"
},
{
@@ -430,6 +442,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS).",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x20"
},
{
@@ -441,6 +454,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Counts the number of uops not delivered to by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x1"
},
{
@@ -453,6 +467,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Counts the number of cycles when no uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x1"
},
{
@@ -466,6 +481,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Counts the number of cycles when the optimal number of uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x1"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/memory.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/memory.json
index 7436ced3e04e..6e761b628ca4 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/memory.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/memory.json
@@ -8,6 +8,7 @@
"EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x6"
},
{
@@ -19,6 +20,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Counts the number of Machine Clears detected dye to memory ordering. Memory Ordering Machine Clears may apply when a memory read may not conform to the memory ordering rules of the x86 architecture",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x2"
},
{
@@ -30,6 +32,7 @@
"EventName": "MEMORY_ACTIVITY.CYCLES_L1D_MISS",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x2"
},
{
@@ -41,6 +44,7 @@
"EventName": "MEMORY_ACTIVITY.STALLS_L1D_MISS",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x3"
},
{
@@ -52,6 +56,7 @@
"EventName": "MEMORY_ACTIVITY.STALLS_L2_MISS",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x5"
},
{
@@ -63,6 +68,7 @@
"EventName": "MEMORY_ACTIVITY.STALLS_L3_MISS",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x9"
},
{
@@ -194,12 +200,13 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Retired instructions with at least 1 store uop. This PEBS event is the trigger for stores sampled by the PEBS Store Facility.",
+ "BriefDescription": "Retired memory store access operations. A PDist event for PEBS Store Latency Facility.",
"CollectPEBSRecord": "2",
"Data_LA": "1",
"EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.STORE_SAMPLE",
"PEBS": "2",
+ "PublicDescription": "Counts Retired memory accesses with at least 1 store operation. This PEBS event is the precisely-distributed (PDist) trigger covering all stores uops for sampling by the PEBS Store Latency Facility. The facility is described in Intel SDM Volume 3 section 19.9.8",
"SampleAfterValue": "1000003",
"UMask": "0x2"
},
@@ -270,6 +277,17 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F04C04477",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that missed the L3 Cache and were supplied by the local socket (DRAM or PMM), whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts PMM or DRAM accesses that are controlled by the close or distant SNC Cluster. It does not count misses to the L3 which go to Local CXL Type 2 Memory or Local Non DRAM.",
"Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
@@ -388,6 +406,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional reads",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x80"
},
{
@@ -399,6 +418,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional writes.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x2"
},
{
@@ -410,6 +430,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of times a TSX line had a cache conflict.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x1"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/other.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/other.json
index 7d6f8e25bb10..95dbef8ae80a 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/other.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/other.json
@@ -7,6 +7,7 @@
"EventName": "ASSISTS.PAGE_FAULT",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x8"
},
{
@@ -16,6 +17,7 @@
"EventName": "EXE.AMX_BUSY",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x2"
},
{
@@ -173,6 +175,17 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts data load hardware prefetch requests to the L1 data cache that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.HWPF_L1D.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10400",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts hardware prefetches (which bring data to L2) that have any type of response.",
"Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
@@ -206,6 +219,17 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts writebacks of modified cachelines and streaming stores that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.MODIFIED_WRITE.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10808",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have any type of response.",
"Counter": "0,1,2,3",
"EventCode": "0x2A,0x2B",
@@ -342,10 +366,51 @@
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa5",
+ "EventName": "RS.EMPTY",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into starvation periods (e.g. branch mispredictions or i-cache misses)",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY_COUNT",
+ "Invert": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to closely sample on front-end latency issues (see the FRONTEND_RETIRED event of designated precise events)",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event RS.EMPTY_COUNT",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xa5",
+ "EventName": "RS_EMPTY.COUNT",
+ "Invert": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event RS.EMPTY",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa5",
"EventName": "RS_EMPTY.CYCLES",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor.",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x7"
},
{
@@ -357,6 +422,7 @@
"EventName": "XQ.FULL_CYCLES",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x1"
}
]
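The new RS.EMPTY_COUNT event in other.json above combines Invert, CounterMask=1 and EdgeDetect so that, instead of counting every cycle in which the reservation station is empty (RS.EMPTY), it counts the ends of empty periods: the cmask-qualified, inverted condition flips when the RS stops being empty, and edge detection adds one per such transition. A rough Python model over a per-cycle occupancy trace (the trace itself is invented for illustration):

def count_empty_period_ends(rs_occupancy):
    # One count each time the RS goes from empty back to non-empty,
    # mirroring Invert + CounterMask=1 + EdgeDetect applied to RS.EMPTY.
    ends = 0
    was_empty = False
    for occupancy in rs_occupancy:
        empty = occupancy == 0
        if was_empty and not empty:
            ends += 1
        was_empty = empty
    return ends

print(count_empty_period_ends([3, 1, 0, 0, 2, 0, 4]))  # -> 2 empty periods ended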
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json
index b0920f5b25ed..df4f3d714e6e 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json
@@ -22,6 +22,7 @@
"EventName": "ARITH.DIVIDER_ACTIVE",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x9"
},
{
@@ -34,6 +35,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Counts cycles when divide unit is busy executing divide or square root operations. Accounts for integer and floating-point operations.",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x9"
},
{
@@ -45,6 +47,7 @@
"EventName": "ARITH.FP_DIVIDER_ACTIVE",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x1"
},
{
@@ -54,8 +57,8 @@
"EventCode": "0xb0",
"EventName": "ARITH.IDIV_ACTIVE",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "PublicDescription": "ARITH.IDIV_ACTIVE",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x8"
},
{
@@ -67,6 +70,7 @@
"EventName": "ARITH.INT_DIVIDER_ACTIVE",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x8"
},
{
@@ -78,6 +82,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Counts the number of occurrences where a microcode assist is invoked by hardware Examples include AD (page Access Dirty), FP and AVX related assists.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x1f"
},
{
@@ -223,7 +228,7 @@
"UMask": "0x10"
},
{
- "BriefDescription": "number of branch instructions retired that were mispredicted and taken. Non PEBS",
+ "BriefDescription": "number of branch instructions retired that were mispredicted and taken.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc5",
@@ -291,6 +296,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Counts core clocks when the thread is in the C0.1 light-weight slower wakeup time but more power saving optimized state. This state can be entered via the TPAUSE or UMWAIT instructions.",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x10"
},
{
@@ -302,6 +308,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Counts core clocks when the thread is in the C0.2 light-weight faster wakeup time but less power saving optimized state. This state can be entered via the TPAUSE or UMWAIT instructions.",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x20"
},
{
@@ -313,6 +320,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Counts core clocks when the thread is in the C0.1 or C0.2 power saving optimized states (TPAUSE or UMWAIT instructions) or running the PAUSE instruction.",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x70"
},
{
@@ -324,6 +332,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "This event distributes cycle counts between active hyperthreads, i.e., those in C0. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If all other hyperthreads are inactive (or disabled or do not exist), all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x2"
},
{
@@ -335,6 +344,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Counts Core crystal clock cycles when current thread is unhalted and the other thread is halted.",
"SampleAfterValue": "25003",
+ "Speculative": "1",
"UMask": "0x2"
},
{
@@ -345,6 +355,7 @@
"EventName": "CPU_CLK_UNHALTED.PAUSE",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x40"
},
{
@@ -356,6 +367,7 @@
"EventName": "CPU_CLK_UNHALTED.PAUSE_INST",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x40"
},
{
@@ -366,6 +378,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "This event distributes Core crystal clock cycle counts between active hyperthreads, i.e., those in C0 sleep-state. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If one thread is active in a core, all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x8"
},
{
@@ -376,9 +389,22 @@
"PEBScounters": "34",
"PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x3"
},
{
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC_P",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Core cycles when the thread is not in halt state",
"CollectPEBSRecord": "2",
"Counter": "Fixed counter 1",
@@ -386,6 +412,7 @@
"PEBScounters": "33",
"PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events.",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x2"
},
{
@@ -396,7 +423,8 @@
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
- "SampleAfterValue": "2000003"
+ "SampleAfterValue": "2000003",
+ "Speculative": "1"
},
{
"BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
@@ -407,6 +435,7 @@
"EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x8"
},
{
@@ -418,6 +447,7 @@
"EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x1"
},
{
@@ -429,6 +459,7 @@
"EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x10"
},
{
@@ -440,6 +471,7 @@
"EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0xc"
},
{
@@ -451,6 +483,7 @@
"EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x5"
},
{
@@ -462,6 +495,7 @@
"EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x4"
},
{
@@ -473,6 +507,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x2"
},
{
@@ -484,6 +519,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x4"
},
{
@@ -495,6 +531,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x8"
},
{
@@ -506,6 +543,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x10"
},
{
@@ -517,6 +555,7 @@
"EventName": "EXE_ACTIVITY.BOUND_ON_LOADS",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x21"
},
{
@@ -529,6 +568,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Counts cycles where the Store Buffer was full and no loads caused an execution stall.",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x40"
},
{
@@ -540,6 +580,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Number of cycles total of 0 uops executed on all ports, Reservation Station (RS) was not empty, the Store Buffer (SB) was not full and there was no outstanding load.",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x80"
},
{
@@ -551,6 +592,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Number of decoders utilized in a cycle when the MITE (legacy decode pipeline) fetches instructions.",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x1"
},
{
@@ -586,12 +628,13 @@
"UMask": "0x10"
},
{
- "BriefDescription": "Number of all retired NOP instructions.",
+ "BriefDescription": "Retired NOP instructions.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc0",
"EventName": "INST_RETIRED.NOP",
"PEBScounters": "1,2,3,4,5,6,7",
+ "PublicDescription": "Counts all retired NOP or ENDBR32/64 instructions",
"SampleAfterValue": "2000003",
"UMask": "0x2"
},
@@ -625,6 +668,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
"SampleAfterValue": "500009",
+ "Speculative": "1",
"UMask": "0x80"
},
{
@@ -634,6 +678,7 @@
"EventName": "INT_MISC.MBA_STALLS",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x20"
},
{
@@ -645,6 +690,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Counts core cycles when the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
"SampleAfterValue": "500009",
+ "Speculative": "1",
"UMask": "0x1"
},
{
@@ -657,6 +703,7 @@
"MSRValue": "0x7",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"TakenAlone": "1",
"UMask": "0x40"
},
@@ -669,6 +716,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Estimated number of Top-down Microarchitecture Analysis slots that got dropped due to non front-end reasons",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x10"
},
{
@@ -762,6 +810,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of times a load got blocked due to false dependencies in MOB due to partial compare on address.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x4"
},
{
@@ -773,6 +822,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x88"
},
{
@@ -784,6 +834,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x82"
},
{
@@ -795,6 +846,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x1"
},
{
@@ -807,6 +859,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Counts the cycles when at least one uop is delivered by the LSD (Loop-stream detector).",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x1"
},
{
@@ -819,6 +872,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Counts the cycles when optimal number of uops is delivered by the LSD (Loop-stream detector).",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x1"
},
{
@@ -830,6 +884,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Counts the number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x1"
},
{
@@ -843,6 +898,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Counts the number of machine clears (nukes) of any type.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x1"
},
{
@@ -854,6 +910,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x4"
},
{
@@ -864,6 +921,7 @@
"EventName": "MISC2_RETIRED.LFENCE",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "400009",
+ "Speculative": "1",
"UMask": "0x20"
},
{
@@ -886,6 +944,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x8"
},
{
@@ -896,6 +955,7 @@
"EventName": "RESOURCE_STALLS.SCOREBOARD",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x2"
},
{
@@ -907,6 +967,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Number of slots in TMA method where no micro-operations were being issued from front-end to back-end of the machine due to lack of back-end resources.",
"SampleAfterValue": "10000003",
+ "Speculative": "1",
"UMask": "0x2"
},
{
@@ -916,6 +977,7 @@
"EventName": "TOPDOWN.BAD_SPEC_SLOTS",
"PublicDescription": "Number of slots of TMA method that were wasted due to incorrect speculation. It covers all types of control-flow or data-related mis-speculations.",
"SampleAfterValue": "10000003",
+ "Speculative": "1",
"UMask": "0x4"
},
{
@@ -925,6 +987,7 @@
"EventName": "TOPDOWN.BR_MISPREDICT_SLOTS",
"PublicDescription": "Number of TMA slots that were wasted due to incorrect speculation by (any type of) branch mispredictions. This event estimates number of specualtive operations that were issued but not retired as well as the out-of-order engine recovery past a branch misprediction.",
"SampleAfterValue": "10000003",
+ "Speculative": "1",
"UMask": "0x8"
},
{
@@ -935,6 +998,7 @@
"EventName": "TOPDOWN.MEMORY_BOUND_SLOTS",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "10000003",
+ "Speculative": "1",
"UMask": "0x10"
},
{
@@ -945,6 +1009,7 @@
"PEBScounters": "35",
"PublicDescription": "Number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method (TMA). The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core. Software can use this event as the denominator for the top-level metrics of the TMA method. This architectural event is counted on a designated fixed counter (Fixed Counter 3).",
"SampleAfterValue": "10000003",
+ "Speculative": "1",
"UMask": "0x4"
},
{
@@ -956,6 +1021,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Counts the number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method. The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core.",
"SampleAfterValue": "10000003",
+ "Speculative": "1",
"UMask": "0x1"
},
{
@@ -966,6 +1032,7 @@
"EventName": "UOPS_DECODED.DEC0_UOPS",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
+ "Speculative": "1",
"UMask": "0x1"
},
{
@@ -977,6 +1044,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Number of uops dispatch to execution port 0.",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x1"
},
{
@@ -988,6 +1056,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Number of uops dispatch to execution port 1.",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x2"
},
{
@@ -999,6 +1068,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Number of uops dispatch to execution ports 2, 3 and 10",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x4"
},
{
@@ -1010,6 +1080,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Number of uops dispatch to execution ports 4 and 9",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x10"
},
{
@@ -1021,6 +1092,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Number of uops dispatch to execution ports 5 and 11",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x20"
},
{
@@ -1032,6 +1104,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Number of uops dispatch to execution port 6.",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x40"
},
{
@@ -1043,6 +1116,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Number of uops dispatch to execution ports 7 and 8.",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x80"
},
{
@@ -1054,6 +1128,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Counts the number of uops executed from any thread.",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x2"
},
{
@@ -1066,6 +1141,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Counts cycles when at least 1 micro-op is executed from any thread on physical core.",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x2"
},
{
@@ -1078,6 +1154,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Counts cycles when at least 2 micro-ops are executed from any thread on physical core.",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x2"
},
{
@@ -1090,6 +1167,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Counts cycles when at least 3 micro-ops are executed from any thread on physical core.",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x2"
},
{
@@ -1102,6 +1180,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Counts cycles when at least 4 micro-ops are executed from any thread on physical core.",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x2"
},
{
@@ -1114,6 +1193,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x1"
},
{
@@ -1126,6 +1206,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x1"
},
{
@@ -1138,6 +1219,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x1"
},
{
@@ -1150,6 +1232,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x1"
},
{
@@ -1163,6 +1246,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x1"
},
{
@@ -1175,6 +1259,7 @@
"Invert": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x1"
},
{
@@ -1185,6 +1270,7 @@
"EventName": "UOPS_EXECUTED.THREAD",
"PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x1"
},
{
@@ -1196,6 +1282,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Counts the number of x87 uops executed.",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x10"
},
{
@@ -1207,6 +1294,7 @@
"PEBScounters": "0,1,2,3,4,5,6,7",
"PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
"SampleAfterValue": "2000003",
+ "Speculative": "1",
"UMask": "0x1"
},
{
@@ -1222,12 +1310,13 @@
"UMask": "0x2"
},
{
- "BriefDescription": "UOPS_RETIRED.HEAVY",
+ "BriefDescription": "Retired uops except the last uop of each instruction.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.HEAVY",
"PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of retired micro-operations (uops) except the last uop of each instruction. An instruction that is decoded into less than two uops does not contribute to the count.",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json
index 8f9497838bd4..e194dfc5c25b 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json
@@ -12,12 +12,6 @@
"MetricName": "IPC"
},
{
- "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
- "MetricExpr": "1 / (INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD)",
- "MetricGroup": "Pipeline;Mem",
- "MetricName": "CPI"
- },
- {
"BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "Pipeline",
@@ -498,6 +492,12 @@
"MetricName": "Socket_CLKS"
},
{
+ "BriefDescription": "Uncore frequency per die [GHZ]",
+ "MetricExpr": "uncore_cha_0@event\\=0x1@ / #num_dies / duration_time / 1000000000",
+ "MetricGroup": "SoC",
+ "MetricName": "UNCORE_FREQ"
+ },
+ {
"BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
"MetricGroup": "Branches;OS",
@@ -526,5 +526,565 @@
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"MetricName": "C6_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "Percentage of time spent in the active CPU power state C0",
+ "MetricExpr": "100 * CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricGroup": "",
+ "MetricName": "cpu_utilization_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "CPU operating frequency (in GHz)",
+ "MetricExpr": "(( CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC * #SYSTEM_TSC_FREQ ) / 1000000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "cpu_operating_frequency",
+ "ScaleUnit": "1GHz"
+ },
+ {
+ "BriefDescription": "Cycles per instruction retired; indicating how much time each executed instruction took; in units of cycles.",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "cpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "The ratio of number of completed memory load instructions to the total number completed instructions",
+ "MetricExpr": "MEM_INST_RETIRED.ALL_LOADS / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "loads_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "The ratio of number of completed memory store instructions to the total number completed instructions",
+ "MetricExpr": "MEM_INST_RETIRED.ALL_STORES / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "stores_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of requests missing L1 data cache (includes data+rfo w/ prefetches) to the total number of completed instructions",
+ "MetricExpr": "L1D.REPLACEMENT / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "l1d_mpi_includes_data_plus_rfo_with_prefetches",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of demand load requests hitting in L1 data cache to the total number of completed instructions ",
+ "MetricExpr": "MEM_LOAD_RETIRED.L1_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "l1d_demand_data_read_hits_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of code read requests missing in L1 instruction cache (includes prefetches) to the total number of completed instructions",
+ "MetricExpr": "L2_RQSTS.ALL_CODE_RD / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "l1_i_code_read_misses_with_prefetches_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed demand load requests hitting in L2 cache to the total number of completed instructions ",
+ "MetricExpr": "MEM_LOAD_RETIRED.L2_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "l2_demand_data_read_hits_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of requests missing L2 cache (includes code+data+rfo w/ prefetches) to the total number of completed instructions",
+ "MetricExpr": "L2_LINES_IN.ALL / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "l2_mpi_includes_code_plus_data_plus_rfo_with_prefetches",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed data read request missing L2 cache to the total number of completed instructions",
+ "MetricExpr": "MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "l2_demand_data_read_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of code read request missing L2 cache to the total number of completed instructions",
+ "MetricExpr": "L2_RQSTS.CODE_RD_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "l2_demand_code_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of data read requests missing last level core cache (includes demand w/ prefetches) to the total number of completed instructions",
+ "MetricExpr": "( UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA + UNC_CHA_TOR_INSERTS.IA_MISS_DRD + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF ) / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "llc_data_read_mpi_demand_plus_prefetch",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of code read requests missing last level core cache (includes demand w/ prefetches) to the total number of completed instructions",
+ "MetricExpr": "( UNC_CHA_TOR_INSERTS.IA_MISS_CRD ) / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "llc_code_read_mpi_demand_plus_prefetch",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Average latency of a last level cache (LLC) demand data read miss (read memory access) in nano seconds",
+ "MetricExpr": "( ( 1000000000 * ( UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD / UNC_CHA_TOR_INSERTS.IA_MISS_DRD ) / ( UNC_CHA_CLOCKTICKS / ( source_count(UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD) * #num_packages ) ) ) * duration_time )",
+ "MetricGroup": "",
+ "MetricName": "llc_demand_data_read_miss_latency",
+ "ScaleUnit": "1ns"
+ },
+ {
+ "BriefDescription": "Average latency of a last level cache (LLC) demand data read miss (read memory access) addressed to local memory in nano seconds",
+ "MetricExpr": "( ( 1000000000 * ( UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL ) / ( UNC_CHA_CLOCKTICKS / ( source_count(UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL) * #num_packages ) ) ) * duration_time )",
+ "MetricGroup": "",
+ "MetricName": "llc_demand_data_read_miss_latency_for_local_requests",
+ "ScaleUnit": "1ns"
+ },
+ {
+ "BriefDescription": "Average latency of a last level cache (LLC) demand data read miss (read memory access) addressed to remote memory in nano seconds",
+ "MetricExpr": "( ( 1000000000 * ( UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_REMOTE / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE ) / ( UNC_CHA_CLOCKTICKS / ( source_count(UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_REMOTE) * #num_packages ) ) ) * duration_time )",
+ "MetricGroup": "",
+ "MetricName": "llc_demand_data_read_miss_latency_for_remote_requests",
+ "ScaleUnit": "1ns"
+ },
+ {
+ "BriefDescription": "Average latency of a last level cache (LLC) demand data read miss (read memory access) addressed to Intel(R) Optane(TM) Persistent Memory(PMEM) in nano seconds",
+ "MetricExpr": "( ( 1000000000 * ( UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PMM / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PMM ) / ( UNC_CHA_CLOCKTICKS / ( source_count(UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PMM) * #num_packages ) ) ) * duration_time )",
+ "MetricGroup": "",
+ "MetricName": "llc_demand_data_read_miss_to_pmem_latency",
+ "ScaleUnit": "1ns"
+ },
+ {
+ "BriefDescription": "Average latency of a last level cache (LLC) demand data read miss (read memory access) addressed to DRAM in nano seconds",
+ "MetricExpr": "( ( 1000000000 * ( UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_DDR / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_DDR ) / ( UNC_CHA_CLOCKTICKS / ( source_count(UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_DDR) * #num_packages ) ) ) * duration_time )",
+ "MetricGroup": "",
+ "MetricName": "llc_demand_data_read_miss_to_dram_latency",
+ "ScaleUnit": "1ns"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for all page sizes) caused by a code fetch to the total number of completed instructions. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB.",
+ "MetricExpr": "ITLB_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "itlb_2nd_level_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for 2 megabyte and 4 megabyte page sizes) caused by a code fetch to the total number of completed instructions. This implies it missed in the Instruction Translation Lookaside Buffer (ITLB) and further levels of TLB.",
+ "MetricExpr": "ITLB_MISSES.WALK_COMPLETED_2M_4M / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "itlb_2nd_level_large_page_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for all page sizes) caused by demand data loads to the total number of completed instructions. This implies it missed in the DTLB and further levels of TLB.",
+ "MetricExpr": "DTLB_LOAD_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "dtlb_2nd_level_load_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for 2 megabyte page sizes) caused by demand data loads to the total number of completed instructions. This implies it missed in the Data Translation Lookaside Buffer (DTLB) and further levels of TLB.",
+ "MetricExpr": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "dtlb_2nd_level_2mb_large_page_load_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for all page sizes) caused by demand data stores to the total number of completed instructions. This implies it missed in the DTLB and further levels of TLB.",
+ "MetricExpr": "DTLB_STORE_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "dtlb_2nd_level_store_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Memory read that miss the last level cache (LLC) addressed to local DRAM as a percentage of total memory read accesses, does not include LLC prefetches.",
+ "MetricExpr": "100 * ( UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL ) / ( UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE )",
+ "MetricGroup": "",
+ "MetricName": "numa_percent_reads_addressed_to_local_dram",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Memory reads that miss the last level cache (LLC) addressed to remote DRAM as a percentage of total memory read accesses, does not include LLC prefetches.",
+ "MetricExpr": "100 * ( UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE ) / ( UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE + UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE )",
+ "MetricGroup": "",
+ "MetricName": "numa_percent_reads_addressed_to_remote_dram",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Uncore operating frequency in GHz",
+ "MetricExpr": "( UNC_CHA_CLOCKTICKS / ( source_count(UNC_CHA_CLOCKTICKS) * #num_packages ) / 1000000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "uncore_frequency",
+ "ScaleUnit": "1GHz"
+ },
+ {
+ "BriefDescription": "Intel(R) Ultra Path Interconnect (UPI) data transmit bandwidth (MB/sec)",
+ "MetricExpr": "( UNC_UPI_TxL_FLITS.ALL_DATA * (64 / 9.0) / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "upi_data_transmit_bw_only_data",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "DDR memory read bandwidth (MB/sec)",
+ "MetricExpr": "( UNC_M_CAS_COUNT.RD * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "memory_bandwidth_read",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "DDR memory write bandwidth (MB/sec)",
+ "MetricExpr": "( UNC_M_CAS_COUNT.WR * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "memory_bandwidth_write",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "DDR memory bandwidth (MB/sec)",
+ "MetricExpr": "(( UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR ) * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "memory_bandwidth_total",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Intel(R) Optane(TM) Persistent Memory(PMEM) memory read bandwidth (MB/sec)",
+ "MetricExpr": "( UNC_M_PMM_RPQ_INSERTS * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "pmem_memory_bandwidth_read",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Intel(R) Optane(TM) Persistent Memory(PMEM) memory write bandwidth (MB/sec)",
+ "MetricExpr": "( UNC_M_PMM_WPQ_INSERTS * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "pmem_memory_bandwidth_write",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Intel(R) Optane(TM) Persistent Memory(PMEM) memory bandwidth (MB/sec)",
+ "MetricExpr": "(( UNC_M_PMM_RPQ_INSERTS + UNC_M_PMM_WPQ_INSERTS ) * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "pmem_memory_bandwidth_total",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth of IO reads that are initiated by end device controllers that are requesting memory from the CPU.",
+ "MetricExpr": "( UNC_CHA_TOR_INSERTS.IO_PCIRDCUR * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "io_bandwidth_read",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth of IO writes that are initiated by end device controllers that are writing memory to the CPU.",
+ "MetricExpr": "(( UNC_CHA_TOR_INSERTS.IO_ITOM + UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR ) * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "io_bandwidth_write",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Uops delivered from decoded instruction cache (decoded stream buffer or DSB) as a percent of total uops delivered to Instruction Decode Queue",
+ "MetricExpr": "100 * ( IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS + LSD.UOPS ) )",
+ "MetricGroup": "",
+ "MetricName": "percent_uops_delivered_from_decoded_icache_dsb",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Uops delivered from legacy decode pipeline (Micro-instruction Translation Engine or MITE) as a percent of total uops delivered to Instruction Decode Queue",
+ "MetricExpr": "100 * ( IDQ.MITE_UOPS / ( IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS + LSD.UOPS ) )",
+ "MetricGroup": "",
+ "MetricName": "percent_uops_delivered_from_legacy_decode_pipeline_mite",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Uops delivered from microcode sequencer (MS) as a percent of total uops delivered to Instruction Decode Queue",
+ "MetricExpr": "100 * ( IDQ.MS_UOPS / ( IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS + LSD.UOPS ) )",
+ "MetricGroup": "",
+ "MetricName": "percent_uops_delivered_from_microcode_sequencer_ms",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Bandwidth (MB/sec) of read requests that miss the last level cache (LLC) and go to local memory.",
+ "MetricExpr": "( UNC_CHA_REQUESTS.READS_LOCAL * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "llc_miss_local_memory_bandwidth_read",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth (MB/sec) of write requests that miss the last level cache (LLC) and go to local memory.",
+ "MetricExpr": "( UNC_CHA_REQUESTS.WRITES_LOCAL * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "llc_miss_local_memory_bandwidth_write",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth (MB/sec) of read requests that miss the last level cache (LLC) and go to remote memory.",
+ "MetricExpr": "( UNC_CHA_REQUESTS.READS_REMOTE * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "llc_miss_remote_memory_bandwidth_read",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth (MB/sec) of write requests that miss the last level cache (LLC) and go to remote memory.",
+ "MetricExpr": "( UNC_CHA_REQUESTS.WRITES_REMOTE * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "llc_miss_remote_memory_bandwidth_write",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Machine_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+ "MetricExpr": "100 * ( topdown\\-fe\\-bound / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) - INT_MISC.UOP_DROPPING / ( slots ) )",
+ "MetricGroup": "TmaL1;PGO",
+ "MetricName": "tma_frontend_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period.",
+ "MetricExpr": "100 * ( ( topdown\\-fetch\\-lat / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) - INT_MISC.UOP_DROPPING / ( slots ) ) )",
+ "MetricGroup": "Frontend;TmaL2;m_tma_frontend_bound_percent",
+ "MetricName": "tma_fetch_latency_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses.",
+ "MetricExpr": "100 * ( ICACHE_DATA.STALLS / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "BigFoot;FetchLat;IcMiss;TmaL3;m_tma_fetch_latency_percent",
+ "MetricName": "tma_icache_misses_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses.",
+ "MetricExpr": "100 * ( ICACHE_TAG.STALLS / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TmaL3;m_tma_fetch_latency_percent",
+ "MetricName": "tma_itlb_misses_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings.",
+ "MetricExpr": "100 * ( INT_MISC.CLEAR_RESTEER_CYCLES / ( CPU_CLK_UNHALTED.THREAD ) + ( INT_MISC.UNKNOWN_BRANCH_CYCLES / ( CPU_CLK_UNHALTED.THREAD ) ) )",
+ "MetricGroup": "FetchLat;TmaL3;m_tma_fetch_latency_percent",
+ "MetricName": "tma_branch_resteers_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty.",
+ "MetricExpr": "100 * ( DSB2MITE_SWITCHES.PENALTY_CYCLES / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "DSBmiss;FetchLat;TmaL3;m_tma_fetch_latency_percent",
+ "MetricName": "tma_dsb_switches_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs.",
+ "MetricExpr": "100 * ( DECODE.LCP / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "FetchLat;TmaL3;m_tma_fetch_latency_percent",
+ "MetricName": "tma_lcp_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals.",
+ "MetricExpr": "100 * ( ( 3 ) * IDQ.MS_SWITCHES / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "FetchLat;MicroSeq;TmaL3;m_tma_fetch_latency_percent",
+ "MetricName": "tma_ms_switches_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend.",
+ "MetricExpr": "100 * ( max( 0 , ( topdown\\-fe\\-bound / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) - INT_MISC.UOP_DROPPING / ( slots ) ) - ( ( topdown\\-fetch\\-lat / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) - INT_MISC.UOP_DROPPING / ( slots ) ) ) ) )",
+ "MetricGroup": "FetchBW;Frontend;TmaL2;m_tma_frontend_bound_percent",
+ "MetricName": "tma_fetch_bandwidth_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck.",
+ "MetricExpr": "100 * ( ( IDQ.MITE_CYCLES_ANY - IDQ.MITE_CYCLES_OK ) / ( CPU_CLK_UNHALTED.DISTRIBUTED ) / 2 )",
+ "MetricGroup": "DSBmiss;FetchBW;TmaL3;m_tma_fetch_bandwidth_percent",
+ "MetricName": "tma_mite_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
+ "MetricExpr": "100 * ( ( IDQ.DSB_CYCLES_ANY - IDQ.DSB_CYCLES_OK ) / ( CPU_CLK_UNHALTED.DISTRIBUTED ) / 2 )",
+ "MetricGroup": "DSB;FetchBW;TmaL3;m_tma_fetch_bandwidth_percent",
+ "MetricName": "tma_dsb_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "MetricExpr": "100 * ( max( 1 - ( ( topdown\\-fe\\-bound / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) - INT_MISC.UOP_DROPPING / ( slots ) ) + ( topdown\\-be\\-bound / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) + ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) ) , 0 ) )",
+ "MetricGroup": "TmaL1",
+ "MetricName": "tma_bad_speculation_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path.",
+ "MetricExpr": "( 100 * ( topdown\\-br\\-mispredict / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) ) + ( 0 * slots )",
+ "MetricGroup": "BadSpec;BrMispredicts;TmaL2;m_tma_bad_speculation_percent",
+ "MetricName": "tma_branch_mispredicts_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes.",
+ "MetricExpr": "100 * ( max( 0 , ( max( 1 - ( ( topdown\\-fe\\-bound / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) - INT_MISC.UOP_DROPPING / ( slots ) ) + ( topdown\\-be\\-bound / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) + ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) ) , 0 ) ) - ( topdown\\-br\\-mispredict / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) ) )",
+ "MetricGroup": "BadSpec;MachineClears;TmaL2;m_tma_bad_speculation_percent",
+ "MetricName": "tma_machine_clears_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "MetricExpr": "( 100 * ( topdown\\-be\\-bound / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) ) + ( 0 * slots )",
+ "MetricGroup": "TmaL1",
+ "MetricName": "tma_backend_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
+ "MetricExpr": "( 100 * ( topdown\\-mem\\-bound / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) ) + ( 0 * slots )",
+ "MetricGroup": "Backend;TmaL2;m_tma_backend_bound_percent",
+ "MetricName": "tma_memory_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache.",
+ "MetricExpr": "100 * ( max( ( EXE_ACTIVITY.BOUND_ON_LOADS - MEMORY_ACTIVITY.STALLS_L1D_MISS ) / ( CPU_CLK_UNHALTED.THREAD ) , 0 ) )",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TmaL3;m_tma_memory_bound_percent",
+ "MetricName": "tma_l1_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance.",
+ "MetricExpr": "100 * ( ( MEMORY_ACTIVITY.STALLS_L1D_MISS - MEMORY_ACTIVITY.STALLS_L2_MISS ) / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TmaL3;m_tma_memory_bound_percent",
+ "MetricName": "tma_l2_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance.",
+ "MetricExpr": "100 * ( ( MEMORY_ACTIVITY.STALLS_L2_MISS - MEMORY_ACTIVITY.STALLS_L3_MISS ) / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TmaL3;m_tma_memory_bound_percent",
+ "MetricName": "tma_l3_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance.",
+ "MetricExpr": "100 * ( min( ( ( ( MEMORY_ACTIVITY.STALLS_L3_MISS / ( CPU_CLK_UNHALTED.THREAD ) ) - ( min( ( ( ( ( 1 - ( ( ( 19 * ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + 10 * ( ( MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) ) ) / ( ( 19 * ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + 10 * ( ( MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) ) ) + ( 25 * ( ( MEM_LOAD_RETIRED.LOCAL_PMM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) ) + 33 * ( ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) ) ) ) ) ) ) * ( MEMORY_ACTIVITY.STALLS_L3_MISS / ( CPU_CLK_UNHALTED.THREAD ) ) ) if ( ( 1000000 ) * ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM + MEM_LOAD_RETIRED.LOCAL_PMM ) > MEM_LOAD_RETIRED.L1_MISS ) else 0 ) ) , ( 1 ) ) ) ) ) , ( 1 ) ) )",
+ "MetricGroup": "MemoryBound;TmaL3mem;TmaL3;m_tma_memory_bound_percent",
+ "MetricName": "tma_dram_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates (based on idle latencies) how often the CPU was stalled on accesses to external 3D-Xpoint (Crystal Ridge, a.k.a. IXP) memory by loads, PMM stands for Persistent Memory Module. ",
+ "MetricExpr": "100 * ( min( ( ( ( ( 1 - ( ( ( 19 * ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + 10 * ( ( MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) ) ) / ( ( 19 * ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + 10 * ( ( MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) ) ) + ( 25 * ( ( MEM_LOAD_RETIRED.LOCAL_PMM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) ) + 33 * ( ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) ) ) ) ) ) ) * ( MEMORY_ACTIVITY.STALLS_L3_MISS / ( CPU_CLK_UNHALTED.THREAD ) ) ) if ( ( 1000000 ) * ( MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM + MEM_LOAD_RETIRED.LOCAL_PMM ) > MEM_LOAD_RETIRED.L1_MISS ) else 0 ) ) , ( 1 ) ) )",
+ "MetricGroup": "MemoryBound;Server;TmaL3mem;TmaL3;m_tma_memory_bound_percent",
+ "MetricName": "tma_pmm_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck.",
+ "MetricExpr": "100 * ( EXE_ACTIVITY.BOUND_ON_STORES / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "MemoryBound;TmaL3mem;TmaL3;m_tma_memory_bound_percent",
+ "MetricName": "tma_store_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
+ "MetricExpr": "( 100 * ( max( 0 , ( topdown\\-be\\-bound / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) - ( topdown\\-mem\\-bound / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) ) ) ) + ( 0 * slots )",
+ "MetricGroup": "Backend;TmaL2;Compute;m_tma_backend_bound_percent",
+ "MetricName": "tma_core_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication.",
+ "MetricExpr": "100 * ( ARITH.DIVIDER_ACTIVE / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "TmaL3;m_tma_core_bound_percent",
+ "MetricName": "tma_divider_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
+ "MetricExpr": "( 100 * ( ( EXE_ACTIVITY.EXE_BOUND_0_PORTS + ( EXE_ACTIVITY.1_PORTS_UTIL + ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * cpu@EXE_ACTIVITY.2_PORTS_UTIL\\,umask\\=0xc@ ) ) / ( CPU_CLK_UNHALTED.THREAD ) if ( ARITH.DIVIDER_ACTIVE < ( CYCLE_ACTIVITY.STALLS_TOTAL - EXE_ACTIVITY.BOUND_ON_LOADS ) ) else ( EXE_ACTIVITY.1_PORTS_UTIL + ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * cpu@EXE_ACTIVITY.2_PORTS_UTIL\\,umask\\=0xc@ ) / ( CPU_CLK_UNHALTED.THREAD ) ) ) + ( 0 * slots )",
+ "MetricGroup": "PortsUtil;TmaL3;m_tma_core_bound_percent",
+ "MetricName": "tma_ports_utilization_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. ",
+ "MetricExpr": "( 100 * ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) ) + ( 0 * slots )",
+ "MetricGroup": "TmaL1",
+ "MetricName": "tma_retiring_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved.",
+ "MetricExpr": "( 100 * ( max( 0 , ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) - ( topdown\\-heavy\\-ops / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) ) ) ) + ( 0 * slots )",
+ "MetricGroup": "Retire;TmaL2;m_tma_retiring_percent",
+ "MetricName": "tma_light_operations_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
+ "MetricExpr": "100 * ( ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * UOPS_EXECUTED.X87 / UOPS_EXECUTED.THREAD ) + ( ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + FP_ARITH_INST_RETIRED2.SCALAR ) / ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) ) + ( min( ( ( FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE + FP_ARITH_INST_RETIRED2.VECTOR ) / ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) ) , ( 1 ) ) ) + ( cpu@AMX_OPS_RETIRED.BF16\\,cmask\\=0x1@ / ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) ) )",
+ "MetricGroup": "HPC;TmaL3;m_tma_light_operations_percent",
+ "MetricName": "tma_fp_arith_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents overall Integer (Int) select operations fraction the CPU has executed (retired). Vector/Matrix Int operations and shuffles are counted. Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain.",
+ "MetricExpr": "100 * ( ( ( INT_VEC_RETIRED.ADD_128 + INT_VEC_RETIRED.VNNI_128 ) / ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) ) + ( ( INT_VEC_RETIRED.ADD_256 + INT_VEC_RETIRED.MUL_256 + INT_VEC_RETIRED.VNNI_256 ) / ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) ) + ( INT_VEC_RETIRED.SHUFFLES / ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) ) )",
+ "MetricGroup": "Pipeline;TmaL3;m_tma_light_operations_percent",
+ "MetricName": "tma_int_operations_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations -- uops for memory load or store accesses.",
+ "MetricExpr": "100 * ( ( max( 0 , ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) - ( topdown\\-heavy\\-ops / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) ) ) * MEM_UOP_RETIRED.ANY / ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) )",
+ "MetricGroup": "Pipeline;TmaL3;m_tma_light_operations_percent",
+ "MetricName": "tma_memory_operations_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions. The instruction pairs of CMP+JCC or DEC+JCC are commonly used examples.",
+ "MetricExpr": "100 * ( ( max( 0 , ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) - ( topdown\\-heavy\\-ops / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) ) ) * INST_RETIRED.MACRO_FUSED / ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) )",
+ "MetricGroup": "Pipeline;TmaL3;m_tma_light_operations_percent",
+ "MetricName": "tma_fused_instructions_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused.",
+ "MetricExpr": "100 * ( ( max( 0 , ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) - ( topdown\\-heavy\\-ops / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) ) ) * ( BR_INST_RETIRED.ALL_BRANCHES - INST_RETIRED.MACRO_FUSED ) / ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) )",
+ "MetricGroup": "Pipeline;TmaL3;m_tma_light_operations_percent",
+ "MetricName": "tma_non_fused_branches_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body.",
+ "MetricExpr": "100 * ( ( max( 0 , ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) - ( topdown\\-heavy\\-ops / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) ) ) * INST_RETIRED.NOP / ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) )",
+ "MetricGroup": "Pipeline;TmaL3;m_tma_light_operations_percent",
+ "MetricName": "tma_nop_instructions_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes. May undercount due to FMA double counting",
+ "MetricExpr": "100 * ( max( 0 , ( max( 0 , ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) - ( topdown\\-heavy\\-ops / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) ) ) - ( ( ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * UOPS_EXECUTED.X87 / UOPS_EXECUTED.THREAD ) + ( ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + FP_ARITH_INST_RETIRED2.SCALAR ) / ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) ) + ( min( ( ( FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE + FP_ARITH_INST_RETIRED2.VECTOR ) / ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) ) , ( 1 ) ) ) + ( cpu@AMX_OPS_RETIRED.BF16\\,cmask\\=0x1@ / ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) ) ) + ( ( ( INT_VEC_RETIRED.ADD_128 + INT_VEC_RETIRED.VNNI_128 ) / ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) ) + ( ( INT_VEC_RETIRED.ADD_256 + INT_VEC_RETIRED.MUL_256 + INT_VEC_RETIRED.VNNI_256 ) / ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) ) + ( INT_VEC_RETIRED.SHUFFLES / ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) ) ) + ( ( max( 0 , ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) - ( topdown\\-heavy\\-ops / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) ) ) * MEM_UOP_RETIRED.ANY / ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) ) + ( ( max( 0 , ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) - ( topdown\\-heavy\\-ops / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) ) ) * INST_RETIRED.MACRO_FUSED / ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) ) + ( ( max( 0 , ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) - ( topdown\\-heavy\\-ops / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) ) ) * ( BR_INST_RETIRED.ALL_BRANCHES - INST_RETIRED.MACRO_FUSED ) / ( ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) ) + ( ( max( 0 , ( topdown\\-retiring / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) - ( topdown\\-heavy\\-ops / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) ) ) * INST_RETIRED.NOP / ( ( topdown\\-retiring / ( topdown\\-fe\\-bound 
+ topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) * ( slots ) ) ) ) ) )",
+ "MetricGroup": "Pipeline;TmaL3;m_tma_light_operations_percent",
+ "MetricName": "tma_other_light_ops_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or microcoded sequences. This highly-correlates with the uop length of these instructions/sequences.",
+ "MetricExpr": "( 100 * ( topdown\\-heavy\\-ops / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) ) + ( 0 * slots )",
+ "MetricGroup": "Retire;TmaL2;m_tma_retiring_percent",
+ "MetricName": "tma_heavy_operations_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring instructions that that are decoder into two or up to ([SNB+] four; [ADL+] five) uops. This highly-correlates with the number of uops in such instructions.",
+ "MetricExpr": "100 * ( ( topdown\\-heavy\\-ops / ( topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound ) ) - ( UOPS_RETIRED.MS / ( slots ) ) )",
+ "MetricGroup": "TmaL3;m_tma_heavy_operations_percent",
+ "MetricName": "tma_few_uops_instructions_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided.",
+ "MetricExpr": "100 * ( UOPS_RETIRED.MS / ( slots ) )",
+ "MetricGroup": "MicroSeq;TmaL3;m_tma_heavy_operations_percent",
+ "MetricName": "tma_microcode_sequencer_percent",
+ "ScaleUnit": "1%"
}
]
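For orientation, the dense MetricExpr strings in the tma_*_percent entries above largely reduce to ratios of the fixed topdown slot counters scaled to percent; the trailing "+ ( 0 * slots )" term appears to exist only so the slots event is scheduled in the same group. A minimal Python sketch, using made-up counter values rather than real perf output, of how two of the added metrics evaluate:

# Hedged sketch: hypothetical counter values, not measured data.
counters = {
    "topdown-fe-bound": 2.0e9,
    "topdown-bad-spec": 0.5e9,
    "topdown-retiring": 4.0e9,
    "topdown-be-bound": 1.5e9,
    "topdown-heavy-ops": 1.0e9,
}
total_slots = (counters["topdown-fe-bound"] + counters["topdown-bad-spec"]
               + counters["topdown-retiring"] + counters["topdown-be-bound"])

# tma_retiring_percent: 100 * ( topdown-retiring / total slots )
tma_retiring_percent = 100 * counters["topdown-retiring"] / total_slots

# tma_light_operations_percent: 100 * max(0, retiring fraction - heavy-ops fraction)
tma_light_operations_percent = 100 * max(
    0.0, (counters["topdown-retiring"] - counters["topdown-heavy-ops"]) / total_slots)

print(f"tma_retiring_percent         = {tma_retiring_percent:.1f}%")          # 50.0%
print(f"tma_light_operations_percent = {tma_light_operations_percent:.1f}%")  # 37.5%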
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-other.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-other.json
index 9b8664c50213..495ceee21071 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-other.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-other.json
@@ -21,15 +21,6 @@
"Unit": "UPI LL"
},
{
- "BriefDescription": "Clockticks in the UBOX using a dedicated 48-bit Fixed Counter",
- "Counter": "FIXED",
- "CounterType": "FIXED",
- "EventCode": "0xff",
- "EventName": "UNC_U_CLOCKTICKS",
- "PerPkg": "1",
- "Unit": "UBOX"
- },
- {
"BriefDescription": "IRP Clockticks",
"Counter": "0,1",
"CounterType": "PGMABLE",
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/virtual-memory.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/virtual-memory.json
index cba69368308e..f591f4fedc0b 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/virtual-memory.json
@@ -8,6 +8,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x20"
},
{
@@ -20,6 +21,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a demand load.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x10"
},
{
@@ -31,6 +33,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts completed page walks (all page sizes) caused by demand data loads. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0xe"
},
{
@@ -42,6 +45,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts completed page walks (1G sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x8"
},
{
@@ -53,6 +57,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x4"
},
{
@@ -64,6 +69,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts completed page walks (4K sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x2"
},
{
@@ -75,6 +81,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of page walks outstanding for a demand load in the PMH (Page Miss Handler) each cycle.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x10"
},
{
@@ -86,6 +93,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x20"
},
{
@@ -98,6 +106,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a store.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x10"
},
{
@@ -109,6 +118,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts completed page walks (all page sizes) caused by demand data stores. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0xe"
},
{
@@ -120,6 +130,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts completed page walks (1G sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x8"
},
{
@@ -131,6 +142,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x4"
},
{
@@ -142,6 +154,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts completed page walks (4K sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x2"
},
{
@@ -153,6 +166,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of page walks outstanding for a store in the PMH (Page Miss Handler) each cycle.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x10"
},
{
@@ -164,6 +178,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts instruction fetch requests that miss the ITLB (Instruction TLB) and hit the STLB (Second-level TLB).",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x20"
},
{
@@ -176,6 +191,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a code (instruction fetch) request.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x10"
},
{
@@ -187,6 +203,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts completed page walks (all page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0xe"
},
{
@@ -198,6 +215,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts completed page walks (2M/4M page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x4"
},
{
@@ -209,6 +227,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts completed page walks (4K page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x2"
},
{
@@ -220,6 +239,7 @@
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of page walks outstanding for an outstanding code (instruction fetch) request in the PMH (Page Miss Handler) each cycle.",
"SampleAfterValue": "100003",
+ "Speculative": "1",
"UMask": "0x10"
}
]
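The only change to the virtual-memory events above is the added "Speculative": "1" field, which presumably flags events whose counts include speculative (not necessarily retired) work. A small Python sketch, assuming the path below and a kernel source tree as the working directory, to list the events carrying the flag:

import json

# Assumed path, relative to a kernel tree checkout.
path = "tools/perf/pmu-events/arch/x86/sapphirerapids/virtual-memory.json"
with open(path) as f:
    events = json.load(f)

for ev in events:
    if ev.get("Speculative") == "1":   # field added by the hunks above
        print(ev["EventName"], "umask", ev.get("UMask"))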
diff --git a/tools/perf/pmu-events/arch/x86/silvermont/cache.json b/tools/perf/pmu-events/arch/x86/silvermont/cache.json
index e16e1d910e4a..7959504dff29 100644
--- a/tools/perf/pmu-events/arch/x86/silvermont/cache.json
+++ b/tools/perf/pmu-events/arch/x86/silvermont/cache.json
@@ -807,4 +807,4 @@
"SampleAfterValue": "200003",
"UMask": "0x4"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/silvermont/floating-point.json b/tools/perf/pmu-events/arch/x86/silvermont/floating-point.json
index 1d75b35694ac..aa4faf110512 100644
--- a/tools/perf/pmu-events/arch/x86/silvermont/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/silvermont/floating-point.json
@@ -8,4 +8,4 @@
"SampleAfterValue": "200003",
"UMask": "0x4"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/silvermont/frontend.json b/tools/perf/pmu-events/arch/x86/silvermont/frontend.json
index a4c98e43f677..43e5e48f7212 100644
--- a/tools/perf/pmu-events/arch/x86/silvermont/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/silvermont/frontend.json
@@ -71,4 +71,4 @@
"SampleAfterValue": "200003",
"UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/silvermont/memory.json b/tools/perf/pmu-events/arch/x86/silvermont/memory.json
index 5e21fc3fd078..0f5fba43da4c 100644
--- a/tools/perf/pmu-events/arch/x86/silvermont/memory.json
+++ b/tools/perf/pmu-events/arch/x86/silvermont/memory.json
@@ -8,4 +8,4 @@
"SampleAfterValue": "200003",
"UMask": "0x2"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/silvermont/other.json b/tools/perf/pmu-events/arch/x86/silvermont/other.json
index 16d16a1ce6de..4db59d84c144 100644
--- a/tools/perf/pmu-events/arch/x86/silvermont/other.json
+++ b/tools/perf/pmu-events/arch/x86/silvermont/other.json
@@ -17,4 +17,4 @@
"SampleAfterValue": "200003",
"UMask": "0x2"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/silvermont/pipeline.json b/tools/perf/pmu-events/arch/x86/silvermont/pipeline.json
index 03a4c7f26698..e42a37eabc17 100644
--- a/tools/perf/pmu-events/arch/x86/silvermont/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/silvermont/pipeline.json
@@ -313,4 +313,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/silvermont/virtual-memory.json b/tools/perf/pmu-events/arch/x86/silvermont/virtual-memory.json
index f4b8a1ef48f6..b50cee3a5e4c 100644
--- a/tools/perf/pmu-events/arch/x86/silvermont/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/silvermont/virtual-memory.json
@@ -66,4 +66,4 @@
"SampleAfterValue": "100003",
"UMask": "0x3"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/skylake/floating-point.json b/tools/perf/pmu-events/arch/x86/skylake/floating-point.json
index 73cfb2a39722..d6cee5ae4402 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/floating-point.json
@@ -70,4 +70,4 @@
"SampleAfterValue": "100003",
"UMask": "0x1e"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/skylake/frontend.json b/tools/perf/pmu-events/arch/x86/skylake/frontend.json
index ecce4273ae52..8633ee406813 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/frontend.json
@@ -527,4 +527,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/skylake/other.json b/tools/perf/pmu-events/arch/x86/skylake/other.json
index 4f4839024915..8f4bc8892c47 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/other.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/other.json
@@ -17,4 +17,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
index defbca9a6038..73fa72d3dcb1 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
@@ -95,13 +95,13 @@
{
"BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
"MetricExpr": "100 * ((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - ( UOPS_ISSUED.ANY + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD))) * ( ( (max( ( CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS ) / CPU_CLK_UNHALTED.THREAD , 0 )) / ((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - ( UOPS_ISSUED.ANY + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD))) ) * ( (min( 9 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_LOAD_MISSES.WALK_ACTIVE , max( CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS , 0 ) ) / CPU_CLK_UNHALTED.THREAD) / (max( ( CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS ) / CPU_CLK_UNHALTED.THREAD , 0 )) ) + ( (EXE_ACTIVITY.BOUND_ON_STORES / CPU_CLK_UNHALTED.THREAD) / #((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - ( UOPS_ISSUED.ANY + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD))) ) * ( (( 9 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_STORE_MISSES.WALK_ACTIVE ) / CPU_CLK_UNHALTED.THREAD) / #(EXE_ACTIVITY.BOUND_ON_STORES / CPU_CLK_UNHALTED.THREAD) ) ) ",
- "MetricGroup": "Mem;MemoryTLB",
+ "MetricGroup": "Mem;MemoryTLB;Offcore",
"MetricName": "Memory_Data_TLBs"
},
{
"BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
"MetricExpr": "100 * ((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ( UOPS_ISSUED.ANY + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * ( ( (max( ( CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS ) / CPU_CLK_UNHALTED.THREAD , 0 )) / ((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ( UOPS_ISSUED.ANY + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * ( (min( 9 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_LOAD_MISSES.WALK_ACTIVE , max( CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS , 0 ) ) / CPU_CLK_UNHALTED.THREAD) / (max( ( CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS ) / CPU_CLK_UNHALTED.THREAD , 0 )) ) + ( (EXE_ACTIVITY.BOUND_ON_STORES / CPU_CLK_UNHALTED.THREAD) / #((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ( UOPS_ISSUED.ANY + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * ( (( 9 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_STORE_MISSES.WALK_ACTIVE ) / ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) / #(EXE_ACTIVITY.BOUND_ON_STORES / CPU_CLK_UNHALTED.THREAD) ) ) ",
- "MetricGroup": "Mem;MemoryTLB;_SMT",
+ "MetricGroup": "Mem;MemoryTLB;Offcore_SMT",
"MetricName": "Memory_Data_TLBs_SMT"
},
{
@@ -214,42 +214,36 @@
"MetricName": "FLOPc_SMT"
},
{
- "BriefDescription": "Actual per-core usage of the Floating Point execution units (regardless of the vector width)",
+ "BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
"MetricExpr": "( (FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE) + (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) ) / ( 2 * CPU_CLK_UNHALTED.THREAD )",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "FP_Arith_Utilization",
- "PublicDescription": "Actual per-core usage of the Floating Point execution units (regardless of the vector width). Values > 1 are possible due to Fused-Multiply Add (FMA) counting."
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
},
{
- "BriefDescription": "Actual per-core usage of the Floating Point execution units (regardless of the vector width). SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "( (FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE) + (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) ) / ( 2 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ) )",
"MetricGroup": "Cor;Flops;HPC_SMT",
"MetricName": "FP_Arith_Utilization_SMT",
- "PublicDescription": "Actual per-core usage of the Floating Point execution units (regardless of the vector width). Values > 1 are possible due to Fused-Multiply Add (FMA) counting. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common). SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
"MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
"MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
"MetricName": "ILP"
},
{
- "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
- "MetricExpr": " ( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) * ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * INT_MISC.CLEAR_RESTEER_CYCLES / CPU_CLK_UNHALTED.THREAD) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) ) * (4 * CPU_CLK_UNHALTED.THREAD) / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "Bad;BrMispredicts",
- "MetricName": "Branch_Misprediction_Cost"
- },
- {
- "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
- "MetricExpr": " ( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * INT_MISC.CLEAR_RESTEER_CYCLES / CPU_CLK_UNHALTED.THREAD) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) ) * (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "Bad;BrMispredicts_SMT",
- "MetricName": "Branch_Misprediction_Cost_SMT"
+ "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts",
+ "MetricExpr": "( 1 - ((1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - ( UOPS_ISSUED.ANY + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD)) - ((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - ( UOPS_ISSUED.ANY + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD)))) / ((EXE_ACTIVITY.EXE_BOUND_0_PORTS + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)) * EXE_ACTIVITY.2_PORTS_UTIL)) / CPU_CLK_UNHALTED.THREAD if ( ARITH.DIVIDER_ACTIVE < ( CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY ) ) else (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)) * EXE_ACTIVITY.2_PORTS_UTIL) / CPU_CLK_UNHALTED.THREAD) if ((1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - ( UOPS_ISSUED.ANY + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD)) - ((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - ( UOPS_ISSUED.ANY + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD)))) < ((EXE_ACTIVITY.EXE_BOUND_0_PORTS + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)) * EXE_ACTIVITY.2_PORTS_UTIL)) / CPU_CLK_UNHALTED.THREAD if ( ARITH.DIVIDER_ACTIVE < ( CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY ) ) else (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)) * EXE_ACTIVITY.2_PORTS_UTIL) / CPU_CLK_UNHALTED.THREAD) else 1 ) if 0 > 0.5 else 0",
+ "MetricGroup": "Cor;SMT",
+ "MetricName": "Core_Bound_Likely"
},
{
- "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "Bad;BadSpec;BrMispredicts",
- "MetricName": "IpMispredict"
+ "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts",
+ "MetricExpr": "( 1 - ((1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ( UOPS_ISSUED.ANY + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ( UOPS_ISSUED.ANY + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) / ((EXE_ACTIVITY.EXE_BOUND_0_PORTS + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * EXE_ACTIVITY.2_PORTS_UTIL)) / CPU_CLK_UNHALTED.THREAD if ( ARITH.DIVIDER_ACTIVE < ( CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY ) ) else (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * EXE_ACTIVITY.2_PORTS_UTIL) / CPU_CLK_UNHALTED.THREAD) if ((1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ( UOPS_ISSUED.ANY + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ( UOPS_ISSUED.ANY + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) < ((EXE_ACTIVITY.EXE_BOUND_0_PORTS + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * EXE_ACTIVITY.2_PORTS_UTIL)) / CPU_CLK_UNHALTED.THREAD if ( ARITH.DIVIDER_ACTIVE < ( CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY ) ) else (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * EXE_ACTIVITY.2_PORTS_UTIL) / CPU_CLK_UNHALTED.THREAD) else 1 ) if (1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_UNHALTED.REF_XCLK_ANY / 2 )) > 0.5 else 0",
+ "MetricGroup": "Cor;SMT_SMT",
+ "MetricName": "Core_Bound_Likely_SMT"
},
{
"BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
@@ -335,12 +329,30 @@
"PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
},
{
+ "BriefDescription": "Instructions per Software prefetch instruction (of any type: NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / cpu@SW_PREFETCH_ACCESS.T0\\,umask\\=0xF@",
+ "MetricGroup": "Prefetches",
+ "MetricName": "IpSWPF"
+ },
+ {
"BriefDescription": "Total number of retired Instructions, Sample with: INST_RETIRED.PREC_DIST",
"MetricExpr": "INST_RETIRED.ANY",
"MetricGroup": "Summary;TmaL1",
"MetricName": "Instructions"
},
{
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "Retire"
+ },
+ {
+ "BriefDescription": "",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
+ "MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
+ "MetricName": "Execute"
+ },
+ {
"BriefDescription": "Average number of Uops issued by front-end when it issued something",
"MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=1@",
"MetricGroup": "Fed;FetchBW",
@@ -353,24 +365,48 @@
"MetricName": "DSB_Coverage"
},
{
- "BriefDescription": "Total penalty related to DSB (uop cache) misses - subset/see of/the Instruction_Fetch_BW Bottleneck.",
- "MetricExpr": "(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) * (DSB2MITE_SWITCHES.PENALTY_CYCLES / CPU_CLK_UNHALTED.THREAD) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) + ((IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD))) * (( IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS ) / CPU_CLK_UNHALTED.THREAD / 2) / #((IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)))",
+ "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / DSB2MITE_SWITCHES.COUNT",
+ "MetricGroup": "DSBmiss",
+ "MetricName": "DSB_Switch_Cost"
+ },
+ {
+ "BriefDescription": "Total penalty related to DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck.",
+ "MetricExpr": "100 * ( (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) * (DSB2MITE_SWITCHES.PENALTY_CYCLES / CPU_CLK_UNHALTED.THREAD) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) + ((IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD))) * (( IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS ) / CPU_CLK_UNHALTED.THREAD / 2) / #((IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD))) )",
"MetricGroup": "DSBmiss;Fed",
- "MetricName": "DSB_Misses_Cost"
+ "MetricName": "DSB_Misses"
},
{
- "BriefDescription": "Total penalty related to DSB (uop cache) misses - subset/see of/the Instruction_Fetch_BW Bottleneck.",
- "MetricExpr": "(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * (DSB2MITE_SWITCHES.PENALTY_CYCLES / CPU_CLK_UNHALTED.THREAD) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) + ((IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (( IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS ) / ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ) / 2) / #((IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))",
+ "BriefDescription": "Total penalty related to DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck.",
+ "MetricExpr": "100 * ( (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * (DSB2MITE_SWITCHES.PENALTY_CYCLES / CPU_CLK_UNHALTED.THREAD) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) + ((IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (( IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS ) / ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ) / 2) / #((IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
"MetricGroup": "DSBmiss;Fed_SMT",
- "MetricName": "DSB_Misses_Cost_SMT"
+ "MetricName": "DSB_Misses_SMT"
},
{
- "BriefDescription": "Number of Instructions per non-speculative DSB miss",
+ "BriefDescription": "Number of Instructions per non-speculative DSB miss (lower number means higher occurrence rate)",
"MetricExpr": "INST_RETIRED.ANY / FRONTEND_RETIRED.ANY_DSB_MISS",
"MetricGroup": "DSBmiss;Fed",
"MetricName": "IpDSB_Miss_Ret"
},
{
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts",
+ "MetricName": "IpMispredict"
+ },
+ {
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
+ "MetricExpr": " ( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) * ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * INT_MISC.CLEAR_RESTEER_CYCLES / CPU_CLK_UNHALTED.THREAD) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) ) * (4 * CPU_CLK_UNHALTED.THREAD) / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "Branch_Misprediction_Cost"
+ },
+ {
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
+ "MetricExpr": " ( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * INT_MISC.CLEAR_RESTEER_CYCLES / CPU_CLK_UNHALTED.THREAD) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) ) * (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BrMispredicts_SMT",
+ "MetricName": "Branch_Misprediction_Cost_SMT"
+ },
+ {
"BriefDescription": "Fraction of branches that are non-taken conditionals",
"MetricExpr": "BR_INST_RETIRED.NOT_TAKEN / BR_INST_RETIRED.ALL_BRANCHES",
"MetricGroup": "Bad;Branches;CodeGen;PGO",
@@ -395,11 +431,10 @@
"MetricName": "Jump"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load instructions (in core cycles)",
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
"MetricGroup": "Mem;MemoryBound;MemoryLat",
- "MetricName": "Load_Miss_Real_Latency",
- "PublicDescription": "Actual Average Latency for L1 data-cache miss demand load instructions (in core cycles). Latency may be overestimated for multi-load instructions - e.g. repeat strings."
+ "MetricName": "Load_Miss_Real_Latency"
},
{
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
@@ -408,30 +443,6 @@
"MetricName": "MLP"
},
{
- "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
- "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L1D_Cache_Fill_BW"
- },
- {
- "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L2_Cache_Fill_BW"
- },
- {
- "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L3_Cache_Fill_BW"
- },
- {
- "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW;Offcore",
- "MetricName": "L3_Cache_Access_BW"
- },
- {
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
"MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
"MetricGroup": "Mem;CacheMisses",
@@ -450,13 +461,13 @@
"MetricName": "L2MPKI"
},
{
- "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)",
"MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
"MetricGroup": "Mem;CacheMisses;Offcore",
"MetricName": "L2MPKI_All"
},
{
- "BriefDescription": "L2 cache misses per kilo instruction for all demand loads (including speculative)",
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)",
"MetricExpr": "1000 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY",
"MetricGroup": "Mem;CacheMisses",
"MetricName": "L2MPKI_Load"
@@ -480,7 +491,7 @@
"MetricName": "L3MPKI"
},
{
- "BriefDescription": "Fill Buffer (FB) true hits per kilo instructions for retired demand loads",
+ "BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)",
"MetricExpr": "1000 * MEM_LOAD_RETIRED.FB_HIT / INST_RETIRED.ANY",
"MetricGroup": "Mem;CacheMisses",
"MetricName": "FB_HPKI"
@@ -499,6 +510,54 @@
"MetricName": "Page_Walks_Utilization_SMT"
},
{
+ "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L1D_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L2_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L3_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "L3_Cache_Access_BW"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "(64 * L1D.REPLACEMENT / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L1D_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "(64 * L2_LINES_IN.ALL / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L2_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "(64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L3_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "(64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "L3_Cache_Access_BW_1T"
+ },
+ {
"BriefDescription": "Average CPU Utilization",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "HPC;Summary",
@@ -514,7 +573,8 @@
"BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "( ( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE ) / 1000000000 ) / duration_time",
"MetricGroup": "Cor;Flops;HPC",
- "MetricName": "GFLOPs"
+ "MetricName": "GFLOPs",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine."
},
{
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
diff --git a/tools/perf/pmu-events/arch/x86/skylake/uncore-cache.json b/tools/perf/pmu-events/arch/x86/skylake/uncore-cache.json
new file mode 100644
index 000000000000..edb1014bee0f
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/skylake/uncore-cache.json
@@ -0,0 +1,142 @@
+[
+ {
+ "BriefDescription": "L3 Lookup any request that access cache and found line in E or S-state",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_ES",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup any request that access cache and found line in E or S-state.",
+ "UMask": "0x86",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "L3 Lookup any request that access cache and found line in I-state",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_I",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup any request that access cache and found line in I-state.",
+ "UMask": "0x88",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "L3 Lookup any request that access cache and found line in M-state",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_M",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup any request that access cache and found line in M-state.",
+ "UMask": "0x81",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "L3 Lookup any request that access cache and found line in MESI-state",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_MESI",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup any request that access cache and found line in MESI-state.",
+ "UMask": "0x8f",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "L3 Lookup read request that access cache and found line in E or S-state",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.READ_ES",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup read request that access cache and found line in E or S-state.",
+ "UMask": "0x16",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "L3 Lookup read request that access cache and found line in I-state",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.READ_I",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup read request that access cache and found line in I-state.",
+ "UMask": "0x18",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "L3 Lookup read request that access cache and found line in any MESI-state",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.READ_MESI",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup read request that access cache and found line in any MESI-state.",
+ "UMask": "0x1f",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "L3 Lookup write request that access cache and found line in E or S-state",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_ES",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup write request that access cache and found line in E or S-state.",
+ "UMask": "0x26",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "L3 Lookup write request that access cache and found line in M-state",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_M",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup write request that access cache and found line in M-state.",
+ "UMask": "0x21",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "L3 Lookup write request that access cache and found line in MESI-state",
+ "Counter": "0,1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_MESI",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup write request that access cache and found line in MESI-state.",
+ "UMask": "0x2f",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a modified line in some processor core.",
+ "Counter": "0,1",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.HITM_XCORE",
+ "PerPkg": "1",
+ "PublicDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a modified line in some processor core.",
+ "UMask": "0x48",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a non-modified line in some processor core.",
+ "Counter": "0,1",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.HIT_XCORE",
+ "PerPkg": "1",
+ "PublicDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a non-modified line in some processor core.",
+ "UMask": "0x44",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "A cross-core snoop resulted from L3 Eviction which misses in some processor core.",
+ "Counter": "0,1",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.MISS_EVICTION",
+ "PerPkg": "1",
+ "PublicDescription": "A cross-core snoop resulted from L3 Eviction which misses in some processor core.",
+ "UMask": "0x81",
+ "Unit": "CBO"
+ },
+ {
+ "BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which misses in some processor core.",
+ "Counter": "0,1",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.MISS_XCORE",
+ "PerPkg": "1",
+ "PublicDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which misses in some processor core.",
+ "UMask": "0x41",
+ "Unit": "CBO"
+ }
+]
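
A hypothetical sketch of how the ANY_* lookup events above could be combined (this derivation is an assumption, not something the file defines): ANY_I (umask 0x88) counts lookups that found the line Invalid, i.e. misses, while ANY_MESI (umask 0x8f) covers every state, so their difference approximates lookup hits. The counts are invented for illustration:

    # Illustrative only: approximate L3 lookup hit fraction from the CBO events above.
    any_mesi = 10_000_000   # hypothetical UNC_CBO_CACHE_LOOKUP.ANY_MESI count
    any_i = 2_500_000       # hypothetical UNC_CBO_CACHE_LOOKUP.ANY_I count

    hit_fraction = (any_mesi - any_i) / any_mesi if any_mesi else 0.0
    print(f"approx. L3 lookup hit fraction: {hit_fraction:.1%}")
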
diff --git a/tools/perf/pmu-events/arch/x86/skylake/uncore-other.json b/tools/perf/pmu-events/arch/x86/skylake/uncore-other.json
new file mode 100644
index 000000000000..bf5d4acdd6b8
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/skylake/uncore-other.json
@@ -0,0 +1,79 @@
+[
+ {
+ "BriefDescription": "Number of entries allocated. Account for Any type: e.g. Snoop, Core aperture, etc.",
+ "Counter": "0,1",
+ "EventCode": "0x84",
+ "EventName": "UNC_ARB_COH_TRK_REQUESTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of entries allocated. Account for Any type: e.g. Snoop, Core aperture, etc.",
+ "UMask": "0x01",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Number of all Core entries outstanding for the memory controller. The outstanding interval starts after LLC miss till return of first data chunk. Accounts for Coherent and non-coherent traffic.",
+ "EventCode": "0x80",
+ "EventName": "UNC_ARB_TRK_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of all Core entries outstanding for the memory controller. The outstanding interval starts after LLC miss till return of first data chunk. Accounts for Coherent and non-coherent traffic.",
+ "UMask": "0x01",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Cycles with at least one request outstanding is waiting for data return from memory controller. Account for coherent and non-coherent requests initiated by IA Cores, Processor Graphics Unit, or LLC.",
+ "CounterMask": "1",
+ "EventCode": "0x80",
+ "EventName": "UNC_ARB_TRK_OCCUPANCY.CYCLES_WITH_ANY_REQUEST",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles with at least one request outstanding is waiting for data return from memory controller. Account for coherent and non-coherent requests initiated by IA Cores, Processor Graphics Unit, or LLC.",
+ "UMask": "0x01",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Number of Core Data Read entries outstanding for the memory controller. The outstanding interval starts after LLC miss till return of first data chunk.",
+ "EventCode": "0x80",
+ "EventName": "UNC_ARB_TRK_OCCUPANCY.DATA_READ",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Core Data Read entries outstanding for the memory controller. The outstanding interval starts after LLC miss till return of first data chunk.",
+ "UMask": "0x02",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Number of Core coherent Data Read requests sent to memory controller whose data is returned directly to requesting agent.",
+ "Counter": "0,1",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_TRK_REQUESTS.DATA_READ",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Core coherent Data Read requests sent to memory controller whose data is returned directly to requesting agent.",
+ "UMask": "0x02",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Number of Core coherent Data Read requests sent to memory controller whose data is returned directly to requesting agent.",
+ "Counter": "0,1",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_TRK_REQUESTS.DRD_DIRECT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Core coherent Data Read requests sent to memory controller whose data is returned directly to requesting agent.",
+ "UMask": "0x02",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Number of Writes allocated - any write transactions: full/partials writes and evictions.",
+ "Counter": "0,1",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_TRK_REQUESTS.WRITES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Writes allocated - any write transactions: full/partials writes and evictions.",
+ "UMask": "0x20",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "This 48-bit fixed counter counts the UCLK cycles",
+ "Counter": "FIXED",
+ "EventCode": "0xff",
+ "EventName": "UNC_CLOCK.SOCKET",
+ "PerPkg": "1",
+ "PublicDescription": "This 48-bit fixed counter counts the UCLK cycles.",
+ "Unit": "CLOCK"
+ }
+]
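
As an illustration only (the pairing is an assumption, not defined by this file): by Little's law, dividing the DATA_READ occupancy above by the matching DATA_READ request count approximates the average memory-read latency in uncore (ARB) clocks. A minimal Python sketch with invented counts:

    # Illustrative only: average read latency derived from the ARB events above.
    occupancy_data_read = 80_000_000  # hypothetical UNC_ARB_TRK_OCCUPANCY.DATA_READ
    requests_data_read = 400_000      # hypothetical UNC_ARB_TRK_REQUESTS.DATA_READ

    avg_latency_uclk = occupancy_data_read / requests_data_read
    print(f"approx. average read latency: {avg_latency_uclk:.0f} uncore clocks")
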
diff --git a/tools/perf/pmu-events/arch/x86/skylake/uncore.json b/tools/perf/pmu-events/arch/x86/skylake/uncore.json
deleted file mode 100644
index dbc193252fb3..000000000000
--- a/tools/perf/pmu-events/arch/x86/skylake/uncore.json
+++ /dev/null
@@ -1,254 +0,0 @@
-[
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x41",
- "EventName": "UNC_CBO_XSNP_RESPONSE.MISS_XCORE",
- "BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which misses in some processor core.",
- "PublicDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which misses in some processor core.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x81",
- "EventName": "UNC_CBO_XSNP_RESPONSE.MISS_EVICTION",
- "BriefDescription": "A cross-core snoop resulted from L3 Eviction which misses in some processor core.",
- "PublicDescription": "A cross-core snoop resulted from L3 Eviction which misses in some processor core.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x44",
- "EventName": "UNC_CBO_XSNP_RESPONSE.HIT_XCORE",
- "BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a non-modified line in some processor core.",
- "PublicDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a non-modified line in some processor core.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x48",
- "EventName": "UNC_CBO_XSNP_RESPONSE.HITM_XCORE",
- "BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a modified line in some processor core.",
- "PublicDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a modified line in some processor core.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x21",
- "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_M",
- "BriefDescription": "L3 Lookup write request that access cache and found line in M-state",
- "PublicDescription": "L3 Lookup write request that access cache and found line in M-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x81",
- "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_M",
- "BriefDescription": "L3 Lookup any request that access cache and found line in M-state",
- "PublicDescription": "L3 Lookup any request that access cache and found line in M-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x18",
- "EventName": "UNC_CBO_CACHE_LOOKUP.READ_I",
- "BriefDescription": "L3 Lookup read request that access cache and found line in I-state",
- "PublicDescription": "L3 Lookup read request that access cache and found line in I-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x88",
- "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_I",
- "BriefDescription": "L3 Lookup any request that access cache and found line in I-state",
- "PublicDescription": "L3 Lookup any request that access cache and found line in I-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x1f",
- "EventName": "UNC_CBO_CACHE_LOOKUP.READ_MESI",
- "BriefDescription": "L3 Lookup read request that access cache and found line in any MESI-state",
- "PublicDescription": "L3 Lookup read request that access cache and found line in any MESI-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x2f",
- "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_MESI",
- "BriefDescription": "L3 Lookup write request that access cache and found line in MESI-state",
- "PublicDescription": "L3 Lookup write request that access cache and found line in MESI-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x8f",
- "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_MESI",
- "BriefDescription": "L3 Lookup any request that access cache and found line in MESI-state",
- "PublicDescription": "L3 Lookup any request that access cache and found line in MESI-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x86",
- "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_ES",
- "BriefDescription": "L3 Lookup any request that access cache and found line in E or S-state",
- "PublicDescription": "L3 Lookup any request that access cache and found line in E or S-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x16",
- "EventName": "UNC_CBO_CACHE_LOOKUP.READ_ES",
- "BriefDescription": "L3 Lookup read request that access cache and found line in E or S-state",
- "PublicDescription": "L3 Lookup read request that access cache and found line in E or S-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x26",
- "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_ES",
- "BriefDescription": "L3 Lookup write request that access cache and found line in E or S-state",
- "PublicDescription": "L3 Lookup write request that access cache and found line in E or S-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "iMPH-U",
- "EventCode": "0x80",
- "UMask": "0x01",
- "EventName": "UNC_ARB_TRK_OCCUPANCY.ALL",
- "BriefDescription": "Each cycle count number of all Core outgoing valid entries. Such entry is defined as valid from its allocation till first of IDI0 or DRS0 messages is sent out. Accounts for Coherent and non-coherent traffic.",
- "PublicDescription": "Each cycle count number of all Core outgoing valid entries. Such entry is defined as valid from its allocation till first of IDI0 or DRS0 messages is sent out. Accounts for Coherent and non-coherent traffic.",
- "Counter": "0",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "iMPH-U",
- "EventCode": "0x81",
- "UMask": "0x01",
- "EventName": "UNC_ARB_TRK_REQUESTS.ALL",
- "BriefDescription": "Total number of Core outgoing entries allocated. Accounts for Coherent and non-coherent traffic.",
- "PublicDescription": "Total number of Core outgoing entries allocated. Accounts for Coherent and non-coherent traffic.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "iMPH-U",
- "EventCode": "0x81",
- "UMask": "0x02",
- "EventName": "UNC_ARB_TRK_REQUESTS.DRD_DIRECT",
- "BriefDescription": "Number of Core coherent Data Read entries allocated in DirectData mode",
- "PublicDescription": "Number of Core coherent Data Read entries allocated in DirectData mode.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "iMPH-U",
- "EventCode": "0x81",
- "UMask": "0x20",
- "EventName": "UNC_ARB_TRK_REQUESTS.WRITES",
- "BriefDescription": "Number of Writes allocated - any write transactions: full/partials writes and evictions.",
- "PublicDescription": "Number of Writes allocated - any write transactions: full/partials writes and evictions.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "iMPH-U",
- "EventCode": "0x84",
- "UMask": "0x01",
- "EventName": "UNC_ARB_COH_TRK_REQUESTS.ALL",
- "BriefDescription": "Number of entries allocated. Account for Any type: e.g. Snoop, Core aperture, etc.",
- "PublicDescription": "Number of entries allocated. Account for Any type: e.g. Snoop, Core aperture, etc.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "iMPH-U",
- "EventCode": "0x80",
- "UMask": "0x01",
- "EventName": "UNC_ARB_TRK_OCCUPANCY.CYCLES_WITH_ANY_REQUEST",
- "BriefDescription": "Cycles with at least one request outstanding is waiting for data return from memory controller. Account for coherent and non-coherent requests initiated by IA Cores, Processor Graphics Unit, or LLC.;",
- "PublicDescription": "Cycles with at least one request outstanding is waiting for data return from memory controller. Account for coherent and non-coherent requests initiated by IA Cores, Processor Graphics Unit, or LLC.",
- "Counter": "0",
- "CounterMask": "1",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "NCU",
- "EventCode": "0x0",
- "UMask": "0x01",
- "EventName": "UNC_CLOCK.SOCKET",
- "BriefDescription": "This 48-bit fixed counter counts the UCLK cycles",
- "PublicDescription": "This 48-bit fixed counter counts the UCLK cycles.",
- "Counter": "FIXED",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- }
-] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json b/tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json
index 792ca39f013a..dd334b416c57 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json
@@ -281,4 +281,4 @@
"SampleAfterValue": "100007",
"UMask": "0x20"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json b/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json
index 9e873ab22450..09810e3d688c 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json
@@ -90,4 +90,4 @@
"SampleAfterValue": "100003",
"UMask": "0x1e"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/frontend.json b/tools/perf/pmu-events/arch/x86/skylakex/frontend.json
index ecce4273ae52..8633ee406813 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/frontend.json
@@ -527,4 +527,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/other.json b/tools/perf/pmu-events/arch/x86/skylakex/other.json
index 779654e62d97..403805e7e581 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/other.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/other.json
@@ -40,6 +40,69 @@
"UMask": "0x40"
},
{
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IFWDFE",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xEF",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IFWDFE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IFWDM",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xEF",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IFWDM",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IHITFSE",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xEF",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IHITFSE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IHITI",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xEF",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IHITI",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_SFWDFE",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xEF",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_SFWDFE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_SFWDM",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xEF",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_SFWDM",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_SHITFSE",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xEF",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_SHITFSE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
"BriefDescription": "Number of hardware interrupts received by the processor.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
@@ -70,6 +133,7 @@
"UMask": "0x2"
},
{
+ "BriefDescription": "MEMORY_DISAMBIGUATION.HISTORY_RESET",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x09",
@@ -77,4 +141,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
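
For reference, a hedged sketch of turning one of the new CORE_SNOOP_RESPONSE entries into the generic cpu/event=...,umask=.../ form accepted by perf stat -e; the dictionary literal just mirrors the JSON fields above, and the resulting string is only an example of the raw-event syntax:

    # Illustrative only: build a raw perf event descriptor from EventCode/UMask.
    event = {"EventName": "CORE_SNOOP_RESPONSE.RSP_IFWDFE",
             "EventCode": "0xEF", "UMask": "0x20"}

    raw = f"cpu/event={event['EventCode']},umask={event['UMask']}/"
    print(raw)  # e.g. usable as: perf stat -e cpu/event=0xEF,umask=0x20/ -a sleep 1
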
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json b/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json
index 79fda10ec4bb..f085b9145952 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json
@@ -166,6 +166,17 @@
"UMask": "0x20"
},
{
+ "BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.RET",
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
+ },
+ {
"BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
index b016f7d1ff3d..6a6764e1504b 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
@@ -95,13 +95,13 @@
{
"BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
"MetricExpr": "100 * ((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - ( UOPS_ISSUED.ANY + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD))) * ( ( (max( ( CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS ) / CPU_CLK_UNHALTED.THREAD , 0 )) / ((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - ( UOPS_ISSUED.ANY + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD))) ) * ( (min( 9 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_LOAD_MISSES.WALK_ACTIVE , max( CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS , 0 ) ) / CPU_CLK_UNHALTED.THREAD) / (max( ( CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS ) / CPU_CLK_UNHALTED.THREAD , 0 )) ) + ( (EXE_ACTIVITY.BOUND_ON_STORES / CPU_CLK_UNHALTED.THREAD) / #((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - ( UOPS_ISSUED.ANY + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD))) ) * ( (( 9 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_STORE_MISSES.WALK_ACTIVE ) / CPU_CLK_UNHALTED.THREAD) / #(EXE_ACTIVITY.BOUND_ON_STORES / CPU_CLK_UNHALTED.THREAD) ) ) ",
- "MetricGroup": "Mem;MemoryTLB",
+ "MetricGroup": "Mem;MemoryTLB;Offcore",
"MetricName": "Memory_Data_TLBs"
},
{
"BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
"MetricExpr": "100 * ((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ( UOPS_ISSUED.ANY + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * ( ( (max( ( CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS ) / CPU_CLK_UNHALTED.THREAD , 0 )) / ((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ( UOPS_ISSUED.ANY + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * ( (min( 9 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_LOAD_MISSES.WALK_ACTIVE , max( CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS , 0 ) ) / CPU_CLK_UNHALTED.THREAD) / (max( ( CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS ) / CPU_CLK_UNHALTED.THREAD , 0 )) ) + ( (EXE_ACTIVITY.BOUND_ON_STORES / CPU_CLK_UNHALTED.THREAD) / #((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ( UOPS_ISSUED.ANY + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * ( (( 9 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_STORE_MISSES.WALK_ACTIVE ) / ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) / #(EXE_ACTIVITY.BOUND_ON_STORES / CPU_CLK_UNHALTED.THREAD) ) ) ",
- "MetricGroup": "Mem;MemoryTLB;_SMT",
+ "MetricGroup": "Mem;MemoryTLB;Offcore_SMT",
"MetricName": "Memory_Data_TLBs_SMT"
},
{
@@ -159,12 +159,6 @@
"MetricName": "UpTB"
},
{
- "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
- "MetricExpr": "1 / (INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD)",
- "MetricGroup": "Pipeline;Mem",
- "MetricName": "CPI"
- },
- {
"BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "Pipeline",
@@ -214,42 +208,36 @@
"MetricName": "FLOPc_SMT"
},
{
- "BriefDescription": "Actual per-core usage of the Floating Point execution units (regardless of the vector width)",
+ "BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
"MetricExpr": "( (FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE) + (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) ) / ( 2 * CPU_CLK_UNHALTED.THREAD )",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "FP_Arith_Utilization",
- "PublicDescription": "Actual per-core usage of the Floating Point execution units (regardless of the vector width). Values > 1 are possible due to Fused-Multiply Add (FMA) counting."
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
},
{
- "BriefDescription": "Actual per-core usage of the Floating Point execution units (regardless of the vector width). SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). SMT version; use when SMT is enabled and measuring per logical CPU.",
"MetricExpr": "( (FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE) + (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) ) / ( 2 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ) )",
"MetricGroup": "Cor;Flops;HPC_SMT",
"MetricName": "FP_Arith_Utilization_SMT",
- "PublicDescription": "Actual per-core usage of the Floating Point execution units (regardless of the vector width). Values > 1 are possible due to Fused-Multiply Add (FMA) counting. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common). SMT version; use when SMT is enabled and measuring per logical CPU."
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
"MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
"MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
"MetricName": "ILP"
},
{
- "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
- "MetricExpr": " ( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) * ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * INT_MISC.CLEAR_RESTEER_CYCLES / CPU_CLK_UNHALTED.THREAD) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) ) * (4 * CPU_CLK_UNHALTED.THREAD) / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "Bad;BrMispredicts",
- "MetricName": "Branch_Misprediction_Cost"
- },
- {
- "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
- "MetricExpr": " ( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * INT_MISC.CLEAR_RESTEER_CYCLES / CPU_CLK_UNHALTED.THREAD) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) ) * (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "Bad;BrMispredicts_SMT",
- "MetricName": "Branch_Misprediction_Cost_SMT"
+ "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts",
+ "MetricExpr": "( 1 - ((1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - ( UOPS_ISSUED.ANY + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD)) - ((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - ( UOPS_ISSUED.ANY + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD)))) / ((EXE_ACTIVITY.EXE_BOUND_0_PORTS + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)) * EXE_ACTIVITY.2_PORTS_UTIL)) / CPU_CLK_UNHALTED.THREAD if ( ARITH.DIVIDER_ACTIVE < ( CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY ) ) else (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)) * EXE_ACTIVITY.2_PORTS_UTIL) / CPU_CLK_UNHALTED.THREAD) if ((1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - ( UOPS_ISSUED.ANY + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD)) - ((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - ( UOPS_ISSUED.ANY + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD)))) < ((EXE_ACTIVITY.EXE_BOUND_0_PORTS + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)) * EXE_ACTIVITY.2_PORTS_UTIL)) / CPU_CLK_UNHALTED.THREAD if ( ARITH.DIVIDER_ACTIVE < ( CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY ) ) else (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * CPU_CLK_UNHALTED.THREAD)) * EXE_ACTIVITY.2_PORTS_UTIL) / CPU_CLK_UNHALTED.THREAD) else 1 ) if 0 > 0.5 else 0",
+ "MetricGroup": "Cor;SMT",
+ "MetricName": "Core_Bound_Likely"
},
{
- "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "Bad;BadSpec;BrMispredicts",
- "MetricName": "IpMispredict"
+ "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts",
+ "MetricExpr": "( 1 - ((1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ( UOPS_ISSUED.ANY + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ( UOPS_ISSUED.ANY + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) / ((EXE_ACTIVITY.EXE_BOUND_0_PORTS + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * EXE_ACTIVITY.2_PORTS_UTIL)) / CPU_CLK_UNHALTED.THREAD if ( ARITH.DIVIDER_ACTIVE < ( CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY ) ) else (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * EXE_ACTIVITY.2_PORTS_UTIL) / CPU_CLK_UNHALTED.THREAD) if ((1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ( UOPS_ISSUED.ANY + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ((( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES)) * (1 - (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - ( UOPS_ISSUED.ANY + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) < ((EXE_ACTIVITY.EXE_BOUND_0_PORTS + (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * EXE_ACTIVITY.2_PORTS_UTIL)) / CPU_CLK_UNHALTED.THREAD if ( ARITH.DIVIDER_ACTIVE < ( CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY ) ) else (EXE_ACTIVITY.1_PORTS_UTIL + (UOPS_RETIRED.RETIRE_SLOTS / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * EXE_ACTIVITY.2_PORTS_UTIL) / CPU_CLK_UNHALTED.THREAD) else 1 ) if (1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_UNHALTED.REF_XCLK_ANY / 2 )) > 0.5 else 0",
+ "MetricGroup": "Cor;SMT_SMT",
+ "MetricName": "Core_Bound_Likely_SMT"
},
{
"BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
@@ -342,12 +330,30 @@
"PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
},
{
+ "BriefDescription": "Instructions per Software prefetch instruction (of any type: NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / cpu@SW_PREFETCH_ACCESS.T0\\,umask\\=0xF@",
+ "MetricGroup": "Prefetches",
+ "MetricName": "IpSWPF"
+ },
+ {
"BriefDescription": "Total number of retired Instructions, Sample with: INST_RETIRED.PREC_DIST",
"MetricExpr": "INST_RETIRED.ANY",
"MetricGroup": "Summary;TmaL1",
"MetricName": "Instructions"
},
{
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "Retire"
+ },
+ {
+ "BriefDescription": "",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
+ "MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
+ "MetricName": "Execute"
+ },
+ {
"BriefDescription": "Average number of Uops issued by front-end when it issued something",
"MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=1@",
"MetricGroup": "Fed;FetchBW",
@@ -360,24 +366,48 @@
"MetricName": "DSB_Coverage"
},
{
- "BriefDescription": "Total penalty related to DSB (uop cache) misses - subset/see of/the Instruction_Fetch_BW Bottleneck.",
- "MetricExpr": "(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) * (DSB2MITE_SWITCHES.PENALTY_CYCLES / CPU_CLK_UNHALTED.THREAD) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) + ((IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD))) * (( IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS ) / CPU_CLK_UNHALTED.THREAD / 2) / #((IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)))",
+ "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / DSB2MITE_SWITCHES.COUNT",
+ "MetricGroup": "DSBmiss",
+ "MetricName": "DSB_Switch_Cost"
+ },
+ {
+ "BriefDescription": "Total penalty related to DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck.",
+ "MetricExpr": "100 * ( (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) * (DSB2MITE_SWITCHES.PENALTY_CYCLES / CPU_CLK_UNHALTED.THREAD) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) + ((IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD))) * (( IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS ) / CPU_CLK_UNHALTED.THREAD / 2) / #((IDQ_UOPS_NOT_DELIVERED.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) - (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD))) )",
"MetricGroup": "DSBmiss;Fed",
- "MetricName": "DSB_Misses_Cost"
+ "MetricName": "DSB_Misses"
},
{
- "BriefDescription": "Total penalty related to DSB (uop cache) misses - subset/see of/the Instruction_Fetch_BW Bottleneck.",
- "MetricExpr": "(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * (DSB2MITE_SWITCHES.PENALTY_CYCLES / CPU_CLK_UNHALTED.THREAD) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) + ((IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (( IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS ) / ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ) / 2) / #((IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))",
+ "BriefDescription": "Total penalty related to DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck.",
+ "MetricExpr": "100 * ( (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * (DSB2MITE_SWITCHES.PENALTY_CYCLES / CPU_CLK_UNHALTED.THREAD) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) + ((IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (( IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS ) / ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ) / 2) / #((IDQ_UOPS_NOT_DELIVERED.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) - (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
"MetricGroup": "DSBmiss;Fed_SMT",
- "MetricName": "DSB_Misses_Cost_SMT"
+ "MetricName": "DSB_Misses_SMT"
},
{
- "BriefDescription": "Number of Instructions per non-speculative DSB miss",
+ "BriefDescription": "Number of Instructions per non-speculative DSB miss (lower number means higher occurrence rate)",
"MetricExpr": "INST_RETIRED.ANY / FRONTEND_RETIRED.ANY_DSB_MISS",
"MetricGroup": "DSBmiss;Fed",
"MetricName": "IpDSB_Miss_Ret"
},
{
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts",
+ "MetricName": "IpMispredict"
+ },
+ {
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
+ "MetricExpr": " ( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * CPU_CLK_UNHALTED.THREAD))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) * ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * INT_MISC.CLEAR_RESTEER_CYCLES / CPU_CLK_UNHALTED.THREAD) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * CPU_CLK_UNHALTED.THREAD)) ) * (4 * CPU_CLK_UNHALTED.THREAD) / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "Branch_Misprediction_Cost"
+ },
+ {
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
+ "MetricExpr": " ( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) ) / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) * ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * INT_MISC.CLEAR_RESTEER_CYCLES / CPU_CLK_UNHALTED.THREAD) / #(4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) ) * (4 * ( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BrMispredicts_SMT",
+ "MetricName": "Branch_Misprediction_Cost_SMT"
+ },
+ {
"BriefDescription": "Fraction of branches that are non-taken conditionals",
"MetricExpr": "BR_INST_RETIRED.NOT_TAKEN / BR_INST_RETIRED.ALL_BRANCHES",
"MetricGroup": "Bad;Branches;CodeGen;PGO",
@@ -402,11 +432,10 @@
"MetricName": "Jump"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load instructions (in core cycles)",
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
"MetricGroup": "Mem;MemoryBound;MemoryLat",
- "MetricName": "Load_Miss_Real_Latency",
- "PublicDescription": "Actual Average Latency for L1 data-cache miss demand load instructions (in core cycles). Latency may be overestimated for multi-load instructions - e.g. repeat strings."
+ "MetricName": "Load_Miss_Real_Latency"
},
{
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
@@ -415,30 +444,6 @@
"MetricName": "MLP"
},
{
- "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
- "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L1D_Cache_Fill_BW"
- },
- {
- "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L2_Cache_Fill_BW"
- },
- {
- "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW",
- "MetricName": "L3_Cache_Fill_BW"
- },
- {
- "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
- "MetricGroup": "Mem;MemoryBW;Offcore",
- "MetricName": "L3_Cache_Access_BW"
- },
- {
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
"MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
"MetricGroup": "Mem;CacheMisses",
@@ -457,13 +462,13 @@
"MetricName": "L2MPKI"
},
{
- "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)",
"MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
"MetricGroup": "Mem;CacheMisses;Offcore",
"MetricName": "L2MPKI_All"
},
{
- "BriefDescription": "L2 cache misses per kilo instruction for all demand loads (including speculative)",
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)",
"MetricExpr": "1000 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY",
"MetricGroup": "Mem;CacheMisses",
"MetricName": "L2MPKI_Load"
@@ -487,7 +492,7 @@
"MetricName": "L3MPKI"
},
{
- "BriefDescription": "Fill Buffer (FB) true hits per kilo instructions for retired demand loads",
+ "BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)",
"MetricExpr": "1000 * MEM_LOAD_RETIRED.FB_HIT / INST_RETIRED.ANY",
"MetricGroup": "Mem;CacheMisses",
"MetricName": "FB_HPKI"
@@ -506,6 +511,30 @@
"MetricName": "Page_Walks_Utilization_SMT"
},
{
+ "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L1D_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L2_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L3_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "L3_Cache_Access_BW"
+ },
+ {
"BriefDescription": "Rate of silent evictions from the L2 cache per Kilo instruction where the evicted lines are dropped (no writeback to L3 or memory)",
"MetricExpr": "1000 * L2_LINES_OUT.SILENT / INST_RETIRED.ANY",
"MetricGroup": "L2Evicts;Mem;Server",
@@ -518,6 +547,30 @@
"MetricName": "L2_Evictions_NonSilent_PKI"
},
{
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "(64 * L1D.REPLACEMENT / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L1D_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "(64 * L2_LINES_IN.ALL / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L2_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "(64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L3_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "(64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "L3_Cache_Access_BW_1T"
+ },
+ {
"BriefDescription": "Average CPU Utilization",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricGroup": "HPC;Summary",
@@ -533,7 +586,8 @@
"BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "( ( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE ) / 1000000000 ) / duration_time",
"MetricGroup": "Cor;Flops;HPC",
- "MetricName": "GFLOPs"
+ "MetricName": "GFLOPs",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine."
},
{
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
@@ -644,6 +698,12 @@
"MetricName": "Socket_CLKS"
},
{
+ "BriefDescription": "Uncore frequency per die [GHZ]",
+ "MetricExpr": "cha_0@event\\=0x0@ / #num_dies / duration_time / 1000000000",
+ "MetricGroup": "SoC",
+ "MetricName": "UNCORE_FREQ"
+ },
+ {
"BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
"MetricGroup": "Branches;OS",
@@ -690,5 +750,488 @@
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"MetricName": "C7_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "Percentage of time spent in the active CPU power state C0",
+ "MetricExpr": "100 * CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricGroup": "",
+ "MetricName": "cpu_utilization_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "CPU operating frequency (in GHz)",
+ "MetricExpr": "(( CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC * #SYSTEM_TSC_FREQ ) / 1000000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "cpu_operating_frequency",
+ "ScaleUnit": "1GHz"
+ },
+ {
+ "BriefDescription": "Cycles per instruction retired; indicating how much time each executed instruction took; in units of cycles.",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "cpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "The ratio of number of completed memory load instructions to the total number completed instructions",
+ "MetricExpr": "MEM_INST_RETIRED.ALL_LOADS / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "loads_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "The ratio of number of completed memory store instructions to the total number completed instructions",
+ "MetricExpr": "MEM_INST_RETIRED.ALL_STORES / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "stores_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of requests missing L1 data cache (includes data+rfo w/ prefetches) to the total number of completed instructions",
+ "MetricExpr": "L1D.REPLACEMENT / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "l1d_mpi_includes_data_plus_rfo_with_prefetches",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of demand load requests hitting in L1 data cache to the total number of completed instructions ",
+ "MetricExpr": "MEM_LOAD_RETIRED.L1_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "l1d_demand_data_read_hits_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of code read requests missing in L1 instruction cache (includes prefetches) to the total number of completed instructions",
+ "MetricExpr": "L2_RQSTS.ALL_CODE_RD / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "l1_i_code_read_misses_with_prefetches_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed demand load requests hitting in L2 cache to the total number of completed instructions ",
+ "MetricExpr": "MEM_LOAD_RETIRED.L2_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "l2_demand_data_read_hits_per_instr",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of requests missing L2 cache (includes code+data+rfo w/ prefetches) to the total number of completed instructions",
+ "MetricExpr": "L2_LINES_IN.ALL / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "l2_mpi_includes_code_plus_data_plus_rfo_with_prefetches",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed data read request missing L2 cache to the total number of completed instructions",
+ "MetricExpr": "MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "l2_demand_data_read_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of code read request missing L2 cache to the total number of completed instructions",
+ "MetricExpr": "L2_RQSTS.CODE_RD_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "l2_demand_code_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of data read requests missing last level core cache (includes demand w/ prefetches) to the total number of completed instructions",
+ "MetricExpr": "cha@unc_cha_tor_inserts.ia_miss\\,config1\\=0x12D4043300000000@ / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "llc_data_read_mpi_demand_plus_prefetch",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of code read requests missing last level core cache (includes demand w/ prefetches) to the total number of completed instructions",
+ "MetricExpr": "cha@unc_cha_tor_inserts.ia_miss\\,config1\\=0x12CC023300000000@ / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "llc_code_read_mpi_demand_plus_prefetch",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for all page sizes) caused by a code fetch to the total number of completed instructions. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB.",
+ "MetricExpr": "ITLB_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "itlb_2nd_level_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for 2 megabyte and 4 megabyte page sizes) caused by a code fetch to the total number of completed instructions. This implies it missed in the Instruction Translation Lookaside Buffer (ITLB) and further levels of TLB.",
+ "MetricExpr": "ITLB_MISSES.WALK_COMPLETED_2M_4M / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "itlb_2nd_level_large_page_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for all page sizes) caused by demand data loads to the total number of completed instructions. This implies it missed in the DTLB and further levels of TLB.",
+ "MetricExpr": "DTLB_LOAD_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "dtlb_2nd_level_load_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for 2 megabyte page sizes) caused by demand data loads to the total number of completed instructions. This implies it missed in the Data Translation Lookaside Buffer (DTLB) and further levels of TLB.",
+ "MetricExpr": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "dtlb_2nd_level_2mb_large_page_load_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Ratio of number of completed page walks (for all page sizes) caused by demand data stores to the total number of completed instructions. This implies it missed in the DTLB and further levels of TLB.",
+ "MetricExpr": "DTLB_STORE_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "",
+ "MetricName": "dtlb_2nd_level_store_mpi",
+ "ScaleUnit": "1per_instr"
+ },
+ {
+ "BriefDescription": "Memory read that miss the last level cache (LLC) addressed to local DRAM as a percentage of total memory read accesses, does not include LLC prefetches.",
+ "MetricExpr": "100 * cha@unc_cha_tor_inserts.ia_miss\\,config1\\=0x4043200000000@ / ( cha@unc_cha_tor_inserts.ia_miss\\,config1\\=0x4043200000000@ + cha@unc_cha_tor_inserts.ia_miss\\,config1\\=0x4043100000000@ )",
+ "MetricGroup": "",
+ "MetricName": "numa_percent_reads_addressed_to_local_dram",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Memory reads that miss the last level cache (LLC) addressed to remote DRAM as a percentage of total memory read accesses, does not include LLC prefetches.",
+ "MetricExpr": "100 * cha@unc_cha_tor_inserts.ia_miss\\,config1\\=0x4043100000000@ / ( cha@unc_cha_tor_inserts.ia_miss\\,config1\\=0x4043200000000@ + cha@unc_cha_tor_inserts.ia_miss\\,config1\\=0x4043100000000@ )",
+ "MetricGroup": "",
+ "MetricName": "numa_percent_reads_addressed_to_remote_dram",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Uncore operating frequency in GHz",
+ "MetricExpr": "( UNC_CHA_CLOCKTICKS / ( source_count(UNC_CHA_CLOCKTICKS) * #num_packages ) / 1000000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "uncore_frequency",
+ "ScaleUnit": "1GHz"
+ },
+ {
+ "BriefDescription": "Intel(R) Ultra Path Interconnect (UPI) data transmit bandwidth (MB/sec)",
+ "MetricExpr": "( UNC_UPI_TxL_FLITS.ALL_DATA * (64 / 9.0) / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "upi_data_transmit_bw_only_data",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "DDR memory read bandwidth (MB/sec)",
+ "MetricExpr": "( UNC_M_CAS_COUNT.RD * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "memory_bandwidth_read",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "DDR memory write bandwidth (MB/sec)",
+ "MetricExpr": "( UNC_M_CAS_COUNT.WR * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "memory_bandwidth_write",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "DDR memory bandwidth (MB/sec)",
+ "MetricExpr": "(( UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR ) * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "memory_bandwidth_total",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth of IO reads that are initiated by end device controllers that are requesting memory from the CPU.",
+ "MetricExpr": "(( UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3 ) * 4 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "io_bandwidth_read",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth of IO writes that are initiated by end device controllers that are writing memory to the CPU.",
+ "MetricExpr": "(( UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.PART0 + UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.PART1 + UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.PART2 + UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.PART3 ) * 4 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "io_bandwidth_write",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Uops delivered from decoded instruction cache (decoded stream buffer or DSB) as a percent of total uops delivered to Instruction Decode Queue",
+ "MetricExpr": "100 * ( IDQ.DSB_UOPS / UOPS_ISSUED.ANY )",
+ "MetricGroup": "",
+ "MetricName": "percent_uops_delivered_from_decoded_icache_dsb",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Uops delivered from legacy decode pipeline (Micro-instruction Translation Engine or MITE) as a percent of total uops delivered to Instruction Decode Queue",
+ "MetricExpr": "100 * ( IDQ.MITE_UOPS / UOPS_ISSUED.ANY )",
+ "MetricGroup": "",
+ "MetricName": "percent_uops_delivered_from_legacy_decode_pipeline_mite",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Uops delivered from microcode sequencer (MS) as a percent of total uops delivered to Instruction Decode Queue",
+ "MetricExpr": "100 * ( IDQ.MS_UOPS / UOPS_ISSUED.ANY )",
+ "MetricGroup": "",
+ "MetricName": "percent_uops_delivered_from_microcode_sequencer_ms",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Bandwidth (MB/sec) of read requests that miss the last level cache (LLC) and go to local memory.",
+ "MetricExpr": "( UNC_CHA_REQUESTS.READS_LOCAL * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "llc_miss_local_memory_bandwidth_read",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth (MB/sec) of write requests that miss the last level cache (LLC) and go to local memory.",
+ "MetricExpr": "( UNC_CHA_REQUESTS.WRITES_LOCAL * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "llc_miss_local_memory_bandwidth_write",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "Bandwidth (MB/sec) of read requests that miss the last level cache (LLC) and go to remote memory.",
+ "MetricExpr": "( UNC_CHA_REQUESTS.READS_REMOTE * 64 / 1000000) / duration_time",
+ "MetricGroup": "",
+ "MetricName": "llc_miss_remote_memory_bandwidth_read",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Machine_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+ "MetricExpr": "100 * ( IDQ_UOPS_NOT_DELIVERED.CORE / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) )",
+ "MetricGroup": "TmaL1;PGO",
+ "MetricName": "tma_frontend_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period.",
+ "MetricExpr": "100 * ( ( 4 ) * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) )",
+ "MetricGroup": "Frontend;TmaL2;m_tma_frontend_bound_percent",
+ "MetricName": "tma_fetch_latency_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses.",
+ "MetricExpr": "100 * ( ( ICACHE_16B.IFDATA_STALL + 2 * cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=0x1\\,edge\\=0x1@ ) / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "BigFoot;FetchLat;IcMiss;TmaL3;m_tma_fetch_latency_percent",
+ "MetricName": "tma_icache_misses_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses.",
+ "MetricExpr": "100 * ( ICACHE_64B.IFTAG_STALL / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TmaL3;m_tma_fetch_latency_percent",
+ "MetricName": "tma_itlb_misses_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings.",
+ "MetricExpr": "100 * ( INT_MISC.CLEAR_RESTEER_CYCLES / ( CPU_CLK_UNHALTED.THREAD ) + ( ( 9 ) * BACLEARS.ANY / ( CPU_CLK_UNHALTED.THREAD ) ) )",
+ "MetricGroup": "FetchLat;TmaL3;m_tma_fetch_latency_percent",
+ "MetricName": "tma_branch_resteers_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty.",
+ "MetricExpr": "100 * ( DSB2MITE_SWITCHES.PENALTY_CYCLES / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "DSBmiss;FetchLat;TmaL3;m_tma_fetch_latency_percent",
+ "MetricName": "tma_dsb_switches_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs.",
+ "MetricExpr": "100 * ( ILD_STALL.LCP / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "FetchLat;TmaL3;m_tma_fetch_latency_percent",
+ "MetricName": "tma_lcp_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals.",
+ "MetricExpr": "100 * ( ( 2 ) * IDQ.MS_SWITCHES / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "FetchLat;MicroSeq;TmaL3;m_tma_fetch_latency_percent",
+ "MetricName": "tma_ms_switches_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend.",
+ "MetricExpr": "100 * ( ( IDQ_UOPS_NOT_DELIVERED.CORE / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( ( 4 ) * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) )",
+ "MetricGroup": "FetchBW;Frontend;TmaL2;m_tma_frontend_bound_percent",
+ "MetricName": "tma_fetch_bandwidth_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck.",
+ "MetricExpr": "100 * ( ( IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS ) / ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) / 2 )",
+ "MetricGroup": "DSBmiss;FetchBW;TmaL3;m_tma_fetch_bandwidth_percent",
+ "MetricName": "tma_mite_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
+ "MetricExpr": "100 * ( ( IDQ.ALL_DSB_CYCLES_ANY_UOPS - IDQ.ALL_DSB_CYCLES_4_UOPS ) / ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) / 2 )",
+ "MetricGroup": "DSB;FetchBW;TmaL3;m_tma_fetch_bandwidth_percent",
+ "MetricName": "tma_dsb_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "MetricExpr": "100 * ( ( UOPS_ISSUED.ANY - ( UOPS_RETIRED.RETIRE_SLOTS ) + ( 4 ) * ( ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) if #SMT_on else INT_MISC.RECOVERY_CYCLES ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) )",
+ "MetricGroup": "TmaL1",
+ "MetricName": "tma_bad_speculation_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path.",
+ "MetricExpr": "100 * ( ( BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT ) ) * ( ( UOPS_ISSUED.ANY - ( UOPS_RETIRED.RETIRE_SLOTS ) + ( 4 ) * ( ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) if #SMT_on else INT_MISC.RECOVERY_CYCLES ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) )",
+ "MetricGroup": "BadSpec;BrMispredicts;TmaL2;m_tma_bad_speculation_percent",
+ "MetricName": "tma_branch_mispredicts_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes.",
+ "MetricExpr": "100 * ( ( ( UOPS_ISSUED.ANY - ( UOPS_RETIRED.RETIRE_SLOTS ) + ( 4 ) * ( ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) if #SMT_on else INT_MISC.RECOVERY_CYCLES ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( ( BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT ) ) * ( ( UOPS_ISSUED.ANY - ( UOPS_RETIRED.RETIRE_SLOTS ) + ( 4 ) * ( ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) if #SMT_on else INT_MISC.RECOVERY_CYCLES ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) )",
+ "MetricGroup": "BadSpec;MachineClears;TmaL2;m_tma_bad_speculation_percent",
+ "MetricName": "tma_machine_clears_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "MetricExpr": "100 * ( 1 - ( IDQ_UOPS_NOT_DELIVERED.CORE / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( UOPS_ISSUED.ANY + ( 4 ) * ( ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) if #SMT_on else INT_MISC.RECOVERY_CYCLES ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) )",
+ "MetricGroup": "TmaL1",
+ "MetricName": "tma_backend_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
+ "MetricExpr": "100 * ( ( ( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / ( CYCLE_ACTIVITY.STALLS_TOTAL + ( EXE_ACTIVITY.1_PORTS_UTIL + ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) * EXE_ACTIVITY.2_PORTS_UTIL ) + EXE_ACTIVITY.BOUND_ON_STORES ) ) * ( 1 - ( IDQ_UOPS_NOT_DELIVERED.CORE / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( UOPS_ISSUED.ANY + ( 4 ) * ( ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) if #SMT_on else INT_MISC.RECOVERY_CYCLES ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) )",
+ "MetricGroup": "Backend;TmaL2;m_tma_backend_bound_percent",
+ "MetricName": "tma_memory_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache.",
+ "MetricExpr": "100 * ( max( ( CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS ) / ( CPU_CLK_UNHALTED.THREAD ) , 0 ) )",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TmaL3;m_tma_memory_bound_percent",
+ "MetricName": "tma_l1_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance.",
+ "MetricExpr": "100 * ( ( ( MEM_LOAD_RETIRED.L2_HIT * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) / ( ( MEM_LOAD_RETIRED.L2_HIT * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=0x1@ ) ) * ( ( CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS ) / ( CPU_CLK_UNHALTED.THREAD ) ) )",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TmaL3;m_tma_memory_bound_percent",
+ "MetricName": "tma_l2_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance.",
+ "MetricExpr": "100 * ( ( CYCLE_ACTIVITY.STALLS_L2_MISS - CYCLE_ACTIVITY.STALLS_L3_MISS ) / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TmaL3;m_tma_memory_bound_percent",
+ "MetricName": "tma_l3_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance.",
+ "MetricExpr": "100 * ( min( ( ( CYCLE_ACTIVITY.STALLS_L3_MISS / ( CPU_CLK_UNHALTED.THREAD ) + ( ( CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS ) / ( CPU_CLK_UNHALTED.THREAD ) ) - ( ( ( MEM_LOAD_RETIRED.L2_HIT * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) / ( ( MEM_LOAD_RETIRED.L2_HIT * ( 1 + ( MEM_LOAD_RETIRED.FB_HIT / ( MEM_LOAD_RETIRED.L1_MISS ) ) ) ) + cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=0x1@ ) ) * ( ( CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS ) / ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) , ( 1 ) ) )",
+ "MetricGroup": "MemoryBound;TmaL3mem;TmaL3;m_tma_memory_bound_percent",
+ "MetricName": "tma_dram_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck.",
+ "MetricExpr": "100 * ( EXE_ACTIVITY.BOUND_ON_STORES / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "MemoryBound;TmaL3mem;TmaL3;m_tma_memory_bound_percent",
+ "MetricName": "tma_store_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
+ "MetricExpr": "100 * ( ( 1 - ( IDQ_UOPS_NOT_DELIVERED.CORE / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( UOPS_ISSUED.ANY + ( 4 ) * ( ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) if #SMT_on else INT_MISC.RECOVERY_CYCLES ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( ( ( CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES ) / ( CYCLE_ACTIVITY.STALLS_TOTAL + ( EXE_ACTIVITY.1_PORTS_UTIL + ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) * EXE_ACTIVITY.2_PORTS_UTIL ) + EXE_ACTIVITY.BOUND_ON_STORES ) ) * ( 1 - ( IDQ_UOPS_NOT_DELIVERED.CORE / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( UOPS_ISSUED.ANY + ( 4 ) * ( ( INT_MISC.RECOVERY_CYCLES_ANY / 2 ) if #SMT_on else INT_MISC.RECOVERY_CYCLES ) ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) )",
+ "MetricGroup": "Backend;TmaL2;Compute;m_tma_backend_bound_percent",
+ "MetricName": "tma_core_bound_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication.",
+ "MetricExpr": "100 * ( ARITH.DIVIDER_ACTIVE / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "TmaL3;m_tma_core_bound_percent",
+ "MetricName": "tma_divider_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
+ "MetricExpr": "100 * ( ( EXE_ACTIVITY.EXE_BOUND_0_PORTS + ( EXE_ACTIVITY.1_PORTS_UTIL + ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) * EXE_ACTIVITY.2_PORTS_UTIL ) ) / ( CPU_CLK_UNHALTED.THREAD ) if ( ARITH.DIVIDER_ACTIVE < ( CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY ) ) else ( EXE_ACTIVITY.1_PORTS_UTIL + ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) * EXE_ACTIVITY.2_PORTS_UTIL ) / ( CPU_CLK_UNHALTED.THREAD ) )",
+ "MetricGroup": "PortsUtil;TmaL3;m_tma_core_bound_percent",
+ "MetricName": "tma_ports_utilization_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. ",
+ "MetricExpr": "100 * ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) )",
+ "MetricGroup": "TmaL1",
+ "MetricName": "tma_retiring_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved.",
+ "MetricExpr": "100 * ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) + UOPS_RETIRED.MACRO_FUSED - INST_RETIRED.ANY ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) )",
+ "MetricGroup": "Retire;TmaL2;m_tma_retiring_percent",
+ "MetricName": "tma_light_operations_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
+ "MetricExpr": "100 * ( ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) * UOPS_EXECUTED.X87 / UOPS_EXECUTED.THREAD ) + ( ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) / ( UOPS_RETIRED.RETIRE_SLOTS ) ) + ( min( ( ( FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE ) / ( UOPS_RETIRED.RETIRE_SLOTS ) ) , ( 1 ) ) ) )",
+ "MetricGroup": "HPC;TmaL3;m_tma_light_operations_percent",
+ "MetricName": "tma_fp_arith_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations -- uops for memory load or store accesses.",
+ "MetricExpr": "100 * ( ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) + UOPS_RETIRED.MACRO_FUSED - INST_RETIRED.ANY ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) * MEM_INST_RETIRED.ANY / INST_RETIRED.ANY )",
+ "MetricGroup": "Pipeline;TmaL3;m_tma_light_operations_percent",
+ "MetricName": "tma_memory_operations_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions. The instruction pairs of CMP+JCC or DEC+JCC are commonly used examples.",
+ "MetricExpr": "100 * ( ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) + UOPS_RETIRED.MACRO_FUSED - INST_RETIRED.ANY ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) * UOPS_RETIRED.MACRO_FUSED / ( UOPS_RETIRED.RETIRE_SLOTS ) )",
+ "MetricGroup": "Pipeline;TmaL3;m_tma_light_operations_percent",
+ "MetricName": "tma_fused_instructions_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused.",
+ "MetricExpr": "100 * ( ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) + UOPS_RETIRED.MACRO_FUSED - INST_RETIRED.ANY ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) * ( BR_INST_RETIRED.ALL_BRANCHES - UOPS_RETIRED.MACRO_FUSED ) / ( UOPS_RETIRED.RETIRE_SLOTS ) )",
+ "MetricGroup": "Pipeline;TmaL3;m_tma_light_operations_percent",
+ "MetricName": "tma_non_fused_branches_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body.",
+ "MetricExpr": "100 * ( ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) + UOPS_RETIRED.MACRO_FUSED - INST_RETIRED.ANY ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) * INST_RETIRED.NOP / ( UOPS_RETIRED.RETIRE_SLOTS ) )",
+ "MetricGroup": "Pipeline;TmaL3;m_tma_light_operations_percent",
+ "MetricName": "tma_nop_instructions_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes. May undercount due to FMA double counting",
+ "MetricExpr": "100 * ( max( 0 , ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) + UOPS_RETIRED.MACRO_FUSED - INST_RETIRED.ANY ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) - ( ( ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) * UOPS_EXECUTED.X87 / UOPS_EXECUTED.THREAD ) + ( ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) / ( UOPS_RETIRED.RETIRE_SLOTS ) ) + ( min( ( ( FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE ) / ( UOPS_RETIRED.RETIRE_SLOTS ) ) , ( 1 ) ) ) ) + ( ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) + UOPS_RETIRED.MACRO_FUSED - INST_RETIRED.ANY ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) * MEM_INST_RETIRED.ANY / INST_RETIRED.ANY ) + ( ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) + UOPS_RETIRED.MACRO_FUSED - INST_RETIRED.ANY ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) * UOPS_RETIRED.MACRO_FUSED / ( UOPS_RETIRED.RETIRE_SLOTS ) ) + ( ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) + UOPS_RETIRED.MACRO_FUSED - INST_RETIRED.ANY ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) * ( BR_INST_RETIRED.ALL_BRANCHES - UOPS_RETIRED.MACRO_FUSED ) / ( UOPS_RETIRED.RETIRE_SLOTS ) ) + ( ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) + UOPS_RETIRED.MACRO_FUSED - INST_RETIRED.ANY ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) ) * INST_RETIRED.NOP / ( UOPS_RETIRED.RETIRE_SLOTS ) ) ) ) )",
+ "MetricGroup": "Pipeline;TmaL3;m_tma_light_operations_percent",
+ "MetricName": "tma_other_light_ops_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or microcoded sequences. This highly-correlates with the uop length of these instructions/sequences.",
+ "MetricExpr": "100 * ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) + UOPS_RETIRED.MACRO_FUSED - INST_RETIRED.ANY ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) )",
+ "MetricGroup": "Retire;TmaL2;m_tma_retiring_percent",
+ "MetricName": "tma_heavy_operations_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring instructions that that are decoder into two or up to ([SNB+] four; [ADL+] five) uops. This highly-correlates with the number of uops in such instructions.",
+ "MetricExpr": "100 * ( ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) + UOPS_RETIRED.MACRO_FUSED - INST_RETIRED.ANY ) / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) - ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) / UOPS_ISSUED.ANY ) * IDQ.MS_UOPS / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) ) )",
+ "MetricGroup": "TmaL3;m_tma_heavy_operations_percent",
+ "MetricName": "tma_few_uops_instructions_percent",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided.",
+ "MetricExpr": "100 * ( ( ( UOPS_RETIRED.RETIRE_SLOTS ) / UOPS_ISSUED.ANY ) * IDQ.MS_UOPS / ( ( 4 ) * ( ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else ( CPU_CLK_UNHALTED.THREAD ) ) ) )",
+ "MetricGroup": "MicroSeq;TmaL3;m_tma_heavy_operations_percent",
+ "MetricName": "tma_microcode_sequencer_percent",
+ "ScaleUnit": "1%"
}
]
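
The MetricExpr strings added above are, for the simpler entries, plain arithmetic over raw counter deltas (the more involved ones also use the cpu@...@ / cha@...@ event syntax, literals such as #SMT_on and #num_dies, and if/else and min/max, none of which is covered here). As a rough sketch of what such an expression works out to, the following Python snippet substitutes made-up counter values into the memory_bandwidth_total expression; the counter numbers are illustrative assumptions and this is not the perf tool's own expression parser.

import re

# Minimal sketch (assumption: not perf's real parser) that evaluates one of the
# simple arithmetic MetricExpr strings above from a dict of raw counter deltas.
def evaluate_metric(expr: str, counters: dict) -> float:
    # Substitute longer counter names first so a name that is a prefix of
    # another (e.g. UNC_M_CAS_COUNT.RD vs UNC_M_CAS_COUNT.RD_REG) is not clobbered.
    for name in sorted(counters, key=len, reverse=True):
        expr = expr.replace(name, repr(float(counters[name])))
    # After substitution only numbers, parentheses and arithmetic may remain.
    if not re.fullmatch(r"[0-9eE+\-*/(). ]+", expr):
        raise ValueError("unsupported token in expression: " + expr)
    return eval(expr)  # safe here: the expression was validated just above

if __name__ == "__main__":
    # Hypothetical counter deltas collected over a 1-second interval.
    counters = {
        "UNC_M_CAS_COUNT.RD": 40_000_000,  # read CAS commands, 64 bytes each
        "UNC_M_CAS_COUNT.WR": 10_000_000,  # write CAS commands
        "duration_time": 1.0,              # seconds
    }
    expr = "(( UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR ) * 64 / 1000000) / duration_time"
    print("memory_bandwidth_total = %.1f MB/s" % evaluate_metric(expr, counters))

With the sample values this prints 3200.0 MB/s. For real measurements the metric is selected by name instead, e.g. with perf stat -M memory_bandwidth_total, and perf itself programs the UNC_M_CAS_COUNT events and evaluates the expression from these JSON definitions.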
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/uncore-memory.json b/tools/perf/pmu-events/arch/x86/skylakex/uncore-memory.json
index 4dcbac887380..0746fcf2ebd9 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/uncore-memory.json
@@ -1,5 +1,32 @@
[
{
+ "BriefDescription": "DRAM Page Activate commands sent due to a write request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_M_ACT_COUNT.WR",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM Read CAS Commands issued (does not include underfills)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_REG",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Underfill Read CAS Commands issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
"BriefDescription": "read requests to memory controller. Derived from unc_m_cas_count.rd",
"Counter": "0,1,2,3",
"EventCode": "0x4",
@@ -20,6 +47,15 @@
"Unit": "iMC"
},
{
+ "BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Write Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR_WMM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
"BriefDescription": "write requests to memory controller. Derived from unc_m_cas_count.wr",
"Counter": "0,1,2,3",
"EventCode": "0x4",
@@ -40,6 +76,15 @@
"Unit": "iMC"
},
{
+ "BriefDescription": "All DRAM CAS Commands issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.ALL",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
"BriefDescription": "Memory controller clock ticks",
"Counter": "0,1,2,3",
"EventName": "UNC_M_CLOCKTICKS",
@@ -85,89 +130,3543 @@
"Unit": "iMC"
},
{
- "BriefDescription": "DRAM Page Activate commands sent due to a write request",
+ "BriefDescription": "Read Pending Queue Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_M_RPQ_INSERTS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_M_RPQ_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_M_WPQ_INSERTS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x81",
+ "EventName": "UNC_M_WPQ_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Activate Count; Activate due to Read",
"Counter": "0,1,2,3",
"EventCode": "0x1",
- "EventName": "UNC_M_ACT_COUNT.WR",
+ "EventName": "UNC_M_ACT_COUNT.RD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Activate Count; Activate due to Bypass",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_M_ACT_COUNT.BYP",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "ACT command issued by 2 cycle bypass",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M_BYP_CMDS.ACT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CAS command issued by 2 cycle bypass",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M_BYP_CMDS.CAS",
"PerPkg": "1",
- "PublicDescription": "Counts DRAM Page Activate commands sent on this channel due to a write request to the iMC (Memory Controller). Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS (Column Access Select) command.",
"UMask": "0x2",
"Unit": "iMC"
},
{
- "BriefDescription": "All DRAM CAS Commands issued",
+ "BriefDescription": "PRE command issued by 2 cycle bypass",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M_BYP_CMDS.PRE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Read Major Mode",
"Counter": "0,1,2,3",
"EventCode": "0x4",
- "EventName": "UNC_M_CAS_COUNT.ALL",
+ "EventName": "UNC_M_CAS_COUNT.WR_RMM",
"PerPkg": "1",
- "PublicDescription": "Counts all CAS (Column Address Select) commands issued to DRAM per memory channel. CAS commands are issued to specify the address to read or write on DRAM, so this event increments for every read and write. This event counts whether AutoPrecharge (which closes the DRAM Page automatically after a read/write) is enabled or not.",
- "UMask": "0xF",
+ "UMask": "0x8",
"Unit": "iMC"
},
{
- "BriefDescription": "All DRAM Read CAS Commands issued (does not include underfills)",
+ "BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; Read CAS issued in WMM",
"Counter": "0,1,2,3",
"EventCode": "0x4",
- "EventName": "UNC_M_CAS_COUNT.RD_REG",
+ "EventName": "UNC_M_CAS_COUNT.RD_WMM",
"PerPkg": "1",
- "PublicDescription": "Counts CAS (Column Access Select) regular read commands issued to DRAM on a per channel basis. CAS commands are issued to specify the address to read or write on DRAM, and this event increments for every regular read. This event only counts regular reads and does not includes underfill reads due to partial write requests. This event counts whether AutoPrecharge (which closes the DRAM Page automatically after a read/write) is enabled or not.",
- "UMask": "0x1",
+ "UMask": "0x10",
"Unit": "iMC"
},
{
- "BriefDescription": "DRAM Underfill Read CAS Commands issued",
+ "BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; Read CAS issued in RMM",
"Counter": "0,1,2,3",
"EventCode": "0x4",
- "EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
+ "EventName": "UNC_M_CAS_COUNT.RD_RMM",
"PerPkg": "1",
- "PublicDescription": "Counts CAS (Column Access Select) underfill read commands issued to DRAM due to a partial write, on a per channel basis. CAS commands are issued to specify the address to read or write on DRAM, and this command counts underfill reads. Partial writes must be completed by first reading in the underfill from DRAM and then merging in the partial write data before writing the full line back to DRAM. This event will generally count about the same as the number of partial writes, but may be slightly less because of partials hitting in the WPQ (due to a previous write request).",
- "UMask": "0x2",
+ "UMask": "0x20",
"Unit": "iMC"
},
{
- "BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Write Major Mode",
+ "BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; Read CAS issued in Read ISOCH Mode",
"Counter": "0,1,2,3",
"EventCode": "0x4",
- "EventName": "UNC_M_CAS_COUNT.WR_WMM",
+ "EventName": "UNC_M_CAS_COUNT.RD_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; Read CAS issued in Write ISOCH Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge All Commands",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_M_DRAM_PRE_ALL",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_M_DRAM_REFRESH.PANIC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_M_DRAM_REFRESH.HIGH",
"PerPkg": "1",
- "PublicDescription": "Counts the total number or DRAM Write CAS commands issued on this channel while in Write-Major-Mode.",
"UMask": "0x4",
"Unit": "iMC"
},
{
- "BriefDescription": "Read Pending Queue Allocations",
+ "BriefDescription": "ECC Correctable Errors",
"Counter": "0,1,2,3",
- "EventCode": "0x10",
- "EventName": "UNC_M_RPQ_INSERTS",
+ "EventCode": "0x9",
+ "EventName": "UNC_M_ECC_CORRECTABLE_ERRORS",
"PerPkg": "1",
- "PublicDescription": "Counts the number of read requests allocated into the Read Pending Queue (RPQ). This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. The requests deallocate after the read CAS command has been issued to DRAM. This event counts both Isochronous and non-Isochronous requests which were issued to the RPQ.",
"Unit": "iMC"
},
{
- "BriefDescription": "Read Pending Queue Occupancy",
+ "BriefDescription": "Cycles in a Major Mode; Read Major Mode",
"Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "UNC_M_RPQ_OCCUPANCY",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.READ",
"PerPkg": "1",
- "PublicDescription": "Counts the number of entries in the Read Pending Queue (RPQ) at each cycle. This can then be used to calculate both the average occupancy of the queue (in conjunction with the number of cycles not empty) and the average latency in the queue (in conjunction with the number of allocations). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate from the RPQ after the CAS command has been issued to memory.",
+ "UMask": "0x1",
"Unit": "iMC"
},
{
- "BriefDescription": "Write Pending Queue Allocations",
+ "BriefDescription": "Cycles in a Major Mode; Write Major Mode",
"Counter": "0,1,2,3",
- "EventCode": "0x20",
- "EventName": "UNC_M_WPQ_INSERTS",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.WRITE",
"PerPkg": "1",
- "PublicDescription": "Counts the number of writes requests allocated into the Write Pending Queue (WPQ). The WPQ is used to schedule writes out to the memory controller and to track the requests. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC (Memory Controller). The write requests deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have 'posted' to the iMC.",
+ "UMask": "0x2",
"Unit": "iMC"
},
{
- "BriefDescription": "Write Pending Queue Occupancy",
+ "BriefDescription": "Cycles in a Major Mode; Partial Major Mode",
"Counter": "0,1,2,3",
- "EventCode": "0x81",
- "EventName": "UNC_M_WPQ_OCCUPANCY",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.PARTIAL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Isoch Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Channel DLLOFF Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_M_POWER_CHANNEL_DLLOFF",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Critical Throttle Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x86",
+ "EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_POWER_PCU_THROTTLING",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x42",
+ "EventName": "UNC_M_POWER_PCU_THROTTLING",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Preemption Count; Read over Read Preemption",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_M_PREEMPTION.RD_PREEMPT_RD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Preemption Count; Read over Write Preemption",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_M_PREEMPTION.RD_PREEMPT_WR",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to timer expiration",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_M_PRE_COUNT.PAGE_CLOSE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Pre-charge for writes",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_M_PRE_COUNT.WR",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to bypass",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_M_PRE_COUNT.BYP",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with LOW priority",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.LOW",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with MEDIUM priority",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.MED",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with HIGH priority",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.HIGH",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with PANIC NON ISOCH priority (starved)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.PANIC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Full Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_M_RPQ_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_M_RPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Transition from WMM to RMM because of low threshold; Transition from WMM to RMM because of starve counter",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_M_WMM_TO_RMM.LOW_THRESH",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Transition from WMM to RMM because of low threshold",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_M_WMM_TO_RMM.STARVE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Transition from WMM to RMM because of low threshold",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_M_WMM_TO_RMM.VMSE_RETRY",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Full Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_M_WPQ_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_M_WPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_M_WPQ_READ_HIT",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_M_WPQ_WRITE_HIT",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Not getting the requested Major Mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_M_WRONG_MM",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 8",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 9",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 10",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 11",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 12",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 13",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 14",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 15",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; All Banks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 0 (Banks 0-3)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 1 (Banks 4-7)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 2 (Banks 8-11)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 3 (Banks 12-15)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Clockticks in the Memory Controller using a dedicated 48-bit Fixed Counter",
+ "Counter": "FIXED",
+ "EventCode": "0xff",
+ "EventName": "UNC_M_CLOCKTICKS_F",
"PerPkg": "1",
- "PublicDescription": "Counts the number of entries in the Write Pending Queue (WPQ) at each cycle. This can then be used to calculate both the average queue occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The WPQ is used to schedule writes out to the memory controller and to track the requests. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC (memory controller). They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have 'posted' to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies. So, we provide filtering based on if the request has posted or not. By using the 'not posted' filter, we can track how long writes spent in the iMC before completions were sent to the HA. The 'posted' filter, on the other hand, provides information about how much queueing is actually happenning in the iMC for writes before they are actually issued to memory. High average occupancies will generally coincide with high write major mode counts. Is there a filter of sorts???",
"Unit": "iMC"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/uncore-other.json b/tools/perf/pmu-events/arch/x86/skylakex/uncore-other.json
index aa0f67613c4a..f55aeadc630f 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/uncore-other.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/uncore-other.json
@@ -1,135 +1,471 @@
[
{
- "BriefDescription": "Uncore cache clock ticks",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CLOCKTICKS",
"Counter": "0,1,2,3",
- "EventName": "UNC_CHA_CLOCKTICKS",
+ "Deprecated": "1",
+ "EventName": "UNC_C_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; Multiple Core Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.CORE_GTONE",
+ "PerPkg": "1",
+ "UMask": "0x42",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; Multiple Eviction",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EVICT_GTONE",
+ "PerPkg": "1",
+ "UMask": "0x82",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory state lookups; Snoop Needed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x53",
+ "EventName": "UNC_CHA_DIR_LOOKUP.SNP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory state lookups; Snoop Not Needed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x53",
+ "EventName": "UNC_CHA_DIR_LOOKUP.NO_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory state updates; Directory Updated memory write from the HA pipe",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "UNC_CHA_DIR_UPDATE.HA",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory state updates; Directory Updated memory write from TOR pipe",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "UNC_CHA_DIR_UPDATE.TOR",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read request from a remote socket which hit in the HitMe Cache to a line In the E state",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5F",
+ "EventName": "UNC_CHA_HITME_HIT.EX_RDS",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.DATA_READ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.DATA_READ",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.REMOTE_SNOOP",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.REMOTE_SNOOP",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_M",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.M_STATE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_E",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.E_STATE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_S",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.S_STATE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_F",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.F_STATE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Number of times that an RFO hit in S state",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.RFO_HIT_S",
"PerPkg": "1",
+ "UMask": "0x08",
"Unit": "CHA"
},
{
- "BriefDescription": "LLC misses - Uncacheable reads (from cpu) . Derived from unc_cha_tor_inserts.ia_miss",
+ "BriefDescription": "read requests from home agent",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.READS",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "write requests from home agent",
"Counter": "0,1,2,3",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.WRITES",
+ "PerPkg": "1",
+ "UMask": "0x0C",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "read requests from local home agent",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.READS_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "write requests from local home agent",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.WRITES_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Local requests for exclusive ownership of a cache line without receiving data",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.INVITOE_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Local requests for exclusive ownership of a cache line without receiving data",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.INVITOE_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RspIFwd Snoop Responses Received",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPIFWD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RspSFwd Snoop Responses Received",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPSFWD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Rsp*Fwd*WB Snoop Responses Received",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSP_FWD_WB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSPCNFLCTS",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x5C",
+ "EventName": "UNC_H_SNOOP_RESP.RSPCNFLCT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.UNCACHEABLE",
- "Filter": "config1=0x40e33",
+ "EventName": "UNC_C_TOR_INSERTS.IRQ",
"PerPkg": "1",
- "UMask": "0x21",
+ "UMask": "0x31",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IA",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x31",
"Unit": "CHA"
},
{
- "BriefDescription": "LLC misses - Uncacheable reads (from cpu) ",
+ "BriefDescription": "This event is deprecated. ",
"Counter": "0,1,2,3",
+ "Deprecated": "1",
"EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
- "Filter": "config1=0x40e33",
+ "EventName": "UNC_C_TOR_INSERTS.REM_ALL",
"PerPkg": "1",
- "UMask": "0x21",
+ "UMask": "0x30",
"Unit": "CHA"
},
{
- "BriefDescription": "MMIO reads. Derived from unc_cha_tor_inserts.ia_miss",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_FAST_ASSERTED.HORZ",
"Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xA5",
+ "EventName": "UNC_C_FAST_ASSERTED",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations; IRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; PhyAddr Match",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Occupancy; IRQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_CHA_RxC_OCCUPANCY.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA_HIT",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.MMIO_READ",
- "Filter": "config1=0x40040e33",
+ "EventName": "UNC_C_TOR_INSERTS.IRQ_HIT",
"PerPkg": "1",
- "UMask": "0x21",
+ "UMask": "0x11",
"Unit": "CHA"
},
{
- "BriefDescription": "MMIO reads",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA_MISS",
"Counter": "0,1,2,3",
+ "Deprecated": "1",
"EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
- "Filter": "config1=0x40040e33",
+ "EventName": "UNC_C_TOR_INSERTS.IRQ_MISS",
"PerPkg": "1",
"UMask": "0x21",
"Unit": "CHA"
},
{
- "BriefDescription": "MMIO writes. Derived from unc_cha_tor_inserts.ia_miss",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IO_HIT",
"Counter": "0,1,2,3",
+ "Deprecated": "1",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.MMIO_WRITE",
- "Filter": "config1=0x40041e33",
+ "EventName": "UNC_C_TOR_INSERTS.PRQ_HIT",
"PerPkg": "1",
- "UMask": "0x21",
+ "UMask": "0x14",
"Unit": "CHA"
},
{
- "BriefDescription": "MMIO writes",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IO_MISS",
"Counter": "0,1,2,3",
+ "Deprecated": "1",
"EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
- "Filter": "config1=0x40041e33",
+ "EventName": "UNC_C_TOR_INSERTS.PRQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IA_HIT",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.IRQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IA_MISS",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.IRQ_MISS",
"PerPkg": "1",
"UMask": "0x21",
"Unit": "CHA"
},
{
- "BriefDescription": "Streaming stores (full cache line). Derived from unc_cha_tor_inserts.ia_miss",
+ "BriefDescription": "TOR Inserts; Hits from Local IO",
"Counter": "0,1,2,3",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.STREAMING_FULL",
- "Filter": "config1=0x41833",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x21",
+ "UMask": "0x14",
"Unit": "CHA"
},
{
- "BriefDescription": "Streaming stores (full cache line)",
+ "BriefDescription": "TOR Inserts; Misses from Local IO",
"Counter": "0,1,2,3",
"EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
- "Filter": "config1=0x41833",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x21",
+ "UMask": "0x24",
"Unit": "CHA"
},
{
- "BriefDescription": "Streaming stores (partial cache line). Derived from unc_cha_tor_inserts.ia_miss",
+ "BriefDescription": "TOR Inserts; All from Local iA",
"Counter": "0,1,2,3",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.STREAMING_PARTIAL",
- "Filter": "config1=0x41a33",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x21",
+ "UMask": "0x31",
"Unit": "CHA"
},
{
- "BriefDescription": "Streaming stores (partial cache line)",
+ "BriefDescription": "TOR Inserts; Hits from Local iA",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC",
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
- "Filter": "config1=0x41a33",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
"UMask": "0x21",
"Unit": "CHA"
},
{
- "BriefDescription": "read requests from home agent",
+ "BriefDescription": "TOR Occupancy; All from Local iA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA",
+ "PerPkg": "1",
+ "UMask": "0x31",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Hits from Local iA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Misses from Local iA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credits In Use Cycles; BL NCS VN0 Credits",
+ "EventCode": "0x3B",
+ "EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VN0_BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "FaST wire asserted; Horizontal",
"Counter": "0,1,2,3",
- "EventCode": "0x50",
- "EventName": "UNC_CHA_REQUESTS.READS",
+ "EventCode": "0xA5",
+ "EventName": "UNC_CHA_FAST_ASSERTED.HORZ",
"PerPkg": "1",
- "UMask": "0x03",
+ "UMask": "0x02",
"Unit": "CHA"
},
{
- "BriefDescription": "read requests from local home agent",
+ "BriefDescription": "Clockticks of the uncore caching & home agent (CHA)",
"Counter": "0,1,2,3",
- "EventCode": "0x50",
- "EventName": "UNC_CHA_REQUESTS.READS_LOCAL",
+ "EventName": "UNC_CHA_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Normal priority reads issued to the memory controller from the CHA",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x59",
+ "EventName": "UNC_CHA_IMC_READS_COUNT.NORMAL",
"PerPkg": "1",
"UMask": "0x01",
"Unit": "CHA"
},
{
- "BriefDescription": "read requests from remote home agent",
+ "BriefDescription": "CHA to iMC Full Line Writes Issued; Full Line Non-ISOCH",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read requests from a remote socket",
"Counter": "0,1,2,3",
"EventCode": "0x50",
"EventName": "UNC_CHA_REQUESTS.READS_REMOTE",
@@ -138,119 +474,611 @@
"Unit": "CHA"
},
{
- "BriefDescription": "write requests from home agent",
+ "BriefDescription": "RspI Snoop Responses Received",
"Counter": "0,1,2,3",
- "EventCode": "0x50",
- "EventName": "UNC_CHA_REQUESTS.WRITES",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPI",
"PerPkg": "1",
- "UMask": "0x0C",
+ "UMask": "0x01",
"Unit": "CHA"
},
{
- "BriefDescription": "write requests from local home agent",
+ "BriefDescription": "Rsp*WB Snoop Responses Received",
"Counter": "0,1,2,3",
- "EventCode": "0x50",
- "EventName": "UNC_CHA_REQUESTS.WRITES_LOCAL",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSP_WBWB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RspCnflct* Snoop Responses Received",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPCNFLCTS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop filter capacity evictions for M-state entries",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3D",
+ "EventName": "UNC_CHA_SF_EVICTION.M_STATE",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop filter capacity evictions for E-state entries",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3D",
+ "EventName": "UNC_CHA_SF_EVICTION.E_STATE",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop filter capacity evictions for S-state entries",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3D",
+ "EventName": "UNC_CHA_SF_EVICTION.S_STATE",
"PerPkg": "1",
"UMask": "0x04",
"Unit": "CHA"
},
{
- "BriefDescription": "UPI interconnect send bandwidth for payload. Derived from unc_upi_txl_flits.all_data",
+ "BriefDescription": "This event is deprecated. ",
"Counter": "0,1,2,3",
- "EventCode": "0x2",
- "EventName": "UPI_DATA_BANDWIDTH_TX",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.REM_ALL",
"PerPkg": "1",
- "ScaleUnit": "7.11E-06Bytes",
- "UMask": "0xf",
- "Unit": "UPI LL"
+ "UMask": "0x30",
+ "Unit": "CHA"
},
{
- "BriefDescription": "UPI interconnect send bandwidth for payload",
+ "BriefDescription": "Lines Victimized; Lines in M state",
"Counter": "0,1,2,3",
- "EventCode": "0x2",
- "EventName": "UNC_UPI_TxL_FLITS.ALL_DATA",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_M",
"PerPkg": "1",
- "ScaleUnit": "7.11E-06Bytes",
- "UMask": "0xf",
- "Unit": "UPI LL"
+ "UMask": "0x01",
+ "Unit": "CHA"
},
{
- "BriefDescription": "PCI Express bandwidth reading at IIO, part 0",
+ "BriefDescription": "Lines Victimized; Lines in E state",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_E",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in S State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_S",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in F State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_F",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD",
+ "Filter": "config1=0x40433",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRds issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD",
+ "Filter": "config1=0x40233",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO",
+ "Filter": "config1=0x40033",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefDRD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefDRD",
+ "Filter": "config1=0x4b433",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefCRD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefCRD",
+ "Filter": "config1=0x4b233",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that hit the LLC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefRFO",
+ "Filter": "config1=0x4b033",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD",
+ "Filter": "config1=0x40433",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRds issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD",
+ "Filter": "config1=0x40233",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO",
+ "Filter": "config1=0x40033",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefDRD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefDRD",
+ "Filter": "config1=0x4b433",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefCRD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefCRD",
+ "Filter": "config1=0x4b233",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that missed the LLC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefRFO",
+ "Filter": "config1=0x4b033",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD",
+ "Filter": "config1=0x40433",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD",
+ "Filter": "config1=0x40233",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO",
+ "Filter": "config1=0x40033",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefDRD",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefDRD",
+ "Filter": "config1=0x4b433",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefCRD",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefCRD",
+ "Filter": "config1=0x4b233",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefRFO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefRFO",
+ "Filter": "config1=0x4b033",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD",
+ "Filter": "config1=0x40433",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD",
+ "Filter": "config1=0x40233",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO",
+ "Filter": "config1=0x40033",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefDRD",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefDRD",
+ "Filter": "config1=0x4b433",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefCRD",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefCRD",
+ "Filter": "config1=0x4b233",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefRFO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefRFO",
+ "Filter": "config1=0x4b033",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Clockticks of the IIO Traffic Controller",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_IIO_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0",
"Counter": "0,1",
+ "Deprecated": "1",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made to IIO Part0 by the CPU",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made to IIO Part1 by the CPU",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made to IIO Part2 by the CPU",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made to IIO Part3 by the CPU",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part0 by a different IIO unit",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part1 by a different IIO unit",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part2 by a different IIO unit",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part3 by a different IIO unit",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part0",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART0",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
- "ScaleUnit": "4Bytes",
"UMask": "0x04",
"Unit": "IIO"
},
{
- "BriefDescription": "PCI Express bandwidth reading at IIO, part 1",
- "Counter": "0,1",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1",
+ "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part1",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
- "ScaleUnit": "4Bytes",
"UMask": "0x04",
"Unit": "IIO"
},
{
- "BriefDescription": "PCI Express bandwidth reading at IIO, part 2",
- "Counter": "0,1",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2",
+ "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part2",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART2",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
- "ScaleUnit": "4Bytes",
"UMask": "0x04",
"Unit": "IIO"
},
{
- "BriefDescription": "PCI Express bandwidth reading at IIO, part 3",
- "Counter": "0,1",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
+ "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part3",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART3",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
- "ScaleUnit": "4Bytes",
"UMask": "0x04",
"Unit": "IIO"
},
{
- "BriefDescription": "PCI Express bandwidth reading at IIO. Derived from unc_iio_data_req_of_cpu.mem_read.part0",
- "Counter": "0,1",
- "EventCode": "0x83",
- "EventName": "LLC_MISSES.PCIE_READ",
+ "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part0",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART0",
"FCMask": "0x07",
- "Filter": "ch_mask=0x1f",
- "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0 +UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1 +UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2 +UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
- "MetricName": "LLC_MISSES.PCIE_READ",
"PerPkg": "1",
"PortMask": "0x01",
- "ScaleUnit": "4Bytes",
- "UMask": "0x04",
+ "UMask": "0x08",
"Unit": "IIO"
},
{
- "BriefDescription": "PCI Express bandwidth reading at IIO",
- "Counter": "0,1",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
+ "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part1",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART1",
"FCMask": "0x07",
- "Filter": "ch_mask=0x1f",
- "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0 +UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1 +UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2 +UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
- "MetricName": "LLC_MISSES.PCIE_READ",
"PerPkg": "1",
- "PortMask": "0x01",
- "ScaleUnit": "4Bytes",
- "UMask": "0x04",
+ "PortMask": "0x02",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part2",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part3",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x08",
"Unit": "IIO"
},
{
@@ -308,7 +1136,7 @@
"EventName": "LLC_MISSES.PCIE_WRITE",
"FCMask": "0x07",
"Filter": "ch_mask=0x1f",
- "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
+ "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
"MetricName": "LLC_MISSES.PCIE_WRITE",
"PerPkg": "1",
"PortMask": "0x01",
@@ -317,1660 +1145,21612 @@
"Unit": "IIO"
},
{
- "BriefDescription": "PCI Express bandwidth writing at IIO",
+ "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
"Counter": "0,1",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth reading at IIO, part 0",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "ScaleUnit": "4Bytes",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth reading at IIO, part 1",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "ScaleUnit": "4Bytes",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth reading at IIO, part 2",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "ScaleUnit": "4Bytes",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth reading at IIO, part 3",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "ScaleUnit": "4Bytes",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth reading at IIO. Derived from unc_iio_data_req_of_cpu.mem_read.part0",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "LLC_MISSES.PCIE_READ",
"FCMask": "0x07",
"Filter": "ch_mask=0x1f",
- "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
- "MetricName": "LLC_MISSES.PCIE_WRITE",
+ "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
+ "MetricName": "LLC_MISSES.PCIE_READ",
"PerPkg": "1",
"PortMask": "0x01",
"ScaleUnit": "4Bytes",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part0 to an IIO target",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part1 to an IIO target",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part2 to an IIO target",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part3 to an IIO target",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part0 by the CPU",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
"UMask": "0x01",
"Unit": "IIO"
},
{
- "BriefDescription": "Core Cross Snoops Issued; Multiple Core Requests",
+ "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part1 by the CPU",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part2 by the CPU",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part3 by the CPU",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part0 by a different IIO unit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part1 by a different IIO unit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part2 by a different IIO unit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part3 by a different IIO unit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part0 to Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part1 to Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part2 to Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part3 to Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part0 to an IIO target",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part1 to an IIO target",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part2 to an IIO target",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part3 to an IIO target",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part0 to Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part1 to Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part2 to Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part3 to Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part0 to an IIO target",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part1 to an IIO target",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part2 to an IIO target",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part3 to an IIO target",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART0",
+ "FCMask": "0x4",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART1",
+ "FCMask": "0x4",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART2",
+ "FCMask": "0x4",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART3",
+ "FCMask": "0x4",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 0",
+ "Counter": "2,3",
+ "EventCode": "0xD5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART0",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 1",
+ "Counter": "2,3",
+ "EventCode": "0xD5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART1",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 2",
+ "Counter": "2,3",
+ "EventCode": "0xD5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART2",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 3",
+ "Counter": "2,3",
+ "EventCode": "0xD5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART3",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0-3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.ALL_PARTS",
+ "FCMask": "0x4",
+ "PerPkg": "1",
+ "PortMask": "0x0f",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 0-3",
+ "Counter": "2,3",
+ "EventCode": "0xD5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL_PARTS",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x0f",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Total IRP occupancy of inbound read and write requests",
+ "Counter": "0,1",
+ "EventCode": "0xF",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.MEM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "RFO request issued by the IRP unit to the mesh with the intention of writing a partial cacheline",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.RFO",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "PCIITOM request issued by the IRP unit to the mesh with the intention of writing a full cacheline",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.PCITOM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound read requests received by the IRP and inserted into the FAF queue",
+ "Counter": "0,1",
+ "EventCode": "0x18",
+ "EventName": "UNC_I_FAF_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Occupancy of the IRP FAF queue",
+ "Counter": "0,1",
+ "EventCode": "0x19",
+ "EventName": "UNC_I_FAF_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound write (fast path) requests received by the IRP",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.WR_PREF",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Clocks of the Intel Ultra Path Interconnect (UPI)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_UPI_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Data Response packets that go direct to core",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2C",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_DIRECT_ATTEMPTS.D2U",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x12",
+ "EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2K",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Cycles Intel UPI is in L1 power mode (shutdown)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_UPI_L1_POWER_CYCLES",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Cycles the Rx of the Intel UPI is in L0p power mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x25",
+ "EventName": "UNC_UPI_RxL0P_POWER_CYCLES",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "FLITs received which bypassed the Slot0 Receive Buffer",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_UPI_RxL_BYPASSED.SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "FLITs received which bypassed the Slot0 Receive Buffer",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_UPI_RxL_BYPASSED.SLOT1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "FLITs received which bypassed the Slot0 Receive Buffer",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x31",
+ "EventName": "UNC_UPI_RxL_BYPASSED.SLOT2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_FLITS.ALL_NULL",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.NULL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in which the Tx of the Intel Ultra Path Interconnect (UPI) is in L0p power mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_UPI_TxL0P_POWER_CYCLES",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "FLITs that bypassed the TxL Buffer",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_UPI_TxL_BYPASSED",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent; Data",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.DATA",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_FLITS.ALL_NULL",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.NULL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Protocol header and credit FLITs received from any slot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.NON_DATA",
+ "PerPkg": "1",
+ "UMask": "0x97",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Protocol header and credit FLITs transmitted across any slot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.NON_DATA",
+ "PerPkg": "1",
+ "UMask": "0x97",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Idle FLITs transmitted",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.IDLE",
+ "PerPkg": "1",
+ "UMask": "0x47",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Null FLITs transmitted from any slot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.ALL_NULL",
+ "PerPkg": "1",
+ "UMask": "0x27",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Null FLITs received from any slot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.ALL_NULL",
+ "PerPkg": "1",
+ "UMask": "0x27",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid data FLITs received from any slot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.ALL_DATA",
+ "PerPkg": "1",
+ "UMask": "0x0F",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UPI interconnect send bandwidth for payload. Derived from unc_upi_txl_flits.all_data",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UPI_DATA_BANDWIDTH_TX",
+ "PerPkg": "1",
+ "ScaleUnit": "7.11E-06Bytes",
+ "UMask": "0xf",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UPI interconnect send bandwidth for payload",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.ALL_DATA",
+ "PerPkg": "1",
+ "ScaleUnit": "7.11E-06Bytes",
+ "UMask": "0xf",
+ "Unit": "UPI LL"
+ },
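To show how the ScaleUnit above is applied: a minimal Python sketch that scales a raw UNC_UPI_TxL_FLITS.ALL_DATA count by the factor taken verbatim from the entry ("7.11E-06Bytes"). The flit count and interval are hypothetical, and reading 7.11 as roughly 64/9 bytes per counted data-flit slot (with the 1e-06 folding in a bytes-to-megabytes conversion) is an assumption, not something stated in this file.

    # Apply the ScaleUnit from the ALL_DATA entry to a raw flit-slot count;
    # the count and interval are hypothetical placeholders.
    all_data_flits = 180_000_000   # raw count over the interval (hypothetical)
    interval_s = 1.0               # sampling interval in seconds (hypothetical)
    scale = 7.11e-06               # taken verbatim from "ScaleUnit": "7.11E-06Bytes"
    tx_payload = all_data_flits * scale
    print(f"UPI TX payload ~ {tx_payload / interval_s:.2f} scaled-Bytes/s")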
+ {
+ "BriefDescription": "Data Response packets that go direct to Intel UPI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2U",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Traffic in which the M2M to iMC Bypass was not taken",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_M2M_BYPASS_M2M_Egress.NOT_TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles when direct to core mode (which bypasses the CHA) was disabled",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_DIRSTATE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages sent direct to core (bypassing the CHA)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2M_DIRECT2CORE_TAKEN",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of reads in which direct to core transaction were overridden",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_DIRECT2CORE_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookups (any state found)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.ANY",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in I state)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_I",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in S state)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_S",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookups (cacheline found in A state)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_A",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from/to Any state",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.ANY",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from I to S",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.I2S",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from I to A",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.I2A",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from S to I",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.S2I",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from S to A",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.S2A",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from A to I",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.A2I",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from A to S",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.A2S",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Reads to iMC issued at Normal Priority (Non-Isochronous)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.NORMAL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Reads to iMC issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Partial Non-Isochronous writes to the iMC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.PARTIAL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Writes to iMC issued",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.ALL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC; All, regardless of priority",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.NI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch requests that got turn into a demand request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_PROMOTIONS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Inserts into the Memory Controller Prefetch Queue",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x57",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1",
+ "EventName": "UNC_M2M_RxC_AD_INSERTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_M2M_RxC_AD_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_M2M_RxC_BL_INSERTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_M2M_RxC_BL_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_M2M_TxC_AD_INSERTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA",
+ "EventName": "UNC_M2M_TxC_AD_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Allocations; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2M_TxC_BL_INSERTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Occupancy; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2M_TxC_BL_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of reads in which direct to Intel UPI transactions were overridden",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_CREDITS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles when direct to Intel UPI was disabled",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_DIRSTATE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages sent direct to the Intel UPI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x26",
+ "EventName": "UNC_M2M_DIRECT2UPI_TAKEN",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of reads that a message sent direct2 Intel UPI was overridden",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_M2M_DIRECT2UPI_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetches generated by the flow control queue of the M3UPI unit",
+ "Counter": "0,1,2",
+ "EventCode": "0x29",
+ "EventName": "UNC_M3UPI_UPI_PREFETCH_SPAWN",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CHA to iMC Bypass; Taken",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x57",
+ "EventName": "UNC_CHA_BYPASS_CHA_IMC.TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Bypass; Intermediate bypass Taken",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x57",
+ "EventName": "UNC_CHA_BYPASS_CHA_IMC.INTERMEDIATE",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Bypass; Not Taken",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x57",
+ "EventName": "UNC_CHA_BYPASS_CHA_IMC.NOT_TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; Single External Snoops",
"Counter": "0,1,2,3",
"EventCode": "0x33",
- "EventName": "UNC_CHA_CORE_SNP.CORE_GTONE",
+ "EventName": "UNC_CHA_CORE_SNP.EXT_ONE",
"PerPkg": "1",
- "PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
- "UMask": "0x42",
+ "UMask": "0x21",
"Unit": "CHA"
},
{
- "BriefDescription": "Core Cross Snoops Issued; Multiple Eviction",
+ "BriefDescription": "Core Cross Snoops Issued; Single Core Requests",
"Counter": "0,1,2,3",
"EventCode": "0x33",
- "EventName": "UNC_CHA_CORE_SNP.EVICT_GTONE",
+ "EventName": "UNC_CHA_CORE_SNP.CORE_ONE",
"PerPkg": "1",
- "PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
- "UMask": "0x82",
+ "UMask": "0x41",
"Unit": "CHA"
},
{
- "BriefDescription": "Multi-socket cacheline Directory state lookups; Snoop Not Needed",
+ "BriefDescription": "Core Cross Snoops Issued; Single Eviction",
"Counter": "0,1,2,3",
- "EventCode": "0x53",
- "EventName": "UNC_CHA_DIR_LOOKUP.NO_SNP",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EVICT_ONE",
"PerPkg": "1",
- "PublicDescription": "Counts transactions that looked into the multi-socket cacheline Directory state, and therefore did not send a snoop because the Directory indicated it was not needed",
- "UMask": "0x02",
+ "UMask": "0x81",
"Unit": "CHA"
},
{
- "BriefDescription": "Multi-socket cacheline Directory state lookups; Snoop Needed",
+ "BriefDescription": "Core Cross Snoops Issued; Any Single Snoop",
"Counter": "0,1,2,3",
- "EventCode": "0x53",
- "EventName": "UNC_CHA_DIR_LOOKUP.SNP",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.ANY_ONE",
"PerPkg": "1",
- "PublicDescription": "Counts transactions that looked into the multi-socket cacheline Directory state, and sent one or more snoops, because the Directory indicated it was needed",
- "UMask": "0x01",
+ "UMask": "0xE1",
"Unit": "CHA"
},
{
- "BriefDescription": "Multi-socket cacheline Directory state updates; Directory Updated memory write from the HA pipe",
+ "BriefDescription": "Core Cross Snoops Issued; Multiple External Snoops",
"Counter": "0,1,2,3",
- "EventCode": "0x54",
- "EventName": "UNC_CHA_DIR_UPDATE.HA",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EXT_GTONE",
+ "PerPkg": "1",
+ "UMask": "0x22",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; Any Cycle with Multiple Snoops",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.ANY_GTONE",
+ "PerPkg": "1",
+ "UMask": "0xE2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; External Snoop to Remote Node",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EXT_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; Core Request to Remote Node",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.CORE_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; Eviction to Remote Node",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EVICT_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x84",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; Any Snoop to Remote Node",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.ANY_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0xE4",
+ "Unit": "CHA"
+ },
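The UNC_CHA_CORE_SNP umasks above appear to compose from two bit groups: an upper group naming who triggered the snoop (external request 0x20, core request 0x40, eviction 0x80, any 0xE0) and a lower group naming the snoop count or scope (single 0x01, multiple 0x02, remote node 0x04). A small Python sketch checks that inferred composition against the values listed above; the grouping is read off these entries, not taken from a specification.

    # Check the apparent umask composition of UNC_CHA_CORE_SNP sub-events;
    # the bit groupings are inferred from the entries above.
    SOURCE = {"EXT": 0x20, "CORE": 0x40, "EVICT": 0x80, "ANY": 0xE0}
    SCOPE = {"ONE": 0x01, "GTONE": 0x02, "REMOTE": 0x04}
    assert SOURCE["EXT"] | SCOPE["ONE"] == 0x21       # UNC_CHA_CORE_SNP.EXT_ONE
    assert SOURCE["CORE"] | SCOPE["ONE"] == 0x41      # UNC_CHA_CORE_SNP.CORE_ONE
    assert SOURCE["ANY"] | SCOPE["GTONE"] == 0xE2     # UNC_CHA_CORE_SNP.ANY_GTONE
    assert SOURCE["EVICT"] | SCOPE["REMOTE"] == 0x84  # UNC_CHA_CORE_SNP.EVICT_REMOTE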
+ {
+ "BriefDescription": "Counter 0 Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1F",
+ "EventName": "UNC_CHA_COUNTER0_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; Shared hit and op is RdInvOwn, RdInv, Inv*",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5F",
+ "EventName": "UNC_CHA_HITME_HIT.SHARED_OWNREQ",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is WbMtoE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5F",
+ "EventName": "UNC_CHA_HITME_HIT.WBMTOE",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is WbMtoI, WbPushMtoI, WbFlush, or WbMtoS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5F",
+ "EventName": "UNC_CHA_HITME_HIT.WBMTOI_OR_S",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RdCode, RdData, RdDataMigratory, RdCur, RdInvOwn, RdInv, Inv*",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5E",
+ "EventName": "UNC_CHA_HITME_LOOKUP.READ",
"PerPkg": "1",
- "PublicDescription": "Counts only multi-socket cacheline Directory state updates memory writes issued from the HA pipe. This does not include memory write requests which are for I (Invalid) or E (Exclusive) cachelines.",
"UMask": "0x01",
"Unit": "CHA"
},
{
- "BriefDescription": "Multi-socket cacheline Directory state updates; Directory Updated memory write from TOR pipe",
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is WbMtoE, WbMtoI, WbPushMtoI, WbFlush, or WbMtoS",
"Counter": "0,1,2,3",
- "EventCode": "0x54",
- "EventName": "UNC_CHA_DIR_UPDATE.TOR",
+ "EventCode": "0x5E",
+ "EventName": "UNC_CHA_HITME_LOOKUP.WRITE",
"PerPkg": "1",
- "PublicDescription": "Counts only multi-socket cacheline Directory state updates due to memory writes issued from the TOR pipe which are the result of remote transaction hitting the SF/LLC and returning data Core2Core. This does not include memory write requests which are for I (Invalid) or E (Exclusive) cachelines.",
"UMask": "0x02",
"Unit": "CHA"
},
{
- "BriefDescription": "FaST wire asserted; Horizontal",
+ "BriefDescription": "Counts Number of Misses in HitMe Cache; SF/LLC HitS/F and op is RdInvOwn",
"Counter": "0,1,2,3",
- "EventCode": "0xA5",
- "EventName": "UNC_CHA_FAST_ASSERTED.HORZ",
+ "EventCode": "0x60",
+ "EventName": "UNC_CHA_HITME_MISS.SHARED_RDINVOWN",
"PerPkg": "1",
- "PublicDescription": "Counts the number of cycles either the local or incoming distress signals are asserted. Incoming distress includes up, dn and across.",
- "UMask": "0x02",
+ "UMask": "0x20",
"Unit": "CHA"
},
{
- "BriefDescription": "Read request from a remote socket which hit in the HitMe Cache to a line In the E state",
+ "BriefDescription": "Counts Number of Misses in HitMe Cache; No SF/LLC HitS/F and op is RdInvOwn",
"Counter": "0,1,2,3",
- "EventCode": "0x5F",
- "EventName": "UNC_CHA_HITME_HIT.EX_RDS",
+ "EventCode": "0x60",
+ "EventName": "UNC_CHA_HITME_MISS.NOTSHARED_RDINVOWN",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Misses in HitMe Cache; op is RdCode, RdData, RdDataMigratory, RdCur, RdInv, Inv*",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_CHA_HITME_MISS.READ_OR_INV",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache; op is RspIFwd or RspIFwdWb for a local request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.DEALLOCATE_RSPFWDI_LOC",
"PerPkg": "1",
- "PublicDescription": "Counts read requests from a remote socket which hit in the HitME cache (used to cache the multi-socket Directory state) to a line in the E(Exclusive) state. This includes the following read opcodes (RdCode, RdData, RdDataMigratory, RdCur, RdInv*, Inv*)",
"UMask": "0x01",
"Unit": "CHA"
},
{
- "BriefDescription": "Normal priority reads issued to the memory controller from the CHA",
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache; op is RspIFwd or RspIFwdWb for a remote request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.RSPFWDI_REM",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache; Update HitMe Cache to SHARed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.SHARED",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache; Update HitMe Cache on RdInvOwn even if not RspFwdI*",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.RDINVOWN",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache; Deallocate HtiME$ on Reads without RspFwdI*",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.DEALLOCATE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "HA to iMC Reads Issued; ISOCH",
"Counter": "0,1,2,3",
"EventCode": "0x59",
- "EventName": "UNC_CHA_IMC_READS_COUNT.NORMAL",
+ "EventName": "UNC_CHA_IMC_READS_COUNT.PRIORITY",
"PerPkg": "1",
- "PublicDescription": "Counts when a normal (Non-Isochronous) read is issued to any of the memory controller channels from the CHA.",
- "UMask": "0x01",
+ "UMask": "0x02",
"Unit": "CHA"
},
{
- "BriefDescription": "CHA to iMC Full Line Writes Issued; Full Line Non-ISOCH",
+ "BriefDescription": "Writes Issued to the iMC by the HA; Partial Non-ISOCH",
"Counter": "0,1,2,3",
"EventCode": "0x5B",
- "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Writes Issued to the iMC by the HA; ISOCH Full Line",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL_PRIORITY",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Writes Issued to the iMC by the HA; ISOCH Partial",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL_PRIORITY",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Writes Issued to the iMC by the HA; Full Line MIG",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL_MIG",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Writes Issued to the iMC by the HA; Partial MIG",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL_MIG",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of times IODC entry allocation is attempted; Number of IODC allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x62",
+ "EventName": "UNC_CHA_IODC_ALLOC.INVITOM",
"PerPkg": "1",
- "PublicDescription": "Counts when a normal (Non-Isochronous) full line write is issued from the CHA to the any of the memory controller channels.",
"UMask": "0x01",
"Unit": "CHA"
},
{
- "BriefDescription": "Lines Victimized; Lines in E state",
+ "BriefDescription": "Counts Number of times IODC entry allocation is attempted; Number of IODC allocations dropped due to IODC Full",
"Counter": "0,1,2,3",
- "EventCode": "0x37",
- "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_E",
+ "EventCode": "0x62",
+ "EventName": "UNC_CHA_IODC_ALLOC.IODCFULL",
"PerPkg": "1",
- "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x02",
"Unit": "CHA"
},
{
- "BriefDescription": "Lines Victimized; Lines in F State",
+ "BriefDescription": "Counts Number of times IODC entry allocation is attempted; Number of IDOC allocation dropped due to OSB gate",
"Counter": "0,1,2,3",
- "EventCode": "0x37",
- "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_F",
+ "EventCode": "0x62",
+ "EventName": "UNC_CHA_IODC_ALLOC.OSBGATED",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts number of IODC deallocations; IODC deallocated due to WbMtoE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x63",
+ "EventName": "UNC_CHA_IODC_DEALLOC.WBMTOE",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts number of IODC deallocations; IODC deallocated due to WbMtoI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x63",
+ "EventName": "UNC_CHA_IODC_DEALLOC.WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts number of IODC deallocations; IODC deallocated due to WbPushMtoI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x63",
+ "EventName": "UNC_CHA_IODC_DEALLOC.WBPUSHMTOI",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts number of IODC deallocations; IODC deallocated due to conflicting transaction",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x63",
+ "EventName": "UNC_CHA_IODC_DEALLOC.SNPOUT",
"PerPkg": "1",
- "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x08",
"Unit": "CHA"
},
{
- "BriefDescription": "Lines Victimized; Lines in M state",
+ "BriefDescription": "Counts number of IODC deallocations; IODC deallocated due to any reason",
"Counter": "0,1,2,3",
+ "EventCode": "0x63",
+ "EventName": "UNC_CHA_IODC_DEALLOC.ALL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.WRITE",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.WRITE",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.ANY",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.ANY",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.LOCAL",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x31",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.REMOTE",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x91",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.LOCAL_ALL",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
"EventCode": "0x37",
- "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_M",
+ "EventName": "UNC_C_LLC_VICTIMS.LOCAL",
"PerPkg": "1",
- "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
- "UMask": "0x01",
+ "UMask": "0x2f",
"Unit": "CHA"
},
{
- "BriefDescription": "Lines Victimized; Lines in S State",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.REMOTE_ALL",
"Counter": "0,1,2,3",
+ "Deprecated": "1",
"EventCode": "0x37",
- "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_S",
+ "EventName": "UNC_C_LLC_VICTIMS.REMOTE",
"PerPkg": "1",
- "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
- "UMask": "0x04",
+ "UMask": "0x80",
"Unit": "CHA"
},
{
- "BriefDescription": "Number of times that an RFO hit in S state.",
+ "BriefDescription": "Cbo Misc; Silent Snoop Eviction",
"Counter": "0,1,2,3",
"EventCode": "0x39",
- "EventName": "UNC_CHA_MISC.RFO_HIT_S",
+ "EventName": "UNC_CHA_MISC.RSPI_WAS_FSE",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cbo Misc; Write Combining Aliasing",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.WC_ALIASING",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cbo Misc; CV0 Prefetch Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.CV0_PREF_VIC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cbo Misc; CV0 Prefetch Miss",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.CV0_PREF_MISS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x55",
+ "EventName": "UNC_CHA_OSB",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty; MC0_SMI0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC0_SMI0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty; MC1_SMI1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC1_SMI1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty; EDC0_SMI2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.EDC0_SMI2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty; EDC1_SMI3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.EDC1_SMI3",
"PerPkg": "1",
- "PublicDescription": "Counts when a RFO (the Read for Ownership issued before a write) request hit a cacheline in the S (Shared) state.",
"UMask": "0x08",
"Unit": "CHA"
},
{
- "BriefDescription": "Local requests for exclusive ownership of a cache line without receiving data",
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty; EDC2_SMI4",
"Counter": "0,1,2,3",
- "EventCode": "0x50",
- "EventName": "UNC_CHA_REQUESTS.INVITOE_LOCAL",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.EDC2_SMI4",
"PerPkg": "1",
- "PublicDescription": "Counts the total number of requests coming from a unit on this socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.",
"UMask": "0x10",
"Unit": "CHA"
},
{
- "BriefDescription": "Local requests for exclusive ownership of a cache line without receiving data",
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty; EDC3_SMI5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.EDC3_SMI5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "write requests from remote home agent",
"Counter": "0,1,2,3",
"EventCode": "0x50",
- "EventName": "UNC_CHA_REQUESTS.INVITOE_REMOTE",
+ "EventName": "UNC_CHA_REQUESTS.WRITES_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.ALL",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_SNOOPS_SENT.",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent; Broadcast or directed Snoops sent for Local Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent; Broadcast or directed Snoops sent for Remote Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.BCST_LOCAL",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_SNOOPS_SENT.BCST_LOC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.BCST_REMOTE",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_SNOOPS_SENT.BCST_REM",
"PerPkg": "1",
- "PublicDescription": "Counts the total number of requests coming from a remote socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.",
"UMask": "0x20",
"Unit": "CHA"
},
{
- "BriefDescription": "Ingress (from CMS) Allocations; IRQ",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.DIRECT_LOCAL",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_SNOOPS_SENT.DIRECT_LOC",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.DIRECT_REMOTE",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_SNOOPS_SENT.DIRECT_REM",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received : RspS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSP_WBWB",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x5C",
+ "EventName": "UNC_H_SNOOP_RESP.RSP_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received; RspFwd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPFWD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSPI",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x5D",
+ "EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSPI",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSPS",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x5D",
+ "EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSPS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSPIFWD",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x5D",
+ "EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSPIFWD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSPSFWD",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x5D",
+ "EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSPSFWD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSP_WB",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x5D",
+ "EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSP_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSP_FWD_WB",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x5D",
+ "EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSP_FWD_WB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSPCNFLCT",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x5D",
+ "EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSPCNFLCT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSPFWD",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x5D",
+ "EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSPFWD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.EVICT",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.EVICT",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.PRQ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IPQ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.HIT",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.HIT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.MISS",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.MISS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.EVICT",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.EVICT",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.PRQ",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IPQ",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.HIT",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.HIT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.MISS",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WbPushMtoI; Pushed to LLC",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x56",
+ "EventName": "UNC_CHA_WB_PUSH_MTOI.LLC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WbPushMtoI; Pushed to Memory",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x56",
+ "EventName": "UNC_CHA_WB_PUSH_MTOI.MEM",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty; MC0_SMI0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC0_SMI0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty; MC1_SMI1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC1_SMI1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty; EDC0_SMI2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.EDC0_SMI2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty; EDC1_SMI3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.EDC1_SMI3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty; EDC2_SMI4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.EDC2_SMI4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty; EDC3_SMI5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.EDC3_SMI5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IO",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.LOC_IO",
+ "PerPkg": "1",
+ "UMask": "0x34",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.LOC_IA",
+ "PerPkg": "1",
+ "UMask": "0x31",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.LOC_ALL",
+ "PerPkg": "1",
+ "UMask": "0x37",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IO",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.LOC_IO",
+ "PerPkg": "1",
+ "UMask": "0x34",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IA",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.LOC_IA",
+ "PerPkg": "1",
+ "UMask": "0x31",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.LOC_ALL",
+ "PerPkg": "1",
+ "UMask": "0x37",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core PMA Events; C1 State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x17",
+ "EventName": "UNC_CHA_CORE_PMA.C1_STATE",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core PMA Events; C1 Transition",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x17",
+ "EventName": "UNC_CHA_CORE_PMA.C1_TRANSITION",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core PMA Events; C6 State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x17",
+ "EventName": "UNC_CHA_CORE_PMA.C6_STATE",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core PMA Events; C6 Transition",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x17",
+ "EventName": "UNC_CHA_CORE_PMA.C6_TRANSITION",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core PMA Events; GV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x17",
+ "EventName": "UNC_CHA_CORE_PMA.GV",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CMS_CLOCKTICKS",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_H_CLOCK",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements; Up",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAE",
+ "EventName": "UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements; Down",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAE",
+ "EventName": "UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Left and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Left and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Right and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Right and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Left and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA9",
+ "EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Left and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA9",
+ "EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Right and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA9",
+ "EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Right and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA9",
+ "EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Left and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAB",
+ "EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Left and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAB",
+ "EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Right and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAB",
+ "EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Right and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAB",
+ "EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use; Left",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAD",
+ "EventName": "UNC_CHA_HORZ_RING_IV_IN_USE.LEFT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use; Right",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAD",
+ "EventName": "UNC_CHA_HORZ_RING_IV_IN_USE.RIGHT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; AD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_RING_BOUNCES_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_RING_BOUNCES_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; BL",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_RING_BOUNCES_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_RING_BOUNCES_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; AD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_RING_BOUNCES_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; Acknowledgements to core",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_RING_BOUNCES_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; Data Responses to core",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_RING_BOUNCES_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; Snoops of processor's cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_RING_BOUNCES_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; AD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; BL",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; Acknowledgements to Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; AD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; Acknowledgements to core",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; Data Responses to core",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; Snoops of processor's cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SRC_THRTL",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xA4",
+ "EventName": "UNC_C_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations; IRQ Rejected",
"Counter": "0,1,2,3",
"EventCode": "0x13",
- "EventName": "UNC_CHA_RxC_INSERTS.IRQ",
+ "EventName": "UNC_CHA_RxC_INSERTS.IRQ_REJ",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations; IPQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations; PRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations; PRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.PRQ_REJ",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations; RRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.RRQ",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations; WBQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.WBQ",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; AD REQ on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.AD_REQ_VN0",
"PerPkg": "1",
- "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
"UMask": "0x01",
"Unit": "CHA"
},
{
- "BriefDescription": "Ingress (from CMS) Request Queue Rejects; PhyAddr Match",
+ "BriefDescription": "Ingress Probe Queue Rejects; AD RSP on VN0",
"Counter": "0,1,2,3",
- "EventCode": "0x19",
- "EventName": "UNC_CHA_RxC_IRQ1_REJECT.PA_MATCH",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; BL RSP on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; BL WB on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; BL NCB on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; BL NCS on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ1_REJECT.ANY0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x23",
+ "EventName": "UNC_H_RxC_IPQ1_REJECT.ANY_IPQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; HA",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; LLC Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; SF Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; Merging these two together to make room for ANY_REJECT_*0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; Allow Snoop",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; PhyAddr Match",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.PA_MATCH",
"PerPkg": "1",
- "PublicDescription": "Ingress (from CMS) Request Queue Rejects; PhyAddr Match",
"UMask": "0x80",
"Unit": "CHA"
},
{
- "BriefDescription": "Ingress (from CMS) Occupancy; IRQ",
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; AD REQ on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; AD RSP on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL RSP on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL WB on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL NCB on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL NCS on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ1_REJECT.ANY0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_RxC_IRQ1_REJECT.ANY_REJECT_IRQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; HA",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; LLC Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; SF Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; Merging these two together to make room for ANY_REJECT_*0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; Allow Snoop",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; AD REQ on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; AD RSP on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; BL RSP on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; BL WB on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; BL NCB on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; BL NCS on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; AD REQ on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; AD RSP on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; BL RSP on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; BL WB on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; BL NCB on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; BL NCS on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ1_REJECT.ANY0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x25",
+ "EventName": "UNC_H_RxC_ISMQ1_REJECT.ANY_ISMQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; HA",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x25",
+ "EventName": "UNC_CHA_RxC_ISMQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ1_RETRY.ANY0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x2D",
+ "EventName": "UNC_H_RxC_ISMQ1_RETRY.ANY",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; HA",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2D",
+ "EventName": "UNC_CHA_RxC_ISMQ1_RETRY.HA",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Occupancy; IPQ",
"EventCode": "0x11",
- "EventName": "UNC_CHA_RxC_OCCUPANCY.IRQ",
+ "EventName": "UNC_CHA_RxC_OCCUPANCY.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Occupancy; RRQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_CHA_RxC_OCCUPANCY.RRQ",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Occupancy; WBQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_CHA_RxC_OCCUPANCY.WBQ",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; AD REQ on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.AD_REQ_VN0",
"PerPkg": "1",
- "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
"UMask": "0x01",
"Unit": "CHA"
},
{
- "BriefDescription": "Snoop filter capacity evictions for E-state entries.",
+ "BriefDescription": "Other Retries; AD RSP on VN0",
"Counter": "0,1,2,3",
- "EventCode": "0x3D",
- "EventName": "UNC_CHA_SF_EVICTION.E_STATE",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.AD_RSP_VN0",
"PerPkg": "1",
- "PublicDescription": "Counts snoop filter capacity evictions for entries tracking exclusive lines in the cores cache. Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry. Does not count clean evictions such as when a cores cache replaces a tracked cacheline with a new cacheline.",
"UMask": "0x02",
"Unit": "CHA"
},
{
- "BriefDescription": "Snoop filter capacity evictions for M-state entries.",
+ "BriefDescription": "Other Retries; BL RSP on VN0",
"Counter": "0,1,2,3",
- "EventCode": "0x3D",
- "EventName": "UNC_CHA_SF_EVICTION.M_STATE",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; BL WB on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; BL NCB on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; BL NCS on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER1_RETRY.ANY0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_RxC_OTHER1_RETRY.ANY",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; HA",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.HA",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; LLC Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; SF Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; Merging these two together to make room for ANY_REJECT_*0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; Allow Snoop",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; PhyAddr Match",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; AD REQ on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.AD_REQ_VN0",
"PerPkg": "1",
- "PublicDescription": "Counts snoop filter capacity evictions for entries tracking modified lines in the cores cache. Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry. Does not count clean evictions such as when a cores cache replaces a tracked cacheline with a new cacheline.",
"UMask": "0x01",
"Unit": "CHA"
},
{
- "BriefDescription": "Snoop filter capacity evictions for S-state entries.",
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; AD RSP on VN0",
"Counter": "0,1,2,3",
- "EventCode": "0x3D",
- "EventName": "UNC_CHA_SF_EVICTION.S_STATE",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL RSP on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_RSP_VN0",
"PerPkg": "1",
- "PublicDescription": "Counts snoop filter capacity evictions for entries tracking shared lines in the cores cache. Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry. Does not count clean evictions such as when a cores cache replaces a tracked cacheline with a new cacheline.",
"UMask": "0x04",
"Unit": "CHA"
},
{
- "BriefDescription": "RspCnflct* Snoop Responses Received",
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL WB on VN0",
"Counter": "0,1,2,3",
- "EventCode": "0x5C",
- "EventName": "UNC_CHA_SNOOP_RESP.RSPCNFLCTS",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL NCB on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL NCS on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ1_REJECT.ANY0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_RxC_PRQ1_REJECT.ANY_PRQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; HA",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; LLC Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; SF Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; LLC OR SF Way",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; Allow Snoop",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.ALLOW_SNP",
"PerPkg": "1",
- "PublicDescription": "Counts when a a transaction with the opcode type RspCnflct* Snoop Response was received. This is returned when a snoop finds an existing outstanding transaction in a remote caching agent. This triggers conflict resolution hardware. This covers both the opcode RspCnflct and RspCnflctWbI.",
"UMask": "0x40",
"Unit": "CHA"
},
{
- "BriefDescription": "RspI Snoop Responses Received",
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; PhyAddr Match",
"Counter": "0,1,2,3",
- "EventCode": "0x5C",
- "EventName": "UNC_CHA_SNOOP_RESP.RSPI",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; AD REQ on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AD_REQ_VN0",
"PerPkg": "1",
- "PublicDescription": "Counts when a transaction with the opcode type RspI Snoop Response was received which indicates the remote cache does not have the data, or when the remote cache silently evicts data (such as when an RFO: the Read for Ownership issued before a write hits non-modified data).",
"UMask": "0x01",
"Unit": "CHA"
},
{
- "BriefDescription": "RspIFwd Snoop Responses Received",
+ "BriefDescription": "Request Queue Retries; AD RSP on VN0",
"Counter": "0,1,2,3",
- "EventCode": "0x5C",
- "EventName": "UNC_CHA_SNOOP_RESP.RSPIFWD",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; BL RSP on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_RSP_VN0",
"PerPkg": "1",
- "PublicDescription": "Counts when a a transaction with the opcode type RspIFwd Snoop Response was received which indicates a remote caching agent forwarded the data and the requesting agent is able to acquire the data in E (Exclusive) or M (modified) states. This is commonly returned with RFO (the Read for Ownership issued before a write) transactions. The snoop could have either been to a cacheline in the M,E,F (Modified, Exclusive or Forward) states.",
"UMask": "0x04",
"Unit": "CHA"
},
{
- "BriefDescription": "RspSFwd Snoop Responses Received",
+ "BriefDescription": "Request Queue Retries; BL WB on VN0",
"Counter": "0,1,2,3",
- "EventCode": "0x5C",
- "EventName": "UNC_CHA_SNOOP_RESP.RSPSFWD",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_WB_VN0",
"PerPkg": "1",
- "PublicDescription": "Counts when a a transaction with the opcode type RspSFwd Snoop Response was received which indicates a remote caching agent forwarded the data but held on to its current copy. This is common for data and code reads that hit in a remote socket in E (Exclusive) or F (Forward) state.",
"UMask": "0x08",
"Unit": "CHA"
},
{
- "BriefDescription": "Rsp*Fwd*WB Snoop Responses Received",
+ "BriefDescription": "Request Queue Retries; BL NCB on VN0",
"Counter": "0,1,2,3",
- "EventCode": "0x5C",
- "EventName": "UNC_CHA_SNOOP_RESP.RSP_FWD_WB",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; BL NCS on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCS_VN0",
"PerPkg": "1",
- "PublicDescription": "Counts when a transaction with the opcode type Rsp*Fwd*WB Snoop Response was received which indicates the data was written back to its home socket, and the cacheline was forwarded to the requestor socket. This snoop response is only used in &gt;= 4 socket systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to its home socket to be written back to memory.",
"UMask": "0x20",
"Unit": "CHA"
},
{
- "BriefDescription": "Rsp*WB Snoop Responses Received",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q1_RETRY.ANY0",
"Counter": "0,1,2,3",
- "EventCode": "0x5C",
- "EventName": "UNC_CHA_SNOOP_RESP.RSP_WBWB",
+ "Deprecated": "1",
+ "EventCode": "0x2B",
+ "EventName": "UNC_H_RxC_REQ_Q1_RETRY.ANY",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; HA",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.HA",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; LLC Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; SF Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.VICTIM",
"PerPkg": "1",
- "PublicDescription": "Counts when a transaction with the opcode type Rsp*WB Snoop Response was received which indicates which indicates the data was written back to its home. This is returned when a non-RFO request hits a cacheline in the Modified state. The Cache can either downgrade the cacheline to a S (Shared) or I (Invalid) state depending on how the system has been configured. This response will also be sent when a cache requests E (Exclusive) ownership of a cache line without receiving data, because the cache must acquire ownership.",
"UMask": "0x10",
"Unit": "CHA"
},
{
- "BriefDescription": "Clockticks of the IIO Traffic Controller",
+ "BriefDescription": "Request Queue Retries; Merging these two together to make room for ANY_REJECT_*0",
"Counter": "0,1,2,3",
- "EventCode": "0x1",
- "EventName": "UNC_IIO_CLOCKTICKS",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.LLC_OR_SF_WAY",
"PerPkg": "1",
- "PublicDescription": "Counts clockticks of the 1GHz trafiic controller clock in the IIO unit.",
- "Unit": "IIO"
+ "UMask": "0x20",
+ "Unit": "CHA"
},
{
- "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0-3",
+ "BriefDescription": "Request Queue Retries; Allow Snoop",
"Counter": "0,1,2,3",
- "EventCode": "0xC2",
- "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.ALL_PARTS",
- "FCMask": "0x4",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; PhyAddr Match",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; AD REQ on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; AD RSP on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; BL RSP on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; BL WB on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; BL NCB on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; BL NCS on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ1_REJECT.ANY0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_RxC_RRQ1_REJECT.ANY_RRQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; HA",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; LLC Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; SF Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; Merging these two together to make room for ANY_REJECT_*0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; Allow Snoop",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; PhyAddr Match",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; AD REQ on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; AD RSP on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; BL RSP on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; BL WB on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; BL NCB on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; BL NCS on VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ1_REJECT.ANY0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_RxC_WBQ1_REJECT.ANY_WBQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; HA",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; LLC Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; SF Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; Victim",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; Merging these two together to make room for ANY_REJECT_*0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; Allow Snoop",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; PhyAddr Match",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_RxR_BYPASS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_RxR_BYPASS.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_RxR_BYPASS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_RxR_BYPASS.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_RxR_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_RxR_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; IFV - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.IFV",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_RxR_INSERTS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_RxR_INSERTS.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_RxR_INSERTS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_RxR_INSERTS.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_RxR_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_RxR_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9F",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9F",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9F",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9F",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9F",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9F",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x97",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x97",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x97",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x97",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x97",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x97",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x95",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x95",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x95",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x95",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x95",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x95",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x99",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x99",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x99",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x99",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x99",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x99",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9B",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9B",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9B",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9B",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9E",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9E",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9E",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_BYPASS.IV",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_H_TxR_VERT_BYPASS.IV_AG1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9E",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9E",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9E",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_FULL.IV",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x92",
+ "EventName": "UNC_H_TxR_VERT_CYCLES_FULL.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x93",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x93",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x93",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_NE.IV",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x93",
+ "EventName": "UNC_H_TxR_VERT_CYCLES_NE.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x93",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x93",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x93",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_INSERTS.IV",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x91",
+ "EventName": "UNC_H_TxR_VERT_INSERTS.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_OCCUPANCY.IV",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x90",
+ "EventName": "UNC_H_TxR_VERT_OCCUPANCY.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Up and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_VERT_RING_AD_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Up and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_VERT_RING_AD_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Down and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_VERT_RING_AD_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Down and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_VERT_RING_AD_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Up and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA8",
+ "EventName": "UNC_CHA_VERT_RING_AK_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Up and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA8",
+ "EventName": "UNC_CHA_VERT_RING_AK_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Down and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA8",
+ "EventName": "UNC_CHA_VERT_RING_AK_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Down and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA8",
+ "EventName": "UNC_CHA_VERT_RING_AK_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Up and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAA",
+ "EventName": "UNC_CHA_VERT_RING_BL_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Up and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAA",
+ "EventName": "UNC_CHA_VERT_RING_BL_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Down and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAA",
+ "EventName": "UNC_CHA_VERT_RING_BL_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Down and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAA",
+ "EventName": "UNC_CHA_VERT_RING_BL_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use; Up",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAC",
+ "EventName": "UNC_CHA_VERT_RING_IV_IN_USE.UP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use; Down",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAC",
+ "EventName": "UNC_CHA_VERT_RING_IV_IN_USE.DN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; External RspHitFSE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EXT_RSP_HITFSE",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Core RspHitFSE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.CORE_RSP_HITFSE",
+ "PerPkg": "1",
+ "UMask": "0x41",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Evict RspHitFSE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EVICT_RSP_HITFSE",
+ "PerPkg": "1",
+ "UMask": "0x81",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Any RspHitFSE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.ANY_RSP_HITFSE",
+ "PerPkg": "1",
+ "UMask": "0xE1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; External RspSFwdFE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EXT_RSPS_FWDFE",
+ "PerPkg": "1",
+ "UMask": "0x22",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Core RspSFwdFE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.CORE_RSPS_FWDFE",
+ "PerPkg": "1",
+ "UMask": "0x42",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Evict RspSFwdFE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EVICT_RSPS_FWDFE",
+ "PerPkg": "1",
+ "UMask": "0x82",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Any RspSFwdFE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.ANY_RSPS_FWDFE",
+ "PerPkg": "1",
+ "UMask": "0xE2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; External RspIFwdFE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EXT_RSPI_FWDFE",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Core RspIFwdFE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.CORE_RSPI_FWDFE",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Evict RspIFwdFE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EVICT_RSPI_FWDFE",
+ "PerPkg": "1",
+ "UMask": "0x84",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Any RspIFwdFE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.ANY_RSPI_FWDFE",
+ "PerPkg": "1",
+ "UMask": "0xE4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; External RspSFwdM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EXT_RSPS_FWDM",
+ "PerPkg": "1",
+ "UMask": "0x28",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Core RspSFwdM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.CORE_RSPS_FWDM",
+ "PerPkg": "1",
+ "UMask": "0x48",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Evict RspSFwdM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EVICT_RSPS_FWDM",
+ "PerPkg": "1",
+ "UMask": "0x88",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Any RspSFwdM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.ANY_RSPS_FWDM",
+ "PerPkg": "1",
+ "UMask": "0xE8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; External RspIFwdM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EXT_RSPI_FWDM",
+ "PerPkg": "1",
+ "UMask": "0x30",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Core RspIFwdM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.CORE_RSPI_FWDM",
+ "PerPkg": "1",
+ "UMask": "0x50",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Evict RspIFwdM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EVICT_RSPI_FWDM",
+ "PerPkg": "1",
+ "UMask": "0x90",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.ANY_RSPI_FWDM",
+ "PerPkg": "1",
+ "UMask": "0xF0",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.IPQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x18",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.IPQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x28",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.RRQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x50",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.RRQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x60",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.WBQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x90",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.WBQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0xA0",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IO_HIT",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.PRQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IO_MISS",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.PRQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.IPQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x18",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.IPQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x28",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; All from Local IO",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO",
+ "PerPkg": "1",
+ "UMask": "0x34",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; All from Local iA and IO",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.ALL_IO_IA",
+ "PerPkg": "1",
+ "UMask": "0x35",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Hits from Local",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.ALL_HIT",
+ "PerPkg": "1",
+ "UMask": "0x15",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Misses from Local",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.ALL_MISS",
+ "PerPkg": "1",
+ "UMask": "0x25",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; All from Local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO",
+ "PerPkg": "1",
+ "UMask": "0x34",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Hits from Local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Misses from Local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Hits from Local",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.ALL_HIT",
+ "PerPkg": "1",
+ "UMask": "0x17",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Misses from Local",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.ALL_MISS",
+ "PerPkg": "1",
+ "UMask": "0x27",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credit Allocations; VNA Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.VNA",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credit Allocations; VN0 Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credit Allocations; AD REQ Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credit Allocations; AD RSP VN0 Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credit Allocations; BL RSP Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credit Allocations; BL DRS Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credit Allocations; BL NCB Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credit Allocations; BL NCS Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credits In Use Cycles; AD VNA Credits",
+ "EventCode": "0x3B",
+ "EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VNA_AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credits In Use Cycles; BL VNA Credits",
+ "EventCode": "0x3B",
+ "EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VNA_BL",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credits In Use Cycles; AD REQ VN0 Credits",
+ "EventCode": "0x3B",
+ "EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VN0_AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credits In Use Cycles; AD RSP VN0 Credits",
+ "EventCode": "0x3B",
+ "EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VN0_AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credits In Use Cycles; BL RSP VN0 Credits",
+ "EventCode": "0x3B",
+ "EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VN0_BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credits In Use Cycles; BL DRS VN0 Credits",
+ "EventCode": "0x3B",
+ "EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VN0_BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credits In Use Cycles; BL NCB VN0 Credits",
+ "EventCode": "0x3B",
+ "EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VN0_BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; Non UPI AK Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; Non UPI IV Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; Non UPI AK Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; Non UPI IV Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; Non UPI AK Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; Non UPI IV Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; Non UPI AK Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; Non UPI IV Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; Non UPI AK Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; Non UPI IV Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; Non UPI AK Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; Non UPI IV Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; Non UPI AK Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; Non UPI IV Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; Non UPI AK Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; Non UPI IV Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; Non UPI AK Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; Non UPI IV Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; ANY0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.ALL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent; Broadcast snoop for Local Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.BCST_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent; Broadcast snoops for Remote Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.BCST_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent; Directed snoops for Local Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.DIRECT_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent; Directed snoops for Remote Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.DIRECT_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPI",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspIFwd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPIFWD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspSFwd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPSFWD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; Rsp*WB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSP_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; Rsp*FWD*WB",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSP_FWD_WB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspCnflct",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPCNFLCT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspFwd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPFWD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_CHA_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Data Read Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ",
"PerPkg": "1",
- "PortMask": "0x0f",
- "PublicDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0-3",
"UMask": "0x03",
- "Unit": "IIO"
+ "Unit": "CHA"
},
{
- "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0",
+ "BriefDescription": "Cache and Snoop Filter Lookups; Write Requests",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.WRITE",
+ "PerPkg": "1",
+ "UMask": "0x05",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; External Snoop Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_SNOOP",
+ "PerPkg": "1",
+ "UMask": "0x09",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Any Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.ANY",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Local",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x31",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Remote",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x91",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_M",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.M_STATE",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_E",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.E_STATE",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_S",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.S_STATE",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_F",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.F_STATE",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Local - All Lines",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x2F",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.REMOTE_ALL",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; IRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; SF/LLC Evictions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.EVICT",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; PRQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; IPQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Hit (Not a Miss)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.HIT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Miss",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.MISS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.LOC_ALL",
+ "PerPkg": "1",
+ "UMask": "0x37",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IPQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x18",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IPQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x28",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.RRQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x50",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.RRQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x60",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.WBQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x90",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.WBQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0xA0",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; IRQ",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; SF/LLC Evictions",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.EVICT",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; PRQ",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; IPQ",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Hit (Not a Miss)",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.HIT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Miss",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.MISS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.ALL_FROM_LOC",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_ALL",
+ "PerPkg": "1",
+ "UMask": "0x37",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IPQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x18",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IPQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x28",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Source Throttle",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; ANY0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; ANY0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; ANY0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x25",
+ "EventName": "UNC_CHA_RxC_ISMQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; ANY0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2D",
+ "EventName": "UNC_CHA_RxC_ISMQ1_RETRY.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; ANY0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; ANY0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; ANY0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; ANY0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9E",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x93",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "FaST wire asserted; Vertical",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA5",
+ "EventName": "UNC_CHA_FAST_ASSERTED.VERT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Local - Lines in M State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_M",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Local - Lines in E State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_E",
+ "PerPkg": "1",
+ "UMask": "0x22",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Local - Lines in S State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_S",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Local - Lines in F State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_F",
+ "PerPkg": "1",
+ "UMask": "0x28",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Remote - Lines in M State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_M",
+ "PerPkg": "1",
+ "UMask": "0x81",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Remote - Lines in E State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_E",
+ "PerPkg": "1",
+ "UMask": "0x82",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Remote - Lines in S State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_S",
+ "PerPkg": "1",
+ "UMask": "0x84",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Remote - Lines in F State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_F",
+ "PerPkg": "1",
+ "UMask": "0x88",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Remote - All Lines",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_ALL",
+ "PerPkg": "1",
+ "UMask": "0x8F",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; All from Local",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.ALL_FROM_LOC",
+ "PerPkg": "1",
+ "UMask": "0x37",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RdCur misses from Local IO",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_RDCUR",
+ "Filter": "config1=0x43C33",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO misses from Local IO",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_RFO",
+ "Filter": "config1=0x40033",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; ItoM misses from Local IO",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM",
+ "Filter": "config1=0x49033",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; ITOM Misses from Local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM",
+ "Filter": "config1=0x49033",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RDCUR misses from Local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RDCUR",
+ "Filter": "config1=0x43C33",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO misses from Local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RFO",
+ "Filter": "config1=0x40033",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts; Port 0",
"Counter": "0,1,2,3",
"EventCode": "0xC2",
- "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART0",
- "FCMask": "0x4",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.PORT0",
+ "FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x01",
- "PublicDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0",
- "UMask": "0x03",
+ "UMask": "0x04",
"Unit": "IIO"
},
{
- "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 1",
+ "BriefDescription": "PCIe Completion Buffer Inserts; Port 1",
"Counter": "0,1,2,3",
"EventCode": "0xC2",
- "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART1",
- "FCMask": "0x4",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.PORT1",
+ "FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x02",
- "PublicDescription": "PCIe Completion Buffer Inserts of completions with data: Part 1",
- "UMask": "0x03",
+ "UMask": "0x04",
"Unit": "IIO"
},
{
- "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 2",
+ "BriefDescription": "PCIe Completion Buffer Inserts; Port 2",
"Counter": "0,1,2,3",
"EventCode": "0xC2",
- "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART2",
- "FCMask": "0x4",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.PORT2",
+ "FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x04",
- "PublicDescription": "PCIe Completion Buffer Inserts of completions with data: Part 2",
- "UMask": "0x03",
+ "UMask": "0x04",
"Unit": "IIO"
},
{
- "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 3",
+ "BriefDescription": "PCIe Completion Buffer Inserts; Port 3",
"Counter": "0,1,2,3",
"EventCode": "0xC2",
- "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART3",
- "FCMask": "0x4",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.PORT3",
+ "FCMask": "0x7",
"PerPkg": "1",
"PortMask": "0x08",
- "PublicDescription": "PCIe Completion Buffer Inserts of completions with data: Part 3",
- "UMask": "0x03",
+ "UMask": "0x04",
"Unit": "IIO"
},
{
- "BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 0-3",
+ "BriefDescription": "Num Link Correctable Errors",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xF",
+ "EventName": "UNC_IIO_LINK_NUM_CORR_ERR",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num Link Retries",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE",
+ "EventName": "UNC_IIO_LINK_NUM_RETRIES",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number packets that passed the Mask/Match Filter",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_IIO_MASK_MATCH",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus; Non-PCIE bus",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus; PCIE bus",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus; Non-PCIE bus and !(PCIE bus)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus; Non-PCIE bus and PCIE bus",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS0_BUS1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus; !(Non-PCIE bus) and PCIE bus",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.NOT_BUS0_BUS1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.NOT_BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus; Non-PCIE bus",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus; PCIE bus",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus; Non-PCIE bus and !(PCIE bus)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus; Non-PCIE bus and PCIE bus",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS0_BUS1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus; !(Non-PCIE bus) and PCIE bus",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.NOT_BUS0_BUS1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus; !(Non-PCIE bus) and !(PCIE bus)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.NOT_BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "UNC_IIO_NOTHING",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_IIO_NOTHING",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART0",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART1",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART2",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART3",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART0",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART1",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART2",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART3",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART0",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMIC.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART1",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMIC.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART2",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMIC.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART3",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMIC.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART0",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMICCMP.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART1",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMICCMP.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART2",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMICCMP.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART3",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMICCMP.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MSG.PART0",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MSG.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MSG.PART1",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MSG.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MSG.PART2",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MSG.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MSG.PART3",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MSG.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART0",
"Counter": "2,3",
- "EventCode": "0xD5",
- "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL_PARTS",
- "FCMask": "0x04",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_WRITE.PART0",
+ "FCMask": "0x7",
"PerPkg": "1",
- "PublicDescription": "PCIe Completion Buffer occupancy of completions with data: Part 0-3",
- "UMask": "0x0f",
+ "PortMask": "0x1",
+ "UMask": "0x1",
"Unit": "IIO"
},
{
- "BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 0",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART1",
"Counter": "2,3",
- "EventCode": "0xD5",
- "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART0",
- "FCMask": "0x04",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_WRITE.PART1",
+ "FCMask": "0x7",
"PerPkg": "1",
- "PublicDescription": "PCIe Completion Buffer occupancy of completions with data: Part 0",
- "UMask": "0x01",
+ "PortMask": "0x2",
+ "UMask": "0x1",
"Unit": "IIO"
},
{
- "BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 1",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART2",
"Counter": "2,3",
- "EventCode": "0xD5",
- "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART1",
- "FCMask": "0x04",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_WRITE.PART2",
+ "FCMask": "0x7",
"PerPkg": "1",
- "PublicDescription": "PCIe Completion Buffer occupancy of completions with data: Part 1",
- "UMask": "0x02",
+ "PortMask": "0x4",
+ "UMask": "0x1",
"Unit": "IIO"
},
{
- "BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 2",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART3",
"Counter": "2,3",
- "EventCode": "0xD5",
- "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART2",
- "FCMask": "0x04",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_WRITE.PART3",
+ "FCMask": "0x7",
"PerPkg": "1",
- "PublicDescription": "PCIe Completion Buffer occupancy of completions with data: Part 2",
- "UMask": "0x04",
+ "PortMask": "0x8",
+ "UMask": "0x1",
"Unit": "IIO"
},
{
- "BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 3",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART0",
"Counter": "2,3",
- "EventCode": "0xD5",
- "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART3",
- "FCMask": "0x04",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_WRITE.PART0",
+ "FCMask": "0x7",
"PerPkg": "1",
- "PublicDescription": "PCIe Completion Buffer occupancy of completions with data: Part 3",
- "UMask": "0x08",
+ "PortMask": "0x1",
+ "UMask": "0x2",
"Unit": "IIO"
},
{
- "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part0",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART1",
"Counter": "2,3",
+ "Deprecated": "1",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART2",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART3",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART0",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART1",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART2",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART3",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART0",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART1",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART2",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART3",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART0",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART1",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART2",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART3",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART0",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART1",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART2",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART3",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART0",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART1",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART2",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART3",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART0",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART1",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART2",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART3",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Symbol Times on Link",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "UNC_IIO_SYMBOL_TIMES",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART1",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART2",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART3",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMIC.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMIC.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMIC.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMIC.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMICCMP.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMICCMP.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMICCMP.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMICCMP.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MSG.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MSG.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MSG.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MSG.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART1",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART2",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART3",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART1",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART2",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART3",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART1",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART2",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART3",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART1",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART2",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART3",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART1",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART2",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART3",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART1",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART2",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART3",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART1",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART2",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART3",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART1",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART2",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART3",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "VTd Access; Vtd hit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_VTD_ACCESS.L4_PAGE_HIT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "VTd Access; context cache miss",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_VTD_ACCESS.CTXT_MISS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "VTd Access; L1 miss",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_VTD_ACCESS.L1_MISS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "VTd Access; L2 miss",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_VTD_ACCESS.L2_MISS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "VTd Access; L3 miss",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_VTD_ACCESS.L3_MISS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "VTd Access; TLB miss",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_VTD_ACCESS.TLB_MISS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "VTd Access; TLB is full",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_VTD_ACCESS.TLB_FULL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "VTd Access; TLB miss",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_VTD_ACCESS.TLB1_MISS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "VTd Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_VTD_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.VTD0",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMIC.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.VTD1",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMIC.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.VTD0",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.VTD1",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.VTD0",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.VTD1",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MSG.VTD0",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MSG.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MSG.VTD1",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MSG.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.VTD0",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.VTD1",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.VTD0",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.VTD1",
+ "Counter": "0,1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.VTD0",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.VTD1",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.VTD0",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.VTD1",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_READ.VTD0",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_READ.VTD1",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.VTD0",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.VTD1",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.VTD0",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.VTD1",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.VTD0",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.VTD1",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.VTD0",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.VTD1",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.VTD0",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.VTD1",
+ "Counter": "2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMIC.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMIC.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.VTD0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.VTD1",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MSG.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MSG.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.VTD0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.VTD1",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.VTD0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_READ.VTD0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_READ.VTD1",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.VTD0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.VTD1",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.VTD0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.VTD1",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.VTD0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.VTD1",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.VTD0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.VTD1",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.VTD0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.VTD1",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's PCICFG space",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART0",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
- "PublicDescription": "Counts every read request for 4 bytes of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part0. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part1",
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's PCICFG space",
"Counter": "2,3",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART1",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
- "PublicDescription": "Counts every read request for 4 bytes of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part1. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part2",
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's PCICFG space",
"Counter": "2,3",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART2",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART2",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
- "PublicDescription": "Counts every read request for 4 bytes of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part2. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part3",
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's PCICFG space",
"Counter": "2,3",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART3",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART3",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
- "PublicDescription": "Counts every read request for 4 bytes of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part3. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Write request of 4 bytes made to IIO Part0 by the CPU",
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's IO space",
"Counter": "2,3",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART0",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
- "PublicDescription": "Counts every write request of 4 bytes of data made to the MMIO space of a card on IIO Part0 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
+ "UMask": "0x20",
"Unit": "IIO"
},
{
- "BriefDescription": "Write request of 4 bytes made to IIO Part1 by the CPU",
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's IO space",
"Counter": "2,3",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART1",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
- "PublicDescription": "Counts every write request of 4 bytes of data made to the MMIO space of a card on IIO Part1 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
+ "UMask": "0x20",
"Unit": "IIO"
},
{
- "BriefDescription": "Write request of 4 bytes made to IIO Part2 by the CPU",
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's IO space",
"Counter": "2,3",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART2",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART2",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
- "PublicDescription": "Counts every write request of 4 bytes of data made to the MMIO space of a card on IIO Part2 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
+ "UMask": "0x20",
"Unit": "IIO"
},
{
- "BriefDescription": "Write request of 4 bytes made to IIO Part3 by the CPU",
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's IO space",
"Counter": "2,3",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART3",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART3",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
- "PublicDescription": "Counts every write request of 4 bytes of data made to the MMIO space of a card on IIO Part3 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
+ "UMask": "0x20",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part0",
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's PCICFG space",
"Counter": "2,3",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART0",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
- "PublicDescription": "Counts ever peer to peer read request for 4 bytes of data made by a different IIO unit to the MMIO space of a card on IIO Part0. Does not include requests made by the same IIO unit. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
+ "UMask": "0x40",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part1",
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's PCICFG space",
"Counter": "2,3",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART1",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
- "PublicDescription": "Counts ever peer to peer read request for 4 bytes of data made by a different IIO unit to the MMIO space of a card on IIO Part1. Does not include requests made by the same IIO unit. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
+ "UMask": "0x40",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part2",
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's PCICFG space",
"Counter": "2,3",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART2",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART2",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
- "PublicDescription": "Counts ever peer to peer read request for 4 bytes of data made by a different IIO unit to the MMIO space of a card on IIO Part2. Does not include requests made by the same IIO unit. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
+ "UMask": "0x40",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part3",
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's PCICFG space",
"Counter": "2,3",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART3",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART3",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
- "PublicDescription": "Counts ever peer to peer read request for 4 bytes of data made by a different IIO unit to the MMIO space of a card on IIO Part3. Does not include requests made by the same IIO unit. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
+ "UMask": "0x40",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part0 by a different IIO unit",
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's IO space",
"Counter": "2,3",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART0",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
- "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made to the MMIO space of a card on IIO Part0 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
+ "UMask": "0x80",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part1 by a different IIO unit",
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's IO space",
"Counter": "2,3",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART1",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
- "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made to the MMIO space of a card on IIO Part1 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
+ "UMask": "0x80",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part2 by a different IIO unit",
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's IO space",
"Counter": "2,3",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART2",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART2",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
- "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made to the MMIO space of a card on IIO Part2 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
+ "UMask": "0x80",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part3 by a different IIO unit",
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's IO space",
"Counter": "2,3",
"EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART3",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART3",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
- "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made to the MMIO space of a card on IIO Part3 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's PCICFG space",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's PCICFG space",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's PCICFG space",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's PCICFG space",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's IO space",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's IO space",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's IO space",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's IO space",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's MMIO space",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's MMIO space",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's MMIO space",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's MMIO space",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Another card (different IIO stack) reading from this card",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Another card (different IIO stack) reading from this card",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Another card (different IIO stack) writing to this card",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
"UMask": "0x02",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part0 to an IIO target",
+ "BriefDescription": "Data requested by the CPU; Another card (different IIO stack) writing to this card",
+ "Counter": "2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Atomic requests targeting DRAM",
"Counter": "0,1",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART0",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART0",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
- "PublicDescription": "Counts every peer to peer read request for 4 bytes of data made by IIO Part0 to the MMIO space of an IIO target. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part1 to an IIO target",
+ "BriefDescription": "Data requested of the CPU; Atomic requests targeting DRAM",
"Counter": "0,1",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART1",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
- "PublicDescription": "Counts every peer to peer read request for 4 bytes of data made by IIO Part1 to the MMIO space of an IIO target. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part2 to an IIO target",
+ "BriefDescription": "Data requested of the CPU; Atomic requests targeting DRAM",
"Counter": "0,1",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART2",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART2",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
- "PublicDescription": "Counts every peer to peer read request for 4 bytes of data made by IIO Part2 to the MMIO space of an IIO target. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part3 to an IIO target",
+ "BriefDescription": "Data requested of the CPU; Atomic requests targeting DRAM",
"Counter": "0,1",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART3",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART3",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
- "PublicDescription": "Counts every peer to peer read request for 4 bytes of data made by IIO Part3 to the MMIO space of an IIO target. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "BriefDescription": "Data requested of the CPU; Completion of atomic requests targeting DRAM",
"Counter": "0,1",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART0",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART0",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
- "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made by IIO Part0 to the MMIO space of an IIO target. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
+ "UMask": "0x20",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "BriefDescription": "Data requested of the CPU; Completion of atomic requests targeting DRAM",
"Counter": "0,1",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART1",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
- "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made by IIO Part1 to the MMIO space of an IIO target. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
+ "UMask": "0x20",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "BriefDescription": "Data requested of the CPU; Completion of atomic requests targeting DRAM",
"Counter": "0,1",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART2",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART2",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
- "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made by IIO Part2 to the MMIO space of an IIO target. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
+ "UMask": "0x20",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "BriefDescription": "Data requested of the CPU; Completion of atomic requests targeting DRAM",
"Counter": "0,1",
"EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART3",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Messages",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Messages",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Messages",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Messages",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART3",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
- "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made by IIO Part3 to the MMIO space of an IIO target. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Atomic requests targeting DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Atomic requests targeting DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Card reading from DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Card reading from DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Card writing to DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Card writing to DRAM",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Messages",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Messages",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
"UMask": "0x02",
"Unit": "IIO"
},
{
- "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part0",
+ "BriefDescription": "Data requested of the CPU; Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's PCICFG space",
"Counter": "0,1,2,3",
"EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART0",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART0",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
- "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part0. In the general case, part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part1",
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's PCICFG space",
"Counter": "0,1,2,3",
"EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
- "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part1. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part2",
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's PCICFG space",
"Counter": "0,1,2,3",
"EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART2",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART2",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
- "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part2. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part3",
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's PCICFG space",
"Counter": "0,1,2,3",
"EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART3",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART3",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
- "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part3. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part0 by the CPU",
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's IO space",
"Counter": "0,1,2,3",
"EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART0",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART0",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
- "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part0 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
+ "UMask": "0x20",
"Unit": "IIO"
},
{
- "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part1 by the CPU",
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's IO space",
"Counter": "0,1,2,3",
"EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
- "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part1 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
+ "UMask": "0x20",
"Unit": "IIO"
},
{
- "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part2 by the CPU",
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's IO space",
"Counter": "0,1,2,3",
"EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART2",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART2",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
- "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part2 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
+ "UMask": "0x20",
"Unit": "IIO"
},
{
- "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part3 by the CPU",
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's IO space",
"Counter": "0,1,2,3",
"EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART3",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART3",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
- "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part3 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
+ "UMask": "0x20",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part0",
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's PCICFG space",
"Counter": "0,1,2,3",
"EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART0",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART0",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
- "PublicDescription": "Counts every peer to peer read request for up to a 64 byte transaction of data made by a different IIO unit to the MMIO space of a card on IIO Part0. Does not include requests made by the same IIO unit. In the general case, part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
+ "UMask": "0x40",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part1",
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's PCICFG space",
"Counter": "0,1,2,3",
"EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
- "PublicDescription": "Counts every peer to peer read request for up to a 64 byte transaction of data made by a different IIO unit to the MMIO space of a card on IIO Part1. Does not include requests made by the same IIO unit. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
+ "UMask": "0x40",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part2",
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's PCICFG space",
"Counter": "0,1,2,3",
"EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART2",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART2",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
- "PublicDescription": "Counts every peer to peer read request for up to a 64 byte transaction of data made by a different IIO unit to the MMIO space of a card on IIO Part2. Does not include requests made by the same IIO unit. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
+ "UMask": "0x40",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part3",
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's PCICFG space",
"Counter": "0,1,2,3",
"EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART3",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART3",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
- "PublicDescription": "Counts every peer to peer read request for up to a 64 byte transaction of data made by a different IIO unit to the MMIO space of a card on IIO Part3. Does not include requests made by the same IIO unit. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
+ "UMask": "0x40",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part0 by a different IIO unit",
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's IO space",
"Counter": "0,1,2,3",
"EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART0",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART0",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
- "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part0 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
+ "UMask": "0x80",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part1 by a different IIO unit",
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's IO space",
"Counter": "0,1,2,3",
"EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
- "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part1 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
+ "UMask": "0x80",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part2 by a different IIO unit",
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's IO space",
"Counter": "0,1,2,3",
"EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART2",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART2",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
- "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part2 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
+ "UMask": "0x80",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part3 by a different IIO unit",
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's IO space",
"Counter": "0,1,2,3",
"EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART3",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART3",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
- "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part3 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Another card (different IIO stack) reading from this card",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Another card (different IIO stack) reading from this card",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Another card (different IIO stack) writing to this card",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
"UMask": "0x02",
"Unit": "IIO"
},
{
- "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part0 to Memory",
+ "BriefDescription": "Number Transactions requested by the CPU; Another card (different IIO stack) writing to this card",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Card writing to DRAM",
"Counter": "0,1,2,3",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART0",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.VTD0",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x01",
- "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by IIO Part0 to a unit on the main die (generally memory). In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
+ "PortMask": "0x10",
+ "UMask": "0x01",
"Unit": "IIO"
},
{
- "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part1 to Memory",
+ "BriefDescription": "Number Transactions requested of the CPU; Card writing to DRAM",
"Counter": "0,1,2,3",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART1",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.VTD1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x02",
- "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by IIO Part1 to a unit on the main die (generally memory). In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
+ "PortMask": "0x20",
+ "UMask": "0x01",
"Unit": "IIO"
},
{
- "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part2 to Memory",
+ "BriefDescription": "Number Transactions requested of the CPU; Card writing to another Card (same or different stack)",
"Counter": "0,1,2,3",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART2",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.VTD0",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x04",
- "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by IIO Part2 to a unit on the main die (generally memory). In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "PortMask": "0x10",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
"UMask": "0x04",
"Unit": "IIO"
},
{
- "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part3 to Memory",
+ "BriefDescription": "Number Transactions requested of the CPU; Card reading from DRAM",
"Counter": "0,1,2,3",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART3",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.VTD1",
"FCMask": "0x07",
"PerPkg": "1",
- "PortMask": "0x08",
- "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by IIO Part3 to a unit on the main die (generally memory). In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "PortMask": "0x20",
"UMask": "0x04",
"Unit": "IIO"
},
{
- "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part0 to Memory",
+ "BriefDescription": "Number Transactions requested of the CPU; Card reading from another Card (same or different stack)",
"Counter": "0,1,2,3",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART0",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART0",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
- "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made by IIO Part0 to a unit on the main die (generally memory). In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part1 to Memory",
+ "BriefDescription": "Number Transactions requested of the CPU; Atomic requests targeting DRAM",
"Counter": "0,1,2,3",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART1",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
- "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made by IIO Part1 to a unit on the main die (generally memory). In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part2 to Memory",
+ "BriefDescription": "Number Transactions requested of the CPU; Atomic requests targeting DRAM",
"Counter": "0,1,2,3",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART2",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART2",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
- "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made by IIO Part2 to a unit on the main die (generally memory). In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part3 to Memory",
+ "BriefDescription": "Number Transactions requested of the CPU; Atomic requests targeting DRAM",
"Counter": "0,1,2,3",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART3",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART3",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
- "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made by IIO Part3 to a unit on the main die (generally memory). In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
+ "UMask": "0x10",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part0 to an IIO target",
+ "BriefDescription": "Number Transactions requested of the CPU; Atomic requests targeting DRAM",
"Counter": "0,1,2,3",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART0",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Completion of atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMICCMP.PART0",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
- "PublicDescription": "Counts every peer to peer read request of up to a 64 byte transaction made by IIO Part0 to the MMIO space of an IIO target. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
+ "UMask": "0x20",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part1 to an IIO target",
+ "BriefDescription": "Number Transactions requested of the CPU; Completion of atomic requests targeting DRAM",
"Counter": "0,1,2,3",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART1",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMICCMP.PART1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
- "PublicDescription": "Counts every peer to peer read request of up to a 64 byte transaction made by IIO Part1 to the MMIO space of an IIO target. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
+ "UMask": "0x20",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part2 to an IIO target",
+ "BriefDescription": "Number Transactions requested of the CPU; Completion of atomic requests targeting DRAM",
"Counter": "0,1,2,3",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART2",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMICCMP.PART2",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
- "PublicDescription": "Counts every peer to peer read request of up to a 64 byte transaction made by IIO Part2 to the MMIO space of an IIO target. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
+ "UMask": "0x20",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part3 to an IIO target",
+ "BriefDescription": "Number Transactions requested of the CPU; Completion of atomic requests targeting DRAM",
"Counter": "0,1,2,3",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART3",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMICCMP.PART3",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
- "PublicDescription": "Counts every peer to peer read request of up to a 64 byte transaction made by IIO Part3 to the MMIO space of an IIO target. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
+ "UMask": "0x20",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part0 to an IIO target",
+ "BriefDescription": "Number Transactions requested of the CPU; Messages",
"Counter": "0,1,2,3",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART0",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART0",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x01",
- "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made by IIO Part0 to the MMIO space of an IIO target. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
+ "UMask": "0x40",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part1 to an IIO target",
+ "BriefDescription": "Number Transactions requested of the CPU; Messages",
"Counter": "0,1,2,3",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART1",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART1",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x02",
- "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made by IIO Part1 to the MMIO space of an IIO target.In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
+ "UMask": "0x40",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part2 to an IIO target",
+ "BriefDescription": "Number Transactions requested of the CPU; Messages",
"Counter": "0,1,2,3",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART2",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART2",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x04",
- "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made by IIO Part2 to the MMIO space of an IIO target. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
+ "UMask": "0x40",
"Unit": "IIO"
},
{
- "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part3 to an IIO target",
+ "BriefDescription": "Number Transactions requested of the CPU; Messages",
"Counter": "0,1,2,3",
"EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART3",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART3",
"FCMask": "0x07",
"PerPkg": "1",
"PortMask": "0x08",
- "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made by IIO Part3 to the MMIO space of an IIO target. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Messages",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x40",
"Unit": "IIO"
},
{
- "BriefDescription": "Total IRP occupancy of inbound read and write requests.",
+ "BriefDescription": "Number Transactions requested of the CPU; Messages",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Total Write Cache Occupancy; Any Source",
"Counter": "0,1",
"EventCode": "0xF",
- "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.MEM",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.ANY",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Total Write Cache Occupancy; Snoops",
+ "Counter": "0,1",
+ "EventCode": "0xF",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.IV_Q",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "IRP Clocks",
+ "Counter": "0,1",
+ "EventCode": "0x1",
+ "EventName": "UNC_I_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; PCIRdCur",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.PCIRDCUR",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; CRd",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.CRD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; DRd",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.DRD",
"PerPkg": "1",
- "PublicDescription": "Total IRP occupancy of inbound read and write requests. This is effectively the sum of read occupancy and write occupancy.",
"UMask": "0x4",
"Unit": "IRP"
},
{
- "BriefDescription": "PCIITOM request issued by the IRP unit to the mesh with the intention of writing a full cacheline.",
+ "BriefDescription": "Coherent Ops; PCIDCAHin5t",
"Counter": "0,1",
"EventCode": "0x10",
- "EventName": "UNC_I_COHERENT_OPS.PCITOM",
+ "EventName": "UNC_I_COHERENT_OPS.PCIDCAHINT",
"PerPkg": "1",
- "PublicDescription": "PCIITOM request issued by the IRP unit to the mesh with the intention of writing a full cacheline to coherent memory, without a RFO. PCIITOM is a speculative Invalidate to Modified command that requests ownership of the cacheline and does not move data from the mesh to IRP cache.",
- "UMask": "0x10",
+ "UMask": "0x20",
"Unit": "IRP"
},
{
- "BriefDescription": "RFO request issued by the IRP unit to the mesh with the intention of writing a partial cacheline.",
+ "BriefDescription": "Coherent Ops; WbMtoI",
"Counter": "0,1",
"EventCode": "0x10",
- "EventName": "UNC_I_COHERENT_OPS.RFO",
+ "EventName": "UNC_I_COHERENT_OPS.WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; CLFlush",
+ "Counter": "0,1",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.CLFLUSH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "FAF RF full",
+ "Counter": "0,1",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_FAF_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "FAF allocation -- sent to ADQ",
+ "Counter": "0,1",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_FAF_TRANSACTIONS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "All Inserts Inbound (p2p + faf + cset)",
+ "Counter": "0,1",
+ "EventCode": "0x1E",
+ "EventName": "UNC_I_IRP_ALL.INBOUND_INSERTS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "All Inserts Outbound (BL, AK, Snoops)",
+ "Counter": "0,1",
+ "EventCode": "0x1E",
+ "EventName": "UNC_I_IRP_ALL.OUTBOUND_INSERTS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Fastpath Requests",
+ "Counter": "0,1",
+ "EventCode": "0x1C",
+ "EventName": "UNC_I_MISC0.FAST_REQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Fastpath Rejects",
+ "Counter": "0,1",
+ "EventCode": "0x1C",
+ "EventName": "UNC_I_MISC0.FAST_REJ",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Cache Inserts of Read Transactions as Secondary",
+ "Counter": "0,1",
+ "EventCode": "0x1C",
+ "EventName": "UNC_I_MISC0.2ND_RD_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Cache Inserts of Write Transactions as Secondary",
+ "Counter": "0,1",
+ "EventCode": "0x1C",
+ "EventName": "UNC_I_MISC0.2ND_WR_INSERT",
"PerPkg": "1",
- "PublicDescription": "RFO request issued by the IRP unit to the mesh with the intention of writing a partial cacheline to coherent memory. RFO is a Read For Ownership command that requests ownership of the cacheline and moves data from the mesh to IRP cache.",
"UMask": "0x8",
"Unit": "IRP"
},
{
- "BriefDescription": "Inbound read requests received by the IRP and inserted into the FAF queue.",
+ "BriefDescription": "Misc Events - Set 0; Cache Inserts of Atomic Transactions as Secondary",
"Counter": "0,1",
- "EventCode": "0x18",
- "EventName": "UNC_I_FAF_INSERTS",
+ "EventCode": "0x1C",
+ "EventName": "UNC_I_MISC0.2ND_ATOMIC_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Fastpath Transfers From Primary to Secondary",
+ "Counter": "0,1",
+ "EventCode": "0x1C",
+ "EventName": "UNC_I_MISC0.FAST_XFER",
"PerPkg": "1",
- "PublicDescription": "Inbound read requests to coherent memory, received by the IRP and inserted into the Fire and Forget queue (FAF), a queue used for processing inbound reads in the IRP.",
+ "UMask": "0x20",
"Unit": "IRP"
},
{
- "BriefDescription": "Occupancy of the IRP FAF queue.",
+ "BriefDescription": "Misc Events - Set 0; Prefetch Ack Hints From Primary to Secondary",
"Counter": "0,1",
- "EventCode": "0x19",
- "EventName": "UNC_I_FAF_OCCUPANCY",
+ "EventCode": "0x1C",
+ "EventName": "UNC_I_MISC0.PF_ACK_HINT",
"PerPkg": "1",
- "PublicDescription": "Occupancy of the IRP Fire and Forget (FAF) queue, a queue used for processing inbound reads in the IRP.",
+ "UMask": "0x40",
"Unit": "IRP"
},
{
- "BriefDescription": "Inbound write (fast path) requests received by the IRP.",
+ "BriefDescription": "Misc Events - Set 0",
"Counter": "0,1",
- "EventCode": "0x11",
- "EventName": "UNC_I_TRANSACTIONS.WR_PREF",
+ "EventCode": "0x1C",
+ "EventName": "UNC_I_MISC0.UNKNOWN",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of I Line",
+ "Counter": "0,1",
+ "EventCode": "0x1D",
+ "EventName": "UNC_I_MISC1.SLOW_I",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of S Line",
+ "Counter": "0,1",
+ "EventCode": "0x1D",
+ "EventName": "UNC_I_MISC1.SLOW_S",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of E Line",
+ "Counter": "0,1",
+ "EventCode": "0x1D",
+ "EventName": "UNC_I_MISC1.SLOW_E",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of M Line",
+ "Counter": "0,1",
+ "EventCode": "0x1D",
+ "EventName": "UNC_I_MISC1.SLOW_M",
"PerPkg": "1",
- "PublicDescription": "Inbound write (fast path) requests to coherent memory, received by the IRP resulting in write ownership requests issued by IRP to the mesh.",
"UMask": "0x8",
"Unit": "IRP"
},
{
- "BriefDescription": "Traffic in which the M2M to iMC Bypass was not taken",
+ "BriefDescription": "Misc Events - Set 1; Lost Forward",
+ "Counter": "0,1",
+ "EventCode": "0x1D",
+ "EventName": "UNC_I_MISC1.LOST_FWD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Received Invalid",
+ "Counter": "0,1",
+ "EventCode": "0x1D",
+ "EventName": "UNC_I_MISC1.SEC_RCVD_INVLD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Received Valid",
+ "Counter": "0,1",
+ "EventCode": "0x1D",
+ "EventName": "UNC_I_MISC1.SEC_RCVD_VLD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Requests",
+ "Counter": "0,1",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_P2P_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Occupancy",
+ "Counter": "0,1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_P2P_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions; P2P reads",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.RD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions; P2P Writes",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.WR",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions; P2P Message",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.MSG",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions; P2P completions",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.CMPL",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions; Match if remote only",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.REM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions; match if remote and target matches",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.REM_AND_TGT_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions; match if local only",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.LOC",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions; match if local and target matches",
+ "Counter": "0,1",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.LOC_AND_TGT_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Miss",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.MISS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Hit I",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_I",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Hit E or S",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_ES",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Hit M",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_M",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; SnpCode",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.SNPCODE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; SnpData",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.SNPDATA",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; SnpInv",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.SNPINV",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Reads",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.READS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Writes",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.WRITES",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Read Prefetches",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.RD_PREF",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Atomic",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.ATOMIC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Other",
+ "Counter": "0,1",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.OTHER",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No AD Egress Credit Stalls",
+ "Counter": "0,1",
+ "EventCode": "0x1A",
+ "EventName": "UNC_I_TxR2_AD_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations",
+ "Counter": "0,1",
+ "EventCode": "0xB",
+ "EventName": "UNC_I_TxC_AK_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL DRS Egress Cycles Full",
+ "Counter": "0,1",
+ "EventCode": "0x5",
+ "EventName": "UNC_I_TxC_BL_DRS_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL DRS Egress Inserts",
+ "Counter": "0,1",
+ "EventCode": "0x2",
+ "EventName": "UNC_I_TxC_BL_DRS_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL DRS Egress Occupancy",
+ "Counter": "0,1",
+ "EventCode": "0x8",
+ "EventName": "UNC_I_TxC_BL_DRS_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCB Egress Cycles Full",
+ "Counter": "0,1",
+ "EventCode": "0x6",
+ "EventName": "UNC_I_TxC_BL_NCB_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCB Egress Inserts",
+ "Counter": "0,1",
+ "EventCode": "0x3",
+ "EventName": "UNC_I_TxC_BL_NCB_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCB Egress Occupancy",
+ "Counter": "0,1",
+ "EventCode": "0x9",
+ "EventName": "UNC_I_TxC_BL_NCB_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCS Egress Cycles Full",
+ "Counter": "0,1",
+ "EventCode": "0x7",
+ "EventName": "UNC_I_TxC_BL_NCS_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCS Egress Inserts",
+ "Counter": "0,1",
+ "EventCode": "0x4",
+ "EventName": "UNC_I_TxC_BL_NCS_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCS Egress Occupancy",
+ "Counter": "0,1",
+ "EventCode": "0xA",
+ "EventName": "UNC_I_TxC_BL_NCS_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No BL Egress Credit Stalls",
+ "Counter": "0,1",
+ "EventCode": "0x1B",
+ "EventName": "UNC_I_TxR2_BL_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
+ "EventCode": "0xD",
+ "EventName": "UNC_I_TxS_DATA_INSERTS_NCB",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
+ "EventCode": "0xE",
+ "EventName": "UNC_I_TxS_DATA_INSERTS_NCS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Request Queue Occupancy",
+ "Counter": "0,1",
+ "EventCode": "0xC",
+ "EventName": "UNC_I_TxS_REQUEST_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit I line in the IIO cache",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT_I",
+ "PerPkg": "1",
+ "UMask": "0x72",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit E or S line in the IIO cache",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT_ES",
+ "PerPkg": "1",
+ "UMask": "0x74",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit M line in the IIO cache",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT_M",
+ "PerPkg": "1",
+ "UMask": "0x78",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit M, E, S or I line in the IIO",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT",
+ "PerPkg": "1",
+ "UMask": "0x7e",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that miss the IIO cache",
+ "Counter": "0,1",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_MISS",
+ "PerPkg": "1",
+ "UMask": "0x71",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.BL_VNA_EQ0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.BL_VNA_EQ0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.BGF_CRD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.BGF_CRD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.GV_BLOCK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.GV_BLOCK",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_CRD_RETURN_BLOCKED",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_UPI_M3_CRD_RETURN_BLOCKED",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_BTW_2_THRESH",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_BTW_2_THRESH",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_BTW_0_THRESH",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_BTW_0_THRESH",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.BGF_CRD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.BGF_CRD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.GV_BLOCK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.GV_BLOCK",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Cycles where phy is not in L0, L0c, L0p, L1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_UPI_PHY_INIT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "L1 Req Nack",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x23",
+ "EventName": "UNC_UPI_POWER_L1_NACK",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "L1 Req (same as L1 Ack)",
"Counter": "0,1,2,3",
"EventCode": "0x22",
- "EventName": "UNC_M2M_BYPASS_M2M_Egress.NOT_TAKEN",
+ "EventName": "UNC_UPI_POWER_L1_REQ",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VNA",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x46",
+ "EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VNA",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VN0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x46",
+ "EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VN0",
"PerPkg": "1",
- "PublicDescription": "Counts traffic in which the M2M (Mesh to Memory) to iMC (Memory Controller) bypass was not taken",
"UMask": "0x2",
- "Unit": "M2M"
+ "Unit": "UPI LL"
},
{
- "BriefDescription": "Cycles when direct to core mode (which bypasses the CHA) was disabled",
+ "BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VN1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x46",
+ "EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VN1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.ACK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x46",
+ "EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.ACK",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in L0. Receive side",
"Counter": "0,1,2,3",
"EventCode": "0x24",
- "EventName": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_DIRSTATE",
+ "EventName": "UNC_UPI_RxL0_POWER_CYCLES",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "CRC Errors Detected",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB",
+ "EventName": "UNC_UPI_RxL_CRC_ERRORS",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "LLR Requests Sent",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_UPI_RxL_CRC_LLR_REQ_TRANSMIT",
"PerPkg": "1",
- "PublicDescription": "Counts cycles when direct to core mode (which bypasses the CHA) was disabled",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VN0",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3A",
+ "EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VN1",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "VNA Credit Consumed",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VNA",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received; Slot 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received; Slot 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.SLOT1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received; Slot 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.SLOT2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received; Data",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.DATA",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received; LLCRD Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.LLCRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received; LLCTRL",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.LLCTRL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_FLITS.PROTHDR",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.PROT_HDR",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_BASIC_HDR_MATCH.REQ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_HDR_MATCH.REQ",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_BASIC_HDR_MATCH.SNP",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_HDR_MATCH.SNP",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_DATA",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_HDR_MATCH.RSP",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_BASIC_HDR_MATCH.WB",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_HDR_MATCH.WB",
+ "PerPkg": "1",
+ "UMask": "0xB",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_BASIC_HDR_MATCH.NCB",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_HDR_MATCH.NCB",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_BASIC_HDR_MATCH.NCS",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_HDR_MATCH.NCS",
+ "PerPkg": "1",
+ "UMask": "0xD",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets; Slot 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets; Slot 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets; Slot 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ2",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.CFG_CTL",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.CFG_CTL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_BYPASS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_BYPASS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_CRED",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_CRED",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.TXQ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.TXQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RETRY",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RETRY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.DFX",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.DFX",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.SPARE",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.SPARE",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_POWER_CYCLES_LL_ENTER",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "UNC_UPI_TxL0P_POWER_CYCLES_LL_ENTER",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_POWER_CYCLES_M3_EXIT",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x29",
+ "EventName": "UNC_UPI_TxL0P_POWER_CYCLES_M3_EXIT",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in L0. Transmit side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x26",
+ "EventName": "UNC_UPI_TxL0_POWER_CYCLES",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent; Slot 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent; Slot 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.SLOT1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent; Slot 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.SLOT2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent; LLCRD Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.LLCRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent; LLCTRL",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.LLCTRL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_FLITS.PROTHDR",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.PROT_HDR",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.REQ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.REQ",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.SNP",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.SNP",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.WB",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.WB",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.NCB",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.NCB",
+ "PerPkg": "1",
+ "UMask": "0xE",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.NCS",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.NCS",
+ "PerPkg": "1",
+ "UMask": "0xF",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Allocations",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_UPI_TxL_INSERTS",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x42",
+ "EventName": "UNC_UPI_TxL_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_VNA_CREDIT_RETURN_BLOCKED_VN01",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x45",
+ "EventName": "UNC_UPI_VNA_CREDIT_RETURN_BLOCKED_VN01",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "VNA Credits Pending Return - Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x44",
+ "EventName": "UNC_UPI_VNA_CREDIT_RETURN_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ3",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received; Protocol Header",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.PROTHDR",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent; Protocol Header",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.PROTHDR",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ2",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.LOC",
+ "PerPkg": "1",
+ "UMaskExt": "0x02",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.REM",
+ "PerPkg": "1",
+ "UMaskExt": "0x04",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.DATA_HDR",
+ "PerPkg": "1",
+ "UMaskExt": "0x08",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.NON_DATA_HDR",
+ "PerPkg": "1",
+ "UMaskExt": "0x10",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.DUAL_SLOT_HDR",
+ "PerPkg": "1",
+ "UMaskExt": "0x20",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.SGL_SLOT_HDR",
+ "PerPkg": "1",
+ "UMaskExt": "0x40",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_NODATA",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.RSP_NODATA",
+ "PerPkg": "1",
+ "UMask": "0xA",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_DATA",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.RSP_DATA",
+ "PerPkg": "1",
+ "UMask": "0xC",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received; Idle",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.IDLE",
+ "PerPkg": "1",
+ "UMask": "0x47",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.REQ",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Request Opcode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.REQ_OPC",
+ "PerPkg": "1",
+ "UMask": "0x0108",
+ "UMaskExt": "0x01",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Snoop",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.SNP",
+ "PerPkg": "1",
+ "UMask": "0x09",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Snoop Opcode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.SNP_OPC",
+ "PerPkg": "1",
+ "UMask": "0x0109",
+ "UMaskExt": "0x01",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Response - No Data",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_NODATA",
+ "PerPkg": "1",
+ "UMask": "0x0A",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Response - No Data",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_NODATA_OPC",
+ "PerPkg": "1",
+ "UMask": "0x010A",
+ "UMaskExt": "0x01",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Response - Data",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_DATA",
+ "PerPkg": "1",
+ "UMask": "0x0C",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Response - Data",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_DATA_OPC",
+ "PerPkg": "1",
+ "UMask": "0x010C",
+ "UMaskExt": "0x01",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Writeback",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.WB",
+ "PerPkg": "1",
+ "UMask": "0x0D",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Writeback",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.WB_OPC",
+ "PerPkg": "1",
+ "UMask": "0x010D",
+ "UMaskExt": "0x01",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Non-Coherent Bypass",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB",
+ "PerPkg": "1",
+ "UMask": "0x0E",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Non-Coherent Bypass",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB_OPC",
+ "PerPkg": "1",
+ "UMask": "0x010E",
+ "UMaskExt": "0x01",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Non-Coherent Standard",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS",
+ "PerPkg": "1",
+ "UMask": "0x0F",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Non-Coherent Standard",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS_OPC",
+ "PerPkg": "1",
+ "UMask": "0x010F",
+ "UMaskExt": "0x01",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.REQ",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Request Opcode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.REQ_OPC",
+ "PerPkg": "1",
+ "UMask": "0x108",
+ "UMaskExt": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Snoop",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.SNP",
+ "PerPkg": "1",
+ "UMask": "0x09",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Snoop Opcode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.SNP_OPC",
+ "PerPkg": "1",
+ "UMask": "0x109",
+ "UMaskExt": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Response - No Data",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_NODATA",
+ "PerPkg": "1",
+ "UMask": "0x0A",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Response - No Data",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_NODATA_OPC",
+ "PerPkg": "1",
+ "UMask": "0x10A",
+ "UMaskExt": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Response - Data",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_DATA",
+ "PerPkg": "1",
+ "UMask": "0x0C",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Response - Data",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_DATA_OPC",
+ "PerPkg": "1",
+ "UMask": "0x10C",
+ "UMaskExt": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Writeback",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.WB",
+ "PerPkg": "1",
+ "UMask": "0x0D",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Writeback",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.WB_OPC",
+ "PerPkg": "1",
+ "UMask": "0x10D",
+ "UMaskExt": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Non-Coherent Bypass",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB",
+ "PerPkg": "1",
+ "UMask": "0x0E",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Non-Coherent Bypass",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB_OPC",
+ "PerPkg": "1",
+ "UMask": "0x10E",
+ "UMaskExt": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Non-Coherent Standard",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS",
+ "PerPkg": "1",
+ "UMask": "0x0F",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Non-Coherent Standard",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS_OPC",
+ "PerPkg": "1",
+ "UMask": "0x10F",
+ "UMaskExt": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Response - Conflict",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSPCNFLT",
+ "PerPkg": "1",
+ "UMask": "0x01AA",
+ "UMaskExt": "0x01",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Response - Invalid",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSPI",
+ "PerPkg": "1",
+ "UMask": "0x012A",
+ "UMaskExt": "0x01",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Allocations; Slot 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x30",
+ "EventName": "UNC_UPI_RxL_INSERTS.SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Allocations; Slot 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x30",
+ "EventName": "UNC_UPI_RxL_INSERTS.SLOT1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Allocations; Slot 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x30",
+ "EventName": "UNC_UPI_RxL_INSERTS.SLOT2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Response - Conflict",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSPCNFLT",
+ "PerPkg": "1",
+ "UMask": "0x1AA",
+ "UMaskExt": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Response - Invalid",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSPI",
+ "PerPkg": "1",
+ "UMask": "0x12A",
+ "UMaskExt": "0x1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "M2M to iMC Bypass; Taken",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x22",
+ "EventName": "UNC_M2M_BYPASS_M2M_Egress.TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
"Unit": "M2M"
},
{
- "BriefDescription": "Messages sent direct to core (bypassing the CHA)",
+ "BriefDescription": "Cycles - at UCLK",
"Counter": "0,1,2,3",
- "EventCode": "0x23",
- "EventName": "UNC_M2M_DIRECT2CORE_TAKEN",
+ "EventName": "UNC_M2M_CLOCKTICKS",
"PerPkg": "1",
- "PublicDescription": "Counts when messages were sent direct to core (bypassing the CHA)",
"Unit": "M2M"
},
{
- "BriefDescription": "Number of reads in which direct to core transaction were overridden",
+ "BriefDescription": "Directory Hit; On Dirty Line in I State",
"Counter": "0,1,2,3",
- "EventCode": "0x25",
- "EventName": "UNC_M2M_DIRECT2CORE_TXN_OVERRIDE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_I",
"PerPkg": "1",
- "PublicDescription": "Counts reads in which direct to core transactions (which would have bypassed the CHA) were overridden",
+ "UMask": "0x1",
"Unit": "M2M"
},
{
- "BriefDescription": "Number of reads in which direct to Intel UPI transactions were overridden",
+ "BriefDescription": "Directory Hit; On Dirty Line in S State",
"Counter": "0,1,2,3",
- "EventCode": "0x28",
- "EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_CREDITS",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_S",
"PerPkg": "1",
- "PublicDescription": "Counts reads in which direct to Intel Ultra Path Interconnect (UPI) transactions (which would have bypassed the CHA) were overridden",
+ "UMask": "0x2",
"Unit": "M2M"
},
{
- "BriefDescription": "Cycles when direct to Intel UPI was disabled",
+ "BriefDescription": "Directory Hit; On Dirty Line in L State",
"Counter": "0,1,2,3",
- "EventCode": "0x27",
- "EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_DIRSTATE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_P",
"PerPkg": "1",
- "PublicDescription": "Counts cycles when the ability to send messages direct to the Intel Ultra Path Interconnect (bypassing the CHA) was disabled",
+ "UMask": "0x4",
"Unit": "M2M"
},
{
- "BriefDescription": "Messages sent direct to the Intel UPI",
+ "BriefDescription": "Directory Hit; On Dirty Line in A State",
"Counter": "0,1,2,3",
- "EventCode": "0x26",
- "EventName": "UNC_M2M_DIRECT2UPI_TAKEN",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_A",
"PerPkg": "1",
- "PublicDescription": "Counts when messages were sent direct to the Intel Ultra Path Interconnect (bypassing the CHA)",
+ "UMask": "0x8",
"Unit": "M2M"
},
{
- "BriefDescription": "Number of reads that a message sent direct2 Intel UPI was overridden",
+ "BriefDescription": "Directory Hit; On NonDirty Line in I State",
"Counter": "0,1,2,3",
- "EventCode": "0x29",
- "EventName": "UNC_M2M_DIRECT2UPI_TXN_OVERRIDE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_I",
"PerPkg": "1",
- "PublicDescription": "Counts when a read message that was sent direct to the Intel Ultra Path Interconnect (bypassing the CHA) was overridden",
+ "UMask": "0x10",
"Unit": "M2M"
},
{
- "BriefDescription": "Multi-socket cacheline Directory lookups (any state found)",
+ "BriefDescription": "Directory Hit; On NonDirty Line in S State",
"Counter": "0,1,2,3",
- "EventCode": "0x2D",
- "EventName": "UNC_M2M_DIRECTORY_LOOKUP.ANY",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_S",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit; On NonDirty Line in L State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_P",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit; On NonDirty Line in A State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_A",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss; On Dirty Line in I State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_I",
"PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) looks into the multi-socket cacheline Directory state, and found the cacheline marked in Any State (A, I, S or unused)",
"UMask": "0x1",
"Unit": "M2M"
},
{
- "BriefDescription": "Multi-socket cacheline Directory lookups (cacheline found in A state)",
+ "BriefDescription": "Directory Miss; On Dirty Line in S State",
"Counter": "0,1,2,3",
- "EventCode": "0x2D",
- "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_A",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_S",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss; On Dirty Line in L State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_P",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss; On Dirty Line in A State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_A",
"PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) looks into the multi-socket cacheline Directory state, and found the cacheline marked in the A (SnoopAll) state, indicating the cacheline is stored in another socket in any state, and we must snoop the other sockets to make sure we get the latest data. The data may be stored in any state in the local socket.",
"UMask": "0x8",
"Unit": "M2M"
},
{
- "BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in I state)",
+ "BriefDescription": "Directory Miss; On NonDirty Line in I State",
"Counter": "0,1,2,3",
- "EventCode": "0x2D",
- "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_I",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_I",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss; On NonDirty Line in S State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_S",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss; On NonDirty Line in L State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_P",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss; On NonDirty Line in A State",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_A",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC; Critical Priority",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.ISOCH",
"PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) looks into the multi-socket cacheline Directory state , and found the cacheline marked in the I (Invalid) state indicating the cacheline is not stored in another socket, and so there is no need to snoop the other sockets for the latest data. The data may be stored in any state in the local socket.",
"UMask": "0x2",
"Unit": "M2M"
},
{
- "BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in S state)",
+ "BriefDescription": "M2M Reads Issued to iMC; All, regardless of priority",
"Counter": "0,1,2,3",
- "EventCode": "0x2D",
- "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_S",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.FROM_TRANSGRESS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC; Full Line Non-ISOCH",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.FULL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC; ISOCH Full Line",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.FULL_ISOCH",
"PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) looks into the multi-socket cacheline Directory state , and found the cacheline marked in the S (Shared) state indicating the cacheline is either stored in another socket in the S(hared) state , and so there is no need to snoop the other sockets for the latest data. The data may be stored in any state in the local socket.",
"UMask": "0x4",
"Unit": "M2M"
},
{
- "BriefDescription": "Multi-socket cacheline Directory update from A to I",
+ "BriefDescription": "M2M Writes Issued to iMC; ISOCH Partial",
"Counter": "0,1,2,3",
- "EventCode": "0x2E",
- "EventName": "UNC_M2M_DIRECTORY_UPDATE.A2I",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.PARTIAL_ISOCH",
"PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from from A (SnoopAll) to I (Invalid)",
- "UMask": "0x20",
+ "UMask": "0x8",
"Unit": "M2M"
},
{
- "BriefDescription": "Multi-socket cacheline Directory update from A to S",
+ "BriefDescription": "M2M Writes Issued to iMC; All, regardless of priority",
"Counter": "0,1,2,3",
- "EventCode": "0x2E",
- "EventName": "UNC_M2M_DIRECTORY_UPDATE.A2S",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.FROM_TRANSGRESS",
"PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from from A (SnoopAll) to S (Shared)",
"UMask": "0x40",
"Unit": "M2M"
},
{
- "BriefDescription": "Multi-socket cacheline Directory update from/to Any state",
+ "BriefDescription": "Number Packet Header Matches; Mesh Match",
"Counter": "0,1,2,3",
- "EventCode": "0x2E",
- "EventName": "UNC_M2M_DIRECTORY_UPDATE.ANY",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M2M_PKT_MATCH.MESH",
"PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory to a new state",
"UMask": "0x1",
"Unit": "M2M"
},
{
- "BriefDescription": "Multi-socket cacheline Directory update from I to A",
+ "BriefDescription": "Number Packet Header Matches; MC Match",
"Counter": "0,1,2,3",
- "EventCode": "0x2E",
- "EventName": "UNC_M2M_DIRECTORY_UPDATE.I2A",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M2M_PKT_MATCH.MC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Cycles Full",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x53",
+ "EventName": "UNC_M2M_PREFCAM_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Cycles Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "UNC_M2M_PREFCAM_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x55",
+ "EventName": "UNC_M2M_PREFCAM_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_RPQ_CYCLES_SPEC_CREDITS.CHN0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2M_RPQ_CYCLES_NO_SPEC_CREDITS.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_RPQ_CYCLES_SPEC_CREDITS.CHN1",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2M_RPQ_CYCLES_NO_SPEC_CREDITS.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_RPQ_CYCLES_SPEC_CREDITS.CHN2",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2M_RPQ_CYCLES_NO_SPEC_CREDITS.CHN2",
"PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from from I (Invalid) to A (SnoopAll)",
"UMask": "0x4",
"Unit": "M2M"
},
{
- "BriefDescription": "Multi-socket cacheline Directory update from I to S",
+ "BriefDescription": "Number AD Ingress Credits",
"Counter": "0,1,2,3",
- "EventCode": "0x2E",
- "EventName": "UNC_M2M_DIRECTORY_UPDATE.I2S",
+ "EventCode": "0x41",
+ "EventName": "UNC_M2M_TGR_AD_CREDITS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number BL Ingress Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2M_TGR_BL_CREDITS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Full; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2M_TRACKER_CYCLES_FULL.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Full; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2M_TRACKER_CYCLES_FULL.CH1",
"PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from from I (Invalid) to S (Shared)",
"UMask": "0x2",
"Unit": "M2M"
},
{
- "BriefDescription": "Multi-socket cacheline Directory update from S to A",
+ "BriefDescription": "Tracker Cycles Full; Channel 2",
"Counter": "0,1,2,3",
- "EventCode": "0x2E",
- "EventName": "UNC_M2M_DIRECTORY_UPDATE.S2A",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2M_TRACKER_CYCLES_FULL.CH2",
"PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from from S (Shared) to A (SnoopAll)",
- "UMask": "0x10",
+ "UMask": "0x4",
"Unit": "M2M"
},
{
- "BriefDescription": "Multi-socket cacheline Directory update from S to I",
+ "BriefDescription": "Tracker Cycles Not Empty; Channel 0",
"Counter": "0,1,2,3",
- "EventCode": "0x2E",
- "EventName": "UNC_M2M_DIRECTORY_UPDATE.S2I",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2M_TRACKER_CYCLES_NE.CH0",
"PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from from S (Shared) to I (Invalid)",
- "UMask": "0x8",
+ "UMask": "0x1",
"Unit": "M2M"
},
{
- "BriefDescription": "Reads to iMC issued",
+ "BriefDescription": "Tracker Cycles Not Empty; Channel 1",
"Counter": "0,1,2,3",
- "EventCode": "0x37",
- "EventName": "UNC_M2M_IMC_READS.ALL",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2M_TRACKER_CYCLES_NE.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2M_TRACKER_CYCLES_NE.CH2",
"PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) issues reads to the iMC (Memory Controller).",
"UMask": "0x4",
"Unit": "M2M"
},
{
- "BriefDescription": "Reads to iMC issued at Normal Priority (Non-Isochronous)",
+ "BriefDescription": "Tracker Inserts; Channel 0",
"Counter": "0,1,2,3",
- "EventCode": "0x37",
- "EventName": "UNC_M2M_IMC_READS.NORMAL",
+ "EventCode": "0x49",
+ "EventName": "UNC_M2M_TRACKER_INSERTS.CH0",
"PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) issues reads to the iMC (Memory Controller). It only counts normal priority non-isochronous reads.",
"UMask": "0x1",
"Unit": "M2M"
},
{
- "BriefDescription": "Writes to iMC issued",
+ "BriefDescription": "Tracker Inserts; Channel 1",
"Counter": "0,1,2,3",
- "EventCode": "0x38",
- "EventName": "UNC_M2M_IMC_WRITES.ALL",
+ "EventCode": "0x49",
+ "EventName": "UNC_M2M_TRACKER_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Inserts; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x49",
+ "EventName": "UNC_M2M_TRACKER_INSERTS.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Pending Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x48",
+ "EventName": "UNC_M2M_TRACKER_PENDING_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_WPQ_CYCLES_REG_CREDITS.CHN0",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M2M_WPQ_CYCLES_NO_REG_CREDITS.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_WPQ_CYCLES_REG_CREDITS.CHN1",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M2M_WPQ_CYCLES_NO_REG_CREDITS.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_WPQ_CYCLES_REG_CREDITS.CHN2",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M2M_WPQ_CYCLES_NO_REG_CREDITS.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Full; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M2M_WRITE_TRACKER_CYCLES_FULL.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Full; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M2M_WRITE_TRACKER_CYCLES_FULL.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Full; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M2M_WRITE_TRACKER_CYCLES_FULL.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M2M_WRITE_TRACKER_CYCLES_NE.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M2M_WRITE_TRACKER_CYCLES_NE.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M2M_WRITE_TRACKER_CYCLES_NE.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Inserts; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x61",
+ "EventName": "UNC_M2M_WRITE_TRACKER_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Inserts; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x61",
+ "EventName": "UNC_M2M_WRITE_TRACKER_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Inserts; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x61",
+ "EventName": "UNC_M2M_WRITE_TRACKER_INSERTS.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Occupancy; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_M2M_WRITE_TRACKER_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Occupancy; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_M2M_WRITE_TRACKER_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Occupancy; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_M2M_WRITE_TRACKER_OCCUPANCY.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED.TGR4",
"PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) issues writes to the iMC (Memory Controller).",
"UMask": "0x10",
"Unit": "M2M"
},
{
- "BriefDescription": "M2M Writes Issued to iMC; All, regardless of priority.",
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 5",
"Counter": "0,1,2,3",
- "EventCode": "0x38",
- "EventName": "UNC_M2M_IMC_WRITES.NI",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CREDITS_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CREDITS_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CREDITS_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CREDITS_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CREDITS_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CREDITS_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements; Down",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAE",
+ "EventName": "UNC_M2M_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements; Up",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAE",
+ "EventName": "UNC_M2M_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Left and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Left and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Right and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Right and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Left and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA9",
+ "EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Left and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA9",
+ "EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Right and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA9",
+ "EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Right and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA9",
+ "EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Left and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Left and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Right and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Right and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use; Left",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M2M_HORZ_RING_IV_IN_USE.LEFT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use; Right",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M2M_HORZ_RING_IV_IN_USE.RIGHT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; AD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_RING_BOUNCES_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_RING_BOUNCES_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; BL",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_RING_BOUNCES_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_RING_BOUNCES_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; AD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_RING_BOUNCES_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; Acknowledgements to core",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_RING_BOUNCES_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; Data Responses to core",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_RING_BOUNCES_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; Snoops of processor's cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_RING_BOUNCES_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; AD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; AK",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; Acknowledgements to Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; BL",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; AD",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; Acknowledgements to core",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; Data Responses to core",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; Snoops of processor's cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Source Throttle",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Full",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M2M_RxC_AD_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3",
+ "EventName": "UNC_M2M_RxC_AD_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) Full",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8",
+ "EventName": "UNC_M2M_RxC_BL_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_M2M_RxC_BL_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_RxR_BYPASS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_RxR_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_RxR_BYPASS.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_RxR_BYPASS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_RxR_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_RxR_BYPASS.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; IFV - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.IFV",
"PerPkg": "1",
- "PublicDescription": "M2M Writes Issued to iMC; All, regardless of priority.",
"UMask": "0x80",
"Unit": "M2M"
},
{
- "BriefDescription": "Partial Non-Isochronous writes to the iMC",
+ "BriefDescription": "Transgress Injection Starvation; IV - Bounce",
"Counter": "0,1,2,3",
- "EventCode": "0x38",
- "EventName": "UNC_M2M_IMC_WRITES.PARTIAL",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.IV_BNC",
"PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) issues partial writes to the iMC (Memory Controller). It only counts normal priority non-isochronous writes.",
- "UMask": "0x2",
+ "UMask": "0x08",
"Unit": "M2M"
},
{
- "BriefDescription": "Prefecth requests that got turn into a demand request",
+ "BriefDescription": "Transgress Ingress Allocations; AD - Bounce",
"Counter": "0,1,2,3",
- "EventCode": "0x56",
- "EventName": "UNC_M2M_PREFCAM_DEMAND_PROMOTIONS",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_RxR_INSERTS.AD_BNC",
"PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) promotes a outstanding request in the prefetch queue due to a subsequent demand read request that entered the M2M with the same address. Explanatory Side Note: The Prefecth queue is made of CAM (Content Addressable Memory)",
+ "UMask": "0x01",
"Unit": "M2M"
},
{
- "BriefDescription": "Inserts into the Memory Controller Prefetch Queue",
+ "BriefDescription": "Transgress Ingress Allocations; AD - Credit",
"Counter": "0,1,2,3",
- "EventCode": "0x57",
- "EventName": "UNC_M2M_PREFCAM_INSERTS",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_RxR_INSERTS.AD_CRD",
"PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) recieves a prefetch request and inserts it into its outstanding prefetch queue. Explanatory Side Note: the prefect queue is made from CAM: Content Addressable Memory",
+ "UMask": "0x10",
"Unit": "M2M"
},
{
- "BriefDescription": "AD Ingress (from CMS) Queue Inserts",
+ "BriefDescription": "Transgress Ingress Allocations; AK - Bounce",
"Counter": "0,1,2,3",
- "EventCode": "0x1",
- "EventName": "UNC_M2M_RxC_AD_INSERTS",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_RxR_INSERTS.AK_BNC",
"PerPkg": "1",
- "PublicDescription": "Counts when the a new entry is Received(RxC) and then added to the AD (Address Ring) Ingress Queue from the CMS (Common Mesh Stop). This is generally used for reads, and",
+ "UMask": "0x02",
"Unit": "M2M"
},
{
- "BriefDescription": "AD Ingress (from CMS) Occupancy",
+ "BriefDescription": "Transgress Ingress Allocations; BL - Bounce",
"Counter": "0,1,2,3",
- "EventCode": "0x2",
- "EventName": "UNC_M2M_RxC_AD_OCCUPANCY",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_RxR_INSERTS.BL_BNC",
"PerPkg": "1",
- "PublicDescription": "AD Ingress (from CMS) Occupancy",
+ "UMask": "0x04",
"Unit": "M2M"
},
{
- "BriefDescription": "BL Ingress (from CMS) Allocations",
+ "BriefDescription": "Transgress Ingress Allocations; BL - Credit",
"Counter": "0,1,2,3",
- "EventCode": "0x5",
- "EventName": "UNC_M2M_RxC_BL_INSERTS",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_RxR_INSERTS.BL_CRD",
"PerPkg": "1",
- "PublicDescription": "BL Ingress (from CMS) Allocations",
+ "UMask": "0x40",
"Unit": "M2M"
},
{
- "BriefDescription": "BL Ingress (from CMS) Occupancy",
+ "BriefDescription": "Transgress Ingress Allocations; IV - Bounce",
"Counter": "0,1,2,3",
- "EventCode": "0x6",
- "EventName": "UNC_M2M_RxC_BL_OCCUPANCY",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_RxR_INSERTS.IV_BNC",
"PerPkg": "1",
- "PublicDescription": "BL Ingress (from CMS) Occupancy",
+ "UMask": "0x08",
"Unit": "M2M"
},
{
- "BriefDescription": "AD Egress (to CMS) Allocations",
+ "BriefDescription": "Transgress Ingress Occupancy; AD - Bounce",
"Counter": "0,1,2,3",
- "EventCode": "0x9",
- "EventName": "UNC_M2M_TxC_AD_INSERTS",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.AD_BNC",
"PerPkg": "1",
- "PublicDescription": "AD Egress (to CMS) Allocations",
+ "UMask": "0x01",
"Unit": "M2M"
},
{
- "BriefDescription": "AD Egress (to CMS) Occupancy",
+ "BriefDescription": "Transgress Ingress Occupancy; AD - Credit",
"Counter": "0,1,2,3",
- "EventCode": "0xA",
- "EventName": "UNC_M2M_TxC_AD_OCCUPANCY",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.AD_CRD",
"PerPkg": "1",
- "PublicDescription": "AD Egress (to CMS) Occupancy",
+ "UMask": "0x10",
"Unit": "M2M"
},
{
- "BriefDescription": "BL Egress (to CMS) Allocations; All",
+ "BriefDescription": "Transgress Ingress Occupancy; AK - Bounce",
"Counter": "0,1,2,3",
- "EventCode": "0x15",
- "EventName": "UNC_M2M_TxC_BL_INSERTS.ALL",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Credits Occupancy",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE",
+ "EventName": "UNC_M2M_TxC_AD_CREDIT_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Credit Acquired",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD",
+ "EventName": "UNC_M2M_TxC_AD_CREDITS_ACQUIRED",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Full",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC",
+ "EventName": "UNC_M2M_TxC_AD_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Not Empty",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB",
+ "EventName": "UNC_M2M_TxC_AD_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No AD Egress (to CMS) Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xF",
+ "EventName": "UNC_M2M_TxC_AD_NO_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No AD Egress (to CMS) Credits",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2M_TxC_AD_NO_CREDIT_STALLED",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Outbound Ring Transactions on AK; CRD Transactions to Cbo",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_M2M_TxC_AK.CRD_CBO",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Outbound Ring Transactions on AK; NDR Transactions",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x39",
+ "EventName": "UNC_M2M_TxC_AK.NDR",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Credits Occupancy; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_M2M_TxC_AK_CREDIT_OCCUPANCY.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Credits Occupancy; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1E",
+ "EventName": "UNC_M2M_TxC_AK_CREDIT_OCCUPANCY.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Credit Acquired; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M2M_TxC_AK_CREDITS_ACQUIRED.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Credit Acquired; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M2M_TxC_AK_CREDITS_ACQUIRED.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No AK Egress (to CMS) Credits; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1F",
+ "EventName": "UNC_M2M_TxC_AK_NO_CREDIT_CYCLES.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No AK Egress (to CMS) Credits; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1F",
+ "EventName": "UNC_M2M_TxC_AK_NO_CREDIT_CYCLES.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No AK Egress (to CMS) Credits; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_M2M_TxC_AK_NO_CREDIT_STALLED.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No AK Egress (to CMS) Credits; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "UNC_M2M_TxC_AK_NO_CREDIT_STALLED.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Cache",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2M_TxC_BL.DRS_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Core",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2M_TxC_BL.DRS_CORE",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Credits Occupancy; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1A",
+ "EventName": "UNC_M2M_TxC_BL_CREDIT_OCCUPANCY.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Credits Occupancy; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1A",
+ "EventName": "UNC_M2M_TxC_BL_CREDIT_OCCUPANCY.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Credit Acquired; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2M_TxC_BL_CREDITS_ACQUIRED.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Credit Acquired; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2M_TxC_BL_CREDITS_ACQUIRED.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Full; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_FULL.ALL",
"PerPkg": "1",
- "PublicDescription": "BL Egress (to CMS) Allocations; All",
"UMask": "0x03",
"Unit": "M2M"
},
{
- "BriefDescription": "BL Egress (to CMS) Occupancy; All",
+ "BriefDescription": "BL Egress (to CMS) Full; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_FULL.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Full; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_FULL.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Not Empty; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Not Empty; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_NE.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Not Empty; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_NE.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Allocations; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2M_TxC_BL_INSERTS.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Allocations; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2M_TxC_BL_INSERTS.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No BL Egress (to CMS) Credits; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_M2M_TxC_BL_NO_CREDIT_CYCLES.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No BL Egress (to CMS) Credits; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_M2M_TxC_BL_NO_CREDIT_CYCLES.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No BL Egress (to CMS) Credits; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M2M_TxC_BL_NO_CREDIT_STALLED.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No BL Egress (to CMS) Credits; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M2M_TxC_BL_NO_CREDIT_STALLED.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Occupancy; Common Mesh Stop - Near Side",
"Counter": "0,1,2,3",
"EventCode": "0x16",
- "EventName": "UNC_M2M_TxC_BL_OCCUPANCY.ALL",
+ "EventName": "UNC_M2M_TxC_BL_OCCUPANCY.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Occupancy; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2M_TxC_BL_OCCUPANCY.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; AD - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; BL - Credit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; AD - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; AK - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; BL - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; IV - Bounce",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Down and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_VERT_RING_AD_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Down and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_VERT_RING_AD_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Up and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_VERT_RING_AD_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Up and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_VERT_RING_AD_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Down and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA8",
+ "EventName": "UNC_M2M_VERT_RING_AK_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Down and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA8",
+ "EventName": "UNC_M2M_VERT_RING_AK_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Up and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA8",
+ "EventName": "UNC_M2M_VERT_RING_AK_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Up and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA8",
+ "EventName": "UNC_M2M_VERT_RING_AK_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Down and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M2M_VERT_RING_BL_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Down and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M2M_VERT_RING_BL_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Up and Even",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M2M_VERT_RING_BL_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Up and Odd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M2M_VERT_RING_BL_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use; Down",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M2M_VERT_RING_IV_IN_USE.DN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use; Up",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M2M_VERT_RING_IV_IN_USE.UP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_TxC_BL.DRS_UPI",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x40",
+ "EventName": "UNC_NoUnit_TxC_BL.DRS_UPI",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Special; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2M_RPQ_CYCLES_SPEC_CREDITS.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Special; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2M_RPQ_CYCLES_SPEC_CREDITS.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Special; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2M_RPQ_CYCLES_SPEC_CREDITS.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to QPI",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2M_TxC_BL.DRS_UPI",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; IV",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M2M_WPQ_CYCLES_REG_CREDITS.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M2M_WPQ_CYCLES_REG_CREDITS.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M2M_WPQ_CYCLES_REG_CREDITS.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC Bypass; Taken",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_BYPASS_M2M_INGRESS.TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC Bypass; Not Taken",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_BYPASS_M2M_INGRESS.NOT_TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_M2M_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Special; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M2M_WPQ_CYCLES_SPEC_CREDITS.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Special; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M2M_WPQ_CYCLES_SPEC_CREDITS.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Special; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M2M_WPQ_CYCLES_SPEC_CREDITS.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "FaST wire asserted; Vertical",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M2M_FAST_ASSERTED.VERT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "FaST wire asserted; Horizontal",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M2M_FAST_ASSERTED.HORZ",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Regular; Channel 0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x43",
+ "EventName": "UNC_M2M_RPQ_CYCLES_REG_CREDITS.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Regular; Channel 1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x43",
+ "EventName": "UNC_M2M_RPQ_CYCLES_REG_CREDITS.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Regular; Channel 2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x43",
+ "EventName": "UNC_M2M_RPQ_CYCLES_REG_CREDITS.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full; Read Credit Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.RDCRD0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full; Write Credit Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCRD0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full; Write Compare Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCMP0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full; Read Credit Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.RDCRD1",
+ "PerPkg": "1",
+ "UMask": "0x88",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full; Write Credit Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCRD1",
+ "PerPkg": "1",
+ "UMask": "0x90",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full; Write Compare Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCMP1",
+ "PerPkg": "1",
+ "UMask": "0xA0",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.ALL",
"PerPkg": "1",
- "PublicDescription": "BL Egress (to CMS) Occupancy; All",
"UMask": "0x03",
"Unit": "M2M"
},
{
- "BriefDescription": "Prefetches generated by the flow control queue of the M3UPI unit.",
- "Counter": "0,1,2",
- "EventCode": "0x29",
- "EventName": "UNC_M3UPI_UPI_PREFETCH_SPAWN",
+ "BriefDescription": "AK Egress (to CMS) Not Empty; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.CMS0",
"PerPkg": "1",
- "PublicDescription": "Count cases where flow control queue that sits between the Intel Ultra Path Interconnect (UPI) and the mesh spawns a prefetch to the iMC (Memory Controller)",
- "Unit": "M3UPI"
+ "UMask": "0x01",
+ "Unit": "M2M"
},
{
- "BriefDescription": "Clocks of the Intel Ultra Path Interconnect (UPI)",
+ "BriefDescription": "AK Egress (to CMS) Not Empty; Common Mesh Stop - Far Side",
"Counter": "0,1,2,3",
- "EventCode": "0x1",
- "EventName": "UNC_UPI_CLOCKTICKS",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.CMS1",
"PerPkg": "1",
- "PublicDescription": "Counts clockticks of the fixed frequency clock controlling the Intel Ultra Path Interconnect (UPI). This clock runs at1/8th the 'GT/s' speed of the UPI link. For example, a 9.6GT/s link will have a fixed Frequency of 1.2 Ghz.",
- "Unit": "UPI LL"
+ "UMask": "0x02",
+ "Unit": "M2M"
},
{
- "BriefDescription": "Data Response packets that go direct to core",
+ "BriefDescription": "AK Egress (to CMS) Not Empty; Read Credit Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.RDCRD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty; Write Credit Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.WRCRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty; Write Compare Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.WRCMP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations; Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations; Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations; Read Credit Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.RDCRD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations; Write Credit Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.WRCRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations; Write Compare Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.WRCMP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations; Prefetch Read Cam Hit",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.PREF_RD_CAM_HIT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy; Common Mesh Stop - Near Side",
"Counter": "0,1,2,3",
"EventCode": "0x12",
- "EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2C",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.CMS0",
"PerPkg": "1",
- "PublicDescription": "Counts Data Response (DRS) packets that attempted to go direct to core bypassing the CHA.",
- "UMask": "0x1",
- "Unit": "UPI LL"
+ "UMask": "0x01",
+ "Unit": "M2M"
},
{
- "BriefDescription": "Data Response packets that go direct to Intel UPI",
+ "BriefDescription": "AK Egress (to CMS) Occupancy; Common Mesh Stop - Far Side",
"Counter": "0,1,2,3",
"EventCode": "0x12",
- "EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2U",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.CMS1",
"PerPkg": "1",
- "PublicDescription": "Counts Data Response (DRS) packets that attempted to go direct to Intel Ultra Path Interconnect (UPI) bypassing the CHA .",
- "UMask": "0x2",
- "Unit": "UPI LL"
+ "UMask": "0x02",
+ "Unit": "M2M"
},
{
- "BriefDescription": "Cycles Intel UPI is in L1 power mode (shutdown)",
+ "BriefDescription": "AK Egress (to CMS) Occupancy; Read Credit Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.RDCRD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy; Write Credit Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.WRCRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy; Write Compare Request",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.WRCMP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy; All",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Sideband",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6B",
+ "EventName": "UNC_M2M_TxC_AK_SIDEBAND.RD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Sideband",
"Counter": "0,1,2,3",
+ "EventCode": "0x6B",
+ "EventName": "UNC_M2M_TxC_AK_SIDEBAND.WR",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty; VNA",
+ "Counter": "0,1,2",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty; VN0 REQ Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_REQ",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty; VN0 SNP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_SNP",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty; VN0 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty; VN1 REQ Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_REQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty; VN1 SNP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_SNP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty; VN1 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty; VN1 REQ Messages",
+ "Counter": "0,1,2",
"EventCode": "0x21",
- "EventName": "UNC_UPI_L1_POWER_CYCLES",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_RSP",
"PerPkg": "1",
- "PublicDescription": "Counts cycles when the Intel Ultra Path Interconnect (UPI) is in L1 power mode. L1 is a mode that totally shuts down the UPI link. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another, this event only coutns when both links are shutdown.",
- "Unit": "UPI LL"
+ "UMask": "0x10",
+ "Unit": "M3UPI"
},
{
- "BriefDescription": "Cycles the Rx of the Intel UPI is in L0p power mode",
+ "BriefDescription": "UPI0 BL Credits Empty; VN1 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_NCS_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty; VN1 SNP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_WB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of Snoop Targets; Peer UPI0 on VN0",
+ "EventCode": "0x3C",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN0_PEER_UPI0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of Snoop Targets; Peer UPI1 on VN0",
+ "EventCode": "0x3C",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN0_PEER_UPI1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of Snoop Targets; Peer UPI0 on VN1",
+ "EventCode": "0x3C",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN1_PEER_UPI0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of Snoop Targets; Peer UPI1 on VN1",
+ "EventCode": "0x3C",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN1_PEER_UPI1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty; VNA Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x22",
+ "EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty; Writebacks",
+ "Counter": "0,1,2",
+ "EventCode": "0x22",
+ "EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.WB",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty; Requests",
+ "Counter": "0,1,2",
+ "EventCode": "0x22",
+ "EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.REQ",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty; Snoops",
+ "Counter": "0,1,2",
+ "EventCode": "0x22",
+ "EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.SNP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of uclks in domain",
+ "Counter": "0,1,2",
+ "EventCode": "0x1",
+ "EventName": "UNC_M3UPI_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "D2U Sent",
+ "Counter": "0,1,2",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M3UPI_D2U_SENT",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty; IIO0 and IIO1 share the same ring destination. (1 VN0 credit only)",
+ "Counter": "0,1,2",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO0_IIO1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty; IIO2",
+ "Counter": "0,1,2",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty; IIO3",
+ "Counter": "0,1,2",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO3_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty; IIO4",
+ "Counter": "0,1,2",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO4_NCB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty; IIO5",
+ "Counter": "0,1,2",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO5_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty; All IIO targets for NCS are in single mask. ORs them together",
+ "Counter": "0,1,2",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty; Selected M2p BL NCS credits",
+ "Counter": "0,1,2",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.NCS_SEL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received; AD - Slot 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x3E",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received; AD - Slot 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x3E",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received; AD - Slot 2",
+ "Counter": "0,1,2",
+ "EventCode": "0x3E",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received; BL - Slot 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x3E",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.BL_SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received; AK - Slot 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x3E",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AK_SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received; AK - Slot 2",
+ "Counter": "0,1,2",
+ "EventCode": "0x3E",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AK_SLOT2",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD; VN0 REQ Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD; VN0 SNP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD; VN0 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD; VN0 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD; VN1 REQ Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_REQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD; VN1 SNP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_SNP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD; VN1 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD; VN1 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_WB",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD FlowQ Bypass",
+ "Counter": "0,1,2",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD FlowQ Bypass",
+ "Counter": "0,1,2",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD FlowQ Bypass",
+ "Counter": "0,1,2",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD FlowQ Bypass",
+ "Counter": "0,1,2",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.BL_EARLY_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty; VN0 REQ Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty; VN0 SNP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty; VN0 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty; VN0 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty; VN1 REQ Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_REQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty; VN1 SNP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_SNP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty; VN1 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty; VN1 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_WB",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts; VN0 REQ Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts; VN0 SNP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts; VN0 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts; VN0 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts; VN1 REQ Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_REQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts; VN1 SNP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_SNP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts; VN1 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy; VN0 REQ Messages",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy; VN0 SNP Messages",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy; VN0 RSP Messages",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy; VN0 WB Messages",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy; VN1 REQ Messages",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_REQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy; VN1 SNP Messages",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_SNP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy; VN1 RSP Messages",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - Credit Available; VN0 REQ Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x34",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_CRD_AVAIL.VN0_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - Credit Available; VN0 SNP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x34",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_CRD_AVAIL.VN0_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - Credit Available; VN0 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x34",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_CRD_AVAIL.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - Credit Available; VN1 REQ Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x34",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_CRD_AVAIL.VN1_REQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - Credit Available; VN1 SNP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x34",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_CRD_AVAIL.VN1_SNP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - Credit Available; VN1 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x34",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_CRD_AVAIL.VN1_WB",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - New Message; VN0 REQ Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x33",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NEW_MSG.VN0_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - New Message; VN0 SNP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x33",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NEW_MSG.VN0_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - New Message; VN0 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x33",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NEW_MSG.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - New Message; VN1 REQ Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x33",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NEW_MSG.VN1_REQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - New Message; VN1 SNP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x33",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NEW_MSG.VN1_SNP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - New Message; VN1 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x33",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NEW_MSG.VN1_WB",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - No Credit; VN0 REQ Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x32",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN0_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - No Credit; VN0 SNP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x32",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN0_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - No Credit; VN0 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x32",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - No Credit; VN0 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x32",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - No Credit; VN1 REQ Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x32",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN1_REQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - No Credit; VN1 SNP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x32",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN1_SNP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - No Credit; VN1 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x32",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - No Credit; VN1 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x32",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN1_WB",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AK Flow Q Inserts",
+ "Counter": "0,1,2",
+ "EventCode": "0x2F",
+ "EventName": "UNC_M3UPI_TxC_AK_FLQ_INSERTS",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AK Flow Q Occupancy",
+ "EventCode": "0x1E",
+ "EventName": "UNC_M3UPI_TxC_AK_FLQ_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL; VN0 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL; VN0 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL; VN0 NCB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL; VN0 NCS Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL; VN1 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL; VN1 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_WB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL; VN1 NCS Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL; VN1 NCB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty; VN0 REQ Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty; VN0 SNP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty; VN0 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty; VN0 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty; VN1 REQ Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_REQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty; VN1 SNP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_SNP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty; VN1 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty; VN1 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_WB",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts; VN0 NCS Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts; VN0 NCB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts; VN0 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts; VN0 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts; VN1_NCB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts; VN1_NCS Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_WB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts; VN1 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts; VN1 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy; VN0 RSP Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy; VN0 WB Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy; VN0 NCB Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy; VN0 NCS Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy; VN1 RSP Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy; VN1 WB Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_WB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy; VN1_NCS Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy; VN1_NCB Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for BL - New Message; VN0 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x38",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NEW_MSG.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for BL - New Message; VN0 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x38",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NEW_MSG.VN0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for BL - New Message; VN0 NCS Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x38",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NEW_MSG.VN0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for BL - New Message; VN1 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x38",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NEW_MSG.VN1_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for BL - New Message; VN1 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x38",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NEW_MSG.VN1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for BL - New Message; VN1 NCB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x38",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NEW_MSG.VN1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD Failed - No Credit; VN0 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x37",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD Failed - No Credit; VN0 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x37",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD Failed - No Credit; VN0 NCB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x37",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD Failed - No Credit; VN0 NCS Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x37",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD Failed - No Credit; VN1 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x37",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD Failed - No Credit; VN1 WB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x37",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN1_WB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD Failed - No Credit; VN1 NCS Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x37",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD Failed - No Credit; VN1 NCB Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x37",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.WB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.WB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.WB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.WB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of Snoop Targets; CHA on VN0",
+ "EventCode": "0x3C",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN0_CHA",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of Snoop Targets; CHA on VN1",
+ "EventCode": "0x3C",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN1_CHA",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of Snoop Targets; Non Idle cycles on VN0",
+ "EventCode": "0x3C",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN0_NON_IDLE",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of Snoop Targets; Non Idle cycles on VN1",
+ "EventCode": "0x3C",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN1_NON_IDLE",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Snoop Arbitration; FlowQ Won",
+ "Counter": "0,1,2",
+ "EventCode": "0x3D",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP2_VN1.VN0_SNPFP_NONSNP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Snoop Arbitration; FlowQ Won",
+ "Counter": "0,1,2",
+ "EventCode": "0x3D",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP2_VN1.VN1_SNPFP_NONSNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Snoop Arbitration; FlowQ SnpF Won",
+ "Counter": "0,1,2",
+ "EventCode": "0x3D",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP2_VN1.VN0_SNPFP_VN2SNP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Snoop Arbitration; FlowQ SnpF Won",
+ "Counter": "0,1,2",
+ "EventCode": "0x3D",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP2_VN1.VN1_SNPFP_VN0SNP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 2",
+ "Counter": "0,1,2",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 3",
+ "Counter": "0,1,2",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 4",
+ "Counter": "0,1,2",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 5",
+ "Counter": "0,1,2",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 0",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 1",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 2",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 4",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 5",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CREDITS_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CREDITS_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 2",
+ "Counter": "0,1,2",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CREDITS_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 3",
+ "Counter": "0,1,2",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CREDITS_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 4",
+ "Counter": "0,1,2",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CREDITS_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 5",
+ "Counter": "0,1,2",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CREDITS_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2",
+ "EventCode": "0xC0",
+ "EventName": "UNC_M3UPI_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements; Up",
+ "Counter": "0,1,2",
+ "EventCode": "0xAE",
+ "EventName": "UNC_M3UPI_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements; Down",
+ "Counter": "0,1,2",
+ "EventCode": "0xAE",
+ "EventName": "UNC_M3UPI_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Left and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M3UPI_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Left and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M3UPI_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Right and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M3UPI_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Right and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M3UPI_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Left and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0xA9",
+ "EventName": "UNC_M3UPI_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Left and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0xA9",
+ "EventName": "UNC_M3UPI_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Right and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0xA9",
+ "EventName": "UNC_M3UPI_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Right and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0xA9",
+ "EventName": "UNC_M3UPI_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Left and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M3UPI_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Left and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M3UPI_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Right and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M3UPI_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Right and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M3UPI_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use; Left",
+ "Counter": "0,1,2",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M3UPI_HORZ_RING_IV_IN_USE.LEFT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use; Right",
+ "Counter": "0,1,2",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M3UPI_HORZ_RING_IV_IN_USE.RIGHT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; AD",
+ "Counter": "0,1,2",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; AK",
+ "Counter": "0,1,2",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; BL",
+ "Counter": "0,1,2",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; IV",
+ "Counter": "0,1,2",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; AD",
+ "Counter": "0,1,2",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; Acknowledgements to core",
+ "Counter": "0,1,2",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; Data Responses to core",
+ "Counter": "0,1,2",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; Snoops of processor's cache",
+ "Counter": "0,1,2",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; AD",
+ "Counter": "0,1,2",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; AK",
+ "Counter": "0,1,2",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; BL",
+ "Counter": "0,1,2",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; IV",
+ "Counter": "0,1,2",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; Acknowledgements to Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; AD",
+ "Counter": "0,1,2",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; Acknowledgements to core",
+ "Counter": "0,1,2",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; Data Responses to core",
+ "Counter": "0,1,2",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; Snoops of processor's cache",
+ "Counter": "0,1,2",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Source Throttle",
+ "Counter": "0,1,2",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M3UPI_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0; NCS on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1; NCS on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous; Parallel Bias to VN0",
+ "Counter": "0,1,2",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.PAR_BIAS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous; Parallel Bias to VN1",
+ "Counter": "0,1,2",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.PAR_BIAS_VN1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous; No Progress on Pending AD VN0",
+ "Counter": "0,1,2",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_AD_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous; No Progress on Pending AD VN1",
+ "Counter": "0,1,2",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_AD_VN1",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous; No Progress on Pending BL VN0",
+ "Counter": "0,1,2",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_BL_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous; No Progress on Pending BL VN1",
+ "Counter": "0,1,2",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_BL_VN1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous; AD, BL Parallel Win",
+ "Counter": "0,1,2",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.ADBL_PARALLEL_WIN",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0; NCS on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1; NCS on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0; NCS on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1; NCS on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Ingress Queue Bypasses; AD to Slot 0 on Idle",
+ "Counter": "0,1,2",
+ "EventCode": "0x40",
+ "EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S0_IDLE",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Ingress Queue Bypasses; AD to Slot 0 on BL Arb",
+ "Counter": "0,1,2",
+ "EventCode": "0x40",
+ "EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S0_BL_ARB",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Ingress Queue Bypasses; AD + BL to Slot 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x40",
+ "EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S1_BL_SLOT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Ingress Queue Bypasses; AD + BL to Slot 2",
+ "Counter": "0,1,2",
+ "EventCode": "0x40",
+ "EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S2_BL_SLOT",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message lost contest for flit; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN0.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message lost contest for flit; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN0.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message lost contest for flit; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN0.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message lost contest for flit; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN0.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message lost contest for flit; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN0.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message lost contest for flit; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN0.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message lost contest for flit; NCS on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN0.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message lost contest for flit; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN1.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message lost contest for flit; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN1.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message lost contest for flit; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN1.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message lost contest for flit; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN1.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message lost contest for flit; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN1.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message lost contest for flit; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN1.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message lost contest for flit; NCS on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN1.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Credit Events; Any In BGF FIFO",
+ "Counter": "0,1,2",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_MISC.ANY_BGF_FIFO",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Credit Events; Any in BGF Path",
+ "Counter": "0,1,2",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_MISC.ANY_BGF_PATH",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Credit Events; No D2K For Arb",
+ "Counter": "0,1,2",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_MISC.NO_D2K_FOR_ARB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy; VNA In Use",
+ "Counter": "0,1,2",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.VNA_IN_USE",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy; Packets in BGF FIFO",
+ "Counter": "0,1,2",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.FLITS_IN_FIFO",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy; Packets in BGF Path",
+ "Counter": "0,1,2",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.FLITS_IN_PATH",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy; Transmit Credits",
+ "Counter": "0,1,2",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.TxQ_CRD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy; D2K Credits",
+ "Counter": "0,1,2",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.D2K_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy",
+ "Counter": "0,1,2",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.P1P_TOTAL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy",
+ "Counter": "0,1,2",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.P1P_FIFO",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; NCS on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; NCS on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Data Flit Not Sent; All",
+ "Counter": "0,1,2",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_DATA_NOT_SENT.ALL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Data Flit Not Sent; No BGF Credits",
+ "Counter": "0,1,2",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_DATA_NOT_SENT.NO_BGF",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Data Flit Not Sent; No TxQ Credits",
+ "Counter": "0,1,2",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_DATA_NOT_SENT.NO_TXQ",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence; Wait on Pump 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P0_WAIT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence; Wait on Pump 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1_WAIT",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0,1,2",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_TO_LIMBO",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0,1,2",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_BUSY",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0,1,2",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_AT_LIMIT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0,1,2",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_HOLD_P0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "Counter": "0,1,2",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_FIFO_FULL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_FLITS_MISC",
+ "Counter": "0,1,2",
+ "EventCode": "0x5A",
+ "EventName": "UNC_M3UPI_RxC_FLITS_MISC",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit; One Message",
+ "Counter": "0,1,2",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SENT.1_MSG",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit; Two Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SENT.2_MSGS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit; Three Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SENT.3_MSGS",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit; One Message in non-VNA",
+ "Counter": "0,1,2",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SENT.1_MSG_VNX",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit; All",
+ "Counter": "0,1,2",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.ALL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit; Needs Data Flit",
+ "Counter": "0,1,2",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.NEED_DATA",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit; Wait on Pump 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P0_WAIT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit; Wait on Pump 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_WAIT",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit; Don't Need Pump 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit; Don't Need Pump 1 - Bubble",
+ "Counter": "0,1,2",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ_BUT_BUBBLE",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit; Don't Need Pump 1 - Not Avail",
+ "Counter": "0,1,2",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ_NOT_AVAIL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1; Acumullate",
+ "Counter": "0,1,2",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1; Accumulate Ready",
+ "Counter": "0,1,2",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM_READ",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1; Accumulate Wasted",
+ "Counter": "0,1,2",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM_WASTED",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1; Run-Ahead - Blocked",
+ "Counter": "0,1,2",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_BLOCKED",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1; Run-Ahead - Message",
+ "Counter": "0,1,2",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_MSG",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1; Parallel Ok",
+ "Counter": "0,1,2",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.PAR",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1; Parallel Message",
+ "Counter": "0,1,2",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.PAR_MSG",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1; Parallel Flit Finished",
+ "Counter": "0,1,2",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.PAR_FLIT",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 2; Rate-matching Stall",
+ "Counter": "0,1,2",
+ "EventCode": "0x54",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.RMSTALL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 2; Rate-matching Stall - No Message",
+ "Counter": "0,1,2",
+ "EventCode": "0x54",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.RMSTALL_NOMSG",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent; All",
+ "Counter": "0,1,2",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.ALL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent; No BGF Credits",
+ "Counter": "0,1,2",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.NO_BGF_CRD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent; No TxQ Credits",
+ "Counter": "0,1,2",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.NO_TXQ_CRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent; No BGF Credits + No Extra Message Slotted",
+ "Counter": "0,1,2",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.NO_BGF_NO_MSG",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent; No TxQ Credits + No Extra Message Slotted",
+ "Counter": "0,1,2",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.NO_TXQ_NO_MSG",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent; Sent - One Slot Taken",
+ "Counter": "0,1,2",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.ONE_TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent; Sent - Two Slots Taken",
+ "Counter": "0,1,2",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.TWO_TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent; Sent - Three Slots Taken",
+ "Counter": "0,1,2",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.THREE_TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held; VN0",
+ "Counter": "0,1,2",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_HELD.VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held; VN1",
+ "Counter": "0,1,2",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_HELD.VN1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held; Parallel Attempt",
+ "Counter": "0,1,2",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_ATTEMPT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held; Parallel Success",
+ "Counter": "0,1,2",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_SUCCESS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held; Parallel AD Lost",
+ "Counter": "0,1,2",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_AD_LOST",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held; Parallel BL Lost",
+ "Counter": "0,1,2",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_BL_LOST",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held; Can't Slot AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_HELD.CANT_SLOT_AD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held; Can't Slot BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_HELD.CANT_SLOT_BL",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; NCS on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; NCS on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; NCS on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; NCS on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit; NCS on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit; REQ on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit; SNP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_SNP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit; RSP on AD",
+ "Counter": "0,1,2",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_RSP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit; RSP on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_RSP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit; WB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit; NCB on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit; NCS on BL",
+ "Counter": "0,1,2",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "SMI3 Prefetch Messages; Arrived",
+ "Counter": "0,1,2",
+ "EventCode": "0x62",
+ "EventName": "UNC_M3UPI_RxC_SMI3_PFTCH.ARRIVED",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "SMI3 Prefetch Messages; Lost Arbitration",
+ "Counter": "0,1,2",
+ "EventCode": "0x62",
+ "EventName": "UNC_M3UPI_RxC_SMI3_PFTCH.ARB_LOST",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "SMI3 Prefetch Messages; Slotted",
+ "Counter": "0,1,2",
+ "EventCode": "0x62",
+ "EventName": "UNC_M3UPI_RxC_SMI3_PFTCH.SLOTTED",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "SMI3 Prefetch Messages; Dropped - Old",
+ "Counter": "0,1,2",
+ "EventCode": "0x62",
+ "EventName": "UNC_M3UPI_RxC_SMI3_PFTCH.DROP_OLD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "SMI3 Prefetch Messages; Dropped - Wrap",
+ "Counter": "0,1,2",
+ "EventCode": "0x62",
+ "EventName": "UNC_M3UPI_RxC_SMI3_PFTCH.DROP_WRAP",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits; Used",
+ "Counter": "0,1,2",
+ "EventCode": "0x5B",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.USED",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits; Corrected",
+ "Counter": "0,1,2",
+ "EventCode": "0x5B",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.CORRECTED",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits; Level &lt; 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x5B",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.LT1",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits; Level &lt; 4",
+ "Counter": "0,1,2",
+ "EventCode": "0x5B",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.LT4",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits; Level &lt; 5",
+ "Counter": "0,1,2",
+ "EventCode": "0x5B",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.LT5",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits; Any In Use",
+ "Counter": "0,1,2",
+ "EventCode": "0x5B",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.ANY_IN_USE",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M3UPI_RxR_BUSY_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M3UPI_RxR_BUSY_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M3UPI_RxR_BUSY_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M3UPI_RxR_BUSY_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; AD - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; AK - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; BL - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; IV - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; AD - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; BL - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AK - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; IV - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; IFV - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.IFV",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; AD - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; AK - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; BL - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; IV - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; AD - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; BL - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; AD - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; AK - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; BL - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; IV - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; AD - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; BL - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 0",
+ "Counter": "0,1,2",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 1",
+ "Counter": "0,1,2",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 2",
+ "Counter": "0,1,2",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 3",
+ "Counter": "0,1,2",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 4",
+ "Counter": "0,1,2",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 5",
+ "Counter": "0,1,2",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; AD - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; AK - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; BL - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; AD - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; BL - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; AD - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; AK - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; BL - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; IV - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; AD - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; BL - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AD - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x96",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AK - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x96",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; BL - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x96",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; IV - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x96",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AD - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0x96",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; BL - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0x96",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AD - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x97",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AK - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x97",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; BL - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x97",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; IV - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x97",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AD - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0x97",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; BL - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0x97",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; AD - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x95",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; AK - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x95",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; BL - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x95",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; IV - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x95",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; AD - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0x95",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; BL - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0x95",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; AD - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x99",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; AK - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x99",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; BL - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x99",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; IV - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x99",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; AD - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0x99",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; BL - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0x99",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; AD - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x94",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; AK - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x94",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; BL - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x94",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; IV - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x94",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; AD - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0x94",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; BL - Credit",
+ "Counter": "0,1,2",
+ "EventCode": "0x94",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; AD - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M3UPI_TxR_HORZ_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; AK - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M3UPI_TxR_HORZ_STARVED.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; BL - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M3UPI_TxR_HORZ_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; IV - Bounce",
+ "Counter": "0,1,2",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M3UPI_TxR_HORZ_STARVED.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; IV",
+ "Counter": "0,1,2",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AD - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AK - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; BL - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; IV",
+ "Counter": "0,1,2",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AD - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AK - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; BL - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AD - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x93",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AK - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x93",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; BL - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x93",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; IV",
+ "Counter": "0,1,2",
+ "EventCode": "0x93",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AD - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x93",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AK - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x93",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; BL - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x93",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AD - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x91",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AK - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x91",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; BL - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x91",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; IV",
+ "Counter": "0,1,2",
+ "EventCode": "0x91",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AD - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x91",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AK - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x91",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; BL - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x91",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AD - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AK - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; BL - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AD - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AK - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; BL - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AD - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AK - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; BL - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; IV",
+ "Counter": "0,1,2",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AD - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AK - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; BL - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AD - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AK - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; BL - Agent 0",
+ "Counter": "0,1,2",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AD - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AK - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; BL - Agent 1",
+ "Counter": "0,1,2",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Up and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M3UPI_VERT_RING_AD_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Up and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M3UPI_VERT_RING_AD_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Down and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M3UPI_VERT_RING_AD_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Down and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M3UPI_VERT_RING_AD_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Up and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0xA8",
+ "EventName": "UNC_M3UPI_VERT_RING_AK_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Up and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0xA8",
+ "EventName": "UNC_M3UPI_VERT_RING_AK_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Down and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0xA8",
+ "EventName": "UNC_M3UPI_VERT_RING_AK_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Down and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0xA8",
+ "EventName": "UNC_M3UPI_VERT_RING_AK_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Up and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M3UPI_VERT_RING_BL_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Up and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M3UPI_VERT_RING_BL_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Down and Even",
+ "Counter": "0,1,2",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M3UPI_VERT_RING_BL_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Down and Odd",
+ "Counter": "0,1,2",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M3UPI_VERT_RING_BL_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use; Up",
+ "Counter": "0,1,2",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M3UPI_VERT_RING_IV_IN_USE.UP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use; Down",
+ "Counter": "0,1,2",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M3UPI_VERT_RING_IV_IN_USE.DN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "D2C Sent",
+ "Counter": "0,1,2",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M3UPI_D2C_SENT",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "FaST wire asserted; Vertical",
+ "Counter": "0,1,2",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M3UPI_FAST_ASSERTED.VERT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "FaST wire asserted; Horizontal",
+ "Counter": "0,1,2",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M3UPI_FAST_ASSERTED.HORZ",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit",
+ "Counter": "0,1,2",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SENT.SLOTS_1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit",
+ "Counter": "0,1,2",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SENT.SLOTS_2",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit",
+ "Counter": "0,1,2",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SENT.SLOTS_3",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; IV",
+ "Counter": "0,1,2",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; IV",
+ "Counter": "0,1,2",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty; VNA",
+ "Counter": "0,1,2",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty; VN0 REQ Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty; VN0 RSP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_NCS_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty; VN0 SNP Messages",
+ "Counter": "0,1,2",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Received; VLW",
+ "Counter": "0,1",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.VLW_RCVD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received; MSI",
+ "Counter": "0,1",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.MSI_RCVD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received; IPI",
+ "Counter": "0,1",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.IPI_RCVD",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received",
+ "Counter": "0,1",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.DOORBELL_RCVD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received",
+ "Counter": "0,1",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.INT_PRIO",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "IDI Lock/SplitLock Cycles",
+ "Counter": "0,1",
+ "EventCode": "0x44",
+ "EventName": "UNC_U_LOCK_CYCLES",
+ "PerPkg": "1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Cycles PHOLD Assert to Ack; Assert to ACK",
+ "Counter": "0,1",
+ "EventCode": "0x45",
+ "EventName": "UNC_U_PHOLD_CYCLES.ASSERT_TO_ACK",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_RACU_DRNG.RDRAND",
+ "Counter": "0,1",
+ "EventCode": "0x4C",
+ "EventName": "UNC_U_RACU_DRNG.RDRAND",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_RACU_DRNG.RDSEED",
+ "Counter": "0,1",
+ "EventCode": "0x4C",
+ "EventName": "UNC_U_RACU_DRNG.RDSEED",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_RACU_DRNG.PFTCH_BUF_EMPTY",
+ "Counter": "0,1",
+ "EventCode": "0x4C",
+ "EventName": "UNC_U_RACU_DRNG.PFTCH_BUF_EMPTY",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "RACU Request",
+ "Counter": "0,1",
+ "EventCode": "0x46",
+ "EventName": "UNC_U_RACU_REQUESTS",
+ "PerPkg": "1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Clockticks in the UBOX using a dedicated 48-bit Fixed Counter",
+ "Counter": "FIXED",
+ "EventCode": "0xff",
+ "EventName": "UNC_U_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.CORE_GTONE",
"Counter": "0,1,2,3",
- "EventCode": "0x25",
- "EventName": "UNC_UPI_RxL0P_POWER_CYCLES",
+ "Deprecated": "1",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_CORE_SNP.CORE_GTONE",
"PerPkg": "1",
- "PublicDescription": "Counts cycles when the the receive side (Rx) of the Intel Ultra Path Interconnect(UPI) is in L0p power mode. L0p is a mode where we disable 60% of the UPI lanes, decreasing our bandwidth in order to save power.",
- "Unit": "UPI LL"
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.CORE_GTONE",
+ "UMask": "0x42",
+ "Unit": "CHA"
},
{
- "BriefDescription": "FLITs received which bypassed the Slot0 Receive Buffer",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.EVICT_GTONE",
"Counter": "0,1,2,3",
- "EventCode": "0x31",
- "EventName": "UNC_UPI_RxL_BYPASSED.SLOT0",
+ "Deprecated": "1",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_CORE_SNP.EVICT_GTONE",
+ "PerPkg": "1",
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.EVICT_GTONE",
+ "UMask": "0x82",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_DIR_LOOKUP.NO_SNP",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_DIR_LOOKUP.NO_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_DIR_LOOKUP.NO_SNP",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_DIR_LOOKUP.SNP",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_DIR_LOOKUP.SNP",
"PerPkg": "1",
- "PublicDescription": "Counts incoming FLITs (FLow control unITs) which bypassed the slot0 RxQ buffer (Receive Queue) and passed directly to the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of FLITs transfered, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_DIR_LOOKUP.SNP",
"UMask": "0x1",
- "Unit": "UPI LL"
+ "Unit": "CHA"
},
{
- "BriefDescription": "FLITs received which bypassed the Slot0 Receive Buffer",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_DIR_UPDATE.HA",
"Counter": "0,1,2,3",
- "EventCode": "0x31",
- "EventName": "UNC_UPI_RxL_BYPASSED.SLOT1",
+ "Deprecated": "1",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_DIR_UPDATE.HA",
+ "PerPkg": "1",
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_DIR_UPDATE.HA",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_DIR_UPDATE.TOR",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_DIR_UPDATE.TOR",
"PerPkg": "1",
- "PublicDescription": "Counts incoming FLITs (FLow control unITs) which bypassed the slot1 RxQ buffer (Receive Queue) and passed directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of FLITs transfered, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_DIR_UPDATE.TOR",
"UMask": "0x2",
- "Unit": "UPI LL"
+ "Unit": "CHA"
},
{
- "BriefDescription": "FLITs received which bypassed the Slot0 Recieve Buffer",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_HIT.EX_RDS",
"Counter": "0,1,2,3",
- "EventCode": "0x31",
- "EventName": "UNC_UPI_RxL_BYPASSED.SLOT2",
+ "Deprecated": "1",
+ "EventCode": "0x5F",
+ "EventName": "UNC_H_HITME_HIT.EX_RDS",
"PerPkg": "1",
- "PublicDescription": "Counts incoming FLITs (FLow control unITs) whcih bypassed the slot2 RxQ buffer (Receive Queue) and passed directly to the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of FLITs transfered, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
- "UMask": "0x4",
- "Unit": "UPI LL"
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_HIT.EX_RDS",
+ "UMask": "0x1",
+ "Unit": "CHA"
},
{
- "BriefDescription": "Valid data FLITs received from any slot",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_MISC.RFO_HIT_S",
"Counter": "0,1,2,3",
- "EventCode": "0x3",
- "EventName": "UNC_UPI_RxL_FLITS.ALL_DATA",
+ "Deprecated": "1",
+ "EventCode": "0x39",
+ "EventName": "UNC_H_MISC.RFO_HIT_S",
"PerPkg": "1",
- "PublicDescription": "Counts valid data FLITs (80 bit FLow control unITs: 64bits of data) received from any of the 3 Intel Ultra Path Interconnect (UPI) Receive Queue slots on this UPI unit.",
- "UMask": "0x0F",
- "Unit": "UPI LL"
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_MISC.RFO_HIT_S",
+ "UMask": "0x8",
+ "Unit": "CHA"
},
{
- "BriefDescription": "Null FLITs received from any slot",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_REQUESTS.INVITOE_LOCAL",
"Counter": "0,1,2,3",
- "EventCode": "0x3",
- "EventName": "UNC_UPI_RxL_FLITS.ALL_NULL",
+ "Deprecated": "1",
+ "EventCode": "0x50",
+ "EventName": "UNC_H_REQUESTS.INVITOE_LOCAL",
"PerPkg": "1",
- "PublicDescription": "Counts null FLITs (80 bit FLow control unITs) received from any of the 3 Intel Ultra Path Interconnect (UPI) Receive Queue slots on this UPI unit.",
- "UMask": "0x27",
- "Unit": "UPI LL"
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_REQUESTS.INVITOE_LOCAL",
+ "UMask": "0x10",
+ "Unit": "CHA"
},
{
- "BriefDescription": "Protocol header and credit FLITs received from any slot",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_REQUESTS.INVITOE_REMOTE",
"Counter": "0,1,2,3",
- "EventCode": "0x3",
- "EventName": "UNC_UPI_RxL_FLITS.NON_DATA",
+ "Deprecated": "1",
+ "EventCode": "0x50",
+ "EventName": "UNC_H_REQUESTS.INVITOE_REMOTE",
"PerPkg": "1",
- "PublicDescription": "Counts protocol header and credit FLITs (80 bit FLow control unITs) received from any of the 3 UPI slots on this UPI unit.",
- "UMask": "0x97",
- "Unit": "UPI LL"
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_REQUESTS.INVITOE_REMOTE",
+ "UMask": "0x20",
+ "Unit": "CHA"
},
{
- "BriefDescription": "Cycles in which the Tx of the Intel Ultra Path Interconnect (UPI) is in L0p power mode",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_REQUESTS.READS",
"Counter": "0,1,2,3",
- "EventCode": "0x27",
- "EventName": "UNC_UPI_TxL0P_POWER_CYCLES",
+ "Deprecated": "1",
+ "EventCode": "0x50",
+ "EventName": "UNC_H_REQUESTS.READS",
"PerPkg": "1",
- "PublicDescription": "Counts cycles when the transmit side (Tx) of the Intel Ultra Path Interconnect(UPI) is in L0p power mode. L0p is a mode where we disable 60% of the UPI lanes, decreasing our bandwidth in order to save power.",
- "Unit": "UPI LL"
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_REQUESTS.READS",
+ "UMask": "0x3",
+ "Unit": "CHA"
},
{
- "BriefDescription": "FLITs that bypassed the TxL Buffer",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_REQUESTS.READS_LOCAL",
"Counter": "0,1,2,3",
- "EventCode": "0x41",
- "EventName": "UNC_UPI_TxL_BYPASSED",
+ "Deprecated": "1",
+ "EventCode": "0x50",
+ "EventName": "UNC_H_REQUESTS.READS_LOCAL",
"PerPkg": "1",
- "PublicDescription": "Counts incoming FLITs (FLow control unITs) which bypassed the TxL(transmit) FLIT buffer and pass directly out the UPI Link. Generally, when data is transmitted across the Intel Ultra Path Interconnect (UPI), it will bypass the TxQ and pass directly to the link. However, the TxQ will be used in L0p (Low Power) mode and (Link Layer Retry) LLR mode, increasing latency to transfer out to the link.",
- "Unit": "UPI LL"
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_REQUESTS.READS_LOCAL",
+ "UMask": "0x1",
+ "Unit": "CHA"
},
{
- "BriefDescription": "Null FLITs transmitted from any slot",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_REQUESTS.WRITES",
"Counter": "0,1,2,3",
- "EventCode": "0x2",
- "EventName": "UNC_UPI_TxL_FLITS.ALL_NULL",
+ "Deprecated": "1",
+ "EventCode": "0x50",
+ "EventName": "UNC_H_REQUESTS.WRITES",
"PerPkg": "1",
- "PublicDescription": "Counts null FLITs (80 bit FLow control unITs) transmitted via any of the 3 Intel Ulra Path Interconnect (UPI) slots on this UPI unit.",
- "UMask": "0x27",
- "Unit": "UPI LL"
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_REQUESTS.WRITES",
+ "UMask": "0xC",
+ "Unit": "CHA"
},
{
- "BriefDescription": "Valid Flits Sent; Data",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_REQUESTS.WRITES_LOCAL",
"Counter": "0,1,2,3",
- "EventCode": "0x2",
- "EventName": "UNC_UPI_TxL_FLITS.DATA",
+ "Deprecated": "1",
+ "EventCode": "0x50",
+ "EventName": "UNC_H_REQUESTS.WRITES_LOCAL",
"PerPkg": "1",
- "PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Count Data Flits (which consume all slots), but how much to count is based on Slot0-2 mask, so count can be 0-3 depending on which slots are enabled for counting..",
- "UMask": "0x8",
- "Unit": "UPI LL"
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_REQUESTS.WRITES_LOCAL",
+ "UMask": "0x4",
+ "Unit": "CHA"
},
{
- "BriefDescription": "Idle FLITs transmitted",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_INSERTS.IRQ",
"Counter": "0,1,2,3",
- "EventCode": "0x2",
- "EventName": "UNC_UPI_TxL_FLITS.IDLE",
+ "Deprecated": "1",
+ "EventCode": "0x13",
+ "EventName": "UNC_H_RxC_INSERTS.IRQ",
"PerPkg": "1",
- "PublicDescription": "Counts when the Intel Ultra Path Interconnect(UPI) transmits an idle FLIT(80 bit FLow control unITs). Every UPI cycle must be sending either data FLITs, protocol/credit FLITs or idle FLITs.",
- "UMask": "0x47",
- "Unit": "UPI LL"
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_INSERTS.IRQ",
+ "UMask": "0x1",
+ "Unit": "CHA"
},
{
- "BriefDescription": "Protocol header and credit FLITs transmitted across any slot",
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ1_REJECT.PA_MATCH",
"Counter": "0,1,2,3",
- "EventCode": "0x2",
- "EventName": "UNC_UPI_TxL_FLITS.NON_DATA",
+ "Deprecated": "1",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_RxC_IRQ1_REJECT.PA_MATCH",
"PerPkg": "1",
- "PublicDescription": "Counts protocol header and credit FLITs (80 bit FLow control unITs) transmitted across any of the 3 UPI (Ultra Path Interconnect) slots on this UPI unit.",
- "UMask": "0x97",
- "Unit": "UPI LL"
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ1_REJECT.PA_MATCH",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OCCUPANCY.IRQ",
+ "Deprecated": "1",
+ "EventCode": "0x11",
+ "EventName": "UNC_H_RxC_OCCUPANCY.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OCCUPANCY.IRQ",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSPIFWD",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x5C",
+ "EventName": "UNC_H_SNOOP_RESP.RSPIFWD",
+ "PerPkg": "1",
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSPIFWD",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSPSFWD",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x5C",
+ "EventName": "UNC_H_SNOOP_RESP.RSPSFWD",
+ "PerPkg": "1",
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSPSFWD",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSP_FWD_WB",
+ "Counter": "0,1,2,3",
+ "Deprecated": "1",
+ "EventCode": "0x5C",
+ "EventName": "UNC_H_SNOOP_RESP.RSP_FWD_WB",
+ "PerPkg": "1",
+ "PublicDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSP_FWD_WB",
+ "UMask": "0x20",
+ "Unit": "CHA"
}
]
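For context, perf's pmu-events build turns each entry above into a raw uncore event encoding from its EventCode and UMask fields. A minimal sketch of that mapping in Python, assuming the M3UPI PMU instance is exposed as uncore_m3upi_0 (the exact instance names vary by kernel and machine):

# Sketch only: build a perf event string from one entry shown above.
# "uncore_m3upi_0" is an assumed PMU instance name; real systems expose
# one numbered instance per M3UPI unit.
entry = {
    "EventCode": "0x95",
    "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.AD_BNC",
    "UMask": "0x01",
    "Unit": "M3UPI",
}

def to_perf_event(e, pmu="uncore_m3upi_0"):
    # perf accepts event= and umask= as hex terms on uncore PMUs.
    return f"{pmu}/event={e['EventCode']},umask={e['UMask']}/"

print(to_perf_event(entry))  # uncore_m3upi_0/event=0x95,umask=0x01/
# Illustrative use: perf stat -a -e 'uncore_m3upi_0/event=0x95,umask=0x01/'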
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/uncore-power.json b/tools/perf/pmu-events/arch/x86/skylakex/uncore-power.json
new file mode 100644
index 000000000000..64301a600ede
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/skylakex/uncore-power.json
@@ -0,0 +1,201 @@
+[
+ {
+ "BriefDescription": "pclk Cycles",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_P_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_CORE_TRANSITION_CYCLES",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "UNC_P_CORE_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_DEMOTIONS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x30",
+ "EventName": "UNC_P_DEMOTIONS",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 0 Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x75",
+ "EventName": "UNC_P_FIVR_PS_PS0_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 1 Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x76",
+ "EventName": "UNC_P_FIVR_PS_PS1_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 2 Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x77",
+ "EventName": "UNC_P_FIVR_PS_PS2_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 3 Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x78",
+ "EventName": "UNC_P_FIVR_PS_PS3_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Thermal Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Power Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x5",
+ "EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "IO P Limit Strongest Lower Limit Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x73",
+ "EventName": "UNC_P_FREQ_MIN_IO_P_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Cycles spent changing Frequency",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x74",
+ "EventName": "UNC_P_FREQ_TRANS_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_MCP_PROCHOT_CYCLES",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x6",
+ "EventName": "UNC_P_MCP_PROCHOT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Memory Phase Shedding Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2F",
+ "EventName": "UNC_P_MEMORY_PHASE_SHEDDING_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A",
+ "EventName": "UNC_P_PKG_RESIDENCY_C0_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C2E",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2B",
+ "EventName": "UNC_P_PKG_RESIDENCY_C2E_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_P_PKG_RESIDENCY_C3_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C6",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2D",
+ "EventName": "UNC_P_PKG_RESIDENCY_C6_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_PMAX_THROTTLED_CYCLES",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x7",
+ "EventName": "UNC_P_PMAX_THROTTLED_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State; C0 and C1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State; C3",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State; C6 and C7",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "External Prochot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA",
+ "EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Internal Prochot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x9",
+ "EventName": "UNC_P_PROCHOT_INTERNAL_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Total Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x72",
+ "EventName": "UNC_P_TOTAL_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "VR Hot",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x42",
+ "EventName": "UNC_P_VR_HOT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ }
+]
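As a quick illustration of the layout of the new uncore-power.json added above, the sketch below loads a pmu-events JSON file and prints one summary line per event; the path and field handling are assumptions drawn only from the entries visible in this diff.

# Sketch: summarize the PCU events defined in a pmu-events JSON file.
import json

path = "tools/perf/pmu-events/arch/x86/skylakex/uncore-power.json"  # illustrative path
with open(path) as f:
    events = json.load(f)

for e in events:
    # Some entries (e.g. UNC_P_CLOCKTICKS above) carry no EventCode field.
    code = e.get("EventCode", "fixed")
    print(f"{e['EventName']:45s} code={code:>5s} unit={e['Unit']}")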
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/virtual-memory.json b/tools/perf/pmu-events/arch/x86/skylakex/virtual-memory.json
index 792ca39f013a..dd334b416c57 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/virtual-memory.json
@@ -281,4 +281,4 @@
"SampleAfterValue": "100007",
"UMask": "0x20"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/tremontx/cache.json b/tools/perf/pmu-events/arch/x86/snowridgex/cache.json
index e142f294b42e..d674ee88c3a5 100644
--- a/tools/perf/pmu-events/arch/x86/tremontx/cache.json
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/cache.json
@@ -5,20 +5,20 @@
"Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "CORE_REJECT_L2Q.ANY",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of (demand and L1 prefetchers) core requests rejected by the L2 queue (L2Q) due to a full or nearly full condition, which likely indicates back pressure from L2Q. It also counts requests that would have gone directly to the External Queue (XQ), but are rejected due to a full or nearly full condition, indicating back pressure from the IDI link. The L2Q may also reject transactions from a core to ensure fairness between cores, or to delay a cores dirty eviction when the address conflicts incoming external snoops. (Note that L2 prefetcher requests that are dropped are not counted by this event). Counts on a per core basis.",
"SampleAfterValue": "200003"
},
{
- "BriefDescription": "Counts the number of first level data cacheline (dirty) evictions caused by misses, stores, and prefetches.",
+ "BriefDescription": "Counts the number of L1D cacheline (dirty) evictions caused by load misses, stores, and prefetches.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x51",
"EventName": "DL1.DIRTY_EVICTION",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts the number of first level data cacheline (dirty) evictions caused by misses, stores, and prefetches. Does not count evictions or dirty writebacks caused by snoops. Does not count a replacement unless a (dirty) line was written back.",
+ "PublicDescription": "Counts the number of L1D cacheline (dirty) evictions caused by load misses, stores, and prefetches. Does not count evictions or dirty writebacks caused by snoops. Does not count a replacement unless a (dirty) line was written back.",
"SampleAfterValue": "200003",
"UMask": "0x1"
},
@@ -28,7 +28,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x30",
"EventName": "L2_REJECT_XQ.ANY",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of demand and prefetch transactions that the External Queue (XQ) rejects due to a full or near full condition which likely indicates back pressure from the IDI link. The XQ may reject transactions from the L2Q (non-cacheable requests), BBL (L2 misses) and WOB (L2 write-back victims).",
"SampleAfterValue": "200003"
@@ -39,7 +39,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_REQUEST.ALL",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the total number of L2 Cache Accesses, includes hits, misses, rejects front door requests for CRd/DRd/RFO/ItoM/L2 Prefetches only. Counts on a per core basis.",
"SampleAfterValue": "200003"
@@ -50,7 +50,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_REQUEST.HIT",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of L2 Cache accesses that resulted in a hit from a front door request only (does not include rejects or recycles), Counts on a per core basis.",
"SampleAfterValue": "200003",
@@ -62,7 +62,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_REQUEST.MISS",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of L2 Cache accesses that resulted in a miss from a front door request only (does not include rejects or recycles). Counts on a per core basis.",
"SampleAfterValue": "200003",
@@ -74,7 +74,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_REQUEST.REJECTS",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of L2 Cache accesses that miss the L2 and get BBL reject short and long rejects (includes those counted in L2_reject_XQ.any). Counts on a per core basis.",
"SampleAfterValue": "200003",
@@ -86,9 +86,9 @@
"Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.MISS",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts the number of cacheable memory requests that miss in the Last Level Cache (LLC). If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
+ "PublicDescription": "Counts the number of cacheable memory requests that miss in the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
"SampleAfterValue": "200003",
"UMask": "0x41"
},
@@ -98,7 +98,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x2e",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of cacheable memory requests that access the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
"SampleAfterValue": "200003",
@@ -120,9 +120,9 @@
"Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.IFETCH_DRAM_HIT",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts the number of cycles a core is stalled due to an instruction cache or translation lookaside buffer (TLB) access which hit in DRAM or MMIO (non-DRAM).",
+ "PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or translation lookaside buffer (TLB) miss which hit in DRAM or MMIO (non-DRAM).",
"SampleAfterValue": "200003",
"UMask": "0x20"
},
@@ -132,9 +132,9 @@
"Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.IFETCH_L2_HIT",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or Translation Lookaside Buffer (TLB) access which hit in the L2 cache.",
+ "PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or Translation Lookaside Buffer (TLB) miss which hit in the L2 cache.",
"SampleAfterValue": "200003",
"UMask": "0x8"
},
@@ -144,9 +144,9 @@
"Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.IFETCH_LLC_HIT",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or Translation Lookaside Buffer (TLB) access which hit in the Last Level Cache (LLC) or other core with HITE/F/M.",
+ "PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or Translation Lookaside Buffer (TLB) miss which hit in the Last Level Cache (LLC) or other core with HITE/F/M.",
"SampleAfterValue": "200003",
"UMask": "0x10"
},
@@ -166,7 +166,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.LOAD_DRAM_HIT",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
"UMask": "0x4"
@@ -177,7 +177,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.LOAD_L2_HIT",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
"UMask": "0x1"
@@ -188,25 +188,25 @@
"Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.LOAD_LLC_HIT",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of cycles the core is stalled due to a demand load which hit in the Last Level Cache (LLC) or other core with HITE/F/M.",
"SampleAfterValue": "200003",
"UMask": "0x2"
},
{
- "BriefDescription": "Counts the number of cycles a core is stalled due to a store buffer being full.",
+ "BriefDescription": "Counts the number of cycles the core is stalled due to a store buffer being full.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "MEM_BOUND_STALLS.STORE_BUFFER_FULL",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
"UMask": "0x40"
},
{
- "BriefDescription": "Counts the number of load ops retired that hit in DRAM.",
+ "BriefDescription": "Counts the number of load uops retired that hit in DRAM.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"Data_LA": "1",
@@ -218,7 +218,7 @@
"UMask": "0x80"
},
{
- "BriefDescription": "Counts the number of retired loads that hit in the L3 cache, in which a snoop was required and modified data was forwarded from another core.",
+ "BriefDescription": "Counts the number of load uops retired that hit in the L3 cache, in which a snoop was required and modified data was forwarded from another core or module.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"Data_LA": "1",
@@ -281,6 +281,7 @@
"BriefDescription": "Counts the number of load uops retired that hit in the L3 cache.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
+ "Data_LA": "1",
"EventCode": "0xd1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
"PEBS": "1",
@@ -289,7 +290,7 @@
"UMask": "0x4"
},
{
- "BriefDescription": "Counts the number of memory uops retired. A single uop that performs both a load AND a store will be counted as 1, not 2 (e.g. ADD [mem], CONST)",
+ "BriefDescription": "Counts the number of memory uops retired.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"Data_LA": "1",
@@ -297,6 +298,7 @@
"EventName": "MEM_UOPS_RETIRED.ALL",
"PEBS": "1",
"PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of memory uops retired. A single uop that performs both a load AND a store will be counted as 1, not 2 (e.g. ADD [mem], CONST)",
"SampleAfterValue": "200003",
"UMask": "0x83"
},
@@ -351,7 +353,7 @@
"UMask": "0x43"
},
{
- "BriefDescription": "Counts the number of retired split loads uops.",
+ "BriefDescription": "Counts the number of retired split load uops.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"Data_LA": "1",
@@ -1128,9 +1130,9 @@
"Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.ICACHE",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
"UMask": "0x20"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/tremontx/floating-point.json b/tools/perf/pmu-events/arch/x86/snowridgex/floating-point.json
index c7780fa54689..2e1b80c714fd 100644
--- a/tools/perf/pmu-events/arch/x86/tremontx/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/floating-point.json
@@ -1,12 +1,13 @@
[
{
- "BriefDescription": "Counts the number of cycles the floating point divider is busy. Does not imply a stall waiting for the divider.",
+ "BriefDescription": "Counts the number of cycles the floating point divider is busy.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0xcd",
"EventName": "CYCLES_DIV_BUSY.FPDIV",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of cycles the floating point divider is busy. Does not imply a stall waiting for the divider.",
"SampleAfterValue": "200003",
"UMask": "0x2"
},
@@ -16,7 +17,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.FP_ASSIST",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of floating point operations retired that required microcode assist, which is not a reflection of the number of FP operations, instructions or uops.",
"SampleAfterValue": "20003",
@@ -33,4 +34,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x8"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/tremontx/frontend.json b/tools/perf/pmu-events/arch/x86/snowridgex/frontend.json
index c752c52ba03e..5d938a5dafcf 100644
--- a/tools/perf/pmu-events/arch/x86/tremontx/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/frontend.json
@@ -5,7 +5,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xe6",
"EventName": "BACLEARS.ANY",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the total number of BACLEARS, which occur when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
"SampleAfterValue": "200003",
@@ -17,7 +17,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xe6",
"EventName": "BACLEARS.COND",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
"UMask": "0x10"
@@ -28,7 +28,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xe6",
"EventName": "BACLEARS.INDIRECT",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
"UMask": "0x2"
@@ -39,7 +39,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xe6",
"EventName": "BACLEARS.RETURN",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
"UMask": "0x8"
@@ -50,7 +50,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xe6",
"EventName": "BACLEARS.UNCOND",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
"UMask": "0x4"
@@ -61,7 +61,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xe9",
"EventName": "DECODE_RESTRICTION.PREDECODE_WRONG",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
"UMask": "0x1"
@@ -72,7 +72,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.ACCESSES",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the total number of requests to the instruction cache. The event only counts new cache line accesses, so that multiple back to back fetches to the exact same cache line or byte chunk count as one. Specifically, the event counts when accesses from sequential code crosses the cache line boundary, or when a branch target is moved to a new line or to a non-sequential byte chunk of the same line.",
"SampleAfterValue": "200003",
@@ -84,7 +84,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.HIT",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of requests that hit in the instruction cache. The event only counts new cache line accesses, so that multiple back to back fetches to the exact same cache line and byte chunk count as one. Specifically, the event counts when accesses from sequential code crosses the cache line boundary, or when a branch target is moved to a new line or to a non-sequential byte chunk of the same line.",
"SampleAfterValue": "200003",
@@ -96,10 +96,10 @@
"Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "ICACHE.MISSES",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of missed requests to the instruction cache. The event only counts new cache line accesses, so that multiple back to back fetches to the exact same cache line and byte chunk count as one. Specifically, the event counts when accesses from sequential code crosses the cache line boundary, or when a branch target is moved to a new line or to a non-sequential byte chunk of the same line.",
"SampleAfterValue": "200003",
"UMask": "0x2"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/tremontx/memory.json b/tools/perf/pmu-events/arch/x86/snowridgex/memory.json
index 76eaefafdc89..15eba23796e4 100644
--- a/tools/perf/pmu-events/arch/x86/tremontx/memory.json
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/memory.json
@@ -5,7 +5,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "20003",
"UMask": "0x2"
@@ -439,4 +439,4 @@
"SampleAfterValue": "100003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/tremontx/other.json b/tools/perf/pmu-events/arch/x86/snowridgex/other.json
index 2766e9dfc325..4a1b7cc5aa23 100644
--- a/tools/perf/pmu-events/arch/x86/tremontx/other.json
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/other.json
@@ -6,7 +6,7 @@
"EdgeDetect": "1",
"EventCode": "0x63",
"EventName": "BUS_LOCK.ALL",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003"
},
@@ -27,7 +27,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "BUS_LOCK.CYCLES_OTHER_BLOCK",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
"UMask": "0x2"
@@ -38,7 +38,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x63",
"EventName": "BUS_LOCK.CYCLES_SELF_BLOCK",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
"UMask": "0x1"
@@ -71,7 +71,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "C0_STALLS.LOAD_DRAM_HIT",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
"UMask": "0x4"
@@ -82,7 +82,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "C0_STALLS.LOAD_L2_HIT",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
"UMask": "0x1"
@@ -93,7 +93,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x34",
"EventName": "C0_STALLS.LOAD_LLC_HIT",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
"UMask": "0x2"
@@ -104,7 +104,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xcb",
"EventName": "HW_INTERRUPTS.MASKED",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of core cycles during which interrupts are masked (disabled). Increments by 1 each core cycle that EFLAGS.IF is 0, regardless of whether interrupts are pending or not.",
"SampleAfterValue": "200003",
@@ -116,7 +116,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xcb",
"EventName": "HW_INTERRUPTS.PENDING_AND_MASKED",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of core cycles during which there are pending interrupts while interrupts are masked (disabled). Increments by 1 each core cycle that both EFLAGS.IF is 0 and an INTR is pending (which means the APIC is telling the ROB to cause an INTR). This event does not increment if EFLAGS.IF is 0 but all interrupt in the APICs Interrupt Request Register (IRR) are inhibited by the PPR (thus either by ISRV or TPR) because in these cases the interrupts would be held up in the APIC and would not be pended to the ROB. This event does count when an interrupt is only inhibited by MOV/POP SS state machines or the STI state machine. These extra inhibits only last for a single instructions and would not be important.",
"SampleAfterValue": "200003",
@@ -128,7 +128,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xcb",
"EventName": "HW_INTERRUPTS.RECEIVED",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "203",
"UMask": "0x1"
diff --git a/tools/perf/pmu-events/arch/x86/tremontx/pipeline.json b/tools/perf/pmu-events/arch/x86/snowridgex/pipeline.json
index 38dc8044767b..09919fdb9a38 100644
--- a/tools/perf/pmu-events/arch/x86/tremontx/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/pipeline.json
@@ -170,7 +170,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xe8",
"EventName": "BTCLEAR.ANY",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the total number of BTCLEARS which occurs when the Branch Target Buffer (BTB) predicts a taken branch.",
"SampleAfterValue": "200003"
@@ -180,9 +180,9 @@
"CollectPEBSRecord": "2",
"Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.CORE",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "33",
- "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1.",
+ "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1.",
"SampleAfterValue": "2000003",
"UMask": "0x2"
},
@@ -192,9 +192,9 @@
"Counter": "0,1,2,3",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.CORE_P",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses a programmable general purpose performance counter.",
+ "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses a programmable general purpose performance counter.",
"SampleAfterValue": "2000003"
},
{
@@ -203,7 +203,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.REF",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses fixed counter 2.",
"SampleAfterValue": "2000003",
@@ -214,7 +214,7 @@
"CollectPEBSRecord": "2",
"Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "34",
"PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses fixed counter 2.",
"SampleAfterValue": "2000003",
@@ -227,7 +227,7 @@
"EventCode": "0x3c",
"EventName": "CPU_CLK_UNHALTED.REF_TSC_P",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses a programmable general purpose performance counter.",
+ "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses a programmable general purpose performance counter.",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
@@ -237,18 +237,19 @@
"Counter": "0,1,2,3",
"EventCode": "0xcd",
"EventName": "CYCLES_DIV_BUSY.ANY",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "2000003"
},
{
- "BriefDescription": "Counts the number of cycles the integer divider is busy. Does not imply a stall waiting for the divider.",
+ "BriefDescription": "Counts the number of cycles the integer divider is busy.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0xcd",
"EventName": "CYCLES_DIV_BUSY.IDIV",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of cycles the integer divider is busy. Does not imply a stall waiting for the divider.",
"SampleAfterValue": "200003",
"UMask": "0x1"
},
@@ -324,7 +325,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.ANY",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "20003"
},
@@ -334,7 +335,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.DISAMBIGUATION",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "20003",
"UMask": "0x8"
@@ -345,18 +346,18 @@
"Counter": "0,1,2,3",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.PAGE_FAULT",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "20003",
"UMask": "0x20"
},
{
- "BriefDescription": "Counts the number of machine clears due typically to program modifying data (self modifying code) within 1K of a recently fetched code page.",
+ "BriefDescription": "Counts the number of machine clears due to program modifying data (self modifying code) within 1K of a recently fetched code page.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0xc3",
"EventName": "MACHINE_CLEARS.SMC",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "20003",
"UMask": "0x1"
@@ -367,7 +368,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.ALL",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the instruction queue (IQ) even if an FE_bound event occurs during this period. Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.",
"SampleAfterValue": "1000003",
@@ -401,7 +402,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.MISPREDICT",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
"UMask": "0x4"
@@ -412,7 +413,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x73",
"EventName": "TOPDOWN_BAD_SPECULATION.MONUKE",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
"UMask": "0x2"
@@ -423,7 +424,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.ALL",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003"
},
@@ -433,7 +434,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
"UMask": "0x1"
@@ -444,7 +445,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.MEM_SCHEDULER",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
"UMask": "0x2"
@@ -455,7 +456,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
"UMask": "0x8"
@@ -466,7 +467,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.REGISTER",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
"UMask": "0x20"
@@ -477,7 +478,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.REORDER_BUFFER",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
"UMask": "0x40"
@@ -488,7 +489,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.SERIALIZATION",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
"UMask": "0x10"
@@ -499,18 +500,18 @@
"Counter": "0,1,2,3",
"EventCode": "0x74",
"EventName": "TOPDOWN_BE_BOUND.STORE_BUFFER",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
"UMask": "0x4"
},
{
- "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to frontend stalls.",
+ "BriefDescription": "Counts the total number of issue slots every cycle that were not consumed by the backend due to frontend stalls.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.ALL",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003"
},
@@ -520,7 +521,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.BRANCH_DETECT",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
"SampleAfterValue": "1000003",
@@ -532,7 +533,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.BRANCH_RESTEER",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BTCLEARS, which occurs when the Branch Target Buffer (BTB) predicts a taken branch.",
"SampleAfterValue": "1000003",
@@ -544,7 +545,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.CISC",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
"UMask": "0x1"
@@ -555,40 +556,18 @@
"Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.DECODE",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
"UMask": "0x8"
},
{
- "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.",
- "CollectPEBSRecord": "2",
- "Counter": "0,1,2,3",
- "EventCode": "0x71",
- "EventName": "TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH",
- "PDIR_COUNTER": "na",
- "PEBScounters": "0,1,2,3",
- "SampleAfterValue": "1000003",
- "UMask": "0x8d"
- },
- {
- "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to a latency related stalls including BACLEARs, BTCLEARs, ITLB misses, and ICache misses.",
- "CollectPEBSRecord": "2",
- "Counter": "0,1,2,3",
- "EventCode": "0x71",
- "EventName": "TOPDOWN_FE_BOUND.FRONTEND_LATENCY",
- "PDIR_COUNTER": "na",
- "PEBScounters": "0,1,2,3",
- "SampleAfterValue": "1000003",
- "UMask": "0x72"
- },
- {
"BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to ITLB misses.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.ITLB",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to Instruction Table Lookaside Buffer (ITLB) misses.",
"SampleAfterValue": "1000003",
@@ -600,7 +579,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.OTHER",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
"UMask": "0x80"
@@ -611,7 +590,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x71",
"EventName": "TOPDOWN_FE_BOUND.PREDECODE",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "1000003",
"UMask": "0x4"
@@ -632,7 +611,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x0e",
"EventName": "UOPS_ISSUED.ANY",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of uops issued by the front end every cycle. When 4-uops are requested and only 2-uops are delivered, the event counts 2. Uops_issued correlates to the number of ROB entries. If uop takes 2 ROB slots it counts as 2 uops_issued.",
"SampleAfterValue": "200003"
@@ -664,7 +643,6 @@
"Counter": "0,1,2,3",
"EventCode": "0xc2",
"EventName": "UOPS_RETIRED.MS",
- "PDIR_COUNTER": "na",
"PEBS": "1",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of uops that are from complex flows issued by the Microcode Sequencer (MS). This includes uops from flows due to complex instructions, faults, assists, and inserted flows.",
diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/uncore-memory.json b/tools/perf/pmu-events/arch/x86/snowridgex/uncore-memory.json
new file mode 100644
index 000000000000..f2c17f19299f
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/uncore-memory.json
@@ -0,0 +1,619 @@
+[
+ {
+ "BriefDescription": "Pre-charge for reads",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_M_PRE_COUNT.RD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Pre-charge for writes",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_M_PRE_COUNT.WR",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "read requests to memory controller. Derived from unc_m_cas_count.rd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "LLC_MISSES.MEM_READ",
+ "PerPkg": "1",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0x0f",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "read requests to memory controller",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.RD",
+ "PerPkg": "1",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0x0f",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "write requests to memory controller. Derived from unc_m_cas_count.wr",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "LLC_MISSES.MEM_WRITE",
+ "PerPkg": "1",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0x30",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "write requests to memory controller",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.WR",
+ "PerPkg": "1",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0x30",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM CAS commands issued",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3f",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x45",
+ "EventName": "UNC_M_DRAM_REFRESH.OPPORTUNISTIC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x45",
+ "EventName": "UNC_M_DRAM_REFRESH.PANIC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x45",
+ "EventName": "UNC_M_DRAM_REFRESH.HIGH",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Allocations",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x10",
+ "EventName": "UNC_M_RPQ_INSERTS.PCH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Allocations",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x10",
+ "EventName": "UNC_M_RPQ_INSERTS.PCH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Allocations",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_M_WPQ_INSERTS.PCH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Allocations",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_M_WPQ_INSERTS.PCH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands. : Precharge due to page table",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_M_PRE_COUNT.PGT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Memory controller clock ticks",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventName": "UNC_M_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Half clockticks for IMC",
+ "Counter": "FIXED",
+ "CounterType": "FIXED",
+ "EventCode": "0xff",
+ "EventName": "UNC_M_HCLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M_RPQ_OCCUPANCY_PCH0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x81",
+ "EventName": "UNC_M_RPQ_OCCUPANCY_PCH1",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M_WPQ_OCCUPANCY_PCH0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_WPQ_OCCUPANCY_PCH1",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Activate Count : All Activates",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x01",
+ "EventName": "UNC_M_ACT_COUNT.ALL",
+ "PerPkg": "1",
+ "UMask": "0x0B",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_M_PRE_COUNT.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1C",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Data Buffer Inserts",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x17",
+ "EventName": "UNC_M_RDB_INSERTS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM read CAS commands issued (does not include underfills)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.RD_REG",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM underfill read CAS commands issued",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Activate Count : Activate due to Bypass",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x01",
+ "EventName": "UNC_M_ACT_COUNT.BYP",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM RD_CAS commands w/auto-pre",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.RD_PRE_REG",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.RD_PRE_UNDERFILL",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM WR_CAS commands w/ auto-pre",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.WR_PRE",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x47",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.LOW_0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x47",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.LOW_1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x47",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.LOW_2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x47",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.LOW_3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M_POWER_CRIT_THROTTLE_CYCLES.SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M_POWER_CRIT_THROTTLE_CYCLES.SLOT1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x46",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x46",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.SLOT1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_M_RPQ_CYCLES_NE.PCH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_M_RPQ_CYCLES_NE.PCH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_M_WPQ_CYCLES_NE.PCH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_M_WPQ_CYCLES_NE.PCH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x23",
+ "EventName": "UNC_M_WPQ_READ_HIT.PCH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x23",
+ "EventName": "UNC_M_WPQ_READ_HIT.PCH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x24",
+ "EventName": "UNC_M_WPQ_WRITE_HIT.PCH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x24",
+ "EventName": "UNC_M_WPQ_WRITE_HIT.PCH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_PCLS.RD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_PCLS.RD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_PCLS.WR",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_PCLS.WR",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_PCLS.TOTAL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_PCLS.TOTAL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge All Commands",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x44",
+ "EventName": "UNC_M_DRAM_PRE_ALL",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_PARITY_ERRORS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2c",
+ "EventName": "UNC_M_PARITY_ERRORS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles where DRAM ranks are in power down (CKE) mode",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x85",
+ "EventName": "UNC_M_POWER_CHANNEL_PPD",
+ "MetricExpr": "(UNC_M_POWER_CHANNEL_PPD / UNC_M_CLOCKTICKS) * 100.",
+ "MetricName": "power_channel_ppd %",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles Memory is in self refresh power mode",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x43",
+ "EventName": "UNC_M_POWER_SELF_REFRESH",
+ "MetricExpr": "(UNC_M_POWER_SELF_REFRESH / UNC_M_CLOCKTICKS) * 100.",
+ "MetricName": "power_self_refresh %",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Data Buffer Full",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_M_RDB_FULL",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Data Buffer Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_M_RDB_NOT_EMPTY",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Data Buffer Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1A",
+ "EventName": "UNC_M_RDB_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Full Cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_M_RPQ_CYCLES_FULL_PCH0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Full Cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x15",
+ "EventName": "UNC_M_RPQ_CYCLES_FULL_PCH1",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Full Cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x22",
+ "EventName": "UNC_M_WPQ_CYCLES_FULL_PCH0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Full Cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x16",
+ "EventName": "UNC_M_WPQ_CYCLES_FULL_PCH1",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM WR_CAS commands w/o auto-pre",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.WR_NONPRE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Pre-charges due to page misses",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_M_PRE_COUNT.PAGE_MISS",
+ "PerPkg": "1",
+ "UMask": "0x0c",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for the Memory Controller",
+ "Counter": "4",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_M_CLOCKTICKS_FREERUN",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/uncore-other.json b/tools/perf/pmu-events/arch/x86/snowridgex/uncore-other.json
new file mode 100644
index 000000000000..1701db46696d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/uncore-other.json
@@ -0,0 +1,25192 @@
+[
+ {
+ "BriefDescription": "Uncore cache clock ticks",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventName": "UNC_CHA_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Normal priority reads issued to the memory controller from the CHA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x59",
+ "EventName": "UNC_CHA_IMC_READS_COUNT.NORMAL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Full Line Writes Issued : Full Line Non-ISOCH",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : All Lines Victimized",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x0F",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Local read requests that miss the SF/LLC and remote read requests sent to the CHA's home agent",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.READS",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Local write requests that miss the SF/LLC and remote write requests sent to the CHA's home agent",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.WRITES",
+ "PerPkg": "1",
+ "UMask": "0x0c",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop filter capacity evictions for E-state entries",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x3D",
+ "EventName": "UNC_CHA_SF_EVICTION.E_STATE",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop filter capacity evictions for M-state entries",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x3D",
+ "EventName": "UNC_CHA_SF_EVICTION.M_STATE",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop filter capacity evictions for S-state entries",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x3D",
+ "EventName": "UNC_CHA_SF_EVICTION.S_STATE",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All requests from iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA",
+ "PerPkg": "1",
+ "UMask": "0xC001FF01",
+ "UMaskExt": "0xC001FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All requests from iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT",
+ "PerPkg": "1",
+ "UMask": "0xC001FD01",
+ "UMaskExt": "0xC001FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRds issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD",
+ "PerPkg": "1",
+ "UMask": "0xC80FFD01",
+ "UMaskExt": "0xC80FFD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO",
+ "PerPkg": "1",
+ "UMask": "0xC807FD01",
+ "UMaskExt": "0xC807FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
+ "PerPkg": "1",
+ "UMask": "0xC001FE01",
+ "UMaskExt": "0xC001FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "LLC misses - Uncacheable reads (from cpu) . Derived from unc_cha_tor_inserts.ia_miss",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "LLC_MISSES.UNCACHEABLE",
+ "Filter": "config1=0x40e33",
+ "PerPkg": "1",
+ "UMask": "0xC001FE01",
+ "UMaskExt": "0xC001FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "MMIO reads. Derived from unc_cha_tor_inserts.ia_miss",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "LLC_MISSES.MMIO_READ",
+ "Filter": "config1=0x40040e33",
+ "PerPkg": "1",
+ "UMask": "0xC001FE01",
+ "UMaskExt": "0xC001FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "MMIO writes. Derived from unc_cha_tor_inserts.ia_miss",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "LLC_MISSES.MMIO_WRITE",
+ "Filter": "config1=0x40041e33",
+ "PerPkg": "1",
+ "UMask": "0xC001FE01",
+ "UMaskExt": "0xC001FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Streaming stores (full cache line). Derived from unc_cha_tor_inserts.ia_miss",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "LLC_REFERENCES.STREAMING_FULL",
+ "Filter": "config1=0x41833",
+ "PerPkg": "1",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0xC001FE01",
+ "UMaskExt": "0xC001FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Streaming stores (partial cache line). Derived from unc_cha_tor_inserts.ia_miss",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "LLC_REFERENCES.STREAMING_PARTIAL",
+ "Filter": "config1=0x41a33",
+ "PerPkg": "1",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0xC001FE01",
+ "UMaskExt": "0xC001FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRds issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD",
+ "PerPkg": "1",
+ "UMask": "0xC80FFE01",
+ "UMaskExt": "0xC80FFE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO",
+ "PerPkg": "1",
+ "UMask": "0xC807FE01",
+ "UMaskExt": "0xC807FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All requests from IO Devices",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO",
+ "PerPkg": "1",
+ "UMask": "0xC001FF04",
+ "UMaskExt": "0xC001FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All requests from IO Devices that hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT",
+ "PerPkg": "1",
+ "UMask": "0xC001FD04",
+ "UMaskExt": "0xC001FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All requests from IO Devices that missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS",
+ "PerPkg": "1",
+ "UMask": "0xC001FE04",
+ "UMaskExt": "0xC001FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All requests from iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA",
+ "PerPkg": "1",
+ "UMask": "0xC001FF01",
+ "UMaskExt": "0xC001FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All requests from iA Cores that Hit the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT",
+ "PerPkg": "1",
+ "UMask": "0xC001FD01",
+ "UMaskExt": "0xC001FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All requests from iA Cores that Missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS",
+ "PerPkg": "1",
+ "UMask": "0xC001FE01",
+ "UMaskExt": "0xC001FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRds issued by iA Cores that Missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD",
+ "PerPkg": "1",
+ "UMask": "0xC80FFE01",
+ "UMaskExt": "0xC80FFE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO",
+ "PerPkg": "1",
+ "UMask": "0xC807FE01",
+ "UMaskExt": "0xC807FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All requests from IO Devices",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO",
+ "PerPkg": "1",
+ "UMask": "0xC001FF04",
+ "UMaskExt": "0xC001FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All requests from IO Devices that hit the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT",
+ "PerPkg": "1",
+ "UMask": "0xC001FD04",
+ "UMaskExt": "0xC001FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All requests from IO Devices that missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS",
+ "PerPkg": "1",
+ "UMask": "0xC001FE04",
+ "UMaskExt": "0xC001FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMs issued by IO Devices that missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM",
+ "PerPkg": "1",
+ "UMask": "0xCC43FE04",
+ "UMaskExt": "0xCC43FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_CHA_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC88FFD01",
+ "UMaskExt": "0xC88FFD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Opts issued by iA Cores that hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_OPT",
+ "PerPkg": "1",
+ "UMask": "0xC827FD01",
+ "UMaskExt": "0xC827FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores that hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC8A7FD01",
+ "UMaskExt": "0xC8A7FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC887FD01",
+ "UMaskExt": "0xC887FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC88FFE01",
+ "UMaskExt": "0xC88FFE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Opt issued by iA Cores that missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT",
+ "PerPkg": "1",
+ "UMask": "0xC827FE01",
+ "UMaskExt": "0xC827FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores that missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC8A7FE01",
+ "UMaskExt": "0xC8A7FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC887FE01",
+ "UMaskExt": "0xC887FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Opts issued by iA Cores that hit the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_OPT",
+ "PerPkg": "1",
+ "UMask": "0xC827FD01",
+ "UMaskExt": "0xC827FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores that hit the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC8A7FD01",
+ "UMaskExt": "0xC8A7FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Opt issued by iA Cores that missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT",
+ "PerPkg": "1",
+ "UMask": "0xC827FE01",
+ "UMaskExt": "0xC827FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMs issued by IO Devices that Hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_ITOM",
+ "PerPkg": "1",
+ "UMask": "0xCC43FD04",
+ "UMaskExt": "0xCC43FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMs issued by IO Devices",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM",
+ "PerPkg": "1",
+ "UMask": "0xCC43FF04",
+ "UMaskExt": "0xCC43FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFO_Prefs issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_RFO_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC887FF01",
+ "UMaskExt": "0xC887FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_RFO",
+ "PerPkg": "1",
+ "UMask": "0xC807FF01",
+ "UMaskExt": "0xC807FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Opts issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_OPT",
+ "PerPkg": "1",
+ "UMask": "0xC827FF01",
+ "UMaskExt": "0xC827FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC8A7FF01",
+ "UMaskExt": "0xC8A7FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRDs issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_CRD",
+ "PerPkg": "1",
+ "UMask": "0xC80FFF01",
+ "UMaskExt": "0xC80FFF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFOs issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_RFO",
+ "PerPkg": "1",
+ "UMask": "0xC807FF01",
+ "UMaskExt": "0xC807FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Opts issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_OPT",
+ "PerPkg": "1",
+ "UMask": "0xC827FF01",
+ "UMaskExt": "0xC827FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC8A7FF01",
+ "UMaskExt": "0xC8A7FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRDs issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CRD",
+ "PerPkg": "1",
+ "UMask": "0xC80FFF01",
+ "UMaskExt": "0xC80FFF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CLFlushes issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_CLFLUSH",
+ "PerPkg": "1",
+ "UMask": "0xC8C7FF01",
+ "UMaskExt": "0xC8C7FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "UMask": "0xCD43FF04",
+ "UMaskExt": "0xCD43FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "UMask": "0xCD43FD04",
+ "UMaskExt": "0xCD43FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "UMask": "0xCD43FE04",
+ "UMaskExt": "0xCD43FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; WCiLF misses from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_FULL_STREAMING_WR",
+ "PerPkg": "1",
+ "UMask": "0xc867fe01",
+ "UMaskExt": "0xc867fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; WCiL misses from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_PARTIAL_STREAMING_WR",
+ "PerPkg": "1",
+ "UMask": "0xc86ffe01",
+ "UMaskExt": "0xc86ffe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL",
+ "PerPkg": "1",
+ "UMask": "0xC86FFE01",
+ "UMaskExt": "0xC86FFE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WiLs issued by iA Cores that Missed LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WIL",
+ "PerPkg": "1",
+ "UMask": "0xC87FDE01",
+ "UMaskExt": "0xC87FDE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : UCRdFs issued by iA Cores that Missed LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_UCRDF",
+ "PerPkg": "1",
+ "UMask": "0xC877DE01",
+ "UMaskExt": "0xC877DE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_PCIRDCUR",
+ "PerPkg": "1",
+ "UMask": "0xC8F3FE04",
+ "UMaskExt": "0xC8F3FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_PCIRDCUR",
+ "PerPkg": "1",
+ "UMask": "0xc8f3fe04",
+ "UMaskExt": "0xc8f3fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_PCIRDCUR",
+ "PerPkg": "1",
+ "UMask": "0xC8F3FD04",
+ "UMaskExt": "0xC8F3FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : PCIRdCurs issued by IO Devices",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR",
+ "PerPkg": "1",
+ "UMask": "0xC8F3FF04",
+ "UMaskExt": "0xC8F3FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_PCIRDCUR",
+ "PerPkg": "1",
+ "UMask": "0xC8F3FF04",
+ "UMaskExt": "0xC8F3FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLF issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF",
+ "PerPkg": "1",
+ "UMask": "0xC867FE01",
+ "UMaskExt": "0xC867FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Data Read Request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ",
+ "PerPkg": "1",
+ "UMask": "0x1BC1FF",
+ "UMaskExt": "0x1BC1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Clockticks of the integrated IO (IIO) traffic controller",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x01",
+ "EventName": "UNC_IIO_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth writing at IIO, part 0",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "ScaleUnit": "4Bytes",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth writing at IIO, part 1",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "ScaleUnit": "4Bytes",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth writing at IIO, part 2",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "ScaleUnit": "4Bytes",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth writing at IIO, part 3",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "ScaleUnit": "4Bytes",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth writing at IIO. Derived from unc_iio_data_req_of_cpu.mem_write.part0",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "LLC_MISSES.PCIE_WRITE",
+ "FCMask": "0x07",
+ "Filter": "ch_mask=0x1f",
+ "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
+ "MetricName": "LLC_MISSES.PCIE_WRITE",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "ScaleUnit": "4Bytes",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth reading at IIO, part 0",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "ScaleUnit": "4Bytes",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth reading at IIO, part 1",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "ScaleUnit": "4Bytes",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth reading at IIO, part 2",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "ScaleUnit": "4Bytes",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth reading at IIO, part 3",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "ScaleUnit": "4Bytes",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth reading at IIO. Derived from unc_iio_data_req_of_cpu.mem_read.part0",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "LLC_MISSES.PCIE_READ",
+ "FCMask": "0x07",
+ "Filter": "ch_mask=0x1f",
+ "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
+ "MetricName": "LLC_MISSES.PCIE_READ",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "ScaleUnit": "4Bytes",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number requests PCIe makes of the main die : All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x85",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU.COMMIT.ALL",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for IIO clocktick",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_IIO_CLOCKTICKS_FREERUN",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART0",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART1",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART2",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART3",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART4",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART5",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART6",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART7",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 0",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART0",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 7",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART7",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 6",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART6",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 5",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART5",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 4",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART4",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 3",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART3",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 2",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART2",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 1",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART1",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0-7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.ALL_PARTS",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0xff",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 0-7",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL_PARTS",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0xff",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Lost Forward",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1F",
+ "EventName": "UNC_I_MISC1.LOST_FWD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "PCIITOM request issued by the IRP unit to the mesh with the intention of writing a full cacheline",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.PCITOM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops : WbMtoI",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Total IRP occupancy of inbound read and write requests to coherent memory",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x0f",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.MEM",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": ": All Inserts Inbound (p2p + faf + cset)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_I_IRP_ALL.INBOUND_INSERTS",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound write (fast path) requests received by the IRP",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.WR_PREF",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Clockticks of the IO coherency tracker (IRP)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x01",
+ "EventName": "UNC_I_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "FAF RF full",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_FAF_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound read requests received by the IRP and inserted into the FAF queue",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_I_FAF_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Occupancy of the IRP FAF queue",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_I_FAF_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "FAF allocation -- sent to ADQ",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_FAF_TRANSACTIONS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit M line in the IIO cache",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT_M",
+ "PerPkg": "1",
+ "UMask": "0x78",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Clockticks of the mesh to memory (M2M)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventName": "UNC_M2M_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_M2M_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Clockticks of the mesh to PCI (M2P)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x01",
+ "EventName": "UNC_M2P_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_M2P_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Clockticks in the UBOX using a dedicated 48-bit Fixed Counter",
+ "Counter": "FIXED",
+ "CounterType": "FIXED",
+ "EventCode": "0xff",
+ "EventName": "UNC_U_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Local - All Lines",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x200F",
+ "UMaskExt": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counter 0 Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1F",
+ "EventName": "UNC_CHA_COUNTER0_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Number of times that an RFO hit in S state",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.RFO_HIT_S",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Local INVITOE requests (exclusive ownership of a cache line without receiving data) that miss the SF/LLC and remote INVITOE requests sent to the CHA's home agent",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.INVITOE",
+ "PerPkg": "1",
+ "UMask": "0x30",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; PhyAddr Match",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0xC001FFff",
+ "UMaskExt": "0xC001FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRds issued by iA Cores that Hit the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD",
+ "PerPkg": "1",
+ "UMask": "0xC80FFD01",
+ "UMaskExt": "0xC80FFD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFOs issued by iA Cores that Hit the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO",
+ "PerPkg": "1",
+ "UMask": "0xC807FD01",
+ "UMaskExt": "0xC807FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by IO Devices that missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_RFO",
+ "PerPkg": "1",
+ "UMask": "0xc803fe04",
+ "UMaskExt": "0xc803fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFOs issued by IO Devices that missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RFO",
+ "PerPkg": "1",
+ "UMask": "0xc803fe04",
+ "UMaskExt": "0xc803fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMs issued by IO Devices that missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM",
+ "PerPkg": "1",
+ "UMask": "0xcc43fe04",
+ "UMaskExt": "0xcc43fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Bypass : Taken",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x57",
+ "EventName": "UNC_CHA_BYPASS_CHA_IMC.TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Bypass : Intermediate bypass Taken",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x57",
+ "EventName": "UNC_CHA_BYPASS_CHA_IMC.INTERMEDIATE",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Bypass : Not Taken",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x57",
+ "EventName": "UNC_CHA_BYPASS_CHA_IMC.NOT_TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Single External Snoops",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EXT_ONE",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Single Core Requests",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.CORE_ONE",
+ "PerPkg": "1",
+ "UMask": "0x41",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Single Eviction",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EVICT_ONE",
+ "PerPkg": "1",
+ "UMask": "0x81",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Any Single Snoop",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.ANY_ONE",
+ "PerPkg": "1",
+ "UMask": "0xF1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Multiple External Snoops",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EXT_GTONE",
+ "PerPkg": "1",
+ "UMask": "0x22",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Multiple Core Requests",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.CORE_GTONE",
+ "PerPkg": "1",
+ "UMask": "0x42",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Multiple Eviction",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EVICT_GTONE",
+ "PerPkg": "1",
+ "UMask": "0x82",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Any Cycle with Multiple Snoops",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.ANY_GTONE",
+ "PerPkg": "1",
+ "UMask": "0xF2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6E",
+ "EventName": "UNC_CHA_DIRECT_GO.HA_TOR_DEALLOC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6E",
+ "EventName": "UNC_CHA_DIRECT_GO.HA_SUPPRESS_NO_D2C",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6E",
+ "EventName": "UNC_CHA_DIRECT_GO.HA_SUPPRESS_DRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6D",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.EXTCMP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6D",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.PULL",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6D",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.GO",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6D",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.GO_PULL",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6D",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.FAST_GO",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6D",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.FAST_GO_PULL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6D",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.NOP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6D",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.IDLE_DUE_SUPPRESS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "HA to iMC Reads Issued : ISOCH",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x59",
+ "EventName": "UNC_CHA_IMC_READS_COUNT.PRIORITY",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Full Line Writes Issued : Partial Non-ISOCH",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Full Line Writes Issued : ISOCH Full Line",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL_PRIORITY",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Full Line Writes Issued : ISOCH Partial",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL_PRIORITY",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Lines in M state",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.M_STATE",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Lines in E state",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.E_STATE",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Lines in S State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.S_STATE",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Local Only",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_ONLY",
+ "PerPkg": "1",
+ "UMaskExt": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Local - Lines in M State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_M",
+ "PerPkg": "1",
+ "UMask": "0x2001",
+ "UMaskExt": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Local - Lines in E State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_E",
+ "PerPkg": "1",
+ "UMask": "0x2002",
+ "UMaskExt": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Local - Lines in S State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_S",
+ "PerPkg": "1",
+ "UMask": "0x2004",
+ "UMaskExt": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cbo Misc : Silent Snoop Eviction",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.RSPI_WAS_FSE",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cbo Misc : Write Combining Aliasing",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.WC_ALIASING",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cbo Misc : CV0 Prefetch Victim",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.CV0_PREF_VIC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cbo Misc : CV0 Prefetch Miss",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.CV0_PREF_MISS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.RMW_SETMATCH",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.GOTRACK_PAMATCH",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.GOTRACK_ALLOWSNP",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.GOTRACK_WAYMATCH",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.GOTRACK_ALLWAYRSV",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.PTL_INPIPE",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.IRQ_SETMATCH_VICP",
+ "PerPkg": "1",
+ "UMaskExt": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.FSF_VICP",
+ "PerPkg": "1",
+ "UMaskExt": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.ONE_FSF_VIC",
+ "PerPkg": "1",
+ "UMaskExt": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.TORID_MATCH_GO_P",
+ "PerPkg": "1",
+ "UMaskExt": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.IPQ_SETMATCH_VICP",
+ "PerPkg": "1",
+ "UMaskExt": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.WAY_MATCH",
+ "PerPkg": "1",
+ "UMaskExt": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.ONE_RSP_CON",
+ "PerPkg": "1",
+ "UMaskExt": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.IDX_INPIPE",
+ "PerPkg": "1",
+ "UMaskExt": "0x100",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.SETMATCHENTRYWSCT",
+ "PerPkg": "1",
+ "UMaskExt": "0x200",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.ALLRSFWAYS_RES",
+ "PerPkg": "1",
+ "UMaskExt": "0x800",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.RRQ_SETMATCH_VICP",
+ "PerPkg": "1",
+ "UMaskExt": "0x1000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.ISMQ_SETMATCH_VICP",
+ "PerPkg": "1",
+ "UMaskExt": "0x2000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.SF_WAYS_RES",
+ "PerPkg": "1",
+ "UMaskExt": "0x4000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.LLC_WAYS_RES",
+ "PerPkg": "1",
+ "UMaskExt": "0x8000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.NOTALLOWSNOOP",
+ "PerPkg": "1",
+ "UMaskExt": "0x10000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.TOPA_MATCH",
+ "PerPkg": "1",
+ "UMaskExt": "0x20000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.IVEGRCREDIT",
+ "PerPkg": "1",
+ "UMaskExt": "0x40000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.BLEGRCREDIT",
+ "PerPkg": "1",
+ "UMaskExt": "0x80000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.ADEGRCREDIT",
+ "PerPkg": "1",
+ "UMaskExt": "0x100000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.AKEGRCREDIT",
+ "PerPkg": "1",
+ "UMaskExt": "0x200000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.HACREDIT",
+ "PerPkg": "1",
+ "UMaskExt": "0x400000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.VN_AD_REQ",
+ "PerPkg": "1",
+ "UMaskExt": "0x800000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.VN_AD_RSP",
+ "PerPkg": "1",
+ "UMaskExt": "0x1000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.VN_BL_RSP",
+ "PerPkg": "1",
+ "UMaskExt": "0x2000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.VN_BL_WB",
+ "PerPkg": "1",
+ "UMaskExt": "0x4000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.VN_BL_NCB",
+ "PerPkg": "1",
+ "UMaskExt": "0x8000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.VN_BL_NCS",
+ "PerPkg": "1",
+ "UMaskExt": "0x10000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC8",
+ "PerPkg": "1",
+ "UMaskExt": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC9",
+ "PerPkg": "1",
+ "UMaskExt": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC10",
+ "PerPkg": "1",
+ "UMaskExt": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC11",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC11",
+ "PerPkg": "1",
+ "UMaskExt": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC12",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC12",
+ "PerPkg": "1",
+ "UMaskExt": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC13",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC13",
+ "PerPkg": "1",
+ "UMaskExt": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations : IRQ",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations : IRQ Rejected",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.IRQ_REJ",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations : PRQ",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations : PRQ",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.PRQ_REJ",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : HA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : LLC Victim",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : SF Victim",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : Victim",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : LLC or SF Way",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : Allow Snoop",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x25",
+ "EventName": "UNC_CHA_RxC_ISMQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 1 : HA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x25",
+ "EventName": "UNC_CHA_RxC_ISMQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2D",
+ "EventName": "UNC_CHA_RxC_ISMQ1_RETRY.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 1 : HA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2D",
+ "EventName": "UNC_CHA_RxC_ISMQ1_RETRY.HA",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Occupancy : IRQ",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_CHA_RxC_OCCUPANCY.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : HA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.HA",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : LLC Victim",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : SF Victim",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : Victim",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : LLC OR SF Way",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : Allow Snoop",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : PhyAddr Match",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : HA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : LLC Victim",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : SF Victim",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : Victim",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : LLC OR SF Way",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : Allow Snoop",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : PhyAddr Match",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : AD REQ on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : AD RSP on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : BL RSP on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : BL WB on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : BL NCB on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : BL NCS on VN0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : Non UPI AK Request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : Non UPI IV Request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : ANY0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : HA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.HA",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : LLC Victim",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : SF Victim",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : Victim",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : LLC OR SF Way",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : Allow Snoop",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : PhyAddr Match",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent : All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.ALL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent : Snoops sent for Local Requests",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent : Broadcast snoops for Local Requests",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.BCST_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent : Directed snoops for Local Requests",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.DIRECT_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : RspI",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPI",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : RspS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : RspIFwd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPIFWD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : RspSFwd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPSFWD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : Rsp*WB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPWB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : Rsp*FWD*WB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPFWDWB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : RspCnflct",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPCNFLCT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : RspFwd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPFWD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Misc Snoop Responses Received : MtoI RspIFwdM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6B",
+ "EventName": "UNC_CHA_SNOOP_RSP_MISC.MTOI_RSPIFWDM",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Misc Snoop Responses Received : MtoI RspIDataM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6B",
+ "EventName": "UNC_CHA_SNOOP_RSP_MISC.MTOI_RSPDATAM",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Misc Snoop Responses Received : RspIFwdPtl Hit SF",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6B",
+ "EventName": "UNC_CHA_SNOOP_RSP_MISC.RSPIFWDMPTL_HITSF",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Misc Snoop Responses Received : RspIFwdPtl Hit LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6B",
+ "EventName": "UNC_CHA_SNOOP_RSP_MISC.RSPIFWDMPTL_HITLLC",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Misc Snoop Responses Received : Pull Data Partial - Hit SF",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6B",
+ "EventName": "UNC_CHA_SNOOP_RSP_MISC.PULLDATAPTL_HITSF",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Misc Snoop Responses Received : Pull Data Partial - Hit LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6B",
+ "EventName": "UNC_CHA_SNOOP_RSP_MISC.PULLDATAPTL_HITLLC",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WbPushMtoI : Pushed to LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x56",
+ "EventName": "UNC_CHA_WB_PUSH_MTOI.LLC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WbPushMtoI : Pushed to Memory",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x56",
+ "EventName": "UNC_CHA_WB_PUSH_MTOI.MEM",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC8",
+ "PerPkg": "1",
+ "UMaskExt": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC9",
+ "PerPkg": "1",
+ "UMaskExt": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC10",
+ "PerPkg": "1",
+ "UMaskExt": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC11",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC11",
+ "PerPkg": "1",
+ "UMaskExt": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC12",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC12",
+ "PerPkg": "1",
+ "UMaskExt": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC13",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC13",
+ "PerPkg": "1",
+ "UMaskExt": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "XPT Prefetches : Sent (on 0?)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6f",
+ "EventName": "UNC_CHA_XPT_PREF.SENT0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "XPT Prefetches : Dropped (on 0?) - No Credits",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6f",
+ "EventName": "UNC_CHA_XPT_PREF.DROP0_NOCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "XPT Prefetches : Dropped (on 0?) - Conflict",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6f",
+ "EventName": "UNC_CHA_XPT_PREF.DROP0_CONFLICT",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "XPT Prefetches : Sent (on 1?)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6f",
+ "EventName": "UNC_CHA_XPT_PREF.SENT1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "XPT Prefetches : Dropped (on 1?) - No Credits",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6f",
+ "EventName": "UNC_CHA_XPT_PREF.DROP1_NOCRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "XPT Prefetches : Dropped (on 1?) - Conflict",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6f",
+ "EventName": "UNC_CHA_XPT_PREF.DROP1_CONFLICT",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : I State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.I",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : SnoopFilter - S State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.SF_S",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : SnoopFilter - E State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.SF_E",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : SnoopFilter - H State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.SF_H",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : S State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.S",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : E State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.E",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : M State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.M",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : F State",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.F",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : RFO Requests",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.RFO",
+ "PerPkg": "1",
+ "UMask": "0x1BC8FF",
+ "UMaskExt": "0x1BC8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : IRQ - iA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IRQ_IA",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : SF/LLC Evictions",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.EVICT",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : PRQ - IOSF",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.PRQ_IOSF",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : IRQ - Non iA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IRQ_NON_IA",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : PRQ - Non IOSF",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.PRQ_NON_IOSF",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All from Local IO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.LOC_IO",
+ "PerPkg": "1",
+ "UMask": "0xC000FF04",
+ "UMaskExt": "0xC000FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All from Local iA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.LOC_IA",
+ "PerPkg": "1",
+ "UMask": "0xC000FF01",
+ "UMaskExt": "0xC000FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All from Local iA and IO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.LOC_ALL",
+ "PerPkg": "1",
+ "UMask": "0xC000FF05",
+ "UMaskExt": "0xC000FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just Hits",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.HIT",
+ "PerPkg": "1",
+ "UMaskExt": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just Misses",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.MISS",
+ "PerPkg": "1",
+ "UMaskExt": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.DDR",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.DDR4",
+ "PerPkg": "1",
+ "UMaskExt": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : MMCFG Access",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.MMCFG",
+ "PerPkg": "1",
+ "UMaskExt": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just Local Targets",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.LOCAL_TGT",
+ "PerPkg": "1",
+ "UMaskExt": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Match the Opcode in b[29:19] of the extended umask field",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.MATCH_OPC",
+ "PerPkg": "1",
+ "UMaskExt": "0x200",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Match the PreMorphed Opcode in b[29:19] of the extended umask field",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.PREMORPH_OPC",
+ "PerPkg": "1",
+ "UMaskExt": "0x400",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just NearMem",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.NEARMEM",
+ "PerPkg": "1",
+ "UMaskExt": "0x400000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just NotNearMem",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.NOT_NEARMEM",
+ "PerPkg": "1",
+ "UMaskExt": "0x800000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just NonCoherent",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.NONCOH",
+ "PerPkg": "1",
+ "UMaskExt": "0x1000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just ISOC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.ISOC",
+ "PerPkg": "1",
+ "UMaskExt": "0x2000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : IRQ - iA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IRQ_IA",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : SF/LLC Evictions",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.EVICT",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : PRQ - IOSF",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : IRQ - Non iA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IRQ_NON_IA",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : PRQ - Non IOSF",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.PRQ_NON_IOSF",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All from Local IO",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_IO",
+ "PerPkg": "1",
+ "UMask": "0xC000FF04",
+ "UMaskExt": "0xC000FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All from Local iA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_IA",
+ "PerPkg": "1",
+ "UMask": "0xC000FF01",
+ "UMaskExt": "0xC000FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All from Local iA and IO",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_ALL",
+ "PerPkg": "1",
+ "UMask": "0xC000FF05",
+ "UMaskExt": "0xC000FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just Hits",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.HIT",
+ "PerPkg": "1",
+ "UMaskExt": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just Misses",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.MISS",
+ "PerPkg": "1",
+ "UMaskExt": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : MMCFG Access",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.MMCFG",
+ "PerPkg": "1",
+ "UMaskExt": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just Local Targets",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.LOCAL_TGT",
+ "PerPkg": "1",
+ "UMaskExt": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Match the Opcode in b[29:19] of the extended umask field",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.MATCH_OPC",
+ "PerPkg": "1",
+ "UMaskExt": "0x200",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Match the PreMorphed Opcode in b[29:19] of the extended umask field",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.PREMORPH_OPC",
+ "PerPkg": "1",
+ "UMaskExt": "0x400",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just NearMem",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.NEARMEM",
+ "PerPkg": "1",
+ "UMaskExt": "0x400000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just NotNearMem",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.NOT_NEARMEM",
+ "PerPkg": "1",
+ "UMaskExt": "0x800000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just NonCoherent",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.NONCOH",
+ "PerPkg": "1",
+ "UMaskExt": "0x1000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just ISOC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.ISOC",
+ "PerPkg": "1",
+ "UMaskExt": "0x2000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x81",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x81",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x81",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x89",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x89",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x89",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8B",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8B",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8B",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x85",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x85",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x85",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x87",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x87",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x87",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+        "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+        "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8D",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8D",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8D",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8F",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8F",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8F",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : Vertical",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAF",
+ "EventName": "UNC_CHA_DISTRESS_ASSERTED.VERT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : Horizontal",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAF",
+ "EventName": "UNC_CHA_DISTRESS_ASSERTED.HORZ",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Local",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAF",
+ "EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Remote",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAF",
+ "EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_NONLOCAL",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Stalled - IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAF",
+ "EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_STALL_IV",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Stalled - No Credit",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAF",
+ "EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_STALL_NOCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xBA",
+ "EventName": "UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xBA",
+ "EventName": "UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB6",
+ "EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB6",
+ "EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB6",
+ "EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB6",
+ "EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+        "BriefDescription": "Horizontal AKC Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xBB",
+ "EventName": "UNC_CHA_HORZ_RING_AKC_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+        "BriefDescription": "Horizontal AKC Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xBB",
+ "EventName": "UNC_CHA_HORZ_RING_AKC_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+        "BriefDescription": "Horizontal AKC Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xBB",
+ "EventName": "UNC_CHA_HORZ_RING_AKC_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+        "BriefDescription": "Horizontal AKC Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xBB",
+ "EventName": "UNC_CHA_HORZ_RING_AKC_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB7",
+ "EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB7",
+ "EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB7",
+ "EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB7",
+ "EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Left and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB8",
+ "EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Left and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB8",
+ "EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Right and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB8",
+ "EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Right and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB8",
+ "EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use : Left",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB9",
+ "EventName": "UNC_CHA_HORZ_RING_IV_IN_USE.LEFT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use : Right",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB9",
+ "EventName": "UNC_CHA_HORZ_RING_IV_IN_USE.RIGHT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE6",
+ "EventName": "UNC_CHA_MISC_EXTERNAL.MBE_INST0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE6",
+ "EventName": "UNC_CHA_MISC_EXTERNAL.MBE_INST1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAC",
+ "EventName": "UNC_CHA_RING_BOUNCES_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAC",
+ "EventName": "UNC_CHA_RING_BOUNCES_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAC",
+ "EventName": "UNC_CHA_RING_BOUNCES_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAC",
+ "EventName": "UNC_CHA_RING_BOUNCES_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAA",
+ "EventName": "UNC_CHA_RING_BOUNCES_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Acknowledgements to core",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAA",
+ "EventName": "UNC_CHA_RING_BOUNCES_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Data Responses to core",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAA",
+ "EventName": "UNC_CHA_RING_BOUNCES_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Snoops of processor's cache",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAA",
+ "EventName": "UNC_CHA_RING_BOUNCES_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAA",
+ "EventName": "UNC_CHA_RING_BOUNCES_VERT.AKC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAD",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAD",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAD",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAD",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : Acknowledgements to Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAD",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAB",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Acknowledgements to core",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAB",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Data Responses to core",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAB",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Snoops of processor's cache",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAB",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAB",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_VERT.AKC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE5",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE5",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE5",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE5",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE5",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE5",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_CHA_RxR_BYPASS.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_CHA_RxR_BYPASS.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_CHA_RxR_BYPASS.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_CHA_RxR_BYPASS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_CHA_RxR_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_CHA_RxR_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_CHA_RxR_BYPASS.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_CHA_RxR_BYPASS.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_CHA_RxR_BYPASS.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : IFV - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.IFV",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_CHA_RxR_INSERTS.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_CHA_RxR_INSERTS.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_CHA_RxR_INSERTS.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_CHA_RxR_INSERTS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_CHA_RxR_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_CHA_RxR_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_CHA_RxR_INSERTS.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_CHA_RxR_INSERTS.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_CHA_RxR_INSERTS.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD1",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD1",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD1",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD3",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD3",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD3",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD5",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD5",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD5",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD7",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD7",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD7",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA5",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA5",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA5",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA5",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA5",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA5",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA5",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : IV - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.IV_AG1",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9E",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS_1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9E",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS_1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : IV - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x95",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x95",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : IV - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x97",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x97",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : IV - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x93",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x93",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x99",
+ "EventName": "UNC_CHA_TxR_VERT_NACK1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x99",
+ "EventName": "UNC_CHA_TxR_VERT_NACK1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : IV - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x91",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x91",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9B",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9B",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9B",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED1.TGC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_VERT_RING_AD_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_VERT_RING_AD_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_VERT_RING_AD_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_VERT_RING_AD_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB4",
+ "EventName": "UNC_CHA_VERT_RING_AKC_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB4",
+ "EventName": "UNC_CHA_VERT_RING_AKC_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB4",
+ "EventName": "UNC_CHA_VERT_RING_AKC_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB4",
+ "EventName": "UNC_CHA_VERT_RING_AKC_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_VERT_RING_AK_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_VERT_RING_AK_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_VERT_RING_AK_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_VERT_RING_AK_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Up and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_VERT_RING_BL_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Up and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_VERT_RING_BL_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Down and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_VERT_RING_BL_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Down and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_VERT_RING_BL_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use : Up",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB3",
+ "EventName": "UNC_CHA_VERT_RING_IV_IN_USE.UP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use : Down",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB3",
+ "EventName": "UNC_CHA_VERT_RING_IV_IN_USE.DN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB5",
+ "EventName": "UNC_CHA_VERT_RING_TGC_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB5",
+ "EventName": "UNC_CHA_VERT_RING_TGC_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB5",
+ "EventName": "UNC_CHA_VERT_RING_TGC_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB5",
+ "EventName": "UNC_CHA_VERT_RING_TGC_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Source Throttle",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xae",
+ "EventName": "UNC_CHA_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe4",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED_1",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Any Request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1FFFFF",
+ "UMaskExt": "0x1FFF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.DATA_READ",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_RD",
+ "PerPkg": "1",
+ "UMask": "0x1bc1ff",
+ "UMaskExt": "0x1bc1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Flush or Invalidate Requests",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.FLUSH_INV",
+ "PerPkg": "1",
+ "UMask": "0x1A44FF",
+ "UMaskExt": "0x1A44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.CODE_READ",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.CODE",
+ "PerPkg": "1",
+ "UMask": "0x1bd0ff",
+ "UMaskExt": "0x1bd0",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that hit the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC88FFD01",
+ "UMaskExt": "0xC88FFD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Hit the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC887FD01",
+ "UMaskExt": "0xC887FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC88FFE01",
+ "UMaskExt": "0xC88FFE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores that missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC8A7FE01",
+ "UMaskExt": "0xC8A7FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC887FE01",
+ "UMaskExt": "0xC887FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by IO Devices that hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_RFO",
+ "PerPkg": "1",
+ "UMask": "0xC803FD04",
+ "UMaskExt": "0xC803FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMs issued by IO Devices that Hit the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_ITOM",
+ "PerPkg": "1",
+ "UMask": "0xCC43FD04",
+ "UMaskExt": "0xCC43FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFOs issued by IO Devices that hit the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_RFO",
+ "PerPkg": "1",
+ "UMask": "0xC803FD04",
+ "UMaskExt": "0xC803FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by IO Devices",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_RFO",
+ "PerPkg": "1",
+ "UMask": "0xC803FF04",
+ "UMaskExt": "0xC803FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; CRd Pref from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_CRD_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC88FFF01",
+ "UMaskExt": "0xC88FFF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFOs issued by IO Devices",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_RFO",
+ "PerPkg": "1",
+ "UMask": "0xC803FF04",
+ "UMaskExt": "0xC803FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMs issued by IO Devices",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_ITOM",
+ "PerPkg": "1",
+ "UMask": "0xCC43FF04",
+ "UMaskExt": "0xCC43FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_RFO_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC887FF01",
+ "UMaskExt": "0xC887FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; CRd Pref from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CRD_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC88FFF01",
+ "UMaskExt": "0xC88FFF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CLFlushOpts issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_CLFLUSHOPT",
+ "PerPkg": "1",
+ "UMask": "0xC8D7FF01",
+ "UMaskExt": "0xC8D7FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WbMtoIs issued by IO Devices",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0xCC23FF04",
+ "UMaskExt": "0xCC23FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CLFlushes issued by IO Devices",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_CLFLUSH",
+ "PerPkg": "1",
+ "UMask": "0xC8C3FF04",
+ "UMaskExt": "0xC8C3FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WbMtoIs issued by an iA Cores. Modified Write Backs",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0xcc27ff01",
+ "UMaskExt": "0xcc27ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; WCiLF misses from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_FULL_STREAMING_WR",
+ "PerPkg": "1",
+ "UMask": "0xc867fe01",
+ "UMaskExt": "0xc867fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; WCiL misses from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_PARTIAL_STREAMING_WR",
+ "PerPkg": "1",
+ "UMask": "0xc86ffe01",
+ "UMaskExt": "0xc86ffe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : RFO Request Filter",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.RFO_F",
+ "PerPkg": "1",
+ "UMaskExt": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Transactions homed locally Filter",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_F",
+ "PerPkg": "1",
+ "UMaskExt": "0x800",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : All Request Filter",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.ANY_F",
+ "PerPkg": "1",
+ "UMaskExt": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Data Read Request Filter",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_F",
+ "PerPkg": "1",
+ "UMaskExt": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Write Request Filter",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.OTHER_REQ_F",
+ "PerPkg": "1",
+ "UMaskExt": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Flush or Invalidate Filter",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.FLUSH_OR_INV_F",
+ "PerPkg": "1",
+ "UMaskExt": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : CRd Request Filter",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.CODE_READ_F",
+ "PerPkg": "1",
+ "UMaskExt": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Local request Filter",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.COREPREF_OR_DMND_LOCAL_F",
+ "PerPkg": "1",
+ "UMaskExt": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : All Misses",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.MISS_ALL",
+ "PerPkg": "1",
+ "UMask": "0x1fe001",
+ "UMaskExt": "0x1fe0",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_ALL",
+ "PerPkg": "1",
+ "UMask": "0x1fc1ff",
+ "UMaskExt": "0x1fc1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Data Read Misses",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x1bc101",
+ "UMaskExt": "0x1bc1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.DATA_READ_LOCAL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DMND_READ_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x841ff",
+ "UMaskExt": "0x841",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. ",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.WRITE_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x842ff",
+ "UMaskExt": "0x842",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.RFO_LOCAL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.RFO_PREF_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x888ff",
+ "UMaskExt": "0x888",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLF issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WCILF",
+ "PerPkg": "1",
+ "UMask": "0xC867FF01",
+ "UMaskExt": "0xC867FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WCIL",
+ "PerPkg": "1",
+ "UMask": "0xC86FFF01",
+ "UMaskExt": "0xC86FFF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DDR4 Access",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.DDR",
+ "PerPkg": "1",
+ "UMaskExt": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DDR4 Access",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.DDR",
+ "PerPkg": "1",
+ "UMaskExt": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that hit the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_PCIRDCUR",
+ "PerPkg": "1",
+ "UMask": "0xC8F3FD04",
+ "UMaskExt": "0xC8F3FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLF issued by iA Cores that Missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF",
+ "PerPkg": "1",
+ "UMask": "0xC867FE01",
+ "UMaskExt": "0xC867FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores that Missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL",
+ "PerPkg": "1",
+ "UMask": "0xC86FFE01",
+ "UMaskExt": "0xC86FFE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CLFlushes issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CLFLUSH",
+ "PerPkg": "1",
+ "UMask": "0xC8C7FF01",
+ "UMaskExt": "0xC8C7FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CLFlushOpts issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CLFLUSHOPT",
+ "PerPkg": "1",
+ "UMask": "0xC8D7FF01",
+ "UMaskExt": "0xC8D7FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WbMtoIs issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0xCC27FF01",
+ "UMaskExt": "0xCC27FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : UCRdFs issued by iA Cores that Missed LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_UCRDF",
+ "PerPkg": "1",
+ "UMask": "0xC877DE01",
+ "UMaskExt": "0xC877DE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WiLs issued by iA Cores that Missed LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WIL",
+ "PerPkg": "1",
+ "UMask": "0xC87FDE01",
+ "UMaskExt": "0xC87FDE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLF issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WCILF",
+ "PerPkg": "1",
+ "UMask": "0xC867FF01",
+ "UMaskExt": "0xC867FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WCIL",
+ "PerPkg": "1",
+ "UMask": "0xC86FFF01",
+ "UMaskExt": "0xC86FFF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WbMtoIs issued by IO Devices",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0xCC23FF04",
+ "UMaskExt": "0xCC23FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CLFlushes issued by IO Devices",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_CLFLUSH",
+ "PerPkg": "1",
+ "UMask": "0xC8C3FF04",
+ "UMaskExt": "0xC8C3FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "UMask": "0xCD43FF04",
+ "UMaskExt": "0xCD43FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "UMask": "0xCD43FD04",
+ "UMaskExt": "0xCD43FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "UMask": "0xCD43FE04",
+ "UMaskExt": "0xCD43FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WBEFtoEs issued by an IA Core. Non Modified Write Backs",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WBEFTOE",
+ "PerPkg": "1",
+ "UMask": "0xcc3fff01",
+ "UMaskExt": "0xcc3fff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd PTEs issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRDPTE",
+ "PerPkg": "1",
+ "UMask": "0xC837FE01",
+ "UMaskExt": "0xC837FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd PTEs issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRDPTE",
+ "PerPkg": "1",
+ "UMask": "0xC837FD01",
+ "UMaskExt": "0xC837FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd PTEs issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_DRDPTE",
+ "PerPkg": "1",
+ "UMask": "0xC837FF01",
+ "UMaskExt": "0xC837FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WBStoIs issued by an IA Core. Non Modified Write Backs",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WBSTOI",
+ "PerPkg": "1",
+ "UMask": "0xcc67ff01",
+ "UMaskExt": "0xcc67ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WBEFtoIs issued by an IA Core. Non Modified Write Backs",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WBEFTOI",
+ "PerPkg": "1",
+ "UMask": "0xcc37ff01",
+ "UMaskExt": "0xcc37ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WBMtoEs issued by an IA Core. Non Modified Write Backs",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WBMTOE",
+ "PerPkg": "1",
+ "UMask": "0xcc2fff01",
+ "UMaskExt": "0xcc2fff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRDPTE",
+ "PerPkg": "1",
+ "UMask": "0xC837FF01",
+ "UMaskExt": "0xC837FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk that hit the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRDPTE",
+ "PerPkg": "1",
+ "UMask": "0xC837FD01",
+ "UMaskExt": "0xC837FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk that missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRDPTE",
+ "PerPkg": "1",
+ "UMask": "0xC837FE01",
+ "UMaskExt": "0xC837FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Code Read Misses",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.CODE_READ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x1BD001",
+ "UMaskExt": "0x1BD0",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : RFO Misses",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.RFO_MISS",
+ "PerPkg": "1",
+ "UMask": "0x1BC801",
+ "UMaskExt": "0x1BC8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Reads",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.READ",
+ "PerPkg": "1",
+ "UMask": "0x1BD9FF",
+ "UMaskExt": "0x1BD9",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Read Misses",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.READ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x1BD901",
+ "UMaskExt": "0x1BD9",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Locally HOMed Read Misses",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.READ_MISS_LOC_HOM",
+ "PerPkg": "1",
+ "UMask": "0x0BD901",
+ "UMaskExt": "0x0BD9",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Remotely HOMed Read Misses",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.READ_MISS_REM_HOM",
+ "PerPkg": "1",
+ "UMask": "0x13D901",
+ "UMaskExt": "0x13D9",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Locally Requested Reads that are Locally HOMed",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.READ_LOCAL_LOC_HOM",
+ "PerPkg": "1",
+ "UMask": "0x09D9FF",
+ "UMaskExt": "0x09D9",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Remotely Requested Reads that are Locally HOMed",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.READ_REMOTE_LOC_HOM",
+ "PerPkg": "1",
+ "UMask": "0x0A19FF",
+ "UMaskExt": "0x0A19",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Locally Requested Reads that are Remotely HOMed",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.READ_LOCAL_REM_HOM",
+ "PerPkg": "1",
+ "UMask": "0x11D9FF",
+ "UMaskExt": "0x11D9",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Reads that Hit the Snoop Filter",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.READ_SF_HIT",
+ "PerPkg": "1",
+ "UMask": "0x1BD90E",
+ "UMaskExt": "0x1BD9",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Remotely requested Read or Snoop Misses that are Remotely HOMed",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.READ_OR_SNOOP_REMOTE_MISS_REM_HOM",
+ "PerPkg": "1",
+ "UMask": "0x161901",
+ "UMaskExt": "0x1619",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Filters Requests for those that write info into the cache",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.WRITES_AND_OTHER",
+ "PerPkg": "1",
+ "UMask": "0x1A42FF",
+ "UMaskExt": "0x1A42",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Code Reads",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.CODE_READ",
+ "PerPkg": "1",
+ "UMask": "0x1BD0FF",
+ "UMaskExt": "0x1BD0",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": IOTLB lookups first",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.FIRST_LOOKUPS",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": IOTLB lookups all",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.ALL_LOOKUPS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": IOTLB Hits to a 4K Page",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.4K_HITS",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": IOTLB Hits to a 2M Page",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.2M_HITS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": IOTLB Hits to a 1G Page",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.1G_HITS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": IOTLB Fills (same as IOTLB miss)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.MISSES",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Context cache lookups",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.CTXT_CACHE_LOOKUPS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Context cache hits",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.CTXT_CACHE_HITS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PageWalk cache lookup",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.PWT_CACHE_LOOKUPS",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": IOMMU memory access",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.NUM_MEM_ACCESSES",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Cycles PWT full",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.CYC_PWT_FULL",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Interrupt Entry cache lookup",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.INT_CACHE_LOOKUPS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Interrupt Entry cache hit",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.INT_CACHE_HITS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus : Non-PCIE bus",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus : PCIE bus",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus : Non-PCIE bus and !(PCIE bus)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus : Non-PCIE bus and PCIE bus",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS0_BUS1",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus : !(Non-PCIE bus) and PCIE bus",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.NOT_BUS0_BUS1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus : !(Non-PCIE bus) and !(PCIE bus)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.NOT_BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus : Non-PCIE bus",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus : PCIE bus",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus : Non-PCIE bus and !(PCIE bus)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus : Non-PCIE bus and PCIE bus",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS0_BUS1",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus : !(Non-PCIE bus) and PCIE bus",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.NOT_BUS0_BUS1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus : !(Non-PCIE bus) and !(PCIE bus)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.NOT_BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number requests PCIe makes of the main die : Drop request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x85",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU.ALL.DROP",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Counting disabled",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_IIO_NOTHING",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PWT occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_IIO_PWT_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Symbol Times on Link",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_IIO_SYMBOL_TIMES",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "1",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART0_FREERUN",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "2",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART1_FREERUN",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "3",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART2_FREERUN",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "4",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART3_FREERUN",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "5",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART4_FREERUN",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "6",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART5_FREERUN",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "7",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART6_FREERUN",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "8",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART7_FREERUN",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "9",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_IIO_BANDWIDTH_OUT.PART0_FREERUN",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "13",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_IIO_BANDWIDTH_OUT.PART4_FREERUN",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "12",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_IIO_BANDWIDTH_OUT.PART3_FREERUN",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "11",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_IIO_BANDWIDTH_OUT.PART2_FREERUN",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "10",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_IIO_BANDWIDTH_OUT.PART1_FREERUN",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "15",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_IIO_BANDWIDTH_OUT.PART6_FREERUN",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "14",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_IIO_BANDWIDTH_OUT.PART5_FREERUN",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "Counter": "16",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_IIO_BANDWIDTH_OUT.PART7_FREERUN",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PWC Hit to a 4K page",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.PWC_4K_HITS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PWC Hit to a 2M page",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.PWC_2M_HITS",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PWC Hit to a 1G page",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.PWC_1G_HITS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PWT Hit to a 256T page",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.PWC_512G_HITS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PageWalk cache fill",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.PWC_CACHE_FILLS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Global IOTLB invalidation cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.NUM_INVAL_GBL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Domain-selective IOTLB invalidation cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.NUM_INVAL_DOMAIN",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Page-selective IOTLB invalidation cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.NUM_INVAL_PAGE",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Context cache global invalidation cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.NUM_CTXT_CACHE_INVAL_GBL",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Domain-selective Context cache invalidation cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.NUM_CTXT_CACHE_INVAL_DOMAIN",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Device-selective Context cache invalidation cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.NUM_CTXT_CACHE_INVAL_DEVICE",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : MsgB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MSGB",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Multi-cast",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MCAST",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Ubox",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.UBOX",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Memory",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MEM",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Remote P2P",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.REM_P2P",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Local P2P",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.LOC_P2P",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Confined P2P",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.CONFINED_P2P",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Abort",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.ABORT",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "ITC address map 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8F",
+ "EventName": "UNC_IIO_NUM_TGT_MATCHED_REQ_OF_CPU",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Issuing to IOMMU",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.IOMMU_REQ",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Processing response from IOMMU",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.IOMMU_HIT",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Request Ownership",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.REQ_OWN",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Issuing final read or write of line",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.FINAL_RD_WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Writing line",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Passing data to be written",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.DATA",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Occupancy of outbound request queue : To device",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC5",
+ "EventName": "UNC_IIO_NUM_OUSTANDING_REQ_FROM_CPU.TO_IO",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - cacheline complete : Request Ownership",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x91",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.REQ_OWN",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - cacheline complete : Issuing final read or write of line",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x91",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.FINAL_RD_WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - cacheline complete : Writing line",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x91",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - cacheline complete : Passing data to be written",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x91",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.DATA",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request complete : Issuing to IOMMU",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.IOMMU_REQ",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request complete : Processing response from IOMMU",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.IOMMU_HIT",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request complete : Request Ownership",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.REQ_OWN",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request complete : Issuing final read or write of line",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.FINAL_RD_WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request complete : Writing line",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request complete : Passing data to be written",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.DATA",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - pass complete : Request Ownership",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.REQ_OWN",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - pass complete : Issuing final read or write of line",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.FINAL_RD_WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - pass complete : Writing line",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - pass complete : Passing data to be written",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.DATA",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests : Issuing to IOMMU",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_IIO_INBOUND_ARB_REQ.IOMMU_REQ",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests : Processing response from IOMMU",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_IIO_INBOUND_ARB_REQ.IOMMU_HIT",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests : Request Ownership",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_IIO_INBOUND_ARB_REQ.REQ_OWN",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests : Issuing final read or write of line",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_IIO_INBOUND_ARB_REQ.FINAL_RD_WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests : Writing line",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_IIO_INBOUND_ARB_REQ.WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests : Passing data to be written",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_IIO_INBOUND_ARB_REQ.DATA",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests granted : Issuing to IOMMU",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x87",
+ "EventName": "UNC_IIO_INBOUND_ARB_WON.IOMMU_REQ",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests granted : Processing response from IOMMU",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x87",
+ "EventName": "UNC_IIO_INBOUND_ARB_WON.IOMMU_HIT",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests granted : Request Ownership",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x87",
+ "EventName": "UNC_IIO_INBOUND_ARB_WON.REQ_OWN",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests granted : Issuing final read or write of line",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x87",
+ "EventName": "UNC_IIO_INBOUND_ARB_WON.FINAL_RD_WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests granted : Writing line",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x87",
+ "EventName": "UNC_IIO_INBOUND_ARB_WON.WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests granted : Passing data to be written",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x87",
+ "EventName": "UNC_IIO_INBOUND_ARB_WON.DATA",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Outbound cacheline requests issued : 64B requests issued to device",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_IIO_OUTBOUND_CL_REQS_ISSUED.TO_IO",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Outbound TLP (transaction layer packet) requests issued : To device",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD1",
+ "EventName": "UNC_IIO_OUTBOUND_TLP_REQS_ISSUED.TO_IO",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number requests sent to PCIe from main die : From IRP",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_NUM_REQ_FROM_CPU.IRP",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number requests sent to PCIe from main die : From ITC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_NUM_REQ_FROM_CPU.ITC",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number requests sent to PCIe from main die : Completion allocations",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_NUM_REQ_FROM_CPU.PREALLOC",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts : All Ports",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.ALL",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 0-7",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0xFF",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Snoop Responses : Hit M",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_M",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "RFO request issued by the IRP unit to the mesh with the intention of writing a partial cacheline",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.RFO",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Total Write Cache Occupancy : Any Source",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x0F",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.ANY",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Total Write Cache Occupancy : Snoops",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x0F",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.IV_Q",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops : CLFlush",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.CLFLUSH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": ": All Inserts Outbound (BL, AK, Snoops)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_I_IRP_ALL.OUTBOUND_INSERTS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": ": All Inserts Outbound (BL, AK, Snoops)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_I_IRP_ALL.EVICTS",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Fastpath Requests",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1e",
+ "EventName": "UNC_I_MISC0.FAST_REQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Fastpath Rejects",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1E",
+ "EventName": "UNC_I_MISC0.FAST_REJ",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Cache Inserts of Read Transactions as Secondary",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1e",
+ "EventName": "UNC_I_MISC0.2ND_RD_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Cache Inserts of Write Transactions as Secondary",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1e",
+ "EventName": "UNC_I_MISC0.2ND_WR_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Cache Inserts of Atomic Transactions as Secondary",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1E",
+ "EventName": "UNC_I_MISC0.2ND_ATOMIC_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Fastpath Transfers From Primary to Secondary",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1E",
+ "EventName": "UNC_I_MISC0.FAST_XFER",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Prefetch Ack Hints From Primary to Secondary",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1E",
+ "EventName": "UNC_I_MISC0.PF_ACK_HINT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Slow path fwpf didn't find prefetch",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1E",
+ "EventName": "UNC_I_MISC0.SLOWPATH_FWPF_NO_PRF",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Slow Transfer of I Line",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1f",
+ "EventName": "UNC_I_MISC1.SLOW_I",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Slow Transfer of S Line",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1f",
+ "EventName": "UNC_I_MISC1.SLOW_S",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Slow Transfer of E Line",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1f",
+ "EventName": "UNC_I_MISC1.SLOW_E",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Slow Transfer of M Line",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1f",
+ "EventName": "UNC_I_MISC1.SLOW_M",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Received Invalid",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1F",
+ "EventName": "UNC_I_MISC1.SEC_RCVD_INVLD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Received Valid",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1F",
+ "EventName": "UNC_I_MISC1.SEC_RCVD_VLD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions : P2P reads",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.RD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions : P2P Writes",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.WR",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions : P2P Message",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.MSG",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions : P2P completions",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.CMPL",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions : Match if remote only",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.REM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions : match if remote and target matches",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.REM_AND_TGT_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions : match if local only",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.LOC",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions : match if local and target matches",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.LOC_AND_TGT_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : Miss",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.MISS",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : Hit I",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_I",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : Hit E or S",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_ES",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : SnpCode",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.SNPCODE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : SnpData",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.SNPDATA",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : SnpInv",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.SNPINV",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count : Writes",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.WRITES",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count : Atomic",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.ATOMIC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count : Other",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.OTHER",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count : Select Source",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.ORDERINGQ",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Requests",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_P2P_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Occupancy",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_P2P_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x0B",
+ "EventName": "UNC_I_TxC_AK_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL DRS Egress Cycles Full",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x05",
+ "EventName": "UNC_I_TxC_BL_DRS_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL DRS Egress Inserts",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_I_TxC_BL_DRS_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL DRS Egress Occupancy",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x08",
+ "EventName": "UNC_I_TxC_BL_DRS_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCB Egress Cycles Full",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x06",
+ "EventName": "UNC_I_TxC_BL_NCB_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCB Egress Inserts",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_I_TxC_BL_NCB_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCB Egress Occupancy",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x09",
+ "EventName": "UNC_I_TxC_BL_NCB_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCS Egress Cycles Full",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x07",
+ "EventName": "UNC_I_TxC_BL_NCS_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCS Egress Inserts",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_I_TxC_BL_NCS_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCS Egress Occupancy",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x0A",
+ "EventName": "UNC_I_TxC_BL_NCS_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "UNC_I_TxR2_AD01_STALL_CREDIT_CYCLES",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1C",
+ "EventName": "UNC_I_TxR2_AD01_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No AD0 Egress Credits Stalls",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1A",
+ "EventName": "UNC_I_TxR2_AD0_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No AD1 Egress Credits Stalls",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1B",
+ "EventName": "UNC_I_TxR2_AD1_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No BL Egress Credit Stalls",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1D",
+ "EventName": "UNC_I_TxR2_BL_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x0D",
+ "EventName": "UNC_I_TxS_DATA_INSERTS_NCB",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x0E",
+ "EventName": "UNC_I_TxS_DATA_INSERTS_NCS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Request Queue Occupancy",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x0C",
+ "EventName": "UNC_I_TxS_REQUEST_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that miss the IIO cache",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_MISS",
+ "PerPkg": "1",
+ "UMask": "0x71",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit M, E, S or I line in the IIO",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT",
+ "PerPkg": "1",
+ "UMask": "0x7e",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit E or S line in the IIO cache",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT_ES",
+ "PerPkg": "1",
+ "UMask": "0x74",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit I line in the IIO cache",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT_I",
+ "PerPkg": "1",
+ "UMask": "0x72",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "M2M to iMC Bypass : Not Taken",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x22",
+ "EventName": "UNC_M2M_BYPASS_M2M_EGRESS.NOT_TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles when direct to core mode, which bypasses the CHA, was disabled",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_DIRSTATE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of reads in which direct to core transaction was overridden",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_DIRECT2CORE_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : All, regardless of priority. - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x0704",
+ "UMaskExt": "0x07",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : Normal Priority - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.NORMAL",
+ "PerPkg": "1",
+ "UMask": "0x0701",
+ "UMaskExt": "0x07",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : All Writes - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1C10",
+ "UMaskExt": "0x1C",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Full Line Non-ISOCH - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.FULL",
+ "PerPkg": "1",
+ "UMask": "0x1C01",
+ "UMaskExt": "0x1C",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Partial Non-ISOCH - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.PARTIAL",
+ "PerPkg": "1",
+ "UMask": "0x1C02",
+ "UMaskExt": "0x1C",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Allocations",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x01",
+ "EventName": "UNC_M2M_RxC_AD_INSERTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_M2M_RxC_AD_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) Allocations",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x05",
+ "EventName": "UNC_M2M_RxC_BL_INSERTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x06",
+ "EventName": "UNC_M2M_RxC_BL_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Allocations",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x09",
+ "EventName": "UNC_M2M_TxC_AD_INSERTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x0A",
+ "EventName": "UNC_M2M_TxC_AD_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Allocations : All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2M_TxC_BL_INSERTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC Bypass : Taken",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x22",
+ "EventName": "UNC_M2M_BYPASS_M2M_EGRESS.TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC Bypass : Taken",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_BYPASS_M2M_INGRESS.TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC Bypass : Not Taken",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_BYPASS_M2M_INGRESS.NOT_TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : Normal Priority - Ch0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH0_NORMAL",
+ "PerPkg": "1",
+ "UMask": "0x0101",
+ "UMaskExt": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : Critical Priority - Ch0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH0_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x0102",
+ "UMaskExt": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : All, regardless of priority. - Ch0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH0_ALL",
+ "PerPkg": "1",
+ "UMask": "0x0104",
+ "UMaskExt": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : From TGR - Ch0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH0_FROM_TGR",
+ "PerPkg": "1",
+ "UMask": "0x0140",
+ "UMaskExt": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : Normal Priority - Ch1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH1_NORMAL",
+ "PerPkg": "1",
+ "UMask": "0x0201",
+ "UMaskExt": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : Critical Priority - Ch1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH1_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x0202",
+ "UMaskExt": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : All, regardless of priority. - Ch1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH1_ALL",
+ "PerPkg": "1",
+ "UMask": "0x0204",
+ "UMaskExt": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : From TGR - Ch1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH1_FROM_TGR",
+ "PerPkg": "1",
+ "UMask": "0x0240",
+ "UMaskExt": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Full Line Non-ISOCH - Ch0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_FULL",
+ "PerPkg": "1",
+ "UMask": "0x0401",
+ "UMaskExt": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Partial Non-ISOCH - Ch0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_PARTIAL",
+ "PerPkg": "1",
+ "UMask": "0x0402",
+ "UMaskExt": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : ISOCH Full Line - Ch0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_FULL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x0404",
+ "UMaskExt": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : ISOCH Partial - Ch0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_PARTIAL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x0408",
+ "UMaskExt": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : All Writes - Ch0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_ALL",
+ "PerPkg": "1",
+ "UMask": "0x0410",
+ "UMaskExt": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : From TGR - Ch0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_FROM_TGR",
+ "PerPkg": "1",
+ "UMaskExt": "0x05",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Full Line Non-ISOCH - Ch1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_FULL",
+ "PerPkg": "1",
+ "UMask": "0x0801",
+ "UMaskExt": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Partial Non-ISOCH - Ch1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_PARTIAL",
+ "PerPkg": "1",
+ "UMask": "0x0802",
+ "UMaskExt": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : ISOCH Full Line - Ch1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_FULL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x0804",
+ "UMaskExt": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : ISOCH Partial - Ch1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_PARTIAL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x0808",
+ "UMaskExt": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : All Writes - Ch1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_ALL",
+ "PerPkg": "1",
+ "UMask": "0x0810",
+ "UMaskExt": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : From TGR - Ch1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_FROM_TGR",
+ "PerPkg": "1",
+ "UMaskExt": "0x09",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number Packet Header Matches : Mesh Match",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M2M_PKT_MATCH.MESH",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number Packet Header Matches : MC Match",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M2M_PKT_MATCH.MC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Regular : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x43",
+ "EventName": "UNC_M2M_RPQ_NO_REG_CRD.CH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Regular : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x43",
+ "EventName": "UNC_M2M_RPQ_NO_REG_CRD.CH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Special : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2M_RPQ_NO_SPEC_CRD.CH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Special : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2M_RPQ_NO_SPEC_CRD.CH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Full : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2M_TRACKER_FULL.CH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Full : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2M_TRACKER_FULL.CH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Inserts : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x49",
+ "EventName": "UNC_M2M_TRACKER_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Inserts : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x49",
+ "EventName": "UNC_M2M_TRACKER_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2M_TRACKER_NE.CH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2M_TRACKER_NE.CH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Outbound Ring Transactions on AK : NDR Transactions",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x39",
+ "EventName": "UNC_M2M_TxC_AK.NDR",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Outbound Ring Transactions on AK : CRD Transactions to Cbo",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x39",
+ "EventName": "UNC_M2M_TxC_AK.CRD_CBO",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Credit Acquired : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M2M_TxC_AK_CREDITS_ACQUIRED.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Credit Acquired : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M2M_TxC_AK_CREDITS_ACQUIRED.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.RDCRD0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCRD0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCMP0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.RDCRD1",
+ "PerPkg": "1",
+ "UMask": "0x88",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCRD1",
+ "PerPkg": "1",
+ "UMask": "0x90",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCMP1",
+ "PerPkg": "1",
+ "UMask": "0xA0",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full : All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.RDCRD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.WRCRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.WRCMP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty : All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.RDCRD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.WRCRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.WRCMP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.PREF_RD_CAM_HIT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations : All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No AK Egress (to CMS) Credits : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1F",
+ "EventName": "UNC_M2M_TxC_AK_NO_CREDIT_CYCLES.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No AK Egress (to CMS) Credits : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1F",
+ "EventName": "UNC_M2M_TxC_AK_NO_CREDIT_CYCLES.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No AK Egress (to CMS) Credits : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_M2M_TxC_AK_NO_CREDIT_STALLED.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No AK Egress (to CMS) Credits : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_M2M_TxC_AK_NO_CREDIT_STALLED.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.RDCRD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.WRCRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.WRCMP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy : All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache : Data to Cache",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2M_TxC_BL.DRS_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache : Data to Core",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2M_TxC_BL.DRS_CORE",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Credit Acquired : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2M_TxC_BL_CREDITS_ACQUIRED.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Credit Acquired : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2M_TxC_BL_CREDITS_ACQUIRED.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Full : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_FULL.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Full : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_FULL.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Full : All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Not Empty : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_NE.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Not Empty : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_NE.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Not Empty : All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Allocations : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2M_TxC_BL_INSERTS.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Allocations : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2M_TxC_BL_INSERTS.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No BL Egress (to CMS) Credits : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1B",
+ "EventName": "UNC_M2M_TxC_BL_NO_CREDIT_CYCLES.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No BL Egress (to CMS) Credits : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1B",
+ "EventName": "UNC_M2M_TxC_BL_NO_CREDIT_CYCLES.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No BL Egress (to CMS) Credits : Common Mesh Stop - Near Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M2M_TxC_BL_NO_CREDIT_STALLED.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No BL Egress (to CMS) Credits : Common Mesh Stop - Far Side",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M2M_TxC_BL_NO_CREDIT_STALLED.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "WPQ Flush : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2M_WPQ_FLUSH.CH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "WPQ Flush : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2M_WPQ_FLUSH.CH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M-&gt;iMC WPQ Cycles w/Credits - Regular : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M2M_WPQ_NO_REG_CRD.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M-&gt;iMC WPQ Cycles w/Credits - Regular : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M2M_WPQ_NO_REG_CRD.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M-&gt;iMC WPQ Cycles w/Credits - Regular : Channel 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M2M_WPQ_NO_REG_CRD.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M-&gt;iMC WPQ Cycles w/Credits - Special : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M2M_WPQ_NO_SPEC_CRD.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M-&gt;iMC WPQ Cycles w/Credits - Special : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M2M_WPQ_NO_SPEC_CRD.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M-&gt;iMC WPQ Cycles w/Credits - Special : Channel 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M2M_WPQ_NO_SPEC_CRD.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Full : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M2M_WR_TRACKER_FULL.CH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Full : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M2M_WR_TRACKER_FULL.CH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Full : Mirror",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M2M_WR_TRACKER_FULL.MIRR",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Inserts : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2M_WR_TRACKER_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Inserts : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2M_WR_TRACKER_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M2M_WR_TRACKER_NE.CH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M2M_WR_TRACKER_NE.CH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty : Mirror",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M2M_WR_TRACKER_NE.MIRR",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Non-Posted Inserts : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x63",
+ "EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Non-Posted Inserts : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x63",
+ "EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Non-Posted Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x62",
+ "EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Non-Posted Occupancy : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x62",
+ "EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x55",
+ "EventName": "UNC_M2M_WR_TRACKER_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Occupancy : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x55",
+ "EventName": "UNC_M2M_WR_TRACKER_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Occupancy : Mirror",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x55",
+ "EventName": "UNC_M2M_WR_TRACKER_OCCUPANCY.MIRR",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Posted Inserts : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M2M_WR_TRACKER_POSTED_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Posted Inserts : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M2M_WR_TRACKER_POSTED_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Posted Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M2M_WR_TRACKER_POSTED_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Posted Occupancy : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M2M_WR_TRACKER_POSTED_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Non-Inclusive Miss - Ch0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_NI_MISS",
+ "PerPkg": "1",
+ "UMaskExt": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Non-Inclusive Miss - Ch1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_NI_MISS",
+ "PerPkg": "1",
+ "UMaskExt": "0x0C",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Cycles Full : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6B",
+ "EventName": "UNC_M2M_PREFCAM_CYCLES_FULL.CH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Cycles Full : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6B",
+ "EventName": "UNC_M2M_PREFCAM_CYCLES_FULL.CH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Cycles Not Empty : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6C",
+ "EventName": "UNC_M2M_PREFCAM_CYCLES_NE.CH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Cycles Not Empty : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6C",
+ "EventName": "UNC_M2M_PREFCAM_CYCLES_NE.CH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH0_HITA0_INVAL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH0_HITA1_INVAL",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH0_MISS_INVAL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH0_RSP_PDRESET",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH1_HITA0_INVAL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH1_HITA1_INVAL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH1_MISS_INVAL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH1_RSP_PDRESET",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped : XPT - Ch 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6F",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH0_XPT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped : XPT - Ch 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6F",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH1_XPT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Merged with CAMed Prefetches : XPT - Ch 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x74",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.CH0_XPT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Merged with CAMed Prefetches : XPT - Ch 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x74",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.CH1_XPT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Not Merged with CAMed Prefetches : XPT - Ch 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x75",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.CH0_XPT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Not Merged with CAMed Prefetches : XPT - Ch 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x75",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.CH1_XPT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.PF_SECURE_DROP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.NOT_PF_SAD_REGION",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.PF_CAM_HIT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.STOP_B2B",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.ERRORBLK_RxC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.PF_AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.PF_CAM_FULL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.WPQ_PROXY",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.RPQ_PROXY",
+ "PerPkg": "1",
+ "UMaskExt": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.XPT_THRESH",
+ "PerPkg": "1",
+ "UMaskExt": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.PF_SECURE_DROP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.NOT_PF_SAD_REGION",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.PF_CAM_HIT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.STOP_B2B",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.ERRORBLK_RxC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.PF_AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.PF_CAM_FULL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.WPQ_PROXY",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.RPQ_PROXY",
+ "PerPkg": "1",
+ "UMaskExt": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.XPT_THRESH",
+ "PerPkg": "1",
+ "UMaskExt": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : XPT - Ch 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6D",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS.CH0_XPT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : XPT - Ch 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6D",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS.CH1_XPT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6A",
+ "EventName": "UNC_M2M_PREFCAM_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Occupancy : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6A",
+ "EventName": "UNC_M2M_PREFCAM_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": ": Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x76",
+ "EventName": "UNC_M2M_PREFCAM_RESP_MISS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": ": Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x76",
+ "EventName": "UNC_M2M_PREFCAM_RESP_MISS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_PREFCAM_RxC_DEALLOCS.SQUASHED",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x7A",
+ "EventName": "UNC_M2M_PREFCAM_RxC_DEALLOCS.SQUASHED",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_PREFCAM_RxC_DEALLOCS.1LM_POSTED",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x7A",
+ "EventName": "UNC_M2M_PREFCAM_RxC_DEALLOCS.1LM_POSTED",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_PREFCAM_RxC_DEALLOCS.CIS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x7A",
+ "EventName": "UNC_M2M_PREFCAM_RxC_DEALLOCS.CIS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M2M_WR_TRACKER_NE.MIRR_NONTGR",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M2M_WR_TRACKER_NE.MIRR_PWR",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x55",
+ "EventName": "UNC_M2M_WR_TRACKER_OCCUPANCY.MIRR_NONTGR",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x55",
+ "EventName": "UNC_M2M_WR_TRACKER_OCCUPANCY.MIRR_PWR",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x81",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x81",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x81",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x89",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x89",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x89",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8B",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8B",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8B",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x85",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x85",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x85",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x87",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x87",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x87",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8D",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8D",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8D",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8F",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8F",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8F",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : Vertical",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M2M_DISTRESS_ASSERTED.VERT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : Horizontal",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M2M_DISTRESS_ASSERTED.HORZ",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Local",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M2M_DISTRESS_ASSERTED.DPT_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Remote",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M2M_DISTRESS_ASSERTED.DPT_NONLOCAL",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Stalled - IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M2M_DISTRESS_ASSERTED.DPT_STALL_IV",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Stalled - No Credit",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M2M_DISTRESS_ASSERTED.DPT_STALL_NOCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M2M_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M2M_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M2M_HORZ_RING_AKC_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M2M_HORZ_RING_AKC_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M2M_HORZ_RING_AKC_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M2M_HORZ_RING_AKC_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Left and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Left and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Right and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Right and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use : Left",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M2M_HORZ_RING_IV_IN_USE.LEFT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use : Right",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M2M_HORZ_RING_IV_IN_USE.RIGHT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE6",
+ "EventName": "UNC_M2M_MISC_EXTERNAL.MBE_INST0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE6",
+ "EventName": "UNC_M2M_MISC_EXTERNAL.MBE_INST1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M2M_RING_BOUNCES_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M2M_RING_BOUNCES_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M2M_RING_BOUNCES_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M2M_RING_BOUNCES_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M2M_RING_BOUNCES_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Acknowledgements to core",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M2M_RING_BOUNCES_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Data Responses to core",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M2M_RING_BOUNCES_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Snoops of processor's cache",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M2M_RING_BOUNCES_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M2M_RING_BOUNCES_VERT.AKC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : Acknowledgements to Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Acknowledgements to core",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Data Responses to core",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Snoops of processor's cache",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_VERT.AKC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M2M_RxR_BYPASS.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M2M_RxR_BYPASS.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M2M_RxR_BYPASS.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M2M_RxR_BYPASS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M2M_RxR_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M2M_RxR_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M2M_RxR_BYPASS.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M2M_RxR_BYPASS.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M2M_RxR_BYPASS.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : IFV - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.IFV",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M2M_RxR_INSERTS.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M2M_RxR_INSERTS.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M2M_RxR_INSERTS.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M2M_RxR_INSERTS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M2M_RxR_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M2M_RxR_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M2M_RxR_INSERTS.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M2M_RxR_INSERTS.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M2M_RxR_INSERTS.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD1",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD1",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD1",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : IV - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.IV_AG1",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS_1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS_1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : IV - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : IV - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : IV - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2M_TxR_VERT_NACK1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2M_TxR_VERT_NACK1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : IV - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED1.TGC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_VERT_RING_AD_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_VERT_RING_AD_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_VERT_RING_AD_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_VERT_RING_AD_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M2M_VERT_RING_AKC_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M2M_VERT_RING_AKC_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M2M_VERT_RING_AKC_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M2M_VERT_RING_AKC_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_VERT_RING_AK_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_VERT_RING_AK_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_VERT_RING_AK_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_VERT_RING_AK_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Up and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_VERT_RING_BL_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Up and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_VERT_RING_BL_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Down and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_VERT_RING_BL_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Down and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_VERT_RING_BL_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use : Up",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M2M_VERT_RING_IV_IN_USE.UP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use : Down",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M2M_VERT_RING_IV_IN_USE.DN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M2M_VERT_RING_TGC_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M2M_VERT_RING_TGC_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M2M_VERT_RING_TGC_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M2M_VERT_RING_TGC_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_NOTFORKED",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x60",
+ "EventName": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_NOTFORKED",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Inserts",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x64",
+ "EventName": "UNC_M2M_MIRR_WRQ_INSERTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x65",
+ "EventName": "UNC_M2M_MIRR_WRQ_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_PREFCAM_CIS_DROPS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x73",
+ "EventName": "UNC_M2M_PREFCAM_CIS_DROPS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_PREFCAM_RxC_CYCLES_NE",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x79",
+ "EventName": "UNC_M2M_PREFCAM_RxC_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_PREFCAM_RxC_INSERTS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x78",
+ "EventName": "UNC_M2M_PREFCAM_RxC_INSERTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_PREFCAM_RxC_OCCUPANCY",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x77",
+ "EventName": "UNC_M2M_PREFCAM_RxC_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Source Throttle",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xae",
+ "EventName": "UNC_M2M_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Full",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_M2M_RxC_AD_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_M2M_RxC_AD_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M2M_RxC_AK_WR_CMP",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) Full",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x08",
+ "EventName": "UNC_M2M_RxC_BL_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x07",
+ "EventName": "UNC_M2M_RxC_BL_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe4",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED_1",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number AD Ingress Credits",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x41",
+ "EventName": "UNC_M2M_TGR_AD_CREDITS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number BL Ingress Credits",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2M_TGR_BL_CREDITS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Credit Acquired",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x0d",
+ "EventName": "UNC_M2M_TxC_AD_CREDITS_ACQUIRED",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Credits Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x0e",
+ "EventName": "UNC_M2M_TxC_AD_CREDIT_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Full",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x0c",
+ "EventName": "UNC_M2M_TxC_AD_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x0b",
+ "EventName": "UNC_M2M_TxC_AD_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No AD Egress (to CMS) Credits",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x0f",
+ "EventName": "UNC_M2M_TxC_AD_NO_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No AD Egress (to CMS) Credits",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2M_TxC_AD_NO_CREDIT_STALLED",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AKC Credits",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M2M_TxC_AKC_CREDITS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : Critical Priority - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x0702",
+ "UMaskExt": "0x07",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : From TGR - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.FROM_TGR",
+ "PerPkg": "1",
+ "UMask": "0x0740",
+ "UMaskExt": "0x07",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : ISOCH Full Line - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.FULL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x1C04",
+ "UMaskExt": "0x1C",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : ISOCH Partial - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.PARTIAL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x1C08",
+ "UMaskExt": "0x1C",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : From TGR - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.FROM_TGR",
+ "PerPkg": "1",
+ "UMaskExt": "0x1D",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Non-Inclusive Miss - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.NI_MISS",
+ "PerPkg": "1",
+ "UMaskExt": "0x1C",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Cycles Full : All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6B",
+ "EventName": "UNC_M2M_PREFCAM_CYCLES_FULL.ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x07",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Cycles Not Empty : All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6C",
+ "EventName": "UNC_M2M_PREFCAM_CYCLES_NE.ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x07",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped : XPT - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6f",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.XPT_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x15",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Not Merged with CAMed Prefetches : XPT - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x75",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.XPT_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x15",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Occupancy : All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6A",
+ "EventName": "UNC_M2M_PREFCAM_OCCUPANCY.ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x07",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": ": All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x76",
+ "EventName": "UNC_M2M_PREFCAM_RESP_MISS.ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x07",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Merged with CAMed Prefetches : XPT - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x74",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.XPT_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x15",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : XPT - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x6D",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS.XPT_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x15",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Occupancy - Prefetches",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x77",
+ "EventName": "UNC_M2M_RxC_AD_PREF_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Merged with CAMed Prefetches : XPT & UPI- Ch 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x74",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.CH0_XPTUPI",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Not Merged with CAMed Prefetches : XPT & UPI - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x75",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.XPTUPI_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x15",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Not Merged with CAMed Prefetches : XPT & UPI - Ch 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x75",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.CH2_XPTUPI",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Not Merged with CAMed Prefetches : XPT & UPI - Ch 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x75",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.CH1_XPTUPI",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Not Merged with CAMed Prefetches : XPT & UPI - Ch 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x75",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.CH0_XPTUPI",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Merged with CAMed Prefetches : XPT & UPI- All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x74",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.XPTUPI_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x15",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Merged with CAMed Prefetches : XPT & UPI- Ch 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x74",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.CH2_XPTUPI",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Merged with CAMed Prefetches : XPT & UPI - Ch 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x74",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.CH1_XPTUPI",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credit Acquired : DRS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.DRS_0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credit Acquired : DRS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.DRS_1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credit Acquired : NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCB_0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credit Acquired : NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCB_1",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credit Acquired : NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCS_0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credit Acquired : NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCS_1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Failed to Acquire a Credit : DRS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_M2P_IIO_CREDITS_REJECT.DRS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Failed to Acquire a Credit : NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_M2P_IIO_CREDITS_REJECT.NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Failed to Acquire a Credit : NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_M2P_IIO_CREDITS_REJECT.NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credits in Use : DRS to CMS Port 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2P_IIO_CREDITS_USED.DRS_0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credits in Use : DRS to CMS Port 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2P_IIO_CREDITS_USED.DRS_1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credits in Use : NCB to CMS Port 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2P_IIO_CREDITS_USED.NCB_0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credits in Use : NCB to CMS Port 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2P_IIO_CREDITS_USED.NCB_1",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credits in Use : NCS to CMS Port 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2P_IIO_CREDITS_USED.NCS_0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credits in Use : NCS to CMS Port 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2P_IIO_CREDITS_USED.NCS_1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.IIO_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.IIO_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.IIO_NCB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.IIO_NCS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_TxC_CYCLES_FULL.AD_0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_TxC_CYCLES_FULL.AK_0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_TxC_CYCLES_FULL.BL_0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_TxC_CYCLES_FULL.AD_1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_TxC_CYCLES_FULL.AK_1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_TxC_CYCLES_FULL.BL_1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_TxC_CYCLES_NE.AD_0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_TxC_CYCLES_NE.AK_0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_TxC_CYCLES_NE.BL_0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_TxC_CYCLES_NE.AD_1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_TxC_CYCLES_NE.AK_1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_TxC_CYCLES_NE.BL_1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_TxC_INSERTS.AD_0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_TxC_INSERTS.BL_0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_TxC_INSERTS.AK_CRD_0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_TxC_INSERTS.AD_1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_TxC_INSERTS.BL_1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_TxC_INSERTS.AK_CRD_1",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF0 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF0 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF1 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF1 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF2 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF2 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF3 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF3_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF3 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF3_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF4 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF4_NCB",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF4 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF4_NCS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF5 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF5_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF5 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF5_NCS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF0 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF0 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF1 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF1 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF2 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF2 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF3 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF3_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF3 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF3_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF4 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1a",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF4_NCB",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF4 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1a",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF4_NCS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF5 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1a",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF5_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF5 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1a",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF5_NCS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Shared Credits Returned : Agent0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2P_LOCAL_P2P_SHAR_RETURNED.AGENT_0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Shared Credits Returned : Agent1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2P_LOCAL_P2P_SHAR_RETURNED.AGENT_1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Shared Credits Returned : Agent2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2P_LOCAL_P2P_SHAR_RETURNED.AGENT_2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF0 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF0 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF1 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF1 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF2 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF2 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF3 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF3_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF3 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF3_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF4 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x41",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF4_NCB",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF4 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x41",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF4_NCS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF5 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x41",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF5_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF5 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x41",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF5_NCS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF0 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF0 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF1 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF1 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF2 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF2 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF3 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF3_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF3 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF3_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF4 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF4_NCB",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF4 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF4_NCS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF5 - NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF5_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF5 - NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF5_NCS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "P2P Credit Occupancy : Local NCB",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.LOCAL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "P2P Credit Occupancy : Local NCS",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.LOCAL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "P2P Credit Occupancy : Remote NCB",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.REMOTE_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "P2P Credit Occupancy : Remote NCS",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.REMOTE_NCS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "P2P Credit Occupancy : All",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Dedicated Credits Received : Local NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2P_P2P_DED_RECEIVED.LOCAL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Dedicated Credits Received : Local NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2P_P2P_DED_RECEIVED.LOCAL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Dedicated Credits Received : Remote NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2P_P2P_DED_RECEIVED.REMOTE_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Dedicated Credits Received : Remote NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2P_P2P_DED_RECEIVED.REMOTE_NCS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Dedicated Credits Received : All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2P_P2P_DED_RECEIVED.ALL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Shared Credits Received : Local NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2P_P2P_SHAR_RECEIVED.LOCAL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Shared Credits Received : Local NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2P_P2P_SHAR_RECEIVED.LOCAL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Shared Credits Received : Remote NCB",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2P_P2P_SHAR_RECEIVED.REMOTE_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Shared Credits Received : Remote NCS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2P_P2P_SHAR_RECEIVED.REMOTE_NCS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Shared Credits Received : All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2P_P2P_SHAR_RECEIVED.ALL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Shared Credits Returned : Agent0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2P_REMOTE_P2P_SHAR_RETURNED.AGENT_0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Shared Credits Returned : Agent1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2P_REMOTE_P2P_SHAR_RETURNED.AGENT_1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Shared Credits Returned : Agent2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2P_REMOTE_P2P_SHAR_RETURNED.AGENT_2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Returned to credit ring : Agent0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_RETURNED.AGENT_0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Returned to credit ring : Agent1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_RETURNED.AGENT_1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Returned to credit ring : Agent2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_RETURNED.AGENT_2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.CHA_IDI",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.CHA_NCB",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.CHA_NCS",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.CHA_IDI",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.CHA_NCB",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.CHA_NCS",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "UNC_M2P_TxC_CREDITS.PRQ",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2d",
+ "EventName": "UNC_M2P_TxC_CREDITS.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x81",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x81",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x81",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x89",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x89",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x89",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8a",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8a",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8a",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8a",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8a",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8a",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8a",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8a",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8b",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8b",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8b",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x85",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x85",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x85",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x87",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x87",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x87",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8d",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8d",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8d",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8f",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8f",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x8f",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : Vertical",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xaf",
+ "EventName": "UNC_M2P_DISTRESS_ASSERTED.VERT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : Horizontal",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xaf",
+ "EventName": "UNC_M2P_DISTRESS_ASSERTED.HORZ",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Local",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xaf",
+ "EventName": "UNC_M2P_DISTRESS_ASSERTED.DPT_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Remote",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xaf",
+ "EventName": "UNC_M2P_DISTRESS_ASSERTED.DPT_NONLOCAL",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Stalled - IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xaf",
+ "EventName": "UNC_M2P_DISTRESS_ASSERTED.DPT_STALL_IV",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Stalled - No Credit",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xaf",
+ "EventName": "UNC_M2P_DISTRESS_ASSERTED.DPT_STALL_NOCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xba",
+ "EventName": "UNC_M2P_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xba",
+ "EventName": "UNC_M2P_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb6",
+ "EventName": "UNC_M2P_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb6",
+ "EventName": "UNC_M2P_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb6",
+ "EventName": "UNC_M2P_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb6",
+ "EventName": "UNC_M2P_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xbb",
+ "EventName": "UNC_M2P_HORZ_RING_AKC_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xbb",
+ "EventName": "UNC_M2P_HORZ_RING_AKC_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xbb",
+ "EventName": "UNC_M2P_HORZ_RING_AKC_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xbb",
+ "EventName": "UNC_M2P_HORZ_RING_AKC_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb7",
+ "EventName": "UNC_M2P_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb7",
+ "EventName": "UNC_M2P_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb7",
+ "EventName": "UNC_M2P_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb7",
+ "EventName": "UNC_M2P_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Left and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb8",
+ "EventName": "UNC_M2P_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Left and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb8",
+ "EventName": "UNC_M2P_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Right and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb8",
+ "EventName": "UNC_M2P_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Right and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb8",
+ "EventName": "UNC_M2P_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use : Left",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb9",
+ "EventName": "UNC_M2P_HORZ_RING_IV_IN_USE.LEFT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use : Right",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb9",
+ "EventName": "UNC_M2P_HORZ_RING_IV_IN_USE.RIGHT",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe6",
+ "EventName": "UNC_M2P_MISC_EXTERNAL.MBE_INST0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe6",
+ "EventName": "UNC_M2P_MISC_EXTERNAL.MBE_INST1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xac",
+ "EventName": "UNC_M2P_RING_BOUNCES_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xac",
+ "EventName": "UNC_M2P_RING_BOUNCES_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xac",
+ "EventName": "UNC_M2P_RING_BOUNCES_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xac",
+ "EventName": "UNC_M2P_RING_BOUNCES_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xaa",
+ "EventName": "UNC_M2P_RING_BOUNCES_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Acknowledgements to core",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xaa",
+ "EventName": "UNC_M2P_RING_BOUNCES_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Data Responses to core",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xaa",
+ "EventName": "UNC_M2P_RING_BOUNCES_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Snoops of processor's cache",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xaa",
+ "EventName": "UNC_M2P_RING_BOUNCES_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xaa",
+ "EventName": "UNC_M2P_RING_BOUNCES_VERT.AKC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xad",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xad",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : BL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xad",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xad",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : Acknowledgements to Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xad",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_HORZ.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : AD",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xab",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Acknowledgements to core",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xab",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Data Responses to core",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xab",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Snoops of processor's cache",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xab",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xab",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_VERT.AKC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe5",
+ "EventName": "UNC_M2P_RxR_BUSY_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe5",
+ "EventName": "UNC_M2P_RxR_BUSY_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe5",
+ "EventName": "UNC_M2P_RxR_BUSY_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe5",
+ "EventName": "UNC_M2P_RxR_BUSY_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe5",
+ "EventName": "UNC_M2P_RxR_BUSY_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe5",
+ "EventName": "UNC_M2P_RxR_BUSY_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe2",
+ "EventName": "UNC_M2P_RxR_BYPASS.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe2",
+ "EventName": "UNC_M2P_RxR_BYPASS.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe2",
+ "EventName": "UNC_M2P_RxR_BYPASS.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe2",
+ "EventName": "UNC_M2P_RxR_BYPASS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe2",
+ "EventName": "UNC_M2P_RxR_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe2",
+ "EventName": "UNC_M2P_RxR_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe2",
+ "EventName": "UNC_M2P_RxR_BYPASS.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe2",
+ "EventName": "UNC_M2P_RxR_BYPASS.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe2",
+ "EventName": "UNC_M2P_RxR_BYPASS.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : IFV - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED.IFV",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe1",
+ "EventName": "UNC_M2P_RxR_INSERTS.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe1",
+ "EventName": "UNC_M2P_RxR_INSERTS.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe1",
+ "EventName": "UNC_M2P_RxR_INSERTS.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe1",
+ "EventName": "UNC_M2P_RxR_INSERTS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe1",
+ "EventName": "UNC_M2P_RxR_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe1",
+ "EventName": "UNC_M2P_RxR_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe1",
+ "EventName": "UNC_M2P_RxR_INSERTS.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe1",
+ "EventName": "UNC_M2P_RxR_INSERTS.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe1",
+ "EventName": "UNC_M2P_RxR_INSERTS.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M2P_RxR_OCCUPANCY.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M2P_RxR_OCCUPANCY.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M2P_RxR_OCCUPANCY.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M2P_RxR_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M2P_RxR_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M2P_RxR_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M2P_RxR_OCCUPANCY.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M2P_RxR_OCCUPANCY.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M2P_RxR_OCCUPANCY.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd0",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd0",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd0",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd0",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd0",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd0",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd0",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd0",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd1",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd1",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd1",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd3",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd3",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd3",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 8",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd7",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 9",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd7",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR9",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 10",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd7",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR10",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa6",
+ "EventName": "UNC_M2P_TxR_HORZ_ADS_USED.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa6",
+ "EventName": "UNC_M2P_TxR_HORZ_ADS_USED.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa6",
+ "EventName": "UNC_M2P_TxR_HORZ_ADS_USED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa6",
+ "EventName": "UNC_M2P_TxR_HORZ_ADS_USED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa6",
+ "EventName": "UNC_M2P_TxR_HORZ_ADS_USED.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa6",
+ "EventName": "UNC_M2P_TxR_HORZ_ADS_USED.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa7",
+ "EventName": "UNC_M2P_TxR_HORZ_BYPASS.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa7",
+ "EventName": "UNC_M2P_TxR_HORZ_BYPASS.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa7",
+ "EventName": "UNC_M2P_TxR_HORZ_BYPASS.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa7",
+ "EventName": "UNC_M2P_TxR_HORZ_BYPASS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa7",
+ "EventName": "UNC_M2P_TxR_HORZ_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa7",
+ "EventName": "UNC_M2P_TxR_HORZ_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa7",
+ "EventName": "UNC_M2P_TxR_HORZ_BYPASS.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa7",
+ "EventName": "UNC_M2P_TxR_HORZ_BYPASS.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa7",
+ "EventName": "UNC_M2P_TxR_HORZ_BYPASS.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa2",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa2",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa2",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa2",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa2",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa2",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa2",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa2",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa2",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa3",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa3",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa3",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa3",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa3",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa3",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa3",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa3",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa3",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M2P_TxR_HORZ_INSERTS.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M2P_TxR_HORZ_INSERTS.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M2P_TxR_HORZ_INSERTS.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M2P_TxR_HORZ_INSERTS.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M2P_TxR_HORZ_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M2P_TxR_HORZ_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M2P_TxR_HORZ_INSERTS.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M2P_TxR_HORZ_INSERTS.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M2P_TxR_HORZ_INSERTS.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa4",
+ "EventName": "UNC_M2P_TxR_HORZ_NACK.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa4",
+ "EventName": "UNC_M2P_TxR_HORZ_NACK.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa4",
+ "EventName": "UNC_M2P_TxR_HORZ_NACK.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa4",
+ "EventName": "UNC_M2P_TxR_HORZ_NACK.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa4",
+ "EventName": "UNC_M2P_TxR_HORZ_NACK.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa4",
+ "EventName": "UNC_M2P_TxR_HORZ_NACK.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa4",
+ "EventName": "UNC_M2P_TxR_HORZ_NACK.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa4",
+ "EventName": "UNC_M2P_TxR_HORZ_NACK.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa4",
+ "EventName": "UNC_M2P_TxR_HORZ_NACK.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - Credited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa5",
+ "EventName": "UNC_M2P_TxR_HORZ_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AK",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa5",
+ "EventName": "UNC_M2P_TxR_HORZ_STARVED.AK",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa5",
+ "EventName": "UNC_M2P_TxR_HORZ_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa5",
+ "EventName": "UNC_M2P_TxR_HORZ_STARVED.IV",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AKC - Uncredited",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa5",
+ "EventName": "UNC_M2P_TxR_HORZ_STARVED.AKC_UNCRD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa5",
+ "EventName": "UNC_M2P_TxR_HORZ_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xa5",
+ "EventName": "UNC_M2P_TxR_HORZ_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9c",
+ "EventName": "UNC_M2P_TxR_VERT_ADS_USED.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9c",
+ "EventName": "UNC_M2P_TxR_VERT_ADS_USED.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9c",
+ "EventName": "UNC_M2P_TxR_VERT_ADS_USED.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9c",
+ "EventName": "UNC_M2P_TxR_VERT_ADS_USED.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9d",
+ "EventName": "UNC_M2P_TxR_VERT_BYPASS.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9d",
+ "EventName": "UNC_M2P_TxR_VERT_BYPASS.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9d",
+ "EventName": "UNC_M2P_TxR_VERT_BYPASS.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : IV - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9d",
+ "EventName": "UNC_M2P_TxR_VERT_BYPASS.IV_AG1",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9d",
+ "EventName": "UNC_M2P_TxR_VERT_BYPASS.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9d",
+ "EventName": "UNC_M2P_TxR_VERT_BYPASS.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9d",
+ "EventName": "UNC_M2P_TxR_VERT_BYPASS.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9e",
+ "EventName": "UNC_M2P_TxR_VERT_BYPASS_1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9e",
+ "EventName": "UNC_M2P_TxR_VERT_BYPASS_1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : IV - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : IV - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_NE1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_NE1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2P_TxR_VERT_INSERTS0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2P_TxR_VERT_INSERTS0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2P_TxR_VERT_INSERTS0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : IV - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2P_TxR_VERT_INSERTS0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2P_TxR_VERT_INSERTS0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2P_TxR_VERT_INSERTS0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2P_TxR_VERT_INSERTS0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2P_TxR_VERT_INSERTS1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2P_TxR_VERT_INSERTS1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2P_TxR_VERT_NACK0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2P_TxR_VERT_NACK0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2P_TxR_VERT_NACK0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2P_TxR_VERT_NACK0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2P_TxR_VERT_NACK0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2P_TxR_VERT_NACK0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2P_TxR_VERT_NACK0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2P_TxR_VERT_NACK1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2P_TxR_VERT_NACK1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : IV - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2P_TxR_VERT_OCCUPANCY1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2P_TxR_VERT_OCCUPANCY1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9a",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED0.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9a",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED0.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9a",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED0.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : IV",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9a",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED0.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9a",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED0.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9a",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED0.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9a",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED0.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9b",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED1.AKC_AG0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9b",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED1.AKC_AG1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x9b",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED1.TGC",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb0",
+ "EventName": "UNC_M2P_VERT_RING_AD_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb0",
+ "EventName": "UNC_M2P_VERT_RING_AD_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb0",
+ "EventName": "UNC_M2P_VERT_RING_AD_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb0",
+ "EventName": "UNC_M2P_VERT_RING_AD_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb4",
+ "EventName": "UNC_M2P_VERT_RING_AKC_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb4",
+ "EventName": "UNC_M2P_VERT_RING_AKC_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb4",
+ "EventName": "UNC_M2P_VERT_RING_AKC_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb4",
+ "EventName": "UNC_M2P_VERT_RING_AKC_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb1",
+ "EventName": "UNC_M2P_VERT_RING_AK_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb1",
+ "EventName": "UNC_M2P_VERT_RING_AK_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb1",
+ "EventName": "UNC_M2P_VERT_RING_AK_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb1",
+ "EventName": "UNC_M2P_VERT_RING_AK_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Up and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb2",
+ "EventName": "UNC_M2P_VERT_RING_BL_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Up and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb2",
+ "EventName": "UNC_M2P_VERT_RING_BL_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Down and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb2",
+ "EventName": "UNC_M2P_VERT_RING_BL_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Down and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb2",
+ "EventName": "UNC_M2P_VERT_RING_BL_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use : Up",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb3",
+ "EventName": "UNC_M2P_VERT_RING_IV_IN_USE.UP",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use : Down",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb3",
+ "EventName": "UNC_M2P_VERT_RING_IV_IN_USE.DN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Up and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb5",
+ "EventName": "UNC_M2P_VERT_RING_TGC_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Up and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb5",
+ "EventName": "UNC_M2P_VERT_RING_TGC_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Down and Even",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb5",
+ "EventName": "UNC_M2P_VERT_RING_TGC_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Down and Odd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xb5",
+ "EventName": "UNC_M2P_VERT_RING_TGC_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Source Throttle",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xae",
+ "EventName": "UNC_M2P_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe4",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED_1",
+ "PerPkg": "1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Message Received : VLW",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.VLW_RCVD",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received : MSI",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.MSI_RCVD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received : IPI",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.IPI_RCVD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received : Doorbell",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.DOORBELL_RCVD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received : Interrupt",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.INT_PRIO",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Cycles PHOLD Assert to Ack : Assert to ACK",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x45",
+ "EventName": "UNC_U_PHOLD_CYCLES.ASSERT_TO_ACK",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_RACU_DRNG.RDRAND",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4C",
+ "EventName": "UNC_U_RACU_DRNG.RDRAND",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_RACU_DRNG.RDSEED",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4C",
+ "EventName": "UNC_U_RACU_DRNG.RDSEED",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_RACU_DRNG.PFTCH_BUF_EMPTY",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4C",
+ "EventName": "UNC_U_RACU_DRNG.PFTCH_BUF_EMPTY",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCB",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4D",
+ "EventName": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCB",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCS",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4D",
+ "EventName": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCS",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCB",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4D",
+ "EventName": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCS",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4D",
+ "EventName": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.RxC_CYCLES_FULL_BL",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4E",
+ "EventName": "UNC_U_M2U_MISC2.RxC_CYCLES_FULL_BL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.RxC_CYCLES_EMPTY_BL",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4E",
+ "EventName": "UNC_U_M2U_MISC2.RxC_CYCLES_EMPTY_BL",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCB",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4E",
+ "EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCS",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4E",
+ "EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_BL",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4E",
+ "EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_BL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AK",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4E",
+ "EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AK",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AKC",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4E",
+ "EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AKC",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_FULL_BL",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4E",
+ "EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_FULL_BL",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AK",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4F",
+ "EventName": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AK",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AKC",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4F",
+ "EventName": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AKC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "IDI Lock/SplitLock Cycles",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x44",
+ "EventName": "UNC_U_LOCK_CYCLES",
+ "PerPkg": "1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "RACU Request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x46",
+ "EventName": "UNC_U_RACU_REQUESTS",
+ "PerPkg": "1",
+ "Unit": "UBOX"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/uncore-power.json b/tools/perf/pmu-events/arch/x86/snowridgex/uncore-power.json
new file mode 100644
index 000000000000..281f3605881d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/uncore-power.json
@@ -0,0 +1,235 @@
+[
+ {
+ "BriefDescription": "Clockticks of the power control unit (PCU)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventName": "UNC_P_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_CORE_TRANSITION_CYCLES",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x60",
+ "EventName": "UNC_P_CORE_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_DEMOTIONS",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x30",
+ "EventName": "UNC_P_DEMOTIONS",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 0 Cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x75",
+ "EventName": "UNC_P_FIVR_PS_PS0_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 1 Cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x76",
+ "EventName": "UNC_P_FIVR_PS_PS1_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 2 Cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x77",
+ "EventName": "UNC_P_FIVR_PS_PS2_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 3 Cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x78",
+ "EventName": "UNC_P_FIVR_PS_PS3_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "AVX256 Frequency Clipping",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x49",
+ "EventName": "UNC_P_FREQ_CLIP_AVX256",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "AVX512 Frequency Clipping",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x4a",
+ "EventName": "UNC_P_FREQ_CLIP_AVX512",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Thermal Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Power Strongest Upper Limit Cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x05",
+ "EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "IO P Limit Strongest Lower Limit Cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x73",
+ "EventName": "UNC_P_FREQ_MIN_IO_P_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Cycles spent changing Frequency",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x74",
+ "EventName": "UNC_P_FREQ_TRANS_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Memory Phase Shedding Cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2F",
+ "EventName": "UNC_P_MEMORY_PHASE_SHEDDING_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_P_PKG_RESIDENCY_C0_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C2E",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2B",
+ "EventName": "UNC_P_PKG_RESIDENCY_C2E_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2C",
+ "EventName": "UNC_P_PKG_RESIDENCY_C3_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2D",
+ "EventName": "UNC_P_PKG_RESIDENCY_C6_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_PMAX_THROTTLED_CYCLES",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x06",
+ "EventName": "UNC_P_PMAX_THROTTLED_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "External Prochot",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x0A",
+ "EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Internal Prochot",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x09",
+ "EventName": "UNC_P_PROCHOT_INTERNAL_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Total Core C State Transition Cycles",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x72",
+ "EventName": "UNC_P_TOTAL_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "VR Hot",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_P_VR_HOT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State : C0 and C1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State : C3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State : C6 and C7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/tremontx/virtual-memory.json b/tools/perf/pmu-events/arch/x86/snowridgex/virtual-memory.json
index ecbfc335a9b6..b82f11591f13 100644
--- a/tools/perf/pmu-events/arch/x86/tremontx/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/virtual-memory.json
@@ -5,18 +5,18 @@
"Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.PDE_CACHE_MISS",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
"UMask": "0x80"
},
{
- "BriefDescription": "Counts the number of first level TLB misses but second level hits due to loads that did not start a page walk. Account for all pages sizes. Will result in a DTLB write from STLB.",
+ "BriefDescription": "Counts the number of first level TLB misses but second level hits due to a demand load that did not start a page walk. Account for all page sizes. Will result in a DTLB write from STLB.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "200003",
"UMask": "0x20"
@@ -38,7 +38,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 1GB pages. Includes page walks that page fault.",
"SampleAfterValue": "200003",
@@ -50,7 +50,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.",
"SampleAfterValue": "200003",
@@ -62,21 +62,21 @@
"Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.",
"SampleAfterValue": "200003",
"UMask": "0x2"
},
{
- "BriefDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for loads every cycle.",
+ "BriefDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for demand loads every cycle.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for loads every cycle. A page walk is outstanding from start till PMH becomes idle again (ready to serve next walk). Includes EPT-walk intervals.",
+ "PublicDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for demand loads every cycle. A page walk is outstanding from start till PMH becomes idle again (ready to serve next walk). Includes EPT-walk intervals.",
"SampleAfterValue": "200003",
"UMask": "0x10"
},
@@ -86,9 +86,8 @@
"Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.PDE_CACHE_MISS",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts the number of page walks due to storse that miss the PDE (Page Directory Entry) cache.",
"SampleAfterValue": "2000003",
"UMask": "0x80"
},
@@ -98,7 +97,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.STLB_HIT",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "2000003",
"UMask": "0x20"
@@ -120,7 +119,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 1G pages. Includes page walks that page fault.",
"SampleAfterValue": "200003",
@@ -132,7 +131,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.",
"SampleAfterValue": "2000003",
@@ -144,7 +143,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.",
"SampleAfterValue": "2000003",
@@ -156,7 +155,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_PENDING",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for stores every cycle. A page walk is outstanding from start till PMH becomes idle again (ready to serve next walk). Includes EPT-walk intervals.",
"SampleAfterValue": "200003",
@@ -168,11 +167,11 @@
"Counter": "0,1,2,3",
"EventCode": "0x4f",
"EventName": "EPT.EPDE_HIT",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of Extended Page Directory Entry hits. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
"SampleAfterValue": "2000003",
- "UMask": "0x2"
+ "UMask": "0x1"
},
{
"BriefDescription": "Counts the number of Extended Page Directory Entry misses.",
@@ -180,11 +179,11 @@
"Counter": "0,1,2,3",
"EventCode": "0x4f",
"EventName": "EPT.EPDE_MISS",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number Extended Page Directory Entry misses. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
"SampleAfterValue": "2000003",
- "UMask": "0x1"
+ "UMask": "0x2"
},
{
"BriefDescription": "Counts the number of Extended Page Directory Pointer Entry hits.",
@@ -192,7 +191,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x4f",
"EventName": "EPT.EPDPE_HIT",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number Extended Page Directory Pointer Entry hits. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
"SampleAfterValue": "2000003",
@@ -204,7 +203,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x4f",
"EventName": "EPT.EPDPE_MISS",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number Extended Page Directory Pointer Entry misses. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
"SampleAfterValue": "2000003",
@@ -216,7 +215,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x4f",
"EventName": "EPT.WALK_PENDING",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of page walks outstanding for an Extended Page table walk including GTLB hits per cycle. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
"SampleAfterValue": "200003",
@@ -228,7 +227,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "ITLB.FILLS",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of times the machine was unable to find a translation in the Instruction Translation Lookaside Buffer (ITLB) and a new translation was filled into the ITLB. The event is speculative in nature, but will not count translations (page walks) that are begun and not finished, or translations that are finished but not filled into the ITLB.",
"SampleAfterValue": "200003",
@@ -240,18 +239,18 @@
"Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.PDE_CACHE_MISS",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "2000003",
"UMask": "0x80"
},
{
- "BriefDescription": "Counts the number of first level TLB misses but second level hits due to an instruction fetch that did not start a page walk. Account for all pages sizes. Will results in a DTLB write from STLB.",
+ "BriefDescription": "Counts the number of first level TLB misses but second level hits due to an instruction fetch that did not start a page walk. Account for all pages sizes. Will result in an ITLB write from STLB.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.STLB_HIT",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"SampleAfterValue": "2000003",
"UMask": "0x20"
@@ -273,7 +272,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 1G pages. Includes page walks that page fault.",
"SampleAfterValue": "200003",
@@ -285,7 +284,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.",
"SampleAfterValue": "2000003",
@@ -297,7 +296,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.",
"SampleAfterValue": "2000003",
@@ -309,7 +308,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_PENDING",
- "PDIR_COUNTER": "na",
+ "PDIR_COUNTER": "NA",
"PEBScounters": "0,1,2,3",
"PublicDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for instruction fetches every cycle. A page walk is outstanding from start till PMH becomes idle again (ready to serve next walk).",
"SampleAfterValue": "200003",
@@ -327,7 +326,7 @@
"UMask": "0x8"
},
{
- "BriefDescription": "Counts the number of memory retired ops that missed in the second level TLB.",
+ "BriefDescription": "Counts the number of memory uops retired that missed in the second level TLB.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"Data_LA": "1",
@@ -339,7 +338,7 @@
"UMask": "0x13"
},
{
- "BriefDescription": "Counts the number of load ops retired that miss in the second Level TLB.",
+ "BriefDescription": "Counts the number of load uops retired that miss in the second Level TLB.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"Data_LA": "1",
@@ -351,7 +350,7 @@
"UMask": "0x11"
},
{
- "BriefDescription": "Counts the number of store ops retired that miss in the second level TLB.",
+ "BriefDescription": "Counts the number of store uops retired that miss in the second level TLB.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"Data_LA": "1",
@@ -362,4 +361,4 @@
"SampleAfterValue": "200003",
"UMask": "0x12"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/cache.json b/tools/perf/pmu-events/arch/x86/tigerlake/cache.json
index 0569b2c704ca..5ccf0edc29ac 100644
--- a/tools/perf/pmu-events/arch/x86/tigerlake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/cache.json
@@ -113,6 +113,17 @@
"UMask": "0xe4"
},
{
+ "BriefDescription": "Demand Data Read access L2 cache",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts Demand Data Read requests accessing the L2 cache. These requests may hit or miss L2 cache. True-miss exclude misses that were merged with ongoing L2 misses. An access is counted once.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe1"
+ },
+ {
"BriefDescription": "RFO requests to L2 cache",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
@@ -157,17 +168,39 @@
"UMask": "0xc1"
},
{
- "BriefDescription": "All requests that miss L2 cache",
+ "BriefDescription": "Demand Data Read miss L2 cache",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts demand Data Read requests with true-miss in the L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. An access is counted once.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x21"
+ },
+ {
+ "BriefDescription": "Read requests with true-miss in L2 cache",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "L2_RQSTS.MISS",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts all requests that miss L2 cache.",
+ "PublicDescription": "Counts read requests of any type with true-miss in the L2 cache. True-miss excludes L2 misses that were merged with ongoing L2 misses.",
"SampleAfterValue": "200003",
"UMask": "0x3f"
},
{
+ "BriefDescription": "All accesses to L2 cache",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts all requests that were hit or true misses in L2 cache. True-miss excludes misses that were merged with ongoing L2 misses.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xff"
+ },
+ {
"BriefDescription": "RFO requests that hit L2 cache",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
@@ -353,7 +386,7 @@
"UMask": "0x12"
},
{
- "BriefDescription": "TBD",
+ "BriefDescription": "Snoop hit a modified(HITM) or clean line(HIT_W_FWD) in another on-pkg core which forwarded the data back due to a retired load instruction.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"Data_LA": "1",
@@ -361,6 +394,7 @@
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD",
"PEBS": "1",
"PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions where a cross-core snoop hit in another cores caches on this socket, the data was forwarded back to the requesting core as the data was modified (SNOOP_HITM) or the L3 did not have the data(SNOOP_HIT_WITH_FWD).",
"SampleAfterValue": "20011",
"UMask": "0x4"
},
@@ -391,7 +425,7 @@
"UMask": "0x8"
},
{
- "BriefDescription": "TBD",
+ "BriefDescription": "Snoop hit without forwarding in another on-pkg core due to a retired load instruction, data was supplied by the L3.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
"Data_LA": "1",
@@ -399,6 +433,7 @@
"EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD",
"PEBS": "1",
"PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions in which the L3 supplied the data and a cross-core snoop hit in another cores caches on this socket but that other core did not forward the data back (SNOOP_HIT_NO_FWD).",
"SampleAfterValue": "20011",
"UMask": "0x2"
},
@@ -503,7 +538,6 @@
"MSRValue": "0x10003C0001",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -517,7 +551,6 @@
"MSRValue": "0x8003C0001",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -531,7 +564,6 @@
"MSRValue": "0x10003C0002",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -714,4 +746,4 @@
"SampleAfterValue": "100003",
"UMask": "0x4"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/floating-point.json b/tools/perf/pmu-events/arch/x86/tigerlake/floating-point.json
index de8eb2b34a3a..978b494c7458 100644
--- a/tools/perf/pmu-events/arch/x86/tigerlake/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/floating-point.json
@@ -98,4 +98,4 @@
"SampleAfterValue": "100003",
"UMask": "0x2"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/frontend.json b/tools/perf/pmu-events/arch/x86/tigerlake/frontend.json
index 2eaa33cc574e..ccdd8fd99556 100644
--- a/tools/perf/pmu-events/arch/x86/tigerlake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/frontend.json
@@ -475,4 +475,4 @@
"SampleAfterValue": "1000003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/memory.json b/tools/perf/pmu-events/arch/x86/tigerlake/memory.json
index 0948de0b160c..6071794cbd32 100644
--- a/tools/perf/pmu-events/arch/x86/tigerlake/memory.json
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/memory.json
@@ -292,4 +292,4 @@
"SampleAfterValue": "100003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/other.json b/tools/perf/pmu-events/arch/x86/tigerlake/other.json
index 65539490e18f..3ed22dbd0982 100644
--- a/tools/perf/pmu-events/arch/x86/tigerlake/other.json
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/other.json
@@ -42,7 +42,6 @@
"MSRValue": "0x10800",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
}
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/pipeline.json b/tools/perf/pmu-events/arch/x86/tigerlake/pipeline.json
index a8aa1b455c77..1f273144f8e8 100644
--- a/tools/perf/pmu-events/arch/x86/tigerlake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/pipeline.json
@@ -432,13 +432,13 @@
"UMask": "0x40"
},
{
- "BriefDescription": "Cycles where no uops were executed, the Reservation Station was not empty, the Store Buffer was full and there was no outstanding load.",
+ "BriefDescription": "Cycles no uop executed while RS was not empty, the SB was not full and there was no outstanding load.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
"EventCode": "0xa6",
"EventName": "EXE_ACTIVITY.EXE_BOUND_0_PORTS",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "PublicDescription": "Counts cycles during which no uops were executed on all ports and Reservation Station (RS) was not empty.",
+ "PublicDescription": "Number of cycles total of 0 uops executed on all ports, Reservation Station (RS) was not empty, the Store Buffer (SB) was not full and there was no outstanding load.",
"SampleAfterValue": "1000003",
"UMask": "0x80"
},
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json b/tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json
index 00a16f1a0f44..03c97bd74ad9 100644
--- a/tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json
@@ -1,20 +1,26 @@
[
{
+ "BriefDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls)",
+ "MetricExpr": "100 * (( BR_INST_RETIRED.COND + 3 * BR_INST_RETIRED.NEAR_CALL + (BR_INST_RETIRED.NEAR_TAKEN - BR_INST_RETIRED.COND_TAKEN - 2 * BR_INST_RETIRED.NEAR_CALL) ) / TOPDOWN.SLOTS)",
+ "MetricGroup": "Ret",
+ "MetricName": "Branching_Overhead"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
+ "MetricExpr": "100 * (( 5 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE - INT_MISC.UOP_DROPPING ) / TOPDOWN.SLOTS) * ( (ICACHE_64B.IFTAG_STALL / CPU_CLK_UNHALTED.THREAD) + (ICACHE_16B.IFDATA_STALL / CPU_CLK_UNHALTED.THREAD) + (10 * BACLEARS.ANY / CPU_CLK_UNHALTED.THREAD) ) / #(( 5 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE - INT_MISC.UOP_DROPPING ) / TOPDOWN.SLOTS)",
+ "MetricGroup": "BigFoot;Fed;Frontend;IcMiss;MemoryTLB",
+ "MetricName": "Big_Code"
+ },
+ {
"BriefDescription": "Instructions Per Cycle (per Logical Processor)",
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "Summary",
+ "MetricGroup": "Ret;Summary",
"MetricName": "IPC"
},
{
- "BriefDescription": "Instruction per taken branch",
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
- "MetricGroup": "Branches;FetchBW;PGO",
- "MetricName": "IpTB"
- },
- {
"BriefDescription": "Cycles Per Instruction (per Logical Processor)",
- "MetricExpr": "1 / IPC",
- "MetricGroup": "Pipeline",
+ "MetricExpr": "1 / (INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD)",
+ "MetricGroup": "Pipeline;Mem",
"MetricName": "CPI"
},
{
@@ -24,28 +30,48 @@
"MetricName": "CLKS"
},
{
- "BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.DISTRIBUTED",
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
+ "MetricExpr": "TOPDOWN.SLOTS",
+ "MetricGroup": "TmaL1",
+ "MetricName": "SLOTS"
+ },
+ {
+ "BriefDescription": "Fraction of Physical Core issue-slots utilized by this Logical Processor",
+ "MetricExpr": "TOPDOWN.SLOTS / ( TOPDOWN.SLOTS / 2 ) if #SMT_on else 1",
"MetricGroup": "SMT;TmaL1",
+ "MetricName": "Slots_Utilization"
+ },
+ {
+ "BriefDescription": "The ratio of Executed- by Issued-Uops",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / UOPS_ISSUED.ANY",
+ "MetricGroup": "Cor;Pipeline",
+ "MetricName": "Execute_per_Issue",
+ "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage."
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle across hyper-threads (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.DISTRIBUTED",
+ "MetricGroup": "Ret;SMT;TmaL1",
"MetricName": "CoreIPC"
},
{
"BriefDescription": "Floating Point Operations Per Cycle",
"MetricExpr": "( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE ) / CPU_CLK_UNHALTED.DISTRIBUTED",
- "MetricGroup": "Flops",
+ "MetricGroup": "Ret;Flops",
"MetricName": "FLOPc"
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricExpr": "UOPS_EXECUTED.THREAD / ( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 )",
- "MetricGroup": "Pipeline;PortsUtil",
- "MetricName": "ILP"
+ "BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
+ "MetricExpr": "( (FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE) + (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) ) / ( 2 * CPU_CLK_UNHALTED.DISTRIBUTED )",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "FP_Arith_Utilization",
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
},
{
- "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "BrMispredicts",
- "MetricName": "IpMispredict"
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
+ "MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
+ "MetricName": "ILP"
},
{
"BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
@@ -68,99 +94,279 @@
{
"BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
- "MetricGroup": "Branches;InsType",
+ "MetricGroup": "Branches;Fed;InsType",
"MetricName": "IpBranch"
},
{
"BriefDescription": "Instructions per (near) call (lower number means higher occurrence rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
- "MetricGroup": "Branches",
+ "MetricGroup": "Branches;Fed;PGO",
"MetricName": "IpCall"
},
{
+ "BriefDescription": "Instruction per taken branch",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO",
+ "MetricName": "IpTB"
+ },
+ {
"BriefDescription": "Branch instructions per taken branch. ",
"MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
- "MetricGroup": "Branches;PGO",
+ "MetricGroup": "Branches;Fed;PGO",
"MetricName": "BpTkBranch"
},
{
"BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)",
"MetricExpr": "INST_RETIRED.ANY / ( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )",
- "MetricGroup": "Flops;FpArith;InsType",
+ "MetricGroup": "Flops;InsType",
"MetricName": "IpFLOP"
},
{
+ "BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / ( (FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE) + (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) )",
+ "MetricGroup": "Flops;InsType",
+ "MetricName": "IpArith",
+ "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). May undercount due to FMA double counting. Approximated prior to BDW."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "MetricGroup": "Flops;FpScalar;InsType",
+ "MetricName": "IpArith_Scalar_SP",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "MetricGroup": "Flops;FpScalar;InsType",
+ "MetricName": "IpArith_Scalar_DP",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / ( FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE )",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "IpArith_AVX128",
+ "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / ( FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "IpArith_AVX256",
+ "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / ( FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "IpArith_AVX512",
+ "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per Software prefetch instruction (of any type: NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / cpu@SW_PREFETCH_ACCESS.T0\\,umask\\=0xF@",
+ "MetricGroup": "Prefetches",
+ "MetricName": "IpSWPF"
+ },
+ {
"BriefDescription": "Total number of retired Instructions, Sample with: INST_RETIRED.PREC_DIST",
"MetricExpr": "INST_RETIRED.ANY",
"MetricGroup": "Summary;TmaL1",
"MetricName": "Instructions"
},
{
+ "BriefDescription": "",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
+ "MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
+ "MetricName": "Execute"
+ },
+ {
+ "BriefDescription": "Average number of Uops issued by front-end when it issued something",
+ "MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=1@",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "Fetch_UpC"
+ },
+ {
"BriefDescription": "Fraction of Uops delivered by the LSD (Loop Stream Detector; aka Loop Cache)",
"MetricExpr": "LSD.UOPS / (IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
- "MetricGroup": "LSD",
+ "MetricGroup": "Fed;LSD",
"MetricName": "LSD_Coverage"
},
{
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
"MetricExpr": "IDQ.DSB_UOPS / (IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
- "MetricGroup": "DSB;FetchBW",
+ "MetricGroup": "DSB;Fed;FetchBW",
"MetricName": "DSB_Coverage"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
+ "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / cpu@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=1\\,edge@",
+ "MetricGroup": "DSBmiss",
+ "MetricName": "DSB_Switch_Cost"
+ },
+ {
+ "BriefDescription": "Number of Instructions per non-speculative DSB miss (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FRONTEND_RETIRED.ANY_DSB_MISS",
+ "MetricGroup": "DSBmiss;Fed",
+ "MetricName": "IpDSB_Miss_Ret"
+ },
+ {
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts",
+ "MetricName": "IpMispredict"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are non-taken conditionals",
+ "MetricExpr": "BR_INST_RETIRED.COND_NTAKEN / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches;CodeGen;PGO",
+ "MetricName": "Cond_NT"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are taken conditionals",
+ "MetricExpr": "BR_INST_RETIRED.COND_TAKEN / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches;CodeGen;PGO",
+ "MetricName": "Cond_TK"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are CALL or RET",
+ "MetricExpr": "( BR_INST_RETIRED.NEAR_CALL + BR_INST_RETIRED.NEAR_RETURN ) / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "CallRet"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are unconditional (direct or indirect) jumps",
+ "MetricExpr": "(BR_INST_RETIRED.NEAR_TAKEN - BR_INST_RETIRED.COND_TAKEN - 2 * BR_INST_RETIRED.NEAR_CALL) / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "Jump"
+ },
+ {
+ "BriefDescription": "Fraction of branches of other types (not individually covered by other metrics in Info.Branches group)",
+ "MetricExpr": "1 - ( (BR_INST_RETIRED.COND_NTAKEN / BR_INST_RETIRED.ALL_BRANCHES) + (BR_INST_RETIRED.COND_TAKEN / BR_INST_RETIRED.ALL_BRANCHES) + (( BR_INST_RETIRED.NEAR_CALL + BR_INST_RETIRED.NEAR_RETURN ) / BR_INST_RETIRED.ALL_BRANCHES) + ((BR_INST_RETIRED.NEAR_TAKEN - BR_INST_RETIRED.COND_TAKEN - 2 * BR_INST_RETIRED.NEAR_CALL) / BR_INST_RETIRED.ALL_BRANCHES) )",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "Other_Branches"
+ },
+ {
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
- "MetricGroup": "MemoryBound;MemoryLat",
+ "MetricGroup": "Mem;MemoryBound;MemoryLat",
"MetricName": "Load_Miss_Real_Latency"
},
{
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
"MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
- "MetricGroup": "MemoryBound;MemoryBW",
+ "MetricGroup": "Mem;MemoryBound;MemoryBW",
"MetricName": "MLP"
},
{
+ "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;CacheMisses",
+ "MetricName": "L1MPKI"
+ },
+ {
+ "BriefDescription": "L1 cache true misses per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1000 * L2_RQSTS.ALL_DEMAND_DATA_RD / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;CacheMisses",
+ "MetricName": "L1MPKI_Load"
+ },
+ {
+ "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;Backend;CacheMisses",
+ "MetricName": "L2MPKI"
+ },
+ {
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)",
+ "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;CacheMisses;Offcore",
+ "MetricName": "L2MPKI_All"
+ },
+ {
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1000 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;CacheMisses",
+ "MetricName": "L2MPKI_Load"
+ },
+ {
+ "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+ "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;CacheMisses",
+ "MetricName": "L2HPKI_All"
+ },
+ {
+ "BriefDescription": "L2 cache hits per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1000 * L2_RQSTS.DEMAND_DATA_RD_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;CacheMisses",
+ "MetricName": "L2HPKI_Load"
+ },
+ {
+ "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;CacheMisses",
+ "MetricName": "L3MPKI"
+ },
+ {
+ "BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)",
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.FB_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;CacheMisses",
+ "MetricName": "FB_HPKI"
+ },
+ {
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
"MetricConstraint": "NO_NMI_WATCHDOG",
- "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING ) / ( 2 * CORE_CLKS )",
- "MetricGroup": "MemoryTLB",
+ "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING ) / ( 2 * CPU_CLK_UNHALTED.DISTRIBUTED )",
+ "MetricGroup": "Mem;MemoryTLB",
"MetricName": "Page_Walks_Utilization"
},
{
- "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+ "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
"MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
- "MetricGroup": "MemoryBW",
+ "MetricGroup": "Mem;MemoryBW",
"MetricName": "L1D_Cache_Fill_BW"
},
{
- "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+ "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
"MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
- "MetricGroup": "MemoryBW",
+ "MetricGroup": "Mem;MemoryBW",
"MetricName": "L2_Cache_Fill_BW"
},
{
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L3_Cache_Fill_BW"
+ },
+ {
"BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
"MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
- "MetricGroup": "MemoryBW;Offcore",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
"MetricName": "L3_Cache_Access_BW"
},
{
- "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
- "MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
- "MetricGroup": "CacheMisses",
- "MetricName": "L1MPKI"
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "(64 * L1D.REPLACEMENT / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L1D_Cache_Fill_BW_1T"
},
{
- "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
- "MetricExpr": "1000 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
- "MetricGroup": "CacheMisses",
- "MetricName": "L2MPKI"
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "(64 * L2_LINES_IN.ALL / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L2_Cache_Fill_BW_1T"
},
{
- "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
- "MetricExpr": "1000 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
- "MetricGroup": "CacheMisses",
- "MetricName": "L3MPKI"
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "(64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L3_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "(64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "L3_Cache_Access_BW_1T"
},
{
"BriefDescription": "Average CPU Utilization",
@@ -177,8 +383,9 @@
{
"BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "( ( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE ) / 1000000000 ) / duration_time",
- "MetricGroup": "Flops;HPC",
- "MetricName": "GFLOPs"
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "GFLOPs",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine."
},
{
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
@@ -187,8 +394,29 @@
"MetricName": "Turbo_Utilization"
},
{
+ "BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for baseline license level 0",
+ "MetricExpr": "CORE_POWER.LVL0_TURBO_LICENSE / CPU_CLK_UNHALTED.DISTRIBUTED",
+ "MetricGroup": "Power",
+ "MetricName": "Power_License0_Utilization",
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes."
+ },
+ {
+ "BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 1",
+ "MetricExpr": "CORE_POWER.LVL1_TURBO_LICENSE / CPU_CLK_UNHALTED.DISTRIBUTED",
+ "MetricGroup": "Power",
+ "MetricName": "Power_License1_Utilization",
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions."
+ },
+ {
+ "BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX)",
+ "MetricExpr": "CORE_POWER.LVL2_TURBO_LICENSE / CPU_CLK_UNHALTED.DISTRIBUTED",
+ "MetricGroup": "Power",
+ "MetricName": "Power_License2_Utilization",
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX). This includes high current AVX 512-bit instructions."
+ },
+ {
"BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
- "MetricExpr": "1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_DISTRIBUTED",
+ "MetricExpr": "1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_DISTRIBUTED if #SMT_on else 0",
"MetricGroup": "SMT",
"MetricName": "SMT_2T_Utilization"
},
@@ -199,6 +427,24 @@
"MetricName": "Kernel_Utilization"
},
{
+ "BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
+ "MetricGroup": "OS",
+ "MetricName": "Kernel_CPI"
+ },
+ {
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
+ "MetricGroup": "HPC;Mem;MemoryBW;SoC",
+ "MetricName": "DRAM_BW_Use"
+ },
+ {
+ "BriefDescription": "Average number of parallel requests to external memory. Accounts for all requests",
+ "MetricExpr": "UNC_ARB_TRK_OCCUPANCY.ALL / arb@event\\=0x81\\,umask\\=0x1@",
+ "MetricGroup": "Mem;SoC",
+ "MetricName": "MEM_Parallel_Requests"
+ },
+ {
"BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
"MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
"MetricGroup": "Branches;OS",
@@ -217,6 +463,18 @@
"MetricName": "C7_Core_Residency"
},
{
+ "BriefDescription": "C2 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C3 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Pkg_Residency"
+ },
+ {
"BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
@@ -227,5 +485,23 @@
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"MetricName": "C7_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C8 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c8\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C8_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C9 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c9\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C9_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C10 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c10\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C10_Pkg_Residency"
}
]
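
For orientation, the MetricExpr strings above are plain arithmetic over named event counts: perf's metric engine substitutes the measured counts for the event names and evaluates the expression. A minimal sketch of that evaluation for a few of the metrics defined in this file, using made-up counter values (illustrative only, not part of the patch):

```python
# Illustrative sketch: how a MetricExpr is conceptually evaluated once the
# named events have been counted. The counter values are invented numbers.

counts = {
    "INST_RETIRED.ANY": 1_200_000_000,
    "CPU_CLK_UNHALTED.THREAD": 1_500_000_000,
    "MEM_LOAD_RETIRED.L2_MISS": 3_600_000,
}

# "IPC": INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD
ipc = counts["INST_RETIRED.ANY"] / counts["CPU_CLK_UNHALTED.THREAD"]

# "CPI": 1 / (INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD)
cpi = 1 / ipc

# "L2MPKI": 1000 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY
l2mpki = 1000 * counts["MEM_LOAD_RETIRED.L2_MISS"] / counts["INST_RETIRED.ANY"]

print(f"IPC={ipc:.2f}  CPI={cpi:.2f}  L2MPKI={l2mpki:.2f}")
```

In practice the named metrics can be requested directly, e.g. `perf stat -M IPC,CPI -- <workload>`, and perf performs the same substitution internally.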
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/uncore-other.json b/tools/perf/pmu-events/arch/x86/tigerlake/uncore-other.json
new file mode 100644
index 000000000000..734b1845c8e2
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/uncore-other.json
@@ -0,0 +1,65 @@
+[
+ {
+ "BriefDescription": "Each cycle count number of all outgoing valid entries in ReqTrk. Such entry is defined as valid from it's allocation in ReqTrk till deallocation. Accounts for Coherent and non-coherent traffic.",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_ARB_TRK_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "UNC_ARB_TRK_OCCUPANCY.ALL",
+ "UMask": "0x01",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Counts every read (RdCAS) issued by the Memory Controller to DRAM (sum of all channels). All requests result in 64 byte data transfers from DRAM.",
+ "Counter": "1",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_MC0_RDCAS_COUNT_FREERUN",
+ "PerPkg": "1",
+ "PublicDescription": "UNC_MC0_RDCAS_COUNT_FREERUN",
+ "Unit": "h_imc"
+ },
+ {
+ "BriefDescription": "Counts every 64B read and write request entering the Memory Controller to DRAM (sum of all channels). Each write request counts as a new request incrementing this counter. However, same cache line write requests (both full and partial) are combined to a single 64 byte data transfer to DRAM.",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_MC0_TOTAL_REQCOUNT_FREERUN",
+ "PerPkg": "1",
+ "PublicDescription": "UNC_MC0_TOTAL_REQCOUNT_FREERUN",
+ "Unit": "h_imc"
+ },
+ {
+ "BriefDescription": "Counts every write (WrCAS) issued by the Memory Controller to DRAM (sum of all channels). All requests result in 64 byte data transfers from DRAM.",
+ "Counter": "2",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_MC0_WRCAS_COUNT_FREERUN",
+ "PerPkg": "1",
+ "PublicDescription": "UNC_MC0_WRCAS_COUNT_FREERUN",
+ "Unit": "h_imc"
+ },
+ {
+ "BriefDescription": "Counts every read (RdCAS) issued by the Memory Controller to DRAM (sum of all channels). All requests result in 64 byte data transfers from DRAM.",
+ "Counter": "4",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_MC1_RDCAS_COUNT_FREERUN",
+ "PerPkg": "1",
+ "PublicDescription": "UNC_MC1_RDCAS_COUNT_FREERUN",
+ "Unit": "h_imc"
+ },
+ {
+ "BriefDescription": "Counts every 64B read and write request entering the Memory Controller to DRAM (sum of all channels). Each write request counts as a new request incrementing this counter. However, same cache line write requests (both full and partial) are combined to a single 64 byte data transfer to DRAM.",
+ "Counter": "3",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_MC1_TOTAL_REQCOUNT_FREERUN",
+ "PerPkg": "1",
+ "PublicDescription": "UNC_MC1_TOTAL_REQCOUNT_FREERUN",
+ "Unit": "h_imc"
+ },
+ {
+ "BriefDescription": "Counts every write (WrCAS) issued by the Memory Controller to DRAM (sum of all channels). All requests result in 64 byte data transfers from DRAM.",
+ "Counter": "5",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_MC1_WRCAS_COUNT_FREERUN",
+ "PerPkg": "1",
+ "PublicDescription": "UNC_MC1_WRCAS_COUNT_FREERUN",
+ "Unit": "h_imc"
+ }
+]
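
The ARB occupancy event added in this file feeds the MEM_Parallel_Requests metric, and the two arb insert events (event 0x81 and 0x84, umask 0x1) feed DRAM_BW_Use, both defined in the tigerlake metrics file above. A small sketch of how those two expressions work out once counts are in hand (values are invented for illustration; the variable names below merely stand in for the arb@event=...@ terms):

```python
# Illustrative sketch: evaluating the two SoC metrics from hypothetical
# uncore counter readings over a 1-second measurement window.

arb_event_0x81 = 40_000_000           # stand-in for arb@event=0x81,umask=0x1@
arb_event_0x84 = 25_000_000           # stand-in for arb@event=0x84,umask=0x1@
unc_arb_trk_occupancy_all = 600_000_000
duration_time = 1.0                   # seconds

# "DRAM_BW_Use": 64 * (inserts) / 1000000 / duration_time / 1000  -> GB/s
dram_bw_use = 64 * (arb_event_0x81 + arb_event_0x84) / 1_000_000 / duration_time / 1_000

# "MEM_Parallel_Requests": occupancy / inserts -> average outstanding requests
mem_parallel_requests = unc_arb_trk_occupancy_all / arb_event_0x81

print(f"DRAM_BW_Use={dram_bw_use:.2f} GB/s  MEM_Parallel_Requests={mem_parallel_requests:.1f}")
```

The free-running iMC read/write CAS counters listed in this file offer an alternative, per-memory-controller view of the same traffic, since each CAS corresponds to a 64-byte transfer.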
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/virtual-memory.json b/tools/perf/pmu-events/arch/x86/tigerlake/virtual-memory.json
index 3ebec78969b0..fd364abf8002 100644
--- a/tools/perf/pmu-events/arch/x86/tigerlake/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/virtual-memory.json
@@ -222,4 +222,4 @@
"SampleAfterValue": "100007",
"UMask": "0x20"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/tremontx/uncore-memory.json b/tools/perf/pmu-events/arch/x86/tremontx/uncore-memory.json
deleted file mode 100644
index b7ff25a5d717..000000000000
--- a/tools/perf/pmu-events/arch/x86/tremontx/uncore-memory.json
+++ /dev/null
@@ -1,245 +0,0 @@
-[
- {
- "BriefDescription": "read requests to memory controller. Derived from unc_m_cas_count.rd",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x04",
- "EventName": "LLC_MISSES.MEM_READ",
- "PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x0f",
- "Unit": "iMC"
- },
- {
- "BriefDescription": "read requests to memory controller",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x04",
- "EventName": "UNC_M_CAS_COUNT.RD",
- "PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x0f",
- "Unit": "iMC"
- },
- {
- "BriefDescription": "write requests to memory controller. Derived from unc_m_cas_count.wr",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x04",
- "EventName": "LLC_MISSES.MEM_WRITE",
- "PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x30",
- "Unit": "iMC"
- },
- {
- "BriefDescription": "write requests to memory controller",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x04",
- "EventName": "UNC_M_CAS_COUNT.WR",
- "PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x30",
- "Unit": "iMC"
- },
- {
- "BriefDescription": "Memory controller clock ticks",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventName": "UNC_M_CLOCKTICKS",
- "PerPkg": "1",
- "Unit": "iMC"
- },
- {
- "BriefDescription": "Pre-charge for reads",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x02",
- "EventName": "UNC_M_PRE_COUNT.RD",
- "PerPkg": "1",
- "UMask": "0x04",
- "Unit": "iMC"
- },
- {
- "BriefDescription": "Pre-charge for writes",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x02",
- "EventName": "UNC_M_PRE_COUNT.WR",
- "PerPkg": "1",
- "UMask": "0x08",
- "Unit": "iMC"
- },
- {
- "BriefDescription": "DRAM Activate Count : All Activates",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x01",
- "EventName": "UNC_M_ACT_COUNT.ALL",
- "PerPkg": "1",
- "PublicDescription": "DRAM Activate Count : All Activates : Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
- "UMask": "0x0B",
- "Unit": "iMC"
- },
- {
- "BriefDescription": "All DRAM CAS commands issued",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x04",
- "EventName": "UNC_M_CAS_COUNT.ALL",
- "PerPkg": "1",
- "PublicDescription": "Counts the total number of DRAM CAS commands issued on this channel.",
- "UMask": "0x3f",
- "Unit": "iMC"
- },
- {
- "BriefDescription": "Number of DRAM Refreshes Issued",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x45",
- "EventName": "UNC_M_DRAM_REFRESH.HIGH",
- "PerPkg": "1",
- "PublicDescription": "Number of DRAM Refreshes Issued : Counts the number of refreshes issued.",
- "UMask": "0x04",
- "Unit": "iMC"
- },
- {
- "BriefDescription": "Number of DRAM Refreshes Issued",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x45",
- "EventName": "UNC_M_DRAM_REFRESH.OPPORTUNISTIC",
- "PerPkg": "1",
- "PublicDescription": "Number of DRAM Refreshes Issued : Counts the number of refreshes issued.",
- "UMask": "0x01",
- "Unit": "iMC"
- },
- {
- "BriefDescription": "Number of DRAM Refreshes Issued",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x45",
- "EventName": "UNC_M_DRAM_REFRESH.PANIC",
- "PerPkg": "1",
- "PublicDescription": "Number of DRAM Refreshes Issued : Counts the number of refreshes issued.",
- "UMask": "0x02",
- "Unit": "iMC"
- },
- {
- "BriefDescription": "Half clockticks for IMC",
- "Counter": "FIXED",
- "CounterType": "FIXED",
- "EventCode": "0xff",
- "EventName": "UNC_M_HCLOCKTICKS",
- "PerPkg": "1",
- "PublicDescription": "Half clockticks for IMC",
- "Unit": "iMC"
- },
- {
- "BriefDescription": "DRAM Precharge commands.",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x02",
- "EventName": "UNC_M_PRE_COUNT.ALL",
- "PerPkg": "1",
- "PublicDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
- "UMask": "0x1C",
- "Unit": "iMC"
- },
- {
- "BriefDescription": "DRAM Precharge commands. : Precharge due to page table",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x02",
- "EventName": "UNC_M_PRE_COUNT.PGT",
- "PerPkg": "1",
- "PublicDescription": "DRAM Precharge commands. : Precharge due to page table : Counts the number of DRAM Precharge commands sent on this channel. : Prechages from Page Table",
- "UMask": "0x10",
- "Unit": "iMC"
- },
- {
- "BriefDescription": "Read Pending Queue Allocations",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x10",
- "EventName": "UNC_M_RPQ_INSERTS.PCH0",
- "PerPkg": "1",
- "PublicDescription": "Read Pending Queue Allocations : Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.",
- "UMask": "0x01",
- "Unit": "iMC"
- },
- {
- "BriefDescription": "Read Pending Queue Allocations",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x10",
- "EventName": "UNC_M_RPQ_INSERTS.PCH1",
- "PerPkg": "1",
- "PublicDescription": "Read Pending Queue Allocations : Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.",
- "UMask": "0x02",
- "Unit": "iMC"
- },
- {
- "BriefDescription": "Read Pending Queue Occupancy",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x80",
- "EventName": "UNC_M_RPQ_OCCUPANCY_PCH0",
- "PerPkg": "1",
- "PublicDescription": "Read Pending Queue Occupancy : Accumulates the occupancies of the Read Pending Queue each cycle. This can then be used to calculate both the average occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory.",
- "Unit": "iMC"
- },
- {
- "BriefDescription": "Read Pending Queue Occupancy",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x81",
- "EventName": "UNC_M_RPQ_OCCUPANCY_PCH1",
- "PerPkg": "1",
- "PublicDescription": "Read Pending Queue Occupancy : Accumulates the occupancies of the Read Pending Queue each cycle. This can then be used to calculate both the average occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory.",
- "Unit": "iMC"
- },
- {
- "BriefDescription": "Write Pending Queue Allocations",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x20",
- "EventName": "UNC_M_WPQ_INSERTS.PCH0",
- "PerPkg": "1",
- "PublicDescription": "Write Pending Queue Allocations : Counts the number of allocations into the Write Pending Queue. This can then be used to calculate the average queuing latency (in conjunction with the WPQ occupancy count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC.",
- "UMask": "0x01",
- "Unit": "iMC"
- },
- {
- "BriefDescription": "Write Pending Queue Allocations",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x20",
- "EventName": "UNC_M_WPQ_INSERTS.PCH1",
- "PerPkg": "1",
- "PublicDescription": "Write Pending Queue Allocations : Counts the number of allocations into the Write Pending Queue. This can then be used to calculate the average queuing latency (in conjunction with the WPQ occupancy count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC.",
- "UMask": "0x02",
- "Unit": "iMC"
- },
- {
- "BriefDescription": "Write Pending Queue Occupancy",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x82",
- "EventName": "UNC_M_WPQ_OCCUPANCY_PCH0",
- "PerPkg": "1",
- "PublicDescription": "Write Pending Queue Occupancy : Accumulates the occupancies of the Write Pending Queue each cycle. This can then be used to calculate both the average queue occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies. So, we provide filtering based on if the request has posted or not. By using the not posted filter, we can track how long writes spent in the iMC before completions were sent to the HA. The posted filter, on the other hand, provides information about how much queueing is actually happenning in the iMC for writes before they are actually issued to memory. High average occupancies will generally coincide with high write major mode counts.",
- "Unit": "iMC"
- },
- {
- "BriefDescription": "Write Pending Queue Occupancy",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "UNC_M_WPQ_OCCUPANCY_PCH1",
- "PerPkg": "1",
- "PublicDescription": "Write Pending Queue Occupancy : Accumulates the occupancies of the Write Pending Queue each cycle. This can then be used to calculate both the average queue occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies. So, we provide filtering based on if the request has posted or not. By using the not posted filter, we can track how long writes spent in the iMC before completions were sent to the HA. The posted filter, on the other hand, provides information about how much queueing is actually happenning in the iMC for writes before they are actually issued to memory. High average occupancies will generally coincide with high write major mode counts.",
- "Unit": "iMC"
- }
-]
diff --git a/tools/perf/pmu-events/arch/x86/tremontx/uncore-other.json b/tools/perf/pmu-events/arch/x86/tremontx/uncore-other.json
deleted file mode 100644
index 5194ce1b4390..000000000000
--- a/tools/perf/pmu-events/arch/x86/tremontx/uncore-other.json
+++ /dev/null
@@ -1,2395 +0,0 @@
-[
- {
- "BriefDescription": "Uncore cache clock ticks",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventName": "UNC_CHA_CLOCKTICKS",
- "PerPkg": "1",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "LLC misses - Uncacheable reads (from cpu) . Derived from unc_cha_tor_inserts.ia_miss",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "LLC_MISSES.UNCACHEABLE",
- "Filter": "config1=0x40e33",
- "PerPkg": "1",
- "UMask": "0xC001FE01",
- "UMaskExt": "0xC001FE",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "LLC misses - Uncacheable reads (from cpu) ",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
- "Filter": "config1=0x40e33",
- "PerPkg": "1",
- "UMask": "0xC001FE01",
- "UMaskExt": "0xC001FE",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "MMIO reads. Derived from unc_cha_tor_inserts.ia_miss",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "LLC_MISSES.MMIO_READ",
- "Filter": "config1=0x40040e33",
- "PerPkg": "1",
- "UMask": "0xC001FE01",
- "UMaskExt": "0xC001FE",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "MMIO reads",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
- "Filter": "config1=0x40040e33",
- "PerPkg": "1",
- "UMask": "0xC001FE01",
- "UMaskExt": "0xC001FE",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "MMIO writes. Derived from unc_cha_tor_inserts.ia_miss",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "LLC_MISSES.MMIO_WRITE",
- "Filter": "config1=0x40041e33",
- "PerPkg": "1",
- "UMask": "0xC001FE01",
- "UMaskExt": "0xC001FE",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "MMIO writes",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
- "Filter": "config1=0x40041e33",
- "PerPkg": "1",
- "UMask": "0xC001FE01",
- "UMaskExt": "0xC001FE",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Streaming stores (full cache line). Derived from unc_cha_tor_inserts.ia_miss",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "LLC_REFERENCES.STREAMING_FULL",
- "Filter": "config1=0x41833",
- "PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0xC001FE01",
- "UMaskExt": "0xC001FE",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Streaming stores (full cache line)",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
- "Filter": "config1=0x41833",
- "PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0xC001FE01",
- "UMaskExt": "0xC001FE",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Streaming stores (partial cache line). Derived from unc_cha_tor_inserts.ia_miss",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "LLC_REFERENCES.STREAMING_PARTIAL",
- "Filter": "config1=0x41a33",
- "PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0xC001FE01",
- "UMaskExt": "0xC001FE",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Streaming stores (partial cache line)",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
- "Filter": "config1=0x41a33",
- "PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0xC001FE01",
- "UMaskExt": "0xC001FE",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "read requests from home agent",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x50",
- "EventName": "UNC_CHA_REQUESTS.READS",
- "PerPkg": "1",
- "UMask": "0x03",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "write requests from home agent",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x50",
- "EventName": "UNC_CHA_REQUESTS.WRITES",
- "PerPkg": "1",
- "UMask": "0x0c",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "PCI Express bandwidth reading at IIO. Derived from unc_iio_data_req_of_cpu.mem_read.part0",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "LLC_MISSES.PCIE_READ",
- "FCMask": "0x07",
- "Filter": "ch_mask=0x1f",
- "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0 +UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1 +UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2 +UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
- "MetricName": "LLC_MISSES.PCIE_READ",
- "PerPkg": "1",
- "PortMask": "0x01",
- "ScaleUnit": "4Bytes",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCI Express bandwidth reading at IIO",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
- "FCMask": "0x07",
- "Filter": "ch_mask=0x1f",
- "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0 +UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1 +UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2 +UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
- "MetricName": "LLC_MISSES.PCIE_READ",
- "PerPkg": "1",
- "PortMask": "0x01",
- "ScaleUnit": "4Bytes",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCI Express bandwidth writing at IIO. Derived from unc_iio_data_req_of_cpu.mem_write.part0",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "LLC_MISSES.PCIE_WRITE",
- "FCMask": "0x07",
- "Filter": "ch_mask=0x1f",
- "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
- "MetricName": "LLC_MISSES.PCIE_WRITE",
- "PerPkg": "1",
- "PortMask": "0x01",
- "ScaleUnit": "4Bytes",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCI Express bandwidth writing at IIO",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0",
- "FCMask": "0x07",
- "Filter": "ch_mask=0x1f",
- "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
- "MetricName": "LLC_MISSES.PCIE_WRITE",
- "PerPkg": "1",
- "PortMask": "0x01",
- "ScaleUnit": "4Bytes",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCI Express bandwidth writing at IIO, part 1",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x02",
- "ScaleUnit": "4Bytes",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCI Express bandwidth writing at IIO, part 2",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x04",
- "ScaleUnit": "4Bytes",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCI Express bandwidth writing at IIO, part 3",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x08",
- "ScaleUnit": "4Bytes",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCI Express bandwidth reading at IIO, part 1",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x02",
- "ScaleUnit": "4Bytes",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCI Express bandwidth reading at IIO, part 2",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x04",
- "ScaleUnit": "4Bytes",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCI Express bandwidth reading at IIO, part 3",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x08",
- "ScaleUnit": "4Bytes",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "CMS Clockticks",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xc0",
- "EventName": "UNC_CHA_CMS_CLOCKTICKS",
- "PerPkg": "1",
- "PublicDescription": "CMS Clockticks",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Normal priority reads issued to the memory controller from the CHA",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x59",
- "EventName": "UNC_CHA_IMC_READS_COUNT.NORMAL",
- "PerPkg": "1",
- "PublicDescription": "Counts when a normal (Non-Isochronous) read is issued to any of the memory controller channels from the CHA.",
- "UMask": "0x01",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "CHA to iMC Full Line Writes Issued : Full Line Non-ISOCH",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x5B",
- "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL",
- "PerPkg": "1",
- "PublicDescription": "Counts when a normal (Non-Isochronous) full line write is issued from the CHA to any of the memory controller channels.",
- "UMask": "0x01",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Cache and Snoop Filter Lookups; Data Read Request",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x34",
- "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ",
- "PerPkg": "1",
- "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
- "UMask": "0x1BC1FF",
- "UMaskExt": "0x1BC1",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Lines Victimized : All Lines Victimized",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x37",
- "EventName": "UNC_CHA_LLC_VICTIMS.ALL",
- "PerPkg": "1",
- "PublicDescription": "Lines Victimized : All Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
- "UMask": "0x0F",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Snoop filter capacity evictions for E-state entries.",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x3D",
- "EventName": "UNC_CHA_SF_EVICTION.E_STATE",
- "PerPkg": "1",
- "PublicDescription": "Counts snoop filter capacity evictions for entries tracking exclusive lines in the cores? cache.? Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry.? Does not count clean evictions such as when a core?s cache replaces a tracked cacheline with a new cacheline.",
- "UMask": "0x02",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Snoop filter capacity evictions for M-state entries.",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x3D",
- "EventName": "UNC_CHA_SF_EVICTION.M_STATE",
- "PerPkg": "1",
- "PublicDescription": "Counts snoop filter capacity evictions for entries tracking modified lines in the cores? cache.? Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry.? Does not count clean evictions such as when a core?s cache replaces a tracked cacheline with a new cacheline.",
- "UMask": "0x01",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Snoop filter capacity evictions for S-state entries.",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x3D",
- "EventName": "UNC_CHA_SF_EVICTION.S_STATE",
- "PerPkg": "1",
- "PublicDescription": "Counts snoop filter capacity evictions for entries tracking shared lines in the cores? cache.? Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry.? Does not count clean evictions such as when a core?s cache replaces a tracked cacheline with a new cacheline.",
- "UMask": "0x04",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Inserts : All requests from iA Cores",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : All requests from iA Cores : Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC001FF01",
- "UMaskExt": "0xC001FF",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Inserts : CLFlushes issued by iA Cores",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_CLFLUSH",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : CLFlushes issued by iA Cores : Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC8C7FF01",
- "UMaskExt": "0xC8C7FF",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Inserts : CRDs issued by iA Cores",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_CRD",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : CRDs issued by iA Cores : Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC80FFF01",
- "UMaskExt": "0xC80FFF",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Inserts : DRd_Opts issued by iA Cores",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_OPT",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : DRd_Opts issued by iA Cores : Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC827FF01",
- "UMaskExt": "0xC827FF",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_OPT_PREF",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores : Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC8A7FF01",
- "UMaskExt": "0xC8A7FF",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Inserts : All requests from iA Cores that Hit the LLC",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : All requests from iA Cores that Hit the LLC : Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC001FD01",
- "UMaskExt": "0xC001FD",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Inserts : CRds issued by iA Cores that Hit the LLC",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : CRds issued by iA Cores that Hit the LLC : Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC80FFD01",
- "UMaskExt": "0xC80FFD",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that hit the LLC",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD_PREF",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that hit the LLC : Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC88FFD01",
- "UMaskExt": "0xC88FFD",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Inserts : DRd_Opts issued by iA Cores that hit the LLC",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_OPT",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : DRd_Opts issued by iA Cores that hit the LLC : Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC827FD01",
- "UMaskExt": "0xC827FD",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores that hit the LLC",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_OPT_PREF",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores that hit the LLC : Counts the number of entries successfuly inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC8A7FD01",
- "UMaskExt": "0xC8A7FD",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Inserts : RFOs issued by iA Cores that Hit the LLC",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : RFOs issued by iA Cores that Hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC807FD01",
- "UMaskExt": "0xC807FD",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Hit the LLC",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO_PREF",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC887FD01",
- "UMaskExt": "0xC887FD",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Inserts : CRds issued by iA Cores that Missed the LLC",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : CRds issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC80FFE01",
- "UMaskExt": "0xC80FFE",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC88FFE01",
- "UMaskExt": "0xC88FFE",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Inserts : DRd_Opt issued by iA Cores that missed the LLC",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : DRd_Opt issued by iA Cores that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC827FE01",
- "UMaskExt": "0xC827FE",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores that missed the LLC",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC8A7FE01",
- "UMaskExt": "0xC8A7FE",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Inserts; WCiLF misses from local IA",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_FULL_STREAMING_WR",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts; Full cacheline streaming writes (WCiLF) from local IA that missed the LLC",
- "UMask": "0xc867fe01",
- "UMaskExt": "0xc867fe",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Inserts; WCiL misses from local IA",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_PARTIAL_STREAMING_WR",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts; Partial streaming writes (WCiL) from local IA that missed the LLC",
- "UMask": "0xc86ffe01",
- "UMaskExt": "0xc86ffe",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC807FE01",
- "UMaskExt": "0xC807FE",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Missed the LLC",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC887FE01",
- "UMaskExt": "0xC887FE",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Inserts : UCRdFs issued by iA Cores that Missed LLC",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_UCRDF",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : UCRdFs issued by iA Cores that Missed LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC877DE01",
- "UMaskExt": "0xC877DE",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores that Missed the LLC",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC86FFE01",
- "UMaskExt": "0xC86FFE",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Inserts : WCiLF issued by iA Cores that Missed the LLC",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : WCiLF issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC867FE01",
- "UMaskExt": "0xC867FE",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Inserts : WiLs issued by iA Cores that Missed LLC",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WIL",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : WiLs issued by iA Cores that Missed LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC87FDE01",
- "UMaskExt": "0xC87FDE",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Inserts : RFOs issued by iA Cores",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_RFO",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : RFOs issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC807FF01",
- "UMaskExt": "0xC807FF",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Inserts : RFO_Prefs issued by iA Cores",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IA_RFO_PREF",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : RFO_Prefs issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC887FF01",
- "UMaskExt": "0xC887FF",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Inserts : All requests from IO Devices",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IO",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : All requests from IO Devices : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC001FF04",
- "UMaskExt": "0xC001FF",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Inserts : All requests from IO Devices that hit the LLC",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : All requests from IO Devices that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC001FD04",
- "UMaskExt": "0xC001FD",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Inserts : ItoMs issued by IO Devices that Hit the LLC",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_ITOM",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices that Hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xCC43FD04",
- "UMaskExt": "0xCC43FD",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_ITOMCACHENEAR",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xCD43FD04",
- "UMaskExt": "0xCD43FD",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that hit the LLC",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_PCIRDCUR",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC8F3FD04",
- "UMaskExt": "0xC8F3FD",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Inserts : ItoMs issued by IO Devices",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xCC43FF04",
- "UMaskExt": "0xCC43FF",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xCD43FF04",
- "UMaskExt": "0xCD43FF",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Inserts : All requests from IO Devices that missed the LLC",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : All requests from IO Devices that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC001FE04",
- "UMaskExt": "0xC001FE",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Inserts : ItoMs issued by IO Devices that missed the LLC",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xCC43FE04",
- "UMaskExt": "0xCC43FE",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xCD43FE04",
- "UMaskExt": "0xCD43FE",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that missed the LLC",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_PCIRDCUR",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC8F3FE04",
- "UMaskExt": "0xC8F3FE",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Inserts : PCIRdCurs issued by IO Devices",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : PCIRdCurs issued by IO Devices : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC8F3FF04",
- "UMaskExt": "0xC8F3FF",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Occupancy : All requests from iA Cores",
- "CounterType": "PGMABLE",
- "EventCode": "0x36",
- "EventName": "UNC_CHA_TOR_OCCUPANCY.IA",
- "PerPkg": "1",
- "PublicDescription": "TOR Occupancy : All requests from iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC001FF01",
- "UMaskExt": "0xC001FF",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Occupancy : CRDs issued by iA Cores",
- "CounterType": "PGMABLE",
- "EventCode": "0x36",
- "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CRD",
- "PerPkg": "1",
- "PublicDescription": "TOR Occupancy : CRDs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC80FFF01",
- "UMaskExt": "0xC80FFF",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Occupancy : DRd_Opts issued by iA Cores",
- "CounterType": "PGMABLE",
- "EventCode": "0x36",
- "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_OPT",
- "PerPkg": "1",
- "PublicDescription": "TOR Occupancy : DRd_Opts issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC827FF01",
- "UMaskExt": "0xC827FF",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores",
- "CounterType": "PGMABLE",
- "EventCode": "0x36",
- "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_OPT_PREF",
- "PerPkg": "1",
- "PublicDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC8A7FF01",
- "UMaskExt": "0xC8A7FF",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Occupancy : All requests from iA Cores that Hit the LLC",
- "CounterType": "PGMABLE",
- "EventCode": "0x36",
- "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT",
- "PerPkg": "1",
- "PublicDescription": "TOR Occupancy : All requests from iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC001FD01",
- "UMaskExt": "0xC001FD",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Occupancy : DRd_Opts issued by iA Cores that hit the LLC",
- "CounterType": "PGMABLE",
- "EventCode": "0x36",
- "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_OPT",
- "PerPkg": "1",
- "PublicDescription": "TOR Occupancy : DRd_Opts issued by iA Cores that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC827FD01",
- "UMaskExt": "0xC827FD",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores that hit the LLC",
- "CounterType": "PGMABLE",
- "EventCode": "0x36",
- "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_OPT_PREF",
- "PerPkg": "1",
- "PublicDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC8A7FD01",
- "UMaskExt": "0xC8A7FD",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Occupancy : All requests from iA Cores that Missed the LLC",
- "CounterType": "PGMABLE",
- "EventCode": "0x36",
- "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS",
- "PerPkg": "1",
- "PublicDescription": "TOR Occupancy : All requests from iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC001FE01",
- "UMaskExt": "0xC001FE",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Occupancy : CRds issued by iA Cores that Missed the LLC",
- "CounterType": "PGMABLE",
- "EventCode": "0x36",
- "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD",
- "PerPkg": "1",
- "PublicDescription": "TOR Occupancy : CRds issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC80FFE01",
- "UMaskExt": "0xC80FFE",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Occupancy : DRd_Opt issued by iA Cores that missed the LLC",
- "CounterType": "PGMABLE",
- "EventCode": "0x36",
- "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT",
- "PerPkg": "1",
- "PublicDescription": "TOR Occupancy : DRd_Opt issued by iA Cores that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC827FE01",
- "UMaskExt": "0xC827FE",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC",
- "CounterType": "PGMABLE",
- "EventCode": "0x36",
- "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO",
- "PerPkg": "1",
- "PublicDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC807FE01",
- "UMaskExt": "0xC807FE",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Occupancy : RFOs issued by iA Cores",
- "CounterType": "PGMABLE",
- "EventCode": "0x36",
- "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_RFO",
- "PerPkg": "1",
- "PublicDescription": "TOR Occupancy : RFOs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC807FF01",
- "UMaskExt": "0xC807FF",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Occupancy : All requests from IO Devices",
- "CounterType": "PGMABLE",
- "EventCode": "0x36",
- "EventName": "UNC_CHA_TOR_OCCUPANCY.IO",
- "PerPkg": "1",
- "PublicDescription": "TOR Occupancy : All requests from IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC001FF04",
- "UMaskExt": "0xC001FF",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Occupancy : All requests from IO Devices that hit the LLC",
- "CounterType": "PGMABLE",
- "EventCode": "0x36",
- "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT",
- "PerPkg": "1",
- "PublicDescription": "TOR Occupancy : All requests from IO Devices that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC001FD04",
- "UMaskExt": "0xC001FD",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Occupancy : All requests from IO Devices that missed the LLC",
- "CounterType": "PGMABLE",
- "EventCode": "0x36",
- "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS",
- "PerPkg": "1",
- "PublicDescription": "TOR Occupancy : All requests from IO Devices that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC001FE04",
- "UMaskExt": "0xC001FE",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that missed the LLC",
- "CounterType": "PGMABLE",
- "EventCode": "0x36",
- "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_PCIRDCUR",
- "PerPkg": "1",
- "PublicDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xc8f3fe04",
- "UMaskExt": "0xc8f3fe",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices",
- "CounterType": "PGMABLE",
- "EventCode": "0x36",
- "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_PCIRDCUR",
- "PerPkg": "1",
- "PublicDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xC8F3FF04",
- "UMaskExt": "0xC8F3FF",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Clockticks of the integrated IO (IIO) traffic controller",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x01",
- "EventName": "UNC_IIO_CLOCKTICKS",
- "PerPkg": "1",
- "PublicDescription": "Clockticks of the integrated IO (IIO) traffic controller",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Free running counter that increments for IIO clocktick",
- "CounterType": "FREERUN",
- "EventName": "UNC_IIO_CLOCKTICKS_FREERUN",
- "PerPkg": "1",
- "PublicDescription": "Free running counter that increments for integrated IO (IIO) traffic controller clockticks",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0-7",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xc2",
- "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.ALL_PARTS",
- "FCMask": "0x04",
- "PerPkg": "1",
- "PortMask": "0xff",
- "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 0-7",
- "UMask": "0x03",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xc2",
- "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART0",
- "FCMask": "0x04",
- "PerPkg": "1",
- "PortMask": "0x01",
- "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 0 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
- "UMask": "0x03",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 1",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xc2",
- "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART1",
- "FCMask": "0x04",
- "PerPkg": "1",
- "PortMask": "0x02",
- "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 1 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 1",
- "UMask": "0x03",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 2",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xc2",
- "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART2",
- "FCMask": "0x04",
- "PerPkg": "1",
- "PortMask": "0x04",
- "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 2 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 2",
- "UMask": "0x03",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 3",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xc2",
- "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART3",
- "FCMask": "0x04",
- "PerPkg": "1",
- "PortMask": "0x08",
- "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 3 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 3",
- "UMask": "0x03",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 4",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xc2",
- "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART4",
- "FCMask": "0x04",
- "PerPkg": "1",
- "PortMask": "0x10",
- "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 4 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 4",
- "UMask": "0x03",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 5",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xc2",
- "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART5",
- "FCMask": "0x04",
- "PerPkg": "1",
- "PortMask": "0x20",
- "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 5 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 5",
- "UMask": "0x03",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 6",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xc2",
- "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART6",
- "FCMask": "0x04",
- "PerPkg": "1",
- "PortMask": "0x40",
- "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 6 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 6",
- "UMask": "0x03",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 7",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xc2",
- "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART7",
- "FCMask": "0x04",
- "PerPkg": "1",
- "PortMask": "0x80",
- "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 7 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 7",
- "UMask": "0x03",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 0-7",
- "Counter": "2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xd5",
- "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL_PARTS",
- "FCMask": "0x04",
- "PerPkg": "1",
- "PublicDescription": "PCIe Completion Buffer Occupancy : Part 0-7",
- "UMask": "0xff",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 0",
- "Counter": "2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xd5",
- "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART0",
- "FCMask": "0x04",
- "PerPkg": "1",
- "PublicDescription": "PCIe Completion Buffer Occupancy : Part 0 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 1",
- "Counter": "2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xd5",
- "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART1",
- "FCMask": "0x04",
- "PerPkg": "1",
- "PublicDescription": "PCIe Completion Buffer Occupancy : Part 1 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 1",
- "UMask": "0x02",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 2",
- "Counter": "2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xd5",
- "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART2",
- "FCMask": "0x04",
- "PerPkg": "1",
- "PublicDescription": "PCIe Completion Buffer Occupancy : Part 2 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 2",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 3",
- "Counter": "2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xd5",
- "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART3",
- "FCMask": "0x04",
- "PerPkg": "1",
- "PublicDescription": "PCIe Completion Buffer Occupancy : Part 3 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 3",
- "UMask": "0x08",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 4",
- "Counter": "2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xd5",
- "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART4",
- "FCMask": "0x04",
- "PerPkg": "1",
- "PublicDescription": "PCIe Completion Buffer Occupancy : Part 4 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 4",
- "UMask": "0x10",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 5",
- "Counter": "2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xd5",
- "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART5",
- "FCMask": "0x04",
- "PerPkg": "1",
- "PublicDescription": "PCIe Completion Buffer Occupancy : Part 5 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 5",
- "UMask": "0x20",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 6",
- "Counter": "2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xd5",
- "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART6",
- "FCMask": "0x04",
- "PerPkg": "1",
- "PublicDescription": "PCIe Completion Buffer Occupancy : Part 6 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 6",
- "UMask": "0x40",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 7",
- "Counter": "2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xd5",
- "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART7",
- "FCMask": "0x04",
- "PerPkg": "1",
- "PublicDescription": "PCIe Completion Buffer Occupancy : Part 7 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 7",
- "UMask": "0x80",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
- "Counter": "2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xc0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART0",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x01",
- "PublicDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
- "Counter": "2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xc0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART1",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x02",
- "PublicDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
- "Counter": "2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xc0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART2",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x04",
- "PublicDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
- "Counter": "2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xc0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART3",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x08",
- "PublicDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
- "Counter": "2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xc0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART4",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x10",
- "PublicDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
- "Counter": "2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xc0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART5",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x20",
- "PublicDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 5",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
- "Counter": "2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xc0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART6",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x40",
- "PublicDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
- "Counter": "2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xc0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART7",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x80",
- "PublicDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 7",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
- "Counter": "2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART0",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x01",
- "PublicDescription": "Data requested by the CPU : Core writing to Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
- "Counter": "2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART1",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x02",
- "PublicDescription": "Data requested by the CPU : Core writing to Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
- "Counter": "2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART2",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x04",
- "PublicDescription": "Data requested by the CPU : Core writing to Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
- "Counter": "2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART3",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x08",
- "PublicDescription": "Data requested by the CPU : Core writing to Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
- "Counter": "2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART4",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x10",
- "PublicDescription": "Data requested by the CPU : Core writing to Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
- "Counter": "2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART5",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x20",
- "PublicDescription": "Data requested by the CPU : Core writing to Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 5",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
- "Counter": "2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART6",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x40",
- "PublicDescription": "Data requested by the CPU : Core writing to Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
- "Counter": "2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART7",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x80",
- "PublicDescription": "Data requested by the CPU : Core writing to Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 7",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART0",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x01",
- "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
- "UMask": "0x80",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART1",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x02",
- "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
- "UMask": "0x80",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART2",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x04",
- "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
- "UMask": "0x80",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART3",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x08",
- "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
- "UMask": "0x80",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART4",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x10",
- "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
- "UMask": "0x80",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART5",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x20",
- "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
- "UMask": "0x80",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART6",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x40",
- "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
- "UMask": "0x80",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART7",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x80",
- "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
- "UMask": "0x80",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART4",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x10",
- "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART5",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x20",
- "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART6",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x40",
- "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART7",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x80",
- "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART4",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x10",
- "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART5",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x20",
- "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART6",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x40",
- "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART7",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x80",
- "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number requests PCIe makes of the main die : All",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x85",
- "EventName": "UNC_IIO_NUM_REQ_OF_CPU.COMMIT.ALL",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0xFF",
- "PublicDescription": "Number requests PCIe makes of the main die : All : Counts full PCIe requests before they're broken into a series of cache-line size requests as measured by DATA_REQ_OF_CPU and TXN_REQ_OF_CPU.",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xc1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART0",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x01",
- "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xc1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART1",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x02",
- "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xc1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART2",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x04",
- "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xc1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART3",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x08",
- "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xc1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART4",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x10",
- "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xc1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART5",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x20",
- "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 5",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xc1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART6",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x40",
- "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xc1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART7",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x80",
- "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 7",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xc1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART0",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x01",
- "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xc1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART1",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x02",
- "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xc1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART2",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x04",
- "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xc1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART3",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x08",
- "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xc1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART4",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x10",
- "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xc1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART5",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x20",
- "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 5",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xc1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART6",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x40",
- "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xc1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART7",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x80",
- "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 7",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART0",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x01",
- "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
- "UMask": "0x80",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART1",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x02",
- "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
- "UMask": "0x80",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART2",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x04",
- "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
- "UMask": "0x80",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART3",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x08",
- "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
- "UMask": "0x80",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART4",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x10",
- "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
- "UMask": "0x80",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART5",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x20",
- "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
- "UMask": "0x80",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART6",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x40",
- "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
- "UMask": "0x80",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART7",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x80",
- "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
- "UMask": "0x80",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART0",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x01",
- "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART1",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x02",
- "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART2",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x04",
- "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART3",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x08",
- "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART4",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x10",
- "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART5",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x20",
- "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART6",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x40",
- "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART7",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x80",
- "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART0",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x01",
- "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART1",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x02",
- "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART2",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x04",
- "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART3",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x08",
- "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART4",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x10",
- "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART5",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x20",
- "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART6",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x40",
- "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART7",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x80",
- "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Total IRP occupancy of inbound read and write requests to coherent memory.",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x0f",
- "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.MEM",
- "PerPkg": "1",
- "PublicDescription": "Total IRP occupancy of inbound read and write requests to coherent memory. This is effectively the sum of read occupancy and write occupancy.",
- "UMask": "0x04",
- "Unit": "IRP"
- },
- {
- "BriefDescription": "Clockticks of the IO coherency tracker (IRP)",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x01",
- "EventName": "UNC_I_CLOCKTICKS",
- "PerPkg": "1",
- "PublicDescription": "Clockticks of the IO coherency tracker (IRP)",
- "Unit": "IRP"
- },
- {
- "BriefDescription": "PCIITOM request issued by the IRP unit to the mesh with the intention of writing a full cacheline.",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x10",
- "EventName": "UNC_I_COHERENT_OPS.PCITOM",
- "PerPkg": "1",
- "PublicDescription": "PCIITOM request issued by the IRP unit to the mesh with the intention of writing a full cacheline to coherent memory, without a RFO. PCIITOM is a speculative Invalidate to Modified command that requests ownership of the cacheline and does not move data from the mesh to IRP cache.",
- "UMask": "0x10",
- "Unit": "IRP"
- },
- {
- "BriefDescription": "Coherent Ops : WbMtoI",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x10",
- "EventName": "UNC_I_COHERENT_OPS.WBMTOI",
- "PerPkg": "1",
- "PublicDescription": "Coherent Ops : WbMtoI : Counts the number of coherency related operations servied by the IRP",
- "UMask": "0x40",
- "Unit": "IRP"
- },
- {
- "BriefDescription": "FAF RF full",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x17",
- "EventName": "UNC_I_FAF_FULL",
- "PerPkg": "1",
- "PublicDescription": "FAF RF full",
- "Unit": "IRP"
- },
- {
- "BriefDescription": "Inbound read requests received by the IRP and inserted into the FAF queue.",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x18",
- "EventName": "UNC_I_FAF_INSERTS",
- "PerPkg": "1",
- "PublicDescription": "Inbound read requests to coherent memory, received by the IRP and inserted into the Fire and Forget queue (FAF), a queue used for processing inbound reads in the IRP.",
- "Unit": "IRP"
- },
- {
- "BriefDescription": "Occupancy of the IRP FAF queue.",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x19",
- "EventName": "UNC_I_FAF_OCCUPANCY",
- "PerPkg": "1",
- "PublicDescription": "Occupancy of the IRP Fire and Forget (FAF) queue, a queue used for processing inbound reads in the IRP.",
- "Unit": "IRP"
- },
- {
- "BriefDescription": "FAF allocation -- sent to ADQ",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x16",
- "EventName": "UNC_I_FAF_TRANSACTIONS",
- "PerPkg": "1",
- "PublicDescription": "FAF allocation -- sent to ADQ",
- "Unit": "IRP"
- },
- {
- "BriefDescription": ": All Inserts Inbound (p2p + faf + cset)",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x20",
- "EventName": "UNC_I_IRP_ALL.INBOUND_INSERTS",
- "PerPkg": "1",
- "PublicDescription": ": All Inserts Inbound (p2p + faf + cset)",
- "UMask": "0x01",
- "Unit": "IRP"
- },
- {
- "BriefDescription": "Misc Events - Set 1 : Lost Forward",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x1F",
- "EventName": "UNC_I_MISC1.LOST_FWD",
- "PerPkg": "1",
- "PublicDescription": "Misc Events - Set 1 : Lost Forward : Snoop pulled away ownership before a write was committed",
- "UMask": "0x10",
- "Unit": "IRP"
- },
- {
- "BriefDescription": "Responses to snoops of any type that hit M line in the IIO cache",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x12",
- "EventName": "UNC_I_SNOOP_RESP.ALL_HIT_M",
- "PerPkg": "1",
- "PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit M line in the IIO cache",
- "UMask": "0x78",
- "Unit": "IRP"
- },
- {
- "BriefDescription": "Inbound write (fast path) requests received by the IRP.",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x11",
- "EventName": "UNC_I_TRANSACTIONS.WR_PREF",
- "PerPkg": "1",
- "PublicDescription": "Inbound write (fast path) requests to coherent memory, received by the IRP resulting in write ownership requests issued by IRP to the mesh.",
- "UMask": "0x08",
- "Unit": "IRP"
- },
- {
- "BriefDescription": "Clockticks of the mesh to memory (M2M)",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventName": "UNC_M2M_CLOCKTICKS",
- "PerPkg": "1",
- "PublicDescription": "Clockticks of the mesh to memory (M2M)",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "CMS Clockticks",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xc0",
- "EventName": "UNC_M2M_CMS_CLOCKTICKS",
- "PerPkg": "1",
- "PublicDescription": "CMS Clockticks",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Clockticks of the mesh to PCI (M2P)",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x01",
- "EventName": "UNC_M2P_CLOCKTICKS",
- "PerPkg": "1",
- "PublicDescription": "Clockticks of the mesh to PCI (M2P)",
- "Unit": "M2PCIe"
- },
- {
- "BriefDescription": "CMS Clockticks",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0xc0",
- "EventName": "UNC_M2P_CMS_CLOCKTICKS",
- "PerPkg": "1",
- "PublicDescription": "CMS Clockticks",
- "Unit": "M2PCIe"
- },
- {
- "BriefDescription": "Clockticks in the UBOX using a dedicated 48-bit Fixed Counter",
- "Counter": "FIXED",
- "CounterType": "FIXED",
- "EventCode": "0xff",
- "EventName": "UNC_U_CLOCKTICKS",
- "PerPkg": "1",
- "PublicDescription": "Clockticks in the UBOX using a dedicated 48-bit Fixed Counter",
- "Unit": "UBOX"
- }
-]
diff --git a/tools/perf/pmu-events/arch/x86/tremontx/uncore-power.json b/tools/perf/pmu-events/arch/x86/tremontx/uncore-power.json
deleted file mode 100644
index ea62c092b43f..000000000000
--- a/tools/perf/pmu-events/arch/x86/tremontx/uncore-power.json
+++ /dev/null
@@ -1,11 +0,0 @@
-[
- {
- "BriefDescription": "Clockticks of the power control unit (PCU)",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventName": "UNC_P_CLOCKTICKS",
- "PerPkg": "1",
- "PublicDescription": "Clockticks of the power control unit (PCU)",
- "Unit": "PCU"
- }
-]
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-dp/cache.json b/tools/perf/pmu-events/arch/x86/westmereep-dp/cache.json
index 0f01cf223777..37ed2742fec6 100644
--- a/tools/perf/pmu-events/arch/x86/westmereep-dp/cache.json
+++ b/tools/perf/pmu-events/arch/x86/westmereep-dp/cache.json
@@ -2814,4 +2814,4 @@
"SampleAfterValue": "200000",
"UMask": "0x8"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-dp/floating-point.json b/tools/perf/pmu-events/arch/x86/westmereep-dp/floating-point.json
index 39af1329224a..666e466d351c 100644
--- a/tools/perf/pmu-events/arch/x86/westmereep-dp/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/westmereep-dp/floating-point.json
@@ -226,4 +226,4 @@
"SampleAfterValue": "200000",
"UMask": "0x8"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-dp/frontend.json b/tools/perf/pmu-events/arch/x86/westmereep-dp/frontend.json
index 8ac5c24888c5..c561ac24d91d 100644
--- a/tools/perf/pmu-events/arch/x86/westmereep-dp/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/westmereep-dp/frontend.json
@@ -23,4 +23,4 @@
"SampleAfterValue": "2000000",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-dp/memory.json b/tools/perf/pmu-events/arch/x86/westmereep-dp/memory.json
index 36fbea313c6f..7e529b367c21 100644
--- a/tools/perf/pmu-events/arch/x86/westmereep-dp/memory.json
+++ b/tools/perf/pmu-events/arch/x86/westmereep-dp/memory.json
@@ -755,4 +755,4 @@
"SampleAfterValue": "100000",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-dp/virtual-memory.json b/tools/perf/pmu-events/arch/x86/westmereep-dp/virtual-memory.json
index d63e469a43e1..8099e6700e31 100644
--- a/tools/perf/pmu-events/arch/x86/westmereep-dp/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/westmereep-dp/virtual-memory.json
@@ -170,4 +170,4 @@
"SampleAfterValue": "200000",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-sp/floating-point.json b/tools/perf/pmu-events/arch/x86/westmereep-sp/floating-point.json
index 39af1329224a..666e466d351c 100644
--- a/tools/perf/pmu-events/arch/x86/westmereep-sp/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/westmereep-sp/floating-point.json
@@ -226,4 +226,4 @@
"SampleAfterValue": "200000",
"UMask": "0x8"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-sp/frontend.json b/tools/perf/pmu-events/arch/x86/westmereep-sp/frontend.json
index 8ac5c24888c5..c561ac24d91d 100644
--- a/tools/perf/pmu-events/arch/x86/westmereep-sp/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/westmereep-sp/frontend.json
@@ -23,4 +23,4 @@
"SampleAfterValue": "2000000",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-sp/virtual-memory.json b/tools/perf/pmu-events/arch/x86/westmereep-sp/virtual-memory.json
index 0252f77a844b..e7affdf7f41b 100644
--- a/tools/perf/pmu-events/arch/x86/westmereep-sp/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/westmereep-sp/virtual-memory.json
@@ -146,4 +146,4 @@
"SampleAfterValue": "200000",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/westmereex/floating-point.json b/tools/perf/pmu-events/arch/x86/westmereex/floating-point.json
index 39af1329224a..666e466d351c 100644
--- a/tools/perf/pmu-events/arch/x86/westmereex/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/westmereex/floating-point.json
@@ -226,4 +226,4 @@
"SampleAfterValue": "200000",
"UMask": "0x8"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/westmereex/frontend.json b/tools/perf/pmu-events/arch/x86/westmereex/frontend.json
index 8ac5c24888c5..c561ac24d91d 100644
--- a/tools/perf/pmu-events/arch/x86/westmereex/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/westmereex/frontend.json
@@ -23,4 +23,4 @@
"SampleAfterValue": "2000000",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/westmereex/virtual-memory.json b/tools/perf/pmu-events/arch/x86/westmereex/virtual-memory.json
index 5d1e017d1261..0c3501e6e5a3 100644
--- a/tools/perf/pmu-events/arch/x86/westmereex/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/westmereex/virtual-memory.json
@@ -170,4 +170,4 @@
"SampleAfterValue": "200000",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/empty-pmu-events.c b/tools/perf/pmu-events/empty-pmu-events.c
new file mode 100644
index 000000000000..5ed8c0aa4817
--- /dev/null
+++ b/tools/perf/pmu-events/empty-pmu-events.c
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * An empty pmu-events.c file used when there are no architecture json files
+ * in arch or when the jevents.py script cannot be run.
+ *
+ * The test cpu/soc is provided for testing.
+ */
+#include "pmu-events/pmu-events.h"
+#include "util/header.h"
+#include "util/pmu.h"
+#include <string.h>
+#include <stddef.h>
+
+static const struct pmu_event pme_test_soc_cpu[] = {
+ {
+ .name = "l3_cache_rd",
+ .event = "event=0x40",
+ .desc = "L3 cache access, read",
+ .topic = "cache",
+ .long_desc = "Attributable Level 3 cache access, read",
+ },
+ {
+ .name = "segment_reg_loads.any",
+ .event = "event=0x6,period=200000,umask=0x80",
+ .desc = "Number of segment register loads",
+ .topic = "other",
+ },
+ {
+ .name = "dispatch_blocked.any",
+ .event = "event=0x9,period=200000,umask=0x20",
+ .desc = "Memory cluster signals to block micro-op dispatch for any reason",
+ .topic = "other",
+ },
+ {
+ .name = "eist_trans",
+ .event = "event=0x3a,period=200000,umask=0x0",
+ .desc = "Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions",
+ .topic = "other",
+ },
+ {
+ .name = "uncore_hisi_ddrc.flux_wcmd",
+ .event = "event=0x2",
+ .desc = "DDRC write commands. Unit: hisi_sccl,ddrc ",
+ .topic = "uncore",
+ .long_desc = "DDRC write commands",
+ .pmu = "hisi_sccl,ddrc",
+ },
+ {
+ .name = "unc_cbo_xsnp_response.miss_eviction",
+ .event = "event=0x22,umask=0x81",
+ .desc = "A cross-core snoop resulted from L3 Eviction which misses in some processor core. Unit: uncore_cbox ",
+ .topic = "uncore",
+ .long_desc = "A cross-core snoop resulted from L3 Eviction which misses in some processor core",
+ .pmu = "uncore_cbox",
+ },
+ {
+ .name = "event-hyphen",
+ .event = "event=0xe0,umask=0x00",
+ .desc = "UNC_CBO_HYPHEN. Unit: uncore_cbox ",
+ .topic = "uncore",
+ .long_desc = "UNC_CBO_HYPHEN",
+ .pmu = "uncore_cbox",
+ },
+ {
+ .name = "event-two-hyph",
+ .event = "event=0xc0,umask=0x00",
+ .desc = "UNC_CBO_TWO_HYPH. Unit: uncore_cbox ",
+ .topic = "uncore",
+ .long_desc = "UNC_CBO_TWO_HYPH",
+ .pmu = "uncore_cbox",
+ },
+ {
+ .name = "uncore_hisi_l3c.rd_hit_cpipe",
+ .event = "event=0x7",
+ .desc = "Total read hits. Unit: hisi_sccl,l3c ",
+ .topic = "uncore",
+ .long_desc = "Total read hits",
+ .pmu = "hisi_sccl,l3c",
+ },
+ {
+ .name = "uncore_imc_free_running.cache_miss",
+ .event = "event=0x12",
+ .desc = "Total cache misses. Unit: uncore_imc_free_running ",
+ .topic = "uncore",
+ .long_desc = "Total cache misses",
+ .pmu = "uncore_imc_free_running",
+ },
+ {
+ .name = "uncore_imc.cache_hits",
+ .event = "event=0x34",
+ .desc = "Total cache hits. Unit: uncore_imc ",
+ .topic = "uncore",
+ .long_desc = "Total cache hits",
+ .pmu = "uncore_imc",
+ },
+ {
+ .name = "bp_l1_btb_correct",
+ .event = "event=0x8a",
+ .desc = "L1 BTB Correction",
+ .topic = "branch",
+ },
+ {
+ .name = "bp_l2_btb_correct",
+ .event = "event=0x8b",
+ .desc = "L2 BTB Correction",
+ .topic = "branch",
+ },
+ {
+ .metric_expr = "1 / IPC",
+ .metric_name = "CPI",
+ },
+ {
+ .metric_expr = "inst_retired.any / cpu_clk_unhalted.thread",
+ .metric_name = "IPC",
+ .metric_group = "group1",
+ },
+ {
+ .metric_expr = "idq_uops_not_delivered.core / (4 * (( ( cpu_clk_unhalted.thread / 2 ) * "
+ "( 1 + cpu_clk_unhalted.one_thread_active / cpu_clk_unhalted.ref_xclk ) )))",
+ .metric_name = "Frontend_Bound_SMT",
+ },
+ {
+ .metric_expr = "l1d\\-loads\\-misses / inst_retired.any",
+ .metric_name = "dcache_miss_cpi",
+ },
+ {
+ .metric_expr = "l1i\\-loads\\-misses / inst_retired.any",
+ .metric_name = "icache_miss_cycles",
+ },
+ {
+ .metric_expr = "(dcache_miss_cpi + icache_miss_cycles)",
+ .metric_name = "cache_miss_cycles",
+ .metric_group = "group1",
+ },
+ {
+ .metric_expr = "l2_rqsts.demand_data_rd_hit + l2_rqsts.pf_hit + l2_rqsts.rfo_hit",
+ .metric_name = "DCache_L2_All_Hits",
+ },
+ {
+ .metric_expr = "max(l2_rqsts.all_demand_data_rd - l2_rqsts.demand_data_rd_hit, 0) + "
+ "l2_rqsts.pf_miss + l2_rqsts.rfo_miss",
+ .metric_name = "DCache_L2_All_Miss",
+ },
+ {
+ .metric_expr = "dcache_l2_all_hits + dcache_l2_all_miss",
+ .metric_name = "DCache_L2_All",
+ },
+ {
+ .metric_expr = "d_ratio(dcache_l2_all_hits, dcache_l2_all)",
+ .metric_name = "DCache_L2_Hits",
+ },
+ {
+ .metric_expr = "d_ratio(dcache_l2_all_miss, dcache_l2_all)",
+ .metric_name = "DCache_L2_Misses",
+ },
+ {
+ .metric_expr = "ipc + M2",
+ .metric_name = "M1",
+ },
+ {
+ .metric_expr = "ipc + M1",
+ .metric_name = "M2",
+ },
+ {
+ .metric_expr = "1/M3",
+ .metric_name = "M3",
+ },
+ {
+ .metric_expr = "64 * l1d.replacement / 1000000000 / duration_time",
+ .metric_name = "L1D_Cache_Fill_BW",
+ },
+ {
+ .name = 0,
+ .event = 0,
+ .desc = 0,
+ },
+};
+
+/* Struct used to make the PMU event table implementation opaque to callers. */
+struct pmu_events_table {
+ const struct pmu_event *entries;
+};
+
+/*
+ * Map a CPU to its table of PMU events. The CPU is identified by the
+ * cpuid field, which is an arch-specific identifier for the CPU.
+ * The identifier specified in tools/perf/pmu-events/arch/xxx/mapfile
+ * must match the get_cpuid_str() in tools/perf/arch/xxx/util/header.c.
+ *
+ * The cpuid can contain any character other than the comma.
+ */
+struct pmu_events_map {
+ const char *arch;
+ const char *cpuid;
+ const struct pmu_events_table table;
+};
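+/*
+ * Illustrative sketch only: a mapfile row pairs the cpuid string returned by
+ * get_cpuid_str() with the directory holding that CPU's event json, along the
+ * lines of the hypothetical x86 entry below (the exact column layout is
+ * defined per architecture, not by this file):
+ *
+ *   GenuineIntel-6-55-[01234],v1.00,skylakex,core
+ */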
+
+/*
+ * Global table mapping each known CPU for the architecture to its
+ * table of PMU events.
+ */
+static const struct pmu_events_map pmu_events_map[] = {
+ {
+ .arch = "testarch",
+ .cpuid = "testcpu",
+ .table = { pme_test_soc_cpu },
+ },
+ {
+ .arch = 0,
+ .cpuid = 0,
+ .table = { 0 },
+ },
+};
+
+static const struct pmu_event pme_test_soc_sys[] = {
+ {
+ .name = "sys_ddr_pmu.write_cycles",
+ .event = "event=0x2b",
+ .desc = "ddr write-cycles event. Unit: uncore_sys_ddr_pmu ",
+ .compat = "v8",
+ .topic = "uncore",
+ .pmu = "uncore_sys_ddr_pmu",
+ },
+ {
+ .name = "sys_ccn_pmu.read_cycles",
+ .event = "config=0x2c",
+ .desc = "ccn read-cycles event. Unit: uncore_sys_ccn_pmu ",
+ .compat = "0x01",
+ .topic = "uncore",
+ .pmu = "uncore_sys_ccn_pmu",
+ },
+ {
+ .name = 0,
+ .event = 0,
+ .desc = 0,
+ },
+};
+
+struct pmu_sys_events {
+ const char *name;
+ const struct pmu_events_table table;
+};
+
+static const struct pmu_sys_events pmu_sys_event_tables[] = {
+ {
+ .table = { pme_test_soc_sys },
+ .name = "pme_test_soc_sys",
+ },
+ {
+ .table = { 0 }
+ },
+};
+
+int pmu_events_table_for_each_event(const struct pmu_events_table *table, pmu_event_iter_fn fn,
+ void *data)
+{
+ for (const struct pmu_event *pe = &table->entries[0];
+ pe->name || pe->metric_group || pe->metric_name;
+ pe++) {
+ int ret = fn(pe, table, data);
+
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
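+/*
+ * Minimal usage sketch for the iterator above, assuming the pmu_event_iter_fn
+ * callback declared in pmu-events.h takes (event, table, data) and stops the
+ * walk by returning non-zero.  The count_events() helper is hypothetical and
+ * only illustrates the contract:
+ *
+ *   static int count_events(const struct pmu_event *pe,
+ *                           const struct pmu_events_table *table,
+ *                           void *data)
+ *   {
+ *           (*(int *)data)++;
+ *           return 0;        // keep going; non-zero aborts the walk
+ *   }
+ *
+ *   int n = 0;
+ *   pmu_events_table_for_each_event(table, count_events, &n);
+ */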
+
+const struct pmu_events_table *perf_pmu__find_table(struct perf_pmu *pmu)
+{
+ const struct pmu_events_table *table = NULL;
+ char *cpuid = perf_pmu__getcpuid(pmu);
+ int i;
+
+ /* On some platforms that use a cpus map, cpuid can be NULL for
+ * PMUs other than core PMUs.
+ */
+ if (!cpuid)
+ return NULL;
+
+ i = 0;
+ for (;;) {
+ const struct pmu_events_map *map = &pmu_events_map[i++];
+
+ if (!map->cpuid)
+ break;
+
+ if (!strcmp_cpuid_str(map->cpuid, cpuid)) {
+ table = &map->table;
+ break;
+ }
+ }
+ free(cpuid);
+ return table;
+}
+
+const struct pmu_events_table *find_core_events_table(const char *arch, const char *cpuid)
+{
+ for (const struct pmu_events_map *tables = &pmu_events_map[0];
+ tables->arch;
+ tables++) {
+ if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
+ return &tables->table;
+ }
+ return NULL;
+}
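+/*
+ * Usage sketch: with only this stub compiled in, the single core table that
+ * can be resolved is the test SoC registered in pmu_events_map above, e.g.
+ *
+ *   const struct pmu_events_table *t =
+ *           find_core_events_table("testarch", "testcpu");
+ *
+ * which yields the table wrapping pme_test_soc_cpu; any other arch/cpuid
+ * pair returns NULL.
+ */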
+
+int pmu_for_each_core_event(pmu_event_iter_fn fn, void *data)
+{
+ for (const struct pmu_events_map *tables = &pmu_events_map[0];
+ tables->arch;
+ tables++) {
+ int ret = pmu_events_table_for_each_event(&tables->table, fn, data);
+
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+const struct pmu_events_table *find_sys_events_table(const char *name)
+{
+ for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
+ tables->name;
+ tables++) {
+ if (!strcmp(tables->name, name))
+ return &tables->table;
+ }
+ return NULL;
+}
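+/*
+ * Usage sketch: the only system events table provided by this stub is the
+ * test SoC one registered in pmu_sys_event_tables above, so
+ *
+ *   const struct pmu_events_table *t =
+ *           find_sys_events_table("pme_test_soc_sys");
+ *
+ * resolves to the table wrapping pme_test_soc_sys.
+ */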
+
+int pmu_for_each_sys_event(pmu_event_iter_fn fn, void *data)
+{
+ for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
+ tables->name;
+ tables++) {
+ int ret = pmu_events_table_for_each_event(&tables->table, fn, data);
+
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
deleted file mode 100644
index e597e4bac90f..000000000000
--- a/tools/perf/pmu-events/jevents.c
+++ /dev/null
@@ -1,1342 +0,0 @@
-#define _XOPEN_SOURCE 500 /* needed for nftw() */
-#define _GNU_SOURCE /* needed for asprintf() */
-
-/* Parse event JSON files */
-
-/*
- * Copyright (c) 2014, Intel Corporation
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <errno.h>
-#include <string.h>
-#include <ctype.h>
-#include <unistd.h>
-#include <stdarg.h>
-#include <libgen.h>
-#include <limits.h>
-#include <dirent.h>
-#include <sys/time.h> /* getrlimit */
-#include <sys/resource.h> /* getrlimit */
-#include <ftw.h>
-#include <sys/stat.h>
-#include <linux/compiler.h>
-#include <linux/list.h>
-#include "jsmn.h"
-#include "json.h"
-#include "pmu-events.h"
-
-int verbose;
-char *prog;
-
-struct json_event {
- char *name;
- char *compat;
- char *event;
- char *desc;
- char *long_desc;
- char *pmu;
- char *unit;
- char *perpkg;
- char *aggr_mode;
- char *metric_expr;
- char *metric_name;
- char *metric_group;
- char *deprecated;
- char *metric_constraint;
-};
-
-static enum aggr_mode_class convert(const char *aggr_mode)
-{
- if (!strcmp(aggr_mode, "PerCore"))
- return PerCore;
- else if (!strcmp(aggr_mode, "PerChip"))
- return PerChip;
-
- pr_err("%s: Wrong AggregationMode value '%s'\n", prog, aggr_mode);
- return -1;
-}
-
-static LIST_HEAD(sys_event_tables);
-
-struct sys_event_table {
- struct list_head list;
- char *soc_id;
-};
-
-static void free_sys_event_tables(void)
-{
- struct sys_event_table *et, *next;
-
- list_for_each_entry_safe(et, next, &sys_event_tables, list) {
- free(et->soc_id);
- free(et);
- }
-}
-
-int eprintf(int level, int var, const char *fmt, ...)
-{
-
- int ret;
- va_list args;
-
- if (var < level)
- return 0;
-
- va_start(args, fmt);
-
- ret = vfprintf(stderr, fmt, args);
-
- va_end(args);
-
- return ret;
-}
-
-static void addfield(char *map, char **dst, const char *sep,
- const char *a, jsmntok_t *bt)
-{
- unsigned int len = strlen(a) + 1 + strlen(sep);
- int olen = *dst ? strlen(*dst) : 0;
- int blen = bt ? json_len(bt) : 0;
- char *out;
-
- out = realloc(*dst, len + olen + blen);
- if (!out) {
- /* Don't add field in this case */
- return;
- }
- *dst = out;
-
- if (!olen)
- *(*dst) = 0;
- else
- strcat(*dst, sep);
- strcat(*dst, a);
- if (bt)
- strncat(*dst, map + bt->start, blen);
-}
-
-static void fixname(char *s)
-{
- for (; *s; s++)
- *s = tolower(*s);
-}
-
-static void fixdesc(char *s)
-{
- char *e = s + strlen(s);
-
- /* Remove trailing dots that look ugly in perf list */
- --e;
- while (e >= s && isspace(*e))
- --e;
- if (*e == '.')
- *e = 0;
-}
-
-/* Add escapes for '\' so they are proper C strings. */
-static char *fixregex(char *s)
-{
- int len = 0;
- int esc_count = 0;
- char *fixed = NULL;
- char *p, *q;
-
- /* Count the number of '\' in string */
- for (p = s; *p; p++) {
- ++len;
- if (*p == '\\')
- ++esc_count;
- }
-
- if (esc_count == 0)
- return s;
-
- /* allocate space for a new string */
- fixed = (char *) malloc(len + esc_count + 1);
- if (!fixed)
- return NULL;
-
- /* copy over the characters */
- q = fixed;
- for (p = s; *p; p++) {
- if (*p == '\\') {
- *q = '\\';
- ++q;
- }
- *q = *p;
- ++q;
- }
- *q = '\0';
- return fixed;
-}
-
-static struct msrmap {
- const char *num;
- const char *pname;
-} msrmap[] = {
- { "0x3F6", "ldlat=" },
- { "0x1A6", "offcore_rsp=" },
- { "0x1A7", "offcore_rsp=" },
- { "0x3F7", "frontend=" },
- { NULL, NULL }
-};
-
-static void cut_comma(char *map, jsmntok_t *newval)
-{
- int i;
-
- /* Cut off everything after comma */
- for (i = newval->start; i < newval->end; i++) {
- if (map[i] == ',')
- newval->end = i;
- }
-}
-
-static struct msrmap *lookup_msr(char *map, jsmntok_t *val)
-{
- jsmntok_t newval = *val;
- static bool warned;
- int i;
-
- cut_comma(map, &newval);
- for (i = 0; msrmap[i].num; i++)
- if (json_streq(map, &newval, msrmap[i].num))
- return &msrmap[i];
- if (!warned) {
- warned = true;
- pr_err("%s: Unknown MSR in event file %.*s\n", prog,
- json_len(val), map + val->start);
- }
- return NULL;
-}
-
-static struct map {
- const char *json;
- const char *perf;
-} unit_to_pmu[] = {
- { "CBO", "uncore_cbox" },
- { "QPI LL", "uncore_qpi" },
- { "SBO", "uncore_sbox" },
- { "iMPH-U", "uncore_arb" },
- { "CPU-M-CF", "cpum_cf" },
- { "CPU-M-SF", "cpum_sf" },
- { "UPI LL", "uncore_upi" },
- { "hisi_sicl,cpa", "hisi_sicl,cpa"},
- { "hisi_sccl,ddrc", "hisi_sccl,ddrc" },
- { "hisi_sccl,hha", "hisi_sccl,hha" },
- { "hisi_sccl,l3c", "hisi_sccl,l3c" },
- /* it's not realistic to keep adding these, we need something more scalable ... */
- { "imx8_ddr", "imx8_ddr" },
- { "L3PMC", "amd_l3" },
- { "DFPMC", "amd_df" },
- { "cpu_core", "cpu_core" },
- { "cpu_atom", "cpu_atom" },
- {}
-};
-
-static const char *field_to_perf(struct map *table, char *map, jsmntok_t *val)
-{
- int i;
-
- for (i = 0; table[i].json; i++) {
- if (json_streq(map, val, table[i].json))
- return table[i].perf;
- }
- return NULL;
-}
-
-#define EXPECT(e, t, m) do { if (!(e)) { \
- jsmntok_t *loc = (t); \
- if (!(t)->start && (t) > tokens) \
- loc = (t) - 1; \
- pr_err("%s:%d: " m ", got %s\n", fn, \
- json_line(map, loc), \
- json_name(t)); \
- err = -EIO; \
- goto out_free; \
-} } while (0)
-
-static char *topic;
-
-static char *get_topic(void)
-{
- char *tp;
- int i;
-
- /* tp is free'd in process_one_file() */
- i = asprintf(&tp, "%s", topic);
- if (i < 0) {
- pr_info("%s: asprintf() error %s\n", prog);
- return NULL;
- }
-
- for (i = 0; i < (int) strlen(tp); i++) {
- char c = tp[i];
-
- if (c == '-')
- tp[i] = ' ';
- else if (c == '.') {
- tp[i] = '\0';
- break;
- }
- }
-
- return tp;
-}
-
-static int add_topic(char *bname)
-{
- free(topic);
- topic = strdup(bname);
- if (!topic) {
- pr_info("%s: strdup() error %s for file %s\n", prog,
- strerror(errno), bname);
- return -ENOMEM;
- }
- return 0;
-}
-
-struct perf_entry_data {
- FILE *outfp;
- char *topic;
-};
-
-static int close_table;
-
-static void print_events_table_prefix(FILE *fp, const char *tblname)
-{
- fprintf(fp, "static const struct pmu_event %s[] = {\n", tblname);
- close_table = 1;
-}
-
-static int print_events_table_entry(void *data, struct json_event *je)
-{
- struct perf_entry_data *pd = data;
- FILE *outfp = pd->outfp;
- char *topic_local = pd->topic;
-
- /*
- * TODO: Remove formatting chars after debugging to reduce
- * string lengths.
- */
- fprintf(outfp, "{\n");
-
- if (je->name)
- fprintf(outfp, "\t.name = \"%s\",\n", je->name);
- if (je->event)
- fprintf(outfp, "\t.event = \"%s\",\n", je->event);
- fprintf(outfp, "\t.desc = \"%s\",\n", je->desc);
- if (je->compat)
- fprintf(outfp, "\t.compat = \"%s\",\n", je->compat);
- fprintf(outfp, "\t.topic = \"%s\",\n", topic_local);
- if (je->long_desc && je->long_desc[0])
- fprintf(outfp, "\t.long_desc = \"%s\",\n", je->long_desc);
- if (je->pmu)
- fprintf(outfp, "\t.pmu = \"%s\",\n", je->pmu);
- if (je->unit)
- fprintf(outfp, "\t.unit = \"%s\",\n", je->unit);
- if (je->perpkg)
- fprintf(outfp, "\t.perpkg = \"%s\",\n", je->perpkg);
- if (je->aggr_mode)
- fprintf(outfp, "\t.aggr_mode = \"%d\",\n", convert(je->aggr_mode));
- if (je->metric_expr)
- fprintf(outfp, "\t.metric_expr = \"%s\",\n", je->metric_expr);
- if (je->metric_name)
- fprintf(outfp, "\t.metric_name = \"%s\",\n", je->metric_name);
- if (je->metric_group)
- fprintf(outfp, "\t.metric_group = \"%s\",\n", je->metric_group);
- if (je->deprecated)
- fprintf(outfp, "\t.deprecated = \"%s\",\n", je->deprecated);
- if (je->metric_constraint)
- fprintf(outfp, "\t.metric_constraint = \"%s\",\n", je->metric_constraint);
- fprintf(outfp, "},\n");
-
- return 0;
-}
-
-struct event_struct {
- struct list_head list;
- char *name;
- char *event;
- char *compat;
- char *desc;
- char *long_desc;
- char *pmu;
- char *unit;
- char *perpkg;
- char *aggr_mode;
- char *metric_expr;
- char *metric_name;
- char *metric_group;
- char *deprecated;
- char *metric_constraint;
-};
-
-#define ADD_EVENT_FIELD(field) do { if (je->field) { \
- es->field = strdup(je->field); \
- if (!es->field) \
- goto out_free; \
-} } while (0)
-
-#define FREE_EVENT_FIELD(field) free(es->field)
-
-#define TRY_FIXUP_FIELD(field) do { if (es->field && !je->field) {\
- je->field = strdup(es->field); \
- if (!je->field) \
- return -ENOMEM; \
-} } while (0)
-
-#define FOR_ALL_EVENT_STRUCT_FIELDS(op) do { \
- op(name); \
- op(event); \
- op(desc); \
- op(long_desc); \
- op(pmu); \
- op(unit); \
- op(perpkg); \
- op(aggr_mode); \
- op(metric_expr); \
- op(metric_name); \
- op(metric_group); \
- op(deprecated); \
-} while (0)
-
-static LIST_HEAD(arch_std_events);
-
-static void free_arch_std_events(void)
-{
- struct event_struct *es, *next;
-
- list_for_each_entry_safe(es, next, &arch_std_events, list) {
- FOR_ALL_EVENT_STRUCT_FIELDS(FREE_EVENT_FIELD);
- list_del_init(&es->list);
- free(es);
- }
-}
-
-static int save_arch_std_events(void *data __maybe_unused, struct json_event *je)
-{
- struct event_struct *es;
-
- es = malloc(sizeof(*es));
- if (!es)
- return -ENOMEM;
- memset(es, 0, sizeof(*es));
- FOR_ALL_EVENT_STRUCT_FIELDS(ADD_EVENT_FIELD);
- list_add_tail(&es->list, &arch_std_events);
- return 0;
-out_free:
- FOR_ALL_EVENT_STRUCT_FIELDS(FREE_EVENT_FIELD);
- free(es);
- return -ENOMEM;
-}
-
-static void print_events_table_suffix(FILE *outfp)
-{
- fprintf(outfp, "{\n");
-
- fprintf(outfp, "\t.name = 0,\n");
- fprintf(outfp, "\t.event = 0,\n");
- fprintf(outfp, "\t.desc = 0,\n");
-
- fprintf(outfp, "},\n");
- fprintf(outfp, "};\n");
- close_table = 0;
-}
-
-static struct fixed {
- const char *name;
- const char *event;
-} fixed[] = {
- { "inst_retired.any", "event=0xc0,period=2000003" },
- { "inst_retired.any_p", "event=0xc0,period=2000003" },
- { "cpu_clk_unhalted.ref", "event=0x0,umask=0x03,period=2000003" },
- { "cpu_clk_unhalted.thread", "event=0x3c,period=2000003" },
- { "cpu_clk_unhalted.core", "event=0x3c,period=2000003" },
- { "cpu_clk_unhalted.thread_any", "event=0x3c,any=1,period=2000003" },
- { NULL, NULL},
-};
-
-/*
- * Handle different fixed counter encodings between JSON and perf.
- */
-static char *real_event(const char *name, char *event)
-{
- int i;
-
- if (!name)
- return NULL;
-
- for (i = 0; fixed[i].name; i++)
- if (!strcasecmp(name, fixed[i].name))
- return (char *)fixed[i].event;
- return event;
-}
-
-static int
-try_fixup(const char *fn, char *arch_std, struct json_event *je, char **event)
-{
- /* try to find matching event from arch standard values */
- struct event_struct *es;
-
- list_for_each_entry(es, &arch_std_events, list) {
- if (!strcmp(arch_std, es->name)) {
- FOR_ALL_EVENT_STRUCT_FIELDS(TRY_FIXUP_FIELD);
- *event = je->event;
- return 0;
- }
- }
-
- pr_err("%s: could not find matching %s for %s\n",
- prog, arch_std, fn);
- return -1;
-}
-
-/* Call func with each event in the json file */
-static int json_events(const char *fn,
- int (*func)(void *data, struct json_event *je),
- void *data)
-{
- int err;
- size_t size;
- jsmntok_t *tokens, *tok;
- int i, j, len;
- char *map;
- char buf[128];
-
- if (!fn)
- return -ENOENT;
-
- tokens = parse_json(fn, &map, &size, &len);
- if (!tokens)
- return -EIO;
- EXPECT(tokens->type == JSMN_ARRAY, tokens, "expected top level array");
- tok = tokens + 1;
- for (i = 0; i < tokens->size; i++) {
- char *event = NULL;
- char *extra_desc = NULL;
- char *filter = NULL;
- struct json_event je = {};
- char *arch_std = NULL;
- unsigned long long eventcode = 0;
- unsigned long long configcode = 0;
- struct msrmap *msr = NULL;
- jsmntok_t *msrval = NULL;
- jsmntok_t *precise = NULL;
- jsmntok_t *obj = tok++;
- bool configcode_present = false;
- char *umask = NULL;
- char *cmask = NULL;
- char *inv = NULL;
- char *any = NULL;
- char *edge = NULL;
- char *period = NULL;
- char *fc_mask = NULL;
- char *ch_mask = NULL;
-
- EXPECT(obj->type == JSMN_OBJECT, obj, "expected object");
- for (j = 0; j < obj->size; j += 2) {
- jsmntok_t *field, *val;
- int nz;
- char *s;
-
- field = tok + j;
- EXPECT(field->type == JSMN_STRING, tok + j,
- "Expected field name");
- val = tok + j + 1;
- EXPECT(val->type == JSMN_STRING, tok + j + 1,
- "Expected string value");
-
- nz = !json_streq(map, val, "0");
- /* match_field */
- if (json_streq(map, field, "UMask") && nz) {
- addfield(map, &umask, "", "umask=", val);
- } else if (json_streq(map, field, "CounterMask") && nz) {
- addfield(map, &cmask, "", "cmask=", val);
- } else if (json_streq(map, field, "Invert") && nz) {
- addfield(map, &inv, "", "inv=", val);
- } else if (json_streq(map, field, "AnyThread") && nz) {
- addfield(map, &any, "", "any=", val);
- } else if (json_streq(map, field, "EdgeDetect") && nz) {
- addfield(map, &edge, "", "edge=", val);
- } else if (json_streq(map, field, "SampleAfterValue") && nz) {
- addfield(map, &period, "", "period=", val);
- } else if (json_streq(map, field, "FCMask") && nz) {
- addfield(map, &fc_mask, "", "fc_mask=", val);
- } else if (json_streq(map, field, "PortMask") && nz) {
- addfield(map, &ch_mask, "", "ch_mask=", val);
- } else if (json_streq(map, field, "EventCode")) {
- char *code = NULL;
- addfield(map, &code, "", "", val);
- eventcode |= strtoul(code, NULL, 0);
- free(code);
- } else if (json_streq(map, field, "ConfigCode")) {
- char *code = NULL;
- addfield(map, &code, "", "", val);
- configcode |= strtoul(code, NULL, 0);
- free(code);
- configcode_present = true;
- } else if (json_streq(map, field, "ExtSel")) {
- char *code = NULL;
- addfield(map, &code, "", "", val);
- eventcode |= strtoul(code, NULL, 0) << 8;
- free(code);
- } else if (json_streq(map, field, "EventName")) {
- addfield(map, &je.name, "", "", val);
- } else if (json_streq(map, field, "Compat")) {
- addfield(map, &je.compat, "", "", val);
- } else if (json_streq(map, field, "BriefDescription")) {
- addfield(map, &je.desc, "", "", val);
- fixdesc(je.desc);
- } else if (json_streq(map, field,
- "PublicDescription")) {
- addfield(map, &je.long_desc, "", "", val);
- fixdesc(je.long_desc);
- } else if (json_streq(map, field, "PEBS") && nz) {
- precise = val;
- } else if (json_streq(map, field, "MSRIndex") && nz) {
- msr = lookup_msr(map, val);
- } else if (json_streq(map, field, "MSRValue")) {
- msrval = val;
- } else if (json_streq(map, field, "Errata") &&
- !json_streq(map, val, "null")) {
- addfield(map, &extra_desc, ". ",
- " Spec update: ", val);
- } else if (json_streq(map, field, "Data_LA") && nz) {
- addfield(map, &extra_desc, ". ",
- " Supports address when precise",
- NULL);
- } else if (json_streq(map, field, "Unit")) {
- const char *ppmu;
-
- ppmu = field_to_perf(unit_to_pmu, map, val);
- if (ppmu) {
- je.pmu = strdup(ppmu);
- } else {
- if (!je.pmu)
- je.pmu = strdup("uncore_");
- addfield(map, &je.pmu, "", "", val);
- for (s = je.pmu; *s; s++)
- *s = tolower(*s);
- }
- } else if (json_streq(map, field, "Filter")) {
- addfield(map, &filter, "", "", val);
- } else if (json_streq(map, field, "ScaleUnit")) {
- addfield(map, &je.unit, "", "", val);
- } else if (json_streq(map, field, "PerPkg")) {
- addfield(map, &je.perpkg, "", "", val);
- } else if (json_streq(map, field, "AggregationMode")) {
- addfield(map, &je.aggr_mode, "", "", val);
- } else if (json_streq(map, field, "Deprecated")) {
- addfield(map, &je.deprecated, "", "", val);
- } else if (json_streq(map, field, "MetricName")) {
- addfield(map, &je.metric_name, "", "", val);
- } else if (json_streq(map, field, "MetricGroup")) {
- addfield(map, &je.metric_group, "", "", val);
- } else if (json_streq(map, field, "MetricConstraint")) {
- addfield(map, &je.metric_constraint, "", "", val);
- } else if (json_streq(map, field, "MetricExpr")) {
- addfield(map, &je.metric_expr, "", "", val);
- } else if (json_streq(map, field, "ArchStdEvent")) {
- addfield(map, &arch_std, "", "", val);
- for (s = arch_std; *s; s++)
- *s = tolower(*s);
- }
- /* ignore unknown fields */
- }
- if (precise && je.desc && !strstr(je.desc, "(Precise Event)")) {
- if (json_streq(map, precise, "2"))
- addfield(map, &extra_desc, " ",
- "(Must be precise)", NULL);
- else
- addfield(map, &extra_desc, " ",
- "(Precise event)", NULL);
- }
- if (configcode_present)
- snprintf(buf, sizeof buf, "config=%#llx", configcode);
- else
- snprintf(buf, sizeof buf, "event=%#llx", eventcode);
- addfield(map, &event, ",", buf, NULL);
- if (any)
- addfield(map, &event, ",", any, NULL);
- if (ch_mask)
- addfield(map, &event, ",", ch_mask, NULL);
- if (cmask)
- addfield(map, &event, ",", cmask, NULL);
- if (edge)
- addfield(map, &event, ",", edge, NULL);
- if (fc_mask)
- addfield(map, &event, ",", fc_mask, NULL);
- if (inv)
- addfield(map, &event, ",", inv, NULL);
- if (period)
- addfield(map, &event, ",", period, NULL);
- if (umask)
- addfield(map, &event, ",", umask, NULL);
-
- if (je.desc && extra_desc)
- addfield(map, &je.desc, " ", extra_desc, NULL);
- if (je.long_desc && extra_desc)
- addfield(map, &je.long_desc, " ", extra_desc, NULL);
- if (je.pmu) {
- addfield(map, &je.desc, ". ", "Unit: ", NULL);
- addfield(map, &je.desc, "", je.pmu, NULL);
- addfield(map, &je.desc, "", " ", NULL);
- }
- if (filter)
- addfield(map, &event, ",", filter, NULL);
- if (msr != NULL)
- addfield(map, &event, ",", msr->pname, msrval);
- if (je.name)
- fixname(je.name);
-
- if (arch_std) {
- /*
- * An arch standard event is referenced, so try to
- * fixup any unassigned values.
- */
- err = try_fixup(fn, arch_std, &je, &event);
- if (err)
- goto free_strings;
- }
- je.event = real_event(je.name, event);
- err = func(data, &je);
-free_strings:
- free(umask);
- free(cmask);
- free(inv);
- free(any);
- free(edge);
- free(period);
- free(fc_mask);
- free(ch_mask);
- free(event);
- free(je.desc);
- free(je.name);
- free(je.compat);
- free(je.long_desc);
- free(extra_desc);
- free(je.pmu);
- free(filter);
- free(je.perpkg);
- free(je.aggr_mode);
- free(je.deprecated);
- free(je.unit);
- free(je.metric_expr);
- free(je.metric_name);
- free(je.metric_group);
- free(je.metric_constraint);
- free(arch_std);
-
- if (err)
- break;
- tok += j;
- }
- EXPECT(tok - tokens == len, tok, "unexpected objects at end");
- err = 0;
-out_free:
- free_json(map, size, tokens);
- return err;
-}
-
-static char *file_name_to_table_name(char *fname)
-{
- unsigned int i;
- int n;
- int c;
- char *tblname;
-
- /*
- * Ensure tablename starts with alphabetic character.
- * Derive rest of table name from basename of the JSON file,
- * replacing hyphens and stripping out .json suffix.
- */
- n = asprintf(&tblname, "pme_%s", fname);
- if (n < 0) {
- pr_info("%s: asprintf() error %s for file %s\n", prog,
- strerror(errno), fname);
- return NULL;
- }
-
- for (i = 0; i < strlen(tblname); i++) {
- c = tblname[i];
-
- if (c == '-' || c == '/')
- tblname[i] = '_';
- else if (c == '.') {
- tblname[i] = '\0';
- break;
- } else if (!isalnum(c) && c != '_') {
- pr_err("%s: Invalid character '%c' in file name %s\n",
- prog, c, basename(fname));
- free(tblname);
- tblname = NULL;
- break;
- }
- }
-
- return tblname;
-}
-
-static bool is_sys_dir(char *fname)
-{
- size_t len = strlen(fname), len2 = strlen("/sys");
-
- if (len2 > len)
- return false;
- return !strcmp(fname+len-len2, "/sys");
-}
-
-static void print_mapping_table_prefix(FILE *outfp)
-{
- fprintf(outfp, "const struct pmu_events_map pmu_events_map[] = {\n");
-}
-
-static void print_mapping_table_suffix(FILE *outfp)
-{
- /*
- * Print the terminating, NULL entry.
- */
- fprintf(outfp, "{\n");
- fprintf(outfp, "\t.cpuid = 0,\n");
- fprintf(outfp, "\t.version = 0,\n");
- fprintf(outfp, "\t.type = 0,\n");
- fprintf(outfp, "\t.table = 0,\n");
- fprintf(outfp, "},\n");
-
- /* and finally, the closing curly bracket for the struct */
- fprintf(outfp, "};\n");
-}
-
-static void print_mapping_test_table(FILE *outfp)
-{
- /*
- * Print the mapping table entry for the test SoC/CPU.
- */
- fprintf(outfp, "{\n");
- fprintf(outfp, "\t.cpuid = \"testcpu\",\n");
- fprintf(outfp, "\t.version = \"v1\",\n");
- fprintf(outfp, "\t.type = \"core\",\n");
- fprintf(outfp, "\t.table = pme_test_soc_cpu,\n");
- fprintf(outfp, "},\n");
-}
-
-static void print_system_event_mapping_table_prefix(FILE *outfp)
-{
- fprintf(outfp, "\nconst struct pmu_sys_events pmu_sys_event_tables[] = {");
-}
-
-static void print_system_event_mapping_table_suffix(FILE *outfp)
-{
- fprintf(outfp, "\n\t{\n\t\t.table = 0\n\t},");
- fprintf(outfp, "\n};\n");
-}
-
-static int process_system_event_tables(FILE *outfp)
-{
- struct sys_event_table *sys_event_table;
-
- print_system_event_mapping_table_prefix(outfp);
-
- list_for_each_entry(sys_event_table, &sys_event_tables, list) {
- fprintf(outfp, "\n\t{\n\t\t.table = %s,\n\t\t.name = \"%s\",\n\t},",
- sys_event_table->soc_id,
- sys_event_table->soc_id);
- }
-
- print_system_event_mapping_table_suffix(outfp);
-
- return 0;
-}
-
-static int process_mapfile(FILE *outfp, char *fpath)
-{
- int n = 16384;
- FILE *mapfp;
- char *save = NULL;
- char *line, *p;
- int line_num;
- char *tblname;
- int ret = 0;
-
- pr_info("%s: Processing mapfile %s\n", prog, fpath);
-
- line = malloc(n);
- if (!line)
- return -1;
-
- mapfp = fopen(fpath, "r");
- if (!mapfp) {
- pr_info("%s: Error %s opening %s\n", prog, strerror(errno),
- fpath);
- free(line);
- return -1;
- }
-
- print_mapping_table_prefix(outfp);
-
- /* Skip first line (header) */
- p = fgets(line, n, mapfp);
- if (!p)
- goto out;
-
- line_num = 1;
- while (1) {
- char *cpuid, *version, *type, *fname;
-
- line_num++;
- p = fgets(line, n, mapfp);
- if (!p)
- break;
-
- if (line[0] == '#' || line[0] == '\n')
- continue;
-
- if (line[strlen(line)-1] != '\n') {
- /* TODO Deal with lines longer than 16K */
- pr_info("%s: Mapfile %s: line %d too long, aborting\n",
- prog, fpath, line_num);
- ret = -1;
- goto out;
- }
- line[strlen(line)-1] = '\0';
-
- cpuid = fixregex(strtok_r(p, ",", &save));
- version = strtok_r(NULL, ",", &save);
- fname = strtok_r(NULL, ",", &save);
- type = strtok_r(NULL, ",", &save);
-
- tblname = file_name_to_table_name(fname);
- fprintf(outfp, "{\n");
- fprintf(outfp, "\t.cpuid = \"%s\",\n", cpuid);
- fprintf(outfp, "\t.version = \"%s\",\n", version);
- fprintf(outfp, "\t.type = \"%s\",\n", type);
-
- /*
- * CHECK: We can't use the type (eg "core") field in the
- * table name. For us to do that, we need to somehow tweak
- * the other caller of file_name_to_table(), process_json()
- * to determine the type. process_json() file has no way
- * of knowing these are "core" events unless file name has
- * core in it. If filename has core in it, we can safely
- * ignore the type field here also.
- */
- fprintf(outfp, "\t.table = %s\n", tblname);
- fprintf(outfp, "},\n");
- }
-
-out:
- print_mapping_test_table(outfp);
- print_mapping_table_suffix(outfp);
- fclose(mapfp);
- free(line);
- return ret;
-}
-
-/*
- * If we fail to locate/process JSON and map files, create a NULL mapping
- * table. This would at least allow perf to build even if we can't find/use
- * the aliases.
- */
-static void create_empty_mapping(const char *output_file)
-{
- FILE *outfp;
-
- pr_info("%s: Creating empty pmu_events_map[] table\n", prog);
-
- /* Truncate file to clear any partial writes to it */
- outfp = fopen(output_file, "w");
- if (!outfp) {
- perror("fopen()");
- _Exit(1);
- }
-
- fprintf(outfp, "#include \"pmu-events/pmu-events.h\"\n");
- print_mapping_table_prefix(outfp);
- print_mapping_table_suffix(outfp);
- print_system_event_mapping_table_prefix(outfp);
- print_system_event_mapping_table_suffix(outfp);
- fclose(outfp);
-}
-
-static int get_maxfds(void)
-{
- struct rlimit rlim;
-
- if (getrlimit(RLIMIT_NOFILE, &rlim) == 0)
- return min(rlim.rlim_max / 2, (rlim_t)512);
-
- return 512;
-}
-
-/*
- * nftw() doesn't let us pass an argument to the processing function,
- * so use global variables.
- */
-static FILE *eventsfp;
-static char *mapfile;
-
-static int is_leaf_dir(const char *fpath)
-{
- DIR *d;
- struct dirent *dir;
- int res = 1;
-
- d = opendir(fpath);
- if (!d)
- return 0;
-
- while ((dir = readdir(d)) != NULL) {
- if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
- continue;
-
- if (dir->d_type == DT_DIR) {
- res = 0;
- break;
- } else if (dir->d_type == DT_UNKNOWN) {
- char path[PATH_MAX];
- struct stat st;
-
- sprintf(path, "%s/%s", fpath, dir->d_name);
- if (stat(path, &st))
- break;
-
- if (S_ISDIR(st.st_mode)) {
- res = 0;
- break;
- }
- }
- }
-
- closedir(d);
-
- return res;
-}
-
-static int is_json_file(const char *name)
-{
- const char *suffix;
-
- if (strlen(name) < 5)
- return 0;
-
- suffix = name + strlen(name) - 5;
-
- if (strncmp(suffix, ".json", 5) == 0)
- return 1;
- return 0;
-}
-
-static int preprocess_arch_std_files(const char *fpath, const struct stat *sb,
- int typeflag, struct FTW *ftwbuf)
-{
- int level = ftwbuf->level;
- int is_file = typeflag == FTW_F;
-
- if (level == 1 && is_file && is_json_file(fpath))
- return json_events(fpath, save_arch_std_events, (void *)sb);
-
- return 0;
-}
-
-static int process_one_file(const char *fpath, const struct stat *sb,
- int typeflag, struct FTW *ftwbuf)
-{
- char *tblname, *bname;
- int is_dir = typeflag == FTW_D;
- int is_file = typeflag == FTW_F;
- int level = ftwbuf->level;
- int err = 0;
-
- if (level >= 2 && is_dir) {
- int count = 0;
- /*
- * For level 2 directory, bname will include parent name,
- * like vendor/platform. So search back from platform dir
- * to find this.
- * Something similar applies to a level 3 directory, which is a PMU
- * category folder, like vendor/platform/cpu.
- */
- bname = (char *) fpath + ftwbuf->base - 2;
- for (;;) {
- if (*bname == '/')
- count++;
- if (count == level - 1)
- break;
- bname--;
- }
- bname++;
- } else
- bname = (char *) fpath + ftwbuf->base;
-
- pr_debug("%s %d %7jd %-20s %s\n",
- is_file ? "f" : is_dir ? "d" : "x",
- level, sb->st_size, bname, fpath);
-
- /* base dir or too deep */
- if (level == 0 || level > 4)
- return 0;
-
-
- /* model directory, reset topic */
- if ((level == 1 && is_dir && is_leaf_dir(fpath)) ||
- (level >= 2 && is_dir && is_leaf_dir(fpath))) {
- if (close_table)
- print_events_table_suffix(eventsfp);
-
- /*
- * Drop file name suffix. Replace hyphens with underscores.
- * Fail if the file name contains any non-alphanumeric characters
- * other than underscores.
- */
- tblname = file_name_to_table_name(bname);
- if (!tblname) {
- pr_info("%s: Error determining table name for %s\n", prog,
- bname);
- return -1;
- }
-
- if (is_sys_dir(bname)) {
- struct sys_event_table *sys_event_table;
-
- sys_event_table = malloc(sizeof(*sys_event_table));
- if (!sys_event_table)
- return -1;
-
- sys_event_table->soc_id = strdup(tblname);
- if (!sys_event_table->soc_id) {
- free(sys_event_table);
- return -1;
- }
- list_add_tail(&sys_event_table->list,
- &sys_event_tables);
- }
-
- print_events_table_prefix(eventsfp, tblname);
- return 0;
- }
-
- /*
- * Save the mapfile name for now. We will process mapfile
- * after processing all JSON files (so we can write out the
- * mapping table after all PMU events tables).
- *
- */
- if (level == 1 && is_file) {
- if (!strcmp(bname, "mapfile.csv")) {
- mapfile = strdup(fpath);
- return 0;
- }
- if (is_json_file(bname))
- pr_debug("%s: ArchStd json is preprocessed %s\n", prog, fpath);
- else
- pr_info("%s: Ignoring file %s\n", prog, fpath);
- return 0;
- }
-
- /*
- * If the file name does not have a .json extension,
- * ignore it. It could be a readme.txt for instance.
- */
- if (is_file) {
- if (!is_json_file(bname)) {
- pr_info("%s: Ignoring file without .json suffix %s\n", prog,
- fpath);
- return 0;
- }
- }
-
- if (level > 1 && add_topic(bname))
- return -ENOMEM;
-
- /*
- * Assume all other files are JSON files.
- *
- * If mapfile refers to 'power7_core.json', we create a table
- * named 'power7_core'. Any inconsistencies between the mapfile
- * and directory tree could result in build failure due to table
- * names not being found.
- *
- * At least for now, be strict with processing JSON file names.
- * i.e. if JSON file name cannot be mapped to C-style table name,
- * fail.
- */
- if (is_file) {
- struct perf_entry_data data = {
- .topic = get_topic(),
- .outfp = eventsfp,
- };
-
- err = json_events(fpath, print_events_table_entry, &data);
-
- free(data.topic);
- }
-
- return err;
-}
-
-#ifndef PATH_MAX
-#define PATH_MAX 4096
-#endif
-
-/*
- * Starting in directory 'start_dirname', find the "mapfile.csv" and
- * the set of JSON files for the architecture 'arch'.
- *
- * From each JSON file, create a C-style "PMU events table" from the
- * JSON file (see struct pmu_event).
- *
- * From the mapfile, create a mapping between the CPU revisions and
- * PMU event tables (see struct pmu_events_map).
- *
- * Write out the PMU events tables and the mapping table to pmu-event.c.
- */
-int main(int argc, char *argv[])
-{
- int rc, ret = 0, empty_map = 0;
- int maxfds;
- char ldirname[PATH_MAX];
- const char *arch;
- const char *output_file;
- const char *start_dirname;
- const char *err_string_ext = "";
- struct stat stbuf;
-
- prog = basename(argv[0]);
- if (argc < 4) {
- pr_err("Usage: %s <arch> <starting_dir> <output_file>\n", prog);
- return 1;
- }
-
- arch = argv[1];
- start_dirname = argv[2];
- output_file = argv[3];
-
- if (argc > 4)
- verbose = atoi(argv[4]);
-
- eventsfp = fopen(output_file, "w");
- if (!eventsfp) {
- pr_err("%s Unable to create required file %s (%s)\n",
- prog, output_file, strerror(errno));
- return 2;
- }
-
- sprintf(ldirname, "%s/%s", start_dirname, arch);
-
- /* If architecture does not have any event lists, bail out */
- if (stat(ldirname, &stbuf) < 0) {
- pr_info("%s: Arch %s has no PMU event lists\n", prog, arch);
- empty_map = 1;
- goto err_close_eventsfp;
- }
-
- /* Include pmu-events.h first */
- fprintf(eventsfp, "#include \"pmu-events/pmu-events.h\"\n");
-
- /*
- * The mapfile allows multiple CPUids to point to the same JSON file,
- * so it is not clear whether there is a need for symlinks within the
- * pmu-events directory.
- *
- * For now, treat symlinks of JSON files as regular files and create
- * separate tables for each symlink (presumably, each symlink refers
- * to specific version of the CPU).
- */
-
- maxfds = get_maxfds();
- rc = nftw(ldirname, preprocess_arch_std_files, maxfds, 0);
- if (rc)
- goto err_processing_std_arch_event_dir;
-
- rc = nftw(ldirname, process_one_file, maxfds, 0);
- if (rc)
- goto err_processing_dir;
-
- sprintf(ldirname, "%s/test", start_dirname);
-
- rc = nftw(ldirname, preprocess_arch_std_files, maxfds, 0);
- if (rc)
- goto err_processing_std_arch_event_dir;
-
- rc = nftw(ldirname, process_one_file, maxfds, 0);
- if (rc)
- goto err_processing_dir;
-
- if (close_table)
- print_events_table_suffix(eventsfp);
-
- if (!mapfile) {
- pr_info("%s: No CPU->JSON mapping?\n", prog);
- empty_map = 1;
- goto err_close_eventsfp;
- }
-
- rc = process_mapfile(eventsfp, mapfile);
- if (rc) {
- pr_info("%s: Error processing mapfile %s\n", prog, mapfile);
- /* Make build fail */
- ret = 1;
- goto err_close_eventsfp;
- }
-
- rc = process_system_event_tables(eventsfp);
- fclose(eventsfp);
- if (rc) {
- ret = 1;
- goto err_out;
- }
-
- free_arch_std_events();
- free_sys_event_tables();
- free(mapfile);
- return 0;
-
-err_processing_std_arch_event_dir:
- err_string_ext = " for std arch event";
-err_processing_dir:
- if (verbose) {
- pr_info("%s: Error walking file tree %s%s\n", prog, ldirname,
- err_string_ext);
- empty_map = 1;
- } else if (rc < 0) {
- ret = 1;
- } else {
- empty_map = 1;
- }
-err_close_eventsfp:
- fclose(eventsfp);
- if (empty_map)
- create_empty_mapping(output_file);
-err_out:
- free_arch_std_events();
- free_sys_event_tables();
- free(mapfile);
- return ret;
-}
diff --git a/tools/perf/pmu-events/jevents.py b/tools/perf/pmu-events/jevents.py
new file mode 100755
index 000000000000..0daa3e007528
--- /dev/null
+++ b/tools/perf/pmu-events/jevents.py
@@ -0,0 +1,725 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+"""Convert directories of JSON events to C code."""
+import argparse
+import csv
+import json
+import os
+import sys
+from typing import (Callable, Dict, Optional, Sequence, Set, Tuple)
+import collections
+
+# Global command line arguments.
+_args = None
+# List of event tables generated from "/sys" directories.
+_sys_event_tables = []
+# Map from an event name to an architecture standard
+# JsonEvent. Architecture standard events are in json files in the top
+# f'{_args.starting_dir}/{_args.arch}' directory.
+_arch_std_events = {}
+# Track whether an events table is currently being defined and needs closing.
+_close_table = False
+# Events to write out when the table is closed
+_pending_events = []
+# Global BigCString shared by all structures.
+_bcs = None
+# Order specific JsonEvent attributes will be visited.
+_json_event_attributes = [
+ # cmp_sevent related attributes.
+ 'name', 'pmu', 'topic', 'desc', 'metric_name', 'metric_group',
+ # Seems useful, put it early.
+ 'event',
+ # Short things in alphabetical order.
+ 'aggr_mode', 'compat', 'deprecated', 'perpkg', 'unit',
+ # Longer things (the last won't be iterated over during decompress).
+ 'metric_constraint', 'metric_expr', 'long_desc'
+]
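+# Note: each compact event is stored as the attribute values above, in
+# this order, each terminated by \000 (an empty attribute is just
+# \000); the generated decompress() walks them back in the same order.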
+
+
+def removesuffix(s: str, suffix: str) -> str:
+ """Remove the suffix from a string
+
+ The removesuffix function is added to str in Python 3.9. We aim for 3.6
+ compatibility and so provide our own function here.
+ """
+ return s[0:-len(suffix)] if s.endswith(suffix) else s
+
+
+def file_name_to_table_name(parents: Sequence[str], dirname: str) -> str:
+ """Generate a C table name from directory names."""
+ tblname = 'pme'
+ for p in parents:
+ tblname += '_' + p
+ tblname += '_' + dirname
+ return tblname.replace('-', '_')
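+
+# For example (directory names for illustration only):
+# file_name_to_table_name(['arm64', 'arm'], 'cortex-a53') returns
+# 'pme_arm64_arm_cortex_a53'.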
+
+def c_len(s: str) -> int:
+ """Return the length of s a C string
+
+ This doesn't handle all escape characters properly. It first assumes
+ all \ are for escaping, it then adjusts as it will have over counted
+ \\. The code uses \000 rather than \0 as a terminator as an adjacent
+ number would be folded into a string of \0 (ie. "\0" + "5" doesn't
+ equal a terminator followed by the number 5 but the escape of
+ \05). The code adjusts for \000 but not properly for all octal, hex
+ or unicode values.
+ """
+ try:
+ utf = s.encode(encoding='utf-8',errors='strict')
+ except:
+ print(f'broken string {s}')
+ raise
+ return len(utf) - utf.count(b'\\') + utf.count(b'\\\\') - (utf.count(b'\\000') * 2)
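+
+# Worked example (illustration only): c_len('abc') == 3, and for
+# 'abc' + '\\000' (abc followed by the four characters of the C escape
+# \000) the escape counts as a single byte, so c_len() returns 4.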
+
+class BigCString:
+ """A class to hold many strings concatenated together.
+
+ Generating a large number of stand-alone C strings creates a large
+ number of relocations in position independent code. The BigCString
+  is a helper for this case. It builds a single string that contains
+  all the other C strings (to avoid memory issues the string itself is
+  held as a list of strings). The offsets within the big string are
+  recorded and, when stored to disk, these don't need relocation. To
+  reduce the size of the string further, identical strings are merged.
+  If a longer string ends with the same value as a shorter string,
+  these entries are also merged.
+ """
+ strings: Set[str]
+ big_string: Sequence[str]
+ offsets: Dict[str, int]
+
+ def __init__(self):
+ self.strings = set()
+
+ def add(self, s: str) -> None:
+ """Called to add to the big string."""
+ self.strings.add(s)
+
+ def compute(self) -> None:
+ """Called once all strings are added to compute the string and offsets."""
+
+ folded_strings = {}
+    # Determine if two strings can be folded, i.e. let one string use
+    # the end of another. First reverse all strings and sort them.
+    sorted_reversed_strings = sorted([x[::-1] for x in self.strings])
+
+    # Strings 'xyz' and 'yz' will now be [ 'zy', 'zyx' ]. Scan forward
+    # for each string to see if there is a better candidate to fold it
+    # into: in the example, rather than using 'yz' we can use 'xyz' at
+    # an offset of 1. We record which string can be folded into which
+    # in folded_strings; we don't need to record the offset as it is
+    # trivially computed from the string lengths.
+ for pos,s in enumerate(sorted_reversed_strings):
+ best_pos = pos
+ for check_pos in range(pos + 1, len(sorted_reversed_strings)):
+ if sorted_reversed_strings[check_pos].startswith(s):
+ best_pos = check_pos
+ else:
+ break
+ if pos != best_pos:
+ folded_strings[s[::-1]] = sorted_reversed_strings[best_pos][::-1]
+
+ # Compute reverse mappings for debugging.
+ fold_into_strings = collections.defaultdict(set)
+ for key, val in folded_strings.items():
+ if key != val:
+ fold_into_strings[val].add(key)
+
+ # big_string_offset is the current location within the C string
+ # being appended to - comments, etc. don't count. big_string is
+ # the string contents represented as a list. Strings are immutable
+ # in Python and so appending to one causes memory issues, while
+ # lists are mutable.
+ big_string_offset = 0
+ self.big_string = []
+ self.offsets = {}
+
+ # Emit all strings that aren't folded in a sorted manner.
+ for s in sorted(self.strings):
+ if s not in folded_strings:
+ self.offsets[s] = big_string_offset
+ self.big_string.append(f'/* offset={big_string_offset} */ "')
+ self.big_string.append(s)
+ self.big_string.append('"')
+ if s in fold_into_strings:
+ self.big_string.append(' /* also: ' + ', '.join(fold_into_strings[s]) + ' */')
+ self.big_string.append('\n')
+ big_string_offset += c_len(s)
+ continue
+
+ # Compute the offsets of the folded strings.
+ for s in folded_strings.keys():
+ assert s not in self.offsets
+ folded_s = folded_strings[s]
+ self.offsets[s] = self.offsets[folded_s] + c_len(folded_s) - c_len(s)
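+    # Worked example (illustration only): if the only strings added
+    # were 'xyz' and 'yz', then 'yz' folds into 'xyz'; only "xyz" is
+    # emitted and offsets['yz'] == offsets['xyz'] + 1, i.e.
+    # c_len('xyz') - c_len('yz') bytes past the start of "xyz".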
+
+_bcs = BigCString()
+
+class JsonEvent:
+ """Representation of an event loaded from a json file dictionary."""
+
+ def __init__(self, jd: dict):
+ """Constructor passed the dictionary of parsed json values."""
+
+ def llx(x: int) -> str:
+ """Convert an int to a string similar to a printf modifier of %#llx."""
+ return '0' if x == 0 else hex(x)
+
+ def fixdesc(s: str) -> str:
+ """Fix formatting issue for the desc string."""
+ if s is None:
+ return None
+ return removesuffix(removesuffix(removesuffix(s, '. '),
+ '. '), '.').replace('\n', '\\n').replace(
+ '\"', '\\"').replace('\r', '\\r')
+
+ def convert_aggr_mode(aggr_mode: str) -> Optional[str]:
+ """Returns the aggr_mode_class enum value associated with the JSON string."""
+ if not aggr_mode:
+ return None
+ aggr_mode_to_enum = {
+ 'PerChip': '1',
+ 'PerCore': '2',
+ }
+ return aggr_mode_to_enum[aggr_mode]
+
+ def lookup_msr(num: str) -> Optional[str]:
+ """Converts the msr number, or first in a list to the appropriate event field."""
+ if not num:
+ return None
+ msrmap = {
+ 0x3F6: 'ldlat=',
+ 0x1A6: 'offcore_rsp=',
+ 0x1A7: 'offcore_rsp=',
+ 0x3F7: 'frontend=',
+ }
+ return msrmap[int(num.split(',', 1)[0], 0)]
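+    # For example (illustration only): lookup_msr('0x3F6') yields
+    # 'ldlat=', and only the first number of a comma-separated list is
+    # considered.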
+
+ def real_event(name: str, event: str) -> Optional[str]:
+ """Convert well known event names to an event string otherwise use the event argument."""
+ fixed = {
+ 'inst_retired.any': 'event=0xc0,period=2000003',
+ 'inst_retired.any_p': 'event=0xc0,period=2000003',
+ 'cpu_clk_unhalted.ref': 'event=0x0,umask=0x03,period=2000003',
+ 'cpu_clk_unhalted.thread': 'event=0x3c,period=2000003',
+ 'cpu_clk_unhalted.core': 'event=0x3c,period=2000003',
+ 'cpu_clk_unhalted.thread_any': 'event=0x3c,any=1,period=2000003',
+ }
+ if not name:
+ return None
+ if name.lower() in fixed:
+ return fixed[name.lower()]
+ return event
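+    # For example (illustration only):
+    # real_event('inst_retired.any', 'event=0xc0') returns
+    # 'event=0xc0,period=2000003'; names not in the fixed table return
+    # the event argument unchanged.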
+
+ def unit_to_pmu(unit: str) -> Optional[str]:
+ """Convert a JSON Unit to Linux PMU name."""
+ if not unit:
+ return None
+ # Comment brought over from jevents.c:
+ # it's not realistic to keep adding these, we need something more scalable ...
+ table = {
+ 'CBO': 'uncore_cbox',
+ 'QPI LL': 'uncore_qpi',
+ 'SBO': 'uncore_sbox',
+ 'iMPH-U': 'uncore_arb',
+ 'CPU-M-CF': 'cpum_cf',
+ 'CPU-M-SF': 'cpum_sf',
+ 'PAI-CRYPTO' : 'pai_crypto',
+ 'UPI LL': 'uncore_upi',
+ 'hisi_sicl,cpa': 'hisi_sicl,cpa',
+ 'hisi_sccl,ddrc': 'hisi_sccl,ddrc',
+ 'hisi_sccl,hha': 'hisi_sccl,hha',
+ 'hisi_sccl,l3c': 'hisi_sccl,l3c',
+ 'imx8_ddr': 'imx8_ddr',
+ 'L3PMC': 'amd_l3',
+ 'DFPMC': 'amd_df',
+ 'cpu_core': 'cpu_core',
+ 'cpu_atom': 'cpu_atom',
+ }
+ return table[unit] if unit in table else f'uncore_{unit.lower()}'
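+    # For example: unit_to_pmu('CBO') is 'uncore_cbox', while a unit
+    # missing from the table, say a hypothetical 'FOO', falls back to
+    # 'uncore_foo'.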
+
+ eventcode = 0
+ if 'EventCode' in jd:
+ eventcode = int(jd['EventCode'].split(',', 1)[0], 0)
+ if 'ExtSel' in jd:
+ eventcode |= int(jd['ExtSel']) << 8
+ configcode = int(jd['ConfigCode'], 0) if 'ConfigCode' in jd else None
+ self.name = jd['EventName'].lower() if 'EventName' in jd else None
+ self.topic = ''
+ self.compat = jd.get('Compat')
+ self.desc = fixdesc(jd.get('BriefDescription'))
+ self.long_desc = fixdesc(jd.get('PublicDescription'))
+ precise = jd.get('PEBS')
+ msr = lookup_msr(jd.get('MSRIndex'))
+ msrval = jd.get('MSRValue')
+ extra_desc = ''
+    if 'Data_LA' in jd:
+      extra_desc += ' Supports address when precise'
+      if 'Errata' in jd:
+        extra_desc += '.'
+    if 'Errata' in jd:
+      extra_desc += ' Spec update: ' + jd['Errata']
+ self.pmu = unit_to_pmu(jd.get('Unit'))
+ filter = jd.get('Filter')
+ self.unit = jd.get('ScaleUnit')
+ self.perpkg = jd.get('PerPkg')
+ self.aggr_mode = convert_aggr_mode(jd.get('AggregationMode'))
+ self.deprecated = jd.get('Deprecated')
+ self.metric_name = jd.get('MetricName')
+ self.metric_group = jd.get('MetricGroup')
+ self.metric_constraint = jd.get('MetricConstraint')
+ self.metric_expr = jd.get('MetricExpr')
+ if self.metric_expr:
+ self.metric_expr = self.metric_expr.replace('\\', '\\\\')
+ arch_std = jd.get('ArchStdEvent')
+ if precise and self.desc and '(Precise Event)' not in self.desc:
+ extra_desc += ' (Must be precise)' if precise == '2' else (' (Precise '
+ 'event)')
+ event = f'config={llx(configcode)}' if configcode is not None else f'event={llx(eventcode)}'
+ event_fields = [
+ ('AnyThread', 'any='),
+ ('PortMask', 'ch_mask='),
+ ('CounterMask', 'cmask='),
+ ('EdgeDetect', 'edge='),
+ ('FCMask', 'fc_mask='),
+ ('Invert', 'inv='),
+ ('SampleAfterValue', 'period='),
+ ('UMask', 'umask='),
+ ]
+ for key, value in event_fields:
+ if key in jd and jd[key] != '0':
+ event += ',' + value + jd[key]
+ if filter:
+ event += f',{filter}'
+ if msr:
+ event += f',{msr}{msrval}'
+ if self.desc and extra_desc:
+ self.desc += extra_desc
+ if self.long_desc and extra_desc:
+ self.long_desc += extra_desc
+ if self.pmu:
+ if self.desc and not self.desc.endswith('. '):
+ self.desc += '. '
+ self.desc = (self.desc if self.desc else '') + ('Unit: ' + self.pmu + ' ')
+ if arch_std and arch_std.lower() in _arch_std_events:
+ event = _arch_std_events[arch_std.lower()].event
+ # Copy from the architecture standard event to self for undefined fields.
+ for attr, value in _arch_std_events[arch_std.lower()].__dict__.items():
+ if hasattr(self, attr) and not getattr(self, attr):
+ setattr(self, attr, value)
+
+ self.event = real_event(self.name, event)
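+    # Illustrative end-to-end sketch (hypothetical JSON entry):
+    # {"EventName": "FOO.BAR", "EventCode": "0xc4", "UMask": "0x20"}
+    # yields name 'foo.bar' and event 'event=0xc4,umask=0x20'.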
+
+ def __repr__(self) -> str:
+ """String representation primarily for debugging."""
+ s = '{\n'
+ for attr, value in self.__dict__.items():
+ if value:
+ s += f'\t{attr} = {value},\n'
+ return s + '}'
+
+ def build_c_string(self) -> str:
+ s = ''
+ for attr in _json_event_attributes:
+ x = getattr(self, attr)
+ s += f'{x}\\000' if x else '\\000'
+ return s
+
+ def to_c_string(self) -> str:
+ """Representation of the event as a C struct initializer."""
+
+ s = self.build_c_string()
+ return f'{{ { _bcs.offsets[s] } }}, /* {s} */\n'
+
+
+def read_json_events(path: str, topic: str) -> Sequence[JsonEvent]:
+ """Read json events from the specified file."""
+
+ try:
+ result = json.load(open(path), object_hook=JsonEvent)
+ except BaseException as err:
+ print(f"Exception processing {path}")
+ raise
+ for event in result:
+ event.topic = topic
+ return result
+
+def preprocess_arch_std_files(archpath: str) -> None:
+ """Read in all architecture standard events."""
+ global _arch_std_events
+ for item in os.scandir(archpath):
+ if item.is_file() and item.name.endswith('.json'):
+ for event in read_json_events(item.path, topic=''):
+ if event.name:
+ _arch_std_events[event.name.lower()] = event
+
+
+def print_events_table_prefix(tblname: str) -> None:
+ """Called when a new events table is started."""
+ global _close_table
+ if _close_table:
+ raise IOError('Printing table prefix but last table has no suffix')
+ _args.output_file.write(f'static const struct compact_pmu_event {tblname}[] = {{\n')
+ _close_table = True
+
+
+def add_events_table_entries(item: os.DirEntry, topic: str) -> None:
+ """Add contents of file to _pending_events table."""
+ if not _close_table:
+ raise IOError('Table entries missing prefix')
+ for e in read_json_events(item.path, topic):
+ _pending_events.append(e)
+
+
+def print_events_table_suffix() -> None:
+ """Optionally close events table."""
+
+ def event_cmp_key(j: JsonEvent) -> Tuple[bool, str, str, str, str]:
+ def fix_none(s: Optional[str]) -> str:
+ if s is None:
+ return ''
+ return s
+
+ return (j.desc is not None, fix_none(j.topic), fix_none(j.name), fix_none(j.pmu),
+ fix_none(j.metric_name))
+
+ global _close_table
+ if not _close_table:
+ return
+
+ global _pending_events
+ for event in sorted(_pending_events, key=event_cmp_key):
+ _args.output_file.write(event.to_c_string())
+ _pending_events = []
+
+ _args.output_file.write('};\n\n')
+ _close_table = False
+
+def get_topic(topic: str) -> str:
+ if topic.endswith('metrics.json'):
+ return 'metrics'
+ return removesuffix(topic, '.json').replace('-', ' ')
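+
+# For example: get_topic('floating-point.json') -> 'floating point',
+# while any file name ending in 'metrics.json' maps to 'metrics'
+# (file names illustrative).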
+
+def preprocess_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
+
+ if item.is_dir():
+ return
+
+ # base dir or too deep
+ level = len(parents)
+ if level == 0 or level > 4:
+ return
+
+ # Ignore other directories. If the file name does not have a .json
+ # extension, ignore it. It could be a readme.txt for instance.
+ if not item.is_file() or not item.name.endswith('.json'):
+ return
+
+ topic = get_topic(item.name)
+ for event in read_json_events(item.path, topic):
+ _bcs.add(event.build_c_string())
+
+def process_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
+ """Process a JSON file during the main walk."""
+ global _sys_event_tables
+
+ def is_leaf_dir(path: str) -> bool:
+ for item in os.scandir(path):
+ if item.is_dir():
+ return False
+ return True
+
+ # model directory, reset topic
+ if item.is_dir() and is_leaf_dir(item.path):
+ print_events_table_suffix()
+
+ tblname = file_name_to_table_name(parents, item.name)
+ if item.name == 'sys':
+ _sys_event_tables.append(tblname)
+ print_events_table_prefix(tblname)
+ return
+
+ # base dir or too deep
+ level = len(parents)
+ if level == 0 or level > 4:
+ return
+
+ # Ignore other directories. If the file name does not have a .json
+ # extension, ignore it. It could be a readme.txt for instance.
+ if not item.is_file() or not item.name.endswith('.json'):
+ return
+
+ add_events_table_entries(item, get_topic(item.name))
+
+
+def print_mapping_table(archs: Sequence[str]) -> None:
+ """Read the mapfile and generate the struct from cpuid string to event table."""
+ _args.output_file.write("""
+/* Struct used to make the PMU event table implementation opaque to callers. */
+struct pmu_events_table {
+ const struct compact_pmu_event *entries;
+ size_t length;
+};
+
+/*
+ * Map a CPU to its table of PMU events. The CPU is identified by the
+ * cpuid field, which is an arch-specific identifier for the CPU.
+ * The identifier specified in tools/perf/pmu-events/arch/xxx/mapfile
+ * must match the get_cpuid_str() in tools/perf/arch/xxx/util/header.c)
+ *
+ * The cpuid can contain any character other than the comma.
+ */
+struct pmu_events_map {
+ const char *arch;
+ const char *cpuid;
+ struct pmu_events_table table;
+};
+
+/*
+ * Global table mapping each known CPU for the architecture to its
+ * table of PMU events.
+ */
+const struct pmu_events_map pmu_events_map[] = {
+""")
+ for arch in archs:
+ if arch == 'test':
+ _args.output_file.write("""{
+\t.arch = "testarch",
+\t.cpuid = "testcpu",
+\t.table = {
+\t.entries = pme_test_soc_cpu,
+\t.length = ARRAY_SIZE(pme_test_soc_cpu),
+\t}
+},
+""")
+ else:
+ with open(f'{_args.starting_dir}/{arch}/mapfile.csv') as csvfile:
+ table = csv.reader(csvfile)
+ first = True
+ for row in table:
+ # Skip the first row or any row beginning with #.
+ if not first and len(row) > 0 and not row[0].startswith('#'):
+ tblname = file_name_to_table_name([], row[2].replace('/', '_'))
+ cpuid = row[0].replace('\\', '\\\\')
+ _args.output_file.write(f"""{{
+\t.arch = "{arch}",
+\t.cpuid = "{cpuid}",
+\t.table = {{
+\t\t.entries = {tblname},
+\t\t.length = ARRAY_SIZE({tblname})
+\t}}
+}},
+""")
+ first = False
+
+ _args.output_file.write("""{
+\t.arch = 0,
+\t.cpuid = 0,
+\t.table = { 0, 0 },
+}
+};
+""")
+
+
+def print_system_mapping_table() -> None:
+ """C struct mapping table array for tables from /sys directories."""
+ _args.output_file.write("""
+struct pmu_sys_events {
+\tconst char *name;
+\tstruct pmu_events_table table;
+};
+
+static const struct pmu_sys_events pmu_sys_event_tables[] = {
+""")
+ for tblname in _sys_event_tables:
+ _args.output_file.write(f"""\t{{
+\t\t.table = {{
+\t\t\t.entries = {tblname},
+\t\t\t.length = ARRAY_SIZE({tblname})
+\t\t}},
+\t\t.name = \"{tblname}\",
+\t}},
+""")
+ _args.output_file.write("""\t{
+\t\t.table = { 0, 0 }
+\t},
+};
+
+static void decompress(int offset, struct pmu_event *pe)
+{
+\tconst char *p = &big_c_string[offset];
+""")
+ for attr in _json_event_attributes:
+ _args.output_file.write(f"""
+\tpe->{attr} = (*p == '\\0' ? NULL : p);
+""")
+ if attr == _json_event_attributes[-1]:
+ continue
+ _args.output_file.write('\twhile (*p++);')
+ _args.output_file.write("""}
+
+int pmu_events_table_for_each_event(const struct pmu_events_table *table,
+ pmu_event_iter_fn fn,
+ void *data)
+{
+ for (size_t i = 0; i < table->length; i++) {
+ struct pmu_event pe;
+ int ret;
+
+ decompress(table->entries[i].offset, &pe);
+ ret = fn(&pe, table, data);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+const struct pmu_events_table *perf_pmu__find_table(struct perf_pmu *pmu)
+{
+ const struct pmu_events_table *table = NULL;
+ char *cpuid = perf_pmu__getcpuid(pmu);
+ int i;
+
+ /* On some platforms which use a cpus map, cpuid can be NULL for
+ * PMUs other than CORE PMUs.
+ */
+ if (!cpuid)
+ return NULL;
+
+ i = 0;
+ for (;;) {
+ const struct pmu_events_map *map = &pmu_events_map[i++];
+ if (!map->arch)
+ break;
+
+ if (!strcmp_cpuid_str(map->cpuid, cpuid)) {
+ table = &map->table;
+ break;
+ }
+ }
+ free(cpuid);
+ return table;
+}
+
+const struct pmu_events_table *find_core_events_table(const char *arch, const char *cpuid)
+{
+ for (const struct pmu_events_map *tables = &pmu_events_map[0];
+ tables->arch;
+ tables++) {
+ if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
+ return &tables->table;
+ }
+ return NULL;
+}
+
+int pmu_for_each_core_event(pmu_event_iter_fn fn, void *data)
+{
+ for (const struct pmu_events_map *tables = &pmu_events_map[0];
+ tables->arch;
+ tables++) {
+ int ret = pmu_events_table_for_each_event(&tables->table, fn, data);
+
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+const struct pmu_events_table *find_sys_events_table(const char *name)
+{
+ for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
+ tables->name;
+ tables++) {
+ if (!strcmp(tables->name, name))
+ return &tables->table;
+ }
+ return NULL;
+}
+
+int pmu_for_each_sys_event(pmu_event_iter_fn fn, void *data)
+{
+ for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
+ tables->name;
+ tables++) {
+ int ret = pmu_events_table_for_each_event(&tables->table, fn, data);
+
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+""")
+
+
+def main() -> None:
+ global _args
+
+ def dir_path(path: str) -> str:
+ """Validate path is a directory for argparse."""
+ if os.path.isdir(path):
+ return path
+ raise argparse.ArgumentTypeError(f'\'{path}\' is not a valid directory')
+
+ def ftw(path: str, parents: Sequence[str],
+ action: Callable[[Sequence[str], os.DirEntry], None]) -> None:
+ """Replicate the directory/file walking behavior of C's file tree walk."""
+ for item in os.scandir(path):
+ action(parents, item)
+ if item.is_dir():
+ ftw(item.path, parents + [item.name], action)
+
+ ap = argparse.ArgumentParser()
+ ap.add_argument('arch', help='Architecture name like x86')
+ ap.add_argument(
+ 'starting_dir',
+ type=dir_path,
+ help='Root of tree containing architecture directories containing json files'
+ )
+ ap.add_argument(
+ 'output_file', type=argparse.FileType('w', encoding='utf-8'), nargs='?', default=sys.stdout)
+ _args = ap.parse_args()
+
+ _args.output_file.write("""
+#include "pmu-events/pmu-events.h"
+#include "util/header.h"
+#include "util/pmu.h"
+#include <string.h>
+#include <stddef.h>
+
+struct compact_pmu_event {
+ int offset;
+};
+
+""")
+ archs = []
+ for item in os.scandir(_args.starting_dir):
+ if not item.is_dir():
+ continue
+ if item.name == _args.arch or _args.arch == 'all' or item.name == 'test':
+ archs.append(item.name)
+
+ if len(archs) < 2:
+ raise IOError(f'Missing architecture directory \'{_args.arch}\'')
+
+ archs.sort()
+ for arch in archs:
+ arch_path = f'{_args.starting_dir}/{arch}'
+ preprocess_arch_std_files(arch_path)
+ ftw(arch_path, [], preprocess_one_file)
+
+ _bcs.compute()
+ _args.output_file.write('static const char *const big_c_string =\n')
+ for s in _bcs.big_string:
+ _args.output_file.write(s)
+ _args.output_file.write(';\n\n')
+ for arch in archs:
+ arch_path = f'{_args.starting_dir}/{arch}'
+ ftw(arch_path, [], process_one_file)
+ print_events_table_suffix()
+
+ print_mapping_table(archs)
+ print_system_mapping_table()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/tools/perf/pmu-events/jsmn.c b/tools/perf/pmu-events/jsmn.c
deleted file mode 100644
index 831dc44c4558..000000000000
--- a/tools/perf/pmu-events/jsmn.c
+++ /dev/null
@@ -1,352 +0,0 @@
-/*
- * Copyright (c) 2010 Serge A. Zaitsev
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- *
- * Slightly modified by AK to not assume 0 terminated input.
- */
-
-#include <stdlib.h>
-#include "jsmn.h"
-#define JSMN_STRICT
-
-/*
- * Allocates a fresh unused token from the token pool.
- */
-static jsmntok_t *jsmn_alloc_token(jsmn_parser *parser,
- jsmntok_t *tokens, size_t num_tokens)
-{
- jsmntok_t *tok;
-
- if ((unsigned)parser->toknext >= num_tokens)
- return NULL;
- tok = &tokens[parser->toknext++];
- tok->start = tok->end = -1;
- tok->size = 0;
- return tok;
-}
-
-/*
- * Fills token type and boundaries.
- */
-static void jsmn_fill_token(jsmntok_t *token, jsmntype_t type,
- int start, int end)
-{
- token->type = type;
- token->start = start;
- token->end = end;
- token->size = 0;
-}
-
-/*
- * Fills next available token with JSON primitive.
- */
-static jsmnerr_t jsmn_parse_primitive(jsmn_parser *parser, const char *js,
- size_t len,
- jsmntok_t *tokens, size_t num_tokens)
-{
- jsmntok_t *token;
- int start;
-
- start = parser->pos;
-
- for (; parser->pos < len; parser->pos++) {
- switch (js[parser->pos]) {
-#ifndef JSMN_STRICT
- /*
- * In strict mode primitive must be followed by ","
- * or "}" or "]"
- */
- case ':':
-#endif
- case '\t':
- case '\r':
- case '\n':
- case ' ':
- case ',':
- case ']':
- case '}':
- goto found;
- default:
- break;
- }
- if (js[parser->pos] < 32 || js[parser->pos] >= 127) {
- parser->pos = start;
- return JSMN_ERROR_INVAL;
- }
- }
-#ifdef JSMN_STRICT
- /*
- * In strict mode primitive must be followed by a
- * comma/object/array.
- */
- parser->pos = start;
- return JSMN_ERROR_PART;
-#endif
-
-found:
- token = jsmn_alloc_token(parser, tokens, num_tokens);
- if (token == NULL) {
- parser->pos = start;
- return JSMN_ERROR_NOMEM;
- }
- jsmn_fill_token(token, JSMN_PRIMITIVE, start, parser->pos);
- parser->pos--; /* parent sees closing brackets */
- return JSMN_SUCCESS;
-}
-
-/*
- * Fills next token with JSON string.
- */
-static jsmnerr_t jsmn_parse_string(jsmn_parser *parser, const char *js,
- size_t len,
- jsmntok_t *tokens, size_t num_tokens)
-{
- jsmntok_t *token;
- int start = parser->pos;
-
- /* Skip starting quote */
- parser->pos++;
-
- for (; parser->pos < len; parser->pos++) {
- char c = js[parser->pos];
-
- /* Quote: end of string */
- if (c == '\"') {
- token = jsmn_alloc_token(parser, tokens, num_tokens);
- if (token == NULL) {
- parser->pos = start;
- return JSMN_ERROR_NOMEM;
- }
- jsmn_fill_token(token, JSMN_STRING, start+1,
- parser->pos);
- return JSMN_SUCCESS;
- }
-
- /* Backslash: Quoted symbol expected */
- if (c == '\\') {
- parser->pos++;
- switch (js[parser->pos]) {
- /* Allowed escaped symbols */
- case '\"':
- case '/':
- case '\\':
- case 'b':
- case 'f':
- case 'r':
- case 'n':
- case 't':
- break;
- /* Allows escaped symbol \uXXXX */
- case 'u':
- /* TODO */
- break;
- /* Unexpected symbol */
- default:
- parser->pos = start;
- return JSMN_ERROR_INVAL;
- }
- }
- }
- parser->pos = start;
- return JSMN_ERROR_PART;
-}
-
-/*
- * Parse JSON string and fill tokens.
- */
-jsmnerr_t jsmn_parse(jsmn_parser *parser, const char *js, size_t len,
- jsmntok_t *tokens, unsigned int num_tokens)
-{
- jsmnerr_t r;
- int i;
- jsmntok_t *token;
-#ifdef JSMN_STRICT
- /*
- * Keeps track of whether a new object/list/primitive is expected. New items are only
- * allowed after an opening brace, comma or colon. A closing brace after a comma is not
- * valid JSON.
- */
- int expecting_item = 1;
-#endif
-
- for (; parser->pos < len; parser->pos++) {
- char c;
- jsmntype_t type;
-
- c = js[parser->pos];
- switch (c) {
- case '{':
- case '[':
-#ifdef JSMN_STRICT
- if (!expecting_item)
- return JSMN_ERROR_INVAL;
-#endif
- token = jsmn_alloc_token(parser, tokens, num_tokens);
- if (token == NULL)
- return JSMN_ERROR_NOMEM;
- if (parser->toksuper != -1)
- tokens[parser->toksuper].size++;
- token->type = (c == '{' ? JSMN_OBJECT : JSMN_ARRAY);
- token->start = parser->pos;
- parser->toksuper = parser->toknext - 1;
- break;
- case '}':
- case ']':
-#ifdef JSMN_STRICT
- if (expecting_item)
- return JSMN_ERROR_INVAL;
-#endif
- type = (c == '}' ? JSMN_OBJECT : JSMN_ARRAY);
- for (i = parser->toknext - 1; i >= 0; i--) {
- token = &tokens[i];
- if (token->start != -1 && token->end == -1) {
- if (token->type != type)
- return JSMN_ERROR_INVAL;
- parser->toksuper = -1;
- token->end = parser->pos + 1;
- break;
- }
- }
- /* Error if unmatched closing bracket */
- if (i == -1)
- return JSMN_ERROR_INVAL;
- for (; i >= 0; i--) {
- token = &tokens[i];
- if (token->start != -1 && token->end == -1) {
- parser->toksuper = i;
- break;
- }
- }
- break;
- case '\"':
-#ifdef JSMN_STRICT
- if (!expecting_item)
- return JSMN_ERROR_INVAL;
- expecting_item = 0;
-#endif
- r = jsmn_parse_string(parser, js, len, tokens,
- num_tokens);
- if (r < 0)
- return r;
- if (parser->toksuper != -1)
- tokens[parser->toksuper].size++;
- break;
- case '\t':
- case '\r':
- case '\n':
- case ' ':
- break;
-#ifdef JSMN_STRICT
- case ':':
- case ',':
- if (expecting_item)
- return JSMN_ERROR_INVAL;
- expecting_item = 1;
- break;
- /*
- * In strict mode primitives are:
- * numbers and booleans.
- */
- case '-':
- case '0':
- case '1':
- case '2':
- case '3':
- case '4':
- case '5':
- case '6':
- case '7':
- case '8':
- case '9':
- case 't':
- case 'f':
- case 'n':
-#else
- case ':':
- case ',':
- break;
- /*
- * In non-strict mode every unquoted value
- * is a primitive.
- */
- /*FALL THROUGH */
- default:
-#endif
-
-#ifdef JSMN_STRICT
- if (!expecting_item)
- return JSMN_ERROR_INVAL;
- expecting_item = 0;
-#endif
- r = jsmn_parse_primitive(parser, js, len, tokens,
- num_tokens);
- if (r < 0)
- return r;
- if (parser->toksuper != -1)
- tokens[parser->toksuper].size++;
- break;
-
-#ifdef JSMN_STRICT
- /* Unexpected char in strict mode */
- default:
- return JSMN_ERROR_INVAL;
-#endif
- }
- }
-
- for (i = parser->toknext - 1; i >= 0; i--) {
- /* Unmatched opened object or array */
- if (tokens[i].start != -1 && tokens[i].end == -1)
- return JSMN_ERROR_PART;
- }
-
-#ifdef JSMN_STRICT
- return expecting_item ? JSMN_ERROR_INVAL : JSMN_SUCCESS;
-#else
- return JSMN_SUCCESS;
-#endif
-}
-
-/*
- * Creates a new parser based over a given buffer with an array of tokens
- * available.
- */
-void jsmn_init(jsmn_parser *parser)
-{
- parser->pos = 0;
- parser->toknext = 0;
- parser->toksuper = -1;
-}
-
-const char *jsmn_strerror(jsmnerr_t err)
-{
- switch (err) {
- case JSMN_ERROR_NOMEM:
- return "No enough tokens";
- case JSMN_ERROR_INVAL:
- return "Invalid character inside JSON string";
- case JSMN_ERROR_PART:
- return "The string is not a full JSON packet, more bytes expected";
- case JSMN_SUCCESS:
- return "Success";
- default:
- return "Unknown json error";
- }
-}
diff --git a/tools/perf/pmu-events/jsmn.h b/tools/perf/pmu-events/jsmn.h
deleted file mode 100644
index 1bdfd55fff30..000000000000
--- a/tools/perf/pmu-events/jsmn.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __JSMN_H_
-#define __JSMN_H_
-
-/*
- * JSON type identifier. Basic types are:
- * o Object
- * o Array
- * o String
- * o Other primitive: number, boolean (true/false) or null
- */
-typedef enum {
- JSMN_PRIMITIVE = 0,
- JSMN_OBJECT = 1,
- JSMN_ARRAY = 2,
- JSMN_STRING = 3
-} jsmntype_t;
-
-typedef enum {
- /* Not enough tokens were provided */
- JSMN_ERROR_NOMEM = -1,
- /* Invalid character inside JSON string */
- JSMN_ERROR_INVAL = -2,
- /* The string is not a full JSON packet, more bytes expected */
- JSMN_ERROR_PART = -3,
- /* Everything was fine */
- JSMN_SUCCESS = 0
-} jsmnerr_t;
-
-/*
- * JSON token description.
- * @param type type (object, array, string etc.)
- * @param start start position in JSON data string
- * @param end end position in JSON data string
- */
-typedef struct {
- jsmntype_t type;
- int start;
- int end;
- int size;
-} jsmntok_t;
-
-/*
- * JSON parser. Contains an array of token blocks available. Also stores
- * the string being parsed now and current position in that string
- */
-typedef struct {
- unsigned int pos; /* offset in the JSON string */
- int toknext; /* next token to allocate */
- int toksuper; /* superior token node, e.g parent object or array */
-} jsmn_parser;
-
-/*
- * Create JSON parser over an array of tokens
- */
-void jsmn_init(jsmn_parser *parser);
-
-/*
- * Run JSON parser. It parses a JSON data string into an array of tokens,
- * each describing a single JSON object.
- */
-jsmnerr_t jsmn_parse(jsmn_parser *parser, const char *js,
- size_t len,
- jsmntok_t *tokens, unsigned int num_tokens);
-
-const char *jsmn_strerror(jsmnerr_t err);
-
-#endif /* __JSMN_H_ */
diff --git a/tools/perf/pmu-events/json.c b/tools/perf/pmu-events/json.c
deleted file mode 100644
index 0544398d6e2d..000000000000
--- a/tools/perf/pmu-events/json.c
+++ /dev/null
@@ -1,162 +0,0 @@
-/* Parse JSON files using the JSMN parser. */
-
-/*
- * Copyright (c) 2014, Intel Corporation
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-#include <stdlib.h>
-#include <string.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <stdio.h>
-#include <errno.h>
-#include <unistd.h>
-#include "jsmn.h"
-#include "json.h"
-#include <linux/kernel.h>
-
-
-static char *mapfile(const char *fn, size_t *size)
-{
- unsigned ps = sysconf(_SC_PAGESIZE);
- struct stat st;
- char *map = NULL;
- int err;
- int fd = open(fn, O_RDONLY);
-
- if (fd < 0 && verbose > 0 && fn) {
- pr_err("Error opening events file '%s': %s\n", fn,
- strerror(errno));
- }
-
- if (fd < 0)
- return NULL;
- err = fstat(fd, &st);
- if (err < 0)
- goto out;
- *size = st.st_size;
- map = mmap(NULL,
- (st.st_size + ps - 1) & ~(ps - 1),
- PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
- if (map == MAP_FAILED)
- map = NULL;
-out:
- close(fd);
- return map;
-}
-
-static void unmapfile(char *map, size_t size)
-{
- unsigned ps = sysconf(_SC_PAGESIZE);
- munmap(map, roundup(size, ps));
-}
-
-/*
- * Parse json file using jsmn. Return array of tokens,
- * and mapped file. Caller needs to free array.
- */
-jsmntok_t *parse_json(const char *fn, char **map, size_t *size, int *len)
-{
- jsmn_parser parser;
- jsmntok_t *tokens;
- jsmnerr_t res;
- unsigned sz;
-
- *map = mapfile(fn, size);
- if (!*map)
- return NULL;
- /* Heuristic */
- sz = *size * 16;
- tokens = malloc(sz);
- if (!tokens)
- goto error;
- jsmn_init(&parser);
- res = jsmn_parse(&parser, *map, *size, tokens,
- sz / sizeof(jsmntok_t));
- if (res != JSMN_SUCCESS) {
- pr_err("%s: json error %s\n", fn, jsmn_strerror(res));
- goto error_free;
- }
- if (len)
- *len = parser.toknext;
- return tokens;
-error_free:
- free(tokens);
-error:
- unmapfile(*map, *size);
- return NULL;
-}
-
-void free_json(char *map, size_t size, jsmntok_t *tokens)
-{
- free(tokens);
- unmapfile(map, size);
-}
-
-static int countchar(char *map, char c, int end)
-{
- int i;
- int count = 0;
- for (i = 0; i < end; i++)
- if (map[i] == c)
- count++;
- return count;
-}
-
-/* Return line number of a jsmn token */
-int json_line(char *map, jsmntok_t *t)
-{
- return countchar(map, '\n', t->start) + 1;
-}
-
-static const char * const jsmn_types[] = {
- [JSMN_PRIMITIVE] = "primitive",
- [JSMN_ARRAY] = "array",
- [JSMN_OBJECT] = "object",
- [JSMN_STRING] = "string"
-};
-
-#define LOOKUP(a, i) ((i) < (sizeof(a)/sizeof(*(a))) ? ((a)[i]) : "?")
-
-/* Return type name of a jsmn token */
-const char *json_name(jsmntok_t *t)
-{
- return LOOKUP(jsmn_types, t->type);
-}
-
-int json_len(jsmntok_t *t)
-{
- return t->end - t->start;
-}
-
-/* Is string t equal to s? */
-int json_streq(char *map, jsmntok_t *t, const char *s)
-{
- unsigned len = json_len(t);
- return len == strlen(s) && !strncasecmp(map + t->start, s, len);
-}
diff --git a/tools/perf/pmu-events/json.h b/tools/perf/pmu-events/json.h
deleted file mode 100644
index fbcd5a0590ad..000000000000
--- a/tools/perf/pmu-events/json.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef JSON_H
-#define JSON_H 1
-
-#include "jsmn.h"
-
-jsmntok_t *parse_json(const char *fn, char **map, size_t *size, int *len);
-void free_json(char *map, size_t size, jsmntok_t *tokens);
-int json_line(char *map, jsmntok_t *t);
-const char *json_name(jsmntok_t *t);
-int json_streq(char *map, jsmntok_t *t, const char *s);
-int json_len(jsmntok_t *t);
-
-extern int verbose;
-
-#include <stdbool.h>
-
-extern int eprintf(int level, int var, const char *fmt, ...);
-#define pr_fmt(fmt) fmt
-
-#define pr_err(fmt, ...) \
- eprintf(0, verbose, pr_fmt(fmt), ##__VA_ARGS__)
-
-#define pr_info(fmt, ...) \
- eprintf(1, verbose, pr_fmt(fmt), ##__VA_ARGS__)
-
-#define pr_debug(fmt, ...) \
- eprintf(2, verbose, pr_fmt(fmt), ##__VA_ARGS__)
-
-#ifndef roundup
-#define roundup(x, y) ( \
-{ \
- const typeof(y) __y = y; \
- (((x) + (__y - 1)) / __y) * __y; \
-} \
-)
-#endif
-
-#endif
diff --git a/tools/perf/pmu-events/pmu-events.h b/tools/perf/pmu-events/pmu-events.h
index 6efe73976440..fe343c4d8016 100644
--- a/tools/perf/pmu-events/pmu-events.h
+++ b/tools/perf/pmu-events/pmu-events.h
@@ -2,6 +2,8 @@
#ifndef PMU_EVENTS_H
#define PMU_EVENTS_H
+struct perf_pmu;
+
enum aggr_mode_class {
PerChip = 1,
PerCore
@@ -28,32 +30,20 @@ struct pmu_event {
const char *metric_constraint;
};
-/*
- *
- * Map a CPU to its table of PMU events. The CPU is identified by the
- * cpuid field, which is an arch-specific identifier for the CPU.
- * The identifier specified in tools/perf/pmu-events/arch/xxx/mapfile
- * must match the get_cpuid_str() in tools/perf/arch/xxx/util/header.c)
- *
- * The cpuid can contain any character other than the comma.
- */
-struct pmu_events_map {
- const char *cpuid;
- const char *version;
- const char *type; /* core, uncore etc */
- const struct pmu_event *table;
-};
+struct pmu_events_table;
-struct pmu_sys_events {
- const char *name;
- const struct pmu_event *table;
-};
+typedef int (*pmu_event_iter_fn)(const struct pmu_event *pe,
+ const struct pmu_events_table *table,
+ void *data);
-/*
- * Global table mapping each known CPU for the architecture to its
- * table of PMU events.
- */
-extern const struct pmu_events_map pmu_events_map[];
-extern const struct pmu_sys_events pmu_sys_event_tables[];
+int pmu_events_table_for_each_event(const struct pmu_events_table *table, pmu_event_iter_fn fn,
+ void *data);
+
+const struct pmu_events_table *perf_pmu__find_table(struct perf_pmu *pmu);
+const struct pmu_events_table *find_core_events_table(const char *arch, const char *cpuid);
+int pmu_for_each_core_event(pmu_event_iter_fn fn, void *data);
+
+const struct pmu_events_table *find_sys_events_table(const char *name);
+int pmu_for_each_sys_event(pmu_event_iter_fn fn, void *data);
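+
+/*
+ * Illustrative use of the iterators above (sketch, not part of the API):
+ *   static int show(const struct pmu_event *pe,
+ *                   const struct pmu_events_table *table, void *data)
+ *   { printf("%s\n", pe->name); return 0; }
+ * then pmu_for_each_core_event(show, NULL) visits every core event.
+ */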
#endif
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Build b/tools/perf/scripts/perl/Perf-Trace-Util/Build
index db0036129307..cc76be005d5e 100644
--- a/tools/perf/scripts/perl/Perf-Trace-Util/Build
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/Build
@@ -1,5 +1,9 @@
perf-y += Context.o
-CFLAGS_Context.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes
+CFLAGS_Context.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-bad-function-cast -Wno-declaration-after-statement -Wno-switch-enum
CFLAGS_Context.o += -Wno-unused-parameter -Wno-nested-externs -Wno-undef
CFLAGS_Context.o += -Wno-switch-default -Wno-shadow
+
+ifeq ($(CC_NO_CLANG), 1)
+ CFLAGS_Context.o += -Wno-unused-command-line-argument
+endif
diff --git a/tools/perf/scripts/python/intel-pt-events.py b/tools/perf/scripts/python/intel-pt-events.py
index 9b7746b89381..6be7fd8fd615 100644
--- a/tools/perf/scripts/python/intel-pt-events.py
+++ b/tools/perf/scripts/python/intel-pt-events.py
@@ -197,7 +197,12 @@ def common_start_str(comm, sample):
cpu = sample["cpu"]
pid = sample["pid"]
tid = sample["tid"]
- return "%16s %5u/%-5u [%03u] %9u.%09u " % (comm, pid, tid, cpu, ts / 1000000000, ts %1000000000)
+ if "machine_pid" in sample:
+ machine_pid = sample["machine_pid"]
+ vcpu = sample["vcpu"]
+ return "VM:%5d VCPU:%03d %16s %5u/%-5u [%03u] %9u.%09u " % (machine_pid, vcpu, comm, pid, tid, cpu, ts / 1000000000, ts %1000000000)
+ else:
+ return "%16s %5u/%-5u [%03u] %9u.%09u " % (comm, pid, tid, cpu, ts / 1000000000, ts %1000000000)
def print_common_start(comm, sample, name):
flags_disp = get_optional_null(sample, "flags_disp")
@@ -379,9 +384,19 @@ def process_event(param_dict):
sys.exit(1)
def auxtrace_error(typ, code, cpu, pid, tid, ip, ts, msg, cpumode, *x):
+ if len(x) >= 2 and x[0]:
+ machine_pid = x[0]
+ vcpu = x[1]
+ else:
+ machine_pid = 0
+ vcpu = -1
try:
- print("%16s %5u/%-5u [%03u] %9u.%09u error type %u code %u: %s ip 0x%16x" %
- ("Trace error", pid, tid, cpu, ts / 1000000000, ts %1000000000, typ, code, msg, ip))
+ if machine_pid:
+ print("VM:%5d VCPU:%03d %16s %5u/%-5u [%03u] %9u.%09u error type %u code %u: %s ip 0x%16x" %
+ (machine_pid, vcpu, "Trace error", pid, tid, cpu, ts / 1000000000, ts %1000000000, typ, code, msg, ip))
+ else:
+ print("%16s %5u/%-5u [%03u] %9u.%09u error type %u code %u: %s ip 0x%16x" %
+ ("Trace error", pid, tid, cpu, ts / 1000000000, ts %1000000000, typ, code, msg, ip))
except broken_pipe_exception:
# Stop python printing broken pipe errors and traceback
sys.stdout = open(os.devnull, 'w')
@@ -396,14 +411,21 @@ def context_switch(ts, cpu, pid, tid, np_pid, np_tid, machine_pid, out, out_pree
preempt_str = "preempt"
else:
preempt_str = ""
+ if len(x) >= 2 and x[0]:
+ machine_pid = x[0]
+ vcpu = x[1]
+ else:
+		vcpu = None
if machine_pid == -1:
machine_str = ""
- else:
+ elif vcpu is None:
machine_str = "machine PID %d" % machine_pid
+ else:
+ machine_str = "machine PID %d VCPU %d" % (machine_pid, vcpu)
switch_str = "%16s %5d/%-5d [%03u] %9u.%09u %5d/%-5d %s %s" % \
(out_str, pid, tid, cpu, ts / 1000000000, ts %1000000000, np_pid, np_tid, machine_str, preempt_str)
if glb_args.all_switch_events:
- print(switch_str);
+ print(switch_str)
else:
global glb_switch_str
glb_switch_str[cpu] = switch_str
diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build
index af2b37ef7c70..2064a640facb 100644
--- a/tools/perf/tests/Build
+++ b/tools/perf/tests/Build
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
perf-y += builtin-test.o
+perf-y += builtin-test-list.o
perf-y += parse-events.o
perf-y += dso-data.o
perf-y += attr.o
diff --git a/tools/perf/tests/bpf-script-example.c b/tools/perf/tests/bpf-script-example.c
index ab4b98b3165d..7981c69ed1b4 100644
--- a/tools/perf/tests/bpf-script-example.c
+++ b/tools/perf/tests/bpf-script-example.c
@@ -17,20 +17,31 @@ static void *(*bpf_map_lookup_elem)(void *map, void *key) =
static void *(*bpf_map_update_elem)(void *map, void *key, void *value, int flags) =
(void *) BPF_FUNC_map_update_elem;
-struct bpf_map_def {
- unsigned int type;
- unsigned int key_size;
- unsigned int value_size;
- unsigned int max_entries;
-};
+/*
+ * The following macros are taken from tools/lib/bpf/bpf_helpers.h
+ * and are used to create BTF-defined maps. It is easier to copy
+ * these two simple macros than to make that header available at
+ * runtime.
+ *
+ * __uint - defines an integer attribute of a BTF map definition.
+ * Such attributes are represented as a pointer to an array whose
+ * dimensionality encodes the specified integer value.
+ *
+ * __type - defines a pointer variable of type typeof(val) for
+ * attributes like key or value, whose sizes are then taken from
+ * the type itself.
+ */
+#define __uint(name, val) int (*name)[val]
+#define __type(name, val) typeof(val) *name
#define SEC(NAME) __attribute__((section(NAME), used))
-struct bpf_map_def SEC("maps") flip_table = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(int),
- .max_entries = 1,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+} flip_table SEC(".maps");
SEC("func=do_epoll_wait")
int bpf_func__SyS_epoll_pwait(void *ctx)
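
Given the two macro definitions above, the BTF-style declaration of flip_table is syntactic sugar; hand-expanding it (purely for illustration) yields:

    struct {
            int (*type)[BPF_MAP_TYPE_ARRAY];    /* from __uint(type, BPF_MAP_TYPE_ARRAY) */
            int (*max_entries)[1];              /* from __uint(max_entries, 1)           */
            typeof(int) *key;                   /* from __type(key, int)                 */
            typeof(int) *value;                 /* from __type(value, int)               */
    } flip_table SEC(".maps");

The BTF generated for these member types is what tells libbpf the map's type, entry count, and key/value sizes.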
diff --git a/tools/perf/tests/builtin-test-list.c b/tools/perf/tests/builtin-test-list.c
new file mode 100644
index 000000000000..a65b9e547d82
--- /dev/null
+++ b/tools/perf/tests/builtin-test-list.c
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/zalloc.h>
+#include <string.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <subcmd/exec-cmd.h>
+#include <subcmd/parse-options.h>
+#include <sys/wait.h>
+#include <sys/stat.h>
+#include "builtin.h"
+#include "builtin-test-list.h"
+#include "color.h"
+#include "debug.h"
+#include "hist.h"
+#include "intlist.h"
+#include "string2.h"
+#include "symbol.h"
+#include "tests.h"
+#include "util/rlimit.h"
+
+
+/*
+ * This is a singleton, built once per run of the process, so there is no
+ * value in trying to free it; it simply stays around until the process
+ * exits, at which point it is cleaned up.
+ */
+static size_t files_num = 0;
+static struct script_file *files = NULL;
+static int files_max_width = 0;
+
+static const char *shell_tests__dir(char *path, size_t size)
+{
+ const char *devel_dirs[] = { "./tools/perf/tests", "./tests", };
+ char *exec_path;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(devel_dirs); ++i) {
+ struct stat st;
+
+ if (!lstat(devel_dirs[i], &st)) {
+ scnprintf(path, size, "%s/shell", devel_dirs[i]);
+ if (!lstat(devel_dirs[i], &st))
+ return path;
+ }
+ }
+
+ /* Then installed path. */
+ exec_path = get_argv_exec_path();
+ scnprintf(path, size, "%s/tests/shell", exec_path);
+ free(exec_path);
+ return path;
+}
+
+static const char *shell_test__description(char *description, size_t size,
+ const char *path, const char *name)
+{
+ FILE *fp;
+ char filename[PATH_MAX];
+ int ch;
+
+ path__join(filename, sizeof(filename), path, name);
+ fp = fopen(filename, "r");
+ if (!fp)
+ return NULL;
+
+ /* Skip first line - should be #!/bin/sh Shebang */
+ do {
+ ch = fgetc(fp);
+ } while (ch != EOF && ch != '\n');
+
+ description = fgets(description, size, fp);
+ fclose(fp);
+
+	/* Assume the first char on the line is a comment character; the rest is the description */
+ return description ? strim(description + 1) : NULL;
+}
+
+/* Is this full file path a shell script */
+static bool is_shell_script(const char *path)
+{
+ const char *ext;
+
+ ext = strrchr(path, '.');
+ if (!ext)
+ return false;
+ if (!strcmp(ext, ".sh")) { /* Has .sh extension */
+ if (access(path, R_OK | X_OK) == 0) /* Is executable */
+ return true;
+ }
+ return false;
+}
+
+/* Is this file in this dir a shell script (for test purposes) */
+static bool is_test_script(const char *path, const char *name)
+{
+ char filename[PATH_MAX];
+
+ path__join(filename, sizeof(filename), path, name);
+	if (!is_shell_script(filename))
+		return false;
+ return true;
+}
+
+/* Duplicate a string and fall over and die if we run out of memory */
+static char *strdup_check(const char *str)
+{
+ char *newstr;
+
+ newstr = strdup(str);
+ if (!newstr) {
+ pr_err("Out of memory while duplicating test script string\n");
+ abort();
+ }
+ return newstr;
+}
+
+static void append_script(const char *dir, const char *file, const char *desc)
+{
+ struct script_file *files_tmp;
+ size_t files_num_tmp;
+ int width;
+
+ files_num_tmp = files_num + 1;
+ if (files_num_tmp >= SIZE_MAX) {
+ pr_err("Too many script files\n");
+ abort();
+ }
+ /* Realloc is good enough, though we could realloc by chunks, not that
+ * anyone will ever measure performance here */
+ files_tmp = realloc(files,
+ (files_num_tmp + 1) * sizeof(struct script_file));
+ if (files_tmp == NULL) {
+ pr_err("Out of memory while building test list\n");
+ abort();
+ }
+ /* Add file to end and NULL terminate the struct array */
+ files = files_tmp;
+ files_num = files_num_tmp;
+ files[files_num - 1].dir = strdup_check(dir);
+ files[files_num - 1].file = strdup_check(file);
+ files[files_num - 1].desc = strdup_check(desc);
+ files[files_num].dir = NULL;
+ files[files_num].file = NULL;
+ files[files_num].desc = NULL;
+
+ width = strlen(desc); /* Track max width of desc */
+ if (width > files_max_width)
+ files_max_width = width;
+}
+
+static void append_scripts_in_dir(const char *path)
+{
+ struct dirent **entlist;
+ struct dirent *ent;
+ int n_dirs, i;
+ char filename[PATH_MAX];
+
+ /* List files, sorted by alpha */
+ n_dirs = scandir(path, &entlist, NULL, alphasort);
+ if (n_dirs == -1)
+ return;
+ for (i = 0; i < n_dirs && (ent = entlist[i]); i++) {
+ if (ent->d_name[0] == '.')
+ continue; /* Skip hidden files */
+ if (is_test_script(path, ent->d_name)) { /* It's a test */
+ char bf[256];
+ const char *desc = shell_test__description
+ (bf, sizeof(bf), path, ent->d_name);
+
+ if (desc) /* It has a desc line - valid script */
+ append_script(path, ent->d_name, desc);
+ } else if (is_directory(path, ent)) { /* Scan the subdir */
+ path__join(filename, sizeof(filename),
+ path, ent->d_name);
+ append_scripts_in_dir(filename);
+ }
+ }
+ for (i = 0; i < n_dirs; i++) /* Clean up */
+ zfree(&entlist[i]);
+ free(entlist);
+}
+
+const struct script_file *list_script_files(void)
+{
+ char path_dir[PATH_MAX];
+ const char *path;
+
+ if (files)
+ return files; /* Singleton - we already know our list */
+
+ path = shell_tests__dir(path_dir, sizeof(path_dir)); /* Walk dir */
+ append_scripts_in_dir(path);
+
+ return files;
+}
+
+int list_script_max_width(void)
+{
+ list_script_files(); /* Ensure we have scanned all scripts */
+ return files_max_width;
+}
diff --git a/tools/perf/tests/builtin-test-list.h b/tools/perf/tests/builtin-test-list.h
new file mode 100644
index 000000000000..eb81f3aa6683
--- /dev/null
+++ b/tools/perf/tests/builtin-test-list.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+struct script_file {
+ char *dir;
+ char *file;
+ char *desc;
+};
+
+/* List available script tests to run - singleton - never freed */
+const struct script_file *list_script_files(void);
+/* Get maximum width of description string */
+int list_script_max_width(void);
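
The array returned by list_script_files() is terminated by an entry whose dir pointer is NULL, so callers can walk it without a separate length. An illustrative sketch (builtin-test.c below iterates the same way):

    const struct script_file *file;

    for (file = list_script_files(); file && file->dir; file++)
            pr_debug("%-*s  %s/%s\n", list_script_max_width(),
                     file->desc, file->dir, file->file);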
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 81cf241cd109..7122eae1d98d 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -28,6 +28,8 @@
#include <subcmd/exec-cmd.h>
#include <linux/zalloc.h>
+#include "builtin-test-list.h"
+
static bool dont_fork;
struct test_suite *__weak arch_tests[] = {
@@ -274,91 +276,6 @@ static int test_and_print(struct test_suite *t, int subtest)
return err;
}
-static const char *shell_test__description(char *description, size_t size,
- const char *path, const char *name)
-{
- FILE *fp;
- char filename[PATH_MAX];
- int ch;
-
- path__join(filename, sizeof(filename), path, name);
- fp = fopen(filename, "r");
- if (!fp)
- return NULL;
-
- /* Skip shebang */
- do {
- ch = fgetc(fp);
- } while (ch != EOF && ch != '\n');
-
- description = fgets(description, size, fp);
- fclose(fp);
-
- return description ? strim(description + 1) : NULL;
-}
-
-#define for_each_shell_test(entlist, nr, base, ent) \
- for (int __i = 0; __i < nr && (ent = entlist[__i]); __i++) \
- if (!is_directory(base, ent) && \
- is_executable_file(base, ent) && \
- ent->d_name[0] != '.')
-
-static const char *shell_tests__dir(char *path, size_t size)
-{
- const char *devel_dirs[] = { "./tools/perf/tests", "./tests", };
- char *exec_path;
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(devel_dirs); ++i) {
- struct stat st;
- if (!lstat(devel_dirs[i], &st)) {
- scnprintf(path, size, "%s/shell", devel_dirs[i]);
- if (!lstat(devel_dirs[i], &st))
- return path;
- }
- }
-
- /* Then installed path. */
- exec_path = get_argv_exec_path();
- scnprintf(path, size, "%s/tests/shell", exec_path);
- free(exec_path);
- return path;
-}
-
-static int shell_tests__max_desc_width(void)
-{
- struct dirent **entlist;
- struct dirent *ent;
- int n_dirs, e;
- char path_dir[PATH_MAX];
- const char *path = shell_tests__dir(path_dir, sizeof(path_dir));
- int width = 0;
-
- if (path == NULL)
- return -1;
-
- n_dirs = scandir(path, &entlist, NULL, alphasort);
- if (n_dirs == -1)
- return -1;
-
- for_each_shell_test(entlist, n_dirs, path, ent) {
- char bf[256];
- const char *desc = shell_test__description(bf, sizeof(bf), path, ent->d_name);
-
- if (desc) {
- int len = strlen(desc);
-
- if (width < len)
- width = len;
- }
- }
-
- for (e = 0; e < n_dirs; e++)
- zfree(&entlist[e]);
- free(entlist);
- return width;
-}
-
struct shell_test {
const char *dir;
const char *file;
@@ -385,33 +302,17 @@ static int shell_test__run(struct test_suite *test, int subdir __maybe_unused)
static int run_shell_tests(int argc, const char *argv[], int i, int width,
struct intlist *skiplist)
{
- struct dirent **entlist;
- struct dirent *ent;
- int n_dirs, e;
- char path_dir[PATH_MAX];
- struct shell_test st = {
- .dir = shell_tests__dir(path_dir, sizeof(path_dir)),
- };
-
- if (st.dir == NULL)
- return -1;
+ struct shell_test st;
+ const struct script_file *files, *file;
- n_dirs = scandir(st.dir, &entlist, NULL, alphasort);
- if (n_dirs == -1) {
- pr_err("failed to open shell test directory: %s\n",
- st.dir);
- return -1;
- }
-
- for_each_shell_test(entlist, n_dirs, st.dir, ent) {
+ files = list_script_files();
+ if (!files)
+ return 0;
+ for (file = files; file->dir; file++) {
int curr = i++;
- char desc[256];
struct test_case test_cases[] = {
{
- .desc = shell_test__description(desc,
- sizeof(desc),
- st.dir,
- ent->d_name),
+ .desc = file->desc,
.run_case = shell_test__run,
},
{ .name = NULL, }
@@ -421,12 +322,13 @@ static int run_shell_tests(int argc, const char *argv[], int i, int width,
.test_cases = test_cases,
.priv = &st,
};
+ st.dir = file->dir;
if (test_suite.desc == NULL ||
!perf_test__matches(test_suite.desc, curr, argc, argv))
continue;
- st.file = ent->d_name;
+ st.file = file->file;
pr_info("%3d: %-*s:", i, width, test_suite.desc);
if (intlist__find(skiplist, i)) {
@@ -436,10 +338,6 @@ static int run_shell_tests(int argc, const char *argv[], int i, int width,
test_and_print(&test_suite, 0);
}
-
- for (e = 0; e < n_dirs; e++)
- zfree(&entlist[e]);
- free(entlist);
return 0;
}
@@ -448,7 +346,7 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
struct test_suite *t;
unsigned int j, k;
int i = 0;
- int width = shell_tests__max_desc_width();
+ int width = list_script_max_width();
for_each_test(j, k, t) {
int len = strlen(test_description(t, -1));
@@ -529,36 +427,22 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
static int perf_test__list_shell(int argc, const char **argv, int i)
{
- struct dirent **entlist;
- struct dirent *ent;
- int n_dirs, e;
- char path_dir[PATH_MAX];
- const char *path = shell_tests__dir(path_dir, sizeof(path_dir));
-
- if (path == NULL)
- return -1;
+ const struct script_file *files, *file;
- n_dirs = scandir(path, &entlist, NULL, alphasort);
- if (n_dirs == -1)
- return -1;
-
- for_each_shell_test(entlist, n_dirs, path, ent) {
+ files = list_script_files();
+ if (!files)
+ return 0;
+ for (file = files; file->dir; file++) {
int curr = i++;
- char bf[256];
struct test_suite t = {
- .desc = shell_test__description(bf, sizeof(bf), path, ent->d_name),
+ .desc = file->desc
};
if (!perf_test__matches(t.desc, curr, argc, argv))
continue;
pr_info("%3d: %s\n", i, t.desc);
-
}
-
- for (e = 0; e < n_dirs; e++)
- zfree(&entlist[e]);
- free(entlist);
return 0;
}
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index 5610767b407f..95feb6ef34a0 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -638,7 +638,7 @@ static int do_test_code_reading(bool try_kcore)
str = do_determine_event(excl_kernel);
pr_debug("Parsing event '%s'\n", str);
- ret = parse_events(evlist, str, NULL);
+ ret = parse_event(evlist, str);
if (ret < 0) {
pr_debug("parse_events failed\n");
goto out_put;
diff --git a/tools/perf/tests/cpumap.c b/tools/perf/tests/cpumap.c
index f94929ebb54b..7ea150cdc137 100644
--- a/tools/perf/tests/cpumap.c
+++ b/tools/perf/tests/cpumap.c
@@ -17,21 +17,23 @@ static int process_event_mask(struct perf_tool *tool __maybe_unused,
struct machine *machine __maybe_unused)
{
struct perf_record_cpu_map *map_event = &event->cpu_map;
- struct perf_record_record_cpu_map *mask;
struct perf_record_cpu_map_data *data;
struct perf_cpu_map *map;
int i;
+ unsigned int long_size;
data = &map_event->data;
TEST_ASSERT_VAL("wrong type", data->type == PERF_CPU_MAP__MASK);
- mask = (struct perf_record_record_cpu_map *)data->data;
+ long_size = data->mask32_data.long_size;
- TEST_ASSERT_VAL("wrong nr", mask->nr == 1);
+ TEST_ASSERT_VAL("wrong long_size", long_size == 4 || long_size == 8);
+
+ TEST_ASSERT_VAL("wrong nr", data->mask32_data.nr == 1);
for (i = 0; i < 20; i++) {
- TEST_ASSERT_VAL("wrong cpu", test_bit(i, mask->mask));
+ TEST_ASSERT_VAL("wrong cpu", perf_record_cpu_map_data__test_bit(i, data));
}
map = cpu_map__new_data(data);
@@ -51,7 +53,6 @@ static int process_event_cpus(struct perf_tool *tool __maybe_unused,
struct machine *machine __maybe_unused)
{
struct perf_record_cpu_map *map_event = &event->cpu_map;
- struct cpu_map_entries *cpus;
struct perf_record_cpu_map_data *data;
struct perf_cpu_map *map;
@@ -59,11 +60,9 @@ static int process_event_cpus(struct perf_tool *tool __maybe_unused,
TEST_ASSERT_VAL("wrong type", data->type == PERF_CPU_MAP__CPUS);
- cpus = (struct cpu_map_entries *)data->data;
-
- TEST_ASSERT_VAL("wrong nr", cpus->nr == 2);
- TEST_ASSERT_VAL("wrong cpu", cpus->cpu[0] == 1);
- TEST_ASSERT_VAL("wrong cpu", cpus->cpu[1] == 256);
+ TEST_ASSERT_VAL("wrong nr", data->cpus_data.nr == 2);
+ TEST_ASSERT_VAL("wrong cpu", data->cpus_data.cpu[0] == 1);
+ TEST_ASSERT_VAL("wrong cpu", data->cpus_data.cpu[1] == 256);
map = cpu_map__new_data(data);
TEST_ASSERT_VAL("wrong nr", perf_cpu_map__nr(map) == 2);
diff --git a/tools/perf/tests/event-times.c b/tools/perf/tests/event-times.c
index 7606eb3df92f..e155f0e0e04d 100644
--- a/tools/perf/tests/event-times.c
+++ b/tools/perf/tests/event-times.c
@@ -174,7 +174,7 @@ static int test_times(int (attach)(struct evlist *),
goto out_err;
}
- err = parse_events(evlist, "cpu-clock:u", NULL);
+ err = parse_event(evlist, "cpu-clock:u");
if (err) {
pr_debug("failed to parse event cpu-clock:u\n");
goto out_err;
diff --git a/tools/perf/tests/evsel-roundtrip-name.c b/tools/perf/tests/evsel-roundtrip-name.c
index 9d3c64974f77..e94fed901992 100644
--- a/tools/perf/tests/evsel-roundtrip-name.c
+++ b/tools/perf/tests/evsel-roundtrip-name.c
@@ -27,7 +27,7 @@ static int perf_evsel__roundtrip_cache_name_test(void)
for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
__evsel__hw_cache_type_op_res_name(type, op, i, name, sizeof(name));
- err = parse_events(evlist, name, NULL);
+ err = parse_event(evlist, name);
if (err)
ret = err;
}
@@ -75,7 +75,7 @@ static int __perf_evsel__name_array_test(const char *const names[], int nr_names
return -ENOMEM;
for (i = 0; i < nr_names; ++i) {
- err = parse_events(evlist, names[i], NULL);
+ err = parse_event(evlist, names[i]);
if (err) {
pr_debug("failed to parse event '%s', err %d\n",
names[i], err);
diff --git a/tools/perf/tests/expand-cgroup.c b/tools/perf/tests/expand-cgroup.c
index dfefe5b60eb2..51fb5f34c1dd 100644
--- a/tools/perf/tests/expand-cgroup.c
+++ b/tools/perf/tests/expand-cgroup.c
@@ -180,33 +180,14 @@ static int expand_metric_events(void)
struct evlist *evlist;
struct rblist metric_events;
const char metric_str[] = "CPI";
-
- struct pmu_event pme_test[] = {
- {
- .metric_expr = "instructions / cycles",
- .metric_name = "IPC",
- },
- {
- .metric_expr = "1 / IPC",
- .metric_name = "CPI",
- },
- {
- .metric_expr = NULL,
- .metric_name = NULL,
- },
- };
- const struct pmu_events_map ev_map = {
- .cpuid = "test",
- .version = "1",
- .type = "core",
- .table = pme_test,
- };
+ const struct pmu_events_table *pme_test;
evlist = evlist__new();
TEST_ASSERT_VAL("failed to get evlist", evlist);
rblist__init(&metric_events);
- ret = metricgroup__parse_groups_test(evlist, &ev_map, metric_str,
+ pme_test = find_core_events_table("testarch", "testcpu");
+ ret = metricgroup__parse_groups_test(evlist, pme_test, metric_str,
false, false, &metric_events);
if (ret < 0) {
pr_debug("failed to parse '%s' metric\n", metric_str);
diff --git a/tools/perf/tests/expr.c b/tools/perf/tests/expr.c
index 5c0032fe93ae..2efe9e3a63b8 100644
--- a/tools/perf/tests/expr.c
+++ b/tools/perf/tests/expr.c
@@ -1,8 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
#include "util/debug.h"
#include "util/expr.h"
+#include "util/header.h"
#include "util/smt.h"
#include "tests.h"
+#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <linux/zalloc.h>
@@ -69,6 +71,11 @@ static int test__expr(struct test_suite *t __maybe_unused, int subtest __maybe_u
double val, num_cpus, num_cores, num_dies, num_packages;
int ret;
struct expr_parse_ctx *ctx;
+ bool is_intel = false;
+ char buf[128];
+
+ if (!get_cpuid(buf, sizeof(buf)))
+ is_intel = strstr(buf, "Intel") != NULL;
TEST_ASSERT_EQUAL("ids_union", test_ids_union(), 0);
@@ -175,6 +182,12 @@ static int test__expr(struct test_suite *t __maybe_unused, int subtest __maybe_u
if (num_dies) // Some platforms do not have CPU die support, for example s390
TEST_ASSERT_VAL("#num_dies >= #num_packages", num_dies >= num_packages);
+ TEST_ASSERT_VAL("#system_tsc_freq", expr__parse(&val, ctx, "#system_tsc_freq") == 0);
+ if (is_intel)
+ TEST_ASSERT_VAL("#system_tsc_freq > 0", val > 0);
+ else
+ TEST_ASSERT_VAL("#system_tsc_freq == 0", fpclassify(val) == FP_ZERO);
+
/*
* Source count returns the number of events aggregating in a leader
* event including the leader. Check parsing yields an id.
diff --git a/tools/perf/tests/hists_cumulate.c b/tools/perf/tests/hists_cumulate.c
index 17f4fcd6bdce..b42d37ff2399 100644
--- a/tools/perf/tests/hists_cumulate.c
+++ b/tools/perf/tests/hists_cumulate.c
@@ -706,7 +706,7 @@ static int test__hists_cumulate(struct test_suite *test __maybe_unused, int subt
TEST_ASSERT_VAL("No memory", evlist);
- err = parse_events(evlist, "cpu-clock", NULL);
+ err = parse_event(evlist, "cpu-clock");
if (err)
goto out;
err = TEST_FAIL;
diff --git a/tools/perf/tests/hists_filter.c b/tools/perf/tests/hists_filter.c
index 08cbeb9e39ae..8e1ceeb9b7b6 100644
--- a/tools/perf/tests/hists_filter.c
+++ b/tools/perf/tests/hists_filter.c
@@ -111,10 +111,10 @@ static int test__hists_filter(struct test_suite *test __maybe_unused, int subtes
TEST_ASSERT_VAL("No memory", evlist);
- err = parse_events(evlist, "cpu-clock", NULL);
+ err = parse_event(evlist, "cpu-clock");
if (err)
goto out;
- err = parse_events(evlist, "task-clock", NULL);
+ err = parse_event(evlist, "task-clock");
if (err)
goto out;
err = TEST_FAIL;
diff --git a/tools/perf/tests/hists_link.c b/tools/perf/tests/hists_link.c
index c575e13a850d..14b2ff808b5e 100644
--- a/tools/perf/tests/hists_link.c
+++ b/tools/perf/tests/hists_link.c
@@ -276,10 +276,10 @@ static int test__hists_link(struct test_suite *test __maybe_unused, int subtest
if (evlist == NULL)
return -ENOMEM;
- err = parse_events(evlist, "cpu-clock", NULL);
+ err = parse_event(evlist, "cpu-clock");
if (err)
goto out;
- err = parse_events(evlist, "task-clock", NULL);
+ err = parse_event(evlist, "task-clock");
if (err)
goto out;
diff --git a/tools/perf/tests/hists_output.c b/tools/perf/tests/hists_output.c
index 0bde4a768c15..62b0093253e3 100644
--- a/tools/perf/tests/hists_output.c
+++ b/tools/perf/tests/hists_output.c
@@ -593,7 +593,7 @@ static int test__hists_output(struct test_suite *test __maybe_unused, int subtes
TEST_ASSERT_VAL("No memory", evlist);
- err = parse_events(evlist, "cpu-clock", NULL);
+ err = parse_event(evlist, "cpu-clock");
if (err)
goto out;
err = TEST_FAIL;
diff --git a/tools/perf/tests/keep-tracking.c b/tools/perf/tests/keep-tracking.c
index dd2067312452..8f4f9b632e1e 100644
--- a/tools/perf/tests/keep-tracking.c
+++ b/tools/perf/tests/keep-tracking.c
@@ -89,8 +89,8 @@ static int test__keep_tracking(struct test_suite *test __maybe_unused, int subte
perf_evlist__set_maps(&evlist->core, cpus, threads);
- CHECK__(parse_events(evlist, "dummy:u", NULL));
- CHECK__(parse_events(evlist, "cycles:u", NULL));
+ CHECK__(parse_event(evlist, "dummy:u"));
+ CHECK__(parse_event(evlist, "cycles:u"));
evlist__config(evlist, &opts, NULL);
diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c
index 30bbe144648a..dfb6173b2a82 100644
--- a/tools/perf/tests/mmap-basic.c
+++ b/tools/perf/tests/mmap-basic.c
@@ -170,14 +170,139 @@ out_free_threads:
return err;
}
+static int test_stat_user_read(int event)
+{
+ struct perf_counts_values counts = { .val = 0 };
+ struct perf_thread_map *threads;
+ struct perf_evsel *evsel;
+ struct perf_event_mmap_page *pc;
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = event,
+#ifdef __aarch64__
+ .config1 = 0x2, /* Request user access */
+#endif
+ };
+ int err, i, ret = TEST_FAIL;
+ bool opened = false, mapped = false;
+
+ threads = perf_thread_map__new_dummy();
+ TEST_ASSERT_VAL("failed to create threads", threads);
+
+ perf_thread_map__set_pid(threads, 0, 0);
+
+ evsel = perf_evsel__new(&attr);
+ TEST_ASSERT_VAL("failed to create evsel", evsel);
+
+ err = perf_evsel__open(evsel, NULL, threads);
+ if (err) {
+ pr_err("failed to open evsel: %s\n", strerror(-err));
+ ret = TEST_SKIP;
+ goto out;
+ }
+ opened = true;
+
+ err = perf_evsel__mmap(evsel, 0);
+ if (err) {
+ pr_err("failed to mmap evsel: %s\n", strerror(-err));
+ goto out;
+ }
+ mapped = true;
+
+ pc = perf_evsel__mmap_base(evsel, 0, 0);
+ if (!pc) {
+ pr_err("failed to get mmapped address\n");
+ goto out;
+ }
+
+ if (!pc->cap_user_rdpmc || !pc->index) {
+ pr_err("userspace counter access not %s\n",
+ !pc->cap_user_rdpmc ? "supported" : "enabled");
+ ret = TEST_SKIP;
+ goto out;
+ }
+ if (pc->pmc_width < 32) {
+ pr_err("userspace counter width not set (%d)\n", pc->pmc_width);
+ goto out;
+ }
+
+ perf_evsel__read(evsel, 0, 0, &counts);
+ if (counts.val == 0) {
+ pr_err("failed to read value for evsel\n");
+ goto out;
+ }
+
+ for (i = 0; i < 5; i++) {
+ volatile int count = 0x10000 << i;
+ __u64 start, end, last = 0;
+
+ pr_debug("\tloop = %u, ", count);
+
+ perf_evsel__read(evsel, 0, 0, &counts);
+ start = counts.val;
+
+ while (count--) ;
+
+ perf_evsel__read(evsel, 0, 0, &counts);
+ end = counts.val;
+
+ if ((end - start) < last) {
+			pr_err("invalid counter data: end=%llu start=%llu last=%llu\n",
+ end, start, last);
+ goto out;
+ }
+ last = end - start;
+ pr_debug("count = %llu\n", end - start);
+ }
+ ret = TEST_OK;
+
+out:
+ if (mapped)
+ perf_evsel__munmap(evsel);
+ if (opened)
+ perf_evsel__close(evsel);
+ perf_evsel__delete(evsel);
+
+ perf_thread_map__put(threads);
+ return ret;
+}
+
+static int test__mmap_user_read_instr(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ return test_stat_user_read(PERF_COUNT_HW_INSTRUCTIONS);
+}
+
+static int test__mmap_user_read_cycles(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ return test_stat_user_read(PERF_COUNT_HW_CPU_CYCLES);
+}
+
static struct test_case tests__basic_mmap[] = {
TEST_CASE_REASON("Read samples using the mmap interface",
basic_mmap,
"permissions"),
+ TEST_CASE_REASON("User space counter reading of instructions",
+ mmap_user_read_instr,
+#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__)
+ "permissions"
+#else
+ "unsupported"
+#endif
+ ),
+ TEST_CASE_REASON("User space counter reading of cycles",
+ mmap_user_read_cycles,
+#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__)
+ "permissions"
+#else
+ "unsupported"
+#endif
+ ),
{ .name = NULL, }
};
struct test_suite suite__basic_mmap = {
- .desc = "Read samples using the mmap interface",
+ .desc = "mmap interface tests",
.test_cases = tests__basic_mmap,
};
diff --git a/tools/perf/tests/parse-metric.c b/tools/perf/tests/parse-metric.c
index 07b6f4ec024f..68f5a2a03242 100644
--- a/tools/perf/tests/parse-metric.c
+++ b/tools/perf/tests/parse-metric.c
@@ -13,79 +13,6 @@
#include "stat.h"
#include "pmu.h"
-static struct pmu_event pme_test[] = {
-{
- .metric_expr = "inst_retired.any / cpu_clk_unhalted.thread",
- .metric_name = "IPC",
- .metric_group = "group1",
-},
-{
- .metric_expr = "idq_uops_not_delivered.core / (4 * (( ( cpu_clk_unhalted.thread / 2 ) * "
- "( 1 + cpu_clk_unhalted.one_thread_active / cpu_clk_unhalted.ref_xclk ) )))",
- .metric_name = "Frontend_Bound_SMT",
-},
-{
- .metric_expr = "l1d\\-loads\\-misses / inst_retired.any",
- .metric_name = "dcache_miss_cpi",
-},
-{
- .metric_expr = "l1i\\-loads\\-misses / inst_retired.any",
- .metric_name = "icache_miss_cycles",
-},
-{
- .metric_expr = "(dcache_miss_cpi + icache_miss_cycles)",
- .metric_name = "cache_miss_cycles",
- .metric_group = "group1",
-},
-{
- .metric_expr = "l2_rqsts.demand_data_rd_hit + l2_rqsts.pf_hit + l2_rqsts.rfo_hit",
- .metric_name = "DCache_L2_All_Hits",
-},
-{
- .metric_expr = "max(l2_rqsts.all_demand_data_rd - l2_rqsts.demand_data_rd_hit, 0) + "
- "l2_rqsts.pf_miss + l2_rqsts.rfo_miss",
- .metric_name = "DCache_L2_All_Miss",
-},
-{
- .metric_expr = "dcache_l2_all_hits + dcache_l2_all_miss",
- .metric_name = "DCache_L2_All",
-},
-{
- .metric_expr = "d_ratio(dcache_l2_all_hits, dcache_l2_all)",
- .metric_name = "DCache_L2_Hits",
-},
-{
- .metric_expr = "d_ratio(dcache_l2_all_miss, dcache_l2_all)",
- .metric_name = "DCache_L2_Misses",
-},
-{
- .metric_expr = "ipc + m2",
- .metric_name = "M1",
-},
-{
- .metric_expr = "ipc + m1",
- .metric_name = "M2",
-},
-{
- .metric_expr = "1/m3",
- .metric_name = "M3",
-},
-{
- .metric_expr = "64 * l1d.replacement / 1000000000 / duration_time",
- .metric_name = "L1D_Cache_Fill_BW",
-},
-{
- .name = NULL,
-}
-};
-
-static const struct pmu_events_map map = {
- .cpuid = "test",
- .version = "1",
- .type = "core",
- .table = pme_test,
-};
-
struct value {
const char *event;
u64 val;
@@ -145,6 +72,7 @@ static int __compute_metric(const char *name, struct value *vals,
struct rblist metric_events = {
.nr_entries = 0,
};
+ const struct pmu_events_table *pme_test;
struct perf_cpu_map *cpus;
struct runtime_stat st;
struct evlist *evlist;
@@ -168,7 +96,8 @@ static int __compute_metric(const char *name, struct value *vals,
runtime_stat__init(&st);
/* Parse the metric into metric_events list. */
- err = metricgroup__parse_groups_test(evlist, &map, name,
+ pme_test = find_core_events_table("testarch", "testcpu");
+ err = metricgroup__parse_groups_test(evlist, pme_test, name,
false, false,
&metric_events);
if (err)
diff --git a/tools/perf/tests/perf-time-to-tsc.c b/tools/perf/tests/perf-time-to-tsc.c
index 7c7d20fc503a..c3aaa1ddff29 100644
--- a/tools/perf/tests/perf-time-to-tsc.c
+++ b/tools/perf/tests/perf-time-to-tsc.c
@@ -62,7 +62,7 @@ static int test__tsc_is_supported(struct test_suite *test __maybe_unused,
* This function implements a test that checks that the conversion of perf time
* to and from TSC is consistent with the order of events. If the test passes
* %0 is returned, otherwise %-1 is returned. If TSC conversion is not
- * supported then then the test passes but " (not supported)" is printed.
+ * supported then the test passes but " (not supported)" is printed.
*/
static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
@@ -100,7 +100,7 @@ static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int su
perf_evlist__set_maps(&evlist->core, cpus, threads);
- CHECK__(parse_events(evlist, "cycles:u", NULL));
+ CHECK__(parse_event(evlist, "cycles:u"));
evlist__config(evlist, &opts, NULL);
diff --git a/tools/perf/tests/pmu-events.c b/tools/perf/tests/pmu-events.c
index f13368569d8b..097e05c796ab 100644
--- a/tools/perf/tests/pmu-events.c
+++ b/tools/perf/tests/pmu-events.c
@@ -9,10 +9,12 @@
#include <linux/zalloc.h>
#include "debug.h"
#include "../pmu-events/pmu-events.h"
+#include <perf/evlist.h>
#include "util/evlist.h"
#include "util/expr.h"
#include "util/parse-events.h"
#include "metricgroup.h"
+#include "stat.h"
struct perf_pmu_test_event {
/* used for matching against events from generated pmu-events.c */
@@ -272,32 +274,6 @@ static bool is_same(const char *reference, const char *test)
return !strcmp(reference, test);
}
-static const struct pmu_events_map *__test_pmu_get_events_map(void)
-{
- const struct pmu_events_map *map;
-
- for (map = &pmu_events_map[0]; map->cpuid; map++) {
- if (!strcmp(map->cpuid, "testcpu"))
- return map;
- }
-
- pr_err("could not find test events map\n");
-
- return NULL;
-}
-
-static const struct pmu_event *__test_pmu_get_sys_events_table(void)
-{
- const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
-
- for ( ; tables->name; tables++) {
- if (!strcmp("pme_test_soc_sys", tables->name))
- return tables->table;
- }
-
- return NULL;
-}
-
static int compare_pmu_events(const struct pmu_event *e1, const struct pmu_event *e2)
{
if (!is_same(e1->name, e2->name)) {
@@ -447,85 +423,104 @@ static int compare_alias_to_test_event(struct perf_pmu_alias *alias,
return 0;
}
-/* Verify generated events from pmu-events.c are as expected */
-static int test__pmu_event_table(struct test_suite *test __maybe_unused,
- int subtest __maybe_unused)
+static int test__pmu_event_table_core_callback(const struct pmu_event *pe,
+ const struct pmu_events_table *table __maybe_unused,
+ void *data)
{
- const struct pmu_event *sys_event_tables = __test_pmu_get_sys_events_table();
- const struct pmu_events_map *map = __test_pmu_get_events_map();
- const struct pmu_event *table;
- int map_events = 0, expected_events;
+ int *map_events = data;
+ struct perf_pmu_test_event const **test_event_table;
+ bool found = false;
- /* ignore 3x sentinels */
- expected_events = ARRAY_SIZE(core_events) +
- ARRAY_SIZE(uncore_events) +
- ARRAY_SIZE(sys_events) - 3;
+ if (!pe->name)
+ return 0;
- if (!map || !sys_event_tables)
- return -1;
+ if (pe->pmu)
+ test_event_table = &uncore_events[0];
+ else
+ test_event_table = &core_events[0];
+
+ for (; *test_event_table; test_event_table++) {
+ struct perf_pmu_test_event const *test_event = *test_event_table;
+ struct pmu_event const *event = &test_event->event;
- for (table = map->table; table->name; table++) {
- struct perf_pmu_test_event const **test_event_table;
- bool found = false;
+ if (strcmp(pe->name, event->name))
+ continue;
+ found = true;
+ (*map_events)++;
- if (table->pmu)
- test_event_table = &uncore_events[0];
- else
- test_event_table = &core_events[0];
+ if (compare_pmu_events(pe, event))
+ return -1;
- for (; *test_event_table; test_event_table++) {
- struct perf_pmu_test_event const *test_event = *test_event_table;
- struct pmu_event const *event = &test_event->event;
+ pr_debug("testing event table %s: pass\n", pe->name);
+ }
+ if (!found) {
+ pr_err("testing event table: could not find event %s\n", pe->name);
+ return -1;
+ }
+ return 0;
+}
- if (strcmp(table->name, event->name))
- continue;
- found = true;
- map_events++;
+static int test__pmu_event_table_sys_callback(const struct pmu_event *pe,
+ const struct pmu_events_table *table __maybe_unused,
+ void *data)
+{
+ int *map_events = data;
+ struct perf_pmu_test_event const **test_event_table;
+ bool found = false;
- if (compare_pmu_events(table, event))
- return -1;
+ test_event_table = &sys_events[0];
- pr_debug("testing event table %s: pass\n", table->name);
- }
+ for (; *test_event_table; test_event_table++) {
+ struct perf_pmu_test_event const *test_event = *test_event_table;
+ struct pmu_event const *event = &test_event->event;
- if (!found) {
- pr_err("testing event table: could not find event %s\n",
- table->name);
- return -1;
- }
- }
+ if (strcmp(pe->name, event->name))
+ continue;
+ found = true;
+ (*map_events)++;
- for (table = sys_event_tables; table->name; table++) {
- struct perf_pmu_test_event const **test_event_table;
- bool found = false;
+ if (compare_pmu_events(pe, event))
+ return TEST_FAIL;
- test_event_table = &sys_events[0];
+ pr_debug("testing sys event table %s: pass\n", pe->name);
+ }
+ if (!found) {
+ pr_debug("testing sys event table: could not find event %s\n", pe->name);
+ return TEST_FAIL;
+ }
+ return TEST_OK;
+}
- for (; *test_event_table; test_event_table++) {
- struct perf_pmu_test_event const *test_event = *test_event_table;
- struct pmu_event const *event = &test_event->event;
+/* Verify generated events from pmu-events.c are as expected */
+static int test__pmu_event_table(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ const struct pmu_events_table *sys_event_table = find_sys_events_table("pme_test_soc_sys");
+ const struct pmu_events_table *table = find_core_events_table("testarch", "testcpu");
+ int map_events = 0, expected_events, err;
- if (strcmp(table->name, event->name))
- continue;
- found = true;
- map_events++;
+ /* ignore 3x sentinels */
+ expected_events = ARRAY_SIZE(core_events) +
+ ARRAY_SIZE(uncore_events) +
+ ARRAY_SIZE(sys_events) - 3;
- if (compare_pmu_events(table, event))
- return -1;
+ if (!table || !sys_event_table)
+ return -1;
- pr_debug("testing sys event table %s: pass\n", table->name);
- }
- if (!found) {
- pr_debug("testing event table: could not find event %s\n",
- table->name);
- return -1;
- }
- }
+ err = pmu_events_table_for_each_event(table, test__pmu_event_table_core_callback,
+ &map_events);
+ if (err)
+ return err;
+
+ err = pmu_events_table_for_each_event(sys_event_table, test__pmu_event_table_sys_callback,
+ &map_events);
+ if (err)
+ return err;
if (map_events != expected_events) {
pr_err("testing event table: found %d, but expected %d\n",
map_events, expected_events);
- return -1;
+ return TEST_FAIL;
}
return 0;
@@ -549,10 +544,10 @@ static int __test_core_pmu_event_aliases(char *pmu_name, int *count)
struct perf_pmu *pmu;
LIST_HEAD(aliases);
int res = 0;
- const struct pmu_events_map *map = __test_pmu_get_events_map();
+ const struct pmu_events_table *table = find_core_events_table("testarch", "testcpu");
struct perf_pmu_alias *a, *tmp;
- if (!map)
+ if (!table)
return -1;
test_event_table = &core_events[0];
@@ -563,7 +558,7 @@ static int __test_core_pmu_event_aliases(char *pmu_name, int *count)
pmu->name = pmu_name;
- pmu_add_cpu_aliases_map(&aliases, pmu, map);
+ pmu_add_cpu_aliases_table(&aliases, pmu, table);
for (; *test_event_table; test_event_table++) {
struct perf_pmu_test_event const *test_event = *test_event_table;
@@ -602,14 +597,14 @@ static int __test_uncore_pmu_event_aliases(struct perf_pmu_test_pmu *test_pmu)
struct perf_pmu *pmu = &test_pmu->pmu;
const char *pmu_name = pmu->name;
struct perf_pmu_alias *a, *tmp, *alias;
- const struct pmu_events_map *map;
+ const struct pmu_events_table *events_table;
LIST_HEAD(aliases);
int res = 0;
- map = __test_pmu_get_events_map();
- if (!map)
+ events_table = find_core_events_table("testarch", "testcpu");
+ if (!events_table)
return -1;
- pmu_add_cpu_aliases_map(&aliases, pmu, map);
+ pmu_add_cpu_aliases_table(&aliases, pmu, events_table);
pmu_add_sys_aliases(&aliases, pmu);
/* Count how many aliases we generated */
@@ -812,6 +807,15 @@ static int check_parse_id(const char *id, struct parse_events_error *error,
for (cur = strchr(dup, '@') ; cur; cur = strchr(++cur, '@'))
*cur = '/';
+ if (fake_pmu) {
+ /*
+ * Every call to __parse_events will try to initialize the PMU
+ * state from sysfs and then clean it up at the end. Reset the
+ * PMU events to the test state so that we don't pick up
+ * erroneous prefixes and suffixes.
+ */
+ perf_pmu__test_parse_init();
+ }
ret = __parse_events(evlist, dup, error, fake_pmu);
free(dup);
@@ -819,27 +823,6 @@ static int check_parse_id(const char *id, struct parse_events_error *error,
return ret;
}
-static int check_parse_cpu(const char *id, bool same_cpu, const struct pmu_event *pe)
-{
- struct parse_events_error error;
- int ret;
-
- parse_events_error__init(&error);
- ret = check_parse_id(id, &error, NULL);
- if (ret && same_cpu) {
- pr_warning("Parse event failed metric '%s' id '%s' expr '%s'\n",
- pe->metric_name, id, pe->metric_expr);
- pr_warning("Error string '%s' help '%s'\n", error.str,
- error.help);
- } else if (ret) {
- pr_debug3("Parse event failed, but for an event that may not be supported by this CPU.\nid '%s' metric '%s' expr '%s'\n",
- id, pe->metric_name, pe->metric_expr);
- ret = 0;
- }
- parse_events_error__exit(&error);
- return ret;
-}
-
static int check_parse_fake(const char *id)
{
struct parse_events_error error;
@@ -851,168 +834,116 @@ static int check_parse_fake(const char *id)
return ret;
}
-static void expr_failure(const char *msg,
- const struct pmu_events_map *map,
- const struct pmu_event *pe)
-{
- pr_debug("%s for map %s %s %s\n",
- msg, map->cpuid, map->version, map->type);
- pr_debug("On metric %s\n", pe->metric_name);
- pr_debug("On expression %s\n", pe->metric_expr);
-}
-
struct metric {
struct list_head list;
struct metric_ref metric_ref;
};
-static int resolve_metric_simple(struct expr_parse_ctx *pctx,
- struct list_head *compound_list,
- const struct pmu_events_map *map,
- const char *metric_name)
+static int test__parsing_callback(const struct pmu_event *pe, const struct pmu_events_table *table,
+ void *data)
{
- struct hashmap_entry *cur, *cur_tmp;
- struct metric *metric, *tmp;
- size_t bkt;
- bool all;
- int rc;
-
- do {
- all = true;
- hashmap__for_each_entry_safe(pctx->ids, cur, cur_tmp, bkt) {
- struct metric_ref *ref;
- const struct pmu_event *pe;
-
- pe = metricgroup__find_metric(cur->key, map);
- if (!pe)
- continue;
-
- if (!strcmp(metric_name, (char *)cur->key)) {
- pr_warning("Recursion detected for metric %s\n", metric_name);
- rc = -1;
- goto out_err;
- }
+ int *failures = data;
+ int k;
+ struct evlist *evlist;
+ struct perf_cpu_map *cpus;
+ struct runtime_stat st;
+ struct evsel *evsel;
+ struct rblist metric_events = {
+ .nr_entries = 0,
+ };
+ int err = 0;
- all = false;
+ if (!pe->metric_expr)
+ return 0;
- /* The metric key itself needs to go out.. */
- expr__del_id(pctx, cur->key);
+ pr_debug("Found metric '%s'\n", pe->metric_name);
+ (*failures)++;
- metric = malloc(sizeof(*metric));
- if (!metric) {
- rc = -ENOMEM;
- goto out_err;
- }
+ /*
+ * We need to prepare evlist for stat mode running on CPU 0
+ * because that's where all the stats are going to be created.
+ */
+ evlist = evlist__new();
+ if (!evlist)
+ return -ENOMEM;
- ref = &metric->metric_ref;
- ref->metric_name = pe->metric_name;
- ref->metric_expr = pe->metric_expr;
- list_add_tail(&metric->list, compound_list);
+ cpus = perf_cpu_map__new("0");
+ if (!cpus) {
+ evlist__delete(evlist);
+ return -ENOMEM;
+ }
- rc = expr__find_ids(pe->metric_expr, NULL, pctx);
- if (rc)
- goto out_err;
- break; /* The hashmap has been modified, so restart */
+ perf_evlist__set_maps(&evlist->core, cpus, NULL);
+ runtime_stat__init(&st);
+
+ err = metricgroup__parse_groups_test(evlist, table, pe->metric_name,
+ false, false,
+ &metric_events);
+ if (err) {
+ if (!strcmp(pe->metric_name, "M1") || !strcmp(pe->metric_name, "M2") ||
+ !strcmp(pe->metric_name, "M3")) {
+ (*failures)--;
+ pr_debug("Expected broken metric %s skipping\n", pe->metric_name);
+ err = 0;
}
- } while (!all);
-
- return 0;
+ goto out_err;
+ }
-out_err:
- list_for_each_entry_safe(metric, tmp, compound_list, list)
- free(metric);
+ err = evlist__alloc_stats(evlist, false);
+ if (err)
+ goto out_err;
+ /*
+ * Add all ids with a made up value. The value may trigger divide by
+ * zero when subtracted and so try to make them unique.
+ */
+ k = 1;
+ perf_stat__reset_shadow_stats();
+ evlist__for_each_entry(evlist, evsel) {
+ perf_stat__update_shadow_stats(evsel, k, 0, &st);
+ if (!strcmp(evsel->name, "duration_time"))
+ update_stats(&walltime_nsecs_stats, k);
+ k++;
+ }
+ evlist__for_each_entry(evlist, evsel) {
+ struct metric_event *me = metricgroup__lookup(&metric_events, evsel, false);
- return rc;
+ if (me != NULL) {
+ struct metric_expr *mexp;
+ list_for_each_entry (mexp, &me->head, nd) {
+ if (strcmp(mexp->metric_name, pe->metric_name))
+ continue;
+ pr_debug("Result %f\n", test_generic_metric(mexp, 0, &st));
+ err = 0;
+ (*failures)--;
+ goto out_err;
+ }
+ }
+ }
+	pr_debug("Didn't find parsed metric %s\n", pe->metric_name);
+ err = 1;
+out_err:
+ if (err)
+ pr_debug("Broken metric %s\n", pe->metric_name);
+
+ /* ... cleanup. */
+ metricgroup__rblist_exit(&metric_events);
+ runtime_stat__exit(&st);
+ evlist__free_stats(evlist);
+ perf_cpu_map__put(cpus);
+ evlist__delete(evlist);
+ return err;
}
static int test__parsing(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
- const struct pmu_events_map *cpus_map = pmu_events_map__find();
- const struct pmu_events_map *map;
- const struct pmu_event *pe;
- int i, j, k;
- int ret = 0;
- struct expr_parse_ctx *ctx;
- double result;
-
- ctx = expr__ctx_new();
- if (!ctx) {
- pr_debug("expr__ctx_new failed");
- return TEST_FAIL;
- }
- i = 0;
- for (;;) {
- map = &pmu_events_map[i++];
- if (!map->table)
- break;
- j = 0;
- for (;;) {
- struct metric *metric, *tmp;
- struct hashmap_entry *cur;
- LIST_HEAD(compound_list);
- size_t bkt;
-
- pe = &map->table[j++];
- if (!pe->name && !pe->metric_group && !pe->metric_name)
- break;
- if (!pe->metric_expr)
- continue;
- expr__ctx_clear(ctx);
- if (expr__find_ids(pe->metric_expr, NULL, ctx) < 0) {
- expr_failure("Parse find ids failed", map, pe);
- ret++;
- continue;
- }
+ int failures = 0;
- if (resolve_metric_simple(ctx, &compound_list, map,
- pe->metric_name)) {
- expr_failure("Could not resolve metrics", map, pe);
- ret++;
- goto exit; /* Don't tolerate errors due to severity */
- }
+ pmu_for_each_core_event(test__parsing_callback, &failures);
+ pmu_for_each_sys_event(test__parsing_callback, &failures);
- /*
- * Add all ids with a made up value. The value may
- * trigger divide by zero when subtracted and so try to
- * make them unique.
- */
- k = 1;
- hashmap__for_each_entry(ctx->ids, cur, bkt)
- expr__add_id_val(ctx, strdup(cur->key), k++);
-
- hashmap__for_each_entry(ctx->ids, cur, bkt) {
- if (check_parse_cpu(cur->key, map == cpus_map,
- pe))
- ret++;
- }
-
- list_for_each_entry_safe(metric, tmp, &compound_list, list) {
- expr__add_ref(ctx, &metric->metric_ref);
- free(metric);
- }
-
- if (expr__parse(&result, ctx, pe->metric_expr)) {
- /*
- * Parsing failed, make numbers go from large to
- * small which can resolve divide by zero
- * issues.
- */
- k = 1024;
- hashmap__for_each_entry(ctx->ids, cur, bkt)
- expr__add_id_val(ctx, strdup(cur->key), k--);
- if (expr__parse(&result, ctx, pe->metric_expr)) {
- expr_failure("Parse failed", map, pe);
- ret++;
- }
- }
- }
- }
- expr__ctx_free(ctx);
- /* TODO: fail when not ok */
-exit:
- return ret == 0 ? TEST_OK : TEST_SKIP;
+ return failures == 0 ? TEST_OK : TEST_FAIL;
}
struct test_metric {
@@ -1084,6 +1015,16 @@ out:
return ret;
}
+static int test__parsing_fake_callback(const struct pmu_event *pe,
+ const struct pmu_events_table *table __maybe_unused,
+ void *data __maybe_unused)
+{
+ if (!pe->metric_expr)
+ return 0;
+
+ return metric_parse_fake(pe->metric_expr);
+}
+
/*
* Parse all the metrics for current architecture,
* or all defined cpus via the 'fake_pmu'
@@ -1092,36 +1033,19 @@ out:
static int test__parsing_fake(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
- const struct pmu_events_map *map;
- const struct pmu_event *pe;
- unsigned int i, j;
int err = 0;
- for (i = 0; i < ARRAY_SIZE(metrics); i++) {
+ for (size_t i = 0; i < ARRAY_SIZE(metrics); i++) {
err = metric_parse_fake(metrics[i].str);
if (err)
return err;
}
- i = 0;
- for (;;) {
- map = &pmu_events_map[i++];
- if (!map->table)
- break;
- j = 0;
- for (;;) {
- pe = &map->table[j++];
- if (!pe->name && !pe->metric_group && !pe->metric_name)
- break;
- if (!pe->metric_expr)
- continue;
- err = metric_parse_fake(pe->metric_expr);
- if (err)
- return err;
- }
- }
+ err = pmu_for_each_core_event(test__parsing_fake_callback, NULL);
+ if (err)
+ return err;
- return 0;
+ return pmu_for_each_sys_event(test__parsing_fake_callback, NULL);
}
static struct test_case pmu_events_tests[] = {
diff --git a/tools/perf/tests/sample-parsing.c b/tools/perf/tests/sample-parsing.c
index 07f2411b0ad4..20930dd48ee0 100644
--- a/tools/perf/tests/sample-parsing.c
+++ b/tools/perf/tests/sample-parsing.c
@@ -86,10 +86,15 @@ static bool samples_same(const struct perf_sample *s1,
COMP(read.time_running);
/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
if (read_format & PERF_FORMAT_GROUP) {
- for (i = 0; i < s1->read.group.nr; i++)
- MCOMP(read.group.values[i]);
+ for (i = 0; i < s1->read.group.nr; i++) {
+ /* FIXME: check values without LOST */
+ if (read_format & PERF_FORMAT_LOST)
+ MCOMP(read.group.values[i]);
+ }
} else {
COMP(read.one.id);
+ if (read_format & PERF_FORMAT_LOST)
+ COMP(read.one.lost);
}
}
@@ -263,7 +268,7 @@ static int do_test(u64 sample_type, u64 sample_regs, u64 read_format)
.data = (void *)aux_data,
},
};
- struct sample_read_value values[] = {{1, 5}, {9, 3}, {2, 7}, {6, 4},};
+ struct sample_read_value values[] = {{1, 5, 0}, {9, 3, 0}, {2, 7, 0}, {6, 4, 1},};
struct perf_sample sample_out, sample_out_endian;
size_t i, sz, bufsz;
int err, ret = -1;
@@ -286,6 +291,7 @@ static int do_test(u64 sample_type, u64 sample_regs, u64 read_format)
} else {
sample.read.one.value = 0x08789faeb786aa87ULL;
sample.read.one.id = 99;
+ sample.read.one.lost = 1;
}
sz = perf_event__sample_event_size(&sample, sample_type, read_format);
@@ -370,7 +376,7 @@ out_free:
*/
static int test__sample_parsing(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
- const u64 rf[] = {4, 5, 6, 7, 12, 13, 14, 15};
+ const u64 rf[] = {4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 28, 29, 30, 31};
u64 sample_type;
u64 sample_regs;
size_t i;
diff --git a/tools/perf/tests/shell/lib/perf_json_output_lint.py b/tools/perf/tests/shell/lib/perf_json_output_lint.py
new file mode 100644
index 000000000000..d90f8d102eb9
--- /dev/null
+++ b/tools/perf/tests/shell/lib/perf_json_output_lint.py
@@ -0,0 +1,96 @@
+#!/usr/bin/python
+# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+# Basic sanity check of perf JSON output as specified in the man page.
+
+import argparse
+import sys
+import json
+
+ap = argparse.ArgumentParser()
+ap.add_argument('--no-args', action='store_true')
+ap.add_argument('--interval', action='store_true')
+ap.add_argument('--system-wide-no-aggr', action='store_true')
+ap.add_argument('--system-wide', action='store_true')
+ap.add_argument('--event', action='store_true')
+ap.add_argument('--per-core', action='store_true')
+ap.add_argument('--per-thread', action='store_true')
+ap.add_argument('--per-die', action='store_true')
+ap.add_argument('--per-node', action='store_true')
+ap.add_argument('--per-socket', action='store_true')
+args = ap.parse_args()
+
+Lines = sys.stdin.readlines()
+
+def isfloat(num):
+ try:
+ float(num)
+ return True
+ except ValueError:
+ return False
+
+
+def isint(num):
+ try:
+ int(num)
+ return True
+ except ValueError:
+ return False
+
+def is_counter_value(num):
+ return isfloat(num) or num == '<not counted>' or num == '<not supported>'
+
+def check_json_output(expected_items):
+ if expected_items != -1:
+ for line in Lines:
+ if 'failed' not in line:
+      count = line.count(',')
+ if count != expected_items and count >= 1 and count <= 3 and 'metric-value' in line:
+ # Events that generate >1 metric may have isolated metric
+ # values and possibly other prefixes like interval, core and
+ # aggregate-number.
+ continue
+ if count != expected_items:
+ raise RuntimeError(f'wrong number of fields. counted {count} expected {expected_items}'
+ f' in \'{line}\'')
+ checks = {
+ 'aggregate-number': lambda x: isfloat(x),
+ 'core': lambda x: True,
+ 'counter-value': lambda x: is_counter_value(x),
+ 'cgroup': lambda x: True,
+ 'cpu': lambda x: isint(x),
+ 'die': lambda x: True,
+ 'event': lambda x: True,
+ 'event-runtime': lambda x: isfloat(x),
+ 'interval': lambda x: isfloat(x),
+ 'metric-unit': lambda x: True,
+ 'metric-value': lambda x: isfloat(x),
+ 'node': lambda x: True,
+ 'pcnt-running': lambda x: isfloat(x),
+ 'socket': lambda x: True,
+ 'thread': lambda x: True,
+ 'unit': lambda x: True,
+ }
+ input = '[\n' + ','.join(Lines) + '\n]'
+ for item in json.loads(input):
+ for key, value in item.items():
+ if key not in checks:
+ raise RuntimeError(f'Unexpected key: key={key} value={value}')
+ if not checks[key](value):
+ raise RuntimeError(f'Check failed for: key={key} value={value}')
+
+
+try:
+ if args.no_args or args.system_wide or args.event:
+ expected_items = 6
+ elif args.interval or args.per_thread or args.system_wide_no_aggr:
+ expected_items = 7
+ elif args.per_core or args.per_socket or args.per_node or args.per_die:
+ expected_items = 8
+ else:
+ # If no option is specified, don't check the number of items.
+ expected_items = -1
+ check_json_output(expected_items)
+except:
+ print('Test failed for input:\n' + '\n'.join(Lines))
+ raise
diff --git a/tools/perf/tests/shell/record_offcpu.sh b/tools/perf/tests/shell/record_offcpu.sh
index 96e0739f7478..d2eba583a2ac 100755
--- a/tools/perf/tests/shell/record_offcpu.sh
+++ b/tools/perf/tests/shell/record_offcpu.sh
@@ -19,20 +19,26 @@ trap_cleanup() {
}
trap trap_cleanup exit term int
-test_offcpu() {
- echo "Basic off-cpu test"
+test_offcpu_priv() {
+ echo "Checking off-cpu privilege"
+
if [ `id -u` != 0 ]
then
- echo "Basic off-cpu test [Skipped permission]"
+ echo "off-cpu test [Skipped permission]"
err=2
return
fi
- if perf record --off-cpu -o ${perfdata} --quiet true 2>&1 | grep BUILD_BPF_SKEL
+ if perf record --off-cpu -o /dev/null --quiet true 2>&1 | grep BUILD_BPF_SKEL
then
- echo "Basic off-cpu test [Skipped missing BPF support]"
+ echo "off-cpu test [Skipped missing BPF support]"
err=2
return
fi
+}
+
+test_offcpu_basic() {
+ echo "Basic off-cpu test"
+
if ! perf record --off-cpu -e dummy -o ${perfdata} sleep 1 2> /dev/null
then
echo "Basic off-cpu test [Failed record]"
@@ -41,7 +47,7 @@ test_offcpu() {
fi
if ! perf evlist -i ${perfdata} | grep -q "offcpu-time"
then
- echo "Basic off-cpu test [Failed record]"
+ echo "Basic off-cpu test [Failed no event]"
err=1
return
fi
@@ -54,7 +60,44 @@ test_offcpu() {
echo "Basic off-cpu test [Success]"
}
-test_offcpu
+test_offcpu_child() {
+ echo "Child task off-cpu test"
+
+ # perf bench sched messaging creates 400 processes
+ if ! perf record --off-cpu -e dummy -o ${perfdata} -- \
+    perf bench sched messaging -g 10 > /dev/null 2>&1
+ then
+ echo "Child task off-cpu test [Failed record]"
+ err=1
+ return
+ fi
+ if ! perf evlist -i ${perfdata} | grep -q "offcpu-time"
+ then
+ echo "Child task off-cpu test [Failed no event]"
+ err=1
+ return
+ fi
+ # each process waits for read and write, so it should be more than 800 events
+ if ! perf report -i ${perfdata} -s comm -q -n -t ';' --percent-limit=90 | \
+ awk -F ";" '{ if (NF > 3 && int($3) < 800) exit 1; }'
+ then
+ echo "Child task off-cpu test [Failed invalid output]"
+ err=1
+ return
+ fi
+ echo "Child task off-cpu test [Success]"
+}
+
+
+test_offcpu_priv
+
+if [ $err = 0 ]; then
+ test_offcpu_basic
+fi
+
+if [ $err = 0 ]; then
+ test_offcpu_child
+fi
cleanup
exit $err
diff --git a/tools/perf/tests/shell/stat+csv_output.sh b/tools/perf/tests/shell/stat+csv_output.sh
index 38c26f3ef4c1..eb5196f58190 100755
--- a/tools/perf/tests/shell/stat+csv_output.sh
+++ b/tools/perf/tests/shell/stat+csv_output.sh
@@ -8,7 +8,8 @@ set -e
function commachecker()
{
- local -i cnt=0 exp=0
+ local -i cnt=0
+ local exp=0
case "$1"
in "--no-args") exp=6
@@ -17,7 +18,7 @@ function commachecker()
;; "--interval") exp=7
;; "--per-thread") exp=7
;; "--system-wide-no-aggr") exp=7
- [ $(uname -m) = "s390x" ] && exp=6
+ [ $(uname -m) = "s390x" ] && exp='^[6-7]$'
;; "--per-core") exp=8
;; "--per-socket") exp=8
;; "--per-node") exp=8
@@ -34,7 +35,7 @@ function commachecker()
x=$(echo $line | tr -d -c ',')
cnt="${#x}"
# echo $line $cnt
- [ "$cnt" -ne "$exp" ] && {
+ [[ ! "$cnt" =~ $exp ]] && {
echo "wrong number of fields. expected $exp in $line" 1>&2
exit 1;
}
diff --git a/tools/perf/tests/shell/stat+json_output.sh b/tools/perf/tests/shell/stat+json_output.sh
new file mode 100755
index 000000000000..ea8714a36051
--- /dev/null
+++ b/tools/perf/tests/shell/stat+json_output.sh
@@ -0,0 +1,147 @@
+#!/bin/bash
+# perf stat JSON output linter
+# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+# Checks various perf stat JSON output commands for the
+# correct number of fields.
+
+set -e
+
+pythonchecker=$(dirname $0)/lib/perf_json_output_lint.py
+if [ "x$PYTHON" == "x" ]
+then
+ if which python3 > /dev/null
+ then
+ PYTHON=python3
+ elif which python > /dev/null
+ then
+ PYTHON=python
+ else
+ echo "Skipping test, python not detected, please set the PYTHON environment variable."
+ exit 2
+ fi
+fi
+
+# Return true if perf_event_paranoid is > $1 and not running as root.
+function ParanoidAndNotRoot()
+{
+ [ $(id -u) != 0 ] && [ $(cat /proc/sys/kernel/perf_event_paranoid) -gt $1 ]
+}
+
+check_no_args()
+{
+ echo -n "Checking json output: no args "
+ perf stat -j true 2>&1 | $PYTHON $pythonchecker --no-args
+ echo "[Success]"
+}
+
+check_system_wide()
+{
+ echo -n "Checking json output: system wide "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoia and not root"
+ return
+ fi
+ perf stat -j -a true 2>&1 | $PYTHON $pythonchecker --system-wide
+ echo "[Success]"
+}
+
+check_system_wide_no_aggr()
+{
+ echo -n "Checking json output: system wide "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoia and not root"
+ return
+ fi
+ echo -n "Checking json output: system wide no aggregation "
+ perf stat -j -A -a --no-merge true 2>&1 | $PYTHON $pythonchecker --system-wide-no-aggr
+ echo "[Success]"
+}
+
+check_interval()
+{
+ echo -n "Checking json output: interval "
+ perf stat -j -I 1000 true 2>&1 | $PYTHON $pythonchecker --interval
+ echo "[Success]"
+}
+
+
+check_event()
+{
+ echo -n "Checking json output: event "
+ perf stat -j -e cpu-clock true 2>&1 | $PYTHON $pythonchecker --event
+ echo "[Success]"
+}
+
+check_per_core()
+{
+ echo -n "Checking json output: per core "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoia and not root"
+ return
+ fi
+ perf stat -j --per-core -a true 2>&1 | $PYTHON $pythonchecker --per-core
+ echo "[Success]"
+}
+
+check_per_thread()
+{
+ echo -n "Checking json output: per thread "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoia and not root"
+ return
+ fi
+ perf stat -j --per-thread -a true 2>&1 | $PYTHON $pythonchecker --per-thread
+ echo "[Success]"
+}
+
+check_per_die()
+{
+ echo -n "Checking json output: per die "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoia and not root"
+ return
+ fi
+ perf stat -j --per-die -a true 2>&1 | $PYTHON $pythonchecker --per-die
+ echo "[Success]"
+}
+
+check_per_node()
+{
+ echo -n "Checking json output: per node "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoia and not root"
+ return
+ fi
+ perf stat -j --per-node -a true 2>&1 | $PYTHON $pythonchecker --per-node
+ echo "[Success]"
+}
+
+check_per_socket()
+{
+ echo -n "Checking json output: per socket "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoia and not root"
+ return
+ fi
+ perf stat -j --per-socket -a true 2>&1 | $PYTHON $pythonchecker --per-socket
+ echo "[Success]"
+}
+
+check_no_args
+check_system_wide
+check_system_wide_no_aggr
+check_interval
+check_event
+check_per_core
+check_per_thread
+check_per_die
+check_per_node
+check_per_socket
+exit 0
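The privileged checks above are all gated by ParanoidAndNotRoot, which compares /proc/sys/kernel/perf_event_paranoid against a threshold when the user is not root. A minimal standalone sketch of the same check, for illustration only and not part of the patch (the function name is made up):

#include <stdio.h>
#include <unistd.h>

/* Mirror the shell helper: non-root and paranoid level above threshold. */
static int paranoid_and_not_root(int threshold)
{
	int level = -1;
	FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "r");

	if (f) {
		if (fscanf(f, "%d", &level) != 1)
			level = -1;
		fclose(f);
	}
	return geteuid() != 0 && level > threshold;
}

int main(void)
{
	printf("skip privileged checks: %d\n", paranoid_and_not_root(0));
	return 0;
}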
diff --git a/tools/perf/tests/shell/stat.sh b/tools/perf/tests/shell/stat.sh
index 9313ef2739e0..26a51b48aee4 100755
--- a/tools/perf/tests/shell/stat.sh
+++ b/tools/perf/tests/shell/stat.sh
@@ -28,6 +28,24 @@ test_stat_record_report() {
echo "stat record and report test [Success]"
}
+test_stat_repeat_weak_groups() {
+ echo "stat repeat weak groups test"
+ if ! perf stat -e '{cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles}' \
+ true 2>&1 | grep -q 'seconds time elapsed'
+ then
+ echo "stat repeat weak groups test [Skipped event parsing failed]"
+ return
+ fi
+ if ! perf stat -r2 -e '{cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles}:W' \
+ true > /dev/null 2>&1
+ then
+ echo "stat repeat weak groups test [Failed]"
+ err=1
+ return
+ fi
+ echo "stat repeat weak groups test [Success]"
+}
+
test_topdown_groups() {
# Topdown events must be grouped with the slots event first. Test that
# parse-events reorders this.
@@ -75,6 +93,7 @@ test_topdown_weak_groups() {
test_default_stat
test_stat_record_report
+test_stat_repeat_weak_groups
test_topdown_groups
test_topdown_weak_groups
exit $err
diff --git a/tools/perf/tests/shell/stat_all_metrics.sh b/tools/perf/tests/shell/stat_all_metrics.sh
index e7c59e5a7a98..6e79349e42be 100755
--- a/tools/perf/tests/shell/stat_all_metrics.sh
+++ b/tools/perf/tests/shell/stat_all_metrics.sh
@@ -1,26 +1,41 @@
-#!/bin/sh
+#!/bin/bash
# perf all metrics test
# SPDX-License-Identifier: GPL-2.0
-set -e
-
err=0
for m in $(perf list --raw-dump metrics); do
echo "Testing $m"
result=$(perf stat -M "$m" true 2>&1)
- if [[ ! "$result" =~ "$m" ]] && [[ ! "$result" =~ "<not supported>" ]]; then
- # We failed to see the metric and the events are support. Possibly the
- # workload was too small so retry with something longer.
- result=$(perf stat -M "$m" perf bench internals synthesize 2>&1)
- if [[ ! "$result" =~ "$m" ]]; then
- echo "Metric '$m' not printed in:"
- echo "$result"
- if [[ "$result" =~ "FP_ARITH" && "$err" != "1" ]]; then
- echo "Skip, not fail, for FP issues"
- err=2
- else
- err=1
- fi
+ if [[ "$result" =~ "${m:0:50}" ]] || [[ "$result" =~ "<not supported>" ]]
+ then
+ continue
+ fi
+ # Failed so try system wide.
+ result=$(perf stat -M "$m" -a true 2>&1)
+ if [[ "$result" =~ "${m:0:50}" ]]
+ then
+ continue
+ fi
+ # Failed again, possibly the workload was too small so retry with something
+ # longer.
+ result=$(perf stat -M "$m" perf bench internals synthesize 2>&1)
+ if [[ "$result" =~ "${m:0:50}" ]]
+ then
+ continue
+ fi
+ echo "Metric '$m' not printed in:"
+ echo "$result"
+ if [[ "$err" != "1" ]]
+ then
+ err=2
+ if [[ "$result" =~ "FP_ARITH" || "$result" =~ "AMX" ]]
+ then
+ echo "Skip, not fail, for FP issues"
+ elif [[ "$result" =~ "PMM" ]]
+ then
+ echo "Skip, not fail, for Optane memory issues"
+ else
+ err=1
fi
fi
done
diff --git a/tools/perf/tests/shell/test_arm_spe.sh b/tools/perf/tests/shell/test_arm_spe.sh
index e59044edc406..0d47479adba8 100755
--- a/tools/perf/tests/shell/test_arm_spe.sh
+++ b/tools/perf/tests/shell/test_arm_spe.sh
@@ -23,17 +23,20 @@ glb_err=0
cleanup_files()
{
rm -f ${perfdata}
+ rm -f ${perfdata}.old
exit $glb_err
}
trap cleanup_files exit term int
arm_spe_report() {
- if [ $2 != 0 ]; then
+ if [ $2 = 0 ]; then
+ echo "$1: PASS"
+ elif [ $2 = 2 ]; then
+ echo "$1: SKIPPED"
+ else
echo "$1: FAIL"
glb_err=$2
- else
- echo "$1: PASS"
fi
}
@@ -85,5 +88,26 @@ arm_spe_snapshot_test() {
arm_spe_report "SPE snapshot testing" $err
}
+arm_spe_system_wide_test() {
+ echo "Recording trace with system-wide mode $perfdata"
+
+ perf record -o - -e dummy -a -B true > /dev/null 2>&1
+ if [ $? != 0 ]; then
+ arm_spe_report "SPE system-wide testing" 2
+ return
+ fi
+
+ perf record -o ${perfdata} -e arm_spe// -a --no-bpf-event \
+ -- dd if=/dev/zero of=/dev/null count=100000 > /dev/null 2>&1
+
+ perf_script_samples dd &&
+ perf_report_samples dd
+
+ err=$?
+ arm_spe_report "SPE system-wide testing" $err
+}
+
arm_spe_snapshot_test
+arm_spe_system_wide_test
+
exit $glb_err
diff --git a/tools/perf/tests/shell/test_brstack.sh b/tools/perf/tests/shell/test_brstack.sh
new file mode 100755
index 000000000000..c644f94a6500
--- /dev/null
+++ b/tools/perf/tests/shell/test_brstack.sh
@@ -0,0 +1,114 @@
+#!/bin/sh
+# Check branch stack sampling
+
+# SPDX-License-Identifier: GPL-2.0
+# German Gomez <german.gomez@arm.com>, 2022
+
+# we need a C compiler to build the test programs
+# so bail if none is found
+if ! [ -x "$(command -v cc)" ]; then
+ echo "failed: no compiler, install gcc"
+ exit 2
+fi
+
+# skip the test if the hardware doesn't support branch stack sampling
+perf record -b -o- -B true > /dev/null 2>&1 || exit 2
+
+TMPDIR=$(mktemp -d /tmp/__perf_test.program.XXXXX)
+
+cleanup() {
+ rm -rf $TMPDIR
+}
+
+trap cleanup exit term int
+
+gen_test_program() {
+ # generate test program
+ cat << EOF > $1
+#define BENCH_RUNS 999999
+int cnt;
+void bar(void) {
+} /* return */
+void foo(void) {
+ bar(); /* call */
+} /* return */
+void bench(void) {
+ void (*foo_ind)(void) = foo;
+ if ((cnt++) % 3) /* branch (cond) */
+ foo(); /* call */
+ bar(); /* call */
+ foo_ind(); /* call (ind) */
+}
+int main(void)
+{
+ int cnt = 0;
+ while (1) {
+ if ((cnt++) > BENCH_RUNS)
+ break;
+ bench(); /* call */
+ } /* branch (uncond) */
+ return 0;
+}
+EOF
+}
+
+test_user_branches() {
+ echo "Testing user branch stack sampling"
+
+ gen_test_program "$TEMPDIR/program.c"
+ cc -fno-inline -g "$TEMPDIR/program.c" -o $TMPDIR/a.out
+
+ perf record -o $TMPDIR/perf.data --branch-filter any,save_type,u -- $TMPDIR/a.out > /dev/null 2>&1
+ perf script -i $TMPDIR/perf.data --fields brstacksym | xargs -n1 > $TMPDIR/perf.script
+
+ # example of branch entries:
+ # foo+0x14/bar+0x40/P/-/-/0/CALL
+
+ set -x
+ egrep -m1 "^bench\+[^ ]*/foo\+[^ ]*/IND_CALL$" $TMPDIR/perf.script
+ egrep -m1 "^foo\+[^ ]*/bar\+[^ ]*/CALL$" $TMPDIR/perf.script
+ egrep -m1 "^bench\+[^ ]*/foo\+[^ ]*/CALL$" $TMPDIR/perf.script
+ egrep -m1 "^bench\+[^ ]*/bar\+[^ ]*/CALL$" $TMPDIR/perf.script
+ egrep -m1 "^bar\+[^ ]*/foo\+[^ ]*/RET$" $TMPDIR/perf.script
+ egrep -m1 "^foo\+[^ ]*/bench\+[^ ]*/RET$" $TMPDIR/perf.script
+ egrep -m1 "^bench\+[^ ]*/bench\+[^ ]*/COND$" $TMPDIR/perf.script
+ egrep -m1 "^main\+[^ ]*/main\+[^ ]*/UNCOND$" $TMPDIR/perf.script
+ set +x
+
+ # some branch types are still not being tested:
+ # IND COND_CALL COND_RET SYSCALL SYSRET IRQ SERROR NO_TX
+}
+
+# first argument <arg0> is the argument passed to "--branch-stack <arg0>,save_type,u"
+# second argument are the expected branch types for the given filter
+test_filter() {
+ local filter=$1
+ local expect=$2
+
+ echo "Testing branch stack filtering permutation ($filter,$expect)"
+
+ gen_test_program "$TEMPDIR/program.c"
+ cc -fno-inline -g "$TEMPDIR/program.c" -o $TMPDIR/a.out
+
+ perf record -o $TMPDIR/perf.data --branch-filter $filter,save_type,u -- $TMPDIR/a.out > /dev/null 2>&1
+ perf script -i $TMPDIR/perf.data --fields brstack | xargs -n1 > $TMPDIR/perf.script
+
+ # fail if we find any branch type that doesn't match any of the expected ones
+ # also consider UNKNOWN branch types (-)
+ if egrep -vm1 "^[^ ]*/($expect|-|( *))$" $TMPDIR/perf.script; then
+ return 1
+ fi
+}
+
+set -e
+
+test_user_branches
+
+test_filter "any_call" "CALL|IND_CALL|COND_CALL|SYSCALL|IRQ"
+test_filter "call" "CALL|SYSCALL"
+test_filter "cond" "COND"
+test_filter "any_ret" "RET|COND_RET|SYSRET|ERET"
+
+test_filter "call,cond" "CALL|SYSCALL|COND"
+test_filter "any_call,cond" "CALL|IND_CALL|COND_CALL|IRQ|SYSCALL|COND"
+test_filter "cond,any_call,any_ret" "COND|CALL|IND_CALL|COND_CALL|SYSCALL|IRQ|RET|COND_RET|SYSRET|ERET"
diff --git a/tools/perf/tests/switch-tracking.c b/tools/perf/tests/switch-tracking.c
index 0c0c2328bf4e..2d46af9ef935 100644
--- a/tools/perf/tests/switch-tracking.c
+++ b/tools/perf/tests/switch-tracking.c
@@ -324,6 +324,7 @@ out_free_nodes:
static int test__switch_tracking(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
const char *sched_switch = "sched:sched_switch";
+ const char *cycles = "cycles:u";
struct switch_tracking switch_tracking = { .tids = NULL, };
struct record_opts opts = {
.mmap_pages = UINT_MAX,
@@ -363,7 +364,7 @@ static int test__switch_tracking(struct test_suite *test __maybe_unused, int sub
perf_evlist__set_maps(&evlist->core, cpus, threads);
/* First event */
- err = parse_events(evlist, "cpu-clock:u", NULL);
+ err = parse_event(evlist, "cpu-clock:u");
if (err) {
pr_debug("Failed to parse event dummy:u\n");
goto out_err;
@@ -372,12 +373,19 @@ static int test__switch_tracking(struct test_suite *test __maybe_unused, int sub
cpu_clocks_evsel = evlist__last(evlist);
/* Second event */
- if (perf_pmu__has_hybrid())
- err = parse_events(evlist, "cpu_core/cycles/u", NULL);
- else
- err = parse_events(evlist, "cycles:u", NULL);
+ if (perf_pmu__has_hybrid()) {
+ cycles = "cpu_core/cycles/u";
+ err = parse_event(evlist, cycles);
+ if (err) {
+ cycles = "cpu_atom/cycles/u";
+ pr_debug("Trying %s\n", cycles);
+ err = parse_event(evlist, cycles);
+ }
+ } else {
+ err = parse_event(evlist, cycles);
+ }
if (err) {
- pr_debug("Failed to parse event cycles:u\n");
+ pr_debug("Failed to parse event %s\n", cycles);
goto out_err;
}
@@ -390,7 +398,7 @@ static int test__switch_tracking(struct test_suite *test __maybe_unused, int sub
goto out;
}
- err = parse_events(evlist, sched_switch, NULL);
+ err = parse_event(evlist, sched_switch);
if (err) {
pr_debug("Failed to parse event %s\n", sched_switch);
goto out_err;
@@ -420,7 +428,7 @@ static int test__switch_tracking(struct test_suite *test __maybe_unused, int sub
evsel__set_sample_bit(cycles_evsel, TIME);
/* Fourth event */
- err = parse_events(evlist, "dummy:u", NULL);
+ err = parse_event(evlist, "dummy:u");
if (err) {
pr_debug("Failed to parse event dummy:u\n");
goto out_err;
diff --git a/tools/perf/trace/beauty/include/linux/socket.h b/tools/perf/trace/beauty/include/linux/socket.h
index 17311ad9f9af..de3701a2a212 100644
--- a/tools/perf/trace/beauty/include/linux/socket.h
+++ b/tools/perf/trace/beauty/include/linux/socket.h
@@ -14,6 +14,8 @@ struct file;
struct pid;
struct cred;
struct socket;
+struct sock;
+struct sk_buff;
#define __sockaddr_check_size(size) \
BUILD_BUG_ON(((size) > sizeof(struct __kernel_sockaddr_storage)))
@@ -69,6 +71,9 @@ struct msghdr {
unsigned int msg_flags; /* flags on received message */
__kernel_size_t msg_controllen; /* ancillary data buffer length */
struct kiocb *msg_iocb; /* ptr to iocb for async requests */
+ struct ubuf_info *msg_ubuf;
+ int (*sg_from_iter)(struct sock *sk, struct sk_buff *skb,
+ struct iov_iter *from, size_t length);
};
struct user_msghdr {
@@ -416,10 +421,9 @@ extern int recvmsg_copy_msghdr(struct msghdr *msg,
struct user_msghdr __user *umsg, unsigned flags,
struct sockaddr __user **uaddr,
struct iovec **iov);
-extern int __copy_msghdr_from_user(struct msghdr *kmsg,
- struct user_msghdr __user *umsg,
- struct sockaddr __user **save_addr,
- struct iovec __user **uiov, size_t *nsegs);
+extern int __copy_msghdr(struct msghdr *kmsg,
+ struct user_msghdr *umsg,
+ struct sockaddr __user **save_addr);
/* helpers which do the actual work for syscalls */
extern int __sys_recvfrom(int fd, void __user *ubuf, size_t size,
@@ -428,10 +432,6 @@ extern int __sys_recvfrom(int fd, void __user *ubuf, size_t size,
extern int __sys_sendto(int fd, void __user *buff, size_t len,
unsigned int flags, struct sockaddr __user *addr,
int addr_len);
-extern int __sys_accept4_file(struct file *file, unsigned file_flags,
- struct sockaddr __user *upeer_sockaddr,
- int __user *upeer_addrlen, int flags,
- unsigned long nofile);
extern struct file *do_accept(struct file *file, unsigned file_flags,
struct sockaddr __user *upeer_sockaddr,
int __user *upeer_addrlen, int flags);
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index a51267d88ca9..9dfae1bda9cc 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -26,6 +26,8 @@ perf-y += mmap.o
perf-y += memswap.o
perf-y += parse-events.o
perf-y += parse-events-hybrid.o
+perf-y += print-events.o
+perf-y += tracepoint.o
perf-y += perf_regs.o
perf-y += path.o
perf-y += print_binary.o
@@ -148,6 +150,8 @@ perf-$(CONFIG_PERF_BPF_SKEL) += bpf_counter.o
perf-$(CONFIG_PERF_BPF_SKEL) += bpf_counter_cgroup.o
perf-$(CONFIG_PERF_BPF_SKEL) += bpf_ftrace.o
perf-$(CONFIG_PERF_BPF_SKEL) += bpf_off_cpu.o
+perf-$(CONFIG_PERF_BPF_SKEL) += bpf_kwork.o
+perf-$(CONFIG_PERF_BPF_SKEL) += bpf_lock_contention.o
perf-$(CONFIG_BPF_PROLOGUE) += bpf-prologue.o
perf-$(CONFIG_LIBELF) += symbol-elf.o
perf-$(CONFIG_LIBELF) += probe-file.o
@@ -285,6 +289,7 @@ CFLAGS_hweight.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ET
CFLAGS_parse-events.o += -Wno-redundant-decls
CFLAGS_expr.o += -Wno-redundant-decls
CFLAGS_header.o += -include $(OUTPUT)PERF-VERSION-FILE
+CFLAGS_arm-spe.o += -I$(srctree)/tools/arch/arm64/include/
$(OUTPUT)util/kallsyms.o: ../lib/symbol/kallsyms.c FORCE
$(call rule_mkdir)
diff --git a/tools/perf/util/amd-sample-raw.c b/tools/perf/util/amd-sample-raw.c
index d19d765195c5..238305868644 100644
--- a/tools/perf/util/amd-sample-raw.c
+++ b/tools/perf/util/amd-sample-raw.c
@@ -18,6 +18,7 @@
#include "pmu-events/pmu-events.h"
static u32 cpu_family, cpu_model, ibs_fetch_type, ibs_op_type;
+static bool zen4_ibs_extensions;
static void pr_ibs_fetch_ctl(union ibs_fetch_ctl reg)
{
@@ -39,6 +40,7 @@ static void pr_ibs_fetch_ctl(union ibs_fetch_ctl reg)
};
const char *ic_miss_str = NULL;
const char *l1tlb_pgsz_str = NULL;
+ char l3_miss_str[sizeof(" L3MissOnly _ FetchOcMiss _ FetchL3Miss _")] = "";
if (cpu_family == 0x19 && cpu_model < 0x10) {
/*
@@ -53,12 +55,19 @@ static void pr_ibs_fetch_ctl(union ibs_fetch_ctl reg)
ic_miss_str = ic_miss_strs[reg.ic_miss];
}
+ if (zen4_ibs_extensions) {
+ snprintf(l3_miss_str, sizeof(l3_miss_str),
+ " L3MissOnly %d FetchOcMiss %d FetchL3Miss %d",
+ reg.l3_miss_only, reg.fetch_oc_miss, reg.fetch_l3_miss);
+ }
+
printf("ibs_fetch_ctl:\t%016llx MaxCnt %7d Cnt %7d Lat %5d En %d Val %d Comp %d%s "
- "PhyAddrValid %d%s L1TlbMiss %d L2TlbMiss %d RandEn %d%s\n",
+ "PhyAddrValid %d%s L1TlbMiss %d L2TlbMiss %d RandEn %d%s%s\n",
reg.val, reg.fetch_maxcnt << 4, reg.fetch_cnt << 4, reg.fetch_lat,
reg.fetch_en, reg.fetch_val, reg.fetch_comp, ic_miss_str ? : "",
reg.phy_addr_valid, l1tlb_pgsz_str ? : "", reg.l1tlb_miss, reg.l2tlb_miss,
- reg.rand_en, reg.fetch_comp ? (reg.fetch_l2_miss ? " L2Miss 1" : " L2Miss 0") : "");
+ reg.rand_en, reg.fetch_comp ? (reg.fetch_l2_miss ? " L2Miss 1" : " L2Miss 0") : "",
+ l3_miss_str);
}
static void pr_ic_ibs_extd_ctl(union ic_ibs_extd_ctl reg)
@@ -68,9 +77,15 @@ static void pr_ic_ibs_extd_ctl(union ic_ibs_extd_ctl reg)
static void pr_ibs_op_ctl(union ibs_op_ctl reg)
{
- printf("ibs_op_ctl:\t%016llx MaxCnt %9d En %d Val %d CntCtl %d=%s CurCnt %9d\n",
- reg.val, ((reg.opmaxcnt_ext << 16) | reg.opmaxcnt) << 4, reg.op_en, reg.op_val,
- reg.cnt_ctl, reg.cnt_ctl ? "uOps" : "cycles", reg.opcurcnt);
+ char l3_miss_only[sizeof(" L3MissOnly _")] = "";
+
+ if (zen4_ibs_extensions)
+ snprintf(l3_miss_only, sizeof(l3_miss_only), " L3MissOnly %d", reg.l3_miss_only);
+
+ printf("ibs_op_ctl:\t%016llx MaxCnt %9d%s En %d Val %d CntCtl %d=%s CurCnt %9d\n",
+ reg.val, ((reg.opmaxcnt_ext << 16) | reg.opmaxcnt) << 4, l3_miss_only,
+ reg.op_en, reg.op_val, reg.cnt_ctl,
+ reg.cnt_ctl ? "uOps" : "cycles", reg.opcurcnt);
}
static void pr_ibs_op_data(union ibs_op_data reg)
@@ -84,7 +99,34 @@ static void pr_ibs_op_data(union ibs_op_data reg)
reg.op_brn_ret, reg.op_rip_invalid, reg.op_brn_fuse, reg.op_microcode);
}
-static void pr_ibs_op_data2(union ibs_op_data2 reg)
+static void pr_ibs_op_data2_extended(union ibs_op_data2 reg)
+{
+ static const char * const data_src_str[] = {
+ "",
+ " DataSrc 1=Local L3 or other L1/L2 in CCX",
+ " DataSrc 2=A peer cache in a near CCX",
+ " DataSrc 3=Data returned from DRAM",
+ " DataSrc 4=(reserved)",
+ " DataSrc 5=A peer cache in a far CCX",
+ " DataSrc 6=DRAM address map with \"long latency\" bit set",
+ " DataSrc 7=Data returned from MMIO/Config/PCI/APIC",
+ " DataSrc 8=Extension Memory (S-Link, GenZ, etc)",
+ " DataSrc 9=(reserved)",
+ " DataSrc 10=(reserved)",
+ " DataSrc 11=(reserved)",
+ " DataSrc 12=Peer Agent Memory",
+ /* 13 to 31 are reserved. Avoid printing them. */
+ };
+ int data_src = (reg.data_src_hi << 3) | reg.data_src_lo;
+
+ printf("ibs_op_data2:\t%016llx %sRmtNode %d%s\n", reg.val,
+ (data_src == 1 || data_src == 2 || data_src == 5) ?
+ (reg.cache_hit_st ? "CacheHitSt 1=O-State " : "CacheHitSt 0=M-state ") : "",
+ reg.rmt_node,
+ data_src < (int)ARRAY_SIZE(data_src_str) ? data_src_str[data_src] : "");
+}
+
+static void pr_ibs_op_data2_default(union ibs_op_data2 reg)
{
static const char * const data_src_str[] = {
"",
@@ -98,9 +140,16 @@ static void pr_ibs_op_data2(union ibs_op_data2 reg)
};
printf("ibs_op_data2:\t%016llx %sRmtNode %d%s\n", reg.val,
- reg.data_src == 2 ? (reg.cache_hit_st ? "CacheHitSt 1=O-State "
+ reg.data_src_lo == 2 ? (reg.cache_hit_st ? "CacheHitSt 1=O-State "
: "CacheHitSt 0=M-state ") : "",
- reg.rmt_node, data_src_str[reg.data_src]);
+ reg.rmt_node, data_src_str[reg.data_src_lo]);
+}
+
+static void pr_ibs_op_data2(union ibs_op_data2 reg)
+{
+ if (zen4_ibs_extensions)
+ return pr_ibs_op_data2_extended(reg);
+ pr_ibs_op_data2_default(reg);
}
static void pr_ibs_op_data3(union ibs_op_data3 reg)
@@ -279,6 +328,9 @@ bool evlist__has_amd_ibs(struct evlist *evlist)
pmu_mapping += strlen(pmu_mapping) + 1 /* '\0' */;
}
+ if (perf_env__find_pmu_cap(env, "ibs_op", "zen4_ibs_extensions"))
+ zen4_ibs_extensions = 1;
+
if (ibs_fetch_type || ibs_op_type) {
if (!cpu_family)
parse_cpuid(env);
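In pr_ibs_op_data2_extended() the widened Zen 4 DataSrc value is rebuilt as (reg.data_src_hi << 3) | reg.data_src_lo before it indexes the description table. A tiny standalone sketch of that recombination, for illustration only and not part of the patch:

#include <stdio.h>

/* Rebuild the wide DataSrc value from its two bit-fields, as done in
 * pr_ibs_op_data2_extended() above: the low field keeps its original
 * three bits, the high field supplies the upper bits. */
static int combine_data_src(unsigned int hi, unsigned int lo)
{
	return (int)((hi << 3) | lo);
}

int main(void)
{
	/* hi=1, lo=4 -> DataSrc 12 ("Peer Agent Memory" in the table above) */
	printf("data_src = %d\n", combine_data_src(1, 4));
	return 0;
}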
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 82cc396ef516..2c6a485c3de5 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -1720,6 +1720,7 @@ fallback:
#include <bpf/btf.h>
#include <bpf/libbpf.h>
#include <linux/btf.h>
+#include <tools/dis-asm-compat.h>
static int symbol__disassemble_bpf(struct symbol *sym,
struct annotate_args *args)
@@ -1762,9 +1763,9 @@ static int symbol__disassemble_bpf(struct symbol *sym,
ret = errno;
goto out;
}
- init_disassemble_info(&info, s,
- (fprintf_ftype) fprintf);
-
+ init_disassemble_info_compat(&info, s,
+ (fprintf_ftype) fprintf,
+ fprintf_styled);
info.arch = bfd_get_arch(bfdf);
info.mach = bfd_get_mach(bfdf);
diff --git a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c
index 5e390a1a79ab..091987dd3966 100644
--- a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c
+++ b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c
@@ -220,6 +220,7 @@ static int arm_spe_read_record(struct arm_spe_decoder *decoder)
break;
case ARM_SPE_DATA_SOURCE:
+ decoder->record.source = payload;
break;
case ARM_SPE_BAD:
break;
diff --git a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
index 69b31084d6be..46a61df1145b 100644
--- a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
+++ b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
@@ -29,6 +29,17 @@ enum arm_spe_op_type {
ARM_SPE_ST = 1 << 1,
};
+enum arm_spe_neoverse_data_source {
+ ARM_SPE_NV_L1D = 0x0,
+ ARM_SPE_NV_L2 = 0x8,
+ ARM_SPE_NV_PEER_CORE = 0x9,
+ ARM_SPE_NV_LOCAL_CLUSTER = 0xa,
+ ARM_SPE_NV_SYS_CACHE = 0xb,
+ ARM_SPE_NV_PEER_CLUSTER = 0xc,
+ ARM_SPE_NV_REMOTE = 0xd,
+ ARM_SPE_NV_DRAM = 0xe,
+};
+
struct arm_spe_record {
enum arm_spe_sample_type type;
int err;
@@ -40,6 +51,7 @@ struct arm_spe_record {
u64 virt_addr;
u64 phys_addr;
u64 context_id;
+ u16 source;
};
struct arm_spe_insn;
diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c
index d040406f3314..22dcfe07e886 100644
--- a/tools/perf/util/arm-spe.c
+++ b/tools/perf/util/arm-spe.c
@@ -34,6 +34,7 @@
#include "arm-spe-decoder/arm-spe-decoder.h"
#include "arm-spe-decoder/arm-spe-pkt-decoder.h"
+#include "../../arch/arm64/include/asm/cputype.h"
#define MAX_TIMESTAMP (~0ULL)
struct arm_spe {
@@ -45,6 +46,7 @@ struct arm_spe {
struct perf_session *session;
struct machine *machine;
u32 pmu_type;
+ u64 midr;
struct perf_tsc_conversion tc;
@@ -387,35 +389,128 @@ static int arm_spe__synth_instruction_sample(struct arm_spe_queue *speq,
return arm_spe_deliver_synth_event(spe, speq, event, &sample);
}
-static u64 arm_spe__synth_data_source(const struct arm_spe_record *record)
+static const struct midr_range neoverse_spe[] = {
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
+ {},
+};
+
+static void arm_spe__synth_data_source_neoverse(const struct arm_spe_record *record,
+ union perf_mem_data_src *data_src)
{
- union perf_mem_data_src data_src = { 0 };
+ /*
+ * Even though four levels of cache hierarchy are possible, no known
+ * production Neoverse systems currently include more than three levels
+ * so for the time being we assume three exist. If a production system
+ * is built with four then this function would have to be changed to
+ * detect the number of levels for reporting.
+ */
- if (record->op == ARM_SPE_LD)
- data_src.mem_op = PERF_MEM_OP_LOAD;
- else if (record->op == ARM_SPE_ST)
- data_src.mem_op = PERF_MEM_OP_STORE;
- else
- return 0;
+ /*
+ * We have no data on the hit level or data source for stores in the
+ * Neoverse SPE records.
+ */
+ if (record->op & ARM_SPE_ST) {
+ data_src->mem_lvl = PERF_MEM_LVL_NA;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_NA;
+ data_src->mem_snoop = PERF_MEM_SNOOP_NA;
+ return;
+ }
+
+ switch (record->source) {
+ case ARM_SPE_NV_L1D:
+ data_src->mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_L1;
+ data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
+ break;
+ case ARM_SPE_NV_L2:
+ data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
+ data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
+ break;
+ case ARM_SPE_NV_PEER_CORE:
+ data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
+ data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
+ break;
+ /*
+ * We don't know if this is L1 or L2 but we do know it was a cache-2-cache
+ * transfer, so set SNOOPX_PEER
+ */
+ case ARM_SPE_NV_LOCAL_CLUSTER:
+ case ARM_SPE_NV_PEER_CLUSTER:
+ data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
+ data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
+ break;
+ /*
+ * System cache is assumed to be L3
+ */
+ case ARM_SPE_NV_SYS_CACHE:
+ data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
+ data_src->mem_snoop = PERF_MEM_SNOOP_HIT;
+ break;
+ /*
+ * We don't know what level it hit in, except it came from the other
+ * socket
+ */
+ case ARM_SPE_NV_REMOTE:
+ data_src->mem_lvl = PERF_MEM_LVL_REM_CCE1;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE;
+ data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
+ data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
+ break;
+ case ARM_SPE_NV_DRAM:
+ data_src->mem_lvl = PERF_MEM_LVL_LOC_RAM | PERF_MEM_LVL_HIT;
+ data_src->mem_lvl_num = PERF_MEM_LVLNUM_RAM;
+ data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
+ break;
+ default:
+ break;
+ }
+}
+static void arm_spe__synth_data_source_generic(const struct arm_spe_record *record,
+ union perf_mem_data_src *data_src)
+{
if (record->type & (ARM_SPE_LLC_ACCESS | ARM_SPE_LLC_MISS)) {
- data_src.mem_lvl = PERF_MEM_LVL_L3;
+ data_src->mem_lvl = PERF_MEM_LVL_L3;
if (record->type & ARM_SPE_LLC_MISS)
- data_src.mem_lvl |= PERF_MEM_LVL_MISS;
+ data_src->mem_lvl |= PERF_MEM_LVL_MISS;
else
- data_src.mem_lvl |= PERF_MEM_LVL_HIT;
+ data_src->mem_lvl |= PERF_MEM_LVL_HIT;
} else if (record->type & (ARM_SPE_L1D_ACCESS | ARM_SPE_L1D_MISS)) {
- data_src.mem_lvl = PERF_MEM_LVL_L1;
+ data_src->mem_lvl = PERF_MEM_LVL_L1;
if (record->type & ARM_SPE_L1D_MISS)
- data_src.mem_lvl |= PERF_MEM_LVL_MISS;
+ data_src->mem_lvl |= PERF_MEM_LVL_MISS;
else
- data_src.mem_lvl |= PERF_MEM_LVL_HIT;
+ data_src->mem_lvl |= PERF_MEM_LVL_HIT;
}
if (record->type & ARM_SPE_REMOTE_ACCESS)
- data_src.mem_lvl |= PERF_MEM_LVL_REM_CCE1;
+ data_src->mem_lvl |= PERF_MEM_LVL_REM_CCE1;
+}
+
+static u64 arm_spe__synth_data_source(const struct arm_spe_record *record, u64 midr)
+{
+ union perf_mem_data_src data_src = { 0 };
+ bool is_neoverse = is_midr_in_range(midr, neoverse_spe);
+
+ if (record->op == ARM_SPE_LD)
+ data_src.mem_op = PERF_MEM_OP_LOAD;
+ else if (record->op == ARM_SPE_ST)
+ data_src.mem_op = PERF_MEM_OP_STORE;
+ else
+ return 0;
+
+ if (is_neoverse)
+ arm_spe__synth_data_source_neoverse(record, &data_src);
+ else
+ arm_spe__synth_data_source_generic(record, &data_src);
if (record->type & (ARM_SPE_TLB_ACCESS | ARM_SPE_TLB_MISS)) {
data_src.mem_dtlb = PERF_MEM_TLB_WK;
@@ -436,7 +531,7 @@ static int arm_spe_sample(struct arm_spe_queue *speq)
u64 data_src;
int err;
- data_src = arm_spe__synth_data_source(record);
+ data_src = arm_spe__synth_data_source(record, spe->midr);
if (spe->sample_flc) {
if (record->type & ARM_SPE_L1D_MISS) {
@@ -1178,6 +1273,8 @@ int arm_spe_process_auxtrace_info(union perf_event *event,
struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
size_t min_sz = sizeof(u64) * ARM_SPE_AUXTRACE_PRIV_MAX;
struct perf_record_time_conv *tc = &session->time_conv;
+ const char *cpuid = perf_env__cpuid(session->evlist->env);
+ u64 midr = strtol(cpuid, NULL, 16);
struct arm_spe *spe;
int err;
@@ -1197,6 +1294,7 @@ int arm_spe_process_auxtrace_info(union perf_event *event,
spe->machine = &session->machines.host; /* No kvm support */
spe->auxtrace_type = auxtrace_info->type;
spe->pmu_type = auxtrace_info->priv[ARM_SPE_PMU_TYPE];
+ spe->midr = midr;
spe->timeless_decoding = arm_spe__is_timeless_decoding(spe);
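arm_spe_process_auxtrace_info() recovers the boot CPU's MIDR from the session cpuid string with strtol(cpuid, NULL, 16) and matches it against the neoverse_spe ranges. A standalone sketch of the MIDR fields that such range matching ultimately compares, for illustration only and not part of the patch (the cpuid string below is a made-up Neoverse N1 value):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* MIDR_EL1 layout: implementer [31:24], variant [23:20],
	 * architecture [19:16], part number [15:4], revision [3:0]. */
	const char *cpuid = "0x00000000410fd0c0";	/* hypothetical Neoverse N1 cpuid string */
	unsigned long midr = strtoul(cpuid, NULL, 16);

	printf("implementer 0x%02lx part 0x%03lx revision %lu\n",
	       (midr >> 24) & 0xff, (midr >> 4) & 0xfff, midr & 0xf);
	return 0;
}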
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index 511dd3caa1bc..6edab8a16de6 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -1189,9 +1189,10 @@ void auxtrace_buffer__free(struct auxtrace_buffer *buffer)
free(buffer);
}
-void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
- int code, int cpu, pid_t pid, pid_t tid, u64 ip,
- const char *msg, u64 timestamp)
+void auxtrace_synth_guest_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
+ int code, int cpu, pid_t pid, pid_t tid, u64 ip,
+ const char *msg, u64 timestamp,
+ pid_t machine_pid, int vcpu)
{
size_t size;
@@ -1207,12 +1208,26 @@ void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int
auxtrace_error->ip = ip;
auxtrace_error->time = timestamp;
strlcpy(auxtrace_error->msg, msg, MAX_AUXTRACE_ERROR_MSG);
-
- size = (void *)auxtrace_error->msg - (void *)auxtrace_error +
- strlen(auxtrace_error->msg) + 1;
+ if (machine_pid) {
+ auxtrace_error->fmt = 2;
+ auxtrace_error->machine_pid = machine_pid;
+ auxtrace_error->vcpu = vcpu;
+ size = sizeof(*auxtrace_error);
+ } else {
+ size = (void *)auxtrace_error->msg - (void *)auxtrace_error +
+ strlen(auxtrace_error->msg) + 1;
+ }
auxtrace_error->header.size = PERF_ALIGN(size, sizeof(u64));
}
+void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
+ int code, int cpu, pid_t pid, pid_t tid, u64 ip,
+ const char *msg, u64 timestamp)
+{
+ auxtrace_synth_guest_error(auxtrace_error, type, code, cpu, pid, tid,
+ ip, msg, timestamp, 0, -1);
+}
+
int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
struct perf_tool *tool,
struct perf_session *session,
@@ -1662,6 +1677,9 @@ size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp)
if (!e->fmt)
msg = (const char *)&e->time;
+ if (e->fmt >= 2 && e->machine_pid)
+ ret += fprintf(fp, " machine_pid %d vcpu %d", e->machine_pid, e->vcpu);
+
ret += fprintf(fp, " cpu %d pid %d tid %d ip %#"PRI_lx64" code %u: %s\n",
e->cpu, e->pid, e->tid, e->ip, e->code, msg);
return ret;
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index cd0d25c2751c..6a4fbfd34c6b 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -595,6 +595,10 @@ int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
bool needs_swap);
void auxtrace_index__free(struct list_head *head);
+void auxtrace_synth_guest_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
+ int code, int cpu, pid_t pid, pid_t tid, u64 ip,
+ const char *msg, u64 timestamp,
+ pid_t machine_pid, int vcpu);
void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
int code, int cpu, pid_t pid, pid_t tid, u64 ip,
const char *msg, u64 timestamp);
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index d2c9b09ddb48..e2052f4fed33 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -1879,7 +1879,7 @@ struct evsel *bpf__setup_output_event(struct evlist *evlist, const char *name)
if (asprintf(&event_definition, "bpf-output/no-inherit=1,name=%s/", name) < 0)
return ERR_PTR(-ENOMEM);
- err = parse_events(evlist, event_definition, NULL);
+ err = parse_event(evlist, event_definition);
free(event_definition);
if (err) {
diff --git a/tools/perf/util/bpf_kwork.c b/tools/perf/util/bpf_kwork.c
new file mode 100644
index 000000000000..b629dd679d3f
--- /dev/null
+++ b/tools/perf/util/bpf_kwork.c
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * bpf_kwork.c
+ *
+ * Copyright (c) 2022 Huawei Inc, Yang Jihong <yangjihong1@huawei.com>
+ */
+
+#include <time.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#include <linux/time64.h>
+
+#include "util/debug.h"
+#include "util/kwork.h"
+
+#include <bpf/bpf.h>
+
+#include "util/bpf_skel/kwork_trace.skel.h"
+
+/*
+ * This should be in sync with "util/kwork_trace.bpf.c"
+ */
+#define MAX_KWORKNAME 128
+
+struct work_key {
+ u32 type;
+ u32 cpu;
+ u64 id;
+};
+
+struct report_data {
+ u64 nr;
+ u64 total_time;
+ u64 max_time;
+ u64 max_time_start;
+ u64 max_time_end;
+};
+
+struct kwork_class_bpf {
+ struct kwork_class *class;
+
+ void (*load_prepare)(struct perf_kwork *kwork);
+ int (*get_work_name)(struct work_key *key, char **ret_name);
+};
+
+static struct kwork_trace_bpf *skel;
+
+static struct timespec ts_start;
+static struct timespec ts_end;
+
+void perf_kwork__trace_start(void)
+{
+ clock_gettime(CLOCK_MONOTONIC, &ts_start);
+ skel->bss->enabled = 1;
+}
+
+void perf_kwork__trace_finish(void)
+{
+ clock_gettime(CLOCK_MONOTONIC, &ts_end);
+ skel->bss->enabled = 0;
+}
+
+static int get_work_name_from_map(struct work_key *key, char **ret_name)
+{
+ char name[MAX_KWORKNAME] = { 0 };
+ int fd = bpf_map__fd(skel->maps.perf_kwork_names);
+
+ *ret_name = NULL;
+
+ if (fd < 0) {
+ pr_debug("Invalid names map fd\n");
+ return 0;
+ }
+
+ if ((bpf_map_lookup_elem(fd, key, name) == 0) && (strlen(name) != 0)) {
+ *ret_name = strdup(name);
+ if (*ret_name == NULL) {
+ pr_err("Failed to copy work name\n");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static void irq_load_prepare(struct perf_kwork *kwork)
+{
+ if (kwork->report == KWORK_REPORT_RUNTIME) {
+ bpf_program__set_autoload(skel->progs.report_irq_handler_entry, true);
+ bpf_program__set_autoload(skel->progs.report_irq_handler_exit, true);
+ }
+}
+
+static struct kwork_class_bpf kwork_irq_bpf = {
+ .load_prepare = irq_load_prepare,
+ .get_work_name = get_work_name_from_map,
+};
+
+static void softirq_load_prepare(struct perf_kwork *kwork)
+{
+ if (kwork->report == KWORK_REPORT_RUNTIME) {
+ bpf_program__set_autoload(skel->progs.report_softirq_entry, true);
+ bpf_program__set_autoload(skel->progs.report_softirq_exit, true);
+ } else if (kwork->report == KWORK_REPORT_LATENCY) {
+ bpf_program__set_autoload(skel->progs.latency_softirq_raise, true);
+ bpf_program__set_autoload(skel->progs.latency_softirq_entry, true);
+ }
+}
+
+static struct kwork_class_bpf kwork_softirq_bpf = {
+ .load_prepare = softirq_load_prepare,
+ .get_work_name = get_work_name_from_map,
+};
+
+static void workqueue_load_prepare(struct perf_kwork *kwork)
+{
+ if (kwork->report == KWORK_REPORT_RUNTIME) {
+ bpf_program__set_autoload(skel->progs.report_workqueue_execute_start, true);
+ bpf_program__set_autoload(skel->progs.report_workqueue_execute_end, true);
+ } else if (kwork->report == KWORK_REPORT_LATENCY) {
+ bpf_program__set_autoload(skel->progs.latency_workqueue_activate_work, true);
+ bpf_program__set_autoload(skel->progs.latency_workqueue_execute_start, true);
+ }
+}
+
+static struct kwork_class_bpf kwork_workqueue_bpf = {
+ .load_prepare = workqueue_load_prepare,
+ .get_work_name = get_work_name_from_map,
+};
+
+static struct kwork_class_bpf *
+kwork_class_bpf_supported_list[KWORK_CLASS_MAX] = {
+ [KWORK_CLASS_IRQ] = &kwork_irq_bpf,
+ [KWORK_CLASS_SOFTIRQ] = &kwork_softirq_bpf,
+ [KWORK_CLASS_WORKQUEUE] = &kwork_workqueue_bpf,
+};
+
+static bool valid_kwork_class_type(enum kwork_class_type type)
+{
+ return type >= 0 && type < KWORK_CLASS_MAX ? true : false;
+}
+
+static int setup_filters(struct perf_kwork *kwork)
+{
+ u8 val = 1;
+ int i, nr_cpus, key, fd;
+ struct perf_cpu_map *map;
+
+ if (kwork->cpu_list != NULL) {
+ fd = bpf_map__fd(skel->maps.perf_kwork_cpu_filter);
+ if (fd < 0) {
+ pr_debug("Invalid cpu filter fd\n");
+ return -1;
+ }
+
+ map = perf_cpu_map__new(kwork->cpu_list);
+ if (map == NULL) {
+ pr_debug("Invalid cpu_list\n");
+ return -1;
+ }
+
+ nr_cpus = libbpf_num_possible_cpus();
+ for (i = 0; i < perf_cpu_map__nr(map); i++) {
+ struct perf_cpu cpu = perf_cpu_map__cpu(map, i);
+
+ if (cpu.cpu >= nr_cpus) {
+ perf_cpu_map__put(map);
+ pr_err("Requested cpu %d too large\n", cpu.cpu);
+ return -1;
+ }
+ bpf_map_update_elem(fd, &cpu.cpu, &val, BPF_ANY);
+ }
+ perf_cpu_map__put(map);
+
+ skel->bss->has_cpu_filter = 1;
+ }
+
+ if (kwork->profile_name != NULL) {
+ if (strlen(kwork->profile_name) >= MAX_KWORKNAME) {
+ pr_err("Requested name filter %s too large, limit to %d\n",
+ kwork->profile_name, MAX_KWORKNAME - 1);
+ return -1;
+ }
+
+ fd = bpf_map__fd(skel->maps.perf_kwork_name_filter);
+ if (fd < 0) {
+ pr_debug("Invalid name filter fd\n");
+ return -1;
+ }
+
+ key = 0;
+ bpf_map_update_elem(fd, &key, kwork->profile_name, BPF_ANY);
+
+ skel->bss->has_name_filter = 1;
+ }
+
+ return 0;
+}
+
+int perf_kwork__trace_prepare_bpf(struct perf_kwork *kwork)
+{
+ struct bpf_program *prog;
+ struct kwork_class *class;
+ struct kwork_class_bpf *class_bpf;
+ enum kwork_class_type type;
+
+ skel = kwork_trace_bpf__open();
+ if (!skel) {
+ pr_debug("Failed to open kwork trace skeleton\n");
+ return -1;
+ }
+
+ /*
+ * set all progs to non-autoload,
+ * then set corresponding progs according to config
+ */
+ bpf_object__for_each_program(prog, skel->obj)
+ bpf_program__set_autoload(prog, false);
+
+ list_for_each_entry(class, &kwork->class_list, list) {
+ type = class->type;
+ if (!valid_kwork_class_type(type) ||
+ (kwork_class_bpf_supported_list[type] == NULL)) {
+ pr_err("Unsupported bpf trace class %s\n", class->name);
+ goto out;
+ }
+
+ class_bpf = kwork_class_bpf_supported_list[type];
+ class_bpf->class = class;
+
+ if (class_bpf->load_prepare != NULL)
+ class_bpf->load_prepare(kwork);
+ }
+
+ if (kwork_trace_bpf__load(skel)) {
+ pr_debug("Failed to load kwork trace skeleton\n");
+ goto out;
+ }
+
+ if (setup_filters(kwork))
+ goto out;
+
+ if (kwork_trace_bpf__attach(skel)) {
+ pr_debug("Failed to attach kwork trace skeleton\n");
+ goto out;
+ }
+
+ return 0;
+
+out:
+ kwork_trace_bpf__destroy(skel);
+ return -1;
+}
+
+static int add_work(struct perf_kwork *kwork,
+ struct work_key *key,
+ struct report_data *data)
+{
+ struct kwork_work *work;
+ struct kwork_class_bpf *bpf_trace;
+ struct kwork_work tmp = {
+ .id = key->id,
+ .name = NULL,
+ .cpu = key->cpu,
+ };
+ enum kwork_class_type type = key->type;
+
+ if (!valid_kwork_class_type(type)) {
+ pr_debug("Invalid class type %d to add work\n", type);
+ return -1;
+ }
+
+ bpf_trace = kwork_class_bpf_supported_list[type];
+ tmp.class = bpf_trace->class;
+
+ if ((bpf_trace->get_work_name != NULL) &&
+ (bpf_trace->get_work_name(key, &tmp.name)))
+ return -1;
+
+ work = perf_kwork_add_work(kwork, tmp.class, &tmp);
+ if (work == NULL)
+ return -1;
+
+ if (kwork->report == KWORK_REPORT_RUNTIME) {
+ work->nr_atoms = data->nr;
+ work->total_runtime = data->total_time;
+ work->max_runtime = data->max_time;
+ work->max_runtime_start = data->max_time_start;
+ work->max_runtime_end = data->max_time_end;
+ } else if (kwork->report == KWORK_REPORT_LATENCY) {
+ work->nr_atoms = data->nr;
+ work->total_latency = data->total_time;
+ work->max_latency = data->max_time;
+ work->max_latency_start = data->max_time_start;
+ work->max_latency_end = data->max_time_end;
+ } else {
+ pr_debug("Invalid bpf report type %d\n", kwork->report);
+ return -1;
+ }
+
+ kwork->timestart = (u64)ts_start.tv_sec * NSEC_PER_SEC + ts_start.tv_nsec;
+ kwork->timeend = (u64)ts_end.tv_sec * NSEC_PER_SEC + ts_end.tv_nsec;
+
+ return 0;
+}
+
+int perf_kwork__report_read_bpf(struct perf_kwork *kwork)
+{
+ struct report_data data;
+ struct work_key key = {
+ .type = 0,
+ .cpu = 0,
+ .id = 0,
+ };
+ struct work_key prev = {
+ .type = 0,
+ .cpu = 0,
+ .id = 0,
+ };
+ int fd = bpf_map__fd(skel->maps.perf_kwork_report);
+
+ if (fd < 0) {
+ pr_debug("Invalid report fd\n");
+ return -1;
+ }
+
+ while (!bpf_map_get_next_key(fd, &prev, &key)) {
+ if ((bpf_map_lookup_elem(fd, &key, &data)) != 0) {
+ pr_debug("Failed to lookup report elem\n");
+ return -1;
+ }
+
+ if ((data.nr != 0) && (add_work(kwork, &key, &data) != 0))
+ return -1;
+
+ prev = key;
+ }
+ return 0;
+}
+
+void perf_kwork__report_cleanup_bpf(void)
+{
+ kwork_trace_bpf__destroy(skel);
+}
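perf_kwork__report_read_bpf() drains the report map from user space with the usual bpf_map_get_next_key()/bpf_map_lookup_elem() iteration. A compact sketch of that idiom, for illustration only and not part of the patch (the key/value types and drain_map() are placeholders, and fd stands for a map fd obtained via bpf_map__fd() on a loaded skeleton):

#include <bpf/bpf.h>
#include <string.h>

struct my_key { unsigned int a, b; unsigned long long c; };
struct my_val { unsigned long long nr; };

int drain_map(int fd)
{
	struct my_key key, prev;
	struct my_val val;

	/* an all-zero key that is not in the map starts the walk */
	memset(&prev, 0, sizeof(prev));
	while (!bpf_map_get_next_key(fd, &prev, &key)) {
		if (bpf_map_lookup_elem(fd, &key, &val))
			return -1;	/* element vanished or lookup failed */
		/* consume val here */
		prev = key;	/* continue from the key just visited */
	}
	return 0;
}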
diff --git a/tools/perf/util/bpf_lock_contention.c b/tools/perf/util/bpf_lock_contention.c
new file mode 100644
index 000000000000..c591a66733ef
--- /dev/null
+++ b/tools/perf/util/bpf_lock_contention.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "util/debug.h"
+#include "util/evlist.h"
+#include "util/machine.h"
+#include "util/map.h"
+#include "util/symbol.h"
+#include "util/target.h"
+#include "util/thread_map.h"
+#include "util/lock-contention.h"
+#include <linux/zalloc.h>
+#include <bpf/bpf.h>
+
+#include "bpf_skel/lock_contention.skel.h"
+
+static struct lock_contention_bpf *skel;
+
+/* should be same as bpf_skel/lock_contention.bpf.c */
+struct lock_contention_key {
+ s32 stack_id;
+};
+
+struct lock_contention_data {
+ u64 total_time;
+ u64 min_time;
+ u64 max_time;
+ u32 count;
+ u32 flags;
+};
+
+int lock_contention_prepare(struct lock_contention *con)
+{
+ int i, fd;
+ int ncpus = 1, ntasks = 1;
+ struct evlist *evlist = con->evlist;
+ struct target *target = con->target;
+
+ skel = lock_contention_bpf__open();
+ if (!skel) {
+ pr_err("Failed to open lock-contention BPF skeleton\n");
+ return -1;
+ }
+
+ bpf_map__set_max_entries(skel->maps.stacks, con->map_nr_entries);
+ bpf_map__set_max_entries(skel->maps.lock_stat, con->map_nr_entries);
+
+ if (target__has_cpu(target))
+ ncpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
+ if (target__has_task(target))
+ ntasks = perf_thread_map__nr(evlist->core.threads);
+
+ bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
+ bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
+
+ if (lock_contention_bpf__load(skel) < 0) {
+ pr_err("Failed to load lock-contention BPF skeleton\n");
+ return -1;
+ }
+
+ if (target__has_cpu(target)) {
+ u32 cpu;
+ u8 val = 1;
+
+ skel->bss->has_cpu = 1;
+ fd = bpf_map__fd(skel->maps.cpu_filter);
+
+ for (i = 0; i < ncpus; i++) {
+ cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu;
+ bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
+ }
+ }
+
+ if (target__has_task(target)) {
+ u32 pid;
+ u8 val = 1;
+
+ skel->bss->has_task = 1;
+ fd = bpf_map__fd(skel->maps.task_filter);
+
+ for (i = 0; i < ntasks; i++) {
+ pid = perf_thread_map__pid(evlist->core.threads, i);
+ bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
+ }
+ }
+
+ if (target__none(target) && evlist->workload.pid > 0) {
+ u32 pid = evlist->workload.pid;
+ u8 val = 1;
+
+ skel->bss->has_task = 1;
+ fd = bpf_map__fd(skel->maps.task_filter);
+ bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
+ }
+
+ lock_contention_bpf__attach(skel);
+ return 0;
+}
+
+int lock_contention_start(void)
+{
+ skel->bss->enabled = 1;
+ return 0;
+}
+
+int lock_contention_stop(void)
+{
+ skel->bss->enabled = 0;
+ return 0;
+}
+
+int lock_contention_read(struct lock_contention *con)
+{
+ int fd, stack;
+ s32 prev_key, key;
+ struct lock_contention_data data;
+ struct lock_stat *st;
+ struct machine *machine = con->machine;
+ u64 stack_trace[CONTENTION_STACK_DEPTH];
+
+ fd = bpf_map__fd(skel->maps.lock_stat);
+ stack = bpf_map__fd(skel->maps.stacks);
+
+ con->lost = skel->bss->lost;
+
+ prev_key = 0;
+ while (!bpf_map_get_next_key(fd, &prev_key, &key)) {
+ struct map *kmap;
+ struct symbol *sym;
+ int idx;
+
+ bpf_map_lookup_elem(fd, &key, &data);
+ st = zalloc(sizeof(*st));
+ if (st == NULL)
+ return -1;
+
+ st->nr_contended = data.count;
+ st->wait_time_total = data.total_time;
+ st->wait_time_max = data.max_time;
+ st->wait_time_min = data.min_time;
+
+ if (data.count)
+ st->avg_wait_time = data.total_time / data.count;
+
+ st->flags = data.flags;
+
+ bpf_map_lookup_elem(stack, &key, stack_trace);
+
+ /* skip BPF + lock internal functions */
+ idx = CONTENTION_STACK_SKIP;
+ while (is_lock_function(machine, stack_trace[idx]) &&
+ idx < CONTENTION_STACK_DEPTH - 1)
+ idx++;
+
+ st->addr = stack_trace[idx];
+ sym = machine__find_kernel_symbol(machine, st->addr, &kmap);
+
+ if (sym) {
+ unsigned long offset;
+ int ret = 0;
+
+ offset = kmap->map_ip(kmap, st->addr) - sym->start;
+
+ if (offset)
+ ret = asprintf(&st->name, "%s+%#lx", sym->name, offset);
+ else
+ st->name = strdup(sym->name);
+
+ if (ret < 0 || st->name == NULL)
+ return -1;
+ } else if (asprintf(&st->name, "%#lx", (unsigned long)st->addr) < 0) {
+ free(st);
+ return -1;
+ }
+
+ hlist_add_head(&st->hash_entry, con->result);
+ prev_key = key;
+ }
+
+ return 0;
+}
+
+int lock_contention_finish(void)
+{
+ if (skel) {
+ skel->bss->enabled = 0;
+ lock_contention_bpf__destroy(skel);
+ }
+
+ return 0;
+}
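At the end of lock_contention_read() the contended address is rendered either as "symbol+offset" or as a raw hex address via asprintf()/strdup(). A standalone sketch of just that formatting step, for illustration only and not part of the patch (format_lock_name() and the example symbol are made up):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Format "sym+offset" when a symbol is known, the bare address otherwise. */
static char *format_lock_name(const char *sym, unsigned long offset,
			      unsigned long addr)
{
	char *name = NULL;

	if (sym) {
		if (offset) {
			if (asprintf(&name, "%s+%#lx", sym, offset) < 0)
				return NULL;
		} else {
			name = strdup(sym);
		}
	} else if (asprintf(&name, "%#lx", addr) < 0) {
		return NULL;
	}
	return name;
}

int main(void)
{
	char *n = format_lock_name("queued_spin_lock_slowpath", 0x48, 0);

	printf("%s\n", n ? n : "(null)");
	free(n);
	return 0;
}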
diff --git a/tools/perf/util/bpf_off_cpu.c b/tools/perf/util/bpf_off_cpu.c
index f289b7713598..c257813e674e 100644
--- a/tools/perf/util/bpf_off_cpu.c
+++ b/tools/perf/util/bpf_off_cpu.c
@@ -11,11 +11,13 @@
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/cgroup.h"
+#include "util/strlist.h"
#include <bpf/bpf.h>
#include "bpf_skel/off_cpu.skel.h"
#define MAX_STACKS 32
+#define MAX_PROC 4096
/* we don't need actual timestamp, just want to put the samples at last */
#define OFF_CPU_TIMESTAMP (~0ull << 32)
@@ -78,6 +80,7 @@ static void off_cpu_start(void *arg)
u8 val = 1;
skel->bss->has_task = 1;
+ skel->bss->uses_tgid = 1;
fd = bpf_map__fd(skel->maps.task_filter);
pid = perf_thread_map__pid(evlist->core.threads, 0);
bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
@@ -124,6 +127,8 @@ int off_cpu_prepare(struct evlist *evlist, struct target *target,
{
int err, fd, i;
int ncpus = 1, ntasks = 1, ncgrps = 1;
+ struct strlist *pid_slist = NULL;
+ struct str_node *pos;
if (off_cpu_config(evlist) < 0) {
pr_err("Failed to config off-cpu BPF event\n");
@@ -142,9 +147,34 @@ int off_cpu_prepare(struct evlist *evlist, struct target *target,
bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
}
- if (target__has_task(target)) {
+ if (target->pid) {
+ pid_slist = strlist__new(target->pid, NULL);
+ if (!pid_slist) {
+ pr_err("Failed to create a strlist for pid\n");
+ return -1;
+ }
+
+ ntasks = 0;
+ strlist__for_each_entry(pos, pid_slist) {
+ char *end_ptr;
+ int pid = strtol(pos->s, &end_ptr, 10);
+
+ if (pid == INT_MIN || pid == INT_MAX ||
+ (*end_ptr != '\0' && *end_ptr != ','))
+ continue;
+
+ ntasks++;
+ }
+
+ if (ntasks < MAX_PROC)
+ ntasks = MAX_PROC;
+
+ bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
+ } else if (target__has_task(target)) {
ntasks = perf_thread_map__nr(evlist->core.threads);
bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
+ } else if (target__none(target)) {
+ bpf_map__set_max_entries(skel->maps.task_filter, MAX_PROC);
}
if (evlist__first(evlist)->cgrp) {
@@ -184,7 +214,26 @@ int off_cpu_prepare(struct evlist *evlist, struct target *target,
}
}
- if (target__has_task(target)) {
+ if (target->pid) {
+ u8 val = 1;
+
+ skel->bss->has_task = 1;
+ skel->bss->uses_tgid = 1;
+ fd = bpf_map__fd(skel->maps.task_filter);
+
+ strlist__for_each_entry(pos, pid_slist) {
+ char *end_ptr;
+ u32 tgid;
+ int pid = strtol(pos->s, &end_ptr, 10);
+
+ if (pid == INT_MIN || pid == INT_MAX ||
+ (*end_ptr != '\0' && *end_ptr != ','))
+ continue;
+
+ tgid = pid;
+ bpf_map_update_elem(fd, &tgid, &val, BPF_ANY);
+ }
+ } else if (target__has_task(target)) {
u32 pid;
u8 val = 1;
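The new target->pid path above validates each comma-separated entry with strtol() plus an end-pointer check. A slightly stricter standalone variant using errno, for illustration only and not part of the patch (parse_pid() is a made-up helper):

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Reject overflow, empty input and trailing junk other than the list comma. */
static int parse_pid(const char *s, int *pid)
{
	char *end;
	long val;

	errno = 0;
	val = strtol(s, &end, 10);
	if (errno || end == s || (*end != '\0' && *end != ','))
		return -1;
	if (val < 0 || val > INT_MAX)
		return -1;
	*pid = (int)val;
	return 0;
}

int main(void)
{
	int pid;

	if (!parse_pid("1234", &pid))
		printf("pid %d\n", pid);
	return 0;
}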
diff --git a/tools/perf/util/bpf_skel/kwork_trace.bpf.c b/tools/perf/util/bpf_skel/kwork_trace.bpf.c
new file mode 100644
index 000000000000..063c124e0999
--- /dev/null
+++ b/tools/perf/util/bpf_skel/kwork_trace.bpf.c
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+// Copyright (c) 2022, Huawei
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#define KWORK_COUNT 100
+#define MAX_KWORKNAME 128
+
+/*
+ * This should be in sync with "util/kwork.h"
+ */
+enum kwork_class_type {
+ KWORK_CLASS_IRQ,
+ KWORK_CLASS_SOFTIRQ,
+ KWORK_CLASS_WORKQUEUE,
+ KWORK_CLASS_MAX,
+};
+
+struct work_key {
+ __u32 type;
+ __u32 cpu;
+ __u64 id;
+};
+
+struct report_data {
+ __u64 nr;
+ __u64 total_time;
+ __u64 max_time;
+ __u64 max_time_start;
+ __u64 max_time_end;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(struct work_key));
+ __uint(value_size, MAX_KWORKNAME);
+ __uint(max_entries, KWORK_COUNT);
+} perf_kwork_names SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(struct work_key));
+ __uint(value_size, sizeof(__u64));
+ __uint(max_entries, KWORK_COUNT);
+} perf_kwork_time SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(struct work_key));
+ __uint(value_size, sizeof(struct report_data));
+ __uint(max_entries, KWORK_COUNT);
+} perf_kwork_report SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u8));
+ __uint(max_entries, 1);
+} perf_kwork_cpu_filter SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, MAX_KWORKNAME);
+ __uint(max_entries, 1);
+} perf_kwork_name_filter SEC(".maps");
+
+int enabled = 0;
+int has_cpu_filter = 0;
+int has_name_filter = 0;
+
+static __always_inline int local_strncmp(const char *s1,
+ unsigned int sz, const char *s2)
+{
+ int ret = 0;
+ unsigned int i;
+
+ for (i = 0; i < sz; i++) {
+ ret = (unsigned char)s1[i] - (unsigned char)s2[i];
+ if (ret || !s1[i] || !s2[i])
+ break;
+ }
+
+ return ret;
+}
+
+static __always_inline int trace_event_match(struct work_key *key, char *name)
+{
+ __u8 *cpu_val;
+ char *name_val;
+ __u32 zero = 0;
+ __u32 cpu = bpf_get_smp_processor_id();
+
+ if (!enabled)
+ return 0;
+
+ if (has_cpu_filter) {
+ cpu_val = bpf_map_lookup_elem(&perf_kwork_cpu_filter, &cpu);
+ if (!cpu_val)
+ return 0;
+ }
+
+ if (has_name_filter && (name != NULL)) {
+ name_val = bpf_map_lookup_elem(&perf_kwork_name_filter, &zero);
+ if (name_val &&
+ (local_strncmp(name_val, MAX_KWORKNAME, name) != 0)) {
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+static __always_inline void do_update_time(void *map, struct work_key *key,
+ __u64 time_start, __u64 time_end)
+{
+ struct report_data zero, *data;
+ __s64 delta = time_end - time_start;
+
+ if (delta < 0)
+ return;
+
+ data = bpf_map_lookup_elem(map, key);
+ if (!data) {
+ __builtin_memset(&zero, 0, sizeof(zero));
+ bpf_map_update_elem(map, key, &zero, BPF_NOEXIST);
+ data = bpf_map_lookup_elem(map, key);
+ if (!data)
+ return;
+ }
+
+ if ((delta > data->max_time) ||
+ (data->max_time == 0)) {
+ data->max_time = delta;
+ data->max_time_start = time_start;
+ data->max_time_end = time_end;
+ }
+
+ data->total_time += delta;
+ data->nr++;
+}
+
+static __always_inline void do_update_timestart(void *map, struct work_key *key)
+{
+ __u64 ts = bpf_ktime_get_ns();
+
+ bpf_map_update_elem(map, key, &ts, BPF_ANY);
+}
+
+static __always_inline void do_update_timeend(void *report_map, void *time_map,
+ struct work_key *key)
+{
+ __u64 *time = bpf_map_lookup_elem(time_map, key);
+
+ if (time) {
+ bpf_map_delete_elem(time_map, key);
+ do_update_time(report_map, key, *time, bpf_ktime_get_ns());
+ }
+}
+
+static __always_inline void do_update_name(void *map,
+ struct work_key *key, char *name)
+{
+ if (!bpf_map_lookup_elem(map, key))
+ bpf_map_update_elem(map, key, name, BPF_ANY);
+}
+
+static __always_inline int update_timestart(void *map, struct work_key *key)
+{
+ if (!trace_event_match(key, NULL))
+ return 0;
+
+ do_update_timestart(map, key);
+ return 0;
+}
+
+static __always_inline int update_timestart_and_name(void *time_map,
+ void *names_map,
+ struct work_key *key,
+ char *name)
+{
+ if (!trace_event_match(key, name))
+ return 0;
+
+ do_update_timestart(time_map, key);
+ do_update_name(names_map, key, name);
+
+ return 0;
+}
+
+static __always_inline int update_timeend(void *report_map,
+ void *time_map, struct work_key *key)
+{
+ if (!trace_event_match(key, NULL))
+ return 0;
+
+ do_update_timeend(report_map, time_map, key);
+
+ return 0;
+}
+
+static __always_inline int update_timeend_and_name(void *report_map,
+ void *time_map,
+ void *names_map,
+ struct work_key *key,
+ char *name)
+{
+ if (!trace_event_match(key, name))
+ return 0;
+
+ do_update_timeend(report_map, time_map, key);
+ do_update_name(names_map, key, name);
+
+ return 0;
+}
+
+SEC("tracepoint/irq/irq_handler_entry")
+int report_irq_handler_entry(struct trace_event_raw_irq_handler_entry *ctx)
+{
+ char name[MAX_KWORKNAME];
+ struct work_key key = {
+ .type = KWORK_CLASS_IRQ,
+ .cpu = bpf_get_smp_processor_id(),
+ .id = (__u64)ctx->irq,
+ };
+ void *name_addr = (void *)ctx + (ctx->__data_loc_name & 0xffff);
+
+ bpf_probe_read_kernel_str(name, sizeof(name), name_addr);
+
+ return update_timestart_and_name(&perf_kwork_time,
+ &perf_kwork_names, &key, name);
+}
+
+SEC("tracepoint/irq/irq_handler_exit")
+int report_irq_handler_exit(struct trace_event_raw_irq_handler_exit *ctx)
+{
+ struct work_key key = {
+ .type = KWORK_CLASS_IRQ,
+ .cpu = bpf_get_smp_processor_id(),
+ .id = (__u64)ctx->irq,
+ };
+
+ return update_timeend(&perf_kwork_report, &perf_kwork_time, &key);
+}
+
+static char softirq_name_list[NR_SOFTIRQS][MAX_KWORKNAME] = {
+ { "HI" },
+ { "TIMER" },
+ { "NET_TX" },
+ { "NET_RX" },
+ { "BLOCK" },
+ { "IRQ_POLL" },
+ { "TASKLET" },
+ { "SCHED" },
+ { "HRTIMER" },
+ { "RCU" },
+};
+
+SEC("tracepoint/irq/softirq_entry")
+int report_softirq_entry(struct trace_event_raw_softirq *ctx)
+{
+ unsigned int vec = ctx->vec;
+ struct work_key key = {
+ .type = KWORK_CLASS_SOFTIRQ,
+ .cpu = bpf_get_smp_processor_id(),
+ .id = (__u64)vec,
+ };
+
+ if (vec < NR_SOFTIRQS) {
+ return update_timestart_and_name(&perf_kwork_time,
+ &perf_kwork_names, &key,
+ softirq_name_list[vec]);
+ }
+
+ return 0;
+}
+
+SEC("tracepoint/irq/softirq_exit")
+int report_softirq_exit(struct trace_event_raw_softirq *ctx)
+{
+ struct work_key key = {
+ .type = KWORK_CLASS_SOFTIRQ,
+ .cpu = bpf_get_smp_processor_id(),
+ .id = (__u64)ctx->vec,
+ };
+
+ return update_timeend(&perf_kwork_report, &perf_kwork_time, &key);
+}
+
+SEC("tracepoint/irq/softirq_raise")
+int latency_softirq_raise(struct trace_event_raw_softirq *ctx)
+{
+ unsigned int vec = ctx->vec;
+ struct work_key key = {
+ .type = KWORK_CLASS_SOFTIRQ,
+ .cpu = bpf_get_smp_processor_id(),
+ .id = (__u64)vec,
+ };
+
+ if (vec < NR_SOFTIRQS) {
+ return update_timestart_and_name(&perf_kwork_time,
+ &perf_kwork_names, &key,
+ softirq_name_list[vec]);
+ }
+
+ return 0;
+}
+
+SEC("tracepoint/irq/softirq_entry")
+int latency_softirq_entry(struct trace_event_raw_softirq *ctx)
+{
+ struct work_key key = {
+ .type = KWORK_CLASS_SOFTIRQ,
+ .cpu = bpf_get_smp_processor_id(),
+ .id = (__u64)ctx->vec,
+ };
+
+ return update_timeend(&perf_kwork_report, &perf_kwork_time, &key);
+}
+
+SEC("tracepoint/workqueue/workqueue_execute_start")
+int report_workqueue_execute_start(struct trace_event_raw_workqueue_execute_start *ctx)
+{
+ struct work_key key = {
+ .type = KWORK_CLASS_WORKQUEUE,
+ .cpu = bpf_get_smp_processor_id(),
+ .id = (__u64)ctx->work,
+ };
+
+ return update_timestart(&perf_kwork_time, &key);
+}
+
+SEC("tracepoint/workqueue/workqueue_execute_end")
+int report_workqueue_execute_end(struct trace_event_raw_workqueue_execute_end *ctx)
+{
+ char name[MAX_KWORKNAME];
+ struct work_key key = {
+ .type = KWORK_CLASS_WORKQUEUE,
+ .cpu = bpf_get_smp_processor_id(),
+ .id = (__u64)ctx->work,
+ };
+ unsigned long long func_addr = (unsigned long long)ctx->function;
+
+ __builtin_memset(name, 0, sizeof(name));
+ bpf_snprintf(name, sizeof(name), "%ps", &func_addr, sizeof(func_addr));
+
+ return update_timeend_and_name(&perf_kwork_report, &perf_kwork_time,
+ &perf_kwork_names, &key, name);
+}
+
+SEC("tracepoint/workqueue/workqueue_activate_work")
+int latency_workqueue_activate_work(struct trace_event_raw_workqueue_activate_work *ctx)
+{
+ struct work_key key = {
+ .type = KWORK_CLASS_WORKQUEUE,
+ .cpu = bpf_get_smp_processor_id(),
+ .id = (__u64)ctx->work,
+ };
+
+ return update_timestart(&perf_kwork_time, &key);
+}
+
+SEC("tracepoint/workqueue/workqueue_execute_start")
+int latency_workqueue_execute_start(struct trace_event_raw_workqueue_execute_start *ctx)
+{
+ char name[MAX_KWORKNAME];
+ struct work_key key = {
+ .type = KWORK_CLASS_WORKQUEUE,
+ .cpu = bpf_get_smp_processor_id(),
+ .id = (__u64)ctx->work,
+ };
+ unsigned long long func_addr = (unsigned long long)ctx->function;
+
+ __builtin_memset(name, 0, sizeof(name));
+ bpf_snprintf(name, sizeof(name), "%ps", &func_addr, sizeof(func_addr));
+
+ return update_timeend_and_name(&perf_kwork_report, &perf_kwork_time,
+ &perf_kwork_names, &key, name);
+}
+
+char LICENSE[] SEC("license") = "Dual BSD/GPL";
diff --git a/tools/perf/util/bpf_skel/lock_contention.bpf.c b/tools/perf/util/bpf_skel/lock_contention.bpf.c
new file mode 100644
index 000000000000..9e8b94eb6320
--- /dev/null
+++ b/tools/perf/util/bpf_skel/lock_contention.bpf.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+// Copyright (c) 2022 Google
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+
+/* maximum stack trace depth */
+#define MAX_STACKS 8
+
+/* default buffer size */
+#define MAX_ENTRIES 10240
+
+struct contention_key {
+ __s32 stack_id;
+};
+
+struct contention_data {
+ __u64 total_time;
+ __u64 min_time;
+ __u64 max_time;
+ __u32 count;
+ __u32 flags;
+};
+
+struct tstamp_data {
+ __u64 timestamp;
+ __u64 lock;
+ __u32 flags;
+ __s32 stack_id;
+};
+
+/* callstack storage */
+struct {
+ __uint(type, BPF_MAP_TYPE_STACK_TRACE);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, MAX_STACKS * sizeof(__u64));
+ __uint(max_entries, MAX_ENTRIES);
+} stacks SEC(".maps");
+
+/* maintain timestamp at the beginning of contention */
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct tstamp_data);
+} tstamp SEC(".maps");
+
+/* actual lock contention statistics */
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(struct contention_key));
+ __uint(value_size, sizeof(struct contention_data));
+ __uint(max_entries, MAX_ENTRIES);
+} lock_stat SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u8));
+ __uint(max_entries, 1);
+} cpu_filter SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u8));
+ __uint(max_entries, 1);
+} task_filter SEC(".maps");
+
+/* control flags */
+int enabled;
+int has_cpu;
+int has_task;
+
+/* error stat */
+unsigned long lost;
+
+static inline int can_record(void)
+{
+ if (has_cpu) {
+ __u32 cpu = bpf_get_smp_processor_id();
+ __u8 *ok;
+
+ ok = bpf_map_lookup_elem(&cpu_filter, &cpu);
+ if (!ok)
+ return 0;
+ }
+
+ if (has_task) {
+ __u8 *ok;
+ __u32 pid = bpf_get_current_pid_tgid();
+
+ ok = bpf_map_lookup_elem(&task_filter, &pid);
+ if (!ok)
+ return 0;
+ }
+
+ return 1;
+}
+
+SEC("tp_btf/contention_begin")
+int contention_begin(u64 *ctx)
+{
+ struct task_struct *curr;
+ struct tstamp_data *pelem;
+
+ if (!enabled || !can_record())
+ return 0;
+
+ curr = bpf_get_current_task_btf();
+ pelem = bpf_task_storage_get(&tstamp, curr, NULL,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!pelem || pelem->lock)
+ return 0;
+
+ pelem->timestamp = bpf_ktime_get_ns();
+ pelem->lock = (__u64)ctx[0];
+ pelem->flags = (__u32)ctx[1];
+ pelem->stack_id = bpf_get_stackid(ctx, &stacks, BPF_F_FAST_STACK_CMP);
+
+ if (pelem->stack_id < 0)
+ lost++;
+ return 0;
+}
+
+SEC("tp_btf/contention_end")
+int contention_end(u64 *ctx)
+{
+ struct task_struct *curr;
+ struct tstamp_data *pelem;
+ struct contention_key key;
+ struct contention_data *data;
+ __u64 duration;
+
+ if (!enabled)
+ return 0;
+
+ curr = bpf_get_current_task_btf();
+ pelem = bpf_task_storage_get(&tstamp, curr, NULL, 0);
+ if (!pelem || pelem->lock != ctx[0])
+ return 0;
+
+ duration = bpf_ktime_get_ns() - pelem->timestamp;
+
+ key.stack_id = pelem->stack_id;
+ data = bpf_map_lookup_elem(&lock_stat, &key);
+ if (!data) {
+ struct contention_data first = {
+ .total_time = duration,
+ .max_time = duration,
+ .min_time = duration,
+ .count = 1,
+ .flags = pelem->flags,
+ };
+
+ bpf_map_update_elem(&lock_stat, &key, &first, BPF_NOEXIST);
+ pelem->lock = 0;
+ return 0;
+ }
+
+ __sync_fetch_and_add(&data->total_time, duration);
+ __sync_fetch_and_add(&data->count, 1);
+
+ /* FIXME: need atomic operations */
+ if (data->max_time < duration)
+ data->max_time = duration;
+ if (data->min_time > duration)
+ data->min_time = duration;
+
+ pelem->lock = 0;
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "Dual BSD/GPL";
diff --git a/tools/perf/util/bpf_skel/off_cpu.bpf.c b/tools/perf/util/bpf_skel/off_cpu.bpf.c
index cc6d7fd55118..c4ba2bcf179f 100644
--- a/tools/perf/util/bpf_skel/off_cpu.bpf.c
+++ b/tools/perf/util/bpf_skel/off_cpu.bpf.c
@@ -12,6 +12,9 @@
#define TASK_INTERRUPTIBLE 0x0001
#define TASK_UNINTERRUPTIBLE 0x0002
+/* create a new thread */
+#define CLONE_THREAD 0x10000
+
#define MAX_STACKS 32
#define MAX_ENTRIES 102400
@@ -85,6 +88,7 @@ int enabled = 0;
int has_cpu = 0;
int has_task = 0;
int has_cgroup = 0;
+int uses_tgid = 0;
const volatile bool has_prev_state = false;
const volatile bool needs_cgroup = false;
@@ -144,7 +148,12 @@ static inline int can_record(struct task_struct *t, int state)
if (has_task) {
__u8 *ok;
- __u32 pid = t->pid;
+ __u32 pid;
+
+ if (uses_tgid)
+ pid = t->tgid;
+ else
+ pid = t->pid;
ok = bpf_map_lookup_elem(&task_filter, &pid);
if (!ok)
@@ -214,6 +223,33 @@ next:
return 0;
}
+SEC("tp_btf/task_newtask")
+int on_newtask(u64 *ctx)
+{
+ struct task_struct *task;
+ u64 clone_flags;
+ u32 pid;
+ u8 val = 1;
+
+ if (!uses_tgid)
+ return 0;
+
+ task = (struct task_struct *)bpf_get_current_task();
+
+ pid = BPF_CORE_READ(task, tgid);
+ if (!bpf_map_lookup_elem(&task_filter, &pid))
+ return 0;
+
+ task = (struct task_struct *)ctx[0];
+ clone_flags = ctx[1];
+
+ pid = task->tgid;
+ if (!(clone_flags & CLONE_THREAD))
+ bpf_map_update_elem(&task_filter, &pid, &val, BPF_NOEXIST);
+
+ return 0;
+}
+
SEC("tp_btf/sched_switch")
int on_switch(u64 *ctx)
{
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index 328668f38c69..ec18ed5caf3e 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -300,12 +300,6 @@ char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size,
return __dso__build_id_filename(dso, bf, size, is_debug, is_kallsyms);
}
-#define dsos__for_each_with_build_id(pos, head) \
- list_for_each_entry(pos, head, node) \
- if (!pos->has_build_id) \
- continue; \
- else
-
static int write_buildid(const char *name, size_t name_len, struct build_id *bid,
pid_t pid, u16 misc, struct feat_fd *fd)
{
@@ -567,14 +561,11 @@ char *build_id_cache__cachedir(const char *sbuild_id, const char *name,
char *realname = (char *)name, *filename;
bool slash = is_kallsyms || is_vdso;
- if (!slash) {
+ if (!slash)
realname = nsinfo__realpath(name, nsi);
- if (!realname)
- return NULL;
- }
if (asprintf(&filename, "%s%s%s%s%s", buildid_dir, slash ? "/" : "",
- is_vdso ? DSO__NAME_VDSO : realname,
+ is_vdso ? DSO__NAME_VDSO : (realname ? realname : name),
sbuild_id ? "/" : "", sbuild_id ?: "") < 0)
filename = NULL;
@@ -631,9 +622,12 @@ static int build_id_cache__add_sdt_cache(const char *sbuild_id,
#endif
static char *build_id_cache__find_debug(const char *sbuild_id,
- struct nsinfo *nsi)
+ struct nsinfo *nsi,
+ const char *root_dir)
{
+ const char *dirname = "/usr/lib/debug/.build-id/";
char *realname = NULL;
+ char dirbuf[PATH_MAX];
char *debugfile;
struct nscookie nsc;
size_t len = 0;
@@ -642,8 +636,12 @@ static char *build_id_cache__find_debug(const char *sbuild_id,
if (!debugfile)
goto out;
- len = __symbol__join_symfs(debugfile, PATH_MAX,
- "/usr/lib/debug/.build-id/");
+ if (root_dir) {
+ path__join(dirbuf, PATH_MAX, root_dir, dirname);
+ dirname = dirbuf;
+ }
+
+ len = __symbol__join_symfs(debugfile, PATH_MAX, dirname);
snprintf(debugfile + len, PATH_MAX - len, "%.2s/%s.debug", sbuild_id,
sbuild_id + 2);
@@ -654,17 +652,21 @@ static char *build_id_cache__find_debug(const char *sbuild_id,
nsinfo__mountns_exit(&nsc);
#ifdef HAVE_DEBUGINFOD_SUPPORT
- if (realname == NULL) {
- debuginfod_client* c = debuginfod_begin();
- if (c != NULL) {
- int fd = debuginfod_find_debuginfo(c,
- (const unsigned char*)sbuild_id, 0,
- &realname);
- if (fd >= 0)
- close(fd); /* retaining reference by realname */
- debuginfod_end(c);
- }
- }
+ if (realname == NULL) {
+ debuginfod_client* c;
+
+ pr_debug("Downloading debug info with build id %s\n", sbuild_id);
+
+ c = debuginfod_begin();
+ if (c != NULL) {
+ int fd = debuginfod_find_debuginfo(c,
+ (const unsigned char*)sbuild_id, 0,
+ &realname);
+ if (fd >= 0)
+ close(fd); /* retaining reference by realname */
+ debuginfod_end(c);
+ }
+ }
#endif
out:
@@ -674,14 +676,18 @@ out:
int
build_id_cache__add(const char *sbuild_id, const char *name, const char *realname,
- struct nsinfo *nsi, bool is_kallsyms, bool is_vdso)
+ struct nsinfo *nsi, bool is_kallsyms, bool is_vdso,
+ const char *proper_name, const char *root_dir)
{
const size_t size = PATH_MAX;
char *filename = NULL, *dir_name = NULL, *linkname = zalloc(size), *tmp;
char *debugfile = NULL;
int err = -1;
- dir_name = build_id_cache__cachedir(sbuild_id, name, nsi, is_kallsyms,
+ if (!proper_name)
+ proper_name = name;
+
+ dir_name = build_id_cache__cachedir(sbuild_id, proper_name, nsi, is_kallsyms,
is_vdso);
if (!dir_name)
goto out_free;
@@ -721,7 +727,7 @@ build_id_cache__add(const char *sbuild_id, const char *name, const char *realnam
*/
if (!is_kallsyms && !is_vdso &&
strncmp(".ko", name + strlen(name) - 3, 3)) {
- debugfile = build_id_cache__find_debug(sbuild_id, nsi);
+ debugfile = build_id_cache__find_debug(sbuild_id, nsi, root_dir);
if (debugfile) {
zfree(&filename);
if (asprintf(&filename, "%s/%s", dir_name,
@@ -787,8 +793,9 @@ out_free:
return err;
}
-int build_id_cache__add_s(const char *sbuild_id, const char *name,
- struct nsinfo *nsi, bool is_kallsyms, bool is_vdso)
+int __build_id_cache__add_s(const char *sbuild_id, const char *name,
+ struct nsinfo *nsi, bool is_kallsyms, bool is_vdso,
+ const char *proper_name, const char *root_dir)
{
char *realname = NULL;
int err = -1;
@@ -802,8 +809,8 @@ int build_id_cache__add_s(const char *sbuild_id, const char *name,
goto out_free;
}
- err = build_id_cache__add(sbuild_id, name, realname, nsi, is_kallsyms, is_vdso);
-
+ err = build_id_cache__add(sbuild_id, name, realname, nsi,
+ is_kallsyms, is_vdso, proper_name, root_dir);
out_free:
if (!is_kallsyms)
free(realname);
@@ -812,14 +819,16 @@ out_free:
static int build_id_cache__add_b(const struct build_id *bid,
const char *name, struct nsinfo *nsi,
- bool is_kallsyms, bool is_vdso)
+ bool is_kallsyms, bool is_vdso,
+ const char *proper_name,
+ const char *root_dir)
{
char sbuild_id[SBUILD_ID_SIZE];
build_id__sprintf(bid, sbuild_id);
- return build_id_cache__add_s(sbuild_id, name, nsi, is_kallsyms,
- is_vdso);
+ return __build_id_cache__add_s(sbuild_id, name, nsi, is_kallsyms,
+ is_vdso, proper_name, root_dir);
}
bool build_id_cache__cached(const char *sbuild_id)
@@ -902,6 +911,10 @@ static int dso__cache_build_id(struct dso *dso, struct machine *machine,
bool is_kallsyms = dso__is_kallsyms(dso);
bool is_vdso = dso__is_vdso(dso);
const char *name = dso->long_name;
+ const char *proper_name = NULL;
+ const char *root_dir = NULL;
+ char *allocated_name = NULL;
+ int ret = 0;
if (!dso->has_build_id)
return 0;
@@ -911,11 +924,28 @@ static int dso__cache_build_id(struct dso *dso, struct machine *machine,
name = machine->mmap_name;
}
+ if (!machine__is_host(machine)) {
+ if (*machine->root_dir) {
+ root_dir = machine->root_dir;
+ ret = asprintf(&allocated_name, "%s/%s", root_dir, name);
+ if (ret < 0)
+ return ret;
+ proper_name = name;
+ name = allocated_name;
+ } else if (is_kallsyms) {
+ /* Cannot get guest kallsyms */
+ return 0;
+ }
+ }
+
if (!is_kallsyms && dso__build_id_mismatch(dso, name))
- return 0;
+ goto out_free;
- return build_id_cache__add_b(&dso->bid, name, dso->nsinfo,
- is_kallsyms, is_vdso);
+ ret = build_id_cache__add_b(&dso->bid, name, dso->nsinfo,
+ is_kallsyms, is_vdso, proper_name, root_dir);
+out_free:
+ free(allocated_name);
+ return ret;
}
static int
diff --git a/tools/perf/util/build-id.h b/tools/perf/util/build-id.h
index c19617151670..4e3a1169379b 100644
--- a/tools/perf/util/build-id.h
+++ b/tools/perf/util/build-id.h
@@ -66,10 +66,18 @@ int build_id_cache__list_build_ids(const char *pathname, struct nsinfo *nsi,
struct strlist **result);
bool build_id_cache__cached(const char *sbuild_id);
int build_id_cache__add(const char *sbuild_id, const char *name, const char *realname,
- struct nsinfo *nsi, bool is_kallsyms, bool is_vdso);
-int build_id_cache__add_s(const char *sbuild_id,
- const char *name, struct nsinfo *nsi,
- bool is_kallsyms, bool is_vdso);
+ struct nsinfo *nsi, bool is_kallsyms, bool is_vdso,
+ const char *proper_name, const char *root_dir);
+int __build_id_cache__add_s(const char *sbuild_id,
+ const char *name, struct nsinfo *nsi,
+ bool is_kallsyms, bool is_vdso,
+ const char *proper_name, const char *root_dir);
+static inline int build_id_cache__add_s(const char *sbuild_id,
+ const char *name, struct nsinfo *nsi,
+ bool is_kallsyms, bool is_vdso)
+{
+ return __build_id_cache__add_s(sbuild_id, name, nsi, is_kallsyms, is_vdso, NULL, NULL);
+}
int build_id_cache__remove_s(const char *sbuild_id);
extern char buildid_dir[];
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 5c27a4b2e7a7..7e663673f79f 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -31,6 +31,7 @@
#include "callchain.h"
#include "branch.h"
#include "symbol.h"
+#include "util.h"
#include "../perf.h"
#define CALLCHAIN_PARAM_DEFAULT \
@@ -266,12 +267,17 @@ int parse_callchain_record(const char *arg, struct callchain_param *param)
do {
/* Framepointer style */
if (!strncmp(name, "fp", sizeof("fp"))) {
- if (!strtok_r(NULL, ",", &saveptr)) {
- param->record_mode = CALLCHAIN_FP;
- ret = 0;
- } else
- pr_err("callchain: No more arguments "
- "needed for --call-graph fp\n");
+ ret = 0;
+ param->record_mode = CALLCHAIN_FP;
+
+ tok = strtok_r(NULL, ",", &saveptr);
+ if (tok) {
+ unsigned long size;
+
+ size = strtoul(tok, &name, 0);
+ if (size < (unsigned) sysctl__max_stack())
+ param->max_stack = size;
+ }
break;
/* Dwarf style */
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 12b2243222b0..ae43fb88f444 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -22,54 +22,102 @@ static int max_node_num;
*/
static int *cpunode_map;
-static struct perf_cpu_map *cpu_map__from_entries(struct cpu_map_entries *cpus)
+bool perf_record_cpu_map_data__test_bit(int i,
+ const struct perf_record_cpu_map_data *data)
+{
+ int bit_word32 = i / 32;
+ __u32 bit_mask32 = 1U << (i & 31);
+ int bit_word64 = i / 64;
+ __u64 bit_mask64 = ((__u64)1) << (i & 63);
+
+ return (data->mask32_data.long_size == 4)
+ ? (bit_word32 < data->mask32_data.nr) &&
+ (data->mask32_data.mask[bit_word32] & bit_mask32) != 0
+ : (bit_word64 < data->mask64_data.nr) &&
+ (data->mask64_data.mask[bit_word64] & bit_mask64) != 0;
+}
+
+/* Read ith mask value from data into the given 64-bit sized bitmap */
+static void perf_record_cpu_map_data__read_one_mask(const struct perf_record_cpu_map_data *data,
+ int i, unsigned long *bitmap)
+{
+#if __SIZEOF_LONG__ == 8
+ if (data->mask32_data.long_size == 4)
+ bitmap[0] = data->mask32_data.mask[i];
+ else
+ bitmap[0] = data->mask64_data.mask[i];
+#else
+ if (data->mask32_data.long_size == 4) {
+ bitmap[0] = data->mask32_data.mask[i];
+ bitmap[1] = 0;
+ } else {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ bitmap[0] = (unsigned long)(data->mask64_data.mask[i] >> 32);
+ bitmap[1] = (unsigned long)data->mask64_data.mask[i];
+#else
+ bitmap[0] = (unsigned long)data->mask64_data.mask[i];
+ bitmap[1] = (unsigned long)(data->mask64_data.mask[i] >> 32);
+#endif
+ }
+#endif
+}
+static struct perf_cpu_map *cpu_map__from_entries(const struct perf_record_cpu_map_data *data)
{
struct perf_cpu_map *map;
- map = perf_cpu_map__empty_new(cpus->nr);
+ map = perf_cpu_map__empty_new(data->cpus_data.nr);
if (map) {
unsigned i;
- for (i = 0; i < cpus->nr; i++) {
+ for (i = 0; i < data->cpus_data.nr; i++) {
/*
* Special treatment for -1, which is not real cpu number,
* and we need to use (int) -1 to initialize map[i],
* otherwise it would become 65535.
*/
- if (cpus->cpu[i] == (u16) -1)
+ if (data->cpus_data.cpu[i] == (u16) -1)
map->map[i].cpu = -1;
else
- map->map[i].cpu = (int) cpus->cpu[i];
+ map->map[i].cpu = (int) data->cpus_data.cpu[i];
}
}
return map;
}
-static struct perf_cpu_map *cpu_map__from_mask(struct perf_record_record_cpu_map *mask)
+static struct perf_cpu_map *cpu_map__from_mask(const struct perf_record_cpu_map_data *data)
{
+ DECLARE_BITMAP(local_copy, 64);
+ int weight = 0, mask_nr = data->mask32_data.nr;
struct perf_cpu_map *map;
- int nr, nbits = mask->nr * mask->long_size * BITS_PER_BYTE;
- nr = bitmap_weight(mask->mask, nbits);
+ for (int i = 0; i < mask_nr; i++) {
+ perf_record_cpu_map_data__read_one_mask(data, i, local_copy);
+ weight += bitmap_weight(local_copy, 64);
+ }
+
+ map = perf_cpu_map__empty_new(weight);
+ if (!map)
+ return NULL;
- map = perf_cpu_map__empty_new(nr);
- if (map) {
- int cpu, i = 0;
+ for (int i = 0, j = 0; i < mask_nr; i++) {
+ int cpus_per_i = (i * data->mask32_data.long_size * BITS_PER_BYTE);
+ int cpu;
- for_each_set_bit(cpu, mask->mask, nbits)
- map->map[i++].cpu = cpu;
+ perf_record_cpu_map_data__read_one_mask(data, i, local_copy);
+ for_each_set_bit(cpu, local_copy, 64)
+ map->map[j++].cpu = cpu + cpus_per_i;
}
return map;
}
-struct perf_cpu_map *cpu_map__new_data(struct perf_record_cpu_map_data *data)
+struct perf_cpu_map *cpu_map__new_data(const struct perf_record_cpu_map_data *data)
{
if (data->type == PERF_CPU_MAP__CPUS)
- return cpu_map__from_entries((struct cpu_map_entries *)data->data);
+ return cpu_map__from_entries(data);
else
- return cpu_map__from_mask((struct perf_record_record_cpu_map *)data->data);
+ return cpu_map__from_mask(data);
}
size_t cpu_map__fprintf(struct perf_cpu_map *map, FILE *fp)
diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h
index 703ae6d3386e..fa8a5acdcae1 100644
--- a/tools/perf/util/cpumap.h
+++ b/tools/perf/util/cpumap.h
@@ -37,9 +37,11 @@ struct cpu_aggr_map {
struct perf_record_cpu_map_data;
+bool perf_record_cpu_map_data__test_bit(int i, const struct perf_record_cpu_map_data *data);
+
struct perf_cpu_map *perf_cpu_map__empty_new(int nr);
-struct perf_cpu_map *cpu_map__new_data(struct perf_record_cpu_map_data *data);
+struct perf_cpu_map *cpu_map__new_data(const struct perf_record_cpu_map_data *data);
size_t cpu_map__snprint(struct perf_cpu_map *map, char *buf, size_t size);
size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size);
size_t cpu_map__fprintf(struct perf_cpu_map *map, FILE *fp);
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index 8b95fb3c4d7b..16db965ac995 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -1451,7 +1451,7 @@ static int cs_etm__sample(struct cs_etm_queue *etmq,
* tidq->packet->instr_count represents the number of
* instructions in the current etm packet.
*
- * Period instructions (Pi) contains the the number of
+ * Period instructions (Pi) contains the number of
* instructions executed after the sample point(n) from the
* previous etm packet. This will always be less than
* etm->instructions_sample_period.
diff --git a/tools/perf/util/data-convert-json.c b/tools/perf/util/data-convert-json.c
index f1ab6edba446..613d6ae82663 100644
--- a/tools/perf/util/data-convert-json.c
+++ b/tools/perf/util/data-convert-json.c
@@ -149,6 +149,7 @@ static int process_sample_event(struct perf_tool *tool,
struct convert_json *c = container_of(tool, struct convert_json, tool);
FILE *out = c->out;
struct addr_location al, tal;
+ u64 sample_type = __evlist__combined_sample_type(evsel->evlist);
u8 cpumode = PERF_RECORD_MISC_USER;
if (machine__resolve(machine, &al, sample) < 0) {
@@ -168,7 +169,9 @@ static int process_sample_event(struct perf_tool *tool,
output_json_key_format(out, true, 3, "pid", "%i", al.thread->pid_);
output_json_key_format(out, true, 3, "tid", "%i", al.thread->tid);
- if (al.thread->cpu >= 0)
+ if ((sample_type & PERF_SAMPLE_CPU))
+ output_json_key_format(out, true, 3, "cpu", "%i", sample->cpu);
+ else if (al.thread->cpu >= 0)
output_json_key_format(out, true, 3, "cpu", "%i", al.thread->cpu);
output_json_key_string(out, true, 3, "comm", thread__comm_str(al.thread));
diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
index caabeac24c69..a7f68c309545 100644
--- a/tools/perf/util/data.c
+++ b/tools/perf/util/data.c
@@ -3,6 +3,7 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/zalloc.h>
+#include <linux/err.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <errno.h>
@@ -481,16 +482,21 @@ int perf_data__make_kcore_dir(struct perf_data *data, char *buf, size_t buf_sz)
bool has_kcore_dir(const char *path)
{
- char *kcore_dir;
- int ret;
-
- if (asprintf(&kcore_dir, "%s/kcore_dir", path) < 0)
- return false;
-
- ret = access(kcore_dir, F_OK);
+ struct dirent *d = ERR_PTR(-EINVAL);
+ const char *name = "kcore_dir";
+ DIR *dir = opendir(path);
+ size_t n = strlen(name);
+ bool result = false;
+
+ if (dir) {
+ while (d && !result) {
+ d = readdir(dir);
+ result = d ? strncmp(d->d_name, name, n) == 0 : false;
+ }
+ closedir(dir);
+ }
- free(kcore_dir);
- return !ret;
+ return result;
}
char *perf_data__kallsyms_name(struct perf_data *data)
@@ -512,6 +518,25 @@ char *perf_data__kallsyms_name(struct perf_data *data)
return kallsyms_name;
}
+char *perf_data__guest_kallsyms_name(struct perf_data *data, pid_t machine_pid)
+{
+ char *kallsyms_name;
+ struct stat st;
+
+ if (!data->is_dir)
+ return NULL;
+
+ if (asprintf(&kallsyms_name, "%s/kcore_dir__%d/kallsyms", data->path, machine_pid) < 0)
+ return NULL;
+
+ if (stat(kallsyms_name, &st)) {
+ free(kallsyms_name);
+ return NULL;
+ }
+
+ return kallsyms_name;
+}
+
bool is_perf_data(const char *path)
{
bool ret = false;
diff --git a/tools/perf/util/data.h b/tools/perf/util/data.h
index 7de53d6e2d7f..effcc195d7e9 100644
--- a/tools/perf/util/data.h
+++ b/tools/perf/util/data.h
@@ -4,6 +4,7 @@
#include <stdio.h>
#include <stdbool.h>
+#include <unistd.h>
#include <linux/types.h>
enum perf_data_mode {
@@ -101,5 +102,6 @@ unsigned long perf_data__size(struct perf_data *data);
int perf_data__make_kcore_dir(struct perf_data *data, char *buf, size_t buf_sz);
bool has_kcore_dir(const char *path);
char *perf_data__kallsyms_name(struct perf_data *data);
+char *perf_data__guest_kallsyms_name(struct perf_data *data, pid_t machine_pid);
bool is_perf_data(const char *path);
#endif /* __PERF_DATA_H */
diff --git a/tools/perf/util/dlfilter.c b/tools/perf/util/dlfilter.c
index db964d5a52af..54e4d4495e00 100644
--- a/tools/perf/util/dlfilter.c
+++ b/tools/perf/util/dlfilter.c
@@ -495,6 +495,8 @@ int dlfilter__do_filter_event(struct dlfilter *d,
ASSIGN(misc);
ASSIGN(raw_size);
ASSIGN(raw_data);
+ ASSIGN(machine_pid);
+ ASSIGN(vcpu);
if (sample->branch_stack) {
d_sample.brstack_nr = sample->branch_stack->nr;
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index 97047a11282b..66981c7a9a18 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -227,6 +227,12 @@ struct dso {
#define dso__for_each_symbol(dso, pos, n) \
symbols__for_each_entry(&(dso)->symbols, pos, n)
+#define dsos__for_each_with_build_id(pos, head) \
+ list_for_each_entry(pos, head, node) \
+ if (!pos->has_build_id) \
+ continue; \
+ else
+
static inline void dso__set_loaded(struct dso *dso)
{
dso->loaded = true;
diff --git a/tools/perf/util/dsos.c b/tools/perf/util/dsos.c
index b97366f77bbf..2bd23e4cf19e 100644
--- a/tools/perf/util/dsos.c
+++ b/tools/perf/util/dsos.c
@@ -23,8 +23,19 @@ static int __dso_id__cmp(struct dso_id *a, struct dso_id *b)
if (a->ino > b->ino) return -1;
if (a->ino < b->ino) return 1;
- if (a->ino_generation > b->ino_generation) return -1;
- if (a->ino_generation < b->ino_generation) return 1;
+ /*
+ * Synthesized MMAP events have zero ino_generation, avoid comparing
+ * them with MMAP events with actual ino_generation.
+ *
+ * I found it harmful because the mismatch resulted in a new
+ * dso that did not have a build ID whereas the original dso did have a
+ * build ID. The build ID was essential because the object was not found
+ * otherwise. - Adrian
+ */
+ if (a->ino_generation && b->ino_generation) {
+ if (a->ino_generation > b->ino_generation) return -1;
+ if (a->ino_generation < b->ino_generation) return 1;
+ }
return 0;
}
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index 579e44c59914..5b8cf6a421a4 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -179,7 +179,7 @@ static void perf_env__purge_bpf(struct perf_env *env __maybe_unused)
void perf_env__exit(struct perf_env *env)
{
- int i;
+ int i, j;
perf_env__purge_bpf(env);
perf_env__purge_cgroups(env);
@@ -196,6 +196,8 @@ void perf_env__exit(struct perf_env *env)
zfree(&env->sibling_threads);
zfree(&env->pmu_mappings);
zfree(&env->cpu);
+ for (i = 0; i < env->nr_cpu_pmu_caps; i++)
+ zfree(&env->cpu_pmu_caps[i]);
zfree(&env->cpu_pmu_caps);
zfree(&env->numa_map);
@@ -217,11 +219,13 @@ void perf_env__exit(struct perf_env *env)
}
zfree(&env->hybrid_nodes);
- for (i = 0; i < env->nr_hybrid_cpc_nodes; i++) {
- zfree(&env->hybrid_cpc_nodes[i].cpu_pmu_caps);
- zfree(&env->hybrid_cpc_nodes[i].pmu_name);
+ for (i = 0; i < env->nr_pmus_with_caps; i++) {
+ for (j = 0; j < env->pmu_caps[i].nr_caps; j++)
+ zfree(&env->pmu_caps[i].caps[j]);
+ zfree(&env->pmu_caps[i].caps);
+ zfree(&env->pmu_caps[i].pmu_name);
}
- zfree(&env->hybrid_cpc_nodes);
+ zfree(&env->pmu_caps);
}
void perf_env__init(struct perf_env *env)
@@ -527,3 +531,51 @@ int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu)
return cpu.cpu >= 0 && cpu.cpu < env->nr_numa_map ? env->numa_map[cpu.cpu] : -1;
}
+
+char *perf_env__find_pmu_cap(struct perf_env *env, const char *pmu_name,
+ const char *cap)
+{
+ char *cap_eq;
+ int cap_size;
+ char **ptr;
+ int i, j;
+
+ if (!pmu_name || !cap)
+ return NULL;
+
+ cap_size = strlen(cap);
+ cap_eq = zalloc(cap_size + 2);
+ if (!cap_eq)
+ return NULL;
+
+ memcpy(cap_eq, cap, cap_size);
+ cap_eq[cap_size] = '=';
+
+ if (!strcmp(pmu_name, "cpu")) {
+ for (i = 0; i < env->nr_cpu_pmu_caps; i++) {
+ if (!strncmp(env->cpu_pmu_caps[i], cap_eq, cap_size + 1)) {
+ free(cap_eq);
+ return &env->cpu_pmu_caps[i][cap_size + 1];
+ }
+ }
+ goto out;
+ }
+
+ for (i = 0; i < env->nr_pmus_with_caps; i++) {
+ if (strcmp(env->pmu_caps[i].pmu_name, pmu_name))
+ continue;
+
+ ptr = env->pmu_caps[i].caps;
+
+ for (j = 0; j < env->pmu_caps[i].nr_caps; j++) {
+ if (!strncmp(ptr[j], cap_eq, cap_size + 1)) {
+ free(cap_eq);
+ return &ptr[j][cap_size + 1];
+ }
+ }
+ }
+
+out:
+ free(cap_eq);
+ return NULL;
+}
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
index a3541f98e1fc..4566c51f2fd9 100644
--- a/tools/perf/util/env.h
+++ b/tools/perf/util/env.h
@@ -43,10 +43,10 @@ struct hybrid_node {
char *cpus;
};
-struct hybrid_cpc_node {
- int nr_cpu_pmu_caps;
+struct pmu_caps {
+ int nr_caps;
unsigned int max_branches;
- char *cpu_pmu_caps;
+ char **caps;
char *pmu_name;
};
@@ -74,14 +74,14 @@ struct perf_env {
int nr_groups;
int nr_cpu_pmu_caps;
int nr_hybrid_nodes;
- int nr_hybrid_cpc_nodes;
+ int nr_pmus_with_caps;
char *cmdline;
const char **cmdline_argv;
char *sibling_cores;
char *sibling_dies;
char *sibling_threads;
char *pmu_mappings;
- char *cpu_pmu_caps;
+ char **cpu_pmu_caps;
struct cpu_topology_map *cpu;
struct cpu_cache_level *caches;
int caches_cnt;
@@ -94,7 +94,7 @@ struct perf_env {
struct memory_node *memory_nodes;
unsigned long long memory_bsize;
struct hybrid_node *hybrid_nodes;
- struct hybrid_cpc_node *hybrid_cpc_nodes;
+ struct pmu_caps *pmu_caps;
#ifdef HAVE_LIBBPF_SUPPORT
/*
* bpf_info_lock protects bpf rbtrees. This is needed because the
@@ -172,4 +172,6 @@ bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu);
+char *perf_env__find_pmu_cap(struct perf_env *env, const char *pmu_name,
+ const char *cap);
#endif /* __PERF_ENV_H */
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 0476bb3a4188..1fa14598b916 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -76,6 +76,7 @@ static const char *perf_event__names[] = {
[PERF_RECORD_TIME_CONV] = "TIME_CONV",
[PERF_RECORD_HEADER_FEATURE] = "FEATURE",
[PERF_RECORD_COMPRESSED] = "COMPRESSED",
+ [PERF_RECORD_FINISHED_INIT] = "FINISHED_INIT",
};
const char *perf_event__name(unsigned int id)
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index cdd72e05fd28..12eae6917022 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -65,7 +65,8 @@ struct stack_dump {
struct sample_read_value {
u64 value;
- u64 id;
+ u64 id; /* only if PERF_FORMAT_ID */
+ u64 lost; /* only if PERF_FORMAT_LOST */
};
struct sample_read {
@@ -80,6 +81,24 @@ struct sample_read {
};
};
+static inline size_t sample_read_value_size(u64 read_format)
+{
+ /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
+ if (read_format & PERF_FORMAT_LOST)
+ return sizeof(struct sample_read_value);
+ else
+ return offsetof(struct sample_read_value, lost);
+}
+
+static inline struct sample_read_value *
+next_sample_read_value(struct sample_read_value *v, u64 read_format)
+{
+ return (void *)v + sample_read_value_size(read_format);
+}
+
+#define sample_read_group__for_each(v, nr, rf) \
+ for (int __i = 0; __i < (int)nr; v = next_sample_read_value(v, rf), __i++)
+
struct ip_callchain {
u64 nr;
u64 ips[];
@@ -148,6 +167,8 @@ struct perf_sample {
u64 code_page_size;
u64 cgroup;
u32 flags;
+ u32 machine_pid;
+ u32 vcpu;
u16 insn_len;
u8 cpumode;
u16 misc;
@@ -461,10 +482,6 @@ size_t perf_event__fprintf(union perf_event *event, struct machine *machine, FIL
int kallsyms__get_function_start(const char *kallsyms_filename,
const char *symbol_name, u64 *addr);
-void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int *max);
-void cpu_map_data__synthesize(struct perf_record_cpu_map_data *data, struct perf_cpu_map *map,
- u16 type, int max);
-
void event_attr_init(struct perf_event_attr *attr);
int perf_event_paranoid(void);
@@ -482,4 +499,25 @@ void arch_perf_synthesize_sample_weight(const struct perf_sample *data, __u64 *a
const char *arch_perf_header_entry(const char *se_header);
int arch_support_sort_key(const char *sort_key);
+static inline bool perf_event_header__cpumode_is_guest(u8 cpumode)
+{
+ return cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
+ cpumode == PERF_RECORD_MISC_GUEST_USER;
+}
+
+static inline bool perf_event_header__misc_is_guest(u16 misc)
+{
+ return perf_event_header__cpumode_is_guest(misc & PERF_RECORD_MISC_CPUMODE_MASK);
+}
+
+static inline bool perf_event_header__is_guest(const struct perf_event_header *header)
+{
+ return perf_event_header__misc_is_guest(header->misc);
+}
+
+static inline bool perf_event__is_guest(const union perf_event *event)
+{
+ return perf_event_header__is_guest(&event->header);
+}
+
#endif /* __PERF_RECORD_H */
diff --git a/tools/perf/util/events_stats.h b/tools/perf/util/events_stats.h
index 1b0006092265..040ab9d0a803 100644
--- a/tools/perf/util/events_stats.h
+++ b/tools/perf/util/events_stats.h
@@ -22,7 +22,7 @@
*
* The total_period is needed because by default auto-freq is used, so
* multiplying nr_events[PERF_EVENT_SAMPLE] by a frequency isn't possible to get
- * the total number of low level events, it is necessary to to sum all struct
+ * the total number of low level events, it is necessary to sum all struct
* perf_record_sample.period and stash the result in total_period.
*/
struct events_stats {
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 48af7d379d82..48167f3941a6 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -309,7 +309,7 @@ struct evsel *evlist__add_aux_dummy(struct evlist *evlist, bool system_wide)
return evsel;
}
-static int evlist__add_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
+int evlist__add_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
{
struct evsel *evsel, *n;
LIST_HEAD(head);
@@ -342,9 +342,14 @@ int __evlist__add_default_attrs(struct evlist *evlist, struct perf_event_attr *a
return evlist__add_attrs(evlist, attrs, nr_attrs);
}
-__weak int arch_evlist__add_default_attrs(struct evlist *evlist __maybe_unused)
+__weak int arch_evlist__add_default_attrs(struct evlist *evlist,
+ struct perf_event_attr *attrs,
+ size_t nr_attrs)
{
- return 0;
+ if (!nr_attrs)
+ return 0;
+
+ return __evlist__add_default_attrs(evlist, attrs, nr_attrs);
}
struct evsel *evlist__find_tracepoint_by_id(struct evlist *evlist, int id)
@@ -1244,34 +1249,8 @@ bool evlist__valid_read_format(struct evlist *evlist)
u16 evlist__id_hdr_size(struct evlist *evlist)
{
struct evsel *first = evlist__first(evlist);
- struct perf_sample *data;
- u64 sample_type;
- u16 size = 0;
-
- if (!first->core.attr.sample_id_all)
- goto out;
-
- sample_type = first->core.attr.sample_type;
-
- if (sample_type & PERF_SAMPLE_TID)
- size += sizeof(data->tid) * 2;
-
- if (sample_type & PERF_SAMPLE_TIME)
- size += sizeof(data->time);
-
- if (sample_type & PERF_SAMPLE_ID)
- size += sizeof(data->id);
- if (sample_type & PERF_SAMPLE_STREAM_ID)
- size += sizeof(data->stream_id);
-
- if (sample_type & PERF_SAMPLE_CPU)
- size += sizeof(data->cpu) * 2;
-
- if (sample_type & PERF_SAMPLE_IDENTIFIER)
- size += sizeof(data->id);
-out:
- return size;
+ return first->core.attr.sample_id_all ? evsel__id_hdr_size(first) : 0;
}
bool evlist__valid_sample_id_all(struct evlist *evlist)
@@ -1533,10 +1512,22 @@ int evlist__start_workload(struct evlist *evlist)
int evlist__parse_sample(struct evlist *evlist, union perf_event *event, struct perf_sample *sample)
{
struct evsel *evsel = evlist__event2evsel(evlist, event);
+ int ret;
if (!evsel)
return -EFAULT;
- return evsel__parse_sample(evsel, event, sample);
+ ret = evsel__parse_sample(evsel, event, sample);
+ if (ret)
+ return ret;
+ if (perf_guest && sample->id) {
+ struct perf_sample_id *sid = evlist__id2sid(evlist, sample->id);
+
+ if (sid) {
+ sample->machine_pid = sid->machine_pid;
+ sample->vcpu = sid->vcpu.cpu;
+ }
+ }
+ return 0;
}
int evlist__parse_sample_timestamp(struct evlist *evlist, union perf_event *event, u64 *timestamp)
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 1bde9ccf4e7d..351ba2887a79 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -104,13 +104,18 @@ static inline int evlist__add_default(struct evlist *evlist)
return __evlist__add_default(evlist, true);
}
+int evlist__add_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs);
+
int __evlist__add_default_attrs(struct evlist *evlist,
struct perf_event_attr *attrs, size_t nr_attrs);
+int arch_evlist__add_default_attrs(struct evlist *evlist,
+ struct perf_event_attr *attrs,
+ size_t nr_attrs);
+
#define evlist__add_default_attrs(evlist, array) \
- __evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array))
+ arch_evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array))
-int arch_evlist__add_default_attrs(struct evlist *evlist);
struct evsel *arch_evlist__leader(struct list_head *list);
int evlist__add_dummy(struct evlist *evlist);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 094b0a9c0bc0..18c3eb864d55 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -594,9 +594,14 @@ static int evsel__add_modifiers(struct evsel *evsel, char *bf, size_t size)
return r;
}
+int __weak arch_evsel__hw_name(struct evsel *evsel, char *bf, size_t size)
+{
+ return scnprintf(bf, size, "%s", __evsel__hw_name(evsel->core.attr.config));
+}
+
static int evsel__hw_name(struct evsel *evsel, char *bf, size_t size)
{
- int r = scnprintf(bf, size, "%s", __evsel__hw_name(evsel->core.attr.config));
+ int r = arch_evsel__hw_name(evsel, bf, size);
return r + evsel__add_modifiers(evsel, bf + r, size - r);
}
@@ -1092,6 +1097,11 @@ void __weak arch_evsel__fixup_new_cycles(struct perf_event_attr *attr __maybe_un
{
}
+void __weak arch__post_evsel_config(struct evsel *evsel __maybe_unused,
+ struct perf_event_attr *attr __maybe_unused)
+{
+}
+
static void evsel__set_default_freq_period(struct record_opts *opts,
struct perf_event_attr *attr)
{
@@ -1375,6 +1385,8 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
if (evsel__is_offcpu_event(evsel))
evsel->core.attr.sample_type &= OFFCPU_SAMPLE_TYPES;
+
+ arch__post_evsel_config(evsel, attr);
}
int evsel__set_filter(struct evsel *evsel, const char *filter)
@@ -1529,7 +1541,7 @@ static int evsel__read_one(struct evsel *evsel, int cpu_map_idx, int thread)
}
static void evsel__set_count(struct evsel *counter, int cpu_map_idx, int thread,
- u64 val, u64 ena, u64 run)
+ u64 val, u64 ena, u64 run, u64 lost)
{
struct perf_counts_values *count;
@@ -1538,6 +1550,7 @@ static void evsel__set_count(struct evsel *counter, int cpu_map_idx, int thread,
count->val = val;
count->ena = ena;
count->run = run;
+ count->lost = lost;
perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, true);
}
@@ -1546,7 +1559,7 @@ static int evsel__process_group_data(struct evsel *leader, int cpu_map_idx, int
{
u64 read_format = leader->core.attr.read_format;
struct sample_read_value *v;
- u64 nr, ena = 0, run = 0, i;
+ u64 nr, ena = 0, run = 0, lost = 0;
nr = *data++;
@@ -1559,18 +1572,18 @@ static int evsel__process_group_data(struct evsel *leader, int cpu_map_idx, int
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
run = *data++;
- v = (struct sample_read_value *) data;
-
- evsel__set_count(leader, cpu_map_idx, thread, v[0].value, ena, run);
-
- for (i = 1; i < nr; i++) {
+ v = (void *)data;
+ sample_read_group__for_each(v, nr, read_format) {
struct evsel *counter;
- counter = evlist__id2evsel(leader->evlist, v[i].id);
+ counter = evlist__id2evsel(leader->evlist, v->id);
if (!counter)
return -EINVAL;
- evsel__set_count(counter, cpu_map_idx, thread, v[i].value, ena, run);
+ if (read_format & PERF_FORMAT_LOST)
+ lost = v->lost;
+
+ evsel__set_count(counter, cpu_map_idx, thread, v->value, ena, run, lost);
}
return 0;
@@ -2358,6 +2371,7 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
data->misc = event->header.misc;
data->id = -1ULL;
data->data_src = PERF_MEM_DATA_SRC_NONE;
+ data->vcpu = -1;
if (event->header.type != PERF_RECORD_SAMPLE) {
if (!evsel->core.attr.sample_id_all)
@@ -2462,8 +2476,8 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
if (data->read.group.nr > max_group_nr)
return -EFAULT;
- sz = data->read.group.nr *
- sizeof(struct sample_read_value);
+
+ sz = data->read.group.nr * sample_read_value_size(read_format);
OVERFLOW_CHECK(array, sz, max_size);
data->read.group.values =
(struct sample_read_value *)array;
@@ -2472,6 +2486,12 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
OVERFLOW_CHECK_u64(array);
data->read.one.id = *array;
array++;
+
+ if (read_format & PERF_FORMAT_LOST) {
+ OVERFLOW_CHECK_u64(array);
+ data->read.one.lost = *array;
+ array++;
+ }
}
}
@@ -2717,6 +2737,32 @@ int evsel__parse_sample_timestamp(struct evsel *evsel, union perf_event *event,
return 0;
}
+u16 evsel__id_hdr_size(struct evsel *evsel)
+{
+ u64 sample_type = evsel->core.attr.sample_type;
+ u16 size = 0;
+
+ if (sample_type & PERF_SAMPLE_TID)
+ size += sizeof(u64);
+
+ if (sample_type & PERF_SAMPLE_TIME)
+ size += sizeof(u64);
+
+ if (sample_type & PERF_SAMPLE_ID)
+ size += sizeof(u64);
+
+ if (sample_type & PERF_SAMPLE_STREAM_ID)
+ size += sizeof(u64);
+
+ if (sample_type & PERF_SAMPLE_CPU)
+ size += sizeof(u64);
+
+ if (sample_type & PERF_SAMPLE_IDENTIFIER)
+ size += sizeof(u64);
+
+ return size;
+}
+
struct tep_format_field *evsel__field(struct evsel *evsel, const char *name)
{
return tep_find_field(evsel->tp_format, name);
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 73ea48e94079..d927713b513e 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -271,6 +271,7 @@ extern const char *const evsel__hw_names[PERF_COUNT_HW_MAX];
extern const char *const evsel__sw_names[PERF_COUNT_SW_MAX];
extern char *evsel__bpf_counter_events;
bool evsel__match_bpf_counter_events(const char *name);
+int arch_evsel__hw_name(struct evsel *evsel, char *bf, size_t size);
int __evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result, char *bf, size_t size);
const char *evsel__name(struct evsel *evsel);
@@ -297,6 +298,7 @@ void evsel__set_sample_id(struct evsel *evsel, bool use_sample_identifier);
void arch_evsel__set_sample_weight(struct evsel *evsel);
void arch_evsel__fixup_new_cycles(struct perf_event_attr *attr);
+void arch__post_evsel_config(struct evsel *evsel, struct perf_event_attr *attr);
int evsel__set_filter(struct evsel *evsel, const char *filter);
int evsel__append_tp_filter(struct evsel *evsel, const char *filter);
@@ -380,6 +382,8 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
int evsel__parse_sample_timestamp(struct evsel *evsel, union perf_event *event,
u64 *timestamp);
+u16 evsel__id_hdr_size(struct evsel *evsel);
+
static inline struct evsel *evsel__next(struct evsel *evsel)
{
return list_entry(evsel->core.node.next, struct evsel, core.node);
diff --git a/tools/perf/util/expr.c b/tools/perf/util/expr.c
index 675f318ce7c1..c15a9852fa41 100644
--- a/tools/perf/util/expr.c
+++ b/tools/perf/util/expr.c
@@ -12,6 +12,7 @@
#include "expr-bison.h"
#include "expr-flex.h"
#include "smt.h"
+#include "tsc.h"
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
@@ -402,6 +403,13 @@ double expr_id_data__source_count(const struct expr_id_data *data)
return data->val.source_count;
}
+#if !defined(__i386__) && !defined(__x86_64__)
+double arch_get_tsc_freq(void)
+{
+ return 0.0;
+}
+#endif
+
double expr__get_literal(const char *literal)
{
static struct cpu_topology *topology;
@@ -417,6 +425,11 @@ double expr__get_literal(const char *literal)
goto out;
}
+ if (!strcasecmp("#system_tsc_freq", literal)) {
+ result = arch_get_tsc_freq();
+ goto out;
+ }
+
/*
* Assume that topology strings are consistent, such as CPUs "0-1"
* wouldn't be listed as "0,1", and so after deduplication the number of
diff --git a/tools/perf/util/genelf.c b/tools/perf/util/genelf.c
index aed49806a09b..953338b9e887 100644
--- a/tools/perf/util/genelf.c
+++ b/tools/perf/util/genelf.c
@@ -30,7 +30,11 @@
#define BUILD_ID_URANDOM /* different uuid for each run */
-#ifdef HAVE_LIBCRYPTO
+// FIXME: remove this and fix the deprecation warnings before it's removed and
+// we'll break for good here...
+#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+
+#ifdef HAVE_LIBCRYPTO_SUPPORT
#define BUILD_ID_MD5
#undef BUILD_ID_SHA /* does not seem to work well when linked with Java */
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 6ad629db63b7..c30c29c51410 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1512,18 +1512,13 @@ static int write_compressed(struct feat_fd *ff __maybe_unused,
return do_write(ff, &(ff->ph->env.comp_mmap_len), sizeof(ff->ph->env.comp_mmap_len));
}
-static int write_per_cpu_pmu_caps(struct feat_fd *ff, struct perf_pmu *pmu,
- bool write_pmu)
+static int __write_pmu_caps(struct feat_fd *ff, struct perf_pmu *pmu,
+ bool write_pmu)
{
struct perf_pmu_caps *caps = NULL;
- int nr_caps;
int ret;
- nr_caps = perf_pmu__caps_parse(pmu);
- if (nr_caps < 0)
- return nr_caps;
-
- ret = do_write(ff, &nr_caps, sizeof(nr_caps));
+ ret = do_write(ff, &pmu->nr_caps, sizeof(pmu->nr_caps));
if (ret < 0)
return ret;
@@ -1550,33 +1545,60 @@ static int write_cpu_pmu_caps(struct feat_fd *ff,
struct evlist *evlist __maybe_unused)
{
struct perf_pmu *cpu_pmu = perf_pmu__find("cpu");
+ int ret;
if (!cpu_pmu)
return -ENOENT;
- return write_per_cpu_pmu_caps(ff, cpu_pmu, false);
+ ret = perf_pmu__caps_parse(cpu_pmu);
+ if (ret < 0)
+ return ret;
+
+ return __write_pmu_caps(ff, cpu_pmu, false);
}
-static int write_hybrid_cpu_pmu_caps(struct feat_fd *ff,
- struct evlist *evlist __maybe_unused)
+static int write_pmu_caps(struct feat_fd *ff,
+ struct evlist *evlist __maybe_unused)
{
- struct perf_pmu *pmu;
- u32 nr_pmu = perf_pmu__hybrid_pmu_num();
+ struct perf_pmu *pmu = NULL;
+ int nr_pmu = 0;
int ret;
- if (nr_pmu == 0)
- return -ENOENT;
+ while ((pmu = perf_pmu__scan(pmu))) {
+ if (!pmu->name || !strcmp(pmu->name, "cpu") ||
+ perf_pmu__caps_parse(pmu) <= 0)
+ continue;
+ nr_pmu++;
+ }
ret = do_write(ff, &nr_pmu, sizeof(nr_pmu));
if (ret < 0)
return ret;
+ if (!nr_pmu)
+ return 0;
+
+ /*
+ * Write hybrid pmu caps first to maintain compatibility with
+ * older perf tool.
+ */
+ pmu = NULL;
perf_pmu__for_each_hybrid_pmu(pmu) {
- ret = write_per_cpu_pmu_caps(ff, pmu, true);
+ ret = __write_pmu_caps(ff, pmu, true);
if (ret < 0)
return ret;
}
+ pmu = NULL;
+ while ((pmu = perf_pmu__scan(pmu))) {
+ if (!pmu->name || !strcmp(pmu->name, "cpu") ||
+ !pmu->nr_caps || perf_pmu__is_hybrid(pmu->name))
+ continue;
+
+ ret = __write_pmu_caps(ff, pmu, true);
+ if (ret < 0)
+ return ret;
+ }
return 0;
}
@@ -2051,32 +2073,20 @@ static void print_compressed(struct feat_fd *ff, FILE *fp)
ff->ph->env.comp_level, ff->ph->env.comp_ratio);
}
-static void print_per_cpu_pmu_caps(FILE *fp, int nr_caps, char *cpu_pmu_caps,
- char *pmu_name)
+static void __print_pmu_caps(FILE *fp, int nr_caps, char **caps, char *pmu_name)
{
- const char *delimiter;
- char *str, buf[128];
+ const char *delimiter = "";
+ int i;
if (!nr_caps) {
- if (!pmu_name)
- fprintf(fp, "# cpu pmu capabilities: not available\n");
- else
- fprintf(fp, "# %s pmu capabilities: not available\n", pmu_name);
+ fprintf(fp, "# %s pmu capabilities: not available\n", pmu_name);
return;
}
- if (!pmu_name)
- scnprintf(buf, sizeof(buf), "# cpu pmu capabilities: ");
- else
- scnprintf(buf, sizeof(buf), "# %s pmu capabilities: ", pmu_name);
-
- delimiter = buf;
-
- str = cpu_pmu_caps;
- while (nr_caps--) {
- fprintf(fp, "%s%s", delimiter, str);
+ fprintf(fp, "# %s pmu capabilities: ", pmu_name);
+ for (i = 0; i < nr_caps; i++) {
+ fprintf(fp, "%s%s", delimiter, caps[i]);
delimiter = ", ";
- str += strlen(str) + 1;
}
fprintf(fp, "\n");
@@ -2084,19 +2094,18 @@ static void print_per_cpu_pmu_caps(FILE *fp, int nr_caps, char *cpu_pmu_caps,
static void print_cpu_pmu_caps(struct feat_fd *ff, FILE *fp)
{
- print_per_cpu_pmu_caps(fp, ff->ph->env.nr_cpu_pmu_caps,
- ff->ph->env.cpu_pmu_caps, NULL);
+ __print_pmu_caps(fp, ff->ph->env.nr_cpu_pmu_caps,
+ ff->ph->env.cpu_pmu_caps, (char *)"cpu");
}
-static void print_hybrid_cpu_pmu_caps(struct feat_fd *ff, FILE *fp)
+static void print_pmu_caps(struct feat_fd *ff, FILE *fp)
{
- struct hybrid_cpc_node *n;
+ struct pmu_caps *pmu_caps;
- for (int i = 0; i < ff->ph->env.nr_hybrid_cpc_nodes; i++) {
- n = &ff->ph->env.hybrid_cpc_nodes[i];
- print_per_cpu_pmu_caps(fp, n->nr_cpu_pmu_caps,
- n->cpu_pmu_caps,
- n->pmu_name);
+ for (int i = 0; i < ff->ph->env.nr_pmus_with_caps; i++) {
+ pmu_caps = &ff->ph->env.pmu_caps[i];
+ __print_pmu_caps(fp, pmu_caps->nr_caps, pmu_caps->caps,
+ pmu_caps->pmu_name);
}
}
@@ -3207,28 +3216,26 @@ static int process_compressed(struct feat_fd *ff,
return 0;
}
-static int process_per_cpu_pmu_caps(struct feat_fd *ff, int *nr_cpu_pmu_caps,
- char **cpu_pmu_caps,
- unsigned int *max_branches)
+static int __process_pmu_caps(struct feat_fd *ff, int *nr_caps,
+ char ***caps, unsigned int *max_branches)
{
- char *name, *value;
- struct strbuf sb;
- u32 nr_caps;
+ char *name, *value, *ptr;
+ u32 nr_pmu_caps, i;
+
+ *nr_caps = 0;
+ *caps = NULL;
- if (do_read_u32(ff, &nr_caps))
+ if (do_read_u32(ff, &nr_pmu_caps))
return -1;
- if (!nr_caps) {
- pr_debug("cpu pmu capabilities not available\n");
+ if (!nr_pmu_caps)
return 0;
- }
-
- *nr_cpu_pmu_caps = nr_caps;
- if (strbuf_init(&sb, 128) < 0)
+ *caps = zalloc(sizeof(char *) * nr_pmu_caps);
+ if (!*caps)
return -1;
- while (nr_caps--) {
+ for (i = 0; i < nr_pmu_caps; i++) {
name = do_read_string(ff);
if (!name)
goto error;
@@ -3237,12 +3244,10 @@ static int process_per_cpu_pmu_caps(struct feat_fd *ff, int *nr_cpu_pmu_caps,
if (!value)
goto free_name;
- if (strbuf_addf(&sb, "%s=%s", name, value) < 0)
+ if (asprintf(&ptr, "%s=%s", name, value) < 0)
goto free_value;
- /* include a NULL character at the end */
- if (strbuf_add(&sb, "", 1) < 0)
- goto free_value;
+ (*caps)[i] = ptr;
if (!strcmp(name, "branches"))
*max_branches = atoi(value);
@@ -3250,7 +3255,7 @@ static int process_per_cpu_pmu_caps(struct feat_fd *ff, int *nr_cpu_pmu_caps,
free(value);
free(name);
}
- *cpu_pmu_caps = strbuf_detach(&sb, NULL);
+ *nr_caps = nr_pmu_caps;
return 0;
free_value:
@@ -3258,64 +3263,76 @@ free_value:
free_name:
free(name);
error:
- strbuf_release(&sb);
+ for (; i > 0; i--)
+ free((*caps)[i - 1]);
+ free(*caps);
+ *caps = NULL;
+ *nr_caps = 0;
return -1;
}
static int process_cpu_pmu_caps(struct feat_fd *ff,
void *data __maybe_unused)
{
- return process_per_cpu_pmu_caps(ff, &ff->ph->env.nr_cpu_pmu_caps,
- &ff->ph->env.cpu_pmu_caps,
- &ff->ph->env.max_branches);
+ int ret = __process_pmu_caps(ff, &ff->ph->env.nr_cpu_pmu_caps,
+ &ff->ph->env.cpu_pmu_caps,
+ &ff->ph->env.max_branches);
+
+ if (!ret && !ff->ph->env.cpu_pmu_caps)
+ pr_debug("cpu pmu capabilities not available\n");
+ return ret;
}
-static int process_hybrid_cpu_pmu_caps(struct feat_fd *ff,
- void *data __maybe_unused)
+static int process_pmu_caps(struct feat_fd *ff, void *data __maybe_unused)
{
- struct hybrid_cpc_node *nodes;
+ struct pmu_caps *pmu_caps;
u32 nr_pmu, i;
int ret;
+ int j;
if (do_read_u32(ff, &nr_pmu))
return -1;
if (!nr_pmu) {
- pr_debug("hybrid cpu pmu capabilities not available\n");
+ pr_debug("pmu capabilities not available\n");
return 0;
}
- nodes = zalloc(sizeof(*nodes) * nr_pmu);
- if (!nodes)
+ pmu_caps = zalloc(sizeof(*pmu_caps) * nr_pmu);
+ if (!pmu_caps)
return -ENOMEM;
for (i = 0; i < nr_pmu; i++) {
- struct hybrid_cpc_node *n = &nodes[i];
-
- ret = process_per_cpu_pmu_caps(ff, &n->nr_cpu_pmu_caps,
- &n->cpu_pmu_caps,
- &n->max_branches);
+ ret = __process_pmu_caps(ff, &pmu_caps[i].nr_caps,
+ &pmu_caps[i].caps,
+ &pmu_caps[i].max_branches);
if (ret)
goto err;
- n->pmu_name = do_read_string(ff);
- if (!n->pmu_name) {
+ pmu_caps[i].pmu_name = do_read_string(ff);
+ if (!pmu_caps[i].pmu_name) {
ret = -1;
goto err;
}
+ if (!pmu_caps[i].nr_caps) {
+ pr_debug("%s pmu capabilities not available\n",
+ pmu_caps[i].pmu_name);
+ }
}
- ff->ph->env.nr_hybrid_cpc_nodes = nr_pmu;
- ff->ph->env.hybrid_cpc_nodes = nodes;
+ ff->ph->env.nr_pmus_with_caps = nr_pmu;
+ ff->ph->env.pmu_caps = pmu_caps;
return 0;
err:
for (i = 0; i < nr_pmu; i++) {
- free(nodes[i].cpu_pmu_caps);
- free(nodes[i].pmu_name);
+ for (j = 0; j < pmu_caps[i].nr_caps; j++)
+ free(pmu_caps[i].caps[j]);
+ free(pmu_caps[i].caps);
+ free(pmu_caps[i].pmu_name);
}
- free(nodes);
+ free(pmu_caps);
return ret;
}
@@ -3381,7 +3398,7 @@ const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE] = {
FEAT_OPR(CPU_PMU_CAPS, cpu_pmu_caps, false),
FEAT_OPR(CLOCK_DATA, clock_data, false),
FEAT_OPN(HYBRID_TOPOLOGY, hybrid_topology, true),
- FEAT_OPR(HYBRID_CPU_PMU_CAPS, hybrid_cpu_pmu_caps, false),
+ FEAT_OPR(PMU_CAPS, pmu_caps, false),
};
struct header_print_data {
@@ -4363,6 +4380,9 @@ int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
struct evsel *evsel;
struct perf_cpu_map *map;
+ if (dump_trace)
+ perf_event__fprintf_event_update(event, stdout);
+
if (!pevlist || *pevlist == NULL)
return -EINVAL;
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 56916dabce7b..2d5e601ba60f 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -46,7 +46,7 @@ enum {
HEADER_CPU_PMU_CAPS,
HEADER_CLOCK_DATA,
HEADER_HYBRID_TOPOLOGY,
- HEADER_HYBRID_CPU_PMU_CAPS,
+ HEADER_PMU_CAPS,
HEADER_LAST_FEATURE,
HEADER_FEAT_BITS = 256,
};
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 62b2f375a94d..d5e9fc8106dd 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -74,10 +74,12 @@ struct intel_pt {
bool data_queued;
bool est_tsc;
bool sync_switch;
+ bool sync_switch_not_supported;
bool mispred_all;
bool use_thread_stack;
bool callstack;
bool cap_event_trace;
+ bool have_guest_sideband;
unsigned int br_stack_sz;
unsigned int br_stack_sz_plus;
int have_sched_switch;
@@ -195,6 +197,9 @@ struct intel_pt_queue {
struct thread *guest_thread;
struct thread *unknown_guest_thread;
pid_t guest_machine_pid;
+ pid_t guest_pid;
+ pid_t guest_tid;
+ int vcpu;
bool exclude_kernel;
bool have_sample;
u64 time;
@@ -685,7 +690,7 @@ static int intel_pt_get_guest(struct intel_pt_queue *ptq)
struct machine *machine;
pid_t pid = ptq->pid <= 0 ? DEFAULT_GUEST_KERNEL_ID : ptq->pid;
- if (ptq->guest_machine && pid == ptq->guest_machine_pid)
+ if (ptq->guest_machine && pid == ptq->guest_machine->pid)
return 0;
ptq->guest_machine = NULL;
@@ -705,7 +710,6 @@ static int intel_pt_get_guest(struct intel_pt_queue *ptq)
return -1;
ptq->guest_machine = machine;
- ptq->guest_machine_pid = pid;
return 0;
}
@@ -759,28 +763,44 @@ static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
cpumode = intel_pt_nr_cpumode(ptq, *ip, nr);
if (nr) {
- if ((!symbol_conf.guest_code && cpumode != PERF_RECORD_MISC_GUEST_KERNEL) ||
- intel_pt_get_guest(ptq))
+ if (ptq->pt->have_guest_sideband) {
+ if (!ptq->guest_machine || ptq->guest_machine_pid != ptq->pid) {
+ intel_pt_log("ERROR: guest sideband but no guest machine\n");
+ return -EINVAL;
+ }
+ } else if ((!symbol_conf.guest_code && cpumode != PERF_RECORD_MISC_GUEST_KERNEL) ||
+ intel_pt_get_guest(ptq)) {
+ intel_pt_log("ERROR: no guest machine\n");
return -EINVAL;
+ }
machine = ptq->guest_machine;
thread = ptq->guest_thread;
if (!thread) {
- if (cpumode != PERF_RECORD_MISC_GUEST_KERNEL)
+ if (cpumode != PERF_RECORD_MISC_GUEST_KERNEL) {
+ intel_pt_log("ERROR: no guest thread\n");
return -EINVAL;
+ }
thread = ptq->unknown_guest_thread;
}
} else {
thread = ptq->thread;
if (!thread) {
- if (cpumode != PERF_RECORD_MISC_KERNEL)
+ if (cpumode != PERF_RECORD_MISC_KERNEL) {
+ intel_pt_log("ERROR: no thread\n");
return -EINVAL;
+ }
thread = ptq->pt->unknown_thread;
}
}
while (1) {
- if (!thread__find_map(thread, cpumode, *ip, &al) || !al.map->dso)
+ if (!thread__find_map(thread, cpumode, *ip, &al) || !al.map->dso) {
+ if (al.map)
+ intel_pt_log("ERROR: thread has no dso for %#" PRIx64 "\n", *ip);
+ else
+ intel_pt_log("ERROR: thread has no map for %#" PRIx64 "\n", *ip);
return -EINVAL;
+ }
if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
dso__data_status_seen(al.map->dso,
@@ -821,8 +841,12 @@ static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
len = dso__data_read_offset(al.map->dso, machine,
offset, buf,
INTEL_PT_INSN_BUF_SZ);
- if (len <= 0)
+ if (len <= 0) {
+ intel_pt_log("ERROR: failed to read at %" PRIu64 " ", offset);
+ if (intel_pt_enable_logging)
+ dso__fprintf(al.map->dso, intel_pt_log_fp());
return -EINVAL;
+ }
if (intel_pt_get_insn(buf, len, x86_64, intel_pt_insn))
return -EINVAL;
@@ -1370,6 +1394,55 @@ static void intel_pt_first_timestamp(struct intel_pt *pt, u64 timestamp)
}
}
+static int intel_pt_get_guest_from_sideband(struct intel_pt_queue *ptq)
+{
+ struct machines *machines = &ptq->pt->session->machines;
+ struct machine *machine;
+ pid_t machine_pid = ptq->pid;
+ pid_t tid;
+ int vcpu;
+
+ if (machine_pid <= 0)
+ return 0; /* Not a guest machine */
+
+ machine = machines__find(machines, machine_pid);
+ if (!machine)
+ return 0; /* Not a guest machine */
+
+ if (ptq->guest_machine != machine) {
+ ptq->guest_machine = NULL;
+ thread__zput(ptq->guest_thread);
+ thread__zput(ptq->unknown_guest_thread);
+
+ ptq->unknown_guest_thread = machine__find_thread(machine, 0, 0);
+ if (!ptq->unknown_guest_thread)
+ return -1;
+ ptq->guest_machine = machine;
+ }
+
+ vcpu = ptq->thread ? ptq->thread->guest_cpu : -1;
+ if (vcpu < 0)
+ return -1;
+
+ tid = machine__get_current_tid(machine, vcpu);
+
+ if (ptq->guest_thread && ptq->guest_thread->tid != tid)
+ thread__zput(ptq->guest_thread);
+
+ if (!ptq->guest_thread) {
+ ptq->guest_thread = machine__find_thread(machine, -1, tid);
+ if (!ptq->guest_thread)
+ return -1;
+ }
+
+ ptq->guest_machine_pid = machine_pid;
+ ptq->guest_pid = ptq->guest_thread->pid_;
+ ptq->guest_tid = tid;
+ ptq->vcpu = vcpu;
+
+ return 0;
+}
+
static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
struct auxtrace_queue *queue)
{
@@ -1390,6 +1463,13 @@ static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
if (queue->cpu == -1)
ptq->cpu = ptq->thread->cpu;
}
+
+ if (pt->have_guest_sideband && intel_pt_get_guest_from_sideband(ptq)) {
+ ptq->guest_machine_pid = 0;
+ ptq->guest_pid = -1;
+ ptq->guest_tid = -1;
+ ptq->vcpu = -1;
+ }
}
static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
@@ -1577,6 +1657,17 @@ static void intel_pt_prep_a_sample(struct intel_pt_queue *ptq,
sample->pid = ptq->pid;
sample->tid = ptq->tid;
+
+ if (ptq->pt->have_guest_sideband) {
+ if ((ptq->state->from_ip && ptq->state->from_nr) ||
+ (ptq->state->to_ip && ptq->state->to_nr)) {
+ sample->pid = ptq->guest_pid;
+ sample->tid = ptq->guest_tid;
+ sample->machine_pid = ptq->guest_machine_pid;
+ sample->vcpu = ptq->vcpu;
+ }
+ }
+
sample->cpu = ptq->cpu;
sample->insn_len = ptq->insn_len;
memcpy(sample->insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
@@ -2324,7 +2415,8 @@ static int intel_pt_synth_iflag_chg_sample(struct intel_pt_queue *ptq)
}
static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
- pid_t pid, pid_t tid, u64 ip, u64 timestamp)
+ pid_t pid, pid_t tid, u64 ip, u64 timestamp,
+ pid_t machine_pid, int vcpu)
{
union perf_event event;
char msg[MAX_AUXTRACE_ERROR_MSG];
@@ -2341,8 +2433,9 @@ static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG);
- auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
- code, cpu, pid, tid, ip, msg, timestamp);
+ auxtrace_synth_guest_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
+ code, cpu, pid, tid, ip, msg, timestamp,
+ machine_pid, vcpu);
err = perf_session__deliver_synth_event(pt->session, &event, NULL);
if (err)
@@ -2357,11 +2450,22 @@ static int intel_ptq_synth_error(struct intel_pt_queue *ptq,
{
struct intel_pt *pt = ptq->pt;
u64 tm = ptq->timestamp;
+ pid_t machine_pid = 0;
+ pid_t pid = ptq->pid;
+ pid_t tid = ptq->tid;
+ int vcpu = -1;
tm = pt->timeless_decoding ? 0 : tsc_to_perf_time(tm, &pt->tc);
- return intel_pt_synth_error(pt, state->err, ptq->cpu, ptq->pid,
- ptq->tid, state->from_ip, tm);
+ if (pt->have_guest_sideband && state->from_nr) {
+ machine_pid = ptq->guest_machine_pid;
+ vcpu = ptq->vcpu;
+ pid = ptq->guest_pid;
+ tid = ptq->guest_tid;
+ }
+
+ return intel_pt_synth_error(pt, state->err, ptq->cpu, pid, tid,
+ state->from_ip, tm, machine_pid, vcpu);
}
static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq)
@@ -2624,6 +2728,9 @@ static void intel_pt_enable_sync_switch(struct intel_pt *pt)
{
unsigned int i;
+ if (pt->sync_switch_not_supported)
+ return;
+
pt->sync_switch = true;
for (i = 0; i < pt->queues.nr_queues; i++) {
@@ -2635,6 +2742,23 @@ static void intel_pt_enable_sync_switch(struct intel_pt *pt)
}
}
+static void intel_pt_disable_sync_switch(struct intel_pt *pt)
+{
+ unsigned int i;
+
+ pt->sync_switch = false;
+
+ for (i = 0; i < pt->queues.nr_queues; i++) {
+ struct auxtrace_queue *queue = &pt->queues.queue_array[i];
+ struct intel_pt_queue *ptq = queue->priv;
+
+ if (ptq) {
+ ptq->sync_switch = false;
+ intel_pt_next_tid(pt, ptq);
+ }
+ }
+}
+
/*
* To filter against time ranges, it is only necessary to look at the next start
* or end time.
@@ -2928,7 +3052,8 @@ static int intel_pt_process_timeless_sample(struct intel_pt *pt,
static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample)
{
return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
- sample->pid, sample->tid, 0, sample->time);
+ sample->pid, sample->tid, 0, sample->time,
+ sample->machine_pid, sample->vcpu);
}
static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu)
@@ -3066,6 +3191,33 @@ static int intel_pt_context_switch_in(struct intel_pt *pt,
return machine__set_current_tid(pt->machine, cpu, pid, tid);
}
+static int intel_pt_guest_context_switch(struct intel_pt *pt,
+ union perf_event *event,
+ struct perf_sample *sample)
+{
+ bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
+ struct machines *machines = &pt->session->machines;
+ struct machine *machine = machines__find(machines, sample->machine_pid);
+
+ pt->have_guest_sideband = true;
+
+ /*
+ * sync_switch cannot handle guest machines at present, so just disable
+ * it.
+ */
+ pt->sync_switch_not_supported = true;
+ if (pt->sync_switch)
+ intel_pt_disable_sync_switch(pt);
+
+ if (out)
+ return 0;
+
+ if (!machine)
+ return -EINVAL;
+
+ return machine__set_current_tid(machine, sample->vcpu, sample->pid, sample->tid);
+}
+
static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
struct perf_sample *sample)
{
@@ -3073,6 +3225,9 @@ static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
pid_t pid, tid;
int cpu, ret;
+ if (perf_event__is_guest(event))
+ return intel_pt_guest_context_switch(pt, event, sample);
+
cpu = sample->cpu;
if (pt->have_sched_switch == 3) {
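
The guest sideband handling above comes down to a pair of machine.h calls: a guest switch-in event records which task is running on a vCPU via machine__set_current_tid(), and the decoder later recovers it via machine__get_current_tid(). A minimal sketch of that pairing, assuming perf's internal machine.h API and <errno.h>; record_guest_switch_in() and lookup_guest_tid() are hypothetical helper names, not part of the patch:

#include <errno.h>
#include "machine.h"	/* perf's internal header, assumed available */

/* On a guest context-switch-in event: remember the task on this vCPU. */
static int record_guest_switch_in(struct machines *machines, pid_t machine_pid,
				  int vcpu, pid_t pid, pid_t tid)
{
	struct machine *machine = machines__find(machines, machine_pid);

	if (!machine)
		return -EINVAL;
	return machine__set_current_tid(machine, vcpu, pid, tid);
}

/* Later, when decoding: which guest tid was last seen on that vCPU? */
static pid_t lookup_guest_tid(struct machines *machines, pid_t machine_pid,
			      int vcpu)
{
	struct machine *machine = machines__find(machines, machine_pid);

	return machine ? machine__get_current_tid(machine, vcpu) : -1;
}
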
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
index a23255773c60..4e6632203704 100644
--- a/tools/perf/util/jitdump.c
+++ b/tools/perf/util/jitdump.c
@@ -845,8 +845,13 @@ jit_process(struct perf_session *session,
if (jit_detect(filename, pid, nsi)) {
nsinfo__put(nsi);
- // Strip //anon* mmaps if we processed a jitdump for this pid
- if (jit_has_pid(machine, pid) && (strncmp(filename, "//anon", 6) == 0))
+ /*
+ * Strip //anon*, [anon:* and /memfd:* mmaps if we processed a jitdump for this pid
+ */
+ if (jit_has_pid(machine, pid) &&
+ ((strncmp(filename, "//anon", 6) == 0) ||
+ (strncmp(filename, "[anon:", 6) == 0) ||
+ (strncmp(filename, "/memfd:", 7) == 0)))
return 1;
return 0;
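
The mapping-name test above is just three prefix comparisons; a standalone sketch, with is_jit_anon_mapping() as a hypothetical helper name:

#include <stdbool.h>
#include <string.h>

/* Does this mmap filename look like an anonymous or memfd JIT mapping? */
static bool is_jit_anon_mapping(const char *filename)
{
	return strncmp(filename, "//anon", 6) == 0 ||
	       strncmp(filename, "[anon:", 6) == 0 ||
	       strncmp(filename, "/memfd:", 7) == 0;
}
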
diff --git a/tools/perf/util/kwork.h b/tools/perf/util/kwork.h
new file mode 100644
index 000000000000..320c0a6d2e08
--- /dev/null
+++ b/tools/perf/util/kwork.h
@@ -0,0 +1,257 @@
+#ifndef PERF_UTIL_KWORK_H
+#define PERF_UTIL_KWORK_H
+
+#include "perf.h"
+
+#include "util/tool.h"
+#include "util/event.h"
+#include "util/evlist.h"
+#include "util/session.h"
+#include "util/time-utils.h"
+
+#include <linux/list.h>
+#include <linux/bitmap.h>
+
+enum kwork_class_type {
+ KWORK_CLASS_IRQ,
+ KWORK_CLASS_SOFTIRQ,
+ KWORK_CLASS_WORKQUEUE,
+ KWORK_CLASS_MAX,
+};
+
+enum kwork_report_type {
+ KWORK_REPORT_RUNTIME,
+ KWORK_REPORT_LATENCY,
+ KWORK_REPORT_TIMEHIST,
+};
+
+enum kwork_trace_type {
+ KWORK_TRACE_RAISE,
+ KWORK_TRACE_ENTRY,
+ KWORK_TRACE_EXIT,
+ KWORK_TRACE_MAX,
+};
+
+/*
+ * data structure:
+ *
+ * +==================+ +============+ +======================+
+ * | class | | work | | atom |
+ * +==================+ +============+ +======================+
+ * +------------+ | +-----+ | | +------+ | | +-------+ +-----+ |
+ * | perf_kwork | +-> | irq | --------|+-> | eth0 | --+-> | raise | - | ... | --+ +-----------+
+ * +-----+------+ || +-----+ ||| +------+ ||| +-------+ +-----+ | | | |
+ * | || ||| ||| | +-> | atom_page |
+ * | || ||| ||| +-------+ +-----+ | | |
+ * | class_list ||| |+-> | entry | - | ... | ----> | |
+ * | || ||| ||| +-------+ +-----+ | | |
+ * | || ||| ||| | +-> | |
+ * | || ||| ||| +-------+ +-----+ | | | |
+ * | || ||| |+-> | exit | - | ... | --+ +-----+-----+
+ * | || ||| | | +-------+ +-----+ | |
+ * | || ||| | | | |
+ * | || ||| +-----+ | | | |
+ * | || |+-> | ... | | | | |
+ * | || | | +-----+ | | | |
+ * | || | | | | | |
+ * | || +---------+ | | +-----+ | | +-------+ +-----+ | |
+ * | +-> | softirq | -------> | RCU | ---+-> | raise | - | ... | --+ +-----+-----+
+ * | || +---------+ | | +-----+ ||| +-------+ +-----+ | | | |
+ * | || | | ||| | +-> | atom_page |
+ * | || | | ||| +-------+ +-----+ | | |
+ * | || | | |+-> | entry | - | ... | ----> | |
+ * | || | | ||| +-------+ +-----+ | | |
+ * | || | | ||| | +-> | |
+ * | || | | ||| +-------+ +-----+ | | | |
+ * | || | | |+-> | exit | - | ... | --+ +-----+-----+
+ * | || | | | | +-------+ +-----+ | |
+ * | || | | | | | |
+ * | || +-----------+ | | +-----+ | | | |
+ * | +-> | workqueue | -----> | ... | | | | |
+ * | | +-----------+ | | +-----+ | | | |
+ * | +==================+ +============+ +======================+ |
+ * | |
+ * +----> atom_page_list ---------------------------------------------------------+
+ *
+ */
+
+struct kwork_atom {
+ struct list_head list;
+ u64 time;
+ struct kwork_atom *prev;
+
+ void *page_addr;
+ unsigned long bit_inpage;
+};
+
+#define NR_ATOM_PER_PAGE 128
+struct kwork_atom_page {
+ struct list_head list;
+ struct kwork_atom atoms[NR_ATOM_PER_PAGE];
+ DECLARE_BITMAP(bitmap, NR_ATOM_PER_PAGE);
+};
+
+struct kwork_class;
+struct kwork_work {
+ /*
+ * class field
+ */
+ struct rb_node node;
+ struct kwork_class *class;
+
+ /*
+ * work field
+ */
+ u64 id;
+ int cpu;
+ char *name;
+
+ /*
+ * atom field
+ */
+ u64 nr_atoms;
+ struct list_head atom_list[KWORK_TRACE_MAX];
+
+ /*
+ * runtime report
+ */
+ u64 max_runtime;
+ u64 max_runtime_start;
+ u64 max_runtime_end;
+ u64 total_runtime;
+
+ /*
+ * latency report
+ */
+ u64 max_latency;
+ u64 max_latency_start;
+ u64 max_latency_end;
+ u64 total_latency;
+};
+
+struct kwork_class {
+ struct list_head list;
+ const char *name;
+ enum kwork_class_type type;
+
+ unsigned int nr_tracepoints;
+ const struct evsel_str_handler *tp_handlers;
+
+ struct rb_root_cached work_root;
+
+ int (*class_init)(struct kwork_class *class,
+ struct perf_session *session);
+
+ void (*work_init)(struct kwork_class *class,
+ struct kwork_work *work,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine);
+
+ void (*work_name)(struct kwork_work *work,
+ char *buf, int len);
+};
+
+struct perf_kwork;
+struct trace_kwork_handler {
+ int (*raise_event)(struct perf_kwork *kwork,
+ struct kwork_class *class, struct evsel *evsel,
+ struct perf_sample *sample, struct machine *machine);
+
+ int (*entry_event)(struct perf_kwork *kwork,
+ struct kwork_class *class, struct evsel *evsel,
+ struct perf_sample *sample, struct machine *machine);
+
+ int (*exit_event)(struct perf_kwork *kwork,
+ struct kwork_class *class, struct evsel *evsel,
+ struct perf_sample *sample, struct machine *machine);
+};
+
+struct perf_kwork {
+ /*
+ * metadata
+ */
+ struct perf_tool tool;
+ struct list_head class_list;
+ struct list_head atom_page_list;
+ struct list_head sort_list, cmp_id;
+ struct rb_root_cached sorted_work_root;
+ const struct trace_kwork_handler *tp_handler;
+
+ /*
+ * profile filters
+ */
+ const char *profile_name;
+
+ const char *cpu_list;
+ DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
+
+ const char *time_str;
+ struct perf_time_interval ptime;
+
+ /*
+ * options for command
+ */
+ bool force;
+ const char *event_list_str;
+ enum kwork_report_type report;
+
+ /*
+ * options for subcommand
+ */
+ bool summary;
+ const char *sort_order;
+ bool show_callchain;
+ unsigned int max_stack;
+ bool use_bpf;
+
+ /*
+ * statistics
+ */
+ u64 timestart;
+ u64 timeend;
+
+ unsigned long nr_events;
+ unsigned long nr_lost_chunks;
+ unsigned long nr_lost_events;
+
+ u64 all_runtime;
+ u64 all_count;
+ u64 nr_skipped_events[KWORK_TRACE_MAX + 1];
+};
+
+struct kwork_work *perf_kwork_add_work(struct perf_kwork *kwork,
+ struct kwork_class *class,
+ struct kwork_work *key);
+
+#ifdef HAVE_BPF_SKEL
+
+int perf_kwork__trace_prepare_bpf(struct perf_kwork *kwork);
+int perf_kwork__report_read_bpf(struct perf_kwork *kwork);
+void perf_kwork__report_cleanup_bpf(void);
+
+void perf_kwork__trace_start(void);
+void perf_kwork__trace_finish(void);
+
+#else /* !HAVE_BPF_SKEL */
+
+static inline int
+perf_kwork__trace_prepare_bpf(struct perf_kwork *kwork __maybe_unused)
+{
+ return -1;
+}
+
+static inline int
+perf_kwork__report_read_bpf(struct perf_kwork *kwork __maybe_unused)
+{
+ return -1;
+}
+
+static inline void perf_kwork__report_cleanup_bpf(void) {}
+
+static inline void perf_kwork__trace_start(void) {}
+static inline void perf_kwork__trace_finish(void) {}
+
+#endif /* HAVE_BPF_SKEL */
+
+#endif /* PERF_UTIL_KWORK_H */
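
The atom_page layout pairs a fixed array of atoms with a bitmap of which slots are in use. An illustrative sketch of carving an atom out of a page, assuming the tools' bitmap helpers find_first_zero_bit() and __set_bit() are available as elsewhere in perf; atom_page_get() is a hypothetical name, not a function declared by this header:

static struct kwork_atom *atom_page_get(struct kwork_atom_page *page)
{
	unsigned long bit = find_first_zero_bit(page->bitmap, NR_ATOM_PER_PAGE);

	if (bit >= NR_ATOM_PER_PAGE)
		return NULL;			/* every slot is taken */

	__set_bit(bit, page->bitmap);		/* claim the slot */
	page->atoms[bit].page_addr = page;	/* so it can be released later */
	page->atoms[bit].bit_inpage = bit;
	return &page->atoms[bit];
}
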
diff --git a/tools/perf/util/llvm-utils.c b/tools/perf/util/llvm-utils.c
index 96c8ef60f4f8..2dc797007419 100644
--- a/tools/perf/util/llvm-utils.c
+++ b/tools/perf/util/llvm-utils.c
@@ -25,7 +25,7 @@
"$CLANG_OPTIONS $PERF_BPF_INC_OPTIONS $KERNEL_INC_OPTIONS " \
"-Wno-unused-value -Wno-pointer-sign " \
"-working-directory $WORKING_DIR " \
- "-c \"$CLANG_SOURCE\" -target bpf $CLANG_EMIT_LLVM -O2 -o - $LLVM_OPTIONS_PIPE"
+ "-c \"$CLANG_SOURCE\" -target bpf $CLANG_EMIT_LLVM -g -O2 -o - $LLVM_OPTIONS_PIPE"
struct llvm_param llvm_param = {
.clang_path = "clang",
diff --git a/tools/perf/util/lock-contention.h b/tools/perf/util/lock-contention.h
new file mode 100644
index 000000000000..2146efc33396
--- /dev/null
+++ b/tools/perf/util/lock-contention.h
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef PERF_LOCK_CONTENTION_H
+#define PERF_LOCK_CONTENTION_H
+
+#include <linux/list.h>
+#include <linux/rbtree.h>
+
+struct lock_stat {
+ struct hlist_node hash_entry;
+ struct rb_node rb; /* used for sorting */
+
+ u64 addr; /* address of lockdep_map, used as ID */
+ char *name; /* for strcpy(), we cannot use const */
+
+ unsigned int nr_acquire;
+ unsigned int nr_acquired;
+ unsigned int nr_contended;
+ unsigned int nr_release;
+
+ union {
+ unsigned int nr_readlock;
+ unsigned int flags;
+ };
+ unsigned int nr_trylock;
+
+ /* these times are in nano sec. */
+ u64 avg_wait_time;
+ u64 wait_time_total;
+ u64 wait_time_min;
+ u64 wait_time_max;
+
+ int broken; /* flag of blacklist */
+ int combined;
+};
+
+/*
+ * States of lock_seq_stat
+ *
+ * UNINITIALIZED is required for detecting the first acquire event.
+ * Given the nature of lock events, there is no guarantee
+ * that the first event for a lock is acquire;
+ * it can be acquired, contended or release.
+ */
+#define SEQ_STATE_UNINITIALIZED 0 /* initial state */
+#define SEQ_STATE_RELEASED 1
+#define SEQ_STATE_ACQUIRING 2
+#define SEQ_STATE_ACQUIRED 3
+#define SEQ_STATE_READ_ACQUIRED 4
+#define SEQ_STATE_CONTENDED 5
+
+/*
+ * MAX_LOCK_DEPTH
+ * Imported from include/linux/sched.h.
+ * Should this be synchronized?
+ */
+#define MAX_LOCK_DEPTH 48
+
+/*
+ * struct lock_seq_stat:
+ * Holds the state of one lock sequence:
+ * 1) acquire -> acquired -> release
+ * 2) acquire -> contended -> acquired -> release
+ * 3) acquire (with read or try) -> release
+ * 4) Are there other patterns?
+ */
+struct lock_seq_stat {
+ struct list_head list;
+ int state;
+ u64 prev_event_time;
+ u64 addr;
+
+ int read_count;
+};
+
+struct thread_stat {
+ struct rb_node rb;
+
+ u32 tid;
+ struct list_head seq_list;
+};
+
+/*
+ * CONTENTION_STACK_DEPTH
+ * Number of stack trace entries to find callers
+ */
+#define CONTENTION_STACK_DEPTH 8
+
+/*
+ * CONTENTION_STACK_SKIP
+ * Number of stack trace entries to skip when finding callers.
+ * The first few entries belong to the locking implementation itself.
+ */
+#define CONTENTION_STACK_SKIP 3
+
+/*
+ * flags for lock:contention_begin
+ * Imported from include/trace/events/lock.h.
+ */
+#define LCB_F_SPIN (1U << 0)
+#define LCB_F_READ (1U << 1)
+#define LCB_F_WRITE (1U << 2)
+#define LCB_F_RT (1U << 3)
+#define LCB_F_PERCPU (1U << 4)
+#define LCB_F_MUTEX (1U << 5)
+
+struct evlist;
+struct machine;
+struct target;
+
+struct lock_contention {
+ struct evlist *evlist;
+ struct target *target;
+ struct machine *machine;
+ struct hlist_head *result;
+ unsigned long map_nr_entries;
+ unsigned long lost;
+};
+
+#ifdef HAVE_BPF_SKEL
+
+int lock_contention_prepare(struct lock_contention *con);
+int lock_contention_start(void);
+int lock_contention_stop(void);
+int lock_contention_read(struct lock_contention *con);
+int lock_contention_finish(void);
+
+#else /* !HAVE_BPF_SKEL */
+
+static inline int lock_contention_prepare(struct lock_contention *con __maybe_unused)
+{
+ return 0;
+}
+
+static inline int lock_contention_start(void) { return 0; }
+static inline int lock_contention_stop(void) { return 0; }
+static inline int lock_contention_finish(void) { return 0; }
+
+static inline int lock_contention_read(struct lock_contention *con __maybe_unused)
+{
+ return 0;
+}
+
+#endif /* HAVE_BPF_SKEL */
+
+bool is_lock_function(struct machine *machine, u64 addr);
+
+#endif /* PERF_LOCK_CONTENTION_H */
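
The BPF half of this header implies a lifecycle: prepare the maps, start collection, run the workload, stop, read the results, then tear down. A usage sketch under that assumption (the call order is inferred from the API names, and collect_contention() is a hypothetical caller):

static int collect_contention(struct evlist *evlist, struct target *target,
			      struct machine *machine, struct hlist_head *result)
{
	struct lock_contention con = {
		.evlist		= evlist,
		.target		= target,
		.machine	= machine,
		.result		= result,
		.map_nr_entries	= 10240,	/* assumed sizing, not a default from this header */
	};

	if (lock_contention_prepare(&con) < 0)
		return -1;

	lock_contention_start();
	/* ... workload runs here ... */
	lock_contention_stop();

	lock_contention_read(&con);		/* fills con.result, updates con.lost */
	return lock_contention_finish();
}
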
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 009061852808..2a16cae28407 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -236,6 +236,7 @@ void machine__exit(struct machine *machine)
zfree(&machine->root_dir);
zfree(&machine->mmap_name);
zfree(&machine->current_tid);
+ zfree(&machine->kallsyms_filename);
for (i = 0; i < THREADS__TABLE_SIZE; i++) {
struct threads *threads = &machine->threads[i];
@@ -1742,6 +1743,7 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
struct map *map;
enum dso_space_type dso_space;
bool is_kernel_mmap;
+ const char *mmap_name = machine->mmap_name;
/* If we have maps from kcore then we do not need or want any others */
if (machine__uses_kcore(machine))
@@ -1752,8 +1754,16 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
else
dso_space = DSO_SPACE__KERNEL_GUEST;
- is_kernel_mmap = memcmp(xm->name, machine->mmap_name,
- strlen(machine->mmap_name) - 1) == 0;
+ is_kernel_mmap = memcmp(xm->name, mmap_name, strlen(mmap_name) - 1) == 0;
+ if (!is_kernel_mmap && !machine__is_host(machine)) {
+ /*
+ * If the event was recorded inside the guest and injected into
+ * the host perf.data file, then it will match a host mmap_name,
+ * so try that - see machine__set_mmap_name().
+ */
+ mmap_name = "[kernel.kallsyms]";
+ is_kernel_mmap = memcmp(xm->name, mmap_name, strlen(mmap_name) - 1) == 0;
+ }
if (xm->name[0] == '/' ||
(!is_kernel_mmap && xm->name[0] == '[')) {
map = machine__addnew_module_map(machine, xm->start,
@@ -1767,7 +1777,7 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
dso__set_build_id(map->dso, bid);
} else if (is_kernel_mmap) {
- const char *symbol_name = (xm->name + strlen(machine->mmap_name));
+ const char *symbol_name = xm->name + strlen(mmap_name);
/*
* Should be there already, from the build-id table in
* the header.
@@ -3174,9 +3184,7 @@ int machines__for_each_thread(struct machines *machines,
pid_t machine__get_current_tid(struct machine *machine, int cpu)
{
- int nr_cpus = min(machine->env->nr_cpus_avail, MAX_NR_CPUS);
-
- if (cpu < 0 || cpu >= nr_cpus || !machine->current_tid)
+ if (cpu < 0 || (size_t)cpu >= machine->current_tid_sz)
return -1;
return machine->current_tid[cpu];
@@ -3186,26 +3194,16 @@ int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
pid_t tid)
{
struct thread *thread;
- int nr_cpus = min(machine->env->nr_cpus_avail, MAX_NR_CPUS);
+ const pid_t init_val = -1;
if (cpu < 0)
return -EINVAL;
- if (!machine->current_tid) {
- int i;
-
- machine->current_tid = calloc(nr_cpus, sizeof(pid_t));
- if (!machine->current_tid)
- return -ENOMEM;
- for (i = 0; i < nr_cpus; i++)
- machine->current_tid[i] = -1;
- }
-
- if (cpu >= nr_cpus) {
- pr_err("Requested CPU %d too large. ", cpu);
- pr_err("Consider raising MAX_NR_CPUS\n");
- return -EINVAL;
- }
+ if (realloc_array_as_needed(machine->current_tid,
+ machine->current_tid_sz,
+ (unsigned int)cpu,
+ &init_val))
+ return -ENOMEM;
machine->current_tid[cpu] = tid;
@@ -3327,3 +3325,18 @@ int machine__for_each_dso(struct machine *machine, machine__dso_t fn, void *priv
}
return err;
}
+
+int machine__for_each_kernel_map(struct machine *machine, machine__map_t fn, void *priv)
+{
+ struct maps *maps = machine__kernel_maps(machine);
+ struct map *map;
+ int err = 0;
+
+ for (map = maps__first(maps); map != NULL; map = map__next(map)) {
+ err = fn(map, priv);
+ if (err != 0) {
+ break;
+ }
+ }
+ return err;
+}
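
machine__set_current_tid() now grows the per-CPU tid array on demand instead of sizing it up front from nr_cpus_avail. A sketch of the grow-on-demand idea behind realloc_array_as_needed(); the real helper lives elsewhere in the tree and may differ, and grow_pid_array() is a hypothetical name:

#include <stdlib.h>

static int grow_pid_array(pid_t **arr, size_t *sz, size_t idx, pid_t init_val)
{
	pid_t *tmp;
	size_t new_sz, i;

	if (idx < *sz)
		return 0;			/* already large enough */

	new_sz = idx + 1;
	tmp = realloc(*arr, new_sz * sizeof(**arr));
	if (!tmp)
		return -1;

	for (i = *sz; i < new_sz; i++)
		tmp[i] = init_val;		/* new slots start out as -1 */

	*arr = tmp;
	*sz = new_sz;
	return 0;
}
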
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index 5d7daf7cb7bc..74935dfaa937 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -48,6 +48,7 @@ struct machine {
bool single_address_space;
char *root_dir;
char *mmap_name;
+ char *kallsyms_filename;
struct threads threads[THREADS__TABLE_SIZE];
struct vdso_info *vdso_info;
struct perf_env *env;
@@ -56,6 +57,7 @@ struct machine {
struct map *vmlinux_map;
u64 kernel_start;
pid_t *current_tid;
+ size_t current_tid_sz;
union { /* Tool specific area */
void *priv;
u64 db_id;
@@ -262,6 +264,11 @@ typedef int (*machine__dso_t)(struct dso *dso, struct machine *machine, void *pr
int machine__for_each_dso(struct machine *machine, machine__dso_t fn,
void *priv);
+
+typedef int (*machine__map_t)(struct map *map, void *priv);
+int machine__for_each_kernel_map(struct machine *machine, machine__map_t fn,
+ void *priv);
+
int machine__for_each_thread(struct machine *machine,
int (*fn)(struct thread *thread, void *p),
void *priv);
diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
index c3c21a9c350b..764883183519 100644
--- a/tools/perf/util/mem-events.c
+++ b/tools/perf/util/mem-events.c
@@ -410,6 +410,11 @@ static const char * const snoop_access[] = {
"HitM",
};
+static const char * const snoopx_access[] = {
+ "Fwd",
+ "Peer",
+};
+
int perf_mem__snp_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
size_t i, l = 0;
@@ -430,13 +435,20 @@ int perf_mem__snp_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
}
l += scnprintf(out + l, sz - l, snoop_access[i]);
}
- if (mem_info &&
- (mem_info->data_src.mem_snoopx & PERF_MEM_SNOOPX_FWD)) {
+
+ m = 0;
+ if (mem_info)
+ m = mem_info->data_src.mem_snoopx;
+
+ for (i = 0; m && i < ARRAY_SIZE(snoopx_access); i++, m >>= 1) {
+ if (!(m & 0x1))
+ continue;
+
if (l) {
strcat(out, " or ");
l += 4;
}
- l += scnprintf(out + l, sz - l, "Fwd");
+ l += scnprintf(out + l, sz - l, snoopx_access[i]);
}
if (*out == '\0')
@@ -513,6 +525,7 @@ int c2c_decode_stats(struct c2c_stats *stats, struct mem_info *mi)
u64 op = data_src->mem_op;
u64 lvl = data_src->mem_lvl;
u64 snoop = data_src->mem_snoop;
+ u64 snoopx = data_src->mem_snoopx;
u64 lock = data_src->mem_lock;
u64 blk = data_src->mem_blk;
/*
@@ -532,6 +545,12 @@ do { \
stats->tot_hitm++; \
} while (0)
+#define PEER_INC(__f) \
+do { \
+ stats->__f++; \
+ stats->tot_peer++; \
+} while (0)
+
#define P(a, b) PERF_MEM_##a##_##b
stats->nr_entries++;
@@ -555,12 +574,20 @@ do { \
if (lvl & P(LVL, IO)) stats->ld_io++;
if (lvl & P(LVL, LFB)) stats->ld_fbhit++;
if (lvl & P(LVL, L1 )) stats->ld_l1hit++;
- if (lvl & P(LVL, L2 )) stats->ld_l2hit++;
+ if (lvl & P(LVL, L2)) {
+ stats->ld_l2hit++;
+
+ if (snoopx & P(SNOOPX, PEER))
+ PEER_INC(lcl_peer);
+ }
if (lvl & P(LVL, L3 )) {
if (snoop & P(SNOOP, HITM))
HITM_INC(lcl_hitm);
else
stats->ld_llchit++;
+
+ if (snoopx & P(SNOOPX, PEER))
+ PEER_INC(lcl_peer);
}
if (lvl & P(LVL, LOC_RAM)) {
@@ -585,10 +612,14 @@ do { \
if ((lvl & P(LVL, REM_CCE1)) ||
(lvl & P(LVL, REM_CCE2)) ||
mrem) {
- if (snoop & P(SNOOP, HIT))
+ if (snoop & P(SNOOP, HIT)) {
stats->rmt_hit++;
- else if (snoop & P(SNOOP, HITM))
+ } else if (snoop & P(SNOOP, HITM)) {
HITM_INC(rmt_hitm);
+ } else if (snoopx & P(SNOOPX, PEER)) {
+ stats->rmt_hit++;
+ PEER_INC(rmt_peer);
+ }
}
if ((lvl & P(LVL, MISS)))
@@ -652,6 +683,9 @@ void c2c_add_stats(struct c2c_stats *stats, struct c2c_stats *add)
stats->lcl_hitm += add->lcl_hitm;
stats->rmt_hitm += add->rmt_hitm;
stats->tot_hitm += add->tot_hitm;
+ stats->lcl_peer += add->lcl_peer;
+ stats->rmt_peer += add->rmt_peer;
+ stats->tot_peer += add->tot_peer;
stats->rmt_hit += add->rmt_hit;
stats->lcl_dram += add->lcl_dram;
stats->rmt_dram += add->rmt_dram;
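
The snoopx handling replaces a single PERF_MEM_SNOOPX_FWD test with a loop that shifts the mask one bit at a time and prints the matching name. A standalone sketch of that decoding pattern (plain stdio rather than the perf scnprintf plumbing):

#include <stdio.h>
#include <stddef.h>

static const char * const snoopx_names[] = { "Fwd", "Peer" };

static void print_snoopx(unsigned long long m)
{
	size_t i;

	for (i = 0; m && i < sizeof(snoopx_names) / sizeof(snoopx_names[0]);
	     i++, m >>= 1) {
		if (m & 0x1)			/* bit i is set: print its name */
			printf("%s ", snoopx_names[i]);
	}
}
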
diff --git a/tools/perf/util/mem-events.h b/tools/perf/util/mem-events.h
index 8a8b568baeee..12372309d60e 100644
--- a/tools/perf/util/mem-events.h
+++ b/tools/perf/util/mem-events.h
@@ -78,6 +78,9 @@ struct c2c_stats {
u32 lcl_hitm; /* count of loads with local HITM */
u32 rmt_hitm; /* count of loads with remote HITM */
u32 tot_hitm; /* count of loads with local and remote HITM */
+ u32 lcl_peer; /* count of loads with local peer cache */
+ u32 rmt_peer; /* count of loads with remote peer cache */
+ u32 tot_peer; /* count of loads with local and remote peer cache */
u32 rmt_hit; /* count of loads with remote hit clean; */
u32 lcl_dram; /* count of loads miss to local DRAM */
u32 rmt_dram; /* count of loads miss to remote DRAM */
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index 8f7baeabc5cf..464475fd6b9a 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -502,14 +502,14 @@ struct metricgroup_print_sys_idata {
bool details;
};
-typedef int (*metricgroup_sys_event_iter_fn)(const struct pmu_event *pe, void *);
-
struct metricgroup_iter_data {
- metricgroup_sys_event_iter_fn fn;
+ pmu_event_iter_fn fn;
void *data;
};
-static int metricgroup__sys_event_iter(const struct pmu_event *pe, void *data)
+static int metricgroup__sys_event_iter(const struct pmu_event *pe,
+ const struct pmu_events_table *table,
+ void *data)
{
struct metricgroup_iter_data *d = data;
struct perf_pmu *pmu = NULL;
@@ -522,13 +522,15 @@ static int metricgroup__sys_event_iter(const struct pmu_event *pe, void *data)
if (!pmu->id || strcmp(pmu->id, pe->compat))
continue;
- return d->fn(pe, d->data);
+ return d->fn(pe, table, d->data);
}
return 0;
}
-static int metricgroup__print_sys_event_iter(const struct pmu_event *pe, void *data)
+static int metricgroup__print_sys_event_iter(const struct pmu_event *pe,
+ const struct pmu_events_table *table __maybe_unused,
+ void *data)
{
struct metricgroup_print_sys_idata *d = data;
@@ -536,15 +538,40 @@ static int metricgroup__print_sys_event_iter(const struct pmu_event *pe, void *d
d->details, d->groups, d->metriclist);
}
+struct metricgroup_print_data {
+ const char *pmu_name;
+ struct strlist *metriclist;
+ char *filter;
+ struct rblist *groups;
+ bool metricgroups;
+ bool raw;
+ bool details;
+};
+
+static int metricgroup__print_callback(const struct pmu_event *pe,
+ const struct pmu_events_table *table __maybe_unused,
+ void *vdata)
+{
+ struct metricgroup_print_data *data = vdata;
+
+ if (!pe->metric_expr)
+ return 0;
+
+ if (data->pmu_name && perf_pmu__is_hybrid(pe->pmu) && strcmp(data->pmu_name, pe->pmu))
+ return 0;
+
+ return metricgroup__print_pmu_event(pe, data->metricgroups, data->filter,
+ data->raw, data->details, data->groups,
+ data->metriclist);
+}
+
void metricgroup__print(bool metrics, bool metricgroups, char *filter,
bool raw, bool details, const char *pmu_name)
{
- const struct pmu_events_map *map = pmu_events_map__find();
- const struct pmu_event *pe;
- int i;
struct rblist groups;
struct rb_node *node, *next;
struct strlist *metriclist = NULL;
+ const struct pmu_events_table *table;
if (!metricgroups) {
metriclist = strlist__new(NULL, NULL);
@@ -556,23 +583,22 @@ void metricgroup__print(bool metrics, bool metricgroups, char *filter,
groups.node_new = mep_new;
groups.node_cmp = mep_cmp;
groups.node_delete = mep_delete;
- for (i = 0; map; i++) {
- pe = &map->table[i];
+ table = pmu_events_table__find();
+ if (table) {
+ struct metricgroup_print_data data = {
+ .pmu_name = pmu_name,
+ .metriclist = metriclist,
+ .metricgroups = metricgroups,
+ .filter = filter,
+ .raw = raw,
+ .details = details,
+ .groups = &groups,
+ };
- if (!pe->name && !pe->metric_group && !pe->metric_name)
- break;
- if (!pe->metric_expr)
- continue;
- if (pmu_name && perf_pmu__is_hybrid(pe->pmu) &&
- strcmp(pmu_name, pe->pmu)) {
- continue;
- }
- if (metricgroup__print_pmu_event(pe, metricgroups, filter,
- raw, details, &groups,
- metriclist) < 0)
- return;
+ pmu_events_table_for_each_event(table,
+ metricgroup__print_callback,
+ &data);
}
-
{
struct metricgroup_iter_data data = {
.fn = metricgroup__print_sys_event_iter,
@@ -850,16 +876,20 @@ struct metricgroup_add_iter_data {
bool metric_no_group;
struct metric *root_metric;
const struct visited_metric *visited;
- const struct pmu_events_map *map;
+ const struct pmu_events_table *table;
};
+static bool metricgroup__find_metric(const char *metric,
+ const struct pmu_events_table *table,
+ struct pmu_event *pe);
+
static int add_metric(struct list_head *metric_list,
const struct pmu_event *pe,
const char *modifier,
bool metric_no_group,
struct metric *root_metric,
const struct visited_metric *visited,
- const struct pmu_events_map *map);
+ const struct pmu_events_table *table);
/**
* resolve_metric - Locate metrics within the root metric and recursively add
@@ -874,7 +904,7 @@ static int add_metric(struct list_head *metric_list,
* metrics. When adding a root this argument is NULL.
* @visited: A singly linked list of metric names being added that is used to
* detect recursion.
- * @map: The map that is searched for metrics, most commonly the table for the
+ * @table: The table that is searched for metrics, most commonly the table for the
* architecture perf is running upon.
*/
static int resolve_metric(struct list_head *metric_list,
@@ -882,13 +912,13 @@ static int resolve_metric(struct list_head *metric_list,
bool metric_no_group,
struct metric *root_metric,
const struct visited_metric *visited,
- const struct pmu_events_map *map)
+ const struct pmu_events_table *table)
{
struct hashmap_entry *cur;
size_t bkt;
struct to_resolve {
/* The metric to resolve. */
- const struct pmu_event *pe;
+ struct pmu_event pe;
/*
* The key in the IDs map, this may differ from in case,
* etc. from pe->metric_name.
@@ -902,16 +932,15 @@ static int resolve_metric(struct list_head *metric_list,
* the pending array.
*/
hashmap__for_each_entry(root_metric->pctx->ids, cur, bkt) {
- const struct pmu_event *pe;
+ struct pmu_event pe;
- pe = metricgroup__find_metric(cur->key, map);
- if (pe) {
+ if (metricgroup__find_metric(cur->key, table, &pe)) {
pending = realloc(pending,
(pending_cnt + 1) * sizeof(struct to_resolve));
if (!pending)
return -ENOMEM;
- pending[pending_cnt].pe = pe;
+ memcpy(&pending[pending_cnt].pe, &pe, sizeof(pe));
pending[pending_cnt].key = cur->key;
pending_cnt++;
}
@@ -926,8 +955,8 @@ static int resolve_metric(struct list_head *metric_list,
* context.
*/
for (i = 0; i < pending_cnt; i++) {
- ret = add_metric(metric_list, pending[i].pe, modifier, metric_no_group,
- root_metric, visited, map);
+ ret = add_metric(metric_list, &pending[i].pe, modifier, metric_no_group,
+ root_metric, visited, table);
if (ret)
break;
}
@@ -950,7 +979,7 @@ static int resolve_metric(struct list_head *metric_list,
* metrics. When adding a root this argument is NULL.
* @visited: A singly linked list of metric names being added that is used to
* detect recursion.
- * @map: The map that is searched for metrics, most commonly the table for the
+ * @table: The table that is searched for metrics, most commonly the table for the
* architecture perf is running upon.
*/
static int __add_metric(struct list_head *metric_list,
@@ -960,7 +989,7 @@ static int __add_metric(struct list_head *metric_list,
int runtime,
struct metric *root_metric,
const struct visited_metric *visited,
- const struct pmu_events_map *map)
+ const struct pmu_events_table *table)
{
const struct visited_metric *vm;
int ret;
@@ -1032,7 +1061,7 @@ static int __add_metric(struct list_head *metric_list,
} else {
/* Resolve referenced metrics. */
ret = resolve_metric(metric_list, modifier, metric_no_group, root_metric,
- &visited_node, map);
+ &visited_node, table);
}
if (ret) {
@@ -1045,30 +1074,35 @@ static int __add_metric(struct list_head *metric_list,
return ret;
}
-#define map_for_each_event(__pe, __idx, __map) \
- if (__map) \
- for (__idx = 0, __pe = &__map->table[__idx]; \
- __pe->name || __pe->metric_group || __pe->metric_name; \
- __pe = &__map->table[++__idx])
-
-#define map_for_each_metric(__pe, __idx, __map, __metric) \
- map_for_each_event(__pe, __idx, __map) \
- if (__pe->metric_expr && \
- (match_metric(__pe->metric_group, __metric) || \
- match_metric(__pe->metric_name, __metric)))
+struct metricgroup__find_metric_data {
+ const char *metric;
+ struct pmu_event *pe;
+};
-const struct pmu_event *metricgroup__find_metric(const char *metric,
- const struct pmu_events_map *map)
+static int metricgroup__find_metric_callback(const struct pmu_event *pe,
+ const struct pmu_events_table *table __maybe_unused,
+ void *vdata)
{
- const struct pmu_event *pe;
- int i;
+ struct metricgroup__find_metric_data *data = vdata;
- map_for_each_event(pe, i, map) {
- if (match_metric(pe->metric_name, metric))
- return pe;
- }
+ if (!match_metric(pe->metric_name, data->metric))
+ return 0;
- return NULL;
+ memcpy(data->pe, pe, sizeof(*pe));
+ return 1;
+}
+
+static bool metricgroup__find_metric(const char *metric,
+ const struct pmu_events_table *table,
+ struct pmu_event *pe)
+{
+ struct metricgroup__find_metric_data data = {
+ .metric = metric,
+ .pe = pe,
+ };
+
+ return pmu_events_table_for_each_event(table, metricgroup__find_metric_callback, &data)
+ ? true : false;
}
static int add_metric(struct list_head *metric_list,
@@ -1077,7 +1111,7 @@ static int add_metric(struct list_head *metric_list,
bool metric_no_group,
struct metric *root_metric,
const struct visited_metric *visited,
- const struct pmu_events_map *map)
+ const struct pmu_events_table *table)
{
int ret = 0;
@@ -1085,7 +1119,7 @@ static int add_metric(struct list_head *metric_list,
if (!strstr(pe->metric_expr, "?")) {
ret = __add_metric(metric_list, pe, modifier, metric_no_group, 0,
- root_metric, visited, map);
+ root_metric, visited, table);
} else {
int j, count;
@@ -1098,14 +1132,15 @@ static int add_metric(struct list_head *metric_list,
for (j = 0; j < count && !ret; j++)
ret = __add_metric(metric_list, pe, modifier, metric_no_group, j,
- root_metric, visited, map);
+ root_metric, visited, table);
}
return ret;
}
static int metricgroup__add_metric_sys_event_iter(const struct pmu_event *pe,
- void *data)
+ const struct pmu_events_table *table __maybe_unused,
+ void *data)
{
struct metricgroup_add_iter_data *d = data;
int ret;
@@ -1114,7 +1149,7 @@ static int metricgroup__add_metric_sys_event_iter(const struct pmu_event *pe,
return 0;
ret = add_metric(d->metric_list, pe, d->modifier, d->metric_no_group,
- d->root_metric, d->visited, d->map);
+ d->root_metric, d->visited, d->table);
if (ret)
goto out;
@@ -1152,6 +1187,33 @@ static int metric_list_cmp(void *priv __maybe_unused, const struct list_head *l,
return right_count - left_count;
}
+struct metricgroup__add_metric_data {
+ struct list_head *list;
+ const char *metric_name;
+ const char *modifier;
+ bool metric_no_group;
+ bool has_match;
+};
+
+static int metricgroup__add_metric_callback(const struct pmu_event *pe,
+ const struct pmu_events_table *table,
+ void *vdata)
+{
+ struct metricgroup__add_metric_data *data = vdata;
+ int ret = 0;
+
+ if (pe->metric_expr &&
+ (match_metric(pe->metric_group, data->metric_name) ||
+ match_metric(pe->metric_name, data->metric_name))) {
+
+ data->has_match = true;
+ ret = add_metric(data->list, pe, data->modifier, data->metric_no_group,
+ /*root_metric=*/NULL,
+ /*visited_metrics=*/NULL, table);
+ }
+ return ret;
+}
+
/**
* metricgroup__add_metric - Find and add a metric, or a metric group.
* @metric_name: The name of the metric or metric group. For example, "IPC"
@@ -1162,32 +1224,37 @@ static int metric_list_cmp(void *priv __maybe_unused, const struct list_head *l,
* global. Grouping is the default but due to multiplexing the
* user may override.
* @metric_list: The list that the metric or metric group are added to.
- * @map: The map that is searched for metrics, most commonly the table for the
+ * @table: The table that is searched for metrics, most commonly the table for the
* architecture perf is running upon.
*/
static int metricgroup__add_metric(const char *metric_name, const char *modifier,
bool metric_no_group,
struct list_head *metric_list,
- const struct pmu_events_map *map)
+ const struct pmu_events_table *table)
{
- const struct pmu_event *pe;
LIST_HEAD(list);
- int i, ret;
+ int ret;
bool has_match = false;
- /*
- * Iterate over all metrics seeing if metric matches either the name or
- * group. When it does add the metric to the list.
- */
- map_for_each_metric(pe, i, map, metric_name) {
- has_match = true;
- ret = add_metric(&list, pe, modifier, metric_no_group,
- /*root_metric=*/NULL,
- /*visited_metrics=*/NULL, map);
+ {
+ struct metricgroup__add_metric_data data = {
+ .list = &list,
+ .metric_name = metric_name,
+ .modifier = modifier,
+ .metric_no_group = metric_no_group,
+ .has_match = false,
+ };
+ /*
+ * Iterate over all metrics to see if a metric matches either the
+ * name or the group. When it does, add the metric to the list.
+ */
+ ret = pmu_events_table_for_each_event(table, metricgroup__add_metric_callback,
+ &data);
if (ret)
goto out;
- }
+ has_match = data.has_match;
+ }
{
struct metricgroup_iter_data data = {
.fn = metricgroup__add_metric_sys_event_iter,
@@ -1198,7 +1265,7 @@ static int metricgroup__add_metric(const char *metric_name, const char *modifier
.metric_no_group = metric_no_group,
.has_match = &has_match,
.ret = &ret,
- .map = map,
+ .table = table,
},
};
@@ -1227,12 +1294,12 @@ out:
* global. Grouping is the default but due to multiplexing the
* user may override.
* @metric_list: The list that metrics are added to.
- * @map: The map that is searched for metrics, most commonly the table for the
+ * @table: The table that is searched for metrics, most commonly the table for the
* architecture perf is running upon.
*/
static int metricgroup__add_metric_list(const char *list, bool metric_no_group,
struct list_head *metric_list,
- const struct pmu_events_map *map)
+ const struct pmu_events_table *table)
{
char *list_itr, *list_copy, *metric_name, *modifier;
int ret, count = 0;
@@ -1249,7 +1316,7 @@ static int metricgroup__add_metric_list(const char *list, bool metric_no_group,
ret = metricgroup__add_metric(metric_name, modifier,
metric_no_group, metric_list,
- map);
+ table);
if (ret == -EINVAL)
pr_err("Cannot find metric or group `%s'\n", metric_name);
@@ -1440,7 +1507,7 @@ static int parse_groups(struct evlist *perf_evlist, const char *str,
bool metric_no_merge,
struct perf_pmu *fake_pmu,
struct rblist *metric_events_list,
- const struct pmu_events_map *map)
+ const struct pmu_events_table *table)
{
struct evlist *combined_evlist = NULL;
LIST_HEAD(metric_list);
@@ -1451,7 +1518,7 @@ static int parse_groups(struct evlist *perf_evlist, const char *str,
if (metric_events_list->nr_entries == 0)
metricgroup__rblist_init(metric_events_list);
ret = metricgroup__add_metric_list(str, metric_no_group,
- &metric_list, map);
+ &metric_list, table);
if (ret)
goto out;
@@ -1586,43 +1653,47 @@ int metricgroup__parse_groups(const struct option *opt,
struct rblist *metric_events)
{
struct evlist *perf_evlist = *(struct evlist **)opt->value;
- const struct pmu_events_map *map = pmu_events_map__find();
+ const struct pmu_events_table *table = pmu_events_table__find();
return parse_groups(perf_evlist, str, metric_no_group,
- metric_no_merge, NULL, metric_events, map);
+ metric_no_merge, NULL, metric_events, table);
}
int metricgroup__parse_groups_test(struct evlist *evlist,
- const struct pmu_events_map *map,
+ const struct pmu_events_table *table,
const char *str,
bool metric_no_group,
bool metric_no_merge,
struct rblist *metric_events)
{
return parse_groups(evlist, str, metric_no_group,
- metric_no_merge, &perf_pmu__fake, metric_events, map);
+ metric_no_merge, &perf_pmu__fake, metric_events, table);
+}
+
+static int metricgroup__has_metric_callback(const struct pmu_event *pe,
+ const struct pmu_events_table *table __maybe_unused,
+ void *vdata)
+{
+ const char *metric = vdata;
+
+ if (!pe->metric_expr)
+ return 0;
+
+ if (match_metric(pe->metric_name, metric))
+ return 1;
+
+ return 0;
}
bool metricgroup__has_metric(const char *metric)
{
- const struct pmu_events_map *map = pmu_events_map__find();
- const struct pmu_event *pe;
- int i;
+ const struct pmu_events_table *table = pmu_events_table__find();
- if (!map)
+ if (!table)
return false;
- for (i = 0; ; i++) {
- pe = &map->table[i];
-
- if (!pe->name && !pe->metric_group && !pe->metric_name)
- break;
- if (!pe->metric_expr)
- continue;
- if (match_metric(pe->metric_name, metric))
- return true;
- }
- return false;
+ return pmu_events_table_for_each_event(table, metricgroup__has_metric_callback,
+ (void *)metric) ? true : false;
}
int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
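
The conversions above all rely on the same callback convention: pmu_events_table_for_each_event() keeps iterating while the callback returns 0, and any non-zero return stops the walk and is propagated to the caller, which is how metricgroup__find_metric() signals a hit. A hypothetical example callback under that assumption, counting how many table entries carry a metric expression:

static int count_metrics_cb(const struct pmu_event *pe,
			    const struct pmu_events_table *table __maybe_unused,
			    void *vdata)
{
	int *count = vdata;

	if (pe->metric_expr)
		(*count)++;

	return 0;	/* 0 means: keep iterating over the whole table */
}
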
diff --git a/tools/perf/util/metricgroup.h b/tools/perf/util/metricgroup.h
index 2b42b778d1bf..016b3b1a289a 100644
--- a/tools/perf/util/metricgroup.h
+++ b/tools/perf/util/metricgroup.h
@@ -11,7 +11,6 @@ struct evlist;
struct evsel;
struct option;
struct rblist;
-struct pmu_events_map;
struct cgroup;
/**
@@ -70,10 +69,8 @@ int metricgroup__parse_groups(const struct option *opt,
bool metric_no_group,
bool metric_no_merge,
struct rblist *metric_events);
-const struct pmu_event *metricgroup__find_metric(const char *metric,
- const struct pmu_events_map *map);
int metricgroup__parse_groups_test(struct evlist *evlist,
- const struct pmu_events_map *map,
+ const struct pmu_events_table *table,
const char *str,
bool metric_no_group,
bool metric_no_merge,
diff --git a/tools/perf/util/ordered-events.h b/tools/perf/util/ordered-events.h
index 0b05c3c0aeaa..8febbd7c98ca 100644
--- a/tools/perf/util/ordered-events.h
+++ b/tools/perf/util/ordered-events.h
@@ -75,4 +75,10 @@ void ordered_events__set_copy_on_queue(struct ordered_events *oe, bool copy)
{
oe->copy_on_queue = copy;
}
+
+static inline u64 ordered_events__last_flush_time(struct ordered_events *oe)
+{
+ return oe->last_flush;
+}
+
#endif /* __ORDERED_EVENTS_H */
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 7ed235740431..f05e15acd33f 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -5,18 +5,12 @@
#include <dirent.h>
#include <errno.h>
#include <sys/ioctl.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
#include <sys/param.h>
#include "term.h"
-#include "build-id.h"
#include "evlist.h"
#include "evsel.h"
-#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include "parse-events.h"
-#include <subcmd/exec-cmd.h>
#include "string2.h"
#include "strlist.h"
#include "bpf-loader.h"
@@ -24,23 +18,24 @@
#include <api/fs/tracing_path.h>
#include <perf/cpumap.h>
#include "parse-events-bison.h"
-#define YY_EXTRA_TYPE void*
#include "parse-events-flex.h"
#include "pmu.h"
-#include "thread_map.h"
-#include "probe-file.h"
#include "asm/bug.h"
#include "util/parse-branch-options.h"
-#include "metricgroup.h"
#include "util/evsel_config.h"
#include "util/event.h"
-#include "util/pfm.h"
+#include "perf.h"
#include "util/parse-events-hybrid.h"
#include "util/pmu-hybrid.h"
-#include "perf.h"
+#include "tracepoint.h"
#define MAX_NAME_LEN 100
+struct perf_pmu_event_symbol {
+ char *symbol;
+ enum perf_pmu_event_symbol_type type;
+};
+
#ifdef PARSER_DEBUG
extern int parse_events_debug;
#endif
@@ -154,21 +149,6 @@ struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
},
};
-struct event_symbol event_symbols_tool[PERF_TOOL_MAX] = {
- [PERF_TOOL_DURATION_TIME] = {
- .symbol = "duration_time",
- .alias = "",
- },
- [PERF_TOOL_USER_TIME] = {
- .symbol = "user_time",
- .alias = "",
- },
- [PERF_TOOL_SYSTEM_TIME] = {
- .symbol = "system_time",
- .alias = "",
- },
-};
-
#define __PERF_EVENT_FIELD(config, name) \
((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT)
@@ -177,121 +157,6 @@ struct event_symbol event_symbols_tool[PERF_TOOL_MAX] = {
#define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE)
#define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT)
-#define for_each_subsystem(sys_dir, sys_dirent) \
- while ((sys_dirent = readdir(sys_dir)) != NULL) \
- if (sys_dirent->d_type == DT_DIR && \
- (strcmp(sys_dirent->d_name, ".")) && \
- (strcmp(sys_dirent->d_name, "..")))
-
-static int tp_event_has_id(const char *dir_path, struct dirent *evt_dir)
-{
- char evt_path[MAXPATHLEN];
- int fd;
-
- snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path, evt_dir->d_name);
- fd = open(evt_path, O_RDONLY);
- if (fd < 0)
- return -EINVAL;
- close(fd);
-
- return 0;
-}
-
-#define for_each_event(dir_path, evt_dir, evt_dirent) \
- while ((evt_dirent = readdir(evt_dir)) != NULL) \
- if (evt_dirent->d_type == DT_DIR && \
- (strcmp(evt_dirent->d_name, ".")) && \
- (strcmp(evt_dirent->d_name, "..")) && \
- (!tp_event_has_id(dir_path, evt_dirent)))
-
-#define MAX_EVENT_LENGTH 512
-
-struct tracepoint_path *tracepoint_id_to_path(u64 config)
-{
- struct tracepoint_path *path = NULL;
- DIR *sys_dir, *evt_dir;
- struct dirent *sys_dirent, *evt_dirent;
- char id_buf[24];
- int fd;
- u64 id;
- char evt_path[MAXPATHLEN];
- char *dir_path;
-
- sys_dir = tracing_events__opendir();
- if (!sys_dir)
- return NULL;
-
- for_each_subsystem(sys_dir, sys_dirent) {
- dir_path = get_events_file(sys_dirent->d_name);
- if (!dir_path)
- continue;
- evt_dir = opendir(dir_path);
- if (!evt_dir)
- goto next;
-
- for_each_event(dir_path, evt_dir, evt_dirent) {
-
- scnprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path,
- evt_dirent->d_name);
- fd = open(evt_path, O_RDONLY);
- if (fd < 0)
- continue;
- if (read(fd, id_buf, sizeof(id_buf)) < 0) {
- close(fd);
- continue;
- }
- close(fd);
- id = atoll(id_buf);
- if (id == config) {
- put_events_file(dir_path);
- closedir(evt_dir);
- closedir(sys_dir);
- path = zalloc(sizeof(*path));
- if (!path)
- return NULL;
- if (asprintf(&path->system, "%.*s", MAX_EVENT_LENGTH, sys_dirent->d_name) < 0) {
- free(path);
- return NULL;
- }
- if (asprintf(&path->name, "%.*s", MAX_EVENT_LENGTH, evt_dirent->d_name) < 0) {
- zfree(&path->system);
- free(path);
- return NULL;
- }
- return path;
- }
- }
- closedir(evt_dir);
-next:
- put_events_file(dir_path);
- }
-
- closedir(sys_dir);
- return NULL;
-}
-
-struct tracepoint_path *tracepoint_name_to_path(const char *name)
-{
- struct tracepoint_path *path = zalloc(sizeof(*path));
- char *str = strchr(name, ':');
-
- if (path == NULL || str == NULL) {
- free(path);
- return NULL;
- }
-
- path->system = strndup(name, str - name);
- path->name = strdup(str+1);
-
- if (path->system == NULL || path->name == NULL) {
- zfree(&path->system);
- zfree(&path->name);
- zfree(&path);
- }
-
- return path;
-}
-
const char *event_type(int type)
{
switch (type) {
@@ -2375,6 +2240,17 @@ int __parse_events(struct evlist *evlist, const char *str,
return ret;
}
+int parse_event(struct evlist *evlist, const char *str)
+{
+ struct parse_events_error err;
+ int ret;
+
+ parse_events_error__init(&err);
+ ret = parse_events(evlist, str, &err);
+ parse_events_error__exit(&err);
+ return ret;
+}
+
void parse_events_error__init(struct parse_events_error *err)
{
bzero(err, sizeof(*err));
@@ -2391,10 +2267,8 @@ void parse_events_error__exit(struct parse_events_error *err)
void parse_events_error__handle(struct parse_events_error *err, int idx,
char *str, char *help)
{
- if (WARN(!str, "WARNING: failed to provide error string\n")) {
- free(help);
- return;
- }
+ if (WARN(!str || !err, "WARNING: failed to provide error string or struct\n"))
+ goto out_free;
switch (err->num_errors) {
case 0:
err->idx = idx;
@@ -2419,6 +2293,11 @@ void parse_events_error__handle(struct parse_events_error *err, int idx,
break;
}
err->num_errors++;
+ return;
+
+out_free:
+ free(str);
+ free(help);
}
#define MAX_WIDTH 1000
@@ -2666,571 +2545,6 @@ int exclude_perf(const struct option *opt,
NULL);
}
-static const char * const event_type_descriptors[] = {
- "Hardware event",
- "Software event",
- "Tracepoint event",
- "Hardware cache event",
- "Raw hardware event descriptor",
- "Hardware breakpoint",
-};
-
-static int cmp_string(const void *a, const void *b)
-{
- const char * const *as = a;
- const char * const *bs = b;
-
- return strcmp(*as, *bs);
-}
-
-/*
- * Print the events from <debugfs_mount_point>/tracing/events
- */
-
-void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
- bool name_only)
-{
- DIR *sys_dir, *evt_dir;
- struct dirent *sys_dirent, *evt_dirent;
- char evt_path[MAXPATHLEN];
- char *dir_path;
- char **evt_list = NULL;
- unsigned int evt_i = 0, evt_num = 0;
- bool evt_num_known = false;
-
-restart:
- sys_dir = tracing_events__opendir();
- if (!sys_dir)
- return;
-
- if (evt_num_known) {
- evt_list = zalloc(sizeof(char *) * evt_num);
- if (!evt_list)
- goto out_close_sys_dir;
- }
-
- for_each_subsystem(sys_dir, sys_dirent) {
- if (subsys_glob != NULL &&
- !strglobmatch(sys_dirent->d_name, subsys_glob))
- continue;
-
- dir_path = get_events_file(sys_dirent->d_name);
- if (!dir_path)
- continue;
- evt_dir = opendir(dir_path);
- if (!evt_dir)
- goto next;
-
- for_each_event(dir_path, evt_dir, evt_dirent) {
- if (event_glob != NULL &&
- !strglobmatch(evt_dirent->d_name, event_glob))
- continue;
-
- if (!evt_num_known) {
- evt_num++;
- continue;
- }
-
- snprintf(evt_path, MAXPATHLEN, "%s:%s",
- sys_dirent->d_name, evt_dirent->d_name);
-
- evt_list[evt_i] = strdup(evt_path);
- if (evt_list[evt_i] == NULL) {
- put_events_file(dir_path);
- goto out_close_evt_dir;
- }
- evt_i++;
- }
- closedir(evt_dir);
-next:
- put_events_file(dir_path);
- }
- closedir(sys_dir);
-
- if (!evt_num_known) {
- evt_num_known = true;
- goto restart;
- }
- qsort(evt_list, evt_num, sizeof(char *), cmp_string);
- evt_i = 0;
- while (evt_i < evt_num) {
- if (name_only) {
- printf("%s ", evt_list[evt_i++]);
- continue;
- }
- printf(" %-50s [%s]\n", evt_list[evt_i++],
- event_type_descriptors[PERF_TYPE_TRACEPOINT]);
- }
- if (evt_num && pager_in_use())
- printf("\n");
-
-out_free:
- evt_num = evt_i;
- for (evt_i = 0; evt_i < evt_num; evt_i++)
- zfree(&evt_list[evt_i]);
- zfree(&evt_list);
- return;
-
-out_close_evt_dir:
- closedir(evt_dir);
-out_close_sys_dir:
- closedir(sys_dir);
-
- printf("FATAL: not enough memory to print %s\n",
- event_type_descriptors[PERF_TYPE_TRACEPOINT]);
- if (evt_list)
- goto out_free;
-}
-
-/*
- * Check whether event is in <debugfs_mount_point>/tracing/events
- */
-
-int is_valid_tracepoint(const char *event_string)
-{
- DIR *sys_dir, *evt_dir;
- struct dirent *sys_dirent, *evt_dirent;
- char evt_path[MAXPATHLEN];
- char *dir_path;
-
- sys_dir = tracing_events__opendir();
- if (!sys_dir)
- return 0;
-
- for_each_subsystem(sys_dir, sys_dirent) {
- dir_path = get_events_file(sys_dirent->d_name);
- if (!dir_path)
- continue;
- evt_dir = opendir(dir_path);
- if (!evt_dir)
- goto next;
-
- for_each_event(dir_path, evt_dir, evt_dirent) {
- snprintf(evt_path, MAXPATHLEN, "%s:%s",
- sys_dirent->d_name, evt_dirent->d_name);
- if (!strcmp(evt_path, event_string)) {
- closedir(evt_dir);
- closedir(sys_dir);
- return 1;
- }
- }
- closedir(evt_dir);
-next:
- put_events_file(dir_path);
- }
- closedir(sys_dir);
- return 0;
-}
-
-static bool is_event_supported(u8 type, u64 config)
-{
- bool ret = true;
- int open_return;
- struct evsel *evsel;
- struct perf_event_attr attr = {
- .type = type,
- .config = config,
- .disabled = 1,
- };
- struct perf_thread_map *tmap = thread_map__new_by_tid(0);
-
- if (tmap == NULL)
- return false;
-
- evsel = evsel__new(&attr);
- if (evsel) {
- open_return = evsel__open(evsel, NULL, tmap);
- ret = open_return >= 0;
-
- if (open_return == -EACCES) {
- /*
- * This happens if the paranoid value
- * /proc/sys/kernel/perf_event_paranoid is set to 2
- * Re-run with exclude_kernel set; we don't do that
- * by default as some ARM machines do not support it.
- *
- */
- evsel->core.attr.exclude_kernel = 1;
- ret = evsel__open(evsel, NULL, tmap) >= 0;
- }
- evsel__delete(evsel);
- }
-
- perf_thread_map__put(tmap);
- return ret;
-}
-
-void print_sdt_events(const char *subsys_glob, const char *event_glob,
- bool name_only)
-{
- struct probe_cache *pcache;
- struct probe_cache_entry *ent;
- struct strlist *bidlist, *sdtlist;
- struct strlist_config cfg = {.dont_dupstr = true};
- struct str_node *nd, *nd2;
- char *buf, *path, *ptr = NULL;
- bool show_detail = false;
- int ret;
-
- sdtlist = strlist__new(NULL, &cfg);
- if (!sdtlist) {
- pr_debug("Failed to allocate new strlist for SDT\n");
- return;
- }
- bidlist = build_id_cache__list_all(true);
- if (!bidlist) {
- pr_debug("Failed to get buildids: %d\n", errno);
- return;
- }
- strlist__for_each_entry(nd, bidlist) {
- pcache = probe_cache__new(nd->s, NULL);
- if (!pcache)
- continue;
- list_for_each_entry(ent, &pcache->entries, node) {
- if (!ent->sdt)
- continue;
- if (subsys_glob &&
- !strglobmatch(ent->pev.group, subsys_glob))
- continue;
- if (event_glob &&
- !strglobmatch(ent->pev.event, event_glob))
- continue;
- ret = asprintf(&buf, "%s:%s@%s", ent->pev.group,
- ent->pev.event, nd->s);
- if (ret > 0)
- strlist__add(sdtlist, buf);
- }
- probe_cache__delete(pcache);
- }
- strlist__delete(bidlist);
-
- strlist__for_each_entry(nd, sdtlist) {
- buf = strchr(nd->s, '@');
- if (buf)
- *(buf++) = '\0';
- if (name_only) {
- printf("%s ", nd->s);
- continue;
- }
- nd2 = strlist__next(nd);
- if (nd2) {
- ptr = strchr(nd2->s, '@');
- if (ptr)
- *ptr = '\0';
- if (strcmp(nd->s, nd2->s) == 0)
- show_detail = true;
- }
- if (show_detail) {
- path = build_id_cache__origname(buf);
- ret = asprintf(&buf, "%s@%s(%.12s)", nd->s, path, buf);
- if (ret > 0) {
- printf(" %-50s [%s]\n", buf, "SDT event");
- free(buf);
- }
- free(path);
- } else
- printf(" %-50s [%s]\n", nd->s, "SDT event");
- if (nd2) {
- if (strcmp(nd->s, nd2->s) != 0)
- show_detail = false;
- if (ptr)
- *ptr = '@';
- }
- }
- strlist__delete(sdtlist);
-}
-
-int print_hwcache_events(const char *event_glob, bool name_only)
-{
- unsigned int type, op, i, evt_i = 0, evt_num = 0, npmus = 0;
- char name[64], new_name[128];
- char **evt_list = NULL, **evt_pmus = NULL;
- bool evt_num_known = false;
- struct perf_pmu *pmu = NULL;
-
- if (perf_pmu__has_hybrid()) {
- npmus = perf_pmu__hybrid_pmu_num();
- evt_pmus = zalloc(sizeof(char *) * npmus);
- if (!evt_pmus)
- goto out_enomem;
- }
-
-restart:
- if (evt_num_known) {
- evt_list = zalloc(sizeof(char *) * evt_num);
- if (!evt_list)
- goto out_enomem;
- }
-
- for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
- for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
- /* skip invalid cache type */
- if (!evsel__is_cache_op_valid(type, op))
- continue;
-
- for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
- unsigned int hybrid_supported = 0, j;
- bool supported;
-
- __evsel__hw_cache_type_op_res_name(type, op, i, name, sizeof(name));
- if (event_glob != NULL && !strglobmatch(name, event_glob))
- continue;
-
- if (!perf_pmu__has_hybrid()) {
- if (!is_event_supported(PERF_TYPE_HW_CACHE,
- type | (op << 8) | (i << 16))) {
- continue;
- }
- } else {
- perf_pmu__for_each_hybrid_pmu(pmu) {
- if (!evt_num_known) {
- evt_num++;
- continue;
- }
-
- supported = is_event_supported(
- PERF_TYPE_HW_CACHE,
- type | (op << 8) | (i << 16) |
- ((__u64)pmu->type << PERF_PMU_TYPE_SHIFT));
- if (supported) {
- snprintf(new_name, sizeof(new_name), "%s/%s/",
- pmu->name, name);
- evt_pmus[hybrid_supported] = strdup(new_name);
- hybrid_supported++;
- }
- }
-
- if (hybrid_supported == 0)
- continue;
- }
-
- if (!evt_num_known) {
- evt_num++;
- continue;
- }
-
- if ((hybrid_supported == 0) ||
- (hybrid_supported == npmus)) {
- evt_list[evt_i] = strdup(name);
- if (npmus > 0) {
- for (j = 0; j < npmus; j++)
- zfree(&evt_pmus[j]);
- }
- } else {
- for (j = 0; j < hybrid_supported; j++) {
- evt_list[evt_i++] = evt_pmus[j];
- evt_pmus[j] = NULL;
- }
- continue;
- }
-
- if (evt_list[evt_i] == NULL)
- goto out_enomem;
- evt_i++;
- }
- }
- }
-
- if (!evt_num_known) {
- evt_num_known = true;
- goto restart;
- }
-
- for (evt_i = 0; evt_i < evt_num; evt_i++) {
- if (!evt_list[evt_i])
- break;
- }
-
- evt_num = evt_i;
- qsort(evt_list, evt_num, sizeof(char *), cmp_string);
- evt_i = 0;
- while (evt_i < evt_num) {
- if (name_only) {
- printf("%s ", evt_list[evt_i++]);
- continue;
- }
- printf(" %-50s [%s]\n", evt_list[evt_i++],
- event_type_descriptors[PERF_TYPE_HW_CACHE]);
- }
- if (evt_num && pager_in_use())
- printf("\n");
-
-out_free:
- evt_num = evt_i;
- for (evt_i = 0; evt_i < evt_num; evt_i++)
- zfree(&evt_list[evt_i]);
- zfree(&evt_list);
-
- for (evt_i = 0; evt_i < npmus; evt_i++)
- zfree(&evt_pmus[evt_i]);
- zfree(&evt_pmus);
- return evt_num;
-
-out_enomem:
- printf("FATAL: not enough memory to print %s\n", event_type_descriptors[PERF_TYPE_HW_CACHE]);
- if (evt_list)
- goto out_free;
- return evt_num;
-}
-
-static void print_tool_event(const struct event_symbol *syms, const char *event_glob,
- bool name_only)
-{
- if (syms->symbol == NULL)
- return;
-
- if (event_glob && !(strglobmatch(syms->symbol, event_glob) ||
- (syms->alias && strglobmatch(syms->alias, event_glob))))
- return;
-
- if (name_only)
- printf("%s ", syms->symbol);
- else {
- char name[MAX_NAME_LEN];
- if (syms->alias && strlen(syms->alias))
- snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias);
- else
- strlcpy(name, syms->symbol, MAX_NAME_LEN);
- printf(" %-50s [%s]\n", name, "Tool event");
- }
-}
-
-void print_tool_events(const char *event_glob, bool name_only)
-{
- // Start at 1 because the first enum entry symbols no tool event
- for (int i = 1; i < PERF_TOOL_MAX; ++i) {
- print_tool_event(event_symbols_tool + i, event_glob, name_only);
- }
- if (pager_in_use())
- printf("\n");
-}
-
-void print_symbol_events(const char *event_glob, unsigned type,
- struct event_symbol *syms, unsigned max,
- bool name_only)
-{
- unsigned int i, evt_i = 0, evt_num = 0;
- char name[MAX_NAME_LEN];
- char **evt_list = NULL;
- bool evt_num_known = false;
-
-restart:
- if (evt_num_known) {
- evt_list = zalloc(sizeof(char *) * evt_num);
- if (!evt_list)
- goto out_enomem;
- syms -= max;
- }
-
- for (i = 0; i < max; i++, syms++) {
- /*
- * New attr.config still not supported here, the latest
- * example was PERF_COUNT_SW_CGROUP_SWITCHES
- */
- if (syms->symbol == NULL)
- continue;
-
- if (event_glob != NULL && !(strglobmatch(syms->symbol, event_glob) ||
- (syms->alias && strglobmatch(syms->alias, event_glob))))
- continue;
-
- if (!is_event_supported(type, i))
- continue;
-
- if (!evt_num_known) {
- evt_num++;
- continue;
- }
-
- if (!name_only && strlen(syms->alias))
- snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias);
- else
- strlcpy(name, syms->symbol, MAX_NAME_LEN);
-
- evt_list[evt_i] = strdup(name);
- if (evt_list[evt_i] == NULL)
- goto out_enomem;
- evt_i++;
- }
-
- if (!evt_num_known) {
- evt_num_known = true;
- goto restart;
- }
- qsort(evt_list, evt_num, sizeof(char *), cmp_string);
- evt_i = 0;
- while (evt_i < evt_num) {
- if (name_only) {
- printf("%s ", evt_list[evt_i++]);
- continue;
- }
- printf(" %-50s [%s]\n", evt_list[evt_i++], event_type_descriptors[type]);
- }
- if (evt_num && pager_in_use())
- printf("\n");
-
-out_free:
- evt_num = evt_i;
- for (evt_i = 0; evt_i < evt_num; evt_i++)
- zfree(&evt_list[evt_i]);
- zfree(&evt_list);
- return;
-
-out_enomem:
- printf("FATAL: not enough memory to print %s\n", event_type_descriptors[type]);
- if (evt_list)
- goto out_free;
-}
-
-/*
- * Print the help text for the event symbols:
- */
-void print_events(const char *event_glob, bool name_only, bool quiet_flag,
- bool long_desc, bool details_flag, bool deprecated,
- const char *pmu_name)
-{
- print_symbol_events(event_glob, PERF_TYPE_HARDWARE,
- event_symbols_hw, PERF_COUNT_HW_MAX, name_only);
-
- print_symbol_events(event_glob, PERF_TYPE_SOFTWARE,
- event_symbols_sw, PERF_COUNT_SW_MAX, name_only);
- print_tool_events(event_glob, name_only);
-
- print_hwcache_events(event_glob, name_only);
-
- print_pmu_events(event_glob, name_only, quiet_flag, long_desc,
- details_flag, deprecated, pmu_name);
-
- if (event_glob != NULL)
- return;
-
- if (!name_only) {
- printf(" %-50s [%s]\n",
- "rNNN",
- event_type_descriptors[PERF_TYPE_RAW]);
- printf(" %-50s [%s]\n",
- "cpu/t1=v1[,t2=v2,t3 ...]/modifier",
- event_type_descriptors[PERF_TYPE_RAW]);
- if (pager_in_use())
- printf(" (see 'man perf-list' on how to encode it)\n\n");
-
- printf(" %-50s [%s]\n",
- "mem:<addr>[/len][:access]",
- event_type_descriptors[PERF_TYPE_BREAKPOINT]);
- if (pager_in_use())
- printf("\n");
- }
-
- print_tracepoint_events(NULL, NULL, name_only);
-
- print_sdt_events(NULL, NULL, name_only);
-
- metricgroup__print(true, true, NULL, name_only, details_flag,
- pmu_name);
-
- print_libpfm_events(name_only, long_desc);
-}
-
int parse_events__is_hardcoded_term(struct parse_events_term *term)
{
return term->type_term != PARSE_EVENTS__TERM_TYPE_USER;
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index a38b8b160e80..7e6a601d9cd0 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -11,7 +11,6 @@
#include <linux/perf_event.h>
#include <string.h>
-struct list_head;
struct evsel;
struct evlist;
struct parse_events_error;
@@ -19,35 +18,29 @@ struct parse_events_error;
struct option;
struct perf_pmu;
-struct tracepoint_path {
- char *system;
- char *name;
- struct tracepoint_path *next;
-};
-
-struct tracepoint_path *tracepoint_id_to_path(u64 config);
-struct tracepoint_path *tracepoint_name_to_path(const char *name);
bool have_tracepoints(struct list_head *evlist);
const char *event_type(int type);
int parse_events_option(const struct option *opt, const char *str, int unset);
int parse_events_option_new_evlist(const struct option *opt, const char *str, int unset);
+__attribute__((nonnull(1, 2, 3)))
int __parse_events(struct evlist *evlist, const char *str, struct parse_events_error *error,
struct perf_pmu *fake_pmu);
+__attribute__((nonnull))
static inline int parse_events(struct evlist *evlist, const char *str,
struct parse_events_error *err)
{
return __parse_events(evlist, str, err, NULL);
}
+int parse_event(struct evlist *evlist, const char *str);
+
int parse_events_terms(struct list_head *terms, const char *str);
int parse_filter(const struct option *opt, const char *str, int unset);
int exclude_perf(const struct option *opt, const char *arg, int unset);
-#define EVENTS_HELP_MAX (128*1024)
-
enum perf_pmu_event_symbol_type {
PMU_EVENT_SYMBOL_ERR, /* not a PMU EVENT */
PMU_EVENT_SYMBOL, /* normal style PMU event */
@@ -56,11 +49,6 @@ enum perf_pmu_event_symbol_type {
PMU_EVENT_SYMBOL_SUFFIX2, /* suffix of pre-suf2 style event */
};
-struct perf_pmu_event_symbol {
- char *symbol;
- enum perf_pmu_event_symbol_type type;
-};
-
enum {
PARSE_EVENTS__TERM_TYPE_NUM,
PARSE_EVENTS__TERM_TYPE_STR,
@@ -219,28 +207,13 @@ void parse_events_update_lists(struct list_head *list_event,
void parse_events_evlist_error(struct parse_events_state *parse_state,
int idx, const char *str);
-void print_events(const char *event_glob, bool name_only, bool quiet,
- bool long_desc, bool details_flag, bool deprecated,
- const char *pmu_name);
-
struct event_symbol {
const char *symbol;
const char *alias;
};
extern struct event_symbol event_symbols_hw[];
extern struct event_symbol event_symbols_sw[];
-void print_symbol_events(const char *event_glob, unsigned type,
- struct event_symbol *syms, unsigned max,
- bool name_only);
-void print_tool_events(const char *event_glob, bool name_only);
-void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
- bool name_only);
-int print_hwcache_events(const char *event_glob, bool name_only);
-void print_sdt_events(const char *subsys_glob, const char *event_glob,
- bool name_only);
-int is_valid_tracepoint(const char *event_string);
-int valid_event_mount(const char *eventfs);
char *parse_events_formats_error_string(char *additional_terms);
void parse_events_error__init(struct parse_events_error *err);
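The header now also declares parse_event(), and __parse_events()/parse_events() gain nonnull attributes, so callers can no longer pass a NULL error pointer. The helper's definition is not part of this hunk; a minimal sketch of the shape such a wrapper usually takes, assuming a parse_events_error__exit() teardown counterpart to the parse_events_error__init() declared here, is:

	int parse_event(struct evlist *evlist, const char *str)
	{
		struct parse_events_error err;
		int ret;

		parse_events_error__init(&err);
		ret = parse_events(evlist, str, &err);
		parse_events_error__exit(&err);	/* assumed teardown counterpart */
		return ret;
	}

Callers that previously passed a NULL error (perf_api_probe.c and record.c below) switch to this helper, which owns the error object itself.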
diff --git a/tools/perf/util/perf_api_probe.c b/tools/perf/util/perf_api_probe.c
index c28dd50bd571..e1e2d701599c 100644
--- a/tools/perf/util/perf_api_probe.c
+++ b/tools/perf/util/perf_api_probe.c
@@ -23,7 +23,7 @@ static int perf_do_probe_api(setup_probe_fn_t fn, struct perf_cpu cpu, const cha
if (!evlist)
return -ENOMEM;
- if (parse_events(evlist, str, NULL))
+ if (parse_event(evlist, str))
goto out_delete;
evsel = evlist__first(evlist);
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 9a1c7e63e663..89655d53117a 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -690,7 +690,7 @@ static int is_arm_pmu_core(const char *name)
return file_available(path);
}
-static char *perf_pmu__getcpuid(struct perf_pmu *pmu)
+char *perf_pmu__getcpuid(struct perf_pmu *pmu)
{
char *cpuid;
static bool printed;
@@ -710,36 +710,9 @@ static char *perf_pmu__getcpuid(struct perf_pmu *pmu)
return cpuid;
}
-const struct pmu_events_map *perf_pmu__find_map(struct perf_pmu *pmu)
+__weak const struct pmu_events_table *pmu_events_table__find(void)
{
- const struct pmu_events_map *map;
- char *cpuid = perf_pmu__getcpuid(pmu);
- int i;
-
- /* on some platforms which uses cpus map, cpuid can be NULL for
- * PMUs other than CORE PMUs.
- */
- if (!cpuid)
- return NULL;
-
- i = 0;
- for (;;) {
- map = &pmu_events_map[i++];
- if (!map->table) {
- map = NULL;
- break;
- }
-
- if (!strcmp_cpuid_str(map->cpuid, cpuid))
- break;
- }
- free(cpuid);
- return map;
-}
-
-const struct pmu_events_map *__weak pmu_events_map__find(void)
-{
- return perf_pmu__find_map(NULL);
+ return perf_pmu__find_table(NULL);
}
/*
@@ -818,81 +791,63 @@ out:
return res;
}
-/*
- * From the pmu_events_map, find the table of PMU events that corresponds
- * to the current running CPU. Then, add all PMU events from that table
- * as aliases.
- */
-void pmu_add_cpu_aliases_map(struct list_head *head, struct perf_pmu *pmu,
- const struct pmu_events_map *map)
+struct pmu_add_cpu_aliases_map_data {
+ struct list_head *head;
+ const char *name;
+ const char *cpu_name;
+ struct perf_pmu *pmu;
+};
+
+static int pmu_add_cpu_aliases_map_callback(const struct pmu_event *pe,
+ const struct pmu_events_table *table __maybe_unused,
+ void *vdata)
{
- int i;
- const char *name = pmu->name;
- /*
- * Found a matching PMU events table. Create aliases
- */
- i = 0;
- while (1) {
- const char *cpu_name = is_arm_pmu_core(name) ? name : "cpu";
- const struct pmu_event *pe = &map->table[i++];
- const char *pname = pe->pmu ? pe->pmu : cpu_name;
+ struct pmu_add_cpu_aliases_map_data *data = vdata;
+ const char *pname = pe->pmu ? pe->pmu : data->cpu_name;
- if (!pe->name) {
- if (pe->metric_group || pe->metric_name)
- continue;
- break;
- }
+ if (!pe->name)
+ return 0;
- if (pmu->is_uncore && pmu_uncore_alias_match(pname, name))
- goto new_alias;
+ if (data->pmu->is_uncore && pmu_uncore_alias_match(pname, data->name))
+ goto new_alias;
- if (strcmp(pname, name))
- continue;
+ if (strcmp(pname, data->name))
+ return 0;
new_alias:
- /* need type casts to override 'const' */
- __perf_pmu__new_alias(head, NULL, (char *)pe->name,
- (char *)pe->desc, (char *)pe->event,
- pe);
- }
+ /* need type casts to override 'const' */
+ __perf_pmu__new_alias(data->head, NULL, (char *)pe->name, (char *)pe->desc,
+ (char *)pe->event, pe);
+ return 0;
}
-static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
+/*
+ * From the pmu_events_map, find the table of PMU events that corresponds
+ * to the current running CPU. Then, add all PMU events from that table
+ * as aliases.
+ */
+void pmu_add_cpu_aliases_table(struct list_head *head, struct perf_pmu *pmu,
+ const struct pmu_events_table *table)
{
- const struct pmu_events_map *map;
-
- map = perf_pmu__find_map(pmu);
- if (!map)
- return;
+ struct pmu_add_cpu_aliases_map_data data = {
+ .head = head,
+ .name = pmu->name,
+ .cpu_name = is_arm_pmu_core(pmu->name) ? pmu->name : "cpu",
+ .pmu = pmu,
+ };
- pmu_add_cpu_aliases_map(head, pmu, map);
+ pmu_events_table_for_each_event(table, pmu_add_cpu_aliases_map_callback, &data);
}
-void pmu_for_each_sys_event(pmu_sys_event_iter_fn fn, void *data)
+static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
{
- int i = 0;
-
- while (1) {
- const struct pmu_sys_events *event_table;
- int j = 0;
-
- event_table = &pmu_sys_event_tables[i++];
-
- if (!event_table->table)
- break;
+ const struct pmu_events_table *table;
- while (1) {
- const struct pmu_event *pe = &event_table->table[j++];
- int ret;
-
- if (!pe->name && !pe->metric_group && !pe->metric_name)
- break;
+ table = perf_pmu__find_table(pmu);
+ if (!table)
+ return;
- ret = fn(pe, data);
- if (ret)
- break;
- }
- }
+ pmu_add_cpu_aliases_table(head, pmu, table);
}
struct pmu_sys_event_iter_data {
@@ -900,7 +855,9 @@ struct pmu_sys_event_iter_data {
struct perf_pmu *pmu;
};
-static int pmu_add_sys_aliases_iter_fn(const struct pmu_event *pe, void *data)
+static int pmu_add_sys_aliases_iter_fn(const struct pmu_event *pe,
+ const struct pmu_events_table *table __maybe_unused,
+ void *data)
{
struct pmu_sys_event_iter_data *idata = data;
struct perf_pmu *pmu = idata->pmu;
@@ -1890,7 +1847,11 @@ int perf_pmu__caps_parse(struct perf_pmu *pmu)
const char *sysfs = sysfs__mountpoint();
DIR *caps_dir;
struct dirent *evt_ent;
- int nr_caps = 0;
+
+ if (pmu->caps_initialized)
+ return pmu->nr_caps;
+
+ pmu->nr_caps = 0;
if (!sysfs)
return -1;
@@ -1898,8 +1859,10 @@ int perf_pmu__caps_parse(struct perf_pmu *pmu)
snprintf(caps_path, PATH_MAX,
"%s" EVENT_SOURCE_DEVICE_PATH "%s/caps", sysfs, pmu->name);
- if (stat(caps_path, &st) < 0)
+ if (stat(caps_path, &st) < 0) {
+ pmu->caps_initialized = true;
return 0; /* no error if caps does not exist */
+ }
caps_dir = opendir(caps_path);
if (!caps_dir)
@@ -1926,13 +1889,14 @@ int perf_pmu__caps_parse(struct perf_pmu *pmu)
continue;
}
- nr_caps++;
+ pmu->nr_caps++;
fclose(file);
}
closedir(caps_dir);
- return nr_caps;
+ pmu->caps_initialized = true;
+ return pmu->nr_caps;
}
void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config,
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index 541889fa9f9c..a7b0f9507510 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -46,6 +46,8 @@ struct perf_pmu {
struct perf_cpu_map *cpus;
struct list_head format; /* HEAD struct perf_pmu_format -> list */
struct list_head aliases; /* HEAD struct perf_pmu_alias -> list */
+ bool caps_initialized;
+ u32 nr_caps;
struct list_head caps; /* HEAD struct perf_pmu_caps -> list */
struct list_head list; /* ELEM */
struct list_head hybrid_list;
@@ -123,16 +125,14 @@ int perf_pmu__scan_file(struct perf_pmu *pmu, const char *name, const char *fmt,
int perf_pmu__test(void);
struct perf_event_attr *perf_pmu__get_default_config(struct perf_pmu *pmu);
-void pmu_add_cpu_aliases_map(struct list_head *head, struct perf_pmu *pmu,
- const struct pmu_events_map *map);
+void pmu_add_cpu_aliases_table(struct list_head *head, struct perf_pmu *pmu,
+ const struct pmu_events_table *table);
-const struct pmu_events_map *perf_pmu__find_map(struct perf_pmu *pmu);
-const struct pmu_events_map *pmu_events_map__find(void);
+char *perf_pmu__getcpuid(struct perf_pmu *pmu);
+const struct pmu_events_table *pmu_events_table__find(void);
bool pmu_uncore_alias_match(const char *pmu_name, const char *name);
void perf_pmu_free_alias(struct perf_pmu_alias *alias);
-typedef int (*pmu_sys_event_iter_fn)(const struct pmu_event *pe, void *data);
-void pmu_for_each_sys_event(pmu_sys_event_iter_fn fn, void *data);
int perf_pmu__convert_scale(const char *scale, char **end, double *sval);
int perf_pmu__caps_parse(struct perf_pmu *pmu);
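With perf_pmu__find_map() and pmu_for_each_sys_event() gone, iteration over the generated event tables now goes through an opaque table handle plus a callback, as pmu_add_cpu_aliases_map_callback() above shows. A minimal sketch of a caller-side walk, with hypothetical names (count_events_data, count_events_cb), relying on pmu.c's existing includes, and assuming the callback contract seen in this patch (return 0 to keep iterating, non-zero to stop):

	struct count_events_data {
		const char *pmu_name;	/* hypothetical: PMU whose events we count */
		int count;
	};

	static int count_events_cb(const struct pmu_event *pe,
				   const struct pmu_events_table *table __maybe_unused,
				   void *vdata)
	{
		struct count_events_data *data = vdata;

		if (pe->name && pe->pmu && !strcmp(pe->pmu, data->pmu_name))
			data->count++;
		return 0;	/* keep iterating; non-zero would terminate the walk */
	}

	/* ... in a caller ... */
	struct count_events_data data = { .pmu_name = "cpu", .count = 0 };
	const struct pmu_events_table *table = pmu_events_table__find();

	if (table)
		pmu_events_table_for_each_event(table, count_events_cb, &data);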
diff --git a/tools/perf/util/print-events.c b/tools/perf/util/print-events.c
new file mode 100644
index 000000000000..ba1ab5134685
--- /dev/null
+++ b/tools/perf/util/print-events.c
@@ -0,0 +1,572 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <dirent.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/param.h>
+
+#include <api/fs/tracing_path.h>
+#include <linux/stddef.h>
+#include <linux/perf_event.h>
+#include <linux/zalloc.h>
+#include <subcmd/pager.h>
+
+#include "build-id.h"
+#include "debug.h"
+#include "evsel.h"
+#include "metricgroup.h"
+#include "parse-events.h"
+#include "pmu.h"
+#include "print-events.h"
+#include "probe-file.h"
+#include "string2.h"
+#include "strlist.h"
+#include "thread_map.h"
+#include "tracepoint.h"
+#include "pfm.h"
+#include "pmu-hybrid.h"
+
+#define MAX_NAME_LEN 100
+
+static const char * const event_type_descriptors[] = {
+ "Hardware event",
+ "Software event",
+ "Tracepoint event",
+ "Hardware cache event",
+ "Raw hardware event descriptor",
+ "Hardware breakpoint",
+};
+
+static const struct event_symbol event_symbols_tool[PERF_TOOL_MAX] = {
+ [PERF_TOOL_DURATION_TIME] = {
+ .symbol = "duration_time",
+ .alias = "",
+ },
+ [PERF_TOOL_USER_TIME] = {
+ .symbol = "user_time",
+ .alias = "",
+ },
+ [PERF_TOOL_SYSTEM_TIME] = {
+ .symbol = "system_time",
+ .alias = "",
+ },
+};
+
+static int cmp_string(const void *a, const void *b)
+{
+ const char * const *as = a;
+ const char * const *bs = b;
+
+ return strcmp(*as, *bs);
+}
+
+/*
+ * Print the events from <debugfs_mount_point>/tracing/events
+ */
+void print_tracepoint_events(const char *subsys_glob,
+ const char *event_glob, bool name_only)
+{
+ DIR *sys_dir, *evt_dir;
+ struct dirent *sys_dirent, *evt_dirent;
+ char evt_path[MAXPATHLEN];
+ char *dir_path;
+ char **evt_list = NULL;
+ unsigned int evt_i = 0, evt_num = 0;
+ bool evt_num_known = false;
+
+restart:
+ sys_dir = tracing_events__opendir();
+ if (!sys_dir)
+ return;
+
+ if (evt_num_known) {
+ evt_list = zalloc(sizeof(char *) * evt_num);
+ if (!evt_list)
+ goto out_close_sys_dir;
+ }
+
+ for_each_subsystem(sys_dir, sys_dirent) {
+ if (subsys_glob != NULL &&
+ !strglobmatch(sys_dirent->d_name, subsys_glob))
+ continue;
+
+ dir_path = get_events_file(sys_dirent->d_name);
+ if (!dir_path)
+ continue;
+ evt_dir = opendir(dir_path);
+ if (!evt_dir)
+ goto next;
+
+ for_each_event(dir_path, evt_dir, evt_dirent) {
+ if (event_glob != NULL &&
+ !strglobmatch(evt_dirent->d_name, event_glob))
+ continue;
+
+ if (!evt_num_known) {
+ evt_num++;
+ continue;
+ }
+
+ snprintf(evt_path, MAXPATHLEN, "%s:%s",
+ sys_dirent->d_name, evt_dirent->d_name);
+
+ evt_list[evt_i] = strdup(evt_path);
+ if (evt_list[evt_i] == NULL) {
+ put_events_file(dir_path);
+ goto out_close_evt_dir;
+ }
+ evt_i++;
+ }
+ closedir(evt_dir);
+next:
+ put_events_file(dir_path);
+ }
+ closedir(sys_dir);
+
+ if (!evt_num_known) {
+ evt_num_known = true;
+ goto restart;
+ }
+ qsort(evt_list, evt_num, sizeof(char *), cmp_string);
+ evt_i = 0;
+ while (evt_i < evt_num) {
+ if (name_only) {
+ printf("%s ", evt_list[evt_i++]);
+ continue;
+ }
+ printf(" %-50s [%s]\n", evt_list[evt_i++],
+ event_type_descriptors[PERF_TYPE_TRACEPOINT]);
+ }
+ if (evt_num && pager_in_use())
+ printf("\n");
+
+out_free:
+ evt_num = evt_i;
+ for (evt_i = 0; evt_i < evt_num; evt_i++)
+ zfree(&evt_list[evt_i]);
+ zfree(&evt_list);
+ return;
+
+out_close_evt_dir:
+ closedir(evt_dir);
+out_close_sys_dir:
+ closedir(sys_dir);
+
+ printf("FATAL: not enough memory to print %s\n",
+ event_type_descriptors[PERF_TYPE_TRACEPOINT]);
+ if (evt_list)
+ goto out_free;
+}
+
+void print_sdt_events(const char *subsys_glob, const char *event_glob,
+ bool name_only)
+{
+ struct probe_cache *pcache;
+ struct probe_cache_entry *ent;
+ struct strlist *bidlist, *sdtlist;
+ struct strlist_config cfg = {.dont_dupstr = true};
+ struct str_node *nd, *nd2;
+ char *buf, *path, *ptr = NULL;
+ bool show_detail = false;
+ int ret;
+
+ sdtlist = strlist__new(NULL, &cfg);
+ if (!sdtlist) {
+ pr_debug("Failed to allocate new strlist for SDT\n");
+ return;
+ }
+ bidlist = build_id_cache__list_all(true);
+ if (!bidlist) {
+ pr_debug("Failed to get buildids: %d\n", errno);
+ return;
+ }
+ strlist__for_each_entry(nd, bidlist) {
+ pcache = probe_cache__new(nd->s, NULL);
+ if (!pcache)
+ continue;
+ list_for_each_entry(ent, &pcache->entries, node) {
+ if (!ent->sdt)
+ continue;
+ if (subsys_glob &&
+ !strglobmatch(ent->pev.group, subsys_glob))
+ continue;
+ if (event_glob &&
+ !strglobmatch(ent->pev.event, event_glob))
+ continue;
+ ret = asprintf(&buf, "%s:%s@%s", ent->pev.group,
+ ent->pev.event, nd->s);
+ if (ret > 0)
+ strlist__add(sdtlist, buf);
+ }
+ probe_cache__delete(pcache);
+ }
+ strlist__delete(bidlist);
+
+ strlist__for_each_entry(nd, sdtlist) {
+ buf = strchr(nd->s, '@');
+ if (buf)
+ *(buf++) = '\0';
+ if (name_only) {
+ printf("%s ", nd->s);
+ continue;
+ }
+ nd2 = strlist__next(nd);
+ if (nd2) {
+ ptr = strchr(nd2->s, '@');
+ if (ptr)
+ *ptr = '\0';
+ if (strcmp(nd->s, nd2->s) == 0)
+ show_detail = true;
+ }
+ if (show_detail) {
+ path = build_id_cache__origname(buf);
+ ret = asprintf(&buf, "%s@%s(%.12s)", nd->s, path, buf);
+ if (ret > 0) {
+ printf(" %-50s [%s]\n", buf, "SDT event");
+ free(buf);
+ }
+ free(path);
+ } else
+ printf(" %-50s [%s]\n", nd->s, "SDT event");
+ if (nd2) {
+ if (strcmp(nd->s, nd2->s) != 0)
+ show_detail = false;
+ if (ptr)
+ *ptr = '@';
+ }
+ }
+ strlist__delete(sdtlist);
+}
+
+static bool is_event_supported(u8 type, unsigned int config)
+{
+ bool ret = true;
+ int open_return;
+ struct evsel *evsel;
+ struct perf_event_attr attr = {
+ .type = type,
+ .config = config,
+ .disabled = 1,
+ };
+ struct perf_thread_map *tmap = thread_map__new_by_tid(0);
+
+ if (tmap == NULL)
+ return false;
+
+ evsel = evsel__new(&attr);
+ if (evsel) {
+ open_return = evsel__open(evsel, NULL, tmap);
+ ret = open_return >= 0;
+
+ if (open_return == -EACCES) {
+ /*
+ * This happens if the paranoid value
+ * /proc/sys/kernel/perf_event_paranoid is set to 2
+ * Re-run with exclude_kernel set; we don't do that
+ * by default as some ARM machines do not support it.
+ *
+ */
+ evsel->core.attr.exclude_kernel = 1;
+ ret = evsel__open(evsel, NULL, tmap) >= 0;
+ }
+ evsel__delete(evsel);
+ }
+
+ perf_thread_map__put(tmap);
+ return ret;
+}
+
+int print_hwcache_events(const char *event_glob, bool name_only)
+{
+ unsigned int type, op, i, evt_i = 0, evt_num = 0, npmus = 0;
+ char name[64], new_name[128];
+ char **evt_list = NULL, **evt_pmus = NULL;
+ bool evt_num_known = false;
+ struct perf_pmu *pmu = NULL;
+
+ if (perf_pmu__has_hybrid()) {
+ npmus = perf_pmu__hybrid_pmu_num();
+ evt_pmus = zalloc(sizeof(char *) * npmus);
+ if (!evt_pmus)
+ goto out_enomem;
+ }
+
+restart:
+ if (evt_num_known) {
+ evt_list = zalloc(sizeof(char *) * evt_num);
+ if (!evt_list)
+ goto out_enomem;
+ }
+
+ for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
+ for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
+ /* skip invalid cache type */
+ if (!evsel__is_cache_op_valid(type, op))
+ continue;
+
+ for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
+ unsigned int hybrid_supported = 0, j;
+ bool supported;
+
+ __evsel__hw_cache_type_op_res_name(type, op, i, name, sizeof(name));
+ if (event_glob != NULL && !strglobmatch(name, event_glob))
+ continue;
+
+ if (!perf_pmu__has_hybrid()) {
+ if (!is_event_supported(PERF_TYPE_HW_CACHE,
+ type | (op << 8) | (i << 16))) {
+ continue;
+ }
+ } else {
+ perf_pmu__for_each_hybrid_pmu(pmu) {
+ if (!evt_num_known) {
+ evt_num++;
+ continue;
+ }
+
+ supported = is_event_supported(
+ PERF_TYPE_HW_CACHE,
+ type | (op << 8) | (i << 16) |
+ ((__u64)pmu->type << PERF_PMU_TYPE_SHIFT));
+ if (supported) {
+ snprintf(new_name, sizeof(new_name),
+ "%s/%s/", pmu->name, name);
+ evt_pmus[hybrid_supported] =
+ strdup(new_name);
+ hybrid_supported++;
+ }
+ }
+
+ if (hybrid_supported == 0)
+ continue;
+ }
+
+ if (!evt_num_known) {
+ evt_num++;
+ continue;
+ }
+
+ if ((hybrid_supported == 0) ||
+ (hybrid_supported == npmus)) {
+ evt_list[evt_i] = strdup(name);
+ if (npmus > 0) {
+ for (j = 0; j < npmus; j++)
+ zfree(&evt_pmus[j]);
+ }
+ } else {
+ for (j = 0; j < hybrid_supported; j++) {
+ evt_list[evt_i++] = evt_pmus[j];
+ evt_pmus[j] = NULL;
+ }
+ continue;
+ }
+
+ if (evt_list[evt_i] == NULL)
+ goto out_enomem;
+ evt_i++;
+ }
+ }
+ }
+
+ if (!evt_num_known) {
+ evt_num_known = true;
+ goto restart;
+ }
+
+ for (evt_i = 0; evt_i < evt_num; evt_i++) {
+ if (!evt_list[evt_i])
+ break;
+ }
+
+ evt_num = evt_i;
+ qsort(evt_list, evt_num, sizeof(char *), cmp_string);
+ evt_i = 0;
+ while (evt_i < evt_num) {
+ if (name_only) {
+ printf("%s ", evt_list[evt_i++]);
+ continue;
+ }
+ printf(" %-50s [%s]\n", evt_list[evt_i++],
+ event_type_descriptors[PERF_TYPE_HW_CACHE]);
+ }
+ if (evt_num && pager_in_use())
+ printf("\n");
+
+out_free:
+ evt_num = evt_i;
+ for (evt_i = 0; evt_i < evt_num; evt_i++)
+ zfree(&evt_list[evt_i]);
+ zfree(&evt_list);
+
+ for (evt_i = 0; evt_i < npmus; evt_i++)
+ zfree(&evt_pmus[evt_i]);
+ zfree(&evt_pmus);
+ return evt_num;
+
+out_enomem:
+ printf("FATAL: not enough memory to print %s\n",
+ event_type_descriptors[PERF_TYPE_HW_CACHE]);
+ if (evt_list)
+ goto out_free;
+ return evt_num;
+}
+
+static void print_tool_event(const struct event_symbol *syms, const char *event_glob,
+ bool name_only)
+{
+ if (syms->symbol == NULL)
+ return;
+
+ if (event_glob && !(strglobmatch(syms->symbol, event_glob) ||
+ (syms->alias && strglobmatch(syms->alias, event_glob))))
+ return;
+
+ if (name_only)
+ printf("%s ", syms->symbol);
+ else {
+ char name[MAX_NAME_LEN];
+
+ if (syms->alias && strlen(syms->alias))
+ snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias);
+ else
+ strlcpy(name, syms->symbol, MAX_NAME_LEN);
+ printf(" %-50s [%s]\n", name, "Tool event");
+ }
+}
+
+void print_tool_events(const char *event_glob, bool name_only)
+{
+ // Start at 1 because the first enum entry means no tool event.
+ for (int i = 1; i < PERF_TOOL_MAX; ++i)
+ print_tool_event(event_symbols_tool + i, event_glob, name_only);
+
+ if (pager_in_use())
+ printf("\n");
+}
+
+void print_symbol_events(const char *event_glob, unsigned int type,
+ struct event_symbol *syms, unsigned int max,
+ bool name_only)
+{
+ unsigned int i, evt_i = 0, evt_num = 0;
+ char name[MAX_NAME_LEN];
+ char **evt_list = NULL;
+ bool evt_num_known = false;
+
+restart:
+ if (evt_num_known) {
+ evt_list = zalloc(sizeof(char *) * evt_num);
+ if (!evt_list)
+ goto out_enomem;
+ syms -= max;
+ }
+
+ for (i = 0; i < max; i++, syms++) {
+ /*
+	 * Newer attr.config values are still not supported here; the
+	 * latest example was PERF_COUNT_SW_CGROUP_SWITCHES.
+ */
+ if (syms->symbol == NULL)
+ continue;
+
+ if (event_glob != NULL && !(strglobmatch(syms->symbol, event_glob) ||
+ (syms->alias && strglobmatch(syms->alias, event_glob))))
+ continue;
+
+ if (!is_event_supported(type, i))
+ continue;
+
+ if (!evt_num_known) {
+ evt_num++;
+ continue;
+ }
+
+ if (!name_only && strlen(syms->alias))
+ snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias);
+ else
+ strlcpy(name, syms->symbol, MAX_NAME_LEN);
+
+ evt_list[evt_i] = strdup(name);
+ if (evt_list[evt_i] == NULL)
+ goto out_enomem;
+ evt_i++;
+ }
+
+ if (!evt_num_known) {
+ evt_num_known = true;
+ goto restart;
+ }
+ qsort(evt_list, evt_num, sizeof(char *), cmp_string);
+ evt_i = 0;
+ while (evt_i < evt_num) {
+ if (name_only) {
+ printf("%s ", evt_list[evt_i++]);
+ continue;
+ }
+ printf(" %-50s [%s]\n", evt_list[evt_i++], event_type_descriptors[type]);
+ }
+ if (evt_num && pager_in_use())
+ printf("\n");
+
+out_free:
+ evt_num = evt_i;
+ for (evt_i = 0; evt_i < evt_num; evt_i++)
+ zfree(&evt_list[evt_i]);
+ zfree(&evt_list);
+ return;
+
+out_enomem:
+ printf("FATAL: not enough memory to print %s\n", event_type_descriptors[type]);
+ if (evt_list)
+ goto out_free;
+}
+
+/*
+ * Print the help text for the event symbols:
+ */
+void print_events(const char *event_glob, bool name_only, bool quiet_flag,
+ bool long_desc, bool details_flag, bool deprecated,
+ const char *pmu_name)
+{
+ print_symbol_events(event_glob, PERF_TYPE_HARDWARE,
+ event_symbols_hw, PERF_COUNT_HW_MAX, name_only);
+
+ print_symbol_events(event_glob, PERF_TYPE_SOFTWARE,
+ event_symbols_sw, PERF_COUNT_SW_MAX, name_only);
+ print_tool_events(event_glob, name_only);
+
+ print_hwcache_events(event_glob, name_only);
+
+ print_pmu_events(event_glob, name_only, quiet_flag, long_desc,
+ details_flag, deprecated, pmu_name);
+
+ if (event_glob != NULL)
+ return;
+
+ if (!name_only) {
+ printf(" %-50s [%s]\n",
+ "rNNN",
+ event_type_descriptors[PERF_TYPE_RAW]);
+ printf(" %-50s [%s]\n",
+ "cpu/t1=v1[,t2=v2,t3 ...]/modifier",
+ event_type_descriptors[PERF_TYPE_RAW]);
+ if (pager_in_use())
+ printf(" (see 'man perf-list' on how to encode it)\n\n");
+
+ printf(" %-50s [%s]\n",
+ "mem:<addr>[/len][:access]",
+ event_type_descriptors[PERF_TYPE_BREAKPOINT]);
+ if (pager_in_use())
+ printf("\n");
+ }
+
+ print_tracepoint_events(NULL, NULL, name_only);
+
+ print_sdt_events(NULL, NULL, name_only);
+
+ metricgroup__print(true, true, NULL, name_only, details_flag,
+ pmu_name);
+
+ print_libpfm_events(name_only, long_desc);
+}
diff --git a/tools/perf/util/print-events.h b/tools/perf/util/print-events.h
new file mode 100644
index 000000000000..1da9910d83a6
--- /dev/null
+++ b/tools/perf/util/print-events.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_PRINT_EVENTS_H
+#define __PERF_PRINT_EVENTS_H
+
+#include <stdbool.h>
+
+struct event_symbol;
+
+void print_events(const char *event_glob, bool name_only, bool quiet_flag,
+ bool long_desc, bool details_flag, bool deprecated,
+ const char *pmu_name);
+int print_hwcache_events(const char *event_glob, bool name_only);
+void print_sdt_events(const char *subsys_glob, const char *event_glob,
+ bool name_only);
+void print_symbol_events(const char *event_glob, unsigned int type,
+ struct event_symbol *syms, unsigned int max,
+ bool name_only);
+void print_tool_events(const char *event_glob, bool name_only);
+void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
+ bool name_only);
+
+#endif /* __PERF_PRINT_EVENTS_H */
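With the listing code moved out of parse-events.[ch], a tool that only wants to print events includes this header. A hedged usage sketch (the include path and argument values are illustrative; the prototypes are the ones declared above):

	#include "util/print-events.h"

	/* names only, no glob filter -- roughly what a raw dump for perf list needs */
	print_events(/*event_glob=*/NULL, /*name_only=*/true, /*quiet_flag=*/false,
		     /*long_desc=*/false, /*details_flag=*/false,
		     /*deprecated=*/false, /*pmu_name=*/NULL);

	/* or only the tool pseudo events (duration_time, user_time, system_time) */
	print_tool_events(/*event_glob=*/NULL, /*name_only=*/true);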
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 062b5cbe67af..785246ff4179 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -1349,7 +1349,7 @@ int parse_line_range_desc(const char *arg, struct line_range *lr)
/*
* Adjust the number of lines here.
* If the number of lines == 1, the
- * the end of line should be equal to
+ * end of line should be equal to
* the start of line.
*/
lr->end--;
@@ -1775,8 +1775,10 @@ int parse_perf_probe_command(const char *cmd, struct perf_probe_event *pev)
if (!pev->event && pev->point.function && pev->point.line
&& !pev->point.lazy_line && !pev->point.offset) {
if (asprintf(&pev->event, "%s_L%d", pev->point.function,
- pev->point.line) < 0)
- return -ENOMEM;
+ pev->point.line) < 0) {
+ ret = -ENOMEM;
+ goto out;
+ }
}
/* Copy arguments and ensure return probe has no C argument */
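The hunk above stops returning -ENOMEM directly once the probe event is partially built and instead routes the asprintf() failure through the function's common exit label, so the cleanup performed there still runs. The general shape of that idiom, with hypothetical helpers standing in for the real steps:

	ret = build_something(pev);		/* hypothetical step that can fail */
	if (ret < 0)
		goto out;			/* never bare-return with resources held */
	/* ... */
out:
	release_partial_state(pev);		/* hypothetical: whatever the real out: label frees */
	return ret;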
diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c
index 5b09ecbb05dc..7b58f6c7c69d 100644
--- a/tools/perf/util/record.c
+++ b/tools/perf/util/record.c
@@ -121,7 +121,7 @@ void evlist__config(struct evlist *evlist, struct record_opts *opts, struct call
evlist__for_each_entry(evlist, evsel)
evsel__config_leader_sampling(evsel, evlist);
- if (opts->full_auxtrace) {
+ if (opts->full_auxtrace || opts->sample_identifier) {
/*
* Need to be able to synthesize and parse selected events with
* arbitrary sample types, which requires always being able to
@@ -238,7 +238,7 @@ bool evlist__can_select_event(struct evlist *evlist, const char *str)
if (!temp_evlist)
return false;
- err = parse_events(temp_evlist, str, NULL);
+ err = parse_event(temp_evlist, str);
if (err)
goto out_delete;
diff --git a/tools/perf/util/record.h b/tools/perf/util/record.h
index be9a957501f4..4269e916f450 100644
--- a/tools/perf/util/record.h
+++ b/tools/perf/util/record.h
@@ -28,6 +28,7 @@ struct record_opts {
bool sample_time;
bool sample_time_set;
bool sample_cpu;
+ bool sample_identifier;
bool period;
bool period_set;
bool running_time;
diff --git a/tools/perf/util/s390-sample-raw.c b/tools/perf/util/s390-sample-raw.c
index cd3a34840389..9a631d97471c 100644
--- a/tools/perf/util/s390-sample-raw.c
+++ b/tools/perf/util/s390-sample-raw.c
@@ -129,28 +129,46 @@ static int get_counterset_start(int setnr)
}
}
+struct get_counter_name_data {
+ int wanted;
+ const char *result;
+};
+
+static int get_counter_name_callback(const struct pmu_event *evp,
+ const struct pmu_events_table *table __maybe_unused,
+ void *vdata)
+{
+ struct get_counter_name_data *data = vdata;
+ int rc, event_nr;
+
+ if (evp->name == NULL || evp->event == NULL)
+ return 0;
+ rc = sscanf(evp->event, "event=%x", &event_nr);
+ if (rc == 1 && event_nr == data->wanted) {
+ data->result = evp->name;
+ return 1; /* Terminate the search. */
+ }
+ return 0;
+}
+
/* Scan the PMU table and extract the logical name of a counter from the
 * PMU events table. Input is the counter set and counter number within the
* set. Construct the event number and use this as key. If they match return
* the name of this counter.
* If no match is found a NULL pointer is returned.
*/
-static const char *get_counter_name(int set, int nr, const struct pmu_events_map *map)
+static const char *get_counter_name(int set, int nr, const struct pmu_events_table *table)
{
- int rc, event_nr, wanted = get_counterset_start(set) + nr;
+ struct get_counter_name_data data = {
+ .wanted = get_counterset_start(set) + nr,
+ .result = NULL,
+ };
- if (map) {
- const struct pmu_event *evp = map->table;
+ if (!table)
+ return NULL;
- for (; evp->name || evp->event || evp->desc; ++evp) {
- if (evp->name == NULL || evp->event == NULL)
- continue;
- rc = sscanf(evp->event, "event=%x", &event_nr);
- if (rc == 1 && event_nr == wanted)
- return evp->name;
- }
- }
- return NULL;
+ pmu_events_table_for_each_event(table, get_counter_name_callback, &data);
+ return data.result;
}
static void s390_cpumcfdg_dump(struct perf_sample *sample)
@@ -159,10 +177,10 @@ static void s390_cpumcfdg_dump(struct perf_sample *sample)
unsigned char *buf = sample->raw_data;
const char *color = PERF_COLOR_BLUE;
struct cf_ctrset_entry *cep, ce;
- const struct pmu_events_map *map;
+ const struct pmu_events_table *table;
u64 *p;
- map = pmu_events_map__find();
+ table = pmu_events_table__find();
while (offset < len) {
cep = (struct cf_ctrset_entry *)(buf + offset);
@@ -180,7 +198,7 @@ static void s390_cpumcfdg_dump(struct perf_sample *sample)
color_fprintf(stdout, color, " [%#08zx] Counterset:%d"
" Counters:%d\n", offset, ce.set, ce.ctr);
for (i = 0, p = (u64 *)(cep + 1); i < ce.ctr; ++i, ++p) {
- const char *ev_name = get_counter_name(ce.set, i, map);
+ const char *ev_name = get_counter_name(ce.set, i, table);
color_fprintf(stdout, color,
"\tCounter:%03d %s Value:%#018lx\n", i,
diff --git a/tools/perf/util/scripting-engines/Build b/tools/perf/util/scripting-engines/Build
index 7b342ce38d99..c92326c2233a 100644
--- a/tools/perf/util/scripting-engines/Build
+++ b/tools/perf/util/scripting-engines/Build
@@ -1,6 +1,6 @@
perf-$(CONFIG_LIBPERL) += trace-event-perl.o
perf-$(CONFIG_LIBPYTHON) += trace-event-python.o
-CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-nested-externs -Wno-undef -Wno-switch-default
+CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-nested-externs -Wno-undef -Wno-switch-default -Wno-bad-function-cast -Wno-declaration-after-statement -Wno-switch-enum
-CFLAGS_trace-event-python.o += $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow
+CFLAGS_trace-event-python.o += $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-error=deprecated-declarations
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index adba01b7d9dd..1f2040f36d4e 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -131,7 +131,7 @@ static void handler_call_die(const char *handler_name)
}
/*
- * Insert val into into the dictionary and decrement the reference counter.
+ * Insert val into the dictionary and decrement the reference counter.
* This is necessary for dictionaries since PyDict_SetItemString() does not
* steal a reference, as opposed to PyTuple_SetItem().
*/
@@ -642,15 +642,19 @@ exit:
return pylist;
}
-static PyObject *get_sample_value_as_tuple(struct sample_read_value *value)
+static PyObject *get_sample_value_as_tuple(struct sample_read_value *value,
+ u64 read_format)
{
PyObject *t;
- t = PyTuple_New(2);
+ t = PyTuple_New(3);
if (!t)
Py_FatalError("couldn't create Python tuple");
PyTuple_SetItem(t, 0, PyLong_FromUnsignedLongLong(value->id));
PyTuple_SetItem(t, 1, PyLong_FromUnsignedLongLong(value->value));
+ if (read_format & PERF_FORMAT_LOST)
+ PyTuple_SetItem(t, 2, PyLong_FromUnsignedLongLong(value->lost));
+
return t;
}
@@ -681,12 +685,17 @@ static void set_sample_read_in_dict(PyObject *dict_sample,
Py_FatalError("couldn't create Python list");
if (read_format & PERF_FORMAT_GROUP) {
- for (i = 0; i < sample->read.group.nr; i++) {
- PyObject *t = get_sample_value_as_tuple(&sample->read.group.values[i]);
+ struct sample_read_value *v = sample->read.group.values;
+
+ i = 0;
+ sample_read_group__for_each(v, sample->read.group.nr, read_format) {
+ PyObject *t = get_sample_value_as_tuple(v, read_format);
PyList_SET_ITEM(values, i, t);
+ i++;
}
} else {
- PyObject *t = get_sample_value_as_tuple(&sample->read.one);
+ PyObject *t = get_sample_value_as_tuple(&sample->read.one,
+ read_format);
PyList_SET_ITEM(values, 0, t);
}
pydict_set_item_string_decref(dict_sample, "values", values);
@@ -861,6 +870,13 @@ static PyObject *get_perf_sample_dict(struct perf_sample *sample,
brstacksym = python_process_brstacksym(sample, al->thread);
pydict_set_item_string_decref(dict, "brstacksym", brstacksym);
+ if (sample->machine_pid) {
+ pydict_set_item_string_decref(dict_sample, "machine_pid",
+ _PyLong_FromLong(sample->machine_pid));
+ pydict_set_item_string_decref(dict_sample, "vcpu",
+ _PyLong_FromLong(sample->vcpu));
+ }
+
pydict_set_item_string_decref(dict_sample, "cpumode",
_PyLong_FromLong((unsigned long)sample->cpumode));
@@ -1509,7 +1525,7 @@ static void python_do_process_switch(union perf_event *event,
np_tid = event->context_switch.next_prev_tid;
}
- t = tuple_new(9);
+ t = tuple_new(11);
if (!t)
return;
@@ -1522,6 +1538,8 @@ static void python_do_process_switch(union perf_event *event,
tuple_set_s32(t, 6, machine->pid);
tuple_set_bool(t, 7, out);
tuple_set_bool(t, 8, out_preempt);
+ tuple_set_s32(t, 9, sample->machine_pid);
+ tuple_set_s32(t, 10, sample->vcpu);
call_object(handler, t, handler_name);
@@ -1559,7 +1577,7 @@ static void python_process_auxtrace_error(struct perf_session *session __maybe_u
msg = (const char *)&e->time;
}
- t = tuple_new(9);
+ t = tuple_new(11);
tuple_set_u32(t, 0, e->type);
tuple_set_u32(t, 1, e->code);
@@ -1570,6 +1588,8 @@ static void python_process_auxtrace_error(struct perf_session *session __maybe_u
tuple_set_u64(t, 6, tm);
tuple_set_string(t, 7, msg);
tuple_set_u32(t, 8, cpumode);
+ tuple_set_s32(t, 9, e->machine_pid);
+ tuple_set_s32(t, 10, e->vcpu);
call_object(handler, t, handler_name);
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 0aa818977d2b..192c9274f7ad 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -374,10 +374,6 @@ static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
return 0;
}
-static int process_finished_round(struct perf_tool *tool,
- union perf_event *event,
- struct ordered_events *oe);
-
static int skipn(int fd, off_t n)
{
char buf[4096];
@@ -534,7 +530,7 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
tool->build_id = process_event_op2_stub;
if (tool->finished_round == NULL) {
if (tool->ordered_events)
- tool->finished_round = process_finished_round;
+ tool->finished_round = perf_event__process_finished_round;
else
tool->finished_round = process_finished_round_stub;
}
@@ -562,6 +558,8 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
tool->feature = process_event_op2_stub;
if (tool->compressed == NULL)
tool->compressed = perf_session__process_compressed_event;
+ if (tool->finished_init == NULL)
+ tool->finished_init = process_event_op2_stub;
}
static void swap_sample_id_all(union perf_event *event, void *data)
@@ -897,6 +895,10 @@ static void perf_event__auxtrace_error_swap(union perf_event *event,
event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip);
if (event->auxtrace_error.fmt)
event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
+ if (event->auxtrace_error.fmt >= 2) {
+ event->auxtrace_error.machine_pid = bswap_32(event->auxtrace_error.machine_pid);
+ event->auxtrace_error.vcpu = bswap_32(event->auxtrace_error.vcpu);
+ }
}
static void perf_event__thread_map_swap(union perf_event *event,
@@ -914,30 +916,30 @@ static void perf_event__cpu_map_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
struct perf_record_cpu_map_data *data = &event->cpu_map.data;
- struct cpu_map_entries *cpus;
- struct perf_record_record_cpu_map *mask;
- unsigned i;
data->type = bswap_16(data->type);
switch (data->type) {
case PERF_CPU_MAP__CPUS:
- cpus = (struct cpu_map_entries *)data->data;
-
- cpus->nr = bswap_16(cpus->nr);
+ data->cpus_data.nr = bswap_16(data->cpus_data.nr);
- for (i = 0; i < cpus->nr; i++)
- cpus->cpu[i] = bswap_16(cpus->cpu[i]);
+ for (unsigned i = 0; i < data->cpus_data.nr; i++)
+ data->cpus_data.cpu[i] = bswap_16(data->cpus_data.cpu[i]);
break;
case PERF_CPU_MAP__MASK:
- mask = (struct perf_record_record_cpu_map *)data->data;
+ data->mask32_data.long_size = bswap_16(data->mask32_data.long_size);
- mask->nr = bswap_16(mask->nr);
- mask->long_size = bswap_16(mask->long_size);
-
- switch (mask->long_size) {
- case 4: mem_bswap_32(&mask->mask, mask->nr); break;
- case 8: mem_bswap_64(&mask->mask, mask->nr); break;
+ switch (data->mask32_data.long_size) {
+ case 4:
+ data->mask32_data.nr = bswap_16(data->mask32_data.nr);
+ for (unsigned i = 0; i < data->mask32_data.nr; i++)
+ data->mask32_data.mask[i] = bswap_32(data->mask32_data.mask[i]);
+ break;
+ case 8:
+ data->mask64_data.nr = bswap_16(data->mask64_data.nr);
+ for (unsigned i = 0; i < data->mask64_data.nr; i++)
+ data->mask64_data.mask[i] = bswap_64(data->mask64_data.mask[i]);
+ break;
default:
pr_err("cpu_map swap: unsupported long size\n");
}
@@ -1067,9 +1069,9 @@ static perf_event__swap_op perf_event__swap_ops[] = {
* Flush every events below timestamp 7
* etc...
*/
-static int process_finished_round(struct perf_tool *tool __maybe_unused,
- union perf_event *event __maybe_unused,
- struct ordered_events *oe)
+int perf_event__process_finished_round(struct perf_tool *tool __maybe_unused,
+ union perf_event *event __maybe_unused,
+ struct ordered_events *oe)
{
if (dump_trace)
fprintf(stdout, "\n");
@@ -1281,21 +1283,25 @@ static void sample_read__printf(struct perf_sample *sample, u64 read_format)
sample->read.time_running);
if (read_format & PERF_FORMAT_GROUP) {
- u64 i;
+ struct sample_read_value *value = sample->read.group.values;
printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);
- for (i = 0; i < sample->read.group.nr; i++) {
- struct sample_read_value *value;
-
- value = &sample->read.group.values[i];
+ sample_read_group__for_each(value, sample->read.group.nr, read_format) {
printf("..... id %016" PRIx64
- ", value %016" PRIx64 "\n",
+ ", value %016" PRIx64,
value->id, value->value);
+ if (read_format & PERF_FORMAT_LOST)
+ printf(", lost %" PRIu64, value->lost);
+ printf("\n");
}
- } else
- printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
+ } else {
+ printf("..... id %016" PRIx64 ", value %016" PRIx64,
sample->read.one.id, sample->read.one.value);
+ if (read_format & PERF_FORMAT_LOST)
+ printf(", lost %" PRIu64, sample->read.one.lost);
+ printf("\n");
+ }
}
static void dump_event(struct evlist *evlist, union perf_event *event,
@@ -1409,6 +1415,9 @@ static void dump_read(struct evsel *evsel, union perf_event *event)
if (read_format & PERF_FORMAT_ID)
printf("... id : %" PRI_lu64 "\n", read_event->id);
+
+ if (read_format & PERF_FORMAT_LOST)
+ printf("... lost : %" PRI_lu64 "\n", read_event->lost);
}
static struct machine *machines__find_for_cpumode(struct machines *machines,
@@ -1420,7 +1429,9 @@ static struct machine *machines__find_for_cpumode(struct machines *machines,
(sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
u32 pid;
- if (event->header.type == PERF_RECORD_MMAP
+ if (sample->machine_pid)
+ pid = sample->machine_pid;
+ else if (event->header.type == PERF_RECORD_MMAP
|| event->header.type == PERF_RECORD_MMAP2)
pid = event->mmap.pid;
else
@@ -1475,14 +1486,14 @@ static int deliver_sample_group(struct evlist *evlist,
struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
- struct machine *machine)
+ struct machine *machine,
+ u64 read_format)
{
int ret = -EINVAL;
- u64 i;
+ struct sample_read_value *v = sample->read.group.values;
- for (i = 0; i < sample->read.group.nr; i++) {
- ret = deliver_sample_value(evlist, tool, event, sample,
- &sample->read.group.values[i],
+ sample_read_group__for_each(v, sample->read.group.nr, read_format) {
+ ret = deliver_sample_value(evlist, tool, event, sample, v,
machine);
if (ret)
break;
@@ -1506,7 +1517,7 @@ static int evlist__deliver_sample(struct evlist *evlist, struct perf_tool *tool,
/* For PERF_SAMPLE_READ we have either single or group mode. */
if (read_format & PERF_FORMAT_GROUP)
return deliver_sample_group(evlist, tool, event, sample,
- machine);
+ machine, read_format);
else
return deliver_sample_value(evlist, tool, event, sample,
&sample->read.one, machine);
@@ -1706,6 +1717,8 @@ static s64 perf_session__process_user_event(struct perf_session *session,
if (err)
dump_event(session->evlist, event, file_offset, &sample, file_path);
return err;
+ case PERF_RECORD_FINISHED_INIT:
+ return tool->finished_init(session, event);
default:
return -EINVAL;
}
@@ -2751,39 +2764,120 @@ void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
fprintf(fp, "# ========\n#\n");
}
+static int perf_session__register_guest(struct perf_session *session, pid_t machine_pid)
+{
+ struct machine *machine = machines__findnew(&session->machines, machine_pid);
+ struct thread *thread;
+
+ if (!machine)
+ return -ENOMEM;
+
+ machine->single_address_space = session->machines.host.single_address_space;
+
+ thread = machine__idle_thread(machine);
+ if (!thread)
+ return -ENOMEM;
+ thread__put(thread);
+
+ machine->kallsyms_filename = perf_data__guest_kallsyms_name(session->data, machine_pid);
+
+ return 0;
+}
+
+static int perf_session__set_guest_cpu(struct perf_session *session, pid_t pid,
+ pid_t tid, int guest_cpu)
+{
+ struct machine *machine = &session->machines.host;
+ struct thread *thread = machine__findnew_thread(machine, pid, tid);
+
+ if (!thread)
+ return -ENOMEM;
+ thread->guest_cpu = guest_cpu;
+ thread__put(thread);
+
+ return 0;
+}
+
int perf_event__process_id_index(struct perf_session *session,
union perf_event *event)
{
struct evlist *evlist = session->evlist;
struct perf_record_id_index *ie = &event->id_index;
+ size_t sz = ie->header.size - sizeof(*ie);
size_t i, nr, max_nr;
+ size_t e1_sz = sizeof(struct id_index_entry);
+ size_t e2_sz = sizeof(struct id_index_entry_2);
+ size_t etot_sz = e1_sz + e2_sz;
+ struct id_index_entry_2 *e2;
+ pid_t last_pid = 0;
- max_nr = (ie->header.size - sizeof(struct perf_record_id_index)) /
- sizeof(struct id_index_entry);
+ max_nr = sz / e1_sz;
nr = ie->nr;
- if (nr > max_nr)
+ if (nr > max_nr) {
+ printf("Too big: nr %zu max_nr %zu\n", nr, max_nr);
return -EINVAL;
+ }
+
+ if (sz >= nr * etot_sz) {
+ max_nr = sz / etot_sz;
+ if (nr > max_nr) {
+ printf("Too big2: nr %zu max_nr %zu\n", nr, max_nr);
+ return -EINVAL;
+ }
+ e2 = (void *)ie + sizeof(*ie) + nr * e1_sz;
+ } else {
+ e2 = NULL;
+ }
if (dump_trace)
fprintf(stdout, " nr: %zu\n", nr);
- for (i = 0; i < nr; i++) {
+ for (i = 0; i < nr; i++, (e2 ? e2++ : 0)) {
struct id_index_entry *e = &ie->entries[i];
struct perf_sample_id *sid;
+ int ret;
if (dump_trace) {
fprintf(stdout, " ... id: %"PRI_lu64, e->id);
fprintf(stdout, " idx: %"PRI_lu64, e->idx);
fprintf(stdout, " cpu: %"PRI_ld64, e->cpu);
- fprintf(stdout, " tid: %"PRI_ld64"\n", e->tid);
+ fprintf(stdout, " tid: %"PRI_ld64, e->tid);
+ if (e2) {
+ fprintf(stdout, " machine_pid: %"PRI_ld64, e2->machine_pid);
+ fprintf(stdout, " vcpu: %"PRI_lu64"\n", e2->vcpu);
+ } else {
+ fprintf(stdout, "\n");
+ }
}
sid = evlist__id2sid(evlist, e->id);
if (!sid)
return -ENOENT;
+
sid->idx = e->idx;
sid->cpu.cpu = e->cpu;
sid->tid = e->tid;
+
+ if (!e2)
+ continue;
+
+ sid->machine_pid = e2->machine_pid;
+ sid->vcpu.cpu = e2->vcpu;
+
+ if (!sid->machine_pid)
+ continue;
+
+ if (sid->machine_pid != last_pid) {
+ ret = perf_session__register_guest(session, sid->machine_pid);
+ if (ret)
+ return ret;
+ last_pid = sid->machine_pid;
+ perf_guest = true;
+ }
+
+ ret = perf_session__set_guest_cpu(session, sid->machine_pid, e->tid, e2->vcpu);
+ if (ret)
+ return ret;
}
return 0;
}
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 34500a3da735..be5871ea558f 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -155,4 +155,8 @@ int perf_session__deliver_synth_event(struct perf_session *session,
int perf_event__process_id_index(struct perf_session *session,
union perf_event *event);
+int perf_event__process_finished_round(struct perf_tool *tool,
+ union perf_event *event,
+ struct ordered_events *oe);
+
#endif /* __PERF_SESSION_H */
diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
index c255a2c90cd6..5b1e6468d5e8 100644
--- a/tools/perf/util/setup.py
+++ b/tools/perf/util/setup.py
@@ -11,7 +11,7 @@ def clang_has_option(option):
return [o for o in cc_output if ((b"unknown argument" in o) or (b"is not supported" in o))] == [ ]
if cc_is_clang:
- from distutils.sysconfig import get_config_vars
+ from sysconfig import get_config_vars
vars = get_config_vars()
for var in ('CFLAGS', 'OPT'):
vars[var] = sub("-specs=[^ ]+", "", vars[var])
@@ -28,10 +28,10 @@ if cc_is_clang:
if not clang_has_option("-ffat-lto-objects"):
vars[var] = sub("-ffat-lto-objects", "", vars[var])
-from distutils.core import setup, Extension
+from setuptools import setup, Extension
-from distutils.command.build_ext import build_ext as _build_ext
-from distutils.command.install_lib import install_lib as _install_lib
+from setuptools.command.build_ext import build_ext as _build_ext
+from setuptools.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
@@ -48,7 +48,9 @@ class install_lib(_install_lib):
cflags = getenv('CFLAGS', '').split()
# switch off several checks (need to be at the end of cflags list)
cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter', '-Wno-redundant-decls', '-DPYTHON_PERF' ]
-if not cc_is_clang:
+if cc_is_clang:
+ cflags += ["-Wno-unused-command-line-argument" ]
+else:
cflags += ['-Wno-cast-function-type' ]
src_perf = getenv('srctree') + '/tools/perf'
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index 606f09b09226..b82844cb0ce7 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -28,15 +28,21 @@
static void print_running(struct perf_stat_config *config,
u64 run, u64 ena)
{
- if (config->csv_output) {
- fprintf(config->output, "%s%" PRIu64 "%s%.2f",
- config->csv_sep,
- run,
- config->csv_sep,
- ena ? 100.0 * run / ena : 100.0);
- } else if (run != ena) {
+
+ double enabled_percent = 100;
+
+ if (run != ena)
+ enabled_percent = 100 * run / ena;
+ if (config->json_output)
+ fprintf(config->output,
+ "\"event-runtime\" : %" PRIu64 ", \"pcnt-running\" : %.2f, ",
+ run, enabled_percent);
+ else if (config->csv_output)
+ fprintf(config->output,
+ "%s%" PRIu64 "%s%.2f", config->csv_sep,
+ run, config->csv_sep, enabled_percent);
+ else if (run != ena)
fprintf(config->output, " (%.2f%%)", 100.0 * run / ena);
- }
}
static void print_noise_pct(struct perf_stat_config *config,
@@ -44,7 +50,9 @@ static void print_noise_pct(struct perf_stat_config *config,
{
double pct = rel_stddev_stats(total, avg);
- if (config->csv_output)
+ if (config->json_output)
+ fprintf(config->output, "\"variance\" : %.2f, ", pct);
+ else if (config->csv_output)
fprintf(config->output, "%s%.2f%%", config->csv_sep, pct);
else if (pct)
fprintf(config->output, " ( +-%6.2f%% )", pct);
@@ -66,7 +74,11 @@ static void print_cgroup(struct perf_stat_config *config, struct evsel *evsel)
{
if (nr_cgroups) {
const char *cgrp_name = evsel->cgrp ? evsel->cgrp->name : "";
- fprintf(config->output, "%s%s", config->csv_sep, cgrp_name);
+
+ if (config->json_output)
+ fprintf(config->output, "\"cgroup\" : \"%s\", ", cgrp_name);
+ else
+ fprintf(config->output, "%s%s", config->csv_sep, cgrp_name);
}
}
@@ -74,69 +86,123 @@ static void print_cgroup(struct perf_stat_config *config, struct evsel *evsel)
static void aggr_printout(struct perf_stat_config *config,
struct evsel *evsel, struct aggr_cpu_id id, int nr)
{
+
+
+ if (config->json_output && !config->interval)
+ fprintf(config->output, "{");
+
switch (config->aggr_mode) {
case AGGR_CORE:
- fprintf(config->output, "S%d-D%d-C%*d%s%*d%s",
- id.socket,
- id.die,
- config->csv_output ? 0 : -8,
- id.core,
- config->csv_sep,
- config->csv_output ? 0 : 4,
- nr,
- config->csv_sep);
+ if (config->json_output) {
+ fprintf(config->output,
+ "\"core\" : \"S%d-D%d-C%d\", \"aggregate-number\" : %d, ",
+ id.socket,
+ id.die,
+ id.core,
+ nr);
+ } else {
+ fprintf(config->output, "S%d-D%d-C%*d%s%*d%s",
+ id.socket,
+ id.die,
+ config->csv_output ? 0 : -8,
+ id.core,
+ config->csv_sep,
+ config->csv_output ? 0 : 4,
+ nr,
+ config->csv_sep);
+ }
break;
case AGGR_DIE:
- fprintf(config->output, "S%d-D%*d%s%*d%s",
- id.socket,
- config->csv_output ? 0 : -8,
- id.die,
- config->csv_sep,
- config->csv_output ? 0 : 4,
- nr,
- config->csv_sep);
+ if (config->json_output) {
+ fprintf(config->output,
+ "\"die\" : \"S%d-D%d\", \"aggregate-number\" : %d, ",
+ id.socket,
+ id.die,
+ nr);
+ } else {
+ fprintf(config->output, "S%d-D%*d%s%*d%s",
+ id.socket,
+ config->csv_output ? 0 : -8,
+ id.die,
+ config->csv_sep,
+ config->csv_output ? 0 : 4,
+ nr,
+ config->csv_sep);
+ }
break;
case AGGR_SOCKET:
- fprintf(config->output, "S%*d%s%*d%s",
- config->csv_output ? 0 : -5,
- id.socket,
- config->csv_sep,
- config->csv_output ? 0 : 4,
- nr,
- config->csv_sep);
- break;
+ if (config->json_output) {
+ fprintf(config->output,
+ "\"socket\" : \"S%d\", \"aggregate-number\" : %d, ",
+ id.socket,
+ nr);
+ } else {
+ fprintf(config->output, "S%*d%s%*d%s",
+ config->csv_output ? 0 : -5,
+ id.socket,
+ config->csv_sep,
+ config->csv_output ? 0 : 4,
+ nr,
+ config->csv_sep);
+ }
+ break;
case AGGR_NODE:
- fprintf(config->output, "N%*d%s%*d%s",
- config->csv_output ? 0 : -5,
- id.node,
- config->csv_sep,
- config->csv_output ? 0 : 4,
- nr,
- config->csv_sep);
- break;
+ if (config->json_output) {
+ fprintf(config->output, "\"node\" : \"N%d\", \"aggregate-number\" : %d, ",
+ id.node,
+ nr);
+ } else {
+ fprintf(config->output, "N%*d%s%*d%s",
+ config->csv_output ? 0 : -5,
+ id.node,
+ config->csv_sep,
+ config->csv_output ? 0 : 4,
+ nr,
+ config->csv_sep);
+ }
+ break;
case AGGR_NONE:
- if (evsel->percore && !config->percore_show_thread) {
- fprintf(config->output, "S%d-D%d-C%*d%s",
- id.socket,
- id.die,
- config->csv_output ? 0 : -3,
- id.core, config->csv_sep);
- } else if (id.cpu.cpu > -1) {
- fprintf(config->output, "CPU%*d%s",
- config->csv_output ? 0 : -7,
- id.cpu.cpu, config->csv_sep);
+ if (config->json_output) {
+ if (evsel->percore && !config->percore_show_thread) {
+ fprintf(config->output, "\"core\" : \"S%d-D%d-C%d\"",
+ id.socket,
+ id.die,
+ id.core);
+ } else if (id.core > -1) {
+ fprintf(config->output, "\"cpu\" : \"%d\", ",
+ id.cpu.cpu);
+ }
+ } else {
+ if (evsel->percore && !config->percore_show_thread) {
+ fprintf(config->output, "S%d-D%d-C%*d%s",
+ id.socket,
+ id.die,
+ config->csv_output ? 0 : -3,
+ id.core, config->csv_sep);
+ } else if (id.core > -1) {
+ fprintf(config->output, "CPU%*d%s",
+ config->csv_output ? 0 : -7,
+ id.cpu.cpu, config->csv_sep);
+ }
}
break;
case AGGR_THREAD:
- fprintf(config->output, "%*s-%*d%s",
- config->csv_output ? 0 : 16,
- perf_thread_map__comm(evsel->core.threads, id.thread),
- config->csv_output ? 0 : -8,
- perf_thread_map__pid(evsel->core.threads, id.thread),
- config->csv_sep);
+ if (config->json_output) {
+ fprintf(config->output, "\"thread\" : \"%s-%d\", ",
+ perf_thread_map__comm(evsel->core.threads, id.thread),
+ perf_thread_map__pid(evsel->core.threads, id.thread));
+ } else {
+ fprintf(config->output, "%*s-%*d%s",
+ config->csv_output ? 0 : 16,
+ perf_thread_map__comm(evsel->core.threads, id.thread),
+ config->csv_output ? 0 : -8,
+ perf_thread_map__pid(evsel->core.threads, id.thread),
+ config->csv_sep);
+ }
break;
case AGGR_GLOBAL:
case AGGR_UNSET:
+ case AGGR_MAX:
default:
break;
}
@@ -234,6 +300,31 @@ static void print_metric_csv(struct perf_stat_config *config __maybe_unused,
fprintf(out, "%s%s%s%s", config->csv_sep, vals, config->csv_sep, skip_spaces(unit));
}
+static void print_metric_json(struct perf_stat_config *config __maybe_unused,
+ void *ctx,
+ const char *color __maybe_unused,
+ const char *fmt __maybe_unused,
+ const char *unit, double val)
+{
+ struct outstate *os = ctx;
+ FILE *out = os->fh;
+
+ fprintf(out, "\"metric-value\" : %f, ", val);
+ fprintf(out, "\"metric-unit\" : \"%s\"", unit);
+ if (!config->metric_only)
+ fprintf(out, "}");
+}
+
+static void new_line_json(struct perf_stat_config *config, void *ctx)
+{
+ struct outstate *os = ctx;
+
+ fputc('\n', os->fh);
+ if (os->prefix)
+ fprintf(os->fh, "%s", os->prefix);
+ aggr_printout(config, os->evsel, os->id, os->nr);
+}
+
/* Filter out some columns that don't work well in metrics only mode */
static bool valid_only_metric(const char *unit)
@@ -300,6 +391,27 @@ static void print_metric_only_csv(struct perf_stat_config *config __maybe_unused
fprintf(out, "%s%s", vals, config->csv_sep);
}
+static void print_metric_only_json(struct perf_stat_config *config __maybe_unused,
+ void *ctx, const char *color __maybe_unused,
+ const char *fmt,
+ const char *unit, double val)
+{
+ struct outstate *os = ctx;
+ FILE *out = os->fh;
+ char buf[64], *vals, *ends;
+ char tbuf[1024];
+
+ if (!valid_only_metric(unit))
+ return;
+ unit = fixunit(tbuf, os->evsel, unit);
+ snprintf(buf, sizeof(buf), fmt, val);
+ ends = vals = skip_spaces(buf);
+ while (isdigit(*ends) || *ends == '.')
+ ends++;
+ *ends = 0;
+ fprintf(out, "{\"metric-value\" : \"%s\"}", vals);
+}
+
static void new_line_metric(struct perf_stat_config *config __maybe_unused,
void *ctx __maybe_unused)
{
@@ -318,10 +430,13 @@ static void print_metric_header(struct perf_stat_config *config,
os->evsel->priv != os->evsel->evlist->selected->priv)
return;
- if (!valid_only_metric(unit))
+ if (!valid_only_metric(unit) && !config->json_output)
return;
unit = fixunit(tbuf, os->evsel, unit);
- if (config->csv_output)
+
+ if (config->json_output)
+ fprintf(os->fh, "\"unit\" : \"%s\"", unit);
+ else if (config->csv_output)
fprintf(os->fh, "%s%s", unit, config->csv_sep);
else
fprintf(os->fh, "%*s ", config->metric_only_len, unit);
@@ -367,14 +482,27 @@ static void abs_printout(struct perf_stat_config *config,
aggr_printout(config, evsel, id, nr);
- fprintf(output, fmt, avg, config->csv_sep);
+ if (config->json_output)
+ fprintf(output, "\"counter-value\" : \"%f\", ", avg);
+ else
+ fprintf(output, fmt, avg, config->csv_sep);
- if (evsel->unit)
- fprintf(output, "%-*s%s",
- config->csv_output ? 0 : config->unit_width,
- evsel->unit, config->csv_sep);
+ if (config->json_output) {
+ if (evsel->unit) {
+ fprintf(output, "\"unit\" : \"%s\", ",
+ evsel->unit);
+ }
+ } else {
+ if (evsel->unit)
+ fprintf(output, "%-*s%s",
+ config->csv_output ? 0 : config->unit_width,
+ evsel->unit, config->csv_sep);
+ }
- fprintf(output, "%-*s", config->csv_output ? 0 : 25, evsel__name(evsel));
+ if (config->json_output)
+ fprintf(output, "\"event\" : \"%s\", ", evsel__name(evsel));
+ else
+ fprintf(output, "%-*s", config->csv_output ? 0 : 32, evsel__name(evsel));
print_cgroup(config, evsel);
}
@@ -416,34 +544,30 @@ static void printout(struct perf_stat_config *config, struct aggr_cpu_id id, int
.nr = nr,
.evsel = counter,
};
- print_metric_t pm = print_metric_std;
+ print_metric_t pm;
new_line_t nl;
- if (config->metric_only) {
- nl = new_line_metric;
- if (config->csv_output)
- pm = print_metric_only_csv;
- else
- pm = print_metric_only;
- } else
- nl = new_line_std;
-
- if (config->csv_output && !config->metric_only) {
- static int aggr_fields[] = {
- [AGGR_GLOBAL] = 0,
- [AGGR_THREAD] = 1,
+ if (config->csv_output) {
+ static const int aggr_fields[AGGR_MAX] = {
[AGGR_NONE] = 1,
+ [AGGR_GLOBAL] = 0,
[AGGR_SOCKET] = 2,
[AGGR_DIE] = 2,
[AGGR_CORE] = 2,
+ [AGGR_THREAD] = 1,
+ [AGGR_UNSET] = 0,
+ [AGGR_NODE] = 0,
};
- pm = print_metric_csv;
- nl = new_line_csv;
- os.nfields = 3;
- os.nfields += aggr_fields[config->aggr_mode];
- if (counter->cgrp)
- os.nfields++;
+ pm = config->metric_only ? print_metric_only_csv : print_metric_csv;
+ nl = config->metric_only ? new_line_metric : new_line_csv;
+ os.nfields = 3 + aggr_fields[config->aggr_mode] + (counter->cgrp ? 1 : 0);
+ } else if (config->json_output) {
+ pm = config->metric_only ? print_metric_only_json : print_metric_json;
+ nl = config->metric_only ? new_line_metric : new_line_json;
+ } else {
+ pm = config->metric_only ? print_metric_only : print_metric_std;
+ nl = config->metric_only ? new_line_metric : new_line_std;
}
if (!config->no_csv_summary && config->csv_output &&
@@ -458,10 +582,15 @@ static void printout(struct perf_stat_config *config, struct aggr_cpu_id id, int
}
aggr_printout(config, counter, id, nr);
- fprintf(config->output, "%*s%s",
- config->csv_output ? 0 : 18,
- counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
- config->csv_sep);
+ if (config->json_output) {
+ fprintf(config->output, "\"counter-value\" : \"%s\", ",
+ counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED);
+ } else {
+ fprintf(config->output, "%*s%s",
+ config->csv_output ? 0 : 18,
+ counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
+ config->csv_sep);
+ }
if (counter->supported) {
if (!evlist__has_hybrid(counter->evlist)) {
@@ -471,21 +600,32 @@ static void printout(struct perf_stat_config *config, struct aggr_cpu_id id, int
}
}
- fprintf(config->output, "%-*s%s",
- config->csv_output ? 0 : config->unit_width,
- counter->unit, config->csv_sep);
+ if (config->json_output) {
+ fprintf(config->output, "\"unit\" : \"%s\", ", counter->unit);
+ } else {
+ fprintf(config->output, "%-*s%s",
+ config->csv_output ? 0 : config->unit_width,
+ counter->unit, config->csv_sep);
+ }
- fprintf(config->output, "%*s",
- config->csv_output ? 0 : -25, evsel__name(counter));
+ if (config->json_output) {
+ fprintf(config->output, "\"event\" : \"%s\", ",
+ evsel__name(counter));
+ } else {
+ fprintf(config->output, "%*s",
+ config->csv_output ? 0 : -25, evsel__name(counter));
+ }
print_cgroup(config, counter);
- if (!config->csv_output)
+ if (!config->csv_output && !config->json_output)
pm(config, &os, NULL, NULL, "", 0);
print_noise(config, counter, noise);
print_running(config, run, ena);
if (config->csv_output)
pm(config, &os, NULL, NULL, "", 0);
+ else if (config->json_output)
+ pm(config, &os, NULL, NULL, "", 0);
return;
}
@@ -500,12 +640,15 @@ static void printout(struct perf_stat_config *config, struct aggr_cpu_id id, int
if (config->csv_output && !config->metric_only) {
print_noise(config, counter, noise);
print_running(config, run, ena);
+ } else if (config->json_output && !config->metric_only) {
+ print_noise(config, counter, noise);
+ print_running(config, run, ena);
}
perf_stat__print_shadow_stats(config, counter, uval,
first_shadow_cpu_map_idx(config, counter, &id),
&out, &config->metric_events, st);
- if (!config->csv_output && !config->metric_only) {
+ if (!config->csv_output && !config->metric_only && !config->json_output) {
print_noise(config, counter, noise);
print_running(config, run, ena);
}
@@ -1004,8 +1147,12 @@ static void print_metric_headers(struct perf_stat_config *config,
struct outstate os = {
.fh = config->output
};
+ bool first = true;
+
+ if (config->json_output && !config->interval)
+ fprintf(config->output, "{");
- if (prefix)
+ if (prefix && !config->json_output)
fprintf(config->output, "%s", prefix);
if (!config->csv_output && !no_indent)
@@ -1025,6 +1172,9 @@ static void print_metric_headers(struct perf_stat_config *config,
os.evsel = counter;
out.ctx = &os;
out.print_metric = print_metric_header;
+ if (!first && config->json_output)
+ fprintf(config->output, ", ");
+ first = false;
out.new_line = new_line_metric;
out.force_header = true;
perf_stat__print_shadow_stats(config, counter, 0,
@@ -1033,6 +1183,8 @@ static void print_metric_headers(struct perf_stat_config *config,
&config->metric_events,
&rt_stat);
}
+ if (config->json_output)
+ fprintf(config->output, "}");
fputc('\n', config->output);
}
@@ -1048,10 +1200,18 @@ static void print_interval(struct perf_stat_config *config,
if (config->interval_clear)
puts(CONSOLE_CLEAR);
- if (!config->iostat_run)
- sprintf(prefix, "%6lu.%09lu%s", (unsigned long) ts->tv_sec, ts->tv_nsec, config->csv_sep);
-
- if ((num_print_interval == 0 && !config->csv_output) || config->interval_clear) {
+ if (!config->iostat_run && !config->json_output)
+ sprintf(prefix, "%6lu.%09lu%s", (unsigned long) ts->tv_sec,
+ ts->tv_nsec, config->csv_sep);
+ if (!config->iostat_run && config->json_output && !config->metric_only)
+ sprintf(prefix, "{\"interval\" : %lu.%09lu, ", (unsigned long)
+ ts->tv_sec, ts->tv_nsec);
+ if (!config->iostat_run && config->json_output && config->metric_only)
+ sprintf(prefix, "{\"interval\" : %lu.%09lu}", (unsigned long)
+ ts->tv_sec, ts->tv_nsec);
+
+ if ((num_print_interval == 0 && !config->csv_output && !config->json_output)
+ || config->interval_clear) {
switch (config->aggr_mode) {
case AGGR_NODE:
fprintf(output, "# time node cpus");
@@ -1091,12 +1251,19 @@ static void print_interval(struct perf_stat_config *config,
fprintf(output, " counts %*s events\n", unit_width, "unit");
}
case AGGR_UNSET:
+ case AGGR_MAX:
break;
}
}
- if ((num_print_interval == 0 || config->interval_clear) && metric_only)
+ if ((num_print_interval == 0 || config->interval_clear)
+ && metric_only && !config->json_output)
print_metric_headers(config, evlist, " ", true);
+ if ((num_print_interval == 0 || config->interval_clear)
+ && metric_only && config->json_output) {
+ fprintf(output, "{");
+ print_metric_headers(config, evlist, " ", true);
+ }
if (++num_print_interval == 25)
num_print_interval = 0;
}
@@ -1110,7 +1277,7 @@ static void print_header(struct perf_stat_config *config,
fflush(stdout);
- if (!config->csv_output) {
+ if (!config->csv_output && !config->json_output) {
fprintf(output, "\n");
fprintf(output, " Performance counter stats for ");
if (_target->bpf_str)
@@ -1303,6 +1470,9 @@ void evlist__print_counters(struct evlist *evlist, struct perf_stat_config *conf
num_print_iv = 0;
if (config->aggr_mode == AGGR_GLOBAL && prefix && !config->iostat_run)
fprintf(config->output, "%s", prefix);
+
+ if (config->json_output && !config->metric_only)
+ fprintf(config->output, "}");
}
switch (config->aggr_mode) {
@@ -1341,12 +1511,13 @@ void evlist__print_counters(struct evlist *evlist, struct perf_stat_config *conf
}
}
break;
+ case AGGR_MAX:
case AGGR_UNSET:
default:
break;
}
- if (!interval && !config->csv_output)
+ if (!interval && !config->csv_output && !config->json_output)
print_footer(config);
fflush(config->output);
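For reference, a single interval-mode record assembled from the fprintf format strings above would look roughly like the line below (illustrative values only; field order follows the calls in aggr_printout(), abs_printout() and print_metric_json(), and the closing brace comes from print_metric_json() when metric_only is not set):

{"interval" : 1.000123456, "counter-value" : "12345.000000", "unit" : "msec", "event" : "task-clock", "metric-value" : 1.000000, "metric-unit" : "CPUs utilized"}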
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 979c8cb918f7..788ce5e46470 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -1193,7 +1193,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
&rsd);
if (retiring > 0.7)
color = PERF_COLOR_GREEN;
- print_metric(config, ctxp, color, "%8.1f%%", "retiring",
+ print_metric(config, ctxp, color, "%8.1f%%", "Retiring",
retiring * 100.);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_FE_BOUND) &&
full_td(cpu_map_idx, st, &rsd)) {
@@ -1202,7 +1202,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
&rsd);
if (fe_bound > 0.2)
color = PERF_COLOR_RED;
- print_metric(config, ctxp, color, "%8.1f%%", "frontend bound",
+ print_metric(config, ctxp, color, "%8.1f%%", "Frontend Bound",
fe_bound * 100.);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_BE_BOUND) &&
full_td(cpu_map_idx, st, &rsd)) {
@@ -1211,7 +1211,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
&rsd);
if (be_bound > 0.2)
color = PERF_COLOR_RED;
- print_metric(config, ctxp, color, "%8.1f%%", "backend bound",
+ print_metric(config, ctxp, color, "%8.1f%%", "Backend Bound",
be_bound * 100.);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_BAD_SPEC) &&
full_td(cpu_map_idx, st, &rsd)) {
@@ -1220,7 +1220,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
&rsd);
if (bad_spec > 0.1)
color = PERF_COLOR_RED;
- print_metric(config, ctxp, color, "%8.1f%%", "bad speculation",
+ print_metric(config, ctxp, color, "%8.1f%%", "Bad Speculation",
bad_spec * 100.);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_HEAVY_OPS) &&
full_td(cpu_map_idx, st, &rsd) && (config->topdown_level > 1)) {
@@ -1234,13 +1234,13 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
if (retiring > 0.7 && heavy_ops > 0.1)
color = PERF_COLOR_GREEN;
- print_metric(config, ctxp, color, "%8.1f%%", "heavy operations",
+ print_metric(config, ctxp, color, "%8.1f%%", "Heavy Operations",
heavy_ops * 100.);
if (retiring > 0.7 && light_ops > 0.6)
color = PERF_COLOR_GREEN;
else
color = NULL;
- print_metric(config, ctxp, color, "%8.1f%%", "light operations",
+ print_metric(config, ctxp, color, "%8.1f%%", "Light Operations",
light_ops * 100.);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_BR_MISPREDICT) &&
full_td(cpu_map_idx, st, &rsd) && (config->topdown_level > 1)) {
@@ -1254,13 +1254,13 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
if (bad_spec > 0.1 && br_mis > 0.05)
color = PERF_COLOR_RED;
- print_metric(config, ctxp, color, "%8.1f%%", "branch mispredict",
+ print_metric(config, ctxp, color, "%8.1f%%", "Branch Mispredict",
br_mis * 100.);
if (bad_spec > 0.1 && m_clears > 0.05)
color = PERF_COLOR_RED;
else
color = NULL;
- print_metric(config, ctxp, color, "%8.1f%%", "machine clears",
+ print_metric(config, ctxp, color, "%8.1f%%", "Machine Clears",
m_clears * 100.);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_LAT) &&
full_td(cpu_map_idx, st, &rsd) && (config->topdown_level > 1)) {
@@ -1274,13 +1274,13 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
if (fe_bound > 0.2 && fetch_lat > 0.15)
color = PERF_COLOR_RED;
- print_metric(config, ctxp, color, "%8.1f%%", "fetch latency",
+ print_metric(config, ctxp, color, "%8.1f%%", "Fetch Latency",
fetch_lat * 100.);
if (fe_bound > 0.2 && fetch_bw > 0.1)
color = PERF_COLOR_RED;
else
color = NULL;
- print_metric(config, ctxp, color, "%8.1f%%", "fetch bandwidth",
+ print_metric(config, ctxp, color, "%8.1f%%", "Fetch Bandwidth",
fetch_bw * 100.);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_MEM_BOUND) &&
full_td(cpu_map_idx, st, &rsd) && (config->topdown_level > 1)) {
@@ -1294,13 +1294,13 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
if (be_bound > 0.2 && mem_bound > 0.2)
color = PERF_COLOR_RED;
- print_metric(config, ctxp, color, "%8.1f%%", "memory bound",
+ print_metric(config, ctxp, color, "%8.1f%%", "Memory Bound",
mem_bound * 100.);
if (be_bound > 0.2 && core_bound > 0.1)
color = PERF_COLOR_RED;
else
color = NULL;
- print_metric(config, ctxp, color, "%8.1f%%", "Core bound",
+ print_metric(config, ctxp, color, "%8.1f%%", "Core Bound",
core_bound * 100.);
} else if (evsel->metric_expr) {
generic_metric(config, evsel->metric_expr, evsel->metric_events, NULL,
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 37ea2d044708..0882b4754fcf 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -401,6 +401,7 @@ process_counter_values(struct perf_stat_config *config, struct evsel *evsel,
aggr->ena += count->ena;
aggr->run += count->run;
case AGGR_UNSET:
+ case AGGR_MAX:
default:
break;
}
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index b5aeb8e6d34b..668250022f8c 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -57,6 +57,7 @@ enum aggr_mode {
AGGR_THREAD,
AGGR_UNSET,
AGGR_NODE,
+ AGGR_MAX
};
enum {
@@ -121,6 +122,7 @@ struct perf_stat_config {
bool no_inherit;
bool identifier;
bool csv_output;
+ bool json_output;
bool interval_clear;
bool metric_only;
bool null_run;
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index b3be5b1d9dbb..75bec32d4f57 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -1305,16 +1305,29 @@ dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss,
if (elf_read_program_header(syms_ss->elf,
(u64)sym.st_value, &phdr)) {
- pr_warning("%s: failed to find program header for "
+ pr_debug4("%s: failed to find program header for "
"symbol: %s st_value: %#" PRIx64 "\n",
__func__, elf_name, (u64)sym.st_value);
- continue;
+ pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
+ "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n",
+ __func__, (u64)sym.st_value, (u64)shdr.sh_addr,
+ (u64)shdr.sh_offset);
+ /*
+ * Failed to find a program header, so fall back to
+ * shdr.sh_addr and shdr.sh_offset to calibrate the
+ * symbol's file address. This is not necessary for a
+ * normal C ELF file, but it is still needed to handle
+ * Java JIT symbols.
+ */
+ sym.st_value -= shdr.sh_addr - shdr.sh_offset;
+ } else {
+ pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
+ "p_vaddr: %#" PRIx64 " p_offset: %#" PRIx64 "\n",
+ __func__, (u64)sym.st_value, (u64)phdr.p_vaddr,
+ (u64)phdr.p_offset);
+ sym.st_value -= phdr.p_vaddr - phdr.p_offset;
}
- pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
- "p_vaddr: %#" PRIx64 " p_offset: %#" PRIx64 "\n",
- __func__, (u64)sym.st_value, (u64)phdr.p_vaddr,
- (u64)phdr.p_offset);
- sym.st_value -= phdr.p_vaddr - phdr.p_offset;
}
demangled = demangle_sym(dso, kmodule, elf_name);
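As a worked example of the fallback adjustment (illustrative numbers, not from the patch): a symbol with st_value 0x401000 in a section with sh_addr 0x400000 and sh_offset 0x1000 ends up with st_value 0x401000 - (0x400000 - 0x1000) = 0x2000, i.e. the symbol's offset within the file, matching what the p_vaddr/p_offset path computes when a program header is available.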
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index f72baf636724..a4b22caa7c24 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -2300,11 +2300,13 @@ do_kallsyms:
static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map)
{
int err;
- const char *kallsyms_filename = NULL;
+ const char *kallsyms_filename;
struct machine *machine = map__kmaps(map)->machine;
char path[PATH_MAX];
- if (machine__is_default_guest(machine)) {
+ if (machine->kallsyms_filename) {
+ kallsyms_filename = machine->kallsyms_filename;
+ } else if (machine__is_default_guest(machine)) {
/*
* if the user specified a vmlinux filename, use it and only
* it, reporting errors to the user if it cannot be used.
diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
index 84d17bd4efae..812424dbf2d5 100644
--- a/tools/perf/util/synthetic-events.c
+++ b/tools/perf/util/synthetic-events.c
@@ -1184,52 +1184,48 @@ int perf_event__synthesize_thread_map2(struct perf_tool *tool,
return err;
}
-static void synthesize_cpus(struct cpu_map_entries *cpus,
- struct perf_cpu_map *map)
+static void synthesize_cpus(struct perf_record_cpu_map_data *data,
+ const struct perf_cpu_map *map)
{
int i, map_nr = perf_cpu_map__nr(map);
- cpus->nr = map_nr;
+ data->cpus_data.nr = map_nr;
for (i = 0; i < map_nr; i++)
- cpus->cpu[i] = perf_cpu_map__cpu(map, i).cpu;
+ data->cpus_data.cpu[i] = perf_cpu_map__cpu(map, i).cpu;
}
-static void synthesize_mask(struct perf_record_record_cpu_map *mask,
- struct perf_cpu_map *map, int max)
+static void synthesize_mask(struct perf_record_cpu_map_data *data,
+ const struct perf_cpu_map *map, int max)
{
- int i;
+ int idx;
+ struct perf_cpu cpu;
+
+ /* Due to padding, the 4-byte-per-entry mask variant is always smaller. */
+ data->mask32_data.nr = BITS_TO_U32(max);
+ data->mask32_data.long_size = 4;
- mask->nr = BITS_TO_LONGS(max);
- mask->long_size = sizeof(long);
+ perf_cpu_map__for_each_cpu(cpu, idx, map) {
+ int bit_word = cpu.cpu / 32;
+ __u32 bit_mask = 1U << (cpu.cpu & 31);
- for (i = 0; i < perf_cpu_map__nr(map); i++)
- set_bit(perf_cpu_map__cpu(map, i).cpu, mask->mask);
+ data->mask32_data.mask[bit_word] |= bit_mask;
+ }
}
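As a quick sanity check of the 32-bit mask math above (illustrative, not part of the patch): CPU 37 gives bit_word = 37 / 32 = 1 and bit_mask = 1U << (37 & 31) = 1U << 5 = 0x20, so bit 37 is recorded in mask32_data.mask[1].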
-static size_t cpus_size(struct perf_cpu_map *map)
+static size_t cpus_size(const struct perf_cpu_map *map)
{
return sizeof(struct cpu_map_entries) + perf_cpu_map__nr(map) * sizeof(u16);
}
-static size_t mask_size(struct perf_cpu_map *map, int *max)
+static size_t mask_size(const struct perf_cpu_map *map, int *max)
{
- int i;
-
- *max = 0;
-
- for (i = 0; i < perf_cpu_map__nr(map); i++) {
- /* bit position of the cpu is + 1 */
- int bit = perf_cpu_map__cpu(map, i).cpu + 1;
-
- if (bit > *max)
- *max = bit;
- }
-
- return sizeof(struct perf_record_record_cpu_map) + BITS_TO_LONGS(*max) * sizeof(long);
+ *max = perf_cpu_map__max(map).cpu;
+ return sizeof(struct perf_record_mask_cpu_map32) + BITS_TO_U32(*max) * sizeof(__u32);
}
-void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int *max)
+static void *cpu_map_data__alloc(const struct perf_cpu_map *map, size_t *size,
+ u16 *type, int *max)
{
size_t size_cpus, size_mask;
bool is_dummy = perf_cpu_map__empty(map);
@@ -1258,30 +1254,31 @@ void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int
*type = PERF_CPU_MAP__MASK;
}
- *size += sizeof(struct perf_record_cpu_map_data);
+ *size += sizeof(__u16); /* For perf_record_cpu_map_data.type. */
*size = PERF_ALIGN(*size, sizeof(u64));
return zalloc(*size);
}
-void cpu_map_data__synthesize(struct perf_record_cpu_map_data *data, struct perf_cpu_map *map,
- u16 type, int max)
+static void cpu_map_data__synthesize(struct perf_record_cpu_map_data *data,
+ const struct perf_cpu_map *map,
+ u16 type, int max)
{
data->type = type;
switch (type) {
case PERF_CPU_MAP__CPUS:
- synthesize_cpus((struct cpu_map_entries *) data->data, map);
+ synthesize_cpus(data, map);
break;
case PERF_CPU_MAP__MASK:
- synthesize_mask((struct perf_record_record_cpu_map *)data->data, map, max);
+ synthesize_mask(data, map, max);
default:
break;
}
}
-static struct perf_record_cpu_map *cpu_map_event__new(struct perf_cpu_map *map)
+static struct perf_record_cpu_map *cpu_map_event__new(const struct perf_cpu_map *map)
{
- size_t size = sizeof(struct perf_record_cpu_map);
+ size_t size = sizeof(struct perf_event_header);
struct perf_record_cpu_map *event;
int max;
u16 type;
@@ -1299,7 +1296,7 @@ static struct perf_record_cpu_map *cpu_map_event__new(struct perf_cpu_map *map)
}
int perf_event__synthesize_cpu_map(struct perf_tool *tool,
- struct perf_cpu_map *map,
+ const struct perf_cpu_map *map,
perf_event__handler_t process,
struct machine *machine)
{
@@ -1432,11 +1429,12 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
result += sizeof(u64);
/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
if (read_format & PERF_FORMAT_GROUP) {
- sz = sample->read.group.nr *
- sizeof(struct sample_read_value);
- result += sz;
+ sz = sample_read_value_size(read_format);
+ result += sz * sample->read.group.nr;
} else {
result += sizeof(u64);
+ if (read_format & PERF_FORMAT_LOST)
+ result += sizeof(u64);
}
}
@@ -1521,6 +1519,20 @@ void __weak arch_perf_synthesize_sample_weight(const struct perf_sample *data,
*array = data->weight;
}
+static __u64 *copy_read_group_values(__u64 *array, __u64 read_format,
+ const struct perf_sample *sample)
+{
+ size_t sz = sample_read_value_size(read_format);
+ struct sample_read_value *v = sample->read.group.values;
+
+ sample_read_group__for_each(v, sample->read.group.nr, read_format) {
+ /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
+ memcpy(array, v, sz);
+ array = (void *)array + sz;
+ }
+ return array;
+}
+
int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format,
const struct perf_sample *sample)
{
@@ -1602,13 +1614,16 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_fo
/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
if (read_format & PERF_FORMAT_GROUP) {
- sz = sample->read.group.nr *
- sizeof(struct sample_read_value);
- memcpy(array, sample->read.group.values, sz);
- array = (void *)array + sz;
+ array = copy_read_group_values(array, read_format,
+ sample);
} else {
*array = sample->read.one.id;
array++;
+
+ if (read_format & PERF_FORMAT_LOST) {
+ *array = sample->read.one.lost;
+ array++;
+ }
}
}
@@ -1712,48 +1727,112 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_fo
return 0;
}
-int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process,
- struct evlist *evlist, struct machine *machine)
+int perf_event__synthesize_id_sample(__u64 *array, u64 type, const struct perf_sample *sample)
+{
+ __u64 *start = array;
+
+ /*
+ * used for cross-endian analysis. See git commit 65014ab3
+ * for why this goofiness is needed.
+ */
+ union u64_swap u;
+
+ if (type & PERF_SAMPLE_TID) {
+ u.val32[0] = sample->pid;
+ u.val32[1] = sample->tid;
+ *array = u.val64;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_TIME) {
+ *array = sample->time;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_ID) {
+ *array = sample->id;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_STREAM_ID) {
+ *array = sample->stream_id;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_CPU) {
+ u.val32[0] = sample->cpu;
+ u.val32[1] = 0;
+ *array = u.val64;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_IDENTIFIER) {
+ *array = sample->id;
+ array++;
+ }
+
+ return (void *)array - (void *)start;
+}
+
+int __perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process,
+ struct evlist *evlist, struct machine *machine, size_t from)
{
union perf_event *ev;
struct evsel *evsel;
- size_t nr = 0, i = 0, sz, max_nr, n;
+ size_t nr = 0, i = 0, sz, max_nr, n, pos;
+ size_t e1_sz = sizeof(struct id_index_entry);
+ size_t e2_sz = sizeof(struct id_index_entry_2);
+ size_t etot_sz = e1_sz + e2_sz;
+ bool e2_needed = false;
int err;
- pr_debug2("Synthesizing id index\n");
-
- max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) /
- sizeof(struct id_index_entry);
+ max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) / etot_sz;
- evlist__for_each_entry(evlist, evsel)
+ pos = 0;
+ evlist__for_each_entry(evlist, evsel) {
+ if (pos++ < from)
+ continue;
nr += evsel->core.ids;
+ }
+
+ if (!nr)
+ return 0;
+
+ pr_debug2("Synthesizing id index\n");
n = nr > max_nr ? max_nr : nr;
- sz = sizeof(struct perf_record_id_index) + n * sizeof(struct id_index_entry);
+ sz = sizeof(struct perf_record_id_index) + n * etot_sz;
ev = zalloc(sz);
if (!ev)
return -ENOMEM;
+ sz = sizeof(struct perf_record_id_index) + n * e1_sz;
+
ev->id_index.header.type = PERF_RECORD_ID_INDEX;
- ev->id_index.header.size = sz;
ev->id_index.nr = n;
+ pos = 0;
evlist__for_each_entry(evlist, evsel) {
u32 j;
- for (j = 0; j < evsel->core.ids; j++) {
+ if (pos++ < from)
+ continue;
+ for (j = 0; j < evsel->core.ids; j++, i++) {
struct id_index_entry *e;
+ struct id_index_entry_2 *e2;
struct perf_sample_id *sid;
if (i >= n) {
+ ev->id_index.header.size = sz + (e2_needed ? n * e2_sz : 0);
err = process(tool, ev, NULL, machine);
if (err)
goto out_err;
nr -= n;
i = 0;
+ e2_needed = false;
}
- e = &ev->id_index.entries[i++];
+ e = &ev->id_index.entries[i];
e->id = evsel->core.id[j];
@@ -1766,11 +1845,18 @@ int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_
e->idx = sid->idx;
e->cpu = sid->cpu.cpu;
e->tid = sid->tid;
+
+ if (sid->machine_pid)
+ e2_needed = true;
+
+ e2 = (void *)ev + sz;
+ e2[i].machine_pid = sid->machine_pid;
+ e2[i].vcpu = sid->vcpu.cpu;
}
}
- sz = sizeof(struct perf_record_id_index) + nr * sizeof(struct id_index_entry);
- ev->id_index.header.size = sz;
+ sz = sizeof(struct perf_record_id_index) + nr * e1_sz;
+ ev->id_index.header.size = sz + (e2_needed ? nr * e2_sz : 0);
ev->id_index.nr = nr;
err = process(tool, ev, NULL, machine);
@@ -1780,6 +1866,12 @@ out_err:
return err;
}
+int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process,
+ struct evlist *evlist, struct machine *machine)
+{
+ return __perf_event__synthesize_id_index(tool, process, evlist, machine, 0);
+}
+
int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
struct target *target, struct perf_thread_map *threads,
perf_event__handler_t process, bool needs_mmap,
diff --git a/tools/perf/util/synthetic-events.h b/tools/perf/util/synthetic-events.h
index 78a0450db164..53737d1619a4 100644
--- a/tools/perf/util/synthetic-events.h
+++ b/tools/perf/util/synthetic-events.h
@@ -46,7 +46,7 @@ typedef int (*perf_event__handler_t)(struct perf_tool *tool, union perf_event *e
int perf_event__synthesize_attrs(struct perf_tool *tool, struct evlist *evlist, perf_event__handler_t process);
int perf_event__synthesize_attr(struct perf_tool *tool, struct perf_event_attr *attr, u32 ids, u64 *id, perf_event__handler_t process);
int perf_event__synthesize_build_id(struct perf_tool *tool, struct dso *pos, u16 misc, perf_event__handler_t process, struct machine *machine);
-int perf_event__synthesize_cpu_map(struct perf_tool *tool, struct perf_cpu_map *cpus, perf_event__handler_t process, struct machine *machine);
+int perf_event__synthesize_cpu_map(struct perf_tool *tool, const struct perf_cpu_map *cpus, perf_event__handler_t process, struct machine *machine);
int perf_event__synthesize_event_update_cpus(struct perf_tool *tool, struct evsel *evsel, perf_event__handler_t process);
int perf_event__synthesize_event_update_name(struct perf_tool *tool, struct evsel *evsel, perf_event__handler_t process);
int perf_event__synthesize_event_update_scale(struct perf_tool *tool, struct evsel *evsel, perf_event__handler_t process);
@@ -55,6 +55,8 @@ int perf_event__synthesize_extra_attr(struct perf_tool *tool, struct evlist *evs
int perf_event__synthesize_extra_kmaps(struct perf_tool *tool, perf_event__handler_t process, struct machine *machine);
int perf_event__synthesize_features(struct perf_tool *tool, struct perf_session *session, struct evlist *evlist, perf_event__handler_t process);
int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process, struct evlist *evlist, struct machine *machine);
+int __perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process, struct evlist *evlist, struct machine *machine, size_t from);
+int perf_event__synthesize_id_sample(__u64 *array, u64 type, const struct perf_sample *sample);
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, perf_event__handler_t process, struct machine *machine);
int perf_event__synthesize_mmap_events(struct perf_tool *tool, union perf_event *event, pid_t pid, pid_t tgid, perf_event__handler_t process, struct machine *machine, bool mmap_data);
int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t process, struct machine *machine);
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 665e5c0618ed..e3e5427e1c3c 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -47,6 +47,7 @@ struct thread *thread__new(pid_t pid, pid_t tid)
thread->tid = tid;
thread->ppid = -1;
thread->cpu = -1;
+ thread->guest_cpu = -1;
thread->lbr_stitch_enable = false;
INIT_LIST_HEAD(&thread->namespaces_list);
INIT_LIST_HEAD(&thread->comm_list);
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index b066fb30d203..241f300d7d6e 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -39,6 +39,7 @@ struct thread {
pid_t tid;
pid_t ppid;
int cpu;
+ int guest_cpu; /* For QEMU thread */
refcount_t refcnt;
bool comm_set;
int comm_len;
diff --git a/tools/perf/util/tool.h b/tools/perf/util/tool.h
index f2352dba1875..c957fb849ac6 100644
--- a/tools/perf/util/tool.h
+++ b/tools/perf/util/tool.h
@@ -76,7 +76,8 @@ struct perf_tool {
stat_config,
stat,
stat_round,
- feature;
+ feature,
+ finished_init;
event_op4 compressed;
event_op3 auxtrace;
bool ordered_events;
diff --git a/tools/perf/util/topdown.c b/tools/perf/util/topdown.c
index a369f84ceb6a..1090841550f7 100644
--- a/tools/perf/util/topdown.c
+++ b/tools/perf/util/topdown.c
@@ -65,3 +65,10 @@ __weak bool arch_topdown_sample_read(struct evsel *leader __maybe_unused)
{
return false;
}
+
+__weak const char *arch_get_topdown_pmu_name(struct evlist *evlist
+ __maybe_unused,
+ bool warn __maybe_unused)
+{
+ return "cpu";
+}
diff --git a/tools/perf/util/topdown.h b/tools/perf/util/topdown.h
index 118e75281f93..f9531528c559 100644
--- a/tools/perf/util/topdown.h
+++ b/tools/perf/util/topdown.h
@@ -2,11 +2,12 @@
#ifndef TOPDOWN_H
#define TOPDOWN_H 1
#include "evsel.h"
+#include "evlist.h"
bool arch_topdown_check_group(bool *warn);
void arch_topdown_group_warn(void);
bool arch_topdown_sample_read(struct evsel *leader);
-
+const char *arch_get_topdown_pmu_name(struct evlist *evlist, bool warn);
int topdown_filter_events(const char **attr, char **str, bool use_group,
const char *pmu_name);
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index a65f65d0857e..892c323b4ac9 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -19,16 +19,24 @@
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <internal/lib.h> // page_size
+#include <sys/param.h>
#include "trace-event.h"
+#include "tracepoint.h"
#include <api/fs/tracing_path.h>
#include "evsel.h"
#include "debug.h"
#define VERSION "0.6"
+#define MAX_EVENT_LENGTH 512
static int output_fd;
+struct tracepoint_path {
+ char *system;
+ char *name;
+ struct tracepoint_path *next;
+};
int bigendian(void)
{
@@ -400,6 +408,94 @@ put_tracepoints_path(struct tracepoint_path *tps)
}
}
+static struct tracepoint_path *tracepoint_id_to_path(u64 config)
+{
+ struct tracepoint_path *path = NULL;
+ DIR *sys_dir, *evt_dir;
+ struct dirent *sys_dirent, *evt_dirent;
+ char id_buf[24];
+ int fd;
+ u64 id;
+ char evt_path[MAXPATHLEN];
+ char *dir_path;
+
+ sys_dir = tracing_events__opendir();
+ if (!sys_dir)
+ return NULL;
+
+ for_each_subsystem(sys_dir, sys_dirent) {
+ dir_path = get_events_file(sys_dirent->d_name);
+ if (!dir_path)
+ continue;
+ evt_dir = opendir(dir_path);
+ if (!evt_dir)
+ goto next;
+
+ for_each_event(dir_path, evt_dir, evt_dirent) {
+
+ scnprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path,
+ evt_dirent->d_name);
+ fd = open(evt_path, O_RDONLY);
+ if (fd < 0)
+ continue;
+ if (read(fd, id_buf, sizeof(id_buf)) < 0) {
+ close(fd);
+ continue;
+ }
+ close(fd);
+ id = atoll(id_buf);
+ if (id == config) {
+ put_events_file(dir_path);
+ closedir(evt_dir);
+ closedir(sys_dir);
+ path = zalloc(sizeof(*path));
+ if (!path)
+ return NULL;
+ if (asprintf(&path->system, "%.*s",
+ MAX_EVENT_LENGTH, sys_dirent->d_name) < 0) {
+ free(path);
+ return NULL;
+ }
+ if (asprintf(&path->name, "%.*s",
+ MAX_EVENT_LENGTH, evt_dirent->d_name) < 0) {
+ zfree(&path->system);
+ free(path);
+ return NULL;
+ }
+ return path;
+ }
+ }
+ closedir(evt_dir);
+next:
+ put_events_file(dir_path);
+ }
+
+ closedir(sys_dir);
+ return NULL;
+}
+
+static struct tracepoint_path *tracepoint_name_to_path(const char *name)
+{
+ struct tracepoint_path *path = zalloc(sizeof(*path));
+ char *str = strchr(name, ':');
+
+ if (path == NULL || str == NULL) {
+ free(path);
+ return NULL;
+ }
+
+ path->system = strndup(name, str - name);
+ path->name = strdup(str+1);
+
+ if (path->system == NULL || path->name == NULL) {
+ zfree(&path->system);
+ zfree(&path->name);
+ zfree(&path);
+ }
+
+ return path;
+}
+
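For illustration (not part of the patch): tracepoint_name_to_path("sched:sched_switch") returns a tracepoint_path with system "sched" and name "sched_switch"; a string without ':' or a failed allocation yields NULL.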
static struct tracepoint_path *
get_tracepoints_path(struct list_head *pattrs)
{
diff --git a/tools/perf/util/tracepoint.c b/tools/perf/util/tracepoint.c
new file mode 100644
index 000000000000..89ef56c43311
--- /dev/null
+++ b/tools/perf/util/tracepoint.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "tracepoint.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <sys/param.h>
+#include <unistd.h>
+
+#include <api/fs/tracing_path.h>
+
+int tp_event_has_id(const char *dir_path, struct dirent *evt_dir)
+{
+ char evt_path[MAXPATHLEN];
+ int fd;
+
+ snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path, evt_dir->d_name);
+ fd = open(evt_path, O_RDONLY);
+ if (fd < 0)
+ return -EINVAL;
+ close(fd);
+
+ return 0;
+}
+
+/*
+ * Check whether event is in <debugfs_mount_point>/tracing/events
+ */
+int is_valid_tracepoint(const char *event_string)
+{
+ DIR *sys_dir, *evt_dir;
+ struct dirent *sys_dirent, *evt_dirent;
+ char evt_path[MAXPATHLEN];
+ char *dir_path;
+
+ sys_dir = tracing_events__opendir();
+ if (!sys_dir)
+ return 0;
+
+ for_each_subsystem(sys_dir, sys_dirent) {
+ dir_path = get_events_file(sys_dirent->d_name);
+ if (!dir_path)
+ continue;
+ evt_dir = opendir(dir_path);
+ if (!evt_dir)
+ goto next;
+
+ for_each_event(dir_path, evt_dir, evt_dirent) {
+ snprintf(evt_path, MAXPATHLEN, "%s:%s",
+ sys_dirent->d_name, evt_dirent->d_name);
+ if (!strcmp(evt_path, event_string)) {
+ closedir(evt_dir);
+ closedir(sys_dir);
+ return 1;
+ }
+ }
+ closedir(evt_dir);
+next:
+ put_events_file(dir_path);
+ }
+ closedir(sys_dir);
+ return 0;
+}
diff --git a/tools/perf/util/tracepoint.h b/tools/perf/util/tracepoint.h
new file mode 100644
index 000000000000..c4a110fe87d7
--- /dev/null
+++ b/tools/perf/util/tracepoint.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_TRACEPOINT_H
+#define __PERF_TRACEPOINT_H
+
+#include <dirent.h>
+#include <string.h>
+
+int tp_event_has_id(const char *dir_path, struct dirent *evt_dir);
+
+#define for_each_event(dir_path, evt_dir, evt_dirent) \
+ while ((evt_dirent = readdir(evt_dir)) != NULL) \
+ if (evt_dirent->d_type == DT_DIR && \
+ (strcmp(evt_dirent->d_name, ".")) && \
+ (strcmp(evt_dirent->d_name, "..")) && \
+ (!tp_event_has_id(dir_path, evt_dirent)))
+
+#define for_each_subsystem(sys_dir, sys_dirent) \
+ while ((sys_dirent = readdir(sys_dir)) != NULL) \
+ if (sys_dirent->d_type == DT_DIR && \
+ (strcmp(sys_dirent->d_name, ".")) && \
+ (strcmp(sys_dirent->d_name, "..")))
+
+int is_valid_tracepoint(const char *event_string);
+
+#endif /* __PERF_TRACEPOINT_H */
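A minimal caller sketch for the new helper (hypothetical usage, not part of the patch; assumes tracefs is mounted and reachable via the tracing_path API):

#include <stdio.h>
#include "tracepoint.h"

int main(void)
{
	/* is_valid_tracepoint() expects the "system:event" form */
	if (is_valid_tracepoint("sched:sched_switch"))
		printf("tracepoint found under tracing/events\n");
	return 0;
}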
diff --git a/tools/perf/util/tsc.h b/tools/perf/util/tsc.h
index 7d83a31732a7..88fd1c4c1cb8 100644
--- a/tools/perf/util/tsc.h
+++ b/tools/perf/util/tsc.h
@@ -25,6 +25,7 @@ int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
u64 perf_time_to_tsc(u64 ns, struct perf_tsc_conversion *tc);
u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc);
u64 rdtsc(void);
+double arch_get_tsc_freq(void);
size_t perf_event__fprintf_time_conv(union perf_event *event, FILE *fp);
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index eeb83c80f458..391c1e928bd7 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -18,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/time64.h>
+#include <linux/overflow.h>
#include <unistd.h>
#include "cap.h"
#include "strlist.h"
@@ -200,7 +201,7 @@ static int rm_rf_depth_pat(const char *path, int depth, const char **pat)
return rmdir(path);
}
-static int rm_rf_kcore_dir(const char *path)
+static int rm_rf_a_kcore_dir(const char *path, const char *name)
{
char kcore_dir_path[PATH_MAX];
const char *pat[] = {
@@ -210,11 +211,44 @@ static int rm_rf_kcore_dir(const char *path)
NULL,
};
- snprintf(kcore_dir_path, sizeof(kcore_dir_path), "%s/kcore_dir", path);
+ snprintf(kcore_dir_path, sizeof(kcore_dir_path), "%s/%s", path, name);
return rm_rf_depth_pat(kcore_dir_path, 0, pat);
}
+static bool kcore_dir_filter(const char *name __maybe_unused, struct dirent *d)
+{
+ const char *pat[] = {
+ "kcore_dir",
+ "kcore_dir__[1-9]*",
+ NULL,
+ };
+
+ return match_pat(d->d_name, pat);
+}
+
+static int rm_rf_kcore_dir(const char *path)
+{
+ struct strlist *kcore_dirs;
+ struct str_node *nd;
+ int ret;
+
+ kcore_dirs = lsdir(path, kcore_dir_filter);
+
+ if (!kcore_dirs)
+ return 0;
+
+ strlist__for_each_entry(nd, kcore_dirs) {
+ ret = rm_rf_a_kcore_dir(path, nd->s);
+ if (ret)
+ return ret;
+ }
+
+ strlist__delete(kcore_dirs);
+
+ return 0;
+}
+
int rm_rf_perf_data(const char *path)
{
const char *pat[] = {
@@ -467,3 +501,35 @@ char *filename_with_chroot(int pid, const char *filename)
return new_name;
}
+
+/*
+ * Reallocate an array *arr of size *arr_sz so that it is big enough to contain
+ * x elements of size msz, initializing new entries to *init_val or zero if
+ * init_val is NULL
+ */
+int do_realloc_array_as_needed(void **arr, size_t *arr_sz, size_t x, size_t msz, const void *init_val)
+{
+ size_t new_sz = *arr_sz;
+ void *new_arr;
+ size_t i;
+
+ if (!new_sz)
+ new_sz = msz >= 64 ? 1 : roundup(64, msz); /* Start with at least 64 bytes */
+ while (x >= new_sz) {
+ if (check_mul_overflow(new_sz, (size_t)2, &new_sz))
+ return -ENOMEM;
+ }
+ if (new_sz == *arr_sz)
+ return 0;
+ new_arr = calloc(new_sz, msz);
+ if (!new_arr)
+ return -ENOMEM;
+ memcpy(new_arr, *arr, *arr_sz * msz);
+ if (init_val) {
+ for (i = *arr_sz; i < new_sz; i++)
+ memcpy(new_arr + (i * msz), init_val, msz);
+ }
+ *arr = new_arr;
+ *arr_sz = new_sz;
+ return 0;
+}
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 0f78f1e7782d..c1f2d423a9ec 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -79,4 +79,19 @@ struct perf_debuginfod {
void perf_debuginfod_setup(struct perf_debuginfod *di);
char *filename_with_chroot(int pid, const char *filename);
+
+int do_realloc_array_as_needed(void **arr, size_t *arr_sz, size_t x,
+ size_t msz, const void *init_val);
+
+#define realloc_array_as_needed(a, n, x, v) ({ \
+ typeof(x) __x = (x); \
+ __x >= (n) ? \
+ do_realloc_array_as_needed((void **)&(a), \
+ &(n), \
+ __x, \
+ sizeof(*(a)), \
+ (const void *)(v)) : \
+ 0; \
+ })
+
#endif /* GIT_COMPAT_UTIL_H */
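A minimal usage sketch for the new helper (hypothetical caller, not part of the patch): the macro grows the array so that index idx becomes valid, updating the capacity variable in place and returning 0 or -ENOMEM.

#include "util.h"

struct item { int val; };

static struct item *items;	/* reallocated on demand */
static size_t items_sz;		/* current capacity, in elements */

static int set_item(size_t idx, int val)
{
	int err = realloc_array_as_needed(items, items_sz, idx, NULL);

	if (err)
		return err;	/* -ENOMEM on overflow or allocation failure */
	items[idx].val = val;
	return 0;
}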
diff --git a/tools/power/x86/intel-speed-select/hfi-events.c b/tools/power/x86/intel-speed-select/hfi-events.c
index 761375062505..f0ed69721308 100644
--- a/tools/power/x86/intel-speed-select/hfi-events.c
+++ b/tools/power/x86/intel-speed-select/hfi-events.c
@@ -144,7 +144,7 @@ static int family_handler(struct nl_msg *msg, void *arg)
continue;
res->id = nla_get_u32(tb2[CTRL_ATTR_MCAST_GRP_ID]);
break;
- };
+ }
return 0;
}
diff --git a/tools/power/x86/intel-speed-select/isst-daemon.c b/tools/power/x86/intel-speed-select/isst-daemon.c
index dd372924bc82..d0400c6684ba 100644
--- a/tools/power/x86/intel-speed-select/isst-daemon.c
+++ b/tools/power/x86/intel-speed-select/isst-daemon.c
@@ -41,7 +41,7 @@ void process_level_change(int cpu)
time_t tm;
int ret;
- if (pkg_id >= MAX_PACKAGE_COUNT || die_id > MAX_DIE_PER_PACKAGE) {
+ if (pkg_id >= MAX_PACKAGE_COUNT || die_id >= MAX_DIE_PER_PACKAGE) {
debug_printf("Invalid package/die info for cpu:%d\n", cpu);
return;
}
diff --git a/tools/testing/crypto/chacha20-s390/test-cipher.c b/tools/testing/crypto/chacha20-s390/test-cipher.c
index 34e8b855266f..8141d45df51a 100644
--- a/tools/testing/crypto/chacha20-s390/test-cipher.c
+++ b/tools/testing/crypto/chacha20-s390/test-cipher.c
@@ -252,29 +252,26 @@ static int __init chacha_s390_test_init(void)
memset(plain, 'a', data_size);
get_random_bytes(plain, (data_size > 256 ? 256 : data_size));
- cipher_generic = vmalloc(data_size);
+ cipher_generic = vzalloc(data_size);
if (!cipher_generic) {
pr_info("could not allocate cipher_generic buffer\n");
ret = -2;
goto out;
}
- memset(cipher_generic, 0, data_size);
- cipher_s390 = vmalloc(data_size);
+ cipher_s390 = vzalloc(data_size);
if (!cipher_s390) {
pr_info("could not allocate cipher_s390 buffer\n");
ret = -2;
goto out;
}
- memset(cipher_s390, 0, data_size);
- revert = vmalloc(data_size);
+ revert = vzalloc(data_size);
if (!revert) {
pr_info("could not allocate revert buffer\n");
ret = -2;
goto out;
}
- memset(revert, 0, data_size);
if (debug)
print_hex_dump(KERN_INFO, "src: ", DUMP_PREFIX_OFFSET,
diff --git a/tools/testing/cxl/Kbuild b/tools/testing/cxl/Kbuild
index 33543231d453..500be85729cc 100644
--- a/tools/testing/cxl/Kbuild
+++ b/tools/testing/cxl/Kbuild
@@ -47,6 +47,7 @@ cxl_core-y += $(CXL_CORE_SRC)/memdev.o
cxl_core-y += $(CXL_CORE_SRC)/mbox.o
cxl_core-y += $(CXL_CORE_SRC)/pci.o
cxl_core-y += $(CXL_CORE_SRC)/hdm.o
+cxl_core-$(CONFIG_CXL_REGION) += $(CXL_CORE_SRC)/region.o
cxl_core-y += config_check.o
obj-m += test/
diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c
index 431f2bddf6c8..a072b2d3e726 100644
--- a/tools/testing/cxl/test/cxl.c
+++ b/tools/testing/cxl/test/cxl.c
@@ -14,7 +14,7 @@
#define NR_CXL_HOST_BRIDGES 2
#define NR_CXL_ROOT_PORTS 2
#define NR_CXL_SWITCH_PORTS 2
-#define NR_CXL_PORT_DECODERS 2
+#define NR_CXL_PORT_DECODERS 8
static struct platform_device *cxl_acpi;
static struct platform_device *cxl_host_bridge[NR_CXL_HOST_BRIDGES];
@@ -118,7 +118,7 @@ static struct {
.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
.qtg_id = 0,
- .window_size = SZ_256M,
+ .window_size = SZ_256M * 4UL,
},
.target = { 0 },
},
@@ -133,7 +133,7 @@ static struct {
.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
.qtg_id = 1,
- .window_size = SZ_256M * 2,
+ .window_size = SZ_256M * 8UL,
},
.target = { 0, 1, },
},
@@ -148,7 +148,7 @@ static struct {
.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
ACPI_CEDT_CFMWS_RESTRICT_PMEM,
.qtg_id = 2,
- .window_size = SZ_256M,
+ .window_size = SZ_256M * 4UL,
},
.target = { 0 },
},
@@ -163,7 +163,7 @@ static struct {
.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
ACPI_CEDT_CFMWS_RESTRICT_PMEM,
.qtg_id = 3,
- .window_size = SZ_256M * 2,
+ .window_size = SZ_256M * 8UL,
},
.target = { 0, 1, },
},
@@ -429,6 +429,50 @@ static int map_targets(struct device *dev, void *data)
return 0;
}
+static int mock_decoder_commit(struct cxl_decoder *cxld)
+{
+ struct cxl_port *port = to_cxl_port(cxld->dev.parent);
+ int id = cxld->id;
+
+ if (cxld->flags & CXL_DECODER_F_ENABLE)
+ return 0;
+
+ dev_dbg(&port->dev, "%s commit\n", dev_name(&cxld->dev));
+ if (port->commit_end + 1 != id) {
+ dev_dbg(&port->dev,
+ "%s: out of order commit, expected decoder%d.%d\n",
+ dev_name(&cxld->dev), port->id, port->commit_end + 1);
+ return -EBUSY;
+ }
+
+ port->commit_end++;
+ cxld->flags |= CXL_DECODER_F_ENABLE;
+
+ return 0;
+}
+
+static int mock_decoder_reset(struct cxl_decoder *cxld)
+{
+ struct cxl_port *port = to_cxl_port(cxld->dev.parent);
+ int id = cxld->id;
+
+ if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
+ return 0;
+
+ dev_dbg(&port->dev, "%s reset\n", dev_name(&cxld->dev));
+ if (port->commit_end != id) {
+ dev_dbg(&port->dev,
+ "%s: out of order reset, expected decoder%d.%d\n",
+ dev_name(&cxld->dev), port->id, port->commit_end);
+ return -EBUSY;
+ }
+
+ port->commit_end--;
+ cxld->flags &= ~CXL_DECODER_F_ENABLE;
+
+ return 0;
+}
+
static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
{
struct cxl_port *port = cxlhdm->port;
@@ -451,25 +495,39 @@ static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
struct cxl_decoder *cxld;
int rc;
- if (target_count)
- cxld = cxl_switch_decoder_alloc(port, target_count);
- else
- cxld = cxl_endpoint_decoder_alloc(port);
- if (IS_ERR(cxld)) {
- dev_warn(&port->dev,
- "Failed to allocate the decoder\n");
- return PTR_ERR(cxld);
+ if (target_count) {
+ struct cxl_switch_decoder *cxlsd;
+
+ cxlsd = cxl_switch_decoder_alloc(port, target_count);
+ if (IS_ERR(cxlsd)) {
+ dev_warn(&port->dev,
+ "Failed to allocate the decoder\n");
+ return PTR_ERR(cxlsd);
+ }
+ cxld = &cxlsd->cxld;
+ } else {
+ struct cxl_endpoint_decoder *cxled;
+
+ cxled = cxl_endpoint_decoder_alloc(port);
+
+ if (IS_ERR(cxled)) {
+ dev_warn(&port->dev,
+ "Failed to allocate the decoder\n");
+ return PTR_ERR(cxled);
+ }
+ cxld = &cxled->cxld;
}
- cxld->decoder_range = (struct range) {
+ cxld->hpa_range = (struct range) {
.start = 0,
.end = -1,
};
- cxld->flags = CXL_DECODER_F_ENABLE;
cxld->interleave_ways = min_not_zero(target_count, 1);
cxld->interleave_granularity = SZ_4K;
cxld->target_type = CXL_DECODER_EXPANDER;
+ cxld->commit = mock_decoder_commit;
+ cxld->reset = mock_decoder_reset;
if (target_count) {
rc = device_for_each_child(port->uport, &ctx,
@@ -569,44 +627,6 @@ static void mock_companion(struct acpi_device *adev, struct device *dev)
#define SZ_512G (SZ_64G * 8)
#endif
-static struct platform_device *alloc_memdev(int id)
-{
- struct resource res[] = {
- [0] = {
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .flags = IORESOURCE_MEM,
- .desc = IORES_DESC_PERSISTENT_MEMORY,
- },
- };
- struct platform_device *pdev;
- int i, rc;
-
- for (i = 0; i < ARRAY_SIZE(res); i++) {
- struct cxl_mock_res *r = alloc_mock_res(SZ_256M);
-
- if (!r)
- return NULL;
- res[i].start = r->range.start;
- res[i].end = r->range.end;
- }
-
- pdev = platform_device_alloc("cxl_mem", id);
- if (!pdev)
- return NULL;
-
- rc = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
- if (rc)
- goto err;
-
- return pdev;
-
-err:
- platform_device_put(pdev);
- return NULL;
-}
-
static __init int cxl_test_init(void)
{
int rc, i;
@@ -619,7 +639,8 @@ static __init int cxl_test_init(void)
goto err_gen_pool_create;
}
- rc = gen_pool_add(cxl_mock_pool, SZ_512G, SZ_64G, NUMA_NO_NODE);
+ rc = gen_pool_add(cxl_mock_pool, iomem_resource.end + 1 - SZ_64G,
+ SZ_64G, NUMA_NO_NODE);
if (rc)
goto err_gen_pool_add;
@@ -708,7 +729,7 @@ static __init int cxl_test_init(void)
struct platform_device *dport = cxl_switch_dport[i];
struct platform_device *pdev;
- pdev = alloc_memdev(i);
+ pdev = platform_device_alloc("cxl_mem", i);
if (!pdev)
goto err_mem;
pdev->dev.parent = &dport->dev;
diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c
index 6b9239b2afd4..aa2df3a15051 100644
--- a/tools/testing/cxl/test/mem.c
+++ b/tools/testing/cxl/test/mem.c
@@ -10,6 +10,7 @@
#include <cxlmem.h>
#define LSA_SIZE SZ_128K
+#define DEV_SIZE SZ_2G
#define EFFECT(x) (1U << x)
static struct cxl_cel_entry mock_cel[] = {
@@ -26,6 +27,10 @@ static struct cxl_cel_entry mock_cel[] = {
.effect = cpu_to_le16(0),
},
{
+ .opcode = cpu_to_le16(CXL_MBOX_OP_GET_PARTITION_INFO),
+ .effect = cpu_to_le16(0),
+ },
+ {
.opcode = cpu_to_le16(CXL_MBOX_OP_SET_LSA),
.effect = cpu_to_le16(EFFECT(1) | EFFECT(2)),
},
@@ -97,42 +102,37 @@ static int mock_get_log(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
static int mock_id(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
{
- struct platform_device *pdev = to_platform_device(cxlds->dev);
struct cxl_mbox_identify id = {
.fw_revision = { "mock fw v1 " },
.lsa_size = cpu_to_le32(LSA_SIZE),
- /* FIXME: Add partition support */
- .partition_align = cpu_to_le64(0),
+ .partition_align =
+ cpu_to_le64(SZ_256M / CXL_CAPACITY_MULTIPLIER),
+ .total_capacity =
+ cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
};
- u64 capacity = 0;
- int i;
if (cmd->size_out < sizeof(id))
return -EINVAL;
- for (i = 0; i < 2; i++) {
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- if (!res)
- break;
-
- capacity += resource_size(res) / CXL_CAPACITY_MULTIPLIER;
+ memcpy(cmd->payload_out, &id, sizeof(id));
- if (le64_to_cpu(id.partition_align))
- continue;
+ return 0;
+}
- if (res->desc == IORES_DESC_PERSISTENT_MEMORY)
- id.persistent_capacity = cpu_to_le64(
- resource_size(res) / CXL_CAPACITY_MULTIPLIER);
- else
- id.volatile_capacity = cpu_to_le64(
- resource_size(res) / CXL_CAPACITY_MULTIPLIER);
- }
+static int mock_partition_info(struct cxl_dev_state *cxlds,
+ struct cxl_mbox_cmd *cmd)
+{
+ struct cxl_mbox_get_partition_info pi = {
+ .active_volatile_cap =
+ cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
+ .active_persistent_cap =
+ cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
+ };
- id.total_capacity = cpu_to_le64(capacity);
+ if (cmd->size_out < sizeof(pi))
+ return -EINVAL;
- memcpy(cmd->payload_out, &id, sizeof(id));
+ memcpy(cmd->payload_out, &pi, sizeof(pi));
return 0;
}
@@ -221,6 +221,9 @@ static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *
case CXL_MBOX_OP_GET_LSA:
rc = mock_get_lsa(cxlds, cmd);
break;
+ case CXL_MBOX_OP_GET_PARTITION_INFO:
+ rc = mock_partition_info(cxlds, cmd);
+ break;
case CXL_MBOX_OP_SET_LSA:
rc = mock_set_lsa(cxlds, cmd);
break;
@@ -282,7 +285,7 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
if (IS_ERR(cxlmd))
return PTR_ERR(cxlmd);
- if (range_len(&cxlds->pmem_range) && IS_ENABLED(CONFIG_CXL_PMEM))
+ if (resource_size(&cxlds->pmem_res) && IS_ENABLED(CONFIG_CXL_PMEM))
rc = devm_cxl_add_nvdimm(dev, cxlmd);
return 0;
diff --git a/tools/testing/cxl/test/mock.c b/tools/testing/cxl/test/mock.c
index f1f8c40948c5..bce6a21df0d5 100644
--- a/tools/testing/cxl/test/mock.c
+++ b/tools/testing/cxl/test/mock.c
@@ -208,13 +208,15 @@ int __wrap_cxl_await_media_ready(struct cxl_dev_state *cxlds)
}
EXPORT_SYMBOL_NS_GPL(__wrap_cxl_await_media_ready, CXL);
-bool __wrap_cxl_hdm_decode_init(struct cxl_dev_state *cxlds,
- struct cxl_hdm *cxlhdm)
+int __wrap_cxl_hdm_decode_init(struct cxl_dev_state *cxlds,
+ struct cxl_hdm *cxlhdm)
{
int rc = 0, index;
struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
- if (!ops || !ops->is_mock_dev(cxlds->dev))
+ if (ops && ops->is_mock_dev(cxlds->dev))
+ rc = 0;
+ else
rc = cxl_hdm_decode_init(cxlds, cxlhdm);
put_cxl_mock_ops(index);
diff --git a/tools/testing/memblock/Makefile b/tools/testing/memblock/Makefile
index a698e24b35e7..246f7ac8489b 100644
--- a/tools/testing/memblock/Makefile
+++ b/tools/testing/memblock/Makefile
@@ -45,9 +45,8 @@ help:
@echo ' clean - Remove generated files and symlinks in the directory'
@echo ''
@echo 'Configuration:'
+ @echo ' make MEMBLOCK_DEBUG=1 - enable memblock_dbg() messages'
@echo ' make NUMA=1 - simulate enabled NUMA'
- @echo ' make MOVABLE_NODE=1 - override `movable_node_is_enabled`'
- @echo ' definition to simulate movable NUMA nodes'
@echo ' make 32BIT_PHYS_ADDR_T=1 - Use 32 bit physical addresses'
vpath %.c ../../lib
diff --git a/tools/testing/memblock/README b/tools/testing/memblock/README
index ca6afcff013a..7ca437d81806 100644
--- a/tools/testing/memblock/README
+++ b/tools/testing/memblock/README
@@ -33,12 +33,23 @@ To run the tests, build the main target and run it:
$ make && ./main
-A successful run produces no output. It is also possible to override different
-configuration parameters. For example, to simulate enabled NUMA, use:
+A successful run produces no output. It is possible to control the behavior
+by passing options from the command line. For example, to include verbose output,
+append the `-v` option when you run the tests:
+
+$ ./main -v
+
+This will print information about which functions are being tested and the
+number of test cases that passed.
+
+For the full list of command line options, see `./main --help`.
+
+It is also possible to override different configuration parameters to change
+the test functions. For example, to simulate enabled NUMA, use:
$ make NUMA=1
-For the full list of options, see `make help`.
+For the full list of build options, see `make help`.
Project structure
=================
diff --git a/tools/testing/memblock/TODO b/tools/testing/memblock/TODO
index cd1a30d5acc9..33044c634ea7 100644
--- a/tools/testing/memblock/TODO
+++ b/tools/testing/memblock/TODO
@@ -1,25 +1,17 @@
TODO
=====
-1. Add verbose output (e.g., what is being tested and how many tests cases are
- passing)
-
-2. Add flags to Makefile:
- + verbosity level
- + enable memblock_dbg() messages (i.e. pass "-D CONFIG_DEBUG_MEMORY_INIT"
- flag)
-
-3. Add tests trying to memblock_add() or memblock_reserve() 129th region.
+1. Add tests trying to memblock_add() or memblock_reserve() 129th region.
This will trigger memblock_double_array(), make sure it succeeds.
*Important:* These tests require valid memory ranges, use dummy physical
memory block from common.c to implement them. It is also very
likely that the current MEM_SIZE won't be enough for these
test cases. Use realloc to adjust the size accordingly.
-4. Add test cases using this functions (implement them for both directions):
+2. Add test cases using these functions (implement them for both directions):
+ memblock_alloc_raw()
+ memblock_alloc_exact_nid_raw()
+ memblock_alloc_try_nid_raw()
-5. Add tests for memblock_alloc_node() to check if the correct NUMA node is set
+3. Add tests for memblock_alloc_node() to check if the correct NUMA node is set
for the new region
diff --git a/tools/testing/memblock/internal.h b/tools/testing/memblock/internal.h
index 94b52a8718b5..fdb7f5db7308 100644
--- a/tools/testing/memblock/internal.h
+++ b/tools/testing/memblock/internal.h
@@ -2,6 +2,17 @@
#ifndef _MM_INTERNAL_H
#define _MM_INTERNAL_H
+/*
+ * Enable memblock_dbg() messages
+ */
+#ifdef MEMBLOCK_DEBUG
+static int memblock_debug = 1;
+#endif
+
+#define pr_warn_ratelimited(fmt, ...) printf(fmt, ##__VA_ARGS__)
+
+bool mirrored_kernelcore = false;
+
struct page {};
void memblock_free_pages(struct page *page, unsigned long pfn,
diff --git a/tools/testing/memblock/linux/kmemleak.h b/tools/testing/memblock/linux/kmemleak.h
index 462f8c5e8aa0..5fed13bb9ec4 100644
--- a/tools/testing/memblock/linux/kmemleak.h
+++ b/tools/testing/memblock/linux/kmemleak.h
@@ -7,7 +7,7 @@ static inline void kmemleak_free_part_phys(phys_addr_t phys, size_t size)
}
static inline void kmemleak_alloc_phys(phys_addr_t phys, size_t size,
- int min_count, gfp_t gfp)
+ gfp_t gfp)
{
}
diff --git a/tools/testing/memblock/linux/memory_hotplug.h b/tools/testing/memblock/linux/memory_hotplug.h
index 47988765a219..dabe2c556858 100644
--- a/tools/testing/memblock/linux/memory_hotplug.h
+++ b/tools/testing/memblock/linux/memory_hotplug.h
@@ -7,13 +7,11 @@
#include <linux/cache.h>
#include <linux/types.h>
+extern bool movable_node_enabled;
+
static inline bool movable_node_is_enabled(void)
{
-#ifdef MOVABLE_NODE
- return true;
-#else
- return false;
-#endif
+ return movable_node_enabled;
}
#endif
diff --git a/tools/testing/memblock/main.c b/tools/testing/memblock/main.c
index fb183c9e76d1..4ca1024342b1 100644
--- a/tools/testing/memblock/main.c
+++ b/tools/testing/memblock/main.c
@@ -3,9 +3,11 @@
#include "tests/alloc_api.h"
#include "tests/alloc_helpers_api.h"
#include "tests/alloc_nid_api.h"
+#include "tests/common.h"
int main(int argc, char **argv)
{
+ parse_args(argc, argv);
memblock_basic_checks();
memblock_alloc_checks();
memblock_alloc_helpers_checks();
diff --git a/tools/testing/memblock/scripts/Makefile.include b/tools/testing/memblock/scripts/Makefile.include
index 641569ccb7b0..aa6d82d56a23 100644
--- a/tools/testing/memblock/scripts/Makefile.include
+++ b/tools/testing/memblock/scripts/Makefile.include
@@ -6,14 +6,14 @@ ifeq ($(NUMA), 1)
CFLAGS += -D CONFIG_NUMA
endif
-# Simulate movable NUMA memory regions
-ifeq ($(MOVABLE_NODE), 1)
- CFLAGS += -D MOVABLE_NODE
-endif
-
# Use 32 bit physical addresses.
# Remember to install 32-bit version of dependencies.
ifeq ($(32BIT_PHYS_ADDR_T), 1)
CFLAGS += -m32 -U CONFIG_PHYS_ADDR_T_64BIT
LDFLAGS += -m32
endif
+
+# Enable memblock_dbg() messages
+ifeq ($(MEMBLOCK_DEBUG), 1)
+ CFLAGS += -D MEMBLOCK_DEBUG
+endif
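
As a side note on the build knob added above: like the existing NUMA and 32BIT_PHYS_ADDR_T switches in this fragment, MEMBLOCK_DEBUG is intended to be passed on the make command line. A minimal usage sketch follows; it assumes the tests are built via the top-level Makefile in tools/testing/memblock that includes scripts/Makefile.include, which is not shown in this patch:

    # build the memblock simulator with memblock_dbg() output enabled
    make -C tools/testing/memblock MEMBLOCK_DEBUG=1

    # the knob can be combined with the existing switches, e.g. NUMA
    make -C tools/testing/memblock NUMA=1 MEMBLOCK_DEBUG=1
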
diff --git a/tools/testing/memblock/tests/alloc_api.c b/tools/testing/memblock/tests/alloc_api.c
index d1aa7e15c18d..a14f38eb8a89 100644
--- a/tools/testing/memblock/tests/alloc_api.c
+++ b/tools/testing/memblock/tests/alloc_api.c
@@ -10,6 +10,8 @@ static int alloc_top_down_simple_check(void)
struct memblock_region *rgn = &memblock.reserved.regions[0];
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
phys_addr_t size = SZ_2;
phys_addr_t expected_start;
@@ -19,12 +21,14 @@ static int alloc_top_down_simple_check(void)
allocated_ptr = memblock_alloc(size, SMP_CACHE_BYTES);
- assert(allocated_ptr);
- assert(rgn->size == size);
- assert(rgn->base == expected_start);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, expected_start);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == size);
+ test_pass_pop();
return 0;
}
@@ -55,6 +59,8 @@ static int alloc_top_down_disjoint_check(void)
struct region r1;
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
phys_addr_t r2_size = SZ_16;
/* Use custom alignment */
phys_addr_t alignment = SMP_CACHE_BYTES * 2;
@@ -73,15 +79,17 @@ static int alloc_top_down_disjoint_check(void)
allocated_ptr = memblock_alloc(r2_size, alignment);
- assert(allocated_ptr);
- assert(rgn1->size == r1.size);
- assert(rgn1->base == r1.base);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn1->size, r1.size);
+ ASSERT_EQ(rgn1->base, r1.base);
- assert(rgn2->size == r2_size);
- assert(rgn2->base == expected_start);
+ ASSERT_EQ(rgn2->size, r2_size);
+ ASSERT_EQ(rgn2->base, expected_start);
- assert(memblock.reserved.cnt == 2);
- assert(memblock.reserved.total_size == total_size);
+ ASSERT_EQ(memblock.reserved.cnt, 2);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
return 0;
}
@@ -101,6 +109,8 @@ static int alloc_top_down_before_check(void)
struct memblock_region *rgn = &memblock.reserved.regions[0];
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
/*
* The first region ends at the aligned address to test region merging
*/
@@ -114,12 +124,14 @@ static int alloc_top_down_before_check(void)
allocated_ptr = memblock_alloc(r2_size, SMP_CACHE_BYTES);
- assert(allocated_ptr);
- assert(rgn->size == total_size);
- assert(rgn->base == memblock_end_of_DRAM() - total_size);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->size, total_size);
+ ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - total_size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == total_size);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
return 0;
}
@@ -141,6 +153,8 @@ static int alloc_top_down_after_check(void)
struct region r1;
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
phys_addr_t r2_size = SZ_512;
phys_addr_t total_size;
@@ -158,12 +172,14 @@ static int alloc_top_down_after_check(void)
allocated_ptr = memblock_alloc(r2_size, SMP_CACHE_BYTES);
- assert(allocated_ptr);
- assert(rgn->size == total_size);
- assert(rgn->base == r1.base - r2_size);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->size, total_size);
+ ASSERT_EQ(rgn->base, r1.base - r2_size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == total_size);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
return 0;
}
@@ -186,6 +202,8 @@ static int alloc_top_down_second_fit_check(void)
struct region r1, r2;
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
phys_addr_t r3_size = SZ_1K;
phys_addr_t total_size;
@@ -204,12 +222,14 @@ static int alloc_top_down_second_fit_check(void)
allocated_ptr = memblock_alloc(r3_size, SMP_CACHE_BYTES);
- assert(allocated_ptr);
- assert(rgn->size == r2.size + r3_size);
- assert(rgn->base == r2.base - r3_size);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->size, r2.size + r3_size);
+ ASSERT_EQ(rgn->base, r2.base - r3_size);
- assert(memblock.reserved.cnt == 2);
- assert(memblock.reserved.total_size == total_size);
+ ASSERT_EQ(memblock.reserved.cnt, 2);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
return 0;
}
@@ -231,6 +251,8 @@ static int alloc_in_between_generic_check(void)
struct region r1, r2;
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
phys_addr_t gap_size = SMP_CACHE_BYTES;
phys_addr_t r3_size = SZ_64;
/*
@@ -254,12 +276,14 @@ static int alloc_in_between_generic_check(void)
allocated_ptr = memblock_alloc(r3_size, SMP_CACHE_BYTES);
- assert(allocated_ptr);
- assert(rgn->size == total_size);
- assert(rgn->base == r1.base - r2.size - r3_size);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->size, total_size);
+ ASSERT_EQ(rgn->base, r1.base - r2.size - r3_size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == total_size);
+ test_pass_pop();
return 0;
}
@@ -281,6 +305,8 @@ static int alloc_small_gaps_generic_check(void)
{
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
phys_addr_t region_size = SZ_1K;
phys_addr_t gap_size = SZ_256;
phys_addr_t region_end;
@@ -296,7 +322,9 @@ static int alloc_small_gaps_generic_check(void)
allocated_ptr = memblock_alloc(region_size, SMP_CACHE_BYTES);
- assert(!allocated_ptr);
+ ASSERT_EQ(allocated_ptr, NULL);
+
+ test_pass_pop();
return 0;
}
@@ -309,6 +337,8 @@ static int alloc_all_reserved_generic_check(void)
{
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
setup_memblock();
/* Simulate full memory */
@@ -316,7 +346,9 @@ static int alloc_all_reserved_generic_check(void)
allocated_ptr = memblock_alloc(SZ_256, SMP_CACHE_BYTES);
- assert(!allocated_ptr);
+ ASSERT_EQ(allocated_ptr, NULL);
+
+ test_pass_pop();
return 0;
}
@@ -338,6 +370,8 @@ static int alloc_no_space_generic_check(void)
{
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
setup_memblock();
phys_addr_t available_size = SZ_256;
@@ -348,7 +382,9 @@ static int alloc_no_space_generic_check(void)
allocated_ptr = memblock_alloc(SZ_1K, SMP_CACHE_BYTES);
- assert(!allocated_ptr);
+ ASSERT_EQ(allocated_ptr, NULL);
+
+ test_pass_pop();
return 0;
}
@@ -369,6 +405,8 @@ static int alloc_limited_space_generic_check(void)
struct memblock_region *rgn = &memblock.reserved.regions[0];
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
phys_addr_t available_size = SZ_256;
phys_addr_t reserved_size = MEM_SIZE - available_size;
@@ -379,12 +417,14 @@ static int alloc_limited_space_generic_check(void)
allocated_ptr = memblock_alloc(available_size, SMP_CACHE_BYTES);
- assert(allocated_ptr);
- assert(rgn->size == MEM_SIZE);
- assert(rgn->base == memblock_start_of_DRAM());
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->size, MEM_SIZE);
+ ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, MEM_SIZE);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == MEM_SIZE);
+ test_pass_pop();
return 0;
}
@@ -399,14 +439,18 @@ static int alloc_no_memory_generic_check(void)
struct memblock_region *rgn = &memblock.reserved.regions[0];
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
reset_memblock_regions();
allocated_ptr = memblock_alloc(SZ_1K, SMP_CACHE_BYTES);
- assert(!allocated_ptr);
- assert(rgn->size == 0);
- assert(rgn->base == 0);
- assert(memblock.reserved.total_size == 0);
+ ASSERT_EQ(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->size, 0);
+ ASSERT_EQ(rgn->base, 0);
+ ASSERT_EQ(memblock.reserved.total_size, 0);
+
+ test_pass_pop();
return 0;
}
@@ -421,16 +465,20 @@ static int alloc_bottom_up_simple_check(void)
struct memblock_region *rgn = &memblock.reserved.regions[0];
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
setup_memblock();
allocated_ptr = memblock_alloc(SZ_2, SMP_CACHE_BYTES);
- assert(allocated_ptr);
- assert(rgn->size == SZ_2);
- assert(rgn->base == memblock_start_of_DRAM());
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->size, SZ_2);
+ ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == SZ_2);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, SZ_2);
+
+ test_pass_pop();
return 0;
}
@@ -459,6 +507,8 @@ static int alloc_bottom_up_disjoint_check(void)
struct region r1;
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
phys_addr_t r2_size = SZ_16;
/* Use custom alignment */
phys_addr_t alignment = SMP_CACHE_BYTES * 2;
@@ -477,16 +527,18 @@ static int alloc_bottom_up_disjoint_check(void)
allocated_ptr = memblock_alloc(r2_size, alignment);
- assert(allocated_ptr);
+ ASSERT_NE(allocated_ptr, NULL);
- assert(rgn1->size == r1.size);
- assert(rgn1->base == r1.base);
+ ASSERT_EQ(rgn1->size, r1.size);
+ ASSERT_EQ(rgn1->base, r1.base);
- assert(rgn2->size == r2_size);
- assert(rgn2->base == expected_start);
+ ASSERT_EQ(rgn2->size, r2_size);
+ ASSERT_EQ(rgn2->base, expected_start);
- assert(memblock.reserved.cnt == 2);
- assert(memblock.reserved.total_size == total_size);
+ ASSERT_EQ(memblock.reserved.cnt, 2);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
return 0;
}
@@ -506,6 +558,8 @@ static int alloc_bottom_up_before_check(void)
struct memblock_region *rgn = &memblock.reserved.regions[0];
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
phys_addr_t r1_size = SZ_512;
phys_addr_t r2_size = SZ_128;
phys_addr_t total_size = r1_size + r2_size;
@@ -516,12 +570,14 @@ static int alloc_bottom_up_before_check(void)
allocated_ptr = memblock_alloc(r1_size, SMP_CACHE_BYTES);
- assert(allocated_ptr);
- assert(rgn->size == total_size);
- assert(rgn->base == memblock_start_of_DRAM());
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->size, total_size);
+ ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == total_size);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
return 0;
}
@@ -542,6 +598,8 @@ static int alloc_bottom_up_after_check(void)
struct region r1;
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
phys_addr_t r2_size = SZ_512;
phys_addr_t total_size;
@@ -559,12 +617,14 @@ static int alloc_bottom_up_after_check(void)
allocated_ptr = memblock_alloc(r2_size, SMP_CACHE_BYTES);
- assert(allocated_ptr);
- assert(rgn->size == total_size);
- assert(rgn->base == r1.base);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->size, total_size);
+ ASSERT_EQ(rgn->base, r1.base);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == total_size);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
return 0;
}
@@ -588,6 +648,8 @@ static int alloc_bottom_up_second_fit_check(void)
struct region r1, r2;
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
phys_addr_t r3_size = SZ_1K;
phys_addr_t total_size;
@@ -606,12 +668,14 @@ static int alloc_bottom_up_second_fit_check(void)
allocated_ptr = memblock_alloc(r3_size, SMP_CACHE_BYTES);
- assert(allocated_ptr);
- assert(rgn->size == r2.size + r3_size);
- assert(rgn->base == r2.base);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->size, r2.size + r3_size);
+ ASSERT_EQ(rgn->base, r2.base);
- assert(memblock.reserved.cnt == 2);
- assert(memblock.reserved.total_size == total_size);
+ ASSERT_EQ(memblock.reserved.cnt, 2);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
return 0;
}
@@ -619,6 +683,7 @@ static int alloc_bottom_up_second_fit_check(void)
/* Test case wrappers */
static int alloc_simple_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_top_down_simple_check();
memblock_set_bottom_up(true);
@@ -629,6 +694,7 @@ static int alloc_simple_check(void)
static int alloc_disjoint_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_top_down_disjoint_check();
memblock_set_bottom_up(true);
@@ -639,6 +705,7 @@ static int alloc_disjoint_check(void)
static int alloc_before_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_top_down_before_check();
memblock_set_bottom_up(true);
@@ -649,6 +716,7 @@ static int alloc_before_check(void)
static int alloc_after_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_top_down_after_check();
memblock_set_bottom_up(true);
@@ -659,6 +727,7 @@ static int alloc_after_check(void)
static int alloc_in_between_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_in_between_generic_check();
memblock_set_bottom_up(true);
@@ -669,6 +738,7 @@ static int alloc_in_between_check(void)
static int alloc_second_fit_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_top_down_second_fit_check();
memblock_set_bottom_up(true);
@@ -679,6 +749,7 @@ static int alloc_second_fit_check(void)
static int alloc_small_gaps_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_small_gaps_generic_check();
memblock_set_bottom_up(true);
@@ -689,6 +760,7 @@ static int alloc_small_gaps_check(void)
static int alloc_all_reserved_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_all_reserved_generic_check();
memblock_set_bottom_up(true);
@@ -699,6 +771,7 @@ static int alloc_all_reserved_check(void)
static int alloc_no_space_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_no_space_generic_check();
memblock_set_bottom_up(true);
@@ -709,6 +782,7 @@ static int alloc_no_space_check(void)
static int alloc_limited_space_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_limited_space_generic_check();
memblock_set_bottom_up(true);
@@ -719,6 +793,7 @@ static int alloc_limited_space_check(void)
static int alloc_no_memory_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_no_memory_generic_check();
memblock_set_bottom_up(true);
@@ -729,6 +804,12 @@ static int alloc_no_memory_check(void)
int memblock_alloc_checks(void)
{
+ const char *func_testing = "memblock_alloc";
+
+ prefix_reset();
+ prefix_push(func_testing);
+ test_print("Running %s tests...\n", func_testing);
+
reset_memblock_attributes();
dummy_physical_memory_init();
@@ -746,5 +827,7 @@ int memblock_alloc_checks(void)
dummy_physical_memory_cleanup();
+ prefix_pop();
+
return 0;
}
diff --git a/tools/testing/memblock/tests/alloc_helpers_api.c b/tools/testing/memblock/tests/alloc_helpers_api.c
index 963a966db461..1069b4bdd5fd 100644
--- a/tools/testing/memblock/tests/alloc_helpers_api.c
+++ b/tools/testing/memblock/tests/alloc_helpers_api.c
@@ -21,6 +21,8 @@ static int alloc_from_simple_generic_check(void)
void *allocated_ptr = NULL;
char *b;
+ PREFIX_PUSH();
+
phys_addr_t size = SZ_16;
phys_addr_t min_addr;
@@ -31,14 +33,16 @@ static int alloc_from_simple_generic_check(void)
allocated_ptr = memblock_alloc_from(size, SMP_CACHE_BYTES, min_addr);
b = (char *)allocated_ptr;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
+
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, min_addr);
- assert(rgn->size == size);
- assert(rgn->base == min_addr);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == size);
+ test_pass_pop();
return 0;
}
@@ -64,6 +68,8 @@ static int alloc_from_misaligned_generic_check(void)
void *allocated_ptr = NULL;
char *b;
+ PREFIX_PUSH();
+
phys_addr_t size = SZ_32;
phys_addr_t min_addr;
@@ -75,14 +81,16 @@ static int alloc_from_misaligned_generic_check(void)
allocated_ptr = memblock_alloc_from(size, SMP_CACHE_BYTES, min_addr);
b = (char *)allocated_ptr;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
- assert(rgn->size == size);
- assert(rgn->base == memblock_end_of_DRAM() - SMP_CACHE_BYTES);
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - SMP_CACHE_BYTES);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == size);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
return 0;
}
@@ -110,6 +118,8 @@ static int alloc_from_top_down_high_addr_check(void)
struct memblock_region *rgn = &memblock.reserved.regions[0];
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
phys_addr_t size = SZ_32;
phys_addr_t min_addr;
@@ -120,12 +130,14 @@ static int alloc_from_top_down_high_addr_check(void)
allocated_ptr = memblock_alloc_from(size, SMP_CACHE_BYTES, min_addr);
- assert(allocated_ptr);
- assert(rgn->size == size);
- assert(rgn->base == memblock_end_of_DRAM() - SMP_CACHE_BYTES);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - SMP_CACHE_BYTES);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == size);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
return 0;
}
@@ -151,6 +163,8 @@ static int alloc_from_top_down_no_space_above_check(void)
struct memblock_region *rgn = &memblock.reserved.regions[0];
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
phys_addr_t r1_size = SZ_64;
phys_addr_t r2_size = SZ_2;
phys_addr_t total_size = r1_size + r2_size;
@@ -165,12 +179,14 @@ static int alloc_from_top_down_no_space_above_check(void)
allocated_ptr = memblock_alloc_from(r1_size, SMP_CACHE_BYTES, min_addr);
- assert(allocated_ptr);
- assert(rgn->base == min_addr - r1_size);
- assert(rgn->size == total_size);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->base, min_addr - r1_size);
+ ASSERT_EQ(rgn->size, total_size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == total_size);
+ test_pass_pop();
return 0;
}
@@ -186,6 +202,8 @@ static int alloc_from_top_down_min_addr_cap_check(void)
struct memblock_region *rgn = &memblock.reserved.regions[0];
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
phys_addr_t r1_size = SZ_64;
phys_addr_t min_addr;
phys_addr_t start_addr;
@@ -199,12 +217,14 @@ static int alloc_from_top_down_min_addr_cap_check(void)
allocated_ptr = memblock_alloc_from(r1_size, SMP_CACHE_BYTES, min_addr);
- assert(allocated_ptr);
- assert(rgn->base == start_addr);
- assert(rgn->size == MEM_SIZE);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->base, start_addr);
+ ASSERT_EQ(rgn->size, MEM_SIZE);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, MEM_SIZE);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == MEM_SIZE);
+ test_pass_pop();
return 0;
}
@@ -230,6 +250,8 @@ static int alloc_from_bottom_up_high_addr_check(void)
struct memblock_region *rgn = &memblock.reserved.regions[0];
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
phys_addr_t size = SZ_32;
phys_addr_t min_addr;
@@ -240,12 +262,14 @@ static int alloc_from_bottom_up_high_addr_check(void)
allocated_ptr = memblock_alloc_from(size, SMP_CACHE_BYTES, min_addr);
- assert(allocated_ptr);
- assert(rgn->size == size);
- assert(rgn->base == memblock_start_of_DRAM());
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == size);
+ test_pass_pop();
return 0;
}
@@ -270,6 +294,8 @@ static int alloc_from_bottom_up_no_space_above_check(void)
struct memblock_region *rgn = &memblock.reserved.regions[0];
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
phys_addr_t r1_size = SZ_64;
phys_addr_t min_addr;
phys_addr_t r2_size;
@@ -284,12 +310,14 @@ static int alloc_from_bottom_up_no_space_above_check(void)
allocated_ptr = memblock_alloc_from(r1_size, SMP_CACHE_BYTES, min_addr);
- assert(allocated_ptr);
- assert(rgn->base == memblock_start_of_DRAM());
- assert(rgn->size == r1_size);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
+ ASSERT_EQ(rgn->size, r1_size);
- assert(memblock.reserved.cnt == 2);
- assert(memblock.reserved.total_size == r1_size + r2_size);
+ ASSERT_EQ(memblock.reserved.cnt, 2);
+ ASSERT_EQ(memblock.reserved.total_size, r1_size + r2_size);
+
+ test_pass_pop();
return 0;
}
@@ -304,6 +332,8 @@ static int alloc_from_bottom_up_min_addr_cap_check(void)
struct memblock_region *rgn = &memblock.reserved.regions[0];
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
phys_addr_t r1_size = SZ_64;
phys_addr_t min_addr;
phys_addr_t start_addr;
@@ -315,12 +345,14 @@ static int alloc_from_bottom_up_min_addr_cap_check(void)
allocated_ptr = memblock_alloc_from(r1_size, SMP_CACHE_BYTES, min_addr);
- assert(allocated_ptr);
- assert(rgn->base == start_addr);
- assert(rgn->size == r1_size);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->base, start_addr);
+ ASSERT_EQ(rgn->size, r1_size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == r1_size);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, r1_size);
+
+ test_pass_pop();
return 0;
}
@@ -328,6 +360,7 @@ static int alloc_from_bottom_up_min_addr_cap_check(void)
/* Test case wrappers */
static int alloc_from_simple_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_from_simple_generic_check();
memblock_set_bottom_up(true);
@@ -338,6 +371,7 @@ static int alloc_from_simple_check(void)
static int alloc_from_misaligned_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_from_misaligned_generic_check();
memblock_set_bottom_up(true);
@@ -348,6 +382,7 @@ static int alloc_from_misaligned_check(void)
static int alloc_from_high_addr_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_from_top_down_high_addr_check();
memblock_set_bottom_up(true);
@@ -358,6 +393,7 @@ static int alloc_from_high_addr_check(void)
static int alloc_from_no_space_above_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_from_top_down_no_space_above_check();
memblock_set_bottom_up(true);
@@ -368,6 +404,7 @@ static int alloc_from_no_space_above_check(void)
static int alloc_from_min_addr_cap_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_from_top_down_min_addr_cap_check();
memblock_set_bottom_up(true);
@@ -378,6 +415,12 @@ static int alloc_from_min_addr_cap_check(void)
int memblock_alloc_helpers_checks(void)
{
+ const char *func_testing = "memblock_alloc_from";
+
+ prefix_reset();
+ prefix_push(func_testing);
+ test_print("Running %s tests...\n", func_testing);
+
reset_memblock_attributes();
dummy_physical_memory_init();
@@ -389,5 +432,7 @@ int memblock_alloc_helpers_checks(void)
dummy_physical_memory_cleanup();
+ prefix_pop();
+
return 0;
}
diff --git a/tools/testing/memblock/tests/alloc_nid_api.c b/tools/testing/memblock/tests/alloc_nid_api.c
index 6390206e50e1..255fd514e9f5 100644
--- a/tools/testing/memblock/tests/alloc_nid_api.c
+++ b/tools/testing/memblock/tests/alloc_nid_api.c
@@ -21,6 +21,8 @@ static int alloc_try_nid_top_down_simple_check(void)
void *allocated_ptr = NULL;
char *b;
+ PREFIX_PUSH();
+
phys_addr_t size = SZ_128;
phys_addr_t min_addr;
phys_addr_t max_addr;
@@ -36,15 +38,17 @@ static int alloc_try_nid_top_down_simple_check(void)
b = (char *)allocated_ptr;
rgn_end = rgn->base + rgn->size;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
+
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, max_addr - size);
+ ASSERT_EQ(rgn_end, max_addr);
- assert(rgn->size == size);
- assert(rgn->base == max_addr - size);
- assert(rgn_end == max_addr);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == size);
+ test_pass_pop();
return 0;
}
@@ -72,6 +76,8 @@ static int alloc_try_nid_top_down_end_misaligned_check(void)
void *allocated_ptr = NULL;
char *b;
+ PREFIX_PUSH();
+
phys_addr_t size = SZ_128;
phys_addr_t misalign = SZ_2;
phys_addr_t min_addr;
@@ -88,15 +94,17 @@ static int alloc_try_nid_top_down_end_misaligned_check(void)
b = (char *)allocated_ptr;
rgn_end = rgn->base + rgn->size;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
- assert(rgn->size == size);
- assert(rgn->base == max_addr - size - misalign);
- assert(rgn_end < max_addr);
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, max_addr - size - misalign);
+ ASSERT_LT(rgn_end, max_addr);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == size);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
return 0;
}
@@ -122,6 +130,8 @@ static int alloc_try_nid_exact_address_generic_check(void)
void *allocated_ptr = NULL;
char *b;
+ PREFIX_PUSH();
+
phys_addr_t size = SZ_1K;
phys_addr_t min_addr;
phys_addr_t max_addr;
@@ -137,15 +147,17 @@ static int alloc_try_nid_exact_address_generic_check(void)
b = (char *)allocated_ptr;
rgn_end = rgn->base + rgn->size;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
+
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, min_addr);
+ ASSERT_EQ(rgn_end, max_addr);
- assert(rgn->size == size);
- assert(rgn->base == min_addr);
- assert(rgn_end == max_addr);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == size);
+ test_pass_pop();
return 0;
}
@@ -173,6 +185,8 @@ static int alloc_try_nid_top_down_narrow_range_check(void)
void *allocated_ptr = NULL;
char *b;
+ PREFIX_PUSH();
+
phys_addr_t size = SZ_256;
phys_addr_t min_addr;
phys_addr_t max_addr;
@@ -186,14 +200,16 @@ static int alloc_try_nid_top_down_narrow_range_check(void)
min_addr, max_addr, NUMA_NO_NODE);
b = (char *)allocated_ptr;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
+
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, max_addr - size);
- assert(rgn->size == size);
- assert(rgn->base == max_addr - size);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == size);
+ test_pass_pop();
return 0;
}
@@ -222,6 +238,8 @@ static int alloc_try_nid_low_max_generic_check(void)
{
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
phys_addr_t size = SZ_1K;
phys_addr_t min_addr;
phys_addr_t max_addr;
@@ -234,7 +252,9 @@ static int alloc_try_nid_low_max_generic_check(void)
allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
min_addr, max_addr, NUMA_NO_NODE);
- assert(!allocated_ptr);
+ ASSERT_EQ(allocated_ptr, NULL);
+
+ test_pass_pop();
return 0;
}
@@ -259,6 +279,8 @@ static int alloc_try_nid_min_reserved_generic_check(void)
void *allocated_ptr = NULL;
char *b;
+ PREFIX_PUSH();
+
phys_addr_t r1_size = SZ_128;
phys_addr_t r2_size = SZ_64;
phys_addr_t total_size = r1_size + r2_size;
@@ -278,14 +300,16 @@ static int alloc_try_nid_min_reserved_generic_check(void)
min_addr, max_addr, NUMA_NO_NODE);
b = (char *)allocated_ptr;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
- assert(rgn->size == total_size);
- assert(rgn->base == reserved_base);
+ ASSERT_EQ(rgn->size, total_size);
+ ASSERT_EQ(rgn->base, reserved_base);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == total_size);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
return 0;
}
@@ -310,6 +334,8 @@ static int alloc_try_nid_max_reserved_generic_check(void)
void *allocated_ptr = NULL;
char *b;
+ PREFIX_PUSH();
+
phys_addr_t r1_size = SZ_64;
phys_addr_t r2_size = SZ_128;
phys_addr_t total_size = r1_size + r2_size;
@@ -327,14 +353,16 @@ static int alloc_try_nid_max_reserved_generic_check(void)
min_addr, max_addr, NUMA_NO_NODE);
b = (char *)allocated_ptr;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
+
+ ASSERT_EQ(rgn->size, total_size);
+ ASSERT_EQ(rgn->base, min_addr);
- assert(rgn->size == total_size);
- assert(rgn->base == min_addr);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == total_size);
+ test_pass_pop();
return 0;
}
@@ -364,6 +392,8 @@ static int alloc_try_nid_top_down_reserved_with_space_check(void)
char *b;
struct region r1, r2;
+ PREFIX_PUSH();
+
phys_addr_t r3_size = SZ_64;
phys_addr_t gap_size = SMP_CACHE_BYTES;
phys_addr_t total_size;
@@ -389,17 +419,19 @@ static int alloc_try_nid_top_down_reserved_with_space_check(void)
min_addr, max_addr, NUMA_NO_NODE);
b = (char *)allocated_ptr;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
+
+ ASSERT_EQ(rgn1->size, r1.size + r3_size);
+ ASSERT_EQ(rgn1->base, max_addr - r3_size);
- assert(rgn1->size == r1.size + r3_size);
- assert(rgn1->base == max_addr - r3_size);
+ ASSERT_EQ(rgn2->size, r2.size);
+ ASSERT_EQ(rgn2->base, r2.base);
- assert(rgn2->size == r2.size);
- assert(rgn2->base == r2.base);
+ ASSERT_EQ(memblock.reserved.cnt, 2);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
- assert(memblock.reserved.cnt == 2);
- assert(memblock.reserved.total_size == total_size);
+ test_pass_pop();
return 0;
}
@@ -427,6 +459,8 @@ static int alloc_try_nid_reserved_full_merge_generic_check(void)
char *b;
struct region r1, r2;
+ PREFIX_PUSH();
+
phys_addr_t r3_size = SZ_64;
phys_addr_t total_size;
phys_addr_t max_addr;
@@ -451,14 +485,16 @@ static int alloc_try_nid_reserved_full_merge_generic_check(void)
min_addr, max_addr, NUMA_NO_NODE);
b = (char *)allocated_ptr;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
- assert(rgn->size == total_size);
- assert(rgn->base == r2.base);
+ ASSERT_EQ(rgn->size, total_size);
+ ASSERT_EQ(rgn->base, r2.base);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == total_size);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
return 0;
}
@@ -489,6 +525,8 @@ static int alloc_try_nid_top_down_reserved_no_space_check(void)
char *b;
struct region r1, r2;
+ PREFIX_PUSH();
+
phys_addr_t r3_size = SZ_256;
phys_addr_t gap_size = SMP_CACHE_BYTES;
phys_addr_t total_size;
@@ -514,17 +552,19 @@ static int alloc_try_nid_top_down_reserved_no_space_check(void)
min_addr, max_addr, NUMA_NO_NODE);
b = (char *)allocated_ptr;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
+
+ ASSERT_EQ(rgn1->size, r1.size);
+ ASSERT_EQ(rgn1->base, r1.base);
- assert(rgn1->size == r1.size);
- assert(rgn1->base == r1.base);
+ ASSERT_EQ(rgn2->size, r2.size + r3_size);
+ ASSERT_EQ(rgn2->base, r2.base - r3_size);
- assert(rgn2->size == r2.size + r3_size);
- assert(rgn2->base == r2.base - r3_size);
+ ASSERT_EQ(memblock.reserved.cnt, 2);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
- assert(memblock.reserved.cnt == 2);
- assert(memblock.reserved.total_size == total_size);
+ test_pass_pop();
return 0;
}
@@ -554,6 +594,8 @@ static int alloc_try_nid_reserved_all_generic_check(void)
void *allocated_ptr = NULL;
struct region r1, r2;
+ PREFIX_PUSH();
+
phys_addr_t r3_size = SZ_256;
phys_addr_t gap_size = SMP_CACHE_BYTES;
phys_addr_t max_addr;
@@ -576,7 +618,9 @@ static int alloc_try_nid_reserved_all_generic_check(void)
allocated_ptr = memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
min_addr, max_addr, NUMA_NO_NODE);
- assert(!allocated_ptr);
+ ASSERT_EQ(allocated_ptr, NULL);
+
+ test_pass_pop();
return 0;
}
@@ -592,6 +636,8 @@ static int alloc_try_nid_top_down_cap_max_check(void)
void *allocated_ptr = NULL;
char *b;
+ PREFIX_PUSH();
+
phys_addr_t size = SZ_256;
phys_addr_t min_addr;
phys_addr_t max_addr;
@@ -605,14 +651,16 @@ static int alloc_try_nid_top_down_cap_max_check(void)
min_addr, max_addr, NUMA_NO_NODE);
b = (char *)allocated_ptr;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
+
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - size);
- assert(rgn->size == size);
- assert(rgn->base == memblock_end_of_DRAM() - size);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == size);
+ test_pass_pop();
return 0;
}
@@ -628,6 +676,8 @@ static int alloc_try_nid_top_down_cap_min_check(void)
void *allocated_ptr = NULL;
char *b;
+ PREFIX_PUSH();
+
phys_addr_t size = SZ_1K;
phys_addr_t min_addr;
phys_addr_t max_addr;
@@ -641,14 +691,16 @@ static int alloc_try_nid_top_down_cap_min_check(void)
min_addr, max_addr, NUMA_NO_NODE);
b = (char *)allocated_ptr;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
- assert(rgn->size == size);
- assert(rgn->base == memblock_end_of_DRAM() - size);
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == size);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
return 0;
}
@@ -673,6 +725,8 @@ static int alloc_try_nid_bottom_up_simple_check(void)
void *allocated_ptr = NULL;
char *b;
+ PREFIX_PUSH();
+
phys_addr_t size = SZ_128;
phys_addr_t min_addr;
phys_addr_t max_addr;
@@ -689,15 +743,17 @@ static int alloc_try_nid_bottom_up_simple_check(void)
b = (char *)allocated_ptr;
rgn_end = rgn->base + rgn->size;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
+
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, min_addr);
+ ASSERT_LT(rgn_end, max_addr);
- assert(rgn->size == size);
- assert(rgn->base == min_addr);
- assert(rgn_end < max_addr);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == size);
+ test_pass_pop();
return 0;
}
@@ -725,6 +781,8 @@ static int alloc_try_nid_bottom_up_start_misaligned_check(void)
void *allocated_ptr = NULL;
char *b;
+ PREFIX_PUSH();
+
phys_addr_t size = SZ_128;
phys_addr_t misalign = SZ_2;
phys_addr_t min_addr;
@@ -742,15 +800,17 @@ static int alloc_try_nid_bottom_up_start_misaligned_check(void)
b = (char *)allocated_ptr;
rgn_end = rgn->base + rgn->size;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
+
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, min_addr + (SMP_CACHE_BYTES - misalign));
+ ASSERT_LT(rgn_end, max_addr);
- assert(rgn->size == size);
- assert(rgn->base == min_addr + (SMP_CACHE_BYTES - misalign));
- assert(rgn_end < max_addr);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == size);
+ test_pass_pop();
return 0;
}
@@ -778,6 +838,8 @@ static int alloc_try_nid_bottom_up_narrow_range_check(void)
void *allocated_ptr = NULL;
char *b;
+ PREFIX_PUSH();
+
phys_addr_t size = SZ_256;
phys_addr_t min_addr;
phys_addr_t max_addr;
@@ -792,14 +854,16 @@ static int alloc_try_nid_bottom_up_narrow_range_check(void)
NUMA_NO_NODE);
b = (char *)allocated_ptr;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
- assert(rgn->size == size);
- assert(rgn->base == memblock_start_of_DRAM());
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == size);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
return 0;
}
@@ -829,6 +893,8 @@ static int alloc_try_nid_bottom_up_reserved_with_space_check(void)
char *b;
struct region r1, r2;
+ PREFIX_PUSH();
+
phys_addr_t r3_size = SZ_64;
phys_addr_t gap_size = SMP_CACHE_BYTES;
phys_addr_t total_size;
@@ -855,17 +921,19 @@ static int alloc_try_nid_bottom_up_reserved_with_space_check(void)
NUMA_NO_NODE);
b = (char *)allocated_ptr;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
- assert(rgn1->size == r1.size);
- assert(rgn1->base == max_addr);
+ ASSERT_EQ(rgn1->size, r1.size);
+ ASSERT_EQ(rgn1->base, max_addr);
- assert(rgn2->size == r2.size + r3_size);
- assert(rgn2->base == r2.base);
+ ASSERT_EQ(rgn2->size, r2.size + r3_size);
+ ASSERT_EQ(rgn2->base, r2.base);
- assert(memblock.reserved.cnt == 2);
- assert(memblock.reserved.total_size == total_size);
+ ASSERT_EQ(memblock.reserved.cnt, 2);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
return 0;
}
@@ -899,6 +967,8 @@ static int alloc_try_nid_bottom_up_reserved_no_space_check(void)
char *b;
struct region r1, r2;
+ PREFIX_PUSH();
+
phys_addr_t r3_size = SZ_256;
phys_addr_t gap_size = SMP_CACHE_BYTES;
phys_addr_t total_size;
@@ -925,20 +995,22 @@ static int alloc_try_nid_bottom_up_reserved_no_space_check(void)
NUMA_NO_NODE);
b = (char *)allocated_ptr;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
+
+ ASSERT_EQ(rgn3->size, r3_size);
+ ASSERT_EQ(rgn3->base, memblock_start_of_DRAM());
- assert(rgn3->size == r3_size);
- assert(rgn3->base == memblock_start_of_DRAM());
+ ASSERT_EQ(rgn2->size, r2.size);
+ ASSERT_EQ(rgn2->base, r2.base);
- assert(rgn2->size == r2.size);
- assert(rgn2->base == r2.base);
+ ASSERT_EQ(rgn1->size, r1.size);
+ ASSERT_EQ(rgn1->base, r1.base);
- assert(rgn1->size == r1.size);
- assert(rgn1->base == r1.base);
+ ASSERT_EQ(memblock.reserved.cnt, 3);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
- assert(memblock.reserved.cnt == 3);
- assert(memblock.reserved.total_size == total_size);
+ test_pass_pop();
return 0;
}
@@ -954,6 +1026,8 @@ static int alloc_try_nid_bottom_up_cap_max_check(void)
void *allocated_ptr = NULL;
char *b;
+ PREFIX_PUSH();
+
phys_addr_t size = SZ_256;
phys_addr_t min_addr;
phys_addr_t max_addr;
@@ -968,14 +1042,16 @@ static int alloc_try_nid_bottom_up_cap_max_check(void)
NUMA_NO_NODE);
b = (char *)allocated_ptr;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
+
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, min_addr);
- assert(rgn->size == size);
- assert(rgn->base == min_addr);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == size);
+ test_pass_pop();
return 0;
}
@@ -991,6 +1067,8 @@ static int alloc_try_nid_bottom_up_cap_min_check(void)
void *allocated_ptr = NULL;
char *b;
+ PREFIX_PUSH();
+
phys_addr_t size = SZ_1K;
phys_addr_t min_addr;
phys_addr_t max_addr;
@@ -1005,14 +1083,16 @@ static int alloc_try_nid_bottom_up_cap_min_check(void)
NUMA_NO_NODE);
b = (char *)allocated_ptr;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
+
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
- assert(rgn->size == size);
- assert(rgn->base == memblock_start_of_DRAM());
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == size);
+ test_pass_pop();
return 0;
}
@@ -1020,6 +1100,7 @@ static int alloc_try_nid_bottom_up_cap_min_check(void)
/* Test case wrappers */
static int alloc_try_nid_simple_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_try_nid_top_down_simple_check();
memblock_set_bottom_up(true);
@@ -1030,6 +1111,7 @@ static int alloc_try_nid_simple_check(void)
static int alloc_try_nid_misaligned_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_try_nid_top_down_end_misaligned_check();
memblock_set_bottom_up(true);
@@ -1040,6 +1122,7 @@ static int alloc_try_nid_misaligned_check(void)
static int alloc_try_nid_narrow_range_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_try_nid_top_down_narrow_range_check();
memblock_set_bottom_up(true);
@@ -1050,6 +1133,7 @@ static int alloc_try_nid_narrow_range_check(void)
static int alloc_try_nid_reserved_with_space_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_try_nid_top_down_reserved_with_space_check();
memblock_set_bottom_up(true);
@@ -1060,6 +1144,7 @@ static int alloc_try_nid_reserved_with_space_check(void)
static int alloc_try_nid_reserved_no_space_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_try_nid_top_down_reserved_no_space_check();
memblock_set_bottom_up(true);
@@ -1070,6 +1155,7 @@ static int alloc_try_nid_reserved_no_space_check(void)
static int alloc_try_nid_cap_max_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_try_nid_top_down_cap_max_check();
memblock_set_bottom_up(true);
@@ -1080,6 +1166,7 @@ static int alloc_try_nid_cap_max_check(void)
static int alloc_try_nid_cap_min_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_try_nid_top_down_cap_min_check();
memblock_set_bottom_up(true);
@@ -1090,6 +1177,7 @@ static int alloc_try_nid_cap_min_check(void)
static int alloc_try_nid_min_reserved_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_try_nid_min_reserved_generic_check();
memblock_set_bottom_up(true);
@@ -1100,6 +1188,7 @@ static int alloc_try_nid_min_reserved_check(void)
static int alloc_try_nid_max_reserved_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_try_nid_max_reserved_generic_check();
memblock_set_bottom_up(true);
@@ -1110,6 +1199,7 @@ static int alloc_try_nid_max_reserved_check(void)
static int alloc_try_nid_exact_address_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_try_nid_exact_address_generic_check();
memblock_set_bottom_up(true);
@@ -1120,6 +1210,7 @@ static int alloc_try_nid_exact_address_check(void)
static int alloc_try_nid_reserved_full_merge_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_try_nid_reserved_full_merge_generic_check();
memblock_set_bottom_up(true);
@@ -1130,6 +1221,7 @@ static int alloc_try_nid_reserved_full_merge_check(void)
static int alloc_try_nid_reserved_all_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_try_nid_reserved_all_generic_check();
memblock_set_bottom_up(true);
@@ -1140,6 +1232,7 @@ static int alloc_try_nid_reserved_all_check(void)
static int alloc_try_nid_low_max_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_try_nid_low_max_generic_check();
memblock_set_bottom_up(true);
@@ -1150,6 +1243,12 @@ static int alloc_try_nid_low_max_check(void)
int memblock_alloc_nid_checks(void)
{
+ const char *func_testing = "memblock_alloc_try_nid";
+
+ prefix_reset();
+ prefix_push(func_testing);
+ test_print("Running %s tests...\n", func_testing);
+
reset_memblock_attributes();
dummy_physical_memory_init();
@@ -1170,5 +1269,7 @@ int memblock_alloc_nid_checks(void)
dummy_physical_memory_cleanup();
+ prefix_pop();
+
return 0;
}
diff --git a/tools/testing/memblock/tests/basic_api.c b/tools/testing/memblock/tests/basic_api.c
index a7bc180316d6..66f46f261e66 100644
--- a/tools/testing/memblock/tests/basic_api.c
+++ b/tools/testing/memblock/tests/basic_api.c
@@ -4,21 +4,29 @@
#include "basic_api.h"
#define EXPECTED_MEMBLOCK_REGIONS 128
+#define FUNC_ADD "memblock_add"
+#define FUNC_RESERVE "memblock_reserve"
+#define FUNC_REMOVE "memblock_remove"
+#define FUNC_FREE "memblock_free"
static int memblock_initialization_check(void)
{
- assert(memblock.memory.regions);
- assert(memblock.memory.cnt == 1);
- assert(memblock.memory.max == EXPECTED_MEMBLOCK_REGIONS);
- assert(strcmp(memblock.memory.name, "memory") == 0);
+ PREFIX_PUSH();
- assert(memblock.reserved.regions);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.memory.max == EXPECTED_MEMBLOCK_REGIONS);
- assert(strcmp(memblock.reserved.name, "reserved") == 0);
+ ASSERT_NE(memblock.memory.regions, NULL);
+ ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.max, EXPECTED_MEMBLOCK_REGIONS);
+ ASSERT_EQ(strcmp(memblock.memory.name, "memory"), 0);
- assert(!memblock.bottom_up);
- assert(memblock.current_limit == MEMBLOCK_ALLOC_ANYWHERE);
+ ASSERT_NE(memblock.reserved.regions, NULL);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.memory.max, EXPECTED_MEMBLOCK_REGIONS);
+ ASSERT_EQ(strcmp(memblock.reserved.name, "reserved"), 0);
+
+ ASSERT_EQ(memblock.bottom_up, false);
+ ASSERT_EQ(memblock.current_limit, MEMBLOCK_ALLOC_ANYWHERE);
+
+ test_pass_pop();
return 0;
}
@@ -40,14 +48,18 @@ static int memblock_add_simple_check(void)
.size = SZ_4M
};
+ PREFIX_PUSH();
+
reset_memblock_regions();
memblock_add(r.base, r.size);
- assert(rgn->base == r.base);
- assert(rgn->size == r.size);
+ ASSERT_EQ(rgn->base, r.base);
+ ASSERT_EQ(rgn->size, r.size);
+
+ ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.total_size, r.size);
- assert(memblock.memory.cnt == 1);
- assert(memblock.memory.total_size == r.size);
+ test_pass_pop();
return 0;
}
@@ -69,18 +81,22 @@ static int memblock_add_node_simple_check(void)
.size = SZ_16M
};
+ PREFIX_PUSH();
+
reset_memblock_regions();
memblock_add_node(r.base, r.size, 1, MEMBLOCK_HOTPLUG);
- assert(rgn->base == r.base);
- assert(rgn->size == r.size);
+ ASSERT_EQ(rgn->base, r.base);
+ ASSERT_EQ(rgn->size, r.size);
#ifdef CONFIG_NUMA
- assert(rgn->nid == 1);
+ ASSERT_EQ(rgn->nid, 1);
#endif
- assert(rgn->flags == MEMBLOCK_HOTPLUG);
+ ASSERT_EQ(rgn->flags, MEMBLOCK_HOTPLUG);
+
+ ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.total_size, r.size);
- assert(memblock.memory.cnt == 1);
- assert(memblock.memory.total_size == r.size);
+ test_pass_pop();
return 0;
}
@@ -113,18 +129,22 @@ static int memblock_add_disjoint_check(void)
.size = SZ_8K
};
+ PREFIX_PUSH();
+
reset_memblock_regions();
memblock_add(r1.base, r1.size);
memblock_add(r2.base, r2.size);
- assert(rgn1->base == r1.base);
- assert(rgn1->size == r1.size);
+ ASSERT_EQ(rgn1->base, r1.base);
+ ASSERT_EQ(rgn1->size, r1.size);
+
+ ASSERT_EQ(rgn2->base, r2.base);
+ ASSERT_EQ(rgn2->size, r2.size);
- assert(rgn2->base == r2.base);
- assert(rgn2->size == r2.size);
+ ASSERT_EQ(memblock.memory.cnt, 2);
+ ASSERT_EQ(memblock.memory.total_size, r1.size + r2.size);
- assert(memblock.memory.cnt == 2);
- assert(memblock.memory.total_size == r1.size + r2.size);
+ test_pass_pop();
return 0;
}
@@ -162,17 +182,21 @@ static int memblock_add_overlap_top_check(void)
.size = SZ_512M
};
+ PREFIX_PUSH();
+
total_size = (r1.base - r2.base) + r1.size;
reset_memblock_regions();
memblock_add(r1.base, r1.size);
memblock_add(r2.base, r2.size);
- assert(rgn->base == r2.base);
- assert(rgn->size == total_size);
+ ASSERT_EQ(rgn->base, r2.base);
+ ASSERT_EQ(rgn->size, total_size);
+
+ ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.total_size, total_size);
- assert(memblock.memory.cnt == 1);
- assert(memblock.memory.total_size == total_size);
+ test_pass_pop();
return 0;
}
@@ -210,17 +234,21 @@ static int memblock_add_overlap_bottom_check(void)
.size = SZ_1G
};
+ PREFIX_PUSH();
+
total_size = (r2.base - r1.base) + r2.size;
reset_memblock_regions();
memblock_add(r1.base, r1.size);
memblock_add(r2.base, r2.size);
- assert(rgn->base == r1.base);
- assert(rgn->size == total_size);
+ ASSERT_EQ(rgn->base, r1.base);
+ ASSERT_EQ(rgn->size, total_size);
+
+ ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.total_size, total_size);
- assert(memblock.memory.cnt == 1);
- assert(memblock.memory.total_size == total_size);
+ test_pass_pop();
return 0;
}
@@ -255,15 +283,19 @@ static int memblock_add_within_check(void)
.size = SZ_1M
};
+ PREFIX_PUSH();
+
reset_memblock_regions();
memblock_add(r1.base, r1.size);
memblock_add(r2.base, r2.size);
- assert(rgn->base == r1.base);
- assert(rgn->size == r1.size);
+ ASSERT_EQ(rgn->base, r1.base);
+ ASSERT_EQ(rgn->size, r1.size);
+
+ ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.total_size, r1.size);
- assert(memblock.memory.cnt == 1);
- assert(memblock.memory.total_size == r1.size);
+ test_pass_pop();
return 0;
}
@@ -279,19 +311,27 @@ static int memblock_add_twice_check(void)
.size = SZ_2M
};
+ PREFIX_PUSH();
+
reset_memblock_regions();
memblock_add(r.base, r.size);
memblock_add(r.base, r.size);
- assert(memblock.memory.cnt == 1);
- assert(memblock.memory.total_size == r.size);
+ ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.total_size, r.size);
+
+ test_pass_pop();
return 0;
}
static int memblock_add_checks(void)
{
+ prefix_reset();
+ prefix_push(FUNC_ADD);
+ test_print("Running %s tests...\n", FUNC_ADD);
+
memblock_add_simple_check();
memblock_add_node_simple_check();
memblock_add_disjoint_check();
@@ -300,6 +340,8 @@ static int memblock_add_checks(void)
memblock_add_within_check();
memblock_add_twice_check();
+ prefix_pop();
+
return 0;
}
@@ -320,11 +362,15 @@ static int memblock_reserve_simple_check(void)
.size = SZ_128M
};
+ PREFIX_PUSH();
+
reset_memblock_regions();
memblock_reserve(r.base, r.size);
- assert(rgn->base == r.base);
- assert(rgn->size == r.size);
+ ASSERT_EQ(rgn->base, r.base);
+ ASSERT_EQ(rgn->size, r.size);
+
+ test_pass_pop();
return 0;
}
@@ -356,18 +402,22 @@ static int memblock_reserve_disjoint_check(void)
.size = SZ_512M
};
+ PREFIX_PUSH();
+
reset_memblock_regions();
memblock_reserve(r1.base, r1.size);
memblock_reserve(r2.base, r2.size);
- assert(rgn1->base == r1.base);
- assert(rgn1->size == r1.size);
+ ASSERT_EQ(rgn1->base, r1.base);
+ ASSERT_EQ(rgn1->size, r1.size);
+
+ ASSERT_EQ(rgn2->base, r2.base);
+ ASSERT_EQ(rgn2->size, r2.size);
- assert(rgn2->base == r2.base);
- assert(rgn2->size == r2.size);
+ ASSERT_EQ(memblock.reserved.cnt, 2);
+ ASSERT_EQ(memblock.reserved.total_size, r1.size + r2.size);
- assert(memblock.reserved.cnt == 2);
- assert(memblock.reserved.total_size == r1.size + r2.size);
+ test_pass_pop();
return 0;
}
@@ -406,17 +456,21 @@ static int memblock_reserve_overlap_top_check(void)
.size = SZ_1G
};
+ PREFIX_PUSH();
+
total_size = (r1.base - r2.base) + r1.size;
reset_memblock_regions();
memblock_reserve(r1.base, r1.size);
memblock_reserve(r2.base, r2.size);
- assert(rgn->base == r2.base);
- assert(rgn->size == total_size);
+ ASSERT_EQ(rgn->base, r2.base);
+ ASSERT_EQ(rgn->size, total_size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == total_size);
+ test_pass_pop();
return 0;
}
@@ -455,17 +509,21 @@ static int memblock_reserve_overlap_bottom_check(void)
.size = SZ_128K
};
+ PREFIX_PUSH();
+
total_size = (r2.base - r1.base) + r2.size;
reset_memblock_regions();
memblock_reserve(r1.base, r1.size);
memblock_reserve(r2.base, r2.size);
- assert(rgn->base == r1.base);
- assert(rgn->size == total_size);
+ ASSERT_EQ(rgn->base, r1.base);
+ ASSERT_EQ(rgn->size, total_size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == total_size);
+ test_pass_pop();
return 0;
}
@@ -502,15 +560,19 @@ static int memblock_reserve_within_check(void)
.size = SZ_64K
};
+ PREFIX_PUSH();
+
reset_memblock_regions();
memblock_reserve(r1.base, r1.size);
memblock_reserve(r2.base, r2.size);
- assert(rgn->base == r1.base);
- assert(rgn->size == r1.size);
+ ASSERT_EQ(rgn->base, r1.base);
+ ASSERT_EQ(rgn->size, r1.size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, r1.size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == r1.size);
+ test_pass_pop();
return 0;
}
@@ -527,19 +589,27 @@ static int memblock_reserve_twice_check(void)
.size = SZ_2M
};
+ PREFIX_PUSH();
+
reset_memblock_regions();
memblock_reserve(r.base, r.size);
memblock_reserve(r.base, r.size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == r.size);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, r.size);
+
+ test_pass_pop();
return 0;
}
static int memblock_reserve_checks(void)
{
+ prefix_reset();
+ prefix_push(FUNC_RESERVE);
+ test_print("Running %s tests...\n", FUNC_RESERVE);
+
memblock_reserve_simple_check();
memblock_reserve_disjoint_check();
memblock_reserve_overlap_top_check();
@@ -547,6 +617,8 @@ static int memblock_reserve_checks(void)
memblock_reserve_within_check();
memblock_reserve_twice_check();
+ prefix_pop();
+
return 0;
}
@@ -581,16 +653,20 @@ static int memblock_remove_simple_check(void)
.size = SZ_4M
};
+ PREFIX_PUSH();
+
reset_memblock_regions();
memblock_add(r1.base, r1.size);
memblock_add(r2.base, r2.size);
memblock_remove(r1.base, r1.size);
- assert(rgn->base == r2.base);
- assert(rgn->size == r2.size);
+ ASSERT_EQ(rgn->base, r2.base);
+ ASSERT_EQ(rgn->size, r2.size);
- assert(memblock.memory.cnt == 1);
- assert(memblock.memory.total_size == r2.size);
+ ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.total_size, r2.size);
+
+ test_pass_pop();
return 0;
}
@@ -626,15 +702,19 @@ static int memblock_remove_absent_check(void)
.size = SZ_1G
};
+ PREFIX_PUSH();
+
reset_memblock_regions();
memblock_add(r1.base, r1.size);
memblock_remove(r2.base, r2.size);
- assert(rgn->base == r1.base);
- assert(rgn->size == r1.size);
+ ASSERT_EQ(rgn->base, r1.base);
+ ASSERT_EQ(rgn->size, r1.size);
+
+ ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.total_size, r1.size);
- assert(memblock.memory.cnt == 1);
- assert(memblock.memory.total_size == r1.size);
+ test_pass_pop();
return 0;
}
@@ -674,6 +754,8 @@ static int memblock_remove_overlap_top_check(void)
.size = SZ_32M
};
+ PREFIX_PUSH();
+
r1_end = r1.base + r1.size;
r2_end = r2.base + r2.size;
total_size = r1_end - r2_end;
@@ -682,11 +764,13 @@ static int memblock_remove_overlap_top_check(void)
memblock_add(r1.base, r1.size);
memblock_remove(r2.base, r2.size);
- assert(rgn->base == r1.base + r2.base);
- assert(rgn->size == total_size);
+ ASSERT_EQ(rgn->base, r1.base + r2.base);
+ ASSERT_EQ(rgn->size, total_size);
+
+ ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.total_size, total_size);
- assert(memblock.memory.cnt == 1);
- assert(memblock.memory.total_size == total_size);
+ test_pass_pop();
return 0;
}
@@ -724,17 +808,22 @@ static int memblock_remove_overlap_bottom_check(void)
.size = SZ_256M
};
+ PREFIX_PUSH();
+
total_size = r2.base - r1.base;
reset_memblock_regions();
memblock_add(r1.base, r1.size);
memblock_remove(r2.base, r2.size);
- assert(rgn->base == r1.base);
- assert(rgn->size == total_size);
+ ASSERT_EQ(rgn->base, r1.base);
+ ASSERT_EQ(rgn->size, total_size);
+
+ ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.total_size, total_size);
+
+ test_pass_pop();
- assert(memblock.memory.cnt == 1);
- assert(memblock.memory.total_size == total_size);
return 0;
}
@@ -774,6 +863,8 @@ static int memblock_remove_within_check(void)
.size = SZ_1M
};
+ PREFIX_PUSH();
+
r1_size = r2.base - r1.base;
r2_size = (r1.base + r1.size) - (r2.base + r2.size);
total_size = r1_size + r2_size;
@@ -782,26 +873,34 @@ static int memblock_remove_within_check(void)
memblock_add(r1.base, r1.size);
memblock_remove(r2.base, r2.size);
- assert(rgn1->base == r1.base);
- assert(rgn1->size == r1_size);
+ ASSERT_EQ(rgn1->base, r1.base);
+ ASSERT_EQ(rgn1->size, r1_size);
+
+ ASSERT_EQ(rgn2->base, r2.base + r2.size);
+ ASSERT_EQ(rgn2->size, r2_size);
- assert(rgn2->base == r2.base + r2.size);
- assert(rgn2->size == r2_size);
+ ASSERT_EQ(memblock.memory.cnt, 2);
+ ASSERT_EQ(memblock.memory.total_size, total_size);
- assert(memblock.memory.cnt == 2);
- assert(memblock.memory.total_size == total_size);
+ test_pass_pop();
return 0;
}
static int memblock_remove_checks(void)
{
+ prefix_reset();
+ prefix_push(FUNC_REMOVE);
+ test_print("Running %s tests...\n", FUNC_REMOVE);
+
memblock_remove_simple_check();
memblock_remove_absent_check();
memblock_remove_overlap_top_check();
memblock_remove_overlap_bottom_check();
memblock_remove_within_check();
+ prefix_pop();
+
return 0;
}
@@ -835,16 +934,20 @@ static int memblock_free_simple_check(void)
.size = SZ_1M
};
+ PREFIX_PUSH();
+
reset_memblock_regions();
memblock_reserve(r1.base, r1.size);
memblock_reserve(r2.base, r2.size);
memblock_free((void *)r1.base, r1.size);
- assert(rgn->base == r2.base);
- assert(rgn->size == r2.size);
+ ASSERT_EQ(rgn->base, r2.base);
+ ASSERT_EQ(rgn->size, r2.size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, r2.size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == r2.size);
+ test_pass_pop();
return 0;
}
@@ -880,15 +983,19 @@ static int memblock_free_absent_check(void)
.size = SZ_128M
};
+ PREFIX_PUSH();
+
reset_memblock_regions();
memblock_reserve(r1.base, r1.size);
memblock_free((void *)r2.base, r2.size);
- assert(rgn->base == r1.base);
- assert(rgn->size == r1.size);
+ ASSERT_EQ(rgn->base, r1.base);
+ ASSERT_EQ(rgn->size, r1.size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, r1.size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == r1.size);
+ test_pass_pop();
return 0;
}
@@ -928,17 +1035,21 @@ static int memblock_free_overlap_top_check(void)
.size = SZ_8M
};
+ PREFIX_PUSH();
+
total_size = (r1.size + r1.base) - (r2.base + r2.size);
reset_memblock_regions();
memblock_reserve(r1.base, r1.size);
memblock_free((void *)r2.base, r2.size);
- assert(rgn->base == r2.base + r2.size);
- assert(rgn->size == total_size);
+ ASSERT_EQ(rgn->base, r2.base + r2.size);
+ ASSERT_EQ(rgn->size, total_size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == total_size);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
return 0;
}
@@ -973,17 +1084,21 @@ static int memblock_free_overlap_bottom_check(void)
.size = SZ_32M
};
+ PREFIX_PUSH();
+
total_size = r2.base - r1.base;
reset_memblock_regions();
memblock_reserve(r1.base, r1.size);
memblock_free((void *)r2.base, r2.size);
- assert(rgn->base == r1.base);
- assert(rgn->size == total_size);
+ ASSERT_EQ(rgn->base, r1.base);
+ ASSERT_EQ(rgn->size, total_size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == total_size);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
return 0;
}
@@ -1024,6 +1139,8 @@ static int memblock_free_within_check(void)
.size = SZ_1M
};
+ PREFIX_PUSH();
+
r1_size = r2.base - r1.base;
r2_size = (r1.base + r1.size) - (r2.base + r2.size);
total_size = r1_size + r2_size;
@@ -1032,26 +1149,34 @@ static int memblock_free_within_check(void)
memblock_reserve(r1.base, r1.size);
memblock_free((void *)r2.base, r2.size);
- assert(rgn1->base == r1.base);
- assert(rgn1->size == r1_size);
+ ASSERT_EQ(rgn1->base, r1.base);
+ ASSERT_EQ(rgn1->size, r1_size);
- assert(rgn2->base == r2.base + r2.size);
- assert(rgn2->size == r2_size);
+ ASSERT_EQ(rgn2->base, r2.base + r2.size);
+ ASSERT_EQ(rgn2->size, r2_size);
- assert(memblock.reserved.cnt == 2);
- assert(memblock.reserved.total_size == total_size);
+ ASSERT_EQ(memblock.reserved.cnt, 2);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
return 0;
}
static int memblock_free_checks(void)
{
+ prefix_reset();
+ prefix_push(FUNC_FREE);
+ test_print("Running %s tests...\n", FUNC_FREE);
+
memblock_free_simple_check();
memblock_free_absent_check();
memblock_free_overlap_top_check();
memblock_free_overlap_bottom_check();
memblock_free_within_check();
+ prefix_pop();
+
return 0;
}
diff --git a/tools/testing/memblock/tests/common.c b/tools/testing/memblock/tests/common.c
index 62d3191f7c9a..e43b2676af81 100644
--- a/tools/testing/memblock/tests/common.c
+++ b/tools/testing/memblock/tests/common.c
@@ -1,11 +1,39 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include "tests/common.h"
#include <string.h>
+#include <getopt.h>
+#include <linux/memory_hotplug.h>
+#include <linux/build_bug.h>
#define INIT_MEMBLOCK_REGIONS 128
#define INIT_MEMBLOCK_RESERVED_REGIONS INIT_MEMBLOCK_REGIONS
+#define PREFIXES_MAX 15
+#define DELIM ": "
static struct test_memory memory_block;
+static const char __maybe_unused *prefixes[PREFIXES_MAX];
+static int __maybe_unused nr_prefixes;
+
+static const char *short_opts = "mv";
+static const struct option long_opts[] = {
+ {"movable-node", 0, NULL, 'm'},
+ {"verbose", 0, NULL, 'v'},
+ {NULL, 0, NULL, 0}
+};
+
+static const char * const help_opts[] = {
+ "disallow allocations from regions marked as hotplugged\n\t\t\t"
+ "by simulating enabling the \"movable_node\" kernel\n\t\t\t"
+ "parameter",
+ "enable verbose output, which includes the name of the\n\t\t\t"
+ "memblock function being tested, the name of the test,\n\t\t\t"
+ "and whether the test passed or failed."
+};
+
+static int verbose;
+
+/* sets global variable returned by movable_node_is_enabled() stub */
+bool movable_node_enabled;
void reset_memblock_regions(void)
{
@@ -46,3 +74,93 @@ void dummy_physical_memory_cleanup(void)
{
free(memory_block.base);
}
+
+static void usage(const char *prog)
+{
+ BUILD_BUG_ON(ARRAY_SIZE(help_opts) != ARRAY_SIZE(long_opts) - 1);
+
+ printf("Usage: %s [-%s]\n", prog, short_opts);
+
+ for (int i = 0; long_opts[i].name; i++) {
+ printf(" -%c, --%-12s\t%s\n", long_opts[i].val,
+ long_opts[i].name, help_opts[i]);
+ }
+
+ exit(1);
+}
+
+void parse_args(int argc, char **argv)
+{
+ int c;
+
+ while ((c = getopt_long_only(argc, argv, short_opts, long_opts,
+ NULL)) != -1) {
+ switch (c) {
+ case 'm':
+ movable_node_enabled = true;
+ break;
+ case 'v':
+ verbose = 1;
+ break;
+ default:
+ usage(argv[0]);
+ }
+ }
+}
+
+void print_prefixes(const char *postfix)
+{
+ for (int i = 0; i < nr_prefixes; i++)
+ test_print("%s%s", prefixes[i], DELIM);
+ test_print(postfix);
+}
+
+void test_fail(void)
+{
+ if (verbose) {
+ ksft_test_result_fail(": ");
+ print_prefixes("failed\n");
+ }
+}
+
+void test_pass(void)
+{
+ if (verbose) {
+ ksft_test_result_pass(": ");
+ print_prefixes("passed\n");
+ }
+}
+
+void test_print(const char *fmt, ...)
+{
+ if (verbose) {
+ int saved_errno = errno;
+ va_list args;
+
+ va_start(args, fmt);
+ errno = saved_errno;
+ vprintf(fmt, args);
+ va_end(args);
+ }
+}
+
+void prefix_reset(void)
+{
+ memset(prefixes, 0, PREFIXES_MAX * sizeof(char *));
+ nr_prefixes = 0;
+}
+
+void prefix_push(const char *prefix)
+{
+ assert(nr_prefixes < PREFIXES_MAX);
+ prefixes[nr_prefixes] = prefix;
+ nr_prefixes++;
+}
+
+void prefix_pop(void)
+{
+ if (nr_prefixes > 0) {
+ prefixes[nr_prefixes - 1] = 0;
+ nr_prefixes--;
+ }
+}
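As a hedged aside (not part of the patch): with -v, the prefix stack above is what assembles the verbose output. A suite pushes its family name, each test pushes its own name, and print_prefixes() joins the stack with ": " before the pass/fail postfix. A minimal sketch, using the hypothetical test name my_simple_check:

	prefix_reset();
	prefix_push("memblock_reserve");  /* suite prefix, as memblock_reserve_checks() does with FUNC_RESERVE */
	prefix_push("my_simple_check");   /* hypothetical test name */

	/* With -v this prints: "memblock_reserve: my_simple_check: passed" */
	print_prefixes("passed\n");

	prefix_pop();
	prefix_pop();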
diff --git a/tools/testing/memblock/tests/common.h b/tools/testing/memblock/tests/common.h
index 619054d03219..3e7f23d341d7 100644
--- a/tools/testing/memblock/tests/common.h
+++ b/tools/testing/memblock/tests/common.h
@@ -7,9 +7,49 @@
#include <linux/types.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
+#include <linux/printk.h>
+#include <../selftests/kselftest.h>
#define MEM_SIZE SZ_16K
+/**
+ * ASSERT_EQ():
+ * Check the condition
+ * @_expected == @_seen
+ * If false, print failed test message (if in VERBOSE mode) and then assert
+ */
+#define ASSERT_EQ(_expected, _seen) do { \
+ if ((_expected) != (_seen)) \
+ test_fail(); \
+ assert((_expected) == (_seen)); \
+} while (0)
+
+/**
+ * ASSERT_NE():
+ * Check the condition
+ * @_expected != @_seen
+ * If false, print failed test message (if in VERBOSE mode) and then assert
+ */
+#define ASSERT_NE(_expected, _seen) do { \
+ if ((_expected) == (_seen)) \
+ test_fail(); \
+ assert((_expected) != (_seen)); \
+} while (0)
+
+/**
+ * ASSERT_LT():
+ * Check the condition
+ * @_expected < @_seen
+ * If false, print failed test message (if in VERBOSE mode) and then assert
+ */
+#define ASSERT_LT(_expected, _seen) do { \
+ if ((_expected) >= (_seen)) \
+ test_fail(); \
+ assert((_expected) < (_seen)); \
+} while (0)
+
+#define PREFIX_PUSH() prefix_push(__func__)
+
/*
* Available memory registered with memblock needs to be valid for allocs
* test to run. This is a convenience wrapper for memory allocated in
@@ -30,5 +70,19 @@ void reset_memblock_attributes(void);
void setup_memblock(void);
void dummy_physical_memory_init(void);
void dummy_physical_memory_cleanup(void);
+void parse_args(int argc, char **argv);
+
+void test_fail(void);
+void test_pass(void);
+void test_print(const char *fmt, ...);
+void prefix_reset(void);
+void prefix_push(const char *prefix);
+void prefix_pop(void);
+
+static inline void test_pass_pop(void)
+{
+ test_pass();
+ prefix_pop();
+}
#endif
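For illustration only (not part of the patch), a sketch of the test pattern the conversions above follow, built from these helpers; my_region_check and the base/size values are hypothetical, while the memblock calls and counters are the same ones exercised by the converted tests:

	static int my_region_check(void)
	{
		PREFIX_PUSH();			/* push this function's name as the test prefix */

		reset_memblock_regions();
		memblock_reserve(SZ_1M, SZ_4M);

		ASSERT_EQ(memblock.reserved.cnt, 1);		/* prints a failure (with -v) and asserts on mismatch */
		ASSERT_EQ(memblock.reserved.total_size, SZ_4M);

		test_pass_pop();		/* report "passed" (with -v) and drop the prefix */

		return 0;
	}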
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 5047d8eef53e..c2064a35688b 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -12,11 +12,13 @@ TARGETS += cpu-hotplug
TARGETS += damon
TARGETS += drivers/dma-buf
TARGETS += drivers/s390x/uvdevice
+TARGETS += drivers/net/bonding
TARGETS += efivarfs
TARGETS += exec
TARGETS += filesystems
TARGETS += filesystems/binderfs
TARGETS += filesystems/epoll
+TARGETS += filesystems/fat
TARGETS += firmware
TARGETS += fpu
TARGETS += ftrace
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
index a33874b081b6..e89685bd587c 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
@@ -28,6 +28,7 @@
#include "bpf_iter_test_kern6.skel.h"
#include "bpf_iter_bpf_link.skel.h"
#include "bpf_iter_ksym.skel.h"
+#include "bpf_iter_sockmap.skel.h"
static int duration;
@@ -67,6 +68,50 @@ free_link:
bpf_link__destroy(link);
}
+static void do_read_map_iter_fd(struct bpf_object_skeleton **skel, struct bpf_program *prog,
+ struct bpf_map *map)
+{
+ DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+ union bpf_iter_link_info linfo;
+ struct bpf_link *link;
+ char buf[16] = {};
+ int iter_fd, len;
+
+ memset(&linfo, 0, sizeof(linfo));
+ linfo.map.map_fd = bpf_map__fd(map);
+ opts.link_info = &linfo;
+ opts.link_info_len = sizeof(linfo);
+ link = bpf_program__attach_iter(prog, &opts);
+ if (!ASSERT_OK_PTR(link, "attach_map_iter"))
+ return;
+
+ iter_fd = bpf_iter_create(bpf_link__fd(link));
+ if (!ASSERT_GE(iter_fd, 0, "create_map_iter")) {
+ bpf_link__destroy(link);
+ return;
+ }
+
+ /* Close link and map fd prematurely */
+ bpf_link__destroy(link);
+ bpf_object__destroy_skeleton(*skel);
+ *skel = NULL;
+
+ /* Give the map's free work a chance to run first if the map was freed */
+ usleep(100);
+ /* Memory used by both the sock map and the sock local storage map is
+ * freed after two synchronize_rcu() calls, so wait for both
+ */
+ kern_sync_rcu();
+ kern_sync_rcu();
+
+ /* Read after both map fd and link fd are closed */
+ while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
+ ;
+ ASSERT_GE(len, 0, "read_iterator");
+
+ close(iter_fd);
+}
+
static int read_fd_into_buffer(int fd, char *buf, int size)
{
int bufleft = size;
@@ -634,6 +679,12 @@ static void test_bpf_hash_map(void)
goto out;
}
+ /* Sleepable program is prohibited for hash map iterator */
+ linfo.map.map_fd = map_fd;
+ link = bpf_program__attach_iter(skel->progs.sleepable_dummy_dump, &opts);
+ if (!ASSERT_ERR_PTR(link, "attach_sleepable_prog_to_iter"))
+ goto out;
+
linfo.map.map_fd = map_fd;
link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
if (!ASSERT_OK_PTR(link, "attach_iter"))
@@ -827,6 +878,20 @@ out:
bpf_iter_bpf_array_map__destroy(skel);
}
+static void test_bpf_array_map_iter_fd(void)
+{
+ struct bpf_iter_bpf_array_map *skel;
+
+ skel = bpf_iter_bpf_array_map__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load"))
+ return;
+
+ do_read_map_iter_fd(&skel->skeleton, skel->progs.dump_bpf_array_map,
+ skel->maps.arraymap1);
+
+ bpf_iter_bpf_array_map__destroy(skel);
+}
+
static void test_bpf_percpu_array_map(void)
{
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
@@ -1009,6 +1074,20 @@ out:
bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}
+static void test_bpf_sk_storage_map_iter_fd(void)
+{
+ struct bpf_iter_bpf_sk_storage_map *skel;
+
+ skel = bpf_iter_bpf_sk_storage_map__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load"))
+ return;
+
+ do_read_map_iter_fd(&skel->skeleton, skel->progs.rw_bpf_sk_storage_map,
+ skel->maps.sk_stg_map);
+
+ bpf_iter_bpf_sk_storage_map__destroy(skel);
+}
+
static void test_bpf_sk_storage_map(void)
{
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
@@ -1044,7 +1123,15 @@ static void test_bpf_sk_storage_map(void)
linfo.map.map_fd = map_fd;
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
- link = bpf_program__attach_iter(skel->progs.dump_bpf_sk_storage_map, &opts);
+ link = bpf_program__attach_iter(skel->progs.oob_write_bpf_sk_storage_map, &opts);
+ err = libbpf_get_error(link);
+ if (!ASSERT_EQ(err, -EACCES, "attach_oob_write_iter")) {
+ if (!err)
+ bpf_link__destroy(link);
+ goto out;
+ }
+
+ link = bpf_program__attach_iter(skel->progs.rw_bpf_sk_storage_map, &opts);
if (!ASSERT_OK_PTR(link, "attach_iter"))
goto out;
@@ -1052,6 +1139,7 @@ static void test_bpf_sk_storage_map(void)
if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto free_link;
+ skel->bss->to_add_val = time(NULL);
/* do some tests */
while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
;
@@ -1065,6 +1153,13 @@ static void test_bpf_sk_storage_map(void)
if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
goto close_iter;
+ for (i = 0; i < num_sockets; i++) {
+ err = bpf_map_lookup_elem(map_fd, &sock_fd[i], &val);
+ if (!ASSERT_OK(err, "map_lookup") ||
+ !ASSERT_EQ(val, i + 1 + skel->bss->to_add_val, "check_map_value"))
+ break;
+ }
+
close_iter:
close(iter_fd);
free_link:
@@ -1217,6 +1312,19 @@ out:
bpf_iter_task_vma__destroy(skel);
}
+void test_bpf_sockmap_map_iter_fd(void)
+{
+ struct bpf_iter_sockmap *skel;
+
+ skel = bpf_iter_sockmap__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_sockmap__open_and_load"))
+ return;
+
+ do_read_map_iter_fd(&skel->skeleton, skel->progs.copy, skel->maps.sockmap);
+
+ bpf_iter_sockmap__destroy(skel);
+}
+
void test_bpf_iter(void)
{
if (test__start_subtest("btf_id_or_null"))
@@ -1267,10 +1375,14 @@ void test_bpf_iter(void)
test_bpf_percpu_hash_map();
if (test__start_subtest("bpf_array_map"))
test_bpf_array_map();
+ if (test__start_subtest("bpf_array_map_iter_fd"))
+ test_bpf_array_map_iter_fd();
if (test__start_subtest("bpf_percpu_array_map"))
test_bpf_percpu_array_map();
if (test__start_subtest("bpf_sk_storage_map"))
test_bpf_sk_storage_map();
+ if (test__start_subtest("bpf_sk_storage_map_iter_fd"))
+ test_bpf_sk_storage_map_iter_fd();
if (test__start_subtest("bpf_sk_storage_delete"))
test_bpf_sk_storage_delete();
if (test__start_subtest("bpf_sk_storage_get"))
@@ -1283,4 +1395,6 @@ void test_bpf_iter(void)
test_link_iter();
if (test__start_subtest("ksym"))
test_ksym_iter();
+ if (test__start_subtest("bpf_sockmap_map_iter_fd"))
+ test_bpf_sockmap_map_iter_fd();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
index 02bb8cbf9194..da860b07abb5 100644
--- a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
@@ -3,6 +3,7 @@
#include <test_progs.h>
#include <network_helpers.h>
#include <bpf/btf.h>
+#include "bind4_prog.skel.h"
typedef int (*test_cb)(struct bpf_object *obj);
@@ -407,6 +408,98 @@ static void test_func_replace_global_func(void)
prog_name, false, NULL);
}
+static int find_prog_btf_id(const char *name, __u32 attach_prog_fd)
+{
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ struct btf *btf;
+ int ret;
+
+ ret = bpf_obj_get_info_by_fd(attach_prog_fd, &info, &info_len);
+ if (ret)
+ return ret;
+
+ if (!info.btf_id)
+ return -EINVAL;
+
+ btf = btf__load_from_kernel_by_id(info.btf_id);
+ ret = libbpf_get_error(btf);
+ if (ret)
+ return ret;
+
+ ret = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
+ btf__free(btf);
+ return ret;
+}
+
+static int load_fentry(int attach_prog_fd, int attach_btf_id)
+{
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .expected_attach_type = BPF_TRACE_FENTRY,
+ .attach_prog_fd = attach_prog_fd,
+ .attach_btf_id = attach_btf_id,
+ );
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+
+ return bpf_prog_load(BPF_PROG_TYPE_TRACING,
+ "bind4_fentry",
+ "GPL",
+ insns,
+ ARRAY_SIZE(insns),
+ &opts);
+}
+
+static void test_fentry_to_cgroup_bpf(void)
+{
+ struct bind4_prog *skel = NULL;
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ int cgroup_fd = -1;
+ int fentry_fd = -1;
+ int btf_id;
+
+ cgroup_fd = test__join_cgroup("/fentry_to_cgroup_bpf");
+ if (!ASSERT_GE(cgroup_fd, 0, "cgroup_fd"))
+ return;
+
+ skel = bind4_prog__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel"))
+ goto cleanup;
+
+ skel->links.bind_v4_prog = bpf_program__attach_cgroup(skel->progs.bind_v4_prog, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links.bind_v4_prog, "bpf_program__attach_cgroup"))
+ goto cleanup;
+
+ btf_id = find_prog_btf_id("bind_v4_prog", bpf_program__fd(skel->progs.bind_v4_prog));
+ if (!ASSERT_GE(btf_id, 0, "find_prog_btf_id"))
+ goto cleanup;
+
+ fentry_fd = load_fentry(bpf_program__fd(skel->progs.bind_v4_prog), btf_id);
+ if (!ASSERT_GE(fentry_fd, 0, "load_fentry"))
+ goto cleanup;
+
+ /* Make sure bpf_obj_get_info_by_fd works correctly when attaching
+ * to another BPF program.
+ */
+
+ ASSERT_OK(bpf_obj_get_info_by_fd(fentry_fd, &info, &info_len),
+ "bpf_obj_get_info_by_fd");
+
+ ASSERT_EQ(info.btf_id, 0, "info.btf_id");
+ ASSERT_EQ(info.attach_btf_id, btf_id, "info.attach_btf_id");
+ ASSERT_GT(info.attach_btf_obj_id, 0, "info.attach_btf_obj_id");
+
+cleanup:
+ if (cgroup_fd >= 0)
+ close(cgroup_fd);
+ if (fentry_fd >= 0)
+ close(fentry_fd);
+ bind4_prog__destroy(skel);
+}
+
/* NOTE: affect other tests, must run in serial mode */
void serial_test_fexit_bpf2bpf(void)
{
@@ -430,4 +523,6 @@ void serial_test_fexit_bpf2bpf(void)
test_fmod_ret_freplace();
if (test__start_subtest("func_replace_global_func"))
test_func_replace_global_func();
+ if (test__start_subtest("fentry_to_cgroup_bpf"))
+ test_fentry_to_cgroup_bpf();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/lru_bug.c b/tools/testing/selftests/bpf/prog_tests/lru_bug.c
new file mode 100644
index 000000000000..3c7822390827
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/lru_bug.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+#include "lru_bug.skel.h"
+
+void test_lru_bug(void)
+{
+ struct lru_bug *skel;
+ int ret;
+
+ skel = lru_bug__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "lru_bug__open_and_load"))
+ return;
+ ret = lru_bug__attach(skel);
+ if (!ASSERT_OK(ret, "lru_bug__attach"))
+ goto end;
+ usleep(1);
+ ASSERT_OK(skel->data->result, "prealloc_lru_pop doesn't call check_and_init_map_value");
+end:
+ lru_bug__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c
index 0aa3cd34cbe3..d7a69217fb68 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c
@@ -112,3 +112,12 @@ int dump_bpf_hash_map(struct bpf_iter__bpf_map_elem *ctx)
return 0;
}
+
+SEC("iter.s/bpf_map_elem")
+int sleepable_dummy_dump(struct bpf_iter__bpf_map_elem *ctx)
+{
+ if (ctx->meta->seq_num == 0)
+ BPF_SEQ_PRINTF(ctx->meta->seq, "map dump starts\n");
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c
index 6b70ccaba301..c7b8e006b171 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c
@@ -16,19 +16,37 @@ struct {
__u32 val_sum = 0;
__u32 ipv6_sk_count = 0;
+__u32 to_add_val = 0;
SEC("iter/bpf_sk_storage_map")
-int dump_bpf_sk_storage_map(struct bpf_iter__bpf_sk_storage_map *ctx)
+int rw_bpf_sk_storage_map(struct bpf_iter__bpf_sk_storage_map *ctx)
{
struct sock *sk = ctx->sk;
__u32 *val = ctx->value;
- if (sk == (void *)0 || val == (void *)0)
+ if (sk == NULL || val == NULL)
return 0;
if (sk->sk_family == AF_INET6)
ipv6_sk_count++;
val_sum += *val;
+
+ *val += to_add_val;
+
+ return 0;
+}
+
+SEC("iter/bpf_sk_storage_map")
+int oob_write_bpf_sk_storage_map(struct bpf_iter__bpf_sk_storage_map *ctx)
+{
+ struct sock *sk = ctx->sk;
+ __u32 *val = ctx->value;
+
+ if (sk == NULL || val == NULL)
+ return 0;
+
+ *(val + 1) = 0xdeadbeef;
+
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/lru_bug.c b/tools/testing/selftests/bpf/progs/lru_bug.c
new file mode 100644
index 000000000000..687081a724b3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/lru_bug.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+struct map_value {
+ struct task_struct __kptr *ptr;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_LRU_HASH);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct map_value);
+} lru_map SEC(".maps");
+
+int pid = 0;
+int result = 1;
+
+SEC("fentry/bpf_ktime_get_ns")
+int printk(void *ctx)
+{
+ struct map_value v = {};
+
+ if (pid == bpf_get_current_task_btf()->pid)
+ bpf_map_update_elem(&lru_map, &(int){0}, &v, 0);
+ return 0;
+}
+
+SEC("fentry/do_nanosleep")
+int nanosleep(void *ctx)
+{
+ struct map_value val = {}, *v;
+ struct task_struct *current;
+
+ bpf_map_update_elem(&lru_map, &(int){0}, &val, 0);
+ v = bpf_map_lookup_elem(&lru_map, &(int){0});
+ if (!v)
+ return 0;
+ bpf_map_delete_elem(&lru_map, &(int){0});
+ current = bpf_get_current_task_btf();
+ v->ptr = current;
+ pid = current->pid;
+ bpf_ktime_get_ns();
+ result = !v->ptr;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/drivers/net/bonding/Makefile b/tools/testing/selftests/drivers/net/bonding/Makefile
new file mode 100644
index 000000000000..ab6c54b12098
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/bonding/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for net selftests
+
+TEST_PROGS := bond-break-lacpdu-tx.sh
+
+include ../../../lib.mk
diff --git a/tools/testing/selftests/drivers/net/bonding/bond-break-lacpdu-tx.sh b/tools/testing/selftests/drivers/net/bonding/bond-break-lacpdu-tx.sh
new file mode 100755
index 000000000000..47ab90596acb
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/bonding/bond-break-lacpdu-tx.sh
@@ -0,0 +1,81 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+# Regression Test:
+# Verify LACPDUs get transmitted after setting the MAC address of
+# the bond.
+#
+# https://bugzilla.redhat.com/show_bug.cgi?id=2020773
+#
+# +---------+
+# | fab-br0 |
+# +---------+
+# |
+# +---------+
+# | fbond |
+# +---------+
+# | |
+# +------+ +------+
+# |veth1 | |veth2 |
+# +------+ +------+
+#
+# We use veths instead of physical interfaces
+
+set -e
+tmp=$(mktemp -q dump.XXXXXX)
+cleanup() {
+ ip link del fab-br0 >/dev/null 2>&1 || :
+ ip link del fbond >/dev/null 2>&1 || :
+ ip link del veth1-bond >/dev/null 2>&1 || :
+ ip link del veth2-bond >/dev/null 2>&1 || :
+ modprobe -r bonding >/dev/null 2>&1 || :
+ rm -f -- ${tmp}
+}
+
+trap cleanup 0 1 2
+cleanup
+sleep 1
+
+# create the bridge
+ip link add fab-br0 address 52:54:00:3B:7C:A6 mtu 1500 type bridge \
+ forward_delay 15
+
+# create the bond
+ip link add fbond type bond mode 4 miimon 200 xmit_hash_policy 1 \
+ ad_actor_sys_prio 65535 lacp_rate fast
+
+# set bond address
+ip link set fbond address 52:54:00:3B:7C:A6
+ip link set fbond up
+
+# set the bond sysfs parameters again
+ip link set fbond type bond ad_actor_sys_prio 65535
+
+# create veths
+ip link add name veth1-bond type veth peer name veth1-end
+ip link add name veth2-bond type veth peer name veth2-end
+
+# add ports
+ip link set fbond master fab-br0
+ip link set veth1-bond down master fbond
+ip link set veth2-bond down master fbond
+
+# bring up
+ip link set veth1-end up
+ip link set veth2-end up
+ip link set fab-br0 up
+ip link set fbond up
+ip addr add dev fab-br0 10.0.0.3
+
+tcpdump -n -i veth1-end -e ether proto 0x8809 >${tmp} 2>&1 &
+sleep 15
+pkill tcpdump >/dev/null 2>&1
+rc=0
+num=$(grep "packets captured" ${tmp} | awk '{print $1}')
+if test "$num" -gt 0; then
+ echo "PASS, captured ${num}"
+else
+ echo "FAIL"
+ rc=1
+fi
+exit $rc
diff --git a/tools/testing/selftests/drivers/net/bonding/config b/tools/testing/selftests/drivers/net/bonding/config
new file mode 100644
index 000000000000..dc1c22de3c92
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/bonding/config
@@ -0,0 +1 @@
+CONFIG_BONDING=y
diff --git a/tools/testing/selftests/drivers/net/bonding/settings b/tools/testing/selftests/drivers/net/bonding/settings
new file mode 100644
index 000000000000..867e118223cd
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/bonding/settings
@@ -0,0 +1 @@
+timeout=60
diff --git a/Documentation/vm/.gitignore b/tools/testing/selftests/filesystems/fat/.gitignore
index bc74f5643008..b89920ed841c 100644
--- a/Documentation/vm/.gitignore
+++ b/tools/testing/selftests/filesystems/fat/.gitignore
@@ -1,3 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
-page-types
-slabinfo
+rename_exchange
diff --git a/tools/testing/selftests/filesystems/fat/Makefile b/tools/testing/selftests/filesystems/fat/Makefile
new file mode 100644
index 000000000000..902033f6ef09
--- /dev/null
+++ b/tools/testing/selftests/filesystems/fat/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+TEST_PROGS := run_fat_tests.sh
+TEST_GEN_PROGS_EXTENDED := rename_exchange
+CFLAGS += -O2 -g -Wall $(KHDR_INCLUDES)
+
+include ../../lib.mk
diff --git a/tools/testing/selftests/filesystems/fat/config b/tools/testing/selftests/filesystems/fat/config
new file mode 100644
index 000000000000..6cf95e787a17
--- /dev/null
+++ b/tools/testing/selftests/filesystems/fat/config
@@ -0,0 +1,2 @@
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_VFAT_FS=y
diff --git a/tools/testing/selftests/filesystems/fat/rename_exchange.c b/tools/testing/selftests/filesystems/fat/rename_exchange.c
new file mode 100644
index 000000000000..e488ad354fce
--- /dev/null
+++ b/tools/testing/selftests/filesystems/fat/rename_exchange.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Program that atomically exchanges two paths using the
+ * renameat2() system call's RENAME_EXCHANGE flag.
+ *
+ * Copyright 2022 Red Hat Inc.
+ * Author: Javier Martinez Canillas <javierm@redhat.com>
+ */
+
+#define _GNU_SOURCE
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+void print_usage(const char *program)
+{
+ printf("Usage: %s [oldpath] [newpath]\n", program);
+ printf("Atomically exchange oldpath and newpath\n");
+}
+
+int main(int argc, char *argv[])
+{
+ int ret;
+
+ if (argc != 3) {
+ print_usage(argv[0]);
+ exit(EXIT_FAILURE);
+ }
+
+ ret = renameat2(AT_FDCWD, argv[1], AT_FDCWD, argv[2], RENAME_EXCHANGE);
+ if (ret) {
+ perror("rename exchange failed");
+ exit(EXIT_FAILURE);
+ }
+
+ exit(EXIT_SUCCESS);
+}
diff --git a/tools/testing/selftests/filesystems/fat/run_fat_tests.sh b/tools/testing/selftests/filesystems/fat/run_fat_tests.sh
new file mode 100644
index 000000000000..7f35dc3d15df
--- /dev/null
+++ b/tools/testing/selftests/filesystems/fat/run_fat_tests.sh
@@ -0,0 +1,82 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Run filesystem operation tests on a 1 MiB disk image that is formatted with
+# a vfat filesystem and mounted in a temporary directory using a loop device.
+#
+# Copyright 2022 Red Hat Inc.
+# Author: Javier Martinez Canillas <javierm@redhat.com>
+
+set -e
+set -u
+set -o pipefail
+
+BASE_DIR="$(dirname $0)"
+TMP_DIR="$(mktemp -d /tmp/fat_tests_tmp.XXXX)"
+IMG_PATH="${TMP_DIR}/fat.img"
+MNT_PATH="${TMP_DIR}/mnt"
+
+cleanup()
+{
+ mountpoint -q "${MNT_PATH}" && unmount_image
+ rm -rf "${TMP_DIR}"
+}
+trap cleanup SIGINT SIGTERM EXIT
+
+create_loopback()
+{
+ touch "${IMG_PATH}"
+ chattr +C "${IMG_PATH}" >/dev/null 2>&1 || true
+
+ truncate -s 1M "${IMG_PATH}"
+ mkfs.vfat "${IMG_PATH}" >/dev/null 2>&1
+}
+
+mount_image()
+{
+ mkdir -p "${MNT_PATH}"
+ sudo mount -o loop "${IMG_PATH}" "${MNT_PATH}"
+}
+
+rename_exchange_test()
+{
+ local rename_exchange="${BASE_DIR}/rename_exchange"
+ local old_path="${MNT_PATH}/old_file"
+ local new_path="${MNT_PATH}/new_file"
+
+ echo old | sudo tee "${old_path}" >/dev/null 2>&1
+ echo new | sudo tee "${new_path}" >/dev/null 2>&1
+ sudo "${rename_exchange}" "${old_path}" "${new_path}" >/dev/null 2>&1
+ sudo sync -f "${MNT_PATH}"
+ grep new "${old_path}" >/dev/null 2>&1
+ grep old "${new_path}" >/dev/null 2>&1
+}
+
+rename_exchange_subdir_test()
+{
+ local rename_exchange="${BASE_DIR}/rename_exchange"
+ local dir_path="${MNT_PATH}/subdir"
+ local old_path="${MNT_PATH}/old_file"
+ local new_path="${dir_path}/new_file"
+
+ sudo mkdir -p "${dir_path}"
+ echo old | sudo tee "${old_path}" >/dev/null 2>&1
+ echo new | sudo tee "${new_path}" >/dev/null 2>&1
+ sudo "${rename_exchange}" "${old_path}" "${new_path}" >/dev/null 2>&1
+ sudo sync -f "${MNT_PATH}"
+ grep new "${old_path}" >/dev/null 2>&1
+ grep old "${new_path}" >/dev/null 2>&1
+}
+
+unmount_image()
+{
+ sudo umount "${MNT_PATH}" &> /dev/null
+}
+
+create_loopback
+mount_image
+rename_exchange_test
+rename_exchange_subdir_test
+unmount_image
+
+exit 0
diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_eprobe.tc b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_eprobe.tc
index 60c02b482be8..c300eb020262 100644
--- a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_eprobe.tc
+++ b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_eprobe.tc
@@ -1,7 +1,7 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# description: Generic dynamic event - add/remove eprobe events
-# requires: dynamic_events events/syscalls/sys_enter_openat "e[:[<group>/]<event>] <attached-group>.<attached-event> [<args>]":README
+# requires: dynamic_events events/syscalls/sys_enter_openat "<attached-group>.<attached-event> [<args>]":README
echo 0 > events/enable
@@ -87,4 +87,11 @@ echo "-:eprobes/$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
! grep -q "$EPROBE" dynamic_events
! test -d events/eprobes/$EPROBE
+if grep -q "e\[:\[<group>/]\[<event>]]" README; then
+ echo "e:mygroup/ $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
+ test -d events/mygroup
+ echo "-:mygroup/" >> dynamic_events
+ ! test -d events/mygroup
+fi
+
clear_trace
diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_kprobe.tc b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_kprobe.tc
index b4da41d126d5..13d43f40a6fc 100644
--- a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_kprobe.tc
+++ b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_kprobe.tc
@@ -23,4 +23,11 @@ grep -q myevent1 dynamic_events
echo > dynamic_events
+if grep -q "p\[:\[<group>/]\[<event>]]" README; then
+ echo "p:mygroup/ $PLACE" >> dynamic_events
+ test -d events/mygroup
+ echo "-:mygroup/" >> dynamic_events
+ ! test -d events/mygroup
+fi
+
clear_trace
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc
index fa928b431555..9e85d3019ff0 100644
--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc
@@ -21,7 +21,7 @@ check_error 'p:^/bar vfs_read' # NO_GROUP_NAME
check_error 'p:^12345678901234567890123456789012345678901234567890123456789012345/bar vfs_read' # GROUP_TOO_LONG
check_error 'p:^foo.1/bar vfs_read' # BAD_GROUP_NAME
-check_error 'p:foo/^ vfs_read' # NO_EVENT_NAME
+check_error 'p:^ vfs_read' # NO_EVENT_NAME
check_error 'p:foo/^12345678901234567890123456789012345678901234567890123456789012345 vfs_read' # EVENT_TOO_LONG
check_error 'p:foo/^bar.1 vfs_read' # BAD_EVENT_NAME
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index c7f47429d6cd..4c122f1b1737 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -4,6 +4,8 @@ include ../../../build/Build.include
all:
top_srcdir = ../../../..
+include $(top_srcdir)/scripts/subarch.include
+ARCH ?= $(SUBARCH)
# For cross-builds to work, UNAME_M has to map to ARCH and arch specific
# directories and targets in this Makefile. "uname -m" doesn't map to
@@ -197,7 +199,8 @@ endif
CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
-fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \
-I$(LINUX_TOOL_ARCH_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude \
- -I$(<D) -Iinclude/$(UNAME_M) -I.. $(EXTRA_CFLAGS) $(KHDR_INCLUDES)
+ -I$(<D) -Iinclude/$(UNAME_M) -I ../rseq -I.. $(EXTRA_CFLAGS) \
+ $(KHDR_INCLUDES)
no-pie-option := $(call try-run, echo 'int main() { return 0; }' | \
$(CC) -Werror -no-pie -x c - -o "$$TMP", -no-pie)
@@ -206,7 +209,7 @@ no-pie-option := $(call try-run, echo 'int main() { return 0; }' | \
pgste-option = $(call try-run, echo 'int main() { return 0; }' | \
$(CC) -Werror -Wl$(comma)--s390-pgste -x c - -o "$$TMP",-Wl$(comma)--s390-pgste)
-
+LDLIBS += -ldl
LDFLAGS += -pthread $(no-pie-option) $(pgste-option)
# After inclusion, $(OUTPUT) is defined and
diff --git a/tools/testing/selftests/kvm/rseq_test.c b/tools/testing/selftests/kvm/rseq_test.c
index a54d4d05a058..fac248a43666 100644
--- a/tools/testing/selftests/kvm/rseq_test.c
+++ b/tools/testing/selftests/kvm/rseq_test.c
@@ -20,15 +20,7 @@
#include "processor.h"
#include "test_util.h"
-static __thread volatile struct rseq __rseq = {
- .cpu_id = RSEQ_CPU_ID_UNINITIALIZED,
-};
-
-/*
- * Use an arbitrary, bogus signature for configuring rseq, this test does not
- * actually enter an rseq critical section.
- */
-#define RSEQ_SIG 0xdeadbeef
+#include "../rseq/rseq.c"
/*
* Any bug related to task migration is likely to be timing-dependent; perform
@@ -49,12 +41,16 @@ static void guest_code(void)
GUEST_SYNC(0);
}
-static void sys_rseq(int flags)
+/*
+ * We have to perform a direct system call for getcpu() because it is
+ * not available until glibc 2.29.
+ */
+static void sys_getcpu(unsigned *cpu)
{
int r;
- r = syscall(__NR_rseq, &__rseq, sizeof(__rseq), flags, RSEQ_SIG);
- TEST_ASSERT(!r, "rseq failed, errno = %d (%s)", errno, strerror(errno));
+ r = syscall(__NR_getcpu, cpu, NULL, NULL);
+ TEST_ASSERT(!r, "getcpu failed, errno = %d (%s)", errno, strerror(errno));
}
static int next_cpu(int cpu)
@@ -101,7 +97,7 @@ static void *migration_worker(void *__rseq_tid)
atomic_inc(&seq_cnt);
/*
- * Ensure the odd count is visible while sched_getcpu() isn't
+ * Ensure the odd count is visible while getcpu() isn't
* stable, i.e. while changing affinity is in-progress.
*/
smp_wmb();
@@ -142,10 +138,10 @@ static void *migration_worker(void *__rseq_tid)
* check completes.
*
* 3. To ensure the read-side makes efficient forward progress,
- * e.g. if sched_getcpu() involves a syscall. Stalling the
- * read-side means the test will spend more time waiting for
- * sched_getcpu() to stabilize and less time trying to hit
- * the timing-dependent bug.
+ * e.g. if getcpu() involves a syscall. Stalling the read-side
+ * means the test will spend more time waiting for getcpu()
+ * to stabilize and less time trying to hit the timing-dependent
+ * bug.
*
* Because any bug in this area is likely to be timing-dependent,
* run with a range of delays at 1us intervals from 1us to 10us
@@ -218,7 +214,9 @@ int main(int argc, char *argv[])
calc_min_max_cpu();
- sys_rseq(0);
+ r = rseq_register_current_thread();
+ TEST_ASSERT(!r, "rseq_register_current_thread failed, errno = %d (%s)",
+ errno, strerror(errno));
/*
* Create and run a dummy VM that immediately exits to userspace via
@@ -238,9 +236,9 @@ int main(int argc, char *argv[])
/*
* Verify rseq's CPU matches sched's CPU. Ensure migration
- * doesn't occur between sched_getcpu() and reading the rseq
- * cpu_id by rereading both if the sequence count changes, or
- * if the count is odd (migration in-progress).
+ * doesn't occur between getcpu() and reading the rseq cpu_id
+ * by rereading both if the sequence count changes, or if the
+ * count is odd (migration in-progress).
*/
do {
/*
@@ -250,13 +248,13 @@ int main(int argc, char *argv[])
snapshot = atomic_read(&seq_cnt) & ~1;
/*
- * Ensure reading sched_getcpu() and rseq.cpu_id
- * complete in a single "no migration" window, i.e. are
- * not reordered across the seq_cnt reads.
+ * Ensure calling getcpu() and reading rseq.cpu_id complete
+ * in a single "no migration" window, i.e. are not reordered
+ * across the seq_cnt reads.
*/
smp_rmb();
- cpu = sched_getcpu();
- rseq_cpu = READ_ONCE(__rseq.cpu_id);
+ sys_getcpu(&cpu);
+ rseq_cpu = rseq_current_cpu_raw();
smp_rmb();
} while (snapshot != atomic_read(&seq_cnt));
@@ -267,9 +265,9 @@ int main(int argc, char *argv[])
/*
* Sanity check that the test was able to enter the guest a reasonable
* number of times, e.g. didn't get stalled too often/long waiting for
- * sched_getcpu() to stabilize. A 2:1 migration:KVM_RUN ratio is a
- * fairly conservative ratio on x86-64, which can do _more_ KVM_RUNs
- * than migrations given the 1us+ delay in the migration task.
+ * getcpu() to stabilize. A 2:1 migration:KVM_RUN ratio is a fairly
+ * conservative ratio on x86-64, which can do _more_ KVM_RUNs than
+ * migrations given the 1us+ delay in the migration task.
*/
TEST_ASSERT(i > (NR_TASK_MIGRATIONS / 2),
"Only performed %d KVM_RUNs, task stalled too much?\n", i);
@@ -278,7 +276,7 @@ int main(int argc, char *argv[])
kvm_vm_free(vm);
- sys_rseq(RSEQ_FLAG_UNREGISTER);
+ rseq_unregister_current_thread();
return 0;
}
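A hedged condensation of the read side shown in the hunk above (the helper name read_cpus_stable is hypothetical; seq_cnt, sys_getcpu() and rseq_current_cpu_raw() are the ones used in the test):

	static void read_cpus_stable(unsigned int *cpu, int *rseq_cpu)
	{
		int snapshot;

		do {
			/* even snapshot means no migration is in flight */
			snapshot = atomic_read(&seq_cnt) & ~1;
			smp_rmb();

			sys_getcpu(cpu);			/* scheduler's view */
			*rseq_cpu = rseq_current_cpu_raw();	/* rseq's view */

			smp_rmb();
			/* retry if the migration worker bumped seq_cnt meanwhile */
		} while (snapshot != atomic_read(&seq_cnt));
	}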
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c
index 6ec901dab61e..069589c52f41 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c
@@ -59,6 +59,7 @@ int main(int argc, char *argv[])
int ret;
union cpuid10_eax eax;
union perf_capabilities host_cap;
+ uint64_t val;
host_cap.capabilities = kvm_get_feature_msr(MSR_IA32_PERF_CAPABILITIES);
host_cap.capabilities &= (PMU_CAP_FW_WRITES | PMU_CAP_LBR_FMT);
@@ -91,11 +92,17 @@ int main(int argc, char *argv[])
vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.lbr_format);
ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), (u64)host_cap.lbr_format);
- /* testcase 3, check invalid LBR format is rejected */
- /* Note, on Arch LBR capable platforms, LBR_FMT in perf capability msr is 0x3f,
- * to avoid the failure, use a true invalid format 0x30 for the test. */
- ret = _vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, 0x30);
- TEST_ASSERT(ret == 0, "Bad PERF_CAPABILITIES didn't fail.");
+ /*
+ * Testcase 3, check that an "invalid" LBR format is rejected. Only an
+ * exact match of the host's format (and 0/disabled) is allowed.
+ */
+ for (val = 1; val <= PMU_CAP_LBR_FMT; val++) {
+ if (val == (host_cap.capabilities & PMU_CAP_LBR_FMT))
+ continue;
+
+ ret = _vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, val);
+ TEST_ASSERT(!ret, "Bad LBR FMT = 0x%lx didn't fail", val);
+ }
printf("Completed perf capability tests.\n");
kvm_vm_free(vm);
diff --git a/tools/testing/selftests/landlock/Makefile b/tools/testing/selftests/landlock/Makefile
index a6959df28eb0..02868ac3bc71 100644
--- a/tools/testing/selftests/landlock/Makefile
+++ b/tools/testing/selftests/landlock/Makefile
@@ -9,10 +9,13 @@ TEST_GEN_PROGS := $(src_test:.c=)
TEST_GEN_PROGS_EXTENDED := true
OVERRIDE_TARGETS := 1
+top_srcdir := ../../../..
include ../lib.mk
+khdr_dir = $(top_srcdir)/usr/include
+
$(OUTPUT)/true: true.c
$(LINK.c) $< $(LDLIBS) -o $@ -static
-$(OUTPUT)/%_test: %_test.c ../kselftest_harness.h common.h
- $(LINK.c) $< $(LDLIBS) -o $@ -lcap
+$(OUTPUT)/%_test: %_test.c $(khdr_dir)/linux/landlock.h ../kselftest_harness.h common.h
+ $(LINK.c) $< $(LDLIBS) -o $@ -lcap -I$(khdr_dir)
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index 947fc72413e9..d44c72b3abe3 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -40,6 +40,7 @@ ifeq (0,$(MAKELEVEL))
endif
endif
selfdir = $(realpath $(dir $(filter %/lib.mk,$(MAKEFILE_LIST))))
+top_srcdir = $(selfdir)/../../..
# The following are built by lib.mk common compile rules.
# TEST_CUSTOM_PROGS should be used by tests that require
diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore
index 892306bdb47d..0e5751af6247 100644
--- a/tools/testing/selftests/net/.gitignore
+++ b/tools/testing/selftests/net/.gitignore
@@ -38,4 +38,5 @@ ioam6_parser
toeplitz
tun
cmsg_sender
-unix_connect
\ No newline at end of file
+unix_connect
+tap
\ No newline at end of file
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index e2dfef8b78a7..c0ee2955fe54 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -57,7 +57,7 @@ TEST_GEN_FILES += ipsec
TEST_GEN_FILES += ioam6_parser
TEST_GEN_FILES += gro
TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa
-TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls tun
+TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls tun tap
TEST_GEN_FILES += toeplitz
TEST_GEN_FILES += cmsg_sender
TEST_GEN_FILES += stress_reuseport_listen
diff --git a/tools/testing/selftests/net/forwarding/custom_multipath_hash.sh b/tools/testing/selftests/net/forwarding/custom_multipath_hash.sh
index a15d21dc035a..56eb83d1a3bd 100755
--- a/tools/testing/selftests/net/forwarding/custom_multipath_hash.sh
+++ b/tools/testing/selftests/net/forwarding/custom_multipath_hash.sh
@@ -181,37 +181,43 @@ ping_ipv6()
send_src_ipv4()
{
- $MZ $h1 -q -p 64 -A "198.51.100.2-198.51.100.253" -B 203.0.113.2 \
+ ip vrf exec v$h1 $MZ $h1 -q -p 64 \
+ -A "198.51.100.2-198.51.100.253" -B 203.0.113.2 \
-d 1msec -c 50 -t udp "sp=20000,dp=30000"
}
send_dst_ipv4()
{
- $MZ $h1 -q -p 64 -A 198.51.100.2 -B "203.0.113.2-203.0.113.253" \
+ ip vrf exec v$h1 $MZ $h1 -q -p 64 \
+ -A 198.51.100.2 -B "203.0.113.2-203.0.113.253" \
-d 1msec -c 50 -t udp "sp=20000,dp=30000"
}
send_src_udp4()
{
- $MZ $h1 -q -p 64 -A 198.51.100.2 -B 203.0.113.2 \
+ ip vrf exec v$h1 $MZ $h1 -q -p 64 \
+ -A 198.51.100.2 -B 203.0.113.2 \
-d 1msec -t udp "sp=0-32768,dp=30000"
}
send_dst_udp4()
{
- $MZ $h1 -q -p 64 -A 198.51.100.2 -B 203.0.113.2 \
+ ip vrf exec v$h1 $MZ $h1 -q -p 64 \
+ -A 198.51.100.2 -B 203.0.113.2 \
-d 1msec -t udp "sp=20000,dp=0-32768"
}
send_src_ipv6()
{
- $MZ -6 $h1 -q -p 64 -A "2001:db8:1::2-2001:db8:1::fd" -B 2001:db8:4::2 \
+ ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
+ -A "2001:db8:1::2-2001:db8:1::fd" -B 2001:db8:4::2 \
-d 1msec -c 50 -t udp "sp=20000,dp=30000"
}
send_dst_ipv6()
{
- $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B "2001:db8:4::2-2001:db8:4::fd" \
+ ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
+ -A 2001:db8:1::2 -B "2001:db8:4::2-2001:db8:4::fd" \
-d 1msec -c 50 -t udp "sp=20000,dp=30000"
}
@@ -226,13 +232,15 @@ send_flowlabel()
send_src_udp6()
{
- $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:4::2 \
+ ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
+ -A 2001:db8:1::2 -B 2001:db8:4::2 \
-d 1msec -t udp "sp=0-32768,dp=30000"
}
send_dst_udp6()
{
- $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:4::2 \
+ ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
+ -A 2001:db8:1::2 -B 2001:db8:4::2 \
-d 1msec -t udp "sp=20000,dp=0-32768"
}
diff --git a/tools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh b/tools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh
index a73f52efcb6c..0446db9c6f74 100755
--- a/tools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh
+++ b/tools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh
@@ -276,37 +276,43 @@ ping_ipv6()
send_src_ipv4()
{
- $MZ $h1 -q -p 64 -A "198.51.100.2-198.51.100.253" -B 203.0.113.2 \
+ ip vrf exec v$h1 $MZ $h1 -q -p 64 \
+ -A "198.51.100.2-198.51.100.253" -B 203.0.113.2 \
-d 1msec -c 50 -t udp "sp=20000,dp=30000"
}
send_dst_ipv4()
{
- $MZ $h1 -q -p 64 -A 198.51.100.2 -B "203.0.113.2-203.0.113.253" \
+ ip vrf exec v$h1 $MZ $h1 -q -p 64 \
+ -A 198.51.100.2 -B "203.0.113.2-203.0.113.253" \
-d 1msec -c 50 -t udp "sp=20000,dp=30000"
}
send_src_udp4()
{
- $MZ $h1 -q -p 64 -A 198.51.100.2 -B 203.0.113.2 \
+ ip vrf exec v$h1 $MZ $h1 -q -p 64 \
+ -A 198.51.100.2 -B 203.0.113.2 \
-d 1msec -t udp "sp=0-32768,dp=30000"
}
send_dst_udp4()
{
- $MZ $h1 -q -p 64 -A 198.51.100.2 -B 203.0.113.2 \
+ ip vrf exec v$h1 $MZ $h1 -q -p 64 \
+ -A 198.51.100.2 -B 203.0.113.2 \
-d 1msec -t udp "sp=20000,dp=0-32768"
}
send_src_ipv6()
{
- $MZ -6 $h1 -q -p 64 -A "2001:db8:1::2-2001:db8:1::fd" -B 2001:db8:2::2 \
+ ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
+ -A "2001:db8:1::2-2001:db8:1::fd" -B 2001:db8:2::2 \
-d 1msec -c 50 -t udp "sp=20000,dp=30000"
}
send_dst_ipv6()
{
- $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B "2001:db8:2::2-2001:db8:2::fd" \
+ ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
+ -A 2001:db8:1::2 -B "2001:db8:2::2-2001:db8:2::fd" \
-d 1msec -c 50 -t udp "sp=20000,dp=30000"
}
@@ -321,13 +327,15 @@ send_flowlabel()
send_src_udp6()
{
- $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:2::2 \
+ ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
+ -A 2001:db8:1::2 -B 2001:db8:2::2 \
-d 1msec -t udp "sp=0-32768,dp=30000"
}
send_dst_udp6()
{
- $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:2::2 \
+ ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
+ -A 2001:db8:1::2 -B 2001:db8:2::2 \
-d 1msec -t udp "sp=20000,dp=0-32768"
}
diff --git a/tools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh b/tools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh
index 8fea2c2e0b25..d40183b4eccc 100755
--- a/tools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh
+++ b/tools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh
@@ -278,37 +278,43 @@ ping_ipv6()
send_src_ipv4()
{
- $MZ $h1 -q -p 64 -A "198.51.100.2-198.51.100.253" -B 203.0.113.2 \
+ ip vrf exec v$h1 $MZ $h1 -q -p 64 \
+ -A "198.51.100.2-198.51.100.253" -B 203.0.113.2 \
-d 1msec -c 50 -t udp "sp=20000,dp=30000"
}
send_dst_ipv4()
{
- $MZ $h1 -q -p 64 -A 198.51.100.2 -B "203.0.113.2-203.0.113.253" \
+ ip vrf exec v$h1 $MZ $h1 -q -p 64 \
+ -A 198.51.100.2 -B "203.0.113.2-203.0.113.253" \
-d 1msec -c 50 -t udp "sp=20000,dp=30000"
}
send_src_udp4()
{
- $MZ $h1 -q -p 64 -A 198.51.100.2 -B 203.0.113.2 \
+ ip vrf exec v$h1 $MZ $h1 -q -p 64 \
+ -A 198.51.100.2 -B 203.0.113.2 \
-d 1msec -t udp "sp=0-32768,dp=30000"
}
send_dst_udp4()
{
- $MZ $h1 -q -p 64 -A 198.51.100.2 -B 203.0.113.2 \
+ ip vrf exec v$h1 $MZ $h1 -q -p 64 \
+ -A 198.51.100.2 -B 203.0.113.2 \
-d 1msec -t udp "sp=20000,dp=0-32768"
}
send_src_ipv6()
{
- $MZ -6 $h1 -q -p 64 -A "2001:db8:1::2-2001:db8:1::fd" -B 2001:db8:2::2 \
+ ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
+ -A "2001:db8:1::2-2001:db8:1::fd" -B 2001:db8:2::2 \
-d 1msec -c 50 -t udp "sp=20000,dp=30000"
}
send_dst_ipv6()
{
- $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B "2001:db8:2::2-2001:db8:2::fd" \
+ ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
+ -A 2001:db8:1::2 -B "2001:db8:2::2-2001:db8:2::fd" \
-d 1msec -c 50 -t udp "sp=20000,dp=30000"
}
@@ -323,13 +329,15 @@ send_flowlabel()
send_src_udp6()
{
- $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:2::2 \
+ ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
+ -A 2001:db8:1::2 -B 2001:db8:2::2 \
-d 1msec -t udp "sp=0-32768,dp=30000"
}
send_dst_udp6()
{
- $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:2::2 \
+ ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
+ -A 2001:db8:1::2 -B 2001:db8:2::2 \
-d 1msec -t udp "sp=20000,dp=0-32768"
}
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c
index e2ea6c126c99..24d4e9cb617e 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c
@@ -553,6 +553,18 @@ static void set_nonblock(int fd, bool nonblock)
fcntl(fd, F_SETFL, flags & ~O_NONBLOCK);
}
+static void shut_wr(int fd)
+{
+ /* Close our write side, ev. give some time
+ * for address notification and/or checking
+ * the current status
+ */
+ if (cfg_wait)
+ usleep(cfg_wait);
+
+ shutdown(fd, SHUT_WR);
+}
+
static int copyfd_io_poll(int infd, int peerfd, int outfd, bool *in_closed_after_out)
{
struct pollfd fds = {
@@ -630,14 +642,7 @@ static int copyfd_io_poll(int infd, int peerfd, int outfd, bool *in_closed_after
/* ... and peer also closed already */
break;
- /* ... but we still receive.
- * Close our write side, ev. give some time
- * for address notification and/or checking
- * the current status
- */
- if (cfg_wait)
- usleep(cfg_wait);
- shutdown(peerfd, SHUT_WR);
+ shut_wr(peerfd);
} else {
if (errno == EINTR)
continue;
@@ -767,7 +772,7 @@ static int copyfd_io_mmap(int infd, int peerfd, int outfd,
if (err)
return err;
- shutdown(peerfd, SHUT_WR);
+ shut_wr(peerfd);
err = do_recvfile(peerfd, outfd);
*in_closed_after_out = true;
@@ -791,6 +796,9 @@ static int copyfd_io_sendfile(int infd, int peerfd, int outfd,
err = do_sendfile(infd, peerfd, size);
if (err)
return err;
+
+ shut_wr(peerfd);
+
err = do_recvfile(peerfd, outfd);
*in_closed_after_out = true;
}
diff --git a/tools/testing/selftests/net/tap.c b/tools/testing/selftests/net/tap.c
new file mode 100644
index 000000000000..247c3b3ac1c9
--- /dev/null
+++ b/tools/testing/selftests/net/tap.c
@@ -0,0 +1,434 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <net/if.h>
+#include <linux/if_tun.h>
+#include <linux/netlink.h>
+#include <linux/rtnetlink.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <linux/virtio_net.h>
+#include <netinet/ip.h>
+#include <netinet/udp.h>
+#include "../kselftest_harness.h"
+
+static const char param_dev_tap_name[] = "xmacvtap0";
+static const char param_dev_dummy_name[] = "xdummy0";
+static unsigned char param_hwaddr_src[] = { 0x00, 0xfe, 0x98, 0x14, 0x22, 0x42 };
+static unsigned char param_hwaddr_dest[] = {
+ 0x00, 0xfe, 0x98, 0x94, 0xd2, 0x43
+};
+
+#define MAX_RTNL_PAYLOAD (2048)
+#define PKT_DATA 0xCB
+#define TEST_PACKET_SZ (sizeof(struct virtio_net_hdr) + ETH_HLEN + ETH_MAX_MTU)
+
+static struct rtattr *rtattr_add(struct nlmsghdr *nh, unsigned short type,
+ unsigned short len)
+{
+ struct rtattr *rta =
+ (struct rtattr *)((uint8_t *)nh + RTA_ALIGN(nh->nlmsg_len));
+ rta->rta_type = type;
+ rta->rta_len = RTA_LENGTH(len);
+ nh->nlmsg_len = RTA_ALIGN(nh->nlmsg_len) + RTA_ALIGN(rta->rta_len);
+ return rta;
+}
+
+static struct rtattr *rtattr_begin(struct nlmsghdr *nh, unsigned short type)
+{
+ return rtattr_add(nh, type, 0);
+}
+
+static void rtattr_end(struct nlmsghdr *nh, struct rtattr *attr)
+{
+ uint8_t *end = (uint8_t *)nh + nh->nlmsg_len;
+
+ attr->rta_len = end - (uint8_t *)attr;
+}
+
+static struct rtattr *rtattr_add_str(struct nlmsghdr *nh, unsigned short type,
+ const char *s)
+{
+ struct rtattr *rta = rtattr_add(nh, type, strlen(s));
+
+ memcpy(RTA_DATA(rta), s, strlen(s));
+ return rta;
+}
+
+static struct rtattr *rtattr_add_strsz(struct nlmsghdr *nh, unsigned short type,
+ const char *s)
+{
+ struct rtattr *rta = rtattr_add(nh, type, strlen(s) + 1);
+
+ strcpy(RTA_DATA(rta), s);
+ return rta;
+}
+
+static struct rtattr *rtattr_add_any(struct nlmsghdr *nh, unsigned short type,
+ const void *arr, size_t len)
+{
+ struct rtattr *rta = rtattr_add(nh, type, len);
+
+ memcpy(RTA_DATA(rta), arr, len);
+ return rta;
+}
+
+static int dev_create(const char *dev, const char *link_type,
+ int (*fill_rtattr)(struct nlmsghdr *nh),
+ int (*fill_info_data)(struct nlmsghdr *nh))
+{
+ struct {
+ struct nlmsghdr nh;
+ struct ifinfomsg info;
+ unsigned char data[MAX_RTNL_PAYLOAD];
+ } req;
+ struct rtattr *link_info, *info_data;
+ int ret, rtnl;
+
+ rtnl = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_ROUTE);
+ if (rtnl < 0) {
+ fprintf(stderr, "%s: socket %s\n", __func__, strerror(errno));
+ return 1;
+ }
+
+ memset(&req, 0, sizeof(req));
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.info));
+ req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE;
+ req.nh.nlmsg_type = RTM_NEWLINK;
+
+ req.info.ifi_family = AF_UNSPEC;
+ req.info.ifi_type = 1;
+ req.info.ifi_index = 0;
+ req.info.ifi_flags = IFF_BROADCAST | IFF_UP;
+ req.info.ifi_change = 0xffffffff;
+
+ rtattr_add_str(&req.nh, IFLA_IFNAME, dev);
+
+ if (fill_rtattr) {
+ ret = fill_rtattr(&req.nh);
+ if (ret)
+ return ret;
+ }
+
+ link_info = rtattr_begin(&req.nh, IFLA_LINKINFO);
+
+ rtattr_add_strsz(&req.nh, IFLA_INFO_KIND, link_type);
+
+ if (fill_info_data) {
+ info_data = rtattr_begin(&req.nh, IFLA_INFO_DATA);
+ ret = fill_info_data(&req.nh);
+ if (ret)
+ return ret;
+ rtattr_end(&req.nh, info_data);
+ }
+
+ rtattr_end(&req.nh, link_info);
+
+ ret = send(rtnl, &req, req.nh.nlmsg_len, 0);
+ if (ret < 0)
+ fprintf(stderr, "%s: send %s\n", __func__, strerror(errno));
+ ret = (unsigned int)ret != req.nh.nlmsg_len;
+
+ close(rtnl);
+ return ret;
+}
+
+static int dev_delete(const char *dev)
+{
+ struct {
+ struct nlmsghdr nh;
+ struct ifinfomsg info;
+ unsigned char data[MAX_RTNL_PAYLOAD];
+ } req;
+ int ret, rtnl;
+
+ rtnl = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_ROUTE);
+ if (rtnl < 0) {
+ fprintf(stderr, "%s: socket %s\n", __func__, strerror(errno));
+ return 1;
+ }
+
+ memset(&req, 0, sizeof(req));
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.info));
+ req.nh.nlmsg_flags = NLM_F_REQUEST;
+ req.nh.nlmsg_type = RTM_DELLINK;
+
+ req.info.ifi_family = AF_UNSPEC;
+
+ rtattr_add_str(&req.nh, IFLA_IFNAME, dev);
+
+ ret = send(rtnl, &req, req.nh.nlmsg_len, 0);
+ if (ret < 0)
+ fprintf(stderr, "%s: send %s\n", __func__, strerror(errno));
+
+ ret = (unsigned int)ret != req.nh.nlmsg_len;
+
+ close(rtnl);
+ return ret;
+}
+
+static int macvtap_fill_rtattr(struct nlmsghdr *nh)
+{
+ int ifindex;
+
+ ifindex = if_nametoindex(param_dev_dummy_name);
+ if (ifindex == 0) {
+ fprintf(stderr, "%s: ifindex %s\n", __func__, strerror(errno));
+ return -errno;
+ }
+
+ rtattr_add_any(nh, IFLA_LINK, &ifindex, sizeof(ifindex));
+ rtattr_add_any(nh, IFLA_ADDRESS, param_hwaddr_src, ETH_ALEN);
+
+ return 0;
+}
+
+static int opentap(const char *devname)
+{
+ int ifindex;
+ char buf[256];
+ int fd;
+ struct ifreq ifr;
+
+ ifindex = if_nametoindex(devname);
+ if (ifindex == 0) {
+ fprintf(stderr, "%s: ifindex %s\n", __func__, strerror(errno));
+ return -errno;
+ }
+
+ sprintf(buf, "/dev/tap%d", ifindex);
+ fd = open(buf, O_RDWR | O_NONBLOCK);
+ if (fd < 0) {
+ fprintf(stderr, "%s: open %s\n", __func__, strerror(errno));
+ return -errno;
+ }
+
+ memset(&ifr, 0, sizeof(ifr));
+ strcpy(ifr.ifr_name, devname);
+ ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR | IFF_MULTI_QUEUE;
+ if (ioctl(fd, TUNSETIFF, &ifr, sizeof(ifr)) < 0)
+ return -errno;
+ return fd;
+}
+
+size_t build_eth(uint8_t *buf, uint16_t proto)
+{
+ struct ethhdr *eth = (struct ethhdr *)buf;
+
+ eth->h_proto = htons(proto);
+ memcpy(eth->h_source, param_hwaddr_src, ETH_ALEN);
+ memcpy(eth->h_dest, param_hwaddr_dest, ETH_ALEN);
+
+ return ETH_HLEN;
+}
+
+static uint32_t add_csum(const uint8_t *buf, int len)
+{
+ uint32_t sum = 0;
+ uint16_t *sbuf = (uint16_t *)buf;
+
+ while (len > 1) {
+ sum += *sbuf++;
+ len -= 2;
+ }
+
+ if (len)
+ sum += *(uint8_t *)sbuf;
+
+ return sum;
+}
+
+static uint16_t finish_ip_csum(uint32_t sum)
+{
+ uint16_t lo = sum & 0xffff;
+ uint16_t hi = sum >> 16;
+
+ return ~(lo + hi);
+
+}
+
+static uint16_t build_ip_csum(const uint8_t *buf, int len,
+ uint32_t sum)
+{
+ sum += add_csum(buf, len);
+ return finish_ip_csum(sum);
+}
+
+static int build_ipv4_header(uint8_t *buf, int payload_len)
+{
+ struct iphdr *iph = (struct iphdr *)buf;
+
+ iph->ihl = 5;
+ iph->version = 4;
+ iph->ttl = 8;
+ iph->tot_len =
+ htons(sizeof(*iph) + sizeof(struct udphdr) + payload_len);
+ iph->id = htons(1337);
+ iph->protocol = IPPROTO_UDP;
+ iph->saddr = htonl((172 << 24) | (17 << 16) | 2);
+ iph->daddr = htonl((172 << 24) | (17 << 16) | 1);
+ iph->check = build_ip_csum(buf, iph->ihl << 2, 0);
+
+ return iph->ihl << 2;
+}
+
+static int build_udp_packet(uint8_t *buf, int payload_len, bool csum_off)
+{
+ const int ip4alen = sizeof(uint32_t);
+ struct udphdr *udph = (struct udphdr *)buf;
+ int len = sizeof(*udph) + payload_len;
+ uint32_t sum = 0;
+
+ udph->source = htons(22);
+ udph->dest = htons(58822);
+ udph->len = htons(len);
+
+ memset(buf + sizeof(struct udphdr), PKT_DATA, payload_len);
+
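+ /* Checksum the IPv4 pseudo-header: saddr/daddr immediately precede the UDP header. */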
+ sum = add_csum(buf - 2 * ip4alen, 2 * ip4alen);
+ sum += htons(IPPROTO_UDP) + udph->len;
+
+ if (!csum_off)
+ sum += add_csum(buf, len);
+
+ udph->check = finish_ip_csum(sum);
+
+ return sizeof(*udph) + payload_len;
+}
+
+size_t build_test_packet_valid_udp_gso(uint8_t *buf, size_t payload_len)
+{
+ uint8_t *cur = buf;
+ struct virtio_net_hdr *vh = (struct virtio_net_hdr *)buf;
+
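+ /* Request UDP GSO with checksum offload; csum_start/csum_offset point at the UDP check field. */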
+ vh->hdr_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct udphdr);
+ vh->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+ vh->csum_start = ETH_HLEN + sizeof(struct iphdr);
+ vh->csum_offset = __builtin_offsetof(struct udphdr, check);
+ vh->gso_type = VIRTIO_NET_HDR_GSO_UDP;
+ vh->gso_size = ETH_DATA_LEN - sizeof(struct iphdr);
+ cur += sizeof(*vh);
+
+ cur += build_eth(cur, ETH_P_IP);
+ cur += build_ipv4_header(cur, payload_len);
+ cur += build_udp_packet(cur, payload_len, true);
+
+ return cur - buf;
+}
+
+size_t build_test_packet_valid_udp_csum(uint8_t *buf, size_t payload_len)
+{
+ uint8_t *cur = buf;
+ struct virtio_net_hdr *vh = (struct virtio_net_hdr *)buf;
+
+ vh->flags = VIRTIO_NET_HDR_F_DATA_VALID;
+ vh->gso_type = VIRTIO_NET_HDR_GSO_NONE;
+ cur += sizeof(*vh);
+
+ cur += build_eth(cur, ETH_P_IP);
+ cur += build_ipv4_header(cur, payload_len);
+ cur += build_udp_packet(cur, payload_len, false);
+
+ return cur - buf;
+}
+
+size_t build_test_packet_crash_tap_invalid_eth_proto(uint8_t *buf,
+ size_t payload_len)
+{
+ uint8_t *cur = buf;
+ struct virtio_net_hdr *vh = (struct virtio_net_hdr *)buf;
+
+ vh->hdr_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct udphdr);
+ vh->flags = 0;
+ vh->gso_type = VIRTIO_NET_HDR_GSO_UDP;
+ vh->gso_size = ETH_DATA_LEN - sizeof(struct iphdr);
+ cur += sizeof(*vh);
+
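+ /* Ethernet protocol 0 is invalid for a GSO packet; the write below must fail with EINVAL. */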
+ cur += build_eth(cur, 0);
+ cur += sizeof(struct iphdr) + sizeof(struct udphdr);
+ cur += build_ipv4_header(cur, payload_len);
+ cur += build_udp_packet(cur, payload_len, true);
+ cur += payload_len;
+
+ return cur - buf;
+}
+
+FIXTURE(tap)
+{
+ int fd;
+};
+
+FIXTURE_SETUP(tap)
+{
+ int ret;
+
+ ret = dev_create(param_dev_dummy_name, "dummy", NULL, NULL);
+ EXPECT_EQ(ret, 0);
+
+ ret = dev_create(param_dev_tap_name, "macvtap", macvtap_fill_rtattr,
+ NULL);
+ EXPECT_EQ(ret, 0);
+
+ self->fd = opentap(param_dev_tap_name);
+ ASSERT_GE(self->fd, 0);
+}
+
+FIXTURE_TEARDOWN(tap)
+{
+ int ret;
+
+ if (self->fd != -1)
+ close(self->fd);
+
+ ret = dev_delete(param_dev_tap_name);
+ EXPECT_EQ(ret, 0);
+
+ ret = dev_delete(param_dev_dummy_name);
+ EXPECT_EQ(ret, 0);
+}
+
+TEST_F(tap, test_packet_valid_udp_gso)
+{
+ uint8_t pkt[TEST_PACKET_SZ];
+ size_t off;
+ int ret;
+
+ memset(pkt, 0, sizeof(pkt));
+ off = build_test_packet_valid_udp_gso(pkt, 1021);
+ ret = write(self->fd, pkt, off);
+ ASSERT_EQ(ret, off);
+}
+
+TEST_F(tap, test_packet_valid_udp_csum)
+{
+ uint8_t pkt[TEST_PACKET_SZ];
+ size_t off;
+ int ret;
+
+ memset(pkt, 0, sizeof(pkt));
+ off = build_test_packet_valid_udp_csum(pkt, 1024);
+ ret = write(self->fd, pkt, off);
+ ASSERT_EQ(ret, off);
+}
+
+TEST_F(tap, test_packet_crash_tap_invalid_eth_proto)
+{
+ uint8_t pkt[TEST_PACKET_SZ];
+ size_t off;
+ int ret;
+
+ memset(pkt, 0, sizeof(pkt));
+ off = build_test_packet_crash_tap_invalid_eth_proto(pkt, 1024);
+ ret = write(self->fd, pkt, off);
+ ASSERT_EQ(ret, -1);
+ ASSERT_EQ(errno, EINVAL);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/netfilter/nft_flowtable.sh b/tools/testing/selftests/netfilter/nft_flowtable.sh
index d4ffebb989f8..7060bae04ec8 100755
--- a/tools/testing/selftests/netfilter/nft_flowtable.sh
+++ b/tools/testing/selftests/netfilter/nft_flowtable.sh
@@ -14,13 +14,17 @@
# nft_flowtable.sh -o8000 -l1500 -r2000
#
+sfx=$(mktemp -u "XXXXXXXX")
+ns1="ns1-$sfx"
+ns2="ns2-$sfx"
+nsr1="nsr1-$sfx"
+nsr2="nsr2-$sfx"
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
ret=0
-ns1in=""
-ns2in=""
+nsin=""
ns1out=""
ns2out=""
@@ -36,21 +40,19 @@ checktool (){
checktool "nft --version" "run test without nft tool"
checktool "ip -Version" "run test without ip tool"
checktool "which nc" "run test without nc (netcat)"
-checktool "ip netns add nsr1" "create net namespace"
+checktool "ip netns add $nsr1" "create net namespace $nsr1"
-ip netns add ns1
-ip netns add ns2
-
-ip netns add nsr2
+ip netns add $ns1
+ip netns add $ns2
+ip netns add $nsr2
cleanup() {
- for i in 1 2; do
- ip netns del ns$i
- ip netns del nsr$i
- done
+ ip netns del $ns1
+ ip netns del $ns2
+ ip netns del $nsr1
+ ip netns del $nsr2
- rm -f "$ns1in" "$ns1out"
- rm -f "$ns2in" "$ns2out"
+ rm -f "$nsin" "$ns1out" "$ns2out"
[ $log_netns -eq 0 ] && sysctl -q net.netfilter.nf_log_all_netns=$log_netns
}
@@ -59,22 +61,21 @@ trap cleanup EXIT
sysctl -q net.netfilter.nf_log_all_netns=1
-ip link add veth0 netns nsr1 type veth peer name eth0 netns ns1
-ip link add veth1 netns nsr1 type veth peer name veth0 netns nsr2
+ip link add veth0 netns $nsr1 type veth peer name eth0 netns $ns1
+ip link add veth1 netns $nsr1 type veth peer name veth0 netns $nsr2
-ip link add veth1 netns nsr2 type veth peer name eth0 netns ns2
+ip link add veth1 netns $nsr2 type veth peer name eth0 netns $ns2
for dev in lo veth0 veth1; do
- for i in 1 2; do
- ip -net nsr$i link set $dev up
- done
+ ip -net $nsr1 link set $dev up
+ ip -net $nsr2 link set $dev up
done
-ip -net nsr1 addr add 10.0.1.1/24 dev veth0
-ip -net nsr1 addr add dead:1::1/64 dev veth0
+ip -net $nsr1 addr add 10.0.1.1/24 dev veth0
+ip -net $nsr1 addr add dead:1::1/64 dev veth0
-ip -net nsr2 addr add 10.0.2.1/24 dev veth1
-ip -net nsr2 addr add dead:2::1/64 dev veth1
+ip -net $nsr2 addr add 10.0.2.1/24 dev veth1
+ip -net $nsr2 addr add dead:2::1/64 dev veth1
# set different MTUs so we need to push packets coming from ns1 (large MTU)
# to ns2 (smaller MTU) to stack either to perform fragmentation (ip_no_pmtu_disc=1),
@@ -106,85 +107,76 @@ do
esac
done
-if ! ip -net nsr1 link set veth0 mtu $omtu; then
+if ! ip -net $nsr1 link set veth0 mtu $omtu; then
exit 1
fi
-ip -net ns1 link set eth0 mtu $omtu
+ip -net $ns1 link set eth0 mtu $omtu
-if ! ip -net nsr2 link set veth1 mtu $rmtu; then
+if ! ip -net $nsr2 link set veth1 mtu $rmtu; then
exit 1
fi
-ip -net ns2 link set eth0 mtu $rmtu
+ip -net $ns2 link set eth0 mtu $rmtu
# transfer-net between nsr1 and nsr2.
# these addresses are not used for connections.
-ip -net nsr1 addr add 192.168.10.1/24 dev veth1
-ip -net nsr1 addr add fee1:2::1/64 dev veth1
-
-ip -net nsr2 addr add 192.168.10.2/24 dev veth0
-ip -net nsr2 addr add fee1:2::2/64 dev veth0
-
-for i in 1 2; do
- ip netns exec nsr$i sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
- ip netns exec nsr$i sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
-
- ip -net ns$i link set lo up
- ip -net ns$i link set eth0 up
- ip -net ns$i addr add 10.0.$i.99/24 dev eth0
- ip -net ns$i route add default via 10.0.$i.1
- ip -net ns$i addr add dead:$i::99/64 dev eth0
- ip -net ns$i route add default via dead:$i::1
- if ! ip netns exec ns$i sysctl net.ipv4.tcp_no_metrics_save=1 > /dev/null; then
+ip -net $nsr1 addr add 192.168.10.1/24 dev veth1
+ip -net $nsr1 addr add fee1:2::1/64 dev veth1
+
+ip -net $nsr2 addr add 192.168.10.2/24 dev veth0
+ip -net $nsr2 addr add fee1:2::2/64 dev veth0
+
+for i in 0 1; do
+ ip netns exec $nsr1 sysctl net.ipv4.conf.veth$i.forwarding=1 > /dev/null
+ ip netns exec $nsr2 sysctl net.ipv4.conf.veth$i.forwarding=1 > /dev/null
+done
+
+for ns in $ns1 $ns2;do
+ ip -net $ns link set lo up
+ ip -net $ns link set eth0 up
+
+ if ! ip netns exec $ns sysctl net.ipv4.tcp_no_metrics_save=1 > /dev/null; then
echo "ERROR: Check Originator/Responder values (problem during address addition)"
exit 1
fi
-
# don't set ip DF bit for first two tests
- ip netns exec ns$i sysctl net.ipv4.ip_no_pmtu_disc=1 > /dev/null
+ ip netns exec $ns sysctl net.ipv4.ip_no_pmtu_disc=1 > /dev/null
done
-ip -net nsr1 route add default via 192.168.10.2
-ip -net nsr2 route add default via 192.168.10.1
+ip -net $ns1 addr add 10.0.1.99/24 dev eth0
+ip -net $ns2 addr add 10.0.2.99/24 dev eth0
+ip -net $ns1 route add default via 10.0.1.1
+ip -net $ns2 route add default via 10.0.2.1
+ip -net $ns1 addr add dead:1::99/64 dev eth0
+ip -net $ns2 addr add dead:2::99/64 dev eth0
+ip -net $ns1 route add default via dead:1::1
+ip -net $ns2 route add default via dead:2::1
+
+ip -net $nsr1 route add default via 192.168.10.2
+ip -net $nsr2 route add default via 192.168.10.1
-ip netns exec nsr1 nft -f - <<EOF
+ip netns exec $nsr1 nft -f - <<EOF
table inet filter {
flowtable f1 {
hook ingress priority 0
devices = { veth0, veth1 }
}
+ counter routed_orig { }
+ counter routed_repl { }
+
chain forward {
type filter hook forward priority 0; policy drop;
# flow offloaded? Tag ct with mark 1, so we can detect when it fails.
- meta oif "veth1" tcp dport 12345 flow offload @f1 counter
-
- # use packet size to trigger 'should be offloaded by now'.
- # otherwise, if 'flow offload' expression never offloads, the
- # test will pass.
- tcp dport 12345 meta length gt 200 ct mark set 1 counter
+ meta oif "veth1" tcp dport 12345 ct mark set 1 flow add @f1 counter name routed_orig accept
- # this turns off flow offloading internally, so expect packets again
- tcp flags fin,rst ct mark set 0 accept
-
- # this allows large packets from responder, we need this as long
- # as PMTUd is off.
- # This rule is deleted for the last test, when we expect PMTUd
- # to kick in and ensure all packets meet mtu requirements.
- meta length gt $lmtu accept comment something-to-grep-for
-
- # next line blocks connection w.o. working offload.
- # we only do this for reverse dir, because we expect packets to
- # enter slow path due to MTU mismatch of veth0 and veth1.
- tcp sport 12345 ct mark 1 counter log prefix "mark failure " drop
+ # count packets supposedly offloaded as per direction.
+ ct mark 1 counter name ct direction map { original : routed_orig, reply : routed_repl } accept
ct state established,related accept
- # for packets that we can't offload yet, i.e. SYN (any ct that is not confirmed)
- meta length lt 200 oif "veth1" tcp dport 12345 counter accept
-
meta nfproto ipv4 meta l4proto icmp accept
meta nfproto ipv6 meta l4proto icmpv6 accept
}
@@ -197,30 +189,30 @@ if [ $? -ne 0 ]; then
fi
# test basic connectivity
-if ! ip netns exec ns1 ping -c 1 -q 10.0.2.99 > /dev/null; then
- echo "ERROR: ns1 cannot reach ns2" 1>&2
+if ! ip netns exec $ns1 ping -c 1 -q 10.0.2.99 > /dev/null; then
+ echo "ERROR: $ns1 cannot reach ns2" 1>&2
exit 1
fi
-if ! ip netns exec ns2 ping -c 1 -q 10.0.1.99 > /dev/null; then
- echo "ERROR: ns2 cannot reach ns1" 1>&2
+if ! ip netns exec $ns2 ping -c 1 -q 10.0.1.99 > /dev/null; then
+ echo "ERROR: $ns2 cannot reach $ns1" 1>&2
exit 1
fi
if [ $ret -eq 0 ];then
- echo "PASS: netns routing/connectivity: ns1 can reach ns2"
+ echo "PASS: netns routing/connectivity: $ns1 can reach $ns2"
fi
-ns1in=$(mktemp)
+nsin=$(mktemp)
ns1out=$(mktemp)
-ns2in=$(mktemp)
ns2out=$(mktemp)
make_file()
{
name=$1
- SIZE=$((RANDOM % (1024 * 8)))
+ SIZE=$((RANDOM % (1024 * 128)))
+ SIZE=$((SIZE + (1024 * 8)))
TSIZE=$((SIZE * 1024))
dd if=/dev/urandom of="$name" bs=1024 count=$SIZE 2> /dev/null
@@ -231,6 +223,38 @@ make_file()
 dd if=/dev/urandom conv=notrunc of="$name" bs=1 count=$SIZE 2> /dev/null
}
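+# Compare the forward-chain byte counters with the size of the transferred file;
+# with working flow offload most packets skip the forward chain, so the counters
+# must stay below that bound.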
+check_counters()
+{
+ local what=$1
+ local ok=1
+
+ local orig=$(ip netns exec $nsr1 nft reset counter inet filter routed_orig | grep packets)
+ local repl=$(ip netns exec $nsr1 nft reset counter inet filter routed_repl | grep packets)
+
+ local orig_cnt=${orig#*bytes}
+ local repl_cnt=${repl#*bytes}
+
+ local fs=$(du -sb $nsin)
+ local max_orig=${fs%%/*}
+ local max_repl=$((max_orig/4))
+
+ if [ $orig_cnt -gt $max_orig ];then
+ echo "FAIL: $what: original counter $orig_cnt exceeds expected value $max_orig" 1>&2
+ ret=1
+ ok=0
+ fi
+
+ if [ $repl_cnt -gt $max_repl ];then
+ echo "FAIL: $what: reply counter $repl_cnt exceeds expected value $max_repl" 1>&2
+ ret=1
+ ok=0
+ fi
+
+ if [ $ok -eq 1 ]; then
+ echo "PASS: $what"
+ fi
+}
+
check_transfer()
{
in=$1
@@ -255,11 +279,11 @@ test_tcp_forwarding_ip()
local dstport=$4
local lret=0
- ip netns exec $nsb nc -w 5 -l -p 12345 < "$ns2in" > "$ns2out" &
+ ip netns exec $nsb nc -w 5 -l -p 12345 < "$nsin" > "$ns2out" &
lpid=$!
sleep 1
- ip netns exec $nsa nc -w 4 "$dstip" "$dstport" < "$ns1in" > "$ns1out" &
+ ip netns exec $nsa nc -w 4 "$dstip" "$dstport" < "$nsin" > "$ns1out" &
cpid=$!
sleep 3
@@ -274,11 +298,11 @@ test_tcp_forwarding_ip()
wait
- if ! check_transfer "$ns1in" "$ns2out" "ns1 -> ns2"; then
+ if ! check_transfer "$nsin" "$ns2out" "ns1 -> ns2"; then
lret=1
fi
- if ! check_transfer "$ns2in" "$ns1out" "ns1 <- ns2"; then
+ if ! check_transfer "$nsin" "$ns1out" "ns1 <- ns2"; then
lret=1
fi
@@ -295,41 +319,59 @@ test_tcp_forwarding()
test_tcp_forwarding_nat()
{
local lret
+ local pmtu
test_tcp_forwarding_ip "$1" "$2" 10.0.2.99 12345
lret=$?
+ pmtu=$3
+ what=$4
+
if [ $lret -eq 0 ] ; then
+ if [ $pmtu -eq 1 ] ;then
+ check_counters "flow offload for ns1/ns2 with masquerade and pmtu discovery $what"
+ else
+ echo "PASS: flow offload for ns1/ns2 with masquerade $what"
+ fi
+
test_tcp_forwarding_ip "$1" "$2" 10.6.6.6 1666
lret=$?
+ if [ $pmtu -eq 1 ] ;then
+ check_counters "flow offload for ns1/ns2 with dnat and pmtu discovery $what"
+ elif [ $lret -eq 0 ] ; then
+ echo "PASS: flow offload for ns1/ns2 with dnat $what"
+ fi
fi
return $lret
}
-make_file "$ns1in"
-make_file "$ns2in"
+make_file "$nsin"
# First test:
# No PMTU discovery, nsr1 is expected to fragment packets from ns1 to ns2 as needed.
-if test_tcp_forwarding ns1 ns2; then
+# Due to MTU mismatch in both directions, all packets (except small packets like pure
+# acks) have to be handled by normal forwarding path. Therefore, packet counters
+# are not checked.
+if test_tcp_forwarding $ns1 $ns2; then
echo "PASS: flow offloaded for ns1/ns2"
else
echo "FAIL: flow offload for ns1/ns2:" 1>&2
- ip netns exec nsr1 nft list ruleset
+ ip netns exec $nsr1 nft list ruleset
ret=1
fi
# delete default route, i.e. ns2 won't be able to reach ns1 and
# will depend on ns1 being masqueraded in nsr1.
# expect ns1 has nsr1 address.
-ip -net ns2 route del default via 10.0.2.1
-ip -net ns2 route del default via dead:2::1
-ip -net ns2 route add 192.168.10.1 via 10.0.2.1
+ip -net $ns2 route del default via 10.0.2.1
+ip -net $ns2 route del default via dead:2::1
+ip -net $ns2 route add 192.168.10.1 via 10.0.2.1
# Second test:
-# Same, but with NAT enabled.
-ip netns exec nsr1 nft -f - <<EOF
+# Same, but with NAT enabled. Same as in first test: we expect normal forward path
+# to handle most packets.
+ip netns exec $nsr1 nft -f - <<EOF
table ip nat {
chain prerouting {
type nat hook prerouting priority 0; policy accept;
@@ -343,47 +385,45 @@ table ip nat {
}
EOF
-if test_tcp_forwarding_nat ns1 ns2; then
- echo "PASS: flow offloaded for ns1/ns2 with NAT"
-else
+if ! test_tcp_forwarding_nat $ns1 $ns2 0 ""; then
echo "FAIL: flow offload for ns1/ns2 with NAT" 1>&2
- ip netns exec nsr1 nft list ruleset
+ ip netns exec $nsr1 nft list ruleset
ret=1
fi
# Third test:
-# Same as second test, but with PMTU discovery enabled.
-handle=$(ip netns exec nsr1 nft -a list table inet filter | grep something-to-grep-for | cut -d \# -f 2)
-
-if ! ip netns exec nsr1 nft delete rule inet filter forward $handle; then
- echo "FAIL: Could not delete large-packet accept rule"
- exit 1
-fi
-
-ip netns exec ns1 sysctl net.ipv4.ip_no_pmtu_disc=0 > /dev/null
-ip netns exec ns2 sysctl net.ipv4.ip_no_pmtu_disc=0 > /dev/null
-
-if test_tcp_forwarding_nat ns1 ns2; then
- echo "PASS: flow offloaded for ns1/ns2 with NAT and pmtu discovery"
-else
+# Same as second test, but with PMTU discovery enabled. This
+# means that we expect the fastpath to handle packets as soon
+# as the endpoints adjust the packet size.
+ip netns exec $ns1 sysctl net.ipv4.ip_no_pmtu_disc=0 > /dev/null
+ip netns exec $ns2 sysctl net.ipv4.ip_no_pmtu_disc=0 > /dev/null
+
+# reset counters.
+# With pmtu in place we'll also check that nft counters
+# are lower than file size and packets were forwarded via flowtable layer.
+# For earlier tests (large mtus), packets cannot be handled via flowtable
+# (except pure acks and other small packets).
+ip netns exec $nsr1 nft reset counters table inet filter >/dev/null
+
+if ! test_tcp_forwarding_nat $ns1 $ns2 1 ""; then
echo "FAIL: flow offload for ns1/ns2 with NAT and pmtu discovery" 1>&2
- ip netns exec nsr1 nft list ruleset
+ ip netns exec $nsr1 nft list ruleset
fi
# Another test:
# Add bridge interface br0 to Router1, with NAT enabled.
-ip -net nsr1 link add name br0 type bridge
-ip -net nsr1 addr flush dev veth0
-ip -net nsr1 link set up dev veth0
-ip -net nsr1 link set veth0 master br0
-ip -net nsr1 addr add 10.0.1.1/24 dev br0
-ip -net nsr1 addr add dead:1::1/64 dev br0
-ip -net nsr1 link set up dev br0
+ip -net $nsr1 link add name br0 type bridge
+ip -net $nsr1 addr flush dev veth0
+ip -net $nsr1 link set up dev veth0
+ip -net $nsr1 link set veth0 master br0
+ip -net $nsr1 addr add 10.0.1.1/24 dev br0
+ip -net $nsr1 addr add dead:1::1/64 dev br0
+ip -net $nsr1 link set up dev br0
-ip netns exec nsr1 sysctl net.ipv4.conf.br0.forwarding=1 > /dev/null
+ip netns exec $nsr1 sysctl net.ipv4.conf.br0.forwarding=1 > /dev/null
# br0 with NAT enabled.
-ip netns exec nsr1 nft -f - <<EOF
+ip netns exec $nsr1 nft -f - <<EOF
flush table ip nat
table ip nat {
chain prerouting {
@@ -398,59 +438,56 @@ table ip nat {
}
EOF
-if test_tcp_forwarding_nat ns1 ns2; then
- echo "PASS: flow offloaded for ns1/ns2 with bridge NAT"
-else
+if ! test_tcp_forwarding_nat $ns1 $ns2 1 "on bridge"; then
echo "FAIL: flow offload for ns1/ns2 with bridge NAT" 1>&2
- ip netns exec nsr1 nft list ruleset
+ ip netns exec $nsr1 nft list ruleset
ret=1
fi
+
# Another test:
# Add bridge interface br0 to Router1, with NAT and VLAN.
-ip -net nsr1 link set veth0 nomaster
-ip -net nsr1 link set down dev veth0
-ip -net nsr1 link add link veth0 name veth0.10 type vlan id 10
-ip -net nsr1 link set up dev veth0
-ip -net nsr1 link set up dev veth0.10
-ip -net nsr1 link set veth0.10 master br0
-
-ip -net ns1 addr flush dev eth0
-ip -net ns1 link add link eth0 name eth0.10 type vlan id 10
-ip -net ns1 link set eth0 up
-ip -net ns1 link set eth0.10 up
-ip -net ns1 addr add 10.0.1.99/24 dev eth0.10
-ip -net ns1 route add default via 10.0.1.1
-ip -net ns1 addr add dead:1::99/64 dev eth0.10
-
-if test_tcp_forwarding_nat ns1 ns2; then
- echo "PASS: flow offloaded for ns1/ns2 with bridge NAT and VLAN"
-else
+ip -net $nsr1 link set veth0 nomaster
+ip -net $nsr1 link set down dev veth0
+ip -net $nsr1 link add link veth0 name veth0.10 type vlan id 10
+ip -net $nsr1 link set up dev veth0
+ip -net $nsr1 link set up dev veth0.10
+ip -net $nsr1 link set veth0.10 master br0
+
+ip -net $ns1 addr flush dev eth0
+ip -net $ns1 link add link eth0 name eth0.10 type vlan id 10
+ip -net $ns1 link set eth0 up
+ip -net $ns1 link set eth0.10 up
+ip -net $ns1 addr add 10.0.1.99/24 dev eth0.10
+ip -net $ns1 route add default via 10.0.1.1
+ip -net $ns1 addr add dead:1::99/64 dev eth0.10
+
+if ! test_tcp_forwarding_nat $ns1 $ns2 1 "bridge and VLAN"; then
echo "FAIL: flow offload for ns1/ns2 with bridge NAT and VLAN" 1>&2
- ip netns exec nsr1 nft list ruleset
+ ip netns exec $nsr1 nft list ruleset
ret=1
fi
# restore test topology (remove bridge and VLAN)
-ip -net nsr1 link set veth0 nomaster
-ip -net nsr1 link set veth0 down
-ip -net nsr1 link set veth0.10 down
-ip -net nsr1 link delete veth0.10 type vlan
-ip -net nsr1 link delete br0 type bridge
-ip -net ns1 addr flush dev eth0.10
-ip -net ns1 link set eth0.10 down
-ip -net ns1 link set eth0 down
-ip -net ns1 link delete eth0.10 type vlan
+ip -net $nsr1 link set veth0 nomaster
+ip -net $nsr1 link set veth0 down
+ip -net $nsr1 link set veth0.10 down
+ip -net $nsr1 link delete veth0.10 type vlan
+ip -net $nsr1 link delete br0 type bridge
+ip -net $ns1 addr flush dev eth0.10
+ip -net $ns1 link set eth0.10 down
+ip -net $ns1 link set eth0 down
+ip -net $ns1 link delete eth0.10 type vlan
# restore address in ns1 and nsr1
-ip -net ns1 link set eth0 up
-ip -net ns1 addr add 10.0.1.99/24 dev eth0
-ip -net ns1 route add default via 10.0.1.1
-ip -net ns1 addr add dead:1::99/64 dev eth0
-ip -net ns1 route add default via dead:1::1
-ip -net nsr1 addr add 10.0.1.1/24 dev veth0
-ip -net nsr1 addr add dead:1::1/64 dev veth0
-ip -net nsr1 link set up dev veth0
+ip -net $ns1 link set eth0 up
+ip -net $ns1 addr add 10.0.1.99/24 dev eth0
+ip -net $ns1 route add default via 10.0.1.1
+ip -net $ns1 addr add dead:1::99/64 dev eth0
+ip -net $ns1 route add default via dead:1::1
+ip -net $nsr1 addr add 10.0.1.1/24 dev veth0
+ip -net $nsr1 addr add dead:1::1/64 dev veth0
+ip -net $nsr1 link set up dev veth0
KEY_SHA="0x"$(ps -xaf | sha1sum | cut -d " " -f 1)
KEY_AES="0x"$(ps -xaf | md5sum | cut -d " " -f 1)
@@ -480,23 +517,23 @@ do_esp() {
}
-do_esp nsr1 192.168.10.1 192.168.10.2 10.0.1.0/24 10.0.2.0/24 $SPI1 $SPI2
+do_esp $nsr1 192.168.10.1 192.168.10.2 10.0.1.0/24 10.0.2.0/24 $SPI1 $SPI2
-do_esp nsr2 192.168.10.2 192.168.10.1 10.0.2.0/24 10.0.1.0/24 $SPI2 $SPI1
+do_esp $nsr2 192.168.10.2 192.168.10.1 10.0.2.0/24 10.0.1.0/24 $SPI2 $SPI1
-ip netns exec nsr1 nft delete table ip nat
+ip netns exec $nsr1 nft delete table ip nat
# restore default routes
-ip -net ns2 route del 192.168.10.1 via 10.0.2.1
-ip -net ns2 route add default via 10.0.2.1
-ip -net ns2 route add default via dead:2::1
+ip -net $ns2 route del 192.168.10.1 via 10.0.2.1
+ip -net $ns2 route add default via 10.0.2.1
+ip -net $ns2 route add default via dead:2::1
-if test_tcp_forwarding ns1 ns2; then
- echo "PASS: ipsec tunnel mode for ns1/ns2"
+if test_tcp_forwarding $ns1 $ns2; then
+ check_counters "ipsec tunnel mode for ns1/ns2"
else
echo "FAIL: ipsec tunnel mode for ns1/ns2"
- ip netns exec nsr1 nft list ruleset 1>&2
- ip netns exec nsr1 cat /proc/net/xfrm_stat 1>&2
+ ip netns exec $nsr1 nft list ruleset 1>&2
+ ip netns exec $nsr1 cat /proc/net/xfrm_stat 1>&2
fi
exit $ret
diff --git a/tools/testing/selftests/netfilter/nft_trans_stress.sh b/tools/testing/selftests/netfilter/nft_trans_stress.sh
index f1affd12c4b1..a7f62ad4f661 100755
--- a/tools/testing/selftests/netfilter/nft_trans_stress.sh
+++ b/tools/testing/selftests/netfilter/nft_trans_stress.sh
@@ -9,8 +9,27 @@
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
-testns=testns1
+testns=testns-$(mktemp -u "XXXXXXXX")
+
tables="foo bar baz quux"
+global_ret=0
+eret=0
+lret=0
+
+check_result()
+{
+ local r=$1
+ local OK="PASS"
+
+ if [ $r -ne 0 ] ;then
+ OK="FAIL"
+ global_ret=$r
+ fi
+
+ echo "$OK: nft $2 test returned $r"
+
+ eret=0
+}
nft --version > /dev/null 2>&1
if [ $? -ne 0 ];then
@@ -59,16 +78,66 @@ done)
sleep 1
+ip netns exec "$testns" nft -f "$tmp"
for i in $(seq 1 10) ; do ip netns exec "$testns" nft -f "$tmp" & done
for table in $tables;do
- randsleep=$((RANDOM%10))
+ randsleep=$((RANDOM%2))
sleep $randsleep
- ip netns exec "$testns" nft delete table inet $table 2>/dev/null
+ ip netns exec "$testns" nft delete table inet $table
+ lret=$?
+ if [ $lret -ne 0 ]; then
+ eret=$lret
+ fi
done
-randsleep=$((RANDOM%10))
-sleep $randsleep
+check_result $eret "add/delete"
+
+for i in $(seq 1 10) ; do
+ (echo "flush ruleset"; cat "$tmp") | ip netns exec "$testns" nft -f /dev/stdin
+
+ lret=$?
+ if [ $lret -ne 0 ]; then
+ eret=$lret
+ fi
+done
+
+check_result $eret "reload"
+
+for i in $(seq 1 10) ; do
+ (echo "flush ruleset"; cat "$tmp"
+ echo "insert rule inet foo INPUT meta nftrace set 1"
+ echo "insert rule inet foo OUTPUT meta nftrace set 1"
+ ) | ip netns exec "$testns" nft -f /dev/stdin
+ lret=$?
+ if [ $lret -ne 0 ]; then
+ eret=$lret
+ fi
+
+ (echo "flush ruleset"; cat "$tmp"
+ ) | ip netns exec "$testns" nft -f /dev/stdin
+
+ lret=$?
+ if [ $lret -ne 0 ]; then
+ eret=$lret
+ fi
+done
+
+check_result $eret "add/delete with nftrace enabled"
+
+echo "insert rule inet foo INPUT meta nftrace set 1" >> $tmp
+echo "insert rule inet foo OUTPUT meta nftrace set 1" >> $tmp
+
+for i in $(seq 1 10) ; do
+ (echo "flush ruleset"; cat "$tmp") | ip netns exec "$testns" nft -f /dev/stdin
+
+ lret=$?
+ if [ $lret -ne 0 ]; then
+ eret=1
+ fi
+done
+
+check_result $eret "reload with nftrace enabled"
pkill -9 ping
@@ -76,3 +145,5 @@ wait
rm -f "$tmp"
ip netns del "$testns"
+
+exit $global_ret
diff --git a/tools/testing/selftests/powerpc/include/basic_asm.h b/tools/testing/selftests/powerpc/include/basic_asm.h
index 886dc026fe7a..26cde8ea1f49 100644
--- a/tools/testing/selftests/powerpc/include/basic_asm.h
+++ b/tools/testing/selftests/powerpc/include/basic_asm.h
@@ -5,6 +5,16 @@
#include <ppc-asm.h>
#include <asm/unistd.h>
+#ifdef __powerpc64__
+#define PPC_LL ld
+#define PPC_STL std
+#define PPC_STLU stdu
+#else
+#define PPC_LL lwz
+#define PPC_STL stw
+#define PPC_STLU stwu
+#endif
+
#define LOAD_REG_IMMEDIATE(reg, expr) \
lis reg, (expr)@highest; \
ori reg, reg, (expr)@higher; \
@@ -14,16 +24,20 @@
/*
* Note: These macros assume that variables being stored on the stack are
- * doublewords, while this is usually the case it may not always be the
+ * sizeof(long); this is usually true, but it may not always be the
* case for each use case.
*/
+#ifdef __powerpc64__
+
+// ABIv2
#if defined(_CALL_ELF) && _CALL_ELF == 2
#define STACK_FRAME_MIN_SIZE 32
#define STACK_FRAME_TOC_POS 24
#define __STACK_FRAME_PARAM(_param) (32 + ((_param)*8))
#define __STACK_FRAME_LOCAL(_num_params, _var_num) \
((STACK_FRAME_PARAM(_num_params)) + ((_var_num)*8))
-#else
+
+#else // ABIv1 below
#define STACK_FRAME_MIN_SIZE 112
#define STACK_FRAME_TOC_POS 40
#define __STACK_FRAME_PARAM(i) (48 + ((i)*8))
@@ -34,7 +48,24 @@
*/
#define __STACK_FRAME_LOCAL(_num_params, _var_num) \
(112 + ((_var_num)*8))
-#endif
+
+
+#endif // ABIv2
+
+// Common 64-bit
+#define STACK_FRAME_LR_POS 16
+#define STACK_FRAME_CR_POS 8
+
+#else // 32-bit below
+
+#define STACK_FRAME_MIN_SIZE 16
+#define STACK_FRAME_LR_POS 4
+
+#define __STACK_FRAME_PARAM(_param) (STACK_FRAME_MIN_SIZE + ((_param)*4))
+#define __STACK_FRAME_LOCAL(_num_params, _var_num) \
+ ((STACK_FRAME_PARAM(_num_params)) + ((_var_num)*4))
+
+#endif // __powerpc64__
/* Parameter x saved to the stack */
#define STACK_FRAME_PARAM(var) __STACK_FRAME_PARAM(var)
@@ -42,8 +73,6 @@
/* Local variable x saved to the stack after x parameters */
#define STACK_FRAME_LOCAL(num_params, var) \
__STACK_FRAME_LOCAL(num_params, var)
-#define STACK_FRAME_LR_POS 16
-#define STACK_FRAME_CR_POS 8
/*
* It is very important to note here that _extra is the extra amount of
@@ -56,19 +85,21 @@
* preprocessed incorrectly, hence r0.
*/
#define PUSH_BASIC_STACK(_extra) \
- mflr r0; \
- std r0, STACK_FRAME_LR_POS(%r1); \
- stdu %r1, -(_extra + STACK_FRAME_MIN_SIZE)(%r1); \
- mfcr r0; \
- stw r0, STACK_FRAME_CR_POS(%r1); \
- std %r2, STACK_FRAME_TOC_POS(%r1);
+ mflr r0; \
+ PPC_STL r0, STACK_FRAME_LR_POS(%r1); \
+ PPC_STLU %r1, -(((_extra + 15) & ~15) + STACK_FRAME_MIN_SIZE)(%r1);
#define POP_BASIC_STACK(_extra) \
- ld %r2, STACK_FRAME_TOC_POS(%r1); \
- lwz r0, STACK_FRAME_CR_POS(%r1); \
- mtcr r0; \
- addi %r1, %r1, (_extra + STACK_FRAME_MIN_SIZE); \
- ld r0, STACK_FRAME_LR_POS(%r1); \
+ addi %r1, %r1, (((_extra + 15) & ~15) + STACK_FRAME_MIN_SIZE); \
+ PPC_LL r0, STACK_FRAME_LR_POS(%r1); \
mtlr r0;
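+
+/*
+ * OP_REGS: apply \op to registers \start_reg..\end_reg, using \base_reg plus
+ * \base_reg_offset as the base address and \reg_width bytes per register
+ * (\skip offsets the register index used in the address calculation).
+ */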
+.macro OP_REGS op, reg_width, start_reg, end_reg, base_reg, base_reg_offset=0, skip=0
+ .set i, \start_reg
+ .rept (\end_reg - \start_reg + 1)
+ \op i, (\reg_width * (i - \skip) + \base_reg_offset)(\base_reg)
+ .set i, i + 1
+ .endr
+.endm
+
#endif /* _SELFTESTS_POWERPC_BASIC_ASM_H */
diff --git a/tools/testing/selftests/powerpc/include/reg.h b/tools/testing/selftests/powerpc/include/reg.h
index c422be8a42b2..d5a547f72669 100644
--- a/tools/testing/selftests/powerpc/include/reg.h
+++ b/tools/testing/selftests/powerpc/include/reg.h
@@ -55,6 +55,10 @@
#define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF)
#define SPRN_PVR 0x11F
+#define PVR_CFG(pvr) (((pvr) >> 8) & 0xF) /* Configuration field */
+#define PVR_MAJ(pvr) (((pvr) >> 4) & 0xF) /* Major revision field */
+#define PVR_MIN(pvr) (((pvr) >> 0) & 0xF) /* Minor revision field */
+
#define SPRN_DSCR_PRIV 0x11 /* Privilege State DSCR */
#define SPRN_DSCR 0x03 /* Data Stream Control Register */
#define SPRN_PPR 896 /* Program Priority Register */
@@ -123,45 +127,44 @@
"li 30, %[" #_asm_symbol_name_immed "];" \
"li 31, %[" #_asm_symbol_name_immed "];"
-#define ASM_LOAD_FPR_SINGLE_PRECISION(_asm_symbol_name_addr) \
- "lfs 0, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 1, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 2, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 3, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 4, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 5, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 6, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 7, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 8, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 9, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 10, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 11, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 12, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 13, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 14, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 15, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 16, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 17, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 18, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 19, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 20, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 21, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 22, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 23, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 24, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 25, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 26, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 27, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 28, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 29, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 30, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 31, 0(%[" #_asm_symbol_name_addr "]);"
+#define ASM_LOAD_FPR(_asm_symbol_name_addr) \
+ "lfd 0, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 1, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 2, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 3, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 4, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 5, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 6, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 7, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 8, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 9, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 10, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 11, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 12, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 13, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 14, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 15, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 16, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 17, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 18, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 19, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 20, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 21, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 22, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 23, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 24, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 25, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 26, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 27, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 28, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 29, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 30, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 31, 0(%[" #_asm_symbol_name_addr "]);"
#ifndef __ASSEMBLER__
void store_gpr(unsigned long *addr);
void load_gpr(unsigned long *addr);
-void load_fpr_single_precision(float *addr);
-void store_fpr_single_precision(float *addr);
+void store_fpr(double *addr);
#endif /* end of __ASSEMBLER__ */
#endif /* _SELFTESTS_POWERPC_REG_H */
diff --git a/tools/testing/selftests/powerpc/include/utils.h b/tools/testing/selftests/powerpc/include/utils.h
index b9fa9cd709df..e222a5858450 100644
--- a/tools/testing/selftests/powerpc/include/utils.h
+++ b/tools/testing/selftests/powerpc/include/utils.h
@@ -74,6 +74,16 @@ static inline bool have_hwcap2(unsigned long ftr2)
}
#endif
+static inline char *auxv_base_platform(void)
+{
+ return ((char *)get_auxv_entry(AT_BASE_PLATFORM));
+}
+
+static inline char *auxv_platform(void)
+{
+ return ((char *)get_auxv_entry(AT_PLATFORM));
+}
+
bool is_ppc64le(void);
int using_hash_mmu(bool *using_hash);
diff --git a/tools/testing/selftests/powerpc/lib/reg.S b/tools/testing/selftests/powerpc/lib/reg.S
index 9304ea7d59b9..6d1af4a9a6b4 100644
--- a/tools/testing/selftests/powerpc/lib/reg.S
+++ b/tools/testing/selftests/powerpc/lib/reg.S
@@ -53,79 +53,42 @@ FUNC_START(store_gpr)
blr
FUNC_END(store_gpr)
-/* Single Precision Float - float buf[32] */
-FUNC_START(load_fpr_single_precision)
- lfs 0, 0*4(3)
- lfs 1, 1*4(3)
- lfs 2, 2*4(3)
- lfs 3, 3*4(3)
- lfs 4, 4*4(3)
- lfs 5, 5*4(3)
- lfs 6, 6*4(3)
- lfs 7, 7*4(3)
- lfs 8, 8*4(3)
- lfs 9, 9*4(3)
- lfs 10, 10*4(3)
- lfs 11, 11*4(3)
- lfs 12, 12*4(3)
- lfs 13, 13*4(3)
- lfs 14, 14*4(3)
- lfs 15, 15*4(3)
- lfs 16, 16*4(3)
- lfs 17, 17*4(3)
- lfs 18, 18*4(3)
- lfs 19, 19*4(3)
- lfs 20, 20*4(3)
- lfs 21, 21*4(3)
- lfs 22, 22*4(3)
- lfs 23, 23*4(3)
- lfs 24, 24*4(3)
- lfs 25, 25*4(3)
- lfs 26, 26*4(3)
- lfs 27, 27*4(3)
- lfs 28, 28*4(3)
- lfs 29, 29*4(3)
- lfs 30, 30*4(3)
- lfs 31, 31*4(3)
+/* Double Precision Float - double buf[32] */
+FUNC_START(store_fpr)
+ stfd 0, 0*8(3)
+ stfd 1, 1*8(3)
+ stfd 2, 2*8(3)
+ stfd 3, 3*8(3)
+ stfd 4, 4*8(3)
+ stfd 5, 5*8(3)
+ stfd 6, 6*8(3)
+ stfd 7, 7*8(3)
+ stfd 8, 8*8(3)
+ stfd 9, 9*8(3)
+ stfd 10, 10*8(3)
+ stfd 11, 11*8(3)
+ stfd 12, 12*8(3)
+ stfd 13, 13*8(3)
+ stfd 14, 14*8(3)
+ stfd 15, 15*8(3)
+ stfd 16, 16*8(3)
+ stfd 17, 17*8(3)
+ stfd 18, 18*8(3)
+ stfd 19, 19*8(3)
+ stfd 20, 20*8(3)
+ stfd 21, 21*8(3)
+ stfd 22, 22*8(3)
+ stfd 23, 23*8(3)
+ stfd 24, 24*8(3)
+ stfd 25, 25*8(3)
+ stfd 26, 26*8(3)
+ stfd 27, 27*8(3)
+ stfd 28, 28*8(3)
+ stfd 29, 29*8(3)
+ stfd 30, 30*8(3)
+ stfd 31, 31*8(3)
blr
-FUNC_END(load_fpr_single_precision)
-
-/* Single Precision Float - float buf[32] */
-FUNC_START(store_fpr_single_precision)
- stfs 0, 0*4(3)
- stfs 1, 1*4(3)
- stfs 2, 2*4(3)
- stfs 3, 3*4(3)
- stfs 4, 4*4(3)
- stfs 5, 5*4(3)
- stfs 6, 6*4(3)
- stfs 7, 7*4(3)
- stfs 8, 8*4(3)
- stfs 9, 9*4(3)
- stfs 10, 10*4(3)
- stfs 11, 11*4(3)
- stfs 12, 12*4(3)
- stfs 13, 13*4(3)
- stfs 14, 14*4(3)
- stfs 15, 15*4(3)
- stfs 16, 16*4(3)
- stfs 17, 17*4(3)
- stfs 18, 18*4(3)
- stfs 19, 19*4(3)
- stfs 20, 20*4(3)
- stfs 21, 21*4(3)
- stfs 22, 22*4(3)
- stfs 23, 23*4(3)
- stfs 24, 24*4(3)
- stfs 25, 25*4(3)
- stfs 26, 26*4(3)
- stfs 27, 27*4(3)
- stfs 28, 28*4(3)
- stfs 29, 29*4(3)
- stfs 30, 30*4(3)
- stfs 31, 31*4(3)
- blr
-FUNC_END(store_fpr_single_precision)
+FUNC_END(store_fpr)
/* VMX/VSX registers - unsigned long buf[128] */
FUNC_START(loadvsx)
diff --git a/tools/testing/selftests/powerpc/math/.gitignore b/tools/testing/selftests/powerpc/math/.gitignore
index d0c23b2e4b60..07b4893ef7af 100644
--- a/tools/testing/selftests/powerpc/math/.gitignore
+++ b/tools/testing/selftests/powerpc/math/.gitignore
@@ -7,3 +7,4 @@ fpu_signal
vmx_signal
vsx_preempt
fpu_denormal
+mma
diff --git a/tools/testing/selftests/powerpc/math/mma.S b/tools/testing/selftests/powerpc/math/mma.S
index 8528c9849565..61cc88b1b26b 100644
--- a/tools/testing/selftests/powerpc/math/mma.S
+++ b/tools/testing/selftests/powerpc/math/mma.S
@@ -20,6 +20,9 @@ test_mma:
/* xvi16ger2s */
.long 0xec042958
+ /* Deprime the accumulator - xxmfacc 0 */
+ .long 0x7c000162
+
/* Store result in image passed in r5 */
stxvw4x 0,0,5
addi 5,5,16
diff --git a/tools/testing/selftests/powerpc/mce/.gitignore b/tools/testing/selftests/powerpc/mce/.gitignore
new file mode 100644
index 000000000000..f5921462a495
--- /dev/null
+++ b/tools/testing/selftests/powerpc/mce/.gitignore
@@ -0,0 +1 @@
+inject-ra-err
diff --git a/tools/testing/selftests/powerpc/papr_attributes/attr_test.c b/tools/testing/selftests/powerpc/papr_attributes/attr_test.c
index bab0dc06e90b..9b655be641c9 100644
--- a/tools/testing/selftests/powerpc/papr_attributes/attr_test.c
+++ b/tools/testing/selftests/powerpc/papr_attributes/attr_test.c
@@ -7,6 +7,7 @@
* Copyright 2022, Pratik Rajesh Sampat, IBM Corp.
*/
+#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <dirent.h>
@@ -32,7 +33,7 @@ enum type {
NUM_VAL
};
-int value_type(int id)
+static int value_type(int id)
{
int val_type;
@@ -54,15 +55,21 @@ int value_type(int id)
return val_type;
}
-int verify_energy_info(void)
+static int verify_energy_info(void)
{
const char *path = "/sys/firmware/papr/energy_scale_info";
struct dirent *entry;
struct stat s;
DIR *dirp;
- if (stat(path, &s) || !S_ISDIR(s.st_mode))
- return -1;
+ errno = 0;
+ if (stat(path, &s)) {
+ SKIP_IF(errno == ENOENT);
+ FAIL_IF(errno);
+ }
+
+ FAIL_IF(!S_ISDIR(s.st_mode));
+
dirp = opendir(path);
while ((entry = readdir(dirp)) != NULL) {
@@ -76,25 +83,24 @@ int verify_energy_info(void)
id = atoi(entry->d_name);
attr_type = value_type(id);
- if (attr_type == INVALID)
- return -1;
+ FAIL_IF(attr_type == INVALID);
/* Check if the files exist and have data in them */
sprintf(file_name, "%s/%d/desc", path, id);
f = fopen(file_name, "r");
- if (!f || fgetc(f) == EOF)
- return -1;
+ FAIL_IF(!f);
+ FAIL_IF(fgetc(f) == EOF);
sprintf(file_name, "%s/%d/value", path, id);
f = fopen(file_name, "r");
- if (!f || fgetc(f) == EOF)
- return -1;
+ FAIL_IF(!f);
+ FAIL_IF(fgetc(f) == EOF);
if (attr_type == STR_VAL) {
sprintf(file_name, "%s/%d/value_desc", path, id);
f = fopen(file_name, "r");
- if (!f || fgetc(f) == EOF)
- return -1;
+ FAIL_IF(!f);
+ FAIL_IF(fgetc(f) == EOF);
}
}
diff --git a/tools/testing/selftests/powerpc/pmu/Makefile b/tools/testing/selftests/powerpc/pmu/Makefile
index edbd96d3b2ab..30803353bd7c 100644
--- a/tools/testing/selftests/powerpc/pmu/Makefile
+++ b/tools/testing/selftests/powerpc/pmu/Makefile
@@ -8,7 +8,7 @@ EXTRA_SOURCES := ../harness.c event.c lib.c ../utils.c
top_srcdir = ../../../../..
include ../../lib.mk
-all: $(TEST_GEN_PROGS) ebb sampling_tests
+all: $(TEST_GEN_PROGS) ebb sampling_tests event_code_tests
$(TEST_GEN_PROGS): $(EXTRA_SOURCES)
@@ -27,6 +27,7 @@ override define RUN_TESTS
$(DEFAULT_RUN_TESTS)
TARGET=ebb; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests
TARGET=sampling_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests
+ TARGET=event_code_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests
endef
DEFAULT_EMIT_TESTS := $(EMIT_TESTS)
@@ -34,6 +35,7 @@ override define EMIT_TESTS
$(DEFAULT_EMIT_TESTS)
TARGET=ebb; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests
TARGET=sampling_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests
+ TARGET=event_code_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests
endef
DEFAULT_INSTALL_RULE := $(INSTALL_RULE)
@@ -41,12 +43,14 @@ override define INSTALL_RULE
$(DEFAULT_INSTALL_RULE)
TARGET=ebb; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET install
TARGET=sampling_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET install
+ TARGET=event_code_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET install
endef
clean:
$(RM) $(TEST_GEN_PROGS) $(OUTPUT)/loop.o
TARGET=ebb; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET clean
TARGET=sampling_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET clean
+ TARGET=event_code_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET clean
ebb:
TARGET=$@; BUILD_TARGET=$$OUTPUT/$$TARGET; mkdir -p $$BUILD_TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -k -C $$TARGET all
@@ -54,4 +58,7 @@ ebb:
sampling_tests:
TARGET=$@; BUILD_TARGET=$$OUTPUT/$$TARGET; mkdir -p $$BUILD_TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -k -C $$TARGET all
-.PHONY: all run_tests clean ebb sampling_tests
+event_code_tests:
+ TARGET=$@; BUILD_TARGET=$$OUTPUT/$$TARGET; mkdir -p $$BUILD_TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -k -C $$TARGET all
+
+.PHONY: all run_tests clean ebb sampling_tests event_code_tests
diff --git a/tools/testing/selftests/powerpc/pmu/branch_loops.S b/tools/testing/selftests/powerpc/pmu/branch_loops.S
new file mode 100644
index 000000000000..de758dd3cecf
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/branch_loops.S
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2022, Kajol Jain, IBM Corp.
+ */
+
+#include <ppc-asm.h>
+
+ .text
+
+#define ITER_SHIFT 31
+
+FUNC_START(indirect_branch_loop)
+ li r3, 1
+ sldi r3, r3, ITER_SHIFT
+
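+ /* 2^ITER_SHIFT iterations; each pass below ends in an indirect branch through CTR. */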
+1: cmpdi r3, 0
+ beqlr
+
+ addi r3, r3, -1
+
+ ld r4, 2f@got(%r2)
+ mtctr r4
+ bctr
+
+ .balign 32
+2: b 1b
+
+FUNC_END(indirect_branch_loop)
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/.gitignore b/tools/testing/selftests/powerpc/pmu/ebb/.gitignore
index 2920fb39439b..64d8dfdac74a 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/.gitignore
+++ b/tools/testing/selftests/powerpc/pmu/ebb/.gitignore
@@ -21,3 +21,4 @@ back_to_back_ebbs_test
lost_exception_test
no_handler_test
cycles_with_mmcr2_test
+regs_access_pmccext_test
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c
index 4b45a2e70f62..fc32187d483d 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c
@@ -50,6 +50,7 @@ int cycles_with_mmcr2(void)
expected[1] = MMCR2_EXPECTED_2;
i = 0;
bad_mmcr2 = false;
+ actual = 0;
/* Make sure we loop until we take at least one EBB */
while ((ebb_state.stats.ebb_count < 20 && !bad_mmcr2) ||
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/.gitignore b/tools/testing/selftests/powerpc/pmu/event_code_tests/.gitignore
new file mode 100644
index 000000000000..5710683da525
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/.gitignore
@@ -0,0 +1,20 @@
+blacklisted_events_test
+event_alternatives_tests_p10
+event_alternatives_tests_p9
+generic_events_valid_test
+group_constraint_cache_test
+group_constraint_l2l3_sel_test
+group_constraint_mmcra_sample_test
+group_constraint_pmc56_test
+group_constraint_pmc_count_test
+group_constraint_radix_scope_qual_test
+group_constraint_repeat_test
+group_constraint_thresh_cmp_test
+group_constraint_thresh_ctl_test
+group_constraint_thresh_sel_test
+group_constraint_unit_test
+group_pmc56_exclude_constraints_test
+hw_cache_event_type_test
+invalid_event_code_test
+reserved_bits_mmcra_sample_elig_mode_test
+reserved_bits_mmcra_thresh_ctl_test
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/Makefile b/tools/testing/selftests/powerpc/pmu/event_code_tests/Makefile
new file mode 100644
index 000000000000..4e07d7046457
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+CFLAGS += -m64
+
+TEST_GEN_PROGS := group_constraint_pmc56_test group_pmc56_exclude_constraints_test group_constraint_pmc_count_test \
+ group_constraint_repeat_test group_constraint_radix_scope_qual_test reserved_bits_mmcra_sample_elig_mode_test \
+ group_constraint_mmcra_sample_test invalid_event_code_test reserved_bits_mmcra_thresh_ctl_test \
+ blacklisted_events_test event_alternatives_tests_p9 event_alternatives_tests_p10 generic_events_valid_test \
+ group_constraint_l2l3_sel_test group_constraint_cache_test group_constraint_thresh_cmp_test \
+ group_constraint_unit_test group_constraint_thresh_ctl_test group_constraint_thresh_sel_test \
+ hw_cache_event_type_test
+
+top_srcdir = ../../../../../..
+include ../../../lib.mk
+
+$(TEST_GEN_PROGS): ../../harness.c ../../utils.c ../event.c ../lib.c ../sampling_tests/misc.h ../sampling_tests/misc.c
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/blacklisted_events_test.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/blacklisted_events_test.c
new file mode 100644
index 000000000000..fafeff19cb34
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/blacklisted_events_test.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <sys/prctl.h>
+#include <limits.h>
+#include "../event.h"
+#include "../sampling_tests/misc.h"
+
+#define PM_DTLB_MISS_16G 0x1c058
+#define PM_DERAT_MISS_2M 0x1c05a
+#define PM_DTLB_MISS_2M 0x1c05c
+#define PM_MRK_DTLB_MISS_1G 0x1d15c
+#define PM_DTLB_MISS_4K 0x2c056
+#define PM_DERAT_MISS_1G 0x2c05a
+#define PM_MRK_DERAT_MISS_2M 0x2d152
+#define PM_MRK_DTLB_MISS_4K 0x2d156
+#define PM_MRK_DTLB_MISS_16G 0x2d15e
+#define PM_DTLB_MISS_64K 0x3c056
+#define PM_MRK_DERAT_MISS_1G 0x3d152
+#define PM_MRK_DTLB_MISS_64K 0x3d156
+#define PM_DISP_HELD_SYNC_HOLD 0x4003c
+#define PM_DTLB_MISS_16M 0x4c056
+#define PM_DTLB_MISS_1G 0x4c05a
+#define PM_MRK_DTLB_MISS_16M 0x4c15e
+#define PM_MRK_ST_DONE_L2 0x10134
+#define PM_RADIX_PWC_L1_HIT 0x1f056
+#define PM_FLOP_CMPL 0x100f4
+#define PM_MRK_NTF_FIN 0x20112
+#define PM_RADIX_PWC_L2_HIT 0x2d024
+#define PM_IFETCH_THROTTLE 0x3405e
+#define PM_MRK_L2_TM_ST_ABORT_SISTER 0x3e15c
+#define PM_RADIX_PWC_L3_HIT 0x3f056
+#define PM_RUN_CYC_SMT2_MODE 0x3006c
+#define PM_TM_TX_PASS_RUN_INST 0x4e014
+
+#define PVR_POWER9_CUMULUS 0x00002000
+
+int blacklist_events_dd21[] = {
+ PM_MRK_ST_DONE_L2,
+ PM_RADIX_PWC_L1_HIT,
+ PM_FLOP_CMPL,
+ PM_MRK_NTF_FIN,
+ PM_RADIX_PWC_L2_HIT,
+ PM_IFETCH_THROTTLE,
+ PM_MRK_L2_TM_ST_ABORT_SISTER,
+ PM_RADIX_PWC_L3_HIT,
+ PM_RUN_CYC_SMT2_MODE,
+ PM_TM_TX_PASS_RUN_INST,
+ PM_DISP_HELD_SYNC_HOLD,
+};
+
+int blacklist_events_dd22[] = {
+ PM_DTLB_MISS_16G,
+ PM_DERAT_MISS_2M,
+ PM_DTLB_MISS_2M,
+ PM_MRK_DTLB_MISS_1G,
+ PM_DTLB_MISS_4K,
+ PM_DERAT_MISS_1G,
+ PM_MRK_DERAT_MISS_2M,
+ PM_MRK_DTLB_MISS_4K,
+ PM_MRK_DTLB_MISS_16G,
+ PM_DTLB_MISS_64K,
+ PM_MRK_DERAT_MISS_1G,
+ PM_MRK_DTLB_MISS_64K,
+ PM_DISP_HELD_SYNC_HOLD,
+ PM_DTLB_MISS_16M,
+ PM_DTLB_MISS_1G,
+ PM_MRK_DTLB_MISS_16M,
+};
+
+int pvr_min;
+
+/*
+ * check for power9 support for 2.1 and
+ * 2.2 model where blacklist is applicable.
+ */
+int check_for_power9_version(void)
+{
+ pvr_min = PVR_MIN(mfspr(SPRN_PVR));
+
+ SKIP_IF(PVR_VER(mfspr(SPRN_PVR)) != POWER9);
+ SKIP_IF(!(mfspr(SPRN_PVR) & PVR_POWER9_CUMULUS));
+
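+ /* The blacklists below only cover DD2.1 (pvr_min 1) and DD2.2 (pvr_min 2); skip on DD2.3. */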
+ SKIP_IF(!(3 - pvr_min));
+
+ return 0;
+}
+
+/*
+ * Testcase to ensure that using a blacklisted event code
+ * causes event_open to fail on power9
+ */
+
+static int blacklisted_events(void)
+{
+ struct event event;
+ int i = 0;
+
+ /* Check for platform support for the test */
+ SKIP_IF(platform_check_for_tests());
+
+ /*
+ * check for power9 support for 2.1 and
+ * 2.2 model where blacklist is applicable.
+ */
+ SKIP_IF(check_for_power9_version());
+
+ /* Skip for Generic compat mode */
+ SKIP_IF(check_for_generic_compat_pmu());
+
+ if (pvr_min == 1) {
+ for (i = 0; i < ARRAY_SIZE(blacklist_events_dd21); i++) {
+ event_init(&event, blacklist_events_dd21[i]);
+ FAIL_IF(!event_open(&event));
+ }
+ } else if (pvr_min == 2) {
+ for (i = 0; i < ARRAY_SIZE(blacklist_events_dd22); i++) {
+ event_init(&event, blacklist_events_dd22[i]);
+ FAIL_IF(!event_open(&event));
+ }
+ }
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(blacklisted_events, "blacklisted_events");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/event_alternatives_tests_p10.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/event_alternatives_tests_p10.c
new file mode 100644
index 000000000000..8be7aada6523
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/event_alternatives_tests_p10.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include "../event.h"
+#include "../sampling_tests/misc.h"
+
+#define PM_RUN_CYC_ALT 0x200f4
+#define PM_INST_DISP 0x200f2
+#define PM_BR_2PATH 0x20036
+#define PM_LD_MISS_L1 0x3e054
+#define PM_RUN_INST_CMPL_ALT 0x400fa
+
+#define EventCode_1 0x100fc
+#define EventCode_2 0x200fa
+#define EventCode_3 0x300fc
+#define EventCode_4 0x400fc
+
+/*
+ * Check for event alternatives.
+ */
+
+static int event_alternatives_tests_p10(void)
+{
+ struct event *e, events[5];
+ int i;
+
+ /* Check for platform support for the test */
+ SKIP_IF(platform_check_for_tests());
+
+ /*
+ * PVR check is used here since PMU specific data like
+ * alternative events is handled by respective PMU driver
+ * code and using PVR will work correctly for all cases
+ * including generic compat mode.
+ */
+ SKIP_IF(PVR_VER(mfspr(SPRN_PVR)) != POWER10);
+
+ SKIP_IF(check_for_generic_compat_pmu());
+
+ /*
+ * Test for event alternative for 0x0001e
+ * and 0x00002.
+ */
+ e = &events[0];
+ event_init(e, 0x0001e);
+
+ e = &events[1];
+ event_init(e, EventCode_1);
+
+ e = &events[2];
+ event_init(e, EventCode_2);
+
+ e = &events[3];
+ event_init(e, EventCode_3);
+
+ e = &events[4];
+ event_init(e, EventCode_4);
+
+ FAIL_IF(event_open(&events[0]));
+
+ /*
+ * Expected to pass since 0x0001e has alternative event
+ * 0x600f4 in PMC6. So it can go in with other events
+ * in PMC1 to PMC4.
+ */
+ for (i = 1; i < 5; i++)
+ FAIL_IF(event_open_with_group(&events[i], events[0].fd));
+
+ for (i = 0; i < 5; i++)
+ event_close(&events[i]);
+
+ e = &events[0];
+ event_init(e, 0x00002);
+
+ e = &events[1];
+ event_init(e, EventCode_1);
+
+ e = &events[2];
+ event_init(e, EventCode_2);
+
+ e = &events[3];
+ event_init(e, EventCode_3);
+
+ e = &events[4];
+ event_init(e, EventCode_4);
+
+ FAIL_IF(event_open(&events[0]));
+
+ /*
+ * Expected to pass since 0x00002 has alternative event
+ * 0x500fa in PMC5. So it can go in with other events
+ * in PMC1 to PMC4.
+ */
+ for (i = 1; i < 5; i++)
+ FAIL_IF(event_open_with_group(&events[i], events[0].fd));
+
+ for (i = 0; i < 5; i++)
+ event_close(&events[i]);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(event_alternatives_tests_p10, "event_alternatives_tests_p10");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/event_alternatives_tests_p9.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/event_alternatives_tests_p9.c
new file mode 100644
index 000000000000..f7dcf0e0447c
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/event_alternatives_tests_p9.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include "../event.h"
+#include "../sampling_tests/misc.h"
+
+#define PM_RUN_CYC_ALT 0x200f4
+#define PM_INST_DISP 0x200f2
+#define PM_BR_2PATH 0x20036
+#define PM_LD_MISS_L1 0x3e054
+#define PM_RUN_INST_CMPL_ALT 0x400fa
+
+#define EventCode_1 0x200fa
+#define EventCode_2 0x200fc
+#define EventCode_3 0x300fc
+#define EventCode_4 0x400fc
+
+/*
+ * Check for event alternatives.
+ */
+
+static int event_alternatives_tests_p9(void)
+{
+ struct event event, leader;
+
+ /* Check for platform support for the test */
+ SKIP_IF(platform_check_for_tests());
+
+ /*
+ * PVR check is used here since PMU specific data like
+ * alternative events is handled by respective PMU driver
+ * code and using PVR will work correctly for all cases
+ * including generic compat mode.
+ */
+ SKIP_IF(PVR_VER(mfspr(SPRN_PVR)) != POWER9);
+
+ /* Skip for generic compat PMU */
+ SKIP_IF(check_for_generic_compat_pmu());
+
+ /* Init the event for PM_RUN_CYC_ALT */
+ event_init(&leader, PM_RUN_CYC_ALT);
+ FAIL_IF(event_open(&leader));
+
+ event_init(&event, EventCode_1);
+
+ /*
+ * Expected to pass since PM_RUN_CYC_ALT in PMC2 has alternative event
+ * 0x600f4. So it can go in with EventCode_1 which is using PMC2
+ */
+ FAIL_IF(event_open_with_group(&event, leader.fd));
+
+ event_close(&leader);
+ event_close(&event);
+
+ event_init(&leader, PM_INST_DISP);
+ FAIL_IF(event_open(&leader));
+
+ event_init(&event, EventCode_2);
+ /*
+ * Expected to pass since PM_INST_DISP in PMC2 has alternative event
+ * 0x300f2 in PMC3. So it can go in with EventCode_2 which is using PMC2
+ */
+ FAIL_IF(event_open_with_group(&event, leader.fd));
+
+ event_close(&leader);
+ event_close(&event);
+
+ event_init(&leader, PM_BR_2PATH);
+ FAIL_IF(event_open(&leader));
+
+ event_init(&event, EventCode_2);
+ /*
+ * Expected to pass since PM_BR_2PATH in PMC2 has alternative event
+ * 0x40036 in PMC4. So it can go in with EventCode_2 which is using PMC2
+ */
+ FAIL_IF(event_open_with_group(&event, leader.fd));
+
+ event_close(&leader);
+ event_close(&event);
+
+ event_init(&leader, PM_LD_MISS_L1);
+ FAIL_IF(event_open(&leader));
+
+ event_init(&event, EventCode_3);
+ /*
+ * Expected to pass since PM_LD_MISS_L1 in PMC3 has alternative event
+ * 0x400f0 in PMC4. So it can go in with EventCode_3 which is using PMC3
+ */
+ FAIL_IF(event_open_with_group(&event, leader.fd));
+
+ event_close(&leader);
+ event_close(&event);
+
+ event_init(&leader, PM_RUN_INST_CMPL_ALT);
+ FAIL_IF(event_open(&leader));
+
+ event_init(&event, EventCode_4);
+ /*
+ * Expected to pass since PM_RUN_INST_CMPL_ALT in PMC4 has alternative event
+ * 0x500fa in PMC5. So it can go in with EventCode_4 which is using PMC4
+ */
+ FAIL_IF(event_open_with_group(&event, leader.fd));
+
+ event_close(&leader);
+ event_close(&event);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(event_alternatives_tests_p9, "event_alternatives_tests_p9");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/generic_events_valid_test.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/generic_events_valid_test.c
new file mode 100644
index 000000000000..0d237c15d3f2
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/generic_events_valid_test.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <sys/prctl.h>
+#include <limits.h>
+#include "../event.h"
+#include "../sampling_tests/misc.h"
+
+/*
+ * Testcase to ensure that using invalid event in generic
+ * event for PERF_TYPE_HARDWARE should fail
+ */
+
+static int generic_events_valid_test(void)
+{
+ struct event event;
+
+ /* Check for platform support for the test */
+ SKIP_IF(platform_check_for_tests());
+
+ /* generic events is different in compat_mode */
+ SKIP_IF(check_for_generic_compat_pmu());
+
+ /*
+ * Invalid generic events in power10:
+ * - PERF_COUNT_HW_BUS_CYCLES
+ * - PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
+ * - PERF_COUNT_HW_STALLED_CYCLES_BACKEND
+ * - PERF_COUNT_HW_REF_CPU_CYCLES
+ */
+ if (PVR_VER(mfspr(SPRN_PVR)) == POWER10) {
+ event_init_opts(&event, PERF_COUNT_HW_CPU_CYCLES, PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(event_open(&event));
+ event_close(&event);
+
+ event_init_opts(&event, PERF_COUNT_HW_INSTRUCTIONS,
+ PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(event_open(&event));
+ event_close(&event);
+
+ event_init_opts(&event, PERF_COUNT_HW_CACHE_REFERENCES,
+ PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(event_open(&event));
+ event_close(&event);
+
+ event_init_opts(&event, PERF_COUNT_HW_CACHE_MISSES, PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(event_open(&event));
+ event_close(&event);
+
+ event_init_opts(&event, PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
+ PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(event_open(&event));
+ event_close(&event);
+
+ event_init_opts(&event, PERF_COUNT_HW_BRANCH_MISSES, PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(event_open(&event));
+ event_close(&event);
+
+ event_init_opts(&event, PERF_COUNT_HW_BUS_CYCLES, PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(!event_open(&event));
+
+ event_init_opts(&event, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND,
+ PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(!event_open(&event));
+
+ event_init_opts(&event, PERF_COUNT_HW_STALLED_CYCLES_BACKEND,
+ PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(!event_open(&event));
+
+ event_init_opts(&event, PERF_COUNT_HW_REF_CPU_CYCLES, PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(!event_open(&event));
+ } else if (PVR_VER(mfspr(SPRN_PVR)) == POWER9) {
+ /*
+ * Invalid generic events in power9:
+ * - PERF_COUNT_HW_BUS_CYCLES
+ * - PERF_COUNT_HW_REF_CPU_CYCLES
+ */
+ event_init_opts(&event, PERF_COUNT_HW_CPU_CYCLES, PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(event_open(&event));
+ event_close(&event);
+
+ event_init_opts(&event, PERF_COUNT_HW_INSTRUCTIONS, PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(event_open(&event));
+ event_close(&event);
+
+ event_init_opts(&event, PERF_COUNT_HW_CACHE_REFERENCES,
+ PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(event_open(&event));
+ event_close(&event);
+
+ event_init_opts(&event, PERF_COUNT_HW_CACHE_MISSES, PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(event_open(&event));
+ event_close(&event);
+
+ event_init_opts(&event, PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
+ PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(event_open(&event));
+ event_close(&event);
+
+ event_init_opts(&event, PERF_COUNT_HW_BRANCH_MISSES, PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(event_open(&event));
+ event_close(&event);
+
+ event_init_opts(&event, PERF_COUNT_HW_BUS_CYCLES, PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(!event_open(&event));
+
+ event_init_opts(&event, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND,
+ PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(event_open(&event));
+ event_close(&event);
+
+ event_init_opts(&event, PERF_COUNT_HW_STALLED_CYCLES_BACKEND,
+ PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(event_open(&event));
+ event_close(&event);
+
+ event_init_opts(&event, PERF_COUNT_HW_REF_CPU_CYCLES, PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(!event_open(&event));
+ }
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(generic_events_valid_test, "generic_events_valid_test");
+}
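
For readers without the selftest framework in front of them, the event_init_opts()/event_open() helpers used above boil down, for a generic hardware event, to roughly the following perf_event_open() call. This is an illustrative sketch, not part of the patch; the real helpers set a few more attr fields.

/* Sketch only: open a generic PERF_TYPE_HARDWARE event on the current thread. */
#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_generic_hw_event(unsigned long long config)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;	/* generic event, mapped by the PMU driver */
	attr.size = sizeof(attr);
	attr.config = config;		/* e.g. PERF_COUNT_HW_INSTRUCTIONS */
	attr.disabled = 1;

	/* pid 0 = this thread, cpu -1 = any CPU, no group leader, no flags */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}

The test above expects such an open to return a valid fd for supported generic events and -1 for the ones the PMU driver rejects.
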
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_cache_test.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_cache_test.c
new file mode 100644
index 000000000000..f4be05aa3a3d
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_cache_test.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Kajol Jain, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "utils.h"
+#include "../sampling_tests/misc.h"
+
+/* All L1 D cache load references counted at finish, gated by reject */
+#define EventCode_1 0x1100fc
+/* Load Missed L1 */
+#define EventCode_2 0x23e054
+/* Load Missed L1 */
+#define EventCode_3 0x13e054
+
+/*
+ * Testcase for group constraint check of data and instructions
+ * cache qualifier bits which is used to program cache select field in
+ * Monitor Mode Control Register 1 (MMCR1: 16-17) for l1 cache.
+ * All events in the group should match cache select bits otherwise
+ * event_open for the group will fail.
+ */
+static int group_constraint_cache(void)
+{
+ struct event event, leader;
+
+ /* Check for platform support for the test */
+ SKIP_IF(platform_check_for_tests());
+
+ /* Init the events for the group constraint check for l1 cache select bits */
+ event_init(&leader, EventCode_1);
+ FAIL_IF(event_open(&leader));
+
+ event_init(&event, EventCode_2);
+
+ /* Expected to fail as sibling event doesn't request same l1 cache select bits as leader */
+ FAIL_IF(!event_open_with_group(&event, leader.fd));
+
+ event_close(&event);
+
+ /* Init the event for the group constraint l1 cache select test */
+ event_init(&event, EventCode_3);
+
+ /* Expected to succeed as sibling event requests the same l1 cache select bits as leader */
+ FAIL_IF(event_open_with_group(&event, leader.fd));
+
+ event_close(&leader);
+ event_close(&event);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(group_constraint_cache, "group_constraint_cache");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_l2l3_sel_test.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_l2l3_sel_test.c
new file mode 100644
index 000000000000..85a636886069
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_l2l3_sel_test.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Kajol Jain, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "utils.h"
+#include "../sampling_tests/misc.h"
+
+/* All successful D-side store dispatches for this thread */
+#define EventCode_1 0x010000046080
+/* All successful D-side store dispatches for this thread that were L2 Miss */
+#define EventCode_2 0x26880
+/* All successful D-side store dispatches for this thread that were L2 Miss */
+#define EventCode_3 0x010000026880
+
+/*
+ * Testcase for group constraint check of l2l3_sel bits which are
+ * used to program the l2l3 select field in Monitor Mode Control Register 2
+ * (MMCR2: 56-60).
+ * All events in the group should match the l2l3_sel bits, otherwise
+ * event_open for the group should fail.
+ */
+static int group_constraint_l2l3_sel(void)
+{
+ struct event event, leader;
+
+ /*
+ * Check for platform support for the test.
+ * This test is only applicable on power10
+ */
+ SKIP_IF(platform_check_for_tests());
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_1));
+
+ /* Init the events for the group constraint check for l2l3_sel bits */
+ event_init(&leader, EventCode_1);
+ FAIL_IF(event_open(&leader));
+
+ event_init(&event, EventCode_2);
+
+ /* Expected to fail as sibling event doesn't request same l2l3_sel bits as leader */
+ FAIL_IF(!event_open_with_group(&event, leader.fd));
+
+ event_close(&event);
+
+ /* Init the event for the group constraint l2l3_sel test */
+ event_init(&event, EventCode_3);
+
+ /* Expected to succeed as sibling event requests the same l2l3_sel bits as leader */
+ FAIL_IF(event_open_with_group(&event, leader.fd));
+
+ event_close(&leader);
+ event_close(&event);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(group_constraint_l2l3_sel, "group_constraint_l2l3_sel");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_mmcra_sample_test.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_mmcra_sample_test.c
new file mode 100644
index 000000000000..ff625b5d80eb
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_mmcra_sample_test.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include "../event.h"
+#include "../sampling_tests/misc.h"
+
+#define EventCode_1 0x35340401e0
+#define EventCode_2 0x353c0101ec
+#define EventCode_3 0x35340101ec
+/*
+ * Test that using different sample bits in
+ * event code cause failure in schedule for
+ * group of events.
+ */
+
+static int group_constraint_mmcra_sample(void)
+{
+ struct event event, leader;
+
+ SKIP_IF(platform_check_for_tests());
+
+ /*
+ * Events with different "sample" field values
+ * in a group will fail to schedule.
+ * Use event with load only sampling mode as
+ * group leader. Use event with store only sampling
+ * as sibling event.
+ */
+ event_init(&leader, EventCode_1);
+ FAIL_IF(event_open(&leader));
+
+ event_init(&event, EventCode_2);
+
+ /* Expected to fail as sibling event doesn't use same sampling bits as leader */
+ FAIL_IF(!event_open_with_group(&event, leader.fd));
+
+ event_init(&event, EventCode_3);
+
+ /* Expected to pass as sibling event use same sampling bits as leader */
+ FAIL_IF(event_open_with_group(&event, leader.fd));
+
+ event_close(&leader);
+ event_close(&event);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(group_constraint_mmcra_sample, "group_constraint_mmcra_sample");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_pmc56_test.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_pmc56_test.c
new file mode 100644
index 000000000000..f5ee4796d46c
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_pmc56_test.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include "../event.h"
+#include "../sampling_tests/misc.h"
+
+/*
+ * Testcase for the constraint checks for
+ * Performance Monitor Counter 5 (PMC5) and
+ * Performance Monitor Counter 6 (PMC6). Events using
+ * PMC5/PMC6 shouldn't have other fields set in the event
+ * code, like cache bits, thresholding or the marked bit.
+ */
+
+static int group_constraint_pmc56(void)
+{
+ struct event event;
+
+ /* Check for platform support for the test */
+ SKIP_IF(platform_check_for_tests());
+
+ /*
+ * Events using PMC5 and PMC6 with cache bit
+ * set in event code is expected to fail.
+ */
+ event_init(&event, 0x2500fa);
+ FAIL_IF(!event_open(&event));
+
+ event_init(&event, 0x2600f4);
+ FAIL_IF(!event_open(&event));
+
+ /*
+ * PMC5 and PMC6 only support base events,
+ * i.e. 500fa and 600f4. Other combinations
+ * should fail.
+ */
+ event_init(&event, 0x501e0);
+ FAIL_IF(!event_open(&event));
+
+ event_init(&event, 0x6001e);
+ FAIL_IF(!event_open(&event));
+
+ event_init(&event, 0x501fa);
+ FAIL_IF(!event_open(&event));
+
+ /*
+ * Events using PMC5 and PMC6 with random
+ * sampling bits set in event code should fail
+ * to schedule.
+ */
+ event_init(&event, 0x35340500fa);
+ FAIL_IF(!event_open(&event));
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(group_constraint_pmc56, "group_constraint_pmc56");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_pmc_count_test.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_pmc_count_test.c
new file mode 100644
index 000000000000..af7c5c75101c
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_pmc_count_test.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include "../event.h"
+#include "../sampling_tests/misc.h"
+
+/*
+ * Testcase for the number of counters in use.
+ * The programmable counters range from
+ * performance monitor counter 1 to performance
+ * monitor counter 4 (PMC1-PMC4). If the number of
+ * counters in use exceeds the limit, the next event
+ * should fail to schedule.
+ */
+
+static int group_constraint_pmc_count(void)
+{
+ struct event *e, events[5];
+ int i;
+
+ /* Check for platform support for the test */
+ SKIP_IF(platform_check_for_tests());
+
+ /*
+ * Test for the number of counters in use.
+ * Use PMC1 to PMC4 for the leader and the first 3 sibling
+ * events. Trying to open the fourth sibling event should
+ * fail here since all programmable counters are in use.
+ */
+ e = &events[0];
+ event_init(e, 0x1001a);
+
+ e = &events[1];
+ event_init(e, 0x200fc);
+
+ e = &events[2];
+ event_init(e, 0x30080);
+
+ e = &events[3];
+ event_init(e, 0x40054);
+
+ e = &events[4];
+ event_init(e, 0x0002c);
+
+ FAIL_IF(event_open(&events[0]));
+
+ /*
+ * event_open is expected to fail for events[4] since
+ * the group constraint check fails
+ */
+ for (i = 1; i < 5; i++) {
+ if (i == 4)
+ FAIL_IF(!event_open_with_group(&events[i], events[0].fd));
+ else
+ FAIL_IF(event_open_with_group(&events[i], events[0].fd));
+ }
+
+ for (i = 1; i < 4; i++)
+ event_close(&events[i]);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(group_constraint_pmc_count, "group_constraint_pmc_count");
+}
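
As a note on the event codes chosen above (a sketch, not part of the patch; the bit positions are an assumption based on the raw event format documented in the kernel's arch/powerpc/perf code): the PMC number is carried in bits 16-19 of the raw event code, which is why 0x1001a through 0x40054 pin PMC1-PMC4 while 0x0002c leaves the PMC field zero and must be placed by the constraint logic.

/* Sketch only: extracting the PMC field from an ISA v3.0/v3.1 raw event code.
 * The shift/mask values are assumptions taken from the documented event format;
 * the selftests use the EV_CODE_EXTRACT() macro from misc.h for the same purpose.
 */
#define RAW_EVENT_PMC_SHIFT	16
#define RAW_EVENT_PMC_MASK	0xfULL

static inline unsigned int raw_event_pmc(unsigned long long code)
{
	return (code >> RAW_EVENT_PMC_SHIFT) & RAW_EVENT_PMC_MASK;
}

/* raw_event_pmc(0x1001a) == 1, raw_event_pmc(0x40054) == 4,
 * raw_event_pmc(0x0002c) == 0 (no fixed PMC requested). */
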
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_radix_scope_qual_test.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_radix_scope_qual_test.c
new file mode 100644
index 000000000000..9225618b846a
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_radix_scope_qual_test.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include "../event.h"
+#include "../sampling_tests/misc.h"
+
+/* PM_DATA_RADIX_PROCESS_L2_PTE_FROM_L2 */
+#define EventCode_1 0x14242
+/* PM_DATA_RADIX_PROCESS_L2_PTE_FROM_L3 */
+#define EventCode_2 0x24242
+
+/*
+ * Testcase for group constraint check for radix_scope_qual
+ * field which is used to program Monitor Mode Control
+ * Register 1 (MMCR1) bit 18.
+ * All events in the group should match the radix_scope_qual
+ * bits, otherwise event_open for the group should fail.
+ */
+
+static int group_constraint_radix_scope_qual(void)
+{
+ struct event event, leader;
+
+ /*
+ * Check for platform support for the test.
+ * This test is applicable on power10 only.
+ */
+ SKIP_IF(platform_check_for_tests());
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_1));
+
+ /* Init the events for the group constraint check for radix_scope_qual bits */
+ event_init(&leader, EventCode_1);
+ FAIL_IF(event_open(&leader));
+
+ event_init(&event, 0x200fc);
+
+ /* Expected to fail as sibling event doesn't request same radix_scope_qual bits as leader */
+ FAIL_IF(!event_open_with_group(&event, leader.fd));
+
+ event_init(&event, EventCode_2);
+ /* Expected to pass as sibling event requests the same radix_scope_qual bits as leader */
+ FAIL_IF(event_open_with_group(&event, leader.fd));
+
+ event_close(&leader);
+ event_close(&event);
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(group_constraint_radix_scope_qual,
+ "group_constraint_radix_scope_qual");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_repeat_test.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_repeat_test.c
new file mode 100644
index 000000000000..371cd05bb3ed
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_repeat_test.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include "../event.h"
+#include "../sampling_tests/misc.h"
+
+/* The processor's L1 data cache was reloaded */
+#define EventCode1 0x21C040
+#define EventCode2 0x22C040
+
+/*
+ * Testcase for group constraint check
+ * when using events with the same PMC.
+ * Multiple events in a group shouldn't
+ * ask for the same PMC; if they do, event_open should fail.
+ */
+
+static int group_constraint_repeat(void)
+{
+ struct event event, leader;
+
+ /* Check for platform support for the test */
+ SKIP_IF(platform_check_for_tests());
+
+ /*
+ * Two events in a group using the same PMC
+ * should fail to get scheduled. Use the same event
+ * code (hence the same PMC) for the leader and
+ * sibling event, which is expected to fail.
+ */
+ event_init(&leader, EventCode1);
+ FAIL_IF(event_open(&leader));
+
+ event_init(&event, EventCode1);
+
+ /* Expected to fail since sibling event is requesting same PMC as leader */
+ FAIL_IF(!event_open_with_group(&event, leader.fd));
+
+ event_init(&event, EventCode2);
+
+ /* Expected to pass since sibling event is requesting different PMC */
+ FAIL_IF(event_open_with_group(&event, leader.fd));
+
+ event_close(&leader);
+ event_close(&event);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(group_constraint_repeat, "group_constraint_repeat");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_thresh_cmp_test.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_thresh_cmp_test.c
new file mode 100644
index 000000000000..9f1197104e8c
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_thresh_cmp_test.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Kajol Jain, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "utils.h"
+#include "../sampling_tests/misc.h"
+
+/*
+ * Primary PMU events used here is PM_MRK_INST_CMPL (0x401e0) and
+ * PM_THRESH_MET (0x101ec)
+ * Threshold event selection used is issue to complete for cycles
+ * Sampling criteria is Load or Store only sampling
+ */
+#define p9_EventCode_1 0x13e35340401e0
+#define p9_EventCode_2 0x17d34340101ec
+#define p9_EventCode_3 0x13e35340101ec
+#define p10_EventCode_1 0x35340401e0
+#define p10_EventCode_2 0x35340101ec
+
+/*
+ * Testcase for group constraint check of thresh_cmp bits which is
+ * used to program thresh compare field in Monitor Mode Control Register A
+ * (MMCRA: 9-18 bits for power9 and MMCRA: 8-18 bits for power10).
+ * All events in the group should match thresh compare bits otherwise
+ * event_open for the group will fail.
+ */
+static int group_constraint_thresh_cmp(void)
+{
+ struct event event, leader;
+
+ /* Check for platform support for the test */
+ SKIP_IF(platform_check_for_tests());
+
+ if (have_hwcap2(PPC_FEATURE2_ARCH_3_1)) {
+ /* Init the events for the group constraint check for thresh_cmp bits */
+ event_init(&leader, p10_EventCode_1);
+
+ /* Add the thresh_cmp value for leader in config1 */
+ leader.attr.config1 = 1000;
+ FAIL_IF(event_open(&leader));
+
+ event_init(&event, p10_EventCode_2);
+
+ /* Add the different thresh_cmp value from the leader event in config1 */
+ event.attr.config1 = 2000;
+
+ /* Expected to fail as sibling and leader event request different thresh_cmp bits */
+ FAIL_IF(!event_open_with_group(&event, leader.fd));
+
+ event_close(&event);
+
+ /* Init the event for the group constraint thresh compare test */
+ event_init(&event, p10_EventCode_2);
+
+ /* Add the same thresh_cmp value for leader and sibling event in config1 */
+ event.attr.config1 = 1000;
+
+ /* Expected to succeed as sibling and leader event request same thresh_cmp bits */
+ FAIL_IF(event_open_with_group(&event, leader.fd));
+
+ event_close(&leader);
+ event_close(&event);
+ } else {
+ /* Init the events for the group constraint check for thresh_cmp bits */
+ event_init(&leader, p9_EventCode_1);
+ FAIL_IF(event_open(&leader));
+
+ event_init(&event, p9_EventCode_2);
+
+ /* Expected to fail as sibling and leader event request different thresh_cmp bits */
+ FAIL_IF(!event_open_with_group(&event, leader.fd));
+
+ event_close(&event);
+
+ /* Init the event for the group constraint thresh compare test */
+ event_init(&event, p9_EventCode_3);
+
+ /* Expected to succeed as sibling and leader event request same thresh_cmp bits */
+ FAIL_IF(event_open_with_group(&event, leader.fd));
+
+ event_close(&leader);
+ event_close(&event);
+ }
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(group_constraint_thresh_cmp, "group_constraint_thresh_cmp");
+}
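
To make the power9/power10 split above concrete: on power9 the thresh_cmp value is embedded in the raw event code itself, while on power10 it travels in attr.config1 (1000 vs 2000 in the test). The following is a sketch only, not part of the patch; the bit positions are an assumption based on the documented ISA v3.0 raw event format.

/* Sketch only: where thresh_cmp lives in the power9 raw event codes above. */
#define P9_EVENT_THR_CMP_SHIFT	40
#define P9_EVENT_THR_CMP_MASK	0x3ffULL

static inline unsigned int p9_thresh_cmp(unsigned long long code)
{
	return (code >> P9_EVENT_THR_CMP_SHIFT) & P9_EVENT_THR_CMP_MASK;
}

/* p9_thresh_cmp(0x13e35340401e0) == 0x13e  (leader)
 * p9_thresh_cmp(0x17d34340101ec) == 0x17d  (mismatch -> group fails)
 * p9_thresh_cmp(0x13e35340101ec) == 0x13e  (match    -> group schedules)
 * On power10 the value is passed via attr.config1 instead.
 */
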
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_thresh_ctl_test.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_thresh_ctl_test.c
new file mode 100644
index 000000000000..e0852ebc1671
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_thresh_ctl_test.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Kajol Jain, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "utils.h"
+#include "../sampling_tests/misc.h"
+
+/*
+ * Primary PMU events used here are PM_MRK_INST_CMPL (0x401e0) and
+ * PM_THRESH_MET (0x101ec).
+ * Threshold event selection used is issue to complete and issue to
+ * finish for cycles
+ * Sampling criteria is Load or Store only sampling
+ */
+#define EventCode_1 0x35340401e0
+#define EventCode_2 0x34340101ec
+#define EventCode_3 0x35340101ec
+
+/*
+ * Testcase for group constraint check of thresh_ctl bits which are
+ * used to program the thresh control field in Monitor Mode Control Register A
+ * (MMCRA: 48-55).
+ * All events in the group should match the thresh ctl bits, otherwise
+ * event_open for the group will fail.
+ */
+static int group_constraint_thresh_ctl(void)
+{
+ struct event event, leader;
+
+ /* Check for platform support for the test */
+ SKIP_IF(platform_check_for_tests());
+
+ /* Init the events for the group constraint thresh control test */
+ event_init(&leader, EventCode_1);
+ FAIL_IF(event_open(&leader));
+
+ event_init(&event, EventCode_2);
+
+ /* Expected to fail as sibling and leader event request different thresh_ctl bits */
+ FAIL_IF(!event_open_with_group(&event, leader.fd));
+
+ event_close(&event);
+
+ /* Init the event for the group constraint thresh control test */
+ event_init(&event, EventCode_3);
+
+ /* Expected to succeed as sibling and leader event request same thresh_ctl bits */
+ FAIL_IF(event_open_with_group(&event, leader.fd));
+
+ event_close(&leader);
+ event_close(&event);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(group_constraint_thresh_ctl, "group_constraint_thresh_ctl");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_thresh_sel_test.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_thresh_sel_test.c
new file mode 100644
index 000000000000..50a8cd843ce7
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_thresh_sel_test.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Kajol Jain, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "utils.h"
+#include "../sampling_tests/misc.h"
+
+/*
+ * Primary PMU events used here are PM_MRK_INST_CMPL (0x401e0) and
+ * PM_THRESH_MET (0x101ec).
+ * Threshold event selection used is issue to complete
+ * Sampling criteria is Load or Store only sampling
+ */
+#define EventCode_1 0x35340401e0
+#define EventCode_2 0x35540101ec
+#define EventCode_3 0x35340101ec
+
+/*
+ * Testcase for group constraint check of thresh_sel bits which is
+ * used to program thresh select field in Monitor Mode Control Register A
+ * (MMCRA: 45-57).
+ * All events in the group should match thresh sel bits otherwise
+ * event_open for the group will fail.
+ */
+static int group_constraint_thresh_sel(void)
+{
+ struct event event, leader;
+
+ /* Check for platform support for the test */
+ SKIP_IF(platform_check_for_tests());
+
+ /* Init the events for the group constraint thresh select test */
+ event_init(&leader, EventCode_1);
+ FAIL_IF(event_open(&leader));
+
+ event_init(&event, EventCode_2);
+
+ /* Expected to fail as sibling and leader event request different thresh_sel bits */
+ FAIL_IF(!event_open_with_group(&event, leader.fd));
+
+ event_close(&event);
+
+ /* Init the event for the group constraint thresh select test */
+ event_init(&event, EventCode_3);
+
+ /* Expected to succeed as sibling and leader event request same thresh_sel bits */
+ FAIL_IF(event_open_with_group(&event, leader.fd));
+
+ event_close(&leader);
+ event_close(&event);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(group_constraint_thresh_sel, "group_constraint_thresh_sel");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_unit_test.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_unit_test.c
new file mode 100644
index 000000000000..a2c18923dcec
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_unit_test.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Kajol Jain, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "utils.h"
+#include "../sampling_tests/misc.h"
+
+/* All successful D-side store dispatches for this thread with PMC 2 */
+#define EventCode_1 0x26080
+/* All successful D-side store dispatches for this thread with PMC 4 */
+#define EventCode_2 0x46080
+/* All successful D-side store dispatches for this thread that were L2 Miss with PMC 3 */
+#define EventCode_3 0x36880
+
+/*
+ * Testcase for group constraint check of unit and pmc bits which are
+ * used to program the corresponding unit and pmc fields in Monitor Mode
+ * Control Register 1 (MMCR1).
+ * One of the events in the group should use PMC 4 in case the unit field
+ * value is within 6 to 9, otherwise event_open for the group will fail.
+ */
+static int group_constraint_unit(void)
+{
+ struct event *e, events[3];
+
+ /*
+ * Check for platform support for the test.
+ * The constraint to use PMC4 with one of the events in the group,
+ * when the unit is within 6 to 9, is only applicable on
+ * power9.
+ */
+ SKIP_IF(platform_check_for_tests());
+ SKIP_IF(have_hwcap2(PPC_FEATURE2_ARCH_3_1));
+
+ /* Init the events for the group constraint check for unit bits */
+ e = &events[0];
+ event_init(e, EventCode_1);
+
+ /* Expected to fail as PMC 4 is not used with unit field value 6 to 9 */
+ FAIL_IF(!event_open(&events[0]));
+
+ /* Init the events for the group constraint check for unit bits */
+ e = &events[1];
+ event_init(e, EventCode_2);
+
+ /* Expected to pass as PMC 4 is used with unit field value 6 to 9 */
+ FAIL_IF(event_open(&events[1]));
+
+ /* Init the event for the group constraint unit test */
+ e = &events[2];
+ event_init(e, EventCode_3);
+
+ /* Expected to fail as PMC4 is not being used */
+ FAIL_IF(!event_open_with_group(&events[2], events[0].fd));
+
+ /* Expected to succeed as the group leader uses PMC4 */
+ FAIL_IF(event_open_with_group(&events[2], events[1].fd));
+
+ event_close(&events[0]);
+ event_close(&events[1]);
+ event_close(&events[2]);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(group_constraint_unit, "group_constraint_unit");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/group_pmc56_exclude_constraints_test.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_pmc56_exclude_constraints_test.c
new file mode 100644
index 000000000000..cff9ac170df6
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_pmc56_exclude_constraints_test.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include "../event.h"
+#include <sys/prctl.h>
+#include <limits.h>
+#include "../sampling_tests/misc.h"
+
+/*
+ * Testcase for group constraint check for
+ * Performance Monitor Counter 5 (PMC5) and also
+ * Performance Monitor Counter 6 (PMC6).
+ * Test that PMC5/6 are excluded from the constraint
+ * check when scheduled along with a group of events.
+ */
+
+static int group_pmc56_exclude_constraints(void)
+{
+ struct event *e, events[3];
+ int i;
+
+ /* Check for platform support for the test */
+ SKIP_IF(platform_check_for_tests());
+
+ /*
+ * PMC5/6 are excluded from the constraint bit
+ * check when scheduled along with a group of events. Use
+ * a group of events with PMC5, PMC6 and also an
+ * event with the cache bit (dc_ic) set. The test expects
+ * this set of events to be scheduled as a group.
+ */
+ e = &events[0];
+ event_init(e, 0x500fa);
+
+ e = &events[1];
+ event_init(e, 0x600f4);
+
+ e = &events[2];
+ event_init(e, 0x22C040);
+
+ FAIL_IF(event_open(&events[0]));
+
+ /*
+ * The event_open will fail if constraint check fails.
+ * Since we are asking for events in a group and since
+ * PMC5/PMC6 are excluded from group constraints, event_open
+ * should pass.
+ */
+ for (i = 1; i < 3; i++)
+ FAIL_IF(event_open_with_group(&events[i], events[0].fd));
+
+ for (i = 0; i < 3; i++)
+ event_close(&events[i]);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(group_pmc56_exclude_constraints, "group_pmc56_exclude_constraints");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/hw_cache_event_type_test.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/hw_cache_event_type_test.c
new file mode 100644
index 000000000000..a45b1da5b568
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/hw_cache_event_type_test.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Kajol Jain, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "utils.h"
+#include "../sampling_tests/misc.h"
+
+/*
+ * Load Missed L1: for power9 it points to PM_LD_MISS_L1_FIN (0x2c04e) and
+ * for power10 it points to PM_LD_MISS_L1 (0x3e054)
+ *
+ * Hardware cache level : PERF_COUNT_HW_CACHE_L1D
+ * Hardware cache event operation type : PERF_COUNT_HW_CACHE_OP_READ
+ * Hardware cache event result type : PERF_COUNT_HW_CACHE_RESULT_MISS
+ */
+#define EventCode_1 0x10000
+/*
+ * Hardware cache level : PERF_COUNT_HW_CACHE_L1D
+ * Hardware cache event operation type : PERF_COUNT_HW_CACHE_OP_WRITE
+ * Hardware cache event result type : PERF_COUNT_HW_CACHE_RESULT_ACCESS
+ */
+#define EventCode_2 0x0100
+/*
+ * Hardware cache level : PERF_COUNT_HW_CACHE_DTLB
+ * Hardware cache event operation type : PERF_COUNT_HW_CACHE_OP_WRITE
+ * Hardware cache event result type : PERF_COUNT_HW_CACHE_RESULT_ACCESS
+ */
+#define EventCode_3 0x0103
+/*
+ * Hardware cache level : PERF_COUNT_HW_CACHE_L1D
+ * Hardware cache event operation type : PERF_COUNT_HW_CACHE_OP_READ
+ * Hardware cache event result type : Invalid ( > PERF_COUNT_HW_CACHE_RESULT_MAX)
+ */
+#define EventCode_4 0x030000
+
+/*
+ * A perf test to check valid hardware cache events.
+ */
+static int hw_cache_event_type_test(void)
+{
+ struct event event;
+
+ /* Check for platform support for the test */
+ SKIP_IF(platform_check_for_tests());
+
+ /* Skip for Generic compat PMU */
+ SKIP_IF(check_for_generic_compat_pmu());
+
+ /* Init the event to test hardware cache event */
+ event_init_opts(&event, EventCode_1, PERF_TYPE_HW_CACHE, "event");
+
+ /* Expected to succeed as it points to L1 load miss */
+ FAIL_IF(event_open(&event));
+ event_close(&event);
+
+ /* Init the event to test hardware cache event */
+ event_init_opts(&event, EventCode_2, PERF_TYPE_HW_CACHE, "event");
+
+ /* Expected to fail as the corresponding cache event entry has 0 in that index */
+ FAIL_IF(!event_open(&event));
+ event_close(&event);
+
+ /* Init the event to test hardware cache event */
+ event_init_opts(&event, EventCode_3, PERF_TYPE_HW_CACHE, "event");
+
+ /* Expected to fail as the corresponding cache event entry has -1 in that index */
+ FAIL_IF(!event_open(&event));
+ event_close(&event);
+
+ /* Init the event to test hardware cache event */
+ event_init_opts(&event, EventCode_4, PERF_TYPE_HW_CACHE, "event");
+
+ /* Expected to fail as hardware cache event result type is Invalid */
+ FAIL_IF(!event_open(&event));
+ event_close(&event);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(hw_cache_event_type_test, "hw_cache_event_type_test");
+}
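
The EventCode_* values above follow the generic perf hardware-cache encoding, where the cache level, operation and result are packed into the low three bytes of attr.config. A quick sketch of that encoding, shown here for reference only (standard perf ABI, not part of the patch):

/* Sketch only: the PERF_TYPE_HW_CACHE config layout used by the EventCode_* macros.
 * config = (cache_id) | (op_id << 8) | (result_id << 16)
 */
static unsigned long long hw_cache_config(unsigned int cache_id,
					  unsigned int op_id,
					  unsigned int result_id)
{
	return cache_id | (op_id << 8) | ((unsigned long long)result_id << 16);
}

/*
 * hw_cache_config(PERF_COUNT_HW_CACHE_L1D, PERF_COUNT_HW_CACHE_OP_READ,
 *		   PERF_COUNT_HW_CACHE_RESULT_MISS)   == 0x10000 (EventCode_1)
 * hw_cache_config(PERF_COUNT_HW_CACHE_L1D, PERF_COUNT_HW_CACHE_OP_WRITE,
 *		   PERF_COUNT_HW_CACHE_RESULT_ACCESS) == 0x0100  (EventCode_2)
 * hw_cache_config(PERF_COUNT_HW_CACHE_DTLB, PERF_COUNT_HW_CACHE_OP_WRITE,
 *		   PERF_COUNT_HW_CACHE_RESULT_ACCESS) == 0x0103  (EventCode_3)
 * EventCode_4 (0x030000) uses result id 3, which is >= PERF_COUNT_HW_CACHE_RESULT_MAX.
 */
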
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/invalid_event_code_test.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/invalid_event_code_test.c
new file mode 100644
index 000000000000..f51fcab837fc
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/invalid_event_code_test.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <sys/prctl.h>
+#include <limits.h>
+#include "../event.h"
+#include "../sampling_tests/misc.h"
+
+/* The data cache was reloaded from local core's L3 due to a demand load */
+#define EventCode_1 0x1340000001c040
+/* PM_DATA_RADIX_PROCESS_L2_PTE_FROM_L2 */
+#define EventCode_2 0x14242
+/* Event code with IFM, EBB, BHRB bits set in event code */
+#define EventCode_3 0xf00000000000001e
+
+/*
+ * Some of the bits in the event code are
+ * reserved for specific platforms.
+ * Event code bits 52-59 are reserved in power9,
+ * whereas in power10, these are used for programming
+ * Monitor Mode Control Register 3 (MMCR3).
+ * Bit 9 in event code is reserved in power9,
+ * whereas it is used for programming "radix_scope_qual"
+ * bit 18 in Monitor Mode Control Register 1 (MMCR1).
+ *
+ * Testcase to ensure that using reserved bits in
+ * the event code causes event_open to fail.
+ */
+
+static int invalid_event_code(void)
+{
+ struct event event;
+
+ /* Check for platform support for the test */
+ SKIP_IF(platform_check_for_tests());
+
+ /*
+ * Events using MMCR3 bits and radix scope qual bits
+ * should fail in power9 and should succeed in power10.
+ * Init the events and check for pass/fail in event open.
+ */
+ if (have_hwcap2(PPC_FEATURE2_ARCH_3_1)) {
+ event_init(&event, EventCode_1);
+ FAIL_IF(event_open(&event));
+ event_close(&event);
+
+ event_init(&event, EventCode_2);
+ FAIL_IF(event_open(&event));
+ event_close(&event);
+ } else {
+ event_init(&event, EventCode_1);
+ FAIL_IF(!event_open(&event));
+
+ event_init(&event, EventCode_2);
+ FAIL_IF(!event_open(&event));
+ }
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(invalid_event_code, "invalid_event_code");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/reserved_bits_mmcra_sample_elig_mode_test.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/reserved_bits_mmcra_sample_elig_mode_test.c
new file mode 100644
index 000000000000..4c119c821b99
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/reserved_bits_mmcra_sample_elig_mode_test.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include "../event.h"
+#include "../sampling_tests/misc.h"
+
+/*
+ * Testcase for reserved bits in Monitor Mode Control
+ * Register A (MMCRA) Random Sampling Mode (SM) value.
+ * As per Instruction Set Architecture (ISA), the values
+ * 0x5, 0x9, 0xD, 0x19, 0x1D, 0x1A, 0x1E are reserved
+ * for the sampling mode field. Test that using these reserved
+ * values causes event_open to fail.
+ * Input event code uses these sampling bits along with
+ * 401e0 (PM_MRK_INST_CMPL).
+ */
+
+static int reserved_bits_mmcra_sample_elig_mode(void)
+{
+ struct event event;
+
+ /* Check for platform support for the test */
+ SKIP_IF(platform_check_for_tests());
+
+ /* Skip for Generic compat PMU */
+ SKIP_IF(check_for_generic_compat_pmu());
+
+ /*
+ * MMCRA Random Sampling Mode (SM) values 0x5,
+ * 0x9, 0xD, 0x19, 0x1D, 0x1A and 0x1E are reserved.
+ * Expected to fail when using these reserved values.
+ */
+ event_init(&event, 0x50401e0);
+ FAIL_IF(!event_open(&event));
+
+ event_init(&event, 0x90401e0);
+ FAIL_IF(!event_open(&event));
+
+ event_init(&event, 0xD0401e0);
+ FAIL_IF(!event_open(&event));
+
+ event_init(&event, 0x190401e0);
+ FAIL_IF(!event_open(&event));
+
+ event_init(&event, 0x1D0401e0);
+ FAIL_IF(!event_open(&event));
+
+ event_init(&event, 0x1A0401e0);
+ FAIL_IF(!event_open(&event));
+
+ event_init(&event, 0x1E0401e0);
+ FAIL_IF(!event_open(&event));
+
+ /*
+ * MMCRA Random Sampling Mode (SM) value 0x10
+ * is reserved in power10 and 0xC is reserved in
+ * power9.
+ */
+ if (PVR_VER(mfspr(SPRN_PVR)) == POWER10) {
+ event_init(&event, 0x100401e0);
+ FAIL_IF(!event_open(&event));
+ } else if (PVR_VER(mfspr(SPRN_PVR)) == POWER9) {
+ event_init(&event, 0xC0401e0);
+ FAIL_IF(!event_open(&event));
+ }
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(reserved_bits_mmcra_sample_elig_mode,
+ "reserved_bits_mmcra_sample_elig_mode");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/reserved_bits_mmcra_thresh_ctl_test.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/reserved_bits_mmcra_thresh_ctl_test.c
new file mode 100644
index 000000000000..4ea1c2f8913f
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/reserved_bits_mmcra_thresh_ctl_test.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include "../event.h"
+#include "../sampling_tests/misc.h"
+
+/*
+ * Testcase for reserved bits in Monitor Mode
+ * Control Register A (MMCRA) thresh_ctl bits.
+ * For MMCRA[48:51]/[52:55] (Threshold Start/Stop),
+ * 0b11110000/0b00001111 is reserved.
+ */
+
+static int reserved_bits_mmcra_thresh_ctl(void)
+{
+ struct event event;
+
+ /* Check for platform support for the test */
+ SKIP_IF(platform_check_for_tests());
+
+ /* Skip for Generic compat PMU */
+ SKIP_IF(check_for_generic_compat_pmu());
+
+ /*
+ * MMCRA[48:51]/[52:55] (Threshold Start/Stop
+ * event selection). 0b11110000/0b00001111 is reserved.
+ * Expected to fail when using these reserved values.
+ */
+ event_init(&event, 0xf0340401e0);
+ FAIL_IF(!event_open(&event));
+
+ event_init(&event, 0x0f340401e0);
+ FAIL_IF(!event_open(&event));
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(reserved_bits_mmcra_thresh_ctl, "reserved_bits_mmcra_thresh_ctl");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/.gitignore b/tools/testing/selftests/powerpc/pmu/sampling_tests/.gitignore
index 0fce5a694684..f93b4c7c3a8a 100644
--- a/tools/testing/selftests/powerpc/pmu/sampling_tests/.gitignore
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/.gitignore
@@ -1,11 +1,21 @@
-mmcr0_exceptionbits_test
+bhrb_filter_map_test
+bhrb_no_crash_wo_pmu_test
+intr_regs_no_crash_wo_pmu_test
mmcr0_cc56run_test
-mmcr0_pmccext_test
-mmcr0_pmcjce_test
+mmcr0_exceptionbits_test
mmcr0_fc56_pmc1ce_test
mmcr0_fc56_pmc56_test
+mmcr0_pmccext_test
+mmcr0_pmcjce_test
mmcr1_comb_test
-mmcr2_l2l3_test
+mmcr1_sel_unit_cache_test
mmcr2_fcs_fch_test
+mmcr2_l2l3_test
mmcr3_src_test
+mmcra_bhrb_any_test
+mmcra_bhrb_cond_test
+mmcra_bhrb_disable_no_branch_test
+mmcra_bhrb_disable_test
+mmcra_bhrb_ind_call_test
+mmcra_thresh_cmp_test
mmcra_thresh_marked_sample_test
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/Makefile b/tools/testing/selftests/powerpc/pmu/sampling_tests/Makefile
index a785c6a173b9..9e67351fb252 100644
--- a/tools/testing/selftests/powerpc/pmu/sampling_tests/Makefile
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/Makefile
@@ -4,9 +4,12 @@ CFLAGS += -m64
TEST_GEN_PROGS := mmcr0_exceptionbits_test mmcr0_cc56run_test mmcr0_pmccext_test \
mmcr0_pmcjce_test mmcr0_fc56_pmc1ce_test mmcr0_fc56_pmc56_test \
mmcr1_comb_test mmcr2_l2l3_test mmcr2_fcs_fch_test \
- mmcr3_src_test mmcra_thresh_marked_sample_test
+ mmcr3_src_test mmcra_thresh_marked_sample_test mmcra_thresh_cmp_test \
+ mmcra_bhrb_ind_call_test mmcra_bhrb_any_test mmcra_bhrb_cond_test \
+ mmcra_bhrb_disable_test bhrb_no_crash_wo_pmu_test intr_regs_no_crash_wo_pmu_test \
+ bhrb_filter_map_test mmcr1_sel_unit_cache_test mmcra_bhrb_disable_no_branch_test
top_srcdir = ../../../../../..
include ../../../lib.mk
-$(TEST_GEN_PROGS): ../../harness.c ../../utils.c ../event.c ../lib.c misc.c misc.h ../loop.S
+$(TEST_GEN_PROGS): ../../harness.c ../../utils.c ../event.c ../lib.c misc.c misc.h ../loop.S ../branch_loops.S
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/bhrb_filter_map_test.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/bhrb_filter_map_test.c
new file mode 100644
index 000000000000..8182647c63c8
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/bhrb_filter_map_test.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "misc.h"
+#include "utils.h"
+
+/*
+ * A perf sampling test to check the bhrb filter
+ * map. Not all branch filters are supported
+ * on powerpc. Supported filters:
+ * power10: any, any_call, ind_call, cond
+ * power9: any, any_call
+ *
+ * Testcase checks that event_open fails for invalid bhrb
+ * filter types and passes for valid filter types,
+ * doing a validity check for these branch
+ * sample types.
+ */
+
+/* Invalid types for powerpc */
+/* Valid bhrb filters in power9/power10 */
+int bhrb_filter_map_valid_common[] = {
+ PERF_SAMPLE_BRANCH_ANY,
+ PERF_SAMPLE_BRANCH_ANY_CALL,
+};
+
+/* Valid bhrb filters in power10 */
+int bhrb_filter_map_valid_p10[] = {
+ PERF_SAMPLE_BRANCH_IND_CALL,
+ PERF_SAMPLE_BRANCH_COND,
+};
+
+#define EventCode 0x1001e
+
+static int bhrb_filter_map_test(void)
+{
+ struct event event;
+ int i;
+
+ /* Check for platform support for the test */
+ SKIP_IF(platform_check_for_tests());
+
+ /*
+ * Skip for Generic compat PMU since
+ * bhrb filters are not supported
+ */
+ SKIP_IF(check_for_generic_compat_pmu());
+
+ /* Init the event for the sampling test */
+ event_init(&event, EventCode);
+
+ event.attr.sample_period = 1000;
+ event.attr.sample_type = PERF_SAMPLE_BRANCH_STACK;
+ event.attr.disabled = 1;
+
+ /* Invalid filter maps which are expected to fail in event_open */
+ for (i = PERF_SAMPLE_BRANCH_USER_SHIFT; i < PERF_SAMPLE_BRANCH_MAX_SHIFT; i++) {
+ /* Skip the valid branch sample type */
+ if (i == PERF_SAMPLE_BRANCH_ANY_SHIFT || i == PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT \
+ || i == PERF_SAMPLE_BRANCH_IND_CALL_SHIFT || i == PERF_SAMPLE_BRANCH_COND_SHIFT)
+ continue;
+ event.attr.branch_sample_type = 1U << i;
+ FAIL_IF(!event_open(&event));
+ }
+
+ /* valid filter maps for power9/power10 which are expected to pass in event_open */
+ for (i = 0; i < ARRAY_SIZE(bhrb_filter_map_valid_common); i++) {
+ event.attr.branch_sample_type = bhrb_filter_map_valid_common[i];
+ FAIL_IF(event_open(&event));
+ event_close(&event);
+ }
+
+ /*
+ * Filter maps which are valid in power10 and invalid in power9.
+ * The PVR check is used here since PMU specific data like the bhrb
+ * filter map and event alternatives is handled by the respective PMU
+ * driver code, and using the PVR works correctly for all cases
+ * including generic compat mode.
+ */
+ if (PVR_VER(mfspr(SPRN_PVR)) == POWER10) {
+ for (i = 0; i < ARRAY_SIZE(bhrb_filter_map_valid_p10); i++) {
+ event.attr.branch_sample_type = bhrb_filter_map_valid_p10[i];
+ FAIL_IF(event_open(&event));
+ event_close(&event);
+ }
+ } else {
+ for (i = 0; i < ARRAY_SIZE(bhrb_filter_map_valid_p10); i++) {
+ event.attr.branch_sample_type = bhrb_filter_map_valid_p10[i];
+ FAIL_IF(!event_open(&event));
+ }
+ }
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(bhrb_filter_map_test, "bhrb_filter_map_test");
+}
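
The branch_sample_type values iterated above are the PERF_SAMPLE_BRANCH_* bit flags from the perf ABI, each defined as 1 << its corresponding *_SHIFT value, which is the relationship the loop over shifts relies on. A small reference sketch, not part of the patch:

/* Sketch only: branch_sample_type is a bitmask of PERF_SAMPLE_BRANCH_* flags. */
#include <linux/perf_event.h>
#include <assert.h>

static void branch_flag_examples(void)
{
	assert(PERF_SAMPLE_BRANCH_ANY == (1U << PERF_SAMPLE_BRANCH_ANY_SHIFT));
	assert(PERF_SAMPLE_BRANCH_COND == (1U << PERF_SAMPLE_BRANCH_COND_SHIFT));

	/* Iterating the shifts and testing 1U << i therefore covers every
	 * filter type; filters can also be combined, e.g.: */
	unsigned long long user_cond = PERF_SAMPLE_BRANCH_USER | PERF_SAMPLE_BRANCH_COND;
	(void)user_cond;
}
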
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/bhrb_no_crash_wo_pmu_test.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/bhrb_no_crash_wo_pmu_test.c
new file mode 100644
index 000000000000..4644c6782974
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/bhrb_no_crash_wo_pmu_test.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "misc.h"
+#include "utils.h"
+
+/*
+ * A perf sampling test for making sure
+ * enabling branch stack doesn't crash in any
+ * environment, say:
+ * - With generic compat PMU
+ * - without any PMU registered
+ * - With platform specific PMU
+ * A fix for bhrb sampling crash was added in kernel
+ * via commit: b460b512417a ("powerpc/perf: Fix crashes
+ * with generic_compat_pmu & BHRB")
+ *
+ * This testcase exercises this code by enabling branch
+ * stack for a software event. A s/w event is used
+ * since software events work even on a platform
+ * without a PMU.
+ */
+static int bhrb_no_crash_wo_pmu_test(void)
+{
+ struct event event;
+
+ /*
+ * Init the event for the sampling test.
+ * This uses software event which works on
+ * any platform.
+ */
+ event_init_opts(&event, 0, PERF_TYPE_SOFTWARE, "cycles");
+
+ event.attr.sample_period = 1000;
+ event.attr.sample_type = PERF_SAMPLE_BRANCH_STACK;
+ event.attr.disabled = 1;
+
+ /*
+ * Return code of event_open is not
+ * considered since test just expects no crash from
+ * using PERF_SAMPLE_BRANCH_STACK. Also, for environments
+ * like the generic compat PMU, branch stack is unsupported.
+ */
+ event_open(&event);
+
+ event_close(&event);
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(bhrb_no_crash_wo_pmu_test, "bhrb_no_crash_wo_pmu_test");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/intr_regs_no_crash_wo_pmu_test.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/intr_regs_no_crash_wo_pmu_test.c
new file mode 100644
index 000000000000..839d2d225da0
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/intr_regs_no_crash_wo_pmu_test.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "misc.h"
+#include "utils.h"
+
+/*
+ * A perf sampling test for making sure
+ * sampling with -intr-regs doesn't crash
+ * in any environment, say:
+ * - With generic compat PMU
+ * - without any PMU registered
+ * - With platform specific PMU.
+ * A fix for the crash with intr_regs was
+ * added in kernel commit f75e7d73bdf7.
+ *
+ * This testcase exercises this code path by requesting
+ * intr_regs for a software event. A software event is
+ * used since s/w events work even on a platform
+ * without a PMU.
+ */
+static int intr_regs_no_crash_wo_pmu_test(void)
+{
+ struct event event;
+
+ /*
+ * Init the event for the sampling test.
+ * This uses software event which works on
+ * any platform.
+ */
+ event_init_opts(&event, 0, PERF_TYPE_SOFTWARE, "cycles");
+
+ event.attr.sample_period = 1000;
+ event.attr.sample_type = PERF_SAMPLE_REGS_INTR;
+ event.attr.disabled = 1;
+
+ /*
+ * Return code of event_open is not considered
+ * since test just expects no crash from using
+ * PERF_SAMPLE_REGS_INTR.
+ */
+ event_open(&event);
+
+ event_close(&event);
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(intr_regs_no_crash_wo_pmu_test, "intr_regs_no_crash_wo_pmu_test");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/misc.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/misc.c
index c01a31d5f4ee..eac6420abdf1 100644
--- a/tools/testing/selftests/powerpc/pmu/sampling_tests/misc.c
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/misc.c
@@ -60,6 +60,8 @@ static void init_ev_encodes(void)
switch (pvr) {
case POWER10:
+ ev_mask_thd_cmp = 0x3ffff;
+ ev_shift_thd_cmp = 0;
ev_mask_rsq = 1;
ev_shift_rsq = 9;
ev_mask_comb = 3;
@@ -119,12 +121,10 @@ int check_extended_regs_support(void)
return -1;
}
-int check_pvr_for_sampling_tests(void)
+int platform_check_for_tests(void)
{
pvr = PVR_VER(mfspr(SPRN_PVR));
- platform_extended_mask = perf_get_platform_reg_mask();
-
/*
* Check for supported platforms
* for sampling test
@@ -136,19 +136,33 @@ int check_pvr_for_sampling_tests(void)
* Check PMU driver registered by looking for
* PPC_FEATURE2_EBB bit in AT_HWCAP2
*/
- if (!have_hwcap2(PPC_FEATURE2_EBB))
+ if (!have_hwcap2(PPC_FEATURE2_EBB) || !have_hwcap2(PPC_FEATURE2_ARCH_3_00))
goto out;
+ return 0;
+
+out:
+ printf("%s: Tests unsupported for this platform\n", __func__);
+ return -1;
+}
+
+int check_pvr_for_sampling_tests(void)
+{
+ SKIP_IF(platform_check_for_tests());
+
+ platform_extended_mask = perf_get_platform_reg_mask();
/* check if platform supports extended regs */
if (check_extended_regs_support())
goto out;
init_ev_encodes();
return 0;
+
out:
printf("%s: Sampling tests un-supported\n", __func__);
return -1;
}
+
/*
* Allocate mmap buffer of "mmap_pages" number of
* pages.
@@ -257,13 +271,32 @@ u64 *get_intr_regs(struct event *event, void *sample_buff)
u64 *intr_regs;
size_t size = 0;
- if ((type ^ PERF_SAMPLE_REGS_INTR))
+ if ((type ^ (PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_BRANCH_STACK)) &&
+ (type ^ PERF_SAMPLE_REGS_INTR))
return NULL;
intr_regs = (u64 *)perf_read_first_sample(sample_buff, &size);
if (!intr_regs)
return NULL;
+ if (type & PERF_SAMPLE_BRANCH_STACK) {
+ /*
+ * PERF_RECORD_SAMPLE and PERF_SAMPLE_BRANCH_STACK:
+ * struct {
+ * struct perf_event_header hdr;
+ * u64 number_of_branches;
+ * struct perf_branch_entry[number_of_branches];
+ * u64 data[];
+ * };
+ * struct perf_branch_entry {
+ * u64 from;
+ * u64 to;
+ * u64 misc;
+ * };
+ */
+ intr_regs += ((*intr_regs) * 3) + 1;
+ }
+
/*
* First entry in the sample buffer used to specify
* PERF_SAMPLE_REGS_ABI_64, skip perf regs abi to access
@@ -410,3 +443,95 @@ u64 get_reg_value(u64 *intr_regs, char *register_name)
return *(intr_regs + register_bit_position);
}
+
+int get_thresh_cmp_val(struct event event)
+{
+ int exp = 0;
+ u64 result = 0;
+ u64 value;
+
+ if (!have_hwcap2(PPC_FEATURE2_ARCH_3_1))
+ return EV_CODE_EXTRACT(event.attr.config, thd_cmp);
+
+ value = EV_CODE_EXTRACT(event.attr.config1, thd_cmp);
+
+ if (!value)
+ return value;
+
+ /*
+ * In case of P10, the thresh_cmp value is not part of the raw event code
+ * and is provided via the attr.config1 parameter. To program the threshold in MMCRA,
+ * take an 18 bit number N and shift right 2 places and increment
+ * the exponent E by 1 until the upper 10 bits of N are zero.
+ * Write E to the threshold exponent and write the lower 8 bits of N
+ * to the threshold mantissa.
+ * The max threshold that can be written is 261120.
+ */
+ if (value > 261120)
+ value = 261120;
+ while ((64 - __builtin_clzl(value)) > 8) {
+ exp++;
+ value >>= 2;
+ }
+
+ /*
+ * Note that it is invalid to write a mantissa with the
+ * upper 2 bits of mantissa being zero, unless the
+ * exponent is also zero.
+ */
+ if (!(value & 0xC0) && exp)
+ result = -1;
+ else
+ result = (exp << 8) | value;
+ return result;
+}
+
+/*
+ * Utility function to check for generic compat PMU
+ * by checking whether the base_platform value from auxv
+ * maps to a known platform (power9/power10).
+ */
+static bool auxv_generic_compat_pmu(void)
+{
+ int base_pvr = 0;
+
+ if (!strcmp(auxv_base_platform(), "power9"))
+ base_pvr = POWER9;
+ else if (!strcmp(auxv_base_platform(), "power10"))
+ base_pvr = POWER10;
+
+ return (!base_pvr);
+}
+
+/*
+ * Check for generic compat PMU.
+ * First check for presence of pmu_name from
+ * "/sys/bus/event_source/devices/cpu/caps".
+ * If it doesn't exist, fall back to using the
+ * auxv value.
+ */
+bool check_for_generic_compat_pmu(void)
+{
+ char pmu_name[256];
+
+ memset(pmu_name, 0, sizeof(pmu_name));
+ if (read_sysfs_file("bus/event_source/devices/cpu/caps/pmu_name",
+ pmu_name, sizeof(pmu_name)) < 0)
+ return auxv_generic_compat_pmu();
+
+ if (!strcmp(pmu_name, "ISAv3"))
+ return true;
+ else
+ return false;
+}
+
+/*
+ * Check if system is booted in compat mode.
+ */
+bool check_for_compat_mode(void)
+{
+ char *platform = auxv_platform();
+ char *base_platform = auxv_base_platform();
+
+ return strcmp(platform, base_platform);
+}
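
To make the exponent/mantissa loop in get_thresh_cmp_val() above easier to follow, here is a worked example, given purely as an illustration, for the thresh_cmp value 1000 used by the group_constraint_thresh_cmp test on power10:

/*
 * Worked example for get_thresh_cmp_val() with value = 1000:
 *
 *   1000 = 0x3E8 needs 10 bits, so 64 - __builtin_clzl(1000) = 10 > 8:
 *       exp = 1, value >>= 2  ->  value = 250 (0xFA), which fits in 8 bits
 *   the upper 2 bits of the mantissa are non-zero (0xFA & 0xC0 = 0xC0), so
 *       result = (exp << 8) | value = (1 << 8) | 0xFA = 0x1FA
 *
 * i.e. threshold exponent 1 and mantissa 0xFA are what the sampling tests
 * can then compare against the MMCRA value read back from the interrupt
 * registers.
 */
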
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/misc.h b/tools/testing/selftests/powerpc/pmu/sampling_tests/misc.h
index 7675f3177725..4181755cf5a0 100644
--- a/tools/testing/selftests/powerpc/pmu/sampling_tests/misc.h
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/misc.h
@@ -5,6 +5,7 @@
* Copyright 2022, Kajol Jain, IBM Corp.
*/
+#include <sys/stat.h>
#include "../event.h"
#define POWER10 0x80
@@ -17,6 +18,8 @@
#define MMCR1_RSQ 0x200000000000ULL /* radix scope qual field */
#define BHRB_DISABLE 0x2000000000ULL /* MMCRA BHRB DISABLE bit */
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
extern int ev_mask_pmcxsel, ev_shift_pmcxsel;
extern int ev_mask_marked, ev_shift_marked;
extern int ev_mask_comb, ev_shift_comb;
@@ -35,6 +38,7 @@ extern int ev_mask_mmcr3_src, ev_shift_mmcr3_src;
extern int pvr;
extern u64 platform_extended_mask;
extern int check_pvr_for_sampling_tests(void);
+extern int platform_check_for_tests(void);
/*
* Event code field extraction macro.
@@ -52,6 +56,9 @@ void *__event_read_samples(void *sample_buff, size_t *size, u64 *sample_count);
int collect_samples(void *sample_buff);
u64 *get_intr_regs(struct event *event, void *sample_buff);
u64 get_reg_value(u64 *intr_regs, char *register_name);
+int get_thresh_cmp_val(struct event event);
+bool check_for_generic_compat_pmu(void);
+bool check_for_compat_mode(void);
static inline int get_mmcr0_fc56(u64 mmcr0, int pmc)
{
@@ -184,7 +191,7 @@ static inline int get_mmcra_sm(u64 mmcra, int pmc)
return ((mmcra >> 42) & 0x3);
}
-static inline int get_mmcra_bhrb_disable(u64 mmcra, int pmc)
+static inline u64 get_mmcra_bhrb_disable(u64 mmcra, int pmc)
{
if (pvr == POWER10)
return mmcra & BHRB_DISABLE;
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr1_sel_unit_cache_test.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr1_sel_unit_cache_test.c
new file mode 100644
index 000000000000..f0c003282630
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr1_sel_unit_cache_test.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "misc.h"
+#include "utils.h"
+
+#define MALLOC_SIZE (0x10000 * 10) /* Ought to be enough .. */
+
+/* The data cache was reloaded from local core's L3 due to a demand load */
+#define EventCode 0x21c040
+
+/*
+ * A perf sampling test for mmcr1
+ * fields : pmcxsel, unit, cache.
+ */
+static int mmcr1_sel_unit_cache(void)
+{
+ struct event event;
+ u64 *intr_regs;
+ char *p;
+ int i;
+
+ /* Check for platform support for the test */
+ SKIP_IF(check_pvr_for_sampling_tests());
+
+ p = malloc(MALLOC_SIZE);
+ FAIL_IF(!p);
+
+ /* Init the event for the sampling test */
+ event_init_sampling(&event, EventCode);
+ event.attr.sample_regs_intr = platform_extended_mask;
+ event.attr.sample_period = 1;
+ FAIL_IF(event_open(&event));
+ event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);
+
+ event_enable(&event);
+
+ /* workload to make the event overflow */
+ for (i = 0; i < MALLOC_SIZE; i += 0x10000)
+ p[i] = i;
+
+ event_disable(&event);
+
+ /* Check for sample count */
+ FAIL_IF(!collect_samples(event.mmap_buffer));
+
+ intr_regs = get_intr_regs(&event, event.mmap_buffer);
+
+ /* Check for intr_regs */
+ FAIL_IF(!intr_regs);
+
+ /*
+ * Verify that pmcxsel, unit and cache field of MMCR1
+ * match with corresponding event code fields
+ */
+ FAIL_IF(EV_CODE_EXTRACT(event.attr.config, pmcxsel) !=
+ get_mmcr1_pmcxsel(get_reg_value(intr_regs, "MMCR1"), 1));
+ FAIL_IF(EV_CODE_EXTRACT(event.attr.config, unit) !=
+ get_mmcr1_unit(get_reg_value(intr_regs, "MMCR1"), 1));
+ FAIL_IF(EV_CODE_EXTRACT(event.attr.config, cache) !=
+ get_mmcr1_cache(get_reg_value(intr_regs, "MMCR1"), 1));
+
+ free(p);
+ event_close(&event);
+ return 0;
+}
+
+int main(void)
+{
+ FAIL_IF(test_harness(mmcr1_sel_unit_cache, "mmcr1_sel_unit_cache"));
+}
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_any_test.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_any_test.c
new file mode 100644
index 000000000000..14854694af62
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_any_test.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Kajol Jain, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "misc.h"
+#include "utils.h"
+
+extern void thirty_two_instruction_loop(int loops);
+
+/* Instructions */
+#define EventCode 0x500fa
+
+/* ifm field for any branch mode */
+#define IFM_ANY_BRANCH 0x0
+
+/*
+ * A perf sampling test for mmcra
+ * field: ifm for bhrb any call.
+ */
+static int mmcra_bhrb_any_test(void)
+{
+ struct event event;
+ u64 *intr_regs;
+
+ /* Check for platform support for the test */
+ SKIP_IF(check_pvr_for_sampling_tests());
+
+ /* Init the event for the sampling test */
+ event_init_sampling(&event, EventCode);
+ event.attr.sample_regs_intr = platform_extended_mask;
+ event.attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
+ event.attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY;
+ event.attr.exclude_kernel = 1;
+
+ FAIL_IF(event_open(&event));
+ event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);
+
+ FAIL_IF(event_enable(&event));
+
+ /* workload to make the event overflow */
+ thirty_two_instruction_loop(10000);
+
+ FAIL_IF(event_disable(&event));
+
+ intr_regs = get_intr_regs(&event, event.mmap_buffer);
+
+ /* Check for intr_regs */
+ FAIL_IF(!intr_regs);
+
+ /* Verify that ifm bit is set properly in MMCRA */
+ FAIL_IF(get_mmcra_ifm(get_reg_value(intr_regs, "MMCRA"), 5) != IFM_ANY_BRANCH);
+
+ event_close(&event);
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(mmcra_bhrb_any_test, "mmcra_bhrb_any_test");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_cond_test.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_cond_test.c
new file mode 100644
index 000000000000..3e08176eb7f8
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_cond_test.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Kajol Jain, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "misc.h"
+#include "utils.h"
+
+extern void thirty_two_instruction_loop(int loops);
+
+/* Instructions */
+#define EventCode 0x500fa
+
+/* ifm field for conditional branch mode */
+#define IFM_COND_BRANCH 0x3
+
+/*
+ * A perf sampling test for mmcra
+ * field: ifm for bhrb cond call.
+ */
+static int mmcra_bhrb_cond_test(void)
+{
+ struct event event;
+ u64 *intr_regs;
+
+ /*
+ * Check for platform support for the test.
+ * This test is only applicable on power10
+ */
+ SKIP_IF(check_pvr_for_sampling_tests());
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_1));
+
+ /* Init the event for the sampling test */
+ event_init_sampling(&event, EventCode);
+ event.attr.sample_regs_intr = platform_extended_mask;
+ event.attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
+ event.attr.branch_sample_type = PERF_SAMPLE_BRANCH_COND;
+ event.attr.exclude_kernel = 1;
+
+ FAIL_IF(event_open(&event));
+ event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);
+
+ FAIL_IF(event_enable(&event));
+
+ /* workload to make the event overflow */
+ thirty_two_instruction_loop(10000);
+
+ FAIL_IF(event_disable(&event));
+
+ intr_regs = get_intr_regs(&event, event.mmap_buffer);
+
+ /* Check for intr_regs */
+ FAIL_IF(!intr_regs);
+
+ /* Verify that ifm bit is set properly in MMCRA */
+ FAIL_IF(get_mmcra_ifm(get_reg_value(intr_regs, "MMCRA"), 5) != IFM_COND_BRANCH);
+
+ event_close(&event);
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(mmcra_bhrb_cond_test, "mmcra_bhrb_cond_test");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_disable_no_branch_test.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_disable_no_branch_test.c
new file mode 100644
index 000000000000..488c865387e4
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_disable_no_branch_test.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Kajol Jain, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "misc.h"
+#include "utils.h"
+
+extern void thirty_two_instruction_loop(int loops);
+
+/* Instructions */
+#define EventCode 0x500fa
+
+/*
+ * A perf sampling test for mmcra
+ * field: bhrb_disable.
+ */
+static int mmcra_bhrb_disable_no_branch_test(void)
+{
+ struct event event;
+ u64 *intr_regs;
+
+ /*
+ * Check for platform support for the test.
+	 * This test is only applicable on power10.
+ */
+ SKIP_IF(check_pvr_for_sampling_tests());
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_1));
+
+ /* Init the event for the sampling test */
+ event_init_sampling(&event, EventCode);
+ event.attr.sample_regs_intr = platform_extended_mask;
+ event.attr.exclude_kernel = 1;
+
+ FAIL_IF(event_open(&event));
+ event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);
+
+ FAIL_IF(event_enable(&event));
+
+ /* workload to make the event overflow */
+ thirty_two_instruction_loop(10000);
+
+ FAIL_IF(event_disable(&event));
+
+ intr_regs = get_intr_regs(&event, event.mmap_buffer);
+
+ /* Check for intr_regs */
+ FAIL_IF(!intr_regs);
+
+ /* Verify that bhrb_disable bit is set in MMCRA for non-branch samples */
+ FAIL_IF(!get_mmcra_bhrb_disable(get_reg_value(intr_regs, "MMCRA"), 5));
+
+ event_close(&event);
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(mmcra_bhrb_disable_no_branch_test, "mmcra_bhrb_disable_no_branch_test");
+}
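
The get_mmcra_*() helpers used in these checks are bit-field extractors over the MMCRA image returned in the sample's interrupt registers. The actual shift and width of each field are platform specific and defined in sampling_tests/misc.h; the sketch below is purely illustrative, with placeholder positions, to show the shape of such a helper.

/* Illustrative only: a generic SPR field extractor. The real IFM and
 * bhrb_disable positions are not the ones shown in the example usage. */
#include <linux/types.h>

static inline __u64 mmcra_field(__u64 mmcra, unsigned int shift,
				unsigned int width)
{
	return (mmcra >> shift) & ((1ULL << width) - 1);
}

/* Hypothetical usage, mirroring the checks in the tests above:
 *	FAIL_IF(mmcra_field(mmcra, IFM_SHIFT, 2) != IFM_ANY_BRANCH);
 */
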
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_disable_test.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_disable_test.c
new file mode 100644
index 000000000000..186a853c0f62
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_disable_test.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Kajol Jain, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "misc.h"
+#include "utils.h"
+
+extern void thirty_two_instruction_loop(int loops);
+
+/* Instructions */
+#define EventCode 0x500fa
+
+/*
+ * A perf sampling test for mmcra
+ * field: bhrb_disable.
+ */
+static int mmcra_bhrb_disable_test(void)
+{
+ struct event event;
+ u64 *intr_regs;
+
+ /*
+ * Check for platform support for the test.
+	 * This test is only applicable on power10.
+ */
+ SKIP_IF(check_pvr_for_sampling_tests());
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_1));
+
+ /* Init the event for the sampling test */
+ event_init_sampling(&event, EventCode);
+ event.attr.sample_regs_intr = platform_extended_mask;
+ event.attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
+ event.attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY;
+ event.attr.exclude_kernel = 1;
+
+ FAIL_IF(event_open(&event));
+ event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);
+
+ FAIL_IF(event_enable(&event));
+
+ /* workload to make the event overflow */
+ thirty_two_instruction_loop(10000);
+
+ FAIL_IF(event_disable(&event));
+
+ intr_regs = get_intr_regs(&event, event.mmap_buffer);
+
+ /* Check for intr_regs */
+ FAIL_IF(!intr_regs);
+
+	/* Verify that the bhrb_disable bit is cleared in MMCRA, since BHRB is in use */
+ FAIL_IF(get_mmcra_bhrb_disable(get_reg_value(intr_regs, "MMCRA"), 5));
+
+ event_close(&event);
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(mmcra_bhrb_disable_test, "mmcra_bhrb_disable_test");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_ind_call_test.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_ind_call_test.c
new file mode 100644
index 000000000000..f0706730c099
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_ind_call_test.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Kajol Jain, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "misc.h"
+#include "utils.h"
+
+extern void indirect_branch_loop(void);
+
+/* Instructions */
+#define EventCode 0x500fa
+
+/* ifm field for indirect branch mode */
+#define IFM_IND_BRANCH 0x2
+
+/*
+ * A perf sampling test for the MMCRA IFM
+ * field with the BHRB indirect-call filter.
+ */
+static int mmcra_bhrb_ind_call_test(void)
+{
+ struct event event;
+ u64 *intr_regs;
+
+ /*
+ * Check for platform support for the test.
+	 * This test is only applicable on power10.
+ */
+ SKIP_IF(check_pvr_for_sampling_tests());
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_1));
+
+ /* Init the event for the sampling test */
+ event_init_sampling(&event, EventCode);
+ event.attr.sample_regs_intr = platform_extended_mask;
+ event.attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
+ event.attr.branch_sample_type = PERF_SAMPLE_BRANCH_IND_CALL;
+ event.attr.exclude_kernel = 1;
+
+ FAIL_IF(event_open(&event));
+ event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);
+
+ FAIL_IF(event_enable(&event));
+
+ /* workload to make the event overflow */
+ indirect_branch_loop();
+
+ FAIL_IF(event_disable(&event));
+
+ intr_regs = get_intr_regs(&event, event.mmap_buffer);
+
+ /* Check for intr_regs */
+ FAIL_IF(!intr_regs);
+
+	/* Verify that the IFM field is set correctly in MMCRA */
+ FAIL_IF(get_mmcra_ifm(get_reg_value(intr_regs, "MMCRA"), 5) != IFM_IND_BRANCH);
+
+ event_close(&event);
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(mmcra_bhrb_ind_call_test, "mmcra_bhrb_ind_call_test");
+}
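
The indirect_branch_loop() workload referenced above is an assembly helper elsewhere in the selftests. For illustration, a C approximation of such a workload is shown below; the helper name and loop count are placeholders, not part of the harness.

/* Illustrative only: generate indirect branches by calling through a
 * volatile function pointer, which the compiler cannot devirtualize. */
static void indirect_target(void)
{
}

static void indirect_branch_workload(unsigned long loops)
{
	void (*volatile fn)(void) = indirect_target;
	unsigned long i;

	/* Each iteration performs an indirect call through fn. */
	for (i = 0; i < loops; i++)
		fn();
}
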
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_thresh_cmp_test.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_thresh_cmp_test.c
new file mode 100644
index 000000000000..904362f172c9
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_thresh_cmp_test.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Kajol Jain, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "misc.h"
+#include "utils.h"
+
+/*
+ * The primary PMU event used here is PM_MRK_INST_CMPL (0x401e0).
+ * The threshold event selection used is issue-to-complete, counted in cycles.
+ * The sampling criterion is load-only sampling.
+ */
+#define p9_EventCode 0x13E35340401e0
+#define p10_EventCode 0x35340401e0
+
+extern void thirty_two_instruction_loop_with_ll_sc(u64 loops, u64 *ll_sc_target);
+
+/* A perf sampling test for the MMCRA threshold compare field */
+static int mmcra_thresh_cmp(void)
+{
+ struct event event;
+ u64 *intr_regs;
+ u64 dummy;
+
+ /* Check for platform support for the test */
+ SKIP_IF(check_pvr_for_sampling_tests());
+
+	/* Skip for compat mode */
+ SKIP_IF(check_for_compat_mode());
+
+ /* Init the event for the sampling test */
+ if (!have_hwcap2(PPC_FEATURE2_ARCH_3_1)) {
+ event_init_sampling(&event, p9_EventCode);
+ } else {
+ event_init_sampling(&event, p10_EventCode);
+ event.attr.config1 = 1000;
+ }
+
+ event.attr.sample_regs_intr = platform_extended_mask;
+ FAIL_IF(event_open(&event));
+ event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);
+
+ FAIL_IF(event_enable(&event));
+
+ /* workload to make the event overflow */
+ thirty_two_instruction_loop_with_ll_sc(1000000, &dummy);
+
+ FAIL_IF(event_disable(&event));
+
+ /* Check for sample count */
+ FAIL_IF(!collect_samples(event.mmap_buffer));
+
+ intr_regs = get_intr_regs(&event, event.mmap_buffer);
+
+ /* Check for intr_regs */
+ FAIL_IF(!intr_regs);
+
+ /* Verify that thresh cmp match with the corresponding event code fields */
+ FAIL_IF(get_thresh_cmp_val(event) !=
+ get_mmcra_thd_cmp(get_reg_value(intr_regs, "MMCRA"), 4));
+
+ event_close(&event);
+ return 0;
+}
+
+int main(void)
+{
+ FAIL_IF(test_harness(mmcra_thresh_cmp, "mmcra_thresh_cmp"));
+}
diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile b/tools/testing/selftests/powerpc/ptrace/Makefile
index a500639da97a..2f02cb54224d 100644
--- a/tools/testing/selftests/powerpc/ptrace/Makefile
+++ b/tools/testing/selftests/powerpc/ptrace/Makefile
@@ -1,15 +1,41 @@
# SPDX-License-Identifier: GPL-2.0
-TEST_GEN_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \
- ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar ptrace-vsx ptrace-tm-vsx \
- ptrace-tm-spd-vsx ptrace-tm-spr ptrace-hwbreak ptrace-pkey core-pkey \
- perf-hwbreak ptrace-syscall ptrace-perf-hwbreak
+
+TM_TESTS := ptrace-tm-gpr
+TM_TESTS += ptrace-tm-spd-gpr
+TM_TESTS += ptrace-tm-spd-tar
+TM_TESTS += ptrace-tm-spd-vsx
+TM_TESTS += ptrace-tm-spr
+TM_TESTS += ptrace-tm-tar
+TM_TESTS += ptrace-tm-vsx
+
+TESTS_64 := $(TM_TESTS)
+TESTS_64 += core-pkey
+TESTS_64 += perf-hwbreak
+TESTS_64 += ptrace-hwbreak
+TESTS_64 += ptrace-perf-hwbreak
+TESTS_64 += ptrace-pkey
+TESTS_64 += ptrace-syscall
+TESTS_64 += ptrace-tar
+TESTS_64 += ptrace-vsx
+
+TESTS += ptrace-gpr
+
+TEST_GEN_PROGS := $(TESTS) $(TESTS_64)
+
+LOCAL_HDRS += $(patsubst %,$(selfdir)/powerpc/ptrace/%,$(wildcard *.h))
top_srcdir = ../../../../..
include ../../lib.mk
-CFLAGS += -m64 -I../../../../../usr/include -I../tm -mhtm -fno-pie
+TM_TESTS := $(patsubst %,$(OUTPUT)/%,$(TM_TESTS))
+TESTS_64 := $(patsubst %,$(OUTPUT)/%,$(TESTS_64))
+
+$(TESTS_64): CFLAGS += -m64
+$(TM_TESTS): CFLAGS += -I../tm -mhtm
+
+CFLAGS += -I../../../../../usr/include -fno-pie
-$(OUTPUT)/ptrace-pkey $(OUTPUT)/core-pkey: child.h
+$(OUTPUT)/ptrace-gpr: ptrace-gpr.S
$(OUTPUT)/ptrace-pkey $(OUTPUT)/core-pkey: LDLIBS += -pthread
-$(TEST_GEN_PROGS): ../harness.c ../utils.c ../lib/reg.S ptrace.h
+$(TEST_GEN_PROGS): ../harness.c ../utils.c ../lib/reg.S
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.S b/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.S
new file mode 100644
index 000000000000..070e8443e3cc
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.S
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * test helper assembly functions
+ *
+ * Copyright (C) 2016 Simon Guo, IBM Corporation.
+ * Copyright 2022 Michael Ellerman, IBM Corporation.
+ */
+#include "basic_asm.h"
+
+#define GPR_SIZE __SIZEOF_LONG__
+#define FIRST_GPR 14
+#define NUM_GPRS (32 - FIRST_GPR)
+#define STACK_SIZE (NUM_GPRS * GPR_SIZE)
+
+// gpr_child_loop(int *read_flag, int *write_flag,
+// unsigned long *gpr_buf, double *fpr_buf);
+FUNC_START(gpr_child_loop)
+ // r3 = read_flag
+ // r4 = write_flag
+ // r5 = gpr_buf
+ // r6 = fpr_buf
+ PUSH_BASIC_STACK(STACK_SIZE)
+
+ // Save non-volatile GPRs
+ OP_REGS PPC_STL, GPR_SIZE, FIRST_GPR, 31, %r1, STACK_FRAME_LOCAL(0, 0), FIRST_GPR
+
+ // Load GPRs with expected values
+ OP_REGS PPC_LL, GPR_SIZE, FIRST_GPR, 31, r5, 0, FIRST_GPR
+
+ // Load FPRs with expected values
+ OP_REGS lfd, 8, 0, 31, r6
+
+ // Signal to parent that we're ready
+ li r0, 1
+ stw r0, 0(r4)
+
+ // Wait for parent to finish
+1: lwz r0, 0(r3)
+ cmpwi r0, 0
+ beq 1b // Loop while flag is zero
+
+ // Save GPRs back to caller buffer
+ OP_REGS PPC_STL, GPR_SIZE, FIRST_GPR, 31, r5, 0, FIRST_GPR
+
+ // Save FPRs
+ OP_REGS stfd, 8, 0, 31, r6
+
+ // Reload non-volatile GPRs
+ OP_REGS PPC_LL, GPR_SIZE, FIRST_GPR, 31, %r1, STACK_FRAME_LOCAL(0, 0), FIRST_GPR
+
+ POP_BASIC_STACK(STACK_SIZE)
+ blr
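
The assembly helper above wraps a simple shared-memory handshake around the register loads and stores: the child signals readiness through *write_flag and then spins on *read_flag until the tracer has inspected and modified its registers. A C rendering of just the handshake is sketched below; the real helper has to stay in assembly so the non-volatile GPRs and FPRs it loads are not clobbered while waiting.

/* Illustrative only: the flag handshake performed by gpr_child_loop(). */
static void handshake(volatile int *read_flag, volatile int *write_flag)
{
	/* Tell the tracer that the registers now hold the test values. */
	*write_flag = 1;

	/* Spin until the tracer has finished peeking/poking our state. */
	while (!*read_flag)
		;
}
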
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c b/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c
index 17cd480c8780..9ed87d297799 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c
@@ -7,72 +7,127 @@
#include "ptrace.h"
#include "ptrace-gpr.h"
#include "reg.h"
+#include <time.h>
/* Tracer and Tracee Shared Data */
int shm_id;
int *cptr, *pptr;
-float a = FPR_1;
-float b = FPR_2;
-float c = FPR_3;
+extern void gpr_child_loop(int *read_flag, int *write_flag,
+ unsigned long *gpr_buf, double *fpr_buf);
-void gpr(void)
+unsigned long child_gpr_val, parent_gpr_val;
+double child_fpr_val, parent_fpr_val;
+
+static int child(void)
{
- unsigned long gpr_buf[18];
- float fpr_buf[32];
+ unsigned long gpr_buf[32];
+ double fpr_buf[32];
+ int i;
cptr = (int *)shmat(shm_id, NULL, 0);
+ memset(gpr_buf, 0, sizeof(gpr_buf));
+ memset(fpr_buf, 0, sizeof(fpr_buf));
- asm __volatile__(
- ASM_LOAD_GPR_IMMED(gpr_1)
- ASM_LOAD_FPR_SINGLE_PRECISION(flt_1)
- :
- : [gpr_1]"i"(GPR_1), [flt_1] "b" (&a)
- : "memory", "r6", "r7", "r8", "r9", "r10",
- "r11", "r12", "r13", "r14", "r15", "r16", "r17",
- "r18", "r19", "r20", "r21", "r22", "r23", "r24",
- "r25", "r26", "r27", "r28", "r29", "r30", "r31"
- );
-
- cptr[1] = 1;
+ for (i = 0; i < 32; i++) {
+ gpr_buf[i] = child_gpr_val;
+ fpr_buf[i] = child_fpr_val;
+ }
- while (!cptr[0])
- asm volatile("" : : : "memory");
+ gpr_child_loop(&cptr[0], &cptr[1], gpr_buf, fpr_buf);
shmdt((void *)cptr);
- store_gpr(gpr_buf);
- store_fpr_single_precision(fpr_buf);
-
- if (validate_gpr(gpr_buf, GPR_3))
- exit(1);
- if (validate_fpr_float(fpr_buf, c))
- exit(1);
+ FAIL_IF(validate_gpr(gpr_buf, parent_gpr_val));
+ FAIL_IF(validate_fpr_double(fpr_buf, parent_fpr_val));
- exit(0);
+ return 0;
}
int trace_gpr(pid_t child)
{
+ __u64 tmp, fpr[32], *peeked_fprs;
unsigned long gpr[18];
- unsigned long fpr[32];
FAIL_IF(start_trace(child));
+
+ // Check child GPRs match what we expect using GETREGS
FAIL_IF(show_gpr(child, gpr));
- FAIL_IF(validate_gpr(gpr, GPR_1));
+ FAIL_IF(validate_gpr(gpr, child_gpr_val));
+
+ // Check child FPRs match what we expect using GETFPREGS
FAIL_IF(show_fpr(child, fpr));
- FAIL_IF(validate_fpr(fpr, FPR_1_REP));
- FAIL_IF(write_gpr(child, GPR_3));
- FAIL_IF(write_fpr(child, FPR_3_REP));
+ memcpy(&tmp, &child_fpr_val, sizeof(tmp));
+ FAIL_IF(validate_fpr(fpr, tmp));
+
+ // Check child FPRs match what we expect using PEEKUSR
+ peeked_fprs = peek_fprs(child);
+ FAIL_IF(!peeked_fprs);
+ FAIL_IF(validate_fpr(peeked_fprs, tmp));
+ free(peeked_fprs);
+
+ // Write child GPRs using SETREGS
+ FAIL_IF(write_gpr(child, parent_gpr_val));
+
+ // Write child FPRs using SETFPREGS
+ memcpy(&tmp, &parent_fpr_val, sizeof(tmp));
+ FAIL_IF(write_fpr(child, tmp));
+
+ // Check child FPRs match what we just set, using PEEKUSR
+ peeked_fprs = peek_fprs(child);
+ FAIL_IF(!peeked_fprs);
+ FAIL_IF(validate_fpr(peeked_fprs, tmp));
+
+ // Write child FPRs using POKEUSR
+ FAIL_IF(poke_fprs(child, (unsigned long *)peeked_fprs));
+
+ // Child will check its FPRs match before exiting
FAIL_IF(stop_trace(child));
return TEST_PASS;
}
+#ifndef __LONG_WIDTH__
+#define __LONG_WIDTH__ (sizeof(long) * 8)
+#endif
+
+static uint64_t rand_reg(void)
+{
+ uint64_t result;
+ long r;
+
+ r = random();
+
+ // Small values are typical
+ result = r & 0xffff;
+ if (r & 0x10000)
+ return result;
+
+ // Pointers tend to have high bits set
+ result |= random() << (__LONG_WIDTH__ - 31);
+ if (r & 0x100000)
+ return result;
+
+ // And sometimes we want a full 64-bit value
+ result ^= random() << 16;
+
+ return result;
+}
+
int ptrace_gpr(void)
{
- pid_t pid;
+ unsigned long seed;
int ret, status;
+ pid_t pid;
+
+ seed = getpid() ^ time(NULL);
+ printf("srand(%lu)\n", seed);
+ srand(seed);
+
+ child_gpr_val = rand_reg();
+ child_fpr_val = rand_reg();
+ parent_gpr_val = rand_reg();
+ parent_fpr_val = rand_reg();
shm_id = shmget(IPC_PRIVATE, sizeof(int) * 2, 0777|IPC_CREAT);
pid = fork();
@@ -81,7 +136,7 @@ int ptrace_gpr(void)
return TEST_FAIL;
}
if (pid == 0)
- gpr();
+ exit(child());
if (pid) {
pptr = (int *)shmat(shm_id, NULL, 0);
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.h b/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.h
index c5cd53181e2e..a5470b88bd08 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.h
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.h
@@ -12,10 +12,10 @@
#define FPR_3 0.003
#define FPR_4 0.004
-#define FPR_1_REP 0x3f50624de0000000
-#define FPR_2_REP 0x3f60624de0000000
-#define FPR_3_REP 0x3f689374c0000000
-#define FPR_4_REP 0x3f70624de0000000
+#define FPR_1_REP 0x3f50624dd2f1a9fcull
+#define FPR_2_REP 0x3f60624dd2f1a9fcull
+#define FPR_3_REP 0x3f689374bc6a7efaull
+#define FPR_4_REP 0x3f70624dd2f1a9fcull
/* Buffer must have 18 elements */
int validate_gpr(unsigned long *gpr, unsigned long val)
@@ -36,13 +36,13 @@ int validate_gpr(unsigned long *gpr, unsigned long val)
}
/* Buffer must have 32 elements */
-int validate_fpr(unsigned long *fpr, unsigned long val)
+int validate_fpr(__u64 *fpr, __u64 val)
{
int i, found = 1;
for (i = 0; i < 32; i++) {
if (fpr[i] != val) {
- printf("FPR[%d]: %lx Expected: %lx\n", i, fpr[i], val);
+ printf("FPR[%d]: %llx Expected: %llx\n", i, fpr[i], val);
found = 0;
}
}
@@ -53,7 +53,7 @@ int validate_fpr(unsigned long *fpr, unsigned long val)
}
/* Buffer must have 32 elements */
-int validate_fpr_float(float *fpr, float val)
+int validate_fpr_double(double *fpr, double val)
{
int i, found = 1;
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c
index 67ca297c5cca..5dc152b162df 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c
@@ -12,15 +12,15 @@
int shm_id;
unsigned long *cptr, *pptr;
-float a = FPR_1;
-float b = FPR_2;
-float c = FPR_3;
+double a = FPR_1;
+double b = FPR_2;
+double c = FPR_3;
void tm_gpr(void)
{
unsigned long gpr_buf[18];
unsigned long result, texasr;
- float fpr_buf[32];
+ double fpr_buf[32];
printf("Starting the child\n");
cptr = (unsigned long *)shmat(shm_id, NULL, 0);
@@ -29,12 +29,12 @@ trans:
cptr[1] = 0;
asm __volatile__(
ASM_LOAD_GPR_IMMED(gpr_1)
- ASM_LOAD_FPR_SINGLE_PRECISION(flt_1)
+ ASM_LOAD_FPR(flt_1)
"1: ;"
"tbegin.;"
"beq 2f;"
ASM_LOAD_GPR_IMMED(gpr_2)
- ASM_LOAD_FPR_SINGLE_PRECISION(flt_2)
+ ASM_LOAD_FPR(flt_2)
"tsuspend.;"
"li 7, 1;"
"stw 7, 0(%[cptr1]);"
@@ -70,12 +70,12 @@ trans:
shmdt((void *)cptr);
store_gpr(gpr_buf);
- store_fpr_single_precision(fpr_buf);
+ store_fpr(fpr_buf);
if (validate_gpr(gpr_buf, GPR_3))
exit(1);
- if (validate_fpr_float(fpr_buf, c))
+ if (validate_fpr_double(fpr_buf, c))
exit(1);
exit(0);
@@ -87,7 +87,7 @@ trans:
int trace_tm_gpr(pid_t child)
{
unsigned long gpr[18];
- unsigned long fpr[32];
+ __u64 fpr[32];
FAIL_IF(start_trace(child));
FAIL_IF(show_gpr(child, gpr));
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c
index 6f2bce1b6c5d..458cc1a70ccf 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c
@@ -12,10 +12,10 @@
int shm_id;
int *cptr, *pptr;
-float a = FPR_1;
-float b = FPR_2;
-float c = FPR_3;
-float d = FPR_4;
+double a = FPR_1;
+double b = FPR_2;
+double c = FPR_3;
+double d = FPR_4;
__attribute__((used)) void wait_parent(void)
{
@@ -28,7 +28,7 @@ void tm_spd_gpr(void)
{
unsigned long gpr_buf[18];
unsigned long result, texasr;
- float fpr_buf[32];
+ double fpr_buf[32];
cptr = (int *)shmat(shm_id, NULL, 0);
@@ -36,7 +36,7 @@ trans:
cptr[2] = 0;
asm __volatile__(
ASM_LOAD_GPR_IMMED(gpr_1)
- ASM_LOAD_FPR_SINGLE_PRECISION(flt_1)
+ ASM_LOAD_FPR(flt_1)
"1: ;"
"tbegin.;"
@@ -45,7 +45,7 @@ trans:
ASM_LOAD_GPR_IMMED(gpr_2)
"tsuspend.;"
ASM_LOAD_GPR_IMMED(gpr_4)
- ASM_LOAD_FPR_SINGLE_PRECISION(flt_4)
+ ASM_LOAD_FPR(flt_4)
"bl wait_parent;"
"tresume.;"
@@ -77,12 +77,12 @@ trans:
shmdt((void *)cptr);
store_gpr(gpr_buf);
- store_fpr_single_precision(fpr_buf);
+ store_fpr(fpr_buf);
if (validate_gpr(gpr_buf, GPR_3))
exit(1);
- if (validate_fpr_float(fpr_buf, c))
+ if (validate_fpr_double(fpr_buf, c))
exit(1);
exit(0);
}
@@ -93,7 +93,7 @@ trans:
int trace_tm_spd_gpr(pid_t child)
{
unsigned long gpr[18];
- unsigned long fpr[32];
+ __u64 fpr[32];
FAIL_IF(start_trace(child));
FAIL_IF(show_gpr(child, gpr));
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace.h b/tools/testing/selftests/powerpc/ptrace/ptrace.h
index 5181ad9b4b6c..4e0233c0f2b3 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace.h
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace.h
@@ -4,6 +4,9 @@
*
* Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
*/
+
+#define __SANE_USERSPACE_TYPES__
+
#include <inttypes.h>
#include <unistd.h>
#include <stdlib.h>
@@ -20,6 +23,7 @@
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/user.h>
+#include <sys/syscall.h>
#include <linux/elf.h>
#include <linux/types.h>
#include <linux/auxvec.h>
@@ -30,8 +34,8 @@
#define TEST_FAIL 1
struct fpr_regs {
- unsigned long fpr[32];
- unsigned long fpscr;
+ __u64 fpr[32];
+ __u64 fpscr;
};
struct tm_spr_regs {
@@ -318,7 +322,7 @@ fail:
}
/* FPR */
-int show_fpr(pid_t child, unsigned long *fpr)
+int show_fpr(pid_t child, __u64 *fpr)
{
struct fpr_regs *regs;
int ret, i;
@@ -337,7 +341,7 @@ int show_fpr(pid_t child, unsigned long *fpr)
return TEST_PASS;
}
-int write_fpr(pid_t child, unsigned long val)
+int write_fpr(pid_t child, __u64 val)
{
struct fpr_regs *regs;
int ret, i;
@@ -360,7 +364,7 @@ int write_fpr(pid_t child, unsigned long val)
return TEST_PASS;
}
-int show_ckpt_fpr(pid_t child, unsigned long *fpr)
+int show_ckpt_fpr(pid_t child, __u64 *fpr)
{
struct fpr_regs *regs;
struct iovec iov;
@@ -437,6 +441,70 @@ int show_gpr(pid_t child, unsigned long *gpr)
return TEST_PASS;
}
+long sys_ptrace(enum __ptrace_request request, pid_t pid, unsigned long addr, unsigned long data)
+{
+ return syscall(__NR_ptrace, request, pid, (void *)addr, data);
+}
+
+// 33 because of FPSCR
+#define PT_NUM_FPRS (33 * (sizeof(__u64) / sizeof(unsigned long)))
+
+__u64 *peek_fprs(pid_t child)
+{
+ unsigned long *fprs, *p, addr;
+ long ret;
+ int i;
+
+ fprs = malloc(sizeof(unsigned long) * PT_NUM_FPRS);
+ if (!fprs) {
+ perror("malloc() failed");
+ return NULL;
+ }
+
+ for (i = 0, p = fprs; i < PT_NUM_FPRS; i++, p++) {
+ addr = sizeof(unsigned long) * (PT_FPR0 + i);
+ ret = sys_ptrace(PTRACE_PEEKUSER, child, addr, (unsigned long)p);
+ if (ret) {
+ perror("ptrace(PTRACE_PEEKUSR) failed");
+ return NULL;
+ }
+ }
+
+ addr = sizeof(unsigned long) * (PT_FPR0 + i);
+ ret = sys_ptrace(PTRACE_PEEKUSER, child, addr, (unsigned long)&addr);
+ if (!ret) {
+ printf("ptrace(PTRACE_PEEKUSR) succeeded unexpectedly!\n");
+ return NULL;
+ }
+
+ return (__u64 *)fprs;
+}
+
+int poke_fprs(pid_t child, unsigned long *fprs)
+{
+ unsigned long *p, addr;
+ long ret;
+ int i;
+
+ for (i = 0, p = fprs; i < PT_NUM_FPRS; i++, p++) {
+ addr = sizeof(unsigned long) * (PT_FPR0 + i);
+ ret = sys_ptrace(PTRACE_POKEUSER, child, addr, *p);
+ if (ret) {
+ perror("ptrace(PTRACE_POKEUSR) failed");
+ return -1;
+ }
+ }
+
+ addr = sizeof(unsigned long) * (PT_FPR0 + i);
+ ret = sys_ptrace(PTRACE_POKEUSER, child, addr, addr);
+ if (!ret) {
+ printf("ptrace(PTRACE_POKEUSR) succeeded unexpectedly!\n");
+ return -1;
+ }
+
+ return 0;
+}
+
int write_gpr(pid_t child, unsigned long val)
{
struct pt_regs *regs;
@@ -742,4 +810,3 @@ void analyse_texasr(unsigned long texasr)
}
void store_gpr(unsigned long *addr);
-void store_fpr(float *addr);
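
peek_fprs() and poke_fprs() above go through a raw sys_ptrace() wrapper because the kernel's PTRACE_PEEKUSER ABI stores the peeked word at the address passed in the data argument and returns 0, whereas the glibc ptrace() wrapper returns the word itself and reports errors via errno. A sketch of the glibc-style equivalent for a single FPR slot follows; the helper name is hypothetical, and PT_FPR0 comes from the powerpc asm/ptrace.h UAPI header.

/* Illustrative only: read one FPR slot with the glibc ptrace() wrapper. */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <errno.h>
#include <stdio.h>

static int peek_one_fpr(pid_t child, int n, unsigned long *val)
{
	unsigned long addr = sizeof(unsigned long) * (PT_FPR0 + n);
	long word;

	errno = 0;
	word = ptrace(PTRACE_PEEKUSER, child, (void *)addr, NULL);
	if (word == -1 && errno) {
		perror("ptrace(PTRACE_PEEKUSER) failed");
		return -1;
	}

	*val = (unsigned long)word;
	return 0;
}
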
diff --git a/tools/testing/selftests/powerpc/security/.gitignore b/tools/testing/selftests/powerpc/security/.gitignore
index 93614b125ded..9357b186b13c 100644
--- a/tools/testing/selftests/powerpc/security/.gitignore
+++ b/tools/testing/selftests/powerpc/security/.gitignore
@@ -2,3 +2,4 @@
rfi_flush
entry_flush
spectre_v2
+uaccess_flush
diff --git a/tools/testing/selftests/proc/proc-pid-vm.c b/tools/testing/selftests/proc/proc-pid-vm.c
index 28604c9f805c..e5962f4794f5 100644
--- a/tools/testing/selftests/proc/proc-pid-vm.c
+++ b/tools/testing/selftests/proc/proc-pid-vm.c
@@ -211,10 +211,19 @@ static int make_exe(const uint8_t *payload, size_t len)
}
#endif
-static bool g_vsyscall = false;
+/*
+ * 0: vsyscall VMA doesn't exist (vsyscall=none)
+ * 1: vsyscall VMA is r-xp       (vsyscall=emulate)
+ * 2: vsyscall VMA is --xp       (vsyscall=xonly)
+ */
+static int g_vsyscall;
+static const char *str_vsyscall;
-static const char str_vsyscall[] =
+static const char str_vsyscall_0[] = "";
+static const char str_vsyscall_1[] =
"ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall]\n";
+static const char str_vsyscall_2[] =
+"ffffffffff600000-ffffffffff601000 --xp 00000000 00:00 0 [vsyscall]\n";
#ifdef __x86_64__
static void sigaction_SIGSEGV(int _, siginfo_t *__, void *___)
@@ -223,7 +232,7 @@ static void sigaction_SIGSEGV(int _, siginfo_t *__, void *___)
}
/*
- * vsyscall page can't be unmapped, probe it with memory load.
+ * vsyscall page can't be unmapped, probe it directly.
*/
static void vsyscall(void)
{
@@ -246,13 +255,52 @@ static void vsyscall(void)
act.sa_sigaction = sigaction_SIGSEGV;
(void)sigaction(SIGSEGV, &act, NULL);
+ /* gettimeofday(NULL, NULL); */
+ asm volatile (
+ "call %P0"
+ :
+ : "i" (0xffffffffff600000), "D" (NULL), "S" (NULL)
+ : "rax", "rcx", "r11"
+ );
+ exit(0);
+ }
+ waitpid(pid, &wstatus, 0);
+ if (WIFEXITED(wstatus) && WEXITSTATUS(wstatus) == 0) {
+ /* vsyscall page exists and is executable. */
+ } else {
+ /* vsyscall page doesn't exist. */
+ g_vsyscall = 0;
+ return;
+ }
+
+ pid = fork();
+ if (pid < 0) {
+ fprintf(stderr, "fork, errno %d\n", errno);
+ exit(1);
+ }
+ if (pid == 0) {
+ struct rlimit rlim = {0, 0};
+ (void)setrlimit(RLIMIT_CORE, &rlim);
+
+ /* Hide "segfault at ffffffffff600000" messages. */
+ struct sigaction act;
+ memset(&act, 0, sizeof(struct sigaction));
+ act.sa_flags = SA_SIGINFO;
+ act.sa_sigaction = sigaction_SIGSEGV;
+ (void)sigaction(SIGSEGV, &act, NULL);
+
*(volatile int *)0xffffffffff600000UL;
exit(0);
}
waitpid(pid, &wstatus, 0);
if (WIFEXITED(wstatus) && WEXITSTATUS(wstatus) == 0) {
- g_vsyscall = true;
+ /* vsyscall page is readable and executable. */
+ g_vsyscall = 1;
+ return;
}
+
+ /* vsyscall page is executable but unreadable. */
+ g_vsyscall = 2;
}
int main(void)
@@ -261,6 +309,19 @@ int main(void)
int exec_fd;
vsyscall();
+ switch (g_vsyscall) {
+ case 0:
+ str_vsyscall = str_vsyscall_0;
+ break;
+ case 1:
+ str_vsyscall = str_vsyscall_1;
+ break;
+ case 2:
+ str_vsyscall = str_vsyscall_2;
+ break;
+ default:
+ abort();
+ }
atexit(ate);
@@ -314,7 +375,7 @@ int main(void)
/* Test /proc/$PID/maps */
{
- const size_t len = strlen(buf0) + (g_vsyscall ? strlen(str_vsyscall) : 0);
+ const size_t len = strlen(buf0) + strlen(str_vsyscall);
char buf[256];
ssize_t rv;
int fd;
@@ -327,7 +388,7 @@ int main(void)
rv = read(fd, buf, sizeof(buf));
assert(rv == len);
assert(memcmp(buf, buf0, strlen(buf0)) == 0);
- if (g_vsyscall) {
+ if (g_vsyscall > 0) {
assert(memcmp(buf + strlen(buf0), str_vsyscall, strlen(str_vsyscall)) == 0);
}
}
@@ -374,7 +435,7 @@ int main(void)
assert(memmem(buf, rv, S[i], strlen(S[i])));
}
- if (g_vsyscall) {
+ if (g_vsyscall > 0) {
assert(memmem(buf, rv, str_vsyscall, strlen(str_vsyscall)));
}
}
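
The test above classifies the vsyscall page by probing execute and read access from forked children, then checks that /proc/$PID/maps reports the matching permission column. For illustration, the same classification can be read back from /proc/self/maps directly, as sketched below; the function name and return convention are placeholders, not part of the test.

/* Illustrative only: classify the vsyscall mapping from /proc/self/maps.
 * Returns 0 (none), 1 (r-xp, emulate) or 2 (--xp, xonly); -1 on error. */
#include <stdio.h>
#include <string.h>

static int vsyscall_mode_from_maps(void)
{
	char line[256];
	int mode = 0;
	FILE *f = fopen("/proc/self/maps", "r");

	if (!f)
		return -1;

	while (fgets(line, sizeof(line), f)) {
		if (!strstr(line, "[vsyscall]"))
			continue;
		mode = strstr(line, " r-xp ") ? 1 : 2;
		break;
	}

	fclose(f);
	return mode;
}
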
diff --git a/tools/testing/selftests/sgx/defines.h b/tools/testing/selftests/sgx/defines.h
index 02d775789ea7..d8587c971941 100644
--- a/tools/testing/selftests/sgx/defines.h
+++ b/tools/testing/selftests/sgx/defines.h
@@ -24,6 +24,9 @@ enum encl_op_type {
ENCL_OP_PUT_TO_ADDRESS,
ENCL_OP_GET_FROM_ADDRESS,
ENCL_OP_NOP,
+ ENCL_OP_EACCEPT,
+ ENCL_OP_EMODPE,
+ ENCL_OP_INIT_TCS_PAGE,
ENCL_OP_MAX,
};
@@ -53,4 +56,24 @@ struct encl_op_get_from_addr {
uint64_t addr;
};
+struct encl_op_eaccept {
+ struct encl_op_header header;
+ uint64_t epc_addr;
+ uint64_t flags;
+ uint64_t ret;
+};
+
+struct encl_op_emodpe {
+ struct encl_op_header header;
+ uint64_t epc_addr;
+ uint64_t flags;
+};
+
+struct encl_op_init_tcs_page {
+ struct encl_op_header header;
+ uint64_t tcs_page;
+ uint64_t ssa;
+ uint64_t entry;
+};
+
#endif /* DEFINES_H */
diff --git a/tools/testing/selftests/sgx/load.c b/tools/testing/selftests/sgx/load.c
index 006b464c8fc9..94bdeac1cf04 100644
--- a/tools/testing/selftests/sgx/load.c
+++ b/tools/testing/selftests/sgx/load.c
@@ -130,6 +130,47 @@ static bool encl_ioc_add_pages(struct encl *encl, struct encl_segment *seg)
return true;
}
+/*
+ * Parse the enclave code's symbol table to locate and return the address
+ * of the provided symbol.
+ */
+uint64_t encl_get_entry(struct encl *encl, const char *symbol)
+{
+ Elf64_Shdr *sections;
+ Elf64_Sym *symtab;
+ Elf64_Ehdr *ehdr;
+ char *sym_names;
+ int num_sym;
+ int i;
+
+ ehdr = encl->bin;
+ sections = encl->bin + ehdr->e_shoff;
+
+ for (i = 0; i < ehdr->e_shnum; i++) {
+ if (sections[i].sh_type == SHT_SYMTAB) {
+ symtab = (Elf64_Sym *)((char *)encl->bin + sections[i].sh_offset);
+ num_sym = sections[i].sh_size / sections[i].sh_entsize;
+ break;
+ }
+ }
+
+ for (i = 0; i < ehdr->e_shnum; i++) {
+ if (sections[i].sh_type == SHT_STRTAB) {
+ sym_names = (char *)encl->bin + sections[i].sh_offset;
+ break;
+ }
+ }
+
+ for (i = 0; i < num_sym; i++) {
+ Elf64_Sym *sym = &symtab[i];
+
+ if (!strcmp(symbol, sym_names + sym->st_name))
+ return (uint64_t)sym->st_value;
+ }
+
+ return 0;
+}
+
bool encl_load(const char *path, struct encl *encl, unsigned long heap_size)
{
const char device_path[] = "/dev/sgx_enclave";
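
encl_get_entry() above scans for the first SHT_SYMTAB and the first SHT_STRTAB section, which is sufficient for the test enclave since it carries a single symtab/strtab pair. As a general ELF note, a symbol table's sh_link field holds the index of its associated string table, so a more defensive lookup resolves the string table through sh_link instead of taking the first SHT_STRTAB (which could be .shstrtab). The sketch below is illustrative only and not part of the selftest.

/* Illustrative only: symbol lookup resolving the strtab via sh_link. */
#include <elf.h>
#include <stdint.h>
#include <string.h>

static uint64_t elf_sym_lookup(void *bin, const char *symbol)
{
	Elf64_Ehdr *ehdr = bin;
	Elf64_Shdr *sections = (Elf64_Shdr *)((char *)bin + ehdr->e_shoff);
	int i;

	for (i = 0; i < ehdr->e_shnum; i++) {
		Elf64_Sym *symtab;
		char *sym_names;
		int j, num_sym;

		if (sections[i].sh_type != SHT_SYMTAB)
			continue;

		symtab = (Elf64_Sym *)((char *)bin + sections[i].sh_offset);
		num_sym = sections[i].sh_size / sections[i].sh_entsize;
		/* sh_link indexes the string table that belongs to this symtab. */
		sym_names = (char *)bin + sections[sections[i].sh_link].sh_offset;

		for (j = 0; j < num_sym; j++) {
			if (!strcmp(symbol, sym_names + symtab[j].st_name))
				return symtab[j].st_value;
		}
	}

	return 0;
}
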
diff --git a/tools/testing/selftests/sgx/main.c b/tools/testing/selftests/sgx/main.c
index dd74fa42302e..9820b3809c69 100644
--- a/tools/testing/selftests/sgx/main.c
+++ b/tools/testing/selftests/sgx/main.c
@@ -25,6 +25,18 @@ static const uint64_t MAGIC = 0x1122334455667788ULL;
static const uint64_t MAGIC2 = 0x8877665544332211ULL;
vdso_sgx_enter_enclave_t vdso_sgx_enter_enclave;
+/*
+ * The Security Information (SECINFO) data structure needed by a few SGX
+ * instructions (e.g. ENCLU[EACCEPT] and ENCLU[EMODPE]) holds metadata
+ * about an enclave page. &enum sgx_secinfo_page_state specifies the
+ * SECINFO flags used for page state.
+ */
+enum sgx_secinfo_page_state {
+ SGX_SECINFO_PENDING = (1 << 3),
+ SGX_SECINFO_MODIFIED = (1 << 4),
+ SGX_SECINFO_PR = (1 << 5),
+};
+
struct vdso_symtab {
Elf64_Sym *elf_symtab;
const char *elf_symstrtab;
@@ -74,6 +86,15 @@ static bool vdso_get_symtab(void *addr, struct vdso_symtab *symtab)
return true;
}
+static inline int sgx2_supported(void)
+{
+ unsigned int eax, ebx, ecx, edx;
+
+ __cpuid_count(SGX_CPUID, 0x0, eax, ebx, ecx, edx);
+
+ return eax & 0x2;
+}
+
static unsigned long elf_sym_hash(const char *name)
{
unsigned long h = 0, high;
@@ -110,6 +131,24 @@ static Elf64_Sym *vdso_symtab_get(struct vdso_symtab *symtab, const char *name)
}
/*
+ * Return the offset in the enclave where the TCS segment can be found.
+ * The first RW segment loaded is the TCS.
+ */
+static off_t encl_get_tcs_offset(struct encl *encl)
+{
+ int i;
+
+ for (i = 0; i < encl->nr_segments; i++) {
+ struct encl_segment *seg = &encl->segment_tbl[i];
+
+ if (i == 0 && seg->prot == (PROT_READ | PROT_WRITE))
+ return seg->offset;
+ }
+
+ return -1;
+}
+
+/*
* Return the offset in the enclave where the data segment can be found.
* The first RW segment loaded is the TCS, skip that to get info on the
* data segment.
@@ -339,7 +378,127 @@ TEST_F(enclave, unclobbered_vdso_oversubscribed)
EXPECT_EQ(get_op.value, MAGIC);
EXPECT_EEXIT(&self->run);
EXPECT_EQ(self->run.user_data, 0);
+}
+
+TEST_F_TIMEOUT(enclave, unclobbered_vdso_oversubscribed_remove, 900)
+{
+ struct sgx_enclave_remove_pages remove_ioc;
+ struct sgx_enclave_modify_types modt_ioc;
+ struct encl_op_get_from_buf get_op;
+ struct encl_op_eaccept eaccept_op;
+ struct encl_op_put_to_buf put_op;
+ struct encl_segment *heap;
+ unsigned long total_mem;
+ int ret, errno_save;
+ unsigned long addr;
+ unsigned long i;
+
+ /*
+ * Create enclave with additional heap that is as big as all
+ * available physical SGX memory.
+ */
+ total_mem = get_total_epc_mem();
+ ASSERT_NE(total_mem, 0);
+ TH_LOG("Creating an enclave with %lu bytes heap may take a while ...",
+ total_mem);
+ ASSERT_TRUE(setup_test_encl(total_mem, &self->encl, _metadata));
+
+ /*
+ * Hardware (SGX2) and kernel support is needed for this test. Start
+ * with check that test has a chance of succeeding.
+ */
+ memset(&modt_ioc, 0, sizeof(modt_ioc));
+ ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
+
+ if (ret == -1) {
+ if (errno == ENOTTY)
+ SKIP(return,
+ "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
+ else if (errno == ENODEV)
+ SKIP(return, "System does not support SGX2");
+ }
+
+ /*
+ * Invalid parameters were provided during sanity check,
+ * expect command to fail.
+ */
+ EXPECT_EQ(ret, -1);
+
+ /* SGX2 is supported by kernel and hardware, test can proceed. */
+ memset(&self->run, 0, sizeof(self->run));
+ self->run.tcs = self->encl.encl_base;
+
+ heap = &self->encl.segment_tbl[self->encl.nr_segments - 1];
+
+ put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
+ put_op.value = MAGIC;
+
+ EXPECT_EQ(ENCL_CALL(&put_op, &self->run, false), 0);
+
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.user_data, 0);
+
+ get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
+ get_op.value = 0;
+
+ EXPECT_EQ(ENCL_CALL(&get_op, &self->run, false), 0);
+
+ EXPECT_EQ(get_op.value, MAGIC);
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.user_data, 0);
+
+ /* Trim entire heap. */
+ memset(&modt_ioc, 0, sizeof(modt_ioc));
+
+ modt_ioc.offset = heap->offset;
+ modt_ioc.length = heap->size;
+ modt_ioc.page_type = SGX_PAGE_TYPE_TRIM;
+
+ TH_LOG("Changing type of %zd bytes to trimmed may take a while ...",
+ heap->size);
+ ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
+ errno_save = ret == -1 ? errno : 0;
+
+ EXPECT_EQ(ret, 0);
+ EXPECT_EQ(errno_save, 0);
+ EXPECT_EQ(modt_ioc.result, 0);
+ EXPECT_EQ(modt_ioc.count, heap->size);
+
+ /* EACCEPT all removed pages. */
+ addr = self->encl.encl_base + heap->offset;
+
+ eaccept_op.flags = SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED;
+ eaccept_op.header.type = ENCL_OP_EACCEPT;
+
+ TH_LOG("Entering enclave to run EACCEPT for each page of %zd bytes may take a while ...",
+ heap->size);
+ for (i = 0; i < heap->size; i += 4096) {
+ eaccept_op.epc_addr = addr + i;
+ eaccept_op.ret = 0;
+
+ EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+ ASSERT_EQ(eaccept_op.ret, 0);
+ ASSERT_EQ(self->run.function, EEXIT);
+ }
+
+ /* Complete page removal. */
+ memset(&remove_ioc, 0, sizeof(remove_ioc));
+
+ remove_ioc.offset = heap->offset;
+ remove_ioc.length = heap->size;
+
+ TH_LOG("Removing %zd bytes from enclave may take a while ...",
+ heap->size);
+ ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc);
+ errno_save = ret == -1 ? errno : 0;
+
+ EXPECT_EQ(ret, 0);
+ EXPECT_EQ(errno_save, 0);
+ EXPECT_EQ(remove_ioc.count, heap->size);
}
TEST_F(enclave, clobbered_vdso)
@@ -555,4 +714,1280 @@ TEST_F(enclave, pte_permissions)
EXPECT_EQ(self->run.exception_addr, 0);
}
+/*
+ * Modifying permissions of TCS page should not be possible.
+ */
+TEST_F(enclave, tcs_permissions)
+{
+ struct sgx_enclave_restrict_permissions ioc;
+ int ret, errno_save;
+
+ ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
+
+ memset(&self->run, 0, sizeof(self->run));
+ self->run.tcs = self->encl.encl_base;
+
+ memset(&ioc, 0, sizeof(ioc));
+
+ /*
+ * Ensure kernel supports needed ioctl() and system supports needed
+ * commands.
+ */
+
+ ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS, &ioc);
+ errno_save = ret == -1 ? errno : 0;
+
+ /*
+ * Invalid parameters were provided during sanity check,
+ * expect command to fail.
+ */
+ ASSERT_EQ(ret, -1);
+
+ /* ret == -1 */
+ if (errno_save == ENOTTY)
+ SKIP(return,
+ "Kernel does not support SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS ioctl()");
+ else if (errno_save == ENODEV)
+ SKIP(return, "System does not support SGX2");
+
+ /*
+ * Attempt to make TCS page read-only. This is not allowed and
+ * should be prevented by the kernel.
+ */
+ ioc.offset = encl_get_tcs_offset(&self->encl);
+ ioc.length = PAGE_SIZE;
+ ioc.permissions = SGX_SECINFO_R;
+
+ ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS, &ioc);
+ errno_save = ret == -1 ? errno : 0;
+
+ EXPECT_EQ(ret, -1);
+ EXPECT_EQ(errno_save, EINVAL);
+ EXPECT_EQ(ioc.result, 0);
+ EXPECT_EQ(ioc.count, 0);
+}
+
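
Each SGX2 test above opens with the same probe: issue the relevant ioctl() with deliberately zeroed (invalid) arguments, treat ENOTTY as "kernel lacks the ioctl", ENODEV as "hardware lacks SGX2", and any other outcome as confirmation that support is present and the real call can proceed. Wrapped as a helper, that pattern might look like the sketch below; the helper and enum names are hypothetical and only illustrate the recurring check.

/* Illustrative only: the repeated SGX2 support probe as one helper. */
#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>

enum sgx2_probe { SGX2_PROBE_OK, SGX2_PROBE_NO_KERNEL, SGX2_PROBE_NO_HW };

static enum sgx2_probe probe_sgx2_ioctl(int encl_fd, unsigned long request,
					void *zeroed_args, size_t size)
{
	/* Zeroed arguments are invalid on purpose; a supporting kernel is
	 * expected to fail with something other than ENOTTY or ENODEV. */
	memset(zeroed_args, 0, size);

	if (ioctl(encl_fd, request, zeroed_args) == -1) {
		if (errno == ENOTTY)
			return SGX2_PROBE_NO_KERNEL;
		if (errno == ENODEV)
			return SGX2_PROBE_NO_HW;
	}

	return SGX2_PROBE_OK;
}
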
+/*
+ * Enclave page permission test.
+ *
+ * Modify and restore enclave page's EPCM (enclave) permissions from
+ * outside enclave (ENCLS[EMODPR] via kernel) as well as from within
+ * enclave (via ENCLU[EMODPE]). Check for page fault if
+ * VMA allows access but EPCM permissions do not.
+ */
+TEST_F(enclave, epcm_permissions)
+{
+ struct sgx_enclave_restrict_permissions restrict_ioc;
+ struct encl_op_get_from_addr get_addr_op;
+ struct encl_op_put_to_addr put_addr_op;
+ struct encl_op_eaccept eaccept_op;
+ struct encl_op_emodpe emodpe_op;
+ unsigned long data_start;
+ int ret, errno_save;
+
+ ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
+
+ memset(&self->run, 0, sizeof(self->run));
+ self->run.tcs = self->encl.encl_base;
+
+ /*
+ * Ensure kernel supports needed ioctl() and system supports needed
+ * commands.
+ */
+ memset(&restrict_ioc, 0, sizeof(restrict_ioc));
+
+ ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS,
+ &restrict_ioc);
+ errno_save = ret == -1 ? errno : 0;
+
+ /*
+ * Invalid parameters were provided during sanity check,
+ * expect command to fail.
+ */
+ ASSERT_EQ(ret, -1);
+
+ /* ret == -1 */
+ if (errno_save == ENOTTY)
+ SKIP(return,
+ "Kernel does not support SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS ioctl()");
+ else if (errno_save == ENODEV)
+ SKIP(return, "System does not support SGX2");
+
+ /*
+ * Page that will have its permissions changed is the second data
+ * page in the .data segment. This forms part of the local encl_buffer
+ * within the enclave.
+ *
+ * At start of test @data_start should have EPCM as well as PTE and
+ * VMA permissions of RW.
+ */
+
+ data_start = self->encl.encl_base +
+ encl_get_data_offset(&self->encl) + PAGE_SIZE;
+
+ /*
+ * Sanity check that page at @data_start is writable before making
+ * any changes to page permissions.
+ *
+ * Start by writing MAGIC to test page.
+ */
+ put_addr_op.value = MAGIC;
+ put_addr_op.addr = data_start;
+ put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;
+
+ EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);
+
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+
+ /*
+ * Read memory that was just written to, confirming that
+ * page is writable.
+ */
+ get_addr_op.value = 0;
+ get_addr_op.addr = data_start;
+ get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;
+
+ EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
+
+ EXPECT_EQ(get_addr_op.value, MAGIC);
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+
+ /*
+ * Change EPCM permissions to read-only. Kernel still considers
+ * the page writable.
+ */
+ memset(&restrict_ioc, 0, sizeof(restrict_ioc));
+
+ restrict_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
+ restrict_ioc.length = PAGE_SIZE;
+ restrict_ioc.permissions = SGX_SECINFO_R;
+
+ ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS,
+ &restrict_ioc);
+ errno_save = ret == -1 ? errno : 0;
+
+ EXPECT_EQ(ret, 0);
+ EXPECT_EQ(errno_save, 0);
+ EXPECT_EQ(restrict_ioc.result, 0);
+ EXPECT_EQ(restrict_ioc.count, 4096);
+
+ /*
+ * EPCM permissions changed from kernel, need to EACCEPT from enclave.
+ */
+ eaccept_op.epc_addr = data_start;
+ eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_REG | SGX_SECINFO_PR;
+ eaccept_op.ret = 0;
+ eaccept_op.header.type = ENCL_OP_EACCEPT;
+
+ EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
+
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+ EXPECT_EQ(eaccept_op.ret, 0);
+
+ /*
+	 * EPCM permissions of the page are now read-only, expect #PF
+ * on EPCM when attempting to write to page from within enclave.
+ */
+ put_addr_op.value = MAGIC2;
+
+ EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);
+
+ EXPECT_EQ(self->run.function, ERESUME);
+ EXPECT_EQ(self->run.exception_vector, 14);
+ EXPECT_EQ(self->run.exception_error_code, 0x8007);
+ EXPECT_EQ(self->run.exception_addr, data_start);
+
+ self->run.exception_vector = 0;
+ self->run.exception_error_code = 0;
+ self->run.exception_addr = 0;
+
+ /*
+ * Received AEX but cannot return to enclave at same entrypoint,
+ * need different TCS from where EPCM permission can be made writable
+ * again.
+ */
+ self->run.tcs = self->encl.encl_base + PAGE_SIZE;
+
+ /*
+ * Enter enclave at new TCS to change EPCM permissions to be
+ * writable again and thus fix the page fault that triggered the
+ * AEX.
+ */
+
+ emodpe_op.epc_addr = data_start;
+ emodpe_op.flags = SGX_SECINFO_R | SGX_SECINFO_W;
+ emodpe_op.header.type = ENCL_OP_EMODPE;
+
+ EXPECT_EQ(ENCL_CALL(&emodpe_op, &self->run, true), 0);
+
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+
+ /*
+ * Attempt to return to main TCS to resume execution at faulting
+ * instruction, PTE should continue to allow writing to the page.
+ */
+ self->run.tcs = self->encl.encl_base;
+
+ /*
+	 * The wrong page permissions that caused the original fault have
+	 * now been fixed via EPCM permissions.
+ * Resume execution in main TCS to re-attempt the memory access.
+ */
+ self->run.tcs = self->encl.encl_base;
+
+ EXPECT_EQ(vdso_sgx_enter_enclave((unsigned long)&put_addr_op, 0, 0,
+ ERESUME, 0, 0,
+ &self->run),
+ 0);
+
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+
+ get_addr_op.value = 0;
+
+ EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
+
+ EXPECT_EQ(get_addr_op.value, MAGIC2);
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.user_data, 0);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+}
+
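
The exception_error_code of 0x8007 expected above is a standard x86 page-fault error code: bit 0 reports that the page was present, bit 1 a write access, bit 2 a user-mode access, and bit 15 that the fault was caused by SGX access-control checks, so 0x8007 describes a user-mode write to a present page that the EPCM refused. A small decode sketch follows, purely for illustration; the struct and helper are not part of the selftest.

/* Illustrative only: decode the #PF error code fields checked above. */
#include <stdbool.h>
#include <stdint.h>

struct pf_error_code {
	bool present;	/* bit 0 */
	bool write;	/* bit 1 */
	bool user;	/* bit 2 */
	bool sgx;	/* bit 15 */
};

static struct pf_error_code decode_pf_error(uint64_t code)
{
	struct pf_error_code e = {
		.present = code & (1 << 0),
		.write   = code & (1 << 1),
		.user    = code & (1 << 2),
		.sgx     = code & (1 << 15),
	};

	return e;	/* 0x8007 => present, write, user and SGX all set */
}
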
+/*
+ * Test the addition of pages to an initialized enclave via writing to
+ * a page belonging to the enclave's address space but was not added
+ * during enclave creation.
+ */
+TEST_F(enclave, augment)
+{
+ struct encl_op_get_from_addr get_addr_op;
+ struct encl_op_put_to_addr put_addr_op;
+ struct encl_op_eaccept eaccept_op;
+ size_t total_size = 0;
+ void *addr;
+ int i;
+
+ if (!sgx2_supported())
+ SKIP(return, "SGX2 not supported");
+
+ ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
+
+ memset(&self->run, 0, sizeof(self->run));
+ self->run.tcs = self->encl.encl_base;
+
+ for (i = 0; i < self->encl.nr_segments; i++) {
+ struct encl_segment *seg = &self->encl.segment_tbl[i];
+
+ total_size += seg->size;
+ }
+
+ /*
+ * Actual enclave size is expected to be larger than the loaded
+ * test enclave since enclave size must be a power of 2 in bytes
+ * and test_encl does not consume it all.
+ */
+ EXPECT_LT(total_size + PAGE_SIZE, self->encl.encl_size);
+
+ /*
+ * Create memory mapping for the page that will be added. New
+ * memory mapping is for one page right after all existing
+ * mappings.
+	 * The kernel will allow a new mapping with any permissions if it
+	 * falls into the enclave's address range but is not backed
+	 * by existing enclave pages.
+ */
+ addr = mmap((void *)self->encl.encl_base + total_size, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_SHARED | MAP_FIXED, self->encl.fd, 0);
+ EXPECT_NE(addr, MAP_FAILED);
+
+ self->run.exception_vector = 0;
+ self->run.exception_error_code = 0;
+ self->run.exception_addr = 0;
+
+ /*
+ * Attempt to write to the new page from within enclave.
+ * Expected to fail since page is not (yet) part of the enclave.
+ * The first #PF will trigger the addition of the page to the
+ * enclave, but since the new page needs an EACCEPT from within the
+	 * enclave before it can be used, it is not possible to return
+	 * successfully to the faulting instruction. This is why the second
+	 * #PF captured here has the SGX bit set: it comes from hardware
+	 * preventing the page from being used.
+ */
+ put_addr_op.value = MAGIC;
+ put_addr_op.addr = (unsigned long)addr;
+ put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;
+
+ EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);
+
+ EXPECT_EQ(self->run.function, ERESUME);
+ EXPECT_EQ(self->run.exception_vector, 14);
+ EXPECT_EQ(self->run.exception_addr, (unsigned long)addr);
+
+ if (self->run.exception_error_code == 0x6) {
+ munmap(addr, PAGE_SIZE);
+ SKIP(return, "Kernel does not support adding pages to initialized enclave");
+ }
+
+ EXPECT_EQ(self->run.exception_error_code, 0x8007);
+
+ self->run.exception_vector = 0;
+ self->run.exception_error_code = 0;
+ self->run.exception_addr = 0;
+
+ /* Handle AEX by running EACCEPT from new entry point. */
+ self->run.tcs = self->encl.encl_base + PAGE_SIZE;
+
+ eaccept_op.epc_addr = self->encl.encl_base + total_size;
+ eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
+ eaccept_op.ret = 0;
+ eaccept_op.header.type = ENCL_OP_EACCEPT;
+
+ EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
+
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+ EXPECT_EQ(eaccept_op.ret, 0);
+
+ /* Can now return to main TCS to resume execution. */
+ self->run.tcs = self->encl.encl_base;
+
+ EXPECT_EQ(vdso_sgx_enter_enclave((unsigned long)&put_addr_op, 0, 0,
+ ERESUME, 0, 0,
+ &self->run),
+ 0);
+
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+
+ /*
+ * Read memory from newly added page that was just written to,
+ * confirming that data previously written (MAGIC) is present.
+ */
+ get_addr_op.value = 0;
+ get_addr_op.addr = (unsigned long)addr;
+ get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;
+
+ EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
+
+ EXPECT_EQ(get_addr_op.value, MAGIC);
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+
+ munmap(addr, PAGE_SIZE);
+}
+
+/*
+ * Test for the addition of pages to an initialized enclave via a
+ * pre-emptive run of EACCEPT on page to be added.
+ */
+TEST_F(enclave, augment_via_eaccept)
+{
+ struct encl_op_get_from_addr get_addr_op;
+ struct encl_op_put_to_addr put_addr_op;
+ struct encl_op_eaccept eaccept_op;
+ size_t total_size = 0;
+ void *addr;
+ int i;
+
+ if (!sgx2_supported())
+ SKIP(return, "SGX2 not supported");
+
+ ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
+
+ memset(&self->run, 0, sizeof(self->run));
+ self->run.tcs = self->encl.encl_base;
+
+ for (i = 0; i < self->encl.nr_segments; i++) {
+ struct encl_segment *seg = &self->encl.segment_tbl[i];
+
+ total_size += seg->size;
+ }
+
+ /*
+ * Actual enclave size is expected to be larger than the loaded
+ * test enclave since enclave size must be a power of 2 in bytes while
+ * test_encl does not consume it all.
+ */
+ EXPECT_LT(total_size + PAGE_SIZE, self->encl.encl_size);
+
+ /*
+ * mmap() a page at end of existing enclave to be used for dynamic
+ * EPC page.
+ *
+	 * The kernel will allow a new mapping with any permissions if it
+	 * falls into the enclave's address range but is not backed
+	 * by existing enclave pages.
+ */
+
+ addr = mmap((void *)self->encl.encl_base + total_size, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC, MAP_SHARED | MAP_FIXED,
+ self->encl.fd, 0);
+ EXPECT_NE(addr, MAP_FAILED);
+
+ self->run.exception_vector = 0;
+ self->run.exception_error_code = 0;
+ self->run.exception_addr = 0;
+
+ /*
+	 * Run EACCEPT on the new page to trigger the EACCEPT->(#PF)->EAUG->
+	 * EACCEPT (again, without a #PF) flow. All of this should be
+	 * transparent to userspace.
+ */
+ eaccept_op.epc_addr = self->encl.encl_base + total_size;
+ eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
+ eaccept_op.ret = 0;
+ eaccept_op.header.type = ENCL_OP_EACCEPT;
+
+ EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
+
+ if (self->run.exception_vector == 14 &&
+ self->run.exception_error_code == 4 &&
+ self->run.exception_addr == self->encl.encl_base + total_size) {
+ munmap(addr, PAGE_SIZE);
+ SKIP(return, "Kernel does not support adding pages to initialized enclave");
+ }
+
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+ EXPECT_EQ(eaccept_op.ret, 0);
+
+ /*
+ * New page should be accessible from within enclave - attempt to
+ * write to it.
+ */
+ put_addr_op.value = MAGIC;
+ put_addr_op.addr = (unsigned long)addr;
+ put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;
+
+ EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);
+
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+
+ /*
+ * Read memory from newly added page that was just written to,
+ * confirming that data previously written (MAGIC) is present.
+ */
+ get_addr_op.value = 0;
+ get_addr_op.addr = (unsigned long)addr;
+ get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;
+
+ EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
+
+ EXPECT_EQ(get_addr_op.value, MAGIC);
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+
+ munmap(addr, PAGE_SIZE);
+}
+
+/*
+ * SGX2 page type modification test in two phases:
+ * Phase 1:
+ * Create a new TCS, consisting of three new pages (a stack page with regular
+ * page type, SSA page with regular page type, and TCS page with TCS page
+ * type) in an initialized enclave and run a simple workload within it.
+ * Phase 2:
+ * Remove the three pages added in phase 1, add a new regular page at the
+ * same address that previously hosted the TCS page and verify that it can
+ * be modified.
+ */
+TEST_F(enclave, tcs_create)
+{
+ struct encl_op_init_tcs_page init_tcs_page_op;
+ struct sgx_enclave_remove_pages remove_ioc;
+ struct encl_op_get_from_addr get_addr_op;
+ struct sgx_enclave_modify_types modt_ioc;
+ struct encl_op_put_to_addr put_addr_op;
+ struct encl_op_get_from_buf get_buf_op;
+ struct encl_op_put_to_buf put_buf_op;
+ void *addr, *tcs, *stack_end, *ssa;
+ struct encl_op_eaccept eaccept_op;
+ size_t total_size = 0;
+ uint64_t val_64;
+ int errno_save;
+ int ret, i;
+
+ ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl,
+ _metadata));
+
+ memset(&self->run, 0, sizeof(self->run));
+ self->run.tcs = self->encl.encl_base;
+
+ /*
+ * Hardware (SGX2) and kernel support is needed for this test. Start
+ * with check that test has a chance of succeeding.
+ */
+ memset(&modt_ioc, 0, sizeof(modt_ioc));
+ ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
+
+ if (ret == -1) {
+ if (errno == ENOTTY)
+ SKIP(return,
+ "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
+ else if (errno == ENODEV)
+ SKIP(return, "System does not support SGX2");
+ }
+
+ /*
+ * Invalid parameters were provided during sanity check,
+ * expect command to fail.
+ */
+ EXPECT_EQ(ret, -1);
+
+ /*
+ * Add three regular pages via EAUG: one will be the TCS stack, one
+ * will be the TCS SSA, and one will be the new TCS. The stack and
+	 * SSA will remain regular pages; the TCS page will need its
+	 * type changed after being populated with the needed data.
+ */
+ for (i = 0; i < self->encl.nr_segments; i++) {
+ struct encl_segment *seg = &self->encl.segment_tbl[i];
+
+ total_size += seg->size;
+ }
+
+ /*
+ * Actual enclave size is expected to be larger than the loaded
+ * test enclave since enclave size must be a power of 2 in bytes while
+ * test_encl does not consume it all.
+ */
+ EXPECT_LT(total_size + 3 * PAGE_SIZE, self->encl.encl_size);
+
+ /*
+ * mmap() three pages at end of existing enclave to be used for the
+ * three new pages.
+ */
+ addr = mmap((void *)self->encl.encl_base + total_size, 3 * PAGE_SIZE,
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED,
+ self->encl.fd, 0);
+ EXPECT_NE(addr, MAP_FAILED);
+
+ self->run.exception_vector = 0;
+ self->run.exception_error_code = 0;
+ self->run.exception_addr = 0;
+
+ stack_end = (void *)self->encl.encl_base + total_size;
+ tcs = (void *)self->encl.encl_base + total_size + PAGE_SIZE;
+ ssa = (void *)self->encl.encl_base + total_size + 2 * PAGE_SIZE;
+
+ /*
+ * Run EACCEPT on each new page to trigger the
+ * EACCEPT->(#PF)->EAUG->EACCEPT(again without a #PF) flow.
+ */
+
+ eaccept_op.epc_addr = (unsigned long)stack_end;
+ eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
+ eaccept_op.ret = 0;
+ eaccept_op.header.type = ENCL_OP_EACCEPT;
+
+ EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
+
+ if (self->run.exception_vector == 14 &&
+ self->run.exception_error_code == 4 &&
+ self->run.exception_addr == (unsigned long)stack_end) {
+ munmap(addr, 3 * PAGE_SIZE);
+ SKIP(return, "Kernel does not support adding pages to initialized enclave");
+ }
+
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+ EXPECT_EQ(eaccept_op.ret, 0);
+
+ eaccept_op.epc_addr = (unsigned long)ssa;
+
+ EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
+
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+ EXPECT_EQ(eaccept_op.ret, 0);
+
+ eaccept_op.epc_addr = (unsigned long)tcs;
+
+ EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
+
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+ EXPECT_EQ(eaccept_op.ret, 0);
+
+ /*
+	 * Three new pages have been added to the enclave. Now populate the
+	 * TCS page with the needed data. This should be done from within
+	 * the enclave, so provide the function that will perform the
+	 * actual data population.
+ */
+
+ /*
+ * New TCS will use the "encl_dyn_entry" entrypoint that expects
+ * stack to begin in page before TCS page.
+ */
+ val_64 = encl_get_entry(&self->encl, "encl_dyn_entry");
+ EXPECT_NE(val_64, 0);
+
+ init_tcs_page_op.tcs_page = (unsigned long)tcs;
+ init_tcs_page_op.ssa = (unsigned long)total_size + 2 * PAGE_SIZE;
+ init_tcs_page_op.entry = val_64;
+ init_tcs_page_op.header.type = ENCL_OP_INIT_TCS_PAGE;
+
+ EXPECT_EQ(ENCL_CALL(&init_tcs_page_op, &self->run, true), 0);
+
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+
+ /* Change TCS page type to TCS. */
+ memset(&modt_ioc, 0, sizeof(modt_ioc));
+
+ modt_ioc.offset = total_size + PAGE_SIZE;
+ modt_ioc.length = PAGE_SIZE;
+ modt_ioc.page_type = SGX_PAGE_TYPE_TCS;
+
+ ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
+ errno_save = ret == -1 ? errno : 0;
+
+ EXPECT_EQ(ret, 0);
+ EXPECT_EQ(errno_save, 0);
+ EXPECT_EQ(modt_ioc.result, 0);
+ EXPECT_EQ(modt_ioc.count, 4096);
+
+ /* EACCEPT new TCS page from enclave. */
+ eaccept_op.epc_addr = (unsigned long)tcs;
+ eaccept_op.flags = SGX_SECINFO_TCS | SGX_SECINFO_MODIFIED;
+ eaccept_op.ret = 0;
+ eaccept_op.header.type = ENCL_OP_EACCEPT;
+
+ EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
+
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+ EXPECT_EQ(eaccept_op.ret, 0);
+
+ /* Run workload from new TCS. */
+ self->run.tcs = (unsigned long)tcs;
+
+ /*
+ * Simple workload to write to data buffer and read value back.
+ */
+ put_buf_op.header.type = ENCL_OP_PUT_TO_BUFFER;
+ put_buf_op.value = MAGIC;
+
+ EXPECT_EQ(ENCL_CALL(&put_buf_op, &self->run, true), 0);
+
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+
+ get_buf_op.header.type = ENCL_OP_GET_FROM_BUFFER;
+ get_buf_op.value = 0;
+
+ EXPECT_EQ(ENCL_CALL(&get_buf_op, &self->run, true), 0);
+
+ EXPECT_EQ(get_buf_op.value, MAGIC);
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+
+ /*
+ * Phase 2 of test:
+ * Remove pages associated with new TCS, create a regular page
+ * where TCS page used to be and verify it can be used as a regular
+ * page.
+ */
+
+ /* Start page removal by requesting change of page type to PT_TRIM. */
+ memset(&modt_ioc, 0, sizeof(modt_ioc));
+
+ modt_ioc.offset = total_size;
+ modt_ioc.length = 3 * PAGE_SIZE;
+ modt_ioc.page_type = SGX_PAGE_TYPE_TRIM;
+
+ ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
+ errno_save = ret == -1 ? errno : 0;
+
+ EXPECT_EQ(ret, 0);
+ EXPECT_EQ(errno_save, 0);
+ EXPECT_EQ(modt_ioc.result, 0);
+ EXPECT_EQ(modt_ioc.count, 3 * PAGE_SIZE);
+
+ /*
+ * Enter enclave via TCS #1 and approve page removal by sending
+ * EACCEPT for each of three removed pages.
+ */
+ self->run.tcs = self->encl.encl_base;
+
+ eaccept_op.epc_addr = (unsigned long)stack_end;
+ eaccept_op.flags = SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED;
+ eaccept_op.ret = 0;
+ eaccept_op.header.type = ENCL_OP_EACCEPT;
+
+ EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
+
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+ EXPECT_EQ(eaccept_op.ret, 0);
+
+ eaccept_op.epc_addr = (unsigned long)tcs;
+ eaccept_op.ret = 0;
+
+ EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
+
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+ EXPECT_EQ(eaccept_op.ret, 0);
+
+ eaccept_op.epc_addr = (unsigned long)ssa;
+ eaccept_op.ret = 0;
+
+ EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
+
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+ EXPECT_EQ(eaccept_op.ret, 0);
+
+ /* Send final ioctl() to complete page removal. */
+ memset(&remove_ioc, 0, sizeof(remove_ioc));
+
+ remove_ioc.offset = total_size;
+ remove_ioc.length = 3 * PAGE_SIZE;
+
+ ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc);
+ errno_save = ret == -1 ? errno : 0;
+
+ EXPECT_EQ(ret, 0);
+ EXPECT_EQ(errno_save, 0);
+ EXPECT_EQ(remove_ioc.count, 3 * PAGE_SIZE);
+
+ /*
+ * Enter enclave via TCS #1 and access location where TCS #3 was to
+ * trigger dynamic add of regular page at that location.
+ */
+ eaccept_op.epc_addr = (unsigned long)tcs;
+ eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
+ eaccept_op.ret = 0;
+ eaccept_op.header.type = ENCL_OP_EACCEPT;
+
+ EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
+
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+ EXPECT_EQ(eaccept_op.ret, 0);
+
+ /*
+ * New page should be accessible from within enclave - write to it.
+ */
+ put_addr_op.value = MAGIC;
+ put_addr_op.addr = (unsigned long)tcs;
+ put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;
+
+ EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);
+
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+
+ /*
+ * Read memory from newly added page that was just written to,
+ * confirming that data previously written (MAGIC) is present.
+ */
+ get_addr_op.value = 0;
+ get_addr_op.addr = (unsigned long)tcs;
+ get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;
+
+ EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
+
+ EXPECT_EQ(get_addr_op.value, MAGIC);
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+
+ munmap(addr, 3 * PAGE_SIZE);
+}
+
+/*
+ * Ensure sane behavior if user requests page removal, does not run
+ * EACCEPT from within enclave but still attempts to finalize page removal
+ * with the SGX_IOC_ENCLAVE_REMOVE_PAGES ioctl(). The latter should fail
+ * because the removal was not EACCEPTed from within the enclave.
+ */
+TEST_F(enclave, remove_added_page_no_eaccept)
+{
+ struct sgx_enclave_remove_pages remove_ioc;
+ struct encl_op_get_from_addr get_addr_op;
+ struct sgx_enclave_modify_types modt_ioc;
+ struct encl_op_put_to_addr put_addr_op;
+ unsigned long data_start;
+ int ret, errno_save;
+
+ ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
+
+ memset(&self->run, 0, sizeof(self->run));
+ self->run.tcs = self->encl.encl_base;
+
+ /*
+ * Hardware (SGX2) and kernel support is needed for this test. Start
+ * with check that test has a chance of succeeding.
+ */
+ memset(&modt_ioc, 0, sizeof(modt_ioc));
+ ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
+
+ if (ret == -1) {
+ if (errno == ENOTTY)
+ SKIP(return,
+ "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
+ else if (errno == ENODEV)
+ SKIP(return, "System does not support SGX2");
+ }
+
+ /*
+ * Invalid parameters were provided during sanity check,
+ * expect command to fail.
+ */
+ EXPECT_EQ(ret, -1);
+
+ /*
+ * Page that will be removed is the second data page in the .data
+ * segment. This forms part of the local encl_buffer within the
+ * enclave.
+ */
+ data_start = self->encl.encl_base +
+ encl_get_data_offset(&self->encl) + PAGE_SIZE;
+
+ /*
+ * Sanity check that page at @data_start is writable before
+ * removing it.
+ *
+ * Start by writing MAGIC to test page.
+ */
+ put_addr_op.value = MAGIC;
+ put_addr_op.addr = data_start;
+ put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;
+
+ EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);
+
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+
+ /*
+ * Read memory that was just written to, confirming that data
+ * previously written (MAGIC) is present.
+ */
+ get_addr_op.value = 0;
+ get_addr_op.addr = data_start;
+ get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;
+
+ EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
+
+ EXPECT_EQ(get_addr_op.value, MAGIC);
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+
+ /* Start page removal by requesting change of page type to PT_TRIM */
+ memset(&modt_ioc, 0, sizeof(modt_ioc));
+
+ modt_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
+ modt_ioc.length = PAGE_SIZE;
+ modt_ioc.page_type = SGX_PAGE_TYPE_TRIM;
+
+ ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
+ errno_save = ret == -1 ? errno : 0;
+
+ EXPECT_EQ(ret, 0);
+ EXPECT_EQ(errno_save, 0);
+ EXPECT_EQ(modt_ioc.result, 0);
+ EXPECT_EQ(modt_ioc.count, 4096);
+
+ /* Skip EACCEPT */
+
+ /* Send final ioctl() to complete page removal */
+ memset(&remove_ioc, 0, sizeof(remove_ioc));
+
+ remove_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
+ remove_ioc.length = PAGE_SIZE;
+
+ ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc);
+ errno_save = ret == -1 ? errno : 0;
+
+ /* Operation not permitted since EACCEPT was omitted. */
+ EXPECT_EQ(ret, -1);
+ EXPECT_EQ(errno_save, EPERM);
+ EXPECT_EQ(remove_ioc.count, 0);
+}
+
+/*
+ * Request enclave page removal but, instead of correctly following up
+ * with EACCEPT, attempt to read the page from within the enclave.
+ */
+TEST_F(enclave, remove_added_page_invalid_access)
+{
+ struct encl_op_get_from_addr get_addr_op;
+ struct encl_op_put_to_addr put_addr_op;
+ struct sgx_enclave_modify_types ioc;
+ unsigned long data_start;
+ int ret, errno_save;
+
+ ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
+
+ memset(&self->run, 0, sizeof(self->run));
+ self->run.tcs = self->encl.encl_base;
+
+ /*
+ * Hardware (SGX2) and kernel support is needed for this test. Start
+ * with check that test has a chance of succeeding.
+ */
+ memset(&ioc, 0, sizeof(ioc));
+ ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc);
+
+ if (ret == -1) {
+ if (errno == ENOTTY)
+ SKIP(return,
+ "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
+ else if (errno == ENODEV)
+ SKIP(return, "System does not support SGX2");
+ }
+
+ /*
+ * Invalid parameters were provided during sanity check,
+ * expect command to fail.
+ */
+ EXPECT_EQ(ret, -1);
+
+ /*
+ * Page that will be removed is the second data page in the .data
+ * segment. This forms part of the local encl_buffer within the
+ * enclave.
+ */
+ data_start = self->encl.encl_base +
+ encl_get_data_offset(&self->encl) + PAGE_SIZE;
+
+ /*
+ * Sanity check that page at @data_start is writable before
+ * removing it.
+ *
+ * Start by writing MAGIC to test page.
+ */
+ put_addr_op.value = MAGIC;
+ put_addr_op.addr = data_start;
+ put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;
+
+ EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);
+
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+
+ /*
+ * Read memory that was just written to, confirming that data
+ * previously written (MAGIC) is present.
+ */
+ get_addr_op.value = 0;
+ get_addr_op.addr = data_start;
+ get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;
+
+ EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
+
+ EXPECT_EQ(get_addr_op.value, MAGIC);
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+
+ /* Start page removal by requesting change of page type to PT_TRIM. */
+ memset(&ioc, 0, sizeof(ioc));
+
+ ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
+ ioc.length = PAGE_SIZE;
+ ioc.page_type = SGX_PAGE_TYPE_TRIM;
+
+ ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc);
+ errno_save = ret == -1 ? errno : 0;
+
+ EXPECT_EQ(ret, 0);
+ EXPECT_EQ(errno_save, 0);
+ EXPECT_EQ(ioc.result, 0);
+ EXPECT_EQ(ioc.count, 4096);
+
+ /*
+ * Read from page that was just removed.
+ */
+ get_addr_op.value = 0;
+
+ EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
+
+ /*
+	 * From the kernel's perspective the page is present, but according to
+	 * SGX it should not be accessible, so a #PF with the SGX bit set is
+	 * expected.
+ */
+
+ EXPECT_EQ(self->run.function, ERESUME);
+ EXPECT_EQ(self->run.exception_vector, 14);
+ EXPECT_EQ(self->run.exception_error_code, 0x8005);
+ EXPECT_EQ(self->run.exception_addr, data_start);
+}
+
+/*
+ * Request enclave page removal and correctly follow up with EACCEPT,
+ * but skip the final removal ioctl(); instead, attempt to read the
+ * removed page from within the enclave.
+ */
+TEST_F(enclave, remove_added_page_invalid_access_after_eaccept)
+{
+ struct encl_op_get_from_addr get_addr_op;
+ struct encl_op_put_to_addr put_addr_op;
+ struct sgx_enclave_modify_types ioc;
+ struct encl_op_eaccept eaccept_op;
+ unsigned long data_start;
+ int ret, errno_save;
+
+ ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
+
+ memset(&self->run, 0, sizeof(self->run));
+ self->run.tcs = self->encl.encl_base;
+
+ /*
+ * Hardware (SGX2) and kernel support is needed for this test. Start
+ * with check that test has a chance of succeeding.
+ */
+ memset(&ioc, 0, sizeof(ioc));
+ ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc);
+
+ if (ret == -1) {
+ if (errno == ENOTTY)
+ SKIP(return,
+ "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
+ else if (errno == ENODEV)
+ SKIP(return, "System does not support SGX2");
+ }
+
+ /*
+ * Invalid parameters were provided during sanity check,
+ * expect command to fail.
+ */
+ EXPECT_EQ(ret, -1);
+
+ /*
+ * Page that will be removed is the second data page in the .data
+ * segment. This forms part of the local encl_buffer within the
+ * enclave.
+ */
+ data_start = self->encl.encl_base +
+ encl_get_data_offset(&self->encl) + PAGE_SIZE;
+
+ /*
+ * Sanity check that page at @data_start is writable before
+ * removing it.
+ *
+ * Start by writing MAGIC to test page.
+ */
+ put_addr_op.value = MAGIC;
+ put_addr_op.addr = data_start;
+ put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;
+
+ EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);
+
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+
+ /*
+ * Read memory that was just written to, confirming that data
+ * previously written (MAGIC) is present.
+ */
+ get_addr_op.value = 0;
+ get_addr_op.addr = data_start;
+ get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;
+
+ EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
+
+ EXPECT_EQ(get_addr_op.value, MAGIC);
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+
+ /* Start page removal by requesting change of page type to PT_TRIM. */
+ memset(&ioc, 0, sizeof(ioc));
+
+ ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
+ ioc.length = PAGE_SIZE;
+ ioc.page_type = SGX_PAGE_TYPE_TRIM;
+
+ ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc);
+ errno_save = ret == -1 ? errno : 0;
+
+ EXPECT_EQ(ret, 0);
+ EXPECT_EQ(errno_save, 0);
+ EXPECT_EQ(ioc.result, 0);
+ EXPECT_EQ(ioc.count, 4096);
+
+ eaccept_op.epc_addr = (unsigned long)data_start;
+ eaccept_op.ret = 0;
+ eaccept_op.flags = SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED;
+ eaccept_op.header.type = ENCL_OP_EACCEPT;
+
+ EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
+
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+ EXPECT_EQ(eaccept_op.ret, 0);
+
+ /* Skip ioctl() to remove page. */
+
+ /*
+ * Read from page that was just removed.
+ */
+ get_addr_op.value = 0;
+
+ EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
+
+ /*
+	 * From the kernel's perspective the page is present, but according to
+	 * SGX it should not be accessible, so a #PF with the SGX bit set is
+	 * expected.
+ */
+
+ EXPECT_EQ(self->run.function, ERESUME);
+ EXPECT_EQ(self->run.exception_vector, 14);
+ EXPECT_EQ(self->run.exception_error_code, 0x8005);
+ EXPECT_EQ(self->run.exception_addr, data_start);
+}
+
+TEST_F(enclave, remove_untouched_page)
+{
+ struct sgx_enclave_remove_pages remove_ioc;
+ struct sgx_enclave_modify_types modt_ioc;
+ struct encl_op_eaccept eaccept_op;
+ unsigned long data_start;
+ int ret, errno_save;
+
+ ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
+
+ /*
+ * Hardware (SGX2) and kernel support is needed for this test. Start
+ * with check that test has a chance of succeeding.
+ */
+ memset(&modt_ioc, 0, sizeof(modt_ioc));
+ ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
+
+ if (ret == -1) {
+ if (errno == ENOTTY)
+ SKIP(return,
+ "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
+ else if (errno == ENODEV)
+ SKIP(return, "System does not support SGX2");
+ }
+
+ /*
+ * Invalid parameters were provided during sanity check,
+ * expect command to fail.
+ */
+ EXPECT_EQ(ret, -1);
+
+ /* SGX2 is supported by kernel and hardware, test can proceed. */
+ memset(&self->run, 0, sizeof(self->run));
+ self->run.tcs = self->encl.encl_base;
+
+ data_start = self->encl.encl_base +
+ encl_get_data_offset(&self->encl) + PAGE_SIZE;
+
+ memset(&modt_ioc, 0, sizeof(modt_ioc));
+
+ modt_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
+ modt_ioc.length = PAGE_SIZE;
+ modt_ioc.page_type = SGX_PAGE_TYPE_TRIM;
+ ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
+ errno_save = ret == -1 ? errno : 0;
+
+ EXPECT_EQ(ret, 0);
+ EXPECT_EQ(errno_save, 0);
+ EXPECT_EQ(modt_ioc.result, 0);
+ EXPECT_EQ(modt_ioc.count, 4096);
+
+ /*
+ * Enter enclave via TCS #1 and approve page removal by sending
+ * EACCEPT for removed page.
+ */
+
+ eaccept_op.epc_addr = data_start;
+ eaccept_op.flags = SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED;
+ eaccept_op.ret = 0;
+ eaccept_op.header.type = ENCL_OP_EACCEPT;
+
+ EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+ EXPECT_EQ(eaccept_op.ret, 0);
+
+ memset(&remove_ioc, 0, sizeof(remove_ioc));
+
+ remove_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
+ remove_ioc.length = PAGE_SIZE;
+ ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc);
+ errno_save = ret == -1 ? errno : 0;
+
+ EXPECT_EQ(ret, 0);
+ EXPECT_EQ(errno_save, 0);
+ EXPECT_EQ(remove_ioc.count, 4096);
+}
+
TEST_HARNESS_MAIN
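
The dynamic-page tests above all drive the same three-step SGX2 removal flow: SGX_IOC_ENCLAVE_MODIFY_TYPES marks the pages PT_TRIM, the enclave approves the change with EACCEPT, and SGX_IOC_ENCLAVE_REMOVE_PAGES completes the removal. The following is a minimal sketch of that flow reusing the selftest's own types and helpers; the wrapper name is hypothetical, it is not part of the patch, and error handling is reduced to early returns.

static int remove_one_page_sketch(struct encl *encl, struct sgx_enclave_run *run,
				  unsigned long offset)
{
	/* Step 1: ask the kernel to change the page type to PT_TRIM. */
	struct sgx_enclave_modify_types modt = {
		.offset = offset,
		.length = PAGE_SIZE,
		.page_type = SGX_PAGE_TYPE_TRIM,
	};
	/* Step 3: the final removal request. */
	struct sgx_enclave_remove_pages rm = {
		.offset = offset,
		.length = PAGE_SIZE,
	};
	/* Step 2: enclave-side approval of the type change. */
	struct encl_op_eaccept eaccept = {
		.header.type = ENCL_OP_EACCEPT,
		.epc_addr = encl->encl_base + offset,
		.flags = SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED,
	};

	if (ioctl(encl->fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt))
		return -1;

	if (ENCL_CALL(&eaccept, run, true) || eaccept.ret)
		return -1;

	/* Skipping the EACCEPT above makes this ioctl() fail with EPERM. */
	return ioctl(encl->fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &rm);
}

The tests deliberately split this sequence apart so that each failure mode (a skipped EACCEPT, or a stray access to a trimmed page) can be verified in isolation.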
diff --git a/tools/testing/selftests/sgx/main.h b/tools/testing/selftests/sgx/main.h
index b45c52ec7ab3..fc585be97e2f 100644
--- a/tools/testing/selftests/sgx/main.h
+++ b/tools/testing/selftests/sgx/main.h
@@ -38,6 +38,7 @@ void encl_delete(struct encl *ctx);
bool encl_load(const char *path, struct encl *encl, unsigned long heap_size);
bool encl_measure(struct encl *encl);
bool encl_build(struct encl *encl);
+uint64_t encl_get_entry(struct encl *encl, const char *symbol);
int sgx_enter_enclave(void *rdi, void *rsi, long rdx, u32 function, void *r8, void *r9,
struct sgx_enclave_run *run);
diff --git a/tools/testing/selftests/sgx/sigstruct.c b/tools/testing/selftests/sgx/sigstruct.c
index 50c5ab1aa6fa..a07896a46364 100644
--- a/tools/testing/selftests/sgx/sigstruct.c
+++ b/tools/testing/selftests/sgx/sigstruct.c
@@ -17,6 +17,12 @@
#include "defines.h"
#include "main.h"
+/*
+ * FIXME: OpenSSL 3.0 has deprecated some functions. For now just ignore
+ * the warnings.
+ */
+#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+
struct q1q2_ctx {
BN_CTX *bn_ctx;
BIGNUM *m;
diff --git a/tools/testing/selftests/sgx/test_encl.c b/tools/testing/selftests/sgx/test_encl.c
index 4fca01cfd898..c0d6397295e3 100644
--- a/tools/testing/selftests/sgx/test_encl.c
+++ b/tools/testing/selftests/sgx/test_encl.c
@@ -11,6 +11,42 @@
*/
static uint8_t encl_buffer[8192] = { 1 };
+enum sgx_enclu_function {
+ EACCEPT = 0x5,
+ EMODPE = 0x6,
+};
+
+static void do_encl_emodpe(void *_op)
+{
+ struct sgx_secinfo secinfo __aligned(sizeof(struct sgx_secinfo)) = {0};
+ struct encl_op_emodpe *op = _op;
+
+ secinfo.flags = op->flags;
+
+ asm volatile(".byte 0x0f, 0x01, 0xd7"
+ :
+ : "a" (EMODPE),
+ "b" (&secinfo),
+ "c" (op->epc_addr));
+}
+
+static void do_encl_eaccept(void *_op)
+{
+ struct sgx_secinfo secinfo __aligned(sizeof(struct sgx_secinfo)) = {0};
+ struct encl_op_eaccept *op = _op;
+ int rax;
+
+ secinfo.flags = op->flags;
+
+ asm volatile(".byte 0x0f, 0x01, 0xd7"
+ : "=a" (rax)
+ : "a" (EACCEPT),
+ "b" (&secinfo),
+ "c" (op->epc_addr));
+
+ op->ret = rax;
+}
+
static void *memcpy(void *dest, const void *src, size_t n)
{
size_t i;
@@ -21,6 +57,35 @@ static void *memcpy(void *dest, const void *src, size_t n)
return dest;
}
+static void *memset(void *dest, int c, size_t n)
+{
+ size_t i;
+
+ for (i = 0; i < n; i++)
+ ((char *)dest)[i] = c;
+
+ return dest;
+}
+
+static void do_encl_init_tcs_page(void *_op)
+{
+ struct encl_op_init_tcs_page *op = _op;
+ void *tcs = (void *)op->tcs_page;
+ uint32_t val_32;
+
+ memset(tcs, 0, 16); /* STATE and FLAGS */
+ memcpy(tcs + 16, &op->ssa, 8); /* OSSA */
+ memset(tcs + 24, 0, 4); /* CSSA */
+ val_32 = 1;
+ memcpy(tcs + 28, &val_32, 4); /* NSSA */
+ memcpy(tcs + 32, &op->entry, 8); /* OENTRY */
+ memset(tcs + 40, 0, 24); /* AEP, OFSBASE, OGSBASE */
+ val_32 = 0xFFFFFFFF;
+ memcpy(tcs + 64, &val_32, 4); /* FSLIMIT */
+ memcpy(tcs + 68, &val_32, 4); /* GSLIMIT */
+ memset(tcs + 72, 0, 4024); /* Reserved */
+}
+
static void do_encl_op_put_to_buf(void *op)
{
struct encl_op_put_to_buf *op2 = op;
@@ -62,6 +127,9 @@ void encl_body(void *rdi, void *rsi)
do_encl_op_put_to_addr,
do_encl_op_get_from_addr,
do_encl_op_nop,
+ do_encl_eaccept,
+ do_encl_emodpe,
+ do_encl_init_tcs_page,
};
struct encl_op_header *op = (struct encl_op_header *)rdi;
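
The byte offsets that do_encl_init_tcs_page() fills above correspond to the architectural Thread Control Structure layout. As a reading aid, the same layout expressed as a struct; this is a sketch assuming <stdint.h> fixed-width types, not part of the patch, with the field values taken from the comments above.

struct tcs_layout_sketch {
	uint64_t state;			/* offset  0: zeroed */
	uint64_t flags;			/* offset  8: zeroed */
	uint64_t ossa;			/* offset 16: set to op->ssa */
	uint32_t cssa;			/* offset 24: 0 */
	uint32_t nssa;			/* offset 28: 1 */
	uint64_t oentry;		/* offset 32: set to op->entry */
	uint64_t aep;			/* offset 40: zeroed */
	uint64_t ofsbase;		/* offset 48: zeroed */
	uint64_t ogsbase;		/* offset 56: zeroed */
	uint32_t fslimit;		/* offset 64: 0xFFFFFFFF */
	uint32_t gslimit;		/* offset 68: 0xFFFFFFFF */
	uint8_t  reserved[4024];	/* offset 72: zeroed, pads the page to 4096 */
};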
diff --git a/tools/testing/selftests/sgx/test_encl_bootstrap.S b/tools/testing/selftests/sgx/test_encl_bootstrap.S
index 82fb0dfcbd23..03ae0f57e29d 100644
--- a/tools/testing/selftests/sgx/test_encl_bootstrap.S
+++ b/tools/testing/selftests/sgx/test_encl_bootstrap.S
@@ -45,6 +45,12 @@ encl_entry:
# TCS #2. By adding the value of encl_stack to it, we get
# the absolute address for the stack.
lea (encl_stack)(%rbx), %rax
+ jmp encl_entry_core
+encl_dyn_entry:
+ # Entry point for dynamically created TCS page expected to follow
+ # its stack directly.
+ lea -1(%rbx), %rax
+encl_entry_core:
xchg %rsp, %rax
push %rax
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index 108587cb327a..d9fa6a9ea584 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -93,6 +93,7 @@ TEST_PROGS := run_vmtests.sh
TEST_FILES := test_vmalloc.sh
TEST_FILES += test_hmm.sh
+TEST_FILES += va_128TBswitch.sh
include ../lib.mk
diff --git a/tools/testing/selftests/vm/hmm-tests.c b/tools/testing/selftests/vm/hmm-tests.c
index 203323967b50..529f53b40296 100644
--- a/tools/testing/selftests/vm/hmm-tests.c
+++ b/tools/testing/selftests/vm/hmm-tests.c
@@ -36,6 +36,7 @@
* in the usual include/uapi/... directory.
*/
#include "../../../../lib/test_hmm_uapi.h"
+#include "../../../../mm/gup_test.h"
struct hmm_buffer {
void *ptr;
@@ -46,12 +47,22 @@ struct hmm_buffer {
uint64_t faults;
};
+enum {
+ HMM_PRIVATE_DEVICE_ONE,
+ HMM_PRIVATE_DEVICE_TWO,
+ HMM_COHERENCE_DEVICE_ONE,
+ HMM_COHERENCE_DEVICE_TWO,
+};
+
#define TWOMEG (1 << 21)
#define HMM_BUFFER_SIZE (1024 << 12)
#define HMM_PATH_MAX 64
#define NTIMES 10
#define ALIGN(x, a) (((x) + (a - 1)) & (~((a) - 1)))
+/* Just the flags we need, copied from mm.h: */
+#define FOLL_WRITE 0x01 /* check pte is writable */
+#define FOLL_LONGTERM 0x10000 /* mapping lifetime is indefinite */
FIXTURE(hmm)
{
@@ -60,6 +71,21 @@ FIXTURE(hmm)
unsigned int page_shift;
};
+FIXTURE_VARIANT(hmm)
+{
+ int device_number;
+};
+
+FIXTURE_VARIANT_ADD(hmm, hmm_device_private)
+{
+ .device_number = HMM_PRIVATE_DEVICE_ONE,
+};
+
+FIXTURE_VARIANT_ADD(hmm, hmm_device_coherent)
+{
+ .device_number = HMM_COHERENCE_DEVICE_ONE,
+};
+
FIXTURE(hmm2)
{
int fd0;
@@ -68,6 +94,24 @@ FIXTURE(hmm2)
unsigned int page_shift;
};
+FIXTURE_VARIANT(hmm2)
+{
+ int device_number0;
+ int device_number1;
+};
+
+FIXTURE_VARIANT_ADD(hmm2, hmm2_device_private)
+{
+ .device_number0 = HMM_PRIVATE_DEVICE_ONE,
+ .device_number1 = HMM_PRIVATE_DEVICE_TWO,
+};
+
+FIXTURE_VARIANT_ADD(hmm2, hmm2_device_coherent)
+{
+ .device_number0 = HMM_COHERENCE_DEVICE_ONE,
+ .device_number1 = HMM_COHERENCE_DEVICE_TWO,
+};
+
static int hmm_open(int unit)
{
char pathname[HMM_PATH_MAX];
@@ -81,12 +125,19 @@ static int hmm_open(int unit)
return fd;
}
+static bool hmm_is_coherent_type(int dev_num)
+{
+ return (dev_num >= HMM_COHERENCE_DEVICE_ONE);
+}
+
FIXTURE_SETUP(hmm)
{
self->page_size = sysconf(_SC_PAGE_SIZE);
self->page_shift = ffs(self->page_size) - 1;
- self->fd = hmm_open(0);
+ self->fd = hmm_open(variant->device_number);
+ if (self->fd < 0 && hmm_is_coherent_type(variant->device_number))
+ SKIP(exit(0), "DEVICE_COHERENT not available");
ASSERT_GE(self->fd, 0);
}
@@ -95,9 +146,11 @@ FIXTURE_SETUP(hmm2)
self->page_size = sysconf(_SC_PAGE_SIZE);
self->page_shift = ffs(self->page_size) - 1;
- self->fd0 = hmm_open(0);
+ self->fd0 = hmm_open(variant->device_number0);
+ if (self->fd0 < 0 && hmm_is_coherent_type(variant->device_number0))
+ SKIP(exit(0), "DEVICE_COHERENT not available");
ASSERT_GE(self->fd0, 0);
- self->fd1 = hmm_open(1);
+ self->fd1 = hmm_open(variant->device_number1);
ASSERT_GE(self->fd1, 0);
}
@@ -211,6 +264,20 @@ static void hmm_nanosleep(unsigned int n)
nanosleep(&t, NULL);
}
+static int hmm_migrate_sys_to_dev(int fd,
+ struct hmm_buffer *buffer,
+ unsigned long npages)
+{
+ return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_DEV, buffer, npages);
+}
+
+static int hmm_migrate_dev_to_sys(int fd,
+ struct hmm_buffer *buffer,
+ unsigned long npages)
+{
+ return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_SYS, buffer, npages);
+}
+
/*
* Simple NULL test of device open/close.
*/
@@ -875,7 +942,7 @@ TEST_F(hmm, migrate)
ptr[i] = i;
/* Migrate memory to device. */
- ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
+ ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
@@ -923,7 +990,7 @@ TEST_F(hmm, migrate_fault)
ptr[i] = i;
/* Migrate memory to device. */
- ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
+ ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
@@ -936,7 +1003,7 @@ TEST_F(hmm, migrate_fault)
ASSERT_EQ(ptr[i], i);
/* Migrate memory to the device again. */
- ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
+ ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
@@ -976,7 +1043,7 @@ TEST_F(hmm, migrate_shared)
ASSERT_NE(buffer->ptr, MAP_FAILED);
/* Migrate memory to device. */
- ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
+ ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
ASSERT_EQ(ret, -ENOENT);
hmm_buffer_free(buffer);
@@ -1015,7 +1082,7 @@ TEST_F(hmm2, migrate_mixed)
p = buffer->ptr;
/* Migrating a protected area should be an error. */
- ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, npages);
+ ret = hmm_migrate_sys_to_dev(self->fd1, buffer, npages);
ASSERT_EQ(ret, -EINVAL);
/* Punch a hole after the first page address. */
@@ -1023,7 +1090,7 @@ TEST_F(hmm2, migrate_mixed)
ASSERT_EQ(ret, 0);
/* We expect an error if the vma doesn't cover the range. */
- ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 3);
+ ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 3);
ASSERT_EQ(ret, -EINVAL);
/* Page 2 will be a read-only zero page. */
@@ -1055,13 +1122,13 @@ TEST_F(hmm2, migrate_mixed)
/* Now try to migrate pages 2-5 to device 1. */
buffer->ptr = p + 2 * self->page_size;
- ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 4);
+ ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 4);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, 4);
/* Page 5 won't be migrated to device 0 because it's on device 1. */
buffer->ptr = p + 5 * self->page_size;
- ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_MIGRATE, buffer, 1);
+ ret = hmm_migrate_sys_to_dev(self->fd0, buffer, 1);
ASSERT_EQ(ret, -ENOENT);
buffer->ptr = p;
@@ -1070,8 +1137,12 @@ TEST_F(hmm2, migrate_mixed)
}
/*
- * Migrate anonymous memory to device private memory and fault it back to system
- * memory multiple times.
+ * Migrate anonymous memory to device memory and back to system memory
+ * multiple times. With a device-private configuration this happens via
+ * page faults triggered by CPU access. With a device-coherent configuration,
+ * the pages must be migrated back to system memory explicitly, because
+ * coherent device memory is directly accessible by the CPU and therefore
+ * never generates a page fault.
*/
TEST_F(hmm, migrate_multiple)
{
@@ -1107,8 +1178,7 @@ TEST_F(hmm, migrate_multiple)
ptr[i] = i;
/* Migrate memory to device. */
- ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer,
- npages);
+ ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
@@ -1116,7 +1186,13 @@ TEST_F(hmm, migrate_multiple)
for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
- /* Fault pages back to system memory and check them. */
+ /* Migrate back to system memory and check them. */
+ if (hmm_is_coherent_type(variant->device_number)) {
+ ret = hmm_migrate_dev_to_sys(self->fd, buffer, npages);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(buffer->cpages, npages);
+ }
+
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
@@ -1354,13 +1430,13 @@ TEST_F(hmm2, snapshot)
/* Page 5 will be migrated to device 0. */
buffer->ptr = p + 5 * self->page_size;
- ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_MIGRATE, buffer, 1);
+ ret = hmm_migrate_sys_to_dev(self->fd0, buffer, 1);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, 1);
/* Page 6 will be migrated to device 1. */
buffer->ptr = p + 6 * self->page_size;
- ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 1);
+ ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 1);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, 1);
@@ -1377,9 +1453,16 @@ TEST_F(hmm2, snapshot)
ASSERT_EQ(m[2], HMM_DMIRROR_PROT_ZERO | HMM_DMIRROR_PROT_READ);
ASSERT_EQ(m[3], HMM_DMIRROR_PROT_READ);
ASSERT_EQ(m[4], HMM_DMIRROR_PROT_WRITE);
- ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL |
- HMM_DMIRROR_PROT_WRITE);
- ASSERT_EQ(m[6], HMM_DMIRROR_PROT_NONE);
+ if (!hmm_is_coherent_type(variant->device_number0)) {
+ ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL |
+ HMM_DMIRROR_PROT_WRITE);
+ ASSERT_EQ(m[6], HMM_DMIRROR_PROT_NONE);
+ } else {
+ ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL |
+ HMM_DMIRROR_PROT_WRITE);
+ ASSERT_EQ(m[6], HMM_DMIRROR_PROT_DEV_COHERENT_REMOTE |
+ HMM_DMIRROR_PROT_WRITE);
+ }
hmm_buffer_free(buffer);
}
@@ -1520,9 +1603,19 @@ TEST_F(hmm2, double_map)
for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
- /* Punch a hole after the first page address. */
- ret = munmap(buffer->ptr + self->page_size, self->page_size);
+ /* Migrate pages to device 1 and try to read from device 0. */
+ ret = hmm_migrate_sys_to_dev(self->fd1, buffer, npages);
ASSERT_EQ(ret, 0);
+ ASSERT_EQ(buffer->cpages, npages);
+
+ ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_READ, buffer, npages);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(buffer->cpages, npages);
+ ASSERT_EQ(buffer->faults, 1);
+
+ /* Check what device 0 read. */
+ for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
+ ASSERT_EQ(ptr[i], i);
hmm_buffer_free(buffer);
}
@@ -1685,4 +1778,190 @@ TEST_F(hmm, exclusive_cow)
hmm_buffer_free(buffer);
}
+static int gup_test_exec(int gup_fd, unsigned long addr, int cmd,
+ int npages, int size, int flags)
+{
+ struct gup_test gup = {
+ .nr_pages_per_call = npages,
+ .addr = addr,
+ .gup_flags = FOLL_WRITE | flags,
+ .size = size,
+ };
+
+ if (ioctl(gup_fd, cmd, &gup)) {
+ perror("ioctl on error\n");
+ return errno;
+ }
+
+ return 0;
+}
+
+/*
+ * Test getting user device pages through gup_test, including pins with the
+ * LONGTERM flag set. This should trigger a migration back to system memory
+ * for both private and coherent type pages.
+ * This test makes use of the gup_test module. Make sure CONFIG_GUP_TEST is
+ * enabled in your kernel configuration before you run it.
+ */
+TEST_F(hmm, hmm_gup_test)
+{
+ struct hmm_buffer *buffer;
+ int gup_fd;
+ unsigned long npages;
+ unsigned long size;
+ unsigned long i;
+ int *ptr;
+ int ret;
+ unsigned char *m;
+
+ gup_fd = open("/sys/kernel/debug/gup_test", O_RDWR);
+ if (gup_fd == -1)
+ SKIP(return, "Skipping test, could not find gup_test driver");
+
+ npages = 4;
+ size = npages << self->page_shift;
+
+ buffer = malloc(sizeof(*buffer));
+ ASSERT_NE(buffer, NULL);
+
+ buffer->fd = -1;
+ buffer->size = size;
+ buffer->mirror = malloc(size);
+ ASSERT_NE(buffer->mirror, NULL);
+
+ buffer->ptr = mmap(NULL, size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS,
+ buffer->fd, 0);
+ ASSERT_NE(buffer->ptr, MAP_FAILED);
+
+ /* Initialize buffer in system memory. */
+ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
+ ptr[i] = i;
+
+ /* Migrate memory to device. */
+ ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(buffer->cpages, npages);
+ /* Check what the device read. */
+ for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
+ ASSERT_EQ(ptr[i], i);
+
+ ASSERT_EQ(gup_test_exec(gup_fd,
+ (unsigned long)buffer->ptr,
+ GUP_BASIC_TEST, 1, self->page_size, 0), 0);
+ ASSERT_EQ(gup_test_exec(gup_fd,
+ (unsigned long)buffer->ptr + 1 * self->page_size,
+ GUP_FAST_BENCHMARK, 1, self->page_size, 0), 0);
+ ASSERT_EQ(gup_test_exec(gup_fd,
+ (unsigned long)buffer->ptr + 2 * self->page_size,
+ PIN_FAST_BENCHMARK, 1, self->page_size, FOLL_LONGTERM), 0);
+ ASSERT_EQ(gup_test_exec(gup_fd,
+ (unsigned long)buffer->ptr + 3 * self->page_size,
+ PIN_LONGTERM_BENCHMARK, 1, self->page_size, 0), 0);
+
+ /* Take snapshot to CPU pagetables */
+ ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(buffer->cpages, npages);
+ m = buffer->mirror;
+ if (hmm_is_coherent_type(variant->device_number)) {
+ ASSERT_EQ(HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL | HMM_DMIRROR_PROT_WRITE, m[0]);
+ ASSERT_EQ(HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL | HMM_DMIRROR_PROT_WRITE, m[1]);
+ } else {
+ ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[0]);
+ ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[1]);
+ }
+ ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[2]);
+ ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[3]);
+ /*
+ * Check again the content on the pages. Make sure there's no
+ * corrupted data.
+ */
+ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
+ ASSERT_EQ(ptr[i], i);
+
+ close(gup_fd);
+ hmm_buffer_free(buffer);
+}
+
+/*
+ * Test copy-on-write on device pages.
+ * When writing to COW device-private page(s), a page fault first migrates
+ * the pages back to system memory and they are then duplicated. For COW
+ * device-coherent pages, the pages are duplicated directly from device
+ * memory.
+ */
+TEST_F(hmm, hmm_cow_in_device)
+{
+ struct hmm_buffer *buffer;
+ unsigned long npages;
+ unsigned long size;
+ unsigned long i;
+ int *ptr;
+ int ret;
+ unsigned char *m;
+ pid_t pid;
+ int status;
+
+ npages = 4;
+ size = npages << self->page_shift;
+
+ buffer = malloc(sizeof(*buffer));
+ ASSERT_NE(buffer, NULL);
+
+ buffer->fd = -1;
+ buffer->size = size;
+ buffer->mirror = malloc(size);
+ ASSERT_NE(buffer->mirror, NULL);
+
+ buffer->ptr = mmap(NULL, size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS,
+ buffer->fd, 0);
+ ASSERT_NE(buffer->ptr, MAP_FAILED);
+
+ /* Initialize buffer in system memory. */
+ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
+ ptr[i] = i;
+
+ /* Migrate memory to device. */
+
+ ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(buffer->cpages, npages);
+
+ pid = fork();
+ if (pid == -1)
+ ASSERT_EQ(pid, 0);
+ if (!pid) {
+ /* Child process waits for SIGTERM from the parent. */
+ while (1) {
+ }
+ perror("Should not reach this\n");
+ exit(0);
+ }
+ /* Parent process writes to the COW page(s) and gets a
+ * new copy in system memory. For device-private pages,
+ * this write first causes a migration back to system memory.
+ */
+ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
+ ptr[i] = i;
+
+ /* Terminate child and wait */
+ EXPECT_EQ(0, kill(pid, SIGTERM));
+ EXPECT_EQ(pid, waitpid(pid, &status, 0));
+ EXPECT_NE(0, WIFSIGNALED(status));
+ EXPECT_EQ(SIGTERM, WTERMSIG(status));
+
+ /* Take snapshot to CPU pagetables */
+ ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(buffer->cpages, npages);
+ m = buffer->mirror;
+ for (i = 0; i < npages; i++)
+ ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[i]);
+
+ hmm_buffer_free(buffer);
+}
TEST_HARNESS_MAIN
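
The device-variant tests above share one pattern: fill a buffer in system memory, migrate it to the device, then bring it back, either through a CPU fault (device-private) or through an explicit migration (device-coherent). A minimal sketch of that pattern using the helpers added in this patch; the function name is hypothetical and not part of the patch.

static int migrate_roundtrip_sketch(int fd, int coherent,
				    struct hmm_buffer *buffer,
				    unsigned long npages)
{
	unsigned long i;
	int *ptr;

	/* Initialize the buffer while it is backed by system memory. */
	for (i = 0, ptr = buffer->ptr; i < buffer->size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Move the pages to device memory. */
	if (hmm_migrate_sys_to_dev(fd, buffer, npages))
		return -1;

	/* Coherent device memory never faults, so migrate back explicitly. */
	if (coherent && hmm_migrate_dev_to_sys(fd, buffer, npages))
		return -1;

	/* For device-private memory this CPU read faults the pages back. */
	for (i = 0, ptr = buffer->ptr; i < buffer->size / sizeof(*ptr); ++i)
		if (ptr[i] != (int)i)
			return -1;

	return 0;
}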
diff --git a/tools/testing/selftests/vm/hugepage-mremap.c b/tools/testing/selftests/vm/hugepage-mremap.c
index 585978f181ed..e63a0214f639 100644
--- a/tools/testing/selftests/vm/hugepage-mremap.c
+++ b/tools/testing/selftests/vm/hugepage-mremap.c
@@ -107,7 +107,7 @@ static void register_region_with_uffd(char *addr, size_t len)
int main(int argc, char *argv[])
{
- size_t length;
+ size_t length = 0;
if (argc != 2 && argc != 3) {
printf("Usage: %s [length_in_MB] <hugetlb_file>\n", argv[0]);
diff --git a/tools/testing/selftests/vm/hugetlb-madvise.c b/tools/testing/selftests/vm/hugetlb-madvise.c
index 6c6af40f5747..3c9943131881 100644
--- a/tools/testing/selftests/vm/hugetlb-madvise.c
+++ b/tools/testing/selftests/vm/hugetlb-madvise.c
@@ -89,10 +89,11 @@ void write_fault_pages(void *addr, unsigned long nr_pages)
void read_fault_pages(void *addr, unsigned long nr_pages)
{
- unsigned long i, tmp;
+ unsigned long dummy = 0;
+ unsigned long i;
for (i = 0; i < nr_pages; i++)
- tmp += *((unsigned long *)(addr + (i * huge_page_size)));
+ dummy += *((unsigned long *)(addr + (i * huge_page_size)));
}
int main(int argc, char **argv)
diff --git a/tools/testing/selftests/vm/mrelease_test.c b/tools/testing/selftests/vm/mrelease_test.c
index 96671c2f7d48..6c62966ab5db 100644
--- a/tools/testing/selftests/vm/mrelease_test.c
+++ b/tools/testing/selftests/vm/mrelease_test.c
@@ -62,19 +62,22 @@ static int alloc_noexit(unsigned long nr_pages, int pipefd)
/* The process_mrelease calls in this test are expected to fail */
static void run_negative_tests(int pidfd)
{
+ int res;
/* Test invalid flags. Expect to fail with EINVAL error code. */
if (!syscall(__NR_process_mrelease, pidfd, (unsigned int)-1) ||
errno != EINVAL) {
+ res = (errno == ENOSYS ? KSFT_SKIP : KSFT_FAIL);
perror("process_mrelease with wrong flags");
- exit(errno == ENOSYS ? KSFT_SKIP : KSFT_FAIL);
+ exit(res);
}
/*
* Test reaping while process is alive with no pending SIGKILL.
* Expect to fail with EINVAL error code.
*/
if (!syscall(__NR_process_mrelease, pidfd, 0) || errno != EINVAL) {
+ res = (errno == ENOSYS ? KSFT_SKIP : KSFT_FAIL);
perror("process_mrelease on a live process");
- exit(errno == ENOSYS ? KSFT_SKIP : KSFT_FAIL);
+ exit(res);
}
}
@@ -100,8 +103,9 @@ int main(void)
/* Test a wrong pidfd */
if (!syscall(__NR_process_mrelease, -1, 0) || errno != EBADF) {
+ res = (errno == ENOSYS ? KSFT_SKIP : KSFT_FAIL);
perror("process_mrelease with wrong pidfd");
- exit(errno == ENOSYS ? KSFT_SKIP : KSFT_FAIL);
+ exit(res);
}
/* Start the test with 1MB child memory allocation */
@@ -156,8 +160,9 @@ retry:
run_negative_tests(pidfd);
if (kill(pid, SIGKILL)) {
+ res = (errno == ENOSYS ? KSFT_SKIP : KSFT_FAIL);
perror("kill");
- exit(errno == ENOSYS ? KSFT_SKIP : KSFT_FAIL);
+ exit(res);
}
success = (syscall(__NR_process_mrelease, pidfd, 0) == 0);
@@ -172,9 +177,10 @@ retry:
if (errno == ESRCH) {
retry = (size <= MAX_SIZE_MB);
} else {
+ res = (errno == ENOSYS ? KSFT_SKIP : KSFT_FAIL);
perror("process_mrelease");
waitpid(pid, NULL, 0);
- exit(errno == ENOSYS ? KSFT_SKIP : KSFT_FAIL);
+ exit(res);
}
}
diff --git a/tools/testing/selftests/vm/run_vmtests.sh b/tools/testing/selftests/vm/run_vmtests.sh
index 41fce8bea929..de86983b8a0f 100755
--- a/tools/testing/selftests/vm/run_vmtests.sh
+++ b/tools/testing/selftests/vm/run_vmtests.sh
@@ -151,7 +151,7 @@ if [ $VADDR64 -ne 0 ]; then
run_test ./virtual_address_range
# virtual address 128TB switch test
- run_test ./va_128TBswitch
+ run_test ./va_128TBswitch.sh
fi # VADDR64
# vmalloc stability smoke test
@@ -179,4 +179,17 @@ run_test ./ksm_tests -N -m 1
# KSM test with 2 NUMA nodes and merge_across_nodes = 0
run_test ./ksm_tests -N -m 0
+# protection_keys tests
+if [ -x ./protection_keys_32 ]
+then
+ run_test ./protection_keys_32
+fi
+
+if [ -x ./protection_keys_64 ]
+then
+ run_test ./protection_keys_64
+fi
+
+run_test ./soft-dirty
+
exit $exitcode
diff --git a/tools/testing/selftests/vm/soft-dirty.c b/tools/testing/selftests/vm/soft-dirty.c
index 08ab62a4a9d0..e3a43f5d4fa2 100644
--- a/tools/testing/selftests/vm/soft-dirty.c
+++ b/tools/testing/selftests/vm/soft-dirty.c
@@ -121,13 +121,76 @@ static void test_hugepage(int pagemap_fd, int pagesize)
free(map);
}
+static void test_mprotect(int pagemap_fd, int pagesize, bool anon)
+{
+ const char *type[] = {"file", "anon"};
+ const char *fname = "./soft-dirty-test-file";
+ int test_fd;
+ char *map;
+
+ if (anon) {
+ map = mmap(NULL, pagesize, PROT_READ|PROT_WRITE,
+ MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+ if (!map)
+ ksft_exit_fail_msg("anon mmap failed\n");
+ } else {
+ test_fd = open(fname, O_RDWR | O_CREAT);
+ if (test_fd < 0) {
+ ksft_test_result_skip("Test %s open() file failed\n", __func__);
+ return;
+ }
+ unlink(fname);
+ ftruncate(test_fd, pagesize);
+ map = mmap(NULL, pagesize, PROT_READ|PROT_WRITE,
+ MAP_SHARED, test_fd, 0);
+ if (!map)
+ ksft_exit_fail_msg("file mmap failed\n");
+ }
+
+ *map = 1;
+ ksft_test_result(pagemap_is_softdirty(pagemap_fd, map) == 1,
+ "Test %s-%s dirty bit of new written page\n",
+ __func__, type[anon]);
+ clear_softdirty();
+ ksft_test_result(pagemap_is_softdirty(pagemap_fd, map) == 0,
+ "Test %s-%s soft-dirty clear after clear_refs\n",
+ __func__, type[anon]);
+ mprotect(map, pagesize, PROT_READ);
+ ksft_test_result(pagemap_is_softdirty(pagemap_fd, map) == 0,
+ "Test %s-%s soft-dirty clear after marking RO\n",
+ __func__, type[anon]);
+ mprotect(map, pagesize, PROT_READ|PROT_WRITE);
+ ksft_test_result(pagemap_is_softdirty(pagemap_fd, map) == 0,
+ "Test %s-%s soft-dirty clear after marking RW\n",
+ __func__, type[anon]);
+ *map = 2;
+ ksft_test_result(pagemap_is_softdirty(pagemap_fd, map) == 1,
+ "Test %s-%s soft-dirty after rewritten\n",
+ __func__, type[anon]);
+
+ munmap(map, pagesize);
+
+ if (!anon)
+ close(test_fd);
+}
+
+static void test_mprotect_anon(int pagemap_fd, int pagesize)
+{
+ test_mprotect(pagemap_fd, pagesize, true);
+}
+
+static void test_mprotect_file(int pagemap_fd, int pagesize)
+{
+ test_mprotect(pagemap_fd, pagesize, false);
+}
+
int main(int argc, char **argv)
{
int pagemap_fd;
int pagesize;
ksft_print_header();
- ksft_set_plan(5);
+ ksft_set_plan(15);
pagemap_fd = open(PAGEMAP_FILE_PATH, O_RDONLY);
if (pagemap_fd < 0)
@@ -138,6 +201,8 @@ int main(int argc, char **argv)
test_simple(pagemap_fd, pagesize);
test_vma_reuse(pagemap_fd, pagesize);
test_hugepage(pagemap_fd, pagesize);
+ test_mprotect_anon(pagemap_fd, pagesize);
+ test_mprotect_file(pagemap_fd, pagesize);
close(pagemap_fd);
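
The mprotect cases above rely on pagemap_is_softdirty(), which is defined elsewhere in the selftest sources. As an assumption about what such a helper does (it is not shown in this patch), it typically reads the 64-bit /proc/<pid>/pagemap entry for the page and tests bit 55, the documented soft-dirty bit, roughly like this sketch (assuming <stdint.h>, <unistd.h> and <sys/types.h>):

static int pagemap_is_softdirty_sketch(int pagemap_fd, void *vaddr)
{
	uint64_t entry;
	/* One 8-byte entry per page, indexed by virtual page number. */
	off_t offset = ((uintptr_t)vaddr / getpagesize()) * sizeof(entry);

	if (pread(pagemap_fd, &entry, sizeof(entry), offset) != sizeof(entry))
		return -1;

	/* Bit 55 is the soft-dirty bit. */
	return (entry >> 55) & 1;
}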
diff --git a/tools/testing/selftests/vm/test_hmm.sh b/tools/testing/selftests/vm/test_hmm.sh
index 0647b525a625..539c9371e592 100755
--- a/tools/testing/selftests/vm/test_hmm.sh
+++ b/tools/testing/selftests/vm/test_hmm.sh
@@ -40,11 +40,26 @@ check_test_requirements()
load_driver()
{
- modprobe $DRIVER > /dev/null 2>&1
+ if [ $# -eq 0 ]; then
+ modprobe $DRIVER > /dev/null 2>&1
+ else
+ if [ $# -eq 2 ]; then
+ modprobe $DRIVER spm_addr_dev0=$1 spm_addr_dev1=$2 \
+ > /dev/null 2>&1
+ else
+ echo "Missing module parameters. Make sure pass"\
+ "spm_addr_dev0 and spm_addr_dev1"
+ usage
+ fi
+ fi
if [ $? == 0 ]; then
major=$(awk "\$2==\"HMM_DMIRROR\" {print \$1}" /proc/devices)
mknod /dev/hmm_dmirror0 c $major 0
mknod /dev/hmm_dmirror1 c $major 1
+ if [ $# -eq 2 ]; then
+ mknod /dev/hmm_dmirror2 c $major 2
+ mknod /dev/hmm_dmirror3 c $major 3
+ fi
fi
}
@@ -58,7 +73,7 @@ run_smoke()
{
echo "Running smoke test. Note, this test provides basic coverage."
- load_driver
+ load_driver $1 $2
$(dirname "${BASH_SOURCE[0]}")/hmm-tests
unload_driver
}
@@ -75,6 +90,9 @@ usage()
echo "# Smoke testing"
echo "./${TEST_NAME}.sh smoke"
echo
+ echo "# Smoke testing with SPM enabled"
+ echo "./${TEST_NAME}.sh smoke <spm_addr_dev0> <spm_addr_dev1>"
+ echo
exit 0
}
@@ -84,7 +102,7 @@ function run_test()
usage
else
if [ "$1" = "smoke" ]; then
- run_smoke
+ run_smoke $2 $3
else
usage
fi
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
index 4bc24581760d..7c3f1b0ab468 100644
--- a/tools/testing/selftests/vm/userfaultfd.c
+++ b/tools/testing/selftests/vm/userfaultfd.c
@@ -931,7 +931,7 @@ static int faulting_process(int signal_test)
unsigned long split_nr_pages;
unsigned long lastnr;
struct sigaction act;
- unsigned long signalled = 0;
+ volatile unsigned long signalled = 0;
split_nr_pages = (nr_pages + 1) / 2;
@@ -946,7 +946,7 @@ static int faulting_process(int signal_test)
}
for (nr = 0; nr < split_nr_pages; nr++) {
- int steps = 1;
+ volatile int steps = 1;
unsigned long offset = nr * page_size;
if (signal_test) {
diff --git a/tools/testing/selftests/vm/va_128TBswitch.c b/tools/testing/selftests/vm/va_128TBswitch.c
index da6ec3b53ea8..1d2068989883 100644
--- a/tools/testing/selftests/vm/va_128TBswitch.c
+++ b/tools/testing/selftests/vm/va_128TBswitch.c
@@ -231,7 +231,7 @@ static struct testcase hugetlb_testcases[] = {
static int run_test(struct testcase *test, int count)
{
void *p;
- int i, ret = 0;
+ int i, ret = KSFT_PASS;
for (i = 0; i < count; i++) {
struct testcase *t = test + i;
@@ -242,13 +242,13 @@ static int run_test(struct testcase *test, int count)
if (p == MAP_FAILED) {
printf("FAILED\n");
- ret = 1;
+ ret = KSFT_FAIL;
continue;
}
if (t->low_addr_required && p >= (void *)(ADDR_SWITCH_HINT)) {
printf("FAILED\n");
- ret = 1;
+ ret = KSFT_FAIL;
} else {
/*
* Do a dereference of the address returned so that we catch
@@ -280,7 +280,7 @@ int main(int argc, char **argv)
int ret;
if (!supported_arch())
- return 0;
+ return KSFT_SKIP;
ret = run_test(testcases, ARRAY_SIZE(testcases));
if (argc == 2 && !strcmp(argv[1], "--run-hugetlb"))
diff --git a/tools/testing/selftests/vm/va_128TBswitch.sh b/tools/testing/selftests/vm/va_128TBswitch.sh
new file mode 100755
index 000000000000..41580751dc51
--- /dev/null
+++ b/tools/testing/selftests/vm/va_128TBswitch.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (C) 2022 Adam Sindelar (Meta) <adam@wowsignal.io>
+#
+# This is a test for mmap behavior with 5-level paging. This script wraps the
+# real test to check that the kernel is configured to support at least 5
+# pagetable levels.
+
+# 1 means the test failed
+exitcode=1
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+fail()
+{
+ echo "$1"
+ exit $exitcode
+}
+
+check_supported_x86_64()
+{
+ local config="/proc/config.gz"
+ [[ -f "${config}" ]] || config="/boot/config-$(uname -r)"
+ [[ -f "${config}" ]] || fail "Cannot find kernel config in /proc or /boot"
+
+ # gzip -dcfq automatically handles both compressed and plaintext input.
+ # See man 1 gzip under '-f'.
+ local pg_table_levels=$(gzip -dcfq "${config}" | grep PGTABLE_LEVELS | cut -d'=' -f 2)
+
+ if [[ "${pg_table_levels}" -lt 5 ]]; then
+ echo "$0: PGTABLE_LEVELS=${pg_table_levels}, must be >= 5 to run this test"
+ exit $ksft_skip
+ fi
+}
+
+check_test_requirements()
+{
+ # The test supports x86_64 and powerpc64. We currently have no useful
+ # eligibility check for powerpc64, and the test itself will reject other
+ # architectures.
+ case `uname -m` in
+ "x86_64")
+ check_supported_x86_64
+ ;;
+ *)
+ return 0
+ ;;
+ esac
+}
+
+check_test_requirements
+./va_128TBswitch
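
What the wrapped va_128TBswitch test exercises is the documented x86_64 behavior that, even with 5-level paging, mmap() keeps returning addresses below the 128TB boundary unless the caller passes an address hint above it. A tiny standalone illustration of that opt-in; a sketch, not part of the patch:

#include <stdio.h>
#include <sys/mman.h>

#define ADDR_128TB (1UL << 47)

int main(void)
{
	/* No hint: the kernel stays below the 128TB (47-bit) boundary. */
	void *low = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	/* Hint above 128TB: allows this mapping to land in the larger space. */
	void *high = mmap((void *)ADDR_128TB, 4096, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	printf("default: %p, hinted: %p\n", low, high);
	return 0;
}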
diff --git a/tools/testing/selftests/wireguard/qemu/arch/riscv32.config b/tools/testing/selftests/wireguard/qemu/arch/riscv32.config
index 0bd0e72d95d4..2fc36efb166d 100644
--- a/tools/testing/selftests/wireguard/qemu/arch/riscv32.config
+++ b/tools/testing/selftests/wireguard/qemu/arch/riscv32.config
@@ -1,3 +1,4 @@
+CONFIG_NONPORTABLE=y
CONFIG_ARCH_RV32I=y
CONFIG_MMU=y
CONFIG_FPU=y
diff --git a/tools/thermal/tmon/sysfs.c b/tools/thermal/tmon/sysfs.c
index b00b1bfd9d8e..cb1108bc9249 100644
--- a/tools/thermal/tmon/sysfs.c
+++ b/tools/thermal/tmon/sysfs.c
@@ -13,6 +13,7 @@
#include <stdint.h>
#include <dirent.h>
#include <libintl.h>
+#include <limits.h>
#include <ctype.h>
#include <time.h>
#include <syslog.h>
@@ -33,9 +34,9 @@ int sysfs_set_ulong(char *path, char *filename, unsigned long val)
{
FILE *fd;
int ret = -1;
- char filepath[256];
+ char filepath[PATH_MAX + 2]; /* NUL and '/' */
- snprintf(filepath, 256, "%s/%s", path, filename);
+ snprintf(filepath, sizeof(filepath), "%s/%s", path, filename);
fd = fopen(filepath, "w");
if (!fd) {
@@ -57,9 +58,9 @@ static int sysfs_get_ulong(char *path, char *filename, unsigned long *p_ulong)
{
FILE *fd;
int ret = -1;
- char filepath[256];
+ char filepath[PATH_MAX + 2]; /* NUL and '/' */
- snprintf(filepath, 256, "%s/%s", path, filename);
+ snprintf(filepath, sizeof(filepath), "%s/%s", path, filename);
fd = fopen(filepath, "r");
if (!fd) {
@@ -76,9 +77,9 @@ static int sysfs_get_string(char *path, char *filename, char *str)
{
FILE *fd;
int ret = -1;
- char filepath[256];
+ char filepath[PATH_MAX + 2]; /* NUL and '/' */
- snprintf(filepath, 256, "%s/%s", path, filename);
+ snprintf(filepath, sizeof(filepath), "%s/%s", path, filename);
fd = fopen(filepath, "r");
if (!fd) {
@@ -199,8 +200,8 @@ static int find_tzone_cdev(struct dirent *nl, char *tz_name,
{
unsigned long trip_instance = 0;
char cdev_name_linked[256];
- char cdev_name[256];
- char cdev_trip_name[256];
+ char cdev_name[PATH_MAX];
+ char cdev_trip_name[PATH_MAX];
int cdev_id;
if (nl->d_type == DT_LNK) {
@@ -213,7 +214,8 @@ static int find_tzone_cdev(struct dirent *nl, char *tz_name,
return -EINVAL;
}
/* find the link to real cooling device record binding */
- snprintf(cdev_name, 256, "%s/%s", tz_name, nl->d_name);
+ snprintf(cdev_name, sizeof(cdev_name) - 2, "%s/%s",
+ tz_name, nl->d_name);
memset(cdev_name_linked, 0, sizeof(cdev_name_linked));
if (readlink(cdev_name, cdev_name_linked,
sizeof(cdev_name_linked) - 1) != -1) {
@@ -226,8 +228,8 @@ static int find_tzone_cdev(struct dirent *nl, char *tz_name,
/* find the trip point in which the cdev is binded to
* in this tzone
*/
- snprintf(cdev_trip_name, 256, "%s%s", nl->d_name,
- "_trip_point");
+ snprintf(cdev_trip_name, sizeof(cdev_trip_name) - 1,
+ "%s%s", nl->d_name, "_trip_point");
sysfs_get_ulong(tz_name, cdev_trip_name,
&trip_instance);
/* validate trip point range, e.g. trip could return -1
diff --git a/tools/tracing/rtla/Makefile b/tools/tracing/rtla/Makefile
index 3822f4ea5f49..22e28b76f800 100644
--- a/tools/tracing/rtla/Makefile
+++ b/tools/tracing/rtla/Makefile
@@ -1,6 +1,6 @@
NAME := rtla
# Follow the kernel version
-VERSION := $(shell cat VERSION 2> /dev/null || make -sC ../../.. kernelversion)
+VERSION := $(shell cat VERSION 2> /dev/null || make -sC ../../.. kernelversion | grep -v make)
# From libtracefs:
# Makefiles suck: This macro sets a default value of $(2) for the
@@ -30,8 +30,8 @@ WOPTS := -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_A
TRACEFS_HEADERS := $$($(PKG_CONFIG) --cflags libtracefs)
-CFLAGS := -O -g -DVERSION=\"$(VERSION)\" $(FOPTS) $(MOPTS) $(WOPTS) $(TRACEFS_HEADERS)
-LDFLAGS := -ggdb
+CFLAGS := -O -g -DVERSION=\"$(VERSION)\" $(FOPTS) $(MOPTS) $(WOPTS) $(TRACEFS_HEADERS) $(EXTRA_CFLAGS)
+LDFLAGS := -ggdb $(EXTRA_LDFLAGS)
LIBS := $$($(PKG_CONFIG) --libs libtracefs)
SRC := $(wildcard src/*.c)
@@ -61,40 +61,50 @@ endif
LIBTRACEEVENT_MIN_VERSION = 1.5
LIBTRACEFS_MIN_VERSION = 1.3
+.PHONY: all warnings show_warnings
+all: warnings rtla
+
TEST_LIBTRACEEVENT = $(shell sh -c "$(PKG_CONFIG) --atleast-version $(LIBTRACEEVENT_MIN_VERSION) libtraceevent > /dev/null 2>&1 || echo n")
ifeq ("$(TEST_LIBTRACEEVENT)", "n")
-.PHONY: warning_traceevent
-warning_traceevent:
- @echo "********************************************"
- @echo "** NOTICE: libtraceevent version $(LIBTRACEEVENT_MIN_VERSION) or higher not found"
- @echo "**"
- @echo "** Consider installing the latest libtraceevent from your"
- @echo "** distribution, e.g., 'dnf install libtraceevent' on Fedora,"
- @echo "** or from source:"
- @echo "**"
- @echo "** https://git.kernel.org/pub/scm/libs/libtrace/libtraceevent.git/ "
- @echo "**"
- @echo "********************************************"
+WARNINGS = show_warnings
+MISSING_LIBS += echo "** libtraceevent version $(LIBTRACEEVENT_MIN_VERSION) or higher";
+MISSING_PACKAGES += "libtraceevent-devel"
+MISSING_SOURCE += echo "** https://git.kernel.org/pub/scm/libs/libtrace/libtraceevent.git/ ";
endif
TEST_LIBTRACEFS = $(shell sh -c "$(PKG_CONFIG) --atleast-version $(LIBTRACEFS_MIN_VERSION) libtracefs > /dev/null 2>&1 || echo n")
ifeq ("$(TEST_LIBTRACEFS)", "n")
-.PHONY: warning_tracefs
-warning_tracefs:
- @echo "********************************************"
- @echo "** NOTICE: libtracefs version $(LIBTRACEFS_MIN_VERSION) or higher not found"
- @echo "**"
- @echo "** Consider installing the latest libtracefs from your"
- @echo "** distribution, e.g., 'dnf install libtracefs' on Fedora,"
- @echo "** or from source:"
- @echo "**"
- @echo "** https://git.kernel.org/pub/scm/libs/libtrace/libtracefs.git/ "
- @echo "**"
- @echo "********************************************"
+WARNINGS = show_warnings
+MISSING_LIBS += echo "** libtracefs version $(LIBTRACEFS_MIN_VERSION) or higher";
+MISSING_PACKAGES += "libtracefs-devel"
+MISSING_SOURCE += echo "** https://git.kernel.org/pub/scm/libs/libtrace/libtracefs.git/ ";
endif
-.PHONY: all
-all: rtla
+define show_dependencies
+ @echo "********************************************"; \
+ echo "** NOTICE: Failed build dependencies"; \
+ echo "**"; \
+ echo "** Required Libraries:"; \
+ $(MISSING_LIBS) \
+ echo "**"; \
+ echo "** Consider installing the latest libtracefs from your"; \
+ echo "** distribution, e.g., 'dnf install $(MISSING_PACKAGES)' on Fedora,"; \
+ echo "** or from source:"; \
+ echo "**"; \
+ $(MISSING_SOURCE) \
+ echo "**"; \
+ echo "********************************************"
+endef
+
+show_warnings:
+ $(call show_dependencies);
+
+ifneq ("$(WARNINGS)", "")
+ERROR_OUT = $(error Please add the necessary dependencies)
+
+warnings: $(WARNINGS)
+ $(ERROR_OUT)
+endif
rtla: $(OBJ)
$(CC) -o rtla $(LDFLAGS) $(OBJ) $(LIBS)
@@ -108,9 +118,9 @@ install: doc_install
$(INSTALL) rtla -m 755 $(DESTDIR)$(BINDIR)
$(STRIP) $(DESTDIR)$(BINDIR)/rtla
@test ! -f $(DESTDIR)$(BINDIR)/osnoise || rm $(DESTDIR)$(BINDIR)/osnoise
- ln -s $(DESTDIR)$(BINDIR)/rtla $(DESTDIR)$(BINDIR)/osnoise
+ ln -s rtla $(DESTDIR)$(BINDIR)/osnoise
@test ! -f $(DESTDIR)$(BINDIR)/timerlat || rm $(DESTDIR)$(BINDIR)/timerlat
- ln -s $(DESTDIR)$(BINDIR)/rtla $(DESTDIR)$(BINDIR)/timerlat
+ ln -s rtla $(DESTDIR)$(BINDIR)/timerlat
.PHONY: clean tarball
clean: doc_clean
diff --git a/tools/tracing/rtla/src/timerlat_hist.c b/tools/tracing/rtla/src/timerlat_hist.c
index f3ec628f5e51..4b48af8a8309 100644
--- a/tools/tracing/rtla/src/timerlat_hist.c
+++ b/tools/tracing/rtla/src/timerlat_hist.c
@@ -892,7 +892,7 @@ int timerlat_hist_main(int argc, char *argv[])
return_value = 0;
if (trace_is_off(&tool->trace, &record->trace)) {
- printf("rtla timelat hit stop tracing\n");
+ printf("rtla timerlat hit stop tracing\n");
if (params->trace_output) {
printf(" Saving trace to %s\n", params->trace_output);
save_trace_to_file(record->trace.inst, params->trace_output);
diff --git a/tools/tracing/rtla/src/timerlat_top.c b/tools/tracing/rtla/src/timerlat_top.c
index 35452a1d45e9..334271935222 100644
--- a/tools/tracing/rtla/src/timerlat_top.c
+++ b/tools/tracing/rtla/src/timerlat_top.c
@@ -687,7 +687,7 @@ int timerlat_top_main(int argc, char *argv[])
return_value = 0;
if (trace_is_off(&top->trace, &record->trace)) {
- printf("rtla timelat hit stop tracing\n");
+ printf("rtla timerlat hit stop tracing\n");
if (params->trace_output) {
printf(" Saving trace to %s\n", params->trace_output);
save_trace_to_file(record->trace.inst, params->trace_output);
diff --git a/tools/tracing/rtla/src/trace.c b/tools/tracing/rtla/src/trace.c
index 5784c9f9e570..e1ba6d9f4265 100644
--- a/tools/tracing/rtla/src/trace.c
+++ b/tools/tracing/rtla/src/trace.c
@@ -134,13 +134,18 @@ void trace_instance_destroy(struct trace_instance *trace)
if (trace->inst) {
disable_tracer(trace->inst);
destroy_instance(trace->inst);
+ trace->inst = NULL;
}
- if (trace->seq)
+ if (trace->seq) {
free(trace->seq);
+ trace->seq = NULL;
+ }
- if (trace->tep)
+ if (trace->tep) {
tep_free(trace->tep);
+ trace->tep = NULL;
+ }
}
/*
diff --git a/tools/tracing/rtla/src/utils.c b/tools/tracing/rtla/src/utils.c
index 5352167a1e75..663a047f794d 100644
--- a/tools/tracing/rtla/src/utils.c
+++ b/tools/tracing/rtla/src/utils.c
@@ -106,8 +106,9 @@ int parse_cpu_list(char *cpu_list, char **monitored_cpus)
nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
- mon_cpus = malloc(nr_cpus * sizeof(char));
- memset(mon_cpus, 0, (nr_cpus * sizeof(char)));
+ mon_cpus = calloc(nr_cpus, sizeof(char));
+ if (!mon_cpus)
+ goto err;
for (p = cpu_list; *p; ) {
cpu = atoi(p);
@@ -224,7 +225,7 @@ long parse_ns_duration(char *val)
#elif __arm__
# define __NR_sched_setattr 380
# define __NR_sched_getattr 381
-#elif __aarch64__
+#elif __aarch64__ || __riscv
# define __NR_sched_setattr 274
# define __NR_sched_getattr 275
#elif __powerpc__
diff --git a/tools/verification/dot2/Makefile b/tools/verification/dot2/Makefile
new file mode 100644
index 000000000000..021beb07a521
--- /dev/null
+++ b/tools/verification/dot2/Makefile
@@ -0,0 +1,26 @@
+INSTALL=install
+
+prefix ?= /usr
+bindir ?= $(prefix)/bin
+mandir ?= $(prefix)/share/man
+miscdir ?= $(prefix)/share/dot2
+srcdir ?= $(prefix)/src
+
+PYLIB ?= $(shell python3 -c 'import sysconfig; print (sysconfig.get_path("purelib"))')
+
+.PHONY: all
+all:
+
+.PHONY: clean
+clean:
+
+.PHONY: install
+install:
+ $(INSTALL) automata.py -D -m 644 $(DESTDIR)$(PYLIB)/dot2/automata.py
+ $(INSTALL) dot2c.py -D -m 644 $(DESTDIR)$(PYLIB)/dot2/dot2c.py
+ $(INSTALL) dot2c -D -m 755 $(DESTDIR)$(bindir)/
+ $(INSTALL) dot2k.py -D -m 644 $(DESTDIR)$(PYLIB)/dot2/dot2k.py
+ $(INSTALL) dot2k -D -m 755 $(DESTDIR)$(bindir)/
+
+ mkdir -p $(DESTDIR)$(miscdir)/
+ cp -rp dot2k_templates $(DESTDIR)$(miscdir)/
diff --git a/tools/verification/dot2/automata.py b/tools/verification/dot2/automata.py
new file mode 100644
index 000000000000..baffeb960ff0
--- /dev/null
+++ b/tools/verification/dot2/automata.py
@@ -0,0 +1,174 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Copyright (C) 2019-2022 Red Hat, Inc. Daniel Bristot de Oliveira <bristot@kernel.org>
+#
+# Automata object: parse an automaton in dot file digraph format into a Python object
+#
+# For further information, see:
+# Documentation/trace/rv/deterministic_automata.rst
+
+import ntpath
+
+class Automata:
+ """Automata class: Reads a dot file and part it as an automata.
+
+ Attributes:
+ dot_file: A dot file with a state_automaton definition.
+ """
+
+ invalid_state_str = "INVALID_STATE"
+
+ def __init__(self, file_path):
+ self.__dot_path = file_path
+ self.name = self.__get_model_name()
+ self.__dot_lines = self.__open_dot()
+ self.states, self.initial_state, self.final_states = self.__get_state_variables()
+ self.events = self.__get_event_variables()
+ self.function = self.__create_matrix()
+
+ def __get_model_name(self):
+ basename = ntpath.basename(self.__dot_path)
+ if basename.endswith(".dot") == False:
+ print("not a dot file")
+ raise Exception("not a dot file: %s" % self.__dot_path)
+
+ model_name = basename[0:-4]
+ if model_name.__len__() == 0:
+ raise Exception("not a dot file: %s" % self.__dot_path)
+
+ return model_name
+
+ def __open_dot(self):
+ cursor = 0
+ dot_lines = []
+ try:
+ dot_file = open(self.__dot_path)
+ except:
+ raise Exception("Cannot open the file: %s" % self.__dot_path)
+
+ dot_lines = dot_file.read().splitlines()
+ dot_file.close()
+
+ # checking the first line:
+ line = dot_lines[cursor].split()
+
+ if (line[0] != "digraph") and (line[1] != "state_automaton"):
+ raise Exception("Not a valid .dot format: %s" % self.__dot_path)
+ else:
+ cursor += 1
+ return dot_lines
+
+ def __get_cursor_begin_states(self):
+ cursor = 0
+ while self.__dot_lines[cursor].split()[0] != "{node":
+ cursor += 1
+ return cursor
+
+ def __get_cursor_begin_events(self):
+ cursor = 0
+ while self.__dot_lines[cursor].split()[0] != "{node":
+ cursor += 1
+ while self.__dot_lines[cursor].split()[0] == "{node":
+ cursor += 1
+ # skip initial state transition
+ cursor += 1
+ return cursor
+
+ def __get_state_variables(self):
+ # wait for node declaration
+ states = []
+ final_states = []
+
+ has_final_states = False
+ cursor = self.__get_cursor_begin_states()
+
+ # process nodes
+ while self.__dot_lines[cursor].split()[0] == "{node":
+ line = self.__dot_lines[cursor].split()
+ raw_state = line[-1]
+
+ # "enabled_fired"}; -> enabled_fired
+ state = raw_state.replace('"', '').replace('};', '').replace(',','_')
+ if state[0:7] == "__init_":
+ initial_state = state[7:]
+ else:
+ states.append(state)
+ if self.__dot_lines[cursor].__contains__("doublecircle") == True:
+ final_states.append(state)
+ has_final_states = True
+
+ if self.__dot_lines[cursor].__contains__("ellipse") == True:
+ final_states.append(state)
+ has_final_states = True
+
+ cursor += 1
+
+ states = sorted(set(states))
+ states.remove(initial_state)
+
+ # Insert the initial state at the beginning of the states list
+ states.insert(0, initial_state)
+
+ if has_final_states == False:
+ final_states.append(initial_state)
+
+ return states, initial_state, final_states
+
+ def __get_event_variables(self):
+ # here we are at the beginning of the transitions; take note, we will return later.
+ cursor = self.__get_cursor_begin_events()
+
+ events = []
+ while self.__dot_lines[cursor][1] == '"':
+ # transitions have the format:
+ # "all_fired" -> "both_fired" [ label = "disable_irq" ];
+ # ------------ event is here ------------^^^^^
+ if self.__dot_lines[cursor].split()[1] == "->":
+ line = self.__dot_lines[cursor].split()
+ event = line[-2].replace('"','')
+
+ # when a transition has more than one label, it looks like this:
+ # "local_irq_enable\nhw_local_irq_enable_n"
+ # so split them.
+
+ event = event.replace("\\n", " ")
+ for i in event.split():
+ events.append(i)
+ cursor += 1
+
+ return sorted(set(events))
+
+ def __create_matrix(self):
+ # transform the array into a dictionary
+ events = self.events
+ states = self.states
+ events_dict = {}
+ states_dict = {}
+ nr_event = 0
+ for event in events:
+ events_dict[event] = nr_event
+ nr_event += 1
+
+ nr_state = 0
+ for state in states:
+ states_dict[state] = nr_state
+ nr_state += 1
+
+ # declare the matrix....
+ matrix = [[ self.invalid_state_str for x in range(nr_event)] for y in range(nr_state)]
+
+ # and we are back! Let's fill the matrix
+ cursor = self.__get_cursor_begin_events()
+
+ while self.__dot_lines[cursor][1] == '"':
+ if self.__dot_lines[cursor].split()[1] == "->":
+ line = self.__dot_lines[cursor].split()
+ origin_state = line[0].replace('"','').replace(',','_')
+ dest_state = line[2].replace('"','').replace(',','_')
+ possible_events = line[-2].replace('"','').replace("\\n", " ")
+ for event in possible_events.split():
+ matrix[states_dict[origin_state]][events_dict[event]] = dest_state
+ cursor += 1
+
+ return matrix
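A minimal usage sketch for the Automata class above, assuming the dot2 package is importable (installed per the Makefile) and using a hypothetical model path:

    from dot2.automata import Automata

    # Parse a .dot model into states, events and the transition matrix.
    aut = Automata("wip.dot")
    print(aut.name)          # model name derived from the file name
    print(aut.states)        # initial state first, then the remaining states
    print(aut.events)        # sorted, de-duplicated event names
    print(aut.function[0])   # transitions leaving the initial state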
diff --git a/tools/verification/dot2/dot2c b/tools/verification/dot2/dot2c
new file mode 100644
index 000000000000..3fe89ab88b65
--- /dev/null
+++ b/tools/verification/dot2/dot2c
@@ -0,0 +1,26 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Copyright (C) 2019-2022 Red Hat, Inc. Daniel Bristot de Oliveira <bristot@kernel.org>
+#
+# dot2c: parse an automaton in dot file digraph format into a C representation
+#
+# This program was written in the development of this paper:
+# de Oliveira, D. B. and Cucinotta, T. and de Oliveira, R. S.
+# "Efficient Formal Verification for the Linux Kernel." International
+# Conference on Software Engineering and Formal Methods. Springer, Cham, 2019.
+#
+# For further information, see:
+# Documentation/trace/rv/deterministic_automata.rst
+
+if __name__ == '__main__':
+ from dot2 import dot2c
+ import argparse
+ import sys
+
+ parser = argparse.ArgumentParser(description='dot2c: converts a .dot file into a C structure')
+ parser.add_argument('dot_file', help='The dot file to be converted')
+
+ args = parser.parse_args()
+ d = dot2c.Dot2c(args.dot_file)
+ d.print_model_classic()
diff --git a/tools/verification/dot2/dot2c.py b/tools/verification/dot2/dot2c.py
new file mode 100644
index 000000000000..fa73353f7e56
--- /dev/null
+++ b/tools/verification/dot2/dot2c.py
@@ -0,0 +1,254 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Copyright (C) 2019-2022 Red Hat, Inc. Daniel Bristot de Oliveira <bristot@kernel.org>
+#
+# dot2c: parse an automaton in dot file digraph format into a C representation
+#
+# This program was written in the development of this paper:
+# de Oliveira, D. B. and Cucinotta, T. and de Oliveira, R. S.
+# "Efficient Formal Verification for the Linux Kernel." International
+# Conference on Software Engineering and Formal Methods. Springer, Cham, 2019.
+#
+# For further information, see:
+# Documentation/trace/rv/deterministic_automata.rst
+
+from dot2.automata import Automata
+
+class Dot2c(Automata):
+ enum_suffix = ""
+ enum_states_def = "states"
+ enum_events_def = "events"
+ struct_automaton_def = "automaton"
+ var_automaton_def = "aut"
+
+ def __init__(self, file_path):
+ super().__init__(file_path)
+ self.line_length = 100
+
+ def __buff_to_string(self, buff):
+ string = ""
+
+ for line in buff:
+ string = string + line + "\n"
+
+ # cut off the last \n
+ return string[:-1]
+
+ def __get_enum_states_content(self):
+ buff = []
+ buff.append("\t%s%s = 0," % (self.initial_state, self.enum_suffix))
+ for state in self.states:
+ if state != self.initial_state:
+ buff.append("\t%s%s," % (state, self.enum_suffix))
+ buff.append("\tstate_max%s" % (self.enum_suffix))
+
+ return buff
+
+ def get_enum_states_string(self):
+ buff = self.__get_enum_states_content()
+ return self.__buff_to_string(buff)
+
+ def format_states_enum(self):
+ buff = []
+ buff.append("enum %s {" % self.enum_states_def)
+ buff.append(self.get_enum_states_string())
+ buff.append("};\n")
+
+ return buff
+
+ def __get_enum_events_content(self):
+ buff = []
+ first = True
+ for event in self.events:
+ if first:
+ buff.append("\t%s%s = 0," % (event, self.enum_suffix))
+ first = False
+ else:
+ buff.append("\t%s%s," % (event, self.enum_suffix))
+
+ buff.append("\tevent_max%s" % self.enum_suffix)
+
+ return buff
+
+ def get_enum_events_string(self):
+ buff = self.__get_enum_events_content()
+ return self.__buff_to_string(buff)
+
+ def format_events_enum(self):
+ buff = []
+ buff.append("enum %s {" % self.enum_events_def)
+ buff.append(self.get_enum_events_string())
+ buff.append("};\n")
+
+ return buff
+
+ def get_minimun_type(self):
+ min_type = "unsigned char"
+
+ if self.states.__len__() > 255:
+ min_type = "unsigned short"
+
+ if self.states.__len__() > 65535:
+ min_type = "unsigned int"
+
+ if self.states.__len__() > 1000000:
+ raise Exception("Too many states: %d" % self.states.__len__())
+
+ return min_type
+
+ def format_automaton_definition(self):
+ min_type = self.get_minimun_type()
+ buff = []
+ buff.append("struct %s {" % self.struct_automaton_def)
+ buff.append("\tchar *state_names[state_max%s];" % (self.enum_suffix))
+ buff.append("\tchar *event_names[event_max%s];" % (self.enum_suffix))
+ buff.append("\t%s function[state_max%s][event_max%s];" % (min_type, self.enum_suffix, self.enum_suffix))
+ buff.append("\t%s initial_state;" % min_type)
+ buff.append("\tbool final_states[state_max%s];" % (self.enum_suffix))
+ buff.append("};\n")
+ return buff
+
+ def format_aut_init_header(self):
+ buff = []
+ buff.append("struct %s %s = {" % (self.struct_automaton_def, self.var_automaton_def))
+ return buff
+
+ def __get_string_vector_per_line_content(self, buff):
+ first = True
+ string = ""
+ for entry in buff:
+ if first:
+ string = string + "\t\t\"" + entry
+ first = False
+ else:
+ string = string + "\",\n\t\t\"" + entry
+ string = string + "\""
+
+ return string
+
+ def get_aut_init_events_string(self):
+ return self.__get_string_vector_per_line_content(self.events)
+
+ def get_aut_init_states_string(self):
+ return self.__get_string_vector_per_line_content(self.states)
+
+ def format_aut_init_events_string(self):
+ buff = []
+ buff.append("\t.event_names = {")
+ buff.append(self.get_aut_init_events_string())
+ buff.append("\t},")
+ return buff
+
+ def format_aut_init_states_string(self):
+ buff = []
+ buff.append("\t.state_names = {")
+ buff.append(self.get_aut_init_states_string())
+ buff.append("\t},")
+
+ return buff
+
+ def __get_max_strlen_of_states(self):
+ max_state_name = max(self.states, key = len).__len__()
+ return max(max_state_name, self.invalid_state_str.__len__())
+
+ def __get_state_string_length(self):
+ maxlen = self.__get_max_strlen_of_states() + self.enum_suffix.__len__()
+ return "%" + str(maxlen) + "s"
+
+ def get_aut_init_function(self):
+ nr_states = self.states.__len__()
+ nr_events = self.events.__len__()
+ buff = []
+
+ strformat = self.__get_state_string_length()
+
+ for x in range(nr_states):
+ line = "\t\t{ "
+ for y in range(nr_events):
+ next_state = self.function[x][y]
+ if next_state != self.invalid_state_str:
+ next_state = self.function[x][y] + self.enum_suffix
+
+ if y != nr_events-1:
+ line = line + strformat % next_state + ", "
+ else:
+ line = line + strformat % next_state + " },"
+ buff.append(line)
+
+ return self.__buff_to_string(buff)
+
+ def format_aut_init_function(self):
+ buff = []
+ buff.append("\t.function = {")
+ buff.append(self.get_aut_init_function())
+ buff.append("\t},")
+
+ return buff
+
+ def get_aut_init_initial_state(self):
+ return self.initial_state
+
+ def format_aut_init_initial_state(self):
+ buff = []
+ initial_state = self.get_aut_init_initial_state()
+ buff.append("\t.initial_state = " + initial_state + self.enum_suffix + ",")
+
+ return buff
+
+ def get_aut_init_final_states(self):
+ line = ""
+ first = True
+ for state in self.states:
+ if first == False:
+ line = line + ', '
+ else:
+ first = False
+
+ if self.final_states.__contains__(state):
+ line = line + '1'
+ else:
+ line = line + '0'
+ return line
+
+ def format_aut_init_final_states(self):
+ buff = []
+ buff.append("\t.final_states = { %s }," % self.get_aut_init_final_states())
+
+ return buff
+
+ def __get_automaton_initialization_footer_string(self):
+ footer = "};\n"
+ return footer
+
+ def format_aut_init_footer(self):
+ buff = []
+ buff.append(self.__get_automaton_initialization_footer_string())
+
+ return buff
+
+ def format_invalid_state(self):
+ buff = []
+ buff.append("#define %s state_max%s\n" % (self.invalid_state_str, self.enum_suffix))
+
+ return buff
+
+ def format_model(self):
+ buff = []
+ buff += self.format_states_enum()
+ buff += self.format_invalid_state()
+ buff += self.format_events_enum()
+ buff += self.format_automaton_definition()
+ buff += self.format_aut_init_header()
+ buff += self.format_aut_init_states_string()
+ buff += self.format_aut_init_events_string()
+ buff += self.format_aut_init_function()
+ buff += self.format_aut_init_initial_state()
+ buff += self.format_aut_init_final_states()
+ buff += self.format_aut_init_footer()
+
+ return buff
+
+ def print_model_classic(self):
+ buff = self.format_model()
+ print(self.__buff_to_string(buff))
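A minimal sketch of driving Dot2c programmatically rather than through the dot2c wrapper, again assuming dot2 is installed and "wip.dot" stands in for any model file:

    from dot2.dot2c import Dot2c

    d = Dot2c("wip.dot")
    # format_model() builds the states/events enums, the automaton struct
    # and its initializer; print_model_classic() writes them to stdout.
    d.print_model_classic()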
diff --git a/tools/verification/dot2/dot2k b/tools/verification/dot2/dot2k
new file mode 100644
index 000000000000..9dcd38abe20a
--- /dev/null
+++ b/tools/verification/dot2/dot2k
@@ -0,0 +1,47 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Copyright (C) 2019-2022 Red Hat, Inc. Daniel Bristot de Oliveira <bristot@kernel.org>
+#
+# dot2k: transform dot files into a monitor for the Linux kernel.
+#
+# For further information, see:
+# Documentation/trace/rv/da_monitor_synthesis.rst
+
+if __name__ == '__main__':
+ from dot2.dot2k import dot2k
+ import argparse
+ import ntpath
+ import os
+ import platform
+ import sys
+
+ parser = argparse.ArgumentParser(description='transform .dot file into kernel rv monitor')
+ parser.add_argument('-d', "--dot", dest="dot_file", required=True)
+ parser.add_argument('-t', "--monitor_type", dest="monitor_type", required=True)
+ parser.add_argument('-n', "--model_name", dest="model_name", required=False)
+ parser.add_argument("-D", "--description", dest="description", required=False)
+ params = parser.parse_args()
+
+ print("Opening and parsing the dot file %s" % params.dot_file)
+ try:
+ monitor=dot2k(params.dot_file, params.monitor_type)
+ except Exception as e:
+ print('Error: '+ str(e))
+ print("Sorry : :-(")
+ sys.exit(1)
+
+ # easier than using argparse action.
+ if params.model_name != None:
+ print(params.model_name)
+
+ print("Writing the monitor into the directory %s" % monitor.name)
+ monitor.print_files()
+ print("Almost done, checklist")
+ print(" - Edit the %s/%s.c to add the instrumentation" % (monitor.name, monitor.name))
+ print(" - Edit include/trace/events/rv.h to add the tracepoint entry")
+ print(" - Move it to the kernel's monitor directory")
+ print(" - Edit kernel/trace/rv/Makefile")
+ print(" - Edit kernel/trace/rv/Kconfig")
diff --git a/tools/verification/dot2/dot2k.py b/tools/verification/dot2/dot2k.py
new file mode 100644
index 000000000000..016550fccf1f
--- /dev/null
+++ b/tools/verification/dot2/dot2k.py
@@ -0,0 +1,177 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Copyright (C) 2019-2022 Red Hat, Inc. Daniel Bristot de Oliveira <bristot@kernel.org>
+#
+# dot2k: transform dot files into a monitor for the Linux kernel.
+#
+# For further information, see:
+# Documentation/trace/rv/da_monitor_synthesis.rst
+
+from dot2.dot2c import Dot2c
+import platform
+import os
+
+class dot2k(Dot2c):
+ monitor_types = { "global" : 1, "per_cpu" : 2, "per_task" : 3 }
+ monitor_templates_dir = "dot2k/rv_templates/"
+ monitor_type = "per_cpu"
+
+ def __init__(self, file_path, MonitorType):
+ super().__init__(file_path)
+
+ self.monitor_type = self.monitor_types.get(MonitorType)
+ if self.monitor_type == None:
+ raise Exception("Unknown monitor type: %s" % MonitorType)
+
+ self.monitor_type = MonitorType
+ self.__fill_rv_templates_dir()
+ self.main_c = self.__open_file(self.monitor_templates_dir + "main_" + MonitorType + ".c")
+ self.enum_suffix = "_%s" % self.name
+
+ def __fill_rv_templates_dir(self):
+
+ if os.path.exists(self.monitor_templates_dir) == True:
+ return
+
+ if platform.system() != "Linux":
+ raise Exception("I can only run on Linux.")
+
+ kernel_path = "/lib/modules/%s/build/tools/verification/dot2/dot2k_templates/" % (platform.release())
+
+ if os.path.exists(kernel_path) == True:
+ self.monitor_templates_dir = kernel_path
+ return
+
+ if os.path.exists("/usr/share/dot2/dot2k_templates/") == True:
+ self.monitor_templates_dir = "/usr/share/dot2/dot2k_templates/"
+ return
+
+ raise Exception("Could not find the template directory, do you have the kernel source installed?")
+
+
+ def __open_file(self, path):
+ try:
+ fd = open(path)
+ except OSError:
+ raise Exception("Cannot open the file: %s" % path)
+
+ content = fd.read()
+
+ return content
+
+ def __buff_to_string(self, buff):
+ string = ""
+
+ for line in buff:
+ string = string + line + "\n"
+
+ # cut off the last \n
+ return string[:-1]
+
+ def fill_tracepoint_handlers_skel(self):
+ buff = []
+ for event in self.events:
+ buff.append("static void handle_%s(void *data, /* XXX: fill header */)" % event)
+ buff.append("{")
+ if self.monitor_type == "per_task":
+ buff.append("\tstruct task_struct *p = /* XXX: how do I get p? */;");
+ buff.append("\tda_handle_event_%s(p, %s%s);" % (self.name, event, self.enum_suffix));
+ else:
+ buff.append("\tda_handle_event_%s(%s%s);" % (self.name, event, self.enum_suffix));
+ buff.append("}")
+ buff.append("")
+ return self.__buff_to_string(buff)
+
+ def fill_tracepoint_attach_probe(self):
+ buff = []
+ for event in self.events:
+ buff.append("\trv_attach_trace_probe(\"%s\", /* XXX: tracepoint */, handle_%s);" % (self.name, event))
+ return self.__buff_to_string(buff)
+
+ def fill_tracepoint_detach_helper(self):
+ buff = []
+ for event in self.events:
+ buff.append("\trv_detach_trace_probe(\"%s\", /* XXX: tracepoint */, handle_%s);" % (self.name, event))
+ return self.__buff_to_string(buff)
+
+ def fill_main_c(self):
+ main_c = self.main_c
+ min_type = self.get_minimun_type()
+ nr_events = self.events.__len__()
+ tracepoint_handlers = self.fill_tracepoint_handlers_skel()
+ tracepoint_attach = self.fill_tracepoint_attach_probe()
+ tracepoint_detach = self.fill_tracepoint_detach_helper()
+
+ main_c = main_c.replace("MIN_TYPE", min_type)
+ main_c = main_c.replace("MODEL_NAME", self.name)
+ main_c = main_c.replace("NR_EVENTS", str(nr_events))
+ main_c = main_c.replace("TRACEPOINT_HANDLERS_SKEL", tracepoint_handlers)
+ main_c = main_c.replace("TRACEPOINT_ATTACH", tracepoint_attach)
+ main_c = main_c.replace("TRACEPOINT_DETACH", tracepoint_detach)
+
+ return main_c
+
+ def fill_model_h_header(self):
+ buff = []
+ buff.append("/*")
+ buff.append(" * Automatically generated C representation of %s automaton" % (self.name))
+ buff.append(" * For further information about this format, see kernel documentation:")
+ buff.append(" * Documentation/trace/rv/deterministic_automata.rst")
+ buff.append(" */")
+ buff.append("")
+
+ return buff
+
+ def fill_model_h(self):
+ #
+ # Adjust the definition names
+ #
+ self.enum_states_def = "states_%s" % self.name
+ self.enum_events_def = "events_%s" % self.name
+ self.struct_automaton_def = "automaton_%s" % self.name
+ self.var_automaton_def = "automaton_%s" % self.name
+
+ buff = self.fill_model_h_header()
+ buff += self.format_model()
+
+ return self.__buff_to_string(buff)
+
+ def __create_directory(self):
+ try:
+ os.mkdir(self.name)
+ except FileExistsError:
+ return
+ except:
+ print("Fail creating the output dir: %s" % self.name)
+
+ def __create_file(self, file_name, content):
+ path = "%s/%s" % (self.name, file_name)
+ try:
+ file = open(path, 'w')
+ except FileExistsError:
+ return
+ except:
+ print("Fail creating file: %s" % path)
+
+ file.write(content)
+
+ file.close()
+
+ def __get_main_name(self):
+ path = "%s/%s" % (self.name, "main.c")
+ if os.path.exists(path) == False:
+ return "main.c"
+ return "__main.c"
+
+ def print_files(self):
+ main_c = self.fill_main_c()
+ model_h = self.fill_model_h()
+
+ self.__create_directory()
+
+ path = "%s.c" % self.name
+ self.__create_file(path, main_c)
+
+ path = "%s.h" % self.name
+ self.__create_file(path, model_h)
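A minimal sketch of generating a monitor skeleton with the dot2k class above, assuming the dot2k_templates directory is reachable (kernel source tree or /usr/share/dot2/dot2k_templates) and using a hypothetical model file:

    from dot2.dot2k import dot2k

    monitor = dot2k("wip.dot", "per_cpu")
    # print_files() drives fill_main_c()/fill_model_h() and writes
    # <model>.c and <model>.h into a ./<model>/ directory.
    monitor.print_files()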
diff --git a/tools/verification/dot2/dot2k_templates/main_global.c b/tools/verification/dot2/dot2k_templates/main_global.c
new file mode 100644
index 000000000000..f4b712dbc92e
--- /dev/null
+++ b/tools/verification/dot2/dot2k_templates/main_global.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/ftrace.h>
+#include <linux/tracepoint.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/rv.h>
+#include <rv/instrumentation.h>
+#include <rv/da_monitor.h>
+
+#define MODULE_NAME "MODEL_NAME"
+
+/*
+ * XXX: include required tracepoint headers, e.g.,
+ * #include <trace/events/sched.h>
+ */
+#include <trace/events/rv.h>
+
+/*
+ * This is the self-generated part of the monitor. Generally, there is no need
+ * to touch this section.
+ */
+#include "MODEL_NAME.h"
+
+/*
+ * Declare the deterministic automata monitor.
+ *
+ * The rv monitor reference is needed for the monitor declaration.
+ */
+struct rv_monitor rv_MODEL_NAME;
+DECLARE_DA_MON_GLOBAL(MODEL_NAME, MIN_TYPE);
+
+/*
+ * This is the instrumentation part of the monitor.
+ *
+ * This is the section where manual work is required. Here the kernel events
+ * are translated into the model's events.
+ *
+ */
+TRACEPOINT_HANDLERS_SKEL
+static int enable_MODEL_NAME(void)
+{
+ int retval;
+
+ retval = da_monitor_init_MODEL_NAME();
+ if (retval)
+ return retval;
+
+TRACEPOINT_ATTACH
+
+ return 0;
+}
+
+static void disable_MODEL_NAME(void)
+{
+ rv_MODEL_NAME.enabled = 0;
+
+TRACEPOINT_DETACH
+
+ da_monitor_destroy_MODEL_NAME();
+}
+
+/*
+ * This is the monitor register section.
+ */
+struct rv_monitor rv_MODEL_NAME = {
+ .name = "MODEL_NAME",
+ .description = "auto-generated MODEL_NAME",
+ .enable = enable_MODEL_NAME,
+ .disable = disable_MODEL_NAME,
+ .reset = da_monitor_reset_all_MODEL_NAME,
+ .enabled = 0,
+};
+
+static int register_MODEL_NAME(void)
+{
+ rv_register_monitor(&rv_MODEL_NAME);
+ return 0;
+}
+
+static void unregister_MODEL_NAME(void)
+{
+ rv_unregister_monitor(&rv_MODEL_NAME);
+}
+
+module_init(register_MODEL_NAME);
+module_exit(unregister_MODEL_NAME);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("dot2k: auto-generated");
+MODULE_DESCRIPTION("MODEL_NAME");
diff --git a/tools/verification/dot2/dot2k_templates/main_per_cpu.c b/tools/verification/dot2/dot2k_templates/main_per_cpu.c
new file mode 100644
index 000000000000..4080d1ca3354
--- /dev/null
+++ b/tools/verification/dot2/dot2k_templates/main_per_cpu.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/ftrace.h>
+#include <linux/tracepoint.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/rv.h>
+#include <rv/instrumentation.h>
+#include <rv/da_monitor.h>
+
+#define MODULE_NAME "MODEL_NAME"
+
+/*
+ * XXX: include required tracepoint headers, e.g.,
+ * #include <trace/events/sched.h>
+ */
+#include <trace/events/rv.h>
+
+/*
+ * This is the self-generated part of the monitor. Generally, there is no need
+ * to touch this section.
+ */
+#include "MODEL_NAME.h"
+
+/*
+ * Declare the deterministic automata monitor.
+ *
+ * The rv monitor reference is needed for the monitor declaration.
+ */
+struct rv_monitor rv_MODEL_NAME;
+DECLARE_DA_MON_PER_CPU(MODEL_NAME, MIN_TYPE);
+
+/*
+ * This is the instrumentation part of the monitor.
+ *
+ * This is the section where manual work is required. Here the kernel events
+ * are translated into the model's events.
+ *
+ */
+TRACEPOINT_HANDLERS_SKEL
+static int enable_MODEL_NAME(void)
+{
+ int retval;
+
+ retval = da_monitor_init_MODEL_NAME();
+ if (retval)
+ return retval;
+
+TRACEPOINT_ATTACH
+
+ return 0;
+}
+
+static void disable_MODEL_NAME(void)
+{
+ rv_MODEL_NAME.enabled = 0;
+
+TRACEPOINT_DETACH
+
+ da_monitor_destroy_MODEL_NAME();
+}
+
+/*
+ * This is the monitor register section.
+ */
+struct rv_monitor rv_MODEL_NAME = {
+ .name = "MODEL_NAME",
+ .description = "auto-generated MODEL_NAME",
+ .enable = enable_MODEL_NAME,
+ .disable = disable_MODEL_NAME,
+ .reset = da_monitor_reset_all_MODEL_NAME,
+ .enabled = 0,
+};
+
+static int register_MODEL_NAME(void)
+{
+ rv_register_monitor(&rv_MODEL_NAME);
+ return 0;
+}
+
+static void unregister_MODEL_NAME(void)
+{
+ rv_unregister_monitor(&rv_MODEL_NAME);
+}
+
+module_init(register_MODEL_NAME);
+module_exit(unregister_MODEL_NAME);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("dot2k: auto-generated");
+MODULE_DESCRIPTION("MODEL_NAME");
diff --git a/tools/verification/dot2/dot2k_templates/main_per_task.c b/tools/verification/dot2/dot2k_templates/main_per_task.c
new file mode 100644
index 000000000000..89197175384f
--- /dev/null
+++ b/tools/verification/dot2/dot2k_templates/main_per_task.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/ftrace.h>
+#include <linux/tracepoint.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/rv.h>
+#include <rv/instrumentation.h>
+#include <rv/da_monitor.h>
+
+#define MODULE_NAME "MODEL_NAME"
+
+/*
+ * XXX: include required tracepoint headers, e.g.,
+ * #include <trace/events/sched.h>
+ */
+#include <trace/events/rv.h>
+
+/*
+ * This is the self-generated part of the monitor. Generally, there is no need
+ * to touch this section.
+ */
+#include "MODEL_NAME.h"
+
+/*
+ * Declare the deterministic automata monitor.
+ *
+ * The rv monitor reference is needed for the monitor declaration.
+ */
+struct rv_monitor rv_MODEL_NAME;
+DECLARE_DA_MON_PER_TASK(MODEL_NAME, MIN_TYPE);
+
+/*
+ * This is the instrumentation part of the monitor.
+ *
+ * This is the section where manual work is required. Here the kernel events
+ * are translated into the model's events.
+ *
+ */
+TRACEPOINT_HANDLERS_SKEL
+static int enable_MODEL_NAME(void)
+{
+ int retval;
+
+ retval = da_monitor_init_MODEL_NAME();
+ if (retval)
+ return retval;
+
+TRACEPOINT_ATTACH
+
+ return 0;
+}
+
+static void disable_MODEL_NAME(void)
+{
+ rv_MODEL_NAME.enabled = 0;
+
+TRACEPOINT_DETACH
+
+ da_monitor_destroy_MODEL_NAME();
+}
+
+/*
+ * This is the monitor register section.
+ */
+struct rv_monitor rv_MODEL_NAME = {
+ .name = "MODEL_NAME",
+ .description = "auto-generated MODEL_NAME",
+ .enable = enable_MODEL_NAME,
+ .disable = disable_MODEL_NAME,
+ .reset = da_monitor_reset_all_MODEL_NAME,
+ .enabled = 0,
+};
+
+static int register_MODEL_NAME(void)
+{
+ rv_register_monitor(&rv_MODEL_NAME);
+ return 0;
+}
+
+static void unregister_MODEL_NAME(void)
+{
+ rv_unregister_monitor(&rv_MODEL_NAME);
+}
+
+module_init(register_MODEL_NAME);
+module_exit(unregister_MODEL_NAME);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("dot2k: auto-generated");
+MODULE_DESCRIPTION("MODEL_NAME");
diff --git a/tools/verification/models/wip.dot b/tools/verification/models/wip.dot
new file mode 100644
index 000000000000..2a53a9700a89
--- /dev/null
+++ b/tools/verification/models/wip.dot
@@ -0,0 +1,16 @@
+digraph state_automaton {
+ {node [shape = circle] "non_preemptive"};
+ {node [shape = plaintext, style=invis, label=""] "__init_preemptive"};
+ {node [shape = doublecircle] "preemptive"};
+ {node [shape = circle] "preemptive"};
+ "__init_preemptive" -> "preemptive";
+ "non_preemptive" [label = "non_preemptive"];
+ "non_preemptive" -> "non_preemptive" [ label = "sched_waking" ];
+ "non_preemptive" -> "preemptive" [ label = "preempt_enable" ];
+ "preemptive" [label = "preemptive"];
+ "preemptive" -> "non_preemptive" [ label = "preempt_disable" ];
+ { rank = min ;
+ "__init_preemptive";
+ "preemptive";
+ }
+}
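Fed to the Automata class earlier in this diff, this model should parse to roughly the following (a sketch of the expected values, derived from the parsing rules shown above):

    from dot2.automata import Automata

    aut = Automata("wip.dot")
    # aut.initial_state -> 'preemptive'
    # aut.states        -> ['preemptive', 'non_preemptive']
    # aut.events        -> ['preempt_disable', 'preempt_enable', 'sched_waking']
    # aut.function[0]   -> ['non_preemptive', 'INVALID_STATE', 'INVALID_STATE']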
diff --git a/tools/verification/models/wwnr.dot b/tools/verification/models/wwnr.dot
new file mode 100644
index 000000000000..1b206e83129c
--- /dev/null
+++ b/tools/verification/models/wwnr.dot
@@ -0,0 +1,16 @@
+digraph state_automaton {
+ {node [shape = plaintext, style=invis, label=""] "__init_not_running"};
+ {node [shape = ellipse] "not_running"};
+ {node [shape = plaintext] "not_running"};
+ {node [shape = plaintext] "running"};
+ "__init_not_running" -> "not_running";
+ "not_running" [label = "not_running", color = green3];
+ "not_running" -> "not_running" [ label = "wakeup" ];
+ "not_running" -> "running" [ label = "switch_in" ];
+ "running" [label = "running"];
+ "running" -> "not_running" [ label = "switch_out" ];
+ { rank = min ;
+ "__init_not_running";
+ "not_running";
+ }
+}
diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h
index 0b493542e61a..21593bf97755 100644
--- a/tools/virtio/linux/kernel.h
+++ b/tools/virtio/linux/kernel.h
@@ -29,7 +29,6 @@
#define READ 0
#define WRITE 1
-typedef unsigned long long phys_addr_t;
typedef unsigned long long dma_addr_t;
typedef size_t __kernel_size_t;
typedef unsigned int __wsum;
@@ -136,6 +135,7 @@ static inline void *krealloc_array(void *p, size_t new_n, size_t new_size, gfp_t
#endif
#define dev_err(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__)
#define dev_warn(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__)
+#define dev_warn_once(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__)
#define min(x, y) ({ \
typeof(x) _min1 = (x); \
diff --git a/tools/virtio/linux/vringh.h b/tools/virtio/linux/vringh.h
index 9348957be56e..e11c6aece734 100644
--- a/tools/virtio/linux/vringh.h
+++ b/tools/virtio/linux/vringh.h
@@ -1 +1,2 @@
+#include <limits.h>
#include "../../../include/linux/vringh.h"
diff --git a/tools/virtio/virtio_test.c b/tools/virtio/virtio_test.c
index 23f142af544a..86a410ddcedd 100644
--- a/tools/virtio/virtio_test.c
+++ b/tools/virtio/virtio_test.c
@@ -102,8 +102,8 @@ static void vq_reset(struct vq_info *info, int num, struct virtio_device *vdev)
memset(info->ring, 0, vring_size(num, 4096));
vring_init(&info->vring, num, info->ring, 4096);
- info->vq = __vring_new_virtqueue(info->idx, info->vring, vdev, true,
- false, vq_notify, vq_callback, "test");
+ info->vq = vring_new_virtqueue(info->idx, num, 4096, vdev, true, false,
+ info->ring, vq_notify, vq_callback, "test");
assert(info->vq);
info->vq->priv = info;
}
diff --git a/tools/vm/page_owner_sort.c b/tools/vm/page_owner_sort.c
index c149427eb1c9..ec2e67c85b84 100644
--- a/tools/vm/page_owner_sort.c
+++ b/tools/vm/page_owner_sort.c
@@ -8,7 +8,7 @@
* Or sort by total memory:
* ./page_owner_sort -m page_owner_full.txt sorted_page_owner.txt
*
- * See Documentation/vm/page_owner.rst
+ * See Documentation/mm/page_owner.rst
*/
#include <stdio.h>
@@ -470,23 +470,23 @@ static bool match_str_list(const char *str, char **list, int list_size)
static bool is_need(char *buf)
{
- if ((filter & FILTER_UNRELEASE) && get_free_ts_nsec(buf) != 0)
- return false;
- if ((filter & FILTER_PID) && !match_num_list(get_pid(buf), fc.pids, fc.pids_size))
- return false;
- if ((filter & FILTER_TGID) &&
- !match_num_list(get_tgid(buf), fc.tgids, fc.tgids_size))
- return false;
+ if ((filter & FILTER_UNRELEASE) && get_free_ts_nsec(buf) != 0)
+ return false;
+ if ((filter & FILTER_PID) && !match_num_list(get_pid(buf), fc.pids, fc.pids_size))
+ return false;
+ if ((filter & FILTER_TGID) &&
+ !match_num_list(get_tgid(buf), fc.tgids, fc.tgids_size))
+ return false;
- char *comm = get_comm(buf);
+ char *comm = get_comm(buf);
- if ((filter & FILTER_COMM) &&
- !match_str_list(comm, fc.comms, fc.comms_size)) {
- free(comm);
- return false;
- }
+ if ((filter & FILTER_COMM) &&
+ !match_str_list(comm, fc.comms, fc.comms_size)) {
free(comm);
- return true;
+ return false;
+ }
+ free(comm);
+ return true;
}
static void add_list(char *buf, int len, char *ext_buf)
diff --git a/tools/vm/slabinfo.c b/tools/vm/slabinfo.c
index 5b98f3ee58a5..0fffaeedee76 100644
--- a/tools/vm/slabinfo.c
+++ b/tools/vm/slabinfo.c
@@ -125,7 +125,7 @@ static void usage(void)
"-n|--numa Show NUMA information\n"
"-N|--lines=K Show the first K slabs\n"
"-o|--ops Show kmem_cache_ops\n"
- "-P|--partial Sort by number of partial slabs\n"
+ "-P|--partial Sort by number of partial slabs\n"
"-r|--report Detailed report on single slabs\n"
"-s|--shrink Shrink slabs\n"
"-S|--Size Sort by size\n"
@@ -1067,15 +1067,27 @@ static void sort_slabs(void)
for (s2 = s1 + 1; s2 < slabinfo + slabs; s2++) {
int result;
- if (sort_size)
- result = slab_size(s1) < slab_size(s2);
- else if (sort_active)
- result = slab_activity(s1) < slab_activity(s2);
- else if (sort_loss)
- result = slab_waste(s1) < slab_waste(s2);
- else if (sort_partial)
- result = s1->partial < s2->partial;
- else
+ if (sort_size) {
+ if (slab_size(s1) == slab_size(s2))
+ result = strcasecmp(s1->name, s2->name);
+ else
+ result = slab_size(s1) < slab_size(s2);
+ } else if (sort_active) {
+ if (slab_activity(s1) == slab_activity(s2))
+ result = strcasecmp(s1->name, s2->name);
+ else
+ result = slab_activity(s1) < slab_activity(s2);
+ } else if (sort_loss) {
+ if (slab_waste(s1) == slab_waste(s2))
+ result = strcasecmp(s1->name, s2->name);
+ else
+ result = slab_waste(s1) < slab_waste(s2);
+ } else if (sort_partial) {
+ if (s1->partial == s2->partial)
+ result = strcasecmp(s1->name, s2->name);
+ else
+ result = s1->partial < s2->partial;
+ } else
result = strcasecmp(s1->name, s2->name);
if (show_inverted)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 32896c845ffe..584a5bab3af3 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -484,6 +484,10 @@ static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
vcpu->ready = false;
preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
vcpu->last_used_slot = NULL;
+
+ /* Fill the stats id string for the vcpu */
+ snprintf(vcpu->stats_id, sizeof(vcpu->stats_id), "kvm-%d/vcpu-%d",
+ task_pid_nr(current), id);
}
static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
@@ -698,30 +702,31 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
/*
* .change_pte() must be surrounded by .invalidate_range_{start,end}().
- * If mmu_notifier_count is zero, then no in-progress invalidations,
- * including this one, found a relevant memslot at start(); rechecking
- * memslots here is unnecessary. Note, a false positive (count elevated
- * by a different invalidation) is sub-optimal but functionally ok.
+ * If mmu_invalidate_in_progress is zero, then no in-progress
+ * invalidations, including this one, found a relevant memslot at
+ * start(); rechecking memslots here is unnecessary. Note, a false
+ * positive (count elevated by a different invalidation) is sub-optimal
+ * but functionally ok.
*/
WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count));
- if (!READ_ONCE(kvm->mmu_notifier_count))
+ if (!READ_ONCE(kvm->mmu_invalidate_in_progress))
return;
kvm_handle_hva_range(mn, address, address + 1, pte, kvm_set_spte_gfn);
}
-void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start,
- unsigned long end)
+void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
+ unsigned long end)
{
/*
* The count increase must become visible at unlock time as no
* spte can be established without taking the mmu_lock and
* count is also read inside the mmu_lock critical section.
*/
- kvm->mmu_notifier_count++;
- if (likely(kvm->mmu_notifier_count == 1)) {
- kvm->mmu_notifier_range_start = start;
- kvm->mmu_notifier_range_end = end;
+ kvm->mmu_invalidate_in_progress++;
+ if (likely(kvm->mmu_invalidate_in_progress == 1)) {
+ kvm->mmu_invalidate_range_start = start;
+ kvm->mmu_invalidate_range_end = end;
} else {
/*
* Fully tracking multiple concurrent ranges has diminishing
@@ -732,10 +737,10 @@ void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start,
* accumulate and persist until all outstanding invalidates
* complete.
*/
- kvm->mmu_notifier_range_start =
- min(kvm->mmu_notifier_range_start, start);
- kvm->mmu_notifier_range_end =
- max(kvm->mmu_notifier_range_end, end);
+ kvm->mmu_invalidate_range_start =
+ min(kvm->mmu_invalidate_range_start, start);
+ kvm->mmu_invalidate_range_end =
+ max(kvm->mmu_invalidate_range_end, end);
}
}
@@ -748,7 +753,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
.end = range->end,
.pte = __pte(0),
.handler = kvm_unmap_gfn_range,
- .on_lock = kvm_inc_notifier_count,
+ .on_lock = kvm_mmu_invalidate_begin,
.on_unlock = kvm_arch_guest_memory_reclaimed,
.flush_on_ret = true,
.may_block = mmu_notifier_range_blockable(range),
@@ -759,7 +764,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
/*
* Prevent memslot modification between range_start() and range_end()
* so that conditionally locking provides the same result in both
- * functions. Without that guarantee, the mmu_notifier_count
+ * functions. Without that guarantee, the mmu_invalidate_in_progress
* adjustments will be imbalanced.
*
* Pairs with the decrement in range_end().
@@ -775,7 +780,8 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
* any given time, and the caches themselves can check for hva overlap,
* i.e. don't need to rely on memslot overlap checks for performance.
* Because this runs without holding mmu_lock, the pfn caches must use
- * mn_active_invalidate_count (see above) instead of mmu_notifier_count.
+ * mn_active_invalidate_count (see above) instead of
+ * mmu_invalidate_in_progress.
*/
gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end,
hva_range.may_block);
@@ -785,22 +791,22 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
return 0;
}
-void kvm_dec_notifier_count(struct kvm *kvm, unsigned long start,
- unsigned long end)
+void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start,
+ unsigned long end)
{
/*
* This sequence increase will notify the kvm page fault that
* the page that is going to be mapped in the spte could have
* been freed.
*/
- kvm->mmu_notifier_seq++;
+ kvm->mmu_invalidate_seq++;
smp_wmb();
/*
* The above sequence increase must be visible before the
* below count decrease, which is ensured by the smp_wmb above
- * in conjunction with the smp_rmb in mmu_notifier_retry().
+ * in conjunction with the smp_rmb in mmu_invalidate_retry().
*/
- kvm->mmu_notifier_count--;
+ kvm->mmu_invalidate_in_progress--;
}
static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
@@ -812,7 +818,7 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
.end = range->end,
.pte = __pte(0),
.handler = (void *)kvm_null_fn,
- .on_lock = kvm_dec_notifier_count,
+ .on_lock = kvm_mmu_invalidate_end,
.on_unlock = (void *)kvm_null_fn,
.flush_on_ret = false,
.may_block = mmu_notifier_range_blockable(range),
@@ -833,7 +839,7 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
if (wake)
rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait);
- BUG_ON(kvm->mmu_notifier_count < 0);
+ BUG_ON(kvm->mmu_invalidate_in_progress < 0);
}
static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
@@ -1017,21 +1023,21 @@ static void kvm_destroy_vm_debugfs(struct kvm *kvm)
}
}
-static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
+static int kvm_create_vm_debugfs(struct kvm *kvm, const char *fdname)
{
static DEFINE_MUTEX(kvm_debugfs_lock);
struct dentry *dent;
char dir_name[ITOA_MAX_LEN * 2];
struct kvm_stat_data *stat_data;
const struct _kvm_stats_desc *pdesc;
- int i, ret;
+ int i, ret = -ENOMEM;
int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
kvm_vcpu_stats_header.num_desc;
if (!debugfs_initialized())
return 0;
- snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd);
+ snprintf(dir_name, sizeof(dir_name), "%d-%s", task_pid_nr(current), fdname);
mutex_lock(&kvm_debugfs_lock);
dent = debugfs_lookup(dir_name, kvm_debugfs_dir);
if (dent) {
@@ -1050,13 +1056,13 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
sizeof(*kvm->debugfs_stat_data),
GFP_KERNEL_ACCOUNT);
if (!kvm->debugfs_stat_data)
- return -ENOMEM;
+ goto out_err;
for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
pdesc = &kvm_vm_stats_desc[i];
stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
if (!stat_data)
- return -ENOMEM;
+ goto out_err;
stat_data->kvm = kvm;
stat_data->desc = pdesc;
@@ -1071,7 +1077,7 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
pdesc = &kvm_vcpu_stats_desc[i];
stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
if (!stat_data)
- return -ENOMEM;
+ goto out_err;
stat_data->kvm = kvm;
stat_data->desc = pdesc;
@@ -1083,12 +1089,13 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
}
ret = kvm_arch_create_vm_debugfs(kvm);
- if (ret) {
- kvm_destroy_vm_debugfs(kvm);
- return i;
- }
+ if (ret)
+ goto out_err;
return 0;
+out_err:
+ kvm_destroy_vm_debugfs(kvm);
+ return ret;
}
/*
@@ -1119,7 +1126,7 @@ int __weak kvm_arch_create_vm_debugfs(struct kvm *kvm)
return 0;
}
-static struct kvm *kvm_create_vm(unsigned long type)
+static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
{
struct kvm *kvm = kvm_arch_alloc_vm();
struct kvm_memslots *slots;
@@ -1129,6 +1136,9 @@ static struct kvm *kvm_create_vm(unsigned long type)
if (!kvm)
return ERR_PTR(-ENOMEM);
+ /* KVM is pinned via open("/dev/kvm"), the fd passed to this ioctl(). */
+ __module_get(kvm_chardev_ops.owner);
+
KVM_MMU_LOCK_INIT(kvm);
mmgrab(current->mm);
kvm->mm = current->mm;
@@ -1155,6 +1165,9 @@ static struct kvm *kvm_create_vm(unsigned long type)
*/
kvm->debugfs_dentry = ERR_PTR(-ENOENT);
+ snprintf(kvm->stats_id, sizeof(kvm->stats_id), "kvm-%d",
+ task_pid_nr(current));
+
if (init_srcu_struct(&kvm->srcu))
goto out_err_no_srcu;
if (init_srcu_struct(&kvm->irq_srcu))
@@ -1203,6 +1216,14 @@ static struct kvm *kvm_create_vm(unsigned long type)
if (r)
goto out_err_no_mmu_notifier;
+ r = kvm_coalesced_mmio_init(kvm);
+ if (r < 0)
+ goto out_no_coalesced_mmio;
+
+ r = kvm_create_vm_debugfs(kvm, fdname);
+ if (r)
+ goto out_err_no_debugfs;
+
r = kvm_arch_post_init_vm(kvm);
if (r)
goto out_err;
@@ -1214,19 +1235,13 @@ static struct kvm *kvm_create_vm(unsigned long type)
preempt_notifier_inc();
kvm_init_pm_notifier(kvm);
- /*
- * When the fd passed to this ioctl() is opened it pins the module,
- * but try_module_get() also prevents getting a reference if the module
- * is in MODULE_STATE_GOING (e.g. if someone ran "rmmod --wait").
- */
- if (!try_module_get(kvm_chardev_ops.owner)) {
- r = -ENODEV;
- goto out_err;
- }
-
return kvm;
out_err:
+ kvm_destroy_vm_debugfs(kvm);
+out_err_no_debugfs:
+ kvm_coalesced_mmio_free(kvm);
+out_no_coalesced_mmio:
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
if (kvm->mmu_notifier.ops)
mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
@@ -1245,6 +1260,7 @@ out_err_no_irq_srcu:
out_err_no_srcu:
kvm_arch_free_vm(kvm);
mmdrop(current->mm);
+ module_put(kvm_chardev_ops.owner);
return ERR_PTR(r);
}
@@ -2502,7 +2518,7 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
{
unsigned int flags = FOLL_HWPOISON;
struct page *page;
- int npages = 0;
+ int npages;
might_sleep();
@@ -3916,10 +3932,6 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
if (r)
goto unlock_vcpu_destroy;
- /* Fill the stats id string for the vcpu */
- snprintf(vcpu->stats_id, sizeof(vcpu->stats_id), "kvm-%d/vcpu-%d",
- task_pid_nr(current), id);
-
/* Now it's all set up, let userspace reach it */
kvm_get_kvm(kvm);
r = create_vcpu_fd(vcpu);
@@ -4368,7 +4380,7 @@ void kvm_unregister_device_ops(u32 type)
static int kvm_ioctl_create_device(struct kvm *kvm,
struct kvm_create_device *cd)
{
- const struct kvm_device_ops *ops = NULL;
+ const struct kvm_device_ops *ops;
struct kvm_device *dev;
bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
int type;
@@ -4886,28 +4898,25 @@ EXPORT_SYMBOL_GPL(file_is_kvm);
static int kvm_dev_ioctl_create_vm(unsigned long type)
{
- int r;
+ char fdname[ITOA_MAX_LEN + 1];
+ int r, fd;
struct kvm *kvm;
struct file *file;
- kvm = kvm_create_vm(type);
- if (IS_ERR(kvm))
- return PTR_ERR(kvm);
-#ifdef CONFIG_KVM_MMIO
- r = kvm_coalesced_mmio_init(kvm);
- if (r < 0)
- goto put_kvm;
-#endif
- r = get_unused_fd_flags(O_CLOEXEC);
- if (r < 0)
- goto put_kvm;
+ fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fd < 0)
+ return fd;
+
+ snprintf(fdname, sizeof(fdname), "%d", fd);
- snprintf(kvm->stats_id, sizeof(kvm->stats_id),
- "kvm-%d", task_pid_nr(current));
+ kvm = kvm_create_vm(type, fdname);
+ if (IS_ERR(kvm)) {
+ r = PTR_ERR(kvm);
+ goto put_fd;
+ }
file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
if (IS_ERR(file)) {
- put_unused_fd(r);
r = PTR_ERR(file);
goto put_kvm;
}
@@ -4918,18 +4927,15 @@ static int kvm_dev_ioctl_create_vm(unsigned long type)
* cases it will be called by the final fput(file) and will take
* care of doing kvm_put_kvm(kvm).
*/
- if (kvm_create_vm_debugfs(kvm, r) < 0) {
- put_unused_fd(r);
- fput(file);
- return -ENOMEM;
- }
kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm);
- fd_install(r, file);
- return r;
+ fd_install(fd, file);
+ return fd;
put_kvm:
kvm_put_kvm(kvm);
+put_fd:
+ put_unused_fd(fd);
return r;
}
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index ab519f72f2cd..68ff41d39545 100644
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -112,27 +112,28 @@ static inline bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_s
{
/*
* mn_active_invalidate_count acts for all intents and purposes
- * like mmu_notifier_count here; but the latter cannot be used
- * here because the invalidation of caches in the mmu_notifier
- * event occurs _before_ mmu_notifier_count is elevated.
+ * like mmu_invalidate_in_progress here; but the latter cannot
+ * be used here because the invalidation of caches in the
+ * mmu_notifier event occurs _before_ mmu_invalidate_in_progress
+ * is elevated.
*
* Note, it does not matter that mn_active_invalidate_count
* is not protected by gpc->lock. It is guaranteed to
* be elevated before the mmu_notifier acquires gpc->lock, and
- * isn't dropped until after mmu_notifier_seq is updated.
+ * isn't dropped until after mmu_invalidate_seq is updated.
*/
if (kvm->mn_active_invalidate_count)
return true;
/*
* Ensure mn_active_invalidate_count is read before
- * mmu_notifier_seq. This pairs with the smp_wmb() in
+ * mmu_invalidate_seq. This pairs with the smp_wmb() in
* mmu_notifier_invalidate_range_end() to guarantee either the
* old (non-zero) value of mn_active_invalidate_count or the
- * new (incremented) value of mmu_notifier_seq is observed.
+ * new (incremented) value of mmu_invalidate_seq is observed.
*/
smp_rmb();
- return kvm->mmu_notifier_seq != mmu_seq;
+ return kvm->mmu_invalidate_seq != mmu_seq;
}
static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
@@ -155,7 +156,7 @@ static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
gpc->valid = false;
do {
- mmu_seq = kvm->mmu_notifier_seq;
+ mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();
write_unlock_irq(&gpc->lock);